From e02cda008591317b1625707ff8e115a4841aa889 Mon Sep 17 00:00:00 2001
From: Timos Ampelikiotis
Date: Tue, 10 Oct 2023 11:40:56 +0000
Subject: Introduce Virtio-loopback epsilon release

The epsilon release introduces a new compatibility layer which makes the
virtio-loopback design work with QEMU and the rust-vmm vhost-user backend
without requiring any changes.

Signed-off-by: Timos Ampelikiotis
Change-Id: I52e57563e08a7d0bdc002f8e928ee61ba0c53dd9
---
 hw/9pfs/9p-local.c | 1601 +++++ hw/9pfs/9p-local.h | 20 + hw/9pfs/9p-posix-acl.c | 161 + hw/9pfs/9p-proxy.c | 1243 ++++++ hw/9pfs/9p-proxy.h | 96 + hw/9pfs/9p-synth.c | 641 +++ hw/9pfs/9p-synth.h | 70 + hw/9pfs/9p-util.c | 64 + hw/9pfs/9p-util.h | 81 + hw/9pfs/9p-xattr-user.c | 114 + hw/9pfs/9p-xattr.c | 291 ++ hw/9pfs/9p-xattr.h | 75 + hw/9pfs/9p.c | 4260 +++++++++++++++++++ hw/9pfs/9p.h | 482 +++ hw/9pfs/Kconfig | 9 + hw/9pfs/codir.c | 360 ++ hw/9pfs/cofile.c | 285 ++ hw/9pfs/cofs.c | 377 ++ hw/9pfs/coth.c | 46 + hw/9pfs/coth.h | 113 + hw/9pfs/coxattr.c | 114 + hw/9pfs/meson.build | 20 + hw/9pfs/trace-events | 50 + hw/9pfs/trace.h | 1 + hw/9pfs/virtio-9p-device.c | 279 ++ hw/9pfs/virtio-9p.h | 20 + hw/9pfs/xen-9p-backend.c | 510 +++ hw/9pfs/xen-9pfs.h | 25 + hw/Kconfig | 85 + hw/acpi/Kconfig | 62 + hw/acpi/acpi-cpu-hotplug-stub.c | 50 + hw/acpi/acpi-mem-hotplug-stub.c | 35 + hw/acpi/acpi-nvdimm-stub.c | 8 + hw/acpi/acpi-pci-hotplug-stub.c | 47 + hw/acpi/acpi-stub.c | 29 + hw/acpi/acpi-x86-stub.c | 14 + hw/acpi/acpi_interface.c | 25 + hw/acpi/aml-build-stub.c | 93 + hw/acpi/aml-build.c | 2439 +++++++++++ hw/acpi/bios-linker-loader.c | 351 ++ hw/acpi/core.c | 738 ++++ hw/acpi/cpu.c | 711 ++++ hw/acpi/cpu_hotplug.c | 333 ++ hw/acpi/generic_event_device.c | 433 ++ hw/acpi/ghes-stub.c | 22 + hw/acpi/ghes.c | 460 ++ hw/acpi/hmat.c | 267 ++ hw/acpi/hmat.h | 43 + hw/acpi/ich9.c | 595 +++ hw/acpi/ipmi-stub.c | 15 + hw/acpi/ipmi.c | 107 + hw/acpi/memory_hotplug.c | 730 ++++ hw/acpi/meson.build | 33 + hw/acpi/nvdimm.c | 1378 ++++++ hw/acpi/pci.c | 61 + hw/acpi/pcihp.c | 564 +++ hw/acpi/piix4.c | 710 ++++ hw/acpi/tco.c | 261 ++ hw/acpi/tpm.c | 459 ++ hw/acpi/trace-events | 57 + hw/acpi/trace.h | 1 + hw/acpi/utils.c | 48 + hw/acpi/viot.c | 114 + hw/acpi/viot.h | 13 + hw/acpi/vmgenid.c | 263 ++ hw/adc/Kconfig | 5 + hw/adc/aspeed_adc.c | 427 ++ hw/adc/max111x.c | 236 ++ hw/adc/meson.build | 5 + hw/adc/npcm7xx_adc.c | 301 ++ hw/adc/stm32f2xx_adc.c | 308 ++ hw/adc/trace-events | 8 + hw/adc/trace.h | 1 + hw/adc/zynq-xadc.c | 305 ++ hw/alpha/Kconfig | 11 + hw/alpha/alpha_sys.h | 21 + hw/alpha/dp264.c | 222 + hw/alpha/meson.build | 8 + hw/alpha/pci.c | 91 + hw/alpha/trace-events | 4 + hw/alpha/trace.h | 1 + hw/alpha/typhoon.c | 952 +++++ hw/arm/Kconfig | 550 +++ hw/arm/allwinner-a10.c | 191 + hw/arm/allwinner-h3.c | 457 ++ hw/arm/armsse.c | 1729 ++++++++ hw/arm/armv7m.c | 629 +++ hw/arm/aspeed.c | 1327 ++++++ hw/arm/aspeed_ast2600.c | 554 +++ hw/arm/aspeed_soc.c | 533 +++ hw/arm/bcm2835_peripherals.c | 415 ++ hw/arm/bcm2836.c | 245 ++ hw/arm/boot.c | 1351 ++++++ hw/arm/collie.c | 93 + hw/arm/cubieboard.c | 114 + hw/arm/digic.c | 107 + hw/arm/digic_boards.c | 149 + hw/arm/exynos4210.c | 513 +++ hw/arm/exynos4_boards.c | 195 + hw/arm/fsl-imx25.c | 349 ++ hw/arm/fsl-imx31.c | 253 ++ hw/arm/fsl-imx6.c | 482 +++ hw/arm/fsl-imx6ul.c | 651 +++ hw/arm/fsl-imx7.c | 592 +++ hw/arm/gumstix.c | 147 + hw/arm/highbank.c | 459 ++ hw/arm/imx25_pdk.c | 152 + hw/arm/integratorcp.c | 744 ++++ hw/arm/kzm.c | 142 + hw/arm/mainstone.c | 179 + hw/arm/mcimx6ul-evk.c | 75 +
hw/arm/mcimx7d-sabre.c | 75 + hw/arm/meson.build | 62 + hw/arm/microbit.c | 84 + hw/arm/mps2-tz.c | 1448 +++++++ hw/arm/mps2.c | 564 +++ hw/arm/msf2-soc.c | 266 ++ hw/arm/msf2-som.c | 111 + hw/arm/musca.c | 677 +++ hw/arm/musicpal.c | 1756 ++++++++ hw/arm/netduino2.c | 62 + hw/arm/netduinoplus2.c | 62 + hw/arm/npcm7xx.c | 813 ++++ hw/arm/npcm7xx_boards.c | 497 +++ hw/arm/nrf51_soc.c | 243 ++ hw/arm/nseries.c | 1468 +++++++ hw/arm/omap1.c | 4085 ++++++++++++++++++ hw/arm/omap2.c | 2714 ++++++++++++ hw/arm/omap_sx1.c | 257 ++ hw/arm/orangepi.c | 125 + hw/arm/palm.c | 320 ++ hw/arm/pxa2xx.c | 2384 +++++++++++ hw/arm/pxa2xx_gpio.c | 369 ++ hw/arm/pxa2xx_pic.c | 339 ++ hw/arm/raspi.c | 399 ++ hw/arm/realview.c | 468 +++ hw/arm/sabrelite.c | 115 + hw/arm/sbsa-ref.c | 868 ++++ hw/arm/smmu-common.c | 569 +++ hw/arm/smmu-internal.h | 112 + hw/arm/smmuv3-internal.h | 631 +++ hw/arm/smmuv3.c | 1583 +++++++ hw/arm/spitz.c | 1279 ++++++ hw/arm/stellaris.c | 1392 ++++++ hw/arm/stm32f100_soc.c | 209 + hw/arm/stm32f205_soc.c | 230 + hw/arm/stm32f405_soc.c | 310 ++ hw/arm/stm32vldiscovery.c | 65 + hw/arm/strongarm.c | 1649 ++++++++ hw/arm/strongarm.h | 67 + hw/arm/sysbus-fdt.c | 571 +++ hw/arm/tosa.c | 326 ++ hw/arm/trace-events | 55 + hw/arm/trace.h | 1 + hw/arm/versatilepb.c | 475 +++ hw/arm/vexpress.c | 857 ++++ hw/arm/virt-acpi-build.c | 1152 +++++ hw/arm/virt.c | 3033 ++++++++++++++ hw/arm/xilinx_zynq.c | 377 ++ hw/arm/xlnx-versal-virt.c | 726 ++++ hw/arm/xlnx-versal.c | 510 +++ hw/arm/xlnx-zcu102.c | 302 ++ hw/arm/xlnx-zynqmp.c | 780 ++++ hw/arm/z2.c | 355 ++ hw/audio/Kconfig | 52 + hw/audio/ac97.c | 1437 +++++++ hw/audio/adlib.c | 328 ++ hw/audio/cs4231.c | 184 + hw/audio/cs4231a.c | 723 ++++ hw/audio/es1370.c | 930 +++++ hw/audio/fmopl.c | 1211 ++++++ hw/audio/fmopl.h | 103 + hw/audio/gus.c | 323 ++ hw/audio/gusemu.h | 88 + hw/audio/gusemu_hal.c | 556 +++ hw/audio/gusemu_mixer.c | 241 ++ hw/audio/gustate.h | 132 + hw/audio/hda-codec-common.h | 456 ++ hw/audio/hda-codec.c | 960 +++++ hw/audio/intel-hda-defs.h | 717 ++++ hw/audio/intel-hda.c | 1331 ++++++ hw/audio/intel-hda.h | 66 + hw/audio/lm4549.c | 336 ++ hw/audio/lm4549.h | 44 + hw/audio/marvell_88w8618.c | 313 ++ hw/audio/meson.build | 14 + hw/audio/pcspk.c | 263 ++ hw/audio/pl041.c | 660 +++ hw/audio/pl041.h | 135 + hw/audio/pl041.hx | 81 + hw/audio/sb16.c | 1476 +++++++ hw/audio/soundhw.c | 177 + hw/audio/trace-events | 13 + hw/audio/trace.h | 1 + hw/audio/via-ac97.c | 93 + hw/audio/wm8750.c | 736 ++++ hw/avr/Kconfig | 9 + hw/avr/arduino.c | 159 + hw/avr/atmega.c | 457 ++ hw/avr/atmega.h | 51 + hw/avr/boot.c | 116 + hw/avr/boot.h | 33 + hw/avr/meson.build | 6 + hw/block/Kconfig | 46 + hw/block/block.c | 238 ++ hw/block/cdrom.c | 155 + hw/block/dataplane/meson.build | 2 + hw/block/dataplane/trace-events | 5 + hw/block/dataplane/trace.h | 1 + hw/block/dataplane/virtio-blk.c | 369 ++ hw/block/dataplane/virtio-blk.h | 31 + hw/block/dataplane/xen-block.c | 828 ++++ hw/block/dataplane/xen-block.h | 30 + hw/block/ecc.c | 91 + hw/block/fdc-internal.h | 158 + hw/block/fdc-isa.c | 320 ++ hw/block/fdc-sysbus.c | 251 ++ hw/block/fdc.c | 2384 +++++++++++ hw/block/hd-geometry.c | 163 + hw/block/m25p80.c | 1661 ++++++++ hw/block/meson.build | 22 + hw/block/nand.c | 814 ++++ hw/block/onenand.c | 872 ++++ hw/block/pflash_cfi01.c | 1043 +++++ hw/block/pflash_cfi02.c | 1031 +++++ hw/block/swim.c | 491 +++ hw/block/tc58128.c | 208 + hw/block/trace-events | 84 + hw/block/trace.h | 1 + hw/block/vhost-user-blk.c | 620 +++ hw/block/virtio-blk.c | 1353 ++++++ 
hw/block/xen-block.c | 1024 +++++ hw/block/xen_blkif.h | 147 + hw/char/Kconfig | 73 + hw/char/avr_usart.c | 321 ++ hw/char/bcm2835_aux.c | 321 ++ hw/char/cadence_uart.c | 648 +++ hw/char/cmsdk-apb-uart.c | 409 ++ hw/char/debugcon.c | 145 + hw/char/digic-uart.c | 203 + hw/char/escc.c | 1006 +++++ hw/char/etraxfs_ser.c | 267 ++ hw/char/exynos4210_uart.c | 738 ++++ hw/char/goldfish_tty.c | 285 ++ hw/char/grlib_apbuart.c | 304 ++ hw/char/ibex_uart.c | 569 +++ hw/char/imx_serial.c | 392 ++ hw/char/ipoctal232.c | 608 +++ hw/char/mcf_uart.c | 366 ++ hw/char/mchp_pfsoc_mmuart.c | 163 + hw/char/meson.build | 41 + hw/char/nrf51_uart.c | 335 ++ hw/char/omap_uart.c | 187 + hw/char/parallel-isa.c | 42 + hw/char/parallel.c | 669 +++ hw/char/pl011.c | 451 ++ hw/char/renesas_sci.c | 351 ++ hw/char/riscv_htif.c | 260 ++ hw/char/sclpconsole-lm.c | 373 ++ hw/char/sclpconsole.c | 289 ++ hw/char/serial-isa.c | 182 + hw/char/serial-pci-multi.c | 222 + hw/char/serial-pci.c | 130 + hw/char/serial.c | 1127 +++++ hw/char/sh_serial.c | 465 +++ hw/char/shakti_uart.c | 186 + hw/char/sifive_uart.c | 289 ++ hw/char/spapr_vty.c | 272 ++ hw/char/stm32f2xx_usart.c | 243 ++ hw/char/terminal3270.c | 321 ++ hw/char/trace-events | 107 + hw/char/trace.h | 1 + hw/char/virtio-console.c | 309 ++ hw/char/virtio-serial-bus.c | 1209 ++++++ hw/char/xen_console.c | 298 ++ hw/char/xilinx_uartlite.c | 257 ++ hw/core/Kconfig | 29 + hw/core/bus.c | 341 ++ hw/core/clock-vmstate.c | 63 + hw/core/clock.c | 195 + hw/core/cpu-common.c | 298 ++ hw/core/cpu-sysemu.c | 145 + hw/core/fw-path-provider.c | 54 + hw/core/generic-loader.c | 221 + hw/core/gpio.c | 197 + hw/core/guest-loader.c | 144 + hw/core/guest-loader.h | 34 + hw/core/hotplug-stubs.c | 34 + hw/core/hotplug.c | 71 + hw/core/irq.c | 146 + hw/core/loader-fit.c | 335 ++ hw/core/loader.c | 1760 ++++++++ hw/core/machine-hmp-cmds.c | 132 + hw/core/machine-qmp-cmds.c | 246 ++ hw/core/machine-smp.c | 181 + hw/core/machine.c | 1262 ++++++ hw/core/meson.build | 56 + hw/core/nmi.c | 88 + hw/core/null-machine.c | 59 + hw/core/numa.c | 872 ++++ hw/core/or-irq.c | 149 + hw/core/platform-bus.c | 230 + hw/core/ptimer.c | 486 +++ hw/core/qdev-clock.c | 212 + hw/core/qdev-fw.c | 96 + hw/core/qdev-hotplug.c | 73 + hw/core/qdev-prop-internal.h | 28 + hw/core/qdev-properties-system.c | 1121 +++++ hw/core/qdev-properties.c | 955 +++++ hw/core/qdev.c | 947 +++++ hw/core/register.c | 342 ++ hw/core/reset.c | 72 + hw/core/resettable.c | 301 ++ hw/core/split-irq.c | 92 + hw/core/stream.c | 34 + hw/core/sysbus.c | 367 ++ hw/core/trace-events | 37 + hw/core/trace.h | 1 + hw/core/uboot_image.h | 159 + hw/core/vm-change-state-handler.c | 62 + hw/core/vmstate-if.c | 23 + hw/cpu/Kconfig | 8 + hw/cpu/a15mpcore.c | 180 + hw/cpu/a9mpcore.c | 194 + hw/cpu/arm11mpcore.c | 169 + hw/cpu/cluster.c | 100 + hw/cpu/core.c | 106 + hw/cpu/meson.build | 6 + hw/cpu/realview_mpcore.c | 137 + hw/cris/Kconfig | 9 + hw/cris/axis_dev88.c | 352 ++ hw/cris/boot.c | 102 + hw/cris/boot.h | 16 + hw/cris/meson.build | 5 + hw/display/Kconfig | 136 + hw/display/artist.c | 1500 +++++++ hw/display/ati.c | 1052 +++++ hw/display/ati_2d.c | 206 + hw/display/ati_dbg.c | 273 ++ hw/display/ati_int.h | 107 + hw/display/ati_regs.h | 481 +++ hw/display/bcm2835_fb.c | 470 +++ hw/display/blizzard.c | 1026 +++++ hw/display/bochs-display.c | 390 ++ hw/display/cg3.c | 396 ++ hw/display/cirrus_vga.c | 3024 ++++++++++++++ hw/display/cirrus_vga_internal.h | 103 + hw/display/cirrus_vga_isa.c | 99 + hw/display/cirrus_vga_rop.h | 246 ++ hw/display/cirrus_vga_rop2.h 
| 284 ++ hw/display/dpcd.c | 165 + hw/display/edid-generate.c | 563 +++ hw/display/edid-region.c | 33 + hw/display/exynos4210_fimd.c | 1972 +++++++++ hw/display/framebuffer.c | 122 + hw/display/framebuffer.h | 65 + hw/display/g364fb.c | 554 +++ hw/display/i2c-ddc.c | 129 + hw/display/jazz_led.c | 320 ++ hw/display/macfb.c | 807 ++++ hw/display/meson.build | 100 + hw/display/next-fb.c | 144 + hw/display/omap_dss.c | 1093 +++++ hw/display/omap_lcdc.c | 508 +++ hw/display/pl110.c | 609 +++ hw/display/pl110_template.h | 301 ++ hw/display/pxa2xx_lcd.c | 1451 +++++++ hw/display/qxl-logger.c | 274 ++ hw/display/qxl-render.c | 337 ++ hw/display/qxl.c | 2524 +++++++++++ hw/display/qxl.h | 177 + hw/display/ramfb-standalone.c | 65 + hw/display/ramfb.c | 135 + hw/display/sii9022.c | 193 + hw/display/sm501.c | 2124 ++++++++++ hw/display/ssd0303.c | 336 ++ hw/display/ssd0323.c | 388 ++ hw/display/tc6393xb.c | 568 +++ hw/display/tcx.c | 914 ++++ hw/display/trace-events | 176 + hw/display/trace.h | 1 + hw/display/vga-access.h | 49 + hw/display/vga-helpers.h | 431 ++ hw/display/vga-isa-mm.c | 114 + hw/display/vga-isa.c | 115 + hw/display/vga-pci.c | 420 ++ hw/display/vga.c | 2305 ++++++++++ hw/display/vga_int.h | 198 + hw/display/vga_regs.h | 159 + hw/display/vhost-user-gpu-pci.c | 53 + hw/display/vhost-user-gpu.c | 608 +++ hw/display/vhost-user-vga.c | 54 + hw/display/virtio-gpu-base.c | 290 ++ hw/display/virtio-gpu-gl.c | 171 + hw/display/virtio-gpu-pci-gl.c | 58 + hw/display/virtio-gpu-pci.c | 102 + hw/display/virtio-gpu-udmabuf-stubs.c | 28 + hw/display/virtio-gpu-udmabuf.c | 230 + hw/display/virtio-gpu-virgl.c | 634 +++ hw/display/virtio-gpu.c | 1459 +++++++ hw/display/virtio-vga-gl.c | 50 + hw/display/virtio-vga.c | 280 ++ hw/display/virtio-vga.h | 29 + hw/display/vmware_vga.c | 1366 ++++++ hw/display/xenfb.c | 987 +++++ hw/display/xlnx_dp.c | 1383 ++++++ hw/dma/Kconfig | 32 + hw/dma/bcm2835_dma.c | 410 ++ hw/dma/etraxfs_dma.c | 780 ++++ hw/dma/i82374.c | 168 + hw/dma/i8257.c | 657 +++ hw/dma/meson.build | 16 + hw/dma/omap_dma.c | 2124 ++++++++++ hw/dma/pl080.c | 449 ++ hw/dma/pl330.c | 1702 ++++++++ hw/dma/pxa2xx_dma.c | 591 +++ hw/dma/rc4030.c | 754 ++++ hw/dma/sifive_pdma.c | 351 ++ hw/dma/soc_dma.c | 361 ++ hw/dma/sparc32_dma.c | 449 ++ hw/dma/trace-events | 46 + hw/dma/trace.h | 1 + hw/dma/xilinx_axidma.c | 662 +++ hw/dma/xlnx-zdma.c | 847 ++++ hw/dma/xlnx-zynq-devcfg.c | 402 ++ hw/dma/xlnx_csu_dma.c | 743 ++++ hw/dma/xlnx_dpdma.c | 790 ++++ hw/gpio/Kconfig | 15 + hw/gpio/aspeed_gpio.c | 996 +++++ hw/gpio/bcm2835_gpio.c | 344 ++ hw/gpio/gpio_key.c | 109 + hw/gpio/gpio_pwr.c | 70 + hw/gpio/imx_gpio.c | 355 ++ hw/gpio/max7310.c | 218 + hw/gpio/meson.build | 14 + hw/gpio/mpc8xxx.c | 224 + hw/gpio/npcm7xx_gpio.c | 424 ++ hw/gpio/nrf51_gpio.c | 318 ++ hw/gpio/omap_gpio.c | 813 ++++ hw/gpio/pl061.c | 603 +++ hw/gpio/sifive_gpio.c | 397 ++ hw/gpio/trace-events | 29 + hw/gpio/trace.h | 1 + hw/gpio/zaurus.c | 305 ++ hw/hppa/Kconfig | 16 + hw/hppa/dino.c | 608 +++ hw/hppa/hppa_hardware.h | 51 + hw/hppa/hppa_sys.h | 24 + hw/hppa/lasi.c | 367 ++ hw/hppa/machine.c | 374 ++ hw/hppa/meson.build | 4 + hw/hppa/pci.c | 88 + hw/hppa/trace-events | 14 + hw/hppa/trace.h | 1 + hw/hyperv/Kconfig | 13 + hw/hyperv/hyperv.c | 663 +++ hw/hyperv/hyperv_testdev.c | 326 ++ hw/hyperv/meson.build | 3 + hw/hyperv/trace-events | 18 + hw/hyperv/trace.h | 1 + hw/hyperv/vmbus.c | 2785 ++++++++++++ hw/i2c/Kconfig | 38 + hw/i2c/aspeed_i2c.c | 1038 +++++ hw/i2c/bitbang_i2c.c | 226 + hw/i2c/core.c | 357 ++ hw/i2c/exynos4210_i2c.c | 333 
++ hw/i2c/i2c_mux_pca954x.c | 290 ++ hw/i2c/imx_i2c.c | 333 ++ hw/i2c/meson.build | 19 + hw/i2c/microbit_i2c.c | 130 + hw/i2c/mpc_i2c.c | 359 ++ hw/i2c/npcm7xx_smbus.c | 1098 +++++ hw/i2c/omap_i2c.c | 549 +++ hw/i2c/pm_smbus.c | 497 +++ hw/i2c/pmbus_device.c | 1612 +++++++ hw/i2c/ppc4xx_i2c.c | 377 ++ hw/i2c/smbus_eeprom.c | 300 ++ hw/i2c/smbus_ich9.c | 155 + hw/i2c/smbus_master.c | 164 + hw/i2c/smbus_slave.c | 237 ++ hw/i2c/trace-events | 33 + hw/i2c/trace.h | 1 + hw/i2c/versatile_i2c.c | 112 + hw/i386/Kconfig | 139 + hw/i386/acpi-build.c | 2873 +++++++++++++ hw/i386/acpi-build.h | 15 + hw/i386/acpi-common.c | 165 + hw/i386/acpi-common.h | 15 + hw/i386/acpi-microvm.c | 258 ++ hw/i386/acpi-microvm.h | 8 + hw/i386/amd_iommu.c | 1662 ++++++++ hw/i386/amd_iommu.h | 372 ++ hw/i386/e820_memory_layout.c | 59 + hw/i386/e820_memory_layout.h | 42 + hw/i386/fw_cfg.c | 221 + hw/i386/fw_cfg.h | 30 + hw/i386/generic_event_device_x86.c | 36 + hw/i386/intel_iommu.c | 3900 +++++++++++++++++ hw/i386/intel_iommu_internal.h | 518 +++ hw/i386/kvm/apic.c | 271 ++ hw/i386/kvm/clock.c | 350 ++ hw/i386/kvm/i8254.c | 337 ++ hw/i386/kvm/i8259.c | 167 + hw/i386/kvm/ioapic.c | 165 + hw/i386/kvm/meson.build | 8 + hw/i386/kvmvapic.c | 871 ++++ hw/i386/meson.build | 37 + hw/i386/microvm-dt.c | 348 ++ hw/i386/microvm-dt.h | 8 + hw/i386/microvm.c | 779 ++++ hw/i386/multiboot.c | 415 ++ hw/i386/multiboot.h | 16 + hw/i386/pc.c | 1777 ++++++++ hw/i386/pc_piix.c | 951 +++++ hw/i386/pc_q35.c | 620 +++ hw/i386/pc_sysfw.c | 262 ++ hw/i386/pc_sysfw_ovmf-stubs.c | 26 + hw/i386/pc_sysfw_ovmf.c | 151 + hw/i386/port92.c | 127 + hw/i386/sgx-epc.c | 185 + hw/i386/sgx-stub.c | 34 + hw/i386/sgx.c | 243 ++ hw/i386/trace-events | 121 + hw/i386/trace.h | 1 + hw/i386/vmmouse.c | 327 ++ hw/i386/vmport.c | 313 ++ hw/i386/x86-iommu-stub.c | 38 + hw/i386/x86-iommu.c | 162 + hw/i386/x86.c | 1399 +++++++ hw/i386/xen/meson.build | 7 + hw/i386/xen/trace-events | 28 + hw/i386/xen/trace.h | 1 + hw/i386/xen/xen-hvm.c | 1620 +++++++ hw/i386/xen/xen-mapcache.c | 593 +++ hw/i386/xen/xen_apic.c | 103 + hw/i386/xen/xen_platform.c | 519 +++ hw/i386/xen/xen_pvdevice.c | 154 + hw/ide/Kconfig | 59 + hw/ide/ahci-allwinner.c | 127 + hw/ide/ahci.c | 1843 ++++++++ hw/ide/ahci_internal.h | 393 ++ hw/ide/atapi.c | 1377 ++++++ hw/ide/cmd646.c | 351 ++ hw/ide/core.c | 2946 +++++++++++++ hw/ide/ich.c | 197 + hw/ide/ioport.c | 68 + hw/ide/isa.c | 138 + hw/ide/macio.c | 513 +++ hw/ide/meson.build | 14 + hw/ide/microdrive.c | 643 +++ hw/ide/mmio.c | 189 + hw/ide/pci.c | 534 +++ hw/ide/piix.c | 279 ++ hw/ide/qdev.c | 371 ++ hw/ide/sii3112.c | 322 ++ hw/ide/trace-events | 124 + hw/ide/trace.h | 1 + hw/ide/via.c | 243 ++ hw/input/Kconfig | 48 + hw/input/adb-internal.h | 53 + hw/input/adb-kbd.c | 408 ++ hw/input/adb-mouse.c | 279 ++ hw/input/adb.c | 324 ++ hw/input/ads7846.c | 186 + hw/input/hid.c | 624 +++ hw/input/lasips2.c | 288 ++ hw/input/lm832x.c | 528 +++ hw/input/meson.build | 18 + hw/input/pckbd.c | 814 ++++ hw/input/pl050.c | 210 + hw/input/ps2.c | 1220 ++++++ hw/input/pxa2xx_keypad.c | 331 ++ hw/input/stellaris_input.c | 93 + hw/input/trace-events | 60 + hw/input/trace.h | 1 + hw/input/tsc2005.c | 559 +++ hw/input/tsc210x.c | 1253 ++++++ hw/input/vhost-user-input.c | 130 + hw/input/virtio-input-hid.c | 522 +++ hw/input/virtio-input-host.c | 260 ++ hw/input/virtio-input.c | 343 ++ hw/intc/Kconfig | 75 + hw/intc/allwinner-a10-pic.c | 215 + hw/intc/apic.c | 928 ++++ hw/intc/apic_common.c | 497 +++ hw/intc/arm_gic.c | 2146 ++++++++++ hw/intc/arm_gic_common.c | 394 ++ 
hw/intc/arm_gic_kvm.c | 619 +++ hw/intc/arm_gicv2m.c | 200 + hw/intc/arm_gicv3.c | 420 ++ hw/intc/arm_gicv3_common.c | 561 +++ hw/intc/arm_gicv3_cpuif.c | 2700 ++++++++++++ hw/intc/arm_gicv3_dist.c | 914 ++++ hw/intc/arm_gicv3_its.c | 1323 ++++++ hw/intc/arm_gicv3_its_common.c | 159 + hw/intc/arm_gicv3_its_kvm.c | 266 ++ hw/intc/arm_gicv3_kvm.c | 900 ++++ hw/intc/arm_gicv3_redist.c | 738 ++++ hw/intc/armv7m_nvic.c | 2735 ++++++++++++ hw/intc/aspeed_vic.c | 363 ++ hw/intc/bcm2835_ic.c | 243 ++ hw/intc/bcm2836_control.c | 408 ++ hw/intc/etraxfs_pic.c | 172 + hw/intc/exynos4210_combiner.c | 461 ++ hw/intc/exynos4210_gic.c | 482 +++ hw/intc/gic_internal.h | 322 ++ hw/intc/gicv3_internal.h | 611 +++ hw/intc/goldfish_pic.c | 219 + hw/intc/grlib_irqmp.c | 362 ++ hw/intc/heathrow_pic.c | 210 + hw/intc/i8259.c | 466 +++ hw/intc/i8259_common.c | 219 + hw/intc/imx_avic.c | 366 ++ hw/intc/imx_gpcv2.c | 126 + hw/intc/intc.c | 41 + hw/intc/ioapic.c | 513 +++ hw/intc/ioapic_common.c | 224 + hw/intc/loongson_liointc.c | 249 ++ hw/intc/m68k_irqc.c | 119 + hw/intc/meson.build | 59 + hw/intc/mips_gic.c | 468 +++ hw/intc/omap_intc.c | 690 +++ hw/intc/ompic.c | 181 + hw/intc/openpic.c | 1645 ++++++++ hw/intc/openpic_kvm.c | 294 ++ hw/intc/pl190.c | 297 ++ hw/intc/pnv_xive.c | 1987 +++++++++ hw/intc/pnv_xive_regs.h | 248 ++ hw/intc/ppc-uic.c | 321 ++ hw/intc/realview_gic.c | 86 + hw/intc/riscv_aclint.c | 460 ++ hw/intc/rx_icu.c | 395 ++ hw/intc/s390_flic.c | 503 +++ hw/intc/s390_flic_kvm.c | 679 +++ hw/intc/sh_intc.c | 449 ++ hw/intc/sifive_plic.c | 563 +++ hw/intc/slavio_intctl.c | 475 +++ hw/intc/spapr_xive.c | 1830 ++++++++ hw/intc/spapr_xive_kvm.c | 869 ++++ hw/intc/trace-events | 248 ++ hw/intc/trace.h | 1 + hw/intc/vgic_common.h | 35 + hw/intc/xics.c | 751 ++++ hw/intc/xics_kvm.c | 509 +++ hw/intc/xics_pnv.c | 202 + hw/intc/xics_spapr.c | 476 +++ hw/intc/xilinx_intc.c | 206 + hw/intc/xive.c | 1983 +++++++++ hw/intc/xlnx-pmu-iomod-intc.c | 558 +++ hw/intc/xlnx-zynqmp-ipi.c | 380 ++ hw/ipack/Kconfig | 4 + hw/ipack/ipack.c | 123 + hw/ipack/meson.build | 1 + hw/ipack/tpci200.c | 664 +++ hw/ipmi/Kconfig | 37 + hw/ipmi/ipmi.c | 138 + hw/ipmi/ipmi_bmc_extern.c | 548 +++ hw/ipmi/ipmi_bmc_sim.c | 2232 ++++++++++ hw/ipmi/ipmi_bt.c | 437 ++ hw/ipmi/ipmi_kcs.c | 423 ++ hw/ipmi/isa_ipmi_bt.c | 173 + hw/ipmi/isa_ipmi_kcs.c | 180 + hw/ipmi/meson.build | 11 + hw/ipmi/pci_ipmi_bt.c | 148 + hw/ipmi/pci_ipmi_kcs.c | 148 + hw/ipmi/smbus_ipmi.c | 385 ++ hw/isa/Kconfig | 72 + hw/isa/apm.c | 97 + hw/isa/i82378.c | 151 + hw/isa/isa-bus.c | 309 ++ hw/isa/isa-superio.c | 212 + hw/isa/lpc_ich9.c | 857 ++++ hw/isa/meson.build | 11 + hw/isa/pc87312.c | 387 ++ hw/isa/piix3.c | 395 ++ hw/isa/piix4.c | 275 ++ hw/isa/smc37c669-superio.c | 116 + hw/isa/trace-events | 23 + hw/isa/trace.h | 1 + hw/isa/vt82c686.c | 754 ++++ hw/m68k/Kconfig | 34 + hw/m68k/an5206.c | 102 + hw/m68k/bootinfo.h | 59 + hw/m68k/mcf5206.c | 629 +++ hw/m68k/mcf5208.c | 363 ++ hw/m68k/mcf_intc.c | 217 + hw/m68k/meson.build | 8 + hw/m68k/next-cube.c | 1056 +++++ hw/m68k/next-kbd.c | 289 ++ hw/m68k/q800.c | 711 ++++ hw/m68k/virt.c | 324 ++ hw/mem/Kconfig | 13 + hw/mem/memory-device.c | 346 ++ hw/mem/meson.build | 9 + hw/mem/npcm7xx_mc.c | 84 + hw/mem/nvdimm.c | 266 ++ hw/mem/pc-dimm.c | 307 ++ hw/mem/sparse-mem.c | 150 + hw/mem/trace-events | 8 + hw/mem/trace.h | 1 + hw/meson.build | 66 + hw/microblaze/Kconfig | 21 + hw/microblaze/boot.c | 216 + hw/microblaze/boot.h | 11 + hw/microblaze/meson.build | 7 + hw/microblaze/petalogix_ml605_mmu.c | 220 + 
hw/microblaze/petalogix_s3adsp1800_mmu.c | 138 + hw/microblaze/xlnx-zynqmp-pmu.c | 187 + hw/mips/Kconfig | 61 + hw/mips/bootloader.c | 204 + hw/mips/boston.c | 835 ++++ hw/mips/cps.c | 210 + hw/mips/fuloong2e.c | 350 ++ hw/mips/fw_cfg.c | 35 + hw/mips/fw_cfg.h | 19 + hw/mips/gt64xxx_pci.c | 1300 ++++++ hw/mips/jazz.c | 441 ++ hw/mips/loongson3_bootp.c | 151 + hw/mips/loongson3_bootp.h | 236 ++ hw/mips/loongson3_virt.c | 634 +++ hw/mips/malta.c | 1464 +++++++ hw/mips/meson.build | 15 + hw/mips/mips_int.c | 88 + hw/mips/mipssim.c | 246 ++ hw/mips/trace-events | 6 + hw/mips/trace.h | 1 + hw/misc/Kconfig | 174 + hw/misc/a9scu.c | 153 + hw/misc/allwinner-cpucfg.c | 282 ++ hw/misc/allwinner-h3-ccu.c | 242 ++ hw/misc/allwinner-h3-dramc.c | 358 ++ hw/misc/allwinner-h3-sysctrl.c | 140 + hw/misc/allwinner-sid.c | 169 + hw/misc/applesmc.c | 372 ++ hw/misc/arm11scu.c | 104 + hw/misc/arm_integrator_debug.c | 100 + hw/misc/arm_l2x0.c | 203 + hw/misc/arm_sysctl.c | 662 +++ hw/misc/armsse-cpu-pwrctrl.c | 149 + hw/misc/armsse-cpuid.c | 135 + hw/misc/armsse-mhu.c | 200 + hw/misc/armv7m_ras.c | 93 + hw/misc/aspeed_hace.c | 389 ++ hw/misc/aspeed_lpc.c | 486 +++ hw/misc/aspeed_scu.c | 740 ++++ hw/misc/aspeed_sdmc.c | 522 +++ hw/misc/aspeed_xdma.c | 245 ++ hw/misc/auxbus.c | 337 ++ hw/misc/avr_power.c | 113 + hw/misc/bcm2835_cprman.c | 813 ++++ hw/misc/bcm2835_mbox.c | 340 ++ hw/misc/bcm2835_mphi.c | 191 + hw/misc/bcm2835_powermgt.c | 160 + hw/misc/bcm2835_property.c | 436 ++ hw/misc/bcm2835_rng.c | 147 + hw/misc/bcm2835_thermal.c | 135 + hw/misc/cbus.c | 619 +++ hw/misc/debugexit.c | 84 + hw/misc/eccmemctl.c | 357 ++ hw/misc/edu.c | 442 ++ hw/misc/empty_slot.c | 109 + hw/misc/exynos4210_clk.c | 166 + hw/misc/exynos4210_pmu.c | 522 +++ hw/misc/exynos4210_rng.c | 277 ++ hw/misc/grlib_ahb_apb_pnp.c | 301 ++ hw/misc/imx25_ccm.c | 320 ++ hw/misc/imx31_ccm.c | 347 ++ hw/misc/imx6_ccm.c | 783 ++++ hw/misc/imx6_src.c | 311 ++ hw/misc/imx6ul_ccm.c | 938 +++++ hw/misc/imx7_ccm.c | 287 ++ hw/misc/imx7_gpr.c | 124 + hw/misc/imx7_snvs.c | 83 + hw/misc/imx_ccm.c | 86 + hw/misc/imx_rngc.c | 277 ++ hw/misc/iotkit-secctl.c | 845 ++++ hw/misc/iotkit-sysctl.c | 872 ++++ hw/misc/iotkit-sysinfo.c | 187 + hw/misc/ivshmem.c | 1135 +++++ hw/misc/led.c | 161 + hw/misc/mac_via.c | 1221 ++++++ hw/misc/macio/Kconfig | 11 + hw/misc/macio/cuda.c | 629 +++ hw/misc/macio/gpio.c | 219 + hw/misc/macio/mac_dbdma.c | 940 +++++ hw/misc/macio/macio.c | 504 +++ hw/misc/macio/meson.build | 8 + hw/misc/macio/pmu.c | 871 ++++ hw/misc/macio/trace-events | 42 + hw/misc/macio/trace.h | 1 + hw/misc/mchp_pfsoc_dmc.c | 215 + hw/misc/mchp_pfsoc_ioscb.c | 241 ++ hw/misc/mchp_pfsoc_sysreg.c | 98 + hw/misc/meson.build | 128 + hw/misc/mips_cmgcr.c | 255 ++ hw/misc/mips_cpc.c | 195 + hw/misc/mips_itu.c | 581 +++ hw/misc/mos6522.c | 541 +++ hw/misc/mps2-fpgaio.c | 355 ++ hw/misc/mps2-scc.c | 396 ++ hw/misc/msf2-sysreg.c | 163 + hw/misc/mst_fpga.c | 269 ++ hw/misc/npcm7xx_clk.c | 1095 +++++ hw/misc/npcm7xx_gcr.c | 269 ++ hw/misc/npcm7xx_mft.c | 540 +++ hw/misc/npcm7xx_pwm.c | 569 +++ hw/misc/npcm7xx_rng.c | 180 + hw/misc/nrf51_rng.c | 266 ++ hw/misc/omap_clk.c | 1267 ++++++ hw/misc/omap_gpmc.c | 898 ++++ hw/misc/omap_l4.c | 163 + hw/misc/omap_sdrc.c | 168 + hw/misc/omap_tap.c | 118 + hw/misc/pc-testdev.c | 216 + hw/misc/pca9552.c | 437 ++ hw/misc/pci-testdev.c | 361 ++ hw/misc/pvpanic-isa.c | 93 + hw/misc/pvpanic-pci.c | 93 + hw/misc/pvpanic.c | 70 + hw/misc/sbsa_ec.c | 98 + hw/misc/sga.c | 71 + hw/misc/sifive_e_prci.c | 124 + hw/misc/sifive_test.c | 99 + 
hw/misc/sifive_u_otp.c | 301 ++ hw/misc/sifive_u_prci.c | 169 + hw/misc/slavio_misc.c | 515 +++ hw/misc/stm32f2xx_syscfg.c | 161 + hw/misc/stm32f4xx_exti.c | 188 + hw/misc/stm32f4xx_syscfg.c | 171 + hw/misc/trace-events | 255 ++ hw/misc/trace.h | 1 + hw/misc/tz-mpc.c | 636 +++ hw/misc/tz-msc.c | 312 ++ hw/misc/tz-ppc.c | 352 ++ hw/misc/unimp.c | 99 + hw/misc/virt_ctrl.c | 150 + hw/misc/vmcoreinfo.c | 108 + hw/misc/xlnx-versal-xramc.c | 253 ++ hw/misc/zynq_slcr.c | 637 +++ hw/net/Kconfig | 155 + hw/net/allwinner-sun8i-emac.c | 887 ++++ hw/net/allwinner_emac.c | 540 +++ hw/net/cadence_gem.c | 1720 ++++++++ hw/net/can/can_kvaser_pci.c | 324 ++ hw/net/can/can_mioe3680_pci.c | 267 ++ hw/net/can/can_pcm3680_pci.c | 268 ++ hw/net/can/can_sja1000.c | 988 +++++ hw/net/can/can_sja1000.h | 147 + hw/net/can/ctu_can_fd_frame.h | 189 + hw/net/can/ctu_can_fd_regs.h | 971 +++++ hw/net/can/ctucan_core.c | 687 +++ hw/net/can/ctucan_core.h | 126 + hw/net/can/ctucan_pci.c | 281 ++ hw/net/can/meson.build | 7 + hw/net/can/trace-events | 9 + hw/net/can/trace.h | 1 + hw/net/can/xlnx-zynqmp-can.c | 1160 +++++ hw/net/dp8393x.c | 996 +++++ hw/net/e1000.c | 1856 ++++++++ hw/net/e1000_regs.h | 1250 ++++++ hw/net/e1000e.c | 735 ++++ hw/net/e1000e_core.c | 3507 ++++++++++++++++ hw/net/e1000e_core.h | 158 + hw/net/e1000x_common.c | 267 ++ hw/net/e1000x_common.h | 216 + hw/net/eepro100.c | 2097 ++++++++++ hw/net/etraxfs_eth.c | 688 +++ hw/net/fsl_etsec/etsec.c | 469 +++ hw/net/fsl_etsec/etsec.h | 178 + hw/net/fsl_etsec/miim.c | 147 + hw/net/fsl_etsec/registers.c | 296 ++ hw/net/fsl_etsec/registers.h | 329 ++ hw/net/fsl_etsec/rings.c | 652 +++ hw/net/ftgmac100.c | 1344 ++++++ hw/net/i82596.c | 754 ++++ hw/net/i82596.h | 55 + hw/net/imx_fec.c | 1370 ++++++ hw/net/lan9118.c | 1417 +++++++ hw/net/lance.c | 172 + hw/net/lasi_i82596.c | 189 + hw/net/mcf_fec.c | 692 +++ hw/net/meson.build | 67 + hw/net/mipsnet.c | 297 ++ hw/net/msf2-emac.c | 588 +++ hw/net/ne2000-isa.c | 152 + hw/net/ne2000-pci.c | 136 + hw/net/ne2000.c | 700 ++++ hw/net/ne2000.h | 42 + hw/net/net_rx_pkt.c | 643 +++ hw/net/net_rx_pkt.h | 367 ++ hw/net/net_tx_pkt.c | 674 +++ hw/net/net_tx_pkt.h | 204 + hw/net/npcm7xx_emc.c | 859 ++++ hw/net/opencores_eth.c | 773 ++++ hw/net/pcnet-pci.c | 295 ++ hw/net/pcnet.c | 1752 ++++++++ hw/net/pcnet.h | 68 + hw/net/rocker/qmp-norocker.c | 51 + hw/net/rocker/rocker.c | 1535 +++++++ hw/net/rocker/rocker.h | 84 + hw/net/rocker/rocker_desc.c | 361 ++ hw/net/rocker/rocker_desc.h | 52 + hw/net/rocker/rocker_fp.c | 267 ++ hw/net/rocker/rocker_fp.h | 54 + hw/net/rocker/rocker_hw.h | 493 +++ hw/net/rocker/rocker_of_dpa.c | 2597 ++++++++++++ hw/net/rocker/rocker_of_dpa.h | 22 + hw/net/rocker/rocker_tlv.h | 244 ++ hw/net/rocker/rocker_world.c | 100 + hw/net/rocker/rocker_world.h | 61 + hw/net/rtl8139.c | 3460 +++++++++++++++ hw/net/smc91c111.c | 834 ++++ hw/net/spapr_llan.c | 885 ++++ hw/net/stellaris_enet.c | 526 +++ hw/net/sungem.c | 1454 +++++++ hw/net/sunhme.c | 983 +++++ hw/net/trace-events | 455 ++ hw/net/trace.h | 1 + hw/net/tulip.c | 1047 +++++ hw/net/tulip.h | 268 ++ hw/net/vhost_net-stub.c | 103 + hw/net/vhost_net.c | 514 +++ hw/net/virtio-net.c | 3730 +++++++++++++++++ hw/net/vmware_utils.h | 153 + hw/net/vmxnet3.c | 2561 ++++++++++++ hw/net/vmxnet3.h | 807 ++++ hw/net/vmxnet3_defs.h | 140 + hw/net/vmxnet_debug.h | 145 + hw/net/xen_nic.c | 411 ++ hw/net/xgmac.c | 442 ++ hw/net/xilinx_axienet.c | 1072 +++++ hw/net/xilinx_ethlite.c | 282 ++ hw/nios2/10m50_devboard.c | 115 + hw/nios2/Kconfig | 12 + hw/nios2/boot.c | 229 + 
hw/nios2/boot.h | 10 + hw/nios2/generic_nommu.c | 101 + hw/nios2/meson.build | 6 + hw/nubus/Kconfig | 2 + hw/nubus/mac-nubus-bridge.c | 63 + hw/nubus/meson.build | 7 + hw/nubus/nubus-bridge.c | 53 + hw/nubus/nubus-bus.c | 188 + hw/nubus/nubus-device.c | 120 + hw/nubus/trace-events | 7 + hw/nubus/trace.h | 1 + hw/nvme/Kconfig | 4 + hw/nvme/ctrl.c | 6731 ++++++++++++++++++++++++++++++ hw/nvme/dif.c | 509 +++ hw/nvme/meson.build | 1 + hw/nvme/ns.c | 595 +++ hw/nvme/nvme.h | 556 +++ hw/nvme/subsys.c | 96 + hw/nvme/trace-events | 200 + hw/nvme/trace.h | 1 + hw/nvram/Kconfig | 36 + hw/nvram/chrp_nvram.c | 102 + hw/nvram/ds1225y.c | 172 + hw/nvram/eeprom93xx.c | 342 ++ hw/nvram/eeprom_at24c.c | 207 + hw/nvram/fw_cfg-interface.c | 23 + hw/nvram/fw_cfg.c | 1395 +++++++ hw/nvram/mac_nvram.c | 185 + hw/nvram/meson.build | 21 + hw/nvram/npcm7xx_otp.c | 440 ++ hw/nvram/nrf51_nvm.c | 400 ++ hw/nvram/spapr_nvram.c | 290 ++ hw/nvram/trace-events | 19 + hw/nvram/trace.h | 1 + hw/nvram/xlnx-bbram.c | 545 +++ hw/nvram/xlnx-efuse-crc.c | 119 + hw/nvram/xlnx-efuse.c | 283 ++ hw/nvram/xlnx-versal-efuse-cache.c | 114 + hw/nvram/xlnx-versal-efuse-ctrl.c | 793 ++++ hw/nvram/xlnx-zynqmp-efuse.c | 861 ++++ hw/openrisc/Kconfig | 6 + hw/openrisc/cputimer.c | 145 + hw/openrisc/meson.build | 5 + hw/openrisc/openrisc_sim.c | 195 + hw/pci-bridge/Kconfig | 29 + hw/pci-bridge/dec.c | 164 + hw/pci-bridge/dec.h | 9 + hw/pci-bridge/gen_pcie_root_port.c | 178 + hw/pci-bridge/i82801b11.c | 122 + hw/pci-bridge/ioh3420.c | 130 + hw/pci-bridge/meson.build | 14 + hw/pci-bridge/pci_bridge_dev.c | 308 ++ hw/pci-bridge/pci_expander_bridge.c | 385 ++ hw/pci-bridge/pcie_pci_bridge.c | 180 + hw/pci-bridge/pcie_root_port.c | 198 + hw/pci-bridge/simba.c | 102 + hw/pci-bridge/xio3130_downstream.c | 190 + hw/pci-bridge/xio3130_upstream.c | 159 + hw/pci-host/Kconfig | 79 + hw/pci-host/bonito.c | 819 ++++ hw/pci-host/designware.c | 773 ++++ hw/pci-host/gpex-acpi.c | 269 ++ hw/pci-host/gpex.c | 240 ++ hw/pci-host/grackle.c | 176 + hw/pci-host/i440fx.c | 412 ++ hw/pci-host/meson.build | 36 + hw/pci-host/mv64361.c | 951 +++++ hw/pci-host/mv643xx.h | 918 ++++ hw/pci-host/pam.c | 70 + hw/pci-host/pnv_phb3.c | 1186 ++++++ hw/pci-host/pnv_phb3_msi.c | 348 ++ hw/pci-host/pnv_phb3_pbcq.c | 358 ++ hw/pci-host/pnv_phb4.c | 1437 +++++++ hw/pci-host/pnv_phb4_pec.c | 593 +++ hw/pci-host/ppce500.c | 547 +++ hw/pci-host/q35.c | 721 ++++ hw/pci-host/raven.c | 445 ++ hw/pci-host/remote.c | 75 + hw/pci-host/sabre.c | 526 +++ hw/pci-host/sh_pci.c | 201 + hw/pci-host/trace-events | 34 + hw/pci-host/trace.h | 1 + hw/pci-host/uninorth.c | 582 +++ hw/pci-host/versatile.c | 548 +++ hw/pci-host/xen_igd_pt.c | 119 + hw/pci-host/xilinx-pcie.c | 331 ++ hw/pci/Kconfig | 15 + hw/pci/meson.build | 19 + hw/pci/msi.c | 442 ++ hw/pci/msix.c | 671 +++ hw/pci/pci-stub.c | 93 + hw/pci/pci.c | 2896 +++++++++++++ hw/pci/pci_bridge.c | 482 +++ hw/pci/pci_host.c | 252 ++ hw/pci/pcie.c | 1079 +++++ hw/pci/pcie_aer.c | 1045 +++++ hw/pci/pcie_host.c | 136 + hw/pci/pcie_port.c | 185 + hw/pci/shpc.c | 728 ++++ hw/pci/slotid_cap.c | 50 + hw/pci/trace-events | 12 + hw/pci/trace.h | 1 + hw/pcmcia/Kconfig | 2 + hw/pcmcia/meson.build | 2 + hw/pcmcia/pcmcia.c | 24 + hw/pcmcia/pxa2xx.c | 263 ++ hw/ppc/Kconfig | 153 + hw/ppc/e500-ccsr.h | 18 + hw/ppc/e500.c | 1174 ++++++ hw/ppc/e500.h | 49 + hw/ppc/e500plat.c | 122 + hw/ppc/fdt.c | 49 + hw/ppc/fw_cfg.c | 45 + hw/ppc/mac.h | 108 + hw/ppc/mac_newworld.c | 663 +++ hw/ppc/mac_oldworld.c | 455 ++ hw/ppc/meson.build | 90 + hw/ppc/mpc8544_guts.c | 142 + 
hw/ppc/mpc8544ds.c | 74 + hw/ppc/pef.c | 142 + hw/ppc/pegasos2.c | 952 +++++ hw/ppc/pnv.c | 2132 ++++++++++ hw/ppc/pnv_bmc.c | 313 ++ hw/ppc/pnv_core.c | 441 ++ hw/ppc/pnv_homer.c | 382 ++ hw/ppc/pnv_lpc.c | 853 ++++ hw/ppc/pnv_occ.c | 302 ++ hw/ppc/pnv_pnor.c | 141 + hw/ppc/pnv_psi.c | 967 +++++ hw/ppc/pnv_xscom.c | 324 ++ hw/ppc/ppc.c | 1465 +++++++ hw/ppc/ppc405.h | 72 + hw/ppc/ppc405_boards.c | 564 +++ hw/ppc/ppc405_uc.c | 1547 +++++++ hw/ppc/ppc440.h | 27 + hw/ppc/ppc440_bamboo.c | 307 ++ hw/ppc/ppc440_pcix.c | 538 +++ hw/ppc/ppc440_uc.c | 1377 ++++++ hw/ppc/ppc4xx_devs.c | 715 ++++ hw/ppc/ppc4xx_pci.c | 389 ++ hw/ppc/ppc_booke.c | 369 ++ hw/ppc/ppce500_spin.c | 209 + hw/ppc/prep.c | 440 ++ hw/ppc/prep_systemio.c | 315 ++ hw/ppc/rs6000_mc.c | 238 ++ hw/ppc/sam460ex.c | 516 +++ hw/ppc/spapr.c | 5136 +++++++++++++++++++++++ hw/ppc/spapr_caps.c | 944 +++++ hw/ppc/spapr_cpu_core.c | 391 ++ hw/ppc/spapr_drc.c | 1326 ++++++ hw/ppc/spapr_events.c | 1082 +++++ hw/ppc/spapr_hcall.c | 1557 +++++++ hw/ppc/spapr_iommu.c | 718 ++++ hw/ppc/spapr_irq.c | 599 +++ hw/ppc/spapr_numa.c | 697 ++++ hw/ppc/spapr_nvdimm.c | 528 +++ hw/ppc/spapr_ovec.c | 241 ++ hw/ppc/spapr_pci.c | 2530 +++++++++++ hw/ppc/spapr_pci_nvlink2.c | 445 ++ hw/ppc/spapr_pci_vfio.c | 217 + hw/ppc/spapr_rng.c | 162 + hw/ppc/spapr_rtas.c | 636 +++ hw/ppc/spapr_rtas_ddw.c | 291 ++ hw/ppc/spapr_rtc.c | 190 + hw/ppc/spapr_softmmu.c | 612 +++ hw/ppc/spapr_tpm_proxy.c | 177 + hw/ppc/spapr_vio.c | 741 ++++ hw/ppc/spapr_vof.c | 167 + hw/ppc/trace-events | 143 + hw/ppc/trace.h | 1 + hw/ppc/virtex_ml507.c | 316 ++ hw/ppc/vof.c | 1062 +++++ hw/rdma/Kconfig | 3 + hw/rdma/meson.build | 10 + hw/rdma/rdma.c | 30 + hw/rdma/rdma_backend.c | 1401 +++++++ hw/rdma/rdma_backend.h | 129 + hw/rdma/rdma_backend_defs.h | 76 + hw/rdma/rdma_rm.c | 816 ++++ hw/rdma/rdma_rm.h | 97 + hw/rdma/rdma_rm_defs.h | 146 + hw/rdma/rdma_utils.c | 125 + hw/rdma/rdma_utils.h | 64 + hw/rdma/trace-events | 31 + hw/rdma/trace.h | 1 + hw/rdma/vmw/pvrdma.h | 144 + hw/rdma/vmw/pvrdma_cmd.c | 825 ++++ hw/rdma/vmw/pvrdma_dev_ring.c | 142 + hw/rdma/vmw/pvrdma_dev_ring.h | 46 + hw/rdma/vmw/pvrdma_main.c | 723 ++++ hw/rdma/vmw/pvrdma_qp_ops.c | 298 ++ hw/rdma/vmw/pvrdma_qp_ops.h | 28 + hw/rdma/vmw/trace-events | 17 + hw/rdma/vmw/trace.h | 1 + hw/remote/Kconfig | 4 + hw/remote/iohub.c | 118 + hw/remote/machine.c | 79 + hw/remote/memory.c | 63 + hw/remote/meson.build | 13 + hw/remote/message.c | 230 + hw/remote/mpqemu-link.c | 264 ++ hw/remote/proxy-memory-listener.c | 226 + hw/remote/proxy.c | 387 ++ hw/remote/remote-obj.c | 203 + hw/remote/trace-events | 4 + hw/remote/trace.h | 1 + hw/riscv/Kconfig | 83 + hw/riscv/boot.c | 319 ++ hw/riscv/meson.build | 13 + hw/riscv/microchip_pfsoc.c | 624 +++ hw/riscv/numa.c | 241 ++ hw/riscv/opentitan.c | 284 ++ hw/riscv/riscv_hart.c | 89 + hw/riscv/shakti_c.c | 190 + hw/riscv/sifive_e.c | 294 ++ hw/riscv/sifive_u.c | 999 +++++ hw/riscv/spike.c | 342 ++ hw/riscv/virt.c | 1038 +++++ hw/rtc/Kconfig | 27 + hw/rtc/allwinner-rtc.c | 411 ++ hw/rtc/aspeed_rtc.c | 181 + hw/rtc/ds1338.c | 242 ++ hw/rtc/exynos4210_rtc.c | 617 +++ hw/rtc/goldfish_rtc.c | 298 ++ hw/rtc/m41t80.c | 120 + hw/rtc/m48t59-internal.h | 75 + hw/rtc/m48t59-isa.c | 161 + hw/rtc/m48t59.c | 686 +++ hw/rtc/mc146818rtc.c | 1059 +++++ hw/rtc/meson.build | 16 + hw/rtc/pl031.c | 356 ++ hw/rtc/sun4v-rtc.c | 97 + hw/rtc/trace-events | 33 + hw/rtc/trace.h | 1 + hw/rtc/twl92230.c | 882 ++++ hw/rtc/xlnx-zynqmp-rtc.c | 275 ++ hw/rx/Kconfig | 10 + hw/rx/meson.build | 5 + hw/rx/rx-gdbsim.c | 203 + 
hw/rx/rx62n.c | 311 ++ hw/s390x/3270-ccw.c | 181 + hw/s390x/Kconfig | 12 + hw/s390x/ap-bridge.c | 90 + hw/s390x/ap-device.c | 37 + hw/s390x/ccw-device.c | 89 + hw/s390x/ccw-device.h | 52 + hw/s390x/css-bridge.c | 166 + hw/s390x/css.c | 2673 ++++++++++++ hw/s390x/event-facility.c | 539 +++ hw/s390x/ipl.c | 777 ++++ hw/s390x/ipl.h | 289 ++ hw/s390x/meson.build | 58 + hw/s390x/pv.c | 174 + hw/s390x/s390-ccw.c | 201 + hw/s390x/s390-pci-bus.c | 1423 +++++++ hw/s390x/s390-pci-inst.c | 1310 ++++++ hw/s390x/s390-pci-vfio.c | 274 ++ hw/s390x/s390-skeys-kvm.c | 81 + hw/s390x/s390-skeys.c | 499 +++ hw/s390x/s390-stattrib-kvm.c | 195 + hw/s390x/s390-stattrib.c | 410 ++ hw/s390x/s390-virtio-ccw.c | 1109 +++++ hw/s390x/s390-virtio-hcall.c | 41 + hw/s390x/s390-virtio-hcall.h | 23 + hw/s390x/sclp.c | 476 +++ hw/s390x/sclpcpu.c | 106 + hw/s390x/sclpquiesce.c | 150 + hw/s390x/tod-kvm.c | 163 + hw/s390x/tod-tcg.c | 89 + hw/s390x/tod.c | 139 + hw/s390x/trace-events | 21 + hw/s390x/trace.h | 1 + hw/s390x/vhost-user-fs-ccw.c | 75 + hw/s390x/vhost-vsock-ccw.c | 73 + hw/s390x/virtio-ccw-9p.c | 66 + hw/s390x/virtio-ccw-balloon.c | 71 + hw/s390x/virtio-ccw-blk.c | 68 + hw/s390x/virtio-ccw-crypto.c | 69 + hw/s390x/virtio-ccw-gpu.c | 73 + hw/s390x/virtio-ccw-input.c | 119 + hw/s390x/virtio-ccw-net.c | 71 + hw/s390x/virtio-ccw-rng.c | 68 + hw/s390x/virtio-ccw-scsi.c | 125 + hw/s390x/virtio-ccw-serial.c | 79 + hw/s390x/virtio-ccw.c | 1302 ++++++ hw/s390x/virtio-ccw.h | 242 ++ hw/scsi/Kconfig | 55 + hw/scsi/emulation.c | 42 + hw/scsi/esp-pci.c | 564 +++ hw/scsi/esp.c | 1440 +++++++ hw/scsi/lsi53c895a.c | 2375 +++++++++++ hw/scsi/megasas.c | 2554 ++++++++++++ hw/scsi/meson.build | 26 + hw/scsi/mfi.h | 1272 ++++++ hw/scsi/mpi.h | 1153 +++++ hw/scsi/mptconfig.c | 904 ++++ hw/scsi/mptendian.c | 205 + hw/scsi/mptsas.c | 1447 +++++++ hw/scsi/mptsas.h | 105 + hw/scsi/scsi-bus.c | 1855 ++++++++ hw/scsi/scsi-disk.c | 3160 ++++++++++++++ hw/scsi/scsi-generic.c | 822 ++++ hw/scsi/spapr_vscsi.c | 1300 ++++++ hw/scsi/srp.h | 247 ++ hw/scsi/trace-events | 354 ++ hw/scsi/trace.h | 1 + hw/scsi/vhost-scsi-common.c | 171 + hw/scsi/vhost-scsi.c | 331 ++ hw/scsi/vhost-user-scsi.c | 239 ++ hw/scsi/viosrp.h | 217 + hw/scsi/virtio-scsi-dataplane.c | 273 ++ hw/scsi/virtio-scsi.c | 1138 +++++ hw/scsi/vmw_pvscsi.c | 1357 ++++++ hw/scsi/vmw_pvscsi.h | 434 ++ hw/sd/Kconfig | 25 + hw/sd/allwinner-sdhost.c | 873 ++++ hw/sd/aspeed_sdhci.c | 213 + hw/sd/bcm2835_sdhost.c | 459 ++ hw/sd/cadence_sdhci.c | 191 + hw/sd/core.c | 274 ++ hw/sd/meson.build | 13 + hw/sd/npcm7xx_sdhci.c | 182 + hw/sd/omap_mmc.c | 665 +++ hw/sd/pl181.c | 551 +++ hw/sd/pxa2xx_mmci.c | 604 +++ hw/sd/sd.c | 2227 ++++++++++ hw/sd/sdhci-internal.h | 346 ++ hw/sd/sdhci-pci.c | 87 + hw/sd/sdhci.c | 1869 +++++++++ hw/sd/sdmmc-internal.c | 72 + hw/sd/sdmmc-internal.h | 40 + hw/sd/ssi-sd.c | 445 ++ hw/sd/trace-events | 74 + hw/sd/trace.h | 1 + hw/sensor/Kconfig | 23 + hw/sensor/adm1272.c | 543 +++ hw/sensor/dps310.c | 225 + hw/sensor/emc141x.c | 326 ++ hw/sensor/max34451.c | 775 ++++ hw/sensor/meson.build | 6 + hw/sensor/tmp105.c | 328 ++ hw/sensor/tmp421.c | 391 ++ hw/sh4/Kconfig | 24 + hw/sh4/meson.build | 9 + hw/sh4/r2d.c | 380 ++ hw/sh4/sh7750.c | 904 ++++ hw/sh4/sh7750_regnames.c | 99 + hw/sh4/sh7750_regnames.h | 6 + hw/sh4/sh7750_regs.h | 1297 ++++++ hw/sh4/shix.c | 85 + hw/sh4/trace-events | 3 + hw/sh4/trace.h | 1 + hw/smbios/Kconfig | 2 + hw/smbios/meson.build | 13 + hw/smbios/smbios-stub.c | 31 + hw/smbios/smbios.c | 1358 ++++++ hw/smbios/smbios_build.h | 103 + 
hw/smbios/smbios_type_38-stub.c | 15 + hw/smbios/smbios_type_38.c | 119 + hw/sparc/Kconfig | 29 + hw/sparc/leon3.c | 390 ++ hw/sparc/meson.build | 6 + hw/sparc/sun4m.c | 1494 +++++++ hw/sparc/sun4m_iommu.c | 412 ++ hw/sparc/trace-events | 21 + hw/sparc/trace.h | 1 + hw/sparc64/Kconfig | 21 + hw/sparc64/meson.build | 6 + hw/sparc64/niagara.c | 182 + hw/sparc64/sparc64.c | 297 ++ hw/sparc64/sun4u.c | 863 ++++ hw/sparc64/sun4u_iommu.c | 342 ++ hw/sparc64/trace-events | 23 + hw/sparc64/trace.h | 1 + hw/ssi/Kconfig | 22 + hw/ssi/aspeed_smc.c | 1710 ++++++++ hw/ssi/imx_spi.c | 500 +++ hw/ssi/meson.build | 11 + hw/ssi/mss-spi.c | 422 ++ hw/ssi/npcm7xx_fiu.c | 572 +++ hw/ssi/omap_spi.c | 381 ++ hw/ssi/pl022.c | 316 ++ hw/ssi/sifive_spi.c | 357 ++ hw/ssi/ssi.c | 146 + hw/ssi/stm32f2xx_spi.c | 226 + hw/ssi/trace-events | 22 + hw/ssi/trace.h | 1 + hw/ssi/xilinx_spi.c | 390 ++ hw/ssi/xilinx_spips.c | 1501 +++++++ hw/timer/Kconfig | 62 + hw/timer/a9gtimer.c | 377 ++ hw/timer/allwinner-a10-pit.c | 316 ++ hw/timer/altera_timer.c | 244 ++ hw/timer/arm_mptimer.c | 331 ++ hw/timer/arm_timer.c | 419 ++ hw/timer/armv7m_systick.c | 310 ++ hw/timer/aspeed_timer.c | 756 ++++ hw/timer/avr_timer16.c | 621 +++ hw/timer/bcm2835_systmr.c | 178 + hw/timer/cadence_ttc.c | 503 +++ hw/timer/cmsdk-apb-dualtimer.c | 559 +++ hw/timer/cmsdk-apb-timer.c | 286 ++ hw/timer/digic-timer.c | 186 + hw/timer/etraxfs_timer.c | 376 ++ hw/timer/exynos4210_mct.c | 1568 +++++++ hw/timer/exynos4210_pwm.c | 445 ++ hw/timer/grlib_gptimer.c | 432 ++ hw/timer/hpet.c | 807 ++++ hw/timer/i8254.c | 385 ++ hw/timer/i8254_common.c | 271 ++ hw/timer/ibex_timer.c | 312 ++ hw/timer/imx_epit.c | 377 ++ hw/timer/imx_gpt.c | 583 +++ hw/timer/meson.build | 40 + hw/timer/mips_gictimer.c | 145 + hw/timer/mss-timer.c | 311 ++ hw/timer/npcm7xx_timer.c | 714 ++++ hw/timer/nrf51_timer.c | 404 ++ hw/timer/omap_gptimer.c | 514 +++ hw/timer/omap_synctimer.c | 110 + hw/timer/pxa2xx_timer.c | 621 +++ hw/timer/renesas_cmt.c | 283 ++ hw/timer/renesas_tmr.c | 493 +++ hw/timer/sh_timer.c | 373 ++ hw/timer/sifive_pwm.c | 468 +++ hw/timer/slavio_timer.c | 450 ++ hw/timer/sse-counter.c | 473 +++ hw/timer/sse-timer.c | 471 +++ hw/timer/stellaris-gptm.c | 332 ++ hw/timer/stm32f2xx_timer.c | 347 ++ hw/timer/trace-events | 101 + hw/timer/trace.h | 1 + hw/timer/xilinx_timer.c | 273 ++ hw/tpm/Kconfig | 25 + hw/tpm/meson.build | 8 + hw/tpm/tpm_crb.c | 345 ++ hw/tpm/tpm_ppi.c | 57 + hw/tpm/tpm_ppi.h | 45 + hw/tpm/tpm_prop.h | 33 + hw/tpm/tpm_spapr.c | 430 ++ hw/tpm/tpm_tis.h | 90 + hw/tpm/tpm_tis_common.c | 867 ++++ hw/tpm/tpm_tis_isa.c | 173 + hw/tpm/tpm_tis_sysbus.c | 162 + hw/tpm/trace-events | 38 + hw/tpm/trace.h | 1 + hw/tricore/Kconfig | 9 + hw/tricore/meson.build | 7 + hw/tricore/tc27x_soc.c | 242 ++ hw/tricore/triboard.c | 95 + hw/tricore/tricore_testboard.c | 118 + hw/tricore/tricore_testdevice.c | 82 + hw/usb/Kconfig | 135 + hw/usb/bus.c | 776 ++++ hw/usb/ccid-card-emulated.c | 622 +++ hw/usb/ccid-card-passthru.c | 424 ++ hw/usb/ccid.h | 62 + hw/usb/chipidea.c | 176 + hw/usb/combined-packet.c | 190 + hw/usb/core.c | 821 ++++ hw/usb/desc-msos.c | 239 ++ hw/usb/desc.c | 812 ++++ hw/usb/desc.h | 244 ++ hw/usb/dev-audio.c | 1029 +++++ hw/usb/dev-hid.c | 879 ++++ hw/usb/dev-hub.c | 704 ++++ hw/usb/dev-mtp.c | 2121 ++++++++++ hw/usb/dev-network.c | 1429 +++++++ hw/usb/dev-serial.c | 709 ++++ hw/usb/dev-smartcard-reader.c | 1496 +++++++ hw/usb/dev-storage-bot.c | 63 + hw/usb/dev-storage-classic.c | 157 + hw/usb/dev-storage.c | 589 +++ hw/usb/dev-uas.c | 991 +++++ 
hw/usb/dev-wacom.c | 383 ++ hw/usb/hcd-dwc2.c | 1478 +++++++ hw/usb/hcd-dwc2.h | 186 + hw/usb/hcd-dwc3.c | 688 +++ hw/usb/hcd-ehci-pci.c | 235 ++ hw/usb/hcd-ehci-sysbus.c | 305 ++ hw/usb/hcd-ehci.c | 2598 ++++++++++++ hw/usb/hcd-ehci.h | 383 ++ hw/usb/hcd-musb.c | 1553 +++++++ hw/usb/hcd-ohci-pci.c | 164 + hw/usb/hcd-ohci.c | 2028 +++++++++ hw/usb/hcd-ohci.h | 121 + hw/usb/hcd-uhci.c | 1370 ++++++ hw/usb/hcd-uhci.h | 94 + hw/usb/hcd-xhci-nec.c | 85 + hw/usb/hcd-xhci-pci.c | 262 ++ hw/usb/hcd-xhci-pci.h | 44 + hw/usb/hcd-xhci-sysbus.c | 123 + hw/usb/hcd-xhci-sysbus.h | 31 + hw/usb/hcd-xhci.c | 3613 ++++++++++++++++ hw/usb/hcd-xhci.h | 228 + hw/usb/host-libusb.c | 1978 +++++++++ hw/usb/host.h | 44 + hw/usb/imx-usb-phy.c | 224 + hw/usb/libhw.c | 69 + hw/usb/meson.build | 84 + hw/usb/pcap.c | 253 ++ hw/usb/quirks-ftdi-ids.h | 1249 ++++++ hw/usb/quirks-pl2303-ids.h | 150 + hw/usb/quirks.c | 54 + hw/usb/quirks.h | 918 ++++ hw/usb/redirect.c | 2618 ++++++++++++ hw/usb/trace-events | 347 ++ hw/usb/trace.h | 1 + hw/usb/tusb6010.c | 850 ++++ hw/usb/u2f-emulated.c | 405 ++ hw/usb/u2f-passthru.c | 552 +++ hw/usb/u2f.c | 351 ++ hw/usb/u2f.h | 92 + hw/usb/vt82c686-uhci-pci.c | 58 + hw/usb/xen-usb.c | 1097 +++++ hw/usb/xlnx-usb-subsystem.c | 92 + hw/usb/xlnx-versal-usb2-ctrl-regs.c | 228 + hw/vfio/Kconfig | 43 + hw/vfio/amd-xgbe.c | 61 + hw/vfio/ap.c | 186 + hw/vfio/calxeda-xgmac.c | 61 + hw/vfio/ccw.c | 791 ++++ hw/vfio/common.c | 2596 ++++++++++++ hw/vfio/display.c | 545 +++ hw/vfio/igd.c | 616 +++ hw/vfio/meson.build | 19 + hw/vfio/migration.c | 911 ++++ hw/vfio/pci-quirks.c | 1769 ++++++++ hw/vfio/pci.c | 3328 +++++++++++++++ hw/vfio/pci.h | 227 + hw/vfio/platform.c | 720 ++++ hw/vfio/spapr.c | 255 ++ hw/vfio/trace-events | 167 + hw/vfio/trace.h | 1 + hw/virtio/Kconfig | 70 + hw/virtio/meson.build | 55 + hw/virtio/trace-events | 130 + hw/virtio/trace.h | 1 + hw/virtio/vhost-backend.c | 407 ++ hw/virtio/vhost-scsi-pci.c | 104 + hw/virtio/vhost-stub.c | 17 + hw/virtio/vhost-user-blk-pci.c | 109 + hw/virtio/vhost-user-fs-pci.c | 88 + hw/virtio/vhost-user-fs.c | 332 ++ hw/virtio/vhost-user-i2c-pci.c | 69 + hw/virtio/vhost-user-i2c.c | 288 ++ hw/virtio/vhost-user-input-pci.c | 50 + hw/virtio/vhost-user-rng-pci.c | 79 + hw/virtio/vhost-user-rng.c | 289 ++ hw/virtio/vhost-user-scsi-pci.c | 110 + hw/virtio/vhost-user-vsock-pci.c | 86 + hw/virtio/vhost-user-vsock.c | 184 + hw/virtio/vhost-user.c | 2529 +++++++++++ hw/virtio/vhost-vdpa.c | 798 ++++ hw/virtio/vhost-vsock-common.c | 288 ++ hw/virtio/vhost-vsock-pci.c | 96 + hw/virtio/vhost-vsock.c | 241 ++ hw/virtio/vhost.c | 1864 +++++++++ hw/virtio/virtio-9p-pci.c | 91 + hw/virtio/virtio-balloon-pci.c | 88 + hw/virtio/virtio-balloon.c | 1086 +++++ hw/virtio/virtio-blk-pci.c | 107 + hw/virtio/virtio-bus.c | 363 ++ hw/virtio/virtio-crypto-pci.c | 94 + hw/virtio/virtio-crypto.c | 1006 +++++ hw/virtio/virtio-input-host-pci.c | 47 + hw/virtio/virtio-input-pci.c | 155 + hw/virtio/virtio-iommu-pci.c | 114 + hw/virtio/virtio-iommu.c | 1225 ++++++ hw/virtio/virtio-mem-pci.c | 163 + hw/virtio/virtio-mem-pci.h | 35 + hw/virtio/virtio-mem.c | 1283 ++++++ hw/virtio/virtio-mmio.c | 865 ++++ hw/virtio/virtio-net-pci.c | 108 + hw/virtio/virtio-pci.c | 2237 ++++++++++ hw/virtio/virtio-pci.h | 255 ++ hw/virtio/virtio-pmem-pci.c | 129 + hw/virtio/virtio-pmem-pci.h | 35 + hw/virtio/virtio-pmem.c | 198 + hw/virtio/virtio-rng-pci.c | 82 + hw/virtio/virtio-rng.c | 289 ++ hw/virtio/virtio-scsi-pci.c | 114 + hw/virtio/virtio-serial-pci.c | 117 + hw/virtio/virtio.c | 3876 
+++++++++++++++++ hw/watchdog/Kconfig | 22 + hw/watchdog/cmsdk-apb-watchdog.c | 414 ++ hw/watchdog/meson.build | 8 + hw/watchdog/sbsa_gwdt.c | 294 ++ hw/watchdog/trace-events | 11 + hw/watchdog/trace.h | 1 + hw/watchdog/watchdog.c | 135 + hw/watchdog/wdt_aspeed.c | 397 ++ hw/watchdog/wdt_diag288.c | 145 + hw/watchdog/wdt_i6300esb.c | 500 +++ hw/watchdog/wdt_ib700.c | 160 + hw/watchdog/wdt_imx2.c | 304 ++ hw/xen/meson.build | 20 + hw/xen/trace-events | 43 + hw/xen/trace.h | 1 + hw/xen/xen-backend.c | 175 + hw/xen/xen-bus-helper.c | 182 + hw/xen/xen-bus.c | 1405 +++++++ hw/xen/xen-host-pci-device.c | 402 ++ hw/xen/xen-host-pci-device.h | 58 + hw/xen/xen-legacy-backend.c | 843 ++++ hw/xen/xen_devconfig.c | 129 + hw/xen/xen_pt.c | 1005 +++++ hw/xen/xen_pt.h | 336 ++ hw/xen/xen_pt_config_init.c | 2110 ++++++++++ hw/xen/xen_pt_graphics.c | 291 ++ hw/xen/xen_pt_load_rom.c | 92 + hw/xen/xen_pt_msi.c | 650 +++ hw/xen/xen_pt_stub.c | 22 + hw/xen/xen_pvdev.c | 319 ++ hw/xenpv/meson.build | 3 + hw/xenpv/xen_machine_pv.c | 97 + hw/xtensa/Kconfig | 14 + hw/xtensa/bootparam.h | 49 + hw/xtensa/meson.build | 11 + hw/xtensa/mx_pic.c | 354 ++ hw/xtensa/pic_cpu.c | 130 + hw/xtensa/sim.c | 134 + hw/xtensa/virt.c | 132 + hw/xtensa/xtensa_memory.c | 51 + hw/xtensa/xtensa_memory.h | 37 + hw/xtensa/xtensa_sim.h | 34 + hw/xtensa/xtfpga.c | 746 ++++ 1562 files changed, 682918 insertions(+) create mode 100644 hw/9pfs/9p-local.c create mode 100644 hw/9pfs/9p-local.h create mode 100644 hw/9pfs/9p-posix-acl.c create mode 100644 hw/9pfs/9p-proxy.c create mode 100644 hw/9pfs/9p-proxy.h create mode 100644 hw/9pfs/9p-synth.c create mode 100644 hw/9pfs/9p-synth.h create mode 100644 hw/9pfs/9p-util.c create mode 100644 hw/9pfs/9p-util.h create mode 100644 hw/9pfs/9p-xattr-user.c create mode 100644 hw/9pfs/9p-xattr.c create mode 100644 hw/9pfs/9p-xattr.h create mode 100644 hw/9pfs/9p.c create mode 100644 hw/9pfs/9p.h create mode 100644 hw/9pfs/Kconfig create mode 100644 hw/9pfs/codir.c create mode 100644 hw/9pfs/cofile.c create mode 100644 hw/9pfs/cofs.c create mode 100644 hw/9pfs/coth.c create mode 100644 hw/9pfs/coth.h create mode 100644 hw/9pfs/coxattr.c create mode 100644 hw/9pfs/meson.build create mode 100644 hw/9pfs/trace-events create mode 100644 hw/9pfs/trace.h create mode 100644 hw/9pfs/virtio-9p-device.c create mode 100644 hw/9pfs/virtio-9p.h create mode 100644 hw/9pfs/xen-9p-backend.c create mode 100644 hw/9pfs/xen-9pfs.h create mode 100644 hw/Kconfig create mode 100644 hw/acpi/Kconfig create mode 100644 hw/acpi/acpi-cpu-hotplug-stub.c create mode 100644 hw/acpi/acpi-mem-hotplug-stub.c create mode 100644 hw/acpi/acpi-nvdimm-stub.c create mode 100644 hw/acpi/acpi-pci-hotplug-stub.c create mode 100644 hw/acpi/acpi-stub.c create mode 100644 hw/acpi/acpi-x86-stub.c create mode 100644 hw/acpi/acpi_interface.c create mode 100644 hw/acpi/aml-build-stub.c create mode 100644 hw/acpi/aml-build.c create mode 100644 hw/acpi/bios-linker-loader.c create mode 100644 hw/acpi/core.c create mode 100644 hw/acpi/cpu.c create mode 100644 hw/acpi/cpu_hotplug.c create mode 100644 hw/acpi/generic_event_device.c create mode 100644 hw/acpi/ghes-stub.c create mode 100644 hw/acpi/ghes.c create mode 100644 hw/acpi/hmat.c create mode 100644 hw/acpi/hmat.h create mode 100644 hw/acpi/ich9.c create mode 100644 hw/acpi/ipmi-stub.c create mode 100644 hw/acpi/ipmi.c create mode 100644 hw/acpi/memory_hotplug.c create mode 100644 hw/acpi/meson.build create mode 100644 hw/acpi/nvdimm.c create mode 100644 hw/acpi/pci.c create mode 100644 hw/acpi/pcihp.c 
create mode 100644 hw/acpi/piix4.c create mode 100644 hw/acpi/tco.c create mode 100644 hw/acpi/tpm.c create mode 100644 hw/acpi/trace-events create mode 100644 hw/acpi/trace.h create mode 100644 hw/acpi/utils.c create mode 100644 hw/acpi/viot.c create mode 100644 hw/acpi/viot.h create mode 100644 hw/acpi/vmgenid.c create mode 100644 hw/adc/Kconfig create mode 100644 hw/adc/aspeed_adc.c create mode 100644 hw/adc/max111x.c create mode 100644 hw/adc/meson.build create mode 100644 hw/adc/npcm7xx_adc.c create mode 100644 hw/adc/stm32f2xx_adc.c create mode 100644 hw/adc/trace-events create mode 100644 hw/adc/trace.h create mode 100644 hw/adc/zynq-xadc.c create mode 100644 hw/alpha/Kconfig create mode 100644 hw/alpha/alpha_sys.h create mode 100644 hw/alpha/dp264.c create mode 100644 hw/alpha/meson.build create mode 100644 hw/alpha/pci.c create mode 100644 hw/alpha/trace-events create mode 100644 hw/alpha/trace.h create mode 100644 hw/alpha/typhoon.c create mode 100644 hw/arm/Kconfig create mode 100644 hw/arm/allwinner-a10.c create mode 100644 hw/arm/allwinner-h3.c create mode 100644 hw/arm/armsse.c create mode 100644 hw/arm/armv7m.c create mode 100644 hw/arm/aspeed.c create mode 100644 hw/arm/aspeed_ast2600.c create mode 100644 hw/arm/aspeed_soc.c create mode 100644 hw/arm/bcm2835_peripherals.c create mode 100644 hw/arm/bcm2836.c create mode 100644 hw/arm/boot.c create mode 100644 hw/arm/collie.c create mode 100644 hw/arm/cubieboard.c create mode 100644 hw/arm/digic.c create mode 100644 hw/arm/digic_boards.c create mode 100644 hw/arm/exynos4210.c create mode 100644 hw/arm/exynos4_boards.c create mode 100644 hw/arm/fsl-imx25.c create mode 100644 hw/arm/fsl-imx31.c create mode 100644 hw/arm/fsl-imx6.c create mode 100644 hw/arm/fsl-imx6ul.c create mode 100644 hw/arm/fsl-imx7.c create mode 100644 hw/arm/gumstix.c create mode 100644 hw/arm/highbank.c create mode 100644 hw/arm/imx25_pdk.c create mode 100644 hw/arm/integratorcp.c create mode 100644 hw/arm/kzm.c create mode 100644 hw/arm/mainstone.c create mode 100644 hw/arm/mcimx6ul-evk.c create mode 100644 hw/arm/mcimx7d-sabre.c create mode 100644 hw/arm/meson.build create mode 100644 hw/arm/microbit.c create mode 100644 hw/arm/mps2-tz.c create mode 100644 hw/arm/mps2.c create mode 100644 hw/arm/msf2-soc.c create mode 100644 hw/arm/msf2-som.c create mode 100644 hw/arm/musca.c create mode 100644 hw/arm/musicpal.c create mode 100644 hw/arm/netduino2.c create mode 100644 hw/arm/netduinoplus2.c create mode 100644 hw/arm/npcm7xx.c create mode 100644 hw/arm/npcm7xx_boards.c create mode 100644 hw/arm/nrf51_soc.c create mode 100644 hw/arm/nseries.c create mode 100644 hw/arm/omap1.c create mode 100644 hw/arm/omap2.c create mode 100644 hw/arm/omap_sx1.c create mode 100644 hw/arm/orangepi.c create mode 100644 hw/arm/palm.c create mode 100644 hw/arm/pxa2xx.c create mode 100644 hw/arm/pxa2xx_gpio.c create mode 100644 hw/arm/pxa2xx_pic.c create mode 100644 hw/arm/raspi.c create mode 100644 hw/arm/realview.c create mode 100644 hw/arm/sabrelite.c create mode 100644 hw/arm/sbsa-ref.c create mode 100644 hw/arm/smmu-common.c create mode 100644 hw/arm/smmu-internal.h create mode 100644 hw/arm/smmuv3-internal.h create mode 100644 hw/arm/smmuv3.c create mode 100644 hw/arm/spitz.c create mode 100644 hw/arm/stellaris.c create mode 100644 hw/arm/stm32f100_soc.c create mode 100644 hw/arm/stm32f205_soc.c create mode 100644 hw/arm/stm32f405_soc.c create mode 100644 hw/arm/stm32vldiscovery.c create mode 100644 hw/arm/strongarm.c create mode 100644 hw/arm/strongarm.h create mode 
100644 hw/arm/sysbus-fdt.c create mode 100644 hw/arm/tosa.c create mode 100644 hw/arm/trace-events create mode 100644 hw/arm/trace.h create mode 100644 hw/arm/versatilepb.c create mode 100644 hw/arm/vexpress.c create mode 100644 hw/arm/virt-acpi-build.c create mode 100644 hw/arm/virt.c create mode 100644 hw/arm/xilinx_zynq.c create mode 100644 hw/arm/xlnx-versal-virt.c create mode 100644 hw/arm/xlnx-versal.c create mode 100644 hw/arm/xlnx-zcu102.c create mode 100644 hw/arm/xlnx-zynqmp.c create mode 100644 hw/arm/z2.c create mode 100644 hw/audio/Kconfig create mode 100644 hw/audio/ac97.c create mode 100644 hw/audio/adlib.c create mode 100644 hw/audio/cs4231.c create mode 100644 hw/audio/cs4231a.c create mode 100644 hw/audio/es1370.c create mode 100644 hw/audio/fmopl.c create mode 100644 hw/audio/fmopl.h create mode 100644 hw/audio/gus.c create mode 100644 hw/audio/gusemu.h create mode 100644 hw/audio/gusemu_hal.c create mode 100644 hw/audio/gusemu_mixer.c create mode 100644 hw/audio/gustate.h create mode 100644 hw/audio/hda-codec-common.h create mode 100644 hw/audio/hda-codec.c create mode 100644 hw/audio/intel-hda-defs.h create mode 100644 hw/audio/intel-hda.c create mode 100644 hw/audio/intel-hda.h create mode 100644 hw/audio/lm4549.c create mode 100644 hw/audio/lm4549.h create mode 100644 hw/audio/marvell_88w8618.c create mode 100644 hw/audio/meson.build create mode 100644 hw/audio/pcspk.c create mode 100644 hw/audio/pl041.c create mode 100644 hw/audio/pl041.h create mode 100644 hw/audio/pl041.hx create mode 100644 hw/audio/sb16.c create mode 100644 hw/audio/soundhw.c create mode 100644 hw/audio/trace-events create mode 100644 hw/audio/trace.h create mode 100644 hw/audio/via-ac97.c create mode 100644 hw/audio/wm8750.c create mode 100644 hw/avr/Kconfig create mode 100644 hw/avr/arduino.c create mode 100644 hw/avr/atmega.c create mode 100644 hw/avr/atmega.h create mode 100644 hw/avr/boot.c create mode 100644 hw/avr/boot.h create mode 100644 hw/avr/meson.build create mode 100644 hw/block/Kconfig create mode 100644 hw/block/block.c create mode 100644 hw/block/cdrom.c create mode 100644 hw/block/dataplane/meson.build create mode 100644 hw/block/dataplane/trace-events create mode 100644 hw/block/dataplane/trace.h create mode 100644 hw/block/dataplane/virtio-blk.c create mode 100644 hw/block/dataplane/virtio-blk.h create mode 100644 hw/block/dataplane/xen-block.c create mode 100644 hw/block/dataplane/xen-block.h create mode 100644 hw/block/ecc.c create mode 100644 hw/block/fdc-internal.h create mode 100644 hw/block/fdc-isa.c create mode 100644 hw/block/fdc-sysbus.c create mode 100644 hw/block/fdc.c create mode 100644 hw/block/hd-geometry.c create mode 100644 hw/block/m25p80.c create mode 100644 hw/block/meson.build create mode 100644 hw/block/nand.c create mode 100644 hw/block/onenand.c create mode 100644 hw/block/pflash_cfi01.c create mode 100644 hw/block/pflash_cfi02.c create mode 100644 hw/block/swim.c create mode 100644 hw/block/tc58128.c create mode 100644 hw/block/trace-events create mode 100644 hw/block/trace.h create mode 100644 hw/block/vhost-user-blk.c create mode 100644 hw/block/virtio-blk.c create mode 100644 hw/block/xen-block.c create mode 100644 hw/block/xen_blkif.h create mode 100644 hw/char/Kconfig create mode 100644 hw/char/avr_usart.c create mode 100644 hw/char/bcm2835_aux.c create mode 100644 hw/char/cadence_uart.c create mode 100644 hw/char/cmsdk-apb-uart.c create mode 100644 hw/char/debugcon.c create mode 100644 hw/char/digic-uart.c create mode 100644 hw/char/escc.c create 
mode 100644 hw/char/etraxfs_ser.c create mode 100644 hw/char/exynos4210_uart.c create mode 100644 hw/char/goldfish_tty.c create mode 100644 hw/char/grlib_apbuart.c create mode 100644 hw/char/ibex_uart.c create mode 100644 hw/char/imx_serial.c create mode 100644 hw/char/ipoctal232.c create mode 100644 hw/char/mcf_uart.c create mode 100644 hw/char/mchp_pfsoc_mmuart.c create mode 100644 hw/char/meson.build create mode 100644 hw/char/nrf51_uart.c create mode 100644 hw/char/omap_uart.c create mode 100644 hw/char/parallel-isa.c create mode 100644 hw/char/parallel.c create mode 100644 hw/char/pl011.c create mode 100644 hw/char/renesas_sci.c create mode 100644 hw/char/riscv_htif.c create mode 100644 hw/char/sclpconsole-lm.c create mode 100644 hw/char/sclpconsole.c create mode 100644 hw/char/serial-isa.c create mode 100644 hw/char/serial-pci-multi.c create mode 100644 hw/char/serial-pci.c create mode 100644 hw/char/serial.c create mode 100644 hw/char/sh_serial.c create mode 100644 hw/char/shakti_uart.c create mode 100644 hw/char/sifive_uart.c create mode 100644 hw/char/spapr_vty.c create mode 100644 hw/char/stm32f2xx_usart.c create mode 100644 hw/char/terminal3270.c create mode 100644 hw/char/trace-events create mode 100644 hw/char/trace.h create mode 100644 hw/char/virtio-console.c create mode 100644 hw/char/virtio-serial-bus.c create mode 100644 hw/char/xen_console.c create mode 100644 hw/char/xilinx_uartlite.c create mode 100644 hw/core/Kconfig create mode 100644 hw/core/bus.c create mode 100644 hw/core/clock-vmstate.c create mode 100644 hw/core/clock.c create mode 100644 hw/core/cpu-common.c create mode 100644 hw/core/cpu-sysemu.c create mode 100644 hw/core/fw-path-provider.c create mode 100644 hw/core/generic-loader.c create mode 100644 hw/core/gpio.c create mode 100644 hw/core/guest-loader.c create mode 100644 hw/core/guest-loader.h create mode 100644 hw/core/hotplug-stubs.c create mode 100644 hw/core/hotplug.c create mode 100644 hw/core/irq.c create mode 100644 hw/core/loader-fit.c create mode 100644 hw/core/loader.c create mode 100644 hw/core/machine-hmp-cmds.c create mode 100644 hw/core/machine-qmp-cmds.c create mode 100644 hw/core/machine-smp.c create mode 100644 hw/core/machine.c create mode 100644 hw/core/meson.build create mode 100644 hw/core/nmi.c create mode 100644 hw/core/null-machine.c create mode 100644 hw/core/numa.c create mode 100644 hw/core/or-irq.c create mode 100644 hw/core/platform-bus.c create mode 100644 hw/core/ptimer.c create mode 100644 hw/core/qdev-clock.c create mode 100644 hw/core/qdev-fw.c create mode 100644 hw/core/qdev-hotplug.c create mode 100644 hw/core/qdev-prop-internal.h create mode 100644 hw/core/qdev-properties-system.c create mode 100644 hw/core/qdev-properties.c create mode 100644 hw/core/qdev.c create mode 100644 hw/core/register.c create mode 100644 hw/core/reset.c create mode 100644 hw/core/resettable.c create mode 100644 hw/core/split-irq.c create mode 100644 hw/core/stream.c create mode 100644 hw/core/sysbus.c create mode 100644 hw/core/trace-events create mode 100644 hw/core/trace.h create mode 100644 hw/core/uboot_image.h create mode 100644 hw/core/vm-change-state-handler.c create mode 100644 hw/core/vmstate-if.c create mode 100644 hw/cpu/Kconfig create mode 100644 hw/cpu/a15mpcore.c create mode 100644 hw/cpu/a9mpcore.c create mode 100644 hw/cpu/arm11mpcore.c create mode 100644 hw/cpu/cluster.c create mode 100644 hw/cpu/core.c create mode 100644 hw/cpu/meson.build create mode 100644 hw/cpu/realview_mpcore.c create mode 100644 hw/cris/Kconfig 
create mode 100644 hw/cris/axis_dev88.c create mode 100644 hw/cris/boot.c create mode 100644 hw/cris/boot.h create mode 100644 hw/cris/meson.build create mode 100644 hw/display/Kconfig create mode 100644 hw/display/artist.c create mode 100644 hw/display/ati.c create mode 100644 hw/display/ati_2d.c create mode 100644 hw/display/ati_dbg.c create mode 100644 hw/display/ati_int.h create mode 100644 hw/display/ati_regs.h create mode 100644 hw/display/bcm2835_fb.c create mode 100644 hw/display/blizzard.c create mode 100644 hw/display/bochs-display.c create mode 100644 hw/display/cg3.c create mode 100644 hw/display/cirrus_vga.c create mode 100644 hw/display/cirrus_vga_internal.h create mode 100644 hw/display/cirrus_vga_isa.c create mode 100644 hw/display/cirrus_vga_rop.h create mode 100644 hw/display/cirrus_vga_rop2.h create mode 100644 hw/display/dpcd.c create mode 100644 hw/display/edid-generate.c create mode 100644 hw/display/edid-region.c create mode 100644 hw/display/exynos4210_fimd.c create mode 100644 hw/display/framebuffer.c create mode 100644 hw/display/framebuffer.h create mode 100644 hw/display/g364fb.c create mode 100644 hw/display/i2c-ddc.c create mode 100644 hw/display/jazz_led.c create mode 100644 hw/display/macfb.c create mode 100644 hw/display/meson.build create mode 100644 hw/display/next-fb.c create mode 100644 hw/display/omap_dss.c create mode 100644 hw/display/omap_lcdc.c create mode 100644 hw/display/pl110.c create mode 100644 hw/display/pl110_template.h create mode 100644 hw/display/pxa2xx_lcd.c create mode 100644 hw/display/qxl-logger.c create mode 100644 hw/display/qxl-render.c create mode 100644 hw/display/qxl.c create mode 100644 hw/display/qxl.h create mode 100644 hw/display/ramfb-standalone.c create mode 100644 hw/display/ramfb.c create mode 100644 hw/display/sii9022.c create mode 100644 hw/display/sm501.c create mode 100644 hw/display/ssd0303.c create mode 100644 hw/display/ssd0323.c create mode 100644 hw/display/tc6393xb.c create mode 100644 hw/display/tcx.c create mode 100644 hw/display/trace-events create mode 100644 hw/display/trace.h create mode 100644 hw/display/vga-access.h create mode 100644 hw/display/vga-helpers.h create mode 100644 hw/display/vga-isa-mm.c create mode 100644 hw/display/vga-isa.c create mode 100644 hw/display/vga-pci.c create mode 100644 hw/display/vga.c create mode 100644 hw/display/vga_int.h create mode 100644 hw/display/vga_regs.h create mode 100644 hw/display/vhost-user-gpu-pci.c create mode 100644 hw/display/vhost-user-gpu.c create mode 100644 hw/display/vhost-user-vga.c create mode 100644 hw/display/virtio-gpu-base.c create mode 100644 hw/display/virtio-gpu-gl.c create mode 100644 hw/display/virtio-gpu-pci-gl.c create mode 100644 hw/display/virtio-gpu-pci.c create mode 100644 hw/display/virtio-gpu-udmabuf-stubs.c create mode 100644 hw/display/virtio-gpu-udmabuf.c create mode 100644 hw/display/virtio-gpu-virgl.c create mode 100644 hw/display/virtio-gpu.c create mode 100644 hw/display/virtio-vga-gl.c create mode 100644 hw/display/virtio-vga.c create mode 100644 hw/display/virtio-vga.h create mode 100644 hw/display/vmware_vga.c create mode 100644 hw/display/xenfb.c create mode 100644 hw/display/xlnx_dp.c create mode 100644 hw/dma/Kconfig create mode 100644 hw/dma/bcm2835_dma.c create mode 100644 hw/dma/etraxfs_dma.c create mode 100644 hw/dma/i82374.c create mode 100644 hw/dma/i8257.c create mode 100644 hw/dma/meson.build create mode 100644 hw/dma/omap_dma.c create mode 100644 hw/dma/pl080.c create mode 100644 hw/dma/pl330.c create mode 
100644 hw/dma/pxa2xx_dma.c create mode 100644 hw/dma/rc4030.c create mode 100644 hw/dma/sifive_pdma.c create mode 100644 hw/dma/soc_dma.c create mode 100644 hw/dma/sparc32_dma.c create mode 100644 hw/dma/trace-events create mode 100644 hw/dma/trace.h create mode 100644 hw/dma/xilinx_axidma.c create mode 100644 hw/dma/xlnx-zdma.c create mode 100644 hw/dma/xlnx-zynq-devcfg.c create mode 100644 hw/dma/xlnx_csu_dma.c create mode 100644 hw/dma/xlnx_dpdma.c create mode 100644 hw/gpio/Kconfig create mode 100644 hw/gpio/aspeed_gpio.c create mode 100644 hw/gpio/bcm2835_gpio.c create mode 100644 hw/gpio/gpio_key.c create mode 100644 hw/gpio/gpio_pwr.c create mode 100644 hw/gpio/imx_gpio.c create mode 100644 hw/gpio/max7310.c create mode 100644 hw/gpio/meson.build create mode 100644 hw/gpio/mpc8xxx.c create mode 100644 hw/gpio/npcm7xx_gpio.c create mode 100644 hw/gpio/nrf51_gpio.c create mode 100644 hw/gpio/omap_gpio.c create mode 100644 hw/gpio/pl061.c create mode 100644 hw/gpio/sifive_gpio.c create mode 100644 hw/gpio/trace-events create mode 100644 hw/gpio/trace.h create mode 100644 hw/gpio/zaurus.c create mode 100644 hw/hppa/Kconfig create mode 100644 hw/hppa/dino.c create mode 100644 hw/hppa/hppa_hardware.h create mode 100644 hw/hppa/hppa_sys.h create mode 100644 hw/hppa/lasi.c create mode 100644 hw/hppa/machine.c create mode 100644 hw/hppa/meson.build create mode 100644 hw/hppa/pci.c create mode 100644 hw/hppa/trace-events create mode 100644 hw/hppa/trace.h create mode 100644 hw/hyperv/Kconfig create mode 100644 hw/hyperv/hyperv.c create mode 100644 hw/hyperv/hyperv_testdev.c create mode 100644 hw/hyperv/meson.build create mode 100644 hw/hyperv/trace-events create mode 100644 hw/hyperv/trace.h create mode 100644 hw/hyperv/vmbus.c create mode 100644 hw/i2c/Kconfig create mode 100644 hw/i2c/aspeed_i2c.c create mode 100644 hw/i2c/bitbang_i2c.c create mode 100644 hw/i2c/core.c create mode 100644 hw/i2c/exynos4210_i2c.c create mode 100644 hw/i2c/i2c_mux_pca954x.c create mode 100644 hw/i2c/imx_i2c.c create mode 100644 hw/i2c/meson.build create mode 100644 hw/i2c/microbit_i2c.c create mode 100644 hw/i2c/mpc_i2c.c create mode 100644 hw/i2c/npcm7xx_smbus.c create mode 100644 hw/i2c/omap_i2c.c create mode 100644 hw/i2c/pm_smbus.c create mode 100644 hw/i2c/pmbus_device.c create mode 100644 hw/i2c/ppc4xx_i2c.c create mode 100644 hw/i2c/smbus_eeprom.c create mode 100644 hw/i2c/smbus_ich9.c create mode 100644 hw/i2c/smbus_master.c create mode 100644 hw/i2c/smbus_slave.c create mode 100644 hw/i2c/trace-events create mode 100644 hw/i2c/trace.h create mode 100644 hw/i2c/versatile_i2c.c create mode 100644 hw/i386/Kconfig create mode 100644 hw/i386/acpi-build.c create mode 100644 hw/i386/acpi-build.h create mode 100644 hw/i386/acpi-common.c create mode 100644 hw/i386/acpi-common.h create mode 100644 hw/i386/acpi-microvm.c create mode 100644 hw/i386/acpi-microvm.h create mode 100644 hw/i386/amd_iommu.c create mode 100644 hw/i386/amd_iommu.h create mode 100644 hw/i386/e820_memory_layout.c create mode 100644 hw/i386/e820_memory_layout.h create mode 100644 hw/i386/fw_cfg.c create mode 100644 hw/i386/fw_cfg.h create mode 100644 hw/i386/generic_event_device_x86.c create mode 100644 hw/i386/intel_iommu.c create mode 100644 hw/i386/intel_iommu_internal.h create mode 100644 hw/i386/kvm/apic.c create mode 100644 hw/i386/kvm/clock.c create mode 100644 hw/i386/kvm/i8254.c create mode 100644 hw/i386/kvm/i8259.c create mode 100644 hw/i386/kvm/ioapic.c create mode 100644 hw/i386/kvm/meson.build create mode 100644 
hw/i386/kvmvapic.c create mode 100644 hw/i386/meson.build create mode 100644 hw/i386/microvm-dt.c create mode 100644 hw/i386/microvm-dt.h create mode 100644 hw/i386/microvm.c create mode 100644 hw/i386/multiboot.c create mode 100644 hw/i386/multiboot.h create mode 100644 hw/i386/pc.c create mode 100644 hw/i386/pc_piix.c create mode 100644 hw/i386/pc_q35.c create mode 100644 hw/i386/pc_sysfw.c create mode 100644 hw/i386/pc_sysfw_ovmf-stubs.c create mode 100644 hw/i386/pc_sysfw_ovmf.c create mode 100644 hw/i386/port92.c create mode 100644 hw/i386/sgx-epc.c create mode 100644 hw/i386/sgx-stub.c create mode 100644 hw/i386/sgx.c create mode 100644 hw/i386/trace-events create mode 100644 hw/i386/trace.h create mode 100644 hw/i386/vmmouse.c create mode 100644 hw/i386/vmport.c create mode 100644 hw/i386/x86-iommu-stub.c create mode 100644 hw/i386/x86-iommu.c create mode 100644 hw/i386/x86.c create mode 100644 hw/i386/xen/meson.build create mode 100644 hw/i386/xen/trace-events create mode 100644 hw/i386/xen/trace.h create mode 100644 hw/i386/xen/xen-hvm.c create mode 100644 hw/i386/xen/xen-mapcache.c create mode 100644 hw/i386/xen/xen_apic.c create mode 100644 hw/i386/xen/xen_platform.c create mode 100644 hw/i386/xen/xen_pvdevice.c create mode 100644 hw/ide/Kconfig create mode 100644 hw/ide/ahci-allwinner.c create mode 100644 hw/ide/ahci.c create mode 100644 hw/ide/ahci_internal.h create mode 100644 hw/ide/atapi.c create mode 100644 hw/ide/cmd646.c create mode 100644 hw/ide/core.c create mode 100644 hw/ide/ich.c create mode 100644 hw/ide/ioport.c create mode 100644 hw/ide/isa.c create mode 100644 hw/ide/macio.c create mode 100644 hw/ide/meson.build create mode 100644 hw/ide/microdrive.c create mode 100644 hw/ide/mmio.c create mode 100644 hw/ide/pci.c create mode 100644 hw/ide/piix.c create mode 100644 hw/ide/qdev.c create mode 100644 hw/ide/sii3112.c create mode 100644 hw/ide/trace-events create mode 100644 hw/ide/trace.h create mode 100644 hw/ide/via.c create mode 100644 hw/input/Kconfig create mode 100644 hw/input/adb-internal.h create mode 100644 hw/input/adb-kbd.c create mode 100644 hw/input/adb-mouse.c create mode 100644 hw/input/adb.c create mode 100644 hw/input/ads7846.c create mode 100644 hw/input/hid.c create mode 100644 hw/input/lasips2.c create mode 100644 hw/input/lm832x.c create mode 100644 hw/input/meson.build create mode 100644 hw/input/pckbd.c create mode 100644 hw/input/pl050.c create mode 100644 hw/input/ps2.c create mode 100644 hw/input/pxa2xx_keypad.c create mode 100644 hw/input/stellaris_input.c create mode 100644 hw/input/trace-events create mode 100644 hw/input/trace.h create mode 100644 hw/input/tsc2005.c create mode 100644 hw/input/tsc210x.c create mode 100644 hw/input/vhost-user-input.c create mode 100644 hw/input/virtio-input-hid.c create mode 100644 hw/input/virtio-input-host.c create mode 100644 hw/input/virtio-input.c create mode 100644 hw/intc/Kconfig create mode 100644 hw/intc/allwinner-a10-pic.c create mode 100644 hw/intc/apic.c create mode 100644 hw/intc/apic_common.c create mode 100644 hw/intc/arm_gic.c create mode 100644 hw/intc/arm_gic_common.c create mode 100644 hw/intc/arm_gic_kvm.c create mode 100644 hw/intc/arm_gicv2m.c create mode 100644 hw/intc/arm_gicv3.c create mode 100644 hw/intc/arm_gicv3_common.c create mode 100644 hw/intc/arm_gicv3_cpuif.c create mode 100644 hw/intc/arm_gicv3_dist.c create mode 100644 hw/intc/arm_gicv3_its.c create mode 100644 hw/intc/arm_gicv3_its_common.c create mode 100644 hw/intc/arm_gicv3_its_kvm.c create mode 100644 
hw/intc/arm_gicv3_kvm.c create mode 100644 hw/intc/arm_gicv3_redist.c create mode 100644 hw/intc/armv7m_nvic.c create mode 100644 hw/intc/aspeed_vic.c create mode 100644 hw/intc/bcm2835_ic.c create mode 100644 hw/intc/bcm2836_control.c create mode 100644 hw/intc/etraxfs_pic.c create mode 100644 hw/intc/exynos4210_combiner.c create mode 100644 hw/intc/exynos4210_gic.c create mode 100644 hw/intc/gic_internal.h create mode 100644 hw/intc/gicv3_internal.h create mode 100644 hw/intc/goldfish_pic.c create mode 100644 hw/intc/grlib_irqmp.c create mode 100644 hw/intc/heathrow_pic.c create mode 100644 hw/intc/i8259.c create mode 100644 hw/intc/i8259_common.c create mode 100644 hw/intc/imx_avic.c create mode 100644 hw/intc/imx_gpcv2.c create mode 100644 hw/intc/intc.c create mode 100644 hw/intc/ioapic.c create mode 100644 hw/intc/ioapic_common.c create mode 100644 hw/intc/loongson_liointc.c create mode 100644 hw/intc/m68k_irqc.c create mode 100644 hw/intc/meson.build create mode 100644 hw/intc/mips_gic.c create mode 100644 hw/intc/omap_intc.c create mode 100644 hw/intc/ompic.c create mode 100644 hw/intc/openpic.c create mode 100644 hw/intc/openpic_kvm.c create mode 100644 hw/intc/pl190.c create mode 100644 hw/intc/pnv_xive.c create mode 100644 hw/intc/pnv_xive_regs.h create mode 100644 hw/intc/ppc-uic.c create mode 100644 hw/intc/realview_gic.c create mode 100644 hw/intc/riscv_aclint.c create mode 100644 hw/intc/rx_icu.c create mode 100644 hw/intc/s390_flic.c create mode 100644 hw/intc/s390_flic_kvm.c create mode 100644 hw/intc/sh_intc.c create mode 100644 hw/intc/sifive_plic.c create mode 100644 hw/intc/slavio_intctl.c create mode 100644 hw/intc/spapr_xive.c create mode 100644 hw/intc/spapr_xive_kvm.c create mode 100644 hw/intc/trace-events create mode 100644 hw/intc/trace.h create mode 100644 hw/intc/vgic_common.h create mode 100644 hw/intc/xics.c create mode 100644 hw/intc/xics_kvm.c create mode 100644 hw/intc/xics_pnv.c create mode 100644 hw/intc/xics_spapr.c create mode 100644 hw/intc/xilinx_intc.c create mode 100644 hw/intc/xive.c create mode 100644 hw/intc/xlnx-pmu-iomod-intc.c create mode 100644 hw/intc/xlnx-zynqmp-ipi.c create mode 100644 hw/ipack/Kconfig create mode 100644 hw/ipack/ipack.c create mode 100644 hw/ipack/meson.build create mode 100644 hw/ipack/tpci200.c create mode 100644 hw/ipmi/Kconfig create mode 100644 hw/ipmi/ipmi.c create mode 100644 hw/ipmi/ipmi_bmc_extern.c create mode 100644 hw/ipmi/ipmi_bmc_sim.c create mode 100644 hw/ipmi/ipmi_bt.c create mode 100644 hw/ipmi/ipmi_kcs.c create mode 100644 hw/ipmi/isa_ipmi_bt.c create mode 100644 hw/ipmi/isa_ipmi_kcs.c create mode 100644 hw/ipmi/meson.build create mode 100644 hw/ipmi/pci_ipmi_bt.c create mode 100644 hw/ipmi/pci_ipmi_kcs.c create mode 100644 hw/ipmi/smbus_ipmi.c create mode 100644 hw/isa/Kconfig create mode 100644 hw/isa/apm.c create mode 100644 hw/isa/i82378.c create mode 100644 hw/isa/isa-bus.c create mode 100644 hw/isa/isa-superio.c create mode 100644 hw/isa/lpc_ich9.c create mode 100644 hw/isa/meson.build create mode 100644 hw/isa/pc87312.c create mode 100644 hw/isa/piix3.c create mode 100644 hw/isa/piix4.c create mode 100644 hw/isa/smc37c669-superio.c create mode 100644 hw/isa/trace-events create mode 100644 hw/isa/trace.h create mode 100644 hw/isa/vt82c686.c create mode 100644 hw/m68k/Kconfig create mode 100644 hw/m68k/an5206.c create mode 100644 hw/m68k/bootinfo.h create mode 100644 hw/m68k/mcf5206.c create mode 100644 hw/m68k/mcf5208.c create mode 100644 hw/m68k/mcf_intc.c create mode 100644 hw/m68k/meson.build 
create mode 100644 hw/m68k/next-cube.c create mode 100644 hw/m68k/next-kbd.c create mode 100644 hw/m68k/q800.c create mode 100644 hw/m68k/virt.c create mode 100644 hw/mem/Kconfig create mode 100644 hw/mem/memory-device.c create mode 100644 hw/mem/meson.build create mode 100644 hw/mem/npcm7xx_mc.c create mode 100644 hw/mem/nvdimm.c create mode 100644 hw/mem/pc-dimm.c create mode 100644 hw/mem/sparse-mem.c create mode 100644 hw/mem/trace-events create mode 100644 hw/mem/trace.h create mode 100644 hw/meson.build create mode 100644 hw/microblaze/Kconfig create mode 100644 hw/microblaze/boot.c create mode 100644 hw/microblaze/boot.h create mode 100644 hw/microblaze/meson.build create mode 100644 hw/microblaze/petalogix_ml605_mmu.c create mode 100644 hw/microblaze/petalogix_s3adsp1800_mmu.c create mode 100644 hw/microblaze/xlnx-zynqmp-pmu.c create mode 100644 hw/mips/Kconfig create mode 100644 hw/mips/bootloader.c create mode 100644 hw/mips/boston.c create mode 100644 hw/mips/cps.c create mode 100644 hw/mips/fuloong2e.c create mode 100644 hw/mips/fw_cfg.c create mode 100644 hw/mips/fw_cfg.h create mode 100644 hw/mips/gt64xxx_pci.c create mode 100644 hw/mips/jazz.c create mode 100644 hw/mips/loongson3_bootp.c create mode 100644 hw/mips/loongson3_bootp.h create mode 100644 hw/mips/loongson3_virt.c create mode 100644 hw/mips/malta.c create mode 100644 hw/mips/meson.build create mode 100644 hw/mips/mips_int.c create mode 100644 hw/mips/mipssim.c create mode 100644 hw/mips/trace-events create mode 100644 hw/mips/trace.h create mode 100644 hw/misc/Kconfig create mode 100644 hw/misc/a9scu.c create mode 100644 hw/misc/allwinner-cpucfg.c create mode 100644 hw/misc/allwinner-h3-ccu.c create mode 100644 hw/misc/allwinner-h3-dramc.c create mode 100644 hw/misc/allwinner-h3-sysctrl.c create mode 100644 hw/misc/allwinner-sid.c create mode 100644 hw/misc/applesmc.c create mode 100644 hw/misc/arm11scu.c create mode 100644 hw/misc/arm_integrator_debug.c create mode 100644 hw/misc/arm_l2x0.c create mode 100644 hw/misc/arm_sysctl.c create mode 100644 hw/misc/armsse-cpu-pwrctrl.c create mode 100644 hw/misc/armsse-cpuid.c create mode 100644 hw/misc/armsse-mhu.c create mode 100644 hw/misc/armv7m_ras.c create mode 100644 hw/misc/aspeed_hace.c create mode 100644 hw/misc/aspeed_lpc.c create mode 100644 hw/misc/aspeed_scu.c create mode 100644 hw/misc/aspeed_sdmc.c create mode 100644 hw/misc/aspeed_xdma.c create mode 100644 hw/misc/auxbus.c create mode 100644 hw/misc/avr_power.c create mode 100644 hw/misc/bcm2835_cprman.c create mode 100644 hw/misc/bcm2835_mbox.c create mode 100644 hw/misc/bcm2835_mphi.c create mode 100644 hw/misc/bcm2835_powermgt.c create mode 100644 hw/misc/bcm2835_property.c create mode 100644 hw/misc/bcm2835_rng.c create mode 100644 hw/misc/bcm2835_thermal.c create mode 100644 hw/misc/cbus.c create mode 100644 hw/misc/debugexit.c create mode 100644 hw/misc/eccmemctl.c create mode 100644 hw/misc/edu.c create mode 100644 hw/misc/empty_slot.c create mode 100644 hw/misc/exynos4210_clk.c create mode 100644 hw/misc/exynos4210_pmu.c create mode 100644 hw/misc/exynos4210_rng.c create mode 100644 hw/misc/grlib_ahb_apb_pnp.c create mode 100644 hw/misc/imx25_ccm.c create mode 100644 hw/misc/imx31_ccm.c create mode 100644 hw/misc/imx6_ccm.c create mode 100644 hw/misc/imx6_src.c create mode 100644 hw/misc/imx6ul_ccm.c create mode 100644 hw/misc/imx7_ccm.c create mode 100644 hw/misc/imx7_gpr.c create mode 100644 hw/misc/imx7_snvs.c create mode 100644 hw/misc/imx_ccm.c create mode 100644 hw/misc/imx_rngc.c create 
mode 100644 hw/misc/iotkit-secctl.c create mode 100644 hw/misc/iotkit-sysctl.c create mode 100644 hw/misc/iotkit-sysinfo.c create mode 100644 hw/misc/ivshmem.c create mode 100644 hw/misc/led.c create mode 100644 hw/misc/mac_via.c create mode 100644 hw/misc/macio/Kconfig create mode 100644 hw/misc/macio/cuda.c create mode 100644 hw/misc/macio/gpio.c create mode 100644 hw/misc/macio/mac_dbdma.c create mode 100644 hw/misc/macio/macio.c create mode 100644 hw/misc/macio/meson.build create mode 100644 hw/misc/macio/pmu.c create mode 100644 hw/misc/macio/trace-events create mode 100644 hw/misc/macio/trace.h create mode 100644 hw/misc/mchp_pfsoc_dmc.c create mode 100644 hw/misc/mchp_pfsoc_ioscb.c create mode 100644 hw/misc/mchp_pfsoc_sysreg.c create mode 100644 hw/misc/meson.build create mode 100644 hw/misc/mips_cmgcr.c create mode 100644 hw/misc/mips_cpc.c create mode 100644 hw/misc/mips_itu.c create mode 100644 hw/misc/mos6522.c create mode 100644 hw/misc/mps2-fpgaio.c create mode 100644 hw/misc/mps2-scc.c create mode 100644 hw/misc/msf2-sysreg.c create mode 100644 hw/misc/mst_fpga.c create mode 100644 hw/misc/npcm7xx_clk.c create mode 100644 hw/misc/npcm7xx_gcr.c create mode 100644 hw/misc/npcm7xx_mft.c create mode 100644 hw/misc/npcm7xx_pwm.c create mode 100644 hw/misc/npcm7xx_rng.c create mode 100644 hw/misc/nrf51_rng.c create mode 100644 hw/misc/omap_clk.c create mode 100644 hw/misc/omap_gpmc.c create mode 100644 hw/misc/omap_l4.c create mode 100644 hw/misc/omap_sdrc.c create mode 100644 hw/misc/omap_tap.c create mode 100644 hw/misc/pc-testdev.c create mode 100644 hw/misc/pca9552.c create mode 100644 hw/misc/pci-testdev.c create mode 100644 hw/misc/pvpanic-isa.c create mode 100644 hw/misc/pvpanic-pci.c create mode 100644 hw/misc/pvpanic.c create mode 100644 hw/misc/sbsa_ec.c create mode 100644 hw/misc/sga.c create mode 100644 hw/misc/sifive_e_prci.c create mode 100644 hw/misc/sifive_test.c create mode 100644 hw/misc/sifive_u_otp.c create mode 100644 hw/misc/sifive_u_prci.c create mode 100644 hw/misc/slavio_misc.c create mode 100644 hw/misc/stm32f2xx_syscfg.c create mode 100644 hw/misc/stm32f4xx_exti.c create mode 100644 hw/misc/stm32f4xx_syscfg.c create mode 100644 hw/misc/trace-events create mode 100644 hw/misc/trace.h create mode 100644 hw/misc/tz-mpc.c create mode 100644 hw/misc/tz-msc.c create mode 100644 hw/misc/tz-ppc.c create mode 100644 hw/misc/unimp.c create mode 100644 hw/misc/virt_ctrl.c create mode 100644 hw/misc/vmcoreinfo.c create mode 100644 hw/misc/xlnx-versal-xramc.c create mode 100644 hw/misc/zynq_slcr.c create mode 100644 hw/net/Kconfig create mode 100644 hw/net/allwinner-sun8i-emac.c create mode 100644 hw/net/allwinner_emac.c create mode 100644 hw/net/cadence_gem.c create mode 100644 hw/net/can/can_kvaser_pci.c create mode 100644 hw/net/can/can_mioe3680_pci.c create mode 100644 hw/net/can/can_pcm3680_pci.c create mode 100644 hw/net/can/can_sja1000.c create mode 100644 hw/net/can/can_sja1000.h create mode 100644 hw/net/can/ctu_can_fd_frame.h create mode 100644 hw/net/can/ctu_can_fd_regs.h create mode 100644 hw/net/can/ctucan_core.c create mode 100644 hw/net/can/ctucan_core.h create mode 100644 hw/net/can/ctucan_pci.c create mode 100644 hw/net/can/meson.build create mode 100644 hw/net/can/trace-events create mode 100644 hw/net/can/trace.h create mode 100644 hw/net/can/xlnx-zynqmp-can.c create mode 100644 hw/net/dp8393x.c create mode 100644 hw/net/e1000.c create mode 100644 hw/net/e1000_regs.h create mode 100644 hw/net/e1000e.c create mode 100644 hw/net/e1000e_core.c create 
mode 100644 hw/net/e1000e_core.h create mode 100644 hw/net/e1000x_common.c create mode 100644 hw/net/e1000x_common.h create mode 100644 hw/net/eepro100.c create mode 100644 hw/net/etraxfs_eth.c create mode 100644 hw/net/fsl_etsec/etsec.c create mode 100644 hw/net/fsl_etsec/etsec.h create mode 100644 hw/net/fsl_etsec/miim.c create mode 100644 hw/net/fsl_etsec/registers.c create mode 100644 hw/net/fsl_etsec/registers.h create mode 100644 hw/net/fsl_etsec/rings.c create mode 100644 hw/net/ftgmac100.c create mode 100644 hw/net/i82596.c create mode 100644 hw/net/i82596.h create mode 100644 hw/net/imx_fec.c create mode 100644 hw/net/lan9118.c create mode 100644 hw/net/lance.c create mode 100644 hw/net/lasi_i82596.c create mode 100644 hw/net/mcf_fec.c create mode 100644 hw/net/meson.build create mode 100644 hw/net/mipsnet.c create mode 100644 hw/net/msf2-emac.c create mode 100644 hw/net/ne2000-isa.c create mode 100644 hw/net/ne2000-pci.c create mode 100644 hw/net/ne2000.c create mode 100644 hw/net/ne2000.h create mode 100644 hw/net/net_rx_pkt.c create mode 100644 hw/net/net_rx_pkt.h create mode 100644 hw/net/net_tx_pkt.c create mode 100644 hw/net/net_tx_pkt.h create mode 100644 hw/net/npcm7xx_emc.c create mode 100644 hw/net/opencores_eth.c create mode 100644 hw/net/pcnet-pci.c create mode 100644 hw/net/pcnet.c create mode 100644 hw/net/pcnet.h create mode 100644 hw/net/rocker/qmp-norocker.c create mode 100644 hw/net/rocker/rocker.c create mode 100644 hw/net/rocker/rocker.h create mode 100644 hw/net/rocker/rocker_desc.c create mode 100644 hw/net/rocker/rocker_desc.h create mode 100644 hw/net/rocker/rocker_fp.c create mode 100644 hw/net/rocker/rocker_fp.h create mode 100644 hw/net/rocker/rocker_hw.h create mode 100644 hw/net/rocker/rocker_of_dpa.c create mode 100644 hw/net/rocker/rocker_of_dpa.h create mode 100644 hw/net/rocker/rocker_tlv.h create mode 100644 hw/net/rocker/rocker_world.c create mode 100644 hw/net/rocker/rocker_world.h create mode 100644 hw/net/rtl8139.c create mode 100644 hw/net/smc91c111.c create mode 100644 hw/net/spapr_llan.c create mode 100644 hw/net/stellaris_enet.c create mode 100644 hw/net/sungem.c create mode 100644 hw/net/sunhme.c create mode 100644 hw/net/trace-events create mode 100644 hw/net/trace.h create mode 100644 hw/net/tulip.c create mode 100644 hw/net/tulip.h create mode 100644 hw/net/vhost_net-stub.c create mode 100644 hw/net/vhost_net.c create mode 100644 hw/net/virtio-net.c create mode 100644 hw/net/vmware_utils.h create mode 100644 hw/net/vmxnet3.c create mode 100644 hw/net/vmxnet3.h create mode 100644 hw/net/vmxnet3_defs.h create mode 100644 hw/net/vmxnet_debug.h create mode 100644 hw/net/xen_nic.c create mode 100644 hw/net/xgmac.c create mode 100644 hw/net/xilinx_axienet.c create mode 100644 hw/net/xilinx_ethlite.c create mode 100644 hw/nios2/10m50_devboard.c create mode 100644 hw/nios2/Kconfig create mode 100644 hw/nios2/boot.c create mode 100644 hw/nios2/boot.h create mode 100644 hw/nios2/generic_nommu.c create mode 100644 hw/nios2/meson.build create mode 100644 hw/nubus/Kconfig create mode 100644 hw/nubus/mac-nubus-bridge.c create mode 100644 hw/nubus/meson.build create mode 100644 hw/nubus/nubus-bridge.c create mode 100644 hw/nubus/nubus-bus.c create mode 100644 hw/nubus/nubus-device.c create mode 100644 hw/nubus/trace-events create mode 100644 hw/nubus/trace.h create mode 100644 hw/nvme/Kconfig create mode 100644 hw/nvme/ctrl.c create mode 100644 hw/nvme/dif.c create mode 100644 hw/nvme/meson.build create mode 100644 hw/nvme/ns.c create mode 100644 
hw/nvme/nvme.h create mode 100644 hw/nvme/subsys.c create mode 100644 hw/nvme/trace-events create mode 100644 hw/nvme/trace.h create mode 100644 hw/nvram/Kconfig create mode 100644 hw/nvram/chrp_nvram.c create mode 100644 hw/nvram/ds1225y.c create mode 100644 hw/nvram/eeprom93xx.c create mode 100644 hw/nvram/eeprom_at24c.c create mode 100644 hw/nvram/fw_cfg-interface.c create mode 100644 hw/nvram/fw_cfg.c create mode 100644 hw/nvram/mac_nvram.c create mode 100644 hw/nvram/meson.build create mode 100644 hw/nvram/npcm7xx_otp.c create mode 100644 hw/nvram/nrf51_nvm.c create mode 100644 hw/nvram/spapr_nvram.c create mode 100644 hw/nvram/trace-events create mode 100644 hw/nvram/trace.h create mode 100644 hw/nvram/xlnx-bbram.c create mode 100644 hw/nvram/xlnx-efuse-crc.c create mode 100644 hw/nvram/xlnx-efuse.c create mode 100644 hw/nvram/xlnx-versal-efuse-cache.c create mode 100644 hw/nvram/xlnx-versal-efuse-ctrl.c create mode 100644 hw/nvram/xlnx-zynqmp-efuse.c create mode 100644 hw/openrisc/Kconfig create mode 100644 hw/openrisc/cputimer.c create mode 100644 hw/openrisc/meson.build create mode 100644 hw/openrisc/openrisc_sim.c create mode 100644 hw/pci-bridge/Kconfig create mode 100644 hw/pci-bridge/dec.c create mode 100644 hw/pci-bridge/dec.h create mode 100644 hw/pci-bridge/gen_pcie_root_port.c create mode 100644 hw/pci-bridge/i82801b11.c create mode 100644 hw/pci-bridge/ioh3420.c create mode 100644 hw/pci-bridge/meson.build create mode 100644 hw/pci-bridge/pci_bridge_dev.c create mode 100644 hw/pci-bridge/pci_expander_bridge.c create mode 100644 hw/pci-bridge/pcie_pci_bridge.c create mode 100644 hw/pci-bridge/pcie_root_port.c create mode 100644 hw/pci-bridge/simba.c create mode 100644 hw/pci-bridge/xio3130_downstream.c create mode 100644 hw/pci-bridge/xio3130_upstream.c create mode 100644 hw/pci-host/Kconfig create mode 100644 hw/pci-host/bonito.c create mode 100644 hw/pci-host/designware.c create mode 100644 hw/pci-host/gpex-acpi.c create mode 100644 hw/pci-host/gpex.c create mode 100644 hw/pci-host/grackle.c create mode 100644 hw/pci-host/i440fx.c create mode 100644 hw/pci-host/meson.build create mode 100644 hw/pci-host/mv64361.c create mode 100644 hw/pci-host/mv643xx.h create mode 100644 hw/pci-host/pam.c create mode 100644 hw/pci-host/pnv_phb3.c create mode 100644 hw/pci-host/pnv_phb3_msi.c create mode 100644 hw/pci-host/pnv_phb3_pbcq.c create mode 100644 hw/pci-host/pnv_phb4.c create mode 100644 hw/pci-host/pnv_phb4_pec.c create mode 100644 hw/pci-host/ppce500.c create mode 100644 hw/pci-host/q35.c create mode 100644 hw/pci-host/raven.c create mode 100644 hw/pci-host/remote.c create mode 100644 hw/pci-host/sabre.c create mode 100644 hw/pci-host/sh_pci.c create mode 100644 hw/pci-host/trace-events create mode 100644 hw/pci-host/trace.h create mode 100644 hw/pci-host/uninorth.c create mode 100644 hw/pci-host/versatile.c create mode 100644 hw/pci-host/xen_igd_pt.c create mode 100644 hw/pci-host/xilinx-pcie.c create mode 100644 hw/pci/Kconfig create mode 100644 hw/pci/meson.build create mode 100644 hw/pci/msi.c create mode 100644 hw/pci/msix.c create mode 100644 hw/pci/pci-stub.c create mode 100644 hw/pci/pci.c create mode 100644 hw/pci/pci_bridge.c create mode 100644 hw/pci/pci_host.c create mode 100644 hw/pci/pcie.c create mode 100644 hw/pci/pcie_aer.c create mode 100644 hw/pci/pcie_host.c create mode 100644 hw/pci/pcie_port.c create mode 100644 hw/pci/shpc.c create mode 100644 hw/pci/slotid_cap.c create mode 100644 hw/pci/trace-events create mode 100644 hw/pci/trace.h create mode 
100644 hw/pcmcia/Kconfig create mode 100644 hw/pcmcia/meson.build create mode 100644 hw/pcmcia/pcmcia.c create mode 100644 hw/pcmcia/pxa2xx.c create mode 100644 hw/ppc/Kconfig create mode 100644 hw/ppc/e500-ccsr.h create mode 100644 hw/ppc/e500.c create mode 100644 hw/ppc/e500.h create mode 100644 hw/ppc/e500plat.c create mode 100644 hw/ppc/fdt.c create mode 100644 hw/ppc/fw_cfg.c create mode 100644 hw/ppc/mac.h create mode 100644 hw/ppc/mac_newworld.c create mode 100644 hw/ppc/mac_oldworld.c create mode 100644 hw/ppc/meson.build create mode 100644 hw/ppc/mpc8544_guts.c create mode 100644 hw/ppc/mpc8544ds.c create mode 100644 hw/ppc/pef.c create mode 100644 hw/ppc/pegasos2.c create mode 100644 hw/ppc/pnv.c create mode 100644 hw/ppc/pnv_bmc.c create mode 100644 hw/ppc/pnv_core.c create mode 100644 hw/ppc/pnv_homer.c create mode 100644 hw/ppc/pnv_lpc.c create mode 100644 hw/ppc/pnv_occ.c create mode 100644 hw/ppc/pnv_pnor.c create mode 100644 hw/ppc/pnv_psi.c create mode 100644 hw/ppc/pnv_xscom.c create mode 100644 hw/ppc/ppc.c create mode 100644 hw/ppc/ppc405.h create mode 100644 hw/ppc/ppc405_boards.c create mode 100644 hw/ppc/ppc405_uc.c create mode 100644 hw/ppc/ppc440.h create mode 100644 hw/ppc/ppc440_bamboo.c create mode 100644 hw/ppc/ppc440_pcix.c create mode 100644 hw/ppc/ppc440_uc.c create mode 100644 hw/ppc/ppc4xx_devs.c create mode 100644 hw/ppc/ppc4xx_pci.c create mode 100644 hw/ppc/ppc_booke.c create mode 100644 hw/ppc/ppce500_spin.c create mode 100644 hw/ppc/prep.c create mode 100644 hw/ppc/prep_systemio.c create mode 100644 hw/ppc/rs6000_mc.c create mode 100644 hw/ppc/sam460ex.c create mode 100644 hw/ppc/spapr.c create mode 100644 hw/ppc/spapr_caps.c create mode 100644 hw/ppc/spapr_cpu_core.c create mode 100644 hw/ppc/spapr_drc.c create mode 100644 hw/ppc/spapr_events.c create mode 100644 hw/ppc/spapr_hcall.c create mode 100644 hw/ppc/spapr_iommu.c create mode 100644 hw/ppc/spapr_irq.c create mode 100644 hw/ppc/spapr_numa.c create mode 100644 hw/ppc/spapr_nvdimm.c create mode 100644 hw/ppc/spapr_ovec.c create mode 100644 hw/ppc/spapr_pci.c create mode 100644 hw/ppc/spapr_pci_nvlink2.c create mode 100644 hw/ppc/spapr_pci_vfio.c create mode 100644 hw/ppc/spapr_rng.c create mode 100644 hw/ppc/spapr_rtas.c create mode 100644 hw/ppc/spapr_rtas_ddw.c create mode 100644 hw/ppc/spapr_rtc.c create mode 100644 hw/ppc/spapr_softmmu.c create mode 100644 hw/ppc/spapr_tpm_proxy.c create mode 100644 hw/ppc/spapr_vio.c create mode 100644 hw/ppc/spapr_vof.c create mode 100644 hw/ppc/trace-events create mode 100644 hw/ppc/trace.h create mode 100644 hw/ppc/virtex_ml507.c create mode 100644 hw/ppc/vof.c create mode 100644 hw/rdma/Kconfig create mode 100644 hw/rdma/meson.build create mode 100644 hw/rdma/rdma.c create mode 100644 hw/rdma/rdma_backend.c create mode 100644 hw/rdma/rdma_backend.h create mode 100644 hw/rdma/rdma_backend_defs.h create mode 100644 hw/rdma/rdma_rm.c create mode 100644 hw/rdma/rdma_rm.h create mode 100644 hw/rdma/rdma_rm_defs.h create mode 100644 hw/rdma/rdma_utils.c create mode 100644 hw/rdma/rdma_utils.h create mode 100644 hw/rdma/trace-events create mode 100644 hw/rdma/trace.h create mode 100644 hw/rdma/vmw/pvrdma.h create mode 100644 hw/rdma/vmw/pvrdma_cmd.c create mode 100644 hw/rdma/vmw/pvrdma_dev_ring.c create mode 100644 hw/rdma/vmw/pvrdma_dev_ring.h create mode 100644 hw/rdma/vmw/pvrdma_main.c create mode 100644 hw/rdma/vmw/pvrdma_qp_ops.c create mode 100644 hw/rdma/vmw/pvrdma_qp_ops.h create mode 100644 hw/rdma/vmw/trace-events create mode 100644 
hw/rdma/vmw/trace.h create mode 100644 hw/remote/Kconfig create mode 100644 hw/remote/iohub.c create mode 100644 hw/remote/machine.c create mode 100644 hw/remote/memory.c create mode 100644 hw/remote/meson.build create mode 100644 hw/remote/message.c create mode 100644 hw/remote/mpqemu-link.c create mode 100644 hw/remote/proxy-memory-listener.c create mode 100644 hw/remote/proxy.c create mode 100644 hw/remote/remote-obj.c create mode 100644 hw/remote/trace-events create mode 100644 hw/remote/trace.h create mode 100644 hw/riscv/Kconfig create mode 100644 hw/riscv/boot.c create mode 100644 hw/riscv/meson.build create mode 100644 hw/riscv/microchip_pfsoc.c create mode 100644 hw/riscv/numa.c create mode 100644 hw/riscv/opentitan.c create mode 100644 hw/riscv/riscv_hart.c create mode 100644 hw/riscv/shakti_c.c create mode 100644 hw/riscv/sifive_e.c create mode 100644 hw/riscv/sifive_u.c create mode 100644 hw/riscv/spike.c create mode 100644 hw/riscv/virt.c create mode 100644 hw/rtc/Kconfig create mode 100644 hw/rtc/allwinner-rtc.c create mode 100644 hw/rtc/aspeed_rtc.c create mode 100644 hw/rtc/ds1338.c create mode 100644 hw/rtc/exynos4210_rtc.c create mode 100644 hw/rtc/goldfish_rtc.c create mode 100644 hw/rtc/m41t80.c create mode 100644 hw/rtc/m48t59-internal.h create mode 100644 hw/rtc/m48t59-isa.c create mode 100644 hw/rtc/m48t59.c create mode 100644 hw/rtc/mc146818rtc.c create mode 100644 hw/rtc/meson.build create mode 100644 hw/rtc/pl031.c create mode 100644 hw/rtc/sun4v-rtc.c create mode 100644 hw/rtc/trace-events create mode 100644 hw/rtc/trace.h create mode 100644 hw/rtc/twl92230.c create mode 100644 hw/rtc/xlnx-zynqmp-rtc.c create mode 100644 hw/rx/Kconfig create mode 100644 hw/rx/meson.build create mode 100644 hw/rx/rx-gdbsim.c create mode 100644 hw/rx/rx62n.c create mode 100644 hw/s390x/3270-ccw.c create mode 100644 hw/s390x/Kconfig create mode 100644 hw/s390x/ap-bridge.c create mode 100644 hw/s390x/ap-device.c create mode 100644 hw/s390x/ccw-device.c create mode 100644 hw/s390x/ccw-device.h create mode 100644 hw/s390x/css-bridge.c create mode 100644 hw/s390x/css.c create mode 100644 hw/s390x/event-facility.c create mode 100644 hw/s390x/ipl.c create mode 100644 hw/s390x/ipl.h create mode 100644 hw/s390x/meson.build create mode 100644 hw/s390x/pv.c create mode 100644 hw/s390x/s390-ccw.c create mode 100644 hw/s390x/s390-pci-bus.c create mode 100644 hw/s390x/s390-pci-inst.c create mode 100644 hw/s390x/s390-pci-vfio.c create mode 100644 hw/s390x/s390-skeys-kvm.c create mode 100644 hw/s390x/s390-skeys.c create mode 100644 hw/s390x/s390-stattrib-kvm.c create mode 100644 hw/s390x/s390-stattrib.c create mode 100644 hw/s390x/s390-virtio-ccw.c create mode 100644 hw/s390x/s390-virtio-hcall.c create mode 100644 hw/s390x/s390-virtio-hcall.h create mode 100644 hw/s390x/sclp.c create mode 100644 hw/s390x/sclpcpu.c create mode 100644 hw/s390x/sclpquiesce.c create mode 100644 hw/s390x/tod-kvm.c create mode 100644 hw/s390x/tod-tcg.c create mode 100644 hw/s390x/tod.c create mode 100644 hw/s390x/trace-events create mode 100644 hw/s390x/trace.h create mode 100644 hw/s390x/vhost-user-fs-ccw.c create mode 100644 hw/s390x/vhost-vsock-ccw.c create mode 100644 hw/s390x/virtio-ccw-9p.c create mode 100644 hw/s390x/virtio-ccw-balloon.c create mode 100644 hw/s390x/virtio-ccw-blk.c create mode 100644 hw/s390x/virtio-ccw-crypto.c create mode 100644 hw/s390x/virtio-ccw-gpu.c create mode 100644 hw/s390x/virtio-ccw-input.c create mode 100644 hw/s390x/virtio-ccw-net.c create mode 100644 hw/s390x/virtio-ccw-rng.c 
create mode 100644 hw/s390x/virtio-ccw-scsi.c create mode 100644 hw/s390x/virtio-ccw-serial.c create mode 100644 hw/s390x/virtio-ccw.c create mode 100644 hw/s390x/virtio-ccw.h create mode 100644 hw/scsi/Kconfig create mode 100644 hw/scsi/emulation.c create mode 100644 hw/scsi/esp-pci.c create mode 100644 hw/scsi/esp.c create mode 100644 hw/scsi/lsi53c895a.c create mode 100644 hw/scsi/megasas.c create mode 100644 hw/scsi/meson.build create mode 100644 hw/scsi/mfi.h create mode 100644 hw/scsi/mpi.h create mode 100644 hw/scsi/mptconfig.c create mode 100644 hw/scsi/mptendian.c create mode 100644 hw/scsi/mptsas.c create mode 100644 hw/scsi/mptsas.h create mode 100644 hw/scsi/scsi-bus.c create mode 100644 hw/scsi/scsi-disk.c create mode 100644 hw/scsi/scsi-generic.c create mode 100644 hw/scsi/spapr_vscsi.c create mode 100644 hw/scsi/srp.h create mode 100644 hw/scsi/trace-events create mode 100644 hw/scsi/trace.h create mode 100644 hw/scsi/vhost-scsi-common.c create mode 100644 hw/scsi/vhost-scsi.c create mode 100644 hw/scsi/vhost-user-scsi.c create mode 100644 hw/scsi/viosrp.h create mode 100644 hw/scsi/virtio-scsi-dataplane.c create mode 100644 hw/scsi/virtio-scsi.c create mode 100644 hw/scsi/vmw_pvscsi.c create mode 100644 hw/scsi/vmw_pvscsi.h create mode 100644 hw/sd/Kconfig create mode 100644 hw/sd/allwinner-sdhost.c create mode 100644 hw/sd/aspeed_sdhci.c create mode 100644 hw/sd/bcm2835_sdhost.c create mode 100644 hw/sd/cadence_sdhci.c create mode 100644 hw/sd/core.c create mode 100644 hw/sd/meson.build create mode 100644 hw/sd/npcm7xx_sdhci.c create mode 100644 hw/sd/omap_mmc.c create mode 100644 hw/sd/pl181.c create mode 100644 hw/sd/pxa2xx_mmci.c create mode 100644 hw/sd/sd.c create mode 100644 hw/sd/sdhci-internal.h create mode 100644 hw/sd/sdhci-pci.c create mode 100644 hw/sd/sdhci.c create mode 100644 hw/sd/sdmmc-internal.c create mode 100644 hw/sd/sdmmc-internal.h create mode 100644 hw/sd/ssi-sd.c create mode 100644 hw/sd/trace-events create mode 100644 hw/sd/trace.h create mode 100644 hw/sensor/Kconfig create mode 100644 hw/sensor/adm1272.c create mode 100644 hw/sensor/dps310.c create mode 100644 hw/sensor/emc141x.c create mode 100644 hw/sensor/max34451.c create mode 100644 hw/sensor/meson.build create mode 100644 hw/sensor/tmp105.c create mode 100644 hw/sensor/tmp421.c create mode 100644 hw/sh4/Kconfig create mode 100644 hw/sh4/meson.build create mode 100644 hw/sh4/r2d.c create mode 100644 hw/sh4/sh7750.c create mode 100644 hw/sh4/sh7750_regnames.c create mode 100644 hw/sh4/sh7750_regnames.h create mode 100644 hw/sh4/sh7750_regs.h create mode 100644 hw/sh4/shix.c create mode 100644 hw/sh4/trace-events create mode 100644 hw/sh4/trace.h create mode 100644 hw/smbios/Kconfig create mode 100644 hw/smbios/meson.build create mode 100644 hw/smbios/smbios-stub.c create mode 100644 hw/smbios/smbios.c create mode 100644 hw/smbios/smbios_build.h create mode 100644 hw/smbios/smbios_type_38-stub.c create mode 100644 hw/smbios/smbios_type_38.c create mode 100644 hw/sparc/Kconfig create mode 100644 hw/sparc/leon3.c create mode 100644 hw/sparc/meson.build create mode 100644 hw/sparc/sun4m.c create mode 100644 hw/sparc/sun4m_iommu.c create mode 100644 hw/sparc/trace-events create mode 100644 hw/sparc/trace.h create mode 100644 hw/sparc64/Kconfig create mode 100644 hw/sparc64/meson.build create mode 100644 hw/sparc64/niagara.c create mode 100644 hw/sparc64/sparc64.c create mode 100644 hw/sparc64/sun4u.c create mode 100644 hw/sparc64/sun4u_iommu.c create mode 100644 hw/sparc64/trace-events create 
mode 100644 hw/sparc64/trace.h create mode 100644 hw/ssi/Kconfig create mode 100644 hw/ssi/aspeed_smc.c create mode 100644 hw/ssi/imx_spi.c create mode 100644 hw/ssi/meson.build create mode 100644 hw/ssi/mss-spi.c create mode 100644 hw/ssi/npcm7xx_fiu.c create mode 100644 hw/ssi/omap_spi.c create mode 100644 hw/ssi/pl022.c create mode 100644 hw/ssi/sifive_spi.c create mode 100644 hw/ssi/ssi.c create mode 100644 hw/ssi/stm32f2xx_spi.c create mode 100644 hw/ssi/trace-events create mode 100644 hw/ssi/trace.h create mode 100644 hw/ssi/xilinx_spi.c create mode 100644 hw/ssi/xilinx_spips.c create mode 100644 hw/timer/Kconfig create mode 100644 hw/timer/a9gtimer.c create mode 100644 hw/timer/allwinner-a10-pit.c create mode 100644 hw/timer/altera_timer.c create mode 100644 hw/timer/arm_mptimer.c create mode 100644 hw/timer/arm_timer.c create mode 100644 hw/timer/armv7m_systick.c create mode 100644 hw/timer/aspeed_timer.c create mode 100644 hw/timer/avr_timer16.c create mode 100644 hw/timer/bcm2835_systmr.c create mode 100644 hw/timer/cadence_ttc.c create mode 100644 hw/timer/cmsdk-apb-dualtimer.c create mode 100644 hw/timer/cmsdk-apb-timer.c create mode 100644 hw/timer/digic-timer.c create mode 100644 hw/timer/etraxfs_timer.c create mode 100644 hw/timer/exynos4210_mct.c create mode 100644 hw/timer/exynos4210_pwm.c create mode 100644 hw/timer/grlib_gptimer.c create mode 100644 hw/timer/hpet.c create mode 100644 hw/timer/i8254.c create mode 100644 hw/timer/i8254_common.c create mode 100644 hw/timer/ibex_timer.c create mode 100644 hw/timer/imx_epit.c create mode 100644 hw/timer/imx_gpt.c create mode 100644 hw/timer/meson.build create mode 100644 hw/timer/mips_gictimer.c create mode 100644 hw/timer/mss-timer.c create mode 100644 hw/timer/npcm7xx_timer.c create mode 100644 hw/timer/nrf51_timer.c create mode 100644 hw/timer/omap_gptimer.c create mode 100644 hw/timer/omap_synctimer.c create mode 100644 hw/timer/pxa2xx_timer.c create mode 100644 hw/timer/renesas_cmt.c create mode 100644 hw/timer/renesas_tmr.c create mode 100644 hw/timer/sh_timer.c create mode 100644 hw/timer/sifive_pwm.c create mode 100644 hw/timer/slavio_timer.c create mode 100644 hw/timer/sse-counter.c create mode 100644 hw/timer/sse-timer.c create mode 100644 hw/timer/stellaris-gptm.c create mode 100644 hw/timer/stm32f2xx_timer.c create mode 100644 hw/timer/trace-events create mode 100644 hw/timer/trace.h create mode 100644 hw/timer/xilinx_timer.c create mode 100644 hw/tpm/Kconfig create mode 100644 hw/tpm/meson.build create mode 100644 hw/tpm/tpm_crb.c create mode 100644 hw/tpm/tpm_ppi.c create mode 100644 hw/tpm/tpm_ppi.h create mode 100644 hw/tpm/tpm_prop.h create mode 100644 hw/tpm/tpm_spapr.c create mode 100644 hw/tpm/tpm_tis.h create mode 100644 hw/tpm/tpm_tis_common.c create mode 100644 hw/tpm/tpm_tis_isa.c create mode 100644 hw/tpm/tpm_tis_sysbus.c create mode 100644 hw/tpm/trace-events create mode 100644 hw/tpm/trace.h create mode 100644 hw/tricore/Kconfig create mode 100644 hw/tricore/meson.build create mode 100644 hw/tricore/tc27x_soc.c create mode 100644 hw/tricore/triboard.c create mode 100644 hw/tricore/tricore_testboard.c create mode 100644 hw/tricore/tricore_testdevice.c create mode 100644 hw/usb/Kconfig create mode 100644 hw/usb/bus.c create mode 100644 hw/usb/ccid-card-emulated.c create mode 100644 hw/usb/ccid-card-passthru.c create mode 100644 hw/usb/ccid.h create mode 100644 hw/usb/chipidea.c create mode 100644 hw/usb/combined-packet.c create mode 100644 hw/usb/core.c create mode 100644 hw/usb/desc-msos.c create 
mode 100644 hw/usb/desc.c create mode 100644 hw/usb/desc.h create mode 100644 hw/usb/dev-audio.c create mode 100644 hw/usb/dev-hid.c create mode 100644 hw/usb/dev-hub.c create mode 100644 hw/usb/dev-mtp.c create mode 100644 hw/usb/dev-network.c create mode 100644 hw/usb/dev-serial.c create mode 100644 hw/usb/dev-smartcard-reader.c create mode 100644 hw/usb/dev-storage-bot.c create mode 100644 hw/usb/dev-storage-classic.c create mode 100644 hw/usb/dev-storage.c create mode 100644 hw/usb/dev-uas.c create mode 100644 hw/usb/dev-wacom.c create mode 100644 hw/usb/hcd-dwc2.c create mode 100644 hw/usb/hcd-dwc2.h create mode 100644 hw/usb/hcd-dwc3.c create mode 100644 hw/usb/hcd-ehci-pci.c create mode 100644 hw/usb/hcd-ehci-sysbus.c create mode 100644 hw/usb/hcd-ehci.c create mode 100644 hw/usb/hcd-ehci.h create mode 100644 hw/usb/hcd-musb.c create mode 100644 hw/usb/hcd-ohci-pci.c create mode 100644 hw/usb/hcd-ohci.c create mode 100644 hw/usb/hcd-ohci.h create mode 100644 hw/usb/hcd-uhci.c create mode 100644 hw/usb/hcd-uhci.h create mode 100644 hw/usb/hcd-xhci-nec.c create mode 100644 hw/usb/hcd-xhci-pci.c create mode 100644 hw/usb/hcd-xhci-pci.h create mode 100644 hw/usb/hcd-xhci-sysbus.c create mode 100644 hw/usb/hcd-xhci-sysbus.h create mode 100644 hw/usb/hcd-xhci.c create mode 100644 hw/usb/hcd-xhci.h create mode 100644 hw/usb/host-libusb.c create mode 100644 hw/usb/host.h create mode 100644 hw/usb/imx-usb-phy.c create mode 100644 hw/usb/libhw.c create mode 100644 hw/usb/meson.build create mode 100644 hw/usb/pcap.c create mode 100644 hw/usb/quirks-ftdi-ids.h create mode 100644 hw/usb/quirks-pl2303-ids.h create mode 100644 hw/usb/quirks.c create mode 100644 hw/usb/quirks.h create mode 100644 hw/usb/redirect.c create mode 100644 hw/usb/trace-events create mode 100644 hw/usb/trace.h create mode 100644 hw/usb/tusb6010.c create mode 100644 hw/usb/u2f-emulated.c create mode 100644 hw/usb/u2f-passthru.c create mode 100644 hw/usb/u2f.c create mode 100644 hw/usb/u2f.h create mode 100644 hw/usb/vt82c686-uhci-pci.c create mode 100644 hw/usb/xen-usb.c create mode 100644 hw/usb/xlnx-usb-subsystem.c create mode 100644 hw/usb/xlnx-versal-usb2-ctrl-regs.c create mode 100644 hw/vfio/Kconfig create mode 100644 hw/vfio/amd-xgbe.c create mode 100644 hw/vfio/ap.c create mode 100644 hw/vfio/calxeda-xgmac.c create mode 100644 hw/vfio/ccw.c create mode 100644 hw/vfio/common.c create mode 100644 hw/vfio/display.c create mode 100644 hw/vfio/igd.c create mode 100644 hw/vfio/meson.build create mode 100644 hw/vfio/migration.c create mode 100644 hw/vfio/pci-quirks.c create mode 100644 hw/vfio/pci.c create mode 100644 hw/vfio/pci.h create mode 100644 hw/vfio/platform.c create mode 100644 hw/vfio/spapr.c create mode 100644 hw/vfio/trace-events create mode 100644 hw/vfio/trace.h create mode 100644 hw/virtio/Kconfig create mode 100644 hw/virtio/meson.build create mode 100644 hw/virtio/trace-events create mode 100644 hw/virtio/trace.h create mode 100644 hw/virtio/vhost-backend.c create mode 100644 hw/virtio/vhost-scsi-pci.c create mode 100644 hw/virtio/vhost-stub.c create mode 100644 hw/virtio/vhost-user-blk-pci.c create mode 100644 hw/virtio/vhost-user-fs-pci.c create mode 100644 hw/virtio/vhost-user-fs.c create mode 100644 hw/virtio/vhost-user-i2c-pci.c create mode 100644 hw/virtio/vhost-user-i2c.c create mode 100644 hw/virtio/vhost-user-input-pci.c create mode 100644 hw/virtio/vhost-user-rng-pci.c create mode 100644 hw/virtio/vhost-user-rng.c create mode 100644 hw/virtio/vhost-user-scsi-pci.c create mode 100644 
hw/virtio/vhost-user-vsock-pci.c create mode 100644 hw/virtio/vhost-user-vsock.c create mode 100644 hw/virtio/vhost-user.c create mode 100644 hw/virtio/vhost-vdpa.c create mode 100644 hw/virtio/vhost-vsock-common.c create mode 100644 hw/virtio/vhost-vsock-pci.c create mode 100644 hw/virtio/vhost-vsock.c create mode 100644 hw/virtio/vhost.c create mode 100644 hw/virtio/virtio-9p-pci.c create mode 100644 hw/virtio/virtio-balloon-pci.c create mode 100644 hw/virtio/virtio-balloon.c create mode 100644 hw/virtio/virtio-blk-pci.c create mode 100644 hw/virtio/virtio-bus.c create mode 100644 hw/virtio/virtio-crypto-pci.c create mode 100644 hw/virtio/virtio-crypto.c create mode 100644 hw/virtio/virtio-input-host-pci.c create mode 100644 hw/virtio/virtio-input-pci.c create mode 100644 hw/virtio/virtio-iommu-pci.c create mode 100644 hw/virtio/virtio-iommu.c create mode 100644 hw/virtio/virtio-mem-pci.c create mode 100644 hw/virtio/virtio-mem-pci.h create mode 100644 hw/virtio/virtio-mem.c create mode 100644 hw/virtio/virtio-mmio.c create mode 100644 hw/virtio/virtio-net-pci.c create mode 100644 hw/virtio/virtio-pci.c create mode 100644 hw/virtio/virtio-pci.h create mode 100644 hw/virtio/virtio-pmem-pci.c create mode 100644 hw/virtio/virtio-pmem-pci.h create mode 100644 hw/virtio/virtio-pmem.c create mode 100644 hw/virtio/virtio-rng-pci.c create mode 100644 hw/virtio/virtio-rng.c create mode 100644 hw/virtio/virtio-scsi-pci.c create mode 100644 hw/virtio/virtio-serial-pci.c create mode 100644 hw/virtio/virtio.c create mode 100644 hw/watchdog/Kconfig create mode 100644 hw/watchdog/cmsdk-apb-watchdog.c create mode 100644 hw/watchdog/meson.build create mode 100644 hw/watchdog/sbsa_gwdt.c create mode 100644 hw/watchdog/trace-events create mode 100644 hw/watchdog/trace.h create mode 100644 hw/watchdog/watchdog.c create mode 100644 hw/watchdog/wdt_aspeed.c create mode 100644 hw/watchdog/wdt_diag288.c create mode 100644 hw/watchdog/wdt_i6300esb.c create mode 100644 hw/watchdog/wdt_ib700.c create mode 100644 hw/watchdog/wdt_imx2.c create mode 100644 hw/xen/meson.build create mode 100644 hw/xen/trace-events create mode 100644 hw/xen/trace.h create mode 100644 hw/xen/xen-backend.c create mode 100644 hw/xen/xen-bus-helper.c create mode 100644 hw/xen/xen-bus.c create mode 100644 hw/xen/xen-host-pci-device.c create mode 100644 hw/xen/xen-host-pci-device.h create mode 100644 hw/xen/xen-legacy-backend.c create mode 100644 hw/xen/xen_devconfig.c create mode 100644 hw/xen/xen_pt.c create mode 100644 hw/xen/xen_pt.h create mode 100644 hw/xen/xen_pt_config_init.c create mode 100644 hw/xen/xen_pt_graphics.c create mode 100644 hw/xen/xen_pt_load_rom.c create mode 100644 hw/xen/xen_pt_msi.c create mode 100644 hw/xen/xen_pt_stub.c create mode 100644 hw/xen/xen_pvdev.c create mode 100644 hw/xenpv/meson.build create mode 100644 hw/xenpv/xen_machine_pv.c create mode 100644 hw/xtensa/Kconfig create mode 100644 hw/xtensa/bootparam.h create mode 100644 hw/xtensa/meson.build create mode 100644 hw/xtensa/mx_pic.c create mode 100644 hw/xtensa/pic_cpu.c create mode 100644 hw/xtensa/sim.c create mode 100644 hw/xtensa/virt.c create mode 100644 hw/xtensa/xtensa_memory.c create mode 100644 hw/xtensa/xtensa_memory.h create mode 100644 hw/xtensa/xtensa_sim.h create mode 100644 hw/xtensa/xtfpga.c

diff --git a/hw/9pfs/9p-local.c b/hw/9pfs/9p-local.c
new file mode 100644
index 000000000..210d9e770
--- /dev/null
+++ b/hw/9pfs/9p-local.c
@@ -0,0 +1,1601 @@
+/*
+ * 9p Posix callback
+ *
+ * Copyright IBM, Corp. 2010
+ *
+ * Authors:
+ *  Anthony Liguori
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ */
+
+/*
+ * Not so fast! You might want to read the 9p developer docs first:
+ * https://wiki.qemu.org/Documentation/9p
+ */
+
+#include "qemu/osdep.h"
+#include "9p.h"
+#include "9p-local.h"
+#include "9p-xattr.h"
+#include "9p-util.h"
+#include "fsdev/qemu-fsdev.h" /* local_ops */
+#include <arpa/inet.h>
+#include <pwd.h>
+#include <grp.h>
+#include <sys/socket.h>
+#include <sys/un.h>
+#include "qemu/xattr.h"
+#include "qapi/error.h"
+#include "qemu/cutils.h"
+#include "qemu/error-report.h"
+#include "qemu/option.h"
+#include <libgen.h>
+#include <linux/fs.h>
+#ifdef CONFIG_LINUX_MAGIC_H
+#include <linux/magic.h>
+#endif
+#include <sys/ioctl.h>
+
+#ifndef XFS_SUPER_MAGIC
+#define XFS_SUPER_MAGIC 0x58465342
+#endif
+#ifndef EXT2_SUPER_MAGIC
+#define EXT2_SUPER_MAGIC 0xEF53
+#endif
+#ifndef REISERFS_SUPER_MAGIC
+#define REISERFS_SUPER_MAGIC 0x52654973
+#endif
+#ifndef BTRFS_SUPER_MAGIC
+#define BTRFS_SUPER_MAGIC 0x9123683E
+#endif
+
+typedef struct {
+    int mountfd;
+} LocalData;
+
+int local_open_nofollow(FsContext *fs_ctx, const char *path, int flags,
+                        mode_t mode)
+{
+    LocalData *data = fs_ctx->private;
+    int fd = data->mountfd;
+
+    while (*path && fd != -1) {
+        const char *c;
+        int next_fd;
+        char *head;
+
+        /* Only relative paths without consecutive slashes */
+        assert(*path != '/');
+
+        head = g_strdup(path);
+        c = qemu_strchrnul(path, '/');
+        if (*c) {
+            /* Intermediate path element */
+            head[c - path] = 0;
+            path = c + 1;
+            next_fd = openat_dir(fd, head);
+        } else {
+            /* Rightmost path element */
+            next_fd = openat_file(fd, head, flags, mode);
+            path = c;
+        }
+        g_free(head);
+        if (fd != data->mountfd) {
+            close_preserve_errno(fd);
+        }
+        fd = next_fd;
+    }
+
+    assert(fd != data->mountfd);
+    return fd;
+}
+
+int local_opendir_nofollow(FsContext *fs_ctx, const char *path)
+{
+    return local_open_nofollow(fs_ctx, path, O_DIRECTORY | O_RDONLY, 0);
+}
+
+static void renameat_preserve_errno(int odirfd, const char *opath, int ndirfd,
+                                    const char *npath)
+{
+    int serrno = errno;
+    renameat(odirfd, opath, ndirfd, npath);
+    errno = serrno;
+}
+
+static void unlinkat_preserve_errno(int dirfd, const char *path, int flags)
+{
+    int serrno = errno;
+    unlinkat(dirfd, path, flags);
+    errno = serrno;
+}
+
+#define VIRTFS_META_DIR ".virtfs_metadata"
+#define VIRTFS_META_ROOT_FILE VIRTFS_META_DIR "_root"
+
+static FILE *local_fopenat(int dirfd, const char *name, const char *mode)
+{
+    int fd, o_mode = 0;
+    FILE *fp;
+    int flags;
+    /*
+     * only supports two modes
+     */
+    if (mode[0] == 'r') {
+        flags = O_RDONLY;
+    } else if (mode[0] == 'w') {
+        flags = O_WRONLY | O_TRUNC | O_CREAT;
+        o_mode = S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH;
+    } else {
+        return NULL;
+    }
+    fd = openat_file(dirfd, name, flags, o_mode);
+    if (fd == -1) {
+        return NULL;
+    }
+    fp = fdopen(fd, mode);
+    if (!fp) {
+        close(fd);
+    }
+    return fp;
+}
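For orientation while reading the diff: local_open_nofollow() above is the security core of this backend. It resolves a guest-supplied relative path one component at a time, always via openat() relative to the previous component's descriptor, so a symlink planted inside the export can never redirect the walk outside of it. Below is a minimal standalone sketch of the same idea, outside the patch; openat_walk() is a hypothetical name for illustration only, and QEMU's real openat_dir()/openat_file() helpers from 9p-util.h add O_PATH handling and errno preservation that this sketch omits.

#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

/* Resolve "a/b/c" step by step, refusing symlinks at every step. */
static int openat_walk(int rootfd, const char *relpath, int flags, mode_t mode)
{
    char *buf = strdup(relpath);
    char *p = buf;
    int fd = dup(rootfd);                  /* never hand out the root fd */

    while (fd != -1 && p) {
        char *slash = strchr(p, '/');
        if (slash) {
            *slash = '\0';                 /* intermediate directory */
        }
        /* O_NOFOLLOW applies to the single component opened here, so a
         * symlink anywhere in the path fails the walk with ELOOP. */
        int next = openat(fd, p,
                          slash ? O_DIRECTORY | O_RDONLY | O_NOFOLLOW
                                : flags | O_NOFOLLOW,
                          mode);
        close(fd);                         /* drop the previous step's fd */
        fd = next;
        p = slash ? slash + 1 : NULL;
    }
    free(buf);
    return fd;   /* -1 with errno == ELOOP if any component was a symlink */
}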
+
+#define ATTR_MAX 100
+static void local_mapped_file_attr(int dirfd, const char *name,
+                                   struct stat *stbuf)
+{
+    FILE *fp;
+    char buf[ATTR_MAX];
+    int map_dirfd;
+
+    if (strcmp(name, ".")) {
+        map_dirfd = openat_dir(dirfd, VIRTFS_META_DIR);
+        if (map_dirfd == -1) {
+            return;
+        }
+
+        fp = local_fopenat(map_dirfd, name, "r");
+        close_preserve_errno(map_dirfd);
+    } else {
+        fp = local_fopenat(dirfd, VIRTFS_META_ROOT_FILE, "r");
+    }
+    if (!fp) {
+        return;
+    }
+    memset(buf, 0, ATTR_MAX);
+    while (fgets(buf, ATTR_MAX, fp)) {
+        if (!strncmp(buf, "virtfs.uid", 10)) {
+            stbuf->st_uid = atoi(buf + 11);
+        } else if (!strncmp(buf, "virtfs.gid", 10)) {
+            stbuf->st_gid = atoi(buf + 11);
+        } else if (!strncmp(buf, "virtfs.mode", 11)) {
+            stbuf->st_mode = atoi(buf + 12);
+        } else if (!strncmp(buf, "virtfs.rdev", 11)) {
+            stbuf->st_rdev = atoi(buf + 12);
+        }
+        memset(buf, 0, ATTR_MAX);
+    }
+    fclose(fp);
+}
+
+static int local_lstat(FsContext *fs_ctx, V9fsPath *fs_path, struct stat *stbuf)
+{
+    int err = -1;
+    char *dirpath = g_path_get_dirname(fs_path->data);
+    char *name = g_path_get_basename(fs_path->data);
+    int dirfd;
+
+    dirfd = local_opendir_nofollow(fs_ctx, dirpath);
+    if (dirfd == -1) {
+        goto out;
+    }
+
+    err = fstatat(dirfd, name, stbuf, AT_SYMLINK_NOFOLLOW);
+    if (err) {
+        goto err_out;
+    }
+    if (fs_ctx->export_flags & V9FS_SM_MAPPED) {
+        /* Actual credentials are part of extended attrs */
+        uid_t tmp_uid;
+        gid_t tmp_gid;
+        mode_t tmp_mode;
+        dev_t tmp_dev;
+
+        if (fgetxattrat_nofollow(dirfd, name, "user.virtfs.uid", &tmp_uid,
+                                 sizeof(uid_t)) > 0) {
+            stbuf->st_uid = le32_to_cpu(tmp_uid);
+        }
+        if (fgetxattrat_nofollow(dirfd, name, "user.virtfs.gid", &tmp_gid,
+                                 sizeof(gid_t)) > 0) {
+            stbuf->st_gid = le32_to_cpu(tmp_gid);
+        }
+        if (fgetxattrat_nofollow(dirfd, name, "user.virtfs.mode", &tmp_mode,
+                                 sizeof(mode_t)) > 0) {
+            stbuf->st_mode = le32_to_cpu(tmp_mode);
+        }
+        if (fgetxattrat_nofollow(dirfd, name, "user.virtfs.rdev", &tmp_dev,
+                                 sizeof(dev_t)) > 0) {
+            stbuf->st_rdev = le64_to_cpu(tmp_dev);
+        }
+    } else if (fs_ctx->export_flags & V9FS_SM_MAPPED_FILE) {
+        local_mapped_file_attr(dirfd, name, stbuf);
+    }
+
+err_out:
+    close_preserve_errno(dirfd);
+out:
+    g_free(name);
+    g_free(dirpath);
+    return err;
+}
+
+static int local_set_mapped_file_attrat(int dirfd, const char *name,
+                                        FsCred *credp)
+{
+    FILE *fp;
+    int ret;
+    char buf[ATTR_MAX];
+    int uid = -1, gid = -1, mode = -1, rdev = -1;
+    int map_dirfd = -1, map_fd;
+    bool is_root = !strcmp(name, ".");
+
+    if (is_root) {
+        fp = local_fopenat(dirfd, VIRTFS_META_ROOT_FILE, "r");
+        if (!fp) {
+            if (errno == ENOENT) {
+                goto update_map_file;
+            } else {
+                return -1;
+            }
+        }
+    } else {
+        ret = mkdirat(dirfd, VIRTFS_META_DIR, 0700);
+        if (ret < 0 && errno != EEXIST) {
+            return -1;
+        }
+
+        map_dirfd = openat_dir(dirfd, VIRTFS_META_DIR);
+        if (map_dirfd == -1) {
+            return -1;
+        }
+
+        fp = local_fopenat(map_dirfd, name, "r");
+        if (!fp) {
+            if (errno == ENOENT) {
+                goto update_map_file;
+            } else {
+                close_preserve_errno(map_dirfd);
+                return -1;
+            }
+        }
+    }
+    memset(buf, 0, ATTR_MAX);
+    while (fgets(buf, ATTR_MAX, fp)) {
+        if (!strncmp(buf, "virtfs.uid", 10)) {
+            uid = atoi(buf + 11);
+        } else if (!strncmp(buf, "virtfs.gid", 10)) {
+            gid = atoi(buf + 11);
+        } else if (!strncmp(buf, "virtfs.mode", 11)) {
+            mode = atoi(buf + 12);
+        } else if (!strncmp(buf, "virtfs.rdev", 11)) {
+            rdev = atoi(buf + 12);
+        }
+        memset(buf, 0, ATTR_MAX);
+    }
+    fclose(fp);
+
+update_map_file:
+    if (is_root) {
+        fp = local_fopenat(dirfd, VIRTFS_META_ROOT_FILE, "w");
+    } else {
+        fp = local_fopenat(map_dirfd, name, "w");
+        /* We can't go this far with map_dirfd not being a valid file
+         * descriptor but some versions of gcc aren't smart enough to see it.
+         */
+        if (map_dirfd != -1) {
+            close_preserve_errno(map_dirfd);
+        }
+    }
+    if (!fp) {
+        return -1;
+    }
+
+    map_fd = fileno(fp);
+    assert(map_fd != -1);
+    ret = fchmod(map_fd, 0600);
+    assert(ret == 0);
+
+    if (credp->fc_uid != -1) {
+        uid = credp->fc_uid;
+    }
+    if (credp->fc_gid != -1) {
+        gid = credp->fc_gid;
+    }
+    if (credp->fc_mode != (mode_t)-1) {
+        mode = credp->fc_mode;
+    }
+    if (credp->fc_rdev != -1) {
+        rdev = credp->fc_rdev;
+    }
+
+    if (uid != -1) {
+        fprintf(fp, "virtfs.uid=%d\n", uid);
+    }
+    if (gid != -1) {
+        fprintf(fp, "virtfs.gid=%d\n", gid);
+    }
+    if (mode != -1) {
+        fprintf(fp, "virtfs.mode=%d\n", mode);
+    }
+    if (rdev != -1) {
+        fprintf(fp, "virtfs.rdev=%d\n", rdev);
+    }
+    fclose(fp);
+
+    return 0;
+}
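The V9FS_SM_MAPPED_FILE ("mapped-file") security model above keeps guest credentials in plain key=value files under .virtfs_metadata, written by the fprintf() calls in local_set_mapped_file_attrat() and read back by local_mapped_file_attr(). For a guest file chowned to uid/gid 1000 with st_mode 0100644 (regular file, 0644 permissions), such a metadata file would plausibly contain:

    virtfs.uid=1000
    virtfs.gid=1000
    virtfs.mode=33188

(33188 is decimal for octal 0100644.) A standalone equivalent of the fixed-offset parsing follows; parse_virtfs_line() is an illustrative name, and matching the '=' in the prefix makes it marginally stricter than the strncmp()/atoi() offsets used in the patch.

#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>

/* Apply one "virtfs.key=value" metadata line to a stat buffer. */
static void parse_virtfs_line(const char *buf, struct stat *st)
{
    if (!strncmp(buf, "virtfs.uid=", 11)) {
        st->st_uid = atoi(buf + 11);
    } else if (!strncmp(buf, "virtfs.gid=", 11)) {
        st->st_gid = atoi(buf + 11);
    } else if (!strncmp(buf, "virtfs.mode=", 12)) {
        st->st_mode = atoi(buf + 12);
    } else if (!strncmp(buf, "virtfs.rdev=", 12)) {
        st->st_rdev = atoi(buf + 12);
    }
}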
+
+static int fchmodat_nofollow(int dirfd, const char *name, mode_t mode)
+{
+    struct stat stbuf;
+    int fd, ret;
+
+    /* FIXME: this should be handled with fchmodat(AT_SYMLINK_NOFOLLOW).
+     * Unfortunately, the linux kernel doesn't implement it yet.
+     */
+
+    /* First, we clear non-racing symlinks out of the way. */
+    if (fstatat(dirfd, name, &stbuf, AT_SYMLINK_NOFOLLOW)) {
+        return -1;
+    }
+    if (S_ISLNK(stbuf.st_mode)) {
+        errno = ELOOP;
+        return -1;
+    }
+
+    fd = openat_file(dirfd, name, O_RDONLY | O_PATH_9P_UTIL | O_NOFOLLOW, 0);
+#if O_PATH_9P_UTIL == 0
+    /* Fallback for systems that don't support O_PATH: we depend on the file
+     * being readable or writable.
+     */
+    if (fd == -1) {
+        /* In case the file is writable-only and isn't a directory. */
+        if (errno == EACCES) {
+            fd = openat_file(dirfd, name, O_WRONLY, 0);
+        }
+        if (fd == -1 && errno == EISDIR) {
+            errno = EACCES;
+        }
+    }
+    if (fd == -1) {
+        return -1;
+    }
+    ret = fchmod(fd, mode);
+#else
+    /* Access modes are ignored when O_PATH is supported. If name is a symbolic
+     * link, O_PATH | O_NOFOLLOW causes openat(2) to return a file descriptor
+     * referring to the symbolic link.
+     */
+    if (fd == -1) {
+        return -1;
+    }
+
+    /* Now we handle racing symlinks. */
+    ret = fstat(fd, &stbuf);
+    if (!ret) {
+        if (S_ISLNK(stbuf.st_mode)) {
+            errno = ELOOP;
+            ret = -1;
+        } else {
+            char *proc_path = g_strdup_printf("/proc/self/fd/%d", fd);
+            ret = chmod(proc_path, mode);
+            g_free(proc_path);
+        }
+    }
+#endif
+    close_preserve_errno(fd);
+    return ret;
+}
+
+static int local_set_xattrat(int dirfd, const char *path, FsCred *credp)
+{
+    int err;
+
+    if (credp->fc_uid != -1) {
+        uint32_t tmp_uid = cpu_to_le32(credp->fc_uid);
+        err = fsetxattrat_nofollow(dirfd, path, "user.virtfs.uid", &tmp_uid,
+                                   sizeof(uid_t), 0);
+        if (err) {
+            return err;
+        }
+    }
+    if (credp->fc_gid != -1) {
+        uint32_t tmp_gid = cpu_to_le32(credp->fc_gid);
+        err = fsetxattrat_nofollow(dirfd, path, "user.virtfs.gid", &tmp_gid,
+                                   sizeof(gid_t), 0);
+        if (err) {
+            return err;
+        }
+    }
+    if (credp->fc_mode != (mode_t)-1) {
+        uint32_t tmp_mode = cpu_to_le32(credp->fc_mode);
+        err = fsetxattrat_nofollow(dirfd, path, "user.virtfs.mode", &tmp_mode,
+                                   sizeof(mode_t), 0);
+        if (err) {
+            return err;
+        }
+    }
+    if (credp->fc_rdev != -1) {
+        uint64_t tmp_rdev = cpu_to_le64(credp->fc_rdev);
+        err = fsetxattrat_nofollow(dirfd, path, "user.virtfs.rdev", &tmp_rdev,
+                                   sizeof(dev_t), 0);
+        if (err) {
+            return err;
+        }
+    }
+    return 0;
+}
+
+static int local_set_cred_passthrough(FsContext *fs_ctx, int dirfd,
+                                      const char *name, FsCred *credp)
+{
+    if (fchownat(dirfd, name, credp->fc_uid, credp->fc_gid,
+                 AT_SYMLINK_NOFOLLOW) < 0) {
+        /*
+         * If we fail to change ownership and if we are
+         * using security model none. Ignore the error
+         */
+        if ((fs_ctx->export_flags & V9FS_SEC_MASK) != V9FS_SM_NONE) {
+            return -1;
+        }
+    }
+
+    return fchmodat_nofollow(dirfd, name, credp->fc_mode & 07777);
+}
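fchmodat_nofollow() above works around the kernel's missing fchmodat(AT_SYMLINK_NOFOLLOW): it opens the name with O_PATH | O_NOFOLLOW, double-checks that the descriptor does not refer to a symlink, and then applies chmod() through the /proc/self/fd/ magic link, which resolves to the already-opened inode and is therefore immune to path races. A minimal sketch of just that trick, assuming Linux with O_PATH and procfs; chmod_nofollow() is a hypothetical name, not the patch's helper.

#define _GNU_SOURCE            /* for O_PATH */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

static int chmod_nofollow(int dirfd, const char *name, mode_t mode)
{
    int ret = -1;
    int fd = openat(dirfd, name, O_PATH | O_NOFOLLOW);
    if (fd == -1) {
        return -1;
    }

    struct stat st;
    if (fstat(fd, &st) == 0) {
        if (S_ISLNK(st.st_mode)) {
            errno = ELOOP;                   /* refuse to chmod a symlink */
        } else {
            char proc[64];
            snprintf(proc, sizeof(proc), "/proc/self/fd/%d", fd);
            ret = chmod(proc, mode);         /* acts on the opened inode */
        }
    }
    close(fd);
    return ret;
}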
Ignore the error + */ + if ((fs_ctx->export_flags & V9FS_SEC_MASK) != V9FS_SM_NONE) { + return -1; + } + } + + return fchmodat_nofollow(dirfd, name, credp->fc_mode & 07777); +} + +static ssize_t local_readlink(FsContext *fs_ctx, V9fsPath *fs_path, + char *buf, size_t bufsz) +{ + ssize_t tsize = -1; + + if ((fs_ctx->export_flags & V9FS_SM_MAPPED) || + (fs_ctx->export_flags & V9FS_SM_MAPPED_FILE)) { + int fd; + + fd = local_open_nofollow(fs_ctx, fs_path->data, O_RDONLY, 0); + if (fd == -1) { + return -1; + } + do { + tsize = read(fd, (void *)buf, bufsz); + } while (tsize == -1 && errno == EINTR); + close_preserve_errno(fd); + } else if ((fs_ctx->export_flags & V9FS_SM_PASSTHROUGH) || + (fs_ctx->export_flags & V9FS_SM_NONE)) { + char *dirpath = g_path_get_dirname(fs_path->data); + char *name = g_path_get_basename(fs_path->data); + int dirfd; + + dirfd = local_opendir_nofollow(fs_ctx, dirpath); + if (dirfd == -1) { + goto out; + } + + tsize = readlinkat(dirfd, name, buf, bufsz); + close_preserve_errno(dirfd); + out: + g_free(name); + g_free(dirpath); + } + return tsize; +} + +static int local_close(FsContext *ctx, V9fsFidOpenState *fs) +{ + return close(fs->fd); +} + +static int local_closedir(FsContext *ctx, V9fsFidOpenState *fs) +{ + return closedir(fs->dir.stream); +} + +static int local_open(FsContext *ctx, V9fsPath *fs_path, + int flags, V9fsFidOpenState *fs) +{ + int fd; + + fd = local_open_nofollow(ctx, fs_path->data, flags, 0); + if (fd == -1) { + return -1; + } + fs->fd = fd; + return fs->fd; +} + +static int local_opendir(FsContext *ctx, + V9fsPath *fs_path, V9fsFidOpenState *fs) +{ + int dirfd; + DIR *stream; + + dirfd = local_opendir_nofollow(ctx, fs_path->data); + if (dirfd == -1) { + return -1; + } + + stream = fdopendir(dirfd); + if (!stream) { + close(dirfd); + return -1; + } + fs->dir.stream = stream; + return 0; +} + +static void local_rewinddir(FsContext *ctx, V9fsFidOpenState *fs) +{ + rewinddir(fs->dir.stream); +} + +static off_t local_telldir(FsContext *ctx, V9fsFidOpenState *fs) +{ + return telldir(fs->dir.stream); +} + +static bool local_is_mapped_file_metadata(FsContext *fs_ctx, const char *name) +{ + return + !strcmp(name, VIRTFS_META_DIR) || !strcmp(name, VIRTFS_META_ROOT_FILE); +} + +static struct dirent *local_readdir(FsContext *ctx, V9fsFidOpenState *fs) +{ + struct dirent *entry; + +again: + entry = readdir(fs->dir.stream); + if (!entry) { + return NULL; + } + + if (ctx->export_flags & V9FS_SM_MAPPED) { + entry->d_type = DT_UNKNOWN; + } else if (ctx->export_flags & V9FS_SM_MAPPED_FILE) { + if (local_is_mapped_file_metadata(ctx, entry->d_name)) { + /* skip the meta data */ + goto again; + } + entry->d_type = DT_UNKNOWN; + } + + return entry; +} + +static void local_seekdir(FsContext *ctx, V9fsFidOpenState *fs, off_t off) +{ + seekdir(fs->dir.stream, off); +} + +static ssize_t local_preadv(FsContext *ctx, V9fsFidOpenState *fs, + const struct iovec *iov, + int iovcnt, off_t offset) +{ +#ifdef CONFIG_PREADV + return preadv(fs->fd, iov, iovcnt, offset); +#else + int err = lseek(fs->fd, offset, SEEK_SET); + if (err == -1) { + return err; + } else { + return readv(fs->fd, iov, iovcnt); + } +#endif +} + +static ssize_t local_pwritev(FsContext *ctx, V9fsFidOpenState *fs, + const struct iovec *iov, + int iovcnt, off_t offset) +{ + ssize_t ret; +#ifdef CONFIG_PREADV + ret = pwritev(fs->fd, iov, iovcnt, offset); +#else + int err = lseek(fs->fd, offset, SEEK_SET); + if (err == -1) { + return err; + } else { + ret = writev(fs->fd, iov, iovcnt); + } +#endif +#ifdef 
CONFIG_SYNC_FILE_RANGE
+    if (ret > 0 && ctx->export_flags & V9FS_IMMEDIATE_WRITEOUT) {
+        /*
+         * Initiate a writeback. This is not a data integrity sync.
+         * We want to ensure that we don't leave dirty pages in the cache
+         * after write when writeout=immediate is specified.
+         */
+        sync_file_range(fs->fd, offset, ret,
+                        SYNC_FILE_RANGE_WAIT_BEFORE | SYNC_FILE_RANGE_WRITE);
+    }
+#endif
+    return ret;
+}
+
+static int local_chmod(FsContext *fs_ctx, V9fsPath *fs_path, FsCred *credp)
+{
+    char *dirpath = g_path_get_dirname(fs_path->data);
+    char *name = g_path_get_basename(fs_path->data);
+    int ret = -1;
+    int dirfd;
+
+    dirfd = local_opendir_nofollow(fs_ctx, dirpath);
+    if (dirfd == -1) {
+        goto out;
+    }
+
+    if (fs_ctx->export_flags & V9FS_SM_MAPPED) {
+        ret = local_set_xattrat(dirfd, name, credp);
+    } else if (fs_ctx->export_flags & V9FS_SM_MAPPED_FILE) {
+        ret = local_set_mapped_file_attrat(dirfd, name, credp);
+    } else if (fs_ctx->export_flags & V9FS_SM_PASSTHROUGH ||
+               fs_ctx->export_flags & V9FS_SM_NONE) {
+        ret = fchmodat_nofollow(dirfd, name, credp->fc_mode);
+    }
+    close_preserve_errno(dirfd);
+
+out:
+    g_free(dirpath);
+    g_free(name);
+    return ret;
+}
+
+static int local_mknod(FsContext *fs_ctx, V9fsPath *dir_path,
+                       const char *name, FsCred *credp)
+{
+    int err = -1;
+    int dirfd;
+
+    if (fs_ctx->export_flags & V9FS_SM_MAPPED_FILE &&
+        local_is_mapped_file_metadata(fs_ctx, name)) {
+        errno = EINVAL;
+        return -1;
+    }
+
+    dirfd = local_opendir_nofollow(fs_ctx, dir_path->data);
+    if (dirfd == -1) {
+        return -1;
+    }
+
+    if (fs_ctx->export_flags & V9FS_SM_MAPPED ||
+        fs_ctx->export_flags & V9FS_SM_MAPPED_FILE) {
+        err = mknodat(dirfd, name, fs_ctx->fmode | S_IFREG, 0);
+        if (err == -1) {
+            goto out;
+        }
+
+        if (fs_ctx->export_flags & V9FS_SM_MAPPED) {
+            err = local_set_xattrat(dirfd, name, credp);
+        } else {
+            err = local_set_mapped_file_attrat(dirfd, name, credp);
+        }
+        if (err == -1) {
+            goto err_end;
+        }
+    } else if (fs_ctx->export_flags & V9FS_SM_PASSTHROUGH ||
+               fs_ctx->export_flags & V9FS_SM_NONE) {
+        err = mknodat(dirfd, name, credp->fc_mode, credp->fc_rdev);
+        if (err == -1) {
+            goto out;
+        }
+        err = local_set_cred_passthrough(fs_ctx, dirfd, name, credp);
+        if (err == -1) {
+            goto err_end;
+        }
+    }
+    goto out;
+
+err_end:
+    unlinkat_preserve_errno(dirfd, name, 0);
+out:
+    close_preserve_errno(dirfd);
+    return err;
+}
+
+static int local_mkdir(FsContext *fs_ctx, V9fsPath *dir_path,
+                       const char *name, FsCred *credp)
+{
+    int err = -1;
+    int dirfd;
+
+    if (fs_ctx->export_flags & V9FS_SM_MAPPED_FILE &&
+        local_is_mapped_file_metadata(fs_ctx, name)) {
+        errno = EINVAL;
+        return -1;
+    }
+
+    dirfd = local_opendir_nofollow(fs_ctx, dir_path->data);
+    if (dirfd == -1) {
+        return -1;
+    }
+
+    if (fs_ctx->export_flags & V9FS_SM_MAPPED ||
+        fs_ctx->export_flags & V9FS_SM_MAPPED_FILE) {
+        err = mkdirat(dirfd, name, fs_ctx->dmode);
+        if (err == -1) {
+            goto out;
+        }
+        credp->fc_mode = credp->fc_mode | S_IFDIR;
+
+        if (fs_ctx->export_flags & V9FS_SM_MAPPED) {
+            err = local_set_xattrat(dirfd, name, credp);
+        } else {
+            err = local_set_mapped_file_attrat(dirfd, name, credp);
+        }
+        if (err == -1) {
+            goto err_end;
+        }
+    } else if (fs_ctx->export_flags & V9FS_SM_PASSTHROUGH ||
+               fs_ctx->export_flags & V9FS_SM_NONE) {
+        err = mkdirat(dirfd, name, credp->fc_mode);
+        if (err == -1) {
+            goto out;
+        }
+        err = local_set_cred_passthrough(fs_ctx, dirfd, name, credp);
+        if (err == -1) {
+            goto err_end;
+        }
+    }
+    goto out;
+
+err_end:
+    unlinkat_preserve_errno(dirfd, name, AT_REMOVEDIR);
+out:
+    close_preserve_errno(dirfd);
+    return err;
+}
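+/*
+ * A note on the mapped-file format used above: local_set_mapped_file_attrat()
+ * stores guest credentials as "key=value" text lines in a per-file entry
+ * under VIRTFS_META_DIR. A minimal host-side parser for such an entry could
+ * look like the sketch below (hypothetical helper, plain stdio assumed,
+ * for illustration only):
+ *
+ *     static void parse_virtfs_meta(const char *path, int *uid, int *gid)
+ *     {
+ *         char line[256];
+ *         FILE *fp = fopen(path, "r");    // e.g. ".virtfs_metadata/foo"
+ *
+ *         if (!fp) {
+ *             return;
+ *         }
+ *         while (fgets(line, sizeof(line), fp)) {
+ *             if (!strncmp(line, "virtfs.uid=", 11)) {
+ *                 *uid = atoi(line + 11);   // same format as written above
+ *             } else if (!strncmp(line, "virtfs.gid=", 11)) {
+ *                 *gid = atoi(line + 11);
+ *             }
+ *         }
+ *         fclose(fp);
+ *     }
+ */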
+
+static int local_fstat(FsContext *fs_ctx, int fid_type,
+                       V9fsFidOpenState *fs, struct stat *stbuf)
+{
+    int err, fd;
+
+    if (fid_type == P9_FID_DIR) {
+        fd = dirfd(fs->dir.stream);
+    } else {
+        fd = fs->fd;
+    }
+
+    err = fstat(fd, stbuf);
+    if (err) {
+        return err;
+    }
+    if (fs_ctx->export_flags & V9FS_SM_MAPPED) {
+        /* Actual credentials are part of extended attrs */
+        uid_t tmp_uid;
+        gid_t tmp_gid;
+        mode_t tmp_mode;
+        dev_t tmp_dev;
+
+        if (fgetxattr(fd, "user.virtfs.uid", &tmp_uid, sizeof(uid_t)) > 0) {
+            stbuf->st_uid = le32_to_cpu(tmp_uid);
+        }
+        if (fgetxattr(fd, "user.virtfs.gid", &tmp_gid, sizeof(gid_t)) > 0) {
+            stbuf->st_gid = le32_to_cpu(tmp_gid);
+        }
+        if (fgetxattr(fd, "user.virtfs.mode", &tmp_mode, sizeof(mode_t)) > 0) {
+            stbuf->st_mode = le32_to_cpu(tmp_mode);
+        }
+        if (fgetxattr(fd, "user.virtfs.rdev", &tmp_dev, sizeof(dev_t)) > 0) {
+            stbuf->st_rdev = le64_to_cpu(tmp_dev);
+        }
+    } else if (fs_ctx->export_flags & V9FS_SM_MAPPED_FILE) {
+        errno = EOPNOTSUPP;
+        return -1;
+    }
+    return err;
+}
+
+static int local_open2(FsContext *fs_ctx, V9fsPath *dir_path, const char *name,
+                       int flags, FsCred *credp, V9fsFidOpenState *fs)
+{
+    int fd = -1;
+    int err = -1;
+    int dirfd;
+
+    if (fs_ctx->export_flags & V9FS_SM_MAPPED_FILE &&
+        local_is_mapped_file_metadata(fs_ctx, name)) {
+        errno = EINVAL;
+        return -1;
+    }
+
+    /*
+     * Mark all the opens to not follow symlinks
+     */
+    flags |= O_NOFOLLOW;
+
+    dirfd = local_opendir_nofollow(fs_ctx, dir_path->data);
+    if (dirfd == -1) {
+        return -1;
+    }
+
+    /* Determine the security model */
+    if (fs_ctx->export_flags & V9FS_SM_MAPPED ||
+        fs_ctx->export_flags & V9FS_SM_MAPPED_FILE) {
+        fd = openat_file(dirfd, name, flags, fs_ctx->fmode);
+        if (fd == -1) {
+            goto out;
+        }
+        credp->fc_mode = credp->fc_mode | S_IFREG;
+        if (fs_ctx->export_flags & V9FS_SM_MAPPED) {
+            /* Set client credentials in xattr */
+            err = local_set_xattrat(dirfd, name, credp);
+        } else {
+            err = local_set_mapped_file_attrat(dirfd, name, credp);
+        }
+        if (err == -1) {
+            goto err_end;
+        }
+    } else if ((fs_ctx->export_flags & V9FS_SM_PASSTHROUGH) ||
+               (fs_ctx->export_flags & V9FS_SM_NONE)) {
+        fd = openat_file(dirfd, name, flags, credp->fc_mode);
+        if (fd == -1) {
+            goto out;
+        }
+        err = local_set_cred_passthrough(fs_ctx, dirfd, name, credp);
+        if (err == -1) {
+            goto err_end;
+        }
+    }
+    err = fd;
+    fs->fd = fd;
+    goto out;
+
+err_end:
+    unlinkat_preserve_errno(dirfd, name,
+                            flags & O_DIRECTORY ? AT_REMOVEDIR : 0);
+    close_preserve_errno(fd);
+out:
+    close_preserve_errno(dirfd);
+    return err;
+}
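+/*
+ * In the mapped security models a guest symlink is emulated: the host object
+ * created below is a regular file whose data is the target path, and S_IFLNK
+ * only exists in the virtfs metadata. A host-side sketch of resolving such an
+ * emulated link (hypothetical helper, for illustration only) is just a file
+ * read:
+ *
+ *     static ssize_t read_mapped_symlink(int dirfd, const char *name,
+ *                                        char *buf, size_t bufsz)
+ *     {
+ *         int fd = openat(dirfd, name, O_RDONLY | O_NOFOLLOW);
+ *         ssize_t n;
+ *
+ *         if (fd < 0) {
+ *             return -1;
+ *         }
+ *         n = read(fd, buf, bufsz);   // file contents == link target
+ *         close(fd);
+ *         return n;
+ *     }
+ *
+ * which matches what local_readlink() above does for V9FS_SM_MAPPED and
+ * V9FS_SM_MAPPED_FILE exports.
+ */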
+
+
+static int local_symlink(FsContext *fs_ctx, const char *oldpath,
+                         V9fsPath *dir_path, const char *name, FsCred *credp)
+{
+    int err = -1;
+    int dirfd;
+
+    if (fs_ctx->export_flags & V9FS_SM_MAPPED_FILE &&
+        local_is_mapped_file_metadata(fs_ctx, name)) {
+        errno = EINVAL;
+        return -1;
+    }
+
+    dirfd = local_opendir_nofollow(fs_ctx, dir_path->data);
+    if (dirfd == -1) {
+        return -1;
+    }
+
+    /* Determine the security model */
+    if (fs_ctx->export_flags & V9FS_SM_MAPPED ||
+        fs_ctx->export_flags & V9FS_SM_MAPPED_FILE) {
+        int fd;
+        ssize_t oldpath_size, write_size;
+
+        fd = openat_file(dirfd, name, O_CREAT | O_EXCL | O_RDWR,
+                         fs_ctx->fmode);
+        if (fd == -1) {
+            goto out;
+        }
+        /* Write the oldpath (target) to the file. */
+        oldpath_size = strlen(oldpath);
+        do {
+            write_size = write(fd, (void *)oldpath, oldpath_size);
+        } while (write_size == -1 && errno == EINTR);
+        close_preserve_errno(fd);
+
+        if (write_size != oldpath_size) {
+            goto err_end;
+        }
+        /* Set client credentials in symlink's xattr */
+        credp->fc_mode = credp->fc_mode | S_IFLNK;
+
+        if (fs_ctx->export_flags & V9FS_SM_MAPPED) {
+            err = local_set_xattrat(dirfd, name, credp);
+        } else {
+            err = local_set_mapped_file_attrat(dirfd, name, credp);
+        }
+        if (err == -1) {
+            goto err_end;
+        }
+    } else if (fs_ctx->export_flags & V9FS_SM_PASSTHROUGH ||
+               fs_ctx->export_flags & V9FS_SM_NONE) {
+        err = symlinkat(oldpath, dirfd, name);
+        if (err) {
+            goto out;
+        }
+        err = fchownat(dirfd, name, credp->fc_uid, credp->fc_gid,
+                       AT_SYMLINK_NOFOLLOW);
+        if (err == -1) {
+            /*
+             * If we fail to change ownership and we are using
+             * security model none, ignore the error.
+             */
+            if ((fs_ctx->export_flags & V9FS_SEC_MASK) != V9FS_SM_NONE) {
+                goto err_end;
+            } else {
+                err = 0;
+            }
+        }
+    }
+    goto out;
+
+err_end:
+    unlinkat_preserve_errno(dirfd, name, 0);
+out:
+    close_preserve_errno(dirfd);
+    return err;
+}
+
+static int local_link(FsContext *ctx, V9fsPath *oldpath,
+                      V9fsPath *dirpath, const char *name)
+{
+    char *odirpath = g_path_get_dirname(oldpath->data);
+    char *oname = g_path_get_basename(oldpath->data);
+    int ret = -1;
+    int odirfd, ndirfd;
+
+    if (ctx->export_flags & V9FS_SM_MAPPED_FILE &&
+        local_is_mapped_file_metadata(ctx, name)) {
+        errno = EINVAL;
+        goto out;
+    }
+
+    odirfd = local_opendir_nofollow(ctx, odirpath);
+    if (odirfd == -1) {
+        goto out;
+    }
+
+    ndirfd = local_opendir_nofollow(ctx, dirpath->data);
+    if (ndirfd == -1) {
+        close_preserve_errno(odirfd);
+        goto out;
+    }
+
+    ret = linkat(odirfd, oname, ndirfd, name, 0);
+    if (ret < 0) {
+        goto out_close;
+    }
+
+    /* now link the virtfs_metadata files */
+    if (ctx->export_flags & V9FS_SM_MAPPED_FILE) {
+        int omap_dirfd, nmap_dirfd;
+
+        ret = mkdirat(ndirfd, VIRTFS_META_DIR, 0700);
+        if (ret < 0 && errno != EEXIST) {
+            goto err_undo_link;
+        }
+
+        omap_dirfd = openat_dir(odirfd, VIRTFS_META_DIR);
+        if (omap_dirfd == -1) {
+            goto err;
+        }
+
+        nmap_dirfd = openat_dir(ndirfd, VIRTFS_META_DIR);
+        if (nmap_dirfd == -1) {
+            close_preserve_errno(omap_dirfd);
+            goto err;
+        }
+
+        ret = linkat(omap_dirfd, oname, nmap_dirfd, name, 0);
+        close_preserve_errno(nmap_dirfd);
+        close_preserve_errno(omap_dirfd);
+        if (ret < 0 && errno != ENOENT) {
+            goto err_undo_link;
+        }
+
+        ret = 0;
+    }
+    goto out_close;
+
+err:
+    ret = -1;
+err_undo_link:
+    unlinkat_preserve_errno(ndirfd, name, 0);
+out_close:
+    close_preserve_errno(ndirfd);
+    close_preserve_errno(odirfd);
+out:
+    g_free(oname);
+    g_free(odirpath);
+    return ret;
+}
+
+static int local_truncate(FsContext *ctx, V9fsPath *fs_path, off_t size)
+{
+    int fd, ret;
+
+    fd = local_open_nofollow(ctx, fs_path->data, O_WRONLY, 0);
+    if (fd == -1) {
+        return -1;
+    }
+    ret = ftruncate(fd, size);
+    close_preserve_errno(fd);
+    return ret;
+}
+
+static int local_chown(FsContext *fs_ctx, V9fsPath *fs_path, FsCred *credp)
+{
+    char *dirpath = g_path_get_dirname(fs_path->data);
+    char *name = g_path_get_basename(fs_path->data);
+    int ret = -1;
+    int dirfd;
+
+    dirfd = local_opendir_nofollow(fs_ctx, dirpath);
+    if (dirfd == -1) {
+        goto out;
+    }
+
+    if ((credp->fc_uid == -1 && credp->fc_gid == -1) ||
+        (fs_ctx->export_flags & V9FS_SM_PASSTHROUGH) ||
+        (fs_ctx->export_flags & V9FS_SM_NONE)) {
+        ret = fchownat(dirfd, name, credp->fc_uid, credp->fc_gid,
+                       AT_SYMLINK_NOFOLLOW);
+    }
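+    /*
+     * For V9FS_SM_MAPPED (handled below) ownership never reaches the host
+     * inode; it lives in "user.virtfs.*" xattrs as little-endian values.
+     * A host-side peek at one such value -- an illustrative sketch only,
+     * assuming <sys/xattr.h> and <endian.h>:
+     *
+     *     uint32_t uid_le;
+     *     if (getxattr("somefile", "user.virtfs.uid",
+     *                  &uid_le, sizeof(uid_le)) == sizeof(uid_le)) {
+     *         printf("guest uid: %u\n", (unsigned)le32toh(uid_le));
+     *     }
+     */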
else if (fs_ctx->export_flags & V9FS_SM_MAPPED) { + ret = local_set_xattrat(dirfd, name, credp); + } else if (fs_ctx->export_flags & V9FS_SM_MAPPED_FILE) { + ret = local_set_mapped_file_attrat(dirfd, name, credp); + } + + close_preserve_errno(dirfd); +out: + g_free(name); + g_free(dirpath); + return ret; +} + +static int local_utimensat(FsContext *s, V9fsPath *fs_path, + const struct timespec *buf) +{ + char *dirpath = g_path_get_dirname(fs_path->data); + char *name = g_path_get_basename(fs_path->data); + int dirfd, ret = -1; + + dirfd = local_opendir_nofollow(s, dirpath); + if (dirfd == -1) { + goto out; + } + + ret = utimensat(dirfd, name, buf, AT_SYMLINK_NOFOLLOW); + close_preserve_errno(dirfd); +out: + g_free(dirpath); + g_free(name); + return ret; +} + +static int local_unlinkat_common(FsContext *ctx, int dirfd, const char *name, + int flags) +{ + int ret; + + if (ctx->export_flags & V9FS_SM_MAPPED_FILE) { + int map_dirfd; + + /* We need to remove the metadata as well: + * - the metadata directory if we're removing a directory + * - the metadata file in the parent's metadata directory + * + * If any of these are missing (ie, ENOENT) then we're probably + * trying to remove something that wasn't created in mapped-file + * mode. We just ignore the error. + */ + if (flags == AT_REMOVEDIR) { + int fd; + + fd = openat_dir(dirfd, name); + if (fd == -1) { + return -1; + } + ret = unlinkat(fd, VIRTFS_META_DIR, AT_REMOVEDIR); + close_preserve_errno(fd); + if (ret < 0 && errno != ENOENT) { + return -1; + } + } + map_dirfd = openat_dir(dirfd, VIRTFS_META_DIR); + if (map_dirfd != -1) { + ret = unlinkat(map_dirfd, name, 0); + close_preserve_errno(map_dirfd); + if (ret < 0 && errno != ENOENT) { + return -1; + } + } else if (errno != ENOENT) { + return -1; + } + } + + return unlinkat(dirfd, name, flags); +} + +static int local_remove(FsContext *ctx, const char *path) +{ + struct stat stbuf; + char *dirpath = g_path_get_dirname(path); + char *name = g_path_get_basename(path); + int flags = 0; + int dirfd; + int err = -1; + + dirfd = local_opendir_nofollow(ctx, dirpath); + if (dirfd == -1) { + goto out; + } + + if (fstatat(dirfd, name, &stbuf, AT_SYMLINK_NOFOLLOW) < 0) { + goto err_out; + } + + if (S_ISDIR(stbuf.st_mode)) { + flags |= AT_REMOVEDIR; + } + + err = local_unlinkat_common(ctx, dirfd, name, flags); +err_out: + close_preserve_errno(dirfd); +out: + g_free(name); + g_free(dirpath); + return err; +} + +static int local_fsync(FsContext *ctx, int fid_type, + V9fsFidOpenState *fs, int datasync) +{ + int fd; + + if (fid_type == P9_FID_DIR) { + fd = dirfd(fs->dir.stream); + } else { + fd = fs->fd; + } + + if (datasync) { + return qemu_fdatasync(fd); + } else { + return fsync(fd); + } +} + +static int local_statfs(FsContext *s, V9fsPath *fs_path, struct statfs *stbuf) +{ + int fd, ret; + + fd = local_open_nofollow(s, fs_path->data, O_RDONLY, 0); + if (fd == -1) { + return -1; + } + ret = fstatfs(fd, stbuf); + close_preserve_errno(fd); + return ret; +} + +static ssize_t local_lgetxattr(FsContext *ctx, V9fsPath *fs_path, + const char *name, void *value, size_t size) +{ + char *path = fs_path->data; + + return v9fs_get_xattr(ctx, path, name, value, size); +} + +static ssize_t local_llistxattr(FsContext *ctx, V9fsPath *fs_path, + void *value, size_t size) +{ + char *path = fs_path->data; + + return v9fs_list_xattr(ctx, path, value, size); +} + +static int local_lsetxattr(FsContext *ctx, V9fsPath *fs_path, const char *name, + void *value, size_t size, int flags) +{ + char *path = fs_path->data; + + return 
v9fs_set_xattr(ctx, path, name, value, size, flags); +} + +static int local_lremovexattr(FsContext *ctx, V9fsPath *fs_path, + const char *name) +{ + char *path = fs_path->data; + + return v9fs_remove_xattr(ctx, path, name); +} + +static int local_name_to_path(FsContext *ctx, V9fsPath *dir_path, + const char *name, V9fsPath *target) +{ + if (ctx->export_flags & V9FS_SM_MAPPED_FILE && + local_is_mapped_file_metadata(ctx, name)) { + errno = EINVAL; + return -1; + } + + if (dir_path) { + if (!strcmp(name, ".")) { + /* "." relative to "foo/bar" is "foo/bar" */ + v9fs_path_copy(target, dir_path); + } else if (!strcmp(name, "..")) { + if (!strcmp(dir_path->data, ".")) { + /* ".." relative to the root is "." */ + v9fs_path_sprintf(target, "."); + } else { + char *tmp = g_path_get_dirname(dir_path->data); + /* Symbolic links are resolved by the client. We can assume + * that ".." relative to "foo/bar" is equivalent to "foo" + */ + v9fs_path_sprintf(target, "%s", tmp); + g_free(tmp); + } + } else { + assert(!strchr(name, '/')); + v9fs_path_sprintf(target, "%s/%s", dir_path->data, name); + } + } else if (!strcmp(name, "/") || !strcmp(name, ".") || + !strcmp(name, "..")) { + /* This is the root fid */ + v9fs_path_sprintf(target, "."); + } else { + assert(!strchr(name, '/')); + v9fs_path_sprintf(target, "./%s", name); + } + return 0; +} + +static int local_renameat(FsContext *ctx, V9fsPath *olddir, + const char *old_name, V9fsPath *newdir, + const char *new_name) +{ + int ret; + int odirfd, ndirfd; + + if (ctx->export_flags & V9FS_SM_MAPPED_FILE && + (local_is_mapped_file_metadata(ctx, old_name) || + local_is_mapped_file_metadata(ctx, new_name))) { + errno = EINVAL; + return -1; + } + + odirfd = local_opendir_nofollow(ctx, olddir->data); + if (odirfd == -1) { + return -1; + } + + ndirfd = local_opendir_nofollow(ctx, newdir->data); + if (ndirfd == -1) { + close_preserve_errno(odirfd); + return -1; + } + + ret = renameat(odirfd, old_name, ndirfd, new_name); + if (ret < 0) { + goto out; + } + + if (ctx->export_flags & V9FS_SM_MAPPED_FILE) { + int omap_dirfd, nmap_dirfd; + + ret = mkdirat(ndirfd, VIRTFS_META_DIR, 0700); + if (ret < 0 && errno != EEXIST) { + goto err_undo_rename; + } + + omap_dirfd = openat_dir(odirfd, VIRTFS_META_DIR); + if (omap_dirfd == -1) { + goto err; + } + + nmap_dirfd = openat_dir(ndirfd, VIRTFS_META_DIR); + if (nmap_dirfd == -1) { + close_preserve_errno(omap_dirfd); + goto err; + } + + /* rename the .virtfs_metadata files */ + ret = renameat(omap_dirfd, old_name, nmap_dirfd, new_name); + close_preserve_errno(nmap_dirfd); + close_preserve_errno(omap_dirfd); + if (ret < 0 && errno != ENOENT) { + goto err_undo_rename; + } + + ret = 0; + } + goto out; + +err: + ret = -1; +err_undo_rename: + renameat_preserve_errno(ndirfd, new_name, odirfd, old_name); +out: + close_preserve_errno(ndirfd); + close_preserve_errno(odirfd); + return ret; +} + +static void v9fs_path_init_dirname(V9fsPath *path, const char *str) +{ + path->data = g_path_get_dirname(str); + path->size = strlen(path->data) + 1; +} + +static int local_rename(FsContext *ctx, const char *oldpath, + const char *newpath) +{ + int err; + char *oname = g_path_get_basename(oldpath); + char *nname = g_path_get_basename(newpath); + V9fsPath olddir, newdir; + + v9fs_path_init_dirname(&olddir, oldpath); + v9fs_path_init_dirname(&newdir, newpath); + + err = local_renameat(ctx, &olddir, oname, &newdir, nname); + + v9fs_path_free(&newdir); + v9fs_path_free(&olddir); + g_free(nname); + g_free(oname); + + return err; +} + +static int 
local_unlinkat(FsContext *ctx, V9fsPath *dir,
+                          const char *name, int flags)
+{
+    int ret;
+    int dirfd;
+
+    if (ctx->export_flags & V9FS_SM_MAPPED_FILE &&
+        local_is_mapped_file_metadata(ctx, name)) {
+        errno = EINVAL;
+        return -1;
+    }
+
+    dirfd = local_opendir_nofollow(ctx, dir->data);
+    if (dirfd == -1) {
+        return -1;
+    }
+
+    ret = local_unlinkat_common(ctx, dirfd, name, flags);
+    close_preserve_errno(dirfd);
+    return ret;
+}
+
+#ifdef FS_IOC_GETVERSION
+static int local_ioc_getversion(FsContext *ctx, V9fsPath *path,
+                                mode_t st_mode, uint64_t *st_gen)
+{
+    int err;
+    V9fsFidOpenState fid_open;
+
+    /*
+     * Do not try to open special files like device nodes, fifos, etc.
+     * We can get an fd for regular files and directories only.
+     */
+    if (!S_ISREG(st_mode) && !S_ISDIR(st_mode)) {
+        errno = ENOTTY;
+        return -1;
+    }
+    err = local_open(ctx, path, O_RDONLY, &fid_open);
+    if (err < 0) {
+        return err;
+    }
+    err = ioctl(fid_open.fd, FS_IOC_GETVERSION, st_gen);
+    local_close(ctx, &fid_open);
+    return err;
+}
+#endif
+
+static int local_ioc_getversion_init(FsContext *ctx, LocalData *data, Error **errp)
+{
+#ifdef FS_IOC_GETVERSION
+    struct statfs stbuf;
+
+    /*
+     * Use ioc_getversion only if the ioctl is defined.
+     */
+    if (fstatfs(data->mountfd, &stbuf) < 0) {
+        error_setg_errno(errp, errno,
+                         "failed to stat file system at '%s'", ctx->fs_root);
+        return -1;
+    }
+    switch (stbuf.f_type) {
+    case EXT2_SUPER_MAGIC:
+    case BTRFS_SUPER_MAGIC:
+    case REISERFS_SUPER_MAGIC:
+    case XFS_SUPER_MAGIC:
+        ctx->exops.get_st_gen = local_ioc_getversion;
+        break;
+    }
+#endif
+    return 0;
+}
+
+static int local_init(FsContext *ctx, Error **errp)
+{
+    LocalData *data = g_malloc(sizeof(*data));
+
+    data->mountfd = open(ctx->fs_root, O_DIRECTORY | O_RDONLY);
+    if (data->mountfd == -1) {
+        error_setg_errno(errp, errno, "failed to open '%s'", ctx->fs_root);
+        goto err;
+    }
+
+    if (local_ioc_getversion_init(ctx, data, errp) < 0) {
+        close(data->mountfd);
+        goto err;
+    }
+
+    if (ctx->export_flags & V9FS_SM_PASSTHROUGH) {
+        ctx->xops = passthrough_xattr_ops;
+    } else if (ctx->export_flags & V9FS_SM_MAPPED) {
+        ctx->xops = mapped_xattr_ops;
+    } else if (ctx->export_flags & V9FS_SM_NONE) {
+        ctx->xops = none_xattr_ops;
+    } else if (ctx->export_flags & V9FS_SM_MAPPED_FILE) {
+        /*
+         * xattr operations for mapped-file and passthrough
+         * remain the same.
+ */ + ctx->xops = passthrough_xattr_ops; + } + ctx->export_flags |= V9FS_PATHNAME_FSCONTEXT; + + ctx->private = data; + return 0; + +err: + g_free(data); + return -1; +} + +static void local_cleanup(FsContext *ctx) +{ + LocalData *data = ctx->private; + + if (!data) { + return; + } + + close(data->mountfd); + g_free(data); +} + +static void error_append_security_model_hint(Error *const *errp) +{ + error_append_hint(errp, "Valid options are: security_model=" + "[passthrough|mapped-xattr|mapped-file|none]\n"); +} + +static int local_parse_opts(QemuOpts *opts, FsDriverEntry *fse, Error **errp) +{ + ERRP_GUARD(); + const char *sec_model = qemu_opt_get(opts, "security_model"); + const char *path = qemu_opt_get(opts, "path"); + const char *multidevs = qemu_opt_get(opts, "multidevs"); + + if (!sec_model) { + error_setg(errp, "security_model property not set"); + error_append_security_model_hint(errp); + return -1; + } + + if (!strcmp(sec_model, "passthrough")) { + fse->export_flags |= V9FS_SM_PASSTHROUGH; + } else if (!strcmp(sec_model, "mapped") || + !strcmp(sec_model, "mapped-xattr")) { + fse->export_flags |= V9FS_SM_MAPPED; + } else if (!strcmp(sec_model, "none")) { + fse->export_flags |= V9FS_SM_NONE; + } else if (!strcmp(sec_model, "mapped-file")) { + fse->export_flags |= V9FS_SM_MAPPED_FILE; + } else { + error_setg(errp, "invalid security_model property '%s'", sec_model); + error_append_security_model_hint(errp); + return -1; + } + + if (multidevs) { + if (!strcmp(multidevs, "remap")) { + fse->export_flags &= ~V9FS_FORBID_MULTIDEVS; + fse->export_flags |= V9FS_REMAP_INODES; + } else if (!strcmp(multidevs, "forbid")) { + fse->export_flags &= ~V9FS_REMAP_INODES; + fse->export_flags |= V9FS_FORBID_MULTIDEVS; + } else if (!strcmp(multidevs, "warn")) { + fse->export_flags &= ~V9FS_FORBID_MULTIDEVS; + fse->export_flags &= ~V9FS_REMAP_INODES; + } else { + error_setg(errp, "invalid multidevs property '%s'", + multidevs); + error_append_hint(errp, "Valid options are: multidevs=" + "[remap|forbid|warn]\n"); + return -1; + } + } + + if (!path) { + error_setg(errp, "path property not set"); + return -1; + } + + if (fsdev_throttle_parse_opts(opts, &fse->fst, errp)) { + error_prepend(errp, "invalid throttle configuration: "); + return -1; + } + + if (fse->export_flags & V9FS_SM_MAPPED || + fse->export_flags & V9FS_SM_MAPPED_FILE) { + fse->fmode = + qemu_opt_get_number(opts, "fmode", SM_LOCAL_MODE_BITS) & 0777; + fse->dmode = + qemu_opt_get_number(opts, "dmode", SM_LOCAL_DIR_MODE_BITS) & 0777; + } else { + if (qemu_opt_find(opts, "fmode")) { + error_setg(errp, "fmode is only valid for mapped security modes"); + return -1; + } + if (qemu_opt_find(opts, "dmode")) { + error_setg(errp, "dmode is only valid for mapped security modes"); + return -1; + } + } + + fse->path = g_strdup(path); + + return 0; +} + +FileOperations local_ops = { + .parse_opts = local_parse_opts, + .init = local_init, + .cleanup = local_cleanup, + .lstat = local_lstat, + .readlink = local_readlink, + .close = local_close, + .closedir = local_closedir, + .open = local_open, + .opendir = local_opendir, + .rewinddir = local_rewinddir, + .telldir = local_telldir, + .readdir = local_readdir, + .seekdir = local_seekdir, + .preadv = local_preadv, + .pwritev = local_pwritev, + .chmod = local_chmod, + .mknod = local_mknod, + .mkdir = local_mkdir, + .fstat = local_fstat, + .open2 = local_open2, + .symlink = local_symlink, + .link = local_link, + .truncate = local_truncate, + .rename = local_rename, + .chown = local_chown, + .utimensat = 
local_utimensat, + .remove = local_remove, + .fsync = local_fsync, + .statfs = local_statfs, + .lgetxattr = local_lgetxattr, + .llistxattr = local_llistxattr, + .lsetxattr = local_lsetxattr, + .lremovexattr = local_lremovexattr, + .name_to_path = local_name_to_path, + .renameat = local_renameat, + .unlinkat = local_unlinkat, +}; diff --git a/hw/9pfs/9p-local.h b/hw/9pfs/9p-local.h new file mode 100644 index 000000000..32c72749d --- /dev/null +++ b/hw/9pfs/9p-local.h @@ -0,0 +1,20 @@ +/* + * 9p local backend utilities + * + * Copyright IBM, Corp. 2017 + * + * Authors: + * Greg Kurz + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#ifndef QEMU_9P_LOCAL_H +#define QEMU_9P_LOCAL_H + +int local_open_nofollow(FsContext *fs_ctx, const char *path, int flags, + mode_t mode); +int local_opendir_nofollow(FsContext *fs_ctx, const char *path); + +#endif diff --git a/hw/9pfs/9p-posix-acl.c b/hw/9pfs/9p-posix-acl.c new file mode 100644 index 000000000..eadae270d --- /dev/null +++ b/hw/9pfs/9p-posix-acl.c @@ -0,0 +1,161 @@ +/* + * 9p system.posix* xattr callback + * + * Copyright IBM, Corp. 2010 + * + * Authors: + * Aneesh Kumar K.V + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + * + */ + +/* + * Not so fast! You might want to read the 9p developer docs first: + * https://wiki.qemu.org/Documentation/9p + */ + +#include "qemu/osdep.h" +#include "qemu/xattr.h" +#include "9p.h" +#include "fsdev/file-op-9p.h" +#include "9p-xattr.h" + +#define MAP_ACL_ACCESS "user.virtfs.system.posix_acl_access" +#define MAP_ACL_DEFAULT "user.virtfs.system.posix_acl_default" +#define ACL_ACCESS "system.posix_acl_access" +#define ACL_DEFAULT "system.posix_acl_default" + +static ssize_t mp_pacl_getxattr(FsContext *ctx, const char *path, + const char *name, void *value, size_t size) +{ + return local_getxattr_nofollow(ctx, path, MAP_ACL_ACCESS, value, size); +} + +static ssize_t mp_pacl_listxattr(FsContext *ctx, const char *path, + char *name, void *value, size_t osize) +{ + ssize_t len = sizeof(ACL_ACCESS); + + if (!value) { + return len; + } + + if (osize < len) { + errno = ERANGE; + return -1; + } + + /* len includes the trailing NUL */ + memcpy(value, ACL_ACCESS, len); + return 0; +} + +static int mp_pacl_setxattr(FsContext *ctx, const char *path, const char *name, + void *value, size_t size, int flags) +{ + return local_setxattr_nofollow(ctx, path, MAP_ACL_ACCESS, value, size, + flags); +} + +static int mp_pacl_removexattr(FsContext *ctx, + const char *path, const char *name) +{ + int ret; + + ret = local_removexattr_nofollow(ctx, path, MAP_ACL_ACCESS); + if (ret == -1 && errno == ENODATA) { + /* + * We don't get ENODATA error when trying to remove a + * posix acl that is not present. 
So don't throw the error + * even in case of mapped security model + */ + errno = 0; + ret = 0; + } + return ret; +} + +static ssize_t mp_dacl_getxattr(FsContext *ctx, const char *path, + const char *name, void *value, size_t size) +{ + return local_getxattr_nofollow(ctx, path, MAP_ACL_DEFAULT, value, size); +} + +static ssize_t mp_dacl_listxattr(FsContext *ctx, const char *path, + char *name, void *value, size_t osize) +{ + ssize_t len = sizeof(ACL_DEFAULT); + + if (!value) { + return len; + } + + if (osize < len) { + errno = ERANGE; + return -1; + } + + /* len includes the trailing NUL */ + memcpy(value, ACL_DEFAULT, len); + return 0; +} + +static int mp_dacl_setxattr(FsContext *ctx, const char *path, const char *name, + void *value, size_t size, int flags) +{ + return local_setxattr_nofollow(ctx, path, MAP_ACL_DEFAULT, value, size, + flags); +} + +static int mp_dacl_removexattr(FsContext *ctx, + const char *path, const char *name) +{ + int ret; + + ret = local_removexattr_nofollow(ctx, path, MAP_ACL_DEFAULT); + if (ret == -1 && errno == ENODATA) { + /* + * We don't get ENODATA error when trying to remove a + * posix acl that is not present. So don't throw the error + * even in case of mapped security model + */ + errno = 0; + ret = 0; + } + return ret; +} + + +XattrOperations mapped_pacl_xattr = { + .name = "system.posix_acl_access", + .getxattr = mp_pacl_getxattr, + .setxattr = mp_pacl_setxattr, + .listxattr = mp_pacl_listxattr, + .removexattr = mp_pacl_removexattr, +}; + +XattrOperations mapped_dacl_xattr = { + .name = "system.posix_acl_default", + .getxattr = mp_dacl_getxattr, + .setxattr = mp_dacl_setxattr, + .listxattr = mp_dacl_listxattr, + .removexattr = mp_dacl_removexattr, +}; + +XattrOperations passthrough_acl_xattr = { + .name = "system.posix_acl_", + .getxattr = pt_getxattr, + .setxattr = pt_setxattr, + .listxattr = pt_listxattr, + .removexattr = pt_removexattr, +}; + +XattrOperations none_acl_xattr = { + .name = "system.posix_acl_", + .getxattr = notsup_getxattr, + .setxattr = notsup_setxattr, + .listxattr = notsup_listxattr, + .removexattr = notsup_removexattr, +}; diff --git a/hw/9pfs/9p-proxy.c b/hw/9pfs/9p-proxy.c new file mode 100644 index 000000000..09bd9f146 --- /dev/null +++ b/hw/9pfs/9p-proxy.c @@ -0,0 +1,1243 @@ +/* + * 9p Proxy callback + * + * Copyright IBM, Corp. 2011 + * + * Authors: + * M. Mohan Kumar + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + */ + +/* + * Not so fast! You might want to read the 9p developer docs first: + * https://wiki.qemu.org/Documentation/9p + */ + +#include "qemu/osdep.h" +#include +#include +#include "qemu-common.h" +#include "9p.h" +#include "qapi/error.h" +#include "qemu/cutils.h" +#include "qemu/error-report.h" +#include "qemu/option.h" +#include "fsdev/qemu-fsdev.h" +#include "9p-proxy.h" + +typedef struct V9fsProxy { + int sockfd; + QemuMutex mutex; + struct iovec in_iovec; + struct iovec out_iovec; +} V9fsProxy; + +/* + * Return received file descriptor on success in *status. + * errno is also returned on *status (which will be < 0) + * return < 0 on transport error. 
+ */ +static int v9fs_receivefd(int sockfd, int *status) +{ + struct iovec iov; + struct msghdr msg; + struct cmsghdr *cmsg; + int retval, data, fd; + union MsgControl msg_control; + + iov.iov_base = &data; + iov.iov_len = sizeof(data); + + memset(&msg, 0, sizeof(msg)); + msg.msg_iov = &iov; + msg.msg_iovlen = 1; + msg.msg_control = &msg_control; + msg.msg_controllen = sizeof(msg_control); + + do { + retval = recvmsg(sockfd, &msg, 0); + } while (retval < 0 && errno == EINTR); + if (retval <= 0) { + return retval; + } + /* + * data is set to V9FS_FD_VALID, if ancillary data is sent. If this + * request doesn't need ancillary data (fd) or an error occurred, + * data is set to negative errno value. + */ + if (data != V9FS_FD_VALID) { + *status = data; + return 0; + } + /* + * File descriptor (fd) is sent in the ancillary data. Check if we + * indeed received it. One of the reasons to fail to receive it is if + * we exceeded the maximum number of file descriptors! + */ + for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) { + if (cmsg->cmsg_len != CMSG_LEN(sizeof(int)) || + cmsg->cmsg_level != SOL_SOCKET || + cmsg->cmsg_type != SCM_RIGHTS) { + continue; + } + fd = *((int *)CMSG_DATA(cmsg)); + *status = fd; + return 0; + } + *status = -ENFILE; /* Ancillary data sent but not received */ + return 0; +} + +static ssize_t socket_read(int sockfd, void *buff, size_t size) +{ + ssize_t retval, total = 0; + + while (size) { + retval = read(sockfd, buff, size); + if (retval == 0) { + return -EIO; + } + if (retval < 0) { + if (errno == EINTR) { + continue; + } + return -errno; + } + size -= retval; + buff += retval; + total += retval; + } + return total; +} + +/* Converts proxy_statfs to VFS statfs structure */ +static void prstatfs_to_statfs(struct statfs *stfs, ProxyStatFS *prstfs) +{ + memset(stfs, 0, sizeof(*stfs)); + stfs->f_type = prstfs->f_type; + stfs->f_bsize = prstfs->f_bsize; + stfs->f_blocks = prstfs->f_blocks; + stfs->f_bfree = prstfs->f_bfree; + stfs->f_bavail = prstfs->f_bavail; + stfs->f_files = prstfs->f_files; + stfs->f_ffree = prstfs->f_ffree; + stfs->f_fsid.__val[0] = prstfs->f_fsid[0] & 0xFFFFFFFFU; + stfs->f_fsid.__val[1] = prstfs->f_fsid[1] >> 32 & 0xFFFFFFFFU; + stfs->f_namelen = prstfs->f_namelen; + stfs->f_frsize = prstfs->f_frsize; +} + +/* Converts proxy_stat structure to VFS stat structure */ +static void prstat_to_stat(struct stat *stbuf, ProxyStat *prstat) +{ + memset(stbuf, 0, sizeof(*stbuf)); + stbuf->st_dev = prstat->st_dev; + stbuf->st_ino = prstat->st_ino; + stbuf->st_nlink = prstat->st_nlink; + stbuf->st_mode = prstat->st_mode; + stbuf->st_uid = prstat->st_uid; + stbuf->st_gid = prstat->st_gid; + stbuf->st_rdev = prstat->st_rdev; + stbuf->st_size = prstat->st_size; + stbuf->st_blksize = prstat->st_blksize; + stbuf->st_blocks = prstat->st_blocks; + stbuf->st_atim.tv_sec = prstat->st_atim_sec; + stbuf->st_atim.tv_nsec = prstat->st_atim_nsec; + stbuf->st_mtime = prstat->st_mtim_sec; + stbuf->st_mtim.tv_nsec = prstat->st_mtim_nsec; + stbuf->st_ctime = prstat->st_ctim_sec; + stbuf->st_ctim.tv_nsec = prstat->st_ctim_nsec; +} + +/* + * Response contains two parts + * {header, data} + * header.type == T_ERROR, data -> -errno + * header.type == T_SUCCESS, data -> response + * size of errno/response is given by header.size + * returns < 0, on transport error. response is + * valid only if status >= 0. 
+ */ +static int v9fs_receive_response(V9fsProxy *proxy, int type, + int *status, void *response) +{ + int retval; + ProxyHeader header; + struct iovec *reply = &proxy->in_iovec; + + *status = 0; + reply->iov_len = 0; + retval = socket_read(proxy->sockfd, reply->iov_base, PROXY_HDR_SZ); + if (retval < 0) { + return retval; + } + reply->iov_len = PROXY_HDR_SZ; + retval = proxy_unmarshal(reply, 0, "dd", &header.type, &header.size); + assert(retval == 4 * 2); + /* + * if response size > PROXY_MAX_IO_SZ, read the response but ignore it and + * return -ENOBUFS + */ + if (header.size > PROXY_MAX_IO_SZ) { + int count; + while (header.size > 0) { + count = MIN(PROXY_MAX_IO_SZ, header.size); + count = socket_read(proxy->sockfd, reply->iov_base, count); + if (count < 0) { + return count; + } + header.size -= count; + } + *status = -ENOBUFS; + return 0; + } + + retval = socket_read(proxy->sockfd, + reply->iov_base + PROXY_HDR_SZ, header.size); + if (retval < 0) { + return retval; + } + reply->iov_len += header.size; + /* there was an error during processing request */ + if (header.type == T_ERROR) { + int ret; + ret = proxy_unmarshal(reply, PROXY_HDR_SZ, "d", status); + assert(ret == 4); + return 0; + } + + switch (type) { + case T_LSTAT: { + ProxyStat prstat; + retval = proxy_unmarshal(reply, PROXY_HDR_SZ, + "qqqdddqqqqqqqqqq", &prstat.st_dev, + &prstat.st_ino, &prstat.st_nlink, + &prstat.st_mode, &prstat.st_uid, + &prstat.st_gid, &prstat.st_rdev, + &prstat.st_size, &prstat.st_blksize, + &prstat.st_blocks, + &prstat.st_atim_sec, &prstat.st_atim_nsec, + &prstat.st_mtim_sec, &prstat.st_mtim_nsec, + &prstat.st_ctim_sec, &prstat.st_ctim_nsec); + assert(retval == 8 * 3 + 4 * 3 + 8 * 10); + prstat_to_stat(response, &prstat); + break; + } + case T_STATFS: { + ProxyStatFS prstfs; + retval = proxy_unmarshal(reply, PROXY_HDR_SZ, + "qqqqqqqqqqq", &prstfs.f_type, + &prstfs.f_bsize, &prstfs.f_blocks, + &prstfs.f_bfree, &prstfs.f_bavail, + &prstfs.f_files, &prstfs.f_ffree, + &prstfs.f_fsid[0], &prstfs.f_fsid[1], + &prstfs.f_namelen, &prstfs.f_frsize); + assert(retval == 8 * 11); + prstatfs_to_statfs(response, &prstfs); + break; + } + case T_READLINK: { + V9fsString target; + v9fs_string_init(&target); + retval = proxy_unmarshal(reply, PROXY_HDR_SZ, "s", &target); + strcpy(response, target.data); + v9fs_string_free(&target); + break; + } + case T_LGETXATTR: + case T_LLISTXATTR: { + V9fsString xattr; + v9fs_string_init(&xattr); + retval = proxy_unmarshal(reply, PROXY_HDR_SZ, "s", &xattr); + memcpy(response, xattr.data, xattr.size); + v9fs_string_free(&xattr); + break; + } + case T_GETVERSION: + retval = proxy_unmarshal(reply, PROXY_HDR_SZ, "q", response); + assert(retval == 8); + break; + default: + return -1; + } + if (retval < 0) { + *status = retval; + } + return 0; +} + +/* + * return < 0 on transport error. 
+ * *status is valid only if return >= 0 + */ +static int v9fs_receive_status(V9fsProxy *proxy, + struct iovec *reply, int *status) +{ + int retval; + ProxyHeader header; + + *status = 0; + reply->iov_len = 0; + retval = socket_read(proxy->sockfd, reply->iov_base, PROXY_HDR_SZ); + if (retval < 0) { + return retval; + } + reply->iov_len = PROXY_HDR_SZ; + retval = proxy_unmarshal(reply, 0, "dd", &header.type, &header.size); + assert(retval == 4 * 2); + retval = socket_read(proxy->sockfd, + reply->iov_base + PROXY_HDR_SZ, header.size); + if (retval < 0) { + return retval; + } + reply->iov_len += header.size; + retval = proxy_unmarshal(reply, PROXY_HDR_SZ, "d", status); + assert(retval == 4); + return 0; +} + +/* + * Proxy->header and proxy->request written to socket by QEMU process. + * This request read by proxy helper process + * returns 0 on success and -errno on error + */ +static int v9fs_request(V9fsProxy *proxy, int type, void *response, ...) +{ + dev_t rdev; + va_list ap; + int size = 0; + int retval = 0; + uint64_t offset; + ProxyHeader header = { 0, 0}; + struct timespec spec[2]; + int flags, mode, uid, gid; + V9fsString *name, *value; + V9fsString *path, *oldpath; + struct iovec *iovec = NULL, *reply = NULL; + + qemu_mutex_lock(&proxy->mutex); + + if (proxy->sockfd == -1) { + retval = -EIO; + goto err_out; + } + iovec = &proxy->out_iovec; + reply = &proxy->in_iovec; + va_start(ap, response); + switch (type) { + case T_OPEN: + path = va_arg(ap, V9fsString *); + flags = va_arg(ap, int); + retval = proxy_marshal(iovec, PROXY_HDR_SZ, "sd", path, flags); + if (retval > 0) { + header.size = retval; + header.type = T_OPEN; + } + break; + case T_CREATE: + path = va_arg(ap, V9fsString *); + flags = va_arg(ap, int); + mode = va_arg(ap, int); + uid = va_arg(ap, int); + gid = va_arg(ap, int); + retval = proxy_marshal(iovec, PROXY_HDR_SZ, "sdddd", path, + flags, mode, uid, gid); + if (retval > 0) { + header.size = retval; + header.type = T_CREATE; + } + break; + case T_MKNOD: + path = va_arg(ap, V9fsString *); + mode = va_arg(ap, int); + rdev = va_arg(ap, long int); + uid = va_arg(ap, int); + gid = va_arg(ap, int); + retval = proxy_marshal(iovec, PROXY_HDR_SZ, "ddsdq", + uid, gid, path, mode, rdev); + if (retval > 0) { + header.size = retval; + header.type = T_MKNOD; + } + break; + case T_MKDIR: + path = va_arg(ap, V9fsString *); + mode = va_arg(ap, int); + uid = va_arg(ap, int); + gid = va_arg(ap, int); + retval = proxy_marshal(iovec, PROXY_HDR_SZ, "ddsd", + uid, gid, path, mode); + if (retval > 0) { + header.size = retval; + header.type = T_MKDIR; + } + break; + case T_SYMLINK: + oldpath = va_arg(ap, V9fsString *); + path = va_arg(ap, V9fsString *); + uid = va_arg(ap, int); + gid = va_arg(ap, int); + retval = proxy_marshal(iovec, PROXY_HDR_SZ, "ddss", + uid, gid, oldpath, path); + if (retval > 0) { + header.size = retval; + header.type = T_SYMLINK; + } + break; + case T_LINK: + oldpath = va_arg(ap, V9fsString *); + path = va_arg(ap, V9fsString *); + retval = proxy_marshal(iovec, PROXY_HDR_SZ, "ss", + oldpath, path); + if (retval > 0) { + header.size = retval; + header.type = T_LINK; + } + break; + case T_LSTAT: + path = va_arg(ap, V9fsString *); + retval = proxy_marshal(iovec, PROXY_HDR_SZ, "s", path); + if (retval > 0) { + header.size = retval; + header.type = T_LSTAT; + } + break; + case T_READLINK: + path = va_arg(ap, V9fsString *); + size = va_arg(ap, int); + retval = proxy_marshal(iovec, PROXY_HDR_SZ, "sd", path, size); + if (retval > 0) { + header.size = retval; + header.type = 
T_READLINK; + } + break; + case T_STATFS: + path = va_arg(ap, V9fsString *); + retval = proxy_marshal(iovec, PROXY_HDR_SZ, "s", path); + if (retval > 0) { + header.size = retval; + header.type = T_STATFS; + } + break; + case T_CHMOD: + path = va_arg(ap, V9fsString *); + mode = va_arg(ap, int); + retval = proxy_marshal(iovec, PROXY_HDR_SZ, "sd", path, mode); + if (retval > 0) { + header.size = retval; + header.type = T_CHMOD; + } + break; + case T_CHOWN: + path = va_arg(ap, V9fsString *); + uid = va_arg(ap, int); + gid = va_arg(ap, int); + retval = proxy_marshal(iovec, PROXY_HDR_SZ, "sdd", path, uid, gid); + if (retval > 0) { + header.size = retval; + header.type = T_CHOWN; + } + break; + case T_TRUNCATE: + path = va_arg(ap, V9fsString *); + offset = va_arg(ap, uint64_t); + retval = proxy_marshal(iovec, PROXY_HDR_SZ, "sq", path, offset); + if (retval > 0) { + header.size = retval; + header.type = T_TRUNCATE; + } + break; + case T_UTIME: + path = va_arg(ap, V9fsString *); + spec[0].tv_sec = va_arg(ap, long); + spec[0].tv_nsec = va_arg(ap, long); + spec[1].tv_sec = va_arg(ap, long); + spec[1].tv_nsec = va_arg(ap, long); + retval = proxy_marshal(iovec, PROXY_HDR_SZ, "sqqqq", path, + spec[0].tv_sec, spec[1].tv_nsec, + spec[1].tv_sec, spec[1].tv_nsec); + if (retval > 0) { + header.size = retval; + header.type = T_UTIME; + } + break; + case T_RENAME: + oldpath = va_arg(ap, V9fsString *); + path = va_arg(ap, V9fsString *); + retval = proxy_marshal(iovec, PROXY_HDR_SZ, "ss", oldpath, path); + if (retval > 0) { + header.size = retval; + header.type = T_RENAME; + } + break; + case T_REMOVE: + path = va_arg(ap, V9fsString *); + retval = proxy_marshal(iovec, PROXY_HDR_SZ, "s", path); + if (retval > 0) { + header.size = retval; + header.type = T_REMOVE; + } + break; + case T_LGETXATTR: + size = va_arg(ap, int); + path = va_arg(ap, V9fsString *); + name = va_arg(ap, V9fsString *); + retval = proxy_marshal(iovec, PROXY_HDR_SZ, + "dss", size, path, name); + if (retval > 0) { + header.size = retval; + header.type = T_LGETXATTR; + } + break; + case T_LLISTXATTR: + size = va_arg(ap, int); + path = va_arg(ap, V9fsString *); + retval = proxy_marshal(iovec, PROXY_HDR_SZ, "ds", size, path); + if (retval > 0) { + header.size = retval; + header.type = T_LLISTXATTR; + } + break; + case T_LSETXATTR: + path = va_arg(ap, V9fsString *); + name = va_arg(ap, V9fsString *); + value = va_arg(ap, V9fsString *); + size = va_arg(ap, int); + flags = va_arg(ap, int); + retval = proxy_marshal(iovec, PROXY_HDR_SZ, "sssdd", + path, name, value, size, flags); + if (retval > 0) { + header.size = retval; + header.type = T_LSETXATTR; + } + break; + case T_LREMOVEXATTR: + path = va_arg(ap, V9fsString *); + name = va_arg(ap, V9fsString *); + retval = proxy_marshal(iovec, PROXY_HDR_SZ, "ss", path, name); + if (retval > 0) { + header.size = retval; + header.type = T_LREMOVEXATTR; + } + break; + case T_GETVERSION: + path = va_arg(ap, V9fsString *); + retval = proxy_marshal(iovec, PROXY_HDR_SZ, "s", path); + if (retval > 0) { + header.size = retval; + header.type = T_GETVERSION; + } + break; + default: + error_report("Invalid type %d", type); + retval = -EINVAL; + break; + } + va_end(ap); + + if (retval < 0) { + goto err_out; + } + + /* marshal the header details */ + retval = proxy_marshal(iovec, 0, "dd", header.type, header.size); + assert(retval == 4 * 2); + header.size += PROXY_HDR_SZ; + + retval = qemu_write_full(proxy->sockfd, iovec->iov_base, header.size); + if (retval != header.size) { + goto close_error; + } + + switch (type) { + 
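+    /*
+     * Reply routing below depends only on the request type. On the wire
+     * every message, in either direction, is framed as (restated from the
+     * proxy_marshal(iovec, 0, "dd", ...) call above; not a struct that
+     * exists in this code):
+     *
+     *     uint32_t type;            // T_* request code, or T_ERROR
+     *     uint32_t size;            // payload bytes after the header
+     *     uint8_t  payload[size];   // marshalled arguments / results
+     *
+     * with the two fixed fields adding up to PROXY_HDR_SZ.
+     */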
case T_OPEN: + case T_CREATE: + /* + * A file descriptor is returned as response for + * T_OPEN,T_CREATE on success + */ + if (v9fs_receivefd(proxy->sockfd, &retval) < 0) { + goto close_error; + } + break; + case T_MKNOD: + case T_MKDIR: + case T_SYMLINK: + case T_LINK: + case T_CHMOD: + case T_CHOWN: + case T_RENAME: + case T_TRUNCATE: + case T_UTIME: + case T_REMOVE: + case T_LSETXATTR: + case T_LREMOVEXATTR: + if (v9fs_receive_status(proxy, reply, &retval) < 0) { + goto close_error; + } + break; + case T_LSTAT: + case T_READLINK: + case T_STATFS: + case T_GETVERSION: + if (v9fs_receive_response(proxy, type, &retval, response) < 0) { + goto close_error; + } + break; + case T_LGETXATTR: + case T_LLISTXATTR: + if (!size) { + if (v9fs_receive_status(proxy, reply, &retval) < 0) { + goto close_error; + } + } else { + if (v9fs_receive_response(proxy, type, &retval, response) < 0) { + goto close_error; + } + } + break; + } + +err_out: + qemu_mutex_unlock(&proxy->mutex); + return retval; + +close_error: + close(proxy->sockfd); + proxy->sockfd = -1; + qemu_mutex_unlock(&proxy->mutex); + return -EIO; +} + +static int proxy_lstat(FsContext *fs_ctx, V9fsPath *fs_path, struct stat *stbuf) +{ + int retval; + retval = v9fs_request(fs_ctx->private, T_LSTAT, stbuf, fs_path); + if (retval < 0) { + errno = -retval; + return -1; + } + return retval; +} + +static ssize_t proxy_readlink(FsContext *fs_ctx, V9fsPath *fs_path, + char *buf, size_t bufsz) +{ + int retval; + retval = v9fs_request(fs_ctx->private, T_READLINK, buf, fs_path, bufsz); + if (retval < 0) { + errno = -retval; + return -1; + } + return strlen(buf); +} + +static int proxy_close(FsContext *ctx, V9fsFidOpenState *fs) +{ + return close(fs->fd); +} + +static int proxy_closedir(FsContext *ctx, V9fsFidOpenState *fs) +{ + return closedir(fs->dir.stream); +} + +static int proxy_open(FsContext *ctx, V9fsPath *fs_path, + int flags, V9fsFidOpenState *fs) +{ + fs->fd = v9fs_request(ctx->private, T_OPEN, NULL, fs_path, flags); + if (fs->fd < 0) { + errno = -fs->fd; + fs->fd = -1; + } + return fs->fd; +} + +static int proxy_opendir(FsContext *ctx, + V9fsPath *fs_path, V9fsFidOpenState *fs) +{ + int serrno, fd; + + fs->dir.stream = NULL; + fd = v9fs_request(ctx->private, T_OPEN, NULL, fs_path, O_DIRECTORY); + if (fd < 0) { + errno = -fd; + return -1; + } + fs->dir.stream = fdopendir(fd); + if (!fs->dir.stream) { + serrno = errno; + close(fd); + errno = serrno; + return -1; + } + return 0; +} + +static void proxy_rewinddir(FsContext *ctx, V9fsFidOpenState *fs) +{ + rewinddir(fs->dir.stream); +} + +static off_t proxy_telldir(FsContext *ctx, V9fsFidOpenState *fs) +{ + return telldir(fs->dir.stream); +} + +static struct dirent *proxy_readdir(FsContext *ctx, V9fsFidOpenState *fs) +{ + return readdir(fs->dir.stream); +} + +static void proxy_seekdir(FsContext *ctx, V9fsFidOpenState *fs, off_t off) +{ + seekdir(fs->dir.stream, off); +} + +static ssize_t proxy_preadv(FsContext *ctx, V9fsFidOpenState *fs, + const struct iovec *iov, + int iovcnt, off_t offset) +{ + ssize_t ret; +#ifdef CONFIG_PREADV + ret = preadv(fs->fd, iov, iovcnt, offset); +#else + ret = lseek(fs->fd, offset, SEEK_SET); + if (ret >= 0) { + ret = readv(fs->fd, iov, iovcnt); + } +#endif + return ret; +} + +static ssize_t proxy_pwritev(FsContext *ctx, V9fsFidOpenState *fs, + const struct iovec *iov, + int iovcnt, off_t offset) +{ + ssize_t ret; + +#ifdef CONFIG_PREADV + ret = pwritev(fs->fd, iov, iovcnt, offset); +#else + ret = lseek(fs->fd, offset, SEEK_SET); + if (ret >= 0) { + ret = 
writev(fs->fd, iov, iovcnt);
+    }
+#endif
+#ifdef CONFIG_SYNC_FILE_RANGE
+    if (ret > 0 && ctx->export_flags & V9FS_IMMEDIATE_WRITEOUT) {
+        /*
+         * Initiate a writeback. This is not a data integrity sync.
+         * We want to ensure that we don't leave dirty pages in the cache
+         * after write when writeout=immediate is specified.
+         */
+        sync_file_range(fs->fd, offset, ret,
+                        SYNC_FILE_RANGE_WAIT_BEFORE | SYNC_FILE_RANGE_WRITE);
+    }
+#endif
+    return ret;
+}
+
+static int proxy_chmod(FsContext *fs_ctx, V9fsPath *fs_path, FsCred *credp)
+{
+    int retval;
+    retval = v9fs_request(fs_ctx->private, T_CHMOD, NULL, fs_path,
+                          credp->fc_mode);
+    if (retval < 0) {
+        errno = -retval;
+    }
+    return retval;
+}
+
+static int proxy_mknod(FsContext *fs_ctx, V9fsPath *dir_path,
+                       const char *name, FsCred *credp)
+{
+    int retval;
+    V9fsString fullname;
+
+    v9fs_string_init(&fullname);
+    v9fs_string_sprintf(&fullname, "%s/%s", dir_path->data, name);
+
+    retval = v9fs_request(fs_ctx->private, T_MKNOD, NULL, &fullname,
+                          credp->fc_mode, credp->fc_rdev,
+                          credp->fc_uid, credp->fc_gid);
+    v9fs_string_free(&fullname);
+    if (retval < 0) {
+        errno = -retval;
+        retval = -1;
+    }
+    return retval;
+}
+
+static int proxy_mkdir(FsContext *fs_ctx, V9fsPath *dir_path,
+                       const char *name, FsCred *credp)
+{
+    int retval;
+    V9fsString fullname;
+
+    v9fs_string_init(&fullname);
+    v9fs_string_sprintf(&fullname, "%s/%s", dir_path->data, name);
+
+    retval = v9fs_request(fs_ctx->private, T_MKDIR, NULL, &fullname,
+                          credp->fc_mode, credp->fc_uid, credp->fc_gid);
+    v9fs_string_free(&fullname);
+    if (retval < 0) {
+        errno = -retval;
+        retval = -1;
+    }
+    return retval;
+}
+
+static int proxy_fstat(FsContext *fs_ctx, int fid_type,
+                       V9fsFidOpenState *fs, struct stat *stbuf)
+{
+    int fd;
+
+    if (fid_type == P9_FID_DIR) {
+        fd = dirfd(fs->dir.stream);
+    } else {
+        fd = fs->fd;
+    }
+    return fstat(fd, stbuf);
+}
+
+static int proxy_open2(FsContext *fs_ctx, V9fsPath *dir_path, const char *name,
+                       int flags, FsCred *credp, V9fsFidOpenState *fs)
+{
+    V9fsString fullname;
+
+    v9fs_string_init(&fullname);
+    v9fs_string_sprintf(&fullname, "%s/%s", dir_path->data, name);
+
+    fs->fd = v9fs_request(fs_ctx->private, T_CREATE, NULL, &fullname, flags,
+                          credp->fc_mode, credp->fc_uid, credp->fc_gid);
+    v9fs_string_free(&fullname);
+    if (fs->fd < 0) {
+        errno = -fs->fd;
+        fs->fd = -1;
+    }
+    return fs->fd;
+}
+
+static int proxy_symlink(FsContext *fs_ctx, const char *oldpath,
+                         V9fsPath *dir_path, const char *name, FsCred *credp)
+{
+    int retval;
+    V9fsString fullname, target;
+
+    v9fs_string_init(&fullname);
+    v9fs_string_init(&target);
+
+    v9fs_string_sprintf(&fullname, "%s/%s", dir_path->data, name);
+    v9fs_string_sprintf(&target, "%s", oldpath);
+
+    retval = v9fs_request(fs_ctx->private, T_SYMLINK, NULL, &target, &fullname,
+                          credp->fc_uid, credp->fc_gid);
+    v9fs_string_free(&fullname);
+    v9fs_string_free(&target);
+    if (retval < 0) {
+        errno = -retval;
+        retval = -1;
+    }
+    return retval;
+}
+
+static int proxy_link(FsContext *ctx, V9fsPath *oldpath,
+                      V9fsPath *dirpath, const char *name)
+{
+    int retval;
+    V9fsString newpath;
+
+    v9fs_string_init(&newpath);
+    v9fs_string_sprintf(&newpath, "%s/%s", dirpath->data, name);
+
+    retval = v9fs_request(ctx->private, T_LINK, NULL, oldpath, &newpath);
+    v9fs_string_free(&newpath);
+    if (retval < 0) {
+        errno = -retval;
+        retval = -1;
+    }
+    return retval;
+}
+
+static int proxy_truncate(FsContext *ctx, V9fsPath *fs_path, off_t size)
+{
+    int retval;
+
+    retval = v9fs_request(ctx->private, T_TRUNCATE, NULL, fs_path, size);
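+    /*
+     * T_TRUNCATE carries no response payload, hence the NULL response
+     * buffer: the helper only sends back a status header, which
+     * v9fs_request() turns into 0 or a negative errno. A sketch of the
+     * equivalent direct call this replaces on a local export would be
+     * (illustrative only; host_path is hypothetical):
+     *
+     *     if (truncate(host_path, size) < 0) {
+     *         return -errno;
+     *     }
+     *     return 0;
+     */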
+ if (retval < 0) { + errno = -retval; + return -1; + } + return 0; +} + +static int proxy_rename(FsContext *ctx, const char *oldpath, + const char *newpath) +{ + int retval; + V9fsString oldname, newname; + + v9fs_string_init(&oldname); + v9fs_string_init(&newname); + + v9fs_string_sprintf(&oldname, "%s", oldpath); + v9fs_string_sprintf(&newname, "%s", newpath); + retval = v9fs_request(ctx->private, T_RENAME, NULL, &oldname, &newname); + v9fs_string_free(&oldname); + v9fs_string_free(&newname); + if (retval < 0) { + errno = -retval; + } + return retval; +} + +static int proxy_chown(FsContext *fs_ctx, V9fsPath *fs_path, FsCred *credp) +{ + int retval; + retval = v9fs_request(fs_ctx->private, T_CHOWN, NULL, fs_path, + credp->fc_uid, credp->fc_gid); + if (retval < 0) { + errno = -retval; + } + return retval; +} + +static int proxy_utimensat(FsContext *s, V9fsPath *fs_path, + const struct timespec *buf) +{ + int retval; + retval = v9fs_request(s->private, T_UTIME, NULL, fs_path, + buf[0].tv_sec, buf[0].tv_nsec, + buf[1].tv_sec, buf[1].tv_nsec); + if (retval < 0) { + errno = -retval; + } + return retval; +} + +static int proxy_remove(FsContext *ctx, const char *path) +{ + int retval; + V9fsString name; + v9fs_string_init(&name); + v9fs_string_sprintf(&name, "%s", path); + retval = v9fs_request(ctx->private, T_REMOVE, NULL, &name); + v9fs_string_free(&name); + if (retval < 0) { + errno = -retval; + } + return retval; +} + +static int proxy_fsync(FsContext *ctx, int fid_type, + V9fsFidOpenState *fs, int datasync) +{ + int fd; + + if (fid_type == P9_FID_DIR) { + fd = dirfd(fs->dir.stream); + } else { + fd = fs->fd; + } + + if (datasync) { + return qemu_fdatasync(fd); + } else { + return fsync(fd); + } +} + +static int proxy_statfs(FsContext *s, V9fsPath *fs_path, struct statfs *stbuf) +{ + int retval; + retval = v9fs_request(s->private, T_STATFS, stbuf, fs_path); + if (retval < 0) { + errno = -retval; + return -1; + } + return retval; +} + +static ssize_t proxy_lgetxattr(FsContext *ctx, V9fsPath *fs_path, + const char *name, void *value, size_t size) +{ + int retval; + V9fsString xname; + + v9fs_string_init(&xname); + v9fs_string_sprintf(&xname, "%s", name); + retval = v9fs_request(ctx->private, T_LGETXATTR, value, size, fs_path, + &xname); + v9fs_string_free(&xname); + if (retval < 0) { + errno = -retval; + } + return retval; +} + +static ssize_t proxy_llistxattr(FsContext *ctx, V9fsPath *fs_path, + void *value, size_t size) +{ + int retval; + retval = v9fs_request(ctx->private, T_LLISTXATTR, value, size, fs_path); + if (retval < 0) { + errno = -retval; + } + return retval; +} + +static int proxy_lsetxattr(FsContext *ctx, V9fsPath *fs_path, const char *name, + void *value, size_t size, int flags) +{ + int retval; + V9fsString xname, xvalue; + + v9fs_string_init(&xname); + v9fs_string_sprintf(&xname, "%s", name); + + v9fs_string_init(&xvalue); + xvalue.size = size; + xvalue.data = g_malloc(size); + memcpy(xvalue.data, value, size); + + retval = v9fs_request(ctx->private, T_LSETXATTR, value, fs_path, &xname, + &xvalue, size, flags); + v9fs_string_free(&xname); + v9fs_string_free(&xvalue); + if (retval < 0) { + errno = -retval; + } + return retval; +} + +static int proxy_lremovexattr(FsContext *ctx, V9fsPath *fs_path, + const char *name) +{ + int retval; + V9fsString xname; + + v9fs_string_init(&xname); + v9fs_string_sprintf(&xname, "%s", name); + retval = v9fs_request(ctx->private, T_LREMOVEXATTR, NULL, fs_path, &xname); + v9fs_string_free(&xname); + if (retval < 0) { + errno = -retval; + } + 
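+    /*
+     * All string arguments cross the proxy as V9fsString, with the
+     * init/sprintf/free pattern used above:
+     *
+     *     V9fsString s;
+     *
+     *     v9fs_string_init(&s);
+     *     v9fs_string_sprintf(&s, "%s", name);   // heap-allocates s.data
+     *     ...pass &s to v9fs_request()...
+     *     v9fs_string_free(&s);                  // always paired
+     */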
return retval; +} + +static int proxy_name_to_path(FsContext *ctx, V9fsPath *dir_path, + const char *name, V9fsPath *target) +{ + if (dir_path) { + v9fs_path_sprintf(target, "%s/%s", dir_path->data, name); + } else { + v9fs_path_sprintf(target, "%s", name); + } + return 0; +} + +static int proxy_renameat(FsContext *ctx, V9fsPath *olddir, + const char *old_name, V9fsPath *newdir, + const char *new_name) +{ + int ret; + V9fsString old_full_name, new_full_name; + + v9fs_string_init(&old_full_name); + v9fs_string_init(&new_full_name); + + v9fs_string_sprintf(&old_full_name, "%s/%s", olddir->data, old_name); + v9fs_string_sprintf(&new_full_name, "%s/%s", newdir->data, new_name); + + ret = proxy_rename(ctx, old_full_name.data, new_full_name.data); + v9fs_string_free(&old_full_name); + v9fs_string_free(&new_full_name); + return ret; +} + +static int proxy_unlinkat(FsContext *ctx, V9fsPath *dir, + const char *name, int flags) +{ + int ret; + V9fsString fullname; + v9fs_string_init(&fullname); + + v9fs_string_sprintf(&fullname, "%s/%s", dir->data, name); + ret = proxy_remove(ctx, fullname.data); + v9fs_string_free(&fullname); + + return ret; +} + +static int proxy_ioc_getversion(FsContext *fs_ctx, V9fsPath *path, + mode_t st_mode, uint64_t *st_gen) +{ + int err; + + /* Do not try to open special files like device nodes, fifos etc + * we can get fd for regular files and directories only + */ + if (!S_ISREG(st_mode) && !S_ISDIR(st_mode)) { + errno = ENOTTY; + return -1; + } + err = v9fs_request(fs_ctx->private, T_GETVERSION, st_gen, path); + if (err < 0) { + errno = -err; + err = -1; + } + return err; +} + +static int connect_namedsocket(const char *path, Error **errp) +{ + int sockfd; + struct sockaddr_un helper; + + if (strlen(path) >= sizeof(helper.sun_path)) { + error_setg(errp, "socket name too long"); + return -1; + } + sockfd = socket(AF_UNIX, SOCK_STREAM, 0); + if (sockfd < 0) { + error_setg_errno(errp, errno, "failed to create client socket"); + return -1; + } + strcpy(helper.sun_path, path); + helper.sun_family = AF_UNIX; + if (connect(sockfd, (struct sockaddr *)&helper, sizeof(helper)) < 0) { + error_setg_errno(errp, errno, "failed to connect to '%s'", path); + close(sockfd); + return -1; + } + + /* remove the socket for security reasons */ + unlink(path); + return sockfd; +} + +static void error_append_socket_sockfd_hint(Error *const *errp) +{ + error_append_hint(errp, "Either specify socket=/some/path where /some/path" + " points to a listening AF_UNIX socket or sock_fd=fd" + " where fd is a file descriptor to a connected AF_UNIX" + " socket\n"); +} + +static int proxy_parse_opts(QemuOpts *opts, FsDriverEntry *fs, Error **errp) +{ + const char *socket = qemu_opt_get(opts, "socket"); + const char *sock_fd = qemu_opt_get(opts, "sock_fd"); + + if (!socket && !sock_fd) { + error_setg(errp, "both socket and sock_fd properties are missing"); + error_append_socket_sockfd_hint(errp); + return -1; + } + if (socket && sock_fd) { + error_setg(errp, "both socket and sock_fd properties are set"); + error_append_socket_sockfd_hint(errp); + return -1; + } + if (socket) { + fs->path = g_strdup(socket); + fs->export_flags |= V9FS_PROXY_SOCK_NAME; + } else { + fs->path = g_strdup(sock_fd); + fs->export_flags |= V9FS_PROXY_SOCK_FD; + } + return 0; +} + +static int proxy_init(FsContext *ctx, Error **errp) +{ + V9fsProxy *proxy = g_malloc(sizeof(V9fsProxy)); + int sock_id; + + if (ctx->export_flags & V9FS_PROXY_SOCK_NAME) { + sock_id = connect_namedsocket(ctx->fs_root, errp); + } else { + sock_id = 
atoi(ctx->fs_root); + if (sock_id < 0) { + error_setg(errp, "socket descriptor not initialized"); + } + } + if (sock_id < 0) { + g_free(proxy); + return -1; + } + g_free(ctx->fs_root); + ctx->fs_root = NULL; + + proxy->in_iovec.iov_base = g_malloc(PROXY_MAX_IO_SZ + PROXY_HDR_SZ); + proxy->in_iovec.iov_len = PROXY_MAX_IO_SZ + PROXY_HDR_SZ; + proxy->out_iovec.iov_base = g_malloc(PROXY_MAX_IO_SZ + PROXY_HDR_SZ); + proxy->out_iovec.iov_len = PROXY_MAX_IO_SZ + PROXY_HDR_SZ; + + ctx->private = proxy; + proxy->sockfd = sock_id; + qemu_mutex_init(&proxy->mutex); + + ctx->export_flags |= V9FS_PATHNAME_FSCONTEXT; + ctx->exops.get_st_gen = proxy_ioc_getversion; + return 0; +} + +static void proxy_cleanup(FsContext *ctx) +{ + V9fsProxy *proxy = ctx->private; + + if (!proxy) { + return; + } + + g_free(proxy->out_iovec.iov_base); + g_free(proxy->in_iovec.iov_base); + if (ctx->export_flags & V9FS_PROXY_SOCK_NAME) { + close(proxy->sockfd); + } + g_free(proxy); +} + +FileOperations proxy_ops = { + .parse_opts = proxy_parse_opts, + .init = proxy_init, + .cleanup = proxy_cleanup, + .lstat = proxy_lstat, + .readlink = proxy_readlink, + .close = proxy_close, + .closedir = proxy_closedir, + .open = proxy_open, + .opendir = proxy_opendir, + .rewinddir = proxy_rewinddir, + .telldir = proxy_telldir, + .readdir = proxy_readdir, + .seekdir = proxy_seekdir, + .preadv = proxy_preadv, + .pwritev = proxy_pwritev, + .chmod = proxy_chmod, + .mknod = proxy_mknod, + .mkdir = proxy_mkdir, + .fstat = proxy_fstat, + .open2 = proxy_open2, + .symlink = proxy_symlink, + .link = proxy_link, + .truncate = proxy_truncate, + .rename = proxy_rename, + .chown = proxy_chown, + .utimensat = proxy_utimensat, + .remove = proxy_remove, + .fsync = proxy_fsync, + .statfs = proxy_statfs, + .lgetxattr = proxy_lgetxattr, + .llistxattr = proxy_llistxattr, + .lsetxattr = proxy_lsetxattr, + .lremovexattr = proxy_lremovexattr, + .name_to_path = proxy_name_to_path, + .renameat = proxy_renameat, + .unlinkat = proxy_unlinkat, +}; diff --git a/hw/9pfs/9p-proxy.h b/hw/9pfs/9p-proxy.h new file mode 100644 index 000000000..b84301d00 --- /dev/null +++ b/hw/9pfs/9p-proxy.h @@ -0,0 +1,96 @@ +/* + * 9p Proxy callback + * + * Copyright IBM, Corp. 2011 + * + * Authors: + * M. Mohan Kumar + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + */ + +#ifndef QEMU_9P_PROXY_H +#define QEMU_9P_PROXY_H + +#define PROXY_MAX_IO_SZ (64 * 1024) +#define V9FS_FD_VALID INT_MAX + +/* + * proxy iovec only supports one element and + * marshal/unmarshal don't do little endian conversion. + */ +#define proxy_unmarshal(in_sg, offset, fmt, args...) \ + v9fs_iov_unmarshal(in_sg, 1, offset, 0, fmt, ##args) +#define proxy_marshal(out_sg, offset, fmt, args...)
\ + v9fs_iov_marshal(out_sg, 1, offset, 0, fmt, ##args) + +union MsgControl { + struct cmsghdr cmsg; + char control[CMSG_SPACE(sizeof(int))]; +}; + +typedef struct { + uint32_t type; + uint32_t size; +} ProxyHeader; + +#define PROXY_HDR_SZ (sizeof(ProxyHeader)) + +enum { + T_SUCCESS = 0, + T_ERROR, + T_OPEN, + T_CREATE, + T_MKNOD, + T_MKDIR, + T_SYMLINK, + T_LINK, + T_LSTAT, + T_READLINK, + T_STATFS, + T_CHMOD, + T_CHOWN, + T_TRUNCATE, + T_UTIME, + T_RENAME, + T_REMOVE, + T_LGETXATTR, + T_LLISTXATTR, + T_LSETXATTR, + T_LREMOVEXATTR, + T_GETVERSION, +}; + +typedef struct { + uint64_t st_dev; + uint64_t st_ino; + uint64_t st_nlink; + uint32_t st_mode; + uint32_t st_uid; + uint32_t st_gid; + uint64_t st_rdev; + uint64_t st_size; + uint64_t st_blksize; + uint64_t st_blocks; + uint64_t st_atim_sec; + uint64_t st_atim_nsec; + uint64_t st_mtim_sec; + uint64_t st_mtim_nsec; + uint64_t st_ctim_sec; + uint64_t st_ctim_nsec; +} ProxyStat; + +typedef struct { + uint64_t f_type; + uint64_t f_bsize; + uint64_t f_blocks; + uint64_t f_bfree; + uint64_t f_bavail; + uint64_t f_files; + uint64_t f_ffree; + uint64_t f_fsid[2]; + uint64_t f_namelen; + uint64_t f_frsize; +} ProxyStatFS; +#endif diff --git a/hw/9pfs/9p-synth.c b/hw/9pfs/9p-synth.c new file mode 100644 index 000000000..b38088e06 --- /dev/null +++ b/hw/9pfs/9p-synth.c @@ -0,0 +1,641 @@ +/* + * 9p synthetic file system support + * + * Copyright IBM, Corp. 2011 + * + * Authors: + * Malahal Naineni + * Aneesh Kumar K.V + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + * + */ + +/* + * Not so fast! You might want to read the 9p developer docs first: + * https://wiki.qemu.org/Documentation/9p + */ + +#include "qemu/osdep.h" +#include "9p.h" +#include "fsdev/qemu-fsdev.h" +#include "9p-synth.h" +#include "qemu/rcu.h" +#include "qemu/rcu_queue.h" +#include "qemu/cutils.h" +#include "sysemu/qtest.h" + +/* Root node for synth file system */ +static V9fsSynthNode synth_root = { + .name = "/", + .actual_attr = { + .mode = 0555 | S_IFDIR, + .nlink = 1, + }, + .attr = &synth_root.actual_attr, +}; + +static QemuMutex synth_mutex; +static int synth_node_count; +/* set to 1 when the synth fs is ready */ +static int synth_fs; + +static V9fsSynthNode *v9fs_add_dir_node(V9fsSynthNode *parent, int mode, + const char *name, + V9fsSynthNodeAttr *attr, int inode) +{ + V9fsSynthNode *node; + + /* Add directory type and remove write bits */ + mode = ((mode & 0777) | S_IFDIR) & ~(S_IWUSR | S_IWGRP | S_IWOTH); + node = g_malloc0(sizeof(V9fsSynthNode)); + if (attr) { + /* We are adding .. or . 
entries */ + node->attr = attr; + node->attr->nlink++; + } else { + node->attr = &node->actual_attr; + node->attr->inode = inode; + node->attr->nlink = 1; + /* We don't allow write to directories */ + node->attr->mode = mode; + node->attr->write = NULL; + node->attr->read = NULL; + } + node->private = node; + pstrcpy(node->name, sizeof(node->name), name); + QLIST_INSERT_HEAD_RCU(&parent->child, node, sibling); + return node; +} + +int qemu_v9fs_synth_mkdir(V9fsSynthNode *parent, int mode, + const char *name, V9fsSynthNode **result) +{ + int ret; + V9fsSynthNode *node, *tmp; + + if (!synth_fs) { + return EAGAIN; + } + if (!name || (strlen(name) >= NAME_MAX)) { + return EINVAL; + } + if (!parent) { + parent = &synth_root; + } + QEMU_LOCK_GUARD(&synth_mutex); + QLIST_FOREACH(tmp, &parent->child, sibling) { + if (!strcmp(tmp->name, name)) { + ret = EEXIST; + return ret; + } + } + /* Add the name */ + node = v9fs_add_dir_node(parent, mode, name, NULL, synth_node_count++); + v9fs_add_dir_node(node, parent->attr->mode, "..", + parent->attr, parent->attr->inode); + v9fs_add_dir_node(node, node->attr->mode, ".", + node->attr, node->attr->inode); + *result = node; + ret = 0; + return ret; +} + +int qemu_v9fs_synth_add_file(V9fsSynthNode *parent, int mode, + const char *name, v9fs_synth_read read, + v9fs_synth_write write, void *arg) +{ + int ret; + V9fsSynthNode *node, *tmp; + + if (!synth_fs) { + return EAGAIN; + } + if (!name || (strlen(name) >= NAME_MAX)) { + return EINVAL; + } + if (!parent) { + parent = &synth_root; + } + + QEMU_LOCK_GUARD(&synth_mutex); + QLIST_FOREACH(tmp, &parent->child, sibling) { + if (!strcmp(tmp->name, name)) { + ret = EEXIST; + return ret; + } + } + /* Add file type and remove write bits */ + mode = ((mode & 0777) | S_IFREG); + node = g_malloc0(sizeof(V9fsSynthNode)); + node->attr = &node->actual_attr; + node->attr->inode = synth_node_count++; + node->attr->nlink = 1; + node->attr->read = read; + node->attr->write = write; + node->attr->mode = mode; + node->private = arg; + pstrcpy(node->name, sizeof(node->name), name); + QLIST_INSERT_HEAD_RCU(&parent->child, node, sibling); + ret = 0; + return ret; +} + +static void synth_fill_statbuf(V9fsSynthNode *node, struct stat *stbuf) +{ + stbuf->st_dev = 0; + stbuf->st_ino = node->attr->inode; + stbuf->st_mode = node->attr->mode; + stbuf->st_nlink = node->attr->nlink; + stbuf->st_uid = 0; + stbuf->st_gid = 0; + stbuf->st_rdev = 0; + stbuf->st_size = 0; + stbuf->st_blksize = 0; + stbuf->st_blocks = 0; + stbuf->st_atime = 0; + stbuf->st_mtime = 0; + stbuf->st_ctime = 0; +} + +static int synth_lstat(FsContext *fs_ctx, + V9fsPath *fs_path, struct stat *stbuf) +{ + V9fsSynthNode *node = *(V9fsSynthNode **)fs_path->data; + + synth_fill_statbuf(node, stbuf); + return 0; +} + +static int synth_fstat(FsContext *fs_ctx, int fid_type, + V9fsFidOpenState *fs, struct stat *stbuf) +{ + V9fsSynthOpenState *synth_open = fs->private; + synth_fill_statbuf(synth_open->node, stbuf); + return 0; +} + +static int synth_opendir(FsContext *ctx, + V9fsPath *fs_path, V9fsFidOpenState *fs) +{ + V9fsSynthOpenState *synth_open; + V9fsSynthNode *node = *(V9fsSynthNode **)fs_path->data; + + synth_open = g_malloc(sizeof(*synth_open)); + synth_open->node = node; + node->open_count++; + fs->private = synth_open; + return 0; +} + +static int synth_closedir(FsContext *ctx, V9fsFidOpenState *fs) +{ + V9fsSynthOpenState *synth_open = fs->private; + V9fsSynthNode *node = synth_open->node; + + node->open_count--; + g_free(synth_open); + fs->private = NULL; + return 
0; +} + +static off_t synth_telldir(FsContext *ctx, V9fsFidOpenState *fs) +{ + V9fsSynthOpenState *synth_open = fs->private; + return synth_open->offset; +} + +static void synth_seekdir(FsContext *ctx, V9fsFidOpenState *fs, off_t off) +{ + V9fsSynthOpenState *synth_open = fs->private; + synth_open->offset = off; +} + +static void synth_rewinddir(FsContext *ctx, V9fsFidOpenState *fs) +{ + synth_seekdir(ctx, fs, 0); +} + +static void synth_direntry(V9fsSynthNode *node, + struct dirent *entry, off_t off) +{ + strcpy(entry->d_name, node->name); + entry->d_ino = node->attr->inode; + entry->d_off = off + 1; +} + +static struct dirent *synth_get_dentry(V9fsSynthNode *dir, + struct dirent *entry, off_t off) +{ + int i = 0; + V9fsSynthNode *node; + + rcu_read_lock(); + QLIST_FOREACH(node, &dir->child, sibling) { + /* This is the off child of the directory */ + if (i == off) { + break; + } + i++; + } + rcu_read_unlock(); + if (!node) { + /* end of directory */ + return NULL; + } + synth_direntry(node, entry, off); + return entry; +} + +static struct dirent *synth_readdir(FsContext *ctx, V9fsFidOpenState *fs) +{ + struct dirent *entry; + V9fsSynthOpenState *synth_open = fs->private; + V9fsSynthNode *node = synth_open->node; + entry = synth_get_dentry(node, &synth_open->dent, synth_open->offset); + if (entry) { + synth_open->offset++; + } + return entry; +} + +static int synth_open(FsContext *ctx, V9fsPath *fs_path, + int flags, V9fsFidOpenState *fs) +{ + V9fsSynthOpenState *synth_open; + V9fsSynthNode *node = *(V9fsSynthNode **)fs_path->data; + + synth_open = g_malloc(sizeof(*synth_open)); + synth_open->node = node; + node->open_count++; + fs->private = synth_open; + return 0; +} + +static int synth_open2(FsContext *fs_ctx, V9fsPath *dir_path, + const char *name, int flags, + FsCred *credp, V9fsFidOpenState *fs) +{ + errno = ENOSYS; + return -1; +} + +static int synth_close(FsContext *ctx, V9fsFidOpenState *fs) +{ + V9fsSynthOpenState *synth_open = fs->private; + V9fsSynthNode *node = synth_open->node; + + node->open_count--; + g_free(synth_open); + fs->private = NULL; + return 0; +} + +static ssize_t synth_pwritev(FsContext *ctx, V9fsFidOpenState *fs, + const struct iovec *iov, + int iovcnt, off_t offset) +{ + int i, count = 0, wcount; + V9fsSynthOpenState *synth_open = fs->private; + V9fsSynthNode *node = synth_open->node; + if (!node->attr->write) { + errno = EPERM; + return -1; + } + for (i = 0; i < iovcnt; i++) { + wcount = node->attr->write(iov[i].iov_base, iov[i].iov_len, + offset, node->private); + offset += wcount; + count += wcount; + /* If we wrote less than requested. we are done */ + if (wcount < iov[i].iov_len) { + break; + } + } + return count; +} + +static ssize_t synth_preadv(FsContext *ctx, V9fsFidOpenState *fs, + const struct iovec *iov, + int iovcnt, off_t offset) +{ + int i, count = 0, rcount; + V9fsSynthOpenState *synth_open = fs->private; + V9fsSynthNode *node = synth_open->node; + if (!node->attr->read) { + errno = EPERM; + return -1; + } + for (i = 0; i < iovcnt; i++) { + rcount = node->attr->read(iov[i].iov_base, iov[i].iov_len, + offset, node->private); + offset += rcount; + count += rcount; + /* If we read less than requested. 
we are done */ + if (rcount < iov[i].iov_len) { + break; + } + } + return count; +} + +static int synth_truncate(FsContext *ctx, V9fsPath *path, off_t offset) +{ + errno = ENOSYS; + return -1; +} + +static int synth_chmod(FsContext *fs_ctx, V9fsPath *path, FsCred *credp) +{ + errno = EPERM; + return -1; +} + +static int synth_mknod(FsContext *fs_ctx, V9fsPath *path, + const char *buf, FsCred *credp) +{ + errno = EPERM; + return -1; +} + +static int synth_mkdir(FsContext *fs_ctx, V9fsPath *path, + const char *buf, FsCred *credp) +{ + errno = EPERM; + return -1; +} + +static ssize_t synth_readlink(FsContext *fs_ctx, V9fsPath *path, + char *buf, size_t bufsz) +{ + errno = ENOSYS; + return -1; +} + +static int synth_symlink(FsContext *fs_ctx, const char *oldpath, + V9fsPath *newpath, const char *buf, FsCred *credp) +{ + errno = EPERM; + return -1; +} + +static int synth_link(FsContext *fs_ctx, V9fsPath *oldpath, + V9fsPath *newpath, const char *buf) +{ + errno = EPERM; + return -1; +} + +static int synth_rename(FsContext *ctx, const char *oldpath, + const char *newpath) +{ + errno = EPERM; + return -1; +} + +static int synth_chown(FsContext *fs_ctx, V9fsPath *path, FsCred *credp) +{ + errno = EPERM; + return -1; +} + +static int synth_utimensat(FsContext *fs_ctx, V9fsPath *path, + const struct timespec *buf) +{ + errno = EPERM; + return 0; +} + +static int synth_remove(FsContext *ctx, const char *path) +{ + errno = EPERM; + return -1; +} + +static int synth_fsync(FsContext *ctx, int fid_type, + V9fsFidOpenState *fs, int datasync) +{ + errno = ENOSYS; + return 0; +} + +static int synth_statfs(FsContext *s, V9fsPath *fs_path, + struct statfs *stbuf) +{ + stbuf->f_type = 0xABCD; + stbuf->f_bsize = 512; + stbuf->f_blocks = 0; + stbuf->f_files = synth_node_count; + stbuf->f_namelen = NAME_MAX; + return 0; +} + +static ssize_t synth_lgetxattr(FsContext *ctx, V9fsPath *path, + const char *name, void *value, size_t size) +{ + errno = ENOTSUP; + return -1; +} + +static ssize_t synth_llistxattr(FsContext *ctx, V9fsPath *path, + void *value, size_t size) +{ + errno = ENOTSUP; + return -1; +} + +static int synth_lsetxattr(FsContext *ctx, V9fsPath *path, + const char *name, void *value, + size_t size, int flags) +{ + errno = ENOTSUP; + return -1; +} + +static int synth_lremovexattr(FsContext *ctx, + V9fsPath *path, const char *name) +{ + errno = ENOTSUP; + return -1; +} + +static int synth_name_to_path(FsContext *ctx, V9fsPath *dir_path, + const char *name, V9fsPath *target) +{ + V9fsSynthNode *node; + V9fsSynthNode *dir_node; + + /* "." and ".." 
are not allowed */ + if (!strcmp(name, ".") || !strcmp(name, "..")) { + errno = EINVAL; + return -1; + + } + if (!dir_path) { + dir_node = &synth_root; + } else { + dir_node = *(V9fsSynthNode **)dir_path->data; + } + if (!strcmp(name, "/")) { + node = dir_node; + goto out; + } + /* search for the name in the children */ + rcu_read_lock(); + QLIST_FOREACH(node, &dir_node->child, sibling) { + if (!strcmp(node->name, name)) { + break; + } + } + rcu_read_unlock(); + + if (!node) { + errno = ENOENT; + return -1; + } +out: + /* Copy the node pointer to fid */ + g_free(target->data); + target->data = g_memdup(&node, sizeof(void *)); + target->size = sizeof(void *); + return 0; +} + +static int synth_renameat(FsContext *ctx, V9fsPath *olddir, + const char *old_name, V9fsPath *newdir, + const char *new_name) +{ + errno = EPERM; + return -1; +} + +static int synth_unlinkat(FsContext *ctx, V9fsPath *dir, + const char *name, int flags) +{ + errno = EPERM; + return -1; +} + +static ssize_t v9fs_synth_qtest_write(void *buf, int len, off_t offset, + void *arg) +{ + return 1; +} + +static ssize_t v9fs_synth_qtest_flush_write(void *buf, int len, off_t offset, + void *arg) +{ + bool should_block = !!*(uint8_t *)buf; + + if (should_block) { + /* This will cause the server to call us again until we're cancelled */ + errno = EINTR; + return -1; + } + + return 1; +} + +static int synth_init(FsContext *ctx, Error **errp) +{ + QLIST_INIT(&synth_root.child); + qemu_mutex_init(&synth_mutex); + + /* Add "." and ".." entries for root */ + v9fs_add_dir_node(&synth_root, synth_root.attr->mode, + "..", synth_root.attr, synth_root.attr->inode); + v9fs_add_dir_node(&synth_root, synth_root.attr->mode, + ".", synth_root.attr, synth_root.attr->inode); + + /* Mark the subsystem as ready for use */ + synth_fs = 1; + + if (qtest_enabled()) { + V9fsSynthNode *node = NULL; + int i, ret; + + /* Directory hierarchy for WALK test */ + for (i = 0; i < P9_MAXWELEM; i++) { + char *name = g_strdup_printf(QTEST_V9FS_SYNTH_WALK_FILE, i); + + ret = qemu_v9fs_synth_mkdir(node, 0700, name, &node); + assert(!ret); + g_free(name); + } + + /* File for LOPEN test */ + ret = qemu_v9fs_synth_add_file(NULL, 0, QTEST_V9FS_SYNTH_LOPEN_FILE, + NULL, NULL, ctx); + assert(!ret); + + /* File for WRITE test */ + ret = qemu_v9fs_synth_add_file(NULL, 0, QTEST_V9FS_SYNTH_WRITE_FILE, + NULL, v9fs_synth_qtest_write, ctx); + assert(!ret); + + /* File for FLUSH test */ + ret = qemu_v9fs_synth_add_file(NULL, 0, QTEST_V9FS_SYNTH_FLUSH_FILE, + NULL, v9fs_synth_qtest_flush_write, + ctx); + assert(!ret); + + /* Directory for READDIR test */ + { + V9fsSynthNode *dir = NULL; + ret = qemu_v9fs_synth_mkdir( + NULL, 0700, QTEST_V9FS_SYNTH_READDIR_DIR, &dir + ); + assert(!ret); + for (i = 0; i < QTEST_V9FS_SYNTH_READDIR_NFILES; ++i) { + char *name = g_strdup_printf( + QTEST_V9FS_SYNTH_READDIR_FILE, i + ); + ret = qemu_v9fs_synth_add_file( + dir, 0, name, NULL, NULL, ctx + ); + assert(!ret); + g_free(name); + } + } + } + + return 0; +} + +FileOperations synth_ops = { + .init = synth_init, + .lstat = synth_lstat, + .readlink = synth_readlink, + .close = synth_close, + .closedir = synth_closedir, + .open = synth_open, + .opendir = synth_opendir, + .rewinddir = synth_rewinddir, + .telldir = synth_telldir, + .readdir = synth_readdir, + .seekdir = synth_seekdir, + .preadv = synth_preadv, + .pwritev = synth_pwritev, + .chmod = synth_chmod, + .mknod = synth_mknod, + .mkdir = synth_mkdir, + .fstat = synth_fstat, + .open2 = synth_open2, + .symlink = synth_symlink, + .link =
synth_link, + .truncate = synth_truncate, + .rename = synth_rename, + .chown = synth_chown, + .utimensat = synth_utimensat, + .remove = synth_remove, + .fsync = synth_fsync, + .statfs = synth_statfs, + .lgetxattr = synth_lgetxattr, + .llistxattr = synth_llistxattr, + .lsetxattr = synth_lsetxattr, + .lremovexattr = synth_lremovexattr, + .name_to_path = synth_name_to_path, + .renameat = synth_renameat, + .unlinkat = synth_unlinkat, +}; diff --git a/hw/9pfs/9p-synth.h b/hw/9pfs/9p-synth.h new file mode 100644 index 000000000..036d7e4a5 --- /dev/null +++ b/hw/9pfs/9p-synth.h @@ -0,0 +1,70 @@ +/* + * 9p + * + * Copyright IBM, Corp. 2011 + * + * Authors: + * Aneesh Kumar K.V + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + * + */ + +#ifndef QEMU_9P_SYNTH_H +#define QEMU_9P_SYNTH_H + +typedef struct V9fsSynthNode V9fsSynthNode; +typedef ssize_t (*v9fs_synth_read)(void *buf, int len, off_t offset, + void *arg); +typedef ssize_t (*v9fs_synth_write)(void *buf, int len, off_t offset, + void *arg); +typedef struct V9fsSynthNodeAttr { + int mode; + int inode; + int nlink; + v9fs_synth_read read; + v9fs_synth_write write; +} V9fsSynthNodeAttr; + +struct V9fsSynthNode { + QLIST_HEAD(, V9fsSynthNode) child; + QLIST_ENTRY(V9fsSynthNode) sibling; + char name[NAME_MAX]; + V9fsSynthNodeAttr *attr; + V9fsSynthNodeAttr actual_attr; + void *private; + int open_count; +}; + +typedef struct V9fsSynthOpenState { + off_t offset; + V9fsSynthNode *node; + struct dirent dent; +} V9fsSynthOpenState; + +int qemu_v9fs_synth_mkdir(V9fsSynthNode *parent, int mode, + const char *name, V9fsSynthNode **result); +int qemu_v9fs_synth_add_file(V9fsSynthNode *parent, int mode, + const char *name, v9fs_synth_read read, + v9fs_synth_write write, void *arg); + +/* qtest stuff */ + +#define QTEST_V9FS_SYNTH_WALK_FILE "WALK%d" +#define QTEST_V9FS_SYNTH_LOPEN_FILE "LOPEN" +#define QTEST_V9FS_SYNTH_WRITE_FILE "WRITE" + +/* for READDIR test */ +#define QTEST_V9FS_SYNTH_READDIR_DIR "ReadDirDir" +#define QTEST_V9FS_SYNTH_READDIR_FILE "ReadDirFile%d" +#define QTEST_V9FS_SYNTH_READDIR_NFILES 100 + +/* Any write to the "FLUSH" file is handled one byte at a time by the + * backend. If the byte is zero, the backend returns success (ie, 1), + * otherwise it forces the server to try again forever. Thus allowing + * the client to cancel the request. + */ +#define QTEST_V9FS_SYNTH_FLUSH_FILE "FLUSH" + +#endif diff --git a/hw/9pfs/9p-util.c b/hw/9pfs/9p-util.c new file mode 100644 index 000000000..3221d9b49 --- /dev/null +++ b/hw/9pfs/9p-util.c @@ -0,0 +1,64 @@ +/* + * 9p utilities + * + * Copyright IBM, Corp. 2017 + * + * Authors: + * Greg Kurz + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +/* + * Not so fast! 
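A minimal usage sketch for the synth API declared above (illustration only; the "greeting" file, greeting_read() and register_greeting_file() are invented names): a backend registers a read-only file under the synth root, and short reads from the callback terminate the synth_preadv() loop shown earlier.

static ssize_t greeting_read(void *buf, int len, off_t offset, void *arg)
{
    const char *msg = "hello from 9p-synth\n";
    int msg_len = (int)strlen(msg);

    if (offset >= msg_len) {
        return 0;                   /* EOF */
    }
    if (len > msg_len - offset) {
        len = msg_len - offset;     /* clamp to what is left */
    }
    memcpy(buf, msg + offset, len);
    return len;
}

static void register_greeting_file(void)
{
    /* parent == NULL attaches the file directly under the synth root */
    int ret = qemu_v9fs_synth_add_file(NULL, 0444, "greeting",
                                       greeting_read, NULL, NULL);
    assert(!ret);
}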
You might want to read the 9p developer docs first: + * https://wiki.qemu.org/Documentation/9p + */ + +#include "qemu/osdep.h" +#include "qemu/xattr.h" +#include "9p-util.h" + +ssize_t fgetxattrat_nofollow(int dirfd, const char *filename, const char *name, + void *value, size_t size) +{ + char *proc_path = g_strdup_printf("/proc/self/fd/%d/%s", dirfd, filename); + int ret; + + ret = lgetxattr(proc_path, name, value, size); + g_free(proc_path); + return ret; +} + +ssize_t flistxattrat_nofollow(int dirfd, const char *filename, + char *list, size_t size) +{ + char *proc_path = g_strdup_printf("/proc/self/fd/%d/%s", dirfd, filename); + int ret; + + ret = llistxattr(proc_path, list, size); + g_free(proc_path); + return ret; +} + +ssize_t fremovexattrat_nofollow(int dirfd, const char *filename, + const char *name) +{ + char *proc_path = g_strdup_printf("/proc/self/fd/%d/%s", dirfd, filename); + int ret; + + ret = lremovexattr(proc_path, name); + g_free(proc_path); + return ret; +} + +int fsetxattrat_nofollow(int dirfd, const char *filename, const char *name, + void *value, size_t size, int flags) +{ + char *proc_path = g_strdup_printf("/proc/self/fd/%d/%s", dirfd, filename); + int ret; + + ret = lsetxattr(proc_path, name, value, size, flags); + g_free(proc_path); + return ret; +} diff --git a/hw/9pfs/9p-util.h b/hw/9pfs/9p-util.h new file mode 100644 index 000000000..546f46dc7 --- /dev/null +++ b/hw/9pfs/9p-util.h @@ -0,0 +1,81 @@ +/* + * 9p utilities + * + * Copyright IBM, Corp. 2017 + * + * Authors: + * Greg Kurz + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#ifndef QEMU_9P_UTIL_H +#define QEMU_9P_UTIL_H + +#ifdef O_PATH +#define O_PATH_9P_UTIL O_PATH +#else +#define O_PATH_9P_UTIL 0 +#endif + +static inline void close_preserve_errno(int fd) +{ + int serrno = errno; + close(fd); + errno = serrno; +} + +static inline int openat_dir(int dirfd, const char *name) +{ + return openat(dirfd, name, + O_DIRECTORY | O_RDONLY | O_NOFOLLOW | O_PATH_9P_UTIL); +} + +static inline int openat_file(int dirfd, const char *name, int flags, + mode_t mode) +{ + int fd, serrno, ret; + +again: + fd = openat(dirfd, name, flags | O_NOFOLLOW | O_NOCTTY | O_NONBLOCK, + mode); + if (fd == -1) { + if (errno == EPERM && (flags & O_NOATIME)) { + /* + * The client passed O_NOATIME but we lack permissions to honor it. + * Rather than failing the open, fall back without O_NOATIME. This + * doesn't break the semantics on the client side, as the Linux + * open(2) man page notes that O_NOATIME "may not be effective on + * all filesystems". In particular, NFS and other network + * filesystems ignore it entirely. + */ + flags &= ~O_NOATIME; + goto again; + } + return -1; + } + + serrno = errno; + /* O_NONBLOCK was only needed to open the file. Let's drop it. We don't + * do that with O_PATH since fcntl(F_SETFL) isn't supported, and openat() + * ignored it anyway. 
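A short sketch tying the helpers above together (the directory name "exports" and the attribute "user.comment" are invented for illustration): open a directory fd without following symlinks, query an xattr on an entry beneath it through the /proc/self/fd indirection, and preserve errno across cleanup.

static ssize_t read_comment(int root_fd, void *buf, size_t bufsz)
{
    ssize_t ret;
    int dirfd = openat_dir(root_fd, "exports");

    if (dirfd == -1) {
        return -1;
    }
    /* resolves via /proc/self/fd/<dirfd>/file, so the final path
     * component is never dereferenced as a symlink */
    ret = fgetxattrat_nofollow(dirfd, "file", "user.comment", buf, bufsz);
    close_preserve_errno(dirfd);
    return ret;
}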
+ */ + if (!(flags & O_PATH_9P_UTIL)) { + ret = fcntl(fd, F_SETFL, flags); + assert(!ret); + } + errno = serrno; + return fd; +} + +ssize_t fgetxattrat_nofollow(int dirfd, const char *path, const char *name, + void *value, size_t size); +int fsetxattrat_nofollow(int dirfd, const char *path, const char *name, + void *value, size_t size, int flags); +ssize_t flistxattrat_nofollow(int dirfd, const char *filename, + char *list, size_t size); +ssize_t fremovexattrat_nofollow(int dirfd, const char *filename, + const char *name); + +#endif diff --git a/hw/9pfs/9p-xattr-user.c b/hw/9pfs/9p-xattr-user.c new file mode 100644 index 000000000..f2ae9582e --- /dev/null +++ b/hw/9pfs/9p-xattr-user.c @@ -0,0 +1,114 @@ +/* + * 9p user. xattr callback + * + * Copyright IBM, Corp. 2010 + * + * Authors: + * Aneesh Kumar K.V + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + * + */ + +/* + * Not so fast! You might want to read the 9p developer docs first: + * https://wiki.qemu.org/Documentation/9p + */ + +#include "qemu/osdep.h" +#include "9p.h" +#include "fsdev/file-op-9p.h" +#include "9p-xattr.h" + + +static ssize_t mp_user_getxattr(FsContext *ctx, const char *path, + const char *name, void *value, size_t size) +{ + if (strncmp(name, "user.virtfs.", 12) == 0) { + /* + * Don't allow fetch of the user.virtfs namespace + * in case of mapped security + */ + errno = ENOATTR; + return -1; + } + return local_getxattr_nofollow(ctx, path, name, value, size); +} + +static ssize_t mp_user_listxattr(FsContext *ctx, const char *path, + char *name, void *value, size_t size) +{ + int name_size = strlen(name) + 1; + if (strncmp(name, "user.virtfs.", 12) == 0) { + + /* check if it is a mapped posix acl */ + if (strncmp(name, "user.virtfs.system.posix_acl_", 29) == 0) { + /* adjust the name and size */ + name += 12; + name_size -= 12; + } else { + /* + * Don't allow listing of the user.virtfs namespace + * in case of mapped security + */ + return 0; + } + } + if (!value) { + return name_size; + } + + if (size < name_size) { + errno = ERANGE; + return -1; + } + + /* name_size includes the trailing NUL. */ + memcpy(value, name, name_size); + return name_size; +} + +static int mp_user_setxattr(FsContext *ctx, const char *path, const char *name, + void *value, size_t size, int flags) +{ + if (strncmp(name, "user.virtfs.", 12) == 0) { + /* + * Don't allow update of the user.virtfs namespace + * in case of mapped security + */ + errno = EACCES; + return -1; + } + return local_setxattr_nofollow(ctx, path, name, value, size, flags); +} + +static int mp_user_removexattr(FsContext *ctx, + const char *path, const char *name) +{ + if (strncmp(name, "user.virtfs.", 12) == 0) { + /* + * Don't allow removal of the user.virtfs namespace + * in case of mapped security + */ + errno = EACCES; + return -1; + } + return local_removexattr_nofollow(ctx, path, name); +} + +XattrOperations mapped_user_xattr = { + .name = "user.", + .getxattr = mp_user_getxattr, + .setxattr = mp_user_setxattr, + .listxattr = mp_user_listxattr, + .removexattr = mp_user_removexattr, +}; + +XattrOperations passthrough_user_xattr = { + .name = "user.", + .getxattr = pt_getxattr, + .setxattr = pt_setxattr, + .listxattr = pt_listxattr, + .removexattr = pt_removexattr, +}; diff --git a/hw/9pfs/9p-xattr.c b/hw/9pfs/9p-xattr.c new file mode 100644 index 000000000..9ae69dd8d --- /dev/null +++ b/hw/9pfs/9p-xattr.c @@ -0,0 +1,291 @@ +/* + * 9p xattr callback + * + * Copyright IBM, Corp.
2010 + * + * Authors: + * Aneesh Kumar K.V + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + * + */ + +/* + * Not so fast! You might want to read the 9p developer docs first: + * https://wiki.qemu.org/Documentation/9p + */ + +#include "qemu/osdep.h" +#include "9p.h" +#include "fsdev/file-op-9p.h" +#include "9p-xattr.h" +#include "9p-util.h" +#include "9p-local.h" + + +static XattrOperations *get_xattr_operations(XattrOperations **h, + const char *name) +{ + XattrOperations *xops; + for (xops = *(h)++; xops != NULL; xops = *(h)++) { + if (!strncmp(name, xops->name, strlen(xops->name))) { + return xops; + } + } + return NULL; +} + +ssize_t v9fs_get_xattr(FsContext *ctx, const char *path, + const char *name, void *value, size_t size) +{ + XattrOperations *xops = get_xattr_operations(ctx->xops, name); + if (xops) { + return xops->getxattr(ctx, path, name, value, size); + } + errno = EOPNOTSUPP; + return -1; +} + +ssize_t pt_listxattr(FsContext *ctx, const char *path, + char *name, void *value, size_t size) +{ + int name_size = strlen(name) + 1; + if (!value) { + return name_size; + } + + if (size < name_size) { + errno = ERANGE; + return -1; + } + + /* no need for strncpy: name_size is strlen(name)+1 */ + memcpy(value, name, name_size); + return name_size; +} + +/* + * Get the list and pass to each layer to find out whether + * to send the data or not + */ +ssize_t v9fs_list_xattr(FsContext *ctx, const char *path, + void *value, size_t vsize) +{ + ssize_t size = 0; + void *ovalue = value; + XattrOperations *xops; + char *orig_value, *orig_value_start; + ssize_t xattr_len, parsed_len = 0, attr_len; + char *dirpath, *name; + int dirfd; + + /* Get the actual len */ + dirpath = g_path_get_dirname(path); + dirfd = local_opendir_nofollow(ctx, dirpath); + g_free(dirpath); + if (dirfd == -1) { + return -1; + } + + name = g_path_get_basename(path); + xattr_len = flistxattrat_nofollow(dirfd, name, value, 0); + if (xattr_len <= 0) { + g_free(name); + close_preserve_errno(dirfd); + return xattr_len; + } + + /* Now fetch the xattr and find the actual size */ + orig_value = g_malloc(xattr_len); + xattr_len = flistxattrat_nofollow(dirfd, name, orig_value, xattr_len); + g_free(name); + close_preserve_errno(dirfd); + if (xattr_len < 0) { + g_free(orig_value); + return -1; + } + + /* store the orig pointer */ + orig_value_start = orig_value; + while (xattr_len > parsed_len) { + xops = get_xattr_operations(ctx->xops, orig_value); + if (!xops) { + goto next_entry; + } + + if (!value) { + size += xops->listxattr(ctx, path, orig_value, value, vsize); + } else { + size = xops->listxattr(ctx, path, orig_value, value, vsize); + if (size < 0) { + goto err_out; + } + value += size; + vsize -= size; + } +next_entry: + /* Got the next entry */ + attr_len = strlen(orig_value) + 1; + parsed_len += attr_len; + orig_value += attr_len; + } + if (value) { + size = value - ovalue; + } + +err_out: + g_free(orig_value_start); + return size; +} + +int v9fs_set_xattr(FsContext *ctx, const char *path, const char *name, + void *value, size_t size, int flags) +{ + XattrOperations *xops = get_xattr_operations(ctx->xops, name); + if (xops) { + return xops->setxattr(ctx, path, name, value, size, flags); + } + errno = EOPNOTSUPP; + return -1; + +} + +int v9fs_remove_xattr(FsContext *ctx, + const char *path, const char *name) +{ + XattrOperations *xops = get_xattr_operations(ctx->xops, name); + if (xops) { + return xops->removexattr(ctx, path, name); + } + errno = 
EOPNOTSUPP; + return -1; + +} + +ssize_t local_getxattr_nofollow(FsContext *ctx, const char *path, + const char *name, void *value, size_t size) +{ + char *dirpath = g_path_get_dirname(path); + char *filename = g_path_get_basename(path); + int dirfd; + ssize_t ret = -1; + + dirfd = local_opendir_nofollow(ctx, dirpath); + if (dirfd == -1) { + goto out; + } + + ret = fgetxattrat_nofollow(dirfd, filename, name, value, size); + close_preserve_errno(dirfd); +out: + g_free(dirpath); + g_free(filename); + return ret; +} + +ssize_t pt_getxattr(FsContext *ctx, const char *path, const char *name, + void *value, size_t size) +{ + return local_getxattr_nofollow(ctx, path, name, value, size); +} + +ssize_t local_setxattr_nofollow(FsContext *ctx, const char *path, + const char *name, void *value, size_t size, + int flags) +{ + char *dirpath = g_path_get_dirname(path); + char *filename = g_path_get_basename(path); + int dirfd; + ssize_t ret = -1; + + dirfd = local_opendir_nofollow(ctx, dirpath); + if (dirfd == -1) { + goto out; + } + + ret = fsetxattrat_nofollow(dirfd, filename, name, value, size, flags); + close_preserve_errno(dirfd); +out: + g_free(dirpath); + g_free(filename); + return ret; +} + +int pt_setxattr(FsContext *ctx, const char *path, const char *name, void *value, + size_t size, int flags) +{ + return local_setxattr_nofollow(ctx, path, name, value, size, flags); +} + +ssize_t local_removexattr_nofollow(FsContext *ctx, const char *path, + const char *name) +{ + char *dirpath = g_path_get_dirname(path); + char *filename = g_path_get_basename(path); + int dirfd; + ssize_t ret = -1; + + dirfd = local_opendir_nofollow(ctx, dirpath); + if (dirfd == -1) { + goto out; + } + + ret = fremovexattrat_nofollow(dirfd, filename, name); + close_preserve_errno(dirfd); +out: + g_free(dirpath); + g_free(filename); + return ret; +} + +int pt_removexattr(FsContext *ctx, const char *path, const char *name) +{ + return local_removexattr_nofollow(ctx, path, name); +} + +ssize_t notsup_getxattr(FsContext *ctx, const char *path, const char *name, + void *value, size_t size) +{ + errno = ENOTSUP; + return -1; +} + +int notsup_setxattr(FsContext *ctx, const char *path, const char *name, + void *value, size_t size, int flags) +{ + errno = ENOTSUP; + return -1; +} + +ssize_t notsup_listxattr(FsContext *ctx, const char *path, char *name, + void *value, size_t size) +{ + return 0; +} + +int notsup_removexattr(FsContext *ctx, const char *path, const char *name) +{ + errno = ENOTSUP; + return -1; +} + +XattrOperations *mapped_xattr_ops[] = { + &mapped_user_xattr, + &mapped_pacl_xattr, + &mapped_dacl_xattr, + NULL, +}; + +XattrOperations *passthrough_xattr_ops[] = { + &passthrough_user_xattr, + &passthrough_acl_xattr, + NULL, +}; + +/* for .user none model should be same as passthrough */ +XattrOperations *none_xattr_ops[] = { + &passthrough_user_xattr, + &none_acl_xattr, + NULL, +}; diff --git a/hw/9pfs/9p-xattr.h b/hw/9pfs/9p-xattr.h new file mode 100644 index 000000000..35bcd24f7 --- /dev/null +++ b/hw/9pfs/9p-xattr.h @@ -0,0 +1,75 @@ +/* + * 9p + * + * Copyright IBM, Corp. 2010 + * + * Authors: + * Aneesh Kumar K.V + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. 
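The prefix dispatch above is worth a tiny illustration (a sketch only; the FsContext wiring and paths are schematic): whichever entry of the active NULL-terminated table first matches the attribute name's prefix handles the call, and names claimed by no entry fail with EOPNOTSUPP.

static void xattr_dispatch_demo(FsContext *ctx)
{
    char buf[64];

    ctx->xops = mapped_xattr_ops;
    /* "user.foo" matches the "user." entry -> mp_user_getxattr() */
    v9fs_get_xattr(ctx, "/some/file", "user.foo", buf, sizeof(buf));
    /* no table entry claims "trusted." -> returns -1, errno = EOPNOTSUPP */
    v9fs_get_xattr(ctx, "/some/file", "trusted.foo", buf, sizeof(buf));
}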
+ * + */ + +#ifndef QEMU_9P_XATTR_H +#define QEMU_9P_XATTR_H + +#include "qemu/xattr.h" + +struct XattrOperations { + const char *name; + ssize_t (*getxattr)(FsContext *ctx, const char *path, + const char *name, void *value, size_t size); + ssize_t (*listxattr)(FsContext *ctx, const char *path, + char *name, void *value, size_t size); + int (*setxattr)(FsContext *ctx, const char *path, const char *name, + void *value, size_t size, int flags); + int (*removexattr)(FsContext *ctx, + const char *path, const char *name); +}; + +ssize_t local_getxattr_nofollow(FsContext *ctx, const char *path, + const char *name, void *value, size_t size); +ssize_t local_setxattr_nofollow(FsContext *ctx, const char *path, + const char *name, void *value, size_t size, + int flags); +ssize_t local_removexattr_nofollow(FsContext *ctx, const char *path, + const char *name); + +extern XattrOperations mapped_user_xattr; +extern XattrOperations passthrough_user_xattr; + +extern XattrOperations mapped_pacl_xattr; +extern XattrOperations mapped_dacl_xattr; +extern XattrOperations passthrough_acl_xattr; +extern XattrOperations none_acl_xattr; + +extern XattrOperations *mapped_xattr_ops[]; +extern XattrOperations *passthrough_xattr_ops[]; +extern XattrOperations *none_xattr_ops[]; + +ssize_t v9fs_get_xattr(FsContext *ctx, const char *path, const char *name, + void *value, size_t size); +ssize_t v9fs_list_xattr(FsContext *ctx, const char *path, void *value, + size_t vsize); +int v9fs_set_xattr(FsContext *ctx, const char *path, const char *name, + void *value, size_t size, int flags); +int v9fs_remove_xattr(FsContext *ctx, const char *path, const char *name); + +ssize_t pt_listxattr(FsContext *ctx, const char *path, char *name, void *value, + size_t size); +ssize_t pt_getxattr(FsContext *ctx, const char *path, const char *name, + void *value, size_t size); +int pt_setxattr(FsContext *ctx, const char *path, const char *name, void *value, + size_t size, int flags); +int pt_removexattr(FsContext *ctx, const char *path, const char *name); + +ssize_t notsup_getxattr(FsContext *ctx, const char *path, const char *name, + void *value, size_t size); +int notsup_setxattr(FsContext *ctx, const char *path, const char *name, + void *value, size_t size, int flags); +ssize_t notsup_listxattr(FsContext *ctx, const char *path, char *name, + void *value, size_t size); +int notsup_removexattr(FsContext *ctx, const char *path, const char *name); + +#endif diff --git a/hw/9pfs/9p.c b/hw/9pfs/9p.c new file mode 100644 index 000000000..15b3f4d38 --- /dev/null +++ b/hw/9pfs/9p.c @@ -0,0 +1,4260 @@ +/* + * Virtio 9p backend + * + * Copyright IBM, Corp. 2010 + * + * Authors: + * Anthony Liguori + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + * + */ + +/* + * Not so fast! 
You might want to read the 9p developer docs first: + * https://wiki.qemu.org/Documentation/9p + */ + +#include "qemu/osdep.h" +#include <glib/gprintf.h> +#include "hw/virtio/virtio.h" +#include "qapi/error.h" +#include "qemu/error-report.h" +#include "qemu/iov.h" +#include "qemu/main-loop.h" +#include "qemu/sockets.h" +#include "virtio-9p.h" +#include "fsdev/qemu-fsdev.h" +#include "9p-xattr.h" +#include "coth.h" +#include "trace.h" +#include "migration/blocker.h" +#include "qemu/xxhash.h" +#include <math.h> +#include <linux/limits.h> + +int open_fd_hw; +int total_open_fd; +static int open_fd_rc; + +enum { + Oread = 0x00, + Owrite = 0x01, + Ordwr = 0x02, + Oexec = 0x03, + Oexcl = 0x04, + Otrunc = 0x10, + Orexec = 0x20, + Orclose = 0x40, + Oappend = 0x80, +}; + +P9ARRAY_DEFINE_TYPE(V9fsPath, v9fs_path_free); + +static ssize_t pdu_marshal(V9fsPDU *pdu, size_t offset, const char *fmt, ...) +{ + ssize_t ret; + va_list ap; + + va_start(ap, fmt); + ret = pdu->s->transport->pdu_vmarshal(pdu, offset, fmt, ap); + va_end(ap); + + return ret; +} + +static ssize_t pdu_unmarshal(V9fsPDU *pdu, size_t offset, const char *fmt, ...) +{ + ssize_t ret; + va_list ap; + + va_start(ap, fmt); + ret = pdu->s->transport->pdu_vunmarshal(pdu, offset, fmt, ap); + va_end(ap); + + return ret; +} + +static int omode_to_uflags(int8_t mode) +{ + int ret = 0; + + switch (mode & 3) { + case Oread: + ret = O_RDONLY; + break; + case Ordwr: + ret = O_RDWR; + break; + case Owrite: + ret = O_WRONLY; + break; + case Oexec: + ret = O_RDONLY; + break; + } + + if (mode & Otrunc) { + ret |= O_TRUNC; + } + + if (mode & Oappend) { + ret |= O_APPEND; + } + + if (mode & Oexcl) { + ret |= O_EXCL; + } + + return ret; +} + +typedef struct DotlOpenflagMap { + int dotl_flag; + int open_flag; +} DotlOpenflagMap; + +static int dotl_to_open_flags(int flags) +{ + int i; + /* + * We have same bits for P9_DOTL_READONLY, P9_DOTL_WRONLY + * and P9_DOTL_NOACCESS + */ + int oflags = flags & O_ACCMODE; + + DotlOpenflagMap dotl_oflag_map[] = { + { P9_DOTL_CREATE, O_CREAT }, + { P9_DOTL_EXCL, O_EXCL }, + { P9_DOTL_NOCTTY, O_NOCTTY }, + { P9_DOTL_TRUNC, O_TRUNC }, + { P9_DOTL_APPEND, O_APPEND }, + { P9_DOTL_NONBLOCK, O_NONBLOCK }, + { P9_DOTL_DSYNC, O_DSYNC }, + { P9_DOTL_FASYNC, FASYNC }, + { P9_DOTL_DIRECT, O_DIRECT }, + { P9_DOTL_LARGEFILE, O_LARGEFILE }, + { P9_DOTL_DIRECTORY, O_DIRECTORY }, + { P9_DOTL_NOFOLLOW, O_NOFOLLOW }, + { P9_DOTL_NOATIME, O_NOATIME }, + { P9_DOTL_SYNC, O_SYNC }, + }; + + for (i = 0; i < ARRAY_SIZE(dotl_oflag_map); i++) { + if (flags & dotl_oflag_map[i].dotl_flag) { + oflags |= dotl_oflag_map[i].open_flag; + } + } + + return oflags; +} + +void cred_init(FsCred *credp) +{ + credp->fc_uid = -1; + credp->fc_gid = -1; + credp->fc_mode = -1; + credp->fc_rdev = -1; +} + +static int get_dotl_openflags(V9fsState *s, int oflags) +{ + int flags; + /* + * Filter the client open flags + */ + flags = dotl_to_open_flags(oflags); + flags &= ~(O_NOCTTY | O_ASYNC | O_CREAT); + /* + * Ignore direct disk access hint until the server supports it. + */ + flags &= ~O_DIRECT; + return flags; +} + +void v9fs_path_init(V9fsPath *path) +{ + path->data = NULL; + path->size = 0; +} + +void v9fs_path_free(V9fsPath *path) +{ + g_free(path->data); + path->data = NULL; + path->size = 0; +} + + +void GCC_FMT_ATTR(2, 3) +v9fs_path_sprintf(V9fsPath *path, const char *fmt, ...)
+{ + va_list ap; + + v9fs_path_free(path); + + va_start(ap, fmt); + /* Bump the size for including terminating NULL */ + path->size = g_vasprintf(&path->data, fmt, ap) + 1; + va_end(ap); +} + +void v9fs_path_copy(V9fsPath *dst, const V9fsPath *src) +{ + v9fs_path_free(dst); + dst->size = src->size; + dst->data = g_memdup(src->data, src->size); +} + +int v9fs_name_to_path(V9fsState *s, V9fsPath *dirpath, + const char *name, V9fsPath *path) +{ + int err; + err = s->ops->name_to_path(&s->ctx, dirpath, name, path); + if (err < 0) { + err = -errno; + } + return err; +} + +/* + * Return TRUE if s1 is an ancestor of s2. + * + * E.g. "a/b" is an ancestor of "a/b/c" but not of "a/bc/d". + * As a special case, We treat s1 as ancestor of s2 if they are same! + */ +static int v9fs_path_is_ancestor(V9fsPath *s1, V9fsPath *s2) +{ + if (!strncmp(s1->data, s2->data, s1->size - 1)) { + if (s2->data[s1->size - 1] == '\0' || s2->data[s1->size - 1] == '/') { + return 1; + } + } + return 0; +} + +static size_t v9fs_string_size(V9fsString *str) +{ + return str->size; +} + +/* + * returns 0 if fid got re-opened, 1 if not, < 0 on error */ +static int coroutine_fn v9fs_reopen_fid(V9fsPDU *pdu, V9fsFidState *f) +{ + int err = 1; + if (f->fid_type == P9_FID_FILE) { + if (f->fs.fd == -1) { + do { + err = v9fs_co_open(pdu, f, f->open_flags); + } while (err == -EINTR && !pdu->cancelled); + } + } else if (f->fid_type == P9_FID_DIR) { + if (f->fs.dir.stream == NULL) { + do { + err = v9fs_co_opendir(pdu, f); + } while (err == -EINTR && !pdu->cancelled); + } + } + return err; +} + +static V9fsFidState *coroutine_fn get_fid(V9fsPDU *pdu, int32_t fid) +{ + int err; + V9fsFidState *f; + V9fsState *s = pdu->s; + + QSIMPLEQ_FOREACH(f, &s->fid_list, next) { + BUG_ON(f->clunked); + if (f->fid == fid) { + /* + * Update the fid ref upfront so that + * we don't get reclaimed when we yield + * in open later. + */ + f->ref++; + /* + * check whether we need to reopen the + * file. We might have closed the fd + * while trying to free up some file + * descriptors. + */ + err = v9fs_reopen_fid(pdu, f); + if (err < 0) { + f->ref--; + return NULL; + } + /* + * Mark the fid as referenced so that the LRU + * reclaim won't close the file descriptor + */ + f->flags |= FID_REFERENCED; + return f; + } + } + return NULL; +} + +static V9fsFidState *alloc_fid(V9fsState *s, int32_t fid) +{ + V9fsFidState *f; + + QSIMPLEQ_FOREACH(f, &s->fid_list, next) { + /* If fid is already there return NULL */ + BUG_ON(f->clunked); + if (f->fid == fid) { + return NULL; + } + } + f = g_malloc0(sizeof(V9fsFidState)); + f->fid = fid; + f->fid_type = P9_FID_NONE; + f->ref = 1; + /* + * Mark the fid as referenced so that the LRU + * reclaim won't close the file descriptor + */ + f->flags |= FID_REFERENCED; + QSIMPLEQ_INSERT_TAIL(&s->fid_list, f, next); + + v9fs_readdir_init(s->proto_version, &f->fs.dir); + v9fs_readdir_init(s->proto_version, &f->fs_reclaim.dir); + + return f; +} + +static int coroutine_fn v9fs_xattr_fid_clunk(V9fsPDU *pdu, V9fsFidState *fidp) +{ + int retval = 0; + + if (fidp->fs.xattr.xattrwalk_fid) { + /* getxattr/listxattr fid */ + goto free_value; + } + /* + * if this is fid for setxattr. 
clunk should + * result in setxattr localcall + */ + if (fidp->fs.xattr.len != fidp->fs.xattr.copied_len) { + /* clunk after partial write */ + retval = -EINVAL; + goto free_out; + } + if (fidp->fs.xattr.len) { + retval = v9fs_co_lsetxattr(pdu, &fidp->path, &fidp->fs.xattr.name, + fidp->fs.xattr.value, + fidp->fs.xattr.len, + fidp->fs.xattr.flags); + } else { + retval = v9fs_co_lremovexattr(pdu, &fidp->path, &fidp->fs.xattr.name); + } +free_out: + v9fs_string_free(&fidp->fs.xattr.name); +free_value: + g_free(fidp->fs.xattr.value); + return retval; +} + +static int coroutine_fn free_fid(V9fsPDU *pdu, V9fsFidState *fidp) +{ + int retval = 0; + + if (fidp->fid_type == P9_FID_FILE) { + /* If we reclaimed the fd no need to close */ + if (fidp->fs.fd != -1) { + retval = v9fs_co_close(pdu, &fidp->fs); + } + } else if (fidp->fid_type == P9_FID_DIR) { + if (fidp->fs.dir.stream != NULL) { + retval = v9fs_co_closedir(pdu, &fidp->fs); + } + } else if (fidp->fid_type == P9_FID_XATTR) { + retval = v9fs_xattr_fid_clunk(pdu, fidp); + } + v9fs_path_free(&fidp->path); + g_free(fidp); + return retval; +} + +static int coroutine_fn put_fid(V9fsPDU *pdu, V9fsFidState *fidp) +{ + BUG_ON(!fidp->ref); + fidp->ref--; + /* + * Don't free the fid if it is in reclaim list + */ + if (!fidp->ref && fidp->clunked) { + if (fidp->fid == pdu->s->root_fid) { + /* + * if the clunked fid is root fid then we + * have unmounted the fs on the client side. + * delete the migration blocker. Ideally, this + * should be hooked to transport close notification + */ + if (pdu->s->migration_blocker) { + migrate_del_blocker(pdu->s->migration_blocker); + error_free(pdu->s->migration_blocker); + pdu->s->migration_blocker = NULL; + } + } + return free_fid(pdu, fidp); + } + return 0; +} + +static V9fsFidState *clunk_fid(V9fsState *s, int32_t fid) +{ + V9fsFidState *fidp; + + QSIMPLEQ_FOREACH(fidp, &s->fid_list, next) { + if (fidp->fid == fid) { + QSIMPLEQ_REMOVE(&s->fid_list, fidp, V9fsFidState, next); + fidp->clunked = true; + return fidp; + } + } + return NULL; +} + +void coroutine_fn v9fs_reclaim_fd(V9fsPDU *pdu) +{ + int reclaim_count = 0; + V9fsState *s = pdu->s; + V9fsFidState *f; + QSLIST_HEAD(, V9fsFidState) reclaim_list = + QSLIST_HEAD_INITIALIZER(reclaim_list); + + QSIMPLEQ_FOREACH(f, &s->fid_list, next) { + /* + * Unlink fids cannot be reclaimed. Check + * for them and skip them. Also skip fids + * currently being operated on. + */ + if (f->ref || f->flags & FID_NON_RECLAIMABLE) { + continue; + } + /* + * if it is a recently referenced fid + * we leave the fid untouched and clear the + * reference bit. We come back to it later + * in the next iteration. (a simple LRU without + * moving list elements around) + */ + if (f->flags & FID_REFERENCED) { + f->flags &= ~FID_REFERENCED; + continue; + } + /* + * Add fids to reclaim list. + */ + if (f->fid_type == P9_FID_FILE) { + if (f->fs.fd != -1) { + /* + * Up the reference count so that + * a clunk request won't free this fid + */ + f->ref++; + QSLIST_INSERT_HEAD(&reclaim_list, f, reclaim_next); + f->fs_reclaim.fd = f->fs.fd; + f->fs.fd = -1; + reclaim_count++; + } + } else if (f->fid_type == P9_FID_DIR) { + if (f->fs.dir.stream != NULL) { + /* + * Up the reference count so that + * a clunk request won't free this fid + */ + f->ref++; + QSLIST_INSERT_HEAD(&reclaim_list, f, reclaim_next); + f->fs_reclaim.dir.stream = f->fs.dir.stream; + f->fs.dir.stream = NULL; + reclaim_count++; + } + } + if (reclaim_count >= open_fd_rc) { + break; + } + } + /* + * Now close the fid in reclaim list. 
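The scan above is essentially a second-chance (clock) replacement policy over the fid list. A standalone toy model of the same idea (types and names invented for illustration):

#include <unistd.h>

struct toy_fid {
    unsigned flags;     /* TOY_REFERENCED is set on every use */
    int fd;             /* -1 once reclaimed */
};
#define TOY_REFERENCED 0x1

static void toy_reclaim_scan(struct toy_fid *fids, int n)
{
    for (int i = 0; i < n; i++) {
        if (fids[i].flags & TOY_REFERENCED) {
            /* recently used: strip the bit, spare it this round */
            fids[i].flags &= ~TOY_REFERENCED;
        } else if (fids[i].fd != -1) {
            /* cold entry: give its descriptor back to the host */
            close(fids[i].fd);
            fids[i].fd = -1;
        }
    }
}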
Free them if they + * are already clunked. + */ + while (!QSLIST_EMPTY(&reclaim_list)) { + f = QSLIST_FIRST(&reclaim_list); + QSLIST_REMOVE(&reclaim_list, f, V9fsFidState, reclaim_next); + if (f->fid_type == P9_FID_FILE) { + v9fs_co_close(pdu, &f->fs_reclaim); + } else if (f->fid_type == P9_FID_DIR) { + v9fs_co_closedir(pdu, &f->fs_reclaim); + } + /* + * Now drop the fid reference, free it + * if clunked. + */ + put_fid(pdu, f); + } +} + +static int coroutine_fn v9fs_mark_fids_unreclaim(V9fsPDU *pdu, V9fsPath *path) +{ + int err; + V9fsState *s = pdu->s; + V9fsFidState *fidp, *fidp_next; + + fidp = QSIMPLEQ_FIRST(&s->fid_list); + if (!fidp) { + return 0; + } + + /* + * v9fs_reopen_fid() can yield : a reference on the fid must be held + * to ensure its pointer remains valid and we can safely pass it to + * QSIMPLEQ_NEXT(). The corresponding put_fid() can also yield so + * we must keep a reference on the next fid as well. So the logic here + * is to get a reference on a fid and only put it back during the next + * iteration after we could get a reference on the next fid. Start with + * the first one. + */ + for (fidp->ref++; fidp; fidp = fidp_next) { + if (fidp->path.size == path->size && + !memcmp(fidp->path.data, path->data, path->size)) { + /* Mark the fid non reclaimable. */ + fidp->flags |= FID_NON_RECLAIMABLE; + + /* reopen the file/dir if already closed */ + err = v9fs_reopen_fid(pdu, fidp); + if (err < 0) { + put_fid(pdu, fidp); + return err; + } + } + + fidp_next = QSIMPLEQ_NEXT(fidp, next); + + if (fidp_next) { + /* + * Ensure the next fid survives a potential clunk request during + * put_fid() below and v9fs_reopen_fid() in the next iteration. + */ + fidp_next->ref++; + } + + /* We're done with this fid */ + put_fid(pdu, fidp); + } + + return 0; +} + +static void coroutine_fn virtfs_reset(V9fsPDU *pdu) +{ + V9fsState *s = pdu->s; + V9fsFidState *fidp; + + /* Free all fids */ + while (!QSIMPLEQ_EMPTY(&s->fid_list)) { + /* Get fid */ + fidp = QSIMPLEQ_FIRST(&s->fid_list); + fidp->ref++; + + /* Clunk fid */ + QSIMPLEQ_REMOVE(&s->fid_list, fidp, V9fsFidState, next); + fidp->clunked = true; + + put_fid(pdu, fidp); + } +} + +#define P9_QID_TYPE_DIR 0x80 +#define P9_QID_TYPE_SYMLINK 0x02 + +#define P9_STAT_MODE_DIR 0x80000000 +#define P9_STAT_MODE_APPEND 0x40000000 +#define P9_STAT_MODE_EXCL 0x20000000 +#define P9_STAT_MODE_MOUNT 0x10000000 +#define P9_STAT_MODE_AUTH 0x08000000 +#define P9_STAT_MODE_TMP 0x04000000 +#define P9_STAT_MODE_SYMLINK 0x02000000 +#define P9_STAT_MODE_LINK 0x01000000 +#define P9_STAT_MODE_DEVICE 0x00800000 +#define P9_STAT_MODE_NAMED_PIPE 0x00200000 +#define P9_STAT_MODE_SOCKET 0x00100000 +#define P9_STAT_MODE_SETUID 0x00080000 +#define P9_STAT_MODE_SETGID 0x00040000 +#define P9_STAT_MODE_SETVTX 0x00010000 + +#define P9_STAT_MODE_TYPE_BITS (P9_STAT_MODE_DIR | \ + P9_STAT_MODE_SYMLINK | \ + P9_STAT_MODE_LINK | \ + P9_STAT_MODE_DEVICE | \ + P9_STAT_MODE_NAMED_PIPE | \ + P9_STAT_MODE_SOCKET) + +/* Mirrors all bits of a byte. So e.g. binary 10100000 would become 00000101. */ +static inline uint8_t mirror8bit(uint8_t byte) +{ + return (byte * 0x0202020202ULL & 0x010884422010ULL) % 1023; +} + +/* Same as mirror8bit() just for a 64 bit data type instead for a byte. 
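The multiply/mask/modulo trick in mirror8bit() is compact enough to deserve a cross-check. A self-contained sketch comparing it against a naive loop over all 256 byte values (assert-based, illustration only):

#include <assert.h>
#include <stdint.h>

static uint8_t mirror8bit_naive(uint8_t b)
{
    uint8_t r = 0;
    for (int i = 0; i < 8; i++) {
        r = (r << 1) | ((b >> i) & 1);   /* bit i lands at position 7-i */
    }
    return r;
}

static void mirror8bit_selftest(void)
{
    for (int v = 0; v < 256; v++) {
        assert(mirror8bit(v) == mirror8bit_naive(v));
    }
}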
*/ +static inline uint64_t mirror64bit(uint64_t value) +{ + return ((uint64_t)mirror8bit(value & 0xff) << 56) | + ((uint64_t)mirror8bit((value >> 8) & 0xff) << 48) | + ((uint64_t)mirror8bit((value >> 16) & 0xff) << 40) | + ((uint64_t)mirror8bit((value >> 24) & 0xff) << 32) | + ((uint64_t)mirror8bit((value >> 32) & 0xff) << 24) | + ((uint64_t)mirror8bit((value >> 40) & 0xff) << 16) | + ((uint64_t)mirror8bit((value >> 48) & 0xff) << 8) | + ((uint64_t)mirror8bit((value >> 56) & 0xff)); +} + +/** + * @brief Parameter k for the Exponential Golomb algorithm to be used. + * + * The smaller this value, the smaller the minimum bit count for the Exp. + * Golomb generated affixes will be (at lowest index), however at the + * price of a higher maximum bit count of generated affixes (at highest + * index). Likewise increasing this parameter yields a smaller maximum bit + * count for the price of a higher minimum bit count. + * + * In practice that means: a good value for k depends on the expected amount + * of devices to be exposed by one export. For a small amount of devices k + * should be small, for a large amount of devices k might be increased + * instead. The default of k=0 should be fine for most users though. + * + * @b IMPORTANT: In case this ever becomes a runtime parameter, the value of + * k should not change as long as guest is still running! Because that would + * cause completely different inode numbers to be generated on guest. + */ +#define EXP_GOLOMB_K 0 + +/** + * @brief Exponential Golomb algorithm for arbitrary k (including k=0). + * + * The Exponential Golomb algorithm generates @b prefixes (@b not suffixes!) + * with growing length and with the mathematical property of being + * "prefix-free". The latter means the generated prefixes can be prepended + * in front of arbitrary numbers and the resulting concatenated numbers are + * guaranteed to be always unique. + * + * This is a minor adjustment to the original Exp. Golomb algorithm in the + * sense that lowest allowed index (@param n) starts with 1, not with zero. + * + * @param n - natural number (or index) of the prefix to be generated + * (1, 2, 3, ...) + * @param k - parameter k of Exp. Golomb algorithm to be used + * (see comment on EXP_GOLOMB_K macro for details about k) + */ +static VariLenAffix expGolombEncode(uint64_t n, int k) +{ + const uint64_t value = n + (1 << k) - 1; + const int bits = (int) log2(value) + 1; + return (VariLenAffix) { + .type = AffixType_Prefix, + .value = value, + .bits = bits + MAX((bits - 1 - k), 0) + }; +} + +/** + * @brief Converts a suffix into a prefix, or a prefix into a suffix. + * + * Simply mirror all bits of the affix value, for the purpose to preserve + * respectively the mathematical "prefix-free" or "suffix-free" property + * after the conversion. + * + * If a passed prefix is suitable to create unique numbers, then the + * returned suffix is suitable to create unique numbers as well (and vice + * versa). + */ +static VariLenAffix invertAffix(const VariLenAffix *affix) +{ + return (VariLenAffix) { + .type = + (affix->type == AffixType_Suffix) ? + AffixType_Prefix : AffixType_Suffix, + .value = + mirror64bit(affix->value) >> + ((sizeof(affix->value) * 8) - affix->bits), + .bits = affix->bits + }; +} + +/** + * @brief Generates suffix numbers with "suffix-free" property. + * + * This is just a wrapper function on top of the Exp. Golomb algorithm. + * + * Since the Exp. Golomb algorithm generates prefixes, but we need suffixes, + * this function converts the Exp.
Golomb prefixes into appropriate suffixes + * which are still suitable for generating unique numbers. + * + * @param n - natural number (or index) of the suffix to be generated + * (1, 2, 3, ...) + */ +static VariLenAffix affixForIndex(uint64_t index) +{ + VariLenAffix prefix; + prefix = expGolombEncode(index, EXP_GOLOMB_K); + return invertAffix(&prefix); /* convert prefix to suffix */ +} + +/* creative abuse of tb_hash_func7, which is based on xxhash */ +static uint32_t qpp_hash(QppEntry e) +{ + return qemu_xxhash7(e.ino_prefix, e.dev, 0, 0, 0); +} + +static uint32_t qpf_hash(QpfEntry e) +{ + return qemu_xxhash7(e.ino, e.dev, 0, 0, 0); +} + +static bool qpd_cmp_func(const void *obj, const void *userp) +{ + const QpdEntry *e1 = obj, *e2 = userp; + return e1->dev == e2->dev; +} + +static bool qpp_cmp_func(const void *obj, const void *userp) +{ + const QppEntry *e1 = obj, *e2 = userp; + return e1->dev == e2->dev && e1->ino_prefix == e2->ino_prefix; +} + +static bool qpf_cmp_func(const void *obj, const void *userp) +{ + const QpfEntry *e1 = obj, *e2 = userp; + return e1->dev == e2->dev && e1->ino == e2->ino; +} + +static void qp_table_remove(void *p, uint32_t h, void *up) +{ + g_free(p); +} + +static void qp_table_destroy(struct qht *ht) +{ + if (!ht || !ht->map) { + return; + } + qht_iter(ht, qp_table_remove, NULL); + qht_destroy(ht); +} + +static void qpd_table_init(struct qht *ht) +{ + qht_init(ht, qpd_cmp_func, 1, QHT_MODE_AUTO_RESIZE); +} + +static void qpp_table_init(struct qht *ht) +{ + qht_init(ht, qpp_cmp_func, 1, QHT_MODE_AUTO_RESIZE); +} + +static void qpf_table_init(struct qht *ht) +{ + qht_init(ht, qpf_cmp_func, 1 << 16, QHT_MODE_AUTO_RESIZE); +} + +/* + * Returns how many (high end) bits of inode numbers of the passed fs + * device shall be used (in combination with the device number) to + * generate hash values for qpp_table entries. + * + * This function is required if variable length suffixes are used for inode + * number mapping on guest level. Since a device may end up having multiple + * entries in qpp_table, each entry most probably with a different suffix + * length, we thus need this function in conjunction with qpd_table to + * "agree" about a fix amount of bits (per device) to be always used for + * generating hash values for the purpose of accessing qpp_table in order + * get consistent behaviour when accessing qpp_table. + */ +static int qid_inode_prefix_hash_bits(V9fsPDU *pdu, dev_t dev) +{ + QpdEntry lookup = { + .dev = dev + }, *val; + uint32_t hash = dev; + VariLenAffix affix; + + val = qht_lookup(&pdu->s->qpd_table, &lookup, hash); + if (!val) { + val = g_malloc0(sizeof(QpdEntry)); + *val = lookup; + affix = affixForIndex(pdu->s->qp_affix_next); + val->prefix_bits = affix.bits; + qht_insert(&pdu->s->qpd_table, val, hash, NULL); + pdu->s->qp_ndevices++; + } + return val->prefix_bits; +} + +/** + * @brief Slow / full mapping host inode nr -> guest inode nr. + * + * This function performs a slower and much more costly remapping of an + * original file inode number on host to an appropriate different inode + * number on guest. For every (dev, inode) combination on host a new + * sequential number is generated, cached and exposed as inode number on + * guest. + * + * This is just a "last resort" fallback solution if the much faster/cheaper + * qid_path_suffixmap() failed. In practice this slow / full mapping is not + * expected ever to be used at all though. 
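+ *
+ * A rough sketch of the resulting layout (illustrative; assumes
+ * EXP_GOLOMB_K = 0 and the 16-bit affix counter upstream QEMU uses): all
+ * fullmap entries share the single affix generated for index 2^16, one
+ * past the last index qid_path_suffixmap() can hand out, so by the
+ * suffix-free property it cannot collide with any suffix-mapped number;
+ * the entries then simply count upwards in the remaining high bits,
+ * loosely following the code below:
+ *
+ *     guest_ino = (qp_fullpath_next++ << affix.bits) | affix.value;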
+ *
+ * @see qid_path_suffixmap() for details
+ *
+ */
+static int qid_path_fullmap(V9fsPDU *pdu, const struct stat *stbuf,
+                            uint64_t *path)
+{
+    QpfEntry lookup = {
+        .dev = stbuf->st_dev,
+        .ino = stbuf->st_ino
+    }, *val;
+    uint32_t hash = qpf_hash(lookup);
+    VariLenAffix affix;
+
+    val = qht_lookup(&pdu->s->qpf_table, &lookup, hash);
+
+    if (!val) {
+        if (pdu->s->qp_fullpath_next == 0) {
+            /* no more files can be mapped :'( */
+            error_report_once(
+                "9p: No more prefixes available for remapping inodes from "
+                "host to guest."
+            );
+            return -ENFILE;
+        }
+
+        val = g_malloc0(sizeof(QpfEntry));
+        *val = lookup;
+
+        /* new unique inode and device combo */
+        affix = affixForIndex(
+            1ULL << (sizeof(pdu->s->qp_affix_next) * 8)
+        );
+        val->path = (pdu->s->qp_fullpath_next++ << affix.bits) | affix.value;
+        pdu->s->qp_fullpath_next &= ((1ULL << (64 - affix.bits)) - 1);
+        qht_insert(&pdu->s->qpf_table, val, hash, NULL);
+    }
+
+    *path = val->path;
+    return 0;
+}
+
+/**
+ * @brief Quick mapping host inode nr -> guest inode nr.
+ *
+ * This function performs quick remapping of an original file inode number
+ * on host to an appropriate different inode number on guest. This remapping
+ * of inodes is required to avoid inode nr collisions on guest which would
+ * happen if the 9p export contains more than 1 exported file system (or
+ * more than 1 file system data set), because unlike on host level where the
+ * files would have different device nrs, all files exported by 9p would
+ * share the same device nr on guest (the device nr of the virtual 9p device
+ * that is).
+ *
+ * Inode remapping is performed by chopping off high end bits of the original
+ * inode number from host, shifting the result upwards and then assigning a
+ * generated suffix number for the low end bits, where the same suffix number
+ * will be shared by all inodes with the same device id AND the same high end
+ * bits that have been chopped off. That approach utilizes the fact that inode
+ * numbers very likely share the same high end bits (i.e. due to their common
+ * sequential generation by file systems) and hence we only have to generate
+ * and track a very limited amount of suffixes in practice.
+ *
+ * We generate variable size suffixes for that purpose. The 1st generated
+ * suffix will only have 1 bit and hence we only need to chop off 1 bit from
+ * the original inode number. The subsequent suffixes being generated will
+ * grow in (bit) size subsequently, i.e. the 2nd and 3rd suffix being
+ * generated will have 3 bits and hence we have to chop off 3 bits from their
+ * original inodes, and so on. That approach of using variable length suffixes
+ * (i.e. over fixed size ones) utilizes the fact that in practice only a very
+ * limited number of devices are shared by the same export (e.g. typically
+ * less than 2 dozen devices per 9p export), so in practice we need to chop
+ * off fewer bits than with fixed size prefixes and yet stay flexible to add
+ * new devices below the host's export directory at any time at runtime,
+ * without having to reboot the guest or reconfigure it. And due to the very
+ * limited amount of original high end bits that we chop off that way, the
+ * total amount of suffixes we need to generate is lower than with fixed
+ * size prefixes, which also improves performance of the inode remapping
+ * algorithm, and finally has the nice side effect that the inode
+ * numbers on guest will be much smaller & human friendly.
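+ *
+ * A concrete walk-through (illustrative numbers; assumes EXP_GOLOMB_K = 0
+ * and the affix counter starting at 1, as in upstream QEMU): the first
+ * (dev, ino_prefix) combo encountered gets the 1-bit suffix "1", so its
+ * inodes map as
+ *
+ *     guest_ino = (host_ino << 1) | 0x1;
+ *
+ * The second and third combos get the 3-bit suffixes "010" and "110"
+ * (expGolombEncode() yields the prefixes "010" and "011", which
+ * invertAffix() bit-mirrors), so their inodes are shifted up by 3 bits
+ * instead.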
;-) + */ +static int qid_path_suffixmap(V9fsPDU *pdu, const struct stat *stbuf, + uint64_t *path) +{ + const int ino_hash_bits = qid_inode_prefix_hash_bits(pdu, stbuf->st_dev); + QppEntry lookup = { + .dev = stbuf->st_dev, + .ino_prefix = (uint16_t) (stbuf->st_ino >> (64 - ino_hash_bits)) + }, *val; + uint32_t hash = qpp_hash(lookup); + + val = qht_lookup(&pdu->s->qpp_table, &lookup, hash); + + if (!val) { + if (pdu->s->qp_affix_next == 0) { + /* we ran out of affixes */ + warn_report_once( + "9p: Potential degraded performance of inode remapping" + ); + return -ENFILE; + } + + val = g_malloc0(sizeof(QppEntry)); + *val = lookup; + + /* new unique inode affix and device combo */ + val->qp_affix_index = pdu->s->qp_affix_next++; + val->qp_affix = affixForIndex(val->qp_affix_index); + qht_insert(&pdu->s->qpp_table, val, hash, NULL); + } + /* assuming generated affix to be suffix type, not prefix */ + *path = (stbuf->st_ino << val->qp_affix.bits) | val->qp_affix.value; + return 0; +} + +static int stat_to_qid(V9fsPDU *pdu, const struct stat *stbuf, V9fsQID *qidp) +{ + int err; + size_t size; + + if (pdu->s->ctx.export_flags & V9FS_REMAP_INODES) { + /* map inode+device to qid path (fast path) */ + err = qid_path_suffixmap(pdu, stbuf, &qidp->path); + if (err == -ENFILE) { + /* fast path didn't work, fall back to full map */ + err = qid_path_fullmap(pdu, stbuf, &qidp->path); + } + if (err) { + return err; + } + } else { + if (pdu->s->dev_id != stbuf->st_dev) { + if (pdu->s->ctx.export_flags & V9FS_FORBID_MULTIDEVS) { + error_report_once( + "9p: Multiple devices detected in same VirtFS export. " + "Access of guest to additional devices is (partly) " + "denied due to virtfs option 'multidevs=forbid' being " + "effective." + ); + return -ENODEV; + } else { + warn_report_once( + "9p: Multiple devices detected in same VirtFS export, " + "which might lead to file ID collisions and severe " + "misbehaviours on guest! You should either use a " + "separate export for each device shared from host or " + "use virtfs option 'multidevs=remap'!" + ); + } + } + memset(&qidp->path, 0, sizeof(qidp->path)); + size = MIN(sizeof(stbuf->st_ino), sizeof(qidp->path)); + memcpy(&qidp->path, &stbuf->st_ino, size); + } + + qidp->version = stbuf->st_mtime ^ (stbuf->st_size << 8); + qidp->type = 0; + if (S_ISDIR(stbuf->st_mode)) { + qidp->type |= P9_QID_TYPE_DIR; + } + if (S_ISLNK(stbuf->st_mode)) { + qidp->type |= P9_QID_TYPE_SYMLINK; + } + + return 0; +} + +V9fsPDU *pdu_alloc(V9fsState *s) +{ + V9fsPDU *pdu = NULL; + + if (!QLIST_EMPTY(&s->free_list)) { + pdu = QLIST_FIRST(&s->free_list); + QLIST_REMOVE(pdu, next); + QLIST_INSERT_HEAD(&s->active_list, pdu, next); + } + return pdu; +} + +void pdu_free(V9fsPDU *pdu) +{ + V9fsState *s = pdu->s; + + g_assert(!pdu->cancelled); + QLIST_REMOVE(pdu, next); + QLIST_INSERT_HEAD(&s->free_list, pdu, next); +} + +static void coroutine_fn pdu_complete(V9fsPDU *pdu, ssize_t len) +{ + int8_t id = pdu->id + 1; /* Response */ + V9fsState *s = pdu->s; + int ret; + + /* + * The 9p spec requires that successfully cancelled pdus receive no reply. + * Sending a reply would confuse clients because they would + * assume that any EINTR is the actual result of the operation, + * rather than a consequence of the cancellation. 
However, if
+ * the operation completed (successfully or with an error other than one
+ * caused by cancellation), we do send out that reply, both for efficiency
+ * and to avoid confusing the rest of the state machine that assumes
+ * passing a non-error here will mean a successful transmission of the
+ * reply.
+ */
+    bool discard = pdu->cancelled && len == -EINTR;
+    if (discard) {
+        trace_v9fs_rcancel(pdu->tag, pdu->id);
+        pdu->size = 0;
+        goto out_notify;
+    }
+
+    if (len < 0) {
+        int err = -len;
+        len = 7;
+
+        if (s->proto_version != V9FS_PROTO_2000L) {
+            V9fsString str;
+
+            str.data = strerror(err);
+            str.size = strlen(str.data);
+
+            ret = pdu_marshal(pdu, len, "s", &str);
+            if (ret < 0) {
+                goto out_notify;
+            }
+            len += ret;
+            id = P9_RERROR;
+        }
+
+        ret = pdu_marshal(pdu, len, "d", err);
+        if (ret < 0) {
+            goto out_notify;
+        }
+        len += ret;
+
+        if (s->proto_version == V9FS_PROTO_2000L) {
+            id = P9_RLERROR;
+        }
+        trace_v9fs_rerror(pdu->tag, pdu->id, err); /* Trace ERROR */
+    }
+
+    /* fill out the header */
+    if (pdu_marshal(pdu, 0, "dbw", (int32_t)len, id, pdu->tag) < 0) {
+        goto out_notify;
+    }
+
+    /* keep these in sync */
+    pdu->size = len;
+    pdu->id = id;
+
+out_notify:
+    pdu->s->transport->push_and_notify(pdu);
+
+    /* Now wake up anybody waiting in flush for this request */
+    if (!qemu_co_queue_next(&pdu->complete)) {
+        pdu_free(pdu);
+    }
+}
+
+static mode_t v9mode_to_mode(uint32_t mode, V9fsString *extension)
+{
+    mode_t ret;
+
+    ret = mode & 0777;
+    if (mode & P9_STAT_MODE_DIR) {
+        ret |= S_IFDIR;
+    }
+
+    if (mode & P9_STAT_MODE_SYMLINK) {
+        ret |= S_IFLNK;
+    }
+    if (mode & P9_STAT_MODE_SOCKET) {
+        ret |= S_IFSOCK;
+    }
+    if (mode & P9_STAT_MODE_NAMED_PIPE) {
+        ret |= S_IFIFO;
+    }
+    if (mode & P9_STAT_MODE_DEVICE) {
+        if (extension->size && extension->data[0] == 'c') {
+            ret |= S_IFCHR;
+        } else {
+            ret |= S_IFBLK;
+        }
+    }
+
+    if (!(ret & ~0777)) {
+        ret |= S_IFREG;
+    }
+
+    if (mode & P9_STAT_MODE_SETUID) {
+        ret |= S_ISUID;
+    }
+    if (mode & P9_STAT_MODE_SETGID) {
+        ret |= S_ISGID;
+    }
+    if (mode & P9_STAT_MODE_SETVTX) {
+        ret |= S_ISVTX;
+    }
+
+    return ret;
+}
+
+static int donttouch_stat(V9fsStat *stat)
+{
+    if (stat->type == -1 &&
+        stat->dev == -1 &&
+        stat->qid.type == 0xff &&
+        stat->qid.version == (uint32_t) -1 &&
+        stat->qid.path == (uint64_t) -1 &&
+        stat->mode == -1 &&
+        stat->atime == -1 &&
+        stat->mtime == -1 &&
+        stat->length == -1 &&
+        !stat->name.size &&
+        !stat->uid.size &&
+        !stat->gid.size &&
+        !stat->muid.size &&
+        stat->n_uid == -1 &&
+        stat->n_gid == -1 &&
+        stat->n_muid == -1) {
+        return 1;
+    }
+
+    return 0;
+}
+
+static void v9fs_stat_init(V9fsStat *stat)
+{
+    v9fs_string_init(&stat->name);
+    v9fs_string_init(&stat->uid);
+    v9fs_string_init(&stat->gid);
+    v9fs_string_init(&stat->muid);
+    v9fs_string_init(&stat->extension);
+}
+
+static void v9fs_stat_free(V9fsStat *stat)
+{
+    v9fs_string_free(&stat->name);
+    v9fs_string_free(&stat->uid);
+    v9fs_string_free(&stat->gid);
+    v9fs_string_free(&stat->muid);
+    v9fs_string_free(&stat->extension);
+}
+
+static uint32_t stat_to_v9mode(const struct stat *stbuf)
+{
+    uint32_t mode;
+
+    mode = stbuf->st_mode & 0777;
+    if (S_ISDIR(stbuf->st_mode)) {
+        mode |= P9_STAT_MODE_DIR;
+    }
+
+    if (S_ISLNK(stbuf->st_mode)) {
+        mode |= P9_STAT_MODE_SYMLINK;
+    }
+
+    if (S_ISSOCK(stbuf->st_mode)) {
+        mode |= P9_STAT_MODE_SOCKET;
+    }
+
+    if (S_ISFIFO(stbuf->st_mode)) {
+        mode |= P9_STAT_MODE_NAMED_PIPE;
+    }
+
+    if (S_ISBLK(stbuf->st_mode) || S_ISCHR(stbuf->st_mode)) {
+        mode |= P9_STAT_MODE_DEVICE;
+    }
+
+    if (stbuf->st_mode & S_ISUID) {
+
mode |= P9_STAT_MODE_SETUID; + } + + if (stbuf->st_mode & S_ISGID) { + mode |= P9_STAT_MODE_SETGID; + } + + if (stbuf->st_mode & S_ISVTX) { + mode |= P9_STAT_MODE_SETVTX; + } + + return mode; +} + +static int coroutine_fn stat_to_v9stat(V9fsPDU *pdu, V9fsPath *path, + const char *basename, + const struct stat *stbuf, + V9fsStat *v9stat) +{ + int err; + + memset(v9stat, 0, sizeof(*v9stat)); + + err = stat_to_qid(pdu, stbuf, &v9stat->qid); + if (err < 0) { + return err; + } + v9stat->mode = stat_to_v9mode(stbuf); + v9stat->atime = stbuf->st_atime; + v9stat->mtime = stbuf->st_mtime; + v9stat->length = stbuf->st_size; + + v9fs_string_free(&v9stat->uid); + v9fs_string_free(&v9stat->gid); + v9fs_string_free(&v9stat->muid); + + v9stat->n_uid = stbuf->st_uid; + v9stat->n_gid = stbuf->st_gid; + v9stat->n_muid = 0; + + v9fs_string_free(&v9stat->extension); + + if (v9stat->mode & P9_STAT_MODE_SYMLINK) { + err = v9fs_co_readlink(pdu, path, &v9stat->extension); + if (err < 0) { + return err; + } + } else if (v9stat->mode & P9_STAT_MODE_DEVICE) { + v9fs_string_sprintf(&v9stat->extension, "%c %u %u", + S_ISCHR(stbuf->st_mode) ? 'c' : 'b', + major(stbuf->st_rdev), minor(stbuf->st_rdev)); + } else if (S_ISDIR(stbuf->st_mode) || S_ISREG(stbuf->st_mode)) { + v9fs_string_sprintf(&v9stat->extension, "%s %lu", + "HARDLINKCOUNT", (unsigned long)stbuf->st_nlink); + } + + v9fs_string_sprintf(&v9stat->name, "%s", basename); + + v9stat->size = 61 + + v9fs_string_size(&v9stat->name) + + v9fs_string_size(&v9stat->uid) + + v9fs_string_size(&v9stat->gid) + + v9fs_string_size(&v9stat->muid) + + v9fs_string_size(&v9stat->extension); + return 0; +} + +#define P9_STATS_MODE 0x00000001ULL +#define P9_STATS_NLINK 0x00000002ULL +#define P9_STATS_UID 0x00000004ULL +#define P9_STATS_GID 0x00000008ULL +#define P9_STATS_RDEV 0x00000010ULL +#define P9_STATS_ATIME 0x00000020ULL +#define P9_STATS_MTIME 0x00000040ULL +#define P9_STATS_CTIME 0x00000080ULL +#define P9_STATS_INO 0x00000100ULL +#define P9_STATS_SIZE 0x00000200ULL +#define P9_STATS_BLOCKS 0x00000400ULL + +#define P9_STATS_BTIME 0x00000800ULL +#define P9_STATS_GEN 0x00001000ULL +#define P9_STATS_DATA_VERSION 0x00002000ULL + +#define P9_STATS_BASIC 0x000007ffULL /* Mask for fields up to BLOCKS */ +#define P9_STATS_ALL 0x00003fffULL /* Mask for All fields above */ + + +/** + * Convert host filesystem's block size into an appropriate block size for + * 9p client (guest OS side). The value returned suggests an "optimum" block + * size for 9p I/O, i.e. to maximize performance. 
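+ *
+ * Worked example (illustrative values; assumes P9_IOHDRSZ is 24): with a
+ * client msize of 8192 and a host block size of 4096, the result is
+ * QEMU_ALIGN_DOWN(8192 - 24, 4096) = 4096; if the host file system
+ * reports no block size, the fallback below is msize - P9_IOHDRSZ = 8168.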
+ *
+ * @pdu: 9p client request
+ * @blksize: host filesystem's block size
+ */
+static int32_t blksize_to_iounit(const V9fsPDU *pdu, int32_t blksize)
+{
+    int32_t iounit = 0;
+    V9fsState *s = pdu->s;
+
+    /*
+     * iounit should be a multiple of blksize (host filesystem block size)
+     * as well as less than (client msize - P9_IOHDRSZ)
+     */
+    if (blksize) {
+        iounit = QEMU_ALIGN_DOWN(s->msize - P9_IOHDRSZ, blksize);
+    }
+    if (!iounit) {
+        iounit = s->msize - P9_IOHDRSZ;
+    }
+    return iounit;
+}
+
+static int32_t stat_to_iounit(const V9fsPDU *pdu, const struct stat *stbuf)
+{
+    return blksize_to_iounit(pdu, stbuf->st_blksize);
+}
+
+static int stat_to_v9stat_dotl(V9fsPDU *pdu, const struct stat *stbuf,
+                               V9fsStatDotl *v9lstat)
+{
+    memset(v9lstat, 0, sizeof(*v9lstat));
+
+    v9lstat->st_mode = stbuf->st_mode;
+    v9lstat->st_nlink = stbuf->st_nlink;
+    v9lstat->st_uid = stbuf->st_uid;
+    v9lstat->st_gid = stbuf->st_gid;
+    v9lstat->st_rdev = stbuf->st_rdev;
+    v9lstat->st_size = stbuf->st_size;
+    v9lstat->st_blksize = stat_to_iounit(pdu, stbuf);
+    v9lstat->st_blocks = stbuf->st_blocks;
+    v9lstat->st_atime_sec = stbuf->st_atime;
+    v9lstat->st_atime_nsec = stbuf->st_atim.tv_nsec;
+    v9lstat->st_mtime_sec = stbuf->st_mtime;
+    v9lstat->st_mtime_nsec = stbuf->st_mtim.tv_nsec;
+    v9lstat->st_ctime_sec = stbuf->st_ctime;
+    v9lstat->st_ctime_nsec = stbuf->st_ctim.tv_nsec;
+    /* Currently we only support BASIC fields in stat */
+    v9lstat->st_result_mask = P9_STATS_BASIC;
+
+    return stat_to_qid(pdu, stbuf, &v9lstat->qid);
+}
+
+static void print_sg(struct iovec *sg, int cnt)
+{
+    int i;
+
+    printf("sg[%d]: {", cnt);
+    for (i = 0; i < cnt; i++) {
+        if (i) {
+            printf(", ");
+        }
+        printf("(%p, %zd)", sg[i].iov_base, sg[i].iov_len);
+    }
+    printf("}\n");
+}
+
+/* Called only for path name based fids */
+static void v9fs_fix_path(V9fsPath *dst, V9fsPath *src, int len)
+{
+    V9fsPath str;
+    v9fs_path_init(&str);
+    v9fs_path_copy(&str, dst);
+    v9fs_path_sprintf(dst, "%s%s", src->data, str.data + len);
+    v9fs_path_free(&str);
+}
+
+static inline bool is_ro_export(FsContext *ctx)
+{
+    return ctx->export_flags & V9FS_RDONLY;
+}
+
+static void coroutine_fn v9fs_version(void *opaque)
+{
+    ssize_t err;
+    V9fsPDU *pdu = opaque;
+    V9fsState *s = pdu->s;
+    V9fsString version;
+    size_t offset = 7;
+
+    v9fs_string_init(&version);
+    err = pdu_unmarshal(pdu, offset, "ds", &s->msize, &version);
+    if (err < 0) {
+        goto out;
+    }
+    trace_v9fs_version(pdu->tag, pdu->id, s->msize, version.data);
+
+    virtfs_reset(pdu);
+
+    if (!strcmp(version.data, "9P2000.u")) {
+        s->proto_version = V9FS_PROTO_2000U;
+    } else if (!strcmp(version.data, "9P2000.L")) {
+        s->proto_version = V9FS_PROTO_2000L;
+    } else {
+        v9fs_string_sprintf(&version, "unknown");
+        /* skip min. msize check, reporting invalid version has priority */
+        goto marshal;
+    }
+
+    if (s->msize < P9_MIN_MSIZE) {
+        err = -EMSGSIZE;
+        error_report(
+            "9pfs: Client requested msize < minimum msize ("
+            stringify(P9_MIN_MSIZE) ") supported by this server."
+        );
+        goto out;
+    }
+
+    /* 8192 is the default msize of Linux clients */
+    if (s->msize <= 8192 && !(s->ctx.export_flags & V9FS_NO_PERF_WARN)) {
+        warn_report_once(
+            "9p: degraded performance: a reasonably high msize should be "
+            "chosen on client/guest side (chosen msize is <= 8192). See "
+            "https://wiki.qemu.org/Documentation/9psetup#msize for details."
+ ); + } + +marshal: + err = pdu_marshal(pdu, offset, "ds", s->msize, &version); + if (err < 0) { + goto out; + } + err += offset; + trace_v9fs_version_return(pdu->tag, pdu->id, s->msize, version.data); +out: + pdu_complete(pdu, err); + v9fs_string_free(&version); +} + +static void coroutine_fn v9fs_attach(void *opaque) +{ + V9fsPDU *pdu = opaque; + V9fsState *s = pdu->s; + int32_t fid, afid, n_uname; + V9fsString uname, aname; + V9fsFidState *fidp; + size_t offset = 7; + V9fsQID qid; + ssize_t err; + struct stat stbuf; + + v9fs_string_init(&uname); + v9fs_string_init(&aname); + err = pdu_unmarshal(pdu, offset, "ddssd", &fid, + &afid, &uname, &aname, &n_uname); + if (err < 0) { + goto out_nofid; + } + trace_v9fs_attach(pdu->tag, pdu->id, fid, afid, uname.data, aname.data); + + fidp = alloc_fid(s, fid); + if (fidp == NULL) { + err = -EINVAL; + goto out_nofid; + } + fidp->uid = n_uname; + err = v9fs_co_name_to_path(pdu, NULL, "/", &fidp->path); + if (err < 0) { + err = -EINVAL; + clunk_fid(s, fid); + goto out; + } + err = v9fs_co_lstat(pdu, &fidp->path, &stbuf); + if (err < 0) { + err = -EINVAL; + clunk_fid(s, fid); + goto out; + } + err = stat_to_qid(pdu, &stbuf, &qid); + if (err < 0) { + err = -EINVAL; + clunk_fid(s, fid); + goto out; + } + + /* + * disable migration if we haven't done already. + * attach could get called multiple times for the same export. + */ + if (!s->migration_blocker) { + error_setg(&s->migration_blocker, + "Migration is disabled when VirtFS export path '%s' is mounted in the guest using mount_tag '%s'", + s->ctx.fs_root ? s->ctx.fs_root : "NULL", s->tag); + err = migrate_add_blocker(s->migration_blocker, NULL); + if (err < 0) { + error_free(s->migration_blocker); + s->migration_blocker = NULL; + clunk_fid(s, fid); + goto out; + } + s->root_fid = fid; + } + + err = pdu_marshal(pdu, offset, "Q", &qid); + if (err < 0) { + clunk_fid(s, fid); + goto out; + } + err += offset; + + memcpy(&s->root_st, &stbuf, sizeof(stbuf)); + trace_v9fs_attach_return(pdu->tag, pdu->id, + qid.type, qid.version, qid.path); +out: + put_fid(pdu, fidp); +out_nofid: + pdu_complete(pdu, err); + v9fs_string_free(&uname); + v9fs_string_free(&aname); +} + +static void coroutine_fn v9fs_stat(void *opaque) +{ + int32_t fid; + V9fsStat v9stat; + ssize_t err = 0; + size_t offset = 7; + struct stat stbuf; + V9fsFidState *fidp; + V9fsPDU *pdu = opaque; + char *basename; + + err = pdu_unmarshal(pdu, offset, "d", &fid); + if (err < 0) { + goto out_nofid; + } + trace_v9fs_stat(pdu->tag, pdu->id, fid); + + fidp = get_fid(pdu, fid); + if (fidp == NULL) { + err = -ENOENT; + goto out_nofid; + } + err = v9fs_co_lstat(pdu, &fidp->path, &stbuf); + if (err < 0) { + goto out; + } + basename = g_path_get_basename(fidp->path.data); + err = stat_to_v9stat(pdu, &fidp->path, basename, &stbuf, &v9stat); + g_free(basename); + if (err < 0) { + goto out; + } + err = pdu_marshal(pdu, offset, "wS", 0, &v9stat); + if (err < 0) { + v9fs_stat_free(&v9stat); + goto out; + } + trace_v9fs_stat_return(pdu->tag, pdu->id, v9stat.mode, + v9stat.atime, v9stat.mtime, v9stat.length); + err += offset; + v9fs_stat_free(&v9stat); +out: + put_fid(pdu, fidp); +out_nofid: + pdu_complete(pdu, err); +} + +static void coroutine_fn v9fs_getattr(void *opaque) +{ + int32_t fid; + size_t offset = 7; + ssize_t retval = 0; + struct stat stbuf; + V9fsFidState *fidp; + uint64_t request_mask; + V9fsStatDotl v9stat_dotl; + V9fsPDU *pdu = opaque; + + retval = pdu_unmarshal(pdu, offset, "dq", &fid, &request_mask); + if (retval < 0) { + goto out_nofid; + } + 
trace_v9fs_getattr(pdu->tag, pdu->id, fid, request_mask); + + fidp = get_fid(pdu, fid); + if (fidp == NULL) { + retval = -ENOENT; + goto out_nofid; + } + /* + * Currently we only support BASIC fields in stat, so there is no + * need to look at request_mask. + */ + retval = v9fs_co_lstat(pdu, &fidp->path, &stbuf); + if (retval < 0) { + goto out; + } + retval = stat_to_v9stat_dotl(pdu, &stbuf, &v9stat_dotl); + if (retval < 0) { + goto out; + } + + /* fill st_gen if requested and supported by underlying fs */ + if (request_mask & P9_STATS_GEN) { + retval = v9fs_co_st_gen(pdu, &fidp->path, stbuf.st_mode, &v9stat_dotl); + switch (retval) { + case 0: + /* we have valid st_gen: update result mask */ + v9stat_dotl.st_result_mask |= P9_STATS_GEN; + break; + case -EINTR: + /* request cancelled, e.g. by Tflush */ + goto out; + default: + /* failed to get st_gen: not fatal, ignore */ + break; + } + } + retval = pdu_marshal(pdu, offset, "A", &v9stat_dotl); + if (retval < 0) { + goto out; + } + retval += offset; + trace_v9fs_getattr_return(pdu->tag, pdu->id, v9stat_dotl.st_result_mask, + v9stat_dotl.st_mode, v9stat_dotl.st_uid, + v9stat_dotl.st_gid); +out: + put_fid(pdu, fidp); +out_nofid: + pdu_complete(pdu, retval); +} + +/* Attribute flags */ +#define P9_ATTR_MODE (1 << 0) +#define P9_ATTR_UID (1 << 1) +#define P9_ATTR_GID (1 << 2) +#define P9_ATTR_SIZE (1 << 3) +#define P9_ATTR_ATIME (1 << 4) +#define P9_ATTR_MTIME (1 << 5) +#define P9_ATTR_CTIME (1 << 6) +#define P9_ATTR_ATIME_SET (1 << 7) +#define P9_ATTR_MTIME_SET (1 << 8) + +#define P9_ATTR_MASK 127 + +static void coroutine_fn v9fs_setattr(void *opaque) +{ + int err = 0; + int32_t fid; + V9fsFidState *fidp; + size_t offset = 7; + V9fsIattr v9iattr; + V9fsPDU *pdu = opaque; + + err = pdu_unmarshal(pdu, offset, "dI", &fid, &v9iattr); + if (err < 0) { + goto out_nofid; + } + + trace_v9fs_setattr(pdu->tag, pdu->id, fid, + v9iattr.valid, v9iattr.mode, v9iattr.uid, v9iattr.gid, + v9iattr.size, v9iattr.atime_sec, v9iattr.mtime_sec); + + fidp = get_fid(pdu, fid); + if (fidp == NULL) { + err = -EINVAL; + goto out_nofid; + } + if (v9iattr.valid & P9_ATTR_MODE) { + err = v9fs_co_chmod(pdu, &fidp->path, v9iattr.mode); + if (err < 0) { + goto out; + } + } + if (v9iattr.valid & (P9_ATTR_ATIME | P9_ATTR_MTIME)) { + struct timespec times[2]; + if (v9iattr.valid & P9_ATTR_ATIME) { + if (v9iattr.valid & P9_ATTR_ATIME_SET) { + times[0].tv_sec = v9iattr.atime_sec; + times[0].tv_nsec = v9iattr.atime_nsec; + } else { + times[0].tv_nsec = UTIME_NOW; + } + } else { + times[0].tv_nsec = UTIME_OMIT; + } + if (v9iattr.valid & P9_ATTR_MTIME) { + if (v9iattr.valid & P9_ATTR_MTIME_SET) { + times[1].tv_sec = v9iattr.mtime_sec; + times[1].tv_nsec = v9iattr.mtime_nsec; + } else { + times[1].tv_nsec = UTIME_NOW; + } + } else { + times[1].tv_nsec = UTIME_OMIT; + } + err = v9fs_co_utimensat(pdu, &fidp->path, times); + if (err < 0) { + goto out; + } + } + /* + * If the only valid entry in iattr is ctime we can call + * chown(-1,-1) to update the ctime of the file + */ + if ((v9iattr.valid & (P9_ATTR_UID | P9_ATTR_GID)) || + ((v9iattr.valid & P9_ATTR_CTIME) + && !((v9iattr.valid & P9_ATTR_MASK) & ~P9_ATTR_CTIME))) { + if (!(v9iattr.valid & P9_ATTR_UID)) { + v9iattr.uid = -1; + } + if (!(v9iattr.valid & P9_ATTR_GID)) { + v9iattr.gid = -1; + } + err = v9fs_co_chown(pdu, &fidp->path, v9iattr.uid, + v9iattr.gid); + if (err < 0) { + goto out; + } + } + if (v9iattr.valid & (P9_ATTR_SIZE)) { + err = v9fs_co_truncate(pdu, &fidp->path, v9iattr.size); + if (err < 0) { + goto out; + } + } + 
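+    /*
+     * Rsetattr carries no payload: on success the reply is just the 7 byte
+     * header (size[4] id[1] tag[2]), hence 'err = offset' below.
+     */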
err = offset; + trace_v9fs_setattr_return(pdu->tag, pdu->id); +out: + put_fid(pdu, fidp); +out_nofid: + pdu_complete(pdu, err); +} + +static int v9fs_walk_marshal(V9fsPDU *pdu, uint16_t nwnames, V9fsQID *qids) +{ + int i; + ssize_t err; + size_t offset = 7; + + err = pdu_marshal(pdu, offset, "w", nwnames); + if (err < 0) { + return err; + } + offset += err; + for (i = 0; i < nwnames; i++) { + err = pdu_marshal(pdu, offset, "Q", &qids[i]); + if (err < 0) { + return err; + } + offset += err; + } + return offset; +} + +static bool name_is_illegal(const char *name) +{ + return !*name || strchr(name, '/') != NULL; +} + +static bool same_stat_id(const struct stat *a, const struct stat *b) +{ + return a->st_dev == b->st_dev && a->st_ino == b->st_ino; +} + +static void coroutine_fn v9fs_walk(void *opaque) +{ + int name_idx; + g_autofree V9fsQID *qids = NULL; + int i, err = 0; + V9fsPath dpath, path; + P9ARRAY_REF(V9fsPath) pathes = NULL; + uint16_t nwnames; + struct stat stbuf, fidst; + g_autofree struct stat *stbufs = NULL; + size_t offset = 7; + int32_t fid, newfid; + P9ARRAY_REF(V9fsString) wnames = NULL; + V9fsFidState *fidp; + V9fsFidState *newfidp = NULL; + V9fsPDU *pdu = opaque; + V9fsState *s = pdu->s; + V9fsQID qid; + + err = pdu_unmarshal(pdu, offset, "ddw", &fid, &newfid, &nwnames); + if (err < 0) { + pdu_complete(pdu, err); + return ; + } + offset += err; + + trace_v9fs_walk(pdu->tag, pdu->id, fid, newfid, nwnames); + + if (nwnames > P9_MAXWELEM) { + err = -EINVAL; + goto out_nofid; + } + if (nwnames) { + P9ARRAY_NEW(V9fsString, wnames, nwnames); + qids = g_new0(V9fsQID, nwnames); + stbufs = g_new0(struct stat, nwnames); + P9ARRAY_NEW(V9fsPath, pathes, nwnames); + for (i = 0; i < nwnames; i++) { + err = pdu_unmarshal(pdu, offset, "s", &wnames[i]); + if (err < 0) { + goto out_nofid; + } + if (name_is_illegal(wnames[i].data)) { + err = -ENOENT; + goto out_nofid; + } + offset += err; + } + } + fidp = get_fid(pdu, fid); + if (fidp == NULL) { + err = -ENOENT; + goto out_nofid; + } + + v9fs_path_init(&dpath); + v9fs_path_init(&path); + /* + * Both dpath and path initially point to fidp. + * Needed to handle request with nwnames == 0 + */ + v9fs_path_copy(&dpath, &fidp->path); + v9fs_path_copy(&path, &fidp->path); + + /* + * To keep latency (i.e. overall execution time for processing this + * Twalk client request) as small as possible, run all the required fs + * driver code altogether inside the following block. + */ + v9fs_co_run_in_worker({ + if (v9fs_request_cancelled(pdu)) { + err = -EINTR; + break; + } + err = s->ops->lstat(&s->ctx, &dpath, &fidst); + if (err < 0) { + err = -errno; + break; + } + stbuf = fidst; + for (name_idx = 0; name_idx < nwnames; name_idx++) { + if (v9fs_request_cancelled(pdu)) { + err = -EINTR; + break; + } + if (!same_stat_id(&pdu->s->root_st, &stbuf) || + strcmp("..", wnames[name_idx].data)) + { + err = s->ops->name_to_path(&s->ctx, &dpath, + wnames[name_idx].data, + &pathes[name_idx]); + if (err < 0) { + err = -errno; + break; + } + if (v9fs_request_cancelled(pdu)) { + err = -EINTR; + break; + } + err = s->ops->lstat(&s->ctx, &pathes[name_idx], &stbuf); + if (err < 0) { + err = -errno; + break; + } + stbufs[name_idx] = stbuf; + v9fs_path_copy(&dpath, &pathes[name_idx]); + } + } + }); + /* + * Handle all the rest of this Twalk request on main thread ... 
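+ * (i.e. map the collected stat results to qids, update the fid paths and
+ * marshal the R_walk reply).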
+ */ + if (err < 0) { + goto out; + } + + err = stat_to_qid(pdu, &fidst, &qid); + if (err < 0) { + goto out; + } + stbuf = fidst; + + /* reset dpath and path */ + v9fs_path_copy(&dpath, &fidp->path); + v9fs_path_copy(&path, &fidp->path); + + for (name_idx = 0; name_idx < nwnames; name_idx++) { + if (!same_stat_id(&pdu->s->root_st, &stbuf) || + strcmp("..", wnames[name_idx].data)) + { + stbuf = stbufs[name_idx]; + err = stat_to_qid(pdu, &stbuf, &qid); + if (err < 0) { + goto out; + } + v9fs_path_copy(&path, &pathes[name_idx]); + v9fs_path_copy(&dpath, &path); + } + memcpy(&qids[name_idx], &qid, sizeof(qid)); + } + if (fid == newfid) { + if (fidp->fid_type != P9_FID_NONE) { + err = -EINVAL; + goto out; + } + v9fs_path_write_lock(s); + v9fs_path_copy(&fidp->path, &path); + v9fs_path_unlock(s); + } else { + newfidp = alloc_fid(s, newfid); + if (newfidp == NULL) { + err = -EINVAL; + goto out; + } + newfidp->uid = fidp->uid; + v9fs_path_copy(&newfidp->path, &path); + } + err = v9fs_walk_marshal(pdu, nwnames, qids); + trace_v9fs_walk_return(pdu->tag, pdu->id, nwnames, qids); +out: + put_fid(pdu, fidp); + if (newfidp) { + put_fid(pdu, newfidp); + } + v9fs_path_free(&dpath); + v9fs_path_free(&path); +out_nofid: + pdu_complete(pdu, err); +} + +static int32_t coroutine_fn get_iounit(V9fsPDU *pdu, V9fsPath *path) +{ + struct statfs stbuf; + int err = v9fs_co_statfs(pdu, path, &stbuf); + + return blksize_to_iounit(pdu, (err >= 0) ? stbuf.f_bsize : 0); +} + +static void coroutine_fn v9fs_open(void *opaque) +{ + int flags; + int32_t fid; + int32_t mode; + V9fsQID qid; + int iounit = 0; + ssize_t err = 0; + size_t offset = 7; + struct stat stbuf; + V9fsFidState *fidp; + V9fsPDU *pdu = opaque; + V9fsState *s = pdu->s; + + if (s->proto_version == V9FS_PROTO_2000L) { + err = pdu_unmarshal(pdu, offset, "dd", &fid, &mode); + } else { + uint8_t modebyte; + err = pdu_unmarshal(pdu, offset, "db", &fid, &modebyte); + mode = modebyte; + } + if (err < 0) { + goto out_nofid; + } + trace_v9fs_open(pdu->tag, pdu->id, fid, mode); + + fidp = get_fid(pdu, fid); + if (fidp == NULL) { + err = -ENOENT; + goto out_nofid; + } + if (fidp->fid_type != P9_FID_NONE) { + err = -EINVAL; + goto out; + } + + err = v9fs_co_lstat(pdu, &fidp->path, &stbuf); + if (err < 0) { + goto out; + } + err = stat_to_qid(pdu, &stbuf, &qid); + if (err < 0) { + goto out; + } + if (S_ISDIR(stbuf.st_mode)) { + err = v9fs_co_opendir(pdu, fidp); + if (err < 0) { + goto out; + } + fidp->fid_type = P9_FID_DIR; + err = pdu_marshal(pdu, offset, "Qd", &qid, 0); + if (err < 0) { + goto out; + } + err += offset; + } else { + if (s->proto_version == V9FS_PROTO_2000L) { + flags = get_dotl_openflags(s, mode); + } else { + flags = omode_to_uflags(mode); + } + if (is_ro_export(&s->ctx)) { + if (mode & O_WRONLY || mode & O_RDWR || + mode & O_APPEND || mode & O_TRUNC) { + err = -EROFS; + goto out; + } + } + err = v9fs_co_open(pdu, fidp, flags); + if (err < 0) { + goto out; + } + fidp->fid_type = P9_FID_FILE; + fidp->open_flags = flags; + if (flags & O_EXCL) { + /* + * We let the host file system do O_EXCL check + * We should not reclaim such fd + */ + fidp->flags |= FID_NON_RECLAIMABLE; + } + iounit = get_iounit(pdu, &fidp->path); + err = pdu_marshal(pdu, offset, "Qd", &qid, iounit); + if (err < 0) { + goto out; + } + err += offset; + } + trace_v9fs_open_return(pdu->tag, pdu->id, + qid.type, qid.version, qid.path, iounit); +out: + put_fid(pdu, fidp); +out_nofid: + pdu_complete(pdu, err); +} + +static void coroutine_fn v9fs_lcreate(void *opaque) +{ + int32_t dfid, 
flags, mode; + gid_t gid; + ssize_t err = 0; + ssize_t offset = 7; + V9fsString name; + V9fsFidState *fidp; + struct stat stbuf; + V9fsQID qid; + int32_t iounit; + V9fsPDU *pdu = opaque; + + v9fs_string_init(&name); + err = pdu_unmarshal(pdu, offset, "dsddd", &dfid, + &name, &flags, &mode, &gid); + if (err < 0) { + goto out_nofid; + } + trace_v9fs_lcreate(pdu->tag, pdu->id, dfid, flags, mode, gid); + + if (name_is_illegal(name.data)) { + err = -ENOENT; + goto out_nofid; + } + + if (!strcmp(".", name.data) || !strcmp("..", name.data)) { + err = -EEXIST; + goto out_nofid; + } + + fidp = get_fid(pdu, dfid); + if (fidp == NULL) { + err = -ENOENT; + goto out_nofid; + } + if (fidp->fid_type != P9_FID_NONE) { + err = -EINVAL; + goto out; + } + + flags = get_dotl_openflags(pdu->s, flags); + err = v9fs_co_open2(pdu, fidp, &name, gid, + flags | O_CREAT, mode, &stbuf); + if (err < 0) { + goto out; + } + fidp->fid_type = P9_FID_FILE; + fidp->open_flags = flags; + if (flags & O_EXCL) { + /* + * We let the host file system do O_EXCL check + * We should not reclaim such fd + */ + fidp->flags |= FID_NON_RECLAIMABLE; + } + iounit = get_iounit(pdu, &fidp->path); + err = stat_to_qid(pdu, &stbuf, &qid); + if (err < 0) { + goto out; + } + err = pdu_marshal(pdu, offset, "Qd", &qid, iounit); + if (err < 0) { + goto out; + } + err += offset; + trace_v9fs_lcreate_return(pdu->tag, pdu->id, + qid.type, qid.version, qid.path, iounit); +out: + put_fid(pdu, fidp); +out_nofid: + pdu_complete(pdu, err); + v9fs_string_free(&name); +} + +static void coroutine_fn v9fs_fsync(void *opaque) +{ + int err; + int32_t fid; + int datasync; + size_t offset = 7; + V9fsFidState *fidp; + V9fsPDU *pdu = opaque; + + err = pdu_unmarshal(pdu, offset, "dd", &fid, &datasync); + if (err < 0) { + goto out_nofid; + } + trace_v9fs_fsync(pdu->tag, pdu->id, fid, datasync); + + fidp = get_fid(pdu, fid); + if (fidp == NULL) { + err = -ENOENT; + goto out_nofid; + } + err = v9fs_co_fsync(pdu, fidp, datasync); + if (!err) { + err = offset; + } + put_fid(pdu, fidp); +out_nofid: + pdu_complete(pdu, err); +} + +static void coroutine_fn v9fs_clunk(void *opaque) +{ + int err; + int32_t fid; + size_t offset = 7; + V9fsFidState *fidp; + V9fsPDU *pdu = opaque; + V9fsState *s = pdu->s; + + err = pdu_unmarshal(pdu, offset, "d", &fid); + if (err < 0) { + goto out_nofid; + } + trace_v9fs_clunk(pdu->tag, pdu->id, fid); + + fidp = clunk_fid(s, fid); + if (fidp == NULL) { + err = -ENOENT; + goto out_nofid; + } + /* + * Bump the ref so that put_fid will + * free the fid. + */ + fidp->ref++; + err = put_fid(pdu, fidp); + if (!err) { + err = offset; + } +out_nofid: + pdu_complete(pdu, err); +} + +/* + * Create a QEMUIOVector for a sub-region of PDU iovecs + * + * @qiov: uninitialized QEMUIOVector + * @skip: number of bytes to skip from beginning of PDU + * @size: number of bytes to include + * @is_write: true - write, false - read + * + * The resulting QEMUIOVector has heap-allocated iovecs and must be cleaned up + * with qemu_iovec_destroy(). 
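+ *
+ * A minimal usage sketch (mirroring what v9fs_read() above does for the
+ * payload behind the 11 byte Rread header; names are local to the example):
+ *
+ *     QEMUIOVector qiov;
+ *     v9fs_init_qiov_from_pdu(&qiov, pdu, 11, max_count, false);
+ *     ...read the payload into qiov.iov[0..qiov.niov-1]...
+ *     qemu_iovec_destroy(&qiov);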
+ */ +static void v9fs_init_qiov_from_pdu(QEMUIOVector *qiov, V9fsPDU *pdu, + size_t skip, size_t size, + bool is_write) +{ + QEMUIOVector elem; + struct iovec *iov; + unsigned int niov; + + if (is_write) { + pdu->s->transport->init_out_iov_from_pdu(pdu, &iov, &niov, size + skip); + } else { + pdu->s->transport->init_in_iov_from_pdu(pdu, &iov, &niov, size + skip); + } + + qemu_iovec_init_external(&elem, iov, niov); + qemu_iovec_init(qiov, niov); + qemu_iovec_concat(qiov, &elem, skip, size); +} + +static int v9fs_xattr_read(V9fsState *s, V9fsPDU *pdu, V9fsFidState *fidp, + uint64_t off, uint32_t max_count) +{ + ssize_t err; + size_t offset = 7; + uint64_t read_count; + QEMUIOVector qiov_full; + + if (fidp->fs.xattr.len < off) { + read_count = 0; + } else { + read_count = fidp->fs.xattr.len - off; + } + if (read_count > max_count) { + read_count = max_count; + } + err = pdu_marshal(pdu, offset, "d", read_count); + if (err < 0) { + return err; + } + offset += err; + + v9fs_init_qiov_from_pdu(&qiov_full, pdu, offset, read_count, false); + err = v9fs_pack(qiov_full.iov, qiov_full.niov, 0, + ((char *)fidp->fs.xattr.value) + off, + read_count); + qemu_iovec_destroy(&qiov_full); + if (err < 0) { + return err; + } + offset += err; + return offset; +} + +static int coroutine_fn v9fs_do_readdir_with_stat(V9fsPDU *pdu, + V9fsFidState *fidp, + uint32_t max_count) +{ + V9fsPath path; + V9fsStat v9stat; + int len, err = 0; + int32_t count = 0; + struct stat stbuf; + off_t saved_dir_pos; + struct dirent *dent; + + /* save the directory position */ + saved_dir_pos = v9fs_co_telldir(pdu, fidp); + if (saved_dir_pos < 0) { + return saved_dir_pos; + } + + while (1) { + v9fs_path_init(&path); + + v9fs_readdir_lock(&fidp->fs.dir); + + err = v9fs_co_readdir(pdu, fidp, &dent); + if (err || !dent) { + break; + } + err = v9fs_co_name_to_path(pdu, &fidp->path, dent->d_name, &path); + if (err < 0) { + break; + } + err = v9fs_co_lstat(pdu, &path, &stbuf); + if (err < 0) { + break; + } + err = stat_to_v9stat(pdu, &path, dent->d_name, &stbuf, &v9stat); + if (err < 0) { + break; + } + if ((count + v9stat.size + 2) > max_count) { + v9fs_readdir_unlock(&fidp->fs.dir); + + /* Ran out of buffer. 
Set dir back to old position and return */ + v9fs_co_seekdir(pdu, fidp, saved_dir_pos); + v9fs_stat_free(&v9stat); + v9fs_path_free(&path); + return count; + } + + /* 11 = 7 + 4 (7 = start offset, 4 = space for storing count) */ + len = pdu_marshal(pdu, 11 + count, "S", &v9stat); + + v9fs_readdir_unlock(&fidp->fs.dir); + + if (len < 0) { + v9fs_co_seekdir(pdu, fidp, saved_dir_pos); + v9fs_stat_free(&v9stat); + v9fs_path_free(&path); + return len; + } + count += len; + v9fs_stat_free(&v9stat); + v9fs_path_free(&path); + saved_dir_pos = dent->d_off; + } + + v9fs_readdir_unlock(&fidp->fs.dir); + + v9fs_path_free(&path); + if (err < 0) { + return err; + } + return count; +} + +static void coroutine_fn v9fs_read(void *opaque) +{ + int32_t fid; + uint64_t off; + ssize_t err = 0; + int32_t count = 0; + size_t offset = 7; + uint32_t max_count; + V9fsFidState *fidp; + V9fsPDU *pdu = opaque; + V9fsState *s = pdu->s; + + err = pdu_unmarshal(pdu, offset, "dqd", &fid, &off, &max_count); + if (err < 0) { + goto out_nofid; + } + trace_v9fs_read(pdu->tag, pdu->id, fid, off, max_count); + + fidp = get_fid(pdu, fid); + if (fidp == NULL) { + err = -EINVAL; + goto out_nofid; + } + if (fidp->fid_type == P9_FID_DIR) { + if (s->proto_version != V9FS_PROTO_2000U) { + warn_report_once( + "9p: bad client: T_read request on directory only expected " + "with 9P2000.u protocol version" + ); + err = -EOPNOTSUPP; + goto out; + } + if (off == 0) { + v9fs_co_rewinddir(pdu, fidp); + } + count = v9fs_do_readdir_with_stat(pdu, fidp, max_count); + if (count < 0) { + err = count; + goto out; + } + err = pdu_marshal(pdu, offset, "d", count); + if (err < 0) { + goto out; + } + err += offset + count; + } else if (fidp->fid_type == P9_FID_FILE) { + QEMUIOVector qiov_full; + QEMUIOVector qiov; + int32_t len; + + v9fs_init_qiov_from_pdu(&qiov_full, pdu, offset + 4, max_count, false); + qemu_iovec_init(&qiov, qiov_full.niov); + do { + qemu_iovec_reset(&qiov); + qemu_iovec_concat(&qiov, &qiov_full, count, qiov_full.size - count); + if (0) { + print_sg(qiov.iov, qiov.niov); + } + /* Loop in case of EINTR */ + do { + len = v9fs_co_preadv(pdu, fidp, qiov.iov, qiov.niov, off); + if (len >= 0) { + off += len; + count += len; + } + } while (len == -EINTR && !pdu->cancelled); + if (len < 0) { + /* IO error return the error */ + err = len; + goto out_free_iovec; + } + } while (count < max_count && len > 0); + err = pdu_marshal(pdu, offset, "d", count); + if (err < 0) { + goto out_free_iovec; + } + err += offset + count; +out_free_iovec: + qemu_iovec_destroy(&qiov); + qemu_iovec_destroy(&qiov_full); + } else if (fidp->fid_type == P9_FID_XATTR) { + err = v9fs_xattr_read(s, pdu, fidp, off, max_count); + } else { + err = -EINVAL; + } + trace_v9fs_read_return(pdu->tag, pdu->id, count, err); +out: + put_fid(pdu, fidp); +out_nofid: + pdu_complete(pdu, err); +} + +/** + * Returns size required in Rreaddir response for the passed dirent @p name. + * + * @param name - directory entry's name (i.e. 
file name, directory name) + * @returns required size in bytes + */ +size_t v9fs_readdir_response_size(V9fsString *name) +{ + /* + * Size of each dirent on the wire: size of qid (13) + size of offset (8) + * size of type (1) + size of name.size (2) + strlen(name.data) + */ + return 24 + v9fs_string_size(name); +} + +static void v9fs_free_dirents(struct V9fsDirEnt *e) +{ + struct V9fsDirEnt *next = NULL; + + for (; e; e = next) { + next = e->next; + g_free(e->dent); + g_free(e->st); + g_free(e); + } +} + +static int coroutine_fn v9fs_do_readdir(V9fsPDU *pdu, V9fsFidState *fidp, + off_t offset, int32_t max_count) +{ + size_t size; + V9fsQID qid; + V9fsString name; + int len, err = 0; + int32_t count = 0; + struct dirent *dent; + struct stat *st; + struct V9fsDirEnt *entries = NULL; + + /* + * inode remapping requires the device id, which in turn might be + * different for different directory entries, so if inode remapping is + * enabled we have to make a full stat for each directory entry + */ + const bool dostat = pdu->s->ctx.export_flags & V9FS_REMAP_INODES; + + /* + * Fetch all required directory entries altogether on a background IO + * thread from fs driver. We don't want to do that for each entry + * individually, because hopping between threads (this main IO thread + * and background IO driver thread) would sum up to huge latencies. + */ + count = v9fs_co_readdir_many(pdu, fidp, &entries, offset, max_count, + dostat); + if (count < 0) { + err = count; + count = 0; + goto out; + } + count = 0; + + for (struct V9fsDirEnt *e = entries; e; e = e->next) { + dent = e->dent; + + if (pdu->s->ctx.export_flags & V9FS_REMAP_INODES) { + st = e->st; + /* e->st should never be NULL, but just to be sure */ + if (!st) { + err = -1; + break; + } + + /* remap inode */ + err = stat_to_qid(pdu, st, &qid); + if (err < 0) { + break; + } + } else { + /* + * Fill up just the path field of qid because the client uses + * only that. To fill the entire qid structure we will have + * to stat each dirent found, which is expensive. For the + * latter reason we don't call stat_to_qid() here. Only drawback + * is that no multi-device export detection of stat_to_qid() + * would be done and provided as error to the user here. But + * user would get that error anyway when accessing those + * files/dirs through other ways. 
+ */ + size = MIN(sizeof(dent->d_ino), sizeof(qid.path)); + memcpy(&qid.path, &dent->d_ino, size); + /* Fill the other fields with dummy values */ + qid.type = 0; + qid.version = 0; + } + + v9fs_string_init(&name); + v9fs_string_sprintf(&name, "%s", dent->d_name); + + /* 11 = 7 + 4 (7 = start offset, 4 = space for storing count) */ + len = pdu_marshal(pdu, 11 + count, "Qqbs", + &qid, dent->d_off, + dent->d_type, &name); + + v9fs_string_free(&name); + + if (len < 0) { + err = len; + break; + } + + count += len; + } + +out: + v9fs_free_dirents(entries); + if (err < 0) { + return err; + } + return count; +} + +static void coroutine_fn v9fs_readdir(void *opaque) +{ + int32_t fid; + V9fsFidState *fidp; + ssize_t retval = 0; + size_t offset = 7; + uint64_t initial_offset; + int32_t count; + uint32_t max_count; + V9fsPDU *pdu = opaque; + V9fsState *s = pdu->s; + + retval = pdu_unmarshal(pdu, offset, "dqd", &fid, + &initial_offset, &max_count); + if (retval < 0) { + goto out_nofid; + } + trace_v9fs_readdir(pdu->tag, pdu->id, fid, initial_offset, max_count); + + /* Enough space for a R_readdir header: size[4] Rreaddir tag[2] count[4] */ + if (max_count > s->msize - 11) { + max_count = s->msize - 11; + warn_report_once( + "9p: bad client: T_readdir with count > msize - 11" + ); + } + + fidp = get_fid(pdu, fid); + if (fidp == NULL) { + retval = -EINVAL; + goto out_nofid; + } + if (!fidp->fs.dir.stream) { + retval = -EINVAL; + goto out; + } + if (s->proto_version != V9FS_PROTO_2000L) { + warn_report_once( + "9p: bad client: T_readdir request only expected with 9P2000.L " + "protocol version" + ); + retval = -EOPNOTSUPP; + goto out; + } + count = v9fs_do_readdir(pdu, fidp, (off_t) initial_offset, max_count); + if (count < 0) { + retval = count; + goto out; + } + retval = pdu_marshal(pdu, offset, "d", count); + if (retval < 0) { + goto out; + } + retval += count + offset; + trace_v9fs_readdir_return(pdu->tag, pdu->id, count, retval); +out: + put_fid(pdu, fidp); +out_nofid: + pdu_complete(pdu, retval); +} + +static int v9fs_xattr_write(V9fsState *s, V9fsPDU *pdu, V9fsFidState *fidp, + uint64_t off, uint32_t count, + struct iovec *sg, int cnt) +{ + int i, to_copy; + ssize_t err = 0; + uint64_t write_count; + size_t offset = 7; + + + if (fidp->fs.xattr.len < off) { + return -ENOSPC; + } + write_count = fidp->fs.xattr.len - off; + if (write_count > count) { + write_count = count; + } + err = pdu_marshal(pdu, offset, "d", write_count); + if (err < 0) { + return err; + } + err += offset; + fidp->fs.xattr.copied_len += write_count; + /* + * Now copy the content from sg list + */ + for (i = 0; i < cnt; i++) { + if (write_count > sg[i].iov_len) { + to_copy = sg[i].iov_len; + } else { + to_copy = write_count; + } + memcpy((char *)fidp->fs.xattr.value + off, sg[i].iov_base, to_copy); + /* updating vs->off since we are not using below */ + off += to_copy; + write_count -= to_copy; + } + + return err; +} + +static void coroutine_fn v9fs_write(void *opaque) +{ + ssize_t err; + int32_t fid; + uint64_t off; + uint32_t count; + int32_t len = 0; + int32_t total = 0; + size_t offset = 7; + V9fsFidState *fidp; + V9fsPDU *pdu = opaque; + V9fsState *s = pdu->s; + QEMUIOVector qiov_full; + QEMUIOVector qiov; + + err = pdu_unmarshal(pdu, offset, "dqd", &fid, &off, &count); + if (err < 0) { + pdu_complete(pdu, err); + return; + } + offset += err; + v9fs_init_qiov_from_pdu(&qiov_full, pdu, offset, count, true); + trace_v9fs_write(pdu->tag, pdu->id, fid, off, count, qiov_full.niov); + + fidp = get_fid(pdu, fid); + if (fidp == 
NULL) { + err = -EINVAL; + goto out_nofid; + } + if (fidp->fid_type == P9_FID_FILE) { + if (fidp->fs.fd == -1) { + err = -EINVAL; + goto out; + } + } else if (fidp->fid_type == P9_FID_XATTR) { + /* + * setxattr operation + */ + err = v9fs_xattr_write(s, pdu, fidp, off, count, + qiov_full.iov, qiov_full.niov); + goto out; + } else { + err = -EINVAL; + goto out; + } + qemu_iovec_init(&qiov, qiov_full.niov); + do { + qemu_iovec_reset(&qiov); + qemu_iovec_concat(&qiov, &qiov_full, total, qiov_full.size - total); + if (0) { + print_sg(qiov.iov, qiov.niov); + } + /* Loop in case of EINTR */ + do { + len = v9fs_co_pwritev(pdu, fidp, qiov.iov, qiov.niov, off); + if (len >= 0) { + off += len; + total += len; + } + } while (len == -EINTR && !pdu->cancelled); + if (len < 0) { + /* IO error return the error */ + err = len; + goto out_qiov; + } + } while (total < count && len > 0); + + offset = 7; + err = pdu_marshal(pdu, offset, "d", total); + if (err < 0) { + goto out_qiov; + } + err += offset; + trace_v9fs_write_return(pdu->tag, pdu->id, total, err); +out_qiov: + qemu_iovec_destroy(&qiov); +out: + put_fid(pdu, fidp); +out_nofid: + qemu_iovec_destroy(&qiov_full); + pdu_complete(pdu, err); +} + +static void coroutine_fn v9fs_create(void *opaque) +{ + int32_t fid; + int err = 0; + size_t offset = 7; + V9fsFidState *fidp; + V9fsQID qid; + int32_t perm; + int8_t mode; + V9fsPath path; + struct stat stbuf; + V9fsString name; + V9fsString extension; + int iounit; + V9fsPDU *pdu = opaque; + V9fsState *s = pdu->s; + + v9fs_path_init(&path); + v9fs_string_init(&name); + v9fs_string_init(&extension); + err = pdu_unmarshal(pdu, offset, "dsdbs", &fid, &name, + &perm, &mode, &extension); + if (err < 0) { + goto out_nofid; + } + trace_v9fs_create(pdu->tag, pdu->id, fid, name.data, perm, mode); + + if (name_is_illegal(name.data)) { + err = -ENOENT; + goto out_nofid; + } + + if (!strcmp(".", name.data) || !strcmp("..", name.data)) { + err = -EEXIST; + goto out_nofid; + } + + fidp = get_fid(pdu, fid); + if (fidp == NULL) { + err = -EINVAL; + goto out_nofid; + } + if (fidp->fid_type != P9_FID_NONE) { + err = -EINVAL; + goto out; + } + if (perm & P9_STAT_MODE_DIR) { + err = v9fs_co_mkdir(pdu, fidp, &name, perm & 0777, + fidp->uid, -1, &stbuf); + if (err < 0) { + goto out; + } + err = v9fs_co_name_to_path(pdu, &fidp->path, name.data, &path); + if (err < 0) { + goto out; + } + v9fs_path_write_lock(s); + v9fs_path_copy(&fidp->path, &path); + v9fs_path_unlock(s); + err = v9fs_co_opendir(pdu, fidp); + if (err < 0) { + goto out; + } + fidp->fid_type = P9_FID_DIR; + } else if (perm & P9_STAT_MODE_SYMLINK) { + err = v9fs_co_symlink(pdu, fidp, &name, + extension.data, -1 , &stbuf); + if (err < 0) { + goto out; + } + err = v9fs_co_name_to_path(pdu, &fidp->path, name.data, &path); + if (err < 0) { + goto out; + } + v9fs_path_write_lock(s); + v9fs_path_copy(&fidp->path, &path); + v9fs_path_unlock(s); + } else if (perm & P9_STAT_MODE_LINK) { + int32_t ofid = atoi(extension.data); + V9fsFidState *ofidp = get_fid(pdu, ofid); + if (ofidp == NULL) { + err = -EINVAL; + goto out; + } + err = v9fs_co_link(pdu, ofidp, fidp, &name); + put_fid(pdu, ofidp); + if (err < 0) { + goto out; + } + err = v9fs_co_name_to_path(pdu, &fidp->path, name.data, &path); + if (err < 0) { + fidp->fid_type = P9_FID_NONE; + goto out; + } + v9fs_path_write_lock(s); + v9fs_path_copy(&fidp->path, &path); + v9fs_path_unlock(s); + err = v9fs_co_lstat(pdu, &fidp->path, &stbuf); + if (err < 0) { + fidp->fid_type = P9_FID_NONE; + goto out; + } + } else if (perm & 
P9_STAT_MODE_DEVICE) { + char ctype; + uint32_t major, minor; + mode_t nmode = 0; + + if (sscanf(extension.data, "%c %u %u", &ctype, &major, &minor) != 3) { + err = -errno; + goto out; + } + + switch (ctype) { + case 'c': + nmode = S_IFCHR; + break; + case 'b': + nmode = S_IFBLK; + break; + default: + err = -EIO; + goto out; + } + + nmode |= perm & 0777; + err = v9fs_co_mknod(pdu, fidp, &name, fidp->uid, -1, + makedev(major, minor), nmode, &stbuf); + if (err < 0) { + goto out; + } + err = v9fs_co_name_to_path(pdu, &fidp->path, name.data, &path); + if (err < 0) { + goto out; + } + v9fs_path_write_lock(s); + v9fs_path_copy(&fidp->path, &path); + v9fs_path_unlock(s); + } else if (perm & P9_STAT_MODE_NAMED_PIPE) { + err = v9fs_co_mknod(pdu, fidp, &name, fidp->uid, -1, + 0, S_IFIFO | (perm & 0777), &stbuf); + if (err < 0) { + goto out; + } + err = v9fs_co_name_to_path(pdu, &fidp->path, name.data, &path); + if (err < 0) { + goto out; + } + v9fs_path_write_lock(s); + v9fs_path_copy(&fidp->path, &path); + v9fs_path_unlock(s); + } else if (perm & P9_STAT_MODE_SOCKET) { + err = v9fs_co_mknod(pdu, fidp, &name, fidp->uid, -1, + 0, S_IFSOCK | (perm & 0777), &stbuf); + if (err < 0) { + goto out; + } + err = v9fs_co_name_to_path(pdu, &fidp->path, name.data, &path); + if (err < 0) { + goto out; + } + v9fs_path_write_lock(s); + v9fs_path_copy(&fidp->path, &path); + v9fs_path_unlock(s); + } else { + err = v9fs_co_open2(pdu, fidp, &name, -1, + omode_to_uflags(mode) | O_CREAT, perm, &stbuf); + if (err < 0) { + goto out; + } + fidp->fid_type = P9_FID_FILE; + fidp->open_flags = omode_to_uflags(mode); + if (fidp->open_flags & O_EXCL) { + /* + * We let the host file system do O_EXCL check + * We should not reclaim such fd + */ + fidp->flags |= FID_NON_RECLAIMABLE; + } + } + iounit = get_iounit(pdu, &fidp->path); + err = stat_to_qid(pdu, &stbuf, &qid); + if (err < 0) { + goto out; + } + err = pdu_marshal(pdu, offset, "Qd", &qid, iounit); + if (err < 0) { + goto out; + } + err += offset; + trace_v9fs_create_return(pdu->tag, pdu->id, + qid.type, qid.version, qid.path, iounit); +out: + put_fid(pdu, fidp); +out_nofid: + pdu_complete(pdu, err); + v9fs_string_free(&name); + v9fs_string_free(&extension); + v9fs_path_free(&path); +} + +static void coroutine_fn v9fs_symlink(void *opaque) +{ + V9fsPDU *pdu = opaque; + V9fsString name; + V9fsString symname; + V9fsFidState *dfidp; + V9fsQID qid; + struct stat stbuf; + int32_t dfid; + int err = 0; + gid_t gid; + size_t offset = 7; + + v9fs_string_init(&name); + v9fs_string_init(&symname); + err = pdu_unmarshal(pdu, offset, "dssd", &dfid, &name, &symname, &gid); + if (err < 0) { + goto out_nofid; + } + trace_v9fs_symlink(pdu->tag, pdu->id, dfid, name.data, symname.data, gid); + + if (name_is_illegal(name.data)) { + err = -ENOENT; + goto out_nofid; + } + + if (!strcmp(".", name.data) || !strcmp("..", name.data)) { + err = -EEXIST; + goto out_nofid; + } + + dfidp = get_fid(pdu, dfid); + if (dfidp == NULL) { + err = -EINVAL; + goto out_nofid; + } + err = v9fs_co_symlink(pdu, dfidp, &name, symname.data, gid, &stbuf); + if (err < 0) { + goto out; + } + err = stat_to_qid(pdu, &stbuf, &qid); + if (err < 0) { + goto out; + } + err = pdu_marshal(pdu, offset, "Q", &qid); + if (err < 0) { + goto out; + } + err += offset; + trace_v9fs_symlink_return(pdu->tag, pdu->id, + qid.type, qid.version, qid.path); +out: + put_fid(pdu, dfidp); +out_nofid: + pdu_complete(pdu, err); + v9fs_string_free(&name); + v9fs_string_free(&symname); +} + +static void coroutine_fn v9fs_flush(void *opaque) +{ + 
ssize_t err; + int16_t tag; + size_t offset = 7; + V9fsPDU *cancel_pdu = NULL; + V9fsPDU *pdu = opaque; + V9fsState *s = pdu->s; + + err = pdu_unmarshal(pdu, offset, "w", &tag); + if (err < 0) { + pdu_complete(pdu, err); + return; + } + trace_v9fs_flush(pdu->tag, pdu->id, tag); + + if (pdu->tag == tag) { + warn_report("the guest sent a self-referencing 9P flush request"); + } else { + QLIST_FOREACH(cancel_pdu, &s->active_list, next) { + if (cancel_pdu->tag == tag) { + break; + } + } + } + if (cancel_pdu) { + cancel_pdu->cancelled = 1; + /* + * Wait for pdu to complete. + */ + qemu_co_queue_wait(&cancel_pdu->complete, NULL); + if (!qemu_co_queue_next(&cancel_pdu->complete)) { + cancel_pdu->cancelled = 0; + pdu_free(cancel_pdu); + } + } + pdu_complete(pdu, 7); +} + +static void coroutine_fn v9fs_link(void *opaque) +{ + V9fsPDU *pdu = opaque; + int32_t dfid, oldfid; + V9fsFidState *dfidp, *oldfidp; + V9fsString name; + size_t offset = 7; + int err = 0; + + v9fs_string_init(&name); + err = pdu_unmarshal(pdu, offset, "dds", &dfid, &oldfid, &name); + if (err < 0) { + goto out_nofid; + } + trace_v9fs_link(pdu->tag, pdu->id, dfid, oldfid, name.data); + + if (name_is_illegal(name.data)) { + err = -ENOENT; + goto out_nofid; + } + + if (!strcmp(".", name.data) || !strcmp("..", name.data)) { + err = -EEXIST; + goto out_nofid; + } + + dfidp = get_fid(pdu, dfid); + if (dfidp == NULL) { + err = -ENOENT; + goto out_nofid; + } + + oldfidp = get_fid(pdu, oldfid); + if (oldfidp == NULL) { + err = -ENOENT; + goto out; + } + err = v9fs_co_link(pdu, oldfidp, dfidp, &name); + if (!err) { + err = offset; + } + put_fid(pdu, oldfidp); +out: + put_fid(pdu, dfidp); +out_nofid: + v9fs_string_free(&name); + pdu_complete(pdu, err); +} + +/* Only works with path name based fid */ +static void coroutine_fn v9fs_remove(void *opaque) +{ + int32_t fid; + int err = 0; + size_t offset = 7; + V9fsFidState *fidp; + V9fsPDU *pdu = opaque; + + err = pdu_unmarshal(pdu, offset, "d", &fid); + if (err < 0) { + goto out_nofid; + } + trace_v9fs_remove(pdu->tag, pdu->id, fid); + + fidp = get_fid(pdu, fid); + if (fidp == NULL) { + err = -EINVAL; + goto out_nofid; + } + /* if fs driver is not path based, return EOPNOTSUPP */ + if (!(pdu->s->ctx.export_flags & V9FS_PATHNAME_FSCONTEXT)) { + err = -EOPNOTSUPP; + goto out_err; + } + /* + * IF the file is unlinked, we cannot reopen + * the file later. 
So don't reclaim fd + */ + err = v9fs_mark_fids_unreclaim(pdu, &fidp->path); + if (err < 0) { + goto out_err; + } + err = v9fs_co_remove(pdu, &fidp->path); + if (!err) { + err = offset; + } +out_err: + /* For TREMOVE we need to clunk the fid even on failed remove */ + clunk_fid(pdu->s, fidp->fid); + put_fid(pdu, fidp); +out_nofid: + pdu_complete(pdu, err); +} + +static void coroutine_fn v9fs_unlinkat(void *opaque) +{ + int err = 0; + V9fsString name; + int32_t dfid, flags, rflags = 0; + size_t offset = 7; + V9fsPath path; + V9fsFidState *dfidp; + V9fsPDU *pdu = opaque; + + v9fs_string_init(&name); + err = pdu_unmarshal(pdu, offset, "dsd", &dfid, &name, &flags); + if (err < 0) { + goto out_nofid; + } + + if (name_is_illegal(name.data)) { + err = -ENOENT; + goto out_nofid; + } + + if (!strcmp(".", name.data)) { + err = -EINVAL; + goto out_nofid; + } + + if (!strcmp("..", name.data)) { + err = -ENOTEMPTY; + goto out_nofid; + } + + if (flags & ~P9_DOTL_AT_REMOVEDIR) { + err = -EINVAL; + goto out_nofid; + } + + if (flags & P9_DOTL_AT_REMOVEDIR) { + rflags |= AT_REMOVEDIR; + } + + dfidp = get_fid(pdu, dfid); + if (dfidp == NULL) { + err = -EINVAL; + goto out_nofid; + } + /* + * IF the file is unlinked, we cannot reopen + * the file later. So don't reclaim fd + */ + v9fs_path_init(&path); + err = v9fs_co_name_to_path(pdu, &dfidp->path, name.data, &path); + if (err < 0) { + goto out_err; + } + err = v9fs_mark_fids_unreclaim(pdu, &path); + if (err < 0) { + goto out_err; + } + err = v9fs_co_unlinkat(pdu, &dfidp->path, &name, rflags); + if (!err) { + err = offset; + } +out_err: + put_fid(pdu, dfidp); + v9fs_path_free(&path); +out_nofid: + pdu_complete(pdu, err); + v9fs_string_free(&name); +} + + +/* Only works with path name based fid */ +static int coroutine_fn v9fs_complete_rename(V9fsPDU *pdu, V9fsFidState *fidp, + int32_t newdirfid, + V9fsString *name) +{ + int err = 0; + V9fsPath new_path; + V9fsFidState *tfidp; + V9fsState *s = pdu->s; + V9fsFidState *dirfidp = NULL; + + v9fs_path_init(&new_path); + if (newdirfid != -1) { + dirfidp = get_fid(pdu, newdirfid); + if (dirfidp == NULL) { + return -ENOENT; + } + if (fidp->fid_type != P9_FID_NONE) { + err = -EINVAL; + goto out; + } + err = v9fs_co_name_to_path(pdu, &dirfidp->path, name->data, &new_path); + if (err < 0) { + goto out; + } + } else { + char *dir_name = g_path_get_dirname(fidp->path.data); + V9fsPath dir_path; + + v9fs_path_init(&dir_path); + v9fs_path_sprintf(&dir_path, "%s", dir_name); + g_free(dir_name); + + err = v9fs_co_name_to_path(pdu, &dir_path, name->data, &new_path); + v9fs_path_free(&dir_path); + if (err < 0) { + goto out; + } + } + err = v9fs_co_rename(pdu, &fidp->path, &new_path); + if (err < 0) { + goto out; + } + /* + * Fixup fid's pointing to the old name to + * start pointing to the new name + */ + QSIMPLEQ_FOREACH(tfidp, &s->fid_list, next) { + if (v9fs_path_is_ancestor(&fidp->path, &tfidp->path)) { + /* replace the name */ + v9fs_fix_path(&tfidp->path, &new_path, strlen(fidp->path.data)); + } + } +out: + if (dirfidp) { + put_fid(pdu, dirfidp); + } + v9fs_path_free(&new_path); + return err; +} + +/* Only works with path name based fid */ +static void coroutine_fn v9fs_rename(void *opaque) +{ + int32_t fid; + ssize_t err = 0; + size_t offset = 7; + V9fsString name; + int32_t newdirfid; + V9fsFidState *fidp; + V9fsPDU *pdu = opaque; + V9fsState *s = pdu->s; + + v9fs_string_init(&name); + err = pdu_unmarshal(pdu, offset, "dds", &fid, &newdirfid, &name); + if (err < 0) { + goto out_nofid; + } + + if 
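v9fs_complete_rename() above finishes by walking every live fid and rewriting any path that has the renamed path as an ancestor prefix. A self-contained sketch of that prefix rewrite (fix_path is a hypothetical stand-in for v9fs_fix_path(), operating on plain C strings instead of V9fsPath):

#include <stdlib.h>
#include <string.h>

/* Sketch: swap old_prefix for new_prefix in path, e.g.
 * fix_path("/a/b", "/a/c", "/a/b/file") yields "/a/c/file".
 * Returns a freshly allocated string, or NULL on allocation failure. */
static char *fix_path(const char *old_prefix, const char *new_prefix,
                      const char *path)
{
    size_t old_len = strlen(old_prefix);
    size_t new_len = strlen(new_prefix);
    size_t tail_len;
    char *out;

    if (strncmp(path, old_prefix, old_len) != 0) {
        return strdup(path);                 /* fid unaffected by rename */
    }
    tail_len = strlen(path + old_len);
    out = malloc(new_len + tail_len + 1);
    if (out) {
        memcpy(out, new_prefix, new_len);
        memcpy(out + new_len, path + old_len, tail_len + 1); /* incl. NUL */
    }
    return out;
}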
(name_is_illegal(name.data)) { + err = -ENOENT; + goto out_nofid; + } + + if (!strcmp(".", name.data) || !strcmp("..", name.data)) { + err = -EISDIR; + goto out_nofid; + } + + fidp = get_fid(pdu, fid); + if (fidp == NULL) { + err = -ENOENT; + goto out_nofid; + } + if (fidp->fid_type != P9_FID_NONE) { + err = -EINVAL; + goto out; + } + /* if fs driver is not path based, return EOPNOTSUPP */ + if (!(pdu->s->ctx.export_flags & V9FS_PATHNAME_FSCONTEXT)) { + err = -EOPNOTSUPP; + goto out; + } + v9fs_path_write_lock(s); + err = v9fs_complete_rename(pdu, fidp, newdirfid, &name); + v9fs_path_unlock(s); + if (!err) { + err = offset; + } +out: + put_fid(pdu, fidp); +out_nofid: + pdu_complete(pdu, err); + v9fs_string_free(&name); +} + +static int coroutine_fn v9fs_fix_fid_paths(V9fsPDU *pdu, V9fsPath *olddir, + V9fsString *old_name, + V9fsPath *newdir, + V9fsString *new_name) +{ + V9fsFidState *tfidp; + V9fsPath oldpath, newpath; + V9fsState *s = pdu->s; + int err; + + v9fs_path_init(&oldpath); + v9fs_path_init(&newpath); + err = v9fs_co_name_to_path(pdu, olddir, old_name->data, &oldpath); + if (err < 0) { + goto out; + } + err = v9fs_co_name_to_path(pdu, newdir, new_name->data, &newpath); + if (err < 0) { + goto out; + } + + /* + * Fixup fid's pointing to the old name to + * start pointing to the new name + */ + QSIMPLEQ_FOREACH(tfidp, &s->fid_list, next) { + if (v9fs_path_is_ancestor(&oldpath, &tfidp->path)) { + /* replace the name */ + v9fs_fix_path(&tfidp->path, &newpath, strlen(oldpath.data)); + } + } +out: + v9fs_path_free(&oldpath); + v9fs_path_free(&newpath); + return err; +} + +static int coroutine_fn v9fs_complete_renameat(V9fsPDU *pdu, int32_t olddirfid, + V9fsString *old_name, + int32_t newdirfid, + V9fsString *new_name) +{ + int err = 0; + V9fsState *s = pdu->s; + V9fsFidState *newdirfidp = NULL, *olddirfidp = NULL; + + olddirfidp = get_fid(pdu, olddirfid); + if (olddirfidp == NULL) { + err = -ENOENT; + goto out; + } + if (newdirfid != -1) { + newdirfidp = get_fid(pdu, newdirfid); + if (newdirfidp == NULL) { + err = -ENOENT; + goto out; + } + } else { + newdirfidp = get_fid(pdu, olddirfid); + } + + err = v9fs_co_renameat(pdu, &olddirfidp->path, old_name, + &newdirfidp->path, new_name); + if (err < 0) { + goto out; + } + if (s->ctx.export_flags & V9FS_PATHNAME_FSCONTEXT) { + /* Only for path based fid we need to do the below fixup */ + err = v9fs_fix_fid_paths(pdu, &olddirfidp->path, old_name, + &newdirfidp->path, new_name); + } +out: + if (olddirfidp) { + put_fid(pdu, olddirfidp); + } + if (newdirfidp) { + put_fid(pdu, newdirfidp); + } + return err; +} + +static void coroutine_fn v9fs_renameat(void *opaque) +{ + ssize_t err = 0; + size_t offset = 7; + V9fsPDU *pdu = opaque; + V9fsState *s = pdu->s; + int32_t olddirfid, newdirfid; + V9fsString old_name, new_name; + + v9fs_string_init(&old_name); + v9fs_string_init(&new_name); + err = pdu_unmarshal(pdu, offset, "dsds", &olddirfid, + &old_name, &newdirfid, &new_name); + if (err < 0) { + goto out_err; + } + + if (name_is_illegal(old_name.data) || name_is_illegal(new_name.data)) { + err = -ENOENT; + goto out_err; + } + + if (!strcmp(".", old_name.data) || !strcmp("..", old_name.data) || + !strcmp(".", new_name.data) || !strcmp("..", new_name.data)) { + err = -EISDIR; + goto out_err; + } + + v9fs_path_write_lock(s); + err = v9fs_complete_renameat(pdu, olddirfid, + &old_name, newdirfid, &new_name); + v9fs_path_unlock(s); + if (!err) { + err = offset; + } + +out_err: + pdu_complete(pdu, err); + v9fs_string_free(&old_name); + 
v9fs_string_free(&new_name); +} + +static void coroutine_fn v9fs_wstat(void *opaque) +{ + int32_t fid; + int err = 0; + int16_t unused; + V9fsStat v9stat; + size_t offset = 7; + struct stat stbuf; + V9fsFidState *fidp; + V9fsPDU *pdu = opaque; + V9fsState *s = pdu->s; + + v9fs_stat_init(&v9stat); + err = pdu_unmarshal(pdu, offset, "dwS", &fid, &unused, &v9stat); + if (err < 0) { + goto out_nofid; + } + trace_v9fs_wstat(pdu->tag, pdu->id, fid, + v9stat.mode, v9stat.atime, v9stat.mtime); + + fidp = get_fid(pdu, fid); + if (fidp == NULL) { + err = -EINVAL; + goto out_nofid; + } + /* do we need to sync the file? */ + if (donttouch_stat(&v9stat)) { + err = v9fs_co_fsync(pdu, fidp, 0); + goto out; + } + if (v9stat.mode != -1) { + uint32_t v9_mode; + err = v9fs_co_lstat(pdu, &fidp->path, &stbuf); + if (err < 0) { + goto out; + } + v9_mode = stat_to_v9mode(&stbuf); + if ((v9stat.mode & P9_STAT_MODE_TYPE_BITS) != + (v9_mode & P9_STAT_MODE_TYPE_BITS)) { + /* Attempting to change the type */ + err = -EIO; + goto out; + } + err = v9fs_co_chmod(pdu, &fidp->path, + v9mode_to_mode(v9stat.mode, + &v9stat.extension)); + if (err < 0) { + goto out; + } + } + if (v9stat.mtime != -1 || v9stat.atime != -1) { + struct timespec times[2]; + if (v9stat.atime != -1) { + times[0].tv_sec = v9stat.atime; + times[0].tv_nsec = 0; + } else { + times[0].tv_nsec = UTIME_OMIT; + } + if (v9stat.mtime != -1) { + times[1].tv_sec = v9stat.mtime; + times[1].tv_nsec = 0; + } else { + times[1].tv_nsec = UTIME_OMIT; + } + err = v9fs_co_utimensat(pdu, &fidp->path, times); + if (err < 0) { + goto out; + } + } + if (v9stat.n_gid != -1 || v9stat.n_uid != -1) { + err = v9fs_co_chown(pdu, &fidp->path, v9stat.n_uid, v9stat.n_gid); + if (err < 0) { + goto out; + } + } + if (v9stat.name.size != 0) { + v9fs_path_write_lock(s); + err = v9fs_complete_rename(pdu, fidp, -1, &v9stat.name); + v9fs_path_unlock(s); + if (err < 0) { + goto out; + } + } + if (v9stat.length != -1) { + err = v9fs_co_truncate(pdu, &fidp->path, v9stat.length); + if (err < 0) { + goto out; + } + } + err = offset; +out: + put_fid(pdu, fidp); +out_nofid: + v9fs_stat_free(&v9stat); + pdu_complete(pdu, err); +} + +static int v9fs_fill_statfs(V9fsState *s, V9fsPDU *pdu, struct statfs *stbuf) +{ + uint32_t f_type; + uint32_t f_bsize; + uint64_t f_blocks; + uint64_t f_bfree; + uint64_t f_bavail; + uint64_t f_files; + uint64_t f_ffree; + uint64_t fsid_val; + uint32_t f_namelen; + size_t offset = 7; + int32_t bsize_factor; + + /* + * compute bsize factor based on host file system block size + * and client msize + */ + bsize_factor = (s->msize - P9_IOHDRSZ) / stbuf->f_bsize; + if (!bsize_factor) { + bsize_factor = 1; + } + f_type = stbuf->f_type; + f_bsize = stbuf->f_bsize; + f_bsize *= bsize_factor; + /* + * f_bsize is adjusted(multiplied) by bsize factor, so we need to + * adjust(divide) the number of blocks, free blocks and available + * blocks by bsize factor + */ + f_blocks = stbuf->f_blocks / bsize_factor; + f_bfree = stbuf->f_bfree / bsize_factor; + f_bavail = stbuf->f_bavail / bsize_factor; + f_files = stbuf->f_files; + f_ffree = stbuf->f_ffree; + fsid_val = (unsigned int) stbuf->f_fsid.__val[0] | + (unsigned long long)stbuf->f_fsid.__val[1] << 32; + f_namelen = stbuf->f_namelen; + + return pdu_marshal(pdu, offset, "ddqqqqqqd", + f_type, f_bsize, f_blocks, f_bfree, + f_bavail, f_files, f_ffree, + fsid_val, f_namelen); +} + +static void coroutine_fn v9fs_statfs(void *opaque) +{ + int32_t fid; + ssize_t retval = 0; + size_t offset = 7; + V9fsFidState *fidp; + struct statfs 
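The bsize_factor computation in v9fs_fill_statfs() above scales the host block size up to the largest multiple that still fits in a single 9P message, then divides the block counts to match. A worked sketch with illustrative numbers (standalone program, not part of the patch):

#include <stdint.h>
#include <stdio.h>

#define P9_IOHDRSZ 24   /* Twrite/Rread header room, as defined in 9p.h */

/* Sketch: with msize = 65536 and host f_bsize = 4096,
 * bsize_factor = (65536 - 24) / 4096 = 15, so the client is shown
 * f_bsize = 61440 and sees the block counts divided by 15. */
int main(void)
{
    uint32_t msize = 65536, f_bsize = 4096;
    uint64_t f_blocks = 1000000;
    int32_t bsize_factor = (msize - P9_IOHDRSZ) / f_bsize;

    if (!bsize_factor) {
        bsize_factor = 1;   /* host blocks larger than the msize payload */
    }
    printf("reported f_bsize=%u f_blocks=%llu\n",
           f_bsize * bsize_factor,
           (unsigned long long)(f_blocks / bsize_factor));
    return 0;
}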
stbuf; + V9fsPDU *pdu = opaque; + V9fsState *s = pdu->s; + + retval = pdu_unmarshal(pdu, offset, "d", &fid); + if (retval < 0) { + goto out_nofid; + } + fidp = get_fid(pdu, fid); + if (fidp == NULL) { + retval = -ENOENT; + goto out_nofid; + } + retval = v9fs_co_statfs(pdu, &fidp->path, &stbuf); + if (retval < 0) { + goto out; + } + retval = v9fs_fill_statfs(s, pdu, &stbuf); + if (retval < 0) { + goto out; + } + retval += offset; +out: + put_fid(pdu, fidp); +out_nofid: + pdu_complete(pdu, retval); +} + +static void coroutine_fn v9fs_mknod(void *opaque) +{ + + int mode; + gid_t gid; + int32_t fid; + V9fsQID qid; + int err = 0; + int major, minor; + size_t offset = 7; + V9fsString name; + struct stat stbuf; + V9fsFidState *fidp; + V9fsPDU *pdu = opaque; + + v9fs_string_init(&name); + err = pdu_unmarshal(pdu, offset, "dsdddd", &fid, &name, &mode, + &major, &minor, &gid); + if (err < 0) { + goto out_nofid; + } + trace_v9fs_mknod(pdu->tag, pdu->id, fid, mode, major, minor); + + if (name_is_illegal(name.data)) { + err = -ENOENT; + goto out_nofid; + } + + if (!strcmp(".", name.data) || !strcmp("..", name.data)) { + err = -EEXIST; + goto out_nofid; + } + + fidp = get_fid(pdu, fid); + if (fidp == NULL) { + err = -ENOENT; + goto out_nofid; + } + err = v9fs_co_mknod(pdu, fidp, &name, fidp->uid, gid, + makedev(major, minor), mode, &stbuf); + if (err < 0) { + goto out; + } + err = stat_to_qid(pdu, &stbuf, &qid); + if (err < 0) { + goto out; + } + err = pdu_marshal(pdu, offset, "Q", &qid); + if (err < 0) { + goto out; + } + err += offset; + trace_v9fs_mknod_return(pdu->tag, pdu->id, + qid.type, qid.version, qid.path); +out: + put_fid(pdu, fidp); +out_nofid: + pdu_complete(pdu, err); + v9fs_string_free(&name); +} + +/* + * Implement posix byte range locking code + * Server side handling of locking code is very simple, because 9p server in + * QEMU can handle only one client. And most of the lock handling + * (like conflict, merging) etc is done by the VFS layer itself, so no need to + * do any thing in * qemu 9p server side lock code path. + * So when a TLOCK request comes, always return success + */ +static void coroutine_fn v9fs_lock(void *opaque) +{ + V9fsFlock flock; + size_t offset = 7; + struct stat stbuf; + V9fsFidState *fidp; + int32_t fid, err = 0; + V9fsPDU *pdu = opaque; + + v9fs_string_init(&flock.client_id); + err = pdu_unmarshal(pdu, offset, "dbdqqds", &fid, &flock.type, + &flock.flags, &flock.start, &flock.length, + &flock.proc_id, &flock.client_id); + if (err < 0) { + goto out_nofid; + } + trace_v9fs_lock(pdu->tag, pdu->id, fid, + flock.type, flock.start, flock.length); + + + /* We support only block flag now (that too ignored currently) */ + if (flock.flags & ~P9_LOCK_FLAGS_BLOCK) { + err = -EINVAL; + goto out_nofid; + } + fidp = get_fid(pdu, fid); + if (fidp == NULL) { + err = -ENOENT; + goto out_nofid; + } + err = v9fs_co_fstat(pdu, fidp, &stbuf); + if (err < 0) { + goto out; + } + err = pdu_marshal(pdu, offset, "b", P9_LOCK_SUCCESS); + if (err < 0) { + goto out; + } + err += offset; + trace_v9fs_lock_return(pdu->tag, pdu->id, P9_LOCK_SUCCESS); +out: + put_fid(pdu, fidp); +out_nofid: + pdu_complete(pdu, err); + v9fs_string_free(&flock.client_id); +} + +/* + * When a TGETLOCK request comes, always return success because all lock + * handling is done by client's VFS layer. 
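The "dbdqqds" string handed to pdu_unmarshal() in v9fs_lock() above is typical of the marshalling convention used throughout this file: b, w, d and q are little-endian 8/16/32/64-bit integers, s is a length-prefixed string, Q a qid, and every body starts at offset 7, right behind the size[4] id[1] tag[2] header. A hand-rolled sketch of one complete message under that layout (encode_rlerror is hypothetical; an Rlerror is size[4] id[1] tag[2] ecode[4]):

#include <stddef.h>
#include <stdint.h>

/* Sketch: encode an Rlerror message (id 7 per the enum in 9p.h) the way
 * the transport's marshal helpers would. buf must hold at least 11 bytes. */
static size_t encode_rlerror(uint8_t *buf, uint16_t tag, uint32_t ecode)
{
    uint32_t size = 7 + 4;           /* header + ecode[4] */

    buf[0] = size & 0xff;            /* size[4], little endian */
    buf[1] = (size >> 8) & 0xff;
    buf[2] = (size >> 16) & 0xff;
    buf[3] = (size >> 24) & 0xff;
    buf[4] = 7;                      /* id[1]: P9_RLERROR */
    buf[5] = tag & 0xff;             /* tag[2], little endian */
    buf[6] = (tag >> 8) & 0xff;
    buf[7] = ecode & 0xff;           /* ecode[4]: the 'd' payload */
    buf[8] = (ecode >> 8) & 0xff;
    buf[9] = (ecode >> 16) & 0xff;
    buf[10] = (ecode >> 24) & 0xff;
    return size;
}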
+ */ +static void coroutine_fn v9fs_getlock(void *opaque) +{ + size_t offset = 7; + struct stat stbuf; + V9fsFidState *fidp; + V9fsGetlock glock; + int32_t fid, err = 0; + V9fsPDU *pdu = opaque; + + v9fs_string_init(&glock.client_id); + err = pdu_unmarshal(pdu, offset, "dbqqds", &fid, &glock.type, + &glock.start, &glock.length, &glock.proc_id, + &glock.client_id); + if (err < 0) { + goto out_nofid; + } + trace_v9fs_getlock(pdu->tag, pdu->id, fid, + glock.type, glock.start, glock.length); + + fidp = get_fid(pdu, fid); + if (fidp == NULL) { + err = -ENOENT; + goto out_nofid; + } + err = v9fs_co_fstat(pdu, fidp, &stbuf); + if (err < 0) { + goto out; + } + glock.type = P9_LOCK_TYPE_UNLCK; + err = pdu_marshal(pdu, offset, "bqqds", glock.type, + glock.start, glock.length, glock.proc_id, + &glock.client_id); + if (err < 0) { + goto out; + } + err += offset; + trace_v9fs_getlock_return(pdu->tag, pdu->id, glock.type, glock.start, + glock.length, glock.proc_id); +out: + put_fid(pdu, fidp); +out_nofid: + pdu_complete(pdu, err); + v9fs_string_free(&glock.client_id); +} + +static void coroutine_fn v9fs_mkdir(void *opaque) +{ + V9fsPDU *pdu = opaque; + size_t offset = 7; + int32_t fid; + struct stat stbuf; + V9fsQID qid; + V9fsString name; + V9fsFidState *fidp; + gid_t gid; + int mode; + int err = 0; + + v9fs_string_init(&name); + err = pdu_unmarshal(pdu, offset, "dsdd", &fid, &name, &mode, &gid); + if (err < 0) { + goto out_nofid; + } + trace_v9fs_mkdir(pdu->tag, pdu->id, fid, name.data, mode, gid); + + if (name_is_illegal(name.data)) { + err = -ENOENT; + goto out_nofid; + } + + if (!strcmp(".", name.data) || !strcmp("..", name.data)) { + err = -EEXIST; + goto out_nofid; + } + + fidp = get_fid(pdu, fid); + if (fidp == NULL) { + err = -ENOENT; + goto out_nofid; + } + err = v9fs_co_mkdir(pdu, fidp, &name, mode, fidp->uid, gid, &stbuf); + if (err < 0) { + goto out; + } + err = stat_to_qid(pdu, &stbuf, &qid); + if (err < 0) { + goto out; + } + err = pdu_marshal(pdu, offset, "Q", &qid); + if (err < 0) { + goto out; + } + err += offset; + trace_v9fs_mkdir_return(pdu->tag, pdu->id, + qid.type, qid.version, qid.path, err); +out: + put_fid(pdu, fidp); +out_nofid: + pdu_complete(pdu, err); + v9fs_string_free(&name); +} + +static void coroutine_fn v9fs_xattrwalk(void *opaque) +{ + int64_t size; + V9fsString name; + ssize_t err = 0; + size_t offset = 7; + int32_t fid, newfid; + V9fsFidState *file_fidp; + V9fsFidState *xattr_fidp = NULL; + V9fsPDU *pdu = opaque; + V9fsState *s = pdu->s; + + v9fs_string_init(&name); + err = pdu_unmarshal(pdu, offset, "dds", &fid, &newfid, &name); + if (err < 0) { + goto out_nofid; + } + trace_v9fs_xattrwalk(pdu->tag, pdu->id, fid, newfid, name.data); + + file_fidp = get_fid(pdu, fid); + if (file_fidp == NULL) { + err = -ENOENT; + goto out_nofid; + } + xattr_fidp = alloc_fid(s, newfid); + if (xattr_fidp == NULL) { + err = -EINVAL; + goto out; + } + v9fs_path_copy(&xattr_fidp->path, &file_fidp->path); + if (!v9fs_string_size(&name)) { + /* + * listxattr request. 
Get the size first + */ + size = v9fs_co_llistxattr(pdu, &xattr_fidp->path, NULL, 0); + if (size < 0) { + err = size; + clunk_fid(s, xattr_fidp->fid); + goto out; + } + /* + * Read the xattr value + */ + xattr_fidp->fs.xattr.len = size; + xattr_fidp->fid_type = P9_FID_XATTR; + xattr_fidp->fs.xattr.xattrwalk_fid = true; + xattr_fidp->fs.xattr.value = g_malloc0(size); + if (size) { + err = v9fs_co_llistxattr(pdu, &xattr_fidp->path, + xattr_fidp->fs.xattr.value, + xattr_fidp->fs.xattr.len); + if (err < 0) { + clunk_fid(s, xattr_fidp->fid); + goto out; + } + } + err = pdu_marshal(pdu, offset, "q", size); + if (err < 0) { + goto out; + } + err += offset; + } else { + /* + * specific xattr fid. We check for xattr + * presence also collect the xattr size + */ + size = v9fs_co_lgetxattr(pdu, &xattr_fidp->path, + &name, NULL, 0); + if (size < 0) { + err = size; + clunk_fid(s, xattr_fidp->fid); + goto out; + } + /* + * Read the xattr value + */ + xattr_fidp->fs.xattr.len = size; + xattr_fidp->fid_type = P9_FID_XATTR; + xattr_fidp->fs.xattr.xattrwalk_fid = true; + xattr_fidp->fs.xattr.value = g_malloc0(size); + if (size) { + err = v9fs_co_lgetxattr(pdu, &xattr_fidp->path, + &name, xattr_fidp->fs.xattr.value, + xattr_fidp->fs.xattr.len); + if (err < 0) { + clunk_fid(s, xattr_fidp->fid); + goto out; + } + } + err = pdu_marshal(pdu, offset, "q", size); + if (err < 0) { + goto out; + } + err += offset; + } + trace_v9fs_xattrwalk_return(pdu->tag, pdu->id, size); +out: + put_fid(pdu, file_fidp); + if (xattr_fidp) { + put_fid(pdu, xattr_fidp); + } +out_nofid: + pdu_complete(pdu, err); + v9fs_string_free(&name); +} + +static void coroutine_fn v9fs_xattrcreate(void *opaque) +{ + int flags, rflags = 0; + int32_t fid; + uint64_t size; + ssize_t err = 0; + V9fsString name; + size_t offset = 7; + V9fsFidState *file_fidp; + V9fsFidState *xattr_fidp; + V9fsPDU *pdu = opaque; + + v9fs_string_init(&name); + err = pdu_unmarshal(pdu, offset, "dsqd", &fid, &name, &size, &flags); + if (err < 0) { + goto out_nofid; + } + trace_v9fs_xattrcreate(pdu->tag, pdu->id, fid, name.data, size, flags); + + if (flags & ~(P9_XATTR_CREATE | P9_XATTR_REPLACE)) { + err = -EINVAL; + goto out_nofid; + } + + if (flags & P9_XATTR_CREATE) { + rflags |= XATTR_CREATE; + } + + if (flags & P9_XATTR_REPLACE) { + rflags |= XATTR_REPLACE; + } + + if (size > XATTR_SIZE_MAX) { + err = -E2BIG; + goto out_nofid; + } + + file_fidp = get_fid(pdu, fid); + if (file_fidp == NULL) { + err = -EINVAL; + goto out_nofid; + } + if (file_fidp->fid_type != P9_FID_NONE) { + err = -EINVAL; + goto out_put_fid; + } + + /* Make the file fid point to xattr */ + xattr_fidp = file_fidp; + xattr_fidp->fid_type = P9_FID_XATTR; + xattr_fidp->fs.xattr.copied_len = 0; + xattr_fidp->fs.xattr.xattrwalk_fid = false; + xattr_fidp->fs.xattr.len = size; + xattr_fidp->fs.xattr.flags = rflags; + v9fs_string_init(&xattr_fidp->fs.xattr.name); + v9fs_string_copy(&xattr_fidp->fs.xattr.name, &name); + xattr_fidp->fs.xattr.value = g_malloc0(size); + err = offset; +out_put_fid: + put_fid(pdu, file_fidp); +out_nofid: + pdu_complete(pdu, err); + v9fs_string_free(&name); +} + +static void coroutine_fn v9fs_readlink(void *opaque) +{ + V9fsPDU *pdu = opaque; + size_t offset = 7; + V9fsString target; + int32_t fid; + int err = 0; + V9fsFidState *fidp; + + err = pdu_unmarshal(pdu, offset, "d", &fid); + if (err < 0) { + goto out_nofid; + } + trace_v9fs_readlink(pdu->tag, pdu->id, fid); + fidp = get_fid(pdu, fid); + if (fidp == NULL) { + err = -ENOENT; + goto out_nofid; + } + + 
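Both branches of v9fs_xattrwalk() above use the standard two-call xattr pattern: probe with a NULL buffer to learn the value size, allocate, then fetch for real. A standalone sketch with the Linux syscalls the local backend ultimately issues (list_xattrs is hypothetical):

#include <stdlib.h>
#include <sys/types.h>
#include <sys/xattr.h>

/* Sketch: size-then-read, mirroring the v9fs_co_llistxattr(..., NULL, 0)
 * probe above. On success *out holds size bytes of NUL-separated names and
 * the caller frees it; the value can still change between the two calls,
 * which is why the second call passes exactly the probed size. */
static ssize_t list_xattrs(const char *path, char **out)
{
    ssize_t size = llistxattr(path, NULL, 0);    /* probe for size */

    if (size <= 0) {
        *out = NULL;
        return size;
    }
    *out = malloc(size);
    if (!*out) {
        return -1;
    }
    return llistxattr(path, *out, size);         /* actual read */
}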
v9fs_string_init(&target); + err = v9fs_co_readlink(pdu, &fidp->path, &target); + if (err < 0) { + goto out; + } + err = pdu_marshal(pdu, offset, "s", &target); + if (err < 0) { + v9fs_string_free(&target); + goto out; + } + err += offset; + trace_v9fs_readlink_return(pdu->tag, pdu->id, target.data); + v9fs_string_free(&target); +out: + put_fid(pdu, fidp); +out_nofid: + pdu_complete(pdu, err); +} + +static CoroutineEntry *pdu_co_handlers[] = { + [P9_TREADDIR] = v9fs_readdir, + [P9_TSTATFS] = v9fs_statfs, + [P9_TGETATTR] = v9fs_getattr, + [P9_TSETATTR] = v9fs_setattr, + [P9_TXATTRWALK] = v9fs_xattrwalk, + [P9_TXATTRCREATE] = v9fs_xattrcreate, + [P9_TMKNOD] = v9fs_mknod, + [P9_TRENAME] = v9fs_rename, + [P9_TLOCK] = v9fs_lock, + [P9_TGETLOCK] = v9fs_getlock, + [P9_TRENAMEAT] = v9fs_renameat, + [P9_TREADLINK] = v9fs_readlink, + [P9_TUNLINKAT] = v9fs_unlinkat, + [P9_TMKDIR] = v9fs_mkdir, + [P9_TVERSION] = v9fs_version, + [P9_TLOPEN] = v9fs_open, + [P9_TATTACH] = v9fs_attach, + [P9_TSTAT] = v9fs_stat, + [P9_TWALK] = v9fs_walk, + [P9_TCLUNK] = v9fs_clunk, + [P9_TFSYNC] = v9fs_fsync, + [P9_TOPEN] = v9fs_open, + [P9_TREAD] = v9fs_read, +#if 0 + [P9_TAUTH] = v9fs_auth, +#endif + [P9_TFLUSH] = v9fs_flush, + [P9_TLINK] = v9fs_link, + [P9_TSYMLINK] = v9fs_symlink, + [P9_TCREATE] = v9fs_create, + [P9_TLCREATE] = v9fs_lcreate, + [P9_TWRITE] = v9fs_write, + [P9_TWSTAT] = v9fs_wstat, + [P9_TREMOVE] = v9fs_remove, +}; + +static void coroutine_fn v9fs_op_not_supp(void *opaque) +{ + V9fsPDU *pdu = opaque; + pdu_complete(pdu, -EOPNOTSUPP); +} + +static void coroutine_fn v9fs_fs_ro(void *opaque) +{ + V9fsPDU *pdu = opaque; + pdu_complete(pdu, -EROFS); +} + +static inline bool is_read_only_op(V9fsPDU *pdu) +{ + switch (pdu->id) { + case P9_TREADDIR: + case P9_TSTATFS: + case P9_TGETATTR: + case P9_TXATTRWALK: + case P9_TLOCK: + case P9_TGETLOCK: + case P9_TREADLINK: + case P9_TVERSION: + case P9_TLOPEN: + case P9_TATTACH: + case P9_TSTAT: + case P9_TWALK: + case P9_TCLUNK: + case P9_TFSYNC: + case P9_TOPEN: + case P9_TREAD: + case P9_TAUTH: + case P9_TFLUSH: + return 1; + default: + return 0; + } +} + +void pdu_submit(V9fsPDU *pdu, P9MsgHeader *hdr) +{ + Coroutine *co; + CoroutineEntry *handler; + V9fsState *s = pdu->s; + + pdu->size = le32_to_cpu(hdr->size_le); + pdu->id = hdr->id; + pdu->tag = le16_to_cpu(hdr->tag_le); + + if (pdu->id >= ARRAY_SIZE(pdu_co_handlers) || + (pdu_co_handlers[pdu->id] == NULL)) { + handler = v9fs_op_not_supp; + } else if (is_ro_export(&s->ctx) && !is_read_only_op(pdu)) { + handler = v9fs_fs_ro; + } else { + handler = pdu_co_handlers[pdu->id]; + } + + qemu_co_queue_init(&pdu->complete); + co = qemu_coroutine_create(handler, pdu); + qemu_coroutine_enter(co); +} + +/* Returns 0 on success, 1 on failure. */ +int v9fs_device_realize_common(V9fsState *s, const V9fsTransport *t, + Error **errp) +{ + ERRP_GUARD(); + int i, len; + struct stat stat; + FsDriverEntry *fse; + V9fsPath path; + int rc = 1; + + assert(!s->transport); + s->transport = t; + + /* initialize pdu allocator */ + QLIST_INIT(&s->free_list); + QLIST_INIT(&s->active_list); + for (i = 0; i < MAX_REQ; i++) { + QLIST_INSERT_HEAD(&s->free_list, &s->pdus[i], next); + s->pdus[i].s = s; + s->pdus[i].idx = i; + } + + v9fs_path_init(&path); + + fse = get_fsdev_fsentry(s->fsconf.fsdev_id); + + if (!fse) { + /* We don't have a fsdev identified by fsdev_id */ + error_setg(errp, "9pfs device couldn't find fsdev with the " + "id = %s", + s->fsconf.fsdev_id ? 
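pdu_submit() above turns a message id into a coroutine entry point: ids that fall outside the table or have no handler degrade to "operation not supported", and write-class requests on a read-only export are answered with -EROFS before any handler runs. A reduced sketch of just that selection logic (hypothetical Handler type and parameter names):

#include <stddef.h>

typedef void (*Handler)(void *opaque);

/* Sketch of the pdu_submit() handler selection: bounds check, NULL check,
 * then the read-only gate, then the table entry. */
static Handler select_handler(Handler const *table, size_t n, unsigned id,
                              int ro_export, int is_ro_op,
                              Handler not_supp, Handler fs_ro)
{
    if (id >= n || table[id] == NULL) {
        return not_supp;                 /* unknown or unimplemented id */
    }
    if (ro_export && !is_ro_op) {
        return fs_ro;                    /* refuse writes early, -EROFS */
    }
    return table[id];
}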
s->fsconf.fsdev_id : "NULL"); + goto out; + } + + if (!s->fsconf.tag) { + /* we haven't specified a mount_tag */ + error_setg(errp, "fsdev with id %s needs mount_tag arguments", + s->fsconf.fsdev_id); + goto out; + } + + s->ctx.export_flags = fse->export_flags; + s->ctx.fs_root = g_strdup(fse->path); + s->ctx.exops.get_st_gen = NULL; + len = strlen(s->fsconf.tag); + if (len > MAX_TAG_LEN - 1) { + error_setg(errp, "mount tag '%s' (%d bytes) is longer than " + "maximum (%d bytes)", s->fsconf.tag, len, MAX_TAG_LEN - 1); + goto out; + } + + s->tag = g_strdup(s->fsconf.tag); + s->ctx.uid = -1; + + s->ops = fse->ops; + + s->ctx.fmode = fse->fmode; + s->ctx.dmode = fse->dmode; + + QSIMPLEQ_INIT(&s->fid_list); + qemu_co_rwlock_init(&s->rename_lock); + + if (s->ops->init(&s->ctx, errp) < 0) { + error_prepend(errp, "cannot initialize fsdev '%s': ", + s->fsconf.fsdev_id); + goto out; + } + + /* + * Check details of the export path. We need to use the fs driver + * callback to do that. Since we are in the init path, we don't + * use co-routines here. + */ + if (s->ops->name_to_path(&s->ctx, NULL, "/", &path) < 0) { + error_setg(errp, + "error in converting name to path %s", strerror(errno)); + goto out; + } + if (s->ops->lstat(&s->ctx, &path, &stat)) { + error_setg(errp, "share path %s does not exist", fse->path); + goto out; + } + else if (!S_ISDIR(stat.st_mode)) { + error_setg(errp, "share path %s is not a directory", fse->path); + goto out; + } + + s->dev_id = stat.st_dev; + + /* init inode remapping: */ + /* hash table for variable length inode suffixes */ + qpd_table_init(&s->qpd_table); + /* hash table for slow/full inode remapping (most users won't need it) */ + qpf_table_init(&s->qpf_table); + /* hash table for quick inode remapping */ + qpp_table_init(&s->qpp_table); + s->qp_ndevices = 0; + s->qp_affix_next = 1; /* reserve 0 to detect overflow */ + s->qp_fullpath_next = 1; + + s->ctx.fst = &fse->fst; + fsdev_throttle_init(s->ctx.fst); + + rc = 0; +out: + if (rc) { + v9fs_device_unrealize_common(s); + } + v9fs_path_free(&path); + return rc; +} + +void v9fs_device_unrealize_common(V9fsState *s) +{ + if (s->ops && s->ops->cleanup) { + s->ops->cleanup(&s->ctx); + } + if (s->ctx.fst) { + fsdev_throttle_cleanup(s->ctx.fst); + } + g_free(s->tag); + qp_table_destroy(&s->qpd_table); + qp_table_destroy(&s->qpp_table); + qp_table_destroy(&s->qpf_table); + g_free(s->ctx.fs_root); +} + +typedef struct VirtfsCoResetData { + V9fsPDU pdu; + bool done; +} VirtfsCoResetData; + +static void coroutine_fn virtfs_co_reset(void *opaque) +{ + VirtfsCoResetData *data = opaque; + + virtfs_reset(&data->pdu); + data->done = true; +} + +void v9fs_reset(V9fsState *s) +{ + VirtfsCoResetData data = { .pdu = { .s = s }, .done = false }; + Coroutine *co; + + while (!QLIST_EMPTY(&s->active_list)) { + aio_poll(qemu_get_aio_context(), true); + } + + co = qemu_coroutine_create(virtfs_co_reset, &data); + qemu_coroutine_enter(co); + + while (!data.done) { + aio_poll(qemu_get_aio_context(), true); + } +} + +static void __attribute__((__constructor__)) v9fs_set_fd_limit(void) +{ + struct rlimit rlim; + if (getrlimit(RLIMIT_NOFILE, &rlim) < 0) { + error_report("Failed to get the resource limit"); + exit(1); + } + open_fd_hw = rlim.rlim_cur - MIN(400, rlim.rlim_cur / 3); + open_fd_rc = rlim.rlim_cur / 2; +} diff --git a/hw/9pfs/9p.h b/hw/9pfs/9p.h new file mode 100644 index 000000000..1567b6784 --- /dev/null +++ b/hw/9pfs/9p.h @@ -0,0 +1,482 @@ +#ifndef QEMU_9P_H +#define QEMU_9P_H + +#include <dirent.h> +#include <utime.h> +#include <sys/resource.h> +#include "fsdev/file-op-9p.h"
+#include "fsdev/9p-iov-marshal.h" +#include "qemu/thread.h" +#include "qemu/coroutine.h" +#include "qemu/qht.h" + +enum { + P9_TLERROR = 6, + P9_RLERROR, + P9_TSTATFS = 8, + P9_RSTATFS, + P9_TLOPEN = 12, + P9_RLOPEN, + P9_TLCREATE = 14, + P9_RLCREATE, + P9_TSYMLINK = 16, + P9_RSYMLINK, + P9_TMKNOD = 18, + P9_RMKNOD, + P9_TRENAME = 20, + P9_RRENAME, + P9_TREADLINK = 22, + P9_RREADLINK, + P9_TGETATTR = 24, + P9_RGETATTR, + P9_TSETATTR = 26, + P9_RSETATTR, + P9_TXATTRWALK = 30, + P9_RXATTRWALK, + P9_TXATTRCREATE = 32, + P9_RXATTRCREATE, + P9_TREADDIR = 40, + P9_RREADDIR, + P9_TFSYNC = 50, + P9_RFSYNC, + P9_TLOCK = 52, + P9_RLOCK, + P9_TGETLOCK = 54, + P9_RGETLOCK, + P9_TLINK = 70, + P9_RLINK, + P9_TMKDIR = 72, + P9_RMKDIR, + P9_TRENAMEAT = 74, + P9_RRENAMEAT, + P9_TUNLINKAT = 76, + P9_RUNLINKAT, + P9_TVERSION = 100, + P9_RVERSION, + P9_TAUTH = 102, + P9_RAUTH, + P9_TATTACH = 104, + P9_RATTACH, + P9_TERROR = 106, + P9_RERROR, + P9_TFLUSH = 108, + P9_RFLUSH, + P9_TWALK = 110, + P9_RWALK, + P9_TOPEN = 112, + P9_ROPEN, + P9_TCREATE = 114, + P9_RCREATE, + P9_TREAD = 116, + P9_RREAD, + P9_TWRITE = 118, + P9_RWRITE, + P9_TCLUNK = 120, + P9_RCLUNK, + P9_TREMOVE = 122, + P9_RREMOVE, + P9_TSTAT = 124, + P9_RSTAT, + P9_TWSTAT = 126, + P9_RWSTAT, +}; + + +/* qid.types */ +enum { + P9_QTDIR = 0x80, + P9_QTAPPEND = 0x40, + P9_QTEXCL = 0x20, + P9_QTMOUNT = 0x10, + P9_QTAUTH = 0x08, + P9_QTTMP = 0x04, + P9_QTSYMLINK = 0x02, + P9_QTLINK = 0x01, + P9_QTFILE = 0x00, +}; + +typedef enum P9ProtoVersion { + V9FS_PROTO_2000U = 0x01, + V9FS_PROTO_2000L = 0x02, +} P9ProtoVersion; + +/** + * @brief Minimum message size supported by this 9pfs server. + * + * A client establishes a session by sending a Tversion request along with a + * 'msize' parameter which suggests the server a maximum message size ever to be + * used for communication (for both requests and replies) between client and + * server during that session. If client suggests a 'msize' smaller than this + * value then session is denied by server with an error response. + */ +#define P9_MIN_MSIZE 4096 + +#define P9_NOTAG UINT16_MAX +#define P9_NOFID UINT32_MAX +#define P9_MAXWELEM 16 + +#define FID_REFERENCED 0x1 +#define FID_NON_RECLAIMABLE 0x2 +static inline char *rpath(FsContext *ctx, const char *path) +{ + return g_strdup_printf("%s/%s", ctx->fs_root, path); +} + +/* + * ample room for Twrite/Rread header + * size[4] Tread/Twrite tag[2] fid[4] offset[8] count[4] + */ +#define P9_IOHDRSZ 24 + +typedef struct V9fsPDU V9fsPDU; +typedef struct V9fsState V9fsState; +typedef struct V9fsTransport V9fsTransport; + +typedef struct { + uint32_t size_le; + uint8_t id; + uint16_t tag_le; +} QEMU_PACKED P9MsgHeader; +/* According to the specification, 9p messages start with a 7-byte header. + * Since most of the code uses this header size in literal form, we must be + * sure this is indeed the case. 
+ */ +QEMU_BUILD_BUG_ON(sizeof(P9MsgHeader) != 7); + +struct V9fsPDU { + uint32_t size; + uint16_t tag; + uint8_t id; + uint8_t cancelled; + CoQueue complete; + V9fsState *s; + QLIST_ENTRY(V9fsPDU) next; + uint32_t idx; +}; + + +/* FIXME + * 1) change user needs to set groups and stuff + */ + +#define MAX_REQ 128 +#define MAX_TAG_LEN 32 + +#define BUG_ON(cond) assert(!(cond)) + +typedef struct V9fsFidState V9fsFidState; + +enum { + P9_FID_NONE = 0, + P9_FID_FILE, + P9_FID_DIR, + P9_FID_XATTR, +}; + +typedef struct V9fsConf +{ + /* tag name for the device */ + char *tag; + char *fsdev_id; +} V9fsConf; + +/* 9p2000.L xattr flags (matches Linux values) */ +#define P9_XATTR_CREATE 1 +#define P9_XATTR_REPLACE 2 + +typedef struct V9fsXattr +{ + uint64_t copied_len; + uint64_t len; + void *value; + V9fsString name; + int flags; + bool xattrwalk_fid; +} V9fsXattr; + +typedef struct V9fsDir { + DIR *stream; + P9ProtoVersion proto_version; + /* readdir mutex type used for 9P2000.u protocol variant */ + CoMutex readdir_mutex_u; + /* readdir mutex type used for 9P2000.L protocol variant */ + QemuMutex readdir_mutex_L; +} V9fsDir; + +static inline void v9fs_readdir_lock(V9fsDir *dir) +{ + if (dir->proto_version == V9FS_PROTO_2000U) { + qemu_co_mutex_lock(&dir->readdir_mutex_u); + } else { + qemu_mutex_lock(&dir->readdir_mutex_L); + } +} + +static inline void v9fs_readdir_unlock(V9fsDir *dir) +{ + if (dir->proto_version == V9FS_PROTO_2000U) { + qemu_co_mutex_unlock(&dir->readdir_mutex_u); + } else { + qemu_mutex_unlock(&dir->readdir_mutex_L); + } +} + +static inline void v9fs_readdir_init(P9ProtoVersion proto_version, V9fsDir *dir) +{ + dir->proto_version = proto_version; + if (proto_version == V9FS_PROTO_2000U) { + qemu_co_mutex_init(&dir->readdir_mutex_u); + } else { + qemu_mutex_init(&dir->readdir_mutex_L); + } +} + +/** + * Type for 9p fs drivers' (a.k.a. 9p backends) result of readdir requests, + * which is a chained list of directory entries. + */ +typedef struct V9fsDirEnt { + /* mandatory (must not be NULL) information for all readdir requests */ + struct dirent *dent; + /* + * optional (may be NULL): A full stat of each directory entry is just + * done if explicitly told to fs driver. + */ + struct stat *st; + /* + * instead of an array, directory entries are always returned as + * chained list, that's because the amount of entries retrieved by fs + * drivers is dependent on the individual entries' name (since response + * messages are size limited), so the final amount cannot be estimated + * before hand + */ + struct V9fsDirEnt *next; +} V9fsDirEnt; + +/* + * Filled by fs driver on open and other + * calls. + */ +union V9fsFidOpenState { + int fd; + V9fsDir dir; + V9fsXattr xattr; + /* + * private pointer for fs drivers, that + * have its own internal representation of + * open files. + */ + void *private; +}; + +struct V9fsFidState { + int fid_type; + int32_t fid; + V9fsPath path; + V9fsFidOpenState fs; + V9fsFidOpenState fs_reclaim; + int flags; + int open_flags; + uid_t uid; + int ref; + bool clunked; + QSIMPLEQ_ENTRY(V9fsFidState) next; + QSLIST_ENTRY(V9fsFidState) reclaim_next; +}; + +typedef enum AffixType_t { + AffixType_Prefix, + AffixType_Suffix, /* A.k.a. postfix. */ +} AffixType_t; + +/** + * @brief Unique affix of variable length. + * + * An affix is (currently) either a suffix or a prefix, which is either + * going to be prepended (prefix) or appended (suffix) with some other + * number for the goal to generate unique numbers. 
Accordingly the + suffixes (or prefixes) we generate @b must all have the mathematical + property of being suffix-free (or prefix-free in the case of prefixes), + so that no matter what number we concatenate the affix with, we + always reliably get unique numbers as a result after concatenation. + */ +typedef struct VariLenAffix { + AffixType_t type; /* Whether this affix is a suffix or a prefix. */ + uint64_t value; /* Actual numerical value of this affix. */ + /* + * Length of the affix, that is how many (of the lowest) bits of @c value + * must be used for appending/prepending this affix to its final resulting, + * unique number. + */ + int bits; +} VariLenAffix; + +/* See qid_inode_prefix_hash_bits(). */ +typedef struct { + dev_t dev; /* FS device on host. */ + /* + * How many (high) bits of the original inode number shall be used for + * hashing. + */ + int prefix_bits; +} QpdEntry; + +/* QID path prefix entry, see stat_to_qid */ +typedef struct { + dev_t dev; + uint16_t ino_prefix; + uint32_t qp_affix_index; + VariLenAffix qp_affix; +} QppEntry; + +/* QID path full entry, as above */ +typedef struct { + dev_t dev; + ino_t ino; + uint64_t path; +} QpfEntry; + +struct V9fsState { + QLIST_HEAD(, V9fsPDU) free_list; + QLIST_HEAD(, V9fsPDU) active_list; + QSIMPLEQ_HEAD(, V9fsFidState) fid_list; + FileOperations *ops; + FsContext ctx; + char *tag; + P9ProtoVersion proto_version; + int32_t msize; + V9fsPDU pdus[MAX_REQ]; + const V9fsTransport *transport; + /* + * lock ensuring atomic path update + * on rename. + */ + CoRwlock rename_lock; + int32_t root_fid; + Error *migration_blocker; + V9fsConf fsconf; + struct stat root_st; + dev_t dev_id; + struct qht qpd_table; + struct qht qpp_table; + struct qht qpf_table; + uint64_t qp_ndevices; /* Number of entries in qpd_table. 
*/ + uint16_t qp_affix_next; + uint64_t qp_fullpath_next; +}; + +/* 9p2000.L open flags */ +#define P9_DOTL_RDONLY 00000000 +#define P9_DOTL_WRONLY 00000001 +#define P9_DOTL_RDWR 00000002 +#define P9_DOTL_NOACCESS 00000003 +#define P9_DOTL_CREATE 00000100 +#define P9_DOTL_EXCL 00000200 +#define P9_DOTL_NOCTTY 00000400 +#define P9_DOTL_TRUNC 00001000 +#define P9_DOTL_APPEND 00002000 +#define P9_DOTL_NONBLOCK 00004000 +#define P9_DOTL_DSYNC 00010000 +#define P9_DOTL_FASYNC 00020000 +#define P9_DOTL_DIRECT 00040000 +#define P9_DOTL_LARGEFILE 00100000 +#define P9_DOTL_DIRECTORY 00200000 +#define P9_DOTL_NOFOLLOW 00400000 +#define P9_DOTL_NOATIME 01000000 +#define P9_DOTL_CLOEXEC 02000000 +#define P9_DOTL_SYNC 04000000 + +/* 9p2000.L at flags */ +#define P9_DOTL_AT_REMOVEDIR 0x200 + +/* 9P2000.L lock type */ +#define P9_LOCK_TYPE_RDLCK 0 +#define P9_LOCK_TYPE_WRLCK 1 +#define P9_LOCK_TYPE_UNLCK 2 + +#define P9_LOCK_SUCCESS 0 +#define P9_LOCK_BLOCKED 1 +#define P9_LOCK_ERROR 2 +#define P9_LOCK_GRACE 3 + +#define P9_LOCK_FLAGS_BLOCK 1 +#define P9_LOCK_FLAGS_RECLAIM 2 + +typedef struct V9fsFlock +{ + uint8_t type; + uint32_t flags; + uint64_t start; /* absolute offset */ + uint64_t length; + uint32_t proc_id; + V9fsString client_id; +} V9fsFlock; + +typedef struct V9fsGetlock +{ + uint8_t type; + uint64_t start; /* absolute offset */ + uint64_t length; + uint32_t proc_id; + V9fsString client_id; +} V9fsGetlock; + +extern int open_fd_hw; +extern int total_open_fd; + +static inline void v9fs_path_write_lock(V9fsState *s) +{ + if (s->ctx.export_flags & V9FS_PATHNAME_FSCONTEXT) { + qemu_co_rwlock_wrlock(&s->rename_lock); + } +} + +static inline void v9fs_path_read_lock(V9fsState *s) +{ + if (s->ctx.export_flags & V9FS_PATHNAME_FSCONTEXT) { + qemu_co_rwlock_rdlock(&s->rename_lock); + } +} + +static inline void v9fs_path_unlock(V9fsState *s) +{ + if (s->ctx.export_flags & V9FS_PATHNAME_FSCONTEXT) { + qemu_co_rwlock_unlock(&s->rename_lock); + } +} + +static inline uint8_t v9fs_request_cancelled(V9fsPDU *pdu) +{ + return pdu->cancelled; +} + +void coroutine_fn v9fs_reclaim_fd(V9fsPDU *pdu); +void v9fs_path_init(V9fsPath *path); +void v9fs_path_free(V9fsPath *path); +void v9fs_path_sprintf(V9fsPath *path, const char *fmt, ...); +void v9fs_path_copy(V9fsPath *dst, const V9fsPath *src); +size_t v9fs_readdir_response_size(V9fsString *name); +int v9fs_name_to_path(V9fsState *s, V9fsPath *dirpath, + const char *name, V9fsPath *path); +int v9fs_device_realize_common(V9fsState *s, const V9fsTransport *t, + Error **errp); +void v9fs_device_unrealize_common(V9fsState *s); + +V9fsPDU *pdu_alloc(V9fsState *s); +void pdu_free(V9fsPDU *pdu); +void pdu_submit(V9fsPDU *pdu, P9MsgHeader *hdr); +void v9fs_reset(V9fsState *s); + +struct V9fsTransport { + ssize_t (*pdu_vmarshal)(V9fsPDU *pdu, size_t offset, const char *fmt, + va_list ap); + ssize_t (*pdu_vunmarshal)(V9fsPDU *pdu, size_t offset, const char *fmt, + va_list ap); + void (*init_in_iov_from_pdu)(V9fsPDU *pdu, struct iovec **piov, + unsigned int *pniov, size_t size); + void (*init_out_iov_from_pdu)(V9fsPDU *pdu, struct iovec **piov, + unsigned int *pniov, size_t size); + void (*push_and_notify)(V9fsPDU *pdu); +}; + +#endif diff --git a/hw/9pfs/Kconfig b/hw/9pfs/Kconfig new file mode 100644 index 000000000..3ae574966 --- /dev/null +++ b/hw/9pfs/Kconfig @@ -0,0 +1,9 @@ +config FSDEV_9P + bool + depends on VIRTFS + +config VIRTIO_9P + bool + default y + depends on VIRTFS && VIRTIO + select FSDEV_9P diff --git a/hw/9pfs/codir.c b/hw/9pfs/codir.c new file mode 100644 
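The three locking inlines above implement the discipline that protects fid paths on path-based exports: ordinary operations resolve paths under the read side of rename_lock, while Trename, Trenameat and the wstat rename case take the write side so no path can be read mid-rewrite; for non path-based exports all three collapse to no-ops. An illustrative fragment (relies on the declarations in this header, not compilable standalone):

#include "9p.h"

/* Sketch: the critical section every rename-style handler in 9p.c wraps
 * around its path rewrite. */
static void coroutine_fn rename_critical_section(V9fsState *s)
{
    v9fs_path_write_lock(s);
    /* ... v9fs_complete_rename() / v9fs_fix_fid_paths() run here ... */
    v9fs_path_unlock(s);
}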
index 000000000..032cce04c --- /dev/null +++ b/hw/9pfs/codir.c @@ -0,0 +1,360 @@ +/* + * 9p backend + * + * Copyright IBM, Corp. 2011 + * + * Authors: + * Aneesh Kumar K.V + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + * + */ + +/* + * Not so fast! You might want to read the 9p developer docs first: + * https://wiki.qemu.org/Documentation/9p + */ + +#include "qemu/osdep.h" +#include "fsdev/qemu-fsdev.h" +#include "qemu/thread.h" +#include "qemu/coroutine.h" +#include "qemu/main-loop.h" +#include "coth.h" + +/* + * Intended to be called from bottom-half (e.g. background I/O thread) + * context. + */ +static int do_readdir(V9fsPDU *pdu, V9fsFidState *fidp, struct dirent **dent) +{ + int err = 0; + V9fsState *s = pdu->s; + struct dirent *entry; + + errno = 0; + entry = s->ops->readdir(&s->ctx, &fidp->fs); + if (!entry && errno) { + *dent = NULL; + err = -errno; + } else { + *dent = entry; + } + return err; +} + +/* + * TODO: This will be removed for performance reasons. + * Use v9fs_co_readdir_many() instead. + */ +int coroutine_fn v9fs_co_readdir(V9fsPDU *pdu, V9fsFidState *fidp, + struct dirent **dent) +{ + int err; + + if (v9fs_request_cancelled(pdu)) { + return -EINTR; + } + v9fs_co_run_in_worker({ + err = do_readdir(pdu, fidp, dent); + }); + return err; +} + +/* + * This is solely executed on a background IO thread. + * + * See v9fs_co_readdir_many() (as its only user) below for details. + */ +static int do_readdir_many(V9fsPDU *pdu, V9fsFidState *fidp, + struct V9fsDirEnt **entries, off_t offset, + int32_t maxsize, bool dostat) +{ + V9fsState *s = pdu->s; + V9fsString name; + int len, err = 0; + int32_t size = 0; + off_t saved_dir_pos; + struct dirent *dent; + struct V9fsDirEnt *e = NULL; + V9fsPath path; + struct stat stbuf; + + *entries = NULL; + v9fs_path_init(&path); + + /* + * TODO: Here should be a warn_report_once() if lock failed. + * + * With a good 9p client we should not get into concurrency here, + * because a good client would not use the same fid for concurrent + * requests. We do the lock here for safety reasons though. However + * the client would then suffer performance issues, so better log that + * issue here. 
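do_readdir() above has to distinguish end-of-directory from a real failure, and readdir() reports both by returning NULL; clearing errno before the call and testing it afterwards is the only portable way to tell them apart. The same idiom in isolation:

#include <dirent.h>
#include <errno.h>

/* Sketch: NULL with errno still 0 means end of directory; NULL with a
 * nonzero errno is an error, returned negated as the 9p code does. */
static int read_one(DIR *dir, struct dirent **out)
{
    errno = 0;
    *out = readdir(dir);
    if (!*out && errno) {
        return -errno;
    }
    return 0;
}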
+ */ + v9fs_readdir_lock(&fidp->fs.dir); + + /* seek directory to requested initial position */ + if (offset == 0) { + s->ops->rewinddir(&s->ctx, &fidp->fs); + } else { + s->ops->seekdir(&s->ctx, &fidp->fs, offset); + } + + /* save the directory position */ + saved_dir_pos = s->ops->telldir(&s->ctx, &fidp->fs); + if (saved_dir_pos < 0) { + err = saved_dir_pos; + goto out; + } + + while (true) { + /* interrupt loop if request was cancelled by a Tflush request */ + if (v9fs_request_cancelled(pdu)) { + err = -EINTR; + break; + } + + /* get directory entry from fs driver */ + err = do_readdir(pdu, fidp, &dent); + if (err || !dent) { + break; + } + + /* + * stop this loop as soon as it would exceed the allowed maximum + * response message size for the directory entries collected so far, + * because anything beyond that size would need to be discarded by + * 9p controller (main thread / top half) anyway + */ + v9fs_string_init(&name); + v9fs_string_sprintf(&name, "%s", dent->d_name); + len = v9fs_readdir_response_size(&name); + v9fs_string_free(&name); + if (size + len > maxsize) { + /* this is not an error case actually */ + break; + } + + /* append next node to result chain */ + if (!e) { + *entries = e = g_malloc0(sizeof(V9fsDirEnt)); + } else { + e = e->next = g_malloc0(sizeof(V9fsDirEnt)); + } + e->dent = g_malloc0(sizeof(struct dirent)); + memcpy(e->dent, dent, sizeof(struct dirent)); + + /* perform a full stat() for directory entry if requested by caller */ + if (dostat) { + err = s->ops->name_to_path( + &s->ctx, &fidp->path, dent->d_name, &path + ); + if (err < 0) { + err = -errno; + break; + } + + err = s->ops->lstat(&s->ctx, &path, &stbuf); + if (err < 0) { + err = -errno; + break; + } + + e->st = g_malloc0(sizeof(struct stat)); + memcpy(e->st, &stbuf, sizeof(struct stat)); + } + + size += len; + saved_dir_pos = dent->d_off; + } + + /* restore (last) saved position */ + s->ops->seekdir(&s->ctx, &fidp->fs, saved_dir_pos); + +out: + v9fs_readdir_unlock(&fidp->fs.dir); + v9fs_path_free(&path); + if (err < 0) { + return err; + } + return size; +} + +/** + * @brief Reads multiple directory entries in one rush. + * + * Retrieves the requested (max. amount of) directory entries from the fs + * driver. This function must only be called by the main IO thread (top half). + * Internally this function call will be dispatched to a background IO thread + * (bottom half) where it is eventually executed by the fs driver. + * + * @discussion Acquiring multiple directory entries in one rush from the fs + * driver, instead of retrieving each directory entry individually, is very + * beneficial from performance point of view. Because for every fs driver + * request latency is added, which in practice could lead to overall + * latencies of several hundred ms for reading all entries (of just a single + * directory) if every directory entry was individually requested from fs + * driver. + * + * @note You must @b ALWAYS call @c v9fs_free_dirents(entries) after calling + * v9fs_co_readdir_many(), both on success and on error cases of this + * function, to avoid memory leaks once @p entries are no longer needed. 
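The loop above stops collecting entries as soon as the next one would push the reply past maxsize, since the top half would only have to discard the overflow. The budgeting logic in isolation (budget_entries is hypothetical; the per-entry sizes would come from v9fs_readdir_response_size()):

/* Sketch: accept entries while they fit the 9P reply budget; the first
 * entry that would overflow ends the batch, which is not an error. */
static int budget_entries(const int *entry_sizes, int n, int maxsize)
{
    int size = 0, accepted = 0, i;

    for (i = 0; i < n; i++) {
        if (size + entry_sizes[i] > maxsize) {
            break;
        }
        size += entry_sizes[i];
        accepted++;
    }
    return accepted;
}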
+ * + * @param pdu - the causing 9p (T_readdir) client request + * @param fidp - already opened directory where readdir shall be performed on + * @param entries - output for directory entries (must not be NULL) + * @param offset - initial position inside the directory the function shall + * seek to before retrieving the directory entries + * @param maxsize - maximum result message body size (in bytes) + * @param dostat - whether a stat() should be performed and returned for + * each directory entry + * @returns resulting response message body size (in bytes) on success, + * negative error code otherwise + */ +int coroutine_fn v9fs_co_readdir_many(V9fsPDU *pdu, V9fsFidState *fidp, + struct V9fsDirEnt **entries, + off_t offset, int32_t maxsize, + bool dostat) +{ + int err = 0; + + if (v9fs_request_cancelled(pdu)) { + return -EINTR; + } + v9fs_co_run_in_worker({ + err = do_readdir_many(pdu, fidp, entries, offset, maxsize, dostat); + }); + return err; +} + +off_t v9fs_co_telldir(V9fsPDU *pdu, V9fsFidState *fidp) +{ + off_t err; + V9fsState *s = pdu->s; + + if (v9fs_request_cancelled(pdu)) { + return -EINTR; + } + v9fs_co_run_in_worker( + { + err = s->ops->telldir(&s->ctx, &fidp->fs); + if (err < 0) { + err = -errno; + } + }); + return err; +} + +void coroutine_fn v9fs_co_seekdir(V9fsPDU *pdu, V9fsFidState *fidp, + off_t offset) +{ + V9fsState *s = pdu->s; + if (v9fs_request_cancelled(pdu)) { + return; + } + v9fs_co_run_in_worker( + { + s->ops->seekdir(&s->ctx, &fidp->fs, offset); + }); +} + +void coroutine_fn v9fs_co_rewinddir(V9fsPDU *pdu, V9fsFidState *fidp) +{ + V9fsState *s = pdu->s; + if (v9fs_request_cancelled(pdu)) { + return; + } + v9fs_co_run_in_worker( + { + s->ops->rewinddir(&s->ctx, &fidp->fs); + }); +} + +int coroutine_fn v9fs_co_mkdir(V9fsPDU *pdu, V9fsFidState *fidp, + V9fsString *name, mode_t mode, uid_t uid, + gid_t gid, struct stat *stbuf) +{ + int err; + FsCred cred; + V9fsPath path; + V9fsState *s = pdu->s; + + if (v9fs_request_cancelled(pdu)) { + return -EINTR; + } + cred_init(&cred); + cred.fc_mode = mode; + cred.fc_uid = uid; + cred.fc_gid = gid; + v9fs_path_read_lock(s); + v9fs_co_run_in_worker( + { + err = s->ops->mkdir(&s->ctx, &fidp->path, name->data, &cred); + if (err < 0) { + err = -errno; + } else { + v9fs_path_init(&path); + err = v9fs_name_to_path(s, &fidp->path, name->data, &path); + if (!err) { + err = s->ops->lstat(&s->ctx, &path, stbuf); + if (err < 0) { + err = -errno; + } + } + v9fs_path_free(&path); + } + }); + v9fs_path_unlock(s); + return err; +} + +int coroutine_fn v9fs_co_opendir(V9fsPDU *pdu, V9fsFidState *fidp) +{ + int err; + V9fsState *s = pdu->s; + + if (v9fs_request_cancelled(pdu)) { + return -EINTR; + } + v9fs_path_read_lock(s); + v9fs_co_run_in_worker( + { + err = s->ops->opendir(&s->ctx, &fidp->path, &fidp->fs); + if (err < 0) { + err = -errno; + } else { + err = 0; + } + }); + v9fs_path_unlock(s); + if (!err) { + total_open_fd++; + if (total_open_fd > open_fd_hw) { + v9fs_reclaim_fd(pdu); + } + } + return err; +} + +int coroutine_fn v9fs_co_closedir(V9fsPDU *pdu, V9fsFidOpenState *fs) +{ + int err; + V9fsState *s = pdu->s; + + if (v9fs_request_cancelled(pdu)) { + return -EINTR; + } + v9fs_co_run_in_worker( + { + err = s->ops->closedir(&s->ctx, fs); + if (err < 0) { + err = -errno; + } + }); + if (!err) { + total_open_fd--; + } + return err; +} diff --git a/hw/9pfs/cofile.c b/hw/9pfs/cofile.c new file mode 100644 index 000000000..20f93a90e --- /dev/null +++ b/hw/9pfs/cofile.c @@ -0,0 +1,285 @@ +/* + * 9p backend + * + * Copyright IBM, Corp. 
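v9fs_co_opendir() above (and the open paths in cofile.c) trip fd reclaim once total_open_fd exceeds open_fd_hw, the watermark derived from RLIMIT_NOFILE by the v9fs_set_fd_limit() constructor in 9p.c. Checking that arithmetic for some common soft limits (standalone program, illustrative only):

#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

/* Sketch: the fd budget from v9fs_set_fd_limit(). With the classic 1024
 * limit: open_fd_hw = 1024 - MIN(400, 341) = 683, open_fd_rc = 512. */
int main(void)
{
    unsigned long limits[] = { 1024, 4096, 1048576 };
    int i;

    for (i = 0; i < 3; i++) {
        unsigned long cur = limits[i];

        printf("nofile=%lu -> open_fd_hw=%lu open_fd_rc=%lu\n",
               cur, cur - MIN(400, cur / 3), cur / 2);
    }
    return 0;
}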
2011 + * + * Authors: + * Aneesh Kumar K.V + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + * + */ + +/* + * Not so fast! You might want to read the 9p developer docs first: + * https://wiki.qemu.org/Documentation/9p + */ + +#include "qemu/osdep.h" +#include "fsdev/qemu-fsdev.h" +#include "qemu/thread.h" +#include "qemu/coroutine.h" +#include "qemu/main-loop.h" +#include "coth.h" + +int coroutine_fn v9fs_co_st_gen(V9fsPDU *pdu, V9fsPath *path, mode_t st_mode, + V9fsStatDotl *v9stat) +{ + int err = 0; + V9fsState *s = pdu->s; + + if (v9fs_request_cancelled(pdu)) { + return -EINTR; + } + if (s->ctx.exops.get_st_gen) { + v9fs_path_read_lock(s); + v9fs_co_run_in_worker( + { + err = s->ctx.exops.get_st_gen(&s->ctx, path, st_mode, + &v9stat->st_gen); + if (err < 0) { + err = -errno; + } + }); + v9fs_path_unlock(s); + } + return err; +} + +int coroutine_fn v9fs_co_lstat(V9fsPDU *pdu, V9fsPath *path, struct stat *stbuf) +{ + int err; + V9fsState *s = pdu->s; + + if (v9fs_request_cancelled(pdu)) { + return -EINTR; + } + v9fs_path_read_lock(s); + v9fs_co_run_in_worker( + { + err = s->ops->lstat(&s->ctx, path, stbuf); + if (err < 0) { + err = -errno; + } + }); + v9fs_path_unlock(s); + return err; +} + +int coroutine_fn v9fs_co_fstat(V9fsPDU *pdu, V9fsFidState *fidp, + struct stat *stbuf) +{ + int err; + V9fsState *s = pdu->s; + + if (v9fs_request_cancelled(pdu)) { + return -EINTR; + } + v9fs_co_run_in_worker( + { + err = s->ops->fstat(&s->ctx, fidp->fid_type, &fidp->fs, stbuf); + if (err < 0) { + err = -errno; + } + }); + /* + * Some FS driver (local:mapped-file) can't support fetching attributes + * using file descriptor. Use Path name in that case. + */ + if (err == -EOPNOTSUPP) { + err = v9fs_co_lstat(pdu, &fidp->path, stbuf); + if (err == -ENOENT) { + /* + * fstat on an unlinked file. Work with partial results + * returned from s->ops->fstat + */ + err = 0; + } + } + return err; +} + +int coroutine_fn v9fs_co_open(V9fsPDU *pdu, V9fsFidState *fidp, int flags) +{ + int err; + V9fsState *s = pdu->s; + + if (v9fs_request_cancelled(pdu)) { + return -EINTR; + } + v9fs_path_read_lock(s); + v9fs_co_run_in_worker( + { + err = s->ops->open(&s->ctx, &fidp->path, flags, &fidp->fs); + if (err == -1) { + err = -errno; + } else { + err = 0; + } + }); + v9fs_path_unlock(s); + if (!err) { + total_open_fd++; + if (total_open_fd > open_fd_hw) { + v9fs_reclaim_fd(pdu); + } + } + return err; +} + +int coroutine_fn v9fs_co_open2(V9fsPDU *pdu, V9fsFidState *fidp, + V9fsString *name, gid_t gid, int flags, int mode, + struct stat *stbuf) +{ + int err; + FsCred cred; + V9fsPath path; + V9fsState *s = pdu->s; + + if (v9fs_request_cancelled(pdu)) { + return -EINTR; + } + cred_init(&cred); + cred.fc_mode = mode & 07777; + cred.fc_uid = fidp->uid; + cred.fc_gid = gid; + /* + * Hold the directory fid lock so that directory path name + * don't change. Take the write lock to be sure this fid + * cannot be used by another operation. 
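v9fs_co_fstat() above degrades gracefully when a backend (such as local with mapped-file security) cannot stat through a file descriptor: -EOPNOTSUPP triggers a retry by path, and -ENOENT on that retry is deliberately swallowed, because fstat on an unlinked-but-open file should still return the partial fd results. The shape of that fallback in isolation (hypothetical callback types):

#include <errno.h>

typedef int (*stat_fn)(void *handle, void *stbuf);

/* Sketch: try by fd, fall back to by path, tolerate unlink races. */
static int stat_with_fallback(stat_fn by_fd, stat_fn by_path,
                              void *handle, void *stbuf)
{
    int err = by_fd(handle, stbuf);

    if (err == -EOPNOTSUPP) {
        err = by_path(handle, stbuf);
        if (err == -ENOENT) {
            err = 0;    /* unlinked file: keep the partial fd results */
        }
    }
    return err;
}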
+ */ + v9fs_path_write_lock(s); + v9fs_co_run_in_worker( + { + err = s->ops->open2(&s->ctx, &fidp->path, + name->data, flags, &cred, &fidp->fs); + if (err < 0) { + err = -errno; + } else { + v9fs_path_init(&path); + err = v9fs_name_to_path(s, &fidp->path, name->data, &path); + if (!err) { + err = s->ops->lstat(&s->ctx, &path, stbuf); + if (err < 0) { + err = -errno; + s->ops->close(&s->ctx, &fidp->fs); + } else { + v9fs_path_copy(&fidp->path, &path); + } + } else { + s->ops->close(&s->ctx, &fidp->fs); + } + v9fs_path_free(&path); + } + }); + v9fs_path_unlock(s); + if (!err) { + total_open_fd++; + if (total_open_fd > open_fd_hw) { + v9fs_reclaim_fd(pdu); + } + } + return err; +} + +int coroutine_fn v9fs_co_close(V9fsPDU *pdu, V9fsFidOpenState *fs) +{ + int err; + V9fsState *s = pdu->s; + + if (v9fs_request_cancelled(pdu)) { + return -EINTR; + } + v9fs_co_run_in_worker( + { + err = s->ops->close(&s->ctx, fs); + if (err < 0) { + err = -errno; + } + }); + if (!err) { + total_open_fd--; + } + return err; +} + +int coroutine_fn v9fs_co_fsync(V9fsPDU *pdu, V9fsFidState *fidp, int datasync) +{ + int err; + V9fsState *s = pdu->s; + + if (v9fs_request_cancelled(pdu)) { + return -EINTR; + } + v9fs_co_run_in_worker( + { + err = s->ops->fsync(&s->ctx, fidp->fid_type, &fidp->fs, datasync); + if (err < 0) { + err = -errno; + } + }); + return err; +} + +int coroutine_fn v9fs_co_link(V9fsPDU *pdu, V9fsFidState *oldfid, + V9fsFidState *newdirfid, V9fsString *name) +{ + int err; + V9fsState *s = pdu->s; + + if (v9fs_request_cancelled(pdu)) { + return -EINTR; + } + v9fs_path_read_lock(s); + v9fs_co_run_in_worker( + { + err = s->ops->link(&s->ctx, &oldfid->path, + &newdirfid->path, name->data); + if (err < 0) { + err = -errno; + } + }); + v9fs_path_unlock(s); + return err; +} + +int coroutine_fn v9fs_co_pwritev(V9fsPDU *pdu, V9fsFidState *fidp, + struct iovec *iov, int iovcnt, int64_t offset) +{ + int err; + V9fsState *s = pdu->s; + + if (v9fs_request_cancelled(pdu)) { + return -EINTR; + } + fsdev_co_throttle_request(s->ctx.fst, true, iov, iovcnt); + v9fs_co_run_in_worker( + { + err = s->ops->pwritev(&s->ctx, &fidp->fs, iov, iovcnt, offset); + if (err < 0) { + err = -errno; + } + }); + return err; +} + +int coroutine_fn v9fs_co_preadv(V9fsPDU *pdu, V9fsFidState *fidp, + struct iovec *iov, int iovcnt, int64_t offset) +{ + int err; + V9fsState *s = pdu->s; + + if (v9fs_request_cancelled(pdu)) { + return -EINTR; + } + fsdev_co_throttle_request(s->ctx.fst, false, iov, iovcnt); + v9fs_co_run_in_worker( + { + err = s->ops->preadv(&s->ctx, &fidp->fs, iov, iovcnt, offset); + if (err < 0) { + err = -errno; + } + }); + return err; +} diff --git a/hw/9pfs/cofs.c b/hw/9pfs/cofs.c new file mode 100644 index 000000000..9d0adc2e7 --- /dev/null +++ b/hw/9pfs/cofs.c @@ -0,0 +1,377 @@ +/* + * 9p backend + * + * Copyright IBM, Corp. 2011 + * + * Authors: + * Aneesh Kumar K.V + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + * + */ + +/* + * Not so fast! 
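Every worker body in these files converts syscall-style failures with err = -errno immediately inside the v9fs_co_run_in_worker() block, before anything else can clobber errno; note also that v9fs_co_pwritev()/v9fs_co_preadv() above apply fsdev throttling on the QEMU thread first, so a throttled request does not sit on a worker. The errno convention in isolation:

#include <errno.h>
#include <unistd.h>

/* Sketch: syscalls report failure through errno; the 9p server wants
 * negative error codes, so capture errno right at the call site. */
static ssize_t co_style_pread(int fd, void *buf, size_t n, off_t off)
{
    ssize_t err = pread(fd, buf, n, off);

    if (err < 0) {
        err = -errno;
    }
    return err;
}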
You might want to read the 9p developer docs first: + * https://wiki.qemu.org/Documentation/9p + */ + +#include "qemu/osdep.h" +#include "fsdev/qemu-fsdev.h" +#include "qemu/thread.h" +#include "qemu/coroutine.h" +#include "qemu/main-loop.h" +#include "coth.h" + +static ssize_t __readlink(V9fsState *s, V9fsPath *path, V9fsString *buf) +{ + ssize_t len, maxlen = PATH_MAX; + + buf->data = g_malloc(PATH_MAX); + for (;;) { + len = s->ops->readlink(&s->ctx, path, buf->data, maxlen); + if (len < 0) { + g_free(buf->data); + buf->data = NULL; + buf->size = 0; + break; + } else if (len == maxlen) { + /* + * We dodn't have space to put the NULL or we have more + * to read. Increase the size and try again + */ + maxlen *= 2; + g_free(buf->data); + buf->data = g_malloc(maxlen); + continue; + } + /* + * Null terminate the readlink output + */ + buf->data[len] = '\0'; + buf->size = len; + break; + } + return len; +} + +int coroutine_fn v9fs_co_readlink(V9fsPDU *pdu, V9fsPath *path, V9fsString *buf) +{ + int err; + V9fsState *s = pdu->s; + + if (v9fs_request_cancelled(pdu)) { + return -EINTR; + } + v9fs_path_read_lock(s); + v9fs_co_run_in_worker( + { + err = __readlink(s, path, buf); + if (err < 0) { + err = -errno; + } + }); + v9fs_path_unlock(s); + return err; +} + +int coroutine_fn v9fs_co_statfs(V9fsPDU *pdu, V9fsPath *path, + struct statfs *stbuf) +{ + int err; + V9fsState *s = pdu->s; + + if (v9fs_request_cancelled(pdu)) { + return -EINTR; + } + v9fs_path_read_lock(s); + v9fs_co_run_in_worker( + { + err = s->ops->statfs(&s->ctx, path, stbuf); + if (err < 0) { + err = -errno; + } + }); + v9fs_path_unlock(s); + return err; +} + +int coroutine_fn v9fs_co_chmod(V9fsPDU *pdu, V9fsPath *path, mode_t mode) +{ + int err; + FsCred cred; + V9fsState *s = pdu->s; + + if (v9fs_request_cancelled(pdu)) { + return -EINTR; + } + cred_init(&cred); + cred.fc_mode = mode; + v9fs_path_read_lock(s); + v9fs_co_run_in_worker( + { + err = s->ops->chmod(&s->ctx, path, &cred); + if (err < 0) { + err = -errno; + } + }); + v9fs_path_unlock(s); + return err; +} + +int coroutine_fn v9fs_co_utimensat(V9fsPDU *pdu, V9fsPath *path, + struct timespec times[2]) +{ + int err; + V9fsState *s = pdu->s; + + if (v9fs_request_cancelled(pdu)) { + return -EINTR; + } + v9fs_path_read_lock(s); + v9fs_co_run_in_worker( + { + err = s->ops->utimensat(&s->ctx, path, times); + if (err < 0) { + err = -errno; + } + }); + v9fs_path_unlock(s); + return err; +} + +int coroutine_fn v9fs_co_chown(V9fsPDU *pdu, V9fsPath *path, uid_t uid, + gid_t gid) +{ + int err; + FsCred cred; + V9fsState *s = pdu->s; + + if (v9fs_request_cancelled(pdu)) { + return -EINTR; + } + cred_init(&cred); + cred.fc_uid = uid; + cred.fc_gid = gid; + v9fs_path_read_lock(s); + v9fs_co_run_in_worker( + { + err = s->ops->chown(&s->ctx, path, &cred); + if (err < 0) { + err = -errno; + } + }); + v9fs_path_unlock(s); + return err; +} + +int coroutine_fn v9fs_co_truncate(V9fsPDU *pdu, V9fsPath *path, off_t size) +{ + int err; + V9fsState *s = pdu->s; + + if (v9fs_request_cancelled(pdu)) { + return -EINTR; + } + v9fs_path_read_lock(s); + v9fs_co_run_in_worker( + { + err = s->ops->truncate(&s->ctx, path, size); + if (err < 0) { + err = -errno; + } + }); + v9fs_path_unlock(s); + return err; +} + +int coroutine_fn v9fs_co_mknod(V9fsPDU *pdu, V9fsFidState *fidp, + V9fsString *name, uid_t uid, gid_t gid, + dev_t dev, mode_t mode, struct stat *stbuf) +{ + int err; + V9fsPath path; + FsCred cred; + V9fsState *s = pdu->s; + + if (v9fs_request_cancelled(pdu)) { + return -EINTR; + } + 
cred_init(&cred); + cred.fc_uid = uid; + cred.fc_gid = gid; + cred.fc_mode = mode; + cred.fc_rdev = dev; + v9fs_path_read_lock(s); + v9fs_co_run_in_worker( + { + err = s->ops->mknod(&s->ctx, &fidp->path, name->data, &cred); + if (err < 0) { + err = -errno; + } else { + v9fs_path_init(&path); + err = v9fs_name_to_path(s, &fidp->path, name->data, &path); + if (!err) { + err = s->ops->lstat(&s->ctx, &path, stbuf); + if (err < 0) { + err = -errno; + } + } + v9fs_path_free(&path); + } + }); + v9fs_path_unlock(s); + return err; +} + +/* Only works with path name based fid */ +int coroutine_fn v9fs_co_remove(V9fsPDU *pdu, V9fsPath *path) +{ + int err; + V9fsState *s = pdu->s; + + if (v9fs_request_cancelled(pdu)) { + return -EINTR; + } + v9fs_path_read_lock(s); + v9fs_co_run_in_worker( + { + err = s->ops->remove(&s->ctx, path->data); + if (err < 0) { + err = -errno; + } + }); + v9fs_path_unlock(s); + return err; +} + +int coroutine_fn v9fs_co_unlinkat(V9fsPDU *pdu, V9fsPath *path, + V9fsString *name, int flags) +{ + int err; + V9fsState *s = pdu->s; + + if (v9fs_request_cancelled(pdu)) { + return -EINTR; + } + v9fs_path_read_lock(s); + v9fs_co_run_in_worker( + { + err = s->ops->unlinkat(&s->ctx, path, name->data, flags); + if (err < 0) { + err = -errno; + } + }); + v9fs_path_unlock(s); + return err; +} + +/* Only works with path name based fid */ +int coroutine_fn v9fs_co_rename(V9fsPDU *pdu, V9fsPath *oldpath, + V9fsPath *newpath) +{ + int err; + V9fsState *s = pdu->s; + + if (v9fs_request_cancelled(pdu)) { + return -EINTR; + } + v9fs_co_run_in_worker( + { + err = s->ops->rename(&s->ctx, oldpath->data, newpath->data); + if (err < 0) { + err = -errno; + } + }); + return err; +} + +int coroutine_fn v9fs_co_renameat(V9fsPDU *pdu, V9fsPath *olddirpath, + V9fsString *oldname, V9fsPath *newdirpath, + V9fsString *newname) +{ + int err; + V9fsState *s = pdu->s; + + if (v9fs_request_cancelled(pdu)) { + return -EINTR; + } + v9fs_co_run_in_worker( + { + err = s->ops->renameat(&s->ctx, olddirpath, oldname->data, + newdirpath, newname->data); + if (err < 0) { + err = -errno; + } + }); + return err; +} + +int coroutine_fn v9fs_co_symlink(V9fsPDU *pdu, V9fsFidState *dfidp, + V9fsString *name, const char *oldpath, + gid_t gid, struct stat *stbuf) +{ + int err; + FsCred cred; + V9fsPath path; + V9fsState *s = pdu->s; + + if (v9fs_request_cancelled(pdu)) { + return -EINTR; + } + cred_init(&cred); + cred.fc_uid = dfidp->uid; + cred.fc_gid = gid; + cred.fc_mode = 0777; + v9fs_path_read_lock(s); + v9fs_co_run_in_worker( + { + err = s->ops->symlink(&s->ctx, oldpath, &dfidp->path, + name->data, &cred); + if (err < 0) { + err = -errno; + } else { + v9fs_path_init(&path); + err = v9fs_name_to_path(s, &dfidp->path, name->data, &path); + if (!err) { + err = s->ops->lstat(&s->ctx, &path, stbuf); + if (err < 0) { + err = -errno; + } + } + v9fs_path_free(&path); + } + }); + v9fs_path_unlock(s); + return err; +} + +/* + * For path name based fid we don't block. So we can + * directly call the fs driver ops. 
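+ * With V9FS_PATHNAME_FSCONTEXT the driver typically resolves a name by + * string manipulation on the parent path alone, so the call below is + * made directly from the QEMU thread; drivers without that flag may + * touch the filesystem and are therefore routed through a worker thread.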
+ */ +int coroutine_fn v9fs_co_name_to_path(V9fsPDU *pdu, V9fsPath *dirpath, + const char *name, V9fsPath *path) +{ + int err; + V9fsState *s = pdu->s; + + if (s->ctx.export_flags & V9FS_PATHNAME_FSCONTEXT) { + err = s->ops->name_to_path(&s->ctx, dirpath, name, path); + if (err < 0) { + err = -errno; + } + } else { + if (v9fs_request_cancelled(pdu)) { + return -EINTR; + } + v9fs_co_run_in_worker( + { + err = s->ops->name_to_path(&s->ctx, dirpath, name, path); + if (err < 0) { + err = -errno; + } + }); + } + return err; +} diff --git a/hw/9pfs/coth.c b/hw/9pfs/coth.c new file mode 100644 index 000000000..2802d41cc --- /dev/null +++ b/hw/9pfs/coth.c @@ -0,0 +1,46 @@ +/* + * 9p backend + * + * Copyright IBM, Corp. 2010 + * + * Authors: + * Harsh Prateek Bora + * Venkateswararao Jujjuri(JV) + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + * + */ + +/* + * Not so fast! You might want to read the 9p developer docs first: + * https://wiki.qemu.org/Documentation/9p + */ + +#include "qemu/osdep.h" +#include "block/thread-pool.h" +#include "qemu/coroutine.h" +#include "qemu/main-loop.h" +#include "coth.h" + +/* Called from QEMU I/O thread. */ +static void coroutine_enter_cb(void *opaque, int ret) +{ + Coroutine *co = opaque; + qemu_coroutine_enter(co); +} + +/* Called from worker thread. */ +static int coroutine_enter_func(void *arg) +{ + Coroutine *co = arg; + qemu_coroutine_enter(co); + return 0; +} + +void co_run_in_worker_bh(void *opaque) +{ + Coroutine *co = opaque; + thread_pool_submit_aio(aio_get_thread_pool(qemu_get_aio_context()), + coroutine_enter_func, co, coroutine_enter_cb, co); +} diff --git a/hw/9pfs/coth.h b/hw/9pfs/coth.h new file mode 100644 index 000000000..f83c7dda7 --- /dev/null +++ b/hw/9pfs/coth.h @@ -0,0 +1,113 @@ +/* + * 9p backend + * + * Copyright IBM, Corp. 2010 + * + * Authors: + * Harsh Prateek Bora + * Venkateswararao Jujjuri(JV) + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + * + */ + +#ifndef QEMU_9P_COTH_H +#define QEMU_9P_COTH_H + +#include "qemu/thread.h" +#include "qemu/coroutine.h" +#include "9p.h" + +/** + * We use a bottom half because we want to guarantee the following + * sequence of events: + * + * 1. Yield the coroutine in the QEMU thread. + * 2. Submit the coroutine to a worker thread. + * 3. Enter the coroutine in the worker thread. + * + * Steps 1 and 2 cannot be swapped, because that would allow the worker + * thread to enter the coroutine while step 1 is still running. + * + * @b PERFORMANCE @b CONSIDERATIONS: As a rule of thumb, keep in mind + * that hopping between threads adds @b latency! So when handling a + * 9pfs request, avoid calling v9fs_co_run_in_worker() too often, because + * this might otherwise sum up to a significant overall latency for + * providing the response for just a single request. For that reason it + * is highly recommended to fetch all data from the fs driver with a + * single driver request on a background I/O thread (bottom half) first, + * and then assemble the final response from that data on the main I/O + * thread (top half). 
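+ * + * A typical caller built on this macro follows the shape below (a sketch + * only: v9fs_co_example and the 'example' driver op are hypothetical; the + * real wrappers live in codir.c, cofile.c, cofs.c and coxattr.c): + * + * int coroutine_fn v9fs_co_example(V9fsPDU *pdu, V9fsPath *path) + * { + * int err; + * V9fsState *s = pdu->s; + * + * if (v9fs_request_cancelled(pdu)) { + * return -EINTR; // top half: bail out early + * } + * v9fs_path_read_lock(s); + * v9fs_co_run_in_worker( + * { + * // bottom half: blocking fs calls are allowed here + * err = s->ops->example(&s->ctx, path); + * if (err < 0) { + * err = -errno; + * } + * }); + * v9fs_path_unlock(s); + * return err; // top half again + * }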
+ */ +#define v9fs_co_run_in_worker(code_block) \ + do { \ + QEMUBH *co_bh; \ + co_bh = qemu_bh_new(co_run_in_worker_bh, \ + qemu_coroutine_self()); \ + qemu_bh_schedule(co_bh); \ + /* \ + * yield in qemu thread and re-enter back \ + * in worker thread \ + */ \ + qemu_coroutine_yield(); \ + qemu_bh_delete(co_bh); \ + do { \ + code_block; \ + } while (0); \ + /* re-enter back to qemu thread */ \ + qemu_coroutine_yield(); \ + } while (0) + +void co_run_in_worker_bh(void *); +int coroutine_fn v9fs_co_readlink(V9fsPDU *, V9fsPath *, V9fsString *); +int coroutine_fn v9fs_co_readdir(V9fsPDU *, V9fsFidState *, struct dirent **); +int coroutine_fn v9fs_co_readdir_many(V9fsPDU *, V9fsFidState *, + struct V9fsDirEnt **, off_t, int32_t, + bool); +off_t coroutine_fn v9fs_co_telldir(V9fsPDU *, V9fsFidState *); +void coroutine_fn v9fs_co_seekdir(V9fsPDU *, V9fsFidState *, off_t); +void coroutine_fn v9fs_co_rewinddir(V9fsPDU *, V9fsFidState *); +int coroutine_fn v9fs_co_statfs(V9fsPDU *, V9fsPath *, struct statfs *); +int coroutine_fn v9fs_co_lstat(V9fsPDU *, V9fsPath *, struct stat *); +int coroutine_fn v9fs_co_chmod(V9fsPDU *, V9fsPath *, mode_t); +int coroutine_fn v9fs_co_utimensat(V9fsPDU *, V9fsPath *, struct timespec [2]); +int coroutine_fn v9fs_co_chown(V9fsPDU *, V9fsPath *, uid_t, gid_t); +int coroutine_fn v9fs_co_truncate(V9fsPDU *, V9fsPath *, off_t); +int coroutine_fn v9fs_co_llistxattr(V9fsPDU *, V9fsPath *, void *, size_t); +int coroutine_fn v9fs_co_lgetxattr(V9fsPDU *, V9fsPath *, + V9fsString *, void *, size_t); +int coroutine_fn v9fs_co_mknod(V9fsPDU *, V9fsFidState *, V9fsString *, uid_t, + gid_t, dev_t, mode_t, struct stat *); +int coroutine_fn v9fs_co_mkdir(V9fsPDU *, V9fsFidState *, V9fsString *, + mode_t, uid_t, gid_t, struct stat *); +int coroutine_fn v9fs_co_remove(V9fsPDU *, V9fsPath *); +int coroutine_fn v9fs_co_rename(V9fsPDU *, V9fsPath *, V9fsPath *); +int coroutine_fn v9fs_co_unlinkat(V9fsPDU *, V9fsPath *, V9fsString *, + int flags); +int coroutine_fn v9fs_co_renameat(V9fsPDU *, V9fsPath *, V9fsString *, + V9fsPath *, V9fsString *); +int coroutine_fn v9fs_co_fstat(V9fsPDU *, V9fsFidState *, struct stat *); +int coroutine_fn v9fs_co_opendir(V9fsPDU *, V9fsFidState *); +int coroutine_fn v9fs_co_open(V9fsPDU *, V9fsFidState *, int); +int coroutine_fn v9fs_co_open2(V9fsPDU *, V9fsFidState *, V9fsString *, + gid_t, int, int, struct stat *); +int coroutine_fn v9fs_co_lsetxattr(V9fsPDU *, V9fsPath *, V9fsString *, + void *, size_t, int); +int coroutine_fn v9fs_co_lremovexattr(V9fsPDU *, V9fsPath *, V9fsString *); +int coroutine_fn v9fs_co_closedir(V9fsPDU *, V9fsFidOpenState *); +int coroutine_fn v9fs_co_close(V9fsPDU *, V9fsFidOpenState *); +int coroutine_fn v9fs_co_fsync(V9fsPDU *, V9fsFidState *, int); +int coroutine_fn v9fs_co_symlink(V9fsPDU *, V9fsFidState *, V9fsString *, + const char *, gid_t, struct stat *); +int coroutine_fn v9fs_co_link(V9fsPDU *, V9fsFidState *, + V9fsFidState *, V9fsString *); +int coroutine_fn v9fs_co_pwritev(V9fsPDU *, V9fsFidState *, + struct iovec *, int, int64_t); +int coroutine_fn v9fs_co_preadv(V9fsPDU *, V9fsFidState *, + struct iovec *, int, int64_t); +int coroutine_fn v9fs_co_name_to_path(V9fsPDU *, V9fsPath *, + const char *, V9fsPath *); +int coroutine_fn v9fs_co_st_gen(V9fsPDU *pdu, V9fsPath *path, mode_t, + V9fsStatDotl *v9stat); + +#endif diff --git a/hw/9pfs/coxattr.c b/hw/9pfs/coxattr.c new file mode 100644 index 000000000..dbcd09e0f --- /dev/null +++ b/hw/9pfs/coxattr.c @@ -0,0 +1,114 @@ +/* + * 9p backend + * + * Copyright IBM, 
Corp. 2011 + * + * Authors: + * Aneesh Kumar K.V + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + * + */ + +/* + * Not so fast! You might want to read the 9p developer docs first: + * https://wiki.qemu.org/Documentation/9p + */ + +#include "qemu/osdep.h" +#include "fsdev/qemu-fsdev.h" +#include "qemu/thread.h" +#include "qemu/coroutine.h" +#include "qemu/main-loop.h" +#include "coth.h" + +int coroutine_fn v9fs_co_llistxattr(V9fsPDU *pdu, V9fsPath *path, void *value, + size_t size) +{ + int err; + V9fsState *s = pdu->s; + + if (v9fs_request_cancelled(pdu)) { + return -EINTR; + } + v9fs_path_read_lock(s); + v9fs_co_run_in_worker( + { + err = s->ops->llistxattr(&s->ctx, path, value, size); + if (err < 0) { + err = -errno; + } + }); + v9fs_path_unlock(s); + return err; +} + +int coroutine_fn v9fs_co_lgetxattr(V9fsPDU *pdu, V9fsPath *path, + V9fsString *xattr_name, void *value, + size_t size) +{ + int err; + V9fsState *s = pdu->s; + + if (v9fs_request_cancelled(pdu)) { + return -EINTR; + } + v9fs_path_read_lock(s); + v9fs_co_run_in_worker( + { + err = s->ops->lgetxattr(&s->ctx, path, + xattr_name->data, + value, size); + if (err < 0) { + err = -errno; + } + }); + v9fs_path_unlock(s); + return err; +} + +int coroutine_fn v9fs_co_lsetxattr(V9fsPDU *pdu, V9fsPath *path, + V9fsString *xattr_name, void *value, + size_t size, int flags) +{ + int err; + V9fsState *s = pdu->s; + + if (v9fs_request_cancelled(pdu)) { + return -EINTR; + } + v9fs_path_read_lock(s); + v9fs_co_run_in_worker( + { + err = s->ops->lsetxattr(&s->ctx, path, + xattr_name->data, value, + size, flags); + if (err < 0) { + err = -errno; + } + }); + v9fs_path_unlock(s); + return err; +} + +int coroutine_fn v9fs_co_lremovexattr(V9fsPDU *pdu, V9fsPath *path, + V9fsString *xattr_name) +{ + int err; + V9fsState *s = pdu->s; + + if (v9fs_request_cancelled(pdu)) { + return -EINTR; + } + v9fs_path_read_lock(s); + v9fs_co_run_in_worker( + { + err = s->ops->lremovexattr(&s->ctx, path, xattr_name->data); + if (err < 0) { + err = -errno; + } + }); + v9fs_path_unlock(s); + return err; +} diff --git a/hw/9pfs/meson.build b/hw/9pfs/meson.build new file mode 100644 index 000000000..99be5d911 --- /dev/null +++ b/hw/9pfs/meson.build @@ -0,0 +1,20 @@ +fs_ss = ss.source_set() +fs_ss.add(files( + '9p-local.c', + '9p-posix-acl.c', + '9p-proxy.c', + '9p-synth.c', + '9p-util.c', + '9p-xattr-user.c', + '9p-xattr.c', + '9p.c', + 'codir.c', + 'cofile.c', + 'cofs.c', + 'coth.c', + 'coxattr.c', +)) +fs_ss.add(when: 'CONFIG_XEN', if_true: files('xen-9p-backend.c')) +softmmu_ss.add_all(when: 'CONFIG_FSDEV_9P', if_true: fs_ss) + +specific_ss.add(when: 'CONFIG_VIRTIO_9P', if_true: files('virtio-9p-device.c')) diff --git a/hw/9pfs/trace-events b/hw/9pfs/trace-events new file mode 100644 index 000000000..6c77966c0 --- /dev/null +++ b/hw/9pfs/trace-events @@ -0,0 +1,50 @@ +# See docs/devel/tracing.rst for syntax documentation. 
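+ +# Each line below declares one trace event: a name, a typed argument list, +# and a printf-style format string for pretty-printing the arguments. The +# build generates a trace_<name>() helper per event; 9p.c, for example, +# emits the v9fs_open event as trace_v9fs_open(pdu->tag, pdu->id, fid, mode).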
+ +# 9p.c +v9fs_rcancel(uint16_t tag, uint8_t id) "tag %d id %d" +v9fs_rerror(uint16_t tag, uint8_t id, int err) "tag %d id %d err %d" +v9fs_version(uint16_t tag, uint8_t id, int32_t msize, char* version) "tag %d id %d msize %d version %s" +v9fs_version_return(uint16_t tag, uint8_t id, int32_t msize, char* version) "tag %d id %d msize %d version %s" +v9fs_attach(uint16_t tag, uint8_t id, int32_t fid, int32_t afid, char* uname, char* aname) "tag %u id %u fid %d afid %d uname %s aname %s" +v9fs_attach_return(uint16_t tag, uint8_t id, uint8_t type, uint32_t version, uint64_t path) "tag %u id %u type %u version %u path %"PRIu64 +v9fs_stat(uint16_t tag, uint8_t id, int32_t fid) "tag %d id %d fid %d" +v9fs_stat_return(uint16_t tag, uint8_t id, int32_t mode, int32_t atime, int32_t mtime, int64_t length) "tag %d id %d stat={mode %d atime %d mtime %d length %"PRId64"}" +v9fs_getattr(uint16_t tag, uint8_t id, int32_t fid, uint64_t request_mask) "tag %d id %d fid %d request_mask %"PRIu64 +v9fs_getattr_return(uint16_t tag, uint8_t id, uint64_t result_mask, uint32_t mode, uint32_t uid, uint32_t gid) "tag %d id %d getattr={result_mask %"PRId64" mode %u uid %u gid %u}" +v9fs_walk(uint16_t tag, uint8_t id, int32_t fid, int32_t newfid, uint16_t nwnames) "tag %d id %d fid %d newfid %d nwnames %d" +v9fs_walk_return(uint16_t tag, uint8_t id, uint16_t nwnames, void* qids) "tag %d id %d nwnames %d qids %p" +v9fs_open(uint16_t tag, uint8_t id, int32_t fid, int32_t mode) "tag %d id %d fid %d mode %d" +v9fs_open_return(uint16_t tag, uint8_t id, uint8_t type, uint32_t version, uint64_t path, int iounit) "tag %u id %u qid={type %u version %u path %"PRIu64"} iounit %d" +v9fs_lcreate(uint16_t tag, uint8_t id, int32_t dfid, int32_t flags, int32_t mode, uint32_t gid) "tag %d id %d dfid %d flags %d mode %d gid %u" +v9fs_lcreate_return(uint16_t tag, uint8_t id, uint8_t type, uint32_t version, uint64_t path, int32_t iounit) "tag %u id %u qid={type %u version %u path %"PRIu64"} iounit %d" +v9fs_fsync(uint16_t tag, uint8_t id, int32_t fid, int datasync) "tag %d id %d fid %d datasync %d" +v9fs_clunk(uint16_t tag, uint8_t id, int32_t fid) "tag %d id %d fid %d" +v9fs_read(uint16_t tag, uint8_t id, int32_t fid, uint64_t off, uint32_t max_count) "tag %d id %d fid %d off %"PRIu64" max_count %u" +v9fs_read_return(uint16_t tag, uint8_t id, int32_t count, ssize_t err) "tag %d id %d count %d err %zd" +v9fs_readdir(uint16_t tag, uint8_t id, int32_t fid, uint64_t offset, uint32_t max_count) "tag %d id %d fid %d offset %"PRIu64" max_count %u" +v9fs_readdir_return(uint16_t tag, uint8_t id, uint32_t count, ssize_t retval) "tag %d id %d count %u retval %zd" +v9fs_write(uint16_t tag, uint8_t id, int32_t fid, uint64_t off, uint32_t count, int cnt) "tag %d id %d fid %d off %"PRIu64" count %u cnt %d" +v9fs_write_return(uint16_t tag, uint8_t id, int32_t total, ssize_t err) "tag %d id %d total %d err %zd" +v9fs_create(uint16_t tag, uint8_t id, int32_t fid, char* name, int32_t perm, int8_t mode) "tag %d id %d fid %d name %s perm %d mode %d" +v9fs_create_return(uint16_t tag, uint8_t id, uint8_t type, uint32_t version, uint64_t path, int iounit) "tag %u id %u qid={type %u version %u path %"PRIu64"} iounit %d" +v9fs_symlink(uint16_t tag, uint8_t id, int32_t fid, char* name, char* symname, uint32_t gid) "tag %d id %d fid %d name %s symname %s gid %u" +v9fs_symlink_return(uint16_t tag, uint8_t id, uint8_t type, uint32_t version, uint64_t path) "tag %u id %u qid={type %u version %u path %"PRIu64"}" +v9fs_flush(uint16_t tag, uint8_t id, int16_t 
flush_tag) "tag %d id %d flush_tag %d" +v9fs_link(uint16_t tag, uint8_t id, int32_t dfid, int32_t oldfid, char* name) "tag %d id %d dfid %d oldfid %d name %s" +v9fs_remove(uint16_t tag, uint8_t id, int32_t fid) "tag %d id %d fid %d" +v9fs_wstat(uint16_t tag, uint8_t id, int32_t fid, int32_t mode, int32_t atime, int32_t mtime) "tag %u id %u fid %d stat={mode %d atime %d mtime %d}" +v9fs_mknod(uint16_t tag, uint8_t id, int32_t fid, int mode, int major, int minor) "tag %d id %d fid %d mode %d major %d minor %d" +v9fs_mknod_return(uint16_t tag, uint8_t id, uint8_t type, uint32_t version, uint64_t path) "tag %u id %u qid={type %u version %u path %"PRIu64"}" +v9fs_lock(uint16_t tag, uint8_t id, int32_t fid, uint8_t type, uint64_t start, uint64_t length) "tag %d id %d fid %d type %d start %"PRIu64" length %"PRIu64 +v9fs_lock_return(uint16_t tag, uint8_t id, int8_t status) "tag %d id %d status %d" +v9fs_getlock(uint16_t tag, uint8_t id, int32_t fid, uint8_t type, uint64_t start, uint64_t length)"tag %d id %d fid %d type %d start %"PRIu64" length %"PRIu64 +v9fs_getlock_return(uint16_t tag, uint8_t id, uint8_t type, uint64_t start, uint64_t length, uint32_t proc_id) "tag %d id %d type %d start %"PRIu64" length %"PRIu64" proc_id %u" +v9fs_mkdir(uint16_t tag, uint8_t id, int32_t fid, char* name, int mode, uint32_t gid) "tag %u id %u fid %d name %s mode %d gid %u" +v9fs_mkdir_return(uint16_t tag, uint8_t id, uint8_t type, uint32_t version, uint64_t path, int err) "tag %u id %u qid={type %u version %u path %"PRIu64"} err %d" +v9fs_xattrwalk(uint16_t tag, uint8_t id, int32_t fid, int32_t newfid, char* name) "tag %d id %d fid %d newfid %d name %s" +v9fs_xattrwalk_return(uint16_t tag, uint8_t id, int64_t size) "tag %d id %d size %"PRId64 +v9fs_xattrcreate(uint16_t tag, uint8_t id, int32_t fid, char* name, uint64_t size, int flags) "tag %d id %d fid %d name %s size %"PRIu64" flags %d" +v9fs_readlink(uint16_t tag, uint8_t id, int32_t fid) "tag %d id %d fid %d" +v9fs_readlink_return(uint16_t tag, uint8_t id, char* target) "tag %d id %d name %s" +v9fs_setattr(uint16_t tag, uint8_t id, int32_t fid, int32_t valid, int32_t mode, int32_t uid, int32_t gid, int64_t size, int64_t atime_sec, int64_t mtime_sec) "tag %u id %u fid %d iattr={valid %d mode %d uid %d gid %d size %"PRId64" atime=%"PRId64" mtime=%"PRId64" }" +v9fs_setattr_return(uint16_t tag, uint8_t id) "tag %u id %u" diff --git a/hw/9pfs/trace.h b/hw/9pfs/trace.h new file mode 100644 index 000000000..6104fe2a7 --- /dev/null +++ b/hw/9pfs/trace.h @@ -0,0 +1 @@ +#include "trace/trace-hw_9pfs.h" diff --git a/hw/9pfs/virtio-9p-device.c b/hw/9pfs/virtio-9p-device.c new file mode 100644 index 000000000..54ee93b71 --- /dev/null +++ b/hw/9pfs/virtio-9p-device.c @@ -0,0 +1,279 @@ +/* + * Virtio 9p backend + * + * Copyright IBM, Corp. 2010 + * + * Authors: + * Anthony Liguori + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + * + */ + +/* + * Not so fast! 
You might want to read the 9p developer docs first: + * https://wiki.qemu.org/Documentation/9p + */ + +#include "qemu/osdep.h" +#include "hw/virtio/virtio.h" +#include "qemu/sockets.h" +#include "virtio-9p.h" +#include "fsdev/qemu-fsdev.h" +#include "coth.h" +#include "hw/qdev-properties.h" +#include "hw/virtio/virtio-access.h" +#include "qemu/iov.h" +#include "qemu/module.h" +#include "sysemu/qtest.h" + +static void virtio_9p_push_and_notify(V9fsPDU *pdu) +{ + V9fsState *s = pdu->s; + V9fsVirtioState *v = container_of(s, V9fsVirtioState, state); + VirtQueueElement *elem = v->elems[pdu->idx]; + + /* push onto queue and notify */ + virtqueue_push(v->vq, elem, pdu->size); + g_free(elem); + v->elems[pdu->idx] = NULL; + + /* FIXME: we should batch these completions */ + virtio_notify(VIRTIO_DEVICE(v), v->vq); +} + +static void handle_9p_output(VirtIODevice *vdev, VirtQueue *vq) +{ + V9fsVirtioState *v = (V9fsVirtioState *)vdev; + V9fsState *s = &v->state; + V9fsPDU *pdu; + ssize_t len; + VirtQueueElement *elem; + + while ((pdu = pdu_alloc(s))) { + P9MsgHeader out; + + elem = virtqueue_pop(vq, sizeof(VirtQueueElement)); + if (!elem) { + goto out_free_pdu; + } + + if (iov_size(elem->in_sg, elem->in_num) < 7) { + virtio_error(vdev, + "The guest sent a VirtFS request without space for " + "the reply"); + goto out_free_req; + } + + len = iov_to_buf(elem->out_sg, elem->out_num, 0, &out, 7); + if (len != 7) { + virtio_error(vdev, "The guest sent a malformed VirtFS request: " + "header size is %zd, should be 7", len); + goto out_free_req; + } + + v->elems[pdu->idx] = elem; + + pdu_submit(pdu, &out); + } + + return; + +out_free_req: + virtqueue_detach_element(vq, elem, 0); + g_free(elem); +out_free_pdu: + pdu_free(pdu); +} + +static uint64_t virtio_9p_get_features(VirtIODevice *vdev, uint64_t features, + Error **errp) +{ + virtio_add_feature(&features, VIRTIO_9P_MOUNT_TAG); + return features; +} + +static void virtio_9p_get_config(VirtIODevice *vdev, uint8_t *config) +{ + int len; + struct virtio_9p_config *cfg; + V9fsVirtioState *v = VIRTIO_9P(vdev); + V9fsState *s = &v->state; + + len = strlen(s->tag); + cfg = g_malloc0(sizeof(struct virtio_9p_config) + len); + virtio_stw_p(vdev, &cfg->tag_len, len); + /* We don't copy the terminating null to config space */ + memcpy(cfg->tag, s->tag, len); + memcpy(config, cfg, v->config_size); + g_free(cfg); +} + +static void virtio_9p_reset(VirtIODevice *vdev) +{ + V9fsVirtioState *v = (V9fsVirtioState *)vdev; + + v9fs_reset(&v->state); +} + +static ssize_t virtio_pdu_vmarshal(V9fsPDU *pdu, size_t offset, + const char *fmt, va_list ap) +{ + V9fsState *s = pdu->s; + V9fsVirtioState *v = container_of(s, V9fsVirtioState, state); + VirtQueueElement *elem = v->elems[pdu->idx]; + ssize_t ret; + + ret = v9fs_iov_vmarshal(elem->in_sg, elem->in_num, offset, 1, fmt, ap); + if (ret < 0) { + VirtIODevice *vdev = VIRTIO_DEVICE(v); + + virtio_error(vdev, "Failed to encode VirtFS reply type %d", + pdu->id + 1); + } + return ret; +} + +static ssize_t virtio_pdu_vunmarshal(V9fsPDU *pdu, size_t offset, + const char *fmt, va_list ap) +{ + V9fsState *s = pdu->s; + V9fsVirtioState *v = container_of(s, V9fsVirtioState, state); + VirtQueueElement *elem = v->elems[pdu->idx]; + ssize_t ret; + + ret = v9fs_iov_vunmarshal(elem->out_sg, elem->out_num, offset, 1, fmt, ap); + if (ret < 0) { + VirtIODevice *vdev = VIRTIO_DEVICE(v); + + virtio_error(vdev, "Failed to decode VirtFS request type %d", pdu->id); + } + return ret; +} + +static void virtio_init_in_iov_from_pdu(V9fsPDU *pdu, struct 
iovec **piov, + unsigned int *pniov, size_t size) +{ + V9fsState *s = pdu->s; + V9fsVirtioState *v = container_of(s, V9fsVirtioState, state); + VirtQueueElement *elem = v->elems[pdu->idx]; + size_t buf_size = iov_size(elem->in_sg, elem->in_num); + + if (buf_size < size) { + VirtIODevice *vdev = VIRTIO_DEVICE(v); + + virtio_error(vdev, + "VirtFS reply type %d needs %zu bytes, buffer has %zu", + pdu->id + 1, size, buf_size); + } + + *piov = elem->in_sg; + *pniov = elem->in_num; +} + +static void virtio_init_out_iov_from_pdu(V9fsPDU *pdu, struct iovec **piov, + unsigned int *pniov, size_t size) +{ + V9fsState *s = pdu->s; + V9fsVirtioState *v = container_of(s, V9fsVirtioState, state); + VirtQueueElement *elem = v->elems[pdu->idx]; + size_t buf_size = iov_size(elem->out_sg, elem->out_num); + + if (buf_size < size) { + VirtIODevice *vdev = VIRTIO_DEVICE(v); + + virtio_error(vdev, + "VirtFS request type %d needs %zu bytes, buffer has %zu", + pdu->id, size, buf_size); + } + + *piov = elem->out_sg; + *pniov = elem->out_num; +} + +static const V9fsTransport virtio_9p_transport = { + .pdu_vmarshal = virtio_pdu_vmarshal, + .pdu_vunmarshal = virtio_pdu_vunmarshal, + .init_in_iov_from_pdu = virtio_init_in_iov_from_pdu, + .init_out_iov_from_pdu = virtio_init_out_iov_from_pdu, + .push_and_notify = virtio_9p_push_and_notify, +}; + +static void virtio_9p_device_realize(DeviceState *dev, Error **errp) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(dev); + V9fsVirtioState *v = VIRTIO_9P(dev); + V9fsState *s = &v->state; + FsDriverEntry *fse = get_fsdev_fsentry(s->fsconf.fsdev_id); + + if (qtest_enabled() && fse) { + fse->export_flags |= V9FS_NO_PERF_WARN; + } + + if (v9fs_device_realize_common(s, &virtio_9p_transport, errp)) { + return; + } + + v->config_size = sizeof(struct virtio_9p_config) + strlen(s->fsconf.tag); + virtio_init(vdev, "virtio-9p", VIRTIO_ID_9P, v->config_size); + v->vq = virtio_add_queue(vdev, MAX_REQ, handle_9p_output); +} + +static void virtio_9p_device_unrealize(DeviceState *dev) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(dev); + V9fsVirtioState *v = VIRTIO_9P(dev); + V9fsState *s = &v->state; + + virtio_delete_queue(v->vq); + virtio_cleanup(vdev); + v9fs_device_unrealize_common(s); +} + +/* virtio-9p device */ + +static const VMStateDescription vmstate_virtio_9p = { + .name = "virtio-9p", + .minimum_version_id = 1, + .version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_VIRTIO_DEVICE, + VMSTATE_END_OF_LIST() + }, +}; + +static Property virtio_9p_properties[] = { + DEFINE_PROP_STRING("mount_tag", V9fsVirtioState, state.fsconf.tag), + DEFINE_PROP_STRING("fsdev", V9fsVirtioState, state.fsconf.fsdev_id), + DEFINE_PROP_END_OF_LIST(), +}; + +static void virtio_9p_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass); + + device_class_set_props(dc, virtio_9p_properties); + dc->vmsd = &vmstate_virtio_9p; + set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); + vdc->realize = virtio_9p_device_realize; + vdc->unrealize = virtio_9p_device_unrealize; + vdc->get_features = virtio_9p_get_features; + vdc->get_config = virtio_9p_get_config; + vdc->reset = virtio_9p_reset; +} + +static const TypeInfo virtio_device_info = { + .name = TYPE_VIRTIO_9P, + .parent = TYPE_VIRTIO_DEVICE, + .instance_size = sizeof(V9fsVirtioState), + .class_init = virtio_9p_class_init, +}; + +static void virtio_9p_register_types(void) +{ + type_register_static(&virtio_device_info); +} + +type_init(virtio_9p_register_types) diff --git 
a/hw/9pfs/virtio-9p.h b/hw/9pfs/virtio-9p.h new file mode 100644 index 000000000..20fa118f3 --- /dev/null +++ b/hw/9pfs/virtio-9p.h @@ -0,0 +1,20 @@ +#ifndef QEMU_VIRTIO_9P_H +#define QEMU_VIRTIO_9P_H + +#include "standard-headers/linux/virtio_9p.h" +#include "hw/virtio/virtio.h" +#include "9p.h" +#include "qom/object.h" + +struct V9fsVirtioState { + VirtIODevice parent_obj; + VirtQueue *vq; + size_t config_size; + VirtQueueElement *elems[MAX_REQ]; + V9fsState state; +}; + +#define TYPE_VIRTIO_9P "virtio-9p-device" +OBJECT_DECLARE_SIMPLE_TYPE(V9fsVirtioState, VIRTIO_9P) + +#endif diff --git a/hw/9pfs/xen-9p-backend.c b/hw/9pfs/xen-9p-backend.c new file mode 100644 index 000000000..65c4979c3 --- /dev/null +++ b/hw/9pfs/xen-9p-backend.c @@ -0,0 +1,510 @@ +/* + * Xen 9p backend + * + * Copyright Aporeto 2017 + * + * Authors: + * Stefano Stabellini + * + */ + +/* + * Not so fast! You might want to read the 9p developer docs first: + * https://wiki.qemu.org/Documentation/9p + */ + +#include "qemu/osdep.h" + +#include "hw/9pfs/9p.h" +#include "hw/xen/xen-legacy-backend.h" +#include "hw/9pfs/xen-9pfs.h" +#include "qapi/error.h" +#include "qemu/config-file.h" +#include "qemu/main-loop.h" +#include "qemu/option.h" +#include "fsdev/qemu-fsdev.h" + +#define VERSIONS "1" +#define MAX_RINGS 8 +#define MAX_RING_ORDER 9 + +typedef struct Xen9pfsRing { + struct Xen9pfsDev *priv; + + int ref; + xenevtchn_handle *evtchndev; + int evtchn; + int local_port; + int ring_order; + struct xen_9pfs_data_intf *intf; + unsigned char *data; + struct xen_9pfs_data ring; + + struct iovec *sg; + QEMUBH *bh; + Coroutine *co; + + /* local copies, so that we can read/write PDU data directly from + * the ring */ + RING_IDX out_cons, out_size, in_cons; + bool inprogress; +} Xen9pfsRing; + +typedef struct Xen9pfsDev { + struct XenLegacyDevice xendev; /* must be first */ + V9fsState state; + char *path; + char *security_model; + char *tag; + char *id; + + int num_rings; + Xen9pfsRing *rings; +} Xen9pfsDev; + +static void xen_9pfs_disconnect(struct XenLegacyDevice *xendev); + +static void xen_9pfs_in_sg(Xen9pfsRing *ring, + struct iovec *in_sg, + int *num, + uint32_t idx, + uint32_t size) +{ + RING_IDX cons, prod, masked_prod, masked_cons; + + cons = ring->intf->in_cons; + prod = ring->intf->in_prod; + xen_rmb(); + masked_prod = xen_9pfs_mask(prod, XEN_FLEX_RING_SIZE(ring->ring_order)); + masked_cons = xen_9pfs_mask(cons, XEN_FLEX_RING_SIZE(ring->ring_order)); + + if (masked_prod < masked_cons) { + in_sg[0].iov_base = ring->ring.in + masked_prod; + in_sg[0].iov_len = masked_cons - masked_prod; + *num = 1; + } else { + in_sg[0].iov_base = ring->ring.in + masked_prod; + in_sg[0].iov_len = XEN_FLEX_RING_SIZE(ring->ring_order) - masked_prod; + in_sg[1].iov_base = ring->ring.in; + in_sg[1].iov_len = masked_cons; + *num = 2; + } +} + +static void xen_9pfs_out_sg(Xen9pfsRing *ring, + struct iovec *out_sg, + int *num, + uint32_t idx) +{ + RING_IDX cons, prod, masked_prod, masked_cons; + + cons = ring->intf->out_cons; + prod = ring->intf->out_prod; + xen_rmb(); + masked_prod = xen_9pfs_mask(prod, XEN_FLEX_RING_SIZE(ring->ring_order)); + masked_cons = xen_9pfs_mask(cons, XEN_FLEX_RING_SIZE(ring->ring_order)); + + if (masked_cons < masked_prod) { + out_sg[0].iov_base = ring->ring.out + masked_cons; + out_sg[0].iov_len = ring->out_size; + *num = 1; + } else { + if (ring->out_size > + (XEN_FLEX_RING_SIZE(ring->ring_order) - masked_cons)) { + out_sg[0].iov_base = ring->ring.out + masked_cons; + out_sg[0].iov_len = 
XEN_FLEX_RING_SIZE(ring->ring_order) - + masked_cons; + out_sg[1].iov_base = ring->ring.out; + out_sg[1].iov_len = ring->out_size - + (XEN_FLEX_RING_SIZE(ring->ring_order) - + masked_cons); + *num = 2; + } else { + out_sg[0].iov_base = ring->ring.out + masked_cons; + out_sg[0].iov_len = ring->out_size; + *num = 1; + } + } +} + +static ssize_t xen_9pfs_pdu_vmarshal(V9fsPDU *pdu, + size_t offset, + const char *fmt, + va_list ap) +{ + Xen9pfsDev *xen_9pfs = container_of(pdu->s, Xen9pfsDev, state); + struct iovec in_sg[2]; + int num; + ssize_t ret; + + xen_9pfs_in_sg(&xen_9pfs->rings[pdu->tag % xen_9pfs->num_rings], + in_sg, &num, pdu->idx, ROUND_UP(offset + 128, 512)); + + ret = v9fs_iov_vmarshal(in_sg, num, offset, 0, fmt, ap); + if (ret < 0) { + xen_pv_printf(&xen_9pfs->xendev, 0, + "Failed to encode VirtFS reply type %d\n", + pdu->id + 1); + xen_be_set_state(&xen_9pfs->xendev, XenbusStateClosing); + xen_9pfs_disconnect(&xen_9pfs->xendev); + } + return ret; +} + +static ssize_t xen_9pfs_pdu_vunmarshal(V9fsPDU *pdu, + size_t offset, + const char *fmt, + va_list ap) +{ + Xen9pfsDev *xen_9pfs = container_of(pdu->s, Xen9pfsDev, state); + struct iovec out_sg[2]; + int num; + ssize_t ret; + + xen_9pfs_out_sg(&xen_9pfs->rings[pdu->tag % xen_9pfs->num_rings], + out_sg, &num, pdu->idx); + + ret = v9fs_iov_vunmarshal(out_sg, num, offset, 0, fmt, ap); + if (ret < 0) { + xen_pv_printf(&xen_9pfs->xendev, 0, + "Failed to decode VirtFS request type %d\n", pdu->id); + xen_be_set_state(&xen_9pfs->xendev, XenbusStateClosing); + xen_9pfs_disconnect(&xen_9pfs->xendev); + } + return ret; +} + +static void xen_9pfs_init_out_iov_from_pdu(V9fsPDU *pdu, + struct iovec **piov, + unsigned int *pniov, + size_t size) +{ + Xen9pfsDev *xen_9pfs = container_of(pdu->s, Xen9pfsDev, state); + Xen9pfsRing *ring = &xen_9pfs->rings[pdu->tag % xen_9pfs->num_rings]; + int num; + + g_free(ring->sg); + + ring->sg = g_new0(struct iovec, 2); + xen_9pfs_out_sg(ring, ring->sg, &num, pdu->idx); + *piov = ring->sg; + *pniov = num; +} + +static void xen_9pfs_init_in_iov_from_pdu(V9fsPDU *pdu, + struct iovec **piov, + unsigned int *pniov, + size_t size) +{ + Xen9pfsDev *xen_9pfs = container_of(pdu->s, Xen9pfsDev, state); + Xen9pfsRing *ring = &xen_9pfs->rings[pdu->tag % xen_9pfs->num_rings]; + int num; + size_t buf_size; + + g_free(ring->sg); + + ring->sg = g_new0(struct iovec, 2); + ring->co = qemu_coroutine_self(); + /* make sure other threads see ring->co changes before continuing */ + smp_wmb(); + +again: + xen_9pfs_in_sg(ring, ring->sg, &num, pdu->idx, size); + buf_size = iov_size(ring->sg, num); + if (buf_size < size) { + qemu_coroutine_yield(); + goto again; + } + ring->co = NULL; + /* make sure other threads see ring->co changes before continuing */ + smp_wmb(); + + *piov = ring->sg; + *pniov = num; +} + +static void xen_9pfs_push_and_notify(V9fsPDU *pdu) +{ + RING_IDX prod; + Xen9pfsDev *priv = container_of(pdu->s, Xen9pfsDev, state); + Xen9pfsRing *ring = &priv->rings[pdu->tag % priv->num_rings]; + + g_free(ring->sg); + ring->sg = NULL; + + ring->intf->out_cons = ring->out_cons; + xen_wmb(); + + prod = ring->intf->in_prod; + xen_rmb(); + ring->intf->in_prod = prod + pdu->size; + xen_wmb(); + + ring->inprogress = false; + xenevtchn_notify(ring->evtchndev, ring->local_port); + + qemu_bh_schedule(ring->bh); +} + +static const V9fsTransport xen_9p_transport = { + .pdu_vmarshal = xen_9pfs_pdu_vmarshal, + .pdu_vunmarshal = xen_9pfs_pdu_vunmarshal, + .init_in_iov_from_pdu = xen_9pfs_init_in_iov_from_pdu, + .init_out_iov_from_pdu = 
xen_9pfs_init_out_iov_from_pdu, + .push_and_notify = xen_9pfs_push_and_notify, +}; + +static int xen_9pfs_init(struct XenLegacyDevice *xendev) +{ + return 0; +} + +static int xen_9pfs_receive(Xen9pfsRing *ring) +{ + P9MsgHeader h; + RING_IDX cons, prod, masked_prod, masked_cons, queued; + V9fsPDU *pdu; + + if (ring->inprogress) { + return 0; + } + + cons = ring->intf->out_cons; + prod = ring->intf->out_prod; + xen_rmb(); + + queued = xen_9pfs_queued(prod, cons, XEN_FLEX_RING_SIZE(ring->ring_order)); + if (queued < sizeof(h)) { + return 0; + } + ring->inprogress = true; + + masked_prod = xen_9pfs_mask(prod, XEN_FLEX_RING_SIZE(ring->ring_order)); + masked_cons = xen_9pfs_mask(cons, XEN_FLEX_RING_SIZE(ring->ring_order)); + + xen_9pfs_read_packet((uint8_t *) &h, ring->ring.out, sizeof(h), + masked_prod, &masked_cons, + XEN_FLEX_RING_SIZE(ring->ring_order)); + if (queued < le32_to_cpu(h.size_le)) { + return 0; + } + + /* cannot fail, because we only handle one request per ring at a time */ + pdu = pdu_alloc(&ring->priv->state); + ring->out_size = le32_to_cpu(h.size_le); + ring->out_cons = cons + le32_to_cpu(h.size_le); + + pdu_submit(pdu, &h); + + return 0; +} + +static void xen_9pfs_bh(void *opaque) +{ + Xen9pfsRing *ring = opaque; + bool wait; + +again: + wait = ring->co != NULL && qemu_coroutine_entered(ring->co); + /* paired with the smp_wmb barriers in xen_9pfs_init_in_iov_from_pdu */ + smp_rmb(); + if (wait) { + cpu_relax(); + goto again; + } + + if (ring->co != NULL) { + qemu_coroutine_enter_if_inactive(ring->co); + } + xen_9pfs_receive(ring); +} + +static void xen_9pfs_evtchn_event(void *opaque) +{ + Xen9pfsRing *ring = opaque; + evtchn_port_t port; + + port = xenevtchn_pending(ring->evtchndev); + xenevtchn_unmask(ring->evtchndev, port); + + qemu_bh_schedule(ring->bh); +} + +static void xen_9pfs_disconnect(struct XenLegacyDevice *xendev) +{ + Xen9pfsDev *xen_9pdev = container_of(xendev, Xen9pfsDev, xendev); + int i; + + for (i = 0; i < xen_9pdev->num_rings; i++) { + if (xen_9pdev->rings[i].evtchndev != NULL) { + qemu_set_fd_handler(xenevtchn_fd(xen_9pdev->rings[i].evtchndev), + NULL, NULL, NULL); + xenevtchn_unbind(xen_9pdev->rings[i].evtchndev, + xen_9pdev->rings[i].local_port); + xen_9pdev->rings[i].evtchndev = NULL; + } + } +} + +static int xen_9pfs_free(struct XenLegacyDevice *xendev) +{ + Xen9pfsDev *xen_9pdev = container_of(xendev, Xen9pfsDev, xendev); + int i; + + if (xen_9pdev->rings[0].evtchndev != NULL) { + xen_9pfs_disconnect(xendev); + } + + for (i = 0; i < xen_9pdev->num_rings; i++) { + if (xen_9pdev->rings[i].data != NULL) { + xen_be_unmap_grant_refs(&xen_9pdev->xendev, + xen_9pdev->rings[i].data, + (1 << xen_9pdev->rings[i].ring_order)); + } + if (xen_9pdev->rings[i].intf != NULL) { + xen_be_unmap_grant_refs(&xen_9pdev->xendev, + xen_9pdev->rings[i].intf, + 1); + } + if (xen_9pdev->rings[i].bh != NULL) { + qemu_bh_delete(xen_9pdev->rings[i].bh); + } + } + + g_free(xen_9pdev->id); + g_free(xen_9pdev->tag); + g_free(xen_9pdev->path); + g_free(xen_9pdev->security_model); + g_free(xen_9pdev->rings); + return 0; +} + +static int xen_9pfs_connect(struct XenLegacyDevice *xendev) +{ + Error *err = NULL; + int i; + Xen9pfsDev *xen_9pdev = container_of(xendev, Xen9pfsDev, xendev); + V9fsState *s = &xen_9pdev->state; + QemuOpts *fsdev; + + if (xenstore_read_fe_int(&xen_9pdev->xendev, "num-rings", + &xen_9pdev->num_rings) == -1 || + xen_9pdev->num_rings > MAX_RINGS || xen_9pdev->num_rings < 1) { + return -1; + } + + xen_9pdev->rings = g_new0(Xen9pfsRing, xen_9pdev->num_rings); + for 
(i = 0; i < xen_9pdev->num_rings; i++) { + char *str; + int ring_order; + + xen_9pdev->rings[i].priv = xen_9pdev; + xen_9pdev->rings[i].evtchn = -1; + xen_9pdev->rings[i].local_port = -1; + + str = g_strdup_printf("ring-ref%u", i); + if (xenstore_read_fe_int(&xen_9pdev->xendev, str, + &xen_9pdev->rings[i].ref) == -1) { + g_free(str); + goto out; + } + g_free(str); + str = g_strdup_printf("event-channel-%u", i); + if (xenstore_read_fe_int(&xen_9pdev->xendev, str, + &xen_9pdev->rings[i].evtchn) == -1) { + g_free(str); + goto out; + } + g_free(str); + + xen_9pdev->rings[i].intf = + xen_be_map_grant_ref(&xen_9pdev->xendev, + xen_9pdev->rings[i].ref, + PROT_READ | PROT_WRITE); + if (!xen_9pdev->rings[i].intf) { + goto out; + } + ring_order = xen_9pdev->rings[i].intf->ring_order; + if (ring_order > MAX_RING_ORDER) { + goto out; + } + xen_9pdev->rings[i].ring_order = ring_order; + xen_9pdev->rings[i].data = + xen_be_map_grant_refs(&xen_9pdev->xendev, + xen_9pdev->rings[i].intf->ref, + (1 << ring_order), + PROT_READ | PROT_WRITE); + if (!xen_9pdev->rings[i].data) { + goto out; + } + xen_9pdev->rings[i].ring.in = xen_9pdev->rings[i].data; + xen_9pdev->rings[i].ring.out = xen_9pdev->rings[i].data + + XEN_FLEX_RING_SIZE(ring_order); + + xen_9pdev->rings[i].bh = qemu_bh_new(xen_9pfs_bh, &xen_9pdev->rings[i]); + xen_9pdev->rings[i].out_cons = 0; + xen_9pdev->rings[i].out_size = 0; + xen_9pdev->rings[i].inprogress = false; + + + xen_9pdev->rings[i].evtchndev = xenevtchn_open(NULL, 0); + if (xen_9pdev->rings[i].evtchndev == NULL) { + goto out; + } + qemu_set_cloexec(xenevtchn_fd(xen_9pdev->rings[i].evtchndev)); + xen_9pdev->rings[i].local_port = xenevtchn_bind_interdomain + (xen_9pdev->rings[i].evtchndev, + xendev->dom, + xen_9pdev->rings[i].evtchn); + if (xen_9pdev->rings[i].local_port == -1) { + xen_pv_printf(xendev, 0, + "xenevtchn_bind_interdomain failed port=%d\n", + xen_9pdev->rings[i].evtchn); + goto out; + } + xen_pv_printf(xendev, 2, "bind evtchn port %d\n", xendev->local_port); + qemu_set_fd_handler(xenevtchn_fd(xen_9pdev->rings[i].evtchndev), + xen_9pfs_evtchn_event, NULL, &xen_9pdev->rings[i]); + } + + xen_9pdev->security_model = xenstore_read_be_str(xendev, "security_model"); + xen_9pdev->path = xenstore_read_be_str(xendev, "path"); + xen_9pdev->id = s->fsconf.fsdev_id = + g_strdup_printf("xen9p%d", xendev->dev); + xen_9pdev->tag = s->fsconf.tag = xenstore_read_fe_str(xendev, "tag"); + fsdev = qemu_opts_create(qemu_find_opts("fsdev"), + s->fsconf.tag, + 1, NULL); + qemu_opt_set(fsdev, "fsdriver", "local", NULL); + qemu_opt_set(fsdev, "path", xen_9pdev->path, NULL); + qemu_opt_set(fsdev, "security_model", xen_9pdev->security_model, NULL); + qemu_opts_set_id(fsdev, s->fsconf.fsdev_id); + qemu_fsdev_add(fsdev, &err); + if (err) { + error_report_err(err); + } + v9fs_device_realize_common(s, &xen_9p_transport, NULL); + + return 0; + +out: + xen_9pfs_free(xendev); + return -1; +} + +static void xen_9pfs_alloc(struct XenLegacyDevice *xendev) +{ + xenstore_write_be_str(xendev, "versions", VERSIONS); + xenstore_write_be_int(xendev, "max-rings", MAX_RINGS); + xenstore_write_be_int(xendev, "max-ring-page-order", MAX_RING_ORDER); +} + +struct XenDevOps xen_9pfs_ops = { + .size = sizeof(Xen9pfsDev), + .flags = DEVOPS_FLAG_NEED_GNTDEV, + .alloc = xen_9pfs_alloc, + .init = xen_9pfs_init, + .initialise = xen_9pfs_connect, + .disconnect = xen_9pfs_disconnect, + .free = xen_9pfs_free, +}; diff --git a/hw/9pfs/xen-9pfs.h b/hw/9pfs/xen-9pfs.h new file mode 100644 index 000000000..241e2216a --- /dev/null +++ 
b/hw/9pfs/xen-9pfs.h @@ -0,0 +1,25 @@ +/* + * Xen 9p backend + * + * Copyright Aporeto 2017 + * + * Authors: + * Stefano Stabellini + * + * This work is licensed under the terms of the GNU GPL version 2 or + * later. See the COPYING file in the top-level directory. + */ + +#ifndef HW_9PFS_XEN_9PFS_H +#define HW_9PFS_XEN_9PFS_H + +#include "hw/xen/interface/io/protocols.h" +#include "hw/xen/interface/io/ring.h" + +/* + * Do not merge into xen-9p-backend.c: clang doesn't allow unused static + * inline functions in c files. + */ +DEFINE_XEN_FLEX_RING_AND_INTF(xen_9pfs); + +#endif diff --git a/hw/Kconfig b/hw/Kconfig new file mode 100644 index 000000000..ad20cce0a --- /dev/null +++ b/hw/Kconfig @@ -0,0 +1,85 @@ +# devices Kconfig +source 9pfs/Kconfig +source acpi/Kconfig +source adc/Kconfig +source audio/Kconfig +source block/Kconfig +source char/Kconfig +source core/Kconfig +source display/Kconfig +source dma/Kconfig +source gpio/Kconfig +source hyperv/Kconfig +source i2c/Kconfig +source ide/Kconfig +source input/Kconfig +source intc/Kconfig +source ipack/Kconfig +source ipmi/Kconfig +source isa/Kconfig +source mem/Kconfig +source misc/Kconfig +source net/Kconfig +source nubus/Kconfig +source nvme/Kconfig +source nvram/Kconfig +source pci-bridge/Kconfig +source pci-host/Kconfig +source pcmcia/Kconfig +source pci/Kconfig +source rdma/Kconfig +source remote/Kconfig +source rtc/Kconfig +source scsi/Kconfig +source sd/Kconfig +source sensor/Kconfig +source smbios/Kconfig +source ssi/Kconfig +source timer/Kconfig +source tpm/Kconfig +source usb/Kconfig +source virtio/Kconfig +source vfio/Kconfig +source watchdog/Kconfig + +# arch Kconfig +source arm/Kconfig +source alpha/Kconfig +source avr/Kconfig +source cris/Kconfig +source hppa/Kconfig +source i386/Kconfig +source m68k/Kconfig +source microblaze/Kconfig +source mips/Kconfig +source nios2/Kconfig +source openrisc/Kconfig +source ppc/Kconfig +source riscv/Kconfig +source rx/Kconfig +source s390x/Kconfig +source sh4/Kconfig +source sparc/Kconfig +source sparc64/Kconfig +source tricore/Kconfig +source xtensa/Kconfig + +# Symbols used by multiple targets +config TEST_DEVICES + bool + +config XILINX + bool + select PTIMER # for hw/timer/xilinx_timer.c + +config XILINX_AXI + bool + select PTIMER # for hw/dma/xilinx_axidma.c + +config XLNX_ZYNQMP + bool + select REGISTER + select CAN_BUS + select PTIMER + select XLNX_BBRAM + select XLNX_EFUSE_ZYNQMP diff --git a/hw/acpi/Kconfig b/hw/acpi/Kconfig new file mode 100644 index 000000000..622b0b50b --- /dev/null +++ b/hw/acpi/Kconfig @@ -0,0 +1,62 @@ +config ACPI + bool + +config ACPI_X86 + bool + select ACPI + select ACPI_NVDIMM + select ACPI_CPU_HOTPLUG + select ACPI_MEMORY_HOTPLUG + select ACPI_HMAT + select ACPI_PIIX4 + select ACPI_PCIHP + +config ACPI_X86_ICH + bool + select ACPI_X86 + +config ACPI_CPU_HOTPLUG + bool + +config ACPI_MEMORY_HOTPLUG + bool + select MEM_DEVICE + +config ACPI_NVDIMM + bool + depends on ACPI + +config ACPI_PIIX4 + bool + depends on ACPI + +config ACPI_PCIHP + bool + depends on ACPI + +config ACPI_HMAT + bool + depends on ACPI + +config ACPI_APEI + bool + depends on ACPI + +config ACPI_PCI + bool + depends on ACPI && PCI + +config ACPI_VMGENID + bool + default y + depends on PC + +config ACPI_VIOT + bool + depends on ACPI + +config ACPI_HW_REDUCED + bool + select ACPI + select ACPI_MEMORY_HOTPLUG + select ACPI_NVDIMM diff --git a/hw/acpi/acpi-cpu-hotplug-stub.c b/hw/acpi/acpi-cpu-hotplug-stub.c new file mode 100644 index 000000000..3fc4b14c2 --- /dev/null +++ 
b/hw/acpi/acpi-cpu-hotplug-stub.c @@ -0,0 +1,50 @@ +#include "qemu/osdep.h" +#include "hw/acpi/cpu_hotplug.h" +#include "migration/vmstate.h" + + +/* Following stubs are all related to ACPI cpu hotplug */ +const VMStateDescription vmstate_cpu_hotplug; + +void acpi_switch_to_modern_cphp(AcpiCpuHotplug *gpe_cpu, + CPUHotplugState *cpuhp_state, + uint16_t io_port) +{ + return; +} + +void legacy_acpi_cpu_hotplug_init(MemoryRegion *parent, Object *owner, + AcpiCpuHotplug *gpe_cpu, uint16_t base) +{ + return; +} + +void acpi_cpu_ospm_status(CPUHotplugState *cpu_st, ACPIOSTInfoList ***list) +{ + return; +} + +void acpi_cpu_plug_cb(HotplugHandler *hotplug_dev, + CPUHotplugState *cpu_st, DeviceState *dev, Error **errp) +{ + return; +} + +void legacy_acpi_cpu_plug_cb(HotplugHandler *hotplug_dev, + AcpiCpuHotplug *g, DeviceState *dev, Error **errp) +{ + return; +} + +void acpi_cpu_unplug_cb(CPUHotplugState *cpu_st, + DeviceState *dev, Error **errp) +{ + return; +} + +void acpi_cpu_unplug_request_cb(HotplugHandler *hotplug_dev, + CPUHotplugState *cpu_st, + DeviceState *dev, Error **errp) +{ + return; +} diff --git a/hw/acpi/acpi-mem-hotplug-stub.c b/hw/acpi/acpi-mem-hotplug-stub.c new file mode 100644 index 000000000..73a076a26 --- /dev/null +++ b/hw/acpi/acpi-mem-hotplug-stub.c @@ -0,0 +1,35 @@ +#include "qemu/osdep.h" +#include "hw/acpi/memory_hotplug.h" +#include "migration/vmstate.h" + +const VMStateDescription vmstate_memory_hotplug; + +void acpi_memory_hotplug_init(MemoryRegion *as, Object *owner, + MemHotplugState *state, hwaddr io_base) +{ + return; +} + +void acpi_memory_ospm_status(MemHotplugState *mem_st, ACPIOSTInfoList ***list) +{ + return; +} + +void acpi_memory_plug_cb(HotplugHandler *hotplug_dev, MemHotplugState *mem_st, + DeviceState *dev, Error **errp) +{ + return; +} + +void acpi_memory_unplug_cb(MemHotplugState *mem_st, + DeviceState *dev, Error **errp) +{ + return; +} + +void acpi_memory_unplug_request_cb(HotplugHandler *hotplug_dev, + MemHotplugState *mem_st, + DeviceState *dev, Error **errp) +{ + return; +} diff --git a/hw/acpi/acpi-nvdimm-stub.c b/hw/acpi/acpi-nvdimm-stub.c new file mode 100644 index 000000000..8baff9be6 --- /dev/null +++ b/hw/acpi/acpi-nvdimm-stub.c @@ -0,0 +1,8 @@ +#include "qemu/osdep.h" +#include "hw/mem/nvdimm.h" +#include "hw/hotplug.h" + +void nvdimm_acpi_plug_cb(HotplugHandler *hotplug_dev, DeviceState *dev) +{ + return; +} diff --git a/hw/acpi/acpi-pci-hotplug-stub.c b/hw/acpi/acpi-pci-hotplug-stub.c new file mode 100644 index 000000000..734e4c598 --- /dev/null +++ b/hw/acpi/acpi-pci-hotplug-stub.c @@ -0,0 +1,47 @@ +#include "qemu/osdep.h" +#include "hw/acpi/pcihp.h" +#include "migration/vmstate.h" + +const VMStateDescription vmstate_acpi_pcihp_pci_status; + +void acpi_pcihp_init(Object *owner, AcpiPciHpState *s, PCIBus *root_bus, + MemoryRegion *address_space_io, bool bridges_enabled, + uint16_t io_base) +{ + return; +} + +void acpi_pcihp_device_plug_cb(HotplugHandler *hotplug_dev, AcpiPciHpState *s, + DeviceState *dev, Error **errp) +{ + return; +} + +void acpi_pcihp_device_pre_plug_cb(HotplugHandler *hotplug_dev, + DeviceState *dev, Error **errp) +{ + return; +} + +void acpi_pcihp_device_unplug_cb(HotplugHandler *hotplug_dev, AcpiPciHpState *s, + DeviceState *dev, Error **errp) +{ + return; +} + +void acpi_pcihp_device_unplug_request_cb(HotplugHandler *hotplug_dev, + AcpiPciHpState *s, DeviceState *dev, + Error **errp) +{ + return; +} + +void acpi_pcihp_reset(AcpiPciHpState *s, bool acpihp_root_off) +{ + return; +} + +bool 
vmstate_acpi_pcihp_use_acpi_index(void *opaque, int version_id) +{ + return false; +} diff --git a/hw/acpi/acpi-stub.c b/hw/acpi/acpi-stub.c new file mode 100644 index 000000000..4c9d081ed --- /dev/null +++ b/hw/acpi/acpi-stub.c @@ -0,0 +1,29 @@ +/* + * ACPI stubs for platforms that don't support ACPI. + * + * Copyright (c) 2006 Fabrice Bellard + * Copyright (c) 2016 Red Hat, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see <http://www.gnu.org/licenses/>. + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qapi/qmp/qerror.h" +#include "hw/acpi/acpi.h" + +void acpi_table_add(const QemuOpts *opts, Error **errp) +{ + error_setg(errp, QERR_UNSUPPORTED); +} diff --git a/hw/acpi/acpi-x86-stub.c b/hw/acpi/acpi-x86-stub.c new file mode 100644 index 000000000..3df1e090f --- /dev/null +++ b/hw/acpi/acpi-x86-stub.c @@ -0,0 +1,14 @@ +#include "qemu/osdep.h" +#include "hw/i386/pc.h" +#include "hw/i386/acpi-build.h" + +void pc_madt_cpu_entry(AcpiDeviceIf *adev, int uid, + const CPUArchIdList *apic_ids, GArray *entry, + bool force_enabled) +{ +} + +Object *acpi_get_i386_pci_host(void) +{ + return NULL; +} diff --git a/hw/acpi/acpi_interface.c b/hw/acpi/acpi_interface.c new file mode 100644 index 000000000..6583917b8 --- /dev/null +++ b/hw/acpi/acpi_interface.c @@ -0,0 +1,25 @@ +#include "qemu/osdep.h" +#include "hw/acpi/acpi_dev_interface.h" +#include "qemu/module.h" + +void acpi_send_event(DeviceState *dev, AcpiEventStatusBits event) +{ + AcpiDeviceIfClass *adevc = ACPI_DEVICE_IF_GET_CLASS(dev); + if (adevc->send_event) { + AcpiDeviceIf *adev = ACPI_DEVICE_IF(dev); + adevc->send_event(adev, event); + } +} + +static void register_types(void) +{ + static const TypeInfo acpi_dev_if_info = { + .name = TYPE_ACPI_DEVICE_IF, + .parent = TYPE_INTERFACE, + .class_size = sizeof(AcpiDeviceIfClass), + }; + + type_register_static(&acpi_dev_if_info); +} + +type_init(register_types) diff --git a/hw/acpi/aml-build-stub.c b/hw/acpi/aml-build-stub.c new file mode 100644 index 000000000..8d8ad1a31 --- /dev/null +++ b/hw/acpi/aml-build-stub.c @@ -0,0 +1,93 @@ +/* + * ACPI aml builder stubs for platforms that don't support ACPI. + * + * Copyright (c) 2006 Fabrice Bellard + * Copyright (c) 2016 Red Hat, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see <http://www.gnu.org/licenses/>. 
+ */ + +#include "qemu/osdep.h" +#include "hw/acpi/acpi.h" +#include "hw/acpi/aml-build.h" + +void aml_append(Aml *parent_ctx, Aml *child) +{ +} + +Aml *aml_resource_template(void) +{ + return NULL; +} + +Aml *aml_device(const char *name_format, ...) +{ + return NULL; +} + +Aml *aml_eisaid(const char *str) +{ + return NULL; +} + +Aml *aml_name_decl(const char *name, Aml *val) +{ + return NULL; +} + +Aml *aml_io(AmlIODecode dec, uint16_t min_base, uint16_t max_base, + uint8_t aln, uint8_t len) +{ + return NULL; +} + +Aml *aml_irq_no_flags(uint8_t irq) +{ + return NULL; +} + +Aml *aml_interrupt(AmlConsumerAndProducer con_and_pro, + AmlLevelAndEdge level_and_edge, + AmlActiveHighAndLow high_and_low, AmlShared shared, + uint32_t *irq_list, uint8_t irq_count) +{ + return NULL; +} + +Aml *aml_memory32_fixed(uint32_t addr, uint32_t size, + AmlReadAndWrite read_and_write) +{ + return NULL; +} + +Aml *aml_int(const uint64_t val) +{ + return NULL; +} + +Aml *aml_package(uint8_t num_elements) +{ + return NULL; +} + +Aml *aml_dma(AmlDmaType typ, AmlDmaBusMaster bm, AmlTransferSize sz, + uint8_t channel) +{ + return NULL; +} + +Aml *aml_buffer(int buffer_size, uint8_t *byte_list) +{ + return NULL; +} diff --git a/hw/acpi/aml-build.c b/hw/acpi/aml-build.c new file mode 100644 index 000000000..b3b3310df --- /dev/null +++ b/hw/acpi/aml-build.c @@ -0,0 +1,2439 @@ +/* Support for generating ACPI tables and passing them to Guests + * + * Copyright (C) 2015 Red Hat Inc + * + * Author: Michael S. Tsirkin + * Author: Igor Mammedov + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . 
+ */ + +#include "qemu/osdep.h" +#include +#include "hw/acpi/aml-build.h" +#include "qemu/bswap.h" +#include "qemu/bitops.h" +#include "sysemu/numa.h" +#include "hw/boards.h" +#include "hw/acpi/tpm.h" +#include "hw/pci/pci_host.h" +#include "hw/pci/pci_bus.h" +#include "hw/pci/pci_bridge.h" +#include "qemu/cutils.h" + +static GArray *build_alloc_array(void) +{ + return g_array_new(false, true /* clear */, 1); +} + +static void build_free_array(GArray *array) +{ + g_array_free(array, true); +} + +static void build_prepend_byte(GArray *array, uint8_t val) +{ + g_array_prepend_val(array, val); +} + +static void build_append_byte(GArray *array, uint8_t val) +{ + g_array_append_val(array, val); +} + +static void build_append_padded_str(GArray *array, const char *str, + size_t maxlen, char pad) +{ + size_t i; + size_t len = strlen(str); + + g_assert(len <= maxlen); + g_array_append_vals(array, str, len); + for (i = maxlen - len; i > 0; i--) { + g_array_append_val(array, pad); + } +} + +static void build_append_array(GArray *array, GArray *val) +{ + g_array_append_vals(array, val->data, val->len); +} + +#define ACPI_NAMESEG_LEN 4 + +void crs_range_insert(GPtrArray *ranges, uint64_t base, uint64_t limit) +{ + CrsRangeEntry *entry; + + entry = g_malloc(sizeof(*entry)); + entry->base = base; + entry->limit = limit; + + g_ptr_array_add(ranges, entry); +} + +static void crs_range_free(gpointer data) +{ + CrsRangeEntry *entry = (CrsRangeEntry *)data; + g_free(entry); +} + +void crs_range_set_init(CrsRangeSet *range_set) +{ + range_set->io_ranges = g_ptr_array_new_with_free_func(crs_range_free); + range_set->mem_ranges = g_ptr_array_new_with_free_func(crs_range_free); + range_set->mem_64bit_ranges = + g_ptr_array_new_with_free_func(crs_range_free); +} + +void crs_range_set_free(CrsRangeSet *range_set) +{ + g_ptr_array_free(range_set->io_ranges, true); + g_ptr_array_free(range_set->mem_ranges, true); + g_ptr_array_free(range_set->mem_64bit_ranges, true); +} + +static gint crs_range_compare(gconstpointer a, gconstpointer b) +{ + CrsRangeEntry *entry_a = *(CrsRangeEntry **)a; + CrsRangeEntry *entry_b = *(CrsRangeEntry **)b; + + if (entry_a->base < entry_b->base) { + return -1; + } else if (entry_a->base > entry_b->base) { + return 1; + } else { + return 0; + } +} + +/* + * crs_replace_with_free_ranges - given the 'used' ranges within [start - end] + * interval, computes the 'free' ranges from the same interval. + * Example: If the input array is { [a1 - a2],[b1 - b2] }, the function + * will return { [base - a1], [a2 - b1], [b2 - limit] }. + */ +void crs_replace_with_free_ranges(GPtrArray *ranges, + uint64_t start, uint64_t end) +{ + GPtrArray *free_ranges = g_ptr_array_new(); + uint64_t free_base = start; + int i; + + g_ptr_array_sort(ranges, crs_range_compare); + for (i = 0; i < ranges->len; i++) { + CrsRangeEntry *used = g_ptr_array_index(ranges, i); + + if (free_base < used->base) { + crs_range_insert(free_ranges, free_base, used->base - 1); + } + + free_base = used->limit + 1; + } + + if (free_base < end) { + crs_range_insert(free_ranges, free_base, end); + } + + g_ptr_array_set_size(ranges, 0); + for (i = 0; i < free_ranges->len; i++) { + g_ptr_array_add(ranges, g_ptr_array_index(free_ranges, i)); + } + + g_ptr_array_free(free_ranges, true); +} + +/* + * crs_range_merge - merges adjacent ranges in the given array. + * Array elements are deleted and replaced with the merged ranges. 
+ */ +static void crs_range_merge(GPtrArray *range) +{ + GPtrArray *tmp = g_ptr_array_new_with_free_func(crs_range_free); + CrsRangeEntry *entry; + uint64_t range_base, range_limit; + int i; + + if (!range->len) { + return; + } + + g_ptr_array_sort(range, crs_range_compare); + + entry = g_ptr_array_index(range, 0); + range_base = entry->base; + range_limit = entry->limit; + for (i = 1; i < range->len; i++) { + entry = g_ptr_array_index(range, i); + if (entry->base - 1 == range_limit) { + range_limit = entry->limit; + } else { + crs_range_insert(tmp, range_base, range_limit); + range_base = entry->base; + range_limit = entry->limit; + } + } + crs_range_insert(tmp, range_base, range_limit); + + g_ptr_array_set_size(range, 0); + for (i = 0; i < tmp->len; i++) { + entry = g_ptr_array_index(tmp, i); + crs_range_insert(range, entry->base, entry->limit); + } + g_ptr_array_free(tmp, true); +} + +static void +build_append_nameseg(GArray *array, const char *seg) +{ + int len; + + len = strlen(seg); + assert(len <= ACPI_NAMESEG_LEN); + + g_array_append_vals(array, seg, len); + /* Pad up to ACPI_NAMESEG_LEN characters if necessary. */ + g_array_append_vals(array, "____", ACPI_NAMESEG_LEN - len); +} + +static void GCC_FMT_ATTR(2, 0) +build_append_namestringv(GArray *array, const char *format, va_list ap) +{ + char *s; + char **segs; + char **segs_iter; + int seg_count = 0; + + s = g_strdup_vprintf(format, ap); + segs = g_strsplit(s, ".", 0); + g_free(s); + + /* count segments */ + segs_iter = segs; + while (*segs_iter) { + ++segs_iter; + ++seg_count; + } + /* + * ACPI 5.0 spec: 20.2.2 Name Objects Encoding: + * "SegCount can be from 1 to 255" + */ + assert(seg_count > 0 && seg_count <= 255); + + /* handle RootPath || PrefixPath */ + s = *segs; + while (*s == '\\' || *s == '^') { + build_append_byte(array, *s); + ++s; + } + + switch (seg_count) { + case 1: + if (!*s) { + build_append_byte(array, 0x00); /* NullName */ + } else { + build_append_nameseg(array, s); + } + break; + + case 2: + build_append_byte(array, 0x2E); /* DualNamePrefix */ + build_append_nameseg(array, s); + build_append_nameseg(array, segs[1]); + break; + default: + build_append_byte(array, 0x2F); /* MultiNamePrefix */ + build_append_byte(array, seg_count); + + /* handle the 1st segment manually due to prefix/root path */ + build_append_nameseg(array, s); + + /* add the rest of segments */ + segs_iter = segs + 1; + while (*segs_iter) { + build_append_nameseg(array, *segs_iter); + ++segs_iter; + } + break; + } + g_strfreev(segs); +} + +GCC_FMT_ATTR(2, 3) +static void build_append_namestring(GArray *array, const char *format, ...) +{ + va_list ap; + + va_start(ap, format); + build_append_namestringv(array, format, ap); + va_end(ap); +} + +/* 5.4 Definition Block Encoding */ +enum { + PACKAGE_LENGTH_1BYTE_SHIFT = 6, /* Up to 63 - use extra 2 bits. */ + PACKAGE_LENGTH_2BYTE_SHIFT = 4, + PACKAGE_LENGTH_3BYTE_SHIFT = 12, + PACKAGE_LENGTH_4BYTE_SHIFT = 20, +}; + +static void +build_prepend_package_length(GArray *package, unsigned length, bool incl_self) +{ + uint8_t byte; + unsigned length_bytes; + + if (length + 1 < (1 << PACKAGE_LENGTH_1BYTE_SHIFT)) { + length_bytes = 1; + } else if (length + 2 < (1 << PACKAGE_LENGTH_3BYTE_SHIFT)) { + length_bytes = 2; + } else if (length + 3 < (1 << PACKAGE_LENGTH_4BYTE_SHIFT)) { + length_bytes = 3; + } else { + length_bytes = 4; + } + + /* + * NamedField uses PkgLength encoding but it doesn't include length + * of PkgLength itself. 
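+ * + * Worked example of the encoding below (with incl_self): 61 bytes of + * payload encode as the single byte 0x3E, since 61 + 1 byte of PkgLength + * = 62 fits in the six low bits of byte zero. 1000 bytes of payload need + * two bytes: 1000 + 2 = 1002 = 0x3EA, emitted as 0x4A 0x3E (top two bits + * of byte zero = 01 for one extra byte, low nibble = 0xA, following + * byte = 0x3E).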
+ */
+    if (incl_self) {
+        /*
+         * For terms with explicit length, PkgLength covers both the data
+         * and the encoded PkgLength bytes themselves.
+         */
+        length += length_bytes;
+    }
+
+    switch (length_bytes) {
+    case 1:
+        byte = length;
+        build_prepend_byte(package, byte);
+        return;
+    case 4:
+        byte = length >> PACKAGE_LENGTH_4BYTE_SHIFT;
+        build_prepend_byte(package, byte);
+        length &= (1 << PACKAGE_LENGTH_4BYTE_SHIFT) - 1;
+        /* fall through */
+    case 3:
+        byte = length >> PACKAGE_LENGTH_3BYTE_SHIFT;
+        build_prepend_byte(package, byte);
+        length &= (1 << PACKAGE_LENGTH_3BYTE_SHIFT) - 1;
+        /* fall through */
+    case 2:
+        byte = length >> PACKAGE_LENGTH_2BYTE_SHIFT;
+        build_prepend_byte(package, byte);
+        length &= (1 << PACKAGE_LENGTH_2BYTE_SHIFT) - 1;
+        /* fall through */
+    }
+    /*
+     * Most significant two bits of byte zero indicate how many following bytes
+     * are in PkgLength encoding.
+     */
+    byte = ((length_bytes - 1) << PACKAGE_LENGTH_1BYTE_SHIFT) | length;
+    build_prepend_byte(package, byte);
+}
+
+static void
+build_append_pkg_length(GArray *array, unsigned length, bool incl_self)
+{
+    GArray *tmp = build_alloc_array();
+
+    build_prepend_package_length(tmp, length, incl_self);
+    build_append_array(array, tmp);
+    build_free_array(tmp);
+}
+
+static void build_package(GArray *package, uint8_t op)
+{
+    build_prepend_package_length(package, package->len, true);
+    build_prepend_byte(package, op);
+}
+
+static void build_extop_package(GArray *package, uint8_t op)
+{
+    build_package(package, op);
+    build_prepend_byte(package, 0x5B); /* ExtOpPrefix */
+}
+
+void build_append_int_noprefix(GArray *table, uint64_t value, int size)
+{
+    int i;
+
+    for (i = 0; i < size; ++i) {
+        build_append_byte(table, value & 0xFF);
+        value = value >> 8;
+    }
+}
+
+static void build_append_int(GArray *table, uint64_t value)
+{
+    if (value == 0x00) {
+        build_append_byte(table, 0x00); /* ZeroOp */
+    } else if (value == 0x01) {
+        build_append_byte(table, 0x01); /* OneOp */
+    } else if (value <= 0xFF) {
+        build_append_byte(table, 0x0A); /* BytePrefix */
+        build_append_int_noprefix(table, value, 1);
+    } else if (value <= 0xFFFF) {
+        build_append_byte(table, 0x0B); /* WordPrefix */
+        build_append_int_noprefix(table, value, 2);
+    } else if (value <= 0xFFFFFFFF) {
+        build_append_byte(table, 0x0C); /* DWordPrefix */
+        build_append_int_noprefix(table, value, 4);
+    } else {
+        build_append_byte(table, 0x0E); /* QWordPrefix */
+        build_append_int_noprefix(table, value, 8);
+    }
+}
+
+/* Generic Address Structure (GAS)
+ * ACPI 2.0/3.0: 5.2.3.1 Generic Address Structure
+ * 2.0 compat note:
+ *    @access_width must be 0, see ACPI 2.0:Table 5-1
+ */
+void build_append_gas(GArray *table, AmlAddressSpace as,
+                      uint8_t bit_width, uint8_t bit_offset,
+                      uint8_t access_width, uint64_t address)
+{
+    build_append_int_noprefix(table, as, 1);
+    build_append_int_noprefix(table, bit_width, 1);
+    build_append_int_noprefix(table, bit_offset, 1);
+    build_append_int_noprefix(table, access_width, 1);
+    build_append_int_noprefix(table, address, 8);
+}
+
+/*
+ * Build NAME(XXXX, 0x00000000) where 0x00000000 is encoded as a dword,
+ * and return the offset to 0x00000000 for runtime patching.
+ *
+ * Warning: runtime patching is best avoided. Only use this as
+ * a replacement for DataTableRegion (for guests that don't
+ * support it).
+ */
+int
+build_append_named_dword(GArray *array, const char *name_format, ...)
+{ + int offset; + va_list ap; + + build_append_byte(array, 0x08); /* NameOp */ + va_start(ap, name_format); + build_append_namestringv(array, name_format, ap); + va_end(ap); + + build_append_byte(array, 0x0C); /* DWordPrefix */ + + offset = array->len; + build_append_int_noprefix(array, 0x00000000, 4); + assert(array->len == offset + 4); + + return offset; +} + +static GPtrArray *alloc_list; + +static Aml *aml_alloc(void) +{ + Aml *var = g_new0(typeof(*var), 1); + + g_ptr_array_add(alloc_list, var); + var->block_flags = AML_NO_OPCODE; + var->buf = build_alloc_array(); + return var; +} + +static Aml *aml_opcode(uint8_t op) +{ + Aml *var = aml_alloc(); + + var->op = op; + var->block_flags = AML_OPCODE; + return var; +} + +static Aml *aml_bundle(uint8_t op, AmlBlockFlags flags) +{ + Aml *var = aml_alloc(); + + var->op = op; + var->block_flags = flags; + return var; +} + +static void aml_free(gpointer data, gpointer user_data) +{ + Aml *var = data; + build_free_array(var->buf); + g_free(var); +} + +Aml *init_aml_allocator(void) +{ + assert(!alloc_list); + alloc_list = g_ptr_array_new(); + return aml_alloc(); +} + +void free_aml_allocator(void) +{ + g_ptr_array_foreach(alloc_list, aml_free, NULL); + g_ptr_array_free(alloc_list, true); + alloc_list = 0; +} + +/* pack data with DefBuffer encoding */ +static void build_buffer(GArray *array, uint8_t op) +{ + GArray *data = build_alloc_array(); + + build_append_int(data, array->len); + g_array_prepend_vals(array, data->data, data->len); + build_free_array(data); + build_package(array, op); +} + +void aml_append(Aml *parent_ctx, Aml *child) +{ + GArray *buf = build_alloc_array(); + build_append_array(buf, child->buf); + + switch (child->block_flags) { + case AML_OPCODE: + build_append_byte(parent_ctx->buf, child->op); + break; + case AML_EXT_PACKAGE: + build_extop_package(buf, child->op); + break; + case AML_PACKAGE: + build_package(buf, child->op); + break; + case AML_RES_TEMPLATE: + build_append_byte(buf, 0x79); /* EndTag */ + /* + * checksum operations are treated as succeeded if checksum + * field is zero. [ACPI Spec 1.0b, 6.4.2.8 End Tag] + */ + build_append_byte(buf, 0); + /* fall through, to pack resources in buffer */ + case AML_BUFFER: + build_buffer(buf, child->op); + break; + case AML_NO_OPCODE: + break; + default: + assert(0); + break; + } + build_append_array(parent_ctx->buf, buf); + build_free_array(buf); +} + +/* ACPI 1.0b: 16.2.5.1 Namespace Modifier Objects Encoding: DefScope */ +Aml *aml_scope(const char *name_format, ...) +{ + va_list ap; + Aml *var = aml_bundle(0x10 /* ScopeOp */, AML_PACKAGE); + va_start(ap, name_format); + build_append_namestringv(var->buf, name_format, ap); + va_end(ap); + return var; +} + +/* ACPI 1.0b: 16.2.5.3 Type 1 Opcodes Encoding: DefReturn */ +Aml *aml_return(Aml *val) +{ + Aml *var = aml_opcode(0xA4 /* ReturnOp */); + aml_append(var, val); + return var; +} + +/* ACPI 1.0b: 16.2.6.3 Debug Objects Encoding: DebugObj */ +Aml *aml_debug(void) +{ + Aml *var = aml_alloc(); + build_append_byte(var->buf, 0x5B); /* ExtOpPrefix */ + build_append_byte(var->buf, 0x31); /* DebugOp */ + return var; +} + +/* + * ACPI 1.0b: 16.2.3 Data Objects Encoding: + * encodes: ByteConst, WordConst, DWordConst, QWordConst, ZeroOp, OneOp + */ +Aml *aml_int(const uint64_t val) +{ + Aml *var = aml_alloc(); + build_append_int(var->buf, val); + return var; +} + +/* + * helper to construct NameString, which returns Aml object + * for using with aml_append or other aml_* terms + */ +Aml *aml_name(const char *name_format, ...) 
+{
+    va_list ap;
+    Aml *var = aml_alloc();
+    va_start(ap, name_format);
+    build_append_namestringv(var->buf, name_format, ap);
+    va_end(ap);
+    return var;
+}
+
+/* ACPI 1.0b: 16.2.5.1 Namespace Modifier Objects Encoding: DefName */
+Aml *aml_name_decl(const char *name, Aml *val)
+{
+    Aml *var = aml_opcode(0x08 /* NameOp */);
+    build_append_namestring(var->buf, "%s", name);
+    aml_append(var, val);
+    return var;
+}
+
+/* ACPI 1.0b: 16.2.6.1 Arg Objects Encoding */
+Aml *aml_arg(int pos)
+{
+    uint8_t op = 0x68 /* ARG0 op */ + pos;
+
+    assert(pos <= 6);
+    return aml_opcode(op);
+}
+
+/* ACPI 2.0a: 17.2.4.4 Type 2 Opcodes Encoding: DefToInteger */
+Aml *aml_to_integer(Aml *arg)
+{
+    Aml *var = aml_opcode(0x99 /* ToIntegerOp */);
+    aml_append(var, arg);
+    build_append_byte(var->buf, 0x00 /* NullNameOp */);
+    return var;
+}
+
+/* ACPI 2.0a: 17.2.4.4 Type 2 Opcodes Encoding: DefToHexString */
+Aml *aml_to_hexstring(Aml *src, Aml *dst)
+{
+    Aml *var = aml_opcode(0x98 /* ToHexStringOp */);
+    aml_append(var, src);
+    if (dst) {
+        aml_append(var, dst);
+    } else {
+        build_append_byte(var->buf, 0x00 /* NullNameOp */);
+    }
+    return var;
+}
+
+/* ACPI 2.0a: 17.2.4.4 Type 2 Opcodes Encoding: DefToBuffer */
+Aml *aml_to_buffer(Aml *src, Aml *dst)
+{
+    Aml *var = aml_opcode(0x96 /* ToBufferOp */);
+    aml_append(var, src);
+    if (dst) {
+        aml_append(var, dst);
+    } else {
+        build_append_byte(var->buf, 0x00 /* NullNameOp */);
+    }
+    return var;
+}
+
+/* ACPI 2.0a: 17.2.4.4 Type 2 Opcodes Encoding: DefToDecimalString */
+Aml *aml_to_decimalstring(Aml *src, Aml *dst)
+{
+    Aml *var = aml_opcode(0x97 /* ToDecimalStringOp */);
+    aml_append(var, src);
+    if (dst) {
+        aml_append(var, dst);
+    } else {
+        build_append_byte(var->buf, 0x00 /* NullNameOp */);
+    }
+    return var;
+}
+
+/* ACPI 1.0b: 16.2.5.4 Type 2 Opcodes Encoding: DefStore */
+Aml *aml_store(Aml *val, Aml *target)
+{
+    Aml *var = aml_opcode(0x70 /* StoreOp */);
+    aml_append(var, val);
+    aml_append(var, target);
+    return var;
+}
+
+/**
+ * build_opcode_2arg_dst:
+ * @op: 1-byte opcode
+ * @arg1: 1st operand
+ * @arg2: 2nd operand
+ * @dst: optional target to store to, set to NULL if it's not required
+ *
+ * An internal helper to compose AML terms that have
+ * "Op Operand Operand Target"
+ * pattern.
+ *
+ * Returns: the newly allocated Aml object composed according to the pattern.
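+ *
+ * For instance (illustrative), aml_add(aml_int(2), aml_int(3), NULL) ends
+ * up here with @op 0x72 (AddOp) and a NullName target, producing the AML
+ * encoding of an Add term with no explicit destination.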
+ */ +static Aml * +build_opcode_2arg_dst(uint8_t op, Aml *arg1, Aml *arg2, Aml *dst) +{ + Aml *var = aml_opcode(op); + aml_append(var, arg1); + aml_append(var, arg2); + if (dst) { + aml_append(var, dst); + } else { + build_append_byte(var->buf, 0x00 /* NullNameOp */); + } + return var; +} + +/* ACPI 1.0b: 16.2.5.4 Type 2 Opcodes Encoding: DefAnd */ +Aml *aml_and(Aml *arg1, Aml *arg2, Aml *dst) +{ + return build_opcode_2arg_dst(0x7B /* AndOp */, arg1, arg2, dst); +} + +/* ACPI 1.0b: 16.2.5.4 Type 2 Opcodes Encoding: DefOr */ +Aml *aml_or(Aml *arg1, Aml *arg2, Aml *dst) +{ + return build_opcode_2arg_dst(0x7D /* OrOp */, arg1, arg2, dst); +} + +/* ACPI 1.0b: 16.2.5.4 Type 2 Opcodes Encoding: DefLAnd */ +Aml *aml_land(Aml *arg1, Aml *arg2) +{ + Aml *var = aml_opcode(0x90 /* LAndOp */); + aml_append(var, arg1); + aml_append(var, arg2); + return var; +} + +/* ACPI 1.0b: 16.2.5.4 Type 2 Opcodes Encoding: DefLOr */ +Aml *aml_lor(Aml *arg1, Aml *arg2) +{ + Aml *var = aml_opcode(0x91 /* LOrOp */); + aml_append(var, arg1); + aml_append(var, arg2); + return var; +} + +/* ACPI 1.0b: 16.2.5.4 Type 2 Opcodes Encoding: DefShiftLeft */ +Aml *aml_shiftleft(Aml *arg1, Aml *count) +{ + return build_opcode_2arg_dst(0x79 /* ShiftLeftOp */, arg1, count, NULL); +} + +/* ACPI 1.0b: 16.2.5.4 Type 2 Opcodes Encoding: DefShiftRight */ +Aml *aml_shiftright(Aml *arg1, Aml *count, Aml *dst) +{ + return build_opcode_2arg_dst(0x7A /* ShiftRightOp */, arg1, count, dst); +} + +/* ACPI 1.0b: 16.2.5.4 Type 2 Opcodes Encoding: DefLLess */ +Aml *aml_lless(Aml *arg1, Aml *arg2) +{ + Aml *var = aml_opcode(0x95 /* LLessOp */); + aml_append(var, arg1); + aml_append(var, arg2); + return var; +} + +/* ACPI 1.0b: 16.2.5.4 Type 2 Opcodes Encoding: DefAdd */ +Aml *aml_add(Aml *arg1, Aml *arg2, Aml *dst) +{ + return build_opcode_2arg_dst(0x72 /* AddOp */, arg1, arg2, dst); +} + +/* ACPI 1.0b: 16.2.5.4 Type 2 Opcodes Encoding: DefSubtract */ +Aml *aml_subtract(Aml *arg1, Aml *arg2, Aml *dst) +{ + return build_opcode_2arg_dst(0x74 /* SubtractOp */, arg1, arg2, dst); +} + +/* ACPI 1.0b: 16.2.5.4 Type 2 Opcodes Encoding: DefIncrement */ +Aml *aml_increment(Aml *arg) +{ + Aml *var = aml_opcode(0x75 /* IncrementOp */); + aml_append(var, arg); + return var; +} + +/* ACPI 1.0b: 16.2.5.4 Type 2 Opcodes Encoding: DefDecrement */ +Aml *aml_decrement(Aml *arg) +{ + Aml *var = aml_opcode(0x76 /* DecrementOp */); + aml_append(var, arg); + return var; +} + +/* ACPI 1.0b: 16.2.5.4 Type 2 Opcodes Encoding: DefIndex */ +Aml *aml_index(Aml *arg1, Aml *idx) +{ + return build_opcode_2arg_dst(0x88 /* IndexOp */, arg1, idx, NULL); +} + +/* ACPI 1.0b: 16.2.5.3 Type 1 Opcodes Encoding: DefNotify */ +Aml *aml_notify(Aml *arg1, Aml *arg2) +{ + Aml *var = aml_opcode(0x86 /* NotifyOp */); + aml_append(var, arg1); + aml_append(var, arg2); + return var; +} + +/* ACPI 1.0b: 16.2.5.3 Type 1 Opcodes Encoding: DefBreak */ +Aml *aml_break(void) +{ + Aml *var = aml_opcode(0xa5 /* BreakOp */); + return var; +} + +/* helper to call method without argument */ +Aml *aml_call0(const char *method) +{ + Aml *var = aml_alloc(); + build_append_namestring(var->buf, "%s", method); + return var; +} + +/* helper to call method with 1 argument */ +Aml *aml_call1(const char *method, Aml *arg1) +{ + Aml *var = aml_alloc(); + build_append_namestring(var->buf, "%s", method); + aml_append(var, arg1); + return var; +} + +/* helper to call method with 2 arguments */ +Aml *aml_call2(const char *method, Aml *arg1, Aml *arg2) +{ + Aml *var = aml_alloc(); + build_append_namestring(var->buf, "%s", 
method);
+    aml_append(var, arg1);
+    aml_append(var, arg2);
+    return var;
+}
+
+/* helper to call method with 3 arguments */
+Aml *aml_call3(const char *method, Aml *arg1, Aml *arg2, Aml *arg3)
+{
+    Aml *var = aml_alloc();
+    build_append_namestring(var->buf, "%s", method);
+    aml_append(var, arg1);
+    aml_append(var, arg2);
+    aml_append(var, arg3);
+    return var;
+}
+
+/* helper to call method with 4 arguments */
+Aml *aml_call4(const char *method, Aml *arg1, Aml *arg2, Aml *arg3, Aml *arg4)
+{
+    Aml *var = aml_alloc();
+    build_append_namestring(var->buf, "%s", method);
+    aml_append(var, arg1);
+    aml_append(var, arg2);
+    aml_append(var, arg3);
+    aml_append(var, arg4);
+    return var;
+}
+
+/* helper to call method with 5 arguments */
+Aml *aml_call5(const char *method, Aml *arg1, Aml *arg2, Aml *arg3, Aml *arg4,
+               Aml *arg5)
+{
+    Aml *var = aml_alloc();
+    build_append_namestring(var->buf, "%s", method);
+    aml_append(var, arg1);
+    aml_append(var, arg2);
+    aml_append(var, arg3);
+    aml_append(var, arg4);
+    aml_append(var, arg5);
+    return var;
+}
+
+/* helper to call method with 6 arguments */
+Aml *aml_call6(const char *method, Aml *arg1, Aml *arg2, Aml *arg3, Aml *arg4,
+               Aml *arg5, Aml *arg6)
+{
+    Aml *var = aml_alloc();
+    build_append_namestring(var->buf, "%s", method);
+    aml_append(var, arg1);
+    aml_append(var, arg2);
+    aml_append(var, arg3);
+    aml_append(var, arg4);
+    aml_append(var, arg5);
+    aml_append(var, arg6);
+    return var;
+}
+
+/*
+ * ACPI 5.0: 6.4.3.8.1 GPIO Connection Descriptor
+ * Type 1, Large Item Name 0xC
+ */
+
+static Aml *aml_gpio_connection(AmlGpioConnectionType type,
+                                AmlConsumerAndProducer con_and_pro,
+                                uint8_t flags, AmlPinConfig pin_config,
+                                uint16_t output_drive,
+                                uint16_t debounce_timeout,
+                                const uint32_t pin_list[], uint32_t pin_count,
+                                const char *resource_source_name,
+                                const uint8_t *vendor_data,
+                                uint16_t vendor_data_len)
+{
+    Aml *var = aml_alloc();
+    const uint16_t min_desc_len = 0x16;
+    uint16_t resource_source_name_len, length;
+    uint16_t pin_table_offset, resource_source_name_offset, vendor_data_offset;
+    uint32_t i;
+
+    assert(resource_source_name);
+    resource_source_name_len = strlen(resource_source_name) + 1;
+    length = min_desc_len + resource_source_name_len + vendor_data_len;
+    pin_table_offset = min_desc_len + 1;
+    resource_source_name_offset = pin_table_offset + pin_count * 2;
+    vendor_data_offset = resource_source_name_offset + resource_source_name_len;
+
+    build_append_byte(var->buf, 0x8C); /* GPIO Connection Descriptor */
+    build_append_int_noprefix(var->buf, length, 2); /* Length */
+    build_append_byte(var->buf, 1); /* Revision ID */
+    build_append_byte(var->buf, type); /* GPIO Connection Type */
+    /* General Flags (2 bytes) */
+    build_append_int_noprefix(var->buf, con_and_pro, 2);
+    /* Interrupt and IO Flags (2 bytes) */
+    build_append_int_noprefix(var->buf, flags, 2);
+    /* Pin Configuration 0 = Default 1 = Pull-up 2 = Pull-down 3 = No Pull */
+    build_append_byte(var->buf, pin_config);
+    /* Output Drive Strength (2 bytes) */
+    build_append_int_noprefix(var->buf, output_drive, 2);
+    /* Debounce Timeout (2 bytes) */
+    build_append_int_noprefix(var->buf, debounce_timeout, 2);
+    /* Pin Table Offset (2 bytes) */
+    build_append_int_noprefix(var->buf, pin_table_offset, 2);
+    build_append_byte(var->buf, 0); /* Resource Source Index */
+    /* Resource Source Name Offset (2 bytes) */
+    build_append_int_noprefix(var->buf, resource_source_name_offset, 2);
+    /* Vendor Data Offset (2 bytes) */
+    build_append_int_noprefix(var->buf, vendor_data_offset, 2);
+
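+    /*
+     * A worked example with assumed values: for two pins and a resource
+     * source name "GPO0" (5 bytes including the NUL), pin_table_offset is
+     * 0x17, resource_source_name_offset is 0x17 + 2 * 2 = 0x1B, and
+     * vendor_data_offset is 0x1B + 5 = 0x20, all relative to the start of
+     * the descriptor.
+     */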
/* Vendor Data Length (2 bytes) */ + build_append_int_noprefix(var->buf, vendor_data_len, 2); + /* Pin Number (2n bytes)*/ + for (i = 0; i < pin_count; i++) { + build_append_int_noprefix(var->buf, pin_list[i], 2); + } + + /* Resource Source Name */ + build_append_namestring(var->buf, "%s", resource_source_name); + build_append_byte(var->buf, '\0'); + + /* Vendor-defined Data */ + if (vendor_data != NULL) { + g_array_append_vals(var->buf, vendor_data, vendor_data_len); + } + + return var; +} + +/* + * ACPI 5.0: 19.5.53 + * GpioInt(GPIO Interrupt Connection Resource Descriptor Macro) + */ +Aml *aml_gpio_int(AmlConsumerAndProducer con_and_pro, + AmlLevelAndEdge edge_level, + AmlActiveHighAndLow active_level, AmlShared shared, + AmlPinConfig pin_config, uint16_t debounce_timeout, + const uint32_t pin_list[], uint32_t pin_count, + const char *resource_source_name, + const uint8_t *vendor_data, uint16_t vendor_data_len) +{ + uint8_t flags = edge_level | (active_level << 1) | (shared << 3); + + return aml_gpio_connection(AML_INTERRUPT_CONNECTION, con_and_pro, flags, + pin_config, 0, debounce_timeout, pin_list, + pin_count, resource_source_name, vendor_data, + vendor_data_len); +} + +/* + * ACPI 1.0b: 6.4.3.4 32-Bit Fixed Location Memory Range Descriptor + * (Type 1, Large Item Name 0x6) + */ +Aml *aml_memory32_fixed(uint32_t addr, uint32_t size, + AmlReadAndWrite read_and_write) +{ + Aml *var = aml_alloc(); + build_append_byte(var->buf, 0x86); /* Memory32Fixed Resource Descriptor */ + build_append_byte(var->buf, 9); /* Length, bits[7:0] value = 9 */ + build_append_byte(var->buf, 0); /* Length, bits[15:8] value = 0 */ + build_append_byte(var->buf, read_and_write); /* Write status, 1 rw 0 ro */ + + /* Range base address */ + build_append_byte(var->buf, extract32(addr, 0, 8)); /* bits[7:0] */ + build_append_byte(var->buf, extract32(addr, 8, 8)); /* bits[15:8] */ + build_append_byte(var->buf, extract32(addr, 16, 8)); /* bits[23:16] */ + build_append_byte(var->buf, extract32(addr, 24, 8)); /* bits[31:24] */ + + /* Range length */ + build_append_byte(var->buf, extract32(size, 0, 8)); /* bits[7:0] */ + build_append_byte(var->buf, extract32(size, 8, 8)); /* bits[15:8] */ + build_append_byte(var->buf, extract32(size, 16, 8)); /* bits[23:16] */ + build_append_byte(var->buf, extract32(size, 24, 8)); /* bits[31:24] */ + return var; +} + +/* + * ACPI 5.0: 6.4.3.6 Extended Interrupt Descriptor + * Type 1, Large Item Name 0x9 + */ +Aml *aml_interrupt(AmlConsumerAndProducer con_and_pro, + AmlLevelAndEdge level_and_edge, + AmlActiveHighAndLow high_and_low, AmlShared shared, + uint32_t *irq_list, uint8_t irq_count) +{ + int i; + Aml *var = aml_alloc(); + uint8_t irq_flags = con_and_pro | (level_and_edge << 1) + | (high_and_low << 2) | (shared << 3); + const int header_bytes_in_len = 2; + uint16_t len = header_bytes_in_len + irq_count * sizeof(uint32_t); + + assert(irq_count > 0); + + build_append_byte(var->buf, 0x89); /* Extended irq descriptor */ + build_append_byte(var->buf, len & 0xFF); /* Length, bits[7:0] */ + build_append_byte(var->buf, len >> 8); /* Length, bits[15:8] */ + build_append_byte(var->buf, irq_flags); /* Interrupt Vector Information. 
*/ + build_append_byte(var->buf, irq_count); /* Interrupt table length */ + + /* Interrupt Number List */ + for (i = 0; i < irq_count; i++) { + build_append_int_noprefix(var->buf, irq_list[i], 4); + } + return var; +} + +/* ACPI 1.0b: 6.4.2.5 I/O Port Descriptor */ +Aml *aml_io(AmlIODecode dec, uint16_t min_base, uint16_t max_base, + uint8_t aln, uint8_t len) +{ + Aml *var = aml_alloc(); + build_append_byte(var->buf, 0x47); /* IO port descriptor */ + build_append_byte(var->buf, dec); + build_append_byte(var->buf, min_base & 0xff); + build_append_byte(var->buf, (min_base >> 8) & 0xff); + build_append_byte(var->buf, max_base & 0xff); + build_append_byte(var->buf, (max_base >> 8) & 0xff); + build_append_byte(var->buf, aln); + build_append_byte(var->buf, len); + return var; +} + +/* + * ACPI 1.0b: 6.4.2.1.1 ASL Macro for IRQ Descriptor + * + * More verbose description at: + * ACPI 5.0: 19.5.64 IRQNoFlags (Interrupt Resource Descriptor Macro) + * 6.4.2.1 IRQ Descriptor + */ +Aml *aml_irq_no_flags(uint8_t irq) +{ + uint16_t irq_mask; + Aml *var = aml_alloc(); + + assert(irq < 16); + build_append_byte(var->buf, 0x22); /* IRQ descriptor 2 byte form */ + + irq_mask = 1U << irq; + build_append_byte(var->buf, irq_mask & 0xFF); /* IRQ mask bits[7:0] */ + build_append_byte(var->buf, irq_mask >> 8); /* IRQ mask bits[15:8] */ + return var; +} + +/* ACPI 1.0b: 16.2.5.4 Type 2 Opcodes Encoding: DefLNot */ +Aml *aml_lnot(Aml *arg) +{ + Aml *var = aml_opcode(0x92 /* LNotOp */); + aml_append(var, arg); + return var; +} + +/* ACPI 1.0b: 16.2.5.4 Type 2 Opcodes Encoding: DefLEqual */ +Aml *aml_equal(Aml *arg1, Aml *arg2) +{ + Aml *var = aml_opcode(0x93 /* LequalOp */); + aml_append(var, arg1); + aml_append(var, arg2); + return var; +} + +/* ACPI 1.0b: 16.2.5.4 Type 2 Opcodes Encoding: DefLGreater */ +Aml *aml_lgreater(Aml *arg1, Aml *arg2) +{ + Aml *var = aml_opcode(0x94 /* LGreaterOp */); + aml_append(var, arg1); + aml_append(var, arg2); + return var; +} + +/* ACPI 1.0b: 16.2.5.4 Type 2 Opcodes Encoding: DefLGreaterEqual */ +Aml *aml_lgreater_equal(Aml *arg1, Aml *arg2) +{ + /* LGreaterEqualOp := LNotOp LLessOp */ + Aml *var = aml_opcode(0x92 /* LNotOp */); + build_append_byte(var->buf, 0x95 /* LLessOp */); + aml_append(var, arg1); + aml_append(var, arg2); + return var; +} + +/* ACPI 1.0b: 16.2.5.3 Type 1 Opcodes Encoding: DefIfElse */ +Aml *aml_if(Aml *predicate) +{ + Aml *var = aml_bundle(0xA0 /* IfOp */, AML_PACKAGE); + aml_append(var, predicate); + return var; +} + +/* ACPI 1.0b: 16.2.5.3 Type 1 Opcodes Encoding: DefElse */ +Aml *aml_else(void) +{ + Aml *var = aml_bundle(0xA1 /* ElseOp */, AML_PACKAGE); + return var; +} + +/* ACPI 1.0b: 16.2.5.3 Type 1 Opcodes Encoding: DefWhile */ +Aml *aml_while(Aml *predicate) +{ + Aml *var = aml_bundle(0xA2 /* WhileOp */, AML_PACKAGE); + aml_append(var, predicate); + return var; +} + +/* ACPI 1.0b: 16.2.5.2 Named Objects Encoding: DefMethod */ +Aml *aml_method(const char *name, int arg_count, AmlSerializeFlag sflag) +{ + Aml *var = aml_bundle(0x14 /* MethodOp */, AML_PACKAGE); + int methodflags; + + /* + * MethodFlags: + * bit 0-2: ArgCount (0-7) + * bit 3: SerializeFlag + * 0: NotSerialized + * 1: Serialized + * bit 4-7: reserved (must be 0) + */ + assert(arg_count < 8); + methodflags = arg_count | (sflag << 3); + + build_append_namestring(var->buf, "%s", name); + build_append_byte(var->buf, methodflags); /* MethodFlags: ArgCount */ + return var; +} + +/* ACPI 1.0b: 16.2.5.2 Named Objects Encoding: DefDevice */ +Aml *aml_device(const char *name_format, ...) 
+{ + va_list ap; + Aml *var = aml_bundle(0x82 /* DeviceOp */, AML_EXT_PACKAGE); + va_start(ap, name_format); + build_append_namestringv(var->buf, name_format, ap); + va_end(ap); + return var; +} + +/* ACPI 1.0b: 6.4.1 ASL Macros for Resource Descriptors */ +Aml *aml_resource_template(void) +{ + /* ResourceTemplate is a buffer of Resources with EndTag at the end */ + Aml *var = aml_bundle(0x11 /* BufferOp */, AML_RES_TEMPLATE); + return var; +} + +/* ACPI 1.0b: 16.2.5.4 Type 2 Opcodes Encoding: DefBuffer + * Pass byte_list as NULL to request uninitialized buffer to reserve space. + */ +Aml *aml_buffer(int buffer_size, uint8_t *byte_list) +{ + int i; + Aml *var = aml_bundle(0x11 /* BufferOp */, AML_BUFFER); + + for (i = 0; i < buffer_size; i++) { + if (byte_list == NULL) { + build_append_byte(var->buf, 0x0); + } else { + build_append_byte(var->buf, byte_list[i]); + } + } + + return var; +} + +/* ACPI 1.0b: 16.2.5.4 Type 2 Opcodes Encoding: DefPackage */ +Aml *aml_package(uint8_t num_elements) +{ + Aml *var = aml_bundle(0x12 /* PackageOp */, AML_PACKAGE); + build_append_byte(var->buf, num_elements); + return var; +} + +/* ACPI 1.0b: 16.2.5.2 Named Objects Encoding: DefOpRegion */ +Aml *aml_operation_region(const char *name, AmlRegionSpace rs, + Aml *offset, uint32_t len) +{ + Aml *var = aml_alloc(); + build_append_byte(var->buf, 0x5B); /* ExtOpPrefix */ + build_append_byte(var->buf, 0x80); /* OpRegionOp */ + build_append_namestring(var->buf, "%s", name); + build_append_byte(var->buf, rs); + aml_append(var, offset); + build_append_int(var->buf, len); + return var; +} + +/* ACPI 1.0b: 16.2.5.2 Named Objects Encoding: NamedField */ +Aml *aml_named_field(const char *name, unsigned length) +{ + Aml *var = aml_alloc(); + build_append_nameseg(var->buf, name); + build_append_pkg_length(var->buf, length, false); + return var; +} + +/* ACPI 1.0b: 16.2.5.2 Named Objects Encoding: ReservedField */ +Aml *aml_reserved_field(unsigned length) +{ + Aml *var = aml_alloc(); + /* ReservedField := 0x00 PkgLength */ + build_append_byte(var->buf, 0x00); + build_append_pkg_length(var->buf, length, false); + return var; +} + +/* ACPI 1.0b: 16.2.5.2 Named Objects Encoding: DefField */ +Aml *aml_field(const char *name, AmlAccessType type, AmlLockRule lock, + AmlUpdateRule rule) +{ + Aml *var = aml_bundle(0x81 /* FieldOp */, AML_EXT_PACKAGE); + uint8_t flags = rule << 5 | type; + + flags |= lock << 4; /* LockRule at 4 bit offset */ + + build_append_namestring(var->buf, "%s", name); + build_append_byte(var->buf, flags); + return var; +} + +static +Aml *create_field_common(int opcode, Aml *srcbuf, Aml *index, const char *name) +{ + Aml *var = aml_opcode(opcode); + aml_append(var, srcbuf); + aml_append(var, index); + build_append_namestring(var->buf, "%s", name); + return var; +} + +/* ACPI 1.0b: 16.2.5.2 Named Objects Encoding: DefCreateField */ +Aml *aml_create_field(Aml *srcbuf, Aml *bit_index, Aml *num_bits, + const char *name) +{ + Aml *var = aml_alloc(); + build_append_byte(var->buf, 0x5B); /* ExtOpPrefix */ + build_append_byte(var->buf, 0x13); /* CreateFieldOp */ + aml_append(var, srcbuf); + aml_append(var, bit_index); + aml_append(var, num_bits); + build_append_namestring(var->buf, "%s", name); + return var; +} + +/* ACPI 1.0b: 16.2.5.2 Named Objects Encoding: DefCreateDWordField */ +Aml *aml_create_dword_field(Aml *srcbuf, Aml *index, const char *name) +{ + return create_field_common(0x8A /* CreateDWordFieldOp */, + srcbuf, index, name); +} + +/* ACPI 2.0a: 17.2.4.2 Named Objects Encoding: DefCreateQWordField */ 
+Aml *aml_create_qword_field(Aml *srcbuf, Aml *index, const char *name) +{ + return create_field_common(0x8F /* CreateQWordFieldOp */, + srcbuf, index, name); +} + +/* ACPI 1.0b: 16.2.3 Data Objects Encoding: String */ +Aml *aml_string(const char *name_format, ...) +{ + Aml *var = aml_opcode(0x0D /* StringPrefix */); + va_list ap; + char *s; + int len; + + va_start(ap, name_format); + len = g_vasprintf(&s, name_format, ap); + va_end(ap); + + g_array_append_vals(var->buf, s, len + 1); + g_free(s); + + return var; +} + +/* ACPI 1.0b: 16.2.6.2 Local Objects Encoding */ +Aml *aml_local(int num) +{ + uint8_t op = 0x60 /* Local0Op */ + num; + + assert(num <= 7); + return aml_opcode(op); +} + +/* ACPI 2.0a: 17.2.2 Data Objects Encoding: DefVarPackage */ +Aml *aml_varpackage(uint32_t num_elements) +{ + Aml *var = aml_bundle(0x13 /* VarPackageOp */, AML_PACKAGE); + build_append_int(var->buf, num_elements); + return var; +} + +/* ACPI 1.0b: 16.2.5.2 Named Objects Encoding: DefProcessor */ +Aml *aml_processor(uint8_t proc_id, uint32_t pblk_addr, uint8_t pblk_len, + const char *name_format, ...) +{ + va_list ap; + Aml *var = aml_bundle(0x83 /* ProcessorOp */, AML_EXT_PACKAGE); + va_start(ap, name_format); + build_append_namestringv(var->buf, name_format, ap); + va_end(ap); + build_append_byte(var->buf, proc_id); /* ProcID */ + build_append_int_noprefix(var->buf, pblk_addr, sizeof(pblk_addr)); + build_append_byte(var->buf, pblk_len); /* PblkLen */ + return var; +} + +static uint8_t Hex2Digit(char c) +{ + if (c >= 'A') { + return c - 'A' + 10; + } + + return c - '0'; +} + +/* ACPI 1.0b: 15.2.3.6.4.1 EISAID Macro - Convert EISA ID String To Integer */ +Aml *aml_eisaid(const char *str) +{ + Aml *var = aml_alloc(); + uint32_t id; + + g_assert(strlen(str) == 7); + id = (str[0] - 0x40) << 26 | + (str[1] - 0x40) << 21 | + (str[2] - 0x40) << 16 | + Hex2Digit(str[3]) << 12 | + Hex2Digit(str[4]) << 8 | + Hex2Digit(str[5]) << 4 | + Hex2Digit(str[6]); + + build_append_byte(var->buf, 0x0C); /* DWordPrefix */ + build_append_int_noprefix(var->buf, bswap32(id), sizeof(id)); + return var; +} + +/* ACPI 1.0b: 6.4.3.5.5 Word Address Space Descriptor: bytes 3-5 */ +static Aml *aml_as_desc_header(AmlResourceType type, AmlMinFixed min_fixed, + AmlMaxFixed max_fixed, AmlDecode dec, + uint8_t type_flags) +{ + uint8_t flags = max_fixed | min_fixed | dec; + Aml *var = aml_alloc(); + + build_append_byte(var->buf, type); + build_append_byte(var->buf, flags); + build_append_byte(var->buf, type_flags); /* Type Specific Flags */ + return var; +} + +/* ACPI 1.0b: 6.4.3.5.5 Word Address Space Descriptor */ +static Aml *aml_word_as_desc(AmlResourceType type, AmlMinFixed min_fixed, + AmlMaxFixed max_fixed, AmlDecode dec, + uint16_t addr_gran, uint16_t addr_min, + uint16_t addr_max, uint16_t addr_trans, + uint16_t len, uint8_t type_flags) +{ + Aml *var = aml_alloc(); + + build_append_byte(var->buf, 0x88); /* Word Address Space Descriptor */ + /* minimum length since we do not encode optional fields */ + build_append_byte(var->buf, 0x0D); + build_append_byte(var->buf, 0x0); + + aml_append(var, + aml_as_desc_header(type, min_fixed, max_fixed, dec, type_flags)); + build_append_int_noprefix(var->buf, addr_gran, sizeof(addr_gran)); + build_append_int_noprefix(var->buf, addr_min, sizeof(addr_min)); + build_append_int_noprefix(var->buf, addr_max, sizeof(addr_max)); + build_append_int_noprefix(var->buf, addr_trans, sizeof(addr_trans)); + build_append_int_noprefix(var->buf, len, sizeof(len)); + return var; +} + +/* ACPI 1.0b: 6.4.3.5.3 DWord 
Address Space Descriptor */ +static Aml *aml_dword_as_desc(AmlResourceType type, AmlMinFixed min_fixed, + AmlMaxFixed max_fixed, AmlDecode dec, + uint32_t addr_gran, uint32_t addr_min, + uint32_t addr_max, uint32_t addr_trans, + uint32_t len, uint8_t type_flags) +{ + Aml *var = aml_alloc(); + + build_append_byte(var->buf, 0x87); /* DWord Address Space Descriptor */ + /* minimum length since we do not encode optional fields */ + build_append_byte(var->buf, 23); + build_append_byte(var->buf, 0x0); + + + aml_append(var, + aml_as_desc_header(type, min_fixed, max_fixed, dec, type_flags)); + build_append_int_noprefix(var->buf, addr_gran, sizeof(addr_gran)); + build_append_int_noprefix(var->buf, addr_min, sizeof(addr_min)); + build_append_int_noprefix(var->buf, addr_max, sizeof(addr_max)); + build_append_int_noprefix(var->buf, addr_trans, sizeof(addr_trans)); + build_append_int_noprefix(var->buf, len, sizeof(len)); + return var; +} + +/* ACPI 1.0b: 6.4.3.5.1 QWord Address Space Descriptor */ +static Aml *aml_qword_as_desc(AmlResourceType type, AmlMinFixed min_fixed, + AmlMaxFixed max_fixed, AmlDecode dec, + uint64_t addr_gran, uint64_t addr_min, + uint64_t addr_max, uint64_t addr_trans, + uint64_t len, uint8_t type_flags) +{ + Aml *var = aml_alloc(); + + build_append_byte(var->buf, 0x8A); /* QWord Address Space Descriptor */ + /* minimum length since we do not encode optional fields */ + build_append_byte(var->buf, 0x2B); + build_append_byte(var->buf, 0x0); + + aml_append(var, + aml_as_desc_header(type, min_fixed, max_fixed, dec, type_flags)); + build_append_int_noprefix(var->buf, addr_gran, sizeof(addr_gran)); + build_append_int_noprefix(var->buf, addr_min, sizeof(addr_min)); + build_append_int_noprefix(var->buf, addr_max, sizeof(addr_max)); + build_append_int_noprefix(var->buf, addr_trans, sizeof(addr_trans)); + build_append_int_noprefix(var->buf, len, sizeof(len)); + return var; +} + +/* + * ACPI 1.0b: 6.4.3.5.6 ASL Macros for WORD Address Descriptor + * + * More verbose description at: + * ACPI 5.0: 19.5.141 WordBusNumber (Word Bus Number Resource Descriptor Macro) + */ +Aml *aml_word_bus_number(AmlMinFixed min_fixed, AmlMaxFixed max_fixed, + AmlDecode dec, uint16_t addr_gran, + uint16_t addr_min, uint16_t addr_max, + uint16_t addr_trans, uint16_t len) + +{ + return aml_word_as_desc(AML_BUS_NUMBER_RANGE, min_fixed, max_fixed, dec, + addr_gran, addr_min, addr_max, addr_trans, len, 0); +} + +/* + * ACPI 1.0b: 6.4.3.5.6 ASL Macros for WORD Address Descriptor + * + * More verbose description at: + * ACPI 5.0: 19.5.142 WordIO (Word IO Resource Descriptor Macro) + */ +Aml *aml_word_io(AmlMinFixed min_fixed, AmlMaxFixed max_fixed, + AmlDecode dec, AmlISARanges isa_ranges, + uint16_t addr_gran, uint16_t addr_min, + uint16_t addr_max, uint16_t addr_trans, + uint16_t len) + +{ + return aml_word_as_desc(AML_IO_RANGE, min_fixed, max_fixed, dec, + addr_gran, addr_min, addr_max, addr_trans, len, + isa_ranges); +} + +/* + * ACPI 1.0b: 6.4.3.5.4 ASL Macros for DWORD Address Descriptor + * + * More verbose description at: + * ACPI 5.0: 19.5.33 DWordIO (DWord IO Resource Descriptor Macro) + */ +Aml *aml_dword_io(AmlMinFixed min_fixed, AmlMaxFixed max_fixed, + AmlDecode dec, AmlISARanges isa_ranges, + uint32_t addr_gran, uint32_t addr_min, + uint32_t addr_max, uint32_t addr_trans, + uint32_t len) + +{ + return aml_dword_as_desc(AML_IO_RANGE, min_fixed, max_fixed, dec, + addr_gran, addr_min, addr_max, addr_trans, len, + isa_ranges); +} + +/* + * ACPI 1.0b: 6.4.3.5.4 ASL Macros for DWORD Address Space Descriptor 
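+ * (An illustrative call, with assumed addresses for a 32-bit PCI memory
+ * window, would be aml_dword_memory(AML_POS_DECODE, AML_MIN_FIXED,
+ * AML_MAX_FIXED, AML_NON_CACHEABLE, AML_READ_WRITE, 0, 0xE0000000,
+ * 0xFEBFFFFF, 0, 0x1EC00000); build_crs() below composes such calls from
+ * enumerated PCI BARs.)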
+ * + * More verbose description at: + * ACPI 5.0: 19.5.34 DWordMemory (DWord Memory Resource Descriptor Macro) + */ +Aml *aml_dword_memory(AmlDecode dec, AmlMinFixed min_fixed, + AmlMaxFixed max_fixed, AmlCacheable cacheable, + AmlReadAndWrite read_and_write, + uint32_t addr_gran, uint32_t addr_min, + uint32_t addr_max, uint32_t addr_trans, + uint32_t len) +{ + uint8_t flags = read_and_write | (cacheable << 1); + + return aml_dword_as_desc(AML_MEMORY_RANGE, min_fixed, max_fixed, + dec, addr_gran, addr_min, addr_max, + addr_trans, len, flags); +} + +/* + * ACPI 1.0b: 6.4.3.5.2 ASL Macros for QWORD Address Space Descriptor + * + * More verbose description at: + * ACPI 5.0: 19.5.102 QWordMemory (QWord Memory Resource Descriptor Macro) + */ +Aml *aml_qword_memory(AmlDecode dec, AmlMinFixed min_fixed, + AmlMaxFixed max_fixed, AmlCacheable cacheable, + AmlReadAndWrite read_and_write, + uint64_t addr_gran, uint64_t addr_min, + uint64_t addr_max, uint64_t addr_trans, + uint64_t len) +{ + uint8_t flags = read_and_write | (cacheable << 1); + + return aml_qword_as_desc(AML_MEMORY_RANGE, min_fixed, max_fixed, + dec, addr_gran, addr_min, addr_max, + addr_trans, len, flags); +} + +/* ACPI 1.0b: 6.4.2.2 DMA Format/6.4.2.2.1 ASL Macro for DMA Descriptor */ +Aml *aml_dma(AmlDmaType typ, AmlDmaBusMaster bm, AmlTransferSize sz, + uint8_t channel) +{ + Aml *var = aml_alloc(); + uint8_t flags = sz | bm << 2 | typ << 5; + + assert(channel < 8); + build_append_byte(var->buf, 0x2A); /* Byte 0: DMA Descriptor */ + build_append_byte(var->buf, 1U << channel); /* Byte 1: _DMA - DmaChannel */ + build_append_byte(var->buf, flags); /* Byte 2 */ + return var; +} + +/* ACPI 1.0b: 16.2.5.3 Type 1 Opcodes Encoding: DefSleep */ +Aml *aml_sleep(uint64_t msec) +{ + Aml *var = aml_alloc(); + build_append_byte(var->buf, 0x5B); /* ExtOpPrefix */ + build_append_byte(var->buf, 0x22); /* SleepOp */ + aml_append(var, aml_int(msec)); + return var; +} + +static uint8_t Hex2Byte(const char *src) +{ + int hi, lo; + + hi = Hex2Digit(src[0]); + assert(hi >= 0); + assert(hi <= 15); + + lo = Hex2Digit(src[1]); + assert(lo >= 0); + assert(lo <= 15); + return (hi << 4) | lo; +} + +/* + * ACPI 3.0: 17.5.124 ToUUID (Convert String to UUID Macro) + * e.g. 
UUID: aabbccdd-eeff-gghh-iijj-kkllmmnnoopp + * call aml_touuid("aabbccdd-eeff-gghh-iijj-kkllmmnnoopp"); + */ +Aml *aml_touuid(const char *uuid) +{ + Aml *var = aml_bundle(0x11 /* BufferOp */, AML_BUFFER); + + assert(strlen(uuid) == 36); + assert(uuid[8] == '-'); + assert(uuid[13] == '-'); + assert(uuid[18] == '-'); + assert(uuid[23] == '-'); + + build_append_byte(var->buf, Hex2Byte(uuid + 6)); /* dd - at offset 00 */ + build_append_byte(var->buf, Hex2Byte(uuid + 4)); /* cc - at offset 01 */ + build_append_byte(var->buf, Hex2Byte(uuid + 2)); /* bb - at offset 02 */ + build_append_byte(var->buf, Hex2Byte(uuid + 0)); /* aa - at offset 03 */ + + build_append_byte(var->buf, Hex2Byte(uuid + 11)); /* ff - at offset 04 */ + build_append_byte(var->buf, Hex2Byte(uuid + 9)); /* ee - at offset 05 */ + + build_append_byte(var->buf, Hex2Byte(uuid + 16)); /* hh - at offset 06 */ + build_append_byte(var->buf, Hex2Byte(uuid + 14)); /* gg - at offset 07 */ + + build_append_byte(var->buf, Hex2Byte(uuid + 19)); /* ii - at offset 08 */ + build_append_byte(var->buf, Hex2Byte(uuid + 21)); /* jj - at offset 09 */ + + build_append_byte(var->buf, Hex2Byte(uuid + 24)); /* kk - at offset 10 */ + build_append_byte(var->buf, Hex2Byte(uuid + 26)); /* ll - at offset 11 */ + build_append_byte(var->buf, Hex2Byte(uuid + 28)); /* mm - at offset 12 */ + build_append_byte(var->buf, Hex2Byte(uuid + 30)); /* nn - at offset 13 */ + build_append_byte(var->buf, Hex2Byte(uuid + 32)); /* oo - at offset 14 */ + build_append_byte(var->buf, Hex2Byte(uuid + 34)); /* pp - at offset 15 */ + + return var; +} + +/* + * ACPI 2.0b: 16.2.3.6.4.3 Unicode Macro (Convert Ascii String To Unicode) + */ +Aml *aml_unicode(const char *str) +{ + int i = 0; + Aml *var = aml_bundle(0x11 /* BufferOp */, AML_BUFFER); + + do { + build_append_byte(var->buf, str[i]); + build_append_byte(var->buf, 0); + i++; + } while (i <= strlen(str)); + + return var; +} + +/* ACPI 1.0b: 16.2.5.4 Type 2 Opcodes Encoding: DefRefOf */ +Aml *aml_refof(Aml *arg) +{ + Aml *var = aml_opcode(0x71 /* RefOfOp */); + aml_append(var, arg); + return var; +} + +/* ACPI 1.0b: 16.2.5.4 Type 2 Opcodes Encoding: DefDerefOf */ +Aml *aml_derefof(Aml *arg) +{ + Aml *var = aml_opcode(0x83 /* DerefOfOp */); + aml_append(var, arg); + return var; +} + +/* ACPI 1.0b: 16.2.5.4 Type 2 Opcodes Encoding: DefSizeOf */ +Aml *aml_sizeof(Aml *arg) +{ + Aml *var = aml_opcode(0x87 /* SizeOfOp */); + aml_append(var, arg); + return var; +} + +/* ACPI 1.0b: 16.2.5.2 Named Objects Encoding: DefMutex */ +Aml *aml_mutex(const char *name, uint8_t sync_level) +{ + Aml *var = aml_alloc(); + build_append_byte(var->buf, 0x5B); /* ExtOpPrefix */ + build_append_byte(var->buf, 0x01); /* MutexOp */ + build_append_namestring(var->buf, "%s", name); + assert(!(sync_level & 0xF0)); + build_append_byte(var->buf, sync_level); + return var; +} + +/* ACPI 1.0b: 16.2.5.4 Type 2 Opcodes Encoding: DefAcquire */ +Aml *aml_acquire(Aml *mutex, uint16_t timeout) +{ + Aml *var = aml_alloc(); + build_append_byte(var->buf, 0x5B); /* ExtOpPrefix */ + build_append_byte(var->buf, 0x23); /* AcquireOp */ + aml_append(var, mutex); + build_append_int_noprefix(var->buf, timeout, sizeof(timeout)); + return var; +} + +/* ACPI 1.0b: 16.2.5.3 Type 1 Opcodes Encoding: DefRelease */ +Aml *aml_release(Aml *mutex) +{ + Aml *var = aml_alloc(); + build_append_byte(var->buf, 0x5B); /* ExtOpPrefix */ + build_append_byte(var->buf, 0x27); /* ReleaseOp */ + aml_append(var, mutex); + return var; +} + +/* ACPI 1.0b: 16.2.5.1 Name Space Modifier Objects Encoding: 
DefAlias */ +Aml *aml_alias(const char *source_object, const char *alias_object) +{ + Aml *var = aml_opcode(0x06 /* AliasOp */); + aml_append(var, aml_name("%s", source_object)); + aml_append(var, aml_name("%s", alias_object)); + return var; +} + +/* ACPI 1.0b: 16.2.5.4 Type 2 Opcodes Encoding: DefConcat */ +Aml *aml_concatenate(Aml *source1, Aml *source2, Aml *target) +{ + return build_opcode_2arg_dst(0x73 /* ConcatOp */, source1, source2, + target); +} + +/* ACPI 1.0b: 16.2.5.4 Type 2 Opcodes Encoding: DefObjectType */ +Aml *aml_object_type(Aml *object) +{ + Aml *var = aml_opcode(0x8E /* ObjectTypeOp */); + aml_append(var, object); + return var; +} + +void acpi_table_begin(AcpiTable *desc, GArray *array) +{ + + desc->array = array; + desc->table_offset = array->len; + + /* + * ACPI spec 1.0b + * 5.2.3 System Description Table Header + */ + g_assert(strlen(desc->sig) == 4); + g_array_append_vals(array, desc->sig, 4); /* Signature */ + /* + * reserve space for Length field, which will be patched by + * acpi_table_end() when the table creation is finished. + */ + build_append_int_noprefix(array, 0, 4); /* Length */ + build_append_int_noprefix(array, desc->rev, 1); /* Revision */ + build_append_int_noprefix(array, 0, 1); /* Checksum */ + build_append_padded_str(array, desc->oem_id, 6, ' '); /* OEMID */ + /* OEM Table ID */ + build_append_padded_str(array, desc->oem_table_id, 8, ' '); + build_append_int_noprefix(array, 1, 4); /* OEM Revision */ + g_array_append_vals(array, ACPI_BUILD_APPNAME8, 4); /* Creator ID */ + build_append_int_noprefix(array, 1, 4); /* Creator Revision */ +} + +void acpi_table_end(BIOSLinker *linker, AcpiTable *desc) +{ + /* + * ACPI spec 1.0b + * 5.2.3 System Description Table Header + * Table 5-2 DESCRIPTION_HEADER Fields + */ + const unsigned checksum_offset = 9; + uint32_t table_len = desc->array->len - desc->table_offset; + uint32_t table_len_le = cpu_to_le32(table_len); + gchar *len_ptr = &desc->array->data[desc->table_offset + 4]; + + /* patch "Length" field that has been reserved by acpi_table_begin() + * to the actual length, i.e. 
accumulated table length from + * acpi_table_begin() till acpi_table_end() + */ + memcpy(len_ptr, &table_len_le, sizeof table_len_le); + + bios_linker_loader_add_checksum(linker, ACPI_BUILD_TABLE_FILE, + desc->table_offset, table_len, desc->table_offset + checksum_offset); +} + +void *acpi_data_push(GArray *table_data, unsigned size) +{ + unsigned off = table_data->len; + g_array_set_size(table_data, off + size); + return table_data->data + off; +} + +unsigned acpi_data_len(GArray *table) +{ + assert(g_array_get_element_size(table) == 1); + return table->len; +} + +void acpi_add_table(GArray *table_offsets, GArray *table_data) +{ + uint32_t offset = table_data->len; + g_array_append_val(table_offsets, offset); +} + +void acpi_build_tables_init(AcpiBuildTables *tables) +{ + tables->rsdp = g_array_new(false, true /* clear */, 1); + tables->table_data = g_array_new(false, true /* clear */, 1); + tables->tcpalog = g_array_new(false, true /* clear */, 1); + tables->vmgenid = g_array_new(false, true /* clear */, 1); + tables->hardware_errors = g_array_new(false, true /* clear */, 1); + tables->linker = bios_linker_loader_init(); +} + +void acpi_build_tables_cleanup(AcpiBuildTables *tables, bool mfre) +{ + bios_linker_loader_cleanup(tables->linker); + g_array_free(tables->rsdp, true); + g_array_free(tables->table_data, true); + g_array_free(tables->tcpalog, mfre); + g_array_free(tables->vmgenid, mfre); + g_array_free(tables->hardware_errors, mfre); +} + +/* + * ACPI spec 5.2.5.3 Root System Description Pointer (RSDP). + * (Revision 1.0 or later) + */ +void +build_rsdp(GArray *tbl, BIOSLinker *linker, AcpiRsdpData *rsdp_data) +{ + int tbl_off = tbl->len; /* Table offset in the RSDP file */ + + switch (rsdp_data->revision) { + case 0: + /* With ACPI 1.0, we must have an RSDT pointer */ + g_assert(rsdp_data->rsdt_tbl_offset); + break; + case 2: + /* With ACPI 2.0+, we must have an XSDT pointer */ + g_assert(rsdp_data->xsdt_tbl_offset); + break; + default: + /* Only revisions 0 (ACPI 1.0) and 2 (ACPI 2.0+) are valid for RSDP */ + g_assert_not_reached(); + } + + bios_linker_loader_alloc(linker, ACPI_BUILD_RSDP_FILE, tbl, 16, + true /* fseg memory */); + + g_array_append_vals(tbl, "RSD PTR ", 8); /* Signature */ + build_append_int_noprefix(tbl, 0, 1); /* Checksum */ + g_array_append_vals(tbl, rsdp_data->oem_id, 6); /* OEMID */ + build_append_int_noprefix(tbl, rsdp_data->revision, 1); /* Revision */ + build_append_int_noprefix(tbl, 0, 4); /* RsdtAddress */ + if (rsdp_data->rsdt_tbl_offset) { + /* RSDT address to be filled by guest linker */ + bios_linker_loader_add_pointer(linker, ACPI_BUILD_RSDP_FILE, + tbl_off + 16, 4, + ACPI_BUILD_TABLE_FILE, + *rsdp_data->rsdt_tbl_offset); + } + + /* Checksum to be filled by guest linker */ + bios_linker_loader_add_checksum(linker, ACPI_BUILD_RSDP_FILE, + tbl_off, 20, /* ACPI rev 1.0 RSDP size */ + 8); + + if (rsdp_data->revision == 0) { + /* ACPI 1.0 RSDP, we're done */ + return; + } + + build_append_int_noprefix(tbl, 36, 4); /* Length */ + + /* XSDT address to be filled by guest linker */ + build_append_int_noprefix(tbl, 0, 8); /* XsdtAddress */ + /* We already validated our xsdt pointer */ + bios_linker_loader_add_pointer(linker, ACPI_BUILD_RSDP_FILE, + tbl_off + 24, 8, + ACPI_BUILD_TABLE_FILE, + *rsdp_data->xsdt_tbl_offset); + + build_append_int_noprefix(tbl, 0, 1); /* Extended Checksum */ + build_append_int_noprefix(tbl, 0, 3); /* Reserved */ + + /* Extended checksum to be filled by Guest linker */ + bios_linker_loader_add_checksum(linker, ACPI_BUILD_RSDP_FILE, 
+                                    tbl_off, 36, /* ACPI rev 2.0 RSDP size */
+                                    32);
+}
+
+/*
+ * ACPI 1.0 Root System Description Table (RSDT)
+ */
+void
+build_rsdt(GArray *table_data, BIOSLinker *linker, GArray *table_offsets,
+           const char *oem_id, const char *oem_table_id)
+{
+    int i;
+    AcpiTable table = { .sig = "RSDT", .rev = 1,
+                        .oem_id = oem_id, .oem_table_id = oem_table_id };
+
+    acpi_table_begin(&table, table_data);
+    for (i = 0; i < table_offsets->len; ++i) {
+        uint32_t ref_tbl_offset = g_array_index(table_offsets, uint32_t, i);
+        uint32_t rsdt_entry_offset = table.array->len;
+
+        /* reserve space for entry */
+        build_append_int_noprefix(table.array, 0, 4);
+
+        /* mark position of RSDT entry to be filled by Guest linker */
+        bios_linker_loader_add_pointer(linker,
+            ACPI_BUILD_TABLE_FILE, rsdt_entry_offset, 4,
+            ACPI_BUILD_TABLE_FILE, ref_tbl_offset);
+    }
+    acpi_table_end(linker, &table);
+}
+
+/*
+ * ACPI 2.0 eXtended System Description Table (XSDT)
+ */
+void
+build_xsdt(GArray *table_data, BIOSLinker *linker, GArray *table_offsets,
+           const char *oem_id, const char *oem_table_id)
+{
+    int i;
+    AcpiTable table = { .sig = "XSDT", .rev = 1,
+                        .oem_id = oem_id, .oem_table_id = oem_table_id };
+
+    acpi_table_begin(&table, table_data);
+
+    for (i = 0; i < table_offsets->len; ++i) {
+        uint64_t ref_tbl_offset = g_array_index(table_offsets, uint32_t, i);
+        uint64_t xsdt_entry_offset = table.array->len;
+
+        /* reserve space for entry */
+        build_append_int_noprefix(table.array, 0, 8);
+
+        /* mark position of XSDT entry to be filled by Guest linker */
+        bios_linker_loader_add_pointer(linker,
+            ACPI_BUILD_TABLE_FILE, xsdt_entry_offset, 8,
+            ACPI_BUILD_TABLE_FILE, ref_tbl_offset);
+    }
+    acpi_table_end(linker, &table);
+}
+
+/*
+ * ACPI spec, Revision 4.0
+ * 5.2.16.2 Memory Affinity Structure
+ */
+void build_srat_memory(GArray *table_data, uint64_t base,
+                       uint64_t len, int node, MemoryAffinityFlags flags)
+{
+    build_append_int_noprefix(table_data, 1, 1); /* Type */
+    build_append_int_noprefix(table_data, 40, 1); /* Length */
+    build_append_int_noprefix(table_data, node, 4); /* Proximity Domain */
+    build_append_int_noprefix(table_data, 0, 2); /* Reserved */
+    build_append_int_noprefix(table_data, base, 4); /* Base Address Low */
+    /* Base Address High */
+    build_append_int_noprefix(table_data, base >> 32, 4);
+    build_append_int_noprefix(table_data, len, 4); /* Length Low */
+    build_append_int_noprefix(table_data, len >> 32, 4); /* Length High */
+    build_append_int_noprefix(table_data, 0, 4); /* Reserved */
+    build_append_int_noprefix(table_data, flags, 4); /* Flags */
+    build_append_int_noprefix(table_data, 0, 8); /* Reserved */
+}
+
+/*
+ * ACPI spec 5.2.17 System Locality Distance Information Table
+ * (Revision 2.0 or later)
+ */
+void build_slit(GArray *table_data, BIOSLinker *linker, MachineState *ms,
+                const char *oem_id, const char *oem_table_id)
+{
+    int i, j;
+    int nb_numa_nodes = ms->numa_state->num_nodes;
+    AcpiTable table = { .sig = "SLIT", .rev = 1,
+                        .oem_id = oem_id, .oem_table_id = oem_table_id };
+
+    acpi_table_begin(&table, table_data);
+
+    build_append_int_noprefix(table_data, nb_numa_nodes, 8);
+    for (i = 0; i < nb_numa_nodes; i++) {
+        for (j = 0; j < nb_numa_nodes; j++) {
+            assert(ms->numa_state->nodes[i].distance[j]);
+            build_append_int_noprefix(table_data,
+                                      ms->numa_state->nodes[i].distance[j],
+                                      1);
+        }
+    }
+    acpi_table_end(linker, &table);
+}
+
+/*
+ * ACPI spec, Revision 6.3
+ * 5.2.29.1 Processor hierarchy node structure (Type 0)
+ */
+static void build_processor_hierarchy_node(GArray *tbl,
uint32_t flags, + uint32_t parent, uint32_t id, + uint32_t *priv_rsrc, + uint32_t priv_num) +{ + int i; + + build_append_byte(tbl, 0); /* Type 0 - processor */ + build_append_byte(tbl, 20 + priv_num * 4); /* Length */ + build_append_int_noprefix(tbl, 0, 2); /* Reserved */ + build_append_int_noprefix(tbl, flags, 4); /* Flags */ + build_append_int_noprefix(tbl, parent, 4); /* Parent */ + build_append_int_noprefix(tbl, id, 4); /* ACPI Processor ID */ + + /* Number of private resources */ + build_append_int_noprefix(tbl, priv_num, 4); + + /* Private resources[N] */ + if (priv_num > 0) { + assert(priv_rsrc); + for (i = 0; i < priv_num; i++) { + build_append_int_noprefix(tbl, priv_rsrc[i], 4); + } + } +} + +/* + * ACPI spec, Revision 6.3 + * 5.2.29 Processor Properties Topology Table (PPTT) + */ +void build_pptt(GArray *table_data, BIOSLinker *linker, MachineState *ms, + const char *oem_id, const char *oem_table_id) +{ + int pptt_start = table_data->len; + int uid = 0; + int socket; + AcpiTable table = { .sig = "PPTT", .rev = 2, + .oem_id = oem_id, .oem_table_id = oem_table_id }; + + acpi_table_begin(&table, table_data); + + for (socket = 0; socket < ms->smp.sockets; socket++) { + uint32_t socket_offset = table_data->len - pptt_start; + int core; + + build_processor_hierarchy_node( + table_data, + /* + * Physical package - represents the boundary + * of a physical package + */ + (1 << 0), + 0, socket, NULL, 0); + + for (core = 0; core < ms->smp.cores; core++) { + uint32_t core_offset = table_data->len - pptt_start; + int thread; + + if (ms->smp.threads > 1) { + build_processor_hierarchy_node( + table_data, + (0 << 0), /* not a physical package */ + socket_offset, core, NULL, 0); + + for (thread = 0; thread < ms->smp.threads; thread++) { + build_processor_hierarchy_node( + table_data, + (1 << 1) | /* ACPI Processor ID valid */ + (1 << 2) | /* Processor is a Thread */ + (1 << 3), /* Node is a Leaf */ + core_offset, uid++, NULL, 0); + } + } else { + build_processor_hierarchy_node( + table_data, + (1 << 1) | /* ACPI Processor ID valid */ + (1 << 3), /* Node is a Leaf */ + socket_offset, uid++, NULL, 0); + } + } + } + + acpi_table_end(linker, &table); +} + +/* build rev1/rev3/rev5.1 FADT */ +void build_fadt(GArray *tbl, BIOSLinker *linker, const AcpiFadtData *f, + const char *oem_id, const char *oem_table_id) +{ + int off; + AcpiTable table = { .sig = "FACP", .rev = f->rev, + .oem_id = oem_id, .oem_table_id = oem_table_id }; + + acpi_table_begin(&table, tbl); + + /* FACS address to be filled by Guest linker at runtime */ + off = tbl->len; + build_append_int_noprefix(tbl, 0, 4); /* FIRMWARE_CTRL */ + if (f->facs_tbl_offset) { /* don't patch if not supported by platform */ + bios_linker_loader_add_pointer(linker, + ACPI_BUILD_TABLE_FILE, off, 4, + ACPI_BUILD_TABLE_FILE, *f->facs_tbl_offset); + } + + /* DSDT address to be filled by Guest linker at runtime */ + off = tbl->len; + build_append_int_noprefix(tbl, 0, 4); /* DSDT */ + if (f->dsdt_tbl_offset) { /* don't patch if not supported by platform */ + bios_linker_loader_add_pointer(linker, + ACPI_BUILD_TABLE_FILE, off, 4, + ACPI_BUILD_TABLE_FILE, *f->dsdt_tbl_offset); + } + + /* ACPI1.0: INT_MODEL, ACPI2.0+: Reserved */ + build_append_int_noprefix(tbl, f->int_model /* Multiple APIC */, 1); + /* Preferred_PM_Profile */ + build_append_int_noprefix(tbl, 0 /* Unspecified */, 1); + build_append_int_noprefix(tbl, f->sci_int, 2); /* SCI_INT */ + build_append_int_noprefix(tbl, f->smi_cmd, 4); /* SMI_CMD */ + build_append_int_noprefix(tbl, f->acpi_enable_cmd, 
1); /* ACPI_ENABLE */
+    build_append_int_noprefix(tbl, f->acpi_disable_cmd, 1); /* ACPI_DISABLE */
+    build_append_int_noprefix(tbl, 0 /* not supported */, 1); /* S4BIOS_REQ */
+    /* ACPI1.0: Reserved, ACPI2.0+: PSTATE_CNT */
+    build_append_int_noprefix(tbl, 0, 1);
+    build_append_int_noprefix(tbl, f->pm1a_evt.address, 4); /* PM1a_EVT_BLK */
+    build_append_int_noprefix(tbl, 0, 4); /* PM1b_EVT_BLK */
+    build_append_int_noprefix(tbl, f->pm1a_cnt.address, 4); /* PM1a_CNT_BLK */
+    build_append_int_noprefix(tbl, 0, 4); /* PM1b_CNT_BLK */
+    build_append_int_noprefix(tbl, 0, 4); /* PM2_CNT_BLK */
+    build_append_int_noprefix(tbl, f->pm_tmr.address, 4); /* PM_TMR_BLK */
+    build_append_int_noprefix(tbl, f->gpe0_blk.address, 4); /* GPE0_BLK */
+    build_append_int_noprefix(tbl, 0, 4); /* GPE1_BLK */
+    /* PM1_EVT_LEN */
+    build_append_int_noprefix(tbl, f->pm1a_evt.bit_width / 8, 1);
+    /* PM1_CNT_LEN */
+    build_append_int_noprefix(tbl, f->pm1a_cnt.bit_width / 8, 1);
+    build_append_int_noprefix(tbl, 0, 1); /* PM2_CNT_LEN */
+    build_append_int_noprefix(tbl, f->pm_tmr.bit_width / 8, 1); /* PM_TMR_LEN */
+    /* GPE0_BLK_LEN */
+    build_append_int_noprefix(tbl, f->gpe0_blk.bit_width / 8, 1);
+    build_append_int_noprefix(tbl, 0, 1); /* GPE1_BLK_LEN */
+    build_append_int_noprefix(tbl, 0, 1); /* GPE1_BASE */
+    build_append_int_noprefix(tbl, 0, 1); /* CST_CNT */
+    build_append_int_noprefix(tbl, f->plvl2_lat, 2); /* P_LVL2_LAT */
+    build_append_int_noprefix(tbl, f->plvl3_lat, 2); /* P_LVL3_LAT */
+    build_append_int_noprefix(tbl, 0, 2); /* FLUSH_SIZE */
+    build_append_int_noprefix(tbl, 0, 2); /* FLUSH_STRIDE */
+    build_append_int_noprefix(tbl, 0, 1); /* DUTY_OFFSET */
+    build_append_int_noprefix(tbl, 0, 1); /* DUTY_WIDTH */
+    build_append_int_noprefix(tbl, 0, 1); /* DAY_ALRM */
+    build_append_int_noprefix(tbl, 0, 1); /* MON_ALRM */
+    build_append_int_noprefix(tbl, f->rtc_century, 1); /* CENTURY */
+    build_append_int_noprefix(tbl, 0, 2); /* IAPC_BOOT_ARCH */
+    build_append_int_noprefix(tbl, 0, 1); /* Reserved */
+    build_append_int_noprefix(tbl, f->flags, 4); /* Flags */
+
+    if (f->rev == 1) {
+        goto done;
+    }
+
+    build_append_gas_from_struct(tbl, &f->reset_reg); /* RESET_REG */
+    build_append_int_noprefix(tbl, f->reset_val, 1); /* RESET_VALUE */
+    /* Since ACPI 5.1 */
+    if ((f->rev >= 6) || ((f->rev == 5) && f->minor_ver > 0)) {
+        build_append_int_noprefix(tbl, f->arm_boot_arch, 2); /* ARM_BOOT_ARCH */
+        /* FADT Minor Version */
+        build_append_int_noprefix(tbl, f->minor_ver, 1);
+    } else {
+        build_append_int_noprefix(tbl, 0, 3); /* Reserved up to ACPI 5.0 */
+    }
+    build_append_int_noprefix(tbl, 0, 8); /* X_FIRMWARE_CTRL */
+
+    /* X_DSDT address to be filled by Guest linker at runtime */
+    off = tbl->len;
+    build_append_int_noprefix(tbl, 0, 8); /* X_DSDT */
+    if (f->xdsdt_tbl_offset) {
+        bios_linker_loader_add_pointer(linker,
+            ACPI_BUILD_TABLE_FILE, off, 8,
+            ACPI_BUILD_TABLE_FILE, *f->xdsdt_tbl_offset);
+    }
+
+    build_append_gas_from_struct(tbl, &f->pm1a_evt); /* X_PM1a_EVT_BLK */
+    /* X_PM1b_EVT_BLK */
+    build_append_gas(tbl, AML_AS_SYSTEM_MEMORY, 0, 0, 0, 0);
+    build_append_gas_from_struct(tbl, &f->pm1a_cnt); /* X_PM1a_CNT_BLK */
+    /* X_PM1b_CNT_BLK */
+    build_append_gas(tbl, AML_AS_SYSTEM_MEMORY, 0, 0, 0, 0);
+    /* X_PM2_CNT_BLK */
+    build_append_gas(tbl, AML_AS_SYSTEM_MEMORY, 0, 0, 0, 0);
+    build_append_gas_from_struct(tbl, &f->pm_tmr); /* X_PM_TMR_BLK */
+    build_append_gas_from_struct(tbl, &f->gpe0_blk); /* X_GPE0_BLK */
+    build_append_gas(tbl, AML_AS_SYSTEM_MEMORY, 0, 0, 0, 0); /* X_GPE1_BLK */
+
+    if (f->rev <= 4)
{ + goto done; + } + + /* SLEEP_CONTROL_REG */ + build_append_gas_from_struct(tbl, &f->sleep_ctl); + /* SLEEP_STATUS_REG */ + build_append_gas_from_struct(tbl, &f->sleep_sts); + + /* TODO: extra fields need to be added to support revisions above rev5 */ + assert(f->rev == 5); + +done: + acpi_table_end(linker, &table); +} + +#ifdef CONFIG_TPM +/* + * build_tpm2 - Build the TPM2 table as specified in + * table 7: TCG Hardware Interface Description Table Format for TPM 2.0 + * of TCG ACPI Specification, Family “1.2” and “2.0”, Version 1.2, Rev 8 + */ +void build_tpm2(GArray *table_data, BIOSLinker *linker, GArray *tcpalog, + const char *oem_id, const char *oem_table_id) +{ + uint8_t start_method_params[12] = {}; + unsigned log_addr_offset; + uint64_t control_area_start_address; + TPMIf *tpmif = tpm_find(); + uint32_t start_method; + AcpiTable table = { .sig = "TPM2", .rev = 4, + .oem_id = oem_id, .oem_table_id = oem_table_id }; + + acpi_table_begin(&table, table_data); + + /* Platform Class */ + build_append_int_noprefix(table_data, TPM2_ACPI_CLASS_CLIENT, 2); + /* Reserved */ + build_append_int_noprefix(table_data, 0, 2); + if (TPM_IS_TIS_ISA(tpmif) || TPM_IS_TIS_SYSBUS(tpmif)) { + control_area_start_address = 0; + start_method = TPM2_START_METHOD_MMIO; + } else if (TPM_IS_CRB(tpmif)) { + control_area_start_address = TPM_CRB_ADDR_CTRL; + start_method = TPM2_START_METHOD_CRB; + } else { + g_assert_not_reached(); + } + /* Address of Control Area */ + build_append_int_noprefix(table_data, control_area_start_address, 8); + /* Start Method */ + build_append_int_noprefix(table_data, start_method, 4); + + /* Platform Specific Parameters */ + g_array_append_vals(table_data, &start_method_params, + ARRAY_SIZE(start_method_params)); + + /* Log Area Minimum Length */ + build_append_int_noprefix(table_data, TPM_LOG_AREA_MINIMUM_SIZE, 4); + + acpi_data_push(tcpalog, TPM_LOG_AREA_MINIMUM_SIZE); + bios_linker_loader_alloc(linker, ACPI_BUILD_TPMLOG_FILE, tcpalog, 1, + false); + + log_addr_offset = table_data->len; + + /* Log Area Start Address to be filled by Guest linker */ + build_append_int_noprefix(table_data, 0, 8); + bios_linker_loader_add_pointer(linker, ACPI_BUILD_TABLE_FILE, + log_addr_offset, 8, + ACPI_BUILD_TPMLOG_FILE, 0); + acpi_table_end(linker, &table); +} +#endif + +Aml *build_crs(PCIHostState *host, CrsRangeSet *range_set, uint32_t io_offset, + uint32_t mmio32_offset, uint64_t mmio64_offset, + uint16_t bus_nr_offset) +{ + Aml *crs = aml_resource_template(); + CrsRangeSet temp_range_set; + CrsRangeEntry *entry; + uint8_t max_bus = pci_bus_num(host->bus); + uint8_t type; + int devfn; + int i; + + crs_range_set_init(&temp_range_set); + for (devfn = 0; devfn < ARRAY_SIZE(host->bus->devices); devfn++) { + uint64_t range_base, range_limit; + PCIDevice *dev = host->bus->devices[devfn]; + + if (!dev) { + continue; + } + + for (i = 0; i < PCI_NUM_REGIONS; i++) { + PCIIORegion *r = &dev->io_regions[i]; + + range_base = r->addr; + range_limit = r->addr + r->size - 1; + + /* + * Work-around for old bioses + * that do not support multiple root buses + */ + if (!range_base || range_base > range_limit) { + continue; + } + + if (r->type & PCI_BASE_ADDRESS_SPACE_IO) { + crs_range_insert(temp_range_set.io_ranges, + range_base, range_limit); + } else { /* "memory" */ + uint64_t length = range_limit - range_base + 1; + if (range_limit <= UINT32_MAX && length <= UINT32_MAX) { + crs_range_insert(temp_range_set.mem_ranges, range_base, + range_limit); + } else { + crs_range_insert(temp_range_set.mem_64bit_ranges, 
+ range_base, range_limit); + } + } + } + + type = dev->config[PCI_HEADER_TYPE] & ~PCI_HEADER_TYPE_MULTI_FUNCTION; + if (type == PCI_HEADER_TYPE_BRIDGE) { + uint8_t subordinate = dev->config[PCI_SUBORDINATE_BUS]; + if (subordinate > max_bus) { + max_bus = subordinate; + } + + range_base = pci_bridge_get_base(dev, PCI_BASE_ADDRESS_SPACE_IO); + range_limit = pci_bridge_get_limit(dev, PCI_BASE_ADDRESS_SPACE_IO); + + /* + * Work-around for old bioses + * that do not support multiple root buses + */ + if (range_base && range_base <= range_limit) { + crs_range_insert(temp_range_set.io_ranges, + range_base, range_limit); + } + + range_base = + pci_bridge_get_base(dev, PCI_BASE_ADDRESS_SPACE_MEMORY); + range_limit = + pci_bridge_get_limit(dev, PCI_BASE_ADDRESS_SPACE_MEMORY); + + /* + * Work-around for old bioses + * that do not support multiple root buses + */ + if (range_base && range_base <= range_limit) { + uint64_t length = range_limit - range_base + 1; + if (range_limit <= UINT32_MAX && length <= UINT32_MAX) { + crs_range_insert(temp_range_set.mem_ranges, + range_base, range_limit); + } else { + crs_range_insert(temp_range_set.mem_64bit_ranges, + range_base, range_limit); + } + } + + range_base = + pci_bridge_get_base(dev, PCI_BASE_ADDRESS_MEM_PREFETCH); + range_limit = + pci_bridge_get_limit(dev, PCI_BASE_ADDRESS_MEM_PREFETCH); + + /* + * Work-around for old bioses + * that do not support multiple root buses + */ + if (range_base && range_base <= range_limit) { + uint64_t length = range_limit - range_base + 1; + if (range_limit <= UINT32_MAX && length <= UINT32_MAX) { + crs_range_insert(temp_range_set.mem_ranges, + range_base, range_limit); + } else { + crs_range_insert(temp_range_set.mem_64bit_ranges, + range_base, range_limit); + } + } + } + } + + crs_range_merge(temp_range_set.io_ranges); + for (i = 0; i < temp_range_set.io_ranges->len; i++) { + entry = g_ptr_array_index(temp_range_set.io_ranges, i); + aml_append(crs, + aml_dword_io(AML_MIN_FIXED, AML_MAX_FIXED, + AML_POS_DECODE, AML_ENTIRE_RANGE, + 0, entry->base, entry->limit, io_offset, + entry->limit - entry->base + 1)); + crs_range_insert(range_set->io_ranges, entry->base, entry->limit); + } + + crs_range_merge(temp_range_set.mem_ranges); + for (i = 0; i < temp_range_set.mem_ranges->len; i++) { + entry = g_ptr_array_index(temp_range_set.mem_ranges, i); + assert(entry->limit <= UINT32_MAX && + (entry->limit - entry->base + 1) <= UINT32_MAX); + aml_append(crs, + aml_dword_memory(AML_POS_DECODE, AML_MIN_FIXED, + AML_MAX_FIXED, AML_NON_CACHEABLE, + AML_READ_WRITE, + 0, entry->base, entry->limit, mmio32_offset, + entry->limit - entry->base + 1)); + crs_range_insert(range_set->mem_ranges, entry->base, entry->limit); + } + + crs_range_merge(temp_range_set.mem_64bit_ranges); + for (i = 0; i < temp_range_set.mem_64bit_ranges->len; i++) { + entry = g_ptr_array_index(temp_range_set.mem_64bit_ranges, i); + aml_append(crs, + aml_qword_memory(AML_POS_DECODE, AML_MIN_FIXED, + AML_MAX_FIXED, AML_NON_CACHEABLE, + AML_READ_WRITE, + 0, entry->base, entry->limit, mmio64_offset, + entry->limit - entry->base + 1)); + crs_range_insert(range_set->mem_64bit_ranges, + entry->base, entry->limit); + } + + crs_range_set_free(&temp_range_set); + + aml_append(crs, + aml_word_bus_number(AML_MIN_FIXED, AML_MAX_FIXED, AML_POS_DECODE, + 0, + pci_bus_num(host->bus), + max_bus, + bus_nr_offset, + max_bus - pci_bus_num(host->bus) + 1)); + + return crs; +} + +/* ACPI 5.0: 6.4.3.8.2 Serial Bus Connection Descriptors */ +static Aml *aml_serial_bus_device(uint8_t 
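+/*
+ * In the descriptor built below, the 2-byte length field counts the nine
+ * fixed bytes that follow the 3-byte large-resource header (revision ID,
+ * source index, bus type, general flags, 2-byte type-specific flags, type
+ * revision, 2-byte data length) plus the type-specific data and the
+ * resource source string: hence data_length + resource_source_len + 9.
+ */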
serial_bus_type, uint8_t flags, + uint16_t type_flags, + uint8_t revid, uint16_t data_length, + uint16_t resource_source_len) +{ + Aml *var = aml_alloc(); + uint16_t length = data_length + resource_source_len + 9; + + build_append_byte(var->buf, 0x8e); /* Serial Bus Connection Descriptor */ + build_append_int_noprefix(var->buf, length, sizeof(length)); + build_append_byte(var->buf, 1); /* Revision ID */ + build_append_byte(var->buf, 0); /* Resource Source Index */ + build_append_byte(var->buf, serial_bus_type); /* Serial Bus Type */ + build_append_byte(var->buf, flags); /* General Flags */ + build_append_int_noprefix(var->buf, type_flags, /* Type Specific Flags */ + sizeof(type_flags)); + build_append_byte(var->buf, revid); /* Type Specification Revision ID */ + build_append_int_noprefix(var->buf, data_length, sizeof(data_length)); + + return var; +} + +/* ACPI 5.0: 6.4.3.8.2.1 I2C Serial Bus Connection Resource Descriptor */ +Aml *aml_i2c_serial_bus_device(uint16_t address, const char *resource_source) +{ + uint16_t resource_source_len = strlen(resource_source) + 1; + Aml *var = aml_serial_bus_device(AML_SERIAL_BUS_TYPE_I2C, 0, 0, 1, + 6, resource_source_len); + + /* Connection Speed. Just set to 100K for now, it doesn't really matter. */ + build_append_int_noprefix(var->buf, 100000, 4); + build_append_int_noprefix(var->buf, address, sizeof(address)); + + /* This is a string, not a name, so just copy it directly in. */ + g_array_append_vals(var->buf, resource_source, resource_source_len); + + return var; +} diff --git a/hw/acpi/bios-linker-loader.c b/hw/acpi/bios-linker-loader.c new file mode 100644 index 000000000..108061828 --- /dev/null +++ b/hw/acpi/bios-linker-loader.c @@ -0,0 +1,351 @@ +/* Dynamic linker/loader of ACPI tables + * + * Copyright (C) 2013 Red Hat Inc + * + * Author: Michael S. Tsirkin + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . + */ + +#include "qemu/osdep.h" +#include "hw/acpi/bios-linker-loader.h" +#include "hw/nvram/fw_cfg.h" + +#include "qemu/bswap.h" + +/* + * Linker/loader is a paravirtualized interface that passes commands to guest. + * The commands can be used to request guest to + * - allocate memory chunks and initialize them from QEMU FW CFG files + * - link allocated chunks by storing pointer to one chunk into another + * - calculate ACPI checksum of part of the chunk and store into same chunk + */ +#define BIOS_LINKER_LOADER_FILESZ FW_CFG_MAX_FILE_PATH + +struct BiosLinkerLoaderEntry { + uint32_t command; + union { + /* + * COMMAND_ALLOCATE - allocate a table from @alloc.file + * subject to @alloc.align alignment (must be power of 2) + * and @alloc.zone (can be HIGH or FSEG) requirements. + * + * Must appear exactly once for each file, and before + * this file is referenced by any other command. 
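+ *
+ * A typical command sequence for one file is therefore a single
+ * COMMAND_ALLOCATE followed by any number of ADD_POINTER,
+ * ADD_CHECKSUM or WRITE_POINTER commands that reference it; the
+ * code below guarantees this ordering by prepending ALLOCATE
+ * entries to the command blob and appending all others.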
+ */ + struct { + char file[BIOS_LINKER_LOADER_FILESZ]; + uint32_t align; + uint8_t zone; + } alloc; + + /* + * COMMAND_ADD_POINTER - patch the table (originating from + * @dest_file) at @pointer.offset, by adding a pointer to the table + * originating from @src_file. 1,2,4 or 8 byte unsigned + * addition is used depending on @pointer.size. + */ + struct { + char dest_file[BIOS_LINKER_LOADER_FILESZ]; + char src_file[BIOS_LINKER_LOADER_FILESZ]; + uint32_t offset; + uint8_t size; + } pointer; + + /* + * COMMAND_ADD_CHECKSUM - calculate checksum of the range specified by + * @cksum_start and @cksum_length fields, + * and then add the value at @cksum.offset. + * Checksum simply sums -X for each byte X in the range + * using 8-bit math. + */ + struct { + char file[BIOS_LINKER_LOADER_FILESZ]; + uint32_t offset; + uint32_t start; + uint32_t length; + } cksum; + + /* + * COMMAND_WRITE_POINTER - write the fw_cfg file (originating from + * @dest_file) at @wr_pointer.offset, by adding a pointer to + * @src_offset within the table originating from @src_file. + * 1,2,4 or 8 byte unsigned addition is used depending on + * @wr_pointer.size. + */ + struct { + char dest_file[BIOS_LINKER_LOADER_FILESZ]; + char src_file[BIOS_LINKER_LOADER_FILESZ]; + uint32_t dst_offset; + uint32_t src_offset; + uint8_t size; + } wr_pointer; + + /* padding */ + char pad[124]; + }; +} QEMU_PACKED; +typedef struct BiosLinkerLoaderEntry BiosLinkerLoaderEntry; + +enum { + BIOS_LINKER_LOADER_COMMAND_ALLOCATE = 0x1, + BIOS_LINKER_LOADER_COMMAND_ADD_POINTER = 0x2, + BIOS_LINKER_LOADER_COMMAND_ADD_CHECKSUM = 0x3, + BIOS_LINKER_LOADER_COMMAND_WRITE_POINTER = 0x4, +}; + +enum { + BIOS_LINKER_LOADER_ALLOC_ZONE_HIGH = 0x1, + BIOS_LINKER_LOADER_ALLOC_ZONE_FSEG = 0x2, +}; + +/* + * BiosLinkerFileEntry: + * + * An internal type used for book-keeping file entries + */ +typedef struct BiosLinkerFileEntry { + char *name; /* file name */ + GArray *blob; /* data accosiated with @name */ +} BiosLinkerFileEntry; + +/* + * bios_linker_loader_init: allocate a new linker object instance. + * + * After initialization, linker commands can be added, and will + * be stored in the linker.cmd_blob array. + */ +BIOSLinker *bios_linker_loader_init(void) +{ + BIOSLinker *linker = g_new(BIOSLinker, 1); + + linker->cmd_blob = g_array_new(false, true /* clear */, 1); + linker->file_list = g_array_new(false, true /* clear */, + sizeof(BiosLinkerFileEntry)); + return linker; +} + +/* Free linker wrapper */ +void bios_linker_loader_cleanup(BIOSLinker *linker) +{ + int i; + BiosLinkerFileEntry *entry; + + g_array_free(linker->cmd_blob, true); + + for (i = 0; i < linker->file_list->len; i++) { + entry = &g_array_index(linker->file_list, BiosLinkerFileEntry, i); + g_free(entry->name); + } + g_array_free(linker->file_list, true); + g_free(linker); +} + +static const BiosLinkerFileEntry * +bios_linker_find_file(const BIOSLinker *linker, const char *name) +{ + int i; + BiosLinkerFileEntry *entry; + + for (i = 0; i < linker->file_list->len; i++) { + entry = &g_array_index(linker->file_list, BiosLinkerFileEntry, i); + if (!strcmp(entry->name, name)) { + return entry; + } + } + return NULL; +} + +/* + * board code must realize fw_cfg first, as a fixed device, before + * another device realize function call bios_linker_loader_can_write_pointer() + */ +bool bios_linker_loader_can_write_pointer(void) +{ + FWCfgState *fw_cfg = fw_cfg_find(); + return fw_cfg && fw_cfg_dma_enabled(fw_cfg); +} + +/* + * bios_linker_loader_alloc: ask guest to load file into guest memory. 
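+ *
+ * Example (values are illustrative): allocate the main table blob
+ * with 64-byte alignment, outside the FSEG zone:
+ *
+ *   bios_linker_loader_alloc(linker, "etc/acpi/tables", tables_blob,
+ *                            64, false);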
+ * + * @linker: linker object instance + * @file_name: name of the file blob to be loaded + * @file_blob: pointer to blob corresponding to @file_name + * @alloc_align: required minimal alignment in bytes. Must be a power of 2. + * @alloc_fseg: request allocation in FSEG zone (useful for the RSDP ACPI table) + * + * Note: this command must precede any other linker command using this file. + */ +void bios_linker_loader_alloc(BIOSLinker *linker, + const char *file_name, + GArray *file_blob, + uint32_t alloc_align, + bool alloc_fseg) +{ + BiosLinkerLoaderEntry entry; + BiosLinkerFileEntry file = { g_strdup(file_name), file_blob}; + + assert(!(alloc_align & (alloc_align - 1))); + + assert(!bios_linker_find_file(linker, file_name)); + g_array_append_val(linker->file_list, file); + + memset(&entry, 0, sizeof entry); + strncpy(entry.alloc.file, file_name, sizeof entry.alloc.file - 1); + entry.command = cpu_to_le32(BIOS_LINKER_LOADER_COMMAND_ALLOCATE); + entry.alloc.align = cpu_to_le32(alloc_align); + entry.alloc.zone = alloc_fseg ? BIOS_LINKER_LOADER_ALLOC_ZONE_FSEG : + BIOS_LINKER_LOADER_ALLOC_ZONE_HIGH; + + /* Alloc entries must come first, so prepend them */ + g_array_prepend_vals(linker->cmd_blob, &entry, sizeof entry); +} + +/* + * bios_linker_loader_add_checksum: ask guest to add checksum of ACPI + * table in the specified file at the specified offset. + * + * Checksum calculation simply sums -X for each byte X in the range + * using 8-bit math (i.e. ACPI checksum). + * + * @linker: linker object instance + * @file: file that includes the checksum to be calculated + * and the data to be checksummed + * @start_offset, @size: range of data in the file to checksum, + * relative to the start of file blob + * @checksum_offset: location of the checksum to be patched within file blob, + * relative to the start of file blob + */ +void bios_linker_loader_add_checksum(BIOSLinker *linker, const char *file_name, + unsigned start_offset, unsigned size, + unsigned checksum_offset) +{ + BiosLinkerLoaderEntry entry; + const BiosLinkerFileEntry *file = bios_linker_find_file(linker, file_name); + + assert(file); + assert(start_offset < file->blob->len); + assert(start_offset + size <= file->blob->len); + assert(checksum_offset >= start_offset); + assert(checksum_offset + 1 <= start_offset + size); + + *(file->blob->data + checksum_offset) = 0; + memset(&entry, 0, sizeof entry); + strncpy(entry.cksum.file, file_name, sizeof entry.cksum.file - 1); + entry.command = cpu_to_le32(BIOS_LINKER_LOADER_COMMAND_ADD_CHECKSUM); + entry.cksum.offset = cpu_to_le32(checksum_offset); + entry.cksum.start = cpu_to_le32(start_offset); + entry.cksum.length = cpu_to_le32(size); + + g_array_append_vals(linker->cmd_blob, &entry, sizeof entry); +} + +/* + * bios_linker_loader_add_pointer: ask guest to patch address in + * destination file with a pointer to source file + * + * @linker: linker object instance + * @dest_file: destination file that must be changed + * @dst_patched_offset: location within destination file blob to be patched + * with the pointer to @src_file+@src_offset (i.e. 
source
+ *                      blob allocated in guest memory + @src_offset), in bytes
+ * @dst_patched_size: size of the pointer to be patched
+ *                      at @dst_patched_offset in @dest_file blob, in bytes
+ * @src_file: source file whose address must be taken
+ * @src_offset: location within source file blob to which
+ *              @dest_file+@dst_patched_offset will point to after
+ *              firmware has executed the ADD_POINTER command
+ */
+void bios_linker_loader_add_pointer(BIOSLinker *linker,
+                                    const char *dest_file,
+                                    uint32_t dst_patched_offset,
+                                    uint8_t dst_patched_size,
+                                    const char *src_file,
+                                    uint32_t src_offset)
+{
+    uint64_t le_src_offset;
+    BiosLinkerLoaderEntry entry;
+    const BiosLinkerFileEntry *dst_file =
+        bios_linker_find_file(linker, dest_file);
+    const BiosLinkerFileEntry *source_file =
+        bios_linker_find_file(linker, src_file);
+
+    assert(dst_file);
+    assert(source_file);
+    assert(dst_patched_offset < dst_file->blob->len);
+    assert(dst_patched_offset + dst_patched_size <= dst_file->blob->len);
+    assert(src_offset < source_file->blob->len);
+
+    memset(&entry, 0, sizeof entry);
+    strncpy(entry.pointer.dest_file, dest_file,
+            sizeof entry.pointer.dest_file - 1);
+    strncpy(entry.pointer.src_file, src_file,
+            sizeof entry.pointer.src_file - 1);
+    entry.command = cpu_to_le32(BIOS_LINKER_LOADER_COMMAND_ADD_POINTER);
+    entry.pointer.offset = cpu_to_le32(dst_patched_offset);
+    entry.pointer.size = dst_patched_size;
+    assert(dst_patched_size == 1 || dst_patched_size == 2 ||
+           dst_patched_size == 4 || dst_patched_size == 8);
+
+    le_src_offset = cpu_to_le64(src_offset);
+    memcpy(dst_file->blob->data + dst_patched_offset,
+           &le_src_offset, dst_patched_size);
+
+    g_array_append_vals(linker->cmd_blob, &entry, sizeof entry);
+}
+
+/*
+ * bios_linker_loader_write_pointer: ask guest to write a pointer to the
+ * source file into the destination file, and write it back to QEMU via
+ * fw_cfg DMA.
+ *
+ * @linker: linker object instance
+ * @dest_file: destination file that must be written
+ * @dst_patched_offset: location within destination file blob to be patched
+ *                      with the pointer to @src_file, in bytes
+ * @dst_patched_size: size of the pointer to be patched
+ *                      at @dst_patched_offset in @dest_file blob, in bytes
+ * @src_file: source file whose address must be taken
+ * @src_offset: location within source file blob to which
+ *              @dest_file+@dst_patched_offset will point to after
+ *              firmware has executed the WRITE_POINTER command
+ */
+void bios_linker_loader_write_pointer(BIOSLinker *linker,
+                                      const char *dest_file,
+                                      uint32_t dst_patched_offset,
+                                      uint8_t dst_patched_size,
+                                      const char *src_file,
+                                      uint32_t src_offset)
+{
+    BiosLinkerLoaderEntry entry;
+    const BiosLinkerFileEntry *source_file =
+        bios_linker_find_file(linker, src_file);
+
+    assert(source_file);
+    assert(src_offset < source_file->blob->len);
+    memset(&entry, 0, sizeof entry);
+    strncpy(entry.wr_pointer.dest_file, dest_file,
+            sizeof entry.wr_pointer.dest_file - 1);
+    strncpy(entry.wr_pointer.src_file, src_file,
+            sizeof entry.wr_pointer.src_file - 1);
+    entry.command = cpu_to_le32(BIOS_LINKER_LOADER_COMMAND_WRITE_POINTER);
+    entry.wr_pointer.dst_offset = cpu_to_le32(dst_patched_offset);
+    entry.wr_pointer.src_offset = cpu_to_le32(src_offset);
+    entry.wr_pointer.size = dst_patched_size;
+    assert(dst_patched_size == 1 || dst_patched_size == 2 ||
+           dst_patched_size == 4 || dst_patched_size == 8);
+
+    g_array_append_vals(linker->cmd_blob, &entry, sizeof entry);
+}
diff --git a/hw/acpi/core.c b/hw/acpi/core.c
new file mode 100644
index 000000000..1e004d007
--- /dev/null
+++ b/hw/acpi/core.c
@@ -0,0 +1,738 @@
+/*
+ * ACPI implementation
+ *
+ * Copyright (c) 2006 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License version 2.1 as published by the Free Software Foundation.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/>
+ *
+ * Contributions after 2012-01-13 are licensed under the terms of the
+ * GNU GPL, version 2 or (at your option) any later version.
+ */ + +#include "qemu/osdep.h" +#include "hw/irq.h" +#include "hw/acpi/acpi.h" +#include "hw/nvram/fw_cfg.h" +#include "qemu/config-file.h" +#include "qapi/error.h" +#include "qapi/opts-visitor.h" +#include "qapi/qapi-events-run-state.h" +#include "qapi/qapi-visit-acpi.h" +#include "qemu/error-report.h" +#include "qemu/module.h" +#include "qemu/option.h" +#include "sysemu/runstate.h" + +struct acpi_table_header { + uint16_t _length; /* our length, not actual part of the hdr */ + /* allows easier parsing for fw_cfg clients */ + char sig[4] + QEMU_NONSTRING; /* ACPI signature (4 ASCII characters) */ + uint32_t length; /* Length of table, in bytes, including header */ + uint8_t revision; /* ACPI Specification minor version # */ + uint8_t checksum; /* To make sum of entire table == 0 */ + char oem_id[6] + QEMU_NONSTRING; /* OEM identification */ + char oem_table_id[8] + QEMU_NONSTRING; /* OEM table identification */ + uint32_t oem_revision; /* OEM revision number */ + char asl_compiler_id[4] + QEMU_NONSTRING; /* ASL compiler vendor ID */ + uint32_t asl_compiler_revision; /* ASL compiler revision number */ +} QEMU_PACKED; + +#define ACPI_TABLE_HDR_SIZE sizeof(struct acpi_table_header) +#define ACPI_TABLE_PFX_SIZE sizeof(uint16_t) /* size of the extra prefix */ + +static const char unsigned dfl_hdr[ACPI_TABLE_HDR_SIZE - ACPI_TABLE_PFX_SIZE] = + "QEMU\0\0\0\0\1\0" /* sig (4), len(4), revno (1), csum (1) */ + "QEMUQEQEMUQEMU\1\0\0\0" /* OEM id (6), table (8), revno (4) */ + "QEMU\1\0\0\0" /* ASL compiler ID (4), version (4) */ + ; + +char unsigned *acpi_tables; +size_t acpi_tables_len; + +static QemuOptsList qemu_acpi_opts = { + .name = "acpi", + .implied_opt_name = "data", + .head = QTAILQ_HEAD_INITIALIZER(qemu_acpi_opts.head), + .desc = { { 0 } } /* validated with OptsVisitor */ +}; + +static void acpi_register_config(void) +{ + qemu_add_opts(&qemu_acpi_opts); +} + +opts_init(acpi_register_config); + +static int acpi_checksum(const uint8_t *data, int len) +{ + int sum, i; + sum = 0; + for (i = 0; i < len; i++) { + sum += data[i]; + } + return (-sum) & 0xff; +} + + +/* Install a copy of the ACPI table specified in @blob. + * + * If @has_header is set, @blob starts with the System Description Table Header + * structure. Otherwise, "dfl_hdr" is prepended. In any case, each header field + * is optionally overwritten from @hdrs. + * + * It is valid to call this function with + * (@blob == NULL && bloblen == 0 && !has_header). + * + * @hdrs->file and @hdrs->data are ignored. + * + * SIZE_MAX is considered "infinity" in this function. + * + * The number of tables that can be installed is not limited, but the 16-bit + * counter at the beginning of "acpi_tables" wraps around after UINT16_MAX. + */ +static void acpi_table_install(const char unsigned *blob, size_t bloblen, + bool has_header, + const struct AcpiTableOptions *hdrs, + Error **errp) +{ + size_t body_start; + const char unsigned *hdr_src; + size_t body_size, acpi_payload_size; + struct acpi_table_header *ext_hdr; + unsigned changed_fields; + + /* Calculate where the ACPI table body starts within the blob, plus where + * to copy the ACPI table header from. 
+ */ + if (has_header) { + /* _length | ACPI header in blob | blob body + * ^^^^^^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^^^^^^ ^^^^^^^^^ + * ACPI_TABLE_PFX_SIZE sizeof dfl_hdr body_size + * == body_start + * + * ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + * acpi_payload_size == bloblen + */ + body_start = sizeof dfl_hdr; + + if (bloblen < body_start) { + error_setg(errp, "ACPI table claiming to have header is too " + "short, available: %zu, expected: %zu", bloblen, + body_start); + return; + } + hdr_src = blob; + } else { + /* _length | ACPI header in template | blob body + * ^^^^^^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^^^^^^^^^^ ^^^^^^^^^^ + * ACPI_TABLE_PFX_SIZE sizeof dfl_hdr body_size + * == bloblen + * + * ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + * acpi_payload_size + */ + body_start = 0; + hdr_src = dfl_hdr; + } + body_size = bloblen - body_start; + acpi_payload_size = sizeof dfl_hdr + body_size; + + if (acpi_payload_size > UINT16_MAX) { + error_setg(errp, "ACPI table too big, requested: %zu, max: %u", + acpi_payload_size, (unsigned)UINT16_MAX); + return; + } + + /* We won't fail from here on. Initialize / extend the globals. */ + if (acpi_tables == NULL) { + acpi_tables_len = sizeof(uint16_t); + acpi_tables = g_malloc0(acpi_tables_len); + } + + acpi_tables = g_realloc(acpi_tables, acpi_tables_len + + ACPI_TABLE_PFX_SIZE + + sizeof dfl_hdr + body_size); + + ext_hdr = (struct acpi_table_header *)(acpi_tables + acpi_tables_len); + acpi_tables_len += ACPI_TABLE_PFX_SIZE; + + memcpy(acpi_tables + acpi_tables_len, hdr_src, sizeof dfl_hdr); + acpi_tables_len += sizeof dfl_hdr; + + if (blob != NULL) { + memcpy(acpi_tables + acpi_tables_len, blob + body_start, body_size); + acpi_tables_len += body_size; + } + + /* increase number of tables */ + stw_le_p(acpi_tables, lduw_le_p(acpi_tables) + 1u); + + /* Update the header fields. The strings need not be NUL-terminated. 
*/ + changed_fields = 0; + ext_hdr->_length = cpu_to_le16(acpi_payload_size); + + if (hdrs->has_sig) { + strncpy(ext_hdr->sig, hdrs->sig, sizeof ext_hdr->sig); + ++changed_fields; + } + + if (has_header && le32_to_cpu(ext_hdr->length) != acpi_payload_size) { + warn_report("ACPI table has wrong length, header says " + "%" PRIu32 ", actual size %zu bytes", + le32_to_cpu(ext_hdr->length), acpi_payload_size); + } + ext_hdr->length = cpu_to_le32(acpi_payload_size); + + if (hdrs->has_rev) { + ext_hdr->revision = hdrs->rev; + ++changed_fields; + } + + ext_hdr->checksum = 0; + + if (hdrs->has_oem_id) { + strncpy(ext_hdr->oem_id, hdrs->oem_id, sizeof ext_hdr->oem_id); + ++changed_fields; + } + if (hdrs->has_oem_table_id) { + strncpy(ext_hdr->oem_table_id, hdrs->oem_table_id, + sizeof ext_hdr->oem_table_id); + ++changed_fields; + } + if (hdrs->has_oem_rev) { + ext_hdr->oem_revision = cpu_to_le32(hdrs->oem_rev); + ++changed_fields; + } + if (hdrs->has_asl_compiler_id) { + strncpy(ext_hdr->asl_compiler_id, hdrs->asl_compiler_id, + sizeof ext_hdr->asl_compiler_id); + ++changed_fields; + } + if (hdrs->has_asl_compiler_rev) { + ext_hdr->asl_compiler_revision = cpu_to_le32(hdrs->asl_compiler_rev); + ++changed_fields; + } + + if (!has_header && changed_fields == 0) { + warn_report("ACPI table: no headers are specified"); + } + + /* recalculate checksum */ + ext_hdr->checksum = acpi_checksum((const char unsigned *)ext_hdr + + ACPI_TABLE_PFX_SIZE, acpi_payload_size); +} + +void acpi_table_add(const QemuOpts *opts, Error **errp) +{ + AcpiTableOptions *hdrs = NULL; + char **pathnames = NULL; + char **cur; + size_t bloblen = 0; + char unsigned *blob = NULL; + + { + Visitor *v; + + v = opts_visitor_new(opts); + visit_type_AcpiTableOptions(v, NULL, &hdrs, errp); + visit_free(v); + } + + if (!hdrs) { + goto out; + } + if (hdrs->has_file == hdrs->has_data) { + error_setg(errp, "'-acpitable' requires one of 'data' or 'file'"); + goto out; + } + + pathnames = g_strsplit(hdrs->has_file ? 
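+    /*
+     * Typical command lines this parses (file names are illustrative):
+     * a complete table taken from one file,
+     *
+     *   -acpitable file=slic.bin
+     *
+     * or a header built from options around a raw body blob:
+     *
+     *   -acpitable sig=SLIC,oem_id=MYOEM,data=body.bin
+     */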
hdrs->file : hdrs->data, ":", 0); + if (pathnames == NULL || pathnames[0] == NULL) { + error_setg(errp, "'-acpitable' requires at least one pathname"); + goto out; + } + + /* now read in the data files, reallocating buffer as needed */ + for (cur = pathnames; *cur; ++cur) { + int fd = open(*cur, O_RDONLY | O_BINARY); + + if (fd < 0) { + error_setg(errp, "can't open file %s: %s", *cur, strerror(errno)); + goto out; + } + + for (;;) { + char unsigned data[8192]; + ssize_t r; + + r = read(fd, data, sizeof data); + if (r == 0) { + break; + } else if (r > 0) { + blob = g_realloc(blob, bloblen + r); + memcpy(blob + bloblen, data, r); + bloblen += r; + } else if (errno != EINTR) { + error_setg(errp, "can't read file %s: %s", *cur, + strerror(errno)); + close(fd); + goto out; + } + } + + close(fd); + } + + acpi_table_install(blob, bloblen, hdrs->has_file, hdrs, errp); + +out: + g_free(blob); + g_strfreev(pathnames); + qapi_free_AcpiTableOptions(hdrs); +} + +unsigned acpi_table_len(void *current) +{ + struct acpi_table_header *hdr = current - sizeof(hdr->_length); + return hdr->_length; +} + +static +void *acpi_table_hdr(void *h) +{ + struct acpi_table_header *hdr = h; + return &hdr->sig; +} + +uint8_t *acpi_table_first(void) +{ + if (!acpi_tables) { + return NULL; + } + return acpi_table_hdr(acpi_tables + ACPI_TABLE_PFX_SIZE); +} + +uint8_t *acpi_table_next(uint8_t *current) +{ + uint8_t *next = current + acpi_table_len(current); + + if (next - acpi_tables >= acpi_tables_len) { + return NULL; + } else { + return acpi_table_hdr(next); + } +} + +int acpi_get_slic_oem(AcpiSlicOem *oem) +{ + uint8_t *u; + + for (u = acpi_table_first(); u; u = acpi_table_next(u)) { + struct acpi_table_header *hdr = (void *)(u - sizeof(hdr->_length)); + + if (memcmp(hdr->sig, "SLIC", 4) == 0) { + oem->id = hdr->oem_id; + oem->table_id = hdr->oem_table_id; + return 0; + } + } + return -1; +} + +static void acpi_notify_wakeup(Notifier *notifier, void *data) +{ + ACPIREGS *ar = container_of(notifier, ACPIREGS, wakeup); + WakeupReason *reason = data; + + switch (*reason) { + case QEMU_WAKEUP_REASON_RTC: + ar->pm1.evt.sts |= + (ACPI_BITMASK_WAKE_STATUS | ACPI_BITMASK_RT_CLOCK_STATUS); + break; + case QEMU_WAKEUP_REASON_PMTIMER: + ar->pm1.evt.sts |= + (ACPI_BITMASK_WAKE_STATUS | ACPI_BITMASK_TIMER_STATUS); + break; + case QEMU_WAKEUP_REASON_OTHER: + /* ACPI_BITMASK_WAKE_STATUS should be set on resume. + Pretend that resume was caused by power button */ + ar->pm1.evt.sts |= + (ACPI_BITMASK_WAKE_STATUS | ACPI_BITMASK_POWER_BUTTON_STATUS); + break; + default: + break; + } +} + +/* ACPI PM1a EVT */ +uint16_t acpi_pm1_evt_get_sts(ACPIREGS *ar) +{ + /* Compare ns-clock, not PM timer ticks, because + acpi_pm_tmr_update function uses ns for setting the timer. 
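+       Concretely, the comparison below scales overflow_time (in PM
+       timer ticks) to nanoseconds with muldiv64(overflow_time,
+       NANOSECONDS_PER_SECOND, PM_TIMER_FREQUENCY), and TMR_STS is
+       latched once the virtual clock has passed that instant.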
*/ + int64_t d = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + if (d >= muldiv64(ar->tmr.overflow_time, + NANOSECONDS_PER_SECOND, PM_TIMER_FREQUENCY)) { + ar->pm1.evt.sts |= ACPI_BITMASK_TIMER_STATUS; + } + return ar->pm1.evt.sts; +} + +static void acpi_pm1_evt_write_sts(ACPIREGS *ar, uint16_t val) +{ + uint16_t pm1_sts = acpi_pm1_evt_get_sts(ar); + if (pm1_sts & val & ACPI_BITMASK_TIMER_STATUS) { + /* if TMRSTS is reset, then compute the new overflow time */ + acpi_pm_tmr_calc_overflow_time(ar); + } + ar->pm1.evt.sts &= ~val; +} + +static void acpi_pm1_evt_write_en(ACPIREGS *ar, uint16_t val) +{ + ar->pm1.evt.en = val; + qemu_system_wakeup_enable(QEMU_WAKEUP_REASON_RTC, + val & ACPI_BITMASK_RT_CLOCK_ENABLE); + qemu_system_wakeup_enable(QEMU_WAKEUP_REASON_PMTIMER, + val & ACPI_BITMASK_TIMER_ENABLE); +} + +void acpi_pm1_evt_power_down(ACPIREGS *ar) +{ + if (ar->pm1.evt.en & ACPI_BITMASK_POWER_BUTTON_ENABLE) { + ar->pm1.evt.sts |= ACPI_BITMASK_POWER_BUTTON_STATUS; + ar->tmr.update_sci(ar); + } +} + +void acpi_pm1_evt_reset(ACPIREGS *ar) +{ + ar->pm1.evt.sts = 0; + ar->pm1.evt.en = 0; + qemu_system_wakeup_enable(QEMU_WAKEUP_REASON_RTC, 0); + qemu_system_wakeup_enable(QEMU_WAKEUP_REASON_PMTIMER, 0); +} + +static uint64_t acpi_pm_evt_read(void *opaque, hwaddr addr, unsigned width) +{ + ACPIREGS *ar = opaque; + switch (addr) { + case 0: + return acpi_pm1_evt_get_sts(ar); + case 2: + return ar->pm1.evt.en; + default: + return 0; + } +} + +static void acpi_pm_evt_write(void *opaque, hwaddr addr, uint64_t val, + unsigned width) +{ + ACPIREGS *ar = opaque; + switch (addr) { + case 0: + acpi_pm1_evt_write_sts(ar, val); + ar->pm1.evt.update_sci(ar); + break; + case 2: + acpi_pm1_evt_write_en(ar, val); + ar->pm1.evt.update_sci(ar); + break; + } +} + +static const MemoryRegionOps acpi_pm_evt_ops = { + .read = acpi_pm_evt_read, + .write = acpi_pm_evt_write, + .impl.min_access_size = 2, + .valid.min_access_size = 1, + .valid.max_access_size = 2, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +void acpi_pm1_evt_init(ACPIREGS *ar, acpi_update_sci_fn update_sci, + MemoryRegion *parent) +{ + ar->pm1.evt.update_sci = update_sci; + memory_region_init_io(&ar->pm1.evt.io, memory_region_owner(parent), + &acpi_pm_evt_ops, ar, "acpi-evt", 4); + memory_region_add_subregion(parent, 0, &ar->pm1.evt.io); +} + +/* ACPI PM_TMR */ +void acpi_pm_tmr_update(ACPIREGS *ar, bool enable) +{ + int64_t expire_time; + + /* schedule a timer interruption if needed */ + if (enable) { + expire_time = muldiv64(ar->tmr.overflow_time, NANOSECONDS_PER_SECOND, + PM_TIMER_FREQUENCY); + timer_mod(ar->tmr.timer, expire_time); + } else { + timer_del(ar->tmr.timer); + } +} + +static inline int64_t acpi_pm_tmr_get_clock(void) +{ + return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), PM_TIMER_FREQUENCY, + NANOSECONDS_PER_SECOND); +} + +void acpi_pm_tmr_calc_overflow_time(ACPIREGS *ar) +{ + int64_t d = acpi_pm_tmr_get_clock(); + ar->tmr.overflow_time = (d + 0x800000LL) & ~0x7fffffLL; +} + +static uint32_t acpi_pm_tmr_get(ACPIREGS *ar) +{ + uint32_t d = acpi_pm_tmr_get_clock(); + return d & 0xffffff; +} + +static void acpi_pm_tmr_timer(void *opaque) +{ + ACPIREGS *ar = opaque; + + qemu_system_wakeup_request(QEMU_WAKEUP_REASON_PMTIMER, NULL); + ar->tmr.update_sci(ar); +} + +static uint64_t acpi_pm_tmr_read(void *opaque, hwaddr addr, unsigned width) +{ + return acpi_pm_tmr_get(opaque); +} + +static void acpi_pm_tmr_write(void *opaque, hwaddr addr, uint64_t val, + unsigned width) +{ + /* nothing */ +} + +static const MemoryRegionOps acpi_pm_tmr_ops = { + 
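+    /*
+     * Reads above return the tick counter masked to 24 bits; at
+     * PM_TIMER_FREQUENCY (3.579545 MHz) it wraps about every 4.7
+     * seconds, and acpi_pm_tmr_calc_overflow_time() rounds the current
+     * tick count up to the next multiple of 0x800000, the point where
+     * bit 23 toggles and TMR_STS must be raised.
+     */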
.read = acpi_pm_tmr_read, + .write = acpi_pm_tmr_write, + .impl.min_access_size = 4, + .valid.min_access_size = 1, + .valid.max_access_size = 4, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +void acpi_pm_tmr_init(ACPIREGS *ar, acpi_update_sci_fn update_sci, + MemoryRegion *parent) +{ + ar->tmr.update_sci = update_sci; + ar->tmr.timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, acpi_pm_tmr_timer, ar); + memory_region_init_io(&ar->tmr.io, memory_region_owner(parent), + &acpi_pm_tmr_ops, ar, "acpi-tmr", 4); + memory_region_add_subregion(parent, 8, &ar->tmr.io); +} + +void acpi_pm_tmr_reset(ACPIREGS *ar) +{ + ar->tmr.overflow_time = 0; + timer_del(ar->tmr.timer); +} + +/* ACPI PM1aCNT */ +static void acpi_pm1_cnt_write(ACPIREGS *ar, uint16_t val) +{ + ar->pm1.cnt.cnt = val & ~(ACPI_BITMASK_SLEEP_ENABLE); + + if (val & ACPI_BITMASK_SLEEP_ENABLE) { + /* change suspend type */ + uint16_t sus_typ = (val >> 10) & 7; + switch (sus_typ) { + case 0: /* soft power off */ + qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN); + break; + case 1: + qemu_system_suspend_request(); + break; + default: + if (sus_typ == ar->pm1.cnt.s4_val) { /* S4 request */ + qapi_event_send_suspend_disk(); + qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN); + } + break; + } + } +} + +void acpi_pm1_cnt_update(ACPIREGS *ar, + bool sci_enable, bool sci_disable) +{ + /* ACPI specs 3.0, 4.7.2.5 */ + if (ar->pm1.cnt.acpi_only) { + return; + } + + if (sci_enable) { + ar->pm1.cnt.cnt |= ACPI_BITMASK_SCI_ENABLE; + } else if (sci_disable) { + ar->pm1.cnt.cnt &= ~ACPI_BITMASK_SCI_ENABLE; + } +} + +static uint64_t acpi_pm_cnt_read(void *opaque, hwaddr addr, unsigned width) +{ + ACPIREGS *ar = opaque; + return ar->pm1.cnt.cnt; +} + +static void acpi_pm_cnt_write(void *opaque, hwaddr addr, uint64_t val, + unsigned width) +{ + acpi_pm1_cnt_write(opaque, val); +} + +static const MemoryRegionOps acpi_pm_cnt_ops = { + .read = acpi_pm_cnt_read, + .write = acpi_pm_cnt_write, + .impl.min_access_size = 2, + .valid.min_access_size = 1, + .valid.max_access_size = 2, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +void acpi_pm1_cnt_init(ACPIREGS *ar, MemoryRegion *parent, + bool disable_s3, bool disable_s4, uint8_t s4_val, + bool acpi_only) +{ + FWCfgState *fw_cfg; + + ar->pm1.cnt.s4_val = s4_val; + ar->pm1.cnt.acpi_only = acpi_only; + ar->wakeup.notify = acpi_notify_wakeup; + qemu_register_wakeup_notifier(&ar->wakeup); + + /* + * Register wake-up support in QMP query-current-machine API + */ + qemu_register_wakeup_support(); + + memory_region_init_io(&ar->pm1.cnt.io, memory_region_owner(parent), + &acpi_pm_cnt_ops, ar, "acpi-cnt", 2); + memory_region_add_subregion(parent, 4, &ar->pm1.cnt.io); + + fw_cfg = fw_cfg_find(); + if (fw_cfg) { + uint8_t suspend[6] = {128, 0, 0, 129, 128, 128}; + suspend[3] = 1 | ((!disable_s3) << 7); + suspend[4] = s4_val | ((!disable_s4) << 7); + + fw_cfg_add_file(fw_cfg, "etc/system-states", g_memdup(suspend, 6), 6); + } +} + +void acpi_pm1_cnt_reset(ACPIREGS *ar) +{ + ar->pm1.cnt.cnt = 0; + if (ar->pm1.cnt.acpi_only) { + ar->pm1.cnt.cnt |= ACPI_BITMASK_SCI_ENABLE; + } +} + +/* ACPI GPE */ +void acpi_gpe_init(ACPIREGS *ar, uint8_t len) +{ + ar->gpe.len = len; + /* Only first len / 2 bytes are ever used, + * but the caller in ich9.c migrates full len bytes. + * TODO: fix ich9.c and drop the extra allocation. 
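+     * (acpi_gpe_ioport_get_ptr() below maps I/O offsets [0, len/2)
+     * onto the STS bytes and [len/2, len) onto the EN bytes.)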
+ */ + ar->gpe.sts = g_malloc0(len); + ar->gpe.en = g_malloc0(len); +} + +void acpi_gpe_reset(ACPIREGS *ar) +{ + memset(ar->gpe.sts, 0, ar->gpe.len / 2); + memset(ar->gpe.en, 0, ar->gpe.len / 2); +} + +static uint8_t *acpi_gpe_ioport_get_ptr(ACPIREGS *ar, uint32_t addr) +{ + uint8_t *cur = NULL; + + if (addr < ar->gpe.len / 2) { + cur = ar->gpe.sts + addr; + } else if (addr < ar->gpe.len) { + cur = ar->gpe.en + addr - ar->gpe.len / 2; + } else { + abort(); + } + + return cur; +} + +void acpi_gpe_ioport_writeb(ACPIREGS *ar, uint32_t addr, uint32_t val) +{ + uint8_t *cur; + + cur = acpi_gpe_ioport_get_ptr(ar, addr); + if (addr < ar->gpe.len / 2) { + /* GPE_STS */ + *cur = (*cur) & ~val; + } else if (addr < ar->gpe.len) { + /* GPE_EN */ + *cur = val; + } else { + abort(); + } +} + +uint32_t acpi_gpe_ioport_readb(ACPIREGS *ar, uint32_t addr) +{ + uint8_t *cur; + uint32_t val; + + cur = acpi_gpe_ioport_get_ptr(ar, addr); + val = 0; + if (cur != NULL) { + val = *cur; + } + + return val; +} + +void acpi_send_gpe_event(ACPIREGS *ar, qemu_irq irq, + AcpiEventStatusBits status) +{ + ar->gpe.sts[0] |= status; + acpi_update_sci(ar, irq); +} + +void acpi_update_sci(ACPIREGS *regs, qemu_irq irq) +{ + int sci_level, pm1a_sts; + + pm1a_sts = acpi_pm1_evt_get_sts(regs); + + sci_level = ((pm1a_sts & + regs->pm1.evt.en & ACPI_BITMASK_PM1_COMMON_ENABLED) != 0) || + ((regs->gpe.sts[0] & regs->gpe.en[0]) != 0); + + qemu_set_irq(irq, sci_level); + + /* schedule a timer interruption if needed */ + acpi_pm_tmr_update(regs, + (regs->pm1.evt.en & ACPI_BITMASK_TIMER_ENABLE) && + !(pm1a_sts & ACPI_BITMASK_TIMER_STATUS)); +} diff --git a/hw/acpi/cpu.c b/hw/acpi/cpu.c new file mode 100644 index 000000000..b20903ea3 --- /dev/null +++ b/hw/acpi/cpu.c @@ -0,0 +1,711 @@ +#include "qemu/osdep.h" +#include "migration/vmstate.h" +#include "hw/acpi/cpu.h" +#include "qapi/error.h" +#include "qapi/qapi-events-acpi.h" +#include "trace.h" +#include "sysemu/numa.h" + +#define ACPI_CPU_HOTPLUG_REG_LEN 12 +#define ACPI_CPU_SELECTOR_OFFSET_WR 0 +#define ACPI_CPU_FLAGS_OFFSET_RW 4 +#define ACPI_CPU_CMD_OFFSET_WR 5 +#define ACPI_CPU_CMD_DATA_OFFSET_RW 8 +#define ACPI_CPU_CMD_DATA2_OFFSET_R 0 + +#define OVMF_CPUHP_SMI_CMD 4 + +enum { + CPHP_GET_NEXT_CPU_WITH_EVENT_CMD = 0, + CPHP_OST_EVENT_CMD = 1, + CPHP_OST_STATUS_CMD = 2, + CPHP_GET_CPU_ID_CMD = 3, + CPHP_CMD_MAX +}; + +static ACPIOSTInfo *acpi_cpu_device_status(int idx, AcpiCpuStatus *cdev) +{ + ACPIOSTInfo *info = g_new0(ACPIOSTInfo, 1); + + info->slot_type = ACPI_SLOT_TYPE_CPU; + info->slot = g_strdup_printf("%d", idx); + info->source = cdev->ost_event; + info->status = cdev->ost_status; + if (cdev->cpu) { + DeviceState *dev = DEVICE(cdev->cpu); + if (dev->id) { + info->device = g_strdup(dev->id); + info->has_device = true; + } + } + return info; +} + +void acpi_cpu_ospm_status(CPUHotplugState *cpu_st, ACPIOSTInfoList ***list) +{ + ACPIOSTInfoList ***tail = list; + int i; + + for (i = 0; i < cpu_st->dev_count; i++) { + QAPI_LIST_APPEND(*tail, acpi_cpu_device_status(i, &cpu_st->devs[i])); + } +} + +static uint64_t cpu_hotplug_rd(void *opaque, hwaddr addr, unsigned size) +{ + uint64_t val = 0; + CPUHotplugState *cpu_st = opaque; + AcpiCpuStatus *cdev; + + if (cpu_st->selector >= cpu_st->dev_count) { + return val; + } + + cdev = &cpu_st->devs[cpu_st->selector]; + switch (addr) { + case ACPI_CPU_FLAGS_OFFSET_RW: /* pack and return is_* fields */ + val |= cdev->cpu ? 1 : 0; + val |= cdev->is_inserting ? 2 : 0; + val |= cdev->is_removing ? 4 : 0; + val |= cdev->fw_remove ? 
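+                             /*
+                              * Flags byte as the guest sees it: bit 0 = CPU
+                              * present, bit 1 = insert event, bit 2 = remove
+                              * event, bit 4 = eject-via-firmware pending;
+                              * bit 3 (eject) is write-only and reads as 0.
+                              */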
16 : 0; + trace_cpuhp_acpi_read_flags(cpu_st->selector, val); + break; + case ACPI_CPU_CMD_DATA_OFFSET_RW: + switch (cpu_st->command) { + case CPHP_GET_NEXT_CPU_WITH_EVENT_CMD: + val = cpu_st->selector; + break; + case CPHP_GET_CPU_ID_CMD: + val = cdev->arch_id & 0xFFFFFFFF; + break; + default: + break; + } + trace_cpuhp_acpi_read_cmd_data(cpu_st->selector, val); + break; + case ACPI_CPU_CMD_DATA2_OFFSET_R: + switch (cpu_st->command) { + case CPHP_GET_NEXT_CPU_WITH_EVENT_CMD: + val = 0; + break; + case CPHP_GET_CPU_ID_CMD: + val = cdev->arch_id >> 32; + break; + default: + break; + } + trace_cpuhp_acpi_read_cmd_data2(cpu_st->selector, val); + break; + default: + break; + } + return val; +} + +static void cpu_hotplug_wr(void *opaque, hwaddr addr, uint64_t data, + unsigned int size) +{ + CPUHotplugState *cpu_st = opaque; + AcpiCpuStatus *cdev; + ACPIOSTInfo *info; + + assert(cpu_st->dev_count); + + if (addr) { + if (cpu_st->selector >= cpu_st->dev_count) { + trace_cpuhp_acpi_invalid_idx_selected(cpu_st->selector); + return; + } + } + + switch (addr) { + case ACPI_CPU_SELECTOR_OFFSET_WR: /* current CPU selector */ + cpu_st->selector = data; + trace_cpuhp_acpi_write_idx(cpu_st->selector); + break; + case ACPI_CPU_FLAGS_OFFSET_RW: /* set is_* fields */ + cdev = &cpu_st->devs[cpu_st->selector]; + if (data & 2) { /* clear insert event */ + cdev->is_inserting = false; + trace_cpuhp_acpi_clear_inserting_evt(cpu_st->selector); + } else if (data & 4) { /* clear remove event */ + cdev->is_removing = false; + trace_cpuhp_acpi_clear_remove_evt(cpu_st->selector); + } else if (data & 8) { + DeviceState *dev = NULL; + HotplugHandler *hotplug_ctrl = NULL; + + if (!cdev->cpu || cdev->cpu == first_cpu) { + trace_cpuhp_acpi_ejecting_invalid_cpu(cpu_st->selector); + break; + } + + trace_cpuhp_acpi_ejecting_cpu(cpu_st->selector); + dev = DEVICE(cdev->cpu); + hotplug_ctrl = qdev_get_hotplug_handler(dev); + hotplug_handler_unplug(hotplug_ctrl, dev, NULL); + object_unparent(OBJECT(dev)); + cdev->fw_remove = false; + } else if (data & 16) { + if (!cdev->cpu || cdev->cpu == first_cpu) { + trace_cpuhp_acpi_fw_remove_invalid_cpu(cpu_st->selector); + break; + } + trace_cpuhp_acpi_fw_remove_cpu(cpu_st->selector); + cdev->fw_remove = true; + } + break; + case ACPI_CPU_CMD_OFFSET_WR: + trace_cpuhp_acpi_write_cmd(cpu_st->selector, data); + if (data < CPHP_CMD_MAX) { + cpu_st->command = data; + if (cpu_st->command == CPHP_GET_NEXT_CPU_WITH_EVENT_CMD) { + uint32_t iter = cpu_st->selector; + + do { + cdev = &cpu_st->devs[iter]; + if (cdev->is_inserting || cdev->is_removing || + cdev->fw_remove) { + cpu_st->selector = iter; + trace_cpuhp_acpi_cpu_has_events(cpu_st->selector, + cdev->is_inserting, cdev->is_removing); + break; + } + iter = iter + 1 < cpu_st->dev_count ? 
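+                           /*
+                            * Wrap-around scan: the surrounding loop stops
+                            * either on the first CPU with a pending event
+                            * or once it has come back to the CPU the guest
+                            * selected, so one command never loops forever.
+                            */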
iter + 1 : 0; + } while (iter != cpu_st->selector); + } + } + break; + case ACPI_CPU_CMD_DATA_OFFSET_RW: + switch (cpu_st->command) { + case CPHP_OST_EVENT_CMD: { + cdev = &cpu_st->devs[cpu_st->selector]; + cdev->ost_event = data; + trace_cpuhp_acpi_write_ost_ev(cpu_st->selector, cdev->ost_event); + break; + } + case CPHP_OST_STATUS_CMD: { + cdev = &cpu_st->devs[cpu_st->selector]; + cdev->ost_status = data; + info = acpi_cpu_device_status(cpu_st->selector, cdev); + qapi_event_send_acpi_device_ost(info); + qapi_free_ACPIOSTInfo(info); + trace_cpuhp_acpi_write_ost_status(cpu_st->selector, + cdev->ost_status); + break; + } + default: + break; + } + break; + default: + break; + } +} + +static const MemoryRegionOps cpu_hotplug_ops = { + .read = cpu_hotplug_rd, + .write = cpu_hotplug_wr, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 1, + .max_access_size = 4, + }, +}; + +void cpu_hotplug_hw_init(MemoryRegion *as, Object *owner, + CPUHotplugState *state, hwaddr base_addr) +{ + MachineState *machine = MACHINE(qdev_get_machine()); + MachineClass *mc = MACHINE_GET_CLASS(machine); + const CPUArchIdList *id_list; + int i; + + assert(mc->possible_cpu_arch_ids); + id_list = mc->possible_cpu_arch_ids(machine); + state->dev_count = id_list->len; + state->devs = g_new0(typeof(*state->devs), state->dev_count); + for (i = 0; i < id_list->len; i++) { + state->devs[i].cpu = CPU(id_list->cpus[i].cpu); + state->devs[i].arch_id = id_list->cpus[i].arch_id; + } + memory_region_init_io(&state->ctrl_reg, owner, &cpu_hotplug_ops, state, + "acpi-cpu-hotplug", ACPI_CPU_HOTPLUG_REG_LEN); + memory_region_add_subregion(as, base_addr, &state->ctrl_reg); +} + +static AcpiCpuStatus *get_cpu_status(CPUHotplugState *cpu_st, DeviceState *dev) +{ + CPUClass *k = CPU_GET_CLASS(dev); + uint64_t cpu_arch_id = k->get_arch_id(CPU(dev)); + int i; + + for (i = 0; i < cpu_st->dev_count; i++) { + if (cpu_arch_id == cpu_st->devs[i].arch_id) { + return &cpu_st->devs[i]; + } + } + return NULL; +} + +void acpi_cpu_plug_cb(HotplugHandler *hotplug_dev, + CPUHotplugState *cpu_st, DeviceState *dev, Error **errp) +{ + AcpiCpuStatus *cdev; + + cdev = get_cpu_status(cpu_st, dev); + if (!cdev) { + return; + } + + cdev->cpu = CPU(dev); + if (dev->hotplugged) { + cdev->is_inserting = true; + acpi_send_event(DEVICE(hotplug_dev), ACPI_CPU_HOTPLUG_STATUS); + } +} + +void acpi_cpu_unplug_request_cb(HotplugHandler *hotplug_dev, + CPUHotplugState *cpu_st, + DeviceState *dev, Error **errp) +{ + AcpiCpuStatus *cdev; + + cdev = get_cpu_status(cpu_st, dev); + if (!cdev) { + return; + } + + cdev->is_removing = true; + acpi_send_event(DEVICE(hotplug_dev), ACPI_CPU_HOTPLUG_STATUS); +} + +void acpi_cpu_unplug_cb(CPUHotplugState *cpu_st, + DeviceState *dev, Error **errp) +{ + AcpiCpuStatus *cdev; + + cdev = get_cpu_status(cpu_st, dev); + if (!cdev) { + return; + } + + cdev->cpu = NULL; +} + +static const VMStateDescription vmstate_cpuhp_sts = { + .name = "CPU hotplug device state", + .version_id = 1, + .minimum_version_id = 1, + .minimum_version_id_old = 1, + .fields = (VMStateField[]) { + VMSTATE_BOOL(is_inserting, AcpiCpuStatus), + VMSTATE_BOOL(is_removing, AcpiCpuStatus), + VMSTATE_UINT32(ost_event, AcpiCpuStatus), + VMSTATE_UINT32(ost_status, AcpiCpuStatus), + VMSTATE_END_OF_LIST() + } +}; + +const VMStateDescription vmstate_cpu_hotplug = { + .name = "CPU hotplug state", + .version_id = 1, + .minimum_version_id = 1, + .minimum_version_id_old = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(selector, CPUHotplugState), + 
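+        /*
+         * devs[] is migrated as a variable-length array keyed by
+         * dev_count, so both sides of a migration must agree on the
+         * number of possible CPUs.
+         */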
VMSTATE_UINT8(command, CPUHotplugState), + VMSTATE_STRUCT_VARRAY_POINTER_UINT32(devs, CPUHotplugState, dev_count, + vmstate_cpuhp_sts, AcpiCpuStatus), + VMSTATE_END_OF_LIST() + } +}; + +#define CPU_NAME_FMT "C%.03X" +#define CPUHP_RES_DEVICE "PRES" +#define CPU_LOCK "CPLK" +#define CPU_STS_METHOD "CSTA" +#define CPU_SCAN_METHOD "CSCN" +#define CPU_NOTIFY_METHOD "CTFY" +#define CPU_EJECT_METHOD "CEJ0" +#define CPU_OST_METHOD "COST" +#define CPU_ADDED_LIST "CNEW" + +#define CPU_ENABLED "CPEN" +#define CPU_SELECTOR "CSEL" +#define CPU_COMMAND "CCMD" +#define CPU_DATA "CDAT" +#define CPU_INSERT_EVENT "CINS" +#define CPU_REMOVE_EVENT "CRMV" +#define CPU_EJECT_EVENT "CEJ0" +#define CPU_FW_EJECT_EVENT "CEJF" + +void build_cpus_aml(Aml *table, MachineState *machine, CPUHotplugFeatures opts, + hwaddr io_base, + const char *res_root, + const char *event_handler_method) +{ + Aml *ifctx; + Aml *field; + Aml *method; + Aml *cpu_ctrl_dev; + Aml *cpus_dev; + Aml *zero = aml_int(0); + Aml *one = aml_int(1); + Aml *sb_scope = aml_scope("_SB"); + MachineClass *mc = MACHINE_GET_CLASS(machine); + const CPUArchIdList *arch_ids = mc->possible_cpu_arch_ids(machine); + char *cphp_res_path = g_strdup_printf("%s." CPUHP_RES_DEVICE, res_root); + Object *obj = object_resolve_path_type("", TYPE_ACPI_DEVICE_IF, NULL); + AcpiDeviceIfClass *adevc = ACPI_DEVICE_IF_GET_CLASS(obj); + AcpiDeviceIf *adev = ACPI_DEVICE_IF(obj); + + cpu_ctrl_dev = aml_device("%s", cphp_res_path); + { + Aml *crs; + + aml_append(cpu_ctrl_dev, + aml_name_decl("_HID", aml_eisaid("PNP0A06"))); + aml_append(cpu_ctrl_dev, + aml_name_decl("_UID", aml_string("CPU Hotplug resources"))); + aml_append(cpu_ctrl_dev, aml_mutex(CPU_LOCK, 0)); + + crs = aml_resource_template(); + aml_append(crs, aml_io(AML_DECODE16, io_base, io_base, 1, + ACPI_CPU_HOTPLUG_REG_LEN)); + aml_append(cpu_ctrl_dev, aml_name_decl("_CRS", crs)); + + /* declare CPU hotplug MMIO region with related access fields */ + aml_append(cpu_ctrl_dev, + aml_operation_region("PRST", AML_SYSTEM_IO, aml_int(io_base), + ACPI_CPU_HOTPLUG_REG_LEN)); + + field = aml_field("PRST", AML_BYTE_ACC, AML_NOLOCK, + AML_WRITE_AS_ZEROS); + aml_append(field, aml_reserved_field(ACPI_CPU_FLAGS_OFFSET_RW * 8)); + /* 1 if enabled, read only */ + aml_append(field, aml_named_field(CPU_ENABLED, 1)); + /* (read) 1 if has a insert event. (write) 1 to clear event */ + aml_append(field, aml_named_field(CPU_INSERT_EVENT, 1)); + /* (read) 1 if has a remove event. 
(write) 1 to clear event */ + aml_append(field, aml_named_field(CPU_REMOVE_EVENT, 1)); + /* initiates device eject, write only */ + aml_append(field, aml_named_field(CPU_EJECT_EVENT, 1)); + /* tell firmware to do device eject, write only */ + aml_append(field, aml_named_field(CPU_FW_EJECT_EVENT, 1)); + aml_append(field, aml_reserved_field(3)); + aml_append(field, aml_named_field(CPU_COMMAND, 8)); + aml_append(cpu_ctrl_dev, field); + + field = aml_field("PRST", AML_DWORD_ACC, AML_NOLOCK, AML_PRESERVE); + /* CPU selector, write only */ + aml_append(field, aml_named_field(CPU_SELECTOR, 32)); + /* flags + cmd + 2byte align */ + aml_append(field, aml_reserved_field(4 * 8)); + aml_append(field, aml_named_field(CPU_DATA, 32)); + aml_append(cpu_ctrl_dev, field); + + if (opts.has_legacy_cphp) { + method = aml_method("_INI", 0, AML_SERIALIZED); + /* switch off legacy CPU hotplug HW and use new one, + * on reboot system is in new mode and writing 0 + * in CPU_SELECTOR selects BSP, which is NOP at + * the time _INI is called */ + aml_append(method, aml_store(zero, aml_name(CPU_SELECTOR))); + aml_append(cpu_ctrl_dev, method); + } + } + aml_append(sb_scope, cpu_ctrl_dev); + + cpus_dev = aml_device("\\_SB.CPUS"); + { + int i; + Aml *ctrl_lock = aml_name("%s.%s", cphp_res_path, CPU_LOCK); + Aml *cpu_selector = aml_name("%s.%s", cphp_res_path, CPU_SELECTOR); + Aml *is_enabled = aml_name("%s.%s", cphp_res_path, CPU_ENABLED); + Aml *cpu_cmd = aml_name("%s.%s", cphp_res_path, CPU_COMMAND); + Aml *cpu_data = aml_name("%s.%s", cphp_res_path, CPU_DATA); + Aml *ins_evt = aml_name("%s.%s", cphp_res_path, CPU_INSERT_EVENT); + Aml *rm_evt = aml_name("%s.%s", cphp_res_path, CPU_REMOVE_EVENT); + Aml *ej_evt = aml_name("%s.%s", cphp_res_path, CPU_EJECT_EVENT); + Aml *fw_ej_evt = aml_name("%s.%s", cphp_res_path, CPU_FW_EJECT_EVENT); + + aml_append(cpus_dev, aml_name_decl("_HID", aml_string("ACPI0010"))); + aml_append(cpus_dev, aml_name_decl("_CID", aml_eisaid("PNP0A05"))); + + method = aml_method(CPU_NOTIFY_METHOD, 2, AML_NOTSERIALIZED); + for (i = 0; i < arch_ids->len; i++) { + Aml *cpu = aml_name(CPU_NAME_FMT, i); + Aml *uid = aml_arg(0); + Aml *event = aml_arg(1); + + ifctx = aml_if(aml_equal(uid, aml_int(i))); + { + aml_append(ifctx, aml_notify(cpu, event)); + } + aml_append(method, ifctx); + } + aml_append(cpus_dev, method); + + method = aml_method(CPU_STS_METHOD, 1, AML_SERIALIZED); + { + Aml *idx = aml_arg(0); + Aml *sta = aml_local(0); + + aml_append(method, aml_acquire(ctrl_lock, 0xFFFF)); + aml_append(method, aml_store(idx, cpu_selector)); + aml_append(method, aml_store(zero, sta)); + ifctx = aml_if(aml_equal(is_enabled, one)); + { + aml_append(ifctx, aml_store(aml_int(0xF), sta)); + } + aml_append(method, ifctx); + aml_append(method, aml_release(ctrl_lock)); + aml_append(method, aml_return(sta)); + } + aml_append(cpus_dev, method); + + method = aml_method(CPU_EJECT_METHOD, 1, AML_SERIALIZED); + { + Aml *idx = aml_arg(0); + + aml_append(method, aml_acquire(ctrl_lock, 0xFFFF)); + aml_append(method, aml_store(idx, cpu_selector)); + if (opts.fw_unplugs_cpu) { + aml_append(method, aml_store(one, fw_ej_evt)); + aml_append(method, aml_store(aml_int(OVMF_CPUHP_SMI_CMD), + aml_name("%s", opts.smi_path))); + } else { + aml_append(method, aml_store(one, ej_evt)); + } + aml_append(method, aml_release(ctrl_lock)); + } + aml_append(cpus_dev, method); + + method = aml_method(CPU_SCAN_METHOD, 0, AML_SERIALIZED); + { + const uint8_t max_cpus_per_pass = 255; + Aml *else_ctx; + Aml *while_ctx, *while_ctx2; + Aml *has_event = 
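+            /*
+             * Notify codes used by this method follow the ACPI spec:
+             * 1 (dev_chk below) is "Device Check" for newly inserted
+             * CPUs, 3 (eject_req) is "Eject Request" for removals.
+             */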
aml_local(0); + Aml *dev_chk = aml_int(1); + Aml *eject_req = aml_int(3); + Aml *next_cpu_cmd = aml_int(CPHP_GET_NEXT_CPU_WITH_EVENT_CMD); + Aml *num_added_cpus = aml_local(1); + Aml *cpu_idx = aml_local(2); + Aml *uid = aml_local(3); + Aml *has_job = aml_local(4); + Aml *new_cpus = aml_name(CPU_ADDED_LIST); + + aml_append(method, aml_acquire(ctrl_lock, 0xFFFF)); + + /* + * Windows versions newer than XP (including Windows 10/Windows + * Server 2019), do support* VarPackageOp but, it is cripled to hold + * the same elements number as old PackageOp. + * For compatibility with Windows XP (so it won't crash) use ACPI1.0 + * PackageOp which can hold max 255 elements. + * + * use named package as old Windows don't support it in local var + */ + aml_append(method, aml_name_decl(CPU_ADDED_LIST, + aml_package(max_cpus_per_pass))); + + aml_append(method, aml_store(zero, uid)); + aml_append(method, aml_store(one, has_job)); + /* + * CPU_ADDED_LIST can hold limited number of elements, outer loop + * allows to process CPUs in batches which let us to handle more + * CPUs than CPU_ADDED_LIST can hold. + */ + while_ctx2 = aml_while(aml_equal(has_job, one)); + { + aml_append(while_ctx2, aml_store(zero, has_job)); + + aml_append(while_ctx2, aml_store(one, has_event)); + aml_append(while_ctx2, aml_store(zero, num_added_cpus)); + + /* + * Scan CPUs, till there are CPUs with events or + * CPU_ADDED_LIST capacity is exhausted + */ + while_ctx = aml_while(aml_land(aml_equal(has_event, one), + aml_lless(uid, aml_int(arch_ids->len)))); + { + /* + * clear loop exit condition, ins_evt/rm_evt checks will + * set it to 1 while next_cpu_cmd returns a CPU with events + */ + aml_append(while_ctx, aml_store(zero, has_event)); + + aml_append(while_ctx, aml_store(uid, cpu_selector)); + aml_append(while_ctx, aml_store(next_cpu_cmd, cpu_cmd)); + + /* + * wrap around case, scan is complete, exit loop. 
+ * It happens since events are not cleared in scan loop, + * so next_cpu_cmd continues to find already processed CPUs + */ + ifctx = aml_if(aml_lless(cpu_data, uid)); + { + aml_append(ifctx, aml_break()); + } + aml_append(while_ctx, ifctx); + + /* + * if CPU_ADDED_LIST is full, exit inner loop and process + * collected CPUs + */ + ifctx = aml_if( + aml_equal(num_added_cpus, aml_int(max_cpus_per_pass))); + { + aml_append(ifctx, aml_store(one, has_job)); + aml_append(ifctx, aml_break()); + } + aml_append(while_ctx, ifctx); + + aml_append(while_ctx, aml_store(cpu_data, uid)); + ifctx = aml_if(aml_equal(ins_evt, one)); + { + /* cache added CPUs to Notify/Wakeup later */ + aml_append(ifctx, aml_store(uid, + aml_index(new_cpus, num_added_cpus))); + aml_append(ifctx, aml_increment(num_added_cpus)); + aml_append(ifctx, aml_store(one, has_event)); + } + aml_append(while_ctx, ifctx); + else_ctx = aml_else(); + ifctx = aml_if(aml_equal(rm_evt, one)); + { + aml_append(ifctx, + aml_call2(CPU_NOTIFY_METHOD, uid, eject_req)); + aml_append(ifctx, aml_store(one, rm_evt)); + aml_append(ifctx, aml_store(one, has_event)); + } + aml_append(else_ctx, ifctx); + aml_append(while_ctx, else_ctx); + aml_append(while_ctx, aml_increment(uid)); + } + aml_append(while_ctx2, while_ctx); + + /* + * in case FW negotiated ICH9_LPC_SMI_F_CPU_HOTPLUG_BIT, + * make upcall to FW, so it can pull in new CPUs before + * OS is notified and wakes them up + */ + if (opts.smi_path) { + ifctx = aml_if(aml_lgreater(num_added_cpus, zero)); + { + aml_append(ifctx, aml_store(aml_int(OVMF_CPUHP_SMI_CMD), + aml_name("%s", opts.smi_path))); + } + aml_append(while_ctx2, ifctx); + } + + /* Notify OSPM about new CPUs and clear insert events */ + aml_append(while_ctx2, aml_store(zero, cpu_idx)); + while_ctx = aml_while(aml_lless(cpu_idx, num_added_cpus)); + { + aml_append(while_ctx, + aml_store(aml_derefof(aml_index(new_cpus, cpu_idx)), + uid)); + aml_append(while_ctx, + aml_call2(CPU_NOTIFY_METHOD, uid, dev_chk)); + aml_append(while_ctx, aml_store(uid, aml_debug())); + aml_append(while_ctx, aml_store(uid, cpu_selector)); + aml_append(while_ctx, aml_store(one, ins_evt)); + aml_append(while_ctx, aml_increment(cpu_idx)); + } + aml_append(while_ctx2, while_ctx); + /* + * If another batch is needed, then it will resume scanning + * exactly at -- and not after -- the last CPU that's currently + * in CPU_ADDED_LIST. In other words, the last CPU in + * CPU_ADDED_LIST is going to be re-checked. That's OK: we've + * just cleared the insert event for *all* CPUs in + * CPU_ADDED_LIST, including the last one. So the scan will + * simply seek past it. 
+ */ + } + aml_append(method, while_ctx2); + aml_append(method, aml_release(ctrl_lock)); + } + aml_append(cpus_dev, method); + + method = aml_method(CPU_OST_METHOD, 4, AML_SERIALIZED); + { + Aml *uid = aml_arg(0); + Aml *ev_cmd = aml_int(CPHP_OST_EVENT_CMD); + Aml *st_cmd = aml_int(CPHP_OST_STATUS_CMD); + + aml_append(method, aml_acquire(ctrl_lock, 0xFFFF)); + aml_append(method, aml_store(uid, cpu_selector)); + aml_append(method, aml_store(ev_cmd, cpu_cmd)); + aml_append(method, aml_store(aml_arg(1), cpu_data)); + aml_append(method, aml_store(st_cmd, cpu_cmd)); + aml_append(method, aml_store(aml_arg(2), cpu_data)); + aml_append(method, aml_release(ctrl_lock)); + } + aml_append(cpus_dev, method); + + /* build Processor object for each processor */ + for (i = 0; i < arch_ids->len; i++) { + Aml *dev; + Aml *uid = aml_int(i); + GArray *madt_buf = g_array_new(0, 1, 1); + int arch_id = arch_ids->cpus[i].arch_id; + + if (opts.acpi_1_compatible && arch_id < 255) { + dev = aml_processor(i, 0, 0, CPU_NAME_FMT, i); + } else { + dev = aml_device(CPU_NAME_FMT, i); + aml_append(dev, aml_name_decl("_HID", aml_string("ACPI0007"))); + aml_append(dev, aml_name_decl("_UID", uid)); + } + + method = aml_method("_STA", 0, AML_SERIALIZED); + aml_append(method, aml_return(aml_call1(CPU_STS_METHOD, uid))); + aml_append(dev, method); + + /* build _MAT object */ + assert(adevc && adevc->madt_cpu); + adevc->madt_cpu(adev, i, arch_ids, madt_buf, + true); /* set enabled flag */ + aml_append(dev, aml_name_decl("_MAT", + aml_buffer(madt_buf->len, (uint8_t *)madt_buf->data))); + g_array_free(madt_buf, true); + + if (CPU(arch_ids->cpus[i].cpu) != first_cpu) { + method = aml_method("_EJ0", 1, AML_NOTSERIALIZED); + aml_append(method, aml_call1(CPU_EJECT_METHOD, uid)); + aml_append(dev, method); + } + + method = aml_method("_OST", 3, AML_SERIALIZED); + aml_append(method, + aml_call4(CPU_OST_METHOD, uid, aml_arg(0), + aml_arg(1), aml_arg(2)) + ); + aml_append(dev, method); + + /* Linux guests discard SRAT info for non-present CPUs + * as a result _PXM is required for all CPUs which might + * be hot-plugged. For simplicity, add it for all CPUs. + */ + if (arch_ids->cpus[i].props.has_node_id) { + aml_append(dev, aml_name_decl("_PXM", + aml_int(arch_ids->cpus[i].props.node_id))); + } + + aml_append(cpus_dev, dev); + } + } + aml_append(sb_scope, cpus_dev); + aml_append(table, sb_scope); + + method = aml_method(event_handler_method, 0, AML_NOTSERIALIZED); + aml_append(method, aml_call0("\\_SB.CPUS." CPU_SCAN_METHOD)); + aml_append(table, method); + + g_free(cphp_res_path); +} diff --git a/hw/acpi/cpu_hotplug.c b/hw/acpi/cpu_hotplug.c new file mode 100644 index 000000000..53654f863 --- /dev/null +++ b/hw/acpi/cpu_hotplug.c @@ -0,0 +1,333 @@ +/* + * QEMU ACPI hotplug utilities + * + * Copyright (C) 2013 Red Hat Inc + * + * Authors: + * Igor Mammedov + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ */ +#include "qemu/osdep.h" +#include "hw/acpi/cpu_hotplug.h" +#include "qapi/error.h" +#include "hw/core/cpu.h" +#include "hw/i386/pc.h" +#include "hw/pci/pci.h" +#include "qemu/error-report.h" + +#define CPU_EJECT_METHOD "CPEJ" +#define CPU_MAT_METHOD "CPMA" +#define CPU_ON_BITMAP "CPON" +#define CPU_STATUS_METHOD "CPST" +#define CPU_STATUS_MAP "PRS" +#define CPU_SCAN_METHOD "PRSC" + +static uint64_t cpu_status_read(void *opaque, hwaddr addr, unsigned int size) +{ + AcpiCpuHotplug *cpus = opaque; + uint64_t val = cpus->sts[addr]; + + return val; +} + +static void cpu_status_write(void *opaque, hwaddr addr, uint64_t data, + unsigned int size) +{ + /* firmware never used to write in CPU present bitmap so use + this fact as means to switch QEMU into modern CPU hotplug + mode by writing 0 at the beginning of legacy CPU bitmap + */ + if (addr == 0 && data == 0) { + AcpiCpuHotplug *cpus = opaque; + object_property_set_bool(cpus->device, "cpu-hotplug-legacy", false, + &error_abort); + } +} + +static const MemoryRegionOps AcpiCpuHotplug_ops = { + .read = cpu_status_read, + .write = cpu_status_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 1, + .max_access_size = 1, + }, +}; + +static void acpi_set_cpu_present_bit(AcpiCpuHotplug *g, CPUState *cpu) +{ + CPUClass *k = CPU_GET_CLASS(cpu); + int64_t cpu_id; + + cpu_id = k->get_arch_id(cpu); + if ((cpu_id / 8) >= ACPI_GPE_PROC_LEN) { + object_property_set_bool(g->device, "cpu-hotplug-legacy", false, + &error_abort); + return; + } + + g->sts[cpu_id / 8] |= (1 << (cpu_id % 8)); +} + +void legacy_acpi_cpu_plug_cb(HotplugHandler *hotplug_dev, + AcpiCpuHotplug *g, DeviceState *dev, Error **errp) +{ + acpi_set_cpu_present_bit(g, CPU(dev)); + acpi_send_event(DEVICE(hotplug_dev), ACPI_CPU_HOTPLUG_STATUS); +} + +void legacy_acpi_cpu_hotplug_init(MemoryRegion *parent, Object *owner, + AcpiCpuHotplug *gpe_cpu, uint16_t base) +{ + CPUState *cpu; + + memory_region_init_io(&gpe_cpu->io, owner, &AcpiCpuHotplug_ops, + gpe_cpu, "acpi-cpu-hotplug", ACPI_GPE_PROC_LEN); + memory_region_add_subregion(parent, base, &gpe_cpu->io); + gpe_cpu->device = owner; + + CPU_FOREACH(cpu) { + acpi_set_cpu_present_bit(gpe_cpu, cpu); + } +} + +void acpi_switch_to_modern_cphp(AcpiCpuHotplug *gpe_cpu, + CPUHotplugState *cpuhp_state, + uint16_t io_port) +{ + MemoryRegion *parent = pci_address_space_io(PCI_DEVICE(gpe_cpu->device)); + + memory_region_del_subregion(parent, &gpe_cpu->io); + cpu_hotplug_hw_init(parent, gpe_cpu->device, cpuhp_state, io_port); +} + +void build_legacy_cpu_hotplug_aml(Aml *ctx, MachineState *machine, + uint16_t io_base) +{ + Aml *dev; + Aml *crs; + Aml *pkg; + Aml *field; + Aml *method; + Aml *if_ctx; + Aml *else_ctx; + int i, apic_idx; + Aml *sb_scope = aml_scope("_SB"); + uint8_t madt_tmpl[8] = {0x00, 0x08, 0x00, 0x00, 0x00, 0, 0, 0}; + Aml *cpu_id = aml_arg(1); + Aml *apic_id = aml_arg(0); + Aml *cpu_on = aml_local(0); + Aml *madt = aml_local(1); + Aml *cpus_map = aml_name(CPU_ON_BITMAP); + Aml *zero = aml_int(0); + Aml *one = aml_int(1); + MachineClass *mc = MACHINE_GET_CLASS(machine); + const CPUArchIdList *apic_ids = mc->possible_cpu_arch_ids(machine); + X86MachineState *x86ms = X86_MACHINE(machine); + + /* + * _MAT method - creates an madt apic buffer + * apic_id = Arg0 = Local APIC ID + * cpu_id = Arg1 = Processor ID + * cpu_on = Local0 = CPON flag for this cpu + * madt = Local1 = Buffer (in madt apic form) to return + */ + method = aml_method(CPU_MAT_METHOD, 2, AML_NOTSERIALIZED); + aml_append(method, + 
aml_store(aml_derefof(aml_index(cpus_map, apic_id)), cpu_on)); + aml_append(method, + aml_store(aml_buffer(sizeof(madt_tmpl), madt_tmpl), madt)); + /* Update the processor id, lapic id, and enable/disable status */ + aml_append(method, aml_store(cpu_id, aml_index(madt, aml_int(2)))); + aml_append(method, aml_store(apic_id, aml_index(madt, aml_int(3)))); + aml_append(method, aml_store(cpu_on, aml_index(madt, aml_int(4)))); + aml_append(method, aml_return(madt)); + aml_append(sb_scope, method); + + /* + * _STA method - return ON status of cpu + * apic_id = Arg0 = Local APIC ID + * cpu_on = Local0 = CPON flag for this cpu + */ + method = aml_method(CPU_STATUS_METHOD, 1, AML_NOTSERIALIZED); + aml_append(method, + aml_store(aml_derefof(aml_index(cpus_map, apic_id)), cpu_on)); + if_ctx = aml_if(cpu_on); + { + aml_append(if_ctx, aml_return(aml_int(0xF))); + } + aml_append(method, if_ctx); + else_ctx = aml_else(); + { + aml_append(else_ctx, aml_return(zero)); + } + aml_append(method, else_ctx); + aml_append(sb_scope, method); + + method = aml_method(CPU_EJECT_METHOD, 2, AML_NOTSERIALIZED); + aml_append(method, aml_sleep(200)); + aml_append(sb_scope, method); + + method = aml_method(CPU_SCAN_METHOD, 0, AML_NOTSERIALIZED); + { + Aml *while_ctx, *if_ctx2, *else_ctx2; + Aml *bus_check_evt = aml_int(1); + Aml *remove_evt = aml_int(3); + Aml *status_map = aml_local(5); /* Local5 = active cpu bitmap */ + Aml *byte = aml_local(2); /* Local2 = last read byte from bitmap */ + Aml *idx = aml_local(0); /* Processor ID / APIC ID iterator */ + Aml *is_cpu_on = aml_local(1); /* Local1 = CPON flag for cpu */ + Aml *status = aml_local(3); /* Local3 = active state for cpu */ + + aml_append(method, aml_store(aml_name(CPU_STATUS_MAP), status_map)); + aml_append(method, aml_store(zero, byte)); + aml_append(method, aml_store(zero, idx)); + + /* While (idx < SizeOf(CPON)) */ + while_ctx = aml_while(aml_lless(idx, aml_sizeof(cpus_map))); + aml_append(while_ctx, + aml_store(aml_derefof(aml_index(cpus_map, idx)), is_cpu_on)); + + if_ctx = aml_if(aml_and(idx, aml_int(0x07), NULL)); + { + /* Shift down previously read bitmap byte */ + aml_append(if_ctx, aml_shiftright(byte, one, byte)); + } + aml_append(while_ctx, if_ctx); + + else_ctx = aml_else(); + { + /* Read next byte from cpu bitmap */ + aml_append(else_ctx, aml_store(aml_derefof(aml_index(status_map, + aml_shiftright(idx, aml_int(3), NULL))), byte)); + } + aml_append(while_ctx, else_ctx); + + aml_append(while_ctx, aml_store(aml_and(byte, one, NULL), status)); + if_ctx = aml_if(aml_lnot(aml_equal(is_cpu_on, status))); + { + /* State change - update CPON with new state */ + aml_append(if_ctx, aml_store(status, aml_index(cpus_map, idx))); + if_ctx2 = aml_if(aml_equal(status, one)); + { + aml_append(if_ctx2, + aml_call2(AML_NOTIFY_METHOD, idx, bus_check_evt)); + } + aml_append(if_ctx, if_ctx2); + else_ctx2 = aml_else(); + { + aml_append(else_ctx2, + aml_call2(AML_NOTIFY_METHOD, idx, remove_evt)); + } + } + aml_append(if_ctx, else_ctx2); + aml_append(while_ctx, if_ctx); + + aml_append(while_ctx, aml_increment(idx)); /* go to next cpu */ + aml_append(method, while_ctx); + } + aml_append(sb_scope, method); + + /* The current AML generator can cover the APIC ID range [0..255], + * inclusive, for VCPU hotplug. */ + QEMU_BUILD_BUG_ON(ACPI_CPU_HOTPLUG_ID_LIMIT > 256); + if (x86ms->apic_id_limit > ACPI_CPU_HOTPLUG_ID_LIMIT) { + error_report("max_cpus is too large. 
APIC ID of last CPU is %u",
+ x86ms->apic_id_limit - 1);
+ exit(1);
+ }
+
+ /* create PCI0.PRES device and its _CRS to reserve the CPU hotplug IO range */
+ dev = aml_device("PCI0." stringify(CPU_HOTPLUG_RESOURCE_DEVICE));
+ aml_append(dev, aml_name_decl("_HID", aml_eisaid("PNP0A06")));
+ aml_append(dev,
+ aml_name_decl("_UID", aml_string("CPU Hotplug resources"))
+ );
+ /* device present, functioning, decoding, not shown in UI */
+ aml_append(dev, aml_name_decl("_STA", aml_int(0xB)));
+ crs = aml_resource_template();
+ aml_append(crs,
+ aml_io(AML_DECODE16, io_base, io_base, 1, ACPI_GPE_PROC_LEN)
+ );
+ aml_append(dev, aml_name_decl("_CRS", crs));
+ aml_append(sb_scope, dev);
+ /* declare the CPU hotplug IO region and the PRS field to access it */
+ aml_append(sb_scope, aml_operation_region(
+ "PRST", AML_SYSTEM_IO, aml_int(io_base), ACPI_GPE_PROC_LEN));
+ field = aml_field("PRST", AML_BYTE_ACC, AML_NOLOCK, AML_PRESERVE);
+ aml_append(field, aml_named_field("PRS", 256));
+ aml_append(sb_scope, field);
+
+ /* build Processor object for each processor */
+ for (i = 0; i < apic_ids->len; i++) {
+ int apic_id = apic_ids->cpus[i].arch_id;
+
+ assert(apic_id < ACPI_CPU_HOTPLUG_ID_LIMIT);
+
+ dev = aml_processor(i, 0, 0, "CP%.02X", apic_id);
+
+ method = aml_method("_MAT", 0, AML_NOTSERIALIZED);
+ aml_append(method,
+ aml_return(aml_call2(CPU_MAT_METHOD, aml_int(apic_id), aml_int(i))
+ ));
+ aml_append(dev, method);
+
+ method = aml_method("_STA", 0, AML_NOTSERIALIZED);
+ aml_append(method,
+ aml_return(aml_call1(CPU_STATUS_METHOD, aml_int(apic_id))));
+ aml_append(dev, method);
+
+ method = aml_method("_EJ0", 1, AML_NOTSERIALIZED);
+ aml_append(method,
+ aml_return(aml_call2(CPU_EJECT_METHOD, aml_int(apic_id),
+ aml_arg(0)))
+ );
+ aml_append(dev, method);
+
+ aml_append(sb_scope, dev);
+ }
+
+ /* build this code:
+ * Method(NTFY, 2) {If (LEqual(Arg0, 0x00)) {Notify(CP00, Arg1)} ...}
+ */
+ /* Arg0 = APIC ID */
+ method = aml_method(AML_NOTIFY_METHOD, 2, AML_NOTSERIALIZED);
+ for (i = 0; i < apic_ids->len; i++) {
+ int apic_id = apic_ids->cpus[i].arch_id;
+
+ if_ctx = aml_if(aml_equal(aml_arg(0), aml_int(apic_id)));
+ aml_append(if_ctx,
+ aml_notify(aml_name("CP%.02X", apic_id), aml_arg(1))
+ );
+ aml_append(method, if_ctx);
+ }
+ aml_append(sb_scope, method);
+
+ /* build "Name(CPON, Package() { One, One, ..., Zero, Zero, ... })"
+ *
+ * Note: The ability to create variable-sized packages was first
+ * introduced in ACPI 2.0. ACPI 1.0 only allowed fixed-size packages
+ * with up to 255 elements. Windows guests up to win2k8 fail when
+ * VarPackageOp is used.
+ */
+ pkg = x86ms->apic_id_limit <= 255 ? aml_package(x86ms->apic_id_limit) :
+ aml_varpackage(x86ms->apic_id_limit);
+
+ for (i = 0, apic_idx = 0; i < apic_ids->len; i++) {
+ int apic_id = apic_ids->cpus[i].arch_id;
+
+ for (; apic_idx < apic_id; apic_idx++) {
+ aml_append(pkg, aml_int(0));
+ }
+ aml_append(pkg, aml_int(apic_ids->cpus[i].cpu ? 1 : 0));
+ apic_idx = apic_id + 1;
+ }
+ aml_append(sb_scope, aml_name_decl(CPU_ON_BITMAP, pkg));
+ aml_append(ctx, sb_scope);
+
+ method = aml_method("\\_GPE._E02", 0, AML_NOTSERIALIZED);
+ aml_append(method, aml_call0("\\_SB." CPU_SCAN_METHOD));
+ aml_append(ctx, method);
+}
diff --git a/hw/acpi/generic_event_device.c b/hw/acpi/generic_event_device.c
new file mode 100644
index 000000000..e28457a7d
--- /dev/null
+++ b/hw/acpi/generic_event_device.c
@@ -0,0 +1,433 @@
+/*
+ *
+ * Copyright (c) 2018 Intel Corporation
+ * Copyright (c) 2019 Huawei Technologies R & D (UK) Ltd
+ * Written by Samuel Ortiz, Shameer Kolothum
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "hw/acpi/acpi.h"
+#include "hw/acpi/generic_event_device.h"
+#include "hw/irq.h"
+#include "hw/mem/pc-dimm.h"
+#include "hw/mem/nvdimm.h"
+#include "hw/qdev-properties.h"
+#include "migration/vmstate.h"
+#include "qemu/error-report.h"
+#include "sysemu/runstate.h"
+
+static const uint32_t ged_supported_events[] = {
+ ACPI_GED_MEM_HOTPLUG_EVT,
+ ACPI_GED_PWR_DOWN_EVT,
+ ACPI_GED_NVDIMM_HOTPLUG_EVT,
+};
+
+/*
+ * The ACPI Generic Event Device (GED) is a device specific to
+ * hardware-reduced platforms [ACPI v6.1 Section 5.6.9] that handles
+ * all platform events, including the hotplug ones. Platforms need to
+ * specify their own GED Event bitmap to describe what kind of events
+ * they want to support through GED. This routine uses a single
+ * interrupt for the GED device, relying on an IO memory region to
+ * communicate the type of device affected by the interrupt. This way,
+ * we can support up to 32 events with a unique interrupt.
+ */
+void build_ged_aml(Aml *table, const char *name, HotplugHandler *hotplug_dev,
+ uint32_t ged_irq, AmlRegionSpace rs, hwaddr ged_base)
+{
+ AcpiGedState *s = ACPI_GED(hotplug_dev);
+ Aml *crs = aml_resource_template();
+ Aml *evt, *field;
+ Aml *dev = aml_device("%s", name);
+ Aml *evt_sel = aml_local(0);
+ Aml *esel = aml_name(AML_GED_EVT_SEL);
+
+ /* _CRS interrupt */
+ aml_append(crs, aml_interrupt(AML_CONSUMER, AML_EDGE, AML_ACTIVE_HIGH,
+ AML_EXCLUSIVE, &ged_irq, 1));
+
+ aml_append(dev, aml_name_decl("_HID", aml_string("ACPI0013")));
+ aml_append(dev, aml_name_decl("_UID", aml_string(GED_DEVICE)));
+ aml_append(dev, aml_name_decl("_CRS", crs));
+
+ /* Append IO region */
+ aml_append(dev, aml_operation_region(AML_GED_EVT_REG, rs,
+ aml_int(ged_base + ACPI_GED_EVT_SEL_OFFSET),
+ ACPI_GED_EVT_SEL_LEN));
+ field = aml_field(AML_GED_EVT_REG, AML_DWORD_ACC, AML_NOLOCK,
+ AML_WRITE_AS_ZEROS);
+ aml_append(field, aml_named_field(AML_GED_EVT_SEL,
+ ACPI_GED_EVT_SEL_LEN * BITS_PER_BYTE));
+ aml_append(dev, field);
+
+ /*
+ * For each GED event we:
+ * - Add a conditional block for each event, inside a loop.
+ * - Call a method for each supported GED event type.
+ *
+ * The resulting ASL code looks like:
+ *
+ * Local0 = ESEL
+ * If ((Local0 & One) == One)
+ * {
+ * MethodEvent0()
+ * }
+ *
+ * If ((Local0 & 0x2) == 0x2)
+ * {
+ * MethodEvent1()
+ * }
+ * ...
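+ *
+ * (Each supported event owns one bit of ESEL, per the values in
+ * ged_supported_events[] above; _EVT latches ESEL into Local0 once,
+ * then tests each bit in turn, which is how a single edge-triggered
+ * interrupt multiplexes up to 32 distinct event types.)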
+ */ + evt = aml_method("_EVT", 1, AML_SERIALIZED); + { + Aml *if_ctx; + uint32_t i; + uint32_t ged_events = ctpop32(s->ged_event_bitmap); + + /* Local0 = ESEL */ + aml_append(evt, aml_store(esel, evt_sel)); + + for (i = 0; i < ARRAY_SIZE(ged_supported_events) && ged_events; i++) { + uint32_t event = s->ged_event_bitmap & ged_supported_events[i]; + + if (!event) { + continue; + } + + if_ctx = aml_if(aml_equal(aml_and(evt_sel, aml_int(event), NULL), + aml_int(event))); + switch (event) { + case ACPI_GED_MEM_HOTPLUG_EVT: + aml_append(if_ctx, aml_call0(MEMORY_DEVICES_CONTAINER "." + MEMORY_SLOT_SCAN_METHOD)); + break; + case ACPI_GED_PWR_DOWN_EVT: + aml_append(if_ctx, + aml_notify(aml_name(ACPI_POWER_BUTTON_DEVICE), + aml_int(0x80))); + break; + case ACPI_GED_NVDIMM_HOTPLUG_EVT: + aml_append(if_ctx, + aml_notify(aml_name("\\_SB.NVDR"), + aml_int(0x80))); + break; + default: + /* + * Please make sure all the events in ged_supported_events[] + * are handled above. + */ + g_assert_not_reached(); + } + + aml_append(evt, if_ctx); + ged_events--; + } + + if (ged_events) { + error_report("Unsupported events specified"); + abort(); + } + } + + /* Append _EVT method */ + aml_append(dev, evt); + + aml_append(table, dev); +} + +void acpi_dsdt_add_power_button(Aml *scope) +{ + Aml *dev = aml_device(ACPI_POWER_BUTTON_DEVICE); + aml_append(dev, aml_name_decl("_HID", aml_string("PNP0C0C"))); + aml_append(dev, aml_name_decl("_UID", aml_int(0))); + aml_append(scope, dev); +} + +/* Memory read by the GED _EVT AML dynamic method */ +static uint64_t ged_evt_read(void *opaque, hwaddr addr, unsigned size) +{ + uint64_t val = 0; + GEDState *ged_st = opaque; + + switch (addr) { + case ACPI_GED_EVT_SEL_OFFSET: + /* Read the selector value and reset it */ + val = ged_st->sel; + ged_st->sel = 0; + break; + default: + break; + } + + return val; +} + +/* Nothing is expected to be written to the GED memory region */ +static void ged_evt_write(void *opaque, hwaddr addr, uint64_t data, + unsigned int size) +{ +} + +static const MemoryRegionOps ged_evt_ops = { + .read = ged_evt_read, + .write = ged_evt_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +static uint64_t ged_regs_read(void *opaque, hwaddr addr, unsigned size) +{ + return 0; +} + +static void ged_regs_write(void *opaque, hwaddr addr, uint64_t data, + unsigned int size) +{ + bool slp_en; + int slp_typ; + + switch (addr) { + case ACPI_GED_REG_SLEEP_CTL: + slp_typ = (data >> 2) & 0x07; + slp_en = (data >> 5) & 0x01; + if (slp_en && slp_typ == 5) { + qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN); + } + return; + case ACPI_GED_REG_SLEEP_STS: + return; + case ACPI_GED_REG_RESET: + if (data == ACPI_GED_RESET_VALUE) { + qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); + } + return; + } +} + +static const MemoryRegionOps ged_regs_ops = { + .read = ged_regs_read, + .write = ged_regs_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 1, + .max_access_size = 1, + }, +}; + +static void acpi_ged_device_plug_cb(HotplugHandler *hotplug_dev, + DeviceState *dev, Error **errp) +{ + AcpiGedState *s = ACPI_GED(hotplug_dev); + + if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) { + if (object_dynamic_cast(OBJECT(dev), TYPE_NVDIMM)) { + nvdimm_acpi_plug_cb(hotplug_dev, dev); + } else { + acpi_memory_plug_cb(hotplug_dev, &s->memhp_state, dev, errp); + } + } else { + error_setg(errp, "virt: device plug request for unsupported device" + " type: %s", 
object_get_typename(OBJECT(dev))); + } +} + +static void acpi_ged_unplug_request_cb(HotplugHandler *hotplug_dev, + DeviceState *dev, Error **errp) +{ + AcpiGedState *s = ACPI_GED(hotplug_dev); + + if ((object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM) && + !(object_dynamic_cast(OBJECT(dev), TYPE_NVDIMM)))) { + acpi_memory_unplug_request_cb(hotplug_dev, &s->memhp_state, dev, errp); + } else { + error_setg(errp, "acpi: device unplug request for unsupported device" + " type: %s", object_get_typename(OBJECT(dev))); + } +} + +static void acpi_ged_unplug_cb(HotplugHandler *hotplug_dev, + DeviceState *dev, Error **errp) +{ + AcpiGedState *s = ACPI_GED(hotplug_dev); + + if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) { + acpi_memory_unplug_cb(&s->memhp_state, dev, errp); + } else { + error_setg(errp, "acpi: device unplug for unsupported device" + " type: %s", object_get_typename(OBJECT(dev))); + } +} + +static void acpi_ged_send_event(AcpiDeviceIf *adev, AcpiEventStatusBits ev) +{ + AcpiGedState *s = ACPI_GED(adev); + GEDState *ged_st = &s->ged_state; + uint32_t sel; + + if (ev & ACPI_MEMORY_HOTPLUG_STATUS) { + sel = ACPI_GED_MEM_HOTPLUG_EVT; + } else if (ev & ACPI_POWER_DOWN_STATUS) { + sel = ACPI_GED_PWR_DOWN_EVT; + } else if (ev & ACPI_NVDIMM_HOTPLUG_STATUS) { + sel = ACPI_GED_NVDIMM_HOTPLUG_EVT; + } else { + /* Unknown event. Return without generating interrupt. */ + warn_report("GED: Unsupported event %d. No irq injected", ev); + return; + } + + /* + * Set the GED selector field to communicate the event type. + * This will be read by GED aml code to select the appropriate + * event method. + */ + ged_st->sel |= sel; + + /* Trigger the event by sending an interrupt to the guest. */ + qemu_irq_pulse(s->irq); +} + +static Property acpi_ged_properties[] = { + DEFINE_PROP_UINT32("ged-event", AcpiGedState, ged_event_bitmap, 0), + DEFINE_PROP_END_OF_LIST(), +}; + +static const VMStateDescription vmstate_memhp_state = { + .name = "acpi-ged/memhp", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_MEMORY_HOTPLUG(memhp_state, AcpiGedState), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_ged_state = { + .name = "acpi-ged-state", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(sel, GEDState), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_ghes = { + .name = "acpi-ghes", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT64(ghes_addr_le, AcpiGhesState), + VMSTATE_END_OF_LIST() + }, +}; + +static bool ghes_needed(void *opaque) +{ + AcpiGedState *s = opaque; + return s->ghes_state.ghes_addr_le; +} + +static const VMStateDescription vmstate_ghes_state = { + .name = "acpi-ged/ghes", + .version_id = 1, + .minimum_version_id = 1, + .needed = ghes_needed, + .fields = (VMStateField[]) { + VMSTATE_STRUCT(ghes_state, AcpiGedState, 1, + vmstate_ghes, AcpiGhesState), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_acpi_ged = { + .name = "acpi-ged", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_STRUCT(ged_state, AcpiGedState, 1, vmstate_ged_state, GEDState), + VMSTATE_END_OF_LIST(), + }, + .subsections = (const VMStateDescription * []) { + &vmstate_memhp_state, + &vmstate_ghes_state, + NULL + } +}; + +static void acpi_ged_initfn(Object *obj) +{ + DeviceState *dev = DEVICE(obj); + AcpiGedState *s = ACPI_GED(dev); + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + GEDState *ged_st = 
&s->ged_state; + + memory_region_init_io(&ged_st->evt, obj, &ged_evt_ops, ged_st, + TYPE_ACPI_GED, ACPI_GED_EVT_SEL_LEN); + sysbus_init_mmio(sbd, &ged_st->evt); + + sysbus_init_irq(sbd, &s->irq); + + s->memhp_state.is_enabled = true; + /* + * GED handles memory hotplug event and acpi-mem-hotplug + * memory region gets initialized here. Create an exclusive + * container for memory hotplug IO and expose it as GED sysbus + * MMIO so that boards can map it separately. + */ + memory_region_init(&s->container_memhp, OBJECT(dev), "memhp container", + MEMORY_HOTPLUG_IO_LEN); + sysbus_init_mmio(sbd, &s->container_memhp); + acpi_memory_hotplug_init(&s->container_memhp, OBJECT(dev), + &s->memhp_state, 0); + + memory_region_init_io(&ged_st->regs, obj, &ged_regs_ops, ged_st, + TYPE_ACPI_GED "-regs", ACPI_GED_REG_COUNT); + sysbus_init_mmio(sbd, &ged_st->regs); +} + +static void acpi_ged_class_init(ObjectClass *class, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(class); + HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(class); + AcpiDeviceIfClass *adevc = ACPI_DEVICE_IF_CLASS(class); + + dc->desc = "ACPI Generic Event Device"; + device_class_set_props(dc, acpi_ged_properties); + dc->vmsd = &vmstate_acpi_ged; + + hc->plug = acpi_ged_device_plug_cb; + hc->unplug_request = acpi_ged_unplug_request_cb; + hc->unplug = acpi_ged_unplug_cb; + + adevc->send_event = acpi_ged_send_event; +} + +static const TypeInfo acpi_ged_info = { + .name = TYPE_ACPI_GED, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(AcpiGedState), + .instance_init = acpi_ged_initfn, + .class_init = acpi_ged_class_init, + .interfaces = (InterfaceInfo[]) { + { TYPE_HOTPLUG_HANDLER }, + { TYPE_ACPI_DEVICE_IF }, + { } + } +}; + +static void acpi_ged_register_types(void) +{ + type_register_static(&acpi_ged_info); +} + +type_init(acpi_ged_register_types) diff --git a/hw/acpi/ghes-stub.c b/hw/acpi/ghes-stub.c new file mode 100644 index 000000000..c315de180 --- /dev/null +++ b/hw/acpi/ghes-stub.c @@ -0,0 +1,22 @@ +/* + * Support for generating APEI tables and recording CPER for Guests: + * stub functions. + * + * Copyright (c) 2021 Linaro, Ltd + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "hw/acpi/ghes.h" + +int acpi_ghes_record_errors(uint8_t source_id, uint64_t physical_address) +{ + return -1; +} + +bool acpi_ghes_present(void) +{ + return false; +} diff --git a/hw/acpi/ghes.c b/hw/acpi/ghes.c new file mode 100644 index 000000000..45d9a809c --- /dev/null +++ b/hw/acpi/ghes.c @@ -0,0 +1,460 @@ +/* + * Support for generating APEI tables and recording CPER for Guests + * + * Copyright (c) 2020 HUAWEI TECHNOLOGIES CO., LTD. + * + * Author: Dongjiu Geng + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . 
+ */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "hw/acpi/ghes.h" +#include "hw/acpi/aml-build.h" +#include "qemu/error-report.h" +#include "hw/acpi/generic_event_device.h" +#include "hw/nvram/fw_cfg.h" +#include "qemu/uuid.h" + +#define ACPI_GHES_ERRORS_FW_CFG_FILE "etc/hardware_errors" +#define ACPI_GHES_DATA_ADDR_FW_CFG_FILE "etc/hardware_errors_addr" + +/* The max size in bytes for one error block */ +#define ACPI_GHES_MAX_RAW_DATA_LENGTH (1 * KiB) + +/* Now only support ARMv8 SEA notification type error source */ +#define ACPI_GHES_ERROR_SOURCE_COUNT 1 + +/* Generic Hardware Error Source version 2 */ +#define ACPI_GHES_SOURCE_GENERIC_ERROR_V2 10 + +/* Address offset in Generic Address Structure(GAS) */ +#define GAS_ADDR_OFFSET 4 + +/* + * The total size of Generic Error Data Entry + * ACPI 6.1/6.2: 18.3.2.7.1 Generic Error Data, + * Table 18-343 Generic Error Data Entry + */ +#define ACPI_GHES_DATA_LENGTH 72 + +/* The memory section CPER size, UEFI 2.6: N.2.5 Memory Error Section */ +#define ACPI_GHES_MEM_CPER_LENGTH 80 + +/* Masks for block_status flags */ +#define ACPI_GEBS_UNCORRECTABLE 1 + +/* + * Total size for Generic Error Status Block except Generic Error Data Entries + * ACPI 6.2: 18.3.2.7.1 Generic Error Data, + * Table 18-380 Generic Error Status Block + */ +#define ACPI_GHES_GESB_SIZE 20 + +/* + * Values for error_severity field + */ +enum AcpiGenericErrorSeverity { + ACPI_CPER_SEV_RECOVERABLE = 0, + ACPI_CPER_SEV_FATAL = 1, + ACPI_CPER_SEV_CORRECTED = 2, + ACPI_CPER_SEV_NONE = 3, +}; + +/* + * Hardware Error Notification + * ACPI 4.0: 17.3.2.7 Hardware Error Notification + * Composes dummy Hardware Error Notification descriptor of specified type + */ +static void build_ghes_hw_error_notification(GArray *table, const uint8_t type) +{ + /* Type */ + build_append_int_noprefix(table, type, 1); + /* + * Length: + * Total length of the structure in bytes + */ + build_append_int_noprefix(table, 28, 1); + /* Configuration Write Enable */ + build_append_int_noprefix(table, 0, 2); + /* Poll Interval */ + build_append_int_noprefix(table, 0, 4); + /* Vector */ + build_append_int_noprefix(table, 0, 4); + /* Switch To Polling Threshold Value */ + build_append_int_noprefix(table, 0, 4); + /* Switch To Polling Threshold Window */ + build_append_int_noprefix(table, 0, 4); + /* Error Threshold Value */ + build_append_int_noprefix(table, 0, 4); + /* Error Threshold Window */ + build_append_int_noprefix(table, 0, 4); +} + +/* + * Generic Error Data Entry + * ACPI 6.1: 18.3.2.7.1 Generic Error Data + */ +static void acpi_ghes_generic_error_data(GArray *table, + const uint8_t *section_type, uint32_t error_severity, + uint8_t validation_bits, uint8_t flags, + uint32_t error_data_length, QemuUUID fru_id, + uint64_t time_stamp) +{ + const uint8_t fru_text[20] = {0}; + + /* Section Type */ + g_array_append_vals(table, section_type, 16); + + /* Error Severity */ + build_append_int_noprefix(table, error_severity, 4); + /* Revision */ + build_append_int_noprefix(table, 0x300, 2); + /* Validation Bits */ + build_append_int_noprefix(table, validation_bits, 1); + /* Flags */ + build_append_int_noprefix(table, flags, 1); + /* Error Data Length */ + build_append_int_noprefix(table, error_data_length, 4); + + /* FRU Id */ + g_array_append_vals(table, fru_id.data, ARRAY_SIZE(fru_id.data)); + + /* FRU Text */ + g_array_append_vals(table, fru_text, sizeof(fru_text)); + + /* Timestamp */ + build_append_int_noprefix(table, time_stamp, 8); +} + +/* + * Generic Error Status Block + * ACPI 6.1: 
18.3.2.7.1 Generic Error Data + */ +static void acpi_ghes_generic_error_status(GArray *table, uint32_t block_status, + uint32_t raw_data_offset, uint32_t raw_data_length, + uint32_t data_length, uint32_t error_severity) +{ + /* Block Status */ + build_append_int_noprefix(table, block_status, 4); + /* Raw Data Offset */ + build_append_int_noprefix(table, raw_data_offset, 4); + /* Raw Data Length */ + build_append_int_noprefix(table, raw_data_length, 4); + /* Data Length */ + build_append_int_noprefix(table, data_length, 4); + /* Error Severity */ + build_append_int_noprefix(table, error_severity, 4); +} + +/* UEFI 2.6: N.2.5 Memory Error Section */ +static void acpi_ghes_build_append_mem_cper(GArray *table, + uint64_t error_physical_addr) +{ + /* + * Memory Error Record + */ + + /* Validation Bits */ + build_append_int_noprefix(table, + (1ULL << 14) | /* Type Valid */ + (1ULL << 1) /* Physical Address Valid */, + 8); + /* Error Status */ + build_append_int_noprefix(table, 0, 8); + /* Physical Address */ + build_append_int_noprefix(table, error_physical_addr, 8); + /* Skip all the detailed information normally found in such a record */ + build_append_int_noprefix(table, 0, 48); + /* Memory Error Type */ + build_append_int_noprefix(table, 0 /* Unknown error */, 1); + /* Skip all the detailed information normally found in such a record */ + build_append_int_noprefix(table, 0, 7); +} + +static int acpi_ghes_record_mem_error(uint64_t error_block_address, + uint64_t error_physical_addr) +{ + GArray *block; + + /* Memory Error Section Type */ + const uint8_t uefi_cper_mem_sec[] = + UUID_LE(0xA5BC1114, 0x6F64, 0x4EDE, 0xB8, 0x63, 0x3E, 0x83, \ + 0xED, 0x7C, 0x83, 0xB1); + + /* invalid fru id: ACPI 4.0: 17.3.2.6.1 Generic Error Data, + * Table 17-13 Generic Error Data Entry + */ + QemuUUID fru_id = {}; + uint32_t data_length; + + block = g_array_new(false, true /* clear */, 1); + + /* This is the length if adding a new generic error data entry*/ + data_length = ACPI_GHES_DATA_LENGTH + ACPI_GHES_MEM_CPER_LENGTH; + /* + * It should not run out of the preallocated memory if adding a new generic + * error data entry + */ + assert((data_length + ACPI_GHES_GESB_SIZE) <= + ACPI_GHES_MAX_RAW_DATA_LENGTH); + + /* Build the new generic error status block header */ + acpi_ghes_generic_error_status(block, ACPI_GEBS_UNCORRECTABLE, + 0, 0, data_length, ACPI_CPER_SEV_RECOVERABLE); + + /* Build this new generic error data entry header */ + acpi_ghes_generic_error_data(block, uefi_cper_mem_sec, + ACPI_CPER_SEV_RECOVERABLE, 0, 0, + ACPI_GHES_MEM_CPER_LENGTH, fru_id, 0); + + /* Build the memory section CPER for above new generic error data entry */ + acpi_ghes_build_append_mem_cper(block, error_physical_addr); + + /* Write the generic error data entry into guest memory */ + cpu_physical_memory_write(error_block_address, block->data, block->len); + + g_array_free(block, true); + + return 0; +} + +/* + * Build table for the hardware error fw_cfg blob. + * Initialize "etc/hardware_errors" and "etc/hardware_errors_addr" fw_cfg blobs. + * See docs/specs/acpi_hest_ghes.rst for blobs format. 
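+ *
+ * As a sketch, the "etc/hardware_errors" blob built below lays out,
+ * with N = ACPI_GHES_ERROR_SOURCE_COUNT:
+ *
+ *     offset 0      : N x uint64_t error_block_address, patched by
+ *                     firmware to point at the status blocks below
+ *     offset 8 * N  : N x uint64_t read_ack_register, initialized to 1
+ *     offset 16 * N : N status blocks of
+ *                     ACPI_GHES_MAX_RAW_DATA_LENGTH bytes each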
+ */
+void build_ghes_error_table(GArray *hardware_errors, BIOSLinker *linker)
+{
+ int i, error_status_block_offset;
+
+ /* Build error_block_address */
+ for (i = 0; i < ACPI_GHES_ERROR_SOURCE_COUNT; i++) {
+ build_append_int_noprefix(hardware_errors, 0, sizeof(uint64_t));
+ }
+
+ /* Build read_ack_register */
+ for (i = 0; i < ACPI_GHES_ERROR_SOURCE_COUNT; i++) {
+ /*
+ * Initialize the value of read_ack_register to 1, so GHES can be
+ * writable after (re)boot.
+ * ACPI 6.2: 18.3.2.8 Generic Hardware Error Source version 2
+ * (GHESv2 - Type 10)
+ */
+ build_append_int_noprefix(hardware_errors, 1, sizeof(uint64_t));
+ }
+
+ /* Generic Error Status Block offset in the hardware error fw_cfg blob */
+ error_status_block_offset = hardware_errors->len;
+
+ /* Reserve space for Error Status Data Block */
+ acpi_data_push(hardware_errors,
+ ACPI_GHES_MAX_RAW_DATA_LENGTH * ACPI_GHES_ERROR_SOURCE_COUNT);
+
+ /* Tell guest firmware to place hardware_errors blob into RAM */
+ bios_linker_loader_alloc(linker, ACPI_GHES_ERRORS_FW_CFG_FILE,
+ hardware_errors, sizeof(uint64_t), false);
+
+ for (i = 0; i < ACPI_GHES_ERROR_SOURCE_COUNT; i++) {
+ /*
+ * Tell firmware to patch error_block_address entries to point to
+ * corresponding "Generic Error Status Block"
+ */
+ bios_linker_loader_add_pointer(linker,
+ ACPI_GHES_ERRORS_FW_CFG_FILE, sizeof(uint64_t) * i,
+ sizeof(uint64_t), ACPI_GHES_ERRORS_FW_CFG_FILE,
+ error_status_block_offset + i * ACPI_GHES_MAX_RAW_DATA_LENGTH);
+ }
+
+ /*
+ * Tell firmware to write the hardware_errors GPA into
+ * hardware_errors_addr fw_cfg, once the former has been initialized.
+ */
+ bios_linker_loader_write_pointer(linker, ACPI_GHES_DATA_ADDR_FW_CFG_FILE,
+ 0, sizeof(uint64_t), ACPI_GHES_ERRORS_FW_CFG_FILE, 0);
+}
+
+/* Build Generic Hardware Error Source version 2 (GHESv2) */
+static void build_ghes_v2(GArray *table_data, int source_id, BIOSLinker *linker)
+{
+ uint64_t address_offset;
+ /*
+ * Type:
+ * Generic Hardware Error Source version 2 (GHESv2 - Type 10)
+ */
+ build_append_int_noprefix(table_data, ACPI_GHES_SOURCE_GENERIC_ERROR_V2, 2);
+ /* Source Id */
+ build_append_int_noprefix(table_data, source_id, 2);
+ /* Related Source Id */
+ build_append_int_noprefix(table_data, 0xffff, 2);
+ /* Flags */
+ build_append_int_noprefix(table_data, 0, 1);
+ /* Enabled */
+ build_append_int_noprefix(table_data, 1, 1);
+
+ /* Number of Records To Pre-allocate */
+ build_append_int_noprefix(table_data, 1, 4);
+ /* Max Sections Per Record */
+ build_append_int_noprefix(table_data, 1, 4);
+ /* Max Raw Data Length */
+ build_append_int_noprefix(table_data, ACPI_GHES_MAX_RAW_DATA_LENGTH, 4);
+
+ address_offset = table_data->len;
+ /* Error Status Address */
+ build_append_gas(table_data, AML_AS_SYSTEM_MEMORY, 0x40, 0,
+ 4 /* QWord access */, 0);
+ bios_linker_loader_add_pointer(linker, ACPI_BUILD_TABLE_FILE,
+ address_offset + GAS_ADDR_OFFSET, sizeof(uint64_t),
+ ACPI_GHES_ERRORS_FW_CFG_FILE, source_id * sizeof(uint64_t));
+
+ switch (source_id) {
+ case ACPI_HEST_SRC_ID_SEA:
+ /*
+ * Notification Structure
+ * Only the ARMv8 SEA notification type is enabled for now
+ */
+ build_ghes_hw_error_notification(table_data, ACPI_GHES_NOTIFY_SEA);
+ break;
+ default:
+ error_report("Unsupported error source");
+ abort();
+ }
+
+ /* Error Status Block Length */
+ build_append_int_noprefix(table_data, ACPI_GHES_MAX_RAW_DATA_LENGTH, 4);
+
+ /*
+ * Read Ack Register
+ * ACPI 6.1: 18.3.2.8 Generic Hardware Error Source
+ * version 2 (GHESv2 - Type 10)
+ */
+ address_offset = table_data->len;
+ build_append_gas(table_data, AML_AS_SYSTEM_MEMORY, 0x40, 0,
+ 4 /* QWord access */, 0);
+ bios_linker_loader_add_pointer(linker, ACPI_BUILD_TABLE_FILE,
+ address_offset + GAS_ADDR_OFFSET,
+ sizeof(uint64_t), ACPI_GHES_ERRORS_FW_CFG_FILE,
+ (ACPI_GHES_ERROR_SOURCE_COUNT + source_id) * sizeof(uint64_t));
+
+ /*
+ * Read Ack Preserve field
+ * We only provide the first bit in Read Ack Register to OSPM to write
+ * while the other bits are preserved.
+ */
+ build_append_int_noprefix(table_data, ~0x1ULL, 8);
+ /* Read Ack Write */
+ build_append_int_noprefix(table_data, 0x1, 8);
+}
+
+/* Build Hardware Error Source Table */
+void acpi_build_hest(GArray *table_data, BIOSLinker *linker,
+ const char *oem_id, const char *oem_table_id)
+{
+ AcpiTable table = { .sig = "HEST", .rev = 1,
+ .oem_id = oem_id, .oem_table_id = oem_table_id };
+
+ acpi_table_begin(&table, table_data);
+
+ /* Error Source Count */
+ build_append_int_noprefix(table_data, ACPI_GHES_ERROR_SOURCE_COUNT, 4);
+ build_ghes_v2(table_data, ACPI_HEST_SRC_ID_SEA, linker);
+
+ acpi_table_end(linker, &table);
+}
+
+void acpi_ghes_add_fw_cfg(AcpiGhesState *ags, FWCfgState *s,
+ GArray *hardware_error)
+{
+ /* Create a read-only fw_cfg file for GHES */
+ fw_cfg_add_file(s, ACPI_GHES_ERRORS_FW_CFG_FILE, hardware_error->data,
+ hardware_error->len);
+
+ /* Create a read-write fw_cfg file for Address */
+ fw_cfg_add_file_callback(s, ACPI_GHES_DATA_ADDR_FW_CFG_FILE, NULL, NULL,
+ NULL, &(ags->ghes_addr_le), sizeof(ags->ghes_addr_le), false);
+
+ ags->present = true;
+}
+
+int acpi_ghes_record_errors(uint8_t source_id, uint64_t physical_address)
+{
+ uint64_t error_block_addr, read_ack_register_addr, read_ack_register = 0;
+ uint64_t start_addr;
+ int ret = -1;
+ AcpiGedState *acpi_ged_state;
+ AcpiGhesState *ags;
+
+ assert(source_id < ACPI_HEST_SRC_ID_RESERVED);
+
+ acpi_ged_state = ACPI_GED(object_resolve_path_type("", TYPE_ACPI_GED,
+ NULL));
+ g_assert(acpi_ged_state);
+ ags = &acpi_ged_state->ghes_state;
+
+ start_addr = le64_to_cpu(ags->ghes_addr_le);
+
+ if (physical_address) {
+
+ if (source_id < ACPI_HEST_SRC_ID_RESERVED) {
+ start_addr += source_id * sizeof(uint64_t);
+ }
+
+ cpu_physical_memory_read(start_addr, &error_block_addr,
+ sizeof(error_block_addr));
+
+ error_block_addr = le64_to_cpu(error_block_addr);
+
+ read_ack_register_addr = start_addr +
+ ACPI_GHES_ERROR_SOURCE_COUNT * sizeof(uint64_t);
+
+ cpu_physical_memory_read(read_ack_register_addr,
+ &read_ack_register, sizeof(read_ack_register));
+
+ /* zero means OSPM does not acknowledge the error */
+ if (!read_ack_register) {
+ error_report("OSPM has not acknowledged the previous error,"
+ " so cannot record CPER for the current error");
+ } else if (error_block_addr) {
+ read_ack_register = cpu_to_le64(0);
+ /*
+ * Clear the Read Ack Register, OSPM will write it to 1 when
+ * it acknowledges this error.
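+ *
+ * The handshake implemented here: QEMU writes 0 to the register and
+ * then records the CPER; the OSPM writes the Read Ack Write value
+ * (bit 0) back once it has consumed the record, re-arming this error
+ * source for the next record.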
+ */
+ cpu_physical_memory_write(read_ack_register_addr,
+ &read_ack_register, sizeof(uint64_t));
+
+ ret = acpi_ghes_record_mem_error(error_block_addr,
+ physical_address);
+ } else {
+ error_report("cannot find Generic Error Status Block");
+ }
+ }
+
+ return ret;
+}
+
+bool acpi_ghes_present(void)
+{
+ AcpiGedState *acpi_ged_state;
+ AcpiGhesState *ags;
+
+ acpi_ged_state = ACPI_GED(object_resolve_path_type("", TYPE_ACPI_GED,
+ NULL));
+
+ if (!acpi_ged_state) {
+ return false;
+ }
+ ags = &acpi_ged_state->ghes_state;
+ return ags->present;
+}
diff --git a/hw/acpi/hmat.c b/hw/acpi/hmat.c
new file mode 100644
index 000000000..6913ebf73
--- /dev/null
+++ b/hw/acpi/hmat.c
@@ -0,0 +1,267 @@
+/*
+ * HMAT ACPI Implementation
+ *
+ * Copyright (C) 2019 Intel Corporation.
+ *
+ * Author:
+ * Liu jingqi
+ * Tao Xu
+ *
+ * HMAT is defined in ACPI 6.3: 5.2.27 Heterogeneous Memory Attribute Table
+ * (HMAT)
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/units.h"
+#include "sysemu/numa.h"
+#include "hw/acpi/hmat.h"
+
+/*
+ * ACPI 6.3:
+ * 5.2.27.3 Memory Proximity Domain Attributes Structure: Table 5-145
+ */
+static void build_hmat_mpda(GArray *table_data, uint16_t flags,
+ uint32_t initiator, uint32_t mem_node)
+{
+
+ /* Memory Proximity Domain Attributes Structure */
+ /* Type */
+ build_append_int_noprefix(table_data, 0, 2);
+ /* Reserved */
+ build_append_int_noprefix(table_data, 0, 2);
+ /* Length */
+ build_append_int_noprefix(table_data, 40, 4);
+ /* Flags */
+ build_append_int_noprefix(table_data, flags, 2);
+ /* Reserved */
+ build_append_int_noprefix(table_data, 0, 2);
+ /* Proximity Domain for the Attached Initiator */
+ build_append_int_noprefix(table_data, initiator, 4);
+ /* Proximity Domain for the Memory */
+ build_append_int_noprefix(table_data, mem_node, 4);
+ /* Reserved */
+ build_append_int_noprefix(table_data, 0, 4);
+ /*
+ * Reserved:
+ * Previously defined as the Start Address of the System Physical
+ * Address Range. Deprecated since ACPI Spec 6.3.
+ */
+ build_append_int_noprefix(table_data, 0, 8);
+ /*
+ * Reserved:
+ * Previously defined as the Range Length of the region in bytes.
+ * Deprecated since ACPI Spec 6.3.
+ */
+ build_append_int_noprefix(table_data, 0, 8);
+}
+
+/*
+ * ACPI 6.3: 5.2.27.4 System Locality Latency and Bandwidth Information
+ * Structure: Table 5-146
+ */
+static void build_hmat_lb(GArray *table_data, HMAT_LB_Info *hmat_lb,
+ uint32_t num_initiator, uint32_t num_target,
+ uint32_t *initiator_list)
+{
+ int i, index;
+ HMAT_LB_Data *lb_data;
+ uint16_t *entry_list;
+ uint32_t base;
+ /* Length in bytes for entire structure */
+ uint32_t lb_length
+ = 32 /* Table length up to and including Entry Base Unit */
+ + 4 * num_initiator /* Initiator Proximity Domain List */
+ + 4 * num_target /* Target Proximity Domain List */
+ + 2 * num_initiator * num_target; /* Latency or Bandwidth Entries */
+
+ /* Type */
+ build_append_int_noprefix(table_data, 1, 2);
+ /* Reserved */
+ build_append_int_noprefix(table_data, 0, 2);
+ /* Length */
+ build_append_int_noprefix(table_data, lb_length, 4);
+ /* Flags: Bits [3:0] Memory Hierarchy, Bits [7:4] Reserved */
+ assert(!(hmat_lb->hierarchy >> 4));
+ build_append_int_noprefix(table_data, hmat_lb->hierarchy, 1);
+ /* Data Type */
+ build_append_int_noprefix(table_data, hmat_lb->data_type, 1);
+ /* Reserved */
+ build_append_int_noprefix(table_data, 0, 2);
+ /* Number of Initiator Proximity Domains (s) */
+ build_append_int_noprefix(table_data, num_initiator, 4);
+ /* Number of Target Proximity Domains (t) */
+ build_append_int_noprefix(table_data, num_target, 4);
+ /* Reserved */
+ build_append_int_noprefix(table_data, 0, 4);
+
+ /* Entry Base Unit */
+ if (hmat_lb->data_type <= HMAT_LB_DATA_WRITE_LATENCY) {
+ /* Convert latency base from nanoseconds to picoseconds */
+ base = hmat_lb->base * 1000;
+ } else {
+ /* Convert bandwidth base from bytes to megabytes */
+ base = hmat_lb->base / MiB;
+ }
+ build_append_int_noprefix(table_data, base, 8);
+
+ /* Initiator Proximity Domain List */
+ for (i = 0; i < num_initiator; i++) {
+ build_append_int_noprefix(table_data, initiator_list[i], 4);
+ }
+
+ /* Target Proximity Domain List */
+ for (i = 0; i < num_target; i++) {
+ build_append_int_noprefix(table_data, i, 4);
+ }
+
+ /* Latency or Bandwidth Entries */
+ entry_list = g_malloc0(num_initiator * num_target * sizeof(uint16_t));
+ for (i = 0; i < hmat_lb->list->len; i++) {
+ lb_data = &g_array_index(hmat_lb->list, HMAT_LB_Data, i);
+ index = lb_data->initiator * num_target + lb_data->target;
+
+ entry_list[index] = (uint16_t)(lb_data->data / hmat_lb->base);
+ }
+
+ for (i = 0; i < num_initiator * num_target; i++) {
+ build_append_int_noprefix(table_data, entry_list[i], 2);
+ }
+
+ g_free(entry_list);
+}
+
+/* ACPI 6.3: 5.2.27.5 Memory Side Cache Information Structure: Table 5-147 */
+static void build_hmat_cache(GArray *table_data, uint8_t total_levels,
+ NumaHmatCacheOptions *hmat_cache)
+{
+ /*
+ * Cache Attributes: Bits [3:0] - Total Cache Levels
+ * for this Memory Proximity Domain
+ */
+ uint32_t cache_attr = total_levels;
+
+ /* Bits [7:4] - Cache Level described in this structure */
+ cache_attr |= (uint32_t) hmat_cache->level << 4;
+
+ /* Bits [11:8] - Cache Associativity */
+ cache_attr |= (uint32_t) hmat_cache->associativity << 8;
+
+ /* Bits [15:12] - Write Policy */
+ cache_attr |= (uint32_t) hmat_cache->policy << 12;
+
+ /* Bits [31:16] - Cache Line size in bytes */
+ cache_attr |= (uint32_t) hmat_cache->line << 16;
+
+ /* Type */
+ build_append_int_noprefix(table_data, 2, 2);
+ /* Reserved */
+ build_append_int_noprefix(table_data, 0, 2);
+ /* Length */
+ build_append_int_noprefix(table_data, 32, 4);
+ /* Proximity Domain for the Memory */
+
build_append_int_noprefix(table_data, hmat_cache->node_id, 4); + /* Reserved */ + build_append_int_noprefix(table_data, 0, 4); + /* Memory Side Cache Size */ + build_append_int_noprefix(table_data, hmat_cache->size, 8); + /* Cache Attributes */ + build_append_int_noprefix(table_data, cache_attr, 4); + /* Reserved */ + build_append_int_noprefix(table_data, 0, 2); + /* + * Number of SMBIOS handles (n) + * Linux kernel uses Memory Side Cache Information Structure + * without SMBIOS entries for now, so set Number of SMBIOS handles + * as 0. + */ + build_append_int_noprefix(table_data, 0, 2); +} + +/* Build HMAT sub table structures */ +static void hmat_build_table_structs(GArray *table_data, NumaState *numa_state) +{ + uint16_t flags; + uint32_t num_initiator = 0; + uint32_t initiator_list[MAX_NODES]; + int i, hierarchy, type, cache_level, total_levels; + HMAT_LB_Info *hmat_lb; + NumaHmatCacheOptions *hmat_cache; + + build_append_int_noprefix(table_data, 0, 4); /* Reserved */ + + for (i = 0; i < numa_state->num_nodes; i++) { + flags = 0; + + if (numa_state->nodes[i].initiator < MAX_NODES) { + flags |= HMAT_PROXIMITY_INITIATOR_VALID; + } + + build_hmat_mpda(table_data, flags, numa_state->nodes[i].initiator, i); + } + + for (i = 0; i < numa_state->num_nodes; i++) { + if (numa_state->nodes[i].has_cpu) { + initiator_list[num_initiator++] = i; + } + } + + /* + * ACPI 6.3: 5.2.27.4 System Locality Latency and Bandwidth Information + * Structure: Table 5-146 + */ + for (hierarchy = HMAT_LB_MEM_MEMORY; + hierarchy <= HMAT_LB_MEM_CACHE_3RD_LEVEL; hierarchy++) { + for (type = HMAT_LB_DATA_ACCESS_LATENCY; + type <= HMAT_LB_DATA_WRITE_BANDWIDTH; type++) { + hmat_lb = numa_state->hmat_lb[hierarchy][type]; + + if (hmat_lb && hmat_lb->list->len) { + build_hmat_lb(table_data, hmat_lb, num_initiator, + numa_state->num_nodes, initiator_list); + } + } + } + + /* + * ACPI 6.3: 5.2.27.5 Memory Side Cache Information Structure: + * Table 5-147 + */ + for (i = 0; i < numa_state->num_nodes; i++) { + total_levels = 0; + for (cache_level = 1; cache_level < HMAT_LB_LEVELS; cache_level++) { + if (numa_state->hmat_cache[i][cache_level]) { + total_levels++; + } + } + for (cache_level = 0; cache_level <= total_levels; cache_level++) { + hmat_cache = numa_state->hmat_cache[i][cache_level]; + if (hmat_cache) { + build_hmat_cache(table_data, total_levels, hmat_cache); + } + } + } +} + +void build_hmat(GArray *table_data, BIOSLinker *linker, NumaState *numa_state, + const char *oem_id, const char *oem_table_id) +{ + AcpiTable table = { .sig = "HMAT", .rev = 2, + .oem_id = oem_id, .oem_table_id = oem_table_id }; + + acpi_table_begin(&table, table_data); + hmat_build_table_structs(table_data, numa_state); + acpi_table_end(linker, &table); +} diff --git a/hw/acpi/hmat.h b/hw/acpi/hmat.h new file mode 100644 index 000000000..b57f0e7e8 --- /dev/null +++ b/hw/acpi/hmat.h @@ -0,0 +1,43 @@ +/* + * HMAT ACPI Implementation Header + * + * Copyright(C) 2019 Intel Corporation. + * + * Author: + * Liu jingqi + * Tao Xu + * + * HMAT is defined in ACPI 6.3: 5.2.27 Heterogeneous Memory Attribute Table + * (HMAT) + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
+ * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see + */ + +#ifndef HMAT_H +#define HMAT_H + +#include "hw/acpi/aml-build.h" + +/* + * ACPI 6.3: 5.2.27.3 Memory Proximity Domain Attributes Structure, + * Table 5-145, Field "flag", Bit [0]: set to 1 to indicate that data in + * the Proximity Domain for the Attached Initiator field is valid. + * Other bits reserved. + */ +#define HMAT_PROXIMITY_INITIATOR_VALID 0x1 + +void build_hmat(GArray *table_data, BIOSLinker *linker, NumaState *numa_state, + const char *oem_id, const char *oem_table_id); + +#endif diff --git a/hw/acpi/ich9.c b/hw/acpi/ich9.c new file mode 100644 index 000000000..ebe08ed83 --- /dev/null +++ b/hw/acpi/ich9.c @@ -0,0 +1,595 @@ +/* + * ACPI implementation + * + * Copyright (c) 2006 Fabrice Bellard + * Copyright (c) 2009 Isaku Yamahata + * VA Linux Systems Japan K.K. + * Copyright (C) 2012 Jason Baron + * + * This is based on acpi.c. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License version 2.1 as published by the Free Software Foundation. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see + * + * Contributions after 2012-01-13 are licensed under the terms of the + * GNU GPL, version 2 or (at your option) any later version. + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qapi/visitor.h" +#include "hw/pci/pci.h" +#include "migration/vmstate.h" +#include "qemu/timer.h" +#include "hw/core/cpu.h" +#include "sysemu/reset.h" +#include "sysemu/runstate.h" +#include "hw/acpi/acpi.h" +#include "hw/acpi/tco.h" + +#include "hw/i386/ich9.h" +#include "hw/mem/pc-dimm.h" +#include "hw/mem/nvdimm.h" + +//#define DEBUG + +#ifdef DEBUG +#define ICH9_DEBUG(fmt, ...) \ +do { printf("%s "fmt, __func__, ## __VA_ARGS__); } while (0) +#else +#define ICH9_DEBUG(fmt, ...) 
do { } while (0) +#endif + +static void ich9_pm_update_sci_fn(ACPIREGS *regs) +{ + ICH9LPCPMRegs *pm = container_of(regs, ICH9LPCPMRegs, acpi_regs); + acpi_update_sci(&pm->acpi_regs, pm->irq); +} + +static uint64_t ich9_gpe_readb(void *opaque, hwaddr addr, unsigned width) +{ + ICH9LPCPMRegs *pm = opaque; + return acpi_gpe_ioport_readb(&pm->acpi_regs, addr); +} + +static void ich9_gpe_writeb(void *opaque, hwaddr addr, uint64_t val, + unsigned width) +{ + ICH9LPCPMRegs *pm = opaque; + acpi_gpe_ioport_writeb(&pm->acpi_regs, addr, val); + acpi_update_sci(&pm->acpi_regs, pm->irq); +} + +static const MemoryRegionOps ich9_gpe_ops = { + .read = ich9_gpe_readb, + .write = ich9_gpe_writeb, + .valid.min_access_size = 1, + .valid.max_access_size = 4, + .impl.min_access_size = 1, + .impl.max_access_size = 1, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static uint64_t ich9_smi_readl(void *opaque, hwaddr addr, unsigned width) +{ + ICH9LPCPMRegs *pm = opaque; + switch (addr) { + case 0: + return pm->smi_en; + case 4: + return pm->smi_sts; + default: + return 0; + } +} + +static void ich9_smi_writel(void *opaque, hwaddr addr, uint64_t val, + unsigned width) +{ + ICH9LPCPMRegs *pm = opaque; + TCOIORegs *tr = &pm->tco_regs; + uint64_t tco_en; + + switch (addr) { + case 0: + tco_en = pm->smi_en & ICH9_PMIO_SMI_EN_TCO_EN; + /* once TCO_LOCK bit is set, TCO_EN bit cannot be overwritten */ + if (tr->tco.cnt1 & TCO_LOCK) { + val = (val & ~ICH9_PMIO_SMI_EN_TCO_EN) | tco_en; + } + pm->smi_en &= ~pm->smi_en_wmask; + pm->smi_en |= (val & pm->smi_en_wmask); + break; + } +} + +static const MemoryRegionOps ich9_smi_ops = { + .read = ich9_smi_readl, + .write = ich9_smi_writel, + .valid.min_access_size = 4, + .valid.max_access_size = 4, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +void ich9_pm_iospace_update(ICH9LPCPMRegs *pm, uint32_t pm_io_base) +{ + ICH9_DEBUG("to 0x%x\n", pm_io_base); + + assert((pm_io_base & ICH9_PMIO_MASK) == 0); + + pm->pm_io_base = pm_io_base; + memory_region_transaction_begin(); + memory_region_set_enabled(&pm->io, pm->pm_io_base != 0); + memory_region_set_address(&pm->io, pm->pm_io_base); + memory_region_transaction_commit(); +} + +static int ich9_pm_post_load(void *opaque, int version_id) +{ + ICH9LPCPMRegs *pm = opaque; + uint32_t pm_io_base = pm->pm_io_base; + pm->pm_io_base = 0; + ich9_pm_iospace_update(pm, pm_io_base); + return 0; +} + +#define VMSTATE_GPE_ARRAY(_field, _state) \ + { \ + .name = (stringify(_field)), \ + .version_id = 0, \ + .num = ICH9_PMIO_GPE0_LEN, \ + .info = &vmstate_info_uint8, \ + .size = sizeof(uint8_t), \ + .flags = VMS_ARRAY | VMS_POINTER, \ + .offset = vmstate_offset_pointer(_state, _field, uint8_t), \ + } + +static bool vmstate_test_use_memhp(void *opaque) +{ + ICH9LPCPMRegs *s = opaque; + return s->acpi_memory_hotplug.is_enabled; +} + +static const VMStateDescription vmstate_memhp_state = { + .name = "ich9_pm/memhp", + .version_id = 1, + .minimum_version_id = 1, + .minimum_version_id_old = 1, + .needed = vmstate_test_use_memhp, + .fields = (VMStateField[]) { + VMSTATE_MEMORY_HOTPLUG(acpi_memory_hotplug, ICH9LPCPMRegs), + VMSTATE_END_OF_LIST() + } +}; + +static bool vmstate_test_use_tco(void *opaque) +{ + ICH9LPCPMRegs *s = opaque; + return s->enable_tco; +} + +static const VMStateDescription vmstate_tco_io_state = { + .name = "ich9_pm/tco", + .version_id = 1, + .minimum_version_id = 1, + .minimum_version_id_old = 1, + .needed = vmstate_test_use_tco, + .fields = (VMStateField[]) { + VMSTATE_STRUCT(tco_regs, ICH9LPCPMRegs, 1, vmstate_tco_io_sts, + TCOIORegs), + 
VMSTATE_END_OF_LIST() + } +}; + +static bool vmstate_test_use_cpuhp(void *opaque) +{ + ICH9LPCPMRegs *s = opaque; + return !s->cpu_hotplug_legacy; +} + +static int vmstate_cpuhp_pre_load(void *opaque) +{ + ICH9LPCPMRegs *s = opaque; + Object *obj = OBJECT(s->gpe_cpu.device); + object_property_set_bool(obj, "cpu-hotplug-legacy", false, &error_abort); + return 0; +} + +static const VMStateDescription vmstate_cpuhp_state = { + .name = "ich9_pm/cpuhp", + .version_id = 1, + .minimum_version_id = 1, + .minimum_version_id_old = 1, + .needed = vmstate_test_use_cpuhp, + .pre_load = vmstate_cpuhp_pre_load, + .fields = (VMStateField[]) { + VMSTATE_CPU_HOTPLUG(cpuhp_state, ICH9LPCPMRegs), + VMSTATE_END_OF_LIST() + } +}; + +static bool vmstate_test_use_pcihp(void *opaque) +{ + ICH9LPCPMRegs *s = opaque; + + return s->use_acpi_hotplug_bridge; +} + +static const VMStateDescription vmstate_pcihp_state = { + .name = "ich9_pm/pcihp", + .version_id = 1, + .minimum_version_id = 1, + .needed = vmstate_test_use_pcihp, + .fields = (VMStateField[]) { + VMSTATE_PCI_HOTPLUG(acpi_pci_hotplug, + ICH9LPCPMRegs, + NULL, NULL), + VMSTATE_END_OF_LIST() + } +}; + +const VMStateDescription vmstate_ich9_pm = { + .name = "ich9_pm", + .version_id = 1, + .minimum_version_id = 1, + .post_load = ich9_pm_post_load, + .fields = (VMStateField[]) { + VMSTATE_UINT16(acpi_regs.pm1.evt.sts, ICH9LPCPMRegs), + VMSTATE_UINT16(acpi_regs.pm1.evt.en, ICH9LPCPMRegs), + VMSTATE_UINT16(acpi_regs.pm1.cnt.cnt, ICH9LPCPMRegs), + VMSTATE_TIMER_PTR(acpi_regs.tmr.timer, ICH9LPCPMRegs), + VMSTATE_INT64(acpi_regs.tmr.overflow_time, ICH9LPCPMRegs), + VMSTATE_GPE_ARRAY(acpi_regs.gpe.sts, ICH9LPCPMRegs), + VMSTATE_GPE_ARRAY(acpi_regs.gpe.en, ICH9LPCPMRegs), + VMSTATE_UINT32(smi_en, ICH9LPCPMRegs), + VMSTATE_UINT32(smi_sts, ICH9LPCPMRegs), + VMSTATE_END_OF_LIST() + }, + .subsections = (const VMStateDescription*[]) { + &vmstate_memhp_state, + &vmstate_tco_io_state, + &vmstate_cpuhp_state, + &vmstate_pcihp_state, + NULL + } +}; + +static void pm_reset(void *opaque) +{ + ICH9LPCPMRegs *pm = opaque; + ich9_pm_iospace_update(pm, 0); + + acpi_pm1_evt_reset(&pm->acpi_regs); + acpi_pm1_cnt_reset(&pm->acpi_regs); + acpi_pm_tmr_reset(&pm->acpi_regs); + acpi_gpe_reset(&pm->acpi_regs); + + pm->smi_en = 0; + if (!pm->smm_enabled) { + /* Mark SMM as already inited to prevent SMM from running. 
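+ Presumably the firmware tests APMC_EN to decide whether SMM setup
+ has already run, so pre-setting it here makes the firmware skip SMM
+ initialization when SMM is disabled.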
*/ + pm->smi_en |= ICH9_PMIO_SMI_EN_APMC_EN; + } + pm->smi_en_wmask = ~0; + + if (pm->use_acpi_hotplug_bridge) { + acpi_pcihp_reset(&pm->acpi_pci_hotplug, true); + } + + acpi_update_sci(&pm->acpi_regs, pm->irq); +} + +static void pm_powerdown_req(Notifier *n, void *opaque) +{ + ICH9LPCPMRegs *pm = container_of(n, ICH9LPCPMRegs, powerdown_notifier); + + acpi_pm1_evt_power_down(&pm->acpi_regs); +} + +void ich9_pm_init(PCIDevice *lpc_pci, ICH9LPCPMRegs *pm, + bool smm_enabled, + qemu_irq sci_irq) +{ + memory_region_init(&pm->io, OBJECT(lpc_pci), "ich9-pm", ICH9_PMIO_SIZE); + memory_region_set_enabled(&pm->io, false); + memory_region_add_subregion(pci_address_space_io(lpc_pci), + 0, &pm->io); + + acpi_pm_tmr_init(&pm->acpi_regs, ich9_pm_update_sci_fn, &pm->io); + acpi_pm1_evt_init(&pm->acpi_regs, ich9_pm_update_sci_fn, &pm->io); + acpi_pm1_cnt_init(&pm->acpi_regs, &pm->io, pm->disable_s3, pm->disable_s4, + pm->s4_val, !pm->smm_compat && !smm_enabled); + + acpi_gpe_init(&pm->acpi_regs, ICH9_PMIO_GPE0_LEN); + memory_region_init_io(&pm->io_gpe, OBJECT(lpc_pci), &ich9_gpe_ops, pm, + "acpi-gpe0", ICH9_PMIO_GPE0_LEN); + memory_region_add_subregion(&pm->io, ICH9_PMIO_GPE0_STS, &pm->io_gpe); + + memory_region_init_io(&pm->io_smi, OBJECT(lpc_pci), &ich9_smi_ops, pm, + "acpi-smi", 8); + memory_region_add_subregion(&pm->io, ICH9_PMIO_SMI_EN, &pm->io_smi); + + pm->smm_enabled = smm_enabled; + + pm->enable_tco = true; + acpi_pm_tco_init(&pm->tco_regs, &pm->io); + + if (pm->use_acpi_hotplug_bridge) { + acpi_pcihp_init(OBJECT(lpc_pci), + &pm->acpi_pci_hotplug, + pci_get_bus(lpc_pci), + pci_address_space_io(lpc_pci), + true, + ACPI_PCIHP_ADDR_ICH9); + + qbus_set_hotplug_handler(BUS(pci_get_bus(lpc_pci)), + OBJECT(lpc_pci)); + } + + pm->irq = sci_irq; + qemu_register_reset(pm_reset, pm); + pm->powerdown_notifier.notify = pm_powerdown_req; + qemu_register_powerdown_notifier(&pm->powerdown_notifier); + + legacy_acpi_cpu_hotplug_init(pci_address_space_io(lpc_pci), + OBJECT(lpc_pci), &pm->gpe_cpu, ICH9_CPU_HOTPLUG_IO_BASE); + + if (pm->acpi_memory_hotplug.is_enabled) { + acpi_memory_hotplug_init(pci_address_space_io(lpc_pci), OBJECT(lpc_pci), + &pm->acpi_memory_hotplug, + ACPI_MEMORY_HOTPLUG_BASE); + } +} + +static void ich9_pm_get_gpe0_blk(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + ICH9LPCPMRegs *pm = opaque; + uint32_t value = pm->pm_io_base + ICH9_PMIO_GPE0_STS; + + visit_type_uint32(v, name, &value, errp); +} + +static bool ich9_pm_get_memory_hotplug_support(Object *obj, Error **errp) +{ + ICH9LPCState *s = ICH9_LPC_DEVICE(obj); + + return s->pm.acpi_memory_hotplug.is_enabled; +} + +static void ich9_pm_set_memory_hotplug_support(Object *obj, bool value, + Error **errp) +{ + ICH9LPCState *s = ICH9_LPC_DEVICE(obj); + + s->pm.acpi_memory_hotplug.is_enabled = value; +} + +static bool ich9_pm_get_cpu_hotplug_legacy(Object *obj, Error **errp) +{ + ICH9LPCState *s = ICH9_LPC_DEVICE(obj); + + return s->pm.cpu_hotplug_legacy; +} + +static void ich9_pm_set_cpu_hotplug_legacy(Object *obj, bool value, + Error **errp) +{ + ICH9LPCState *s = ICH9_LPC_DEVICE(obj); + + assert(!value); + if (s->pm.cpu_hotplug_legacy && value == false) { + acpi_switch_to_modern_cphp(&s->pm.gpe_cpu, &s->pm.cpuhp_state, + ICH9_CPU_HOTPLUG_IO_BASE); + } + s->pm.cpu_hotplug_legacy = value; +} + +static bool ich9_pm_get_enable_tco(Object *obj, Error **errp) +{ + ICH9LPCState *s = ICH9_LPC_DEVICE(obj); + return s->pm.enable_tco; +} + +static void ich9_pm_set_enable_tco(Object *obj, bool value, Error **errp) +{ + 
ICH9LPCState *s = ICH9_LPC_DEVICE(obj);
+    s->pm.enable_tco = value;
+}
+
+static bool ich9_pm_get_acpi_pci_hotplug(Object *obj, Error **errp)
+{
+    ICH9LPCState *s = ICH9_LPC_DEVICE(obj);
+
+    return s->pm.use_acpi_hotplug_bridge;
+}
+
+static void ich9_pm_set_acpi_pci_hotplug(Object *obj, bool value, Error **errp)
+{
+    ICH9LPCState *s = ICH9_LPC_DEVICE(obj);
+
+    s->pm.use_acpi_hotplug_bridge = value;
+}
+
+static bool ich9_pm_get_keep_pci_slot_hpc(Object *obj, Error **errp)
+{
+    ICH9LPCState *s = ICH9_LPC_DEVICE(obj);
+
+    return s->pm.keep_pci_slot_hpc;
+}
+
+static void ich9_pm_set_keep_pci_slot_hpc(Object *obj, bool value, Error **errp)
+{
+    ICH9LPCState *s = ICH9_LPC_DEVICE(obj);
+
+    s->pm.keep_pci_slot_hpc = value;
+}
+
+void ich9_pm_add_properties(Object *obj, ICH9LPCPMRegs *pm)
+{
+    static const uint32_t gpe0_len = ICH9_PMIO_GPE0_LEN;
+    pm->acpi_memory_hotplug.is_enabled = true;
+    pm->cpu_hotplug_legacy = true;
+    pm->disable_s3 = 0;
+    pm->disable_s4 = 0;
+    pm->s4_val = 2;
+    pm->use_acpi_hotplug_bridge = true;
+    pm->keep_pci_slot_hpc = true;
+
+    object_property_add_uint32_ptr(obj, ACPI_PM_PROP_PM_IO_BASE,
+                                   &pm->pm_io_base, OBJ_PROP_FLAG_READ);
+    object_property_add(obj, ACPI_PM_PROP_GPE0_BLK, "uint32",
+                        ich9_pm_get_gpe0_blk,
+                        NULL, NULL, pm);
+    object_property_add_uint32_ptr(obj, ACPI_PM_PROP_GPE0_BLK_LEN,
+                                   &gpe0_len, OBJ_PROP_FLAG_READ);
+    object_property_add_bool(obj, "memory-hotplug-support",
+                             ich9_pm_get_memory_hotplug_support,
+                             ich9_pm_set_memory_hotplug_support);
+    object_property_add_bool(obj, "cpu-hotplug-legacy",
+                             ich9_pm_get_cpu_hotplug_legacy,
+                             ich9_pm_set_cpu_hotplug_legacy);
+    object_property_add_uint8_ptr(obj, ACPI_PM_PROP_S3_DISABLED,
+                                  &pm->disable_s3, OBJ_PROP_FLAG_READWRITE);
+    object_property_add_uint8_ptr(obj, ACPI_PM_PROP_S4_DISABLED,
+                                  &pm->disable_s4, OBJ_PROP_FLAG_READWRITE);
+    object_property_add_uint8_ptr(obj, ACPI_PM_PROP_S4_VAL,
+                                  &pm->s4_val, OBJ_PROP_FLAG_READWRITE);
+    object_property_add_bool(obj, ACPI_PM_PROP_TCO_ENABLED,
+                             ich9_pm_get_enable_tco,
+                             ich9_pm_set_enable_tco);
+    object_property_add_bool(obj, ACPI_PM_PROP_ACPI_PCIHP_BRIDGE,
+                             ich9_pm_get_acpi_pci_hotplug,
+                             ich9_pm_set_acpi_pci_hotplug);
+    object_property_add_bool(obj, "x-keep-pci-slot-hpc",
+                             ich9_pm_get_keep_pci_slot_hpc,
+                             ich9_pm_set_keep_pci_slot_hpc);
+}
+
+void ich9_pm_device_pre_plug_cb(HotplugHandler *hotplug_dev, DeviceState *dev,
+                                Error **errp)
+{
+    ICH9LPCState *lpc = ICH9_LPC_DEVICE(hotplug_dev);
+
+    if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) {
+        acpi_pcihp_device_pre_plug_cb(hotplug_dev, dev, errp);
+        return;
+    }
+
+    if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM) &&
+        !lpc->pm.acpi_memory_hotplug.is_enabled) {
+        error_setg(errp,
+                   "memory hotplug is not enabled: %s.memory-hotplug-support "
+                   "is not set", object_get_typename(OBJECT(lpc)));
+    } else if (object_dynamic_cast(OBJECT(dev), TYPE_CPU)) {
+        uint64_t negotiated = lpc->smi_negotiated_features;
+
+        if (negotiated & BIT_ULL(ICH9_LPC_SMI_F_BROADCAST_BIT) &&
+            !(negotiated & BIT_ULL(ICH9_LPC_SMI_F_CPU_HOTPLUG_BIT))) {
+            error_setg(errp, "cpu hotplug with SMI wasn't enabled by firmware");
+            error_append_hint(errp, "update machine type to newer than 5.1 "
+                              "and firmware that supports CPU hotplug with SMM");
+        }
+    }
+}
+
+void ich9_pm_device_plug_cb(HotplugHandler *hotplug_dev, DeviceState *dev,
+                            Error **errp)
+{
+    ICH9LPCState *lpc = ICH9_LPC_DEVICE(hotplug_dev);
+
+    if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
+        if (object_dynamic_cast(OBJECT(dev), TYPE_NVDIMM)) {
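+            /*
+             * Editor's note: NVDIMM hotplug is signalled to the guest
+             * through its own GPE status bit rather than the DIMM slot
+             * interface used below; nvdimm_acpi_plug_cb() only raises
+             * that event so the guest re-reads the FIT.
+             */
+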
nvdimm_acpi_plug_cb(hotplug_dev, dev); + } else { + acpi_memory_plug_cb(hotplug_dev, &lpc->pm.acpi_memory_hotplug, + dev, errp); + } + } else if (object_dynamic_cast(OBJECT(dev), TYPE_CPU)) { + if (lpc->pm.cpu_hotplug_legacy) { + legacy_acpi_cpu_plug_cb(hotplug_dev, &lpc->pm.gpe_cpu, dev, errp); + } else { + acpi_cpu_plug_cb(hotplug_dev, &lpc->pm.cpuhp_state, dev, errp); + } + } else if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) { + acpi_pcihp_device_plug_cb(hotplug_dev, &lpc->pm.acpi_pci_hotplug, + dev, errp); + } else { + error_setg(errp, "acpi: device plug request for not supported device" + " type: %s", object_get_typename(OBJECT(dev))); + } +} + +void ich9_pm_device_unplug_request_cb(HotplugHandler *hotplug_dev, + DeviceState *dev, Error **errp) +{ + ICH9LPCState *lpc = ICH9_LPC_DEVICE(hotplug_dev); + + if (lpc->pm.acpi_memory_hotplug.is_enabled && + object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) { + acpi_memory_unplug_request_cb(hotplug_dev, + &lpc->pm.acpi_memory_hotplug, dev, + errp); + } else if (object_dynamic_cast(OBJECT(dev), TYPE_CPU) && + !lpc->pm.cpu_hotplug_legacy) { + uint64_t negotiated = lpc->smi_negotiated_features; + + if (negotiated & BIT_ULL(ICH9_LPC_SMI_F_BROADCAST_BIT) && + !(negotiated & BIT_ULL(ICH9_LPC_SMI_F_CPU_HOT_UNPLUG_BIT))) { + error_setg(errp, "cpu hot-unplug with SMI wasn't enabled " + "by firmware"); + error_append_hint(errp, "update machine type to a version having " + "x-smi-cpu-hotunplug=on and firmware that " + "supports CPU hot-unplug with SMM"); + return; + } + + acpi_cpu_unplug_request_cb(hotplug_dev, &lpc->pm.cpuhp_state, + dev, errp); + } else if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) { + acpi_pcihp_device_unplug_request_cb(hotplug_dev, + &lpc->pm.acpi_pci_hotplug, + dev, errp); + } else { + error_setg(errp, "acpi: device unplug request for not supported device" + " type: %s", object_get_typename(OBJECT(dev))); + } +} + +void ich9_pm_device_unplug_cb(HotplugHandler *hotplug_dev, DeviceState *dev, + Error **errp) +{ + ICH9LPCState *lpc = ICH9_LPC_DEVICE(hotplug_dev); + + if (lpc->pm.acpi_memory_hotplug.is_enabled && + object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) { + acpi_memory_unplug_cb(&lpc->pm.acpi_memory_hotplug, dev, errp); + } else if (object_dynamic_cast(OBJECT(dev), TYPE_CPU) && + !lpc->pm.cpu_hotplug_legacy) { + acpi_cpu_unplug_cb(&lpc->pm.cpuhp_state, dev, errp); + } else if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) { + acpi_pcihp_device_unplug_cb(hotplug_dev, &lpc->pm.acpi_pci_hotplug, + dev, errp); + } else { + error_setg(errp, "acpi: device unplug for not supported device" + " type: %s", object_get_typename(OBJECT(dev))); + } +} + +void ich9_pm_ospm_status(AcpiDeviceIf *adev, ACPIOSTInfoList ***list) +{ + ICH9LPCState *s = ICH9_LPC_DEVICE(adev); + + acpi_memory_ospm_status(&s->pm.acpi_memory_hotplug, list); + if (!s->pm.cpu_hotplug_legacy) { + acpi_cpu_ospm_status(&s->pm.cpuhp_state, list); + } +} diff --git a/hw/acpi/ipmi-stub.c b/hw/acpi/ipmi-stub.c new file mode 100644 index 000000000..8634fb325 --- /dev/null +++ b/hw/acpi/ipmi-stub.c @@ -0,0 +1,15 @@ +/* + * IPMI ACPI firmware handling + * + * Copyright (c) 2015,2016 Corey Minyard, MontaVista Software, LLC + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ */ + +#include "qemu/osdep.h" +#include "hw/acpi/ipmi.h" + +void build_acpi_ipmi_devices(Aml *table, BusState *bus, const char *resource) +{ +} diff --git a/hw/acpi/ipmi.c b/hw/acpi/ipmi.c new file mode 100644 index 000000000..96e48eba1 --- /dev/null +++ b/hw/acpi/ipmi.c @@ -0,0 +1,107 @@ +/* + * IPMI ACPI firmware handling + * + * Copyright (c) 2015,2016 Corey Minyard, MontaVista Software, LLC + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "hw/ipmi/ipmi.h" +#include "hw/acpi/aml-build.h" +#include "hw/acpi/acpi.h" +#include "hw/acpi/ipmi.h" + +static Aml *aml_ipmi_crs(IPMIFwInfo *info, const char *resource) +{ + Aml *crs = aml_resource_template(); + + /* + * The base address is fixed and cannot change. That may be different + * if someone does PCI, but we aren't there yet. + */ + switch (info->memspace) { + case IPMI_MEMSPACE_IO: + aml_append(crs, aml_io(AML_DECODE16, info->base_address, + info->base_address + info->register_length - 1, + info->register_spacing, info->register_length)); + break; + case IPMI_MEMSPACE_MEM32: + aml_append(crs, + aml_dword_memory(AML_POS_DECODE, + AML_MIN_FIXED, AML_MAX_FIXED, + AML_NON_CACHEABLE, AML_READ_WRITE, + 0xffffffff, + info->base_address, + info->base_address + info->register_length - 1, + info->register_spacing, info->register_length)); + break; + case IPMI_MEMSPACE_MEM64: + aml_append(crs, + aml_qword_memory(AML_POS_DECODE, + AML_MIN_FIXED, AML_MAX_FIXED, + AML_NON_CACHEABLE, AML_READ_WRITE, + 0xffffffffffffffffULL, + info->base_address, + info->base_address + info->register_length - 1, + info->register_spacing, info->register_length)); + break; + case IPMI_MEMSPACE_SMBUS: + aml_append(crs, aml_i2c_serial_bus_device(info->base_address, + resource)); + break; + default: + abort(); + } + + if (info->interrupt_number) { + aml_append(crs, aml_irq_no_flags(info->interrupt_number)); + } + + return crs; +} + +static Aml *aml_ipmi_device(IPMIFwInfo *info, const char *resource) +{ + Aml *dev; + uint16_t version = ((info->ipmi_spec_major_revision << 8) + | (info->ipmi_spec_minor_revision << 4)); + + assert(info->ipmi_spec_minor_revision <= 15); + + dev = aml_device("MI%d", info->uuid); + aml_append(dev, aml_name_decl("_HID", aml_eisaid("IPI0001"))); + aml_append(dev, aml_name_decl("_STR", aml_string("ipmi_%s", + info->interface_name))); + aml_append(dev, aml_name_decl("_UID", aml_int(info->uuid))); + aml_append(dev, aml_name_decl("_CRS", aml_ipmi_crs(info, resource))); + aml_append(dev, aml_name_decl("_IFT", aml_int(info->interface_type))); + aml_append(dev, aml_name_decl("_SRV", aml_int(version))); + + return dev; +} + +void build_acpi_ipmi_devices(Aml *scope, BusState *bus, const char *resource) +{ + + BusChild *kid; + + QTAILQ_FOREACH(kid, &bus->children, sibling) { + IPMIInterface *ii; + IPMIInterfaceClass *iic; + IPMIFwInfo info; + Object *obj = object_dynamic_cast(OBJECT(kid->child), + TYPE_IPMI_INTERFACE); + + if (!obj) { + continue; + } + + ii = IPMI_INTERFACE(obj); + iic = IPMI_INTERFACE_GET_CLASS(obj); + memset(&info, 0, sizeof(info)); + iic->get_fwinfo(ii, &info); + aml_append(scope, aml_ipmi_device(&info, resource)); + } +} diff --git a/hw/acpi/memory_hotplug.c b/hw/acpi/memory_hotplug.c new file mode 100644 index 000000000..d0fffcf78 --- /dev/null +++ b/hw/acpi/memory_hotplug.c @@ -0,0 +1,730 @@ +#include "qemu/osdep.h" +#include "hw/acpi/memory_hotplug.h" +#include "hw/acpi/pc-hotplug.h" +#include 
"hw/mem/pc-dimm.h" +#include "hw/qdev-core.h" +#include "migration/vmstate.h" +#include "trace.h" +#include "qapi/error.h" +#include "qapi/qapi-events-acpi.h" +#include "qapi/qapi-events-machine.h" +#include "qapi/qapi-events-qdev.h" + +#define MEMORY_SLOTS_NUMBER "MDNR" +#define MEMORY_HOTPLUG_IO_REGION "HPMR" +#define MEMORY_SLOT_ADDR_LOW "MRBL" +#define MEMORY_SLOT_ADDR_HIGH "MRBH" +#define MEMORY_SLOT_SIZE_LOW "MRLL" +#define MEMORY_SLOT_SIZE_HIGH "MRLH" +#define MEMORY_SLOT_PROXIMITY "MPX" +#define MEMORY_SLOT_ENABLED "MES" +#define MEMORY_SLOT_INSERT_EVENT "MINS" +#define MEMORY_SLOT_REMOVE_EVENT "MRMV" +#define MEMORY_SLOT_EJECT "MEJ" +#define MEMORY_SLOT_SLECTOR "MSEL" +#define MEMORY_SLOT_OST_EVENT "MOEV" +#define MEMORY_SLOT_OST_STATUS "MOSC" +#define MEMORY_SLOT_LOCK "MLCK" +#define MEMORY_SLOT_STATUS_METHOD "MRST" +#define MEMORY_SLOT_CRS_METHOD "MCRS" +#define MEMORY_SLOT_OST_METHOD "MOST" +#define MEMORY_SLOT_PROXIMITY_METHOD "MPXM" +#define MEMORY_SLOT_EJECT_METHOD "MEJ0" +#define MEMORY_SLOT_NOTIFY_METHOD "MTFY" +#define MEMORY_HOTPLUG_DEVICE "MHPD" + +static ACPIOSTInfo *acpi_memory_device_status(int slot, MemStatus *mdev) +{ + ACPIOSTInfo *info = g_new0(ACPIOSTInfo, 1); + + info->slot_type = ACPI_SLOT_TYPE_DIMM; + info->slot = g_strdup_printf("%d", slot); + info->source = mdev->ost_event; + info->status = mdev->ost_status; + if (mdev->dimm) { + DeviceState *dev = DEVICE(mdev->dimm); + if (dev->id) { + info->device = g_strdup(dev->id); + info->has_device = true; + } + } + return info; +} + +void acpi_memory_ospm_status(MemHotplugState *mem_st, ACPIOSTInfoList ***list) +{ + ACPIOSTInfoList ***tail = list; + int i; + + for (i = 0; i < mem_st->dev_count; i++) { + QAPI_LIST_APPEND(*tail, + acpi_memory_device_status(i, &mem_st->devs[i])); + } +} + +static uint64_t acpi_memory_hotplug_read(void *opaque, hwaddr addr, + unsigned int size) +{ + uint32_t val = 0; + MemHotplugState *mem_st = opaque; + MemStatus *mdev; + Object *o; + + if (mem_st->selector >= mem_st->dev_count) { + trace_mhp_acpi_invalid_slot_selected(mem_st->selector); + return 0; + } + + mdev = &mem_st->devs[mem_st->selector]; + o = OBJECT(mdev->dimm); + switch (addr) { + case 0x0: /* Lo part of phys address where DIMM is mapped */ + val = o ? object_property_get_uint(o, PC_DIMM_ADDR_PROP, NULL) : 0; + trace_mhp_acpi_read_addr_lo(mem_st->selector, val); + break; + case 0x4: /* Hi part of phys address where DIMM is mapped */ + val = + o ? object_property_get_uint(o, PC_DIMM_ADDR_PROP, NULL) >> 32 : 0; + trace_mhp_acpi_read_addr_hi(mem_st->selector, val); + break; + case 0x8: /* Lo part of DIMM size */ + val = o ? object_property_get_uint(o, PC_DIMM_SIZE_PROP, NULL) : 0; + trace_mhp_acpi_read_size_lo(mem_st->selector, val); + break; + case 0xc: /* Hi part of DIMM size */ + val = + o ? object_property_get_uint(o, PC_DIMM_SIZE_PROP, NULL) >> 32 : 0; + trace_mhp_acpi_read_size_hi(mem_st->selector, val); + break; + case 0x10: /* node proximity for _PXM method */ + val = o ? object_property_get_uint(o, PC_DIMM_NODE_PROP, NULL) : 0; + trace_mhp_acpi_read_pxm(mem_st->selector, val); + break; + case 0x14: /* pack and return is_* fields */ + val |= mdev->is_enabled ? 1 : 0; + val |= mdev->is_inserting ? 2 : 0; + val |= mdev->is_removing ? 
4 : 0; + trace_mhp_acpi_read_flags(mem_st->selector, val); + break; + default: + val = ~0; + break; + } + return val; +} + +static void acpi_memory_hotplug_write(void *opaque, hwaddr addr, uint64_t data, + unsigned int size) +{ + MemHotplugState *mem_st = opaque; + MemStatus *mdev; + ACPIOSTInfo *info; + DeviceState *dev = NULL; + HotplugHandler *hotplug_ctrl = NULL; + Error *local_err = NULL; + + if (!mem_st->dev_count) { + return; + } + + if (addr) { + if (mem_st->selector >= mem_st->dev_count) { + trace_mhp_acpi_invalid_slot_selected(mem_st->selector); + return; + } + } + + switch (addr) { + case 0x0: /* DIMM slot selector */ + mem_st->selector = data; + trace_mhp_acpi_write_slot(mem_st->selector); + break; + case 0x4: /* _OST event */ + mdev = &mem_st->devs[mem_st->selector]; + if (data == 1) { + /* TODO: handle device insert OST event */ + } else if (data == 3) { + /* TODO: handle device remove OST event */ + } + mdev->ost_event = data; + trace_mhp_acpi_write_ost_ev(mem_st->selector, mdev->ost_event); + break; + case 0x8: /* _OST status */ + mdev = &mem_st->devs[mem_st->selector]; + mdev->ost_status = data; + trace_mhp_acpi_write_ost_status(mem_st->selector, mdev->ost_status); + /* TODO: implement memory removal on guest signal */ + + info = acpi_memory_device_status(mem_st->selector, mdev); + qapi_event_send_acpi_device_ost(info); + qapi_free_ACPIOSTInfo(info); + break; + case 0x14: /* set is_* fields */ + mdev = &mem_st->devs[mem_st->selector]; + if (data & 2) { /* clear insert event */ + mdev->is_inserting = false; + trace_mhp_acpi_clear_insert_evt(mem_st->selector); + } else if (data & 4) { + mdev->is_removing = false; + trace_mhp_acpi_clear_remove_evt(mem_st->selector); + } else if (data & 8) { + if (!mdev->is_enabled) { + trace_mhp_acpi_ejecting_invalid_slot(mem_st->selector); + break; + } + + dev = DEVICE(mdev->dimm); + hotplug_ctrl = qdev_get_hotplug_handler(dev); + /* call pc-dimm unplug cb */ + hotplug_handler_unplug(hotplug_ctrl, dev, &local_err); + if (local_err) { + trace_mhp_acpi_pc_dimm_delete_failed(mem_st->selector); + + /* + * Send both MEM_UNPLUG_ERROR and DEVICE_UNPLUG_GUEST_ERROR + * while the deprecation of MEM_UNPLUG_ERROR is + * pending. + */ + qapi_event_send_mem_unplug_error(dev->id ? : "", + error_get_pretty(local_err)); + qapi_event_send_device_unplug_guest_error(!!dev->id, dev->id, + dev->canonical_path); + error_free(local_err); + break; + } + object_unparent(OBJECT(dev)); + trace_mhp_acpi_pc_dimm_deleted(mem_st->selector); + } + break; + default: + break; + } + +} +static const MemoryRegionOps acpi_memory_hotplug_ops = { + .read = acpi_memory_hotplug_read, + .write = acpi_memory_hotplug_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 1, + .max_access_size = 4, + }, +}; + +void acpi_memory_hotplug_init(MemoryRegion *as, Object *owner, + MemHotplugState *state, hwaddr io_base) +{ + MachineState *machine = MACHINE(qdev_get_machine()); + + state->dev_count = machine->ram_slots; + if (!state->dev_count) { + return; + } + + state->devs = g_malloc0(sizeof(*state->devs) * state->dev_count); + memory_region_init_io(&state->io, owner, &acpi_memory_hotplug_ops, state, + "acpi-mem-hotplug", MEMORY_HOTPLUG_IO_LEN); + memory_region_add_subregion(as, io_base, &state->io); +} + +/** + * acpi_memory_slot_status: + * @mem_st: memory hotplug state + * @dev: device + * @errp: set in case of an error + * + * Obtain a single memory slot status. + * + * This function will be called by memory unplug request cb and unplug cb. 
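+ *
+ * E.g. a DIMM created with "-device pc-dimm,id=dimm0,slot=2,memdev=m0"
+ * (hypothetical command line) resolves here to &mem_st->devs[2]; a slot
+ * at or beyond dev_count is rejected through @errp instead.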
+ */ +static MemStatus * +acpi_memory_slot_status(MemHotplugState *mem_st, + DeviceState *dev, Error **errp) +{ + Error *local_err = NULL; + int slot = object_property_get_int(OBJECT(dev), PC_DIMM_SLOT_PROP, + &local_err); + + if (local_err) { + error_propagate(errp, local_err); + return NULL; + } + + if (slot >= mem_st->dev_count) { + char *dev_path = object_get_canonical_path(OBJECT(dev)); + error_setg(errp, "acpi_memory_slot_status: " + "device [%s] returned invalid memory slot[%d]", + dev_path, slot); + g_free(dev_path); + return NULL; + } + + return &mem_st->devs[slot]; +} + +void acpi_memory_plug_cb(HotplugHandler *hotplug_dev, MemHotplugState *mem_st, + DeviceState *dev, Error **errp) +{ + MemStatus *mdev; + DeviceClass *dc = DEVICE_GET_CLASS(dev); + + if (!dc->hotpluggable) { + return; + } + + mdev = acpi_memory_slot_status(mem_st, dev, errp); + if (!mdev) { + return; + } + + mdev->dimm = dev; + mdev->is_enabled = true; + if (dev->hotplugged) { + mdev->is_inserting = true; + acpi_send_event(DEVICE(hotplug_dev), ACPI_MEMORY_HOTPLUG_STATUS); + } +} + +void acpi_memory_unplug_request_cb(HotplugHandler *hotplug_dev, + MemHotplugState *mem_st, + DeviceState *dev, Error **errp) +{ + MemStatus *mdev; + + mdev = acpi_memory_slot_status(mem_st, dev, errp); + if (!mdev) { + return; + } + + mdev->is_removing = true; + acpi_send_event(DEVICE(hotplug_dev), ACPI_MEMORY_HOTPLUG_STATUS); +} + +void acpi_memory_unplug_cb(MemHotplugState *mem_st, + DeviceState *dev, Error **errp) +{ + MemStatus *mdev; + + mdev = acpi_memory_slot_status(mem_st, dev, errp); + if (!mdev) { + return; + } + + mdev->is_enabled = false; + mdev->dimm = NULL; +} + +static const VMStateDescription vmstate_memhp_sts = { + .name = "memory hotplug device state", + .version_id = 1, + .minimum_version_id = 1, + .minimum_version_id_old = 1, + .fields = (VMStateField[]) { + VMSTATE_BOOL(is_enabled, MemStatus), + VMSTATE_BOOL(is_inserting, MemStatus), + VMSTATE_UINT32(ost_event, MemStatus), + VMSTATE_UINT32(ost_status, MemStatus), + VMSTATE_END_OF_LIST() + } +}; + +const VMStateDescription vmstate_memory_hotplug = { + .name = "memory hotplug state", + .version_id = 1, + .minimum_version_id = 1, + .minimum_version_id_old = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(selector, MemHotplugState), + VMSTATE_STRUCT_VARRAY_POINTER_UINT32(devs, MemHotplugState, dev_count, + vmstate_memhp_sts, MemStatus), + VMSTATE_END_OF_LIST() + } +}; + +void build_memory_hotplug_aml(Aml *table, uint32_t nr_mem, + const char *res_root, + const char *event_handler_method, + AmlRegionSpace rs, hwaddr memhp_io_base) +{ + int i; + Aml *ifctx; + Aml *method; + Aml *dev_container; + Aml *mem_ctrl_dev; + char *mhp_res_path; + + mhp_res_path = g_strdup_printf("%s." 
MEMORY_HOTPLUG_DEVICE, res_root); + mem_ctrl_dev = aml_device("%s", mhp_res_path); + { + Aml *crs; + + aml_append(mem_ctrl_dev, aml_name_decl("_HID", aml_string("PNP0A06"))); + aml_append(mem_ctrl_dev, + aml_name_decl("_UID", aml_string("Memory hotplug resources"))); + + crs = aml_resource_template(); + if (rs == AML_SYSTEM_IO) { + aml_append(crs, + aml_io(AML_DECODE16, memhp_io_base, memhp_io_base, 0, + MEMORY_HOTPLUG_IO_LEN) + ); + } else { + aml_append(crs, aml_memory32_fixed(memhp_io_base, + MEMORY_HOTPLUG_IO_LEN, AML_READ_WRITE)); + } + aml_append(mem_ctrl_dev, aml_name_decl("_CRS", crs)); + + aml_append(mem_ctrl_dev, aml_operation_region( + MEMORY_HOTPLUG_IO_REGION, rs, + aml_int(memhp_io_base), MEMORY_HOTPLUG_IO_LEN) + ); + + } + aml_append(table, mem_ctrl_dev); + + dev_container = aml_device(MEMORY_DEVICES_CONTAINER); + { + Aml *field; + Aml *one = aml_int(1); + Aml *zero = aml_int(0); + Aml *ret_val = aml_local(0); + Aml *slot_arg0 = aml_arg(0); + Aml *slots_nr = aml_name(MEMORY_SLOTS_NUMBER); + Aml *ctrl_lock = aml_name(MEMORY_SLOT_LOCK); + Aml *slot_selector = aml_name(MEMORY_SLOT_SLECTOR); + char *mmio_path = g_strdup_printf("%s." MEMORY_HOTPLUG_IO_REGION, + mhp_res_path); + + aml_append(dev_container, aml_name_decl("_HID", aml_string("PNP0A06"))); + aml_append(dev_container, + aml_name_decl("_UID", aml_string("DIMM devices"))); + + assert(nr_mem <= ACPI_MAX_RAM_SLOTS); + aml_append(dev_container, + aml_name_decl(MEMORY_SLOTS_NUMBER, aml_int(nr_mem)) + ); + + field = aml_field(mmio_path, AML_DWORD_ACC, + AML_NOLOCK, AML_PRESERVE); + aml_append(field, /* read only */ + aml_named_field(MEMORY_SLOT_ADDR_LOW, 32)); + aml_append(field, /* read only */ + aml_named_field(MEMORY_SLOT_ADDR_HIGH, 32)); + aml_append(field, /* read only */ + aml_named_field(MEMORY_SLOT_SIZE_LOW, 32)); + aml_append(field, /* read only */ + aml_named_field(MEMORY_SLOT_SIZE_HIGH, 32)); + aml_append(field, /* read only */ + aml_named_field(MEMORY_SLOT_PROXIMITY, 32)); + aml_append(dev_container, field); + + field = aml_field(mmio_path, AML_BYTE_ACC, + AML_NOLOCK, AML_WRITE_AS_ZEROS); + aml_append(field, aml_reserved_field(160 /* bits, Offset(20) */)); + aml_append(field, /* 1 if enabled, read only */ + aml_named_field(MEMORY_SLOT_ENABLED, 1)); + aml_append(field, + /*(read) 1 if has a insert event. (write) 1 to clear event */ + aml_named_field(MEMORY_SLOT_INSERT_EVENT, 1)); + aml_append(field, + /* (read) 1 if has a remove event. 
(write) 1 to clear event */ + aml_named_field(MEMORY_SLOT_REMOVE_EVENT, 1)); + aml_append(field, + /* initiates device eject, write only */ + aml_named_field(MEMORY_SLOT_EJECT, 1)); + aml_append(dev_container, field); + + field = aml_field(mmio_path, AML_DWORD_ACC, + AML_NOLOCK, AML_PRESERVE); + aml_append(field, /* DIMM selector, write only */ + aml_named_field(MEMORY_SLOT_SLECTOR, 32)); + aml_append(field, /* _OST event code, write only */ + aml_named_field(MEMORY_SLOT_OST_EVENT, 32)); + aml_append(field, /* _OST status code, write only */ + aml_named_field(MEMORY_SLOT_OST_STATUS, 32)); + aml_append(dev_container, field); + g_free(mmio_path); + + method = aml_method("_STA", 0, AML_NOTSERIALIZED); + ifctx = aml_if(aml_equal(slots_nr, zero)); + { + aml_append(ifctx, aml_return(zero)); + } + aml_append(method, ifctx); + /* present, functioning, decoding, not shown in UI */ + aml_append(method, aml_return(aml_int(0xB))); + aml_append(dev_container, method); + + aml_append(dev_container, aml_mutex(MEMORY_SLOT_LOCK, 0)); + + method = aml_method(MEMORY_SLOT_SCAN_METHOD, 0, AML_NOTSERIALIZED); + { + Aml *else_ctx; + Aml *while_ctx; + Aml *idx = aml_local(0); + Aml *eject_req = aml_int(3); + Aml *dev_chk = aml_int(1); + + ifctx = aml_if(aml_equal(slots_nr, zero)); + { + aml_append(ifctx, aml_return(zero)); + } + aml_append(method, ifctx); + + aml_append(method, aml_store(zero, idx)); + aml_append(method, aml_acquire(ctrl_lock, 0xFFFF)); + /* build AML that: + * loops over all slots and Notifies DIMMs with + * Device Check or Eject Request notifications if + * slot has corresponding status bit set and clears + * slot status. + */ + while_ctx = aml_while(aml_lless(idx, slots_nr)); + { + Aml *ins_evt = aml_name(MEMORY_SLOT_INSERT_EVENT); + Aml *rm_evt = aml_name(MEMORY_SLOT_REMOVE_EVENT); + + aml_append(while_ctx, aml_store(idx, slot_selector)); + ifctx = aml_if(aml_equal(ins_evt, one)); + { + aml_append(ifctx, + aml_call2(MEMORY_SLOT_NOTIFY_METHOD, + idx, dev_chk)); + aml_append(ifctx, aml_store(one, ins_evt)); + } + aml_append(while_ctx, ifctx); + + else_ctx = aml_else(); + ifctx = aml_if(aml_equal(rm_evt, one)); + { + aml_append(ifctx, + aml_call2(MEMORY_SLOT_NOTIFY_METHOD, + idx, eject_req)); + aml_append(ifctx, aml_store(one, rm_evt)); + } + aml_append(else_ctx, ifctx); + aml_append(while_ctx, else_ctx); + + aml_append(while_ctx, aml_add(idx, one, idx)); + } + aml_append(method, while_ctx); + aml_append(method, aml_release(ctrl_lock)); + aml_append(method, aml_return(one)); + } + aml_append(dev_container, method); + + method = aml_method(MEMORY_SLOT_STATUS_METHOD, 1, AML_NOTSERIALIZED); + { + Aml *slot_enabled = aml_name(MEMORY_SLOT_ENABLED); + + aml_append(method, aml_store(zero, ret_val)); + aml_append(method, aml_acquire(ctrl_lock, 0xFFFF)); + aml_append(method, + aml_store(aml_to_integer(slot_arg0), slot_selector)); + + ifctx = aml_if(aml_equal(slot_enabled, one)); + { + aml_append(ifctx, aml_store(aml_int(0xF), ret_val)); + } + aml_append(method, ifctx); + + aml_append(method, aml_release(ctrl_lock)); + aml_append(method, aml_return(ret_val)); + } + aml_append(dev_container, method); + + method = aml_method(MEMORY_SLOT_CRS_METHOD, 1, AML_SERIALIZED); + { + Aml *mr64 = aml_name("MR64"); + Aml *mr32 = aml_name("MR32"); + Aml *crs_tmpl = aml_resource_template(); + Aml *minl = aml_name("MINL"); + Aml *minh = aml_name("MINH"); + Aml *maxl = aml_name("MAXL"); + Aml *maxh = aml_name("MAXH"); + Aml *lenl = aml_name("LENL"); + Aml *lenh = aml_name("LENH"); + + aml_append(method, 
aml_acquire(ctrl_lock, 0xFFFF)); + aml_append(method, aml_store(aml_to_integer(slot_arg0), + slot_selector)); + + aml_append(crs_tmpl, + aml_qword_memory(AML_POS_DECODE, AML_MIN_FIXED, AML_MAX_FIXED, + AML_CACHEABLE, AML_READ_WRITE, + 0, 0x0, 0xFFFFFFFFFFFFFFFEULL, 0, + 0xFFFFFFFFFFFFFFFFULL)); + aml_append(method, aml_name_decl("MR64", crs_tmpl)); + aml_append(method, + aml_create_dword_field(mr64, aml_int(14), "MINL")); + aml_append(method, + aml_create_dword_field(mr64, aml_int(18), "MINH")); + aml_append(method, + aml_create_dword_field(mr64, aml_int(38), "LENL")); + aml_append(method, + aml_create_dword_field(mr64, aml_int(42), "LENH")); + aml_append(method, + aml_create_dword_field(mr64, aml_int(22), "MAXL")); + aml_append(method, + aml_create_dword_field(mr64, aml_int(26), "MAXH")); + + aml_append(method, + aml_store(aml_name(MEMORY_SLOT_ADDR_HIGH), minh)); + aml_append(method, + aml_store(aml_name(MEMORY_SLOT_ADDR_LOW), minl)); + aml_append(method, + aml_store(aml_name(MEMORY_SLOT_SIZE_HIGH), lenh)); + aml_append(method, + aml_store(aml_name(MEMORY_SLOT_SIZE_LOW), lenl)); + + /* 64-bit math: MAX = MIN + LEN - 1 */ + aml_append(method, aml_add(minl, lenl, maxl)); + aml_append(method, aml_add(minh, lenh, maxh)); + ifctx = aml_if(aml_lless(maxl, minl)); + { + aml_append(ifctx, aml_add(maxh, one, maxh)); + } + aml_append(method, ifctx); + ifctx = aml_if(aml_lless(maxl, one)); + { + aml_append(ifctx, aml_subtract(maxh, one, maxh)); + } + aml_append(method, ifctx); + aml_append(method, aml_subtract(maxl, one, maxl)); + + /* return 32-bit _CRS if addr/size is in low mem */ + /* TODO: remove it since all hotplugged DIMMs are in high mem */ + ifctx = aml_if(aml_equal(maxh, zero)); + { + crs_tmpl = aml_resource_template(); + aml_append(crs_tmpl, + aml_dword_memory(AML_POS_DECODE, AML_MIN_FIXED, + AML_MAX_FIXED, AML_CACHEABLE, + AML_READ_WRITE, + 0, 0x0, 0xFFFFFFFE, 0, + 0xFFFFFFFF)); + aml_append(ifctx, aml_name_decl("MR32", crs_tmpl)); + aml_append(ifctx, + aml_create_dword_field(mr32, aml_int(10), "MIN")); + aml_append(ifctx, + aml_create_dword_field(mr32, aml_int(14), "MAX")); + aml_append(ifctx, + aml_create_dword_field(mr32, aml_int(22), "LEN")); + aml_append(ifctx, aml_store(minl, aml_name("MIN"))); + aml_append(ifctx, aml_store(maxl, aml_name("MAX"))); + aml_append(ifctx, aml_store(lenl, aml_name("LEN"))); + + aml_append(ifctx, aml_release(ctrl_lock)); + aml_append(ifctx, aml_return(mr32)); + } + aml_append(method, ifctx); + + aml_append(method, aml_release(ctrl_lock)); + aml_append(method, aml_return(mr64)); + } + aml_append(dev_container, method); + + method = aml_method(MEMORY_SLOT_PROXIMITY_METHOD, 1, + AML_NOTSERIALIZED); + { + Aml *proximity = aml_name(MEMORY_SLOT_PROXIMITY); + + aml_append(method, aml_acquire(ctrl_lock, 0xFFFF)); + aml_append(method, aml_store(aml_to_integer(slot_arg0), + slot_selector)); + aml_append(method, aml_store(proximity, ret_val)); + aml_append(method, aml_release(ctrl_lock)); + aml_append(method, aml_return(ret_val)); + } + aml_append(dev_container, method); + + method = aml_method(MEMORY_SLOT_OST_METHOD, 4, AML_NOTSERIALIZED); + { + Aml *ost_evt = aml_name(MEMORY_SLOT_OST_EVENT); + Aml *ost_status = aml_name(MEMORY_SLOT_OST_STATUS); + + aml_append(method, aml_acquire(ctrl_lock, 0xFFFF)); + aml_append(method, aml_store(aml_to_integer(slot_arg0), + slot_selector)); + aml_append(method, aml_store(aml_arg(1), ost_evt)); + aml_append(method, aml_store(aml_arg(2), ost_status)); + aml_append(method, aml_release(ctrl_lock)); + } + aml_append(dev_container, 
method); + + method = aml_method(MEMORY_SLOT_EJECT_METHOD, 2, AML_NOTSERIALIZED); + { + Aml *eject = aml_name(MEMORY_SLOT_EJECT); + + aml_append(method, aml_acquire(ctrl_lock, 0xFFFF)); + aml_append(method, aml_store(aml_to_integer(slot_arg0), + slot_selector)); + aml_append(method, aml_store(one, eject)); + aml_append(method, aml_release(ctrl_lock)); + } + aml_append(dev_container, method); + + /* build memory devices */ + for (i = 0; i < nr_mem; i++) { + Aml *dev; + const char *s; + + dev = aml_device("MP%02X", i); + aml_append(dev, aml_name_decl("_UID", aml_string("0x%02X", i))); + aml_append(dev, aml_name_decl("_HID", aml_eisaid("PNP0C80"))); + + method = aml_method("_CRS", 0, AML_NOTSERIALIZED); + s = MEMORY_SLOT_CRS_METHOD; + aml_append(method, aml_return(aml_call1(s, aml_name("_UID")))); + aml_append(dev, method); + + method = aml_method("_STA", 0, AML_NOTSERIALIZED); + s = MEMORY_SLOT_STATUS_METHOD; + aml_append(method, aml_return(aml_call1(s, aml_name("_UID")))); + aml_append(dev, method); + + method = aml_method("_PXM", 0, AML_NOTSERIALIZED); + s = MEMORY_SLOT_PROXIMITY_METHOD; + aml_append(method, aml_return(aml_call1(s, aml_name("_UID")))); + aml_append(dev, method); + + method = aml_method("_OST", 3, AML_NOTSERIALIZED); + s = MEMORY_SLOT_OST_METHOD; + aml_append(method, + aml_call4(s, aml_name("_UID"), aml_arg(0), + aml_arg(1), aml_arg(2))); + aml_append(dev, method); + + method = aml_method("_EJ0", 1, AML_NOTSERIALIZED); + s = MEMORY_SLOT_EJECT_METHOD; + aml_append(method, + aml_call2(s, aml_name("_UID"), aml_arg(0))); + aml_append(dev, method); + + aml_append(dev_container, dev); + } + + /* build Method(MEMORY_SLOT_NOTIFY_METHOD, 2) { + * If (LEqual(Arg0, 0x00)) {Notify(MP00, Arg1)} ... } + */ + method = aml_method(MEMORY_SLOT_NOTIFY_METHOD, 2, AML_NOTSERIALIZED); + for (i = 0; i < nr_mem; i++) { + ifctx = aml_if(aml_equal(aml_arg(0), aml_int(i))); + aml_append(ifctx, + aml_notify(aml_name("MP%.02X", i), aml_arg(1)) + ); + aml_append(method, ifctx); + } + aml_append(dev_container, method); + } + aml_append(table, dev_container); + + if (event_handler_method) { + method = aml_method(event_handler_method, 0, AML_NOTSERIALIZED); + aml_append(method, aml_call0(MEMORY_DEVICES_CONTAINER "." 
+ MEMORY_SLOT_SCAN_METHOD)); + aml_append(table, method); + } + + g_free(mhp_res_path); +} diff --git a/hw/acpi/meson.build b/hw/acpi/meson.build new file mode 100644 index 000000000..adf6347bc --- /dev/null +++ b/hw/acpi/meson.build @@ -0,0 +1,33 @@ +acpi_ss = ss.source_set() +acpi_ss.add(files( + 'acpi_interface.c', + 'aml-build.c', + 'bios-linker-loader.c', + 'core.c', + 'utils.c', +)) +acpi_ss.add(when: 'CONFIG_ACPI_CPU_HOTPLUG', if_true: files('cpu.c', 'cpu_hotplug.c')) +acpi_ss.add(when: 'CONFIG_ACPI_CPU_HOTPLUG', if_false: files('acpi-cpu-hotplug-stub.c')) +acpi_ss.add(when: 'CONFIG_ACPI_MEMORY_HOTPLUG', if_true: files('memory_hotplug.c')) +acpi_ss.add(when: 'CONFIG_ACPI_MEMORY_HOTPLUG', if_false: files('acpi-mem-hotplug-stub.c')) +acpi_ss.add(when: 'CONFIG_ACPI_NVDIMM', if_true: files('nvdimm.c')) +acpi_ss.add(when: 'CONFIG_ACPI_NVDIMM', if_false: files('acpi-nvdimm-stub.c')) +acpi_ss.add(when: 'CONFIG_ACPI_PCI', if_true: files('pci.c')) +acpi_ss.add(when: 'CONFIG_ACPI_VMGENID', if_true: files('vmgenid.c')) +acpi_ss.add(when: 'CONFIG_ACPI_HW_REDUCED', if_true: files('generic_event_device.c')) +acpi_ss.add(when: 'CONFIG_ACPI_HMAT', if_true: files('hmat.c')) +acpi_ss.add(when: 'CONFIG_ACPI_APEI', if_true: files('ghes.c'), if_false: files('ghes-stub.c')) +acpi_ss.add(when: 'CONFIG_ACPI_PIIX4', if_true: files('piix4.c')) +acpi_ss.add(when: 'CONFIG_ACPI_PCIHP', if_true: files('pcihp.c')) +acpi_ss.add(when: 'CONFIG_ACPI_PCIHP', if_false: files('acpi-pci-hotplug-stub.c')) +acpi_ss.add(when: 'CONFIG_ACPI_VIOT', if_true: files('viot.c')) +acpi_ss.add(when: 'CONFIG_ACPI_X86_ICH', if_true: files('ich9.c', 'tco.c')) +acpi_ss.add(when: 'CONFIG_IPMI', if_true: files('ipmi.c'), if_false: files('ipmi-stub.c')) +acpi_ss.add(when: 'CONFIG_PC', if_false: files('acpi-x86-stub.c')) +acpi_ss.add(when: 'CONFIG_TPM', if_true: files('tpm.c')) +softmmu_ss.add(when: 'CONFIG_ACPI', if_false: files('acpi-stub.c', 'aml-build-stub.c', 'ghes-stub.c')) +softmmu_ss.add_all(when: 'CONFIG_ACPI', if_true: acpi_ss) +softmmu_ss.add(when: 'CONFIG_ALL', if_true: files('acpi-stub.c', 'aml-build-stub.c', + 'acpi-x86-stub.c', 'ipmi-stub.c', 'ghes-stub.c', + 'acpi-mem-hotplug-stub.c', 'acpi-cpu-hotplug-stub.c', + 'acpi-pci-hotplug-stub.c', 'acpi-nvdimm-stub.c')) diff --git a/hw/acpi/nvdimm.c b/hw/acpi/nvdimm.c new file mode 100644 index 000000000..0d43da19e --- /dev/null +++ b/hw/acpi/nvdimm.c @@ -0,0 +1,1378 @@ +/* + * NVDIMM ACPI Implementation + * + * Copyright(C) 2015 Intel Corporation. + * + * Author: + * Xiao Guangrong + * + * NFIT is defined in ACPI 6.0: 5.2.25 NVDIMM Firmware Interface Table (NFIT) + * and the DSM specification can be found at: + * http://pmem.io/documents/NVDIMM_DSM_Interface_Example.pdf + * + * Currently, it only supports PMEM Virtualization. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see + */ + +#include "qemu/osdep.h" +#include "qemu/uuid.h" +#include "qapi/error.h" +#include "hw/acpi/acpi.h" +#include "hw/acpi/aml-build.h" +#include "hw/acpi/bios-linker-loader.h" +#include "hw/nvram/fw_cfg.h" +#include "hw/mem/nvdimm.h" +#include "qemu/nvdimm-utils.h" + +/* + * define Byte Addressable Persistent Memory (PM) Region according to + * ACPI 6.0: 5.2.25.1 System Physical Address Range Structure. + */ +static const uint8_t nvdimm_nfit_spa_uuid[] = + UUID_LE(0x66f0d379, 0xb4f3, 0x4074, 0xac, 0x43, 0x0d, 0x33, + 0x18, 0xb7, 0x8c, 0xdb); + +/* + * define NFIT structures according to ACPI 6.0: 5.2.25 NVDIMM Firmware + * Interface Table (NFIT). + */ + +/* + * System Physical Address Range Structure + * + * It describes the system physical address ranges occupied by NVDIMMs and + * the types of the regions. + */ +struct NvdimmNfitSpa { + uint16_t type; + uint16_t length; + uint16_t spa_index; + uint16_t flags; + uint32_t reserved; + uint32_t proximity_domain; + uint8_t type_guid[16]; + uint64_t spa_base; + uint64_t spa_length; + uint64_t mem_attr; +} QEMU_PACKED; +typedef struct NvdimmNfitSpa NvdimmNfitSpa; + +/* + * Memory Device to System Physical Address Range Mapping Structure + * + * It enables identifying each NVDIMM region and the corresponding SPA + * describing the memory interleave + */ +struct NvdimmNfitMemDev { + uint16_t type; + uint16_t length; + uint32_t nfit_handle; + uint16_t phys_id; + uint16_t region_id; + uint16_t spa_index; + uint16_t dcr_index; + uint64_t region_len; + uint64_t region_offset; + uint64_t region_dpa; + uint16_t interleave_index; + uint16_t interleave_ways; + uint16_t flags; + uint16_t reserved; +} QEMU_PACKED; +typedef struct NvdimmNfitMemDev NvdimmNfitMemDev; + +#define ACPI_NFIT_MEM_NOT_ARMED (1 << 3) + +/* + * NVDIMM Control Region Structure + * + * It describes the NVDIMM and if applicable, Block Control Window. + */ +struct NvdimmNfitControlRegion { + uint16_t type; + uint16_t length; + uint16_t dcr_index; + uint16_t vendor_id; + uint16_t device_id; + uint16_t revision_id; + uint16_t sub_vendor_id; + uint16_t sub_device_id; + uint16_t sub_revision_id; + uint8_t reserved[6]; + uint32_t serial_number; + uint16_t fic; + uint16_t num_bcw; + uint64_t bcw_size; + uint64_t cmd_offset; + uint64_t cmd_size; + uint64_t status_offset; + uint64_t status_size; + uint16_t flags; + uint8_t reserved2[6]; +} QEMU_PACKED; +typedef struct NvdimmNfitControlRegion NvdimmNfitControlRegion; + +/* + * NVDIMM Platform Capabilities Structure + * + * Defined in section 5.2.25.9 of ACPI 6.2 Errata A, September 2017 + */ +struct NvdimmNfitPlatformCaps { + uint16_t type; + uint16_t length; + uint8_t highest_cap; + uint8_t reserved[3]; + uint32_t capabilities; + uint8_t reserved2[4]; +} QEMU_PACKED; +typedef struct NvdimmNfitPlatformCaps NvdimmNfitPlatformCaps; + +/* + * Module serial number is a unique number for each device. We use the + * slot id of NVDIMM device to generate this number so that each device + * associates with a different number. + * + * 0x123456 is a magic number we arbitrarily chose. + */ +static uint32_t nvdimm_slot_to_sn(int slot) +{ + return 0x123456 + slot; +} + +/* + * handle is used to uniquely associate nfit_memdev structure with NVDIMM + * ACPI device - nfit_memdev.nfit_handle matches with the value returned + * by ACPI device _ADR method. 
+ * + * We generate the handle with the slot id of NVDIMM device and reserve + * 0 for NVDIMM root device. + */ +static uint32_t nvdimm_slot_to_handle(int slot) +{ + return slot + 1; +} + +/* + * index uniquely identifies the structure, 0 is reserved which indicates + * that the structure is not valid or the associated structure is not + * present. + * + * Each NVDIMM device needs two indexes, one for nfit_spa and another for + * nfit_dc which are generated by the slot id of NVDIMM device. + */ +static uint16_t nvdimm_slot_to_spa_index(int slot) +{ + return (slot + 1) << 1; +} + +/* See the comments of nvdimm_slot_to_spa_index(). */ +static uint32_t nvdimm_slot_to_dcr_index(int slot) +{ + return nvdimm_slot_to_spa_index(slot) + 1; +} + +static NVDIMMDevice *nvdimm_get_device_by_handle(uint32_t handle) +{ + NVDIMMDevice *nvdimm = NULL; + GSList *list, *device_list = nvdimm_get_device_list(); + + for (list = device_list; list; list = list->next) { + NVDIMMDevice *nvd = list->data; + int slot = object_property_get_int(OBJECT(nvd), PC_DIMM_SLOT_PROP, + NULL); + + if (nvdimm_slot_to_handle(slot) == handle) { + nvdimm = nvd; + break; + } + } + + g_slist_free(device_list); + return nvdimm; +} + +/* ACPI 6.0: 5.2.25.1 System Physical Address Range Structure */ +static void +nvdimm_build_structure_spa(GArray *structures, DeviceState *dev) +{ + NvdimmNfitSpa *nfit_spa; + uint64_t addr = object_property_get_uint(OBJECT(dev), PC_DIMM_ADDR_PROP, + NULL); + uint64_t size = object_property_get_uint(OBJECT(dev), PC_DIMM_SIZE_PROP, + NULL); + uint32_t node = object_property_get_uint(OBJECT(dev), PC_DIMM_NODE_PROP, + NULL); + int slot = object_property_get_int(OBJECT(dev), PC_DIMM_SLOT_PROP, + NULL); + + nfit_spa = acpi_data_push(structures, sizeof(*nfit_spa)); + + nfit_spa->type = cpu_to_le16(0 /* System Physical Address Range + Structure */); + nfit_spa->length = cpu_to_le16(sizeof(*nfit_spa)); + nfit_spa->spa_index = cpu_to_le16(nvdimm_slot_to_spa_index(slot)); + + /* + * Control region is strict as all the device info, such as SN, index, + * is associated with slot id. + */ + nfit_spa->flags = cpu_to_le16(1 /* Control region is strictly for + management during hot add/online + operation */ | + 2 /* Data in Proximity Domain field is + valid*/); + + /* NUMA node. */ + nfit_spa->proximity_domain = cpu_to_le32(node); + /* the region reported as PMEM. */ + memcpy(nfit_spa->type_guid, nvdimm_nfit_spa_uuid, + sizeof(nvdimm_nfit_spa_uuid)); + + nfit_spa->spa_base = cpu_to_le64(addr); + nfit_spa->spa_length = cpu_to_le64(size); + + /* It is the PMEM and can be cached as writeback. */ + nfit_spa->mem_attr = cpu_to_le64(0x8ULL /* EFI_MEMORY_WB */ | + 0x8000ULL /* EFI_MEMORY_NV */); +} + +/* + * ACPI 6.0: 5.2.25.2 Memory Device to System Physical Address Range Mapping + * Structure + */ +static void +nvdimm_build_structure_memdev(GArray *structures, DeviceState *dev) +{ + NvdimmNfitMemDev *nfit_memdev; + NVDIMMDevice *nvdimm = NVDIMM(OBJECT(dev)); + uint64_t size = object_property_get_uint(OBJECT(dev), PC_DIMM_SIZE_PROP, + NULL); + int slot = object_property_get_int(OBJECT(dev), PC_DIMM_SLOT_PROP, + NULL); + uint32_t handle = nvdimm_slot_to_handle(slot); + + nfit_memdev = acpi_data_push(structures, sizeof(*nfit_memdev)); + + nfit_memdev->type = cpu_to_le16(1 /* Memory Device to System Address + Range Map Structure*/); + nfit_memdev->length = cpu_to_le16(sizeof(*nfit_memdev)); + nfit_memdev->nfit_handle = cpu_to_le32(handle); + + /* + * associate memory device with System Physical Address Range + * Structure. 
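+     * For slot n this pairing is deterministic: handle = n + 1,
+     * spa_index = (n + 1) * 2 and dcr_index = (n + 1) * 2 + 1, so e.g.
+     * the NVDIMM in slot 0 uses handle 1 and indexes 2 and 3.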
+ */ + nfit_memdev->spa_index = cpu_to_le16(nvdimm_slot_to_spa_index(slot)); + /* associate memory device with Control Region Structure. */ + nfit_memdev->dcr_index = cpu_to_le16(nvdimm_slot_to_dcr_index(slot)); + + /* The memory region on the device. */ + nfit_memdev->region_len = cpu_to_le64(size); + /* The device address starts from 0. */ + nfit_memdev->region_dpa = cpu_to_le64(0); + + /* Only one interleave for PMEM. */ + nfit_memdev->interleave_ways = cpu_to_le16(1); + + if (nvdimm->unarmed) { + nfit_memdev->flags |= cpu_to_le16(ACPI_NFIT_MEM_NOT_ARMED); + } +} + +/* + * ACPI 6.0: 5.2.25.5 NVDIMM Control Region Structure. + */ +static void nvdimm_build_structure_dcr(GArray *structures, DeviceState *dev) +{ + NvdimmNfitControlRegion *nfit_dcr; + int slot = object_property_get_int(OBJECT(dev), PC_DIMM_SLOT_PROP, + NULL); + uint32_t sn = nvdimm_slot_to_sn(slot); + + nfit_dcr = acpi_data_push(structures, sizeof(*nfit_dcr)); + + nfit_dcr->type = cpu_to_le16(4 /* NVDIMM Control Region Structure */); + nfit_dcr->length = cpu_to_le16(sizeof(*nfit_dcr)); + nfit_dcr->dcr_index = cpu_to_le16(nvdimm_slot_to_dcr_index(slot)); + + /* vendor: Intel. */ + nfit_dcr->vendor_id = cpu_to_le16(0x8086); + nfit_dcr->device_id = cpu_to_le16(1); + + /* The _DSM method is following Intel's DSM specification. */ + nfit_dcr->revision_id = cpu_to_le16(1 /* Current Revision supported + in ACPI 6.0 is 1. */); + nfit_dcr->serial_number = cpu_to_le32(sn); + nfit_dcr->fic = cpu_to_le16(0x301 /* Format Interface Code: + Byte addressable, no energy backed. + See ACPI 6.2, sect 5.2.25.6 and + JEDEC Annex L Release 3. */); +} + +/* + * ACPI 6.2 Errata A: 5.2.25.9 NVDIMM Platform Capabilities Structure + */ +static void +nvdimm_build_structure_caps(GArray *structures, uint32_t capabilities) +{ + NvdimmNfitPlatformCaps *nfit_caps; + + nfit_caps = acpi_data_push(structures, sizeof(*nfit_caps)); + + nfit_caps->type = cpu_to_le16(7 /* NVDIMM Platform Capabilities */); + nfit_caps->length = cpu_to_le16(sizeof(*nfit_caps)); + nfit_caps->highest_cap = 31 - clz32(capabilities); + nfit_caps->capabilities = cpu_to_le32(capabilities); +} + +static GArray *nvdimm_build_device_structure(NVDIMMState *state) +{ + GSList *device_list, *list = nvdimm_get_device_list(); + GArray *structures = g_array_new(false, true /* clear */, 1); + + for (device_list = list; device_list; device_list = device_list->next) { + DeviceState *dev = device_list->data; + + /* build System Physical Address Range Structure. */ + nvdimm_build_structure_spa(structures, dev); + + /* + * build Memory Device to System Physical Address Range Mapping + * Structure. + */ + nvdimm_build_structure_memdev(structures, dev); + + /* build NVDIMM Control Region Structure. 
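+         * Together with the SPA and MemDev structures built above, each
+         * plugged NVDIMM therefore contributes exactly three NFIT
+         * entries, tied together by the slot-derived indexes.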
*/ + nvdimm_build_structure_dcr(structures, dev); + } + g_slist_free(list); + + if (state->persistence) { + nvdimm_build_structure_caps(structures, state->persistence); + } + + return structures; +} + +static void nvdimm_init_fit_buffer(NvdimmFitBuffer *fit_buf) +{ + fit_buf->fit = g_array_new(false, true /* clear */, 1); +} + +static void nvdimm_build_fit_buffer(NVDIMMState *state) +{ + NvdimmFitBuffer *fit_buf = &state->fit_buf; + + g_array_free(fit_buf->fit, true); + fit_buf->fit = nvdimm_build_device_structure(state); + fit_buf->dirty = true; +} + +void nvdimm_plug(NVDIMMState *state) +{ + nvdimm_build_fit_buffer(state); +} + +/* + * NVDIMM Firmware Interface Table + * @signature: "NFIT" + * + * It provides information that allows OSPM to enumerate NVDIMM present in + * the platform and associate system physical address ranges created by the + * NVDIMMs. + * + * It is defined in ACPI 6.0: 5.2.25 NVDIMM Firmware Interface Table (NFIT) + */ + +static void nvdimm_build_nfit(NVDIMMState *state, GArray *table_offsets, + GArray *table_data, BIOSLinker *linker, + const char *oem_id, const char *oem_table_id) +{ + NvdimmFitBuffer *fit_buf = &state->fit_buf; + AcpiTable table = { .sig = "NFIT", .rev = 1, + .oem_id = oem_id, .oem_table_id = oem_table_id }; + + acpi_add_table(table_offsets, table_data); + + acpi_table_begin(&table, table_data); + /* Reserved */ + build_append_int_noprefix(table_data, 0, 4); + /* NVDIMM device structures. */ + g_array_append_vals(table_data, fit_buf->fit->data, fit_buf->fit->len); + acpi_table_end(linker, &table); +} + +#define NVDIMM_DSM_MEMORY_SIZE 4096 + +struct NvdimmDsmIn { + uint32_t handle; + uint32_t revision; + uint32_t function; + /* the remaining size in the page is used by arg3. */ + union { + uint8_t arg3[4084]; + }; +} QEMU_PACKED; +typedef struct NvdimmDsmIn NvdimmDsmIn; +QEMU_BUILD_BUG_ON(sizeof(NvdimmDsmIn) != NVDIMM_DSM_MEMORY_SIZE); + +struct NvdimmDsmOut { + /* the size of buffer filled by QEMU. */ + uint32_t len; + uint8_t data[4092]; +} QEMU_PACKED; +typedef struct NvdimmDsmOut NvdimmDsmOut; +QEMU_BUILD_BUG_ON(sizeof(NvdimmDsmOut) != NVDIMM_DSM_MEMORY_SIZE); + +struct NvdimmDsmFunc0Out { + /* the size of buffer filled by QEMU. */ + uint32_t len; + uint32_t supported_func; +} QEMU_PACKED; +typedef struct NvdimmDsmFunc0Out NvdimmDsmFunc0Out; + +struct NvdimmDsmFuncNoPayloadOut { + /* the size of buffer filled by QEMU. */ + uint32_t len; + uint32_t func_ret_status; +} QEMU_PACKED; +typedef struct NvdimmDsmFuncNoPayloadOut NvdimmDsmFuncNoPayloadOut; + +struct NvdimmFuncGetLabelSizeOut { + /* the size of buffer filled by QEMU. */ + uint32_t len; + uint32_t func_ret_status; /* return status code. */ + uint32_t label_size; /* the size of label data area. */ + /* + * Maximum size of the namespace label data length supported by + * the platform in Get/Set Namespace Label Data functions. + */ + uint32_t max_xfer; +} QEMU_PACKED; +typedef struct NvdimmFuncGetLabelSizeOut NvdimmFuncGetLabelSizeOut; +QEMU_BUILD_BUG_ON(sizeof(NvdimmFuncGetLabelSizeOut) > NVDIMM_DSM_MEMORY_SIZE); + +struct NvdimmFuncGetLabelDataIn { + uint32_t offset; /* the offset in the namespace label data area. */ + uint32_t length; /* the size of data is to be read via the function. */ +} QEMU_PACKED; +typedef struct NvdimmFuncGetLabelDataIn NvdimmFuncGetLabelDataIn; +QEMU_BUILD_BUG_ON(sizeof(NvdimmFuncGetLabelDataIn) + + offsetof(NvdimmDsmIn, arg3) > NVDIMM_DSM_MEMORY_SIZE); + +struct NvdimmFuncGetLabelDataOut { + /* the size of buffer filled by QEMU. 
*/
+    uint32_t len;
+    uint32_t func_ret_status; /* return status code. */
+    uint8_t out_buf[]; /* the data returned by the Get Namespace Label Data
+                          function. */
+} QEMU_PACKED;
+typedef struct NvdimmFuncGetLabelDataOut NvdimmFuncGetLabelDataOut;
+QEMU_BUILD_BUG_ON(sizeof(NvdimmFuncGetLabelDataOut) > NVDIMM_DSM_MEMORY_SIZE);
+
+struct NvdimmFuncSetLabelDataIn {
+    uint32_t offset; /* the offset in the namespace label data area. */
+    uint32_t length; /* the size of data to be written via the function. */
+    uint8_t in_buf[]; /* the data written to label data area. */
+} QEMU_PACKED;
+typedef struct NvdimmFuncSetLabelDataIn NvdimmFuncSetLabelDataIn;
+QEMU_BUILD_BUG_ON(sizeof(NvdimmFuncSetLabelDataIn) +
+                  offsetof(NvdimmDsmIn, arg3) > NVDIMM_DSM_MEMORY_SIZE);
+
+struct NvdimmFuncReadFITIn {
+    uint32_t offset; /* the offset into FIT buffer. */
+} QEMU_PACKED;
+typedef struct NvdimmFuncReadFITIn NvdimmFuncReadFITIn;
+QEMU_BUILD_BUG_ON(sizeof(NvdimmFuncReadFITIn) +
+                  offsetof(NvdimmDsmIn, arg3) > NVDIMM_DSM_MEMORY_SIZE);
+
+struct NvdimmFuncReadFITOut {
+    /* the size of buffer filled by QEMU. */
+    uint32_t len;
+    uint32_t func_ret_status; /* return status code. */
+    uint8_t fit[]; /* the FIT data. */
+} QEMU_PACKED;
+typedef struct NvdimmFuncReadFITOut NvdimmFuncReadFITOut;
+QEMU_BUILD_BUG_ON(sizeof(NvdimmFuncReadFITOut) > NVDIMM_DSM_MEMORY_SIZE);
+
+static void
+nvdimm_dsm_function0(uint32_t supported_func, hwaddr dsm_mem_addr)
+{
+    NvdimmDsmFunc0Out func0 = {
+        .len = cpu_to_le32(sizeof(func0)),
+        .supported_func = cpu_to_le32(supported_func),
+    };
+    cpu_physical_memory_write(dsm_mem_addr, &func0, sizeof(func0));
+}
+
+static void
+nvdimm_dsm_no_payload(uint32_t func_ret_status, hwaddr dsm_mem_addr)
+{
+    NvdimmDsmFuncNoPayloadOut out = {
+        .len = cpu_to_le32(sizeof(out)),
+        .func_ret_status = cpu_to_le32(func_ret_status),
+    };
+    cpu_physical_memory_write(dsm_mem_addr, &out, sizeof(out));
+}
+
+#define NVDIMM_DSM_RET_STATUS_SUCCESS 0 /* Success */
+#define NVDIMM_DSM_RET_STATUS_UNSUPPORT 1 /* Not Supported */
+#define NVDIMM_DSM_RET_STATUS_NOMEMDEV 2 /* Non-Existing Memory Device */
+#define NVDIMM_DSM_RET_STATUS_INVALID 3 /* Invalid Input Parameters */
+#define NVDIMM_DSM_RET_STATUS_FIT_CHANGED 0x100 /* FIT Changed */
+
+#define NVDIMM_QEMU_RSVD_HANDLE_ROOT 0x10000
+
+/* Read FIT data, defined in docs/specs/acpi_nvdimm.txt. */
+static void nvdimm_dsm_func_read_fit(NVDIMMState *state, NvdimmDsmIn *in,
+                                     hwaddr dsm_mem_addr)
+{
+    NvdimmFitBuffer *fit_buf = &state->fit_buf;
+    NvdimmFuncReadFITIn *read_fit;
+    NvdimmFuncReadFITOut *read_fit_out;
+    GArray *fit;
+    uint32_t read_len = 0, func_ret_status;
+    int size;
+
+    read_fit = (NvdimmFuncReadFITIn *)in->arg3;
+    read_fit->offset = le32_to_cpu(read_fit->offset);
+
+    fit = fit_buf->fit;
+
+    nvdimm_debug("Read FIT: offset 0x%x FIT size 0x%x Dirty %s.\n",
+                 read_fit->offset, fit->len, fit_buf->dirty ? "Yes" : "No");
+
+    if (read_fit->offset > fit->len) {
+        func_ret_status = NVDIMM_DSM_RET_STATUS_INVALID;
+        goto exit;
+    }
+
+    /* Reading from offset 0 starts a fresh pass over the FIT. */
+    if (!read_fit->offset) {
+        fit_buf->dirty = false;
+    } else if (fit_buf->dirty) { /* FIT has been changed during RFIT.
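+                                  * The guest is then expected to restart
+                                  * the read loop from offset 0, since
+                                  * everything fetched so far is stale.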
*/ + func_ret_status = NVDIMM_DSM_RET_STATUS_FIT_CHANGED; + goto exit; + } + + func_ret_status = NVDIMM_DSM_RET_STATUS_SUCCESS; + read_len = MIN(fit->len - read_fit->offset, + NVDIMM_DSM_MEMORY_SIZE - sizeof(NvdimmFuncReadFITOut)); + +exit: + size = sizeof(NvdimmFuncReadFITOut) + read_len; + read_fit_out = g_malloc(size); + + read_fit_out->len = cpu_to_le32(size); + read_fit_out->func_ret_status = cpu_to_le32(func_ret_status); + memcpy(read_fit_out->fit, fit->data + read_fit->offset, read_len); + + cpu_physical_memory_write(dsm_mem_addr, read_fit_out, size); + + g_free(read_fit_out); +} + +static void +nvdimm_dsm_handle_reserved_root_method(NVDIMMState *state, + NvdimmDsmIn *in, hwaddr dsm_mem_addr) +{ + switch (in->function) { + case 0x0: + nvdimm_dsm_function0(0x1 | 1 << 1 /* Read FIT */, dsm_mem_addr); + return; + case 0x1 /* Read FIT */: + nvdimm_dsm_func_read_fit(state, in, dsm_mem_addr); + return; + } + + nvdimm_dsm_no_payload(NVDIMM_DSM_RET_STATUS_UNSUPPORT, dsm_mem_addr); +} + +static void nvdimm_dsm_root(NvdimmDsmIn *in, hwaddr dsm_mem_addr) +{ + /* + * function 0 is called to inquire which functions are supported by + * OSPM + */ + if (!in->function) { + nvdimm_dsm_function0(0 /* No function supported other than + function 0 */, dsm_mem_addr); + return; + } + + /* No function except function 0 is supported yet. */ + nvdimm_dsm_no_payload(NVDIMM_DSM_RET_STATUS_UNSUPPORT, dsm_mem_addr); +} + +/* + * the max transfer size is the max size transferred by both a + * 'Get Namespace Label Data' function and a 'Set Namespace Label Data' + * function. + */ +static uint32_t nvdimm_get_max_xfer_label_size(void) +{ + uint32_t max_get_size, max_set_size, dsm_memory_size; + + dsm_memory_size = NVDIMM_DSM_MEMORY_SIZE; + + /* + * the max data ACPI can read one time which is transferred by + * the response of 'Get Namespace Label Data' function. + */ + max_get_size = dsm_memory_size - sizeof(NvdimmFuncGetLabelDataOut); + + /* + * the max data ACPI can write one time which is transferred by + * 'Set Namespace Label Data' function. + */ + max_set_size = dsm_memory_size - offsetof(NvdimmDsmIn, arg3) - + sizeof(NvdimmFuncSetLabelDataIn); + + return MIN(max_get_size, max_set_size); +} + +/* + * DSM Spec Rev1 4.4 Get Namespace Label Size (Function Index 4). + * + * It gets the size of Namespace Label data area and the max data size + * that Get/Set Namespace Label Data functions can transfer. 
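+ *
+ * A worked example with the packed layouts defined above: the Get path
+ * can return at most 4096 - sizeof(NvdimmFuncGetLabelDataOut) =
+ * 4096 - 8 = 4088 bytes per call, while the Set path can carry at most
+ * 4096 - offsetof(NvdimmDsmIn, arg3) - sizeof(NvdimmFuncSetLabelDataIn) =
+ * 4096 - 12 - 8 = 4076 bytes, so max_xfer is reported as 4076.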
+ */ +static void nvdimm_dsm_label_size(NVDIMMDevice *nvdimm, hwaddr dsm_mem_addr) +{ + NvdimmFuncGetLabelSizeOut label_size_out = { + .len = cpu_to_le32(sizeof(label_size_out)), + }; + uint32_t label_size, mxfer; + + label_size = nvdimm->label_size; + mxfer = nvdimm_get_max_xfer_label_size(); + + nvdimm_debug("label_size 0x%x, max_xfer 0x%x.\n", label_size, mxfer); + + label_size_out.func_ret_status = cpu_to_le32(NVDIMM_DSM_RET_STATUS_SUCCESS); + label_size_out.label_size = cpu_to_le32(label_size); + label_size_out.max_xfer = cpu_to_le32(mxfer); + + cpu_physical_memory_write(dsm_mem_addr, &label_size_out, + sizeof(label_size_out)); +} + +static uint32_t nvdimm_rw_label_data_check(NVDIMMDevice *nvdimm, + uint32_t offset, uint32_t length) +{ + uint32_t ret = NVDIMM_DSM_RET_STATUS_INVALID; + + if (offset + length < offset) { + nvdimm_debug("offset 0x%x + length 0x%x is overflow.\n", offset, + length); + return ret; + } + + if (nvdimm->label_size < offset + length) { + nvdimm_debug("position 0x%x is beyond label data (len = %" PRIx64 ").\n", + offset + length, nvdimm->label_size); + return ret; + } + + if (length > nvdimm_get_max_xfer_label_size()) { + nvdimm_debug("length (0x%x) is larger than max_xfer (0x%x).\n", + length, nvdimm_get_max_xfer_label_size()); + return ret; + } + + return NVDIMM_DSM_RET_STATUS_SUCCESS; +} + +/* + * DSM Spec Rev1 4.5 Get Namespace Label Data (Function Index 5). + */ +static void nvdimm_dsm_get_label_data(NVDIMMDevice *nvdimm, NvdimmDsmIn *in, + hwaddr dsm_mem_addr) +{ + NVDIMMClass *nvc = NVDIMM_GET_CLASS(nvdimm); + NvdimmFuncGetLabelDataIn *get_label_data; + NvdimmFuncGetLabelDataOut *get_label_data_out; + uint32_t status; + int size; + + get_label_data = (NvdimmFuncGetLabelDataIn *)in->arg3; + get_label_data->offset = le32_to_cpu(get_label_data->offset); + get_label_data->length = le32_to_cpu(get_label_data->length); + + nvdimm_debug("Read Label Data: offset 0x%x length 0x%x.\n", + get_label_data->offset, get_label_data->length); + + status = nvdimm_rw_label_data_check(nvdimm, get_label_data->offset, + get_label_data->length); + if (status != NVDIMM_DSM_RET_STATUS_SUCCESS) { + nvdimm_dsm_no_payload(status, dsm_mem_addr); + return; + } + + size = sizeof(*get_label_data_out) + get_label_data->length; + assert(size <= NVDIMM_DSM_MEMORY_SIZE); + get_label_data_out = g_malloc(size); + + get_label_data_out->len = cpu_to_le32(size); + get_label_data_out->func_ret_status = + cpu_to_le32(NVDIMM_DSM_RET_STATUS_SUCCESS); + nvc->read_label_data(nvdimm, get_label_data_out->out_buf, + get_label_data->length, get_label_data->offset); + + cpu_physical_memory_write(dsm_mem_addr, get_label_data_out, size); + g_free(get_label_data_out); +} + +/* + * DSM Spec Rev1 4.6 Set Namespace Label Data (Function Index 6). 
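+ *
+ * The guest packs a NvdimmFuncSetLabelDataIn into arg3: a 4-byte offset
+ * into the label data area, a 4-byte length, then 'length' bytes of
+ * label data to be written.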
+ */ +static void nvdimm_dsm_set_label_data(NVDIMMDevice *nvdimm, NvdimmDsmIn *in, + hwaddr dsm_mem_addr) +{ + NVDIMMClass *nvc = NVDIMM_GET_CLASS(nvdimm); + NvdimmFuncSetLabelDataIn *set_label_data; + uint32_t status; + + set_label_data = (NvdimmFuncSetLabelDataIn *)in->arg3; + + set_label_data->offset = le32_to_cpu(set_label_data->offset); + set_label_data->length = le32_to_cpu(set_label_data->length); + + nvdimm_debug("Write Label Data: offset 0x%x length 0x%x.\n", + set_label_data->offset, set_label_data->length); + + status = nvdimm_rw_label_data_check(nvdimm, set_label_data->offset, + set_label_data->length); + if (status != NVDIMM_DSM_RET_STATUS_SUCCESS) { + nvdimm_dsm_no_payload(status, dsm_mem_addr); + return; + } + + assert(offsetof(NvdimmDsmIn, arg3) + sizeof(*set_label_data) + + set_label_data->length <= NVDIMM_DSM_MEMORY_SIZE); + + nvc->write_label_data(nvdimm, set_label_data->in_buf, + set_label_data->length, set_label_data->offset); + nvdimm_dsm_no_payload(NVDIMM_DSM_RET_STATUS_SUCCESS, dsm_mem_addr); +} + +static void nvdimm_dsm_device(NvdimmDsmIn *in, hwaddr dsm_mem_addr) +{ + NVDIMMDevice *nvdimm = nvdimm_get_device_by_handle(in->handle); + + /* See the comments in nvdimm_dsm_root(). */ + if (!in->function) { + uint32_t supported_func = 0; + + if (nvdimm && nvdimm->label_size) { + supported_func |= 0x1 /* Bit 0 indicates whether there is + support for any functions other + than function 0. */ | + 1 << 4 /* Get Namespace Label Size */ | + 1 << 5 /* Get Namespace Label Data */ | + 1 << 6 /* Set Namespace Label Data */; + } + nvdimm_dsm_function0(supported_func, dsm_mem_addr); + return; + } + + if (!nvdimm) { + nvdimm_dsm_no_payload(NVDIMM_DSM_RET_STATUS_NOMEMDEV, + dsm_mem_addr); + return; + } + + /* Encode DSM function according to DSM Spec Rev1. */ + switch (in->function) { + case 4 /* Get Namespace Label Size */: + if (nvdimm->label_size) { + nvdimm_dsm_label_size(nvdimm, dsm_mem_addr); + return; + } + break; + case 5 /* Get Namespace Label Data */: + if (nvdimm->label_size) { + nvdimm_dsm_get_label_data(nvdimm, in, dsm_mem_addr); + return; + } + break; + case 0x6 /* Set Namespace Label Data */: + if (nvdimm->label_size) { + nvdimm_dsm_set_label_data(nvdimm, in, dsm_mem_addr); + return; + } + break; + } + + nvdimm_dsm_no_payload(NVDIMM_DSM_RET_STATUS_UNSUPPORT, dsm_mem_addr); +} + +static uint64_t +nvdimm_dsm_read(void *opaque, hwaddr addr, unsigned size) +{ + nvdimm_debug("BUG: we never read _DSM IO Port.\n"); + return 0; +} + +static void +nvdimm_dsm_write(void *opaque, hwaddr addr, uint64_t val, unsigned size) +{ + NVDIMMState *state = opaque; + NvdimmDsmIn *in; + hwaddr dsm_mem_addr = val; + + nvdimm_debug("dsm memory address 0x%" HWADDR_PRIx ".\n", dsm_mem_addr); + + /* + * The DSM memory is mapped to guest address space so an evil guest + * can change its content while we are doing DSM emulation. Avoid + * this by copying DSM memory to QEMU local memory. + */ + in = g_new(NvdimmDsmIn, 1); + cpu_physical_memory_read(dsm_mem_addr, in, sizeof(*in)); + + in->revision = le32_to_cpu(in->revision); + in->function = le32_to_cpu(in->function); + in->handle = le32_to_cpu(in->handle); + + nvdimm_debug("Revision 0x%x Handler 0x%x Function 0x%x.\n", in->revision, + in->handle, in->function); + + if (in->revision != 0x1 /* Currently we only support DSM Spec Rev1. 
*/) { + nvdimm_debug("Revision 0x%x is not supported, expected 0x%x.\n", + in->revision, 0x1); + nvdimm_dsm_no_payload(NVDIMM_DSM_RET_STATUS_UNSUPPORT, dsm_mem_addr); + goto exit; + } + + if (in->handle == NVDIMM_QEMU_RSVD_HANDLE_ROOT) { + nvdimm_dsm_handle_reserved_root_method(state, in, dsm_mem_addr); + goto exit; + } + + /* Handle 0 is reserved for NVDIMM Root Device. */ + if (!in->handle) { + nvdimm_dsm_root(in, dsm_mem_addr); + goto exit; + } + + nvdimm_dsm_device(in, dsm_mem_addr); + +exit: + g_free(in); +} + +static const MemoryRegionOps nvdimm_dsm_ops = { + .read = nvdimm_dsm_read, + .write = nvdimm_dsm_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +void nvdimm_acpi_plug_cb(HotplugHandler *hotplug_dev, DeviceState *dev) +{ + if (dev->hotplugged) { + acpi_send_event(DEVICE(hotplug_dev), ACPI_NVDIMM_HOTPLUG_STATUS); + } +} + +void nvdimm_init_acpi_state(NVDIMMState *state, MemoryRegion *io, + struct AcpiGenericAddress dsm_io, + FWCfgState *fw_cfg, Object *owner) +{ + state->dsm_io = dsm_io; + memory_region_init_io(&state->io_mr, owner, &nvdimm_dsm_ops, state, + "nvdimm-acpi-io", dsm_io.bit_width >> 3); + memory_region_add_subregion(io, dsm_io.address, &state->io_mr); + + state->dsm_mem = g_array_new(false, true /* clear */, 1); + acpi_data_push(state->dsm_mem, sizeof(NvdimmDsmIn)); + fw_cfg_add_file(fw_cfg, NVDIMM_DSM_MEM_FILE, state->dsm_mem->data, + state->dsm_mem->len); + + nvdimm_init_fit_buffer(&state->fit_buf); +} + +#define NVDIMM_COMMON_DSM "NCAL" +#define NVDIMM_ACPI_MEM_ADDR "MEMA" + +#define NVDIMM_DSM_MEMORY "NRAM" +#define NVDIMM_DSM_IOPORT "NPIO" + +#define NVDIMM_DSM_NOTIFY "NTFI" +#define NVDIMM_DSM_HANDLE "HDLE" +#define NVDIMM_DSM_REVISION "REVS" +#define NVDIMM_DSM_FUNCTION "FUNC" +#define NVDIMM_DSM_ARG3 "FARG" + +#define NVDIMM_DSM_OUT_BUF_SIZE "RLEN" +#define NVDIMM_DSM_OUT_BUF "ODAT" + +#define NVDIMM_DSM_RFIT_STATUS "RSTA" + +#define NVDIMM_QEMU_RSVD_UUID "648B9CF2-CDA1-4312-8AD9-49C4AF32BD62" + +static void nvdimm_build_common_dsm(Aml *dev, + NVDIMMState *nvdimm_state) +{ + Aml *method, *ifctx, *function, *handle, *uuid, *dsm_mem, *elsectx2; + Aml *elsectx, *unsupport, *unpatched, *expected_uuid, *uuid_invalid; + Aml *pckg, *pckg_index, *pckg_buf, *field, *dsm_out_buf, *dsm_out_buf_size; + Aml *whilectx, *offset; + uint8_t byte_list[1]; + AmlRegionSpace rs; + + method = aml_method(NVDIMM_COMMON_DSM, 5, AML_SERIALIZED); + uuid = aml_arg(0); + function = aml_arg(2); + handle = aml_arg(4); + dsm_mem = aml_local(6); + dsm_out_buf = aml_local(7); + + aml_append(method, aml_store(aml_name(NVDIMM_ACPI_MEM_ADDR), dsm_mem)); + + if (nvdimm_state->dsm_io.space_id == AML_AS_SYSTEM_IO) { + rs = AML_SYSTEM_IO; + } else { + rs = AML_SYSTEM_MEMORY; + } + + /* map DSM memory and IO into ACPI namespace. */ + aml_append(method, aml_operation_region(NVDIMM_DSM_IOPORT, rs, + aml_int(nvdimm_state->dsm_io.address), + nvdimm_state->dsm_io.bit_width >> 3)); + aml_append(method, aml_operation_region(NVDIMM_DSM_MEMORY, + AML_SYSTEM_MEMORY, dsm_mem, sizeof(NvdimmDsmIn))); + + /* + * DSM notifier: + * NVDIMM_DSM_NOTIFY: write the address of DSM memory and notify QEMU to + * emulate the access. + * + * It is an IO port, so accessing it causes a VM exit and control is + * transferred to QEMU.
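+ * + * nvdimm_dsm_write() above is the QEMU-side handler of this + * notification: it copies the input page out of guest memory, + * dispatches to the root or per-device handler, and writes the result + * payload back into the DSM memory.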
+ */ + field = aml_field(NVDIMM_DSM_IOPORT, AML_DWORD_ACC, AML_NOLOCK, + AML_PRESERVE); + aml_append(field, aml_named_field(NVDIMM_DSM_NOTIFY, + nvdimm_state->dsm_io.bit_width)); + aml_append(method, field); + + /* + * DSM input: + * NVDIMM_DSM_HANDLE: store the device's handle; it's zero if the _DSM + * call happens on the NVDIMM Root Device. + * NVDIMM_DSM_REVISION: store the Arg1 of the _DSM call. + * NVDIMM_DSM_FUNCTION: store the Arg2 of the _DSM call. + * NVDIMM_DSM_ARG3: store the Arg3 of the _DSM call, which is a Package + * containing function-specific arguments. + * + * They are backed by host RAM, so these accesses never cause a + * VM exit. + */ + field = aml_field(NVDIMM_DSM_MEMORY, AML_DWORD_ACC, AML_NOLOCK, + AML_PRESERVE); + aml_append(field, aml_named_field(NVDIMM_DSM_HANDLE, + sizeof(typeof_field(NvdimmDsmIn, handle)) * BITS_PER_BYTE)); + aml_append(field, aml_named_field(NVDIMM_DSM_REVISION, + sizeof(typeof_field(NvdimmDsmIn, revision)) * BITS_PER_BYTE)); + aml_append(field, aml_named_field(NVDIMM_DSM_FUNCTION, + sizeof(typeof_field(NvdimmDsmIn, function)) * BITS_PER_BYTE)); + aml_append(field, aml_named_field(NVDIMM_DSM_ARG3, + (sizeof(NvdimmDsmIn) - offsetof(NvdimmDsmIn, arg3)) * BITS_PER_BYTE)); + aml_append(method, field); + + /* + * DSM output: + * NVDIMM_DSM_OUT_BUF_SIZE: the size of the buffer filled by QEMU. + * NVDIMM_DSM_OUT_BUF: the buffer QEMU uses to store the result. + * + * Since the page is reused by both input and output, the input data + * will be lost after storing the new result into ODAT, so we should + * fetch all the input data before writing the result. + */ + field = aml_field(NVDIMM_DSM_MEMORY, AML_DWORD_ACC, AML_NOLOCK, + AML_PRESERVE); + aml_append(field, aml_named_field(NVDIMM_DSM_OUT_BUF_SIZE, + sizeof(typeof_field(NvdimmDsmOut, len)) * BITS_PER_BYTE)); + aml_append(field, aml_named_field(NVDIMM_DSM_OUT_BUF, + (sizeof(NvdimmDsmOut) - offsetof(NvdimmDsmOut, data)) * BITS_PER_BYTE)); + aml_append(method, field); + + /* + * Do not support any method if the DSM memory address has not been + * patched. + */ + unpatched = aml_equal(dsm_mem, aml_int(0x0)); + + expected_uuid = aml_local(0); + + ifctx = aml_if(aml_equal(handle, aml_int(0x0))); + aml_append(ifctx, aml_store( + aml_touuid("2F10E7A4-9E91-11E4-89D3-123B93F75CBA") + /* UUID for NVDIMM Root Device */, expected_uuid)); + aml_append(method, ifctx); + elsectx = aml_else(); + ifctx = aml_if(aml_equal(handle, aml_int(NVDIMM_QEMU_RSVD_HANDLE_ROOT))); + aml_append(ifctx, aml_store(aml_touuid(NVDIMM_QEMU_RSVD_UUID + /* UUID for QEMU internal use */), expected_uuid)); + aml_append(elsectx, ifctx); + elsectx2 = aml_else(); + aml_append(elsectx2, aml_store( + aml_touuid("4309AC30-0D11-11E4-9191-0800200C9A66") + /* UUID for NVDIMM Devices */, expected_uuid)); + aml_append(elsectx, elsectx2); + aml_append(method, elsectx); + + uuid_invalid = aml_lnot(aml_equal(uuid, expected_uuid)); + + unsupport = aml_if(aml_or(unpatched, uuid_invalid, NULL)); + + /* + * function 0 is called by OSPM to inquire which functions are + * supported. + */ + ifctx = aml_if(aml_equal(function, aml_int(0))); + byte_list[0] = 0 /* No functions supported */; + aml_append(ifctx, aml_return(aml_buffer(1, byte_list))); + aml_append(unsupport, ifctx); + + /* No function is supported yet.
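+ * For any other function index in the unpatched/invalid-UUID case, a + * one-byte buffer holding NVDIMM_DSM_RET_STATUS_UNSUPPORT is returned + * below.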
*/ + byte_list[0] = NVDIMM_DSM_RET_STATUS_UNSUPPORT; + aml_append(unsupport, aml_return(aml_buffer(1, byte_list))); + aml_append(method, unsupport); + + /* + * HDLE indicates which device the DSM function is issued from; + * 0 is reserved for the root device, otherwise it is the handle of an + * NVDIMM device. See the comments in nvdimm_slot_to_handle(). + */ + aml_append(method, aml_store(handle, aml_name(NVDIMM_DSM_HANDLE))); + aml_append(method, aml_store(aml_arg(1), aml_name(NVDIMM_DSM_REVISION))); + aml_append(method, aml_store(function, aml_name(NVDIMM_DSM_FUNCTION))); + + /* + * The fourth parameter (Arg3) of _DSM is a package which contains + * a buffer; the layout of the buffer is specified by UUID (Arg0), + * Revision ID (Arg1) and Function Index (Arg2), which are documented + * in the DSM Spec. + */ + pckg = aml_arg(3); + ifctx = aml_if(aml_and(aml_equal(aml_object_type(pckg), + aml_int(4 /* Package */)) /* Is it a Package? */, + aml_equal(aml_sizeof(pckg), aml_int(1)) /* 1 element? */, + NULL)); + + pckg_index = aml_local(2); + pckg_buf = aml_local(3); + aml_append(ifctx, aml_store(aml_index(pckg, aml_int(0)), pckg_index)); + aml_append(ifctx, aml_store(aml_derefof(pckg_index), pckg_buf)); + aml_append(ifctx, aml_store(pckg_buf, aml_name(NVDIMM_DSM_ARG3))); + aml_append(method, ifctx); + + /* + * tell QEMU the real address of DSM memory; QEMU then takes control + * and fills the result into DSM memory. + */ + aml_append(method, aml_store(dsm_mem, aml_name(NVDIMM_DSM_NOTIFY))); + + dsm_out_buf_size = aml_local(1); + /* RLEN is not included in the payload returned to guest. */ + aml_append(method, aml_subtract(aml_name(NVDIMM_DSM_OUT_BUF_SIZE), + aml_int(4), dsm_out_buf_size)); + + /* + * As per ACPI spec 6.3, Table 19-419 Object Conversion Rules, if + * the Buffer Field is <= the size of an Integer (in bits), it will + * be treated as an integer. Moreover, the integer size depends on + * the DSDT table's revision number. If the revision number is < 2, + * the integer size is 32 bits, otherwise it is 64 bits. + * Because of this, CreateField() cannot be used if RLEN < Integer Size. + * + * Also please note that the ACPI ASL operator SizeOf() doesn't support + * Integer and there isn't any other way to figure out the Integer + * size. Hence we assume an 8-byte Integer size and, if RLEN < 8 bytes, + * build dsm_out_buf byte by byte. + */ + ifctx = aml_if(aml_lless(dsm_out_buf_size, aml_int(8))); + offset = aml_local(2); + aml_append(ifctx, aml_store(aml_int(0), offset)); + aml_append(ifctx, aml_name_decl("TBUF", aml_buffer(1, NULL))); + aml_append(ifctx, aml_store(aml_buffer(0, NULL), dsm_out_buf)); + + whilectx = aml_while(aml_lless(offset, dsm_out_buf_size)); + /* Copy 1 byte at offset from ODAT to the temporary buffer (TBUF).
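+ * Concatenate then appends that byte to dsm_out_buf, so the result + * buffer grows by one byte per loop iteration.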
*/ + aml_append(whilectx, aml_store(aml_derefof(aml_index( + aml_name(NVDIMM_DSM_OUT_BUF), offset)), + aml_index(aml_name("TBUF"), aml_int(0)))); + aml_append(whilectx, aml_concatenate(dsm_out_buf, aml_name("TBUF"), + dsm_out_buf)); + aml_append(whilectx, aml_increment(offset)); + aml_append(ifctx, whilectx); + + aml_append(ifctx, aml_return(dsm_out_buf)); + aml_append(method, ifctx); + + /* If RLEN >= Integer size, just use the CreateField() operator */ + aml_append(method, aml_store(aml_shiftleft(dsm_out_buf_size, aml_int(3)), + dsm_out_buf_size)); + aml_append(method, aml_create_field(aml_name(NVDIMM_DSM_OUT_BUF), + aml_int(0), dsm_out_buf_size, "OBUF")); + aml_append(method, aml_return(aml_name("OBUF"))); + + aml_append(dev, method); +} + +static void nvdimm_build_device_dsm(Aml *dev, uint32_t handle) +{ + Aml *method; + + method = aml_method("_DSM", 4, AML_NOTSERIALIZED); + aml_append(method, aml_return(aml_call5(NVDIMM_COMMON_DSM, aml_arg(0), + aml_arg(1), aml_arg(2), aml_arg(3), + aml_int(handle)))); + aml_append(dev, method); +} + +static void nvdimm_build_fit(Aml *dev) +{ + Aml *method, *pkg, *buf, *buf_size, *offset, *call_result; + Aml *whilectx, *ifcond, *ifctx, *elsectx, *fit; + + buf = aml_local(0); + buf_size = aml_local(1); + fit = aml_local(2); + + aml_append(dev, aml_name_decl(NVDIMM_DSM_RFIT_STATUS, aml_int(0))); + + /* build helper function, RFIT. */ + method = aml_method("RFIT", 1, AML_SERIALIZED); + aml_append(method, aml_name_decl("OFST", aml_int(0))); + + /* prepare input package. */ + pkg = aml_package(1); + aml_append(method, aml_store(aml_arg(0), aml_name("OFST"))); + aml_append(pkg, aml_name("OFST")); + + /* call Read_FIT function. */ + call_result = aml_call5(NVDIMM_COMMON_DSM, + aml_touuid(NVDIMM_QEMU_RSVD_UUID), + aml_int(1) /* Revision 1 */, + aml_int(0x1) /* Read FIT */, + pkg, aml_int(NVDIMM_QEMU_RSVD_HANDLE_ROOT)); + aml_append(method, aml_store(call_result, buf)); + + /* handle _DSM result. */ + aml_append(method, aml_create_dword_field(buf, + aml_int(0) /* offset at byte 0 */, "STAU")); + + aml_append(method, aml_store(aml_name("STAU"), + aml_name(NVDIMM_DSM_RFIT_STATUS))); + + /* if something went wrong during _DSM. */ + ifcond = aml_equal(aml_int(NVDIMM_DSM_RET_STATUS_SUCCESS), + aml_name("STAU")); + ifctx = aml_if(aml_lnot(ifcond)); + aml_append(ifctx, aml_return(aml_buffer(0, NULL))); + aml_append(method, ifctx); + + aml_append(method, aml_store(aml_sizeof(buf), buf_size)); + aml_append(method, aml_subtract(buf_size, + aml_int(4) /* the size of "STAU" */, + buf_size)); + + /* if we have read to the end of the FIT. */ + ifctx = aml_if(aml_equal(buf_size, aml_int(0))); + aml_append(ifctx, aml_return(aml_buffer(0, NULL))); + aml_append(method, ifctx); + + aml_append(method, aml_create_field(buf, + aml_int(4 * BITS_PER_BYTE), /* offset at byte 4. */ + aml_shiftleft(buf_size, aml_int(3)), "BUFF")); + aml_append(method, aml_return(aml_name("BUFF"))); + aml_append(dev, method); + + /* build _FIT. */ + method = aml_method("_FIT", 0, AML_SERIALIZED); + offset = aml_local(3); + + aml_append(method, aml_store(aml_buffer(0, NULL), fit)); + aml_append(method, aml_store(aml_int(0), offset)); + + whilectx = aml_while(aml_int(1)); + aml_append(whilectx, aml_store(aml_call1("RFIT", offset), buf)); + aml_append(whilectx, aml_store(aml_sizeof(buf), buf_size)); + + /* + * if the FIT buffer was changed during RFIT, read from the beginning + * again.
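+ * (RFIT stores the _DSM status in RSTA; NVDIMM_DSM_RET_STATUS_FIT_CHANGED + * means a hotplug event updated the FIT while it was being read.)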
+ */ + ifctx = aml_if(aml_equal(aml_name(NVDIMM_DSM_RFIT_STATUS), + aml_int(NVDIMM_DSM_RET_STATUS_FIT_CHANGED))); + aml_append(ifctx, aml_store(aml_buffer(0, NULL), fit)); + aml_append(ifctx, aml_store(aml_int(0), offset)); + aml_append(whilectx, ifctx); + + elsectx = aml_else(); + + /* finish fit read if no data is read out. */ + ifctx = aml_if(aml_equal(buf_size, aml_int(0))); + aml_append(ifctx, aml_return(fit)); + aml_append(elsectx, ifctx); + + /* update the offset. */ + aml_append(elsectx, aml_add(offset, buf_size, offset)); + /* append the data we read out to the fit buffer. */ + aml_append(elsectx, aml_concatenate(fit, buf, fit)); + aml_append(whilectx, elsectx); + aml_append(method, whilectx); + + aml_append(dev, method); +} + +static void nvdimm_build_nvdimm_devices(Aml *root_dev, uint32_t ram_slots) +{ + uint32_t slot; + + for (slot = 0; slot < ram_slots; slot++) { + uint32_t handle = nvdimm_slot_to_handle(slot); + Aml *nvdimm_dev; + + nvdimm_dev = aml_device("NV%02X", slot); + + /* + * ACPI 6.0: 9.20 NVDIMM Devices: + * + * _ADR object that is used to supply OSPM with unique address + * of the NVDIMM device. This is done by returning the NFIT Device + * handle that is used to identify the associated entries in ACPI + * table NFIT or _FIT. + */ + aml_append(nvdimm_dev, aml_name_decl("_ADR", aml_int(handle))); + + nvdimm_build_device_dsm(nvdimm_dev, handle); + aml_append(root_dev, nvdimm_dev); + } +} + +static void nvdimm_build_ssdt(GArray *table_offsets, GArray *table_data, + BIOSLinker *linker, + NVDIMMState *nvdimm_state, + uint32_t ram_slots, const char *oem_id) +{ + int mem_addr_offset; + Aml *ssdt, *sb_scope, *dev; + AcpiTable table = { .sig = "SSDT", .rev = 1, + .oem_id = oem_id, .oem_table_id = "NVDIMM" }; + + acpi_add_table(table_offsets, table_data); + + acpi_table_begin(&table, table_data); + ssdt = init_aml_allocator(); + sb_scope = aml_scope("\\_SB"); + + dev = aml_device("NVDR"); + + /* + * ACPI 6.0: 9.20 NVDIMM Devices: + * + * The ACPI Name Space device uses _HID of ACPI0012 to identify the root + * NVDIMM interface device. Platform firmware is required to contain one + * such device in _SB scope if NVDIMMs support is exposed by platform to + * OSPM. + * For each NVDIMM present or intended to be supported by platform, + * platform firmware also exposes an ACPI Namespace Device under the + * root device. + */ + aml_append(dev, aml_name_decl("_HID", aml_string("ACPI0012"))); + + nvdimm_build_common_dsm(dev, nvdimm_state); + + /* 0 is reserved for root device. 
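+ * The root NVDR device therefore gets a _DSM that invokes NCAL with + * handle 0; the per-slot NVDIMM devices built below use the non-zero + * handles returned by nvdimm_slot_to_handle().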
*/ + nvdimm_build_device_dsm(dev, 0); + nvdimm_build_fit(dev); + + nvdimm_build_nvdimm_devices(dev, ram_slots); + + aml_append(sb_scope, dev); + aml_append(ssdt, sb_scope); + + /* copy AML table into ACPI tables blob and patch header there */ + g_array_append_vals(table_data, ssdt->buf->data, ssdt->buf->len); + mem_addr_offset = build_append_named_dword(table_data, + NVDIMM_ACPI_MEM_ADDR); + + bios_linker_loader_alloc(linker, + NVDIMM_DSM_MEM_FILE, nvdimm_state->dsm_mem, + sizeof(NvdimmDsmIn), false /* high memory */); + bios_linker_loader_add_pointer(linker, + ACPI_BUILD_TABLE_FILE, mem_addr_offset, sizeof(uint32_t), + NVDIMM_DSM_MEM_FILE, 0); + free_aml_allocator(); + /* + * This must be executed last, so that the pointer patching command + * above is executed by the guest before it recalculates the checksum, + * which was scheduled by acpi_table_end(). + */ + acpi_table_end(linker, &table); +} + +void nvdimm_build_srat(GArray *table_data) +{ + GSList *device_list, *list = nvdimm_get_device_list(); + + for (device_list = list; device_list; device_list = device_list->next) { + DeviceState *dev = device_list->data; + Object *obj = OBJECT(dev); + uint64_t addr, size; + int node; + + node = object_property_get_int(obj, PC_DIMM_NODE_PROP, &error_abort); + addr = object_property_get_uint(obj, PC_DIMM_ADDR_PROP, &error_abort); + size = object_property_get_uint(obj, PC_DIMM_SIZE_PROP, &error_abort); + + build_srat_memory(table_data, addr, size, node, + MEM_AFFINITY_ENABLED | MEM_AFFINITY_NON_VOLATILE); + } + g_slist_free(list); +} + +void nvdimm_build_acpi(GArray *table_offsets, GArray *table_data, + BIOSLinker *linker, NVDIMMState *state, + uint32_t ram_slots, const char *oem_id, + const char *oem_table_id) +{ + GSList *device_list; + + /* no NVDIMM device can be plugged. */ + if (!ram_slots) { + return; + } + + nvdimm_build_ssdt(table_offsets, table_data, linker, state, + ram_slots, oem_id); + + device_list = nvdimm_get_device_list(); + /* no NVDIMM device is plugged. */ + if (!device_list) { + return; + } + + nvdimm_build_nfit(state, table_offsets, table_data, linker, + oem_id, oem_table_id); + g_slist_free(device_list); +} diff --git a/hw/acpi/pci.c b/hw/acpi/pci.c new file mode 100644 index 000000000..20b70dcd8 --- /dev/null +++ b/hw/acpi/pci.c @@ -0,0 +1,61 @@ +/* + * Support for generating PCI related ACPI tables and passing them to Guests + * + * Copyright (C) 2006 Fabrice Bellard + * Copyright (C) 2008-2010 Kevin O'Connor + * Copyright (C) 2013-2019 Red Hat Inc + * Copyright (C) 2019 Intel Corporation + * + * Author: Wei Yang + * Author: Michael S. Tsirkin + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + + * You should have received a copy of the GNU General Public License along + * with this program; if not, see <http://www.gnu.org/licenses/>. + */ + +#include "qemu/osdep.h" +#include "hw/acpi/aml-build.h" +#include "hw/acpi/pci.h" +#include "hw/pci/pcie_host.h" + +/* + * PCI Firmware Specification, Revision 3.0 + * 4.1.2 MCFG Table Description.
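+ * + * Note: the ECAM window described by MCFG maps the configuration space + * of bus/device/function at base + ((bus << 20) | (device << 15) | + * (function << 12)).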
+ */ +void build_mcfg(GArray *table_data, BIOSLinker *linker, AcpiMcfgInfo *info, + const char *oem_id, const char *oem_table_id) +{ + AcpiTable table = { .sig = "MCFG", .rev = 1, + .oem_id = oem_id, .oem_table_id = oem_table_id }; + + acpi_table_begin(&table, table_data); + + /* Reserved */ + build_append_int_noprefix(table_data, 0, 8); + /* + * Memory Mapped Enhanced Configuration Space Base Address Allocation + * Structure + */ + /* Base address, processor-relative */ + build_append_int_noprefix(table_data, info->base, 8); + /* PCI segment group number */ + build_append_int_noprefix(table_data, 0, 2); + /* Starting PCI Bus number */ + build_append_int_noprefix(table_data, 0, 1); + /* Final PCI Bus number */ + build_append_int_noprefix(table_data, PCIE_MMCFG_BUS(info->size - 1), 1); + /* Reserved */ + build_append_int_noprefix(table_data, 0, 4); + + acpi_table_end(linker, &table); +} diff --git a/hw/acpi/pcihp.c b/hw/acpi/pcihp.c new file mode 100644 index 000000000..30405b511 --- /dev/null +++ b/hw/acpi/pcihp.c @@ -0,0 +1,564 @@ +/* + * QEMU<->ACPI BIOS PCI hotplug interface + * + * QEMU supports PCI hotplug via ACPI. This module + * implements the interface between QEMU and the ACPI BIOS. + * Interface specification - see docs/specs/acpi_pci_hotplug.txt + * + * Copyright (c) 2013, Red Hat Inc, Michael S. Tsirkin (mst@redhat.com) + * Copyright (c) 2006 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License version 2.1 as published by the Free Software Foundation. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see + * + * Contributions after 2012-01-13 are licensed under the terms of the + * GNU GPL, version 2 or (at your option) any later version. + */ + +#include "qemu/osdep.h" +#include "hw/acpi/pcihp.h" + +#include "hw/pci-host/i440fx.h" +#include "hw/pci/pci.h" +#include "hw/pci/pci_bridge.h" +#include "hw/pci/pci_host.h" +#include "hw/pci/pcie_port.h" +#include "hw/i386/acpi-build.h" +#include "hw/acpi/acpi.h" +#include "hw/pci/pci_bus.h" +#include "migration/vmstate.h" +#include "qapi/error.h" +#include "qom/qom-qobject.h" +#include "trace.h" + +#define ACPI_PCIHP_SIZE 0x0018 +#define PCI_UP_BASE 0x0000 +#define PCI_DOWN_BASE 0x0004 +#define PCI_EJ_BASE 0x0008 +#define PCI_RMV_BASE 0x000c +#define PCI_SEL_BASE 0x0010 +#define PCI_AIDX_BASE 0x0014 + +typedef struct AcpiPciHpFind { + int bsel; + PCIBus *bus; +} AcpiPciHpFind; + +static gint g_cmp_uint32(gconstpointer a, gconstpointer b, gpointer user_data) +{ + return a - b; +} + +static GSequence *pci_acpi_index_list(void) +{ + static GSequence *used_acpi_index_list; + + if (!used_acpi_index_list) { + used_acpi_index_list = g_sequence_new(NULL); + } + return used_acpi_index_list; +} + +static int acpi_pcihp_get_bsel(PCIBus *bus) +{ + Error *local_err = NULL; + uint64_t bsel = object_property_get_uint(OBJECT(bus), ACPI_PCIHP_PROP_BSEL, + &local_err); + + if (local_err || bsel >= ACPI_PCIHP_MAX_HOTPLUG_BUS) { + if (local_err) { + error_free(local_err); + } + return -1; + } else { + return bsel; + } +} + +/* Assign BSEL property to all buses. 
In the future, this can be changed + * to only assign to buses that support hotplug. + */ +static void *acpi_set_bsel(PCIBus *bus, void *opaque) +{ + unsigned *bsel_alloc = opaque; + unsigned *bus_bsel; + + if (qbus_is_hotpluggable(BUS(bus))) { + bus_bsel = g_malloc(sizeof *bus_bsel); + + *bus_bsel = (*bsel_alloc)++; + object_property_add_uint32_ptr(OBJECT(bus), ACPI_PCIHP_PROP_BSEL, + bus_bsel, OBJ_PROP_FLAG_READ); + } + + return bsel_alloc; +} + +static void acpi_set_pci_info(void) +{ + static bool bsel_is_set; + Object *host = acpi_get_i386_pci_host(); + PCIBus *bus; + unsigned bsel_alloc = ACPI_PCIHP_BSEL_DEFAULT; + + if (bsel_is_set) { + return; + } + bsel_is_set = true; + + if (!host) { + return; + } + + bus = PCI_HOST_BRIDGE(host)->bus; + if (bus) { + /* Scan all PCI buses. Set property to enable acpi based hotplug. */ + pci_for_each_bus_depth_first(bus, acpi_set_bsel, NULL, &bsel_alloc); + } +} + +static void acpi_pcihp_disable_root_bus(void) +{ + static bool root_hp_disabled; + Object *host = acpi_get_i386_pci_host(); + PCIBus *bus; + + if (root_hp_disabled) { + return; + } + + bus = PCI_HOST_BRIDGE(host)->bus; + if (bus) { + /* setting the hotplug handler to NULL makes the bus non-hotpluggable */ + qbus_set_hotplug_handler(BUS(bus), NULL); + } + root_hp_disabled = true; + return; +} + +static void acpi_pcihp_test_hotplug_bus(PCIBus *bus, void *opaque) +{ + AcpiPciHpFind *find = opaque; + if (find->bsel == acpi_pcihp_get_bsel(bus)) { + find->bus = bus; + } +} + +static PCIBus *acpi_pcihp_find_hotplug_bus(AcpiPciHpState *s, int bsel) +{ + AcpiPciHpFind find = { .bsel = bsel, .bus = NULL }; + + if (bsel < 0) { + return NULL; + } + + pci_for_each_bus(s->root, acpi_pcihp_test_hotplug_bus, &find); + + /* Make bsel 0 eject root bus if bsel property is not set, + * for compatibility with non acpi setups. + * TODO: really needed? + */ + if (!bsel && !find.bus) { + find.bus = s->root; + } + + /* + * Check if find.bus is actually hotpluggable. If bsel is set to + * NULL for example on the root bus in order to make it + * non-hotpluggable, find.bus will match the root bus when bsel + * is 0. See acpi_pcihp_test_hotplug_bus() above. Since the + * bus is not hotpluggable however, we should not select the bus. + * Instead, we should set find.bus to NULL in that case. In the check + * below, we generalize this case for all buses, not just the root bus. + * The callers of this function check for a null return value and + * handle them appropriately. + */ + if (find.bus && !qbus_is_hotpluggable(BUS(find.bus))) { + find.bus = NULL; + } + return find.bus; +} + +static bool acpi_pcihp_pc_no_hotplug(AcpiPciHpState *s, PCIDevice *dev) +{ + PCIDeviceClass *pc = PCI_DEVICE_GET_CLASS(dev); + DeviceClass *dc = DEVICE_GET_CLASS(dev); + /* + * ACPI doesn't allow hotplug of bridge devices. Don't allow + * hot-unplug of bridge devices unless they were added by hotplug + * (and so, not described by acpi). 
+ */ + return (pc->is_bridge && !dev->qdev.hotplugged) || !dc->hotpluggable; +} + +static void acpi_pcihp_eject_slot(AcpiPciHpState *s, unsigned bsel, unsigned slots) +{ + HotplugHandler *hotplug_ctrl; + BusChild *kid, *next; + int slot = ctz32(slots); + PCIBus *bus = acpi_pcihp_find_hotplug_bus(s, bsel); + + trace_acpi_pci_eject_slot(bsel, slot); + + if (!bus || slot > 31) { + return; + } + + /* Mark request as complete */ + s->acpi_pcihp_pci_status[bsel].down &= ~(1U << slot); + s->acpi_pcihp_pci_status[bsel].up &= ~(1U << slot); + + QTAILQ_FOREACH_SAFE(kid, &bus->qbus.children, sibling, next) { + DeviceState *qdev = kid->child; + PCIDevice *dev = PCI_DEVICE(qdev); + if (PCI_SLOT(dev->devfn) == slot) { + if (!acpi_pcihp_pc_no_hotplug(s, dev)) { + /* + * partially_hotplugged is used by virtio-net failover: + * failover has asked the guest OS to unplug the device + * but we need to keep some references to the device + * to be able to plug it back in case of failure so + * we don't execute hotplug_handler_unplug(). + */ + if (dev->partially_hotplugged) { + /* + * pending_deleted_event is set to true when + * virtio-net failover asks to unplug the device, + * and set to false here when the operation is done. + * This is used by the migration loop to detect the + * end of the operation and really start the migration. + */ + qdev->pending_deleted_event = false; + } else { + hotplug_ctrl = qdev_get_hotplug_handler(qdev); + hotplug_handler_unplug(hotplug_ctrl, qdev, &error_abort); + object_unparent(OBJECT(qdev)); + } + } + } + } +} + +static void acpi_pcihp_update_hotplug_bus(AcpiPciHpState *s, int bsel) +{ + BusChild *kid, *next; + PCIBus *bus = acpi_pcihp_find_hotplug_bus(s, bsel); + + /* Execute any pending removes during reset */ + while (s->acpi_pcihp_pci_status[bsel].down) { + acpi_pcihp_eject_slot(s, bsel, s->acpi_pcihp_pci_status[bsel].down); + } + + s->acpi_pcihp_pci_status[bsel].hotplug_enable = ~0; + + if (!bus) { + return; + } + QTAILQ_FOREACH_SAFE(kid, &bus->qbus.children, sibling, next) { + DeviceState *qdev = kid->child; + PCIDevice *pdev = PCI_DEVICE(qdev); + int slot = PCI_SLOT(pdev->devfn); + + if (acpi_pcihp_pc_no_hotplug(s, pdev)) { + s->acpi_pcihp_pci_status[bsel].hotplug_enable &= ~(1U << slot); + } + } +} + +static void acpi_pcihp_update(AcpiPciHpState *s) +{ + int i; + + for (i = 0; i < ACPI_PCIHP_MAX_HOTPLUG_BUS; ++i) { + acpi_pcihp_update_hotplug_bus(s, i); + } +} + +void acpi_pcihp_reset(AcpiPciHpState *s, bool acpihp_root_off) +{ + if (acpihp_root_off) { + acpi_pcihp_disable_root_bus(); + } + acpi_set_pci_info(); + acpi_pcihp_update(s); +} + +#define ONBOARD_INDEX_MAX (16 * 1024 - 1) + +void acpi_pcihp_device_pre_plug_cb(HotplugHandler *hotplug_dev, + DeviceState *dev, Error **errp) +{ + PCIDevice *pdev = PCI_DEVICE(dev); + + /* Only hotplugged devices need the hotplug capability. */ + if (dev->hotplugged && + acpi_pcihp_get_bsel(pci_get_bus(pdev)) < 0) { + error_setg(errp, "Unsupported bus.
Bus doesn't have property '" + ACPI_PCIHP_PROP_BSEL "' set"); + return; + } + + /* + * This limit is enforced by systemd (see udev-builtin-net_id.c); + * as it is the only known user, honor it to avoid users + * misconfiguring QEMU and then wondering why acpi-index doesn't work. + */ + if (pdev->acpi_index > ONBOARD_INDEX_MAX) { + error_setg(errp, "acpi-index should be less than or equal to %u", + ONBOARD_INDEX_MAX); + return; + } + + /* + * make sure that acpi-index is unique across all present PCI devices + */ + if (pdev->acpi_index) { + GSequence *used_indexes = pci_acpi_index_list(); + + if (g_sequence_lookup(used_indexes, GINT_TO_POINTER(pdev->acpi_index), + g_cmp_uint32, NULL)) { + error_setg(errp, "a PCI device with acpi-index = %" PRIu32 + " already exists", pdev->acpi_index); + return; + } + g_sequence_insert_sorted(used_indexes, + GINT_TO_POINTER(pdev->acpi_index), + g_cmp_uint32, NULL); + } +} + +void acpi_pcihp_device_plug_cb(HotplugHandler *hotplug_dev, AcpiPciHpState *s, + DeviceState *dev, Error **errp) +{ + PCIDevice *pdev = PCI_DEVICE(dev); + int slot = PCI_SLOT(pdev->devfn); + int bsel; + + /* Don't send an event when a device is enabled during qemu machine + * creation: it is present on boot, no hotplug event is necessary. We do + * send an event when the device is disabled later. */ + if (!dev->hotplugged) { + /* + * Overwrite the default hotplug handler with the ACPI PCI one + * for cold plugged bridges only. + */ + if (!s->legacy_piix && + object_dynamic_cast(OBJECT(dev), TYPE_PCI_BRIDGE)) { + PCIBus *sec = pci_bridge_get_sec_bus(PCI_BRIDGE(pdev)); + + /* Remove all hot-plug handlers if hot-plug is disabled on slot */ + if (object_dynamic_cast(OBJECT(dev), TYPE_PCIE_SLOT) && + !PCIE_SLOT(pdev)->hotplug) { + qbus_set_hotplug_handler(BUS(sec), NULL); + return; + } + + qbus_set_hotplug_handler(BUS(sec), OBJECT(hotplug_dev)); + /* We don't have to overwrite any other hotplug handler yet */ + assert(QLIST_EMPTY(&sec->child)); + } + + return; + } + + bsel = acpi_pcihp_get_bsel(pci_get_bus(pdev)); + g_assert(bsel >= 0); + s->acpi_pcihp_pci_status[bsel].up |= (1U << slot); + acpi_send_event(DEVICE(hotplug_dev), ACPI_PCI_HOTPLUG_STATUS); +} + +void acpi_pcihp_device_unplug_cb(HotplugHandler *hotplug_dev, AcpiPciHpState *s, + DeviceState *dev, Error **errp) +{ + PCIDevice *pdev = PCI_DEVICE(dev); + + trace_acpi_pci_unplug(PCI_SLOT(pdev->devfn), + acpi_pcihp_get_bsel(pci_get_bus(pdev))); + + /* + * clean up acpi-index so it can be reused by another device + */ + if (pdev->acpi_index) { + GSequence *used_indexes = pci_acpi_index_list(); + + g_sequence_remove(g_sequence_lookup(used_indexes, + GINT_TO_POINTER(pdev->acpi_index), + g_cmp_uint32, NULL)); + } + + qdev_unrealize(dev); +} + +void acpi_pcihp_device_unplug_request_cb(HotplugHandler *hotplug_dev, + AcpiPciHpState *s, DeviceState *dev, + Error **errp) +{ + PCIDevice *pdev = PCI_DEVICE(dev); + int slot = PCI_SLOT(pdev->devfn); + int bsel = acpi_pcihp_get_bsel(pci_get_bus(pdev)); + + trace_acpi_pci_unplug_request(bsel, slot); + + if (bsel < 0) { + error_setg(errp, "Unsupported bus. Bus doesn't have property '" + ACPI_PCIHP_PROP_BSEL "' set"); + return; + } + + /* + * pending_deleted_event is used by virtio-net failover to detect the + * end of the unplug operation; the flag is set to false in + * acpi_pcihp_eject_slot() when the operation is completed.
+ */ + pdev->qdev.pending_deleted_event = true; + s->acpi_pcihp_pci_status[bsel].down |= (1U << slot); + acpi_send_event(DEVICE(hotplug_dev), ACPI_PCI_HOTPLUG_STATUS); +} + +static uint64_t pci_read(void *opaque, hwaddr addr, unsigned int size) +{ + AcpiPciHpState *s = opaque; + uint32_t val = 0; + int bsel = s->hotplug_select; + + if (bsel < 0 || bsel >= ACPI_PCIHP_MAX_HOTPLUG_BUS) { + return 0; + } + + switch (addr) { + case PCI_UP_BASE: + val = s->acpi_pcihp_pci_status[bsel].up; + if (!s->legacy_piix) { + s->acpi_pcihp_pci_status[bsel].up = 0; + } + trace_acpi_pci_up_read(val); + break; + case PCI_DOWN_BASE: + val = s->acpi_pcihp_pci_status[bsel].down; + trace_acpi_pci_down_read(val); + break; + case PCI_EJ_BASE: + trace_acpi_pci_features_read(val); + break; + case PCI_RMV_BASE: + val = s->acpi_pcihp_pci_status[bsel].hotplug_enable; + trace_acpi_pci_rmv_read(val); + break; + case PCI_SEL_BASE: + val = s->hotplug_select; + trace_acpi_pci_sel_read(val); + break; + case PCI_AIDX_BASE: + val = s->acpi_index; + s->acpi_index = 0; + trace_acpi_pci_acpi_index_read(val); + break; + default: + break; + } + + return val; +} + +static void pci_write(void *opaque, hwaddr addr, uint64_t data, + unsigned int size) +{ + int slot; + PCIBus *bus; + BusChild *kid, *next; + AcpiPciHpState *s = opaque; + + s->acpi_index = 0; + switch (addr) { + case PCI_AIDX_BASE: + /* + * fetch acpi-index for specified slot so that follow up read from + * PCI_AIDX_BASE can return it to guest + */ + slot = ctz32(data); + + if (s->hotplug_select >= ACPI_PCIHP_MAX_HOTPLUG_BUS) { + break; + } + + bus = acpi_pcihp_find_hotplug_bus(s, s->hotplug_select); + QTAILQ_FOREACH_SAFE(kid, &bus->qbus.children, sibling, next) { + Object *o = OBJECT(kid->child); + PCIDevice *dev = PCI_DEVICE(o); + if (PCI_SLOT(dev->devfn) == slot) { + s->acpi_index = object_property_get_uint(o, "acpi-index", NULL); + break; + } + } + trace_acpi_pci_acpi_index_write(s->hotplug_select, slot, s->acpi_index); + break; + case PCI_EJ_BASE: + if (s->hotplug_select >= ACPI_PCIHP_MAX_HOTPLUG_BUS) { + break; + } + acpi_pcihp_eject_slot(s, s->hotplug_select, data); + trace_acpi_pci_ej_write(addr, data); + break; + case PCI_SEL_BASE: + s->hotplug_select = s->legacy_piix ? 
ACPI_PCIHP_BSEL_DEFAULT : data; + trace_acpi_pci_sel_write(addr, data); + default: + break; + } +} + +static const MemoryRegionOps acpi_pcihp_io_ops = { + .read = pci_read, + .write = pci_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +void acpi_pcihp_init(Object *owner, AcpiPciHpState *s, PCIBus *root_bus, + MemoryRegion *address_space_io, bool bridges_enabled, + uint16_t io_base) +{ + s->io_len = ACPI_PCIHP_SIZE; + s->io_base = io_base; + + s->root = root_bus; + s->legacy_piix = !bridges_enabled; + + memory_region_init_io(&s->io, owner, &acpi_pcihp_io_ops, s, + "acpi-pci-hotplug", s->io_len); + memory_region_add_subregion(address_space_io, s->io_base, &s->io); + + object_property_add_uint16_ptr(owner, ACPI_PCIHP_IO_BASE_PROP, &s->io_base, + OBJ_PROP_FLAG_READ); + object_property_add_uint16_ptr(owner, ACPI_PCIHP_IO_LEN_PROP, &s->io_len, + OBJ_PROP_FLAG_READ); +} + +bool vmstate_acpi_pcihp_use_acpi_index(void *opaque, int version_id) +{ + AcpiPciHpState *s = opaque; + return s->acpi_index; +} + +const VMStateDescription vmstate_acpi_pcihp_pci_status = { + .name = "acpi_pcihp_pci_status", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(up, AcpiPciHpPciStatus), + VMSTATE_UINT32(down, AcpiPciHpPciStatus), + VMSTATE_END_OF_LIST() + } +}; diff --git a/hw/acpi/piix4.c b/hw/acpi/piix4.c new file mode 100644 index 000000000..f0b5fac44 --- /dev/null +++ b/hw/acpi/piix4.c @@ -0,0 +1,710 @@ +/* + * ACPI implementation + * + * Copyright (c) 2006 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License version 2.1 as published by the Free Software Foundation. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see + * + * Contributions after 2012-01-13 are licensed under the terms of the + * GNU GPL, version 2 or (at your option) any later version. 
+ */ + +#include "qemu/osdep.h" +#include "hw/i386/pc.h" +#include "hw/southbridge/piix.h" +#include "hw/irq.h" +#include "hw/isa/apm.h" +#include "hw/i2c/pm_smbus.h" +#include "hw/pci/pci.h" +#include "hw/qdev-properties.h" +#include "hw/acpi/acpi.h" +#include "sysemu/runstate.h" +#include "sysemu/sysemu.h" +#include "sysemu/xen.h" +#include "qapi/error.h" +#include "qemu/range.h" +#include "hw/acpi/pcihp.h" +#include "hw/acpi/cpu_hotplug.h" +#include "hw/acpi/cpu.h" +#include "hw/hotplug.h" +#include "hw/mem/pc-dimm.h" +#include "hw/mem/nvdimm.h" +#include "hw/acpi/memory_hotplug.h" +#include "hw/acpi/acpi_dev_interface.h" +#include "migration/vmstate.h" +#include "hw/core/cpu.h" +#include "trace.h" +#include "qom/object.h" + +#define GPE_BASE 0xafe0 +#define GPE_LEN 4 + +#define ACPI_PCIHP_ADDR_PIIX4 0xae00 + +struct pci_status { + uint32_t up; /* deprecated, maintained for migration compatibility */ + uint32_t down; +}; + +struct PIIX4PMState { + /*< private >*/ + PCIDevice parent_obj; + /*< public >*/ + + MemoryRegion io; + uint32_t io_base; + + MemoryRegion io_gpe; + ACPIREGS ar; + + APMState apm; + + PMSMBus smb; + uint32_t smb_io_base; + + qemu_irq irq; + qemu_irq smi_irq; + int smm_enabled; + bool smm_compat; + Notifier machine_ready; + Notifier powerdown_notifier; + + AcpiPciHpState acpi_pci_hotplug; + bool use_acpi_hotplug_bridge; + bool use_acpi_root_pci_hotplug; + + uint8_t disable_s3; + uint8_t disable_s4; + uint8_t s4_val; + + bool cpu_hotplug_legacy; + AcpiCpuHotplug gpe_cpu; + CPUHotplugState cpuhp_state; + + MemHotplugState acpi_memory_hotplug; +}; + +OBJECT_DECLARE_SIMPLE_TYPE(PIIX4PMState, PIIX4_PM) + +static void piix4_acpi_system_hot_add_init(MemoryRegion *parent, + PCIBus *bus, PIIX4PMState *s); + +#define ACPI_ENABLE 0xf1 +#define ACPI_DISABLE 0xf0 + +static void pm_tmr_timer(ACPIREGS *ar) +{ + PIIX4PMState *s = container_of(ar, PIIX4PMState, ar); + acpi_update_sci(&s->ar, s->irq); +} + +static void apm_ctrl_changed(uint32_t val, void *arg) +{ + PIIX4PMState *s = arg; + PCIDevice *d = PCI_DEVICE(s); + + /* ACPI specs 3.0, 4.7.2.5 */ + acpi_pm1_cnt_update(&s->ar, val == ACPI_ENABLE, val == ACPI_DISABLE); + if (val == ACPI_ENABLE || val == ACPI_DISABLE) { + return; + } + + if (d->config[0x5b] & (1 << 1)) { + if (s->smi_irq) { + qemu_irq_raise(s->smi_irq); + } + } +} + +static void pm_io_space_update(PIIX4PMState *s) +{ + PCIDevice *d = PCI_DEVICE(s); + + s->io_base = le32_to_cpu(*(uint32_t *)(d->config + 0x40)); + s->io_base &= 0xffc0; + + memory_region_transaction_begin(); + memory_region_set_enabled(&s->io, d->config[0x80] & 1); + memory_region_set_address(&s->io, s->io_base); + memory_region_transaction_commit(); +} + +static void smbus_io_space_update(PIIX4PMState *s) +{ + PCIDevice *d = PCI_DEVICE(s); + + s->smb_io_base = le32_to_cpu(*(uint32_t *)(d->config + 0x90)); + s->smb_io_base &= 0xffc0; + + memory_region_transaction_begin(); + memory_region_set_enabled(&s->smb.io, d->config[0xd2] & 1); + memory_region_set_address(&s->smb.io, s->smb_io_base); + memory_region_transaction_commit(); +} + +static void pm_write_config(PCIDevice *d, + uint32_t address, uint32_t val, int len) +{ + pci_default_write_config(d, address, val, len); + if (range_covers_byte(address, len, 0x80) || + ranges_overlap(address, len, 0x40, 4)) { + pm_io_space_update((PIIX4PMState *)d); + } + if (range_covers_byte(address, len, 0xd2) || + ranges_overlap(address, len, 0x90, 4)) { + smbus_io_space_update((PIIX4PMState *)d); + } +} + +static int vmstate_acpi_post_load(void *opaque, int 
version_id) +{ + PIIX4PMState *s = opaque; + + pm_io_space_update(s); + smbus_io_space_update(s); + return 0; +} + +#define VMSTATE_GPE_ARRAY(_field, _state) \ + { \ + .name = (stringify(_field)), \ + .version_id = 0, \ + .info = &vmstate_info_uint16, \ + .size = sizeof(uint16_t), \ + .flags = VMS_SINGLE | VMS_POINTER, \ + .offset = vmstate_offset_pointer(_state, _field, uint8_t), \ + } + +static const VMStateDescription vmstate_gpe = { + .name = "gpe", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_GPE_ARRAY(sts, ACPIGPE), + VMSTATE_GPE_ARRAY(en, ACPIGPE), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_pci_status = { + .name = "pci_status", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(up, struct AcpiPciHpPciStatus), + VMSTATE_UINT32(down, struct AcpiPciHpPciStatus), + VMSTATE_END_OF_LIST() + } +}; + +static bool vmstate_test_use_acpi_hotplug_bridge(void *opaque, int version_id) +{ + PIIX4PMState *s = opaque; + return s->use_acpi_hotplug_bridge; +} + +static bool vmstate_test_no_use_acpi_hotplug_bridge(void *opaque, + int version_id) +{ + PIIX4PMState *s = opaque; + return !s->use_acpi_hotplug_bridge; +} + +static bool vmstate_test_use_memhp(void *opaque) +{ + PIIX4PMState *s = opaque; + return s->acpi_memory_hotplug.is_enabled; +} + +static const VMStateDescription vmstate_memhp_state = { + .name = "piix4_pm/memhp", + .version_id = 1, + .minimum_version_id = 1, + .minimum_version_id_old = 1, + .needed = vmstate_test_use_memhp, + .fields = (VMStateField[]) { + VMSTATE_MEMORY_HOTPLUG(acpi_memory_hotplug, PIIX4PMState), + VMSTATE_END_OF_LIST() + } +}; + +static bool vmstate_test_use_cpuhp(void *opaque) +{ + PIIX4PMState *s = opaque; + return !s->cpu_hotplug_legacy; +} + +static int vmstate_cpuhp_pre_load(void *opaque) +{ + Object *obj = OBJECT(opaque); + object_property_set_bool(obj, "cpu-hotplug-legacy", false, &error_abort); + return 0; +} + +static const VMStateDescription vmstate_cpuhp_state = { + .name = "piix4_pm/cpuhp", + .version_id = 1, + .minimum_version_id = 1, + .minimum_version_id_old = 1, + .needed = vmstate_test_use_cpuhp, + .pre_load = vmstate_cpuhp_pre_load, + .fields = (VMStateField[]) { + VMSTATE_CPU_HOTPLUG(cpuhp_state, PIIX4PMState), + VMSTATE_END_OF_LIST() + } +}; + +static bool piix4_vmstate_need_smbus(void *opaque, int version_id) +{ + return pm_smbus_vmstate_needed(); +} + +/* qemu-kvm 1.2 uses version 3 but advertised as 2 + * To support incoming qemu-kvm 1.2 migration, change version_id + * and minimum_version_id to 2 below (which breaks migration from + * qemu 1.2). 
+ * + */ +static const VMStateDescription vmstate_acpi = { + .name = "piix4_pm", + .version_id = 3, + .minimum_version_id = 3, + .post_load = vmstate_acpi_post_load, + .fields = (VMStateField[]) { + VMSTATE_PCI_DEVICE(parent_obj, PIIX4PMState), + VMSTATE_UINT16(ar.pm1.evt.sts, PIIX4PMState), + VMSTATE_UINT16(ar.pm1.evt.en, PIIX4PMState), + VMSTATE_UINT16(ar.pm1.cnt.cnt, PIIX4PMState), + VMSTATE_STRUCT(apm, PIIX4PMState, 0, vmstate_apm, APMState), + VMSTATE_STRUCT_TEST(smb, PIIX4PMState, piix4_vmstate_need_smbus, 3, + pmsmb_vmstate, PMSMBus), + VMSTATE_TIMER_PTR(ar.tmr.timer, PIIX4PMState), + VMSTATE_INT64(ar.tmr.overflow_time, PIIX4PMState), + VMSTATE_STRUCT(ar.gpe, PIIX4PMState, 2, vmstate_gpe, ACPIGPE), + VMSTATE_STRUCT_TEST( + acpi_pci_hotplug.acpi_pcihp_pci_status[ACPI_PCIHP_BSEL_DEFAULT], + PIIX4PMState, + vmstate_test_no_use_acpi_hotplug_bridge, + 2, vmstate_pci_status, + struct AcpiPciHpPciStatus), + VMSTATE_PCI_HOTPLUG(acpi_pci_hotplug, PIIX4PMState, + vmstate_test_use_acpi_hotplug_bridge, + vmstate_acpi_pcihp_use_acpi_index), + VMSTATE_END_OF_LIST() + }, + .subsections = (const VMStateDescription*[]) { + &vmstate_memhp_state, + &vmstate_cpuhp_state, + NULL + } +}; + +static void piix4_pm_reset(DeviceState *dev) +{ + PIIX4PMState *s = PIIX4_PM(dev); + PCIDevice *d = PCI_DEVICE(s); + uint8_t *pci_conf = d->config; + + pci_conf[0x58] = 0; + pci_conf[0x59] = 0; + pci_conf[0x5a] = 0; + pci_conf[0x5b] = 0; + + pci_conf[0x40] = 0x01; /* PM io base read only bit */ + pci_conf[0x80] = 0; + + if (!s->smm_enabled) { + /* Mark SMM as already inited (until KVM supports SMM). */ + pci_conf[0x5B] = 0x02; + } + + acpi_pm1_evt_reset(&s->ar); + acpi_pm1_cnt_reset(&s->ar); + acpi_pm_tmr_reset(&s->ar); + acpi_gpe_reset(&s->ar); + acpi_update_sci(&s->ar, s->irq); + + pm_io_space_update(s); + acpi_pcihp_reset(&s->acpi_pci_hotplug, !s->use_acpi_root_pci_hotplug); +} + +static void piix4_pm_powerdown_req(Notifier *n, void *opaque) +{ + PIIX4PMState *s = container_of(n, PIIX4PMState, powerdown_notifier); + + assert(s != NULL); + acpi_pm1_evt_power_down(&s->ar); +} + +static void piix4_device_pre_plug_cb(HotplugHandler *hotplug_dev, + DeviceState *dev, Error **errp) +{ + PIIX4PMState *s = PIIX4_PM(hotplug_dev); + + if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) { + acpi_pcihp_device_pre_plug_cb(hotplug_dev, dev, errp); + } else if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) { + if (!s->acpi_memory_hotplug.is_enabled) { + error_setg(errp, + "memory hotplug is not enabled: %s.memory-hotplug-support " + "is not set", object_get_typename(OBJECT(s))); + } + } else if ( + !object_dynamic_cast(OBJECT(dev), TYPE_CPU)) { + error_setg(errp, "acpi: device pre plug request for not supported" + " device type: %s", object_get_typename(OBJECT(dev))); + } +} + +static void piix4_device_plug_cb(HotplugHandler *hotplug_dev, + DeviceState *dev, Error **errp) +{ + PIIX4PMState *s = PIIX4_PM(hotplug_dev); + + if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) { + if (object_dynamic_cast(OBJECT(dev), TYPE_NVDIMM)) { + nvdimm_acpi_plug_cb(hotplug_dev, dev); + } else { + acpi_memory_plug_cb(hotplug_dev, &s->acpi_memory_hotplug, + dev, errp); + } + } else if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) { + acpi_pcihp_device_plug_cb(hotplug_dev, &s->acpi_pci_hotplug, dev, errp); + } else if (object_dynamic_cast(OBJECT(dev), TYPE_CPU)) { + if (s->cpu_hotplug_legacy) { + legacy_acpi_cpu_plug_cb(hotplug_dev, &s->gpe_cpu, dev, errp); + } else { + acpi_cpu_plug_cb(hotplug_dev, &s->cpuhp_state, dev, errp); + } + } 
else { + g_assert_not_reached(); + } +} + +static void piix4_device_unplug_request_cb(HotplugHandler *hotplug_dev, + DeviceState *dev, Error **errp) +{ + PIIX4PMState *s = PIIX4_PM(hotplug_dev); + + if (s->acpi_memory_hotplug.is_enabled && + object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) { + acpi_memory_unplug_request_cb(hotplug_dev, &s->acpi_memory_hotplug, + dev, errp); + } else if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) { + acpi_pcihp_device_unplug_request_cb(hotplug_dev, &s->acpi_pci_hotplug, + dev, errp); + } else if (object_dynamic_cast(OBJECT(dev), TYPE_CPU) && + !s->cpu_hotplug_legacy) { + acpi_cpu_unplug_request_cb(hotplug_dev, &s->cpuhp_state, dev, errp); + } else { + error_setg(errp, "acpi: device unplug request for not supported device" + " type: %s", object_get_typename(OBJECT(dev))); + } +} + +static void piix4_device_unplug_cb(HotplugHandler *hotplug_dev, + DeviceState *dev, Error **errp) +{ + PIIX4PMState *s = PIIX4_PM(hotplug_dev); + + if (s->acpi_memory_hotplug.is_enabled && + object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) { + acpi_memory_unplug_cb(&s->acpi_memory_hotplug, dev, errp); + } else if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) { + acpi_pcihp_device_unplug_cb(hotplug_dev, &s->acpi_pci_hotplug, dev, + errp); + } else if (object_dynamic_cast(OBJECT(dev), TYPE_CPU) && + !s->cpu_hotplug_legacy) { + acpi_cpu_unplug_cb(&s->cpuhp_state, dev, errp); + } else { + error_setg(errp, "acpi: device unplug for not supported device" + " type: %s", object_get_typename(OBJECT(dev))); + } +} + +static void piix4_pm_machine_ready(Notifier *n, void *opaque) +{ + PIIX4PMState *s = container_of(n, PIIX4PMState, machine_ready); + PCIDevice *d = PCI_DEVICE(s); + MemoryRegion *io_as = pci_address_space_io(d); + uint8_t *pci_conf; + + pci_conf = d->config; + pci_conf[0x5f] = 0x10 | + (memory_region_present(io_as, 0x378) ? 0x80 : 0); + pci_conf[0x63] = 0x60; + pci_conf[0x67] = (memory_region_present(io_as, 0x3f8) ? 0x08 : 0) | + (memory_region_present(io_as, 0x2f8) ? 0x90 : 0); +} + +static void piix4_pm_add_properties(PIIX4PMState *s) +{ + static const uint8_t acpi_enable_cmd = ACPI_ENABLE; + static const uint8_t acpi_disable_cmd = ACPI_DISABLE; + static const uint32_t gpe0_blk = GPE_BASE; + static const uint32_t gpe0_blk_len = GPE_LEN; + static const uint16_t sci_int = 9; + + object_property_add_uint8_ptr(OBJECT(s), ACPI_PM_PROP_ACPI_ENABLE_CMD, + &acpi_enable_cmd, OBJ_PROP_FLAG_READ); + object_property_add_uint8_ptr(OBJECT(s), ACPI_PM_PROP_ACPI_DISABLE_CMD, + &acpi_disable_cmd, OBJ_PROP_FLAG_READ); + object_property_add_uint32_ptr(OBJECT(s), ACPI_PM_PROP_GPE0_BLK, + &gpe0_blk, OBJ_PROP_FLAG_READ); + object_property_add_uint32_ptr(OBJECT(s), ACPI_PM_PROP_GPE0_BLK_LEN, + &gpe0_blk_len, OBJ_PROP_FLAG_READ); + object_property_add_uint16_ptr(OBJECT(s), ACPI_PM_PROP_SCI_INT, + &sci_int, OBJ_PROP_FLAG_READ); + object_property_add_uint32_ptr(OBJECT(s), ACPI_PM_PROP_PM_IO_BASE, + &s->io_base, OBJ_PROP_FLAG_READ); +} + +static void piix4_pm_realize(PCIDevice *dev, Error **errp) +{ + PIIX4PMState *s = PIIX4_PM(dev); + uint8_t *pci_conf; + + pci_conf = dev->config; + pci_conf[0x06] = 0x80; + pci_conf[0x07] = 0x02; + pci_conf[0x09] = 0x00; + pci_conf[0x3d] = 0x01; // interrupt pin 1 + + /* APM */ + apm_init(dev, &s->apm, apm_ctrl_changed, s); + + if (!s->smm_enabled) { + /* Mark SMM as already inited to prevent SMM from running. KVM does not + * support SMM mode. */ + pci_conf[0x5B] = 0x02; + } + + /* XXX: which specification is used ? 
The i82731AB has different + mappings */ + pci_conf[0x90] = s->smb_io_base | 1; + pci_conf[0x91] = s->smb_io_base >> 8; + pci_conf[0xd2] = 0x09; + pm_smbus_init(DEVICE(dev), &s->smb, true); + memory_region_set_enabled(&s->smb.io, pci_conf[0xd2] & 1); + memory_region_add_subregion(pci_address_space_io(dev), + s->smb_io_base, &s->smb.io); + + memory_region_init(&s->io, OBJECT(s), "piix4-pm", 64); + memory_region_set_enabled(&s->io, false); + memory_region_add_subregion(pci_address_space_io(dev), + 0, &s->io); + + acpi_pm_tmr_init(&s->ar, pm_tmr_timer, &s->io); + acpi_pm1_evt_init(&s->ar, pm_tmr_timer, &s->io); + acpi_pm1_cnt_init(&s->ar, &s->io, s->disable_s3, s->disable_s4, s->s4_val, + !s->smm_compat && !s->smm_enabled); + acpi_gpe_init(&s->ar, GPE_LEN); + + s->powerdown_notifier.notify = piix4_pm_powerdown_req; + qemu_register_powerdown_notifier(&s->powerdown_notifier); + + s->machine_ready.notify = piix4_pm_machine_ready; + qemu_add_machine_init_done_notifier(&s->machine_ready); + + piix4_acpi_system_hot_add_init(pci_address_space_io(dev), + pci_get_bus(dev), s); + qbus_set_hotplug_handler(BUS(pci_get_bus(dev)), OBJECT(s)); + + piix4_pm_add_properties(s); +} + +I2CBus *piix4_pm_init(PCIBus *bus, int devfn, uint32_t smb_io_base, + qemu_irq sci_irq, qemu_irq smi_irq, + int smm_enabled, DeviceState **piix4_pm) +{ + PCIDevice *pci_dev; + DeviceState *dev; + PIIX4PMState *s; + + pci_dev = pci_new(devfn, TYPE_PIIX4_PM); + dev = DEVICE(pci_dev); + qdev_prop_set_uint32(dev, "smb_io_base", smb_io_base); + if (piix4_pm) { + *piix4_pm = dev; + } + + s = PIIX4_PM(dev); + s->irq = sci_irq; + s->smi_irq = smi_irq; + s->smm_enabled = smm_enabled; + if (xen_enabled()) { + s->use_acpi_hotplug_bridge = false; + } + + pci_realize_and_unref(pci_dev, bus, &error_fatal); + + return s->smb.smbus; +} + +static uint64_t gpe_readb(void *opaque, hwaddr addr, unsigned width) +{ + PIIX4PMState *s = opaque; + uint32_t val = acpi_gpe_ioport_readb(&s->ar, addr); + + trace_piix4_gpe_readb(addr, width, val); + return val; +} + +static void gpe_writeb(void *opaque, hwaddr addr, uint64_t val, + unsigned width) +{ + PIIX4PMState *s = opaque; + + trace_piix4_gpe_writeb(addr, width, val); + acpi_gpe_ioport_writeb(&s->ar, addr, val); + acpi_update_sci(&s->ar, s->irq); +} + +static const MemoryRegionOps piix4_gpe_ops = { + .read = gpe_readb, + .write = gpe_writeb, + .valid.min_access_size = 1, + .valid.max_access_size = 4, + .impl.min_access_size = 1, + .impl.max_access_size = 1, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + + +static bool piix4_get_cpu_hotplug_legacy(Object *obj, Error **errp) +{ + PIIX4PMState *s = PIIX4_PM(obj); + + return s->cpu_hotplug_legacy; +} + +static void piix4_set_cpu_hotplug_legacy(Object *obj, bool value, Error **errp) +{ + PIIX4PMState *s = PIIX4_PM(obj); + + assert(!value); + if (s->cpu_hotplug_legacy && value == false) { + acpi_switch_to_modern_cphp(&s->gpe_cpu, &s->cpuhp_state, + PIIX4_CPU_HOTPLUG_IO_BASE); + } + s->cpu_hotplug_legacy = value; +} + +static void piix4_acpi_system_hot_add_init(MemoryRegion *parent, + PCIBus *bus, PIIX4PMState *s) +{ + memory_region_init_io(&s->io_gpe, OBJECT(s), &piix4_gpe_ops, s, + "acpi-gpe0", GPE_LEN); + memory_region_add_subregion(parent, GPE_BASE, &s->io_gpe); + + if (s->use_acpi_hotplug_bridge || s->use_acpi_root_pci_hotplug) { + acpi_pcihp_init(OBJECT(s), &s->acpi_pci_hotplug, bus, parent, + s->use_acpi_hotplug_bridge, ACPI_PCIHP_ADDR_PIIX4); + } + + s->cpu_hotplug_legacy = true; + object_property_add_bool(OBJECT(s), "cpu-hotplug-legacy", + 
piix4_get_cpu_hotplug_legacy, + piix4_set_cpu_hotplug_legacy); + legacy_acpi_cpu_hotplug_init(parent, OBJECT(s), &s->gpe_cpu, + PIIX4_CPU_HOTPLUG_IO_BASE); + + if (s->acpi_memory_hotplug.is_enabled) { + acpi_memory_hotplug_init(parent, OBJECT(s), &s->acpi_memory_hotplug, + ACPI_MEMORY_HOTPLUG_BASE); + } +} + +static void piix4_ospm_status(AcpiDeviceIf *adev, ACPIOSTInfoList ***list) +{ + PIIX4PMState *s = PIIX4_PM(adev); + + acpi_memory_ospm_status(&s->acpi_memory_hotplug, list); + if (!s->cpu_hotplug_legacy) { + acpi_cpu_ospm_status(&s->cpuhp_state, list); + } +} + +static void piix4_send_gpe(AcpiDeviceIf *adev, AcpiEventStatusBits ev) +{ + PIIX4PMState *s = PIIX4_PM(adev); + + acpi_send_gpe_event(&s->ar, s->irq, ev); +} + +static Property piix4_pm_properties[] = { + DEFINE_PROP_UINT32("smb_io_base", PIIX4PMState, smb_io_base, 0), + DEFINE_PROP_UINT8(ACPI_PM_PROP_S3_DISABLED, PIIX4PMState, disable_s3, 0), + DEFINE_PROP_UINT8(ACPI_PM_PROP_S4_DISABLED, PIIX4PMState, disable_s4, 0), + DEFINE_PROP_UINT8(ACPI_PM_PROP_S4_VAL, PIIX4PMState, s4_val, 2), + DEFINE_PROP_BOOL(ACPI_PM_PROP_ACPI_PCIHP_BRIDGE, PIIX4PMState, + use_acpi_hotplug_bridge, true), + DEFINE_PROP_BOOL(ACPI_PM_PROP_ACPI_PCI_ROOTHP, PIIX4PMState, + use_acpi_root_pci_hotplug, true), + DEFINE_PROP_BOOL("memory-hotplug-support", PIIX4PMState, + acpi_memory_hotplug.is_enabled, true), + DEFINE_PROP_BOOL("smm-compat", PIIX4PMState, smm_compat, false), + DEFINE_PROP_END_OF_LIST(), +}; + +static void piix4_pm_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(klass); + AcpiDeviceIfClass *adevc = ACPI_DEVICE_IF_CLASS(klass); + + k->realize = piix4_pm_realize; + k->config_write = pm_write_config; + k->vendor_id = PCI_VENDOR_ID_INTEL; + k->device_id = PCI_DEVICE_ID_INTEL_82371AB_3; + k->revision = 0x03; + k->class_id = PCI_CLASS_BRIDGE_OTHER; + dc->reset = piix4_pm_reset; + dc->desc = "PM"; + dc->vmsd = &vmstate_acpi; + device_class_set_props(dc, piix4_pm_properties); + /* + * Reason: part of PIIX4 southbridge, needs to be wired up, + * e.g. by mips_malta_init() + */ + dc->user_creatable = false; + dc->hotpluggable = false; + hc->pre_plug = piix4_device_pre_plug_cb; + hc->plug = piix4_device_plug_cb; + hc->unplug_request = piix4_device_unplug_request_cb; + hc->unplug = piix4_device_unplug_cb; + adevc->ospm_status = piix4_ospm_status; + adevc->send_event = piix4_send_gpe; + adevc->madt_cpu = pc_madt_cpu_entry; +} + +static const TypeInfo piix4_pm_info = { + .name = TYPE_PIIX4_PM, + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(PIIX4PMState), + .class_init = piix4_pm_class_init, + .interfaces = (InterfaceInfo[]) { + { TYPE_HOTPLUG_HANDLER }, + { TYPE_ACPI_DEVICE_IF }, + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { } + } +}; + +static void piix4_pm_register_types(void) +{ + type_register_static(&piix4_pm_info); +} + +type_init(piix4_pm_register_types) diff --git a/hw/acpi/tco.c b/hw/acpi/tco.c new file mode 100644 index 000000000..cf1e68a53 --- /dev/null +++ b/hw/acpi/tco.c @@ -0,0 +1,261 @@ +/* + * QEMU ICH9 TCO emulation + * + * Copyright (c) 2015 Paulo Alcantara + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ */ + +#include "qemu/osdep.h" +#include "sysemu/watchdog.h" +#include "hw/i386/ich9.h" +#include "migration/vmstate.h" + +#include "hw/acpi/tco.h" +#include "trace.h" + +enum { + TCO_RLD_DEFAULT = 0x0000, + TCO_DAT_IN_DEFAULT = 0x00, + TCO_DAT_OUT_DEFAULT = 0x00, + TCO1_STS_DEFAULT = 0x0000, + TCO2_STS_DEFAULT = 0x0000, + TCO1_CNT_DEFAULT = 0x0000, + TCO2_CNT_DEFAULT = 0x0008, + TCO_MESSAGE1_DEFAULT = 0x00, + TCO_MESSAGE2_DEFAULT = 0x00, + TCO_WDCNT_DEFAULT = 0x00, + TCO_TMR_DEFAULT = 0x0004, + SW_IRQ_GEN_DEFAULT = 0x03, +}; + +static inline void tco_timer_reload(TCOIORegs *tr) +{ + int ticks = tr->tco.tmr & TCO_TMR_MASK; + int64_t nsec = (int64_t)ticks * TCO_TICK_NSEC; + + trace_tco_timer_reload(ticks, nsec / 1000000); + tr->expire_time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + nsec; + timer_mod(tr->tco_timer, tr->expire_time); +} + +static inline void tco_timer_stop(TCOIORegs *tr) +{ + tr->expire_time = -1; + timer_del(tr->tco_timer); +} + +static void tco_timer_expired(void *opaque) +{ + TCOIORegs *tr = opaque; + ICH9LPCPMRegs *pm = container_of(tr, ICH9LPCPMRegs, tco_regs); + ICH9LPCState *lpc = container_of(pm, ICH9LPCState, pm); + uint32_t gcs = pci_get_long(lpc->chip_config + ICH9_CC_GCS); + + trace_tco_timer_expired(tr->timeouts_no, + lpc->pin_strap.spkr_hi, + !!(gcs & ICH9_CC_GCS_NO_REBOOT)); + tr->tco.rld = 0; + tr->tco.sts1 |= TCO_TIMEOUT; + if (++tr->timeouts_no == 2) { + tr->tco.sts2 |= TCO_SECOND_TO_STS; + tr->tco.sts2 |= TCO_BOOT_STS; + tr->timeouts_no = 0; + + if (!lpc->pin_strap.spkr_hi && !(gcs & ICH9_CC_GCS_NO_REBOOT)) { + watchdog_perform_action(); + tco_timer_stop(tr); + return; + } + } + + if (pm->smi_en & ICH9_PMIO_SMI_EN_TCO_EN) { + ich9_generate_smi(); + } + tr->tco.rld = tr->tco.tmr; + tco_timer_reload(tr); +} + +/* NOTE: values of 0 or 1 will be ignored by ICH */ +static inline int can_start_tco_timer(TCOIORegs *tr) +{ + return !(tr->tco.cnt1 & TCO_TMR_HLT) && tr->tco.tmr > 1; +} + +static uint32_t tco_ioport_readw(TCOIORegs *tr, uint32_t addr) +{ + uint16_t rld; + + switch (addr) { + case TCO_RLD: + if (tr->expire_time != -1) { + int64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + int64_t elapsed = (tr->expire_time - now) / TCO_TICK_NSEC; + rld = (uint16_t)elapsed | (tr->tco.rld & ~TCO_RLD_MASK); + } else { + rld = tr->tco.rld; + } + return rld; + case TCO_DAT_IN: + return tr->tco.din; + case TCO_DAT_OUT: + return tr->tco.dout; + case TCO1_STS: + return tr->tco.sts1; + case TCO2_STS: + return tr->tco.sts2; + case TCO1_CNT: + return tr->tco.cnt1; + case TCO2_CNT: + return tr->tco.cnt2; + case TCO_MESSAGE1: + return tr->tco.msg1; + case TCO_MESSAGE2: + return tr->tco.msg2; + case TCO_WDCNT: + return tr->tco.wdcnt; + case TCO_TMR: + return tr->tco.tmr; + case SW_IRQ_GEN: + return tr->sw_irq_gen; + } + return 0; +} + +static void tco_ioport_writew(TCOIORegs *tr, uint32_t addr, uint32_t val) +{ + switch (addr) { + case TCO_RLD: + tr->timeouts_no = 0; + if (can_start_tco_timer(tr)) { + tr->tco.rld = tr->tco.tmr; + tco_timer_reload(tr); + } else { + tr->tco.rld = val; + } + break; + case TCO_DAT_IN: + tr->tco.din = val; + tr->tco.sts1 |= SW_TCO_SMI; + ich9_generate_smi(); + break; + case TCO_DAT_OUT: + tr->tco.dout = val; + tr->tco.sts1 |= TCO_INT_STS; + /* TODO: cause an interrupt, as selected by the TCO_INT_SEL bits */ + break; + case TCO1_STS: + tr->tco.sts1 = val & TCO1_STS_MASK; + break; + case TCO2_STS: + tr->tco.sts2 = val & TCO2_STS_MASK; + break; + case TCO1_CNT: + val &= TCO1_CNT_MASK; + /* + * once TCO_LOCK bit is set, it can not be cleared by software. 
a reset + * is required to change this bit from 1 to 0 -- it defaults to 0. + */ + tr->tco.cnt1 = val | (tr->tco.cnt1 & TCO_LOCK); + if (can_start_tco_timer(tr)) { + tr->tco.rld = tr->tco.tmr; + tco_timer_reload(tr); + } else { + tco_timer_stop(tr); + } + break; + case TCO2_CNT: + tr->tco.cnt2 = val; + break; + case TCO_MESSAGE1: + tr->tco.msg1 = val; + break; + case TCO_MESSAGE2: + tr->tco.msg2 = val; + break; + case TCO_WDCNT: + tr->tco.wdcnt = val; + break; + case TCO_TMR: + tr->tco.tmr = val; + break; + case SW_IRQ_GEN: + tr->sw_irq_gen = val; + break; + } +} + +static uint64_t tco_io_readw(void *opaque, hwaddr addr, unsigned width) +{ + TCOIORegs *tr = opaque; + return tco_ioport_readw(tr, addr); +} + +static void tco_io_writew(void *opaque, hwaddr addr, uint64_t val, + unsigned width) +{ + TCOIORegs *tr = opaque; + tco_ioport_writew(tr, addr, val); +} + +static const MemoryRegionOps tco_io_ops = { + .read = tco_io_readw, + .write = tco_io_writew, + .valid.min_access_size = 1, + .valid.max_access_size = 4, + .impl.min_access_size = 1, + .impl.max_access_size = 2, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +void acpi_pm_tco_init(TCOIORegs *tr, MemoryRegion *parent) +{ + *tr = (TCOIORegs) { + .tco = { + .rld = TCO_RLD_DEFAULT, + .din = TCO_DAT_IN_DEFAULT, + .dout = TCO_DAT_OUT_DEFAULT, + .sts1 = TCO1_STS_DEFAULT, + .sts2 = TCO2_STS_DEFAULT, + .cnt1 = TCO1_CNT_DEFAULT, + .cnt2 = TCO2_CNT_DEFAULT, + .msg1 = TCO_MESSAGE1_DEFAULT, + .msg2 = TCO_MESSAGE2_DEFAULT, + .wdcnt = TCO_WDCNT_DEFAULT, + .tmr = TCO_TMR_DEFAULT, + }, + .sw_irq_gen = SW_IRQ_GEN_DEFAULT, + .tco_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, tco_timer_expired, tr), + .expire_time = -1, + .timeouts_no = 0, + }; + memory_region_init_io(&tr->io, memory_region_owner(parent), + &tco_io_ops, tr, "sm-tco", ICH9_PMIO_TCO_LEN); + memory_region_add_subregion(parent, ICH9_PMIO_TCO_RLD, &tr->io); +} + +const VMStateDescription vmstate_tco_io_sts = { + .name = "tco io device status", + .version_id = 1, + .minimum_version_id = 1, + .minimum_version_id_old = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT16(tco.rld, TCOIORegs), + VMSTATE_UINT8(tco.din, TCOIORegs), + VMSTATE_UINT8(tco.dout, TCOIORegs), + VMSTATE_UINT16(tco.sts1, TCOIORegs), + VMSTATE_UINT16(tco.sts2, TCOIORegs), + VMSTATE_UINT16(tco.cnt1, TCOIORegs), + VMSTATE_UINT16(tco.cnt2, TCOIORegs), + VMSTATE_UINT8(tco.msg1, TCOIORegs), + VMSTATE_UINT8(tco.msg2, TCOIORegs), + VMSTATE_UINT8(tco.wdcnt, TCOIORegs), + VMSTATE_UINT16(tco.tmr, TCOIORegs), + VMSTATE_UINT8(sw_irq_gen, TCOIORegs), + VMSTATE_TIMER_PTR(tco_timer, TCOIORegs), + VMSTATE_INT64(expire_time, TCOIORegs), + VMSTATE_UINT8(timeouts_no, TCOIORegs), + VMSTATE_END_OF_LIST() + } +}; diff --git a/hw/acpi/tpm.c b/hw/acpi/tpm.c new file mode 100644 index 000000000..cdc022753 --- /dev/null +++ b/hw/acpi/tpm.c @@ -0,0 +1,459 @@ +/* Support for generating ACPI TPM tables + * + * Copyright (C) 2018 IBM, Corp. + * Copyright (C) 2018 Red Hat Inc + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . + */ +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "hw/acpi/tpm.h" + +void tpm_build_ppi_acpi(TPMIf *tpm, Aml *dev) +{ + Aml *method, *field, *ifctx, *ifctx2, *ifctx3, *func_mask, + *not_implemented, *pak, *tpm2, *tpm3, *pprm, *pprq, *zero, *one; + + if (!object_property_get_bool(OBJECT(tpm), "ppi", &error_abort)) { + return; + } + + zero = aml_int(0); + one = aml_int(1); + func_mask = aml_int(TPM_PPI_FUNC_MASK); + not_implemented = aml_int(TPM_PPI_FUNC_NOT_IMPLEMENTED); + + /* + * TPP2 is for the registers that ACPI code used to pass + * the PPI code and parameter (PPRQ, PPRM) to the firmware. + */ + aml_append(dev, + aml_operation_region("TPP2", AML_SYSTEM_MEMORY, + aml_int(TPM_PPI_ADDR_BASE + 0x100), + 0x5A)); + field = aml_field("TPP2", AML_ANY_ACC, AML_NOLOCK, AML_PRESERVE); + aml_append(field, aml_named_field("PPIN", 8)); + aml_append(field, aml_named_field("PPIP", 32)); + aml_append(field, aml_named_field("PPRP", 32)); + aml_append(field, aml_named_field("PPRQ", 32)); + aml_append(field, aml_named_field("PPRM", 32)); + aml_append(field, aml_named_field("LPPR", 32)); + aml_append(dev, field); + pprq = aml_name("PPRQ"); + pprm = aml_name("PPRM"); + + aml_append(dev, + aml_operation_region( + "TPP3", AML_SYSTEM_MEMORY, + aml_int(TPM_PPI_ADDR_BASE + + 0x15a /* movv, docs/specs/tpm.rst */), + 0x1)); + field = aml_field("TPP3", AML_BYTE_ACC, AML_NOLOCK, AML_PRESERVE); + aml_append(field, aml_named_field("MOVV", 8)); + aml_append(dev, field); + + /* + * DerefOf in Windows is broken with SYSTEM_MEMORY. Use a dynamic + * operation region inside of a method for getting FUNC[op]. + */ + method = aml_method("TPFN", 1, AML_SERIALIZED); + { + Aml *op = aml_arg(0); + ifctx = aml_if(aml_lgreater_equal(op, aml_int(0x100))); + { + aml_append(ifctx, aml_return(zero)); + } + aml_append(method, ifctx); + + aml_append(method, + aml_operation_region("TPP1", AML_SYSTEM_MEMORY, + aml_add(aml_int(TPM_PPI_ADDR_BASE), op, NULL), 0x1)); + field = aml_field("TPP1", AML_BYTE_ACC, AML_NOLOCK, AML_PRESERVE); + aml_append(field, aml_named_field("TPPF", 8)); + aml_append(method, field); + aml_append(method, aml_return(aml_name("TPPF"))); + } + aml_append(dev, method); + + /* + * Use global TPM2 & TPM3 variables to workaround Windows ACPI bug + * when returning packages. 
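+     * (Editor's note: concretely, the _DSM method below stores its result
+     * fields into these pre-declared named package objects and returns the
+     * named object itself, rather than building and returning a fresh
+     * package on each call.)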
+ */ + pak = aml_package(2); + aml_append(pak, zero); + aml_append(pak, zero); + aml_append(dev, aml_name_decl("TPM2", pak)); + tpm2 = aml_name("TPM2"); + + pak = aml_package(3); + aml_append(pak, zero); + aml_append(pak, zero); + aml_append(pak, zero); + aml_append(dev, aml_name_decl("TPM3", pak)); + tpm3 = aml_name("TPM3"); + + method = aml_method("_DSM", 4, AML_SERIALIZED); + { + uint8_t zerobyte[1] = { 0 }; + Aml *function, *arguments, *rev, *op, *op_arg, *op_flags, *uuid; + + uuid = aml_arg(0); + rev = aml_arg(1); + function = aml_arg(2); + arguments = aml_arg(3); + op = aml_local(0); + op_flags = aml_local(1); + + /* Physical Presence Interface */ + ifctx = aml_if( + aml_equal(uuid, + aml_touuid("3DDDFAA6-361B-4EB4-A424-8D10089D1653"))); + { + /* standard DSM query function */ + ifctx2 = aml_if(aml_equal(function, zero)); + { + uint8_t byte_list[2] = { 0xff, 0x01 }; /* functions 1-8 */ + + aml_append(ifctx2, + aml_return(aml_buffer(sizeof(byte_list), + byte_list))); + } + aml_append(ifctx, ifctx2); + + /* + * PPI 1.0: 2.1.1 Get Physical Presence Interface Version + * + * Arg 2 (Integer): Function Index = 1 + * Arg 3 (Package): Arguments = Empty Package + * Returns: Type: String + */ + ifctx2 = aml_if(aml_equal(function, one)); + { + aml_append(ifctx2, aml_return(aml_string("1.3"))); + } + aml_append(ifctx, ifctx2); + + /* + * PPI 1.0: 2.1.3 Submit TPM Operation Request to Pre-OS Environment + * + * Arg 2 (Integer): Function Index = 2 + * Arg 3 (Package): Arguments = Package: Type: Integer + * Operation Value of the Request + * Returns: Type: Integer + * 0: Success + * 1: Operation Value of the Request Not Supported + * 2: General Failure + */ + ifctx2 = aml_if(aml_equal(function, aml_int(2))); + { + /* get opcode */ + aml_append(ifctx2, + aml_store(aml_derefof(aml_index(arguments, + zero)), op)); + + /* get opcode flags */ + aml_append(ifctx2, + aml_store(aml_call1("TPFN", op), op_flags)); + + /* if func[opcode] & TPM_PPI_FUNC_NOT_IMPLEMENTED */ + ifctx3 = aml_if( + aml_equal( + aml_and(op_flags, func_mask, NULL), + not_implemented)); + { + /* 1: Operation Value of the Request Not Supported */ + aml_append(ifctx3, aml_return(one)); + } + aml_append(ifctx2, ifctx3); + + aml_append(ifctx2, aml_store(op, pprq)); + aml_append(ifctx2, aml_store(zero, pprm)); + /* 0: success */ + aml_append(ifctx2, aml_return(zero)); + } + aml_append(ifctx, ifctx2); + + /* + * PPI 1.0: 2.1.4 Get Pending TPM Operation Requested By the OS + * + * Arg 2 (Integer): Function Index = 3 + * Arg 3 (Package): Arguments = Empty Package + * Returns: Type: Package of Integers + * Integer 1: Function Return code + * 0: Success + * 1: General Failure + * Integer 2: Pending operation requested by the OS + * 0: None + * >0: Operation Value of the Pending Request + * Integer 3: Optional argument to pending operation + * requested by the OS + * 0: None + * >0: Argument Value of the Pending Request + */ + ifctx2 = aml_if(aml_equal(function, aml_int(3))); + { + /* + * Revision ID of 1, no integer parameter beyond + * parameter two are expected + */ + ifctx3 = aml_if(aml_equal(rev, one)); + { + /* TPM2[1] = PPRQ */ + aml_append(ifctx3, + aml_store(pprq, aml_index(tpm2, one))); + aml_append(ifctx3, aml_return(tpm2)); + } + aml_append(ifctx2, ifctx3); + + /* + * A return value of {0, 23, 1} indicates that + * operation 23 with argument 1 is pending. 
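+     * (Editor's note: revision 2 of the PPI interface adds the optional
+     * operation argument, which is why this branch returns the
+     * three-element TPM3 package while the revision 1 branch above
+     * returns the two-element TPM2.)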
+ */ + ifctx3 = aml_if(aml_equal(rev, aml_int(2))); + { + /* TPM3[1] = PPRQ */ + aml_append(ifctx3, + aml_store(pprq, aml_index(tpm3, one))); + /* TPM3[2] = PPRM */ + aml_append(ifctx3, + aml_store(pprm, aml_index(tpm3, aml_int(2)))); + aml_append(ifctx3, aml_return(tpm3)); + } + aml_append(ifctx2, ifctx3); + } + aml_append(ifctx, ifctx2); + + /* + * PPI 1.0: 2.1.5 Get Platform-Specific Action to Transition to + * Pre-OS Environment + * + * Arg 2 (Integer): Function Index = 4 + * Arg 3 (Package): Arguments = Empty Package + * Returns: Type: Integer + * 0: None + * 1: Shutdown + * 2: Reboot + * 3: OS Vendor-specific + */ + ifctx2 = aml_if(aml_equal(function, aml_int(4))); + { + /* reboot */ + aml_append(ifctx2, aml_return(aml_int(2))); + } + aml_append(ifctx, ifctx2); + + /* + * PPI 1.0: 2.1.6 Return TPM Operation Response to OS Environment + * + * Arg 2 (Integer): Function Index = 5 + * Arg 3 (Package): Arguments = Empty Package + * Returns: Type: Package of Integer + * Integer 1: Function Return code + * 0: Success + * 1: General Failure + * Integer 2: Most recent operation request + * 0: None + * >0: Operation Value of the most recent request + * Integer 3: Response to the most recent operation request + * 0: Success + * 0x00000001..0x00000FFF: Corresponding TPM + * error code + * 0xFFFFFFF0: User Abort or timeout of dialog + * 0xFFFFFFF1: firmware Failure + */ + ifctx2 = aml_if(aml_equal(function, aml_int(5))); + { + /* TPM3[1] = LPPR */ + aml_append(ifctx2, + aml_store(aml_name("LPPR"), + aml_index(tpm3, one))); + /* TPM3[2] = PPRP */ + aml_append(ifctx2, + aml_store(aml_name("PPRP"), + aml_index(tpm3, aml_int(2)))); + aml_append(ifctx2, aml_return(tpm3)); + } + aml_append(ifctx, ifctx2); + + /* + * PPI 1.0: 2.1.7 Submit preferred user language + * + * Arg 2 (Integer): Function Index = 6 + * Arg 3 (Package): Arguments = String Package + * Preferred language code + * Returns: Type: Integer + * Function Return Code + * 3: Not implemented + */ + ifctx2 = aml_if(aml_equal(function, aml_int(6))); + { + /* 3 = not implemented */ + aml_append(ifctx2, aml_return(aml_int(3))); + } + aml_append(ifctx, ifctx2); + + /* + * PPI 1.1: 2.1.7 Submit TPM Operation Request to + * Pre-OS Environment 2 + * + * Arg 2 (Integer): Function Index = 7 + * Arg 3 (Package): Arguments = Package: Type: Integer + * Integer 1: Operation Value of the Request + * Integer 2: Argument for Operation (optional) + * Returns: Type: Integer + * 0: Success + * 1: Not Implemented + * 2: General Failure + * 3: Operation blocked by current firmware settings + */ + ifctx2 = aml_if(aml_equal(function, aml_int(7))); + { + /* get opcode */ + aml_append(ifctx2, aml_store(aml_derefof(aml_index(arguments, + zero)), + op)); + + /* get opcode flags */ + aml_append(ifctx2, aml_store(aml_call1("TPFN", op), + op_flags)); + /* if func[opcode] & TPM_PPI_FUNC_NOT_IMPLEMENTED */ + ifctx3 = aml_if( + aml_equal( + aml_and(op_flags, func_mask, NULL), + not_implemented)); + { + /* 1: not implemented */ + aml_append(ifctx3, aml_return(one)); + } + aml_append(ifctx2, ifctx3); + + /* if func[opcode] & TPM_PPI_FUNC_BLOCKED */ + ifctx3 = aml_if( + aml_equal( + aml_and(op_flags, func_mask, NULL), + aml_int(TPM_PPI_FUNC_BLOCKED))); + { + /* 3: blocked by firmware */ + aml_append(ifctx3, aml_return(aml_int(3))); + } + aml_append(ifctx2, ifctx3); + + /* revision to integer */ + ifctx3 = aml_if(aml_equal(rev, one)); + { + /* revision 1 */ + /* PPRQ = op */ + aml_append(ifctx3, aml_store(op, pprq)); + /* no argument, PPRM = 0 */ + aml_append(ifctx3, 
aml_store(zero, pprm)); + } + aml_append(ifctx2, ifctx3); + + ifctx3 = aml_if(aml_equal(rev, aml_int(2))); + { + /* revision 2 */ + /* PPRQ = op */ + op_arg = aml_derefof(aml_index(arguments, one)); + aml_append(ifctx3, aml_store(op, pprq)); + /* PPRM = arg3[1] */ + aml_append(ifctx3, aml_store(op_arg, pprm)); + } + aml_append(ifctx2, ifctx3); + /* 0: success */ + aml_append(ifctx2, aml_return(zero)); + } + aml_append(ifctx, ifctx2); + + /* + * PPI 1.1: 2.1.8 Get User Confirmation Status for Operation + * + * Arg 2 (Integer): Function Index = 8 + * Arg 3 (Package): Arguments = Package: Type: Integer + * Operation Value that may need user confirmation + * Returns: Type: Integer + * 0: Not implemented + * 1: Firmware only + * 2: Blocked for OS by firmware configuration + * 3: Allowed and physically present user required + * 4: Allowed and physically present user not required + */ + ifctx2 = aml_if(aml_equal(function, aml_int(8))); + { + /* get opcode */ + aml_append(ifctx2, + aml_store(aml_derefof(aml_index(arguments, + zero)), + op)); + + /* get opcode flags */ + aml_append(ifctx2, aml_store(aml_call1("TPFN", op), + op_flags)); + /* return confirmation status code */ + aml_append(ifctx2, + aml_return( + aml_and(op_flags, func_mask, NULL))); + } + aml_append(ifctx, ifctx2); + + aml_append(ifctx, aml_return(aml_buffer(1, zerobyte))); + } + aml_append(method, ifctx); + + /* + * "TCG Platform Reset Attack Mitigation Specification 1.00", + * Chapter 6 "ACPI _DSM Function" + */ + ifctx = aml_if( + aml_equal(uuid, + aml_touuid("376054ED-CC13-4675-901C-4756D7F2D45D"))); + { + /* standard DSM query function */ + ifctx2 = aml_if(aml_equal(function, zero)); + { + uint8_t byte_list[1] = { 0x03 }; /* functions 1-2 supported */ + + aml_append(ifctx2, + aml_return(aml_buffer(sizeof(byte_list), + byte_list))); + } + aml_append(ifctx, ifctx2); + + /* + * TCG Platform Reset Attack Mitigation Specification 1.0 Ch.6 + * + * Arg 2 (Integer): Function Index = 1 + * Arg 3 (Package): Arguments = Package: Type: Integer + * Operation Value of the Request + * Returns: Type: Integer + * 0: Success + * 1: General Failure + */ + ifctx2 = aml_if(aml_equal(function, one)); + { + aml_append(ifctx2, + aml_store(aml_derefof(aml_index(arguments, zero)), + op)); + { + aml_append(ifctx2, aml_store(op, aml_name("MOVV"))); + + /* 0: success */ + aml_append(ifctx2, aml_return(zero)); + } + } + aml_append(ifctx, ifctx2); + } + aml_append(method, ifctx); + } + aml_append(dev, method); +} diff --git a/hw/acpi/trace-events b/hw/acpi/trace-events new file mode 100644 index 000000000..974d770e8 --- /dev/null +++ b/hw/acpi/trace-events @@ -0,0 +1,57 @@ +# See docs/devel/tracing.rst for syntax documentation. 
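+#
+# Editor's note: these events can be enabled at run time with QEMU's
+# -trace option (see docs/devel/tracing.rst); a hypothetical invocation
+# enabling the memory-hotplug events below would be:
+#
+#   qemu-system-x86_64 -trace "mhp_acpi_*" ...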
+ +# memory_hotplug.c +mhp_acpi_invalid_slot_selected(uint32_t slot) "0x%"PRIx32 +mhp_acpi_ejecting_invalid_slot(uint32_t slot) "0x%"PRIx32 +mhp_acpi_read_addr_lo(uint32_t slot, uint32_t addr) "slot[0x%"PRIx32"] addr lo: 0x%"PRIx32 +mhp_acpi_read_addr_hi(uint32_t slot, uint32_t addr) "slot[0x%"PRIx32"] addr hi: 0x%"PRIx32 +mhp_acpi_read_size_lo(uint32_t slot, uint32_t size) "slot[0x%"PRIx32"] size lo: 0x%"PRIx32 +mhp_acpi_read_size_hi(uint32_t slot, uint32_t size) "slot[0x%"PRIx32"] size hi: 0x%"PRIx32 +mhp_acpi_read_pxm(uint32_t slot, uint32_t pxm) "slot[0x%"PRIx32"] proximity: 0x%"PRIx32 +mhp_acpi_read_flags(uint32_t slot, uint32_t flags) "slot[0x%"PRIx32"] flags: 0x%"PRIx32 +mhp_acpi_write_slot(uint32_t slot) "set active slot: 0x%"PRIx32 +mhp_acpi_write_ost_ev(uint32_t slot, uint32_t ev) "slot[0x%"PRIx32"] OST EVENT: 0x%"PRIx32 +mhp_acpi_write_ost_status(uint32_t slot, uint32_t st) "slot[0x%"PRIx32"] OST STATUS: 0x%"PRIx32 +mhp_acpi_clear_insert_evt(uint32_t slot) "slot[0x%"PRIx32"] clear insert event" +mhp_acpi_clear_remove_evt(uint32_t slot) "slot[0x%"PRIx32"] clear remove event" +mhp_acpi_pc_dimm_deleted(uint32_t slot) "slot[0x%"PRIx32"] pc-dimm deleted" +mhp_acpi_pc_dimm_delete_failed(uint32_t slot) "slot[0x%"PRIx32"] pc-dimm delete failed" + +# cpu.c +cpuhp_acpi_invalid_idx_selected(uint32_t idx) "0x%"PRIx32 +cpuhp_acpi_read_flags(uint32_t idx, uint8_t flags) "idx[0x%"PRIx32"] flags: 0x%"PRIx8 +cpuhp_acpi_write_idx(uint32_t idx) "set active cpu idx: 0x%"PRIx32 +cpuhp_acpi_write_cmd(uint32_t idx, uint8_t cmd) "idx[0x%"PRIx32"] cmd: 0x%"PRIx8 +cpuhp_acpi_read_cmd_data(uint32_t idx, uint32_t data) "idx[0x%"PRIx32"] data: 0x%"PRIx32 +cpuhp_acpi_read_cmd_data2(uint32_t idx, uint32_t data) "idx[0x%"PRIx32"] data: 0x%"PRIx32 +cpuhp_acpi_cpu_has_events(uint32_t idx, bool ins, bool rm) "idx[0x%"PRIx32"] inserting: %d, removing: %d" +cpuhp_acpi_clear_inserting_evt(uint32_t idx) "idx[0x%"PRIx32"]" +cpuhp_acpi_clear_remove_evt(uint32_t idx) "idx[0x%"PRIx32"]" +cpuhp_acpi_ejecting_invalid_cpu(uint32_t idx) "0x%"PRIx32 +cpuhp_acpi_ejecting_cpu(uint32_t idx) "0x%"PRIx32 +cpuhp_acpi_fw_remove_invalid_cpu(uint32_t idx) "0x%"PRIx32 +cpuhp_acpi_fw_remove_cpu(uint32_t idx) "0x%"PRIx32 +cpuhp_acpi_write_ost_ev(uint32_t slot, uint32_t ev) "idx[0x%"PRIx32"] OST EVENT: 0x%"PRIx32 +cpuhp_acpi_write_ost_status(uint32_t slot, uint32_t st) "idx[0x%"PRIx32"] OST STATUS: 0x%"PRIx32 + +# pcihp.c +acpi_pci_eject_slot(unsigned bsel, unsigned slot) "bsel: %u slot: %u" +acpi_pci_unplug(int bsel, int slot) "bsel: %d slot: %d" +acpi_pci_unplug_request(int bsel, int slot) "bsel: %d slot: %d" +acpi_pci_up_read(uint32_t val) "%" PRIu32 +acpi_pci_down_read(uint32_t val) "%" PRIu32 +acpi_pci_features_read(uint32_t val) "%" PRIu32 +acpi_pci_acpi_index_read(uint32_t val) "%" PRIu32 +acpi_pci_acpi_index_write(unsigned bsel, unsigned slot, uint32_t aidx) "bsel: %u slot: %u aidx: %" PRIu32 +acpi_pci_rmv_read(uint32_t val) "%" PRIu32 +acpi_pci_sel_read(uint32_t val) "%" PRIu32 +acpi_pci_ej_write(uint64_t addr, uint64_t data) "0x%" PRIx64 " <== %" PRIu64 +acpi_pci_sel_write(uint64_t addr, uint64_t data) "0x%" PRIx64 " <== %" PRIu64 + +# piix4.c +piix4_gpe_readb(uint64_t addr, unsigned width, uint64_t val) "addr: 0x%" PRIx64 " width: %d ==> 0x%" PRIx64 +piix4_gpe_writeb(uint64_t addr, unsigned width, uint64_t val) "addr: 0x%" PRIx64 " width: %d <== 0x%" PRIx64 + +# tco.c +tco_timer_reload(int ticks, int msec) "ticks=%d (%d ms)" +tco_timer_expired(int timeouts_no, bool strap, bool no_reboot) "timeouts_no=%d no_reboot=%d/%d" diff 
--git a/hw/acpi/trace.h b/hw/acpi/trace.h new file mode 100644 index 000000000..a7f7da700 --- /dev/null +++ b/hw/acpi/trace.h @@ -0,0 +1 @@ +#include "trace/trace-hw_acpi.h" diff --git a/hw/acpi/utils.c b/hw/acpi/utils.c new file mode 100644 index 000000000..0c486ea29 --- /dev/null +++ b/hw/acpi/utils.c @@ -0,0 +1,48 @@ +/* + * Utilities for generating ACPI tables and passing them to Guests + * + * Copyright (C) 2019 Intel Corporation + * Copyright (C) 2019 Red Hat Inc + * + * Author: Wei Yang + * Author: Michael S. Tsirkin + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . + */ + +#include "qemu/osdep.h" +#include "hw/acpi/aml-build.h" +#include "hw/acpi/utils.h" +#include "hw/loader.h" + +MemoryRegion *acpi_add_rom_blob(FWCfgCallback update, void *opaque, + GArray *blob, const char *name) +{ + uint64_t max_size; + + /* Reserve RAM space for tables: add another order of magnitude. */ + if (!strcmp(name, ACPI_BUILD_TABLE_FILE)) { + max_size = 0x200000; + } else if (!strcmp(name, ACPI_BUILD_LOADER_FILE)) { + max_size = 0x10000; + } else if (!strcmp(name, ACPI_BUILD_RSDP_FILE)) { + max_size = 0x1000; + } else { + g_assert_not_reached(); + } + g_assert(acpi_data_len(blob) <= max_size); + + return rom_add_blob(name, blob->data, acpi_data_len(blob), max_size, -1, + name, update, opaque, NULL, true); +} diff --git a/hw/acpi/viot.c b/hw/acpi/viot.c new file mode 100644 index 000000000..c1af75206 --- /dev/null +++ b/hw/acpi/viot.c @@ -0,0 +1,114 @@ +/* + * ACPI Virtual I/O Translation table implementation + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ +#include "qemu/osdep.h" +#include "hw/acpi/acpi.h" +#include "hw/acpi/aml-build.h" +#include "hw/acpi/viot.h" +#include "hw/pci/pci.h" +#include "hw/pci/pci_host.h" + +struct viot_pci_ranges { + GArray *blob; + size_t count; + uint16_t output_node; +}; + +/* Build PCI range for a given PCI host bridge */ +static int build_pci_range_node(Object *obj, void *opaque) +{ + struct viot_pci_ranges *pci_ranges = opaque; + GArray *blob = pci_ranges->blob; + + if (object_dynamic_cast(obj, TYPE_PCI_HOST_BRIDGE)) { + PCIBus *bus = PCI_HOST_BRIDGE(obj)->bus; + + if (bus && !pci_bus_bypass_iommu(bus)) { + int min_bus, max_bus; + + pci_bus_range(bus, &min_bus, &max_bus); + + /* Type */ + build_append_int_noprefix(blob, 1 /* PCI range */, 1); + /* Reserved */ + build_append_int_noprefix(blob, 0, 1); + /* Length */ + build_append_int_noprefix(blob, 24, 2); + /* Endpoint start */ + build_append_int_noprefix(blob, PCI_BUILD_BDF(min_bus, 0), 4); + /* PCI Segment start */ + build_append_int_noprefix(blob, 0, 2); + /* PCI Segment end */ + build_append_int_noprefix(blob, 0, 2); + /* PCI BDF start */ + build_append_int_noprefix(blob, PCI_BUILD_BDF(min_bus, 0), 2); + /* PCI BDF end */ + build_append_int_noprefix(blob, PCI_BUILD_BDF(max_bus, 0xff), 2); + /* Output node */ + build_append_int_noprefix(blob, pci_ranges->output_node, 2); + /* Reserved */ + build_append_int_noprefix(blob, 0, 6); + + 
pci_ranges->count++; + } + } + + return 0; +} + +/* + * Generate a VIOT table with one PCI-based virtio-iommu that manages PCI + * endpoints. + * + * Defined in the ACPI Specification (Version TBD) + */ +void build_viot(MachineState *ms, GArray *table_data, BIOSLinker *linker, + uint16_t virtio_iommu_bdf, const char *oem_id, + const char *oem_table_id) +{ + /* The virtio-iommu node follows the 48-bytes header */ + int viommu_off = 48; + AcpiTable table = { .sig = "VIOT", .rev = 0, + .oem_id = oem_id, .oem_table_id = oem_table_id }; + struct viot_pci_ranges pci_ranges = { + .output_node = viommu_off, + .blob = g_array_new(false, true /* clear */, 1), + }; + + /* Build the list of PCI ranges that this viommu manages */ + object_child_foreach_recursive(OBJECT(ms), build_pci_range_node, + &pci_ranges); + + /* ACPI table header */ + acpi_table_begin(&table, table_data); + /* Node count */ + build_append_int_noprefix(table_data, pci_ranges.count + 1, 2); + /* Node offset */ + build_append_int_noprefix(table_data, viommu_off, 2); + /* Reserved */ + build_append_int_noprefix(table_data, 0, 8); + + /* Virtio-iommu node */ + /* Type */ + build_append_int_noprefix(table_data, 3 /* virtio-pci IOMMU */, 1); + /* Reserved */ + build_append_int_noprefix(table_data, 0, 1); + /* Length */ + build_append_int_noprefix(table_data, 16, 2); + /* PCI Segment */ + build_append_int_noprefix(table_data, 0, 2); + /* PCI BDF number */ + build_append_int_noprefix(table_data, virtio_iommu_bdf, 2); + /* Reserved */ + build_append_int_noprefix(table_data, 0, 8); + + /* PCI ranges found above */ + g_array_append_vals(table_data, pci_ranges.blob->data, + pci_ranges.blob->len); + g_array_free(pci_ranges.blob, true); + + acpi_table_end(linker, &table); +} + diff --git a/hw/acpi/viot.h b/hw/acpi/viot.h new file mode 100644 index 000000000..9fe565bb8 --- /dev/null +++ b/hw/acpi/viot.h @@ -0,0 +1,13 @@ +/* + * ACPI Virtual I/O Translation Table implementation + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ +#ifndef VIOT_H +#define VIOT_H + +void build_viot(MachineState *ms, GArray *table_data, BIOSLinker *linker, + uint16_t virtio_iommu_bdf, const char *oem_id, + const char *oem_table_id); + +#endif /* VIOT_H */ diff --git a/hw/acpi/vmgenid.c b/hw/acpi/vmgenid.c new file mode 100644 index 000000000..0c9f158ac --- /dev/null +++ b/hw/acpi/vmgenid.c @@ -0,0 +1,263 @@ +/* + * Virtual Machine Generation ID Device + * + * Copyright (C) 2017 Skyport Systems. + * + * Author: Ben Warren + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qapi/qapi-commands-machine.h" +#include "qemu/module.h" +#include "hw/acpi/acpi.h" +#include "hw/acpi/aml-build.h" +#include "hw/acpi/vmgenid.h" +#include "hw/nvram/fw_cfg.h" +#include "hw/qdev-properties.h" +#include "hw/qdev-properties-system.h" +#include "migration/vmstate.h" +#include "sysemu/reset.h" + +void vmgenid_build_acpi(VmGenIdState *vms, GArray *table_data, GArray *guid, + BIOSLinker *linker, const char *oem_id) +{ + Aml *ssdt, *dev, *scope, *method, *addr, *if_ctx; + uint32_t vgia_offset; + QemuUUID guid_le; + AcpiTable table = { .sig = "SSDT", .rev = 1, + .oem_id = oem_id, .oem_table_id = "VMGENID" }; + + /* Fill in the GUID values. 
These need to be converted to little-endian
+ * first, since that's what the guest expects
+ */
+    g_array_set_size(guid, VMGENID_FW_CFG_SIZE - ARRAY_SIZE(guid_le.data));
+    guid_le = qemu_uuid_bswap(vms->guid);
+    /* The GUID is written at a fixed offset into the fw_cfg file
+     * in order to implement the "OVMF SDT Header probe suppressor"
+     * see docs/specs/vmgenid.txt for more details
+     */
+    g_array_insert_vals(guid, VMGENID_GUID_OFFSET, guid_le.data,
+                        ARRAY_SIZE(guid_le.data));
+
+    /* Put VMGENID into a separate SSDT table */
+    acpi_table_begin(&table, table_data);
+    ssdt = init_aml_allocator();
+
+    /* Storage for the GUID address */
+    vgia_offset = table_data->len +
+        build_append_named_dword(ssdt->buf, "VGIA");
+    scope = aml_scope("\\_SB");
+    dev = aml_device("VGEN");
+    aml_append(dev, aml_name_decl("_HID", aml_string("QEMUVGID")));
+    aml_append(dev, aml_name_decl("_CID", aml_string("VM_Gen_Counter")));
+    aml_append(dev, aml_name_decl("_DDN", aml_string("VM_Gen_Counter")));
+
+    /* Simple status method to check that address is linked and non-zero */
+    method = aml_method("_STA", 0, AML_NOTSERIALIZED);
+    addr = aml_local(0);
+    aml_append(method, aml_store(aml_int(0xf), addr));
+    if_ctx = aml_if(aml_equal(aml_name("VGIA"), aml_int(0)));
+    aml_append(if_ctx, aml_store(aml_int(0), addr));
+    aml_append(method, if_ctx);
+    aml_append(method, aml_return(addr));
+    aml_append(dev, method);
+
+    /* the ADDR method returns two 32-bit words representing the lower and
+     * upper halves of the physical address of the fw_cfg blob
+     * (holding the GUID)
+     */
+    method = aml_method("ADDR", 0, AML_NOTSERIALIZED);
+
+    addr = aml_local(0);
+    aml_append(method, aml_store(aml_package(2), addr));
+
+    aml_append(method, aml_store(aml_add(aml_name("VGIA"),
+                                         aml_int(VMGENID_GUID_OFFSET), NULL),
+                                 aml_index(addr, aml_int(0))));
+    aml_append(method, aml_store(aml_int(0), aml_index(addr, aml_int(1))));
+    aml_append(method, aml_return(addr));
+
+    aml_append(dev, method);
+    aml_append(scope, dev);
+    aml_append(ssdt, scope);
+
+    /* attach an ACPI notify */
+    method = aml_method("\\_GPE._E05", 0, AML_NOTSERIALIZED);
+    aml_append(method, aml_notify(aml_name("\\_SB.VGEN"), aml_int(0x80)));
+    aml_append(ssdt, method);
+
+    g_array_append_vals(table_data, ssdt->buf->data, ssdt->buf->len);
+
+    /* Allocate guest memory for the Data fw_cfg blob */
+    bios_linker_loader_alloc(linker, VMGENID_GUID_FW_CFG_FILE, guid, 4096,
+                             false /* page boundary, high memory */);
+
+    /* Patch address of GUID fw_cfg blob into the ADDR fw_cfg blob
+     * so QEMU can write the GUID there. The address is expected to be
+     * < 4GB, but write 64 bits anyway.
+     * The address that is patched in is offset in order to implement
+     * the "OVMF SDT Header probe suppressor"
+     * see docs/specs/vmgenid.txt for more details.
+     */
+    bios_linker_loader_write_pointer(linker,
+        VMGENID_ADDR_FW_CFG_FILE, 0, sizeof(uint64_t),
+        VMGENID_GUID_FW_CFG_FILE, VMGENID_GUID_OFFSET);
+
+    /* Patch address of GUID fw_cfg blob into the AML so OSPM can retrieve
+     * and read it. Note that while we provide storage for 64 bits, only
+     * the least-significant 32 get patched into AML.
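+     * (Editor's note: the AML ADDR method compensates by adding
+     * VMGENID_GUID_OFFSET to VGIA, while the fw_cfg ADDR blob above was
+     * patched with the offset already applied, so both paths yield the
+     * same guest-physical GUID location.)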
+ */ + bios_linker_loader_add_pointer(linker, + ACPI_BUILD_TABLE_FILE, vgia_offset, sizeof(uint32_t), + VMGENID_GUID_FW_CFG_FILE, 0); + + /* must be called after above command to ensure correct table checksum */ + acpi_table_end(linker, &table); + free_aml_allocator(); +} + +void vmgenid_add_fw_cfg(VmGenIdState *vms, FWCfgState *s, GArray *guid) +{ + /* Create a read-only fw_cfg file for GUID */ + fw_cfg_add_file(s, VMGENID_GUID_FW_CFG_FILE, guid->data, + VMGENID_FW_CFG_SIZE); + /* Create a read-write fw_cfg file for Address */ + fw_cfg_add_file_callback(s, VMGENID_ADDR_FW_CFG_FILE, NULL, NULL, NULL, + vms->vmgenid_addr_le, + ARRAY_SIZE(vms->vmgenid_addr_le), false); +} + +static void vmgenid_update_guest(VmGenIdState *vms) +{ + Object *obj = object_resolve_path_type("", TYPE_ACPI_DEVICE_IF, NULL); + uint32_t vmgenid_addr; + QemuUUID guid_le; + + if (obj) { + /* Write the GUID to guest memory */ + memcpy(&vmgenid_addr, vms->vmgenid_addr_le, sizeof(vmgenid_addr)); + vmgenid_addr = le32_to_cpu(vmgenid_addr); + /* A zero value in vmgenid_addr means that BIOS has not yet written + * the address + */ + if (vmgenid_addr) { + /* QemuUUID has the first three words as big-endian, and expect + * that any GUIDs passed in will always be BE. The guest, + * however, will expect the fields to be little-endian. + * Perform a byte swap immediately before writing. + */ + guid_le = qemu_uuid_bswap(vms->guid); + /* The GUID is written at a fixed offset into the fw_cfg file + * in order to implement the "OVMF SDT Header probe suppressor" + * see docs/specs/vmgenid.txt for more details. + */ + cpu_physical_memory_write(vmgenid_addr, guid_le.data, + sizeof(guid_le.data)); + /* Send _GPE.E05 event */ + acpi_send_event(DEVICE(obj), ACPI_VMGENID_CHANGE_STATUS); + } + } +} + +/* After restoring an image, we need to update the guest memory and notify + * it of a potential change to VM Generation ID + */ +static int vmgenid_post_load(void *opaque, int version_id) +{ + VmGenIdState *vms = opaque; + vmgenid_update_guest(vms); + return 0; +} + +static const VMStateDescription vmstate_vmgenid = { + .name = "vmgenid", + .version_id = 1, + .minimum_version_id = 1, + .post_load = vmgenid_post_load, + .fields = (VMStateField[]) { + VMSTATE_UINT8_ARRAY(vmgenid_addr_le, VmGenIdState, sizeof(uint64_t)), + VMSTATE_END_OF_LIST() + }, +}; + +static void vmgenid_handle_reset(void *opaque) +{ + VmGenIdState *vms = VMGENID(opaque); + /* Clear the guest-allocated GUID address when the VM resets */ + memset(vms->vmgenid_addr_le, 0, ARRAY_SIZE(vms->vmgenid_addr_le)); +} + +static void vmgenid_realize(DeviceState *dev, Error **errp) +{ + VmGenIdState *vms = VMGENID(dev); + + if (!bios_linker_loader_can_write_pointer()) { + error_setg(errp, "%s requires DMA write support in fw_cfg, " + "which this machine type does not provide", TYPE_VMGENID); + return; + } + + /* Given that this function is executing, there is at least one VMGENID + * device. Check if there are several. 
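+     * (Editor's note, assuming object_resolve_path_type() semantics for
+     * find_vmgenid_dev(): the lookup returns NULL when the path is
+     * ambiguous, so NULL here means a second device already exists.)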
+ */ + if (!find_vmgenid_dev()) { + error_setg(errp, "at most one %s device is permitted", TYPE_VMGENID); + return; + } + + qemu_register_reset(vmgenid_handle_reset, vms); + + vmgenid_update_guest(vms); +} + +static Property vmgenid_device_properties[] = { + DEFINE_PROP_UUID(VMGENID_GUID, VmGenIdState, guid), + DEFINE_PROP_END_OF_LIST(), +}; + +static void vmgenid_device_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->vmsd = &vmstate_vmgenid; + dc->realize = vmgenid_realize; + device_class_set_props(dc, vmgenid_device_properties); + dc->hotpluggable = false; + set_bit(DEVICE_CATEGORY_MISC, dc->categories); +} + +static const TypeInfo vmgenid_device_info = { + .name = TYPE_VMGENID, + .parent = TYPE_DEVICE, + .instance_size = sizeof(VmGenIdState), + .class_init = vmgenid_device_class_init, +}; + +static void vmgenid_register_types(void) +{ + type_register_static(&vmgenid_device_info); +} + +type_init(vmgenid_register_types) + +GuidInfo *qmp_query_vm_generation_id(Error **errp) +{ + GuidInfo *info; + VmGenIdState *vms; + Object *obj = find_vmgenid_dev(); + + if (!obj) { + error_setg(errp, "VM Generation ID device not found"); + return NULL; + } + vms = VMGENID(obj); + + info = g_malloc0(sizeof(*info)); + info->guid = qemu_uuid_unparse_strdup(&vms->guid); + return info; +} diff --git a/hw/adc/Kconfig b/hw/adc/Kconfig new file mode 100644 index 000000000..a825bd3d3 --- /dev/null +++ b/hw/adc/Kconfig @@ -0,0 +1,5 @@ +config STM32F2XX_ADC + bool + +config MAX111X + bool diff --git a/hw/adc/aspeed_adc.c b/hw/adc/aspeed_adc.c new file mode 100644 index 000000000..c5fcae29f --- /dev/null +++ b/hw/adc/aspeed_adc.c @@ -0,0 +1,427 @@ +/* + * Aspeed ADC + * + * Copyright 2017-2021 IBM Corp. + * + * Andrew Jeffery + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/log.h" +#include "hw/irq.h" +#include "hw/qdev-properties.h" +#include "migration/vmstate.h" +#include "hw/adc/aspeed_adc.h" +#include "trace.h" + +#define ASPEED_ADC_MEMORY_REGION_SIZE 0x1000 +#define ASPEED_ADC_ENGINE_MEMORY_REGION_SIZE 0x100 +#define ASPEED_ADC_ENGINE_CH_EN_MASK 0xffff0000 +#define ASPEED_ADC_ENGINE_CH_EN(x) ((BIT(x)) << 16) +#define ASPEED_ADC_ENGINE_INIT BIT(8) +#define ASPEED_ADC_ENGINE_AUTO_COMP BIT(5) +#define ASPEED_ADC_ENGINE_COMP BIT(4) +#define ASPEED_ADC_ENGINE_MODE_MASK 0x0000000e +#define ASPEED_ADC_ENGINE_MODE_OFF (0b000 << 1) +#define ASPEED_ADC_ENGINE_MODE_STANDBY (0b001 << 1) +#define ASPEED_ADC_ENGINE_MODE_NORMAL (0b111 << 1) +#define ASPEED_ADC_ENGINE_EN BIT(0) +#define ASPEED_ADC_HYST_EN BIT(31) + +#define ASPEED_ADC_L_MASK ((1 << 10) - 1) +#define ASPEED_ADC_L(x) ((x) & ASPEED_ADC_L_MASK) +#define ASPEED_ADC_H(x) (((x) >> 16) & ASPEED_ADC_L_MASK) +#define ASPEED_ADC_LH_MASK (ASPEED_ADC_L_MASK << 16 | ASPEED_ADC_L_MASK) +#define LOWER_CHANNEL_MASK ((1 << 10) - 1) +#define LOWER_CHANNEL_DATA(x) ((x) & LOWER_CHANNEL_MASK) +#define UPPER_CHANNEL_DATA(x) (((x) >> 16) & LOWER_CHANNEL_MASK) + +#define TO_REG(addr) (addr >> 2) + +#define ENGINE_CONTROL TO_REG(0x00) +#define INTERRUPT_CONTROL TO_REG(0x04) +#define VGA_DETECT_CONTROL TO_REG(0x08) +#define CLOCK_CONTROL TO_REG(0x0C) +#define DATA_CHANNEL_1_AND_0 TO_REG(0x10) +#define DATA_CHANNEL_7_AND_6 TO_REG(0x1C) +#define DATA_CHANNEL_9_AND_8 TO_REG(0x20) +#define DATA_CHANNEL_15_AND_14 TO_REG(0x2C) +#define BOUNDS_CHANNEL_0 TO_REG(0x30) +#define BOUNDS_CHANNEL_7 TO_REG(0x4C) +#define BOUNDS_CHANNEL_8 TO_REG(0x50) +#define BOUNDS_CHANNEL_15 
TO_REG(0x6C) +#define HYSTERESIS_CHANNEL_0 TO_REG(0x70) +#define HYSTERESIS_CHANNEL_7 TO_REG(0x8C) +#define HYSTERESIS_CHANNEL_8 TO_REG(0x90) +#define HYSTERESIS_CHANNEL_15 TO_REG(0xAC) +#define INTERRUPT_SOURCE TO_REG(0xC0) +#define COMPENSATING_AND_TRIMMING TO_REG(0xC4) + +static inline uint32_t update_channels(uint32_t current) +{ + return ((((current >> 16) & ASPEED_ADC_L_MASK) + 7) << 16) | + ((current + 5) & ASPEED_ADC_L_MASK); +} + +static bool breaks_threshold(AspeedADCEngineState *s, int reg) +{ + assert(reg >= DATA_CHANNEL_1_AND_0 && + reg < DATA_CHANNEL_1_AND_0 + s->nr_channels / 2); + + int a_bounds_reg = BOUNDS_CHANNEL_0 + (reg - DATA_CHANNEL_1_AND_0) * 2; + int b_bounds_reg = a_bounds_reg + 1; + uint32_t a_and_b = s->regs[reg]; + uint32_t a_bounds = s->regs[a_bounds_reg]; + uint32_t b_bounds = s->regs[b_bounds_reg]; + uint32_t a = ASPEED_ADC_L(a_and_b); + uint32_t b = ASPEED_ADC_H(a_and_b); + uint32_t a_lower = ASPEED_ADC_L(a_bounds); + uint32_t a_upper = ASPEED_ADC_H(a_bounds); + uint32_t b_lower = ASPEED_ADC_L(b_bounds); + uint32_t b_upper = ASPEED_ADC_H(b_bounds); + + return (a < a_lower || a > a_upper) || + (b < b_lower || b > b_upper); +} + +static uint32_t read_channel_sample(AspeedADCEngineState *s, int reg) +{ + assert(reg >= DATA_CHANNEL_1_AND_0 && + reg < DATA_CHANNEL_1_AND_0 + s->nr_channels / 2); + + /* Poor man's sampling */ + uint32_t value = s->regs[reg]; + s->regs[reg] = update_channels(s->regs[reg]); + + if (breaks_threshold(s, reg)) { + s->regs[INTERRUPT_CONTROL] |= BIT(reg - DATA_CHANNEL_1_AND_0); + qemu_irq_raise(s->irq); + } + + return value; +} + +static uint64_t aspeed_adc_engine_read(void *opaque, hwaddr addr, + unsigned int size) +{ + AspeedADCEngineState *s = ASPEED_ADC_ENGINE(opaque); + int reg = TO_REG(addr); + uint32_t value = 0; + + switch (reg) { + case BOUNDS_CHANNEL_8 ... BOUNDS_CHANNEL_15: + if (s->nr_channels <= 8) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: engine[%u]: " + "bounds register %u invalid, only 0...7 valid\n", + __func__, s->engine_id, reg - BOUNDS_CHANNEL_0); + break; + } + /* fallthrough */ + case HYSTERESIS_CHANNEL_8 ... HYSTERESIS_CHANNEL_15: + if (s->nr_channels <= 8) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: engine[%u]: " + "hysteresis register %u invalid, only 0...7 valid\n", + __func__, s->engine_id, reg - HYSTERESIS_CHANNEL_0); + break; + } + /* fallthrough */ + case BOUNDS_CHANNEL_0 ... BOUNDS_CHANNEL_7: + case HYSTERESIS_CHANNEL_0 ... HYSTERESIS_CHANNEL_7: + case ENGINE_CONTROL: + case INTERRUPT_CONTROL: + case VGA_DETECT_CONTROL: + case CLOCK_CONTROL: + case INTERRUPT_SOURCE: + case COMPENSATING_AND_TRIMMING: + value = s->regs[reg]; + break; + case DATA_CHANNEL_9_AND_8 ... DATA_CHANNEL_15_AND_14: + if (s->nr_channels <= 8) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: engine[%u]: " + "data register %u invalid, only 0...3 valid\n", + __func__, s->engine_id, reg - DATA_CHANNEL_1_AND_0); + break; + } + /* fallthrough */ + case DATA_CHANNEL_1_AND_0 ... 
DATA_CHANNEL_7_AND_6: + value = read_channel_sample(s, reg); + /* Allow 16-bit reads of the data registers */ + if (addr & 0x2) { + assert(size == 2); + value >>= 16; + } + break; + default: + qemu_log_mask(LOG_UNIMP, "%s: engine[%u]: 0x%" HWADDR_PRIx "\n", + __func__, s->engine_id, addr); + break; + } + + trace_aspeed_adc_engine_read(s->engine_id, addr, value); + return value; +} + +static void aspeed_adc_engine_write(void *opaque, hwaddr addr, uint64_t value, + unsigned int size) +{ + AspeedADCEngineState *s = ASPEED_ADC_ENGINE(opaque); + int reg = TO_REG(addr); + uint32_t init = 0; + + trace_aspeed_adc_engine_write(s->engine_id, addr, value); + + switch (reg) { + case ENGINE_CONTROL: + init = !!(value & ASPEED_ADC_ENGINE_EN); + init *= ASPEED_ADC_ENGINE_INIT; + + value &= ~ASPEED_ADC_ENGINE_INIT; + value |= init; + + value &= ~ASPEED_ADC_ENGINE_AUTO_COMP; + break; + case INTERRUPT_CONTROL: + case VGA_DETECT_CONTROL: + case CLOCK_CONTROL: + break; + case DATA_CHANNEL_9_AND_8 ... DATA_CHANNEL_15_AND_14: + if (s->nr_channels <= 8) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: engine[%u]: " + "data register %u invalid, only 0...3 valid\n", + __func__, s->engine_id, reg - DATA_CHANNEL_1_AND_0); + return; + } + /* fallthrough */ + case BOUNDS_CHANNEL_8 ... BOUNDS_CHANNEL_15: + if (s->nr_channels <= 8) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: engine[%u]: " + "bounds register %u invalid, only 0...7 valid\n", + __func__, s->engine_id, reg - BOUNDS_CHANNEL_0); + return; + } + /* fallthrough */ + case DATA_CHANNEL_1_AND_0 ... DATA_CHANNEL_7_AND_6: + case BOUNDS_CHANNEL_0 ... BOUNDS_CHANNEL_7: + value &= ASPEED_ADC_LH_MASK; + break; + case HYSTERESIS_CHANNEL_8 ... HYSTERESIS_CHANNEL_15: + if (s->nr_channels <= 8) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: engine[%u]: " + "hysteresis register %u invalid, only 0...7 valid\n", + __func__, s->engine_id, reg - HYSTERESIS_CHANNEL_0); + return; + } + /* fallthrough */ + case HYSTERESIS_CHANNEL_0 ... 
HYSTERESIS_CHANNEL_7: + value &= (ASPEED_ADC_HYST_EN | ASPEED_ADC_LH_MASK); + break; + case INTERRUPT_SOURCE: + value &= 0xffff; + break; + case COMPENSATING_AND_TRIMMING: + value &= 0xf; + break; + default: + qemu_log_mask(LOG_UNIMP, "%s: engine[%u]: " + "0x%" HWADDR_PRIx " 0x%" PRIx64 "\n", + __func__, s->engine_id, addr, value); + break; + } + + s->regs[reg] = value; +} + +static const MemoryRegionOps aspeed_adc_engine_ops = { + .read = aspeed_adc_engine_read, + .write = aspeed_adc_engine_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 2, + .max_access_size = 4, + .unaligned = false, + }, +}; + +static const uint32_t aspeed_adc_resets[ASPEED_ADC_NR_REGS] = { + [ENGINE_CONTROL] = 0x00000000, + [INTERRUPT_CONTROL] = 0x00000000, + [VGA_DETECT_CONTROL] = 0x0000000f, + [CLOCK_CONTROL] = 0x0000000f, +}; + +static void aspeed_adc_engine_reset(DeviceState *dev) +{ + AspeedADCEngineState *s = ASPEED_ADC_ENGINE(dev); + + memcpy(s->regs, aspeed_adc_resets, sizeof(aspeed_adc_resets)); +} + +static void aspeed_adc_engine_realize(DeviceState *dev, Error **errp) +{ + AspeedADCEngineState *s = ASPEED_ADC_ENGINE(dev); + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + g_autofree char *name = g_strdup_printf(TYPE_ASPEED_ADC_ENGINE ".%d", + s->engine_id); + + assert(s->engine_id < 2); + + sysbus_init_irq(sbd, &s->irq); + + memory_region_init_io(&s->mmio, OBJECT(s), &aspeed_adc_engine_ops, s, name, + ASPEED_ADC_ENGINE_MEMORY_REGION_SIZE); + + sysbus_init_mmio(sbd, &s->mmio); +} + +static const VMStateDescription vmstate_aspeed_adc_engine = { + .name = TYPE_ASPEED_ADC, + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32_ARRAY(regs, AspeedADCEngineState, ASPEED_ADC_NR_REGS), + VMSTATE_END_OF_LIST(), + } +}; + +static Property aspeed_adc_engine_properties[] = { + DEFINE_PROP_UINT32("engine-id", AspeedADCEngineState, engine_id, 0), + DEFINE_PROP_UINT32("nr-channels", AspeedADCEngineState, nr_channels, 0), + DEFINE_PROP_END_OF_LIST(), +}; + +static void aspeed_adc_engine_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = aspeed_adc_engine_realize; + dc->reset = aspeed_adc_engine_reset; + device_class_set_props(dc, aspeed_adc_engine_properties); + dc->desc = "Aspeed Analog-to-Digital Engine"; + dc->vmsd = &vmstate_aspeed_adc_engine; +} + +static const TypeInfo aspeed_adc_engine_info = { + .name = TYPE_ASPEED_ADC_ENGINE, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(AspeedADCEngineState), + .class_init = aspeed_adc_engine_class_init, +}; + +static void aspeed_adc_instance_init(Object *obj) +{ + AspeedADCState *s = ASPEED_ADC(obj); + AspeedADCClass *aac = ASPEED_ADC_GET_CLASS(obj); + uint32_t nr_channels = ASPEED_ADC_NR_CHANNELS / aac->nr_engines; + + for (int i = 0; i < aac->nr_engines; i++) { + AspeedADCEngineState *engine = &s->engines[i]; + object_initialize_child(obj, "engine[*]", engine, + TYPE_ASPEED_ADC_ENGINE); + qdev_prop_set_uint32(DEVICE(engine), "engine-id", i); + qdev_prop_set_uint32(DEVICE(engine), "nr-channels", nr_channels); + } +} + +static void aspeed_adc_set_irq(void *opaque, int n, int level) +{ + AspeedADCState *s = opaque; + AspeedADCClass *aac = ASPEED_ADC_GET_CLASS(s); + uint32_t pending = 0; + + /* TODO: update Global IRQ status register on AST2600 (Need specs) */ + for (int i = 0; i < aac->nr_engines; i++) { + uint32_t irq_status = s->engines[i].regs[INTERRUPT_CONTROL] & 0xFF; + pending |= irq_status << (i * 8); + } + + qemu_set_irq(s->irq, !!pending); +} 
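+
+/*
+ * Worked example (editor's illustration): each engine contributes the low
+ * byte of its INTERRUPT_CONTROL register, so with two engines an engine-0
+ * status of 0x05 and an engine-1 status of 0x01 yield
+ *
+ *   pending = (0x05 << 0) | (0x01 << 8) = 0x0105
+ *
+ * and the controller IRQ is asserted because pending is non-zero.
+ */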
+ +static void aspeed_adc_realize(DeviceState *dev, Error **errp) +{ + AspeedADCState *s = ASPEED_ADC(dev); + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + AspeedADCClass *aac = ASPEED_ADC_GET_CLASS(dev); + + qdev_init_gpio_in_named_with_opaque(DEVICE(sbd), aspeed_adc_set_irq, + s, NULL, aac->nr_engines); + + sysbus_init_irq(sbd, &s->irq); + + memory_region_init(&s->mmio, OBJECT(s), TYPE_ASPEED_ADC, + ASPEED_ADC_MEMORY_REGION_SIZE); + + sysbus_init_mmio(sbd, &s->mmio); + + for (int i = 0; i < aac->nr_engines; i++) { + Object *eng = OBJECT(&s->engines[i]); + + if (!sysbus_realize(SYS_BUS_DEVICE(eng), errp)) { + return; + } + sysbus_connect_irq(SYS_BUS_DEVICE(eng), 0, + qdev_get_gpio_in(DEVICE(sbd), i)); + memory_region_add_subregion(&s->mmio, + i * ASPEED_ADC_ENGINE_MEMORY_REGION_SIZE, + &s->engines[i].mmio); + } +} + +static void aspeed_adc_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + AspeedADCClass *aac = ASPEED_ADC_CLASS(klass); + + dc->realize = aspeed_adc_realize; + dc->desc = "Aspeed Analog-to-Digital Converter"; + aac->nr_engines = 1; +} + +static void aspeed_2600_adc_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + AspeedADCClass *aac = ASPEED_ADC_CLASS(klass); + + dc->desc = "ASPEED 2600 ADC Controller"; + aac->nr_engines = 2; +} + +static const TypeInfo aspeed_adc_info = { + .name = TYPE_ASPEED_ADC, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_init = aspeed_adc_instance_init, + .instance_size = sizeof(AspeedADCState), + .class_init = aspeed_adc_class_init, + .class_size = sizeof(AspeedADCClass), + .abstract = true, +}; + +static const TypeInfo aspeed_2400_adc_info = { + .name = TYPE_ASPEED_2400_ADC, + .parent = TYPE_ASPEED_ADC, +}; + +static const TypeInfo aspeed_2500_adc_info = { + .name = TYPE_ASPEED_2500_ADC, + .parent = TYPE_ASPEED_ADC, +}; + +static const TypeInfo aspeed_2600_adc_info = { + .name = TYPE_ASPEED_2600_ADC, + .parent = TYPE_ASPEED_ADC, + .class_init = aspeed_2600_adc_class_init, +}; + +static void aspeed_adc_register_types(void) +{ + type_register_static(&aspeed_adc_engine_info); + type_register_static(&aspeed_adc_info); + type_register_static(&aspeed_2400_adc_info); + type_register_static(&aspeed_2500_adc_info); + type_register_static(&aspeed_2600_adc_info); +} + +type_init(aspeed_adc_register_types); diff --git a/hw/adc/max111x.c b/hw/adc/max111x.c new file mode 100644 index 000000000..e8bf4cccd --- /dev/null +++ b/hw/adc/max111x.c @@ -0,0 +1,236 @@ +/* + * Maxim MAX1110/1111 ADC chip emulation. + * + * Copyright (c) 2006 Openedhand Ltd. + * Written by Andrzej Zaborowski + * + * This code is licensed under the GNU GPLv2. + * + * Contributions after 2012-01-13 are licensed under the terms of the + * GNU GPL, version 2 or (at your option) any later version. 
+ */ + +#include "qemu/osdep.h" +#include "hw/adc/max111x.h" +#include "hw/irq.h" +#include "migration/vmstate.h" +#include "qemu/module.h" +#include "hw/qdev-properties.h" + +/* Control-byte bitfields */ +#define CB_PD0 (1 << 0) +#define CB_PD1 (1 << 1) +#define CB_SGL (1 << 2) +#define CB_UNI (1 << 3) +#define CB_SEL0 (1 << 4) +#define CB_SEL1 (1 << 5) +#define CB_SEL2 (1 << 6) +#define CB_START (1 << 7) + +#define CHANNEL_NUM(v, b0, b1, b2) \ + ((((v) >> (2 + (b0))) & 4) | \ + (((v) >> (3 + (b1))) & 2) | \ + (((v) >> (4 + (b2))) & 1)) + +static uint32_t max111x_read(MAX111xState *s) +{ + if (!s->tb1) + return 0; + + switch (s->cycle ++) { + case 1: + return s->rb2; + case 2: + return s->rb3; + } + + return 0; +} + +/* Interpret a control-byte */ +static void max111x_write(MAX111xState *s, uint32_t value) +{ + int measure, chan; + + /* Ignore the value if START bit is zero */ + if (!(value & CB_START)) + return; + + s->cycle = 0; + + if (!(value & CB_PD1)) { + s->tb1 = 0; + return; + } + + s->tb1 = value; + + if (s->inputs == 8) + chan = CHANNEL_NUM(value, 1, 0, 2); + else + chan = CHANNEL_NUM(value & ~CB_SEL0, 0, 1, 2); + + if (value & CB_SGL) + measure = s->input[chan] - s->com; + else + measure = s->input[chan] - s->input[chan ^ 1]; + + if (!(value & CB_UNI)) + measure ^= 0x80; + + s->rb2 = (measure >> 2) & 0x3f; + s->rb3 = (measure << 6) & 0xc0; + + /* FIXME: When should the IRQ be lowered? */ + qemu_irq_raise(s->interrupt); +} + +static uint32_t max111x_transfer(SSIPeripheral *dev, uint32_t value) +{ + MAX111xState *s = MAX_111X(dev); + max111x_write(s, value); + return max111x_read(s); +} + +static const VMStateDescription vmstate_max111x = { + .name = "max111x", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_SSI_PERIPHERAL(parent_obj, MAX111xState), + VMSTATE_UINT8(tb1, MAX111xState), + VMSTATE_UINT8(rb2, MAX111xState), + VMSTATE_UINT8(rb3, MAX111xState), + VMSTATE_INT32_EQUAL(inputs, MAX111xState, NULL), + VMSTATE_INT32(com, MAX111xState), + VMSTATE_ARRAY_INT32_UNSAFE(input, MAX111xState, inputs, + vmstate_info_uint8, uint8_t), + VMSTATE_END_OF_LIST() + } +}; + +static void max111x_input_set(void *opaque, int line, int value) +{ + MAX111xState *s = MAX_111X(opaque); + + assert(line >= 0 && line < s->inputs); + s->input[line] = value; +} + +static int max111x_init(SSIPeripheral *d, int inputs) +{ + DeviceState *dev = DEVICE(d); + MAX111xState *s = MAX_111X(dev); + + qdev_init_gpio_out(dev, &s->interrupt, 1); + qdev_init_gpio_in(dev, max111x_input_set, inputs); + + s->inputs = inputs; + + return 0; +} + +static void max1110_realize(SSIPeripheral *dev, Error **errp) +{ + max111x_init(dev, 8); +} + +static void max1111_realize(SSIPeripheral *dev, Error **errp) +{ + max111x_init(dev, 4); +} + +static void max111x_reset(DeviceState *dev) +{ + MAX111xState *s = MAX_111X(dev); + int i; + + for (i = 0; i < s->inputs; i++) { + s->input[i] = s->reset_input[i]; + } + s->com = 0; + s->tb1 = 0; + s->rb2 = 0; + s->rb3 = 0; + s->cycle = 0; +} + +static Property max1110_properties[] = { + /* Reset values for ADC inputs */ + DEFINE_PROP_UINT8("input0", MAX111xState, reset_input[0], 0xf0), + DEFINE_PROP_UINT8("input1", MAX111xState, reset_input[1], 0xe0), + DEFINE_PROP_UINT8("input2", MAX111xState, reset_input[2], 0xd0), + DEFINE_PROP_UINT8("input3", MAX111xState, reset_input[3], 0xc0), + DEFINE_PROP_END_OF_LIST(), +}; + +static Property max1111_properties[] = { + /* Reset values for ADC inputs */ + DEFINE_PROP_UINT8("input0", MAX111xState, reset_input[0], 
0xf0), + DEFINE_PROP_UINT8("input1", MAX111xState, reset_input[1], 0xe0), + DEFINE_PROP_UINT8("input2", MAX111xState, reset_input[2], 0xd0), + DEFINE_PROP_UINT8("input3", MAX111xState, reset_input[3], 0xc0), + DEFINE_PROP_UINT8("input4", MAX111xState, reset_input[4], 0xb0), + DEFINE_PROP_UINT8("input5", MAX111xState, reset_input[5], 0xa0), + DEFINE_PROP_UINT8("input6", MAX111xState, reset_input[6], 0x90), + DEFINE_PROP_UINT8("input7", MAX111xState, reset_input[7], 0x80), + DEFINE_PROP_END_OF_LIST(), +}; + +static void max111x_class_init(ObjectClass *klass, void *data) +{ + SSIPeripheralClass *k = SSI_PERIPHERAL_CLASS(klass); + DeviceClass *dc = DEVICE_CLASS(klass); + + k->transfer = max111x_transfer; + dc->reset = max111x_reset; + dc->vmsd = &vmstate_max111x; + set_bit(DEVICE_CATEGORY_MISC, dc->categories); +} + +static const TypeInfo max111x_info = { + .name = TYPE_MAX_111X, + .parent = TYPE_SSI_PERIPHERAL, + .instance_size = sizeof(MAX111xState), + .class_init = max111x_class_init, + .abstract = true, +}; + +static void max1110_class_init(ObjectClass *klass, void *data) +{ + SSIPeripheralClass *k = SSI_PERIPHERAL_CLASS(klass); + DeviceClass *dc = DEVICE_CLASS(klass); + + k->realize = max1110_realize; + device_class_set_props(dc, max1110_properties); +} + +static const TypeInfo max1110_info = { + .name = TYPE_MAX_1110, + .parent = TYPE_MAX_111X, + .class_init = max1110_class_init, +}; + +static void max1111_class_init(ObjectClass *klass, void *data) +{ + SSIPeripheralClass *k = SSI_PERIPHERAL_CLASS(klass); + DeviceClass *dc = DEVICE_CLASS(klass); + + k->realize = max1111_realize; + device_class_set_props(dc, max1111_properties); +} + +static const TypeInfo max1111_info = { + .name = TYPE_MAX_1111, + .parent = TYPE_MAX_111X, + .class_init = max1111_class_init, +}; + +static void max111x_register_types(void) +{ + type_register_static(&max111x_info); + type_register_static(&max1110_info); + type_register_static(&max1111_info); +} + +type_init(max111x_register_types) diff --git a/hw/adc/meson.build b/hw/adc/meson.build new file mode 100644 index 000000000..b29ac7ccd --- /dev/null +++ b/hw/adc/meson.build @@ -0,0 +1,5 @@ +softmmu_ss.add(when: 'CONFIG_STM32F2XX_ADC', if_true: files('stm32f2xx_adc.c')) +softmmu_ss.add(when: 'CONFIG_ASPEED_SOC', if_true: files('aspeed_adc.c')) +softmmu_ss.add(when: 'CONFIG_NPCM7XX', if_true: files('npcm7xx_adc.c')) +softmmu_ss.add(when: 'CONFIG_ZYNQ', if_true: files('zynq-xadc.c')) +softmmu_ss.add(when: 'CONFIG_MAX111X', if_true: files('max111x.c')) diff --git a/hw/adc/npcm7xx_adc.c b/hw/adc/npcm7xx_adc.c new file mode 100644 index 000000000..0f0a9f63e --- /dev/null +++ b/hw/adc/npcm7xx_adc.c @@ -0,0 +1,301 @@ +/* + * Nuvoton NPCM7xx ADC Module + * + * Copyright 2020 Google LLC + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ */ + +#include "qemu/osdep.h" +#include "hw/adc/npcm7xx_adc.h" +#include "hw/qdev-clock.h" +#include "hw/qdev-properties.h" +#include "hw/registerfields.h" +#include "migration/vmstate.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "qemu/timer.h" +#include "qemu/units.h" +#include "trace.h" + +REG32(NPCM7XX_ADC_CON, 0x0) +REG32(NPCM7XX_ADC_DATA, 0x4) + +/* Register field definitions. */ +#define NPCM7XX_ADC_CON_MUX(rv) extract32(rv, 24, 4) +#define NPCM7XX_ADC_CON_INT_EN BIT(21) +#define NPCM7XX_ADC_CON_REFSEL BIT(19) +#define NPCM7XX_ADC_CON_INT BIT(18) +#define NPCM7XX_ADC_CON_EN BIT(17) +#define NPCM7XX_ADC_CON_RST BIT(16) +#define NPCM7XX_ADC_CON_CONV BIT(14) +#define NPCM7XX_ADC_CON_DIV(rv) extract32(rv, 1, 8) + +#define NPCM7XX_ADC_MAX_RESULT 1023 +#define NPCM7XX_ADC_DEFAULT_IREF 2000000 +#define NPCM7XX_ADC_CONV_CYCLES 20 +#define NPCM7XX_ADC_RESET_CYCLES 10 +#define NPCM7XX_ADC_R0_INPUT 500000 +#define NPCM7XX_ADC_R1_INPUT 1500000 + +static void npcm7xx_adc_reset(NPCM7xxADCState *s) +{ + timer_del(&s->conv_timer); + s->con = 0x000c0001; + s->data = 0x00000000; +} + +static uint32_t npcm7xx_adc_convert(uint32_t input, uint32_t ref) +{ + uint32_t result; + + result = input * (NPCM7XX_ADC_MAX_RESULT + 1) / ref; + if (result > NPCM7XX_ADC_MAX_RESULT) { + result = NPCM7XX_ADC_MAX_RESULT; + } + + return result; +} + +static uint32_t npcm7xx_adc_prescaler(NPCM7xxADCState *s) +{ + return 2 * (NPCM7XX_ADC_CON_DIV(s->con) + 1); +} + +static void npcm7xx_adc_start_timer(Clock *clk, QEMUTimer *timer, + uint32_t cycles, uint32_t prescaler) +{ + int64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + int64_t ticks = cycles; + int64_t ns; + + ticks *= prescaler; + ns = clock_ticks_to_ns(clk, ticks); + ns += now; + timer_mod(timer, ns); +} + +static void npcm7xx_adc_start_convert(NPCM7xxADCState *s) +{ + uint32_t prescaler = npcm7xx_adc_prescaler(s); + + npcm7xx_adc_start_timer(s->clock, &s->conv_timer, NPCM7XX_ADC_CONV_CYCLES, + prescaler); +} + +static void npcm7xx_adc_convert_done(void *opaque) +{ + NPCM7xxADCState *s = opaque; + uint32_t input = NPCM7XX_ADC_CON_MUX(s->con); + uint32_t ref = (s->con & NPCM7XX_ADC_CON_REFSEL) + ? 
s->iref : s->vref; + + if (input >= NPCM7XX_ADC_NUM_INPUTS) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid input: %u\n", + __func__, input); + return; + } + s->data = npcm7xx_adc_convert(s->adci[input], ref); + if (s->con & NPCM7XX_ADC_CON_INT_EN) { + s->con |= NPCM7XX_ADC_CON_INT; + qemu_irq_raise(s->irq); + } + s->con &= ~NPCM7XX_ADC_CON_CONV; +} + +static void npcm7xx_adc_calibrate(NPCM7xxADCState *adc) +{ + adc->calibration_r_values[0] = npcm7xx_adc_convert(NPCM7XX_ADC_R0_INPUT, + adc->iref); + adc->calibration_r_values[1] = npcm7xx_adc_convert(NPCM7XX_ADC_R1_INPUT, + adc->iref); +} + +static void npcm7xx_adc_write_con(NPCM7xxADCState *s, uint32_t new_con) +{ + uint32_t old_con = s->con; + + /* Write ADC_INT to 1 to clear it */ + if (new_con & NPCM7XX_ADC_CON_INT) { + new_con &= ~NPCM7XX_ADC_CON_INT; + qemu_irq_lower(s->irq); + } else if (old_con & NPCM7XX_ADC_CON_INT) { + new_con |= NPCM7XX_ADC_CON_INT; + } + + s->con = new_con; + + if (s->con & NPCM7XX_ADC_CON_RST) { + npcm7xx_adc_reset(s); + return; + } + + if ((s->con & NPCM7XX_ADC_CON_EN)) { + if (s->con & NPCM7XX_ADC_CON_CONV) { + if (!(old_con & NPCM7XX_ADC_CON_CONV)) { + npcm7xx_adc_start_convert(s); + } + } else { + timer_del(&s->conv_timer); + } + } +} + +static uint64_t npcm7xx_adc_read(void *opaque, hwaddr offset, unsigned size) +{ + uint64_t value = 0; + NPCM7xxADCState *s = opaque; + + switch (offset) { + case A_NPCM7XX_ADC_CON: + value = s->con; + break; + + case A_NPCM7XX_ADC_DATA: + value = s->data; + break; + + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: invalid offset 0x%04" HWADDR_PRIx "\n", + __func__, offset); + break; + } + + trace_npcm7xx_adc_read(DEVICE(s)->canonical_path, offset, value); + return value; +} + +static void npcm7xx_adc_write(void *opaque, hwaddr offset, uint64_t v, + unsigned size) +{ + NPCM7xxADCState *s = opaque; + + trace_npcm7xx_adc_write(DEVICE(s)->canonical_path, offset, v); + switch (offset) { + case A_NPCM7XX_ADC_CON: + npcm7xx_adc_write_con(s, v); + break; + + case A_NPCM7XX_ADC_DATA: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: register @ 0x%04" HWADDR_PRIx " is read-only\n", + __func__, offset); + break; + + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: invalid offset 0x%04" HWADDR_PRIx "\n", + __func__, offset); + break; + } + +} + +static const struct MemoryRegionOps npcm7xx_adc_ops = { + .read = npcm7xx_adc_read, + .write = npcm7xx_adc_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + .unaligned = false, + }, +}; + +static void npcm7xx_adc_enter_reset(Object *obj, ResetType type) +{ + NPCM7xxADCState *s = NPCM7XX_ADC(obj); + + npcm7xx_adc_reset(s); +} + +static void npcm7xx_adc_hold_reset(Object *obj) +{ + NPCM7xxADCState *s = NPCM7XX_ADC(obj); + + qemu_irq_lower(s->irq); +} + +static void npcm7xx_adc_init(Object *obj) +{ + NPCM7xxADCState *s = NPCM7XX_ADC(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + int i; + + sysbus_init_irq(sbd, &s->irq); + + timer_init_ns(&s->conv_timer, QEMU_CLOCK_VIRTUAL, + npcm7xx_adc_convert_done, s); + memory_region_init_io(&s->iomem, obj, &npcm7xx_adc_ops, s, + TYPE_NPCM7XX_ADC, 4 * KiB); + sysbus_init_mmio(sbd, &s->iomem); + s->clock = qdev_init_clock_in(DEVICE(s), "clock", NULL, NULL, 0); + + for (i = 0; i < NPCM7XX_ADC_NUM_INPUTS; ++i) { + object_property_add_uint32_ptr(obj, "adci[*]", + &s->adci[i], OBJ_PROP_FLAG_WRITE); + } + object_property_add_uint32_ptr(obj, "vref", + &s->vref, OBJ_PROP_FLAG_WRITE); + npcm7xx_adc_calibrate(s); +} + +static const VMStateDescription 
vmstate_npcm7xx_adc = {
+    .name = "npcm7xx-adc",
+    .version_id = 0,
+    .minimum_version_id = 0,
+    .fields = (VMStateField[]) {
+        VMSTATE_TIMER(conv_timer, NPCM7xxADCState),
+        VMSTATE_UINT32(con, NPCM7xxADCState),
+        VMSTATE_UINT32(data, NPCM7xxADCState),
+        VMSTATE_CLOCK(clock, NPCM7xxADCState),
+        VMSTATE_UINT32_ARRAY(adci, NPCM7xxADCState, NPCM7XX_ADC_NUM_INPUTS),
+        VMSTATE_UINT32(vref, NPCM7xxADCState),
+        VMSTATE_UINT32(iref, NPCM7xxADCState),
+        VMSTATE_UINT16_ARRAY(calibration_r_values, NPCM7xxADCState,
+                             NPCM7XX_ADC_NUM_CALIB),
+        VMSTATE_END_OF_LIST(),
+    },
+};
+
+static Property npcm7xx_adc_properties[] = {
+    DEFINE_PROP_UINT32("iref", NPCM7xxADCState, iref, NPCM7XX_ADC_DEFAULT_IREF),
+    DEFINE_PROP_END_OF_LIST(),
+};
+
+static void npcm7xx_adc_class_init(ObjectClass *klass, void *data)
+{
+    ResettableClass *rc = RESETTABLE_CLASS(klass);
+    DeviceClass *dc = DEVICE_CLASS(klass);
+
+    dc->desc = "NPCM7xx ADC Module";
+    dc->vmsd = &vmstate_npcm7xx_adc;
+    rc->phases.enter = npcm7xx_adc_enter_reset;
+    rc->phases.hold = npcm7xx_adc_hold_reset;
+
+    device_class_set_props(dc, npcm7xx_adc_properties);
+}
+
+static const TypeInfo npcm7xx_adc_info = {
+    .name = TYPE_NPCM7XX_ADC,
+    .parent = TYPE_SYS_BUS_DEVICE,
+    .instance_size = sizeof(NPCM7xxADCState),
+    .class_init = npcm7xx_adc_class_init,
+    .instance_init = npcm7xx_adc_init,
+};
+
+static void npcm7xx_adc_register_types(void)
+{
+    type_register_static(&npcm7xx_adc_info);
+}
+
+type_init(npcm7xx_adc_register_types);
diff --git a/hw/adc/stm32f2xx_adc.c b/hw/adc/stm32f2xx_adc.c
new file mode 100644
index 000000000..01a0b14e6
--- /dev/null
+++ b/hw/adc/stm32f2xx_adc.c
@@ -0,0 +1,308 @@
+/*
+ * STM32F2XX ADC
+ *
+ * Copyright (c) 2014 Alistair Francis <alistair@alistair23.me>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/sysbus.h"
+#include "migration/vmstate.h"
+#include "qemu/log.h"
+#include "qemu/module.h"
+#include "hw/adc/stm32f2xx_adc.h"
+
+#ifndef STM_ADC_ERR_DEBUG
+#define STM_ADC_ERR_DEBUG 0
+#endif
+
+#define DB_PRINT_L(lvl, fmt, args...) do { \
+    if (STM_ADC_ERR_DEBUG >= lvl) { \
+        qemu_log("%s: " fmt, __func__, ## args); \
+    } \
+} while (0)
+
+#define DB_PRINT(fmt, args...)
DB_PRINT_L(1, fmt, ## args) + +static void stm32f2xx_adc_reset(DeviceState *dev) +{ + STM32F2XXADCState *s = STM32F2XX_ADC(dev); + + s->adc_sr = 0x00000000; + s->adc_cr1 = 0x00000000; + s->adc_cr2 = 0x00000000; + s->adc_smpr1 = 0x00000000; + s->adc_smpr2 = 0x00000000; + s->adc_jofr[0] = 0x00000000; + s->adc_jofr[1] = 0x00000000; + s->adc_jofr[2] = 0x00000000; + s->adc_jofr[3] = 0x00000000; + s->adc_htr = 0x00000FFF; + s->adc_ltr = 0x00000000; + s->adc_sqr1 = 0x00000000; + s->adc_sqr2 = 0x00000000; + s->adc_sqr3 = 0x00000000; + s->adc_jsqr = 0x00000000; + s->adc_jdr[0] = 0x00000000; + s->adc_jdr[1] = 0x00000000; + s->adc_jdr[2] = 0x00000000; + s->adc_jdr[3] = 0x00000000; + s->adc_dr = 0x00000000; +} + +static uint32_t stm32f2xx_adc_generate_value(STM32F2XXADCState *s) +{ + /* Attempts to fake some ADC values */ + s->adc_dr = s->adc_dr + 7; + + switch ((s->adc_cr1 & ADC_CR1_RES) >> 24) { + case 0: + /* 12-bit */ + s->adc_dr &= 0xFFF; + break; + case 1: + /* 10-bit */ + s->adc_dr &= 0x3FF; + break; + case 2: + /* 8-bit */ + s->adc_dr &= 0xFF; + break; + default: + /* 6-bit */ + s->adc_dr &= 0x3F; + } + + if (s->adc_cr2 & ADC_CR2_ALIGN) { + return (s->adc_dr << 1) & 0xFFF0; + } else { + return s->adc_dr; + } +} + +static uint64_t stm32f2xx_adc_read(void *opaque, hwaddr addr, + unsigned int size) +{ + STM32F2XXADCState *s = opaque; + + DB_PRINT("Address: 0x%" HWADDR_PRIx "\n", addr); + + if (addr >= ADC_COMMON_ADDRESS) { + qemu_log_mask(LOG_UNIMP, + "%s: ADC Common Register Unsupported\n", __func__); + } + + switch (addr) { + case ADC_SR: + return s->adc_sr; + case ADC_CR1: + return s->adc_cr1; + case ADC_CR2: + return s->adc_cr2 & 0xFFFFFFF; + case ADC_SMPR1: + return s->adc_smpr1; + case ADC_SMPR2: + return s->adc_smpr2; + case ADC_JOFR1: + case ADC_JOFR2: + case ADC_JOFR3: + case ADC_JOFR4: + qemu_log_mask(LOG_UNIMP, "%s: " \ + "Injection ADC is not implemented, the registers are " \ + "included for compatibility\n", __func__); + return s->adc_jofr[(addr - ADC_JOFR1) / 4]; + case ADC_HTR: + return s->adc_htr; + case ADC_LTR: + return s->adc_ltr; + case ADC_SQR1: + return s->adc_sqr1; + case ADC_SQR2: + return s->adc_sqr2; + case ADC_SQR3: + return s->adc_sqr3; + case ADC_JSQR: + qemu_log_mask(LOG_UNIMP, "%s: " \ + "Injection ADC is not implemented, the registers are " \ + "included for compatibility\n", __func__); + return s->adc_jsqr; + case ADC_JDR1: + case ADC_JDR2: + case ADC_JDR3: + case ADC_JDR4: + qemu_log_mask(LOG_UNIMP, "%s: " \ + "Injection ADC is not implemented, the registers are " \ + "included for compatibility\n", __func__); + return s->adc_jdr[(addr - ADC_JDR1) / 4] - + s->adc_jofr[(addr - ADC_JDR1) / 4]; + case ADC_DR: + if ((s->adc_cr2 & ADC_CR2_ADON) && (s->adc_cr2 & ADC_CR2_SWSTART)) { + s->adc_cr2 ^= ADC_CR2_SWSTART; + return stm32f2xx_adc_generate_value(s); + } else { + return 0; + } + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad offset 0x%" HWADDR_PRIx "\n", __func__, addr); + } + + return 0; +} + +static void stm32f2xx_adc_write(void *opaque, hwaddr addr, + uint64_t val64, unsigned int size) +{ + STM32F2XXADCState *s = opaque; + uint32_t value = (uint32_t) val64; + + DB_PRINT("Address: 0x%" HWADDR_PRIx ", Value: 0x%x\n", + addr, value); + + if (addr >= 0x100) { + qemu_log_mask(LOG_UNIMP, + "%s: ADC Common Register Unsupported\n", __func__); + } + + switch (addr) { + case ADC_SR: + s->adc_sr &= (value & 0x3F); + break; + case ADC_CR1: + s->adc_cr1 = value; + break; + case ADC_CR2: + s->adc_cr2 = value; + break; + case ADC_SMPR1: + s->adc_smpr1 = value; + break; 
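+    /*
+     * The sample-time (SMPR) and watchdog-threshold (HTR/LTR) values here
+     * and below are stored for guest read-back only; this model does not
+     * use them to alter conversion timing or to raise watchdog events.
+     */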
+ case ADC_SMPR2: + s->adc_smpr2 = value; + break; + case ADC_JOFR1: + case ADC_JOFR2: + case ADC_JOFR3: + case ADC_JOFR4: + s->adc_jofr[(addr - ADC_JOFR1) / 4] = (value & 0xFFF); + qemu_log_mask(LOG_UNIMP, "%s: " \ + "Injection ADC is not implemented, the registers are " \ + "included for compatibility\n", __func__); + break; + case ADC_HTR: + s->adc_htr = value; + break; + case ADC_LTR: + s->adc_ltr = value; + break; + case ADC_SQR1: + s->adc_sqr1 = value; + break; + case ADC_SQR2: + s->adc_sqr2 = value; + break; + case ADC_SQR3: + s->adc_sqr3 = value; + break; + case ADC_JSQR: + s->adc_jsqr = value; + qemu_log_mask(LOG_UNIMP, "%s: " \ + "Injection ADC is not implemented, the registers are " \ + "included for compatibility\n", __func__); + break; + case ADC_JDR1: + case ADC_JDR2: + case ADC_JDR3: + case ADC_JDR4: + s->adc_jdr[(addr - ADC_JDR1) / 4] = value; + qemu_log_mask(LOG_UNIMP, "%s: " \ + "Injection ADC is not implemented, the registers are " \ + "included for compatibility\n", __func__); + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad offset 0x%" HWADDR_PRIx "\n", __func__, addr); + } +} + +static const MemoryRegionOps stm32f2xx_adc_ops = { + .read = stm32f2xx_adc_read, + .write = stm32f2xx_adc_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .impl.min_access_size = 4, + .impl.max_access_size = 4, +}; + +static const VMStateDescription vmstate_stm32f2xx_adc = { + .name = TYPE_STM32F2XX_ADC, + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(adc_sr, STM32F2XXADCState), + VMSTATE_UINT32(adc_cr1, STM32F2XXADCState), + VMSTATE_UINT32(adc_cr2, STM32F2XXADCState), + VMSTATE_UINT32(adc_smpr1, STM32F2XXADCState), + VMSTATE_UINT32(adc_smpr2, STM32F2XXADCState), + VMSTATE_UINT32_ARRAY(adc_jofr, STM32F2XXADCState, 4), + VMSTATE_UINT32(adc_htr, STM32F2XXADCState), + VMSTATE_UINT32(adc_ltr, STM32F2XXADCState), + VMSTATE_UINT32(adc_sqr1, STM32F2XXADCState), + VMSTATE_UINT32(adc_sqr2, STM32F2XXADCState), + VMSTATE_UINT32(adc_sqr3, STM32F2XXADCState), + VMSTATE_UINT32(adc_jsqr, STM32F2XXADCState), + VMSTATE_UINT32_ARRAY(adc_jdr, STM32F2XXADCState, 4), + VMSTATE_UINT32(adc_dr, STM32F2XXADCState), + VMSTATE_END_OF_LIST() + } +}; + +static void stm32f2xx_adc_init(Object *obj) +{ + STM32F2XXADCState *s = STM32F2XX_ADC(obj); + + sysbus_init_irq(SYS_BUS_DEVICE(obj), &s->irq); + + memory_region_init_io(&s->mmio, obj, &stm32f2xx_adc_ops, s, + TYPE_STM32F2XX_ADC, 0x100); + sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->mmio); +} + +static void stm32f2xx_adc_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = stm32f2xx_adc_reset; + dc->vmsd = &vmstate_stm32f2xx_adc; +} + +static const TypeInfo stm32f2xx_adc_info = { + .name = TYPE_STM32F2XX_ADC, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(STM32F2XXADCState), + .instance_init = stm32f2xx_adc_init, + .class_init = stm32f2xx_adc_class_init, +}; + +static void stm32f2xx_adc_register_types(void) +{ + type_register_static(&stm32f2xx_adc_info); +} + +type_init(stm32f2xx_adc_register_types) diff --git a/hw/adc/trace-events b/hw/adc/trace-events new file mode 100644 index 000000000..5a4c444d7 --- /dev/null +++ b/hw/adc/trace-events @@ -0,0 +1,8 @@ +# See docs/devel/tracing.rst for syntax documentation. 
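+# Each line below declares one trace point: a name, a C-style typed argument
+# list, and a printf-style format string used when the event is logged.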
+
+# npcm7xx_adc.c
+npcm7xx_adc_read(const char *id, uint64_t offset, uint32_t value) "%s offset: 0x%04" PRIx64 " value 0x%04" PRIx32
+npcm7xx_adc_write(const char *id, uint64_t offset, uint32_t value) "%s offset: 0x%04" PRIx64 " value 0x%04" PRIx32
+
+aspeed_adc_engine_read(uint32_t engine_id, uint64_t addr, uint64_t value) "engine[%u] 0x%" PRIx64 " 0x%" PRIx64
+aspeed_adc_engine_write(uint32_t engine_id, uint64_t addr, uint64_t value) "engine[%u] 0x%" PRIx64 " 0x%" PRIx64
diff --git a/hw/adc/trace.h b/hw/adc/trace.h
new file mode 100644
index 000000000..b71d5b5b4
--- /dev/null
+++ b/hw/adc/trace.h
@@ -0,0 +1 @@
+#include "trace/trace-hw_adc.h"
diff --git a/hw/adc/zynq-xadc.c b/hw/adc/zynq-xadc.c
new file mode 100644
index 000000000..cfc7bab06
--- /dev/null
+++ b/hw/adc/zynq-xadc.c
@@ -0,0 +1,305 @@
+/*
+ * ADC registers for Xilinx Zynq Platform
+ *
+ * Copyright (c) 2015 Guenter Roeck
+ * Based on hw/misc/zynq_slcr.c, written by Michal Simek
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/irq.h"
+#include "hw/adc/zynq-xadc.h"
+#include "migration/vmstate.h"
+#include "qemu/timer.h"
+#include "qemu/log.h"
+#include "qemu/module.h"
+
+enum {
+    CFG = 0x000 / 4,
+    INT_STS,
+    INT_MASK,
+    MSTS,
+    CMDFIFO,
+    RDFIFO,
+    MCTL,
+};
+
+#define CFG_ENABLE              BIT(31)
+#define CFG_CFIFOTH_SHIFT       20
+#define CFG_CFIFOTH_LENGTH      4
+#define CFG_DFIFOTH_SHIFT       16
+#define CFG_DFIFOTH_LENGTH      4
+#define CFG_WEDGE               BIT(13)
+#define CFG_REDGE               BIT(12)
+#define CFG_TCKRATE_SHIFT       8
+#define CFG_TCKRATE_LENGTH      2
+
+#define CFG_TCKRATE_DIV(x)      (0x1 << (x - 1))
+
+#define CFG_IGAP_SHIFT          0
+#define CFG_IGAP_LENGTH         5
+
+#define INT_CFIFO_LTH           BIT(9)
+#define INT_DFIFO_GTH           BIT(8)
+#define INT_OT                  BIT(7)
+#define INT_ALM_SHIFT           0
+#define INT_ALM_LENGTH          7
+#define INT_ALM_MASK            (((1 << INT_ALM_LENGTH) - 1) << INT_ALM_SHIFT)
+
+#define INT_ALL (INT_CFIFO_LTH | INT_DFIFO_GTH | INT_OT | INT_ALM_MASK)
+
+#define MSTS_CFIFO_LVL_SHIFT    16
+#define MSTS_CFIFO_LVL_LENGTH   4
+#define MSTS_DFIFO_LVL_SHIFT    12
+#define MSTS_DFIFO_LVL_LENGTH   4
+#define MSTS_CFIFOF             BIT(11)
+#define MSTS_CFIFOE             BIT(10)
+#define MSTS_DFIFOF             BIT(9)
+#define MSTS_DFIFOE             BIT(8)
+#define MSTS_OT                 BIT(7)
+#define MSTS_ALM_SHIFT          0
+#define MSTS_ALM_LENGTH         7
+
+#define MCTL_RESET              BIT(4)
+
+#define CMD_NOP                 0x00
+#define CMD_READ                0x01
+#define CMD_WRITE               0x02
+
+static void zynq_xadc_update_ints(ZynqXADCState *s)
+{
+    /* We are fast, commands are actioned instantly so the CFIFO is always
+     * empty (and below threshold).
+ */ + s->regs[INT_STS] |= INT_CFIFO_LTH; + + if (s->xadc_dfifo_entries > + extract32(s->regs[CFG], CFG_DFIFOTH_SHIFT, CFG_DFIFOTH_LENGTH)) { + s->regs[INT_STS] |= INT_DFIFO_GTH; + } + + qemu_set_irq(s->qemu_irq, !!(s->regs[INT_STS] & ~s->regs[INT_MASK])); +} + +static void zynq_xadc_reset(DeviceState *d) +{ + ZynqXADCState *s = ZYNQ_XADC(d); + + s->regs[CFG] = 0x14 << CFG_IGAP_SHIFT | + CFG_TCKRATE_DIV(4) << CFG_TCKRATE_SHIFT | CFG_REDGE; + s->regs[INT_STS] = INT_CFIFO_LTH; + s->regs[INT_MASK] = 0xffffffff; + s->regs[CMDFIFO] = 0; + s->regs[RDFIFO] = 0; + s->regs[MCTL] = MCTL_RESET; + + memset(s->xadc_regs, 0, sizeof(s->xadc_regs)); + memset(s->xadc_dfifo, 0, sizeof(s->xadc_dfifo)); + s->xadc_dfifo_entries = 0; + + zynq_xadc_update_ints(s); +} + +static uint16_t xadc_pop_dfifo(ZynqXADCState *s) +{ + uint16_t rv = s->xadc_dfifo[0]; + int i; + + if (s->xadc_dfifo_entries > 0) { + s->xadc_dfifo_entries--; + } + for (i = 0; i < s->xadc_dfifo_entries; i++) { + s->xadc_dfifo[i] = s->xadc_dfifo[i + 1]; + } + s->xadc_dfifo[s->xadc_dfifo_entries] = 0; + zynq_xadc_update_ints(s); + return rv; +} + +static void xadc_push_dfifo(ZynqXADCState *s, uint16_t regval) +{ + if (s->xadc_dfifo_entries < ZYNQ_XADC_FIFO_DEPTH) { + s->xadc_dfifo[s->xadc_dfifo_entries++] = s->xadc_read_reg_previous; + } + s->xadc_read_reg_previous = regval; + zynq_xadc_update_ints(s); +} + +static bool zynq_xadc_check_offset(hwaddr offset, bool rnw) +{ + switch (offset) { + case CFG: + case INT_MASK: + case INT_STS: + case MCTL: + return true; + case RDFIFO: + case MSTS: + return rnw; /* read only */ + case CMDFIFO: + return !rnw; /* write only */ + default: + return false; + } +} + +static uint64_t zynq_xadc_read(void *opaque, hwaddr offset, unsigned size) +{ + ZynqXADCState *s = opaque; + int reg = offset / 4; + uint32_t rv = 0; + + if (!zynq_xadc_check_offset(reg, true)) { + qemu_log_mask(LOG_GUEST_ERROR, "zynq_xadc: Invalid read access to " + "addr %" HWADDR_PRIx "\n", offset); + return 0; + } + + switch (reg) { + case CFG: + case INT_MASK: + case INT_STS: + case MCTL: + rv = s->regs[reg]; + break; + case MSTS: + rv = MSTS_CFIFOE; + rv |= s->xadc_dfifo_entries << MSTS_DFIFO_LVL_SHIFT; + if (!s->xadc_dfifo_entries) { + rv |= MSTS_DFIFOE; + } else if (s->xadc_dfifo_entries == ZYNQ_XADC_FIFO_DEPTH) { + rv |= MSTS_DFIFOF; + } + break; + case RDFIFO: + rv = xadc_pop_dfifo(s); + break; + } + return rv; +} + +static void zynq_xadc_write(void *opaque, hwaddr offset, uint64_t val, + unsigned size) +{ + ZynqXADCState *s = (ZynqXADCState *)opaque; + int reg = offset / 4; + int xadc_reg; + int xadc_cmd; + int xadc_data; + + if (!zynq_xadc_check_offset(reg, false)) { + qemu_log_mask(LOG_GUEST_ERROR, "zynq_xadc: Invalid write access " + "to addr %" HWADDR_PRIx "\n", offset); + return; + } + + switch (reg) { + case CFG: + s->regs[CFG] = val; + break; + case INT_STS: + s->regs[INT_STS] &= ~val; + break; + case INT_MASK: + s->regs[INT_MASK] = val & INT_ALL; + break; + case CMDFIFO: + xadc_cmd = extract32(val, 26, 4); + xadc_reg = extract32(val, 16, 10); + xadc_data = extract32(val, 0, 16); + + if (s->regs[MCTL] & MCTL_RESET) { + qemu_log_mask(LOG_GUEST_ERROR, "zynq_xadc: Sending command " + "while comm channel held in reset: %" PRIx32 "\n", + (uint32_t) val); + break; + } + + if (xadc_reg >= ZYNQ_XADC_NUM_ADC_REGS && xadc_cmd != CMD_NOP) { + qemu_log_mask(LOG_GUEST_ERROR, "read/write op to invalid xadc " + "reg 0x%x\n", xadc_reg); + break; + } + + switch (xadc_cmd) { + case CMD_READ: + xadc_push_dfifo(s, s->xadc_regs[xadc_reg]); + break; + 
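+        /*
+         * CMD_WRITE updates the internal XADC register file and then, like
+         * CMD_NOP, pushes a value through the one-entry read pipeline
+         * (xadc_read_reg_previous), so the guest sees exactly one DFIFO
+         * entry per command issued.
+         */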
case CMD_WRITE: + s->xadc_regs[xadc_reg] = xadc_data; + /* fallthrough */ + case CMD_NOP: + xadc_push_dfifo(s, 0); + break; + } + break; + case MCTL: + s->regs[MCTL] = val & 0x00fffeff; + break; + } + zynq_xadc_update_ints(s); +} + +static const MemoryRegionOps xadc_ops = { + .read = zynq_xadc_read, + .write = zynq_xadc_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void zynq_xadc_init(Object *obj) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + ZynqXADCState *s = ZYNQ_XADC(obj); + + memory_region_init_io(&s->iomem, obj, &xadc_ops, s, "zynq-xadc", + ZYNQ_XADC_MMIO_SIZE); + sysbus_init_mmio(sbd, &s->iomem); + sysbus_init_irq(sbd, &s->qemu_irq); +} + +static const VMStateDescription vmstate_zynq_xadc = { + .name = "zynq-xadc", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32_ARRAY(regs, ZynqXADCState, ZYNQ_XADC_NUM_IO_REGS), + VMSTATE_UINT16_ARRAY(xadc_regs, ZynqXADCState, + ZYNQ_XADC_NUM_ADC_REGS), + VMSTATE_UINT16_ARRAY(xadc_dfifo, ZynqXADCState, + ZYNQ_XADC_FIFO_DEPTH), + VMSTATE_UINT16(xadc_read_reg_previous, ZynqXADCState), + VMSTATE_UINT16(xadc_dfifo_entries, ZynqXADCState), + VMSTATE_END_OF_LIST() + } +}; + +static void zynq_xadc_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->vmsd = &vmstate_zynq_xadc; + dc->reset = zynq_xadc_reset; +} + +static const TypeInfo zynq_xadc_info = { + .class_init = zynq_xadc_class_init, + .name = TYPE_ZYNQ_XADC, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(ZynqXADCState), + .instance_init = zynq_xadc_init, +}; + +static void zynq_xadc_register_types(void) +{ + type_register_static(&zynq_xadc_info); +} + +type_init(zynq_xadc_register_types) diff --git a/hw/alpha/Kconfig b/hw/alpha/Kconfig new file mode 100644 index 000000000..9af650c94 --- /dev/null +++ b/hw/alpha/Kconfig @@ -0,0 +1,11 @@ +config DP264 + bool + imply PCI_DEVICES + imply TEST_DEVICES + imply E1000_PCI + select I82378 + select IDE_CMD646 + select MC146818RTC + select PCI + select PCKBD + select SMC37C669 diff --git a/hw/alpha/alpha_sys.h b/hw/alpha/alpha_sys.h new file mode 100644 index 000000000..2263e821d --- /dev/null +++ b/hw/alpha/alpha_sys.h @@ -0,0 +1,21 @@ +/* Alpha cores and system support chips. */ + +#ifndef HW_ALPHA_SYS_H +#define HW_ALPHA_SYS_H + +#include "target/alpha/cpu-qom.h" +#include "hw/pci/pci.h" +#include "hw/pci/pci_host.h" +#include "hw/boards.h" +#include "hw/intc/i8259.h" + + +PCIBus *typhoon_init(MemoryRegion *, qemu_irq *, qemu_irq *, AlphaCPU *[4], + pci_map_irq_fn, uint8_t devfn_min); + +/* alpha_pci.c. */ +extern const MemoryRegionOps alpha_pci_ignore_ops; +extern const MemoryRegionOps alpha_pci_conf1_ops; +extern const MemoryRegionOps alpha_pci_iack_ops; + +#endif diff --git a/hw/alpha/dp264.c b/hw/alpha/dp264.c new file mode 100644 index 000000000..c78ed96d0 --- /dev/null +++ b/hw/alpha/dp264.c @@ -0,0 +1,222 @@ +/* + * QEMU Alpha DP264/CLIPPER hardware system emulator. + * + * Choose CLIPPER IRQ mappings over, say, DP264, MONET, or WEBBRICK + * variants because CLIPPER doesn't have an SMC669 SuperIO controller + * that we need to emulate as well. 
+ */
+
+#include "qemu/osdep.h"
+#include "qemu-common.h"
+#include "cpu.h"
+#include "elf.h"
+#include "hw/loader.h"
+#include "alpha_sys.h"
+#include "qemu/error-report.h"
+#include "hw/rtc/mc146818rtc.h"
+#include "hw/ide/pci.h"
+#include "hw/isa/superio.h"
+#include "net/net.h"
+#include "qemu/cutils.h"
+#include "qemu/datadir.h"
+
+#define MAX_IDE_BUS 2
+
+static uint64_t cpu_alpha_superpage_to_phys(void *opaque, uint64_t addr)
+{
+    if (((addr >> 41) & 3) == 2) {
+        addr &= 0xffffffffffull;
+    }
+    return addr;
+}
+
+/* Note that there are at least 3 viewpoints of IRQ numbers on Alpha systems.
+    (0) The dev_irq_n lines into the cpu, which we totally ignore,
+    (1) The DRIR lines in the typhoon chipset,
+    (2) The "vector" aka mangled interrupt number reported by SRM PALcode,
+    (3) The interrupt number assigned by the kernel.
+   The following function is concerned with (1) only.  */
+
+static int clipper_pci_map_irq(PCIDevice *d, int irq_num)
+{
+    int slot = d->devfn >> 3;
+
+    assert(irq_num >= 0 && irq_num <= 3);
+
+    return (slot + 1) * 4 + irq_num;
+}
+
+static void clipper_init(MachineState *machine)
+{
+    ram_addr_t ram_size = machine->ram_size;
+    const char *kernel_filename = machine->kernel_filename;
+    const char *kernel_cmdline = machine->kernel_cmdline;
+    const char *initrd_filename = machine->initrd_filename;
+    AlphaCPU *cpus[4];
+    PCIBus *pci_bus;
+    PCIDevice *pci_dev;
+    DeviceState *i82378_dev;
+    ISABus *isa_bus;
+    qemu_irq rtc_irq;
+    qemu_irq isa_irq;
+    long size, i;
+    char *palcode_filename;
+    uint64_t palcode_entry;
+    uint64_t kernel_entry, kernel_low;
+    unsigned int smp_cpus = machine->smp.cpus;
+
+    /* Create up to 4 cpus. */
+    memset(cpus, 0, sizeof(cpus));
+    for (i = 0; i < smp_cpus; ++i) {
+        cpus[i] = ALPHA_CPU(cpu_create(machine->cpu_type));
+    }
+
+    /*
+     * arg0 -> memory size
+     * arg1 -> kernel entry point
+     * arg2 -> config word
+     *
+     * Config word: bits 0-5 -> ncpus
+     *              bit  6   -> nographics option (for HWRPB CTB)
+     *
+     * See init_hwrpb() in the PALcode.
+     */
+    cpus[0]->env.trap_arg0 = ram_size;
+    cpus[0]->env.trap_arg1 = 0;
+    cpus[0]->env.trap_arg2 = smp_cpus | (!machine->enable_graphics << 6);
+
+    /*
+     * Init the chipset.  Because we're using CLIPPER IRQ mappings,
+     * the minimum PCI device IdSel is 1.
+     */
+    pci_bus = typhoon_init(machine->ram, &isa_irq, &rtc_irq, cpus,
+                           clipper_pci_map_irq, PCI_DEVFN(1, 0));
+
+    /*
+     * Init the PCI -> ISA bridge.
+     *
+     * Technically, PCI-based Alphas shipped with one of three different
+     * PCI-ISA bridges:
+     *
+     * - Intel i82378 SIO
+     * - Cypress CY82c693UB
+     * - ALI M1533
+     *
+     * (An Intel i82375 PCI-EISA bridge was also used on some models.)
+     *
+     * For simplicity, we model an i82378 here, even though it wouldn't
+     * have been on any Tsunami/Typhoon systems; it's close enough, and
+     * we don't want to deal with modelling the CY82c693UB (which has
+     * incompatible edge/level control registers, plus other peripherals
+     * like IDE and USB) or the M1533 (which also has IDE and USB).
+     *
+     * Importantly, we need to provide a PCI device node for it, otherwise
+     * some operating systems won't notice there's an ISA bus to configure.
+     */
+    i82378_dev = DEVICE(pci_create_simple(pci_bus, PCI_DEVFN(7, 0), "i82378"));
+    isa_bus = ISA_BUS(qdev_get_child_bus(i82378_dev, "isa.0"));
+
+    /* Connect the ISA PIC to the Typhoon IRQ used for ISA interrupts. */
+    qdev_connect_gpio_out(i82378_dev, 0, isa_irq);
+
+    /* Since we have an SRM-compatible PALcode, use the SRM epoch.
+     */
+    mc146818_rtc_init(isa_bus, 1900, rtc_irq);
+
+    /* VGA setup.  Don't bother loading the bios. */
+    pci_vga_init(pci_bus);
+
+    /* Network setup.  e1000 is good enough, failing Tulip support. */
+    for (i = 0; i < nb_nics; i++) {
+        pci_nic_init_nofail(&nd_table[i], pci_bus, "e1000", NULL);
+    }
+
+    /* Super I/O */
+    isa_create_simple(isa_bus, TYPE_SMC37C669_SUPERIO);
+
+    /* IDE disk setup. */
+    pci_dev = pci_create_simple(pci_bus, -1, "cmd646-ide");
+    pci_ide_create_devs(pci_dev);
+
+    /* Load PALcode.  Given that this is not "real" cpu palcode,
+       but one explicitly written for the emulation, we might as
+       well load it directly from an ELF image. */
+    palcode_filename = qemu_find_file(QEMU_FILE_TYPE_BIOS,
+                                      machine->firmware ?: "palcode-clipper");
+    if (palcode_filename == NULL) {
+        error_report("no palcode provided");
+        exit(1);
+    }
+    size = load_elf(palcode_filename, NULL, cpu_alpha_superpage_to_phys,
+                    NULL, &palcode_entry, NULL, NULL, NULL,
+                    0, EM_ALPHA, 0, 0);
+    if (size < 0) {
+        error_report("could not load palcode '%s'", palcode_filename);
+        exit(1);
+    }
+    g_free(palcode_filename);
+
+    /* Start all cpus at the PALcode RESET entry point. */
+    for (i = 0; i < smp_cpus; ++i) {
+        cpus[i]->env.pc = palcode_entry;
+        cpus[i]->env.palbr = palcode_entry;
+    }
+
+    /* Load a kernel. */
+    if (kernel_filename) {
+        uint64_t param_offset;
+
+        size = load_elf(kernel_filename, NULL, cpu_alpha_superpage_to_phys,
+                        NULL, &kernel_entry, &kernel_low, NULL, NULL,
+                        0, EM_ALPHA, 0, 0);
+        if (size < 0) {
+            error_report("could not load kernel '%s'", kernel_filename);
+            exit(1);
+        }
+
+        cpus[0]->env.trap_arg1 = kernel_entry;
+
+        param_offset = kernel_low - 0x6000;
+
+        if (kernel_cmdline) {
+            pstrcpy_targphys("cmdline", param_offset, 0x100, kernel_cmdline);
+        }
+
+        if (initrd_filename) {
+            long initrd_base;
+            int64_t initrd_size;
+
+            initrd_size = get_image_size(initrd_filename);
+            if (initrd_size < 0) {
+                error_report("could not load initial ram disk '%s'",
+                             initrd_filename);
+                exit(1);
+            }
+
+            /* Put the initrd image as high in memory as possible. */
+            initrd_base = (ram_size - initrd_size) & TARGET_PAGE_MASK;
+            load_image_targphys(initrd_filename, initrd_base,
+                                ram_size - initrd_base);
+
+            address_space_stq(&address_space_memory, param_offset + 0x100,
+                              initrd_base + 0xfffffc0000000000ULL,
+                              MEMTXATTRS_UNSPECIFIED,
+                              NULL);
+            address_space_stq(&address_space_memory, param_offset + 0x108,
+                              initrd_size, MEMTXATTRS_UNSPECIFIED, NULL);
+        }
+    }
+}
+
+static void clipper_machine_init(MachineClass *mc)
+{
+    mc->desc = "Alpha DP264/CLIPPER";
+    mc->init = clipper_init;
+    mc->block_default_type = IF_IDE;
+    mc->max_cpus = 4;
+    mc->is_default = true;
+    mc->default_cpu_type = ALPHA_CPU_TYPE_NAME("ev67");
+    mc->default_ram_id = "ram";
+}
+
+DEFINE_MACHINE("clipper", clipper_machine_init)
diff --git a/hw/alpha/meson.build b/hw/alpha/meson.build
new file mode 100644
index 000000000..81ca21577
--- /dev/null
+++ b/hw/alpha/meson.build
@@ -0,0 +1,8 @@
+alpha_ss = ss.source_set()
+alpha_ss.add(when: 'CONFIG_DP264', if_true: files(
+  'dp264.c',
+  'pci.c',
+  'typhoon.c',
+))
+
+hw_arch += {'alpha': alpha_ss}
diff --git a/hw/alpha/pci.c b/hw/alpha/pci.c
new file mode 100644
index 000000000..72251fcdf
--- /dev/null
+++ b/hw/alpha/pci.c
@@ -0,0 +1,91 @@
+/*
+ * QEMU Alpha PCI support functions.
+ *
+ * Some of this isn't very Alpha specific at all.
+ *
+ * ??? Sparse memory access not implemented.
+ */ + +#include "qemu/osdep.h" +#include "alpha_sys.h" +#include "qemu/log.h" +#include "trace.h" + + +/* Fallback for unassigned PCI I/O operations. Avoids MCHK. */ + +static uint64_t ignore_read(void *opaque, hwaddr addr, unsigned size) +{ + return 0; +} + +static void ignore_write(void *opaque, hwaddr addr, uint64_t v, unsigned size) +{ +} + +const MemoryRegionOps alpha_pci_ignore_ops = { + .read = ignore_read, + .write = ignore_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 1, + .max_access_size = 8, + }, + .impl = { + .min_access_size = 1, + .max_access_size = 8, + }, +}; + + +/* PCI config space reads/writes, to byte-word addressable memory. */ +static uint64_t bw_conf1_read(void *opaque, hwaddr addr, + unsigned size) +{ + PCIBus *b = opaque; + return pci_data_read(b, addr, size); +} + +static void bw_conf1_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + PCIBus *b = opaque; + pci_data_write(b, addr, val, size); +} + +const MemoryRegionOps alpha_pci_conf1_ops = { + .read = bw_conf1_read, + .write = bw_conf1_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .impl = { + .min_access_size = 1, + .max_access_size = 4, + }, +}; + +/* PCI/EISA Interrupt Acknowledge Cycle. */ + +static uint64_t iack_read(void *opaque, hwaddr addr, unsigned size) +{ + return pic_read_irq(isa_pic); +} + +static void special_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + trace_alpha_pci_iack_write(); +} + +const MemoryRegionOps alpha_pci_iack_ops = { + .read = iack_read, + .write = special_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + }, + .impl = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; diff --git a/hw/alpha/trace-events b/hw/alpha/trace-events new file mode 100644 index 000000000..952a81640 --- /dev/null +++ b/hw/alpha/trace-events @@ -0,0 +1,4 @@ +# See docs/devel/tracing.rst for syntax documentation. + +# pci.c +alpha_pci_iack_write(void) "" diff --git a/hw/alpha/trace.h b/hw/alpha/trace.h new file mode 100644 index 000000000..20fe69819 --- /dev/null +++ b/hw/alpha/trace.h @@ -0,0 +1 @@ +#include "trace/trace-hw_alpha.h" diff --git a/hw/alpha/typhoon.c b/hw/alpha/typhoon.c new file mode 100644 index 000000000..bd39c8ca8 --- /dev/null +++ b/hw/alpha/typhoon.c @@ -0,0 +1,952 @@ +/* + * DEC 21272 (TSUNAMI/TYPHOON) chipset emulation. + * + * Written by Richard Henderson. + * + * This work is licensed under the GNU GPL license version 2 or later. 
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/module.h"
+#include "qemu/units.h"
+#include "qapi/error.h"
+#include "cpu.h"
+#include "hw/irq.h"
+#include "alpha_sys.h"
+#include "qom/object.h"
+
+
+#define TYPE_TYPHOON_PCI_HOST_BRIDGE "typhoon-pcihost"
+#define TYPE_TYPHOON_IOMMU_MEMORY_REGION "typhoon-iommu-memory-region"
+
+typedef struct TyphoonCchip {
+    MemoryRegion region;
+    uint64_t misc;
+    uint64_t drir;
+    uint64_t dim[4];
+    uint32_t iic[4];
+    AlphaCPU *cpu[4];
+} TyphoonCchip;
+
+typedef struct TyphoonWindow {
+    uint64_t wba;
+    uint64_t wsm;
+    uint64_t tba;
+} TyphoonWindow;
+
+typedef struct TyphoonPchip {
+    MemoryRegion region;
+    MemoryRegion reg_iack;
+    MemoryRegion reg_mem;
+    MemoryRegion reg_io;
+    MemoryRegion reg_conf;
+
+    AddressSpace iommu_as;
+    IOMMUMemoryRegion iommu;
+
+    uint64_t ctl;
+    TyphoonWindow win[4];
+} TyphoonPchip;
+
+OBJECT_DECLARE_SIMPLE_TYPE(TyphoonState, TYPHOON_PCI_HOST_BRIDGE)
+
+struct TyphoonState {
+    PCIHostState parent_obj;
+
+    TyphoonCchip cchip;
+    TyphoonPchip pchip;
+    MemoryRegion dchip_region;
+};
+
+/* Called when one of DRIR or DIM changes. */
+static void cpu_irq_change(AlphaCPU *cpu, uint64_t req)
+{
+    /* If there are any non-masked interrupts, tell the cpu. */
+    if (cpu != NULL) {
+        CPUState *cs = CPU(cpu);
+        if (req) {
+            cpu_interrupt(cs, CPU_INTERRUPT_HARD);
+        } else {
+            cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
+        }
+    }
+}
+
+static MemTxResult cchip_read(void *opaque, hwaddr addr,
+                              uint64_t *data, unsigned size,
+                              MemTxAttrs attrs)
+{
+    CPUState *cpu = current_cpu;
+    TyphoonState *s = opaque;
+    uint64_t ret = 0;
+
+    switch (addr) {
+    case 0x0000:
+        /* CSC: Cchip System Configuration Register. */
+        /* All sorts of data here; probably the only thing relevant is
+           PIP<14> Pchip 1 Present = 0. */
+        break;
+
+    case 0x0040:
+        /* MTR: Memory Timing Register. */
+        /* All sorts of stuff related to real DRAM. */
+        break;
+
+    case 0x0080:
+        /* MISC: Miscellaneous Register. */
+        ret = s->cchip.misc | (cpu->cpu_index & 3);
+        break;
+
+    case 0x00c0:
+        /* MPD: Memory Presence Detect Register. */
+        break;
+
+    case 0x0100: /* AAR0 */
+    case 0x0140: /* AAR1 */
+    case 0x0180: /* AAR2 */
+    case 0x01c0: /* AAR3 */
+        /* AAR: Array Address Register. */
+        /* All sorts of information about DRAM. */
+        break;
+
+    case 0x0200:
+        /* DIM0: Device Interrupt Mask Register, CPU0. */
+        ret = s->cchip.dim[0];
+        break;
+    case 0x0240:
+        /* DIM1: Device Interrupt Mask Register, CPU1. */
+        ret = s->cchip.dim[1];
+        break;
+    case 0x0280:
+        /* DIR0: Device Interrupt Request Register, CPU0. */
+        ret = s->cchip.dim[0] & s->cchip.drir;
+        break;
+    case 0x02c0:
+        /* DIR1: Device Interrupt Request Register, CPU1. */
+        ret = s->cchip.dim[1] & s->cchip.drir;
+        break;
+    case 0x0300:
+        /* DRIR: Device Raw Interrupt Request Register. */
+        ret = s->cchip.drir;
+        break;
+
+    case 0x0340:
+        /* PRBEN: Probe Enable Register. */
+        break;
+
+    case 0x0380:
+        /* IIC0: Interval Ignore Count Register, CPU0. */
+        ret = s->cchip.iic[0];
+        break;
+    case 0x03c0:
+        /* IIC1: Interval Ignore Count Register, CPU1. */
+        ret = s->cchip.iic[1];
+        break;
+
+    case 0x0400: /* MPR0 */
+    case 0x0440: /* MPR1 */
+    case 0x0480: /* MPR2 */
+    case 0x04c0: /* MPR3 */
+        /* MPR: Memory Programming Register. */
+        break;
+
+    case 0x0580:
+        /* TTR: TIGbus Timing Register. */
+        /* All sorts of stuff related to interrupt delivery timings. */
+        break;
+    case 0x05c0:
+        /* TDR: TIGbus Device Timing Register. */
+        break;
+
+    case 0x0600:
+        /* DIM2: Device Interrupt Mask Register, CPU2.
*/ + ret = s->cchip.dim[2]; + break; + case 0x0640: + /* DIM3: Device Interrupt Mask Register, CPU3. */ + ret = s->cchip.dim[3]; + break; + case 0x0680: + /* DIR2: Device Interrupt Request Register, CPU2. */ + ret = s->cchip.dim[2] & s->cchip.drir; + break; + case 0x06c0: + /* DIR3: Device Interrupt Request Register, CPU3. */ + ret = s->cchip.dim[3] & s->cchip.drir; + break; + + case 0x0700: + /* IIC2: Interval Ignore Count Register, CPU2. */ + ret = s->cchip.iic[2]; + break; + case 0x0740: + /* IIC3: Interval Ignore Count Register, CPU3. */ + ret = s->cchip.iic[3]; + break; + + case 0x0780: + /* PWR: Power Management Control. */ + break; + + case 0x0c00: /* CMONCTLA */ + case 0x0c40: /* CMONCTLB */ + case 0x0c80: /* CMONCNT01 */ + case 0x0cc0: /* CMONCNT23 */ + break; + + default: + return MEMTX_ERROR; + } + + *data = ret; + return MEMTX_OK; +} + +static uint64_t dchip_read(void *opaque, hwaddr addr, unsigned size) +{ + /* Skip this. It's all related to DRAM timing and setup. */ + return 0; +} + +static MemTxResult pchip_read(void *opaque, hwaddr addr, uint64_t *data, + unsigned size, MemTxAttrs attrs) +{ + TyphoonState *s = opaque; + uint64_t ret = 0; + + switch (addr) { + case 0x0000: + /* WSBA0: Window Space Base Address Register. */ + ret = s->pchip.win[0].wba; + break; + case 0x0040: + /* WSBA1 */ + ret = s->pchip.win[1].wba; + break; + case 0x0080: + /* WSBA2 */ + ret = s->pchip.win[2].wba; + break; + case 0x00c0: + /* WSBA3 */ + ret = s->pchip.win[3].wba; + break; + + case 0x0100: + /* WSM0: Window Space Mask Register. */ + ret = s->pchip.win[0].wsm; + break; + case 0x0140: + /* WSM1 */ + ret = s->pchip.win[1].wsm; + break; + case 0x0180: + /* WSM2 */ + ret = s->pchip.win[2].wsm; + break; + case 0x01c0: + /* WSM3 */ + ret = s->pchip.win[3].wsm; + break; + + case 0x0200: + /* TBA0: Translated Base Address Register. */ + ret = s->pchip.win[0].tba; + break; + case 0x0240: + /* TBA1 */ + ret = s->pchip.win[1].tba; + break; + case 0x0280: + /* TBA2 */ + ret = s->pchip.win[2].tba; + break; + case 0x02c0: + /* TBA3 */ + ret = s->pchip.win[3].tba; + break; + + case 0x0300: + /* PCTL: Pchip Control Register. */ + ret = s->pchip.ctl; + break; + case 0x0340: + /* PLAT: Pchip Master Latency Register. */ + break; + case 0x03c0: + /* PERROR: Pchip Error Register. */ + break; + case 0x0400: + /* PERRMASK: Pchip Error Mask Register. */ + break; + case 0x0440: + /* PERRSET: Pchip Error Set Register. */ + break; + case 0x0480: + /* TLBIV: Translation Buffer Invalidate Virtual Register (WO). */ + break; + case 0x04c0: + /* TLBIA: Translation Buffer Invalidate All Register (WO). */ + break; + case 0x0500: /* PMONCTL */ + case 0x0540: /* PMONCNT */ + case 0x0800: /* SPRST */ + break; + + default: + return MEMTX_ERROR; + } + + *data = ret; + return MEMTX_OK; +} + +static MemTxResult cchip_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size, + MemTxAttrs attrs) +{ + TyphoonState *s = opaque; + uint64_t oldval, newval; + + switch (addr) { + case 0x0000: + /* CSC: Cchip System Configuration Register. */ + /* All sorts of data here; nothing relevant RW. */ + break; + + case 0x0040: + /* MTR: Memory Timing Register. */ + /* All sorts of stuff related to real DRAM. */ + break; + + case 0x0080: + /* MISC: Miscellaneous Register. 
+         */
+        newval = oldval = s->cchip.misc;
+        newval &= ~(val & 0x10000ff0);     /* W1C fields */
+        if (val & 0x100000) {
+            newval &= ~0xff0000ull;        /* ACL clears ABT and ABW */
+        } else {
+            newval |= val & 0x00f00000;    /* ABT field is W1S */
+            if ((newval & 0xf0000) == 0) {
+                newval |= val & 0xf0000;   /* ABW field is W1S iff zero */
+            }
+        }
+        newval |= (val & 0xf000) >> 4;     /* IPREQ field sets IPINTR. */
+
+        newval &= ~0xf0000000000ull;       /* WO and RW fields */
+        newval |= val & 0xf0000000000ull;
+        s->cchip.misc = newval;
+
+        /* Pass on changes to IPI and ITI state. */
+        if ((newval ^ oldval) & 0xff0) {
+            int i;
+            for (i = 0; i < 4; ++i) {
+                AlphaCPU *cpu = s->cchip.cpu[i];
+                if (cpu != NULL) {
+                    CPUState *cs = CPU(cpu);
+                    /* IPI can be either cleared or set by the write. */
+                    if (newval & (1 << (i + 8))) {
+                        cpu_interrupt(cs, CPU_INTERRUPT_SMP);
+                    } else {
+                        cpu_reset_interrupt(cs, CPU_INTERRUPT_SMP);
+                    }
+
+                    /* ITI can only be cleared by the write. */
+                    if ((newval & (1 << (i + 4))) == 0) {
+                        cpu_reset_interrupt(cs, CPU_INTERRUPT_TIMER);
+                    }
+                }
+            }
+        }
+        break;
+
+    case 0x00c0:
+        /* MPD: Memory Presence Detect Register. */
+        break;
+
+    case 0x0100: /* AAR0 */
+    case 0x0140: /* AAR1 */
+    case 0x0180: /* AAR2 */
+    case 0x01c0: /* AAR3 */
+        /* AAR: Array Address Register. */
+        /* All sorts of information about DRAM. */
+        break;
+
+    case 0x0200: /* DIM0 */
+        /* DIM: Device Interrupt Mask Register, CPU0. */
+        s->cchip.dim[0] = val;
+        cpu_irq_change(s->cchip.cpu[0], val & s->cchip.drir);
+        break;
+    case 0x0240: /* DIM1 */
+        /* DIM: Device Interrupt Mask Register, CPU1. */
+        s->cchip.dim[1] = val;
+        cpu_irq_change(s->cchip.cpu[1], val & s->cchip.drir);
+        break;
+
+    case 0x0280: /* DIR0 (RO) */
+    case 0x02c0: /* DIR1 (RO) */
+    case 0x0300: /* DRIR (RO) */
+        break;
+
+    case 0x0340:
+        /* PRBEN: Probe Enable Register. */
+        break;
+
+    case 0x0380: /* IIC0 */
+        s->cchip.iic[0] = val & 0xffffff;
+        break;
+    case 0x03c0: /* IIC1 */
+        s->cchip.iic[1] = val & 0xffffff;
+        break;
+
+    case 0x0400: /* MPR0 */
+    case 0x0440: /* MPR1 */
+    case 0x0480: /* MPR2 */
+    case 0x04c0: /* MPR3 */
+        /* MPR: Memory Programming Register. */
+        break;
+
+    case 0x0580:
+        /* TTR: TIGbus Timing Register. */
+        /* All sorts of stuff related to interrupt delivery timings. */
+        break;
+    case 0x05c0:
+        /* TDR: TIGbus Device Timing Register. */
+        break;
+
+    case 0x0600:
+        /* DIM2: Device Interrupt Mask Register, CPU2. */
+        s->cchip.dim[2] = val;
+        cpu_irq_change(s->cchip.cpu[2], val & s->cchip.drir);
+        break;
+    case 0x0640:
+        /* DIM3: Device Interrupt Mask Register, CPU3. */
+        s->cchip.dim[3] = val;
+        cpu_irq_change(s->cchip.cpu[3], val & s->cchip.drir);
+        break;
+
+    case 0x0680: /* DIR2 (RO) */
+    case 0x06c0: /* DIR3 (RO) */
+        break;
+
+    case 0x0700: /* IIC2 */
+        s->cchip.iic[2] = val & 0xffffff;
+        break;
+    case 0x0740: /* IIC3 */
+        s->cchip.iic[3] = val & 0xffffff;
+        break;
+
+    case 0x0780:
+        /* PWR: Power Management Control. */
+        break;
+
+    case 0x0c00: /* CMONCTLA */
+    case 0x0c40: /* CMONCTLB */
+    case 0x0c80: /* CMONCNT01 */
+    case 0x0cc0: /* CMONCNT23 */
+        break;
+
+    default:
+        return MEMTX_ERROR;
+    }
+
+    return MEMTX_OK;
+}
+
+static void dchip_write(void *opaque, hwaddr addr,
+                        uint64_t val, unsigned size)
+{
+    /* Skip this. It's all related to DRAM timing and setup. */
+}
+
+static MemTxResult pchip_write(void *opaque, hwaddr addr,
+                               uint64_t val, unsigned size,
+                               MemTxAttrs attrs)
+{
+    TyphoonState *s = opaque;
+    uint64_t oldval;
+
+    switch (addr) {
+    case 0x0000:
+        /* WSBA0: Window Space Base Address Register.
*/ + s->pchip.win[0].wba = val & 0xfff00003u; + break; + case 0x0040: + /* WSBA1 */ + s->pchip.win[1].wba = val & 0xfff00003u; + break; + case 0x0080: + /* WSBA2 */ + s->pchip.win[2].wba = val & 0xfff00003u; + break; + case 0x00c0: + /* WSBA3 */ + s->pchip.win[3].wba = (val & 0x80fff00001ull) | 2; + break; + + case 0x0100: + /* WSM0: Window Space Mask Register. */ + s->pchip.win[0].wsm = val & 0xfff00000u; + break; + case 0x0140: + /* WSM1 */ + s->pchip.win[1].wsm = val & 0xfff00000u; + break; + case 0x0180: + /* WSM2 */ + s->pchip.win[2].wsm = val & 0xfff00000u; + break; + case 0x01c0: + /* WSM3 */ + s->pchip.win[3].wsm = val & 0xfff00000u; + break; + + case 0x0200: + /* TBA0: Translated Base Address Register. */ + s->pchip.win[0].tba = val & 0x7fffffc00ull; + break; + case 0x0240: + /* TBA1 */ + s->pchip.win[1].tba = val & 0x7fffffc00ull; + break; + case 0x0280: + /* TBA2 */ + s->pchip.win[2].tba = val & 0x7fffffc00ull; + break; + case 0x02c0: + /* TBA3 */ + s->pchip.win[3].tba = val & 0x7fffffc00ull; + break; + + case 0x0300: + /* PCTL: Pchip Control Register. */ + oldval = s->pchip.ctl; + oldval &= ~0x00001cff0fc7ffull; /* RW fields */ + oldval |= val & 0x00001cff0fc7ffull; + s->pchip.ctl = oldval; + break; + + case 0x0340: + /* PLAT: Pchip Master Latency Register. */ + break; + case 0x03c0: + /* PERROR: Pchip Error Register. */ + break; + case 0x0400: + /* PERRMASK: Pchip Error Mask Register. */ + break; + case 0x0440: + /* PERRSET: Pchip Error Set Register. */ + break; + + case 0x0480: + /* TLBIV: Translation Buffer Invalidate Virtual Register. */ + break; + + case 0x04c0: + /* TLBIA: Translation Buffer Invalidate All Register (WO). */ + break; + + case 0x0500: + /* PMONCTL */ + case 0x0540: + /* PMONCNT */ + case 0x0800: + /* SPRST */ + break; + + default: + return MEMTX_ERROR; + } + + return MEMTX_OK; +} + +static const MemoryRegionOps cchip_ops = { + .read_with_attrs = cchip_read, + .write_with_attrs = cchip_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 8, + .max_access_size = 8, + }, + .impl = { + .min_access_size = 8, + .max_access_size = 8, + }, +}; + +static const MemoryRegionOps dchip_ops = { + .read = dchip_read, + .write = dchip_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 8, + .max_access_size = 8, + }, + .impl = { + .min_access_size = 8, + .max_access_size = 8, + }, +}; + +static const MemoryRegionOps pchip_ops = { + .read_with_attrs = pchip_read, + .write_with_attrs = pchip_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 8, + .max_access_size = 8, + }, + .impl = { + .min_access_size = 8, + .max_access_size = 8, + }, +}; + +/* A subroutine of typhoon_translate_iommu that builds an IOMMUTLBEntry + using the given translated address and mask. */ +static bool make_iommu_tlbe(hwaddr taddr, hwaddr mask, IOMMUTLBEntry *ret) +{ + *ret = (IOMMUTLBEntry) { + .target_as = &address_space_memory, + .translated_addr = taddr, + .addr_mask = mask, + .perm = IOMMU_RW, + }; + return true; +} + +/* A subroutine of typhoon_translate_iommu that handles scatter-gather + translation, given the address of the PTE. */ +static bool pte_translate(hwaddr pte_addr, IOMMUTLBEntry *ret) +{ + uint64_t pte = address_space_ldq(&address_space_memory, pte_addr, + MEMTXATTRS_UNSPECIFIED, NULL); + + /* Check valid bit. 
*/ + if ((pte & 1) == 0) { + return false; + } + + return make_iommu_tlbe((pte & 0x3ffffe) << 12, 0x1fff, ret); +} + +/* A subroutine of typhoon_translate_iommu that handles one of the + four single-address-cycle translation windows. */ +static bool window_translate(TyphoonWindow *win, hwaddr addr, + IOMMUTLBEntry *ret) +{ + uint32_t wba = win->wba; + uint64_t wsm = win->wsm; + uint64_t tba = win->tba; + uint64_t wsm_ext = wsm | 0xfffff; + + /* Check for window disabled. */ + if ((wba & 1) == 0) { + return false; + } + + /* Check for window hit. */ + if ((addr & ~wsm_ext) != (wba & 0xfff00000u)) { + return false; + } + + if (wba & 2) { + /* Scatter-gather translation. */ + hwaddr pte_addr; + + /* See table 10-6, Generating PTE address for PCI DMA Address. */ + pte_addr = tba & ~(wsm >> 10); + pte_addr |= (addr & (wsm | 0xfe000)) >> 10; + return pte_translate(pte_addr, ret); + } else { + /* Direct-mapped translation. */ + return make_iommu_tlbe(tba & ~wsm_ext, wsm_ext, ret); + } +} + +/* Handle PCI-to-system address translation. */ +/* TODO: A translation failure here ought to set PCI error codes on the + Pchip and generate a machine check interrupt. */ +static IOMMUTLBEntry typhoon_translate_iommu(IOMMUMemoryRegion *iommu, + hwaddr addr, + IOMMUAccessFlags flag, + int iommu_idx) +{ + TyphoonPchip *pchip = container_of(iommu, TyphoonPchip, iommu); + IOMMUTLBEntry ret; + int i; + + if (addr <= 0xffffffffu) { + /* Single-address cycle. */ + + /* Check for the Window Hole, inhibiting matching. */ + if ((pchip->ctl & 0x20) + && addr >= 0x80000 + && addr <= 0xfffff) { + goto failure; + } + + /* Check the first three windows. */ + for (i = 0; i < 3; ++i) { + if (window_translate(&pchip->win[i], addr, &ret)) { + goto success; + } + } + + /* Check the fourth window for DAC disable. */ + if ((pchip->win[3].wba & 0x80000000000ull) == 0 + && window_translate(&pchip->win[3], addr, &ret)) { + goto success; + } + } else { + /* Double-address cycle. */ + + if (addr >= 0x10000000000ull && addr < 0x20000000000ull) { + /* Check for the DMA monster window. */ + if (pchip->ctl & 0x40) { + /* See 10.1.4.4; in particular <39:35> is ignored. */ + make_iommu_tlbe(0, 0x007ffffffffull, &ret); + goto success; + } + } + + if (addr >= 0x80000000000ull && addr <= 0xfffffffffffull) { + /* Check the fourth window for DAC enable and window enable. */ + if ((pchip->win[3].wba & 0x80000000001ull) == 0x80000000001ull) { + uint64_t pte_addr; + + pte_addr = pchip->win[3].tba & 0x7ffc00000ull; + pte_addr |= (addr & 0xffffe000u) >> 10; + if (pte_translate(pte_addr, &ret)) { + goto success; + } + } + } + } + + failure: + ret = (IOMMUTLBEntry) { .perm = IOMMU_NONE }; + success: + return ret; +} + +static AddressSpace *typhoon_pci_dma_iommu(PCIBus *bus, void *opaque, int devfn) +{ + TyphoonState *s = opaque; + return &s->pchip.iommu_as; +} + +static void typhoon_set_irq(void *opaque, int irq, int level) +{ + TyphoonState *s = opaque; + uint64_t drir; + int i; + + /* Set/Reset the bit in CCHIP.DRIR based on IRQ+LEVEL. 
+     */
+    drir = s->cchip.drir;
+    if (level) {
+        drir |= 1ull << irq;
+    } else {
+        drir &= ~(1ull << irq);
+    }
+    s->cchip.drir = drir;
+
+    for (i = 0; i < 4; ++i) {
+        cpu_irq_change(s->cchip.cpu[i], s->cchip.dim[i] & drir);
+    }
+}
+
+static void typhoon_set_isa_irq(void *opaque, int irq, int level)
+{
+    typhoon_set_irq(opaque, 55, level);
+}
+
+static void typhoon_set_timer_irq(void *opaque, int irq, int level)
+{
+    TyphoonState *s = opaque;
+    int i;
+
+    /* Thankfully, the mc146818rtc code doesn't track the IRQ state,
+       and so we don't have to worry about missing interrupts just
+       because we never actually ACK the interrupt.  Just ignore any
+       case of the interrupt level going low. */
+    if (level == 0) {
+        return;
+    }
+
+    /* Deliver the interrupt to each CPU, considering each CPU's IIC. */
+    for (i = 0; i < 4; ++i) {
+        AlphaCPU *cpu = s->cchip.cpu[i];
+        if (cpu != NULL) {
+            uint32_t iic = s->cchip.iic[i];
+
+            /* ??? The verbiage in Section 10.2.2.10 isn't 100% clear.
+               Bit 24 is the OverFlow bit, RO, and set when the count
+               decrements past 0.  When is OF cleared?  My guess is that
+               OF is actually cleared when the IIC is written, and that
+               the ICNT field always decrements.  At least, that's an
+               interpretation that makes sense, and "allows the CPU to
+               determine exactly how many interval timer ticks were
+               skipped".  At least within the next 4M ticks... */
+
+            iic = ((iic - 1) & 0x1ffffff) | (iic & 0x1000000);
+            s->cchip.iic[i] = iic;
+
+            if (iic & 0x1000000) {
+                /* Set the ITI bit for this cpu. */
+                s->cchip.misc |= 1 << (i + 4);
+                /* And signal the interrupt. */
+                cpu_interrupt(CPU(cpu), CPU_INTERRUPT_TIMER);
+            }
+        }
+    }
+}
+
+static void typhoon_alarm_timer(void *opaque)
+{
+    TyphoonState *s = (TyphoonState *)((uintptr_t)opaque & ~3);
+    int cpu = (uintptr_t)opaque & 3;
+
+    /* Set the ITI bit for this cpu. */
+    s->cchip.misc |= 1 << (cpu + 4);
+    cpu_interrupt(CPU(s->cchip.cpu[cpu]), CPU_INTERRUPT_TIMER);
+}
+
+PCIBus *typhoon_init(MemoryRegion *ram, qemu_irq *p_isa_irq,
+                     qemu_irq *p_rtc_irq, AlphaCPU *cpus[4],
+                     pci_map_irq_fn sys_map_irq, uint8_t devfn_min)
+{
+    MemoryRegion *addr_space = get_system_memory();
+    DeviceState *dev;
+    TyphoonState *s;
+    PCIHostState *phb;
+    PCIBus *b;
+    int i;
+
+    dev = qdev_new(TYPE_TYPHOON_PCI_HOST_BRIDGE);
+
+    s = TYPHOON_PCI_HOST_BRIDGE(dev);
+    phb = PCI_HOST_BRIDGE(dev);
+
+    s->cchip.misc = 0x800000000ull; /* Revision: Typhoon. */
+    s->pchip.win[3].wba = 2;        /* Window 3 SG always enabled. */
+
+    /* Remember the CPUs so that we can deliver interrupts to them. */
+    for (i = 0; i < 4; i++) {
+        AlphaCPU *cpu = cpus[i];
+        s->cchip.cpu[i] = cpu;
+        if (cpu != NULL) {
+            cpu->alarm_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
+                                            typhoon_alarm_timer,
+                                            (void *)((uintptr_t)s + i));
+        }
+    }
+
+    *p_isa_irq = qemu_allocate_irq(typhoon_set_isa_irq, s, 0);
+    *p_rtc_irq = qemu_allocate_irq(typhoon_set_timer_irq, s, 0);
+
+    /* Main memory region, 0x00.0000.0000.  Real hardware supports 32GB,
+       but the address space hole reserved at this point is 8TB. */
+    memory_region_add_subregion(addr_space, 0, ram);
+
+    /* TIGbus, 0x801.0000.0000, 1GB. */
+    /* ??? The TIGbus is used for delivering interrupts, and access to
+       the flash ROM.  I'm not sure that we need to implement it at all. */
+
+    /* Pchip0 CSRs, 0x801.8000.0000, 256MB. */
+    memory_region_init_io(&s->pchip.region, OBJECT(s), &pchip_ops, s, "pchip0",
+                          256 * MiB);
+    memory_region_add_subregion(addr_space, 0x80180000000ULL,
+                                &s->pchip.region);
+
+    /* Cchip CSRs, 0x801.A000.0000, 256MB.
*/ + memory_region_init_io(&s->cchip.region, OBJECT(s), &cchip_ops, s, "cchip0", + 256 * MiB); + memory_region_add_subregion(addr_space, 0x801a0000000ULL, + &s->cchip.region); + + /* Dchip CSRs, 0x801.B000.0000, 256MB. */ + memory_region_init_io(&s->dchip_region, OBJECT(s), &dchip_ops, s, "dchip0", + 256 * MiB); + memory_region_add_subregion(addr_space, 0x801b0000000ULL, + &s->dchip_region); + + /* Pchip0 PCI memory, 0x800.0000.0000, 4GB. */ + memory_region_init(&s->pchip.reg_mem, OBJECT(s), "pci0-mem", 4 * GiB); + memory_region_add_subregion(addr_space, 0x80000000000ULL, + &s->pchip.reg_mem); + + /* Pchip0 PCI I/O, 0x801.FC00.0000, 32MB. */ + memory_region_init_io(&s->pchip.reg_io, OBJECT(s), &alpha_pci_ignore_ops, + NULL, "pci0-io", 32 * MiB); + memory_region_add_subregion(addr_space, 0x801fc000000ULL, + &s->pchip.reg_io); + + b = pci_register_root_bus(dev, "pci", + typhoon_set_irq, sys_map_irq, s, + &s->pchip.reg_mem, &s->pchip.reg_io, + devfn_min, 64, TYPE_PCI_BUS); + phb->bus = b; + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + + /* Host memory as seen from the PCI side, via the IOMMU. */ + memory_region_init_iommu(&s->pchip.iommu, sizeof(s->pchip.iommu), + TYPE_TYPHOON_IOMMU_MEMORY_REGION, OBJECT(s), + "iommu-typhoon", UINT64_MAX); + address_space_init(&s->pchip.iommu_as, MEMORY_REGION(&s->pchip.iommu), + "pchip0-pci"); + pci_setup_iommu(b, typhoon_pci_dma_iommu, s); + + /* Pchip0 PCI special/interrupt acknowledge, 0x801.F800.0000, 64MB. */ + memory_region_init_io(&s->pchip.reg_iack, OBJECT(s), &alpha_pci_iack_ops, + b, "pci0-iack", 64 * MiB); + memory_region_add_subregion(addr_space, 0x801f8000000ULL, + &s->pchip.reg_iack); + + /* Pchip0 PCI configuration, 0x801.FE00.0000, 16MB. */ + memory_region_init_io(&s->pchip.reg_conf, OBJECT(s), &alpha_pci_conf1_ops, + b, "pci0-conf", 16 * MiB); + memory_region_add_subregion(addr_space, 0x801fe000000ULL, + &s->pchip.reg_conf); + + /* For the record, these are the mappings for the second PCI bus. + We can get away with not implementing them because we indicate + via the Cchip.CSC bit that Pchip1 is not present. */ + /* Pchip1 PCI memory, 0x802.0000.0000, 4GB. */ + /* Pchip1 CSRs, 0x802.8000.0000, 256MB. */ + /* Pchip1 PCI special/interrupt acknowledge, 0x802.F800.0000, 64MB. */ + /* Pchip1 PCI I/O, 0x802.FC00.0000, 32MB. */ + /* Pchip1 PCI configuration, 0x802.FE00.0000, 16MB. 
+ */
+
+    return b;
+}
+
+static const TypeInfo typhoon_pcihost_info = {
+    .name = TYPE_TYPHOON_PCI_HOST_BRIDGE,
+    .parent = TYPE_PCI_HOST_BRIDGE,
+    .instance_size = sizeof(TyphoonState),
+};
+
+static void typhoon_iommu_memory_region_class_init(ObjectClass *klass,
+                                                   void *data)
+{
+    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);
+
+    imrc->translate = typhoon_translate_iommu;
+}
+
+static const TypeInfo typhoon_iommu_memory_region_info = {
+    .parent = TYPE_IOMMU_MEMORY_REGION,
+    .name = TYPE_TYPHOON_IOMMU_MEMORY_REGION,
+    .class_init = typhoon_iommu_memory_region_class_init,
+};
+
+static void typhoon_register_types(void)
+{
+    type_register_static(&typhoon_pcihost_info);
+    type_register_static(&typhoon_iommu_memory_region_info);
+}
+
+type_init(typhoon_register_types)
diff --git a/hw/arm/Kconfig b/hw/arm/Kconfig
new file mode 100644
index 000000000..2d37d29f0
--- /dev/null
+++ b/hw/arm/Kconfig
@@ -0,0 +1,550 @@
+config ARM_VIRT
+    bool
+    imply PCI_DEVICES
+    imply TEST_DEVICES
+    imply VFIO_AMD_XGBE
+    imply VFIO_PLATFORM
+    imply VFIO_XGMAC
+    imply TPM_TIS_SYSBUS
+    imply NVDIMM
+    select ARM_GIC
+    select ACPI
+    select ARM_SMMUV3
+    select GPIO_KEY
+    select FW_CFG_DMA
+    select PCI_EXPRESS
+    select PCI_EXPRESS_GENERIC_BRIDGE
+    select PFLASH_CFI01
+    select PL011 # UART
+    select PL031 # RTC
+    select PL061 # GPIO
+    select GPIO_PWR
+    select PLATFORM_BUS
+    select SMBIOS
+    select VIRTIO_MMIO
+    select ACPI_PCI
+    select MEM_DEVICE
+    select DIMM
+    select ACPI_HW_REDUCED
+    select ACPI_APEI
+
+config CHEETAH
+    bool
+    select OMAP
+    select TSC210X
+
+config CUBIEBOARD
+    bool
+    select ALLWINNER_A10
+
+config DIGIC
+    bool
+    select PTIMER
+    select PFLASH_CFI02
+
+config EXYNOS4
+    bool
+    select A9MPCORE
+    select I2C
+    select LAN9118
+    select PL310 # cache controller
+    select PTIMER
+    select SDHCI
+    select USB_EHCI_SYSBUS
+    select OR_IRQ
+
+config HIGHBANK
+    bool
+    select A9MPCORE
+    select A15MPCORE
+    select AHCI
+    select ARM_TIMER # sp804
+    select ARM_V7M
+    select PL011 # UART
+    select PL022 # SPI
+    select PL031 # RTC
+    select PL061 # GPIO
+    select PL310 # cache controller
+    select XGMAC # ethernet
+
+config INTEGRATOR
+    bool
+    select ARM_TIMER
+    select INTEGRATOR_DEBUG
+    select PL011 # UART
+    select PL031 # RTC
+    select PL041 # audio
+    select PL050 # keyboard/mouse
+    select PL110 # pl111 LCD controller
+    select PL181 # display
+    select SMC91C111
+
+config MAINSTONE
+    bool
+    select PXA2XX
+    select PFLASH_CFI01
+    select SMC91C111
+
+config MUSCA
+    bool
+    select ARMSSE
+    select PL011
+    select PL031
+    select SPLIT_IRQ
+    select UNIMP
+
+config MUSICPAL
+    bool
+    select OR_IRQ
+    select BITBANG_I2C
+    select MARVELL_88W8618
+    select PTIMER
+    select PFLASH_CFI02
+    select SERIAL
+    select WM8750
+
+config NETDUINO2
+    bool
+    select STM32F205_SOC
+
+config NETDUINOPLUS2
+    bool
+    select STM32F405_SOC
+
+config NSERIES
+    bool
+    select OMAP
+    select TMP105 # temperature sensor
+    select BLIZZARD # LCD/TV controller
+    select ONENAND
+    select TSC210X # touchscreen/sensors/audio
+    select TSC2005 # touchscreen/sensors/keypad
+    select LM832X # GPIO keyboard chip
+    select TWL92230 # energy-management
+    select TUSB6010
+
+config OMAP
+    bool
+    select FRAMEBUFFER
+    select I2C
+    select ECC
+    select NAND
+    select PFLASH_CFI01
+    select SD
+    select SERIAL
+
+config PXA2XX
+    bool
+    select FRAMEBUFFER
+    select I2C
+    select SERIAL
+    select SD
+    select SSI
+    select USB_OHCI
+    select PCMCIA
+
+config GUMSTIX
+    bool
+    select PFLASH_CFI01
+    select SMC91C111
+    select PXA2XX
+
+config TOSA
+    bool
+    select ZAURUS # scoop
+    select MICRODRIVE
+ select PXA2XX + select LED + +config SPITZ + bool + select ADS7846 # touch-screen controller + select MAX111X # A/D converter + select WM8750 # audio codec + select MAX7310 # GPIO expander + select ZAURUS # scoop + select NAND # memory + select ECC # Error-correcting for NAND + select MICRODRIVE + select PXA2XX + +config Z2 + bool + select PFLASH_CFI01 + select WM8750 + select PL011 # UART + select PXA2XX + +config REALVIEW + bool + imply PCI_DEVICES + imply PCI_TESTDEV + select SMC91C111 + select LAN9118 + select A9MPCORE + select A15MPCORE + select ARM11MPCORE + select ARM_TIMER + select VERSATILE_PCI + select WM8750 # audio codec + select LSI_SCSI_PCI + select PCI + select PL011 # UART + select PL031 # RTC + select PL041 # audio codec + select PL050 # keyboard/mouse + select PL061 # GPIO + select PL080 # DMA controller + select PL110 + select PL181 # display + select PL310 # cache controller + select VERSATILE_I2C + select DS1338 # I2C RTC+NVRAM + select USB_OHCI + +config SBSA_REF + bool + imply PCI_DEVICES + select AHCI + select ARM_SMMUV3 + select GPIO_KEY + select PCI_EXPRESS + select PCI_EXPRESS_GENERIC_BRIDGE + select PFLASH_CFI01 + select PL011 # UART + select PL031 # RTC + select PL061 # GPIO + select USB_EHCI_SYSBUS + select WDT_SBSA + +config SABRELITE + bool + select FSL_IMX6 + select SSI_M25P80 + +config STELLARIS + bool + select ARM_V7M + select CMSDK_APB_WATCHDOG + select I2C + select PL011 # UART + select PL022 # SPI + select PL061 # GPIO + select SSD0303 # OLED display + select SSD0323 # OLED display + select SSI_SD + select STELLARIS_INPUT + select STELLARIS_ENET # ethernet + select STELLARIS_GPTM # general purpose timer module + select UNIMP + +config STM32VLDISCOVERY + bool + select STM32F100_SOC + +config STRONGARM + bool + select PXA2XX + +config COLLIE + bool + select PFLASH_CFI01 + select ZAURUS # scoop + select STRONGARM + +config SX1 + bool + select OMAP + +config VERSATILE + bool + select ARM_TIMER # sp804 + select PFLASH_CFI01 + select LSI_SCSI_PCI + select PL050 # keyboard/mouse + select PL080 # DMA controller + select PL190 # Vector PIC + select REALVIEW + select USB_OHCI + +config VEXPRESS + bool + select A9MPCORE + select A15MPCORE + select ARM_MPTIMER + select ARM_TIMER # sp804 + select LAN9118 + select PFLASH_CFI01 + select PL011 # UART + select PL041 # audio codec + select PL181 # display + select REALVIEW + select SII9022 + select VIRTIO_MMIO + +config ZYNQ + bool + select A9MPCORE + select CADENCE # UART + select PFLASH_CFI02 + select PL330 + select SDHCI + select SSI_M25P80 + select USB_EHCI_SYSBUS + select XILINX # UART + select XILINX_AXI + select XILINX_SPI + select XILINX_SPIPS + select ZYNQ_DEVCFG + +config ARM_V7M + bool + # currently v7M must be included in a TCG build due to translate.c + default y if TCG && (ARM || AARCH64) + select PTIMER + select ARM_COMPATIBLE_SEMIHOSTING + +config ALLWINNER_A10 + bool + select AHCI + select ALLWINNER_A10_PIT + select ALLWINNER_A10_PIC + select ALLWINNER_EMAC + select SERIAL + select UNIMP + +config ALLWINNER_H3 + bool + select ALLWINNER_A10_PIT + select ALLWINNER_SUN8I_EMAC + select SERIAL + select ARM_TIMER + select ARM_GIC + select UNIMP + select USB_OHCI + select USB_EHCI_SYSBUS + select SD + +config RASPI + bool + select FRAMEBUFFER + select PL011 # UART + select SDHCI + select USB_DWC2 + +config STM32F100_SOC + bool + select ARM_V7M + select STM32F2XX_USART + select STM32F2XX_SPI + +config STM32F205_SOC + bool + select ARM_V7M + select OR_IRQ + select STM32F2XX_TIMER + select STM32F2XX_USART + 
select STM32F2XX_SYSCFG + select STM32F2XX_ADC + select STM32F2XX_SPI + +config STM32F405_SOC + bool + select ARM_V7M + select OR_IRQ + select STM32F4XX_SYSCFG + select STM32F4XX_EXTI + +config XLNX_ZYNQMP_ARM + bool + select AHCI + select ARM_GIC + select CADENCE + select DDC + select DPCD + select SDHCI + select SSI + select SSI_M25P80 + select XILINX_AXI + select XILINX_SPIPS + select XLNX_CSU_DMA + select XLNX_ZYNQMP + select XLNX_ZDMA + +config XLNX_VERSAL + bool + select ARM_GIC + select PL011 + select CADENCE + select VIRTIO_MMIO + select UNIMP + select XLNX_ZDMA + select XLNX_ZYNQMP + select OR_IRQ + select XLNX_BBRAM + select XLNX_EFUSE_VERSAL + +config NPCM7XX + bool + select A9MPCORE + select ADM1272 + select ARM_GIC + select SMBUS + select AT24C # EEPROM + select MAX34451 + select PL310 # cache controller + select PMBUS + select SERIAL + select SSI + select UNIMP + select PCA954X + +config FSL_IMX25 + bool + select IMX + select IMX_FEC + select IMX_I2C + select WDT_IMX2 + select SDHCI + +config FSL_IMX31 + bool + select SERIAL + select IMX + select IMX_I2C + select WDT_IMX2 + select LAN9118 + +config FSL_IMX6 + bool + select A9MPCORE + select IMX + select IMX_FEC + select IMX_I2C + select IMX_USBPHY + select WDT_IMX2 + select SDHCI + +config ASPEED_SOC + bool + select DS1338 + select FTGMAC100 + select I2C + select DPS310 + select PCA9552 + select SERIAL + select SMBUS_EEPROM + select PCA954X + select SSI + select SSI_M25P80 + select TMP105 + select TMP421 + select EMC141X + select UNIMP + select LED + +config MPS2 + bool + select ARMSSE + select LAN9118 + select MPS2_FPGAIO + select MPS2_SCC + select OR_IRQ + select PL022 # SPI + select PL080 # DMA controller + select SPLIT_IRQ + select UNIMP + select CMSDK_APB_WATCHDOG + select VERSATILE_I2C + +config FSL_IMX7 + bool + imply PCI_DEVICES + imply TEST_DEVICES + select A15MPCORE + select PCI + select IMX + select IMX_FEC + select IMX_I2C + select WDT_IMX2 + select PCI_EXPRESS_DESIGNWARE + select SDHCI + select UNIMP + +config ARM_SMMUV3 + bool + +config FSL_IMX6UL + bool + select A15MPCORE + select IMX + select IMX_FEC + select IMX_I2C + select WDT_IMX2 + select SDHCI + select UNIMP + +config MICROBIT + bool + select NRF51_SOC + +config NRF51_SOC + bool + select I2C + select ARM_V7M + select UNIMP + +config EMCRAFT_SF2 + bool + select MSF2 + select SSI_M25P80 + +config MSF2 + bool + select ARM_V7M + select PTIMER + select SERIAL + select SSI + select UNIMP + +config ZAURUS + bool + select NAND + select ECC + +config A9MPCORE + bool + select A9_GTIMER + select A9SCU # snoop control unit + select ARM_GIC + select ARM_MPTIMER + +config A15MPCORE + bool + select ARM_GIC + +config ARM11MPCORE + bool + select ARM11SCU + +config ARMSSE + bool + select ARM_V7M + select ARMSSE_CPU_PWRCTRL + select ARMSSE_CPUID + select ARMSSE_MHU + select CMSDK_APB_TIMER + select CMSDK_APB_DUALTIMER + select CMSDK_APB_UART + select CMSDK_APB_WATCHDOG + select IOTKIT_SECCTL + select IOTKIT_SYSCTL + select IOTKIT_SYSINFO + select OR_IRQ + select SPLIT_IRQ + select TZ_MPC + select TZ_MSC + select TZ_PPC + select UNIMP + select SSE_COUNTER + select SSE_TIMER diff --git a/hw/arm/allwinner-a10.c b/hw/arm/allwinner-a10.c new file mode 100644 index 000000000..05e84728c --- /dev/null +++ b/hw/arm/allwinner-a10.c @@ -0,0 +1,191 @@ +/* + * Allwinner A10 SoC emulation + * + * Copyright (C) 2013 Li Guang + * Written by Li Guang + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as 
published by the + * Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/module.h" +#include "hw/sysbus.h" +#include "hw/arm/allwinner-a10.h" +#include "hw/misc/unimp.h" +#include "sysemu/sysemu.h" +#include "hw/boards.h" +#include "hw/usb/hcd-ohci.h" + +#define AW_A10_MMC0_BASE 0x01c0f000 +#define AW_A10_PIC_REG_BASE 0x01c20400 +#define AW_A10_PIT_REG_BASE 0x01c20c00 +#define AW_A10_UART0_REG_BASE 0x01c28000 +#define AW_A10_EMAC_BASE 0x01c0b000 +#define AW_A10_EHCI_BASE 0x01c14000 +#define AW_A10_OHCI_BASE 0x01c14400 +#define AW_A10_SATA_BASE 0x01c18000 +#define AW_A10_RTC_BASE 0x01c20d00 + +static void aw_a10_init(Object *obj) +{ + AwA10State *s = AW_A10(obj); + + object_initialize_child(obj, "cpu", &s->cpu, + ARM_CPU_TYPE_NAME("cortex-a8")); + + object_initialize_child(obj, "intc", &s->intc, TYPE_AW_A10_PIC); + + object_initialize_child(obj, "timer", &s->timer, TYPE_AW_A10_PIT); + + object_initialize_child(obj, "emac", &s->emac, TYPE_AW_EMAC); + + object_initialize_child(obj, "sata", &s->sata, TYPE_ALLWINNER_AHCI); + + if (machine_usb(current_machine)) { + int i; + + for (i = 0; i < AW_A10_NUM_USB; i++) { + object_initialize_child(obj, "ehci[*]", &s->ehci[i], + TYPE_PLATFORM_EHCI); + object_initialize_child(obj, "ohci[*]", &s->ohci[i], + TYPE_SYSBUS_OHCI); + } + } + + object_initialize_child(obj, "mmc0", &s->mmc0, TYPE_AW_SDHOST_SUN4I); + + object_initialize_child(obj, "rtc", &s->rtc, TYPE_AW_RTC_SUN4I); +} + +static void aw_a10_realize(DeviceState *dev, Error **errp) +{ + AwA10State *s = AW_A10(dev); + SysBusDevice *sysbusdev; + + if (!qdev_realize(DEVICE(&s->cpu), NULL, errp)) { + return; + } + + if (!sysbus_realize(SYS_BUS_DEVICE(&s->intc), errp)) { + return; + } + sysbusdev = SYS_BUS_DEVICE(&s->intc); + sysbus_mmio_map(sysbusdev, 0, AW_A10_PIC_REG_BASE); + sysbus_connect_irq(sysbusdev, 0, + qdev_get_gpio_in(DEVICE(&s->cpu), ARM_CPU_IRQ)); + sysbus_connect_irq(sysbusdev, 1, + qdev_get_gpio_in(DEVICE(&s->cpu), ARM_CPU_FIQ)); + qdev_pass_gpios(DEVICE(&s->intc), dev, NULL); + + if (!sysbus_realize(SYS_BUS_DEVICE(&s->timer), errp)) { + return; + } + sysbusdev = SYS_BUS_DEVICE(&s->timer); + sysbus_mmio_map(sysbusdev, 0, AW_A10_PIT_REG_BASE); + sysbus_connect_irq(sysbusdev, 0, qdev_get_gpio_in(dev, 22)); + sysbus_connect_irq(sysbusdev, 1, qdev_get_gpio_in(dev, 23)); + sysbus_connect_irq(sysbusdev, 2, qdev_get_gpio_in(dev, 24)); + sysbus_connect_irq(sysbusdev, 3, qdev_get_gpio_in(dev, 25)); + sysbus_connect_irq(sysbusdev, 4, qdev_get_gpio_in(dev, 67)); + sysbus_connect_irq(sysbusdev, 5, qdev_get_gpio_in(dev, 68)); + + memory_region_init_ram(&s->sram_a, OBJECT(dev), "sram A", 48 * KiB, + &error_fatal); + memory_region_add_subregion(get_system_memory(), 0x00000000, &s->sram_a); + create_unimplemented_device("a10-sram-ctrl", 0x01c00000, 4 * KiB); + + /* FIXME use qdev NIC properties instead of nd_table[] */ + if (nd_table[0].used) { + qemu_check_nic_model(&nd_table[0], TYPE_AW_EMAC); + qdev_set_nic_properties(DEVICE(&s->emac), &nd_table[0]); + } + if (!sysbus_realize(SYS_BUS_DEVICE(&s->emac), errp)) { + return; + } + sysbusdev = SYS_BUS_DEVICE(&s->emac); + sysbus_mmio_map(sysbusdev, 0, AW_A10_EMAC_BASE); + 
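+    /*
+     * Input 55 here is interrupt 55 on the A10 interrupt controller:
+     * the PIC's GPIO inputs were forwarded to this SoC device above
+     * via qdev_pass_gpios().
+     */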
sysbus_connect_irq(sysbusdev, 0, qdev_get_gpio_in(dev, 55));
+
+    if (!sysbus_realize(SYS_BUS_DEVICE(&s->sata), errp)) {
+        return;
+    }
+    sysbus_mmio_map(SYS_BUS_DEVICE(&s->sata), 0, AW_A10_SATA_BASE);
+    sysbus_connect_irq(SYS_BUS_DEVICE(&s->sata), 0, qdev_get_gpio_in(dev, 56));
+
+    /* FIXME use a qdev chardev prop instead of serial_hd() */
+    serial_mm_init(get_system_memory(), AW_A10_UART0_REG_BASE, 2,
+                   qdev_get_gpio_in(dev, 1),
+                   115200, serial_hd(0), DEVICE_NATIVE_ENDIAN);
+
+    if (machine_usb(current_machine)) {
+        int i;
+
+        for (i = 0; i < AW_A10_NUM_USB; i++) {
+            char bus[16];
+
+            sprintf(bus, "usb-bus.%d", i);
+
+            object_property_set_bool(OBJECT(&s->ehci[i]), "companion-enable",
+                                     true, &error_fatal);
+            sysbus_realize(SYS_BUS_DEVICE(&s->ehci[i]), &error_fatal);
+            sysbus_mmio_map(SYS_BUS_DEVICE(&s->ehci[i]), 0,
+                            AW_A10_EHCI_BASE + i * 0x8000);
+            sysbus_connect_irq(SYS_BUS_DEVICE(&s->ehci[i]), 0,
+                               qdev_get_gpio_in(dev, 39 + i));
+
+            object_property_set_str(OBJECT(&s->ohci[i]), "masterbus", bus,
+                                    &error_fatal);
+            sysbus_realize(SYS_BUS_DEVICE(&s->ohci[i]), &error_fatal);
+            sysbus_mmio_map(SYS_BUS_DEVICE(&s->ohci[i]), 0,
+                            AW_A10_OHCI_BASE + i * 0x8000);
+            sysbus_connect_irq(SYS_BUS_DEVICE(&s->ohci[i]), 0,
+                               qdev_get_gpio_in(dev, 64 + i));
+        }
+    }
+
+    /* SD/MMC */
+    object_property_set_link(OBJECT(&s->mmc0), "dma-memory",
+                             OBJECT(get_system_memory()), &error_fatal);
+    sysbus_realize(SYS_BUS_DEVICE(&s->mmc0), &error_fatal);
+    sysbus_mmio_map(SYS_BUS_DEVICE(&s->mmc0), 0, AW_A10_MMC0_BASE);
+    sysbus_connect_irq(SYS_BUS_DEVICE(&s->mmc0), 0, qdev_get_gpio_in(dev, 32));
+    object_property_add_alias(OBJECT(s), "sd-bus", OBJECT(&s->mmc0),
+                              "sd-bus");
+
+    /* RTC */
+    sysbus_realize(SYS_BUS_DEVICE(&s->rtc), &error_fatal);
+    sysbus_mmio_map_overlap(SYS_BUS_DEVICE(&s->rtc), 0, AW_A10_RTC_BASE, 10);
+}
+
+static void aw_a10_class_init(ObjectClass *oc, void *data)
+{
+    DeviceClass *dc = DEVICE_CLASS(oc);
+
+    dc->realize = aw_a10_realize;
+    /* Reason: Uses serial_hds and nd_table in realize function */
+    dc->user_creatable = false;
+}
+
+static const TypeInfo aw_a10_type_info = {
+    .name = TYPE_AW_A10,
+    .parent = TYPE_DEVICE,
+    .instance_size = sizeof(AwA10State),
+    .instance_init = aw_a10_init,
+    .class_init = aw_a10_class_init,
+};
+
+static void aw_a10_register_types(void)
+{
+    type_register_static(&aw_a10_type_info);
+}
+
+type_init(aw_a10_register_types)
diff --git a/hw/arm/allwinner-h3.c b/hw/arm/allwinner-h3.c
new file mode 100644
index 000000000..f9b7ed187
--- /dev/null
+++ b/hw/arm/allwinner-h3.c
@@ -0,0 +1,457 @@
+/*
+ * Allwinner H3 System on Chip emulation
+ *
+ * Copyright (C) 2019 Niek Linnenbank
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/error-report.h" +#include "qemu/module.h" +#include "qemu/units.h" +#include "hw/qdev-core.h" +#include "hw/sysbus.h" +#include "hw/char/serial.h" +#include "hw/misc/unimp.h" +#include "hw/usb/hcd-ehci.h" +#include "hw/loader.h" +#include "sysemu/sysemu.h" +#include "hw/arm/allwinner-h3.h" + +/* Memory map */ +const hwaddr allwinner_h3_memmap[] = { + [AW_H3_DEV_SRAM_A1] = 0x00000000, + [AW_H3_DEV_SRAM_A2] = 0x00044000, + [AW_H3_DEV_SRAM_C] = 0x00010000, + [AW_H3_DEV_SYSCTRL] = 0x01c00000, + [AW_H3_DEV_MMC0] = 0x01c0f000, + [AW_H3_DEV_SID] = 0x01c14000, + [AW_H3_DEV_EHCI0] = 0x01c1a000, + [AW_H3_DEV_OHCI0] = 0x01c1a400, + [AW_H3_DEV_EHCI1] = 0x01c1b000, + [AW_H3_DEV_OHCI1] = 0x01c1b400, + [AW_H3_DEV_EHCI2] = 0x01c1c000, + [AW_H3_DEV_OHCI2] = 0x01c1c400, + [AW_H3_DEV_EHCI3] = 0x01c1d000, + [AW_H3_DEV_OHCI3] = 0x01c1d400, + [AW_H3_DEV_CCU] = 0x01c20000, + [AW_H3_DEV_PIT] = 0x01c20c00, + [AW_H3_DEV_UART0] = 0x01c28000, + [AW_H3_DEV_UART1] = 0x01c28400, + [AW_H3_DEV_UART2] = 0x01c28800, + [AW_H3_DEV_UART3] = 0x01c28c00, + [AW_H3_DEV_EMAC] = 0x01c30000, + [AW_H3_DEV_DRAMCOM] = 0x01c62000, + [AW_H3_DEV_DRAMCTL] = 0x01c63000, + [AW_H3_DEV_DRAMPHY] = 0x01c65000, + [AW_H3_DEV_GIC_DIST] = 0x01c81000, + [AW_H3_DEV_GIC_CPU] = 0x01c82000, + [AW_H3_DEV_GIC_HYP] = 0x01c84000, + [AW_H3_DEV_GIC_VCPU] = 0x01c86000, + [AW_H3_DEV_RTC] = 0x01f00000, + [AW_H3_DEV_CPUCFG] = 0x01f01c00, + [AW_H3_DEV_SDRAM] = 0x40000000 +}; + +/* List of unimplemented devices */ +struct AwH3Unimplemented { + const char *device_name; + hwaddr base; + hwaddr size; +} unimplemented[] = { + { "d-engine", 0x01000000, 4 * MiB }, + { "d-inter", 0x01400000, 128 * KiB }, + { "dma", 0x01c02000, 4 * KiB }, + { "nfdc", 0x01c03000, 4 * KiB }, + { "ts", 0x01c06000, 4 * KiB }, + { "keymem", 0x01c0b000, 4 * KiB }, + { "lcd0", 0x01c0c000, 4 * KiB }, + { "lcd1", 0x01c0d000, 4 * KiB }, + { "ve", 0x01c0e000, 4 * KiB }, + { "mmc1", 0x01c10000, 4 * KiB }, + { "mmc2", 0x01c11000, 4 * KiB }, + { "crypto", 0x01c15000, 4 * KiB }, + { "msgbox", 0x01c17000, 4 * KiB }, + { "spinlock", 0x01c18000, 4 * KiB }, + { "usb0-otg", 0x01c19000, 4 * KiB }, + { "usb0-phy", 0x01c1a000, 4 * KiB }, + { "usb1-phy", 0x01c1b000, 4 * KiB }, + { "usb2-phy", 0x01c1c000, 4 * KiB }, + { "usb3-phy", 0x01c1d000, 4 * KiB }, + { "smc", 0x01c1e000, 4 * KiB }, + { "pio", 0x01c20800, 1 * KiB }, + { "owa", 0x01c21000, 1 * KiB }, + { "pwm", 0x01c21400, 1 * KiB }, + { "keyadc", 0x01c21800, 1 * KiB }, + { "pcm0", 0x01c22000, 1 * KiB }, + { "pcm1", 0x01c22400, 1 * KiB }, + { "pcm2", 0x01c22800, 1 * KiB }, + { "audio", 0x01c22c00, 2 * KiB }, + { "smta", 0x01c23400, 1 * KiB }, + { "ths", 0x01c25000, 1 * KiB }, + { "uart0", 0x01c28000, 1 * KiB }, + { "uart1", 0x01c28400, 1 * KiB }, + { "uart2", 0x01c28800, 1 * KiB }, + { "uart3", 0x01c28c00, 1 * KiB }, + { "twi0", 0x01c2ac00, 1 * KiB }, + { "twi1", 0x01c2b000, 1 * KiB }, + { "twi2", 0x01c2b400, 1 * KiB }, + { "scr", 0x01c2c400, 1 * KiB }, + { "gpu", 0x01c40000, 64 * KiB }, + { "hstmr", 0x01c60000, 4 * KiB }, + { "spi0", 0x01c68000, 4 * KiB }, + { "spi1", 0x01c69000, 4 * KiB }, + { "csi", 0x01cb0000, 320 * KiB }, + { "tve", 0x01e00000, 64 * KiB }, + { "hdmi", 0x01ee0000, 128 * KiB }, + { "r_timer", 0x01f00800, 1 * KiB }, + { "r_intc", 0x01f00c00, 1 * KiB }, + { "r_wdog", 0x01f01000, 1 * KiB }, + { "r_prcm", 0x01f01400, 1 * KiB }, + { "r_twd", 0x01f01800, 1 * KiB }, + { "r_cir-rx", 0x01f02000, 1 * KiB }, + { "r_twi", 0x01f02400, 1 * KiB }, + { "r_uart", 0x01f02800, 1 * KiB }, + 
{ "r_pio", 0x01f02c00, 1 * KiB }, + { "r_pwm", 0x01f03800, 1 * KiB }, + { "core-dbg", 0x3f500000, 128 * KiB }, + { "tsgen-ro", 0x3f506000, 4 * KiB }, + { "tsgen-ctl", 0x3f507000, 4 * KiB }, + { "ddr-mem", 0x40000000, 2 * GiB }, + { "n-brom", 0xffff0000, 32 * KiB }, + { "s-brom", 0xffff0000, 64 * KiB } +}; + +/* Per Processor Interrupts */ +enum { + AW_H3_GIC_PPI_MAINT = 9, + AW_H3_GIC_PPI_HYPTIMER = 10, + AW_H3_GIC_PPI_VIRTTIMER = 11, + AW_H3_GIC_PPI_SECTIMER = 13, + AW_H3_GIC_PPI_PHYSTIMER = 14 +}; + +/* Shared Processor Interrupts */ +enum { + AW_H3_GIC_SPI_UART0 = 0, + AW_H3_GIC_SPI_UART1 = 1, + AW_H3_GIC_SPI_UART2 = 2, + AW_H3_GIC_SPI_UART3 = 3, + AW_H3_GIC_SPI_TIMER0 = 18, + AW_H3_GIC_SPI_TIMER1 = 19, + AW_H3_GIC_SPI_MMC0 = 60, + AW_H3_GIC_SPI_EHCI0 = 72, + AW_H3_GIC_SPI_OHCI0 = 73, + AW_H3_GIC_SPI_EHCI1 = 74, + AW_H3_GIC_SPI_OHCI1 = 75, + AW_H3_GIC_SPI_EHCI2 = 76, + AW_H3_GIC_SPI_OHCI2 = 77, + AW_H3_GIC_SPI_EHCI3 = 78, + AW_H3_GIC_SPI_OHCI3 = 79, + AW_H3_GIC_SPI_EMAC = 82 +}; + +/* Allwinner H3 general constants */ +enum { + AW_H3_GIC_NUM_SPI = 128 +}; + +void allwinner_h3_bootrom_setup(AwH3State *s, BlockBackend *blk) +{ + const int64_t rom_size = 32 * KiB; + g_autofree uint8_t *buffer = g_new0(uint8_t, rom_size); + + if (blk_pread(blk, 8 * KiB, buffer, rom_size) < 0) { + error_setg(&error_fatal, "%s: failed to read BlockBackend data", + __func__); + return; + } + + rom_add_blob("allwinner-h3.bootrom", buffer, rom_size, + rom_size, s->memmap[AW_H3_DEV_SRAM_A1], + NULL, NULL, NULL, NULL, false); +} + +static void allwinner_h3_init(Object *obj) +{ + AwH3State *s = AW_H3(obj); + + s->memmap = allwinner_h3_memmap; + + for (int i = 0; i < AW_H3_NUM_CPUS; i++) { + object_initialize_child(obj, "cpu[*]", &s->cpus[i], + ARM_CPU_TYPE_NAME("cortex-a7")); + } + + object_initialize_child(obj, "gic", &s->gic, TYPE_ARM_GIC); + + object_initialize_child(obj, "timer", &s->timer, TYPE_AW_A10_PIT); + object_property_add_alias(obj, "clk0-freq", OBJECT(&s->timer), + "clk0-freq"); + object_property_add_alias(obj, "clk1-freq", OBJECT(&s->timer), + "clk1-freq"); + + object_initialize_child(obj, "ccu", &s->ccu, TYPE_AW_H3_CCU); + + object_initialize_child(obj, "sysctrl", &s->sysctrl, TYPE_AW_H3_SYSCTRL); + + object_initialize_child(obj, "cpucfg", &s->cpucfg, TYPE_AW_CPUCFG); + + object_initialize_child(obj, "sid", &s->sid, TYPE_AW_SID); + object_property_add_alias(obj, "identifier", OBJECT(&s->sid), + "identifier"); + + object_initialize_child(obj, "mmc0", &s->mmc0, TYPE_AW_SDHOST_SUN5I); + + object_initialize_child(obj, "emac", &s->emac, TYPE_AW_SUN8I_EMAC); + + object_initialize_child(obj, "dramc", &s->dramc, TYPE_AW_H3_DRAMC); + object_property_add_alias(obj, "ram-addr", OBJECT(&s->dramc), + "ram-addr"); + object_property_add_alias(obj, "ram-size", OBJECT(&s->dramc), + "ram-size"); + + object_initialize_child(obj, "rtc", &s->rtc, TYPE_AW_RTC_SUN6I); +} + +static void allwinner_h3_realize(DeviceState *dev, Error **errp) +{ + AwH3State *s = AW_H3(dev); + unsigned i; + + /* CPUs */ + for (i = 0; i < AW_H3_NUM_CPUS; i++) { + + /* Provide Power State Coordination Interface */ + qdev_prop_set_int32(DEVICE(&s->cpus[i]), "psci-conduit", + QEMU_PSCI_CONDUIT_SMC); + + /* Disable secondary CPUs */ + qdev_prop_set_bit(DEVICE(&s->cpus[i]), "start-powered-off", + i > 0); + + /* All exception levels required */ + qdev_prop_set_bit(DEVICE(&s->cpus[i]), "has_el3", true); + qdev_prop_set_bit(DEVICE(&s->cpus[i]), "has_el2", true); + + /* Mark realized */ + qdev_realize(DEVICE(&s->cpus[i]), NULL, &error_fatal); + } + + /* 
Generic Interrupt Controller */
+    qdev_prop_set_uint32(DEVICE(&s->gic), "num-irq", AW_H3_GIC_NUM_SPI +
+                         GIC_INTERNAL);
+    qdev_prop_set_uint32(DEVICE(&s->gic), "revision", 2);
+    qdev_prop_set_uint32(DEVICE(&s->gic), "num-cpu", AW_H3_NUM_CPUS);
+    qdev_prop_set_bit(DEVICE(&s->gic), "has-security-extensions", false);
+    qdev_prop_set_bit(DEVICE(&s->gic), "has-virtualization-extensions", true);
+    sysbus_realize(SYS_BUS_DEVICE(&s->gic), &error_fatal);
+
+    sysbus_mmio_map(SYS_BUS_DEVICE(&s->gic), 0, s->memmap[AW_H3_DEV_GIC_DIST]);
+    sysbus_mmio_map(SYS_BUS_DEVICE(&s->gic), 1, s->memmap[AW_H3_DEV_GIC_CPU]);
+    sysbus_mmio_map(SYS_BUS_DEVICE(&s->gic), 2, s->memmap[AW_H3_DEV_GIC_HYP]);
+    sysbus_mmio_map(SYS_BUS_DEVICE(&s->gic), 3, s->memmap[AW_H3_DEV_GIC_VCPU]);
+
+    /*
+     * Wire the outputs from each CPU's generic timer and the GIC
+     * maintenance interrupt signal to the appropriate GIC PPI inputs,
+     * and the GIC's IRQ/FIQ/VIRQ/VFIQ interrupt outputs to the CPU's inputs.
+     */
+    for (i = 0; i < AW_H3_NUM_CPUS; i++) {
+        DeviceState *cpudev = DEVICE(&s->cpus[i]);
+        int ppibase = AW_H3_GIC_NUM_SPI + i * GIC_INTERNAL + GIC_NR_SGIS;
+        int irq;
+        /*
+         * Mapping from the output timer irq lines from the CPU to the
+         * GIC PPI inputs used for this board.
+         */
+        const int timer_irq[] = {
+            [GTIMER_PHYS] = AW_H3_GIC_PPI_PHYSTIMER,
+            [GTIMER_VIRT] = AW_H3_GIC_PPI_VIRTTIMER,
+            [GTIMER_HYP] = AW_H3_GIC_PPI_HYPTIMER,
+            [GTIMER_SEC] = AW_H3_GIC_PPI_SECTIMER,
+        };
+
+        /* Connect CPU timer outputs to GIC PPI inputs */
+        for (irq = 0; irq < ARRAY_SIZE(timer_irq); irq++) {
+            qdev_connect_gpio_out(cpudev, irq,
+                                  qdev_get_gpio_in(DEVICE(&s->gic),
+                                                   ppibase + timer_irq[irq]));
+        }
+
+        /* Connect GIC outputs to CPU interrupt inputs */
+        sysbus_connect_irq(SYS_BUS_DEVICE(&s->gic), i,
+                           qdev_get_gpio_in(cpudev, ARM_CPU_IRQ));
+        sysbus_connect_irq(SYS_BUS_DEVICE(&s->gic), i + AW_H3_NUM_CPUS,
+                           qdev_get_gpio_in(cpudev, ARM_CPU_FIQ));
+        sysbus_connect_irq(SYS_BUS_DEVICE(&s->gic), i + (2 * AW_H3_NUM_CPUS),
+                           qdev_get_gpio_in(cpudev, ARM_CPU_VIRQ));
+        sysbus_connect_irq(SYS_BUS_DEVICE(&s->gic), i + (3 * AW_H3_NUM_CPUS),
+                           qdev_get_gpio_in(cpudev, ARM_CPU_VFIQ));
+
+        /* GIC maintenance signal */
+        sysbus_connect_irq(SYS_BUS_DEVICE(&s->gic), i + (4 * AW_H3_NUM_CPUS),
+                           qdev_get_gpio_in(DEVICE(&s->gic),
+                                            ppibase + AW_H3_GIC_PPI_MAINT));
+    }
+
+    /* Timer */
+    sysbus_realize(SYS_BUS_DEVICE(&s->timer), &error_fatal);
+    sysbus_mmio_map(SYS_BUS_DEVICE(&s->timer), 0, s->memmap[AW_H3_DEV_PIT]);
+    sysbus_connect_irq(SYS_BUS_DEVICE(&s->timer), 0,
+                       qdev_get_gpio_in(DEVICE(&s->gic), AW_H3_GIC_SPI_TIMER0));
+    sysbus_connect_irq(SYS_BUS_DEVICE(&s->timer), 1,
+                       qdev_get_gpio_in(DEVICE(&s->gic), AW_H3_GIC_SPI_TIMER1));
+
+    /* SRAM */
+    memory_region_init_ram(&s->sram_a1, OBJECT(dev), "sram A1",
+                           64 * KiB, &error_abort);
+    memory_region_init_ram(&s->sram_a2, OBJECT(dev), "sram A2",
+                           32 * KiB, &error_abort);
+    memory_region_init_ram(&s->sram_c, OBJECT(dev), "sram C",
+                           44 * KiB, &error_abort);
+    memory_region_add_subregion(get_system_memory(), s->memmap[AW_H3_DEV_SRAM_A1],
+                                &s->sram_a1);
+    memory_region_add_subregion(get_system_memory(), s->memmap[AW_H3_DEV_SRAM_A2],
+                                &s->sram_a2);
+    memory_region_add_subregion(get_system_memory(), s->memmap[AW_H3_DEV_SRAM_C],
+                                &s->sram_c);
+
+    /* Clock Control Unit */
+    sysbus_realize(SYS_BUS_DEVICE(&s->ccu), &error_fatal);
+    sysbus_mmio_map(SYS_BUS_DEVICE(&s->ccu), 0, s->memmap[AW_H3_DEV_CCU]);
+
+    /* System Control */
+    sysbus_realize(SYS_BUS_DEVICE(&s->sysctrl), &error_fatal);
+
sysbus_mmio_map(SYS_BUS_DEVICE(&s->sysctrl), 0, s->memmap[AW_H3_DEV_SYSCTRL]); + + /* CPU Configuration */ + sysbus_realize(SYS_BUS_DEVICE(&s->cpucfg), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->cpucfg), 0, s->memmap[AW_H3_DEV_CPUCFG]); + + /* Security Identifier */ + sysbus_realize(SYS_BUS_DEVICE(&s->sid), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->sid), 0, s->memmap[AW_H3_DEV_SID]); + + /* SD/MMC */ + object_property_set_link(OBJECT(&s->mmc0), "dma-memory", + OBJECT(get_system_memory()), &error_fatal); + sysbus_realize(SYS_BUS_DEVICE(&s->mmc0), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->mmc0), 0, s->memmap[AW_H3_DEV_MMC0]); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->mmc0), 0, + qdev_get_gpio_in(DEVICE(&s->gic), AW_H3_GIC_SPI_MMC0)); + + object_property_add_alias(OBJECT(s), "sd-bus", OBJECT(&s->mmc0), + "sd-bus"); + + /* EMAC */ + /* FIXME use qdev NIC properties instead of nd_table[] */ + if (nd_table[0].used) { + qemu_check_nic_model(&nd_table[0], TYPE_AW_SUN8I_EMAC); + qdev_set_nic_properties(DEVICE(&s->emac), &nd_table[0]); + } + object_property_set_link(OBJECT(&s->emac), "dma-memory", + OBJECT(get_system_memory()), &error_fatal); + sysbus_realize(SYS_BUS_DEVICE(&s->emac), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->emac), 0, s->memmap[AW_H3_DEV_EMAC]); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->emac), 0, + qdev_get_gpio_in(DEVICE(&s->gic), AW_H3_GIC_SPI_EMAC)); + + /* Universal Serial Bus */ + sysbus_create_simple(TYPE_AW_H3_EHCI, s->memmap[AW_H3_DEV_EHCI0], + qdev_get_gpio_in(DEVICE(&s->gic), + AW_H3_GIC_SPI_EHCI0)); + sysbus_create_simple(TYPE_AW_H3_EHCI, s->memmap[AW_H3_DEV_EHCI1], + qdev_get_gpio_in(DEVICE(&s->gic), + AW_H3_GIC_SPI_EHCI1)); + sysbus_create_simple(TYPE_AW_H3_EHCI, s->memmap[AW_H3_DEV_EHCI2], + qdev_get_gpio_in(DEVICE(&s->gic), + AW_H3_GIC_SPI_EHCI2)); + sysbus_create_simple(TYPE_AW_H3_EHCI, s->memmap[AW_H3_DEV_EHCI3], + qdev_get_gpio_in(DEVICE(&s->gic), + AW_H3_GIC_SPI_EHCI3)); + + sysbus_create_simple("sysbus-ohci", s->memmap[AW_H3_DEV_OHCI0], + qdev_get_gpio_in(DEVICE(&s->gic), + AW_H3_GIC_SPI_OHCI0)); + sysbus_create_simple("sysbus-ohci", s->memmap[AW_H3_DEV_OHCI1], + qdev_get_gpio_in(DEVICE(&s->gic), + AW_H3_GIC_SPI_OHCI1)); + sysbus_create_simple("sysbus-ohci", s->memmap[AW_H3_DEV_OHCI2], + qdev_get_gpio_in(DEVICE(&s->gic), + AW_H3_GIC_SPI_OHCI2)); + sysbus_create_simple("sysbus-ohci", s->memmap[AW_H3_DEV_OHCI3], + qdev_get_gpio_in(DEVICE(&s->gic), + AW_H3_GIC_SPI_OHCI3)); + + /* UART0. For future clocktree API: All UARTS are connected to APB2_CLK. 
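+     * Until that exists, each UART below is modelled as a fixed-rate
+     * 16550-compatible UART via serial_mm_init().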
*/ + serial_mm_init(get_system_memory(), s->memmap[AW_H3_DEV_UART0], 2, + qdev_get_gpio_in(DEVICE(&s->gic), AW_H3_GIC_SPI_UART0), + 115200, serial_hd(0), DEVICE_NATIVE_ENDIAN); + /* UART1 */ + serial_mm_init(get_system_memory(), s->memmap[AW_H3_DEV_UART1], 2, + qdev_get_gpio_in(DEVICE(&s->gic), AW_H3_GIC_SPI_UART1), + 115200, serial_hd(1), DEVICE_NATIVE_ENDIAN); + /* UART2 */ + serial_mm_init(get_system_memory(), s->memmap[AW_H3_DEV_UART2], 2, + qdev_get_gpio_in(DEVICE(&s->gic), AW_H3_GIC_SPI_UART2), + 115200, serial_hd(2), DEVICE_NATIVE_ENDIAN); + /* UART3 */ + serial_mm_init(get_system_memory(), s->memmap[AW_H3_DEV_UART3], 2, + qdev_get_gpio_in(DEVICE(&s->gic), AW_H3_GIC_SPI_UART3), + 115200, serial_hd(3), DEVICE_NATIVE_ENDIAN); + + /* DRAMC */ + sysbus_realize(SYS_BUS_DEVICE(&s->dramc), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->dramc), 0, s->memmap[AW_H3_DEV_DRAMCOM]); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->dramc), 1, s->memmap[AW_H3_DEV_DRAMCTL]); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->dramc), 2, s->memmap[AW_H3_DEV_DRAMPHY]); + + /* RTC */ + sysbus_realize(SYS_BUS_DEVICE(&s->rtc), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->rtc), 0, s->memmap[AW_H3_DEV_RTC]); + + /* Unimplemented devices */ + for (i = 0; i < ARRAY_SIZE(unimplemented); i++) { + create_unimplemented_device(unimplemented[i].device_name, + unimplemented[i].base, + unimplemented[i].size); + } +} + +static void allwinner_h3_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + + dc->realize = allwinner_h3_realize; + /* Reason: uses serial_hd() in realize function */ + dc->user_creatable = false; +} + +static const TypeInfo allwinner_h3_type_info = { + .name = TYPE_AW_H3, + .parent = TYPE_DEVICE, + .instance_size = sizeof(AwH3State), + .instance_init = allwinner_h3_init, + .class_init = allwinner_h3_class_init, +}; + +static void allwinner_h3_register_types(void) +{ + type_register_static(&allwinner_h3_type_info); +} + +type_init(allwinner_h3_register_types) diff --git a/hw/arm/armsse.c b/hw/arm/armsse.c new file mode 100644 index 000000000..aecdeb981 --- /dev/null +++ b/hw/arm/armsse.c @@ -0,0 +1,1729 @@ +/* + * Arm SSE (Subsystems for Embedded): IoTKit + * + * Copyright (c) 2018 Linaro Limited + * Written by Peter Maydell + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 or + * (at your option) any later version. + */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "qemu/bitops.h" +#include "qemu/units.h" +#include "qapi/error.h" +#include "trace.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "hw/registerfields.h" +#include "hw/arm/armsse.h" +#include "hw/arm/armsse-version.h" +#include "hw/arm/boot.h" +#include "hw/irq.h" +#include "hw/qdev-clock.h" + +/* + * The SSE-300 puts some devices in different places to the + * SSE-200 (and original IoTKit). We use an array of these structs + * to define how each variant lays out these devices. (Parts of the + * SoC that are the same for all variants aren't handled via these + * data structures.) + */ + +#define NO_IRQ -1 +#define NO_PPC -1 +/* + * Special values for ARMSSEDeviceInfo::irq to indicate that this + * device uses one of the inputs to the OR gate that feeds into the + * CPU NMI input. 
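+ * (A devinfo entry with .irq = NMI_0 or NMI_1 is wired by armsse_realize()
+ * to input 0 or 1 of that OR gate, rather than to an NVIC interrupt line.)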
+ */ +#define NMI_0 10000 +#define NMI_1 10001 + +typedef struct ARMSSEDeviceInfo { + const char *name; /* name to use for the QOM object; NULL terminates list */ + const char *type; /* QOM type name */ + unsigned int index; /* Which of the N devices of this type is this ? */ + hwaddr addr; + hwaddr size; /* only needed for TYPE_UNIMPLEMENTED_DEVICE */ + int ppc; /* Index of APB PPC this device is wired up to, or NO_PPC */ + int ppc_port; /* Port number of this device on the PPC */ + int irq; /* NO_IRQ, or 0..NUM_SSE_IRQS-1, or NMI_0 or NMI_1 */ + bool slowclk; /* true if device uses the slow 32KHz clock */ +} ARMSSEDeviceInfo; + +struct ARMSSEInfo { + const char *name; + const char *cpu_type; + uint32_t sse_version; + int sram_banks; + uint32_t sram_bank_base; + int num_cpus; + uint32_t sys_version; + uint32_t iidr; + uint32_t cpuwait_rst; + bool has_mhus; + bool has_cachectrl; + bool has_cpusecctrl; + bool has_cpuid; + bool has_cpu_pwrctrl; + bool has_sse_counter; + bool has_tcms; + Property *props; + const ARMSSEDeviceInfo *devinfo; + const bool *irq_is_common; +}; + +static Property iotkit_properties[] = { + DEFINE_PROP_LINK("memory", ARMSSE, board_memory, TYPE_MEMORY_REGION, + MemoryRegion *), + DEFINE_PROP_UINT32("EXP_NUMIRQ", ARMSSE, exp_numirq, 64), + DEFINE_PROP_UINT32("SRAM_ADDR_WIDTH", ARMSSE, sram_addr_width, 15), + DEFINE_PROP_UINT32("init-svtor", ARMSSE, init_svtor, 0x10000000), + DEFINE_PROP_BOOL("CPU0_FPU", ARMSSE, cpu_fpu[0], true), + DEFINE_PROP_BOOL("CPU0_DSP", ARMSSE, cpu_dsp[0], true), + DEFINE_PROP_END_OF_LIST() +}; + +static Property sse200_properties[] = { + DEFINE_PROP_LINK("memory", ARMSSE, board_memory, TYPE_MEMORY_REGION, + MemoryRegion *), + DEFINE_PROP_UINT32("EXP_NUMIRQ", ARMSSE, exp_numirq, 64), + DEFINE_PROP_UINT32("SRAM_ADDR_WIDTH", ARMSSE, sram_addr_width, 15), + DEFINE_PROP_UINT32("init-svtor", ARMSSE, init_svtor, 0x10000000), + DEFINE_PROP_BOOL("CPU0_FPU", ARMSSE, cpu_fpu[0], false), + DEFINE_PROP_BOOL("CPU0_DSP", ARMSSE, cpu_dsp[0], false), + DEFINE_PROP_BOOL("CPU1_FPU", ARMSSE, cpu_fpu[1], true), + DEFINE_PROP_BOOL("CPU1_DSP", ARMSSE, cpu_dsp[1], true), + DEFINE_PROP_END_OF_LIST() +}; + +static Property sse300_properties[] = { + DEFINE_PROP_LINK("memory", ARMSSE, board_memory, TYPE_MEMORY_REGION, + MemoryRegion *), + DEFINE_PROP_UINT32("EXP_NUMIRQ", ARMSSE, exp_numirq, 64), + DEFINE_PROP_UINT32("SRAM_ADDR_WIDTH", ARMSSE, sram_addr_width, 18), + DEFINE_PROP_UINT32("init-svtor", ARMSSE, init_svtor, 0x10000000), + DEFINE_PROP_BOOL("CPU0_FPU", ARMSSE, cpu_fpu[0], true), + DEFINE_PROP_BOOL("CPU0_DSP", ARMSSE, cpu_dsp[0], true), + DEFINE_PROP_END_OF_LIST() +}; + +static const ARMSSEDeviceInfo iotkit_devices[] = { + { + .name = "timer0", + .type = TYPE_CMSDK_APB_TIMER, + .index = 0, + .addr = 0x40000000, + .ppc = 0, + .ppc_port = 0, + .irq = 3, + }, + { + .name = "timer1", + .type = TYPE_CMSDK_APB_TIMER, + .index = 1, + .addr = 0x40001000, + .ppc = 0, + .ppc_port = 1, + .irq = 4, + }, + { + .name = "s32ktimer", + .type = TYPE_CMSDK_APB_TIMER, + .index = 2, + .addr = 0x4002f000, + .ppc = 1, + .ppc_port = 0, + .irq = 2, + .slowclk = true, + }, + { + .name = "dualtimer", + .type = TYPE_CMSDK_APB_DUALTIMER, + .index = 0, + .addr = 0x40002000, + .ppc = 0, + .ppc_port = 2, + .irq = 5, + }, + { + .name = "s32kwatchdog", + .type = TYPE_CMSDK_APB_WATCHDOG, + .index = 0, + .addr = 0x5002e000, + .ppc = NO_PPC, + .irq = NMI_0, + .slowclk = true, + }, + { + .name = "nswatchdog", + .type = TYPE_CMSDK_APB_WATCHDOG, + .index = 1, + .addr = 0x40081000, + .ppc = NO_PPC, + 
.irq = 1, + }, + { + .name = "swatchdog", + .type = TYPE_CMSDK_APB_WATCHDOG, + .index = 2, + .addr = 0x50081000, + .ppc = NO_PPC, + .irq = NMI_1, + }, + { + .name = "armsse-sysinfo", + .type = TYPE_IOTKIT_SYSINFO, + .index = 0, + .addr = 0x40020000, + .ppc = NO_PPC, + .irq = NO_IRQ, + }, + { + .name = "armsse-sysctl", + .type = TYPE_IOTKIT_SYSCTL, + .index = 0, + .addr = 0x50021000, + .ppc = NO_PPC, + .irq = NO_IRQ, + }, + { + .name = NULL, + } +}; + +static const ARMSSEDeviceInfo sse200_devices[] = { + { + .name = "timer0", + .type = TYPE_CMSDK_APB_TIMER, + .index = 0, + .addr = 0x40000000, + .ppc = 0, + .ppc_port = 0, + .irq = 3, + }, + { + .name = "timer1", + .type = TYPE_CMSDK_APB_TIMER, + .index = 1, + .addr = 0x40001000, + .ppc = 0, + .ppc_port = 1, + .irq = 4, + }, + { + .name = "s32ktimer", + .type = TYPE_CMSDK_APB_TIMER, + .index = 2, + .addr = 0x4002f000, + .ppc = 1, + .ppc_port = 0, + .irq = 2, + .slowclk = true, + }, + { + .name = "dualtimer", + .type = TYPE_CMSDK_APB_DUALTIMER, + .index = 0, + .addr = 0x40002000, + .ppc = 0, + .ppc_port = 2, + .irq = 5, + }, + { + .name = "s32kwatchdog", + .type = TYPE_CMSDK_APB_WATCHDOG, + .index = 0, + .addr = 0x5002e000, + .ppc = NO_PPC, + .irq = NMI_0, + .slowclk = true, + }, + { + .name = "nswatchdog", + .type = TYPE_CMSDK_APB_WATCHDOG, + .index = 1, + .addr = 0x40081000, + .ppc = NO_PPC, + .irq = 1, + }, + { + .name = "swatchdog", + .type = TYPE_CMSDK_APB_WATCHDOG, + .index = 2, + .addr = 0x50081000, + .ppc = NO_PPC, + .irq = NMI_1, + }, + { + .name = "armsse-sysinfo", + .type = TYPE_IOTKIT_SYSINFO, + .index = 0, + .addr = 0x40020000, + .ppc = NO_PPC, + .irq = NO_IRQ, + }, + { + .name = "armsse-sysctl", + .type = TYPE_IOTKIT_SYSCTL, + .index = 0, + .addr = 0x50021000, + .ppc = NO_PPC, + .irq = NO_IRQ, + }, + { + .name = "CPU0CORE_PPU", + .type = TYPE_UNIMPLEMENTED_DEVICE, + .index = 0, + .addr = 0x50023000, + .size = 0x1000, + .ppc = NO_PPC, + .irq = NO_IRQ, + }, + { + .name = "CPU1CORE_PPU", + .type = TYPE_UNIMPLEMENTED_DEVICE, + .index = 1, + .addr = 0x50025000, + .size = 0x1000, + .ppc = NO_PPC, + .irq = NO_IRQ, + }, + { + .name = "DBG_PPU", + .type = TYPE_UNIMPLEMENTED_DEVICE, + .index = 2, + .addr = 0x50029000, + .size = 0x1000, + .ppc = NO_PPC, + .irq = NO_IRQ, + }, + { + .name = "RAM0_PPU", + .type = TYPE_UNIMPLEMENTED_DEVICE, + .index = 3, + .addr = 0x5002a000, + .size = 0x1000, + .ppc = NO_PPC, + .irq = NO_IRQ, + }, + { + .name = "RAM1_PPU", + .type = TYPE_UNIMPLEMENTED_DEVICE, + .index = 4, + .addr = 0x5002b000, + .size = 0x1000, + .ppc = NO_PPC, + .irq = NO_IRQ, + }, + { + .name = "RAM2_PPU", + .type = TYPE_UNIMPLEMENTED_DEVICE, + .index = 5, + .addr = 0x5002c000, + .size = 0x1000, + .ppc = NO_PPC, + .irq = NO_IRQ, + }, + { + .name = "RAM3_PPU", + .type = TYPE_UNIMPLEMENTED_DEVICE, + .index = 6, + .addr = 0x5002d000, + .size = 0x1000, + .ppc = NO_PPC, + .irq = NO_IRQ, + }, + { + .name = "SYS_PPU", + .type = TYPE_UNIMPLEMENTED_DEVICE, + .index = 7, + .addr = 0x50022000, + .size = 0x1000, + .ppc = NO_PPC, + .irq = NO_IRQ, + }, + { + .name = NULL, + } +}; + +static const ARMSSEDeviceInfo sse300_devices[] = { + { + .name = "timer0", + .type = TYPE_SSE_TIMER, + .index = 0, + .addr = 0x48000000, + .ppc = 0, + .ppc_port = 0, + .irq = 3, + }, + { + .name = "timer1", + .type = TYPE_SSE_TIMER, + .index = 1, + .addr = 0x48001000, + .ppc = 0, + .ppc_port = 1, + .irq = 4, + }, + { + .name = "timer2", + .type = TYPE_SSE_TIMER, + .index = 2, + .addr = 0x48002000, + .ppc = 0, + .ppc_port = 2, + .irq = 5, + }, + { + .name = "timer3", + .type = 
TYPE_SSE_TIMER, + .index = 3, + .addr = 0x48003000, + .ppc = 0, + .ppc_port = 5, + .irq = 27, + }, + { + .name = "s32ktimer", + .type = TYPE_CMSDK_APB_TIMER, + .index = 0, + .addr = 0x4802f000, + .ppc = 1, + .ppc_port = 0, + .irq = 2, + .slowclk = true, + }, + { + .name = "s32kwatchdog", + .type = TYPE_CMSDK_APB_WATCHDOG, + .index = 0, + .addr = 0x4802e000, + .ppc = NO_PPC, + .irq = NMI_0, + .slowclk = true, + }, + { + .name = "watchdog", + .type = TYPE_UNIMPLEMENTED_DEVICE, + .index = 0, + .addr = 0x48040000, + .size = 0x2000, + .ppc = NO_PPC, + .irq = NO_IRQ, + }, + { + .name = "armsse-sysinfo", + .type = TYPE_IOTKIT_SYSINFO, + .index = 0, + .addr = 0x48020000, + .ppc = NO_PPC, + .irq = NO_IRQ, + }, + { + .name = "armsse-sysctl", + .type = TYPE_IOTKIT_SYSCTL, + .index = 0, + .addr = 0x58021000, + .ppc = NO_PPC, + .irq = NO_IRQ, + }, + { + .name = "SYS_PPU", + .type = TYPE_UNIMPLEMENTED_DEVICE, + .index = 1, + .addr = 0x58022000, + .size = 0x1000, + .ppc = NO_PPC, + .irq = NO_IRQ, + }, + { + .name = "CPU0CORE_PPU", + .type = TYPE_UNIMPLEMENTED_DEVICE, + .index = 2, + .addr = 0x50023000, + .size = 0x1000, + .ppc = NO_PPC, + .irq = NO_IRQ, + }, + { + .name = "MGMT_PPU", + .type = TYPE_UNIMPLEMENTED_DEVICE, + .index = 3, + .addr = 0x50028000, + .size = 0x1000, + .ppc = NO_PPC, + .irq = NO_IRQ, + }, + { + .name = "DEBUG_PPU", + .type = TYPE_UNIMPLEMENTED_DEVICE, + .index = 4, + .addr = 0x50029000, + .size = 0x1000, + .ppc = NO_PPC, + .irq = NO_IRQ, + }, + { + .name = NULL, + } +}; + +/* Is internal IRQ n shared between CPUs in a multi-core SSE ? */ +static const bool sse200_irq_is_common[32] = { + [0 ... 5] = true, + /* 6, 7: per-CPU MHU interrupts */ + [8 ... 12] = true, + /* 13: per-CPU icache interrupt */ + /* 14: reserved */ + [15 ... 20] = true, + /* 21: reserved */ + [22 ... 26] = true, + /* 27: reserved */ + /* 28, 29: per-CPU CTI interrupts */ + /* 30, 31: reserved */ +}; + +static const bool sse300_irq_is_common[32] = { + [0 ... 5] = true, + /* 6, 7: per-CPU MHU interrupts */ + [8 ... 12] = true, + /* 13: reserved */ + [14 ... 16] = true, + /* 17-25: reserved */ + [26 ... 
27] = true, + /* 28, 29: per-CPU CTI interrupts */ + /* 30, 31: reserved */ +}; + +static const ARMSSEInfo armsse_variants[] = { + { + .name = TYPE_IOTKIT, + .sse_version = ARMSSE_IOTKIT, + .cpu_type = ARM_CPU_TYPE_NAME("cortex-m33"), + .sram_banks = 1, + .sram_bank_base = 0x20000000, + .num_cpus = 1, + .sys_version = 0x41743, + .iidr = 0, + .cpuwait_rst = 0, + .has_mhus = false, + .has_cachectrl = false, + .has_cpusecctrl = false, + .has_cpuid = false, + .has_cpu_pwrctrl = false, + .has_sse_counter = false, + .has_tcms = false, + .props = iotkit_properties, + .devinfo = iotkit_devices, + .irq_is_common = sse200_irq_is_common, + }, + { + .name = TYPE_SSE200, + .sse_version = ARMSSE_SSE200, + .cpu_type = ARM_CPU_TYPE_NAME("cortex-m33"), + .sram_banks = 4, + .sram_bank_base = 0x20000000, + .num_cpus = 2, + .sys_version = 0x22041743, + .iidr = 0, + .cpuwait_rst = 2, + .has_mhus = true, + .has_cachectrl = true, + .has_cpusecctrl = true, + .has_cpuid = true, + .has_cpu_pwrctrl = false, + .has_sse_counter = false, + .has_tcms = false, + .props = sse200_properties, + .devinfo = sse200_devices, + .irq_is_common = sse200_irq_is_common, + }, + { + .name = TYPE_SSE300, + .sse_version = ARMSSE_SSE300, + .cpu_type = ARM_CPU_TYPE_NAME("cortex-m55"), + .sram_banks = 2, + .sram_bank_base = 0x21000000, + .num_cpus = 1, + .sys_version = 0x7e00043b, + .iidr = 0x74a0043b, + .cpuwait_rst = 0, + .has_mhus = false, + .has_cachectrl = false, + .has_cpusecctrl = true, + .has_cpuid = true, + .has_cpu_pwrctrl = true, + .has_sse_counter = true, + .has_tcms = true, + .props = sse300_properties, + .devinfo = sse300_devices, + .irq_is_common = sse300_irq_is_common, + }, +}; + +static uint32_t armsse_sys_config_value(ARMSSE *s, const ARMSSEInfo *info) +{ + /* Return the SYS_CONFIG value for this SSE */ + uint32_t sys_config; + + switch (info->sse_version) { + case ARMSSE_IOTKIT: + sys_config = 0; + sys_config = deposit32(sys_config, 0, 4, info->sram_banks); + sys_config = deposit32(sys_config, 4, 4, s->sram_addr_width - 12); + break; + case ARMSSE_SSE200: + sys_config = 0; + sys_config = deposit32(sys_config, 0, 4, info->sram_banks); + sys_config = deposit32(sys_config, 4, 5, s->sram_addr_width); + sys_config = deposit32(sys_config, 24, 4, 2); + if (info->num_cpus > 1) { + sys_config = deposit32(sys_config, 10, 1, 1); + sys_config = deposit32(sys_config, 20, 4, info->sram_banks - 1); + sys_config = deposit32(sys_config, 28, 4, 2); + } + break; + case ARMSSE_SSE300: + sys_config = 0; + sys_config = deposit32(sys_config, 0, 4, info->sram_banks); + sys_config = deposit32(sys_config, 4, 5, s->sram_addr_width); + sys_config = deposit32(sys_config, 16, 3, 3); /* CPU0 = Cortex-M55 */ + break; + default: + g_assert_not_reached(); + } + return sys_config; +} + +/* Clock frequency in HZ of the 32KHz "slow clock" */ +#define S32KCLK (32 * 1000) + +/* + * Create an alias region in @container of @size bytes starting at @base + * which mirrors the memory starting at @orig. 
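+ * (Used in armsse_realize() for the 0x10000000 and 0x30000000 mirrors and
+ * for the per-CPU 0x50000000 view of the 0x40000000 peripheral region.)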
+ */ +static void make_alias(ARMSSE *s, MemoryRegion *mr, MemoryRegion *container, + const char *name, hwaddr base, hwaddr size, hwaddr orig) +{ + memory_region_init_alias(mr, NULL, name, container, orig, size); + /* The alias is even lower priority than unimplemented_device regions */ + memory_region_add_subregion_overlap(container, base, mr, -1500); +} + +static void irq_status_forwarder(void *opaque, int n, int level) +{ + qemu_irq destirq = opaque; + + qemu_set_irq(destirq, level); +} + +static void nsccfg_handler(void *opaque, int n, int level) +{ + ARMSSE *s = ARM_SSE(opaque); + + s->nsccfg = level; +} + +static void armsse_forward_ppc(ARMSSE *s, const char *ppcname, int ppcnum) +{ + /* Each of the 4 AHB and 4 APB PPCs that might be present in a + * system using the ARMSSE has a collection of control lines which + * are provided by the security controller and which we want to + * expose as control lines on the ARMSSE device itself, so the + * code using the ARMSSE can wire them up to the PPCs. + */ + SplitIRQ *splitter = &s->ppc_irq_splitter[ppcnum]; + DeviceState *armssedev = DEVICE(s); + DeviceState *dev_secctl = DEVICE(&s->secctl); + DeviceState *dev_splitter = DEVICE(splitter); + char *name; + + name = g_strdup_printf("%s_nonsec", ppcname); + qdev_pass_gpios(dev_secctl, armssedev, name); + g_free(name); + name = g_strdup_printf("%s_ap", ppcname); + qdev_pass_gpios(dev_secctl, armssedev, name); + g_free(name); + name = g_strdup_printf("%s_irq_enable", ppcname); + qdev_pass_gpios(dev_secctl, armssedev, name); + g_free(name); + name = g_strdup_printf("%s_irq_clear", ppcname); + qdev_pass_gpios(dev_secctl, armssedev, name); + g_free(name); + + /* irq_status is a little more tricky, because we need to + * split it so we can send it both to the security controller + * and to our OR gate for the NVIC interrupt line. + * Connect up the splitter's outputs, and create a GPIO input + * which will pass the line state to the input splitter. + */ + name = g_strdup_printf("%s_irq_status", ppcname); + qdev_connect_gpio_out(dev_splitter, 0, + qdev_get_gpio_in_named(dev_secctl, + name, 0)); + qdev_connect_gpio_out(dev_splitter, 1, + qdev_get_gpio_in(DEVICE(&s->ppc_irq_orgate), ppcnum)); + s->irq_status_in[ppcnum] = qdev_get_gpio_in(dev_splitter, 0); + qdev_init_gpio_in_named_with_opaque(armssedev, irq_status_forwarder, + s->irq_status_in[ppcnum], name, 1); + g_free(name); +} + +static void armsse_forward_sec_resp_cfg(ARMSSE *s) +{ + /* Forward the 3rd output from the splitter device as a + * named GPIO output of the armsse object. 
+ */ + DeviceState *dev = DEVICE(s); + DeviceState *dev_splitter = DEVICE(&s->sec_resp_splitter); + + qdev_init_gpio_out_named(dev, &s->sec_resp_cfg, "sec_resp_cfg", 1); + s->sec_resp_cfg_in = qemu_allocate_irq(irq_status_forwarder, + s->sec_resp_cfg, 1); + qdev_connect_gpio_out(dev_splitter, 2, s->sec_resp_cfg_in); +} + +static void armsse_init(Object *obj) +{ + ARMSSE *s = ARM_SSE(obj); + ARMSSEClass *asc = ARM_SSE_GET_CLASS(obj); + const ARMSSEInfo *info = asc->info; + const ARMSSEDeviceInfo *devinfo; + int i; + + assert(info->sram_banks <= MAX_SRAM_BANKS); + assert(info->num_cpus <= SSE_MAX_CPUS); + + s->mainclk = qdev_init_clock_in(DEVICE(s), "MAINCLK", NULL, NULL, 0); + s->s32kclk = qdev_init_clock_in(DEVICE(s), "S32KCLK", NULL, NULL, 0); + + memory_region_init(&s->container, obj, "armsse-container", UINT64_MAX); + + for (i = 0; i < info->num_cpus; i++) { + /* + * We put each CPU in its own cluster as they are logically + * distinct and may be configured differently. + */ + char *name; + + name = g_strdup_printf("cluster%d", i); + object_initialize_child(obj, name, &s->cluster[i], TYPE_CPU_CLUSTER); + qdev_prop_set_uint32(DEVICE(&s->cluster[i]), "cluster-id", i); + g_free(name); + + name = g_strdup_printf("armv7m%d", i); + object_initialize_child(OBJECT(&s->cluster[i]), name, &s->armv7m[i], + TYPE_ARMV7M); + qdev_prop_set_string(DEVICE(&s->armv7m[i]), "cpu-type", info->cpu_type); + g_free(name); + name = g_strdup_printf("arm-sse-cpu-container%d", i); + memory_region_init(&s->cpu_container[i], obj, name, UINT64_MAX); + g_free(name); + if (i > 0) { + name = g_strdup_printf("arm-sse-container-alias%d", i); + memory_region_init_alias(&s->container_alias[i - 1], obj, + name, &s->container, 0, UINT64_MAX); + g_free(name); + } + } + + for (devinfo = info->devinfo; devinfo->name; devinfo++) { + assert(devinfo->ppc == NO_PPC || devinfo->ppc < ARRAY_SIZE(s->apb_ppc)); + if (!strcmp(devinfo->type, TYPE_CMSDK_APB_TIMER)) { + assert(devinfo->index < ARRAY_SIZE(s->timer)); + object_initialize_child(obj, devinfo->name, + &s->timer[devinfo->index], + TYPE_CMSDK_APB_TIMER); + } else if (!strcmp(devinfo->type, TYPE_CMSDK_APB_DUALTIMER)) { + assert(devinfo->index == 0); + object_initialize_child(obj, devinfo->name, &s->dualtimer, + TYPE_CMSDK_APB_DUALTIMER); + } else if (!strcmp(devinfo->type, TYPE_SSE_TIMER)) { + assert(devinfo->index < ARRAY_SIZE(s->sse_timer)); + object_initialize_child(obj, devinfo->name, + &s->sse_timer[devinfo->index], + TYPE_SSE_TIMER); + } else if (!strcmp(devinfo->type, TYPE_CMSDK_APB_WATCHDOG)) { + assert(devinfo->index < ARRAY_SIZE(s->cmsdk_watchdog)); + object_initialize_child(obj, devinfo->name, + &s->cmsdk_watchdog[devinfo->index], + TYPE_CMSDK_APB_WATCHDOG); + } else if (!strcmp(devinfo->type, TYPE_IOTKIT_SYSINFO)) { + assert(devinfo->index == 0); + object_initialize_child(obj, devinfo->name, &s->sysinfo, + TYPE_IOTKIT_SYSINFO); + } else if (!strcmp(devinfo->type, TYPE_IOTKIT_SYSCTL)) { + assert(devinfo->index == 0); + object_initialize_child(obj, devinfo->name, &s->sysctl, + TYPE_IOTKIT_SYSCTL); + } else if (!strcmp(devinfo->type, TYPE_UNIMPLEMENTED_DEVICE)) { + assert(devinfo->index < ARRAY_SIZE(s->unimp)); + object_initialize_child(obj, devinfo->name, + &s->unimp[devinfo->index], + TYPE_UNIMPLEMENTED_DEVICE); + } else { + g_assert_not_reached(); + } + } + + object_initialize_child(obj, "secctl", &s->secctl, TYPE_IOTKIT_SECCTL); + + for (i = 0; i < ARRAY_SIZE(s->apb_ppc); i++) { + g_autofree char *name = g_strdup_printf("apb-ppc%d", i); + object_initialize_child(obj, 
name, &s->apb_ppc[i], TYPE_TZ_PPC); + } + + for (i = 0; i < info->sram_banks; i++) { + char *name = g_strdup_printf("mpc%d", i); + object_initialize_child(obj, name, &s->mpc[i], TYPE_TZ_MPC); + g_free(name); + } + object_initialize_child(obj, "mpc-irq-orgate", &s->mpc_irq_orgate, + TYPE_OR_IRQ); + + for (i = 0; i < IOTS_NUM_EXP_MPC + info->sram_banks; i++) { + char *name = g_strdup_printf("mpc-irq-splitter-%d", i); + SplitIRQ *splitter = &s->mpc_irq_splitter[i]; + + object_initialize_child(obj, name, splitter, TYPE_SPLIT_IRQ); + g_free(name); + } + + if (info->has_mhus) { + object_initialize_child(obj, "mhu0", &s->mhu[0], TYPE_ARMSSE_MHU); + object_initialize_child(obj, "mhu1", &s->mhu[1], TYPE_ARMSSE_MHU); + } + if (info->has_cachectrl) { + for (i = 0; i < info->num_cpus; i++) { + char *name = g_strdup_printf("cachectrl%d", i); + + object_initialize_child(obj, name, &s->cachectrl[i], + TYPE_UNIMPLEMENTED_DEVICE); + g_free(name); + } + } + if (info->has_cpusecctrl) { + for (i = 0; i < info->num_cpus; i++) { + char *name = g_strdup_printf("cpusecctrl%d", i); + + object_initialize_child(obj, name, &s->cpusecctrl[i], + TYPE_UNIMPLEMENTED_DEVICE); + g_free(name); + } + } + if (info->has_cpuid) { + for (i = 0; i < info->num_cpus; i++) { + char *name = g_strdup_printf("cpuid%d", i); + + object_initialize_child(obj, name, &s->cpuid[i], + TYPE_ARMSSE_CPUID); + g_free(name); + } + } + if (info->has_cpu_pwrctrl) { + for (i = 0; i < info->num_cpus; i++) { + char *name = g_strdup_printf("cpu_pwrctrl%d", i); + + object_initialize_child(obj, name, &s->cpu_pwrctrl[i], + TYPE_ARMSSE_CPU_PWRCTRL); + g_free(name); + } + } + if (info->has_sse_counter) { + object_initialize_child(obj, "sse-counter", &s->sse_counter, + TYPE_SSE_COUNTER); + } + + object_initialize_child(obj, "nmi-orgate", &s->nmi_orgate, TYPE_OR_IRQ); + object_initialize_child(obj, "ppc-irq-orgate", &s->ppc_irq_orgate, + TYPE_OR_IRQ); + object_initialize_child(obj, "sec-resp-splitter", &s->sec_resp_splitter, + TYPE_SPLIT_IRQ); + for (i = 0; i < ARRAY_SIZE(s->ppc_irq_splitter); i++) { + char *name = g_strdup_printf("ppc-irq-splitter-%d", i); + SplitIRQ *splitter = &s->ppc_irq_splitter[i]; + + object_initialize_child(obj, name, splitter, TYPE_SPLIT_IRQ); + g_free(name); + } + if (info->num_cpus > 1) { + for (i = 0; i < ARRAY_SIZE(s->cpu_irq_splitter); i++) { + if (info->irq_is_common[i]) { + char *name = g_strdup_printf("cpu-irq-splitter%d", i); + SplitIRQ *splitter = &s->cpu_irq_splitter[i]; + + object_initialize_child(obj, name, splitter, TYPE_SPLIT_IRQ); + g_free(name); + } + } + } +} + +static void armsse_exp_irq(void *opaque, int n, int level) +{ + qemu_irq *irqarray = opaque; + + qemu_set_irq(irqarray[n], level); +} + +static void armsse_mpcexp_status(void *opaque, int n, int level) +{ + ARMSSE *s = ARM_SSE(opaque); + qemu_set_irq(s->mpcexp_status_in[n], level); +} + +static qemu_irq armsse_get_common_irq_in(ARMSSE *s, int irqno) +{ + /* + * Return a qemu_irq which can be used to signal IRQ n to + * all CPUs in the SSE. 
+ */ + ARMSSEClass *asc = ARM_SSE_GET_CLASS(s); + const ARMSSEInfo *info = asc->info; + + assert(info->irq_is_common[irqno]); + + if (info->num_cpus == 1) { + /* Only one CPU -- just connect directly to it */ + return qdev_get_gpio_in(DEVICE(&s->armv7m[0]), irqno); + } else { + /* Connect to the splitter which feeds all CPUs */ + return qdev_get_gpio_in(DEVICE(&s->cpu_irq_splitter[irqno]), 0); + } +} + +static void armsse_realize(DeviceState *dev, Error **errp) +{ + ARMSSE *s = ARM_SSE(dev); + ARMSSEClass *asc = ARM_SSE_GET_CLASS(dev); + const ARMSSEInfo *info = asc->info; + const ARMSSEDeviceInfo *devinfo; + int i; + MemoryRegion *mr; + SysBusDevice *sbd_apb_ppc0; + SysBusDevice *sbd_secctl; + DeviceState *dev_apb_ppc0; + DeviceState *dev_apb_ppc1; + DeviceState *dev_secctl; + DeviceState *dev_splitter; + uint32_t addr_width_max; + + ERRP_GUARD(); + + if (!s->board_memory) { + error_setg(errp, "memory property was not set"); + return; + } + + if (!clock_has_source(s->mainclk)) { + error_setg(errp, "MAINCLK clock was not connected"); + } + if (!clock_has_source(s->s32kclk)) { + error_setg(errp, "S32KCLK clock was not connected"); + } + + assert(info->num_cpus <= SSE_MAX_CPUS); + + /* max SRAM_ADDR_WIDTH: 24 - log2(SRAM_NUM_BANK) */ + assert(is_power_of_2(info->sram_banks)); + addr_width_max = 24 - ctz32(info->sram_banks); + if (s->sram_addr_width < 1 || s->sram_addr_width > addr_width_max) { + error_setg(errp, "SRAM_ADDR_WIDTH must be between 1 and %d", + addr_width_max); + return; + } + + /* Handling of which devices should be available only to secure + * code is usually done differently for M profile than for A profile. + * Instead of putting some devices only into the secure address space, + * devices exist in both address spaces but with hard-wired security + * permissions that will cause the CPU to fault for non-secure accesses. + * + * The ARMSSE has an IDAU (Implementation Defined Access Unit), + * which specifies hard-wired security permissions for different + * areas of the physical address space. For the ARMSSE IDAU, the + * top 4 bits of the physical address are the IDAU region ID, and + * if bit 28 (ie the lowest bit of the ID) is 0 then this is an NS + * region, otherwise it is an S region. + * + * The various devices and RAMs are generally all mapped twice, + * once into a region that the IDAU defines as secure and once + * into a non-secure region. They sit behind either a Memory + * Protection Controller (for RAM) or a Peripheral Protection + * Controller (for devices), which allow a more fine grained + * configuration of whether non-secure accesses are permitted. + * + * (The other place that guest software can configure security + * permissions is in the architected SAU (Security Attribution + * Unit), which is entirely inside the CPU. The IDAU can upgrade + * the security attributes for a region to more restrictive than + * the SAU specifies, but cannot downgrade them.) 
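+     * As an example of the bit-28 rule: address 0x20000000 is in IDAU
+     * region 0x2 (bit 28 clear), so it is Non-secure, while its mirror
+     * at 0x30000000 is in region 0x3 (bit 28 set) and so is Secure.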
+ * + * 0x10000000..0x1fffffff alias of 0x00000000..0x0fffffff + * 0x20000000..0x2007ffff 32KB FPGA block RAM + * 0x30000000..0x3fffffff alias of 0x20000000..0x2fffffff + * 0x40000000..0x4000ffff base peripheral region 1 + * 0x40010000..0x4001ffff CPU peripherals (none for ARMSSE) + * 0x40020000..0x4002ffff system control element peripherals + * 0x40080000..0x400fffff base peripheral region 2 + * 0x50000000..0x5fffffff alias of 0x40000000..0x4fffffff + */ + + memory_region_add_subregion_overlap(&s->container, 0, s->board_memory, -2); + + for (i = 0; i < info->num_cpus; i++) { + DeviceState *cpudev = DEVICE(&s->armv7m[i]); + Object *cpuobj = OBJECT(&s->armv7m[i]); + int j; + char *gpioname; + + qdev_connect_clock_in(cpudev, "cpuclk", s->mainclk); + /* The SSE subsystems do not wire up a systick refclk */ + + qdev_prop_set_uint32(cpudev, "num-irq", s->exp_numirq + NUM_SSE_IRQS); + /* + * In real hardware the initial Secure VTOR is set from the INITSVTOR* + * registers in the IoT Kit System Control Register block. In QEMU + * we set the initial value here, and also the reset value of the + * sysctl register, from this object's QOM init-svtor property. + * If the guest changes the INITSVTOR* registers at runtime then the + * code in iotkit-sysctl.c will update the CPU init-svtor property + * (which will then take effect on the next CPU warm-reset). + * + * Note that typically a board using the SSE-200 will have a system + * control processor whose boot firmware initializes the INITSVTOR* + * registers before powering up the CPUs. QEMU doesn't emulate + * the control processor, so instead we behave in the way that the + * firmware does: the initial value should be set by the board code + * (using the init-svtor property on the ARMSSE object) to match + * whatever its firmware does. + */ + qdev_prop_set_uint32(cpudev, "init-svtor", s->init_svtor); + /* + * CPUs start powered down if the corresponding bit in the CPUWAIT + * register is 1. In real hardware the CPUWAIT register reset value is + * a configurable property of the SSE-200 (via the CPUWAIT0_RST and + * CPUWAIT1_RST parameters), but since all the boards we care about + * start CPU0 and leave CPU1 powered off, we hard-code that in + * info->cpuwait_rst for now. We can add QOM properties for this + * later if necessary. + */ + if (extract32(info->cpuwait_rst, i, 1)) { + if (!object_property_set_bool(cpuobj, "start-powered-off", true, + errp)) { + return; + } + } + if (!s->cpu_fpu[i]) { + if (!object_property_set_bool(cpuobj, "vfp", false, errp)) { + return; + } + } + if (!s->cpu_dsp[i]) { + if (!object_property_set_bool(cpuobj, "dsp", false, errp)) { + return; + } + } + + if (i > 0) { + memory_region_add_subregion_overlap(&s->cpu_container[i], 0, + &s->container_alias[i - 1], -1); + } else { + memory_region_add_subregion_overlap(&s->cpu_container[i], 0, + &s->container, -1); + } + object_property_set_link(cpuobj, "memory", + OBJECT(&s->cpu_container[i]), &error_abort); + object_property_set_link(cpuobj, "idau", OBJECT(s), &error_abort); + if (!sysbus_realize(SYS_BUS_DEVICE(cpuobj), errp)) { + return; + } + /* + * The cluster must be realized after the armv7m container, as + * the container's CPU object is only created on realize, and the + * CPU must exist and have been parented into the cluster before + * the cluster is realized. 
+ */ + if (!qdev_realize(DEVICE(&s->cluster[i]), NULL, errp)) { + return; + } + + /* Connect EXP_IRQ/EXP_CPUn_IRQ GPIOs to the NVIC's lines 32 and up */ + s->exp_irqs[i] = g_new(qemu_irq, s->exp_numirq); + for (j = 0; j < s->exp_numirq; j++) { + s->exp_irqs[i][j] = qdev_get_gpio_in(cpudev, j + NUM_SSE_IRQS); + } + if (i == 0) { + gpioname = g_strdup("EXP_IRQ"); + } else { + gpioname = g_strdup_printf("EXP_CPU%d_IRQ", i); + } + qdev_init_gpio_in_named_with_opaque(dev, armsse_exp_irq, + s->exp_irqs[i], + gpioname, s->exp_numirq); + g_free(gpioname); + } + + /* Wire up the splitters that connect common IRQs to all CPUs */ + if (info->num_cpus > 1) { + for (i = 0; i < ARRAY_SIZE(s->cpu_irq_splitter); i++) { + if (info->irq_is_common[i]) { + Object *splitter = OBJECT(&s->cpu_irq_splitter[i]); + DeviceState *devs = DEVICE(splitter); + int cpunum; + + if (!object_property_set_int(splitter, "num-lines", + info->num_cpus, errp)) { + return; + } + if (!qdev_realize(DEVICE(splitter), NULL, errp)) { + return; + } + for (cpunum = 0; cpunum < info->num_cpus; cpunum++) { + DeviceState *cpudev = DEVICE(&s->armv7m[cpunum]); + + qdev_connect_gpio_out(devs, cpunum, + qdev_get_gpio_in(cpudev, i)); + } + } + } + } + + /* Set up the big aliases first */ + make_alias(s, &s->alias1, &s->container, "alias 1", + 0x10000000, 0x10000000, 0x00000000); + make_alias(s, &s->alias2, &s->container, + "alias 2", 0x30000000, 0x10000000, 0x20000000); + /* The 0x50000000..0x5fffffff region is not a pure alias: it has + * a few extra devices that only appear there (generally the + * control interfaces for the protection controllers). + * We implement this by mapping those devices over the top of this + * alias MR at a higher priority. Some of the devices in this range + * are per-CPU, so we must put this alias in the per-cpu containers. + */ + for (i = 0; i < info->num_cpus; i++) { + make_alias(s, &s->alias3[i], &s->cpu_container[i], + "alias 3", 0x50000000, 0x10000000, 0x40000000); + } + + /* Security controller */ + object_property_set_int(OBJECT(&s->secctl), "sse-version", + info->sse_version, &error_abort); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->secctl), errp)) { + return; + } + sbd_secctl = SYS_BUS_DEVICE(&s->secctl); + dev_secctl = DEVICE(&s->secctl); + sysbus_mmio_map(sbd_secctl, 0, 0x50080000); + sysbus_mmio_map(sbd_secctl, 1, 0x40080000); + + s->nsc_cfg_in = qemu_allocate_irq(nsccfg_handler, s, 1); + qdev_connect_gpio_out_named(dev_secctl, "nsc_cfg", 0, s->nsc_cfg_in); + + /* The sec_resp_cfg output from the security controller must be split into + * multiple lines, one for each of the PPCs within the ARMSSE and one + * that will be an output from the ARMSSE to the system. 
+ */ + if (!object_property_set_int(OBJECT(&s->sec_resp_splitter), + "num-lines", 3, errp)) { + return; + } + if (!qdev_realize(DEVICE(&s->sec_resp_splitter), NULL, errp)) { + return; + } + dev_splitter = DEVICE(&s->sec_resp_splitter); + qdev_connect_gpio_out_named(dev_secctl, "sec_resp_cfg", 0, + qdev_get_gpio_in(dev_splitter, 0)); + + /* Each SRAM bank lives behind its own Memory Protection Controller */ + for (i = 0; i < info->sram_banks; i++) { + char *ramname = g_strdup_printf("armsse.sram%d", i); + SysBusDevice *sbd_mpc; + uint32_t sram_bank_size = 1 << s->sram_addr_width; + + memory_region_init_ram(&s->sram[i], NULL, ramname, + sram_bank_size, errp); + g_free(ramname); + if (*errp) { + return; + } + object_property_set_link(OBJECT(&s->mpc[i]), "downstream", + OBJECT(&s->sram[i]), &error_abort); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->mpc[i]), errp)) { + return; + } + /* Map the upstream end of the MPC into the right place... */ + sbd_mpc = SYS_BUS_DEVICE(&s->mpc[i]); + memory_region_add_subregion(&s->container, + info->sram_bank_base + i * sram_bank_size, + sysbus_mmio_get_region(sbd_mpc, 1)); + /* ...and its register interface */ + memory_region_add_subregion(&s->container, 0x50083000 + i * 0x1000, + sysbus_mmio_get_region(sbd_mpc, 0)); + } + + /* We must OR together lines from the MPC splitters to go to the NVIC */ + if (!object_property_set_int(OBJECT(&s->mpc_irq_orgate), "num-lines", + IOTS_NUM_EXP_MPC + info->sram_banks, + errp)) { + return; + } + if (!qdev_realize(DEVICE(&s->mpc_irq_orgate), NULL, errp)) { + return; + } + qdev_connect_gpio_out(DEVICE(&s->mpc_irq_orgate), 0, + armsse_get_common_irq_in(s, 9)); + + /* This OR gate wires together outputs from the secure watchdogs to NMI */ + if (!object_property_set_int(OBJECT(&s->nmi_orgate), "num-lines", 2, + errp)) { + return; + } + if (!qdev_realize(DEVICE(&s->nmi_orgate), NULL, errp)) { + return; + } + qdev_connect_gpio_out(DEVICE(&s->nmi_orgate), 0, + qdev_get_gpio_in_named(DEVICE(&s->armv7m), "NMI", 0)); + + /* The SSE-300 has a System Counter / System Timestamp Generator */ + if (info->has_sse_counter) { + SysBusDevice *sbd = SYS_BUS_DEVICE(&s->sse_counter); + + qdev_connect_clock_in(DEVICE(sbd), "CLK", s->mainclk); + if (!sysbus_realize(sbd, errp)) { + return; + } + /* + * The control frame is only in the Secure region; + * the status frame is in the NS region (and visible in the + * S region via the alias mapping). + */ + memory_region_add_subregion(&s->container, 0x58100000, + sysbus_mmio_get_region(sbd, 0)); + memory_region_add_subregion(&s->container, 0x48101000, + sysbus_mmio_get_region(sbd, 1)); + } + + if (info->has_tcms) { + /* The SSE-300 has an ITCM at 0x0000_0000 and a DTCM at 0x2000_0000 */ + memory_region_init_ram(&s->itcm, NULL, "sse300-itcm", 512 * KiB, errp); + if (*errp) { + return; + } + memory_region_init_ram(&s->dtcm, NULL, "sse300-dtcm", 512 * KiB, errp); + if (*errp) { + return; + } + memory_region_add_subregion(&s->container, 0x00000000, &s->itcm); + memory_region_add_subregion(&s->container, 0x20000000, &s->dtcm); + } + + /* Devices behind APB PPC0: + * 0x40000000: timer0 + * 0x40001000: timer1 + * 0x40002000: dual timer + * 0x40003000: MHU0 (SSE-200 only) + * 0x40004000: MHU1 (SSE-200 only) + * We must configure and realize each downstream device and connect + * it to the appropriate PPC port; then we can realize the PPC and + * map its upstream ends to the right place in the container. 
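
A worked example of the address arithmetic in the SRAM/MPC loop above, with illustrative numbers rather than any particular variant's configuration:

    /*
     * With sram_addr_width == 15, sram_bank_size == 1 << 15 == 32KB.
     * For sram_bank_base == 0x20000000, bank 0's MPC upstream window is
     * mapped at 0x20000000 and bank 1's at 0x20008000, while their MPC
     * register frames land at 0x50083000 and 0x50084000 respectively.
     */
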
+ */ + for (devinfo = info->devinfo; devinfo->name; devinfo++) { + SysBusDevice *sbd; + qemu_irq irq; + + if (!strcmp(devinfo->type, TYPE_CMSDK_APB_TIMER)) { + sbd = SYS_BUS_DEVICE(&s->timer[devinfo->index]); + + qdev_connect_clock_in(DEVICE(sbd), "pclk", + devinfo->slowclk ? s->s32kclk : s->mainclk); + if (!sysbus_realize(sbd, errp)) { + return; + } + mr = sysbus_mmio_get_region(sbd, 0); + } else if (!strcmp(devinfo->type, TYPE_CMSDK_APB_DUALTIMER)) { + sbd = SYS_BUS_DEVICE(&s->dualtimer); + + qdev_connect_clock_in(DEVICE(sbd), "TIMCLK", s->mainclk); + if (!sysbus_realize(sbd, errp)) { + return; + } + mr = sysbus_mmio_get_region(sbd, 0); + } else if (!strcmp(devinfo->type, TYPE_SSE_TIMER)) { + sbd = SYS_BUS_DEVICE(&s->sse_timer[devinfo->index]); + + assert(info->has_sse_counter); + object_property_set_link(OBJECT(sbd), "counter", + OBJECT(&s->sse_counter), &error_abort); + if (!sysbus_realize(sbd, errp)) { + return; + } + mr = sysbus_mmio_get_region(sbd, 0); + } else if (!strcmp(devinfo->type, TYPE_CMSDK_APB_WATCHDOG)) { + sbd = SYS_BUS_DEVICE(&s->cmsdk_watchdog[devinfo->index]); + + qdev_connect_clock_in(DEVICE(sbd), "WDOGCLK", + devinfo->slowclk ? s->s32kclk : s->mainclk); + if (!sysbus_realize(sbd, errp)) { + return; + } + mr = sysbus_mmio_get_region(sbd, 0); + } else if (!strcmp(devinfo->type, TYPE_IOTKIT_SYSINFO)) { + sbd = SYS_BUS_DEVICE(&s->sysinfo); + + object_property_set_int(OBJECT(&s->sysinfo), "SYS_VERSION", + info->sys_version, &error_abort); + object_property_set_int(OBJECT(&s->sysinfo), "SYS_CONFIG", + armsse_sys_config_value(s, info), + &error_abort); + object_property_set_int(OBJECT(&s->sysinfo), "sse-version", + info->sse_version, &error_abort); + object_property_set_int(OBJECT(&s->sysinfo), "IIDR", + info->iidr, &error_abort); + if (!sysbus_realize(sbd, errp)) { + return; + } + mr = sysbus_mmio_get_region(sbd, 0); + } else if (!strcmp(devinfo->type, TYPE_IOTKIT_SYSCTL)) { + /* System control registers */ + sbd = SYS_BUS_DEVICE(&s->sysctl); + + object_property_set_int(OBJECT(&s->sysctl), "sse-version", + info->sse_version, &error_abort); + object_property_set_int(OBJECT(&s->sysctl), "CPUWAIT_RST", + info->cpuwait_rst, &error_abort); + object_property_set_int(OBJECT(&s->sysctl), "INITSVTOR0_RST", + s->init_svtor, &error_abort); + object_property_set_int(OBJECT(&s->sysctl), "INITSVTOR1_RST", + s->init_svtor, &error_abort); + if (!sysbus_realize(sbd, errp)) { + return; + } + mr = sysbus_mmio_get_region(sbd, 0); + } else if (!strcmp(devinfo->type, TYPE_UNIMPLEMENTED_DEVICE)) { + sbd = SYS_BUS_DEVICE(&s->unimp[devinfo->index]); + + qdev_prop_set_string(DEVICE(sbd), "name", devinfo->name); + qdev_prop_set_uint64(DEVICE(sbd), "size", devinfo->size); + if (!sysbus_realize(sbd, errp)) { + return; + } + mr = sysbus_mmio_get_region(sbd, 0); + } else { + g_assert_not_reached(); + } + + switch (devinfo->irq) { + case NO_IRQ: + irq = NULL; + break; + case 0 ... NUM_SSE_IRQS - 1: + irq = armsse_get_common_irq_in(s, devinfo->irq); + break; + case NMI_0: + case NMI_1: + irq = qdev_get_gpio_in(DEVICE(&s->nmi_orgate), + devinfo->irq - NMI_0); + break; + default: + g_assert_not_reached(); + } + + if (irq) { + sysbus_connect_irq(sbd, 0, irq); + } + + /* + * Devices connected to a PPC are connected to the port here; + * we will map the upstream end of that port to the right address + * in the container later after the PPC has been realized. + * Devices not connected to a PPC can be mapped immediately. 
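
The dispatch above walks a per-variant devinfo table defined earlier in the file. A sketch of what one entry looks like; the struct and field names are inferred from the accesses in the loop (ARMSSEDeviceInfo is assumed), and the timer entry shown is illustrative rather than copied from a real table:

    static const ARMSSEDeviceInfo example_devinfo[] = {
        {
            .name = "timer0",
            .type = TYPE_CMSDK_APB_TIMER,
            .index = 0,
            .addr = 0x40000000,
            .ppc = 0,               /* behind APB PPC0... */
            .ppc_port = 0,          /* ...on port 0 */
            .irq = 3,               /* one of the common NVIC lines */
        },
        { .name = NULL },           /* terminates the walk above */
    };
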
+ */ + if (devinfo->ppc != NO_PPC) { + TZPPC *ppc = &s->apb_ppc[devinfo->ppc]; + g_autofree char *portname = g_strdup_printf("port[%d]", + devinfo->ppc_port); + object_property_set_link(OBJECT(ppc), portname, OBJECT(mr), + &error_abort); + } else { + memory_region_add_subregion(&s->container, devinfo->addr, mr); + } + } + + if (info->has_mhus) { + /* + * An SSE-200 with only one CPU should have only one MHU created, + * with the region where the second MHU usually is being RAZ/WI. + * We don't implement that SSE-200 config; if we want to support + * it then this code needs to be enhanced to handle creating the + * RAZ/WI region instead of the second MHU. + */ + assert(info->num_cpus == ARRAY_SIZE(s->mhu)); + + for (i = 0; i < ARRAY_SIZE(s->mhu); i++) { + char *port; + int cpunum; + SysBusDevice *mhu_sbd = SYS_BUS_DEVICE(&s->mhu[i]); + + if (!sysbus_realize(SYS_BUS_DEVICE(&s->mhu[i]), errp)) { + return; + } + port = g_strdup_printf("port[%d]", i + 3); + mr = sysbus_mmio_get_region(mhu_sbd, 0); + object_property_set_link(OBJECT(&s->apb_ppc[0]), port, OBJECT(mr), + &error_abort); + g_free(port); + + /* + * Each MHU has an irq line for each CPU: + * MHU 0 irq line 0 -> CPU 0 IRQ 6 + * MHU 0 irq line 1 -> CPU 1 IRQ 6 + * MHU 1 irq line 0 -> CPU 0 IRQ 7 + * MHU 1 irq line 1 -> CPU 1 IRQ 7 + */ + for (cpunum = 0; cpunum < info->num_cpus; cpunum++) { + DeviceState *cpudev = DEVICE(&s->armv7m[cpunum]); + + sysbus_connect_irq(mhu_sbd, cpunum, + qdev_get_gpio_in(cpudev, 6 + i)); + } + } + } + + if (!sysbus_realize(SYS_BUS_DEVICE(&s->apb_ppc[0]), errp)) { + return; + } + + sbd_apb_ppc0 = SYS_BUS_DEVICE(&s->apb_ppc[0]); + dev_apb_ppc0 = DEVICE(&s->apb_ppc[0]); + + if (info->has_mhus) { + mr = sysbus_mmio_get_region(sbd_apb_ppc0, 3); + memory_region_add_subregion(&s->container, 0x40003000, mr); + mr = sysbus_mmio_get_region(sbd_apb_ppc0, 4); + memory_region_add_subregion(&s->container, 0x40004000, mr); + } + for (i = 0; i < IOTS_APB_PPC0_NUM_PORTS; i++) { + qdev_connect_gpio_out_named(dev_secctl, "apb_ppc0_nonsec", i, + qdev_get_gpio_in_named(dev_apb_ppc0, + "cfg_nonsec", i)); + qdev_connect_gpio_out_named(dev_secctl, "apb_ppc0_ap", i, + qdev_get_gpio_in_named(dev_apb_ppc0, + "cfg_ap", i)); + } + qdev_connect_gpio_out_named(dev_secctl, "apb_ppc0_irq_enable", 0, + qdev_get_gpio_in_named(dev_apb_ppc0, + "irq_enable", 0)); + qdev_connect_gpio_out_named(dev_secctl, "apb_ppc0_irq_clear", 0, + qdev_get_gpio_in_named(dev_apb_ppc0, + "irq_clear", 0)); + qdev_connect_gpio_out(dev_splitter, 0, + qdev_get_gpio_in_named(dev_apb_ppc0, + "cfg_sec_resp", 0)); + + /* All the PPC irq lines (from the 2 internal PPCs and the 8 external + * ones) are sent individually to the security controller, and also + * ORed together to give a single combined PPC interrupt to the NVIC. + */ + if (!object_property_set_int(OBJECT(&s->ppc_irq_orgate), + "num-lines", NUM_PPCS, errp)) { + return; + } + if (!qdev_realize(DEVICE(&s->ppc_irq_orgate), NULL, errp)) { + return; + } + qdev_connect_gpio_out(DEVICE(&s->ppc_irq_orgate), 0, + armsse_get_common_irq_in(s, 10)); + + /* + * 0x40010000 .. 0x4001ffff (and the 0x5001000... 
secure-only alias): + * private per-CPU region (all these devices are SSE-200 only): + * 0x50010000: L1 icache control registers + * 0x50011000: CPUSECCTRL (CPU local security control registers) + * 0x4001f000 and 0x5001f000: CPU_IDENTITY register block + * The SSE-300 has an extra: + * 0x40012000 and 0x50012000: CPU_PWRCTRL register block + */ + if (info->has_cachectrl) { + for (i = 0; i < info->num_cpus; i++) { + char *name = g_strdup_printf("cachectrl%d", i); + MemoryRegion *mr; + + qdev_prop_set_string(DEVICE(&s->cachectrl[i]), "name", name); + g_free(name); + qdev_prop_set_uint64(DEVICE(&s->cachectrl[i]), "size", 0x1000); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->cachectrl[i]), errp)) { + return; + } + + mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->cachectrl[i]), 0); + memory_region_add_subregion(&s->cpu_container[i], 0x50010000, mr); + } + } + if (info->has_cpusecctrl) { + for (i = 0; i < info->num_cpus; i++) { + char *name = g_strdup_printf("CPUSECCTRL%d", i); + MemoryRegion *mr; + + qdev_prop_set_string(DEVICE(&s->cpusecctrl[i]), "name", name); + g_free(name); + qdev_prop_set_uint64(DEVICE(&s->cpusecctrl[i]), "size", 0x1000); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->cpusecctrl[i]), errp)) { + return; + } + + mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->cpusecctrl[i]), 0); + memory_region_add_subregion(&s->cpu_container[i], 0x50011000, mr); + } + } + if (info->has_cpuid) { + for (i = 0; i < info->num_cpus; i++) { + MemoryRegion *mr; + + qdev_prop_set_uint32(DEVICE(&s->cpuid[i]), "CPUID", i); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->cpuid[i]), errp)) { + return; + } + + mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->cpuid[i]), 0); + memory_region_add_subregion(&s->cpu_container[i], 0x4001F000, mr); + } + } + if (info->has_cpu_pwrctrl) { + for (i = 0; i < info->num_cpus; i++) { + MemoryRegion *mr; + + if (!sysbus_realize(SYS_BUS_DEVICE(&s->cpu_pwrctrl[i]), errp)) { + return; + } + + mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->cpu_pwrctrl[i]), 0); + memory_region_add_subregion(&s->cpu_container[i], 0x40012000, mr); + } + } + + if (!sysbus_realize(SYS_BUS_DEVICE(&s->apb_ppc[1]), errp)) { + return; + } + + dev_apb_ppc1 = DEVICE(&s->apb_ppc[1]); + qdev_connect_gpio_out_named(dev_secctl, "apb_ppc1_nonsec", 0, + qdev_get_gpio_in_named(dev_apb_ppc1, + "cfg_nonsec", 0)); + qdev_connect_gpio_out_named(dev_secctl, "apb_ppc1_ap", 0, + qdev_get_gpio_in_named(dev_apb_ppc1, + "cfg_ap", 0)); + qdev_connect_gpio_out_named(dev_secctl, "apb_ppc1_irq_enable", 0, + qdev_get_gpio_in_named(dev_apb_ppc1, + "irq_enable", 0)); + qdev_connect_gpio_out_named(dev_secctl, "apb_ppc1_irq_clear", 0, + qdev_get_gpio_in_named(dev_apb_ppc1, + "irq_clear", 0)); + qdev_connect_gpio_out(dev_splitter, 1, + qdev_get_gpio_in_named(dev_apb_ppc1, + "cfg_sec_resp", 0)); + + /* + * Now both PPCs are realized we can map the upstream ends of + * ports which correspond to entries in the devinfo array. + * The ports which are connected to non-devinfo devices have + * already been mapped. 
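
For readers tracing the address decode, a recap of the layering that makes these frames private:

    /*
     * s->cpu_container[n] is CPU n's memory view. The shared &s->container
     * was added to it at priority -1 earlier in realize, so the per-CPU
     * frames mapped above at the default priority 0 (cache control,
     * CPUSECCTRL, CPU_IDENTITY, CPU_PWRCTRL) shadow the shared map for
     * that one CPU only.
     */
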
+ */ + for (devinfo = info->devinfo; devinfo->name; devinfo++) { + SysBusDevice *ppc_sbd; + + if (devinfo->ppc == NO_PPC) { + continue; + } + ppc_sbd = SYS_BUS_DEVICE(&s->apb_ppc[devinfo->ppc]); + mr = sysbus_mmio_get_region(ppc_sbd, devinfo->ppc_port); + memory_region_add_subregion(&s->container, devinfo->addr, mr); + } + + for (i = 0; i < ARRAY_SIZE(s->ppc_irq_splitter); i++) { + Object *splitter = OBJECT(&s->ppc_irq_splitter[i]); + + if (!object_property_set_int(splitter, "num-lines", 2, errp)) { + return; + } + if (!qdev_realize(DEVICE(splitter), NULL, errp)) { + return; + } + } + + for (i = 0; i < IOTS_NUM_AHB_EXP_PPC; i++) { + char *ppcname = g_strdup_printf("ahb_ppcexp%d", i); + + armsse_forward_ppc(s, ppcname, i); + g_free(ppcname); + } + + for (i = 0; i < IOTS_NUM_APB_EXP_PPC; i++) { + char *ppcname = g_strdup_printf("apb_ppcexp%d", i); + + armsse_forward_ppc(s, ppcname, i + IOTS_NUM_AHB_EXP_PPC); + g_free(ppcname); + } + + for (i = NUM_EXTERNAL_PPCS; i < NUM_PPCS; i++) { + /* Wire up IRQ splitter for internal PPCs */ + DeviceState *devs = DEVICE(&s->ppc_irq_splitter[i]); + char *gpioname = g_strdup_printf("apb_ppc%d_irq_status", + i - NUM_EXTERNAL_PPCS); + TZPPC *ppc = &s->apb_ppc[i - NUM_EXTERNAL_PPCS]; + + qdev_connect_gpio_out(devs, 0, + qdev_get_gpio_in_named(dev_secctl, gpioname, 0)); + qdev_connect_gpio_out(devs, 1, + qdev_get_gpio_in(DEVICE(&s->ppc_irq_orgate), i)); + qdev_connect_gpio_out_named(DEVICE(ppc), "irq", 0, + qdev_get_gpio_in(devs, 0)); + g_free(gpioname); + } + + /* Wire up the splitters for the MPC IRQs */ + for (i = 0; i < IOTS_NUM_EXP_MPC + info->sram_banks; i++) { + SplitIRQ *splitter = &s->mpc_irq_splitter[i]; + DeviceState *dev_splitter = DEVICE(splitter); + + if (!object_property_set_int(OBJECT(splitter), "num-lines", 2, + errp)) { + return; + } + if (!qdev_realize(DEVICE(splitter), NULL, errp)) { + return; + } + + if (i < IOTS_NUM_EXP_MPC) { + /* Splitter input is from GPIO input line */ + s->mpcexp_status_in[i] = qdev_get_gpio_in(dev_splitter, 0); + qdev_connect_gpio_out(dev_splitter, 0, + qdev_get_gpio_in_named(dev_secctl, + "mpcexp_status", i)); + } else { + /* Splitter input is from our own MPC */ + qdev_connect_gpio_out_named(DEVICE(&s->mpc[i - IOTS_NUM_EXP_MPC]), + "irq", 0, + qdev_get_gpio_in(dev_splitter, 0)); + qdev_connect_gpio_out(dev_splitter, 0, + qdev_get_gpio_in_named(dev_secctl, + "mpc_status", + i - IOTS_NUM_EXP_MPC)); + } + + qdev_connect_gpio_out(dev_splitter, 1, + qdev_get_gpio_in(DEVICE(&s->mpc_irq_orgate), i)); + } + /* Create GPIO inputs which will pass the line state for our + * mpcexp_irq inputs to the correct splitter devices. + */ + qdev_init_gpio_in_named(dev, armsse_mpcexp_status, "mpcexp_status", + IOTS_NUM_EXP_MPC); + + armsse_forward_sec_resp_cfg(s); + + /* Forward the MSC related signals */ + qdev_pass_gpios(dev_secctl, dev, "mscexp_status"); + qdev_pass_gpios(dev_secctl, dev, "mscexp_clear"); + qdev_pass_gpios(dev_secctl, dev, "mscexp_ns"); + qdev_connect_gpio_out_named(dev_secctl, "msc_irq", 0, + armsse_get_common_irq_in(s, 11)); + + /* + * Expose our container region to the board model; this corresponds + * to the AHB Slave Expansion ports which allow bus master devices + * (eg DMA controllers) in the board model to make transactions into + * devices in the ARMSSE. 
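
armsse_idau_check() just below derives every attribute from address bits [31:28]; a few worked cases:

    /*
     * 0x00001000 -> region 0x0 -> Non-secure
     * 0x10001000 -> region 0x1 -> Secure; NSC if NSCCFG bit 0 is set
     * 0x30001000 -> region 0x3 -> Secure; NSC if NSCCFG bit 1 is set
     * 0xe0001000 -> (addr & 0xeff00000) == 0xe0000000 -> exempt
     */
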
+ */ + sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->container); +} + +static void armsse_idau_check(IDAUInterface *ii, uint32_t address, + int *iregion, bool *exempt, bool *ns, bool *nsc) +{ + /* + * For ARMSSE systems the IDAU responses are simple logical functions + * of the address bits. The NSC attribute is guest-adjustable via the + * NSCCFG register in the security controller. + */ + ARMSSE *s = ARM_SSE(ii); + int region = extract32(address, 28, 4); + + *ns = !(region & 1); + *nsc = (region == 1 && (s->nsccfg & 1)) || (region == 3 && (s->nsccfg & 2)); + /* 0xe0000000..0xe00fffff and 0xf0000000..0xf00fffff are exempt */ + *exempt = (address & 0xeff00000) == 0xe0000000; + *iregion = region; +} + +static const VMStateDescription armsse_vmstate = { + .name = "iotkit", + .version_id = 2, + .minimum_version_id = 2, + .fields = (VMStateField[]) { + VMSTATE_CLOCK(mainclk, ARMSSE), + VMSTATE_CLOCK(s32kclk, ARMSSE), + VMSTATE_UINT32(nsccfg, ARMSSE), + VMSTATE_END_OF_LIST() + } +}; + +static void armsse_reset(DeviceState *dev) +{ + ARMSSE *s = ARM_SSE(dev); + + s->nsccfg = 0; +} + +static void armsse_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + IDAUInterfaceClass *iic = IDAU_INTERFACE_CLASS(klass); + ARMSSEClass *asc = ARM_SSE_CLASS(klass); + const ARMSSEInfo *info = data; + + dc->realize = armsse_realize; + dc->vmsd = &armsse_vmstate; + device_class_set_props(dc, info->props); + dc->reset = armsse_reset; + iic->check = armsse_idau_check; + asc->info = info; +} + +static const TypeInfo armsse_info = { + .name = TYPE_ARM_SSE, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(ARMSSE), + .class_size = sizeof(ARMSSEClass), + .instance_init = armsse_init, + .abstract = true, + .interfaces = (InterfaceInfo[]) { + { TYPE_IDAU_INTERFACE }, + { } + } +}; + +static void armsse_register_types(void) +{ + int i; + + type_register_static(&armsse_info); + + for (i = 0; i < ARRAY_SIZE(armsse_variants); i++) { + TypeInfo ti = { + .name = armsse_variants[i].name, + .parent = TYPE_ARM_SSE, + .class_init = armsse_class_init, + .class_data = (void *)&armsse_variants[i], + }; + type_register(&ti); + } +} + +type_init(armsse_register_types); diff --git a/hw/arm/armv7m.c b/hw/arm/armv7m.c new file mode 100644 index 000000000..8d08db80b --- /dev/null +++ b/hw/arm/armv7m.c @@ -0,0 +1,629 @@ +/* + * ARMV7M System emulation. + * + * Copyright (c) 2006-2007 CodeSourcery. + * Written by Paul Brook + * + * This code is licensed under the GPL. + */ + +#include "qemu/osdep.h" +#include "hw/arm/armv7m.h" +#include "qapi/error.h" +#include "hw/sysbus.h" +#include "hw/arm/boot.h" +#include "hw/loader.h" +#include "hw/qdev-properties.h" +#include "hw/qdev-clock.h" +#include "elf.h" +#include "sysemu/reset.h" +#include "qemu/error-report.h" +#include "qemu/module.h" +#include "qemu/log.h" +#include "target/arm/idau.h" +#include "migration/vmstate.h" + +/* Bitbanded IO. Each word corresponds to a single bit. */ + +/* Get the byte address of the real memory for a bitband access. 
*/ +static inline hwaddr bitband_addr(BitBandState *s, hwaddr offset) +{ + return s->base | (offset & 0x1ffffff) >> 5; +} + +static MemTxResult bitband_read(void *opaque, hwaddr offset, + uint64_t *data, unsigned size, MemTxAttrs attrs) +{ + BitBandState *s = opaque; + uint8_t buf[4]; + MemTxResult res; + int bitpos, bit; + hwaddr addr; + + assert(size <= 4); + + /* Find address in underlying memory and round down to multiple of size */ + addr = bitband_addr(s, offset) & (-size); + res = address_space_read(&s->source_as, addr, attrs, buf, size); + if (res) { + return res; + } + /* Bit position in the N bytes read... */ + bitpos = (offset >> 2) & ((size * 8) - 1); + /* ...converted to byte in buffer and bit in byte */ + bit = (buf[bitpos >> 3] >> (bitpos & 7)) & 1; + *data = bit; + return MEMTX_OK; +} + +static MemTxResult bitband_write(void *opaque, hwaddr offset, uint64_t value, + unsigned size, MemTxAttrs attrs) +{ + BitBandState *s = opaque; + uint8_t buf[4]; + MemTxResult res; + int bitpos, bit; + hwaddr addr; + + assert(size <= 4); + + /* Find address in underlying memory and round down to multiple of size */ + addr = bitband_addr(s, offset) & (-size); + res = address_space_read(&s->source_as, addr, attrs, buf, size); + if (res) { + return res; + } + /* Bit position in the N bytes read... */ + bitpos = (offset >> 2) & ((size * 8) - 1); + /* ...converted to byte in buffer and bit in byte */ + bit = 1 << (bitpos & 7); + if (value & 1) { + buf[bitpos >> 3] |= bit; + } else { + buf[bitpos >> 3] &= ~bit; + } + return address_space_write(&s->source_as, addr, attrs, buf, size); +} + +static const MemoryRegionOps bitband_ops = { + .read_with_attrs = bitband_read, + .write_with_attrs = bitband_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .impl.min_access_size = 1, + .impl.max_access_size = 4, + .valid.min_access_size = 1, + .valid.max_access_size = 4, +}; + +static void bitband_init(Object *obj) +{ + BitBandState *s = BITBAND(obj); + SysBusDevice *dev = SYS_BUS_DEVICE(obj); + + memory_region_init_io(&s->iomem, obj, &bitband_ops, s, + "bitband", 0x02000000); + sysbus_init_mmio(dev, &s->iomem); +} + +static void bitband_realize(DeviceState *dev, Error **errp) +{ + BitBandState *s = BITBAND(dev); + + if (!s->source_memory) { + error_setg(errp, "source-memory property not set"); + return; + } + + address_space_init(&s->source_as, s->source_memory, "bitband-source"); +} + +/* Board init. 
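
A worked example of the bit-band arithmetic above: for an access at alias offset 0x84, bitband_addr() computes (0x84 & 0x1ffffff) >> 5 == 4, selecting the byte at s->base + 4; bitband_read() then computes bitpos == (0x84 >> 2) & 31 == 1, i.e. bit 1 of that byte, which is bit 33 of the downstream region (each region bit gets its own 32-bit alias word). A read returns 0 or 1, and a write uses only bit 0 of the value, setting or clearing that single bit via a read-modify-write of the underlying byte.
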
*/ + +static const hwaddr bitband_input_addr[ARMV7M_NUM_BITBANDS] = { + 0x20000000, 0x40000000 +}; + +static const hwaddr bitband_output_addr[ARMV7M_NUM_BITBANDS] = { + 0x22000000, 0x42000000 +}; + +static MemTxResult v7m_sysreg_ns_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size, + MemTxAttrs attrs) +{ + MemoryRegion *mr = opaque; + + if (attrs.secure) { + /* S accesses to the alias act like NS accesses to the real region */ + attrs.secure = 0; + return memory_region_dispatch_write(mr, addr, value, + size_memop(size) | MO_TE, attrs); + } else { + /* NS attrs are RAZ/WI for privileged, and BusFault for user */ + if (attrs.user) { + return MEMTX_ERROR; + } + return MEMTX_OK; + } +} + +static MemTxResult v7m_sysreg_ns_read(void *opaque, hwaddr addr, + uint64_t *data, unsigned size, + MemTxAttrs attrs) +{ + MemoryRegion *mr = opaque; + + if (attrs.secure) { + /* S accesses to the alias act like NS accesses to the real region */ + attrs.secure = 0; + return memory_region_dispatch_read(mr, addr, data, + size_memop(size) | MO_TE, attrs); + } else { + /* NS attrs are RAZ/WI for privileged, and BusFault for user */ + if (attrs.user) { + return MEMTX_ERROR; + } + *data = 0; + return MEMTX_OK; + } +} + +static const MemoryRegionOps v7m_sysreg_ns_ops = { + .read_with_attrs = v7m_sysreg_ns_read, + .write_with_attrs = v7m_sysreg_ns_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static MemTxResult v7m_systick_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size, + MemTxAttrs attrs) +{ + ARMv7MState *s = opaque; + MemoryRegion *mr; + + /* Direct the access to the correct systick */ + mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->systick[attrs.secure]), 0); + return memory_region_dispatch_write(mr, addr, value, + size_memop(size) | MO_TE, attrs); +} + +static MemTxResult v7m_systick_read(void *opaque, hwaddr addr, + uint64_t *data, unsigned size, + MemTxAttrs attrs) +{ + ARMv7MState *s = opaque; + MemoryRegion *mr; + + /* Direct the access to the correct systick */ + mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->systick[attrs.secure]), 0); + return memory_region_dispatch_read(mr, addr, data, size_memop(size) | MO_TE, + attrs); +} + +static const MemoryRegionOps v7m_systick_ops = { + .read_with_attrs = v7m_systick_read, + .write_with_attrs = v7m_systick_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +/* + * Unassigned portions of the PPB space are RAZ/WI for privileged + * accesses, and fault for non-privileged accesses. 
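
Concretely, the banking implemented by v7m_systick_read()/v7m_systick_write() above works like this: a load of SYST_CSR at 0xe000e010 issued with attrs.secure == 1 is dispatched to s->systick[M_REG_S], while the same load from Non-secure state lands on s->systick[M_REG_NS]; the 0xe002e010 alias created later in realize gives Secure code a window onto the NS bank.
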
+ */ +static MemTxResult ppb_default_read(void *opaque, hwaddr addr, + uint64_t *data, unsigned size, + MemTxAttrs attrs) +{ + qemu_log_mask(LOG_UNIMP, "Read of unassigned area of PPB: offset 0x%x\n", + (uint32_t)addr); + if (attrs.user) { + return MEMTX_ERROR; + } + *data = 0; + return MEMTX_OK; +} + +static MemTxResult ppb_default_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size, + MemTxAttrs attrs) +{ + qemu_log_mask(LOG_UNIMP, "Write of unassigned area of PPB: offset 0x%x\n", + (uint32_t)addr); + if (attrs.user) { + return MEMTX_ERROR; + } + return MEMTX_OK; +} + +static const MemoryRegionOps ppb_default_ops = { + .read_with_attrs = ppb_default_read, + .write_with_attrs = ppb_default_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid.min_access_size = 1, + .valid.max_access_size = 8, +}; + +static void armv7m_instance_init(Object *obj) +{ + ARMv7MState *s = ARMV7M(obj); + int i; + + /* Can't init the cpu here, we don't yet know which model to use */ + + memory_region_init(&s->container, obj, "armv7m-container", UINT64_MAX); + + object_initialize_child(obj, "nvic", &s->nvic, TYPE_NVIC); + object_property_add_alias(obj, "num-irq", + OBJECT(&s->nvic), "num-irq"); + + object_initialize_child(obj, "systick-reg-ns", &s->systick[M_REG_NS], + TYPE_SYSTICK); + /* + * We can't initialize the secure systick here, as we don't know + * yet if we need it. + */ + + for (i = 0; i < ARRAY_SIZE(s->bitband); i++) { + object_initialize_child(obj, "bitband[*]", &s->bitband[i], + TYPE_BITBAND); + } + + s->refclk = qdev_init_clock_in(DEVICE(obj), "refclk", NULL, NULL, 0); + s->cpuclk = qdev_init_clock_in(DEVICE(obj), "cpuclk", NULL, NULL, 0); +} + +static void armv7m_realize(DeviceState *dev, Error **errp) +{ + ARMv7MState *s = ARMV7M(dev); + SysBusDevice *sbd; + Error *err = NULL; + int i; + + if (!s->board_memory) { + error_setg(errp, "memory property was not set"); + return; + } + + memory_region_add_subregion_overlap(&s->container, 0, s->board_memory, -1); + + s->cpu = ARM_CPU(object_new_with_props(s->cpu_type, OBJECT(s), "cpu", + &err, NULL)); + if (err != NULL) { + error_propagate(errp, err); + return; + } + + object_property_set_link(OBJECT(s->cpu), "memory", OBJECT(&s->container), + &error_abort); + if (object_property_find(OBJECT(s->cpu), "idau")) { + object_property_set_link(OBJECT(s->cpu), "idau", s->idau, + &error_abort); + } + if (object_property_find(OBJECT(s->cpu), "init-svtor")) { + if (!object_property_set_uint(OBJECT(s->cpu), "init-svtor", + s->init_svtor, errp)) { + return; + } + } + if (object_property_find(OBJECT(s->cpu), "init-nsvtor")) { + if (!object_property_set_uint(OBJECT(s->cpu), "init-nsvtor", + s->init_nsvtor, errp)) { + return; + } + } + if (object_property_find(OBJECT(s->cpu), "start-powered-off")) { + if (!object_property_set_bool(OBJECT(s->cpu), "start-powered-off", + s->start_powered_off, errp)) { + return; + } + } + if (object_property_find(OBJECT(s->cpu), "vfp")) { + if (!object_property_set_bool(OBJECT(s->cpu), "vfp", s->vfp, errp)) { + return; + } + } + if (object_property_find(OBJECT(s->cpu), "dsp")) { + if (!object_property_set_bool(OBJECT(s->cpu), "dsp", s->dsp, errp)) { + return; + } + } + + /* + * Tell the CPU where the NVIC is; it will fail realize if it doesn't + * have one. Similarly, tell the NVIC where its CPU is. 
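
For example, with the ppb_default_ops above, a privileged read of 0xe0001000 (the unimplemented Data Watchpoint and Trace unit) logs the access with LOG_UNIMP and returns 0, while the same read from unprivileged code comes back as MEMTX_ERROR and is reported to the guest as a BusFault.
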
+ */ + s->cpu->env.nvic = &s->nvic; + s->nvic.cpu = s->cpu; + + if (!qdev_realize(DEVICE(s->cpu), NULL, errp)) { + return; + } + + /* Note that we must realize the NVIC after the CPU */ + if (!sysbus_realize(SYS_BUS_DEVICE(&s->nvic), errp)) { + return; + } + + /* Alias the NVIC's input and output GPIOs as our own so the board + * code can wire them up. (We do this in realize because the + * NVIC doesn't create the input GPIO array until realize.) + */ + qdev_pass_gpios(DEVICE(&s->nvic), dev, NULL); + qdev_pass_gpios(DEVICE(&s->nvic), dev, "SYSRESETREQ"); + qdev_pass_gpios(DEVICE(&s->nvic), dev, "NMI"); + + /* + * We map various devices into the container MR at their architected + * addresses. In particular, we map everything corresponding to the + * "System PPB" space. This is the range from 0xe0000000 to 0xe00fffff + * and includes the NVIC, the System Control Space (system registers), + * the systick timer, and for CPUs with the Security extension an NS + * banked version of all of these. + * + * The default behaviour for unimplemented registers/ranges + * (for instance the Data Watchpoint and Trace unit at 0xe0001000) + * is to RAZ/WI for privileged access and BusFault for non-privileged + * access. + * + * The NVIC and System Control Space (SCS) starts at 0xe000e000 + * and looks like this: + * 0x004 - ICTR + * 0x010 - 0xff - systick + * 0x100..0x7ec - NVIC + * 0x7f0..0xcff - Reserved + * 0xd00..0xd3c - SCS registers + * 0xd40..0xeff - Reserved or Not implemented + * 0xf00 - STIR + * + * Some registers within this space are banked between security states. + * In v8M there is a second range 0xe002e000..0xe002efff which is the + * NonSecure alias SCS; secure accesses to this behave like NS accesses + * to the main SCS range, and non-secure accesses (including when + * the security extension is not implemented) are RAZ/WI. + * Note that both the main SCS range and the alias range are defined + * to be exempt from memory attribution (R_BLJT) and so the memory + * transaction attribute always matches the current CPU security + * state (attrs.secure == env->v7m.secure). In the v7m_sysreg_ns_ops + * wrappers we change attrs.secure to indicate the NS access; so + * generally code determining which banked register to use should + * use attrs.secure; code determining actual behaviour of the system + * should use env->v7m.secure. + * + * Within the PPB space, some MRs overlap, and the priority + * of overlapping regions is: + * - default region (for RAZ/WI and BusFault) : -1 + * - system register regions (provided by the NVIC) : 0 + * - systick : 1 + * This is because the systick device is a small block of registers + * in the middle of the other system control registers. 
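
A concrete case of the aliasing described above: VTOR sits at SCS offset 0xd08, i.e. 0xe000ed08. A Secure access to 0xe002ed08 in the NS alias region is forwarded by v7m_sysreg_ns_ops with attrs.secure forced to 0 and therefore reaches the NS-banked VTOR; a Non-secure privileged access to the alias is RAZ/WI, and an unprivileged one gets a BusFault.
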
+ */ + + memory_region_init_io(&s->defaultmem, OBJECT(s), &ppb_default_ops, s, + "nvic-default", 0x100000); + memory_region_add_subregion_overlap(&s->container, 0xe0000000, + &s->defaultmem, -1); + + /* Wire the NVIC up to the CPU */ + sbd = SYS_BUS_DEVICE(&s->nvic); + sysbus_connect_irq(sbd, 0, + qdev_get_gpio_in(DEVICE(s->cpu), ARM_CPU_IRQ)); + + memory_region_add_subregion(&s->container, 0xe000e000, + sysbus_mmio_get_region(sbd, 0)); + if (arm_feature(&s->cpu->env, ARM_FEATURE_V8)) { + /* Create the NS alias region for the NVIC sysregs */ + memory_region_init_io(&s->sysreg_ns_mem, OBJECT(s), + &v7m_sysreg_ns_ops, + sysbus_mmio_get_region(sbd, 0), + "nvic_sysregs_ns", 0x1000); + memory_region_add_subregion(&s->container, 0xe002e000, + &s->sysreg_ns_mem); + } + + /* Create and map the systick devices */ + qdev_connect_clock_in(DEVICE(&s->systick[M_REG_NS]), "refclk", s->refclk); + qdev_connect_clock_in(DEVICE(&s->systick[M_REG_NS]), "cpuclk", s->cpuclk); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->systick[M_REG_NS]), errp)) { + return; + } + sysbus_connect_irq(SYS_BUS_DEVICE(&s->systick[M_REG_NS]), 0, + qdev_get_gpio_in_named(DEVICE(&s->nvic), + "systick-trigger", M_REG_NS)); + + if (arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY)) { + /* + * We couldn't init the secure systick device in instance_init + * as we didn't know then if the CPU had the security extensions; + * so we have to do it here. + */ + object_initialize_child(OBJECT(dev), "systick-reg-s", + &s->systick[M_REG_S], TYPE_SYSTICK); + qdev_connect_clock_in(DEVICE(&s->systick[M_REG_S]), "refclk", + s->refclk); + qdev_connect_clock_in(DEVICE(&s->systick[M_REG_S]), "cpuclk", + s->cpuclk); + + if (!sysbus_realize(SYS_BUS_DEVICE(&s->systick[M_REG_S]), errp)) { + return; + } + sysbus_connect_irq(SYS_BUS_DEVICE(&s->systick[M_REG_S]), 0, + qdev_get_gpio_in_named(DEVICE(&s->nvic), + "systick-trigger", M_REG_S)); + } + + memory_region_init_io(&s->systickmem, OBJECT(s), + &v7m_systick_ops, s, + "v7m_systick", 0xe0); + + memory_region_add_subregion_overlap(&s->container, 0xe000e010, + &s->systickmem, 1); + if (arm_feature(&s->cpu->env, ARM_FEATURE_V8)) { + memory_region_init_io(&s->systick_ns_mem, OBJECT(s), + &v7m_sysreg_ns_ops, &s->systickmem, + "v7m_systick_ns", 0xe0); + memory_region_add_subregion_overlap(&s->container, 0xe002e010, + &s->systick_ns_mem, 1); + } + + /* If the CPU has RAS support, create the RAS register block */ + if (cpu_isar_feature(aa32_ras, s->cpu)) { + object_initialize_child(OBJECT(dev), "armv7m-ras", + &s->ras, TYPE_ARMV7M_RAS); + sbd = SYS_BUS_DEVICE(&s->ras); + if (!sysbus_realize(sbd, errp)) { + return; + } + memory_region_add_subregion_overlap(&s->container, 0xe0005000, + sysbus_mmio_get_region(sbd, 0), 1); + } + + for (i = 0; i < ARRAY_SIZE(s->bitband); i++) { + if (s->enable_bitband) { + Object *obj = OBJECT(&s->bitband[i]); + SysBusDevice *sbd = SYS_BUS_DEVICE(&s->bitband[i]); + + if (!object_property_set_int(obj, "base", + bitband_input_addr[i], errp)) { + return; + } + object_property_set_link(obj, "source-memory", + OBJECT(s->board_memory), &error_abort); + if (!sysbus_realize(SYS_BUS_DEVICE(obj), errp)) { + return; + } + + memory_region_add_subregion(&s->container, bitband_output_addr[i], + sysbus_mmio_get_region(sbd, 0)); + } else { + object_unparent(OBJECT(&s->bitband[i])); + } + } +} + +static Property armv7m_properties[] = { + DEFINE_PROP_STRING("cpu-type", ARMv7MState, cpu_type), + DEFINE_PROP_LINK("memory", ARMv7MState, board_memory, TYPE_MEMORY_REGION, + MemoryRegion *), + 
DEFINE_PROP_LINK("idau", ARMv7MState, idau, TYPE_IDAU_INTERFACE, Object *), + DEFINE_PROP_UINT32("init-svtor", ARMv7MState, init_svtor, 0), + DEFINE_PROP_UINT32("init-nsvtor", ARMv7MState, init_nsvtor, 0), + DEFINE_PROP_BOOL("enable-bitband", ARMv7MState, enable_bitband, false), + DEFINE_PROP_BOOL("start-powered-off", ARMv7MState, start_powered_off, + false), + DEFINE_PROP_BOOL("vfp", ARMv7MState, vfp, true), + DEFINE_PROP_BOOL("dsp", ARMv7MState, dsp, true), + DEFINE_PROP_END_OF_LIST(), +}; + +static const VMStateDescription vmstate_armv7m = { + .name = "armv7m", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_CLOCK(refclk, SysTickState), + VMSTATE_CLOCK(cpuclk, SysTickState), + VMSTATE_END_OF_LIST() + } +}; + +static void armv7m_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = armv7m_realize; + dc->vmsd = &vmstate_armv7m; + device_class_set_props(dc, armv7m_properties); +} + +static const TypeInfo armv7m_info = { + .name = TYPE_ARMV7M, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(ARMv7MState), + .instance_init = armv7m_instance_init, + .class_init = armv7m_class_init, +}; + +static void armv7m_reset(void *opaque) +{ + ARMCPU *cpu = opaque; + + cpu_reset(CPU(cpu)); +} + +void armv7m_load_kernel(ARMCPU *cpu, const char *kernel_filename, int mem_size) +{ + int image_size; + uint64_t entry; + int big_endian; + AddressSpace *as; + int asidx; + CPUState *cs = CPU(cpu); + +#ifdef TARGET_WORDS_BIGENDIAN + big_endian = 1; +#else + big_endian = 0; +#endif + + if (arm_feature(&cpu->env, ARM_FEATURE_EL3)) { + asidx = ARMASIdx_S; + } else { + asidx = ARMASIdx_NS; + } + as = cpu_get_address_space(cs, asidx); + + if (kernel_filename) { + image_size = load_elf_as(kernel_filename, NULL, NULL, NULL, + &entry, NULL, NULL, + NULL, big_endian, EM_ARM, 1, 0, as); + if (image_size < 0) { + image_size = load_image_targphys_as(kernel_filename, 0, + mem_size, as); + } + if (image_size < 0) { + error_report("Could not load kernel '%s'", kernel_filename); + exit(1); + } + } + + /* CPU objects (unlike devices) are not automatically reset on system + * reset, so we must always register a handler to do so. Unlike + * A-profile CPUs, we don't need to do anything special in the + * handler to arrange that it starts correctly. + * This is arguably the wrong place to do this, but it matches the + * way A-profile does it. Note that this means that every M profile + * board must call this function! 
+ */ + qemu_register_reset(armv7m_reset, cpu); +} + +static Property bitband_properties[] = { + DEFINE_PROP_UINT32("base", BitBandState, base, 0), + DEFINE_PROP_LINK("source-memory", BitBandState, source_memory, + TYPE_MEMORY_REGION, MemoryRegion *), + DEFINE_PROP_END_OF_LIST(), +}; + +static void bitband_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = bitband_realize; + device_class_set_props(dc, bitband_properties); +} + +static const TypeInfo bitband_info = { + .name = TYPE_BITBAND, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(BitBandState), + .instance_init = bitband_init, + .class_init = bitband_class_init, +}; + +static void armv7m_register_types(void) +{ + type_register_static(&bitband_info); + type_register_static(&armv7m_info); +} + +type_init(armv7m_register_types) diff --git a/hw/arm/aspeed.c b/hw/arm/aspeed.c new file mode 100644 index 000000000..a77f46b3a --- /dev/null +++ b/hw/arm/aspeed.c @@ -0,0 +1,1327 @@ +/* + * OpenPOWER Palmetto BMC + * + * Andrew Jeffery + * + * Copyright 2016 IBM Corp. + * + * This code is licensed under the GPL version 2 or later. See + * the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "hw/arm/boot.h" +#include "hw/arm/aspeed.h" +#include "hw/arm/aspeed_soc.h" +#include "hw/i2c/i2c_mux_pca954x.h" +#include "hw/i2c/smbus_eeprom.h" +#include "hw/misc/pca9552.h" +#include "hw/sensor/tmp105.h" +#include "hw/misc/led.h" +#include "hw/qdev-properties.h" +#include "sysemu/block-backend.h" +#include "hw/loader.h" +#include "qemu/error-report.h" +#include "qemu/units.h" + +static struct arm_boot_info aspeed_board_binfo = { + .board_id = -1, /* device-tree-only board */ +}; + +struct AspeedMachineState { + /* Private */ + MachineState parent_obj; + /* Public */ + + AspeedSoCState soc; + MemoryRegion ram_container; + MemoryRegion max_ram; + bool mmio_exec; + char *fmc_model; + char *spi_model; +}; + +/* Palmetto hardware value: 0x120CE416 */ +#define PALMETTO_BMC_HW_STRAP1 ( \ + SCU_AST2400_HW_STRAP_DRAM_SIZE(DRAM_SIZE_256MB) | \ + SCU_AST2400_HW_STRAP_DRAM_CONFIG(2 /* DDR3 with CL=6, CWL=5 */) | \ + SCU_AST2400_HW_STRAP_ACPI_DIS | \ + SCU_AST2400_HW_STRAP_SET_CLK_SOURCE(AST2400_CLK_48M_IN) | \ + SCU_HW_STRAP_VGA_CLASS_CODE | \ + SCU_HW_STRAP_LPC_RESET_PIN | \ + SCU_HW_STRAP_SPI_MODE(SCU_HW_STRAP_SPI_M_S_EN) | \ + SCU_AST2400_HW_STRAP_SET_CPU_AHB_RATIO(AST2400_CPU_AHB_RATIO_2_1) | \ + SCU_HW_STRAP_SPI_WIDTH | \ + SCU_HW_STRAP_VGA_SIZE_SET(VGA_16M_DRAM) | \ + SCU_AST2400_HW_STRAP_BOOT_MODE(AST2400_SPI_BOOT)) + +/* TODO: Find the actual hardware value */ +#define SUPERMICROX11_BMC_HW_STRAP1 ( \ + SCU_AST2400_HW_STRAP_DRAM_SIZE(DRAM_SIZE_128MB) | \ + SCU_AST2400_HW_STRAP_DRAM_CONFIG(2) | \ + SCU_AST2400_HW_STRAP_ACPI_DIS | \ + SCU_AST2400_HW_STRAP_SET_CLK_SOURCE(AST2400_CLK_48M_IN) | \ + SCU_HW_STRAP_VGA_CLASS_CODE | \ + SCU_HW_STRAP_LPC_RESET_PIN | \ + SCU_HW_STRAP_SPI_MODE(SCU_HW_STRAP_SPI_M_S_EN) | \ + SCU_AST2400_HW_STRAP_SET_CPU_AHB_RATIO(AST2400_CPU_AHB_RATIO_2_1) | \ + SCU_HW_STRAP_SPI_WIDTH | \ + SCU_HW_STRAP_VGA_SIZE_SET(VGA_16M_DRAM) | \ + SCU_AST2400_HW_STRAP_BOOT_MODE(AST2400_SPI_BOOT)) + +/* AST2500 evb hardware value: 0xF100C2E6 */ +#define AST2500_EVB_HW_STRAP1 (( \ + AST2500_HW_STRAP1_DEFAULTS | \ + SCU_AST2500_HW_STRAP_SPI_AUTOFETCH_ENABLE | \ + SCU_AST2500_HW_STRAP_GPIO_STRAP_ENABLE | \ + SCU_AST2500_HW_STRAP_UART_DEBUG | \ + SCU_AST2500_HW_STRAP_DDR4_ENABLE | \ + SCU_HW_STRAP_MAC1_RGMII | \ + 
SCU_HW_STRAP_MAC0_RGMII) & \ + ~SCU_HW_STRAP_2ND_BOOT_WDT) + +/* Romulus hardware value: 0xF10AD206 */ +#define ROMULUS_BMC_HW_STRAP1 ( \ + AST2500_HW_STRAP1_DEFAULTS | \ + SCU_AST2500_HW_STRAP_SPI_AUTOFETCH_ENABLE | \ + SCU_AST2500_HW_STRAP_GPIO_STRAP_ENABLE | \ + SCU_AST2500_HW_STRAP_UART_DEBUG | \ + SCU_AST2500_HW_STRAP_DDR4_ENABLE | \ + SCU_AST2500_HW_STRAP_ACPI_ENABLE | \ + SCU_HW_STRAP_SPI_MODE(SCU_HW_STRAP_SPI_MASTER)) + +/* Sonorapass hardware value: 0xF100D216 */ +#define SONORAPASS_BMC_HW_STRAP1 ( \ + SCU_AST2500_HW_STRAP_SPI_AUTOFETCH_ENABLE | \ + SCU_AST2500_HW_STRAP_GPIO_STRAP_ENABLE | \ + SCU_AST2500_HW_STRAP_UART_DEBUG | \ + SCU_AST2500_HW_STRAP_RESERVED28 | \ + SCU_AST2500_HW_STRAP_DDR4_ENABLE | \ + SCU_HW_STRAP_VGA_CLASS_CODE | \ + SCU_HW_STRAP_LPC_RESET_PIN | \ + SCU_HW_STRAP_SPI_MODE(SCU_HW_STRAP_SPI_MASTER) | \ + SCU_AST2500_HW_STRAP_SET_AXI_AHB_RATIO(AXI_AHB_RATIO_2_1) | \ + SCU_HW_STRAP_VGA_BIOS_ROM | \ + SCU_HW_STRAP_VGA_SIZE_SET(VGA_16M_DRAM) | \ + SCU_AST2500_HW_STRAP_RESERVED1) + +/* Swift hardware value: 0xF11AD206 */ +#define SWIFT_BMC_HW_STRAP1 ( \ + AST2500_HW_STRAP1_DEFAULTS | \ + SCU_AST2500_HW_STRAP_SPI_AUTOFETCH_ENABLE | \ + SCU_AST2500_HW_STRAP_GPIO_STRAP_ENABLE | \ + SCU_AST2500_HW_STRAP_UART_DEBUG | \ + SCU_AST2500_HW_STRAP_DDR4_ENABLE | \ + SCU_H_PLL_BYPASS_EN | \ + SCU_AST2500_HW_STRAP_ACPI_ENABLE | \ + SCU_HW_STRAP_SPI_MODE(SCU_HW_STRAP_SPI_MASTER)) + +#define G220A_BMC_HW_STRAP1 ( \ + SCU_AST2500_HW_STRAP_SPI_AUTOFETCH_ENABLE | \ + SCU_AST2500_HW_STRAP_GPIO_STRAP_ENABLE | \ + SCU_AST2500_HW_STRAP_UART_DEBUG | \ + SCU_AST2500_HW_STRAP_RESERVED28 | \ + SCU_AST2500_HW_STRAP_DDR4_ENABLE | \ + SCU_HW_STRAP_2ND_BOOT_WDT | \ + SCU_HW_STRAP_VGA_CLASS_CODE | \ + SCU_HW_STRAP_LPC_RESET_PIN | \ + SCU_HW_STRAP_SPI_MODE(SCU_HW_STRAP_SPI_MASTER) | \ + SCU_AST2500_HW_STRAP_SET_AXI_AHB_RATIO(AXI_AHB_RATIO_2_1) | \ + SCU_HW_STRAP_VGA_SIZE_SET(VGA_64M_DRAM) | \ + SCU_AST2500_HW_STRAP_RESERVED1) + +/* FP5280G2 hardware value: 0XF100D286 */ +#define FP5280G2_BMC_HW_STRAP1 ( \ + SCU_AST2500_HW_STRAP_SPI_AUTOFETCH_ENABLE | \ + SCU_AST2500_HW_STRAP_GPIO_STRAP_ENABLE | \ + SCU_AST2500_HW_STRAP_UART_DEBUG | \ + SCU_AST2500_HW_STRAP_RESERVED28 | \ + SCU_AST2500_HW_STRAP_DDR4_ENABLE | \ + SCU_HW_STRAP_VGA_CLASS_CODE | \ + SCU_HW_STRAP_LPC_RESET_PIN | \ + SCU_HW_STRAP_SPI_MODE(SCU_HW_STRAP_SPI_MASTER) | \ + SCU_AST2500_HW_STRAP_SET_AXI_AHB_RATIO(AXI_AHB_RATIO_2_1) | \ + SCU_HW_STRAP_MAC1_RGMII | \ + SCU_HW_STRAP_VGA_SIZE_SET(VGA_16M_DRAM) | \ + SCU_AST2500_HW_STRAP_RESERVED1) + +/* Witherspoon hardware value: 0xF10AD216 (but use romulus definition) */ +#define WITHERSPOON_BMC_HW_STRAP1 ROMULUS_BMC_HW_STRAP1 + +/* Quanta-Q71l hardware value */ +#define QUANTA_Q71L_BMC_HW_STRAP1 ( \ + SCU_AST2400_HW_STRAP_DRAM_SIZE(DRAM_SIZE_128MB) | \ + SCU_AST2400_HW_STRAP_DRAM_CONFIG(2/* DDR3 with CL=6, CWL=5 */) | \ + SCU_AST2400_HW_STRAP_ACPI_DIS | \ + SCU_AST2400_HW_STRAP_SET_CLK_SOURCE(AST2400_CLK_24M_IN) | \ + SCU_HW_STRAP_VGA_CLASS_CODE | \ + SCU_HW_STRAP_SPI_MODE(SCU_HW_STRAP_SPI_PASS_THROUGH) | \ + SCU_AST2400_HW_STRAP_SET_CPU_AHB_RATIO(AST2400_CPU_AHB_RATIO_2_1) | \ + SCU_HW_STRAP_SPI_WIDTH | \ + SCU_HW_STRAP_VGA_SIZE_SET(VGA_8M_DRAM) | \ + SCU_AST2400_HW_STRAP_BOOT_MODE(AST2400_SPI_BOOT)) + +/* AST2600 evb hardware value */ +#define AST2600_EVB_HW_STRAP1 0x000000C0 +#define AST2600_EVB_HW_STRAP2 0x00000003 + +/* Tacoma hardware value */ +#define TACOMA_BMC_HW_STRAP1 0x00000000 +#define TACOMA_BMC_HW_STRAP2 0x00000040 + +/* Rainier hardware value: (QEMU prototype) */ +#define 
RAINIER_BMC_HW_STRAP1 0x00000000 +#define RAINIER_BMC_HW_STRAP2 0x00000000 + +/* Fuji hardware value */ +#define FUJI_BMC_HW_STRAP1 0x00000000 +#define FUJI_BMC_HW_STRAP2 0x00000000 + +/* + * The max ram region is for firmwares that scan the address space + * with load/store to guess how much RAM the SoC has. + */ +static uint64_t max_ram_read(void *opaque, hwaddr offset, unsigned size) +{ + return 0; +} + +static void max_ram_write(void *opaque, hwaddr offset, uint64_t value, + unsigned size) +{ + /* Discard writes */ +} + +static const MemoryRegionOps max_ram_ops = { + .read = max_ram_read, + .write = max_ram_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +#define AST_SMP_MAILBOX_BASE 0x1e6e2180 +#define AST_SMP_MBOX_FIELD_ENTRY (AST_SMP_MAILBOX_BASE + 0x0) +#define AST_SMP_MBOX_FIELD_GOSIGN (AST_SMP_MAILBOX_BASE + 0x4) +#define AST_SMP_MBOX_FIELD_READY (AST_SMP_MAILBOX_BASE + 0x8) +#define AST_SMP_MBOX_FIELD_POLLINSN (AST_SMP_MAILBOX_BASE + 0xc) +#define AST_SMP_MBOX_CODE (AST_SMP_MAILBOX_BASE + 0x10) +#define AST_SMP_MBOX_GOSIGN 0xabbaab00 + +static void aspeed_write_smpboot(ARMCPU *cpu, + const struct arm_boot_info *info) +{ + static const uint32_t poll_mailbox_ready[] = { + /* + * r2 = per-cpu go sign value + * r1 = AST_SMP_MBOX_FIELD_ENTRY + * r0 = AST_SMP_MBOX_FIELD_GOSIGN + */ + 0xee100fb0, /* mrc p15, 0, r0, c0, c0, 5 */ + 0xe21000ff, /* ands r0, r0, #255 */ + 0xe59f201c, /* ldr r2, [pc, #28] */ + 0xe1822000, /* orr r2, r2, r0 */ + + 0xe59f1018, /* ldr r1, [pc, #24] */ + 0xe59f0018, /* ldr r0, [pc, #24] */ + + 0xe320f002, /* wfe */ + 0xe5904000, /* ldr r4, [r0] */ + 0xe1520004, /* cmp r2, r4 */ + 0x1afffffb, /* bne */ + 0xe591f000, /* ldr pc, [r1] */ + AST_SMP_MBOX_GOSIGN, + AST_SMP_MBOX_FIELD_ENTRY, + AST_SMP_MBOX_FIELD_GOSIGN, + }; + + rom_add_blob_fixed("aspeed.smpboot", poll_mailbox_ready, + sizeof(poll_mailbox_ready), + info->smp_loader_start); +} + +static void aspeed_reset_secondary(ARMCPU *cpu, + const struct arm_boot_info *info) +{ + AddressSpace *as = arm_boot_address_space(cpu, info); + CPUState *cs = CPU(cpu); + + /* info->smp_bootreg_addr */ + address_space_stl_notdirty(as, AST_SMP_MBOX_FIELD_GOSIGN, 0, + MEMTXATTRS_UNSPECIFIED, NULL); + cpu_set_pc(cs, info->smp_loader_start); +} + +#define FIRMWARE_ADDR 0x0 + +static void write_boot_rom(DriveInfo *dinfo, hwaddr addr, size_t rom_size, + Error **errp) +{ + BlockBackend *blk = blk_by_legacy_dinfo(dinfo); + uint8_t *storage; + int64_t size; + + /* The block backend size should have already been 'validated' by + * the creation of the m25p80 object. 
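
The poll_mailbox_ready blob assembled above is easier to follow as C. Roughly, each secondary CPU executes the equivalent of the following paraphrase, where read_mpidr() and wfe() stand in for the MRC p15 and WFE instructions (this C does not exist in the patch):

    uint32_t expect = AST_SMP_MBOX_GOSIGN | (read_mpidr() & 0xff);

    do {
        wfe();
    } while (*(volatile uint32_t *)AST_SMP_MBOX_FIELD_GOSIGN != expect);
    ((void (*)(void))*(volatile uint32_t *)AST_SMP_MBOX_FIELD_ENTRY)();
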
+ */ + size = blk_getlength(blk); + if (size <= 0) { + error_setg(errp, "failed to get flash size"); + return; + } + + if (rom_size > size) { + rom_size = size; + } + + storage = g_new0(uint8_t, rom_size); + if (blk_pread(blk, 0, storage, rom_size) < 0) { + error_setg(errp, "failed to read the initial flash content"); + return; + } + + rom_add_blob_fixed("aspeed.boot_rom", storage, rom_size, addr); + g_free(storage); +} + +static void aspeed_board_init_flashes(AspeedSMCState *s, + const char *flashtype) +{ + int i; + + for (i = 0; i < s->num_cs; ++i) { + DriveInfo *dinfo = drive_get_next(IF_MTD); + qemu_irq cs_line; + DeviceState *dev; + + dev = qdev_new(flashtype); + if (dinfo) { + qdev_prop_set_drive(dev, "drive", blk_by_legacy_dinfo(dinfo)); + } + qdev_realize_and_unref(dev, BUS(s->spi), &error_fatal); + + cs_line = qdev_get_gpio_in_named(dev, SSI_GPIO_CS, 0); + sysbus_connect_irq(SYS_BUS_DEVICE(s), i + 1, cs_line); + } +} + +static void sdhci_attach_drive(SDHCIState *sdhci, DriveInfo *dinfo) +{ + DeviceState *card; + + if (!dinfo) { + return; + } + card = qdev_new(TYPE_SD_CARD); + qdev_prop_set_drive_err(card, "drive", blk_by_legacy_dinfo(dinfo), + &error_fatal); + qdev_realize_and_unref(card, + qdev_get_child_bus(DEVICE(sdhci), "sd-bus"), + &error_fatal); +} + +static void aspeed_machine_init(MachineState *machine) +{ + AspeedMachineState *bmc = ASPEED_MACHINE(machine); + AspeedMachineClass *amc = ASPEED_MACHINE_GET_CLASS(machine); + AspeedSoCClass *sc; + DriveInfo *drive0 = drive_get(IF_MTD, 0, 0); + ram_addr_t max_ram_size; + int i; + NICInfo *nd = &nd_table[0]; + + memory_region_init(&bmc->ram_container, NULL, "aspeed-ram-container", + 4 * GiB); + memory_region_add_subregion(&bmc->ram_container, 0, machine->ram); + + object_initialize_child(OBJECT(machine), "soc", &bmc->soc, amc->soc_name); + + sc = ASPEED_SOC_GET_CLASS(&bmc->soc); + + /* + * This will error out if the RAM size is not supported by the memory controller. + */ + object_property_set_uint(OBJECT(&bmc->soc), "ram-size", machine->ram_size, + &error_fatal); + + for (i = 0; i < sc->macs_num; i++) { + if ((amc->macs_mask & (1 << i)) && nd->used) { + qemu_check_nic_model(nd, TYPE_FTGMAC100); + qdev_set_nic_properties(DEVICE(&bmc->soc.ftgmac100[i]), nd); + nd++; + } + } + + object_property_set_int(OBJECT(&bmc->soc), "hw-strap1", amc->hw_strap1, + &error_abort); + object_property_set_int(OBJECT(&bmc->soc), "hw-strap2", amc->hw_strap2, + &error_abort); + object_property_set_int(OBJECT(&bmc->soc), "num-cs", amc->num_cs, + &error_abort); + object_property_set_link(OBJECT(&bmc->soc), "dram", + OBJECT(machine->ram), &error_abort); + if (machine->kernel_filename) { + /* + * When booting with a -kernel command line there is no u-boot + * that runs to unlock the SCU. In this case set the default to + * be unlocked as the kernel expects + */ + object_property_set_int(OBJECT(&bmc->soc), "hw-prot-key", + ASPEED_SCU_PROT_KEY, &error_abort); + } + qdev_prop_set_uint32(DEVICE(&bmc->soc), "uart-default", + amc->uart_default); + qdev_realize(DEVICE(&bmc->soc), NULL, &error_abort); + + memory_region_add_subregion(get_system_memory(), + sc->memmap[ASPEED_DEV_SDRAM], + &bmc->ram_container); + + max_ram_size = object_property_get_uint(OBJECT(&bmc->soc), "max-ram-size", + &error_abort); + memory_region_init_io(&bmc->max_ram, NULL, &max_ram_ops, NULL, + "max_ram", max_ram_size - machine->ram_size); + memory_region_add_subregion(&bmc->ram_container, machine->ram_size, &bmc->max_ram); + + aspeed_board_init_flashes(&bmc->soc.fmc, bmc->fmc_model ?
+ bmc->fmc_model : amc->fmc_model); + aspeed_board_init_flashes(&bmc->soc.spi[0], bmc->spi_model ? + bmc->spi_model : amc->spi_model); + + /* Install first FMC flash content as a boot rom. */ + if (drive0) { + AspeedSMCFlash *fl = &bmc->soc.fmc.flashes[0]; + MemoryRegion *boot_rom = g_new(MemoryRegion, 1); + uint64_t size = memory_region_size(&fl->mmio); + + /* + * create a ROM region using the default mapping window size of + * the flash module. The window size is 64MB for the AST2400 + * SoC and 128MB for the AST2500 SoC, which is twice as big as + * needed by the flash modules of the Aspeed machines. + */ + if (ASPEED_MACHINE(machine)->mmio_exec) { + memory_region_init_alias(boot_rom, NULL, "aspeed.boot_rom", + &fl->mmio, 0, size); + memory_region_add_subregion(get_system_memory(), FIRMWARE_ADDR, + boot_rom); + } else { + memory_region_init_rom(boot_rom, NULL, "aspeed.boot_rom", + size, &error_abort); + memory_region_add_subregion(get_system_memory(), FIRMWARE_ADDR, + boot_rom); + write_boot_rom(drive0, FIRMWARE_ADDR, size, &error_abort); + } + } + + if (machine->kernel_filename && sc->num_cpus > 1) { + /* With no u-boot we must set up a boot stub for the secondary CPU */ + MemoryRegion *smpboot = g_new(MemoryRegion, 1); + memory_region_init_ram(smpboot, NULL, "aspeed.smpboot", + 0x80, &error_abort); + memory_region_add_subregion(get_system_memory(), + AST_SMP_MAILBOX_BASE, smpboot); + + aspeed_board_binfo.write_secondary_boot = aspeed_write_smpboot; + aspeed_board_binfo.secondary_cpu_reset_hook = aspeed_reset_secondary; + aspeed_board_binfo.smp_loader_start = AST_SMP_MBOX_CODE; + } + + aspeed_board_binfo.ram_size = machine->ram_size; + aspeed_board_binfo.loader_start = sc->memmap[ASPEED_DEV_SDRAM]; + aspeed_board_binfo.nb_cpus = sc->num_cpus; + + if (amc->i2c_init) { + amc->i2c_init(bmc); + } + + for (i = 0; i < bmc->soc.sdhci.num_slots; i++) { + sdhci_attach_drive(&bmc->soc.sdhci.slots[i], drive_get_next(IF_SD)); + } + + if (bmc->soc.emmc.num_slots) { + sdhci_attach_drive(&bmc->soc.emmc.slots[0], drive_get_next(IF_SD)); + } + + arm_load_kernel(ARM_CPU(first_cpu), machine, &aspeed_board_binfo); +} + +static void at24c_eeprom_init(I2CBus *bus, uint8_t addr, uint32_t rsize) +{ + I2CSlave *i2c_dev = i2c_slave_new("at24c-eeprom", addr); + DeviceState *dev = DEVICE(i2c_dev); + + qdev_prop_set_uint32(dev, "rom-size", rsize); + i2c_slave_realize_and_unref(i2c_dev, bus, &error_abort); +} + +static void palmetto_bmc_i2c_init(AspeedMachineState *bmc) +{ + AspeedSoCState *soc = &bmc->soc; + DeviceState *dev; + uint8_t *eeprom_buf = g_malloc0(32 * 1024); + + /* The palmetto platform expects a ds3231 RTC but a ds1338 is + * enough to provide basic RTC features. Alarms will be missing */ + i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 0), "ds1338", 0x68); + + smbus_eeprom_init_one(aspeed_i2c_get_bus(&soc->i2c, 0), 0x50, + eeprom_buf); + + /* add a TMP423 temperature sensor */ + dev = DEVICE(i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 2), + "tmp423", 0x4c)); + object_property_set_int(OBJECT(dev), "temperature0", 31000, &error_abort); + object_property_set_int(OBJECT(dev), "temperature1", 28000, &error_abort); + object_property_set_int(OBJECT(dev), "temperature2", 20000, &error_abort); + object_property_set_int(OBJECT(dev), "temperature3", 110000, &error_abort); +} + +static void quanta_q71l_bmc_i2c_init(AspeedMachineState *bmc) +{ + AspeedSoCState *soc = &bmc->soc; + + /* + * The quanta-q71l platform expects tmp75s which are compatible with + * tmp105s. 
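
Note that the aspeed_board_init_flashes() calls above consume "-drive ...,if=mtd" entries in command-line order, the FMC controller's chip selects first and then the SPI controller's; an illustrative invocation (file names are made up):

    qemu-system-arm -M palmetto-bmc \
        -drive file=bmc-flash.img,format=raw,if=mtd \
        -drive file=pnor.img,format=raw,if=mtd
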
+ */ + i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 1), "tmp105", 0x4c); + i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 1), "tmp105", 0x4e); + i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 1), "tmp105", 0x4f); + + /* TODO: i2c-1: Add baseboard FRU eeprom@54 24c64 */ + /* TODO: i2c-1: Add Frontpanel FRU eeprom@57 24c64 */ + /* TODO: Add Memory Riser i2c mux and eeproms. */ + + i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 2), "pca9546", 0x74); + i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 2), "pca9548", 0x77); + + /* TODO: i2c-3: Add BIOS FRU eeprom@56 24c64 */ + + /* i2c-7 */ + i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 7), "pca9546", 0x70); + /* - i2c@0: pmbus@59 */ + /* - i2c@1: pmbus@58 */ + /* - i2c@2: pmbus@58 */ + /* - i2c@3: pmbus@59 */ + + /* TODO: i2c-7: Add PDB FRU eeprom@52 */ + /* TODO: i2c-8: Add BMC FRU eeprom@50 */ +} + +static void ast2500_evb_i2c_init(AspeedMachineState *bmc) +{ + AspeedSoCState *soc = &bmc->soc; + uint8_t *eeprom_buf = g_malloc0(8 * 1024); + + smbus_eeprom_init_one(aspeed_i2c_get_bus(&soc->i2c, 3), 0x50, + eeprom_buf); + + /* The AST2500 EVB expects a LM75 but a TMP105 is compatible */ + i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 7), + TYPE_TMP105, 0x4d); + + /* The AST2500 EVB does not have an RTC. Let's pretend that one is + * plugged on the I2C bus header */ + i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 11), "ds1338", 0x32); +} + +static void ast2600_evb_i2c_init(AspeedMachineState *bmc) +{ + /* Start with some devices on our I2C busses */ + ast2500_evb_i2c_init(bmc); +} + +static void romulus_bmc_i2c_init(AspeedMachineState *bmc) +{ + AspeedSoCState *soc = &bmc->soc; + + /* The romulus board expects Epson RX8900 I2C RTC but a ds1338 is + * good enough */ + i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 11), "ds1338", 0x32); +} + +static void swift_bmc_i2c_init(AspeedMachineState *bmc) +{ + AspeedSoCState *soc = &bmc->soc; + + i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 3), "pca9552", 0x60); + + /* The swift board expects a TMP275 but a TMP105 is compatible */ + i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 7), "tmp105", 0x48); + /* The swift board expects a pca9551 but a pca9552 is compatible */ + i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 7), "pca9552", 0x60); + + /* The swift board expects an Epson RX8900 RTC but a ds1338 is compatible */ + i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 8), "ds1338", 0x32); + i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 8), "pca9552", 0x60); + + i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 9), "tmp423", 0x4c); + /* The swift board expects a pca9539 but a pca9552 is compatible */ + i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 9), "pca9552", 0x74); + + i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 10), "tmp423", 0x4c); + /* The swift board expects a pca9539 but a pca9552 is compatible */ + i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 10), "pca9552", + 0x74); + + /* The swift board expects a TMP275 but a TMP105 is compatible */ + i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 12), "tmp105", 0x48); + i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 12), "tmp105", 0x4a); +} + +static void sonorapass_bmc_i2c_init(AspeedMachineState *bmc) +{ + AspeedSoCState *soc = &bmc->soc; + + /* bus 2 : */ + i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 2), "tmp105", 0x48); + i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 2), "tmp105", 0x49); + /* 
bus 2 : pca9546 @ 0x73 */ + + /* bus 3 : pca9548 @ 0x70 */ + + /* bus 4 : */ + uint8_t *eeprom4_54 = g_malloc0(8 * 1024); + smbus_eeprom_init_one(aspeed_i2c_get_bus(&soc->i2c, 4), 0x54, + eeprom4_54); + /* PCA9539 @ 0x76, but PCA9552 is compatible */ + i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 4), "pca9552", 0x76); + /* PCA9539 @ 0x77, but PCA9552 is compatible */ + i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 4), "pca9552", 0x77); + + /* bus 6 : */ + i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 6), "tmp105", 0x48); + i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 6), "tmp105", 0x49); + /* bus 6 : pca9546 @ 0x73 */ + + /* bus 8 : */ + uint8_t *eeprom8_56 = g_malloc0(8 * 1024); + smbus_eeprom_init_one(aspeed_i2c_get_bus(&soc->i2c, 8), 0x56, + eeprom8_56); + i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 8), "pca9552", 0x60); + i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 8), "pca9552", 0x61); + /* bus 8 : adc128d818 @ 0x1d */ + /* bus 8 : adc128d818 @ 0x1f */ + + /* + * bus 13 : pca9548 @ 0x71 + * - channel 3: + * - tmm421 @ 0x4c + * - tmp421 @ 0x4e + * - tmp421 @ 0x4f + */ + +} + +static void witherspoon_bmc_i2c_init(AspeedMachineState *bmc) +{ + static const struct { + unsigned gpio_id; + LEDColor color; + const char *description; + bool gpio_polarity; + } pca1_leds[] = { + {13, LED_COLOR_GREEN, "front-fault-4", GPIO_POLARITY_ACTIVE_LOW}, + {14, LED_COLOR_GREEN, "front-power-3", GPIO_POLARITY_ACTIVE_LOW}, + {15, LED_COLOR_GREEN, "front-id-5", GPIO_POLARITY_ACTIVE_LOW}, + }; + AspeedSoCState *soc = &bmc->soc; + uint8_t *eeprom_buf = g_malloc0(8 * 1024); + DeviceState *dev; + LEDState *led; + + /* Bus 3: TODO bmp280@77 */ + /* Bus 3: TODO max31785@52 */ + dev = DEVICE(i2c_slave_new(TYPE_PCA9552, 0x60)); + qdev_prop_set_string(dev, "description", "pca1"); + i2c_slave_realize_and_unref(I2C_SLAVE(dev), + aspeed_i2c_get_bus(&soc->i2c, 3), + &error_fatal); + + for (size_t i = 0; i < ARRAY_SIZE(pca1_leds); i++) { + led = led_create_simple(OBJECT(bmc), + pca1_leds[i].gpio_polarity, + pca1_leds[i].color, + pca1_leds[i].description); + qdev_connect_gpio_out(dev, pca1_leds[i].gpio_id, + qdev_get_gpio_in(DEVICE(led), 0)); + } + i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 3), "dps310", 0x76); + i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 4), "tmp423", 0x4c); + i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 5), "tmp423", 0x4c); + + /* The Witherspoon expects a TMP275 but a TMP105 is compatible */ + i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 9), TYPE_TMP105, + 0x4a); + + /* The witherspoon board expects Epson RX8900 I2C RTC but a ds1338 is + * good enough */ + i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 11), "ds1338", 0x32); + + smbus_eeprom_init_one(aspeed_i2c_get_bus(&soc->i2c, 11), 0x51, + eeprom_buf); + dev = DEVICE(i2c_slave_new(TYPE_PCA9552, 0x60)); + qdev_prop_set_string(dev, "description", "pca0"); + i2c_slave_realize_and_unref(I2C_SLAVE(dev), + aspeed_i2c_get_bus(&soc->i2c, 11), + &error_fatal); + /* Bus 11: TODO ucd90160@64 */ +} + +static void g220a_bmc_i2c_init(AspeedMachineState *bmc) +{ + AspeedSoCState *soc = &bmc->soc; + DeviceState *dev; + + dev = DEVICE(i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 3), + "emc1413", 0x4c)); + object_property_set_int(OBJECT(dev), "temperature0", 31000, &error_abort); + object_property_set_int(OBJECT(dev), "temperature1", 28000, &error_abort); + object_property_set_int(OBJECT(dev), "temperature2", 20000, &error_abort); + + dev = 
DEVICE(i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 12), + "emc1413", 0x4c)); + object_property_set_int(OBJECT(dev), "temperature0", 31000, &error_abort); + object_property_set_int(OBJECT(dev), "temperature1", 28000, &error_abort); + object_property_set_int(OBJECT(dev), "temperature2", 20000, &error_abort); + + dev = DEVICE(i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 13), + "emc1413", 0x4c)); + object_property_set_int(OBJECT(dev), "temperature0", 31000, &error_abort); + object_property_set_int(OBJECT(dev), "temperature1", 28000, &error_abort); + object_property_set_int(OBJECT(dev), "temperature2", 20000, &error_abort); + + static uint8_t eeprom_buf[2 * 1024] = { + 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0xfe, + 0x01, 0x06, 0x00, 0xc9, 0x42, 0x79, 0x74, 0x65, + 0x64, 0x61, 0x6e, 0x63, 0x65, 0xc5, 0x47, 0x32, + 0x32, 0x30, 0x41, 0xc4, 0x41, 0x41, 0x42, 0x42, + 0xc4, 0x43, 0x43, 0x44, 0x44, 0xc4, 0x45, 0x45, + 0x46, 0x46, 0xc4, 0x48, 0x48, 0x47, 0x47, 0xc1, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa7, + }; + smbus_eeprom_init_one(aspeed_i2c_get_bus(&soc->i2c, 4), 0x57, + eeprom_buf); +} + +static void aspeed_eeprom_init(I2CBus *bus, uint8_t addr, uint32_t rsize) +{ + I2CSlave *i2c_dev = i2c_slave_new("at24c-eeprom", addr); + DeviceState *dev = DEVICE(i2c_dev); + + qdev_prop_set_uint32(dev, "rom-size", rsize); + i2c_slave_realize_and_unref(i2c_dev, bus, &error_abort); +} + +static void fp5280g2_bmc_i2c_init(AspeedMachineState *bmc) +{ + AspeedSoCState *soc = &bmc->soc; + I2CSlave *i2c_mux; + + /* The at24c256 */ + at24c_eeprom_init(aspeed_i2c_get_bus(&soc->i2c, 1), 0x50, 32768); + + /* The fp5280g2 expects a TMP112 but a TMP105 is compatible */ + i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 2), TYPE_TMP105, + 0x48); + i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 2), TYPE_TMP105, + 0x49); + + i2c_mux = i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 2), + "pca9546", 0x70); + /* It expects a TMP112 but a TMP105 is compatible */ + i2c_slave_create_simple(pca954x_i2c_get_bus(i2c_mux, 0), TYPE_TMP105, + 0x4a); + + /* It expects a ds3232 but a ds1338 is good enough */ + i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 4), "ds1338", 0x68); + + /* It expects a pca9555 but a pca9552 is compatible */ + i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 8), TYPE_PCA9552, + 0x20); +} + +static void rainier_bmc_i2c_init(AspeedMachineState *bmc) +{ + AspeedSoCState *soc = &bmc->soc; + I2CSlave *i2c_mux; + + aspeed_eeprom_init(aspeed_i2c_get_bus(&soc->i2c, 0), 0x51, 32 * KiB); + + /* The rainier expects a TMP275 but a TMP105 is compatible */ + i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 4), TYPE_TMP105, + 0x48); + i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 4), TYPE_TMP105, + 0x49); + i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 4), TYPE_TMP105, + 0x4a); + i2c_mux = i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 4), + "pca9546", 0x70); + aspeed_eeprom_init(pca954x_i2c_get_bus(i2c_mux, 0), 0x50, 64 * KiB); + aspeed_eeprom_init(pca954x_i2c_get_bus(i2c_mux, 1), 0x51, 64 * KiB); + aspeed_eeprom_init(pca954x_i2c_get_bus(i2c_mux, 2), 0x52, 64 * KiB); + + i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 5), TYPE_TMP105, + 0x48); + i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 5), TYPE_TMP105, + 0x49); + i2c_mux = i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 5), + "pca9546", 0x70); + aspeed_eeprom_init(pca954x_i2c_get_bus(i2c_mux, 0), 0x50, 64 * KiB); + aspeed_eeprom_init(pca954x_i2c_get_bus(i2c_mux, 
1), 0x51, 64 * KiB); + + i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 6), TYPE_TMP105, + 0x48); + i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 6), TYPE_TMP105, + 0x4a); + i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 6), TYPE_TMP105, + 0x4b); + i2c_mux = i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 6), + "pca9546", 0x70); + aspeed_eeprom_init(pca954x_i2c_get_bus(i2c_mux, 0), 0x50, 64 * KiB); + aspeed_eeprom_init(pca954x_i2c_get_bus(i2c_mux, 1), 0x51, 64 * KiB); + aspeed_eeprom_init(pca954x_i2c_get_bus(i2c_mux, 2), 0x50, 64 * KiB); + aspeed_eeprom_init(pca954x_i2c_get_bus(i2c_mux, 3), 0x51, 64 * KiB); + + /* Bus 7: TODO max31785@52 */ + i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 7), "pca9552", 0x61); + i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 7), "dps310", 0x76); + /* Bus 7: TODO si7021-a20@20 */ + i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 7), TYPE_TMP105, + 0x48); + aspeed_eeprom_init(aspeed_i2c_get_bus(&soc->i2c, 7), 0x50, 64 * KiB); + aspeed_eeprom_init(aspeed_i2c_get_bus(&soc->i2c, 7), 0x51, 64 * KiB); + + i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 8), TYPE_TMP105, + 0x48); + i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 8), TYPE_TMP105, + 0x4a); + aspeed_eeprom_init(aspeed_i2c_get_bus(&soc->i2c, 8), 0x50, 64 * KiB); + aspeed_eeprom_init(aspeed_i2c_get_bus(&soc->i2c, 8), 0x51, 64 * KiB); + i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 8), "pca9552", 0x61); + /* Bus 8: ucd90320@11 */ + /* Bus 8: ucd90320@b */ + /* Bus 8: ucd90320@c */ + + i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 9), "tmp423", 0x4c); + i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 9), "tmp423", 0x4d); + aspeed_eeprom_init(aspeed_i2c_get_bus(&soc->i2c, 9), 0x50, 128 * KiB); + + i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 10), "tmp423", 0x4c); + i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 10), "tmp423", 0x4d); + aspeed_eeprom_init(aspeed_i2c_get_bus(&soc->i2c, 10), 0x50, 128 * KiB); + + i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 11), TYPE_TMP105, + 0x48); + i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 11), TYPE_TMP105, + 0x49); + i2c_mux = i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 11), + "pca9546", 0x70); + aspeed_eeprom_init(pca954x_i2c_get_bus(i2c_mux, 0), 0x50, 64 * KiB); + aspeed_eeprom_init(pca954x_i2c_get_bus(i2c_mux, 1), 0x51, 64 * KiB); + + + aspeed_eeprom_init(aspeed_i2c_get_bus(&soc->i2c, 13), 0x50, 64 * KiB); + + aspeed_eeprom_init(aspeed_i2c_get_bus(&soc->i2c, 14), 0x50, 64 * KiB); + + aspeed_eeprom_init(aspeed_i2c_get_bus(&soc->i2c, 15), 0x50, 64 * KiB); +} + +static void get_pca9548_channels(I2CBus *bus, uint8_t mux_addr, + I2CBus **channels) +{ + I2CSlave *mux = i2c_slave_create_simple(bus, "pca9548", mux_addr); + for (int i = 0; i < 8; i++) { + channels[i] = pca954x_i2c_get_bus(mux, i); + } +} + +#define TYPE_LM75 TYPE_TMP105 +#define TYPE_TMP75 TYPE_TMP105 +#define TYPE_TMP422 "tmp422" + +static void fuji_bmc_i2c_init(AspeedMachineState *bmc) +{ + AspeedSoCState *soc = &bmc->soc; + I2CBus *i2c[144] = {}; + + for (int i = 0; i < 16; i++) { + i2c[i] = aspeed_i2c_get_bus(&soc->i2c, i); + } + I2CBus *i2c180 = i2c[2]; + I2CBus *i2c480 = i2c[8]; + I2CBus *i2c600 = i2c[11]; + + get_pca9548_channels(i2c180, 0x70, &i2c[16]); + get_pca9548_channels(i2c480, 0x70, &i2c[24]); + /* NOTE: The device tree skips [32, 40) in the alias numbering */ + get_pca9548_channels(i2c600, 0x77, &i2c[40]); + get_pca9548_channels(i2c[24], 0x71, &i2c[48]); + 
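/* + * Descriptive note (inferred from the calls around it): each pca9548 + * exposes 8 child buses, so this cascade fills i2c[] in blocks of eight, + * i2c[24] @ 0x71 feeding i2c[48..55], i2c[25] @ 0x72 feeding i2c[56..63], + * and so on, following the device tree alias numbering mentioned above. + */ +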
get_pca9548_channels(i2c[25], 0x72, &i2c[56]); + get_pca9548_channels(i2c[26], 0x76, &i2c[64]); + get_pca9548_channels(i2c[27], 0x76, &i2c[72]); + for (int i = 0; i < 8; i++) { + get_pca9548_channels(i2c[40 + i], 0x76, &i2c[80 + i * 8]); + } + + i2c_slave_create_simple(i2c[17], TYPE_LM75, 0x4c); + i2c_slave_create_simple(i2c[17], TYPE_LM75, 0x4d); + + aspeed_eeprom_init(i2c[19], 0x52, 64 * KiB); + aspeed_eeprom_init(i2c[20], 0x50, 2 * KiB); + aspeed_eeprom_init(i2c[22], 0x52, 2 * KiB); + + i2c_slave_create_simple(i2c[3], TYPE_LM75, 0x48); + i2c_slave_create_simple(i2c[3], TYPE_LM75, 0x49); + i2c_slave_create_simple(i2c[3], TYPE_LM75, 0x4a); + i2c_slave_create_simple(i2c[3], TYPE_TMP422, 0x4c); + + aspeed_eeprom_init(i2c[8], 0x51, 64 * KiB); + i2c_slave_create_simple(i2c[8], TYPE_LM75, 0x4a); + + i2c_slave_create_simple(i2c[50], TYPE_LM75, 0x4c); + aspeed_eeprom_init(i2c[50], 0x52, 64 * KiB); + i2c_slave_create_simple(i2c[51], TYPE_TMP75, 0x48); + i2c_slave_create_simple(i2c[52], TYPE_TMP75, 0x49); + + i2c_slave_create_simple(i2c[59], TYPE_TMP75, 0x48); + i2c_slave_create_simple(i2c[60], TYPE_TMP75, 0x49); + + aspeed_eeprom_init(i2c[65], 0x53, 64 * KiB); + i2c_slave_create_simple(i2c[66], TYPE_TMP75, 0x49); + i2c_slave_create_simple(i2c[66], TYPE_TMP75, 0x48); + aspeed_eeprom_init(i2c[68], 0x52, 64 * KiB); + aspeed_eeprom_init(i2c[69], 0x52, 64 * KiB); + aspeed_eeprom_init(i2c[70], 0x52, 64 * KiB); + aspeed_eeprom_init(i2c[71], 0x52, 64 * KiB); + + aspeed_eeprom_init(i2c[73], 0x53, 64 * KiB); + i2c_slave_create_simple(i2c[74], TYPE_TMP75, 0x49); + i2c_slave_create_simple(i2c[74], TYPE_TMP75, 0x48); + aspeed_eeprom_init(i2c[76], 0x52, 64 * KiB); + aspeed_eeprom_init(i2c[77], 0x52, 64 * KiB); + aspeed_eeprom_init(i2c[78], 0x52, 64 * KiB); + aspeed_eeprom_init(i2c[79], 0x52, 64 * KiB); + aspeed_eeprom_init(i2c[28], 0x50, 2 * KiB); + + for (int i = 0; i < 8; i++) { + aspeed_eeprom_init(i2c[81 + i * 8], 0x56, 64 * KiB); + i2c_slave_create_simple(i2c[82 + i * 8], TYPE_TMP75, 0x48); + i2c_slave_create_simple(i2c[83 + i * 8], TYPE_TMP75, 0x4b); + i2c_slave_create_simple(i2c[84 + i * 8], TYPE_TMP75, 0x4a); + } +} + +static bool aspeed_get_mmio_exec(Object *obj, Error **errp) +{ + return ASPEED_MACHINE(obj)->mmio_exec; +} + +static void aspeed_set_mmio_exec(Object *obj, bool value, Error **errp) +{ + ASPEED_MACHINE(obj)->mmio_exec = value; +} + +static void aspeed_machine_instance_init(Object *obj) +{ + ASPEED_MACHINE(obj)->mmio_exec = false; +} + +static char *aspeed_get_fmc_model(Object *obj, Error **errp) +{ + AspeedMachineState *bmc = ASPEED_MACHINE(obj); + return g_strdup(bmc->fmc_model); +} + +static void aspeed_set_fmc_model(Object *obj, const char *value, Error **errp) +{ + AspeedMachineState *bmc = ASPEED_MACHINE(obj); + + g_free(bmc->fmc_model); + bmc->fmc_model = g_strdup(value); +} + +static char *aspeed_get_spi_model(Object *obj, Error **errp) +{ + AspeedMachineState *bmc = ASPEED_MACHINE(obj); + return g_strdup(bmc->spi_model); +} + +static void aspeed_set_spi_model(Object *obj, const char *value, Error **errp) +{ + AspeedMachineState *bmc = ASPEED_MACHINE(obj); + + g_free(bmc->spi_model); + bmc->spi_model = g_strdup(value); +} + +static void aspeed_machine_class_props_init(ObjectClass *oc) +{ + object_class_property_add_bool(oc, "execute-in-place", + aspeed_get_mmio_exec, + aspeed_set_mmio_exec); + object_class_property_set_description(oc, "execute-in-place", + "boot directly from CE0 flash device"); + + object_class_property_add_str(oc, "fmc-model", aspeed_get_fmc_model, + 
aspeed_set_fmc_model); + object_class_property_set_description(oc, "fmc-model", + "Change the FMC Flash model"); + object_class_property_add_str(oc, "spi-model", aspeed_get_spi_model, + aspeed_set_spi_model); + object_class_property_set_description(oc, "spi-model", + "Change the SPI Flash model"); +} + +static int aspeed_soc_num_cpus(const char *soc_name) +{ + AspeedSoCClass *sc = ASPEED_SOC_CLASS(object_class_by_name(soc_name)); + return sc->num_cpus; +} + +static void aspeed_machine_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + AspeedMachineClass *amc = ASPEED_MACHINE_CLASS(oc); + + mc->init = aspeed_machine_init; + mc->no_floppy = 1; + mc->no_cdrom = 1; + mc->no_parallel = 1; + mc->default_ram_id = "ram"; + amc->macs_mask = ASPEED_MAC0_ON; + amc->uart_default = ASPEED_DEV_UART5; + + aspeed_machine_class_props_init(oc); +} + +static void aspeed_machine_palmetto_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + AspeedMachineClass *amc = ASPEED_MACHINE_CLASS(oc); + + mc->desc = "OpenPOWER Palmetto BMC (ARM926EJ-S)"; + amc->soc_name = "ast2400-a1"; + amc->hw_strap1 = PALMETTO_BMC_HW_STRAP1; + amc->fmc_model = "n25q256a"; + amc->spi_model = "mx25l25635e"; + amc->num_cs = 1; + amc->i2c_init = palmetto_bmc_i2c_init; + mc->default_ram_size = 256 * MiB; + mc->default_cpus = mc->min_cpus = mc->max_cpus = + aspeed_soc_num_cpus(amc->soc_name); +}; + +static void aspeed_machine_quanta_q71l_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + AspeedMachineClass *amc = ASPEED_MACHINE_CLASS(oc); + + mc->desc = "Quanta-Q71l BMC (ARM926EJ-S)"; + amc->soc_name = "ast2400-a1"; + amc->hw_strap1 = QUANTA_Q71L_BMC_HW_STRAP1; + amc->fmc_model = "n25q256a"; + amc->spi_model = "mx25l25635e"; + amc->num_cs = 1; + amc->i2c_init = quanta_q71l_bmc_i2c_init; + mc->default_ram_size = 128 * MiB; + mc->default_cpus = mc->min_cpus = mc->max_cpus = + aspeed_soc_num_cpus(amc->soc_name); +} + +static void aspeed_machine_supermicrox11_bmc_class_init(ObjectClass *oc, + void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + AspeedMachineClass *amc = ASPEED_MACHINE_CLASS(oc); + + mc->desc = "Supermicro X11 BMC (ARM926EJ-S)"; + amc->soc_name = "ast2400-a1"; + amc->hw_strap1 = SUPERMICROX11_BMC_HW_STRAP1; + amc->fmc_model = "mx25l25635e"; + amc->spi_model = "mx25l25635e"; + amc->num_cs = 1; + amc->macs_mask = ASPEED_MAC0_ON | ASPEED_MAC1_ON; + amc->i2c_init = palmetto_bmc_i2c_init; + mc->default_ram_size = 256 * MiB; +} + +static void aspeed_machine_ast2500_evb_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + AspeedMachineClass *amc = ASPEED_MACHINE_CLASS(oc); + + mc->desc = "Aspeed AST2500 EVB (ARM1176)"; + amc->soc_name = "ast2500-a1"; + amc->hw_strap1 = AST2500_EVB_HW_STRAP1; + amc->fmc_model = "w25q256"; + amc->spi_model = "mx25l25635e"; + amc->num_cs = 1; + amc->i2c_init = ast2500_evb_i2c_init; + mc->default_ram_size = 512 * MiB; + mc->default_cpus = mc->min_cpus = mc->max_cpus = + aspeed_soc_num_cpus(amc->soc_name); +}; + +static void aspeed_machine_romulus_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + AspeedMachineClass *amc = ASPEED_MACHINE_CLASS(oc); + + mc->desc = "OpenPOWER Romulus BMC (ARM1176)"; + amc->soc_name = "ast2500-a1"; + amc->hw_strap1 = ROMULUS_BMC_HW_STRAP1; + amc->fmc_model = "n25q256a"; + amc->spi_model = "mx66l1g45g"; + amc->num_cs = 2; + amc->i2c_init = romulus_bmc_i2c_init; + mc->default_ram_size = 512 * MiB; + 
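/* Fixed SMP topology: min, default and max CPU counts all follow the SoC's core count. */ +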
mc->default_cpus = mc->min_cpus = mc->max_cpus = + aspeed_soc_num_cpus(amc->soc_name); +}; + +static void aspeed_machine_sonorapass_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + AspeedMachineClass *amc = ASPEED_MACHINE_CLASS(oc); + + mc->desc = "OCP SonoraPass BMC (ARM1176)"; + amc->soc_name = "ast2500-a1"; + amc->hw_strap1 = SONORAPASS_BMC_HW_STRAP1; + amc->fmc_model = "mx66l1g45g"; + amc->spi_model = "mx66l1g45g"; + amc->num_cs = 2; + amc->i2c_init = sonorapass_bmc_i2c_init; + mc->default_ram_size = 512 * MiB; + mc->default_cpus = mc->min_cpus = mc->max_cpus = + aspeed_soc_num_cpus(amc->soc_name); +}; + +static void aspeed_machine_swift_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + AspeedMachineClass *amc = ASPEED_MACHINE_CLASS(oc); + + mc->desc = "OpenPOWER Swift BMC (ARM1176)"; + amc->soc_name = "ast2500-a1"; + amc->hw_strap1 = SWIFT_BMC_HW_STRAP1; + amc->fmc_model = "mx66l1g45g"; + amc->spi_model = "mx66l1g45g"; + amc->num_cs = 2; + amc->i2c_init = swift_bmc_i2c_init; + mc->default_ram_size = 512 * MiB; + mc->default_cpus = mc->min_cpus = mc->max_cpus = + aspeed_soc_num_cpus(amc->soc_name); + + mc->deprecation_reason = "redundant system. Please use a similar " + "OpenPOWER BMC, Witherspoon or Romulus."; +}; + +static void aspeed_machine_witherspoon_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + AspeedMachineClass *amc = ASPEED_MACHINE_CLASS(oc); + + mc->desc = "OpenPOWER Witherspoon BMC (ARM1176)"; + amc->soc_name = "ast2500-a1"; + amc->hw_strap1 = WITHERSPOON_BMC_HW_STRAP1; + amc->fmc_model = "mx25l25635e"; + amc->spi_model = "mx66l1g45g"; + amc->num_cs = 2; + amc->i2c_init = witherspoon_bmc_i2c_init; + mc->default_ram_size = 512 * MiB; + mc->default_cpus = mc->min_cpus = mc->max_cpus = + aspeed_soc_num_cpus(amc->soc_name); +}; + +static void aspeed_machine_ast2600_evb_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + AspeedMachineClass *amc = ASPEED_MACHINE_CLASS(oc); + + mc->desc = "Aspeed AST2600 EVB (Cortex-A7)"; + amc->soc_name = "ast2600-a3"; + amc->hw_strap1 = AST2600_EVB_HW_STRAP1; + amc->hw_strap2 = AST2600_EVB_HW_STRAP2; + amc->fmc_model = "w25q512jv"; + amc->spi_model = "mx66u51235f"; + amc->num_cs = 1; + amc->macs_mask = ASPEED_MAC0_ON | ASPEED_MAC1_ON | ASPEED_MAC2_ON | + ASPEED_MAC3_ON; + amc->i2c_init = ast2600_evb_i2c_init; + mc->default_ram_size = 1 * GiB; + mc->default_cpus = mc->min_cpus = mc->max_cpus = + aspeed_soc_num_cpus(amc->soc_name); +}; + +static void aspeed_machine_tacoma_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + AspeedMachineClass *amc = ASPEED_MACHINE_CLASS(oc); + + mc->desc = "OpenPOWER Tacoma BMC (Cortex-A7)"; + amc->soc_name = "ast2600-a3"; + amc->hw_strap1 = TACOMA_BMC_HW_STRAP1; + amc->hw_strap2 = TACOMA_BMC_HW_STRAP2; + amc->fmc_model = "mx66l1g45g"; + amc->spi_model = "mx66l1g45g"; + amc->num_cs = 2; + amc->macs_mask = ASPEED_MAC2_ON; + amc->i2c_init = witherspoon_bmc_i2c_init; /* Same board layout */ + mc->default_ram_size = 1 * GiB; + mc->default_cpus = mc->min_cpus = mc->max_cpus = + aspeed_soc_num_cpus(amc->soc_name); +}; + +static void aspeed_machine_g220a_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + AspeedMachineClass *amc = ASPEED_MACHINE_CLASS(oc); + + mc->desc = "Bytedance G220A BMC (ARM1176)"; + amc->soc_name = "ast2500-a1"; + amc->hw_strap1 = G220A_BMC_HW_STRAP1; + amc->fmc_model = "n25q512a"; + 
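/* + * Default flash model only: like "fmc-model" above, it can be overridden + * at run time through the machine options registered in + * aspeed_machine_class_props_init(), e.g. -M g220a-bmc,spi-model=w25q256 + * (illustrative values, not board defaults). + */ +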
amc->spi_model = "mx25l25635e"; + amc->num_cs = 2; + amc->macs_mask = ASPEED_MAC0_ON | ASPEED_MAC1_ON; + amc->i2c_init = g220a_bmc_i2c_init; + mc->default_ram_size = 1024 * MiB; + mc->default_cpus = mc->min_cpus = mc->max_cpus = + aspeed_soc_num_cpus(amc->soc_name); +}; + +static void aspeed_machine_fp5280g2_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + AspeedMachineClass *amc = ASPEED_MACHINE_CLASS(oc); + + mc->desc = "Inspur FP5280G2 BMC (ARM1176)"; + amc->soc_name = "ast2500-a1"; + amc->hw_strap1 = FP5280G2_BMC_HW_STRAP1; + amc->fmc_model = "n25q512a"; + amc->spi_model = "mx25l25635e"; + amc->num_cs = 2; + amc->macs_mask = ASPEED_MAC0_ON | ASPEED_MAC1_ON; + amc->i2c_init = fp5280g2_bmc_i2c_init; + mc->default_ram_size = 512 * MiB; + mc->default_cpus = mc->min_cpus = mc->max_cpus = + aspeed_soc_num_cpus(amc->soc_name); +}; + +static void aspeed_machine_rainier_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + AspeedMachineClass *amc = ASPEED_MACHINE_CLASS(oc); + + mc->desc = "IBM Rainier BMC (Cortex-A7)"; + amc->soc_name = "ast2600-a3"; + amc->hw_strap1 = RAINIER_BMC_HW_STRAP1; + amc->hw_strap2 = RAINIER_BMC_HW_STRAP2; + amc->fmc_model = "mx66l1g45g"; + amc->spi_model = "mx66l1g45g"; + amc->num_cs = 2; + amc->macs_mask = ASPEED_MAC2_ON | ASPEED_MAC3_ON; + amc->i2c_init = rainier_bmc_i2c_init; + mc->default_ram_size = 1 * GiB; + mc->default_cpus = mc->min_cpus = mc->max_cpus = + aspeed_soc_num_cpus(amc->soc_name); +}; + +/* On 32-bit hosts, lower RAM to 1G because of the 2047 MB limit */ +#if HOST_LONG_BITS == 32 +#define FUJI_BMC_RAM_SIZE (1 * GiB) +#else +#define FUJI_BMC_RAM_SIZE (2 * GiB) +#endif + +static void aspeed_machine_fuji_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + AspeedMachineClass *amc = ASPEED_MACHINE_CLASS(oc); + + mc->desc = "Facebook Fuji BMC (Cortex-A7)"; + amc->soc_name = "ast2600-a3"; + amc->hw_strap1 = FUJI_BMC_HW_STRAP1; + amc->hw_strap2 = FUJI_BMC_HW_STRAP2; + amc->fmc_model = "mx66l1g45g"; + amc->spi_model = "mx66l1g45g"; + amc->num_cs = 2; + amc->macs_mask = ASPEED_MAC3_ON; + amc->i2c_init = fuji_bmc_i2c_init; + amc->uart_default = ASPEED_DEV_UART1; + mc->default_ram_size = FUJI_BMC_RAM_SIZE; + mc->default_cpus = mc->min_cpus = mc->max_cpus = + aspeed_soc_num_cpus(amc->soc_name); +}; + +static const TypeInfo aspeed_machine_types[] = { + { + .name = MACHINE_TYPE_NAME("palmetto-bmc"), + .parent = TYPE_ASPEED_MACHINE, + .class_init = aspeed_machine_palmetto_class_init, + }, { + .name = MACHINE_TYPE_NAME("supermicrox11-bmc"), + .parent = TYPE_ASPEED_MACHINE, + .class_init = aspeed_machine_supermicrox11_bmc_class_init, + }, { + .name = MACHINE_TYPE_NAME("ast2500-evb"), + .parent = TYPE_ASPEED_MACHINE, + .class_init = aspeed_machine_ast2500_evb_class_init, + }, { + .name = MACHINE_TYPE_NAME("romulus-bmc"), + .parent = TYPE_ASPEED_MACHINE, + .class_init = aspeed_machine_romulus_class_init, + }, { + .name = MACHINE_TYPE_NAME("swift-bmc"), + .parent = TYPE_ASPEED_MACHINE, + .class_init = aspeed_machine_swift_class_init, + }, { + .name = MACHINE_TYPE_NAME("sonorapass-bmc"), + .parent = TYPE_ASPEED_MACHINE, + .class_init = aspeed_machine_sonorapass_class_init, + }, { + .name = MACHINE_TYPE_NAME("witherspoon-bmc"), + .parent = TYPE_ASPEED_MACHINE, + .class_init = aspeed_machine_witherspoon_class_init, + }, { + .name = MACHINE_TYPE_NAME("ast2600-evb"), + .parent = TYPE_ASPEED_MACHINE, + .class_init = aspeed_machine_ast2600_evb_class_init, + }, { + .name = 
MACHINE_TYPE_NAME("tacoma-bmc"), + .parent = TYPE_ASPEED_MACHINE, + .class_init = aspeed_machine_tacoma_class_init, + }, { + .name = MACHINE_TYPE_NAME("g220a-bmc"), + .parent = TYPE_ASPEED_MACHINE, + .class_init = aspeed_machine_g220a_class_init, + }, { + .name = MACHINE_TYPE_NAME("fp5280g2-bmc"), + .parent = TYPE_ASPEED_MACHINE, + .class_init = aspeed_machine_fp5280g2_class_init, + }, { + .name = MACHINE_TYPE_NAME("quanta-q71l-bmc"), + .parent = TYPE_ASPEED_MACHINE, + .class_init = aspeed_machine_quanta_q71l_class_init, + }, { + .name = MACHINE_TYPE_NAME("rainier-bmc"), + .parent = TYPE_ASPEED_MACHINE, + .class_init = aspeed_machine_rainier_class_init, + }, { + .name = MACHINE_TYPE_NAME("fuji-bmc"), + .parent = TYPE_ASPEED_MACHINE, + .class_init = aspeed_machine_fuji_class_init, + }, { + .name = TYPE_ASPEED_MACHINE, + .parent = TYPE_MACHINE, + .instance_size = sizeof(AspeedMachineState), + .instance_init = aspeed_machine_instance_init, + .class_size = sizeof(AspeedMachineClass), + .class_init = aspeed_machine_class_init, + .abstract = true, + } +}; + +DEFINE_TYPES(aspeed_machine_types) diff --git a/hw/arm/aspeed_ast2600.c b/hw/arm/aspeed_ast2600.c new file mode 100644 index 000000000..0384357a9 --- /dev/null +++ b/hw/arm/aspeed_ast2600.c @@ -0,0 +1,554 @@ +/* + * ASPEED SoC 2600 family + * + * Copyright (c) 2016-2019, IBM Corporation. + * + * This code is licensed under the GPL version 2 or later. See + * the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "hw/misc/unimp.h" +#include "hw/arm/aspeed_soc.h" +#include "hw/char/serial.h" +#include "qemu/module.h" +#include "qemu/error-report.h" +#include "hw/i2c/aspeed_i2c.h" +#include "net/net.h" +#include "sysemu/sysemu.h" + +#define ASPEED_SOC_IOMEM_SIZE 0x00200000 + +static const hwaddr aspeed_soc_ast2600_memmap[] = { + [ASPEED_DEV_SRAM] = 0x10000000, + /* 0x16000000 0x17FFFFFF : AHB BUS do LPC Bus bridge */ + [ASPEED_DEV_IOMEM] = 0x1E600000, + [ASPEED_DEV_PWM] = 0x1E610000, + [ASPEED_DEV_FMC] = 0x1E620000, + [ASPEED_DEV_SPI1] = 0x1E630000, + [ASPEED_DEV_SPI2] = 0x1E641000, + [ASPEED_DEV_EHCI1] = 0x1E6A1000, + [ASPEED_DEV_EHCI2] = 0x1E6A3000, + [ASPEED_DEV_MII1] = 0x1E650000, + [ASPEED_DEV_MII2] = 0x1E650008, + [ASPEED_DEV_MII3] = 0x1E650010, + [ASPEED_DEV_MII4] = 0x1E650018, + [ASPEED_DEV_ETH1] = 0x1E660000, + [ASPEED_DEV_ETH3] = 0x1E670000, + [ASPEED_DEV_ETH2] = 0x1E680000, + [ASPEED_DEV_ETH4] = 0x1E690000, + [ASPEED_DEV_VIC] = 0x1E6C0000, + [ASPEED_DEV_HACE] = 0x1E6D0000, + [ASPEED_DEV_SDMC] = 0x1E6E0000, + [ASPEED_DEV_SCU] = 0x1E6E2000, + [ASPEED_DEV_XDMA] = 0x1E6E7000, + [ASPEED_DEV_ADC] = 0x1E6E9000, + [ASPEED_DEV_VIDEO] = 0x1E700000, + [ASPEED_DEV_SDHCI] = 0x1E740000, + [ASPEED_DEV_EMMC] = 0x1E750000, + [ASPEED_DEV_GPIO] = 0x1E780000, + [ASPEED_DEV_GPIO_1_8V] = 0x1E780800, + [ASPEED_DEV_RTC] = 0x1E781000, + [ASPEED_DEV_TIMER1] = 0x1E782000, + [ASPEED_DEV_WDT] = 0x1E785000, + [ASPEED_DEV_LPC] = 0x1E789000, + [ASPEED_DEV_IBT] = 0x1E789140, + [ASPEED_DEV_I2C] = 0x1E78A000, + [ASPEED_DEV_UART1] = 0x1E783000, + [ASPEED_DEV_UART5] = 0x1E784000, + [ASPEED_DEV_VUART] = 0x1E787000, + [ASPEED_DEV_SDRAM] = 0x80000000, +}; + +#define ASPEED_A7MPCORE_ADDR 0x40460000 + +#define AST2600_MAX_IRQ 197 + +/* Shared Peripheral Interrupt values below are offset by -32 from datasheet */ +static const int aspeed_soc_ast2600_irqmap[] = { + [ASPEED_DEV_UART1] = 47, + [ASPEED_DEV_UART2] = 48, + [ASPEED_DEV_UART3] = 49, + [ASPEED_DEV_UART4] = 50, + [ASPEED_DEV_UART5] = 8, + [ASPEED_DEV_VUART] = 8, + 
[ASPEED_DEV_FMC] = 39, + [ASPEED_DEV_SDMC] = 0, + [ASPEED_DEV_SCU] = 12, + [ASPEED_DEV_ADC] = 78, + [ASPEED_DEV_XDMA] = 6, + [ASPEED_DEV_SDHCI] = 43, + [ASPEED_DEV_EHCI1] = 5, + [ASPEED_DEV_EHCI2] = 9, + [ASPEED_DEV_EMMC] = 15, + [ASPEED_DEV_GPIO] = 40, + [ASPEED_DEV_GPIO_1_8V] = 11, + [ASPEED_DEV_RTC] = 13, + [ASPEED_DEV_TIMER1] = 16, + [ASPEED_DEV_TIMER2] = 17, + [ASPEED_DEV_TIMER3] = 18, + [ASPEED_DEV_TIMER4] = 19, + [ASPEED_DEV_TIMER5] = 20, + [ASPEED_DEV_TIMER6] = 21, + [ASPEED_DEV_TIMER7] = 22, + [ASPEED_DEV_TIMER8] = 23, + [ASPEED_DEV_WDT] = 24, + [ASPEED_DEV_PWM] = 44, + [ASPEED_DEV_LPC] = 35, + [ASPEED_DEV_IBT] = 143, + [ASPEED_DEV_I2C] = 110, /* 110 -> 125 */ + [ASPEED_DEV_ETH1] = 2, + [ASPEED_DEV_ETH2] = 3, + [ASPEED_DEV_HACE] = 4, + [ASPEED_DEV_ETH3] = 32, + [ASPEED_DEV_ETH4] = 33, + [ASPEED_DEV_KCS] = 138, /* 138 -> 142 */ +}; + +static qemu_irq aspeed_soc_get_irq(AspeedSoCState *s, int ctrl) +{ + AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(s); + + return qdev_get_gpio_in(DEVICE(&s->a7mpcore), sc->irqmap[ctrl]); +} + +static void aspeed_soc_ast2600_init(Object *obj) +{ + AspeedSoCState *s = ASPEED_SOC(obj); + AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(s); + int i; + char socname[8]; + char typename[64]; + + if (sscanf(sc->name, "%7s", socname) != 1) { + g_assert_not_reached(); + } + + for (i = 0; i < sc->num_cpus; i++) { + object_initialize_child(obj, "cpu[*]", &s->cpu[i], sc->cpu_type); + } + + snprintf(typename, sizeof(typename), "aspeed.scu-%s", socname); + object_initialize_child(obj, "scu", &s->scu, typename); + qdev_prop_set_uint32(DEVICE(&s->scu), "silicon-rev", + sc->silicon_rev); + object_property_add_alias(obj, "hw-strap1", OBJECT(&s->scu), + "hw-strap1"); + object_property_add_alias(obj, "hw-strap2", OBJECT(&s->scu), + "hw-strap2"); + object_property_add_alias(obj, "hw-prot-key", OBJECT(&s->scu), + "hw-prot-key"); + + object_initialize_child(obj, "a7mpcore", &s->a7mpcore, + TYPE_A15MPCORE_PRIV); + + object_initialize_child(obj, "rtc", &s->rtc, TYPE_ASPEED_RTC); + + snprintf(typename, sizeof(typename), "aspeed.timer-%s", socname); + object_initialize_child(obj, "timerctrl", &s->timerctrl, typename); + + snprintf(typename, sizeof(typename), "aspeed.adc-%s", socname); + object_initialize_child(obj, "adc", &s->adc, typename); + + snprintf(typename, sizeof(typename), "aspeed.i2c-%s", socname); + object_initialize_child(obj, "i2c", &s->i2c, typename); + + snprintf(typename, sizeof(typename), "aspeed.fmc-%s", socname); + object_initialize_child(obj, "fmc", &s->fmc, typename); + object_property_add_alias(obj, "num-cs", OBJECT(&s->fmc), "num-cs"); + + for (i = 0; i < sc->spis_num; i++) { + snprintf(typename, sizeof(typename), "aspeed.spi%d-%s", i + 1, socname); + object_initialize_child(obj, "spi[*]", &s->spi[i], typename); + } + + for (i = 0; i < sc->ehcis_num; i++) { + object_initialize_child(obj, "ehci[*]", &s->ehci[i], + TYPE_PLATFORM_EHCI); + } + + snprintf(typename, sizeof(typename), "aspeed.sdmc-%s", socname); + object_initialize_child(obj, "sdmc", &s->sdmc, typename); + object_property_add_alias(obj, "ram-size", OBJECT(&s->sdmc), + "ram-size"); + object_property_add_alias(obj, "max-ram-size", OBJECT(&s->sdmc), + "max-ram-size"); + + for (i = 0; i < sc->wdts_num; i++) { + snprintf(typename, sizeof(typename), "aspeed.wdt-%s", socname); + object_initialize_child(obj, "wdt[*]", &s->wdt[i], typename); + } + + for (i = 0; i < sc->macs_num; i++) { + object_initialize_child(obj, "ftgmac100[*]", &s->ftgmac100[i], + TYPE_FTGMAC100); + + object_initialize_child(obj, "mii[*]", 
&s->mii[i], TYPE_ASPEED_MII); + } + + snprintf(typename, sizeof(typename), TYPE_ASPEED_XDMA "-%s", socname); + object_initialize_child(obj, "xdma", &s->xdma, typename); + + snprintf(typename, sizeof(typename), "aspeed.gpio-%s", socname); + object_initialize_child(obj, "gpio", &s->gpio, typename); + + snprintf(typename, sizeof(typename), "aspeed.gpio-%s-1_8v", socname); + object_initialize_child(obj, "gpio_1_8v", &s->gpio_1_8v, typename); + + object_initialize_child(obj, "sd-controller", &s->sdhci, + TYPE_ASPEED_SDHCI); + + object_property_set_int(OBJECT(&s->sdhci), "num-slots", 2, &error_abort); + + /* Init sd card slot class here so that they're under the correct parent */ + for (i = 0; i < ASPEED_SDHCI_NUM_SLOTS; ++i) { + object_initialize_child(obj, "sd-controller.sdhci[*]", + &s->sdhci.slots[i], TYPE_SYSBUS_SDHCI); + } + + object_initialize_child(obj, "emmc-controller", &s->emmc, + TYPE_ASPEED_SDHCI); + + object_property_set_int(OBJECT(&s->emmc), "num-slots", 1, &error_abort); + + object_initialize_child(obj, "emmc-controller.sdhci", &s->emmc.slots[0], + TYPE_SYSBUS_SDHCI); + + object_initialize_child(obj, "lpc", &s->lpc, TYPE_ASPEED_LPC); + + snprintf(typename, sizeof(typename), "aspeed.hace-%s", socname); + object_initialize_child(obj, "hace", &s->hace, typename); +} + +/* + * ASPEED ast2600 has 0xf as cluster ID + * + * https://developer.arm.com/documentation/ddi0388/e/the-system-control-coprocessors/summary-of-system-control-coprocessor-registers/multiprocessor-affinity-register + */ +static uint64_t aspeed_calc_affinity(int cpu) +{ + return (0xf << ARM_AFF1_SHIFT) | cpu; +} + +static void aspeed_soc_ast2600_realize(DeviceState *dev, Error **errp) +{ + int i; + AspeedSoCState *s = ASPEED_SOC(dev); + AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(s); + Error *err = NULL; + qemu_irq irq; + + /* IO space */ + create_unimplemented_device("aspeed_soc.io", sc->memmap[ASPEED_DEV_IOMEM], + ASPEED_SOC_IOMEM_SIZE); + + /* Video engine stub */ + create_unimplemented_device("aspeed.video", sc->memmap[ASPEED_DEV_VIDEO], + 0x1000); + + /* CPU */ + for (i = 0; i < sc->num_cpus; i++) { + if (sc->num_cpus > 1) { + object_property_set_int(OBJECT(&s->cpu[i]), "reset-cbar", + ASPEED_A7MPCORE_ADDR, &error_abort); + } + object_property_set_int(OBJECT(&s->cpu[i]), "mp-affinity", + aspeed_calc_affinity(i), &error_abort); + + object_property_set_int(OBJECT(&s->cpu[i]), "cntfrq", 1125000000, + &error_abort); + + if (!qdev_realize(DEVICE(&s->cpu[i]), NULL, errp)) { + return; + } + } + + /* A7MPCORE */ + object_property_set_int(OBJECT(&s->a7mpcore), "num-cpu", sc->num_cpus, + &error_abort); + object_property_set_int(OBJECT(&s->a7mpcore), "num-irq", + ROUND_UP(AST2600_MAX_IRQ + GIC_INTERNAL, 32), + &error_abort); + + sysbus_realize(SYS_BUS_DEVICE(&s->a7mpcore), &error_abort); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->a7mpcore), 0, ASPEED_A7MPCORE_ADDR); + + for (i = 0; i < sc->num_cpus; i++) { + SysBusDevice *sbd = SYS_BUS_DEVICE(&s->a7mpcore); + DeviceState *d = DEVICE(qemu_get_cpu(i)); + + irq = qdev_get_gpio_in(d, ARM_CPU_IRQ); + sysbus_connect_irq(sbd, i, irq); + irq = qdev_get_gpio_in(d, ARM_CPU_FIQ); + sysbus_connect_irq(sbd, i + sc->num_cpus, irq); + irq = qdev_get_gpio_in(d, ARM_CPU_VIRQ); + sysbus_connect_irq(sbd, i + 2 * sc->num_cpus, irq); + irq = qdev_get_gpio_in(d, ARM_CPU_VFIQ); + sysbus_connect_irq(sbd, i + 3 * sc->num_cpus, irq); + } + + /* SRAM */ + memory_region_init_ram(&s->sram, OBJECT(dev), "aspeed.sram", + sc->sram_size, &err); + if (err) { + error_propagate(errp, err); + return; + } + 
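/* Map the SRAM just initialized at its SoC-specific base address. */ +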
memory_region_add_subregion(get_system_memory(), + sc->memmap[ASPEED_DEV_SRAM], &s->sram); + + /* SCU */ + if (!sysbus_realize(SYS_BUS_DEVICE(&s->scu), errp)) { + return; + } + sysbus_mmio_map(SYS_BUS_DEVICE(&s->scu), 0, sc->memmap[ASPEED_DEV_SCU]); + + /* RTC */ + if (!sysbus_realize(SYS_BUS_DEVICE(&s->rtc), errp)) { + return; + } + sysbus_mmio_map(SYS_BUS_DEVICE(&s->rtc), 0, sc->memmap[ASPEED_DEV_RTC]); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->rtc), 0, + aspeed_soc_get_irq(s, ASPEED_DEV_RTC)); + + /* Timer */ + object_property_set_link(OBJECT(&s->timerctrl), "scu", OBJECT(&s->scu), + &error_abort); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->timerctrl), errp)) { + return; + } + sysbus_mmio_map(SYS_BUS_DEVICE(&s->timerctrl), 0, + sc->memmap[ASPEED_DEV_TIMER1]); + for (i = 0; i < ASPEED_TIMER_NR_TIMERS; i++) { + qemu_irq irq = aspeed_soc_get_irq(s, ASPEED_DEV_TIMER1 + i); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->timerctrl), i, irq); + } + + /* ADC */ + if (!sysbus_realize(SYS_BUS_DEVICE(&s->adc), errp)) { + return; + } + sysbus_mmio_map(SYS_BUS_DEVICE(&s->adc), 0, sc->memmap[ASPEED_DEV_ADC]); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->adc), 0, + aspeed_soc_get_irq(s, ASPEED_DEV_ADC)); + + /* UART - attach an 8250 to the IO space as our UART */ + serial_mm_init(get_system_memory(), sc->memmap[s->uart_default], 2, + aspeed_soc_get_irq(s, s->uart_default), 38400, + serial_hd(0), DEVICE_LITTLE_ENDIAN); + + /* I2C */ + object_property_set_link(OBJECT(&s->i2c), "dram", OBJECT(s->dram_mr), + &error_abort); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->i2c), errp)) { + return; + } + sysbus_mmio_map(SYS_BUS_DEVICE(&s->i2c), 0, sc->memmap[ASPEED_DEV_I2C]); + for (i = 0; i < ASPEED_I2C_GET_CLASS(&s->i2c)->num_busses; i++) { + qemu_irq irq = qdev_get_gpio_in(DEVICE(&s->a7mpcore), + sc->irqmap[ASPEED_DEV_I2C] + i); + /* The AST2600 I2C controller has one IRQ per bus. 
*/ + sysbus_connect_irq(SYS_BUS_DEVICE(&s->i2c.busses[i]), 0, irq); + } + + /* FMC, The number of CS is set at the board level */ + object_property_set_link(OBJECT(&s->fmc), "dram", OBJECT(s->dram_mr), + &error_abort); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->fmc), errp)) { + return; + } + sysbus_mmio_map(SYS_BUS_DEVICE(&s->fmc), 0, sc->memmap[ASPEED_DEV_FMC]); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->fmc), 1, + ASPEED_SMC_GET_CLASS(&s->fmc)->flash_window_base); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->fmc), 0, + aspeed_soc_get_irq(s, ASPEED_DEV_FMC)); + + /* SPI */ + for (i = 0; i < sc->spis_num; i++) { + object_property_set_link(OBJECT(&s->spi[i]), "dram", + OBJECT(s->dram_mr), &error_abort); + object_property_set_int(OBJECT(&s->spi[i]), "num-cs", 1, &error_abort); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->spi[i]), errp)) { + return; + } + sysbus_mmio_map(SYS_BUS_DEVICE(&s->spi[i]), 0, + sc->memmap[ASPEED_DEV_SPI1 + i]); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->spi[i]), 1, + ASPEED_SMC_GET_CLASS(&s->spi[i])->flash_window_base); + } + + /* EHCI */ + for (i = 0; i < sc->ehcis_num; i++) { + if (!sysbus_realize(SYS_BUS_DEVICE(&s->ehci[i]), errp)) { + return; + } + sysbus_mmio_map(SYS_BUS_DEVICE(&s->ehci[i]), 0, + sc->memmap[ASPEED_DEV_EHCI1 + i]); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->ehci[i]), 0, + aspeed_soc_get_irq(s, ASPEED_DEV_EHCI1 + i)); + } + + /* SDMC - SDRAM Memory Controller */ + if (!sysbus_realize(SYS_BUS_DEVICE(&s->sdmc), errp)) { + return; + } + sysbus_mmio_map(SYS_BUS_DEVICE(&s->sdmc), 0, sc->memmap[ASPEED_DEV_SDMC]); + + /* Watch dog */ + for (i = 0; i < sc->wdts_num; i++) { + AspeedWDTClass *awc = ASPEED_WDT_GET_CLASS(&s->wdt[i]); + + object_property_set_link(OBJECT(&s->wdt[i]), "scu", OBJECT(&s->scu), + &error_abort); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->wdt[i]), errp)) { + return; + } + sysbus_mmio_map(SYS_BUS_DEVICE(&s->wdt[i]), 0, + sc->memmap[ASPEED_DEV_WDT] + i * awc->offset); + } + + /* Net */ + for (i = 0; i < sc->macs_num; i++) { + object_property_set_bool(OBJECT(&s->ftgmac100[i]), "aspeed", true, + &error_abort); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->ftgmac100[i]), errp)) { + return; + } + sysbus_mmio_map(SYS_BUS_DEVICE(&s->ftgmac100[i]), 0, + sc->memmap[ASPEED_DEV_ETH1 + i]); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->ftgmac100[i]), 0, + aspeed_soc_get_irq(s, ASPEED_DEV_ETH1 + i)); + + object_property_set_link(OBJECT(&s->mii[i]), "nic", + OBJECT(&s->ftgmac100[i]), &error_abort); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->mii[i]), errp)) { + return; + } + + sysbus_mmio_map(SYS_BUS_DEVICE(&s->mii[i]), 0, + sc->memmap[ASPEED_DEV_MII1 + i]); + } + + /* XDMA */ + if (!sysbus_realize(SYS_BUS_DEVICE(&s->xdma), errp)) { + return; + } + sysbus_mmio_map(SYS_BUS_DEVICE(&s->xdma), 0, + sc->memmap[ASPEED_DEV_XDMA]); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->xdma), 0, + aspeed_soc_get_irq(s, ASPEED_DEV_XDMA)); + + /* GPIO */ + if (!sysbus_realize(SYS_BUS_DEVICE(&s->gpio), errp)) { + return; + } + sysbus_mmio_map(SYS_BUS_DEVICE(&s->gpio), 0, sc->memmap[ASPEED_DEV_GPIO]); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->gpio), 0, + aspeed_soc_get_irq(s, ASPEED_DEV_GPIO)); + + if (!sysbus_realize(SYS_BUS_DEVICE(&s->gpio_1_8v), errp)) { + return; + } + sysbus_mmio_map(SYS_BUS_DEVICE(&s->gpio_1_8v), 0, + sc->memmap[ASPEED_DEV_GPIO_1_8V]); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->gpio_1_8v), 0, + aspeed_soc_get_irq(s, ASPEED_DEV_GPIO_1_8V)); + + /* SDHCI */ + if (!sysbus_realize(SYS_BUS_DEVICE(&s->sdhci), errp)) { + return; + } + sysbus_mmio_map(SYS_BUS_DEVICE(&s->sdhci), 0, + 
sc->memmap[ASPEED_DEV_SDHCI]); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->sdhci), 0, + aspeed_soc_get_irq(s, ASPEED_DEV_SDHCI)); + + /* eMMC */ + if (!sysbus_realize(SYS_BUS_DEVICE(&s->emmc), errp)) { + return; + } + sysbus_mmio_map(SYS_BUS_DEVICE(&s->emmc), 0, sc->memmap[ASPEED_DEV_EMMC]); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->emmc), 0, + aspeed_soc_get_irq(s, ASPEED_DEV_EMMC)); + + /* LPC */ + if (!sysbus_realize(SYS_BUS_DEVICE(&s->lpc), errp)) { + return; + } + sysbus_mmio_map(SYS_BUS_DEVICE(&s->lpc), 0, sc->memmap[ASPEED_DEV_LPC]); + + /* Connect the LPC IRQ to the GIC. It is otherwise unused. */ + sysbus_connect_irq(SYS_BUS_DEVICE(&s->lpc), 0, + aspeed_soc_get_irq(s, ASPEED_DEV_LPC)); + + /* + * On the AST2600 LPC subdevice IRQs are connected straight to the GIC. + * + * LPC subdevice IRQ sources are offset from 1 because the LPC model caters + * to the AST2400 and AST2500. SoCs before the AST2600 have one LPC IRQ + * shared across the subdevices, and the shared IRQ output to the VIC is at + * offset 0. + */ + sysbus_connect_irq(SYS_BUS_DEVICE(&s->lpc), 1 + aspeed_lpc_kcs_1, + qdev_get_gpio_in(DEVICE(&s->a7mpcore), + sc->irqmap[ASPEED_DEV_KCS] + aspeed_lpc_kcs_1)); + + sysbus_connect_irq(SYS_BUS_DEVICE(&s->lpc), 1 + aspeed_lpc_kcs_2, + qdev_get_gpio_in(DEVICE(&s->a7mpcore), + sc->irqmap[ASPEED_DEV_KCS] + aspeed_lpc_kcs_2)); + + sysbus_connect_irq(SYS_BUS_DEVICE(&s->lpc), 1 + aspeed_lpc_kcs_3, + qdev_get_gpio_in(DEVICE(&s->a7mpcore), + sc->irqmap[ASPEED_DEV_KCS] + aspeed_lpc_kcs_3)); + + sysbus_connect_irq(SYS_BUS_DEVICE(&s->lpc), 1 + aspeed_lpc_kcs_4, + qdev_get_gpio_in(DEVICE(&s->a7mpcore), + sc->irqmap[ASPEED_DEV_KCS] + aspeed_lpc_kcs_4)); + + /* HACE */ + object_property_set_link(OBJECT(&s->hace), "dram", OBJECT(s->dram_mr), + &error_abort); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->hace), errp)) { + return; + } + sysbus_mmio_map(SYS_BUS_DEVICE(&s->hace), 0, sc->memmap[ASPEED_DEV_HACE]); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->hace), 0, + aspeed_soc_get_irq(s, ASPEED_DEV_HACE)); +} + +static void aspeed_soc_ast2600_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + AspeedSoCClass *sc = ASPEED_SOC_CLASS(oc); + + dc->realize = aspeed_soc_ast2600_realize; + + sc->name = "ast2600-a3"; + sc->cpu_type = ARM_CPU_TYPE_NAME("cortex-a7"); + sc->silicon_rev = AST2600_A3_SILICON_REV; + sc->sram_size = 0x16400; + sc->spis_num = 2; + sc->ehcis_num = 2; + sc->wdts_num = 4; + sc->macs_num = 4; + sc->irqmap = aspeed_soc_ast2600_irqmap; + sc->memmap = aspeed_soc_ast2600_memmap; + sc->num_cpus = 2; +} + +static const TypeInfo aspeed_soc_ast2600_type_info = { + .name = "ast2600-a3", + .parent = TYPE_ASPEED_SOC, + .instance_size = sizeof(AspeedSoCState), + .instance_init = aspeed_soc_ast2600_init, + .class_init = aspeed_soc_ast2600_class_init, + .class_size = sizeof(AspeedSoCClass), +}; + +static void aspeed_soc_register_types(void) +{ + type_register_static(&aspeed_soc_ast2600_type_info); +}; + +type_init(aspeed_soc_register_types) diff --git a/hw/arm/aspeed_soc.c b/hw/arm/aspeed_soc.c new file mode 100644 index 000000000..7d53cf2f5 --- /dev/null +++ b/hw/arm/aspeed_soc.c @@ -0,0 +1,533 @@ +/* + * ASPEED SoC family + * + * Andrew Jeffery + * Jeremy Kerr + * + * Copyright 2016 IBM Corp. + * + * This code is licensed under the GPL version 2 or later. See + * the COPYING file in the top-level directory. 
+ */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "hw/misc/unimp.h" +#include "hw/arm/aspeed_soc.h" +#include "hw/char/serial.h" +#include "qemu/module.h" +#include "qemu/error-report.h" +#include "hw/i2c/aspeed_i2c.h" +#include "net/net.h" +#include "sysemu/sysemu.h" + +#define ASPEED_SOC_IOMEM_SIZE 0x00200000 + +static const hwaddr aspeed_soc_ast2400_memmap[] = { + [ASPEED_DEV_IOMEM] = 0x1E600000, + [ASPEED_DEV_FMC] = 0x1E620000, + [ASPEED_DEV_SPI1] = 0x1E630000, + [ASPEED_DEV_EHCI1] = 0x1E6A1000, + [ASPEED_DEV_VIC] = 0x1E6C0000, + [ASPEED_DEV_SDMC] = 0x1E6E0000, + [ASPEED_DEV_SCU] = 0x1E6E2000, + [ASPEED_DEV_HACE] = 0x1E6E3000, + [ASPEED_DEV_XDMA] = 0x1E6E7000, + [ASPEED_DEV_VIDEO] = 0x1E700000, + [ASPEED_DEV_ADC] = 0x1E6E9000, + [ASPEED_DEV_SRAM] = 0x1E720000, + [ASPEED_DEV_SDHCI] = 0x1E740000, + [ASPEED_DEV_GPIO] = 0x1E780000, + [ASPEED_DEV_RTC] = 0x1E781000, + [ASPEED_DEV_TIMER1] = 0x1E782000, + [ASPEED_DEV_WDT] = 0x1E785000, + [ASPEED_DEV_PWM] = 0x1E786000, + [ASPEED_DEV_LPC] = 0x1E789000, + [ASPEED_DEV_IBT] = 0x1E789140, + [ASPEED_DEV_I2C] = 0x1E78A000, + [ASPEED_DEV_ETH1] = 0x1E660000, + [ASPEED_DEV_ETH2] = 0x1E680000, + [ASPEED_DEV_UART1] = 0x1E783000, + [ASPEED_DEV_UART5] = 0x1E784000, + [ASPEED_DEV_VUART] = 0x1E787000, + [ASPEED_DEV_SDRAM] = 0x40000000, +}; + +static const hwaddr aspeed_soc_ast2500_memmap[] = { + [ASPEED_DEV_IOMEM] = 0x1E600000, + [ASPEED_DEV_FMC] = 0x1E620000, + [ASPEED_DEV_SPI1] = 0x1E630000, + [ASPEED_DEV_SPI2] = 0x1E631000, + [ASPEED_DEV_EHCI1] = 0x1E6A1000, + [ASPEED_DEV_EHCI2] = 0x1E6A3000, + [ASPEED_DEV_VIC] = 0x1E6C0000, + [ASPEED_DEV_SDMC] = 0x1E6E0000, + [ASPEED_DEV_SCU] = 0x1E6E2000, + [ASPEED_DEV_HACE] = 0x1E6E3000, + [ASPEED_DEV_XDMA] = 0x1E6E7000, + [ASPEED_DEV_ADC] = 0x1E6E9000, + [ASPEED_DEV_VIDEO] = 0x1E700000, + [ASPEED_DEV_SRAM] = 0x1E720000, + [ASPEED_DEV_SDHCI] = 0x1E740000, + [ASPEED_DEV_GPIO] = 0x1E780000, + [ASPEED_DEV_RTC] = 0x1E781000, + [ASPEED_DEV_TIMER1] = 0x1E782000, + [ASPEED_DEV_WDT] = 0x1E785000, + [ASPEED_DEV_PWM] = 0x1E786000, + [ASPEED_DEV_LPC] = 0x1E789000, + [ASPEED_DEV_IBT] = 0x1E789140, + [ASPEED_DEV_I2C] = 0x1E78A000, + [ASPEED_DEV_ETH1] = 0x1E660000, + [ASPEED_DEV_ETH2] = 0x1E680000, + [ASPEED_DEV_UART1] = 0x1E783000, + [ASPEED_DEV_UART5] = 0x1E784000, + [ASPEED_DEV_VUART] = 0x1E787000, + [ASPEED_DEV_SDRAM] = 0x80000000, +}; + +static const int aspeed_soc_ast2400_irqmap[] = { + [ASPEED_DEV_UART1] = 9, + [ASPEED_DEV_UART2] = 32, + [ASPEED_DEV_UART3] = 33, + [ASPEED_DEV_UART4] = 34, + [ASPEED_DEV_UART5] = 10, + [ASPEED_DEV_VUART] = 8, + [ASPEED_DEV_FMC] = 19, + [ASPEED_DEV_EHCI1] = 5, + [ASPEED_DEV_EHCI2] = 13, + [ASPEED_DEV_SDMC] = 0, + [ASPEED_DEV_SCU] = 21, + [ASPEED_DEV_ADC] = 31, + [ASPEED_DEV_GPIO] = 20, + [ASPEED_DEV_RTC] = 22, + [ASPEED_DEV_TIMER1] = 16, + [ASPEED_DEV_TIMER2] = 17, + [ASPEED_DEV_TIMER3] = 18, + [ASPEED_DEV_TIMER4] = 35, + [ASPEED_DEV_TIMER5] = 36, + [ASPEED_DEV_TIMER6] = 37, + [ASPEED_DEV_TIMER7] = 38, + [ASPEED_DEV_TIMER8] = 39, + [ASPEED_DEV_WDT] = 27, + [ASPEED_DEV_PWM] = 28, + [ASPEED_DEV_LPC] = 8, + [ASPEED_DEV_I2C] = 12, + [ASPEED_DEV_ETH1] = 2, + [ASPEED_DEV_ETH2] = 3, + [ASPEED_DEV_XDMA] = 6, + [ASPEED_DEV_SDHCI] = 26, + [ASPEED_DEV_HACE] = 4, +}; + +#define aspeed_soc_ast2500_irqmap aspeed_soc_ast2400_irqmap + +static qemu_irq aspeed_soc_get_irq(AspeedSoCState *s, int ctrl) +{ + AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(s); + + return qdev_get_gpio_in(DEVICE(&s->vic), sc->irqmap[ctrl]); +} + +static void aspeed_soc_init(Object *obj) +{ + AspeedSoCState *s = 
ASPEED_SOC(obj); + AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(s); + int i; + char socname[8]; + char typename[64]; + + if (sscanf(sc->name, "%7s", socname) != 1) { + g_assert_not_reached(); + } + + for (i = 0; i < sc->num_cpus; i++) { + object_initialize_child(obj, "cpu[*]", &s->cpu[i], sc->cpu_type); + } + + snprintf(typename, sizeof(typename), "aspeed.scu-%s", socname); + object_initialize_child(obj, "scu", &s->scu, typename); + qdev_prop_set_uint32(DEVICE(&s->scu), "silicon-rev", + sc->silicon_rev); + object_property_add_alias(obj, "hw-strap1", OBJECT(&s->scu), + "hw-strap1"); + object_property_add_alias(obj, "hw-strap2", OBJECT(&s->scu), + "hw-strap2"); + object_property_add_alias(obj, "hw-prot-key", OBJECT(&s->scu), + "hw-prot-key"); + + object_initialize_child(obj, "vic", &s->vic, TYPE_ASPEED_VIC); + + object_initialize_child(obj, "rtc", &s->rtc, TYPE_ASPEED_RTC); + + snprintf(typename, sizeof(typename), "aspeed.timer-%s", socname); + object_initialize_child(obj, "timerctrl", &s->timerctrl, typename); + + snprintf(typename, sizeof(typename), "aspeed.adc-%s", socname); + object_initialize_child(obj, "adc", &s->adc, typename); + + snprintf(typename, sizeof(typename), "aspeed.i2c-%s", socname); + object_initialize_child(obj, "i2c", &s->i2c, typename); + + snprintf(typename, sizeof(typename), "aspeed.fmc-%s", socname); + object_initialize_child(obj, "fmc", &s->fmc, typename); + object_property_add_alias(obj, "num-cs", OBJECT(&s->fmc), "num-cs"); + + for (i = 0; i < sc->spis_num; i++) { + snprintf(typename, sizeof(typename), "aspeed.spi%d-%s", i + 1, socname); + object_initialize_child(obj, "spi[*]", &s->spi[i], typename); + } + + for (i = 0; i < sc->ehcis_num; i++) { + object_initialize_child(obj, "ehci[*]", &s->ehci[i], + TYPE_PLATFORM_EHCI); + } + + snprintf(typename, sizeof(typename), "aspeed.sdmc-%s", socname); + object_initialize_child(obj, "sdmc", &s->sdmc, typename); + object_property_add_alias(obj, "ram-size", OBJECT(&s->sdmc), + "ram-size"); + object_property_add_alias(obj, "max-ram-size", OBJECT(&s->sdmc), + "max-ram-size"); + + for (i = 0; i < sc->wdts_num; i++) { + snprintf(typename, sizeof(typename), "aspeed.wdt-%s", socname); + object_initialize_child(obj, "wdt[*]", &s->wdt[i], typename); + } + + for (i = 0; i < sc->macs_num; i++) { + object_initialize_child(obj, "ftgmac100[*]", &s->ftgmac100[i], + TYPE_FTGMAC100); + } + + snprintf(typename, sizeof(typename), TYPE_ASPEED_XDMA "-%s", socname); + object_initialize_child(obj, "xdma", &s->xdma, typename); + + snprintf(typename, sizeof(typename), "aspeed.gpio-%s", socname); + object_initialize_child(obj, "gpio", &s->gpio, typename); + + object_initialize_child(obj, "sdc", &s->sdhci, TYPE_ASPEED_SDHCI); + + object_property_set_int(OBJECT(&s->sdhci), "num-slots", 2, &error_abort); + + /* Init sd card slot class here so that they're under the correct parent */ + for (i = 0; i < ASPEED_SDHCI_NUM_SLOTS; ++i) { + object_initialize_child(obj, "sdhci[*]", &s->sdhci.slots[i], + TYPE_SYSBUS_SDHCI); + } + + object_initialize_child(obj, "lpc", &s->lpc, TYPE_ASPEED_LPC); + + snprintf(typename, sizeof(typename), "aspeed.hace-%s", socname); + object_initialize_child(obj, "hace", &s->hace, typename); +} + +static void aspeed_soc_realize(DeviceState *dev, Error **errp) +{ + int i; + AspeedSoCState *s = ASPEED_SOC(dev); + AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(s); + Error *err = NULL; + + /* IO space */ + create_unimplemented_device("aspeed_soc.io", sc->memmap[ASPEED_DEV_IOMEM], + ASPEED_SOC_IOMEM_SIZE); + + /* Video engine stub */ + 
create_unimplemented_device("aspeed.video", sc->memmap[ASPEED_DEV_VIDEO], + 0x1000); + + /* CPU */ + for (i = 0; i < sc->num_cpus; i++) { + if (!qdev_realize(DEVICE(&s->cpu[i]), NULL, errp)) { + return; + } + } + + /* SRAM */ + memory_region_init_ram(&s->sram, OBJECT(dev), "aspeed.sram", + sc->sram_size, &err); + if (err) { + error_propagate(errp, err); + return; + } + memory_region_add_subregion(get_system_memory(), + sc->memmap[ASPEED_DEV_SRAM], &s->sram); + + /* SCU */ + if (!sysbus_realize(SYS_BUS_DEVICE(&s->scu), errp)) { + return; + } + sysbus_mmio_map(SYS_BUS_DEVICE(&s->scu), 0, sc->memmap[ASPEED_DEV_SCU]); + + /* VIC */ + if (!sysbus_realize(SYS_BUS_DEVICE(&s->vic), errp)) { + return; + } + sysbus_mmio_map(SYS_BUS_DEVICE(&s->vic), 0, sc->memmap[ASPEED_DEV_VIC]); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->vic), 0, + qdev_get_gpio_in(DEVICE(&s->cpu), ARM_CPU_IRQ)); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->vic), 1, + qdev_get_gpio_in(DEVICE(&s->cpu), ARM_CPU_FIQ)); + + /* RTC */ + if (!sysbus_realize(SYS_BUS_DEVICE(&s->rtc), errp)) { + return; + } + sysbus_mmio_map(SYS_BUS_DEVICE(&s->rtc), 0, sc->memmap[ASPEED_DEV_RTC]); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->rtc), 0, + aspeed_soc_get_irq(s, ASPEED_DEV_RTC)); + + /* Timer */ + object_property_set_link(OBJECT(&s->timerctrl), "scu", OBJECT(&s->scu), + &error_abort); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->timerctrl), errp)) { + return; + } + sysbus_mmio_map(SYS_BUS_DEVICE(&s->timerctrl), 0, + sc->memmap[ASPEED_DEV_TIMER1]); + for (i = 0; i < ASPEED_TIMER_NR_TIMERS; i++) { + qemu_irq irq = aspeed_soc_get_irq(s, ASPEED_DEV_TIMER1 + i); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->timerctrl), i, irq); + } + + /* ADC */ + if (!sysbus_realize(SYS_BUS_DEVICE(&s->adc), errp)) { + return; + } + sysbus_mmio_map(SYS_BUS_DEVICE(&s->adc), 0, sc->memmap[ASPEED_DEV_ADC]); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->adc), 0, + aspeed_soc_get_irq(s, ASPEED_DEV_ADC)); + + /* UART - attach an 8250 to the IO space as our UART */ + serial_mm_init(get_system_memory(), sc->memmap[s->uart_default], 2, + aspeed_soc_get_irq(s, s->uart_default), 38400, + serial_hd(0), DEVICE_LITTLE_ENDIAN); + + /* I2C */ + object_property_set_link(OBJECT(&s->i2c), "dram", OBJECT(s->dram_mr), + &error_abort); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->i2c), errp)) { + return; + } + sysbus_mmio_map(SYS_BUS_DEVICE(&s->i2c), 0, sc->memmap[ASPEED_DEV_I2C]); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->i2c), 0, + aspeed_soc_get_irq(s, ASPEED_DEV_I2C)); + + /* FMC, The number of CS is set at the board level */ + object_property_set_link(OBJECT(&s->fmc), "dram", OBJECT(s->dram_mr), + &error_abort); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->fmc), errp)) { + return; + } + sysbus_mmio_map(SYS_BUS_DEVICE(&s->fmc), 0, sc->memmap[ASPEED_DEV_FMC]); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->fmc), 1, + ASPEED_SMC_GET_CLASS(&s->fmc)->flash_window_base); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->fmc), 0, + aspeed_soc_get_irq(s, ASPEED_DEV_FMC)); + + /* SPI */ + for (i = 0; i < sc->spis_num; i++) { + object_property_set_int(OBJECT(&s->spi[i]), "num-cs", 1, &error_abort); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->spi[i]), errp)) { + return; + } + sysbus_mmio_map(SYS_BUS_DEVICE(&s->spi[i]), 0, + sc->memmap[ASPEED_DEV_SPI1 + i]); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->spi[i]), 1, + ASPEED_SMC_GET_CLASS(&s->spi[i])->flash_window_base); + } + + /* EHCI */ + for (i = 0; i < sc->ehcis_num; i++) { + if (!sysbus_realize(SYS_BUS_DEVICE(&s->ehci[i]), errp)) { + return; + } + sysbus_mmio_map(SYS_BUS_DEVICE(&s->ehci[i]), 0, + 
sc->memmap[ASPEED_DEV_EHCI1 + i]); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->ehci[i]), 0, + aspeed_soc_get_irq(s, ASPEED_DEV_EHCI1 + i)); + } + + /* SDMC - SDRAM Memory Controller */ + if (!sysbus_realize(SYS_BUS_DEVICE(&s->sdmc), errp)) { + return; + } + sysbus_mmio_map(SYS_BUS_DEVICE(&s->sdmc), 0, sc->memmap[ASPEED_DEV_SDMC]); + + /* Watch dog */ + for (i = 0; i < sc->wdts_num; i++) { + AspeedWDTClass *awc = ASPEED_WDT_GET_CLASS(&s->wdt[i]); + + object_property_set_link(OBJECT(&s->wdt[i]), "scu", OBJECT(&s->scu), + &error_abort); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->wdt[i]), errp)) { + return; + } + sysbus_mmio_map(SYS_BUS_DEVICE(&s->wdt[i]), 0, + sc->memmap[ASPEED_DEV_WDT] + i * awc->offset); + } + + /* Net */ + for (i = 0; i < sc->macs_num; i++) { + object_property_set_bool(OBJECT(&s->ftgmac100[i]), "aspeed", true, + &error_abort); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->ftgmac100[i]), errp)) { + return; + } + sysbus_mmio_map(SYS_BUS_DEVICE(&s->ftgmac100[i]), 0, + sc->memmap[ASPEED_DEV_ETH1 + i]); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->ftgmac100[i]), 0, + aspeed_soc_get_irq(s, ASPEED_DEV_ETH1 + i)); + } + + /* XDMA */ + if (!sysbus_realize(SYS_BUS_DEVICE(&s->xdma), errp)) { + return; + } + sysbus_mmio_map(SYS_BUS_DEVICE(&s->xdma), 0, + sc->memmap[ASPEED_DEV_XDMA]); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->xdma), 0, + aspeed_soc_get_irq(s, ASPEED_DEV_XDMA)); + + /* GPIO */ + if (!sysbus_realize(SYS_BUS_DEVICE(&s->gpio), errp)) { + return; + } + sysbus_mmio_map(SYS_BUS_DEVICE(&s->gpio), 0, sc->memmap[ASPEED_DEV_GPIO]); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->gpio), 0, + aspeed_soc_get_irq(s, ASPEED_DEV_GPIO)); + + /* SDHCI */ + if (!sysbus_realize(SYS_BUS_DEVICE(&s->sdhci), errp)) { + return; + } + sysbus_mmio_map(SYS_BUS_DEVICE(&s->sdhci), 0, + sc->memmap[ASPEED_DEV_SDHCI]); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->sdhci), 0, + aspeed_soc_get_irq(s, ASPEED_DEV_SDHCI)); + + /* LPC */ + if (!sysbus_realize(SYS_BUS_DEVICE(&s->lpc), errp)) { + return; + } + sysbus_mmio_map(SYS_BUS_DEVICE(&s->lpc), 0, sc->memmap[ASPEED_DEV_LPC]); + + /* Connect the LPC IRQ to the VIC */ + sysbus_connect_irq(SYS_BUS_DEVICE(&s->lpc), 0, + aspeed_soc_get_irq(s, ASPEED_DEV_LPC)); + + /* + * On the AST2400 and AST2500 the one LPC IRQ is shared between all of the + * subdevices. Connect the LPC subdevice IRQs to the LPC controller IRQ (by + * contrast, on the AST2600, the subdevice IRQs are connected straight to + * the GIC). + * + * LPC subdevice IRQ sources are offset from 1 because the shared IRQ output + * to the VIC is at offset 0. 
+ */ + sysbus_connect_irq(SYS_BUS_DEVICE(&s->lpc), 1 + aspeed_lpc_kcs_1, + qdev_get_gpio_in(DEVICE(&s->lpc), aspeed_lpc_kcs_1)); + + sysbus_connect_irq(SYS_BUS_DEVICE(&s->lpc), 1 + aspeed_lpc_kcs_2, + qdev_get_gpio_in(DEVICE(&s->lpc), aspeed_lpc_kcs_2)); + + sysbus_connect_irq(SYS_BUS_DEVICE(&s->lpc), 1 + aspeed_lpc_kcs_3, + qdev_get_gpio_in(DEVICE(&s->lpc), aspeed_lpc_kcs_3)); + + sysbus_connect_irq(SYS_BUS_DEVICE(&s->lpc), 1 + aspeed_lpc_kcs_4, + qdev_get_gpio_in(DEVICE(&s->lpc), aspeed_lpc_kcs_4)); + + /* HACE */ + object_property_set_link(OBJECT(&s->hace), "dram", OBJECT(s->dram_mr), + &error_abort); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->hace), errp)) { + return; + } + sysbus_mmio_map(SYS_BUS_DEVICE(&s->hace), 0, sc->memmap[ASPEED_DEV_HACE]); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->hace), 0, + aspeed_soc_get_irq(s, ASPEED_DEV_HACE)); +} +static Property aspeed_soc_properties[] = { + DEFINE_PROP_LINK("dram", AspeedSoCState, dram_mr, TYPE_MEMORY_REGION, + MemoryRegion *), + DEFINE_PROP_UINT32("uart-default", AspeedSoCState, uart_default, + ASPEED_DEV_UART5), + DEFINE_PROP_END_OF_LIST(), +}; + +static void aspeed_soc_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + + dc->realize = aspeed_soc_realize; + /* Reason: Uses serial_hds and nd_table in realize() directly */ + dc->user_creatable = false; + device_class_set_props(dc, aspeed_soc_properties); +} + +static const TypeInfo aspeed_soc_type_info = { + .name = TYPE_ASPEED_SOC, + .parent = TYPE_DEVICE, + .instance_size = sizeof(AspeedSoCState), + .class_size = sizeof(AspeedSoCClass), + .class_init = aspeed_soc_class_init, + .abstract = true, +}; + +static void aspeed_soc_ast2400_class_init(ObjectClass *oc, void *data) +{ + AspeedSoCClass *sc = ASPEED_SOC_CLASS(oc); + + sc->name = "ast2400-a1"; + sc->cpu_type = ARM_CPU_TYPE_NAME("arm926"); + sc->silicon_rev = AST2400_A1_SILICON_REV; + sc->sram_size = 0x8000; + sc->spis_num = 1; + sc->ehcis_num = 1; + sc->wdts_num = 2; + sc->macs_num = 2; + sc->irqmap = aspeed_soc_ast2400_irqmap; + sc->memmap = aspeed_soc_ast2400_memmap; + sc->num_cpus = 1; +} + +static const TypeInfo aspeed_soc_ast2400_type_info = { + .name = "ast2400-a1", + .parent = TYPE_ASPEED_SOC, + .instance_init = aspeed_soc_init, + .instance_size = sizeof(AspeedSoCState), + .class_init = aspeed_soc_ast2400_class_init, +}; + +static void aspeed_soc_ast2500_class_init(ObjectClass *oc, void *data) +{ + AspeedSoCClass *sc = ASPEED_SOC_CLASS(oc); + + sc->name = "ast2500-a1"; + sc->cpu_type = ARM_CPU_TYPE_NAME("arm1176"); + sc->silicon_rev = AST2500_A1_SILICON_REV; + sc->sram_size = 0x9000; + sc->spis_num = 2; + sc->ehcis_num = 2; + sc->wdts_num = 3; + sc->macs_num = 2; + sc->irqmap = aspeed_soc_ast2500_irqmap; + sc->memmap = aspeed_soc_ast2500_memmap; + sc->num_cpus = 1; +} + +static const TypeInfo aspeed_soc_ast2500_type_info = { + .name = "ast2500-a1", + .parent = TYPE_ASPEED_SOC, + .instance_init = aspeed_soc_init, + .instance_size = sizeof(AspeedSoCState), + .class_init = aspeed_soc_ast2500_class_init, +}; +static void aspeed_soc_register_types(void) +{ + type_register_static(&aspeed_soc_type_info); + type_register_static(&aspeed_soc_ast2400_type_info); + type_register_static(&aspeed_soc_ast2500_type_info); +}; + +type_init(aspeed_soc_register_types) diff --git a/hw/arm/bcm2835_peripherals.c b/hw/arm/bcm2835_peripherals.c new file mode 100644 index 000000000..48538c936 --- /dev/null +++ b/hw/arm/bcm2835_peripherals.c @@ -0,0 +1,415 @@ +/* + * Raspberry Pi emulation (c) 2012 Gregory Estrade + * 
Upstreaming code cleanup [including bcm2835_*] (c) 2013 Jan Petrous + * + * Raspberry Pi 2 emulation and refactoring Copyright (c) 2015, Microsoft + * Written by Andrew Baumann + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/module.h" +#include "hw/arm/bcm2835_peripherals.h" +#include "hw/misc/bcm2835_mbox_defs.h" +#include "hw/arm/raspi_platform.h" +#include "sysemu/sysemu.h" + +/* Peripheral base address on the VC (GPU) system bus */ +#define BCM2835_VC_PERI_BASE 0x7e000000 + +/* Capabilities for SD controller: no DMA, high-speed, default clocks etc. */ +#define BCM2835_SDHC_CAPAREG 0x52134b4 + +static void create_unimp(BCM2835PeripheralState *ps, + UnimplementedDeviceState *uds, + const char *name, hwaddr ofs, hwaddr size) +{ + object_initialize_child(OBJECT(ps), name, uds, TYPE_UNIMPLEMENTED_DEVICE); + qdev_prop_set_string(DEVICE(uds), "name", name); + qdev_prop_set_uint64(DEVICE(uds), "size", size); + sysbus_realize(SYS_BUS_DEVICE(uds), &error_fatal); + memory_region_add_subregion_overlap(&ps->peri_mr, ofs, + sysbus_mmio_get_region(SYS_BUS_DEVICE(uds), 0), -1000); +} + +static void bcm2835_peripherals_init(Object *obj) +{ + BCM2835PeripheralState *s = BCM2835_PERIPHERALS(obj); + + /* Memory region for peripheral devices, which we export to our parent */ + memory_region_init(&s->peri_mr, obj, "bcm2835-peripherals", 0x1000000); + sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->peri_mr); + + /* Internal memory region for peripheral bus addresses (not exported) */ + memory_region_init(&s->gpu_bus_mr, obj, "bcm2835-gpu", (uint64_t)1 << 32); + + /* Internal memory region for request/response communication with + * mailbox-addressable peripherals (not exported) + */ + memory_region_init(&s->mbox_mr, obj, "bcm2835-mbox", + MBOX_CHAN_COUNT << MBOX_AS_CHAN_SHIFT); + + /* Interrupt Controller */ + object_initialize_child(obj, "ic", &s->ic, TYPE_BCM2835_IC); + + /* SYS Timer */ + object_initialize_child(obj, "systimer", &s->systmr, + TYPE_BCM2835_SYSTIMER); + + /* UART0 */ + object_initialize_child(obj, "uart0", &s->uart0, TYPE_PL011); + + /* AUX / UART1 */ + object_initialize_child(obj, "aux", &s->aux, TYPE_BCM2835_AUX); + + /* Mailboxes */ + object_initialize_child(obj, "mbox", &s->mboxes, TYPE_BCM2835_MBOX); + + object_property_add_const_link(OBJECT(&s->mboxes), "mbox-mr", + OBJECT(&s->mbox_mr)); + + /* Framebuffer */ + object_initialize_child(obj, "fb", &s->fb, TYPE_BCM2835_FB); + object_property_add_alias(obj, "vcram-size", OBJECT(&s->fb), "vcram-size"); + + object_property_add_const_link(OBJECT(&s->fb), "dma-mr", + OBJECT(&s->gpu_bus_mr)); + + /* Property channel */ + object_initialize_child(obj, "property", &s->property, + TYPE_BCM2835_PROPERTY); + object_property_add_alias(obj, "board-rev", OBJECT(&s->property), + "board-rev"); + + object_property_add_const_link(OBJECT(&s->property), "fb", + OBJECT(&s->fb)); + object_property_add_const_link(OBJECT(&s->property), "dma-mr", + OBJECT(&s->gpu_bus_mr)); + + /* Random Number Generator */ + object_initialize_child(obj, "rng", &s->rng, TYPE_BCM2835_RNG); + + /* Extended Mass Media Controller */ + object_initialize_child(obj, "sdhci", &s->sdhci, TYPE_SYSBUS_SDHCI); + + /* SDHOST */ + object_initialize_child(obj, "sdhost", &s->sdhost, TYPE_BCM2835_SDHOST); + + /* DMA Channels */ + object_initialize_child(obj, "dma", &s->dma, TYPE_BCM2835_DMA); + + object_property_add_const_link(OBJECT(&s->dma),
"dma-mr", + OBJECT(&s->gpu_bus_mr)); + + /* Thermal */ + object_initialize_child(obj, "thermal", &s->thermal, TYPE_BCM2835_THERMAL); + + /* GPIO */ + object_initialize_child(obj, "gpio", &s->gpio, TYPE_BCM2835_GPIO); + + object_property_add_const_link(OBJECT(&s->gpio), "sdbus-sdhci", + OBJECT(&s->sdhci.sdbus)); + object_property_add_const_link(OBJECT(&s->gpio), "sdbus-sdhost", + OBJECT(&s->sdhost.sdbus)); + + /* Mphi */ + object_initialize_child(obj, "mphi", &s->mphi, TYPE_BCM2835_MPHI); + + /* DWC2 */ + object_initialize_child(obj, "dwc2", &s->dwc2, TYPE_DWC2_USB); + + /* CPRMAN clock manager */ + object_initialize_child(obj, "cprman", &s->cprman, TYPE_BCM2835_CPRMAN); + + object_property_add_const_link(OBJECT(&s->dwc2), "dma-mr", + OBJECT(&s->gpu_bus_mr)); + + /* Power Management */ + object_initialize_child(obj, "powermgt", &s->powermgt, + TYPE_BCM2835_POWERMGT); +} + +static void bcm2835_peripherals_realize(DeviceState *dev, Error **errp) +{ + BCM2835PeripheralState *s = BCM2835_PERIPHERALS(dev); + Object *obj; + MemoryRegion *ram; + Error *err = NULL; + uint64_t ram_size, vcram_size; + int n; + + obj = object_property_get_link(OBJECT(dev), "ram", &error_abort); + + ram = MEMORY_REGION(obj); + ram_size = memory_region_size(ram); + + /* Map peripherals and RAM into the GPU address space. */ + memory_region_init_alias(&s->peri_mr_alias, OBJECT(s), + "bcm2835-peripherals", &s->peri_mr, 0, + memory_region_size(&s->peri_mr)); + + memory_region_add_subregion_overlap(&s->gpu_bus_mr, BCM2835_VC_PERI_BASE, + &s->peri_mr_alias, 1); + + /* RAM is aliased four times (different cache configurations) on the GPU */ + for (n = 0; n < 4; n++) { + memory_region_init_alias(&s->ram_alias[n], OBJECT(s), + "bcm2835-gpu-ram-alias[*]", ram, 0, ram_size); + memory_region_add_subregion_overlap(&s->gpu_bus_mr, (hwaddr)n << 30, + &s->ram_alias[n], 0); + } + + /* Interrupt Controller */ + if (!sysbus_realize(SYS_BUS_DEVICE(&s->ic), errp)) { + return; + } + + /* CPRMAN clock manager */ + if (!sysbus_realize(SYS_BUS_DEVICE(&s->cprman), errp)) { + return; + } + memory_region_add_subregion(&s->peri_mr, CPRMAN_OFFSET, + sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->cprman), 0)); + qdev_connect_clock_in(DEVICE(&s->uart0), "clk", + qdev_get_clock_out(DEVICE(&s->cprman), "uart-out")); + + memory_region_add_subregion(&s->peri_mr, ARMCTRL_IC_OFFSET, + sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->ic), 0)); + sysbus_pass_irq(SYS_BUS_DEVICE(s), SYS_BUS_DEVICE(&s->ic)); + + /* Sys Timer */ + if (!sysbus_realize(SYS_BUS_DEVICE(&s->systmr), errp)) { + return; + } + memory_region_add_subregion(&s->peri_mr, ST_OFFSET, + sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->systmr), 0)); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->systmr), 0, + qdev_get_gpio_in_named(DEVICE(&s->ic), BCM2835_IC_GPU_IRQ, + INTERRUPT_TIMER0)); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->systmr), 1, + qdev_get_gpio_in_named(DEVICE(&s->ic), BCM2835_IC_GPU_IRQ, + INTERRUPT_TIMER1)); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->systmr), 2, + qdev_get_gpio_in_named(DEVICE(&s->ic), BCM2835_IC_GPU_IRQ, + INTERRUPT_TIMER2)); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->systmr), 3, + qdev_get_gpio_in_named(DEVICE(&s->ic), BCM2835_IC_GPU_IRQ, + INTERRUPT_TIMER3)); + + /* UART0 */ + qdev_prop_set_chr(DEVICE(&s->uart0), "chardev", serial_hd(0)); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->uart0), errp)) { + return; + } + + memory_region_add_subregion(&s->peri_mr, UART0_OFFSET, + sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->uart0), 0)); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->uart0), 0, + 
qdev_get_gpio_in_named(DEVICE(&s->ic), BCM2835_IC_GPU_IRQ, + INTERRUPT_UART0)); + + /* AUX / UART1 */ + qdev_prop_set_chr(DEVICE(&s->aux), "chardev", serial_hd(1)); + + if (!sysbus_realize(SYS_BUS_DEVICE(&s->aux), errp)) { + return; + } + + memory_region_add_subregion(&s->peri_mr, AUX_OFFSET, + sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->aux), 0)); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->aux), 0, + qdev_get_gpio_in_named(DEVICE(&s->ic), BCM2835_IC_GPU_IRQ, + INTERRUPT_AUX)); + + /* Mailboxes */ + if (!sysbus_realize(SYS_BUS_DEVICE(&s->mboxes), errp)) { + return; + } + + memory_region_add_subregion(&s->peri_mr, ARMCTRL_0_SBM_OFFSET, + sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->mboxes), 0)); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->mboxes), 0, + qdev_get_gpio_in_named(DEVICE(&s->ic), BCM2835_IC_ARM_IRQ, + INTERRUPT_ARM_MAILBOX)); + + /* Framebuffer */ + vcram_size = object_property_get_uint(OBJECT(s), "vcram-size", &err); + if (err) { + error_propagate(errp, err); + return; + } + + if (!object_property_set_uint(OBJECT(&s->fb), "vcram-base", + ram_size - vcram_size, errp)) { + return; + } + + if (!sysbus_realize(SYS_BUS_DEVICE(&s->fb), errp)) { + return; + } + + memory_region_add_subregion(&s->mbox_mr, MBOX_CHAN_FB << MBOX_AS_CHAN_SHIFT, + sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->fb), 0)); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->fb), 0, + qdev_get_gpio_in(DEVICE(&s->mboxes), MBOX_CHAN_FB)); + + /* Property channel */ + if (!sysbus_realize(SYS_BUS_DEVICE(&s->property), errp)) { + return; + } + + memory_region_add_subregion(&s->mbox_mr, + MBOX_CHAN_PROPERTY << MBOX_AS_CHAN_SHIFT, + sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->property), 0)); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->property), 0, + qdev_get_gpio_in(DEVICE(&s->mboxes), MBOX_CHAN_PROPERTY)); + + /* Random Number Generator */ + if (!sysbus_realize(SYS_BUS_DEVICE(&s->rng), errp)) { + return; + } + + memory_region_add_subregion(&s->peri_mr, RNG_OFFSET, + sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->rng), 0)); + + /* Extended Mass Media Controller + * + * Compatible with: + * - SD Host Controller Specification Version 3.0 Draft 1.0 + * - SDIO Specification Version 3.0 + * - MMC Specification Version 4.4 + * + * For the exact details please refer to the Arasan documentation: + * SD3.0_Host_AHB_eMMC4.4_Usersguide_ver5.9_jan11_10.pdf + */ + object_property_set_uint(OBJECT(&s->sdhci), "sd-spec-version", 3, + &error_abort); + object_property_set_uint(OBJECT(&s->sdhci), "capareg", + BCM2835_SDHC_CAPAREG, &error_abort); + object_property_set_bool(OBJECT(&s->sdhci), "pending-insert-quirk", true, + &error_abort); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->sdhci), errp)) { + return; + } + + memory_region_add_subregion(&s->peri_mr, EMMC1_OFFSET, + sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->sdhci), 0)); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->sdhci), 0, + qdev_get_gpio_in_named(DEVICE(&s->ic), BCM2835_IC_GPU_IRQ, + INTERRUPT_ARASANSDIO)); + + /* SDHOST */ + if (!sysbus_realize(SYS_BUS_DEVICE(&s->sdhost), errp)) { + return; + } + + memory_region_add_subregion(&s->peri_mr, MMCI0_OFFSET, + sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->sdhost), 0)); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->sdhost), 0, + qdev_get_gpio_in_named(DEVICE(&s->ic), BCM2835_IC_GPU_IRQ, + INTERRUPT_SDIO)); + + /* DMA Channels */ + if (!sysbus_realize(SYS_BUS_DEVICE(&s->dma), errp)) { + return; + } + + memory_region_add_subregion(&s->peri_mr, DMA_OFFSET, + sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->dma), 0)); + memory_region_add_subregion(&s->peri_mr, DMA15_OFFSET, + 
sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->dma), 1)); + + for (n = 0; n <= 12; n++) { + sysbus_connect_irq(SYS_BUS_DEVICE(&s->dma), n, + qdev_get_gpio_in_named(DEVICE(&s->ic), + BCM2835_IC_GPU_IRQ, + INTERRUPT_DMA0 + n)); + } + + /* THERMAL */ + if (!sysbus_realize(SYS_BUS_DEVICE(&s->thermal), errp)) { + return; + } + memory_region_add_subregion(&s->peri_mr, THERMAL_OFFSET, + sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->thermal), 0)); + + /* GPIO */ + if (!sysbus_realize(SYS_BUS_DEVICE(&s->gpio), errp)) { + return; + } + + memory_region_add_subregion(&s->peri_mr, GPIO_OFFSET, + sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->gpio), 0)); + + object_property_add_alias(OBJECT(s), "sd-bus", OBJECT(&s->gpio), "sd-bus"); + + /* Mphi */ + if (!sysbus_realize(SYS_BUS_DEVICE(&s->mphi), errp)) { + return; + } + + memory_region_add_subregion(&s->peri_mr, MPHI_OFFSET, + sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->mphi), 0)); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->mphi), 0, + qdev_get_gpio_in_named(DEVICE(&s->ic), BCM2835_IC_GPU_IRQ, + INTERRUPT_HOSTPORT)); + + /* DWC2 */ + if (!sysbus_realize(SYS_BUS_DEVICE(&s->dwc2), errp)) { + return; + } + + memory_region_add_subregion(&s->peri_mr, USB_OTG_OFFSET, + sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->dwc2), 0)); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->dwc2), 0, + qdev_get_gpio_in_named(DEVICE(&s->ic), BCM2835_IC_GPU_IRQ, + INTERRUPT_USB)); + + /* Power Management */ + if (!sysbus_realize(SYS_BUS_DEVICE(&s->powermgt), errp)) { + return; + } + + memory_region_add_subregion(&s->peri_mr, PM_OFFSET, + sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->powermgt), 0)); + + create_unimp(s, &s->txp, "bcm2835-txp", TXP_OFFSET, 0x1000); + create_unimp(s, &s->armtmr, "bcm2835-sp804", ARMCTRL_TIMER0_1_OFFSET, 0x40); + create_unimp(s, &s->i2s, "bcm2835-i2s", I2S_OFFSET, 0x100); + create_unimp(s, &s->smi, "bcm2835-smi", SMI_OFFSET, 0x100); + create_unimp(s, &s->spi[0], "bcm2835-spi0", SPI0_OFFSET, 0x20); + create_unimp(s, &s->bscsl, "bcm2835-spis", BSC_SL_OFFSET, 0x100); + create_unimp(s, &s->i2c[0], "bcm2835-i2c0", BSC0_OFFSET, 0x20); + create_unimp(s, &s->i2c[1], "bcm2835-i2c1", BSC1_OFFSET, 0x20); + create_unimp(s, &s->i2c[2], "bcm2835-i2c2", BSC2_OFFSET, 0x20); + create_unimp(s, &s->otp, "bcm2835-otp", OTP_OFFSET, 0x80); + create_unimp(s, &s->dbus, "bcm2835-dbus", DBUS_OFFSET, 0x8000); + create_unimp(s, &s->ave0, "bcm2835-ave0", AVE0_OFFSET, 0x8000); + create_unimp(s, &s->v3d, "bcm2835-v3d", V3D_OFFSET, 0x1000); + create_unimp(s, &s->sdramc, "bcm2835-sdramc", SDRAMC_OFFSET, 0x100); +} + +static void bcm2835_peripherals_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + + dc->realize = bcm2835_peripherals_realize; +} + +static const TypeInfo bcm2835_peripherals_type_info = { + .name = TYPE_BCM2835_PERIPHERALS, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(BCM2835PeripheralState), + .instance_init = bcm2835_peripherals_init, + .class_init = bcm2835_peripherals_class_init, +}; + +static void bcm2835_peripherals_register_types(void) +{ + type_register_static(&bcm2835_peripherals_type_info); +} + +type_init(bcm2835_peripherals_register_types) diff --git a/hw/arm/bcm2836.c b/hw/arm/bcm2836.c new file mode 100644 index 000000000..24354338c --- /dev/null +++ b/hw/arm/bcm2836.c @@ -0,0 +1,245 @@ +/* + * Raspberry Pi emulation (c) 2012 Gregory Estrade + * Upstreaming code cleanup [including bcm2835_*] (c) 2013 Jan Petrous + * + * Raspberry Pi 2 emulation and refactoring Copyright (c) 2015, Microsoft + * Written by Andrew Baumann + * + * This work is
licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/module.h" +#include "hw/arm/bcm2836.h" +#include "hw/arm/raspi_platform.h" +#include "hw/sysbus.h" + +typedef struct BCM283XClass { + /*< private >*/ + DeviceClass parent_class; + /*< public >*/ + const char *name; + const char *cpu_type; + unsigned core_count; + hwaddr peri_base; /* Peripheral base address seen by the CPU */ + hwaddr ctrl_base; /* Interrupt controller and mailboxes etc. */ + int clusterid; +} BCM283XClass; + +#define BCM283X_CLASS(klass) \ + OBJECT_CLASS_CHECK(BCM283XClass, (klass), TYPE_BCM283X) +#define BCM283X_GET_CLASS(obj) \ + OBJECT_GET_CLASS(BCM283XClass, (obj), TYPE_BCM283X) + +static Property bcm2836_enabled_cores_property = + DEFINE_PROP_UINT32("enabled-cpus", BCM283XState, enabled_cpus, 0); + +static void bcm2836_init(Object *obj) +{ + BCM283XState *s = BCM283X(obj); + BCM283XClass *bc = BCM283X_GET_CLASS(obj); + int n; + + for (n = 0; n < bc->core_count; n++) { + object_initialize_child(obj, "cpu[*]", &s->cpu[n].core, + bc->cpu_type); + } + if (bc->core_count > 1) { + qdev_property_add_static(DEVICE(obj), &bcm2836_enabled_cores_property); + qdev_prop_set_uint32(DEVICE(obj), "enabled-cpus", bc->core_count); + } + + if (bc->ctrl_base) { + object_initialize_child(obj, "control", &s->control, + TYPE_BCM2836_CONTROL); + } + + object_initialize_child(obj, "peripherals", &s->peripherals, + TYPE_BCM2835_PERIPHERALS); + object_property_add_alias(obj, "board-rev", OBJECT(&s->peripherals), + "board-rev"); + object_property_add_alias(obj, "vcram-size", OBJECT(&s->peripherals), + "vcram-size"); +} + +static bool bcm283x_common_realize(DeviceState *dev, Error **errp) +{ + BCM283XState *s = BCM283X(dev); + BCM283XClass *bc = BCM283X_GET_CLASS(dev); + Object *obj; + + /* common peripherals from bcm2835 */ + + obj = object_property_get_link(OBJECT(dev), "ram", &error_abort); + + object_property_add_const_link(OBJECT(&s->peripherals), "ram", obj); + + if (!sysbus_realize(SYS_BUS_DEVICE(&s->peripherals), errp)) { + return false; + } + + object_property_add_alias(OBJECT(s), "sd-bus", OBJECT(&s->peripherals), + "sd-bus"); + + sysbus_mmio_map_overlap(SYS_BUS_DEVICE(&s->peripherals), 0, + bc->peri_base, 1); + return true; +} + +static void bcm2835_realize(DeviceState *dev, Error **errp) +{ + BCM283XState *s = BCM283X(dev); + + if (!bcm283x_common_realize(dev, errp)) { + return; + } + + if (!qdev_realize(DEVICE(&s->cpu[0].core), NULL, errp)) { + return; + } + + /* Connect irq/fiq outputs from the interrupt controller. */ + sysbus_connect_irq(SYS_BUS_DEVICE(&s->peripherals), 0, + qdev_get_gpio_in(DEVICE(&s->cpu[0].core), ARM_CPU_IRQ)); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->peripherals), 1, + qdev_get_gpio_in(DEVICE(&s->cpu[0].core), ARM_CPU_FIQ)); +} + +static void bcm2836_realize(DeviceState *dev, Error **errp) +{ + BCM283XState *s = BCM283X(dev); + BCM283XClass *bc = BCM283X_GET_CLASS(dev); + int n; + + if (!bcm283x_common_realize(dev, errp)) { + return; + } + + /* bcm2836 interrupt controller (and mailboxes, etc.) 
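+ * The control block sits at ctrl_base (0x40000000 on these SoCs) and
+ * fans the single peripheral IRQ/FIQ pair out to per-core IRQ/FIQ
+ * lines; each core's generic timers are routed back through it too,
+ * as wired up below.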
*/ + if (!sysbus_realize(SYS_BUS_DEVICE(&s->control), errp)) { + return; + } + + sysbus_mmio_map(SYS_BUS_DEVICE(&s->control), 0, bc->ctrl_base); + + sysbus_connect_irq(SYS_BUS_DEVICE(&s->peripherals), 0, + qdev_get_gpio_in_named(DEVICE(&s->control), "gpu-irq", 0)); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->peripherals), 1, + qdev_get_gpio_in_named(DEVICE(&s->control), "gpu-fiq", 0)); + + for (n = 0; n < BCM283X_NCPUS; n++) { + /* TODO: this should be converted to a property of ARM_CPU */ + s->cpu[n].core.mp_affinity = (bc->clusterid << 8) | n; + + /* set periphbase/CBAR value for CPU-local registers */ + if (!object_property_set_int(OBJECT(&s->cpu[n].core), "reset-cbar", + bc->peri_base, errp)) { + return; + } + + /* start powered off if not enabled */ + if (!object_property_set_bool(OBJECT(&s->cpu[n].core), + "start-powered-off", + n >= s->enabled_cpus, + errp)) { + return; + } + + if (!qdev_realize(DEVICE(&s->cpu[n].core), NULL, errp)) { + return; + } + + /* Connect irq/fiq outputs from the interrupt controller. */ + qdev_connect_gpio_out_named(DEVICE(&s->control), "irq", n, + qdev_get_gpio_in(DEVICE(&s->cpu[n].core), ARM_CPU_IRQ)); + qdev_connect_gpio_out_named(DEVICE(&s->control), "fiq", n, + qdev_get_gpio_in(DEVICE(&s->cpu[n].core), ARM_CPU_FIQ)); + + /* Connect timers from the CPU to the interrupt controller */ + qdev_connect_gpio_out(DEVICE(&s->cpu[n].core), GTIMER_PHYS, + qdev_get_gpio_in_named(DEVICE(&s->control), "cntpnsirq", n)); + qdev_connect_gpio_out(DEVICE(&s->cpu[n].core), GTIMER_VIRT, + qdev_get_gpio_in_named(DEVICE(&s->control), "cntvirq", n)); + qdev_connect_gpio_out(DEVICE(&s->cpu[n].core), GTIMER_HYP, + qdev_get_gpio_in_named(DEVICE(&s->control), "cnthpirq", n)); + qdev_connect_gpio_out(DEVICE(&s->cpu[n].core), GTIMER_SEC, + qdev_get_gpio_in_named(DEVICE(&s->control), "cntpsirq", n)); + } +} + +static void bcm283x_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + + /* Reason: Must be wired up in code (see raspi_init() function) */ + dc->user_creatable = false; +} + +static void bcm2835_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + BCM283XClass *bc = BCM283X_CLASS(oc); + + bc->cpu_type = ARM_CPU_TYPE_NAME("arm1176"); + bc->core_count = 1; + bc->peri_base = 0x20000000; + dc->realize = bcm2835_realize; +}; + +static void bcm2836_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + BCM283XClass *bc = BCM283X_CLASS(oc); + + bc->cpu_type = ARM_CPU_TYPE_NAME("cortex-a7"); + bc->core_count = BCM283X_NCPUS; + bc->peri_base = 0x3f000000; + bc->ctrl_base = 0x40000000; + bc->clusterid = 0xf; + dc->realize = bcm2836_realize; +}; + +#ifdef TARGET_AARCH64 +static void bcm2837_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + BCM283XClass *bc = BCM283X_CLASS(oc); + + bc->cpu_type = ARM_CPU_TYPE_NAME("cortex-a53"); + bc->core_count = BCM283X_NCPUS; + bc->peri_base = 0x3f000000; + bc->ctrl_base = 0x40000000; + bc->clusterid = 0x0; + dc->realize = bcm2836_realize; +}; +#endif + +static const TypeInfo bcm283x_types[] = { + { + .name = TYPE_BCM2835, + .parent = TYPE_BCM283X, + .class_init = bcm2835_class_init, + }, { + .name = TYPE_BCM2836, + .parent = TYPE_BCM283X, + .class_init = bcm2836_class_init, +#ifdef TARGET_AARCH64 + }, { + .name = TYPE_BCM2837, + .parent = TYPE_BCM283X, + .class_init = bcm2837_class_init, +#endif + }, { + .name = TYPE_BCM283X, + .parent = TYPE_DEVICE, + .instance_size = sizeof(BCM283XState), + .instance_init = bcm2836_init, + 
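+ /*
+ * Shared by all the concrete SoC models above: what differs per SoC
+ * (CPU type, core count, peripheral base, realize hook) lives in the
+ * class data filled in by each class_init.
+ */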
.class_size = sizeof(BCM283XClass), + .class_init = bcm283x_class_init, + .abstract = true, + } +}; + +DEFINE_TYPES(bcm283x_types) diff --git a/hw/arm/boot.c b/hw/arm/boot.c new file mode 100644 index 000000000..74ad397b1 --- /dev/null +++ b/hw/arm/boot.c @@ -0,0 +1,1351 @@ +/* + * ARM kernel loader. + * + * Copyright (c) 2006-2007 CodeSourcery. + * Written by Paul Brook + * + * This code is licensed under the GPL. + */ + +#include "qemu/osdep.h" +#include "qemu-common.h" +#include "qemu/datadir.h" +#include "qemu/error-report.h" +#include "qapi/error.h" +#include <libfdt.h> +#include "hw/arm/boot.h" +#include "hw/arm/linux-boot-if.h" +#include "sysemu/kvm.h" +#include "sysemu/sysemu.h" +#include "sysemu/numa.h" +#include "hw/boards.h" +#include "sysemu/reset.h" +#include "hw/loader.h" +#include "elf.h" +#include "sysemu/device_tree.h" +#include "qemu/config-file.h" +#include "qemu/option.h" +#include "qemu/units.h" + +/* Kernel boot protocol is specified in the kernel docs + * Documentation/arm/Booting and Documentation/arm64/booting.txt + * They have different preferred image load offsets from system RAM base. + */ +#define KERNEL_ARGS_ADDR 0x100 +#define KERNEL_NOLOAD_ADDR 0x02000000 +#define KERNEL_LOAD_ADDR 0x00010000 +#define KERNEL64_LOAD_ADDR 0x00080000 + +#define ARM64_TEXT_OFFSET_OFFSET 8 +#define ARM64_MAGIC_OFFSET 56 + +#define BOOTLOADER_MAX_SIZE (4 * KiB) + +AddressSpace *arm_boot_address_space(ARMCPU *cpu, + const struct arm_boot_info *info) +{ + /* Return the address space to use for bootloader reads and writes. + * We prefer the secure address space if the CPU has it and we're + * going to boot the guest into it. + */ + int asidx; + CPUState *cs = CPU(cpu); + + if (arm_feature(&cpu->env, ARM_FEATURE_EL3) && info->secure_boot) { + asidx = ARMASIdx_S; + } else { + asidx = ARMASIdx_NS; + } + + return cpu_get_address_space(cs, asidx); +} + +typedef enum { + FIXUP_NONE = 0, /* do nothing */ + FIXUP_TERMINATOR, /* end of insns */ + FIXUP_BOARDID, /* overwrite with board ID number */ + FIXUP_BOARD_SETUP, /* overwrite with board specific setup code address */ + FIXUP_ARGPTR_LO, /* overwrite with pointer to kernel args */ + FIXUP_ARGPTR_HI, /* overwrite with pointer to kernel args (high half) */ + FIXUP_ENTRYPOINT_LO, /* overwrite with kernel entry point */ + FIXUP_ENTRYPOINT_HI, /* overwrite with kernel entry point (high half) */ + FIXUP_GIC_CPU_IF, /* overwrite with GIC CPU interface address */ + FIXUP_BOOTREG, /* overwrite with boot register address */ + FIXUP_DSB, /* overwrite with correct DSB insn for cpu */ + FIXUP_MAX, +} FixupType; + +typedef struct ARMInsnFixup { + uint32_t insn; + FixupType fixup; +} ARMInsnFixup; + +static const ARMInsnFixup bootloader_aarch64[] = { + { 0x580000c0 }, /* ldr x0, arg ; Load the lower 32-bits of DTB */ + { 0xaa1f03e1 }, /* mov x1, xzr */ + { 0xaa1f03e2 }, /* mov x2, xzr */ + { 0xaa1f03e3 }, /* mov x3, xzr */ + { 0x58000084 }, /* ldr x4, entry ; Load the lower 32-bits of kernel entry */ + { 0xd61f0080 }, /* br x4 ; Jump to the kernel entry point */ + { 0, FIXUP_ARGPTR_LO }, /* arg: .word @DTB Lower 32-bits */ + { 0, FIXUP_ARGPTR_HI}, /* .word @DTB Higher 32-bits */ + { 0, FIXUP_ENTRYPOINT_LO }, /* entry: .word @Kernel Entry Lower 32-bits */ + { 0, FIXUP_ENTRYPOINT_HI }, /* .word @Kernel Entry Higher 32-bits */ + { 0, FIXUP_TERMINATOR } +}; + +/* A very small bootloader: call the board-setup code (if needed), + * set r0-r2, then jump to the kernel.
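+ * Per the 32-bit Linux boot convention this means r0 = 0, r1 = the
+ * machine type number and r2 = the address of the kernel args (ATAGS
+ * or DTB), which is exactly what the FIXUP_BOARDID and FIXUP_ARGPTR_LO
+ * slots below are patched to.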
+ * If we're not calling boot setup code then we don't copy across + * the first BOOTLOADER_NO_BOARD_SETUP_OFFSET insns in this array. + */ + +static const ARMInsnFixup bootloader[] = { + { 0xe28fe004 }, /* add lr, pc, #4 */ + { 0xe51ff004 }, /* ldr pc, [pc, #-4] */ + { 0, FIXUP_BOARD_SETUP }, +#define BOOTLOADER_NO_BOARD_SETUP_OFFSET 3 + { 0xe3a00000 }, /* mov r0, #0 */ + { 0xe59f1004 }, /* ldr r1, [pc, #4] */ + { 0xe59f2004 }, /* ldr r2, [pc, #4] */ + { 0xe59ff004 }, /* ldr pc, [pc, #4] */ + { 0, FIXUP_BOARDID }, + { 0, FIXUP_ARGPTR_LO }, + { 0, FIXUP_ENTRYPOINT_LO }, + { 0, FIXUP_TERMINATOR } +}; + +/* Handling for secondary CPU boot in a multicore system. + * Unlike the uniprocessor/primary CPU boot, this is platform + * dependent. The default code here is based on the secondary + * CPU boot protocol used on realview/vexpress boards, with + * some parameterisation to increase its flexibility. + * QEMU platform models for which this code is not appropriate + * should override write_secondary_boot and secondary_cpu_reset_hook + * instead. + * + * This code enables the interrupt controllers for the secondary + * CPUs and then puts all the secondary CPUs into a loop waiting + * for an interprocessor interrupt and polling a configurable + * location for the kernel secondary CPU entry point. + */ +#define DSB_INSN 0xf57ff04f +#define CP15_DSB_INSN 0xee070f9a /* mcr cp15, 0, r0, c7, c10, 4 */ + +static const ARMInsnFixup smpboot[] = { + { 0xe59f2028 }, /* ldr r2, gic_cpu_if */ + { 0xe59f0028 }, /* ldr r0, bootreg_addr */ + { 0xe3a01001 }, /* mov r1, #1 */ + { 0xe5821000 }, /* str r1, [r2] - set GICC_CTLR.Enable */ + { 0xe3a010ff }, /* mov r1, #0xff */ + { 0xe5821004 }, /* str r1, [r2, 4] - set GIC_PMR.Priority to 0xff */ + { 0, FIXUP_DSB }, /* dsb */ + { 0xe320f003 }, /* wfi */ + { 0xe5901000 }, /* ldr r1, [r0] */ + { 0xe1110001 }, /* tst r1, r1 */ + { 0x0afffffb }, /* beq */ + { 0xe12fff11 }, /* bx r1 */ + { 0, FIXUP_GIC_CPU_IF }, /* gic_cpu_if: .word 0x.... */ + { 0, FIXUP_BOOTREG }, /* bootreg_addr: .word 0x.... */ + { 0, FIXUP_TERMINATOR } +}; + +static void write_bootloader(const char *name, hwaddr addr, + const ARMInsnFixup *insns, uint32_t *fixupcontext, + AddressSpace *as) +{ + /* Fix up the specified bootloader fragment and write it into + * guest memory using rom_add_blob_fixed(). fixupcontext is + * an array giving the values to write in for the fixup types + * which write a value into the code array. 
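+ *
+ * A typical caller only fills in the slots its fragment actually
+ * uses, along the lines of:
+ *
+ *   uint32_t fixupcontext[FIXUP_MAX];
+ *
+ *   fixupcontext[FIXUP_ENTRYPOINT_LO] = entry;
+ *   fixupcontext[FIXUP_ENTRYPOINT_HI] = entry >> 32;
+ *   write_bootloader("bootloader", info->loader_start,
+ *                    primary_loader, fixupcontext, as);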
+ */ + int i, len; + uint32_t *code; + + len = 0; + while (insns[len].fixup != FIXUP_TERMINATOR) { + len++; + } + + code = g_new0(uint32_t, len); + + for (i = 0; i < len; i++) { + uint32_t insn = insns[i].insn; + FixupType fixup = insns[i].fixup; + + switch (fixup) { + case FIXUP_NONE: + break; + case FIXUP_BOARDID: + case FIXUP_BOARD_SETUP: + case FIXUP_ARGPTR_LO: + case FIXUP_ARGPTR_HI: + case FIXUP_ENTRYPOINT_LO: + case FIXUP_ENTRYPOINT_HI: + case FIXUP_GIC_CPU_IF: + case FIXUP_BOOTREG: + case FIXUP_DSB: + insn = fixupcontext[fixup]; + break; + default: + abort(); + } + code[i] = tswap32(insn); + } + + assert((len * sizeof(uint32_t)) < BOOTLOADER_MAX_SIZE); + + rom_add_blob_fixed_as(name, code, len * sizeof(uint32_t), addr, as); + + g_free(code); +} + +static void default_write_secondary(ARMCPU *cpu, + const struct arm_boot_info *info) +{ + uint32_t fixupcontext[FIXUP_MAX]; + AddressSpace *as = arm_boot_address_space(cpu, info); + + fixupcontext[FIXUP_GIC_CPU_IF] = info->gic_cpu_if_addr; + fixupcontext[FIXUP_BOOTREG] = info->smp_bootreg_addr; + if (arm_feature(&cpu->env, ARM_FEATURE_V7)) { + fixupcontext[FIXUP_DSB] = DSB_INSN; + } else { + fixupcontext[FIXUP_DSB] = CP15_DSB_INSN; + } + + write_bootloader("smpboot", info->smp_loader_start, + smpboot, fixupcontext, as); +} + +void arm_write_secure_board_setup_dummy_smc(ARMCPU *cpu, + const struct arm_boot_info *info, + hwaddr mvbar_addr) +{ + AddressSpace *as = arm_boot_address_space(cpu, info); + int n; + uint32_t mvbar_blob[] = { + /* mvbar_addr: secure monitor vectors + * Default unimplemented and unused vectors to spin. Makes it + * easier to debug (as opposed to the CPU running away). + */ + 0xeafffffe, /* (spin) */ + 0xeafffffe, /* (spin) */ + 0xe1b0f00e, /* movs pc, lr ;SMC exception return */ + 0xeafffffe, /* (spin) */ + 0xeafffffe, /* (spin) */ + 0xeafffffe, /* (spin) */ + 0xeafffffe, /* (spin) */ + 0xeafffffe, /* (spin) */ + }; + uint32_t board_setup_blob[] = { + /* board setup addr */ + 0xee110f51, /* mrc p15, 0, r0, c1, c1, 2 ;read NSACR */ + 0xe3800b03, /* orr r0, #0xc00 ;set CP11, CP10 */ + 0xee010f51, /* mcr p15, 0, r0, c1, c1, 2 ;write NSACR */ + 0xe3a00e00 + (mvbar_addr >> 4), /* mov r0, #mvbar_addr */ + 0xee0c0f30, /* mcr p15, 0, r0, c12, c0, 1 ;set MVBAR */ + 0xee110f11, /* mrc p15, 0, r0, c1 , c1, 0 ;read SCR */ + 0xe3800031, /* orr r0, #0x31 ;enable AW, FW, NS */ + 0xee010f11, /* mcr p15, 0, r0, c1, c1, 0 ;write SCR */ + 0xe1a0100e, /* mov r1, lr ;save LR across SMC */ + 0xe1600070, /* smc #0 ;call monitor to flush SCR */ + 0xe1a0f001, /* mov pc, r1 ;return */ + }; + + /* check that mvbar_addr is correctly aligned and relocatable (using MOV) */ + assert((mvbar_addr & 0x1f) == 0 && (mvbar_addr >> 4) < 0x100); + + /* check that these blobs don't overlap */ + assert((mvbar_addr + sizeof(mvbar_blob) <= info->board_setup_addr) + || (info->board_setup_addr + sizeof(board_setup_blob) <= mvbar_addr)); + + for (n = 0; n < ARRAY_SIZE(mvbar_blob); n++) { + mvbar_blob[n] = tswap32(mvbar_blob[n]); + } + rom_add_blob_fixed_as("board-setup-mvbar", mvbar_blob, sizeof(mvbar_blob), + mvbar_addr, as); + + for (n = 0; n < ARRAY_SIZE(board_setup_blob); n++) { + board_setup_blob[n] = tswap32(board_setup_blob[n]); + } + rom_add_blob_fixed_as("board-setup", board_setup_blob, + sizeof(board_setup_blob), info->board_setup_addr, as); +} + +static void default_reset_secondary(ARMCPU *cpu, + const struct arm_boot_info *info) +{ + AddressSpace *as = arm_boot_address_space(cpu, info); + CPUState *cs = CPU(cpu); + + address_space_stl_notdirty(as, 
info->smp_bootreg_addr, + 0, MEMTXATTRS_UNSPECIFIED, NULL); + cpu_set_pc(cs, info->smp_loader_start); +} + +static inline bool have_dtb(const struct arm_boot_info *info) +{ + return info->dtb_filename || info->get_dtb; +} + +#define WRITE_WORD(p, value) do { \ + address_space_stl_notdirty(as, p, value, \ + MEMTXATTRS_UNSPECIFIED, NULL); \ + p += 4; \ +} while (0) + +static void set_kernel_args(const struct arm_boot_info *info, AddressSpace *as) +{ + int initrd_size = info->initrd_size; + hwaddr base = info->loader_start; + hwaddr p; + + p = base + KERNEL_ARGS_ADDR; + /* ATAG_CORE */ + WRITE_WORD(p, 5); + WRITE_WORD(p, 0x54410001); + WRITE_WORD(p, 1); + WRITE_WORD(p, 0x1000); + WRITE_WORD(p, 0); + /* ATAG_MEM */ + /* TODO: handle multiple chips on one ATAG list */ + WRITE_WORD(p, 4); + WRITE_WORD(p, 0x54410002); + WRITE_WORD(p, info->ram_size); + WRITE_WORD(p, info->loader_start); + if (initrd_size) { + /* ATAG_INITRD2 */ + WRITE_WORD(p, 4); + WRITE_WORD(p, 0x54420005); + WRITE_WORD(p, info->initrd_start); + WRITE_WORD(p, initrd_size); + } + if (info->kernel_cmdline && *info->kernel_cmdline) { + /* ATAG_CMDLINE */ + int cmdline_size; + + cmdline_size = strlen(info->kernel_cmdline); + address_space_write(as, p + 8, MEMTXATTRS_UNSPECIFIED, + info->kernel_cmdline, cmdline_size + 1); + cmdline_size = (cmdline_size >> 2) + 1; + WRITE_WORD(p, cmdline_size + 2); + WRITE_WORD(p, 0x54410009); + p += cmdline_size * 4; + } + if (info->atag_board) { + /* ATAG_BOARD */ + int atag_board_len; + uint8_t atag_board_buf[0x1000]; + + atag_board_len = (info->atag_board(info, atag_board_buf) + 3) & ~3; + WRITE_WORD(p, (atag_board_len + 8) >> 2); + WRITE_WORD(p, 0x414f4d50); + address_space_write(as, p, MEMTXATTRS_UNSPECIFIED, + atag_board_buf, atag_board_len); + p += atag_board_len; + } + /* ATAG_END */ + WRITE_WORD(p, 0); + WRITE_WORD(p, 0); +} + +static void set_kernel_args_old(const struct arm_boot_info *info, + AddressSpace *as) +{ + hwaddr p; + const char *s; + int initrd_size = info->initrd_size; + hwaddr base = info->loader_start; + + /* see linux/include/asm-arm/setup.h */ + p = base + KERNEL_ARGS_ADDR; + /* page_size */ + WRITE_WORD(p, 4096); + /* nr_pages */ + WRITE_WORD(p, info->ram_size / 4096); + /* ramdisk_size */ + WRITE_WORD(p, 0); +#define FLAG_READONLY 1 +#define FLAG_RDLOAD 4 +#define FLAG_RDPROMPT 8 + /* flags */ + WRITE_WORD(p, FLAG_READONLY | FLAG_RDLOAD | FLAG_RDPROMPT); + /* rootdev */ + WRITE_WORD(p, (31 << 8) | 0); /* /dev/mtdblock0 */ + /* video_num_cols */ + WRITE_WORD(p, 0); + /* video_num_rows */ + WRITE_WORD(p, 0); + /* video_x */ + WRITE_WORD(p, 0); + /* video_y */ + WRITE_WORD(p, 0); + /* memc_control_reg */ + WRITE_WORD(p, 0); + /* unsigned char sounddefault */ + /* unsigned char adfsdrives */ + /* unsigned char bytes_per_char_h */ + /* unsigned char bytes_per_char_v */ + WRITE_WORD(p, 0); + /* pages_in_bank[4] */ + WRITE_WORD(p, 0); + WRITE_WORD(p, 0); + WRITE_WORD(p, 0); + WRITE_WORD(p, 0); + /* pages_in_vram */ + WRITE_WORD(p, 0); + /* initrd_start */ + if (initrd_size) { + WRITE_WORD(p, info->initrd_start); + } else { + WRITE_WORD(p, 0); + } + /* initrd_size */ + WRITE_WORD(p, initrd_size); + /* rd_start */ + WRITE_WORD(p, 0); + /* system_rev */ + WRITE_WORD(p, 0); + /* system_serial_low */ + WRITE_WORD(p, 0); + /* system_serial_high */ + WRITE_WORD(p, 0); + /* mem_fclk_21285 */ + WRITE_WORD(p, 0); + /* zero unused fields */ + while (p < base + KERNEL_ARGS_ADDR + 256 + 1024) { + WRITE_WORD(p, 0); + } + s = info->kernel_cmdline; + if (s) { + address_space_write(as, p, 
MEMTXATTRS_UNSPECIFIED, s, strlen(s) + 1); + } else { + WRITE_WORD(p, 0); + } +} + +static int fdt_add_memory_node(void *fdt, uint32_t acells, hwaddr mem_base, + uint32_t scells, hwaddr mem_len, + int numa_node_id) +{ + char *nodename; + int ret; + + nodename = g_strdup_printf("/memory@%" PRIx64, mem_base); + qemu_fdt_add_subnode(fdt, nodename); + qemu_fdt_setprop_string(fdt, nodename, "device_type", "memory"); + ret = qemu_fdt_setprop_sized_cells(fdt, nodename, "reg", acells, mem_base, + scells, mem_len); + if (ret < 0) { + goto out; + } + + /* only set the NUMA ID if it is specified */ + if (numa_node_id >= 0) { + ret = qemu_fdt_setprop_cell(fdt, nodename, + "numa-node-id", numa_node_id); + } +out: + g_free(nodename); + return ret; +} + +static void fdt_add_psci_node(void *fdt) +{ + uint32_t cpu_suspend_fn; + uint32_t cpu_off_fn; + uint32_t cpu_on_fn; + uint32_t migrate_fn; + ARMCPU *armcpu = ARM_CPU(qemu_get_cpu(0)); + const char *psci_method; + int64_t psci_conduit; + int rc; + + psci_conduit = object_property_get_int(OBJECT(armcpu), + "psci-conduit", + &error_abort); + switch (psci_conduit) { + case QEMU_PSCI_CONDUIT_DISABLED: + return; + case QEMU_PSCI_CONDUIT_HVC: + psci_method = "hvc"; + break; + case QEMU_PSCI_CONDUIT_SMC: + psci_method = "smc"; + break; + default: + g_assert_not_reached(); + } + + /* + * If /psci node is present in provided DTB, assume that no fixup + * is necessary and all PSCI configuration should be taken as-is + */ + rc = fdt_path_offset(fdt, "/psci"); + if (rc >= 0) { + return; + } + + qemu_fdt_add_subnode(fdt, "/psci"); + if (armcpu->psci_version == 2) { + const char comp[] = "arm,psci-0.2\0arm,psci"; + qemu_fdt_setprop(fdt, "/psci", "compatible", comp, sizeof(comp)); + + cpu_off_fn = QEMU_PSCI_0_2_FN_CPU_OFF; + if (arm_feature(&armcpu->env, ARM_FEATURE_AARCH64)) { + cpu_suspend_fn = QEMU_PSCI_0_2_FN64_CPU_SUSPEND; + cpu_on_fn = QEMU_PSCI_0_2_FN64_CPU_ON; + migrate_fn = QEMU_PSCI_0_2_FN64_MIGRATE; + } else { + cpu_suspend_fn = QEMU_PSCI_0_2_FN_CPU_SUSPEND; + cpu_on_fn = QEMU_PSCI_0_2_FN_CPU_ON; + migrate_fn = QEMU_PSCI_0_2_FN_MIGRATE; + } + } else { + qemu_fdt_setprop_string(fdt, "/psci", "compatible", "arm,psci"); + + cpu_suspend_fn = QEMU_PSCI_0_1_FN_CPU_SUSPEND; + cpu_off_fn = QEMU_PSCI_0_1_FN_CPU_OFF; + cpu_on_fn = QEMU_PSCI_0_1_FN_CPU_ON; + migrate_fn = QEMU_PSCI_0_1_FN_MIGRATE; + } + + /* We adopt the PSCI spec's nomenclature, and use 'conduit' to refer + * to the instruction that should be used to invoke PSCI functions. + * However, the device tree binding uses 'method' instead, so that is + * what we should use here. 
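+ *
+ * For a v0.2 implementation using HVC the node built below comes out
+ * roughly as:
+ *
+ *   psci {
+ *       compatible = "arm,psci-0.2", "arm,psci";
+ *       method = "hvc";
+ *       cpu_suspend = <...>;
+ *       cpu_off = <...>;
+ *       cpu_on = <...>;
+ *       migrate = <...>;
+ *   };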
+ */ + qemu_fdt_setprop_string(fdt, "/psci", "method", psci_method); + + qemu_fdt_setprop_cell(fdt, "/psci", "cpu_suspend", cpu_suspend_fn); + qemu_fdt_setprop_cell(fdt, "/psci", "cpu_off", cpu_off_fn); + qemu_fdt_setprop_cell(fdt, "/psci", "cpu_on", cpu_on_fn); + qemu_fdt_setprop_cell(fdt, "/psci", "migrate", migrate_fn); +} + +int arm_load_dtb(hwaddr addr, const struct arm_boot_info *binfo, + hwaddr addr_limit, AddressSpace *as, MachineState *ms) +{ + void *fdt = NULL; + int size, rc, n = 0; + uint32_t acells, scells; + unsigned int i; + hwaddr mem_base, mem_len; + char **node_path; + Error *err = NULL; + + if (binfo->dtb_filename) { + char *filename; + filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, binfo->dtb_filename); + if (!filename) { + fprintf(stderr, "Couldn't open dtb file %s\n", binfo->dtb_filename); + goto fail; + } + + fdt = load_device_tree(filename, &size); + if (!fdt) { + fprintf(stderr, "Couldn't open dtb file %s\n", filename); + g_free(filename); + goto fail; + } + g_free(filename); + } else { + fdt = binfo->get_dtb(binfo, &size); + if (!fdt) { + fprintf(stderr, "Board was unable to create a dtb blob\n"); + goto fail; + } + } + + if (addr_limit > addr && size > (addr_limit - addr)) { + /* Installing the device tree blob at addr would exceed addr_limit. + * Whether this constitutes failure is up to the caller to decide, + * so just return 0 as size, i.e., no error. + */ + g_free(fdt); + return 0; + } + + acells = qemu_fdt_getprop_cell(fdt, "/", "#address-cells", + NULL, &error_fatal); + scells = qemu_fdt_getprop_cell(fdt, "/", "#size-cells", + NULL, &error_fatal); + if (acells == 0 || scells == 0) { + fprintf(stderr, "dtb file invalid (#address-cells or #size-cells 0)\n"); + goto fail; + } + + if (scells < 2 && binfo->ram_size >= 4 * GiB) { + /* This is user error so deserves a friendlier error message + * than the failure of setprop_sized_cells would provide + */ + fprintf(stderr, "qemu: dtb file not compatible with " + "RAM size > 4GB\n"); + goto fail; + } + + /* nop all root nodes matching /memory or /memory@unit-address */ + node_path = qemu_fdt_node_unit_path(fdt, "memory", &err); + if (err) { + error_report_err(err); + goto fail; + } + while (node_path[n]) { + if (g_str_has_prefix(node_path[n], "/memory")) { + qemu_fdt_nop_node(fdt, node_path[n]); + } + n++; + } + g_strfreev(node_path); + + /* + * We drop all the memory nodes which correspond to empty NUMA nodes + * from the device tree, because the Linux NUMA binding document + * states they should not be generated. Linux will get the NUMA node + * IDs of the empty NUMA nodes from the distance map if they are needed. + * This means QEMU users may be obliged to provide command lines which + * configure distance maps when the empty NUMA node IDs are needed and + * Linux's default distance map isn't sufficient. 
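+ *
+ * Something along these lines (node IDs and distance value purely
+ * illustrative) is one way to provide such a distance map:
+ *
+ *   -numa node,nodeid=0 -numa node,nodeid=1 \
+ *   -numa dist,src=0,dst=1,val=20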
+ */ + if (ms->numa_state != NULL && ms->numa_state->num_nodes > 0) { + mem_base = binfo->loader_start; + for (i = 0; i < ms->numa_state->num_nodes; i++) { + mem_len = ms->numa_state->nodes[i].node_mem; + if (!mem_len) { + continue; + } + + rc = fdt_add_memory_node(fdt, acells, mem_base, + scells, mem_len, i); + if (rc < 0) { + fprintf(stderr, "couldn't add /memory@%"PRIx64" node\n", + mem_base); + goto fail; + } + + mem_base += mem_len; + } + } else { + rc = fdt_add_memory_node(fdt, acells, binfo->loader_start, + scells, binfo->ram_size, -1); + if (rc < 0) { + fprintf(stderr, "couldn't add /memory@%"PRIx64" node\n", + binfo->loader_start); + goto fail; + } + } + + rc = fdt_path_offset(fdt, "/chosen"); + if (rc < 0) { + qemu_fdt_add_subnode(fdt, "/chosen"); + } + + if (ms->kernel_cmdline && *ms->kernel_cmdline) { + rc = qemu_fdt_setprop_string(fdt, "/chosen", "bootargs", + ms->kernel_cmdline); + if (rc < 0) { + fprintf(stderr, "couldn't set /chosen/bootargs\n"); + goto fail; + } + } + + if (binfo->initrd_size) { + rc = qemu_fdt_setprop_cell(fdt, "/chosen", "linux,initrd-start", + binfo->initrd_start); + if (rc < 0) { + fprintf(stderr, "couldn't set /chosen/linux,initrd-start\n"); + goto fail; + } + + rc = qemu_fdt_setprop_cell(fdt, "/chosen", "linux,initrd-end", + binfo->initrd_start + binfo->initrd_size); + if (rc < 0) { + fprintf(stderr, "couldn't set /chosen/linux,initrd-end\n"); + goto fail; + } + } + + fdt_add_psci_node(fdt); + + if (binfo->modify_dtb) { + binfo->modify_dtb(binfo, fdt); + } + + qemu_fdt_dumpdtb(fdt, size); + + /* Put the DTB into the memory map as a ROM image: this will ensure + * the DTB is copied again upon reset, even if addr points into RAM. + */ + rom_add_blob_fixed_as("dtb", fdt, size, addr, as); + + g_free(fdt); + + return size; + +fail: + g_free(fdt); + return -1; +} + +static void do_cpu_reset(void *opaque) +{ + ARMCPU *cpu = opaque; + CPUState *cs = CPU(cpu); + CPUARMState *env = &cpu->env; + const struct arm_boot_info *info = env->boot_info; + + cpu_reset(cs); + if (info) { + if (!info->is_linux) { + int i; + /* Jump to the entry point. */ + uint64_t entry = info->entry; + + switch (info->endianness) { + case ARM_ENDIANNESS_LE: + env->cp15.sctlr_el[1] &= ~SCTLR_E0E; + for (i = 1; i < 4; ++i) { + env->cp15.sctlr_el[i] &= ~SCTLR_EE; + } + env->uncached_cpsr &= ~CPSR_E; + break; + case ARM_ENDIANNESS_BE8: + env->cp15.sctlr_el[1] |= SCTLR_E0E; + for (i = 1; i < 4; ++i) { + env->cp15.sctlr_el[i] |= SCTLR_EE; + } + env->uncached_cpsr |= CPSR_E; + break; + case ARM_ENDIANNESS_BE32: + env->cp15.sctlr_el[1] |= SCTLR_B; + break; + case ARM_ENDIANNESS_UNKNOWN: + break; /* Board's decision */ + default: + g_assert_not_reached(); + } + + cpu_set_pc(cs, entry); + } else { + /* If we are booting Linux then we need to check whether we are + * booting into secure or non-secure state and adjust the state + * accordingly. Out of reset, ARM is defined to be in secure state + * (SCR.NS = 0), we change that here if non-secure boot has been + * requested. + */ + if (arm_feature(env, ARM_FEATURE_EL3)) { + /* AArch64 is defined to come out of reset into EL3 if enabled. + * If we are booting Linux then we need to adjust our EL as + * Linux expects us to be in EL2 or EL1. AArch32 resets into + * SVC, which Linux expects, so no privilege/exception level to + * adjust. 
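+ * (Below: an AArch64 CPU drops itself from EL3 into EL2h or EL1h,
+ * while an AArch32 CPU stays in SVC here and is only switched to Hyp
+ * further down, when EL2 exists and the boot is non-secure.)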
+ */ + if (env->aarch64) { + env->cp15.scr_el3 |= SCR_RW; + if (arm_feature(env, ARM_FEATURE_EL2)) { + env->cp15.hcr_el2 |= HCR_RW; + env->pstate = PSTATE_MODE_EL2h; + } else { + env->pstate = PSTATE_MODE_EL1h; + } + if (cpu_isar_feature(aa64_pauth, cpu)) { + env->cp15.scr_el3 |= SCR_API | SCR_APK; + } + if (cpu_isar_feature(aa64_mte, cpu)) { + env->cp15.scr_el3 |= SCR_ATA; + } + if (cpu_isar_feature(aa64_sve, cpu)) { + env->cp15.cptr_el[3] |= CPTR_EZ; + } + /* AArch64 kernels never boot in secure mode */ + assert(!info->secure_boot); + /* This hook is only supported for AArch32 currently: + * bootloader_aarch64[] will not call the hook, and + * the code above has already dropped us into EL2 or EL1. + */ + assert(!info->secure_board_setup); + } + + if (arm_feature(env, ARM_FEATURE_EL2)) { + /* If we have EL2 then Linux expects the HVC insn to work */ + env->cp15.scr_el3 |= SCR_HCE; + } + + /* Set to non-secure if not a secure boot */ + if (!info->secure_boot && + (cs != first_cpu || !info->secure_board_setup)) { + /* Linux expects non-secure state */ + env->cp15.scr_el3 |= SCR_NS; + /* Set NSACR.{CP11,CP10} so NS can access the FPU */ + env->cp15.nsacr |= 3 << 10; + } + } + + if (!env->aarch64 && !info->secure_boot && + arm_feature(env, ARM_FEATURE_EL2)) { + /* + * This is an AArch32 boot not to Secure state, and + * we have Hyp mode available, so boot the kernel into + * Hyp mode. This is not how the CPU comes out of reset, + * so we need to manually put it there. + */ + cpsr_write(env, ARM_CPU_MODE_HYP, CPSR_M, CPSRWriteRaw); + } + + if (cs == first_cpu) { + AddressSpace *as = arm_boot_address_space(cpu, info); + + cpu_set_pc(cs, info->loader_start); + + if (!have_dtb(info)) { + if (old_param) { + set_kernel_args_old(info, as); + } else { + set_kernel_args(info, as); + } + } + } else { + info->secondary_cpu_reset_hook(cpu, info); + } + } + arm_rebuild_hflags(env); + } +} + +/** + * load_image_to_fw_cfg() - Load an image file into an fw_cfg entry identified + * by key. + * @fw_cfg: The firmware config instance to store the data in. + * @size_key: The firmware config key to store the size of the loaded + * data under, with fw_cfg_add_i32(). + * @data_key: The firmware config key to store the loaded data under, + * with fw_cfg_add_bytes(). + * @image_name: The name of the image file to load. If it is NULL, the + * function returns without doing anything. + * @try_decompress: Whether the image should be decompressed (gunzipped) before + * adding it to fw_cfg. If decompression fails, the image is + * loaded as-is. + * + * In case of failure, the function prints an error message to stderr and the + * process exits with status 1. 
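+ *
+ * Used below along the lines of:
+ *
+ *   load_image_to_fw_cfg(fw_cfg, FW_CFG_KERNEL_SIZE, FW_CFG_KERNEL_DATA,
+ *                        info->kernel_filename, try_decompressing_kernel);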
+ */ +static void load_image_to_fw_cfg(FWCfgState *fw_cfg, uint16_t size_key, + uint16_t data_key, const char *image_name, + bool try_decompress) +{ + size_t size = -1; + uint8_t *data; + + if (image_name == NULL) { + return; + } + + if (try_decompress) { + size = load_image_gzipped_buffer(image_name, + LOAD_IMAGE_MAX_GUNZIP_BYTES, &data); + } + + if (size == (size_t)-1) { + gchar *contents; + gsize length; + + if (!g_file_get_contents(image_name, &contents, &length, NULL)) { + error_report("failed to load \"%s\"", image_name); + exit(1); + } + size = length; + data = (uint8_t *)contents; + } + + fw_cfg_add_i32(fw_cfg, size_key, size); + fw_cfg_add_bytes(fw_cfg, data_key, data, size); +} + +static int do_arm_linux_init(Object *obj, void *opaque) +{ + if (object_dynamic_cast(obj, TYPE_ARM_LINUX_BOOT_IF)) { + ARMLinuxBootIf *albif = ARM_LINUX_BOOT_IF(obj); + ARMLinuxBootIfClass *albifc = ARM_LINUX_BOOT_IF_GET_CLASS(obj); + struct arm_boot_info *info = opaque; + + if (albifc->arm_linux_init) { + albifc->arm_linux_init(albif, info->secure_boot); + } + } + return 0; +} + +static int64_t arm_load_elf(struct arm_boot_info *info, uint64_t *pentry, + uint64_t *lowaddr, uint64_t *highaddr, + int elf_machine, AddressSpace *as) +{ + bool elf_is64; + union { + Elf32_Ehdr h32; + Elf64_Ehdr h64; + } elf_header; + int data_swab = 0; + bool big_endian; + int64_t ret = -1; + Error *err = NULL; + + + load_elf_hdr(info->kernel_filename, &elf_header, &elf_is64, &err); + if (err) { + error_free(err); + return ret; + } + + if (elf_is64) { + big_endian = elf_header.h64.e_ident[EI_DATA] == ELFDATA2MSB; + info->endianness = big_endian ? ARM_ENDIANNESS_BE8 + : ARM_ENDIANNESS_LE; + } else { + big_endian = elf_header.h32.e_ident[EI_DATA] == ELFDATA2MSB; + if (big_endian) { + if (bswap32(elf_header.h32.e_flags) & EF_ARM_BE8) { + info->endianness = ARM_ENDIANNESS_BE8; + } else { + info->endianness = ARM_ENDIANNESS_BE32; + /* In BE32, the CPU has a different view of the per-byte + * address map than the rest of the system. BE32 ELF files + * are organised such that they can be programmed through + * the CPU's per-word byte-reversed view of the world. QEMU + * however loads ELF files independently of the CPU. So + * tell the ELF loader to byte reverse the data for us. + */ + data_swab = 2; + } + } else { + info->endianness = ARM_ENDIANNESS_LE; + } + } + + ret = load_elf_as(info->kernel_filename, NULL, NULL, NULL, + pentry, lowaddr, highaddr, NULL, big_endian, elf_machine, + 1, data_swab, as); + if (ret <= 0) { + /* The header loaded but the image didn't */ + exit(1); + } + + return ret; +} + +static uint64_t load_aarch64_image(const char *filename, hwaddr mem_base, + hwaddr *entry, AddressSpace *as) +{ + hwaddr kernel_load_offset = KERNEL64_LOAD_ADDR; + uint64_t kernel_size = 0; + uint8_t *buffer; + int size; + + /* On aarch64, it's the bootloader's job to uncompress the kernel. */ + size = load_image_gzipped_buffer(filename, LOAD_IMAGE_MAX_GUNZIP_BYTES, + &buffer); + + if (size < 0) { + gsize len; + + /* Load as raw file otherwise */ + if (!g_file_get_contents(filename, (char **)&buffer, &len, NULL)) { + return -1; + } + size = len; + } + + /* check the arm64 magic header value -- very old kernels may not have it */ + if (size > ARM64_MAGIC_OFFSET + 4 && + memcmp(buffer + ARM64_MAGIC_OFFSET, "ARM\x64", 4) == 0) { + uint64_t hdrvals[2]; + + /* The arm64 Image header has text_offset and image_size fields at 8 and + * 16 bytes into the Image header, respectively. 
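+ * (That is, a little-endian 64-bit text_offset at byte 8 and a
+ * little-endian 64-bit image_size at byte 16, which is what the
+ * hdrvals[] memcpy and le64_to_cpu() calls below decode.)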
The text_offset field + * is only valid if the image_size is non-zero. + */ + memcpy(&hdrvals, buffer + ARM64_TEXT_OFFSET_OFFSET, sizeof(hdrvals)); + + kernel_size = le64_to_cpu(hdrvals[1]); + + if (kernel_size != 0) { + kernel_load_offset = le64_to_cpu(hdrvals[0]); + + /* + * We write our startup "bootloader" at the very bottom of RAM, + * so that bit can't be used for the image. Luckily the Image + * format specification is that the image requests only an offset + * from a 2MB boundary, not an absolute load address. So if the + * image requests an offset that might mean it overlaps with the + * bootloader, we can just load it starting at 2MB+offset rather + * than 0MB + offset. + */ + if (kernel_load_offset < BOOTLOADER_MAX_SIZE) { + kernel_load_offset += 2 * MiB; + } + } + } + + /* + * Kernels before v3.17 don't populate the image_size field, and + * raw images have no header. For those our best guess at the size + * is the size of the Image file itself. + */ + if (kernel_size == 0) { + kernel_size = size; + } + + *entry = mem_base + kernel_load_offset; + rom_add_blob_fixed_as(filename, buffer, size, *entry, as); + + g_free(buffer); + + return kernel_size; +} + +static void arm_setup_direct_kernel_boot(ARMCPU *cpu, + struct arm_boot_info *info) +{ + /* Set up for a direct boot of a kernel image file. */ + CPUState *cs; + AddressSpace *as = arm_boot_address_space(cpu, info); + int kernel_size; + int initrd_size; + int is_linux = 0; + uint64_t elf_entry; + /* Addresses of first byte used and first byte not used by the image */ + uint64_t image_low_addr = 0, image_high_addr = 0; + int elf_machine; + hwaddr entry; + static const ARMInsnFixup *primary_loader; + uint64_t ram_end = info->loader_start + info->ram_size; + + if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) { + primary_loader = bootloader_aarch64; + elf_machine = EM_AARCH64; + } else { + primary_loader = bootloader; + if (!info->write_board_setup) { + primary_loader += BOOTLOADER_NO_BOARD_SETUP_OFFSET; + } + elf_machine = EM_ARM; + } + + if (!info->secondary_cpu_reset_hook) { + info->secondary_cpu_reset_hook = default_reset_secondary; + } + if (!info->write_secondary_boot) { + info->write_secondary_boot = default_write_secondary; + } + + if (info->nb_cpus == 0) + info->nb_cpus = 1; + + /* Assume that raw images are linux kernels, and ELF images are not. */ + kernel_size = arm_load_elf(info, &elf_entry, &image_low_addr, + &image_high_addr, elf_machine, as); + if (kernel_size > 0 && have_dtb(info)) { + /* + * If there is still some room left at the base of RAM, try and put + * the DTB there like we do for images loaded with -bios or -pflash. 
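+ * The address check below covers both a kernel loaded wholly above
+ * loader_start and the (unusual) case of one loaded wholly below it.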
+ */ + if (image_low_addr > info->loader_start + || image_high_addr < info->loader_start) { + /* + * Set image_low_addr as address limit for arm_load_dtb if it may be + * pointing into RAM, otherwise pass '0' (no limit) + */ + if (image_low_addr < info->loader_start) { + image_low_addr = 0; + } + info->dtb_start = info->loader_start; + info->dtb_limit = image_low_addr; + } + } + entry = elf_entry; + if (kernel_size < 0) { + uint64_t loadaddr = info->loader_start + KERNEL_NOLOAD_ADDR; + kernel_size = load_uimage_as(info->kernel_filename, &entry, &loadaddr, + &is_linux, NULL, NULL, as); + if (kernel_size >= 0) { + image_low_addr = loadaddr; + image_high_addr = image_low_addr + kernel_size; + } + } + if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64) && kernel_size < 0) { + kernel_size = load_aarch64_image(info->kernel_filename, + info->loader_start, &entry, as); + is_linux = 1; + if (kernel_size >= 0) { + image_low_addr = entry; + image_high_addr = image_low_addr + kernel_size; + } + } else if (kernel_size < 0) { + /* 32-bit ARM */ + entry = info->loader_start + KERNEL_LOAD_ADDR; + kernel_size = load_image_targphys_as(info->kernel_filename, entry, + ram_end - KERNEL_LOAD_ADDR, as); + is_linux = 1; + if (kernel_size >= 0) { + image_low_addr = entry; + image_high_addr = image_low_addr + kernel_size; + } + } + if (kernel_size < 0) { + error_report("could not load kernel '%s'", info->kernel_filename); + exit(1); + } + + if (kernel_size > info->ram_size) { + error_report("kernel '%s' is too large to fit in RAM " + "(kernel size %d, RAM size %" PRId64 ")", + info->kernel_filename, kernel_size, info->ram_size); + exit(1); + } + + info->entry = entry; + + /* + * We want to put the initrd far enough into RAM that when the + * kernel is uncompressed it will not clobber the initrd. However + * on boards without much RAM we must ensure that we still leave + * enough room for a decent sized initrd, and on boards with large + * amounts of RAM we must avoid the initrd being so far up in RAM + * that it is outside lowmem and inaccessible to the kernel. + * So for boards with less than 256MB of RAM we put the initrd + * halfway into RAM, and for boards with 256MB of RAM or more we put + * the initrd at 128MB. + * We also refuse to put the initrd somewhere that will definitely + * overlay the kernel we just loaded, though for kernel formats which + * don't tell us their exact size (eg self-decompressing 32-bit kernels) + * we might still make a bad choice here. 
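+ * As a worked example: a board with 512MiB of RAM at a loader_start
+ * of 0x40000000 gets its initrd placed at 0x48000000, unless the
+ * kernel image itself extends past that point.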
+ */ + info->initrd_start = info->loader_start + + MIN(info->ram_size / 2, 128 * MiB); + if (image_high_addr) { + info->initrd_start = MAX(info->initrd_start, image_high_addr); + } + info->initrd_start = TARGET_PAGE_ALIGN(info->initrd_start); + + if (is_linux) { + uint32_t fixupcontext[FIXUP_MAX]; + + if (info->initrd_filename) { + + if (info->initrd_start >= ram_end) { + error_report("not enough space after kernel to load initrd"); + exit(1); + } + + initrd_size = load_ramdisk_as(info->initrd_filename, + info->initrd_start, + ram_end - info->initrd_start, as); + if (initrd_size < 0) { + initrd_size = load_image_targphys_as(info->initrd_filename, + info->initrd_start, + ram_end - + info->initrd_start, + as); + } + if (initrd_size < 0) { + error_report("could not load initrd '%s'", + info->initrd_filename); + exit(1); + } + if (info->initrd_start + initrd_size > ram_end) { + error_report("could not load initrd '%s': " + "too big to fit into RAM after the kernel", + info->initrd_filename); + exit(1); + } + } else { + initrd_size = 0; + } + info->initrd_size = initrd_size; + + fixupcontext[FIXUP_BOARDID] = info->board_id; + fixupcontext[FIXUP_BOARD_SETUP] = info->board_setup_addr; + + /* + * for device tree boot, we pass the DTB directly in r2. Otherwise + * we point to the kernel args. + */ + if (have_dtb(info)) { + hwaddr align; + + if (elf_machine == EM_AARCH64) { + /* + * Some AArch64 kernels on early bootup map the fdt region as + * + * [ ALIGN_DOWN(fdt, 2MB) ... ALIGN_DOWN(fdt, 2MB) + 2MB ] + * + * Let's play safe and prealign it to 2MB to give us some space. + */ + align = 2 * MiB; + } else { + /* + * Some 32bit kernels will trash anything in the 4K page the + * initrd ends in, so make sure the DTB isn't caught up in that. + */ + align = 4 * KiB; + } + + /* Place the DTB after the initrd in memory with alignment. */ + info->dtb_start = QEMU_ALIGN_UP(info->initrd_start + initrd_size, + align); + if (info->dtb_start >= ram_end) { + error_report("Not enough space for DTB after kernel/initrd"); + exit(1); + } + fixupcontext[FIXUP_ARGPTR_LO] = info->dtb_start; + fixupcontext[FIXUP_ARGPTR_HI] = info->dtb_start >> 32; + } else { + fixupcontext[FIXUP_ARGPTR_LO] = + info->loader_start + KERNEL_ARGS_ADDR; + fixupcontext[FIXUP_ARGPTR_HI] = + (info->loader_start + KERNEL_ARGS_ADDR) >> 32; + if (info->ram_size >= 4 * GiB) { + error_report("RAM size must be less than 4GB to boot" + " Linux kernel using ATAGS (try passing a device tree" + " using -dtb)"); + exit(1); + } + } + fixupcontext[FIXUP_ENTRYPOINT_LO] = entry; + fixupcontext[FIXUP_ENTRYPOINT_HI] = entry >> 32; + + write_bootloader("bootloader", info->loader_start, + primary_loader, fixupcontext, as); + + if (info->nb_cpus > 1) { + info->write_secondary_boot(cpu, info); + } + if (info->write_board_setup) { + info->write_board_setup(cpu, info); + } + + /* + * Notify devices which need to fake up firmware initialization + * that we're doing a direct kernel boot. 
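+ * (The do_arm_linux_init() walker above invokes the arm_linux_init
+ * hook on every object implementing TYPE_ARM_LINUX_BOOT_IF.)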
+ */ + object_child_foreach_recursive(object_get_root(), + do_arm_linux_init, info); + } + info->is_linux = is_linux; + + for (cs = first_cpu; cs; cs = CPU_NEXT(cs)) { + ARM_CPU(cs)->env.boot_info = info; + } +} + +static void arm_setup_firmware_boot(ARMCPU *cpu, struct arm_boot_info *info) +{ + /* Set up for booting firmware (which might load a kernel via fw_cfg) */ + + if (have_dtb(info)) { + /* + * If we have a device tree blob, but no kernel to supply it to (or + * the kernel is supposed to be loaded by the bootloader), copy the + * DTB to the base of RAM for the bootloader to pick up. + */ + info->dtb_start = info->loader_start; + } + + if (info->kernel_filename) { + FWCfgState *fw_cfg; + bool try_decompressing_kernel; + + fw_cfg = fw_cfg_find(); + + if (!fw_cfg) { + error_report("This machine type does not support loading both " + "a guest firmware/BIOS image and a guest kernel at " + "the same time. You should change your QEMU command " + "line to specify one or the other, but not both."); + exit(1); + } + + try_decompressing_kernel = arm_feature(&cpu->env, + ARM_FEATURE_AARCH64); + + /* + * Expose the kernel, the command line, and the initrd in fw_cfg. + * We don't process them here at all, it's all left to the + * firmware. + */ + load_image_to_fw_cfg(fw_cfg, + FW_CFG_KERNEL_SIZE, FW_CFG_KERNEL_DATA, + info->kernel_filename, + try_decompressing_kernel); + load_image_to_fw_cfg(fw_cfg, + FW_CFG_INITRD_SIZE, FW_CFG_INITRD_DATA, + info->initrd_filename, false); + + if (info->kernel_cmdline) { + fw_cfg_add_i32(fw_cfg, FW_CFG_CMDLINE_SIZE, + strlen(info->kernel_cmdline) + 1); + fw_cfg_add_string(fw_cfg, FW_CFG_CMDLINE_DATA, + info->kernel_cmdline); + } + } + + /* + * We will start from address 0 (typically a boot ROM image) in the + * same way as hardware. Leave env->boot_info NULL, so that + * do_cpu_reset() knows it does not need to alter the PC on reset. + */ +} + +void arm_load_kernel(ARMCPU *cpu, MachineState *ms, struct arm_boot_info *info) +{ + CPUState *cs; + AddressSpace *as = arm_boot_address_space(cpu, info); + + /* + * CPU objects (unlike devices) are not automatically reset on system + * reset, so we must always register a handler to do so. If we're + * actually loading a kernel, the handler is also responsible for + * arranging that we start it correctly. + */ + for (cs = first_cpu; cs; cs = CPU_NEXT(cs)) { + qemu_register_reset(do_cpu_reset, ARM_CPU(cs)); + } + + /* + * The board code is not supposed to set secure_board_setup unless + * running its code in secure mode is actually possible, and KVM + * doesn't support secure. + */ + assert(!(info->secure_board_setup && kvm_enabled())); + info->kernel_filename = ms->kernel_filename; + info->kernel_cmdline = ms->kernel_cmdline; + info->initrd_filename = ms->initrd_filename; + info->dtb_filename = ms->dtb; + info->dtb_limit = 0; + + /* Load the kernel. 
*/ + if (!info->kernel_filename || info->firmware_loaded) { + arm_setup_firmware_boot(cpu, info); + } else { + arm_setup_direct_kernel_boot(cpu, info); + } + + if (!info->skip_dtb_autoload && have_dtb(info)) { + if (arm_load_dtb(info->dtb_start, info, info->dtb_limit, as, ms) < 0) { + exit(1); + } + } +} + +static const TypeInfo arm_linux_boot_if_info = { + .name = TYPE_ARM_LINUX_BOOT_IF, + .parent = TYPE_INTERFACE, + .class_size = sizeof(ARMLinuxBootIfClass), +}; + +static void arm_linux_boot_register_types(void) +{ + type_register_static(&arm_linux_boot_if_info); +} + +type_init(arm_linux_boot_register_types) diff --git a/hw/arm/collie.c b/hw/arm/collie.c new file mode 100644 index 000000000..8df31e279 --- /dev/null +++ b/hw/arm/collie.c @@ -0,0 +1,93 @@ +/* + * SA-1110-based Sharp Zaurus SL-5500 platform. + * + * Copyright (C) 2011 Dmitry Eremin-Solenikov + * + * This code is licensed under GNU GPL v2. + * + * Contributions after 2012-01-13 are licensed under the terms of the + * GNU GPL, version 2 or (at your option) any later version. + */ +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "qemu/cutils.h" +#include "hw/sysbus.h" +#include "hw/boards.h" +#include "strongarm.h" +#include "hw/arm/boot.h" +#include "hw/block/flash.h" +#include "exec/address-spaces.h" +#include "cpu.h" +#include "qom/object.h" + +struct CollieMachineState { + MachineState parent; + + StrongARMState *sa1110; +}; + +#define TYPE_COLLIE_MACHINE MACHINE_TYPE_NAME("collie") +OBJECT_DECLARE_SIMPLE_TYPE(CollieMachineState, COLLIE_MACHINE) + +static struct arm_boot_info collie_binfo = { + .loader_start = SA_SDCS0, + .ram_size = 0x20000000, +}; + +static void collie_init(MachineState *machine) +{ + DriveInfo *dinfo; + MachineClass *mc = MACHINE_GET_CLASS(machine); + CollieMachineState *cms = COLLIE_MACHINE(machine); + + if (machine->ram_size != mc->default_ram_size) { + char *sz = size_to_str(mc->default_ram_size); + error_report("Invalid RAM size, should be %s", sz); + g_free(sz); + exit(EXIT_FAILURE); + } + + cms->sa1110 = sa1110_init(machine->cpu_type); + + memory_region_add_subregion(get_system_memory(), SA_SDCS0, machine->ram); + + dinfo = drive_get(IF_PFLASH, 0, 0); + pflash_cfi01_register(SA_CS0, "collie.fl1", 0x02000000, + dinfo ? blk_by_legacy_dinfo(dinfo) : NULL, + 64 * KiB, 4, 0x00, 0x00, 0x00, 0x00, 0); + + dinfo = drive_get(IF_PFLASH, 0, 1); + pflash_cfi01_register(SA_CS1, "collie.fl2", 0x02000000, + dinfo ? 
blk_by_legacy_dinfo(dinfo) : NULL, + 64 * KiB, 4, 0x00, 0x00, 0x00, 0x00, 0); + + sysbus_create_simple("scoop", 0x40800000, NULL); + + collie_binfo.board_id = 0x208; + arm_load_kernel(cms->sa1110->cpu, machine, &collie_binfo); +} + +static void collie_machine_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + + mc->desc = "Sharp SL-5500 (Collie) PDA (SA-1110)"; + mc->init = collie_init; + mc->ignore_memory_transaction_failures = true; + mc->default_cpu_type = ARM_CPU_TYPE_NAME("sa1110"); + mc->default_ram_size = 0x20000000; + mc->default_ram_id = "strongarm.sdram"; +} + +static const TypeInfo collie_machine_typeinfo = { + .name = TYPE_COLLIE_MACHINE, + .parent = TYPE_MACHINE, + .class_init = collie_machine_class_init, + .instance_size = sizeof(CollieMachineState), +}; + +static void collie_machine_register_types(void) +{ + type_register_static(&collie_machine_typeinfo); +} +type_init(collie_machine_register_types); diff --git a/hw/arm/cubieboard.c b/hw/arm/cubieboard.c new file mode 100644 index 000000000..294ba5de6 --- /dev/null +++ b/hw/arm/cubieboard.c @@ -0,0 +1,114 @@ +/* + * cubieboard emulation + * + * Copyright (C) 2013 Li Guang + * Written by Li Guang + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "hw/boards.h" +#include "hw/qdev-properties.h" +#include "hw/arm/allwinner-a10.h" + +static struct arm_boot_info cubieboard_binfo = { + .loader_start = AW_A10_SDRAM_BASE, + .board_id = 0x1008, +}; + +static void cubieboard_init(MachineState *machine) +{ + AwA10State *a10; + Error *err = NULL; + DriveInfo *di; + BlockBackend *blk; + BusState *bus; + DeviceState *carddev; + + /* BIOS is not supported by this board */ + if (machine->firmware) { + error_report("BIOS not supported for this machine"); + exit(1); + } + + /* This board has fixed size RAM (512MiB or 1GiB) */ + if (machine->ram_size != 512 * MiB && + machine->ram_size != 1 * GiB) { + error_report("This machine can only be used with 512MiB or 1GiB RAM"); + exit(1); + } + + /* Only allow Cortex-A8 for this board */ + if (strcmp(machine->cpu_type, ARM_CPU_TYPE_NAME("cortex-a8")) != 0) { + error_report("This board can only be used with cortex-a8 CPU"); + exit(1); + } + + a10 = AW_A10(object_new(TYPE_AW_A10)); + object_property_add_child(OBJECT(machine), "soc", OBJECT(a10)); + object_unref(OBJECT(a10)); + + if (!object_property_set_int(OBJECT(&a10->emac), "phy-addr", 1, &err)) { + error_reportf_err(err, "Couldn't set phy address: "); + exit(1); + } + + if (!object_property_set_int(OBJECT(&a10->timer), "clk0-freq", 32768, + &err)) { + error_reportf_err(err, "Couldn't set clk0 frequency: "); + exit(1); + } + + if (!object_property_set_int(OBJECT(&a10->timer), "clk1-freq", 24000000, + &err)) { + error_reportf_err(err, "Couldn't set clk1 frequency: "); + exit(1); + } + + if (!qdev_realize(DEVICE(a10), NULL, &err)) { + error_reportf_err(err, "Couldn't realize Allwinner A10: "); + exit(1); + } + + /* Retrieve SD bus */ + di = drive_get_next(IF_SD); + blk = di ? 
blk_by_legacy_dinfo(di) : NULL; + bus = qdev_get_child_bus(DEVICE(a10), "sd-bus"); + + /* Plug in SD card */ + carddev = qdev_new(TYPE_SD_CARD); + qdev_prop_set_drive_err(carddev, "drive", blk, &error_fatal); + qdev_realize_and_unref(carddev, bus, &error_fatal); + + memory_region_add_subregion(get_system_memory(), AW_A10_SDRAM_BASE, + machine->ram); + + /* TODO create and connect IDE devices for ide_drive_get() */ + + cubieboard_binfo.ram_size = machine->ram_size; + arm_load_kernel(&a10->cpu, machine, &cubieboard_binfo); +} + +static void cubieboard_machine_init(MachineClass *mc) +{ + mc->desc = "cubietech cubieboard (Cortex-A8)"; + mc->default_cpu_type = ARM_CPU_TYPE_NAME("cortex-a8"); + mc->default_ram_size = 1 * GiB; + mc->init = cubieboard_init; + mc->block_default_type = IF_IDE; + mc->units_per_default_bus = 1; + mc->ignore_memory_transaction_failures = true; + mc->default_ram_id = "cubieboard.ram"; +} + +DEFINE_MACHINE("cubieboard", cubieboard_machine_init) diff --git a/hw/arm/digic.c b/hw/arm/digic.c new file mode 100644 index 000000000..614232165 --- /dev/null +++ b/hw/arm/digic.c @@ -0,0 +1,107 @@ +/* + * QEMU model of the Canon DIGIC SoC. + * + * Copyright (C) 2013 Antony Pavlov + * + * This model is based on reverse engineering efforts + * made by CHDK (http://chdk.wikia.com) and + * Magic Lantern (http://www.magiclantern.fm) projects + * contributors. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/module.h" +#include "hw/arm/digic.h" +#include "hw/qdev-properties.h" +#include "sysemu/sysemu.h" + +#define DIGIC4_TIMER_BASE(n) (0xc0210000 + (n) * 0x100) + +#define DIGIC_UART_BASE 0xc0800000 + +static void digic_init(Object *obj) +{ + DigicState *s = DIGIC(obj); + int i; + + object_initialize_child(obj, "cpu", &s->cpu, ARM_CPU_TYPE_NAME("arm946")); + + for (i = 0; i < DIGIC4_NB_TIMERS; i++) { +#define DIGIC_TIMER_NAME_MLEN 11 + char name[DIGIC_TIMER_NAME_MLEN]; + + snprintf(name, DIGIC_TIMER_NAME_MLEN, "timer[%d]", i); + object_initialize_child(obj, name, &s->timer[i], TYPE_DIGIC_TIMER); + } + + object_initialize_child(obj, "uart", &s->uart, TYPE_DIGIC_UART); +} + +static void digic_realize(DeviceState *dev, Error **errp) +{ + DigicState *s = DIGIC(dev); + SysBusDevice *sbd; + int i; + + if (!object_property_set_bool(OBJECT(&s->cpu), "reset-hivecs", true, + errp)) { + return; + } + + if (!qdev_realize(DEVICE(&s->cpu), NULL, errp)) { + return; + } + + for (i = 0; i < DIGIC4_NB_TIMERS; i++) { + if (!sysbus_realize(SYS_BUS_DEVICE(&s->timer[i]), errp)) { + return; + } + + sbd = SYS_BUS_DEVICE(&s->timer[i]); + sysbus_mmio_map(sbd, 0, DIGIC4_TIMER_BASE(i)); + } + + qdev_prop_set_chr(DEVICE(&s->uart), "chardev", serial_hd(0)); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->uart), errp)) { + return; + } + + sbd = SYS_BUS_DEVICE(&s->uart); + sysbus_mmio_map(sbd, 0, DIGIC_UART_BASE); +} + +static void digic_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + + dc->realize = digic_realize; + /* Reason: Uses serial_hds in the realize function --> not usable twice */ + dc->user_creatable = false; +} + +static const TypeInfo digic_type_info = { + .name = TYPE_DIGIC, + .parent = TYPE_DEVICE, + .instance_size = sizeof(DigicState), + .instance_init = digic_init, + .class_init = digic_class_init, +}; + +static void digic_register_types(void) +{ + type_register_static(&digic_type_info); +} + +type_init(digic_register_types) diff --git a/hw/arm/digic_boards.c b/hw/arm/digic_boards.c new file mode 100644 index 000000000..b771a3d8b --- /dev/null +++ b/hw/arm/digic_boards.c @@ -0,0 +1,149 @@ +/* + * QEMU model of the Canon DIGIC boards (cameras indeed :). + * + * Copyright (C) 2013 Antony Pavlov + * + * This model is based on reverse engineering efforts + * made by CHDK (http://chdk.wikia.com) and + * Magic Lantern (http://www.magiclantern.fm) projects + * contributors. + * + * See docs here: + * http://magiclantern.wikia.com/wiki/Register_Map + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu-common.h" +#include "qemu/datadir.h" +#include "hw/boards.h" +#include "qemu/error-report.h" +#include "hw/arm/digic.h" +#include "hw/block/flash.h" +#include "hw/loader.h" +#include "sysemu/qtest.h" +#include "qemu/units.h" +#include "qemu/cutils.h" + +#define DIGIC4_ROM0_BASE 0xf0000000 +#define DIGIC4_ROM1_BASE 0xf8000000 +#define DIGIC4_ROM_MAX_SIZE 0x08000000 + +typedef struct DigicBoard { + void (*add_rom0)(DigicState *, hwaddr, const char *); + const char *rom0_def_filename; + void (*add_rom1)(DigicState *, hwaddr, const char *); + const char *rom1_def_filename; +} DigicBoard; + +static void digic4_board_init(MachineState *machine, DigicBoard *board) +{ + Error *err = NULL; + DigicState *s = DIGIC(object_new(TYPE_DIGIC)); + MachineClass *mc = MACHINE_GET_CLASS(machine); + + if (machine->ram_size != mc->default_ram_size) { + char *sz = size_to_str(mc->default_ram_size); + error_report("Invalid RAM size, should be %s", sz); + g_free(sz); + exit(EXIT_FAILURE); + } + + if (!qdev_realize(DEVICE(s), NULL, &err)) { + error_reportf_err(err, "Couldn't realize DIGIC SoC: "); + exit(1); + } + + memory_region_add_subregion(get_system_memory(), 0, machine->ram); + + if (board->add_rom0) { + board->add_rom0(s, DIGIC4_ROM0_BASE, + machine->firmware ?: board->rom0_def_filename); + } + + if (board->add_rom1) { + board->add_rom1(s, DIGIC4_ROM1_BASE, + machine->firmware ?: board->rom1_def_filename); + } +} + +static void digic_load_rom(DigicState *s, hwaddr addr, + hwaddr max_size, const char *filename) +{ + target_long rom_size; + + if (qtest_enabled()) { + /* qtest runs no code so don't attempt a ROM load which + * could fail and result in a spurious test failure. + */ + return; + } + + if (filename) { + char *fn = qemu_find_file(QEMU_FILE_TYPE_BIOS, filename); + + if (!fn) { + error_report("Couldn't find rom image '%s'.", filename); + exit(1); + } + + rom_size = load_image_targphys(fn, addr, max_size); + if (rom_size < 0 || rom_size > max_size) { + error_report("Couldn't load rom image '%s'.", filename); + exit(1); + } + g_free(fn); + } +} + +/* + * Samsung K8P3215UQB + * 64M Bit (4Mx16) Page Mode / Multi-Bank NOR Flash Memory + */ +static void digic4_add_k8p3215uqb_rom(DigicState *s, hwaddr addr, + const char *filename) +{ +#define FLASH_K8P3215UQB_SIZE (4 * 1024 * 1024) +#define FLASH_K8P3215UQB_SECTOR_SIZE (64 * 1024) + + pflash_cfi02_register(addr, "pflash", FLASH_K8P3215UQB_SIZE, + NULL, FLASH_K8P3215UQB_SECTOR_SIZE, + DIGIC4_ROM_MAX_SIZE / FLASH_K8P3215UQB_SIZE, + 4, + 0x00EC, 0x007E, 0x0003, 0x0001, + 0x0555, 0x2aa, 0); + + digic_load_rom(s, addr, FLASH_K8P3215UQB_SIZE, filename); +} + +static DigicBoard digic4_board_canon_a1100 = { + .add_rom1 = digic4_add_k8p3215uqb_rom, + .rom1_def_filename = "canon-a1100-rom1.bin", +}; + +static void canon_a1100_init(MachineState *machine) +{ + digic4_board_init(machine, &digic4_board_canon_a1100); +} + +static void canon_a1100_machine_init(MachineClass *mc) +{ + mc->desc = "Canon PowerShot A1100 IS (ARM946)"; + mc->init = &canon_a1100_init; + mc->ignore_memory_transaction_failures = true; + mc->default_ram_size = 64 * MiB; + mc->default_ram_id = "ram"; +} + +DEFINE_MACHINE("canon-a1100", canon_a1100_machine_init) diff --git a/hw/arm/exynos4210.c b/hw/arm/exynos4210.c new file mode 100644 index 000000000..0299e81f8 --- /dev/null +++ b/hw/arm/exynos4210.c @@ -0,0 +1,513 @@ +/* + * Samsung exynos4210 SoC emulation + * + * Copyright (c) 2011 Samsung Electronics Co., Ltd. 
All rights reserved. + * Maksim Kozlov + * Evgeny Voevodin + * Igor Mitsyanko + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . + * + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "cpu.h" +#include "hw/cpu/a9mpcore.h" +#include "hw/irq.h" +#include "sysemu/blockdev.h" +#include "sysemu/sysemu.h" +#include "hw/sysbus.h" +#include "hw/arm/boot.h" +#include "hw/loader.h" +#include "hw/qdev-properties.h" +#include "hw/arm/exynos4210.h" +#include "hw/sd/sdhci.h" +#include "hw/usb/hcd-ehci.h" + +#define EXYNOS4210_CHIPID_ADDR 0x10000000 + +/* PWM */ +#define EXYNOS4210_PWM_BASE_ADDR 0x139D0000 + +/* RTC */ +#define EXYNOS4210_RTC_BASE_ADDR 0x10070000 + +/* MCT */ +#define EXYNOS4210_MCT_BASE_ADDR 0x10050000 + +/* I2C */ +#define EXYNOS4210_I2C_SHIFT 0x00010000 +#define EXYNOS4210_I2C_BASE_ADDR 0x13860000 +/* Interrupt Group of External Interrupt Combiner for I2C */ +#define EXYNOS4210_I2C_INTG 27 +#define EXYNOS4210_HDMI_INTG 16 + +/* UART definitions */ +#define EXYNOS4210_UART0_BASE_ADDR 0x13800000 +#define EXYNOS4210_UART1_BASE_ADDR 0x13810000 +#define EXYNOS4210_UART2_BASE_ADDR 0x13820000 +#define EXYNOS4210_UART3_BASE_ADDR 0x13830000 +#define EXYNOS4210_UART0_FIFO_SIZE 256 +#define EXYNOS4210_UART1_FIFO_SIZE 64 +#define EXYNOS4210_UART2_FIFO_SIZE 16 +#define EXYNOS4210_UART3_FIFO_SIZE 16 +/* Interrupt Group of External Interrupt Combiner for UART */ +#define EXYNOS4210_UART_INT_GRP 26 + +/* External GIC */ +#define EXYNOS4210_EXT_GIC_CPU_BASE_ADDR 0x10480000 +#define EXYNOS4210_EXT_GIC_DIST_BASE_ADDR 0x10490000 + +/* Combiner */ +#define EXYNOS4210_EXT_COMBINER_BASE_ADDR 0x10440000 +#define EXYNOS4210_INT_COMBINER_BASE_ADDR 0x10448000 + +/* SD/MMC host controllers */ +#define EXYNOS4210_SDHCI_CAPABILITIES 0x05E80080 +#define EXYNOS4210_SDHCI_BASE_ADDR 0x12510000 +#define EXYNOS4210_SDHCI_ADDR(n) (EXYNOS4210_SDHCI_BASE_ADDR + \ + 0x00010000 * (n)) +#define EXYNOS4210_SDHCI_NUMBER 4 + +/* PMU SFR base address */ +#define EXYNOS4210_PMU_BASE_ADDR 0x10020000 + +/* Clock controller SFR base address */ +#define EXYNOS4210_CLK_BASE_ADDR 0x10030000 + +/* PRNG/HASH SFR base address */ +#define EXYNOS4210_RNG_BASE_ADDR 0x10830400 + +/* Display controllers (FIMD) */ +#define EXYNOS4210_FIMD0_BASE_ADDR 0x11C00000 + +/* EHCI */ +#define EXYNOS4210_EHCI_BASE_ADDR 0x12580000 + +/* DMA */ +#define EXYNOS4210_PL330_BASE0_ADDR 0x12680000 +#define EXYNOS4210_PL330_BASE1_ADDR 0x12690000 +#define EXYNOS4210_PL330_BASE2_ADDR 0x12850000 + +static uint8_t chipid_and_omr[] = { 0x11, 0x02, 0x21, 0x43, + 0x09, 0x00, 0x00, 0x00 }; + +static uint64_t exynos4210_chipid_and_omr_read(void *opaque, hwaddr offset, + unsigned size) +{ + assert(offset < sizeof(chipid_and_omr)); + return chipid_and_omr[offset]; +} + +static void exynos4210_chipid_and_omr_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + return; +} + +static const MemoryRegionOps exynos4210_chipid_and_omr_ops = { + .read =
exynos4210_chipid_and_omr_read, + .write = exynos4210_chipid_and_omr_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .impl = { + .max_access_size = 1, + } +}; + +void exynos4210_write_secondary(ARMCPU *cpu, + const struct arm_boot_info *info) +{ + int n; + uint32_t smpboot[] = { + 0xe59f3034, /* ldr r3, External gic_cpu_if */ + 0xe59f2034, /* ldr r2, Internal gic_cpu_if */ + 0xe59f0034, /* ldr r0, startaddr */ + 0xe3a01001, /* mov r1, #1 */ + 0xe5821000, /* str r1, [r2] */ + 0xe5831000, /* str r1, [r3] */ + 0xe3a010ff, /* mov r1, #0xff */ + 0xe5821004, /* str r1, [r2, #4] */ + 0xe5831004, /* str r1, [r3, #4] */ + 0xf57ff04f, /* dsb */ + 0xe320f003, /* wfi */ + 0xe5901000, /* ldr r1, [r0] */ + 0xe1110001, /* tst r1, r1 */ + 0x0afffffb, /* beq */ + 0xe12fff11, /* bx r1 */ + EXYNOS4210_EXT_GIC_CPU_BASE_ADDR, + 0, /* gic_cpu_if: base address of Internal GIC CPU interface */ + 0 /* bootreg: Boot register address is held here */ + }; + smpboot[ARRAY_SIZE(smpboot) - 1] = info->smp_bootreg_addr; + smpboot[ARRAY_SIZE(smpboot) - 2] = info->gic_cpu_if_addr; + for (n = 0; n < ARRAY_SIZE(smpboot); n++) { + smpboot[n] = tswap32(smpboot[n]); + } + rom_add_blob_fixed("smpboot", smpboot, sizeof(smpboot), + info->smp_loader_start); +} + +static uint64_t exynos4210_calc_affinity(int cpu) +{ + /* Exynos4210 has 0x9 as cluster ID */ + return (0x9 << ARM_AFF1_SHIFT) | cpu; +} + +static DeviceState *pl330_create(uint32_t base, qemu_or_irq *orgate, + qemu_irq irq, int nreq, int nevents, int width) +{ + SysBusDevice *busdev; + DeviceState *dev; + int i; + + dev = qdev_new("pl330"); + object_property_set_link(OBJECT(dev), "memory", + OBJECT(get_system_memory()), + &error_fatal); + qdev_prop_set_uint8(dev, "num_events", nevents); + qdev_prop_set_uint8(dev, "num_chnls", 8); + qdev_prop_set_uint8(dev, "num_periph_req", nreq); + + qdev_prop_set_uint8(dev, "wr_cap", 4); + qdev_prop_set_uint8(dev, "wr_q_dep", 8); + qdev_prop_set_uint8(dev, "rd_cap", 4); + qdev_prop_set_uint8(dev, "rd_q_dep", 8); + qdev_prop_set_uint8(dev, "data_width", width); + qdev_prop_set_uint16(dev, "data_buffer_dep", width); + busdev = SYS_BUS_DEVICE(dev); + sysbus_realize_and_unref(busdev, &error_fatal); + sysbus_mmio_map(busdev, 0, base); + + object_property_set_int(OBJECT(orgate), "num-lines", nevents + 1, + &error_abort); + qdev_realize(DEVICE(orgate), NULL, &error_abort); + + for (i = 0; i < nevents + 1; i++) { + sysbus_connect_irq(busdev, i, qdev_get_gpio_in(DEVICE(orgate), i)); + } + qdev_connect_gpio_out(DEVICE(orgate), 0, irq); + return dev; +} + +static void exynos4210_realize(DeviceState *socdev, Error **errp) +{ + Exynos4210State *s = EXYNOS4210_SOC(socdev); + MemoryRegion *system_mem = get_system_memory(); + qemu_irq gate_irq[EXYNOS4210_NCPUS][EXYNOS4210_IRQ_GATE_NINPUTS]; + SysBusDevice *busdev; + DeviceState *dev, *uart[4], *pl330[3]; + int i, n; + + for (n = 0; n < EXYNOS4210_NCPUS; n++) { + Object *cpuobj = object_new(ARM_CPU_TYPE_NAME("cortex-a9")); + + /* By default A9 CPUs have EL3 enabled. This board does not currently + * support EL3 so the CPU EL3 property is disabled before realization. 
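+ * (The property is only set when it exists; CPU variants without EL3 + * support, e.g. when running under KVM, do not expose "has_el3" at all.)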
+ */ + if (object_property_find(cpuobj, "has_el3")) { + object_property_set_bool(cpuobj, "has_el3", false, &error_fatal); + } + + s->cpu[n] = ARM_CPU(cpuobj); + object_property_set_int(cpuobj, "mp-affinity", + exynos4210_calc_affinity(n), &error_abort); + object_property_set_int(cpuobj, "reset-cbar", + EXYNOS4210_SMP_PRIVATE_BASE_ADDR, + &error_abort); + qdev_realize(DEVICE(cpuobj), NULL, &error_fatal); + } + + /*** IRQs ***/ + + s->irq_table = exynos4210_init_irq(&s->irqs); + + /* IRQ Gate */ + for (i = 0; i < EXYNOS4210_NCPUS; i++) { + dev = qdev_new("exynos4210.irq_gate"); + qdev_prop_set_uint32(dev, "n_in", EXYNOS4210_IRQ_GATE_NINPUTS); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + /* Get IRQ Gate input in gate_irq */ + for (n = 0; n < EXYNOS4210_IRQ_GATE_NINPUTS; n++) { + gate_irq[i][n] = qdev_get_gpio_in(dev, n); + } + busdev = SYS_BUS_DEVICE(dev); + + /* Connect IRQ Gate output to CPU's IRQ line */ + sysbus_connect_irq(busdev, 0, + qdev_get_gpio_in(DEVICE(s->cpu[i]), ARM_CPU_IRQ)); + } + + /* Private memory region and Internal GIC */ + dev = qdev_new(TYPE_A9MPCORE_PRIV); + qdev_prop_set_uint32(dev, "num-cpu", EXYNOS4210_NCPUS); + busdev = SYS_BUS_DEVICE(dev); + sysbus_realize_and_unref(busdev, &error_fatal); + sysbus_mmio_map(busdev, 0, EXYNOS4210_SMP_PRIVATE_BASE_ADDR); + for (n = 0; n < EXYNOS4210_NCPUS; n++) { + sysbus_connect_irq(busdev, n, gate_irq[n][0]); + } + for (n = 0; n < EXYNOS4210_INT_GIC_NIRQ; n++) { + s->irqs.int_gic_irq[n] = qdev_get_gpio_in(dev, n); + } + + /* Cache controller */ + sysbus_create_simple("l2x0", EXYNOS4210_L2X0_BASE_ADDR, NULL); + + /* External GIC */ + dev = qdev_new("exynos4210.gic"); + qdev_prop_set_uint32(dev, "num-cpu", EXYNOS4210_NCPUS); + busdev = SYS_BUS_DEVICE(dev); + sysbus_realize_and_unref(busdev, &error_fatal); + /* Map CPU interface */ + sysbus_mmio_map(busdev, 0, EXYNOS4210_EXT_GIC_CPU_BASE_ADDR); + /* Map Distributor interface */ + sysbus_mmio_map(busdev, 1, EXYNOS4210_EXT_GIC_DIST_BASE_ADDR); + for (n = 0; n < EXYNOS4210_NCPUS; n++) { + sysbus_connect_irq(busdev, n, gate_irq[n][1]); + } + for (n = 0; n < EXYNOS4210_EXT_GIC_NIRQ; n++) { + s->irqs.ext_gic_irq[n] = qdev_get_gpio_in(dev, n); + } + + /* Internal Interrupt Combiner */ + dev = qdev_new("exynos4210.combiner"); + busdev = SYS_BUS_DEVICE(dev); + sysbus_realize_and_unref(busdev, &error_fatal); + for (n = 0; n < EXYNOS4210_MAX_INT_COMBINER_OUT_IRQ; n++) { + sysbus_connect_irq(busdev, n, s->irqs.int_gic_irq[n]); + } + exynos4210_combiner_get_gpioin(&s->irqs, dev, 0); + sysbus_mmio_map(busdev, 0, EXYNOS4210_INT_COMBINER_BASE_ADDR); + + /* External Interrupt Combiner */ + dev = qdev_new("exynos4210.combiner"); + qdev_prop_set_uint32(dev, "external", 1); + busdev = SYS_BUS_DEVICE(dev); + sysbus_realize_and_unref(busdev, &error_fatal); + for (n = 0; n < EXYNOS4210_MAX_INT_COMBINER_OUT_IRQ; n++) { + sysbus_connect_irq(busdev, n, s->irqs.ext_gic_irq[n]); + } + exynos4210_combiner_get_gpioin(&s->irqs, dev, 1); + sysbus_mmio_map(busdev, 0, EXYNOS4210_EXT_COMBINER_BASE_ADDR); + + /* Initialize board IRQs. 
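+ * This combines the internal/external combiner and external GIC inputs + * into the flat s->irq_table that the peripheral wiring below looks up + * via exynos4210_get_irq(group, bit).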
*/ + exynos4210_init_board_irqs(&s->irqs); + + /*** Memory ***/ + + /* Chip-ID and OMR */ + memory_region_init_io(&s->chipid_mem, OBJECT(socdev), + &exynos4210_chipid_and_omr_ops, NULL, + "exynos4210.chipid", sizeof(chipid_and_omr)); + memory_region_add_subregion(system_mem, EXYNOS4210_CHIPID_ADDR, + &s->chipid_mem); + + /* Internal ROM */ + memory_region_init_rom(&s->irom_mem, OBJECT(socdev), "exynos4210.irom", + EXYNOS4210_IROM_SIZE, &error_fatal); + memory_region_add_subregion(system_mem, EXYNOS4210_IROM_BASE_ADDR, + &s->irom_mem); + /* mirror of iROM */ + memory_region_init_alias(&s->irom_alias_mem, OBJECT(socdev), + "exynos4210.irom_alias", &s->irom_mem, 0, + EXYNOS4210_IROM_SIZE); + memory_region_add_subregion(system_mem, EXYNOS4210_IROM_MIRROR_BASE_ADDR, + &s->irom_alias_mem); + + /* Internal RAM */ + memory_region_init_ram(&s->iram_mem, NULL, "exynos4210.iram", + EXYNOS4210_IRAM_SIZE, &error_fatal); + memory_region_add_subregion(system_mem, EXYNOS4210_IRAM_BASE_ADDR, + &s->iram_mem); + + /* PMU. + * Its only reason for existing at the moment is that the secondary CPU + * boot loader uses the PMU INFORM5 register as a holding pen. + */ + sysbus_create_simple("exynos4210.pmu", EXYNOS4210_PMU_BASE_ADDR, NULL); + + sysbus_create_simple("exynos4210.clk", EXYNOS4210_CLK_BASE_ADDR, NULL); + sysbus_create_simple("exynos4210.rng", EXYNOS4210_RNG_BASE_ADDR, NULL); + + /* PWM */ + sysbus_create_varargs("exynos4210.pwm", EXYNOS4210_PWM_BASE_ADDR, + s->irq_table[exynos4210_get_irq(22, 0)], + s->irq_table[exynos4210_get_irq(22, 1)], + s->irq_table[exynos4210_get_irq(22, 2)], + s->irq_table[exynos4210_get_irq(22, 3)], + s->irq_table[exynos4210_get_irq(22, 4)], + NULL); + /* RTC */ + sysbus_create_varargs("exynos4210.rtc", EXYNOS4210_RTC_BASE_ADDR, + s->irq_table[exynos4210_get_irq(23, 0)], + s->irq_table[exynos4210_get_irq(23, 1)], + NULL); + + /* Multi Core Timer */ + dev = qdev_new("exynos4210.mct"); + busdev = SYS_BUS_DEVICE(dev); + sysbus_realize_and_unref(busdev, &error_fatal); + for (n = 0; n < 4; n++) { + /* Connect global timer interrupts to Combiner gpio_in */ + sysbus_connect_irq(busdev, n, + s->irq_table[exynos4210_get_irq(1, 4 + n)]); + } + /* Connect local timer interrupts to Combiner gpio_in */ + sysbus_connect_irq(busdev, 4, + s->irq_table[exynos4210_get_irq(51, 0)]); + sysbus_connect_irq(busdev, 5, + s->irq_table[exynos4210_get_irq(35, 3)]); + sysbus_mmio_map(busdev, 0, EXYNOS4210_MCT_BASE_ADDR); + + /*** I2C ***/ + for (n = 0; n < EXYNOS4210_I2C_NUMBER; n++) { + uint32_t addr = EXYNOS4210_I2C_BASE_ADDR + EXYNOS4210_I2C_SHIFT * n; + qemu_irq i2c_irq; + + if (n < 8) { + i2c_irq = s->irq_table[exynos4210_get_irq(EXYNOS4210_I2C_INTG, n)]; + } else { + i2c_irq = s->irq_table[exynos4210_get_irq(EXYNOS4210_HDMI_INTG, 1)]; + } + + dev = qdev_new("exynos4210.i2c"); + busdev = SYS_BUS_DEVICE(dev); + sysbus_realize_and_unref(busdev, &error_fatal); + sysbus_connect_irq(busdev, 0, i2c_irq); + sysbus_mmio_map(busdev, 0, addr); + s->i2c_if[n] = (I2CBus *)qdev_get_child_bus(dev, "i2c"); + } + + + /*** UARTs ***/ + uart[0] = exynos4210_uart_create(EXYNOS4210_UART0_BASE_ADDR, + EXYNOS4210_UART0_FIFO_SIZE, 0, serial_hd(0), + s->irq_table[exynos4210_get_irq(EXYNOS4210_UART_INT_GRP, 0)]); + + uart[1] = exynos4210_uart_create(EXYNOS4210_UART1_BASE_ADDR, + EXYNOS4210_UART1_FIFO_SIZE, 1, serial_hd(1), + s->irq_table[exynos4210_get_irq(EXYNOS4210_UART_INT_GRP, 1)]); + + uart[2] = exynos4210_uart_create(EXYNOS4210_UART2_BASE_ADDR, + EXYNOS4210_UART2_FIFO_SIZE, 2, serial_hd(2), + 
s->irq_table[exynos4210_get_irq(EXYNOS4210_UART_INT_GRP, 2)]); + + uart[3] = exynos4210_uart_create(EXYNOS4210_UART3_BASE_ADDR, + EXYNOS4210_UART3_FIFO_SIZE, 3, serial_hd(3), + s->irq_table[exynos4210_get_irq(EXYNOS4210_UART_INT_GRP, 3)]); + + /*** SD/MMC host controllers ***/ + for (n = 0; n < EXYNOS4210_SDHCI_NUMBER; n++) { + DeviceState *carddev; + BlockBackend *blk; + DriveInfo *di; + + /* Compatible with: + * - SD Host Controller Specification Version 2.0 + * - SDIO Specification Version 2.0 + * - MMC Specification Version 4.3 + * - SDMA + * - ADMA2 + * + * As this part of the Exynos4210 is not publicly available, + * we used the "HS-MMC Controller S3C2416X RISC Microprocessor" + * public datasheet which is very similar (implementing + * MMC Specification Version 4.0 being the only difference noted) + */ + dev = qdev_new(TYPE_S3C_SDHCI); + qdev_prop_set_uint64(dev, "capareg", EXYNOS4210_SDHCI_CAPABILITIES); + + busdev = SYS_BUS_DEVICE(dev); + sysbus_realize_and_unref(busdev, &error_fatal); + sysbus_mmio_map(busdev, 0, EXYNOS4210_SDHCI_ADDR(n)); + sysbus_connect_irq(busdev, 0, s->irq_table[exynos4210_get_irq(29, n)]); + + di = drive_get(IF_SD, 0, n); + blk = di ? blk_by_legacy_dinfo(di) : NULL; + carddev = qdev_new(TYPE_SD_CARD); + qdev_prop_set_drive(carddev, "drive", blk); + qdev_realize_and_unref(carddev, qdev_get_child_bus(dev, "sd-bus"), + &error_fatal); + } + + /*** Display controller (FIMD) ***/ + sysbus_create_varargs("exynos4210.fimd", EXYNOS4210_FIMD0_BASE_ADDR, + s->irq_table[exynos4210_get_irq(11, 0)], + s->irq_table[exynos4210_get_irq(11, 1)], + s->irq_table[exynos4210_get_irq(11, 2)], + NULL); + + sysbus_create_simple(TYPE_EXYNOS4210_EHCI, EXYNOS4210_EHCI_BASE_ADDR, + s->irq_table[exynos4210_get_irq(28, 3)]); + + /*** DMA controllers ***/ + pl330[0] = pl330_create(EXYNOS4210_PL330_BASE0_ADDR, + &s->pl330_irq_orgate[0], + s->irq_table[exynos4210_get_irq(21, 0)], + 32, 32, 32); + pl330[1] = pl330_create(EXYNOS4210_PL330_BASE1_ADDR, + &s->pl330_irq_orgate[1], + s->irq_table[exynos4210_get_irq(21, 1)], + 32, 32, 32); + pl330[2] = pl330_create(EXYNOS4210_PL330_BASE2_ADDR, + &s->pl330_irq_orgate[2], + s->irq_table[exynos4210_get_irq(20, 1)], + 1, 31, 64); + + sysbus_connect_irq(SYS_BUS_DEVICE(uart[0]), 1, + qdev_get_gpio_in(pl330[0], 15)); + sysbus_connect_irq(SYS_BUS_DEVICE(uart[1]), 1, + qdev_get_gpio_in(pl330[1], 15)); + sysbus_connect_irq(SYS_BUS_DEVICE(uart[2]), 1, + qdev_get_gpio_in(pl330[0], 17)); + sysbus_connect_irq(SYS_BUS_DEVICE(uart[3]), 1, + qdev_get_gpio_in(pl330[1], 17)); +} + +static void exynos4210_init(Object *obj) +{ + Exynos4210State *s = EXYNOS4210_SOC(obj); + int i; + + for (i = 0; i < ARRAY_SIZE(s->pl330_irq_orgate); i++) { + char *name = g_strdup_printf("pl330-irq-orgate%d", i); + qemu_or_irq *orgate = &s->pl330_irq_orgate[i]; + + object_initialize_child(obj, name, orgate, TYPE_OR_IRQ); + g_free(name); + } +} + +static void exynos4210_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = exynos4210_realize; +} + +static const TypeInfo exynos4210_info = { + .name = TYPE_EXYNOS4210_SOC, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(Exynos4210State), + .instance_init = exynos4210_init, + .class_init = exynos4210_class_init, +}; + +static void exynos4210_register_types(void) +{ + type_register_static(&exynos4210_info); +} + +type_init(exynos4210_register_types) diff --git a/hw/arm/exynos4_boards.c b/hw/arm/exynos4_boards.c new file mode 100644 index 000000000..35dd9875d --- /dev/null +++ 
b/hw/arm/exynos4_boards.c @@ -0,0 +1,195 @@ +/* + * Samsung exynos4 SoC based boards emulation + * + * Copyright (c) 2011 Samsung Electronics Co., Ltd. All rights reserved. + * Maksim Kozlov + * Evgeny Voevodin + * Igor Mitsyanko + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . + * + */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "qapi/error.h" +#include "qemu/error-report.h" +#include "hw/sysbus.h" +#include "net/net.h" +#include "hw/arm/boot.h" +#include "exec/address-spaces.h" +#include "hw/arm/exynos4210.h" +#include "hw/net/lan9118.h" +#include "hw/qdev-properties.h" +#include "hw/boards.h" +#include "hw/irq.h" + +#define SMDK_LAN9118_BASE_ADDR 0x05000000 + +typedef enum Exynos4BoardType { + EXYNOS4_BOARD_NURI, + EXYNOS4_BOARD_SMDKC210, + EXYNOS4_NUM_OF_BOARDS +} Exynos4BoardType; + +typedef struct Exynos4BoardState { + Exynos4210State soc; + MemoryRegion dram0_mem; + MemoryRegion dram1_mem; +} Exynos4BoardState; + +static int exynos4_board_id[EXYNOS4_NUM_OF_BOARDS] = { + [EXYNOS4_BOARD_NURI] = 0xD33, + [EXYNOS4_BOARD_SMDKC210] = 0xB16, +}; + +static int exynos4_board_smp_bootreg_addr[EXYNOS4_NUM_OF_BOARDS] = { + [EXYNOS4_BOARD_NURI] = EXYNOS4210_SECOND_CPU_BOOTREG, + [EXYNOS4_BOARD_SMDKC210] = EXYNOS4210_SECOND_CPU_BOOTREG, +}; + +static unsigned long exynos4_board_ram_size[EXYNOS4_NUM_OF_BOARDS] = { + [EXYNOS4_BOARD_NURI] = 1 * GiB, + [EXYNOS4_BOARD_SMDKC210] = 1 * GiB, +}; + +static struct arm_boot_info exynos4_board_binfo = { + .loader_start = EXYNOS4210_BASE_BOOT_ADDR, + .smp_loader_start = EXYNOS4210_SMP_BOOT_ADDR, + .nb_cpus = EXYNOS4210_NCPUS, + .write_secondary_boot = exynos4210_write_secondary, +}; + +static void lan9215_init(uint32_t base, qemu_irq irq) +{ + DeviceState *dev; + SysBusDevice *s; + + /* This should be a 9215 but the 9118 is close enough */ + if (nd_table[0].used) { + qemu_check_nic_model(&nd_table[0], "lan9118"); + dev = qdev_new(TYPE_LAN9118); + qdev_set_nic_properties(dev, &nd_table[0]); + qdev_prop_set_uint32(dev, "mode_16bit", 1); + s = SYS_BUS_DEVICE(dev); + sysbus_realize_and_unref(s, &error_fatal); + sysbus_mmio_map(s, 0, base); + sysbus_connect_irq(s, 0, irq); + } +} + +static void exynos4_boards_init_ram(Exynos4BoardState *s, + MemoryRegion *system_mem, + unsigned long ram_size) +{ + unsigned long mem_size = ram_size; + + if (mem_size > EXYNOS4210_DRAM_MAX_SIZE) { + memory_region_init_ram(&s->dram1_mem, NULL, "exynos4210.dram1", + mem_size - EXYNOS4210_DRAM_MAX_SIZE, + &error_fatal); + memory_region_add_subregion(system_mem, EXYNOS4210_DRAM1_BASE_ADDR, + &s->dram1_mem); + mem_size = EXYNOS4210_DRAM_MAX_SIZE; + } + + memory_region_init_ram(&s->dram0_mem, NULL, "exynos4210.dram0", mem_size, + &error_fatal); + memory_region_add_subregion(system_mem, EXYNOS4210_DRAM0_BASE_ADDR, + &s->dram0_mem); +} + +static Exynos4BoardState * +exynos4_boards_init_common(MachineState *machine, + Exynos4BoardType board_type) +{ + Exynos4BoardState *s = g_new(Exynos4BoardState, 1); + 
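+    /* + * Fill in the board-specific fields of the shared arm_boot_info + * structure before mapping RAM and realizing the SoC. + */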
+ exynos4_board_binfo.ram_size = exynos4_board_ram_size[board_type]; + exynos4_board_binfo.board_id = exynos4_board_id[board_type]; + exynos4_board_binfo.smp_bootreg_addr = + exynos4_board_smp_bootreg_addr[board_type]; + exynos4_board_binfo.gic_cpu_if_addr = + EXYNOS4210_SMP_PRIVATE_BASE_ADDR + 0x100; + + exynos4_boards_init_ram(s, get_system_memory(), + exynos4_board_ram_size[board_type]); + + object_initialize_child(OBJECT(machine), "soc", &s->soc, + TYPE_EXYNOS4210_SOC); + sysbus_realize(SYS_BUS_DEVICE(&s->soc), &error_fatal); + + return s; +} + +static void nuri_init(MachineState *machine) +{ + exynos4_boards_init_common(machine, EXYNOS4_BOARD_NURI); + + arm_load_kernel(ARM_CPU(first_cpu), machine, &exynos4_board_binfo); +} + +static void smdkc210_init(MachineState *machine) +{ + Exynos4BoardState *s = exynos4_boards_init_common(machine, + EXYNOS4_BOARD_SMDKC210); + + lan9215_init(SMDK_LAN9118_BASE_ADDR, + qemu_irq_invert(s->soc.irq_table[exynos4210_get_irq(37, 1)])); + arm_load_kernel(ARM_CPU(first_cpu), machine, &exynos4_board_binfo); +} + +static void nuri_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + + mc->desc = "Samsung NURI board (Exynos4210)"; + mc->init = nuri_init; + mc->max_cpus = EXYNOS4210_NCPUS; + mc->min_cpus = EXYNOS4210_NCPUS; + mc->default_cpus = EXYNOS4210_NCPUS; + mc->ignore_memory_transaction_failures = true; +} + +static const TypeInfo nuri_type = { + .name = MACHINE_TYPE_NAME("nuri"), + .parent = TYPE_MACHINE, + .class_init = nuri_class_init, +}; + +static void smdkc210_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + + mc->desc = "Samsung SMDKC210 board (Exynos4210)"; + mc->init = smdkc210_init; + mc->max_cpus = EXYNOS4210_NCPUS; + mc->min_cpus = EXYNOS4210_NCPUS; + mc->default_cpus = EXYNOS4210_NCPUS; + mc->ignore_memory_transaction_failures = true; +} + +static const TypeInfo smdkc210_type = { + .name = MACHINE_TYPE_NAME("smdkc210"), + .parent = TYPE_MACHINE, + .class_init = smdkc210_class_init, +}; + +static void exynos4_machines_init(void) +{ + type_register_static(&nuri_type); + type_register_static(&smdkc210_type); +} + +type_init(exynos4_machines_init) diff --git a/hw/arm/fsl-imx25.c b/hw/arm/fsl-imx25.c new file mode 100644 index 000000000..24c437459 --- /dev/null +++ b/hw/arm/fsl-imx25.c @@ -0,0 +1,349 @@ +/* + * Copyright (c) 2013 Jean-Christophe Dubois + * + * i.MX25 SOC emulation. + * + * Based on hw/arm/xlnx-zynqmp.c + * + * Copyright (C) 2015 Xilinx Inc + * Written by Peter Crosthwaite + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . 
+ */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "hw/arm/fsl-imx25.h" +#include "sysemu/sysemu.h" +#include "hw/qdev-properties.h" +#include "chardev/char.h" + +#define IMX25_ESDHC_CAPABILITIES 0x07e20000 + +static void fsl_imx25_init(Object *obj) +{ + FslIMX25State *s = FSL_IMX25(obj); + int i; + + object_initialize_child(obj, "cpu", &s->cpu, ARM_CPU_TYPE_NAME("arm926")); + + object_initialize_child(obj, "avic", &s->avic, TYPE_IMX_AVIC); + + object_initialize_child(obj, "ccm", &s->ccm, TYPE_IMX25_CCM); + + for (i = 0; i < FSL_IMX25_NUM_UARTS; i++) { + object_initialize_child(obj, "uart[*]", &s->uart[i], TYPE_IMX_SERIAL); + } + + for (i = 0; i < FSL_IMX25_NUM_GPTS; i++) { + object_initialize_child(obj, "gpt[*]", &s->gpt[i], TYPE_IMX25_GPT); + } + + for (i = 0; i < FSL_IMX25_NUM_EPITS; i++) { + object_initialize_child(obj, "epit[*]", &s->epit[i], TYPE_IMX_EPIT); + } + + object_initialize_child(obj, "fec", &s->fec, TYPE_IMX_FEC); + + object_initialize_child(obj, "rngc", &s->rngc, TYPE_IMX_RNGC); + + for (i = 0; i < FSL_IMX25_NUM_I2CS; i++) { + object_initialize_child(obj, "i2c[*]", &s->i2c[i], TYPE_IMX_I2C); + } + + for (i = 0; i < FSL_IMX25_NUM_GPIOS; i++) { + object_initialize_child(obj, "gpio[*]", &s->gpio[i], TYPE_IMX_GPIO); + } + + for (i = 0; i < FSL_IMX25_NUM_ESDHCS; i++) { + object_initialize_child(obj, "sdhc[*]", &s->esdhc[i], TYPE_IMX_USDHC); + } + + for (i = 0; i < FSL_IMX25_NUM_USBS; i++) { + object_initialize_child(obj, "usb[*]", &s->usb[i], TYPE_CHIPIDEA); + } + + object_initialize_child(obj, "wdt", &s->wdt, TYPE_IMX2_WDT); +} + +static void fsl_imx25_realize(DeviceState *dev, Error **errp) +{ + FslIMX25State *s = FSL_IMX25(dev); + uint8_t i; + Error *err = NULL; + + if (!qdev_realize(DEVICE(&s->cpu), NULL, errp)) { + return; + } + + if (!sysbus_realize(SYS_BUS_DEVICE(&s->avic), errp)) { + return; + } + sysbus_mmio_map(SYS_BUS_DEVICE(&s->avic), 0, FSL_IMX25_AVIC_ADDR); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->avic), 0, + qdev_get_gpio_in(DEVICE(&s->cpu), ARM_CPU_IRQ)); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->avic), 1, + qdev_get_gpio_in(DEVICE(&s->cpu), ARM_CPU_FIQ)); + + if (!sysbus_realize(SYS_BUS_DEVICE(&s->ccm), errp)) { + return; + } + sysbus_mmio_map(SYS_BUS_DEVICE(&s->ccm), 0, FSL_IMX25_CCM_ADDR); + + /* Initialize all UARTs */ + for (i = 0; i < FSL_IMX25_NUM_UARTS; i++) { + static const struct { + hwaddr addr; + unsigned int irq; + } serial_table[FSL_IMX25_NUM_UARTS] = { + { FSL_IMX25_UART1_ADDR, FSL_IMX25_UART1_IRQ }, + { FSL_IMX25_UART2_ADDR, FSL_IMX25_UART2_IRQ }, + { FSL_IMX25_UART3_ADDR, FSL_IMX25_UART3_IRQ }, + { FSL_IMX25_UART4_ADDR, FSL_IMX25_UART4_IRQ }, + { FSL_IMX25_UART5_ADDR, FSL_IMX25_UART5_IRQ } + }; + + qdev_prop_set_chr(DEVICE(&s->uart[i]), "chardev", serial_hd(i)); + + if (!sysbus_realize(SYS_BUS_DEVICE(&s->uart[i]), errp)) { + return; + } + sysbus_mmio_map(SYS_BUS_DEVICE(&s->uart[i]), 0, serial_table[i].addr); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->uart[i]), 0, + qdev_get_gpio_in(DEVICE(&s->avic), + serial_table[i].irq)); + } + + /* Initialize all GPT timers */ + for (i = 0; i < FSL_IMX25_NUM_GPTS; i++) { + static const struct { + hwaddr addr; + unsigned int irq; + } gpt_table[FSL_IMX25_NUM_GPTS] = { + { FSL_IMX25_GPT1_ADDR, FSL_IMX25_GPT1_IRQ }, + { FSL_IMX25_GPT2_ADDR, FSL_IMX25_GPT2_IRQ }, + { FSL_IMX25_GPT3_ADDR, FSL_IMX25_GPT3_IRQ }, + { FSL_IMX25_GPT4_ADDR, FSL_IMX25_GPT4_IRQ } + }; + + s->gpt[i].ccm = IMX_CCM(&s->ccm); + + if (!sysbus_realize(SYS_BUS_DEVICE(&s->gpt[i]), errp)) { + return; + } + 
sysbus_mmio_map(SYS_BUS_DEVICE(&s->gpt[i]), 0, gpt_table[i].addr); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->gpt[i]), 0, + qdev_get_gpio_in(DEVICE(&s->avic), + gpt_table[i].irq)); + } + + /* Initialize all EPIT timers */ + for (i = 0; i < FSL_IMX25_NUM_EPITS; i++) { + static const struct { + hwaddr addr; + unsigned int irq; + } epit_table[FSL_IMX25_NUM_EPITS] = { + { FSL_IMX25_EPIT1_ADDR, FSL_IMX25_EPIT1_IRQ }, + { FSL_IMX25_EPIT2_ADDR, FSL_IMX25_EPIT2_IRQ } + }; + + s->epit[i].ccm = IMX_CCM(&s->ccm); + + if (!sysbus_realize(SYS_BUS_DEVICE(&s->epit[i]), errp)) { + return; + } + sysbus_mmio_map(SYS_BUS_DEVICE(&s->epit[i]), 0, epit_table[i].addr); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->epit[i]), 0, + qdev_get_gpio_in(DEVICE(&s->avic), + epit_table[i].irq)); + } + + object_property_set_uint(OBJECT(&s->fec), "phy-num", s->phy_num, &err); + qdev_set_nic_properties(DEVICE(&s->fec), &nd_table[0]); + + if (!sysbus_realize(SYS_BUS_DEVICE(&s->fec), errp)) { + return; + } + sysbus_mmio_map(SYS_BUS_DEVICE(&s->fec), 0, FSL_IMX25_FEC_ADDR); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->fec), 0, + qdev_get_gpio_in(DEVICE(&s->avic), FSL_IMX25_FEC_IRQ)); + + if (!sysbus_realize(SYS_BUS_DEVICE(&s->rngc), errp)) { + return; + } + sysbus_mmio_map(SYS_BUS_DEVICE(&s->rngc), 0, FSL_IMX25_RNGC_ADDR); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->rngc), 0, + qdev_get_gpio_in(DEVICE(&s->avic), FSL_IMX25_RNGC_IRQ)); + + /* Initialize all I2C */ + for (i = 0; i < FSL_IMX25_NUM_I2CS; i++) { + static const struct { + hwaddr addr; + unsigned int irq; + } i2c_table[FSL_IMX25_NUM_I2CS] = { + { FSL_IMX25_I2C1_ADDR, FSL_IMX25_I2C1_IRQ }, + { FSL_IMX25_I2C2_ADDR, FSL_IMX25_I2C2_IRQ }, + { FSL_IMX25_I2C3_ADDR, FSL_IMX25_I2C3_IRQ } + }; + + if (!sysbus_realize(SYS_BUS_DEVICE(&s->i2c[i]), errp)) { + return; + } + sysbus_mmio_map(SYS_BUS_DEVICE(&s->i2c[i]), 0, i2c_table[i].addr); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->i2c[i]), 0, + qdev_get_gpio_in(DEVICE(&s->avic), + i2c_table[i].irq)); + } + + /* Initialize all GPIOs */ + for (i = 0; i < FSL_IMX25_NUM_GPIOS; i++) { + static const struct { + hwaddr addr; + unsigned int irq; + } gpio_table[FSL_IMX25_NUM_GPIOS] = { + { FSL_IMX25_GPIO1_ADDR, FSL_IMX25_GPIO1_IRQ }, + { FSL_IMX25_GPIO2_ADDR, FSL_IMX25_GPIO2_IRQ }, + { FSL_IMX25_GPIO3_ADDR, FSL_IMX25_GPIO3_IRQ }, + { FSL_IMX25_GPIO4_ADDR, FSL_IMX25_GPIO4_IRQ } + }; + + if (!sysbus_realize(SYS_BUS_DEVICE(&s->gpio[i]), errp)) { + return; + } + sysbus_mmio_map(SYS_BUS_DEVICE(&s->gpio[i]), 0, gpio_table[i].addr); + /* Connect GPIO IRQ to PIC */ + sysbus_connect_irq(SYS_BUS_DEVICE(&s->gpio[i]), 0, + qdev_get_gpio_in(DEVICE(&s->avic), + gpio_table[i].irq)); + } + + /* Initialize all SDHC */ + for (i = 0; i < FSL_IMX25_NUM_ESDHCS; i++) { + static const struct { + hwaddr addr; + unsigned int irq; + } esdhc_table[FSL_IMX25_NUM_ESDHCS] = { + { FSL_IMX25_ESDHC1_ADDR, FSL_IMX25_ESDHC1_IRQ }, + { FSL_IMX25_ESDHC2_ADDR, FSL_IMX25_ESDHC2_IRQ }, + }; + + object_property_set_uint(OBJECT(&s->esdhc[i]), "sd-spec-version", 2, + &error_abort); + object_property_set_uint(OBJECT(&s->esdhc[i]), "capareg", + IMX25_ESDHC_CAPABILITIES, &error_abort); + object_property_set_uint(OBJECT(&s->esdhc[i]), "vendor", + SDHCI_VENDOR_IMX, &error_abort); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->esdhc[i]), errp)) { + return; + } + sysbus_mmio_map(SYS_BUS_DEVICE(&s->esdhc[i]), 0, esdhc_table[i].addr); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->esdhc[i]), 0, + qdev_get_gpio_in(DEVICE(&s->avic), + esdhc_table[i].irq)); + } + + /* USB */ + for (i = 0; i < FSL_IMX25_NUM_USBS; i++) { + static 
const struct { + hwaddr addr; + unsigned int irq; + } usb_table[FSL_IMX25_NUM_USBS] = { + { FSL_IMX25_USB1_ADDR, FSL_IMX25_USB1_IRQ }, + { FSL_IMX25_USB2_ADDR, FSL_IMX25_USB2_IRQ }, + }; + + sysbus_realize(SYS_BUS_DEVICE(&s->usb[i]), &error_abort); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->usb[i]), 0, usb_table[i].addr); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->usb[i]), 0, + qdev_get_gpio_in(DEVICE(&s->avic), + usb_table[i].irq)); + } + + /* Watchdog */ + object_property_set_bool(OBJECT(&s->wdt), "pretimeout-support", true, + &error_abort); + sysbus_realize(SYS_BUS_DEVICE(&s->wdt), &error_abort); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->wdt), 0, FSL_IMX25_WDT_ADDR); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->wdt), 0, + qdev_get_gpio_in(DEVICE(&s->avic), + FSL_IMX25_WDT_IRQ)); + + /* initialize 2 x 16 KB ROM */ + memory_region_init_rom(&s->rom[0], OBJECT(dev), "imx25.rom0", + FSL_IMX25_ROM0_SIZE, &err); + if (err) { + error_propagate(errp, err); + return; + } + memory_region_add_subregion(get_system_memory(), FSL_IMX25_ROM0_ADDR, + &s->rom[0]); + memory_region_init_rom(&s->rom[1], OBJECT(dev), "imx25.rom1", + FSL_IMX25_ROM1_SIZE, &err); + if (err) { + error_propagate(errp, err); + return; + } + memory_region_add_subregion(get_system_memory(), FSL_IMX25_ROM1_ADDR, + &s->rom[1]); + + /* initialize internal RAM (128 KB) */ + memory_region_init_ram(&s->iram, NULL, "imx25.iram", FSL_IMX25_IRAM_SIZE, + &err); + if (err) { + error_propagate(errp, err); + return; + } + memory_region_add_subregion(get_system_memory(), FSL_IMX25_IRAM_ADDR, + &s->iram); + + /* internal RAM (128 KB) is aliased over 128 MB - 128 KB */ + memory_region_init_alias(&s->iram_alias, OBJECT(dev), "imx25.iram_alias", + &s->iram, 0, FSL_IMX25_IRAM_ALIAS_SIZE); + memory_region_add_subregion(get_system_memory(), FSL_IMX25_IRAM_ALIAS_ADDR, + &s->iram_alias); +} + +static Property fsl_imx25_properties[] = { + DEFINE_PROP_UINT32("fec-phy-num", FslIMX25State, phy_num, 0), + DEFINE_PROP_END_OF_LIST(), +}; + +static void fsl_imx25_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + + device_class_set_props(dc, fsl_imx25_properties); + dc->realize = fsl_imx25_realize; + dc->desc = "i.MX25 SOC"; + /* + * Reason: uses serial_hds in realize and the imx25 board does not + * support multiple CPUs + */ + dc->user_creatable = false; +} + +static const TypeInfo fsl_imx25_type_info = { + .name = TYPE_FSL_IMX25, + .parent = TYPE_DEVICE, + .instance_size = sizeof(FslIMX25State), + .instance_init = fsl_imx25_init, + .class_init = fsl_imx25_class_init, +}; + +static void fsl_imx25_register_types(void) +{ + type_register_static(&fsl_imx25_type_info); +} + +type_init(fsl_imx25_register_types) diff --git a/hw/arm/fsl-imx31.c b/hw/arm/fsl-imx31.c new file mode 100644 index 000000000..def27bb91 --- /dev/null +++ b/hw/arm/fsl-imx31.c @@ -0,0 +1,253 @@ +/* + * Copyright (c) 2013 Jean-Christophe Dubois + * + * i.MX31 SOC emulation. + * + * Based on hw/arm/fsl-imx31.c + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "hw/arm/fsl-imx31.h" +#include "sysemu/sysemu.h" +#include "exec/address-spaces.h" +#include "hw/qdev-properties.h" +#include "chardev/char.h" + +static void fsl_imx31_init(Object *obj) +{ + FslIMX31State *s = FSL_IMX31(obj); + int i; + + object_initialize_child(obj, "cpu", &s->cpu, ARM_CPU_TYPE_NAME("arm1136")); + + object_initialize_child(obj, "avic", &s->avic, TYPE_IMX_AVIC); + + object_initialize_child(obj, "ccm", &s->ccm, TYPE_IMX31_CCM); + + for (i = 0; i < FSL_IMX31_NUM_UARTS; i++) { + object_initialize_child(obj, "uart[*]", &s->uart[i], TYPE_IMX_SERIAL); + } + + object_initialize_child(obj, "gpt", &s->gpt, TYPE_IMX31_GPT); + + for (i = 0; i < FSL_IMX31_NUM_EPITS; i++) { + object_initialize_child(obj, "epit[*]", &s->epit[i], TYPE_IMX_EPIT); + } + + for (i = 0; i < FSL_IMX31_NUM_I2CS; i++) { + object_initialize_child(obj, "i2c[*]", &s->i2c[i], TYPE_IMX_I2C); + } + + for (i = 0; i < FSL_IMX31_NUM_GPIOS; i++) { + object_initialize_child(obj, "gpio[*]", &s->gpio[i], TYPE_IMX_GPIO); + } + + object_initialize_child(obj, "wdt", &s->wdt, TYPE_IMX2_WDT); +} + +static void fsl_imx31_realize(DeviceState *dev, Error **errp) +{ + FslIMX31State *s = FSL_IMX31(dev); + uint16_t i; + Error *err = NULL; + + if (!qdev_realize(DEVICE(&s->cpu), NULL, errp)) { + return; + } + + if (!sysbus_realize(SYS_BUS_DEVICE(&s->avic), errp)) { + return; + } + sysbus_mmio_map(SYS_BUS_DEVICE(&s->avic), 0, FSL_IMX31_AVIC_ADDR); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->avic), 0, + qdev_get_gpio_in(DEVICE(&s->cpu), ARM_CPU_IRQ)); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->avic), 1, + qdev_get_gpio_in(DEVICE(&s->cpu), ARM_CPU_FIQ)); + + if (!sysbus_realize(SYS_BUS_DEVICE(&s->ccm), errp)) { + return; + } + sysbus_mmio_map(SYS_BUS_DEVICE(&s->ccm), 0, FSL_IMX31_CCM_ADDR); + + /* Initialize all UARTS */ + for (i = 0; i < FSL_IMX31_NUM_UARTS; i++) { + static const struct { + hwaddr addr; + unsigned int irq; + } serial_table[FSL_IMX31_NUM_UARTS] = { + { FSL_IMX31_UART1_ADDR, FSL_IMX31_UART1_IRQ }, + { FSL_IMX31_UART2_ADDR, FSL_IMX31_UART2_IRQ }, + }; + + qdev_prop_set_chr(DEVICE(&s->uart[i]), "chardev", serial_hd(i)); + + if (!sysbus_realize(SYS_BUS_DEVICE(&s->uart[i]), errp)) { + return; + } + + sysbus_mmio_map(SYS_BUS_DEVICE(&s->uart[i]), 0, serial_table[i].addr); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->uart[i]), 0, + qdev_get_gpio_in(DEVICE(&s->avic), + serial_table[i].irq)); + } + + s->gpt.ccm = IMX_CCM(&s->ccm); + + if (!sysbus_realize(SYS_BUS_DEVICE(&s->gpt), errp)) { + return; + } + + sysbus_mmio_map(SYS_BUS_DEVICE(&s->gpt), 0, FSL_IMX31_GPT_ADDR); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->gpt), 0, + qdev_get_gpio_in(DEVICE(&s->avic), FSL_IMX31_GPT_IRQ)); + + /* Initialize all EPIT timers */ + for (i = 0; i < FSL_IMX31_NUM_EPITS; i++) { + static const struct { + hwaddr addr; + unsigned int irq; + } epit_table[FSL_IMX31_NUM_EPITS] = { + { FSL_IMX31_EPIT1_ADDR, FSL_IMX31_EPIT1_IRQ }, + { FSL_IMX31_EPIT2_ADDR, FSL_IMX31_EPIT2_IRQ }, + }; + + s->epit[i].ccm = IMX_CCM(&s->ccm); + + if (!sysbus_realize(SYS_BUS_DEVICE(&s->epit[i]), errp)) { + return; + } + + sysbus_mmio_map(SYS_BUS_DEVICE(&s->epit[i]), 0, epit_table[i].addr); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->epit[i]), 0, + qdev_get_gpio_in(DEVICE(&s->avic), + epit_table[i].irq)); + } + + /* Initialize all I2C */ + for (i = 0; i < FSL_IMX31_NUM_I2CS; i++) { + static const struct { + hwaddr addr; + 
unsigned int irq; + } i2c_table[FSL_IMX31_NUM_I2CS] = { + { FSL_IMX31_I2C1_ADDR, FSL_IMX31_I2C1_IRQ }, + { FSL_IMX31_I2C2_ADDR, FSL_IMX31_I2C2_IRQ }, + { FSL_IMX31_I2C3_ADDR, FSL_IMX31_I2C3_IRQ } + }; + + /* Initialize the I2C */ + if (!sysbus_realize(SYS_BUS_DEVICE(&s->i2c[i]), errp)) { + return; + } + /* Map I2C memory */ + sysbus_mmio_map(SYS_BUS_DEVICE(&s->i2c[i]), 0, i2c_table[i].addr); + /* Connect I2C IRQ to PIC */ + sysbus_connect_irq(SYS_BUS_DEVICE(&s->i2c[i]), 0, + qdev_get_gpio_in(DEVICE(&s->avic), + i2c_table[i].irq)); + } + + /* Initialize all GPIOs */ + for (i = 0; i < FSL_IMX31_NUM_GPIOS; i++) { + static const struct { + hwaddr addr; + unsigned int irq; + } gpio_table[FSL_IMX31_NUM_GPIOS] = { + { FSL_IMX31_GPIO1_ADDR, FSL_IMX31_GPIO1_IRQ }, + { FSL_IMX31_GPIO2_ADDR, FSL_IMX31_GPIO2_IRQ }, + { FSL_IMX31_GPIO3_ADDR, FSL_IMX31_GPIO3_IRQ } + }; + + object_property_set_bool(OBJECT(&s->gpio[i]), "has-edge-sel", false, + &error_abort); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->gpio[i]), errp)) { + return; + } + sysbus_mmio_map(SYS_BUS_DEVICE(&s->gpio[i]), 0, gpio_table[i].addr); + /* Connect GPIO IRQ to PIC */ + sysbus_connect_irq(SYS_BUS_DEVICE(&s->gpio[i]), 0, + qdev_get_gpio_in(DEVICE(&s->avic), + gpio_table[i].irq)); + } + + /* Watchdog */ + sysbus_realize(SYS_BUS_DEVICE(&s->wdt), &error_abort); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->wdt), 0, FSL_IMX31_WDT_ADDR); + + /* On a real system, the first 16k is a `secure boot rom' */ + memory_region_init_rom(&s->secure_rom, OBJECT(dev), "imx31.secure_rom", + FSL_IMX31_SECURE_ROM_SIZE, &err); + if (err) { + error_propagate(errp, err); + return; + } + memory_region_add_subregion(get_system_memory(), FSL_IMX31_SECURE_ROM_ADDR, + &s->secure_rom); + + /* There is also a 16k ROM */ + memory_region_init_rom(&s->rom, OBJECT(dev), "imx31.rom", + FSL_IMX31_ROM_SIZE, &err); + if (err) { + error_propagate(errp, err); + return; + } + memory_region_add_subregion(get_system_memory(), FSL_IMX31_ROM_ADDR, + &s->rom); + + /* initialize internal RAM (16 KB) */ + memory_region_init_ram(&s->iram, NULL, "imx31.iram", FSL_IMX31_IRAM_SIZE, + &err); + if (err) { + error_propagate(errp, err); + return; + } + memory_region_add_subregion(get_system_memory(), FSL_IMX31_IRAM_ADDR, + &s->iram); + + /* internal RAM (16 KB) is aliased over 256 MB - 16 KB */ + memory_region_init_alias(&s->iram_alias, OBJECT(dev), "imx31.iram_alias", + &s->iram, 0, FSL_IMX31_IRAM_ALIAS_SIZE); + memory_region_add_subregion(get_system_memory(), FSL_IMX31_IRAM_ALIAS_ADDR, + &s->iram_alias); +} + +static void fsl_imx31_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + + dc->realize = fsl_imx31_realize; + dc->desc = "i.MX31 SOC"; + /* + * Reason: uses serial_hds in realize and the kzm board does not + * support multiple CPUs + */ + dc->user_creatable = false; +} + +static const TypeInfo fsl_imx31_type_info = { + .name = TYPE_FSL_IMX31, + .parent = TYPE_DEVICE, + .instance_size = sizeof(FslIMX31State), + .instance_init = fsl_imx31_init, + .class_init = fsl_imx31_class_init, +}; + +static void fsl_imx31_register_types(void) +{ + type_register_static(&fsl_imx31_type_info); +} + +type_init(fsl_imx31_register_types) diff --git a/hw/arm/fsl-imx6.c b/hw/arm/fsl-imx6.c new file mode 100644 index 000000000..00dafe3f6 --- /dev/null +++ b/hw/arm/fsl-imx6.c @@ -0,0 +1,482 @@ +/* + * Copyright (c) 2015 Jean-Christophe Dubois + * + * i.MX6 SOC emulation. 
+ * + * Based on hw/arm/fsl-imx31.c + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see <http://www.gnu.org/licenses/>. + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "hw/arm/fsl-imx6.h" +#include "hw/usb/imx-usb-phy.h" +#include "hw/boards.h" +#include "hw/qdev-properties.h" +#include "sysemu/sysemu.h" +#include "chardev/char.h" +#include "qemu/error-report.h" +#include "qemu/module.h" + +#define IMX6_ESDHC_CAPABILITIES 0x057834b4 + +#define NAME_SIZE 20 + +static void fsl_imx6_init(Object *obj) +{ + MachineState *ms = MACHINE(qdev_get_machine()); + FslIMX6State *s = FSL_IMX6(obj); + char name[NAME_SIZE]; + int i; + + for (i = 0; i < MIN(ms->smp.cpus, FSL_IMX6_NUM_CPUS); i++) { + snprintf(name, NAME_SIZE, "cpu%d", i); + object_initialize_child(obj, name, &s->cpu[i], + ARM_CPU_TYPE_NAME("cortex-a9")); + } + + object_initialize_child(obj, "a9mpcore", &s->a9mpcore, TYPE_A9MPCORE_PRIV); + + object_initialize_child(obj, "ccm", &s->ccm, TYPE_IMX6_CCM); + + object_initialize_child(obj, "src", &s->src, TYPE_IMX6_SRC); + + for (i = 0; i < FSL_IMX6_NUM_UARTS; i++) { + snprintf(name, NAME_SIZE, "uart%d", i + 1); + object_initialize_child(obj, name, &s->uart[i], TYPE_IMX_SERIAL); + } + + object_initialize_child(obj, "gpt", &s->gpt, TYPE_IMX6_GPT); + + for (i = 0; i < FSL_IMX6_NUM_EPITS; i++) { + snprintf(name, NAME_SIZE, "epit%d", i + 1); + object_initialize_child(obj, name, &s->epit[i], TYPE_IMX_EPIT); + } + + for (i = 0; i < FSL_IMX6_NUM_I2CS; i++) { + snprintf(name, NAME_SIZE, "i2c%d", i + 1); + object_initialize_child(obj, name, &s->i2c[i], TYPE_IMX_I2C); + } + + for (i = 0; i < FSL_IMX6_NUM_GPIOS; i++) { + snprintf(name, NAME_SIZE, "gpio%d", i + 1); + object_initialize_child(obj, name, &s->gpio[i], TYPE_IMX_GPIO); + } + + for (i = 0; i < FSL_IMX6_NUM_ESDHCS; i++) { + snprintf(name, NAME_SIZE, "sdhc%d", i + 1); + object_initialize_child(obj, name, &s->esdhc[i], TYPE_IMX_USDHC); + } + + for (i = 0; i < FSL_IMX6_NUM_USB_PHYS; i++) { + snprintf(name, NAME_SIZE, "usbphy%d", i); + object_initialize_child(obj, name, &s->usbphy[i], TYPE_IMX_USBPHY); + } + for (i = 0; i < FSL_IMX6_NUM_USBS; i++) { + snprintf(name, NAME_SIZE, "usb%d", i); + object_initialize_child(obj, name, &s->usb[i], TYPE_CHIPIDEA); + } + + for (i = 0; i < FSL_IMX6_NUM_ECSPIS; i++) { + snprintf(name, NAME_SIZE, "spi%d", i + 1); + object_initialize_child(obj, name, &s->spi[i], TYPE_IMX_SPI); + } + for (i = 0; i < FSL_IMX6_NUM_WDTS; i++) { + snprintf(name, NAME_SIZE, "wdt%d", i); + object_initialize_child(obj, name, &s->wdt[i], TYPE_IMX2_WDT); + } + + + object_initialize_child(obj, "eth", &s->eth, TYPE_IMX_ENET); +} + +static void fsl_imx6_realize(DeviceState *dev, Error **errp) +{ + MachineState *ms = MACHINE(qdev_get_machine()); + FslIMX6State *s = FSL_IMX6(dev); + uint16_t i; + Error *err = NULL; + unsigned int smp_cpus = ms->smp.cpus; + + if (smp_cpus > FSL_IMX6_NUM_CPUS) { + error_setg(errp, "%s: Only %d CPUs are supported (%d requested)", + TYPE_FSL_IMX6, FSL_IMX6_NUM_CPUS, 
smp_cpus); + return; + } + + for (i = 0; i < smp_cpus; i++) { + + /* On uniprocessor, the CBAR is set to 0 */ + if (smp_cpus > 1) { + object_property_set_int(OBJECT(&s->cpu[i]), "reset-cbar", + FSL_IMX6_A9MPCORE_ADDR, &error_abort); + } + + /* All CPU but CPU 0 start in power off mode */ + if (i) { + object_property_set_bool(OBJECT(&s->cpu[i]), "start-powered-off", + true, &error_abort); + } + + if (!qdev_realize(DEVICE(&s->cpu[i]), NULL, errp)) { + return; + } + } + + object_property_set_int(OBJECT(&s->a9mpcore), "num-cpu", smp_cpus, + &error_abort); + + object_property_set_int(OBJECT(&s->a9mpcore), "num-irq", + FSL_IMX6_MAX_IRQ + GIC_INTERNAL, &error_abort); + + if (!sysbus_realize(SYS_BUS_DEVICE(&s->a9mpcore), errp)) { + return; + } + sysbus_mmio_map(SYS_BUS_DEVICE(&s->a9mpcore), 0, FSL_IMX6_A9MPCORE_ADDR); + + for (i = 0; i < smp_cpus; i++) { + sysbus_connect_irq(SYS_BUS_DEVICE(&s->a9mpcore), i, + qdev_get_gpio_in(DEVICE(&s->cpu[i]), ARM_CPU_IRQ)); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->a9mpcore), i + smp_cpus, + qdev_get_gpio_in(DEVICE(&s->cpu[i]), ARM_CPU_FIQ)); + } + + if (!sysbus_realize(SYS_BUS_DEVICE(&s->ccm), errp)) { + return; + } + sysbus_mmio_map(SYS_BUS_DEVICE(&s->ccm), 0, FSL_IMX6_CCM_ADDR); + + if (!sysbus_realize(SYS_BUS_DEVICE(&s->src), errp)) { + return; + } + sysbus_mmio_map(SYS_BUS_DEVICE(&s->src), 0, FSL_IMX6_SRC_ADDR); + + /* Initialize all UARTs */ + for (i = 0; i < FSL_IMX6_NUM_UARTS; i++) { + static const struct { + hwaddr addr; + unsigned int irq; + } serial_table[FSL_IMX6_NUM_UARTS] = { + { FSL_IMX6_UART1_ADDR, FSL_IMX6_UART1_IRQ }, + { FSL_IMX6_UART2_ADDR, FSL_IMX6_UART2_IRQ }, + { FSL_IMX6_UART3_ADDR, FSL_IMX6_UART3_IRQ }, + { FSL_IMX6_UART4_ADDR, FSL_IMX6_UART4_IRQ }, + { FSL_IMX6_UART5_ADDR, FSL_IMX6_UART5_IRQ }, + }; + + qdev_prop_set_chr(DEVICE(&s->uart[i]), "chardev", serial_hd(i)); + + if (!sysbus_realize(SYS_BUS_DEVICE(&s->uart[i]), errp)) { + return; + } + + sysbus_mmio_map(SYS_BUS_DEVICE(&s->uart[i]), 0, serial_table[i].addr); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->uart[i]), 0, + qdev_get_gpio_in(DEVICE(&s->a9mpcore), + serial_table[i].irq)); + } + + s->gpt.ccm = IMX_CCM(&s->ccm); + + if (!sysbus_realize(SYS_BUS_DEVICE(&s->gpt), errp)) { + return; + } + + sysbus_mmio_map(SYS_BUS_DEVICE(&s->gpt), 0, FSL_IMX6_GPT_ADDR); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->gpt), 0, + qdev_get_gpio_in(DEVICE(&s->a9mpcore), + FSL_IMX6_GPT_IRQ)); + + /* Initialize all EPIT timers */ + for (i = 0; i < FSL_IMX6_NUM_EPITS; i++) { + static const struct { + hwaddr addr; + unsigned int irq; + } epit_table[FSL_IMX6_NUM_EPITS] = { + { FSL_IMX6_EPIT1_ADDR, FSL_IMX6_EPIT1_IRQ }, + { FSL_IMX6_EPIT2_ADDR, FSL_IMX6_EPIT2_IRQ }, + }; + + s->epit[i].ccm = IMX_CCM(&s->ccm); + + if (!sysbus_realize(SYS_BUS_DEVICE(&s->epit[i]), errp)) { + return; + } + + sysbus_mmio_map(SYS_BUS_DEVICE(&s->epit[i]), 0, epit_table[i].addr); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->epit[i]), 0, + qdev_get_gpio_in(DEVICE(&s->a9mpcore), + epit_table[i].irq)); + } + + /* Initialize all I2C */ + for (i = 0; i < FSL_IMX6_NUM_I2CS; i++) { + static const struct { + hwaddr addr; + unsigned int irq; + } i2c_table[FSL_IMX6_NUM_I2CS] = { + { FSL_IMX6_I2C1_ADDR, FSL_IMX6_I2C1_IRQ }, + { FSL_IMX6_I2C2_ADDR, FSL_IMX6_I2C2_IRQ }, + { FSL_IMX6_I2C3_ADDR, FSL_IMX6_I2C3_IRQ } + }; + + if (!sysbus_realize(SYS_BUS_DEVICE(&s->i2c[i]), errp)) { + return; + } + + sysbus_mmio_map(SYS_BUS_DEVICE(&s->i2c[i]), 0, i2c_table[i].addr); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->i2c[i]), 0, + qdev_get_gpio_in(DEVICE(&s->a9mpcore), + 
i2c_table[i].irq)); + } + + /* Initialize all GPIOs */ + for (i = 0; i < FSL_IMX6_NUM_GPIOS; i++) { + static const struct { + hwaddr addr; + unsigned int irq_low; + unsigned int irq_high; + } gpio_table[FSL_IMX6_NUM_GPIOS] = { + { + FSL_IMX6_GPIO1_ADDR, + FSL_IMX6_GPIO1_LOW_IRQ, + FSL_IMX6_GPIO1_HIGH_IRQ + }, + { + FSL_IMX6_GPIO2_ADDR, + FSL_IMX6_GPIO2_LOW_IRQ, + FSL_IMX6_GPIO2_HIGH_IRQ + }, + { + FSL_IMX6_GPIO3_ADDR, + FSL_IMX6_GPIO3_LOW_IRQ, + FSL_IMX6_GPIO3_HIGH_IRQ + }, + { + FSL_IMX6_GPIO4_ADDR, + FSL_IMX6_GPIO4_LOW_IRQ, + FSL_IMX6_GPIO4_HIGH_IRQ + }, + { + FSL_IMX6_GPIO5_ADDR, + FSL_IMX6_GPIO5_LOW_IRQ, + FSL_IMX6_GPIO5_HIGH_IRQ + }, + { + FSL_IMX6_GPIO6_ADDR, + FSL_IMX6_GPIO6_LOW_IRQ, + FSL_IMX6_GPIO6_HIGH_IRQ + }, + { + FSL_IMX6_GPIO7_ADDR, + FSL_IMX6_GPIO7_LOW_IRQ, + FSL_IMX6_GPIO7_HIGH_IRQ + }, + }; + + object_property_set_bool(OBJECT(&s->gpio[i]), "has-edge-sel", true, + &error_abort); + object_property_set_bool(OBJECT(&s->gpio[i]), "has-upper-pin-irq", + true, &error_abort); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->gpio[i]), errp)) { + return; + } + + sysbus_mmio_map(SYS_BUS_DEVICE(&s->gpio[i]), 0, gpio_table[i].addr); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->gpio[i]), 0, + qdev_get_gpio_in(DEVICE(&s->a9mpcore), + gpio_table[i].irq_low)); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->gpio[i]), 1, + qdev_get_gpio_in(DEVICE(&s->a9mpcore), + gpio_table[i].irq_high)); + } + + /* Initialize all SDHC */ + for (i = 0; i < FSL_IMX6_NUM_ESDHCS; i++) { + static const struct { + hwaddr addr; + unsigned int irq; + } esdhc_table[FSL_IMX6_NUM_ESDHCS] = { + { FSL_IMX6_uSDHC1_ADDR, FSL_IMX6_uSDHC1_IRQ }, + { FSL_IMX6_uSDHC2_ADDR, FSL_IMX6_uSDHC2_IRQ }, + { FSL_IMX6_uSDHC3_ADDR, FSL_IMX6_uSDHC3_IRQ }, + { FSL_IMX6_uSDHC4_ADDR, FSL_IMX6_uSDHC4_IRQ }, + }; + + /* UHS-I SDIO3.0 SDR104 1.8V ADMA */ + object_property_set_uint(OBJECT(&s->esdhc[i]), "sd-spec-version", 3, + &error_abort); + object_property_set_uint(OBJECT(&s->esdhc[i]), "capareg", + IMX6_ESDHC_CAPABILITIES, &error_abort); + object_property_set_uint(OBJECT(&s->esdhc[i]), "vendor", + SDHCI_VENDOR_IMX, &error_abort); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->esdhc[i]), errp)) { + return; + } + sysbus_mmio_map(SYS_BUS_DEVICE(&s->esdhc[i]), 0, esdhc_table[i].addr); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->esdhc[i]), 0, + qdev_get_gpio_in(DEVICE(&s->a9mpcore), + esdhc_table[i].irq)); + } + + /* USB */ + for (i = 0; i < FSL_IMX6_NUM_USB_PHYS; i++) { + sysbus_realize(SYS_BUS_DEVICE(&s->usbphy[i]), &error_abort); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->usbphy[i]), 0, + FSL_IMX6_USBPHY1_ADDR + i * 0x1000); + } + for (i = 0; i < FSL_IMX6_NUM_USBS; i++) { + static const int FSL_IMX6_USBn_IRQ[] = { + FSL_IMX6_USB_OTG_IRQ, + FSL_IMX6_USB_HOST1_IRQ, + FSL_IMX6_USB_HOST2_IRQ, + FSL_IMX6_USB_HOST3_IRQ, + }; + + sysbus_realize(SYS_BUS_DEVICE(&s->usb[i]), &error_abort); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->usb[i]), 0, + FSL_IMX6_USBOH3_USB_ADDR + i * 0x200); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->usb[i]), 0, + qdev_get_gpio_in(DEVICE(&s->a9mpcore), + FSL_IMX6_USBn_IRQ[i])); + } + + /* Initialize all ECSPI */ + for (i = 0; i < FSL_IMX6_NUM_ECSPIS; i++) { + static const struct { + hwaddr addr; + unsigned int irq; + } spi_table[FSL_IMX6_NUM_ECSPIS] = { + { FSL_IMX6_eCSPI1_ADDR, FSL_IMX6_ECSPI1_IRQ }, + { FSL_IMX6_eCSPI2_ADDR, FSL_IMX6_ECSPI2_IRQ }, + { FSL_IMX6_eCSPI3_ADDR, FSL_IMX6_ECSPI3_IRQ }, + { FSL_IMX6_eCSPI4_ADDR, FSL_IMX6_ECSPI4_IRQ }, + { FSL_IMX6_eCSPI5_ADDR, FSL_IMX6_ECSPI5_IRQ }, + }; + + /* Initialize the SPI */ + if 
(!sysbus_realize(SYS_BUS_DEVICE(&s->spi[i]), errp)) { + return; + } + + sysbus_mmio_map(SYS_BUS_DEVICE(&s->spi[i]), 0, spi_table[i].addr); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->spi[i]), 0, + qdev_get_gpio_in(DEVICE(&s->a9mpcore), + spi_table[i].irq)); + } + + object_property_set_uint(OBJECT(&s->eth), "phy-num", s->phy_num, &err); + qdev_set_nic_properties(DEVICE(&s->eth), &nd_table[0]); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->eth), errp)) { + return; + } + sysbus_mmio_map(SYS_BUS_DEVICE(&s->eth), 0, FSL_IMX6_ENET_ADDR); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->eth), 0, + qdev_get_gpio_in(DEVICE(&s->a9mpcore), + FSL_IMX6_ENET_MAC_IRQ)); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->eth), 1, + qdev_get_gpio_in(DEVICE(&s->a9mpcore), + FSL_IMX6_ENET_MAC_1588_IRQ)); + + /* + * Watchdog + */ + for (i = 0; i < FSL_IMX6_NUM_WDTS; i++) { + static const hwaddr FSL_IMX6_WDOGn_ADDR[FSL_IMX6_NUM_WDTS] = { + FSL_IMX6_WDOG1_ADDR, + FSL_IMX6_WDOG2_ADDR, + }; + static const int FSL_IMX6_WDOGn_IRQ[FSL_IMX6_NUM_WDTS] = { + FSL_IMX6_WDOG1_IRQ, + FSL_IMX6_WDOG2_IRQ, + }; + + object_property_set_bool(OBJECT(&s->wdt[i]), "pretimeout-support", + true, &error_abort); + sysbus_realize(SYS_BUS_DEVICE(&s->wdt[i]), &error_abort); + + sysbus_mmio_map(SYS_BUS_DEVICE(&s->wdt[i]), 0, FSL_IMX6_WDOGn_ADDR[i]); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->wdt[i]), 0, + qdev_get_gpio_in(DEVICE(&s->a9mpcore), + FSL_IMX6_WDOGn_IRQ[i])); + } + + /* ROM memory */ + memory_region_init_rom(&s->rom, OBJECT(dev), "imx6.rom", + FSL_IMX6_ROM_SIZE, &err); + if (err) { + error_propagate(errp, err); + return; + } + memory_region_add_subregion(get_system_memory(), FSL_IMX6_ROM_ADDR, + &s->rom); + + /* CAAM memory */ + memory_region_init_rom(&s->caam, OBJECT(dev), "imx6.caam", + FSL_IMX6_CAAM_MEM_SIZE, &err); + if (err) { + error_propagate(errp, err); + return; + } + memory_region_add_subregion(get_system_memory(), FSL_IMX6_CAAM_MEM_ADDR, + &s->caam); + + /* OCRAM memory */ + memory_region_init_ram(&s->ocram, NULL, "imx6.ocram", FSL_IMX6_OCRAM_SIZE, + &err); + if (err) { + error_propagate(errp, err); + return; + } + memory_region_add_subregion(get_system_memory(), FSL_IMX6_OCRAM_ADDR, + &s->ocram); + + /* internal OCRAM (256 KB) is aliased over 1 MB */ + memory_region_init_alias(&s->ocram_alias, OBJECT(dev), "imx6.ocram_alias", + &s->ocram, 0, FSL_IMX6_OCRAM_ALIAS_SIZE); + memory_region_add_subregion(get_system_memory(), FSL_IMX6_OCRAM_ALIAS_ADDR, + &s->ocram_alias); +} + +static Property fsl_imx6_properties[] = { + DEFINE_PROP_UINT32("fec-phy-num", FslIMX6State, phy_num, 0), + DEFINE_PROP_END_OF_LIST(), +}; + +static void fsl_imx6_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + + device_class_set_props(dc, fsl_imx6_properties); + dc->realize = fsl_imx6_realize; + dc->desc = "i.MX6 SOC"; + /* Reason: Uses serial_hd() in the realize() function */ + dc->user_creatable = false; +} + +static const TypeInfo fsl_imx6_type_info = { + .name = TYPE_FSL_IMX6, + .parent = TYPE_DEVICE, + .instance_size = sizeof(FslIMX6State), + .instance_init = fsl_imx6_init, + .class_init = fsl_imx6_class_init, +}; + +static void fsl_imx6_register_types(void) +{ + type_register_static(&fsl_imx6_type_info); +} + +type_init(fsl_imx6_register_types) diff --git a/hw/arm/fsl-imx6ul.c b/hw/arm/fsl-imx6ul.c new file mode 100644 index 000000000..1d1a708dd --- /dev/null +++ b/hw/arm/fsl-imx6ul.c @@ -0,0 +1,651 @@ +/* + * Copyright (c) 2018 Jean-Christophe Dubois + * + * i.MX6UL SOC emulation. 
+ * + * Based on hw/arm/fsl-imx7.c + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "hw/arm/fsl-imx6ul.h" +#include "hw/misc/unimp.h" +#include "hw/usb/imx-usb-phy.h" +#include "hw/boards.h" +#include "sysemu/sysemu.h" +#include "qemu/error-report.h" +#include "qemu/module.h" + +#define NAME_SIZE 20 + +static void fsl_imx6ul_init(Object *obj) +{ + FslIMX6ULState *s = FSL_IMX6UL(obj); + char name[NAME_SIZE]; + int i; + + object_initialize_child(obj, "cpu0", &s->cpu, + ARM_CPU_TYPE_NAME("cortex-a7")); + + /* + * A7MPCORE + */ + object_initialize_child(obj, "a7mpcore", &s->a7mpcore, + TYPE_A15MPCORE_PRIV); + + /* + * CCM + */ + object_initialize_child(obj, "ccm", &s->ccm, TYPE_IMX6UL_CCM); + + /* + * SRC + */ + object_initialize_child(obj, "src", &s->src, TYPE_IMX6_SRC); + + /* + * GPCv2 + */ + object_initialize_child(obj, "gpcv2", &s->gpcv2, TYPE_IMX_GPCV2); + + /* + * SNVS + */ + object_initialize_child(obj, "snvs", &s->snvs, TYPE_IMX7_SNVS); + + /* + * GPR + */ + object_initialize_child(obj, "gpr", &s->gpr, TYPE_IMX7_GPR); + + /* + * GPIOs 1 to 5 + */ + for (i = 0; i < FSL_IMX6UL_NUM_GPIOS; i++) { + snprintf(name, NAME_SIZE, "gpio%d", i); + object_initialize_child(obj, name, &s->gpio[i], TYPE_IMX_GPIO); + } + + /* + * GPT 1, 2 + */ + for (i = 0; i < FSL_IMX6UL_NUM_GPTS; i++) { + snprintf(name, NAME_SIZE, "gpt%d", i); + object_initialize_child(obj, name, &s->gpt[i], TYPE_IMX7_GPT); + } + + /* + * EPIT 1, 2 + */ + for (i = 0; i < FSL_IMX6UL_NUM_EPITS; i++) { + snprintf(name, NAME_SIZE, "epit%d", i + 1); + object_initialize_child(obj, name, &s->epit[i], TYPE_IMX_EPIT); + } + + /* + * eCSPI + */ + for (i = 0; i < FSL_IMX6UL_NUM_ECSPIS; i++) { + snprintf(name, NAME_SIZE, "spi%d", i + 1); + object_initialize_child(obj, name, &s->spi[i], TYPE_IMX_SPI); + } + + /* + * I2C + */ + for (i = 0; i < FSL_IMX6UL_NUM_I2CS; i++) { + snprintf(name, NAME_SIZE, "i2c%d", i + 1); + object_initialize_child(obj, name, &s->i2c[i], TYPE_IMX_I2C); + } + + /* + * UART + */ + for (i = 0; i < FSL_IMX6UL_NUM_UARTS; i++) { + snprintf(name, NAME_SIZE, "uart%d", i); + object_initialize_child(obj, name, &s->uart[i], TYPE_IMX_SERIAL); + } + + /* + * Ethernet + */ + for (i = 0; i < FSL_IMX6UL_NUM_ETHS; i++) { + snprintf(name, NAME_SIZE, "eth%d", i); + object_initialize_child(obj, name, &s->eth[i], TYPE_IMX_ENET); + } + + /* USB */ + for (i = 0; i < FSL_IMX6UL_NUM_USB_PHYS; i++) { + snprintf(name, NAME_SIZE, "usbphy%d", i); + object_initialize_child(obj, name, &s->usbphy[i], TYPE_IMX_USBPHY); + } + for (i = 0; i < FSL_IMX6UL_NUM_USBS; i++) { + snprintf(name, NAME_SIZE, "usb%d", i); + object_initialize_child(obj, name, &s->usb[i], TYPE_CHIPIDEA); + } + + /* + * SDHCI + */ + for (i = 0; i < FSL_IMX6UL_NUM_USDHCS; i++) { + snprintf(name, NAME_SIZE, "usdhc%d", i); + object_initialize_child(obj, name, &s->usdhc[i], TYPE_IMX_USDHC); + } + + /* + * Watchdog + */ + for (i = 0; i < FSL_IMX6UL_NUM_WDTS; i++) { + snprintf(name, NAME_SIZE, "wdt%d", i); + object_initialize_child(obj, name, &s->wdt[i], TYPE_IMX2_WDT); + 
} +} + +static void fsl_imx6ul_realize(DeviceState *dev, Error **errp) +{ + MachineState *ms = MACHINE(qdev_get_machine()); + FslIMX6ULState *s = FSL_IMX6UL(dev); + int i; + char name[NAME_SIZE]; + SysBusDevice *sbd; + DeviceState *d; + + if (ms->smp.cpus > 1) { + error_setg(errp, "%s: Only a single CPU is supported (%d requested)", + TYPE_FSL_IMX6UL, ms->smp.cpus); + return; + } + + object_property_set_int(OBJECT(&s->cpu), "psci-conduit", + QEMU_PSCI_CONDUIT_SMC, &error_abort); + qdev_realize(DEVICE(&s->cpu), NULL, &error_abort); + + /* + * A7MPCORE + */ + object_property_set_int(OBJECT(&s->a7mpcore), "num-cpu", 1, &error_abort); + object_property_set_int(OBJECT(&s->a7mpcore), "num-irq", + FSL_IMX6UL_MAX_IRQ + GIC_INTERNAL, &error_abort); + sysbus_realize(SYS_BUS_DEVICE(&s->a7mpcore), &error_abort); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->a7mpcore), 0, FSL_IMX6UL_A7MPCORE_ADDR); + + sbd = SYS_BUS_DEVICE(&s->a7mpcore); + d = DEVICE(&s->cpu); + + sysbus_connect_irq(sbd, 0, qdev_get_gpio_in(d, ARM_CPU_IRQ)); + sysbus_connect_irq(sbd, 1, qdev_get_gpio_in(d, ARM_CPU_FIQ)); + sysbus_connect_irq(sbd, 2, qdev_get_gpio_in(d, ARM_CPU_VIRQ)); + sysbus_connect_irq(sbd, 3, qdev_get_gpio_in(d, ARM_CPU_VFIQ)); + + /* + * A7MPCORE DAP + */ + create_unimplemented_device("a7mpcore-dap", FSL_IMX6UL_A7MPCORE_DAP_ADDR, + 0x100000); + + /* + * GPT 1, 2 + */ + for (i = 0; i < FSL_IMX6UL_NUM_GPTS; i++) { + static const hwaddr FSL_IMX6UL_GPTn_ADDR[FSL_IMX6UL_NUM_GPTS] = { + FSL_IMX6UL_GPT1_ADDR, + FSL_IMX6UL_GPT2_ADDR, + }; + + static const int FSL_IMX6UL_GPTn_IRQ[FSL_IMX6UL_NUM_GPTS] = { + FSL_IMX6UL_GPT1_IRQ, + FSL_IMX6UL_GPT2_IRQ, + }; + + s->gpt[i].ccm = IMX_CCM(&s->ccm); + sysbus_realize(SYS_BUS_DEVICE(&s->gpt[i]), &error_abort); + + sysbus_mmio_map(SYS_BUS_DEVICE(&s->gpt[i]), 0, + FSL_IMX6UL_GPTn_ADDR[i]); + + sysbus_connect_irq(SYS_BUS_DEVICE(&s->gpt[i]), 0, + qdev_get_gpio_in(DEVICE(&s->a7mpcore), + FSL_IMX6UL_GPTn_IRQ[i])); + } + + /* + * EPIT 1, 2 + */ + for (i = 0; i < FSL_IMX6UL_NUM_EPITS; i++) { + static const hwaddr FSL_IMX6UL_EPITn_ADDR[FSL_IMX6UL_NUM_EPITS] = { + FSL_IMX6UL_EPIT1_ADDR, + FSL_IMX6UL_EPIT2_ADDR, + }; + + static const int FSL_IMX6UL_EPITn_IRQ[FSL_IMX6UL_NUM_EPITS] = { + FSL_IMX6UL_EPIT1_IRQ, + FSL_IMX6UL_EPIT2_IRQ, + }; + + s->epit[i].ccm = IMX_CCM(&s->ccm); + sysbus_realize(SYS_BUS_DEVICE(&s->epit[i]), &error_abort); + + sysbus_mmio_map(SYS_BUS_DEVICE(&s->epit[i]), 0, + FSL_IMX6UL_EPITn_ADDR[i]); + + sysbus_connect_irq(SYS_BUS_DEVICE(&s->epit[i]), 0, + qdev_get_gpio_in(DEVICE(&s->a7mpcore), + FSL_IMX6UL_EPITn_IRQ[i])); + } + + /* + * GPIO + */ + for (i = 0; i < FSL_IMX6UL_NUM_GPIOS; i++) { + static const hwaddr FSL_IMX6UL_GPIOn_ADDR[FSL_IMX6UL_NUM_GPIOS] = { + FSL_IMX6UL_GPIO1_ADDR, + FSL_IMX6UL_GPIO2_ADDR, + FSL_IMX6UL_GPIO3_ADDR, + FSL_IMX6UL_GPIO4_ADDR, + FSL_IMX6UL_GPIO5_ADDR, + }; + + static const int FSL_IMX6UL_GPIOn_LOW_IRQ[FSL_IMX6UL_NUM_GPIOS] = { + FSL_IMX6UL_GPIO1_LOW_IRQ, + FSL_IMX6UL_GPIO2_LOW_IRQ, + FSL_IMX6UL_GPIO3_LOW_IRQ, + FSL_IMX6UL_GPIO4_LOW_IRQ, + FSL_IMX6UL_GPIO5_LOW_IRQ, + }; + + static const int FSL_IMX6UL_GPIOn_HIGH_IRQ[FSL_IMX6UL_NUM_GPIOS] = { + FSL_IMX6UL_GPIO1_HIGH_IRQ, + FSL_IMX6UL_GPIO2_HIGH_IRQ, + FSL_IMX6UL_GPIO3_HIGH_IRQ, + FSL_IMX6UL_GPIO4_HIGH_IRQ, + FSL_IMX6UL_GPIO5_HIGH_IRQ, + }; + + sysbus_realize(SYS_BUS_DEVICE(&s->gpio[i]), &error_abort); + + sysbus_mmio_map(SYS_BUS_DEVICE(&s->gpio[i]), 0, + FSL_IMX6UL_GPIOn_ADDR[i]); + + sysbus_connect_irq(SYS_BUS_DEVICE(&s->gpio[i]), 0, + qdev_get_gpio_in(DEVICE(&s->a7mpcore), + FSL_IMX6UL_GPIOn_LOW_IRQ[i])); 
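+        /* the second IRQ output feeds this bank's "high" (upper-pins) GIC line */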
+ + sysbus_connect_irq(SYS_BUS_DEVICE(&s->gpio[i]), 1, + qdev_get_gpio_in(DEVICE(&s->a7mpcore), + FSL_IMX6UL_GPIOn_HIGH_IRQ[i])); + } + + /* + * IOMUXC and IOMUXC_GPR + */ + for (i = 0; i < 1; i++) { + static const hwaddr FSL_IMX6UL_IOMUXCn_ADDR[FSL_IMX6UL_NUM_IOMUXCS] = { + FSL_IMX6UL_IOMUXC_ADDR, + FSL_IMX6UL_IOMUXC_GPR_ADDR, + }; + + snprintf(name, NAME_SIZE, "iomuxc%d", i); + create_unimplemented_device(name, FSL_IMX6UL_IOMUXCn_ADDR[i], 0x4000); + } + + /* + * CCM + */ + sysbus_realize(SYS_BUS_DEVICE(&s->ccm), &error_abort); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->ccm), 0, FSL_IMX6UL_CCM_ADDR); + + /* + * SRC + */ + sysbus_realize(SYS_BUS_DEVICE(&s->src), &error_abort); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->src), 0, FSL_IMX6UL_SRC_ADDR); + + /* + * GPCv2 + */ + sysbus_realize(SYS_BUS_DEVICE(&s->gpcv2), &error_abort); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->gpcv2), 0, FSL_IMX6UL_GPC_ADDR); + + /* Initialize all ECSPI */ + for (i = 0; i < FSL_IMX6UL_NUM_ECSPIS; i++) { + static const hwaddr FSL_IMX6UL_SPIn_ADDR[FSL_IMX6UL_NUM_ECSPIS] = { + FSL_IMX6UL_ECSPI1_ADDR, + FSL_IMX6UL_ECSPI2_ADDR, + FSL_IMX6UL_ECSPI3_ADDR, + FSL_IMX6UL_ECSPI4_ADDR, + }; + + static const int FSL_IMX6UL_SPIn_IRQ[FSL_IMX6UL_NUM_ECSPIS] = { + FSL_IMX6UL_ECSPI1_IRQ, + FSL_IMX6UL_ECSPI2_IRQ, + FSL_IMX6UL_ECSPI3_IRQ, + FSL_IMX6UL_ECSPI4_IRQ, + }; + + /* Initialize the SPI */ + sysbus_realize(SYS_BUS_DEVICE(&s->spi[i]), &error_abort); + + sysbus_mmio_map(SYS_BUS_DEVICE(&s->spi[i]), 0, + FSL_IMX6UL_SPIn_ADDR[i]); + + sysbus_connect_irq(SYS_BUS_DEVICE(&s->spi[i]), 0, + qdev_get_gpio_in(DEVICE(&s->a7mpcore), + FSL_IMX6UL_SPIn_IRQ[i])); + } + + /* + * I2C + */ + for (i = 0; i < FSL_IMX6UL_NUM_I2CS; i++) { + static const hwaddr FSL_IMX6UL_I2Cn_ADDR[FSL_IMX6UL_NUM_I2CS] = { + FSL_IMX6UL_I2C1_ADDR, + FSL_IMX6UL_I2C2_ADDR, + FSL_IMX6UL_I2C3_ADDR, + FSL_IMX6UL_I2C4_ADDR, + }; + + static const int FSL_IMX6UL_I2Cn_IRQ[FSL_IMX6UL_NUM_I2CS] = { + FSL_IMX6UL_I2C1_IRQ, + FSL_IMX6UL_I2C2_IRQ, + FSL_IMX6UL_I2C3_IRQ, + FSL_IMX6UL_I2C4_IRQ, + }; + + sysbus_realize(SYS_BUS_DEVICE(&s->i2c[i]), &error_abort); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->i2c[i]), 0, FSL_IMX6UL_I2Cn_ADDR[i]); + + sysbus_connect_irq(SYS_BUS_DEVICE(&s->i2c[i]), 0, + qdev_get_gpio_in(DEVICE(&s->a7mpcore), + FSL_IMX6UL_I2Cn_IRQ[i])); + } + + /* + * UART + */ + for (i = 0; i < FSL_IMX6UL_NUM_UARTS; i++) { + static const hwaddr FSL_IMX6UL_UARTn_ADDR[FSL_IMX6UL_NUM_UARTS] = { + FSL_IMX6UL_UART1_ADDR, + FSL_IMX6UL_UART2_ADDR, + FSL_IMX6UL_UART3_ADDR, + FSL_IMX6UL_UART4_ADDR, + FSL_IMX6UL_UART5_ADDR, + FSL_IMX6UL_UART6_ADDR, + FSL_IMX6UL_UART7_ADDR, + FSL_IMX6UL_UART8_ADDR, + }; + + static const int FSL_IMX6UL_UARTn_IRQ[FSL_IMX6UL_NUM_UARTS] = { + FSL_IMX6UL_UART1_IRQ, + FSL_IMX6UL_UART2_IRQ, + FSL_IMX6UL_UART3_IRQ, + FSL_IMX6UL_UART4_IRQ, + FSL_IMX6UL_UART5_IRQ, + FSL_IMX6UL_UART6_IRQ, + FSL_IMX6UL_UART7_IRQ, + FSL_IMX6UL_UART8_IRQ, + }; + + qdev_prop_set_chr(DEVICE(&s->uart[i]), "chardev", serial_hd(i)); + + sysbus_realize(SYS_BUS_DEVICE(&s->uart[i]), &error_abort); + + sysbus_mmio_map(SYS_BUS_DEVICE(&s->uart[i]), 0, + FSL_IMX6UL_UARTn_ADDR[i]); + + sysbus_connect_irq(SYS_BUS_DEVICE(&s->uart[i]), 0, + qdev_get_gpio_in(DEVICE(&s->a7mpcore), + FSL_IMX6UL_UARTn_IRQ[i])); + } + + /* + * Ethernet + */ + for (i = 0; i < FSL_IMX6UL_NUM_ETHS; i++) { + static const hwaddr FSL_IMX6UL_ENETn_ADDR[FSL_IMX6UL_NUM_ETHS] = { + FSL_IMX6UL_ENET1_ADDR, + FSL_IMX6UL_ENET2_ADDR, + }; + + static const int FSL_IMX6UL_ENETn_IRQ[FSL_IMX6UL_NUM_ETHS] = { + FSL_IMX6UL_ENET1_IRQ, + FSL_IMX6UL_ENET2_IRQ, + }; + 
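+        /* each ENET also raises a dedicated IEEE 1588 timer interrupt */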
+ static const int FSL_IMX6UL_ENETn_TIMER_IRQ[FSL_IMX6UL_NUM_ETHS] = { + FSL_IMX6UL_ENET1_TIMER_IRQ, + FSL_IMX6UL_ENET2_TIMER_IRQ, + }; + + object_property_set_uint(OBJECT(&s->eth[i]), "phy-num", + s->phy_num[i], &error_abort); + object_property_set_uint(OBJECT(&s->eth[i]), "tx-ring-num", + FSL_IMX6UL_ETH_NUM_TX_RINGS, &error_abort); + qdev_set_nic_properties(DEVICE(&s->eth[i]), &nd_table[i]); + sysbus_realize(SYS_BUS_DEVICE(&s->eth[i]), &error_abort); + + sysbus_mmio_map(SYS_BUS_DEVICE(&s->eth[i]), 0, + FSL_IMX6UL_ENETn_ADDR[i]); + + sysbus_connect_irq(SYS_BUS_DEVICE(&s->eth[i]), 0, + qdev_get_gpio_in(DEVICE(&s->a7mpcore), + FSL_IMX6UL_ENETn_IRQ[i])); + + sysbus_connect_irq(SYS_BUS_DEVICE(&s->eth[i]), 1, + qdev_get_gpio_in(DEVICE(&s->a7mpcore), + FSL_IMX6UL_ENETn_TIMER_IRQ[i])); + } + + /* USB */ + for (i = 0; i < FSL_IMX6UL_NUM_USB_PHYS; i++) { + sysbus_realize(SYS_BUS_DEVICE(&s->usbphy[i]), &error_abort); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->usbphy[i]), 0, + FSL_IMX6UL_USBPHY1_ADDR + i * 0x1000); + } + + for (i = 0; i < FSL_IMX6UL_NUM_USBS; i++) { + static const int FSL_IMX6UL_USBn_IRQ[] = { + FSL_IMX6UL_USB1_IRQ, + FSL_IMX6UL_USB2_IRQ, + }; + sysbus_realize(SYS_BUS_DEVICE(&s->usb[i]), &error_abort); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->usb[i]), 0, + FSL_IMX6UL_USBO2_USB_ADDR + i * 0x200); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->usb[i]), 0, + qdev_get_gpio_in(DEVICE(&s->a7mpcore), + FSL_IMX6UL_USBn_IRQ[i])); + } + + /* + * USDHC + */ + for (i = 0; i < FSL_IMX6UL_NUM_USDHCS; i++) { + static const hwaddr FSL_IMX6UL_USDHCn_ADDR[FSL_IMX6UL_NUM_USDHCS] = { + FSL_IMX6UL_USDHC1_ADDR, + FSL_IMX6UL_USDHC2_ADDR, + }; + + static const int FSL_IMX6UL_USDHCn_IRQ[FSL_IMX6UL_NUM_USDHCS] = { + FSL_IMX6UL_USDHC1_IRQ, + FSL_IMX6UL_USDHC2_IRQ, + }; + + object_property_set_uint(OBJECT(&s->usdhc[i]), "vendor", + SDHCI_VENDOR_IMX, &error_abort); + sysbus_realize(SYS_BUS_DEVICE(&s->usdhc[i]), &error_abort); + + sysbus_mmio_map(SYS_BUS_DEVICE(&s->usdhc[i]), 0, + FSL_IMX6UL_USDHCn_ADDR[i]); + + sysbus_connect_irq(SYS_BUS_DEVICE(&s->usdhc[i]), 0, + qdev_get_gpio_in(DEVICE(&s->a7mpcore), + FSL_IMX6UL_USDHCn_IRQ[i])); + } + + /* + * SNVS + */ + sysbus_realize(SYS_BUS_DEVICE(&s->snvs), &error_abort); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->snvs), 0, FSL_IMX6UL_SNVS_HP_ADDR); + + /* + * Watchdog + */ + for (i = 0; i < FSL_IMX6UL_NUM_WDTS; i++) { + static const hwaddr FSL_IMX6UL_WDOGn_ADDR[FSL_IMX6UL_NUM_WDTS] = { + FSL_IMX6UL_WDOG1_ADDR, + FSL_IMX6UL_WDOG2_ADDR, + FSL_IMX6UL_WDOG3_ADDR, + }; + static const int FSL_IMX6UL_WDOGn_IRQ[FSL_IMX6UL_NUM_WDTS] = { + FSL_IMX6UL_WDOG1_IRQ, + FSL_IMX6UL_WDOG2_IRQ, + FSL_IMX6UL_WDOG3_IRQ, + }; + + object_property_set_bool(OBJECT(&s->wdt[i]), "pretimeout-support", + true, &error_abort); + sysbus_realize(SYS_BUS_DEVICE(&s->wdt[i]), &error_abort); + + sysbus_mmio_map(SYS_BUS_DEVICE(&s->wdt[i]), 0, + FSL_IMX6UL_WDOGn_ADDR[i]); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->wdt[i]), 0, + qdev_get_gpio_in(DEVICE(&s->a7mpcore), + FSL_IMX6UL_WDOGn_IRQ[i])); + } + + /* + * GPR + */ + sysbus_realize(SYS_BUS_DEVICE(&s->gpr), &error_abort); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->gpr), 0, FSL_IMX6UL_IOMUXC_GPR_ADDR); + + /* + * SDMA + */ + create_unimplemented_device("sdma", FSL_IMX6UL_SDMA_ADDR, 0x4000); + + /* + * SAI (Audio SSI (Synchronous Serial Interface)) + */ + create_unimplemented_device("sai1", FSL_IMX6UL_SAI1_ADDR, 0x4000); + create_unimplemented_device("sai2", FSL_IMX6UL_SAI2_ADDR, 0x4000); + create_unimplemented_device("sai3", FSL_IMX6UL_SAI3_ADDR, 0x4000); + + /* + * PWM + */ + 
create_unimplemented_device("pwm1", FSL_IMX6UL_PWM1_ADDR, 0x4000); + create_unimplemented_device("pwm2", FSL_IMX6UL_PWM2_ADDR, 0x4000); + create_unimplemented_device("pwm3", FSL_IMX6UL_PWM3_ADDR, 0x4000); + create_unimplemented_device("pwm4", FSL_IMX6UL_PWM4_ADDR, 0x4000); + + /* + * Audio ASRC (asynchronous sample rate converter) + */ + create_unimplemented_device("asrc", FSL_IMX6UL_ASRC_ADDR, 0x4000); + + /* + * CAN + */ + create_unimplemented_device("can1", FSL_IMX6UL_CAN1_ADDR, 0x4000); + create_unimplemented_device("can2", FSL_IMX6UL_CAN2_ADDR, 0x4000); + + /* + * APHB_DMA + */ + create_unimplemented_device("aphb_dma", FSL_IMX6UL_APBH_DMA_ADDR, + FSL_IMX6UL_APBH_DMA_SIZE); + + /* + * ADCs + */ + for (i = 0; i < FSL_IMX6UL_NUM_ADCS; i++) { + static const hwaddr FSL_IMX6UL_ADCn_ADDR[FSL_IMX6UL_NUM_ADCS] = { + FSL_IMX6UL_ADC1_ADDR, + FSL_IMX6UL_ADC2_ADDR, + }; + + snprintf(name, NAME_SIZE, "adc%d", i); + create_unimplemented_device(name, FSL_IMX6UL_ADCn_ADDR[i], 0x4000); + } + + /* + * LCD + */ + create_unimplemented_device("lcdif", FSL_IMX6UL_LCDIF_ADDR, 0x4000); + + /* + * ROM memory + */ + memory_region_init_rom(&s->rom, OBJECT(dev), "imx6ul.rom", + FSL_IMX6UL_ROM_SIZE, &error_abort); + memory_region_add_subregion(get_system_memory(), FSL_IMX6UL_ROM_ADDR, + &s->rom); + + /* + * CAAM memory + */ + memory_region_init_rom(&s->caam, OBJECT(dev), "imx6ul.caam", + FSL_IMX6UL_CAAM_MEM_SIZE, &error_abort); + memory_region_add_subregion(get_system_memory(), FSL_IMX6UL_CAAM_MEM_ADDR, + &s->caam); + + /* + * OCRAM memory + */ + memory_region_init_ram(&s->ocram, NULL, "imx6ul.ocram", + FSL_IMX6UL_OCRAM_MEM_SIZE, + &error_abort); + memory_region_add_subregion(get_system_memory(), FSL_IMX6UL_OCRAM_MEM_ADDR, + &s->ocram); + + /* + * internal OCRAM (128 KB) is aliased over 512 KB + */ + memory_region_init_alias(&s->ocram_alias, OBJECT(dev), + "imx6ul.ocram_alias", &s->ocram, 0, + FSL_IMX6UL_OCRAM_ALIAS_SIZE); + memory_region_add_subregion(get_system_memory(), + FSL_IMX6UL_OCRAM_ALIAS_ADDR, &s->ocram_alias); +} + +static Property fsl_imx6ul_properties[] = { + DEFINE_PROP_UINT32("fec1-phy-num", FslIMX6ULState, phy_num[0], 0), + DEFINE_PROP_UINT32("fec2-phy-num", FslIMX6ULState, phy_num[1], 1), + DEFINE_PROP_END_OF_LIST(), +}; + +static void fsl_imx6ul_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + + device_class_set_props(dc, fsl_imx6ul_properties); + dc->realize = fsl_imx6ul_realize; + dc->desc = "i.MX6UL SOC"; + /* Reason: Uses serial_hds and nd_table in realize() directly */ + dc->user_creatable = false; +} + +static const TypeInfo fsl_imx6ul_type_info = { + .name = TYPE_FSL_IMX6UL, + .parent = TYPE_DEVICE, + .instance_size = sizeof(FslIMX6ULState), + .instance_init = fsl_imx6ul_init, + .class_init = fsl_imx6ul_class_init, +}; + +static void fsl_imx6ul_register_types(void) +{ + type_register_static(&fsl_imx6ul_type_info); +} +type_init(fsl_imx6ul_register_types) diff --git a/hw/arm/fsl-imx7.c b/hw/arm/fsl-imx7.c new file mode 100644 index 000000000..149885f2b --- /dev/null +++ b/hw/arm/fsl-imx7.c @@ -0,0 +1,592 @@ +/* + * Copyright (c) 2018, Impinj, Inc. + * + * i.MX7 SoC definitions + * + * Author: Andrey Smirnov + * + * Based on hw/arm/fsl-imx6.c + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "hw/arm/fsl-imx7.h" +#include "hw/misc/unimp.h" +#include "hw/boards.h" +#include "sysemu/sysemu.h" +#include "qemu/error-report.h" +#include "qemu/module.h" + +#define NAME_SIZE 20 + +static void fsl_imx7_init(Object *obj) +{ + MachineState *ms = MACHINE(qdev_get_machine()); + FslIMX7State *s = FSL_IMX7(obj); + char name[NAME_SIZE]; + int i; + + for (i = 0; i < MIN(ms->smp.cpus, FSL_IMX7_NUM_CPUS); i++) { + snprintf(name, NAME_SIZE, "cpu%d", i); + object_initialize_child(obj, name, &s->cpu[i], + ARM_CPU_TYPE_NAME("cortex-a7")); + } + + /* + * A7MPCORE + */ + object_initialize_child(obj, "a7mpcore", &s->a7mpcore, + TYPE_A15MPCORE_PRIV); + + /* + * GPIOs 1 to 7 + */ + for (i = 0; i < FSL_IMX7_NUM_GPIOS; i++) { + snprintf(name, NAME_SIZE, "gpio%d", i); + object_initialize_child(obj, name, &s->gpio[i], TYPE_IMX_GPIO); + } + + /* + * GPT1, 2, 3, 4 + */ + for (i = 0; i < FSL_IMX7_NUM_GPTS; i++) { + snprintf(name, NAME_SIZE, "gpt%d", i); + object_initialize_child(obj, name, &s->gpt[i], TYPE_IMX7_GPT); + } + + /* + * CCM + */ + object_initialize_child(obj, "ccm", &s->ccm, TYPE_IMX7_CCM); + + /* + * Analog + */ + object_initialize_child(obj, "analog", &s->analog, TYPE_IMX7_ANALOG); + + /* + * GPCv2 + */ + object_initialize_child(obj, "gpcv2", &s->gpcv2, TYPE_IMX_GPCV2); + + for (i = 0; i < FSL_IMX7_NUM_ECSPIS; i++) { + snprintf(name, NAME_SIZE, "spi%d", i + 1); + object_initialize_child(obj, name, &s->spi[i], TYPE_IMX_SPI); + } + + + for (i = 0; i < FSL_IMX7_NUM_I2CS; i++) { + snprintf(name, NAME_SIZE, "i2c%d", i + 1); + object_initialize_child(obj, name, &s->i2c[i], TYPE_IMX_I2C); + } + + /* + * UART + */ + for (i = 0; i < FSL_IMX7_NUM_UARTS; i++) { + snprintf(name, NAME_SIZE, "uart%d", i); + object_initialize_child(obj, name, &s->uart[i], TYPE_IMX_SERIAL); + } + + /* + * Ethernet + */ + for (i = 0; i < FSL_IMX7_NUM_ETHS; i++) { + snprintf(name, NAME_SIZE, "eth%d", i); + object_initialize_child(obj, name, &s->eth[i], TYPE_IMX_ENET); + } + + /* + * SDHCI + */ + for (i = 0; i < FSL_IMX7_NUM_USDHCS; i++) { + snprintf(name, NAME_SIZE, "usdhc%d", i); + object_initialize_child(obj, name, &s->usdhc[i], TYPE_IMX_USDHC); + } + + /* + * SNVS + */ + object_initialize_child(obj, "snvs", &s->snvs, TYPE_IMX7_SNVS); + + /* + * Watchdog + */ + for (i = 0; i < FSL_IMX7_NUM_WDTS; i++) { + snprintf(name, NAME_SIZE, "wdt%d", i); + object_initialize_child(obj, name, &s->wdt[i], TYPE_IMX2_WDT); + } + + /* + * GPR + */ + object_initialize_child(obj, "gpr", &s->gpr, TYPE_IMX7_GPR); + + object_initialize_child(obj, "pcie", &s->pcie, TYPE_DESIGNWARE_PCIE_HOST); + + for (i = 0; i < FSL_IMX7_NUM_USBS; i++) { + snprintf(name, NAME_SIZE, "usb%d", i); + object_initialize_child(obj, name, &s->usb[i], TYPE_CHIPIDEA); + } +} + +static void fsl_imx7_realize(DeviceState *dev, Error **errp) +{ + MachineState *ms = MACHINE(qdev_get_machine()); + FslIMX7State *s = FSL_IMX7(dev); + Object *o; + int i; + qemu_irq irq; + char name[NAME_SIZE]; + unsigned int smp_cpus = ms->smp.cpus; + + if (smp_cpus > FSL_IMX7_NUM_CPUS) { + error_setg(errp, "%s: Only %d CPUs are supported (%d requested)", + TYPE_FSL_IMX7, FSL_IMX7_NUM_CPUS, smp_cpus); + return; + } + + for (i = 0; i < smp_cpus; i++) { + o = OBJECT(&s->cpu[i]); + + 
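+        /* expose PSCI through the SMC conduit so boot code can start the secondaries */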
object_property_set_int(o, "psci-conduit", QEMU_PSCI_CONDUIT_SMC, + &error_abort); + + /* On uniprocessor, the CBAR is set to 0 */ + if (smp_cpus > 1) { + object_property_set_int(o, "reset-cbar", FSL_IMX7_A7MPCORE_ADDR, + &error_abort); + } + + if (i) { + /* Secondary CPUs start in PSCI powered-down state */ + object_property_set_bool(o, "start-powered-off", true, + &error_abort); + } + + qdev_realize(DEVICE(o), NULL, &error_abort); + } + + /* + * A7MPCORE + */ + object_property_set_int(OBJECT(&s->a7mpcore), "num-cpu", smp_cpus, + &error_abort); + object_property_set_int(OBJECT(&s->a7mpcore), "num-irq", + FSL_IMX7_MAX_IRQ + GIC_INTERNAL, &error_abort); + + sysbus_realize(SYS_BUS_DEVICE(&s->a7mpcore), &error_abort); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->a7mpcore), 0, FSL_IMX7_A7MPCORE_ADDR); + + for (i = 0; i < smp_cpus; i++) { + SysBusDevice *sbd = SYS_BUS_DEVICE(&s->a7mpcore); + DeviceState *d = DEVICE(qemu_get_cpu(i)); + + irq = qdev_get_gpio_in(d, ARM_CPU_IRQ); + sysbus_connect_irq(sbd, i, irq); + irq = qdev_get_gpio_in(d, ARM_CPU_FIQ); + sysbus_connect_irq(sbd, i + smp_cpus, irq); + irq = qdev_get_gpio_in(d, ARM_CPU_VIRQ); + sysbus_connect_irq(sbd, i + 2 * smp_cpus, irq); + irq = qdev_get_gpio_in(d, ARM_CPU_VFIQ); + sysbus_connect_irq(sbd, i + 3 * smp_cpus, irq); + } + + /* + * A7MPCORE DAP + */ + create_unimplemented_device("a7mpcore-dap", FSL_IMX7_A7MPCORE_DAP_ADDR, + 0x100000); + + /* + * GPT1, 2, 3, 4 + */ + for (i = 0; i < FSL_IMX7_NUM_GPTS; i++) { + static const hwaddr FSL_IMX7_GPTn_ADDR[FSL_IMX7_NUM_GPTS] = { + FSL_IMX7_GPT1_ADDR, + FSL_IMX7_GPT2_ADDR, + FSL_IMX7_GPT3_ADDR, + FSL_IMX7_GPT4_ADDR, + }; + + s->gpt[i].ccm = IMX_CCM(&s->ccm); + sysbus_realize(SYS_BUS_DEVICE(&s->gpt[i]), &error_abort); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->gpt[i]), 0, FSL_IMX7_GPTn_ADDR[i]); + } + + for (i = 0; i < FSL_IMX7_NUM_GPIOS; i++) { + static const hwaddr FSL_IMX7_GPIOn_ADDR[FSL_IMX7_NUM_GPIOS] = { + FSL_IMX7_GPIO1_ADDR, + FSL_IMX7_GPIO2_ADDR, + FSL_IMX7_GPIO3_ADDR, + FSL_IMX7_GPIO4_ADDR, + FSL_IMX7_GPIO5_ADDR, + FSL_IMX7_GPIO6_ADDR, + FSL_IMX7_GPIO7_ADDR, + }; + + sysbus_realize(SYS_BUS_DEVICE(&s->gpio[i]), &error_abort); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->gpio[i]), 0, FSL_IMX7_GPIOn_ADDR[i]); + } + + /* + * IOMUXC and IOMUXC_LPSR + */ + for (i = 0; i < FSL_IMX7_NUM_IOMUXCS; i++) { + static const hwaddr FSL_IMX7_IOMUXCn_ADDR[FSL_IMX7_NUM_IOMUXCS] = { + FSL_IMX7_IOMUXC_ADDR, + FSL_IMX7_IOMUXC_LPSR_ADDR, + }; + + snprintf(name, NAME_SIZE, "iomuxc%d", i); + create_unimplemented_device(name, FSL_IMX7_IOMUXCn_ADDR[i], + FSL_IMX7_IOMUXCn_SIZE); + } + + /* + * CCM + */ + sysbus_realize(SYS_BUS_DEVICE(&s->ccm), &error_abort); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->ccm), 0, FSL_IMX7_CCM_ADDR); + + /* + * Analog + */ + sysbus_realize(SYS_BUS_DEVICE(&s->analog), &error_abort); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->analog), 0, FSL_IMX7_ANALOG_ADDR); + + /* + * GPCv2 + */ + sysbus_realize(SYS_BUS_DEVICE(&s->gpcv2), &error_abort); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->gpcv2), 0, FSL_IMX7_GPC_ADDR); + + /* Initialize all ECSPI */ + for (i = 0; i < FSL_IMX7_NUM_ECSPIS; i++) { + static const hwaddr FSL_IMX7_SPIn_ADDR[FSL_IMX7_NUM_ECSPIS] = { + FSL_IMX7_ECSPI1_ADDR, + FSL_IMX7_ECSPI2_ADDR, + FSL_IMX7_ECSPI3_ADDR, + FSL_IMX7_ECSPI4_ADDR, + }; + + static const int FSL_IMX7_SPIn_IRQ[FSL_IMX7_NUM_ECSPIS] = { + FSL_IMX7_ECSPI1_IRQ, + FSL_IMX7_ECSPI2_IRQ, + FSL_IMX7_ECSPI3_IRQ, + FSL_IMX7_ECSPI4_IRQ, + }; + + /* Initialize the SPI */ + sysbus_realize(SYS_BUS_DEVICE(&s->spi[i]), &error_abort); + 
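+        /* map the ECSPI registers and wire the IRQ through the A7MPCORE GIC */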
sysbus_mmio_map(SYS_BUS_DEVICE(&s->spi[i]), 0, + FSL_IMX7_SPIn_ADDR[i]); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->spi[i]), 0, + qdev_get_gpio_in(DEVICE(&s->a7mpcore), + FSL_IMX7_SPIn_IRQ[i])); + } + + for (i = 0; i < FSL_IMX7_NUM_I2CS; i++) { + static const hwaddr FSL_IMX7_I2Cn_ADDR[FSL_IMX7_NUM_I2CS] = { + FSL_IMX7_I2C1_ADDR, + FSL_IMX7_I2C2_ADDR, + FSL_IMX7_I2C3_ADDR, + FSL_IMX7_I2C4_ADDR, + }; + + static const int FSL_IMX7_I2Cn_IRQ[FSL_IMX7_NUM_I2CS] = { + FSL_IMX7_I2C1_IRQ, + FSL_IMX7_I2C2_IRQ, + FSL_IMX7_I2C3_IRQ, + FSL_IMX7_I2C4_IRQ, + }; + + sysbus_realize(SYS_BUS_DEVICE(&s->i2c[i]), &error_abort); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->i2c[i]), 0, FSL_IMX7_I2Cn_ADDR[i]); + + sysbus_connect_irq(SYS_BUS_DEVICE(&s->i2c[i]), 0, + qdev_get_gpio_in(DEVICE(&s->a7mpcore), + FSL_IMX7_I2Cn_IRQ[i])); + } + + /* + * UART + */ + for (i = 0; i < FSL_IMX7_NUM_UARTS; i++) { + static const hwaddr FSL_IMX7_UARTn_ADDR[FSL_IMX7_NUM_UARTS] = { + FSL_IMX7_UART1_ADDR, + FSL_IMX7_UART2_ADDR, + FSL_IMX7_UART3_ADDR, + FSL_IMX7_UART4_ADDR, + FSL_IMX7_UART5_ADDR, + FSL_IMX7_UART6_ADDR, + FSL_IMX7_UART7_ADDR, + }; + + static const int FSL_IMX7_UARTn_IRQ[FSL_IMX7_NUM_UARTS] = { + FSL_IMX7_UART1_IRQ, + FSL_IMX7_UART2_IRQ, + FSL_IMX7_UART3_IRQ, + FSL_IMX7_UART4_IRQ, + FSL_IMX7_UART5_IRQ, + FSL_IMX7_UART6_IRQ, + FSL_IMX7_UART7_IRQ, + }; + + + qdev_prop_set_chr(DEVICE(&s->uart[i]), "chardev", serial_hd(i)); + + sysbus_realize(SYS_BUS_DEVICE(&s->uart[i]), &error_abort); + + sysbus_mmio_map(SYS_BUS_DEVICE(&s->uart[i]), 0, FSL_IMX7_UARTn_ADDR[i]); + + irq = qdev_get_gpio_in(DEVICE(&s->a7mpcore), FSL_IMX7_UARTn_IRQ[i]); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->uart[i]), 0, irq); + } + + /* + * Ethernet + */ + for (i = 0; i < FSL_IMX7_NUM_ETHS; i++) { + static const hwaddr FSL_IMX7_ENETn_ADDR[FSL_IMX7_NUM_ETHS] = { + FSL_IMX7_ENET1_ADDR, + FSL_IMX7_ENET2_ADDR, + }; + + object_property_set_uint(OBJECT(&s->eth[i]), "phy-num", + s->phy_num[i], &error_abort); + object_property_set_uint(OBJECT(&s->eth[i]), "tx-ring-num", + FSL_IMX7_ETH_NUM_TX_RINGS, &error_abort); + qdev_set_nic_properties(DEVICE(&s->eth[i]), &nd_table[i]); + sysbus_realize(SYS_BUS_DEVICE(&s->eth[i]), &error_abort); + + sysbus_mmio_map(SYS_BUS_DEVICE(&s->eth[i]), 0, FSL_IMX7_ENETn_ADDR[i]); + + irq = qdev_get_gpio_in(DEVICE(&s->a7mpcore), FSL_IMX7_ENET_IRQ(i, 0)); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->eth[i]), 0, irq); + irq = qdev_get_gpio_in(DEVICE(&s->a7mpcore), FSL_IMX7_ENET_IRQ(i, 3)); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->eth[i]), 1, irq); + } + + /* + * USDHC + */ + for (i = 0; i < FSL_IMX7_NUM_USDHCS; i++) { + static const hwaddr FSL_IMX7_USDHCn_ADDR[FSL_IMX7_NUM_USDHCS] = { + FSL_IMX7_USDHC1_ADDR, + FSL_IMX7_USDHC2_ADDR, + FSL_IMX7_USDHC3_ADDR, + }; + + static const int FSL_IMX7_USDHCn_IRQ[FSL_IMX7_NUM_USDHCS] = { + FSL_IMX7_USDHC1_IRQ, + FSL_IMX7_USDHC2_IRQ, + FSL_IMX7_USDHC3_IRQ, + }; + + object_property_set_uint(OBJECT(&s->usdhc[i]), "vendor", + SDHCI_VENDOR_IMX, &error_abort); + sysbus_realize(SYS_BUS_DEVICE(&s->usdhc[i]), &error_abort); + + sysbus_mmio_map(SYS_BUS_DEVICE(&s->usdhc[i]), 0, + FSL_IMX7_USDHCn_ADDR[i]); + + irq = qdev_get_gpio_in(DEVICE(&s->a7mpcore), FSL_IMX7_USDHCn_IRQ[i]); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->usdhc[i]), 0, irq); + } + + /* + * SNVS + */ + sysbus_realize(SYS_BUS_DEVICE(&s->snvs), &error_abort); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->snvs), 0, FSL_IMX7_SNVS_ADDR); + + /* + * SRC + */ + create_unimplemented_device("src", FSL_IMX7_SRC_ADDR, FSL_IMX7_SRC_SIZE); + + /* + * Watchdog + */ + for (i = 0; i < 
FSL_IMX7_NUM_WDTS; i++) { + static const hwaddr FSL_IMX7_WDOGn_ADDR[FSL_IMX7_NUM_WDTS] = { + FSL_IMX7_WDOG1_ADDR, + FSL_IMX7_WDOG2_ADDR, + FSL_IMX7_WDOG3_ADDR, + FSL_IMX7_WDOG4_ADDR, + }; + static const int FSL_IMX7_WDOGn_IRQ[FSL_IMX7_NUM_WDTS] = { + FSL_IMX7_WDOG1_IRQ, + FSL_IMX7_WDOG2_IRQ, + FSL_IMX7_WDOG3_IRQ, + FSL_IMX7_WDOG4_IRQ, + }; + + object_property_set_bool(OBJECT(&s->wdt[i]), "pretimeout-support", + true, &error_abort); + sysbus_realize(SYS_BUS_DEVICE(&s->wdt[i]), &error_abort); + + sysbus_mmio_map(SYS_BUS_DEVICE(&s->wdt[i]), 0, FSL_IMX7_WDOGn_ADDR[i]); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->wdt[i]), 0, + qdev_get_gpio_in(DEVICE(&s->a7mpcore), + FSL_IMX7_WDOGn_IRQ[i])); + } + + /* + * SDMA + */ + create_unimplemented_device("sdma", FSL_IMX7_SDMA_ADDR, FSL_IMX7_SDMA_SIZE); + + /* + * CAAM + */ + create_unimplemented_device("caam", FSL_IMX7_CAAM_ADDR, FSL_IMX7_CAAM_SIZE); + + /* + * PWM + */ + create_unimplemented_device("pwm1", FSL_IMX7_PWM1_ADDR, FSL_IMX7_PWMn_SIZE); + create_unimplemented_device("pwm2", FSL_IMX7_PWM2_ADDR, FSL_IMX7_PWMn_SIZE); + create_unimplemented_device("pwm3", FSL_IMX7_PWM3_ADDR, FSL_IMX7_PWMn_SIZE); + create_unimplemented_device("pwm4", FSL_IMX7_PWM4_ADDR, FSL_IMX7_PWMn_SIZE); + + /* + * CAN + */ + create_unimplemented_device("can1", FSL_IMX7_CAN1_ADDR, FSL_IMX7_CANn_SIZE); + create_unimplemented_device("can2", FSL_IMX7_CAN2_ADDR, FSL_IMX7_CANn_SIZE); + + /* + * SAI (Audio SSI (Synchronous Serial Interface)) + */ + create_unimplemented_device("sai1", FSL_IMX7_SAI1_ADDR, FSL_IMX7_SAIn_SIZE); + create_unimplemented_device("sai2", FSL_IMX7_SAI2_ADDR, FSL_IMX7_SAIn_SIZE); + create_unimplemented_device("sai3", FSL_IMX7_SAI3_ADDR, FSL_IMX7_SAIn_SIZE); + + /* + * OCOTP + */ + create_unimplemented_device("ocotp", FSL_IMX7_OCOTP_ADDR, + FSL_IMX7_OCOTP_SIZE); + + sysbus_realize(SYS_BUS_DEVICE(&s->gpr), &error_abort); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->gpr), 0, FSL_IMX7_GPR_ADDR); + + sysbus_realize(SYS_BUS_DEVICE(&s->pcie), &error_abort); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->pcie), 0, FSL_IMX7_PCIE_REG_ADDR); + + irq = qdev_get_gpio_in(DEVICE(&s->a7mpcore), FSL_IMX7_PCI_INTA_IRQ); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->pcie), 0, irq); + irq = qdev_get_gpio_in(DEVICE(&s->a7mpcore), FSL_IMX7_PCI_INTB_IRQ); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->pcie), 1, irq); + irq = qdev_get_gpio_in(DEVICE(&s->a7mpcore), FSL_IMX7_PCI_INTC_IRQ); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->pcie), 2, irq); + irq = qdev_get_gpio_in(DEVICE(&s->a7mpcore), FSL_IMX7_PCI_INTD_IRQ); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->pcie), 3, irq); + + + for (i = 0; i < FSL_IMX7_NUM_USBS; i++) { + static const hwaddr FSL_IMX7_USBMISCn_ADDR[FSL_IMX7_NUM_USBS] = { + FSL_IMX7_USBMISC1_ADDR, + FSL_IMX7_USBMISC2_ADDR, + FSL_IMX7_USBMISC3_ADDR, + }; + + static const hwaddr FSL_IMX7_USBn_ADDR[FSL_IMX7_NUM_USBS] = { + FSL_IMX7_USB1_ADDR, + FSL_IMX7_USB2_ADDR, + FSL_IMX7_USB3_ADDR, + }; + + static const int FSL_IMX7_USBn_IRQ[FSL_IMX7_NUM_USBS] = { + FSL_IMX7_USB1_IRQ, + FSL_IMX7_USB2_IRQ, + FSL_IMX7_USB3_IRQ, + }; + + sysbus_realize(SYS_BUS_DEVICE(&s->usb[i]), &error_abort); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->usb[i]), 0, + FSL_IMX7_USBn_ADDR[i]); + + irq = qdev_get_gpio_in(DEVICE(&s->a7mpcore), FSL_IMX7_USBn_IRQ[i]); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->usb[i]), 0, irq); + + snprintf(name, NAME_SIZE, "usbmisc%d", i); + create_unimplemented_device(name, FSL_IMX7_USBMISCn_ADDR[i], + FSL_IMX7_USBMISCn_SIZE); + } + + /* + * ADCs + */ + for (i = 0; i < FSL_IMX7_NUM_ADCS; i++) { + static const hwaddr 
FSL_IMX7_ADCn_ADDR[FSL_IMX7_NUM_ADCS] = { + FSL_IMX7_ADC1_ADDR, + FSL_IMX7_ADC2_ADDR, + }; + + snprintf(name, NAME_SIZE, "adc%d", i); + create_unimplemented_device(name, FSL_IMX7_ADCn_ADDR[i], + FSL_IMX7_ADCn_SIZE); + } + + /* + * LCD + */ + create_unimplemented_device("lcdif", FSL_IMX7_LCDIF_ADDR, + FSL_IMX7_LCDIF_SIZE); + + /* + * DMA APBH + */ + create_unimplemented_device("dma-apbh", FSL_IMX7_DMA_APBH_ADDR, + FSL_IMX7_DMA_APBH_SIZE); + /* + * PCIe PHY + */ + create_unimplemented_device("pcie-phy", FSL_IMX7_PCIE_PHY_ADDR, + FSL_IMX7_PCIE_PHY_SIZE); +} + +static Property fsl_imx7_properties[] = { + DEFINE_PROP_UINT32("fec1-phy-num", FslIMX7State, phy_num[0], 0), + DEFINE_PROP_UINT32("fec2-phy-num", FslIMX7State, phy_num[1], 1), + DEFINE_PROP_END_OF_LIST(), +}; + +static void fsl_imx7_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + + device_class_set_props(dc, fsl_imx7_properties); + dc->realize = fsl_imx7_realize; + + /* Reason: Uses serial_hds and nd_table in realize() directly */ + dc->user_creatable = false; + dc->desc = "i.MX7 SOC"; +} + +static const TypeInfo fsl_imx7_type_info = { + .name = TYPE_FSL_IMX7, + .parent = TYPE_DEVICE, + .instance_size = sizeof(FslIMX7State), + .instance_init = fsl_imx7_init, + .class_init = fsl_imx7_class_init, +}; + +static void fsl_imx7_register_types(void) +{ + type_register_static(&fsl_imx7_type_info); +} +type_init(fsl_imx7_register_types) diff --git a/hw/arm/gumstix.c b/hw/arm/gumstix.c new file mode 100644 index 000000000..3a4bc332c --- /dev/null +++ b/hw/arm/gumstix.c @@ -0,0 +1,147 @@ +/* + * Gumstix Platforms + * + * Copyright (c) 2007 by Thorsten Zitterell + * + * Code based on spitz platform by Andrzej Zaborowski + * + * This code is licensed under the GNU GPL v2. + * + * Contributions after 2012-01-13 are licensed under the terms of the + * GNU GPL, version 2 or (at your option) any later version. + */ + +/* + * Example usage: + * + * connex: + * ======= + * create image: + * # dd of=flash bs=1k count=16k if=/dev/zero + * # dd of=flash bs=1k conv=notrunc if=u-boot.bin + * # dd of=flash bs=1k conv=notrunc seek=256 if=rootfs.arm_nofpu.jffs2 + * start it: + * # qemu-system-arm -M connex -pflash flash -monitor null -nographic + * + * verdex: + * ======= + * create image: + * # dd of=flash bs=1k count=32k if=/dev/zero + * # dd of=flash bs=1k conv=notrunc if=u-boot.bin + * # dd of=flash bs=1k conv=notrunc seek=256 if=rootfs.arm_nofpu.jffs2 + * # dd of=flash bs=1k conv=notrunc seek=31744 if=uImage + * start it: + * # qemu-system-arm -M verdex -pflash flash -monitor null -nographic -m 289 + */ + +#include "qemu/osdep.h" +#include "qemu/error-report.h" +#include "hw/arm/pxa.h" +#include "net/net.h" +#include "hw/block/flash.h" +#include "hw/net/smc91c111.h" +#include "hw/boards.h" +#include "exec/address-spaces.h" +#include "sysemu/qtest.h" +#include "cpu.h" + +static const int sector_len = 128 * 1024; + +static void connex_init(MachineState *machine) +{ + PXA2xxState *cpu; + DriveInfo *dinfo; + MemoryRegion *address_space_mem = get_system_memory(); + + uint32_t connex_rom = 0x01000000; + uint32_t connex_ram = 0x04000000; + + cpu = pxa255_init(address_space_mem, connex_ram); + + dinfo = drive_get(IF_PFLASH, 0, 0); + if (!dinfo && !qtest_enabled()) { + error_report("A flash image must be given with the " + "'pflash' parameter"); + exit(1); + } + + if (!pflash_cfi01_register(0x00000000, "connext.rom", connex_rom, + dinfo ? 
blk_by_legacy_dinfo(dinfo) : NULL, + sector_len, 2, 0, 0, 0, 0, 0)) { + error_report("Error registering flash memory"); + exit(1); + } + + /* Interrupt line of NIC is connected to GPIO line 36 */ + smc91c111_init(&nd_table[0], 0x04000300, + qdev_get_gpio_in(cpu->gpio, 36)); +} + +static void verdex_init(MachineState *machine) +{ + PXA2xxState *cpu; + DriveInfo *dinfo; + MemoryRegion *address_space_mem = get_system_memory(); + + uint32_t verdex_rom = 0x02000000; + uint32_t verdex_ram = 0x10000000; + + cpu = pxa270_init(address_space_mem, verdex_ram, machine->cpu_type); + + dinfo = drive_get(IF_PFLASH, 0, 0); + if (!dinfo && !qtest_enabled()) { + error_report("A flash image must be given with the " + "'pflash' parameter"); + exit(1); + } + + if (!pflash_cfi01_register(0x00000000, "verdex.rom", verdex_rom, + dinfo ? blk_by_legacy_dinfo(dinfo) : NULL, + sector_len, 2, 0, 0, 0, 0, 0)) { + error_report("Error registering flash memory"); + exit(1); + } + + /* Interrupt line of NIC is connected to GPIO line 99 */ + smc91c111_init(&nd_table[0], 0x04000300, + qdev_get_gpio_in(cpu->gpio, 99)); +} + +static void connex_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + + mc->desc = "Gumstix Connex (PXA255)"; + mc->init = connex_init; + mc->ignore_memory_transaction_failures = true; +} + +static const TypeInfo connex_type = { + .name = MACHINE_TYPE_NAME("connex"), + .parent = TYPE_MACHINE, + .class_init = connex_class_init, +}; + +static void verdex_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + + mc->desc = "Gumstix Verdex (PXA270)"; + mc->init = verdex_init; + mc->ignore_memory_transaction_failures = true; + mc->default_cpu_type = ARM_CPU_TYPE_NAME("pxa270-c0"); +} + +static const TypeInfo verdex_type = { + .name = MACHINE_TYPE_NAME("verdex"), + .parent = TYPE_MACHINE, + .class_init = verdex_class_init, +}; + +static void gumstix_machine_init(void) +{ + type_register_static(&connex_type); + type_register_static(&verdex_type); +} + +type_init(gumstix_machine_init) diff --git a/hw/arm/highbank.c b/hw/arm/highbank.c new file mode 100644 index 000000000..c3cb315db --- /dev/null +++ b/hw/arm/highbank.c @@ -0,0 +1,459 @@ +/* + * Calxeda Highbank SoC emulation + * + * Copyright (c) 2010-2012 Calxeda + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. 
+ * + */ + +#include "qemu/osdep.h" +#include "qemu-common.h" +#include "qemu/datadir.h" +#include "qapi/error.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "hw/arm/boot.h" +#include "hw/loader.h" +#include "net/net.h" +#include "sysemu/runstate.h" +#include "sysemu/sysemu.h" +#include "hw/boards.h" +#include "qemu/error-report.h" +#include "hw/char/pl011.h" +#include "hw/ide/ahci.h" +#include "hw/cpu/a9mpcore.h" +#include "hw/cpu/a15mpcore.h" +#include "qemu/log.h" +#include "qom/object.h" +#include "cpu.h" + +#define SMP_BOOT_ADDR 0x100 +#define SMP_BOOT_REG 0x40 +#define MPCORE_PERIPHBASE 0xfff10000 + +#define MVBAR_ADDR 0x200 +#define BOARD_SETUP_ADDR (MVBAR_ADDR + 8 * sizeof(uint32_t)) + +#define NIRQ_GIC 160 + +/* Board init. */ + +static void hb_write_board_setup(ARMCPU *cpu, + const struct arm_boot_info *info) +{ + arm_write_secure_board_setup_dummy_smc(cpu, info, MVBAR_ADDR); +} + +static void hb_write_secondary(ARMCPU *cpu, const struct arm_boot_info *info) +{ + int n; + uint32_t smpboot[] = { + 0xee100fb0, /* mrc p15, 0, r0, c0, c0, 5 - read current core id */ + 0xe210000f, /* ands r0, r0, #0x0f */ + 0xe3a03040, /* mov r3, #0x40 - jump address is 0x40 + 0x10 * core id */ + 0xe0830200, /* add r0, r3, r0, lsl #4 */ + 0xe59f2024, /* ldr r2, privbase */ + 0xe3a01001, /* mov r1, #1 */ + 0xe5821100, /* str r1, [r2, #256] - set GICC_CTLR.Enable */ + 0xe3a010ff, /* mov r1, #0xff */ + 0xe5821104, /* str r1, [r2, #260] - set GICC_PMR.Priority to 0xff */ + 0xf57ff04f, /* dsb */ + 0xe320f003, /* wfi */ + 0xe5901000, /* ldr r1, [r0] */ + 0xe1110001, /* tst r1, r1 */ + 0x0afffffb, /* beq */ + 0xe12fff11, /* bx r1 */ + MPCORE_PERIPHBASE /* privbase: MPCore peripheral base address. */ + }; + for (n = 0; n < ARRAY_SIZE(smpboot); n++) { + smpboot[n] = tswap32(smpboot[n]); + } + rom_add_blob_fixed_as("smpboot", smpboot, sizeof(smpboot), SMP_BOOT_ADDR, + arm_boot_address_space(cpu, info)); +} + +static void hb_reset_secondary(ARMCPU *cpu, const struct arm_boot_info *info) +{ + CPUARMState *env = &cpu->env; + + switch (info->nb_cpus) { + case 4: + address_space_stl_notdirty(&address_space_memory, + SMP_BOOT_REG + 0x30, 0, + MEMTXATTRS_UNSPECIFIED, NULL); + /* fallthrough */ + case 3: + address_space_stl_notdirty(&address_space_memory, + SMP_BOOT_REG + 0x20, 0, + MEMTXATTRS_UNSPECIFIED, NULL); + /* fallthrough */ + case 2: + address_space_stl_notdirty(&address_space_memory, + SMP_BOOT_REG + 0x10, 0, + MEMTXATTRS_UNSPECIFIED, NULL); + env->regs[15] = SMP_BOOT_ADDR; + break; + default: + break; + } +} + +#define NUM_REGS 0x200 +static void hb_regs_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + uint32_t *regs = opaque; + + if (offset == 0xf00) { + if (value == 1 || value == 2) { + qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); + } else if (value == 3) { + qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN); + } + } + + if (offset / 4 >= NUM_REGS) { + qemu_log_mask(LOG_GUEST_ERROR, + "highbank: bad write offset 0x%" HWADDR_PRIx "\n", offset); + return; + } + regs[offset / 4] = value; +} + +static uint64_t hb_regs_read(void *opaque, hwaddr offset, + unsigned size) +{ + uint32_t value; + uint32_t *regs = opaque; + + if (offset / 4 >= NUM_REGS) { + qemu_log_mask(LOG_GUEST_ERROR, + "highbank: bad read offset 0x%" HWADDR_PRIx "\n", offset); + return 0; + } + value = regs[offset / 4]; + + if ((offset == 0x100) || (offset == 0x108) || (offset == 0x10C)) { + value |= 0x30000000; + } + + return value; +} + +static const MemoryRegionOps hb_mem_ops = 
{ + .read = hb_regs_read, + .write = hb_regs_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +#define TYPE_HIGHBANK_REGISTERS "highbank-regs" +OBJECT_DECLARE_SIMPLE_TYPE(HighbankRegsState, HIGHBANK_REGISTERS) + +struct HighbankRegsState { + /*< private >*/ + SysBusDevice parent_obj; + /*< public >*/ + + MemoryRegion iomem; + uint32_t regs[NUM_REGS]; +}; + +static const VMStateDescription vmstate_highbank_regs = { + .name = "highbank-regs", + .version_id = 0, + .minimum_version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_UINT32_ARRAY(regs, HighbankRegsState, NUM_REGS), + VMSTATE_END_OF_LIST(), + }, +}; + +static void highbank_regs_reset(DeviceState *dev) +{ + HighbankRegsState *s = HIGHBANK_REGISTERS(dev); + + s->regs[0x40] = 0x05F20121; + s->regs[0x41] = 0x2; + s->regs[0x42] = 0x05F30121; + s->regs[0x43] = 0x05F40121; +} + +static void highbank_regs_init(Object *obj) +{ + HighbankRegsState *s = HIGHBANK_REGISTERS(obj); + SysBusDevice *dev = SYS_BUS_DEVICE(obj); + + memory_region_init_io(&s->iomem, obj, &hb_mem_ops, s->regs, + "highbank_regs", 0x1000); + sysbus_init_mmio(dev, &s->iomem); +} + +static void highbank_regs_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->desc = "Calxeda Highbank registers"; + dc->vmsd = &vmstate_highbank_regs; + dc->reset = highbank_regs_reset; +} + +static const TypeInfo highbank_regs_info = { + .name = TYPE_HIGHBANK_REGISTERS, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(HighbankRegsState), + .instance_init = highbank_regs_init, + .class_init = highbank_regs_class_init, +}; + +static void highbank_regs_register_types(void) +{ + type_register_static(&highbank_regs_info); +} + +type_init(highbank_regs_register_types) + +static struct arm_boot_info highbank_binfo; + +enum cxmachines { + CALXEDA_HIGHBANK, + CALXEDA_MIDWAY, +}; + +/* ram_size must be set to match the upper bound of memory in the + * device tree (linux/arch/arm/boot/dts/highbank.dts), which is + * normally 0xff900000 or -m 4089. When running this board on a + * 32-bit host, set the reg value of memory to 0xf7ff00000 in the + * device tree and pass -m 2047 to QEMU. 
+ */ +static void calxeda_init(MachineState *machine, enum cxmachines machine_id) +{ + DeviceState *dev = NULL; + SysBusDevice *busdev; + qemu_irq pic[128]; + int n; + unsigned int smp_cpus = machine->smp.cpus; + qemu_irq cpu_irq[4]; + qemu_irq cpu_fiq[4]; + qemu_irq cpu_virq[4]; + qemu_irq cpu_vfiq[4]; + MemoryRegion *sysram; + MemoryRegion *sysmem; + char *sysboot_filename; + + switch (machine_id) { + case CALXEDA_HIGHBANK: + machine->cpu_type = ARM_CPU_TYPE_NAME("cortex-a9"); + break; + case CALXEDA_MIDWAY: + machine->cpu_type = ARM_CPU_TYPE_NAME("cortex-a15"); + break; + default: + assert(0); + } + + for (n = 0; n < smp_cpus; n++) { + Object *cpuobj; + ARMCPU *cpu; + + cpuobj = object_new(machine->cpu_type); + cpu = ARM_CPU(cpuobj); + + object_property_set_int(cpuobj, "psci-conduit", QEMU_PSCI_CONDUIT_SMC, + &error_abort); + + if (n) { + /* Secondary CPUs start in PSCI powered-down state */ + object_property_set_bool(cpuobj, "start-powered-off", true, + &error_abort); + } + + if (object_property_find(cpuobj, "reset-cbar")) { + object_property_set_int(cpuobj, "reset-cbar", MPCORE_PERIPHBASE, + &error_abort); + } + qdev_realize(DEVICE(cpuobj), NULL, &error_fatal); + cpu_irq[n] = qdev_get_gpio_in(DEVICE(cpu), ARM_CPU_IRQ); + cpu_fiq[n] = qdev_get_gpio_in(DEVICE(cpu), ARM_CPU_FIQ); + cpu_virq[n] = qdev_get_gpio_in(DEVICE(cpu), ARM_CPU_VIRQ); + cpu_vfiq[n] = qdev_get_gpio_in(DEVICE(cpu), ARM_CPU_VFIQ); + } + + sysmem = get_system_memory(); + /* SDRAM at address zero. */ + memory_region_add_subregion(sysmem, 0, machine->ram); + + sysram = g_new(MemoryRegion, 1); + memory_region_init_ram(sysram, NULL, "highbank.sysram", 0x8000, + &error_fatal); + memory_region_add_subregion(sysmem, 0xfff88000, sysram); + if (machine->firmware != NULL) { + sysboot_filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, machine->firmware); + if (sysboot_filename != NULL) { + if (load_image_targphys(sysboot_filename, 0xfff88000, 0x8000) < 0) { + error_report("Unable to load %s", machine->firmware); + exit(1); + } + g_free(sysboot_filename); + } else { + error_report("Unable to find %s", machine->firmware); + exit(1); + } + } + + switch (machine_id) { + case CALXEDA_HIGHBANK: + dev = qdev_new("l2x0"); + busdev = SYS_BUS_DEVICE(dev); + sysbus_realize_and_unref(busdev, &error_fatal); + sysbus_mmio_map(busdev, 0, 0xfff12000); + + dev = qdev_new(TYPE_A9MPCORE_PRIV); + break; + case CALXEDA_MIDWAY: + dev = qdev_new(TYPE_A15MPCORE_PRIV); + break; + } + qdev_prop_set_uint32(dev, "num-cpu", smp_cpus); + qdev_prop_set_uint32(dev, "num-irq", NIRQ_GIC); + busdev = SYS_BUS_DEVICE(dev); + sysbus_realize_and_unref(busdev, &error_fatal); + sysbus_mmio_map(busdev, 0, MPCORE_PERIPHBASE); + for (n = 0; n < smp_cpus; n++) { + sysbus_connect_irq(busdev, n, cpu_irq[n]); + sysbus_connect_irq(busdev, n + smp_cpus, cpu_fiq[n]); + sysbus_connect_irq(busdev, n + 2 * smp_cpus, cpu_virq[n]); + sysbus_connect_irq(busdev, n + 3 * smp_cpus, cpu_vfiq[n]); + } + + for (n = 0; n < 128; n++) { + pic[n] = qdev_get_gpio_in(dev, n); + } + + dev = qdev_new("sp804"); + qdev_prop_set_uint32(dev, "freq0", 150000000); + qdev_prop_set_uint32(dev, "freq1", 150000000); + busdev = SYS_BUS_DEVICE(dev); + sysbus_realize_and_unref(busdev, &error_fatal); + sysbus_mmio_map(busdev, 0, 0xfff34000); + sysbus_connect_irq(busdev, 0, pic[18]); + pl011_create(0xfff36000, pic[20], serial_hd(0)); + + dev = qdev_new(TYPE_HIGHBANK_REGISTERS); + busdev = SYS_BUS_DEVICE(dev); + sysbus_realize_and_unref(busdev, &error_fatal); + sysbus_mmio_map(busdev, 0, 0xfff3c000); + + 
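+    /* The remaining fixed-address peripherals are stock ARM PrimeCell
+     * models: four PL061 GPIO banks, a PL031 RTC and a PL022 SSP, each
+     * wired to its input on the GIC, followed by the AHCI SATA
+     * controller and the XGMAC ethernet devices.
+     */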
sysbus_create_simple("pl061", 0xfff30000, pic[14]); + sysbus_create_simple("pl061", 0xfff31000, pic[15]); + sysbus_create_simple("pl061", 0xfff32000, pic[16]); + sysbus_create_simple("pl061", 0xfff33000, pic[17]); + sysbus_create_simple("pl031", 0xfff35000, pic[19]); + sysbus_create_simple("pl022", 0xfff39000, pic[23]); + + sysbus_create_simple(TYPE_SYSBUS_AHCI, 0xffe08000, pic[83]); + + if (nd_table[0].used) { + qemu_check_nic_model(&nd_table[0], "xgmac"); + dev = qdev_new("xgmac"); + qdev_set_nic_properties(dev, &nd_table[0]); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, 0xfff50000); + sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, pic[77]); + sysbus_connect_irq(SYS_BUS_DEVICE(dev), 1, pic[78]); + sysbus_connect_irq(SYS_BUS_DEVICE(dev), 2, pic[79]); + + qemu_check_nic_model(&nd_table[1], "xgmac"); + dev = qdev_new("xgmac"); + qdev_set_nic_properties(dev, &nd_table[1]); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, 0xfff51000); + sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, pic[80]); + sysbus_connect_irq(SYS_BUS_DEVICE(dev), 1, pic[81]); + sysbus_connect_irq(SYS_BUS_DEVICE(dev), 2, pic[82]); + } + + /* TODO create and connect IDE devices for ide_drive_get() */ + + highbank_binfo.ram_size = machine->ram_size; + /* highbank requires a dtb in order to boot, and the dtb will override + * the board ID. The following value is ignored, so set it to -1 to be + * clear that the value is meaningless. + */ + highbank_binfo.board_id = -1; + highbank_binfo.nb_cpus = smp_cpus; + highbank_binfo.loader_start = 0; + highbank_binfo.write_secondary_boot = hb_write_secondary; + highbank_binfo.secondary_cpu_reset_hook = hb_reset_secondary; + highbank_binfo.board_setup_addr = BOARD_SETUP_ADDR; + highbank_binfo.write_board_setup = hb_write_board_setup; + highbank_binfo.secure_board_setup = true; + + arm_load_kernel(ARM_CPU(first_cpu), machine, &highbank_binfo); +} + +static void highbank_init(MachineState *machine) +{ + calxeda_init(machine, CALXEDA_HIGHBANK); +} + +static void midway_init(MachineState *machine) +{ + calxeda_init(machine, CALXEDA_MIDWAY); +} + +static void highbank_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + + mc->desc = "Calxeda Highbank (ECX-1000)"; + mc->init = highbank_init; + mc->block_default_type = IF_IDE; + mc->units_per_default_bus = 1; + mc->max_cpus = 4; + mc->ignore_memory_transaction_failures = true; + mc->default_ram_id = "highbank.dram"; +} + +static const TypeInfo highbank_type = { + .name = MACHINE_TYPE_NAME("highbank"), + .parent = TYPE_MACHINE, + .class_init = highbank_class_init, +}; + +static void midway_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + + mc->desc = "Calxeda Midway (ECX-2000)"; + mc->init = midway_init; + mc->block_default_type = IF_IDE; + mc->units_per_default_bus = 1; + mc->max_cpus = 4; + mc->ignore_memory_transaction_failures = true; + mc->default_ram_id = "highbank.dram"; +} + +static const TypeInfo midway_type = { + .name = MACHINE_TYPE_NAME("midway"), + .parent = TYPE_MACHINE, + .class_init = midway_class_init, +}; + +static void calxeda_machines_init(void) +{ + type_register_static(&highbank_type); + type_register_static(&midway_type); +} + +type_init(calxeda_machines_init) diff --git a/hw/arm/imx25_pdk.c b/hw/arm/imx25_pdk.c new file mode 100644 index 000000000..bd16acd4d --- /dev/null +++ b/hw/arm/imx25_pdk.c @@ -0,0 +1,152 @@ +/* + * Copyright (c) 2013 
Jean-Christophe Dubois
+ *
+ * PDK Board System emulation.
+ *
+ * Based on hw/arm/kzm.c
+ *
+ * Copyright (c) 2008 OKL and 2011 NICTA
+ * Written by Hans at OK-Labs
+ * Updated by Peter Chubb.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "hw/qdev-properties.h"
+#include "hw/arm/fsl-imx25.h"
+#include "hw/boards.h"
+#include "qemu/error-report.h"
+#include "sysemu/qtest.h"
+#include "hw/i2c/i2c.h"
+#include "qemu/cutils.h"
+
+/* Memory map for PDK Emulation Baseboard:
+ * 0x00000000-0x7fffffff See i.MX25 SOC for support
+ * 0x80000000-0x87ffffff RAM + Alias          EMULATED
+ * 0x90000000-0x9fffffff RAM + Alias          EMULATED
+ * 0xa0000000-0xa7ffffff Flash                IGNORED
+ * 0xa8000000-0xafffffff Flash                IGNORED
+ * 0xb0000000-0xb1ffffff SRAM                 IGNORED
+ * 0xb2000000-0xb3ffffff SRAM                 IGNORED
+ * 0xb4000000-0xb5ffffff CS4                  IGNORED
+ * 0xb6000000-0xb8000fff Reserved             IGNORED
+ * 0xb8001000-0xb8001fff SDRAM CTRL reg       IGNORED
+ * 0xb8002000-0xb8002fff WEIM CTRL reg        IGNORED
+ * 0xb8003000-0xb8003fff M3IF CTRL reg        IGNORED
+ * 0xb8004000-0xb8004fff EMI CTRL reg         IGNORED
+ * 0xb8005000-0xbaffffff Reserved             IGNORED
+ * 0xbb000000-0xbb000fff NAND flash area buf  IGNORED
+ * 0xbb001000-0xbb0011ff NAND flash reserved  IGNORED
+ * 0xbb001200-0xbb001dff Reserved             IGNORED
+ * 0xbb001e00-0xbb001fff NAND flash CTRL reg  IGNORED
+ * 0xbb012000-0xbfffffff Reserved             IGNORED
+ * 0xc0000000-0xffffffff Reserved             IGNORED
+ */
+
+typedef struct IMX25PDK {
+    FslIMX25State soc;
+    MemoryRegion ram_alias;
+} IMX25PDK;
+
+static struct arm_boot_info imx25_pdk_binfo;
+
+static void imx25_pdk_init(MachineState *machine)
+{
+    IMX25PDK *s = g_new0(IMX25PDK, 1);
+    unsigned int ram_size;
+    unsigned int alias_offset;
+    int i;
+
+    object_initialize_child(OBJECT(machine), "soc", &s->soc, TYPE_FSL_IMX25);
+
+    qdev_realize(DEVICE(&s->soc), NULL, &error_fatal);
+
+    /* We need to initialize our memory */
+    if (machine->ram_size > (FSL_IMX25_SDRAM0_SIZE + FSL_IMX25_SDRAM1_SIZE)) {
+        char *sz = size_to_str(FSL_IMX25_SDRAM0_SIZE + FSL_IMX25_SDRAM1_SIZE);
+        error_report("RAM size more than %s is not supported", sz);
+        g_free(sz);
+        exit(EXIT_FAILURE);
+    }
+
+    memory_region_add_subregion(get_system_memory(), FSL_IMX25_SDRAM0_ADDR,
+                                machine->ram);
+
+    /* initialize the alias memory if any */
+    for (i = 0, ram_size = machine->ram_size, alias_offset = 0;
+         (i < 2) && ram_size; i++) {
+        unsigned int size;
+        static const struct {
+            hwaddr addr;
+            unsigned int size;
+        } ram[2] = {
+            { FSL_IMX25_SDRAM0_ADDR, FSL_IMX25_SDRAM0_SIZE },
+            { FSL_IMX25_SDRAM1_ADDR, FSL_IMX25_SDRAM1_SIZE },
+        };
+
+        size = MIN(ram_size, ram[i].size);
+
+        ram_size -= size;
+
+        if (size < ram[i].size) {
+            memory_region_init_alias(&s->ram_alias, NULL, "ram.alias",
+                                     machine->ram,
+                                     alias_offset, ram[i].size - size);
+            memory_region_add_subregion(get_system_memory(),
+                                        ram[i].addr + size, &s->ram_alias);
+        }
+
+        alias_offset += ram[i].size;
+    }
+
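+    /* Describe the board to the ARM boot code: the kernel is loaded at
+     * the base of SDRAM bank 0 and a single CPU is reported.
+     */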
+    imx25_pdk_binfo.ram_size = machine->ram_size;
+    imx25_pdk_binfo.loader_start = FSL_IMX25_SDRAM0_ADDR;
+    imx25_pdk_binfo.board_id = 1771;
+    imx25_pdk_binfo.nb_cpus = 1;
+
+    for (i = 0; i < FSL_IMX25_NUM_ESDHCS; i++) {
+        BusState *bus;
+        DeviceState *carddev;
+        DriveInfo *di;
+        BlockBackend *blk;
+
+        di = drive_get_next(IF_SD);
+        blk = di ? blk_by_legacy_dinfo(di) : NULL;
+        bus = qdev_get_child_bus(DEVICE(&s->soc.esdhc[i]), "sd-bus");
+        carddev = qdev_new(TYPE_SD_CARD);
+        qdev_prop_set_drive_err(carddev, "drive", blk, &error_fatal);
+        qdev_realize_and_unref(carddev, bus, &error_fatal);
+    }
+
+    /*
+     * We test explicitly for qtest here as it is not done (yet?) in
+     * arm_load_kernel(). Without this the "make check" command would
+     * fail.
+     */
+    if (!qtest_enabled()) {
+        arm_load_kernel(&s->soc.cpu, machine, &imx25_pdk_binfo);
+    }
+}
+
+static void imx25_pdk_machine_init(MachineClass *mc)
+{
+    mc->desc = "ARM i.MX25 PDK board (ARM926)";
+    mc->init = imx25_pdk_init;
+    mc->ignore_memory_transaction_failures = true;
+    mc->default_ram_id = "imx25.ram";
+}
+
+DEFINE_MACHINE("imx25-pdk", imx25_pdk_machine_init)
diff --git a/hw/arm/integratorcp.c b/hw/arm/integratorcp.c
new file mode 100644
index 000000000..16e898595
--- /dev/null
+++ b/hw/arm/integratorcp.c
@@ -0,0 +1,744 @@
+/*
+ * ARM Integrator CP System emulation.
+ *
+ * Copyright (c) 2005-2007 CodeSourcery.
+ * Written by Paul Brook
+ *
+ * This code is licensed under the GPL
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "cpu.h"
+#include "hw/sysbus.h"
+#include "migration/vmstate.h"
+#include "hw/boards.h"
+#include "hw/arm/boot.h"
+#include "hw/misc/arm_integrator_debug.h"
+#include "hw/net/smc91c111.h"
+#include "net/net.h"
+#include "exec/address-spaces.h"
+#include "sysemu/runstate.h"
+#include "sysemu/sysemu.h"
+#include "qemu/log.h"
+#include "qemu/error-report.h"
+#include "hw/char/pl011.h"
+#include "hw/hw.h"
+#include "hw/irq.h"
+#include "hw/sd/sd.h"
+#include "qom/object.h"
+
+#define TYPE_INTEGRATOR_CM "integrator_core"
+OBJECT_DECLARE_SIMPLE_TYPE(IntegratorCMState, INTEGRATOR_CM)
+
+struct IntegratorCMState {
+    /*< private >*/
+    SysBusDevice parent_obj;
+    /*< public >*/
+
+    MemoryRegion iomem;
+    uint32_t memsz;
+    MemoryRegion flash;
+    uint32_t cm_osc;
+    uint32_t cm_ctrl;
+    uint32_t cm_lock;
+    uint32_t cm_auxosc;
+    uint32_t cm_sdram;
+    uint32_t cm_init;
+    uint32_t cm_flags;
+    uint32_t cm_nvflags;
+    uint32_t cm_refcnt_offset;
+    uint32_t int_level;
+    uint32_t irq_enabled;
+    uint32_t fiq_enabled;
+};
+
+static uint8_t integrator_spd[128] = {
+    128, 8, 4, 11, 9, 1, 64, 0, 2, 0xa0, 0xa0, 0, 0, 8, 0, 1,
+    0xe, 4, 0x1c, 1, 2, 0x20, 0xc0, 0, 0, 0, 0, 0x30, 0x28, 0x30, 0x28, 0x40
+};
+
+static const VMStateDescription vmstate_integratorcm = {
+    .name = "integratorcm",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT32(cm_osc, IntegratorCMState),
+        VMSTATE_UINT32(cm_ctrl, IntegratorCMState),
+        VMSTATE_UINT32(cm_lock, IntegratorCMState),
+        VMSTATE_UINT32(cm_auxosc, IntegratorCMState),
+        VMSTATE_UINT32(cm_sdram, IntegratorCMState),
+        VMSTATE_UINT32(cm_init, IntegratorCMState),
+        VMSTATE_UINT32(cm_flags, IntegratorCMState),
+        VMSTATE_UINT32(cm_nvflags, IntegratorCMState),
+        VMSTATE_UINT32(int_level, IntegratorCMState),
+        VMSTATE_UINT32(irq_enabled, IntegratorCMState),
+        VMSTATE_UINT32(fiq_enabled, IntegratorCMState),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+static uint64_t integratorcm_read(void *opaque, hwaddr offset,
+                                  unsigned size)
+{
+    IntegratorCMState *s = opaque;
+    if (offset >=
0x100 && offset < 0x200) { + /* CM_SPD */ + if (offset >= 0x180) + return 0; + return integrator_spd[offset >> 2]; + } + switch (offset >> 2) { + case 0: /* CM_ID */ + return 0x411a3001; + case 1: /* CM_PROC */ + return 0; + case 2: /* CM_OSC */ + return s->cm_osc; + case 3: /* CM_CTRL */ + return s->cm_ctrl; + case 4: /* CM_STAT */ + return 0x00100000; + case 5: /* CM_LOCK */ + if (s->cm_lock == 0xa05f) { + return 0x1a05f; + } else { + return s->cm_lock; + } + case 6: /* CM_LMBUSCNT */ + /* ??? High frequency timer. */ + hw_error("integratorcm_read: CM_LMBUSCNT"); + case 7: /* CM_AUXOSC */ + return s->cm_auxosc; + case 8: /* CM_SDRAM */ + return s->cm_sdram; + case 9: /* CM_INIT */ + return s->cm_init; + case 10: /* CM_REFCNT */ + /* This register, CM_REFCNT, provides a 32-bit count value. + * The count increments at the fixed reference clock frequency of 24MHz + * and can be used as a real-time counter. + */ + return (uint32_t)muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), 24, + 1000) - s->cm_refcnt_offset; + case 12: /* CM_FLAGS */ + return s->cm_flags; + case 14: /* CM_NVFLAGS */ + return s->cm_nvflags; + case 16: /* CM_IRQ_STAT */ + return s->int_level & s->irq_enabled; + case 17: /* CM_IRQ_RSTAT */ + return s->int_level; + case 18: /* CM_IRQ_ENSET */ + return s->irq_enabled; + case 20: /* CM_SOFT_INTSET */ + return s->int_level & 1; + case 24: /* CM_FIQ_STAT */ + return s->int_level & s->fiq_enabled; + case 25: /* CM_FIQ_RSTAT */ + return s->int_level; + case 26: /* CM_FIQ_ENSET */ + return s->fiq_enabled; + case 32: /* CM_VOLTAGE_CTL0 */ + case 33: /* CM_VOLTAGE_CTL1 */ + case 34: /* CM_VOLTAGE_CTL2 */ + case 35: /* CM_VOLTAGE_CTL3 */ + /* ??? Voltage control unimplemented. */ + return 0; + default: + qemu_log_mask(LOG_UNIMP, + "%s: Unimplemented offset 0x%" HWADDR_PRIX "\n", + __func__, offset); + return 0; + } +} + +static void integratorcm_do_remap(IntegratorCMState *s) +{ + /* Sync memory region state with CM_CTRL REMAP bit: + * bit 0 => flash at address 0; bit 1 => RAM + */ + memory_region_set_enabled(&s->flash, !(s->cm_ctrl & 4)); +} + +static void integratorcm_set_ctrl(IntegratorCMState *s, uint32_t value) +{ + if (value & 8) { + qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); + } + if ((s->cm_ctrl ^ value) & 1) { + /* (value & 1) != 0 means the green "MISC LED" is lit. + * We don't have any nice place to display LEDs. printf is a bad + * idea because Linux uses the LED as a heartbeat and the output + * will swamp anything else on the terminal. + */ + } + /* Note that the RESET bit [3] always reads as zero */ + s->cm_ctrl = (s->cm_ctrl & ~5) | (value & 5); + integratorcm_do_remap(s); +} + +static void integratorcm_update(IntegratorCMState *s) +{ + /* ??? The CPU irq/fiq is raised when either the core module or base PIC + are active. */ + if (s->int_level & (s->irq_enabled | s->fiq_enabled)) + hw_error("Core module interrupt\n"); +} + +static void integratorcm_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + IntegratorCMState *s = opaque; + switch (offset >> 2) { + case 2: /* CM_OSC */ + if (s->cm_lock == 0xa05f) + s->cm_osc = value; + break; + case 3: /* CM_CTRL */ + integratorcm_set_ctrl(s, value); + break; + case 5: /* CM_LOCK */ + s->cm_lock = value & 0xffff; + break; + case 7: /* CM_AUXOSC */ + if (s->cm_lock == 0xa05f) + s->cm_auxosc = value; + break; + case 8: /* CM_SDRAM */ + s->cm_sdram = value; + break; + case 9: /* CM_INIT */ + /* ??? This can change the memory bus frequency. 
*/
+        s->cm_init = value;
+        break;
+    case 12: /* CM_FLAGSS */
+        s->cm_flags |= value;
+        break;
+    case 13: /* CM_FLAGSC */
+        s->cm_flags &= ~value;
+        break;
+    case 14: /* CM_NVFLAGSS */
+        s->cm_nvflags |= value;
+        break;
+    case 15: /* CM_NVFLAGSC */
+        s->cm_nvflags &= ~value;
+        break;
+    case 18: /* CM_IRQ_ENSET */
+        s->irq_enabled |= value;
+        integratorcm_update(s);
+        break;
+    case 19: /* CM_IRQ_ENCLR */
+        s->irq_enabled &= ~value;
+        integratorcm_update(s);
+        break;
+    case 20: /* CM_SOFT_INTSET */
+        s->int_level |= (value & 1);
+        integratorcm_update(s);
+        break;
+    case 21: /* CM_SOFT_INTCLR */
+        s->int_level &= ~(value & 1);
+        integratorcm_update(s);
+        break;
+    case 26: /* CM_FIQ_ENSET */
+        s->fiq_enabled |= value;
+        integratorcm_update(s);
+        break;
+    case 27: /* CM_FIQ_ENCLR */
+        s->fiq_enabled &= ~value;
+        integratorcm_update(s);
+        break;
+    case 32: /* CM_VOLTAGE_CTL0 */
+    case 33: /* CM_VOLTAGE_CTL1 */
+    case 34: /* CM_VOLTAGE_CTL2 */
+    case 35: /* CM_VOLTAGE_CTL3 */
+        /* ??? Voltage control unimplemented. */
+        break;
+    default:
+        qemu_log_mask(LOG_UNIMP,
+                      "%s: Unimplemented offset 0x%" HWADDR_PRIX "\n",
+                      __func__, offset);
+        break;
+    }
+}
+
+/* Integrator/CM control registers. */
+
+static const MemoryRegionOps integratorcm_ops = {
+    .read = integratorcm_read,
+    .write = integratorcm_write,
+    .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+static void integratorcm_init(Object *obj)
+{
+    IntegratorCMState *s = INTEGRATOR_CM(obj);
+
+    s->cm_osc = 0x01000048;
+    /* ??? What should the high bits of this value be? */
+    s->cm_auxosc = 0x0007feff;
+    s->cm_sdram = 0x00011122;
+    memcpy(integrator_spd + 73, "QEMU-MEMORY", 11);
+    s->cm_init = 0x00000112;
+    s->cm_refcnt_offset = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), 24,
+                                   1000);
+
+    /* ??? Save/restore. */
+}
+
+static void integratorcm_realize(DeviceState *d, Error **errp)
+{
+    IntegratorCMState *s = INTEGRATOR_CM(d);
+    SysBusDevice *dev = SYS_BUS_DEVICE(d);
+    Error *local_err = NULL;
+
+    memory_region_init_ram(&s->flash, OBJECT(d), "integrator.flash", 0x100000,
+                           &local_err);
+    if (local_err) {
+        error_propagate(errp, local_err);
+        return;
+    }
+
+    memory_region_init_io(&s->iomem, OBJECT(d), &integratorcm_ops, s,
+                          "integratorcm", 0x00800000);
+    sysbus_init_mmio(dev, &s->iomem);
+
+    integratorcm_do_remap(s);
+
+    if (s->memsz >= 256) {
+        integrator_spd[31] = 64;
+        s->cm_sdram |= 0x10;
+    } else if (s->memsz >= 128) {
+        integrator_spd[31] = 32;
+        s->cm_sdram |= 0x0c;
+    } else if (s->memsz >= 64) {
+        integrator_spd[31] = 16;
+        s->cm_sdram |= 0x08;
+    } else if (s->memsz >= 32) {
+        integrator_spd[31] = 4;
+        s->cm_sdram |= 0x04;
+    } else {
+        integrator_spd[31] = 2;
+    }
+}
+
+/* Integrator/CP hardware emulation. */
+/* Primary interrupt controller.
*/ + +#define TYPE_INTEGRATOR_PIC "integrator_pic" +OBJECT_DECLARE_SIMPLE_TYPE(icp_pic_state, INTEGRATOR_PIC) + +struct icp_pic_state { + /*< private >*/ + SysBusDevice parent_obj; + /*< public >*/ + + MemoryRegion iomem; + uint32_t level; + uint32_t irq_enabled; + uint32_t fiq_enabled; + qemu_irq parent_irq; + qemu_irq parent_fiq; +}; + +static const VMStateDescription vmstate_icp_pic = { + .name = "icp_pic", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(level, icp_pic_state), + VMSTATE_UINT32(irq_enabled, icp_pic_state), + VMSTATE_UINT32(fiq_enabled, icp_pic_state), + VMSTATE_END_OF_LIST() + } +}; + +static void icp_pic_update(icp_pic_state *s) +{ + uint32_t flags; + + flags = (s->level & s->irq_enabled); + qemu_set_irq(s->parent_irq, flags != 0); + flags = (s->level & s->fiq_enabled); + qemu_set_irq(s->parent_fiq, flags != 0); +} + +static void icp_pic_set_irq(void *opaque, int irq, int level) +{ + icp_pic_state *s = (icp_pic_state *)opaque; + if (level) + s->level |= 1 << irq; + else + s->level &= ~(1 << irq); + icp_pic_update(s); +} + +static uint64_t icp_pic_read(void *opaque, hwaddr offset, + unsigned size) +{ + icp_pic_state *s = (icp_pic_state *)opaque; + + switch (offset >> 2) { + case 0: /* IRQ_STATUS */ + return s->level & s->irq_enabled; + case 1: /* IRQ_RAWSTAT */ + return s->level; + case 2: /* IRQ_ENABLESET */ + return s->irq_enabled; + case 4: /* INT_SOFTSET */ + return s->level & 1; + case 8: /* FRQ_STATUS */ + return s->level & s->fiq_enabled; + case 9: /* FRQ_RAWSTAT */ + return s->level; + case 10: /* FRQ_ENABLESET */ + return s->fiq_enabled; + case 3: /* IRQ_ENABLECLR */ + case 5: /* INT_SOFTCLR */ + case 11: /* FRQ_ENABLECLR */ + default: + qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" HWADDR_PRIX "\n", + __func__, offset); + return 0; + } +} + +static void icp_pic_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + icp_pic_state *s = (icp_pic_state *)opaque; + + switch (offset >> 2) { + case 2: /* IRQ_ENABLESET */ + s->irq_enabled |= value; + break; + case 3: /* IRQ_ENABLECLR */ + s->irq_enabled &= ~value; + break; + case 4: /* INT_SOFTSET */ + if (value & 1) + icp_pic_set_irq(s, 0, 1); + break; + case 5: /* INT_SOFTCLR */ + if (value & 1) + icp_pic_set_irq(s, 0, 0); + break; + case 10: /* FRQ_ENABLESET */ + s->fiq_enabled |= value; + break; + case 11: /* FRQ_ENABLECLR */ + s->fiq_enabled &= ~value; + break; + case 0: /* IRQ_STATUS */ + case 1: /* IRQ_RAWSTAT */ + case 8: /* FRQ_STATUS */ + case 9: /* FRQ_RAWSTAT */ + default: + qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" HWADDR_PRIX "\n", + __func__, offset); + return; + } + icp_pic_update(s); +} + +static const MemoryRegionOps icp_pic_ops = { + .read = icp_pic_read, + .write = icp_pic_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void icp_pic_init(Object *obj) +{ + DeviceState *dev = DEVICE(obj); + icp_pic_state *s = INTEGRATOR_PIC(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + + qdev_init_gpio_in(dev, icp_pic_set_irq, 32); + sysbus_init_irq(sbd, &s->parent_irq); + sysbus_init_irq(sbd, &s->parent_fiq); + memory_region_init_io(&s->iomem, obj, &icp_pic_ops, s, + "icp-pic", 0x00800000); + sysbus_init_mmio(sbd, &s->iomem); +} + +/* CP control registers. 
*/ + +#define TYPE_ICP_CONTROL_REGS "icp-ctrl-regs" +OBJECT_DECLARE_SIMPLE_TYPE(ICPCtrlRegsState, ICP_CONTROL_REGS) + +struct ICPCtrlRegsState { + /*< private >*/ + SysBusDevice parent_obj; + /*< public >*/ + + MemoryRegion iomem; + + qemu_irq mmc_irq; + uint32_t intreg_state; +}; + +#define ICP_GPIO_MMC_WPROT "mmc-wprot" +#define ICP_GPIO_MMC_CARDIN "mmc-cardin" + +#define ICP_INTREG_WPROT (1 << 0) +#define ICP_INTREG_CARDIN (1 << 3) + +static const VMStateDescription vmstate_icp_control = { + .name = "icp_control", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(intreg_state, ICPCtrlRegsState), + VMSTATE_END_OF_LIST() + } +}; + +static uint64_t icp_control_read(void *opaque, hwaddr offset, + unsigned size) +{ + ICPCtrlRegsState *s = opaque; + + switch (offset >> 2) { + case 0: /* CP_IDFIELD */ + return 0x41034003; + case 1: /* CP_FLASHPROG */ + return 0; + case 2: /* CP_INTREG */ + return s->intreg_state; + case 3: /* CP_DECODE */ + return 0x11; + default: + qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" HWADDR_PRIX "\n", + __func__, offset); + return 0; + } +} + +static void icp_control_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + ICPCtrlRegsState *s = opaque; + + switch (offset >> 2) { + case 2: /* CP_INTREG */ + s->intreg_state &= ~(value & ICP_INTREG_CARDIN); + qemu_set_irq(s->mmc_irq, !!(s->intreg_state & ICP_INTREG_CARDIN)); + break; + case 1: /* CP_FLASHPROG */ + case 3: /* CP_DECODE */ + /* Nothing interesting implemented yet. */ + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" HWADDR_PRIX "\n", + __func__, offset); + } +} + +static const MemoryRegionOps icp_control_ops = { + .read = icp_control_read, + .write = icp_control_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void icp_control_mmc_wprot(void *opaque, int line, int level) +{ + ICPCtrlRegsState *s = opaque; + + s->intreg_state &= ~ICP_INTREG_WPROT; + if (level) { + s->intreg_state |= ICP_INTREG_WPROT; + } +} + +static void icp_control_mmc_cardin(void *opaque, int line, int level) +{ + ICPCtrlRegsState *s = opaque; + + /* line is released by writing to CP_INTREG */ + if (level) { + s->intreg_state |= ICP_INTREG_CARDIN; + qemu_set_irq(s->mmc_irq, 1); + } +} + +static void icp_control_init(Object *obj) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + ICPCtrlRegsState *s = ICP_CONTROL_REGS(obj); + DeviceState *dev = DEVICE(obj); + + memory_region_init_io(&s->iomem, OBJECT(s), &icp_control_ops, s, + "icp_ctrl_regs", 0x00800000); + sysbus_init_mmio(sbd, &s->iomem); + + qdev_init_gpio_in_named(dev, icp_control_mmc_wprot, ICP_GPIO_MMC_WPROT, 1); + qdev_init_gpio_in_named(dev, icp_control_mmc_cardin, + ICP_GPIO_MMC_CARDIN, 1); + sysbus_init_irq(sbd, &s->mmc_irq); +} + + +/* Board init. */ + +static struct arm_boot_info integrator_binfo = { + .loader_start = 0x0, + .board_id = 0x113, +}; + +static void integratorcp_init(MachineState *machine) +{ + ram_addr_t ram_size = machine->ram_size; + Object *cpuobj; + ARMCPU *cpu; + MemoryRegion *address_space_mem = get_system_memory(); + MemoryRegion *ram_alias = g_new(MemoryRegion, 1); + qemu_irq pic[32]; + DeviceState *dev, *sic, *icp; + DriveInfo *dinfo; + int i; + + cpuobj = object_new(machine->cpu_type); + + /* By default ARM1176 CPUs have EL3 enabled. This board does not + * currently support EL3 so the CPU EL3 property is disabled before + * realization. 
+ */ + if (object_property_find(cpuobj, "has_el3")) { + object_property_set_bool(cpuobj, "has_el3", false, &error_fatal); + } + + qdev_realize(DEVICE(cpuobj), NULL, &error_fatal); + + cpu = ARM_CPU(cpuobj); + + /* ??? On a real system the first 1Mb is mapped as SSRAM or boot flash. */ + /* ??? RAM should repeat to fill physical memory space. */ + /* SDRAM at address zero*/ + memory_region_add_subregion(address_space_mem, 0, machine->ram); + /* And again at address 0x80000000 */ + memory_region_init_alias(ram_alias, NULL, "ram.alias", machine->ram, + 0, ram_size); + memory_region_add_subregion(address_space_mem, 0x80000000, ram_alias); + + dev = qdev_new(TYPE_INTEGRATOR_CM); + qdev_prop_set_uint32(dev, "memsz", ram_size >> 20); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + sysbus_mmio_map((SysBusDevice *)dev, 0, 0x10000000); + + dev = sysbus_create_varargs(TYPE_INTEGRATOR_PIC, 0x14000000, + qdev_get_gpio_in(DEVICE(cpu), ARM_CPU_IRQ), + qdev_get_gpio_in(DEVICE(cpu), ARM_CPU_FIQ), + NULL); + for (i = 0; i < 32; i++) { + pic[i] = qdev_get_gpio_in(dev, i); + } + sic = sysbus_create_simple(TYPE_INTEGRATOR_PIC, 0xca000000, pic[26]); + sysbus_create_varargs("integrator_pit", 0x13000000, + pic[5], pic[6], pic[7], NULL); + sysbus_create_simple("pl031", 0x15000000, pic[8]); + pl011_create(0x16000000, pic[1], serial_hd(0)); + pl011_create(0x17000000, pic[2], serial_hd(1)); + icp = sysbus_create_simple(TYPE_ICP_CONTROL_REGS, 0xcb000000, + qdev_get_gpio_in(sic, 3)); + sysbus_create_simple("pl050_keyboard", 0x18000000, pic[3]); + sysbus_create_simple("pl050_mouse", 0x19000000, pic[4]); + sysbus_create_simple(TYPE_INTEGRATOR_DEBUG, 0x1a000000, 0); + + dev = sysbus_create_varargs("pl181", 0x1c000000, pic[23], pic[24], NULL); + qdev_connect_gpio_out_named(dev, "card-read-only", 0, + qdev_get_gpio_in_named(icp, ICP_GPIO_MMC_WPROT, 0)); + qdev_connect_gpio_out_named(dev, "card-inserted", 0, + qdev_get_gpio_in_named(icp, ICP_GPIO_MMC_CARDIN, 0)); + dinfo = drive_get_next(IF_SD); + if (dinfo) { + DeviceState *card; + + card = qdev_new(TYPE_SD_CARD); + qdev_prop_set_drive_err(card, "drive", blk_by_legacy_dinfo(dinfo), + &error_fatal); + qdev_realize_and_unref(card, qdev_get_child_bus(dev, "sd-bus"), + &error_fatal); + } + + sysbus_create_varargs("pl041", 0x1d000000, pic[25], NULL); + + if (nd_table[0].used) + smc91c111_init(&nd_table[0], 0xc8000000, pic[27]); + + sysbus_create_simple("pl110", 0xc0000000, pic[22]); + + integrator_binfo.ram_size = ram_size; + arm_load_kernel(cpu, machine, &integrator_binfo); +} + +static void integratorcp_machine_init(MachineClass *mc) +{ + mc->desc = "ARM Integrator/CP (ARM926EJ-S)"; + mc->init = integratorcp_init; + mc->ignore_memory_transaction_failures = true; + mc->default_cpu_type = ARM_CPU_TYPE_NAME("arm926"); + mc->default_ram_id = "integrator.ram"; +} + +DEFINE_MACHINE("integratorcp", integratorcp_machine_init) + +static Property core_properties[] = { + DEFINE_PROP_UINT32("memsz", IntegratorCMState, memsz, 0), + DEFINE_PROP_END_OF_LIST(), +}; + +static void core_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + device_class_set_props(dc, core_properties); + dc->realize = integratorcm_realize; + dc->vmsd = &vmstate_integratorcm; +} + +static void icp_pic_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->vmsd = &vmstate_icp_pic; +} + +static void icp_control_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->vmsd = 
&vmstate_icp_control; +} + +static const TypeInfo core_info = { + .name = TYPE_INTEGRATOR_CM, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(IntegratorCMState), + .instance_init = integratorcm_init, + .class_init = core_class_init, +}; + +static const TypeInfo icp_pic_info = { + .name = TYPE_INTEGRATOR_PIC, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(icp_pic_state), + .instance_init = icp_pic_init, + .class_init = icp_pic_class_init, +}; + +static const TypeInfo icp_ctrl_regs_info = { + .name = TYPE_ICP_CONTROL_REGS, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(ICPCtrlRegsState), + .instance_init = icp_control_init, + .class_init = icp_control_class_init, +}; + +static void integratorcp_register_types(void) +{ + type_register_static(&icp_pic_info); + type_register_static(&core_info); + type_register_static(&icp_ctrl_regs_info); +} + +type_init(integratorcp_register_types) diff --git a/hw/arm/kzm.c b/hw/arm/kzm.c new file mode 100644 index 000000000..39559c44c --- /dev/null +++ b/hw/arm/kzm.c @@ -0,0 +1,142 @@ +/* + * KZM Board System emulation. + * + * Copyright (c) 2008 OKL and 2011 NICTA + * Written by Hans at OK-Labs + * Updated by Peter Chubb. + * + * This code is licensed under the GPL, version 2 or later. + * See the file `COPYING' in the top level directory. + * + * It (partially) emulates a Kyoto Microcomputer + * KZM-ARM11-01 evaluation board, with a Freescale + * i.MX31 SoC + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "hw/arm/fsl-imx31.h" +#include "hw/boards.h" +#include "qemu/error-report.h" +#include "exec/address-spaces.h" +#include "net/net.h" +#include "hw/net/lan9118.h" +#include "hw/char/serial.h" +#include "sysemu/qtest.h" +#include "sysemu/sysemu.h" +#include "qemu/cutils.h" + +/* Memory map for Kzm Emulation Baseboard: + * 0x00000000-0x7fffffff See i.MX31 SOC for support + * 0x80000000-0x8fffffff RAM EMULATED + * 0x90000000-0x9fffffff RAM EMULATED + * 0xa0000000-0xafffffff Flash IGNORED + * 0xb0000000-0xb3ffffff Unavailable IGNORED + * 0xb4000000-0xb4000fff 8-bit free space IGNORED + * 0xb4001000-0xb400100f Board control IGNORED + * 0xb4001003 DIP switch + * 0xb4001010-0xb400101f 7-segment LED IGNORED + * 0xb4001020-0xb400102f LED IGNORED + * 0xb4001030-0xb400103f LED IGNORED + * 0xb4001040-0xb400104f FPGA, UART EMULATED + * 0xb4001050-0xb400105f FPGA, UART EMULATED + * 0xb4001060-0xb40fffff FPGA IGNORED + * 0xb6000000-0xb61fffff LAN controller EMULATED + * 0xb6200000-0xb62fffff FPGA NAND Controller IGNORED + * 0xb6300000-0xb7ffffff Free IGNORED + * 0xb8000000-0xb8004fff Memory control registers IGNORED + * 0xc0000000-0xc3ffffff PCMCIA/CF IGNORED + * 0xc4000000-0xffffffff Reserved IGNORED + */ + +typedef struct IMX31KZM { + FslIMX31State soc; + MemoryRegion ram_alias; +} IMX31KZM; + +#define KZM_RAM_ADDR (FSL_IMX31_SDRAM0_ADDR) +#define KZM_FPGA_ADDR (FSL_IMX31_CS4_ADDR + 0x1040) +#define KZM_LAN9118_ADDR (FSL_IMX31_CS5_ADDR) + +static struct arm_boot_info kzm_binfo = { + .loader_start = KZM_RAM_ADDR, + .board_id = 1722, +}; + +static void kzm_init(MachineState *machine) +{ + IMX31KZM *s = g_new0(IMX31KZM, 1); + unsigned int ram_size; + unsigned int alias_offset; + unsigned int i; + + object_initialize_child(OBJECT(machine), "soc", &s->soc, TYPE_FSL_IMX31); + + qdev_realize(DEVICE(&s->soc), NULL, &error_fatal); + + /* Check the amount of memory is compatible with the SOC */ + if (machine->ram_size > (FSL_IMX31_SDRAM0_SIZE + FSL_IMX31_SDRAM1_SIZE)) { + char *sz = size_to_str(FSL_IMX31_SDRAM0_SIZE + 
FSL_IMX31_SDRAM1_SIZE); + error_report("RAM size more than %s is not supported", sz); + g_free(sz); + exit(EXIT_FAILURE); + } + + memory_region_add_subregion(get_system_memory(), FSL_IMX31_SDRAM0_ADDR, + machine->ram); + + /* initialize the alias memory if any */ + for (i = 0, ram_size = machine->ram_size, alias_offset = 0; + (i < 2) && ram_size; i++) { + unsigned int size; + static const struct { + hwaddr addr; + unsigned int size; + } ram[2] = { + { FSL_IMX31_SDRAM0_ADDR, FSL_IMX31_SDRAM0_SIZE }, + { FSL_IMX31_SDRAM1_ADDR, FSL_IMX31_SDRAM1_SIZE }, + }; + + size = MIN(ram_size, ram[i].size); + + ram_size -= size; + + if (size < ram[i].size) { + memory_region_init_alias(&s->ram_alias, NULL, "ram.alias", + machine->ram, + alias_offset, ram[i].size - size); + memory_region_add_subregion(get_system_memory(), + ram[i].addr + size, &s->ram_alias); + } + + alias_offset += ram[i].size; + } + + if (nd_table[0].used) { + lan9118_init(&nd_table[0], KZM_LAN9118_ADDR, + qdev_get_gpio_in(DEVICE(&s->soc.avic), 52)); + } + + if (serial_hd(2)) { /* touchscreen */ + serial_mm_init(get_system_memory(), KZM_FPGA_ADDR+0x10, 0, + qdev_get_gpio_in(DEVICE(&s->soc.avic), 52), + 14745600, serial_hd(2), DEVICE_NATIVE_ENDIAN); + } + + kzm_binfo.ram_size = machine->ram_size; + kzm_binfo.nb_cpus = 1; + + if (!qtest_enabled()) { + arm_load_kernel(&s->soc.cpu, machine, &kzm_binfo); + } +} + +static void kzm_machine_init(MachineClass *mc) +{ + mc->desc = "ARM KZM Emulation Baseboard (ARM1136)"; + mc->init = kzm_init; + mc->ignore_memory_transaction_failures = true; + mc->default_ram_id = "kzm.ram"; +} + +DEFINE_MACHINE("kzm", kzm_machine_init) diff --git a/hw/arm/mainstone.c b/hw/arm/mainstone.c new file mode 100644 index 000000000..8454b6545 --- /dev/null +++ b/hw/arm/mainstone.c @@ -0,0 +1,179 @@ +/* + * PXA270-based Intel Mainstone platforms. + * + * Copyright (c) 2007 by Armin Kuster or + * + * + * Code based on spitz platform by Andrzej Zaborowski + * + * This code is licensed under the GNU GPL v2. + * + * Contributions after 2012-01-13 are licensed under the terms of the + * GNU GPL, version 2 or (at your option) any later version. + */ +#include "qemu/osdep.h" +#include "qemu/error-report.h" +#include "qapi/error.h" +#include "hw/arm/pxa.h" +#include "hw/arm/boot.h" +#include "net/net.h" +#include "hw/net/smc91c111.h" +#include "hw/boards.h" +#include "hw/block/flash.h" +#include "hw/sysbus.h" +#include "exec/address-spaces.h" +#include "cpu.h" + +/* Device addresses */ +#define MST_FPGA_PHYS 0x08000000 +#define MST_ETH_PHYS 0x10000300 +#define MST_FLASH_0 0x00000000 +#define MST_FLASH_1 0x04000000 + +/* IRQ definitions */ +#define MMC_IRQ 0 +#define USIM_IRQ 1 +#define USBC_IRQ 2 +#define ETHERNET_IRQ 3 +#define AC97_IRQ 4 +#define PEN_IRQ 5 +#define MSINS_IRQ 6 +#define EXBRD_IRQ 7 +#define S0_CD_IRQ 9 +#define S0_STSCHG_IRQ 10 +#define S0_IRQ 11 +#define S1_CD_IRQ 13 +#define S1_STSCHG_IRQ 14 +#define S1_IRQ 15 + +static const struct keymap map[0xE0] = { + [0 ... 
0xDF] = { -1, -1 }, + [0x1e] = {0,0}, /* a */ + [0x30] = {0,1}, /* b */ + [0x2e] = {0,2}, /* c */ + [0x20] = {0,3}, /* d */ + [0x12] = {0,4}, /* e */ + [0x21] = {0,5}, /* f */ + [0x22] = {1,0}, /* g */ + [0x23] = {1,1}, /* h */ + [0x17] = {1,2}, /* i */ + [0x24] = {1,3}, /* j */ + [0x25] = {1,4}, /* k */ + [0x26] = {1,5}, /* l */ + [0x32] = {2,0}, /* m */ + [0x31] = {2,1}, /* n */ + [0x18] = {2,2}, /* o */ + [0x19] = {2,3}, /* p */ + [0x10] = {2,4}, /* q */ + [0x13] = {2,5}, /* r */ + [0x1f] = {3,0}, /* s */ + [0x14] = {3,1}, /* t */ + [0x16] = {3,2}, /* u */ + [0x2f] = {3,3}, /* v */ + [0x11] = {3,4}, /* w */ + [0x2d] = {3,5}, /* x */ + [0x34] = {4,0}, /* . */ + [0x15] = {4,2}, /* y */ + [0x2c] = {4,3}, /* z */ + [0x35] = {4,4}, /* / */ + [0xc7] = {5,0}, /* Home */ + [0x2a] = {5,1}, /* shift */ + /* + * There are two matrix positions which map to space, + * but QEMU can only use one of them for the reverse + * mapping, so simply use the second one. + */ + /* [0x39] = {5,2}, space */ + [0x39] = {5,3}, /* space */ + /* + * Matrix position {5,4} and other keys are missing here. + * TODO: Compare with Linux code and test real hardware. + */ + [0x1c] = {5,4}, /* enter */ + [0x0e] = {5,5}, /* backspace */ + [0xc8] = {6,0}, /* up */ + [0xd0] = {6,1}, /* down */ + [0xcb] = {6,2}, /* left */ + [0xcd] = {6,3}, /* right */ +}; + +enum mainstone_model_e { mainstone }; + +#define MAINSTONE_RAM 0x04000000 +#define MAINSTONE_ROM 0x00800000 +#define MAINSTONE_FLASH 0x02000000 + +static struct arm_boot_info mainstone_binfo = { + .loader_start = PXA2XX_SDRAM_BASE, + .ram_size = 0x04000000, +}; + +static void mainstone_common_init(MemoryRegion *address_space_mem, + MachineState *machine, + enum mainstone_model_e model, int arm_id) +{ + uint32_t sector_len = 256 * 1024; + hwaddr mainstone_flash_base[] = { MST_FLASH_0, MST_FLASH_1 }; + PXA2xxState *mpu; + DeviceState *mst_irq; + DriveInfo *dinfo; + int i; + MemoryRegion *rom = g_new(MemoryRegion, 1); + + /* Setup CPU & memory */ + mpu = pxa270_init(address_space_mem, mainstone_binfo.ram_size, + machine->cpu_type); + memory_region_init_rom(rom, NULL, "mainstone.rom", MAINSTONE_ROM, + &error_fatal); + memory_region_add_subregion(address_space_mem, 0, rom); + + /* There are two 32MiB flash devices on the board */ + for (i = 0; i < 2; i ++) { + dinfo = drive_get(IF_PFLASH, 0, i); + if (!pflash_cfi01_register(mainstone_flash_base[i], + i ? "mainstone.flash1" : "mainstone.flash0", + MAINSTONE_FLASH, + dinfo ? 
blk_by_legacy_dinfo(dinfo) : NULL, + sector_len, 4, 0, 0, 0, 0, 0)) { + error_report("Error registering flash memory"); + exit(1); + } + } + + mst_irq = sysbus_create_simple("mainstone-fpga", MST_FPGA_PHYS, + qdev_get_gpio_in(mpu->gpio, 0)); + + /* setup keypad */ + pxa27x_register_keypad(mpu->kp, map, 0xe0); + + /* MMC/SD host */ + pxa2xx_mmci_handlers(mpu->mmc, NULL, qdev_get_gpio_in(mst_irq, MMC_IRQ)); + + pxa2xx_pcmcia_set_irq_cb(mpu->pcmcia[0], + qdev_get_gpio_in(mst_irq, S0_IRQ), + qdev_get_gpio_in(mst_irq, S0_CD_IRQ)); + pxa2xx_pcmcia_set_irq_cb(mpu->pcmcia[1], + qdev_get_gpio_in(mst_irq, S1_IRQ), + qdev_get_gpio_in(mst_irq, S1_CD_IRQ)); + + smc91c111_init(&nd_table[0], MST_ETH_PHYS, + qdev_get_gpio_in(mst_irq, ETHERNET_IRQ)); + + mainstone_binfo.board_id = arm_id; + arm_load_kernel(mpu->cpu, machine, &mainstone_binfo); +} + +static void mainstone_init(MachineState *machine) +{ + mainstone_common_init(get_system_memory(), machine, mainstone, 0x196); +} + +static void mainstone2_machine_init(MachineClass *mc) +{ + mc->desc = "Mainstone II (PXA27x)"; + mc->init = mainstone_init; + mc->ignore_memory_transaction_failures = true; + mc->default_cpu_type = ARM_CPU_TYPE_NAME("pxa270-c5"); +} + +DEFINE_MACHINE("mainstone", mainstone2_machine_init) diff --git a/hw/arm/mcimx6ul-evk.c b/hw/arm/mcimx6ul-evk.c new file mode 100644 index 000000000..77fae874b --- /dev/null +++ b/hw/arm/mcimx6ul-evk.c @@ -0,0 +1,75 @@ +/* + * Copyright (c) 2018 Jean-Christophe Dubois + * + * MCIMX6UL_EVK Board System emulation. + * + * This code is licensed under the GPL, version 2 or later. + * See the file `COPYING' in the top level directory. + * + * It (partially) emulates a mcimx6ul_evk board, with a Freescale + * i.MX6ul SoC + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "hw/arm/fsl-imx6ul.h" +#include "hw/boards.h" +#include "hw/qdev-properties.h" +#include "qemu/error-report.h" +#include "sysemu/qtest.h" + +static void mcimx6ul_evk_init(MachineState *machine) +{ + static struct arm_boot_info boot_info; + FslIMX6ULState *s; + int i; + + if (machine->ram_size > FSL_IMX6UL_MMDC_SIZE) { + error_report("RAM size " RAM_ADDR_FMT " above max supported (%08x)", + machine->ram_size, FSL_IMX6UL_MMDC_SIZE); + exit(1); + } + + boot_info = (struct arm_boot_info) { + .loader_start = FSL_IMX6UL_MMDC_ADDR, + .board_id = -1, + .ram_size = machine->ram_size, + .nb_cpus = machine->smp.cpus, + }; + + s = FSL_IMX6UL(object_new(TYPE_FSL_IMX6UL)); + object_property_add_child(OBJECT(machine), "soc", OBJECT(s)); + object_property_set_uint(OBJECT(s), "fec1-phy-num", 2, &error_fatal); + object_property_set_uint(OBJECT(s), "fec2-phy-num", 1, &error_fatal); + qdev_realize(DEVICE(s), NULL, &error_fatal); + + memory_region_add_subregion(get_system_memory(), FSL_IMX6UL_MMDC_ADDR, + machine->ram); + + for (i = 0; i < FSL_IMX6UL_NUM_USDHCS; i++) { + BusState *bus; + DeviceState *carddev; + DriveInfo *di; + BlockBackend *blk; + + di = drive_get_next(IF_SD); + blk = di ? 
blk_by_legacy_dinfo(di) : NULL; + bus = qdev_get_child_bus(DEVICE(&s->usdhc[i]), "sd-bus"); + carddev = qdev_new(TYPE_SD_CARD); + qdev_prop_set_drive_err(carddev, "drive", blk, &error_fatal); + qdev_realize_and_unref(carddev, bus, &error_fatal); + } + + if (!qtest_enabled()) { + arm_load_kernel(&s->cpu, machine, &boot_info); + } +} + +static void mcimx6ul_evk_machine_init(MachineClass *mc) +{ + mc->desc = "Freescale i.MX6UL Evaluation Kit (Cortex-A7)"; + mc->init = mcimx6ul_evk_init; + mc->max_cpus = FSL_IMX6UL_NUM_CPUS; + mc->default_ram_id = "mcimx6ul-evk.ram"; +} +DEFINE_MACHINE("mcimx6ul-evk", mcimx6ul_evk_machine_init) diff --git a/hw/arm/mcimx7d-sabre.c b/hw/arm/mcimx7d-sabre.c new file mode 100644 index 000000000..935d4b0f1 --- /dev/null +++ b/hw/arm/mcimx7d-sabre.c @@ -0,0 +1,75 @@ +/* + * Copyright (c) 2018, Impinj, Inc. + * + * MCIMX7D_SABRE Board System emulation. + * + * Author: Andrey Smirnov + * + * This code is licensed under the GPL, version 2 or later. + * See the file `COPYING' in the top level directory. + * + * It (partially) emulates a mcimx7d_sabre board, with a Freescale + * i.MX7 SoC + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "hw/arm/fsl-imx7.h" +#include "hw/boards.h" +#include "hw/qdev-properties.h" +#include "qemu/error-report.h" +#include "sysemu/qtest.h" + +static void mcimx7d_sabre_init(MachineState *machine) +{ + static struct arm_boot_info boot_info; + FslIMX7State *s; + int i; + + if (machine->ram_size > FSL_IMX7_MMDC_SIZE) { + error_report("RAM size " RAM_ADDR_FMT " above max supported (%08x)", + machine->ram_size, FSL_IMX7_MMDC_SIZE); + exit(1); + } + + boot_info = (struct arm_boot_info) { + .loader_start = FSL_IMX7_MMDC_ADDR, + .board_id = -1, + .ram_size = machine->ram_size, + .nb_cpus = machine->smp.cpus, + }; + + s = FSL_IMX7(object_new(TYPE_FSL_IMX7)); + object_property_add_child(OBJECT(machine), "soc", OBJECT(s)); + qdev_realize(DEVICE(s), NULL, &error_fatal); + + memory_region_add_subregion(get_system_memory(), FSL_IMX7_MMDC_ADDR, + machine->ram); + + for (i = 0; i < FSL_IMX7_NUM_USDHCS; i++) { + BusState *bus; + DeviceState *carddev; + DriveInfo *di; + BlockBackend *blk; + + di = drive_get_next(IF_SD); + blk = di ? 
blk_by_legacy_dinfo(di) : NULL; + bus = qdev_get_child_bus(DEVICE(&s->usdhc[i]), "sd-bus"); + carddev = qdev_new(TYPE_SD_CARD); + qdev_prop_set_drive_err(carddev, "drive", blk, &error_fatal); + qdev_realize_and_unref(carddev, bus, &error_fatal); + } + + if (!qtest_enabled()) { + arm_load_kernel(&s->cpu[0], machine, &boot_info); + } +} + +static void mcimx7d_sabre_machine_init(MachineClass *mc) +{ + mc->desc = "Freescale i.MX7 DUAL SABRE (Cortex-A7)"; + mc->init = mcimx7d_sabre_init; + mc->max_cpus = FSL_IMX7_NUM_CPUS; + mc->default_ram_id = "mcimx7d-sabre.ram"; +} +DEFINE_MACHINE("mcimx7d-sabre", mcimx7d_sabre_machine_init) diff --git a/hw/arm/meson.build b/hw/arm/meson.build new file mode 100644 index 000000000..721a8eb8b --- /dev/null +++ b/hw/arm/meson.build @@ -0,0 +1,62 @@ +arm_ss = ss.source_set() +arm_ss.add(files('boot.c'), fdt) +arm_ss.add(when: 'CONFIG_PLATFORM_BUS', if_true: files('sysbus-fdt.c')) +arm_ss.add(when: 'CONFIG_ARM_VIRT', if_true: files('virt.c')) +arm_ss.add(when: 'CONFIG_ACPI', if_true: files('virt-acpi-build.c')) +arm_ss.add(when: 'CONFIG_DIGIC', if_true: files('digic_boards.c')) +arm_ss.add(when: 'CONFIG_EXYNOS4', if_true: files('exynos4_boards.c')) +arm_ss.add(when: 'CONFIG_EMCRAFT_SF2', if_true: files('msf2-som.c')) +arm_ss.add(when: 'CONFIG_HIGHBANK', if_true: files('highbank.c')) +arm_ss.add(when: 'CONFIG_INTEGRATOR', if_true: files('integratorcp.c')) +arm_ss.add(when: 'CONFIG_MAINSTONE', if_true: files('mainstone.c')) +arm_ss.add(when: 'CONFIG_MICROBIT', if_true: files('microbit.c')) +arm_ss.add(when: 'CONFIG_MUSICPAL', if_true: files('musicpal.c')) +arm_ss.add(when: 'CONFIG_NETDUINO2', if_true: files('netduino2.c')) +arm_ss.add(when: 'CONFIG_NETDUINOPLUS2', if_true: files('netduinoplus2.c')) +arm_ss.add(when: 'CONFIG_NPCM7XX', if_true: files('npcm7xx.c', 'npcm7xx_boards.c')) +arm_ss.add(when: 'CONFIG_NSERIES', if_true: files('nseries.c')) +arm_ss.add(when: 'CONFIG_SX1', if_true: files('omap_sx1.c')) +arm_ss.add(when: 'CONFIG_CHEETAH', if_true: files('palm.c')) +arm_ss.add(when: 'CONFIG_GUMSTIX', if_true: files('gumstix.c')) +arm_ss.add(when: 'CONFIG_SPITZ', if_true: files('spitz.c')) +arm_ss.add(when: 'CONFIG_TOSA', if_true: files('tosa.c')) +arm_ss.add(when: 'CONFIG_Z2', if_true: files('z2.c')) +arm_ss.add(when: 'CONFIG_REALVIEW', if_true: files('realview.c')) +arm_ss.add(when: 'CONFIG_SBSA_REF', if_true: files('sbsa-ref.c')) +arm_ss.add(when: 'CONFIG_STELLARIS', if_true: files('stellaris.c')) +arm_ss.add(when: 'CONFIG_STM32VLDISCOVERY', if_true: files('stm32vldiscovery.c')) +arm_ss.add(when: 'CONFIG_COLLIE', if_true: files('collie.c')) +arm_ss.add(when: 'CONFIG_VERSATILE', if_true: files('versatilepb.c')) +arm_ss.add(when: 'CONFIG_VEXPRESS', if_true: files('vexpress.c')) +arm_ss.add(when: 'CONFIG_ZYNQ', if_true: files('xilinx_zynq.c')) +arm_ss.add(when: 'CONFIG_SABRELITE', if_true: files('sabrelite.c')) + +arm_ss.add(when: 'CONFIG_ARM_V7M', if_true: files('armv7m.c')) +arm_ss.add(when: 'CONFIG_EXYNOS4', if_true: files('exynos4210.c')) +arm_ss.add(when: 'CONFIG_PXA2XX', if_true: files('pxa2xx.c', 'pxa2xx_gpio.c', 'pxa2xx_pic.c')) +arm_ss.add(when: 'CONFIG_DIGIC', if_true: files('digic.c')) +arm_ss.add(when: 'CONFIG_OMAP', if_true: files('omap1.c', 'omap2.c')) +arm_ss.add(when: 'CONFIG_STRONGARM', if_true: files('strongarm.c')) +arm_ss.add(when: 'CONFIG_ALLWINNER_A10', if_true: files('allwinner-a10.c', 'cubieboard.c')) +arm_ss.add(when: 'CONFIG_ALLWINNER_H3', if_true: files('allwinner-h3.c', 'orangepi.c')) +arm_ss.add(when: 'CONFIG_RASPI', if_true: 
files('bcm2835_peripherals.c', 'bcm2836.c', 'raspi.c')) +arm_ss.add(when: 'CONFIG_STM32F100_SOC', if_true: files('stm32f100_soc.c')) +arm_ss.add(when: 'CONFIG_STM32F205_SOC', if_true: files('stm32f205_soc.c')) +arm_ss.add(when: 'CONFIG_STM32F405_SOC', if_true: files('stm32f405_soc.c')) +arm_ss.add(when: 'CONFIG_XLNX_ZYNQMP_ARM', if_true: files('xlnx-zynqmp.c', 'xlnx-zcu102.c')) +arm_ss.add(when: 'CONFIG_XLNX_VERSAL', if_true: files('xlnx-versal.c', 'xlnx-versal-virt.c')) +arm_ss.add(when: 'CONFIG_FSL_IMX25', if_true: files('fsl-imx25.c', 'imx25_pdk.c')) +arm_ss.add(when: 'CONFIG_FSL_IMX31', if_true: files('fsl-imx31.c', 'kzm.c')) +arm_ss.add(when: 'CONFIG_FSL_IMX6', if_true: files('fsl-imx6.c')) +arm_ss.add(when: 'CONFIG_ASPEED_SOC', if_true: files('aspeed_soc.c', 'aspeed.c', 'aspeed_ast2600.c')) +arm_ss.add(when: 'CONFIG_MPS2', if_true: files('mps2.c')) +arm_ss.add(when: 'CONFIG_MPS2', if_true: files('mps2-tz.c')) +arm_ss.add(when: 'CONFIG_MSF2', if_true: files('msf2-soc.c')) +arm_ss.add(when: 'CONFIG_MUSCA', if_true: files('musca.c')) +arm_ss.add(when: 'CONFIG_ARMSSE', if_true: files('armsse.c')) +arm_ss.add(when: 'CONFIG_FSL_IMX7', if_true: files('fsl-imx7.c', 'mcimx7d-sabre.c')) +arm_ss.add(when: 'CONFIG_ARM_SMMUV3', if_true: files('smmu-common.c', 'smmuv3.c')) +arm_ss.add(when: 'CONFIG_FSL_IMX6UL', if_true: files('fsl-imx6ul.c', 'mcimx6ul-evk.c')) +arm_ss.add(when: 'CONFIG_NRF51_SOC', if_true: files('nrf51_soc.c')) + +hw_arch += {'arm': arm_ss} diff --git a/hw/arm/microbit.c b/hw/arm/microbit.c new file mode 100644 index 000000000..e9494334c --- /dev/null +++ b/hw/arm/microbit.c @@ -0,0 +1,84 @@ +/* + * BBC micro:bit machine + * http://tech.microbit.org/hardware/ + * + * Copyright 2018 Joel Stanley + * + * This code is licensed under the GPL version 2 or later. See + * the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "hw/boards.h" +#include "hw/arm/boot.h" +#include "sysemu/sysemu.h" +#include "exec/address-spaces.h" + +#include "hw/arm/nrf51_soc.h" +#include "hw/i2c/microbit_i2c.h" +#include "hw/qdev-properties.h" +#include "qom/object.h" + +struct MicrobitMachineState { + MachineState parent; + + NRF51State nrf51; + MicrobitI2CState i2c; +}; + +#define TYPE_MICROBIT_MACHINE MACHINE_TYPE_NAME("microbit") + +OBJECT_DECLARE_SIMPLE_TYPE(MicrobitMachineState, MICROBIT_MACHINE) + +static void microbit_init(MachineState *machine) +{ + MicrobitMachineState *s = MICROBIT_MACHINE(machine); + MemoryRegion *system_memory = get_system_memory(); + MemoryRegion *mr; + + object_initialize_child(OBJECT(machine), "nrf51", &s->nrf51, + TYPE_NRF51_SOC); + qdev_prop_set_chr(DEVICE(&s->nrf51), "serial0", serial_hd(0)); + object_property_set_link(OBJECT(&s->nrf51), "memory", + OBJECT(system_memory), &error_fatal); + sysbus_realize(SYS_BUS_DEVICE(&s->nrf51), &error_fatal); + + /* + * Overlap the TWI stub device into the SoC. This is a microbit-specific + * hack until we implement the nRF51 TWI controller properly and the + * magnetometer/accelerometer devices. 
+     */
+    object_initialize_child(OBJECT(machine), "microbit.twi", &s->i2c,
+                            TYPE_MICROBIT_I2C);
+    sysbus_realize(SYS_BUS_DEVICE(&s->i2c), &error_fatal);
+    mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->i2c), 0);
+    memory_region_add_subregion_overlap(&s->nrf51.container, NRF51_TWI_BASE,
+                                        mr, -1);
+
+    armv7m_load_kernel(ARM_CPU(first_cpu), machine->kernel_filename,
+                       s->nrf51.flash_size);
+}
+
+static void microbit_machine_class_init(ObjectClass *oc, void *data)
+{
+    MachineClass *mc = MACHINE_CLASS(oc);
+
+    mc->desc = "BBC micro:bit (Cortex-M0)";
+    mc->init = microbit_init;
+    mc->max_cpus = 1;
+}
+
+static const TypeInfo microbit_info = {
+    .name = TYPE_MICROBIT_MACHINE,
+    .parent = TYPE_MACHINE,
+    .instance_size = sizeof(MicrobitMachineState),
+    .class_init = microbit_machine_class_init,
+};
+
+static void microbit_machine_init(void)
+{
+    type_register_static(&microbit_info);
+}
+
+type_init(microbit_machine_init);
diff --git a/hw/arm/mps2-tz.c b/hw/arm/mps2-tz.c
new file mode 100644
index 000000000..f40e854de
--- /dev/null
+++ b/hw/arm/mps2-tz.c
@@ -0,0 +1,1448 @@
+/*
+ * ARM V2M MPS2 board emulation, trustzone aware FPGA images
+ *
+ * Copyright (c) 2017 Linaro Limited
+ * Written by Peter Maydell
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 or
+ * (at your option) any later version.
+ */
+
+/* The MPS2 and MPS2+ dev boards are FPGA based (the 2+ has a bigger
+ * FPGA but is otherwise the same as the 2). Since the CPU itself
+ * and most of the devices are in the FPGA, the details of the board
+ * as seen by the guest depend significantly on the FPGA image.
+ * This source file covers the following FPGA images, for TrustZone cores:
+ *  "mps2-an505" -- Cortex-M33 as documented in ARM Application Note AN505
+ *  "mps2-an521" -- Dual Cortex-M33 as documented in Application Note AN521
+ *  "mps3-an524" -- Dual Cortex-M33 as documented in Application Note AN524
+ *  "mps3-an547" -- Single Cortex-M55 as documented in Application Note AN547
+ *
+ * Links to the TRM for the board itself and to the various Application
+ * Notes which document the FPGA images can be found here:
+ * https://developer.arm.com/products/system-design/development-boards/fpga-prototyping-boards/mps2
+ *
+ * Board TRM:
+ * https://developer.arm.com/documentation/100112/latest/
+ * Application Note AN505:
+ * https://developer.arm.com/documentation/dai0505/latest/
+ * Application Note AN521:
+ * https://developer.arm.com/documentation/dai0521/latest/
+ * Application Note AN524:
+ * https://developer.arm.com/documentation/dai0524/latest/
+ * Application Note AN547:
+ * https://developer.arm.com/-/media/Arm%20Developer%20Community/PDF/DAI0547B_SSE300_PLUS_U55_FPGA_for_mps3.pdf
+ *
+ * The AN505 defers to the Cortex-M33 processor ARMv8M IoT Kit FVP User Guide
+ * (ARM ECM0601256) for the details of some of the device layout:
+ * https://developer.arm.com/documentation/ecm0601256/latest
+ * Similarly, the AN521 and AN524 use the SSE-200, and the SSE-200 TRM defines
+ * most of the device layout:
+ * https://developer.arm.com/documentation/101104/latest/
+ * and the AN547 uses the SSE-300, whose layout is in the SSE-300 TRM:
+ * https://developer.arm.com/documentation/101773/latest/
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/units.h"
+#include "qemu/cutils.h"
+#include "qapi/error.h"
+#include "qemu/error-report.h"
+#include "hw/arm/boot.h"
+#include "hw/arm/armv7m.h"
+#include "hw/or-irq.h"
+#include "hw/boards.h"
+#include
"exec/address-spaces.h" +#include "sysemu/sysemu.h" +#include "sysemu/reset.h" +#include "hw/misc/unimp.h" +#include "hw/char/cmsdk-apb-uart.h" +#include "hw/timer/cmsdk-apb-timer.h" +#include "hw/misc/mps2-scc.h" +#include "hw/misc/mps2-fpgaio.h" +#include "hw/misc/tz-mpc.h" +#include "hw/misc/tz-msc.h" +#include "hw/arm/armsse.h" +#include "hw/dma/pl080.h" +#include "hw/rtc/pl031.h" +#include "hw/ssi/pl022.h" +#include "hw/i2c/arm_sbcon_i2c.h" +#include "hw/net/lan9118.h" +#include "net/net.h" +#include "hw/core/split-irq.h" +#include "hw/qdev-clock.h" +#include "qom/object.h" +#include "hw/irq.h" + +#define MPS2TZ_NUMIRQ_MAX 96 +#define MPS2TZ_RAM_MAX 5 + +typedef enum MPS2TZFPGAType { + FPGA_AN505, + FPGA_AN521, + FPGA_AN524, + FPGA_AN547, +} MPS2TZFPGAType; + +/* + * Define the layout of RAM in a board, including which parts are + * behind which MPCs. + * mrindex specifies the index into mms->ram[] to use for the backing RAM; + * -1 means "use the system RAM". + */ +typedef struct RAMInfo { + const char *name; + uint32_t base; + uint32_t size; + int mpc; /* MPC number, -1 for "not behind an MPC" */ + int mrindex; + int flags; +} RAMInfo; + +/* + * Flag values: + * IS_ALIAS: this RAM area is an alias to the upstream end of the + * MPC specified by its .mpc value + * IS_ROM: this RAM area is read-only + */ +#define IS_ALIAS 1 +#define IS_ROM 2 + +struct MPS2TZMachineClass { + MachineClass parent; + MPS2TZFPGAType fpga_type; + uint32_t scc_id; + uint32_t sysclk_frq; /* Main SYSCLK frequency in Hz */ + uint32_t apb_periph_frq; /* APB peripheral frequency in Hz */ + uint32_t len_oscclk; + const uint32_t *oscclk; + uint32_t fpgaio_num_leds; /* Number of LEDs in FPGAIO LED0 register */ + bool fpgaio_has_switches; /* Does FPGAIO have SWITCH register? */ + bool fpgaio_has_dbgctrl; /* Does FPGAIO have DBGCTRL register? 
*/ + int numirq; /* Number of external interrupts */ + int uart_overflow_irq; /* number of the combined UART overflow IRQ */ + uint32_t init_svtor; /* init-svtor setting for SSE */ + uint32_t sram_addr_width; /* SRAM_ADDR_WIDTH setting for SSE */ + const RAMInfo *raminfo; + const char *armsse_type; + uint32_t boot_ram_size; /* size of ram at address 0; 0 == find in raminfo */ +}; + +struct MPS2TZMachineState { + MachineState parent; + + ARMSSE iotkit; + MemoryRegion ram[MPS2TZ_RAM_MAX]; + MemoryRegion eth_usb_container; + + MPS2SCC scc; + MPS2FPGAIO fpgaio; + TZPPC ppc[5]; + TZMPC mpc[3]; + PL022State spi[5]; + ArmSbconI2CState i2c[5]; + UnimplementedDeviceState i2s_audio; + UnimplementedDeviceState gpio[4]; + UnimplementedDeviceState gfx; + UnimplementedDeviceState cldc; + UnimplementedDeviceState usb; + PL031State rtc; + PL080State dma[4]; + TZMSC msc[4]; + CMSDKAPBUART uart[6]; + SplitIRQ sec_resp_splitter; + qemu_or_irq uart_irq_orgate; + DeviceState *lan9118; + SplitIRQ cpu_irq_splitter[MPS2TZ_NUMIRQ_MAX]; + Clock *sysclk; + Clock *s32kclk; + + bool remap; + qemu_irq remap_irq; +}; + +#define TYPE_MPS2TZ_MACHINE "mps2tz" +#define TYPE_MPS2TZ_AN505_MACHINE MACHINE_TYPE_NAME("mps2-an505") +#define TYPE_MPS2TZ_AN521_MACHINE MACHINE_TYPE_NAME("mps2-an521") +#define TYPE_MPS3TZ_AN524_MACHINE MACHINE_TYPE_NAME("mps3-an524") +#define TYPE_MPS3TZ_AN547_MACHINE MACHINE_TYPE_NAME("mps3-an547") + +OBJECT_DECLARE_TYPE(MPS2TZMachineState, MPS2TZMachineClass, MPS2TZ_MACHINE) + +/* Slow 32Khz S32KCLK frequency in Hz */ +#define S32KCLK_FRQ (32 * 1000) + +/* + * The MPS3 DDR is 2GiB, but on a 32-bit host QEMU doesn't permit + * emulation of that much guest RAM, so artificially make it smaller. + */ +#if HOST_LONG_BITS == 32 +#define MPS3_DDR_SIZE (1 * GiB) +#else +#define MPS3_DDR_SIZE (2 * GiB) +#endif + +static const uint32_t an505_oscclk[] = { + 40000000, + 24580000, + 25000000, +}; + +static const uint32_t an524_oscclk[] = { + 24000000, + 32000000, + 50000000, + 50000000, + 24576000, + 23750000, +}; + +static const RAMInfo an505_raminfo[] = { { + .name = "ssram-0", + .base = 0x00000000, + .size = 0x00400000, + .mpc = 0, + .mrindex = 0, + }, { + .name = "ssram-1", + .base = 0x28000000, + .size = 0x00200000, + .mpc = 1, + .mrindex = 1, + }, { + .name = "ssram-2", + .base = 0x28200000, + .size = 0x00200000, + .mpc = 2, + .mrindex = 2, + }, { + .name = "ssram-0-alias", + .base = 0x00400000, + .size = 0x00400000, + .mpc = 0, + .mrindex = 3, + .flags = IS_ALIAS, + }, { + /* Use the largest bit of contiguous RAM as our "system memory" */ + .name = "mps.ram", + .base = 0x80000000, + .size = 16 * MiB, + .mpc = -1, + .mrindex = -1, + }, { + .name = NULL, + }, +}; + +/* + * Note that the addresses and MPC numbering here should match up + * with those used in remap_memory(), which can swap the BRAM and QSPI. 
+ */ +static const RAMInfo an524_raminfo[] = { { + .name = "bram", + .base = 0x00000000, + .size = 512 * KiB, + .mpc = 0, + .mrindex = 0, + }, { + /* We don't model QSPI flash yet; for now expose it as simple ROM */ + .name = "QSPI", + .base = 0x28000000, + .size = 8 * MiB, + .mpc = 1, + .mrindex = 1, + .flags = IS_ROM, + }, { + .name = "DDR", + .base = 0x60000000, + .size = MPS3_DDR_SIZE, + .mpc = 2, + .mrindex = -1, + }, { + .name = NULL, + }, +}; + +static const RAMInfo an547_raminfo[] = { { + .name = "sram", + .base = 0x01000000, + .size = 2 * MiB, + .mpc = 0, + .mrindex = 1, + }, { + .name = "sram 2", + .base = 0x21000000, + .size = 4 * MiB, + .mpc = -1, + .mrindex = 3, + }, { + /* We don't model QSPI flash yet; for now expose it as simple ROM */ + .name = "QSPI", + .base = 0x28000000, + .size = 8 * MiB, + .mpc = 1, + .mrindex = 4, + .flags = IS_ROM, + }, { + .name = "DDR", + .base = 0x60000000, + .size = MPS3_DDR_SIZE, + .mpc = 2, + .mrindex = -1, + }, { + .name = NULL, + }, +}; + +static const RAMInfo *find_raminfo_for_mpc(MPS2TZMachineState *mms, int mpc) +{ + MPS2TZMachineClass *mmc = MPS2TZ_MACHINE_GET_CLASS(mms); + const RAMInfo *p; + const RAMInfo *found = NULL; + + for (p = mmc->raminfo; p->name; p++) { + if (p->mpc == mpc && !(p->flags & IS_ALIAS)) { + /* There should only be one entry in the array for this MPC */ + g_assert(!found); + found = p; + } + } + /* if raminfo array doesn't have an entry for each MPC this is a bug */ + assert(found); + return found; +} + +static MemoryRegion *mr_for_raminfo(MPS2TZMachineState *mms, + const RAMInfo *raminfo) +{ + /* Return an initialized MemoryRegion for the RAMInfo. */ + MemoryRegion *ram; + + if (raminfo->mrindex < 0) { + /* Means this RAMInfo is for QEMU's "system memory" */ + MachineState *machine = MACHINE(mms); + assert(!(raminfo->flags & IS_ROM)); + return machine->ram; + } + + assert(raminfo->mrindex < MPS2TZ_RAM_MAX); + ram = &mms->ram[raminfo->mrindex]; + + memory_region_init_ram(ram, NULL, raminfo->name, + raminfo->size, &error_fatal); + if (raminfo->flags & IS_ROM) { + memory_region_set_readonly(ram, true); + } + return ram; +} + +/* Create an alias of an entire original MemoryRegion @orig + * located at @base in the memory map. + */ +static void make_ram_alias(MemoryRegion *mr, const char *name, + MemoryRegion *orig, hwaddr base) +{ + memory_region_init_alias(mr, NULL, name, orig, 0, + memory_region_size(orig)); + memory_region_add_subregion(get_system_memory(), base, mr); +} + +static qemu_irq get_sse_irq_in(MPS2TZMachineState *mms, int irqno) +{ + /* + * Return a qemu_irq which will signal IRQ n to all CPUs in the + * SSE. The irqno should be as the CPU sees it, so the first + * external-to-the-SSE interrupt is 32. + */ + MachineClass *mc = MACHINE_GET_CLASS(mms); + MPS2TZMachineClass *mmc = MPS2TZ_MACHINE_GET_CLASS(mms); + + assert(irqno >= 32 && irqno < (mmc->numirq + 32)); + + /* + * Convert from "CPU irq number" (as listed in the FPGA image + * documentation) to the SSE external-interrupt number. + */ + irqno -= 32; + + if (mc->max_cpus > 1) { + return qdev_get_gpio_in(DEVICE(&mms->cpu_irq_splitter[irqno]), 0); + } else { + return qdev_get_gpio_in_named(DEVICE(&mms->iotkit), "EXP_IRQ", irqno); + } +} + +/* Union describing the device-specific extra data we pass to the devfn. */ +typedef union PPCExtraData { + bool i2c_internal; +} PPCExtraData; + +/* Most of the devices in the AN505 FPGA image sit behind + * Peripheral Protection Controllers. 
These data structures + * define the layout of which devices sit behind which PPCs. + * The devfn for each port is a function which creates, configures + * and initializes the device, returning the MemoryRegion which + * needs to be plugged into the downstream end of the PPC port. + */ +typedef MemoryRegion *MakeDevFn(MPS2TZMachineState *mms, void *opaque, + const char *name, hwaddr size, + const int *irqs, + const PPCExtraData *extradata); + +typedef struct PPCPortInfo { + const char *name; + MakeDevFn *devfn; + void *opaque; + hwaddr addr; + hwaddr size; + int irqs[3]; /* currently no device needs more IRQ lines than this */ + PPCExtraData extradata; /* to pass device-specific info to the devfn */ +} PPCPortInfo; + +typedef struct PPCInfo { + const char *name; + PPCPortInfo ports[TZ_NUM_PORTS]; +} PPCInfo; + +static MemoryRegion *make_unimp_dev(MPS2TZMachineState *mms, + void *opaque, + const char *name, hwaddr size, + const int *irqs, + const PPCExtraData *extradata) +{ + /* Initialize, configure and realize a TYPE_UNIMPLEMENTED_DEVICE, + * and return a pointer to its MemoryRegion. + */ + UnimplementedDeviceState *uds = opaque; + + object_initialize_child(OBJECT(mms), name, uds, TYPE_UNIMPLEMENTED_DEVICE); + qdev_prop_set_string(DEVICE(uds), "name", name); + qdev_prop_set_uint64(DEVICE(uds), "size", size); + sysbus_realize(SYS_BUS_DEVICE(uds), &error_fatal); + return sysbus_mmio_get_region(SYS_BUS_DEVICE(uds), 0); +} + +static MemoryRegion *make_uart(MPS2TZMachineState *mms, void *opaque, + const char *name, hwaddr size, + const int *irqs, const PPCExtraData *extradata) +{ + /* The irq[] array is tx, rx, combined, in that order */ + MPS2TZMachineClass *mmc = MPS2TZ_MACHINE_GET_CLASS(mms); + CMSDKAPBUART *uart = opaque; + int i = uart - &mms->uart[0]; + SysBusDevice *s; + DeviceState *orgate_dev = DEVICE(&mms->uart_irq_orgate); + + object_initialize_child(OBJECT(mms), name, uart, TYPE_CMSDK_APB_UART); + qdev_prop_set_chr(DEVICE(uart), "chardev", serial_hd(i)); + qdev_prop_set_uint32(DEVICE(uart), "pclk-frq", mmc->apb_periph_frq); + sysbus_realize(SYS_BUS_DEVICE(uart), &error_fatal); + s = SYS_BUS_DEVICE(uart); + sysbus_connect_irq(s, 0, get_sse_irq_in(mms, irqs[0])); + sysbus_connect_irq(s, 1, get_sse_irq_in(mms, irqs[1])); + sysbus_connect_irq(s, 2, qdev_get_gpio_in(orgate_dev, i * 2)); + sysbus_connect_irq(s, 3, qdev_get_gpio_in(orgate_dev, i * 2 + 1)); + sysbus_connect_irq(s, 4, get_sse_irq_in(mms, irqs[2])); + return sysbus_mmio_get_region(SYS_BUS_DEVICE(uart), 0); +} + +static MemoryRegion *make_scc(MPS2TZMachineState *mms, void *opaque, + const char *name, hwaddr size, + const int *irqs, const PPCExtraData *extradata) +{ + MPS2SCC *scc = opaque; + DeviceState *sccdev; + MPS2TZMachineClass *mmc = MPS2TZ_MACHINE_GET_CLASS(mms); + uint32_t i; + + object_initialize_child(OBJECT(mms), "scc", scc, TYPE_MPS2_SCC); + sccdev = DEVICE(scc); + qdev_prop_set_uint32(sccdev, "scc-cfg0", mms->remap ? 
1 : 0); + qdev_prop_set_uint32(sccdev, "scc-cfg4", 0x2); + qdev_prop_set_uint32(sccdev, "scc-aid", 0x00200008); + qdev_prop_set_uint32(sccdev, "scc-id", mmc->scc_id); + qdev_prop_set_uint32(sccdev, "len-oscclk", mmc->len_oscclk); + for (i = 0; i < mmc->len_oscclk; i++) { + g_autofree char *propname = g_strdup_printf("oscclk[%u]", i); + qdev_prop_set_uint32(sccdev, propname, mmc->oscclk[i]); + } + sysbus_realize(SYS_BUS_DEVICE(scc), &error_fatal); + return sysbus_mmio_get_region(SYS_BUS_DEVICE(sccdev), 0); +} + +static MemoryRegion *make_fpgaio(MPS2TZMachineState *mms, void *opaque, + const char *name, hwaddr size, + const int *irqs, const PPCExtraData *extradata) +{ + MPS2FPGAIO *fpgaio = opaque; + MPS2TZMachineClass *mmc = MPS2TZ_MACHINE_GET_CLASS(mms); + + object_initialize_child(OBJECT(mms), "fpgaio", fpgaio, TYPE_MPS2_FPGAIO); + qdev_prop_set_uint32(DEVICE(fpgaio), "num-leds", mmc->fpgaio_num_leds); + qdev_prop_set_bit(DEVICE(fpgaio), "has-switches", mmc->fpgaio_has_switches); + qdev_prop_set_bit(DEVICE(fpgaio), "has-dbgctrl", mmc->fpgaio_has_dbgctrl); + sysbus_realize(SYS_BUS_DEVICE(fpgaio), &error_fatal); + return sysbus_mmio_get_region(SYS_BUS_DEVICE(fpgaio), 0); +} + +static MemoryRegion *make_eth_dev(MPS2TZMachineState *mms, void *opaque, + const char *name, hwaddr size, + const int *irqs, + const PPCExtraData *extradata) +{ + SysBusDevice *s; + NICInfo *nd = &nd_table[0]; + + /* In hardware this is a LAN9220; the LAN9118 is software compatible + * except that it doesn't support the checksum-offload feature. + */ + qemu_check_nic_model(nd, "lan9118"); + mms->lan9118 = qdev_new(TYPE_LAN9118); + qdev_set_nic_properties(mms->lan9118, nd); + + s = SYS_BUS_DEVICE(mms->lan9118); + sysbus_realize_and_unref(s, &error_fatal); + sysbus_connect_irq(s, 0, get_sse_irq_in(mms, irqs[0])); + return sysbus_mmio_get_region(s, 0); +} + +static MemoryRegion *make_eth_usb(MPS2TZMachineState *mms, void *opaque, + const char *name, hwaddr size, + const int *irqs, + const PPCExtraData *extradata) +{ + /* + * The AN524 makes the ethernet and USB share a PPC port. + * irqs[] is the ethernet IRQ. + */ + SysBusDevice *s; + NICInfo *nd = &nd_table[0]; + + memory_region_init(&mms->eth_usb_container, OBJECT(mms), + "mps2-tz-eth-usb-container", 0x200000); + + /* + * In hardware this is a LAN9220; the LAN9118 is software compatible + * except that it doesn't support the checksum-offload feature. + */ + qemu_check_nic_model(nd, "lan9118"); + mms->lan9118 = qdev_new(TYPE_LAN9118); + qdev_set_nic_properties(mms->lan9118, nd); + + s = SYS_BUS_DEVICE(mms->lan9118); + sysbus_realize_and_unref(s, &error_fatal); + sysbus_connect_irq(s, 0, get_sse_irq_in(mms, irqs[0])); + + memory_region_add_subregion(&mms->eth_usb_container, + 0, sysbus_mmio_get_region(s, 0)); + + /* The USB OTG controller is an ISP1763; we don't have a model of it. 
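+ * Rather than leave a hole in the address map, the code below covers
+ * its 1MB register window with a TYPE_UNIMPLEMENTED_DEVICE stub, so
+ * guest accesses are logged and read as zero instead of faulting.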
*/ + object_initialize_child(OBJECT(mms), "usb-otg", + &mms->usb, TYPE_UNIMPLEMENTED_DEVICE); + qdev_prop_set_string(DEVICE(&mms->usb), "name", "usb-otg"); + qdev_prop_set_uint64(DEVICE(&mms->usb), "size", 0x100000); + s = SYS_BUS_DEVICE(&mms->usb); + sysbus_realize(s, &error_fatal); + + memory_region_add_subregion(&mms->eth_usb_container, + 0x100000, sysbus_mmio_get_region(s, 0)); + + return &mms->eth_usb_container; +} + +static MemoryRegion *make_mpc(MPS2TZMachineState *mms, void *opaque, + const char *name, hwaddr size, + const int *irqs, const PPCExtraData *extradata) +{ + TZMPC *mpc = opaque; + int i = mpc - &mms->mpc[0]; + MemoryRegion *upstream; + const RAMInfo *raminfo = find_raminfo_for_mpc(mms, i); + MemoryRegion *ram = mr_for_raminfo(mms, raminfo); + + object_initialize_child(OBJECT(mms), name, mpc, TYPE_TZ_MPC); + object_property_set_link(OBJECT(mpc), "downstream", OBJECT(ram), + &error_fatal); + sysbus_realize(SYS_BUS_DEVICE(mpc), &error_fatal); + /* Map the upstream end of the MPC into system memory */ + upstream = sysbus_mmio_get_region(SYS_BUS_DEVICE(mpc), 1); + memory_region_add_subregion(get_system_memory(), raminfo->base, upstream); + /* and connect its interrupt to the IoTKit */ + qdev_connect_gpio_out_named(DEVICE(mpc), "irq", 0, + qdev_get_gpio_in_named(DEVICE(&mms->iotkit), + "mpcexp_status", i)); + + /* Return the register interface MR for our caller to map behind the PPC */ + return sysbus_mmio_get_region(SYS_BUS_DEVICE(mpc), 0); +} + +static hwaddr boot_mem_base(MPS2TZMachineState *mms) +{ + /* + * Return the canonical address of the block which will be mapped + * at address 0x0 (i.e. where the vector table is). + * This is usually 0, but if the AN524 alternate memory map is + * enabled it will be the base address of the QSPI block. + */ + return mms->remap ? 0x28000000 : 0; +} + +static void remap_memory(MPS2TZMachineState *mms, int map) +{ + /* + * Remap the memory for the AN524. 'map' is the value of + * SCC CFG_REG0 bit 0, i.e. 0 for the default map and 1 + * for the "option 1" mapping where QSPI is at address 0. + * + * Effectively we need to swap around the "upstream" ends of + * MPC 0 and MPC 1. + */ + MPS2TZMachineClass *mmc = MPS2TZ_MACHINE_GET_CLASS(mms); + int i; + + if (mmc->fpga_type != FPGA_AN524) { + return; + } + + memory_region_transaction_begin(); + for (i = 0; i < 2; i++) { + TZMPC *mpc = &mms->mpc[i]; + MemoryRegion *upstream = sysbus_mmio_get_region(SYS_BUS_DEVICE(mpc), 1); + hwaddr addr = (i ^ map) ? 0x28000000 : 0; + + memory_region_set_address(upstream, addr); + } + memory_region_transaction_commit(); +} + +static void remap_irq_fn(void *opaque, int n, int level) +{ + MPS2TZMachineState *mms = opaque; + + remap_memory(mms, level); +} + +static MemoryRegion *make_dma(MPS2TZMachineState *mms, void *opaque, + const char *name, hwaddr size, + const int *irqs, const PPCExtraData *extradata) +{ + /* The irq[] array is DMACINTR, DMACINTERR, DMACINTTC, in that order */ + PL080State *dma = opaque; + int i = dma - &mms->dma[0]; + SysBusDevice *s; + char *mscname = g_strdup_printf("%s-msc", name); + TZMSC *msc = &mms->msc[i]; + DeviceState *iotkitdev = DEVICE(&mms->iotkit); + MemoryRegion *msc_upstream; + MemoryRegion *msc_downstream; + + /* + * Each DMA device is a PL081 whose transaction master interface + * is guarded by a Master Security Controller. The downstream end of + * the MSC connects to the IoTKit AHB Slave Expansion port, so the + * DMA devices can see all devices and memory that the CPU does. 
+ */ + object_initialize_child(OBJECT(mms), mscname, msc, TYPE_TZ_MSC); + msc_downstream = sysbus_mmio_get_region(SYS_BUS_DEVICE(&mms->iotkit), 0); + object_property_set_link(OBJECT(msc), "downstream", + OBJECT(msc_downstream), &error_fatal); + object_property_set_link(OBJECT(msc), "idau", OBJECT(mms), &error_fatal); + sysbus_realize(SYS_BUS_DEVICE(msc), &error_fatal); + + qdev_connect_gpio_out_named(DEVICE(msc), "irq", 0, + qdev_get_gpio_in_named(iotkitdev, + "mscexp_status", i)); + qdev_connect_gpio_out_named(iotkitdev, "mscexp_clear", i, + qdev_get_gpio_in_named(DEVICE(msc), + "irq_clear", 0)); + qdev_connect_gpio_out_named(iotkitdev, "mscexp_ns", i, + qdev_get_gpio_in_named(DEVICE(msc), + "cfg_nonsec", 0)); + qdev_connect_gpio_out(DEVICE(&mms->sec_resp_splitter), + ARRAY_SIZE(mms->ppc) + i, + qdev_get_gpio_in_named(DEVICE(msc), + "cfg_sec_resp", 0)); + msc_upstream = sysbus_mmio_get_region(SYS_BUS_DEVICE(msc), 0); + + object_initialize_child(OBJECT(mms), name, dma, TYPE_PL081); + object_property_set_link(OBJECT(dma), "downstream", OBJECT(msc_upstream), + &error_fatal); + sysbus_realize(SYS_BUS_DEVICE(dma), &error_fatal); + + s = SYS_BUS_DEVICE(dma); + /* Wire up DMACINTR, DMACINTERR, DMACINTTC */ + sysbus_connect_irq(s, 0, get_sse_irq_in(mms, irqs[0])); + sysbus_connect_irq(s, 1, get_sse_irq_in(mms, irqs[1])); + sysbus_connect_irq(s, 2, get_sse_irq_in(mms, irqs[2])); + + g_free(mscname); + return sysbus_mmio_get_region(s, 0); +} + +static MemoryRegion *make_spi(MPS2TZMachineState *mms, void *opaque, + const char *name, hwaddr size, + const int *irqs, const PPCExtraData *extradata) +{ + /* + * The AN505 has five PL022 SPI controllers. + * One of these should have the LCD controller behind it; the others + * are connected only to the FPGA's "general purpose SPI connector" + * or "shield" expansion connectors. + * Note that if we do implement devices behind SPI, the chip select + * lines are set via the "MISC" register in the MPS2 FPGAIO device. + */ + PL022State *spi = opaque; + SysBusDevice *s; + + object_initialize_child(OBJECT(mms), name, spi, TYPE_PL022); + sysbus_realize(SYS_BUS_DEVICE(spi), &error_fatal); + s = SYS_BUS_DEVICE(spi); + sysbus_connect_irq(s, 0, get_sse_irq_in(mms, irqs[0])); + return sysbus_mmio_get_region(s, 0); +} + +static MemoryRegion *make_i2c(MPS2TZMachineState *mms, void *opaque, + const char *name, hwaddr size, + const int *irqs, const PPCExtraData *extradata) +{ + ArmSbconI2CState *i2c = opaque; + SysBusDevice *s; + + object_initialize_child(OBJECT(mms), name, i2c, TYPE_ARM_SBCON_I2C); + s = SYS_BUS_DEVICE(i2c); + sysbus_realize(s, &error_fatal); + + /* + * If this is an internal-use-only i2c bus, mark it full + * so that user-created i2c devices are not plugged into it. + * If we implement models of any on-board i2c devices that + * plug in to one of the internal-use-only buses, then we will + * need to create and plug those in here before we mark the + * bus as full. 
+ */ + if (extradata->i2c_internal) { + BusState *qbus = qdev_get_child_bus(DEVICE(i2c), "i2c"); + qbus_mark_full(qbus); + } + + return sysbus_mmio_get_region(s, 0); +} + +static MemoryRegion *make_rtc(MPS2TZMachineState *mms, void *opaque, + const char *name, hwaddr size, + const int *irqs, const PPCExtraData *extradata) +{ + PL031State *pl031 = opaque; + SysBusDevice *s; + + object_initialize_child(OBJECT(mms), name, pl031, TYPE_PL031); + s = SYS_BUS_DEVICE(pl031); + sysbus_realize(s, &error_fatal); + /* + * The board docs don't give an IRQ number for the PL031, so + * presumably it is not connected. + */ + return sysbus_mmio_get_region(s, 0); +} + +static void create_non_mpc_ram(MPS2TZMachineState *mms) +{ + /* + * Handle the RAMs which are either not behind MPCs or which are + * aliases to another MPC. + */ + const RAMInfo *p; + MPS2TZMachineClass *mmc = MPS2TZ_MACHINE_GET_CLASS(mms); + + for (p = mmc->raminfo; p->name; p++) { + if (p->flags & IS_ALIAS) { + SysBusDevice *mpc_sbd = SYS_BUS_DEVICE(&mms->mpc[p->mpc]); + MemoryRegion *upstream = sysbus_mmio_get_region(mpc_sbd, 1); + make_ram_alias(&mms->ram[p->mrindex], p->name, upstream, p->base); + } else if (p->mpc == -1) { + /* RAM not behind an MPC */ + MemoryRegion *mr = mr_for_raminfo(mms, p); + memory_region_add_subregion(get_system_memory(), p->base, mr); + } + } +} + +static uint32_t boot_ram_size(MPS2TZMachineState *mms) +{ + /* Return the size of the RAM block at guest address zero */ + const RAMInfo *p; + MPS2TZMachineClass *mmc = MPS2TZ_MACHINE_GET_CLASS(mms); + + /* + * Use a per-board specification (for when the boot RAM is in + * the SSE and so doesn't have a RAMInfo list entry) + */ + if (mmc->boot_ram_size) { + return mmc->boot_ram_size; + } + + for (p = mmc->raminfo; p->name; p++) { + if (p->base == boot_mem_base(mms)) { + return p->size; + } + } + g_assert_not_reached(); +} + +static void mps2tz_common_init(MachineState *machine) +{ + MPS2TZMachineState *mms = MPS2TZ_MACHINE(machine); + MPS2TZMachineClass *mmc = MPS2TZ_MACHINE_GET_CLASS(mms); + MachineClass *mc = MACHINE_GET_CLASS(machine); + MemoryRegion *system_memory = get_system_memory(); + DeviceState *iotkitdev; + DeviceState *dev_splitter; + const PPCInfo *ppcs; + int num_ppcs; + int i; + + if (strcmp(machine->cpu_type, mc->default_cpu_type) != 0) { + error_report("This board can only be used with CPU %s", + mc->default_cpu_type); + exit(1); + } + + if (machine->ram_size != mc->default_ram_size) { + char *sz = size_to_str(mc->default_ram_size); + error_report("Invalid RAM size, should be %s", sz); + g_free(sz); + exit(EXIT_FAILURE); + } + + /* These clocks don't need migration because they are fixed-frequency */ + mms->sysclk = clock_new(OBJECT(machine), "SYSCLK"); + clock_set_hz(mms->sysclk, mmc->sysclk_frq); + mms->s32kclk = clock_new(OBJECT(machine), "S32KCLK"); + clock_set_hz(mms->s32kclk, S32KCLK_FRQ); + + object_initialize_child(OBJECT(machine), TYPE_IOTKIT, &mms->iotkit, + mmc->armsse_type); + iotkitdev = DEVICE(&mms->iotkit); + object_property_set_link(OBJECT(&mms->iotkit), "memory", + OBJECT(system_memory), &error_abort); + qdev_prop_set_uint32(iotkitdev, "EXP_NUMIRQ", mmc->numirq); + qdev_prop_set_uint32(iotkitdev, "init-svtor", mmc->init_svtor); + qdev_prop_set_uint32(iotkitdev, "SRAM_ADDR_WIDTH", mmc->sram_addr_width); + qdev_connect_clock_in(iotkitdev, "MAINCLK", mms->sysclk); + qdev_connect_clock_in(iotkitdev, "S32KCLK", mms->s32kclk); + sysbus_realize(SYS_BUS_DEVICE(&mms->iotkit), &error_fatal); + + /* + * If this board has more than one CPU, then 
we need to create splitters + * to feed the IRQ inputs for each CPU in the SSE from each device in the + * board. If there is only one CPU, we can just wire the device IRQ + * directly to the SSE's IRQ input. + */ + assert(mmc->numirq <= MPS2TZ_NUMIRQ_MAX); + if (mc->max_cpus > 1) { + for (i = 0; i < mmc->numirq; i++) { + char *name = g_strdup_printf("mps2-irq-splitter%d", i); + SplitIRQ *splitter = &mms->cpu_irq_splitter[i]; + + object_initialize_child_with_props(OBJECT(machine), name, + splitter, sizeof(*splitter), + TYPE_SPLIT_IRQ, &error_fatal, + NULL); + g_free(name); + + object_property_set_int(OBJECT(splitter), "num-lines", 2, + &error_fatal); + qdev_realize(DEVICE(splitter), NULL, &error_fatal); + qdev_connect_gpio_out(DEVICE(splitter), 0, + qdev_get_gpio_in_named(DEVICE(&mms->iotkit), + "EXP_IRQ", i)); + qdev_connect_gpio_out(DEVICE(splitter), 1, + qdev_get_gpio_in_named(DEVICE(&mms->iotkit), + "EXP_CPU1_IRQ", i)); + } + } + + /* The sec_resp_cfg output from the IoTKit must be split into multiple + * lines, one for each of the PPCs we create here, plus one per MSC. + */ + object_initialize_child(OBJECT(machine), "sec-resp-splitter", + &mms->sec_resp_splitter, TYPE_SPLIT_IRQ); + object_property_set_int(OBJECT(&mms->sec_resp_splitter), "num-lines", + ARRAY_SIZE(mms->ppc) + ARRAY_SIZE(mms->msc), + &error_fatal); + qdev_realize(DEVICE(&mms->sec_resp_splitter), NULL, &error_fatal); + dev_splitter = DEVICE(&mms->sec_resp_splitter); + qdev_connect_gpio_out_named(iotkitdev, "sec_resp_cfg", 0, + qdev_get_gpio_in(dev_splitter, 0)); + + /* + * The IoTKit sets up much of the memory layout, including + * the aliases between secure and non-secure regions in the + * address space, and also most of the devices in the system. + * The FPGA itself contains various RAMs and some additional devices. + * The FPGA images have an odd combination of different RAMs, + * because in hardware they are different implementations and + * connected to different buses, giving varying performance/size + * tradeoffs. For QEMU they're all just RAM, though. We arbitrarily + * call the largest lump our "system memory". + */ + + /* + * The overflow IRQs for all UARTs are ORed together. + * Tx, Rx and "combined" IRQs are sent to the NVIC separately. + * Create the OR gate for this: it has one input for the TX overflow + * and one for the RX overflow for each UART we might have. + * (If the board has fewer than the maximum possible number of UARTs + * those inputs are never wired up and are treated as always-zero.) + */ + object_initialize_child(OBJECT(mms), "uart-irq-orgate", + &mms->uart_irq_orgate, TYPE_OR_IRQ); + object_property_set_int(OBJECT(&mms->uart_irq_orgate), "num-lines", + 2 * ARRAY_SIZE(mms->uart), + &error_fatal); + qdev_realize(DEVICE(&mms->uart_irq_orgate), NULL, &error_fatal); + qdev_connect_gpio_out(DEVICE(&mms->uart_irq_orgate), 0, + get_sse_irq_in(mms, mmc->uart_overflow_irq)); + + /* Most of the devices in the FPGA are behind Peripheral Protection + * Controllers. 
The required order for initializing things is: + * + initialize the PPC + * + initialize, configure and realize downstream devices + * + connect downstream device MemoryRegions to the PPC + * + realize the PPC + * + map the PPC's MemoryRegions to the places in the address map + * where the downstream devices should appear + * + wire up the PPC's control lines to the IoTKit object + */ + + const PPCInfo an505_ppcs[] = { { + .name = "apb_ppcexp0", + .ports = { + { "ssram-0-mpc", make_mpc, &mms->mpc[0], 0x58007000, 0x1000 }, + { "ssram-1-mpc", make_mpc, &mms->mpc[1], 0x58008000, 0x1000 }, + { "ssram-2-mpc", make_mpc, &mms->mpc[2], 0x58009000, 0x1000 }, + }, + }, { + .name = "apb_ppcexp1", + .ports = { + { "spi0", make_spi, &mms->spi[0], 0x40205000, 0x1000, { 51 } }, + { "spi1", make_spi, &mms->spi[1], 0x40206000, 0x1000, { 52 } }, + { "spi2", make_spi, &mms->spi[2], 0x40209000, 0x1000, { 53 } }, + { "spi3", make_spi, &mms->spi[3], 0x4020a000, 0x1000, { 54 } }, + { "spi4", make_spi, &mms->spi[4], 0x4020b000, 0x1000, { 55 } }, + { "uart0", make_uart, &mms->uart[0], 0x40200000, 0x1000, { 32, 33, 42 } }, + { "uart1", make_uart, &mms->uart[1], 0x40201000, 0x1000, { 34, 35, 43 } }, + { "uart2", make_uart, &mms->uart[2], 0x40202000, 0x1000, { 36, 37, 44 } }, + { "uart3", make_uart, &mms->uart[3], 0x40203000, 0x1000, { 38, 39, 45 } }, + { "uart4", make_uart, &mms->uart[4], 0x40204000, 0x1000, { 40, 41, 46 } }, + { "i2c0", make_i2c, &mms->i2c[0], 0x40207000, 0x1000, {}, + { .i2c_internal = true /* touchscreen */ } }, + { "i2c1", make_i2c, &mms->i2c[1], 0x40208000, 0x1000, {}, + { .i2c_internal = true /* audio conf */ } }, + { "i2c2", make_i2c, &mms->i2c[2], 0x4020c000, 0x1000, {}, + { .i2c_internal = false /* shield 0 */ } }, + { "i2c3", make_i2c, &mms->i2c[3], 0x4020d000, 0x1000, {}, + { .i2c_internal = false /* shield 1 */ } }, + }, + }, { + .name = "apb_ppcexp2", + .ports = { + { "scc", make_scc, &mms->scc, 0x40300000, 0x1000 }, + { "i2s-audio", make_unimp_dev, &mms->i2s_audio, + 0x40301000, 0x1000 }, + { "fpgaio", make_fpgaio, &mms->fpgaio, 0x40302000, 0x1000 }, + }, + }, { + .name = "ahb_ppcexp0", + .ports = { + { "gfx", make_unimp_dev, &mms->gfx, 0x41000000, 0x140000 }, + { "gpio0", make_unimp_dev, &mms->gpio[0], 0x40100000, 0x1000 }, + { "gpio1", make_unimp_dev, &mms->gpio[1], 0x40101000, 0x1000 }, + { "gpio2", make_unimp_dev, &mms->gpio[2], 0x40102000, 0x1000 }, + { "gpio3", make_unimp_dev, &mms->gpio[3], 0x40103000, 0x1000 }, + { "eth", make_eth_dev, NULL, 0x42000000, 0x100000, { 48 } }, + }, + }, { + .name = "ahb_ppcexp1", + .ports = { + { "dma0", make_dma, &mms->dma[0], 0x40110000, 0x1000, { 58, 56, 57 } }, + { "dma1", make_dma, &mms->dma[1], 0x40111000, 0x1000, { 61, 59, 60 } }, + { "dma2", make_dma, &mms->dma[2], 0x40112000, 0x1000, { 64, 62, 63 } }, + { "dma3", make_dma, &mms->dma[3], 0x40113000, 0x1000, { 67, 65, 66 } }, + }, + }, + }; + + const PPCInfo an524_ppcs[] = { { + .name = "apb_ppcexp0", + .ports = { + { "bram-mpc", make_mpc, &mms->mpc[0], 0x58007000, 0x1000 }, + { "qspi-mpc", make_mpc, &mms->mpc[1], 0x58008000, 0x1000 }, + { "ddr-mpc", make_mpc, &mms->mpc[2], 0x58009000, 0x1000 }, + }, + }, { + .name = "apb_ppcexp1", + .ports = { + { "i2c0", make_i2c, &mms->i2c[0], 0x41200000, 0x1000, {}, + { .i2c_internal = true /* touchscreen */ } }, + { "i2c1", make_i2c, &mms->i2c[1], 0x41201000, 0x1000, {}, + { .i2c_internal = true /* audio conf */ } }, + { "spi0", make_spi, &mms->spi[0], 0x41202000, 0x1000, { 52 } }, + { "spi1", make_spi, &mms->spi[1], 0x41203000, 0x1000, { 53 } }, + { 
"spi2", make_spi, &mms->spi[2], 0x41204000, 0x1000, { 54 } }, + { "i2c2", make_i2c, &mms->i2c[2], 0x41205000, 0x1000, {}, + { .i2c_internal = false /* shield 0 */ } }, + { "i2c3", make_i2c, &mms->i2c[3], 0x41206000, 0x1000, {}, + { .i2c_internal = false /* shield 1 */ } }, + { /* port 7 reserved */ }, + { "i2c4", make_i2c, &mms->i2c[4], 0x41208000, 0x1000, {}, + { .i2c_internal = true /* DDR4 EEPROM */ } }, + }, + }, { + .name = "apb_ppcexp2", + .ports = { + { "scc", make_scc, &mms->scc, 0x41300000, 0x1000 }, + { "i2s-audio", make_unimp_dev, &mms->i2s_audio, + 0x41301000, 0x1000 }, + { "fpgaio", make_fpgaio, &mms->fpgaio, 0x41302000, 0x1000 }, + { "uart0", make_uart, &mms->uart[0], 0x41303000, 0x1000, { 32, 33, 42 } }, + { "uart1", make_uart, &mms->uart[1], 0x41304000, 0x1000, { 34, 35, 43 } }, + { "uart2", make_uart, &mms->uart[2], 0x41305000, 0x1000, { 36, 37, 44 } }, + { "uart3", make_uart, &mms->uart[3], 0x41306000, 0x1000, { 38, 39, 45 } }, + { "uart4", make_uart, &mms->uart[4], 0x41307000, 0x1000, { 40, 41, 46 } }, + { "uart5", make_uart, &mms->uart[5], 0x41308000, 0x1000, { 124, 125, 126 } }, + + { /* port 9 reserved */ }, + { "clcd", make_unimp_dev, &mms->cldc, 0x4130a000, 0x1000 }, + { "rtc", make_rtc, &mms->rtc, 0x4130b000, 0x1000 }, + }, + }, { + .name = "ahb_ppcexp0", + .ports = { + { "gpio0", make_unimp_dev, &mms->gpio[0], 0x41100000, 0x1000 }, + { "gpio1", make_unimp_dev, &mms->gpio[1], 0x41101000, 0x1000 }, + { "gpio2", make_unimp_dev, &mms->gpio[2], 0x41102000, 0x1000 }, + { "gpio3", make_unimp_dev, &mms->gpio[3], 0x41103000, 0x1000 }, + { "eth-usb", make_eth_usb, NULL, 0x41400000, 0x200000, { 48 } }, + }, + }, + }; + + const PPCInfo an547_ppcs[] = { { + .name = "apb_ppcexp0", + .ports = { + { "ssram-mpc", make_mpc, &mms->mpc[0], 0x57000000, 0x1000 }, + { "qspi-mpc", make_mpc, &mms->mpc[1], 0x57001000, 0x1000 }, + { "ddr-mpc", make_mpc, &mms->mpc[2], 0x57002000, 0x1000 }, + }, + }, { + .name = "apb_ppcexp1", + .ports = { + { "i2c0", make_i2c, &mms->i2c[0], 0x49200000, 0x1000, {}, + { .i2c_internal = true /* touchscreen */ } }, + { "i2c1", make_i2c, &mms->i2c[1], 0x49201000, 0x1000, {}, + { .i2c_internal = true /* audio conf */ } }, + { "spi0", make_spi, &mms->spi[0], 0x49202000, 0x1000, { 53 } }, + { "spi1", make_spi, &mms->spi[1], 0x49203000, 0x1000, { 54 } }, + { "spi2", make_spi, &mms->spi[2], 0x49204000, 0x1000, { 55 } }, + { "i2c2", make_i2c, &mms->i2c[2], 0x49205000, 0x1000, {}, + { .i2c_internal = false /* shield 0 */ } }, + { "i2c3", make_i2c, &mms->i2c[3], 0x49206000, 0x1000, {}, + { .i2c_internal = false /* shield 1 */ } }, + { /* port 7 reserved */ }, + { "i2c4", make_i2c, &mms->i2c[4], 0x49208000, 0x1000, {}, + { .i2c_internal = true /* DDR4 EEPROM */ } }, + }, + }, { + .name = "apb_ppcexp2", + .ports = { + { "scc", make_scc, &mms->scc, 0x49300000, 0x1000 }, + { "i2s-audio", make_unimp_dev, &mms->i2s_audio, 0x49301000, 0x1000 }, + { "fpgaio", make_fpgaio, &mms->fpgaio, 0x49302000, 0x1000 }, + { "uart0", make_uart, &mms->uart[0], 0x49303000, 0x1000, { 33, 34, 43 } }, + { "uart1", make_uart, &mms->uart[1], 0x49304000, 0x1000, { 35, 36, 44 } }, + { "uart2", make_uart, &mms->uart[2], 0x49305000, 0x1000, { 37, 38, 45 } }, + { "uart3", make_uart, &mms->uart[3], 0x49306000, 0x1000, { 39, 40, 46 } }, + { "uart4", make_uart, &mms->uart[4], 0x49307000, 0x1000, { 41, 42, 47 } }, + { "uart5", make_uart, &mms->uart[5], 0x49308000, 0x1000, { 125, 126, 127 } }, + + { /* port 9 reserved */ }, + { "clcd", make_unimp_dev, &mms->cldc, 0x4930a000, 0x1000 }, + { "rtc", make_rtc, 
&mms->rtc, 0x4930b000, 0x1000 }, + }, + }, { + .name = "ahb_ppcexp0", + .ports = { + { "gpio0", make_unimp_dev, &mms->gpio[0], 0x41100000, 0x1000 }, + { "gpio1", make_unimp_dev, &mms->gpio[1], 0x41101000, 0x1000 }, + { "gpio2", make_unimp_dev, &mms->gpio[2], 0x41102000, 0x1000 }, + { "gpio3", make_unimp_dev, &mms->gpio[3], 0x41103000, 0x1000 }, + { "eth-usb", make_eth_usb, NULL, 0x41400000, 0x200000, { 49 } }, + }, + }, + }; + + switch (mmc->fpga_type) { + case FPGA_AN505: + case FPGA_AN521: + ppcs = an505_ppcs; + num_ppcs = ARRAY_SIZE(an505_ppcs); + break; + case FPGA_AN524: + ppcs = an524_ppcs; + num_ppcs = ARRAY_SIZE(an524_ppcs); + break; + case FPGA_AN547: + ppcs = an547_ppcs; + num_ppcs = ARRAY_SIZE(an547_ppcs); + break; + default: + g_assert_not_reached(); + } + + for (i = 0; i < num_ppcs; i++) { + const PPCInfo *ppcinfo = &ppcs[i]; + TZPPC *ppc = &mms->ppc[i]; + DeviceState *ppcdev; + int port; + char *gpioname; + + object_initialize_child(OBJECT(machine), ppcinfo->name, ppc, + TYPE_TZ_PPC); + ppcdev = DEVICE(ppc); + + for (port = 0; port < TZ_NUM_PORTS; port++) { + const PPCPortInfo *pinfo = &ppcinfo->ports[port]; + MemoryRegion *mr; + char *portname; + + if (!pinfo->devfn) { + continue; + } + + mr = pinfo->devfn(mms, pinfo->opaque, pinfo->name, pinfo->size, + pinfo->irqs, &pinfo->extradata); + portname = g_strdup_printf("port[%d]", port); + object_property_set_link(OBJECT(ppc), portname, OBJECT(mr), + &error_fatal); + g_free(portname); + } + + sysbus_realize(SYS_BUS_DEVICE(ppc), &error_fatal); + + for (port = 0; port < TZ_NUM_PORTS; port++) { + const PPCPortInfo *pinfo = &ppcinfo->ports[port]; + + if (!pinfo->devfn) { + continue; + } + sysbus_mmio_map(SYS_BUS_DEVICE(ppc), port, pinfo->addr); + + gpioname = g_strdup_printf("%s_nonsec", ppcinfo->name); + qdev_connect_gpio_out_named(iotkitdev, gpioname, port, + qdev_get_gpio_in_named(ppcdev, + "cfg_nonsec", + port)); + g_free(gpioname); + gpioname = g_strdup_printf("%s_ap", ppcinfo->name); + qdev_connect_gpio_out_named(iotkitdev, gpioname, port, + qdev_get_gpio_in_named(ppcdev, + "cfg_ap", port)); + g_free(gpioname); + } + + gpioname = g_strdup_printf("%s_irq_enable", ppcinfo->name); + qdev_connect_gpio_out_named(iotkitdev, gpioname, 0, + qdev_get_gpio_in_named(ppcdev, + "irq_enable", 0)); + g_free(gpioname); + gpioname = g_strdup_printf("%s_irq_clear", ppcinfo->name); + qdev_connect_gpio_out_named(iotkitdev, gpioname, 0, + qdev_get_gpio_in_named(ppcdev, + "irq_clear", 0)); + g_free(gpioname); + gpioname = g_strdup_printf("%s_irq_status", ppcinfo->name); + qdev_connect_gpio_out_named(ppcdev, "irq", 0, + qdev_get_gpio_in_named(iotkitdev, + gpioname, 0)); + g_free(gpioname); + + qdev_connect_gpio_out(dev_splitter, i, + qdev_get_gpio_in_named(ppcdev, + "cfg_sec_resp", 0)); + } + + create_unimplemented_device("FPGA NS PC", 0x48007000, 0x1000); + + if (mmc->fpga_type == FPGA_AN547) { + create_unimplemented_device("U55 timing adapter 0", 0x48102000, 0x1000); + create_unimplemented_device("U55 timing adapter 1", 0x48103000, 0x1000); + } + + create_non_mpc_ram(mms); + + if (mmc->fpga_type == FPGA_AN524) { + /* + * Connect the line from the SCC so that we can remap when the + * guest updates that register. 
+ */ + mms->remap_irq = qemu_allocate_irq(remap_irq_fn, mms, 0); + qdev_connect_gpio_out_named(DEVICE(&mms->scc), "remap", 0, + mms->remap_irq); + } + + armv7m_load_kernel(ARM_CPU(first_cpu), machine->kernel_filename, + boot_ram_size(mms)); +} + +static void mps2_tz_idau_check(IDAUInterface *ii, uint32_t address, + int *iregion, bool *exempt, bool *ns, bool *nsc) +{ + /* + * The MPS2 TZ FPGA images have IDAUs in them which are connected to + * the Master Security Controllers. These have the same logic as + * is used by the IoTKit for the IDAU connected to the CPU, except + * that MSCs don't care about the NSC attribute. + */ + int region = extract32(address, 28, 4); + + *ns = !(region & 1); + *nsc = false; + /* 0xe0000000..0xe00fffff and 0xf0000000..0xf00fffff are exempt */ + *exempt = (address & 0xeff00000) == 0xe0000000; + *iregion = region; +} + +static char *mps2_get_remap(Object *obj, Error **errp) +{ + MPS2TZMachineState *mms = MPS2TZ_MACHINE(obj); + const char *val = mms->remap ? "QSPI" : "BRAM"; + return g_strdup(val); +} + +static void mps2_set_remap(Object *obj, const char *value, Error **errp) +{ + MPS2TZMachineState *mms = MPS2TZ_MACHINE(obj); + + if (!strcmp(value, "BRAM")) { + mms->remap = false; + } else if (!strcmp(value, "QSPI")) { + mms->remap = true; + } else { + error_setg(errp, "Invalid remap value"); + error_append_hint(errp, "Valid values are BRAM and QSPI.\n"); + } +} + +static void mps2_machine_reset(MachineState *machine) +{ + MPS2TZMachineState *mms = MPS2TZ_MACHINE(machine); + + /* + * Set the initial memory mapping before triggering the reset of + * the rest of the system, so that the guest image loader and CPU + * reset see the correct mapping. + */ + remap_memory(mms, mms->remap); + qemu_devices_reset(); +} + +static void mps2tz_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + IDAUInterfaceClass *iic = IDAU_INTERFACE_CLASS(oc); + + mc->init = mps2tz_common_init; + mc->reset = mps2_machine_reset; + iic->check = mps2_tz_idau_check; +} + +static void mps2tz_set_default_ram_info(MPS2TZMachineClass *mmc) +{ + /* + * Set mc->default_ram_size and default_ram_id from the + * information in mmc->raminfo. 
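+ * The entry with mrindex == -1 is the one backed by QEMU's system
+ * memory, so its size becomes the default (and only accepted) value
+ * for the -m option on these boards.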
+ */ + MachineClass *mc = MACHINE_CLASS(mmc); + const RAMInfo *p; + + for (p = mmc->raminfo; p->name; p++) { + if (p->mrindex < 0) { + /* Found the entry for "system memory" */ + mc->default_ram_size = p->size; + mc->default_ram_id = p->name; + return; + } + } + g_assert_not_reached(); +} + +static void mps2tz_an505_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + MPS2TZMachineClass *mmc = MPS2TZ_MACHINE_CLASS(oc); + + mc->desc = "ARM MPS2 with AN505 FPGA image for Cortex-M33"; + mc->default_cpus = 1; + mc->min_cpus = mc->default_cpus; + mc->max_cpus = mc->default_cpus; + mmc->fpga_type = FPGA_AN505; + mc->default_cpu_type = ARM_CPU_TYPE_NAME("cortex-m33"); + mmc->scc_id = 0x41045050; + mmc->sysclk_frq = 20 * 1000 * 1000; /* 20MHz */ + mmc->apb_periph_frq = mmc->sysclk_frq; + mmc->oscclk = an505_oscclk; + mmc->len_oscclk = ARRAY_SIZE(an505_oscclk); + mmc->fpgaio_num_leds = 2; + mmc->fpgaio_has_switches = false; + mmc->fpgaio_has_dbgctrl = false; + mmc->numirq = 92; + mmc->uart_overflow_irq = 47; + mmc->init_svtor = 0x10000000; + mmc->sram_addr_width = 15; + mmc->raminfo = an505_raminfo; + mmc->armsse_type = TYPE_IOTKIT; + mmc->boot_ram_size = 0; + mps2tz_set_default_ram_info(mmc); +} + +static void mps2tz_an521_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + MPS2TZMachineClass *mmc = MPS2TZ_MACHINE_CLASS(oc); + + mc->desc = "ARM MPS2 with AN521 FPGA image for dual Cortex-M33"; + mc->default_cpus = 2; + mc->min_cpus = mc->default_cpus; + mc->max_cpus = mc->default_cpus; + mmc->fpga_type = FPGA_AN521; + mc->default_cpu_type = ARM_CPU_TYPE_NAME("cortex-m33"); + mmc->scc_id = 0x41045210; + mmc->sysclk_frq = 20 * 1000 * 1000; /* 20MHz */ + mmc->apb_periph_frq = mmc->sysclk_frq; + mmc->oscclk = an505_oscclk; /* AN521 is the same as AN505 here */ + mmc->len_oscclk = ARRAY_SIZE(an505_oscclk); + mmc->fpgaio_num_leds = 2; + mmc->fpgaio_has_switches = false; + mmc->fpgaio_has_dbgctrl = false; + mmc->numirq = 92; + mmc->uart_overflow_irq = 47; + mmc->init_svtor = 0x10000000; + mmc->sram_addr_width = 15; + mmc->raminfo = an505_raminfo; /* AN521 is the same as AN505 here */ + mmc->armsse_type = TYPE_SSE200; + mmc->boot_ram_size = 0; + mps2tz_set_default_ram_info(mmc); +} + +static void mps3tz_an524_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + MPS2TZMachineClass *mmc = MPS2TZ_MACHINE_CLASS(oc); + + mc->desc = "ARM MPS3 with AN524 FPGA image for dual Cortex-M33"; + mc->default_cpus = 2; + mc->min_cpus = mc->default_cpus; + mc->max_cpus = mc->default_cpus; + mmc->fpga_type = FPGA_AN524; + mc->default_cpu_type = ARM_CPU_TYPE_NAME("cortex-m33"); + mmc->scc_id = 0x41045240; + mmc->sysclk_frq = 32 * 1000 * 1000; /* 32MHz */ + mmc->apb_periph_frq = mmc->sysclk_frq; + mmc->oscclk = an524_oscclk; + mmc->len_oscclk = ARRAY_SIZE(an524_oscclk); + mmc->fpgaio_num_leds = 10; + mmc->fpgaio_has_switches = true; + mmc->fpgaio_has_dbgctrl = false; + mmc->numirq = 95; + mmc->uart_overflow_irq = 47; + mmc->init_svtor = 0x10000000; + mmc->sram_addr_width = 15; + mmc->raminfo = an524_raminfo; + mmc->armsse_type = TYPE_SSE200; + mmc->boot_ram_size = 0; + mps2tz_set_default_ram_info(mmc); + + object_class_property_add_str(oc, "remap", mps2_get_remap, mps2_set_remap); + object_class_property_set_description(oc, "remap", + "Set memory mapping. 
Valid values " + "are BRAM (default) and QSPI."); +} + +static void mps3tz_an547_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + MPS2TZMachineClass *mmc = MPS2TZ_MACHINE_CLASS(oc); + + mc->desc = "ARM MPS3 with AN547 FPGA image for Cortex-M55"; + mc->default_cpus = 1; + mc->min_cpus = mc->default_cpus; + mc->max_cpus = mc->default_cpus; + mmc->fpga_type = FPGA_AN547; + mc->default_cpu_type = ARM_CPU_TYPE_NAME("cortex-m55"); + mmc->scc_id = 0x41055470; + mmc->sysclk_frq = 32 * 1000 * 1000; /* 32MHz */ + mmc->apb_periph_frq = 25 * 1000 * 1000; /* 25MHz */ + mmc->oscclk = an524_oscclk; /* same as AN524 */ + mmc->len_oscclk = ARRAY_SIZE(an524_oscclk); + mmc->fpgaio_num_leds = 10; + mmc->fpgaio_has_switches = true; + mmc->fpgaio_has_dbgctrl = true; + mmc->numirq = 96; + mmc->uart_overflow_irq = 48; + mmc->init_svtor = 0x00000000; + mmc->sram_addr_width = 21; + mmc->raminfo = an547_raminfo; + mmc->armsse_type = TYPE_SSE300; + mmc->boot_ram_size = 512 * KiB; + mps2tz_set_default_ram_info(mmc); +} + +static const TypeInfo mps2tz_info = { + .name = TYPE_MPS2TZ_MACHINE, + .parent = TYPE_MACHINE, + .abstract = true, + .instance_size = sizeof(MPS2TZMachineState), + .class_size = sizeof(MPS2TZMachineClass), + .class_init = mps2tz_class_init, + .interfaces = (InterfaceInfo[]) { + { TYPE_IDAU_INTERFACE }, + { } + }, +}; + +static const TypeInfo mps2tz_an505_info = { + .name = TYPE_MPS2TZ_AN505_MACHINE, + .parent = TYPE_MPS2TZ_MACHINE, + .class_init = mps2tz_an505_class_init, +}; + +static const TypeInfo mps2tz_an521_info = { + .name = TYPE_MPS2TZ_AN521_MACHINE, + .parent = TYPE_MPS2TZ_MACHINE, + .class_init = mps2tz_an521_class_init, +}; + +static const TypeInfo mps3tz_an524_info = { + .name = TYPE_MPS3TZ_AN524_MACHINE, + .parent = TYPE_MPS2TZ_MACHINE, + .class_init = mps3tz_an524_class_init, +}; + +static const TypeInfo mps3tz_an547_info = { + .name = TYPE_MPS3TZ_AN547_MACHINE, + .parent = TYPE_MPS2TZ_MACHINE, + .class_init = mps3tz_an547_class_init, +}; + +static void mps2tz_machine_init(void) +{ + type_register_static(&mps2tz_info); + type_register_static(&mps2tz_an505_info); + type_register_static(&mps2tz_an521_info); + type_register_static(&mps3tz_an524_info); + type_register_static(&mps3tz_an547_info); +} + +type_init(mps2tz_machine_init); diff --git a/hw/arm/mps2.c b/hw/arm/mps2.c new file mode 100644 index 000000000..bb76fa688 --- /dev/null +++ b/hw/arm/mps2.c @@ -0,0 +1,564 @@ +/* + * ARM V2M MPS2 board emulation. + * + * Copyright (c) 2017 Linaro Limited + * Written by Peter Maydell + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 or + * (at your option) any later version. + */ + +/* The MPS2 and MPS2+ dev boards are FPGA based (the 2+ has a bigger + * FPGA but is otherwise the same as the 2). Since the CPU itself + * and most of the devices are in the FPGA, the details of the board + * as seen by the guest depend significantly on the FPGA image. 
+ * We model the following FPGA images: + * "mps2-an385" -- Cortex-M3 as documented in ARM Application Note AN385 + * "mps2-an386" -- Cortex-M4 as documented in ARM Application Note AN386 + * "mps2-an500" -- Cortex-M7 as documented in ARM Application Note AN500 + * "mps2-an511" -- Cortex-M3 'DesignStart' as documented in AN511 + * + * Links to the TRM for the board itself and to the various Application + * Notes which document the FPGA images can be found here: + * https://developer.arm.com/products/system-design/development-boards/cortex-m-prototyping-system + */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "qemu/cutils.h" +#include "qapi/error.h" +#include "qemu/error-report.h" +#include "hw/arm/boot.h" +#include "hw/arm/armv7m.h" +#include "hw/or-irq.h" +#include "hw/boards.h" +#include "exec/address-spaces.h" +#include "sysemu/sysemu.h" +#include "hw/misc/unimp.h" +#include "hw/char/cmsdk-apb-uart.h" +#include "hw/timer/cmsdk-apb-timer.h" +#include "hw/timer/cmsdk-apb-dualtimer.h" +#include "hw/misc/mps2-scc.h" +#include "hw/misc/mps2-fpgaio.h" +#include "hw/ssi/pl022.h" +#include "hw/i2c/arm_sbcon_i2c.h" +#include "hw/net/lan9118.h" +#include "net/net.h" +#include "hw/watchdog/cmsdk-apb-watchdog.h" +#include "hw/qdev-clock.h" +#include "qom/object.h" + +typedef enum MPS2FPGAType { + FPGA_AN385, + FPGA_AN386, + FPGA_AN500, + FPGA_AN511, +} MPS2FPGAType; + +struct MPS2MachineClass { + MachineClass parent; + MPS2FPGAType fpga_type; + uint32_t scc_id; + bool has_block_ram; + hwaddr ethernet_base; + hwaddr psram_base; +}; + +struct MPS2MachineState { + MachineState parent; + + ARMv7MState armv7m; + MemoryRegion ssram1; + MemoryRegion ssram1_m; + MemoryRegion ssram23; + MemoryRegion ssram23_m; + MemoryRegion blockram; + MemoryRegion blockram_m1; + MemoryRegion blockram_m2; + MemoryRegion blockram_m3; + MemoryRegion sram; + /* FPGA APB subsystem */ + MPS2SCC scc; + MPS2FPGAIO fpgaio; + /* CMSDK APB subsystem */ + CMSDKAPBDualTimer dualtimer; + CMSDKAPBWatchdog watchdog; + CMSDKAPBTimer timer[2]; + Clock *sysclk; + Clock *refclk; +}; + +#define TYPE_MPS2_MACHINE "mps2" +#define TYPE_MPS2_AN385_MACHINE MACHINE_TYPE_NAME("mps2-an385") +#define TYPE_MPS2_AN386_MACHINE MACHINE_TYPE_NAME("mps2-an386") +#define TYPE_MPS2_AN500_MACHINE MACHINE_TYPE_NAME("mps2-an500") +#define TYPE_MPS2_AN511_MACHINE MACHINE_TYPE_NAME("mps2-an511") + +OBJECT_DECLARE_TYPE(MPS2MachineState, MPS2MachineClass, MPS2_MACHINE) + +/* Main SYSCLK frequency in Hz */ +#define SYSCLK_FRQ 25000000 + +/* + * The Application Notes don't say anything about how the + * systick reference clock is configured. (Quite possibly + * they don't have one at all.) This 1MHz clock matches the + * pre-existing behaviour that used to be hardcoded in the + * armv7m_systick implementation. + */ +#define REFCLK_FRQ (1 * 1000 * 1000) + +/* Initialize the auxiliary RAM region @mr and map it into + * the memory map at @base. + */ +static void make_ram(MemoryRegion *mr, const char *name, + hwaddr base, hwaddr size) +{ + memory_region_init_ram(mr, NULL, name, size, &error_fatal); + memory_region_add_subregion(get_system_memory(), base, mr); +} + +/* Create an alias of an entire original MemoryRegion @orig + * located at @base in the memory map. 
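+ * This is what implements the "mirror of ..." ranges in the memory
+ * map comment below: the alias occupies a second guest-physical range
+ * but is backed by the same RAM as the original region.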
+ */ +static void make_ram_alias(MemoryRegion *mr, const char *name, + MemoryRegion *orig, hwaddr base) +{ + memory_region_init_alias(mr, NULL, name, orig, 0, + memory_region_size(orig)); + memory_region_add_subregion(get_system_memory(), base, mr); +} + +static void mps2_common_init(MachineState *machine) +{ + MPS2MachineState *mms = MPS2_MACHINE(machine); + MPS2MachineClass *mmc = MPS2_MACHINE_GET_CLASS(machine); + MemoryRegion *system_memory = get_system_memory(); + MachineClass *mc = MACHINE_GET_CLASS(machine); + DeviceState *armv7m, *sccdev; + int i; + + if (strcmp(machine->cpu_type, mc->default_cpu_type) != 0) { + error_report("This board can only be used with CPU %s", + mc->default_cpu_type); + exit(1); + } + + if (machine->ram_size != mc->default_ram_size) { + char *sz = size_to_str(mc->default_ram_size); + error_report("Invalid RAM size, should be %s", sz); + g_free(sz); + exit(EXIT_FAILURE); + } + + /* This clock doesn't need migration because it is fixed-frequency */ + mms->sysclk = clock_new(OBJECT(machine), "SYSCLK"); + clock_set_hz(mms->sysclk, SYSCLK_FRQ); + + mms->refclk = clock_new(OBJECT(machine), "REFCLK"); + clock_set_hz(mms->refclk, REFCLK_FRQ); + + /* The FPGA images have an odd combination of different RAMs, + * because in hardware they are different implementations and + * connected to different buses, giving varying performance/size + * tradeoffs. For QEMU they're all just RAM, though. We arbitrarily + * call the 16MB our "system memory", as it's the largest lump. + * + * AN385/AN386/AN511: + * 0x21000000 .. 0x21ffffff : PSRAM (16MB) + * AN385/AN386/AN500: + * 0x00000000 .. 0x003fffff : ZBT SSRAM1 + * 0x00400000 .. 0x007fffff : mirror of ZBT SSRAM1 + * 0x20000000 .. 0x203fffff : ZBT SSRAM 2&3 + * 0x20400000 .. 0x207fffff : mirror of ZBT SSRAM 2&3 + * AN385/AN386 only: + * 0x01000000 .. 0x01003fff : block RAM (16K) + * 0x01004000 .. 0x01007fff : mirror of above + * 0x01008000 .. 0x0100bfff : mirror of above + * 0x0100c000 .. 0x0100ffff : mirror of above + * AN511 only: + * 0x00000000 .. 0x0003ffff : FPGA block RAM + * 0x00400000 .. 0x007fffff : ZBT SSRAM1 + * 0x20000000 .. 0x2001ffff : SRAM + * 0x20400000 .. 0x207fffff : ZBT SSRAM 2&3 + * AN500 only: + * 0x60000000 .. 0x60ffffff : PSRAM (16MB) + * + * The AN385/AN386 has a feature where the lowest 16K can be mapped + * either to the bottom of the ZBT SSRAM1 or to the block RAM. + * This is of no use for QEMU so we don't implement it (as if + * zbt_boot_ctrl is always zero). 
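+ * The per-board psram_base below selects where QEMU's system memory
+ * (the 16MB PSRAM) is mapped: 0x21000000 on AN385/AN386/AN511 and
+ * 0x60000000 on AN500.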
+ */ + memory_region_add_subregion(system_memory, mmc->psram_base, machine->ram); + + if (mmc->has_block_ram) { + make_ram(&mms->blockram, "mps.blockram", 0x01000000, 0x4000); + make_ram_alias(&mms->blockram_m1, "mps.blockram_m1", + &mms->blockram, 0x01004000); + make_ram_alias(&mms->blockram_m2, "mps.blockram_m2", + &mms->blockram, 0x01008000); + make_ram_alias(&mms->blockram_m3, "mps.blockram_m3", + &mms->blockram, 0x0100c000); + } + + switch (mmc->fpga_type) { + case FPGA_AN385: + case FPGA_AN386: + case FPGA_AN500: + make_ram(&mms->ssram1, "mps.ssram1", 0x0, 0x400000); + make_ram_alias(&mms->ssram1_m, "mps.ssram1_m", &mms->ssram1, 0x400000); + make_ram(&mms->ssram23, "mps.ssram23", 0x20000000, 0x400000); + make_ram_alias(&mms->ssram23_m, "mps.ssram23_m", + &mms->ssram23, 0x20400000); + break; + case FPGA_AN511: + make_ram(&mms->blockram, "mps.blockram", 0x0, 0x40000); + make_ram(&mms->ssram1, "mps.ssram1", 0x00400000, 0x00800000); + make_ram(&mms->sram, "mps.sram", 0x20000000, 0x20000); + make_ram(&mms->ssram23, "mps.ssram23", 0x20400000, 0x400000); + break; + default: + g_assert_not_reached(); + } + + object_initialize_child(OBJECT(mms), "armv7m", &mms->armv7m, TYPE_ARMV7M); + armv7m = DEVICE(&mms->armv7m); + switch (mmc->fpga_type) { + case FPGA_AN385: + case FPGA_AN386: + case FPGA_AN500: + qdev_prop_set_uint32(armv7m, "num-irq", 32); + break; + case FPGA_AN511: + qdev_prop_set_uint32(armv7m, "num-irq", 64); + break; + default: + g_assert_not_reached(); + } + qdev_connect_clock_in(armv7m, "cpuclk", mms->sysclk); + qdev_connect_clock_in(armv7m, "refclk", mms->refclk); + qdev_prop_set_string(armv7m, "cpu-type", machine->cpu_type); + qdev_prop_set_bit(armv7m, "enable-bitband", true); + object_property_set_link(OBJECT(&mms->armv7m), "memory", + OBJECT(system_memory), &error_abort); + sysbus_realize(SYS_BUS_DEVICE(&mms->armv7m), &error_fatal); + + create_unimplemented_device("zbtsmram mirror", 0x00400000, 0x00400000); + create_unimplemented_device("RESERVED 1", 0x00800000, 0x00800000); + create_unimplemented_device("Block RAM", 0x01000000, 0x00010000); + create_unimplemented_device("RESERVED 2", 0x01010000, 0x1EFF0000); + create_unimplemented_device("RESERVED 3", 0x20800000, 0x00800000); + create_unimplemented_device("PSRAM", 0x21000000, 0x01000000); + /* These three ranges all cover multiple devices; we may implement + * some of them below (in which case the real device takes precedence + * over the unimplemented-region mapping). + */ + create_unimplemented_device("CMSDK APB peripheral region @0x40000000", + 0x40000000, 0x00010000); + create_unimplemented_device("CMSDK AHB peripheral region @0x40010000", + 0x40010000, 0x00010000); + create_unimplemented_device("Extra peripheral region @0x40020000", + 0x40020000, 0x00010000); + + create_unimplemented_device("RESERVED 4", 0x40030000, 0x001D0000); + create_unimplemented_device("VGA", 0x41000000, 0x0200000); + + switch (mmc->fpga_type) { + case FPGA_AN385: + case FPGA_AN386: + case FPGA_AN500: + { + /* The overflow IRQs for UARTs 0, 1 and 2 are ORed together. + * Overflow for UARTs 4 and 5 doesn't trigger any interrupt. 
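+ * The OR gate created below has six input lines, one each for the tx
+ * and rx overflow signals of UARTs 0-2, and its output feeds NVIC
+ * interrupt line 12.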
+ */ + Object *orgate; + DeviceState *orgate_dev; + + orgate = object_new(TYPE_OR_IRQ); + object_property_set_int(orgate, "num-lines", 6, &error_fatal); + qdev_realize(DEVICE(orgate), NULL, &error_fatal); + orgate_dev = DEVICE(orgate); + qdev_connect_gpio_out(orgate_dev, 0, qdev_get_gpio_in(armv7m, 12)); + + for (i = 0; i < 5; i++) { + static const hwaddr uartbase[] = {0x40004000, 0x40005000, + 0x40006000, 0x40007000, + 0x40009000}; + /* RX irq number; TX irq is always one greater */ + static const int uartirq[] = {0, 2, 4, 18, 20}; + qemu_irq txovrint = NULL, rxovrint = NULL; + + if (i < 3) { + txovrint = qdev_get_gpio_in(orgate_dev, i * 2); + rxovrint = qdev_get_gpio_in(orgate_dev, i * 2 + 1); + } + + cmsdk_apb_uart_create(uartbase[i], + qdev_get_gpio_in(armv7m, uartirq[i] + 1), + qdev_get_gpio_in(armv7m, uartirq[i]), + txovrint, rxovrint, + NULL, + serial_hd(i), SYSCLK_FRQ); + } + break; + } + case FPGA_AN511: + { + /* The overflow IRQs for all UARTs are ORed together. + * Tx and Rx IRQs for each UART are ORed together. + */ + Object *orgate; + DeviceState *orgate_dev; + + orgate = object_new(TYPE_OR_IRQ); + object_property_set_int(orgate, "num-lines", 10, &error_fatal); + qdev_realize(DEVICE(orgate), NULL, &error_fatal); + orgate_dev = DEVICE(orgate); + qdev_connect_gpio_out(orgate_dev, 0, qdev_get_gpio_in(armv7m, 12)); + + for (i = 0; i < 5; i++) { + /* system irq numbers for the combined tx/rx for each UART */ + static const int uart_txrx_irqno[] = {0, 2, 45, 46, 56}; + static const hwaddr uartbase[] = {0x40004000, 0x40005000, + 0x4002c000, 0x4002d000, + 0x4002e000}; + Object *txrx_orgate; + DeviceState *txrx_orgate_dev; + + txrx_orgate = object_new(TYPE_OR_IRQ); + object_property_set_int(txrx_orgate, "num-lines", 2, &error_fatal); + qdev_realize(DEVICE(txrx_orgate), NULL, &error_fatal); + txrx_orgate_dev = DEVICE(txrx_orgate); + qdev_connect_gpio_out(txrx_orgate_dev, 0, + qdev_get_gpio_in(armv7m, uart_txrx_irqno[i])); + cmsdk_apb_uart_create(uartbase[i], + qdev_get_gpio_in(txrx_orgate_dev, 0), + qdev_get_gpio_in(txrx_orgate_dev, 1), + qdev_get_gpio_in(orgate_dev, i * 2), + qdev_get_gpio_in(orgate_dev, i * 2 + 1), + NULL, + serial_hd(i), SYSCLK_FRQ); + } + break; + } + default: + g_assert_not_reached(); + } + for (i = 0; i < 4; i++) { + static const hwaddr gpiobase[] = {0x40010000, 0x40011000, + 0x40012000, 0x40013000}; + create_unimplemented_device("cmsdk-ahb-gpio", gpiobase[i], 0x1000); + } + + /* CMSDK APB subsystem */ + for (i = 0; i < ARRAY_SIZE(mms->timer); i++) { + g_autofree char *name = g_strdup_printf("timer%d", i); + hwaddr base = 0x40000000 + i * 0x1000; + int irqno = 8 + i; + SysBusDevice *sbd; + + object_initialize_child(OBJECT(mms), name, &mms->timer[i], + TYPE_CMSDK_APB_TIMER); + sbd = SYS_BUS_DEVICE(&mms->timer[i]); + qdev_connect_clock_in(DEVICE(&mms->timer[i]), "pclk", mms->sysclk); + sysbus_realize_and_unref(sbd, &error_fatal); + sysbus_mmio_map(sbd, 0, base); + sysbus_connect_irq(sbd, 0, qdev_get_gpio_in(armv7m, irqno)); + } + + object_initialize_child(OBJECT(mms), "dualtimer", &mms->dualtimer, + TYPE_CMSDK_APB_DUALTIMER); + qdev_connect_clock_in(DEVICE(&mms->dualtimer), "TIMCLK", mms->sysclk); + sysbus_realize(SYS_BUS_DEVICE(&mms->dualtimer), &error_fatal); + sysbus_connect_irq(SYS_BUS_DEVICE(&mms->dualtimer), 0, + qdev_get_gpio_in(armv7m, 10)); + sysbus_mmio_map(SYS_BUS_DEVICE(&mms->dualtimer), 0, 0x40002000); + object_initialize_child(OBJECT(mms), "watchdog", &mms->watchdog, + TYPE_CMSDK_APB_WATCHDOG); + qdev_connect_clock_in(DEVICE(&mms->watchdog), "WDOGCLK", 
mms->sysclk); + sysbus_realize(SYS_BUS_DEVICE(&mms->watchdog), &error_fatal); + sysbus_connect_irq(SYS_BUS_DEVICE(&mms->watchdog), 0, + qdev_get_gpio_in_named(armv7m, "NMI", 0)); + sysbus_mmio_map(SYS_BUS_DEVICE(&mms->watchdog), 0, 0x40008000); + + /* FPGA APB subsystem */ + object_initialize_child(OBJECT(mms), "scc", &mms->scc, TYPE_MPS2_SCC); + sccdev = DEVICE(&mms->scc); + qdev_prop_set_uint32(sccdev, "scc-cfg4", 0x2); + qdev_prop_set_uint32(sccdev, "scc-aid", 0x00200008); + qdev_prop_set_uint32(sccdev, "scc-id", mmc->scc_id); + /* All these FPGA images have the same OSCCLK configuration */ + qdev_prop_set_uint32(sccdev, "len-oscclk", 3); + qdev_prop_set_uint32(sccdev, "oscclk[0]", 50000000); + qdev_prop_set_uint32(sccdev, "oscclk[1]", 24576000); + qdev_prop_set_uint32(sccdev, "oscclk[2]", 25000000); + sysbus_realize(SYS_BUS_DEVICE(&mms->scc), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(sccdev), 0, 0x4002f000); + object_initialize_child(OBJECT(mms), "fpgaio", + &mms->fpgaio, TYPE_MPS2_FPGAIO); + qdev_prop_set_uint32(DEVICE(&mms->fpgaio), "prescale-clk", 25000000); + sysbus_realize(SYS_BUS_DEVICE(&mms->fpgaio), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(&mms->fpgaio), 0, 0x40028000); + sysbus_create_simple(TYPE_PL022, 0x40025000, /* External ADC */ + qdev_get_gpio_in(armv7m, 22)); + for (i = 0; i < 2; i++) { + static const int spi_irqno[] = {11, 24}; + static const hwaddr spibase[] = {0x40020000, /* APB */ + 0x40021000, /* LCD */ + 0x40026000, /* Shield0 */ + 0x40027000}; /* Shield1 */ + DeviceState *orgate_dev; + Object *orgate; + int j; + + orgate = object_new(TYPE_OR_IRQ); + object_property_set_int(orgate, "num-lines", 2, &error_fatal); + orgate_dev = DEVICE(orgate); + qdev_realize(orgate_dev, NULL, &error_fatal); + qdev_connect_gpio_out(orgate_dev, 0, + qdev_get_gpio_in(armv7m, spi_irqno[i])); + for (j = 0; j < 2; j++) { + sysbus_create_simple(TYPE_PL022, spibase[2 * i + j], + qdev_get_gpio_in(orgate_dev, j)); + } + } + for (i = 0; i < 4; i++) { + static const hwaddr i2cbase[] = {0x40022000, /* Touch */ + 0x40023000, /* Audio */ + 0x40029000, /* Shield0 */ + 0x4002a000}; /* Shield1 */ + DeviceState *dev; + + dev = sysbus_create_simple(TYPE_ARM_SBCON_I2C, i2cbase[i], NULL); + if (i < 2) { + /* + * internal-only bus: mark it full to avoid user-created + * i2c devices being plugged into it. + */ + BusState *qbus = qdev_get_child_bus(dev, "i2c"); + qbus_mark_full(qbus); + } + } + create_unimplemented_device("i2s", 0x40024000, 0x400); + + /* In hardware this is a LAN9220; the LAN9118 is software compatible + * except that it doesn't support the checksum-offload feature. + */ + lan9118_init(&nd_table[0], mmc->ethernet_base, + qdev_get_gpio_in(armv7m, + mmc->fpga_type == FPGA_AN511 ? 
47 : 13)); + + armv7m_load_kernel(ARM_CPU(first_cpu), machine->kernel_filename, + 0x400000); +} + +static void mps2_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + + mc->init = mps2_common_init; + mc->max_cpus = 1; + mc->default_ram_size = 16 * MiB; + mc->default_ram_id = "mps.ram"; +} + +static void mps2_an385_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + MPS2MachineClass *mmc = MPS2_MACHINE_CLASS(oc); + + mc->desc = "ARM MPS2 with AN385 FPGA image for Cortex-M3"; + mmc->fpga_type = FPGA_AN385; + mc->default_cpu_type = ARM_CPU_TYPE_NAME("cortex-m3"); + mmc->scc_id = 0x41043850; + mmc->psram_base = 0x21000000; + mmc->ethernet_base = 0x40200000; + mmc->has_block_ram = true; +} + +static void mps2_an386_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + MPS2MachineClass *mmc = MPS2_MACHINE_CLASS(oc); + + mc->desc = "ARM MPS2 with AN386 FPGA image for Cortex-M4"; + mmc->fpga_type = FPGA_AN386; + mc->default_cpu_type = ARM_CPU_TYPE_NAME("cortex-m4"); + mmc->scc_id = 0x41043860; + mmc->psram_base = 0x21000000; + mmc->ethernet_base = 0x40200000; + mmc->has_block_ram = true; +} + +static void mps2_an500_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + MPS2MachineClass *mmc = MPS2_MACHINE_CLASS(oc); + + mc->desc = "ARM MPS2 with AN500 FPGA image for Cortex-M7"; + mmc->fpga_type = FPGA_AN500; + mc->default_cpu_type = ARM_CPU_TYPE_NAME("cortex-m7"); + mmc->scc_id = 0x41045000; + mmc->psram_base = 0x60000000; + mmc->ethernet_base = 0xa0000000; + mmc->has_block_ram = false; +} + +static void mps2_an511_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + MPS2MachineClass *mmc = MPS2_MACHINE_CLASS(oc); + + mc->desc = "ARM MPS2 with AN511 DesignStart FPGA image for Cortex-M3"; + mmc->fpga_type = FPGA_AN511; + mc->default_cpu_type = ARM_CPU_TYPE_NAME("cortex-m3"); + mmc->scc_id = 0x41045110; + mmc->psram_base = 0x21000000; + mmc->ethernet_base = 0x40200000; + mmc->has_block_ram = false; +} + +static const TypeInfo mps2_info = { + .name = TYPE_MPS2_MACHINE, + .parent = TYPE_MACHINE, + .abstract = true, + .instance_size = sizeof(MPS2MachineState), + .class_size = sizeof(MPS2MachineClass), + .class_init = mps2_class_init, +}; + +static const TypeInfo mps2_an385_info = { + .name = TYPE_MPS2_AN385_MACHINE, + .parent = TYPE_MPS2_MACHINE, + .class_init = mps2_an385_class_init, +}; + +static const TypeInfo mps2_an386_info = { + .name = TYPE_MPS2_AN386_MACHINE, + .parent = TYPE_MPS2_MACHINE, + .class_init = mps2_an386_class_init, +}; + +static const TypeInfo mps2_an500_info = { + .name = TYPE_MPS2_AN500_MACHINE, + .parent = TYPE_MPS2_MACHINE, + .class_init = mps2_an500_class_init, +}; + +static const TypeInfo mps2_an511_info = { + .name = TYPE_MPS2_AN511_MACHINE, + .parent = TYPE_MPS2_MACHINE, + .class_init = mps2_an511_class_init, +}; + +static void mps2_machine_init(void) +{ + type_register_static(&mps2_info); + type_register_static(&mps2_an385_info); + type_register_static(&mps2_an386_info); + type_register_static(&mps2_an500_info); + type_register_static(&mps2_an511_info); +} + +type_init(mps2_machine_init); diff --git a/hw/arm/msf2-soc.c b/hw/arm/msf2-soc.c new file mode 100644 index 000000000..b5fe9f364 --- /dev/null +++ b/hw/arm/msf2-soc.c @@ -0,0 +1,266 @@ +/* + * SmartFusion2 SoC emulation. 
+ * + * Copyright (c) 2017-2020 Subbaraya Sundeep + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "qapi/error.h" +#include "exec/address-spaces.h" +#include "hw/char/serial.h" +#include "hw/arm/msf2-soc.h" +#include "hw/misc/unimp.h" +#include "hw/qdev-clock.h" +#include "sysemu/sysemu.h" + +#define MSF2_TIMER_BASE 0x40004000 +#define MSF2_SYSREG_BASE 0x40038000 +#define MSF2_EMAC_BASE 0x40041000 + +#define ENVM_BASE_ADDRESS 0x60000000 + +#define SRAM_BASE_ADDRESS 0x20000000 + +#define MSF2_EMAC_IRQ 12 + +#define MSF2_ENVM_MAX_SIZE (512 * KiB) + +/* + * eSRAM max size is 80k without SECDED(Single error correction and + * dual error detection) feature and 64k with SECDED. + * We do not support SECDED now. + */ +#define MSF2_ESRAM_MAX_SIZE (80 * KiB) + +static const uint32_t spi_addr[MSF2_NUM_SPIS] = { 0x40001000 , 0x40011000 }; +static const uint32_t uart_addr[MSF2_NUM_UARTS] = { 0x40000000 , 0x40010000 }; + +static const int spi_irq[MSF2_NUM_SPIS] = { 2, 3 }; +static const int uart_irq[MSF2_NUM_UARTS] = { 10, 11 }; +static const int timer_irq[MSF2_NUM_TIMERS] = { 14, 15 }; + +static void m2sxxx_soc_initfn(Object *obj) +{ + MSF2State *s = MSF2_SOC(obj); + int i; + + object_initialize_child(obj, "armv7m", &s->armv7m, TYPE_ARMV7M); + + object_initialize_child(obj, "sysreg", &s->sysreg, TYPE_MSF2_SYSREG); + + object_initialize_child(obj, "timer", &s->timer, TYPE_MSS_TIMER); + + for (i = 0; i < MSF2_NUM_SPIS; i++) { + object_initialize_child(obj, "spi[*]", &s->spi[i], TYPE_MSS_SPI); + } + + object_initialize_child(obj, "emac", &s->emac, TYPE_MSS_EMAC); + + s->m3clk = qdev_init_clock_in(DEVICE(obj), "m3clk", NULL, NULL, 0); + s->refclk = qdev_init_clock_in(DEVICE(obj), "refclk", NULL, NULL, 0); +} + +static void m2sxxx_soc_realize(DeviceState *dev_soc, Error **errp) +{ + MSF2State *s = MSF2_SOC(dev_soc); + DeviceState *dev, *armv7m; + SysBusDevice *busdev; + int i; + + MemoryRegion *system_memory = get_system_memory(); + + if (!clock_has_source(s->m3clk)) { + error_setg(errp, "m3clk must be wired up by the board code"); + return; + } + + /* + * We use s->refclk internally and only define it with qdev_init_clock_in() + * so it is correctly parented and not leaked on an init/deinit; it is not + * intended as an externally exposed clock. 
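+     * The clock_set_mul_div() call below scales the clock period by
+     * mul/div, so (32, 1) makes refclk tick at 1/32 of the m3clk rate.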
+     */
+    if (clock_has_source(s->refclk)) {
+        error_setg(errp, "refclk must not be wired up by the board code");
+        return;
+    }
+
+    /*
+     * TODO: ideally we should model the SoC SYSTICK_CR register at 0xe0042038,
+     * which allows the guest to program the divisor between the m3clk and
+     * the systick refclk to either /4, /8, /16 or /32, as well as setting
+     * the value the guest can read in the STCALIB register. Currently we
+     * implement the divisor as a fixed /32, which matches the reset value
+     * of SYSTICK_CR.
+     */
+    clock_set_mul_div(s->refclk, 32, 1);
+    clock_set_source(s->refclk, s->m3clk);
+
+    memory_region_init_rom(&s->nvm, OBJECT(dev_soc), "MSF2.eNVM", s->envm_size,
+                           &error_fatal);
+    /*
+     * On power-on, the eNVM region 0x60000000 is automatically
+     * remapped to the Cortex-M3 processor executable region
+     * start address (0x0). We do not currently support remapping of the
+     * other eNVM, eSRAM and DDR regions by the guest (via Sysreg).
+     */
+    memory_region_init_alias(&s->nvm_alias, OBJECT(dev_soc), "MSF2.eNVM",
+                             &s->nvm, 0, s->envm_size);
+
+    memory_region_add_subregion(system_memory, ENVM_BASE_ADDRESS, &s->nvm);
+    memory_region_add_subregion(system_memory, 0, &s->nvm_alias);
+
+    memory_region_init_ram(&s->sram, NULL, "MSF2.eSRAM", s->esram_size,
+                           &error_fatal);
+    memory_region_add_subregion(system_memory, SRAM_BASE_ADDRESS, &s->sram);
+
+    armv7m = DEVICE(&s->armv7m);
+    qdev_prop_set_uint32(armv7m, "num-irq", 81);
+    qdev_prop_set_string(armv7m, "cpu-type", s->cpu_type);
+    qdev_prop_set_bit(armv7m, "enable-bitband", true);
+    qdev_connect_clock_in(armv7m, "cpuclk", s->m3clk);
+    qdev_connect_clock_in(armv7m, "refclk", s->refclk);
+    object_property_set_link(OBJECT(&s->armv7m), "memory",
+                             OBJECT(get_system_memory()), &error_abort);
+    if (!sysbus_realize(SYS_BUS_DEVICE(&s->armv7m), errp)) {
+        return;
+    }
+
+    for (i = 0; i < MSF2_NUM_UARTS; i++) {
+        if (serial_hd(i)) {
+            serial_mm_init(get_system_memory(), uart_addr[i], 2,
+                           qdev_get_gpio_in(armv7m, uart_irq[i]),
+                           115200, serial_hd(i), DEVICE_NATIVE_ENDIAN);
+        }
+    }
+
+    dev = DEVICE(&s->timer);
+    /*
+     * APB0 clock is the timer input clock.
+     * TODO: ideally the MSF2 timer device should use a Clock rather than a
+     * clock-frequency integer property.
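+     * As configured by the Emcraft SOM board code below (142 MHz m3clk,
+     * apb0div of 2), the computed value is a 71 MHz timer clock.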
+     */
+    qdev_prop_set_uint32(dev, "clock-frequency",
+                         clock_get_hz(s->m3clk) / s->apb0div);
+    if (!sysbus_realize(SYS_BUS_DEVICE(&s->timer), errp)) {
+        return;
+    }
+    busdev = SYS_BUS_DEVICE(dev);
+    sysbus_mmio_map(busdev, 0, MSF2_TIMER_BASE);
+    sysbus_connect_irq(busdev, 0,
+                       qdev_get_gpio_in(armv7m, timer_irq[0]));
+    sysbus_connect_irq(busdev, 1,
+                       qdev_get_gpio_in(armv7m, timer_irq[1]));
+
+    dev = DEVICE(&s->sysreg);
+    qdev_prop_set_uint32(dev, "apb0divisor", s->apb0div);
+    qdev_prop_set_uint32(dev, "apb1divisor", s->apb1div);
+    if (!sysbus_realize(SYS_BUS_DEVICE(&s->sysreg), errp)) {
+        return;
+    }
+    busdev = SYS_BUS_DEVICE(dev);
+    sysbus_mmio_map(busdev, 0, MSF2_SYSREG_BASE);
+
+    for (i = 0; i < MSF2_NUM_SPIS; i++) {
+        gchar *bus_name;
+
+        if (!sysbus_realize(SYS_BUS_DEVICE(&s->spi[i]), errp)) {
+            return;
+        }
+
+        sysbus_mmio_map(SYS_BUS_DEVICE(&s->spi[i]), 0, spi_addr[i]);
+        sysbus_connect_irq(SYS_BUS_DEVICE(&s->spi[i]), 0,
+                           qdev_get_gpio_in(armv7m, spi_irq[i]));
+
+        /* Alias controller SPI bus to the SoC itself */
+        bus_name = g_strdup_printf("spi%d", i);
+        object_property_add_alias(OBJECT(s), bus_name,
+                                  OBJECT(&s->spi[i]), "spi");
+        g_free(bus_name);
+    }
+
+    /* FIXME use qdev NIC properties instead of nd_table[] */
+    if (nd_table[0].used) {
+        qemu_check_nic_model(&nd_table[0], TYPE_MSS_EMAC);
+        qdev_set_nic_properties(DEVICE(&s->emac), &nd_table[0]);
+    }
+    dev = DEVICE(&s->emac);
+    object_property_set_link(OBJECT(&s->emac), "ahb-bus",
+                             OBJECT(get_system_memory()), &error_abort);
+    if (!sysbus_realize(SYS_BUS_DEVICE(&s->emac), errp)) {
+        return;
+    }
+    busdev = SYS_BUS_DEVICE(dev);
+    sysbus_mmio_map(busdev, 0, MSF2_EMAC_BASE);
+    sysbus_connect_irq(busdev, 0,
+                       qdev_get_gpio_in(armv7m, MSF2_EMAC_IRQ));
+
+    /* The devices below are not modelled yet. */
+    create_unimplemented_device("i2c_0", 0x40002000, 0x1000);
+    create_unimplemented_device("dma", 0x40003000, 0x1000);
+    create_unimplemented_device("watchdog", 0x40005000, 0x1000);
+    create_unimplemented_device("i2c_1", 0x40012000, 0x1000);
+    create_unimplemented_device("gpio", 0x40013000, 0x1000);
+    create_unimplemented_device("hs-dma", 0x40014000, 0x1000);
+    create_unimplemented_device("can", 0x40015000, 0x1000);
+    create_unimplemented_device("rtc", 0x40017000, 0x1000);
+    create_unimplemented_device("apb_config", 0x40020000, 0x10000);
+    create_unimplemented_device("usb", 0x40043000, 0x1000);
+}
+
+static Property m2sxxx_soc_properties[] = {
+    /*
+     * part-name specifies the SmartFusion2 device variant (this
+     * property is for informational purposes only).
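+     * For instance, the Emcraft SOM board code below sets part-name to
+     * "M2S010", eNVM-size to 256 KiB and eSRAM-size to 64 KiB.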
+ */ + DEFINE_PROP_STRING("cpu-type", MSF2State, cpu_type), + DEFINE_PROP_STRING("part-name", MSF2State, part_name), + DEFINE_PROP_UINT64("eNVM-size", MSF2State, envm_size, MSF2_ENVM_MAX_SIZE), + DEFINE_PROP_UINT64("eSRAM-size", MSF2State, esram_size, + MSF2_ESRAM_MAX_SIZE), + /* default divisors in Libero GUI */ + DEFINE_PROP_UINT8("apb0div", MSF2State, apb0div, 2), + DEFINE_PROP_UINT8("apb1div", MSF2State, apb1div, 2), + DEFINE_PROP_END_OF_LIST(), +}; + +static void m2sxxx_soc_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = m2sxxx_soc_realize; + device_class_set_props(dc, m2sxxx_soc_properties); +} + +static const TypeInfo m2sxxx_soc_info = { + .name = TYPE_MSF2_SOC, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(MSF2State), + .instance_init = m2sxxx_soc_initfn, + .class_init = m2sxxx_soc_class_init, +}; + +static void m2sxxx_soc_types(void) +{ + type_register_static(&m2sxxx_soc_info); +} + +type_init(m2sxxx_soc_types) diff --git a/hw/arm/msf2-som.c b/hw/arm/msf2-som.c new file mode 100644 index 000000000..396e8b991 --- /dev/null +++ b/hw/arm/msf2-som.c @@ -0,0 +1,111 @@ +/* + * SmartFusion2 SOM starter kit(from Emcraft) emulation. + * + * Copyright (c) 2017 Subbaraya Sundeep + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "qapi/error.h" +#include "qemu/error-report.h" +#include "hw/boards.h" +#include "hw/qdev-properties.h" +#include "hw/arm/boot.h" +#include "hw/qdev-clock.h" +#include "exec/address-spaces.h" +#include "hw/arm/msf2-soc.h" + +#define DDR_BASE_ADDRESS 0xA0000000 +#define DDR_SIZE (64 * MiB) + +#define M2S010_ENVM_SIZE (256 * KiB) +#define M2S010_ESRAM_SIZE (64 * KiB) + +static void emcraft_sf2_s2s010_init(MachineState *machine) +{ + DeviceState *dev; + DeviceState *spi_flash; + MSF2State *soc; + MachineClass *mc = MACHINE_GET_CLASS(machine); + DriveInfo *dinfo = drive_get_next(IF_MTD); + qemu_irq cs_line; + BusState *spi_bus; + MemoryRegion *sysmem = get_system_memory(); + MemoryRegion *ddr = g_new(MemoryRegion, 1); + Clock *m3clk; + + if (strcmp(machine->cpu_type, mc->default_cpu_type) != 0) { + error_report("This board can only be used with CPU %s", + mc->default_cpu_type); + exit(1); + } + + memory_region_init_ram(ddr, NULL, "ddr-ram", DDR_SIZE, + &error_fatal); + memory_region_add_subregion(sysmem, DDR_BASE_ADDRESS, ddr); + + dev = qdev_new(TYPE_MSF2_SOC); + qdev_prop_set_string(dev, "part-name", "M2S010"); + qdev_prop_set_string(dev, "cpu-type", mc->default_cpu_type); + + qdev_prop_set_uint64(dev, "eNVM-size", M2S010_ENVM_SIZE); + qdev_prop_set_uint64(dev, "eSRAM-size", M2S010_ESRAM_SIZE); + + /* + * CPU clock and peripheral clocks(APB0, APB1)are configurable + * in Libero. CPU clock is divided by APB0 and APB1 divisors for + * peripherals. Emcraft's SoM kit comes with these settings by default. + */ + /* This clock doesn't need migration because it is fixed-frequency */ + m3clk = clock_new(OBJECT(machine), "m3clk"); + clock_set_hz(m3clk, 142 * 1000000); + qdev_connect_clock_in(dev, "m3clk", m3clk); + qdev_prop_set_uint32(dev, "apb0div", 2); + qdev_prop_set_uint32(dev, "apb1div", 2); + + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + + soc = MSF2_SOC(dev); + + /* Attach SPI flash to SPI0 controller */ + spi_bus = qdev_get_child_bus(dev, "spi0"); + spi_flash = qdev_new("s25sl12801"); + qdev_prop_set_uint8(spi_flash, "spansion-cr2nv", 1); + if (dinfo) { + qdev_prop_set_drive_err(spi_flash, "drive", + blk_by_legacy_dinfo(dinfo), &error_fatal); + } + qdev_realize_and_unref(spi_flash, spi_bus, &error_fatal); + cs_line = qdev_get_gpio_in_named(spi_flash, SSI_GPIO_CS, 0); + sysbus_connect_irq(SYS_BUS_DEVICE(&soc->spi[0]), 1, cs_line); + + armv7m_load_kernel(ARM_CPU(first_cpu), machine->kernel_filename, + soc->envm_size); +} + +static void emcraft_sf2_machine_init(MachineClass *mc) +{ + mc->desc = "SmartFusion2 SOM kit from Emcraft (M2S010)"; + mc->init = emcraft_sf2_s2s010_init; + mc->default_cpu_type = ARM_CPU_TYPE_NAME("cortex-m3"); +} + +DEFINE_MACHINE("emcraft-sf2", emcraft_sf2_machine_init) diff --git a/hw/arm/musca.c b/hw/arm/musca.c new file mode 100644 index 000000000..7a83f7dda --- /dev/null +++ b/hw/arm/musca.c @@ -0,0 +1,677 @@ +/* + * Arm Musca-B1 test chip board emulation + * + * Copyright (c) 2019 Linaro Limited + * Written by Peter Maydell + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 or + * (at your option) any later version. 
+ */ + +/* + * The Musca boards are a reference implementation of a system using + * the SSE-200 subsystem for embedded: + * https://developer.arm.com/products/system-design/development-boards/iot-test-chips-and-boards/musca-a-test-chip-board + * https://developer.arm.com/products/system-design/development-boards/iot-test-chips-and-boards/musca-b-test-chip-board + * We model the A and B1 variants of this board, as described in the TRMs: + * https://developer.arm.com/documentation/101107/latest/ + * https://developer.arm.com/documentation/101312/latest/ + */ + +#include "qemu/osdep.h" +#include "qemu/error-report.h" +#include "qapi/error.h" +#include "exec/address-spaces.h" +#include "sysemu/sysemu.h" +#include "hw/arm/boot.h" +#include "hw/arm/armsse.h" +#include "hw/boards.h" +#include "hw/char/pl011.h" +#include "hw/core/split-irq.h" +#include "hw/misc/tz-mpc.h" +#include "hw/misc/tz-ppc.h" +#include "hw/misc/unimp.h" +#include "hw/rtc/pl031.h" +#include "hw/qdev-clock.h" +#include "qom/object.h" + +#define MUSCA_NUMIRQ_MAX 96 +#define MUSCA_PPC_MAX 3 +#define MUSCA_MPC_MAX 5 + +typedef struct MPCInfo MPCInfo; + +typedef enum MuscaType { + MUSCA_A, + MUSCA_B1, +} MuscaType; + +struct MuscaMachineClass { + MachineClass parent; + MuscaType type; + uint32_t init_svtor; + int sram_addr_width; + int num_irqs; + const MPCInfo *mpc_info; + int num_mpcs; +}; + +struct MuscaMachineState { + MachineState parent; + + ARMSSE sse; + /* RAM and flash */ + MemoryRegion ram[MUSCA_MPC_MAX]; + SplitIRQ cpu_irq_splitter[MUSCA_NUMIRQ_MAX]; + SplitIRQ sec_resp_splitter; + TZPPC ppc[MUSCA_PPC_MAX]; + MemoryRegion container; + UnimplementedDeviceState eflash[2]; + UnimplementedDeviceState qspi; + TZMPC mpc[MUSCA_MPC_MAX]; + UnimplementedDeviceState mhu[2]; + UnimplementedDeviceState pwm[3]; + UnimplementedDeviceState i2s; + PL011State uart[2]; + UnimplementedDeviceState i2c[2]; + UnimplementedDeviceState spi; + UnimplementedDeviceState scc; + UnimplementedDeviceState timer; + PL031State rtc; + UnimplementedDeviceState pvt; + UnimplementedDeviceState sdio; + UnimplementedDeviceState gpio; + UnimplementedDeviceState cryptoisland; + Clock *sysclk; + Clock *s32kclk; +}; + +#define TYPE_MUSCA_MACHINE "musca" +#define TYPE_MUSCA_A_MACHINE MACHINE_TYPE_NAME("musca-a") +#define TYPE_MUSCA_B1_MACHINE MACHINE_TYPE_NAME("musca-b1") + +OBJECT_DECLARE_TYPE(MuscaMachineState, MuscaMachineClass, MUSCA_MACHINE) + +/* + * Main SYSCLK frequency in Hz + * TODO this should really be different for the two cores, but we + * don't model that in our SSE-200 model yet. + */ +#define SYSCLK_FRQ 40000000 +/* Slow 32Khz S32KCLK frequency in Hz */ +#define S32KCLK_FRQ (32 * 1000) + +static qemu_irq get_sse_irq_in(MuscaMachineState *mms, int irqno) +{ + /* Return a qemu_irq which will signal IRQ n to all CPUs in the SSE. */ + assert(irqno < MUSCA_NUMIRQ_MAX); + + return qdev_get_gpio_in(DEVICE(&mms->cpu_irq_splitter[irqno]), 0); +} + +/* + * Most of the devices in the Musca board sit behind Peripheral Protection + * Controllers. These data structures define the layout of which devices + * sit behind which PPCs. + * The devfn for each port is a function which creates, configures + * and initializes the device, returning the MemoryRegion which + * needs to be plugged into the downstream end of the PPC port. 
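+ * For example, the Musca-B1 "apb_ppcexp1" table below contains the entry
+ *   { "uart0", make_uart, &mms->uart[0], 0x40105000, 0x1000 }
+ * meaning: create the first UART via make_uart() and map its 4K register
+ * region behind that PPC port at 0x40105000.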
+ */ +typedef MemoryRegion *MakeDevFn(MuscaMachineState *mms, void *opaque, + const char *name, hwaddr size); + +typedef struct PPCPortInfo { + const char *name; + MakeDevFn *devfn; + void *opaque; + hwaddr addr; + hwaddr size; +} PPCPortInfo; + +typedef struct PPCInfo { + const char *name; + PPCPortInfo ports[TZ_NUM_PORTS]; +} PPCInfo; + +static MemoryRegion *make_unimp_dev(MuscaMachineState *mms, + void *opaque, const char *name, hwaddr size) +{ + /* + * Initialize, configure and realize a TYPE_UNIMPLEMENTED_DEVICE, + * and return a pointer to its MemoryRegion. + */ + UnimplementedDeviceState *uds = opaque; + + object_initialize_child(OBJECT(mms), name, uds, TYPE_UNIMPLEMENTED_DEVICE); + qdev_prop_set_string(DEVICE(uds), "name", name); + qdev_prop_set_uint64(DEVICE(uds), "size", size); + sysbus_realize(SYS_BUS_DEVICE(uds), &error_fatal); + return sysbus_mmio_get_region(SYS_BUS_DEVICE(uds), 0); +} + +typedef enum MPCInfoType { + MPC_RAM, + MPC_ROM, + MPC_CRYPTOISLAND, +} MPCInfoType; + +struct MPCInfo { + const char *name; + hwaddr addr; + hwaddr size; + MPCInfoType type; +}; + +/* Order of the MPCs here must match the order of the bits in SECMPCINTSTATUS */ +static const MPCInfo a_mpc_info[] = { { + .name = "qspi", + .type = MPC_ROM, + .addr = 0x00200000, + .size = 0x00800000, + }, { + .name = "sram", + .type = MPC_RAM, + .addr = 0x00000000, + .size = 0x00200000, + } +}; + +static const MPCInfo b1_mpc_info[] = { { + .name = "qspi", + .type = MPC_ROM, + .addr = 0x00000000, + .size = 0x02000000, + }, { + .name = "sram", + .type = MPC_RAM, + .addr = 0x0a400000, + .size = 0x00080000, + }, { + .name = "eflash0", + .type = MPC_ROM, + .addr = 0x0a000000, + .size = 0x00200000, + }, { + .name = "eflash1", + .type = MPC_ROM, + .addr = 0x0a200000, + .size = 0x00200000, + }, { + .name = "cryptoisland", + .type = MPC_CRYPTOISLAND, + .addr = 0x0a000000, + .size = 0x00200000, + } +}; + +static MemoryRegion *make_mpc(MuscaMachineState *mms, void *opaque, + const char *name, hwaddr size) +{ + /* + * Create an MPC and the RAM or flash behind it. + * MPC 0: eFlash 0 + * MPC 1: eFlash 1 + * MPC 2: SRAM + * MPC 3: QSPI flash + * MPC 4: CryptoIsland + * For now we implement the flash regions as ROM (ie not programmable) + * (with their control interface memory regions being unimplemented + * stubs behind the PPCs). + * The whole CryptoIsland region behind its MPC is an unimplemented stub. 
+ */ + MuscaMachineClass *mmc = MUSCA_MACHINE_GET_CLASS(mms); + TZMPC *mpc = opaque; + int i = mpc - &mms->mpc[0]; + MemoryRegion *downstream; + MemoryRegion *upstream; + UnimplementedDeviceState *uds; + char *mpcname; + const MPCInfo *mpcinfo = mmc->mpc_info; + + mpcname = g_strdup_printf("%s-mpc", mpcinfo[i].name); + + switch (mpcinfo[i].type) { + case MPC_ROM: + downstream = &mms->ram[i]; + memory_region_init_rom(downstream, NULL, mpcinfo[i].name, + mpcinfo[i].size, &error_fatal); + break; + case MPC_RAM: + downstream = &mms->ram[i]; + memory_region_init_ram(downstream, NULL, mpcinfo[i].name, + mpcinfo[i].size, &error_fatal); + break; + case MPC_CRYPTOISLAND: + /* We don't implement the CryptoIsland yet */ + uds = &mms->cryptoisland; + object_initialize_child(OBJECT(mms), name, uds, + TYPE_UNIMPLEMENTED_DEVICE); + qdev_prop_set_string(DEVICE(uds), "name", mpcinfo[i].name); + qdev_prop_set_uint64(DEVICE(uds), "size", mpcinfo[i].size); + sysbus_realize(SYS_BUS_DEVICE(uds), &error_fatal); + downstream = sysbus_mmio_get_region(SYS_BUS_DEVICE(uds), 0); + break; + default: + g_assert_not_reached(); + } + + object_initialize_child(OBJECT(mms), mpcname, mpc, TYPE_TZ_MPC); + object_property_set_link(OBJECT(mpc), "downstream", OBJECT(downstream), + &error_fatal); + sysbus_realize(SYS_BUS_DEVICE(mpc), &error_fatal); + /* Map the upstream end of the MPC into system memory */ + upstream = sysbus_mmio_get_region(SYS_BUS_DEVICE(mpc), 1); + memory_region_add_subregion(get_system_memory(), mpcinfo[i].addr, upstream); + /* and connect its interrupt to the SSE-200 */ + qdev_connect_gpio_out_named(DEVICE(mpc), "irq", 0, + qdev_get_gpio_in_named(DEVICE(&mms->sse), + "mpcexp_status", i)); + + g_free(mpcname); + /* Return the register interface MR for our caller to map behind the PPC */ + return sysbus_mmio_get_region(SYS_BUS_DEVICE(mpc), 0); +} + +static MemoryRegion *make_rtc(MuscaMachineState *mms, void *opaque, + const char *name, hwaddr size) +{ + PL031State *rtc = opaque; + + object_initialize_child(OBJECT(mms), name, rtc, TYPE_PL031); + sysbus_realize(SYS_BUS_DEVICE(rtc), &error_fatal); + sysbus_connect_irq(SYS_BUS_DEVICE(rtc), 0, get_sse_irq_in(mms, 39)); + return sysbus_mmio_get_region(SYS_BUS_DEVICE(rtc), 0); +} + +static MemoryRegion *make_uart(MuscaMachineState *mms, void *opaque, + const char *name, hwaddr size) +{ + PL011State *uart = opaque; + int i = uart - &mms->uart[0]; + int irqbase = 7 + i * 6; + SysBusDevice *s; + + object_initialize_child(OBJECT(mms), name, uart, TYPE_PL011); + qdev_prop_set_chr(DEVICE(uart), "chardev", serial_hd(i)); + sysbus_realize(SYS_BUS_DEVICE(uart), &error_fatal); + s = SYS_BUS_DEVICE(uart); + sysbus_connect_irq(s, 0, get_sse_irq_in(mms, irqbase + 5)); /* combined */ + sysbus_connect_irq(s, 1, get_sse_irq_in(mms, irqbase + 0)); /* RX */ + sysbus_connect_irq(s, 2, get_sse_irq_in(mms, irqbase + 1)); /* TX */ + sysbus_connect_irq(s, 3, get_sse_irq_in(mms, irqbase + 2)); /* RT */ + sysbus_connect_irq(s, 4, get_sse_irq_in(mms, irqbase + 3)); /* MS */ + sysbus_connect_irq(s, 5, get_sse_irq_in(mms, irqbase + 4)); /* E */ + return sysbus_mmio_get_region(SYS_BUS_DEVICE(uart), 0); +} + +static MemoryRegion *make_musca_a_devs(MuscaMachineState *mms, void *opaque, + const char *name, hwaddr size) +{ + /* + * Create the container MemoryRegion for all the devices that live + * behind the Musca-A PPC's single port. These devices don't have a PPC + * port each, but we use the PPCPortInfo struct as a convenient way + * to describe them. 
Note that addresses here are relative to the base + * address of the PPC port region: 0x40100000, and devices appear both + * at the 0x4... NS region and the 0x5... S region. + */ + int i; + MemoryRegion *container = &mms->container; + + const PPCPortInfo devices[] = { + { "uart0", make_uart, &mms->uart[0], 0x1000, 0x1000 }, + { "uart1", make_uart, &mms->uart[1], 0x2000, 0x1000 }, + { "spi", make_unimp_dev, &mms->spi, 0x3000, 0x1000 }, + { "i2c0", make_unimp_dev, &mms->i2c[0], 0x4000, 0x1000 }, + { "i2c1", make_unimp_dev, &mms->i2c[1], 0x5000, 0x1000 }, + { "i2s", make_unimp_dev, &mms->i2s, 0x6000, 0x1000 }, + { "pwm0", make_unimp_dev, &mms->pwm[0], 0x7000, 0x1000 }, + { "rtc", make_rtc, &mms->rtc, 0x8000, 0x1000 }, + { "qspi", make_unimp_dev, &mms->qspi, 0xa000, 0x1000 }, + { "timer", make_unimp_dev, &mms->timer, 0xb000, 0x1000 }, + { "scc", make_unimp_dev, &mms->scc, 0xc000, 0x1000 }, + { "pwm1", make_unimp_dev, &mms->pwm[1], 0xe000, 0x1000 }, + { "pwm2", make_unimp_dev, &mms->pwm[2], 0xf000, 0x1000 }, + { "gpio", make_unimp_dev, &mms->gpio, 0x10000, 0x1000 }, + { "mpc0", make_mpc, &mms->mpc[0], 0x12000, 0x1000 }, + { "mpc1", make_mpc, &mms->mpc[1], 0x13000, 0x1000 }, + }; + + memory_region_init(container, OBJECT(mms), "musca-device-container", size); + + for (i = 0; i < ARRAY_SIZE(devices); i++) { + const PPCPortInfo *pinfo = &devices[i]; + MemoryRegion *mr; + + mr = pinfo->devfn(mms, pinfo->opaque, pinfo->name, pinfo->size); + memory_region_add_subregion(container, pinfo->addr, mr); + } + + return &mms->container; +} + +static void musca_init(MachineState *machine) +{ + MuscaMachineState *mms = MUSCA_MACHINE(machine); + MuscaMachineClass *mmc = MUSCA_MACHINE_GET_CLASS(mms); + MachineClass *mc = MACHINE_GET_CLASS(machine); + MemoryRegion *system_memory = get_system_memory(); + DeviceState *ssedev; + DeviceState *dev_splitter; + const PPCInfo *ppcs; + int num_ppcs; + int i; + + assert(mmc->num_irqs <= MUSCA_NUMIRQ_MAX); + assert(mmc->num_mpcs <= MUSCA_MPC_MAX); + + if (strcmp(machine->cpu_type, mc->default_cpu_type) != 0) { + error_report("This board can only be used with CPU %s", + mc->default_cpu_type); + exit(1); + } + + mms->sysclk = clock_new(OBJECT(machine), "SYSCLK"); + clock_set_hz(mms->sysclk, SYSCLK_FRQ); + mms->s32kclk = clock_new(OBJECT(machine), "S32KCLK"); + clock_set_hz(mms->s32kclk, S32KCLK_FRQ); + + object_initialize_child(OBJECT(machine), "sse-200", &mms->sse, + TYPE_SSE200); + ssedev = DEVICE(&mms->sse); + object_property_set_link(OBJECT(&mms->sse), "memory", + OBJECT(system_memory), &error_fatal); + qdev_prop_set_uint32(ssedev, "EXP_NUMIRQ", mmc->num_irqs); + qdev_prop_set_uint32(ssedev, "init-svtor", mmc->init_svtor); + qdev_prop_set_uint32(ssedev, "SRAM_ADDR_WIDTH", mmc->sram_addr_width); + qdev_connect_clock_in(ssedev, "MAINCLK", mms->sysclk); + qdev_connect_clock_in(ssedev, "S32KCLK", mms->s32kclk); + /* + * Musca-A takes the default SSE-200 FPU/DSP settings (ie no for + * CPU0 and yes for CPU1); Musca-B1 explicitly enables them for CPU0. + */ + if (mmc->type == MUSCA_B1) { + qdev_prop_set_bit(ssedev, "CPU0_FPU", true); + qdev_prop_set_bit(ssedev, "CPU0_DSP", true); + } + sysbus_realize(SYS_BUS_DEVICE(&mms->sse), &error_fatal); + + /* + * We need to create splitters to feed the IRQ inputs + * for each CPU in the SSE-200 from each device in the board. 
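+     * Each splitter fans a line out to two outputs: output 0 drives the
+     * SSE "EXP_IRQ" input (CPU 0) and output 1 the "EXP_CPU1_IRQ" input
+     * (CPU 1), so one device interrupt is visible to both cores.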
+ */ + for (i = 0; i < mmc->num_irqs; i++) { + char *name = g_strdup_printf("musca-irq-splitter%d", i); + SplitIRQ *splitter = &mms->cpu_irq_splitter[i]; + + object_initialize_child_with_props(OBJECT(machine), name, splitter, + sizeof(*splitter), TYPE_SPLIT_IRQ, + &error_fatal, NULL); + g_free(name); + + object_property_set_int(OBJECT(splitter), "num-lines", 2, + &error_fatal); + qdev_realize(DEVICE(splitter), NULL, &error_fatal); + qdev_connect_gpio_out(DEVICE(splitter), 0, + qdev_get_gpio_in_named(ssedev, "EXP_IRQ", i)); + qdev_connect_gpio_out(DEVICE(splitter), 1, + qdev_get_gpio_in_named(ssedev, + "EXP_CPU1_IRQ", i)); + } + + /* + * The sec_resp_cfg output from the SSE-200 must be split into multiple + * lines, one for each of the PPCs we create here. + */ + object_initialize_child_with_props(OBJECT(machine), "sec-resp-splitter", + &mms->sec_resp_splitter, + sizeof(mms->sec_resp_splitter), + TYPE_SPLIT_IRQ, &error_fatal, NULL); + + object_property_set_int(OBJECT(&mms->sec_resp_splitter), "num-lines", + ARRAY_SIZE(mms->ppc), &error_fatal); + qdev_realize(DEVICE(&mms->sec_resp_splitter), NULL, &error_fatal); + dev_splitter = DEVICE(&mms->sec_resp_splitter); + qdev_connect_gpio_out_named(ssedev, "sec_resp_cfg", 0, + qdev_get_gpio_in(dev_splitter, 0)); + + /* + * Most of the devices in the board are behind Peripheral Protection + * Controllers. The required order for initializing things is: + * + initialize the PPC + * + initialize, configure and realize downstream devices + * + connect downstream device MemoryRegions to the PPC + * + realize the PPC + * + map the PPC's MemoryRegions to the places in the address map + * where the downstream devices should appear + * + wire up the PPC's control lines to the SSE object + * + * The PPC mapping differs for the -A and -B1 variants; the -A version + * is much simpler, using only a single port of a single PPC and putting + * all the devices behind that. + */ + const PPCInfo a_ppcs[] = { { + .name = "ahb_ppcexp0", + .ports = { + { "musca-devices", make_musca_a_devs, 0, 0x40100000, 0x100000 }, + }, + }, + }; + + /* + * Devices listed with an 0x4.. address appear in both the NS 0x4.. region + * and the 0x5.. S region. Devices listed with an 0x5.. address appear + * only in the S region. 
+ */ + const PPCInfo b1_ppcs[] = { { + .name = "apb_ppcexp0", + .ports = { + { "eflash0", make_unimp_dev, &mms->eflash[0], + 0x52400000, 0x1000 }, + { "eflash1", make_unimp_dev, &mms->eflash[1], + 0x52500000, 0x1000 }, + { "qspi", make_unimp_dev, &mms->qspi, 0x42800000, 0x100000 }, + { "mpc0", make_mpc, &mms->mpc[0], 0x52000000, 0x1000 }, + { "mpc1", make_mpc, &mms->mpc[1], 0x52100000, 0x1000 }, + { "mpc2", make_mpc, &mms->mpc[2], 0x52200000, 0x1000 }, + { "mpc3", make_mpc, &mms->mpc[3], 0x52300000, 0x1000 }, + { "mhu0", make_unimp_dev, &mms->mhu[0], 0x42600000, 0x100000 }, + { "mhu1", make_unimp_dev, &mms->mhu[1], 0x42700000, 0x100000 }, + { }, /* port 9: unused */ + { }, /* port 10: unused */ + { }, /* port 11: unused */ + { }, /* port 12: unused */ + { }, /* port 13: unused */ + { "mpc4", make_mpc, &mms->mpc[4], 0x52e00000, 0x1000 }, + }, + }, { + .name = "apb_ppcexp1", + .ports = { + { "pwm0", make_unimp_dev, &mms->pwm[0], 0x40101000, 0x1000 }, + { "pwm1", make_unimp_dev, &mms->pwm[1], 0x40102000, 0x1000 }, + { "pwm2", make_unimp_dev, &mms->pwm[2], 0x40103000, 0x1000 }, + { "i2s", make_unimp_dev, &mms->i2s, 0x40104000, 0x1000 }, + { "uart0", make_uart, &mms->uart[0], 0x40105000, 0x1000 }, + { "uart1", make_uart, &mms->uart[1], 0x40106000, 0x1000 }, + { "i2c0", make_unimp_dev, &mms->i2c[0], 0x40108000, 0x1000 }, + { "i2c1", make_unimp_dev, &mms->i2c[1], 0x40109000, 0x1000 }, + { "spi", make_unimp_dev, &mms->spi, 0x4010a000, 0x1000 }, + { "scc", make_unimp_dev, &mms->scc, 0x5010b000, 0x1000 }, + { "timer", make_unimp_dev, &mms->timer, 0x4010c000, 0x1000 }, + { "rtc", make_rtc, &mms->rtc, 0x4010d000, 0x1000 }, + { "pvt", make_unimp_dev, &mms->pvt, 0x4010e000, 0x1000 }, + { "sdio", make_unimp_dev, &mms->sdio, 0x4010f000, 0x1000 }, + }, + }, { + .name = "ahb_ppcexp0", + .ports = { + { }, /* port 0: unused */ + { "gpio", make_unimp_dev, &mms->gpio, 0x41000000, 0x1000 }, + }, + }, + }; + + switch (mmc->type) { + case MUSCA_A: + ppcs = a_ppcs; + num_ppcs = ARRAY_SIZE(a_ppcs); + break; + case MUSCA_B1: + ppcs = b1_ppcs; + num_ppcs = ARRAY_SIZE(b1_ppcs); + break; + default: + g_assert_not_reached(); + } + assert(num_ppcs <= MUSCA_PPC_MAX); + + for (i = 0; i < num_ppcs; i++) { + const PPCInfo *ppcinfo = &ppcs[i]; + TZPPC *ppc = &mms->ppc[i]; + DeviceState *ppcdev; + int port; + char *gpioname; + + object_initialize_child(OBJECT(machine), ppcinfo->name, ppc, + TYPE_TZ_PPC); + ppcdev = DEVICE(ppc); + + for (port = 0; port < TZ_NUM_PORTS; port++) { + const PPCPortInfo *pinfo = &ppcinfo->ports[port]; + MemoryRegion *mr; + char *portname; + + if (!pinfo->devfn) { + continue; + } + + mr = pinfo->devfn(mms, pinfo->opaque, pinfo->name, pinfo->size); + portname = g_strdup_printf("port[%d]", port); + object_property_set_link(OBJECT(ppc), portname, OBJECT(mr), + &error_fatal); + g_free(portname); + } + + sysbus_realize(SYS_BUS_DEVICE(ppc), &error_fatal); + + for (port = 0; port < TZ_NUM_PORTS; port++) { + const PPCPortInfo *pinfo = &ppcinfo->ports[port]; + + if (!pinfo->devfn) { + continue; + } + sysbus_mmio_map(SYS_BUS_DEVICE(ppc), port, pinfo->addr); + + gpioname = g_strdup_printf("%s_nonsec", ppcinfo->name); + qdev_connect_gpio_out_named(ssedev, gpioname, port, + qdev_get_gpio_in_named(ppcdev, + "cfg_nonsec", + port)); + g_free(gpioname); + gpioname = g_strdup_printf("%s_ap", ppcinfo->name); + qdev_connect_gpio_out_named(ssedev, gpioname, port, + qdev_get_gpio_in_named(ppcdev, + "cfg_ap", port)); + g_free(gpioname); + } + + gpioname = g_strdup_printf("%s_irq_enable", ppcinfo->name); + 
qdev_connect_gpio_out_named(ssedev, gpioname, 0, + qdev_get_gpio_in_named(ppcdev, + "irq_enable", 0)); + g_free(gpioname); + gpioname = g_strdup_printf("%s_irq_clear", ppcinfo->name); + qdev_connect_gpio_out_named(ssedev, gpioname, 0, + qdev_get_gpio_in_named(ppcdev, + "irq_clear", 0)); + g_free(gpioname); + gpioname = g_strdup_printf("%s_irq_status", ppcinfo->name); + qdev_connect_gpio_out_named(ppcdev, "irq", 0, + qdev_get_gpio_in_named(ssedev, + gpioname, 0)); + g_free(gpioname); + + qdev_connect_gpio_out(dev_splitter, i, + qdev_get_gpio_in_named(ppcdev, + "cfg_sec_resp", 0)); + } + + armv7m_load_kernel(ARM_CPU(first_cpu), machine->kernel_filename, 0x2000000); +} + +static void musca_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + + mc->default_cpus = 2; + mc->min_cpus = mc->default_cpus; + mc->max_cpus = mc->default_cpus; + mc->default_cpu_type = ARM_CPU_TYPE_NAME("cortex-m33"); + mc->init = musca_init; +} + +static void musca_a_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + MuscaMachineClass *mmc = MUSCA_MACHINE_CLASS(oc); + + mc->desc = "ARM Musca-A board (dual Cortex-M33)"; + mmc->type = MUSCA_A; + mmc->init_svtor = 0x10200000; + mmc->sram_addr_width = 15; + mmc->num_irqs = 64; + mmc->mpc_info = a_mpc_info; + mmc->num_mpcs = ARRAY_SIZE(a_mpc_info); +} + +static void musca_b1_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + MuscaMachineClass *mmc = MUSCA_MACHINE_CLASS(oc); + + mc->desc = "ARM Musca-B1 board (dual Cortex-M33)"; + mmc->type = MUSCA_B1; + /* + * This matches the DAPlink firmware which boots from QSPI. There + * is also a firmware blob which boots from the eFlash, which + * uses init_svtor = 0x1A000000. QEMU doesn't currently support that, + * though we could in theory expose a machine property on the command + * line to allow the user to request eFlash boot. + */ + mmc->init_svtor = 0x10000000; + mmc->sram_addr_width = 17; + mmc->num_irqs = 96; + mmc->mpc_info = b1_mpc_info; + mmc->num_mpcs = ARRAY_SIZE(b1_mpc_info); +} + +static const TypeInfo musca_info = { + .name = TYPE_MUSCA_MACHINE, + .parent = TYPE_MACHINE, + .abstract = true, + .instance_size = sizeof(MuscaMachineState), + .class_size = sizeof(MuscaMachineClass), + .class_init = musca_class_init, +}; + +static const TypeInfo musca_a_info = { + .name = TYPE_MUSCA_A_MACHINE, + .parent = TYPE_MUSCA_MACHINE, + .class_init = musca_a_class_init, +}; + +static const TypeInfo musca_b1_info = { + .name = TYPE_MUSCA_B1_MACHINE, + .parent = TYPE_MUSCA_MACHINE, + .class_init = musca_b1_class_init, +}; + +static void musca_machine_init(void) +{ + type_register_static(&musca_info); + type_register_static(&musca_a_info); + type_register_static(&musca_b1_info); +} + +type_init(musca_machine_init); diff --git a/hw/arm/musicpal.c b/hw/arm/musicpal.c new file mode 100644 index 000000000..2d612cc0c --- /dev/null +++ b/hw/arm/musicpal.c @@ -0,0 +1,1756 @@ +/* + * Marvell MV88W8618 / Freecom MusicPal emulation. + * + * Copyright (c) 2008 Jan Kiszka + * + * This code is licensed under the GNU GPL v2. + * + * Contributions after 2012-01-13 are licensed under the terms of the + * GNU GPL, version 2 or (at your option) any later version. 
+ */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "cpu.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "hw/arm/boot.h" +#include "net/net.h" +#include "sysemu/sysemu.h" +#include "hw/boards.h" +#include "hw/char/serial.h" +#include "qemu/timer.h" +#include "hw/ptimer.h" +#include "hw/qdev-properties.h" +#include "hw/block/flash.h" +#include "ui/console.h" +#include "hw/i2c/i2c.h" +#include "hw/irq.h" +#include "hw/or-irq.h" +#include "hw/audio/wm8750.h" +#include "sysemu/block-backend.h" +#include "sysemu/runstate.h" +#include "sysemu/dma.h" +#include "ui/pixel_ops.h" +#include "qemu/cutils.h" +#include "qom/object.h" + +#define MP_MISC_BASE 0x80002000 +#define MP_MISC_SIZE 0x00001000 + +#define MP_ETH_BASE 0x80008000 +#define MP_ETH_SIZE 0x00001000 + +#define MP_WLAN_BASE 0x8000C000 +#define MP_WLAN_SIZE 0x00000800 + +#define MP_UART1_BASE 0x8000C840 +#define MP_UART2_BASE 0x8000C940 + +#define MP_GPIO_BASE 0x8000D000 +#define MP_GPIO_SIZE 0x00001000 + +#define MP_FLASHCFG_BASE 0x90006000 +#define MP_FLASHCFG_SIZE 0x00001000 + +#define MP_AUDIO_BASE 0x90007000 + +#define MP_PIC_BASE 0x90008000 +#define MP_PIC_SIZE 0x00001000 + +#define MP_PIT_BASE 0x90009000 +#define MP_PIT_SIZE 0x00001000 + +#define MP_LCD_BASE 0x9000c000 +#define MP_LCD_SIZE 0x00001000 + +#define MP_SRAM_BASE 0xC0000000 +#define MP_SRAM_SIZE 0x00020000 + +#define MP_RAM_DEFAULT_SIZE 32*1024*1024 +#define MP_FLASH_SIZE_MAX 32*1024*1024 + +#define MP_TIMER1_IRQ 4 +#define MP_TIMER2_IRQ 5 +#define MP_TIMER3_IRQ 6 +#define MP_TIMER4_IRQ 7 +#define MP_EHCI_IRQ 8 +#define MP_ETH_IRQ 9 +#define MP_UART_SHARED_IRQ 11 +#define MP_GPIO_IRQ 12 +#define MP_RTC_IRQ 28 +#define MP_AUDIO_IRQ 30 + +/* Wolfson 8750 I2C address */ +#define MP_WM_ADDR 0x1A + +/* Ethernet register offsets */ +#define MP_ETH_SMIR 0x010 +#define MP_ETH_PCXR 0x408 +#define MP_ETH_SDCMR 0x448 +#define MP_ETH_ICR 0x450 +#define MP_ETH_IMR 0x458 +#define MP_ETH_FRDP0 0x480 +#define MP_ETH_FRDP1 0x484 +#define MP_ETH_FRDP2 0x488 +#define MP_ETH_FRDP3 0x48C +#define MP_ETH_CRDP0 0x4A0 +#define MP_ETH_CRDP1 0x4A4 +#define MP_ETH_CRDP2 0x4A8 +#define MP_ETH_CRDP3 0x4AC +#define MP_ETH_CTDP0 0x4E0 +#define MP_ETH_CTDP1 0x4E4 + +/* MII PHY access */ +#define MP_ETH_SMIR_DATA 0x0000FFFF +#define MP_ETH_SMIR_ADDR 0x03FF0000 +#define MP_ETH_SMIR_OPCODE (1 << 26) /* Read value */ +#define MP_ETH_SMIR_RDVALID (1 << 27) + +/* PHY registers */ +#define MP_ETH_PHY1_BMSR 0x00210000 +#define MP_ETH_PHY1_PHYSID1 0x00410000 +#define MP_ETH_PHY1_PHYSID2 0x00610000 + +#define MP_PHY_BMSR_LINK 0x0004 +#define MP_PHY_BMSR_AUTONEG 0x0008 + +#define MP_PHY_88E3015 0x01410E20 + +/* TX descriptor status */ +#define MP_ETH_TX_OWN (1U << 31) + +/* RX descriptor status */ +#define MP_ETH_RX_OWN (1U << 31) + +/* Interrupt cause/mask bits */ +#define MP_ETH_IRQ_RX_BIT 0 +#define MP_ETH_IRQ_RX (1 << MP_ETH_IRQ_RX_BIT) +#define MP_ETH_IRQ_TXHI_BIT 2 +#define MP_ETH_IRQ_TXLO_BIT 3 + +/* Port config bits */ +#define MP_ETH_PCXR_2BSM_BIT 28 /* 2-byte incoming suffix */ + +/* SDMA command bits */ +#define MP_ETH_CMD_TXHI (1 << 23) +#define MP_ETH_CMD_TXLO (1 << 22) + +typedef struct mv88w8618_tx_desc { + uint32_t cmdstat; + uint16_t res; + uint16_t bytes; + uint32_t buffer; + uint32_t next; +} mv88w8618_tx_desc; + +typedef struct mv88w8618_rx_desc { + uint32_t cmdstat; + uint16_t bytes; + uint16_t buffer_size; + uint32_t buffer; + uint32_t next; +} mv88w8618_rx_desc; + +#define TYPE_MV88W8618_ETH "mv88w8618_eth" +OBJECT_DECLARE_SIMPLE_TYPE(mv88w8618_eth_state, 
MV88W8618_ETH) + +struct mv88w8618_eth_state { + /*< private >*/ + SysBusDevice parent_obj; + /*< public >*/ + + MemoryRegion iomem; + qemu_irq irq; + MemoryRegion *dma_mr; + AddressSpace dma_as; + uint32_t smir; + uint32_t icr; + uint32_t imr; + int mmio_index; + uint32_t vlan_header; + uint32_t tx_queue[2]; + uint32_t rx_queue[4]; + uint32_t frx_queue[4]; + uint32_t cur_rx[4]; + NICState *nic; + NICConf conf; +}; + +static void eth_rx_desc_put(AddressSpace *dma_as, uint32_t addr, + mv88w8618_rx_desc *desc) +{ + cpu_to_le32s(&desc->cmdstat); + cpu_to_le16s(&desc->bytes); + cpu_to_le16s(&desc->buffer_size); + cpu_to_le32s(&desc->buffer); + cpu_to_le32s(&desc->next); + dma_memory_write(dma_as, addr, desc, sizeof(*desc)); +} + +static void eth_rx_desc_get(AddressSpace *dma_as, uint32_t addr, + mv88w8618_rx_desc *desc) +{ + dma_memory_read(dma_as, addr, desc, sizeof(*desc)); + le32_to_cpus(&desc->cmdstat); + le16_to_cpus(&desc->bytes); + le16_to_cpus(&desc->buffer_size); + le32_to_cpus(&desc->buffer); + le32_to_cpus(&desc->next); +} + +static ssize_t eth_receive(NetClientState *nc, const uint8_t *buf, size_t size) +{ + mv88w8618_eth_state *s = qemu_get_nic_opaque(nc); + uint32_t desc_addr; + mv88w8618_rx_desc desc; + int i; + + for (i = 0; i < 4; i++) { + desc_addr = s->cur_rx[i]; + if (!desc_addr) { + continue; + } + do { + eth_rx_desc_get(&s->dma_as, desc_addr, &desc); + if ((desc.cmdstat & MP_ETH_RX_OWN) && desc.buffer_size >= size) { + dma_memory_write(&s->dma_as, desc.buffer + s->vlan_header, + buf, size); + desc.bytes = size + s->vlan_header; + desc.cmdstat &= ~MP_ETH_RX_OWN; + s->cur_rx[i] = desc.next; + + s->icr |= MP_ETH_IRQ_RX; + if (s->icr & s->imr) { + qemu_irq_raise(s->irq); + } + eth_rx_desc_put(&s->dma_as, desc_addr, &desc); + return size; + } + desc_addr = desc.next; + } while (desc_addr != s->rx_queue[i]); + } + return size; +} + +static void eth_tx_desc_put(AddressSpace *dma_as, uint32_t addr, + mv88w8618_tx_desc *desc) +{ + cpu_to_le32s(&desc->cmdstat); + cpu_to_le16s(&desc->res); + cpu_to_le16s(&desc->bytes); + cpu_to_le32s(&desc->buffer); + cpu_to_le32s(&desc->next); + dma_memory_write(dma_as, addr, desc, sizeof(*desc)); +} + +static void eth_tx_desc_get(AddressSpace *dma_as, uint32_t addr, + mv88w8618_tx_desc *desc) +{ + dma_memory_read(dma_as, addr, desc, sizeof(*desc)); + le32_to_cpus(&desc->cmdstat); + le16_to_cpus(&desc->res); + le16_to_cpus(&desc->bytes); + le32_to_cpus(&desc->buffer); + le32_to_cpus(&desc->next); +} + +static void eth_send(mv88w8618_eth_state *s, int queue_index) +{ + uint32_t desc_addr = s->tx_queue[queue_index]; + mv88w8618_tx_desc desc; + uint32_t next_desc; + uint8_t buf[2048]; + int len; + + do { + eth_tx_desc_get(&s->dma_as, desc_addr, &desc); + next_desc = desc.next; + if (desc.cmdstat & MP_ETH_TX_OWN) { + len = desc.bytes; + if (len < 2048) { + dma_memory_read(&s->dma_as, desc.buffer, buf, len); + qemu_send_packet(qemu_get_queue(s->nic), buf, len); + } + desc.cmdstat &= ~MP_ETH_TX_OWN; + s->icr |= 1 << (MP_ETH_IRQ_TXLO_BIT - queue_index); + eth_tx_desc_put(&s->dma_as, desc_addr, &desc); + } + desc_addr = next_desc; + } while (desc_addr != s->tx_queue[queue_index]); +} + +static uint64_t mv88w8618_eth_read(void *opaque, hwaddr offset, + unsigned size) +{ + mv88w8618_eth_state *s = opaque; + + switch (offset) { + case MP_ETH_SMIR: + if (s->smir & MP_ETH_SMIR_OPCODE) { + switch (s->smir & MP_ETH_SMIR_ADDR) { + case MP_ETH_PHY1_BMSR: + return MP_PHY_BMSR_LINK | MP_PHY_BMSR_AUTONEG | + MP_ETH_SMIR_RDVALID; + case MP_ETH_PHY1_PHYSID1: + return 
(MP_PHY_88E3015 >> 16) | MP_ETH_SMIR_RDVALID; + case MP_ETH_PHY1_PHYSID2: + return (MP_PHY_88E3015 & 0xFFFF) | MP_ETH_SMIR_RDVALID; + default: + return MP_ETH_SMIR_RDVALID; + } + } + return 0; + + case MP_ETH_ICR: + return s->icr; + + case MP_ETH_IMR: + return s->imr; + + case MP_ETH_FRDP0 ... MP_ETH_FRDP3: + return s->frx_queue[(offset - MP_ETH_FRDP0)/4]; + + case MP_ETH_CRDP0 ... MP_ETH_CRDP3: + return s->rx_queue[(offset - MP_ETH_CRDP0)/4]; + + case MP_ETH_CTDP0 ... MP_ETH_CTDP1: + return s->tx_queue[(offset - MP_ETH_CTDP0)/4]; + + default: + return 0; + } +} + +static void mv88w8618_eth_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + mv88w8618_eth_state *s = opaque; + + switch (offset) { + case MP_ETH_SMIR: + s->smir = value; + break; + + case MP_ETH_PCXR: + s->vlan_header = ((value >> MP_ETH_PCXR_2BSM_BIT) & 1) * 2; + break; + + case MP_ETH_SDCMR: + if (value & MP_ETH_CMD_TXHI) { + eth_send(s, 1); + } + if (value & MP_ETH_CMD_TXLO) { + eth_send(s, 0); + } + if (value & (MP_ETH_CMD_TXHI | MP_ETH_CMD_TXLO) && s->icr & s->imr) { + qemu_irq_raise(s->irq); + } + break; + + case MP_ETH_ICR: + s->icr &= value; + break; + + case MP_ETH_IMR: + s->imr = value; + if (s->icr & s->imr) { + qemu_irq_raise(s->irq); + } + break; + + case MP_ETH_FRDP0 ... MP_ETH_FRDP3: + s->frx_queue[(offset - MP_ETH_FRDP0)/4] = value; + break; + + case MP_ETH_CRDP0 ... MP_ETH_CRDP3: + s->rx_queue[(offset - MP_ETH_CRDP0)/4] = + s->cur_rx[(offset - MP_ETH_CRDP0)/4] = value; + break; + + case MP_ETH_CTDP0 ... MP_ETH_CTDP1: + s->tx_queue[(offset - MP_ETH_CTDP0)/4] = value; + break; + } +} + +static const MemoryRegionOps mv88w8618_eth_ops = { + .read = mv88w8618_eth_read, + .write = mv88w8618_eth_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void eth_cleanup(NetClientState *nc) +{ + mv88w8618_eth_state *s = qemu_get_nic_opaque(nc); + + s->nic = NULL; +} + +static NetClientInfo net_mv88w8618_info = { + .type = NET_CLIENT_DRIVER_NIC, + .size = sizeof(NICState), + .receive = eth_receive, + .cleanup = eth_cleanup, +}; + +static void mv88w8618_eth_init(Object *obj) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + DeviceState *dev = DEVICE(sbd); + mv88w8618_eth_state *s = MV88W8618_ETH(dev); + + sysbus_init_irq(sbd, &s->irq); + memory_region_init_io(&s->iomem, obj, &mv88w8618_eth_ops, s, + "mv88w8618-eth", MP_ETH_SIZE); + sysbus_init_mmio(sbd, &s->iomem); +} + +static void mv88w8618_eth_realize(DeviceState *dev, Error **errp) +{ + mv88w8618_eth_state *s = MV88W8618_ETH(dev); + + if (!s->dma_mr) { + error_setg(errp, TYPE_MV88W8618_ETH " 'dma-memory' link not set"); + return; + } + + address_space_init(&s->dma_as, s->dma_mr, "emac-dma"); + s->nic = qemu_new_nic(&net_mv88w8618_info, &s->conf, + object_get_typename(OBJECT(dev)), dev->id, s); +} + +static const VMStateDescription mv88w8618_eth_vmsd = { + .name = "mv88w8618_eth", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(smir, mv88w8618_eth_state), + VMSTATE_UINT32(icr, mv88w8618_eth_state), + VMSTATE_UINT32(imr, mv88w8618_eth_state), + VMSTATE_UINT32(vlan_header, mv88w8618_eth_state), + VMSTATE_UINT32_ARRAY(tx_queue, mv88w8618_eth_state, 2), + VMSTATE_UINT32_ARRAY(rx_queue, mv88w8618_eth_state, 4), + VMSTATE_UINT32_ARRAY(frx_queue, mv88w8618_eth_state, 4), + VMSTATE_UINT32_ARRAY(cur_rx, mv88w8618_eth_state, 4), + VMSTATE_END_OF_LIST() + } +}; + +static Property mv88w8618_eth_properties[] = { + DEFINE_NIC_PROPERTIES(mv88w8618_eth_state, conf), + DEFINE_PROP_LINK("dma-memory", mv88w8618_eth_state, 
dma_mr, + TYPE_MEMORY_REGION, MemoryRegion *), + DEFINE_PROP_END_OF_LIST(), +}; + +static void mv88w8618_eth_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->vmsd = &mv88w8618_eth_vmsd; + device_class_set_props(dc, mv88w8618_eth_properties); + dc->realize = mv88w8618_eth_realize; +} + +static const TypeInfo mv88w8618_eth_info = { + .name = TYPE_MV88W8618_ETH, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(mv88w8618_eth_state), + .instance_init = mv88w8618_eth_init, + .class_init = mv88w8618_eth_class_init, +}; + +/* LCD register offsets */ +#define MP_LCD_IRQCTRL 0x180 +#define MP_LCD_IRQSTAT 0x184 +#define MP_LCD_SPICTRL 0x1ac +#define MP_LCD_INST 0x1bc +#define MP_LCD_DATA 0x1c0 + +/* Mode magics */ +#define MP_LCD_SPI_DATA 0x00100011 +#define MP_LCD_SPI_CMD 0x00104011 +#define MP_LCD_SPI_INVALID 0x00000000 + +/* Commmands */ +#define MP_LCD_INST_SETPAGE0 0xB0 +/* ... */ +#define MP_LCD_INST_SETPAGE7 0xB7 + +#define MP_LCD_TEXTCOLOR 0xe0e0ff /* RRGGBB */ + +#define TYPE_MUSICPAL_LCD "musicpal_lcd" +OBJECT_DECLARE_SIMPLE_TYPE(musicpal_lcd_state, MUSICPAL_LCD) + +struct musicpal_lcd_state { + /*< private >*/ + SysBusDevice parent_obj; + /*< public >*/ + + MemoryRegion iomem; + uint32_t brightness; + uint32_t mode; + uint32_t irqctrl; + uint32_t page; + uint32_t page_off; + QemuConsole *con; + uint8_t video_ram[128*64/8]; +}; + +static uint8_t scale_lcd_color(musicpal_lcd_state *s, uint8_t col) +{ + switch (s->brightness) { + case 7: + return col; + case 0: + return 0; + default: + return (col * s->brightness) / 7; + } +} + +static inline void set_lcd_pixel32(musicpal_lcd_state *s, + int x, int y, uint32_t col) +{ + int dx, dy; + DisplaySurface *surface = qemu_console_surface(s->con); + uint32_t *pixel = + &((uint32_t *) surface_data(surface))[(y * 128 * 3 + x) * 3]; + + for (dy = 0; dy < 3; dy++, pixel += 127 * 3) { + for (dx = 0; dx < 3; dx++, pixel++) { + *pixel = col; + } + } +} + +static void lcd_refresh(void *opaque) +{ + musicpal_lcd_state *s = opaque; + int x, y, col; + + col = rgb_to_pixel32(scale_lcd_color(s, (MP_LCD_TEXTCOLOR >> 16) & 0xff), + scale_lcd_color(s, (MP_LCD_TEXTCOLOR >> 8) & 0xff), + scale_lcd_color(s, MP_LCD_TEXTCOLOR & 0xff)); + for (x = 0; x < 128; x++) { + for (y = 0; y < 64; y++) { + if (s->video_ram[x + (y / 8) * 128] & (1 << (y % 8))) { + set_lcd_pixel32(s, x, y, col); + } else { + set_lcd_pixel32(s, x, y, 0); + } + } + } + + dpy_gfx_update(s->con, 0, 0, 128*3, 64*3); +} + +static void lcd_invalidate(void *opaque) +{ +} + +static void musicpal_lcd_gpio_brightness_in(void *opaque, int irq, int level) +{ + musicpal_lcd_state *s = opaque; + s->brightness &= ~(1 << irq); + s->brightness |= level << irq; +} + +static uint64_t musicpal_lcd_read(void *opaque, hwaddr offset, + unsigned size) +{ + musicpal_lcd_state *s = opaque; + + switch (offset) { + case MP_LCD_IRQCTRL: + return s->irqctrl; + + default: + return 0; + } +} + +static void musicpal_lcd_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + musicpal_lcd_state *s = opaque; + + switch (offset) { + case MP_LCD_IRQCTRL: + s->irqctrl = value; + break; + + case MP_LCD_SPICTRL: + if (value == MP_LCD_SPI_DATA || value == MP_LCD_SPI_CMD) { + s->mode = value; + } else { + s->mode = MP_LCD_SPI_INVALID; + } + break; + + case MP_LCD_INST: + if (value >= MP_LCD_INST_SETPAGE0 && value <= MP_LCD_INST_SETPAGE7) { + s->page = value - MP_LCD_INST_SETPAGE0; + s->page_off = 0; + } + break; + + case MP_LCD_DATA: + if (s->mode == MP_LCD_SPI_CMD) { + 
if (value >= MP_LCD_INST_SETPAGE0 && + value <= MP_LCD_INST_SETPAGE7) { + s->page = value - MP_LCD_INST_SETPAGE0; + s->page_off = 0; + } + } else if (s->mode == MP_LCD_SPI_DATA) { + s->video_ram[s->page*128 + s->page_off] = value; + s->page_off = (s->page_off + 1) & 127; + } + break; + } +} + +static const MemoryRegionOps musicpal_lcd_ops = { + .read = musicpal_lcd_read, + .write = musicpal_lcd_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static const GraphicHwOps musicpal_gfx_ops = { + .invalidate = lcd_invalidate, + .gfx_update = lcd_refresh, +}; + +static void musicpal_lcd_realize(DeviceState *dev, Error **errp) +{ + musicpal_lcd_state *s = MUSICPAL_LCD(dev); + s->con = graphic_console_init(dev, 0, &musicpal_gfx_ops, s); + qemu_console_resize(s->con, 128 * 3, 64 * 3); +} + +static void musicpal_lcd_init(Object *obj) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + DeviceState *dev = DEVICE(sbd); + musicpal_lcd_state *s = MUSICPAL_LCD(dev); + + s->brightness = 7; + + memory_region_init_io(&s->iomem, obj, &musicpal_lcd_ops, s, + "musicpal-lcd", MP_LCD_SIZE); + sysbus_init_mmio(sbd, &s->iomem); + + qdev_init_gpio_in(dev, musicpal_lcd_gpio_brightness_in, 3); +} + +static const VMStateDescription musicpal_lcd_vmsd = { + .name = "musicpal_lcd", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(brightness, musicpal_lcd_state), + VMSTATE_UINT32(mode, musicpal_lcd_state), + VMSTATE_UINT32(irqctrl, musicpal_lcd_state), + VMSTATE_UINT32(page, musicpal_lcd_state), + VMSTATE_UINT32(page_off, musicpal_lcd_state), + VMSTATE_BUFFER(video_ram, musicpal_lcd_state), + VMSTATE_END_OF_LIST() + } +}; + +static void musicpal_lcd_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->vmsd = &musicpal_lcd_vmsd; + dc->realize = musicpal_lcd_realize; +} + +static const TypeInfo musicpal_lcd_info = { + .name = TYPE_MUSICPAL_LCD, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(musicpal_lcd_state), + .instance_init = musicpal_lcd_init, + .class_init = musicpal_lcd_class_init, +}; + +/* PIC register offsets */ +#define MP_PIC_STATUS 0x00 +#define MP_PIC_ENABLE_SET 0x08 +#define MP_PIC_ENABLE_CLR 0x0C + +#define TYPE_MV88W8618_PIC "mv88w8618_pic" +OBJECT_DECLARE_SIMPLE_TYPE(mv88w8618_pic_state, MV88W8618_PIC) + +struct mv88w8618_pic_state { + /*< private >*/ + SysBusDevice parent_obj; + /*< public >*/ + + MemoryRegion iomem; + uint32_t level; + uint32_t enabled; + qemu_irq parent_irq; +}; + +static void mv88w8618_pic_update(mv88w8618_pic_state *s) +{ + qemu_set_irq(s->parent_irq, (s->level & s->enabled)); +} + +static void mv88w8618_pic_set_irq(void *opaque, int irq, int level) +{ + mv88w8618_pic_state *s = opaque; + + if (level) { + s->level |= 1 << irq; + } else { + s->level &= ~(1 << irq); + } + mv88w8618_pic_update(s); +} + +static uint64_t mv88w8618_pic_read(void *opaque, hwaddr offset, + unsigned size) +{ + mv88w8618_pic_state *s = opaque; + + switch (offset) { + case MP_PIC_STATUS: + return s->level & s->enabled; + + default: + return 0; + } +} + +static void mv88w8618_pic_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + mv88w8618_pic_state *s = opaque; + + switch (offset) { + case MP_PIC_ENABLE_SET: + s->enabled |= value; + break; + + case MP_PIC_ENABLE_CLR: + s->enabled &= ~value; + s->level &= ~value; + break; + } + mv88w8618_pic_update(s); +} + +static void mv88w8618_pic_reset(DeviceState *d) +{ + mv88w8618_pic_state *s = MV88W8618_PIC(d); + + s->level = 0; + s->enabled = 0; +} + 
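+/*
+ * Guest MMIO view of the PIC: reads of MP_PIC_STATUS return the pending
+ * sources that are currently enabled (level & enabled); writes to
+ * MP_PIC_ENABLE_SET enable sources, while writes to MP_PIC_ENABLE_CLR
+ * both disable them and clear their pending state.
+ */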
+static const MemoryRegionOps mv88w8618_pic_ops = { + .read = mv88w8618_pic_read, + .write = mv88w8618_pic_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void mv88w8618_pic_init(Object *obj) +{ + SysBusDevice *dev = SYS_BUS_DEVICE(obj); + mv88w8618_pic_state *s = MV88W8618_PIC(dev); + + qdev_init_gpio_in(DEVICE(dev), mv88w8618_pic_set_irq, 32); + sysbus_init_irq(dev, &s->parent_irq); + memory_region_init_io(&s->iomem, obj, &mv88w8618_pic_ops, s, + "musicpal-pic", MP_PIC_SIZE); + sysbus_init_mmio(dev, &s->iomem); +} + +static const VMStateDescription mv88w8618_pic_vmsd = { + .name = "mv88w8618_pic", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(level, mv88w8618_pic_state), + VMSTATE_UINT32(enabled, mv88w8618_pic_state), + VMSTATE_END_OF_LIST() + } +}; + +static void mv88w8618_pic_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = mv88w8618_pic_reset; + dc->vmsd = &mv88w8618_pic_vmsd; +} + +static const TypeInfo mv88w8618_pic_info = { + .name = TYPE_MV88W8618_PIC, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(mv88w8618_pic_state), + .instance_init = mv88w8618_pic_init, + .class_init = mv88w8618_pic_class_init, +}; + +/* PIT register offsets */ +#define MP_PIT_TIMER1_LENGTH 0x00 +/* ... */ +#define MP_PIT_TIMER4_LENGTH 0x0C +#define MP_PIT_CONTROL 0x10 +#define MP_PIT_TIMER1_VALUE 0x14 +/* ... */ +#define MP_PIT_TIMER4_VALUE 0x20 +#define MP_BOARD_RESET 0x34 + +/* Magic board reset value (probably some watchdog behind it) */ +#define MP_BOARD_RESET_MAGIC 0x10000 + +typedef struct mv88w8618_timer_state { + ptimer_state *ptimer; + uint32_t limit; + int freq; + qemu_irq irq; +} mv88w8618_timer_state; + +#define TYPE_MV88W8618_PIT "mv88w8618_pit" +OBJECT_DECLARE_SIMPLE_TYPE(mv88w8618_pit_state, MV88W8618_PIT) + +struct mv88w8618_pit_state { + /*< private >*/ + SysBusDevice parent_obj; + /*< public >*/ + + MemoryRegion iomem; + mv88w8618_timer_state timer[4]; +}; + +static void mv88w8618_timer_tick(void *opaque) +{ + mv88w8618_timer_state *s = opaque; + + qemu_irq_raise(s->irq); +} + +static void mv88w8618_timer_init(SysBusDevice *dev, mv88w8618_timer_state *s, + uint32_t freq) +{ + sysbus_init_irq(dev, &s->irq); + s->freq = freq; + + s->ptimer = ptimer_init(mv88w8618_timer_tick, s, PTIMER_POLICY_DEFAULT); +} + +static uint64_t mv88w8618_pit_read(void *opaque, hwaddr offset, + unsigned size) +{ + mv88w8618_pit_state *s = opaque; + mv88w8618_timer_state *t; + + switch (offset) { + case MP_PIT_TIMER1_VALUE ... MP_PIT_TIMER4_VALUE: + t = &s->timer[(offset-MP_PIT_TIMER1_VALUE) >> 2]; + return ptimer_get_count(t->ptimer); + + default: + return 0; + } +} + +static void mv88w8618_pit_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + mv88w8618_pit_state *s = opaque; + mv88w8618_timer_state *t; + int i; + + switch (offset) { + case MP_PIT_TIMER1_LENGTH ... 
MP_PIT_TIMER4_LENGTH: + t = &s->timer[offset >> 2]; + t->limit = value; + ptimer_transaction_begin(t->ptimer); + if (t->limit > 0) { + ptimer_set_limit(t->ptimer, t->limit, 1); + } else { + ptimer_stop(t->ptimer); + } + ptimer_transaction_commit(t->ptimer); + break; + + case MP_PIT_CONTROL: + for (i = 0; i < 4; i++) { + t = &s->timer[i]; + ptimer_transaction_begin(t->ptimer); + if (value & 0xf && t->limit > 0) { + ptimer_set_limit(t->ptimer, t->limit, 0); + ptimer_set_freq(t->ptimer, t->freq); + ptimer_run(t->ptimer, 0); + } else { + ptimer_stop(t->ptimer); + } + ptimer_transaction_commit(t->ptimer); + value >>= 4; + } + break; + + case MP_BOARD_RESET: + if (value == MP_BOARD_RESET_MAGIC) { + qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); + } + break; + } +} + +static void mv88w8618_pit_reset(DeviceState *d) +{ + mv88w8618_pit_state *s = MV88W8618_PIT(d); + int i; + + for (i = 0; i < 4; i++) { + mv88w8618_timer_state *t = &s->timer[i]; + ptimer_transaction_begin(t->ptimer); + ptimer_stop(t->ptimer); + ptimer_transaction_commit(t->ptimer); + t->limit = 0; + } +} + +static const MemoryRegionOps mv88w8618_pit_ops = { + .read = mv88w8618_pit_read, + .write = mv88w8618_pit_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void mv88w8618_pit_init(Object *obj) +{ + SysBusDevice *dev = SYS_BUS_DEVICE(obj); + mv88w8618_pit_state *s = MV88W8618_PIT(dev); + int i; + + /* Letting them all run at 1 MHz is likely just a pragmatic + * simplification. */ + for (i = 0; i < 4; i++) { + mv88w8618_timer_init(dev, &s->timer[i], 1000000); + } + + memory_region_init_io(&s->iomem, obj, &mv88w8618_pit_ops, s, + "musicpal-pit", MP_PIT_SIZE); + sysbus_init_mmio(dev, &s->iomem); +} + +static void mv88w8618_pit_finalize(Object *obj) +{ + SysBusDevice *dev = SYS_BUS_DEVICE(obj); + mv88w8618_pit_state *s = MV88W8618_PIT(dev); + int i; + + for (i = 0; i < 4; i++) { + ptimer_free(s->timer[i].ptimer); + } +} + +static const VMStateDescription mv88w8618_timer_vmsd = { + .name = "timer", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_PTIMER(ptimer, mv88w8618_timer_state), + VMSTATE_UINT32(limit, mv88w8618_timer_state), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription mv88w8618_pit_vmsd = { + .name = "mv88w8618_pit", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_STRUCT_ARRAY(timer, mv88w8618_pit_state, 4, 1, + mv88w8618_timer_vmsd, mv88w8618_timer_state), + VMSTATE_END_OF_LIST() + } +}; + +static void mv88w8618_pit_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = mv88w8618_pit_reset; + dc->vmsd = &mv88w8618_pit_vmsd; +} + +static const TypeInfo mv88w8618_pit_info = { + .name = TYPE_MV88W8618_PIT, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(mv88w8618_pit_state), + .instance_init = mv88w8618_pit_init, + .instance_finalize = mv88w8618_pit_finalize, + .class_init = mv88w8618_pit_class_init, +}; + +/* Flash config register offsets */ +#define MP_FLASHCFG_CFGR0 0x04 + +#define TYPE_MV88W8618_FLASHCFG "mv88w8618_flashcfg" +OBJECT_DECLARE_SIMPLE_TYPE(mv88w8618_flashcfg_state, MV88W8618_FLASHCFG) + +struct mv88w8618_flashcfg_state { + /*< private >*/ + SysBusDevice parent_obj; + /*< public >*/ + + MemoryRegion iomem; + uint32_t cfgr0; +}; + +static uint64_t mv88w8618_flashcfg_read(void *opaque, + hwaddr offset, + unsigned size) +{ + mv88w8618_flashcfg_state *s = opaque; + + switch (offset) { + case MP_FLASHCFG_CFGR0: + return s->cfgr0; + + default: + 
return 0; + } +} + +static void mv88w8618_flashcfg_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + mv88w8618_flashcfg_state *s = opaque; + + switch (offset) { + case MP_FLASHCFG_CFGR0: + s->cfgr0 = value; + break; + } +} + +static const MemoryRegionOps mv88w8618_flashcfg_ops = { + .read = mv88w8618_flashcfg_read, + .write = mv88w8618_flashcfg_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void mv88w8618_flashcfg_init(Object *obj) +{ + SysBusDevice *dev = SYS_BUS_DEVICE(obj); + mv88w8618_flashcfg_state *s = MV88W8618_FLASHCFG(dev); + + s->cfgr0 = 0xfffe4285; /* Default as set by U-Boot for 8 MB flash */ + memory_region_init_io(&s->iomem, obj, &mv88w8618_flashcfg_ops, s, + "musicpal-flashcfg", MP_FLASHCFG_SIZE); + sysbus_init_mmio(dev, &s->iomem); +} + +static const VMStateDescription mv88w8618_flashcfg_vmsd = { + .name = "mv88w8618_flashcfg", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(cfgr0, mv88w8618_flashcfg_state), + VMSTATE_END_OF_LIST() + } +}; + +static void mv88w8618_flashcfg_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->vmsd = &mv88w8618_flashcfg_vmsd; +} + +static const TypeInfo mv88w8618_flashcfg_info = { + .name = TYPE_MV88W8618_FLASHCFG, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(mv88w8618_flashcfg_state), + .instance_init = mv88w8618_flashcfg_init, + .class_init = mv88w8618_flashcfg_class_init, +}; + +/* Misc register offsets */ +#define MP_MISC_BOARD_REVISION 0x18 + +#define MP_BOARD_REVISION 0x31 + +struct MusicPalMiscState { + SysBusDevice parent_obj; + MemoryRegion iomem; +}; + +#define TYPE_MUSICPAL_MISC "musicpal-misc" +OBJECT_DECLARE_SIMPLE_TYPE(MusicPalMiscState, MUSICPAL_MISC) + +static uint64_t musicpal_misc_read(void *opaque, hwaddr offset, + unsigned size) +{ + switch (offset) { + case MP_MISC_BOARD_REVISION: + return MP_BOARD_REVISION; + + default: + return 0; + } +} + +static void musicpal_misc_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ +} + +static const MemoryRegionOps musicpal_misc_ops = { + .read = musicpal_misc_read, + .write = musicpal_misc_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void musicpal_misc_init(Object *obj) +{ + SysBusDevice *sd = SYS_BUS_DEVICE(obj); + MusicPalMiscState *s = MUSICPAL_MISC(obj); + + memory_region_init_io(&s->iomem, OBJECT(s), &musicpal_misc_ops, NULL, + "musicpal-misc", MP_MISC_SIZE); + sysbus_init_mmio(sd, &s->iomem); +} + +static const TypeInfo musicpal_misc_info = { + .name = TYPE_MUSICPAL_MISC, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_init = musicpal_misc_init, + .instance_size = sizeof(MusicPalMiscState), +}; + +/* WLAN register offsets */ +#define MP_WLAN_MAGIC1 0x11c +#define MP_WLAN_MAGIC2 0x124 + +static uint64_t mv88w8618_wlan_read(void *opaque, hwaddr offset, + unsigned size) +{ + switch (offset) { + /* Workaround to allow loading the binary-only wlandrv.ko crap + * from the original Freecom firmware. 
*/ + case MP_WLAN_MAGIC1: + return ~3; + case MP_WLAN_MAGIC2: + return -1; + + default: + return 0; + } +} + +static void mv88w8618_wlan_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ +} + +static const MemoryRegionOps mv88w8618_wlan_ops = { + .read = mv88w8618_wlan_read, + .write = mv88w8618_wlan_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void mv88w8618_wlan_realize(DeviceState *dev, Error **errp) +{ + MemoryRegion *iomem = g_new(MemoryRegion, 1); + + memory_region_init_io(iomem, OBJECT(dev), &mv88w8618_wlan_ops, NULL, + "musicpal-wlan", MP_WLAN_SIZE); + sysbus_init_mmio(SYS_BUS_DEVICE(dev), iomem); +} + +/* GPIO register offsets */ +#define MP_GPIO_OE_LO 0x008 +#define MP_GPIO_OUT_LO 0x00c +#define MP_GPIO_IN_LO 0x010 +#define MP_GPIO_IER_LO 0x014 +#define MP_GPIO_IMR_LO 0x018 +#define MP_GPIO_ISR_LO 0x020 +#define MP_GPIO_OE_HI 0x508 +#define MP_GPIO_OUT_HI 0x50c +#define MP_GPIO_IN_HI 0x510 +#define MP_GPIO_IER_HI 0x514 +#define MP_GPIO_IMR_HI 0x518 +#define MP_GPIO_ISR_HI 0x520 + +/* GPIO bits & masks */ +#define MP_GPIO_LCD_BRIGHTNESS 0x00070000 +#define MP_GPIO_I2C_DATA_BIT 29 +#define MP_GPIO_I2C_CLOCK_BIT 30 + +/* LCD brightness bits in GPIO_OE_HI */ +#define MP_OE_LCD_BRIGHTNESS 0x0007 + +#define TYPE_MUSICPAL_GPIO "musicpal_gpio" +OBJECT_DECLARE_SIMPLE_TYPE(musicpal_gpio_state, MUSICPAL_GPIO) + +struct musicpal_gpio_state { + /*< private >*/ + SysBusDevice parent_obj; + /*< public >*/ + + MemoryRegion iomem; + uint32_t lcd_brightness; + uint32_t out_state; + uint32_t in_state; + uint32_t ier; + uint32_t imr; + uint32_t isr; + qemu_irq irq; + qemu_irq out[5]; /* 3 brightness out + 2 lcd (data and clock) */ +}; + +static void musicpal_gpio_brightness_update(musicpal_gpio_state *s) +{ + int i; + uint32_t brightness; + + /* compute brightness ratio */ + switch (s->lcd_brightness) { + case 0x00000007: + brightness = 0; + break; + + case 0x00020000: + brightness = 1; + break; + + case 0x00020001: + brightness = 2; + break; + + case 0x00040000: + brightness = 3; + break; + + case 0x00010006: + brightness = 4; + break; + + case 0x00020005: + brightness = 5; + break; + + case 0x00040003: + brightness = 6; + break; + + case 0x00030004: + default: + brightness = 7; + } + + /* set lcd brightness GPIOs */ + for (i = 0; i <= 2; i++) { + qemu_set_irq(s->out[i], (brightness >> i) & 1); + } +} + +static void musicpal_gpio_pin_event(void *opaque, int pin, int level) +{ + musicpal_gpio_state *s = opaque; + uint32_t mask = 1 << pin; + uint32_t delta = level << pin; + uint32_t old = s->in_state & mask; + + s->in_state &= ~mask; + s->in_state |= delta; + + if ((old ^ delta) && + ((level && (s->imr & mask)) || (!level && (s->ier & mask)))) { + s->isr = mask; + qemu_irq_raise(s->irq); + } +} + +static uint64_t musicpal_gpio_read(void *opaque, hwaddr offset, + unsigned size) +{ + musicpal_gpio_state *s = opaque; + + switch (offset) { + case MP_GPIO_OE_HI: /* used for LCD brightness control */ + return s->lcd_brightness & MP_OE_LCD_BRIGHTNESS; + + case MP_GPIO_OUT_LO: + return s->out_state & 0xFFFF; + case MP_GPIO_OUT_HI: + return s->out_state >> 16; + + case MP_GPIO_IN_LO: + return s->in_state & 0xFFFF; + case MP_GPIO_IN_HI: + return s->in_state >> 16; + + case MP_GPIO_IER_LO: + return s->ier & 0xFFFF; + case MP_GPIO_IER_HI: + return s->ier >> 16; + + case MP_GPIO_IMR_LO: + return s->imr & 0xFFFF; + case MP_GPIO_IMR_HI: + return s->imr >> 16; + + case MP_GPIO_ISR_LO: + return s->isr & 0xFFFF; + case MP_GPIO_ISR_HI: + return s->isr >> 16; + + default: + return 0; +
} +} + +static void musicpal_gpio_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + musicpal_gpio_state *s = opaque; + switch (offset) { + case MP_GPIO_OE_HI: /* used for LCD brightness control */ + s->lcd_brightness = (s->lcd_brightness & MP_GPIO_LCD_BRIGHTNESS) | + (value & MP_OE_LCD_BRIGHTNESS); + musicpal_gpio_brightness_update(s); + break; + + case MP_GPIO_OUT_LO: + s->out_state = (s->out_state & 0xFFFF0000) | (value & 0xFFFF); + break; + case MP_GPIO_OUT_HI: + s->out_state = (s->out_state & 0xFFFF) | (value << 16); + s->lcd_brightness = (s->lcd_brightness & 0xFFFF) | + (s->out_state & MP_GPIO_LCD_BRIGHTNESS); + musicpal_gpio_brightness_update(s); + qemu_set_irq(s->out[3], (s->out_state >> MP_GPIO_I2C_DATA_BIT) & 1); + qemu_set_irq(s->out[4], (s->out_state >> MP_GPIO_I2C_CLOCK_BIT) & 1); + break; + + case MP_GPIO_IER_LO: + s->ier = (s->ier & 0xFFFF0000) | (value & 0xFFFF); + break; + case MP_GPIO_IER_HI: + s->ier = (s->ier & 0xFFFF) | (value << 16); + break; + + case MP_GPIO_IMR_LO: + s->imr = (s->imr & 0xFFFF0000) | (value & 0xFFFF); + break; + case MP_GPIO_IMR_HI: + s->imr = (s->imr & 0xFFFF) | (value << 16); + break; + } +} + +static const MemoryRegionOps musicpal_gpio_ops = { + .read = musicpal_gpio_read, + .write = musicpal_gpio_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void musicpal_gpio_reset(DeviceState *d) +{ + musicpal_gpio_state *s = MUSICPAL_GPIO(d); + + s->lcd_brightness = 0; + s->out_state = 0; + s->in_state = 0xffffffff; + s->ier = 0; + s->imr = 0; + s->isr = 0; +} + +static void musicpal_gpio_init(Object *obj) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + DeviceState *dev = DEVICE(sbd); + musicpal_gpio_state *s = MUSICPAL_GPIO(dev); + + sysbus_init_irq(sbd, &s->irq); + + memory_region_init_io(&s->iomem, obj, &musicpal_gpio_ops, s, + "musicpal-gpio", MP_GPIO_SIZE); + sysbus_init_mmio(sbd, &s->iomem); + + qdev_init_gpio_out(dev, s->out, ARRAY_SIZE(s->out)); + + qdev_init_gpio_in(dev, musicpal_gpio_pin_event, 32); +} + +static const VMStateDescription musicpal_gpio_vmsd = { + .name = "musicpal_gpio", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(lcd_brightness, musicpal_gpio_state), + VMSTATE_UINT32(out_state, musicpal_gpio_state), + VMSTATE_UINT32(in_state, musicpal_gpio_state), + VMSTATE_UINT32(ier, musicpal_gpio_state), + VMSTATE_UINT32(imr, musicpal_gpio_state), + VMSTATE_UINT32(isr, musicpal_gpio_state), + VMSTATE_END_OF_LIST() + } +}; + +static void musicpal_gpio_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = musicpal_gpio_reset; + dc->vmsd = &musicpal_gpio_vmsd; +} + +static const TypeInfo musicpal_gpio_info = { + .name = TYPE_MUSICPAL_GPIO, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(musicpal_gpio_state), + .instance_init = musicpal_gpio_init, + .class_init = musicpal_gpio_class_init, +}; + +/* Keyboard codes & masks */ +#define KEY_RELEASED 0x80 +#define KEY_CODE 0x7f + +#define KEYCODE_TAB 0x0f +#define KEYCODE_ENTER 0x1c +#define KEYCODE_F 0x21 +#define KEYCODE_M 0x32 + +#define KEYCODE_EXTENDED 0xe0 +#define KEYCODE_UP 0x48 +#define KEYCODE_DOWN 0x50 +#define KEYCODE_LEFT 0x4b +#define KEYCODE_RIGHT 0x4d + +#define MP_KEY_WHEEL_VOL (1 << 0) +#define MP_KEY_WHEEL_VOL_INV (1 << 1) +#define MP_KEY_WHEEL_NAV (1 << 2) +#define MP_KEY_WHEEL_NAV_INV (1 << 3) +#define MP_KEY_BTN_FAVORITS (1 << 4) +#define MP_KEY_BTN_MENU (1 << 5) +#define MP_KEY_BTN_VOLUME (1 << 6) +#define MP_KEY_BTN_NAVIGATION (1 << 7) + 
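One subtlety in the GPIO edge logic above, noted here before the keyboard device that feeds it: in musicpal_gpio_pin_event(), IMR arms rising edges while IER arms falling edges, only a genuine transition (old ^ delta) latches anything, and ISR is overwritten rather than OR-accumulated. A minimal sketch of that behaviour (illustrative names, not part of the patch; the keyboard device below drives key presses active-low):

#include <assert.h>
#include <stdint.h>

typedef struct {
    uint32_t in_state, ier, imr, isr;
    int irq_raised; /* stand-in for qemu_irq_raise(s->irq) */
} gpio_model;

static void pin_event(gpio_model *s, int pin, int level)
{
    uint32_t mask = 1u << pin;
    uint32_t delta = (uint32_t)level << pin;
    uint32_t old = s->in_state & mask;

    s->in_state = (s->in_state & ~mask) | delta;

    /* IMR gates rising edges, IER gates falling edges. */
    if ((old ^ delta) &&
        ((level && (s->imr & mask)) || (!level && (s->ier & mask)))) {
        s->isr = mask; /* overwrites: only the latest source is visible */
        s->irq_raised = 1;
    }
}

int main(void)
{
    gpio_model s = { .in_state = 0xffffffffu }; /* reset value, as above */

    s.ier = 1u << 8;     /* arm a falling edge on pin 8 */
    pin_event(&s, 8, 0); /* key press pulls the line low: IRQ latched */
    assert(s.irq_raised && s.isr == (1u << 8));

    s.irq_raised = 0;
    pin_event(&s, 8, 0); /* same level again: no edge, no IRQ */
    assert(!s.irq_raised);

    pin_event(&s, 8, 1); /* rising edge would need IMR, which is clear */
    assert(!s.irq_raised);
    return 0;
}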
+#define TYPE_MUSICPAL_KEY "musicpal_key" +OBJECT_DECLARE_SIMPLE_TYPE(musicpal_key_state, MUSICPAL_KEY) + +struct musicpal_key_state { + /*< private >*/ + SysBusDevice parent_obj; + /*< public >*/ + + MemoryRegion iomem; + uint32_t kbd_extended; + uint32_t pressed_keys; + qemu_irq out[8]; +}; + +static void musicpal_key_event(void *opaque, int keycode) +{ + musicpal_key_state *s = opaque; + uint32_t event = 0; + int i; + + if (keycode == KEYCODE_EXTENDED) { + s->kbd_extended = 1; + return; + } + + if (s->kbd_extended) { + switch (keycode & KEY_CODE) { + case KEYCODE_UP: + event = MP_KEY_WHEEL_NAV | MP_KEY_WHEEL_NAV_INV; + break; + + case KEYCODE_DOWN: + event = MP_KEY_WHEEL_NAV; + break; + + case KEYCODE_LEFT: + event = MP_KEY_WHEEL_VOL | MP_KEY_WHEEL_VOL_INV; + break; + + case KEYCODE_RIGHT: + event = MP_KEY_WHEEL_VOL; + break; + } + } else { + switch (keycode & KEY_CODE) { + case KEYCODE_F: + event = MP_KEY_BTN_FAVORITS; + break; + + case KEYCODE_TAB: + event = MP_KEY_BTN_VOLUME; + break; + + case KEYCODE_ENTER: + event = MP_KEY_BTN_NAVIGATION; + break; + + case KEYCODE_M: + event = MP_KEY_BTN_MENU; + break; + } + /* Do not repeat already pressed buttons */ + if (!(keycode & KEY_RELEASED) && (s->pressed_keys & event)) { + event = 0; + } + } + + if (event) { + /* Raise GPIO pin first if repeating a key */ + if (!(keycode & KEY_RELEASED) && (s->pressed_keys & event)) { + for (i = 0; i <= 7; i++) { + if (event & (1 << i)) { + qemu_set_irq(s->out[i], 1); + } + } + } + for (i = 0; i <= 7; i++) { + if (event & (1 << i)) { + qemu_set_irq(s->out[i], !!(keycode & KEY_RELEASED)); + } + } + if (keycode & KEY_RELEASED) { + s->pressed_keys &= ~event; + } else { + s->pressed_keys |= event; + } + } + + s->kbd_extended = 0; +} + +static void musicpal_key_init(Object *obj) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + DeviceState *dev = DEVICE(sbd); + musicpal_key_state *s = MUSICPAL_KEY(dev); + + memory_region_init(&s->iomem, obj, "dummy", 0); + sysbus_init_mmio(sbd, &s->iomem); + + s->kbd_extended = 0; + s->pressed_keys = 0; + + qdev_init_gpio_out(dev, s->out, ARRAY_SIZE(s->out)); + + qemu_add_kbd_event_handler(musicpal_key_event, s); +} + +static const VMStateDescription musicpal_key_vmsd = { + .name = "musicpal_key", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(kbd_extended, musicpal_key_state), + VMSTATE_UINT32(pressed_keys, musicpal_key_state), + VMSTATE_END_OF_LIST() + } +}; + +static void musicpal_key_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->vmsd = &musicpal_key_vmsd; +} + +static const TypeInfo musicpal_key_info = { + .name = TYPE_MUSICPAL_KEY, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(musicpal_key_state), + .instance_init = musicpal_key_init, + .class_init = musicpal_key_class_init, +}; + +static struct arm_boot_info musicpal_binfo = { + .loader_start = 0x0, + .board_id = 0x20e, +}; + +static void musicpal_init(MachineState *machine) +{ + ARMCPU *cpu; + DeviceState *dev; + DeviceState *pic; + DeviceState *uart_orgate; + DeviceState *i2c_dev; + DeviceState *lcd_dev; + DeviceState *key_dev; + I2CSlave *wm8750_dev; + SysBusDevice *s; + I2CBus *i2c; + int i; + unsigned long flash_size; + DriveInfo *dinfo; + MachineClass *mc = MACHINE_GET_CLASS(machine); + MemoryRegion *address_space_mem = get_system_memory(); + MemoryRegion *sram = g_new(MemoryRegion, 1); + + /* For now we use a fixed - the original - RAM size */ + if (machine->ram_size != mc->default_ram_size) { + char *sz = 
size_to_str(mc->default_ram_size); + error_report("Invalid RAM size, should be %s", sz); + g_free(sz); + exit(EXIT_FAILURE); + } + + cpu = ARM_CPU(cpu_create(machine->cpu_type)); + + memory_region_add_subregion(address_space_mem, 0, machine->ram); + + memory_region_init_ram(sram, NULL, "musicpal.sram", MP_SRAM_SIZE, + &error_fatal); + memory_region_add_subregion(address_space_mem, MP_SRAM_BASE, sram); + + pic = sysbus_create_simple(TYPE_MV88W8618_PIC, MP_PIC_BASE, + qdev_get_gpio_in(DEVICE(cpu), ARM_CPU_IRQ)); + sysbus_create_varargs(TYPE_MV88W8618_PIT, MP_PIT_BASE, + qdev_get_gpio_in(pic, MP_TIMER1_IRQ), + qdev_get_gpio_in(pic, MP_TIMER2_IRQ), + qdev_get_gpio_in(pic, MP_TIMER3_IRQ), + qdev_get_gpio_in(pic, MP_TIMER4_IRQ), NULL); + + /* Logically OR both UART IRQs together */ + uart_orgate = DEVICE(object_new(TYPE_OR_IRQ)); + object_property_set_int(OBJECT(uart_orgate), "num-lines", 2, &error_fatal); + qdev_realize_and_unref(uart_orgate, NULL, &error_fatal); + qdev_connect_gpio_out(DEVICE(uart_orgate), 0, + qdev_get_gpio_in(pic, MP_UART_SHARED_IRQ)); + + serial_mm_init(address_space_mem, MP_UART1_BASE, 2, + qdev_get_gpio_in(uart_orgate, 0), + 1825000, serial_hd(0), DEVICE_NATIVE_ENDIAN); + serial_mm_init(address_space_mem, MP_UART2_BASE, 2, + qdev_get_gpio_in(uart_orgate, 1), + 1825000, serial_hd(1), DEVICE_NATIVE_ENDIAN); + + /* Register flash */ + dinfo = drive_get(IF_PFLASH, 0, 0); + if (dinfo) { + BlockBackend *blk = blk_by_legacy_dinfo(dinfo); + + flash_size = blk_getlength(blk); + if (flash_size != 8*1024*1024 && flash_size != 16*1024*1024 && + flash_size != 32*1024*1024) { + error_report("Invalid flash image size"); + exit(1); + } + + /* + * The original U-Boot accesses the flash at 0xFE000000 instead of + * 0xFF800000 (if there is 8 MB flash). So remap flash access if the + * image is smaller than 32 MB. 
+ */ + pflash_cfi02_register(0x100000000ULL - MP_FLASH_SIZE_MAX, + "musicpal.flash", flash_size, + blk, 0x10000, + MP_FLASH_SIZE_MAX / flash_size, + 2, 0x00BF, 0x236D, 0x0000, 0x0000, + 0x5555, 0x2AAA, 0); + } + sysbus_create_simple(TYPE_MV88W8618_FLASHCFG, MP_FLASHCFG_BASE, NULL); + + qemu_check_nic_model(&nd_table[0], "mv88w8618"); + dev = qdev_new(TYPE_MV88W8618_ETH); + qdev_set_nic_properties(dev, &nd_table[0]); + object_property_set_link(OBJECT(dev), "dma-memory", + OBJECT(get_system_memory()), &error_fatal); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, MP_ETH_BASE); + sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, + qdev_get_gpio_in(pic, MP_ETH_IRQ)); + + sysbus_create_simple("mv88w8618_wlan", MP_WLAN_BASE, NULL); + + sysbus_create_simple(TYPE_MUSICPAL_MISC, MP_MISC_BASE, NULL); + + dev = sysbus_create_simple(TYPE_MUSICPAL_GPIO, MP_GPIO_BASE, + qdev_get_gpio_in(pic, MP_GPIO_IRQ)); + i2c_dev = sysbus_create_simple("gpio_i2c", -1, NULL); + i2c = (I2CBus *)qdev_get_child_bus(i2c_dev, "i2c"); + + lcd_dev = sysbus_create_simple(TYPE_MUSICPAL_LCD, MP_LCD_BASE, NULL); + key_dev = sysbus_create_simple(TYPE_MUSICPAL_KEY, -1, NULL); + + /* I2C read data */ + qdev_connect_gpio_out(i2c_dev, 0, + qdev_get_gpio_in(dev, MP_GPIO_I2C_DATA_BIT)); + /* I2C data */ + qdev_connect_gpio_out(dev, 3, qdev_get_gpio_in(i2c_dev, 0)); + /* I2C clock */ + qdev_connect_gpio_out(dev, 4, qdev_get_gpio_in(i2c_dev, 1)); + + for (i = 0; i < 3; i++) { + qdev_connect_gpio_out(dev, i, qdev_get_gpio_in(lcd_dev, i)); + } + for (i = 0; i < 4; i++) { + qdev_connect_gpio_out(key_dev, i, qdev_get_gpio_in(dev, i + 8)); + } + for (i = 4; i < 8; i++) { + qdev_connect_gpio_out(key_dev, i, qdev_get_gpio_in(dev, i + 15)); + } + + wm8750_dev = i2c_slave_create_simple(i2c, TYPE_WM8750, MP_WM_ADDR); + dev = qdev_new(TYPE_MV88W8618_AUDIO); + s = SYS_BUS_DEVICE(dev); + object_property_set_link(OBJECT(dev), "wm8750", OBJECT(wm8750_dev), + NULL); + sysbus_realize_and_unref(s, &error_fatal); + sysbus_mmio_map(s, 0, MP_AUDIO_BASE); + sysbus_connect_irq(s, 0, qdev_get_gpio_in(pic, MP_AUDIO_IRQ)); + + musicpal_binfo.ram_size = MP_RAM_DEFAULT_SIZE; + arm_load_kernel(cpu, machine, &musicpal_binfo); +} + +static void musicpal_machine_init(MachineClass *mc) +{ + mc->desc = "Marvell 88w8618 / MusicPal (ARM926EJ-S)"; + mc->init = musicpal_init; + mc->ignore_memory_transaction_failures = true; + mc->default_cpu_type = ARM_CPU_TYPE_NAME("arm926"); + mc->default_ram_size = MP_RAM_DEFAULT_SIZE; + mc->default_ram_id = "musicpal.ram"; +} + +DEFINE_MACHINE("musicpal", musicpal_machine_init) + +static void mv88w8618_wlan_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = mv88w8618_wlan_realize; +} + +static const TypeInfo mv88w8618_wlan_info = { + .name = "mv88w8618_wlan", + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(SysBusDevice), + .class_init = mv88w8618_wlan_class_init, +}; + +static void musicpal_register_types(void) +{ + type_register_static(&mv88w8618_pic_info); + type_register_static(&mv88w8618_pit_info); + type_register_static(&mv88w8618_flashcfg_info); + type_register_static(&mv88w8618_eth_info); + type_register_static(&mv88w8618_wlan_info); + type_register_static(&musicpal_lcd_info); + type_register_static(&musicpal_gpio_info); + type_register_static(&musicpal_key_info); + type_register_static(&musicpal_misc_info); +} + +type_init(musicpal_register_types) diff --git a/hw/arm/netduino2.c b/hw/arm/netduino2.c new file mode 
100644 index 000000000..3365da11b --- /dev/null +++ b/hw/arm/netduino2.c @@ -0,0 +1,62 @@ +/* + * Netduino 2 Machine Model + * + * Copyright (c) 2014 Alistair Francis + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "hw/boards.h" +#include "hw/qdev-properties.h" +#include "hw/qdev-clock.h" +#include "qemu/error-report.h" +#include "hw/arm/stm32f205_soc.h" +#include "hw/arm/boot.h" + +/* Main SYSCLK frequency in Hz (120MHz) */ +#define SYSCLK_FRQ 120000000ULL + +static void netduino2_init(MachineState *machine) +{ + DeviceState *dev; + Clock *sysclk; + + /* This clock doesn't need migration because it is fixed-frequency */ + sysclk = clock_new(OBJECT(machine), "SYSCLK"); + clock_set_hz(sysclk, SYSCLK_FRQ); + + dev = qdev_new(TYPE_STM32F205_SOC); + qdev_prop_set_string(dev, "cpu-type", ARM_CPU_TYPE_NAME("cortex-m3")); + qdev_connect_clock_in(dev, "sysclk", sysclk); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + + armv7m_load_kernel(ARM_CPU(first_cpu), machine->kernel_filename, + FLASH_SIZE); +} + +static void netduino2_machine_init(MachineClass *mc) +{ + mc->desc = "Netduino 2 Machine (Cortex-M3)"; + mc->init = netduino2_init; + mc->ignore_memory_transaction_failures = true; +} + +DEFINE_MACHINE("netduino2", netduino2_machine_init) diff --git a/hw/arm/netduinoplus2.c b/hw/arm/netduinoplus2.c new file mode 100644 index 000000000..76cea8e48 --- /dev/null +++ b/hw/arm/netduinoplus2.c @@ -0,0 +1,62 @@ +/* + * Netduino Plus 2 Machine Model + * + * Copyright (c) 2014 Alistair Francis + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "hw/boards.h" +#include "hw/qdev-properties.h" +#include "hw/qdev-clock.h" +#include "qemu/error-report.h" +#include "hw/arm/stm32f405_soc.h" +#include "hw/arm/boot.h" + +/* Main SYSCLK frequency in Hz (168MHz) */ +#define SYSCLK_FRQ 168000000ULL + +static void netduinoplus2_init(MachineState *machine) +{ + DeviceState *dev; + Clock *sysclk; + + /* This clock doesn't need migration because it is fixed-frequency */ + sysclk = clock_new(OBJECT(machine), "SYSCLK"); + clock_set_hz(sysclk, SYSCLK_FRQ); + + dev = qdev_new(TYPE_STM32F405_SOC); + qdev_prop_set_string(dev, "cpu-type", ARM_CPU_TYPE_NAME("cortex-m4")); + qdev_connect_clock_in(dev, "sysclk", sysclk); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + + armv7m_load_kernel(ARM_CPU(first_cpu), + machine->kernel_filename, + FLASH_SIZE); +} + +static void netduinoplus2_machine_init(MachineClass *mc) +{ + mc->desc = "Netduino Plus 2 Machine (Cortex-M4)"; + mc->init = netduinoplus2_init; +} + +DEFINE_MACHINE("netduinoplus2", netduinoplus2_machine_init) diff --git a/hw/arm/npcm7xx.c b/hw/arm/npcm7xx.c new file mode 100644 index 000000000..878c2208e --- /dev/null +++ b/hw/arm/npcm7xx.c @@ -0,0 +1,813 @@ +/* + * Nuvoton NPCM7xx SoC family. + * + * Copyright 2020 Google LLC + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + */ + +#include "qemu/osdep.h" + +#include "hw/arm/boot.h" +#include "hw/arm/npcm7xx.h" +#include "hw/char/serial.h" +#include "hw/loader.h" +#include "hw/misc/unimp.h" +#include "hw/qdev-clock.h" +#include "hw/qdev-properties.h" +#include "qapi/error.h" +#include "qemu/units.h" +#include "sysemu/sysemu.h" + +/* + * This covers the whole MMIO space. We'll use this to catch any MMIO accesses + * that aren't handled by any device. + */ +#define NPCM7XX_MMIO_BA (0x80000000) +#define NPCM7XX_MMIO_SZ (0x7ffd0000) + +/* OTP key storage and fuse strap array */ +#define NPCM7XX_OTP1_BA (0xf0189000) +#define NPCM7XX_OTP2_BA (0xf018a000) + +/* Core system modules. 
*/ +#define NPCM7XX_L2C_BA (0xf03fc000) +#define NPCM7XX_CPUP_BA (0xf03fe000) +#define NPCM7XX_GCR_BA (0xf0800000) +#define NPCM7XX_CLK_BA (0xf0801000) +#define NPCM7XX_MC_BA (0xf0824000) +#define NPCM7XX_RNG_BA (0xf000b000) + +/* USB Host modules */ +#define NPCM7XX_EHCI_BA (0xf0806000) +#define NPCM7XX_OHCI_BA (0xf0807000) + +/* ADC Module */ +#define NPCM7XX_ADC_BA (0xf000c000) + +/* Internal AHB SRAM */ +#define NPCM7XX_RAM3_BA (0xc0008000) +#define NPCM7XX_RAM3_SZ (4 * KiB) + +/* Memory blocks at the end of the address space */ +#define NPCM7XX_RAM2_BA (0xfffd0000) +#define NPCM7XX_RAM2_SZ (128 * KiB) +#define NPCM7XX_ROM_BA (0xffff0000) +#define NPCM7XX_ROM_SZ (64 * KiB) + +/* SDHCI Modules */ +#define NPCM7XX_MMC_BA (0xf0842000) + +/* Clock configuration values to be fixed up when bypassing bootloader */ + +/* Run PLL1 at 1600 MHz */ +#define NPCM7XX_PLLCON1_FIXUP_VAL (0x00402101) +/* Run the CPU from PLL1 and UART from PLL2 */ +#define NPCM7XX_CLKSEL_FIXUP_VAL (0x004aaba9) + +/* + * Interrupt lines going into the GIC. This does not include internal Cortex-A9 + * interrupts. + */ +enum NPCM7xxInterrupt { + NPCM7XX_ADC_IRQ = 0, + NPCM7XX_UART0_IRQ = 2, + NPCM7XX_UART1_IRQ, + NPCM7XX_UART2_IRQ, + NPCM7XX_UART3_IRQ, + NPCM7XX_EMC1RX_IRQ = 15, + NPCM7XX_EMC1TX_IRQ, + NPCM7XX_MMC_IRQ = 26, + NPCM7XX_TIMER0_IRQ = 32, /* Timer Module 0 */ + NPCM7XX_TIMER1_IRQ, + NPCM7XX_TIMER2_IRQ, + NPCM7XX_TIMER3_IRQ, + NPCM7XX_TIMER4_IRQ, + NPCM7XX_TIMER5_IRQ, /* Timer Module 1 */ + NPCM7XX_TIMER6_IRQ, + NPCM7XX_TIMER7_IRQ, + NPCM7XX_TIMER8_IRQ, + NPCM7XX_TIMER9_IRQ, + NPCM7XX_TIMER10_IRQ, /* Timer Module 2 */ + NPCM7XX_TIMER11_IRQ, + NPCM7XX_TIMER12_IRQ, + NPCM7XX_TIMER13_IRQ, + NPCM7XX_TIMER14_IRQ, + NPCM7XX_WDG0_IRQ = 47, /* Timer Module 0 Watchdog */ + NPCM7XX_WDG1_IRQ, /* Timer Module 1 Watchdog */ + NPCM7XX_WDG2_IRQ, /* Timer Module 2 Watchdog */ + NPCM7XX_EHCI_IRQ = 61, + NPCM7XX_OHCI_IRQ = 62, + NPCM7XX_SMBUS0_IRQ = 64, + NPCM7XX_SMBUS1_IRQ, + NPCM7XX_SMBUS2_IRQ, + NPCM7XX_SMBUS3_IRQ, + NPCM7XX_SMBUS4_IRQ, + NPCM7XX_SMBUS5_IRQ, + NPCM7XX_SMBUS6_IRQ, + NPCM7XX_SMBUS7_IRQ, + NPCM7XX_SMBUS8_IRQ, + NPCM7XX_SMBUS9_IRQ, + NPCM7XX_SMBUS10_IRQ, + NPCM7XX_SMBUS11_IRQ, + NPCM7XX_SMBUS12_IRQ, + NPCM7XX_SMBUS13_IRQ, + NPCM7XX_SMBUS14_IRQ, + NPCM7XX_SMBUS15_IRQ, + NPCM7XX_PWM0_IRQ = 93, /* PWM module 0 */ + NPCM7XX_PWM1_IRQ, /* PWM module 1 */ + NPCM7XX_MFT0_IRQ = 96, /* MFT module 0 */ + NPCM7XX_MFT1_IRQ, /* MFT module 1 */ + NPCM7XX_MFT2_IRQ, /* MFT module 2 */ + NPCM7XX_MFT3_IRQ, /* MFT module 3 */ + NPCM7XX_MFT4_IRQ, /* MFT module 4 */ + NPCM7XX_MFT5_IRQ, /* MFT module 5 */ + NPCM7XX_MFT6_IRQ, /* MFT module 6 */ + NPCM7XX_MFT7_IRQ, /* MFT module 7 */ + NPCM7XX_EMC2RX_IRQ = 114, + NPCM7XX_EMC2TX_IRQ, + NPCM7XX_GPIO0_IRQ = 116, + NPCM7XX_GPIO1_IRQ, + NPCM7XX_GPIO2_IRQ, + NPCM7XX_GPIO3_IRQ, + NPCM7XX_GPIO4_IRQ, + NPCM7XX_GPIO5_IRQ, + NPCM7XX_GPIO6_IRQ, + NPCM7XX_GPIO7_IRQ, +}; + +/* Total number of GIC interrupts, including internal Cortex-A9 interrupts. */ +#define NPCM7XX_NUM_IRQ (160) + +/* Register base address for each Timer Module */ +static const hwaddr npcm7xx_tim_addr[] = { + 0xf0008000, + 0xf0009000, + 0xf000a000, +}; + +/* Register base address for each 16550 UART */ +static const hwaddr npcm7xx_uart_addr[] = { + 0xf0001000, + 0xf0002000, + 0xf0003000, + 0xf0004000, +}; + +/* Direct memory-mapped access to SPI0 CS0-1. */ +static const hwaddr npcm7xx_fiu0_flash_addr[] = { + 0x80000000, /* CS0 */ + 0x88000000, /* CS1 */ +}; + +/* Direct memory-mapped access to SPI3 CS0-3. 
*/ +static const hwaddr npcm7xx_fiu3_flash_addr[] = { + 0xa0000000, /* CS0 */ + 0xa8000000, /* CS1 */ + 0xb0000000, /* CS2 */ + 0xb8000000, /* CS3 */ +}; + +/* Register base address for each PWM Module */ +static const hwaddr npcm7xx_pwm_addr[] = { + 0xf0103000, + 0xf0104000, +}; + +/* Register base address for each MFT Module */ +static const hwaddr npcm7xx_mft_addr[] = { + 0xf0180000, + 0xf0181000, + 0xf0182000, + 0xf0183000, + 0xf0184000, + 0xf0185000, + 0xf0186000, + 0xf0187000, +}; + +/* Direct memory-mapped access to each SMBus Module. */ +static const hwaddr npcm7xx_smbus_addr[] = { + 0xf0080000, + 0xf0081000, + 0xf0082000, + 0xf0083000, + 0xf0084000, + 0xf0085000, + 0xf0086000, + 0xf0087000, + 0xf0088000, + 0xf0089000, + 0xf008a000, + 0xf008b000, + 0xf008c000, + 0xf008d000, + 0xf008e000, + 0xf008f000, +}; + +/* Register base address for each EMC Module */ +static const hwaddr npcm7xx_emc_addr[] = { + 0xf0825000, + 0xf0826000, +}; + +static const struct { + hwaddr regs_addr; + uint32_t unconnected_pins; + uint32_t reset_pu; + uint32_t reset_pd; + uint32_t reset_osrc; + uint32_t reset_odsc; +} npcm7xx_gpio[] = { + { + .regs_addr = 0xf0010000, + .reset_pu = 0xff03ffff, + .reset_pd = 0x00fc0000, + }, { + .regs_addr = 0xf0011000, + .unconnected_pins = 0x0000001e, + .reset_pu = 0xfefffe07, + .reset_pd = 0x010001e0, + }, { + .regs_addr = 0xf0012000, + .reset_pu = 0x780fffff, + .reset_pd = 0x07f00000, + .reset_odsc = 0x00700000, + }, { + .regs_addr = 0xf0013000, + .reset_pu = 0x00fc0000, + .reset_pd = 0xff000000, + }, { + .regs_addr = 0xf0014000, + .reset_pu = 0xffffffff, + }, { + .regs_addr = 0xf0015000, + .reset_pu = 0xbf83f801, + .reset_pd = 0x007c0000, + .reset_osrc = 0x000000f1, + .reset_odsc = 0x3f9f80f1, + }, { + .regs_addr = 0xf0016000, + .reset_pu = 0xfc00f801, + .reset_pd = 0x000007fe, + .reset_odsc = 0x00000800, + }, { + .regs_addr = 0xf0017000, + .unconnected_pins = 0xffffff00, + .reset_pu = 0x0000007f, + .reset_osrc = 0x0000007f, + .reset_odsc = 0x0000007f, + }, +}; + +static const struct { + const char *name; + hwaddr regs_addr; + int cs_count; + const hwaddr *flash_addr; +} npcm7xx_fiu[] = { + { + .name = "fiu0", + .regs_addr = 0xfb000000, + .cs_count = ARRAY_SIZE(npcm7xx_fiu0_flash_addr), + .flash_addr = npcm7xx_fiu0_flash_addr, + }, { + .name = "fiu3", + .regs_addr = 0xc0000000, + .cs_count = ARRAY_SIZE(npcm7xx_fiu3_flash_addr), + .flash_addr = npcm7xx_fiu3_flash_addr, + }, +}; + +static void npcm7xx_write_board_setup(ARMCPU *cpu, + const struct arm_boot_info *info) +{ + uint32_t board_setup[] = { + 0xe59f0010, /* ldr r0, clk_base_addr */ + 0xe59f1010, /* ldr r1, pllcon1_value */ + 0xe5801010, /* str r1, [r0, #16] */ + 0xe59f100c, /* ldr r1, clksel_value */ + 0xe5801004, /* str r1, [r0, #4] */ + 0xe12fff1e, /* bx lr */ + NPCM7XX_CLK_BA, + NPCM7XX_PLLCON1_FIXUP_VAL, + NPCM7XX_CLKSEL_FIXUP_VAL, + }; + int i; + + for (i = 0; i < ARRAY_SIZE(board_setup); i++) { + board_setup[i] = tswap32(board_setup[i]); + } + rom_add_blob_fixed("board-setup", board_setup, sizeof(board_setup), + info->board_setup_addr); +} + +static void npcm7xx_write_secondary_boot(ARMCPU *cpu, + const struct arm_boot_info *info) +{ + /* + * The default smpboot stub halts the secondary CPU with a 'wfi' + * instruction, but the arch/arm/mach-npcm/platsmp.c in the Linux kernel + * does not send an IPI to wake it up, so the second CPU fails to boot. So + * we need to provide our own smpboot stub that can not use 'wfi', it has + * to spin the secondary CPU until the first CPU writes to the SCRPAD reg. 
+ */ + uint32_t smpboot[] = { + 0xe59f2018, /* ldr r2, bootreg_addr */ + 0xe3a00000, /* mov r0, #0 */ + 0xe5820000, /* str r0, [r2] */ + 0xe320f002, /* wfe */ + 0xe5921000, /* ldr r1, [r2] */ + 0xe1110001, /* tst r1, r1 */ + 0x0afffffb, /* beq */ + 0xe12fff11, /* bx r1 */ + NPCM7XX_SMP_BOOTREG_ADDR, + }; + int i; + + for (i = 0; i < ARRAY_SIZE(smpboot); i++) { + smpboot[i] = tswap32(smpboot[i]); + } + + rom_add_blob_fixed("smpboot", smpboot, sizeof(smpboot), + NPCM7XX_SMP_LOADER_START); +} + +static struct arm_boot_info npcm7xx_binfo = { + .loader_start = NPCM7XX_LOADER_START, + .smp_loader_start = NPCM7XX_SMP_LOADER_START, + .smp_bootreg_addr = NPCM7XX_SMP_BOOTREG_ADDR, + .gic_cpu_if_addr = NPCM7XX_GIC_CPU_IF_ADDR, + .write_secondary_boot = npcm7xx_write_secondary_boot, + .board_id = -1, + .board_setup_addr = NPCM7XX_BOARD_SETUP_ADDR, + .write_board_setup = npcm7xx_write_board_setup, +}; + +void npcm7xx_load_kernel(MachineState *machine, NPCM7xxState *soc) +{ + NPCM7xxClass *sc = NPCM7XX_GET_CLASS(soc); + + npcm7xx_binfo.ram_size = machine->ram_size; + npcm7xx_binfo.nb_cpus = sc->num_cpus; + + arm_load_kernel(&soc->cpu[0], machine, &npcm7xx_binfo); +} + +static void npcm7xx_init_fuses(NPCM7xxState *s) +{ + NPCM7xxClass *nc = NPCM7XX_GET_CLASS(s); + uint32_t value; + + /* + * The initial mask of disabled modules indicates the chip derivative (e.g. + * NPCM750 or NPCM730). + */ + value = tswap32(nc->disabled_modules); + npcm7xx_otp_array_write(&s->fuse_array, &value, NPCM7XX_FUSE_DERIVATIVE, + sizeof(value)); +} + +static void npcm7xx_write_adc_calibration(NPCM7xxState *s) +{ + /* Both ADC and the fuse array must have realized. */ + QEMU_BUILD_BUG_ON(sizeof(s->adc.calibration_r_values) != 4); + npcm7xx_otp_array_write(&s->fuse_array, s->adc.calibration_r_values, + NPCM7XX_FUSE_ADC_CALIB, sizeof(s->adc.calibration_r_values)); +} + +static qemu_irq npcm7xx_irq(NPCM7xxState *s, int n) +{ + return qdev_get_gpio_in(DEVICE(&s->a9mpcore), n); +} + +static void npcm7xx_init(Object *obj) +{ + NPCM7xxState *s = NPCM7XX(obj); + int i; + + for (i = 0; i < NPCM7XX_MAX_NUM_CPUS; i++) { + object_initialize_child(obj, "cpu[*]", &s->cpu[i], + ARM_CPU_TYPE_NAME("cortex-a9")); + } + + object_initialize_child(obj, "a9mpcore", &s->a9mpcore, TYPE_A9MPCORE_PRIV); + object_initialize_child(obj, "gcr", &s->gcr, TYPE_NPCM7XX_GCR); + object_property_add_alias(obj, "power-on-straps", OBJECT(&s->gcr), + "power-on-straps"); + object_initialize_child(obj, "clk", &s->clk, TYPE_NPCM7XX_CLK); + object_initialize_child(obj, "otp1", &s->key_storage, + TYPE_NPCM7XX_KEY_STORAGE); + object_initialize_child(obj, "otp2", &s->fuse_array, + TYPE_NPCM7XX_FUSE_ARRAY); + object_initialize_child(obj, "mc", &s->mc, TYPE_NPCM7XX_MC); + object_initialize_child(obj, "rng", &s->rng, TYPE_NPCM7XX_RNG); + object_initialize_child(obj, "adc", &s->adc, TYPE_NPCM7XX_ADC); + + for (i = 0; i < ARRAY_SIZE(s->tim); i++) { + object_initialize_child(obj, "tim[*]", &s->tim[i], TYPE_NPCM7XX_TIMER); + } + + for (i = 0; i < ARRAY_SIZE(s->gpio); i++) { + object_initialize_child(obj, "gpio[*]", &s->gpio[i], TYPE_NPCM7XX_GPIO); + } + + for (i = 0; i < ARRAY_SIZE(s->smbus); i++) { + object_initialize_child(obj, "smbus[*]", &s->smbus[i], + TYPE_NPCM7XX_SMBUS); + } + + object_initialize_child(obj, "ehci", &s->ehci, TYPE_NPCM7XX_EHCI); + object_initialize_child(obj, "ohci", &s->ohci, TYPE_SYSBUS_OHCI); + + QEMU_BUILD_BUG_ON(ARRAY_SIZE(npcm7xx_fiu) != ARRAY_SIZE(s->fiu)); + for (i = 0; i < ARRAY_SIZE(s->fiu); i++) { + object_initialize_child(obj, 
npcm7xx_fiu[i].name, &s->fiu[i], + TYPE_NPCM7XX_FIU); + } + + for (i = 0; i < ARRAY_SIZE(s->pwm); i++) { + object_initialize_child(obj, "pwm[*]", &s->pwm[i], TYPE_NPCM7XX_PWM); + } + + for (i = 0; i < ARRAY_SIZE(s->mft); i++) { + object_initialize_child(obj, "mft[*]", &s->mft[i], TYPE_NPCM7XX_MFT); + } + + for (i = 0; i < ARRAY_SIZE(s->emc); i++) { + object_initialize_child(obj, "emc[*]", &s->emc[i], TYPE_NPCM7XX_EMC); + } + + object_initialize_child(obj, "mmc", &s->mmc, TYPE_NPCM7XX_SDHCI); +} + +static void npcm7xx_realize(DeviceState *dev, Error **errp) +{ + NPCM7xxState *s = NPCM7XX(dev); + NPCM7xxClass *nc = NPCM7XX_GET_CLASS(s); + int i; + + if (memory_region_size(s->dram) > NPCM7XX_DRAM_SZ) { + error_setg(errp, "%s: NPCM7xx cannot address more than %" PRIu64 + " MiB of DRAM", __func__, NPCM7XX_DRAM_SZ / MiB); + return; + } + + /* CPUs */ + for (i = 0; i < nc->num_cpus; i++) { + object_property_set_int(OBJECT(&s->cpu[i]), "mp-affinity", + arm_cpu_mp_affinity(i, NPCM7XX_MAX_NUM_CPUS), + &error_abort); + object_property_set_int(OBJECT(&s->cpu[i]), "reset-cbar", + NPCM7XX_GIC_CPU_IF_ADDR, &error_abort); + object_property_set_bool(OBJECT(&s->cpu[i]), "reset-hivecs", true, + &error_abort); + + /* Disable security extensions. */ + object_property_set_bool(OBJECT(&s->cpu[i]), "has_el3", false, + &error_abort); + + if (!qdev_realize(DEVICE(&s->cpu[i]), NULL, errp)) { + return; + } + } + + /* A9MPCORE peripherals. Can only fail if we pass bad parameters here. */ + object_property_set_int(OBJECT(&s->a9mpcore), "num-cpu", nc->num_cpus, + &error_abort); + object_property_set_int(OBJECT(&s->a9mpcore), "num-irq", NPCM7XX_NUM_IRQ, + &error_abort); + sysbus_realize(SYS_BUS_DEVICE(&s->a9mpcore), &error_abort); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->a9mpcore), 0, NPCM7XX_CPUP_BA); + + for (i = 0; i < nc->num_cpus; i++) { + sysbus_connect_irq(SYS_BUS_DEVICE(&s->a9mpcore), i, + qdev_get_gpio_in(DEVICE(&s->cpu[i]), ARM_CPU_IRQ)); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->a9mpcore), i + nc->num_cpus, + qdev_get_gpio_in(DEVICE(&s->cpu[i]), ARM_CPU_FIQ)); + } + + /* L2 cache controller */ + sysbus_create_simple("l2x0", NPCM7XX_L2C_BA, NULL); + + /* System Global Control Registers (GCR). Can fail due to user input. */ + object_property_set_int(OBJECT(&s->gcr), "disabled-modules", + nc->disabled_modules, &error_abort); + object_property_add_const_link(OBJECT(&s->gcr), "dram-mr", OBJECT(s->dram)); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->gcr), errp)) { + return; + } + sysbus_mmio_map(SYS_BUS_DEVICE(&s->gcr), 0, NPCM7XX_GCR_BA); + + /* Clock Control Registers (CLK). Cannot fail. */ + sysbus_realize(SYS_BUS_DEVICE(&s->clk), &error_abort); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->clk), 0, NPCM7XX_CLK_BA); + + /* OTP key storage and fuse strap array. Cannot fail. */ + sysbus_realize(SYS_BUS_DEVICE(&s->key_storage), &error_abort); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->key_storage), 0, NPCM7XX_OTP1_BA); + sysbus_realize(SYS_BUS_DEVICE(&s->fuse_array), &error_abort); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->fuse_array), 0, NPCM7XX_OTP2_BA); + npcm7xx_init_fuses(s); + + /* Fake Memory Controller (MC). Cannot fail. */ + sysbus_realize(SYS_BUS_DEVICE(&s->mc), &error_abort); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->mc), 0, NPCM7XX_MC_BA); + + /* ADC Modules. Cannot fail. 
*/ + qdev_connect_clock_in(DEVICE(&s->adc), "clock", qdev_get_clock_out( + DEVICE(&s->clk), "adc-clock")); + sysbus_realize(SYS_BUS_DEVICE(&s->adc), &error_abort); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->adc), 0, NPCM7XX_ADC_BA); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->adc), 0, + npcm7xx_irq(s, NPCM7XX_ADC_IRQ)); + npcm7xx_write_adc_calibration(s); + + /* Timer Modules (TIM). Cannot fail. */ + QEMU_BUILD_BUG_ON(ARRAY_SIZE(npcm7xx_tim_addr) != ARRAY_SIZE(s->tim)); + for (i = 0; i < ARRAY_SIZE(s->tim); i++) { + SysBusDevice *sbd = SYS_BUS_DEVICE(&s->tim[i]); + int first_irq; + int j; + + /* Connect the timer clock. */ + qdev_connect_clock_in(DEVICE(&s->tim[i]), "clock", qdev_get_clock_out( + DEVICE(&s->clk), "timer-clock")); + + sysbus_realize(sbd, &error_abort); + sysbus_mmio_map(sbd, 0, npcm7xx_tim_addr[i]); + + first_irq = NPCM7XX_TIMER0_IRQ + i * NPCM7XX_TIMERS_PER_CTRL; + for (j = 0; j < NPCM7XX_TIMERS_PER_CTRL; j++) { + qemu_irq irq = npcm7xx_irq(s, first_irq + j); + sysbus_connect_irq(sbd, j, irq); + } + + /* IRQ for watchdogs */ + sysbus_connect_irq(sbd, NPCM7XX_TIMERS_PER_CTRL, + npcm7xx_irq(s, NPCM7XX_WDG0_IRQ + i)); + /* GPIO that connects clk module with watchdog */ + qdev_connect_gpio_out_named(DEVICE(&s->tim[i]), + NPCM7XX_WATCHDOG_RESET_GPIO_OUT, 0, + qdev_get_gpio_in_named(DEVICE(&s->clk), + NPCM7XX_WATCHDOG_RESET_GPIO_IN, i)); + } + + /* UART0..3 (16550 compatible) */ + for (i = 0; i < ARRAY_SIZE(npcm7xx_uart_addr); i++) { + serial_mm_init(get_system_memory(), npcm7xx_uart_addr[i], 2, + npcm7xx_irq(s, NPCM7XX_UART0_IRQ + i), 115200, + serial_hd(i), DEVICE_LITTLE_ENDIAN); + } + + /* Random Number Generator. Cannot fail. */ + sysbus_realize(SYS_BUS_DEVICE(&s->rng), &error_abort); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->rng), 0, NPCM7XX_RNG_BA); + + /* GPIO modules. Cannot fail. */ + QEMU_BUILD_BUG_ON(ARRAY_SIZE(npcm7xx_gpio) != ARRAY_SIZE(s->gpio)); + for (i = 0; i < ARRAY_SIZE(s->gpio); i++) { + Object *obj = OBJECT(&s->gpio[i]); + + object_property_set_uint(obj, "reset-pullup", + npcm7xx_gpio[i].reset_pu, &error_abort); + object_property_set_uint(obj, "reset-pulldown", + npcm7xx_gpio[i].reset_pd, &error_abort); + object_property_set_uint(obj, "reset-osrc", + npcm7xx_gpio[i].reset_osrc, &error_abort); + object_property_set_uint(obj, "reset-odsc", + npcm7xx_gpio[i].reset_odsc, &error_abort); + sysbus_realize(SYS_BUS_DEVICE(obj), &error_abort); + sysbus_mmio_map(SYS_BUS_DEVICE(obj), 0, npcm7xx_gpio[i].regs_addr); + sysbus_connect_irq(SYS_BUS_DEVICE(obj), 0, + npcm7xx_irq(s, NPCM7XX_GPIO0_IRQ + i)); + } + + /* SMBus modules. Cannot fail. 
*/ + QEMU_BUILD_BUG_ON(ARRAY_SIZE(npcm7xx_smbus_addr) != ARRAY_SIZE(s->smbus)); + for (i = 0; i < ARRAY_SIZE(s->smbus); i++) { + Object *obj = OBJECT(&s->smbus[i]); + + sysbus_realize(SYS_BUS_DEVICE(obj), &error_abort); + sysbus_mmio_map(SYS_BUS_DEVICE(obj), 0, npcm7xx_smbus_addr[i]); + sysbus_connect_irq(SYS_BUS_DEVICE(obj), 0, + npcm7xx_irq(s, NPCM7XX_SMBUS0_IRQ + i)); + } + + /* USB Host */ + object_property_set_bool(OBJECT(&s->ehci), "companion-enable", true, + &error_abort); + sysbus_realize(SYS_BUS_DEVICE(&s->ehci), &error_abort); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->ehci), 0, NPCM7XX_EHCI_BA); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->ehci), 0, + npcm7xx_irq(s, NPCM7XX_EHCI_IRQ)); + + object_property_set_str(OBJECT(&s->ohci), "masterbus", "usb-bus.0", + &error_abort); + object_property_set_uint(OBJECT(&s->ohci), "num-ports", 1, &error_abort); + sysbus_realize(SYS_BUS_DEVICE(&s->ohci), &error_abort); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->ohci), 0, NPCM7XX_OHCI_BA); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->ohci), 0, + npcm7xx_irq(s, NPCM7XX_OHCI_IRQ)); + + /* PWM Modules. Cannot fail. */ + QEMU_BUILD_BUG_ON(ARRAY_SIZE(npcm7xx_pwm_addr) != ARRAY_SIZE(s->pwm)); + for (i = 0; i < ARRAY_SIZE(s->pwm); i++) { + SysBusDevice *sbd = SYS_BUS_DEVICE(&s->pwm[i]); + + qdev_connect_clock_in(DEVICE(&s->pwm[i]), "clock", qdev_get_clock_out( + DEVICE(&s->clk), "apb3-clock")); + sysbus_realize(sbd, &error_abort); + sysbus_mmio_map(sbd, 0, npcm7xx_pwm_addr[i]); + sysbus_connect_irq(sbd, i, npcm7xx_irq(s, NPCM7XX_PWM0_IRQ + i)); + } + + /* MFT Modules. Cannot fail. */ + QEMU_BUILD_BUG_ON(ARRAY_SIZE(npcm7xx_mft_addr) != ARRAY_SIZE(s->mft)); + for (i = 0; i < ARRAY_SIZE(s->mft); i++) { + SysBusDevice *sbd = SYS_BUS_DEVICE(&s->mft[i]); + + qdev_connect_clock_in(DEVICE(&s->mft[i]), "clock-in", + qdev_get_clock_out(DEVICE(&s->clk), + "apb4-clock")); + sysbus_realize(sbd, &error_abort); + sysbus_mmio_map(sbd, 0, npcm7xx_mft_addr[i]); + sysbus_connect_irq(sbd, 0, npcm7xx_irq(s, NPCM7XX_MFT0_IRQ + i)); + } + + /* + * EMC Modules. Cannot fail. + * The mapping of the device to its netdev backend works as follows: + * emc[i] = nd_table[i] + * This works around the inability to specify the netdev property for the + * emc device: it's not pluggable and thus the -device option can't be + * used. + */ + QEMU_BUILD_BUG_ON(ARRAY_SIZE(npcm7xx_emc_addr) != ARRAY_SIZE(s->emc)); + QEMU_BUILD_BUG_ON(ARRAY_SIZE(s->emc) != 2); + for (i = 0; i < ARRAY_SIZE(s->emc); i++) { + s->emc[i].emc_num = i; + SysBusDevice *sbd = SYS_BUS_DEVICE(&s->emc[i]); + if (nd_table[i].used) { + qemu_check_nic_model(&nd_table[i], TYPE_NPCM7XX_EMC); + qdev_set_nic_properties(DEVICE(sbd), &nd_table[i]); + } + /* + * The device exists regardless of whether it's connected to a QEMU + * netdev backend. So always instantiate it even if there is no + * backend. + */ + sysbus_realize(sbd, &error_abort); + sysbus_mmio_map(sbd, 0, npcm7xx_emc_addr[i]); + int tx_irq = i == 0 ? NPCM7XX_EMC1TX_IRQ : NPCM7XX_EMC2TX_IRQ; + int rx_irq = i == 0 ? NPCM7XX_EMC1RX_IRQ : NPCM7XX_EMC2RX_IRQ; + /* + * N.B. The values for the second argument sysbus_connect_irq are + * chosen to match the registration order in npcm7xx_emc_realize. + */ + sysbus_connect_irq(sbd, 0, npcm7xx_irq(s, tx_irq)); + sysbus_connect_irq(sbd, 1, npcm7xx_irq(s, rx_irq)); + } + + /* + * Flash Interface Unit (FIU). Can fail if incorrect number of chip selects + * specified, but this is a programming error. 
+ */ + QEMU_BUILD_BUG_ON(ARRAY_SIZE(npcm7xx_fiu) != ARRAY_SIZE(s->fiu)); + for (i = 0; i < ARRAY_SIZE(s->fiu); i++) { + SysBusDevice *sbd = SYS_BUS_DEVICE(&s->fiu[i]); + int j; + + object_property_set_int(OBJECT(sbd), "cs-count", + npcm7xx_fiu[i].cs_count, &error_abort); + sysbus_realize(sbd, &error_abort); + + sysbus_mmio_map(sbd, 0, npcm7xx_fiu[i].regs_addr); + for (j = 0; j < npcm7xx_fiu[i].cs_count; j++) { + sysbus_mmio_map(sbd, j + 1, npcm7xx_fiu[i].flash_addr[j]); + } + } + + /* RAM2 (SRAM) */ + memory_region_init_ram(&s->sram, OBJECT(dev), "ram2", + NPCM7XX_RAM2_SZ, &error_abort); + memory_region_add_subregion(get_system_memory(), NPCM7XX_RAM2_BA, &s->sram); + + /* RAM3 (SRAM) */ + memory_region_init_ram(&s->ram3, OBJECT(dev), "ram3", + NPCM7XX_RAM3_SZ, &error_abort); + memory_region_add_subregion(get_system_memory(), NPCM7XX_RAM3_BA, &s->ram3); + + /* Internal ROM */ + memory_region_init_rom(&s->irom, OBJECT(dev), "irom", NPCM7XX_ROM_SZ, + &error_abort); + memory_region_add_subregion(get_system_memory(), NPCM7XX_ROM_BA, &s->irom); + + /* SDHCI */ + sysbus_realize(SYS_BUS_DEVICE(&s->mmc), &error_abort); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->mmc), 0, NPCM7XX_MMC_BA); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->mmc), 0, + npcm7xx_irq(s, NPCM7XX_MMC_IRQ)); + + create_unimplemented_device("npcm7xx.shm", 0xc0001000, 4 * KiB); + create_unimplemented_device("npcm7xx.vdmx", 0xe0800000, 4 * KiB); + create_unimplemented_device("npcm7xx.pcierc", 0xe1000000, 64 * KiB); + create_unimplemented_device("npcm7xx.kcs", 0xf0007000, 4 * KiB); + create_unimplemented_device("npcm7xx.gfxi", 0xf000e000, 4 * KiB); + create_unimplemented_device("npcm7xx.espi", 0xf009f000, 4 * KiB); + create_unimplemented_device("npcm7xx.peci", 0xf0100000, 4 * KiB); + create_unimplemented_device("npcm7xx.siox[1]", 0xf0101000, 4 * KiB); + create_unimplemented_device("npcm7xx.siox[2]", 0xf0102000, 4 * KiB); + create_unimplemented_device("npcm7xx.pspi1", 0xf0200000, 4 * KiB); + create_unimplemented_device("npcm7xx.pspi2", 0xf0201000, 4 * KiB); + create_unimplemented_device("npcm7xx.ahbpci", 0xf0400000, 1 * MiB); + create_unimplemented_device("npcm7xx.mcphy", 0xf05f0000, 64 * KiB); + create_unimplemented_device("npcm7xx.gmac1", 0xf0802000, 8 * KiB); + create_unimplemented_device("npcm7xx.gmac2", 0xf0804000, 8 * KiB); + create_unimplemented_device("npcm7xx.vcd", 0xf0810000, 64 * KiB); + create_unimplemented_device("npcm7xx.ece", 0xf0820000, 8 * KiB); + create_unimplemented_device("npcm7xx.vdma", 0xf0822000, 8 * KiB); + create_unimplemented_device("npcm7xx.usbd[0]", 0xf0830000, 4 * KiB); + create_unimplemented_device("npcm7xx.usbd[1]", 0xf0831000, 4 * KiB); + create_unimplemented_device("npcm7xx.usbd[2]", 0xf0832000, 4 * KiB); + create_unimplemented_device("npcm7xx.usbd[3]", 0xf0833000, 4 * KiB); + create_unimplemented_device("npcm7xx.usbd[4]", 0xf0834000, 4 * KiB); + create_unimplemented_device("npcm7xx.usbd[5]", 0xf0835000, 4 * KiB); + create_unimplemented_device("npcm7xx.usbd[6]", 0xf0836000, 4 * KiB); + create_unimplemented_device("npcm7xx.usbd[7]", 0xf0837000, 4 * KiB); + create_unimplemented_device("npcm7xx.usbd[8]", 0xf0838000, 4 * KiB); + create_unimplemented_device("npcm7xx.usbd[9]", 0xf0839000, 4 * KiB); + create_unimplemented_device("npcm7xx.sd", 0xf0840000, 8 * KiB); + create_unimplemented_device("npcm7xx.pcimbx", 0xf0848000, 512 * KiB); + create_unimplemented_device("npcm7xx.aes", 0xf0858000, 4 * KiB); + create_unimplemented_device("npcm7xx.des", 0xf0859000, 4 * KiB); + create_unimplemented_device("npcm7xx.sha", 0xf085a000, 
4 * KiB); + create_unimplemented_device("npcm7xx.secacc", 0xf085b000, 4 * KiB); + create_unimplemented_device("npcm7xx.spixcs0", 0xf8000000, 16 * MiB); + create_unimplemented_device("npcm7xx.spixcs1", 0xf9000000, 16 * MiB); + create_unimplemented_device("npcm7xx.spix", 0xfb001000, 4 * KiB); +} + +static Property npcm7xx_properties[] = { + DEFINE_PROP_LINK("dram-mr", NPCM7xxState, dram, TYPE_MEMORY_REGION, + MemoryRegion *), + DEFINE_PROP_END_OF_LIST(), +}; + +static void npcm7xx_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + + dc->realize = npcm7xx_realize; + dc->user_creatable = false; + device_class_set_props(dc, npcm7xx_properties); +} + +static void npcm730_class_init(ObjectClass *oc, void *data) +{ + NPCM7xxClass *nc = NPCM7XX_CLASS(oc); + + /* NPCM730 is optimized for data center use, so no graphics, etc. */ + nc->disabled_modules = 0x00300395; + nc->num_cpus = 2; +} + +static void npcm750_class_init(ObjectClass *oc, void *data) +{ + NPCM7xxClass *nc = NPCM7XX_CLASS(oc); + + /* NPCM750 has 2 cores and a full set of peripherals */ + nc->disabled_modules = 0x00000000; + nc->num_cpus = 2; +} + +static const TypeInfo npcm7xx_soc_types[] = { + { + .name = TYPE_NPCM7XX, + .parent = TYPE_DEVICE, + .instance_size = sizeof(NPCM7xxState), + .instance_init = npcm7xx_init, + .class_size = sizeof(NPCM7xxClass), + .class_init = npcm7xx_class_init, + .abstract = true, + }, { + .name = TYPE_NPCM730, + .parent = TYPE_NPCM7XX, + .class_init = npcm730_class_init, + }, { + .name = TYPE_NPCM750, + .parent = TYPE_NPCM7XX, + .class_init = npcm750_class_init, + }, +}; + +DEFINE_TYPES(npcm7xx_soc_types); diff --git a/hw/arm/npcm7xx_boards.c b/hw/arm/npcm7xx_boards.c new file mode 100644 index 000000000..dec7d16ae --- /dev/null +++ b/hw/arm/npcm7xx_boards.c @@ -0,0 +1,497 @@ +/* + * Machine definitions for boards featuring an NPCM7xx SoC. + * + * Copyright 2020 Google LLC + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ */ + +#include "qemu/osdep.h" + +#include "hw/arm/npcm7xx.h" +#include "hw/core/cpu.h" +#include "hw/i2c/i2c_mux_pca954x.h" +#include "hw/i2c/smbus_eeprom.h" +#include "hw/loader.h" +#include "hw/qdev-core.h" +#include "hw/qdev-properties.h" +#include "qapi/error.h" +#include "qemu-common.h" +#include "qemu/datadir.h" +#include "qemu/units.h" +#include "sysemu/blockdev.h" +#include "sysemu/sysemu.h" +#include "sysemu/block-backend.h" + +#define NPCM750_EVB_POWER_ON_STRAPS 0x00001ff7 +#define QUANTA_GSJ_POWER_ON_STRAPS 0x00001fff +#define QUANTA_GBS_POWER_ON_STRAPS 0x000017ff +#define KUDO_BMC_POWER_ON_STRAPS 0x00001fff + +static const char npcm7xx_default_bootrom[] = "npcm7xx_bootrom.bin"; + +static void npcm7xx_load_bootrom(MachineState *machine, NPCM7xxState *soc) +{ + const char *bios_name = machine->firmware ?: npcm7xx_default_bootrom; + g_autofree char *filename = NULL; + int ret; + + filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name); + if (!filename) { + error_report("Could not find ROM image '%s'", bios_name); + if (!machine->kernel_filename) { + /* We can't boot without a bootrom or a kernel image. */ + exit(1); + } + return; + } + ret = load_image_mr(filename, &soc->irom); + if (ret < 0) { + error_report("Failed to load ROM image '%s'", filename); + exit(1); + } +} + +static void npcm7xx_connect_flash(NPCM7xxFIUState *fiu, int cs_no, + const char *flash_type, DriveInfo *dinfo) +{ + DeviceState *flash; + qemu_irq flash_cs; + + flash = qdev_new(flash_type); + if (dinfo) { + qdev_prop_set_drive(flash, "drive", blk_by_legacy_dinfo(dinfo)); + } + qdev_realize_and_unref(flash, BUS(fiu->spi), &error_fatal); + + flash_cs = qdev_get_gpio_in_named(flash, SSI_GPIO_CS, 0); + qdev_connect_gpio_out_named(DEVICE(fiu), "cs", cs_no, flash_cs); +} + +static void npcm7xx_connect_dram(NPCM7xxState *soc, MemoryRegion *dram) +{ + memory_region_add_subregion(get_system_memory(), NPCM7XX_DRAM_BA, dram); + + object_property_set_link(OBJECT(soc), "dram-mr", OBJECT(dram), + &error_abort); +} + +static void sdhci_attach_drive(SDHCIState *sdhci) +{ + DriveInfo *di = drive_get_next(IF_SD); + BlockBackend *blk = di ? 
blk_by_legacy_dinfo(di) : NULL; + + BusState *bus = qdev_get_child_bus(DEVICE(sdhci), "sd-bus"); + if (bus == NULL) { + error_report("No SD bus found in SOC object"); + exit(1); + } + + DeviceState *carddev = qdev_new(TYPE_SD_CARD); + qdev_prop_set_drive_err(carddev, "drive", blk, &error_fatal); + qdev_realize_and_unref(carddev, bus, &error_fatal); +} + +static NPCM7xxState *npcm7xx_create_soc(MachineState *machine, + uint32_t hw_straps) +{ + NPCM7xxMachineClass *nmc = NPCM7XX_MACHINE_GET_CLASS(machine); + MachineClass *mc = MACHINE_CLASS(nmc); + Object *obj; + + if (strcmp(machine->cpu_type, mc->default_cpu_type) != 0) { + error_report("This board can only be used with %s", + mc->default_cpu_type); + exit(1); + } + + obj = object_new_with_props(nmc->soc_type, OBJECT(machine), "soc", + &error_abort, NULL); + object_property_set_uint(obj, "power-on-straps", hw_straps, &error_abort); + + return NPCM7XX(obj); +} + +static I2CBus *npcm7xx_i2c_get_bus(NPCM7xxState *soc, uint32_t num) +{ + g_assert(num < ARRAY_SIZE(soc->smbus)); + return I2C_BUS(qdev_get_child_bus(DEVICE(&soc->smbus[num]), "i2c-bus")); +} + +static void at24c_eeprom_init(NPCM7xxState *soc, int bus, uint8_t addr, + uint32_t rsize) +{ + I2CBus *i2c_bus = npcm7xx_i2c_get_bus(soc, bus); + I2CSlave *i2c_dev = i2c_slave_new("at24c-eeprom", addr); + DeviceState *dev = DEVICE(i2c_dev); + + qdev_prop_set_uint32(dev, "rom-size", rsize); + i2c_slave_realize_and_unref(i2c_dev, i2c_bus, &error_abort); +} + +static void npcm7xx_init_pwm_splitter(NPCM7xxMachine *machine, + NPCM7xxState *soc, const int *fan_counts) +{ + SplitIRQ *splitters = machine->fan_splitter; + + /* + * PWM 0~3 belong to module 0 output 0~3. + * PWM 4~7 belong to module 1 output 0~3. + */ + for (int i = 0; i < NPCM7XX_NR_PWM_MODULES; ++i) { + for (int j = 0; j < NPCM7XX_PWM_PER_MODULE; ++j) { + int splitter_no = i * NPCM7XX_PWM_PER_MODULE + j; + DeviceState *splitter; + + if (fan_counts[splitter_no] < 1) { + continue; + } + object_initialize_child(OBJECT(machine), "fan-splitter[*]", + &splitters[splitter_no], TYPE_SPLIT_IRQ); + splitter = DEVICE(&splitters[splitter_no]); + qdev_prop_set_uint16(splitter, "num-lines", + fan_counts[splitter_no]); + qdev_realize(splitter, NULL, &error_abort); + qdev_connect_gpio_out_named(DEVICE(&soc->pwm[i]), "duty-gpio-out", + j, qdev_get_gpio_in(splitter, 0)); + } + } +} + +static void npcm7xx_connect_pwm_fan(NPCM7xxState *soc, SplitIRQ *splitter, + int fan_no, int output_no) +{ + DeviceState *fan; + int fan_input; + qemu_irq fan_duty_gpio; + + g_assert(fan_no >= 0 && fan_no <= NPCM7XX_MFT_MAX_FAN_INPUT); + /* + * Fan 0~1 belong to module 0 input 0~1. + * Fan 2~3 belong to module 1 input 0~1. + * ... + * Fan 14~15 belong to module 7 input 0~1. + * Fan 16~17 belong to module 0 input 2~3. + * Fan 18~19 belong to module 1 input 2~3. 
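+     *
+     * Equivalently: module = (fan_no % 16) / 2 and input = fan_no % 2
+     * (plus 2 when fan_no >= 16), so e.g. fan_no 17 maps to module 0,
+     * input 3. The code below implements exactly this split.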
+ */
+    if (fan_no < 16) {
+        fan = DEVICE(&soc->mft[fan_no / 2]);
+        fan_input = fan_no % 2;
+    } else {
+        fan = DEVICE(&soc->mft[(fan_no - 16) / 2]);
+        fan_input = fan_no % 2 + 2;
+    }
+
+    /* Connect the fan to the PWM module */
+    fan_duty_gpio = qdev_get_gpio_in_named(fan, "duty", fan_input);
+    qdev_connect_gpio_out(DEVICE(splitter), output_no, fan_duty_gpio);
+}
+
+static void npcm750_evb_i2c_init(NPCM7xxState *soc)
+{
+    /* lm75 temperature sensor on SVB, tmp105 is compatible */
+    i2c_slave_create_simple(npcm7xx_i2c_get_bus(soc, 0), "tmp105", 0x48);
+    /* lm75 temperature sensor on EB, tmp105 is compatible */
+    i2c_slave_create_simple(npcm7xx_i2c_get_bus(soc, 1), "tmp105", 0x48);
+    /* tmp100 temperature sensor on EB, tmp105 is compatible */
+    i2c_slave_create_simple(npcm7xx_i2c_get_bus(soc, 2), "tmp105", 0x48);
+    /* tmp100 temperature sensor on SVB, tmp105 is compatible */
+    i2c_slave_create_simple(npcm7xx_i2c_get_bus(soc, 6), "tmp105", 0x48);
+}
+
+static void npcm750_evb_fan_init(NPCM7xxMachine *machine, NPCM7xxState *soc)
+{
+    SplitIRQ *splitter = machine->fan_splitter;
+    static const int fan_counts[] = {2, 2, 2, 2, 2, 2, 2, 2};
+
+    npcm7xx_init_pwm_splitter(machine, soc, fan_counts);
+    npcm7xx_connect_pwm_fan(soc, &splitter[0], 0x00, 0);
+    npcm7xx_connect_pwm_fan(soc, &splitter[0], 0x01, 1);
+    npcm7xx_connect_pwm_fan(soc, &splitter[1], 0x02, 0);
+    npcm7xx_connect_pwm_fan(soc, &splitter[1], 0x03, 1);
+    npcm7xx_connect_pwm_fan(soc, &splitter[2], 0x04, 0);
+    npcm7xx_connect_pwm_fan(soc, &splitter[2], 0x05, 1);
+    npcm7xx_connect_pwm_fan(soc, &splitter[3], 0x06, 0);
+    npcm7xx_connect_pwm_fan(soc, &splitter[3], 0x07, 1);
+    npcm7xx_connect_pwm_fan(soc, &splitter[4], 0x08, 0);
+    npcm7xx_connect_pwm_fan(soc, &splitter[4], 0x09, 1);
+    npcm7xx_connect_pwm_fan(soc, &splitter[5], 0x0a, 0);
+    npcm7xx_connect_pwm_fan(soc, &splitter[5], 0x0b, 1);
+    npcm7xx_connect_pwm_fan(soc, &splitter[6], 0x0c, 0);
+    npcm7xx_connect_pwm_fan(soc, &splitter[6], 0x0d, 1);
+    npcm7xx_connect_pwm_fan(soc, &splitter[7], 0x0e, 0);
+    npcm7xx_connect_pwm_fan(soc, &splitter[7], 0x0f, 1);
+}
+
+static void quanta_gsj_i2c_init(NPCM7xxState *soc)
+{
+    /* The GSJ machine has 4 max31725 temperature sensors; tmp105 is compatible.
*/ + i2c_slave_create_simple(npcm7xx_i2c_get_bus(soc, 1), "tmp105", 0x5c); + i2c_slave_create_simple(npcm7xx_i2c_get_bus(soc, 2), "tmp105", 0x5c); + i2c_slave_create_simple(npcm7xx_i2c_get_bus(soc, 3), "tmp105", 0x5c); + i2c_slave_create_simple(npcm7xx_i2c_get_bus(soc, 4), "tmp105", 0x5c); + + at24c_eeprom_init(soc, 9, 0x55, 8192); + at24c_eeprom_init(soc, 10, 0x55, 8192); + + /* + * i2c-11: + * - power-brick@36: delta,dps800 + * - hotswap@15: ti,lm5066i + */ + + /* + * i2c-12: + * - ucd90160@6b + */ + + i2c_slave_create_simple(npcm7xx_i2c_get_bus(soc, 15), "pca9548", 0x75); +} + +static void quanta_gsj_fan_init(NPCM7xxMachine *machine, NPCM7xxState *soc) +{ + SplitIRQ *splitter = machine->fan_splitter; + static const int fan_counts[] = {2, 2, 2, 0, 0, 0, 0, 0}; + + npcm7xx_init_pwm_splitter(machine, soc, fan_counts); + npcm7xx_connect_pwm_fan(soc, &splitter[0], 0x00, 0); + npcm7xx_connect_pwm_fan(soc, &splitter[0], 0x01, 1); + npcm7xx_connect_pwm_fan(soc, &splitter[1], 0x02, 0); + npcm7xx_connect_pwm_fan(soc, &splitter[1], 0x03, 1); + npcm7xx_connect_pwm_fan(soc, &splitter[2], 0x04, 0); + npcm7xx_connect_pwm_fan(soc, &splitter[2], 0x05, 1); +} + +static void quanta_gbs_i2c_init(NPCM7xxState *soc) +{ + /* + * i2c-0: + * pca9546@71 + * + * i2c-1: + * pca9535@24 + * pca9535@20 + * pca9535@21 + * pca9535@22 + * pca9535@23 + * pca9535@25 + * pca9535@26 + * + * i2c-2: + * sbtsi@4c + * + * i2c-5: + * atmel,24c64@50 mb_fru + * pca9546@71 + * - channel 0: max31725@54 + * - channel 1: max31725@55 + * - channel 2: max31725@5d + * atmel,24c64@51 fan_fru + * - channel 3: atmel,24c64@52 hsbp_fru + * + * i2c-6: + * pca9545@73 + * + * i2c-7: + * pca9545@72 + * + * i2c-8: + * adi,adm1272@10 + * + * i2c-9: + * pca9546@71 + * - channel 0: isil,isl68137@60 + * - channel 1: isil,isl68137@61 + * - channel 2: isil,isl68137@63 + * - channel 3: isil,isl68137@45 + * + * i2c-10: + * pca9545@71 + * + * i2c-11: + * pca9545@76 + * + * i2c-12: + * maxim,max34451@4e + * isil,isl68137@5d + * isil,isl68137@5e + * + * i2c-14: + * pca9545@70 + */ +} + +static void npcm750_evb_init(MachineState *machine) +{ + NPCM7xxState *soc; + + soc = npcm7xx_create_soc(machine, NPCM750_EVB_POWER_ON_STRAPS); + npcm7xx_connect_dram(soc, machine->ram); + qdev_realize(DEVICE(soc), NULL, &error_fatal); + + npcm7xx_load_bootrom(machine, soc); + npcm7xx_connect_flash(&soc->fiu[0], 0, "w25q256", drive_get(IF_MTD, 0, 0)); + npcm750_evb_i2c_init(soc); + npcm750_evb_fan_init(NPCM7XX_MACHINE(machine), soc); + npcm7xx_load_kernel(machine, soc); +} + +static void quanta_gsj_init(MachineState *machine) +{ + NPCM7xxState *soc; + + soc = npcm7xx_create_soc(machine, QUANTA_GSJ_POWER_ON_STRAPS); + npcm7xx_connect_dram(soc, machine->ram); + qdev_realize(DEVICE(soc), NULL, &error_fatal); + + npcm7xx_load_bootrom(machine, soc); + npcm7xx_connect_flash(&soc->fiu[0], 0, "mx25l25635e", + drive_get(IF_MTD, 0, 0)); + quanta_gsj_i2c_init(soc); + quanta_gsj_fan_init(NPCM7XX_MACHINE(machine), soc); + npcm7xx_load_kernel(machine, soc); +} + +static void quanta_gbs_init(MachineState *machine) +{ + NPCM7xxState *soc; + + soc = npcm7xx_create_soc(machine, QUANTA_GBS_POWER_ON_STRAPS); + npcm7xx_connect_dram(soc, machine->ram); + qdev_realize(DEVICE(soc), NULL, &error_fatal); + + npcm7xx_load_bootrom(machine, soc); + + npcm7xx_connect_flash(&soc->fiu[0], 0, "mx66u51235f", + drive_get(IF_MTD, 0, 0)); + + quanta_gbs_i2c_init(soc); + sdhci_attach_drive(&soc->mmc.sdhci); + npcm7xx_load_kernel(machine, soc); +} + +static void kudo_bmc_init(MachineState *machine) +{ + 
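+    /*
+     * Kudo carries two mx66u51235f SPI flashes: one on FIU0 chip
+     * select 0 (MTD drive index 0) and a second on FIU1 chip
+     * select 0 (MTD drive index 3).
+     */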
NPCM7xxState *soc; + + soc = npcm7xx_create_soc(machine, KUDO_BMC_POWER_ON_STRAPS); + npcm7xx_connect_dram(soc, machine->ram); + qdev_realize(DEVICE(soc), NULL, &error_fatal); + + npcm7xx_load_bootrom(machine, soc); + npcm7xx_connect_flash(&soc->fiu[0], 0, "mx66u51235f", + drive_get(IF_MTD, 0, 0)); + npcm7xx_connect_flash(&soc->fiu[1], 0, "mx66u51235f", + drive_get(IF_MTD, 3, 0)); + + npcm7xx_load_kernel(machine, soc); +} + +static void npcm7xx_set_soc_type(NPCM7xxMachineClass *nmc, const char *type) +{ + NPCM7xxClass *sc = NPCM7XX_CLASS(object_class_by_name(type)); + MachineClass *mc = MACHINE_CLASS(nmc); + + nmc->soc_type = type; + mc->default_cpus = mc->min_cpus = mc->max_cpus = sc->num_cpus; +} + +static void npcm7xx_machine_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + + mc->no_floppy = 1; + mc->no_cdrom = 1; + mc->no_parallel = 1; + mc->default_ram_id = "ram"; + mc->default_cpu_type = ARM_CPU_TYPE_NAME("cortex-a9"); +} + +/* + * Schematics: + * https://github.com/Nuvoton-Israel/nuvoton-info/blob/master/npcm7xx-poleg/evaluation-board/board_deliverables/NPCM750x_EB_ver.A1.1_COMPLETE.pdf + */ +static void npcm750_evb_machine_class_init(ObjectClass *oc, void *data) +{ + NPCM7xxMachineClass *nmc = NPCM7XX_MACHINE_CLASS(oc); + MachineClass *mc = MACHINE_CLASS(oc); + + npcm7xx_set_soc_type(nmc, TYPE_NPCM750); + + mc->desc = "Nuvoton NPCM750 Evaluation Board (Cortex-A9)"; + mc->init = npcm750_evb_init; + mc->default_ram_size = 512 * MiB; +}; + +static void gsj_machine_class_init(ObjectClass *oc, void *data) +{ + NPCM7xxMachineClass *nmc = NPCM7XX_MACHINE_CLASS(oc); + MachineClass *mc = MACHINE_CLASS(oc); + + npcm7xx_set_soc_type(nmc, TYPE_NPCM730); + + mc->desc = "Quanta GSJ (Cortex-A9)"; + mc->init = quanta_gsj_init; + mc->default_ram_size = 512 * MiB; +}; + +static void gbs_bmc_machine_class_init(ObjectClass *oc, void *data) +{ + NPCM7xxMachineClass *nmc = NPCM7XX_MACHINE_CLASS(oc); + MachineClass *mc = MACHINE_CLASS(oc); + + npcm7xx_set_soc_type(nmc, TYPE_NPCM730); + + mc->desc = "Quanta GBS (Cortex-A9)"; + mc->init = quanta_gbs_init; + mc->default_ram_size = 1 * GiB; +} + +static void kudo_bmc_machine_class_init(ObjectClass *oc, void *data) +{ + NPCM7xxMachineClass *nmc = NPCM7XX_MACHINE_CLASS(oc); + MachineClass *mc = MACHINE_CLASS(oc); + + npcm7xx_set_soc_type(nmc, TYPE_NPCM730); + + mc->desc = "Kudo BMC (Cortex-A9)"; + mc->init = kudo_bmc_init; + mc->default_ram_size = 1 * GiB; +}; + +static const TypeInfo npcm7xx_machine_types[] = { + { + .name = TYPE_NPCM7XX_MACHINE, + .parent = TYPE_MACHINE, + .instance_size = sizeof(NPCM7xxMachine), + .class_size = sizeof(NPCM7xxMachineClass), + .class_init = npcm7xx_machine_class_init, + .abstract = true, + }, { + .name = MACHINE_TYPE_NAME("npcm750-evb"), + .parent = TYPE_NPCM7XX_MACHINE, + .class_init = npcm750_evb_machine_class_init, + }, { + .name = MACHINE_TYPE_NAME("quanta-gsj"), + .parent = TYPE_NPCM7XX_MACHINE, + .class_init = gsj_machine_class_init, + }, { + .name = MACHINE_TYPE_NAME("quanta-gbs-bmc"), + .parent = TYPE_NPCM7XX_MACHINE, + .class_init = gbs_bmc_machine_class_init, + }, { + .name = MACHINE_TYPE_NAME("kudo-bmc"), + .parent = TYPE_NPCM7XX_MACHINE, + .class_init = kudo_bmc_machine_class_init, + }, +}; + +DEFINE_TYPES(npcm7xx_machine_types) diff --git a/hw/arm/nrf51_soc.c b/hw/arm/nrf51_soc.c new file mode 100644 index 000000000..34da0d62f --- /dev/null +++ b/hw/arm/nrf51_soc.c @@ -0,0 +1,243 @@ +/* + * Nordic Semiconductor nRF51 SoC + * 
http://infocenter.nordicsemi.com/pdf/nRF51_RM_v3.0.1.pdf + * + * Copyright 2018 Joel Stanley + * + * This code is licensed under the GPL version 2 or later. See + * the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "hw/arm/boot.h" +#include "hw/sysbus.h" +#include "hw/qdev-clock.h" +#include "hw/misc/unimp.h" +#include "qemu/log.h" + +#include "hw/arm/nrf51.h" +#include "hw/arm/nrf51_soc.h" + +/* + * The size and base is for the NRF51822 part. If other parts + * are supported in the future, add a sub-class of NRF51SoC for + * the specific variants + */ +#define NRF51822_FLASH_PAGES 256 +#define NRF51822_SRAM_PAGES 16 +#define NRF51822_FLASH_SIZE (NRF51822_FLASH_PAGES * NRF51_PAGE_SIZE) +#define NRF51822_SRAM_SIZE (NRF51822_SRAM_PAGES * NRF51_PAGE_SIZE) + +#define BASE_TO_IRQ(base) ((base >> 12) & 0x1F) + +/* HCLK (the main CPU clock) on this SoC is always 16MHz */ +#define HCLK_FRQ 16000000 + +static uint64_t clock_read(void *opaque, hwaddr addr, unsigned int size) +{ + qemu_log_mask(LOG_UNIMP, "%s: 0x%" HWADDR_PRIx " [%u]\n", + __func__, addr, size); + return 1; +} + +static void clock_write(void *opaque, hwaddr addr, uint64_t data, + unsigned int size) +{ + qemu_log_mask(LOG_UNIMP, "%s: 0x%" HWADDR_PRIx " <- 0x%" PRIx64 " [%u]\n", + __func__, addr, data, size); +} + +static const MemoryRegionOps clock_ops = { + .read = clock_read, + .write = clock_write +}; + + +static void nrf51_soc_realize(DeviceState *dev_soc, Error **errp) +{ + NRF51State *s = NRF51_SOC(dev_soc); + MemoryRegion *mr; + Error *err = NULL; + uint8_t i = 0; + hwaddr base_addr = 0; + + if (!s->board_memory) { + error_setg(errp, "memory property was not set"); + return; + } + + /* + * HCLK on this SoC is fixed, so we set up sysclk ourselves and + * the board shouldn't connect it. + */ + if (clock_has_source(s->sysclk)) { + error_setg(errp, "sysclk clock must not be wired up by the board code"); + return; + } + /* This clock doesn't need migration because it is fixed-frequency */ + clock_set_hz(s->sysclk, HCLK_FRQ); + qdev_connect_clock_in(DEVICE(&s->cpu), "cpuclk", s->sysclk); + /* + * This SoC has no systick device, so don't connect refclk. + * TODO: model the lack of systick (currently the armv7m object + * will always provide one). 
+ */ + + object_property_set_link(OBJECT(&s->cpu), "memory", OBJECT(&s->container), + &error_abort); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->cpu), errp)) { + return; + } + + memory_region_add_subregion_overlap(&s->container, 0, s->board_memory, -1); + + memory_region_init_ram(&s->sram, OBJECT(s), "nrf51.sram", s->sram_size, + &err); + if (err) { + error_propagate(errp, err); + return; + } + memory_region_add_subregion(&s->container, NRF51_SRAM_BASE, &s->sram); + + /* UART */ + if (!sysbus_realize(SYS_BUS_DEVICE(&s->uart), errp)) { + return; + } + mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->uart), 0); + memory_region_add_subregion_overlap(&s->container, NRF51_UART_BASE, mr, 0); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->uart), 0, + qdev_get_gpio_in(DEVICE(&s->cpu), + BASE_TO_IRQ(NRF51_UART_BASE))); + + /* RNG */ + if (!sysbus_realize(SYS_BUS_DEVICE(&s->rng), errp)) { + return; + } + + mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->rng), 0); + memory_region_add_subregion_overlap(&s->container, NRF51_RNG_BASE, mr, 0); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->rng), 0, + qdev_get_gpio_in(DEVICE(&s->cpu), + BASE_TO_IRQ(NRF51_RNG_BASE))); + + /* UICR, FICR, NVMC, FLASH */ + if (!object_property_set_uint(OBJECT(&s->nvm), "flash-size", + s->flash_size, errp)) { + return; + } + + if (!sysbus_realize(SYS_BUS_DEVICE(&s->nvm), errp)) { + return; + } + + mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->nvm), 0); + memory_region_add_subregion_overlap(&s->container, NRF51_NVMC_BASE, mr, 0); + mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->nvm), 1); + memory_region_add_subregion_overlap(&s->container, NRF51_FICR_BASE, mr, 0); + mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->nvm), 2); + memory_region_add_subregion_overlap(&s->container, NRF51_UICR_BASE, mr, 0); + mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->nvm), 3); + memory_region_add_subregion_overlap(&s->container, NRF51_FLASH_BASE, mr, 0); + + /* GPIO */ + if (!sysbus_realize(SYS_BUS_DEVICE(&s->gpio), errp)) { + return; + } + + mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->gpio), 0); + memory_region_add_subregion_overlap(&s->container, NRF51_GPIO_BASE, mr, 0); + + /* Pass all GPIOs to the SOC layer so they are available to the board */ + qdev_pass_gpios(DEVICE(&s->gpio), dev_soc, NULL); + + /* TIMER */ + for (i = 0; i < NRF51_NUM_TIMERS; i++) { + if (!object_property_set_uint(OBJECT(&s->timer[i]), "id", i, errp)) { + return; + } + if (!sysbus_realize(SYS_BUS_DEVICE(&s->timer[i]), errp)) { + return; + } + + base_addr = NRF51_TIMER_BASE + i * NRF51_PERIPHERAL_SIZE; + + sysbus_mmio_map(SYS_BUS_DEVICE(&s->timer[i]), 0, base_addr); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->timer[i]), 0, + qdev_get_gpio_in(DEVICE(&s->cpu), + BASE_TO_IRQ(base_addr))); + } + + /* STUB Peripherals */ + memory_region_init_io(&s->clock, OBJECT(dev_soc), &clock_ops, NULL, + "nrf51_soc.clock", NRF51_PERIPHERAL_SIZE); + memory_region_add_subregion_overlap(&s->container, + NRF51_IOMEM_BASE, &s->clock, -1); + + create_unimplemented_device("nrf51_soc.io", NRF51_IOMEM_BASE, + NRF51_IOMEM_SIZE); + create_unimplemented_device("nrf51_soc.private", + NRF51_PRIVATE_BASE, NRF51_PRIVATE_SIZE); +} + +static void nrf51_soc_init(Object *obj) +{ + uint8_t i = 0; + + NRF51State *s = NRF51_SOC(obj); + + memory_region_init(&s->container, obj, "nrf51-container", UINT64_MAX); + + object_initialize_child(OBJECT(s), "armv6m", &s->cpu, TYPE_ARMV7M); + qdev_prop_set_string(DEVICE(&s->cpu), "cpu-type", + ARM_CPU_TYPE_NAME("cortex-m0")); + qdev_prop_set_uint32(DEVICE(&s->cpu), "num-irq", 32); + + 
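+    /*
+     * The nRF51's core is a Cortex-M0 (ARMv6-M); QEMU reuses the
+     * generic armv7m container object to model it, which is why the
+     * child above is named "armv6m" but typed TYPE_ARMV7M.
+     */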
object_initialize_child(obj, "uart", &s->uart, TYPE_NRF51_UART); + object_property_add_alias(obj, "serial0", OBJECT(&s->uart), "chardev"); + + object_initialize_child(obj, "rng", &s->rng, TYPE_NRF51_RNG); + + object_initialize_child(obj, "nvm", &s->nvm, TYPE_NRF51_NVM); + + object_initialize_child(obj, "gpio", &s->gpio, TYPE_NRF51_GPIO); + + for (i = 0; i < NRF51_NUM_TIMERS; i++) { + object_initialize_child(obj, "timer[*]", &s->timer[i], + TYPE_NRF51_TIMER); + + } + + s->sysclk = qdev_init_clock_in(DEVICE(s), "sysclk", NULL, NULL, 0); +} + +static Property nrf51_soc_properties[] = { + DEFINE_PROP_LINK("memory", NRF51State, board_memory, TYPE_MEMORY_REGION, + MemoryRegion *), + DEFINE_PROP_UINT32("sram-size", NRF51State, sram_size, NRF51822_SRAM_SIZE), + DEFINE_PROP_UINT32("flash-size", NRF51State, flash_size, + NRF51822_FLASH_SIZE), + DEFINE_PROP_END_OF_LIST(), +}; + +static void nrf51_soc_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = nrf51_soc_realize; + device_class_set_props(dc, nrf51_soc_properties); +} + +static const TypeInfo nrf51_soc_info = { + .name = TYPE_NRF51_SOC, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(NRF51State), + .instance_init = nrf51_soc_init, + .class_init = nrf51_soc_class_init, +}; + +static void nrf51_soc_types(void) +{ + type_register_static(&nrf51_soc_info); +} +type_init(nrf51_soc_types) diff --git a/hw/arm/nseries.c b/hw/arm/nseries.c new file mode 100644 index 000000000..af3164c55 --- /dev/null +++ b/hw/arm/nseries.c @@ -0,0 +1,1468 @@ +/* + * Nokia N-series internet tablets. + * + * Copyright (C) 2007 Nokia Corporation + * Written by Andrzej Zaborowski + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 or + * (at your option) version 3 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . 
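+ * <http://www.gnu.org/licenses/>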
+ */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "cpu.h" +#include "chardev/char.h" +#include "qemu/cutils.h" +#include "qemu/bswap.h" +#include "sysemu/reset.h" +#include "sysemu/runstate.h" +#include "sysemu/sysemu.h" +#include "hw/arm/omap.h" +#include "hw/arm/boot.h" +#include "hw/irq.h" +#include "ui/console.h" +#include "hw/boards.h" +#include "hw/i2c/i2c.h" +#include "hw/display/blizzard.h" +#include "hw/input/lm832x.h" +#include "hw/input/tsc2xxx.h" +#include "hw/misc/cbus.h" +#include "hw/sensor/tmp105.h" +#include "hw/qdev-properties.h" +#include "hw/block/flash.h" +#include "hw/hw.h" +#include "hw/loader.h" +#include "hw/sysbus.h" +#include "qemu/log.h" + +/* Nokia N8x0 support */ +struct n800_s { + struct omap_mpu_state_s *mpu; + + struct rfbi_chip_s blizzard; + struct { + void *opaque; + uint32_t (*txrx)(void *opaque, uint32_t value, int len); + uWireSlave *chip; + } ts; + + int keymap[0x80]; + DeviceState *kbd; + + DeviceState *usb; + void *retu; + void *tahvo; + DeviceState *nand; +}; + +/* GPIO pins */ +#define N8X0_TUSB_ENABLE_GPIO 0 +#define N800_MMC2_WP_GPIO 8 +#define N800_UNKNOWN_GPIO0 9 /* out */ +#define N810_MMC2_VIOSD_GPIO 9 +#define N810_HEADSET_AMP_GPIO 10 +#define N800_CAM_TURN_GPIO 12 +#define N810_GPS_RESET_GPIO 12 +#define N800_BLIZZARD_POWERDOWN_GPIO 15 +#define N800_MMC1_WP_GPIO 23 +#define N810_MMC2_VSD_GPIO 23 +#define N8X0_ONENAND_GPIO 26 +#define N810_BLIZZARD_RESET_GPIO 30 +#define N800_UNKNOWN_GPIO2 53 /* out */ +#define N8X0_TUSB_INT_GPIO 58 +#define N8X0_BT_WKUP_GPIO 61 +#define N8X0_STI_GPIO 62 +#define N8X0_CBUS_SEL_GPIO 64 +#define N8X0_CBUS_DAT_GPIO 65 +#define N8X0_CBUS_CLK_GPIO 66 +#define N8X0_WLAN_IRQ_GPIO 87 +#define N8X0_BT_RESET_GPIO 92 +#define N8X0_TEA5761_CS_GPIO 93 +#define N800_UNKNOWN_GPIO 94 +#define N810_TSC_RESET_GPIO 94 +#define N800_CAM_ACT_GPIO 95 +#define N810_GPS_WAKEUP_GPIO 95 +#define N8X0_MMC_CS_GPIO 96 +#define N8X0_WLAN_PWR_GPIO 97 +#define N8X0_BT_HOST_WKUP_GPIO 98 +#define N810_SPEAKER_AMP_GPIO 101 +#define N810_KB_LOCK_GPIO 102 +#define N800_TSC_TS_GPIO 103 +#define N810_TSC_TS_GPIO 106 +#define N8X0_HEADPHONE_GPIO 107 +#define N8X0_RETU_GPIO 108 +#define N800_TSC_KP_IRQ_GPIO 109 +#define N810_KEYBOARD_GPIO 109 +#define N800_BAT_COVER_GPIO 110 +#define N810_SLIDE_GPIO 110 +#define N8X0_TAHVO_GPIO 111 +#define N800_UNKNOWN_GPIO4 112 /* out */ +#define N810_SLEEPX_LED_GPIO 112 +#define N800_TSC_RESET_GPIO 118 /* ? */ +#define N810_AIC33_RESET_GPIO 118 +#define N800_TSC_UNKNOWN_GPIO 119 /* out */ +#define N8X0_TMP105_GPIO 125 + +/* Config */ +#define BT_UART 0 +#define XLDR_LL_UART 1 + +/* Addresses on the I2C bus 0 */ +#define N810_TLV320AIC33_ADDR 0x18 /* Audio CODEC */ +#define N8X0_TCM825x_ADDR 0x29 /* Camera */ +#define N810_LP5521_ADDR 0x32 /* LEDs */ +#define N810_TSL2563_ADDR 0x3d /* Light sensor */ +#define N810_LM8323_ADDR 0x45 /* Keyboard */ +/* Addresses on the I2C bus 1 */ +#define N8X0_TMP105_ADDR 0x48 /* Temperature sensor */ +#define N8X0_MENELAUS_ADDR 0x72 /* Power management */ + +/* Chipselects on GPMC NOR interface */ +#define N8X0_ONENAND_CS 0 +#define N8X0_USB_ASYNC_CS 1 +#define N8X0_USB_SYNC_CS 4 + +#define N8X0_BD_ADDR 0x00, 0x1a, 0x89, 0x9e, 0x3e, 0x81 + +static void n800_mmc_cs_cb(void *opaque, int line, int level) +{ + /* TODO: this seems to actually be connected to the menelaus, to + * which also both MMC slots connect. 
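+     * For now the chip select is modelled as a plain active-low
+     * enable of the MMC controller, as the call below shows.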
*/ + omap_mmc_enable((struct omap_mmc_s *) opaque, !level); +} + +static void n8x0_gpio_setup(struct n800_s *s) +{ + qdev_connect_gpio_out(s->mpu->gpio, N8X0_MMC_CS_GPIO, + qemu_allocate_irq(n800_mmc_cs_cb, s->mpu->mmc, 0)); + qemu_irq_lower(qdev_get_gpio_in(s->mpu->gpio, N800_BAT_COVER_GPIO)); +} + +#define MAEMO_CAL_HEADER(...) \ + 'C', 'o', 'n', 'F', 0x02, 0x00, 0x04, 0x00, \ + __VA_ARGS__, \ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + +static const uint8_t n8x0_cal_wlan_mac[] = { + MAEMO_CAL_HEADER('w', 'l', 'a', 'n', '-', 'm', 'a', 'c') + 0x1c, 0x00, 0x00, 0x00, 0x47, 0xd6, 0x69, 0xb3, + 0x30, 0x08, 0xa0, 0x83, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x1a, 0x00, 0x00, 0x00, + 0x89, 0x00, 0x00, 0x00, 0x9e, 0x00, 0x00, 0x00, + 0x5d, 0x00, 0x00, 0x00, 0xc1, 0x00, 0x00, 0x00, +}; + +static const uint8_t n8x0_cal_bt_id[] = { + MAEMO_CAL_HEADER('b', 't', '-', 'i', 'd', 0, 0, 0) + 0x0a, 0x00, 0x00, 0x00, 0xa3, 0x4b, 0xf6, 0x96, + 0xa8, 0xeb, 0xb2, 0x41, 0x00, 0x00, 0x00, 0x00, + N8X0_BD_ADDR, +}; + +static void n8x0_nand_setup(struct n800_s *s) +{ + char *otp_region; + DriveInfo *dinfo; + + s->nand = qdev_new("onenand"); + qdev_prop_set_uint16(s->nand, "manufacturer_id", NAND_MFR_SAMSUNG); + /* Either 0x40 or 0x48 are OK for the device ID */ + qdev_prop_set_uint16(s->nand, "device_id", 0x48); + qdev_prop_set_uint16(s->nand, "version_id", 0); + qdev_prop_set_int32(s->nand, "shift", 1); + dinfo = drive_get(IF_MTD, 0, 0); + if (dinfo) { + qdev_prop_set_drive_err(s->nand, "drive", blk_by_legacy_dinfo(dinfo), + &error_fatal); + } + sysbus_realize_and_unref(SYS_BUS_DEVICE(s->nand), &error_fatal); + sysbus_connect_irq(SYS_BUS_DEVICE(s->nand), 0, + qdev_get_gpio_in(s->mpu->gpio, N8X0_ONENAND_GPIO)); + omap_gpmc_attach(s->mpu->gpmc, N8X0_ONENAND_CS, + sysbus_mmio_get_region(SYS_BUS_DEVICE(s->nand), 0)); + otp_region = onenand_raw_otp(s->nand); + + memcpy(otp_region + 0x000, n8x0_cal_wlan_mac, sizeof(n8x0_cal_wlan_mac)); + memcpy(otp_region + 0x800, n8x0_cal_bt_id, sizeof(n8x0_cal_bt_id)); + /* XXX: in theory should also update the OOB for both pages */ +} + +static qemu_irq n8x0_system_powerdown; + +static void n8x0_powerdown_req(Notifier *n, void *opaque) +{ + qemu_irq_raise(n8x0_system_powerdown); +} + +static Notifier n8x0_system_powerdown_notifier = { + .notify = n8x0_powerdown_req +}; + +static void n8x0_i2c_setup(struct n800_s *s) +{ + DeviceState *dev; + qemu_irq tmp_irq = qdev_get_gpio_in(s->mpu->gpio, N8X0_TMP105_GPIO); + I2CBus *i2c = omap_i2c_bus(s->mpu->i2c[0]); + + /* Attach a menelaus PM chip */ + dev = DEVICE(i2c_slave_create_simple(i2c, "twl92230", N8X0_MENELAUS_ADDR)); + qdev_connect_gpio_out(dev, 3, + qdev_get_gpio_in(s->mpu->ih[0], + OMAP_INT_24XX_SYS_NIRQ)); + + n8x0_system_powerdown = qdev_get_gpio_in(dev, 3); + qemu_register_powerdown_notifier(&n8x0_system_powerdown_notifier); + + /* Attach a TMP105 PM chip (A0 wired to ground) */ + dev = DEVICE(i2c_slave_create_simple(i2c, TYPE_TMP105, N8X0_TMP105_ADDR)); + qdev_connect_gpio_out(dev, 0, tmp_irq); +} + +/* Touchscreen and keypad controller */ +static MouseTransformInfo n800_pointercal = { + .x = 800, + .y = 480, + .a = { 14560, -68, -3455208, -39, -9621, 35152972, 65536 }, +}; + +static MouseTransformInfo n810_pointercal = { + .x = 800, + .y = 480, + .a = { 15041, 148, -4731056, 171, -10238, 35933380, 65536 }, +}; + +#define RETU_KEYCODE 61 /* F3 */ + +static void n800_key_event(void *opaque, int keycode) +{ + struct n800_s *s = (struct n800_s *) opaque; + int code = s->keymap[keycode & 0x7f]; + + if (code == 
-1) { + if ((keycode & 0x7f) == RETU_KEYCODE) { + retu_key_event(s->retu, !(keycode & 0x80)); + } + return; + } + + tsc210x_key_event(s->ts.chip, code, !(keycode & 0x80)); +} + +static const int n800_keys[16] = { + -1, + 72, /* Up */ + 63, /* Home (F5) */ + -1, + 75, /* Left */ + 28, /* Enter */ + 77, /* Right */ + -1, + 1, /* Cycle (ESC) */ + 80, /* Down */ + 62, /* Menu (F4) */ + -1, + 66, /* Zoom- (F8) */ + 64, /* FullScreen (F6) */ + 65, /* Zoom+ (F7) */ + -1, +}; + +static void n800_tsc_kbd_setup(struct n800_s *s) +{ + int i; + + /* XXX: are the three pins inverted inside the chip between the + * tsc and the cpu (N4111)? */ + qemu_irq penirq = NULL; /* NC */ + qemu_irq kbirq = qdev_get_gpio_in(s->mpu->gpio, N800_TSC_KP_IRQ_GPIO); + qemu_irq dav = qdev_get_gpio_in(s->mpu->gpio, N800_TSC_TS_GPIO); + + s->ts.chip = tsc2301_init(penirq, kbirq, dav); + s->ts.opaque = s->ts.chip->opaque; + s->ts.txrx = tsc210x_txrx; + + for (i = 0; i < 0x80; i++) { + s->keymap[i] = -1; + } + for (i = 0; i < 0x10; i++) { + if (n800_keys[i] >= 0) { + s->keymap[n800_keys[i]] = i; + } + } + + qemu_add_kbd_event_handler(n800_key_event, s); + + tsc210x_set_transform(s->ts.chip, &n800_pointercal); +} + +static void n810_tsc_setup(struct n800_s *s) +{ + qemu_irq pintdav = qdev_get_gpio_in(s->mpu->gpio, N810_TSC_TS_GPIO); + + s->ts.opaque = tsc2005_init(pintdav); + s->ts.txrx = tsc2005_txrx; + + tsc2005_set_transform(s->ts.opaque, &n810_pointercal); +} + +/* N810 Keyboard controller */ +static void n810_key_event(void *opaque, int keycode) +{ + struct n800_s *s = (struct n800_s *) opaque; + int code = s->keymap[keycode & 0x7f]; + + if (code == -1) { + if ((keycode & 0x7f) == RETU_KEYCODE) { + retu_key_event(s->retu, !(keycode & 0x80)); + } + return; + } + + lm832x_key_event(s->kbd, code, !(keycode & 0x80)); +} + +#define M 0 + +static int n810_keys[0x80] = { + [0x01] = 16, /* Q */ + [0x02] = 37, /* K */ + [0x03] = 24, /* O */ + [0x04] = 25, /* P */ + [0x05] = 14, /* Backspace */ + [0x06] = 30, /* A */ + [0x07] = 31, /* S */ + [0x08] = 32, /* D */ + [0x09] = 33, /* F */ + [0x0a] = 34, /* G */ + [0x0b] = 35, /* H */ + [0x0c] = 36, /* J */ + + [0x11] = 17, /* W */ + [0x12] = 62, /* Menu (F4) */ + [0x13] = 38, /* L */ + [0x14] = 40, /* ' (Apostrophe) */ + [0x16] = 44, /* Z */ + [0x17] = 45, /* X */ + [0x18] = 46, /* C */ + [0x19] = 47, /* V */ + [0x1a] = 48, /* B */ + [0x1b] = 49, /* N */ + [0x1c] = 42, /* Shift (Left shift) */ + [0x1f] = 65, /* Zoom+ (F7) */ + + [0x21] = 18, /* E */ + [0x22] = 39, /* ; (Semicolon) */ + [0x23] = 12, /* - (Minus) */ + [0x24] = 13, /* = (Equal) */ + [0x2b] = 56, /* Fn (Left Alt) */ + [0x2c] = 50, /* M */ + [0x2f] = 66, /* Zoom- (F8) */ + + [0x31] = 19, /* R */ + [0x32] = 29 | M, /* Right Ctrl */ + [0x34] = 57, /* Space */ + [0x35] = 51, /* , (Comma) */ + [0x37] = 72 | M, /* Up */ + [0x3c] = 82 | M, /* Compose (Insert) */ + [0x3f] = 64, /* FullScreen (F6) */ + + [0x41] = 20, /* T */ + [0x44] = 52, /* . 
(Dot) */ + [0x46] = 77 | M, /* Right */ + [0x4f] = 63, /* Home (F5) */ + [0x51] = 21, /* Y */ + [0x53] = 80 | M, /* Down */ + [0x55] = 28, /* Enter */ + [0x5f] = 1, /* Cycle (ESC) */ + + [0x61] = 22, /* U */ + [0x64] = 75 | M, /* Left */ + + [0x71] = 23, /* I */ +#if 0 + [0x75] = 28 | M, /* KP Enter (KP Enter) */ +#else + [0x75] = 15, /* KP Enter (Tab) */ +#endif +}; + +#undef M + +static void n810_kbd_setup(struct n800_s *s) +{ + qemu_irq kbd_irq = qdev_get_gpio_in(s->mpu->gpio, N810_KEYBOARD_GPIO); + int i; + + for (i = 0; i < 0x80; i++) { + s->keymap[i] = -1; + } + for (i = 0; i < 0x80; i++) { + if (n810_keys[i] > 0) { + s->keymap[n810_keys[i]] = i; + } + } + + qemu_add_kbd_event_handler(n810_key_event, s); + + /* Attach the LM8322 keyboard to the I2C bus, + * should happen in n8x0_i2c_setup and s->kbd be initialised here. */ + s->kbd = DEVICE(i2c_slave_create_simple(omap_i2c_bus(s->mpu->i2c[0]), + TYPE_LM8323, N810_LM8323_ADDR)); + qdev_connect_gpio_out(s->kbd, 0, kbd_irq); +} + +/* LCD MIPI DBI-C controller (URAL) */ +struct mipid_s { + int resp[4]; + int param[4]; + int p; + int pm; + int cmd; + + int sleep; + int booster; + int te; + int selfcheck; + int partial; + int normal; + int vscr; + int invert; + int onoff; + int gamma; + uint32_t id; +}; + +static void mipid_reset(struct mipid_s *s) +{ + s->pm = 0; + s->cmd = 0; + + s->sleep = 1; + s->booster = 0; + s->selfcheck = + (1 << 7) | /* Register loading OK. */ + (1 << 5) | /* The chip is attached. */ + (1 << 4); /* Display glass still in one piece. */ + s->te = 0; + s->partial = 0; + s->normal = 1; + s->vscr = 0; + s->invert = 0; + s->onoff = 1; + s->gamma = 0; +} + +static uint32_t mipid_txrx(void *opaque, uint32_t cmd, int len) +{ + struct mipid_s *s = (struct mipid_s *) opaque; + uint8_t ret; + + if (len > 9) { + hw_error("%s: FIXME: bad SPI word width %i\n", __func__, len); + } + + if (s->p >= ARRAY_SIZE(s->resp)) { + ret = 0; + } else { + ret = s->resp[s->p++]; + } + if (s->pm-- > 0) { + s->param[s->pm] = cmd; + } else { + s->cmd = cmd; + } + + switch (s->cmd) { + case 0x00: /* NOP */ + break; + + case 0x01: /* SWRESET */ + mipid_reset(s); + break; + + case 0x02: /* BSTROFF */ + s->booster = 0; + break; + case 0x03: /* BSTRON */ + s->booster = 1; + break; + + case 0x04: /* RDDID */ + s->p = 0; + s->resp[0] = (s->id >> 16) & 0xff; + s->resp[1] = (s->id >> 8) & 0xff; + s->resp[2] = (s->id >> 0) & 0xff; + break; + + case 0x06: /* RD_RED */ + case 0x07: /* RD_GREEN */ + /* XXX the bootloader sometimes issues RD_BLUE meaning RDDID so + * for the bootloader one needs to change this. 
*/ + case 0x08: /* RD_BLUE */ + s->p = 0; + /* TODO: return first pixel components */ + s->resp[0] = 0x01; + break; + + case 0x09: /* RDDST */ + s->p = 0; + s->resp[0] = s->booster << 7; + s->resp[1] = (5 << 4) | (s->partial << 2) | + (s->sleep << 1) | s->normal; + s->resp[2] = (s->vscr << 7) | (s->invert << 5) | + (s->onoff << 2) | (s->te << 1) | (s->gamma >> 2); + s->resp[3] = s->gamma << 6; + break; + + case 0x0a: /* RDDPM */ + s->p = 0; + s->resp[0] = (s->onoff << 2) | (s->normal << 3) | (s->sleep << 4) | + (s->partial << 5) | (s->sleep << 6) | (s->booster << 7); + break; + case 0x0b: /* RDDMADCTR */ + s->p = 0; + s->resp[0] = 0; + break; + case 0x0c: /* RDDCOLMOD */ + s->p = 0; + s->resp[0] = 5; /* 65K colours */ + break; + case 0x0d: /* RDDIM */ + s->p = 0; + s->resp[0] = (s->invert << 5) | (s->vscr << 7) | s->gamma; + break; + case 0x0e: /* RDDSM */ + s->p = 0; + s->resp[0] = s->te << 7; + break; + case 0x0f: /* RDDSDR */ + s->p = 0; + s->resp[0] = s->selfcheck; + break; + + case 0x10: /* SLPIN */ + s->sleep = 1; + break; + case 0x11: /* SLPOUT */ + s->sleep = 0; + s->selfcheck ^= 1 << 6; /* POFF self-diagnosis Ok */ + break; + + case 0x12: /* PTLON */ + s->partial = 1; + s->normal = 0; + s->vscr = 0; + break; + case 0x13: /* NORON */ + s->partial = 0; + s->normal = 1; + s->vscr = 0; + break; + + case 0x20: /* INVOFF */ + s->invert = 0; + break; + case 0x21: /* INVON */ + s->invert = 1; + break; + + case 0x22: /* APOFF */ + case 0x23: /* APON */ + goto bad_cmd; + + case 0x25: /* WRCNTR */ + if (s->pm < 0) { + s->pm = 1; + } + goto bad_cmd; + + case 0x26: /* GAMSET */ + if (!s->pm) { + s->gamma = ctz32(s->param[0] & 0xf); + if (s->gamma == 32) { + s->gamma = -1; /* XXX: should this be 0? */ + } + } else if (s->pm < 0) { + s->pm = 1; + } + break; + + case 0x28: /* DISPOFF */ + s->onoff = 0; + break; + case 0x29: /* DISPON */ + s->onoff = 1; + break; + + case 0x2a: /* CASET */ + case 0x2b: /* RASET */ + case 0x2c: /* RAMWR */ + case 0x2d: /* RGBSET */ + case 0x2e: /* RAMRD */ + case 0x30: /* PTLAR */ + case 0x33: /* SCRLAR */ + goto bad_cmd; + + case 0x34: /* TEOFF */ + s->te = 0; + break; + case 0x35: /* TEON */ + if (!s->pm) { + s->te = 1; + } else if (s->pm < 0) { + s->pm = 1; + } + break; + + case 0x36: /* MADCTR */ + goto bad_cmd; + + case 0x37: /* VSCSAD */ + s->partial = 0; + s->normal = 0; + s->vscr = 1; + break; + + case 0x38: /* IDMOFF */ + case 0x39: /* IDMON */ + case 0x3a: /* COLMOD */ + goto bad_cmd; + + case 0xb0: /* CLKINT / DISCTL */ + case 0xb1: /* CLKEXT */ + if (s->pm < 0) { + s->pm = 2; + } + break; + + case 0xb4: /* FRMSEL */ + break; + + case 0xb5: /* FRM8SEL */ + case 0xb6: /* TMPRNG / INIESC */ + case 0xb7: /* TMPHIS / NOP2 */ + case 0xb8: /* TMPREAD / MADCTL */ + case 0xba: /* DISTCTR */ + case 0xbb: /* EPVOL */ + goto bad_cmd; + + case 0xbd: /* Unknown */ + s->p = 0; + s->resp[0] = 0; + s->resp[1] = 1; + break; + + case 0xc2: /* IFMOD */ + if (s->pm < 0) { + s->pm = 2; + } + break; + + case 0xc6: /* PWRCTL */ + case 0xc7: /* PPWRCTL */ + case 0xd0: /* EPWROUT */ + case 0xd1: /* EPWRIN */ + case 0xd4: /* RDEV */ + case 0xd5: /* RDRR */ + goto bad_cmd; + + case 0xda: /* RDID1 */ + s->p = 0; + s->resp[0] = (s->id >> 16) & 0xff; + break; + case 0xdb: /* RDID2 */ + s->p = 0; + s->resp[0] = (s->id >> 8) & 0xff; + break; + case 0xdc: /* RDID3 */ + s->p = 0; + s->resp[0] = (s->id >> 0) & 0xff; + break; + + default: + bad_cmd: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: unknown command 0x%02x\n", __func__, s->cmd); + break; + } + + return ret; +} + +static void 
*mipid_init(void) +{ + struct mipid_s *s = (struct mipid_s *) g_malloc0(sizeof(*s)); + + s->id = 0x838f03; + mipid_reset(s); + + return s; +} + +static void n8x0_spi_setup(struct n800_s *s) +{ + void *tsc = s->ts.opaque; + void *mipid = mipid_init(); + + omap_mcspi_attach(s->mpu->mcspi[0], s->ts.txrx, tsc, 0); + omap_mcspi_attach(s->mpu->mcspi[0], mipid_txrx, mipid, 1); +} + +/* This task is normally performed by the bootloader. If we're loading + * a kernel directly, we need to enable the Blizzard ourselves. */ +static void n800_dss_init(struct rfbi_chip_s *chip) +{ + uint8_t *fb_blank; + + chip->write(chip->opaque, 0, 0x2a); /* LCD Width register */ + chip->write(chip->opaque, 1, 0x64); + chip->write(chip->opaque, 0, 0x2c); /* LCD HNDP register */ + chip->write(chip->opaque, 1, 0x1e); + chip->write(chip->opaque, 0, 0x2e); /* LCD Height 0 register */ + chip->write(chip->opaque, 1, 0xe0); + chip->write(chip->opaque, 0, 0x30); /* LCD Height 1 register */ + chip->write(chip->opaque, 1, 0x01); + chip->write(chip->opaque, 0, 0x32); /* LCD VNDP register */ + chip->write(chip->opaque, 1, 0x06); + chip->write(chip->opaque, 0, 0x68); /* Display Mode register */ + chip->write(chip->opaque, 1, 1); /* Enable bit */ + + chip->write(chip->opaque, 0, 0x6c); + chip->write(chip->opaque, 1, 0x00); /* Input X Start Position */ + chip->write(chip->opaque, 1, 0x00); /* Input X Start Position */ + chip->write(chip->opaque, 1, 0x00); /* Input Y Start Position */ + chip->write(chip->opaque, 1, 0x00); /* Input Y Start Position */ + chip->write(chip->opaque, 1, 0x1f); /* Input X End Position */ + chip->write(chip->opaque, 1, 0x03); /* Input X End Position */ + chip->write(chip->opaque, 1, 0xdf); /* Input Y End Position */ + chip->write(chip->opaque, 1, 0x01); /* Input Y End Position */ + chip->write(chip->opaque, 1, 0x00); /* Output X Start Position */ + chip->write(chip->opaque, 1, 0x00); /* Output X Start Position */ + chip->write(chip->opaque, 1, 0x00); /* Output Y Start Position */ + chip->write(chip->opaque, 1, 0x00); /* Output Y Start Position */ + chip->write(chip->opaque, 1, 0x1f); /* Output X End Position */ + chip->write(chip->opaque, 1, 0x03); /* Output X End Position */ + chip->write(chip->opaque, 1, 0xdf); /* Output Y End Position */ + chip->write(chip->opaque, 1, 0x01); /* Output Y End Position */ + chip->write(chip->opaque, 1, 0x01); /* Input Data Format */ + chip->write(chip->opaque, 1, 0x01); /* Data Source Select */ + + fb_blank = memset(g_malloc(800 * 480 * 2), 0xff, 800 * 480 * 2); + /* Display Memory Data Port */ + chip->block(chip->opaque, 1, fb_blank, 800 * 480 * 2, 800); + g_free(fb_blank); +} + +static void n8x0_dss_setup(struct n800_s *s) +{ + s->blizzard.opaque = s1d13745_init(NULL); + s->blizzard.block = s1d13745_write_block; + s->blizzard.write = s1d13745_write; + s->blizzard.read = s1d13745_read; + + omap_rfbi_attach(s->mpu->dss, 0, &s->blizzard); +} + +static void n8x0_cbus_setup(struct n800_s *s) +{ + qemu_irq dat_out = qdev_get_gpio_in(s->mpu->gpio, N8X0_CBUS_DAT_GPIO); + qemu_irq retu_irq = qdev_get_gpio_in(s->mpu->gpio, N8X0_RETU_GPIO); + qemu_irq tahvo_irq = qdev_get_gpio_in(s->mpu->gpio, N8X0_TAHVO_GPIO); + + CBus *cbus = cbus_init(dat_out); + + qdev_connect_gpio_out(s->mpu->gpio, N8X0_CBUS_CLK_GPIO, cbus->clk); + qdev_connect_gpio_out(s->mpu->gpio, N8X0_CBUS_DAT_GPIO, cbus->dat); + qdev_connect_gpio_out(s->mpu->gpio, N8X0_CBUS_SEL_GPIO, cbus->sel); + + cbus_attach(cbus, s->retu = retu_init(retu_irq, 1)); + cbus_attach(cbus, s->tahvo = tahvo_init(tahvo_irq, 1)); +} + +static 
void n8x0_usb_setup(struct n800_s *s) +{ + SysBusDevice *dev; + s->usb = qdev_new("tusb6010"); + dev = SYS_BUS_DEVICE(s->usb); + sysbus_realize_and_unref(dev, &error_fatal); + sysbus_connect_irq(dev, 0, + qdev_get_gpio_in(s->mpu->gpio, N8X0_TUSB_INT_GPIO)); + /* Using the NOR interface */ + omap_gpmc_attach(s->mpu->gpmc, N8X0_USB_ASYNC_CS, + sysbus_mmio_get_region(dev, 0)); + omap_gpmc_attach(s->mpu->gpmc, N8X0_USB_SYNC_CS, + sysbus_mmio_get_region(dev, 1)); + qdev_connect_gpio_out(s->mpu->gpio, N8X0_TUSB_ENABLE_GPIO, + qdev_get_gpio_in(s->usb, 0)); /* tusb_pwr */ +} + +/* Setup done before the main bootloader starts by some early setup code + * - used when we want to run the main bootloader in emulation. This + * isn't documented. */ +static uint32_t n800_pinout[104] = { + 0x080f00d8, 0x00d40808, 0x03080808, 0x080800d0, + 0x00dc0808, 0x0b0f0f00, 0x080800b4, 0x00c00808, + 0x08080808, 0x180800c4, 0x00b80000, 0x08080808, + 0x080800bc, 0x00cc0808, 0x08081818, 0x18180128, + 0x01241800, 0x18181818, 0x000000f0, 0x01300000, + 0x00001b0b, 0x1b0f0138, 0x00e0181b, 0x1b031b0b, + 0x180f0078, 0x00740018, 0x0f0f0f1a, 0x00000080, + 0x007c0000, 0x00000000, 0x00000088, 0x00840000, + 0x00000000, 0x00000094, 0x00980300, 0x0f180003, + 0x0000008c, 0x00900f0f, 0x0f0f1b00, 0x0f00009c, + 0x01140000, 0x1b1b0f18, 0x0818013c, 0x01400008, + 0x00001818, 0x000b0110, 0x010c1800, 0x0b030b0f, + 0x181800f4, 0x00f81818, 0x00000018, 0x000000fc, + 0x00401808, 0x00000000, 0x0f1b0030, 0x003c0008, + 0x00000000, 0x00000038, 0x00340000, 0x00000000, + 0x1a080070, 0x00641a1a, 0x08080808, 0x08080060, + 0x005c0808, 0x08080808, 0x08080058, 0x00540808, + 0x08080808, 0x0808006c, 0x00680808, 0x08080808, + 0x000000a8, 0x00b00000, 0x08080808, 0x000000a0, + 0x00a40000, 0x00000000, 0x08ff0050, 0x004c0808, + 0xffffffff, 0xffff0048, 0x0044ffff, 0xffffffff, + 0x000000ac, 0x01040800, 0x08080b0f, 0x18180100, + 0x01081818, 0x0b0b1808, 0x1a0300e4, 0x012c0b1a, + 0x02020018, 0x0b000134, 0x011c0800, 0x0b1b1b00, + 0x0f0000c8, 0x00ec181b, 0x000f0f02, 0x00180118, + 0x01200000, 0x0f0b1b1b, 0x0f0200e8, 0x0000020b, +}; + +static void n800_setup_nolo_tags(void *sram_base) +{ + int i; + uint32_t *p = sram_base + 0x8000; + uint32_t *v = sram_base + 0xa000; + + memset(p, 0, 0x3000); + + strcpy((void *) (p + 0), "QEMU N800"); + + strcpy((void *) (p + 8), "F5"); + + stl_p(p + 10, 0x04f70000); + strcpy((void *) (p + 9), "RX-34"); + + /* RAM size in MB? */ + stl_p(p + 12, 0x80); + + /* Pointer to the list of tags */ + stl_p(p + 13, OMAP2_SRAM_BASE + 0x9000); + + /* The NOLO tags start here */ + p = sram_base + 0x9000; +#define ADD_TAG(tag, len) \ + stw_p((uint16_t *) p + 0, tag); \ + stw_p((uint16_t *) p + 1, len); p++; \ + stl_p(p++, OMAP2_SRAM_BASE | (((void *) v - sram_base) & 0xffff)); + + /* OMAP STI console? Pin out settings? */ + ADD_TAG(0x6e01, 414); + for (i = 0; i < ARRAY_SIZE(n800_pinout); i++) { + stl_p(v++, n800_pinout[i]); + } + + /* Kernel memsize? */ + ADD_TAG(0x6e05, 1); + stl_p(v++, 2); + + /* NOLO serial console */ + ADD_TAG(0x6e02, 4); + stl_p(v++, XLDR_LL_UART); /* UART number (1 - 3) */ + +#if 0 + /* CBUS settings (Retu/AVilma) */ + ADD_TAG(0x6e03, 6); + stw_p((uint16_t *) v + 0, 65); /* CBUS GPIO0 */ + stw_p((uint16_t *) v + 1, 66); /* CBUS GPIO1 */ + stw_p((uint16_t *) v + 2, 64); /* CBUS GPIO2 */ + v += 2; +#endif + + /* Nokia ASIC BB5 (Retu/Tahvo) */ + ADD_TAG(0x6e0a, 4); + stw_p((uint16_t *) v + 0, 111); /* "Retu" interrupt GPIO */ + stw_p((uint16_t *) v + 1, 108); /* "Tahvo" interrupt GPIO */ + v++; + + /* LCD console? 
*/ + ADD_TAG(0x6e04, 4); + stw_p((uint16_t *) v + 0, 30); /* ??? */ + stw_p((uint16_t *) v + 1, 24); /* ??? */ + v++; + +#if 0 + /* LCD settings */ + ADD_TAG(0x6e06, 2); + stw_p((uint16_t *) (v++), 15); /* ??? */ +#endif + + /* I^2C (Menelaus) */ + ADD_TAG(0x6e07, 4); + stl_p(v++, 0x00720000); /* ??? */ + + /* Unknown */ + ADD_TAG(0x6e0b, 6); + stw_p((uint16_t *) v + 0, 94); /* ??? */ + stw_p((uint16_t *) v + 1, 23); /* ??? */ + stw_p((uint16_t *) v + 2, 0); /* ??? */ + v += 2; + + /* OMAP gpio switch info */ + ADD_TAG(0x6e0c, 80); + strcpy((void *) v, "bat_cover"); v += 3; + stw_p((uint16_t *) v + 0, 110); /* GPIO num ??? */ + stw_p((uint16_t *) v + 1, 1); /* GPIO num ??? */ + v += 2; + strcpy((void *) v, "cam_act"); v += 3; + stw_p((uint16_t *) v + 0, 95); /* GPIO num ??? */ + stw_p((uint16_t *) v + 1, 32); /* GPIO num ??? */ + v += 2; + strcpy((void *) v, "cam_turn"); v += 3; + stw_p((uint16_t *) v + 0, 12); /* GPIO num ??? */ + stw_p((uint16_t *) v + 1, 33); /* GPIO num ??? */ + v += 2; + strcpy((void *) v, "headphone"); v += 3; + stw_p((uint16_t *) v + 0, 107); /* GPIO num ??? */ + stw_p((uint16_t *) v + 1, 17); /* GPIO num ??? */ + v += 2; + + /* Bluetooth */ + ADD_TAG(0x6e0e, 12); + stl_p(v++, 0x5c623d01); /* ??? */ + stl_p(v++, 0x00000201); /* ??? */ + stl_p(v++, 0x00000000); /* ??? */ + + /* CX3110x WLAN settings */ + ADD_TAG(0x6e0f, 8); + stl_p(v++, 0x00610025); /* ??? */ + stl_p(v++, 0xffff0057); /* ??? */ + + /* MMC host settings */ + ADD_TAG(0x6e10, 12); + stl_p(v++, 0xffff000f); /* ??? */ + stl_p(v++, 0xffffffff); /* ??? */ + stl_p(v++, 0x00000060); /* ??? */ + + /* OneNAND chip select */ + ADD_TAG(0x6e11, 10); + stl_p(v++, 0x00000401); /* ??? */ + stl_p(v++, 0x0002003a); /* ??? */ + stl_p(v++, 0x00000002); /* ??? */ + + /* TEA5761 sensor settings */ + ADD_TAG(0x6e12, 2); + stl_p(v++, 93); /* GPIO num ??? */ + +#if 0 + /* Unknown tag */ + ADD_TAG(6e09, 0); + + /* Kernel UART / console */ + ADD_TAG(6e12, 0); +#endif + + /* End of the list */ + stl_p(p++, 0x00000000); + stl_p(p++, 0x00000000); +} + +/* This task is normally performed by the bootloader. If we're loading + * a kernel directly, we need to set up GPMC mappings ourselves. 
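+ *
+ * Going by the OMAP GPMC CONFIG7 register layout, the value programmed
+ * below decodes as: BASEADDRESS = 4 (a window based at 0x04000000),
+ * CSVALID set, and MASKADDRESS = 0xf (a 16 MiB window).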
*/ +static void n800_gpmc_init(struct n800_s *s) +{ + uint32_t config7 = + (0xf << 8) | /* MASKADDRESS */ + (1 << 6) | /* CSVALID */ + (4 << 0); /* BASEADDRESS */ + + cpu_physical_memory_write(0x6800a078, /* GPMC_CONFIG7_0 */ + &config7, sizeof(config7)); +} + +/* Setup sequence done by the bootloader */ +static void n8x0_boot_init(void *opaque) +{ + struct n800_s *s = (struct n800_s *) opaque; + uint32_t buf; + + /* PRCM setup */ +#define omap_writel(addr, val) \ + buf = (val); \ + cpu_physical_memory_write(addr, &buf, sizeof(buf)) + + omap_writel(0x48008060, 0x41); /* PRCM_CLKSRC_CTRL */ + omap_writel(0x48008070, 1); /* PRCM_CLKOUT_CTRL */ + omap_writel(0x48008078, 0); /* PRCM_CLKEMUL_CTRL */ + omap_writel(0x48008090, 0); /* PRCM_VOLTSETUP */ + omap_writel(0x48008094, 0); /* PRCM_CLKSSETUP */ + omap_writel(0x48008098, 0); /* PRCM_POLCTRL */ + omap_writel(0x48008140, 2); /* CM_CLKSEL_MPU */ + omap_writel(0x48008148, 0); /* CM_CLKSTCTRL_MPU */ + omap_writel(0x48008158, 1); /* RM_RSTST_MPU */ + omap_writel(0x480081c8, 0x15); /* PM_WKDEP_MPU */ + omap_writel(0x480081d4, 0x1d4); /* PM_EVGENCTRL_MPU */ + omap_writel(0x480081d8, 0); /* PM_EVEGENONTIM_MPU */ + omap_writel(0x480081dc, 0); /* PM_EVEGENOFFTIM_MPU */ + omap_writel(0x480081e0, 0xc); /* PM_PWSTCTRL_MPU */ + omap_writel(0x48008200, 0x047e7ff7); /* CM_FCLKEN1_CORE */ + omap_writel(0x48008204, 0x00000004); /* CM_FCLKEN2_CORE */ + omap_writel(0x48008210, 0x047e7ff1); /* CM_ICLKEN1_CORE */ + omap_writel(0x48008214, 0x00000004); /* CM_ICLKEN2_CORE */ + omap_writel(0x4800821c, 0x00000000); /* CM_ICLKEN4_CORE */ + omap_writel(0x48008230, 0); /* CM_AUTOIDLE1_CORE */ + omap_writel(0x48008234, 0); /* CM_AUTOIDLE2_CORE */ + omap_writel(0x48008238, 7); /* CM_AUTOIDLE3_CORE */ + omap_writel(0x4800823c, 0); /* CM_AUTOIDLE4_CORE */ + omap_writel(0x48008240, 0x04360626); /* CM_CLKSEL1_CORE */ + omap_writel(0x48008244, 0x00000014); /* CM_CLKSEL2_CORE */ + omap_writel(0x48008248, 0); /* CM_CLKSTCTRL_CORE */ + omap_writel(0x48008300, 0x00000000); /* CM_FCLKEN_GFX */ + omap_writel(0x48008310, 0x00000000); /* CM_ICLKEN_GFX */ + omap_writel(0x48008340, 0x00000001); /* CM_CLKSEL_GFX */ + omap_writel(0x48008400, 0x00000004); /* CM_FCLKEN_WKUP */ + omap_writel(0x48008410, 0x00000004); /* CM_ICLKEN_WKUP */ + omap_writel(0x48008440, 0x00000000); /* CM_CLKSEL_WKUP */ + omap_writel(0x48008500, 0x000000cf); /* CM_CLKEN_PLL */ + omap_writel(0x48008530, 0x0000000c); /* CM_AUTOIDLE_PLL */ + omap_writel(0x48008540, /* CM_CLKSEL1_PLL */ + (0x78 << 12) | (6 << 8)); + omap_writel(0x48008544, 2); /* CM_CLKSEL2_PLL */ + + /* GPMC setup */ + n800_gpmc_init(s); + + /* Video setup */ + n800_dss_init(&s->blizzard); + + /* CPU setup */ + s->mpu->cpu->env.GE = 0x5; + + /* If the machine has a slided keyboard, open it */ + if (s->kbd) { + qemu_irq_raise(qdev_get_gpio_in(s->mpu->gpio, N810_SLIDE_GPIO)); + } +} + +#define OMAP_TAG_NOKIA_BT 0x4e01 +#define OMAP_TAG_WLAN_CX3110X 0x4e02 +#define OMAP_TAG_CBUS 0x4e03 +#define OMAP_TAG_EM_ASIC_BB5 0x4e04 + +static struct omap_gpiosw_info_s { + const char *name; + int line; + int type; +} n800_gpiosw_info[] = { + { + "bat_cover", N800_BAT_COVER_GPIO, + OMAP_GPIOSW_TYPE_COVER | OMAP_GPIOSW_INVERTED, + }, { + "cam_act", N800_CAM_ACT_GPIO, + OMAP_GPIOSW_TYPE_ACTIVITY, + }, { + "cam_turn", N800_CAM_TURN_GPIO, + OMAP_GPIOSW_TYPE_ACTIVITY | OMAP_GPIOSW_INVERTED, + }, { + "headphone", N8X0_HEADPHONE_GPIO, + OMAP_GPIOSW_TYPE_CONNECTION | OMAP_GPIOSW_INVERTED, + }, + { NULL } +}, n810_gpiosw_info[] = { + { + "gps_reset", N810_GPS_RESET_GPIO, + 
OMAP_GPIOSW_TYPE_ACTIVITY | OMAP_GPIOSW_OUTPUT, + }, { + "gps_wakeup", N810_GPS_WAKEUP_GPIO, + OMAP_GPIOSW_TYPE_ACTIVITY | OMAP_GPIOSW_OUTPUT, + }, { + "headphone", N8X0_HEADPHONE_GPIO, + OMAP_GPIOSW_TYPE_CONNECTION | OMAP_GPIOSW_INVERTED, + }, { + "kb_lock", N810_KB_LOCK_GPIO, + OMAP_GPIOSW_TYPE_COVER | OMAP_GPIOSW_INVERTED, + }, { + "sleepx_led", N810_SLEEPX_LED_GPIO, + OMAP_GPIOSW_TYPE_ACTIVITY | OMAP_GPIOSW_INVERTED | OMAP_GPIOSW_OUTPUT, + }, { + "slide", N810_SLIDE_GPIO, + OMAP_GPIOSW_TYPE_COVER | OMAP_GPIOSW_INVERTED, + }, + { NULL } +}; + +static struct omap_partition_info_s { + uint32_t offset; + uint32_t size; + int mask; + const char *name; +} n800_part_info[] = { + { 0x00000000, 0x00020000, 0x3, "bootloader" }, + { 0x00020000, 0x00060000, 0x0, "config" }, + { 0x00080000, 0x00200000, 0x0, "kernel" }, + { 0x00280000, 0x00200000, 0x3, "initfs" }, + { 0x00480000, 0x0fb80000, 0x3, "rootfs" }, + + { 0, 0, 0, NULL } +}, n810_part_info[] = { + { 0x00000000, 0x00020000, 0x3, "bootloader" }, + { 0x00020000, 0x00060000, 0x0, "config" }, + { 0x00080000, 0x00220000, 0x0, "kernel" }, + { 0x002a0000, 0x00400000, 0x0, "initfs" }, + { 0x006a0000, 0x0f960000, 0x0, "rootfs" }, + + { 0, 0, 0, NULL } +}; + +static uint8_t n8x0_bd_addr[6] = { N8X0_BD_ADDR }; + +static int n8x0_atag_setup(void *p, int model) +{ + uint8_t *b; + uint16_t *w; + uint32_t *l; + struct omap_gpiosw_info_s *gpiosw; + struct omap_partition_info_s *partition; + const char *tag; + + w = p; + + stw_p(w++, OMAP_TAG_UART); /* u16 tag */ + stw_p(w++, 4); /* u16 len */ + stw_p(w++, (1 << 2) | (1 << 1) | (1 << 0)); /* uint enabled_uarts */ + w++; + +#if 0 + stw_p(w++, OMAP_TAG_SERIAL_CONSOLE); /* u16 tag */ + stw_p(w++, 4); /* u16 len */ + stw_p(w++, XLDR_LL_UART + 1); /* u8 console_uart */ + stw_p(w++, 115200); /* u32 console_speed */ +#endif + + stw_p(w++, OMAP_TAG_LCD); /* u16 tag */ + stw_p(w++, 36); /* u16 len */ + strcpy((void *) w, "QEMU LCD panel"); /* char panel_name[16] */ + w += 8; + strcpy((void *) w, "blizzard"); /* char ctrl_name[16] */ + w += 8; + stw_p(w++, N810_BLIZZARD_RESET_GPIO); /* TODO: n800 s16 nreset_gpio */ + stw_p(w++, 24); /* u8 data_lines */ + + stw_p(w++, OMAP_TAG_CBUS); /* u16 tag */ + stw_p(w++, 8); /* u16 len */ + stw_p(w++, N8X0_CBUS_CLK_GPIO); /* s16 clk_gpio */ + stw_p(w++, N8X0_CBUS_DAT_GPIO); /* s16 dat_gpio */ + stw_p(w++, N8X0_CBUS_SEL_GPIO); /* s16 sel_gpio */ + w++; + + stw_p(w++, OMAP_TAG_EM_ASIC_BB5); /* u16 tag */ + stw_p(w++, 4); /* u16 len */ + stw_p(w++, N8X0_RETU_GPIO); /* s16 retu_irq_gpio */ + stw_p(w++, N8X0_TAHVO_GPIO); /* s16 tahvo_irq_gpio */ + + gpiosw = (model == 810) ? 
n810_gpiosw_info : n800_gpiosw_info; + for (; gpiosw->name; gpiosw++) { + stw_p(w++, OMAP_TAG_GPIO_SWITCH); /* u16 tag */ + stw_p(w++, 20); /* u16 len */ + strcpy((void *) w, gpiosw->name); /* char name[12] */ + w += 6; + stw_p(w++, gpiosw->line); /* u16 gpio */ + stw_p(w++, gpiosw->type); + stw_p(w++, 0); + stw_p(w++, 0); + } + + stw_p(w++, OMAP_TAG_NOKIA_BT); /* u16 tag */ + stw_p(w++, 12); /* u16 len */ + b = (void *) w; + stb_p(b++, 0x01); /* u8 chip_type (CSR) */ + stb_p(b++, N8X0_BT_WKUP_GPIO); /* u8 bt_wakeup_gpio */ + stb_p(b++, N8X0_BT_HOST_WKUP_GPIO); /* u8 host_wakeup_gpio */ + stb_p(b++, N8X0_BT_RESET_GPIO); /* u8 reset_gpio */ + stb_p(b++, BT_UART + 1); /* u8 bt_uart */ + memcpy(b, &n8x0_bd_addr, 6); /* u8 bd_addr[6] */ + b += 6; + stb_p(b++, 0x02); /* u8 bt_sysclk (38.4) */ + w = (void *) b; + + stw_p(w++, OMAP_TAG_WLAN_CX3110X); /* u16 tag */ + stw_p(w++, 8); /* u16 len */ + stw_p(w++, 0x25); /* u8 chip_type */ + stw_p(w++, N8X0_WLAN_PWR_GPIO); /* s16 power_gpio */ + stw_p(w++, N8X0_WLAN_IRQ_GPIO); /* s16 irq_gpio */ + stw_p(w++, -1); /* s16 spi_cs_gpio */ + + stw_p(w++, OMAP_TAG_MMC); /* u16 tag */ + stw_p(w++, 16); /* u16 len */ + if (model == 810) { + stw_p(w++, 0x23f); /* unsigned flags */ + stw_p(w++, -1); /* s16 power_pin */ + stw_p(w++, -1); /* s16 switch_pin */ + stw_p(w++, -1); /* s16 wp_pin */ + stw_p(w++, 0x240); /* unsigned flags */ + stw_p(w++, 0xc000); /* s16 power_pin */ + stw_p(w++, 0x0248); /* s16 switch_pin */ + stw_p(w++, 0xc000); /* s16 wp_pin */ + } else { + stw_p(w++, 0xf); /* unsigned flags */ + stw_p(w++, -1); /* s16 power_pin */ + stw_p(w++, -1); /* s16 switch_pin */ + stw_p(w++, -1); /* s16 wp_pin */ + stw_p(w++, 0); /* unsigned flags */ + stw_p(w++, 0); /* s16 power_pin */ + stw_p(w++, 0); /* s16 switch_pin */ + stw_p(w++, 0); /* s16 wp_pin */ + } + + stw_p(w++, OMAP_TAG_TEA5761); /* u16 tag */ + stw_p(w++, 4); /* u16 len */ + stw_p(w++, N8X0_TEA5761_CS_GPIO); /* u16 enable_gpio */ + w++; + + partition = (model == 810) ? n810_part_info : n800_part_info; + for (; partition->name; partition++) { + stw_p(w++, OMAP_TAG_PARTITION); /* u16 tag */ + stw_p(w++, 28); /* u16 len */ + strcpy((void *) w, partition->name); /* char name[16] */ + l = (void *) (w + 8); + stl_p(l++, partition->size); /* unsigned int size */ + stl_p(l++, partition->offset); /* unsigned int offset */ + stl_p(l++, partition->mask); /* unsigned int mask_flags */ + w = (void *) l; + } + + stw_p(w++, OMAP_TAG_BOOT_REASON); /* u16 tag */ + stw_p(w++, 12); /* u16 len */ +#if 0 + strcpy((void *) w, "por"); /* char reason_str[12] */ + strcpy((void *) w, "charger"); /* char reason_str[12] */ + strcpy((void *) w, "32wd_to"); /* char reason_str[12] */ + strcpy((void *) w, "sw_rst"); /* char reason_str[12] */ + strcpy((void *) w, "mbus"); /* char reason_str[12] */ + strcpy((void *) w, "unknown"); /* char reason_str[12] */ + strcpy((void *) w, "swdg_to"); /* char reason_str[12] */ + strcpy((void *) w, "sec_vio"); /* char reason_str[12] */ + strcpy((void *) w, "pwr_key"); /* char reason_str[12] */ + strcpy((void *) w, "rtc_alarm"); /* char reason_str[12] */ +#else + strcpy((void *) w, "pwr_key"); /* char reason_str[12] */ +#endif + w += 6; + + tag = (model == 810) ? 
"RX-44" : "RX-34"; + stw_p(w++, OMAP_TAG_VERSION_STR); /* u16 tag */ + stw_p(w++, 24); /* u16 len */ + strcpy((void *) w, "product"); /* char component[12] */ + w += 6; + strcpy((void *) w, tag); /* char version[12] */ + w += 6; + + stw_p(w++, OMAP_TAG_VERSION_STR); /* u16 tag */ + stw_p(w++, 24); /* u16 len */ + strcpy((void *) w, "hw-build"); /* char component[12] */ + w += 6; + strcpy((void *) w, "QEMU "); + pstrcat((void *) w, 12, qemu_hw_version()); /* char version[12] */ + w += 6; + + tag = (model == 810) ? "1.1.10-qemu" : "1.1.6-qemu"; + stw_p(w++, OMAP_TAG_VERSION_STR); /* u16 tag */ + stw_p(w++, 24); /* u16 len */ + strcpy((void *) w, "nolo"); /* char component[12] */ + w += 6; + strcpy((void *) w, tag); /* char version[12] */ + w += 6; + + return (void *) w - p; +} + +static int n800_atag_setup(const struct arm_boot_info *info, void *p) +{ + return n8x0_atag_setup(p, 800); +} + +static int n810_atag_setup(const struct arm_boot_info *info, void *p) +{ + return n8x0_atag_setup(p, 810); +} + +static void n8x0_init(MachineState *machine, + struct arm_boot_info *binfo, int model) +{ + struct n800_s *s = (struct n800_s *) g_malloc0(sizeof(*s)); + MachineClass *mc = MACHINE_GET_CLASS(machine); + + if (machine->ram_size != mc->default_ram_size) { + char *sz = size_to_str(mc->default_ram_size); + error_report("Invalid RAM size, should be %s", sz); + g_free(sz); + exit(EXIT_FAILURE); + } + binfo->ram_size = machine->ram_size; + + memory_region_add_subregion(get_system_memory(), OMAP2_Q2_BASE, + machine->ram); + + s->mpu = omap2420_mpu_init(machine->ram, machine->cpu_type); + + /* Setup peripherals + * + * Believed external peripherals layout in the N810: + * (spi bus 1) + * tsc2005 + * lcd_mipid + * (spi bus 2) + * Conexant cx3110x (WLAN) + * optional: pc2400m (WiMAX) + * (i2c bus 0) + * TLV320AIC33 (audio codec) + * TCM825x (camera by Toshiba) + * lp5521 (clever LEDs) + * tsl2563 (light sensor, hwmon, model 7, rev. 0) + * lm8323 (keypad, manf 00, rev 04) + * (i2c bus 1) + * tmp105 (temperature sensor, hwmon) + * menelaus (pm) + * (somewhere on i2c - maybe N800-only) + * tea5761 (FM tuner) + * (serial 0) + * GPS + * (some serial port) + * csr41814 (Bluetooth) + */ + n8x0_gpio_setup(s); + n8x0_nand_setup(s); + n8x0_i2c_setup(s); + if (model == 800) { + n800_tsc_kbd_setup(s); + } else if (model == 810) { + n810_tsc_setup(s); + n810_kbd_setup(s); + } + n8x0_spi_setup(s); + n8x0_dss_setup(s); + n8x0_cbus_setup(s); + if (machine_usb(machine)) { + n8x0_usb_setup(s); + } + + if (machine->kernel_filename) { + /* Or at the linux loader. */ + arm_load_kernel(s->mpu->cpu, machine, binfo); + + qemu_register_reset(n8x0_boot_init, s); + } + + if (option_rom[0].name && + (machine->boot_order[0] == 'n' || !machine->kernel_filename)) { + uint8_t *nolo_tags = g_new(uint8_t, 0x10000); + /* No, wait, better start at the ROM. */ + s->mpu->cpu->env.regs[15] = OMAP2_Q2_BASE + 0x400000; + + /* + * This is intended for loading the `secondary.bin' program from + * Nokia images (the NOLO bootloader). The entry point seems + * to be at OMAP2_Q2_BASE + 0x400000. + * + * The `2nd.bin' files contain some kind of earlier boot code and + * for them the entry point needs to be set to OMAP2_SRAM_BASE. + * + * The code above is for loading the `zImage' file from Nokia + * images. 
+ */
+        if (load_image_targphys(option_rom[0].name,
+                                OMAP2_Q2_BASE + 0x400000,
+                                machine->ram_size - 0x400000) < 0) {
+            error_report("Failed to load secondary bootloader %s",
+                         option_rom[0].name);
+            exit(EXIT_FAILURE);
+        }
+
+        n800_setup_nolo_tags(nolo_tags);
+        cpu_physical_memory_write(OMAP2_SRAM_BASE, nolo_tags, 0x10000);
+        g_free(nolo_tags);
+    }
+}
+
+static struct arm_boot_info n800_binfo = {
+    .loader_start = OMAP2_Q2_BASE,
+    .board_id = 0x4f7,
+    .atag_board = n800_atag_setup,
+};
+
+static struct arm_boot_info n810_binfo = {
+    .loader_start = OMAP2_Q2_BASE,
+    /* 0x60c and 0x6bf (WiMAX Edition) have been assigned, but some
+     * older versions of the bootloader (including versions that
+     * shipped with many devices) do not use them and report 5555
+     * instead. */
+    .board_id = 0x60c,
+    .atag_board = n810_atag_setup,
+};
+
+static void n800_init(MachineState *machine)
+{
+    n8x0_init(machine, &n800_binfo, 800);
+}
+
+static void n810_init(MachineState *machine)
+{
+    n8x0_init(machine, &n810_binfo, 810);
+}
+
+static void n800_class_init(ObjectClass *oc, void *data)
+{
+    MachineClass *mc = MACHINE_CLASS(oc);
+
+    mc->desc = "Nokia N800 tablet aka. RX-34 (OMAP2420)";
+    mc->init = n800_init;
+    mc->default_boot_order = "";
+    mc->ignore_memory_transaction_failures = true;
+    mc->default_cpu_type = ARM_CPU_TYPE_NAME("arm1136-r2");
+    /* Actually two chips of 0x4000000 bytes each */
+    mc->default_ram_size = 0x08000000;
+    mc->default_ram_id = "omap2.dram";
+}
+
+static const TypeInfo n800_type = {
+    .name = MACHINE_TYPE_NAME("n800"),
+    .parent = TYPE_MACHINE,
+    .class_init = n800_class_init,
+};
+
+static void n810_class_init(ObjectClass *oc, void *data)
+{
+    MachineClass *mc = MACHINE_CLASS(oc);
+
+    mc->desc = "Nokia N810 tablet aka. RX-44 (OMAP2420)";
+    mc->init = n810_init;
+    mc->default_boot_order = "";
+    mc->ignore_memory_transaction_failures = true;
+    mc->default_cpu_type = ARM_CPU_TYPE_NAME("arm1136-r2");
+    /* Actually two chips of 0x4000000 bytes each */
+    mc->default_ram_size = 0x08000000;
+    mc->default_ram_id = "omap2.dram";
+}
+
+static const TypeInfo n810_type = {
+    .name = MACHINE_TYPE_NAME("n810"),
+    .parent = TYPE_MACHINE,
+    .class_init = n810_class_init,
+};
+
+static void nseries_machine_init(void)
+{
+    type_register_static(&n800_type);
+    type_register_static(&n810_type);
+}
+
+type_init(nseries_machine_init)
diff --git a/hw/arm/omap1.c b/hw/arm/omap1.c
new file mode 100644
index 000000000..180d3788f
--- /dev/null
+++ b/hw/arm/omap1.c
@@ -0,0 +1,4085 @@
+/*
+ * TI OMAP processors emulation.
+ *
+ * Copyright (C) 2006-2008 Andrzej Zaborowski
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 or
+ * (at your option) version 3 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */ + +#include "qemu/osdep.h" +#include "qemu/error-report.h" +#include "qemu/main-loop.h" +#include "qapi/error.h" +#include "qemu-common.h" +#include "cpu.h" +#include "exec/address-spaces.h" +#include "hw/hw.h" +#include "hw/irq.h" +#include "hw/qdev-properties.h" +#include "hw/arm/boot.h" +#include "hw/arm/omap.h" +#include "sysemu/blockdev.h" +#include "sysemu/sysemu.h" +#include "hw/arm/soc_dma.h" +#include "sysemu/qtest.h" +#include "sysemu/reset.h" +#include "sysemu/runstate.h" +#include "qemu/range.h" +#include "hw/sysbus.h" +#include "qemu/cutils.h" +#include "qemu/bcd.h" + +static inline void omap_log_badwidth(const char *funcname, hwaddr addr, int sz) +{ + qemu_log_mask(LOG_GUEST_ERROR, "%s: %d-bit register %#08" HWADDR_PRIx "\n", + funcname, 8 * sz, addr); +} + +/* Should signal the TCMI/GPMC */ +uint32_t omap_badwidth_read8(void *opaque, hwaddr addr) +{ + uint8_t ret; + + omap_log_badwidth(__func__, addr, 1); + cpu_physical_memory_read(addr, &ret, 1); + return ret; +} + +void omap_badwidth_write8(void *opaque, hwaddr addr, + uint32_t value) +{ + uint8_t val8 = value; + + omap_log_badwidth(__func__, addr, 1); + cpu_physical_memory_write(addr, &val8, 1); +} + +uint32_t omap_badwidth_read16(void *opaque, hwaddr addr) +{ + uint16_t ret; + + omap_log_badwidth(__func__, addr, 2); + cpu_physical_memory_read(addr, &ret, 2); + return ret; +} + +void omap_badwidth_write16(void *opaque, hwaddr addr, + uint32_t value) +{ + uint16_t val16 = value; + + omap_log_badwidth(__func__, addr, 2); + cpu_physical_memory_write(addr, &val16, 2); +} + +uint32_t omap_badwidth_read32(void *opaque, hwaddr addr) +{ + uint32_t ret; + + omap_log_badwidth(__func__, addr, 4); + cpu_physical_memory_read(addr, &ret, 4); + return ret; +} + +void omap_badwidth_write32(void *opaque, hwaddr addr, + uint32_t value) +{ + omap_log_badwidth(__func__, addr, 4); + cpu_physical_memory_write(addr, &value, 4); +} + +/* MPU OS timers */ +struct omap_mpu_timer_s { + MemoryRegion iomem; + qemu_irq irq; + omap_clk clk; + uint32_t val; + int64_t time; + QEMUTimer *timer; + QEMUBH *tick; + int64_t rate; + int it_ena; + + int enable; + int ptv; + int ar; + int st; + uint32_t reset_val; +}; + +static inline uint32_t omap_timer_read(struct omap_mpu_timer_s *timer) +{ + uint64_t distance = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - timer->time; + + if (timer->st && timer->enable && timer->rate) + return timer->val - muldiv64(distance >> (timer->ptv + 1), + timer->rate, NANOSECONDS_PER_SECOND); + else + return timer->val; +} + +static inline void omap_timer_sync(struct omap_mpu_timer_s *timer) +{ + timer->val = omap_timer_read(timer); + timer->time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); +} + +static inline void omap_timer_update(struct omap_mpu_timer_s *timer) +{ + int64_t expires; + + if (timer->enable && timer->st && timer->rate) { + timer->val = timer->reset_val; /* Should skip this on clk enable */ + expires = muldiv64((uint64_t) timer->val << (timer->ptv + 1), + NANOSECONDS_PER_SECOND, timer->rate); + + /* If timer expiry would be sooner than in about 1 ms and + * auto-reload isn't set, then fire immediately. This is a hack + * to make systems like PalmOS run in acceptable time. PalmOS + * sets the interval to a very low value and polls the status bit + * in a busy loop when it wants to sleep just a couple of CPU + * ticks. 
*/ + if (expires > (NANOSECONDS_PER_SECOND >> 10) || timer->ar) { + timer_mod(timer->timer, timer->time + expires); + } else { + qemu_bh_schedule(timer->tick); + } + } else + timer_del(timer->timer); +} + +static void omap_timer_fire(void *opaque) +{ + struct omap_mpu_timer_s *timer = opaque; + + if (!timer->ar) { + timer->val = 0; + timer->st = 0; + } + + if (timer->it_ena) + /* Edge-triggered irq */ + qemu_irq_pulse(timer->irq); +} + +static void omap_timer_tick(void *opaque) +{ + struct omap_mpu_timer_s *timer = (struct omap_mpu_timer_s *) opaque; + + omap_timer_sync(timer); + omap_timer_fire(timer); + omap_timer_update(timer); +} + +static void omap_timer_clk_update(void *opaque, int line, int on) +{ + struct omap_mpu_timer_s *timer = (struct omap_mpu_timer_s *) opaque; + + omap_timer_sync(timer); + timer->rate = on ? omap_clk_getrate(timer->clk) : 0; + omap_timer_update(timer); +} + +static void omap_timer_clk_setup(struct omap_mpu_timer_s *timer) +{ + omap_clk_adduser(timer->clk, + qemu_allocate_irq(omap_timer_clk_update, timer, 0)); + timer->rate = omap_clk_getrate(timer->clk); +} + +static uint64_t omap_mpu_timer_read(void *opaque, hwaddr addr, + unsigned size) +{ + struct omap_mpu_timer_s *s = (struct omap_mpu_timer_s *) opaque; + + if (size != 4) { + return omap_badwidth_read32(opaque, addr); + } + + switch (addr) { + case 0x00: /* CNTL_TIMER */ + return (s->enable << 5) | (s->ptv << 2) | (s->ar << 1) | s->st; + + case 0x04: /* LOAD_TIM */ + break; + + case 0x08: /* READ_TIM */ + return omap_timer_read(s); + } + + OMAP_BAD_REG(addr); + return 0; +} + +static void omap_mpu_timer_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + struct omap_mpu_timer_s *s = (struct omap_mpu_timer_s *) opaque; + + if (size != 4) { + omap_badwidth_write32(opaque, addr, value); + return; + } + + switch (addr) { + case 0x00: /* CNTL_TIMER */ + omap_timer_sync(s); + s->enable = (value >> 5) & 1; + s->ptv = (value >> 2) & 7; + s->ar = (value >> 1) & 1; + s->st = value & 1; + omap_timer_update(s); + return; + + case 0x04: /* LOAD_TIM */ + s->reset_val = value; + return; + + case 0x08: /* READ_TIM */ + OMAP_RO_REG(addr); + break; + + default: + OMAP_BAD_REG(addr); + } +} + +static const MemoryRegionOps omap_mpu_timer_ops = { + .read = omap_mpu_timer_read, + .write = omap_mpu_timer_write, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static void omap_mpu_timer_reset(struct omap_mpu_timer_s *s) +{ + timer_del(s->timer); + s->enable = 0; + s->reset_val = 31337; + s->val = 0; + s->ptv = 0; + s->ar = 0; + s->st = 0; + s->it_ena = 1; +} + +static struct omap_mpu_timer_s *omap_mpu_timer_init(MemoryRegion *system_memory, + hwaddr base, + qemu_irq irq, omap_clk clk) +{ + struct omap_mpu_timer_s *s = g_new0(struct omap_mpu_timer_s, 1); + + s->irq = irq; + s->clk = clk; + s->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, omap_timer_tick, s); + s->tick = qemu_bh_new(omap_timer_fire, s); + omap_mpu_timer_reset(s); + omap_timer_clk_setup(s); + + memory_region_init_io(&s->iomem, NULL, &omap_mpu_timer_ops, s, + "omap-mpu-timer", 0x100); + + memory_region_add_subregion(system_memory, base, &s->iomem); + + return s; +} + +/* Watchdog timer */ +struct omap_watchdog_timer_s { + struct omap_mpu_timer_s timer; + MemoryRegion iomem; + uint8_t last_wr; + int mode; + int free; + int reset; +}; + +static uint64_t omap_wd_timer_read(void *opaque, hwaddr addr, + unsigned size) +{ + struct omap_watchdog_timer_s *s = (struct omap_watchdog_timer_s *) opaque; + + if (size != 2) { + return omap_badwidth_read16(opaque, addr); 
+ } + + switch (addr) { + case 0x00: /* CNTL_TIMER */ + return (s->timer.ptv << 9) | (s->timer.ar << 8) | + (s->timer.st << 7) | (s->free << 1); + + case 0x04: /* READ_TIMER */ + return omap_timer_read(&s->timer); + + case 0x08: /* TIMER_MODE */ + return s->mode << 15; + } + + OMAP_BAD_REG(addr); + return 0; +} + +static void omap_wd_timer_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + struct omap_watchdog_timer_s *s = (struct omap_watchdog_timer_s *) opaque; + + if (size != 2) { + omap_badwidth_write16(opaque, addr, value); + return; + } + + switch (addr) { + case 0x00: /* CNTL_TIMER */ + omap_timer_sync(&s->timer); + s->timer.ptv = (value >> 9) & 7; + s->timer.ar = (value >> 8) & 1; + s->timer.st = (value >> 7) & 1; + s->free = (value >> 1) & 1; + omap_timer_update(&s->timer); + break; + + case 0x04: /* LOAD_TIMER */ + s->timer.reset_val = value & 0xffff; + break; + + case 0x08: /* TIMER_MODE */ + if (!s->mode && ((value >> 15) & 1)) + omap_clk_get(s->timer.clk); + s->mode |= (value >> 15) & 1; + if (s->last_wr == 0xf5) { + if ((value & 0xff) == 0xa0) { + if (s->mode) { + s->mode = 0; + omap_clk_put(s->timer.clk); + } + } else { + /* XXX: on T|E hardware somehow this has no effect, + * on Zire 71 it works as specified. */ + s->reset = 1; + qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); + } + } + s->last_wr = value & 0xff; + break; + + default: + OMAP_BAD_REG(addr); + } +} + +static const MemoryRegionOps omap_wd_timer_ops = { + .read = omap_wd_timer_read, + .write = omap_wd_timer_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void omap_wd_timer_reset(struct omap_watchdog_timer_s *s) +{ + timer_del(s->timer.timer); + if (!s->mode) + omap_clk_get(s->timer.clk); + s->mode = 1; + s->free = 1; + s->reset = 0; + s->timer.enable = 1; + s->timer.it_ena = 1; + s->timer.reset_val = 0xffff; + s->timer.val = 0; + s->timer.st = 0; + s->timer.ptv = 0; + s->timer.ar = 0; + omap_timer_update(&s->timer); +} + +static struct omap_watchdog_timer_s *omap_wd_timer_init(MemoryRegion *memory, + hwaddr base, + qemu_irq irq, omap_clk clk) +{ + struct omap_watchdog_timer_s *s = g_new0(struct omap_watchdog_timer_s, 1); + + s->timer.irq = irq; + s->timer.clk = clk; + s->timer.timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, omap_timer_tick, &s->timer); + omap_wd_timer_reset(s); + omap_timer_clk_setup(&s->timer); + + memory_region_init_io(&s->iomem, NULL, &omap_wd_timer_ops, s, + "omap-wd-timer", 0x100); + memory_region_add_subregion(memory, base, &s->iomem); + + return s; +} + +/* 32-kHz timer */ +struct omap_32khz_timer_s { + struct omap_mpu_timer_s timer; + MemoryRegion iomem; +}; + +static uint64_t omap_os_timer_read(void *opaque, hwaddr addr, + unsigned size) +{ + struct omap_32khz_timer_s *s = (struct omap_32khz_timer_s *) opaque; + int offset = addr & OMAP_MPUI_REG_MASK; + + if (size != 4) { + return omap_badwidth_read32(opaque, addr); + } + + switch (offset) { + case 0x00: /* TVR */ + return s->timer.reset_val; + + case 0x04: /* TCR */ + return omap_timer_read(&s->timer); + + case 0x08: /* CR */ + return (s->timer.ar << 3) | (s->timer.it_ena << 2) | s->timer.st; + + default: + break; + } + OMAP_BAD_REG(addr); + return 0; +} + +static void omap_os_timer_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + struct omap_32khz_timer_s *s = (struct omap_32khz_timer_s *) opaque; + int offset = addr & OMAP_MPUI_REG_MASK; + + if (size != 4) { + omap_badwidth_write32(opaque, addr, value); + return; + } + + switch (offset) { + case 0x00: /* TVR */ + 
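+        /* The 32-kHz timer's load value is only 24 bits wide, hence
+         * the 0x00ffffff mask on the value stored below. */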
s->timer.reset_val = value & 0x00ffffff; + break; + + case 0x04: /* TCR */ + OMAP_RO_REG(addr); + break; + + case 0x08: /* CR */ + s->timer.ar = (value >> 3) & 1; + s->timer.it_ena = (value >> 2) & 1; + if (s->timer.st != (value & 1) || (value & 2)) { + omap_timer_sync(&s->timer); + s->timer.enable = value & 1; + s->timer.st = value & 1; + omap_timer_update(&s->timer); + } + break; + + default: + OMAP_BAD_REG(addr); + } +} + +static const MemoryRegionOps omap_os_timer_ops = { + .read = omap_os_timer_read, + .write = omap_os_timer_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void omap_os_timer_reset(struct omap_32khz_timer_s *s) +{ + timer_del(s->timer.timer); + s->timer.enable = 0; + s->timer.it_ena = 0; + s->timer.reset_val = 0x00ffffff; + s->timer.val = 0; + s->timer.st = 0; + s->timer.ptv = 0; + s->timer.ar = 1; +} + +static struct omap_32khz_timer_s *omap_os_timer_init(MemoryRegion *memory, + hwaddr base, + qemu_irq irq, omap_clk clk) +{ + struct omap_32khz_timer_s *s = g_new0(struct omap_32khz_timer_s, 1); + + s->timer.irq = irq; + s->timer.clk = clk; + s->timer.timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, omap_timer_tick, &s->timer); + omap_os_timer_reset(s); + omap_timer_clk_setup(&s->timer); + + memory_region_init_io(&s->iomem, NULL, &omap_os_timer_ops, s, + "omap-os-timer", 0x800); + memory_region_add_subregion(memory, base, &s->iomem); + + return s; +} + +/* Ultra Low-Power Device Module */ +static uint64_t omap_ulpd_pm_read(void *opaque, hwaddr addr, + unsigned size) +{ + struct omap_mpu_state_s *s = (struct omap_mpu_state_s *) opaque; + uint16_t ret; + + if (size != 2) { + return omap_badwidth_read16(opaque, addr); + } + + switch (addr) { + case 0x14: /* IT_STATUS */ + ret = s->ulpd_pm_regs[addr >> 2]; + s->ulpd_pm_regs[addr >> 2] = 0; + qemu_irq_lower(qdev_get_gpio_in(s->ih[1], OMAP_INT_GAUGE_32K)); + return ret; + + case 0x18: /* Reserved */ + case 0x1c: /* Reserved */ + case 0x20: /* Reserved */ + case 0x28: /* Reserved */ + case 0x2c: /* Reserved */ + OMAP_BAD_REG(addr); + /* fall through */ + case 0x00: /* COUNTER_32_LSB */ + case 0x04: /* COUNTER_32_MSB */ + case 0x08: /* COUNTER_HIGH_FREQ_LSB */ + case 0x0c: /* COUNTER_HIGH_FREQ_MSB */ + case 0x10: /* GAUGING_CTRL */ + case 0x24: /* SETUP_ANALOG_CELL3_ULPD1 */ + case 0x30: /* CLOCK_CTRL */ + case 0x34: /* SOFT_REQ */ + case 0x38: /* COUNTER_32_FIQ */ + case 0x3c: /* DPLL_CTRL */ + case 0x40: /* STATUS_REQ */ + /* XXX: check clk::usecount state for every clock */ + case 0x48: /* LOCL_TIME */ + case 0x4c: /* APLL_CTRL */ + case 0x50: /* POWER_CTRL */ + return s->ulpd_pm_regs[addr >> 2]; + } + + OMAP_BAD_REG(addr); + return 0; +} + +static inline void omap_ulpd_clk_update(struct omap_mpu_state_s *s, + uint16_t diff, uint16_t value) +{ + if (diff & (1 << 4)) /* USB_MCLK_EN */ + omap_clk_onoff(omap_findclk(s, "usb_clk0"), (value >> 4) & 1); + if (diff & (1 << 5)) /* DIS_USB_PVCI_CLK */ + omap_clk_onoff(omap_findclk(s, "usb_w2fc_ck"), (~value >> 5) & 1); +} + +static inline void omap_ulpd_req_update(struct omap_mpu_state_s *s, + uint16_t diff, uint16_t value) +{ + if (diff & (1 << 0)) /* SOFT_DPLL_REQ */ + omap_clk_canidle(omap_findclk(s, "dpll4"), (~value >> 0) & 1); + if (diff & (1 << 1)) /* SOFT_COM_REQ */ + omap_clk_canidle(omap_findclk(s, "com_mclk_out"), (~value >> 1) & 1); + if (diff & (1 << 2)) /* SOFT_SDW_REQ */ + omap_clk_canidle(omap_findclk(s, "bt_mclk_out"), (~value >> 2) & 1); + if (diff & (1 << 3)) /* SOFT_USB_REQ */ + omap_clk_canidle(omap_findclk(s, "usb_clk0"), (~value >> 3) & 1); +} + +static void 
omap_ulpd_pm_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + struct omap_mpu_state_s *s = (struct omap_mpu_state_s *) opaque; + int64_t now, ticks; + int div, mult; + static const int bypass_div[4] = { 1, 2, 4, 4 }; + uint16_t diff; + + if (size != 2) { + omap_badwidth_write16(opaque, addr, value); + return; + } + + switch (addr) { + case 0x00: /* COUNTER_32_LSB */ + case 0x04: /* COUNTER_32_MSB */ + case 0x08: /* COUNTER_HIGH_FREQ_LSB */ + case 0x0c: /* COUNTER_HIGH_FREQ_MSB */ + case 0x14: /* IT_STATUS */ + case 0x40: /* STATUS_REQ */ + OMAP_RO_REG(addr); + break; + + case 0x10: /* GAUGING_CTRL */ + /* Bits 0 and 1 seem to be confused in the OMAP 310 TRM */ + if ((s->ulpd_pm_regs[addr >> 2] ^ value) & 1) { + now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + + if (value & 1) + s->ulpd_gauge_start = now; + else { + now -= s->ulpd_gauge_start; + + /* 32-kHz ticks */ + ticks = muldiv64(now, 32768, NANOSECONDS_PER_SECOND); + s->ulpd_pm_regs[0x00 >> 2] = (ticks >> 0) & 0xffff; + s->ulpd_pm_regs[0x04 >> 2] = (ticks >> 16) & 0xffff; + if (ticks >> 32) /* OVERFLOW_32K */ + s->ulpd_pm_regs[0x14 >> 2] |= 1 << 2; + + /* High frequency ticks */ + ticks = muldiv64(now, 12000000, NANOSECONDS_PER_SECOND); + s->ulpd_pm_regs[0x08 >> 2] = (ticks >> 0) & 0xffff; + s->ulpd_pm_regs[0x0c >> 2] = (ticks >> 16) & 0xffff; + if (ticks >> 32) /* OVERFLOW_HI_FREQ */ + s->ulpd_pm_regs[0x14 >> 2] |= 1 << 1; + + s->ulpd_pm_regs[0x14 >> 2] |= 1 << 0; /* IT_GAUGING */ + qemu_irq_raise(qdev_get_gpio_in(s->ih[1], OMAP_INT_GAUGE_32K)); + } + } + s->ulpd_pm_regs[addr >> 2] = value; + break; + + case 0x18: /* Reserved */ + case 0x1c: /* Reserved */ + case 0x20: /* Reserved */ + case 0x28: /* Reserved */ + case 0x2c: /* Reserved */ + OMAP_BAD_REG(addr); + /* fall through */ + case 0x24: /* SETUP_ANALOG_CELL3_ULPD1 */ + case 0x38: /* COUNTER_32_FIQ */ + case 0x48: /* LOCL_TIME */ + case 0x50: /* POWER_CTRL */ + s->ulpd_pm_regs[addr >> 2] = value; + break; + + case 0x30: /* CLOCK_CTRL */ + diff = s->ulpd_pm_regs[addr >> 2] ^ value; + s->ulpd_pm_regs[addr >> 2] = value & 0x3f; + omap_ulpd_clk_update(s, diff, value); + break; + + case 0x34: /* SOFT_REQ */ + diff = s->ulpd_pm_regs[addr >> 2] ^ value; + s->ulpd_pm_regs[addr >> 2] = value & 0x1f; + omap_ulpd_req_update(s, diff, value); + break; + + case 0x3c: /* DPLL_CTRL */ + /* XXX: OMAP310 TRM claims bit 3 is PLL_ENABLE, and bit 4 is + * omitted altogether, probably a typo. */ + /* This register has identical semantics with DPLL(1:3) control + * registers, see omap_dpll_write() */ + diff = s->ulpd_pm_regs[addr >> 2] & value; + s->ulpd_pm_regs[addr >> 2] = value & 0x2fff; + if (diff & (0x3ff << 2)) { + if (value & (1 << 4)) { /* PLL_ENABLE */ + div = ((value >> 5) & 3) + 1; /* PLL_DIV */ + mult = MIN((value >> 7) & 0x1f, 1); /* PLL_MULT */ + } else { + div = bypass_div[((value >> 2) & 3)]; /* BYPASS_DIV */ + mult = 1; + } + omap_clk_setrate(omap_findclk(s, "dpll4"), div, mult); + } + + /* Enter the desired mode. */ + s->ulpd_pm_regs[addr >> 2] = + (s->ulpd_pm_regs[addr >> 2] & 0xfffe) | + ((s->ulpd_pm_regs[addr >> 2] >> 4) & 1); + + /* Act as if the lock is restored. */ + s->ulpd_pm_regs[addr >> 2] |= 2; + break; + + case 0x4c: /* APLL_CTRL */ + diff = s->ulpd_pm_regs[addr >> 2] & value; + s->ulpd_pm_regs[addr >> 2] = value & 0xf; + if (diff & (1 << 0)) /* APLL_NDPLL_SWITCH */ + omap_clk_reparent(omap_findclk(s, "ck_48m"), omap_findclk(s, + (value & (1 << 0)) ? 
"apll" : "dpll4")); + break; + + default: + OMAP_BAD_REG(addr); + } +} + +static const MemoryRegionOps omap_ulpd_pm_ops = { + .read = omap_ulpd_pm_read, + .write = omap_ulpd_pm_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void omap_ulpd_pm_reset(struct omap_mpu_state_s *mpu) +{ + mpu->ulpd_pm_regs[0x00 >> 2] = 0x0001; + mpu->ulpd_pm_regs[0x04 >> 2] = 0x0000; + mpu->ulpd_pm_regs[0x08 >> 2] = 0x0001; + mpu->ulpd_pm_regs[0x0c >> 2] = 0x0000; + mpu->ulpd_pm_regs[0x10 >> 2] = 0x0000; + mpu->ulpd_pm_regs[0x18 >> 2] = 0x01; + mpu->ulpd_pm_regs[0x1c >> 2] = 0x01; + mpu->ulpd_pm_regs[0x20 >> 2] = 0x01; + mpu->ulpd_pm_regs[0x24 >> 2] = 0x03ff; + mpu->ulpd_pm_regs[0x28 >> 2] = 0x01; + mpu->ulpd_pm_regs[0x2c >> 2] = 0x01; + omap_ulpd_clk_update(mpu, mpu->ulpd_pm_regs[0x30 >> 2], 0x0000); + mpu->ulpd_pm_regs[0x30 >> 2] = 0x0000; + omap_ulpd_req_update(mpu, mpu->ulpd_pm_regs[0x34 >> 2], 0x0000); + mpu->ulpd_pm_regs[0x34 >> 2] = 0x0000; + mpu->ulpd_pm_regs[0x38 >> 2] = 0x0001; + mpu->ulpd_pm_regs[0x3c >> 2] = 0x2211; + mpu->ulpd_pm_regs[0x40 >> 2] = 0x0000; /* FIXME: dump a real STATUS_REQ */ + mpu->ulpd_pm_regs[0x48 >> 2] = 0x960; + mpu->ulpd_pm_regs[0x4c >> 2] = 0x08; + mpu->ulpd_pm_regs[0x50 >> 2] = 0x08; + omap_clk_setrate(omap_findclk(mpu, "dpll4"), 1, 4); + omap_clk_reparent(omap_findclk(mpu, "ck_48m"), omap_findclk(mpu, "dpll4")); +} + +static void omap_ulpd_pm_init(MemoryRegion *system_memory, + hwaddr base, + struct omap_mpu_state_s *mpu) +{ + memory_region_init_io(&mpu->ulpd_pm_iomem, NULL, &omap_ulpd_pm_ops, mpu, + "omap-ulpd-pm", 0x800); + memory_region_add_subregion(system_memory, base, &mpu->ulpd_pm_iomem); + omap_ulpd_pm_reset(mpu); +} + +/* OMAP Pin Configuration */ +static uint64_t omap_pin_cfg_read(void *opaque, hwaddr addr, + unsigned size) +{ + struct omap_mpu_state_s *s = (struct omap_mpu_state_s *) opaque; + + if (size != 4) { + return omap_badwidth_read32(opaque, addr); + } + + switch (addr) { + case 0x00: /* FUNC_MUX_CTRL_0 */ + case 0x04: /* FUNC_MUX_CTRL_1 */ + case 0x08: /* FUNC_MUX_CTRL_2 */ + return s->func_mux_ctrl[addr >> 2]; + + case 0x0c: /* COMP_MODE_CTRL_0 */ + return s->comp_mode_ctrl[0]; + + case 0x10: /* FUNC_MUX_CTRL_3 */ + case 0x14: /* FUNC_MUX_CTRL_4 */ + case 0x18: /* FUNC_MUX_CTRL_5 */ + case 0x1c: /* FUNC_MUX_CTRL_6 */ + case 0x20: /* FUNC_MUX_CTRL_7 */ + case 0x24: /* FUNC_MUX_CTRL_8 */ + case 0x28: /* FUNC_MUX_CTRL_9 */ + case 0x2c: /* FUNC_MUX_CTRL_A */ + case 0x30: /* FUNC_MUX_CTRL_B */ + case 0x34: /* FUNC_MUX_CTRL_C */ + case 0x38: /* FUNC_MUX_CTRL_D */ + return s->func_mux_ctrl[(addr >> 2) - 1]; + + case 0x40: /* PULL_DWN_CTRL_0 */ + case 0x44: /* PULL_DWN_CTRL_1 */ + case 0x48: /* PULL_DWN_CTRL_2 */ + case 0x4c: /* PULL_DWN_CTRL_3 */ + return s->pull_dwn_ctrl[(addr & 0xf) >> 2]; + + case 0x50: /* GATE_INH_CTRL_0 */ + return s->gate_inh_ctrl[0]; + + case 0x60: /* VOLTAGE_CTRL_0 */ + return s->voltage_ctrl[0]; + + case 0x70: /* TEST_DBG_CTRL_0 */ + return s->test_dbg_ctrl[0]; + + case 0x80: /* MOD_CONF_CTRL_0 */ + return s->mod_conf_ctrl[0]; + } + + OMAP_BAD_REG(addr); + return 0; +} + +static inline void omap_pin_funcmux0_update(struct omap_mpu_state_s *s, + uint32_t diff, uint32_t value) +{ + if (s->compat1509) { + if (diff & (1 << 9)) /* BLUETOOTH */ + omap_clk_onoff(omap_findclk(s, "bt_mclk_out"), + (~value >> 9) & 1); + if (diff & (1 << 7)) /* USB.CLKO */ + omap_clk_onoff(omap_findclk(s, "usb.clko"), + (value >> 7) & 1); + } +} + +static inline void omap_pin_funcmux1_update(struct omap_mpu_state_s *s, + uint32_t diff, uint32_t value) 
+{ + if (s->compat1509) { + if (diff & (1U << 31)) { + /* MCBSP3_CLK_HIZ_DI */ + omap_clk_onoff(omap_findclk(s, "mcbsp3.clkx"), (value >> 31) & 1); + } + if (diff & (1 << 1)) { + /* CLK32K */ + omap_clk_onoff(omap_findclk(s, "clk32k_out"), (~value >> 1) & 1); + } + } +} + +static inline void omap_pin_modconf1_update(struct omap_mpu_state_s *s, + uint32_t diff, uint32_t value) +{ + if (diff & (1U << 31)) { + /* CONF_MOD_UART3_CLK_MODE_R */ + omap_clk_reparent(omap_findclk(s, "uart3_ck"), + omap_findclk(s, ((value >> 31) & 1) ? + "ck_48m" : "armper_ck")); + } + if (diff & (1 << 30)) /* CONF_MOD_UART2_CLK_MODE_R */ + omap_clk_reparent(omap_findclk(s, "uart2_ck"), + omap_findclk(s, ((value >> 30) & 1) ? + "ck_48m" : "armper_ck")); + if (diff & (1 << 29)) /* CONF_MOD_UART1_CLK_MODE_R */ + omap_clk_reparent(omap_findclk(s, "uart1_ck"), + omap_findclk(s, ((value >> 29) & 1) ? + "ck_48m" : "armper_ck")); + if (diff & (1 << 23)) /* CONF_MOD_MMC_SD_CLK_REQ_R */ + omap_clk_reparent(omap_findclk(s, "mmc_ck"), + omap_findclk(s, ((value >> 23) & 1) ? + "ck_48m" : "armper_ck")); + if (diff & (1 << 12)) /* CONF_MOD_COM_MCLK_12_48_S */ + omap_clk_reparent(omap_findclk(s, "com_mclk_out"), + omap_findclk(s, ((value >> 12) & 1) ? + "ck_48m" : "armper_ck")); + if (diff & (1 << 9)) /* CONF_MOD_USB_HOST_HHC_UHO */ + omap_clk_onoff(omap_findclk(s, "usb_hhc_ck"), (value >> 9) & 1); +} + +static void omap_pin_cfg_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + struct omap_mpu_state_s *s = (struct omap_mpu_state_s *) opaque; + uint32_t diff; + + if (size != 4) { + omap_badwidth_write32(opaque, addr, value); + return; + } + + switch (addr) { + case 0x00: /* FUNC_MUX_CTRL_0 */ + diff = s->func_mux_ctrl[addr >> 2] ^ value; + s->func_mux_ctrl[addr >> 2] = value; + omap_pin_funcmux0_update(s, diff, value); + return; + + case 0x04: /* FUNC_MUX_CTRL_1 */ + diff = s->func_mux_ctrl[addr >> 2] ^ value; + s->func_mux_ctrl[addr >> 2] = value; + omap_pin_funcmux1_update(s, diff, value); + return; + + case 0x08: /* FUNC_MUX_CTRL_2 */ + s->func_mux_ctrl[addr >> 2] = value; + return; + + case 0x0c: /* COMP_MODE_CTRL_0 */ + s->comp_mode_ctrl[0] = value; + s->compat1509 = (value != 0x0000eaef); + omap_pin_funcmux0_update(s, ~0, s->func_mux_ctrl[0]); + omap_pin_funcmux1_update(s, ~0, s->func_mux_ctrl[1]); + return; + + case 0x10: /* FUNC_MUX_CTRL_3 */ + case 0x14: /* FUNC_MUX_CTRL_4 */ + case 0x18: /* FUNC_MUX_CTRL_5 */ + case 0x1c: /* FUNC_MUX_CTRL_6 */ + case 0x20: /* FUNC_MUX_CTRL_7 */ + case 0x24: /* FUNC_MUX_CTRL_8 */ + case 0x28: /* FUNC_MUX_CTRL_9 */ + case 0x2c: /* FUNC_MUX_CTRL_A */ + case 0x30: /* FUNC_MUX_CTRL_B */ + case 0x34: /* FUNC_MUX_CTRL_C */ + case 0x38: /* FUNC_MUX_CTRL_D */ + s->func_mux_ctrl[(addr >> 2) - 1] = value; + return; + + case 0x40: /* PULL_DWN_CTRL_0 */ + case 0x44: /* PULL_DWN_CTRL_1 */ + case 0x48: /* PULL_DWN_CTRL_2 */ + case 0x4c: /* PULL_DWN_CTRL_3 */ + s->pull_dwn_ctrl[(addr & 0xf) >> 2] = value; + return; + + case 0x50: /* GATE_INH_CTRL_0 */ + s->gate_inh_ctrl[0] = value; + return; + + case 0x60: /* VOLTAGE_CTRL_0 */ + s->voltage_ctrl[0] = value; + return; + + case 0x70: /* TEST_DBG_CTRL_0 */ + s->test_dbg_ctrl[0] = value; + return; + + case 0x80: /* MOD_CONF_CTRL_0 */ + diff = s->mod_conf_ctrl[0] ^ value; + s->mod_conf_ctrl[0] = value; + omap_pin_modconf1_update(s, diff, value); + return; + + default: + OMAP_BAD_REG(addr); + } +} + +static const MemoryRegionOps omap_pin_cfg_ops = { + .read = omap_pin_cfg_read, + .write = omap_pin_cfg_write, + .endianness = 
DEVICE_NATIVE_ENDIAN,
+};
+
+static void omap_pin_cfg_reset(struct omap_mpu_state_s *mpu)
+{
+    /* Start in Compatibility Mode. */
+    mpu->compat1509 = 1;
+    omap_pin_funcmux0_update(mpu, mpu->func_mux_ctrl[0], 0);
+    omap_pin_funcmux1_update(mpu, mpu->func_mux_ctrl[1], 0);
+    omap_pin_modconf1_update(mpu, mpu->mod_conf_ctrl[0], 0);
+    memset(mpu->func_mux_ctrl, 0, sizeof(mpu->func_mux_ctrl));
+    memset(mpu->comp_mode_ctrl, 0, sizeof(mpu->comp_mode_ctrl));
+    memset(mpu->pull_dwn_ctrl, 0, sizeof(mpu->pull_dwn_ctrl));
+    memset(mpu->gate_inh_ctrl, 0, sizeof(mpu->gate_inh_ctrl));
+    memset(mpu->voltage_ctrl, 0, sizeof(mpu->voltage_ctrl));
+    memset(mpu->test_dbg_ctrl, 0, sizeof(mpu->test_dbg_ctrl));
+    memset(mpu->mod_conf_ctrl, 0, sizeof(mpu->mod_conf_ctrl));
+}
+
+static void omap_pin_cfg_init(MemoryRegion *system_memory,
+                              hwaddr base,
+                              struct omap_mpu_state_s *mpu)
+{
+    memory_region_init_io(&mpu->pin_cfg_iomem, NULL, &omap_pin_cfg_ops, mpu,
+                          "omap-pin-cfg", 0x800);
+    memory_region_add_subregion(system_memory, base, &mpu->pin_cfg_iomem);
+    omap_pin_cfg_reset(mpu);
+}
+
+/* Device Identification, Die Identification */
+static uint64_t omap_id_read(void *opaque, hwaddr addr,
+                             unsigned size)
+{
+    struct omap_mpu_state_s *s = (struct omap_mpu_state_s *) opaque;
+
+    if (size != 4) {
+        return omap_badwidth_read32(opaque, addr);
+    }
+
+    switch (addr) {
+    case 0xfffe1800: /* DIE_ID_LSB */
+        return 0xc9581f0e;
+    case 0xfffe1804: /* DIE_ID_MSB */
+        return 0xa8858bfa;
+
+    case 0xfffe2000: /* PRODUCT_ID_LSB */
+        return 0x00aaaafc;
+    case 0xfffe2004: /* PRODUCT_ID_MSB */
+        return 0xcafeb574;
+
+    case 0xfffed400: /* JTAG_ID_LSB */
+        switch (s->mpu_model) {
+        case omap310:
+            return 0x03310315;
+        case omap1510:
+            return 0x03310115;
+        default:
+            hw_error("%s: bad mpu model\n", __func__);
+        }
+        break;
+
+    case 0xfffed404: /* JTAG_ID_MSB */
+        switch (s->mpu_model) {
+        case omap310:
+            return 0xfb57402f;
+        case omap1510:
+            return 0xfb47002f;
+        default:
+            hw_error("%s: bad mpu model\n", __func__);
+        }
+        break;
+    }
+
+    OMAP_BAD_REG(addr);
+    return 0;
+}
+
+static void omap_id_write(void *opaque, hwaddr addr,
+                          uint64_t value, unsigned size)
+{
+    if (size != 4) {
+        omap_badwidth_write32(opaque, addr, value);
+        return;
+    }
+
+    OMAP_BAD_REG(addr);
+}
+
+static const MemoryRegionOps omap_id_ops = {
+    .read = omap_id_read,
+    .write = omap_id_write,
+    .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+static void omap_id_init(MemoryRegion *memory, struct omap_mpu_state_s *mpu)
+{
+    memory_region_init_io(&mpu->id_iomem, NULL, &omap_id_ops, mpu,
+                          "omap-id", 0x100000000ULL);
+    memory_region_init_alias(&mpu->id_iomem_e18, NULL, "omap-id-e18", &mpu->id_iomem,
+                             0xfffe1800, 0x800);
+    memory_region_add_subregion(memory, 0xfffe1800, &mpu->id_iomem_e18);
+    memory_region_init_alias(&mpu->id_iomem_ed4, NULL, "omap-id-ed4", &mpu->id_iomem,
+                             0xfffed400, 0x100);
+    memory_region_add_subregion(memory, 0xfffed400, &mpu->id_iomem_ed4);
+    if (!cpu_is_omap15xx(mpu)) {
+        memory_region_init_alias(&mpu->id_iomem_e20, NULL, "omap-id-e20",
+                                 &mpu->id_iomem, 0xfffe2000, 0x800);
+        memory_region_add_subregion(memory, 0xfffe2000, &mpu->id_iomem_e20);
+    }
+}
+
+/* MPUI Control (Dummy) */
+static uint64_t omap_mpui_read(void *opaque, hwaddr addr,
+                               unsigned size)
+{
+    struct omap_mpu_state_s *s = (struct omap_mpu_state_s *) opaque;
+
+    if (size != 4) {
+        return omap_badwidth_read32(opaque, addr);
+    }
+
+    switch (addr) {
+    case 0x00: /* CTRL */
+        return s->mpui_ctrl;
+    case 0x04: /* DEBUG_ADDR */
+        return 0x01ffffff;
+    case 0x08: /* DEBUG_DATA */
+        return
0xffffffff; + case 0x0c: /* DEBUG_FLAG */ + return 0x00000800; + case 0x10: /* STATUS */ + return 0x00000000; + + /* Not in OMAP310 */ + case 0x14: /* DSP_STATUS */ + case 0x18: /* DSP_BOOT_CONFIG */ + return 0x00000000; + case 0x1c: /* DSP_MPUI_CONFIG */ + return 0x0000ffff; + } + + OMAP_BAD_REG(addr); + return 0; +} + +static void omap_mpui_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + struct omap_mpu_state_s *s = (struct omap_mpu_state_s *) opaque; + + if (size != 4) { + omap_badwidth_write32(opaque, addr, value); + return; + } + + switch (addr) { + case 0x00: /* CTRL */ + s->mpui_ctrl = value & 0x007fffff; + break; + + case 0x04: /* DEBUG_ADDR */ + case 0x08: /* DEBUG_DATA */ + case 0x0c: /* DEBUG_FLAG */ + case 0x10: /* STATUS */ + /* Not in OMAP310 */ + case 0x14: /* DSP_STATUS */ + OMAP_RO_REG(addr); + break; + case 0x18: /* DSP_BOOT_CONFIG */ + case 0x1c: /* DSP_MPUI_CONFIG */ + break; + + default: + OMAP_BAD_REG(addr); + } +} + +static const MemoryRegionOps omap_mpui_ops = { + .read = omap_mpui_read, + .write = omap_mpui_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void omap_mpui_reset(struct omap_mpu_state_s *s) +{ + s->mpui_ctrl = 0x0003ff1b; +} + +static void omap_mpui_init(MemoryRegion *memory, hwaddr base, + struct omap_mpu_state_s *mpu) +{ + memory_region_init_io(&mpu->mpui_iomem, NULL, &omap_mpui_ops, mpu, + "omap-mpui", 0x100); + memory_region_add_subregion(memory, base, &mpu->mpui_iomem); + + omap_mpui_reset(mpu); +} + +/* TIPB Bridges */ +struct omap_tipb_bridge_s { + qemu_irq abort; + MemoryRegion iomem; + + int width_intr; + uint16_t control; + uint16_t alloc; + uint16_t buffer; + uint16_t enh_control; +}; + +static uint64_t omap_tipb_bridge_read(void *opaque, hwaddr addr, + unsigned size) +{ + struct omap_tipb_bridge_s *s = (struct omap_tipb_bridge_s *) opaque; + + if (size < 2) { + return omap_badwidth_read16(opaque, addr); + } + + switch (addr) { + case 0x00: /* TIPB_CNTL */ + return s->control; + case 0x04: /* TIPB_BUS_ALLOC */ + return s->alloc; + case 0x08: /* MPU_TIPB_CNTL */ + return s->buffer; + case 0x0c: /* ENHANCED_TIPB_CNTL */ + return s->enh_control; + case 0x10: /* ADDRESS_DBG */ + case 0x14: /* DATA_DEBUG_LOW */ + case 0x18: /* DATA_DEBUG_HIGH */ + return 0xffff; + case 0x1c: /* DEBUG_CNTR_SIG */ + return 0x00f8; + } + + OMAP_BAD_REG(addr); + return 0; +} + +static void omap_tipb_bridge_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + struct omap_tipb_bridge_s *s = (struct omap_tipb_bridge_s *) opaque; + + if (size < 2) { + omap_badwidth_write16(opaque, addr, value); + return; + } + + switch (addr) { + case 0x00: /* TIPB_CNTL */ + s->control = value & 0xffff; + break; + + case 0x04: /* TIPB_BUS_ALLOC */ + s->alloc = value & 0x003f; + break; + + case 0x08: /* MPU_TIPB_CNTL */ + s->buffer = value & 0x0003; + break; + + case 0x0c: /* ENHANCED_TIPB_CNTL */ + s->width_intr = !(value & 2); + s->enh_control = value & 0x000f; + break; + + case 0x10: /* ADDRESS_DBG */ + case 0x14: /* DATA_DEBUG_LOW */ + case 0x18: /* DATA_DEBUG_HIGH */ + case 0x1c: /* DEBUG_CNTR_SIG */ + OMAP_RO_REG(addr); + break; + + default: + OMAP_BAD_REG(addr); + } +} + +static const MemoryRegionOps omap_tipb_bridge_ops = { + .read = omap_tipb_bridge_read, + .write = omap_tipb_bridge_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void omap_tipb_bridge_reset(struct omap_tipb_bridge_s *s) +{ + s->control = 0xffff; + s->alloc = 0x0009; + s->buffer = 0x0000; + s->enh_control = 0x000f; +} + +static struct omap_tipb_bridge_s 
*omap_tipb_bridge_init( + MemoryRegion *memory, hwaddr base, + qemu_irq abort_irq, omap_clk clk) +{ + struct omap_tipb_bridge_s *s = g_new0(struct omap_tipb_bridge_s, 1); + + s->abort = abort_irq; + omap_tipb_bridge_reset(s); + + memory_region_init_io(&s->iomem, NULL, &omap_tipb_bridge_ops, s, + "omap-tipb-bridge", 0x100); + memory_region_add_subregion(memory, base, &s->iomem); + + return s; +} + +/* Dummy Traffic Controller's Memory Interface */ +static uint64_t omap_tcmi_read(void *opaque, hwaddr addr, + unsigned size) +{ + struct omap_mpu_state_s *s = (struct omap_mpu_state_s *) opaque; + uint32_t ret; + + if (size != 4) { + return omap_badwidth_read32(opaque, addr); + } + + switch (addr) { + case 0x00: /* IMIF_PRIO */ + case 0x04: /* EMIFS_PRIO */ + case 0x08: /* EMIFF_PRIO */ + case 0x0c: /* EMIFS_CONFIG */ + case 0x10: /* EMIFS_CS0_CONFIG */ + case 0x14: /* EMIFS_CS1_CONFIG */ + case 0x18: /* EMIFS_CS2_CONFIG */ + case 0x1c: /* EMIFS_CS3_CONFIG */ + case 0x24: /* EMIFF_MRS */ + case 0x28: /* TIMEOUT1 */ + case 0x2c: /* TIMEOUT2 */ + case 0x30: /* TIMEOUT3 */ + case 0x3c: /* EMIFF_SDRAM_CONFIG_2 */ + case 0x40: /* EMIFS_CFG_DYN_WAIT */ + return s->tcmi_regs[addr >> 2]; + + case 0x20: /* EMIFF_SDRAM_CONFIG */ + ret = s->tcmi_regs[addr >> 2]; + s->tcmi_regs[addr >> 2] &= ~1; /* XXX: Clear SLRF on SDRAM access */ + /* XXX: We can try using the VGA_DIRTY flag for this */ + return ret; + } + + OMAP_BAD_REG(addr); + return 0; +} + +static void omap_tcmi_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + struct omap_mpu_state_s *s = (struct omap_mpu_state_s *) opaque; + + if (size != 4) { + omap_badwidth_write32(opaque, addr, value); + return; + } + + switch (addr) { + case 0x00: /* IMIF_PRIO */ + case 0x04: /* EMIFS_PRIO */ + case 0x08: /* EMIFF_PRIO */ + case 0x10: /* EMIFS_CS0_CONFIG */ + case 0x14: /* EMIFS_CS1_CONFIG */ + case 0x18: /* EMIFS_CS2_CONFIG */ + case 0x1c: /* EMIFS_CS3_CONFIG */ + case 0x20: /* EMIFF_SDRAM_CONFIG */ + case 0x24: /* EMIFF_MRS */ + case 0x28: /* TIMEOUT1 */ + case 0x2c: /* TIMEOUT2 */ + case 0x30: /* TIMEOUT3 */ + case 0x3c: /* EMIFF_SDRAM_CONFIG_2 */ + case 0x40: /* EMIFS_CFG_DYN_WAIT */ + s->tcmi_regs[addr >> 2] = value; + break; + case 0x0c: /* EMIFS_CONFIG */ + s->tcmi_regs[addr >> 2] = (value & 0xf) | (1 << 4); + break; + + default: + OMAP_BAD_REG(addr); + } +} + +static const MemoryRegionOps omap_tcmi_ops = { + .read = omap_tcmi_read, + .write = omap_tcmi_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void omap_tcmi_reset(struct omap_mpu_state_s *mpu) +{ + mpu->tcmi_regs[0x00 >> 2] = 0x00000000; + mpu->tcmi_regs[0x04 >> 2] = 0x00000000; + mpu->tcmi_regs[0x08 >> 2] = 0x00000000; + mpu->tcmi_regs[0x0c >> 2] = 0x00000010; + mpu->tcmi_regs[0x10 >> 2] = 0x0010fffb; + mpu->tcmi_regs[0x14 >> 2] = 0x0010fffb; + mpu->tcmi_regs[0x18 >> 2] = 0x0010fffb; + mpu->tcmi_regs[0x1c >> 2] = 0x0010fffb; + mpu->tcmi_regs[0x20 >> 2] = 0x00618800; + mpu->tcmi_regs[0x24 >> 2] = 0x00000037; + mpu->tcmi_regs[0x28 >> 2] = 0x00000000; + mpu->tcmi_regs[0x2c >> 2] = 0x00000000; + mpu->tcmi_regs[0x30 >> 2] = 0x00000000; + mpu->tcmi_regs[0x3c >> 2] = 0x00000003; + mpu->tcmi_regs[0x40 >> 2] = 0x00000000; +} + +static void omap_tcmi_init(MemoryRegion *memory, hwaddr base, + struct omap_mpu_state_s *mpu) +{ + memory_region_init_io(&mpu->tcmi_iomem, NULL, &omap_tcmi_ops, mpu, + "omap-tcmi", 0x100); + memory_region_add_subregion(memory, base, &mpu->tcmi_iomem); + omap_tcmi_reset(mpu); +} + +/* Digital phase-locked loops control */ +struct dpll_ctl_s { + 
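+    /* Per-DPLL control state; `mode` holds the CTL_REG value that is
+     * presented back to the guest. */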
MemoryRegion iomem; + uint16_t mode; + omap_clk dpll; +}; + +static uint64_t omap_dpll_read(void *opaque, hwaddr addr, + unsigned size) +{ + struct dpll_ctl_s *s = (struct dpll_ctl_s *) opaque; + + if (size != 2) { + return omap_badwidth_read16(opaque, addr); + } + + if (addr == 0x00) /* CTL_REG */ + return s->mode; + + OMAP_BAD_REG(addr); + return 0; +} + +static void omap_dpll_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + struct dpll_ctl_s *s = (struct dpll_ctl_s *) opaque; + uint16_t diff; + static const int bypass_div[4] = { 1, 2, 4, 4 }; + int div, mult; + + if (size != 2) { + omap_badwidth_write16(opaque, addr, value); + return; + } + + if (addr == 0x00) { /* CTL_REG */ + /* See omap_ulpd_pm_write() too */ + diff = s->mode & value; + s->mode = value & 0x2fff; + if (diff & (0x3ff << 2)) { + if (value & (1 << 4)) { /* PLL_ENABLE */ + div = ((value >> 5) & 3) + 1; /* PLL_DIV */ + mult = MIN((value >> 7) & 0x1f, 1); /* PLL_MULT */ + } else { + div = bypass_div[((value >> 2) & 3)]; /* BYPASS_DIV */ + mult = 1; + } + omap_clk_setrate(s->dpll, div, mult); + } + + /* Enter the desired mode. */ + s->mode = (s->mode & 0xfffe) | ((s->mode >> 4) & 1); + + /* Act as if the lock is restored. */ + s->mode |= 2; + } else { + OMAP_BAD_REG(addr); + } +} + +static const MemoryRegionOps omap_dpll_ops = { + .read = omap_dpll_read, + .write = omap_dpll_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void omap_dpll_reset(struct dpll_ctl_s *s) +{ + s->mode = 0x2002; + omap_clk_setrate(s->dpll, 1, 1); +} + +static struct dpll_ctl_s *omap_dpll_init(MemoryRegion *memory, + hwaddr base, omap_clk clk) +{ + struct dpll_ctl_s *s = g_malloc0(sizeof(*s)); + memory_region_init_io(&s->iomem, NULL, &omap_dpll_ops, s, "omap-dpll", 0x100); + + s->dpll = clk; + omap_dpll_reset(s); + + memory_region_add_subregion(memory, base, &s->iomem); + return s; +} + +/* MPU Clock/Reset/Power Mode Control */ +static uint64_t omap_clkm_read(void *opaque, hwaddr addr, + unsigned size) +{ + struct omap_mpu_state_s *s = (struct omap_mpu_state_s *) opaque; + + if (size != 2) { + return omap_badwidth_read16(opaque, addr); + } + + switch (addr) { + case 0x00: /* ARM_CKCTL */ + return s->clkm.arm_ckctl; + + case 0x04: /* ARM_IDLECT1 */ + return s->clkm.arm_idlect1; + + case 0x08: /* ARM_IDLECT2 */ + return s->clkm.arm_idlect2; + + case 0x0c: /* ARM_EWUPCT */ + return s->clkm.arm_ewupct; + + case 0x10: /* ARM_RSTCT1 */ + return s->clkm.arm_rstct1; + + case 0x14: /* ARM_RSTCT2 */ + return s->clkm.arm_rstct2; + + case 0x18: /* ARM_SYSST */ + return (s->clkm.clocking_scheme << 11) | s->clkm.cold_start; + + case 0x1c: /* ARM_CKOUT1 */ + return s->clkm.arm_ckout1; + + case 0x20: /* ARM_CKOUT2 */ + break; + } + + OMAP_BAD_REG(addr); + return 0; +} + +static inline void omap_clkm_ckctl_update(struct omap_mpu_state_s *s, + uint16_t diff, uint16_t value) +{ + omap_clk clk; + + if (diff & (1 << 14)) { /* ARM_INTHCK_SEL */ + if (value & (1 << 14)) + /* Reserved */; + else { + clk = omap_findclk(s, "arminth_ck"); + omap_clk_reparent(clk, omap_findclk(s, "tc_ck")); + } + } + if (diff & (1 << 12)) { /* ARM_TIMXO */ + clk = omap_findclk(s, "armtim_ck"); + if (value & (1 << 12)) + omap_clk_reparent(clk, omap_findclk(s, "clkin")); + else + omap_clk_reparent(clk, omap_findclk(s, "ck_gen1")); + } + /* XXX: en_dspck */ + if (diff & (3 << 10)) { /* DSPMMUDIV */ + clk = omap_findclk(s, "dspmmu_ck"); + omap_clk_setrate(clk, 1 << ((value >> 10) & 3), 1); + } + if (diff & (3 << 8)) { /* TCDIV */ + clk = omap_findclk(s, "tc_ck"); + 
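+        /* Each 2-bit divider field in ARM_CKCTL selects a power-of-two
+         * divisor: 00 -> /1, 01 -> /2, 10 -> /4, 11 -> /8. */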
omap_clk_setrate(clk, 1 << ((value >> 8) & 3), 1); + } + if (diff & (3 << 6)) { /* DSPDIV */ + clk = omap_findclk(s, "dsp_ck"); + omap_clk_setrate(clk, 1 << ((value >> 6) & 3), 1); + } + if (diff & (3 << 4)) { /* ARMDIV */ + clk = omap_findclk(s, "arm_ck"); + omap_clk_setrate(clk, 1 << ((value >> 4) & 3), 1); + } + if (diff & (3 << 2)) { /* LCDDIV */ + clk = omap_findclk(s, "lcd_ck"); + omap_clk_setrate(clk, 1 << ((value >> 2) & 3), 1); + } + if (diff & (3 << 0)) { /* PERDIV */ + clk = omap_findclk(s, "armper_ck"); + omap_clk_setrate(clk, 1 << ((value >> 0) & 3), 1); + } +} + +static inline void omap_clkm_idlect1_update(struct omap_mpu_state_s *s, + uint16_t diff, uint16_t value) +{ + omap_clk clk; + + if (value & (1 << 11)) { /* SETARM_IDLE */ + cpu_interrupt(CPU(s->cpu), CPU_INTERRUPT_HALT); + } + if (!(value & (1 << 10))) { /* WKUP_MODE */ + /* XXX: disable wakeup from IRQ */ + qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN); + } + +#define SET_CANIDLE(clock, bit) \ + if (diff & (1 << bit)) { \ + clk = omap_findclk(s, clock); \ + omap_clk_canidle(clk, (value >> bit) & 1); \ + } + SET_CANIDLE("mpuwd_ck", 0) /* IDLWDT_ARM */ + SET_CANIDLE("armxor_ck", 1) /* IDLXORP_ARM */ + SET_CANIDLE("mpuper_ck", 2) /* IDLPER_ARM */ + SET_CANIDLE("lcd_ck", 3) /* IDLLCD_ARM */ + SET_CANIDLE("lb_ck", 4) /* IDLLB_ARM */ + SET_CANIDLE("hsab_ck", 5) /* IDLHSAB_ARM */ + SET_CANIDLE("tipb_ck", 6) /* IDLIF_ARM */ + SET_CANIDLE("dma_ck", 6) /* IDLIF_ARM */ + SET_CANIDLE("tc_ck", 6) /* IDLIF_ARM */ + SET_CANIDLE("dpll1", 7) /* IDLDPLL_ARM */ + SET_CANIDLE("dpll2", 7) /* IDLDPLL_ARM */ + SET_CANIDLE("dpll3", 7) /* IDLDPLL_ARM */ + SET_CANIDLE("mpui_ck", 8) /* IDLAPI_ARM */ + SET_CANIDLE("armtim_ck", 9) /* IDLTIM_ARM */ +} + +static inline void omap_clkm_idlect2_update(struct omap_mpu_state_s *s, + uint16_t diff, uint16_t value) +{ + omap_clk clk; + +#define SET_ONOFF(clock, bit) \ + if (diff & (1 << bit)) { \ + clk = omap_findclk(s, clock); \ + omap_clk_onoff(clk, (value >> bit) & 1); \ + } + SET_ONOFF("mpuwd_ck", 0) /* EN_WDTCK */ + SET_ONOFF("armxor_ck", 1) /* EN_XORPCK */ + SET_ONOFF("mpuper_ck", 2) /* EN_PERCK */ + SET_ONOFF("lcd_ck", 3) /* EN_LCDCK */ + SET_ONOFF("lb_ck", 4) /* EN_LBCK */ + SET_ONOFF("hsab_ck", 5) /* EN_HSABCK */ + SET_ONOFF("mpui_ck", 6) /* EN_APICK */ + SET_ONOFF("armtim_ck", 7) /* EN_TIMCK */ + SET_CANIDLE("dma_ck", 8) /* DMACK_REQ */ + SET_ONOFF("arm_gpio_ck", 9) /* EN_GPIOCK */ + SET_ONOFF("lbfree_ck", 10) /* EN_LBFREECK */ +} + +static inline void omap_clkm_ckout1_update(struct omap_mpu_state_s *s, + uint16_t diff, uint16_t value) +{ + omap_clk clk; + + if (diff & (3 << 4)) { /* TCLKOUT */ + clk = omap_findclk(s, "tclk_out"); + switch ((value >> 4) & 3) { + case 1: + omap_clk_reparent(clk, omap_findclk(s, "ck_gen3")); + omap_clk_onoff(clk, 1); + break; + case 2: + omap_clk_reparent(clk, omap_findclk(s, "tc_ck")); + omap_clk_onoff(clk, 1); + break; + default: + omap_clk_onoff(clk, 0); + } + } + if (diff & (3 << 2)) { /* DCLKOUT */ + clk = omap_findclk(s, "dclk_out"); + switch ((value >> 2) & 3) { + case 0: + omap_clk_reparent(clk, omap_findclk(s, "dspmmu_ck")); + break; + case 1: + omap_clk_reparent(clk, omap_findclk(s, "ck_gen2")); + break; + case 2: + omap_clk_reparent(clk, omap_findclk(s, "dsp_ck")); + break; + case 3: + omap_clk_reparent(clk, omap_findclk(s, "ck_ref14")); + break; + } + } + if (diff & (3 << 0)) { /* ACLKOUT */ + clk = omap_findclk(s, "aclk_out"); + switch ((value >> 0) & 3) { + case 1: + omap_clk_reparent(clk, omap_findclk(s, "ck_gen1")); + 
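+            /* Route ACLKOUT from ck_gen1 and ungate it. */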
omap_clk_onoff(clk, 1); + break; + case 2: + omap_clk_reparent(clk, omap_findclk(s, "arm_ck")); + omap_clk_onoff(clk, 1); + break; + case 3: + omap_clk_reparent(clk, omap_findclk(s, "ck_ref14")); + omap_clk_onoff(clk, 1); + break; + default: + omap_clk_onoff(clk, 0); + } + } +} + +static void omap_clkm_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + struct omap_mpu_state_s *s = (struct omap_mpu_state_s *) opaque; + uint16_t diff; + omap_clk clk; + static const char *clkschemename[8] = { + "fully synchronous", "fully asynchronous", "synchronous scalable", + "mix mode 1", "mix mode 2", "bypass mode", "mix mode 3", "mix mode 4", + }; + + if (size != 2) { + omap_badwidth_write16(opaque, addr, value); + return; + } + + switch (addr) { + case 0x00: /* ARM_CKCTL */ + diff = s->clkm.arm_ckctl ^ value; + s->clkm.arm_ckctl = value & 0x7fff; + omap_clkm_ckctl_update(s, diff, value); + return; + + case 0x04: /* ARM_IDLECT1 */ + diff = s->clkm.arm_idlect1 ^ value; + s->clkm.arm_idlect1 = value & 0x0fff; + omap_clkm_idlect1_update(s, diff, value); + return; + + case 0x08: /* ARM_IDLECT2 */ + diff = s->clkm.arm_idlect2 ^ value; + s->clkm.arm_idlect2 = value & 0x07ff; + omap_clkm_idlect2_update(s, diff, value); + return; + + case 0x0c: /* ARM_EWUPCT */ + s->clkm.arm_ewupct = value & 0x003f; + return; + + case 0x10: /* ARM_RSTCT1 */ + diff = s->clkm.arm_rstct1 ^ value; + s->clkm.arm_rstct1 = value & 0x0007; + if (value & 9) { + qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); + s->clkm.cold_start = 0xa; + } + if (diff & ~value & 4) { /* DSP_RST */ + omap_mpui_reset(s); + omap_tipb_bridge_reset(s->private_tipb); + omap_tipb_bridge_reset(s->public_tipb); + } + if (diff & 2) { /* DSP_EN */ + clk = omap_findclk(s, "dsp_ck"); + omap_clk_canidle(clk, (~value >> 1) & 1); + } + return; + + case 0x14: /* ARM_RSTCT2 */ + s->clkm.arm_rstct2 = value & 0x0001; + return; + + case 0x18: /* ARM_SYSST */ + if ((s->clkm.clocking_scheme ^ (value >> 11)) & 7) { + s->clkm.clocking_scheme = (value >> 11) & 7; + printf("%s: clocking scheme set to %s\n", __func__, + clkschemename[s->clkm.clocking_scheme]); + } + s->clkm.cold_start &= value & 0x3f; + return; + + case 0x1c: /* ARM_CKOUT1 */ + diff = s->clkm.arm_ckout1 ^ value; + s->clkm.arm_ckout1 = value & 0x003f; + omap_clkm_ckout1_update(s, diff, value); + return; + + case 0x20: /* ARM_CKOUT2 */ + default: + OMAP_BAD_REG(addr); + } +} + +static const MemoryRegionOps omap_clkm_ops = { + .read = omap_clkm_read, + .write = omap_clkm_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static uint64_t omap_clkdsp_read(void *opaque, hwaddr addr, + unsigned size) +{ + struct omap_mpu_state_s *s = (struct omap_mpu_state_s *) opaque; + CPUState *cpu = CPU(s->cpu); + + if (size != 2) { + return omap_badwidth_read16(opaque, addr); + } + + switch (addr) { + case 0x04: /* DSP_IDLECT1 */ + return s->clkm.dsp_idlect1; + + case 0x08: /* DSP_IDLECT2 */ + return s->clkm.dsp_idlect2; + + case 0x14: /* DSP_RSTCT2 */ + return s->clkm.dsp_rstct2; + + case 0x18: /* DSP_SYSST */ + return (s->clkm.clocking_scheme << 11) | s->clkm.cold_start | + (cpu->halted << 6); /* Quite useless... 
*/ + } + + OMAP_BAD_REG(addr); + return 0; +} + +static inline void omap_clkdsp_idlect1_update(struct omap_mpu_state_s *s, + uint16_t diff, uint16_t value) +{ + omap_clk clk; + + SET_CANIDLE("dspxor_ck", 1); /* IDLXORP_DSP */ +} + +static inline void omap_clkdsp_idlect2_update(struct omap_mpu_state_s *s, + uint16_t diff, uint16_t value) +{ + omap_clk clk; + + SET_ONOFF("dspxor_ck", 1); /* EN_XORPCK */ +} + +static void omap_clkdsp_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + struct omap_mpu_state_s *s = (struct omap_mpu_state_s *) opaque; + uint16_t diff; + + if (size != 2) { + omap_badwidth_write16(opaque, addr, value); + return; + } + + switch (addr) { + case 0x04: /* DSP_IDLECT1 */ + diff = s->clkm.dsp_idlect1 ^ value; + s->clkm.dsp_idlect1 = value & 0x01f7; + omap_clkdsp_idlect1_update(s, diff, value); + break; + + case 0x08: /* DSP_IDLECT2 */ + s->clkm.dsp_idlect2 = value & 0x0037; + diff = s->clkm.dsp_idlect1 ^ value; + omap_clkdsp_idlect2_update(s, diff, value); + break; + + case 0x14: /* DSP_RSTCT2 */ + s->clkm.dsp_rstct2 = value & 0x0001; + break; + + case 0x18: /* DSP_SYSST */ + s->clkm.cold_start &= value & 0x3f; + break; + + default: + OMAP_BAD_REG(addr); + } +} + +static const MemoryRegionOps omap_clkdsp_ops = { + .read = omap_clkdsp_read, + .write = omap_clkdsp_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void omap_clkm_reset(struct omap_mpu_state_s *s) +{ + if (s->wdt && s->wdt->reset) + s->clkm.cold_start = 0x6; + s->clkm.clocking_scheme = 0; + omap_clkm_ckctl_update(s, ~0, 0x3000); + s->clkm.arm_ckctl = 0x3000; + omap_clkm_idlect1_update(s, s->clkm.arm_idlect1 ^ 0x0400, 0x0400); + s->clkm.arm_idlect1 = 0x0400; + omap_clkm_idlect2_update(s, s->clkm.arm_idlect2 ^ 0x0100, 0x0100); + s->clkm.arm_idlect2 = 0x0100; + s->clkm.arm_ewupct = 0x003f; + s->clkm.arm_rstct1 = 0x0000; + s->clkm.arm_rstct2 = 0x0000; + s->clkm.arm_ckout1 = 0x0015; + s->clkm.dpll1_mode = 0x2002; + omap_clkdsp_idlect1_update(s, s->clkm.dsp_idlect1 ^ 0x0040, 0x0040); + s->clkm.dsp_idlect1 = 0x0040; + omap_clkdsp_idlect2_update(s, ~0, 0x0000); + s->clkm.dsp_idlect2 = 0x0000; + s->clkm.dsp_rstct2 = 0x0000; +} + +static void omap_clkm_init(MemoryRegion *memory, hwaddr mpu_base, + hwaddr dsp_base, struct omap_mpu_state_s *s) +{ + memory_region_init_io(&s->clkm_iomem, NULL, &omap_clkm_ops, s, + "omap-clkm", 0x100); + memory_region_init_io(&s->clkdsp_iomem, NULL, &omap_clkdsp_ops, s, + "omap-clkdsp", 0x1000); + + s->clkm.arm_idlect1 = 0x03ff; + s->clkm.arm_idlect2 = 0x0100; + s->clkm.dsp_idlect1 = 0x0002; + omap_clkm_reset(s); + s->clkm.cold_start = 0x3a; + + memory_region_add_subregion(memory, mpu_base, &s->clkm_iomem); + memory_region_add_subregion(memory, dsp_base, &s->clkdsp_iomem); +} + +/* MPU I/O */ +struct omap_mpuio_s { + qemu_irq irq; + qemu_irq kbd_irq; + qemu_irq *in; + qemu_irq handler[16]; + qemu_irq wakeup; + MemoryRegion iomem; + + uint16_t inputs; + uint16_t outputs; + uint16_t dir; + uint16_t edge; + uint16_t mask; + uint16_t ints; + + uint16_t debounce; + uint16_t latch; + uint8_t event; + + uint8_t buttons[5]; + uint8_t row_latch; + uint8_t cols; + int kbd_mask; + int clk; +}; + +static void omap_mpuio_set(void *opaque, int line, int level) +{ + struct omap_mpuio_s *s = (struct omap_mpuio_s *) opaque; + uint16_t prev = s->inputs; + + if (level) + s->inputs |= 1 << line; + else + s->inputs &= ~(1 << line); + + if (((1 << line) & s->dir & ~s->mask) && s->clk) { + if ((s->edge & s->inputs & ~prev) | (~s->edge & ~s->inputs & prev)) { + s->ints |= 1 << line; + 
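+            /* The input changed with the polarity selected in
+             * GPIO_INT_EDGE_REG: signal the latched interrupt. */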
qemu_irq_raise(s->irq); + /* TODO: wakeup */ + } + if ((s->event & (1 << 0)) && /* SET_GPIO_EVENT_MODE */ + (s->event >> 1) == line) /* PIN_SELECT */ + s->latch = s->inputs; + } +} + +static void omap_mpuio_kbd_update(struct omap_mpuio_s *s) +{ + int i; + uint8_t *row, rows = 0, cols = ~s->cols; + + for (row = s->buttons + 4, i = 1 << 4; i; row --, i >>= 1) + if (*row & cols) + rows |= i; + + qemu_set_irq(s->kbd_irq, rows && !s->kbd_mask && s->clk); + s->row_latch = ~rows; +} + +static uint64_t omap_mpuio_read(void *opaque, hwaddr addr, + unsigned size) +{ + struct omap_mpuio_s *s = (struct omap_mpuio_s *) opaque; + int offset = addr & OMAP_MPUI_REG_MASK; + uint16_t ret; + + if (size != 2) { + return omap_badwidth_read16(opaque, addr); + } + + switch (offset) { + case 0x00: /* INPUT_LATCH */ + return s->inputs; + + case 0x04: /* OUTPUT_REG */ + return s->outputs; + + case 0x08: /* IO_CNTL */ + return s->dir; + + case 0x10: /* KBR_LATCH */ + return s->row_latch; + + case 0x14: /* KBC_REG */ + return s->cols; + + case 0x18: /* GPIO_EVENT_MODE_REG */ + return s->event; + + case 0x1c: /* GPIO_INT_EDGE_REG */ + return s->edge; + + case 0x20: /* KBD_INT */ + return (~s->row_latch & 0x1f) && !s->kbd_mask; + + case 0x24: /* GPIO_INT */ + ret = s->ints; + s->ints &= s->mask; + if (ret) + qemu_irq_lower(s->irq); + return ret; + + case 0x28: /* KBD_MASKIT */ + return s->kbd_mask; + + case 0x2c: /* GPIO_MASKIT */ + return s->mask; + + case 0x30: /* GPIO_DEBOUNCING_REG */ + return s->debounce; + + case 0x34: /* GPIO_LATCH_REG */ + return s->latch; + } + + OMAP_BAD_REG(addr); + return 0; +} + +static void omap_mpuio_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + struct omap_mpuio_s *s = (struct omap_mpuio_s *) opaque; + int offset = addr & OMAP_MPUI_REG_MASK; + uint16_t diff; + int ln; + + if (size != 2) { + omap_badwidth_write16(opaque, addr, value); + return; + } + + switch (offset) { + case 0x04: /* OUTPUT_REG */ + diff = (s->outputs ^ value) & ~s->dir; + s->outputs = value; + while ((ln = ctz32(diff)) != 32) { + if (s->handler[ln]) + qemu_set_irq(s->handler[ln], (value >> ln) & 1); + diff &= ~(1 << ln); + } + break; + + case 0x08: /* IO_CNTL */ + diff = s->outputs & (s->dir ^ value); + s->dir = value; + + value = s->outputs & ~s->dir; + while ((ln = ctz32(diff)) != 32) { + if (s->handler[ln]) + qemu_set_irq(s->handler[ln], (value >> ln) & 1); + diff &= ~(1 << ln); + } + break; + + case 0x14: /* KBC_REG */ + s->cols = value; + omap_mpuio_kbd_update(s); + break; + + case 0x18: /* GPIO_EVENT_MODE_REG */ + s->event = value & 0x1f; + break; + + case 0x1c: /* GPIO_INT_EDGE_REG */ + s->edge = value; + break; + + case 0x28: /* KBD_MASKIT */ + s->kbd_mask = value & 1; + omap_mpuio_kbd_update(s); + break; + + case 0x2c: /* GPIO_MASKIT */ + s->mask = value; + break; + + case 0x30: /* GPIO_DEBOUNCING_REG */ + s->debounce = value & 0x1ff; + break; + + case 0x00: /* INPUT_LATCH */ + case 0x10: /* KBR_LATCH */ + case 0x20: /* KBD_INT */ + case 0x24: /* GPIO_INT */ + case 0x34: /* GPIO_LATCH_REG */ + OMAP_RO_REG(addr); + return; + + default: + OMAP_BAD_REG(addr); + return; + } +} + +static const MemoryRegionOps omap_mpuio_ops = { + .read = omap_mpuio_read, + .write = omap_mpuio_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void omap_mpuio_reset(struct omap_mpuio_s *s) +{ + s->inputs = 0; + s->outputs = 0; + s->dir = ~0; + s->event = 0; + s->edge = 0; + s->kbd_mask = 0; + s->mask = 0; + s->debounce = 0; + s->latch = 0; + s->ints = 0; + s->row_latch = 0x1f; + s->clk = 1; +} + 
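+/* Invoked when the MPUIO module clock is gated on or off.  The keyboard
+ * interrupt is only driven while the clock runs, so the matrix state is
+ * re-evaluated whenever the clock comes back on. */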
+static void omap_mpuio_onoff(void *opaque, int line, int on) +{ + struct omap_mpuio_s *s = (struct omap_mpuio_s *) opaque; + + s->clk = on; + if (on) + omap_mpuio_kbd_update(s); +} + +static struct omap_mpuio_s *omap_mpuio_init(MemoryRegion *memory, + hwaddr base, + qemu_irq kbd_int, qemu_irq gpio_int, qemu_irq wakeup, + omap_clk clk) +{ + struct omap_mpuio_s *s = g_new0(struct omap_mpuio_s, 1); + + s->irq = gpio_int; + s->kbd_irq = kbd_int; + s->wakeup = wakeup; + s->in = qemu_allocate_irqs(omap_mpuio_set, s, 16); + omap_mpuio_reset(s); + + memory_region_init_io(&s->iomem, NULL, &omap_mpuio_ops, s, + "omap-mpuio", 0x800); + memory_region_add_subregion(memory, base, &s->iomem); + + omap_clk_adduser(clk, qemu_allocate_irq(omap_mpuio_onoff, s, 0)); + + return s; +} + +qemu_irq *omap_mpuio_in_get(struct omap_mpuio_s *s) +{ + return s->in; +} + +void omap_mpuio_out_set(struct omap_mpuio_s *s, int line, qemu_irq handler) +{ + if (line >= 16 || line < 0) + hw_error("%s: No GPIO line %i\n", __func__, line); + s->handler[line] = handler; +} + +void omap_mpuio_key(struct omap_mpuio_s *s, int row, int col, int down) +{ + if (row >= 5 || row < 0) + hw_error("%s: No key %i-%i\n", __func__, col, row); + + if (down) + s->buttons[row] |= 1 << col; + else + s->buttons[row] &= ~(1 << col); + + omap_mpuio_kbd_update(s); +} + +/* MicroWire Interface */ +struct omap_uwire_s { + MemoryRegion iomem; + qemu_irq txirq; + qemu_irq rxirq; + qemu_irq txdrq; + + uint16_t txbuf; + uint16_t rxbuf; + uint16_t control; + uint16_t setup[5]; + + uWireSlave *chip[4]; +}; + +static void omap_uwire_transfer_start(struct omap_uwire_s *s) +{ + int chipselect = (s->control >> 10) & 3; /* INDEX */ + uWireSlave *slave = s->chip[chipselect]; + + if ((s->control >> 5) & 0x1f) { /* NB_BITS_WR */ + if (s->control & (1 << 12)) /* CS_CMD */ + if (slave && slave->send) + slave->send(slave->opaque, + s->txbuf >> (16 - ((s->control >> 5) & 0x1f))); + s->control &= ~(1 << 14); /* CSRB */ + /* TODO: depending on s->setup[4] bits [1:0] assert an IRQ or + * a DRQ. When is the level IRQ supposed to be reset? */ + } + + if ((s->control >> 0) & 0x1f) { /* NB_BITS_RD */ + if (s->control & (1 << 12)) /* CS_CMD */ + if (slave && slave->receive) + s->rxbuf = slave->receive(slave->opaque); + s->control |= 1 << 15; /* RDRB */ + /* TODO: depending on s->setup[4] bits [1:0] assert an IRQ or + * a DRQ. When is the level IRQ supposed to be reset? 
*/ + } +} + +static uint64_t omap_uwire_read(void *opaque, hwaddr addr, + unsigned size) +{ + struct omap_uwire_s *s = (struct omap_uwire_s *) opaque; + int offset = addr & OMAP_MPUI_REG_MASK; + + if (size != 2) { + return omap_badwidth_read16(opaque, addr); + } + + switch (offset) { + case 0x00: /* RDR */ + s->control &= ~(1 << 15); /* RDRB */ + return s->rxbuf; + + case 0x04: /* CSR */ + return s->control; + + case 0x08: /* SR1 */ + return s->setup[0]; + case 0x0c: /* SR2 */ + return s->setup[1]; + case 0x10: /* SR3 */ + return s->setup[2]; + case 0x14: /* SR4 */ + return s->setup[3]; + case 0x18: /* SR5 */ + return s->setup[4]; + } + + OMAP_BAD_REG(addr); + return 0; +} + +static void omap_uwire_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + struct omap_uwire_s *s = (struct omap_uwire_s *) opaque; + int offset = addr & OMAP_MPUI_REG_MASK; + + if (size != 2) { + omap_badwidth_write16(opaque, addr, value); + return; + } + + switch (offset) { + case 0x00: /* TDR */ + s->txbuf = value; /* TD */ + if ((s->setup[4] & (1 << 2)) && /* AUTO_TX_EN */ + ((s->setup[4] & (1 << 3)) || /* CS_TOGGLE_TX_EN */ + (s->control & (1 << 12)))) { /* CS_CMD */ + s->control |= 1 << 14; /* CSRB */ + omap_uwire_transfer_start(s); + } + break; + + case 0x04: /* CSR */ + s->control = value & 0x1fff; + if (value & (1 << 13)) /* START */ + omap_uwire_transfer_start(s); + break; + + case 0x08: /* SR1 */ + s->setup[0] = value & 0x003f; + break; + + case 0x0c: /* SR2 */ + s->setup[1] = value & 0x0fc0; + break; + + case 0x10: /* SR3 */ + s->setup[2] = value & 0x0003; + break; + + case 0x14: /* SR4 */ + s->setup[3] = value & 0x0001; + break; + + case 0x18: /* SR5 */ + s->setup[4] = value & 0x000f; + break; + + default: + OMAP_BAD_REG(addr); + return; + } +} + +static const MemoryRegionOps omap_uwire_ops = { + .read = omap_uwire_read, + .write = omap_uwire_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void omap_uwire_reset(struct omap_uwire_s *s) +{ + s->control = 0; + s->setup[0] = 0; + s->setup[1] = 0; + s->setup[2] = 0; + s->setup[3] = 0; + s->setup[4] = 0; +} + +static struct omap_uwire_s *omap_uwire_init(MemoryRegion *system_memory, + hwaddr base, + qemu_irq txirq, qemu_irq rxirq, + qemu_irq dma, + omap_clk clk) +{ + struct omap_uwire_s *s = g_new0(struct omap_uwire_s, 1); + + s->txirq = txirq; + s->rxirq = rxirq; + s->txdrq = dma; + omap_uwire_reset(s); + + memory_region_init_io(&s->iomem, NULL, &omap_uwire_ops, s, "omap-uwire", 0x800); + memory_region_add_subregion(system_memory, base, &s->iomem); + + return s; +} + +void omap_uwire_attach(struct omap_uwire_s *s, + uWireSlave *slave, int chipselect) +{ + if (chipselect < 0 || chipselect > 3) { + error_report("%s: Bad chipselect %i", __func__, chipselect); + exit(-1); + } + + s->chip[chipselect] = slave; +} + +/* Pseudonoise Pulse-Width Light Modulator */ +struct omap_pwl_s { + MemoryRegion iomem; + uint8_t output; + uint8_t level; + uint8_t enable; + int clk; +}; + +static void omap_pwl_update(struct omap_pwl_s *s) +{ + int output = (s->clk && s->enable) ? 
s->level : 0; + + if (output != s->output) { + s->output = output; + printf("%s: Backlight now at %i/256\n", __func__, output); + } +} + +static uint64_t omap_pwl_read(void *opaque, hwaddr addr, + unsigned size) +{ + struct omap_pwl_s *s = (struct omap_pwl_s *) opaque; + int offset = addr & OMAP_MPUI_REG_MASK; + + if (size != 1) { + return omap_badwidth_read8(opaque, addr); + } + + switch (offset) { + case 0x00: /* PWL_LEVEL */ + return s->level; + case 0x04: /* PWL_CTRL */ + return s->enable; + } + OMAP_BAD_REG(addr); + return 0; +} + +static void omap_pwl_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + struct omap_pwl_s *s = (struct omap_pwl_s *) opaque; + int offset = addr & OMAP_MPUI_REG_MASK; + + if (size != 1) { + omap_badwidth_write8(opaque, addr, value); + return; + } + + switch (offset) { + case 0x00: /* PWL_LEVEL */ + s->level = value; + omap_pwl_update(s); + break; + case 0x04: /* PWL_CTRL */ + s->enable = value & 1; + omap_pwl_update(s); + break; + default: + OMAP_BAD_REG(addr); + return; + } +} + +static const MemoryRegionOps omap_pwl_ops = { + .read = omap_pwl_read, + .write = omap_pwl_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void omap_pwl_reset(struct omap_pwl_s *s) +{ + s->output = 0; + s->level = 0; + s->enable = 0; + s->clk = 1; + omap_pwl_update(s); +} + +static void omap_pwl_clk_update(void *opaque, int line, int on) +{ + struct omap_pwl_s *s = (struct omap_pwl_s *) opaque; + + s->clk = on; + omap_pwl_update(s); +} + +static struct omap_pwl_s *omap_pwl_init(MemoryRegion *system_memory, + hwaddr base, + omap_clk clk) +{ + struct omap_pwl_s *s = g_malloc0(sizeof(*s)); + + omap_pwl_reset(s); + + memory_region_init_io(&s->iomem, NULL, &omap_pwl_ops, s, + "omap-pwl", 0x800); + memory_region_add_subregion(system_memory, base, &s->iomem); + + omap_clk_adduser(clk, qemu_allocate_irq(omap_pwl_clk_update, s, 0)); + return s; +} + +/* Pulse-Width Tone module */ +struct omap_pwt_s { + MemoryRegion iomem; + uint8_t frc; + uint8_t vrc; + uint8_t gcr; + omap_clk clk; +}; + +static uint64_t omap_pwt_read(void *opaque, hwaddr addr, + unsigned size) +{ + struct omap_pwt_s *s = (struct omap_pwt_s *) opaque; + int offset = addr & OMAP_MPUI_REG_MASK; + + if (size != 1) { + return omap_badwidth_read8(opaque, addr); + } + + switch (offset) { + case 0x00: /* FRC */ + return s->frc; + case 0x04: /* VCR */ + return s->vrc; + case 0x08: /* GCR */ + return s->gcr; + } + OMAP_BAD_REG(addr); + return 0; +} + +static void omap_pwt_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + struct omap_pwt_s *s = (struct omap_pwt_s *) opaque; + int offset = addr & OMAP_MPUI_REG_MASK; + + if (size != 1) { + omap_badwidth_write8(opaque, addr, value); + return; + } + + switch (offset) { + case 0x00: /* FRC */ + s->frc = value & 0x3f; + break; + case 0x04: /* VRC */ + if ((value ^ s->vrc) & 1) { + if (value & 1) + printf("%s: %iHz buzz on\n", __func__, (int) + /* 1.5 MHz from a 12-MHz or 13-MHz PWT_CLK */ + ((omap_clk_getrate(s->clk) >> 3) / + /* Pre-multiplexer divider */ + ((s->gcr & 2) ? 1 : 154) / + /* Octave multiplexer */ + (2 << (value & 3)) * + /* 101/107 divider */ + ((value & (1 << 2)) ? 101 : 107) * + /* 49/55 divider */ + ((value & (1 << 3)) ? 49 : 55) * + /* 50/63 divider */ + ((value & (1 << 4)) ? 50 : 63) * + /* 80/127 divider */ + ((value & (1 << 5)) ? 
80 : 127) / + (107 * 55 * 63 * 127))); + else + printf("%s: silence!\n", __func__); + } + s->vrc = value & 0x7f; + break; + case 0x08: /* GCR */ + s->gcr = value & 3; + break; + default: + OMAP_BAD_REG(addr); + return; + } +} + +static const MemoryRegionOps omap_pwt_ops = { + .read = omap_pwt_read, + .write = omap_pwt_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void omap_pwt_reset(struct omap_pwt_s *s) +{ + s->frc = 0; + s->vrc = 0; + s->gcr = 0; +} + +static struct omap_pwt_s *omap_pwt_init(MemoryRegion *system_memory, + hwaddr base, + omap_clk clk) +{ + struct omap_pwt_s *s = g_malloc0(sizeof(*s)); + s->clk = clk; + omap_pwt_reset(s); + + memory_region_init_io(&s->iomem, NULL, &omap_pwt_ops, s, + "omap-pwt", 0x800); + memory_region_add_subregion(system_memory, base, &s->iomem); + return s; +} + +/* Real-time Clock module */ +struct omap_rtc_s { + MemoryRegion iomem; + qemu_irq irq; + qemu_irq alarm; + QEMUTimer *clk; + + uint8_t interrupts; + uint8_t status; + int16_t comp_reg; + int running; + int pm_am; + int auto_comp; + int round; + struct tm alarm_tm; + time_t alarm_ti; + + struct tm current_tm; + time_t ti; + uint64_t tick; +}; + +static void omap_rtc_interrupts_update(struct omap_rtc_s *s) +{ + /* s->alarm is level-triggered */ + qemu_set_irq(s->alarm, (s->status >> 6) & 1); +} + +static void omap_rtc_alarm_update(struct omap_rtc_s *s) +{ + s->alarm_ti = mktimegm(&s->alarm_tm); + if (s->alarm_ti == -1) + printf("%s: conversion failed\n", __func__); +} + +static uint64_t omap_rtc_read(void *opaque, hwaddr addr, + unsigned size) +{ + struct omap_rtc_s *s = (struct omap_rtc_s *) opaque; + int offset = addr & OMAP_MPUI_REG_MASK; + uint8_t i; + + if (size != 1) { + return omap_badwidth_read8(opaque, addr); + } + + switch (offset) { + case 0x00: /* SECONDS_REG */ + return to_bcd(s->current_tm.tm_sec); + + case 0x04: /* MINUTES_REG */ + return to_bcd(s->current_tm.tm_min); + + case 0x08: /* HOURS_REG */ + if (s->pm_am) + return ((s->current_tm.tm_hour > 11) << 7) | + to_bcd(((s->current_tm.tm_hour - 1) % 12) + 1); + else + return to_bcd(s->current_tm.tm_hour); + + case 0x0c: /* DAYS_REG */ + return to_bcd(s->current_tm.tm_mday); + + case 0x10: /* MONTHS_REG */ + return to_bcd(s->current_tm.tm_mon + 1); + + case 0x14: /* YEARS_REG */ + return to_bcd(s->current_tm.tm_year % 100); + + case 0x18: /* WEEK_REG */ + return s->current_tm.tm_wday; + + case 0x20: /* ALARM_SECONDS_REG */ + return to_bcd(s->alarm_tm.tm_sec); + + case 0x24: /* ALARM_MINUTES_REG */ + return to_bcd(s->alarm_tm.tm_min); + + case 0x28: /* ALARM_HOURS_REG */ + if (s->pm_am) + return ((s->alarm_tm.tm_hour > 11) << 7) | + to_bcd(((s->alarm_tm.tm_hour - 1) % 12) + 1); + else + return to_bcd(s->alarm_tm.tm_hour); + + case 0x2c: /* ALARM_DAYS_REG */ + return to_bcd(s->alarm_tm.tm_mday); + + case 0x30: /* ALARM_MONTHS_REG */ + return to_bcd(s->alarm_tm.tm_mon + 1); + + case 0x34: /* ALARM_YEARS_REG */ + return to_bcd(s->alarm_tm.tm_year % 100); + + case 0x40: /* RTC_CTRL_REG */ + return (s->pm_am << 3) | (s->auto_comp << 2) | + (s->round << 1) | s->running; + + case 0x44: /* RTC_STATUS_REG */ + i = s->status; + s->status &= ~0x3d; + return i; + + case 0x48: /* RTC_INTERRUPTS_REG */ + return s->interrupts; + + case 0x4c: /* RTC_COMP_LSB_REG */ + return ((uint16_t) s->comp_reg) & 0xff; + + case 0x50: /* RTC_COMP_MSB_REG */ + return ((uint16_t) s->comp_reg) >> 8; + } + + OMAP_BAD_REG(addr); + return 0; +} + +static void omap_rtc_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + struct omap_rtc_s
*s = (struct omap_rtc_s *) opaque; + int offset = addr & OMAP_MPUI_REG_MASK; + struct tm new_tm; + time_t ti[2]; + + if (size != 1) { + omap_badwidth_write8(opaque, addr, value); + return; + } + + switch (offset) { + case 0x00: /* SECONDS_REG */ +#ifdef ALMDEBUG + printf("RTC SEC_REG <-- %02x\n", value); +#endif + s->ti -= s->current_tm.tm_sec; + s->ti += from_bcd(value); + return; + + case 0x04: /* MINUTES_REG */ +#ifdef ALMDEBUG + printf("RTC MIN_REG <-- %02x\n", value); +#endif + s->ti -= s->current_tm.tm_min * 60; + s->ti += from_bcd(value) * 60; + return; + + case 0x08: /* HOURS_REG */ +#ifdef ALMDEBUG + printf("RTC HRS_REG <-- %02x\n", value); +#endif + s->ti -= s->current_tm.tm_hour * 3600; + if (s->pm_am) { + s->ti += (from_bcd(value & 0x3f) & 12) * 3600; + s->ti += ((value >> 7) & 1) * 43200; + } else + s->ti += from_bcd(value & 0x3f) * 3600; + return; + + case 0x0c: /* DAYS_REG */ +#ifdef ALMDEBUG + printf("RTC DAY_REG <-- %02x\n", value); +#endif + s->ti -= s->current_tm.tm_mday * 86400; + s->ti += from_bcd(value) * 86400; + return; + + case 0x10: /* MONTHS_REG */ +#ifdef ALMDEBUG + printf("RTC MTH_REG <-- %02x\n", value); +#endif + memcpy(&new_tm, &s->current_tm, sizeof(new_tm)); + new_tm.tm_mon = from_bcd(value); + ti[0] = mktimegm(&s->current_tm); + ti[1] = mktimegm(&new_tm); + + if (ti[0] != -1 && ti[1] != -1) { + s->ti -= ti[0]; + s->ti += ti[1]; + } else { + /* A less accurate version */ + s->ti -= s->current_tm.tm_mon * 2592000; + s->ti += from_bcd(value) * 2592000; + } + return; + + case 0x14: /* YEARS_REG */ +#ifdef ALMDEBUG + printf("RTC YRS_REG <-- %02x\n", value); +#endif + memcpy(&new_tm, &s->current_tm, sizeof(new_tm)); + new_tm.tm_year += from_bcd(value) - (new_tm.tm_year % 100); + ti[0] = mktimegm(&s->current_tm); + ti[1] = mktimegm(&new_tm); + + if (ti[0] != -1 && ti[1] != -1) { + s->ti -= ti[0]; + s->ti += ti[1]; + } else { + /* A less accurate version */ + s->ti -= (time_t)(s->current_tm.tm_year % 100) * 31536000; + s->ti += (time_t)from_bcd(value) * 31536000; + } + return; + + case 0x18: /* WEEK_REG */ + return; /* Ignored */ + + case 0x20: /* ALARM_SECONDS_REG */ +#ifdef ALMDEBUG + printf("ALM SEC_REG <-- %02x\n", value); +#endif + s->alarm_tm.tm_sec = from_bcd(value); + omap_rtc_alarm_update(s); + return; + + case 0x24: /* ALARM_MINUTES_REG */ +#ifdef ALMDEBUG + printf("ALM MIN_REG <-- %02x\n", value); +#endif + s->alarm_tm.tm_min = from_bcd(value); + omap_rtc_alarm_update(s); + return; + + case 0x28: /* ALARM_HOURS_REG */ +#ifdef ALMDEBUG + printf("ALM HRS_REG <-- %02x\n", value); +#endif + if (s->pm_am) + s->alarm_tm.tm_hour = + ((from_bcd(value & 0x3f)) % 12) + + ((value >> 7) & 1) * 12; + else + s->alarm_tm.tm_hour = from_bcd(value); + omap_rtc_alarm_update(s); + return; + + case 0x2c: /* ALARM_DAYS_REG */ +#ifdef ALMDEBUG + printf("ALM DAY_REG <-- %02x\n", value); +#endif + s->alarm_tm.tm_mday = from_bcd(value); + omap_rtc_alarm_update(s); + return; + + case 0x30: /* ALARM_MONTHS_REG */ +#ifdef ALMDEBUG + printf("ALM MON_REG <-- %02x\n", value); +#endif + s->alarm_tm.tm_mon = from_bcd(value); + omap_rtc_alarm_update(s); + return; + + case 0x34: /* ALARM_YEARS_REG */ +#ifdef ALMDEBUG + printf("ALM YRS_REG <-- %02x\n", value); +#endif + s->alarm_tm.tm_year = from_bcd(value); + omap_rtc_alarm_update(s); + return; + + case 0x40: /* RTC_CTRL_REG */ +#ifdef ALMDEBUG + printf("RTC CONTROL <-- %02x\n", value); +#endif + s->pm_am = (value >> 3) & 1; + s->auto_comp = (value >> 2) & 1; + s->round = (value >> 1) & 1; + s->running = value & 1; + s->status &= 0xfd; 
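+ /* Bit 1 of RTC_STATUS_REG is assumed here to be the stop/run flag: + * it was cleared just above and is re-set below from the new RUN bit + * written to RTC_CTRL_REG. */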
+ s->status |= s->running << 1; + return; + + case 0x44: /* RTC_STATUS_REG */ +#ifdef ALMDEBUG + printf("RTC STATUSL <-- %02x\n", value); +#endif + s->status &= ~((value & 0xc0) ^ 0x80); + omap_rtc_interrupts_update(s); + return; + + case 0x48: /* RTC_INTERRUPTS_REG */ +#ifdef ALMDEBUG + printf("RTC INTRS <-- %02x\n", value); +#endif + s->interrupts = value; + return; + + case 0x4c: /* RTC_COMP_LSB_REG */ +#ifdef ALMDEBUG + printf("RTC COMPLSB <-- %02x\n", value); +#endif + s->comp_reg &= 0xff00; + s->comp_reg |= 0x00ff & value; + return; + + case 0x50: /* RTC_COMP_MSB_REG */ +#ifdef ALMDEBUG + printf("RTC COMPMSB <-- %02x\n", value); +#endif + s->comp_reg &= 0x00ff; + s->comp_reg |= 0xff00 & (value << 8); + return; + + default: + OMAP_BAD_REG(addr); + return; + } +} + +static const MemoryRegionOps omap_rtc_ops = { + .read = omap_rtc_read, + .write = omap_rtc_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void omap_rtc_tick(void *opaque) +{ + struct omap_rtc_s *s = opaque; + + if (s->round) { + /* Round to nearest full minute. */ + if (s->current_tm.tm_sec < 30) + s->ti -= s->current_tm.tm_sec; + else + s->ti += 60 - s->current_tm.tm_sec; + + s->round = 0; + } + + localtime_r(&s->ti, &s->current_tm); + + if ((s->interrupts & 0x08) && s->ti == s->alarm_ti) { + s->status |= 0x40; + omap_rtc_interrupts_update(s); + } + + if (s->interrupts & 0x04) + switch (s->interrupts & 3) { + case 0: + s->status |= 0x04; + qemu_irq_pulse(s->irq); + break; + case 1: + if (s->current_tm.tm_sec) + break; + s->status |= 0x08; + qemu_irq_pulse(s->irq); + break; + case 2: + if (s->current_tm.tm_sec || s->current_tm.tm_min) + break; + s->status |= 0x10; + qemu_irq_pulse(s->irq); + break; + case 3: + if (s->current_tm.tm_sec || + s->current_tm.tm_min || s->current_tm.tm_hour) + break; + s->status |= 0x20; + qemu_irq_pulse(s->irq); + break; + } + + /* Move on */ + if (s->running) + s->ti ++; + s->tick += 1000; + + /* + * Every full hour add a rough approximation of the compensation + * register to the 32kHz Timer (which drives the RTC) value. 
+ */ + if (s->auto_comp && !s->current_tm.tm_sec && !s->current_tm.tm_min) + s->tick += s->comp_reg * 1000 / 32768; + + timer_mod(s->clk, s->tick); +} + +static void omap_rtc_reset(struct omap_rtc_s *s) +{ + struct tm tm; + + s->interrupts = 0; + s->comp_reg = 0; + s->running = 0; + s->pm_am = 0; + s->auto_comp = 0; + s->round = 0; + s->tick = qemu_clock_get_ms(rtc_clock); + memset(&s->alarm_tm, 0, sizeof(s->alarm_tm)); + s->alarm_tm.tm_mday = 0x01; + s->status = 1 << 7; + qemu_get_timedate(&tm, 0); + s->ti = mktimegm(&tm); + + omap_rtc_alarm_update(s); + omap_rtc_tick(s); +} + +static struct omap_rtc_s *omap_rtc_init(MemoryRegion *system_memory, + hwaddr base, + qemu_irq timerirq, qemu_irq alarmirq, + omap_clk clk) +{ + struct omap_rtc_s *s = g_new0(struct omap_rtc_s, 1); + + s->irq = timerirq; + s->alarm = alarmirq; + s->clk = timer_new_ms(rtc_clock, omap_rtc_tick, s); + + omap_rtc_reset(s); + + memory_region_init_io(&s->iomem, NULL, &omap_rtc_ops, s, + "omap-rtc", 0x800); + memory_region_add_subregion(system_memory, base, &s->iomem); + + return s; +} + +/* Multi-channel Buffered Serial Port interfaces */ +struct omap_mcbsp_s { + MemoryRegion iomem; + qemu_irq txirq; + qemu_irq rxirq; + qemu_irq txdrq; + qemu_irq rxdrq; + + uint16_t spcr[2]; + uint16_t rcr[2]; + uint16_t xcr[2]; + uint16_t srgr[2]; + uint16_t mcr[2]; + uint16_t pcr; + uint16_t rcer[8]; + uint16_t xcer[8]; + int tx_rate; + int rx_rate; + int tx_req; + int rx_req; + + I2SCodec *codec; + QEMUTimer *source_timer; + QEMUTimer *sink_timer; +}; + +static void omap_mcbsp_intr_update(struct omap_mcbsp_s *s) +{ + int irq; + + switch ((s->spcr[0] >> 4) & 3) { /* RINTM */ + case 0: + irq = (s->spcr[0] >> 1) & 1; /* RRDY */ + break; + case 3: + irq = (s->spcr[0] >> 3) & 1; /* RSYNCERR */ + break; + default: + irq = 0; + break; + } + + if (irq) + qemu_irq_pulse(s->rxirq); + + switch ((s->spcr[1] >> 4) & 3) { /* XINTM */ + case 0: + irq = (s->spcr[1] >> 1) & 1; /* XRDY */ + break; + case 3: + irq = (s->spcr[1] >> 3) & 1; /* XSYNCERR */ + break; + default: + irq = 0; + break; + } + + if (irq) + qemu_irq_pulse(s->txirq); +} + +static void omap_mcbsp_rx_newdata(struct omap_mcbsp_s *s) +{ + if ((s->spcr[0] >> 1) & 1) /* RRDY */ + s->spcr[0] |= 1 << 2; /* RFULL */ + s->spcr[0] |= 1 << 1; /* RRDY */ + qemu_irq_raise(s->rxdrq); + omap_mcbsp_intr_update(s); +} + +static void omap_mcbsp_source_tick(void *opaque) +{ + struct omap_mcbsp_s *s = (struct omap_mcbsp_s *) opaque; + static const int bps[8] = { 0, 1, 1, 2, 2, 2, -255, -255 }; + + if (!s->rx_rate) + return; + if (s->rx_req) + printf("%s: Rx FIFO overrun\n", __func__); + + s->rx_req = s->rx_rate << bps[(s->rcr[0] >> 5) & 7]; + + omap_mcbsp_rx_newdata(s); + timer_mod(s->source_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + + NANOSECONDS_PER_SECOND); +} + +static void omap_mcbsp_rx_start(struct omap_mcbsp_s *s) +{ + if (!s->codec || !s->codec->rts) + omap_mcbsp_source_tick(s); + else if (s->codec->in.len) { + s->rx_req = s->codec->in.len; + omap_mcbsp_rx_newdata(s); + } +} + +static void omap_mcbsp_rx_stop(struct omap_mcbsp_s *s) +{ + timer_del(s->source_timer); +} + +static void omap_mcbsp_rx_done(struct omap_mcbsp_s *s) +{ + s->spcr[0] &= ~(1 << 1); /* RRDY */ + qemu_irq_lower(s->rxdrq); + omap_mcbsp_intr_update(s); +} + +static void omap_mcbsp_tx_newdata(struct omap_mcbsp_s *s) +{ + s->spcr[1] |= 1 << 1; /* XRDY */ + qemu_irq_raise(s->txdrq); + omap_mcbsp_intr_update(s); +} + +static void omap_mcbsp_sink_tick(void *opaque) +{ + struct omap_mcbsp_s *s = (struct omap_mcbsp_s *) opaque; + 
static const int bps[8] = { 0, 1, 1, 2, 2, 2, -255, -255 }; + + if (!s->tx_rate) + return; + if (s->tx_req) + printf("%s: Tx FIFO underrun\n", __func__); + + s->tx_req = s->tx_rate << bps[(s->xcr[0] >> 5) & 7]; + + omap_mcbsp_tx_newdata(s); + timer_mod(s->sink_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + + NANOSECONDS_PER_SECOND); +} + +static void omap_mcbsp_tx_start(struct omap_mcbsp_s *s) +{ + if (!s->codec || !s->codec->cts) + omap_mcbsp_sink_tick(s); + else if (s->codec->out.size) { + s->tx_req = s->codec->out.size; + omap_mcbsp_tx_newdata(s); + } +} + +static void omap_mcbsp_tx_done(struct omap_mcbsp_s *s) +{ + s->spcr[1] &= ~(1 << 1); /* XRDY */ + qemu_irq_lower(s->txdrq); + omap_mcbsp_intr_update(s); + if (s->codec && s->codec->cts) + s->codec->tx_swallow(s->codec->opaque); +} + +static void omap_mcbsp_tx_stop(struct omap_mcbsp_s *s) +{ + s->tx_req = 0; + omap_mcbsp_tx_done(s); + timer_del(s->sink_timer); +} + +static void omap_mcbsp_req_update(struct omap_mcbsp_s *s) +{ + int prev_rx_rate, prev_tx_rate; + int rx_rate = 0, tx_rate = 0; + int cpu_rate = 1500000; /* XXX */ + + /* TODO: check CLKSTP bit */ + if (s->spcr[1] & (1 << 6)) { /* GRST */ + if (s->spcr[0] & (1 << 0)) { /* RRST */ + if ((s->srgr[1] & (1 << 13)) && /* CLKSM */ + (s->pcr & (1 << 8))) { /* CLKRM */ + if (~s->pcr & (1 << 7)) /* SCLKME */ + rx_rate = cpu_rate / + ((s->srgr[0] & 0xff) + 1); /* CLKGDV */ + } else + if (s->codec) + rx_rate = s->codec->rx_rate; + } + + if (s->spcr[1] & (1 << 0)) { /* XRST */ + if ((s->srgr[1] & (1 << 13)) && /* CLKSM */ + (s->pcr & (1 << 9))) { /* CLKXM */ + if (~s->pcr & (1 << 7)) /* SCLKME */ + tx_rate = cpu_rate / + ((s->srgr[0] & 0xff) + 1); /* CLKGDV */ + } else + if (s->codec) + tx_rate = s->codec->tx_rate; + } + } + prev_tx_rate = s->tx_rate; + prev_rx_rate = s->rx_rate; + s->tx_rate = tx_rate; + s->rx_rate = rx_rate; + + if (s->codec) + s->codec->set_rate(s->codec->opaque, rx_rate, tx_rate); + + if (!prev_tx_rate && tx_rate) + omap_mcbsp_tx_start(s); + else if (prev_tx_rate && !tx_rate) + omap_mcbsp_tx_stop(s); + + if (!prev_rx_rate && rx_rate) + omap_mcbsp_rx_start(s); + else if (prev_rx_rate && !rx_rate) + omap_mcbsp_rx_stop(s); +} + +static uint64_t omap_mcbsp_read(void *opaque, hwaddr addr, + unsigned size) +{ + struct omap_mcbsp_s *s = (struct omap_mcbsp_s *) opaque; + int offset = addr & OMAP_MPUI_REG_MASK; + uint16_t ret; + + if (size != 2) { + return omap_badwidth_read16(opaque, addr); + } + + switch (offset) { + case 0x00: /* DRR2 */ + if (((s->rcr[0] >> 5) & 7) < 3) /* RWDLEN1 */ + return 0x0000; + /* Fall through. */
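+ /* Reads of DRR below pop one 16-bit word off the codec Rx FIFO; + * rx_req counts the bytes still expected for the current request and + * the transfer is finished with omap_mcbsp_rx_done() when it reaches + * zero. */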
+ case 0x02: /* DRR1 */ + if (s->rx_req < 2) { + printf("%s: Rx FIFO underrun\n", __func__); + omap_mcbsp_rx_done(s); + } else { + s->rx_req -= 2; + if (s->codec && s->codec->in.len >= 2) { + ret = s->codec->in.fifo[s->codec->in.start ++] << 8; + ret |= s->codec->in.fifo[s->codec->in.start ++]; + s->codec->in.len -= 2; + } else + ret = 0x0000; + if (!s->rx_req) + omap_mcbsp_rx_done(s); + return ret; + } + return 0x0000; + + case 0x04: /* DXR2 */ + case 0x06: /* DXR1 */ + return 0x0000; + + case 0x08: /* SPCR2 */ + return s->spcr[1]; + case 0x0a: /* SPCR1 */ + return s->spcr[0]; + case 0x0c: /* RCR2 */ + return s->rcr[1]; + case 0x0e: /* RCR1 */ + return s->rcr[0]; + case 0x10: /* XCR2 */ + return s->xcr[1]; + case 0x12: /* XCR1 */ + return s->xcr[0]; + case 0x14: /* SRGR2 */ + return s->srgr[1]; + case 0x16: /* SRGR1 */ + return s->srgr[0]; + case 0x18: /* MCR2 */ + return s->mcr[1]; + case 0x1a: /* MCR1 */ + return s->mcr[0]; + case 0x1c: /* RCERA */ + return s->rcer[0]; + case 0x1e: /* RCERB */ + return s->rcer[1]; + case 0x20: /* XCERA */ + return s->xcer[0]; + case 0x22: /* XCERB */ + return s->xcer[1]; + case 0x24: /* PCR0 */ + return s->pcr; + case 0x26: /* RCERC */ + return s->rcer[2]; + case 0x28: /* RCERD */ + return s->rcer[3]; + case 0x2a: /* XCERC */ + return s->xcer[2]; + case 0x2c: /* XCERD */ + return s->xcer[3]; + case 0x2e: /* RCERE */ + return s->rcer[4]; + case 0x30: /* RCERF */ + return s->rcer[5]; + case 0x32: /* XCERE */ + return s->xcer[4]; + case 0x34: /* XCERF */ + return s->xcer[5]; + case 0x36: /* RCERG */ + return s->rcer[6]; + case 0x38: /* RCERH */ + return s->rcer[7]; + case 0x3a: /* XCERG */ + return s->xcer[6]; + case 0x3c: /* XCERH */ + return s->xcer[7]; + } + + OMAP_BAD_REG(addr); + return 0; +} + +static void omap_mcbsp_writeh(void *opaque, hwaddr addr, + uint32_t value) +{ + struct omap_mcbsp_s *s = (struct omap_mcbsp_s *) opaque; + int offset = addr & OMAP_MPUI_REG_MASK; + + switch (offset) { + case 0x00: /* DRR2 */ + case 0x02: /* DRR1 */ + OMAP_RO_REG(addr); + return; + + case 0x04: /* DXR2 */ + if (((s->xcr[0] >> 5) & 7) < 3) /* XWDLEN1 */ + return; + /* Fall through. 
*/ + case 0x06: /* DXR1 */ + if (s->tx_req > 1) { + s->tx_req -= 2; + if (s->codec && s->codec->cts) { + s->codec->out.fifo[s->codec->out.len ++] = (value >> 8) & 0xff; + s->codec->out.fifo[s->codec->out.len ++] = (value >> 0) & 0xff; + } + if (s->tx_req < 2) + omap_mcbsp_tx_done(s); + } else + printf("%s: Tx FIFO overrun\n", __func__); + return; + + case 0x08: /* SPCR2 */ + s->spcr[1] &= 0x0002; + s->spcr[1] |= 0x03f9 & value; + s->spcr[1] |= 0x0004 & (value << 2); /* XEMPTY := XRST */ + if (~value & 1) /* XRST */ + s->spcr[1] &= ~6; + omap_mcbsp_req_update(s); + return; + case 0x0a: /* SPCR1 */ + s->spcr[0] &= 0x0006; + s->spcr[0] |= 0xf8f9 & value; + if (value & (1 << 15)) /* DLB */ + printf("%s: Digital Loopback mode enable attempt\n", __func__); + if (~value & 1) { /* RRST */ + s->spcr[0] &= ~6; + s->rx_req = 0; + omap_mcbsp_rx_done(s); + } + omap_mcbsp_req_update(s); + return; + + case 0x0c: /* RCR2 */ + s->rcr[1] = value & 0xffff; + return; + case 0x0e: /* RCR1 */ + s->rcr[0] = value & 0x7fe0; + return; + case 0x10: /* XCR2 */ + s->xcr[1] = value & 0xffff; + return; + case 0x12: /* XCR1 */ + s->xcr[0] = value & 0x7fe0; + return; + case 0x14: /* SRGR2 */ + s->srgr[1] = value & 0xffff; + omap_mcbsp_req_update(s); + return; + case 0x16: /* SRGR1 */ + s->srgr[0] = value & 0xffff; + omap_mcbsp_req_update(s); + return; + case 0x18: /* MCR2 */ + s->mcr[1] = value & 0x03e3; + if (value & 3) /* XMCM */ + printf("%s: Tx channel selection mode enable attempt\n", __func__); + return; + case 0x1a: /* MCR1 */ + s->mcr[0] = value & 0x03e1; + if (value & 1) /* RMCM */ + printf("%s: Rx channel selection mode enable attempt\n", __func__); + return; + case 0x1c: /* RCERA */ + s->rcer[0] = value & 0xffff; + return; + case 0x1e: /* RCERB */ + s->rcer[1] = value & 0xffff; + return; + case 0x20: /* XCERA */ + s->xcer[0] = value & 0xffff; + return; + case 0x22: /* XCERB */ + s->xcer[1] = value & 0xffff; + return; + case 0x24: /* PCR0 */ + s->pcr = value & 0x7faf; + return; + case 0x26: /* RCERC */ + s->rcer[2] = value & 0xffff; + return; + case 0x28: /* RCERD */ + s->rcer[3] = value & 0xffff; + return; + case 0x2a: /* XCERC */ + s->xcer[2] = value & 0xffff; + return; + case 0x2c: /* XCERD */ + s->xcer[3] = value & 0xffff; + return; + case 0x2e: /* RCERE */ + s->rcer[4] = value & 0xffff; + return; + case 0x30: /* RCERF */ + s->rcer[5] = value & 0xffff; + return; + case 0x32: /* XCERE */ + s->xcer[4] = value & 0xffff; + return; + case 0x34: /* XCERF */ + s->xcer[5] = value & 0xffff; + return; + case 0x36: /* RCERG */ + s->rcer[6] = value & 0xffff; + return; + case 0x38: /* RCERH */ + s->rcer[7] = value & 0xffff; + return; + case 0x3a: /* XCERG */ + s->xcer[6] = value & 0xffff; + return; + case 0x3c: /* XCERH */ + s->xcer[7] = value & 0xffff; + return; + } + + OMAP_BAD_REG(addr); +} + +static void omap_mcbsp_writew(void *opaque, hwaddr addr, + uint32_t value) +{ + struct omap_mcbsp_s *s = (struct omap_mcbsp_s *) opaque; + int offset = addr & OMAP_MPUI_REG_MASK; + + if (offset == 0x04) { /* DXR */ + if (((s->xcr[0] >> 5) & 7) < 3) /* XWDLEN1 */ + return; + if (s->tx_req > 3) { + s->tx_req -= 4; + if (s->codec && s->codec->cts) { + s->codec->out.fifo[s->codec->out.len ++] = + (value >> 24) & 0xff; + s->codec->out.fifo[s->codec->out.len ++] = + (value >> 16) & 0xff; + s->codec->out.fifo[s->codec->out.len ++] = + (value >> 8) & 0xff; + s->codec->out.fifo[s->codec->out.len ++] = + (value >> 0) & 0xff; + } + if (s->tx_req < 4) + omap_mcbsp_tx_done(s); + } else + printf("%s: Tx FIFO overrun\n", __func__); + return; 
+ } + + omap_badwidth_write16(opaque, addr, value); +} + +static void omap_mcbsp_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + switch (size) { + case 2: + omap_mcbsp_writeh(opaque, addr, value); + break; + case 4: + omap_mcbsp_writew(opaque, addr, value); + break; + default: + omap_badwidth_write16(opaque, addr, value); + } +} + +static const MemoryRegionOps omap_mcbsp_ops = { + .read = omap_mcbsp_read, + .write = omap_mcbsp_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void omap_mcbsp_reset(struct omap_mcbsp_s *s) +{ + memset(&s->spcr, 0, sizeof(s->spcr)); + memset(&s->rcr, 0, sizeof(s->rcr)); + memset(&s->xcr, 0, sizeof(s->xcr)); + s->srgr[0] = 0x0001; + s->srgr[1] = 0x2000; + memset(&s->mcr, 0, sizeof(s->mcr)); + memset(&s->pcr, 0, sizeof(s->pcr)); + memset(&s->rcer, 0, sizeof(s->rcer)); + memset(&s->xcer, 0, sizeof(s->xcer)); + s->tx_req = 0; + s->rx_req = 0; + s->tx_rate = 0; + s->rx_rate = 0; + timer_del(s->source_timer); + timer_del(s->sink_timer); +} + +static struct omap_mcbsp_s *omap_mcbsp_init(MemoryRegion *system_memory, + hwaddr base, + qemu_irq txirq, qemu_irq rxirq, + qemu_irq *dma, omap_clk clk) +{ + struct omap_mcbsp_s *s = g_new0(struct omap_mcbsp_s, 1); + + s->txirq = txirq; + s->rxirq = rxirq; + s->txdrq = dma[0]; + s->rxdrq = dma[1]; + s->sink_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, omap_mcbsp_sink_tick, s); + s->source_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, omap_mcbsp_source_tick, s); + omap_mcbsp_reset(s); + + memory_region_init_io(&s->iomem, NULL, &omap_mcbsp_ops, s, "omap-mcbsp", 0x800); + memory_region_add_subregion(system_memory, base, &s->iomem); + + return s; +} + +static void omap_mcbsp_i2s_swallow(void *opaque, int line, int level) +{ + struct omap_mcbsp_s *s = (struct omap_mcbsp_s *) opaque; + + if (s->rx_rate) { + s->rx_req = s->codec->in.len; + omap_mcbsp_rx_newdata(s); + } +} + +static void omap_mcbsp_i2s_start(void *opaque, int line, int level) +{ + struct omap_mcbsp_s *s = (struct omap_mcbsp_s *) opaque; + + if (s->tx_rate) { + s->tx_req = s->codec->out.size; + omap_mcbsp_tx_newdata(s); + } +} + +void omap_mcbsp_i2s_attach(struct omap_mcbsp_s *s, I2SCodec *slave) +{ + s->codec = slave; + slave->rx_swallow = qemu_allocate_irq(omap_mcbsp_i2s_swallow, s, 0); + slave->tx_start = qemu_allocate_irq(omap_mcbsp_i2s_start, s, 0); +} + +/* LED Pulse Generators */ +struct omap_lpg_s { + MemoryRegion iomem; + QEMUTimer *tm; + + uint8_t control; + uint8_t power; + int64_t on; + int64_t period; + int clk; + int cycle; +}; + +static void omap_lpg_tick(void *opaque) +{ + struct omap_lpg_s *s = opaque; + + if (s->cycle) + timer_mod(s->tm, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + s->period - s->on); + else + timer_mod(s->tm, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + s->on); + + s->cycle = !s->cycle; + printf("%s: LED is %s\n", __func__, s->cycle ? "on" : "off"); +} + +static void omap_lpg_update(struct omap_lpg_s *s) +{ + int64_t on, period = 1, ticks = 1000; + static const int per[8] = { 1, 2, 4, 8, 12, 16, 20, 24 }; + + if (~s->control & (1 << 6)) /* LPGRES */ + on = 0; + else if (s->control & (1 << 7)) /* PERM_ON */ + on = period; + else { + period = muldiv64(ticks, per[s->control & 7], /* PERCTRL */ + 256 / 32); + on = (s->clk && s->power) ? 
muldiv64(ticks, + per[(s->control >> 3) & 7], 256) : 0; /* ONCTRL */ + } + + timer_del(s->tm); + if (on == period && s->on < s->period) + printf("%s: LED is on\n", __func__); + else if (on == 0 && s->on) + printf("%s: LED is off\n", __func__); + else if (on && (on != s->on || period != s->period)) { + s->cycle = 0; + s->on = on; + s->period = period; + omap_lpg_tick(s); + return; + } + + s->on = on; + s->period = period; +} + +static void omap_lpg_reset(struct omap_lpg_s *s) +{ + s->control = 0x00; + s->power = 0x00; + s->clk = 1; + omap_lpg_update(s); +} + +static uint64_t omap_lpg_read(void *opaque, hwaddr addr, + unsigned size) +{ + struct omap_lpg_s *s = (struct omap_lpg_s *) opaque; + int offset = addr & OMAP_MPUI_REG_MASK; + + if (size != 1) { + return omap_badwidth_read8(opaque, addr); + } + + switch (offset) { + case 0x00: /* LCR */ + return s->control; + + case 0x04: /* PMR */ + return s->power; + } + + OMAP_BAD_REG(addr); + return 0; +} + +static void omap_lpg_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + struct omap_lpg_s *s = (struct omap_lpg_s *) opaque; + int offset = addr & OMAP_MPUI_REG_MASK; + + if (size != 1) { + omap_badwidth_write8(opaque, addr, value); + return; + } + + switch (offset) { + case 0x00: /* LCR */ + if (~value & (1 << 6)) /* LPGRES */ + omap_lpg_reset(s); + s->control = value & 0xff; + omap_lpg_update(s); + return; + + case 0x04: /* PMR */ + s->power = value & 0x01; + omap_lpg_update(s); + return; + + default: + OMAP_BAD_REG(addr); + return; + } +} + +static const MemoryRegionOps omap_lpg_ops = { + .read = omap_lpg_read, + .write = omap_lpg_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void omap_lpg_clk_update(void *opaque, int line, int on) +{ + struct omap_lpg_s *s = (struct omap_lpg_s *) opaque; + + s->clk = on; + omap_lpg_update(s); +} + +static struct omap_lpg_s *omap_lpg_init(MemoryRegion *system_memory, + hwaddr base, omap_clk clk) +{ + struct omap_lpg_s *s = g_new0(struct omap_lpg_s, 1); + + s->tm = timer_new_ms(QEMU_CLOCK_VIRTUAL, omap_lpg_tick, s); + + omap_lpg_reset(s); + + memory_region_init_io(&s->iomem, NULL, &omap_lpg_ops, s, "omap-lpg", 0x800); + memory_region_add_subregion(system_memory, base, &s->iomem); + + omap_clk_adduser(clk, qemu_allocate_irq(omap_lpg_clk_update, s, 0)); + + return s; +} + +/* MPUI Peripheral Bridge configuration */ +static uint64_t omap_mpui_io_read(void *opaque, hwaddr addr, + unsigned size) +{ + if (size != 2) { + return omap_badwidth_read16(opaque, addr); + } + + if (addr == OMAP_MPUI_BASE) /* CMR */ + return 0xfe4d; + + OMAP_BAD_REG(addr); + return 0; +} + +static void omap_mpui_io_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + /* FIXME: infinite loop */ + omap_badwidth_write16(opaque, addr, value); +} + +static const MemoryRegionOps omap_mpui_io_ops = { + .read = omap_mpui_io_read, + .write = omap_mpui_io_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void omap_setup_mpui_io(MemoryRegion *system_memory, + struct omap_mpu_state_s *mpu) +{ + memory_region_init_io(&mpu->mpui_io_iomem, NULL, &omap_mpui_io_ops, mpu, + "omap-mpui-io", 0x7fff); + memory_region_add_subregion(system_memory, OMAP_MPUI_BASE, + &mpu->mpui_io_iomem); +} + +/* General chip reset */ +static void omap1_mpu_reset(void *opaque) +{ + struct omap_mpu_state_s *mpu = (struct omap_mpu_state_s *) opaque; + + omap_dma_reset(mpu->dma); + omap_mpu_timer_reset(mpu->timer[0]); + omap_mpu_timer_reset(mpu->timer[1]); + omap_mpu_timer_reset(mpu->timer[2]); + 
omap_wd_timer_reset(mpu->wdt); + omap_os_timer_reset(mpu->os_timer); + omap_lcdc_reset(mpu->lcd); + omap_ulpd_pm_reset(mpu); + omap_pin_cfg_reset(mpu); + omap_mpui_reset(mpu); + omap_tipb_bridge_reset(mpu->private_tipb); + omap_tipb_bridge_reset(mpu->public_tipb); + omap_dpll_reset(mpu->dpll[0]); + omap_dpll_reset(mpu->dpll[1]); + omap_dpll_reset(mpu->dpll[2]); + omap_uart_reset(mpu->uart[0]); + omap_uart_reset(mpu->uart[1]); + omap_uart_reset(mpu->uart[2]); + omap_mmc_reset(mpu->mmc); + omap_mpuio_reset(mpu->mpuio); + omap_uwire_reset(mpu->microwire); + omap_pwl_reset(mpu->pwl); + omap_pwt_reset(mpu->pwt); + omap_rtc_reset(mpu->rtc); + omap_mcbsp_reset(mpu->mcbsp1); + omap_mcbsp_reset(mpu->mcbsp2); + omap_mcbsp_reset(mpu->mcbsp3); + omap_lpg_reset(mpu->led[0]); + omap_lpg_reset(mpu->led[1]); + omap_clkm_reset(mpu); + cpu_reset(CPU(mpu->cpu)); +} + +static const struct omap_map_s { + hwaddr phys_dsp; + hwaddr phys_mpu; + uint32_t size; + const char *name; +} omap15xx_dsp_mm[] = { + /* Strobe 0 */ + { 0xe1010000, 0xfffb0000, 0x800, "UART1 BT" }, /* CS0 */ + { 0xe1010800, 0xfffb0800, 0x800, "UART2 COM" }, /* CS1 */ + { 0xe1011800, 0xfffb1800, 0x800, "McBSP1 audio" }, /* CS3 */ + { 0xe1012000, 0xfffb2000, 0x800, "MCSI2 communication" }, /* CS4 */ + { 0xe1012800, 0xfffb2800, 0x800, "MCSI1 BT u-Law" }, /* CS5 */ + { 0xe1013000, 0xfffb3000, 0x800, "uWire" }, /* CS6 */ + { 0xe1013800, 0xfffb3800, 0x800, "I^2C" }, /* CS7 */ + { 0xe1014000, 0xfffb4000, 0x800, "USB W2FC" }, /* CS8 */ + { 0xe1014800, 0xfffb4800, 0x800, "RTC" }, /* CS9 */ + { 0xe1015000, 0xfffb5000, 0x800, "MPUIO" }, /* CS10 */ + { 0xe1015800, 0xfffb5800, 0x800, "PWL" }, /* CS11 */ + { 0xe1016000, 0xfffb6000, 0x800, "PWT" }, /* CS12 */ + { 0xe1017000, 0xfffb7000, 0x800, "McBSP3" }, /* CS14 */ + { 0xe1017800, 0xfffb7800, 0x800, "MMC" }, /* CS15 */ + { 0xe1019000, 0xfffb9000, 0x800, "32-kHz timer" }, /* CS18 */ + { 0xe1019800, 0xfffb9800, 0x800, "UART3" }, /* CS19 */ + { 0xe101c800, 0xfffbc800, 0x800, "TIPB switches" }, /* CS25 */ + /* Strobe 1 */ + { 0xe101e000, 0xfffce000, 0x800, "GPIOs" }, /* CS28 */ + + { 0 } +}; + +static void omap_setup_dsp_mapping(MemoryRegion *system_memory, + const struct omap_map_s *map) +{ + MemoryRegion *io; + + for (; map->phys_dsp; map ++) { + io = g_new(MemoryRegion, 1); + memory_region_init_alias(io, NULL, map->name, + system_memory, map->phys_mpu, map->size); + memory_region_add_subregion(system_memory, map->phys_dsp, io); + } +} + +void omap_mpu_wakeup(void *opaque, int irq, int req) +{ + struct omap_mpu_state_s *mpu = (struct omap_mpu_state_s *) opaque; + CPUState *cpu = CPU(mpu->cpu); + + if (cpu->halted) { + cpu_interrupt(cpu, CPU_INTERRUPT_EXITTB); + } +} + +static const struct dma_irq_map omap1_dma_irq_map[] = { + { 0, OMAP_INT_DMA_CH0_6 }, + { 0, OMAP_INT_DMA_CH1_7 }, + { 0, OMAP_INT_DMA_CH2_8 }, + { 0, OMAP_INT_DMA_CH3 }, + { 0, OMAP_INT_DMA_CH4 }, + { 0, OMAP_INT_DMA_CH5 }, + { 1, OMAP_INT_1610_DMA_CH6 }, + { 1, OMAP_INT_1610_DMA_CH7 }, + { 1, OMAP_INT_1610_DMA_CH8 }, + { 1, OMAP_INT_1610_DMA_CH9 }, + { 1, OMAP_INT_1610_DMA_CH10 }, + { 1, OMAP_INT_1610_DMA_CH11 }, + { 1, OMAP_INT_1610_DMA_CH12 }, + { 1, OMAP_INT_1610_DMA_CH13 }, + { 1, OMAP_INT_1610_DMA_CH14 }, + { 1, OMAP_INT_1610_DMA_CH15 } +}; + +/* DMA ports for OMAP1 */ +static int omap_validate_emiff_addr(struct omap_mpu_state_s *s, + hwaddr addr) +{ + return range_covers_byte(OMAP_EMIFF_BASE, s->sdram_size, addr); +} + +static int omap_validate_emifs_addr(struct omap_mpu_state_s *s, + hwaddr addr) +{ + return 
range_covers_byte(OMAP_EMIFS_BASE, OMAP_EMIFF_BASE - OMAP_EMIFS_BASE, + addr); +} + +static int omap_validate_imif_addr(struct omap_mpu_state_s *s, + hwaddr addr) +{ + return range_covers_byte(OMAP_IMIF_BASE, s->sram_size, addr); +} + +static int omap_validate_tipb_addr(struct omap_mpu_state_s *s, + hwaddr addr) +{ + return range_covers_byte(0xfffb0000, 0xffff0000 - 0xfffb0000, addr); +} + +static int omap_validate_local_addr(struct omap_mpu_state_s *s, + hwaddr addr) +{ + return range_covers_byte(OMAP_LOCALBUS_BASE, 0x1000000, addr); +} + +static int omap_validate_tipb_mpui_addr(struct omap_mpu_state_s *s, + hwaddr addr) +{ + return range_covers_byte(0xe1010000, 0xe1020004 - 0xe1010000, addr); +} + +struct omap_mpu_state_s *omap310_mpu_init(MemoryRegion *dram, + const char *cpu_type) +{ + int i; + struct omap_mpu_state_s *s = g_new0(struct omap_mpu_state_s, 1); + qemu_irq dma_irqs[6]; + DriveInfo *dinfo; + SysBusDevice *busdev; + MemoryRegion *system_memory = get_system_memory(); + + /* Core */ + s->mpu_model = omap310; + s->cpu = ARM_CPU(cpu_create(cpu_type)); + s->sdram_size = memory_region_size(dram); + s->sram_size = OMAP15XX_SRAM_SIZE; + + s->wakeup = qemu_allocate_irq(omap_mpu_wakeup, s, 0); + + /* Clocks */ + omap_clk_init(s); + + /* Memory-mapped stuff */ + memory_region_init_ram(&s->imif_ram, NULL, "omap1.sram", s->sram_size, + &error_fatal); + memory_region_add_subregion(system_memory, OMAP_IMIF_BASE, &s->imif_ram); + + omap_clkm_init(system_memory, 0xfffece00, 0xe1008000, s); + + s->ih[0] = qdev_new("omap-intc"); + qdev_prop_set_uint32(s->ih[0], "size", 0x100); + omap_intc_set_iclk(OMAP_INTC(s->ih[0]), omap_findclk(s, "arminth_ck")); + busdev = SYS_BUS_DEVICE(s->ih[0]); + sysbus_realize_and_unref(busdev, &error_fatal); + sysbus_connect_irq(busdev, 0, + qdev_get_gpio_in(DEVICE(s->cpu), ARM_CPU_IRQ)); + sysbus_connect_irq(busdev, 1, + qdev_get_gpio_in(DEVICE(s->cpu), ARM_CPU_FIQ)); + sysbus_mmio_map(busdev, 0, 0xfffecb00); + s->ih[1] = qdev_new("omap-intc"); + qdev_prop_set_uint32(s->ih[1], "size", 0x800); + omap_intc_set_iclk(OMAP_INTC(s->ih[1]), omap_findclk(s, "arminth_ck")); + busdev = SYS_BUS_DEVICE(s->ih[1]); + sysbus_realize_and_unref(busdev, &error_fatal); + sysbus_connect_irq(busdev, 0, + qdev_get_gpio_in(s->ih[0], OMAP_INT_15XX_IH2_IRQ)); + /* The second interrupt controller's FIQ output is not wired up */ + sysbus_mmio_map(busdev, 0, 0xfffe0000); + + for (i = 0; i < 6; i++) { + dma_irqs[i] = qdev_get_gpio_in(s->ih[omap1_dma_irq_map[i].ih], + omap1_dma_irq_map[i].intr); + } + s->dma = omap_dma_init(0xfffed800, dma_irqs, system_memory, + qdev_get_gpio_in(s->ih[0], OMAP_INT_DMA_LCD), + s, omap_findclk(s, "dma_ck"), omap_dma_3_1); + + s->port[emiff ].addr_valid = omap_validate_emiff_addr; + s->port[emifs ].addr_valid = omap_validate_emifs_addr; + s->port[imif ].addr_valid = omap_validate_imif_addr; + s->port[tipb ].addr_valid = omap_validate_tipb_addr; + s->port[local ].addr_valid = omap_validate_local_addr; + s->port[tipb_mpui].addr_valid = omap_validate_tipb_mpui_addr; + + /* Register SDRAM and SRAM DMA ports for fast transfers. 
*/ + soc_dma_port_add_mem(s->dma, memory_region_get_ram_ptr(dram), + OMAP_EMIFF_BASE, s->sdram_size); + soc_dma_port_add_mem(s->dma, memory_region_get_ram_ptr(&s->imif_ram), + OMAP_IMIF_BASE, s->sram_size); + + s->timer[0] = omap_mpu_timer_init(system_memory, 0xfffec500, + qdev_get_gpio_in(s->ih[0], OMAP_INT_TIMER1), + omap_findclk(s, "mputim_ck")); + s->timer[1] = omap_mpu_timer_init(system_memory, 0xfffec600, + qdev_get_gpio_in(s->ih[0], OMAP_INT_TIMER2), + omap_findclk(s, "mputim_ck")); + s->timer[2] = omap_mpu_timer_init(system_memory, 0xfffec700, + qdev_get_gpio_in(s->ih[0], OMAP_INT_TIMER3), + omap_findclk(s, "mputim_ck")); + + s->wdt = omap_wd_timer_init(system_memory, 0xfffec800, + qdev_get_gpio_in(s->ih[0], OMAP_INT_WD_TIMER), + omap_findclk(s, "armwdt_ck")); + + s->os_timer = omap_os_timer_init(system_memory, 0xfffb9000, + qdev_get_gpio_in(s->ih[1], OMAP_INT_OS_TIMER), + omap_findclk(s, "clk32-kHz")); + + s->lcd = omap_lcdc_init(system_memory, 0xfffec000, + qdev_get_gpio_in(s->ih[0], OMAP_INT_LCD_CTRL), + omap_dma_get_lcdch(s->dma), + omap_findclk(s, "lcd_ck")); + + omap_ulpd_pm_init(system_memory, 0xfffe0800, s); + omap_pin_cfg_init(system_memory, 0xfffe1000, s); + omap_id_init(system_memory, s); + + omap_mpui_init(system_memory, 0xfffec900, s); + + s->private_tipb = omap_tipb_bridge_init(system_memory, 0xfffeca00, + qdev_get_gpio_in(s->ih[0], OMAP_INT_BRIDGE_PRIV), + omap_findclk(s, "tipb_ck")); + s->public_tipb = omap_tipb_bridge_init(system_memory, 0xfffed300, + qdev_get_gpio_in(s->ih[0], OMAP_INT_BRIDGE_PUB), + omap_findclk(s, "tipb_ck")); + + omap_tcmi_init(system_memory, 0xfffecc00, s); + + s->uart[0] = omap_uart_init(0xfffb0000, + qdev_get_gpio_in(s->ih[1], OMAP_INT_UART1), + omap_findclk(s, "uart1_ck"), + omap_findclk(s, "uart1_ck"), + s->drq[OMAP_DMA_UART1_TX], s->drq[OMAP_DMA_UART1_RX], + "uart1", + serial_hd(0)); + s->uart[1] = omap_uart_init(0xfffb0800, + qdev_get_gpio_in(s->ih[1], OMAP_INT_UART2), + omap_findclk(s, "uart2_ck"), + omap_findclk(s, "uart2_ck"), + s->drq[OMAP_DMA_UART2_TX], s->drq[OMAP_DMA_UART2_RX], + "uart2", + serial_hd(0) ? serial_hd(1) : NULL); + s->uart[2] = omap_uart_init(0xfffb9800, + qdev_get_gpio_in(s->ih[0], OMAP_INT_UART3), + omap_findclk(s, "uart3_ck"), + omap_findclk(s, "uart3_ck"), + s->drq[OMAP_DMA_UART3_TX], s->drq[OMAP_DMA_UART3_RX], + "uart3", + serial_hd(0) && serial_hd(1) ? serial_hd(2) : NULL); + + s->dpll[0] = omap_dpll_init(system_memory, 0xfffecf00, + omap_findclk(s, "dpll1")); + s->dpll[1] = omap_dpll_init(system_memory, 0xfffed000, + omap_findclk(s, "dpll2")); + s->dpll[2] = omap_dpll_init(system_memory, 0xfffed100, + omap_findclk(s, "dpll3")); + + dinfo = drive_get(IF_SD, 0, 0); + if (!dinfo && !qtest_enabled()) { + warn_report("missing SecureDigital device"); + } + s->mmc = omap_mmc_init(0xfffb7800, system_memory, + dinfo ? 
blk_by_legacy_dinfo(dinfo) : NULL, + qdev_get_gpio_in(s->ih[1], OMAP_INT_OQN), + &s->drq[OMAP_DMA_MMC_TX], + omap_findclk(s, "mmc_ck")); + + s->mpuio = omap_mpuio_init(system_memory, 0xfffb5000, + qdev_get_gpio_in(s->ih[1], OMAP_INT_KEYBOARD), + qdev_get_gpio_in(s->ih[1], OMAP_INT_MPUIO), + s->wakeup, omap_findclk(s, "clk32-kHz")); + + s->gpio = qdev_new("omap-gpio"); + qdev_prop_set_int32(s->gpio, "mpu_model", s->mpu_model); + omap_gpio_set_clk(OMAP1_GPIO(s->gpio), omap_findclk(s, "arm_gpio_ck")); + sysbus_realize_and_unref(SYS_BUS_DEVICE(s->gpio), &error_fatal); + sysbus_connect_irq(SYS_BUS_DEVICE(s->gpio), 0, + qdev_get_gpio_in(s->ih[0], OMAP_INT_GPIO_BANK1)); + sysbus_mmio_map(SYS_BUS_DEVICE(s->gpio), 0, 0xfffce000); + + s->microwire = omap_uwire_init(system_memory, 0xfffb3000, + qdev_get_gpio_in(s->ih[1], OMAP_INT_uWireTX), + qdev_get_gpio_in(s->ih[1], OMAP_INT_uWireRX), + s->drq[OMAP_DMA_UWIRE_TX], omap_findclk(s, "mpuper_ck")); + + s->pwl = omap_pwl_init(system_memory, 0xfffb5800, + omap_findclk(s, "armxor_ck")); + s->pwt = omap_pwt_init(system_memory, 0xfffb6000, + omap_findclk(s, "armxor_ck")); + + s->i2c[0] = qdev_new("omap_i2c"); + qdev_prop_set_uint8(s->i2c[0], "revision", 0x11); + omap_i2c_set_fclk(OMAP_I2C(s->i2c[0]), omap_findclk(s, "mpuper_ck")); + busdev = SYS_BUS_DEVICE(s->i2c[0]); + sysbus_realize_and_unref(busdev, &error_fatal); + sysbus_connect_irq(busdev, 0, qdev_get_gpio_in(s->ih[1], OMAP_INT_I2C)); + sysbus_connect_irq(busdev, 1, s->drq[OMAP_DMA_I2C_TX]); + sysbus_connect_irq(busdev, 2, s->drq[OMAP_DMA_I2C_RX]); + sysbus_mmio_map(busdev, 0, 0xfffb3800); + + s->rtc = omap_rtc_init(system_memory, 0xfffb4800, + qdev_get_gpio_in(s->ih[1], OMAP_INT_RTC_TIMER), + qdev_get_gpio_in(s->ih[1], OMAP_INT_RTC_ALARM), + omap_findclk(s, "clk32-kHz")); + + s->mcbsp1 = omap_mcbsp_init(system_memory, 0xfffb1800, + qdev_get_gpio_in(s->ih[1], OMAP_INT_McBSP1TX), + qdev_get_gpio_in(s->ih[1], OMAP_INT_McBSP1RX), + &s->drq[OMAP_DMA_MCBSP1_TX], omap_findclk(s, "dspxor_ck")); + s->mcbsp2 = omap_mcbsp_init(system_memory, 0xfffb1000, + qdev_get_gpio_in(s->ih[0], + OMAP_INT_310_McBSP2_TX), + qdev_get_gpio_in(s->ih[0], + OMAP_INT_310_McBSP2_RX), + &s->drq[OMAP_DMA_MCBSP2_TX], omap_findclk(s, "mpuper_ck")); + s->mcbsp3 = omap_mcbsp_init(system_memory, 0xfffb7000, + qdev_get_gpio_in(s->ih[1], OMAP_INT_McBSP3TX), + qdev_get_gpio_in(s->ih[1], OMAP_INT_McBSP3RX), + &s->drq[OMAP_DMA_MCBSP3_TX], omap_findclk(s, "dspxor_ck")); + + s->led[0] = omap_lpg_init(system_memory, + 0xfffbd000, omap_findclk(s, "clk32-kHz")); + s->led[1] = omap_lpg_init(system_memory, + 0xfffbd800, omap_findclk(s, "clk32-kHz")); + + /* Register mappings not currently implemented: + * MCSI2 Comm fffb2000 - fffb27ff (not mapped on OMAP310) + * MCSI1 Bluetooth fffb2800 - fffb2fff (not mapped on OMAP310) + * USB W2FC fffb4000 - fffb47ff + * Camera Interface fffb6800 - fffb6fff + * USB Host fffba000 - fffba7ff + * FAC fffba800 - fffbafff + * HDQ/1-Wire fffbc000 - fffbc7ff + * TIPB switches fffbc800 - fffbcfff + * Mailbox fffcf000 - fffcf7ff + * Local bus IF fffec100 - fffec1ff + * Local bus MMU fffec200 - fffec2ff + * DSP MMU fffed200 - fffed2ff + */ + + omap_setup_dsp_mapping(system_memory, omap15xx_dsp_mm); + omap_setup_mpui_io(system_memory, s); + + qemu_register_reset(omap1_mpu_reset, s); + + return s; +} diff --git a/hw/arm/omap2.c b/hw/arm/omap2.c new file mode 100644 index 000000000..02b1aa8c9 --- /dev/null +++ b/hw/arm/omap2.c @@ -0,0 +1,2714 @@ +/* + * TI OMAP processors emulation. 
+ * + * Copyright (C) 2007-2008 Nokia Corporation + * Written by Andrzej Zaborowski + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 or + * (at your option) version 3 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see <http://www.gnu.org/licenses/>. + */ + +#include "qemu/osdep.h" +#include "qemu/error-report.h" +#include "qapi/error.h" +#include "cpu.h" +#include "exec/address-spaces.h" +#include "sysemu/blockdev.h" +#include "sysemu/qtest.h" +#include "sysemu/reset.h" +#include "sysemu/runstate.h" +#include "hw/irq.h" +#include "hw/qdev-properties.h" +#include "hw/arm/boot.h" +#include "hw/arm/omap.h" +#include "sysemu/sysemu.h" +#include "qemu/timer.h" +#include "chardev/char-fe.h" +#include "hw/block/flash.h" +#include "hw/arm/soc_dma.h" +#include "hw/sysbus.h" +#include "audio/audio.h" + +/* Enhanced Audio Controller (CODEC only) */ +struct omap_eac_s { + qemu_irq irq; + MemoryRegion iomem; + + uint16_t sysconfig; + uint8_t config[4]; + uint8_t control; + uint8_t address; + uint16_t data; + uint8_t vtol; + uint8_t vtsl; + uint16_t mixer; + uint16_t gain[4]; + uint8_t att; + uint16_t max[7]; + + struct { + qemu_irq txdrq; + qemu_irq rxdrq; + uint32_t (*txrx)(void *opaque, uint32_t, int); + void *opaque; + +#define EAC_BUF_LEN 1024 + uint32_t rxbuf[EAC_BUF_LEN]; + int rxoff; + int rxlen; + int rxavail; + uint32_t txbuf[EAC_BUF_LEN]; + int txlen; + int txavail; + + int enable; + int rate; + + uint16_t config[4]; + + /* These need to be moved to the actual codec */ + QEMUSoundCard card; + SWVoiceIn *in_voice; + SWVoiceOut *out_voice; + int hw_enable; + } codec; + + struct { + uint8_t control; + uint16_t config; + } modem, bt; +}; + +static inline void omap_eac_interrupt_update(struct omap_eac_s *s) +{ + qemu_set_irq(s->irq, (s->codec.config[1] >> 14) & 1); /* AURDI */ +} + +static inline void omap_eac_in_dmarequest_update(struct omap_eac_s *s) +{ + qemu_set_irq(s->codec.rxdrq, (s->codec.rxavail || s->codec.rxlen) && + ((s->codec.config[1] >> 12) & 1)); /* DMAREN */ +} + +static inline void omap_eac_out_dmarequest_update(struct omap_eac_s *s) +{ + qemu_set_irq(s->codec.txdrq, s->codec.txlen < s->codec.txavail && + ((s->codec.config[1] >> 11) & 1)); /* DMAWEN */ +} + +static inline void omap_eac_in_refill(struct omap_eac_s *s) +{ + int left = MIN(EAC_BUF_LEN - s->codec.rxlen, s->codec.rxavail) << 2; + int start = ((s->codec.rxoff + s->codec.rxlen) & (EAC_BUF_LEN - 1)) << 2; + int leftwrap = MIN(left, (EAC_BUF_LEN << 2) - start); + int recv = 1; + uint8_t *buf = (uint8_t *) s->codec.rxbuf + start; + + left -= leftwrap; + start = 0; + while (leftwrap && (recv = AUD_read(s->codec.in_voice, buf + start, + leftwrap)) > 0) { /* Be defensive */ + start += recv; + leftwrap -= recv; + } + if (recv <= 0) + s->codec.rxavail = 0; + else + s->codec.rxavail -= start >> 2; + s->codec.rxlen += start >> 2; + + if (recv > 0 && left > 0) { + start = 0; + while (left && (recv = AUD_read(s->codec.in_voice, + (uint8_t *) s->codec.rxbuf + start, + left)) > 0) { /* Be defensive */ + start += recv; + left -= recv; + } + if (recv <= 0) + s->codec.rxavail = 0; + else + 
s->codec.rxavail -= start >> 2; + s->codec.rxlen += start >> 2; + } +} + +static inline void omap_eac_out_empty(struct omap_eac_s *s) +{ + int left = s->codec.txlen << 2; + int start = 0; + int sent = 1; + + while (left && (sent = AUD_write(s->codec.out_voice, + (uint8_t *) s->codec.txbuf + start, + left)) > 0) { /* Be defensive */ + start += sent; + left -= sent; + } + + if (!sent) { + s->codec.txavail = 0; + omap_eac_out_dmarequest_update(s); + } + + if (start) + s->codec.txlen = 0; +} + +static void omap_eac_in_cb(void *opaque, int avail_b) +{ + struct omap_eac_s *s = (struct omap_eac_s *) opaque; + + s->codec.rxavail = avail_b >> 2; + omap_eac_in_refill(s); + /* TODO: possibly discard current buffer if overrun */ + omap_eac_in_dmarequest_update(s); +} + +static void omap_eac_out_cb(void *opaque, int free_b) +{ + struct omap_eac_s *s = (struct omap_eac_s *) opaque; + + s->codec.txavail = free_b >> 2; + if (s->codec.txlen) + omap_eac_out_empty(s); + else + omap_eac_out_dmarequest_update(s); +} + +static void omap_eac_enable_update(struct omap_eac_s *s) +{ + s->codec.enable = !(s->codec.config[1] & 1) && /* EACPWD */ + (s->codec.config[1] & 2) && /* AUDEN */ + s->codec.hw_enable; +} + +static const int omap_eac_fsint[4] = { + 8000, + 11025, + 22050, + 44100, +}; + +static const int omap_eac_fsint2[8] = { + 8000, + 11025, + 22050, + 44100, + 48000, + 0, 0, 0, +}; + +static const int omap_eac_fsint3[16] = { + 8000, + 11025, + 16000, + 22050, + 24000, + 32000, + 44100, + 48000, + 0, 0, 0, 0, 0, 0, 0, 0, +}; + +static void omap_eac_rate_update(struct omap_eac_s *s) +{ + int fsint[3]; + + fsint[2] = (s->codec.config[3] >> 9) & 0xf; + fsint[1] = (s->codec.config[2] >> 0) & 0x7; + fsint[0] = (s->codec.config[0] >> 6) & 0x3; + if (fsint[2] < 0xf) + s->codec.rate = omap_eac_fsint3[fsint[2]]; + else if (fsint[1] < 0x7) + s->codec.rate = omap_eac_fsint2[fsint[1]]; + else + s->codec.rate = omap_eac_fsint[fsint[0]]; +} + +static void omap_eac_volume_update(struct omap_eac_s *s) +{ + /* TODO */ +} + +static void omap_eac_format_update(struct omap_eac_s *s) +{ + struct audsettings fmt; + + /* The hardware buffers at most one sample */ + if (s->codec.rxlen) + s->codec.rxlen = 1; + + if (s->codec.in_voice) { + AUD_set_active_in(s->codec.in_voice, 0); + AUD_close_in(&s->codec.card, s->codec.in_voice); + s->codec.in_voice = NULL; + } + if (s->codec.out_voice) { + omap_eac_out_empty(s); + AUD_set_active_out(s->codec.out_voice, 0); + AUD_close_out(&s->codec.card, s->codec.out_voice); + s->codec.out_voice = NULL; + s->codec.txavail = 0; + } + /* Discard what couldn't be written */ + s->codec.txlen = 0; + + omap_eac_enable_update(s); + if (!s->codec.enable) + return; + + omap_eac_rate_update(s); + fmt.endianness = ((s->codec.config[0] >> 8) & 1); /* LI_BI */ + fmt.nchannels = ((s->codec.config[0] >> 10) & 1) ? 2 : 1; /* MN_ST */ + fmt.freq = s->codec.rate; + /* TODO: signedness possibly depends on the CODEC hardware - or + * does I2S specify it? */ + /* All register writes are 16 bits so we store 16-bit samples + * in the buffers regardless of AGCFR[B8_16] value. 
*/ + fmt.fmt = AUDIO_FORMAT_U16; + + s->codec.in_voice = AUD_open_in(&s->codec.card, s->codec.in_voice, + "eac.codec.in", s, omap_eac_in_cb, &fmt); + s->codec.out_voice = AUD_open_out(&s->codec.card, s->codec.out_voice, + "eac.codec.out", s, omap_eac_out_cb, &fmt); + + omap_eac_volume_update(s); + + AUD_set_active_in(s->codec.in_voice, 1); + AUD_set_active_out(s->codec.out_voice, 1); +} + +static void omap_eac_reset(struct omap_eac_s *s) +{ + s->sysconfig = 0; + s->config[0] = 0x0c; + s->config[1] = 0x09; + s->config[2] = 0xab; + s->config[3] = 0x03; + s->control = 0x00; + s->address = 0x00; + s->data = 0x0000; + s->vtol = 0x00; + s->vtsl = 0x00; + s->mixer = 0x0000; + s->gain[0] = 0xe7e7; + s->gain[1] = 0x6767; + s->gain[2] = 0x6767; + s->gain[3] = 0x6767; + s->att = 0xce; + s->max[0] = 0; + s->max[1] = 0; + s->max[2] = 0; + s->max[3] = 0; + s->max[4] = 0; + s->max[5] = 0; + s->max[6] = 0; + + s->modem.control = 0x00; + s->modem.config = 0x0000; + s->bt.control = 0x00; + s->bt.config = 0x0000; + s->codec.config[0] = 0x0649; + s->codec.config[1] = 0x0000; + s->codec.config[2] = 0x0007; + s->codec.config[3] = 0x1ffc; + s->codec.rxoff = 0; + s->codec.rxlen = 0; + s->codec.txlen = 0; + s->codec.rxavail = 0; + s->codec.txavail = 0; + + omap_eac_format_update(s); + omap_eac_interrupt_update(s); +} + +static uint64_t omap_eac_read(void *opaque, hwaddr addr, + unsigned size) +{ + struct omap_eac_s *s = (struct omap_eac_s *) opaque; + uint32_t ret; + + if (size != 2) { + return omap_badwidth_read16(opaque, addr); + } + + switch (addr) { + case 0x000: /* CPCFR1 */ + return s->config[0]; + case 0x004: /* CPCFR2 */ + return s->config[1]; + case 0x008: /* CPCFR3 */ + return s->config[2]; + case 0x00c: /* CPCFR4 */ + return s->config[3]; + + case 0x010: /* CPTCTL */ + return s->control | ((s->codec.rxavail + s->codec.rxlen > 0) << 7) | + ((s->codec.txlen < s->codec.txavail) << 5); + + case 0x014: /* CPTTADR */ + return s->address; + case 0x018: /* CPTDATL */ + return s->data & 0xff; + case 0x01c: /* CPTDATH */ + return s->data >> 8; + case 0x020: /* CPTVSLL */ + return s->vtol; + case 0x024: /* CPTVSLH */ + return s->vtsl | (3 << 5); /* CRDY1 | CRDY2 */ + case 0x040: /* MPCTR */ + return s->modem.control; + case 0x044: /* MPMCCFR */ + return s->modem.config; + case 0x060: /* BPCTR */ + return s->bt.control; + case 0x064: /* BPMCCFR */ + return s->bt.config; + case 0x080: /* AMSCFR */ + return s->mixer; + case 0x084: /* AMVCTR */ + return s->gain[0]; + case 0x088: /* AM1VCTR */ + return s->gain[1]; + case 0x08c: /* AM2VCTR */ + return s->gain[2]; + case 0x090: /* AM3VCTR */ + return s->gain[3]; + case 0x094: /* ASTCTR */ + return s->att; + case 0x098: /* APD1LCR */ + return s->max[0]; + case 0x09c: /* APD1RCR */ + return s->max[1]; + case 0x0a0: /* APD2LCR */ + return s->max[2]; + case 0x0a4: /* APD2RCR */ + return s->max[3]; + case 0x0a8: /* APD3LCR */ + return s->max[4]; + case 0x0ac: /* APD3RCR */ + return s->max[5]; + case 0x0b0: /* APD4R */ + return s->max[6]; + case 0x0b4: /* ADWR */ + /* This should be write-only? Docs list it as read-only. 
*/ + return 0x0000; + case 0x0b8: /* ADRDR */ + if (likely(s->codec.rxlen > 1)) { + ret = s->codec.rxbuf[s->codec.rxoff ++]; + s->codec.rxlen --; + s->codec.rxoff &= EAC_BUF_LEN - 1; + return ret; + } else if (s->codec.rxlen) { + ret = s->codec.rxbuf[s->codec.rxoff ++]; + s->codec.rxlen --; + s->codec.rxoff &= EAC_BUF_LEN - 1; + if (s->codec.rxavail) + omap_eac_in_refill(s); + omap_eac_in_dmarequest_update(s); + return ret; + } + return 0x0000; + case 0x0bc: /* AGCFR */ + return s->codec.config[0]; + case 0x0c0: /* AGCTR */ + return s->codec.config[1] | ((s->codec.config[1] & 2) << 14); + case 0x0c4: /* AGCFR2 */ + return s->codec.config[2]; + case 0x0c8: /* AGCFR3 */ + return s->codec.config[3]; + case 0x0cc: /* MBPDMACTR */ + case 0x0d0: /* MPDDMARR */ + case 0x0d8: /* MPUDMARR */ + case 0x0e4: /* BPDDMARR */ + case 0x0ec: /* BPUDMARR */ + return 0x0000; + + case 0x100: /* VERSION_NUMBER */ + return 0x0010; + + case 0x104: /* SYSCONFIG */ + return s->sysconfig; + + case 0x108: /* SYSSTATUS */ + return 1 | 0xe; /* RESETDONE | stuff */ + } + + OMAP_BAD_REG(addr); + return 0; +} + +static void omap_eac_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + struct omap_eac_s *s = (struct omap_eac_s *) opaque; + + if (size != 2) { + omap_badwidth_write16(opaque, addr, value); + return; + } + + switch (addr) { + case 0x098: /* APD1LCR */ + case 0x09c: /* APD1RCR */ + case 0x0a0: /* APD2LCR */ + case 0x0a4: /* APD2RCR */ + case 0x0a8: /* APD3LCR */ + case 0x0ac: /* APD3RCR */ + case 0x0b0: /* APD4R */ + case 0x0b8: /* ADRDR */ + case 0x0d0: /* MPDDMARR */ + case 0x0d8: /* MPUDMARR */ + case 0x0e4: /* BPDDMARR */ + case 0x0ec: /* BPUDMARR */ + case 0x100: /* VERSION_NUMBER */ + case 0x108: /* SYSSTATUS */ + OMAP_RO_REG(addr); + return; + + case 0x000: /* CPCFR1 */ + s->config[0] = value & 0xff; + omap_eac_format_update(s); + break; + case 0x004: /* CPCFR2 */ + s->config[1] = value & 0xff; + omap_eac_format_update(s); + break; + case 0x008: /* CPCFR3 */ + s->config[2] = value & 0xff; + omap_eac_format_update(s); + break; + case 0x00c: /* CPCFR4 */ + s->config[3] = value & 0xff; + omap_eac_format_update(s); + break; + + case 0x010: /* CPTCTL */ + /* Assuming TXF and TXE bits are read-only... 
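+     * hence the mask below: 0x5f == (~((1 << 7) | (1 << 5)) & 0xff)
+     * keeps only the writable control bits and drops bits 7 and 5,
+     * which omap_eac_read() synthesises from the live RX/TX buffer
+     * state instead.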
*/ + s->control = value & 0x5f; + omap_eac_interrupt_update(s); + break; + + case 0x014: /* CPTTADR */ + s->address = value & 0xff; + break; + case 0x018: /* CPTDATL */ + s->data &= 0xff00; + s->data |= value & 0xff; + break; + case 0x01c: /* CPTDATH */ + s->data &= 0x00ff; + s->data |= value << 8; + break; + case 0x020: /* CPTVSLL */ + s->vtol = value & 0xf8; + break; + case 0x024: /* CPTVSLH */ + s->vtsl = value & 0x9f; + break; + case 0x040: /* MPCTR */ + s->modem.control = value & 0x8f; + break; + case 0x044: /* MPMCCFR */ + s->modem.config = value & 0x7fff; + break; + case 0x060: /* BPCTR */ + s->bt.control = value & 0x8f; + break; + case 0x064: /* BPMCCFR */ + s->bt.config = value & 0x7fff; + break; + case 0x080: /* AMSCFR */ + s->mixer = value & 0x0fff; + break; + case 0x084: /* AMVCTR */ + s->gain[0] = value & 0xffff; + break; + case 0x088: /* AM1VCTR */ + s->gain[1] = value & 0xff7f; + break; + case 0x08c: /* AM2VCTR */ + s->gain[2] = value & 0xff7f; + break; + case 0x090: /* AM3VCTR */ + s->gain[3] = value & 0xff7f; + break; + case 0x094: /* ASTCTR */ + s->att = value & 0xff; + break; + + case 0x0b4: /* ADWR */ + s->codec.txbuf[s->codec.txlen ++] = value; + if (unlikely(s->codec.txlen == EAC_BUF_LEN || + s->codec.txlen == s->codec.txavail)) { + if (s->codec.txavail) + omap_eac_out_empty(s); + /* Discard what couldn't be written */ + s->codec.txlen = 0; + } + break; + + case 0x0bc: /* AGCFR */ + s->codec.config[0] = value & 0x07ff; + omap_eac_format_update(s); + break; + case 0x0c0: /* AGCTR */ + s->codec.config[1] = value & 0x780f; + omap_eac_format_update(s); + break; + case 0x0c4: /* AGCFR2 */ + s->codec.config[2] = value & 0x003f; + omap_eac_format_update(s); + break; + case 0x0c8: /* AGCFR3 */ + s->codec.config[3] = value & 0xffff; + omap_eac_format_update(s); + break; + case 0x0cc: /* MBPDMACTR */ + case 0x0d4: /* MPDDMAWR */ + case 0x0e0: /* MPUDMAWR */ + case 0x0e8: /* BPDDMAWR */ + case 0x0f0: /* BPUDMAWR */ + break; + + case 0x104: /* SYSCONFIG */ + if (value & (1 << 1)) /* SOFTRESET */ + omap_eac_reset(s); + s->sysconfig = value & 0x31d; + break; + + default: + OMAP_BAD_REG(addr); + return; + } +} + +static const MemoryRegionOps omap_eac_ops = { + .read = omap_eac_read, + .write = omap_eac_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static struct omap_eac_s *omap_eac_init(struct omap_target_agent_s *ta, + qemu_irq irq, qemu_irq *drq, omap_clk fclk, omap_clk iclk) +{ + struct omap_eac_s *s = g_new0(struct omap_eac_s, 1); + + s->irq = irq; + s->codec.rxdrq = *drq ++; + s->codec.txdrq = *drq; + omap_eac_reset(s); + + AUD_register_card("OMAP EAC", &s->codec.card); + + memory_region_init_io(&s->iomem, NULL, &omap_eac_ops, s, "omap.eac", + omap_l4_region_size(ta, 0)); + omap_l4_attach(ta, 0, &s->iomem); + + return s; +} + +/* STI/XTI (emulation interface) console - reverse engineered only */ +struct omap_sti_s { + qemu_irq irq; + MemoryRegion iomem; + MemoryRegion iomem_fifo; + CharBackend chr; + + uint32_t sysconfig; + uint32_t systest; + uint32_t irqst; + uint32_t irqen; + uint32_t clkcontrol; + uint32_t serial_config; +}; + +#define STI_TRACE_CONSOLE_CHANNEL 239 +#define STI_TRACE_CONTROL_CHANNEL 253 + +static inline void omap_sti_interrupt_update(struct omap_sti_s *s) +{ + qemu_set_irq(s->irq, s->irqst & s->irqen); +} + +static void omap_sti_reset(struct omap_sti_s *s) +{ + s->sysconfig = 0; + s->irqst = 0; + s->irqen = 0; + s->clkcontrol = 0; + s->serial_config = 0; + + omap_sti_interrupt_update(s); +} + +static uint64_t omap_sti_read(void *opaque, hwaddr addr, + 
unsigned size)
+{
+    struct omap_sti_s *s = (struct omap_sti_s *) opaque;
+
+    if (size != 4) {
+        return omap_badwidth_read32(opaque, addr);
+    }
+
+    switch (addr) {
+    case 0x00: /* STI_REVISION */
+        return 0x10;
+
+    case 0x10: /* STI_SYSCONFIG */
+        return s->sysconfig;
+
+    case 0x14: /* STI_SYSSTATUS / STI_RX_STATUS / XTI_SYSSTATUS */
+        return 0x00;
+
+    case 0x18: /* STI_IRQSTATUS */
+        return s->irqst;
+
+    case 0x1c: /* STI_IRQSETEN / STI_IRQCLREN */
+        return s->irqen;
+
+    case 0x24: /* STI_ER / STI_DR / XTI_TRACESELECT */
+    case 0x28: /* STI_RX_DR / XTI_RXDATA */
+        /* TODO */
+        return 0;
+
+    case 0x2c: /* STI_CLK_CTRL / XTI_SCLKCRTL */
+        return s->clkcontrol;
+
+    case 0x30: /* STI_SERIAL_CFG / XTI_SCONFIG */
+        return s->serial_config;
+    }
+
+    OMAP_BAD_REG(addr);
+    return 0;
+}
+
+static void omap_sti_write(void *opaque, hwaddr addr,
+                uint64_t value, unsigned size)
+{
+    struct omap_sti_s *s = (struct omap_sti_s *) opaque;
+
+    if (size != 4) {
+        omap_badwidth_write32(opaque, addr, value);
+        return;
+    }
+
+    switch (addr) {
+    case 0x00: /* STI_REVISION */
+    case 0x14: /* STI_SYSSTATUS / STI_RX_STATUS / XTI_SYSSTATUS */
+        OMAP_RO_REG(addr);
+        return;
+
+    case 0x10: /* STI_SYSCONFIG */
+        if (value & (1 << 1)) /* SOFTRESET */
+            omap_sti_reset(s);
+        s->sysconfig = value & 0xfe;
+        break;
+
+    case 0x18: /* STI_IRQSTATUS */
+        s->irqst &= ~value;
+        omap_sti_interrupt_update(s);
+        break;
+
+    case 0x1c: /* STI_IRQSETEN / STI_IRQCLREN */
+        s->irqen = value & 0xffff;
+        omap_sti_interrupt_update(s);
+        break;
+
+    case 0x2c: /* STI_CLK_CTRL / XTI_SCLKCRTL */
+        s->clkcontrol = value & 0xff;
+        break;
+
+    case 0x30: /* STI_SERIAL_CFG / XTI_SCONFIG */
+        s->serial_config = value & 0xff;
+        break;
+
+    case 0x24: /* STI_ER / STI_DR / XTI_TRACESELECT */
+    case 0x28: /* STI_RX_DR / XTI_RXDATA */
+        /* TODO */
+        return;
+
+    default:
+        OMAP_BAD_REG(addr);
+        return;
+    }
+}
+
+static const MemoryRegionOps omap_sti_ops = {
+    .read = omap_sti_read,
+    .write = omap_sti_write,
+    .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+static uint64_t omap_sti_fifo_read(void *opaque, hwaddr addr,
+                unsigned size)
+{
+    OMAP_BAD_REG(addr);
+    return 0;
+}
+
+static void omap_sti_fifo_write(void *opaque, hwaddr addr,
+                uint64_t value, unsigned size)
+{
+    struct omap_sti_s *s = (struct omap_sti_s *) opaque;
+    int ch = addr >> 6;
+    uint8_t byte = value;
+
+    if (size != 1) {
+        omap_badwidth_write8(opaque, addr, value);
+        return;
+    }
+
+    if (ch == STI_TRACE_CONTROL_CHANNEL) {
+        /* Flush channel value. */
+        /* XXX this blocks entire thread. Rewrite to use
+         * qemu_chr_fe_write and background I/O callbacks */
+        qemu_chr_fe_write_all(&s->chr, (const uint8_t *) "\r", 1);
+    } else if (ch == STI_TRACE_CONSOLE_CHANNEL || 1) {
+        if (value == 0xc0 || value == 0xc3) {
+            /* Open channel ch.
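+             * Each trace channel owns a 64-byte window of the 64 KiB
+             * FIFO region (ch = addr >> 6), so e.g. the console
+             * channel 239 decodes from offsets 0x3bc0..0x3bff.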
*/ + } else if (value == 0x00) { + qemu_chr_fe_write_all(&s->chr, (const uint8_t *) "\n", 1); + } else { + qemu_chr_fe_write_all(&s->chr, &byte, 1); + } + } +} + +static const MemoryRegionOps omap_sti_fifo_ops = { + .read = omap_sti_fifo_read, + .write = omap_sti_fifo_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static struct omap_sti_s *omap_sti_init(struct omap_target_agent_s *ta, + MemoryRegion *sysmem, + hwaddr channel_base, qemu_irq irq, omap_clk clk, + Chardev *chr) +{ + struct omap_sti_s *s = g_new0(struct omap_sti_s, 1); + + s->irq = irq; + omap_sti_reset(s); + + qemu_chr_fe_init(&s->chr, chr ?: qemu_chr_new("null", "null", NULL), + &error_abort); + + memory_region_init_io(&s->iomem, NULL, &omap_sti_ops, s, "omap.sti", + omap_l4_region_size(ta, 0)); + omap_l4_attach(ta, 0, &s->iomem); + + memory_region_init_io(&s->iomem_fifo, NULL, &omap_sti_fifo_ops, s, + "omap.sti.fifo", 0x10000); + memory_region_add_subregion(sysmem, channel_base, &s->iomem_fifo); + + return s; +} + +/* L4 Interconnect */ +#define L4TA(n) (n) +#define L4TAO(n) ((n) + 39) + +static const struct omap_l4_region_s omap_l4_region[125] = { + [ 1] = { 0x40800, 0x800, 32 }, /* Initiator agent */ + [ 2] = { 0x41000, 0x1000, 32 }, /* Link agent */ + [ 0] = { 0x40000, 0x800, 32 }, /* Address and protection */ + [ 3] = { 0x00000, 0x1000, 32 | 16 | 8 }, /* System Control and Pinout */ + [ 4] = { 0x01000, 0x1000, 32 | 16 | 8 }, /* L4TAO1 */ + [ 5] = { 0x04000, 0x1000, 32 | 16 }, /* 32K Timer */ + [ 6] = { 0x05000, 0x1000, 32 | 16 | 8 }, /* L4TAO2 */ + [ 7] = { 0x08000, 0x800, 32 }, /* PRCM Region A */ + [ 8] = { 0x08800, 0x800, 32 }, /* PRCM Region B */ + [ 9] = { 0x09000, 0x1000, 32 | 16 | 8 }, /* L4TAO */ + [ 10] = { 0x12000, 0x1000, 32 | 16 | 8 }, /* Test (BCM) */ + [ 11] = { 0x13000, 0x1000, 32 | 16 | 8 }, /* L4TA1 */ + [ 12] = { 0x14000, 0x1000, 32 }, /* Test/emulation (TAP) */ + [ 13] = { 0x15000, 0x1000, 32 | 16 | 8 }, /* L4TA2 */ + [ 14] = { 0x18000, 0x1000, 32 | 16 | 8 }, /* GPIO1 */ + [ 16] = { 0x1a000, 0x1000, 32 | 16 | 8 }, /* GPIO2 */ + [ 18] = { 0x1c000, 0x1000, 32 | 16 | 8 }, /* GPIO3 */ + [ 19] = { 0x1e000, 0x1000, 32 | 16 | 8 }, /* GPIO4 */ + [ 15] = { 0x19000, 0x1000, 32 | 16 | 8 }, /* Quad GPIO TOP */ + [ 17] = { 0x1b000, 0x1000, 32 | 16 | 8 }, /* L4TA3 */ + [ 20] = { 0x20000, 0x1000, 32 | 16 | 8 }, /* WD Timer 1 (Secure) */ + [ 22] = { 0x22000, 0x1000, 32 | 16 | 8 }, /* WD Timer 2 (OMAP) */ + [ 21] = { 0x21000, 0x1000, 32 | 16 | 8 }, /* Dual WD timer TOP */ + [ 23] = { 0x23000, 0x1000, 32 | 16 | 8 }, /* L4TA4 */ + [ 24] = { 0x28000, 0x1000, 32 | 16 | 8 }, /* GP Timer 1 */ + [ 25] = { 0x29000, 0x1000, 32 | 16 | 8 }, /* L4TA7 */ + [ 26] = { 0x48000, 0x2000, 32 | 16 | 8 }, /* Emulation (ARM11ETB) */ + [ 27] = { 0x4a000, 0x1000, 32 | 16 | 8 }, /* L4TA9 */ + [ 28] = { 0x50000, 0x400, 32 | 16 | 8 }, /* Display top */ + [ 29] = { 0x50400, 0x400, 32 | 16 | 8 }, /* Display control */ + [ 30] = { 0x50800, 0x400, 32 | 16 | 8 }, /* Display RFBI */ + [ 31] = { 0x50c00, 0x400, 32 | 16 | 8 }, /* Display encoder */ + [ 32] = { 0x51000, 0x1000, 32 | 16 | 8 }, /* L4TA10 */ + [ 33] = { 0x52000, 0x400, 32 | 16 | 8 }, /* Camera top */ + [ 34] = { 0x52400, 0x400, 32 | 16 | 8 }, /* Camera core */ + [ 35] = { 0x52800, 0x400, 32 | 16 | 8 }, /* Camera DMA */ + [ 36] = { 0x52c00, 0x400, 32 | 16 | 8 }, /* Camera MMU */ + [ 37] = { 0x53000, 0x1000, 32 | 16 | 8 }, /* L4TA11 */ + [ 38] = { 0x56000, 0x1000, 32 | 16 | 8 }, /* sDMA */ + [ 39] = { 0x57000, 0x1000, 32 | 16 | 8 }, /* L4TA12 */ + [ 40] = { 0x58000, 0x1000, 32 | 16 | 8 
}, /* SSI top */ + [ 41] = { 0x59000, 0x1000, 32 | 16 | 8 }, /* SSI GDD */ + [ 42] = { 0x5a000, 0x1000, 32 | 16 | 8 }, /* SSI Port1 */ + [ 43] = { 0x5b000, 0x1000, 32 | 16 | 8 }, /* SSI Port2 */ + [ 44] = { 0x5c000, 0x1000, 32 | 16 | 8 }, /* L4TA13 */ + [ 45] = { 0x5e000, 0x1000, 32 | 16 | 8 }, /* USB OTG */ + [ 46] = { 0x5f000, 0x1000, 32 | 16 | 8 }, /* L4TAO4 */ + [ 47] = { 0x60000, 0x1000, 32 | 16 | 8 }, /* Emulation (WIN_TRACER1SDRC) */ + [ 48] = { 0x61000, 0x1000, 32 | 16 | 8 }, /* L4TA14 */ + [ 49] = { 0x62000, 0x1000, 32 | 16 | 8 }, /* Emulation (WIN_TRACER2GPMC) */ + [ 50] = { 0x63000, 0x1000, 32 | 16 | 8 }, /* L4TA15 */ + [ 51] = { 0x64000, 0x1000, 32 | 16 | 8 }, /* Emulation (WIN_TRACER3OCM) */ + [ 52] = { 0x65000, 0x1000, 32 | 16 | 8 }, /* L4TA16 */ + [ 53] = { 0x66000, 0x300, 32 | 16 | 8 }, /* Emulation (WIN_TRACER4L4) */ + [ 54] = { 0x67000, 0x1000, 32 | 16 | 8 }, /* L4TA17 */ + [ 55] = { 0x68000, 0x1000, 32 | 16 | 8 }, /* Emulation (XTI) */ + [ 56] = { 0x69000, 0x1000, 32 | 16 | 8 }, /* L4TA18 */ + [ 57] = { 0x6a000, 0x1000, 16 | 8 }, /* UART1 */ + [ 58] = { 0x6b000, 0x1000, 32 | 16 | 8 }, /* L4TA19 */ + [ 59] = { 0x6c000, 0x1000, 16 | 8 }, /* UART2 */ + [ 60] = { 0x6d000, 0x1000, 32 | 16 | 8 }, /* L4TA20 */ + [ 61] = { 0x6e000, 0x1000, 16 | 8 }, /* UART3 */ + [ 62] = { 0x6f000, 0x1000, 32 | 16 | 8 }, /* L4TA21 */ + [ 63] = { 0x70000, 0x1000, 16 }, /* I2C1 */ + [ 64] = { 0x71000, 0x1000, 32 | 16 | 8 }, /* L4TAO5 */ + [ 65] = { 0x72000, 0x1000, 16 }, /* I2C2 */ + [ 66] = { 0x73000, 0x1000, 32 | 16 | 8 }, /* L4TAO6 */ + [ 67] = { 0x74000, 0x1000, 16 }, /* McBSP1 */ + [ 68] = { 0x75000, 0x1000, 32 | 16 | 8 }, /* L4TAO7 */ + [ 69] = { 0x76000, 0x1000, 16 }, /* McBSP2 */ + [ 70] = { 0x77000, 0x1000, 32 | 16 | 8 }, /* L4TAO8 */ + [ 71] = { 0x24000, 0x1000, 32 | 16 | 8 }, /* WD Timer 3 (DSP) */ + [ 72] = { 0x25000, 0x1000, 32 | 16 | 8 }, /* L4TA5 */ + [ 73] = { 0x26000, 0x1000, 32 | 16 | 8 }, /* WD Timer 4 (IVA) */ + [ 74] = { 0x27000, 0x1000, 32 | 16 | 8 }, /* L4TA6 */ + [ 75] = { 0x2a000, 0x1000, 32 | 16 | 8 }, /* GP Timer 2 */ + [ 76] = { 0x2b000, 0x1000, 32 | 16 | 8 }, /* L4TA8 */ + [ 77] = { 0x78000, 0x1000, 32 | 16 | 8 }, /* GP Timer 3 */ + [ 78] = { 0x79000, 0x1000, 32 | 16 | 8 }, /* L4TA22 */ + [ 79] = { 0x7a000, 0x1000, 32 | 16 | 8 }, /* GP Timer 4 */ + [ 80] = { 0x7b000, 0x1000, 32 | 16 | 8 }, /* L4TA23 */ + [ 81] = { 0x7c000, 0x1000, 32 | 16 | 8 }, /* GP Timer 5 */ + [ 82] = { 0x7d000, 0x1000, 32 | 16 | 8 }, /* L4TA24 */ + [ 83] = { 0x7e000, 0x1000, 32 | 16 | 8 }, /* GP Timer 6 */ + [ 84] = { 0x7f000, 0x1000, 32 | 16 | 8 }, /* L4TA25 */ + [ 85] = { 0x80000, 0x1000, 32 | 16 | 8 }, /* GP Timer 7 */ + [ 86] = { 0x81000, 0x1000, 32 | 16 | 8 }, /* L4TA26 */ + [ 87] = { 0x82000, 0x1000, 32 | 16 | 8 }, /* GP Timer 8 */ + [ 88] = { 0x83000, 0x1000, 32 | 16 | 8 }, /* L4TA27 */ + [ 89] = { 0x84000, 0x1000, 32 | 16 | 8 }, /* GP Timer 9 */ + [ 90] = { 0x85000, 0x1000, 32 | 16 | 8 }, /* L4TA28 */ + [ 91] = { 0x86000, 0x1000, 32 | 16 | 8 }, /* GP Timer 10 */ + [ 92] = { 0x87000, 0x1000, 32 | 16 | 8 }, /* L4TA29 */ + [ 93] = { 0x88000, 0x1000, 32 | 16 | 8 }, /* GP Timer 11 */ + [ 94] = { 0x89000, 0x1000, 32 | 16 | 8 }, /* L4TA30 */ + [ 95] = { 0x8a000, 0x1000, 32 | 16 | 8 }, /* GP Timer 12 */ + [ 96] = { 0x8b000, 0x1000, 32 | 16 | 8 }, /* L4TA31 */ + [ 97] = { 0x90000, 0x1000, 16 }, /* EAC */ + [ 98] = { 0x91000, 0x1000, 32 | 16 | 8 }, /* L4TA32 */ + [ 99] = { 0x92000, 0x1000, 16 }, /* FAC */ + [100] = { 0x93000, 0x1000, 32 | 16 | 8 }, /* L4TA33 */ + [101] = { 0x94000, 0x1000, 32 | 16 | 
8 }, /* IPC (MAILBOX) */
+    [102] = { 0x95000, 0x1000, 32 | 16 | 8 }, /* L4TA34 */
+    [103] = { 0x98000, 0x1000, 32 | 16 | 8 }, /* SPI1 */
+    [104] = { 0x99000, 0x1000, 32 | 16 | 8 }, /* L4TA35 */
+    [105] = { 0x9a000, 0x1000, 32 | 16 | 8 }, /* SPI2 */
+    [106] = { 0x9b000, 0x1000, 32 | 16 | 8 }, /* L4TA36 */
+    [107] = { 0x9c000, 0x1000, 16 | 8 },      /* MMC SDIO */
+    [108] = { 0x9d000, 0x1000, 32 | 16 | 8 }, /* L4TAO9 */
+    [109] = { 0x9e000, 0x1000, 32 | 16 | 8 }, /* MS_PRO */
+    [110] = { 0x9f000, 0x1000, 32 | 16 | 8 }, /* L4TAO10 */
+    [111] = { 0xa0000, 0x1000, 32 },          /* RNG */
+    [112] = { 0xa1000, 0x1000, 32 | 16 | 8 }, /* L4TAO11 */
+    [113] = { 0xa2000, 0x1000, 32 },          /* DES3DES */
+    [114] = { 0xa3000, 0x1000, 32 | 16 | 8 }, /* L4TAO12 */
+    [115] = { 0xa4000, 0x1000, 32 },          /* SHA1MD5 */
+    [116] = { 0xa5000, 0x1000, 32 | 16 | 8 }, /* L4TAO13 */
+    [117] = { 0xa6000, 0x1000, 32 },          /* AES */
+    [118] = { 0xa7000, 0x1000, 32 | 16 | 8 }, /* L4TA37 */
+    [119] = { 0xa8000, 0x2000, 32 },          /* PKA */
+    [120] = { 0xaa000, 0x1000, 32 | 16 | 8 }, /* L4TA38 */
+    [121] = { 0xb0000, 0x1000, 32 },          /* MG */
+    [122] = { 0xb1000, 0x1000, 32 | 16 | 8 },
+    [123] = { 0xb2000, 0x1000, 32 },          /* HDQ/1-Wire */
+    [124] = { 0xb3000, 0x1000, 32 | 16 | 8 }, /* L4TA39 */
+};
+
+static const struct omap_l4_agent_info_s omap_l4_agent_info[54] = {
+    { 0,         0,   3, 2 }, /* L4IA initiator agent */
+    { L4TAO(1),  3,   2, 1 }, /* Control and pinout module */
+    { L4TAO(2),  5,   2, 1 }, /* 32K timer */
+    { L4TAO(3),  7,   3, 2 }, /* PRCM */
+    { L4TA(1),   10,  2, 1 }, /* BCM */
+    { L4TA(2),   12,  2, 1 }, /* Test JTAG */
+    { L4TA(3),   14,  6, 3 }, /* Quad GPIO */
+    { L4TA(4),   20,  4, 3 }, /* WD timer 1/2 */
+    { L4TA(7),   24,  2, 1 }, /* GP timer 1 */
+    { L4TA(9),   26,  2, 1 }, /* ARM11 ETB */
+    { L4TA(10),  28,  5, 4 }, /* Display subsystem */
+    { L4TA(11),  33,  5, 4 }, /* Camera subsystem */
+    { L4TA(12),  38,  2, 1 }, /* sDMA */
+    { L4TA(13),  40,  5, 4 }, /* SSI */
+    { L4TAO(4),  45,  2, 1 }, /* USB */
+    { L4TA(14),  47,  2, 1 }, /* Win Tracer1 */
+    { L4TA(15),  49,  2, 1 }, /* Win Tracer2 */
+    { L4TA(16),  51,  2, 1 }, /* Win Tracer3 */
+    { L4TA(17),  53,  2, 1 }, /* Win Tracer4 */
+    { L4TA(18),  55,  2, 1 }, /* XTI */
+    { L4TA(19),  57,  2, 1 }, /* UART1 */
+    { L4TA(20),  59,  2, 1 }, /* UART2 */
+    { L4TA(21),  61,  2, 1 }, /* UART3 */
+    { L4TAO(5),  63,  2, 1 }, /* I2C1 */
+    { L4TAO(6),  65,  2, 1 }, /* I2C2 */
+    { L4TAO(7),  67,  2, 1 }, /* McBSP1 */
+    { L4TAO(8),  69,  2, 1 }, /* McBSP2 */
+    { L4TA(5),   71,  2, 1 }, /* WD Timer 3 (DSP) */
+    { L4TA(6),   73,  2, 1 }, /* WD Timer 4 (IVA) */
+    { L4TA(8),   75,  2, 1 }, /* GP Timer 2 */
+    { L4TA(22),  77,  2, 1 }, /* GP Timer 3 */
+    { L4TA(23),  79,  2, 1 }, /* GP Timer 4 */
+    { L4TA(24),  81,  2, 1 }, /* GP Timer 5 */
+    { L4TA(25),  83,  2, 1 }, /* GP Timer 6 */
+    { L4TA(26),  85,  2, 1 }, /* GP Timer 7 */
+    { L4TA(27),  87,  2, 1 }, /* GP Timer 8 */
+    { L4TA(28),  89,  2, 1 }, /* GP Timer 9 */
+    { L4TA(29),  91,  2, 1 }, /* GP Timer 10 */
+    { L4TA(30),  93,  2, 1 }, /* GP Timer 11 */
+    { L4TA(31),  95,  2, 1 }, /* GP Timer 12 */
+    { L4TA(32),  97,  2, 1 }, /* EAC */
+    { L4TA(33),  99,  2, 1 }, /* FAC */
+    { L4TA(34),  101, 2, 1 }, /* IPC */
+    { L4TA(35),  103, 2, 1 }, /* SPI1 */
+    { L4TA(36),  105, 2, 1 }, /* SPI2 */
+    { L4TAO(9),  107, 2, 1 }, /* MMC SDIO */
+    { L4TAO(10), 109, 2, 1 },
+    { L4TAO(11), 111, 2, 1 }, /* RNG */
+    { L4TAO(12), 113, 2, 1 }, /* DES3DES */
+    { L4TAO(13), 115, 2, 1 }, /* SHA1MD5 */
+    { L4TA(37),  117, 2, 1 }, /* AES */
+    { L4TA(38),  119, 2, 1 }, /* PKA */
+    { -1,        121, 2, 1 },
+    { L4TA(39),  123, 2, 1 }, /* HDQ/1-Wire */
+};
+
+#define omap_l4ta(bus,
cs) \ + omap_l4ta_get(bus, omap_l4_region, omap_l4_agent_info, L4TA(cs)) +#define omap_l4tao(bus, cs) \ + omap_l4ta_get(bus, omap_l4_region, omap_l4_agent_info, L4TAO(cs)) + +/* Power, Reset, and Clock Management */ +struct omap_prcm_s { + qemu_irq irq[3]; + struct omap_mpu_state_s *mpu; + MemoryRegion iomem0; + MemoryRegion iomem1; + + uint32_t irqst[3]; + uint32_t irqen[3]; + + uint32_t sysconfig; + uint32_t voltctrl; + uint32_t scratch[20]; + + uint32_t clksrc[1]; + uint32_t clkout[1]; + uint32_t clkemul[1]; + uint32_t clkpol[1]; + uint32_t clksel[8]; + uint32_t clken[12]; + uint32_t clkctrl[4]; + uint32_t clkidle[7]; + uint32_t setuptime[2]; + + uint32_t wkup[3]; + uint32_t wken[3]; + uint32_t wkst[3]; + uint32_t rst[4]; + uint32_t rstctrl[1]; + uint32_t power[4]; + uint32_t rsttime_wkup; + + uint32_t ev; + uint32_t evtime[2]; + + int dpll_lock, apll_lock[2]; +}; + +static void omap_prcm_int_update(struct omap_prcm_s *s, int dom) +{ + qemu_set_irq(s->irq[dom], s->irqst[dom] & s->irqen[dom]); + /* XXX or is the mask applied before PRCM_IRQSTATUS_* ? */ +} + +static uint64_t omap_prcm_read(void *opaque, hwaddr addr, + unsigned size) +{ + struct omap_prcm_s *s = (struct omap_prcm_s *) opaque; + uint32_t ret; + + if (size != 4) { + return omap_badwidth_read32(opaque, addr); + } + + switch (addr) { + case 0x000: /* PRCM_REVISION */ + return 0x10; + + case 0x010: /* PRCM_SYSCONFIG */ + return s->sysconfig; + + case 0x018: /* PRCM_IRQSTATUS_MPU */ + return s->irqst[0]; + + case 0x01c: /* PRCM_IRQENABLE_MPU */ + return s->irqen[0]; + + case 0x050: /* PRCM_VOLTCTRL */ + return s->voltctrl; + case 0x054: /* PRCM_VOLTST */ + return s->voltctrl & 3; + + case 0x060: /* PRCM_CLKSRC_CTRL */ + return s->clksrc[0]; + case 0x070: /* PRCM_CLKOUT_CTRL */ + return s->clkout[0]; + case 0x078: /* PRCM_CLKEMUL_CTRL */ + return s->clkemul[0]; + case 0x080: /* PRCM_CLKCFG_CTRL */ + case 0x084: /* PRCM_CLKCFG_STATUS */ + return 0; + + case 0x090: /* PRCM_VOLTSETUP */ + return s->setuptime[0]; + + case 0x094: /* PRCM_CLKSSETUP */ + return s->setuptime[1]; + + case 0x098: /* PRCM_POLCTRL */ + return s->clkpol[0]; + + case 0x0b0: /* GENERAL_PURPOSE1 */ + case 0x0b4: /* GENERAL_PURPOSE2 */ + case 0x0b8: /* GENERAL_PURPOSE3 */ + case 0x0bc: /* GENERAL_PURPOSE4 */ + case 0x0c0: /* GENERAL_PURPOSE5 */ + case 0x0c4: /* GENERAL_PURPOSE6 */ + case 0x0c8: /* GENERAL_PURPOSE7 */ + case 0x0cc: /* GENERAL_PURPOSE8 */ + case 0x0d0: /* GENERAL_PURPOSE9 */ + case 0x0d4: /* GENERAL_PURPOSE10 */ + case 0x0d8: /* GENERAL_PURPOSE11 */ + case 0x0dc: /* GENERAL_PURPOSE12 */ + case 0x0e0: /* GENERAL_PURPOSE13 */ + case 0x0e4: /* GENERAL_PURPOSE14 */ + case 0x0e8: /* GENERAL_PURPOSE15 */ + case 0x0ec: /* GENERAL_PURPOSE16 */ + case 0x0f0: /* GENERAL_PURPOSE17 */ + case 0x0f4: /* GENERAL_PURPOSE18 */ + case 0x0f8: /* GENERAL_PURPOSE19 */ + case 0x0fc: /* GENERAL_PURPOSE20 */ + return s->scratch[(addr - 0xb0) >> 2]; + + case 0x140: /* CM_CLKSEL_MPU */ + return s->clksel[0]; + case 0x148: /* CM_CLKSTCTRL_MPU */ + return s->clkctrl[0]; + + case 0x158: /* RM_RSTST_MPU */ + return s->rst[0]; + case 0x1c8: /* PM_WKDEP_MPU */ + return s->wkup[0]; + case 0x1d4: /* PM_EVGENCTRL_MPU */ + return s->ev; + case 0x1d8: /* PM_EVEGENONTIM_MPU */ + return s->evtime[0]; + case 0x1dc: /* PM_EVEGENOFFTIM_MPU */ + return s->evtime[1]; + case 0x1e0: /* PM_PWSTCTRL_MPU */ + return s->power[0]; + case 0x1e4: /* PM_PWSTST_MPU */ + return 0; + + case 0x200: /* CM_FCLKEN1_CORE */ + return s->clken[0]; + case 0x204: /* CM_FCLKEN2_CORE */ + return s->clken[1]; + 
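+
+    /*
+     * The per-domain CM_{F,I}CLKEN* registers are folded into the flat
+     * clken[] array; as a reading aid (derived from the cases in this
+     * function, not from the TRM):
+     *
+     *   clken[0..4]   CORE functional/interface clock enables
+     *   clken[5..6]   GFX
+     *   clken[7..8]   WKUP
+     *   clken[9]      PLL (CM_CLKEN_PLL)
+     *   clken[10..11] DSP
+     */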
case 0x210: /* CM_ICLKEN1_CORE */ + return s->clken[2]; + case 0x214: /* CM_ICLKEN2_CORE */ + return s->clken[3]; + case 0x21c: /* CM_ICLKEN4_CORE */ + return s->clken[4]; + + case 0x220: /* CM_IDLEST1_CORE */ + /* TODO: check the actual iclk status */ + return 0x7ffffff9; + case 0x224: /* CM_IDLEST2_CORE */ + /* TODO: check the actual iclk status */ + return 0x00000007; + case 0x22c: /* CM_IDLEST4_CORE */ + /* TODO: check the actual iclk status */ + return 0x0000001f; + + case 0x230: /* CM_AUTOIDLE1_CORE */ + return s->clkidle[0]; + case 0x234: /* CM_AUTOIDLE2_CORE */ + return s->clkidle[1]; + case 0x238: /* CM_AUTOIDLE3_CORE */ + return s->clkidle[2]; + case 0x23c: /* CM_AUTOIDLE4_CORE */ + return s->clkidle[3]; + + case 0x240: /* CM_CLKSEL1_CORE */ + return s->clksel[1]; + case 0x244: /* CM_CLKSEL2_CORE */ + return s->clksel[2]; + + case 0x248: /* CM_CLKSTCTRL_CORE */ + return s->clkctrl[1]; + + case 0x2a0: /* PM_WKEN1_CORE */ + return s->wken[0]; + case 0x2a4: /* PM_WKEN2_CORE */ + return s->wken[1]; + + case 0x2b0: /* PM_WKST1_CORE */ + return s->wkst[0]; + case 0x2b4: /* PM_WKST2_CORE */ + return s->wkst[1]; + case 0x2c8: /* PM_WKDEP_CORE */ + return 0x1e; + + case 0x2e0: /* PM_PWSTCTRL_CORE */ + return s->power[1]; + case 0x2e4: /* PM_PWSTST_CORE */ + return 0x000030 | (s->power[1] & 0xfc00); + + case 0x300: /* CM_FCLKEN_GFX */ + return s->clken[5]; + case 0x310: /* CM_ICLKEN_GFX */ + return s->clken[6]; + case 0x320: /* CM_IDLEST_GFX */ + /* TODO: check the actual iclk status */ + return 0x00000001; + case 0x340: /* CM_CLKSEL_GFX */ + return s->clksel[3]; + case 0x348: /* CM_CLKSTCTRL_GFX */ + return s->clkctrl[2]; + case 0x350: /* RM_RSTCTRL_GFX */ + return s->rstctrl[0]; + case 0x358: /* RM_RSTST_GFX */ + return s->rst[1]; + case 0x3c8: /* PM_WKDEP_GFX */ + return s->wkup[1]; + + case 0x3e0: /* PM_PWSTCTRL_GFX */ + return s->power[2]; + case 0x3e4: /* PM_PWSTST_GFX */ + return s->power[2] & 3; + + case 0x400: /* CM_FCLKEN_WKUP */ + return s->clken[7]; + case 0x410: /* CM_ICLKEN_WKUP */ + return s->clken[8]; + case 0x420: /* CM_IDLEST_WKUP */ + /* TODO: check the actual iclk status */ + return 0x0000003f; + case 0x430: /* CM_AUTOIDLE_WKUP */ + return s->clkidle[4]; + case 0x440: /* CM_CLKSEL_WKUP */ + return s->clksel[4]; + case 0x450: /* RM_RSTCTRL_WKUP */ + return 0; + case 0x454: /* RM_RSTTIME_WKUP */ + return s->rsttime_wkup; + case 0x458: /* RM_RSTST_WKUP */ + return s->rst[2]; + case 0x4a0: /* PM_WKEN_WKUP */ + return s->wken[2]; + case 0x4b0: /* PM_WKST_WKUP */ + return s->wkst[2]; + + case 0x500: /* CM_CLKEN_PLL */ + return s->clken[9]; + case 0x520: /* CM_IDLEST_CKGEN */ + ret = 0x0000070 | (s->apll_lock[0] << 9) | (s->apll_lock[1] << 8); + if (!(s->clksel[6] & 3)) + /* Core uses 32-kHz clock */ + ret |= 3 << 0; + else if (!s->dpll_lock) + /* DPLL not locked, core uses ref_clk */ + ret |= 1 << 0; + else + /* Core uses DPLL */ + ret |= 2 << 0; + return ret; + case 0x530: /* CM_AUTOIDLE_PLL */ + return s->clkidle[5]; + case 0x540: /* CM_CLKSEL1_PLL */ + return s->clksel[5]; + case 0x544: /* CM_CLKSEL2_PLL */ + return s->clksel[6]; + + case 0x800: /* CM_FCLKEN_DSP */ + return s->clken[10]; + case 0x810: /* CM_ICLKEN_DSP */ + return s->clken[11]; + case 0x820: /* CM_IDLEST_DSP */ + /* TODO: check the actual iclk status */ + return 0x00000103; + case 0x830: /* CM_AUTOIDLE_DSP */ + return s->clkidle[6]; + case 0x840: /* CM_CLKSEL_DSP */ + return s->clksel[7]; + case 0x848: /* CM_CLKSTCTRL_DSP */ + return s->clkctrl[3]; + case 0x850: /* RM_RSTCTRL_DSP */ + return 0; + case 
0x858: /* RM_RSTST_DSP */ + return s->rst[3]; + case 0x8c8: /* PM_WKDEP_DSP */ + return s->wkup[2]; + case 0x8e0: /* PM_PWSTCTRL_DSP */ + return s->power[3]; + case 0x8e4: /* PM_PWSTST_DSP */ + return 0x008030 | (s->power[3] & 0x3003); + + case 0x8f0: /* PRCM_IRQSTATUS_DSP */ + return s->irqst[1]; + case 0x8f4: /* PRCM_IRQENABLE_DSP */ + return s->irqen[1]; + + case 0x8f8: /* PRCM_IRQSTATUS_IVA */ + return s->irqst[2]; + case 0x8fc: /* PRCM_IRQENABLE_IVA */ + return s->irqen[2]; + } + + OMAP_BAD_REG(addr); + return 0; +} + +static void omap_prcm_apll_update(struct omap_prcm_s *s) +{ + int mode[2]; + + mode[0] = (s->clken[9] >> 6) & 3; + s->apll_lock[0] = (mode[0] == 3); + mode[1] = (s->clken[9] >> 2) & 3; + s->apll_lock[1] = (mode[1] == 3); + /* TODO: update clocks */ + + if (mode[0] == 1 || mode[0] == 2 || mode[1] == 1 || mode[1] == 2) + fprintf(stderr, "%s: bad EN_54M_PLL or bad EN_96M_PLL\n", + __func__); +} + +static void omap_prcm_dpll_update(struct omap_prcm_s *s) +{ + omap_clk dpll = omap_findclk(s->mpu, "dpll"); + omap_clk dpll_x2 = omap_findclk(s->mpu, "dpll"); + omap_clk core = omap_findclk(s->mpu, "core_clk"); + int mode = (s->clken[9] >> 0) & 3; + int mult, div; + + mult = (s->clksel[5] >> 12) & 0x3ff; + div = (s->clksel[5] >> 8) & 0xf; + if (mult == 0 || mult == 1) + mode = 1; /* Bypass */ + + s->dpll_lock = 0; + switch (mode) { + case 0: + fprintf(stderr, "%s: bad EN_DPLL\n", __func__); + break; + case 1: /* Low-power bypass mode (Default) */ + case 2: /* Fast-relock bypass mode */ + omap_clk_setrate(dpll, 1, 1); + omap_clk_setrate(dpll_x2, 1, 1); + break; + case 3: /* Lock mode */ + s->dpll_lock = 1; /* After 20 FINT cycles (ref_clk / (div + 1)). */ + + omap_clk_setrate(dpll, div + 1, mult); + omap_clk_setrate(dpll_x2, div + 1, mult * 2); + break; + } + + switch ((s->clksel[6] >> 0) & 3) { + case 0: + omap_clk_reparent(core, omap_findclk(s->mpu, "clk32-kHz")); + break; + case 1: + omap_clk_reparent(core, dpll); + break; + case 2: + /* Default */ + omap_clk_reparent(core, dpll_x2); + break; + case 3: + fprintf(stderr, "%s: bad CORE_CLK_SRC\n", __func__); + break; + } +} + +static void omap_prcm_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + struct omap_prcm_s *s = (struct omap_prcm_s *) opaque; + + if (size != 4) { + omap_badwidth_write32(opaque, addr, value); + return; + } + + switch (addr) { + case 0x000: /* PRCM_REVISION */ + case 0x054: /* PRCM_VOLTST */ + case 0x084: /* PRCM_CLKCFG_STATUS */ + case 0x1e4: /* PM_PWSTST_MPU */ + case 0x220: /* CM_IDLEST1_CORE */ + case 0x224: /* CM_IDLEST2_CORE */ + case 0x22c: /* CM_IDLEST4_CORE */ + case 0x2c8: /* PM_WKDEP_CORE */ + case 0x2e4: /* PM_PWSTST_CORE */ + case 0x320: /* CM_IDLEST_GFX */ + case 0x3e4: /* PM_PWSTST_GFX */ + case 0x420: /* CM_IDLEST_WKUP */ + case 0x520: /* CM_IDLEST_CKGEN */ + case 0x820: /* CM_IDLEST_DSP */ + case 0x8e4: /* PM_PWSTST_DSP */ + OMAP_RO_REG(addr); + return; + + case 0x010: /* PRCM_SYSCONFIG */ + s->sysconfig = value & 1; + break; + + case 0x018: /* PRCM_IRQSTATUS_MPU */ + s->irqst[0] &= ~value; + omap_prcm_int_update(s, 0); + break; + case 0x01c: /* PRCM_IRQENABLE_MPU */ + s->irqen[0] = value & 0x3f; + omap_prcm_int_update(s, 0); + break; + + case 0x050: /* PRCM_VOLTCTRL */ + s->voltctrl = value & 0xf1c3; + break; + + case 0x060: /* PRCM_CLKSRC_CTRL */ + s->clksrc[0] = value & 0xdb; + /* TODO update clocks */ + break; + + case 0x070: /* PRCM_CLKOUT_CTRL */ + s->clkout[0] = value & 0xbbbb; + /* TODO update clocks */ + break; + + case 0x078: /* PRCM_CLKEMUL_CTRL */ + 
s->clkemul[0] = value & 1; + /* TODO update clocks */ + break; + + case 0x080: /* PRCM_CLKCFG_CTRL */ + break; + + case 0x090: /* PRCM_VOLTSETUP */ + s->setuptime[0] = value & 0xffff; + break; + case 0x094: /* PRCM_CLKSSETUP */ + s->setuptime[1] = value & 0xffff; + break; + + case 0x098: /* PRCM_POLCTRL */ + s->clkpol[0] = value & 0x701; + break; + + case 0x0b0: /* GENERAL_PURPOSE1 */ + case 0x0b4: /* GENERAL_PURPOSE2 */ + case 0x0b8: /* GENERAL_PURPOSE3 */ + case 0x0bc: /* GENERAL_PURPOSE4 */ + case 0x0c0: /* GENERAL_PURPOSE5 */ + case 0x0c4: /* GENERAL_PURPOSE6 */ + case 0x0c8: /* GENERAL_PURPOSE7 */ + case 0x0cc: /* GENERAL_PURPOSE8 */ + case 0x0d0: /* GENERAL_PURPOSE9 */ + case 0x0d4: /* GENERAL_PURPOSE10 */ + case 0x0d8: /* GENERAL_PURPOSE11 */ + case 0x0dc: /* GENERAL_PURPOSE12 */ + case 0x0e0: /* GENERAL_PURPOSE13 */ + case 0x0e4: /* GENERAL_PURPOSE14 */ + case 0x0e8: /* GENERAL_PURPOSE15 */ + case 0x0ec: /* GENERAL_PURPOSE16 */ + case 0x0f0: /* GENERAL_PURPOSE17 */ + case 0x0f4: /* GENERAL_PURPOSE18 */ + case 0x0f8: /* GENERAL_PURPOSE19 */ + case 0x0fc: /* GENERAL_PURPOSE20 */ + s->scratch[(addr - 0xb0) >> 2] = value; + break; + + case 0x140: /* CM_CLKSEL_MPU */ + s->clksel[0] = value & 0x1f; + /* TODO update clocks */ + break; + case 0x148: /* CM_CLKSTCTRL_MPU */ + s->clkctrl[0] = value & 0x1f; + break; + + case 0x158: /* RM_RSTST_MPU */ + s->rst[0] &= ~value; + break; + case 0x1c8: /* PM_WKDEP_MPU */ + s->wkup[0] = value & 0x15; + break; + + case 0x1d4: /* PM_EVGENCTRL_MPU */ + s->ev = value & 0x1f; + break; + case 0x1d8: /* PM_EVEGENONTIM_MPU */ + s->evtime[0] = value; + break; + case 0x1dc: /* PM_EVEGENOFFTIM_MPU */ + s->evtime[1] = value; + break; + + case 0x1e0: /* PM_PWSTCTRL_MPU */ + s->power[0] = value & 0xc0f; + break; + + case 0x200: /* CM_FCLKEN1_CORE */ + s->clken[0] = value & 0xbfffffff; + /* TODO update clocks */ + /* The EN_EAC bit only gets/puts func_96m_clk. */ + break; + case 0x204: /* CM_FCLKEN2_CORE */ + s->clken[1] = value & 0x00000007; + /* TODO update clocks */ + break; + case 0x210: /* CM_ICLKEN1_CORE */ + s->clken[2] = value & 0xfffffff9; + /* TODO update clocks */ + /* The EN_EAC bit only gets/puts core_l4_iclk. 
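+     * As elsewhere in this function, only the register value is
+     * latched: each writable case stores value & mask with the
+     * reserved bits cleared (here 0xfffffff9 == ~0x6, i.e. bits 2:1
+     * are unimplemented), and propagating the change through the
+     * omap_clk tree is still TODO.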
*/ + break; + case 0x214: /* CM_ICLKEN2_CORE */ + s->clken[3] = value & 0x00000007; + /* TODO update clocks */ + break; + case 0x21c: /* CM_ICLKEN4_CORE */ + s->clken[4] = value & 0x0000001f; + /* TODO update clocks */ + break; + + case 0x230: /* CM_AUTOIDLE1_CORE */ + s->clkidle[0] = value & 0xfffffff9; + /* TODO update clocks */ + break; + case 0x234: /* CM_AUTOIDLE2_CORE */ + s->clkidle[1] = value & 0x00000007; + /* TODO update clocks */ + break; + case 0x238: /* CM_AUTOIDLE3_CORE */ + s->clkidle[2] = value & 0x00000007; + /* TODO update clocks */ + break; + case 0x23c: /* CM_AUTOIDLE4_CORE */ + s->clkidle[3] = value & 0x0000001f; + /* TODO update clocks */ + break; + + case 0x240: /* CM_CLKSEL1_CORE */ + s->clksel[1] = value & 0x0fffbf7f; + /* TODO update clocks */ + break; + + case 0x244: /* CM_CLKSEL2_CORE */ + s->clksel[2] = value & 0x00fffffc; + /* TODO update clocks */ + break; + + case 0x248: /* CM_CLKSTCTRL_CORE */ + s->clkctrl[1] = value & 0x7; + break; + + case 0x2a0: /* PM_WKEN1_CORE */ + s->wken[0] = value & 0x04667ff8; + break; + case 0x2a4: /* PM_WKEN2_CORE */ + s->wken[1] = value & 0x00000005; + break; + + case 0x2b0: /* PM_WKST1_CORE */ + s->wkst[0] &= ~value; + break; + case 0x2b4: /* PM_WKST2_CORE */ + s->wkst[1] &= ~value; + break; + + case 0x2e0: /* PM_PWSTCTRL_CORE */ + s->power[1] = (value & 0x00fc3f) | (1 << 2); + break; + + case 0x300: /* CM_FCLKEN_GFX */ + s->clken[5] = value & 6; + /* TODO update clocks */ + break; + case 0x310: /* CM_ICLKEN_GFX */ + s->clken[6] = value & 1; + /* TODO update clocks */ + break; + case 0x340: /* CM_CLKSEL_GFX */ + s->clksel[3] = value & 7; + /* TODO update clocks */ + break; + case 0x348: /* CM_CLKSTCTRL_GFX */ + s->clkctrl[2] = value & 1; + break; + case 0x350: /* RM_RSTCTRL_GFX */ + s->rstctrl[0] = value & 1; + /* TODO: reset */ + break; + case 0x358: /* RM_RSTST_GFX */ + s->rst[1] &= ~value; + break; + case 0x3c8: /* PM_WKDEP_GFX */ + s->wkup[1] = value & 0x13; + break; + case 0x3e0: /* PM_PWSTCTRL_GFX */ + s->power[2] = (value & 0x00c0f) | (3 << 2); + break; + + case 0x400: /* CM_FCLKEN_WKUP */ + s->clken[7] = value & 0xd; + /* TODO update clocks */ + break; + case 0x410: /* CM_ICLKEN_WKUP */ + s->clken[8] = value & 0x3f; + /* TODO update clocks */ + break; + case 0x430: /* CM_AUTOIDLE_WKUP */ + s->clkidle[4] = value & 0x0000003f; + /* TODO update clocks */ + break; + case 0x440: /* CM_CLKSEL_WKUP */ + s->clksel[4] = value & 3; + /* TODO update clocks */ + break; + case 0x450: /* RM_RSTCTRL_WKUP */ + /* TODO: reset */ + if (value & 2) + qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); + break; + case 0x454: /* RM_RSTTIME_WKUP */ + s->rsttime_wkup = value & 0x1fff; + break; + case 0x458: /* RM_RSTST_WKUP */ + s->rst[2] &= ~value; + break; + case 0x4a0: /* PM_WKEN_WKUP */ + s->wken[2] = value & 0x00000005; + break; + case 0x4b0: /* PM_WKST_WKUP */ + s->wkst[2] &= ~value; + break; + + case 0x500: /* CM_CLKEN_PLL */ + if (value & 0xffffff30) + fprintf(stderr, "%s: write 0s in CM_CLKEN_PLL for " + "future compatibility\n", __func__); + if ((s->clken[9] ^ value) & 0xcc) { + s->clken[9] &= ~0xcc; + s->clken[9] |= value & 0xcc; + omap_prcm_apll_update(s); + } + if ((s->clken[9] ^ value) & 3) { + s->clken[9] &= ~3; + s->clken[9] |= value & 3; + omap_prcm_dpll_update(s); + } + break; + case 0x530: /* CM_AUTOIDLE_PLL */ + s->clkidle[5] = value & 0x000000cf; + /* TODO update clocks */ + break; + case 0x540: /* CM_CLKSEL1_PLL */ + if (value & 0xfc4000d7) + fprintf(stderr, "%s: write 0s in CM_CLKSEL1_PLL for " + "future 
compatibility\n", __func__); + if ((s->clksel[5] ^ value) & 0x003fff00) { + s->clksel[5] = value & 0x03bfff28; + omap_prcm_dpll_update(s); + } + /* TODO update the other clocks */ + + s->clksel[5] = value & 0x03bfff28; + break; + case 0x544: /* CM_CLKSEL2_PLL */ + if (value & ~3) + fprintf(stderr, "%s: write 0s in CM_CLKSEL2_PLL[31:2] for " + "future compatibility\n", __func__); + if (s->clksel[6] != (value & 3)) { + s->clksel[6] = value & 3; + omap_prcm_dpll_update(s); + } + break; + + case 0x800: /* CM_FCLKEN_DSP */ + s->clken[10] = value & 0x501; + /* TODO update clocks */ + break; + case 0x810: /* CM_ICLKEN_DSP */ + s->clken[11] = value & 0x2; + /* TODO update clocks */ + break; + case 0x830: /* CM_AUTOIDLE_DSP */ + s->clkidle[6] = value & 0x2; + /* TODO update clocks */ + break; + case 0x840: /* CM_CLKSEL_DSP */ + s->clksel[7] = value & 0x3fff; + /* TODO update clocks */ + break; + case 0x848: /* CM_CLKSTCTRL_DSP */ + s->clkctrl[3] = value & 0x101; + break; + case 0x850: /* RM_RSTCTRL_DSP */ + /* TODO: reset */ + break; + case 0x858: /* RM_RSTST_DSP */ + s->rst[3] &= ~value; + break; + case 0x8c8: /* PM_WKDEP_DSP */ + s->wkup[2] = value & 0x13; + break; + case 0x8e0: /* PM_PWSTCTRL_DSP */ + s->power[3] = (value & 0x03017) | (3 << 2); + break; + + case 0x8f0: /* PRCM_IRQSTATUS_DSP */ + s->irqst[1] &= ~value; + omap_prcm_int_update(s, 1); + break; + case 0x8f4: /* PRCM_IRQENABLE_DSP */ + s->irqen[1] = value & 0x7; + omap_prcm_int_update(s, 1); + break; + + case 0x8f8: /* PRCM_IRQSTATUS_IVA */ + s->irqst[2] &= ~value; + omap_prcm_int_update(s, 2); + break; + case 0x8fc: /* PRCM_IRQENABLE_IVA */ + s->irqen[2] = value & 0x7; + omap_prcm_int_update(s, 2); + break; + + default: + OMAP_BAD_REG(addr); + return; + } +} + +static const MemoryRegionOps omap_prcm_ops = { + .read = omap_prcm_read, + .write = omap_prcm_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void omap_prcm_reset(struct omap_prcm_s *s) +{ + s->sysconfig = 0; + s->irqst[0] = 0; + s->irqst[1] = 0; + s->irqst[2] = 0; + s->irqen[0] = 0; + s->irqen[1] = 0; + s->irqen[2] = 0; + s->voltctrl = 0x1040; + s->ev = 0x14; + s->evtime[0] = 0; + s->evtime[1] = 0; + s->clkctrl[0] = 0; + s->clkctrl[1] = 0; + s->clkctrl[2] = 0; + s->clkctrl[3] = 0; + s->clken[1] = 7; + s->clken[3] = 7; + s->clken[4] = 0; + s->clken[5] = 0; + s->clken[6] = 0; + s->clken[7] = 0xc; + s->clken[8] = 0x3e; + s->clken[9] = 0x0d; + s->clken[10] = 0; + s->clken[11] = 0; + s->clkidle[0] = 0; + s->clkidle[2] = 7; + s->clkidle[3] = 0; + s->clkidle[4] = 0; + s->clkidle[5] = 0x0c; + s->clkidle[6] = 0; + s->clksel[0] = 0x01; + s->clksel[1] = 0x02100121; + s->clksel[2] = 0x00000000; + s->clksel[3] = 0x01; + s->clksel[4] = 0; + s->clksel[7] = 0x0121; + s->wkup[0] = 0x15; + s->wkup[1] = 0x13; + s->wkup[2] = 0x13; + s->wken[0] = 0x04667ff8; + s->wken[1] = 0x00000005; + s->wken[2] = 5; + s->wkst[0] = 0; + s->wkst[1] = 0; + s->wkst[2] = 0; + s->power[0] = 0x00c; + s->power[1] = 4; + s->power[2] = 0x0000c; + s->power[3] = 0x14; + s->rstctrl[0] = 1; + s->rst[3] = 1; + omap_prcm_apll_update(s); + omap_prcm_dpll_update(s); +} + +static void omap_prcm_coldreset(struct omap_prcm_s *s) +{ + s->setuptime[0] = 0; + s->setuptime[1] = 0; + memset(&s->scratch, 0, sizeof(s->scratch)); + s->rst[0] = 0x01; + s->rst[1] = 0x00; + s->rst[2] = 0x01; + s->clken[0] = 0; + s->clken[2] = 0; + s->clkidle[1] = 0; + s->clksel[5] = 0; + s->clksel[6] = 2; + s->clksrc[0] = 0x43; + s->clkout[0] = 0x0303; + s->clkemul[0] = 0; + s->clkpol[0] = 0x100; + s->rsttime_wkup = 0x1002; + + omap_prcm_reset(s); 
+}
+
+static struct omap_prcm_s *omap_prcm_init(struct omap_target_agent_s *ta,
+                qemu_irq mpu_int, qemu_irq dsp_int, qemu_irq iva_int,
+                struct omap_mpu_state_s *mpu)
+{
+    struct omap_prcm_s *s = g_new0(struct omap_prcm_s, 1);
+
+    s->irq[0] = mpu_int;
+    s->irq[1] = dsp_int;
+    s->irq[2] = iva_int;
+    s->mpu = mpu;
+    omap_prcm_coldreset(s);
+
+    memory_region_init_io(&s->iomem0, NULL, &omap_prcm_ops, s, "omap.pcrm0",
+                          omap_l4_region_size(ta, 0));
+    memory_region_init_io(&s->iomem1, NULL, &omap_prcm_ops, s, "omap.pcrm1",
+                          omap_l4_region_size(ta, 1));
+    omap_l4_attach(ta, 0, &s->iomem0);
+    omap_l4_attach(ta, 1, &s->iomem1);
+
+    return s;
+}
+
+/* System and Pinout control */
+struct omap_sysctl_s {
+    struct omap_mpu_state_s *mpu;
+    MemoryRegion iomem;
+
+    uint32_t sysconfig;
+    uint32_t devconfig;
+    uint32_t psaconfig;
+    uint32_t padconf[0x45];
+    uint8_t obs;
+    uint32_t msuspendmux[5];
+};
+
+static uint32_t omap_sysctl_read8(void *opaque, hwaddr addr)
+{
+    struct omap_sysctl_s *s = (struct omap_sysctl_s *) opaque;
+    int pad_offset, byte_offset;
+    int value;
+
+    switch (addr) {
+    case 0x030 ... 0x140: /* CONTROL_PADCONF - only used in the POP */
+        pad_offset = (addr - 0x30) >> 2;
+        byte_offset = (addr - 0x30) & (4 - 1);
+
+        value = s->padconf[pad_offset];
+        value = (value >> (byte_offset * 8)) & 0xff;
+
+        return value;
+
+    default:
+        break;
+    }
+
+    OMAP_BAD_REG(addr);
+    return 0;
+}
+
+static uint32_t omap_sysctl_read(void *opaque, hwaddr addr)
+{
+    struct omap_sysctl_s *s = (struct omap_sysctl_s *) opaque;
+
+    switch (addr) {
+    case 0x000: /* CONTROL_REVISION */
+        return 0x20;
+
+    case 0x010: /* CONTROL_SYSCONFIG */
+        return s->sysconfig;
+
+    case 0x030 ... 0x140: /* CONTROL_PADCONF - only used in the POP */
+        return s->padconf[(addr - 0x30) >> 2];
+
+    case 0x270: /* CONTROL_DEBOBS */
+        return s->obs;
+
+    case 0x274: /* CONTROL_DEVCONF */
+        return s->devconfig;
+
+    case 0x28c: /* CONTROL_EMU_SUPPORT */
+        return 0;
+
+    case 0x290: /* CONTROL_MSUSPENDMUX_0 */
+        return s->msuspendmux[0];
+    case 0x294: /* CONTROL_MSUSPENDMUX_1 */
+        return s->msuspendmux[1];
+    case 0x298: /* CONTROL_MSUSPENDMUX_2 */
+        return s->msuspendmux[2];
+    case 0x29c: /* CONTROL_MSUSPENDMUX_3 */
+        return s->msuspendmux[3];
+    case 0x2a0: /* CONTROL_MSUSPENDMUX_4 */
+        return s->msuspendmux[4];
+    case 0x2a4: /* CONTROL_MSUSPENDMUX_5 */
+        return 0;
+
+    case 0x2b8: /* CONTROL_PSA_CTRL */
+        return s->psaconfig;
+    case 0x2bc: /* CONTROL_PSA_CMD */
+    case 0x2c0: /* CONTROL_PSA_VALUE */
+        return 0;
+
+    case 0x2b0: /* CONTROL_SEC_CTRL */
+        return 0x800000f1;
+    case 0x2d0: /* CONTROL_SEC_EMU */
+        return 0x80000015;
+    case 0x2d4: /* CONTROL_SEC_TAP */
+        return 0x8000007f;
+    case 0x2b4: /* CONTROL_SEC_TEST */
+    case 0x2f0: /* CONTROL_SEC_STATUS */
+    case 0x2f4: /* CONTROL_SEC_ERR_STATUS */
+        /* Secure mode is not present on general-purpose device. Outside
+         * secure mode these values cannot be read or written. */
+        return 0;
+
+    case 0x2d8: /* CONTROL_OCM_RAM_PERM */
+        return 0xff;
+    case 0x2dc: /* CONTROL_OCM_PUB_RAM_ADD */
+    case 0x2e0: /* CONTROL_EXT_SEC_RAM_START_ADD */
+    case 0x2e4: /* CONTROL_EXT_SEC_RAM_STOP_ADD */
+        /* No secure mode so no Extended Secure RAM present.
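+         * Reading back zero is consistent with the general-purpose
+         * (non-secure) device type that CONTROL_STATUS advertises
+         * below (0x0300).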
*/ + return 0; + + case 0x2f8: /* CONTROL_STATUS */ + /* Device Type => General-purpose */ + return 0x0300; + case 0x2fc: /* CONTROL_GENERAL_PURPOSE_STATUS */ + + case 0x300: /* CONTROL_RPUB_KEY_H_0 */ + case 0x304: /* CONTROL_RPUB_KEY_H_1 */ + case 0x308: /* CONTROL_RPUB_KEY_H_2 */ + case 0x30c: /* CONTROL_RPUB_KEY_H_3 */ + return 0xdecafbad; + + case 0x310: /* CONTROL_RAND_KEY_0 */ + case 0x314: /* CONTROL_RAND_KEY_1 */ + case 0x318: /* CONTROL_RAND_KEY_2 */ + case 0x31c: /* CONTROL_RAND_KEY_3 */ + case 0x320: /* CONTROL_CUST_KEY_0 */ + case 0x324: /* CONTROL_CUST_KEY_1 */ + case 0x330: /* CONTROL_TEST_KEY_0 */ + case 0x334: /* CONTROL_TEST_KEY_1 */ + case 0x338: /* CONTROL_TEST_KEY_2 */ + case 0x33c: /* CONTROL_TEST_KEY_3 */ + case 0x340: /* CONTROL_TEST_KEY_4 */ + case 0x344: /* CONTROL_TEST_KEY_5 */ + case 0x348: /* CONTROL_TEST_KEY_6 */ + case 0x34c: /* CONTROL_TEST_KEY_7 */ + case 0x350: /* CONTROL_TEST_KEY_8 */ + case 0x354: /* CONTROL_TEST_KEY_9 */ + /* Can only be accessed in secure mode and when C_FieldAccEnable + * bit is set in CONTROL_SEC_CTRL. + * TODO: otherwise an interconnect access error is generated. */ + return 0; + } + + OMAP_BAD_REG(addr); + return 0; +} + +static void omap_sysctl_write8(void *opaque, hwaddr addr, + uint32_t value) +{ + struct omap_sysctl_s *s = (struct omap_sysctl_s *) opaque; + int pad_offset, byte_offset; + int prev_value; + + switch (addr) { + case 0x030 ... 0x140: /* CONTROL_PADCONF - only used in the POP */ + pad_offset = (addr - 0x30) >> 2; + byte_offset = (addr - 0x30) & (4 - 1); + + prev_value = s->padconf[pad_offset]; + prev_value &= ~(0xff << (byte_offset * 8)); + prev_value |= ((value & 0x1f1f1f1f) << (byte_offset * 8)) & 0x1f1f1f1f; + s->padconf[pad_offset] = prev_value; + break; + + default: + OMAP_BAD_REG(addr); + break; + } +} + +static void omap_sysctl_write(void *opaque, hwaddr addr, + uint32_t value) +{ + struct omap_sysctl_s *s = (struct omap_sysctl_s *) opaque; + + switch (addr) { + case 0x000: /* CONTROL_REVISION */ + case 0x2a4: /* CONTROL_MSUSPENDMUX_5 */ + case 0x2c0: /* CONTROL_PSA_VALUE */ + case 0x2f8: /* CONTROL_STATUS */ + case 0x2fc: /* CONTROL_GENERAL_PURPOSE_STATUS */ + case 0x300: /* CONTROL_RPUB_KEY_H_0 */ + case 0x304: /* CONTROL_RPUB_KEY_H_1 */ + case 0x308: /* CONTROL_RPUB_KEY_H_2 */ + case 0x30c: /* CONTROL_RPUB_KEY_H_3 */ + case 0x310: /* CONTROL_RAND_KEY_0 */ + case 0x314: /* CONTROL_RAND_KEY_1 */ + case 0x318: /* CONTROL_RAND_KEY_2 */ + case 0x31c: /* CONTROL_RAND_KEY_3 */ + case 0x320: /* CONTROL_CUST_KEY_0 */ + case 0x324: /* CONTROL_CUST_KEY_1 */ + case 0x330: /* CONTROL_TEST_KEY_0 */ + case 0x334: /* CONTROL_TEST_KEY_1 */ + case 0x338: /* CONTROL_TEST_KEY_2 */ + case 0x33c: /* CONTROL_TEST_KEY_3 */ + case 0x340: /* CONTROL_TEST_KEY_4 */ + case 0x344: /* CONTROL_TEST_KEY_5 */ + case 0x348: /* CONTROL_TEST_KEY_6 */ + case 0x34c: /* CONTROL_TEST_KEY_7 */ + case 0x350: /* CONTROL_TEST_KEY_8 */ + case 0x354: /* CONTROL_TEST_KEY_9 */ + OMAP_RO_REG(addr); + return; + + case 0x010: /* CONTROL_SYSCONFIG */ + s->sysconfig = value & 0x1e; + break; + + case 0x030 ... 
0x140: /* CONTROL_PADCONF - only used in the POP */ + /* XXX: should check constant bits */ + s->padconf[(addr - 0x30) >> 2] = value & 0x1f1f1f1f; + break; + + case 0x270: /* CONTROL_DEBOBS */ + s->obs = value & 0xff; + break; + + case 0x274: /* CONTROL_DEVCONF */ + s->devconfig = value & 0xffffc7ff; + break; + + case 0x28c: /* CONTROL_EMU_SUPPORT */ + break; + + case 0x290: /* CONTROL_MSUSPENDMUX_0 */ + s->msuspendmux[0] = value & 0x3fffffff; + break; + case 0x294: /* CONTROL_MSUSPENDMUX_1 */ + s->msuspendmux[1] = value & 0x3fffffff; + break; + case 0x298: /* CONTROL_MSUSPENDMUX_2 */ + s->msuspendmux[2] = value & 0x3fffffff; + break; + case 0x29c: /* CONTROL_MSUSPENDMUX_3 */ + s->msuspendmux[3] = value & 0x3fffffff; + break; + case 0x2a0: /* CONTROL_MSUSPENDMUX_4 */ + s->msuspendmux[4] = value & 0x3fffffff; + break; + + case 0x2b8: /* CONTROL_PSA_CTRL */ + s->psaconfig = value & 0x1c; + s->psaconfig |= (value & 0x20) ? 2 : 1; + break; + case 0x2bc: /* CONTROL_PSA_CMD */ + break; + + case 0x2b0: /* CONTROL_SEC_CTRL */ + case 0x2b4: /* CONTROL_SEC_TEST */ + case 0x2d0: /* CONTROL_SEC_EMU */ + case 0x2d4: /* CONTROL_SEC_TAP */ + case 0x2d8: /* CONTROL_OCM_RAM_PERM */ + case 0x2dc: /* CONTROL_OCM_PUB_RAM_ADD */ + case 0x2e0: /* CONTROL_EXT_SEC_RAM_START_ADD */ + case 0x2e4: /* CONTROL_EXT_SEC_RAM_STOP_ADD */ + case 0x2f0: /* CONTROL_SEC_STATUS */ + case 0x2f4: /* CONTROL_SEC_ERR_STATUS */ + break; + + default: + OMAP_BAD_REG(addr); + return; + } +} + +static uint64_t omap_sysctl_readfn(void *opaque, hwaddr addr, + unsigned size) +{ + switch (size) { + case 1: + return omap_sysctl_read8(opaque, addr); + case 2: + return omap_badwidth_read32(opaque, addr); /* TODO */ + case 4: + return omap_sysctl_read(opaque, addr); + default: + g_assert_not_reached(); + } +} + +static void omap_sysctl_writefn(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + switch (size) { + case 1: + omap_sysctl_write8(opaque, addr, value); + break; + case 2: + omap_badwidth_write32(opaque, addr, value); /* TODO */ + break; + case 4: + omap_sysctl_write(opaque, addr, value); + break; + default: + g_assert_not_reached(); + } +} + +static const MemoryRegionOps omap_sysctl_ops = { + .read = omap_sysctl_readfn, + .write = omap_sysctl_writefn, + .valid.min_access_size = 1, + .valid.max_access_size = 4, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void omap_sysctl_reset(struct omap_sysctl_s *s) +{ + /* (power-on reset) */ + s->sysconfig = 0; + s->obs = 0; + s->devconfig = 0x0c000000; + s->msuspendmux[0] = 0x00000000; + s->msuspendmux[1] = 0x00000000; + s->msuspendmux[2] = 0x00000000; + s->msuspendmux[3] = 0x00000000; + s->msuspendmux[4] = 0x00000000; + s->psaconfig = 1; + + s->padconf[0x00] = 0x000f0f0f; + s->padconf[0x01] = 0x00000000; + s->padconf[0x02] = 0x00000000; + s->padconf[0x03] = 0x00000000; + s->padconf[0x04] = 0x00000000; + s->padconf[0x05] = 0x00000000; + s->padconf[0x06] = 0x00000000; + s->padconf[0x07] = 0x00000000; + s->padconf[0x08] = 0x08080800; + s->padconf[0x09] = 0x08080808; + s->padconf[0x0a] = 0x08080808; + s->padconf[0x0b] = 0x08080808; + s->padconf[0x0c] = 0x08080808; + s->padconf[0x0d] = 0x08080800; + s->padconf[0x0e] = 0x08080808; + s->padconf[0x0f] = 0x08080808; + s->padconf[0x10] = 0x18181808; /* | 0x07070700 if SBoot3 */ + s->padconf[0x11] = 0x18181818; /* | 0x07070707 if SBoot3 */ + s->padconf[0x12] = 0x18181818; /* | 0x07070707 if SBoot3 */ + s->padconf[0x13] = 0x18181818; /* | 0x07070707 if SBoot3 */ + s->padconf[0x14] = 0x18181818; /* | 0x00070707 if SBoot3 */ + 
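+
+    /*
+     * Each 32-bit CONTROL_PADCONF word packs four byte-wide pad
+     * configurations, of which only bits 4:0 are writable (cf. the
+     * 0x1f1f1f1f masks in omap_sysctl_write{,8}() above); pad n of
+     * word w reads back as (padconf[w] >> (8 * n)) & 0xff.
+     */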
s->padconf[0x15] = 0x18181818; + s->padconf[0x16] = 0x18181818; /* | 0x07000000 if SBoot3 */ + s->padconf[0x17] = 0x1f001f00; + s->padconf[0x18] = 0x1f1f1f1f; + s->padconf[0x19] = 0x00000000; + s->padconf[0x1a] = 0x1f180000; + s->padconf[0x1b] = 0x00001f1f; + s->padconf[0x1c] = 0x1f001f00; + s->padconf[0x1d] = 0x00000000; + s->padconf[0x1e] = 0x00000000; + s->padconf[0x1f] = 0x08000000; + s->padconf[0x20] = 0x08080808; + s->padconf[0x21] = 0x08080808; + s->padconf[0x22] = 0x0f080808; + s->padconf[0x23] = 0x0f0f0f0f; + s->padconf[0x24] = 0x000f0f0f; + s->padconf[0x25] = 0x1f1f1f0f; + s->padconf[0x26] = 0x080f0f1f; + s->padconf[0x27] = 0x070f1808; + s->padconf[0x28] = 0x0f070707; + s->padconf[0x29] = 0x000f0f1f; + s->padconf[0x2a] = 0x0f0f0f1f; + s->padconf[0x2b] = 0x08000000; + s->padconf[0x2c] = 0x0000001f; + s->padconf[0x2d] = 0x0f0f1f00; + s->padconf[0x2e] = 0x1f1f0f0f; + s->padconf[0x2f] = 0x0f1f1f1f; + s->padconf[0x30] = 0x0f0f0f0f; + s->padconf[0x31] = 0x0f1f0f1f; + s->padconf[0x32] = 0x0f0f0f0f; + s->padconf[0x33] = 0x0f1f0f1f; + s->padconf[0x34] = 0x1f1f0f0f; + s->padconf[0x35] = 0x0f0f1f1f; + s->padconf[0x36] = 0x0f0f1f0f; + s->padconf[0x37] = 0x0f0f0f0f; + s->padconf[0x38] = 0x1f18180f; + s->padconf[0x39] = 0x1f1f1f1f; + s->padconf[0x3a] = 0x00001f1f; + s->padconf[0x3b] = 0x00000000; + s->padconf[0x3c] = 0x00000000; + s->padconf[0x3d] = 0x0f0f0f0f; + s->padconf[0x3e] = 0x18000f0f; + s->padconf[0x3f] = 0x00070000; + s->padconf[0x40] = 0x00000707; + s->padconf[0x41] = 0x0f1f0700; + s->padconf[0x42] = 0x1f1f070f; + s->padconf[0x43] = 0x0008081f; + s->padconf[0x44] = 0x00000800; +} + +static struct omap_sysctl_s *omap_sysctl_init(struct omap_target_agent_s *ta, + omap_clk iclk, struct omap_mpu_state_s *mpu) +{ + struct omap_sysctl_s *s = g_new0(struct omap_sysctl_s, 1); + + s->mpu = mpu; + omap_sysctl_reset(s); + + memory_region_init_io(&s->iomem, NULL, &omap_sysctl_ops, s, "omap.sysctl", + omap_l4_region_size(ta, 0)); + omap_l4_attach(ta, 0, &s->iomem); + + return s; +} + +/* General chip reset */ +static void omap2_mpu_reset(void *opaque) +{ + struct omap_mpu_state_s *mpu = (struct omap_mpu_state_s *) opaque; + + omap_dma_reset(mpu->dma); + omap_prcm_reset(mpu->prcm); + omap_sysctl_reset(mpu->sysc); + omap_gp_timer_reset(mpu->gptimer[0]); + omap_gp_timer_reset(mpu->gptimer[1]); + omap_gp_timer_reset(mpu->gptimer[2]); + omap_gp_timer_reset(mpu->gptimer[3]); + omap_gp_timer_reset(mpu->gptimer[4]); + omap_gp_timer_reset(mpu->gptimer[5]); + omap_gp_timer_reset(mpu->gptimer[6]); + omap_gp_timer_reset(mpu->gptimer[7]); + omap_gp_timer_reset(mpu->gptimer[8]); + omap_gp_timer_reset(mpu->gptimer[9]); + omap_gp_timer_reset(mpu->gptimer[10]); + omap_gp_timer_reset(mpu->gptimer[11]); + omap_synctimer_reset(mpu->synctimer); + omap_sdrc_reset(mpu->sdrc); + omap_gpmc_reset(mpu->gpmc); + omap_dss_reset(mpu->dss); + omap_uart_reset(mpu->uart[0]); + omap_uart_reset(mpu->uart[1]); + omap_uart_reset(mpu->uart[2]); + omap_mmc_reset(mpu->mmc); + omap_mcspi_reset(mpu->mcspi[0]); + omap_mcspi_reset(mpu->mcspi[1]); + cpu_reset(CPU(mpu->cpu)); +} + +static int omap2_validate_addr(struct omap_mpu_state_s *s, + hwaddr addr) +{ + return 1; +} + +static const struct dma_irq_map omap2_dma_irq_map[] = { + { 0, OMAP_INT_24XX_SDMA_IRQ0 }, + { 0, OMAP_INT_24XX_SDMA_IRQ1 }, + { 0, OMAP_INT_24XX_SDMA_IRQ2 }, + { 0, OMAP_INT_24XX_SDMA_IRQ3 }, +}; + +struct omap_mpu_state_s *omap2420_mpu_init(MemoryRegion *sdram, + const char *cpu_type) +{ + struct omap_mpu_state_s *s = g_new0(struct omap_mpu_state_s, 1); + qemu_irq 
dma_irqs[4]; + DriveInfo *dinfo; + int i; + SysBusDevice *busdev; + struct omap_target_agent_s *ta; + MemoryRegion *sysmem = get_system_memory(); + + /* Core */ + s->mpu_model = omap2420; + s->cpu = ARM_CPU(cpu_create(cpu_type)); + s->sram_size = OMAP242X_SRAM_SIZE; + + s->wakeup = qemu_allocate_irq(omap_mpu_wakeup, s, 0); + + /* Clocks */ + omap_clk_init(s); + + /* Memory-mapped stuff */ + memory_region_init_ram(&s->sram, NULL, "omap2.sram", s->sram_size, + &error_fatal); + memory_region_add_subregion(sysmem, OMAP2_SRAM_BASE, &s->sram); + + s->l4 = omap_l4_init(sysmem, OMAP2_L4_BASE, 54); + + /* Actually mapped at any 2K boundary in the ARM11 private-peripheral if */ + s->ih[0] = qdev_new("omap2-intc"); + qdev_prop_set_uint8(s->ih[0], "revision", 0x21); + omap_intc_set_fclk(OMAP_INTC(s->ih[0]), omap_findclk(s, "mpu_intc_fclk")); + omap_intc_set_iclk(OMAP_INTC(s->ih[0]), omap_findclk(s, "mpu_intc_iclk")); + busdev = SYS_BUS_DEVICE(s->ih[0]); + sysbus_realize_and_unref(busdev, &error_fatal); + sysbus_connect_irq(busdev, 0, + qdev_get_gpio_in(DEVICE(s->cpu), ARM_CPU_IRQ)); + sysbus_connect_irq(busdev, 1, + qdev_get_gpio_in(DEVICE(s->cpu), ARM_CPU_FIQ)); + sysbus_mmio_map(busdev, 0, 0x480fe000); + s->prcm = omap_prcm_init(omap_l4tao(s->l4, 3), + qdev_get_gpio_in(s->ih[0], + OMAP_INT_24XX_PRCM_MPU_IRQ), + NULL, NULL, s); + + s->sysc = omap_sysctl_init(omap_l4tao(s->l4, 1), + omap_findclk(s, "omapctrl_iclk"), s); + + for (i = 0; i < 4; i++) { + dma_irqs[i] = qdev_get_gpio_in(s->ih[omap2_dma_irq_map[i].ih], + omap2_dma_irq_map[i].intr); + } + s->dma = omap_dma4_init(0x48056000, dma_irqs, sysmem, s, 256, 32, + omap_findclk(s, "sdma_iclk"), + omap_findclk(s, "sdma_fclk")); + s->port->addr_valid = omap2_validate_addr; + + /* Register SDRAM and SRAM ports for fast DMA transfers. */ + soc_dma_port_add_mem(s->dma, memory_region_get_ram_ptr(sdram), + OMAP2_Q2_BASE, memory_region_size(sdram)); + soc_dma_port_add_mem(s->dma, memory_region_get_ram_ptr(&s->sram), + OMAP2_SRAM_BASE, s->sram_size); + + s->uart[0] = omap2_uart_init(sysmem, omap_l4ta(s->l4, 19), + qdev_get_gpio_in(s->ih[0], + OMAP_INT_24XX_UART1_IRQ), + omap_findclk(s, "uart1_fclk"), + omap_findclk(s, "uart1_iclk"), + s->drq[OMAP24XX_DMA_UART1_TX], + s->drq[OMAP24XX_DMA_UART1_RX], + "uart1", + serial_hd(0)); + s->uart[1] = omap2_uart_init(sysmem, omap_l4ta(s->l4, 20), + qdev_get_gpio_in(s->ih[0], + OMAP_INT_24XX_UART2_IRQ), + omap_findclk(s, "uart2_fclk"), + omap_findclk(s, "uart2_iclk"), + s->drq[OMAP24XX_DMA_UART2_TX], + s->drq[OMAP24XX_DMA_UART2_RX], + "uart2", + serial_hd(0) ? serial_hd(1) : NULL); + s->uart[2] = omap2_uart_init(sysmem, omap_l4ta(s->l4, 21), + qdev_get_gpio_in(s->ih[0], + OMAP_INT_24XX_UART3_IRQ), + omap_findclk(s, "uart3_fclk"), + omap_findclk(s, "uart3_iclk"), + s->drq[OMAP24XX_DMA_UART3_TX], + s->drq[OMAP24XX_DMA_UART3_RX], + "uart3", + serial_hd(0) && serial_hd(1) ? 
serial_hd(2) : NULL); + + s->gptimer[0] = omap_gp_timer_init(omap_l4ta(s->l4, 7), + qdev_get_gpio_in(s->ih[0], OMAP_INT_24XX_GPTIMER1), + omap_findclk(s, "wu_gpt1_clk"), + omap_findclk(s, "wu_l4_iclk")); + s->gptimer[1] = omap_gp_timer_init(omap_l4ta(s->l4, 8), + qdev_get_gpio_in(s->ih[0], OMAP_INT_24XX_GPTIMER2), + omap_findclk(s, "core_gpt2_clk"), + omap_findclk(s, "core_l4_iclk")); + s->gptimer[2] = omap_gp_timer_init(omap_l4ta(s->l4, 22), + qdev_get_gpio_in(s->ih[0], OMAP_INT_24XX_GPTIMER3), + omap_findclk(s, "core_gpt3_clk"), + omap_findclk(s, "core_l4_iclk")); + s->gptimer[3] = omap_gp_timer_init(omap_l4ta(s->l4, 23), + qdev_get_gpio_in(s->ih[0], OMAP_INT_24XX_GPTIMER4), + omap_findclk(s, "core_gpt4_clk"), + omap_findclk(s, "core_l4_iclk")); + s->gptimer[4] = omap_gp_timer_init(omap_l4ta(s->l4, 24), + qdev_get_gpio_in(s->ih[0], OMAP_INT_24XX_GPTIMER5), + omap_findclk(s, "core_gpt5_clk"), + omap_findclk(s, "core_l4_iclk")); + s->gptimer[5] = omap_gp_timer_init(omap_l4ta(s->l4, 25), + qdev_get_gpio_in(s->ih[0], OMAP_INT_24XX_GPTIMER6), + omap_findclk(s, "core_gpt6_clk"), + omap_findclk(s, "core_l4_iclk")); + s->gptimer[6] = omap_gp_timer_init(omap_l4ta(s->l4, 26), + qdev_get_gpio_in(s->ih[0], OMAP_INT_24XX_GPTIMER7), + omap_findclk(s, "core_gpt7_clk"), + omap_findclk(s, "core_l4_iclk")); + s->gptimer[7] = omap_gp_timer_init(omap_l4ta(s->l4, 27), + qdev_get_gpio_in(s->ih[0], OMAP_INT_24XX_GPTIMER8), + omap_findclk(s, "core_gpt8_clk"), + omap_findclk(s, "core_l4_iclk")); + s->gptimer[8] = omap_gp_timer_init(omap_l4ta(s->l4, 28), + qdev_get_gpio_in(s->ih[0], OMAP_INT_24XX_GPTIMER9), + omap_findclk(s, "core_gpt9_clk"), + omap_findclk(s, "core_l4_iclk")); + s->gptimer[9] = omap_gp_timer_init(omap_l4ta(s->l4, 29), + qdev_get_gpio_in(s->ih[0], OMAP_INT_24XX_GPTIMER10), + omap_findclk(s, "core_gpt10_clk"), + omap_findclk(s, "core_l4_iclk")); + s->gptimer[10] = omap_gp_timer_init(omap_l4ta(s->l4, 30), + qdev_get_gpio_in(s->ih[0], OMAP_INT_24XX_GPTIMER11), + omap_findclk(s, "core_gpt11_clk"), + omap_findclk(s, "core_l4_iclk")); + s->gptimer[11] = omap_gp_timer_init(omap_l4ta(s->l4, 31), + qdev_get_gpio_in(s->ih[0], OMAP_INT_24XX_GPTIMER12), + omap_findclk(s, "core_gpt12_clk"), + omap_findclk(s, "core_l4_iclk")); + + omap_tap_init(omap_l4ta(s->l4, 2), s); + + s->synctimer = omap_synctimer_init(omap_l4tao(s->l4, 2), s, + omap_findclk(s, "clk32-kHz"), + omap_findclk(s, "core_l4_iclk")); + + s->i2c[0] = qdev_new("omap_i2c"); + qdev_prop_set_uint8(s->i2c[0], "revision", 0x34); + omap_i2c_set_iclk(OMAP_I2C(s->i2c[0]), omap_findclk(s, "i2c1.iclk")); + omap_i2c_set_fclk(OMAP_I2C(s->i2c[0]), omap_findclk(s, "i2c1.fclk")); + busdev = SYS_BUS_DEVICE(s->i2c[0]); + sysbus_realize_and_unref(busdev, &error_fatal); + sysbus_connect_irq(busdev, 0, + qdev_get_gpio_in(s->ih[0], OMAP_INT_24XX_I2C1_IRQ)); + sysbus_connect_irq(busdev, 1, s->drq[OMAP24XX_DMA_I2C1_TX]); + sysbus_connect_irq(busdev, 2, s->drq[OMAP24XX_DMA_I2C1_RX]); + sysbus_mmio_map(busdev, 0, omap_l4_region_base(omap_l4tao(s->l4, 5), 0)); + + s->i2c[1] = qdev_new("omap_i2c"); + qdev_prop_set_uint8(s->i2c[1], "revision", 0x34); + omap_i2c_set_iclk(OMAP_I2C(s->i2c[1]), omap_findclk(s, "i2c2.iclk")); + omap_i2c_set_fclk(OMAP_I2C(s->i2c[1]), omap_findclk(s, "i2c2.fclk")); + busdev = SYS_BUS_DEVICE(s->i2c[1]); + sysbus_realize_and_unref(busdev, &error_fatal); + sysbus_connect_irq(busdev, 0, + qdev_get_gpio_in(s->ih[0], OMAP_INT_24XX_I2C2_IRQ)); + sysbus_connect_irq(busdev, 1, s->drq[OMAP24XX_DMA_I2C2_TX]); + sysbus_connect_irq(busdev, 2, 
s->drq[OMAP24XX_DMA_I2C2_RX]); + sysbus_mmio_map(busdev, 0, omap_l4_region_base(omap_l4tao(s->l4, 6), 0)); + + s->gpio = qdev_new("omap2-gpio"); + qdev_prop_set_int32(s->gpio, "mpu_model", s->mpu_model); + omap2_gpio_set_iclk(OMAP2_GPIO(s->gpio), omap_findclk(s, "gpio_iclk")); + omap2_gpio_set_fclk(OMAP2_GPIO(s->gpio), 0, omap_findclk(s, "gpio1_dbclk")); + omap2_gpio_set_fclk(OMAP2_GPIO(s->gpio), 1, omap_findclk(s, "gpio2_dbclk")); + omap2_gpio_set_fclk(OMAP2_GPIO(s->gpio), 2, omap_findclk(s, "gpio3_dbclk")); + omap2_gpio_set_fclk(OMAP2_GPIO(s->gpio), 3, omap_findclk(s, "gpio4_dbclk")); + if (s->mpu_model == omap2430) { + omap2_gpio_set_fclk(OMAP2_GPIO(s->gpio), 4, + omap_findclk(s, "gpio5_dbclk")); + } + busdev = SYS_BUS_DEVICE(s->gpio); + sysbus_realize_and_unref(busdev, &error_fatal); + sysbus_connect_irq(busdev, 0, + qdev_get_gpio_in(s->ih[0], OMAP_INT_24XX_GPIO_BANK1)); + sysbus_connect_irq(busdev, 3, + qdev_get_gpio_in(s->ih[0], OMAP_INT_24XX_GPIO_BANK2)); + sysbus_connect_irq(busdev, 6, + qdev_get_gpio_in(s->ih[0], OMAP_INT_24XX_GPIO_BANK3)); + sysbus_connect_irq(busdev, 9, + qdev_get_gpio_in(s->ih[0], OMAP_INT_24XX_GPIO_BANK4)); + if (s->mpu_model == omap2430) { + sysbus_connect_irq(busdev, 12, + qdev_get_gpio_in(s->ih[0], + OMAP_INT_243X_GPIO_BANK5)); + } + ta = omap_l4ta(s->l4, 3); + sysbus_mmio_map(busdev, 0, omap_l4_region_base(ta, 1)); + sysbus_mmio_map(busdev, 1, omap_l4_region_base(ta, 0)); + sysbus_mmio_map(busdev, 2, omap_l4_region_base(ta, 2)); + sysbus_mmio_map(busdev, 3, omap_l4_region_base(ta, 4)); + sysbus_mmio_map(busdev, 4, omap_l4_region_base(ta, 5)); + + s->sdrc = omap_sdrc_init(sysmem, 0x68009000); + s->gpmc = omap_gpmc_init(s, 0x6800a000, + qdev_get_gpio_in(s->ih[0], OMAP_INT_24XX_GPMC_IRQ), + s->drq[OMAP24XX_DMA_GPMC]); + + dinfo = drive_get(IF_SD, 0, 0); + if (!dinfo && !qtest_enabled()) { + warn_report("missing SecureDigital device"); + } + s->mmc = omap2_mmc_init(omap_l4tao(s->l4, 9), + dinfo ? blk_by_legacy_dinfo(dinfo) : NULL, + qdev_get_gpio_in(s->ih[0], OMAP_INT_24XX_MMC_IRQ), + &s->drq[OMAP24XX_DMA_MMC1_TX], + omap_findclk(s, "mmc_fclk"), omap_findclk(s, "mmc_iclk")); + + s->mcspi[0] = omap_mcspi_init(omap_l4ta(s->l4, 35), 4, + qdev_get_gpio_in(s->ih[0], OMAP_INT_24XX_MCSPI1_IRQ), + &s->drq[OMAP24XX_DMA_SPI1_TX0], + omap_findclk(s, "spi1_fclk"), + omap_findclk(s, "spi1_iclk")); + s->mcspi[1] = omap_mcspi_init(omap_l4ta(s->l4, 36), 2, + qdev_get_gpio_in(s->ih[0], OMAP_INT_24XX_MCSPI2_IRQ), + &s->drq[OMAP24XX_DMA_SPI2_TX0], + omap_findclk(s, "spi2_fclk"), + omap_findclk(s, "spi2_iclk")); + + s->dss = omap_dss_init(omap_l4ta(s->l4, 10), sysmem, 0x68000800, + /* XXX wire M_IRQ_25, D_L2_IRQ_30 and I_IRQ_13 together */ + qdev_get_gpio_in(s->ih[0], OMAP_INT_24XX_DSS_IRQ), + s->drq[OMAP24XX_DMA_DSS], + omap_findclk(s, "dss_clk1"), omap_findclk(s, "dss_clk2"), + omap_findclk(s, "dss_54m_clk"), + omap_findclk(s, "dss_l3_iclk"), + omap_findclk(s, "dss_l4_iclk")); + + omap_sti_init(omap_l4ta(s->l4, 18), sysmem, 0x54000000, + qdev_get_gpio_in(s->ih[0], OMAP_INT_24XX_STI), + omap_findclk(s, "emul_ck"), + serial_hd(0) && serial_hd(1) && serial_hd(2) ? 
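The ternary chains passed as the Chardev argument above implement positional fallback: UART n only gets host backend n when backends 0..n-1 all exist, because -serial options are assigned in order. The same test factored into a helper (a sketch, not part of the patch):

    /* Return serial_hd(n) only when every earlier -serial slot is filled,
     * mirroring the chained serial_hd(0) && serial_hd(1) ... guards above. */
    static Chardev *serial_hd_or_null(int n)
    {
        int i;

        for (i = 0; i < n; i++) {
            if (!serial_hd(i)) {
                return NULL;
            }
        }
        return serial_hd(n);
    }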
+                  serial_hd(3) : NULL);
+
+    s->eac = omap_eac_init(omap_l4ta(s->l4, 32),
+                    qdev_get_gpio_in(s->ih[0], OMAP_INT_24XX_EAC_IRQ),
+                    /* Ten consecutive lines */
+                    &s->drq[OMAP24XX_DMA_EAC_AC_RD],
+                    omap_findclk(s, "func_96m_clk"),
+                    omap_findclk(s, "core_l4_iclk"));
+
+    /* All register mappings (including those not currently implemented):
+     * SystemControlMod    48000000 - 48000fff
+     * SystemControlL4     48001000 - 48001fff
+     * 32kHz Timer Mod     48004000 - 48004fff
+     * 32kHz Timer L4      48005000 - 48005fff
+     * PRCM ModA           48008000 - 480087ff
+     * PRCM ModB           48008800 - 48008fff
+     * PRCM L4             48009000 - 48009fff
+     * TEST-BCM Mod        48012000 - 48012fff
+     * TEST-BCM L4         48013000 - 48013fff
+     * TEST-TAP Mod        48014000 - 48014fff
+     * TEST-TAP L4         48015000 - 48015fff
+     * GPIO1 Mod           48018000 - 48018fff
+     * GPIO Top            48019000 - 48019fff
+     * GPIO2 Mod           4801a000 - 4801afff
+     * GPIO L4             4801b000 - 4801bfff
+     * GPIO3 Mod           4801c000 - 4801cfff
+     * GPIO4 Mod           4801e000 - 4801efff
+     * WDTIMER1 Mod        48020000 - 48020fff
+     * WDTIMER Top         48021000 - 48021fff
+     * WDTIMER2 Mod        48022000 - 48022fff
+     * WDTIMER L4          48023000 - 48023fff
+     * WDTIMER3 Mod        48024000 - 48024fff
+     * WDTIMER3 L4         48025000 - 48025fff
+     * WDTIMER4 Mod        48026000 - 48026fff
+     * WDTIMER4 L4         48027000 - 48027fff
+     * GPTIMER1 Mod        48028000 - 48028fff
+     * GPTIMER1 L4         48029000 - 48029fff
+     * GPTIMER2 Mod        4802a000 - 4802afff
+     * GPTIMER2 L4         4802b000 - 4802bfff
+     * L4-Config AP        48040000 - 480407ff
+     * L4-Config IP        48040800 - 48040fff
+     * L4-Config LA        48041000 - 48041fff
+     * ARM11ETB Mod        48048000 - 48049fff
+     * ARM11ETB L4         4804a000 - 4804afff
+     * DISPLAY Top         48050000 - 480503ff
+     * DISPLAY DISPC       48050400 - 480507ff
+     * DISPLAY RFBI        48050800 - 48050bff
+     * DISPLAY VENC        48050c00 - 48050fff
+     * DISPLAY L4          48051000 - 48051fff
+     * CAMERA Top          48052000 - 480523ff
+     * CAMERA core         48052400 - 480527ff
+     * CAMERA DMA          48052800 - 48052bff
+     * CAMERA MMU          48052c00 - 48052fff
+     * CAMERA L4           48053000 - 48053fff
+     * SDMA Mod            48056000 - 48056fff
+     * SDMA L4             48057000 - 48057fff
+     * SSI Top             48058000 - 48058fff
+     * SSI GDD             48059000 - 48059fff
+     * SSI Port1           4805a000 - 4805afff
+     * SSI Port2           4805b000 - 4805bfff
+     * SSI L4              4805c000 - 4805cfff
+     * USB Mod             4805e000 - 480fefff
+     * USB L4              4805f000 - 480fffff
+     * WIN_TRACER1 Mod     48060000 - 48060fff
+     * WIN_TRACER1 L4      48061000 - 48061fff
+     * WIN_TRACER2 Mod     48062000 - 48062fff
+     * WIN_TRACER2 L4      48063000 - 48063fff
+     * WIN_TRACER3 Mod     48064000 - 48064fff
+     * WIN_TRACER3 L4      48065000 - 48065fff
+     * WIN_TRACER4 Top     48066000 - 480660ff
+     * WIN_TRACER4 ETT     48066100 - 480661ff
+     * WIN_TRACER4 WT      48066200 - 480662ff
+     * WIN_TRACER4 L4      48067000 - 48067fff
+     * XTI Mod             48068000 - 48068fff
+     * XTI L4              48069000 - 48069fff
+     * UART1 Mod           4806a000 - 4806afff
+     * UART1 L4            4806b000 - 4806bfff
+     * UART2 Mod           4806c000 - 4806cfff
+     * UART2 L4            4806d000 - 4806dfff
+     * UART3 Mod           4806e000 - 4806efff
+     * UART3 L4            4806f000 - 4806ffff
+     * I2C1 Mod            48070000 - 48070fff
+     * I2C1 L4             48071000 - 48071fff
+     * I2C2 Mod            48072000 - 48072fff
+     * I2C2 L4             48073000 - 48073fff
+     * McBSP1 Mod          48074000 - 48074fff
+     * McBSP1 L4           48075000 - 48075fff
+     * McBSP2 Mod          48076000 - 48076fff
+     * McBSP2 L4           48077000 - 48077fff
+     * GPTIMER3 Mod        48078000 - 48078fff
+     * GPTIMER3 L4         48079000 - 48079fff
+     * GPTIMER4 Mod        4807a000 - 4807afff
+     * GPTIMER4 L4         4807b000 - 4807bfff
+     * GPTIMER5 Mod        4807c000 - 4807cfff
+     * GPTIMER5 L4         4807d000 - 4807dfff
+     * GPTIMER6 Mod        4807e000 - 4807efff
+     * GPTIMER6 L4         4807f000 - 4807ffff
+     * GPTIMER7 Mod        48080000 - 48080fff
+     * GPTIMER7 L4         48081000 - 48081fff
+     * GPTIMER8 Mod        48082000 - 48082fff
+     * GPTIMER8 L4         48083000 - 48083fff
+     * GPTIMER9 Mod        48084000 - 48084fff
+     * GPTIMER9 L4         48085000 - 48085fff
+     * GPTIMER10 Mod       48086000 - 48086fff
+     * GPTIMER10 L4        48087000 - 48087fff
+     * GPTIMER11 Mod       48088000 - 48088fff
+     * GPTIMER11 L4        48089000 - 48089fff
+     * GPTIMER12 Mod       4808a000 - 4808afff
+     * GPTIMER12 L4        4808b000 - 4808bfff
+     * EAC Mod             48090000 - 48090fff
+     * EAC L4              48091000 - 48091fff
+     * FAC Mod             48092000 - 48092fff
+     * FAC L4              48093000 - 48093fff
+     * MAILBOX Mod         48094000 - 48094fff
+     * MAILBOX L4          48095000 - 48095fff
+     * SPI1 Mod            48098000 - 48098fff
+     * SPI1 L4             48099000 - 48099fff
+     * SPI2 Mod            4809a000 - 4809afff
+     * SPI2 L4             4809b000 - 4809bfff
+     * MMC/SDIO Mod        4809c000 - 4809cfff
+     * MMC/SDIO L4         4809d000 - 4809dfff
+     * MS_PRO Mod          4809e000 - 4809efff
+     * MS_PRO L4           4809f000 - 4809ffff
+     * RNG Mod             480a0000 - 480a0fff
+     * RNG L4              480a1000 - 480a1fff
+     * DES3DES Mod         480a2000 - 480a2fff
+     * DES3DES L4          480a3000 - 480a3fff
+     * SHA1MD5 Mod         480a4000 - 480a4fff
+     * SHA1MD5 L4          480a5000 - 480a5fff
+     * AES Mod             480a6000 - 480a6fff
+     * AES L4              480a7000 - 480a7fff
+     * PKA Mod             480a8000 - 480a9fff
+     * PKA L4              480aa000 - 480aafff
+     * MG Mod              480b0000 - 480b0fff
+     * MG L4               480b1000 - 480b1fff
+     * HDQ/1-wire Mod      480b2000 - 480b2fff
+     * HDQ/1-wire L4       480b3000 - 480b3fff
+     * MPU interrupt       480fe000 - 480fefff
+     * STI channel base    54000000 - 5400ffff
+     * IVA RAM             5c000000 - 5c01ffff
+     * IVA ROM             5c020000 - 5c027fff
+     * IMG_BUF_A           5c040000 - 5c040fff
+     * IMG_BUF_B           5c042000 - 5c042fff
+     * VLCDS               5c048000 - 5c0487ff
+     * IMX_COEF            5c049000 - 5c04afff
+     * IMX_CMD             5c051000 - 5c051fff
+     * VLCDQ               5c053000 - 5c0533ff
+     * VLCDH               5c054000 - 5c054fff
+     * SEQ_CMD             5c055000 - 5c055fff
+     * IMX_REG             5c056000 - 5c0560ff
+     * VLCD_REG            5c056100 - 5c0561ff
+     * SEQ_REG             5c056200 - 5c0562ff
+     * IMG_BUF_REG         5c056300 - 5c0563ff
+     * SEQIRQ_REG          5c056400 - 5c0564ff
+     * OCP_REG             5c060000 - 5c060fff
+     * SYSC_REG            5c070000 - 5c070fff
+     * MMU_REG             5d000000 - 5d000fff
+     * sDMA R              68000400 - 680005ff
+     * sDMA W              68000600 - 680007ff
+     * Display Control     68000800 - 680009ff
+     * DSP subsystem       68000a00 - 68000bff
+     * MPU subsystem       68000c00 - 68000dff
+     * IVA subsystem       68001000 - 680011ff
+     * USB                 68001200 - 680013ff
+     * Camera              68001400 - 680015ff
+     * VLYNQ (firewall)    68001800 - 68001bff
+     * VLYNQ               68001e00 - 68001fff
+     * SSI                 68002000 - 680021ff
+     * L4                  68002400 - 680025ff
+     * DSP (firewall)      68002800 - 68002bff
+     * DSP subsystem       68002e00 - 68002fff
+     * IVA (firewall)      68003000 - 680033ff
+     * IVA                 68003600 - 680037ff
+     * GFX                 68003a00 - 68003bff
+     * CMDWR emulation     68003c00 - 68003dff
+     * SMS                 68004000 - 680041ff
+     * OCM                 68004200 - 680043ff
+     * GPMC                68004400 - 680045ff
+     * RAM (firewall)      68005000 - 680053ff
+     * RAM (err login)     68005400 - 680057ff
+     * ROM (firewall)      68005800 - 68005bff
+     * ROM (err login)     68005c00 - 68005fff
+     * GPMC (firewall)     68006000 - 680063ff
+     * GPMC (err login)    68006400 - 680067ff
+     * SMS (err login)     68006c00 - 68006fff
+     * SMS registers       68008000 - 68008fff
+     * SDRC registers      68009000 - 68009fff
+     * GPMC registers      6800a000 - 6800afff
+     */
+
+    qemu_register_reset(omap2_mpu_reset, s);
+
+    return s;
+}
diff --git a/hw/arm/omap_sx1.c b/hw/arm/omap_sx1.c
new file mode 100644
index 000000000..57829b374
--- /dev/null
+++ b/hw/arm/omap_sx1.c
@@ -0,0 +1,257 @@
+/* omap_sx1.c Support for the Siemens SX1 smartphone emulation.
+ *
+ * Copyright (C) 2008
+ * Jean-Christophe PLAGNIOL-VILLARD
+ * Copyright (C) 2007 Vladimir Ananiev
+ *
+ * based on PalmOne's (TM) PDAs support (palm.c)
+ */
+
+/*
+ * PalmOne's (TM) PDAs.
+ * + * Copyright (C) 2006-2007 Andrzej Zaborowski + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . + */ +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "ui/console.h" +#include "hw/arm/omap.h" +#include "hw/boards.h" +#include "hw/arm/boot.h" +#include "hw/block/flash.h" +#include "sysemu/qtest.h" +#include "exec/address-spaces.h" +#include "cpu.h" +#include "qemu/cutils.h" + +/*****************************************************************************/ +/* Siemens SX1 Cellphone V1 */ +/* - ARM OMAP310 processor + * - SRAM 192 kB + * - SDRAM 32 MB at 0x10000000 + * - Boot flash 16 MB at 0x00000000 + * - Application flash 8 MB at 0x04000000 + * - 3 serial ports + * - 1 SecureDigital + * - 1 LCD display + * - 1 RTC + */ + +/*****************************************************************************/ +/* Siemens SX1 Cellphone V2 */ +/* - ARM OMAP310 processor + * - SRAM 192 kB + * - SDRAM 32 MB at 0x10000000 + * - Boot flash 32 MB at 0x00000000 + * - 3 serial ports + * - 1 SecureDigital + * - 1 LCD display + * - 1 RTC + */ + +static uint64_t static_read(void *opaque, hwaddr offset, + unsigned size) +{ + uint32_t *val = (uint32_t *) opaque; + uint32_t mask = (4 / size) - 1; + + return *val >> ((offset & mask) << 3); +} + +static void static_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ +#ifdef SPY + printf("%s: value %" PRIx64 " %u bytes written at 0x%x\n", + __func__, value, size, (int)offset); +#endif +} + +static const MemoryRegionOps static_ops = { + .read = static_read, + .write = static_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +#define sdram_size 0x02000000 +#define sector_size (128 * 1024) +#define flash0_size (16 * 1024 * 1024) +#define flash1_size ( 8 * 1024 * 1024) +#define flash2_size (32 * 1024 * 1024) +#define total_ram_v1 (sdram_size + flash0_size + flash1_size + OMAP15XX_SRAM_SIZE) +#define total_ram_v2 (sdram_size + flash2_size + OMAP15XX_SRAM_SIZE) + +static struct arm_boot_info sx1_binfo = { + .loader_start = OMAP_EMIFF_BASE, + .ram_size = sdram_size, + .board_id = 0x265, +}; + +static void sx1_init(MachineState *machine, const int version) +{ + struct omap_mpu_state_s *mpu; + MachineClass *mc = MACHINE_GET_CLASS(machine); + MemoryRegion *address_space = get_system_memory(); + MemoryRegion *flash = g_new(MemoryRegion, 1); + MemoryRegion *cs = g_new(MemoryRegion, 4); + static uint32_t cs0val = 0x00213090; + static uint32_t cs1val = 0x00215070; + static uint32_t cs2val = 0x00001139; + static uint32_t cs3val = 0x00001139; + DriveInfo *dinfo; + int fl_idx; + uint32_t flash_size = flash0_size; + + if (machine->ram_size != mc->default_ram_size) { + char *sz = size_to_str(mc->default_ram_size); + error_report("Invalid RAM size, should be %s", sz); + g_free(sz); + exit(EXIT_FAILURE); + } + + if (version == 2) { + flash_size = flash2_size; + } + + memory_region_add_subregion(address_space, OMAP_EMIFF_BASE, machine->ram); + + mpu = omap310_mpu_init(machine->ram, 
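static_read() above turns a fixed 32-bit configuration word into a read-only MMIO region: the byte offset within the word selects how far to shift the constant. A self-contained illustration of the arithmetic, compilable outside QEMU (values taken from sx1_init()):

    #include <stdio.h>
    #include <stdint.h>

    /* Same arithmetic as static_read() above. */
    static uint32_t static_read_demo(uint32_t val, uint32_t offset, unsigned size)
    {
        uint32_t mask = (4 / size) - 1;          /* 3, 1 or 0 byte-offset bits */
        return val >> ((offset & mask) << 3);    /* shift in whole bytes */
    }

    int main(void)
    {
        uint32_t cs0val = 0x00213090;            /* chip-select 0 value above */
        printf("%08x\n", static_read_demo(cs0val, 0, 4));           /* 00213090 */
        printf("%02x\n", (uint8_t)static_read_demo(cs0val, 1, 1));  /* 30 */
        printf("%02x\n", (uint8_t)static_read_demo(cs0val, 2, 1));  /* 21 */
        return 0;
    }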
machine->cpu_type); + + /* External Flash (EMIFS) */ + memory_region_init_rom(flash, NULL, "omap_sx1.flash0-0", flash_size, + &error_fatal); + memory_region_add_subregion(address_space, OMAP_CS0_BASE, flash); + + memory_region_init_io(&cs[0], NULL, &static_ops, &cs0val, + "sx1.cs0", OMAP_CS0_SIZE - flash_size); + memory_region_add_subregion(address_space, + OMAP_CS0_BASE + flash_size, &cs[0]); + + + memory_region_init_io(&cs[2], NULL, &static_ops, &cs2val, + "sx1.cs2", OMAP_CS2_SIZE); + memory_region_add_subregion(address_space, + OMAP_CS2_BASE, &cs[2]); + + memory_region_init_io(&cs[3], NULL, &static_ops, &cs3val, + "sx1.cs3", OMAP_CS3_SIZE); + memory_region_add_subregion(address_space, + OMAP_CS2_BASE, &cs[3]); + + fl_idx = 0; + if ((dinfo = drive_get(IF_PFLASH, 0, fl_idx)) != NULL) { + if (!pflash_cfi01_register(OMAP_CS0_BASE, + "omap_sx1.flash0-1", flash_size, + blk_by_legacy_dinfo(dinfo), + sector_size, 4, 0, 0, 0, 0, 0)) { + fprintf(stderr, "qemu: Error registering flash memory %d.\n", + fl_idx); + } + fl_idx++; + } + + if ((version == 1) && + (dinfo = drive_get(IF_PFLASH, 0, fl_idx)) != NULL) { + MemoryRegion *flash_1 = g_new(MemoryRegion, 1); + memory_region_init_rom(flash_1, NULL, "omap_sx1.flash1-0", + flash1_size, &error_fatal); + memory_region_add_subregion(address_space, OMAP_CS1_BASE, flash_1); + + memory_region_init_io(&cs[1], NULL, &static_ops, &cs1val, + "sx1.cs1", OMAP_CS1_SIZE - flash1_size); + memory_region_add_subregion(address_space, + OMAP_CS1_BASE + flash1_size, &cs[1]); + + if (!pflash_cfi01_register(OMAP_CS1_BASE, + "omap_sx1.flash1-1", flash1_size, + blk_by_legacy_dinfo(dinfo), + sector_size, 4, 0, 0, 0, 0, 0)) { + fprintf(stderr, "qemu: Error registering flash memory %d.\n", + fl_idx); + } + fl_idx++; + } else { + memory_region_init_io(&cs[1], NULL, &static_ops, &cs1val, + "sx1.cs1", OMAP_CS1_SIZE); + memory_region_add_subregion(address_space, + OMAP_CS1_BASE, &cs[1]); + } + + if (!machine->kernel_filename && !fl_idx && !qtest_enabled()) { + error_report("Kernel or Flash image must be specified"); + exit(1); + } + + /* Load the kernel. 
*/ + arm_load_kernel(mpu->cpu, machine, &sx1_binfo); + + /* TODO: fix next line */ + //~ qemu_console_resize(ds, 640, 480); +} + +static void sx1_init_v1(MachineState *machine) +{ + sx1_init(machine, 1); +} + +static void sx1_init_v2(MachineState *machine) +{ + sx1_init(machine, 2); +} + +static void sx1_machine_v2_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + + mc->desc = "Siemens SX1 (OMAP310) V2"; + mc->init = sx1_init_v2; + mc->ignore_memory_transaction_failures = true; + mc->default_cpu_type = ARM_CPU_TYPE_NAME("ti925t"); + mc->default_ram_size = sdram_size; + mc->default_ram_id = "omap1.dram"; +} + +static const TypeInfo sx1_machine_v2_type = { + .name = MACHINE_TYPE_NAME("sx1"), + .parent = TYPE_MACHINE, + .class_init = sx1_machine_v2_class_init, +}; + +static void sx1_machine_v1_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + + mc->desc = "Siemens SX1 (OMAP310) V1"; + mc->init = sx1_init_v1; + mc->ignore_memory_transaction_failures = true; + mc->default_cpu_type = ARM_CPU_TYPE_NAME("ti925t"); + mc->default_ram_size = sdram_size; + mc->default_ram_id = "omap1.dram"; +} + +static const TypeInfo sx1_machine_v1_type = { + .name = MACHINE_TYPE_NAME("sx1-v1"), + .parent = TYPE_MACHINE, + .class_init = sx1_machine_v1_class_init, +}; + +static void sx1_machine_init(void) +{ + type_register_static(&sx1_machine_v1_type); + type_register_static(&sx1_machine_v2_type); +} + +type_init(sx1_machine_init) diff --git a/hw/arm/orangepi.c b/hw/arm/orangepi.c new file mode 100644 index 000000000..0cf9895ce --- /dev/null +++ b/hw/arm/orangepi.c @@ -0,0 +1,125 @@ +/* + * Orange Pi emulation + * + * Copyright (C) 2019 Niek Linnenbank + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "exec/address-spaces.h" +#include "qapi/error.h" +#include "hw/boards.h" +#include "hw/qdev-properties.h" +#include "hw/arm/allwinner-h3.h" + +static struct arm_boot_info orangepi_binfo = { + .nb_cpus = AW_H3_NUM_CPUS, +}; + +static void orangepi_init(MachineState *machine) +{ + AwH3State *h3; + DriveInfo *di; + BlockBackend *blk; + BusState *bus; + DeviceState *carddev; + + /* BIOS is not supported by this board */ + if (machine->firmware) { + error_report("BIOS not supported for this machine"); + exit(1); + } + + /* This board has fixed size RAM */ + if (machine->ram_size != 1 * GiB) { + error_report("This machine can only be used with 1GiB of RAM"); + exit(1); + } + + /* Only allow Cortex-A7 for this board */ + if (strcmp(machine->cpu_type, ARM_CPU_TYPE_NAME("cortex-a7")) != 0) { + error_report("This board can only be used with cortex-a7 CPU"); + exit(1); + } + + h3 = AW_H3(object_new(TYPE_AW_H3)); + object_property_add_child(OBJECT(machine), "soc", OBJECT(h3)); + object_unref(OBJECT(h3)); + + /* Setup timer properties */ + object_property_set_int(OBJECT(h3), "clk0-freq", 32768, &error_abort); + object_property_set_int(OBJECT(h3), "clk1-freq", 24 * 1000 * 1000, + &error_abort); + + /* Setup SID properties. Currently using a default fixed SID identifier. */ + if (qemu_uuid_is_null(&h3->sid.identifier)) { + qdev_prop_set_string(DEVICE(h3), "identifier", + "02c00081-1111-2222-3333-000044556677"); + } else if (ldl_be_p(&h3->sid.identifier.data[0]) != 0x02c00081) { + warn_report("Security Identifier value does not include H3 prefix"); + } + + /* Setup EMAC properties */ + object_property_set_int(OBJECT(&h3->emac), "phy-addr", 1, &error_abort); + + /* DRAMC */ + object_property_set_uint(OBJECT(h3), "ram-addr", h3->memmap[AW_H3_DEV_SDRAM], + &error_abort); + object_property_set_int(OBJECT(h3), "ram-size", machine->ram_size / MiB, + &error_abort); + + /* Mark H3 object realized */ + qdev_realize(DEVICE(h3), NULL, &error_abort); + + /* Retrieve SD bus */ + di = drive_get_next(IF_SD); + blk = di ? blk_by_legacy_dinfo(di) : NULL; + bus = qdev_get_child_bus(DEVICE(h3), "sd-bus"); + + /* Plug in SD card */ + carddev = qdev_new(TYPE_SD_CARD); + qdev_prop_set_drive_err(carddev, "drive", blk, &error_fatal); + qdev_realize_and_unref(carddev, bus, &error_fatal); + + /* SDRAM */ + memory_region_add_subregion(get_system_memory(), h3->memmap[AW_H3_DEV_SDRAM], + machine->ram); + + /* Load target kernel or start using BootROM */ + if (!machine->kernel_filename && blk && blk_is_available(blk)) { + /* Use Boot ROM to copy data from SD card to SRAM */ + allwinner_h3_bootrom_setup(h3, blk); + } + orangepi_binfo.loader_start = h3->memmap[AW_H3_DEV_SDRAM]; + orangepi_binfo.ram_size = machine->ram_size; + arm_load_kernel(ARM_CPU(first_cpu), machine, &orangepi_binfo); +} + +static void orangepi_machine_init(MachineClass *mc) +{ + mc->desc = "Orange Pi PC (Cortex-A7)"; + mc->init = orangepi_init; + mc->block_default_type = IF_SD; + mc->units_per_default_bus = 1; + mc->min_cpus = AW_H3_NUM_CPUS; + mc->max_cpus = AW_H3_NUM_CPUS; + mc->default_cpus = AW_H3_NUM_CPUS; + mc->default_cpu_type = ARM_CPU_TYPE_NAME("cortex-a7"); + mc->default_ram_size = 1 * GiB; + mc->default_ram_id = "orangepi.ram"; +} + +DEFINE_MACHINE("orangepi-pc", orangepi_machine_init) diff --git a/hw/arm/palm.c b/hw/arm/palm.c new file mode 100644 index 000000000..68e11dd1e --- /dev/null +++ b/hw/arm/palm.c @@ -0,0 +1,320 @@ +/* + * PalmOne's (TM) PDAs. 
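The SID check above compares the first UUID field as a big-endian 32-bit load; with the default identifier 02c00081-... the bytes 02 c0 00 81 assemble to 0x02c00081 and the warning is skipped. A plain-C stand-in for ldl_be_p() showing how that value is formed (illustration only, not QEMU code):

    #include <stdio.h>
    #include <stdint.h>

    /* Big-endian 32-bit load, equivalent in effect to QEMU's ldl_be_p(). */
    static uint32_t ldl_be_demo(const uint8_t *p)
    {
        return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
               ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
    }

    int main(void)
    {
        /* First four bytes of the default H3 SID UUID 02c00081-... */
        const uint8_t sid[4] = { 0x02, 0xc0, 0x00, 0x81 };
        printf("%s\n", ldl_be_demo(sid) == 0x02c00081 ? "H3 prefix" : "no prefix");
        return 0;
    }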
+ *
+ * Copyright (C) 2006-2007 Andrzej Zaborowski
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 or
+ * (at your option) version 3 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "audio/audio.h"
+#include "sysemu/sysemu.h"
+#include "sysemu/qtest.h"
+#include "ui/console.h"
+#include "hw/arm/omap.h"
+#include "hw/boards.h"
+#include "hw/arm/boot.h"
+#include "hw/input/tsc2xxx.h"
+#include "hw/irq.h"
+#include "hw/loader.h"
+#include "cpu.h"
+#include "qemu/cutils.h"
+#include "qom/object.h"
+
+static uint64_t static_read(void *opaque, hwaddr offset, unsigned size)
+{
+    uint32_t *val = (uint32_t *)opaque;
+    uint32_t sizemask = 7 >> size;
+
+    return *val >> ((offset & sizemask) << 3);
+}
+
+static void static_write(void *opaque, hwaddr offset, uint64_t value,
+                         unsigned size)
+{
+#ifdef SPY
+    printf("%s: value %08lx written at " PA_FMT "\n",
+           __func__, value, offset);
+#endif
+}
+
+static const MemoryRegionOps static_ops = {
+    .read = static_read,
+    .write = static_write,
+    .valid.min_access_size = 1,
+    .valid.max_access_size = 4,
+    .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+/* Palm Tungsten|E support */
+
+/* Shared GPIOs */
+#define PALMTE_USBDETECT_GPIO   0
+#define PALMTE_USB_OR_DC_GPIO   1
+#define PALMTE_TSC_GPIO         4
+#define PALMTE_PINTDAV_GPIO     6
+#define PALMTE_MMC_WP_GPIO      8
+#define PALMTE_MMC_POWER_GPIO   9
+#define PALMTE_HDQ_GPIO         11
+#define PALMTE_HEADPHONES_GPIO  14
+#define PALMTE_SPEAKER_GPIO     15
+/* MPU private GPIOs */
+#define PALMTE_DC_GPIO          2
+#define PALMTE_MMC_SWITCH_GPIO  4
+#define PALMTE_MMC1_GPIO        6
+#define PALMTE_MMC2_GPIO        7
+#define PALMTE_MMC3_GPIO        11
+
+static MouseTransformInfo palmte_pointercal = {
+    .x = 320,
+    .y = 320,
+    .a = { -5909, 8, 22465308, 104, 7644, -1219972, 65536 },
+};
+
+static void palmte_microwire_setup(struct omap_mpu_state_s *cpu)
+{
+    uWireSlave *tsc;
+
+    tsc = tsc2102_init(qdev_get_gpio_in(cpu->gpio, PALMTE_PINTDAV_GPIO));
+
+    omap_uwire_attach(cpu->microwire, tsc, 0);
+    omap_mcbsp_i2s_attach(cpu->mcbsp1, tsc210x_codec(tsc));
+
+    tsc210x_set_transform(tsc, &palmte_pointercal);
+}
+
+static struct {
+    int row;
+    int column;
+} palmte_keymap[0x80] = {
+    [0 ...
0x7f] = { -1, -1 }, + [0x3b] = { 0, 0 }, /* F1 -> Calendar */ + [0x3c] = { 1, 0 }, /* F2 -> Contacts */ + [0x3d] = { 2, 0 }, /* F3 -> Tasks List */ + [0x3e] = { 3, 0 }, /* F4 -> Note Pad */ + [0x01] = { 4, 0 }, /* Esc -> Power */ + [0x4b] = { 0, 1 }, /* Left */ + [0x50] = { 1, 1 }, /* Down */ + [0x48] = { 2, 1 }, /* Up */ + [0x4d] = { 3, 1 }, /* Right */ + [0x4c] = { 4, 1 }, /* Centre */ + [0x39] = { 4, 1 }, /* Spc -> Centre */ +}; + +static void palmte_button_event(void *opaque, int keycode) +{ + struct omap_mpu_state_s *cpu = (struct omap_mpu_state_s *) opaque; + + if (palmte_keymap[keycode & 0x7f].row != -1) + omap_mpuio_key(cpu->mpuio, + palmte_keymap[keycode & 0x7f].row, + palmte_keymap[keycode & 0x7f].column, + !(keycode & 0x80)); +} + +/* + * Encapsulation of some GPIO line behaviour for the Palm board + * + * QEMU interface: + * + unnamed GPIO inputs 0..6: for the various miscellaneous input lines + */ + +#define TYPE_PALM_MISC_GPIO "palm-misc-gpio" +OBJECT_DECLARE_SIMPLE_TYPE(PalmMiscGPIOState, PALM_MISC_GPIO) + +struct PalmMiscGPIOState { + SysBusDevice parent_obj; +}; + +static void palmte_onoff_gpios(void *opaque, int line, int level) +{ + switch (line) { + case 0: + printf("%s: current to MMC/SD card %sabled.\n", + __func__, level ? "dis" : "en"); + break; + case 1: + printf("%s: internal speaker amplifier %s.\n", + __func__, level ? "down" : "on"); + break; + + /* These LCD & Audio output signals have not been identified yet. */ + case 2: + case 3: + case 4: + printf("%s: LCD GPIO%i %s.\n", + __func__, line - 1, level ? "high" : "low"); + break; + case 5: + case 6: + printf("%s: Audio GPIO%i %s.\n", + __func__, line - 4, level ? "high" : "low"); + break; + } +} + +static void palm_misc_gpio_init(Object *obj) +{ + DeviceState *dev = DEVICE(obj); + + qdev_init_gpio_in(dev, palmte_onoff_gpios, 7); +} + +static const TypeInfo palm_misc_gpio_info = { + .name = TYPE_PALM_MISC_GPIO, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(PalmMiscGPIOState), + .instance_init = palm_misc_gpio_init, + /* + * No class init required: device has no internal state so does not + * need to set up reset or vmstate, and has no realize method. + */ +}; + +static void palmte_gpio_setup(struct omap_mpu_state_s *cpu) +{ + DeviceState *misc_gpio; + + misc_gpio = sysbus_create_simple(TYPE_PALM_MISC_GPIO, -1, NULL); + + omap_mmc_handlers(cpu->mmc, + qdev_get_gpio_in(cpu->gpio, PALMTE_MMC_WP_GPIO), + qemu_irq_invert(omap_mpuio_in_get(cpu->mpuio) + [PALMTE_MMC_SWITCH_GPIO])); + + qdev_connect_gpio_out(cpu->gpio, PALMTE_MMC_POWER_GPIO, + qdev_get_gpio_in(misc_gpio, 0)); + qdev_connect_gpio_out(cpu->gpio, PALMTE_SPEAKER_GPIO, + qdev_get_gpio_in(misc_gpio, 1)); + qdev_connect_gpio_out(cpu->gpio, 11, qdev_get_gpio_in(misc_gpio, 2)); + qdev_connect_gpio_out(cpu->gpio, 12, qdev_get_gpio_in(misc_gpio, 3)); + qdev_connect_gpio_out(cpu->gpio, 13, qdev_get_gpio_in(misc_gpio, 4)); + omap_mpuio_out_set(cpu->mpuio, 1, qdev_get_gpio_in(misc_gpio, 5)); + omap_mpuio_out_set(cpu->mpuio, 3, qdev_get_gpio_in(misc_gpio, 6)); + + /* Reset some inputs to initial state. 
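palmte_keymap relies on GCC's range designated initializers: every slot starts as { -1, -1 } (unmapped) and the handful of mapped scancodes override it; palmte_button_event() then splits the keycode into a table index (low 7 bits) and a release flag (bit 7). A self-contained illustration with two entries copied from the table above:

    #include <stdio.h>

    static const struct { int row, col; } keymap[0x80] = {
        [0 ... 0x7f] = { -1, -1 },   /* default: unmapped (GCC extension) */
        [0x3b] = { 0, 0 },           /* F1 -> Calendar */
        [0x4b] = { 0, 1 },           /* Left */
    };

    static void event(int keycode)
    {
        int idx = keycode & 0x7f;        /* scancode selects the table entry */
        int release = keycode & 0x80;    /* bit 7 is set on key release */

        if (keymap[idx].row != -1) {
            printf("key (%d,%d) %s\n", keymap[idx].row, keymap[idx].col,
                   release ? "released" : "pressed");
        }
    }

    int main(void)
    {
        event(0x3b);          /* F1 press   -> key (0,0) pressed */
        event(0x3b | 0x80);   /* F1 release -> key (0,0) released */
        return 0;
    }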
*/ + qemu_irq_lower(qdev_get_gpio_in(cpu->gpio, PALMTE_USBDETECT_GPIO)); + qemu_irq_lower(qdev_get_gpio_in(cpu->gpio, PALMTE_USB_OR_DC_GPIO)); + qemu_irq_lower(qdev_get_gpio_in(cpu->gpio, 4)); + qemu_irq_lower(qdev_get_gpio_in(cpu->gpio, PALMTE_HEADPHONES_GPIO)); + qemu_irq_lower(omap_mpuio_in_get(cpu->mpuio)[PALMTE_DC_GPIO]); + qemu_irq_raise(omap_mpuio_in_get(cpu->mpuio)[6]); + qemu_irq_raise(omap_mpuio_in_get(cpu->mpuio)[7]); + qemu_irq_raise(omap_mpuio_in_get(cpu->mpuio)[11]); +} + +static struct arm_boot_info palmte_binfo = { + .loader_start = OMAP_EMIFF_BASE, + .ram_size = 0x02000000, + .board_id = 0x331, +}; + +static void palmte_init(MachineState *machine) +{ + MemoryRegion *address_space_mem = get_system_memory(); + struct omap_mpu_state_s *mpu; + int flash_size = 0x00800000; + static uint32_t cs0val = 0xffffffff; + static uint32_t cs1val = 0x0000e1a0; + static uint32_t cs2val = 0x0000e1a0; + static uint32_t cs3val = 0xe1a0e1a0; + int rom_size, rom_loaded = 0; + MachineClass *mc = MACHINE_GET_CLASS(machine); + MemoryRegion *flash = g_new(MemoryRegion, 1); + MemoryRegion *cs = g_new(MemoryRegion, 4); + + if (machine->ram_size != mc->default_ram_size) { + char *sz = size_to_str(mc->default_ram_size); + error_report("Invalid RAM size, should be %s", sz); + g_free(sz); + exit(EXIT_FAILURE); + } + + memory_region_add_subregion(address_space_mem, OMAP_EMIFF_BASE, + machine->ram); + + mpu = omap310_mpu_init(machine->ram, machine->cpu_type); + + /* External Flash (EMIFS) */ + memory_region_init_rom(flash, NULL, "palmte.flash", flash_size, + &error_fatal); + memory_region_add_subregion(address_space_mem, OMAP_CS0_BASE, flash); + + memory_region_init_io(&cs[0], NULL, &static_ops, &cs0val, "palmte-cs0", + OMAP_CS0_SIZE - flash_size); + memory_region_add_subregion(address_space_mem, OMAP_CS0_BASE + flash_size, + &cs[0]); + memory_region_init_io(&cs[1], NULL, &static_ops, &cs1val, "palmte-cs1", + OMAP_CS1_SIZE); + memory_region_add_subregion(address_space_mem, OMAP_CS1_BASE, &cs[1]); + memory_region_init_io(&cs[2], NULL, &static_ops, &cs2val, "palmte-cs2", + OMAP_CS2_SIZE); + memory_region_add_subregion(address_space_mem, OMAP_CS2_BASE, &cs[2]); + memory_region_init_io(&cs[3], NULL, &static_ops, &cs3val, "palmte-cs3", + OMAP_CS3_SIZE); + memory_region_add_subregion(address_space_mem, OMAP_CS3_BASE, &cs[3]); + + palmte_microwire_setup(mpu); + + qemu_add_kbd_event_handler(palmte_button_event, mpu); + + palmte_gpio_setup(mpu); + + /* Setup initial (reset) machine state */ + if (nb_option_roms) { + rom_size = get_image_size(option_rom[0].name); + if (rom_size > flash_size) { + fprintf(stderr, "%s: ROM image too big (%x > %x)\n", + __func__, rom_size, flash_size); + rom_size = 0; + } + if (rom_size > 0) { + rom_size = load_image_targphys(option_rom[0].name, OMAP_CS0_BASE, + flash_size); + rom_loaded = 1; + } + if (rom_size < 0) { + fprintf(stderr, "%s: error loading '%s'\n", + __func__, option_rom[0].name); + } + } + + if (!rom_loaded && !machine->kernel_filename && !qtest_enabled()) { + fprintf(stderr, "Kernel or ROM image must be specified\n"); + exit(1); + } + + /* Load the kernel. */ + arm_load_kernel(mpu->cpu, machine, &palmte_binfo); +} + +static void palmte_machine_init(MachineClass *mc) +{ + mc->desc = "Palm Tungsten|E aka. 
Cheetah PDA (OMAP310)"; + mc->init = palmte_init; + mc->ignore_memory_transaction_failures = true; + mc->default_cpu_type = ARM_CPU_TYPE_NAME("ti925t"); + mc->default_ram_size = 0x02000000; + mc->default_ram_id = "omap1.dram"; +} + +DEFINE_MACHINE("cheetah", palmte_machine_init) + +static void palm_register_types(void) +{ + type_register_static(&palm_misc_gpio_info); +} + +type_init(palm_register_types) diff --git a/hw/arm/pxa2xx.c b/hw/arm/pxa2xx.c new file mode 100644 index 000000000..15a247efa --- /dev/null +++ b/hw/arm/pxa2xx.c @@ -0,0 +1,2384 @@ +/* + * Intel XScale PXA255/270 processor support. + * + * Copyright (c) 2006 Openedhand Ltd. + * Written by Andrzej Zaborowski + * + * This code is licensed under the GPL. + */ + +#include "qemu/osdep.h" +#include "qemu-common.h" +#include "qemu/error-report.h" +#include "qemu/module.h" +#include "qapi/error.h" +#include "cpu.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "hw/arm/pxa.h" +#include "sysemu/sysemu.h" +#include "hw/char/serial.h" +#include "hw/i2c/i2c.h" +#include "hw/irq.h" +#include "hw/qdev-properties.h" +#include "hw/qdev-properties-system.h" +#include "hw/ssi/ssi.h" +#include "hw/sd/sd.h" +#include "chardev/char-fe.h" +#include "sysemu/blockdev.h" +#include "sysemu/qtest.h" +#include "qemu/cutils.h" +#include "qemu/log.h" +#include "qom/object.h" + +static struct { + hwaddr io_base; + int irqn; +} pxa255_serial[] = { + { 0x40100000, PXA2XX_PIC_FFUART }, + { 0x40200000, PXA2XX_PIC_BTUART }, + { 0x40700000, PXA2XX_PIC_STUART }, + { 0x41600000, PXA25X_PIC_HWUART }, + { 0, 0 } +}, pxa270_serial[] = { + { 0x40100000, PXA2XX_PIC_FFUART }, + { 0x40200000, PXA2XX_PIC_BTUART }, + { 0x40700000, PXA2XX_PIC_STUART }, + { 0, 0 } +}; + +typedef struct PXASSPDef { + hwaddr io_base; + int irqn; +} PXASSPDef; + +#if 0 +static PXASSPDef pxa250_ssp[] = { + { 0x41000000, PXA2XX_PIC_SSP }, + { 0, 0 } +}; +#endif + +static PXASSPDef pxa255_ssp[] = { + { 0x41000000, PXA2XX_PIC_SSP }, + { 0x41400000, PXA25X_PIC_NSSP }, + { 0, 0 } +}; + +#if 0 +static PXASSPDef pxa26x_ssp[] = { + { 0x41000000, PXA2XX_PIC_SSP }, + { 0x41400000, PXA25X_PIC_NSSP }, + { 0x41500000, PXA26X_PIC_ASSP }, + { 0, 0 } +}; +#endif + +static PXASSPDef pxa27x_ssp[] = { + { 0x41000000, PXA2XX_PIC_SSP }, + { 0x41700000, PXA27X_PIC_SSP2 }, + { 0x41900000, PXA2XX_PIC_SSP3 }, + { 0, 0 } +}; + +#define PMCR 0x00 /* Power Manager Control register */ +#define PSSR 0x04 /* Power Manager Sleep Status register */ +#define PSPR 0x08 /* Power Manager Scratch-Pad register */ +#define PWER 0x0c /* Power Manager Wake-Up Enable register */ +#define PRER 0x10 /* Power Manager Rising-Edge Detect Enable register */ +#define PFER 0x14 /* Power Manager Falling-Edge Detect Enable register */ +#define PEDR 0x18 /* Power Manager Edge-Detect Status register */ +#define PCFR 0x1c /* Power Manager General Configuration register */ +#define PGSR0 0x20 /* Power Manager GPIO Sleep-State register 0 */ +#define PGSR1 0x24 /* Power Manager GPIO Sleep-State register 1 */ +#define PGSR2 0x28 /* Power Manager GPIO Sleep-State register 2 */ +#define PGSR3 0x2c /* Power Manager GPIO Sleep-State register 3 */ +#define RCSR 0x30 /* Reset Controller Status register */ +#define PSLR 0x34 /* Power Manager Sleep Configuration register */ +#define PTSR 0x38 /* Power Manager Standby Configuration register */ +#define PVCR 0x40 /* Power Manager Voltage Change Control register */ +#define PUCR 0x4c /* Power Manager USIM Card Control/Status register */ +#define PKWR 0x50 /* Power Manager Keyboard Wake-Up 
Enable register */ +#define PKSR 0x54 /* Power Manager Keyboard Level-Detect Status */ +#define PCMD0 0x80 /* Power Manager I2C Command register File 0 */ +#define PCMD31 0xfc /* Power Manager I2C Command register File 31 */ + +static uint64_t pxa2xx_pm_read(void *opaque, hwaddr addr, + unsigned size) +{ + PXA2xxState *s = (PXA2xxState *) opaque; + + switch (addr) { + case PMCR ... PCMD31: + if (addr & 3) + goto fail; + + return s->pm_regs[addr >> 2]; + default: + fail: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad read offset 0x%"HWADDR_PRIx"\n", + __func__, addr); + break; + } + return 0; +} + +static void pxa2xx_pm_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + PXA2xxState *s = (PXA2xxState *) opaque; + + switch (addr) { + case PMCR: + /* Clear the write-one-to-clear bits... */ + s->pm_regs[addr >> 2] &= ~(value & 0x2a); + /* ...and set the plain r/w bits */ + s->pm_regs[addr >> 2] &= ~0x15; + s->pm_regs[addr >> 2] |= value & 0x15; + break; + + case PSSR: /* Read-clean registers */ + case RCSR: + case PKSR: + s->pm_regs[addr >> 2] &= ~value; + break; + + default: /* Read-write registers */ + if (!(addr & 3)) { + s->pm_regs[addr >> 2] = value; + break; + } + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad write offset 0x%"HWADDR_PRIx"\n", + __func__, addr); + break; + } +} + +static const MemoryRegionOps pxa2xx_pm_ops = { + .read = pxa2xx_pm_read, + .write = pxa2xx_pm_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static const VMStateDescription vmstate_pxa2xx_pm = { + .name = "pxa2xx_pm", + .version_id = 0, + .minimum_version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_UINT32_ARRAY(pm_regs, PXA2xxState, 0x40), + VMSTATE_END_OF_LIST() + } +}; + +#define CCCR 0x00 /* Core Clock Configuration register */ +#define CKEN 0x04 /* Clock Enable register */ +#define OSCC 0x08 /* Oscillator Configuration register */ +#define CCSR 0x0c /* Core Clock Status register */ + +static uint64_t pxa2xx_cm_read(void *opaque, hwaddr addr, + unsigned size) +{ + PXA2xxState *s = (PXA2xxState *) opaque; + + switch (addr) { + case CCCR: + case CKEN: + case OSCC: + return s->cm_regs[addr >> 2]; + + case CCSR: + return s->cm_regs[CCCR >> 2] | (3 << 28); + + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad read offset 0x%"HWADDR_PRIx"\n", + __func__, addr); + break; + } + return 0; +} + +static void pxa2xx_cm_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + PXA2xxState *s = (PXA2xxState *) opaque; + + switch (addr) { + case CCCR: + case CKEN: + s->cm_regs[addr >> 2] = value; + break; + + case OSCC: + s->cm_regs[addr >> 2] &= ~0x6c; + s->cm_regs[addr >> 2] |= value & 0x6e; + if ((value >> 1) & 1) /* OON */ + s->cm_regs[addr >> 2] |= 1 << 0; /* Oscillator is now stable */ + break; + + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad write offset 0x%"HWADDR_PRIx"\n", + __func__, addr); + break; + } +} + +static const MemoryRegionOps pxa2xx_cm_ops = { + .read = pxa2xx_cm_read, + .write = pxa2xx_cm_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static const VMStateDescription vmstate_pxa2xx_cm = { + .name = "pxa2xx_cm", + .version_id = 0, + .minimum_version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_UINT32_ARRAY(cm_regs, PXA2xxState, 4), + VMSTATE_UINT32(clkcfg, PXA2xxState), + VMSTATE_UINT32(pmnc, PXA2xxState), + VMSTATE_END_OF_LIST() + } +}; + +static uint64_t pxa2xx_clkcfg_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ + PXA2xxState *s = (PXA2xxState *)ri->opaque; + return s->clkcfg; +} + +static void pxa2xx_clkcfg_write(CPUARMState *env, const 
ARMCPRegInfo *ri, + uint64_t value) +{ + PXA2xxState *s = (PXA2xxState *)ri->opaque; + s->clkcfg = value & 0xf; + if (value & 2) { + printf("%s: CPU frequency change attempt\n", __func__); + } +} + +static void pxa2xx_pwrmode_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + PXA2xxState *s = (PXA2xxState *)ri->opaque; + static const char *pwrmode[8] = { + "Normal", "Idle", "Deep-idle", "Standby", + "Sleep", "reserved (!)", "reserved (!)", "Deep-sleep", + }; + + if (value & 8) { + printf("%s: CPU voltage change attempt\n", __func__); + } + switch (value & 7) { + case 0: + /* Do nothing */ + break; + + case 1: + /* Idle */ + if (!(s->cm_regs[CCCR >> 2] & (1U << 31))) { /* CPDIS */ + cpu_interrupt(CPU(s->cpu), CPU_INTERRUPT_HALT); + break; + } + /* Fall through. */ + + case 2: + /* Deep-Idle */ + cpu_interrupt(CPU(s->cpu), CPU_INTERRUPT_HALT); + s->pm_regs[RCSR >> 2] |= 0x8; /* Set GPR */ + goto message; + + case 3: + s->cpu->env.uncached_cpsr = ARM_CPU_MODE_SVC; + s->cpu->env.daif = PSTATE_A | PSTATE_F | PSTATE_I; + s->cpu->env.cp15.sctlr_ns = 0; + s->cpu->env.cp15.cpacr_el1 = 0; + s->cpu->env.cp15.ttbr0_el[1] = 0; + s->cpu->env.cp15.dacr_ns = 0; + s->pm_regs[PSSR >> 2] |= 0x8; /* Set STS */ + s->pm_regs[RCSR >> 2] |= 0x8; /* Set GPR */ + + /* + * The scratch-pad register is almost universally used + * for storing the return address on suspend. For the + * lack of a resuming bootloader, perform a jump + * directly to that address. + */ + memset(s->cpu->env.regs, 0, 4 * 15); + s->cpu->env.regs[15] = s->pm_regs[PSPR >> 2]; + +#if 0 + buffer = 0xe59ff000; /* ldr pc, [pc, #0] */ + cpu_physical_memory_write(0, &buffer, 4); + buffer = s->pm_regs[PSPR >> 2]; + cpu_physical_memory_write(8, &buffer, 4); +#endif + + /* Suspend */ + cpu_interrupt(current_cpu, CPU_INTERRUPT_HALT); + + goto message; + + default: + message: + printf("%s: machine entered %s mode\n", __func__, + pwrmode[value & 7]); + } +} + +static uint64_t pxa2xx_cppmnc_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ + PXA2xxState *s = (PXA2xxState *)ri->opaque; + return s->pmnc; +} + +static void pxa2xx_cppmnc_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + PXA2xxState *s = (PXA2xxState *)ri->opaque; + s->pmnc = value; +} + +static uint64_t pxa2xx_cpccnt_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ + PXA2xxState *s = (PXA2xxState *)ri->opaque; + if (s->pmnc & 1) { + return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + } else { + return 0; + } +} + +static const ARMCPRegInfo pxa_cp_reginfo[] = { + /* cp14 crm==1: perf registers */ + { .name = "CPPMNC", .cp = 14, .crn = 0, .crm = 1, .opc1 = 0, .opc2 = 0, + .access = PL1_RW, .type = ARM_CP_IO, + .readfn = pxa2xx_cppmnc_read, .writefn = pxa2xx_cppmnc_write }, + { .name = "CPCCNT", .cp = 14, .crn = 1, .crm = 1, .opc1 = 0, .opc2 = 0, + .access = PL1_RW, .type = ARM_CP_IO, + .readfn = pxa2xx_cpccnt_read, .writefn = arm_cp_write_ignore }, + { .name = "CPINTEN", .cp = 14, .crn = 4, .crm = 1, .opc1 = 0, .opc2 = 0, + .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "CPFLAG", .cp = 14, .crn = 5, .crm = 1, .opc1 = 0, .opc2 = 0, + .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "CPEVTSEL", .cp = 14, .crn = 8, .crm = 1, .opc1 = 0, .opc2 = 0, + .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, + /* cp14 crm==2: performance count registers */ + { .name = "CPPMN0", .cp = 14, .crn = 0, .crm = 2, .opc1 = 0, .opc2 = 0, + .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "CPPMN1", .cp = 14, 
.crn = 1, .crm = 2, .opc1 = 0, .opc2 = 0, + .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "CPPMN2", .cp = 14, .crn = 2, .crm = 2, .opc1 = 0, .opc2 = 0, + .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "CPPMN3", .cp = 14, .crn = 2, .crm = 3, .opc1 = 0, .opc2 = 0, + .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, + /* cp14 crn==6: CLKCFG */ + { .name = "CLKCFG", .cp = 14, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0, + .access = PL1_RW, .type = ARM_CP_IO, + .readfn = pxa2xx_clkcfg_read, .writefn = pxa2xx_clkcfg_write }, + /* cp14 crn==7: PWRMODE */ + { .name = "PWRMODE", .cp = 14, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 0, + .access = PL1_RW, .type = ARM_CP_IO, + .readfn = arm_cp_read_zero, .writefn = pxa2xx_pwrmode_write }, + REGINFO_SENTINEL +}; + +static void pxa2xx_setup_cp14(PXA2xxState *s) +{ + define_arm_cp_regs_with_opaque(s->cpu, pxa_cp_reginfo, s); +} + +#define MDCNFG 0x00 /* SDRAM Configuration register */ +#define MDREFR 0x04 /* SDRAM Refresh Control register */ +#define MSC0 0x08 /* Static Memory Control register 0 */ +#define MSC1 0x0c /* Static Memory Control register 1 */ +#define MSC2 0x10 /* Static Memory Control register 2 */ +#define MECR 0x14 /* Expansion Memory Bus Config register */ +#define SXCNFG 0x1c /* Synchronous Static Memory Config register */ +#define MCMEM0 0x28 /* PC Card Memory Socket 0 Timing register */ +#define MCMEM1 0x2c /* PC Card Memory Socket 1 Timing register */ +#define MCATT0 0x30 /* PC Card Attribute Socket 0 register */ +#define MCATT1 0x34 /* PC Card Attribute Socket 1 register */ +#define MCIO0 0x38 /* PC Card I/O Socket 0 Timing register */ +#define MCIO1 0x3c /* PC Card I/O Socket 1 Timing register */ +#define MDMRS 0x40 /* SDRAM Mode Register Set Config register */ +#define BOOT_DEF 0x44 /* Boot-time Default Configuration register */ +#define ARB_CNTL 0x48 /* Arbiter Control register */ +#define BSCNTR0 0x4c /* Memory Buffer Strength Control register 0 */ +#define BSCNTR1 0x50 /* Memory Buffer Strength Control register 1 */ +#define LCDBSCNTR 0x54 /* LCD Buffer Strength Control register */ +#define MDMRSLP 0x58 /* Low Power SDRAM Mode Set Config register */ +#define BSCNTR2 0x5c /* Memory Buffer Strength Control register 2 */ +#define BSCNTR3 0x60 /* Memory Buffer Strength Control register 3 */ +#define SA1110 0x64 /* SA-1110 Memory Compatibility register */ + +static uint64_t pxa2xx_mm_read(void *opaque, hwaddr addr, + unsigned size) +{ + PXA2xxState *s = (PXA2xxState *) opaque; + + switch (addr) { + case MDCNFG ... SA1110: + if ((addr & 3) == 0) + return s->mm_regs[addr >> 2]; + /* fall through */ + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad read offset 0x%"HWADDR_PRIx"\n", + __func__, addr); + break; + } + return 0; +} + +static void pxa2xx_mm_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + PXA2xxState *s = (PXA2xxState *) opaque; + + switch (addr) { + case MDCNFG ... 
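pxa2xx_mm_read() decodes a whole register bank with a GCC/clang case range plus a word-alignment guard; anything unaligned or out of range falls through to the LOG_GUEST_ERROR path. A self-contained sketch of the same decode shape (register names are placeholders):

    #include <stdio.h>
    #include <stdint.h>

    #define REG_FIRST 0x00
    #define REG_LAST  0x64           /* SA1110 plays this role above */

    static uint32_t regs[(REG_LAST >> 2) + 1];

    static uint32_t mm_read(uint32_t addr)
    {
        switch (addr) {
        case REG_FIRST ... REG_LAST:  /* GCC/clang case-range extension */
            if ((addr & 3) == 0) {    /* only word-aligned offsets decode */
                return regs[addr >> 2];
            }
            /* fall through */
        default:
            fprintf(stderr, "bad read offset 0x%x\n", addr);
            return 0;
        }
    }

    int main(void)
    {
        regs[0x10 >> 2] = 0xdeadbeef;
        printf("%08x\n", mm_read(0x10)); /* deadbeef */
        mm_read(0x11);                   /* unaligned -> error path */
        return 0;
    }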
SA1110: + if ((addr & 3) == 0) { + s->mm_regs[addr >> 2] = value; + break; + } + /* fallthrough */ + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad write offset 0x%"HWADDR_PRIx"\n", + __func__, addr); + break; + } +} + +static const MemoryRegionOps pxa2xx_mm_ops = { + .read = pxa2xx_mm_read, + .write = pxa2xx_mm_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static const VMStateDescription vmstate_pxa2xx_mm = { + .name = "pxa2xx_mm", + .version_id = 0, + .minimum_version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_UINT32_ARRAY(mm_regs, PXA2xxState, 0x1a), + VMSTATE_END_OF_LIST() + } +}; + +#define TYPE_PXA2XX_SSP "pxa2xx-ssp" +OBJECT_DECLARE_SIMPLE_TYPE(PXA2xxSSPState, PXA2XX_SSP) + +/* Synchronous Serial Ports */ +struct PXA2xxSSPState { + /*< private >*/ + SysBusDevice parent_obj; + /*< public >*/ + + MemoryRegion iomem; + qemu_irq irq; + uint32_t enable; + SSIBus *bus; + + uint32_t sscr[2]; + uint32_t sspsp; + uint32_t ssto; + uint32_t ssitr; + uint32_t sssr; + uint8_t sstsa; + uint8_t ssrsa; + uint8_t ssacd; + + uint32_t rx_fifo[16]; + uint32_t rx_level; + uint32_t rx_start; +}; + +static bool pxa2xx_ssp_vmstate_validate(void *opaque, int version_id) +{ + PXA2xxSSPState *s = opaque; + + return s->rx_start < sizeof(s->rx_fifo); +} + +static const VMStateDescription vmstate_pxa2xx_ssp = { + .name = "pxa2xx-ssp", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(enable, PXA2xxSSPState), + VMSTATE_UINT32_ARRAY(sscr, PXA2xxSSPState, 2), + VMSTATE_UINT32(sspsp, PXA2xxSSPState), + VMSTATE_UINT32(ssto, PXA2xxSSPState), + VMSTATE_UINT32(ssitr, PXA2xxSSPState), + VMSTATE_UINT32(sssr, PXA2xxSSPState), + VMSTATE_UINT8(sstsa, PXA2xxSSPState), + VMSTATE_UINT8(ssrsa, PXA2xxSSPState), + VMSTATE_UINT8(ssacd, PXA2xxSSPState), + VMSTATE_UINT32(rx_level, PXA2xxSSPState), + VMSTATE_UINT32(rx_start, PXA2xxSSPState), + VMSTATE_VALIDATE("fifo is 16 bytes", pxa2xx_ssp_vmstate_validate), + VMSTATE_UINT32_ARRAY(rx_fifo, PXA2xxSSPState, 16), + VMSTATE_END_OF_LIST() + } +}; + +#define SSCR0 0x00 /* SSP Control register 0 */ +#define SSCR1 0x04 /* SSP Control register 1 */ +#define SSSR 0x08 /* SSP Status register */ +#define SSITR 0x0c /* SSP Interrupt Test register */ +#define SSDR 0x10 /* SSP Data register */ +#define SSTO 0x28 /* SSP Time-Out register */ +#define SSPSP 0x2c /* SSP Programmable Serial Protocol register */ +#define SSTSA 0x30 /* SSP TX Time Slot Active register */ +#define SSRSA 0x34 /* SSP RX Time Slot Active register */ +#define SSTSS 0x38 /* SSP Time Slot Status register */ +#define SSACD 0x3c /* SSP Audio Clock Divider register */ + +/* Bitfields for above registers */ +#define SSCR0_SPI(x) (((x) & 0x30) == 0x00) +#define SSCR0_SSP(x) (((x) & 0x30) == 0x10) +#define SSCR0_UWIRE(x) (((x) & 0x30) == 0x20) +#define SSCR0_PSP(x) (((x) & 0x30) == 0x30) +#define SSCR0_SSE (1 << 7) +#define SSCR0_RIM (1 << 22) +#define SSCR0_TIM (1 << 23) +#define SSCR0_MOD (1U << 31) +#define SSCR0_DSS(x) (((((x) >> 16) & 0x10) | ((x) & 0xf)) + 1) +#define SSCR1_RIE (1 << 0) +#define SSCR1_TIE (1 << 1) +#define SSCR1_LBM (1 << 2) +#define SSCR1_MWDS (1 << 5) +#define SSCR1_TFT(x) ((((x) >> 6) & 0xf) + 1) +#define SSCR1_RFT(x) ((((x) >> 10) & 0xf) + 1) +#define SSCR1_EFWR (1 << 14) +#define SSCR1_PINTE (1 << 18) +#define SSCR1_TINTE (1 << 19) +#define SSCR1_RSRE (1 << 20) +#define SSCR1_TSRE (1 << 21) +#define SSCR1_EBCEI (1 << 29) +#define SSITR_INT (7 << 5) +#define SSSR_TNF (1 << 2) +#define SSSR_RNE (1 << 3) +#define SSSR_TFS (1 << 5) +#define SSSR_RFS 
(1 << 6) +#define SSSR_ROR (1 << 7) +#define SSSR_PINT (1 << 18) +#define SSSR_TINT (1 << 19) +#define SSSR_EOC (1 << 20) +#define SSSR_TUR (1 << 21) +#define SSSR_BCE (1 << 23) +#define SSSR_RW 0x00bc0080 + +static void pxa2xx_ssp_int_update(PXA2xxSSPState *s) +{ + int level = 0; + + level |= s->ssitr & SSITR_INT; + level |= (s->sssr & SSSR_BCE) && (s->sscr[1] & SSCR1_EBCEI); + level |= (s->sssr & SSSR_TUR) && !(s->sscr[0] & SSCR0_TIM); + level |= (s->sssr & SSSR_EOC) && (s->sssr & (SSSR_TINT | SSSR_PINT)); + level |= (s->sssr & SSSR_TINT) && (s->sscr[1] & SSCR1_TINTE); + level |= (s->sssr & SSSR_PINT) && (s->sscr[1] & SSCR1_PINTE); + level |= (s->sssr & SSSR_ROR) && !(s->sscr[0] & SSCR0_RIM); + level |= (s->sssr & SSSR_RFS) && (s->sscr[1] & SSCR1_RIE); + level |= (s->sssr & SSSR_TFS) && (s->sscr[1] & SSCR1_TIE); + qemu_set_irq(s->irq, !!level); +} + +static void pxa2xx_ssp_fifo_update(PXA2xxSSPState *s) +{ + s->sssr &= ~(0xf << 12); /* Clear RFL */ + s->sssr &= ~(0xf << 8); /* Clear TFL */ + s->sssr &= ~SSSR_TFS; + s->sssr &= ~SSSR_TNF; + if (s->enable) { + s->sssr |= ((s->rx_level - 1) & 0xf) << 12; + if (s->rx_level >= SSCR1_RFT(s->sscr[1])) + s->sssr |= SSSR_RFS; + else + s->sssr &= ~SSSR_RFS; + if (s->rx_level) + s->sssr |= SSSR_RNE; + else + s->sssr &= ~SSSR_RNE; + /* TX FIFO is never filled, so it is always in underrun + condition if SSP is enabled */ + s->sssr |= SSSR_TFS; + s->sssr |= SSSR_TNF; + } + + pxa2xx_ssp_int_update(s); +} + +static uint64_t pxa2xx_ssp_read(void *opaque, hwaddr addr, + unsigned size) +{ + PXA2xxSSPState *s = (PXA2xxSSPState *) opaque; + uint32_t retval; + + switch (addr) { + case SSCR0: + return s->sscr[0]; + case SSCR1: + return s->sscr[1]; + case SSPSP: + return s->sspsp; + case SSTO: + return s->ssto; + case SSITR: + return s->ssitr; + case SSSR: + return s->sssr | s->ssitr; + case SSDR: + if (!s->enable) + return 0xffffffff; + if (s->rx_level < 1) { + printf("%s: SSP Rx Underrun\n", __func__); + return 0xffffffff; + } + s->rx_level --; + retval = s->rx_fifo[s->rx_start ++]; + s->rx_start &= 0xf; + pxa2xx_ssp_fifo_update(s); + return retval; + case SSTSA: + return s->sstsa; + case SSRSA: + return s->ssrsa; + case SSTSS: + return 0; + case SSACD: + return s->ssacd; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad read offset 0x%"HWADDR_PRIx"\n", + __func__, addr); + break; + } + return 0; +} + +static void pxa2xx_ssp_write(void *opaque, hwaddr addr, + uint64_t value64, unsigned size) +{ + PXA2xxSSPState *s = (PXA2xxSSPState *) opaque; + uint32_t value = value64; + + switch (addr) { + case SSCR0: + s->sscr[0] = value & 0xc7ffffff; + s->enable = value & SSCR0_SSE; + if (value & SSCR0_MOD) + printf("%s: Attempt to use network mode\n", __func__); + if (s->enable && SSCR0_DSS(value) < 4) + printf("%s: Wrong data size: %u bits\n", __func__, + SSCR0_DSS(value)); + if (!(value & SSCR0_SSE)) { + s->sssr = 0; + s->ssitr = 0; + s->rx_level = 0; + } + pxa2xx_ssp_fifo_update(s); + break; + + case SSCR1: + s->sscr[1] = value; + if (value & (SSCR1_LBM | SSCR1_EFWR)) + printf("%s: Attempt to use SSP test mode\n", __func__); + pxa2xx_ssp_fifo_update(s); + break; + + case SSPSP: + s->sspsp = value; + break; + + case SSTO: + s->ssto = value; + break; + + case SSITR: + s->ssitr = value & SSITR_INT; + pxa2xx_ssp_int_update(s); + break; + + case SSSR: + s->sssr &= ~(value & SSSR_RW); + pxa2xx_ssp_int_update(s); + break; + + case SSDR: + if (SSCR0_UWIRE(s->sscr[0])) { + if (s->sscr[1] & SSCR1_MWDS) + value &= 0xffff; + else + value &= 0xff; + } else + /* Note how 32bits 
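The SSDR read path above pops from a 16-entry ring: rx_start is the read index, rx_level the fill count, and both wrap with "& 0xf"; the write path further down pushes with rx_fifo[(rx_start + rx_level++) & 0xf] and flags SSSR_ROR on overflow. The same discipline in isolation (compilable sketch):

    #include <stdio.h>
    #include <stdint.h>

    /* Ring-buffer discipline of the SSP rx_fifo above. */
    static uint32_t fifo[16];
    static unsigned start, level;     /* read index and fill count */

    static void push(uint32_t v)
    {
        if (level < 16) {
            fifo[(start + level++) & 0xf] = v;
        } /* else: receiver overrun (SSSR_ROR in the device model) */
    }

    static uint32_t pop(void)
    {
        uint32_t v = fifo[start++];
        start &= 0xf;                 /* wrap the read index */
        level--;
        return v;
    }

    int main(void)
    {
        push(1);
        push(2);
        printf("%u %u\n", pop(), pop());   /* 1 2 */
        return 0;
    }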
overflow does no harm here */ + value &= (1 << SSCR0_DSS(s->sscr[0])) - 1; + + /* Data goes from here to the Tx FIFO and is shifted out from + * there directly to the slave, no need to buffer it. + */ + if (s->enable) { + uint32_t readval; + readval = ssi_transfer(s->bus, value); + if (s->rx_level < 0x10) { + s->rx_fifo[(s->rx_start + s->rx_level ++) & 0xf] = readval; + } else { + s->sssr |= SSSR_ROR; + } + } + pxa2xx_ssp_fifo_update(s); + break; + + case SSTSA: + s->sstsa = value; + break; + + case SSRSA: + s->ssrsa = value; + break; + + case SSACD: + s->ssacd = value; + break; + + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad write offset 0x%"HWADDR_PRIx"\n", + __func__, addr); + break; + } +} + +static const MemoryRegionOps pxa2xx_ssp_ops = { + .read = pxa2xx_ssp_read, + .write = pxa2xx_ssp_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void pxa2xx_ssp_reset(DeviceState *d) +{ + PXA2xxSSPState *s = PXA2XX_SSP(d); + + s->enable = 0; + s->sscr[0] = s->sscr[1] = 0; + s->sspsp = 0; + s->ssto = 0; + s->ssitr = 0; + s->sssr = 0; + s->sstsa = 0; + s->ssrsa = 0; + s->ssacd = 0; + s->rx_start = s->rx_level = 0; +} + +static void pxa2xx_ssp_init(Object *obj) +{ + DeviceState *dev = DEVICE(obj); + PXA2xxSSPState *s = PXA2XX_SSP(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + sysbus_init_irq(sbd, &s->irq); + + memory_region_init_io(&s->iomem, obj, &pxa2xx_ssp_ops, s, + "pxa2xx-ssp", 0x1000); + sysbus_init_mmio(sbd, &s->iomem); + + s->bus = ssi_create_bus(dev, "ssi"); +} + +/* Real-Time Clock */ +#define RCNR 0x00 /* RTC Counter register */ +#define RTAR 0x04 /* RTC Alarm register */ +#define RTSR 0x08 /* RTC Status register */ +#define RTTR 0x0c /* RTC Timer Trim register */ +#define RDCR 0x10 /* RTC Day Counter register */ +#define RYCR 0x14 /* RTC Year Counter register */ +#define RDAR1 0x18 /* RTC Wristwatch Day Alarm register 1 */ +#define RYAR1 0x1c /* RTC Wristwatch Year Alarm register 1 */ +#define RDAR2 0x20 /* RTC Wristwatch Day Alarm register 2 */ +#define RYAR2 0x24 /* RTC Wristwatch Year Alarm register 2 */ +#define SWCR 0x28 /* RTC Stopwatch Counter register */ +#define SWAR1 0x2c /* RTC Stopwatch Alarm register 1 */ +#define SWAR2 0x30 /* RTC Stopwatch Alarm register 2 */ +#define RTCPICR 0x34 /* RTC Periodic Interrupt Counter register */ +#define PIAR 0x38 /* RTC Periodic Interrupt Alarm register */ + +#define TYPE_PXA2XX_RTC "pxa2xx_rtc" +OBJECT_DECLARE_SIMPLE_TYPE(PXA2xxRTCState, PXA2XX_RTC) + +struct PXA2xxRTCState { + /*< private >*/ + SysBusDevice parent_obj; + /*< public >*/ + + MemoryRegion iomem; + uint32_t rttr; + uint32_t rtsr; + uint32_t rtar; + uint32_t rdar1; + uint32_t rdar2; + uint32_t ryar1; + uint32_t ryar2; + uint32_t swar1; + uint32_t swar2; + uint32_t piar; + uint32_t last_rcnr; + uint32_t last_rdcr; + uint32_t last_rycr; + uint32_t last_swcr; + uint32_t last_rtcpicr; + int64_t last_hz; + int64_t last_sw; + int64_t last_pi; + QEMUTimer *rtc_hz; + QEMUTimer *rtc_rdal1; + QEMUTimer *rtc_rdal2; + QEMUTimer *rtc_swal1; + QEMUTimer *rtc_swal2; + QEMUTimer *rtc_pi; + qemu_irq rtc_irq; +}; + +static inline void pxa2xx_rtc_int_update(PXA2xxRTCState *s) +{ + qemu_set_irq(s->rtc_irq, !!(s->rtsr & 0x2553)); +} + +static void pxa2xx_rtc_hzupdate(PXA2xxRTCState *s) +{ + int64_t rt = qemu_clock_get_ms(rtc_clock); + s->last_rcnr += ((rt - s->last_hz) << 15) / + (1000 * ((s->rttr & 0xffff) + 1)); + s->last_rdcr += ((rt - s->last_hz) << 15) / + (1000 * ((s->rttr & 0xffff) + 1)); + s->last_hz = rt; +} + +static void pxa2xx_rtc_swupdate(PXA2xxRTCState *s) +{ + 
int64_t rt = qemu_clock_get_ms(rtc_clock); + if (s->rtsr & (1 << 12)) + s->last_swcr += (rt - s->last_sw) / 10; + s->last_sw = rt; +} + +static void pxa2xx_rtc_piupdate(PXA2xxRTCState *s) +{ + int64_t rt = qemu_clock_get_ms(rtc_clock); + if (s->rtsr & (1 << 15)) + s->last_swcr += rt - s->last_pi; + s->last_pi = rt; +} + +static inline void pxa2xx_rtc_alarm_update(PXA2xxRTCState *s, + uint32_t rtsr) +{ + if ((rtsr & (1 << 2)) && !(rtsr & (1 << 0))) + timer_mod(s->rtc_hz, s->last_hz + + (((s->rtar - s->last_rcnr) * 1000 * + ((s->rttr & 0xffff) + 1)) >> 15)); + else + timer_del(s->rtc_hz); + + if ((rtsr & (1 << 5)) && !(rtsr & (1 << 4))) + timer_mod(s->rtc_rdal1, s->last_hz + + (((s->rdar1 - s->last_rdcr) * 1000 * + ((s->rttr & 0xffff) + 1)) >> 15)); /* TODO: fixup */ + else + timer_del(s->rtc_rdal1); + + if ((rtsr & (1 << 7)) && !(rtsr & (1 << 6))) + timer_mod(s->rtc_rdal2, s->last_hz + + (((s->rdar2 - s->last_rdcr) * 1000 * + ((s->rttr & 0xffff) + 1)) >> 15)); /* TODO: fixup */ + else + timer_del(s->rtc_rdal2); + + if ((rtsr & 0x1200) == 0x1200 && !(rtsr & (1 << 8))) + timer_mod(s->rtc_swal1, s->last_sw + + (s->swar1 - s->last_swcr) * 10); /* TODO: fixup */ + else + timer_del(s->rtc_swal1); + + if ((rtsr & 0x1800) == 0x1800 && !(rtsr & (1 << 10))) + timer_mod(s->rtc_swal2, s->last_sw + + (s->swar2 - s->last_swcr) * 10); /* TODO: fixup */ + else + timer_del(s->rtc_swal2); + + if ((rtsr & 0xc000) == 0xc000 && !(rtsr & (1 << 13))) + timer_mod(s->rtc_pi, s->last_pi + + (s->piar & 0xffff) - s->last_rtcpicr); + else + timer_del(s->rtc_pi); +} + +static inline void pxa2xx_rtc_hz_tick(void *opaque) +{ + PXA2xxRTCState *s = (PXA2xxRTCState *) opaque; + s->rtsr |= (1 << 0); + pxa2xx_rtc_alarm_update(s, s->rtsr); + pxa2xx_rtc_int_update(s); +} + +static inline void pxa2xx_rtc_rdal1_tick(void *opaque) +{ + PXA2xxRTCState *s = (PXA2xxRTCState *) opaque; + s->rtsr |= (1 << 4); + pxa2xx_rtc_alarm_update(s, s->rtsr); + pxa2xx_rtc_int_update(s); +} + +static inline void pxa2xx_rtc_rdal2_tick(void *opaque) +{ + PXA2xxRTCState *s = (PXA2xxRTCState *) opaque; + s->rtsr |= (1 << 6); + pxa2xx_rtc_alarm_update(s, s->rtsr); + pxa2xx_rtc_int_update(s); +} + +static inline void pxa2xx_rtc_swal1_tick(void *opaque) +{ + PXA2xxRTCState *s = (PXA2xxRTCState *) opaque; + s->rtsr |= (1 << 8); + pxa2xx_rtc_alarm_update(s, s->rtsr); + pxa2xx_rtc_int_update(s); +} + +static inline void pxa2xx_rtc_swal2_tick(void *opaque) +{ + PXA2xxRTCState *s = (PXA2xxRTCState *) opaque; + s->rtsr |= (1 << 10); + pxa2xx_rtc_alarm_update(s, s->rtsr); + pxa2xx_rtc_int_update(s); +} + +static inline void pxa2xx_rtc_pi_tick(void *opaque) +{ + PXA2xxRTCState *s = (PXA2xxRTCState *) opaque; + s->rtsr |= (1 << 13); + pxa2xx_rtc_piupdate(s); + s->last_rtcpicr = 0; + pxa2xx_rtc_alarm_update(s, s->rtsr); + pxa2xx_rtc_int_update(s); +} + +static uint64_t pxa2xx_rtc_read(void *opaque, hwaddr addr, + unsigned size) +{ + PXA2xxRTCState *s = (PXA2xxRTCState *) opaque; + + switch (addr) { + case RTTR: + return s->rttr; + case RTSR: + return s->rtsr; + case RTAR: + return s->rtar; + case RDAR1: + return s->rdar1; + case RDAR2: + return s->rdar2; + case RYAR1: + return s->ryar1; + case RYAR2: + return s->ryar2; + case SWAR1: + return s->swar1; + case SWAR2: + return s->swar2; + case PIAR: + return s->piar; + case RCNR: + return s->last_rcnr + + ((qemu_clock_get_ms(rtc_clock) - s->last_hz) << 15) / + (1000 * ((s->rttr & 0xffff) + 1)); + case RDCR: + return s->last_rdcr + + ((qemu_clock_get_ms(rtc_clock) - s->last_hz) << 15) / + (1000 * ((s->rttr & 0xffff) + 
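The hzupdate/alarm arithmetic above converts between milliseconds and RTC ticks: the 32.768 kHz source divided by (RTTR + 1) gives the counter rate, so at the reset trim value 0x7fff the counter advances exactly once per second. A worked, compilable example of the forward conversion:

    #include <stdio.h>
    #include <stdint.h>

    /* Elapsed-tick computation from pxa2xx_rtc_hzupdate(): ms elapsed,
     * shifted up by 2^15 (the 32.768 kHz source), divided by the trim. */
    static uint32_t rcnr_delta(int64_t ms, uint32_t rttr)
    {
        return (ms << 15) / (1000 * ((rttr & 0xffff) + 1));
    }

    int main(void)
    {
        printf("%u\n", rcnr_delta(5000, 0x7fff));  /* 5 s at 1 Hz -> 5 */
        /* Halving the divider makes the counter run at 2 Hz: */
        printf("%u\n", rcnr_delta(5000, 0x3fff));  /* -> 10 */
        return 0;
    }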
1)); + case RYCR: + return s->last_rycr; + case SWCR: + if (s->rtsr & (1 << 12)) + return s->last_swcr + + (qemu_clock_get_ms(rtc_clock) - s->last_sw) / 10; + else + return s->last_swcr; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad read offset 0x%"HWADDR_PRIx"\n", + __func__, addr); + break; + } + return 0; +} + +static void pxa2xx_rtc_write(void *opaque, hwaddr addr, + uint64_t value64, unsigned size) +{ + PXA2xxRTCState *s = (PXA2xxRTCState *) opaque; + uint32_t value = value64; + + switch (addr) { + case RTTR: + if (!(s->rttr & (1U << 31))) { + pxa2xx_rtc_hzupdate(s); + s->rttr = value; + pxa2xx_rtc_alarm_update(s, s->rtsr); + } + break; + + case RTSR: + if ((s->rtsr ^ value) & (1 << 15)) + pxa2xx_rtc_piupdate(s); + + if ((s->rtsr ^ value) & (1 << 12)) + pxa2xx_rtc_swupdate(s); + + if (((s->rtsr ^ value) & 0x4aac) | (value & ~0xdaac)) + pxa2xx_rtc_alarm_update(s, value); + + s->rtsr = (value & 0xdaac) | (s->rtsr & ~(value & ~0xdaac)); + pxa2xx_rtc_int_update(s); + break; + + case RTAR: + s->rtar = value; + pxa2xx_rtc_alarm_update(s, s->rtsr); + break; + + case RDAR1: + s->rdar1 = value; + pxa2xx_rtc_alarm_update(s, s->rtsr); + break; + + case RDAR2: + s->rdar2 = value; + pxa2xx_rtc_alarm_update(s, s->rtsr); + break; + + case RYAR1: + s->ryar1 = value; + pxa2xx_rtc_alarm_update(s, s->rtsr); + break; + + case RYAR2: + s->ryar2 = value; + pxa2xx_rtc_alarm_update(s, s->rtsr); + break; + + case SWAR1: + pxa2xx_rtc_swupdate(s); + s->swar1 = value; + s->last_swcr = 0; + pxa2xx_rtc_alarm_update(s, s->rtsr); + break; + + case SWAR2: + s->swar2 = value; + pxa2xx_rtc_alarm_update(s, s->rtsr); + break; + + case PIAR: + s->piar = value; + pxa2xx_rtc_alarm_update(s, s->rtsr); + break; + + case RCNR: + pxa2xx_rtc_hzupdate(s); + s->last_rcnr = value; + pxa2xx_rtc_alarm_update(s, s->rtsr); + break; + + case RDCR: + pxa2xx_rtc_hzupdate(s); + s->last_rdcr = value; + pxa2xx_rtc_alarm_update(s, s->rtsr); + break; + + case RYCR: + s->last_rycr = value; + break; + + case SWCR: + pxa2xx_rtc_swupdate(s); + s->last_swcr = value; + pxa2xx_rtc_alarm_update(s, s->rtsr); + break; + + case RTCPICR: + pxa2xx_rtc_piupdate(s); + s->last_rtcpicr = value & 0xffff; + pxa2xx_rtc_alarm_update(s, s->rtsr); + break; + + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad write offset 0x%"HWADDR_PRIx"\n", + __func__, addr); + } +} + +static const MemoryRegionOps pxa2xx_rtc_ops = { + .read = pxa2xx_rtc_read, + .write = pxa2xx_rtc_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void pxa2xx_rtc_init(Object *obj) +{ + PXA2xxRTCState *s = PXA2XX_RTC(obj); + SysBusDevice *dev = SYS_BUS_DEVICE(obj); + struct tm tm; + int wom; + + s->rttr = 0x7fff; + s->rtsr = 0; + + qemu_get_timedate(&tm, 0); + wom = ((tm.tm_mday - 1) / 7) + 1; + + s->last_rcnr = (uint32_t) mktimegm(&tm); + s->last_rdcr = (wom << 20) | ((tm.tm_wday + 1) << 17) | + (tm.tm_hour << 12) | (tm.tm_min << 6) | tm.tm_sec; + s->last_rycr = ((tm.tm_year + 1900) << 9) | + ((tm.tm_mon + 1) << 5) | tm.tm_mday; + s->last_swcr = (tm.tm_hour << 19) | + (tm.tm_min << 13) | (tm.tm_sec << 7); + s->last_rtcpicr = 0; + s->last_hz = s->last_sw = s->last_pi = qemu_clock_get_ms(rtc_clock); + + sysbus_init_irq(dev, &s->rtc_irq); + + memory_region_init_io(&s->iomem, obj, &pxa2xx_rtc_ops, s, + "pxa2xx-rtc", 0x10000); + sysbus_init_mmio(dev, &s->iomem); +} + +static void pxa2xx_rtc_realize(DeviceState *dev, Error **errp) +{ + PXA2xxRTCState *s = PXA2XX_RTC(dev); + s->rtc_hz = timer_new_ms(rtc_clock, pxa2xx_rtc_hz_tick, s); + s->rtc_rdal1 = timer_new_ms(rtc_clock, 
pxa2xx_rtc_rdal1_tick, s); + s->rtc_rdal2 = timer_new_ms(rtc_clock, pxa2xx_rtc_rdal2_tick, s); + s->rtc_swal1 = timer_new_ms(rtc_clock, pxa2xx_rtc_swal1_tick, s); + s->rtc_swal2 = timer_new_ms(rtc_clock, pxa2xx_rtc_swal2_tick, s); + s->rtc_pi = timer_new_ms(rtc_clock, pxa2xx_rtc_pi_tick, s); +} + +static int pxa2xx_rtc_pre_save(void *opaque) +{ + PXA2xxRTCState *s = (PXA2xxRTCState *) opaque; + + pxa2xx_rtc_hzupdate(s); + pxa2xx_rtc_piupdate(s); + pxa2xx_rtc_swupdate(s); + + return 0; +} + +static int pxa2xx_rtc_post_load(void *opaque, int version_id) +{ + PXA2xxRTCState *s = (PXA2xxRTCState *) opaque; + + pxa2xx_rtc_alarm_update(s, s->rtsr); + + return 0; +} + +static const VMStateDescription vmstate_pxa2xx_rtc_regs = { + .name = "pxa2xx_rtc", + .version_id = 0, + .minimum_version_id = 0, + .pre_save = pxa2xx_rtc_pre_save, + .post_load = pxa2xx_rtc_post_load, + .fields = (VMStateField[]) { + VMSTATE_UINT32(rttr, PXA2xxRTCState), + VMSTATE_UINT32(rtsr, PXA2xxRTCState), + VMSTATE_UINT32(rtar, PXA2xxRTCState), + VMSTATE_UINT32(rdar1, PXA2xxRTCState), + VMSTATE_UINT32(rdar2, PXA2xxRTCState), + VMSTATE_UINT32(ryar1, PXA2xxRTCState), + VMSTATE_UINT32(ryar2, PXA2xxRTCState), + VMSTATE_UINT32(swar1, PXA2xxRTCState), + VMSTATE_UINT32(swar2, PXA2xxRTCState), + VMSTATE_UINT32(piar, PXA2xxRTCState), + VMSTATE_UINT32(last_rcnr, PXA2xxRTCState), + VMSTATE_UINT32(last_rdcr, PXA2xxRTCState), + VMSTATE_UINT32(last_rycr, PXA2xxRTCState), + VMSTATE_UINT32(last_swcr, PXA2xxRTCState), + VMSTATE_UINT32(last_rtcpicr, PXA2xxRTCState), + VMSTATE_INT64(last_hz, PXA2xxRTCState), + VMSTATE_INT64(last_sw, PXA2xxRTCState), + VMSTATE_INT64(last_pi, PXA2xxRTCState), + VMSTATE_END_OF_LIST(), + }, +}; + +static void pxa2xx_rtc_sysbus_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->desc = "PXA2xx RTC Controller"; + dc->vmsd = &vmstate_pxa2xx_rtc_regs; + dc->realize = pxa2xx_rtc_realize; +} + +static const TypeInfo pxa2xx_rtc_sysbus_info = { + .name = TYPE_PXA2XX_RTC, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(PXA2xxRTCState), + .instance_init = pxa2xx_rtc_init, + .class_init = pxa2xx_rtc_sysbus_class_init, +}; + +/* I2C Interface */ + +#define TYPE_PXA2XX_I2C_SLAVE "pxa2xx-i2c-slave" +OBJECT_DECLARE_SIMPLE_TYPE(PXA2xxI2CSlaveState, PXA2XX_I2C_SLAVE) + +struct PXA2xxI2CSlaveState { + I2CSlave parent_obj; + + PXA2xxI2CState *host; +}; + +struct PXA2xxI2CState { + /*< private >*/ + SysBusDevice parent_obj; + /*< public >*/ + + MemoryRegion iomem; + PXA2xxI2CSlaveState *slave; + I2CBus *bus; + qemu_irq irq; + uint32_t offset; + uint32_t region_size; + + uint16_t control; + uint16_t status; + uint8_t ibmr; + uint8_t data; +}; + +#define IBMR 0x80 /* I2C Bus Monitor register */ +#define IDBR 0x88 /* I2C Data Buffer register */ +#define ICR 0x90 /* I2C Control register */ +#define ISR 0x98 /* I2C Status register */ +#define ISAR 0xa0 /* I2C Slave Address register */ + +static void pxa2xx_i2c_update(PXA2xxI2CState *s) +{ + uint16_t level = 0; + level |= s->status & s->control & (1 << 10); /* BED */ + level |= (s->status & (1 << 7)) && (s->control & (1 << 9)); /* IRF */ + level |= (s->status & (1 << 6)) && (s->control & (1 << 8)); /* ITE */ + level |= s->status & (1 << 9); /* SAD */ + qemu_set_irq(s->irq, !!level); +} + +/* These are only stubs now. 
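+ * The controller can also be addressed as a bus slave; the I2CSlave
+ * callbacks below model just enough of that mode to satisfy a polling
+ * guest: they latch the SAD/SSD/ACKNAK status bits, pass single bytes
+ * through the data buffer and raise the interrupt line.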
*/ +static int pxa2xx_i2c_event(I2CSlave *i2c, enum i2c_event event) +{ + PXA2xxI2CSlaveState *slave = PXA2XX_I2C_SLAVE(i2c); + PXA2xxI2CState *s = slave->host; + + switch (event) { + case I2C_START_SEND: + s->status |= (1 << 9); /* set SAD */ + s->status &= ~(1 << 0); /* clear RWM */ + break; + case I2C_START_RECV: + s->status |= (1 << 9); /* set SAD */ + s->status |= 1 << 0; /* set RWM */ + break; + case I2C_FINISH: + s->status |= (1 << 4); /* set SSD */ + break; + case I2C_NACK: + s->status |= 1 << 1; /* set ACKNAK */ + break; + } + pxa2xx_i2c_update(s); + + return 0; +} + +static uint8_t pxa2xx_i2c_rx(I2CSlave *i2c) +{ + PXA2xxI2CSlaveState *slave = PXA2XX_I2C_SLAVE(i2c); + PXA2xxI2CState *s = slave->host; + + if ((s->control & (1 << 14)) || !(s->control & (1 << 6))) { + return 0; + } + + if (s->status & (1 << 0)) { /* RWM */ + s->status |= 1 << 6; /* set ITE */ + } + pxa2xx_i2c_update(s); + + return s->data; +} + +static int pxa2xx_i2c_tx(I2CSlave *i2c, uint8_t data) +{ + PXA2xxI2CSlaveState *slave = PXA2XX_I2C_SLAVE(i2c); + PXA2xxI2CState *s = slave->host; + + if ((s->control & (1 << 14)) || !(s->control & (1 << 6))) { + return 1; + } + + if (!(s->status & (1 << 0))) { /* RWM */ + s->status |= 1 << 7; /* set IRF */ + s->data = data; + } + pxa2xx_i2c_update(s); + + return 1; +} + +static uint64_t pxa2xx_i2c_read(void *opaque, hwaddr addr, + unsigned size) +{ + PXA2xxI2CState *s = (PXA2xxI2CState *) opaque; + I2CSlave *slave; + + addr -= s->offset; + switch (addr) { + case ICR: + return s->control; + case ISR: + return s->status | (i2c_bus_busy(s->bus) << 2); + case ISAR: + slave = I2C_SLAVE(s->slave); + return slave->address; + case IDBR: + return s->data; + case IBMR: + if (s->status & (1 << 2)) + s->ibmr ^= 3; /* Fake SCL and SDA pin changes */ + else + s->ibmr = 0; + return s->ibmr; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad read offset 0x%"HWADDR_PRIx"\n", + __func__, addr); + break; + } + return 0; +} + +static void pxa2xx_i2c_write(void *opaque, hwaddr addr, + uint64_t value64, unsigned size) +{ + PXA2xxI2CState *s = (PXA2xxI2CState *) opaque; + uint32_t value = value64; + int ack; + + addr -= s->offset; + switch (addr) { + case ICR: + s->control = value & 0xfff7; + if ((value & (1 << 3)) && (value & (1 << 6))) { /* TB and IUE */ + /* TODO: slave mode */ + if (value & (1 << 0)) { /* START condition */ + if (s->data & 1) + s->status |= 1 << 0; /* set RWM */ + else + s->status &= ~(1 << 0); /* clear RWM */ + ack = !i2c_start_transfer(s->bus, s->data >> 1, s->data & 1); + } else { + if (s->status & (1 << 0)) { /* RWM */ + s->data = i2c_recv(s->bus); + if (value & (1 << 2)) /* ACKNAK */ + i2c_nack(s->bus); + ack = 1; + } else + ack = !i2c_send(s->bus, s->data); + } + + if (value & (1 << 1)) /* STOP condition */ + i2c_end_transfer(s->bus); + + if (ack) { + if (value & (1 << 0)) /* START condition */ + s->status |= 1 << 6; /* set ITE */ + else + if (s->status & (1 << 0)) /* RWM */ + s->status |= 1 << 7; /* set IRF */ + else + s->status |= 1 << 6; /* set ITE */ + s->status &= ~(1 << 1); /* clear ACKNAK */ + } else { + s->status |= 1 << 6; /* set ITE */ + s->status |= 1 << 10; /* set BED */ + s->status |= 1 << 1; /* set ACKNAK */ + } + } + if (!(value & (1 << 3)) && (value & (1 << 6))) /* !TB and IUE */ + if (value & (1 << 4)) /* MA */ + i2c_end_transfer(s->bus); + pxa2xx_i2c_update(s); + break; + + case ISR: + s->status &= ~(value & 0x07f0); + pxa2xx_i2c_update(s); + break; + + case ISAR: + i2c_slave_set_address(I2C_SLAVE(s->slave), value & 0x7f); + break; + + case IDBR: + 
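/*
+         * Data buffer.  The byte latched here is what the ICR handler
+         * above shifts out on the next TB.  A hypothetical guest-side
+         * single-byte master write (with IUE already set) looks roughly
+         * like:
+         *
+         *     IDBR = (addr << 1) | 0;  ICR |= START | TB;
+         *     ... wait for ITE in ISR ...
+         *     IDBR = data;  ICR = (ICR & ~START) | STOP | TB;
+         */
+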
s->data = value & 0xff; + break; + + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad write offset 0x%"HWADDR_PRIx"\n", + __func__, addr); + } +} + +static const MemoryRegionOps pxa2xx_i2c_ops = { + .read = pxa2xx_i2c_read, + .write = pxa2xx_i2c_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static const VMStateDescription vmstate_pxa2xx_i2c_slave = { + .name = "pxa2xx_i2c_slave", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_I2C_SLAVE(parent_obj, PXA2xxI2CSlaveState), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_pxa2xx_i2c = { + .name = "pxa2xx_i2c", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT16(control, PXA2xxI2CState), + VMSTATE_UINT16(status, PXA2xxI2CState), + VMSTATE_UINT8(ibmr, PXA2xxI2CState), + VMSTATE_UINT8(data, PXA2xxI2CState), + VMSTATE_STRUCT_POINTER(slave, PXA2xxI2CState, + vmstate_pxa2xx_i2c_slave, PXA2xxI2CSlaveState), + VMSTATE_END_OF_LIST() + } +}; + +static void pxa2xx_i2c_slave_class_init(ObjectClass *klass, void *data) +{ + I2CSlaveClass *k = I2C_SLAVE_CLASS(klass); + + k->event = pxa2xx_i2c_event; + k->recv = pxa2xx_i2c_rx; + k->send = pxa2xx_i2c_tx; +} + +static const TypeInfo pxa2xx_i2c_slave_info = { + .name = TYPE_PXA2XX_I2C_SLAVE, + .parent = TYPE_I2C_SLAVE, + .instance_size = sizeof(PXA2xxI2CSlaveState), + .class_init = pxa2xx_i2c_slave_class_init, +}; + +PXA2xxI2CState *pxa2xx_i2c_init(hwaddr base, + qemu_irq irq, uint32_t region_size) +{ + DeviceState *dev; + SysBusDevice *i2c_dev; + PXA2xxI2CState *s; + I2CBus *i2cbus; + + dev = qdev_new(TYPE_PXA2XX_I2C); + qdev_prop_set_uint32(dev, "size", region_size + 1); + qdev_prop_set_uint32(dev, "offset", base & region_size); + + i2c_dev = SYS_BUS_DEVICE(dev); + sysbus_realize_and_unref(i2c_dev, &error_fatal); + sysbus_mmio_map(i2c_dev, 0, base & ~region_size); + sysbus_connect_irq(i2c_dev, 0, irq); + + s = PXA2XX_I2C(i2c_dev); + /* FIXME: Should the slave device really be on a separate bus? 
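+ * Keeping it on a private bus at least guarantees that the controller's
+ * own slave-mode personality can never collide with, or be probed by,
+ * real devices on the public bus returned by pxa2xx_i2c_bus().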
*/ + i2cbus = i2c_init_bus(dev, "dummy"); + s->slave = PXA2XX_I2C_SLAVE(i2c_slave_create_simple(i2cbus, + TYPE_PXA2XX_I2C_SLAVE, + 0)); + s->slave->host = s; + + return s; +} + +static void pxa2xx_i2c_initfn(Object *obj) +{ + DeviceState *dev = DEVICE(obj); + PXA2xxI2CState *s = PXA2XX_I2C(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + + s->bus = i2c_init_bus(dev, NULL); + + memory_region_init_io(&s->iomem, obj, &pxa2xx_i2c_ops, s, + "pxa2xx-i2c", s->region_size); + sysbus_init_mmio(sbd, &s->iomem); + sysbus_init_irq(sbd, &s->irq); +} + +I2CBus *pxa2xx_i2c_bus(PXA2xxI2CState *s) +{ + return s->bus; +} + +static Property pxa2xx_i2c_properties[] = { + DEFINE_PROP_UINT32("size", PXA2xxI2CState, region_size, 0x10000), + DEFINE_PROP_UINT32("offset", PXA2xxI2CState, offset, 0), + DEFINE_PROP_END_OF_LIST(), +}; + +static void pxa2xx_i2c_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->desc = "PXA2xx I2C Bus Controller"; + dc->vmsd = &vmstate_pxa2xx_i2c; + device_class_set_props(dc, pxa2xx_i2c_properties); +} + +static const TypeInfo pxa2xx_i2c_info = { + .name = TYPE_PXA2XX_I2C, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(PXA2xxI2CState), + .instance_init = pxa2xx_i2c_initfn, + .class_init = pxa2xx_i2c_class_init, +}; + +/* PXA Inter-IC Sound Controller */ +static void pxa2xx_i2s_reset(PXA2xxI2SState *i2s) +{ + i2s->rx_len = 0; + i2s->tx_len = 0; + i2s->fifo_len = 0; + i2s->clk = 0x1a; + i2s->control[0] = 0x00; + i2s->control[1] = 0x00; + i2s->status = 0x00; + i2s->mask = 0x00; +} + +#define SACR_TFTH(val) ((val >> 8) & 0xf) +#define SACR_RFTH(val) ((val >> 12) & 0xf) +#define SACR_DREC(val) (val & (1 << 3)) +#define SACR_DPRL(val) (val & (1 << 4)) + +static inline void pxa2xx_i2s_update(PXA2xxI2SState *i2s) +{ + int rfs, tfs; + rfs = SACR_RFTH(i2s->control[0]) < i2s->rx_len && + !SACR_DREC(i2s->control[1]); + tfs = (i2s->tx_len || i2s->fifo_len < SACR_TFTH(i2s->control[0])) && + i2s->enable && !SACR_DPRL(i2s->control[1]); + + qemu_set_irq(i2s->rx_dma, rfs); + qemu_set_irq(i2s->tx_dma, tfs); + + i2s->status &= 0xe0; + if (i2s->fifo_len < 16 || !i2s->enable) + i2s->status |= 1 << 0; /* TNF */ + if (i2s->rx_len) + i2s->status |= 1 << 1; /* RNE */ + if (i2s->enable) + i2s->status |= 1 << 2; /* BSY */ + if (tfs) + i2s->status |= 1 << 3; /* TFS */ + if (rfs) + i2s->status |= 1 << 4; /* RFS */ + if (!(i2s->tx_len && i2s->enable)) + i2s->status |= i2s->fifo_len << 8; /* TFL */ + i2s->status |= MAX(i2s->rx_len, 0xf) << 12; /* RFL */ + + qemu_set_irq(i2s->irq, i2s->status & i2s->mask); +} + +#define SACR0 0x00 /* Serial Audio Global Control register */ +#define SACR1 0x04 /* Serial Audio I2S/MSB-Justified Control register */ +#define SASR0 0x0c /* Serial Audio Interface and FIFO Status register */ +#define SAIMR 0x14 /* Serial Audio Interrupt Mask register */ +#define SAICR 0x18 /* Serial Audio Interrupt Clear register */ +#define SADIV 0x60 /* Serial Audio Clock Divider register */ +#define SADR 0x80 /* Serial Audio Data register */ + +static uint64_t pxa2xx_i2s_read(void *opaque, hwaddr addr, + unsigned size) +{ + PXA2xxI2SState *s = (PXA2xxI2SState *) opaque; + + switch (addr) { + case SACR0: + return s->control[0]; + case SACR1: + return s->control[1]; + case SASR0: + return s->status; + case SAIMR: + return s->mask; + case SAICR: + return 0; + case SADIV: + return s->clk; + case SADR: + if (s->rx_len > 0) { + s->rx_len --; + pxa2xx_i2s_update(s); + return s->codec_in(s->opaque); + } + return 0; + default: + 
qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad read offset 0x%"HWADDR_PRIx"\n", + __func__, addr); + break; + } + return 0; +} + +static void pxa2xx_i2s_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + PXA2xxI2SState *s = (PXA2xxI2SState *) opaque; + uint32_t *sample; + + switch (addr) { + case SACR0: + if (value & (1 << 3)) /* RST */ + pxa2xx_i2s_reset(s); + s->control[0] = value & 0xff3d; + if (!s->enable && (value & 1) && s->tx_len) { /* ENB */ + for (sample = s->fifo; s->fifo_len > 0; s->fifo_len --, sample ++) + s->codec_out(s->opaque, *sample); + s->status &= ~(1 << 7); /* I2SOFF */ + } + if (value & (1 << 4)) /* EFWR */ + printf("%s: Attempt to use special function\n", __func__); + s->enable = (value & 9) == 1; /* ENB && !RST*/ + pxa2xx_i2s_update(s); + break; + case SACR1: + s->control[1] = value & 0x0039; + if (value & (1 << 5)) /* ENLBF */ + printf("%s: Attempt to use loopback function\n", __func__); + if (value & (1 << 4)) /* DPRL */ + s->fifo_len = 0; + pxa2xx_i2s_update(s); + break; + case SAIMR: + s->mask = value & 0x0078; + pxa2xx_i2s_update(s); + break; + case SAICR: + s->status &= ~(value & (3 << 5)); + pxa2xx_i2s_update(s); + break; + case SADIV: + s->clk = value & 0x007f; + break; + case SADR: + if (s->tx_len && s->enable) { + s->tx_len --; + pxa2xx_i2s_update(s); + s->codec_out(s->opaque, value); + } else if (s->fifo_len < 16) { + s->fifo[s->fifo_len ++] = value; + pxa2xx_i2s_update(s); + } + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad write offset 0x%"HWADDR_PRIx"\n", + __func__, addr); + } +} + +static const MemoryRegionOps pxa2xx_i2s_ops = { + .read = pxa2xx_i2s_read, + .write = pxa2xx_i2s_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static const VMStateDescription vmstate_pxa2xx_i2s = { + .name = "pxa2xx_i2s", + .version_id = 0, + .minimum_version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_UINT32_ARRAY(control, PXA2xxI2SState, 2), + VMSTATE_UINT32(status, PXA2xxI2SState), + VMSTATE_UINT32(mask, PXA2xxI2SState), + VMSTATE_UINT32(clk, PXA2xxI2SState), + VMSTATE_INT32(enable, PXA2xxI2SState), + VMSTATE_INT32(rx_len, PXA2xxI2SState), + VMSTATE_INT32(tx_len, PXA2xxI2SState), + VMSTATE_INT32(fifo_len, PXA2xxI2SState), + VMSTATE_END_OF_LIST() + } +}; + +static void pxa2xx_i2s_data_req(void *opaque, int tx, int rx) +{ + PXA2xxI2SState *s = (PXA2xxI2SState *) opaque; + uint32_t *sample; + + /* Signal FIFO errors */ + if (s->enable && s->tx_len) + s->status |= 1 << 5; /* TUR */ + if (s->enable && s->rx_len) + s->status |= 1 << 6; /* ROR */ + + /* Should be tx - MIN(tx, s->fifo_len) but we don't really need to + * handle the cases where it makes a difference. */ + s->tx_len = tx - s->fifo_len; + s->rx_len = rx; + /* Note that if s->codec_out wasn't set, we wouldn't get called. 
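+ * pxa2xx_i2s_data_req() is the DMA engine's request callback: tx and rx
+ * are the word counts the DMA is prepared to move this round, and any
+ * samples the guest already queued in the Tx FIFO are flushed straight
+ * to the codec below.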
*/ + if (s->enable) + for (sample = s->fifo; s->fifo_len; s->fifo_len --, sample ++) + s->codec_out(s->opaque, *sample); + pxa2xx_i2s_update(s); +} + +static PXA2xxI2SState *pxa2xx_i2s_init(MemoryRegion *sysmem, + hwaddr base, + qemu_irq irq, qemu_irq rx_dma, qemu_irq tx_dma) +{ + PXA2xxI2SState *s = g_new0(PXA2xxI2SState, 1); + + s->irq = irq; + s->rx_dma = rx_dma; + s->tx_dma = tx_dma; + s->data_req = pxa2xx_i2s_data_req; + + pxa2xx_i2s_reset(s); + + memory_region_init_io(&s->iomem, NULL, &pxa2xx_i2s_ops, s, + "pxa2xx-i2s", 0x100000); + memory_region_add_subregion(sysmem, base, &s->iomem); + + vmstate_register(NULL, base, &vmstate_pxa2xx_i2s, s); + + return s; +} + +/* PXA Fast Infra-red Communications Port */ +struct PXA2xxFIrState { + /*< private >*/ + SysBusDevice parent_obj; + /*< public >*/ + + MemoryRegion iomem; + qemu_irq irq; + qemu_irq rx_dma; + qemu_irq tx_dma; + uint32_t enable; + CharBackend chr; + + uint8_t control[3]; + uint8_t status[2]; + + uint32_t rx_len; + uint32_t rx_start; + uint8_t rx_fifo[64]; +}; + +static void pxa2xx_fir_reset(DeviceState *d) +{ + PXA2xxFIrState *s = PXA2XX_FIR(d); + + s->control[0] = 0x00; + s->control[1] = 0x00; + s->control[2] = 0x00; + s->status[0] = 0x00; + s->status[1] = 0x00; + s->enable = 0; +} + +static inline void pxa2xx_fir_update(PXA2xxFIrState *s) +{ + static const int tresh[4] = { 8, 16, 32, 0 }; + int intr = 0; + if ((s->control[0] & (1 << 4)) && /* RXE */ + s->rx_len >= tresh[s->control[2] & 3]) /* TRIG */ + s->status[0] |= 1 << 4; /* RFS */ + else + s->status[0] &= ~(1 << 4); /* RFS */ + if (s->control[0] & (1 << 3)) /* TXE */ + s->status[0] |= 1 << 3; /* TFS */ + else + s->status[0] &= ~(1 << 3); /* TFS */ + if (s->rx_len) + s->status[1] |= 1 << 2; /* RNE */ + else + s->status[1] &= ~(1 << 2); /* RNE */ + if (s->control[0] & (1 << 4)) /* RXE */ + s->status[1] |= 1 << 0; /* RSY */ + else + s->status[1] &= ~(1 << 0); /* RSY */ + + intr |= (s->control[0] & (1 << 5)) && /* RIE */ + (s->status[0] & (1 << 4)); /* RFS */ + intr |= (s->control[0] & (1 << 6)) && /* TIE */ + (s->status[0] & (1 << 3)); /* TFS */ + intr |= (s->control[2] & (1 << 4)) && /* TRAIL */ + (s->status[0] & (1 << 6)); /* EOC */ + intr |= (s->control[0] & (1 << 2)) && /* TUS */ + (s->status[0] & (1 << 1)); /* TUR */ + intr |= s->status[0] & 0x25; /* FRE, RAB, EIF */ + + qemu_set_irq(s->rx_dma, (s->status[0] >> 4) & 1); + qemu_set_irq(s->tx_dma, (s->status[0] >> 3) & 1); + + qemu_set_irq(s->irq, intr && s->enable); +} + +#define ICCR0 0x00 /* FICP Control register 0 */ +#define ICCR1 0x04 /* FICP Control register 1 */ +#define ICCR2 0x08 /* FICP Control register 2 */ +#define ICDR 0x0c /* FICP Data register */ +#define ICSR0 0x14 /* FICP Status register 0 */ +#define ICSR1 0x18 /* FICP Status register 1 */ +#define ICFOR 0x1c /* FICP FIFO Occupancy Status register */ + +static uint64_t pxa2xx_fir_read(void *opaque, hwaddr addr, + unsigned size) +{ + PXA2xxFIrState *s = (PXA2xxFIrState *) opaque; + uint8_t ret; + + switch (addr) { + case ICCR0: + return s->control[0]; + case ICCR1: + return s->control[1]; + case ICCR2: + return s->control[2]; + case ICDR: + s->status[0] &= ~0x01; + s->status[1] &= ~0x72; + if (s->rx_len) { + s->rx_len --; + ret = s->rx_fifo[s->rx_start ++]; + s->rx_start &= 63; + pxa2xx_fir_update(s); + return ret; + } + printf("%s: Rx FIFO underrun.\n", __func__); + break; + case ICSR0: + return s->status[0]; + case ICSR1: + return s->status[1] | (1 << 3); /* TNF */ + case ICFOR: + return s->rx_len; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: 
Bad read offset 0x%"HWADDR_PRIx"\n", + __func__, addr); + break; + } + return 0; +} + +static void pxa2xx_fir_write(void *opaque, hwaddr addr, + uint64_t value64, unsigned size) +{ + PXA2xxFIrState *s = (PXA2xxFIrState *) opaque; + uint32_t value = value64; + uint8_t ch; + + switch (addr) { + case ICCR0: + s->control[0] = value; + if (!(value & (1 << 4))) /* RXE */ + s->rx_len = s->rx_start = 0; + if (!(value & (1 << 3))) { /* TXE */ + /* Nop */ + } + s->enable = value & 1; /* ITR */ + if (!s->enable) + s->status[0] = 0; + pxa2xx_fir_update(s); + break; + case ICCR1: + s->control[1] = value; + break; + case ICCR2: + s->control[2] = value & 0x3f; + pxa2xx_fir_update(s); + break; + case ICDR: + if (s->control[2] & (1 << 2)) { /* TXP */ + ch = value; + } else { + ch = ~value; + } + if (s->enable && (s->control[0] & (1 << 3))) { /* TXE */ + /* XXX this blocks entire thread. Rewrite to use + * qemu_chr_fe_write and background I/O callbacks */ + qemu_chr_fe_write_all(&s->chr, &ch, 1); + } + break; + case ICSR0: + s->status[0] &= ~(value & 0x66); + pxa2xx_fir_update(s); + break; + case ICFOR: + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad write offset 0x%"HWADDR_PRIx"\n", + __func__, addr); + } +} + +static const MemoryRegionOps pxa2xx_fir_ops = { + .read = pxa2xx_fir_read, + .write = pxa2xx_fir_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static int pxa2xx_fir_is_empty(void *opaque) +{ + PXA2xxFIrState *s = (PXA2xxFIrState *) opaque; + return (s->rx_len < 64); +} + +static void pxa2xx_fir_rx(void *opaque, const uint8_t *buf, int size) +{ + PXA2xxFIrState *s = (PXA2xxFIrState *) opaque; + if (!(s->control[0] & (1 << 4))) /* RXE */ + return; + + while (size --) { + s->status[1] |= 1 << 4; /* EOF */ + if (s->rx_len >= 64) { + s->status[1] |= 1 << 6; /* ROR */ + break; + } + + if (s->control[2] & (1 << 3)) /* RXP */ + s->rx_fifo[(s->rx_start + s->rx_len ++) & 63] = *(buf ++); + else + s->rx_fifo[(s->rx_start + s->rx_len ++) & 63] = ~*(buf ++); + } + + pxa2xx_fir_update(s); +} + +static void pxa2xx_fir_event(void *opaque, QEMUChrEvent event) +{ +} + +static void pxa2xx_fir_instance_init(Object *obj) +{ + PXA2xxFIrState *s = PXA2XX_FIR(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + + memory_region_init_io(&s->iomem, obj, &pxa2xx_fir_ops, s, + "pxa2xx-fir", 0x1000); + sysbus_init_mmio(sbd, &s->iomem); + sysbus_init_irq(sbd, &s->irq); + sysbus_init_irq(sbd, &s->rx_dma); + sysbus_init_irq(sbd, &s->tx_dma); +} + +static void pxa2xx_fir_realize(DeviceState *dev, Error **errp) +{ + PXA2xxFIrState *s = PXA2XX_FIR(dev); + + qemu_chr_fe_set_handlers(&s->chr, pxa2xx_fir_is_empty, + pxa2xx_fir_rx, pxa2xx_fir_event, NULL, s, NULL, + true); +} + +static bool pxa2xx_fir_vmstate_validate(void *opaque, int version_id) +{ + PXA2xxFIrState *s = opaque; + + return s->rx_start < ARRAY_SIZE(s->rx_fifo); +} + +static const VMStateDescription pxa2xx_fir_vmsd = { + .name = "pxa2xx-fir", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(enable, PXA2xxFIrState), + VMSTATE_UINT8_ARRAY(control, PXA2xxFIrState, 3), + VMSTATE_UINT8_ARRAY(status, PXA2xxFIrState, 2), + VMSTATE_UINT32(rx_len, PXA2xxFIrState), + VMSTATE_UINT32(rx_start, PXA2xxFIrState), + VMSTATE_VALIDATE("fifo is 64 bytes", pxa2xx_fir_vmstate_validate), + VMSTATE_UINT8_ARRAY(rx_fifo, PXA2xxFIrState, 64), + VMSTATE_END_OF_LIST() + } +}; + +static Property pxa2xx_fir_properties[] = { + DEFINE_PROP_CHR("chardev", PXA2xxFIrState, chr), + DEFINE_PROP_END_OF_LIST(), +}; + +static void 
pxa2xx_fir_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = pxa2xx_fir_realize; + dc->vmsd = &pxa2xx_fir_vmsd; + device_class_set_props(dc, pxa2xx_fir_properties); + dc->reset = pxa2xx_fir_reset; +} + +static const TypeInfo pxa2xx_fir_info = { + .name = TYPE_PXA2XX_FIR, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(PXA2xxFIrState), + .class_init = pxa2xx_fir_class_init, + .instance_init = pxa2xx_fir_instance_init, +}; + +static PXA2xxFIrState *pxa2xx_fir_init(MemoryRegion *sysmem, + hwaddr base, + qemu_irq irq, qemu_irq rx_dma, + qemu_irq tx_dma, + Chardev *chr) +{ + DeviceState *dev; + SysBusDevice *sbd; + + dev = qdev_new(TYPE_PXA2XX_FIR); + qdev_prop_set_chr(dev, "chardev", chr); + sbd = SYS_BUS_DEVICE(dev); + sysbus_realize_and_unref(sbd, &error_fatal); + sysbus_mmio_map(sbd, 0, base); + sysbus_connect_irq(sbd, 0, irq); + sysbus_connect_irq(sbd, 1, rx_dma); + sysbus_connect_irq(sbd, 2, tx_dma); + return PXA2XX_FIR(dev); +} + +static void pxa2xx_reset(void *opaque, int line, int level) +{ + PXA2xxState *s = (PXA2xxState *) opaque; + + if (level && (s->pm_regs[PCFR >> 2] & 0x10)) { /* GPR_EN */ + cpu_reset(CPU(s->cpu)); + /* TODO: reset peripherals */ + } +} + +/* Initialise a PXA270 integrated chip (ARM based core). */ +PXA2xxState *pxa270_init(MemoryRegion *address_space, + unsigned int sdram_size, const char *cpu_type) +{ + PXA2xxState *s; + int i; + DriveInfo *dinfo; + s = g_new0(PXA2xxState, 1); + + if (strncmp(cpu_type, "pxa27", 5)) { + error_report("Machine requires a PXA27x processor"); + exit(1); + } + + s->cpu = ARM_CPU(cpu_create(cpu_type)); + s->reset = qemu_allocate_irq(pxa2xx_reset, s, 0); + + /* SDRAM & Internal Memory Storage */ + memory_region_init_ram(&s->sdram, NULL, "pxa270.sdram", sdram_size, + &error_fatal); + memory_region_add_subregion(address_space, PXA2XX_SDRAM_BASE, &s->sdram); + memory_region_init_ram(&s->internal, NULL, "pxa270.internal", 0x40000, + &error_fatal); + memory_region_add_subregion(address_space, PXA2XX_INTERNAL_BASE, + &s->internal); + + s->pic = pxa2xx_pic_init(0x40d00000, s->cpu); + + s->dma = pxa27x_dma_init(0x40000000, + qdev_get_gpio_in(s->pic, PXA2XX_PIC_DMA)); + + sysbus_create_varargs("pxa27x-timer", 0x40a00000, + qdev_get_gpio_in(s->pic, PXA2XX_PIC_OST_0 + 0), + qdev_get_gpio_in(s->pic, PXA2XX_PIC_OST_0 + 1), + qdev_get_gpio_in(s->pic, PXA2XX_PIC_OST_0 + 2), + qdev_get_gpio_in(s->pic, PXA2XX_PIC_OST_0 + 3), + qdev_get_gpio_in(s->pic, PXA27X_PIC_OST_4_11), + NULL); + + s->gpio = pxa2xx_gpio_init(0x40e00000, s->cpu, s->pic, 121); + + s->mmc = pxa2xx_mmci_init(address_space, 0x41100000, + qdev_get_gpio_in(s->pic, PXA2XX_PIC_MMC), + qdev_get_gpio_in(s->dma, PXA2XX_RX_RQ_MMCI), + qdev_get_gpio_in(s->dma, PXA2XX_TX_RQ_MMCI)); + dinfo = drive_get(IF_SD, 0, 0); + if (dinfo) { + DeviceState *carddev; + + /* Create and plug in the sd card */ + carddev = qdev_new(TYPE_SD_CARD); + qdev_prop_set_drive_err(carddev, "drive", + blk_by_legacy_dinfo(dinfo), &error_fatal); + qdev_realize_and_unref(carddev, qdev_get_child_bus(DEVICE(s->mmc), + "sd-bus"), + &error_fatal); + } else if (!qtest_enabled()) { + warn_report("missing SecureDigital device"); + } + + for (i = 0; pxa270_serial[i].io_base; i++) { + if (serial_hd(i)) { + serial_mm_init(address_space, pxa270_serial[i].io_base, 2, + qdev_get_gpio_in(s->pic, pxa270_serial[i].irqn), + 14857000 / 16, serial_hd(i), + DEVICE_NATIVE_ENDIAN); + } else { + break; + } + } + if (serial_hd(i)) + s->fir = pxa2xx_fir_init(address_space, 0x40800000, + 
qdev_get_gpio_in(s->pic, PXA2XX_PIC_ICP), + qdev_get_gpio_in(s->dma, PXA2XX_RX_RQ_ICP), + qdev_get_gpio_in(s->dma, PXA2XX_TX_RQ_ICP), + serial_hd(i)); + + s->lcd = pxa2xx_lcdc_init(address_space, 0x44000000, + qdev_get_gpio_in(s->pic, PXA2XX_PIC_LCD)); + + s->cm_base = 0x41300000; + s->cm_regs[CCCR >> 2] = 0x02000210; /* 416.0 MHz */ + s->clkcfg = 0x00000009; /* Turbo mode active */ + memory_region_init_io(&s->cm_iomem, NULL, &pxa2xx_cm_ops, s, "pxa2xx-cm", 0x1000); + memory_region_add_subregion(address_space, s->cm_base, &s->cm_iomem); + vmstate_register(NULL, 0, &vmstate_pxa2xx_cm, s); + + pxa2xx_setup_cp14(s); + + s->mm_base = 0x48000000; + s->mm_regs[MDMRS >> 2] = 0x00020002; + s->mm_regs[MDREFR >> 2] = 0x03ca4000; + s->mm_regs[MECR >> 2] = 0x00000001; /* Two PC Card sockets */ + memory_region_init_io(&s->mm_iomem, NULL, &pxa2xx_mm_ops, s, "pxa2xx-mm", 0x1000); + memory_region_add_subregion(address_space, s->mm_base, &s->mm_iomem); + vmstate_register(NULL, 0, &vmstate_pxa2xx_mm, s); + + s->pm_base = 0x40f00000; + memory_region_init_io(&s->pm_iomem, NULL, &pxa2xx_pm_ops, s, "pxa2xx-pm", 0x100); + memory_region_add_subregion(address_space, s->pm_base, &s->pm_iomem); + vmstate_register(NULL, 0, &vmstate_pxa2xx_pm, s); + + for (i = 0; pxa27x_ssp[i].io_base; i ++); + s->ssp = g_new0(SSIBus *, i); + for (i = 0; pxa27x_ssp[i].io_base; i ++) { + DeviceState *dev; + dev = sysbus_create_simple(TYPE_PXA2XX_SSP, pxa27x_ssp[i].io_base, + qdev_get_gpio_in(s->pic, pxa27x_ssp[i].irqn)); + s->ssp[i] = (SSIBus *)qdev_get_child_bus(dev, "ssi"); + } + + sysbus_create_simple("sysbus-ohci", 0x4c000000, + qdev_get_gpio_in(s->pic, PXA2XX_PIC_USBH1)); + + s->pcmcia[0] = pxa2xx_pcmcia_init(address_space, 0x20000000); + s->pcmcia[1] = pxa2xx_pcmcia_init(address_space, 0x30000000); + + sysbus_create_simple(TYPE_PXA2XX_RTC, 0x40900000, + qdev_get_gpio_in(s->pic, PXA2XX_PIC_RTCALARM)); + + s->i2c[0] = pxa2xx_i2c_init(0x40301600, + qdev_get_gpio_in(s->pic, PXA2XX_PIC_I2C), 0xffff); + s->i2c[1] = pxa2xx_i2c_init(0x40f00100, + qdev_get_gpio_in(s->pic, PXA2XX_PIC_PWRI2C), 0xff); + + s->i2s = pxa2xx_i2s_init(address_space, 0x40400000, + qdev_get_gpio_in(s->pic, PXA2XX_PIC_I2S), + qdev_get_gpio_in(s->dma, PXA2XX_RX_RQ_I2S), + qdev_get_gpio_in(s->dma, PXA2XX_TX_RQ_I2S)); + + s->kp = pxa27x_keypad_init(address_space, 0x41500000, + qdev_get_gpio_in(s->pic, PXA2XX_PIC_KEYPAD)); + + /* GPIO1 resets the processor */ + /* The handler can be overridden by board-specific code */ + qdev_connect_gpio_out(s->gpio, 1, s->reset); + return s; +} + +/* Initialise a PXA255 integrated chip (ARM based core). 
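+ * Same shape as pxa270_init() above, minus the PXA27x-only peripherals
+ * (keypad, OHCI) and with the PXA25x timer block, 85 GPIO lines and the
+ * 14.7456 MHz UART clock.  A board model would typically do something
+ * like the following (hypothetical sketch; MY_RESET_GPIO stands in for
+ * a board-specific line):
+ *
+ *     PXA2xxState *mpu = pxa255_init(get_system_memory(), 64 * MiB);
+ *     qdev_connect_gpio_out(mpu->gpio, MY_RESET_GPIO, mpu->reset);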
*/ +PXA2xxState *pxa255_init(MemoryRegion *address_space, unsigned int sdram_size) +{ + PXA2xxState *s; + int i; + DriveInfo *dinfo; + + s = g_new0(PXA2xxState, 1); + + s->cpu = ARM_CPU(cpu_create(ARM_CPU_TYPE_NAME("pxa255"))); + s->reset = qemu_allocate_irq(pxa2xx_reset, s, 0); + + /* SDRAM & Internal Memory Storage */ + memory_region_init_ram(&s->sdram, NULL, "pxa255.sdram", sdram_size, + &error_fatal); + memory_region_add_subregion(address_space, PXA2XX_SDRAM_BASE, &s->sdram); + memory_region_init_ram(&s->internal, NULL, "pxa255.internal", + PXA2XX_INTERNAL_SIZE, &error_fatal); + memory_region_add_subregion(address_space, PXA2XX_INTERNAL_BASE, + &s->internal); + + s->pic = pxa2xx_pic_init(0x40d00000, s->cpu); + + s->dma = pxa255_dma_init(0x40000000, + qdev_get_gpio_in(s->pic, PXA2XX_PIC_DMA)); + + sysbus_create_varargs("pxa25x-timer", 0x40a00000, + qdev_get_gpio_in(s->pic, PXA2XX_PIC_OST_0 + 0), + qdev_get_gpio_in(s->pic, PXA2XX_PIC_OST_0 + 1), + qdev_get_gpio_in(s->pic, PXA2XX_PIC_OST_0 + 2), + qdev_get_gpio_in(s->pic, PXA2XX_PIC_OST_0 + 3), + NULL); + + s->gpio = pxa2xx_gpio_init(0x40e00000, s->cpu, s->pic, 85); + + s->mmc = pxa2xx_mmci_init(address_space, 0x41100000, + qdev_get_gpio_in(s->pic, PXA2XX_PIC_MMC), + qdev_get_gpio_in(s->dma, PXA2XX_RX_RQ_MMCI), + qdev_get_gpio_in(s->dma, PXA2XX_TX_RQ_MMCI)); + dinfo = drive_get(IF_SD, 0, 0); + if (dinfo) { + DeviceState *carddev; + + /* Create and plug in the sd card */ + carddev = qdev_new(TYPE_SD_CARD); + qdev_prop_set_drive_err(carddev, "drive", + blk_by_legacy_dinfo(dinfo), &error_fatal); + qdev_realize_and_unref(carddev, qdev_get_child_bus(DEVICE(s->mmc), + "sd-bus"), + &error_fatal); + } else if (!qtest_enabled()) { + warn_report("missing SecureDigital device"); + } + + for (i = 0; pxa255_serial[i].io_base; i++) { + if (serial_hd(i)) { + serial_mm_init(address_space, pxa255_serial[i].io_base, 2, + qdev_get_gpio_in(s->pic, pxa255_serial[i].irqn), + 14745600 / 16, serial_hd(i), + DEVICE_NATIVE_ENDIAN); + } else { + break; + } + } + if (serial_hd(i)) + s->fir = pxa2xx_fir_init(address_space, 0x40800000, + qdev_get_gpio_in(s->pic, PXA2XX_PIC_ICP), + qdev_get_gpio_in(s->dma, PXA2XX_RX_RQ_ICP), + qdev_get_gpio_in(s->dma, PXA2XX_TX_RQ_ICP), + serial_hd(i)); + + s->lcd = pxa2xx_lcdc_init(address_space, 0x44000000, + qdev_get_gpio_in(s->pic, PXA2XX_PIC_LCD)); + + s->cm_base = 0x41300000; + s->cm_regs[CCCR >> 2] = 0x00000121; /* from datasheet */ + s->cm_regs[CKEN >> 2] = 0x00017def; /* from datasheet */ + + s->clkcfg = 0x00000009; /* Turbo mode active */ + memory_region_init_io(&s->cm_iomem, NULL, &pxa2xx_cm_ops, s, "pxa2xx-cm", 0x1000); + memory_region_add_subregion(address_space, s->cm_base, &s->cm_iomem); + vmstate_register(NULL, 0, &vmstate_pxa2xx_cm, s); + + pxa2xx_setup_cp14(s); + + s->mm_base = 0x48000000; + s->mm_regs[MDMRS >> 2] = 0x00020002; + s->mm_regs[MDREFR >> 2] = 0x03ca4000; + s->mm_regs[MECR >> 2] = 0x00000001; /* Two PC Card sockets */ + memory_region_init_io(&s->mm_iomem, NULL, &pxa2xx_mm_ops, s, "pxa2xx-mm", 0x1000); + memory_region_add_subregion(address_space, s->mm_base, &s->mm_iomem); + vmstate_register(NULL, 0, &vmstate_pxa2xx_mm, s); + + s->pm_base = 0x40f00000; + memory_region_init_io(&s->pm_iomem, NULL, &pxa2xx_pm_ops, s, "pxa2xx-pm", 0x100); + memory_region_add_subregion(address_space, s->pm_base, &s->pm_iomem); + vmstate_register(NULL, 0, &vmstate_pxa2xx_pm, s); + + for (i = 0; pxa255_ssp[i].io_base; i ++); + s->ssp = g_new0(SSIBus *, i); + for (i = 0; pxa255_ssp[i].io_base; i ++) { + DeviceState *dev; + dev = 
sysbus_create_simple(TYPE_PXA2XX_SSP, pxa255_ssp[i].io_base, + qdev_get_gpio_in(s->pic, pxa255_ssp[i].irqn)); + s->ssp[i] = (SSIBus *)qdev_get_child_bus(dev, "ssi"); + } + + s->pcmcia[0] = pxa2xx_pcmcia_init(address_space, 0x20000000); + s->pcmcia[1] = pxa2xx_pcmcia_init(address_space, 0x30000000); + + sysbus_create_simple(TYPE_PXA2XX_RTC, 0x40900000, + qdev_get_gpio_in(s->pic, PXA2XX_PIC_RTCALARM)); + + s->i2c[0] = pxa2xx_i2c_init(0x40301600, + qdev_get_gpio_in(s->pic, PXA2XX_PIC_I2C), 0xffff); + s->i2c[1] = pxa2xx_i2c_init(0x40f00100, + qdev_get_gpio_in(s->pic, PXA2XX_PIC_PWRI2C), 0xff); + + s->i2s = pxa2xx_i2s_init(address_space, 0x40400000, + qdev_get_gpio_in(s->pic, PXA2XX_PIC_I2S), + qdev_get_gpio_in(s->dma, PXA2XX_RX_RQ_I2S), + qdev_get_gpio_in(s->dma, PXA2XX_TX_RQ_I2S)); + + /* GPIO1 resets the processor */ + /* The handler can be overridden by board-specific code */ + qdev_connect_gpio_out(s->gpio, 1, s->reset); + return s; +} + +static void pxa2xx_ssp_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = pxa2xx_ssp_reset; + dc->vmsd = &vmstate_pxa2xx_ssp; +} + +static const TypeInfo pxa2xx_ssp_info = { + .name = TYPE_PXA2XX_SSP, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(PXA2xxSSPState), + .instance_init = pxa2xx_ssp_init, + .class_init = pxa2xx_ssp_class_init, +}; + +static void pxa2xx_register_types(void) +{ + type_register_static(&pxa2xx_i2c_slave_info); + type_register_static(&pxa2xx_ssp_info); + type_register_static(&pxa2xx_i2c_info); + type_register_static(&pxa2xx_rtc_sysbus_info); + type_register_static(&pxa2xx_fir_info); +} + +type_init(pxa2xx_register_types) diff --git a/hw/arm/pxa2xx_gpio.c b/hw/arm/pxa2xx_gpio.c new file mode 100644 index 000000000..e7c3d9922 --- /dev/null +++ b/hw/arm/pxa2xx_gpio.c @@ -0,0 +1,369 @@ +/* + * Intel XScale PXA255/270 GPIO controller emulation. + * + * Copyright (c) 2006 Openedhand Ltd. + * Written by Andrzej Zaborowski + * + * This code is licensed under the GPL. + */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "hw/irq.h" +#include "hw/qdev-properties.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "hw/arm/pxa.h" +#include "qapi/error.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "qom/object.h" + +#define PXA2XX_GPIO_BANKS 4 + +#define TYPE_PXA2XX_GPIO "pxa2xx-gpio" +OBJECT_DECLARE_SIMPLE_TYPE(PXA2xxGPIOInfo, PXA2XX_GPIO) + +struct PXA2xxGPIOInfo { + /*< private >*/ + SysBusDevice parent_obj; + /*< public >*/ + + MemoryRegion iomem; + qemu_irq irq0, irq1, irqX; + int lines; + int ncpu; + ARMCPU *cpu; + + /* XXX: GNU C vectors are more suitable */ + uint32_t ilevel[PXA2XX_GPIO_BANKS]; + uint32_t olevel[PXA2XX_GPIO_BANKS]; + uint32_t dir[PXA2XX_GPIO_BANKS]; + uint32_t rising[PXA2XX_GPIO_BANKS]; + uint32_t falling[PXA2XX_GPIO_BANKS]; + uint32_t status[PXA2XX_GPIO_BANKS]; + uint32_t gafr[PXA2XX_GPIO_BANKS * 2]; + + uint32_t prev_level[PXA2XX_GPIO_BANKS]; + qemu_irq handler[PXA2XX_GPIO_BANKS * 32]; + qemu_irq read_notify; +}; + +static struct { + enum { + GPIO_NONE, + GPLR, + GPSR, + GPCR, + GPDR, + GRER, + GFER, + GEDR, + GAFR_L, + GAFR_U, + } reg; + int bank; +} pxa2xx_gpio_regs[0x200] = { + [0 ... 
0x1ff] = { GPIO_NONE, 0 }, +#define PXA2XX_REG(reg, a0, a1, a2, a3) \ + [a0] = { reg, 0 }, [a1] = { reg, 1 }, [a2] = { reg, 2 }, [a3] = { reg, 3 }, + + PXA2XX_REG(GPLR, 0x000, 0x004, 0x008, 0x100) + PXA2XX_REG(GPSR, 0x018, 0x01c, 0x020, 0x118) + PXA2XX_REG(GPCR, 0x024, 0x028, 0x02c, 0x124) + PXA2XX_REG(GPDR, 0x00c, 0x010, 0x014, 0x10c) + PXA2XX_REG(GRER, 0x030, 0x034, 0x038, 0x130) + PXA2XX_REG(GFER, 0x03c, 0x040, 0x044, 0x13c) + PXA2XX_REG(GEDR, 0x048, 0x04c, 0x050, 0x148) + PXA2XX_REG(GAFR_L, 0x054, 0x05c, 0x064, 0x06c) + PXA2XX_REG(GAFR_U, 0x058, 0x060, 0x068, 0x070) +}; + +static void pxa2xx_gpio_irq_update(PXA2xxGPIOInfo *s) +{ + if (s->status[0] & (1 << 0)) + qemu_irq_raise(s->irq0); + else + qemu_irq_lower(s->irq0); + + if (s->status[0] & (1 << 1)) + qemu_irq_raise(s->irq1); + else + qemu_irq_lower(s->irq1); + + if ((s->status[0] & ~3) | s->status[1] | s->status[2] | s->status[3]) + qemu_irq_raise(s->irqX); + else + qemu_irq_lower(s->irqX); +} + +/* Bitmap of pins used as standby and sleep wake-up sources. */ +static const int pxa2xx_gpio_wake[PXA2XX_GPIO_BANKS] = { + 0x8003fe1b, 0x002001fc, 0xec080000, 0x0012007f, +}; + +static void pxa2xx_gpio_set(void *opaque, int line, int level) +{ + PXA2xxGPIOInfo *s = (PXA2xxGPIOInfo *) opaque; + CPUState *cpu = CPU(s->cpu); + int bank; + uint32_t mask; + + if (line >= s->lines) { + printf("%s: No GPIO pin %i\n", __func__, line); + return; + } + + bank = line >> 5; + mask = 1U << (line & 31); + + if (level) { + s->status[bank] |= s->rising[bank] & mask & + ~s->ilevel[bank] & ~s->dir[bank]; + s->ilevel[bank] |= mask; + } else { + s->status[bank] |= s->falling[bank] & mask & + s->ilevel[bank] & ~s->dir[bank]; + s->ilevel[bank] &= ~mask; + } + + if (s->status[bank] & mask) + pxa2xx_gpio_irq_update(s); + + /* Wake-up GPIOs */ + if (cpu->halted && (mask & ~s->dir[bank] & pxa2xx_gpio_wake[bank])) { + cpu_interrupt(cpu, CPU_INTERRUPT_EXITTB); + } +} + +static void pxa2xx_gpio_handler_update(PXA2xxGPIOInfo *s) { + uint32_t level, diff; + int i, bit, line; + for (i = 0; i < PXA2XX_GPIO_BANKS; i ++) { + level = s->olevel[i] & s->dir[i]; + + for (diff = s->prev_level[i] ^ level; diff; diff ^= 1 << bit) { + bit = ctz32(diff); + line = bit + 32 * i; + qemu_set_irq(s->handler[line], (level >> bit) & 1); + } + + s->prev_level[i] = level; + } +} + +static uint64_t pxa2xx_gpio_read(void *opaque, hwaddr offset, + unsigned size) +{ + PXA2xxGPIOInfo *s = (PXA2xxGPIOInfo *) opaque; + uint32_t ret; + int bank; + if (offset >= 0x200) + return 0; + + bank = pxa2xx_gpio_regs[offset].bank; + switch (pxa2xx_gpio_regs[offset].reg) { + case GPDR: /* GPIO Pin-Direction registers */ + return s->dir[bank]; + + case GPSR: /* GPIO Pin-Output Set registers */ + qemu_log_mask(LOG_GUEST_ERROR, + "pxa2xx GPIO: read from write only register GPSR\n"); + return 0; + + case GPCR: /* GPIO Pin-Output Clear registers */ + qemu_log_mask(LOG_GUEST_ERROR, + "pxa2xx GPIO: read from write only register GPCR\n"); + return 0; + + case GRER: /* GPIO Rising-Edge Detect Enable registers */ + return s->rising[bank]; + + case GFER: /* GPIO Falling-Edge Detect Enable registers */ + return s->falling[bank]; + + case GAFR_L: /* GPIO Alternate Function registers */ + return s->gafr[bank * 2]; + + case GAFR_U: /* GPIO Alternate Function registers */ + return s->gafr[bank * 2 + 1]; + + case GPLR: /* GPIO Pin-Level registers */ + ret = (s->olevel[bank] & s->dir[bank]) | + (s->ilevel[bank] & ~s->dir[bank]); + qemu_irq_raise(s->read_notify); + return ret; + + case GEDR: /* GPIO Edge Detect Status registers 
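+ * (GEDR): reads report which enabled edges have fired; the guest
+ * acknowledges them by writing ones back, see the write handler below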
*/ + return s->status[bank]; + + default: + qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" HWADDR_PRIX "\n", + __func__, offset); + } + + return 0; +} + +static void pxa2xx_gpio_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + PXA2xxGPIOInfo *s = (PXA2xxGPIOInfo *) opaque; + int bank; + if (offset >= 0x200) + return; + + bank = pxa2xx_gpio_regs[offset].bank; + switch (pxa2xx_gpio_regs[offset].reg) { + case GPDR: /* GPIO Pin-Direction registers */ + s->dir[bank] = value; + pxa2xx_gpio_handler_update(s); + break; + + case GPSR: /* GPIO Pin-Output Set registers */ + s->olevel[bank] |= value; + pxa2xx_gpio_handler_update(s); + break; + + case GPCR: /* GPIO Pin-Output Clear registers */ + s->olevel[bank] &= ~value; + pxa2xx_gpio_handler_update(s); + break; + + case GRER: /* GPIO Rising-Edge Detect Enable registers */ + s->rising[bank] = value; + break; + + case GFER: /* GPIO Falling-Edge Detect Enable registers */ + s->falling[bank] = value; + break; + + case GAFR_L: /* GPIO Alternate Function registers */ + s->gafr[bank * 2] = value; + break; + + case GAFR_U: /* GPIO Alternate Function registers */ + s->gafr[bank * 2 + 1] = value; + break; + + case GEDR: /* GPIO Edge Detect Status registers */ + s->status[bank] &= ~value; + pxa2xx_gpio_irq_update(s); + break; + + default: + qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" HWADDR_PRIX "\n", + __func__, offset); + } +} + +static const MemoryRegionOps pxa_gpio_ops = { + .read = pxa2xx_gpio_read, + .write = pxa2xx_gpio_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +DeviceState *pxa2xx_gpio_init(hwaddr base, + ARMCPU *cpu, DeviceState *pic, int lines) +{ + CPUState *cs = CPU(cpu); + DeviceState *dev; + + dev = qdev_new(TYPE_PXA2XX_GPIO); + qdev_prop_set_int32(dev, "lines", lines); + qdev_prop_set_int32(dev, "ncpu", cs->cpu_index); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base); + sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, + qdev_get_gpio_in(pic, PXA2XX_PIC_GPIO_0)); + sysbus_connect_irq(SYS_BUS_DEVICE(dev), 1, + qdev_get_gpio_in(pic, PXA2XX_PIC_GPIO_1)); + sysbus_connect_irq(SYS_BUS_DEVICE(dev), 2, + qdev_get_gpio_in(pic, PXA2XX_PIC_GPIO_X)); + + return dev; +} + +static void pxa2xx_gpio_initfn(Object *obj) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + DeviceState *dev = DEVICE(sbd); + PXA2xxGPIOInfo *s = PXA2XX_GPIO(dev); + + memory_region_init_io(&s->iomem, obj, &pxa_gpio_ops, + s, "pxa2xx-gpio", 0x1000); + sysbus_init_mmio(sbd, &s->iomem); + sysbus_init_irq(sbd, &s->irq0); + sysbus_init_irq(sbd, &s->irq1); + sysbus_init_irq(sbd, &s->irqX); +} + +static void pxa2xx_gpio_realize(DeviceState *dev, Error **errp) +{ + PXA2xxGPIOInfo *s = PXA2XX_GPIO(dev); + + s->cpu = ARM_CPU(qemu_get_cpu(s->ncpu)); + + qdev_init_gpio_in(dev, pxa2xx_gpio_set, s->lines); + qdev_init_gpio_out(dev, s->handler, s->lines); +} + +/* + * Registers a callback to notify on GPLR reads. This normally + * shouldn't be needed but it is used for the hack on Spitz machines. 
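+ * The notifier fires on every GPLR read (see pxa2xx_gpio_read above),
+ * which lets a board refresh its input pin levels just before the
+ * guest samples them.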
+ */ +void pxa2xx_gpio_read_notifier(DeviceState *dev, qemu_irq handler) +{ + PXA2xxGPIOInfo *s = PXA2XX_GPIO(dev); + + s->read_notify = handler; +} + +static const VMStateDescription vmstate_pxa2xx_gpio_regs = { + .name = "pxa2xx-gpio", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32_ARRAY(ilevel, PXA2xxGPIOInfo, PXA2XX_GPIO_BANKS), + VMSTATE_UINT32_ARRAY(olevel, PXA2xxGPIOInfo, PXA2XX_GPIO_BANKS), + VMSTATE_UINT32_ARRAY(dir, PXA2xxGPIOInfo, PXA2XX_GPIO_BANKS), + VMSTATE_UINT32_ARRAY(rising, PXA2xxGPIOInfo, PXA2XX_GPIO_BANKS), + VMSTATE_UINT32_ARRAY(falling, PXA2xxGPIOInfo, PXA2XX_GPIO_BANKS), + VMSTATE_UINT32_ARRAY(status, PXA2xxGPIOInfo, PXA2XX_GPIO_BANKS), + VMSTATE_UINT32_ARRAY(gafr, PXA2xxGPIOInfo, PXA2XX_GPIO_BANKS * 2), + VMSTATE_UINT32_ARRAY(prev_level, PXA2xxGPIOInfo, PXA2XX_GPIO_BANKS), + VMSTATE_END_OF_LIST(), + }, +}; + +static Property pxa2xx_gpio_properties[] = { + DEFINE_PROP_INT32("lines", PXA2xxGPIOInfo, lines, 0), + DEFINE_PROP_INT32("ncpu", PXA2xxGPIOInfo, ncpu, 0), + DEFINE_PROP_END_OF_LIST(), +}; + +static void pxa2xx_gpio_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->desc = "PXA2xx GPIO controller"; + device_class_set_props(dc, pxa2xx_gpio_properties); + dc->vmsd = &vmstate_pxa2xx_gpio_regs; + dc->realize = pxa2xx_gpio_realize; +} + +static const TypeInfo pxa2xx_gpio_info = { + .name = TYPE_PXA2XX_GPIO, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(PXA2xxGPIOInfo), + .instance_init = pxa2xx_gpio_initfn, + .class_init = pxa2xx_gpio_class_init, +}; + +static void pxa2xx_gpio_register_types(void) +{ + type_register_static(&pxa2xx_gpio_info); +} + +type_init(pxa2xx_gpio_register_types) diff --git a/hw/arm/pxa2xx_pic.c b/hw/arm/pxa2xx_pic.c new file mode 100644 index 000000000..ed032fed5 --- /dev/null +++ b/hw/arm/pxa2xx_pic.c @@ -0,0 +1,339 @@ +/* + * Intel XScale PXA Programmable Interrupt Controller. + * + * Copyright (c) 2006 Openedhand Ltd. + * Copyright (c) 2006 Thorsten Zitterell + * Written by Andrzej Zaborowski + * + * This code is licensed under the GPL. 
+ */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/module.h" +#include "qemu/log.h" +#include "cpu.h" +#include "hw/arm/pxa.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "qom/object.h" + +#define ICIP 0x00 /* Interrupt Controller IRQ Pending register */ +#define ICMR 0x04 /* Interrupt Controller Mask register */ +#define ICLR 0x08 /* Interrupt Controller Level register */ +#define ICFP 0x0c /* Interrupt Controller FIQ Pending register */ +#define ICPR 0x10 /* Interrupt Controller Pending register */ +#define ICCR 0x14 /* Interrupt Controller Control register */ +#define ICHP 0x18 /* Interrupt Controller Highest Priority register */ +#define IPR0 0x1c /* Interrupt Controller Priority register 0 */ +#define IPR31 0x98 /* Interrupt Controller Priority register 31 */ +#define ICIP2 0x9c /* Interrupt Controller IRQ Pending register 2 */ +#define ICMR2 0xa0 /* Interrupt Controller Mask register 2 */ +#define ICLR2 0xa4 /* Interrupt Controller Level register 2 */ +#define ICFP2 0xa8 /* Interrupt Controller FIQ Pending register 2 */ +#define ICPR2 0xac /* Interrupt Controller Pending register 2 */ +#define IPR32 0xb0 /* Interrupt Controller Priority register 32 */ +#define IPR39 0xcc /* Interrupt Controller Priority register 39 */ + +#define PXA2XX_PIC_SRCS 40 + +#define TYPE_PXA2XX_PIC "pxa2xx_pic" +OBJECT_DECLARE_SIMPLE_TYPE(PXA2xxPICState, PXA2XX_PIC) + +struct PXA2xxPICState { + /*< private >*/ + SysBusDevice parent_obj; + /*< public >*/ + + MemoryRegion iomem; + ARMCPU *cpu; + uint32_t int_enabled[2]; + uint32_t int_pending[2]; + uint32_t is_fiq[2]; + uint32_t int_idle; + uint32_t priority[PXA2XX_PIC_SRCS]; +}; + +static void pxa2xx_pic_update(void *opaque) +{ + uint32_t mask[2]; + PXA2xxPICState *s = (PXA2xxPICState *) opaque; + CPUState *cpu = CPU(s->cpu); + + if (cpu->halted) { + mask[0] = s->int_pending[0] & (s->int_enabled[0] | s->int_idle); + mask[1] = s->int_pending[1] & (s->int_enabled[1] | s->int_idle); + if (mask[0] || mask[1]) { + cpu_interrupt(cpu, CPU_INTERRUPT_EXITTB); + } + } + + mask[0] = s->int_pending[0] & s->int_enabled[0]; + mask[1] = s->int_pending[1] & s->int_enabled[1]; + + if ((mask[0] & s->is_fiq[0]) || (mask[1] & s->is_fiq[1])) { + cpu_interrupt(cpu, CPU_INTERRUPT_FIQ); + } else { + cpu_reset_interrupt(cpu, CPU_INTERRUPT_FIQ); + } + + if ((mask[0] & ~s->is_fiq[0]) || (mask[1] & ~s->is_fiq[1])) { + cpu_interrupt(cpu, CPU_INTERRUPT_HARD); + } else { + cpu_reset_interrupt(cpu, CPU_INTERRUPT_HARD); + } +} + +/* Note: Here level means state of the signal on a pin, not + * IRQ/FIQ distinction as in PXA Developer Manual. */ +static void pxa2xx_pic_set_irq(void *opaque, int irq, int level) +{ + PXA2xxPICState *s = (PXA2xxPICState *) opaque; + int int_set = (irq >= 32); + irq &= 31; + + if (level) + s->int_pending[int_set] |= 1 << irq; + else + s->int_pending[int_set] &= ~(1 << irq); + + pxa2xx_pic_update(opaque); +} + +static inline uint32_t pxa2xx_pic_highest(PXA2xxPICState *s) { + int i, int_set, irq; + uint32_t bit, mask[2]; + uint32_t ichp = 0x003f003f; /* Both IDs invalid */ + + mask[0] = s->int_pending[0] & s->int_enabled[0]; + mask[1] = s->int_pending[1] & s->int_enabled[1]; + + for (i = PXA2XX_PIC_SRCS - 1; i >= 0; i --) { + irq = s->priority[i] & 0x3f; + if ((s->priority[i] & (1U << 31)) && irq < PXA2XX_PIC_SRCS) { + /* Source peripheral ID is valid. 
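+ * ICHP packs the winning FIQ into bits [15:0] (bit 15 = valid,
+ * bits [5:0] = source ID) and the winning IRQ into bits [31:16]
+ * (bit 31 = valid, bits [21:16] = source ID); an ID field of 0x3f
+ * means none, matching the 0x003f003f initial value above.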
*/ + bit = 1 << (irq & 31); + int_set = (irq >= 32); + + if (mask[int_set] & bit & s->is_fiq[int_set]) { + /* FIQ asserted */ + ichp &= 0xffff0000; + ichp |= (1 << 15) | irq; + } + + if (mask[int_set] & bit & ~s->is_fiq[int_set]) { + /* IRQ asserted */ + ichp &= 0x0000ffff; + ichp |= (1U << 31) | (irq << 16); + } + } + } + + return ichp; +} + +static uint64_t pxa2xx_pic_mem_read(void *opaque, hwaddr offset, + unsigned size) +{ + PXA2xxPICState *s = (PXA2xxPICState *) opaque; + + switch (offset) { + case ICIP: /* IRQ Pending register */ + return s->int_pending[0] & ~s->is_fiq[0] & s->int_enabled[0]; + case ICIP2: /* IRQ Pending register 2 */ + return s->int_pending[1] & ~s->is_fiq[1] & s->int_enabled[1]; + case ICMR: /* Mask register */ + return s->int_enabled[0]; + case ICMR2: /* Mask register 2 */ + return s->int_enabled[1]; + case ICLR: /* Level register */ + return s->is_fiq[0]; + case ICLR2: /* Level register 2 */ + return s->is_fiq[1]; + case ICCR: /* Idle mask */ + return (s->int_idle == 0); + case ICFP: /* FIQ Pending register */ + return s->int_pending[0] & s->is_fiq[0] & s->int_enabled[0]; + case ICFP2: /* FIQ Pending register 2 */ + return s->int_pending[1] & s->is_fiq[1] & s->int_enabled[1]; + case ICPR: /* Pending register */ + return s->int_pending[0]; + case ICPR2: /* Pending register 2 */ + return s->int_pending[1]; + case IPR0 ... IPR31: + return s->priority[0 + ((offset - IPR0 ) >> 2)]; + case IPR32 ... IPR39: + return s->priority[32 + ((offset - IPR32) >> 2)]; + case ICHP: /* Highest Priority register */ + return pxa2xx_pic_highest(s); + default: + qemu_log_mask(LOG_GUEST_ERROR, + "pxa2xx_pic_mem_read: bad register offset 0x%" HWADDR_PRIx + "\n", offset); + return 0; + } +} + +static void pxa2xx_pic_mem_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + PXA2xxPICState *s = (PXA2xxPICState *) opaque; + + switch (offset) { + case ICMR: /* Mask register */ + s->int_enabled[0] = value; + break; + case ICMR2: /* Mask register 2 */ + s->int_enabled[1] = value; + break; + case ICLR: /* Level register */ + s->is_fiq[0] = value; + break; + case ICLR2: /* Level register 2 */ + s->is_fiq[1] = value; + break; + case ICCR: /* Idle mask */ + s->int_idle = (value & 1) ? 0 : ~0; + break; + case IPR0 ... IPR31: + s->priority[0 + ((offset - IPR0 ) >> 2)] = value & 0x8000003f; + break; + case IPR32 ... IPR39: + s->priority[32 + ((offset - IPR32) >> 2)] = value & 0x8000003f; + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "pxa2xx_pic_mem_write: bad register offset 0x%" + HWADDR_PRIx "\n", offset); + return; + } + pxa2xx_pic_update(opaque); +} + +/* Interrupt Controller Coprocessor Space Register Mapping */ +static const int pxa2xx_cp_reg_map[0x10] = { + [0x0 ... 
0xf] = -1, + [0x0] = ICIP, + [0x1] = ICMR, + [0x2] = ICLR, + [0x3] = ICFP, + [0x4] = ICPR, + [0x5] = ICHP, + [0x6] = ICIP2, + [0x7] = ICMR2, + [0x8] = ICLR2, + [0x9] = ICFP2, + [0xa] = ICPR2, +}; + +static uint64_t pxa2xx_pic_cp_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ + int offset = pxa2xx_cp_reg_map[ri->crn]; + return pxa2xx_pic_mem_read(ri->opaque, offset, 4); +} + +static void pxa2xx_pic_cp_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + int offset = pxa2xx_cp_reg_map[ri->crn]; + pxa2xx_pic_mem_write(ri->opaque, offset, value, 4); +} + +#define REGINFO_FOR_PIC_CP(NAME, CRN) \ + { .name = NAME, .cp = 6, .crn = CRN, .crm = 0, .opc1 = 0, .opc2 = 0, \ + .access = PL1_RW, .type = ARM_CP_IO, \ + .readfn = pxa2xx_pic_cp_read, .writefn = pxa2xx_pic_cp_write } + +static const ARMCPRegInfo pxa_pic_cp_reginfo[] = { + REGINFO_FOR_PIC_CP("ICIP", 0), + REGINFO_FOR_PIC_CP("ICMR", 1), + REGINFO_FOR_PIC_CP("ICLR", 2), + REGINFO_FOR_PIC_CP("ICFP", 3), + REGINFO_FOR_PIC_CP("ICPR", 4), + REGINFO_FOR_PIC_CP("ICHP", 5), + REGINFO_FOR_PIC_CP("ICIP2", 6), + REGINFO_FOR_PIC_CP("ICMR2", 7), + REGINFO_FOR_PIC_CP("ICLR2", 8), + REGINFO_FOR_PIC_CP("ICFP2", 9), + REGINFO_FOR_PIC_CP("ICPR2", 0xa), + REGINFO_SENTINEL +}; + +static const MemoryRegionOps pxa2xx_pic_ops = { + .read = pxa2xx_pic_mem_read, + .write = pxa2xx_pic_mem_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static int pxa2xx_pic_post_load(void *opaque, int version_id) +{ + pxa2xx_pic_update(opaque); + return 0; +} + +DeviceState *pxa2xx_pic_init(hwaddr base, ARMCPU *cpu) +{ + DeviceState *dev = qdev_new(TYPE_PXA2XX_PIC); + PXA2xxPICState *s = PXA2XX_PIC(dev); + + s->cpu = cpu; + + s->int_pending[0] = 0; + s->int_pending[1] = 0; + s->int_enabled[0] = 0; + s->int_enabled[1] = 0; + s->is_fiq[0] = 0; + s->is_fiq[1] = 0; + + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + + qdev_init_gpio_in(dev, pxa2xx_pic_set_irq, PXA2XX_PIC_SRCS); + + /* Enable IC memory-mapped registers access. */ + memory_region_init_io(&s->iomem, OBJECT(s), &pxa2xx_pic_ops, s, + "pxa2xx-pic", 0x00100000); + sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->iomem); + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base); + + /* Enable IC coprocessor access. 
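+ * The interrupt controller registers are aliased into coprocessor 6,
+ * one CRn per register as listed in pxa2xx_cp_reg_map, so a guest
+ * kernel can e.g. fetch the highest-priority pending source without
+ * an MMIO access:
+ *
+ *     mrc p6, 0, r0, c5, c0, 0   @ r0 = ICHP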
*/ + define_arm_cp_regs_with_opaque(cpu, pxa_pic_cp_reginfo, s); + + return dev; +} + +static const VMStateDescription vmstate_pxa2xx_pic_regs = { + .name = "pxa2xx_pic", + .version_id = 0, + .minimum_version_id = 0, + .post_load = pxa2xx_pic_post_load, + .fields = (VMStateField[]) { + VMSTATE_UINT32_ARRAY(int_enabled, PXA2xxPICState, 2), + VMSTATE_UINT32_ARRAY(int_pending, PXA2xxPICState, 2), + VMSTATE_UINT32_ARRAY(is_fiq, PXA2xxPICState, 2), + VMSTATE_UINT32(int_idle, PXA2xxPICState), + VMSTATE_UINT32_ARRAY(priority, PXA2xxPICState, PXA2XX_PIC_SRCS), + VMSTATE_END_OF_LIST(), + }, +}; + +static void pxa2xx_pic_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->desc = "PXA2xx PIC"; + dc->vmsd = &vmstate_pxa2xx_pic_regs; +} + +static const TypeInfo pxa2xx_pic_info = { + .name = TYPE_PXA2XX_PIC, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(PXA2xxPICState), + .class_init = pxa2xx_pic_class_init, +}; + +static void pxa2xx_pic_register_types(void) +{ + type_register_static(&pxa2xx_pic_info); +} + +type_init(pxa2xx_pic_register_types) diff --git a/hw/arm/raspi.c b/hw/arm/raspi.c new file mode 100644 index 000000000..146d35382 --- /dev/null +++ b/hw/arm/raspi.c @@ -0,0 +1,399 @@ +/* + * Raspberry Pi emulation (c) 2012 Gregory Estrade + * Upstreaming code cleanup [including bcm2835_*] (c) 2013 Jan Petrous + * + * Raspberry Pi 2 emulation Copyright (c) 2015, Microsoft + * Written by Andrew Baumann + * + * Raspberry Pi 3 emulation Copyright (c) 2018 Zoltán Baldaszti + * Upstream code cleanup (c) 2018 Pekka Enberg + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "qemu/cutils.h" +#include "qapi/error.h" +#include "hw/arm/bcm2836.h" +#include "hw/registerfields.h" +#include "qemu/error-report.h" +#include "hw/boards.h" +#include "hw/loader.h" +#include "hw/arm/boot.h" +#include "qom/object.h" + +#define SMPBOOT_ADDR 0x300 /* this should leave enough space for ATAGS */ +#define MVBAR_ADDR 0x400 /* secure vectors */ +#define BOARDSETUP_ADDR (MVBAR_ADDR + 0x20) /* board setup code */ +#define FIRMWARE_ADDR_2 0x8000 /* Pi 2 loads kernel.img here by default */ +#define FIRMWARE_ADDR_3 0x80000 /* Pi 3 loads kernel.img here by default */ +#define SPINTABLE_ADDR 0xd8 /* Pi 3 bootloader spintable */ + +/* Registered machine type (matches RPi Foundation bootloader and U-Boot) */ +#define MACH_TYPE_BCM2708 3138 + +struct RaspiMachineState { + /*< private >*/ + MachineState parent_obj; + /*< public >*/ + BCM283XState soc; + struct arm_boot_info binfo; +}; +typedef struct RaspiMachineState RaspiMachineState; + +struct RaspiMachineClass { + /*< private >*/ + MachineClass parent_obj; + /*< public >*/ + uint32_t board_rev; +}; +typedef struct RaspiMachineClass RaspiMachineClass; + +#define TYPE_RASPI_MACHINE MACHINE_TYPE_NAME("raspi-common") +DECLARE_OBJ_CHECKERS(RaspiMachineState, RaspiMachineClass, + RASPI_MACHINE, TYPE_RASPI_MACHINE) + + +/* + * Board revision codes: + * www.raspberrypi.org/documentation/hardware/raspberrypi/revision-codes/ + */ +FIELD(REV_CODE, REVISION, 0, 4); +FIELD(REV_CODE, TYPE, 4, 8); +FIELD(REV_CODE, PROCESSOR, 12, 4); +FIELD(REV_CODE, MANUFACTURER, 16, 4); +FIELD(REV_CODE, MEMORY_SIZE, 20, 3); +FIELD(REV_CODE, STYLE, 23, 1); + +typedef enum RaspiProcessorId { + PROCESSOR_ID_BCM2835 = 0, + PROCESSOR_ID_BCM2836 = 1, + PROCESSOR_ID_BCM2837 = 2, +} RaspiProcessorId; + +static const 
struct { + const char *type; + int cores_count; +} soc_property[] = { + [PROCESSOR_ID_BCM2835] = {TYPE_BCM2835, 1}, + [PROCESSOR_ID_BCM2836] = {TYPE_BCM2836, BCM283X_NCPUS}, + [PROCESSOR_ID_BCM2837] = {TYPE_BCM2837, BCM283X_NCPUS}, +}; + +static uint64_t board_ram_size(uint32_t board_rev) +{ + assert(FIELD_EX32(board_rev, REV_CODE, STYLE)); /* Only new style */ + return 256 * MiB << FIELD_EX32(board_rev, REV_CODE, MEMORY_SIZE); +} + +static RaspiProcessorId board_processor_id(uint32_t board_rev) +{ + int proc_id = FIELD_EX32(board_rev, REV_CODE, PROCESSOR); + + assert(FIELD_EX32(board_rev, REV_CODE, STYLE)); /* Only new style */ + assert(proc_id < ARRAY_SIZE(soc_property) && soc_property[proc_id].type); + + return proc_id; +} + +static const char *board_soc_type(uint32_t board_rev) +{ + return soc_property[board_processor_id(board_rev)].type; +} + +static int cores_count(uint32_t board_rev) +{ + return soc_property[board_processor_id(board_rev)].cores_count; +} + +static const char *board_type(uint32_t board_rev) +{ + static const char *types[] = { + "A", "B", "A+", "B+", "2B", "Alpha", "CM1", NULL, "3B", "Zero", + "CM3", NULL, "Zero W", "3B+", "3A+", NULL, "CM3+", "4B", + }; + assert(FIELD_EX32(board_rev, REV_CODE, STYLE)); /* Only new style */ + int bt = FIELD_EX32(board_rev, REV_CODE, TYPE); + if (bt >= ARRAY_SIZE(types) || !types[bt]) { + return "Unknown"; + } + return types[bt]; +} + +static void write_smpboot(ARMCPU *cpu, const struct arm_boot_info *info) +{ + static const uint32_t smpboot[] = { + 0xe1a0e00f, /* mov lr, pc */ + 0xe3a0fe00 + (BOARDSETUP_ADDR >> 4), /* mov pc, BOARDSETUP_ADDR */ + 0xee100fb0, /* mrc p15, 0, r0, c0, c0, 5;get core ID */ + 0xe7e10050, /* ubfx r0, r0, #0, #2 ;extract LSB */ + 0xe59f5014, /* ldr r5, =0x400000CC ;load mbox base */ + 0xe320f001, /* 1: yield */ + 0xe7953200, /* ldr r3, [r5, r0, lsl #4] ;read mbox for our core*/ + 0xe3530000, /* cmp r3, #0 ;spin while zero */ + 0x0afffffb, /* beq 1b */ + 0xe7853200, /* str r3, [r5, r0, lsl #4] ;clear mbox */ + 0xe12fff13, /* bx r3 ;jump to target */ + 0x400000cc, /* (constant: mailbox 3 read/clear base) */ + }; + + /* check that we don't overrun board setup vectors */ + QEMU_BUILD_BUG_ON(SMPBOOT_ADDR + sizeof(smpboot) > MVBAR_ADDR); + /* check that board setup address is correctly relocated */ + QEMU_BUILD_BUG_ON((BOARDSETUP_ADDR & 0xf) != 0 + || (BOARDSETUP_ADDR >> 4) >= 0x100); + + rom_add_blob_fixed_as("raspi_smpboot", smpboot, sizeof(smpboot), + info->smp_loader_start, + arm_boot_address_space(cpu, info)); +} + +static void write_smpboot64(ARMCPU *cpu, const struct arm_boot_info *info) +{ + AddressSpace *as = arm_boot_address_space(cpu, info); + /* Unlike the AArch32 version we don't need to call the board setup hook. + * The mechanism for doing the spin-table is also entirely different. + * We must have four 64-bit fields at absolute addresses + * 0xd8, 0xe0, 0xe8, 0xf0 in RAM, which are the flag variables for + * our CPUs, and which we must ensure are zero initialized before + * the primary CPU goes into the kernel. We put these variables inside + * a rom blob, so that the reset for ROM contents zeroes them for us. 
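+ * A sketch of the guest-side protocol implemented by the code below:
+ * each secondary reads MPIDR_EL1, masks off its core number N, and
+ * polls its spin-table slot at 0xd8 + N * 8:
+ *     spin: wfe
+ *           ldr  x4, [x5, x6, lsl #3]
+ *           cbz  x4, spin
+ *           br   x4
+ * The OS releases core N by storing an entry address into that slot.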
+ */ + static const uint32_t smpboot[] = { + 0xd2801b05, /* mov x5, 0xd8 */ + 0xd53800a6, /* mrs x6, mpidr_el1 */ + 0x924004c6, /* and x6, x6, #0x3 */ + 0xd503205f, /* spin: wfe */ + 0xf86678a4, /* ldr x4, [x5,x6,lsl #3] */ + 0xb4ffffc4, /* cbz x4, spin */ + 0xd2800000, /* mov x0, #0x0 */ + 0xd2800001, /* mov x1, #0x0 */ + 0xd2800002, /* mov x2, #0x0 */ + 0xd2800003, /* mov x3, #0x0 */ + 0xd61f0080, /* br x4 */ + }; + + static const uint64_t spintables[] = { + 0, 0, 0, 0 + }; + + rom_add_blob_fixed_as("raspi_smpboot", smpboot, sizeof(smpboot), + info->smp_loader_start, as); + rom_add_blob_fixed_as("raspi_spintables", spintables, sizeof(spintables), + SPINTABLE_ADDR, as); +} + +static void write_board_setup(ARMCPU *cpu, const struct arm_boot_info *info) +{ + arm_write_secure_board_setup_dummy_smc(cpu, info, MVBAR_ADDR); +} + +static void reset_secondary(ARMCPU *cpu, const struct arm_boot_info *info) +{ + CPUState *cs = CPU(cpu); + cpu_set_pc(cs, info->smp_loader_start); +} + +static void setup_boot(MachineState *machine, RaspiProcessorId processor_id, + size_t ram_size) +{ + RaspiMachineState *s = RASPI_MACHINE(machine); + int r; + + s->binfo.board_id = MACH_TYPE_BCM2708; + s->binfo.ram_size = ram_size; + s->binfo.nb_cpus = machine->smp.cpus; + + if (processor_id <= PROCESSOR_ID_BCM2836) { + /* + * The BCM2835 and BCM2836 require some custom setup code to run + * in Secure mode before booting a kernel (to set up the SMC vectors + * so that we get a no-op SMC); this is used by Linux to call the + * firmware for some cache maintenance operations. + * The BCM2837 doesn't need this. + */ + s->binfo.board_setup_addr = BOARDSETUP_ADDR; + s->binfo.write_board_setup = write_board_setup; + s->binfo.secure_board_setup = true; + s->binfo.secure_boot = true; + } + + /* The BCM2836 and BCM2837 require SMP setup */ + if (processor_id >= PROCESSOR_ID_BCM2836) { + s->binfo.smp_loader_start = SMPBOOT_ADDR; + if (processor_id == PROCESSOR_ID_BCM2836) { + s->binfo.write_secondary_boot = write_smpboot; + } else { + s->binfo.write_secondary_boot = write_smpboot64; + } + s->binfo.secondary_cpu_reset_hook = reset_secondary; + } + + /* If the user specified a "firmware" image (e.g. UEFI), we bypass + * the normal Linux boot process. + */ + if (machine->firmware) { + hwaddr firmware_addr = processor_id <= PROCESSOR_ID_BCM2836 + ?
FIRMWARE_ADDR_2 : FIRMWARE_ADDR_3; + /* load the firmware image (typically kernel.img) */ + r = load_image_targphys(machine->firmware, firmware_addr, + ram_size - firmware_addr); + if (r < 0) { + error_report("Failed to load firmware from %s", machine->firmware); + exit(1); + } + + s->binfo.entry = firmware_addr; + s->binfo.firmware_loaded = true; + } + + arm_load_kernel(&s->soc.cpu[0].core, machine, &s->binfo); +} + +static void raspi_machine_init(MachineState *machine) +{ + RaspiMachineClass *mc = RASPI_MACHINE_GET_CLASS(machine); + RaspiMachineState *s = RASPI_MACHINE(machine); + uint32_t board_rev = mc->board_rev; + uint64_t ram_size = board_ram_size(board_rev); + uint32_t vcram_size; + DriveInfo *di; + BlockBackend *blk; + BusState *bus; + DeviceState *carddev; + + if (machine->ram_size != ram_size) { + char *size_str = size_to_str(ram_size); + error_report("Invalid RAM size, should be %s", size_str); + g_free(size_str); + exit(1); + } + + /* FIXME: Remove when we have custom CPU address space support */ + memory_region_add_subregion_overlap(get_system_memory(), 0, + machine->ram, 0); + + /* Setup the SOC */ + object_initialize_child(OBJECT(machine), "soc", &s->soc, + board_soc_type(board_rev)); + object_property_add_const_link(OBJECT(&s->soc), "ram", OBJECT(machine->ram)); + object_property_set_int(OBJECT(&s->soc), "board-rev", board_rev, + &error_abort); + qdev_realize(DEVICE(&s->soc), NULL, &error_fatal); + + /* Create and plug in the SD cards */ + di = drive_get_next(IF_SD); + blk = di ? blk_by_legacy_dinfo(di) : NULL; + bus = qdev_get_child_bus(DEVICE(&s->soc), "sd-bus"); + if (bus == NULL) { + error_report("No SD bus found in SOC object"); + exit(1); + } + carddev = qdev_new(TYPE_SD_CARD); + qdev_prop_set_drive_err(carddev, "drive", blk, &error_fatal); + qdev_realize_and_unref(carddev, bus, &error_fatal); + + vcram_size = object_property_get_uint(OBJECT(&s->soc), "vcram-size", + &error_abort); + setup_boot(machine, board_processor_id(mc->board_rev), + machine->ram_size - vcram_size); +} + +static void raspi_machine_class_common_init(MachineClass *mc, + uint32_t board_rev) +{ + mc->desc = g_strdup_printf("Raspberry Pi %s (revision 1.%u)", + board_type(board_rev), + FIELD_EX32(board_rev, REV_CODE, REVISION)); + mc->init = raspi_machine_init; + mc->block_default_type = IF_SD; + mc->no_parallel = 1; + mc->no_floppy = 1; + mc->no_cdrom = 1; + mc->default_cpus = mc->min_cpus = mc->max_cpus = cores_count(board_rev); + mc->default_ram_size = board_ram_size(board_rev); + mc->default_ram_id = "ram"; +}; + +static void raspi0_machine_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + RaspiMachineClass *rmc = RASPI_MACHINE_CLASS(oc); + + rmc->board_rev = 0x920092; /* Revision 1.2 */ + raspi_machine_class_common_init(mc, rmc->board_rev); +}; + +static void raspi1ap_machine_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + RaspiMachineClass *rmc = RASPI_MACHINE_CLASS(oc); + + rmc->board_rev = 0x900021; /* Revision 1.1 */ + raspi_machine_class_common_init(mc, rmc->board_rev); +}; + +static void raspi2b_machine_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + RaspiMachineClass *rmc = RASPI_MACHINE_CLASS(oc); + + rmc->board_rev = 0xa21041; + raspi_machine_class_common_init(mc, rmc->board_rev); +}; + +#ifdef TARGET_AARCH64 +static void raspi3ap_machine_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + RaspiMachineClass *rmc = RASPI_MACHINE_CLASS(oc); + + 
rmc->board_rev = 0x9020e0; /* Revision 1.0 */ + raspi_machine_class_common_init(mc, rmc->board_rev); +}; + +static void raspi3b_machine_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + RaspiMachineClass *rmc = RASPI_MACHINE_CLASS(oc); + + rmc->board_rev = 0xa02082; + raspi_machine_class_common_init(mc, rmc->board_rev); +}; +#endif /* TARGET_AARCH64 */ + +static const TypeInfo raspi_machine_types[] = { + { + .name = MACHINE_TYPE_NAME("raspi0"), + .parent = TYPE_RASPI_MACHINE, + .class_init = raspi0_machine_class_init, + }, { + .name = MACHINE_TYPE_NAME("raspi1ap"), + .parent = TYPE_RASPI_MACHINE, + .class_init = raspi1ap_machine_class_init, + }, { + .name = MACHINE_TYPE_NAME("raspi2b"), + .parent = TYPE_RASPI_MACHINE, + .class_init = raspi2b_machine_class_init, +#ifdef TARGET_AARCH64 + }, { + .name = MACHINE_TYPE_NAME("raspi3ap"), + .parent = TYPE_RASPI_MACHINE, + .class_init = raspi3ap_machine_class_init, + }, { + .name = MACHINE_TYPE_NAME("raspi3b"), + .parent = TYPE_RASPI_MACHINE, + .class_init = raspi3b_machine_class_init, +#endif + }, { + .name = TYPE_RASPI_MACHINE, + .parent = TYPE_MACHINE, + .instance_size = sizeof(RaspiMachineState), + .class_size = sizeof(RaspiMachineClass), + .abstract = true, + } +}; + +DEFINE_TYPES(raspi_machine_types) diff --git a/hw/arm/realview.c b/hw/arm/realview.c new file mode 100644 index 000000000..1c54316ba --- /dev/null +++ b/hw/arm/realview.c @@ -0,0 +1,468 @@ +/* + * ARM RealView Baseboard System emulation. + * + * Copyright (c) 2006-2007 CodeSourcery. + * Written by Paul Brook + * + * This code is licensed under the GPL. + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "cpu.h" +#include "hw/sysbus.h" +#include "hw/arm/boot.h" +#include "hw/arm/primecell.h" +#include "hw/net/lan9118.h" +#include "hw/net/smc91c111.h" +#include "hw/pci/pci.h" +#include "net/net.h" +#include "sysemu/sysemu.h" +#include "hw/boards.h" +#include "hw/i2c/i2c.h" +#include "qemu/error-report.h" +#include "hw/char/pl011.h" +#include "hw/cpu/a9mpcore.h" +#include "hw/intc/realview_gic.h" +#include "hw/irq.h" +#include "hw/i2c/arm_sbcon_i2c.h" +#include "hw/sd/sd.h" + +#define SMP_BOOT_ADDR 0xe0000000 +#define SMP_BOOTREG_ADDR 0x10000030 + +/* Board init. */ + +static struct arm_boot_info realview_binfo = { + .smp_loader_start = SMP_BOOT_ADDR, + .smp_bootreg_addr = SMP_BOOTREG_ADDR, +}; + +/* The following two lists must be consistent. 
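+ * For example, realview_board_id[BOARD_PB_A8] == 0x769; realview_init()
+ * passes that value to the guest kernel via realview_binfo.board_id.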
*/ +enum realview_board_type { + BOARD_EB, + BOARD_EB_MPCORE, + BOARD_PB_A8, + BOARD_PBX_A9, +}; + +static const int realview_board_id[] = { + 0x33b, + 0x33b, + 0x769, + 0x76d +}; + +static void realview_init(MachineState *machine, + enum realview_board_type board_type) +{ + ARMCPU *cpu = NULL; + CPUARMState *env; + MemoryRegion *sysmem = get_system_memory(); + MemoryRegion *ram_lo; + MemoryRegion *ram_hi = g_new(MemoryRegion, 1); + MemoryRegion *ram_alias = g_new(MemoryRegion, 1); + MemoryRegion *ram_hack = g_new(MemoryRegion, 1); + DeviceState *dev, *sysctl, *gpio2, *pl041; + SysBusDevice *busdev; + qemu_irq pic[64]; + qemu_irq mmc_irq[2]; + PCIBus *pci_bus = NULL; + NICInfo *nd; + DriveInfo *dinfo; + I2CBus *i2c; + int n; + unsigned int smp_cpus = machine->smp.cpus; + int done_nic = 0; + qemu_irq cpu_irq[4]; + int is_mpcore = 0; + int is_pb = 0; + uint32_t proc_id = 0; + uint32_t sys_id; + ram_addr_t low_ram_size; + ram_addr_t ram_size = machine->ram_size; + hwaddr periphbase = 0; + + switch (board_type) { + case BOARD_EB: + break; + case BOARD_EB_MPCORE: + is_mpcore = 1; + periphbase = 0x10100000; + break; + case BOARD_PB_A8: + is_pb = 1; + break; + case BOARD_PBX_A9: + is_mpcore = 1; + is_pb = 1; + periphbase = 0x1f000000; + break; + } + + for (n = 0; n < smp_cpus; n++) { + Object *cpuobj = object_new(machine->cpu_type); + + /* By default A9,A15 and ARM1176 CPUs have EL3 enabled. This board + * does not currently support EL3 so the CPU EL3 property is disabled + * before realization. + */ + if (object_property_find(cpuobj, "has_el3")) { + object_property_set_bool(cpuobj, "has_el3", false, &error_fatal); + } + + if (is_pb && is_mpcore) { + object_property_set_int(cpuobj, "reset-cbar", periphbase, + &error_fatal); + } + + qdev_realize(DEVICE(cpuobj), NULL, &error_fatal); + + cpu_irq[n] = qdev_get_gpio_in(DEVICE(cpuobj), ARM_CPU_IRQ); + } + cpu = ARM_CPU(first_cpu); + env = &cpu->env; + if (arm_feature(env, ARM_FEATURE_V7)) { + if (is_mpcore) { + proc_id = 0x0c000000; + } else { + proc_id = 0x0e000000; + } + } else if (arm_feature(env, ARM_FEATURE_V6K)) { + proc_id = 0x06000000; + } else if (arm_feature(env, ARM_FEATURE_V6)) { + proc_id = 0x04000000; + } else { + proc_id = 0x02000000; + } + + if (is_pb && ram_size > 0x20000000) { + /* Core tile RAM. */ + ram_lo = g_new(MemoryRegion, 1); + low_ram_size = ram_size - 0x20000000; + ram_size = 0x20000000; + memory_region_init_ram(ram_lo, NULL, "realview.lowmem", low_ram_size, + &error_fatal); + memory_region_add_subregion(sysmem, 0x20000000, ram_lo); + } + + memory_region_init_ram(ram_hi, NULL, "realview.highmem", ram_size, + &error_fatal); + low_ram_size = ram_size; + if (low_ram_size > 0x10000000) + low_ram_size = 0x10000000; + /* SDRAM at address zero. */ + memory_region_init_alias(ram_alias, NULL, "realview.alias", + ram_hi, 0, low_ram_size); + memory_region_add_subregion(sysmem, 0, ram_alias); + if (is_pb) { + /* And again at a high address. */ + memory_region_add_subregion(sysmem, 0x70000000, ram_hi); + } else { + ram_size = low_ram_size; + } + + sys_id = is_pb ? 0x01780500 : 0xc1400400; + sysctl = qdev_new("realview_sysctl"); + qdev_prop_set_uint32(sysctl, "sys_id", sys_id); + qdev_prop_set_uint32(sysctl, "proc_id", proc_id); + sysbus_realize_and_unref(SYS_BUS_DEVICE(sysctl), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(sysctl), 0, 0x10000000); + + if (is_mpcore) { + dev = qdev_new(is_pb ? 
TYPE_A9MPCORE_PRIV : "realview_mpcore"); + qdev_prop_set_uint32(dev, "num-cpu", smp_cpus); + busdev = SYS_BUS_DEVICE(dev); + sysbus_realize_and_unref(busdev, &error_fatal); + sysbus_mmio_map(busdev, 0, periphbase); + for (n = 0; n < smp_cpus; n++) { + sysbus_connect_irq(busdev, n, cpu_irq[n]); + } + sysbus_create_varargs("l2x0", periphbase + 0x2000, NULL); + /* Both A9 and 11MPCore put the GIC CPU i/f at base + 0x100 */ + realview_binfo.gic_cpu_if_addr = periphbase + 0x100; + } else { + uint32_t gic_addr = is_pb ? 0x1e000000 : 0x10040000; + /* For now just create the nIRQ GIC, and ignore the others. */ + dev = sysbus_create_simple(TYPE_REALVIEW_GIC, gic_addr, cpu_irq[0]); + } + for (n = 0; n < 64; n++) { + pic[n] = qdev_get_gpio_in(dev, n); + } + + pl041 = qdev_new("pl041"); + qdev_prop_set_uint32(pl041, "nc_fifo_depth", 512); + sysbus_realize_and_unref(SYS_BUS_DEVICE(pl041), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(pl041), 0, 0x10004000); + sysbus_connect_irq(SYS_BUS_DEVICE(pl041), 0, pic[19]); + + sysbus_create_simple("pl050_keyboard", 0x10006000, pic[20]); + sysbus_create_simple("pl050_mouse", 0x10007000, pic[21]); + + pl011_create(0x10009000, pic[12], serial_hd(0)); + pl011_create(0x1000a000, pic[13], serial_hd(1)); + pl011_create(0x1000b000, pic[14], serial_hd(2)); + pl011_create(0x1000c000, pic[15], serial_hd(3)); + + /* DMA controller is optional, apparently. */ + dev = qdev_new("pl081"); + object_property_set_link(OBJECT(dev), "downstream", OBJECT(sysmem), + &error_fatal); + busdev = SYS_BUS_DEVICE(dev); + sysbus_realize_and_unref(busdev, &error_fatal); + sysbus_mmio_map(busdev, 0, 0x10030000); + sysbus_connect_irq(busdev, 0, pic[24]); + + sysbus_create_simple("sp804", 0x10011000, pic[4]); + sysbus_create_simple("sp804", 0x10012000, pic[5]); + + sysbus_create_simple("pl061", 0x10013000, pic[6]); + sysbus_create_simple("pl061", 0x10014000, pic[7]); + gpio2 = sysbus_create_simple("pl061", 0x10015000, pic[8]); + + sysbus_create_simple("pl111", 0x10020000, pic[23]); + + dev = sysbus_create_varargs("pl181", 0x10005000, pic[17], pic[18], NULL); + /* Wire up MMC card detect and read-only signals. These have + * to go to both the PL061 GPIO and the sysctl register. + * Note that the PL181 orders these lines (readonly,inserted) + * and the PL061 has them the other way about. Also the card + * detect line is inverted. 
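+ * A sketch of the resulting fan-out (as wired below):
+ *     card-read-only -> sysctl MMC_WPROT  and PL061 pin 1
+ *     card-inserted  -> sysctl MMC_CARDIN and (inverted) PL061 pin 0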
+ */ + mmc_irq[0] = qemu_irq_split( + qdev_get_gpio_in(sysctl, ARM_SYSCTL_GPIO_MMC_WPROT), + qdev_get_gpio_in(gpio2, 1)); + mmc_irq[1] = qemu_irq_split( + qdev_get_gpio_in(sysctl, ARM_SYSCTL_GPIO_MMC_CARDIN), + qemu_irq_invert(qdev_get_gpio_in(gpio2, 0))); + qdev_connect_gpio_out_named(dev, "card-read-only", 0, mmc_irq[0]); + qdev_connect_gpio_out_named(dev, "card-inserted", 0, mmc_irq[1]); + dinfo = drive_get_next(IF_SD); + if (dinfo) { + DeviceState *card; + + card = qdev_new(TYPE_SD_CARD); + qdev_prop_set_drive_err(card, "drive", blk_by_legacy_dinfo(dinfo), + &error_fatal); + qdev_realize_and_unref(card, qdev_get_child_bus(dev, "sd-bus"), + &error_fatal); + } + + sysbus_create_simple("pl031", 0x10017000, pic[10]); + + if (!is_pb) { + dev = qdev_new("realview_pci"); + busdev = SYS_BUS_DEVICE(dev); + sysbus_realize_and_unref(busdev, &error_fatal); + sysbus_mmio_map(busdev, 0, 0x10019000); /* PCI controller registers */ + sysbus_mmio_map(busdev, 1, 0x60000000); /* PCI self-config */ + sysbus_mmio_map(busdev, 2, 0x61000000); /* PCI config */ + sysbus_mmio_map(busdev, 3, 0x62000000); /* PCI I/O */ + sysbus_mmio_map(busdev, 4, 0x63000000); /* PCI memory window 1 */ + sysbus_mmio_map(busdev, 5, 0x64000000); /* PCI memory window 2 */ + sysbus_mmio_map(busdev, 6, 0x68000000); /* PCI memory window 3 */ + sysbus_connect_irq(busdev, 0, pic[48]); + sysbus_connect_irq(busdev, 1, pic[49]); + sysbus_connect_irq(busdev, 2, pic[50]); + sysbus_connect_irq(busdev, 3, pic[51]); + pci_bus = (PCIBus *)qdev_get_child_bus(dev, "pci"); + if (machine_usb(machine)) { + pci_create_simple(pci_bus, -1, "pci-ohci"); + } + n = drive_get_max_bus(IF_SCSI); + while (n >= 0) { + dev = DEVICE(pci_create_simple(pci_bus, -1, "lsi53c895a")); + lsi53c8xx_handle_legacy_cmdline(dev); + n--; + } + } + for(n = 0; n < nb_nics; n++) { + nd = &nd_table[n]; + + if (!done_nic && (!nd->model || + strcmp(nd->model, is_pb ? "lan9118" : "smc91c111") == 0)) { + if (is_pb) { + lan9118_init(nd, 0x4e000000, pic[28]); + } else { + smc91c111_init(nd, 0x4e000000, pic[28]); + } + done_nic = 1; + } else { + if (pci_bus) { + pci_nic_init_nofail(nd, pci_bus, "rtl8139", NULL); + } + } + } + + dev = sysbus_create_simple(TYPE_VERSATILE_I2C, 0x10002000, NULL); + i2c = (I2CBus *)qdev_get_child_bus(dev, "i2c"); + i2c_slave_create_simple(i2c, "ds1338", 0x68); + + /* Memory map for RealView Emulation Baseboard: */ + /* 0x10000000 System registers. */ + /* 0x10001000 System controller. */ + /* 0x10002000 Two-Wire Serial Bus. */ + /* 0x10003000 Reserved. */ + /* 0x10004000 AACI. */ + /* 0x10005000 MCI. */ + /* 0x10006000 KMI0. */ + /* 0x10007000 KMI1. */ + /* 0x10008000 Character LCD. (EB) */ + /* 0x10009000 UART0. */ + /* 0x1000a000 UART1. */ + /* 0x1000b000 UART2. */ + /* 0x1000c000 UART3. */ + /* 0x1000d000 SSPI. */ + /* 0x1000e000 SCI. */ + /* 0x1000f000 Reserved. */ + /* 0x10010000 Watchdog. */ + /* 0x10011000 Timer 0+1. */ + /* 0x10012000 Timer 2+3. */ + /* 0x10013000 GPIO 0. */ + /* 0x10014000 GPIO 1. */ + /* 0x10015000 GPIO 2. */ + /* 0x10002000 Two-Wire Serial Bus - DVI. (PB) */ + /* 0x10017000 RTC. */ + /* 0x10018000 DMC. */ + /* 0x10019000 PCI controller config. */ + /* 0x10020000 CLCD. */ + /* 0x10030000 DMA Controller. */ + /* 0x10040000 GIC1. (EB) */ + /* 0x10050000 GIC2. (EB) */ + /* 0x10060000 GIC3. (EB) */ + /* 0x10070000 GIC4. (EB) */ + /* 0x10080000 SMC. */ + /* 0x1e000000 GIC1. (PB) */ + /* 0x1e001000 GIC2. (PB) */ + /* 0x1e002000 GIC3. (PB) */ + /* 0x1e003000 GIC4. (PB) */ + /* 0x40000000 NOR flash. */ + /* 0x44000000 DoC flash. 
*/ + /* 0x48000000 SRAM. */ + /* 0x4c000000 Configuration flash. */ + /* 0x4e000000 Ethernet. */ + /* 0x4f000000 USB. */ + /* 0x50000000 PISMO. */ + /* 0x54000000 PISMO. */ + /* 0x58000000 PISMO. */ + /* 0x5c000000 PISMO. */ + /* 0x60000000 PCI. */ + /* 0x60000000 PCI Self Config. */ + /* 0x61000000 PCI Config. */ + /* 0x62000000 PCI IO. */ + /* 0x63000000 PCI mem 0. */ + /* 0x64000000 PCI mem 1. */ + /* 0x68000000 PCI mem 2. */ + + /* ??? Hack to map an additional page of ram for the secondary CPU + startup code. I guess this works on real hardware because the + BootROM happens to be in ROM/flash or in memory that isn't clobbered + until after Linux boots the secondary CPUs. */ + memory_region_init_ram(ram_hack, NULL, "realview.hack", 0x1000, + &error_fatal); + memory_region_add_subregion(sysmem, SMP_BOOT_ADDR, ram_hack); + + realview_binfo.ram_size = ram_size; + realview_binfo.nb_cpus = smp_cpus; + realview_binfo.board_id = realview_board_id[board_type]; + realview_binfo.loader_start = (board_type == BOARD_PB_A8 ? 0x70000000 : 0); + arm_load_kernel(ARM_CPU(first_cpu), machine, &realview_binfo); +} + +static void realview_eb_init(MachineState *machine) +{ + realview_init(machine, BOARD_EB); +} + +static void realview_eb_mpcore_init(MachineState *machine) +{ + realview_init(machine, BOARD_EB_MPCORE); +} + +static void realview_pb_a8_init(MachineState *machine) +{ + realview_init(machine, BOARD_PB_A8); +} + +static void realview_pbx_a9_init(MachineState *machine) +{ + realview_init(machine, BOARD_PBX_A9); +} + +static void realview_eb_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + + mc->desc = "ARM RealView Emulation Baseboard (ARM926EJ-S)"; + mc->init = realview_eb_init; + mc->block_default_type = IF_SCSI; + mc->ignore_memory_transaction_failures = true; + mc->default_cpu_type = ARM_CPU_TYPE_NAME("arm926"); +} + +static const TypeInfo realview_eb_type = { + .name = MACHINE_TYPE_NAME("realview-eb"), + .parent = TYPE_MACHINE, + .class_init = realview_eb_class_init, +}; + +static void realview_eb_mpcore_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + + mc->desc = "ARM RealView Emulation Baseboard (ARM11MPCore)"; + mc->init = realview_eb_mpcore_init; + mc->block_default_type = IF_SCSI; + mc->max_cpus = 4; + mc->ignore_memory_transaction_failures = true; + mc->default_cpu_type = ARM_CPU_TYPE_NAME("arm11mpcore"); +} + +static const TypeInfo realview_eb_mpcore_type = { + .name = MACHINE_TYPE_NAME("realview-eb-mpcore"), + .parent = TYPE_MACHINE, + .class_init = realview_eb_mpcore_class_init, +}; + +static void realview_pb_a8_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + + mc->desc = "ARM RealView Platform Baseboard for Cortex-A8"; + mc->init = realview_pb_a8_init; + mc->ignore_memory_transaction_failures = true; + mc->default_cpu_type = ARM_CPU_TYPE_NAME("cortex-a8"); +} + +static const TypeInfo realview_pb_a8_type = { + .name = MACHINE_TYPE_NAME("realview-pb-a8"), + .parent = TYPE_MACHINE, + .class_init = realview_pb_a8_class_init, +}; + +static void realview_pbx_a9_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + + mc->desc = "ARM RealView Platform Baseboard Explore for Cortex-A9"; + mc->init = realview_pbx_a9_init; + mc->max_cpus = 4; + mc->ignore_memory_transaction_failures = true; + mc->default_cpu_type = ARM_CPU_TYPE_NAME("cortex-a9"); +} + +static const TypeInfo realview_pbx_a9_type = { + .name = MACHINE_TYPE_NAME("realview-pbx-a9"), + .parent 
= TYPE_MACHINE, + .class_init = realview_pbx_a9_class_init, +}; + +static void realview_machine_init(void) +{ + type_register_static(&realview_eb_type); + type_register_static(&realview_eb_mpcore_type); + type_register_static(&realview_pb_a8_type); + type_register_static(&realview_pbx_a9_type); +} + +type_init(realview_machine_init) diff --git a/hw/arm/sabrelite.c b/hw/arm/sabrelite.c new file mode 100644 index 000000000..553608e58 --- /dev/null +++ b/hw/arm/sabrelite.c @@ -0,0 +1,115 @@ +/* + * SABRELITE Board System emulation. + * + * Copyright (c) 2015 Jean-Christophe Dubois + * + * This code is licensed under the GPL, version 2 or later. + * See the file `COPYING' in the top level directory. + * + * It (partially) emulates a sabrelite board, with a Freescale + * i.MX6 SoC + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "hw/arm/fsl-imx6.h" +#include "hw/boards.h" +#include "hw/qdev-properties.h" +#include "qemu/error-report.h" +#include "sysemu/qtest.h" + +static struct arm_boot_info sabrelite_binfo = { + /* DDR memory start */ + .loader_start = FSL_IMX6_MMDC_ADDR, + /* No board ID, we boot from DT tree */ + .board_id = -1, +}; + +/* No need to do any particular setup for secondary boot */ +static void sabrelite_write_secondary(ARMCPU *cpu, + const struct arm_boot_info *info) +{ +} + +/* Secondary cores are reset through SRC device */ +static void sabrelite_reset_secondary(ARMCPU *cpu, + const struct arm_boot_info *info) +{ +} + +static void sabrelite_init(MachineState *machine) +{ + FslIMX6State *s; + + /* Check the amount of memory is compatible with the SOC */ + if (machine->ram_size > FSL_IMX6_MMDC_SIZE) { + error_report("RAM size " RAM_ADDR_FMT " above max supported (%08x)", + machine->ram_size, FSL_IMX6_MMDC_SIZE); + exit(1); + } + + s = FSL_IMX6(object_new(TYPE_FSL_IMX6)); + object_property_add_child(OBJECT(machine), "soc", OBJECT(s)); + + /* Ethernet PHY address is 6 */ + object_property_set_int(OBJECT(s), "fec-phy-num", 6, &error_fatal); + + qdev_realize(DEVICE(s), NULL, &error_fatal); + + memory_region_add_subregion(get_system_memory(), FSL_IMX6_MMDC_ADDR, + machine->ram); + + { + /* + * TODO: Ideally we would expose the chip select and spi bus on the + * SoC object using alias properties; then we would not need to + * directly access the underlying spi device object. 
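+ * With such aliases this hookup could stay at the SoC level, e.g.
+ * (hypothetical property name, for illustration only):
+ *     object_property_set_link(OBJECT(s), "spi1-cs0",
+ *                              OBJECT(flash_dev), &error_fatal);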
+ */ + /* Add the sst25vf016b NOR FLASH memory to first SPI */ + Object *spi_dev; + + spi_dev = object_resolve_path_component(OBJECT(s), "spi1"); + if (spi_dev) { + SSIBus *spi_bus; + + spi_bus = (SSIBus *)qdev_get_child_bus(DEVICE(spi_dev), "spi"); + if (spi_bus) { + DeviceState *flash_dev; + qemu_irq cs_line; + DriveInfo *dinfo = drive_get_next(IF_MTD); + + flash_dev = qdev_new("sst25vf016b"); + if (dinfo) { + qdev_prop_set_drive_err(flash_dev, "drive", + blk_by_legacy_dinfo(dinfo), + &error_fatal); + } + qdev_realize_and_unref(flash_dev, BUS(spi_bus), &error_fatal); + + cs_line = qdev_get_gpio_in_named(flash_dev, SSI_GPIO_CS, 0); + qdev_connect_gpio_out(DEVICE(&s->gpio[2]), 19, cs_line); + } + } + } + + sabrelite_binfo.ram_size = machine->ram_size; + sabrelite_binfo.nb_cpus = machine->smp.cpus; + sabrelite_binfo.secure_boot = true; + sabrelite_binfo.write_secondary_boot = sabrelite_write_secondary; + sabrelite_binfo.secondary_cpu_reset_hook = sabrelite_reset_secondary; + + if (!qtest_enabled()) { + arm_load_kernel(&s->cpu[0], machine, &sabrelite_binfo); + } +} + +static void sabrelite_machine_init(MachineClass *mc) +{ + mc->desc = "Freescale i.MX6 Quad SABRE Lite Board (Cortex-A9)"; + mc->init = sabrelite_init; + mc->max_cpus = FSL_IMX6_NUM_CPUS; + mc->ignore_memory_transaction_failures = true; + mc->default_ram_id = "sabrelite.ram"; +} + +DEFINE_MACHINE("sabrelite", sabrelite_machine_init) diff --git a/hw/arm/sbsa-ref.c b/hw/arm/sbsa-ref.c new file mode 100644 index 000000000..358714bd3 --- /dev/null +++ b/hw/arm/sbsa-ref.c @@ -0,0 +1,868 @@ +/* + * ARM SBSA Reference Platform emulation + * + * Copyright (c) 2018 Linaro Limited + * Written by Hongbo Zhang + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>.
+ */ + +#include "qemu/osdep.h" +#include "qemu-common.h" +#include "qemu/datadir.h" +#include "qapi/error.h" +#include "qemu/error-report.h" +#include "qemu/units.h" +#include "sysemu/device_tree.h" +#include "sysemu/numa.h" +#include "sysemu/runstate.h" +#include "sysemu/sysemu.h" +#include "exec/hwaddr.h" +#include "kvm_arm.h" +#include "hw/arm/boot.h" +#include "hw/block/flash.h" +#include "hw/boards.h" +#include "hw/ide/internal.h" +#include "hw/ide/ahci_internal.h" +#include "hw/intc/arm_gicv3_common.h" +#include "hw/loader.h" +#include "hw/pci-host/gpex.h" +#include "hw/qdev-properties.h" +#include "hw/usb.h" +#include "hw/char/pl011.h" +#include "hw/watchdog/sbsa_gwdt.h" +#include "net/net.h" +#include "qom/object.h" + +#define RAMLIMIT_GB 8192 +#define RAMLIMIT_BYTES (RAMLIMIT_GB * GiB) + +#define NUM_IRQS 256 +#define NUM_SMMU_IRQS 4 +#define NUM_SATA_PORTS 6 + +#define VIRTUAL_PMU_IRQ 7 +#define ARCH_GIC_MAINT_IRQ 9 +#define ARCH_TIMER_VIRT_IRQ 11 +#define ARCH_TIMER_S_EL1_IRQ 13 +#define ARCH_TIMER_NS_EL1_IRQ 14 +#define ARCH_TIMER_NS_EL2_IRQ 10 + +enum { + SBSA_FLASH, + SBSA_MEM, + SBSA_CPUPERIPHS, + SBSA_GIC_DIST, + SBSA_GIC_REDIST, + SBSA_SECURE_EC, + SBSA_GWDT_WS0, + SBSA_GWDT_REFRESH, + SBSA_GWDT_CONTROL, + SBSA_SMMU, + SBSA_UART, + SBSA_RTC, + SBSA_PCIE, + SBSA_PCIE_MMIO, + SBSA_PCIE_MMIO_HIGH, + SBSA_PCIE_PIO, + SBSA_PCIE_ECAM, + SBSA_GPIO, + SBSA_SECURE_UART, + SBSA_SECURE_UART_MM, + SBSA_SECURE_MEM, + SBSA_AHCI, + SBSA_EHCI, +}; + +struct SBSAMachineState { + MachineState parent; + struct arm_boot_info bootinfo; + int smp_cpus; + void *fdt; + int fdt_size; + int psci_conduit; + DeviceState *gic; + PFlashCFI01 *flash[2]; +}; + +#define TYPE_SBSA_MACHINE MACHINE_TYPE_NAME("sbsa-ref") +OBJECT_DECLARE_SIMPLE_TYPE(SBSAMachineState, SBSA_MACHINE) + +static const MemMapEntry sbsa_ref_memmap[] = { + /* 512M boot ROM */ + [SBSA_FLASH] = { 0, 0x20000000 }, + /* 512M secure memory */ + [SBSA_SECURE_MEM] = { 0x20000000, 0x20000000 }, + /* Space reserved for CPU peripheral devices */ + [SBSA_CPUPERIPHS] = { 0x40000000, 0x00040000 }, + [SBSA_GIC_DIST] = { 0x40060000, 0x00010000 }, + [SBSA_GIC_REDIST] = { 0x40080000, 0x04000000 }, + [SBSA_SECURE_EC] = { 0x50000000, 0x00001000 }, + [SBSA_GWDT_REFRESH] = { 0x50010000, 0x00001000 }, + [SBSA_GWDT_CONTROL] = { 0x50011000, 0x00001000 }, + [SBSA_UART] = { 0x60000000, 0x00001000 }, + [SBSA_RTC] = { 0x60010000, 0x00001000 }, + [SBSA_GPIO] = { 0x60020000, 0x00001000 }, + [SBSA_SECURE_UART] = { 0x60030000, 0x00001000 }, + [SBSA_SECURE_UART_MM] = { 0x60040000, 0x00001000 }, + [SBSA_SMMU] = { 0x60050000, 0x00020000 }, + /* Space here reserved for more SMMUs */ + [SBSA_AHCI] = { 0x60100000, 0x00010000 }, + [SBSA_EHCI] = { 0x60110000, 0x00010000 }, + /* Space here reserved for other devices */ + [SBSA_PCIE_PIO] = { 0x7fff0000, 0x00010000 }, + /* 32-bit address PCIE MMIO space */ + [SBSA_PCIE_MMIO] = { 0x80000000, 0x70000000 }, + /* 256M PCIE ECAM space */ + [SBSA_PCIE_ECAM] = { 0xf0000000, 0x10000000 }, + /* ~1TB PCIE MMIO space (4GB to 1024GB boundary) */ + [SBSA_PCIE_MMIO_HIGH] = { 0x100000000ULL, 0xFF00000000ULL }, + [SBSA_MEM] = { 0x10000000000ULL, RAMLIMIT_BYTES }, +}; + +static const int sbsa_ref_irqmap[] = { + [SBSA_UART] = 1, + [SBSA_RTC] = 2, + [SBSA_PCIE] = 3, /* ... to 6 */ + [SBSA_GPIO] = 7, + [SBSA_SECURE_UART] = 8, + [SBSA_SECURE_UART_MM] = 9, + [SBSA_AHCI] = 10, + [SBSA_EHCI] = 11, + [SBSA_SMMU] = 12, /* ... 
to 15 */ + [SBSA_GWDT_WS0] = 16, +}; + +static const char * const valid_cpus[] = { + ARM_CPU_TYPE_NAME("cortex-a57"), + ARM_CPU_TYPE_NAME("cortex-a72"), + ARM_CPU_TYPE_NAME("max"), +}; + +static bool cpu_type_valid(const char *cpu) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(valid_cpus); i++) { + if (strcmp(cpu, valid_cpus[i]) == 0) { + return true; + } + } + return false; +} + +static uint64_t sbsa_ref_cpu_mp_affinity(SBSAMachineState *sms, int idx) +{ + uint8_t clustersz = ARM_DEFAULT_CPUS_PER_CLUSTER; + return arm_cpu_mp_affinity(idx, clustersz); +} + +/* + * Firmware on this machine only uses the ACPI tables to load the OS; these + * limited device tree nodes merely tell the firmware the information that + * varies with the command line parameters, so they do not need to be fully + * compatible with the kernel's CPU and NUMA binding rules. + */ +static void create_fdt(SBSAMachineState *sms) +{ + void *fdt = create_device_tree(&sms->fdt_size); + const MachineState *ms = MACHINE(sms); + int nb_numa_nodes = ms->numa_state->num_nodes; + int cpu; + + if (!fdt) { + error_report("create_device_tree() failed"); + exit(1); + } + + sms->fdt = fdt; + + qemu_fdt_setprop_string(fdt, "/", "compatible", "linux,sbsa-ref"); + qemu_fdt_setprop_cell(fdt, "/", "#address-cells", 0x2); + qemu_fdt_setprop_cell(fdt, "/", "#size-cells", 0x2); + + if (ms->numa_state->have_numa_distance) { + int size = nb_numa_nodes * nb_numa_nodes * 3 * sizeof(uint32_t); + uint32_t *matrix = g_malloc0(size); + int idx, i, j; + + for (i = 0; i < nb_numa_nodes; i++) { + for (j = 0; j < nb_numa_nodes; j++) { + idx = (i * nb_numa_nodes + j) * 3; + matrix[idx + 0] = cpu_to_be32(i); + matrix[idx + 1] = cpu_to_be32(j); + matrix[idx + 2] = + cpu_to_be32(ms->numa_state->nodes[i].distance[j]); + } + } + + qemu_fdt_add_subnode(fdt, "/distance-map"); + qemu_fdt_setprop(fdt, "/distance-map", "distance-matrix", + matrix, size); + g_free(matrix); + } + + /* + * From Documentation/devicetree/bindings/arm/cpus.yaml + * On ARM v8 64-bit systems this property is required + * and matches the MPIDR_EL1 register affinity bits. + * + * * If cpus node's #address-cells property is set to 2 + * + * The first reg cell bits [7:0] must be set to + * bits [39:32] of MPIDR_EL1. + * + * The second reg cell bits [23:0] must be set to + * bits [23:0] of MPIDR_EL1. + */ + qemu_fdt_add_subnode(sms->fdt, "/cpus"); + qemu_fdt_setprop_cell(sms->fdt, "/cpus", "#address-cells", 2); + qemu_fdt_setprop_cell(sms->fdt, "/cpus", "#size-cells", 0x0); + + for (cpu = sms->smp_cpus - 1; cpu >= 0; cpu--) { + char *nodename = g_strdup_printf("/cpus/cpu@%d", cpu); + ARMCPU *armcpu = ARM_CPU(qemu_get_cpu(cpu)); + CPUState *cs = CPU(armcpu); + uint64_t mpidr = sbsa_ref_cpu_mp_affinity(sms, cpu); + + qemu_fdt_add_subnode(sms->fdt, nodename); + qemu_fdt_setprop_u64(sms->fdt, nodename, "reg", mpidr); + + if (ms->possible_cpus->cpus[cs->cpu_index].props.has_node_id) { + qemu_fdt_setprop_cell(sms->fdt, nodename, "numa-node-id", + ms->possible_cpus->cpus[cs->cpu_index].props.node_id); + } + + g_free(nodename); + } +} + +#define SBSA_FLASH_SECTOR_SIZE (256 * KiB) + +static PFlashCFI01 *sbsa_flash_create1(SBSAMachineState *sms, + const char *name, + const char *alias_prop_name) +{ + /* + * Create a single flash device. We use the same parameters as + * the flash devices on the Versatile Express board.
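+ * As a worked example from the constants in this file: each of the two
+ * devices covers half of the 512M SBSA_FLASH window, so sbsa_flash_map1()
+ * later sets num-blocks = 256 MiB / SBSA_FLASH_SECTOR_SIZE = 1024.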
+ */ + DeviceState *dev = qdev_new(TYPE_PFLASH_CFI01); + + qdev_prop_set_uint64(dev, "sector-length", SBSA_FLASH_SECTOR_SIZE); + qdev_prop_set_uint8(dev, "width", 4); + qdev_prop_set_uint8(dev, "device-width", 2); + qdev_prop_set_bit(dev, "big-endian", false); + qdev_prop_set_uint16(dev, "id0", 0x89); + qdev_prop_set_uint16(dev, "id1", 0x18); + qdev_prop_set_uint16(dev, "id2", 0x00); + qdev_prop_set_uint16(dev, "id3", 0x00); + qdev_prop_set_string(dev, "name", name); + object_property_add_child(OBJECT(sms), name, OBJECT(dev)); + object_property_add_alias(OBJECT(sms), alias_prop_name, + OBJECT(dev), "drive"); + return PFLASH_CFI01(dev); +} + +static void sbsa_flash_create(SBSAMachineState *sms) +{ + sms->flash[0] = sbsa_flash_create1(sms, "sbsa.flash0", "pflash0"); + sms->flash[1] = sbsa_flash_create1(sms, "sbsa.flash1", "pflash1"); +} + +static void sbsa_flash_map1(PFlashCFI01 *flash, + hwaddr base, hwaddr size, + MemoryRegion *sysmem) +{ + DeviceState *dev = DEVICE(flash); + + assert(QEMU_IS_ALIGNED(size, SBSA_FLASH_SECTOR_SIZE)); + assert(size / SBSA_FLASH_SECTOR_SIZE <= UINT32_MAX); + qdev_prop_set_uint32(dev, "num-blocks", size / SBSA_FLASH_SECTOR_SIZE); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + + memory_region_add_subregion(sysmem, base, + sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), + 0)); +} + +static void sbsa_flash_map(SBSAMachineState *sms, + MemoryRegion *sysmem, + MemoryRegion *secure_sysmem) +{ + /* + * Map two flash devices to fill the SBSA_FLASH space in the memmap. + * sysmem is the system memory space. secure_sysmem is the secure view + * of the system, and the first flash device should be made visible only + * there. The second flash device is visible to both secure and nonsecure. + */ + hwaddr flashsize = sbsa_ref_memmap[SBSA_FLASH].size / 2; + hwaddr flashbase = sbsa_ref_memmap[SBSA_FLASH].base; + + sbsa_flash_map1(sms->flash[0], flashbase, flashsize, + secure_sysmem); + sbsa_flash_map1(sms->flash[1], flashbase + flashsize, flashsize, + sysmem); +} + +static bool sbsa_firmware_init(SBSAMachineState *sms, + MemoryRegion *sysmem, + MemoryRegion *secure_sysmem) +{ + const char *bios_name; + int i; + BlockBackend *pflash_blk0; + + /* Map legacy -drive if=pflash to machine properties */ + for (i = 0; i < ARRAY_SIZE(sms->flash); i++) { + pflash_cfi01_legacy_drive(sms->flash[i], + drive_get(IF_PFLASH, 0, i)); + } + + sbsa_flash_map(sms, sysmem, secure_sysmem); + + pflash_blk0 = pflash_cfi01_get_blk(sms->flash[0]); + + bios_name = MACHINE(sms)->firmware; + if (bios_name) { + char *fname; + MemoryRegion *mr; + int image_size; + + if (pflash_blk0) { + error_report("The contents of the first flash device may be " + "specified with -bios or with -drive if=pflash... 
" + "but you cannot use both options at once"); + exit(1); + } + + /* Fall back to -bios */ + + fname = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name); + if (!fname) { + error_report("Could not find ROM image '%s'", bios_name); + exit(1); + } + mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(sms->flash[0]), 0); + image_size = load_image_mr(fname, mr); + g_free(fname); + if (image_size < 0) { + error_report("Could not load ROM image '%s'", bios_name); + exit(1); + } + } + + return pflash_blk0 || bios_name; +} + +static void create_secure_ram(SBSAMachineState *sms, + MemoryRegion *secure_sysmem) +{ + MemoryRegion *secram = g_new(MemoryRegion, 1); + hwaddr base = sbsa_ref_memmap[SBSA_SECURE_MEM].base; + hwaddr size = sbsa_ref_memmap[SBSA_SECURE_MEM].size; + + memory_region_init_ram(secram, NULL, "sbsa-ref.secure-ram", size, + &error_fatal); + memory_region_add_subregion(secure_sysmem, base, secram); +} + +static void create_gic(SBSAMachineState *sms) +{ + unsigned int smp_cpus = MACHINE(sms)->smp.cpus; + SysBusDevice *gicbusdev; + const char *gictype; + uint32_t redist0_capacity, redist0_count; + int i; + + gictype = gicv3_class_name(); + + sms->gic = qdev_new(gictype); + qdev_prop_set_uint32(sms->gic, "revision", 3); + qdev_prop_set_uint32(sms->gic, "num-cpu", smp_cpus); + /* + * Note that the num-irq property counts both internal and external + * interrupts; there are always 32 of the former (mandated by GIC spec). + */ + qdev_prop_set_uint32(sms->gic, "num-irq", NUM_IRQS + 32); + qdev_prop_set_bit(sms->gic, "has-security-extensions", true); + + redist0_capacity = + sbsa_ref_memmap[SBSA_GIC_REDIST].size / GICV3_REDIST_SIZE; + redist0_count = MIN(smp_cpus, redist0_capacity); + + qdev_prop_set_uint32(sms->gic, "len-redist-region-count", 1); + qdev_prop_set_uint32(sms->gic, "redist-region-count[0]", redist0_count); + + gicbusdev = SYS_BUS_DEVICE(sms->gic); + sysbus_realize_and_unref(gicbusdev, &error_fatal); + sysbus_mmio_map(gicbusdev, 0, sbsa_ref_memmap[SBSA_GIC_DIST].base); + sysbus_mmio_map(gicbusdev, 1, sbsa_ref_memmap[SBSA_GIC_REDIST].base); + + /* + * Wire the outputs from each CPU's generic timer and the GICv3 + * maintenance interrupt signal to the appropriate GIC PPI inputs, + * and the GIC's IRQ/FIQ/VIRQ/VFIQ interrupt outputs to the CPU's inputs. + */ + for (i = 0; i < smp_cpus; i++) { + DeviceState *cpudev = DEVICE(qemu_get_cpu(i)); + int ppibase = NUM_IRQS + i * GIC_INTERNAL + GIC_NR_SGIS; + int irq; + /* + * Mapping from the output timer irq lines from the CPU to the + * GIC PPI inputs used for this board. 
+ */ + const int timer_irq[] = { + [GTIMER_PHYS] = ARCH_TIMER_NS_EL1_IRQ, + [GTIMER_VIRT] = ARCH_TIMER_VIRT_IRQ, + [GTIMER_HYP] = ARCH_TIMER_NS_EL2_IRQ, + [GTIMER_SEC] = ARCH_TIMER_S_EL1_IRQ, + }; + + for (irq = 0; irq < ARRAY_SIZE(timer_irq); irq++) { + qdev_connect_gpio_out(cpudev, irq, + qdev_get_gpio_in(sms->gic, + ppibase + timer_irq[irq])); + } + + qdev_connect_gpio_out_named(cpudev, "gicv3-maintenance-interrupt", 0, + qdev_get_gpio_in(sms->gic, ppibase + + ARCH_GIC_MAINT_IRQ)); + qdev_connect_gpio_out_named(cpudev, "pmu-interrupt", 0, + qdev_get_gpio_in(sms->gic, ppibase + + VIRTUAL_PMU_IRQ)); + + sysbus_connect_irq(gicbusdev, i, qdev_get_gpio_in(cpudev, ARM_CPU_IRQ)); + sysbus_connect_irq(gicbusdev, i + smp_cpus, + qdev_get_gpio_in(cpudev, ARM_CPU_FIQ)); + sysbus_connect_irq(gicbusdev, i + 2 * smp_cpus, + qdev_get_gpio_in(cpudev, ARM_CPU_VIRQ)); + sysbus_connect_irq(gicbusdev, i + 3 * smp_cpus, + qdev_get_gpio_in(cpudev, ARM_CPU_VFIQ)); + } +} + +static void create_uart(const SBSAMachineState *sms, int uart, + MemoryRegion *mem, Chardev *chr) +{ + hwaddr base = sbsa_ref_memmap[uart].base; + int irq = sbsa_ref_irqmap[uart]; + DeviceState *dev = qdev_new(TYPE_PL011); + SysBusDevice *s = SYS_BUS_DEVICE(dev); + + qdev_prop_set_chr(dev, "chardev", chr); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + memory_region_add_subregion(mem, base, + sysbus_mmio_get_region(s, 0)); + sysbus_connect_irq(s, 0, qdev_get_gpio_in(sms->gic, irq)); +} + +static void create_rtc(const SBSAMachineState *sms) +{ + hwaddr base = sbsa_ref_memmap[SBSA_RTC].base; + int irq = sbsa_ref_irqmap[SBSA_RTC]; + + sysbus_create_simple("pl031", base, qdev_get_gpio_in(sms->gic, irq)); +} + +static void create_wdt(const SBSAMachineState *sms) +{ + hwaddr rbase = sbsa_ref_memmap[SBSA_GWDT_REFRESH].base; + hwaddr cbase = sbsa_ref_memmap[SBSA_GWDT_CONTROL].base; + DeviceState *dev = qdev_new(TYPE_WDT_SBSA); + SysBusDevice *s = SYS_BUS_DEVICE(dev); + int irq = sbsa_ref_irqmap[SBSA_GWDT_WS0]; + + sysbus_realize_and_unref(s, &error_fatal); + sysbus_mmio_map(s, 0, rbase); + sysbus_mmio_map(s, 1, cbase); + sysbus_connect_irq(s, 0, qdev_get_gpio_in(sms->gic, irq)); +} + +static DeviceState *gpio_key_dev; +static void sbsa_ref_powerdown_req(Notifier *n, void *opaque) +{ + /* use gpio Pin 3 for power button event */ + qemu_set_irq(qdev_get_gpio_in(gpio_key_dev, 0), 1); +} + +static Notifier sbsa_ref_powerdown_notifier = { + .notify = sbsa_ref_powerdown_req +}; + +static void create_gpio(const SBSAMachineState *sms) +{ + DeviceState *pl061_dev; + hwaddr base = sbsa_ref_memmap[SBSA_GPIO].base; + int irq = sbsa_ref_irqmap[SBSA_GPIO]; + + pl061_dev = sysbus_create_simple("pl061", base, + qdev_get_gpio_in(sms->gic, irq)); + + gpio_key_dev = sysbus_create_simple("gpio-key", -1, + qdev_get_gpio_in(pl061_dev, 3)); + + /* connect powerdown request */ + qemu_register_powerdown_notifier(&sbsa_ref_powerdown_notifier); +} + +static void create_ahci(const SBSAMachineState *sms) +{ + hwaddr base = sbsa_ref_memmap[SBSA_AHCI].base; + int irq = sbsa_ref_irqmap[SBSA_AHCI]; + DeviceState *dev; + DriveInfo *hd[NUM_SATA_PORTS]; + SysbusAHCIState *sysahci; + AHCIState *ahci; + int i; + + dev = qdev_new("sysbus-ahci"); + qdev_prop_set_uint32(dev, "num-ports", NUM_SATA_PORTS); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base); + sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, qdev_get_gpio_in(sms->gic, irq)); + + sysahci = SYSBUS_AHCI(dev); + ahci = &sysahci->ahci; + ide_drive_get(hd, 
ARRAY_SIZE(hd)); + for (i = 0; i < ahci->ports; i++) { + if (hd[i] == NULL) { + continue; + } + ide_create_drive(&ahci->dev[i].port, 0, hd[i]); + } +} + +static void create_ehci(const SBSAMachineState *sms) +{ + hwaddr base = sbsa_ref_memmap[SBSA_EHCI].base; + int irq = sbsa_ref_irqmap[SBSA_EHCI]; + + sysbus_create_simple("platform-ehci-usb", base, + qdev_get_gpio_in(sms->gic, irq)); +} + +static void create_smmu(const SBSAMachineState *sms, PCIBus *bus) +{ + hwaddr base = sbsa_ref_memmap[SBSA_SMMU].base; + int irq = sbsa_ref_irqmap[SBSA_SMMU]; + DeviceState *dev; + int i; + + dev = qdev_new("arm-smmuv3"); + + object_property_set_link(OBJECT(dev), "primary-bus", OBJECT(bus), + &error_abort); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base); + for (i = 0; i < NUM_SMMU_IRQS; i++) { + sysbus_connect_irq(SYS_BUS_DEVICE(dev), i, + qdev_get_gpio_in(sms->gic, irq + i)); + } +} + +static void create_pcie(SBSAMachineState *sms) +{ + hwaddr base_ecam = sbsa_ref_memmap[SBSA_PCIE_ECAM].base; + hwaddr size_ecam = sbsa_ref_memmap[SBSA_PCIE_ECAM].size; + hwaddr base_mmio = sbsa_ref_memmap[SBSA_PCIE_MMIO].base; + hwaddr size_mmio = sbsa_ref_memmap[SBSA_PCIE_MMIO].size; + hwaddr base_mmio_high = sbsa_ref_memmap[SBSA_PCIE_MMIO_HIGH].base; + hwaddr size_mmio_high = sbsa_ref_memmap[SBSA_PCIE_MMIO_HIGH].size; + hwaddr base_pio = sbsa_ref_memmap[SBSA_PCIE_PIO].base; + int irq = sbsa_ref_irqmap[SBSA_PCIE]; + MemoryRegion *mmio_alias, *mmio_alias_high, *mmio_reg; + MemoryRegion *ecam_alias, *ecam_reg; + DeviceState *dev; + PCIHostState *pci; + int i; + + dev = qdev_new(TYPE_GPEX_HOST); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + + /* Map ECAM space */ + ecam_alias = g_new0(MemoryRegion, 1); + ecam_reg = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 0); + memory_region_init_alias(ecam_alias, OBJECT(dev), "pcie-ecam", + ecam_reg, 0, size_ecam); + memory_region_add_subregion(get_system_memory(), base_ecam, ecam_alias); + + /* Map the MMIO space */ + mmio_alias = g_new0(MemoryRegion, 1); + mmio_reg = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 1); + memory_region_init_alias(mmio_alias, OBJECT(dev), "pcie-mmio", + mmio_reg, base_mmio, size_mmio); + memory_region_add_subregion(get_system_memory(), base_mmio, mmio_alias); + + /* Map the MMIO_HIGH space */ + mmio_alias_high = g_new0(MemoryRegion, 1); + memory_region_init_alias(mmio_alias_high, OBJECT(dev), "pcie-mmio-high", + mmio_reg, base_mmio_high, size_mmio_high); + memory_region_add_subregion(get_system_memory(), base_mmio_high, + mmio_alias_high); + + /* Map IO port space */ + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 2, base_pio); + + for (i = 0; i < GPEX_NUM_IRQS; i++) { + sysbus_connect_irq(SYS_BUS_DEVICE(dev), i, + qdev_get_gpio_in(sms->gic, irq + i)); + gpex_set_irq_num(GPEX_HOST(dev), i, irq + i); + } + + pci = PCI_HOST_BRIDGE(dev); + if (pci->bus) { + for (i = 0; i < nb_nics; i++) { + NICInfo *nd = &nd_table[i]; + + if (!nd->model) { + nd->model = g_strdup("e1000e"); + } + + pci_nic_init_nofail(nd, pci->bus, nd->model, NULL); + } + } + + pci_create_simple(pci->bus, -1, "VGA"); + + create_smmu(sms, pci->bus); +} + +static void *sbsa_ref_dtb(const struct arm_boot_info *binfo, int *fdt_size) +{ + const SBSAMachineState *board = container_of(binfo, SBSAMachineState, + bootinfo); + + *fdt_size = board->fdt_size; + return board->fdt; +} + +static void create_secure_ec(MemoryRegion *mem) +{ + hwaddr base = sbsa_ref_memmap[SBSA_SECURE_EC].base; + DeviceState *dev = qdev_new("sbsa-ec"); + 
SysBusDevice *s = SYS_BUS_DEVICE(dev); + + memory_region_add_subregion(mem, base, + sysbus_mmio_get_region(s, 0)); +} + +static void sbsa_ref_init(MachineState *machine) +{ + unsigned int smp_cpus = machine->smp.cpus; + unsigned int max_cpus = machine->smp.max_cpus; + SBSAMachineState *sms = SBSA_MACHINE(machine); + MachineClass *mc = MACHINE_GET_CLASS(machine); + MemoryRegion *sysmem = get_system_memory(); + MemoryRegion *secure_sysmem = g_new(MemoryRegion, 1); + bool firmware_loaded; + const CPUArchIdList *possible_cpus; + int n, sbsa_max_cpus; + + if (!cpu_type_valid(machine->cpu_type)) { + error_report("sbsa-ref: CPU type %s not supported", machine->cpu_type); + exit(1); + } + + if (kvm_enabled()) { + error_report("sbsa-ref: KVM is not supported for this machine"); + exit(1); + } + + /* + * The Secure view of the world is the same as the NonSecure, + * but with a few extra devices. Create it as a container region + * containing the system memory at low priority; any secure-only + * devices go in at higher priority and take precedence. + */ + memory_region_init(secure_sysmem, OBJECT(machine), "secure-memory", + UINT64_MAX); + memory_region_add_subregion_overlap(secure_sysmem, 0, sysmem, -1); + + firmware_loaded = sbsa_firmware_init(sms, sysmem, secure_sysmem); + + /* + * This machine has EL3 enabled, external firmware should supply PSCI + * implementation, so the QEMU's internal PSCI is disabled. + */ + sms->psci_conduit = QEMU_PSCI_CONDUIT_DISABLED; + + sbsa_max_cpus = sbsa_ref_memmap[SBSA_GIC_REDIST].size / GICV3_REDIST_SIZE; + + if (max_cpus > sbsa_max_cpus) { + error_report("Number of SMP CPUs requested (%d) exceeds max CPUs " + "supported by machine 'sbsa-ref' (%d)", + max_cpus, sbsa_max_cpus); + exit(1); + } + + sms->smp_cpus = smp_cpus; + + if (machine->ram_size > sbsa_ref_memmap[SBSA_MEM].size) { + error_report("sbsa-ref: cannot model more than %dGB RAM", RAMLIMIT_GB); + exit(1); + } + + possible_cpus = mc->possible_cpu_arch_ids(machine); + for (n = 0; n < possible_cpus->len; n++) { + Object *cpuobj; + CPUState *cs; + + if (n >= smp_cpus) { + break; + } + + cpuobj = object_new(possible_cpus->cpus[n].type); + object_property_set_int(cpuobj, "mp-affinity", + possible_cpus->cpus[n].arch_id, NULL); + + cs = CPU(cpuobj); + cs->cpu_index = n; + + numa_cpu_pre_plug(&possible_cpus->cpus[cs->cpu_index], DEVICE(cpuobj), + &error_fatal); + + if (object_property_find(cpuobj, "reset-cbar")) { + object_property_set_int(cpuobj, "reset-cbar", + sbsa_ref_memmap[SBSA_CPUPERIPHS].base, + &error_abort); + } + + object_property_set_link(cpuobj, "memory", OBJECT(sysmem), + &error_abort); + + object_property_set_link(cpuobj, "secure-memory", + OBJECT(secure_sysmem), &error_abort); + + qdev_realize(DEVICE(cpuobj), NULL, &error_fatal); + object_unref(cpuobj); + } + + memory_region_add_subregion(sysmem, sbsa_ref_memmap[SBSA_MEM].base, + machine->ram); + + create_fdt(sms); + + create_secure_ram(sms, secure_sysmem); + + create_gic(sms); + + create_uart(sms, SBSA_UART, sysmem, serial_hd(0)); + create_uart(sms, SBSA_SECURE_UART, secure_sysmem, serial_hd(1)); + /* Second secure UART for RAS and MM from EL0 */ + create_uart(sms, SBSA_SECURE_UART_MM, secure_sysmem, serial_hd(2)); + + create_rtc(sms); + + create_wdt(sms); + + create_gpio(sms); + + create_ahci(sms); + + create_ehci(sms); + + create_pcie(sms); + + create_secure_ec(secure_sysmem); + + sms->bootinfo.ram_size = machine->ram_size; + sms->bootinfo.nb_cpus = smp_cpus; + sms->bootinfo.board_id = -1; + sms->bootinfo.loader_start = 
sbsa_ref_memmap[SBSA_MEM].base; + sms->bootinfo.get_dtb = sbsa_ref_dtb; + sms->bootinfo.firmware_loaded = firmware_loaded; + arm_load_kernel(ARM_CPU(first_cpu), machine, &sms->bootinfo); +} + +static const CPUArchIdList *sbsa_ref_possible_cpu_arch_ids(MachineState *ms) +{ + unsigned int max_cpus = ms->smp.max_cpus; + SBSAMachineState *sms = SBSA_MACHINE(ms); + int n; + + if (ms->possible_cpus) { + assert(ms->possible_cpus->len == max_cpus); + return ms->possible_cpus; + } + + ms->possible_cpus = g_malloc0(sizeof(CPUArchIdList) + + sizeof(CPUArchId) * max_cpus); + ms->possible_cpus->len = max_cpus; + for (n = 0; n < ms->possible_cpus->len; n++) { + ms->possible_cpus->cpus[n].type = ms->cpu_type; + ms->possible_cpus->cpus[n].arch_id = + sbsa_ref_cpu_mp_affinity(sms, n); + ms->possible_cpus->cpus[n].props.has_thread_id = true; + ms->possible_cpus->cpus[n].props.thread_id = n; + } + return ms->possible_cpus; +} + +static CpuInstanceProperties +sbsa_ref_cpu_index_to_props(MachineState *ms, unsigned cpu_index) +{ + MachineClass *mc = MACHINE_GET_CLASS(ms); + const CPUArchIdList *possible_cpus = mc->possible_cpu_arch_ids(ms); + + assert(cpu_index < possible_cpus->len); + return possible_cpus->cpus[cpu_index].props; +} + +static int64_t +sbsa_ref_get_default_cpu_node_id(const MachineState *ms, int idx) +{ + return idx % ms->numa_state->num_nodes; +} + +static void sbsa_ref_instance_init(Object *obj) +{ + SBSAMachineState *sms = SBSA_MACHINE(obj); + + sbsa_flash_create(sms); +} + +static void sbsa_ref_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + + mc->init = sbsa_ref_init; + mc->desc = "QEMU 'SBSA Reference' ARM Virtual Machine"; + mc->default_cpu_type = ARM_CPU_TYPE_NAME("cortex-a57"); + mc->max_cpus = 512; + mc->pci_allow_0_address = true; + mc->minimum_page_bits = 12; + mc->block_default_type = IF_IDE; + mc->no_cdrom = 1; + mc->default_ram_size = 1 * GiB; + mc->default_ram_id = "sbsa-ref.ram"; + mc->default_cpus = 4; + mc->possible_cpu_arch_ids = sbsa_ref_possible_cpu_arch_ids; + mc->cpu_index_to_instance_props = sbsa_ref_cpu_index_to_props; + mc->get_default_cpu_node_id = sbsa_ref_get_default_cpu_node_id; +} + +static const TypeInfo sbsa_ref_info = { + .name = TYPE_SBSA_MACHINE, + .parent = TYPE_MACHINE, + .instance_init = sbsa_ref_instance_init, + .class_init = sbsa_ref_class_init, + .instance_size = sizeof(SBSAMachineState), +}; + +static void sbsa_ref_machine_init(void) +{ + type_register_static(&sbsa_ref_info); +} + +type_init(sbsa_ref_machine_init); diff --git a/hw/arm/smmu-common.c b/hw/arm/smmu-common.c new file mode 100644 index 000000000..0459850a9 --- /dev/null +++ b/hw/arm/smmu-common.c @@ -0,0 +1,569 @@ +/* + * Copyright (C) 2014-2016 Broadcom Corporation + * Copyright (c) 2017 Red Hat, Inc. + * Written by Prem Mallappa, Eric Auger + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * Author: Prem Mallappa + * + */ + +#include "qemu/osdep.h" +#include "trace.h" +#include "exec/target_page.h" +#include "hw/core/cpu.h" +#include "hw/qdev-properties.h" +#include "qapi/error.h" +#include "qemu/jhash.h" +#include "qemu/module.h" + +#include "qemu/error-report.h" +#include "hw/arm/smmu-common.h" +#include "smmu-internal.h" + +/* IOTLB Management */ + +static guint smmu_iotlb_key_hash(gconstpointer v) +{ + SMMUIOTLBKey *key = (SMMUIOTLBKey *)v; + uint32_t a, b, c; + + /* Jenkins hash */ + a = b = c = JHASH_INITVAL + sizeof(*key); + a += key->asid + key->level + key->tg; + b += extract64(key->iova, 0, 32); + c += extract64(key->iova, 32, 32); + + __jhash_mix(a, b, c); + __jhash_final(a, b, c); + + return c; +} + +static gboolean smmu_iotlb_key_equal(gconstpointer v1, gconstpointer v2) +{ + SMMUIOTLBKey *k1 = (SMMUIOTLBKey *)v1, *k2 = (SMMUIOTLBKey *)v2; + + return (k1->asid == k2->asid) && (k1->iova == k2->iova) && + (k1->level == k2->level) && (k1->tg == k2->tg); +} + +SMMUIOTLBKey smmu_get_iotlb_key(uint16_t asid, uint64_t iova, + uint8_t tg, uint8_t level) +{ + SMMUIOTLBKey key = {.asid = asid, .iova = iova, .tg = tg, .level = level}; + + return key; +} + +SMMUTLBEntry *smmu_iotlb_lookup(SMMUState *bs, SMMUTransCfg *cfg, + SMMUTransTableInfo *tt, hwaddr iova) +{ + uint8_t tg = (tt->granule_sz - 10) / 2; + uint8_t inputsize = 64 - tt->tsz; + uint8_t stride = tt->granule_sz - 3; + uint8_t level = 4 - (inputsize - 4) / stride; + SMMUTLBEntry *entry = NULL; + + while (level <= 3) { + uint64_t subpage_size = 1ULL << level_shift(level, tt->granule_sz); + uint64_t mask = subpage_size - 1; + SMMUIOTLBKey key; + + key = smmu_get_iotlb_key(cfg->asid, iova & ~mask, tg, level); + entry = g_hash_table_lookup(bs->iotlb, &key); + if (entry) { + break; + } + level++; + } + + if (entry) { + cfg->iotlb_hits++; + trace_smmu_iotlb_lookup_hit(cfg->asid, iova, + cfg->iotlb_hits, cfg->iotlb_misses, + 100 * cfg->iotlb_hits / + (cfg->iotlb_hits + cfg->iotlb_misses)); + } else { + cfg->iotlb_misses++; + trace_smmu_iotlb_lookup_miss(cfg->asid, iova, + cfg->iotlb_hits, cfg->iotlb_misses, + 100 * cfg->iotlb_hits / + (cfg->iotlb_hits + cfg->iotlb_misses)); + } + return entry; +} + +void smmu_iotlb_insert(SMMUState *bs, SMMUTransCfg *cfg, SMMUTLBEntry *new) +{ + SMMUIOTLBKey *key = g_new0(SMMUIOTLBKey, 1); + uint8_t tg = (new->granule - 10) / 2; + + if (g_hash_table_size(bs->iotlb) >= SMMU_IOTLB_MAX_SIZE) { + smmu_iotlb_inv_all(bs); + } + + *key = smmu_get_iotlb_key(cfg->asid, new->entry.iova, tg, new->level); + trace_smmu_iotlb_insert(cfg->asid, new->entry.iova, tg, new->level); + g_hash_table_insert(bs->iotlb, key, new); +} + +inline void smmu_iotlb_inv_all(SMMUState *s) +{ + trace_smmu_iotlb_inv_all(); + g_hash_table_remove_all(s->iotlb); +} + +static gboolean smmu_hash_remove_by_asid(gpointer key, gpointer value, + gpointer user_data) +{ + uint16_t asid = *(uint16_t *)user_data; + SMMUIOTLBKey *iotlb_key = (SMMUIOTLBKey *)key; + + return SMMU_IOTLB_ASID(*iotlb_key) == asid; +} + +static gboolean smmu_hash_remove_by_asid_iova(gpointer key, gpointer value, + gpointer user_data) +{ + SMMUTLBEntry *iter = (SMMUTLBEntry *)value; + IOMMUTLBEntry *entry = &iter->entry; + SMMUIOTLBPageInvInfo *info = (SMMUIOTLBPageInvInfo *)user_data; + SMMUIOTLBKey iotlb_key = *(SMMUIOTLBKey *)key; + + if (info->asid >= 0 && info->asid != SMMU_IOTLB_ASID(iotlb_key)) { + return false; + } + return ((info->iova & ~entry->addr_mask) == entry->iova) || + ((entry->iova & ~info->mask) == info->iova); +} + +inline void 
+smmu_iotlb_inv_iova(SMMUState *s, int asid, dma_addr_t iova, + uint8_t tg, uint64_t num_pages, uint8_t ttl) +{ + /* if tg is not set we use 4KB range invalidation */ + uint8_t granule = tg ? tg * 2 + 10 : 12; + + if (ttl && (num_pages == 1) && (asid >= 0)) { + SMMUIOTLBKey key = smmu_get_iotlb_key(asid, iova, tg, ttl); + + if (g_hash_table_remove(s->iotlb, &key)) { + return; + } + /* + * if the entry is not found, let's see if it does not + * belong to a larger IOTLB entry + */ + } + + SMMUIOTLBPageInvInfo info = { + .asid = asid, .iova = iova, + .mask = (num_pages * 1 << granule) - 1}; + + g_hash_table_foreach_remove(s->iotlb, + smmu_hash_remove_by_asid_iova, + &info); +} + +inline void smmu_iotlb_inv_asid(SMMUState *s, uint16_t asid) +{ + trace_smmu_iotlb_inv_asid(asid); + g_hash_table_foreach_remove(s->iotlb, smmu_hash_remove_by_asid, &asid); +} + +/* VMSAv8-64 Translation */ + +/** + * get_pte - Get the content of a page table entry located at + * @base_addr[@index] + */ +static int get_pte(dma_addr_t baseaddr, uint32_t index, uint64_t *pte, + SMMUPTWEventInfo *info) +{ + int ret; + dma_addr_t addr = baseaddr + index * sizeof(*pte); + + /* TODO: guarantee 64-bit single-copy atomicity */ + ret = dma_memory_read(&address_space_memory, addr, pte, sizeof(*pte)); + + if (ret != MEMTX_OK) { + info->type = SMMU_PTW_ERR_WALK_EABT; + info->addr = addr; + return -EINVAL; + } + trace_smmu_get_pte(baseaddr, index, addr, *pte); + return 0; +} + +/* VMSAv8-64 Translation Table Format Descriptor Decoding */ + +/** + * get_page_pte_address - returns the L3 descriptor output address, + * ie. the page frame + * ARM ARM spec: Figure D4-17 VMSAv8-64 level 3 descriptor format + */ +static inline hwaddr get_page_pte_address(uint64_t pte, int granule_sz) +{ + return PTE_ADDRESS(pte, granule_sz); +} + +/** + * get_table_pte_address - return table descriptor output address, + * ie. address of next level table + * ARM ARM Figure D4-16 VMSAv8-64 level0, level1, and level 2 descriptor formats + */ +static inline hwaddr get_table_pte_address(uint64_t pte, int granule_sz) +{ + return PTE_ADDRESS(pte, granule_sz); +} + +/** + * get_block_pte_address - return block descriptor output address and block size + * ARM ARM Figure D4-16 VMSAv8-64 level0, level1, and level 2 descriptor formats + */ +static inline hwaddr get_block_pte_address(uint64_t pte, int level, + int granule_sz, uint64_t *bsz) +{ + int n = level_shift(level, granule_sz); + + *bsz = 1ULL << n; + return PTE_ADDRESS(pte, n); +} + +SMMUTransTableInfo *select_tt(SMMUTransCfg *cfg, dma_addr_t iova) +{ + bool tbi = extract64(iova, 55, 1) ? 
TBI1(cfg->tbi) : TBI0(cfg->tbi); + uint8_t tbi_byte = tbi * 8; + + if (cfg->tt[0].tsz && + !extract64(iova, 64 - cfg->tt[0].tsz, cfg->tt[0].tsz - tbi_byte)) { + /* there is a ttbr0 region and we are in it (high bits all zero) */ + return &cfg->tt[0]; + } else if (cfg->tt[1].tsz && + !extract64(iova, 64 - cfg->tt[1].tsz, cfg->tt[1].tsz - tbi_byte)) { + /* there is a ttbr1 region and we are in it (high bits all one) */ + return &cfg->tt[1]; + } else if (!cfg->tt[0].tsz) { + /* ttbr0 region is "everything not in the ttbr1 region" */ + return &cfg->tt[0]; + } else if (!cfg->tt[1].tsz) { + /* ttbr1 region is "everything not in the ttbr0 region" */ + return &cfg->tt[1]; + } + /* in the gap between the two regions, this is a Translation fault */ + return NULL; +} + +/** + * smmu_ptw_64 - VMSAv8-64 Walk of the page tables for a given IOVA + * @cfg: translation config + * @iova: iova to translate + * @perm: access type + * @tlbe: SMMUTLBEntry (out) + * @info: handle to an error info + * + * Return 0 on success, < 0 on error. In case of error, @info is filled + * and tlbe->perm is set to IOMMU_NONE. + * Upon success, @tlbe is filled with translated_addr and entry + * permission rights. + */ +static int smmu_ptw_64(SMMUTransCfg *cfg, + dma_addr_t iova, IOMMUAccessFlags perm, + SMMUTLBEntry *tlbe, SMMUPTWEventInfo *info) +{ + dma_addr_t baseaddr, indexmask; + int stage = cfg->stage; + SMMUTransTableInfo *tt = select_tt(cfg, iova); + uint8_t level, granule_sz, inputsize, stride; + + if (!tt || tt->disabled) { + info->type = SMMU_PTW_ERR_TRANSLATION; + goto error; + } + + granule_sz = tt->granule_sz; + stride = granule_sz - 3; + inputsize = 64 - tt->tsz; + level = 4 - (inputsize - 4) / stride; + indexmask = (1ULL << (inputsize - (stride * (4 - level)))) - 1; + baseaddr = extract64(tt->ttb, 0, 48); + baseaddr &= ~indexmask; + + while (level <= 3) { + uint64_t subpage_size = 1ULL << level_shift(level, granule_sz); + uint64_t mask = subpage_size - 1; + uint32_t offset = iova_level_offset(iova, inputsize, level, granule_sz); + uint64_t pte, gpa; + dma_addr_t pte_addr = baseaddr + offset * sizeof(pte); + uint8_t ap; + + if (get_pte(baseaddr, offset, &pte, info)) { + goto error; + } + trace_smmu_ptw_level(level, iova, subpage_size, + baseaddr, offset, pte); + + if (is_invalid_pte(pte) || is_reserved_pte(pte, level)) { + trace_smmu_ptw_invalid_pte(stage, level, baseaddr, + pte_addr, offset, pte); + break; + } + + if (is_table_pte(pte, level)) { + ap = PTE_APTABLE(pte); + + if (is_permission_fault(ap, perm) && !tt->had) { + info->type = SMMU_PTW_ERR_PERMISSION; + goto error; + } + baseaddr = get_table_pte_address(pte, granule_sz); + level++; + continue; + } else if (is_page_pte(pte, level)) { + gpa = get_page_pte_address(pte, granule_sz); + trace_smmu_ptw_page_pte(stage, level, iova, + baseaddr, pte_addr, pte, gpa); + } else { + uint64_t block_size; + + gpa = get_block_pte_address(pte, level, granule_sz, + &block_size); + trace_smmu_ptw_block_pte(stage, level, baseaddr, + pte_addr, pte, iova, gpa, + block_size >> 20); + } + ap = PTE_AP(pte); + if (is_permission_fault(ap, perm)) { + info->type = SMMU_PTW_ERR_PERMISSION; + goto error; + } + + tlbe->entry.translated_addr = gpa; + tlbe->entry.iova = iova & ~mask; + tlbe->entry.addr_mask = mask; + tlbe->entry.perm = PTE_AP_TO_PERM(ap); + tlbe->level = level; + tlbe->granule = granule_sz; + return 0; + } + info->type = SMMU_PTW_ERR_TRANSLATION; + +error: + tlbe->entry.perm = IOMMU_NONE; + return -EINVAL; +} + +/** + * smmu_ptw - Walk the page tables for an IOVA, 
according to @cfg + * + * @cfg: translation configuration + * @iova: iova to translate + * @perm: tentative access type + * @tlbe: returned entry + * @info: ptw event handle + * + * return 0 on success + */ +inline int smmu_ptw(SMMUTransCfg *cfg, dma_addr_t iova, IOMMUAccessFlags perm, + SMMUTLBEntry *tlbe, SMMUPTWEventInfo *info) +{ + if (!cfg->aa64) { + /* + * This code path is not entered as we check this while decoding + * the configuration data in the derived SMMU model. + */ + g_assert_not_reached(); + } + + return smmu_ptw_64(cfg, iova, perm, tlbe, info); +} + +/** + * The bus number is used for lookup when SID based invalidation occurs. + * In that case we lazily populate the SMMUPciBus array from the bus hash + * table. At the time the SMMUPciBus is created (smmu_find_add_as), the bus + * numbers may not be always initialized yet. + */ +SMMUPciBus *smmu_find_smmu_pcibus(SMMUState *s, uint8_t bus_num) +{ + SMMUPciBus *smmu_pci_bus = s->smmu_pcibus_by_bus_num[bus_num]; + GHashTableIter iter; + + if (smmu_pci_bus) { + return smmu_pci_bus; + } + + g_hash_table_iter_init(&iter, s->smmu_pcibus_by_busptr); + while (g_hash_table_iter_next(&iter, NULL, (void **)&smmu_pci_bus)) { + if (pci_bus_num(smmu_pci_bus->bus) == bus_num) { + s->smmu_pcibus_by_bus_num[bus_num] = smmu_pci_bus; + return smmu_pci_bus; + } + } + + return NULL; +} + +static AddressSpace *smmu_find_add_as(PCIBus *bus, void *opaque, int devfn) +{ + SMMUState *s = opaque; + SMMUPciBus *sbus = g_hash_table_lookup(s->smmu_pcibus_by_busptr, bus); + SMMUDevice *sdev; + static unsigned int index; + + if (!sbus) { + sbus = g_malloc0(sizeof(SMMUPciBus) + + sizeof(SMMUDevice *) * SMMU_PCI_DEVFN_MAX); + sbus->bus = bus; + g_hash_table_insert(s->smmu_pcibus_by_busptr, bus, sbus); + } + + sdev = sbus->pbdev[devfn]; + if (!sdev) { + char *name = g_strdup_printf("%s-%d-%d", s->mrtypename, devfn, index++); + + sdev = sbus->pbdev[devfn] = g_new0(SMMUDevice, 1); + + sdev->smmu = s; + sdev->bus = bus; + sdev->devfn = devfn; + + memory_region_init_iommu(&sdev->iommu, sizeof(sdev->iommu), + s->mrtypename, + OBJECT(s), name, 1ULL << SMMU_MAX_VA_BITS); + address_space_init(&sdev->as, + MEMORY_REGION(&sdev->iommu), name); + trace_smmu_add_mr(name); + g_free(name); + } + + return &sdev->as; +} + +IOMMUMemoryRegion *smmu_iommu_mr(SMMUState *s, uint32_t sid) +{ + uint8_t bus_n, devfn; + SMMUPciBus *smmu_bus; + SMMUDevice *smmu; + + bus_n = PCI_BUS_NUM(sid); + smmu_bus = smmu_find_smmu_pcibus(s, bus_n); + if (smmu_bus) { + devfn = SMMU_PCI_DEVFN(sid); + smmu = smmu_bus->pbdev[devfn]; + if (smmu) { + return &smmu->iommu; + } + } + return NULL; +} + +/* Unmap the whole notifier's range */ +static void smmu_unmap_notifier_range(IOMMUNotifier *n) +{ + IOMMUTLBEvent event; + + event.type = IOMMU_NOTIFIER_UNMAP; + event.entry.target_as = &address_space_memory; + event.entry.iova = n->start; + event.entry.perm = IOMMU_NONE; + event.entry.addr_mask = n->end - n->start; + + memory_region_notify_iommu_one(n, &event); +} + +/* Unmap all notifiers attached to @mr */ +inline void smmu_inv_notifiers_mr(IOMMUMemoryRegion *mr) +{ + IOMMUNotifier *n; + + trace_smmu_inv_notifiers_mr(mr->parent_obj.name); + IOMMU_NOTIFIER_FOREACH(n, mr) { + smmu_unmap_notifier_range(n); + } +} + +/* Unmap all notifiers of all mr's */ +void smmu_inv_notifiers_all(SMMUState *s) +{ + SMMUDevice *sdev; + + QLIST_FOREACH(sdev, &s->devices_with_notifiers, next) { + smmu_inv_notifiers_mr(&sdev->iommu); + } +} + +static void smmu_base_realize(DeviceState *dev, Error **errp) +{ + SMMUState *s = 
ARM_SMMU(dev); + SMMUBaseClass *sbc = ARM_SMMU_GET_CLASS(dev); + Error *local_err = NULL; + + sbc->parent_realize(dev, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + s->configs = g_hash_table_new_full(NULL, NULL, NULL, g_free); + s->iotlb = g_hash_table_new_full(smmu_iotlb_key_hash, smmu_iotlb_key_equal, + g_free, g_free); + s->smmu_pcibus_by_busptr = g_hash_table_new(NULL, NULL); + + if (s->primary_bus) { + pci_setup_iommu(s->primary_bus, smmu_find_add_as, s); + } else { + error_setg(errp, "SMMU is not attached to any PCI bus!"); + } +} + +static void smmu_base_reset(DeviceState *dev) +{ + SMMUState *s = ARM_SMMU(dev); + + g_hash_table_remove_all(s->configs); + g_hash_table_remove_all(s->iotlb); +} + +static Property smmu_dev_properties[] = { + DEFINE_PROP_UINT8("bus_num", SMMUState, bus_num, 0), + DEFINE_PROP_LINK("primary-bus", SMMUState, primary_bus, "PCI", PCIBus *), + DEFINE_PROP_END_OF_LIST(), +}; + +static void smmu_base_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + SMMUBaseClass *sbc = ARM_SMMU_CLASS(klass); + + device_class_set_props(dc, smmu_dev_properties); + device_class_set_parent_realize(dc, smmu_base_realize, + &sbc->parent_realize); + dc->reset = smmu_base_reset; +} + +static const TypeInfo smmu_base_info = { + .name = TYPE_ARM_SMMU, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(SMMUState), + .class_data = NULL, + .class_size = sizeof(SMMUBaseClass), + .class_init = smmu_base_class_init, + .abstract = true, +}; + +static void smmu_base_register_types(void) +{ + type_register_static(&smmu_base_info); +} + +type_init(smmu_base_register_types) + diff --git a/hw/arm/smmu-internal.h b/hw/arm/smmu-internal.h new file mode 100644 index 000000000..2d75b3195 --- /dev/null +++ b/hw/arm/smmu-internal.h @@ -0,0 +1,112 @@ +/* + * ARM SMMU support - Internal API + * + * Copyright (c) 2017 Red Hat, Inc. + * Copyright (C) 2014-2016 Broadcom Corporation + * Written by Prem Mallappa, Eric Auger + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . 
+ */ + +#ifndef HW_ARM_SMMU_INTERNAL_H +#define HW_ARM_SMMU_INTERNAL_H + +#define TBI0(tbi) ((tbi) & 0x1) +#define TBI1(tbi) ((tbi) & 0x2 >> 1) + +/* PTE Manipulation */ + +#define ARM_LPAE_PTE_TYPE_SHIFT 0 +#define ARM_LPAE_PTE_TYPE_MASK 0x3 + +#define ARM_LPAE_PTE_TYPE_BLOCK 1 +#define ARM_LPAE_PTE_TYPE_TABLE 3 + +#define ARM_LPAE_L3_PTE_TYPE_RESERVED 1 +#define ARM_LPAE_L3_PTE_TYPE_PAGE 3 + +#define ARM_LPAE_PTE_VALID (1 << 0) + +#define PTE_ADDRESS(pte, shift) \ + (extract64(pte, shift, 47 - shift + 1) << shift) + +#define is_invalid_pte(pte) (!(pte & ARM_LPAE_PTE_VALID)) + +#define is_reserved_pte(pte, level) \ + ((level == 3) && \ + ((pte & ARM_LPAE_PTE_TYPE_MASK) == ARM_LPAE_L3_PTE_TYPE_RESERVED)) + +#define is_block_pte(pte, level) \ + ((level < 3) && \ + ((pte & ARM_LPAE_PTE_TYPE_MASK) == ARM_LPAE_PTE_TYPE_BLOCK)) + +#define is_table_pte(pte, level) \ + ((level < 3) && \ + ((pte & ARM_LPAE_PTE_TYPE_MASK) == ARM_LPAE_PTE_TYPE_TABLE)) + +#define is_page_pte(pte, level) \ + ((level == 3) && \ + ((pte & ARM_LPAE_PTE_TYPE_MASK) == ARM_LPAE_L3_PTE_TYPE_PAGE)) + +/* access permissions */ + +#define PTE_AP(pte) \ + (extract64(pte, 6, 2)) + +#define PTE_APTABLE(pte) \ + (extract64(pte, 61, 2)) + +/* + * TODO: At the moment all transactions are considered as privileged (EL1) + * as IOMMU translation callback does not pass user/priv attributes. + */ +#define is_permission_fault(ap, perm) \ + (((perm) & IOMMU_WO) && ((ap) & 0x2)) + +#define PTE_AP_TO_PERM(ap) \ + (IOMMU_ACCESS_FLAG(true, !((ap) & 0x2))) + +/* Level Indexing */ + +static inline int level_shift(int level, int granule_sz) +{ + return granule_sz + (3 - level) * (granule_sz - 3); +} + +static inline uint64_t level_page_mask(int level, int granule_sz) +{ + return ~(MAKE_64BIT_MASK(0, level_shift(level, granule_sz))); +} + +static inline +uint64_t iova_level_offset(uint64_t iova, int inputsize, + int level, int gsz) +{ + return ((iova & MAKE_64BIT_MASK(0, inputsize)) >> level_shift(level, gsz)) & + MAKE_64BIT_MASK(0, gsz - 3); +} + +#define SMMU_IOTLB_ASID(key) ((key).asid) + +typedef struct SMMUIOTLBPageInvInfo { + int asid; + uint64_t iova; + uint64_t mask; +} SMMUIOTLBPageInvInfo; + +typedef struct SMMUSIDRange { + uint32_t start; + uint32_t end; +} SMMUSIDRange; + +#endif diff --git a/hw/arm/smmuv3-internal.h b/hw/arm/smmuv3-internal.h new file mode 100644 index 000000000..d1885ae3f --- /dev/null +++ b/hw/arm/smmuv3-internal.h @@ -0,0 +1,631 @@ +/* + * ARM SMMUv3 support - Internal API + * + * Copyright (C) 2014-2016 Broadcom Corporation + * Copyright (c) 2017 Red Hat, Inc. + * Written by Prem Mallappa, Eric Auger + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . 
+ */ + +#ifndef HW_ARM_SMMUV3_INTERNAL_H +#define HW_ARM_SMMUV3_INTERNAL_H + +#include "hw/arm/smmu-common.h" + +typedef enum SMMUTranslationStatus { + SMMU_TRANS_DISABLE, + SMMU_TRANS_ABORT, + SMMU_TRANS_BYPASS, + SMMU_TRANS_ERROR, + SMMU_TRANS_SUCCESS, +} SMMUTranslationStatus; + +/* MMIO Registers */ + +REG32(IDR0, 0x0) + FIELD(IDR0, S1P, 1 , 1) + FIELD(IDR0, TTF, 2 , 2) + FIELD(IDR0, COHACC, 4 , 1) + FIELD(IDR0, ASID16, 12, 1) + FIELD(IDR0, TTENDIAN, 21, 2) + FIELD(IDR0, STALL_MODEL, 24, 2) + FIELD(IDR0, TERM_MODEL, 26, 1) + FIELD(IDR0, STLEVEL, 27, 2) + +REG32(IDR1, 0x4) + FIELD(IDR1, SIDSIZE, 0 , 6) + FIELD(IDR1, EVENTQS, 16, 5) + FIELD(IDR1, CMDQS, 21, 5) + +#define SMMU_IDR1_SIDSIZE 16 +#define SMMU_CMDQS 19 +#define SMMU_EVENTQS 19 + +REG32(IDR2, 0x8) +REG32(IDR3, 0xc) + FIELD(IDR3, HAD, 2, 1); + FIELD(IDR3, RIL, 10, 1); +REG32(IDR4, 0x10) +REG32(IDR5, 0x14) + FIELD(IDR5, OAS, 0, 3); + FIELD(IDR5, GRAN4K, 4, 1); + FIELD(IDR5, GRAN16K, 5, 1); + FIELD(IDR5, GRAN64K, 6, 1); + +#define SMMU_IDR5_OAS 4 + +REG32(IIDR, 0x18) +REG32(AIDR, 0x1c) +REG32(CR0, 0x20) + FIELD(CR0, SMMU_ENABLE, 0, 1) + FIELD(CR0, EVENTQEN, 2, 1) + FIELD(CR0, CMDQEN, 3, 1) + +#define SMMU_CR0_RESERVED 0xFFFFFC20 + +REG32(CR0ACK, 0x24) +REG32(CR1, 0x28) +REG32(CR2, 0x2c) +REG32(STATUSR, 0x40) +REG32(IRQ_CTRL, 0x50) + FIELD(IRQ_CTRL, GERROR_IRQEN, 0, 1) + FIELD(IRQ_CTRL, PRI_IRQEN, 1, 1) + FIELD(IRQ_CTRL, EVENTQ_IRQEN, 2, 1) + +REG32(IRQ_CTRL_ACK, 0x54) +REG32(GERROR, 0x60) + FIELD(GERROR, CMDQ_ERR, 0, 1) + FIELD(GERROR, EVENTQ_ABT_ERR, 2, 1) + FIELD(GERROR, PRIQ_ABT_ERR, 3, 1) + FIELD(GERROR, MSI_CMDQ_ABT_ERR, 4, 1) + FIELD(GERROR, MSI_EVENTQ_ABT_ERR, 5, 1) + FIELD(GERROR, MSI_PRIQ_ABT_ERR, 6, 1) + FIELD(GERROR, MSI_GERROR_ABT_ERR, 7, 1) + FIELD(GERROR, MSI_SFM_ERR, 8, 1) + +REG32(GERRORN, 0x64) + +#define A_GERROR_IRQ_CFG0 0x68 /* 64b */ +REG32(GERROR_IRQ_CFG1, 0x70) +REG32(GERROR_IRQ_CFG2, 0x74) + +#define A_STRTAB_BASE 0x80 /* 64b */ + +#define SMMU_BASE_ADDR_MASK 0xfffffffffffc0 + +REG32(STRTAB_BASE_CFG, 0x88) + FIELD(STRTAB_BASE_CFG, FMT, 16, 2) + FIELD(STRTAB_BASE_CFG, SPLIT, 6 , 5) + FIELD(STRTAB_BASE_CFG, LOG2SIZE, 0 , 6) + +#define A_CMDQ_BASE 0x90 /* 64b */ +REG32(CMDQ_PROD, 0x98) +REG32(CMDQ_CONS, 0x9c) + FIELD(CMDQ_CONS, ERR, 24, 7) + +#define A_EVENTQ_BASE 0xa0 /* 64b */ +REG32(EVENTQ_PROD, 0xa8) +REG32(EVENTQ_CONS, 0xac) + +#define A_EVENTQ_IRQ_CFG0 0xb0 /* 64b */ +REG32(EVENTQ_IRQ_CFG1, 0xb8) +REG32(EVENTQ_IRQ_CFG2, 0xbc) + +#define A_IDREGS 0xfd0 + +static inline int smmu_enabled(SMMUv3State *s) +{ + return FIELD_EX32(s->cr[0], CR0, SMMU_ENABLE); +} + +/* Command Queue Entry */ +typedef struct Cmd { + uint32_t word[4]; +} Cmd; + +/* Event Queue Entry */ +typedef struct Evt { + uint32_t word[8]; +} Evt; + +static inline uint32_t smmuv3_idreg(int regoffset) +{ + /* + * Return the value of the Primecell/Corelink ID registers at the + * specified offset from the first ID register. 
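+ * The Primecell/Corelink ID space is twelve 32-bit registers + * (PIDR4-7, PIDR0-3, CIDR0-3), each exposing one byte of the ID in + * its low bits, hence the regoffset / 4 indexing below.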
+ * These value indicate an ARM implementation of MMU600 p1 + */ + static const uint8_t smmuv3_ids[] = { + 0x04, 0, 0, 0, 0x84, 0xB4, 0xF0, 0x10, 0x0D, 0xF0, 0x05, 0xB1 + }; + return smmuv3_ids[regoffset / 4]; +} + +static inline bool smmuv3_eventq_irq_enabled(SMMUv3State *s) +{ + return FIELD_EX32(s->irq_ctrl, IRQ_CTRL, EVENTQ_IRQEN); +} + +static inline bool smmuv3_gerror_irq_enabled(SMMUv3State *s) +{ + return FIELD_EX32(s->irq_ctrl, IRQ_CTRL, GERROR_IRQEN); +} + +/* Queue Handling */ + +#define Q_BASE(q) ((q)->base & SMMU_BASE_ADDR_MASK) +#define WRAP_MASK(q) (1 << (q)->log2size) +#define INDEX_MASK(q) (((1 << (q)->log2size)) - 1) +#define WRAP_INDEX_MASK(q) ((1 << ((q)->log2size + 1)) - 1) + +#define Q_CONS(q) ((q)->cons & INDEX_MASK(q)) +#define Q_PROD(q) ((q)->prod & INDEX_MASK(q)) + +#define Q_CONS_ENTRY(q) (Q_BASE(q) + (q)->entry_size * Q_CONS(q)) +#define Q_PROD_ENTRY(q) (Q_BASE(q) + (q)->entry_size * Q_PROD(q)) + +#define Q_CONS_WRAP(q) (((q)->cons & WRAP_MASK(q)) >> (q)->log2size) +#define Q_PROD_WRAP(q) (((q)->prod & WRAP_MASK(q)) >> (q)->log2size) + +static inline bool smmuv3_q_full(SMMUQueue *q) +{ + return ((q->cons ^ q->prod) & WRAP_INDEX_MASK(q)) == WRAP_MASK(q); +} + +static inline bool smmuv3_q_empty(SMMUQueue *q) +{ + return (q->cons & WRAP_INDEX_MASK(q)) == (q->prod & WRAP_INDEX_MASK(q)); +} + +static inline void queue_prod_incr(SMMUQueue *q) +{ + q->prod = (q->prod + 1) & WRAP_INDEX_MASK(q); +} + +static inline void queue_cons_incr(SMMUQueue *q) +{ + /* + * We have to use deposit for the CONS registers to preserve + * the ERR field in the high bits. + */ + q->cons = deposit32(q->cons, 0, q->log2size + 1, q->cons + 1); +} + +static inline bool smmuv3_cmdq_enabled(SMMUv3State *s) +{ + return FIELD_EX32(s->cr[0], CR0, CMDQEN); +} + +static inline bool smmuv3_eventq_enabled(SMMUv3State *s) +{ + return FIELD_EX32(s->cr[0], CR0, EVENTQEN); +} + +static inline void smmu_write_cmdq_err(SMMUv3State *s, uint32_t err_type) +{ + s->cmdq.cons = FIELD_DP32(s->cmdq.cons, CMDQ_CONS, ERR, err_type); +} + +/* Commands */ + +typedef enum SMMUCommandType { + SMMU_CMD_NONE = 0x00, + SMMU_CMD_PREFETCH_CONFIG , + SMMU_CMD_PREFETCH_ADDR, + SMMU_CMD_CFGI_STE, + SMMU_CMD_CFGI_STE_RANGE, + SMMU_CMD_CFGI_CD, + SMMU_CMD_CFGI_CD_ALL, + SMMU_CMD_CFGI_ALL, + SMMU_CMD_TLBI_NH_ALL = 0x10, + SMMU_CMD_TLBI_NH_ASID, + SMMU_CMD_TLBI_NH_VA, + SMMU_CMD_TLBI_NH_VAA, + SMMU_CMD_TLBI_EL3_ALL = 0x18, + SMMU_CMD_TLBI_EL3_VA = 0x1a, + SMMU_CMD_TLBI_EL2_ALL = 0x20, + SMMU_CMD_TLBI_EL2_ASID, + SMMU_CMD_TLBI_EL2_VA, + SMMU_CMD_TLBI_EL2_VAA, + SMMU_CMD_TLBI_S12_VMALL = 0x28, + SMMU_CMD_TLBI_S2_IPA = 0x2a, + SMMU_CMD_TLBI_NSNH_ALL = 0x30, + SMMU_CMD_ATC_INV = 0x40, + SMMU_CMD_PRI_RESP, + SMMU_CMD_RESUME = 0x44, + SMMU_CMD_STALL_TERM, + SMMU_CMD_SYNC, +} SMMUCommandType; + +static const char *cmd_stringify[] = { + [SMMU_CMD_PREFETCH_CONFIG] = "SMMU_CMD_PREFETCH_CONFIG", + [SMMU_CMD_PREFETCH_ADDR] = "SMMU_CMD_PREFETCH_ADDR", + [SMMU_CMD_CFGI_STE] = "SMMU_CMD_CFGI_STE", + [SMMU_CMD_CFGI_STE_RANGE] = "SMMU_CMD_CFGI_STE_RANGE", + [SMMU_CMD_CFGI_CD] = "SMMU_CMD_CFGI_CD", + [SMMU_CMD_CFGI_CD_ALL] = "SMMU_CMD_CFGI_CD_ALL", + [SMMU_CMD_CFGI_ALL] = "SMMU_CMD_CFGI_ALL", + [SMMU_CMD_TLBI_NH_ALL] = "SMMU_CMD_TLBI_NH_ALL", + [SMMU_CMD_TLBI_NH_ASID] = "SMMU_CMD_TLBI_NH_ASID", + [SMMU_CMD_TLBI_NH_VA] = "SMMU_CMD_TLBI_NH_VA", + [SMMU_CMD_TLBI_NH_VAA] = "SMMU_CMD_TLBI_NH_VAA", + [SMMU_CMD_TLBI_EL3_ALL] = "SMMU_CMD_TLBI_EL3_ALL", + [SMMU_CMD_TLBI_EL3_VA] = "SMMU_CMD_TLBI_EL3_VA", + [SMMU_CMD_TLBI_EL2_ALL] = "SMMU_CMD_TLBI_EL2_ALL", + 
[SMMU_CMD_TLBI_EL2_ASID] = "SMMU_CMD_TLBI_EL2_ASID", + [SMMU_CMD_TLBI_EL2_VA] = "SMMU_CMD_TLBI_EL2_VA", + [SMMU_CMD_TLBI_EL2_VAA] = "SMMU_CMD_TLBI_EL2_VAA", + [SMMU_CMD_TLBI_S12_VMALL] = "SMMU_CMD_TLBI_S12_VMALL", + [SMMU_CMD_TLBI_S2_IPA] = "SMMU_CMD_TLBI_S2_IPA", + [SMMU_CMD_TLBI_NSNH_ALL] = "SMMU_CMD_TLBI_NSNH_ALL", + [SMMU_CMD_ATC_INV] = "SMMU_CMD_ATC_INV", + [SMMU_CMD_PRI_RESP] = "SMMU_CMD_PRI_RESP", + [SMMU_CMD_RESUME] = "SMMU_CMD_RESUME", + [SMMU_CMD_STALL_TERM] = "SMMU_CMD_STALL_TERM", + [SMMU_CMD_SYNC] = "SMMU_CMD_SYNC", +}; + +static inline const char *smmu_cmd_string(SMMUCommandType type) +{ + if (type > SMMU_CMD_NONE && type < ARRAY_SIZE(cmd_stringify)) { + return cmd_stringify[type] ? cmd_stringify[type] : "UNKNOWN"; + } else { + return "INVALID"; + } +} + +/* CMDQ fields */ + +typedef enum { + SMMU_CERROR_NONE = 0, + SMMU_CERROR_ILL, + SMMU_CERROR_ABT, + SMMU_CERROR_ATC_INV_SYNC, +} SMMUCmdError; + +enum { /* Command completion notification */ + CMD_SYNC_SIG_NONE, + CMD_SYNC_SIG_IRQ, + CMD_SYNC_SIG_SEV, +}; + +#define CMD_TYPE(x) extract32((x)->word[0], 0 , 8) +#define CMD_NUM(x) extract32((x)->word[0], 12 , 5) +#define CMD_SCALE(x) extract32((x)->word[0], 20 , 5) +#define CMD_SSEC(x) extract32((x)->word[0], 10, 1) +#define CMD_SSV(x) extract32((x)->word[0], 11, 1) +#define CMD_RESUME_AC(x) extract32((x)->word[0], 12, 1) +#define CMD_RESUME_AB(x) extract32((x)->word[0], 13, 1) +#define CMD_SYNC_CS(x) extract32((x)->word[0], 12, 2) +#define CMD_SSID(x) extract32((x)->word[0], 12, 20) +#define CMD_SID(x) ((x)->word[1]) +#define CMD_VMID(x) extract32((x)->word[1], 0 , 16) +#define CMD_ASID(x) extract32((x)->word[1], 16, 16) +#define CMD_RESUME_STAG(x) extract32((x)->word[2], 0 , 16) +#define CMD_RESP(x) extract32((x)->word[2], 11, 2) +#define CMD_LEAF(x) extract32((x)->word[2], 0 , 1) +#define CMD_TTL(x) extract32((x)->word[2], 8 , 2) +#define CMD_TG(x) extract32((x)->word[2], 10, 2) +#define CMD_STE_RANGE(x) extract32((x)->word[2], 0 , 5) +#define CMD_ADDR(x) ({ \ + uint64_t high = (uint64_t)(x)->word[3]; \ + uint64_t low = extract32((x)->word[2], 12, 20); \ + uint64_t addr = high << 32 | (low << 12); \ + addr; \ + }) + +#define SMMU_FEATURE_2LVL_STE (1 << 0) + +/* Events */ + +typedef enum SMMUEventType { + SMMU_EVT_NONE = 0x00, + SMMU_EVT_F_UUT , + SMMU_EVT_C_BAD_STREAMID , + SMMU_EVT_F_STE_FETCH , + SMMU_EVT_C_BAD_STE , + SMMU_EVT_F_BAD_ATS_TREQ , + SMMU_EVT_F_STREAM_DISABLED , + SMMU_EVT_F_TRANS_FORBIDDEN , + SMMU_EVT_C_BAD_SUBSTREAMID , + SMMU_EVT_F_CD_FETCH , + SMMU_EVT_C_BAD_CD , + SMMU_EVT_F_WALK_EABT , + SMMU_EVT_F_TRANSLATION = 0x10, + SMMU_EVT_F_ADDR_SIZE , + SMMU_EVT_F_ACCESS , + SMMU_EVT_F_PERMISSION , + SMMU_EVT_F_TLB_CONFLICT = 0x20, + SMMU_EVT_F_CFG_CONFLICT , + SMMU_EVT_E_PAGE_REQ = 0x24, +} SMMUEventType; + +static const char *event_stringify[] = { + [SMMU_EVT_NONE] = "no recorded event", + [SMMU_EVT_F_UUT] = "SMMU_EVT_F_UUT", + [SMMU_EVT_C_BAD_STREAMID] = "SMMU_EVT_C_BAD_STREAMID", + [SMMU_EVT_F_STE_FETCH] = "SMMU_EVT_F_STE_FETCH", + [SMMU_EVT_C_BAD_STE] = "SMMU_EVT_C_BAD_STE", + [SMMU_EVT_F_BAD_ATS_TREQ] = "SMMU_EVT_F_BAD_ATS_TREQ", + [SMMU_EVT_F_STREAM_DISABLED] = "SMMU_EVT_F_STREAM_DISABLED", + [SMMU_EVT_F_TRANS_FORBIDDEN] = "SMMU_EVT_F_TRANS_FORBIDDEN", + [SMMU_EVT_C_BAD_SUBSTREAMID] = "SMMU_EVT_C_BAD_SUBSTREAMID", + [SMMU_EVT_F_CD_FETCH] = "SMMU_EVT_F_CD_FETCH", + [SMMU_EVT_C_BAD_CD] = "SMMU_EVT_C_BAD_CD", + [SMMU_EVT_F_WALK_EABT] = "SMMU_EVT_F_WALK_EABT", + [SMMU_EVT_F_TRANSLATION] = "SMMU_EVT_F_TRANSLATION", + [SMMU_EVT_F_ADDR_SIZE] = 
"SMMU_EVT_F_ADDR_SIZE", + [SMMU_EVT_F_ACCESS] = "SMMU_EVT_F_ACCESS", + [SMMU_EVT_F_PERMISSION] = "SMMU_EVT_F_PERMISSION", + [SMMU_EVT_F_TLB_CONFLICT] = "SMMU_EVT_F_TLB_CONFLICT", + [SMMU_EVT_F_CFG_CONFLICT] = "SMMU_EVT_F_CFG_CONFLICT", + [SMMU_EVT_E_PAGE_REQ] = "SMMU_EVT_E_PAGE_REQ", +}; + +static inline const char *smmu_event_string(SMMUEventType type) +{ + if (type < ARRAY_SIZE(event_stringify)) { + return event_stringify[type] ? event_stringify[type] : "UNKNOWN"; + } else { + return "INVALID"; + } +} + +/* Encode an event record */ +typedef struct SMMUEventInfo { + SMMUEventType type; + uint32_t sid; + bool recorded; + bool record_trans_faults; + bool inval_ste_allowed; + union { + struct { + uint32_t ssid; + bool ssv; + dma_addr_t addr; + bool rnw; + bool pnu; + bool ind; + } f_uut; + struct SSIDInfo { + uint32_t ssid; + bool ssv; + } c_bad_streamid; + struct SSIDAddrInfo { + uint32_t ssid; + bool ssv; + dma_addr_t addr; + } f_ste_fetch; + struct SSIDInfo c_bad_ste; + struct { + dma_addr_t addr; + bool rnw; + } f_transl_forbidden; + struct { + uint32_t ssid; + } c_bad_substream; + struct SSIDAddrInfo f_cd_fetch; + struct SSIDInfo c_bad_cd; + struct FullInfo { + bool stall; + uint16_t stag; + uint32_t ssid; + bool ssv; + bool s2; + dma_addr_t addr; + bool rnw; + bool pnu; + bool ind; + uint8_t class; + dma_addr_t addr2; + } f_walk_eabt; + struct FullInfo f_translation; + struct FullInfo f_addr_size; + struct FullInfo f_access; + struct FullInfo f_permission; + struct SSIDInfo f_cfg_conflict; + /** + * not supported yet: + * F_BAD_ATS_TREQ + * F_BAD_ATS_TREQ + * F_TLB_CONFLICT + * E_PAGE_REQUEST + * IMPDEF_EVENTn + */ + } u; +} SMMUEventInfo; + +/* EVTQ fields */ + +#define EVT_Q_OVERFLOW (1 << 31) + +#define EVT_SET_TYPE(x, v) ((x)->word[0] = deposit32((x)->word[0], 0 , 8 , v)) +#define EVT_SET_SSV(x, v) ((x)->word[0] = deposit32((x)->word[0], 11, 1 , v)) +#define EVT_SET_SSID(x, v) ((x)->word[0] = deposit32((x)->word[0], 12, 20, v)) +#define EVT_SET_SID(x, v) ((x)->word[1] = v) +#define EVT_SET_STAG(x, v) ((x)->word[2] = deposit32((x)->word[2], 0 , 16, v)) +#define EVT_SET_STALL(x, v) ((x)->word[2] = deposit32((x)->word[2], 31, 1 , v)) +#define EVT_SET_PNU(x, v) ((x)->word[3] = deposit32((x)->word[3], 1 , 1 , v)) +#define EVT_SET_IND(x, v) ((x)->word[3] = deposit32((x)->word[3], 2 , 1 , v)) +#define EVT_SET_RNW(x, v) ((x)->word[3] = deposit32((x)->word[3], 3 , 1 , v)) +#define EVT_SET_S2(x, v) ((x)->word[3] = deposit32((x)->word[3], 7 , 1 , v)) +#define EVT_SET_CLASS(x, v) ((x)->word[3] = deposit32((x)->word[3], 8 , 2 , v)) +#define EVT_SET_ADDR(x, addr) \ + do { \ + (x)->word[5] = (uint32_t)(addr >> 32); \ + (x)->word[4] = (uint32_t)(addr & 0xffffffff); \ + } while (0) +#define EVT_SET_ADDR2(x, addr) \ + do { \ + (x)->word[7] = (uint32_t)(addr >> 32); \ + (x)->word[6] = (uint32_t)(addr & 0xffffffff); \ + } while (0) + +void smmuv3_record_event(SMMUv3State *s, SMMUEventInfo *event); + +/* Configuration Data */ + +/* STE Level 1 Descriptor */ +typedef struct STEDesc { + uint32_t word[2]; +} STEDesc; + +/* CD Level 1 Descriptor */ +typedef struct CDDesc { + uint32_t word[2]; +} CDDesc; + +/* Stream Table Entry(STE) */ +typedef struct STE { + uint32_t word[16]; +} STE; + +/* Context Descriptor(CD) */ +typedef struct CD { + uint32_t word[16]; +} CD; + +/* STE fields */ + +#define STE_VALID(x) extract32((x)->word[0], 0, 1) + +#define STE_CONFIG(x) extract32((x)->word[0], 1, 3) +#define STE_CFG_S1_ENABLED(config) (config & 0x1) +#define STE_CFG_S2_ENABLED(config) (config & 0x2) +#define 
STE_CFG_ABORT(config) (!(config & 0x4)) +#define STE_CFG_BYPASS(config) (config == 0x4) + +#define STE_S1FMT(x) extract32((x)->word[0], 4 , 2) +#define STE_S1CDMAX(x) extract32((x)->word[1], 27, 5) +#define STE_S1STALLD(x) extract32((x)->word[2], 27, 1) +#define STE_EATS(x) extract32((x)->word[2], 28, 2) +#define STE_STRW(x) extract32((x)->word[2], 30, 2) +#define STE_S2VMID(x) extract32((x)->word[4], 0 , 16) +#define STE_S2T0SZ(x) extract32((x)->word[5], 0 , 6) +#define STE_S2SL0(x) extract32((x)->word[5], 6 , 2) +#define STE_S2TG(x) extract32((x)->word[5], 14, 2) +#define STE_S2PS(x) extract32((x)->word[5], 16, 3) +#define STE_S2AA64(x) extract32((x)->word[5], 19, 1) +#define STE_S2HD(x) extract32((x)->word[5], 24, 1) +#define STE_S2HA(x) extract32((x)->word[5], 25, 1) +#define STE_S2S(x) extract32((x)->word[5], 26, 1) +#define STE_CTXPTR(x) \ + ({ \ + unsigned long addr; \ + addr = (uint64_t)extract32((x)->word[1], 0, 16) << 32; \ + addr |= (uint64_t)((x)->word[0] & 0xffffffc0); \ + addr; \ + }) + +#define STE_S2TTB(x) \ + ({ \ + unsigned long addr; \ + addr = (uint64_t)extract32((x)->word[7], 0, 16) << 32; \ + addr |= (uint64_t)((x)->word[6] & 0xfffffff0); \ + addr; \ + }) + +static inline int oas2bits(int oas_field) +{ + switch (oas_field) { + case 0: + return 32; + case 1: + return 36; + case 2: + return 40; + case 3: + return 42; + case 4: + return 44; + case 5: + return 48; + } + return -1; +} + +static inline int pa_range(STE *ste) +{ + int oas_field = MIN(STE_S2PS(ste), SMMU_IDR5_OAS); + + if (!STE_S2AA64(ste)) { + return 40; + } + + return oas2bits(oas_field); +} + +#define MAX_PA(ste) ((1 << pa_range(ste)) - 1) + +/* CD fields */ + +#define CD_VALID(x) extract32((x)->word[0], 31, 1) +#define CD_ASID(x) extract32((x)->word[1], 16, 16) +#define CD_TTB(x, sel) \ + ({ \ + uint64_t hi, lo; \ + hi = extract32((x)->word[(sel) * 2 + 3], 0, 19); \ + hi <<= 32; \ + lo = (x)->word[(sel) * 2 + 2] & ~0xfULL; \ + hi | lo; \ + }) +#define CD_HAD(x, sel) extract32((x)->word[(sel) * 2 + 2], 1, 1) + +#define CD_TSZ(x, sel) extract32((x)->word[0], (16 * (sel)) + 0, 6) +#define CD_TG(x, sel) extract32((x)->word[0], (16 * (sel)) + 6, 2) +#define CD_EPD(x, sel) extract32((x)->word[0], (16 * (sel)) + 14, 1) +#define CD_ENDI(x) extract32((x)->word[0], 15, 1) +#define CD_IPS(x) extract32((x)->word[1], 0 , 3) +#define CD_TBI(x) extract32((x)->word[1], 6 , 2) +#define CD_HD(x) extract32((x)->word[1], 10 , 1) +#define CD_HA(x) extract32((x)->word[1], 11 , 1) +#define CD_S(x) extract32((x)->word[1], 12, 1) +#define CD_R(x) extract32((x)->word[1], 13, 1) +#define CD_A(x) extract32((x)->word[1], 14, 1) +#define CD_AARCH64(x) extract32((x)->word[1], 9 , 1) + +/** + * tg2granule - Decodes the CD translation granule size field according + * to the ttbr in use + * @bits: TG0/1 fields + * @ttbr: ttbr index in use + */ +static inline int tg2granule(int bits, int ttbr) +{ + switch (bits) { + case 0: + return ttbr ? 0 : 12; + case 1: + return ttbr ? 14 : 16; + case 2: + return ttbr ? 12 : 14; + case 3: + return ttbr ? 16 : 0; + default: + return 0; + } +} + +static inline uint64_t l1std_l2ptr(STEDesc *desc) +{ + uint64_t hi, lo; + + hi = desc->word[1]; + lo = desc->word[0] & ~0x1fULL; + return hi << 32 | lo; +} + +#define L1STD_SPAN(stm) (extract32((stm)->word[0], 0, 5)) + +#endif diff --git a/hw/arm/smmuv3.c b/hw/arm/smmuv3.c new file mode 100644 index 000000000..01b60bee4 --- /dev/null +++ b/hw/arm/smmuv3.c @@ -0,0 +1,1583 @@ +/* + * Copyright (C) 2014-2016 Broadcom Corporation + * Copyright (c) 2017 Red Hat, Inc. 
+ * Written by Prem Mallappa, Eric Auger + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . + */ + +#include "qemu/osdep.h" +#include "qemu/bitops.h" +#include "hw/irq.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "hw/qdev-core.h" +#include "hw/pci/pci.h" +#include "cpu.h" +#include "trace.h" +#include "qemu/log.h" +#include "qemu/error-report.h" +#include "qapi/error.h" + +#include "hw/arm/smmuv3.h" +#include "smmuv3-internal.h" +#include "smmu-internal.h" + +/** + * smmuv3_trigger_irq - pulse @irq if enabled and update + * GERROR register in case of GERROR interrupt + * + * @irq: irq type + * @gerror_mask: mask of gerrors to toggle (relevant if @irq is GERROR) + */ +static void smmuv3_trigger_irq(SMMUv3State *s, SMMUIrq irq, + uint32_t gerror_mask) +{ + + bool pulse = false; + + switch (irq) { + case SMMU_IRQ_EVTQ: + pulse = smmuv3_eventq_irq_enabled(s); + break; + case SMMU_IRQ_PRIQ: + qemu_log_mask(LOG_UNIMP, "PRI not yet supported\n"); + break; + case SMMU_IRQ_CMD_SYNC: + pulse = true; + break; + case SMMU_IRQ_GERROR: + { + uint32_t pending = s->gerror ^ s->gerrorn; + uint32_t new_gerrors = ~pending & gerror_mask; + + if (!new_gerrors) { + /* only toggle non pending errors */ + return; + } + s->gerror ^= new_gerrors; + trace_smmuv3_write_gerror(new_gerrors, s->gerror); + + pulse = smmuv3_gerror_irq_enabled(s); + break; + } + } + if (pulse) { + trace_smmuv3_trigger_irq(irq); + qemu_irq_pulse(s->irq[irq]); + } +} + +static void smmuv3_write_gerrorn(SMMUv3State *s, uint32_t new_gerrorn) +{ + uint32_t pending = s->gerror ^ s->gerrorn; + uint32_t toggled = s->gerrorn ^ new_gerrorn; + + if (toggled & ~pending) { + qemu_log_mask(LOG_GUEST_ERROR, + "guest toggles non pending errors = 0x%x\n", + toggled & ~pending); + } + + /* + * We do not raise any error in case guest toggles bits corresponding + * to not active IRQs (CONSTRAINED UNPREDICTABLE) + */ + s->gerrorn = new_gerrorn; + + trace_smmuv3_write_gerrorn(toggled & pending, s->gerrorn); +} + +static inline MemTxResult queue_read(SMMUQueue *q, void *data) +{ + dma_addr_t addr = Q_CONS_ENTRY(q); + + return dma_memory_read(&address_space_memory, addr, data, q->entry_size); +} + +static MemTxResult queue_write(SMMUQueue *q, void *data) +{ + dma_addr_t addr = Q_PROD_ENTRY(q); + MemTxResult ret; + + ret = dma_memory_write(&address_space_memory, addr, data, q->entry_size); + if (ret != MEMTX_OK) { + return ret; + } + + queue_prod_incr(q); + return MEMTX_OK; +} + +static MemTxResult smmuv3_write_eventq(SMMUv3State *s, Evt *evt) +{ + SMMUQueue *q = &s->eventq; + MemTxResult r; + + if (!smmuv3_eventq_enabled(s)) { + return MEMTX_ERROR; + } + + if (smmuv3_q_full(q)) { + return MEMTX_ERROR; + } + + r = queue_write(q, evt); + if (r != MEMTX_OK) { + return r; + } + + if (!smmuv3_q_empty(q)) { + smmuv3_trigger_irq(s, SMMU_IRQ_EVTQ, 0); + } + return MEMTX_OK; +} + +void smmuv3_record_event(SMMUv3State *s, SMMUEventInfo *info) +{ + Evt evt = {}; + MemTxResult r; + + if (!smmuv3_eventq_enabled(s)) { + return; + } + + 
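/* + * Build the 32-byte event record: the type and StreamID fields are + * common to every event; the rest depend on the event type and are + * deposited with the EVT_SET_* macros below. + */ + 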
EVT_SET_TYPE(&evt, info->type); + EVT_SET_SID(&evt, info->sid); + + switch (info->type) { + case SMMU_EVT_NONE: + return; + case SMMU_EVT_F_UUT: + EVT_SET_SSID(&evt, info->u.f_uut.ssid); + EVT_SET_SSV(&evt, info->u.f_uut.ssv); + EVT_SET_ADDR(&evt, info->u.f_uut.addr); + EVT_SET_RNW(&evt, info->u.f_uut.rnw); + EVT_SET_PNU(&evt, info->u.f_uut.pnu); + EVT_SET_IND(&evt, info->u.f_uut.ind); + break; + case SMMU_EVT_C_BAD_STREAMID: + EVT_SET_SSID(&evt, info->u.c_bad_streamid.ssid); + EVT_SET_SSV(&evt, info->u.c_bad_streamid.ssv); + break; + case SMMU_EVT_F_STE_FETCH: + EVT_SET_SSID(&evt, info->u.f_ste_fetch.ssid); + EVT_SET_SSV(&evt, info->u.f_ste_fetch.ssv); + EVT_SET_ADDR2(&evt, info->u.f_ste_fetch.addr); + break; + case SMMU_EVT_C_BAD_STE: + EVT_SET_SSID(&evt, info->u.c_bad_ste.ssid); + EVT_SET_SSV(&evt, info->u.c_bad_ste.ssv); + break; + case SMMU_EVT_F_STREAM_DISABLED: + break; + case SMMU_EVT_F_TRANS_FORBIDDEN: + EVT_SET_ADDR(&evt, info->u.f_transl_forbidden.addr); + EVT_SET_RNW(&evt, info->u.f_transl_forbidden.rnw); + break; + case SMMU_EVT_C_BAD_SUBSTREAMID: + EVT_SET_SSID(&evt, info->u.c_bad_substream.ssid); + break; + case SMMU_EVT_F_CD_FETCH: + EVT_SET_SSID(&evt, info->u.f_cd_fetch.ssid); + EVT_SET_SSV(&evt, info->u.f_cd_fetch.ssv); + EVT_SET_ADDR(&evt, info->u.f_cd_fetch.addr); + break; + case SMMU_EVT_C_BAD_CD: + EVT_SET_SSID(&evt, info->u.c_bad_cd.ssid); + EVT_SET_SSV(&evt, info->u.c_bad_cd.ssv); + break; + case SMMU_EVT_F_WALK_EABT: + case SMMU_EVT_F_TRANSLATION: + case SMMU_EVT_F_ADDR_SIZE: + case SMMU_EVT_F_ACCESS: + case SMMU_EVT_F_PERMISSION: + EVT_SET_STALL(&evt, info->u.f_walk_eabt.stall); + EVT_SET_STAG(&evt, info->u.f_walk_eabt.stag); + EVT_SET_SSID(&evt, info->u.f_walk_eabt.ssid); + EVT_SET_SSV(&evt, info->u.f_walk_eabt.ssv); + EVT_SET_S2(&evt, info->u.f_walk_eabt.s2); + EVT_SET_ADDR(&evt, info->u.f_walk_eabt.addr); + EVT_SET_RNW(&evt, info->u.f_walk_eabt.rnw); + EVT_SET_PNU(&evt, info->u.f_walk_eabt.pnu); + EVT_SET_IND(&evt, info->u.f_walk_eabt.ind); + EVT_SET_CLASS(&evt, info->u.f_walk_eabt.class); + EVT_SET_ADDR2(&evt, info->u.f_walk_eabt.addr2); + break; + case SMMU_EVT_F_CFG_CONFLICT: + EVT_SET_SSID(&evt, info->u.f_cfg_conflict.ssid); + EVT_SET_SSV(&evt, info->u.f_cfg_conflict.ssv); + break; + /* rest is not implemented */ + case SMMU_EVT_F_BAD_ATS_TREQ: + case SMMU_EVT_F_TLB_CONFLICT: + case SMMU_EVT_E_PAGE_REQ: + default: + g_assert_not_reached(); + } + + trace_smmuv3_record_event(smmu_event_string(info->type), info->sid); + r = smmuv3_write_eventq(s, &evt); + if (r != MEMTX_OK) { + smmuv3_trigger_irq(s, SMMU_IRQ_GERROR, R_GERROR_EVENTQ_ABT_ERR_MASK); + } + info->recorded = true; +} + +static void smmuv3_init_regs(SMMUv3State *s) +{ + /** + * IDR0: stage1 only, AArch64 only, coherent access, 16b ASID, + * multi-level stream table + */ + s->idr[0] = FIELD_DP32(s->idr[0], IDR0, S1P, 1); /* stage 1 supported */ + s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TTF, 2); /* AArch64 PTW only */ + s->idr[0] = FIELD_DP32(s->idr[0], IDR0, COHACC, 1); /* IO coherent */ + s->idr[0] = FIELD_DP32(s->idr[0], IDR0, ASID16, 1); /* 16-bit ASID */ + s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TTENDIAN, 2); /* little endian */ + s->idr[0] = FIELD_DP32(s->idr[0], IDR0, STALL_MODEL, 1); /* No stall */ + /* terminated transaction will always be aborted/error returned */ + s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TERM_MODEL, 1); + /* 2-level stream table supported */ + s->idr[0] = FIELD_DP32(s->idr[0], IDR0, STLEVEL, 1); + + s->idr[1] = FIELD_DP32(s->idr[1], IDR1, SIDSIZE, SMMU_IDR1_SIDSIZE); + 
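/* + * Advertise the maximum queue sizes; a guest may program smaller + * queues, and writes to CMDQ_BASE/EVENTQ_BASE clamp LOG2SIZE to + * these limits (19, i.e. up to 512K entries). + */ + 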
s->idr[1] = FIELD_DP32(s->idr[1], IDR1, EVENTQS, SMMU_EVENTQS); + s->idr[1] = FIELD_DP32(s->idr[1], IDR1, CMDQS, SMMU_CMDQS); + + s->idr[3] = FIELD_DP32(s->idr[3], IDR3, RIL, 1); + s->idr[3] = FIELD_DP32(s->idr[3], IDR3, HAD, 1); + + /* 4K, 16K and 64K granule support */ + s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN4K, 1); + s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN16K, 1); + s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN64K, 1); + s->idr[5] = FIELD_DP32(s->idr[5], IDR5, OAS, SMMU_IDR5_OAS); /* 44 bits */ + + s->cmdq.base = deposit64(s->cmdq.base, 0, 5, SMMU_CMDQS); + s->cmdq.prod = 0; + s->cmdq.cons = 0; + s->cmdq.entry_size = sizeof(struct Cmd); + s->eventq.base = deposit64(s->eventq.base, 0, 5, SMMU_EVENTQS); + s->eventq.prod = 0; + s->eventq.cons = 0; + s->eventq.entry_size = sizeof(struct Evt); + + s->features = 0; + s->sid_split = 0; + s->aidr = 0x1; +} + +static int smmu_get_ste(SMMUv3State *s, dma_addr_t addr, STE *buf, + SMMUEventInfo *event) +{ + int ret; + + trace_smmuv3_get_ste(addr); + /* TODO: guarantee 64-bit single-copy atomicity */ + ret = dma_memory_read(&address_space_memory, addr, buf, sizeof(*buf)); + if (ret != MEMTX_OK) { + qemu_log_mask(LOG_GUEST_ERROR, + "Cannot fetch pte at address=0x%"PRIx64"\n", addr); + event->type = SMMU_EVT_F_STE_FETCH; + event->u.f_ste_fetch.addr = addr; + return -EINVAL; + } + return 0; + +} + +/* @ssid > 0 not supported yet */ +static int smmu_get_cd(SMMUv3State *s, STE *ste, uint32_t ssid, + CD *buf, SMMUEventInfo *event) +{ + dma_addr_t addr = STE_CTXPTR(ste); + int ret; + + trace_smmuv3_get_cd(addr); + /* TODO: guarantee 64-bit single-copy atomicity */ + ret = dma_memory_read(&address_space_memory, addr, buf, sizeof(*buf)); + if (ret != MEMTX_OK) { + qemu_log_mask(LOG_GUEST_ERROR, + "Cannot fetch pte at address=0x%"PRIx64"\n", addr); + event->type = SMMU_EVT_F_CD_FETCH; + event->u.f_ste_fetch.addr = addr; + return -EINVAL; + } + return 0; +} + +/* Returns < 0 in case of invalid STE, 0 otherwise */ +static int decode_ste(SMMUv3State *s, SMMUTransCfg *cfg, + STE *ste, SMMUEventInfo *event) +{ + uint32_t config; + + if (!STE_VALID(ste)) { + if (!event->inval_ste_allowed) { + qemu_log_mask(LOG_GUEST_ERROR, "invalid STE\n"); + } + goto bad_ste; + } + + config = STE_CONFIG(ste); + + if (STE_CFG_ABORT(config)) { + cfg->aborted = true; + return 0; + } + + if (STE_CFG_BYPASS(config)) { + cfg->bypassed = true; + return 0; + } + + if (STE_CFG_S2_ENABLED(config)) { + qemu_log_mask(LOG_UNIMP, "SMMUv3 does not support stage 2 yet\n"); + goto bad_ste; + } + + if (STE_S1CDMAX(ste) != 0) { + qemu_log_mask(LOG_UNIMP, + "SMMUv3 does not support multiple context descriptors yet\n"); + goto bad_ste; + } + + if (STE_S1STALLD(ste)) { + qemu_log_mask(LOG_UNIMP, + "SMMUv3 S1 stalling fault model not allowed yet\n"); + goto bad_ste; + } + return 0; + +bad_ste: + event->type = SMMU_EVT_C_BAD_STE; + return -EINVAL; +} + +/** + * smmu_find_ste - Return the stream table entry associated + * to the sid + * + * @s: smmuv3 handle + * @sid: stream ID + * @ste: returned stream table entry + * @event: handle to an event info + * + * Supports linear and 2-level stream table + * Return 0 on success, -EINVAL otherwise + */ +static int smmu_find_ste(SMMUv3State *s, uint32_t sid, STE *ste, + SMMUEventInfo *event) +{ + dma_addr_t addr, strtab_base; + uint32_t log2size; + int strtab_size_shift; + int ret; + + trace_smmuv3_find_ste(sid, s->features, s->sid_split); + log2size = FIELD_EX32(s->strtab_base_cfg, STRTAB_BASE_CFG, LOG2SIZE); + /* + * Check SID range against both 
guest-configured and implementation limits + */ + if (sid >= (1 << MIN(log2size, SMMU_IDR1_SIDSIZE))) { + event->type = SMMU_EVT_C_BAD_STREAMID; + return -EINVAL; + } + if (s->features & SMMU_FEATURE_2LVL_STE) { + int l1_ste_offset, l2_ste_offset, max_l2_ste, span; + dma_addr_t l1ptr, l2ptr; + STEDesc l1std; + + /* + * Align strtab base address to table size. For this purpose, assume it + * is not bounded by SMMU_IDR1_SIDSIZE. + */ + strtab_size_shift = MAX(5, (int)log2size - s->sid_split - 1 + 3); + strtab_base = s->strtab_base & SMMU_BASE_ADDR_MASK & + ~MAKE_64BIT_MASK(0, strtab_size_shift); + l1_ste_offset = sid >> s->sid_split; + l2_ste_offset = sid & ((1 << s->sid_split) - 1); + l1ptr = (dma_addr_t)(strtab_base + l1_ste_offset * sizeof(l1std)); + /* TODO: guarantee 64-bit single-copy atomicity */ + ret = dma_memory_read(&address_space_memory, l1ptr, &l1std, + sizeof(l1std)); + if (ret != MEMTX_OK) { + qemu_log_mask(LOG_GUEST_ERROR, + "Could not read L1PTR at 0X%"PRIx64"\n", l1ptr); + event->type = SMMU_EVT_F_STE_FETCH; + event->u.f_ste_fetch.addr = l1ptr; + return -EINVAL; + } + + span = L1STD_SPAN(&l1std); + + if (!span) { + /* l2ptr is not valid */ + if (!event->inval_ste_allowed) { + qemu_log_mask(LOG_GUEST_ERROR, + "invalid sid=%d (L1STD span=0)\n", sid); + } + event->type = SMMU_EVT_C_BAD_STREAMID; + return -EINVAL; + } + max_l2_ste = (1 << span) - 1; + l2ptr = l1std_l2ptr(&l1std); + trace_smmuv3_find_ste_2lvl(s->strtab_base, l1ptr, l1_ste_offset, + l2ptr, l2_ste_offset, max_l2_ste); + if (l2_ste_offset > max_l2_ste) { + qemu_log_mask(LOG_GUEST_ERROR, + "l2_ste_offset=%d > max_l2_ste=%d\n", + l2_ste_offset, max_l2_ste); + event->type = SMMU_EVT_C_BAD_STE; + return -EINVAL; + } + addr = l2ptr + l2_ste_offset * sizeof(*ste); + } else { + strtab_size_shift = log2size + 5; + strtab_base = s->strtab_base & SMMU_BASE_ADDR_MASK & + ~MAKE_64BIT_MASK(0, strtab_size_shift); + addr = strtab_base + sid * sizeof(*ste); + } + + if (smmu_get_ste(s, addr, ste, event)) { + return -EINVAL; + } + + return 0; +} + +static int decode_cd(SMMUTransCfg *cfg, CD *cd, SMMUEventInfo *event) +{ + int ret = -EINVAL; + int i; + + if (!CD_VALID(cd) || !CD_AARCH64(cd)) { + goto bad_cd; + } + if (!CD_A(cd)) { + goto bad_cd; /* SMMU_IDR0.TERM_MODEL == 1 */ + } + if (CD_S(cd)) { + goto bad_cd; /* !STE_SECURE && SMMU_IDR0.STALL_MODEL == 1 */ + } + if (CD_HA(cd) || CD_HD(cd)) { + goto bad_cd; /* HTTU = 0 */ + } + + /* we support only those at the moment */ + cfg->aa64 = true; + cfg->stage = 1; + + cfg->oas = oas2bits(CD_IPS(cd)); + cfg->oas = MIN(oas2bits(SMMU_IDR5_OAS), cfg->oas); + cfg->tbi = CD_TBI(cd); + cfg->asid = CD_ASID(cd); + + trace_smmuv3_decode_cd(cfg->oas); + + /* decode data dependent on TT */ + for (i = 0; i <= 1; i++) { + int tg, tsz; + SMMUTransTableInfo *tt = &cfg->tt[i]; + + cfg->tt[i].disabled = CD_EPD(cd, i); + if (cfg->tt[i].disabled) { + continue; + } + + tsz = CD_TSZ(cd, i); + if (tsz < 16 || tsz > 39) { + goto bad_cd; + } + + tg = CD_TG(cd, i); + tt->granule_sz = tg2granule(tg, i); + if ((tt->granule_sz != 12 && tt->granule_sz != 14 && + tt->granule_sz != 16) || CD_ENDI(cd)) { + goto bad_cd; + } + + tt->tsz = tsz; + tt->ttb = CD_TTB(cd, i); + if (tt->ttb & ~(MAKE_64BIT_MASK(0, cfg->oas))) { + goto bad_cd; + } + tt->had = CD_HAD(cd, i); + trace_smmuv3_decode_cd_tt(i, tt->tsz, tt->ttb, tt->granule_sz, tt->had); + } + + event->record_trans_faults = CD_R(cd); + + return 0; + +bad_cd: + event->type = SMMU_EVT_C_BAD_CD; + return ret; +} + +/** + * smmuv3_decode_config - Prepare the translation 
configuration + * for the @mr iommu region + * @mr: iommu memory region the translation config must be prepared for + * @cfg: output translation configuration which is populated through + * the different configuration decoding steps + * @event: must be zero'ed by the caller + * + * return < 0 in case of config decoding error (@event is filled + * accordingly). Return 0 otherwise. + */ +static int smmuv3_decode_config(IOMMUMemoryRegion *mr, SMMUTransCfg *cfg, + SMMUEventInfo *event) +{ + SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu); + uint32_t sid = smmu_get_sid(sdev); + SMMUv3State *s = sdev->smmu; + int ret; + STE ste; + CD cd; + + ret = smmu_find_ste(s, sid, &ste, event); + if (ret) { + return ret; + } + + ret = decode_ste(s, cfg, &ste, event); + if (ret) { + return ret; + } + + if (cfg->aborted || cfg->bypassed) { + return 0; + } + + ret = smmu_get_cd(s, &ste, 0 /* ssid */, &cd, event); + if (ret) { + return ret; + } + + return decode_cd(cfg, &cd, event); +} + +/** + * smmuv3_get_config - Look up for a cached copy of configuration data for + * @sdev and on cache miss performs a configuration structure decoding from + * guest RAM. + * + * @sdev: SMMUDevice handle + * @event: output event info + * + * The configuration cache contains data resulting from both STE and CD + * decoding under the form of an SMMUTransCfg struct. The hash table is indexed + * by the SMMUDevice handle. + */ +static SMMUTransCfg *smmuv3_get_config(SMMUDevice *sdev, SMMUEventInfo *event) +{ + SMMUv3State *s = sdev->smmu; + SMMUState *bc = &s->smmu_state; + SMMUTransCfg *cfg; + + cfg = g_hash_table_lookup(bc->configs, sdev); + if (cfg) { + sdev->cfg_cache_hits++; + trace_smmuv3_config_cache_hit(smmu_get_sid(sdev), + sdev->cfg_cache_hits, sdev->cfg_cache_misses, + 100 * sdev->cfg_cache_hits / + (sdev->cfg_cache_hits + sdev->cfg_cache_misses)); + } else { + sdev->cfg_cache_misses++; + trace_smmuv3_config_cache_miss(smmu_get_sid(sdev), + sdev->cfg_cache_hits, sdev->cfg_cache_misses, + 100 * sdev->cfg_cache_hits / + (sdev->cfg_cache_hits + sdev->cfg_cache_misses)); + cfg = g_new0(SMMUTransCfg, 1); + + if (!smmuv3_decode_config(&sdev->iommu, cfg, event)) { + g_hash_table_insert(bc->configs, sdev, cfg); + } else { + g_free(cfg); + cfg = NULL; + } + } + return cfg; +} + +static void smmuv3_flush_config(SMMUDevice *sdev) +{ + SMMUv3State *s = sdev->smmu; + SMMUState *bc = &s->smmu_state; + + trace_smmuv3_config_cache_inv(smmu_get_sid(sdev)); + g_hash_table_remove(bc->configs, sdev); +} + +static IOMMUTLBEntry smmuv3_translate(IOMMUMemoryRegion *mr, hwaddr addr, + IOMMUAccessFlags flag, int iommu_idx) +{ + SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu); + SMMUv3State *s = sdev->smmu; + uint32_t sid = smmu_get_sid(sdev); + SMMUEventInfo event = {.type = SMMU_EVT_NONE, + .sid = sid, + .inval_ste_allowed = false}; + SMMUPTWEventInfo ptw_info = {}; + SMMUTranslationStatus status; + SMMUState *bs = ARM_SMMU(s); + uint64_t page_mask, aligned_addr; + SMMUTLBEntry *cached_entry = NULL; + SMMUTransTableInfo *tt; + SMMUTransCfg *cfg = NULL; + IOMMUTLBEntry entry = { + .target_as = &address_space_memory, + .iova = addr, + .translated_addr = addr, + .addr_mask = ~(hwaddr)0, + .perm = IOMMU_NONE, + }; + + qemu_mutex_lock(&s->mutex); + + if (!smmu_enabled(s)) { + status = SMMU_TRANS_DISABLE; + goto epilogue; + } + + cfg = smmuv3_get_config(sdev, &event); + if (!cfg) { + status = SMMU_TRANS_ERROR; + goto epilogue; + } + + if (cfg->aborted) { + status = SMMU_TRANS_ABORT; + goto epilogue; + } + + if (cfg->bypassed) { + 
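/* the STE requested bypass: complete the transaction untranslated */ + 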
status = SMMU_TRANS_BYPASS; + goto epilogue; + } + + tt = select_tt(cfg, addr); + if (!tt) { + if (event.record_trans_faults) { + event.type = SMMU_EVT_F_TRANSLATION; + event.u.f_translation.addr = addr; + event.u.f_translation.rnw = flag & 0x1; + } + status = SMMU_TRANS_ERROR; + goto epilogue; + } + + page_mask = (1ULL << (tt->granule_sz)) - 1; + aligned_addr = addr & ~page_mask; + + cached_entry = smmu_iotlb_lookup(bs, cfg, tt, aligned_addr); + if (cached_entry) { + if ((flag & IOMMU_WO) && !(cached_entry->entry.perm & IOMMU_WO)) { + status = SMMU_TRANS_ERROR; + if (event.record_trans_faults) { + event.type = SMMU_EVT_F_PERMISSION; + event.u.f_permission.addr = addr; + event.u.f_permission.rnw = flag & 0x1; + } + } else { + status = SMMU_TRANS_SUCCESS; + } + goto epilogue; + } + + cached_entry = g_new0(SMMUTLBEntry, 1); + + if (smmu_ptw(cfg, aligned_addr, flag, cached_entry, &ptw_info)) { + g_free(cached_entry); + switch (ptw_info.type) { + case SMMU_PTW_ERR_WALK_EABT: + event.type = SMMU_EVT_F_WALK_EABT; + event.u.f_walk_eabt.addr = addr; + event.u.f_walk_eabt.rnw = flag & 0x1; + event.u.f_walk_eabt.class = 0x1; + event.u.f_walk_eabt.addr2 = ptw_info.addr; + break; + case SMMU_PTW_ERR_TRANSLATION: + if (event.record_trans_faults) { + event.type = SMMU_EVT_F_TRANSLATION; + event.u.f_translation.addr = addr; + event.u.f_translation.rnw = flag & 0x1; + } + break; + case SMMU_PTW_ERR_ADDR_SIZE: + if (event.record_trans_faults) { + event.type = SMMU_EVT_F_ADDR_SIZE; + event.u.f_addr_size.addr = addr; + event.u.f_addr_size.rnw = flag & 0x1; + } + break; + case SMMU_PTW_ERR_ACCESS: + if (event.record_trans_faults) { + event.type = SMMU_EVT_F_ACCESS; + event.u.f_access.addr = addr; + event.u.f_access.rnw = flag & 0x1; + } + break; + case SMMU_PTW_ERR_PERMISSION: + if (event.record_trans_faults) { + event.type = SMMU_EVT_F_PERMISSION; + event.u.f_permission.addr = addr; + event.u.f_permission.rnw = flag & 0x1; + } + break; + default: + g_assert_not_reached(); + } + status = SMMU_TRANS_ERROR; + } else { + smmu_iotlb_insert(bs, cfg, cached_entry); + status = SMMU_TRANS_SUCCESS; + } + +epilogue: + qemu_mutex_unlock(&s->mutex); + switch (status) { + case SMMU_TRANS_SUCCESS: + entry.perm = flag; + entry.translated_addr = cached_entry->entry.translated_addr + + (addr & cached_entry->entry.addr_mask); + entry.addr_mask = cached_entry->entry.addr_mask; + trace_smmuv3_translate_success(mr->parent_obj.name, sid, addr, + entry.translated_addr, entry.perm); + break; + case SMMU_TRANS_DISABLE: + entry.perm = flag; + entry.addr_mask = ~TARGET_PAGE_MASK; + trace_smmuv3_translate_disable(mr->parent_obj.name, sid, addr, + entry.perm); + break; + case SMMU_TRANS_BYPASS: + entry.perm = flag; + entry.addr_mask = ~TARGET_PAGE_MASK; + trace_smmuv3_translate_bypass(mr->parent_obj.name, sid, addr, + entry.perm); + break; + case SMMU_TRANS_ABORT: + /* no event is recorded on abort */ + trace_smmuv3_translate_abort(mr->parent_obj.name, sid, addr, + entry.perm); + break; + case SMMU_TRANS_ERROR: + qemu_log_mask(LOG_GUEST_ERROR, + "%s translation failed for iova=0x%"PRIx64"(%s)\n", + mr->parent_obj.name, addr, smmu_event_string(event.type)); + smmuv3_record_event(s, &event); + break; + } + + return entry; +} + +/** + * smmuv3_notify_iova - call the notifier @n for a given + * @asid and @iova tuple. 
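+ * When no translation granule is supplied (tg == 0), the granule is + * recovered from the device's cached translation config through + * select_tt(); otherwise it decodes as 2^(tg * 2 + 10) bytes.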
+ * + * @mr: IOMMU mr region handle + * @n: notifier to be called + * @asid: address space ID or negative value if we don't care + * @iova: iova + * @tg: translation granule (if communicated through range invalidation) + * @num_pages: number of @granule sized pages (if tg != 0), otherwise 1 + */ +static void smmuv3_notify_iova(IOMMUMemoryRegion *mr, + IOMMUNotifier *n, + int asid, dma_addr_t iova, + uint8_t tg, uint64_t num_pages) +{ + SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu); + IOMMUTLBEvent event; + uint8_t granule; + + if (!tg) { + SMMUEventInfo event = {.inval_ste_allowed = true}; + SMMUTransCfg *cfg = smmuv3_get_config(sdev, &event); + SMMUTransTableInfo *tt; + + if (!cfg) { + return; + } + + if (asid >= 0 && cfg->asid != asid) { + return; + } + + tt = select_tt(cfg, iova); + if (!tt) { + return; + } + granule = tt->granule_sz; + } else { + granule = tg * 2 + 10; + } + + event.type = IOMMU_NOTIFIER_UNMAP; + event.entry.target_as = &address_space_memory; + event.entry.iova = iova; + event.entry.addr_mask = num_pages * (1 << granule) - 1; + event.entry.perm = IOMMU_NONE; + + memory_region_notify_iommu_one(n, &event); +} + +/* invalidate an asid/iova range tuple in all mr's */ +static void smmuv3_inv_notifiers_iova(SMMUState *s, int asid, dma_addr_t iova, + uint8_t tg, uint64_t num_pages) +{ + SMMUDevice *sdev; + + QLIST_FOREACH(sdev, &s->devices_with_notifiers, next) { + IOMMUMemoryRegion *mr = &sdev->iommu; + IOMMUNotifier *n; + + trace_smmuv3_inv_notifiers_iova(mr->parent_obj.name, asid, iova, + tg, num_pages); + + IOMMU_NOTIFIER_FOREACH(n, mr) { + smmuv3_notify_iova(mr, n, asid, iova, tg, num_pages); + } + } +} + +static void smmuv3_s1_range_inval(SMMUState *s, Cmd *cmd) +{ + dma_addr_t end, addr = CMD_ADDR(cmd); + uint8_t type = CMD_TYPE(cmd); + uint16_t vmid = CMD_VMID(cmd); + uint8_t scale = CMD_SCALE(cmd); + uint8_t num = CMD_NUM(cmd); + uint8_t ttl = CMD_TTL(cmd); + bool leaf = CMD_LEAF(cmd); + uint8_t tg = CMD_TG(cmd); + uint64_t num_pages; + uint8_t granule; + int asid = -1; + + if (type == SMMU_CMD_TLBI_NH_VA) { + asid = CMD_ASID(cmd); + } + + if (!tg) { + trace_smmuv3_s1_range_inval(vmid, asid, addr, tg, 1, ttl, leaf); + smmuv3_inv_notifiers_iova(s, asid, addr, tg, 1); + smmu_iotlb_inv_iova(s, asid, addr, tg, 1, ttl); + return; + } + + /* RIL in use */ + + num_pages = (num + 1) * BIT_ULL(scale); + granule = tg * 2 + 10; + + /* Split invalidations into ^2 range invalidations */ + end = addr + (num_pages << granule) - 1; + + while (addr != end + 1) { + uint64_t mask = dma_aligned_pow2_mask(addr, end, 64); + + num_pages = (mask + 1) >> granule; + trace_smmuv3_s1_range_inval(vmid, asid, addr, tg, num_pages, ttl, leaf); + smmuv3_inv_notifiers_iova(s, asid, addr, tg, num_pages); + smmu_iotlb_inv_iova(s, asid, addr, tg, num_pages, ttl); + addr += mask + 1; + } +} + +static gboolean +smmuv3_invalidate_ste(gpointer key, gpointer value, gpointer user_data) +{ + SMMUDevice *sdev = (SMMUDevice *)key; + uint32_t sid = smmu_get_sid(sdev); + SMMUSIDRange *sid_range = (SMMUSIDRange *)user_data; + + if (sid < sid_range->start || sid > sid_range->end) { + return false; + } + trace_smmuv3_config_cache_inv(sid); + return true; +} + +static int smmuv3_cmdq_consume(SMMUv3State *s) +{ + SMMUState *bs = ARM_SMMU(s); + SMMUCmdError cmd_error = SMMU_CERROR_NONE; + SMMUQueue *q = &s->cmdq; + SMMUCommandType type = 0; + + if (!smmuv3_cmdq_enabled(s)) { + return 0; + } + /* + * some commands depend on register values, typically CR0. 
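(CR0 gates the SMMU, command queue and event queue enables.)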
In case those + * register values change while handling the command, spec says it + * is UNPREDICTABLE whether the command is interpreted under the new + * or old value. + */ + + while (!smmuv3_q_empty(q)) { + uint32_t pending = s->gerror ^ s->gerrorn; + Cmd cmd; + + trace_smmuv3_cmdq_consume(Q_PROD(q), Q_CONS(q), + Q_PROD_WRAP(q), Q_CONS_WRAP(q)); + + if (FIELD_EX32(pending, GERROR, CMDQ_ERR)) { + break; + } + + if (queue_read(q, &cmd) != MEMTX_OK) { + cmd_error = SMMU_CERROR_ABT; + break; + } + + type = CMD_TYPE(&cmd); + + trace_smmuv3_cmdq_opcode(smmu_cmd_string(type)); + + qemu_mutex_lock(&s->mutex); + switch (type) { + case SMMU_CMD_SYNC: + if (CMD_SYNC_CS(&cmd) & CMD_SYNC_SIG_IRQ) { + smmuv3_trigger_irq(s, SMMU_IRQ_CMD_SYNC, 0); + } + break; + case SMMU_CMD_PREFETCH_CONFIG: + case SMMU_CMD_PREFETCH_ADDR: + break; + case SMMU_CMD_CFGI_STE: + { + uint32_t sid = CMD_SID(&cmd); + IOMMUMemoryRegion *mr = smmu_iommu_mr(bs, sid); + SMMUDevice *sdev; + + if (CMD_SSEC(&cmd)) { + cmd_error = SMMU_CERROR_ILL; + break; + } + + if (!mr) { + break; + } + + trace_smmuv3_cmdq_cfgi_ste(sid); + sdev = container_of(mr, SMMUDevice, iommu); + smmuv3_flush_config(sdev); + + break; + } + case SMMU_CMD_CFGI_STE_RANGE: /* same as SMMU_CMD_CFGI_ALL */ + { + uint32_t sid = CMD_SID(&cmd), mask; + uint8_t range = CMD_STE_RANGE(&cmd); + SMMUSIDRange sid_range; + + if (CMD_SSEC(&cmd)) { + cmd_error = SMMU_CERROR_ILL; + break; + } + + mask = (1ULL << (range + 1)) - 1; + sid_range.start = sid & ~mask; + sid_range.end = sid_range.start + mask; + + trace_smmuv3_cmdq_cfgi_ste_range(sid_range.start, sid_range.end); + g_hash_table_foreach_remove(bs->configs, smmuv3_invalidate_ste, + &sid_range); + break; + } + case SMMU_CMD_CFGI_CD: + case SMMU_CMD_CFGI_CD_ALL: + { + uint32_t sid = CMD_SID(&cmd); + IOMMUMemoryRegion *mr = smmu_iommu_mr(bs, sid); + SMMUDevice *sdev; + + if (CMD_SSEC(&cmd)) { + cmd_error = SMMU_CERROR_ILL; + break; + } + + if (!mr) { + break; + } + + trace_smmuv3_cmdq_cfgi_cd(sid); + sdev = container_of(mr, SMMUDevice, iommu); + smmuv3_flush_config(sdev); + break; + } + case SMMU_CMD_TLBI_NH_ASID: + { + uint16_t asid = CMD_ASID(&cmd); + + trace_smmuv3_cmdq_tlbi_nh_asid(asid); + smmu_inv_notifiers_all(&s->smmu_state); + smmu_iotlb_inv_asid(bs, asid); + break; + } + case SMMU_CMD_TLBI_NH_ALL: + case SMMU_CMD_TLBI_NSNH_ALL: + trace_smmuv3_cmdq_tlbi_nh(); + smmu_inv_notifiers_all(&s->smmu_state); + smmu_iotlb_inv_all(bs); + break; + case SMMU_CMD_TLBI_NH_VAA: + case SMMU_CMD_TLBI_NH_VA: + smmuv3_s1_range_inval(bs, &cmd); + break; + case SMMU_CMD_TLBI_EL3_ALL: + case SMMU_CMD_TLBI_EL3_VA: + case SMMU_CMD_TLBI_EL2_ALL: + case SMMU_CMD_TLBI_EL2_ASID: + case SMMU_CMD_TLBI_EL2_VA: + case SMMU_CMD_TLBI_EL2_VAA: + case SMMU_CMD_TLBI_S12_VMALL: + case SMMU_CMD_TLBI_S2_IPA: + case SMMU_CMD_ATC_INV: + case SMMU_CMD_PRI_RESP: + case SMMU_CMD_RESUME: + case SMMU_CMD_STALL_TERM: + trace_smmuv3_unhandled_cmd(type); + break; + default: + cmd_error = SMMU_CERROR_ILL; + qemu_log_mask(LOG_GUEST_ERROR, + "Illegal command type: %d\n", CMD_TYPE(&cmd)); + break; + } + qemu_mutex_unlock(&s->mutex); + if (cmd_error) { + break; + } + /* + * We only increment the cons index after the completion of + * the command. 
We do that because the SYNC returns immediately + * and does not check the completion of previous commands. + */ + queue_cons_incr(q); + } + + if (cmd_error) { + trace_smmuv3_cmdq_consume_error(smmu_cmd_string(type), cmd_error); + smmu_write_cmdq_err(s, cmd_error); + smmuv3_trigger_irq(s, SMMU_IRQ_GERROR, R_GERROR_CMDQ_ERR_MASK); + } + + trace_smmuv3_cmdq_consume_out(Q_PROD(q), Q_CONS(q), + Q_PROD_WRAP(q), Q_CONS_WRAP(q)); + + return 0; +} + +static MemTxResult smmu_writell(SMMUv3State *s, hwaddr offset, + uint64_t data, MemTxAttrs attrs) +{ + switch (offset) { + case A_GERROR_IRQ_CFG0: + s->gerror_irq_cfg0 = data; + return MEMTX_OK; + case A_STRTAB_BASE: + s->strtab_base = data; + return MEMTX_OK; + case A_CMDQ_BASE: + s->cmdq.base = data; + s->cmdq.log2size = extract64(s->cmdq.base, 0, 5); + if (s->cmdq.log2size > SMMU_CMDQS) { + s->cmdq.log2size = SMMU_CMDQS; + } + return MEMTX_OK; + case A_EVENTQ_BASE: + s->eventq.base = data; + s->eventq.log2size = extract64(s->eventq.base, 0, 5); + if (s->eventq.log2size > SMMU_EVENTQS) { + s->eventq.log2size = SMMU_EVENTQS; + } + return MEMTX_OK; + case A_EVENTQ_IRQ_CFG0: + s->eventq_irq_cfg0 = data; + return MEMTX_OK; + default: + qemu_log_mask(LOG_UNIMP, + "%s Unexpected 64-bit access to 0x%"PRIx64" (WI)\n", + __func__, offset); + return MEMTX_OK; + } +} + +static MemTxResult smmu_writel(SMMUv3State *s, hwaddr offset, + uint64_t data, MemTxAttrs attrs) +{ + switch (offset) { + case A_CR0: + s->cr[0] = data; + s->cr0ack = data & ~SMMU_CR0_RESERVED; + /* in case the command queue has been enabled */ + smmuv3_cmdq_consume(s); + return MEMTX_OK; + case A_CR1: + s->cr[1] = data; + return MEMTX_OK; + case A_CR2: + s->cr[2] = data; + return MEMTX_OK; + case A_IRQ_CTRL: + s->irq_ctrl = data; + return MEMTX_OK; + case A_GERRORN: + smmuv3_write_gerrorn(s, data); + /* + * By acknowledging the CMDQ_ERR, SW signals that commands can + * be processed again. + */ + smmuv3_cmdq_consume(s); + return MEMTX_OK; + case A_GERROR_IRQ_CFG0: /* 64b */ + s->gerror_irq_cfg0 = deposit64(s->gerror_irq_cfg0, 0, 32, data); + return MEMTX_OK; + case A_GERROR_IRQ_CFG0 + 4: + s->gerror_irq_cfg0 = deposit64(s->gerror_irq_cfg0, 32, 32, data); + return MEMTX_OK; + case A_GERROR_IRQ_CFG1: + s->gerror_irq_cfg1 = data; + return MEMTX_OK; + case A_GERROR_IRQ_CFG2: + s->gerror_irq_cfg2 = data; + return MEMTX_OK; + case A_STRTAB_BASE: /* 64b */ + s->strtab_base = deposit64(s->strtab_base, 0, 32, data); + return MEMTX_OK; + case A_STRTAB_BASE + 4: + s->strtab_base = deposit64(s->strtab_base, 32, 32, data); + return MEMTX_OK; + case A_STRTAB_BASE_CFG: + s->strtab_base_cfg = data; + if (FIELD_EX32(data, STRTAB_BASE_CFG, FMT) == 1) { + s->sid_split = FIELD_EX32(data, STRTAB_BASE_CFG, SPLIT); + s->features |= SMMU_FEATURE_2LVL_STE; + } + return MEMTX_OK; + case A_CMDQ_BASE: /* 64b */ + s->cmdq.base = deposit64(s->cmdq.base, 0, 32, data); + s->cmdq.log2size = extract64(s->cmdq.base, 0, 5); + if (s->cmdq.log2size > SMMU_CMDQS) { + s->cmdq.log2size = SMMU_CMDQS; + } + return MEMTX_OK; + case A_CMDQ_BASE + 4: /* 64b */ + s->cmdq.base = deposit64(s->cmdq.base, 32, 32, data); + return MEMTX_OK; + case A_CMDQ_PROD: + s->cmdq.prod = data; + smmuv3_cmdq_consume(s); + return MEMTX_OK; + case A_CMDQ_CONS: + s->cmdq.cons = data; + return MEMTX_OK; + case A_EVENTQ_BASE: /* 64b */ + s->eventq.base = deposit64(s->eventq.base, 0, 32, data); + s->eventq.log2size = extract64(s->eventq.base, 0, 5); + if (s->eventq.log2size > SMMU_EVENTQS) { + s->eventq.log2size = SMMU_EVENTQS; + } + return MEMTX_OK; + case A_EVENTQ_BASE
+ 4: + s->eventq.base = deposit64(s->eventq.base, 32, 32, data); + return MEMTX_OK; + case A_EVENTQ_PROD: + s->eventq.prod = data; + return MEMTX_OK; + case A_EVENTQ_CONS: + s->eventq.cons = data; + return MEMTX_OK; + case A_EVENTQ_IRQ_CFG0: /* 64b */ + s->eventq_irq_cfg0 = deposit64(s->eventq_irq_cfg0, 0, 32, data); + return MEMTX_OK; + case A_EVENTQ_IRQ_CFG0 + 4: + s->eventq_irq_cfg0 = deposit64(s->eventq_irq_cfg0, 32, 32, data); + return MEMTX_OK; + case A_EVENTQ_IRQ_CFG1: + s->eventq_irq_cfg1 = data; + return MEMTX_OK; + case A_EVENTQ_IRQ_CFG2: + s->eventq_irq_cfg2 = data; + return MEMTX_OK; + default: + qemu_log_mask(LOG_UNIMP, + "%s Unexpected 32-bit access to 0x%"PRIx64" (WI)\n", + __func__, offset); + return MEMTX_OK; + } +} + +static MemTxResult smmu_write_mmio(void *opaque, hwaddr offset, uint64_t data, + unsigned size, MemTxAttrs attrs) +{ + SMMUState *sys = opaque; + SMMUv3State *s = ARM_SMMUV3(sys); + MemTxResult r; + + /* CONSTRAINED UNPREDICTABLE choice to have page0/1 be exact aliases */ + offset &= ~0x10000; + + switch (size) { + case 8: + r = smmu_writell(s, offset, data, attrs); + break; + case 4: + r = smmu_writel(s, offset, data, attrs); + break; + default: + r = MEMTX_ERROR; + break; + } + + trace_smmuv3_write_mmio(offset, data, size, r); + return r; +} + +static MemTxResult smmu_readll(SMMUv3State *s, hwaddr offset, + uint64_t *data, MemTxAttrs attrs) +{ + switch (offset) { + case A_GERROR_IRQ_CFG0: + *data = s->gerror_irq_cfg0; + return MEMTX_OK; + case A_STRTAB_BASE: + *data = s->strtab_base; + return MEMTX_OK; + case A_CMDQ_BASE: + *data = s->cmdq.base; + return MEMTX_OK; + case A_EVENTQ_BASE: + *data = s->eventq.base; + return MEMTX_OK; + default: + *data = 0; + qemu_log_mask(LOG_UNIMP, + "%s Unexpected 64-bit access to 0x%"PRIx64" (RAZ)\n", + __func__, offset); + return MEMTX_OK; + } +} + +static MemTxResult smmu_readl(SMMUv3State *s, hwaddr offset, + uint64_t *data, MemTxAttrs attrs) +{ + switch (offset) { + case A_IDREGS ... A_IDREGS + 0x2f: + *data = smmuv3_idreg(offset - A_IDREGS); + return MEMTX_OK; + case A_IDR0 ... 
A_IDR5: + *data = s->idr[(offset - A_IDR0) / 4]; + return MEMTX_OK; + case A_IIDR: + *data = s->iidr; + return MEMTX_OK; + case A_AIDR: + *data = s->aidr; + return MEMTX_OK; + case A_CR0: + *data = s->cr[0]; + return MEMTX_OK; + case A_CR0ACK: + *data = s->cr0ack; + return MEMTX_OK; + case A_CR1: + *data = s->cr[1]; + return MEMTX_OK; + case A_CR2: + *data = s->cr[2]; + return MEMTX_OK; + case A_STATUSR: + *data = s->statusr; + return MEMTX_OK; + case A_IRQ_CTRL: + case A_IRQ_CTRL_ACK: + *data = s->irq_ctrl; + return MEMTX_OK; + case A_GERROR: + *data = s->gerror; + return MEMTX_OK; + case A_GERRORN: + *data = s->gerrorn; + return MEMTX_OK; + case A_GERROR_IRQ_CFG0: /* 64b */ + *data = extract64(s->gerror_irq_cfg0, 0, 32); + return MEMTX_OK; + case A_GERROR_IRQ_CFG0 + 4: + *data = extract64(s->gerror_irq_cfg0, 32, 32); + return MEMTX_OK; + case A_GERROR_IRQ_CFG1: + *data = s->gerror_irq_cfg1; + return MEMTX_OK; + case A_GERROR_IRQ_CFG2: + *data = s->gerror_irq_cfg2; + return MEMTX_OK; + case A_STRTAB_BASE: /* 64b */ + *data = extract64(s->strtab_base, 0, 32); + return MEMTX_OK; + case A_STRTAB_BASE + 4: /* 64b */ + *data = extract64(s->strtab_base, 32, 32); + return MEMTX_OK; + case A_STRTAB_BASE_CFG: + *data = s->strtab_base_cfg; + return MEMTX_OK; + case A_CMDQ_BASE: /* 64b */ + *data = extract64(s->cmdq.base, 0, 32); + return MEMTX_OK; + case A_CMDQ_BASE + 4: + *data = extract64(s->cmdq.base, 32, 32); + return MEMTX_OK; + case A_CMDQ_PROD: + *data = s->cmdq.prod; + return MEMTX_OK; + case A_CMDQ_CONS: + *data = s->cmdq.cons; + return MEMTX_OK; + case A_EVENTQ_BASE: /* 64b */ + *data = extract64(s->eventq.base, 0, 32); + return MEMTX_OK; + case A_EVENTQ_BASE + 4: /* 64b */ + *data = extract64(s->eventq.base, 32, 32); + return MEMTX_OK; + case A_EVENTQ_PROD: + *data = s->eventq.prod; + return MEMTX_OK; + case A_EVENTQ_CONS: + *data = s->eventq.cons; + return MEMTX_OK; + default: + *data = 0; + qemu_log_mask(LOG_UNIMP, + "%s unhandled 32-bit access at 0x%"PRIx64" (RAZ)\n", + __func__, offset); + return MEMTX_OK; + } +} + +static MemTxResult smmu_read_mmio(void *opaque, hwaddr offset, uint64_t *data, + unsigned size, MemTxAttrs attrs) +{ + SMMUState *sys = opaque; + SMMUv3State *s = ARM_SMMUV3(sys); + MemTxResult r; + + /* CONSTRAINED UNPREDICTABLE choice to have page0/1 be exact aliases */ + offset &= ~0x10000; + + switch (size) { + case 8: + r = smmu_readll(s, offset, data, attrs); + break; + case 4: + r = smmu_readl(s, offset, data, attrs); + break; + default: + r = MEMTX_ERROR; + break; + } + + trace_smmuv3_read_mmio(offset, *data, size, r); + return r; +} + +static const MemoryRegionOps smmu_mem_ops = { + .read_with_attrs = smmu_read_mmio, + .write_with_attrs = smmu_write_mmio, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 8, + }, + .impl = { + .min_access_size = 4, + .max_access_size = 8, + }, +}; + +static void smmu_init_irq(SMMUv3State *s, SysBusDevice *dev) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(s->irq); i++) { + sysbus_init_irq(dev, &s->irq[i]); + } +} + +static void smmu_reset(DeviceState *dev) +{ + SMMUv3State *s = ARM_SMMUV3(dev); + SMMUv3Class *c = ARM_SMMUV3_GET_CLASS(s); + + c->parent_reset(dev); + + smmuv3_init_regs(s); +} + +static void smmu_realize(DeviceState *d, Error **errp) +{ + SMMUState *sys = ARM_SMMU(d); + SMMUv3State *s = ARM_SMMUV3(sys); + SMMUv3Class *c = ARM_SMMUV3_GET_CLASS(s); + SysBusDevice *dev = SYS_BUS_DEVICE(d); + Error *local_err = NULL; + + c->parent_realize(d, &local_err); + if (local_err) { + 
error_propagate(errp, local_err); + return; + } + + qemu_mutex_init(&s->mutex); + + memory_region_init_io(&sys->iomem, OBJECT(s), + &smmu_mem_ops, sys, TYPE_ARM_SMMUV3, 0x20000); + + sys->mrtypename = TYPE_SMMUV3_IOMMU_MEMORY_REGION; + + sysbus_init_mmio(dev, &sys->iomem); + + smmu_init_irq(s, dev); +} + +static const VMStateDescription vmstate_smmuv3_queue = { + .name = "smmuv3_queue", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT64(base, SMMUQueue), + VMSTATE_UINT32(prod, SMMUQueue), + VMSTATE_UINT32(cons, SMMUQueue), + VMSTATE_UINT8(log2size, SMMUQueue), + VMSTATE_END_OF_LIST(), + }, +}; + +static const VMStateDescription vmstate_smmuv3 = { + .name = "smmuv3", + .version_id = 1, + .minimum_version_id = 1, + .priority = MIG_PRI_IOMMU, + .fields = (VMStateField[]) { + VMSTATE_UINT32(features, SMMUv3State), + VMSTATE_UINT8(sid_size, SMMUv3State), + VMSTATE_UINT8(sid_split, SMMUv3State), + + VMSTATE_UINT32_ARRAY(cr, SMMUv3State, 3), + VMSTATE_UINT32(cr0ack, SMMUv3State), + VMSTATE_UINT32(statusr, SMMUv3State), + VMSTATE_UINT32(irq_ctrl, SMMUv3State), + VMSTATE_UINT32(gerror, SMMUv3State), + VMSTATE_UINT32(gerrorn, SMMUv3State), + VMSTATE_UINT64(gerror_irq_cfg0, SMMUv3State), + VMSTATE_UINT32(gerror_irq_cfg1, SMMUv3State), + VMSTATE_UINT32(gerror_irq_cfg2, SMMUv3State), + VMSTATE_UINT64(strtab_base, SMMUv3State), + VMSTATE_UINT32(strtab_base_cfg, SMMUv3State), + VMSTATE_UINT64(eventq_irq_cfg0, SMMUv3State), + VMSTATE_UINT32(eventq_irq_cfg1, SMMUv3State), + VMSTATE_UINT32(eventq_irq_cfg2, SMMUv3State), + + VMSTATE_STRUCT(cmdq, SMMUv3State, 0, vmstate_smmuv3_queue, SMMUQueue), + VMSTATE_STRUCT(eventq, SMMUv3State, 0, vmstate_smmuv3_queue, SMMUQueue), + + VMSTATE_END_OF_LIST(), + }, +}; + +static void smmuv3_instance_init(Object *obj) +{ + /* Nothing much to do here as of now */ +} + +static void smmuv3_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + SMMUv3Class *c = ARM_SMMUV3_CLASS(klass); + + dc->vmsd = &vmstate_smmuv3; + device_class_set_parent_reset(dc, smmu_reset, &c->parent_reset); + c->parent_realize = dc->realize; + dc->realize = smmu_realize; +} + +static int smmuv3_notify_flag_changed(IOMMUMemoryRegion *iommu, + IOMMUNotifierFlag old, + IOMMUNotifierFlag new, + Error **errp) +{ + SMMUDevice *sdev = container_of(iommu, SMMUDevice, iommu); + SMMUv3State *s3 = sdev->smmu; + SMMUState *s = &(s3->smmu_state); + + if (new & IOMMU_NOTIFIER_DEVIOTLB_UNMAP) { + error_setg(errp, "SMMUv3 does not support dev-iotlb yet"); + return -EINVAL; + } + + if (new & IOMMU_NOTIFIER_MAP) { + error_setg(errp, + "device %02x.%02x.%x requires iommu MAP notifier which is " + "not currently supported", pci_bus_num(sdev->bus), + PCI_SLOT(sdev->devfn), PCI_FUNC(sdev->devfn)); + return -EINVAL; + } + + if (old == IOMMU_NOTIFIER_NONE) { + trace_smmuv3_notify_flag_add(iommu->parent_obj.name); + QLIST_INSERT_HEAD(&s->devices_with_notifiers, sdev, next); + } else if (new == IOMMU_NOTIFIER_NONE) { + trace_smmuv3_notify_flag_del(iommu->parent_obj.name); + QLIST_REMOVE(sdev, next); + } + return 0; +} + +static void smmuv3_iommu_memory_region_class_init(ObjectClass *klass, + void *data) +{ + IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass); + + imrc->translate = smmuv3_translate; + imrc->notify_flag_changed = smmuv3_notify_flag_changed; +} + +static const TypeInfo smmuv3_type_info = { + .name = TYPE_ARM_SMMUV3, + .parent = TYPE_ARM_SMMU, + .instance_size = sizeof(SMMUv3State), + .instance_init = smmuv3_instance_init, + 
.class_size = sizeof(SMMUv3Class), + .class_init = smmuv3_class_init, +}; + +static const TypeInfo smmuv3_iommu_memory_region_info = { + .parent = TYPE_IOMMU_MEMORY_REGION, + .name = TYPE_SMMUV3_IOMMU_MEMORY_REGION, + .class_init = smmuv3_iommu_memory_region_class_init, +}; + +static void smmuv3_register_types(void) +{ + type_register(&smmuv3_type_info); + type_register(&smmuv3_iommu_memory_region_info); +} + +type_init(smmuv3_register_types) + diff --git a/hw/arm/spitz.c b/hw/arm/spitz.c new file mode 100644 index 000000000..5aab0b856 --- /dev/null +++ b/hw/arm/spitz.c @@ -0,0 +1,1279 @@ +/* + * PXA270-based Clamshell PDA platforms. + * + * Copyright (c) 2006 Openedhand Ltd. + * Written by Andrzej Zaborowski + * + * This code is licensed under the GNU GPL v2. + * + * Contributions after 2012-01-13 are licensed under the terms of the + * GNU GPL, version 2 or (at your option) any later version. + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "hw/arm/pxa.h" +#include "hw/arm/boot.h" +#include "sysemu/runstate.h" +#include "sysemu/sysemu.h" +#include "hw/pcmcia.h" +#include "hw/qdev-properties.h" +#include "hw/i2c/i2c.h" +#include "hw/irq.h" +#include "hw/ssi/ssi.h" +#include "hw/block/flash.h" +#include "qemu/timer.h" +#include "qemu/log.h" +#include "hw/arm/sharpsl.h" +#include "ui/console.h" +#include "hw/audio/wm8750.h" +#include "audio/audio.h" +#include "hw/boards.h" +#include "hw/sysbus.h" +#include "hw/adc/max111x.h" +#include "migration/vmstate.h" +#include "exec/address-spaces.h" +#include "cpu.h" +#include "qom/object.h" + +enum spitz_model_e { spitz, akita, borzoi, terrier }; + +struct SpitzMachineClass { + MachineClass parent; + enum spitz_model_e model; + int arm_id; +}; + +struct SpitzMachineState { + MachineState parent; + PXA2xxState *mpu; + DeviceState *mux; + DeviceState *lcdtg; + DeviceState *ads7846; + DeviceState *max1111; + DeviceState *scp0; + DeviceState *scp1; + DeviceState *misc_gpio; +}; + +#define TYPE_SPITZ_MACHINE "spitz-common" +OBJECT_DECLARE_TYPE(SpitzMachineState, SpitzMachineClass, SPITZ_MACHINE) + +#define zaurus_printf(format, ...) 
\ + fprintf(stderr, "%s: " format, __func__, ##__VA_ARGS__) + +/* Spitz Flash */ +#define FLASH_BASE 0x0c000000 +#define FLASH_ECCLPLB 0x00 /* Line parity 7 - 0 bit */ +#define FLASH_ECCLPUB 0x04 /* Line parity 15 - 8 bit */ +#define FLASH_ECCCP 0x08 /* Column parity 5 - 0 bit */ +#define FLASH_ECCCNTR 0x0c /* ECC byte counter */ +#define FLASH_ECCCLRR 0x10 /* Clear ECC */ +#define FLASH_FLASHIO 0x14 /* Flash I/O */ +#define FLASH_FLASHCTL 0x18 /* Flash Control */ + +#define FLASHCTL_CE0 (1 << 0) +#define FLASHCTL_CLE (1 << 1) +#define FLASHCTL_ALE (1 << 2) +#define FLASHCTL_WP (1 << 3) +#define FLASHCTL_CE1 (1 << 4) +#define FLASHCTL_RYBY (1 << 5) +#define FLASHCTL_NCE (FLASHCTL_CE0 | FLASHCTL_CE1) + +#define TYPE_SL_NAND "sl-nand" +OBJECT_DECLARE_SIMPLE_TYPE(SLNANDState, SL_NAND) + +struct SLNANDState { + SysBusDevice parent_obj; + + MemoryRegion iomem; + DeviceState *nand; + uint8_t ctl; + uint8_t manf_id; + uint8_t chip_id; + ECCState ecc; +}; + +static uint64_t sl_read(void *opaque, hwaddr addr, unsigned size) +{ + SLNANDState *s = (SLNANDState *) opaque; + int ryby; + + switch (addr) { +#define BSHR(byte, from, to) ((s->ecc.lp[byte] >> (from - to)) & (1 << to)) + case FLASH_ECCLPLB: + return BSHR(0, 4, 0) | BSHR(0, 5, 2) | BSHR(0, 6, 4) | BSHR(0, 7, 6) | + BSHR(1, 4, 1) | BSHR(1, 5, 3) | BSHR(1, 6, 5) | BSHR(1, 7, 7); + +#define BSHL(byte, from, to) ((s->ecc.lp[byte] << (to - from)) & (1 << to)) + case FLASH_ECCLPUB: + return BSHL(0, 0, 0) | BSHL(0, 1, 2) | BSHL(0, 2, 4) | BSHL(0, 3, 6) | + BSHL(1, 0, 1) | BSHL(1, 1, 3) | BSHL(1, 2, 5) | BSHL(1, 3, 7); + + case FLASH_ECCCP: + return s->ecc.cp; + + case FLASH_ECCCNTR: + return s->ecc.count & 0xff; + + case FLASH_FLASHCTL: + nand_getpins(s->nand, &ryby); + if (ryby) + return s->ctl | FLASHCTL_RYBY; + else + return s->ctl; + + case FLASH_FLASHIO: + if (size == 4) { + return ecc_digest(&s->ecc, nand_getio(s->nand)) | + (ecc_digest(&s->ecc, nand_getio(s->nand)) << 16); + } + return ecc_digest(&s->ecc, nand_getio(s->nand)); + + default: + qemu_log_mask(LOG_GUEST_ERROR, + "sl_read: bad register offset 0x%02" HWADDR_PRIx "\n", + addr); + } + return 0; +} + +static void sl_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + SLNANDState *s = (SLNANDState *) opaque; + + switch (addr) { + case FLASH_ECCCLRR: + /* Value is ignored. 
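Writing any value simply resets the running ECC calculation.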
*/ + ecc_reset(&s->ecc); + break; + + case FLASH_FLASHCTL: + s->ctl = value & 0xff & ~FLASHCTL_RYBY; + nand_setpins(s->nand, + s->ctl & FLASHCTL_CLE, + s->ctl & FLASHCTL_ALE, + s->ctl & FLASHCTL_NCE, + s->ctl & FLASHCTL_WP, + 0); + break; + + case FLASH_FLASHIO: + nand_setio(s->nand, ecc_digest(&s->ecc, value & 0xff)); + break; + + default: + qemu_log_mask(LOG_GUEST_ERROR, + "sl_write: bad register offset 0x%02" HWADDR_PRIx "\n", + addr); + } +} + +enum { + FLASH_128M, + FLASH_1024M, +}; + +static const MemoryRegionOps sl_ops = { + .read = sl_read, + .write = sl_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void sl_flash_register(PXA2xxState *cpu, int size) +{ + DeviceState *dev; + + dev = qdev_new(TYPE_SL_NAND); + + qdev_prop_set_uint8(dev, "manf_id", NAND_MFR_SAMSUNG); + if (size == FLASH_128M) + qdev_prop_set_uint8(dev, "chip_id", 0x73); + else if (size == FLASH_1024M) + qdev_prop_set_uint8(dev, "chip_id", 0xf1); + + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, FLASH_BASE); +} + +static void sl_nand_init(Object *obj) +{ + SLNANDState *s = SL_NAND(obj); + SysBusDevice *dev = SYS_BUS_DEVICE(obj); + + s->ctl = 0; + + memory_region_init_io(&s->iomem, obj, &sl_ops, s, "sl", 0x40); + sysbus_init_mmio(dev, &s->iomem); +} + +static void sl_nand_realize(DeviceState *dev, Error **errp) +{ + SLNANDState *s = SL_NAND(dev); + DriveInfo *nand; + + /* FIXME use a qdev drive property instead of drive_get() */ + nand = drive_get(IF_MTD, 0, 0); + s->nand = nand_init(nand ? blk_by_legacy_dinfo(nand) : NULL, + s->manf_id, s->chip_id); +} + +/* Spitz Keyboard */ + +#define SPITZ_KEY_STROBE_NUM 11 +#define SPITZ_KEY_SENSE_NUM 7 + +static const int spitz_gpio_key_sense[SPITZ_KEY_SENSE_NUM] = { + 12, 17, 91, 34, 36, 38, 39 +}; + +static const int spitz_gpio_key_strobe[SPITZ_KEY_STROBE_NUM] = { + 88, 23, 24, 25, 26, 27, 52, 103, 107, 108, 114 +}; + +/* Eighth additional row maps the special keys */ +static int spitz_keymap[SPITZ_KEY_SENSE_NUM + 1][SPITZ_KEY_STROBE_NUM] = { + { 0x1d, 0x02, 0x04, 0x06, 0x07, 0x08, 0x0a, 0x0b, 0x0e, 0x3f, 0x40 }, + { -1 , 0x03, 0x05, 0x13, 0x15, 0x09, 0x17, 0x18, 0x19, 0x41, 0x42 }, + { 0x0f, 0x10, 0x12, 0x14, 0x22, 0x16, 0x24, 0x25, -1 , -1 , -1 }, + { 0x3c, 0x11, 0x1f, 0x21, 0x2f, 0x23, 0x32, 0x26, -1 , 0x36, -1 }, + { 0x3b, 0x1e, 0x20, 0x2e, 0x30, 0x31, 0x34, -1 , 0x1c, 0x2a, -1 }, + { 0x44, 0x2c, 0x2d, 0x0c, 0x39, 0x33, -1 , 0x48, -1 , -1 , 0x38 }, + { 0x37, 0x3d, -1 , 0x45, 0x57, 0x58, 0x4b, 0x50, 0x4d, -1 , -1 }, + { 0x52, 0x43, 0x01, 0x47, 0x49, -1 , -1 , -1 , -1 , -1 , -1 }, +}; + +#define SPITZ_GPIO_AK_INT 13 /* Remote control */ +#define SPITZ_GPIO_SYNC 16 /* Sync button */ +#define SPITZ_GPIO_ON_KEY 95 /* Power button */ +#define SPITZ_GPIO_SWA 97 /* Lid */ +#define SPITZ_GPIO_SWB 96 /* Tablet mode */ + +/* The special buttons are mapped to unused keys */ +static const int spitz_gpiomap[5] = { + SPITZ_GPIO_AK_INT, SPITZ_GPIO_SYNC, SPITZ_GPIO_ON_KEY, + SPITZ_GPIO_SWA, SPITZ_GPIO_SWB, +}; + +#define TYPE_SPITZ_KEYBOARD "spitz-keyboard" +OBJECT_DECLARE_SIMPLE_TYPE(SpitzKeyboardState, SPITZ_KEYBOARD) + +struct SpitzKeyboardState { + SysBusDevice parent_obj; + + qemu_irq sense[SPITZ_KEY_SENSE_NUM]; + qemu_irq gpiomap[5]; + int keymap[0x80]; + uint16_t keyrow[SPITZ_KEY_SENSE_NUM]; + uint16_t strobe_state; + uint16_t sense_state; + + uint16_t pre_map[0x100]; + uint16_t modifiers; + uint16_t imodifiers; + uint8_t fifo[16]; + int fifopos, fifolen; + QEMUTimer *kbdtimer; +}; + +static void 
spitz_keyboard_sense_update(SpitzKeyboardState *s) +{ + int i; + uint16_t strobe, sense = 0; + for (i = 0; i < SPITZ_KEY_SENSE_NUM; i ++) { + strobe = s->keyrow[i] & s->strobe_state; + if (strobe) { + sense |= 1 << i; + if (!(s->sense_state & (1 << i))) + qemu_irq_raise(s->sense[i]); + } else if (s->sense_state & (1 << i)) + qemu_irq_lower(s->sense[i]); + } + + s->sense_state = sense; +} + +static void spitz_keyboard_strobe(void *opaque, int line, int level) +{ + SpitzKeyboardState *s = (SpitzKeyboardState *) opaque; + + if (level) + s->strobe_state |= 1 << line; + else + s->strobe_state &= ~(1 << line); + spitz_keyboard_sense_update(s); +} + +static void spitz_keyboard_keydown(SpitzKeyboardState *s, int keycode) +{ + int spitz_keycode = s->keymap[keycode & 0x7f]; + if (spitz_keycode == -1) + return; + + /* Handle the additional keys */ + if ((spitz_keycode >> 4) == SPITZ_KEY_SENSE_NUM) { + qemu_set_irq(s->gpiomap[spitz_keycode & 0xf], (keycode < 0x80)); + return; + } + + if (keycode & 0x80) + s->keyrow[spitz_keycode >> 4] &= ~(1 << (spitz_keycode & 0xf)); + else + s->keyrow[spitz_keycode >> 4] |= 1 << (spitz_keycode & 0xf); + + spitz_keyboard_sense_update(s); +} + +#define SPITZ_MOD_SHIFT (1 << 7) +#define SPITZ_MOD_CTRL (1 << 8) +#define SPITZ_MOD_FN (1 << 9) + +#define QUEUE_KEY(c) s->fifo[(s->fifopos + s->fifolen ++) & 0xf] = c + +static void spitz_keyboard_handler(void *opaque, int keycode) +{ + SpitzKeyboardState *s = opaque; + uint16_t code; + int mapcode; + switch (keycode) { + case 0x2a: /* Left Shift */ + s->modifiers |= 1; + break; + case 0xaa: + s->modifiers &= ~1; + break; + case 0x36: /* Right Shift */ + s->modifiers |= 2; + break; + case 0xb6: + s->modifiers &= ~2; + break; + case 0x1d: /* Control */ + s->modifiers |= 4; + break; + case 0x9d: + s->modifiers &= ~4; + break; + case 0x38: /* Alt */ + s->modifiers |= 8; + break; + case 0xb8: + s->modifiers &= ~8; + break; + } + + code = s->pre_map[mapcode = ((s->modifiers & 3) ? 
+ (keycode | SPITZ_MOD_SHIFT) : + (keycode & ~SPITZ_MOD_SHIFT))]; + + if (code != mapcode) { +#if 0 + if ((code & SPITZ_MOD_SHIFT) && !(s->modifiers & 1)) { + QUEUE_KEY(0x2a | (keycode & 0x80)); + } + if ((code & SPITZ_MOD_CTRL) && !(s->modifiers & 4)) { + QUEUE_KEY(0x1d | (keycode & 0x80)); + } + if ((code & SPITZ_MOD_FN) && !(s->modifiers & 8)) { + QUEUE_KEY(0x38 | (keycode & 0x80)); + } + if ((code & SPITZ_MOD_FN) && (s->modifiers & 1)) { + QUEUE_KEY(0x2a | (~keycode & 0x80)); + } + if ((code & SPITZ_MOD_FN) && (s->modifiers & 2)) { + QUEUE_KEY(0x36 | (~keycode & 0x80)); + } +#else + if (keycode & 0x80) { + if ((s->imodifiers & 1 ) && !(s->modifiers & 1)) + QUEUE_KEY(0x2a | 0x80); + if ((s->imodifiers & 4 ) && !(s->modifiers & 4)) + QUEUE_KEY(0x1d | 0x80); + if ((s->imodifiers & 8 ) && !(s->modifiers & 8)) + QUEUE_KEY(0x38 | 0x80); + if ((s->imodifiers & 0x10) && (s->modifiers & 1)) + QUEUE_KEY(0x2a); + if ((s->imodifiers & 0x20) && (s->modifiers & 2)) + QUEUE_KEY(0x36); + s->imodifiers = 0; + } else { + if ((code & SPITZ_MOD_SHIFT) && + !((s->modifiers | s->imodifiers) & 1)) { + QUEUE_KEY(0x2a); + s->imodifiers |= 1; + } + if ((code & SPITZ_MOD_CTRL) && + !((s->modifiers | s->imodifiers) & 4)) { + QUEUE_KEY(0x1d); + s->imodifiers |= 4; + } + if ((code & SPITZ_MOD_FN) && + !((s->modifiers | s->imodifiers) & 8)) { + QUEUE_KEY(0x38); + s->imodifiers |= 8; + } + if ((code & SPITZ_MOD_FN) && (s->modifiers & 1) && + !(s->imodifiers & 0x10)) { + QUEUE_KEY(0x2a | 0x80); + s->imodifiers |= 0x10; + } + if ((code & SPITZ_MOD_FN) && (s->modifiers & 2) && + !(s->imodifiers & 0x20)) { + QUEUE_KEY(0x36 | 0x80); + s->imodifiers |= 0x20; + } + } +#endif + } + + QUEUE_KEY((code & 0x7f) | (keycode & 0x80)); +} + +static void spitz_keyboard_tick(void *opaque) +{ + SpitzKeyboardState *s = (SpitzKeyboardState *) opaque; + + if (s->fifolen) { + spitz_keyboard_keydown(s, s->fifo[s->fifopos ++]); + s->fifolen --; + if (s->fifopos >= 16) + s->fifopos = 0; + } + + timer_mod(s->kbdtimer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + + NANOSECONDS_PER_SECOND / 32); +} + +static void spitz_keyboard_pre_map(SpitzKeyboardState *s) +{ + int i; + for (i = 0; i < 0x100; i ++) + s->pre_map[i] = i; + s->pre_map[0x02 | SPITZ_MOD_SHIFT] = 0x02 | SPITZ_MOD_SHIFT; /* exclam */ + s->pre_map[0x28 | SPITZ_MOD_SHIFT] = 0x03 | SPITZ_MOD_SHIFT; /* quotedbl */ + s->pre_map[0x04 | SPITZ_MOD_SHIFT] = 0x04 | SPITZ_MOD_SHIFT; /* # */ + s->pre_map[0x05 | SPITZ_MOD_SHIFT] = 0x05 | SPITZ_MOD_SHIFT; /* dollar */ + s->pre_map[0x06 | SPITZ_MOD_SHIFT] = 0x06 | SPITZ_MOD_SHIFT; /* percent */ + s->pre_map[0x08 | SPITZ_MOD_SHIFT] = 0x07 | SPITZ_MOD_SHIFT; /* ampersand */ + s->pre_map[0x28] = 0x08 | SPITZ_MOD_SHIFT; /* ' */ + s->pre_map[0x0a | SPITZ_MOD_SHIFT] = 0x09 | SPITZ_MOD_SHIFT; /* ( */ + s->pre_map[0x0b | SPITZ_MOD_SHIFT] = 0x0a | SPITZ_MOD_SHIFT; /* ) */ + s->pre_map[0x29 | SPITZ_MOD_SHIFT] = 0x0b | SPITZ_MOD_SHIFT; /* tilde */ + s->pre_map[0x03 | SPITZ_MOD_SHIFT] = 0x0c | SPITZ_MOD_SHIFT; /* at */ + s->pre_map[0xd3] = 0x0e | SPITZ_MOD_FN; /* Delete */ + s->pre_map[0x3a] = 0x0f | SPITZ_MOD_FN; /* Caps_Lock */ + s->pre_map[0x07 | SPITZ_MOD_SHIFT] = 0x11 | SPITZ_MOD_FN; /* ^ */ + s->pre_map[0x0d] = 0x12 | SPITZ_MOD_FN; /* equal */ + s->pre_map[0x0d | SPITZ_MOD_SHIFT] = 0x13 | SPITZ_MOD_FN; /* plus */ + s->pre_map[0x1a] = 0x14 | SPITZ_MOD_FN; /* [ */ + s->pre_map[0x1b] = 0x15 | SPITZ_MOD_FN; /* ] */ + s->pre_map[0x1a | SPITZ_MOD_SHIFT] = 0x16 | SPITZ_MOD_FN; /* { */ + s->pre_map[0x1b | SPITZ_MOD_SHIFT] = 0x17 | SPITZ_MOD_FN; /* } */ + 
s->pre_map[0x27] = 0x22 | SPITZ_MOD_FN; /* semicolon */ + s->pre_map[0x27 | SPITZ_MOD_SHIFT] = 0x23 | SPITZ_MOD_FN; /* colon */ + s->pre_map[0x09 | SPITZ_MOD_SHIFT] = 0x24 | SPITZ_MOD_FN; /* asterisk */ + s->pre_map[0x2b] = 0x25 | SPITZ_MOD_FN; /* backslash */ + s->pre_map[0x2b | SPITZ_MOD_SHIFT] = 0x26 | SPITZ_MOD_FN; /* bar */ + s->pre_map[0x0c | SPITZ_MOD_SHIFT] = 0x30 | SPITZ_MOD_FN; /* _ */ + s->pre_map[0x33 | SPITZ_MOD_SHIFT] = 0x33 | SPITZ_MOD_FN; /* less */ + s->pre_map[0x35] = 0x33 | SPITZ_MOD_SHIFT; /* slash */ + s->pre_map[0x34 | SPITZ_MOD_SHIFT] = 0x34 | SPITZ_MOD_FN; /* greater */ + s->pre_map[0x35 | SPITZ_MOD_SHIFT] = 0x34 | SPITZ_MOD_SHIFT; /* question */ + s->pre_map[0x49] = 0x48 | SPITZ_MOD_FN; /* Page_Up */ + s->pre_map[0x51] = 0x50 | SPITZ_MOD_FN; /* Page_Down */ + + s->modifiers = 0; + s->imodifiers = 0; + s->fifopos = 0; + s->fifolen = 0; +} + +#undef SPITZ_MOD_SHIFT +#undef SPITZ_MOD_CTRL +#undef SPITZ_MOD_FN + +static int spitz_keyboard_post_load(void *opaque, int version_id) +{ + SpitzKeyboardState *s = (SpitzKeyboardState *) opaque; + + /* Release all pressed keys */ + memset(s->keyrow, 0, sizeof(s->keyrow)); + spitz_keyboard_sense_update(s); + s->modifiers = 0; + s->imodifiers = 0; + s->fifopos = 0; + s->fifolen = 0; + + return 0; +} + +static void spitz_keyboard_register(PXA2xxState *cpu) +{ + int i; + DeviceState *dev; + SpitzKeyboardState *s; + + dev = sysbus_create_simple(TYPE_SPITZ_KEYBOARD, -1, NULL); + s = SPITZ_KEYBOARD(dev); + + for (i = 0; i < SPITZ_KEY_SENSE_NUM; i ++) + qdev_connect_gpio_out(dev, i, qdev_get_gpio_in(cpu->gpio, spitz_gpio_key_sense[i])); + + for (i = 0; i < 5; i ++) + s->gpiomap[i] = qdev_get_gpio_in(cpu->gpio, spitz_gpiomap[i]); + + if (!graphic_rotate) + s->gpiomap[4] = qemu_irq_invert(s->gpiomap[4]); + + for (i = 0; i < 5; i++) + qemu_set_irq(s->gpiomap[i], 0); + + for (i = 0; i < SPITZ_KEY_STROBE_NUM; i ++) + qdev_connect_gpio_out(cpu->gpio, spitz_gpio_key_strobe[i], + qdev_get_gpio_in(dev, i)); + + timer_mod(s->kbdtimer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL)); + + qemu_add_kbd_event_handler(spitz_keyboard_handler, s); +} + +static void spitz_keyboard_init(Object *obj) +{ + DeviceState *dev = DEVICE(obj); + SpitzKeyboardState *s = SPITZ_KEYBOARD(obj); + int i, j; + + for (i = 0; i < 0x80; i ++) + s->keymap[i] = -1; + for (i = 0; i < SPITZ_KEY_SENSE_NUM + 1; i ++) + for (j = 0; j < SPITZ_KEY_STROBE_NUM; j ++) + if (spitz_keymap[i][j] != -1) + s->keymap[spitz_keymap[i][j]] = (i << 4) | j; + + spitz_keyboard_pre_map(s); + + qdev_init_gpio_in(dev, spitz_keyboard_strobe, SPITZ_KEY_STROBE_NUM); + qdev_init_gpio_out(dev, s->sense, SPITZ_KEY_SENSE_NUM); +} + +static void spitz_keyboard_realize(DeviceState *dev, Error **errp) +{ + SpitzKeyboardState *s = SPITZ_KEYBOARD(dev); + s->kbdtimer = timer_new_ns(QEMU_CLOCK_VIRTUAL, spitz_keyboard_tick, s); +} + +/* LCD backlight controller */ + +#define LCDTG_RESCTL 0x00 +#define LCDTG_PHACTRL 0x01 +#define LCDTG_DUTYCTRL 0x02 +#define LCDTG_POWERREG0 0x03 +#define LCDTG_POWERREG1 0x04 +#define LCDTG_GPOR3 0x05 +#define LCDTG_PICTRL 0x06 +#define LCDTG_POLCTRL 0x07 + +#define TYPE_SPITZ_LCDTG "spitz-lcdtg" +OBJECT_DECLARE_SIMPLE_TYPE(SpitzLCDTG, SPITZ_LCDTG) + +struct SpitzLCDTG { + SSIPeripheral ssidev; + uint32_t bl_intensity; + uint32_t bl_power; +}; + +static void spitz_bl_update(SpitzLCDTG *s) +{ + if (s->bl_power && s->bl_intensity) + zaurus_printf("LCD Backlight now at %u/63\n", s->bl_intensity); + else + zaurus_printf("LCD Backlight now off\n"); +} + +static inline void spitz_bl_bit5(void 
*opaque, int line, int level) +{ + SpitzLCDTG *s = opaque; + int prev = s->bl_intensity; + + if (level) + s->bl_intensity &= ~0x20; + else + s->bl_intensity |= 0x20; + + if (s->bl_power && prev != s->bl_intensity) + spitz_bl_update(s); +} + +static inline void spitz_bl_power(void *opaque, int line, int level) +{ + SpitzLCDTG *s = opaque; + s->bl_power = !!level; + spitz_bl_update(s); +} + +static uint32_t spitz_lcdtg_transfer(SSIPeripheral *dev, uint32_t value) +{ + SpitzLCDTG *s = SPITZ_LCDTG(dev); + int addr; + addr = value >> 5; + value &= 0x1f; + + switch (addr) { + case LCDTG_RESCTL: + if (value) + zaurus_printf("LCD in QVGA mode\n"); + else + zaurus_printf("LCD in VGA mode\n"); + break; + + case LCDTG_DUTYCTRL: + s->bl_intensity &= ~0x1f; + s->bl_intensity |= value; + if (s->bl_power) + spitz_bl_update(s); + break; + + case LCDTG_POWERREG0: + /* Set common voltage to M62332FP */ + break; + } + return 0; +} + +static void spitz_lcdtg_realize(SSIPeripheral *ssi, Error **errp) +{ + SpitzLCDTG *s = SPITZ_LCDTG(ssi); + DeviceState *dev = DEVICE(s); + + s->bl_power = 0; + s->bl_intensity = 0x20; + + qdev_init_gpio_in_named(dev, spitz_bl_bit5, "bl_bit5", 1); + qdev_init_gpio_in_named(dev, spitz_bl_power, "bl_power", 1); +} + +/* SSP devices */ + +#define CORGI_SSP_PORT 2 + +#define SPITZ_GPIO_LCDCON_CS 53 +#define SPITZ_GPIO_ADS7846_CS 14 +#define SPITZ_GPIO_MAX1111_CS 20 +#define SPITZ_GPIO_TP_INT 11 + +#define TYPE_CORGI_SSP "corgi-ssp" +OBJECT_DECLARE_SIMPLE_TYPE(CorgiSSPState, CORGI_SSP) + +/* "Demux" the signal based on current chipselect */ +struct CorgiSSPState { + SSIPeripheral ssidev; + SSIBus *bus[3]; + uint32_t enable[3]; +}; + +static uint32_t corgi_ssp_transfer(SSIPeripheral *dev, uint32_t value) +{ + CorgiSSPState *s = CORGI_SSP(dev); + int i; + + for (i = 0; i < 3; i++) { + if (s->enable[i]) { + return ssi_transfer(s->bus[i], value); + } + } + return 0; +} + +static void corgi_ssp_gpio_cs(void *opaque, int line, int level) +{ + CorgiSSPState *s = (CorgiSSPState *)opaque; + assert(line >= 0 && line < 3); + s->enable[line] = !level; +} + +#define MAX1111_BATT_VOLT 1 +#define MAX1111_BATT_TEMP 2 +#define MAX1111_ACIN_VOLT 3 + +#define SPITZ_BATTERY_TEMP 0xe0 /* About 2.9V */ +#define SPITZ_BATTERY_VOLT 0xd0 /* About 4.0V */ +#define SPITZ_CHARGEON_ACIN 0x80 /* About 5.0V */ + +static void corgi_ssp_realize(SSIPeripheral *d, Error **errp) +{ + DeviceState *dev = DEVICE(d); + CorgiSSPState *s = CORGI_SSP(d); + + qdev_init_gpio_in(dev, corgi_ssp_gpio_cs, 3); + s->bus[0] = ssi_create_bus(dev, "ssi0"); + s->bus[1] = ssi_create_bus(dev, "ssi1"); + s->bus[2] = ssi_create_bus(dev, "ssi2"); +} + +static void spitz_ssp_attach(SpitzMachineState *sms) +{ + void *bus; + + sms->mux = ssi_create_peripheral(sms->mpu->ssp[CORGI_SSP_PORT - 1], + TYPE_CORGI_SSP); + + bus = qdev_get_child_bus(sms->mux, "ssi0"); + sms->lcdtg = ssi_create_peripheral(bus, TYPE_SPITZ_LCDTG); + + bus = qdev_get_child_bus(sms->mux, "ssi1"); + sms->ads7846 = ssi_create_peripheral(bus, "ads7846"); + qdev_connect_gpio_out(sms->ads7846, 0, + qdev_get_gpio_in(sms->mpu->gpio, SPITZ_GPIO_TP_INT)); + + bus = qdev_get_child_bus(sms->mux, "ssi2"); + sms->max1111 = qdev_new(TYPE_MAX_1111); + qdev_prop_set_uint8(sms->max1111, "input1" /* BATT_VOLT */, + SPITZ_BATTERY_VOLT); + qdev_prop_set_uint8(sms->max1111, "input2" /* BATT_TEMP */, 0); + qdev_prop_set_uint8(sms->max1111, "input3" /* ACIN_VOLT */, + SPITZ_CHARGEON_ACIN); + ssi_realize_and_unref(sms->max1111, bus, &error_fatal); + + qdev_connect_gpio_out(sms->mpu->gpio, 
SPITZ_GPIO_LCDCON_CS, + qdev_get_gpio_in(sms->mux, 0)); + qdev_connect_gpio_out(sms->mpu->gpio, SPITZ_GPIO_ADS7846_CS, + qdev_get_gpio_in(sms->mux, 1)); + qdev_connect_gpio_out(sms->mpu->gpio, SPITZ_GPIO_MAX1111_CS, + qdev_get_gpio_in(sms->mux, 2)); +} + +/* CF Microdrive */ + +static void spitz_microdrive_attach(PXA2xxState *cpu, int slot) +{ + PCMCIACardState *md; + DriveInfo *dinfo; + + dinfo = drive_get(IF_IDE, 0, 0); + if (!dinfo || dinfo->media_cd) + return; + md = dscm1xxxx_init(dinfo); + pxa2xx_pcmcia_attach(cpu->pcmcia[slot], md); +} + +/* Wm8750 and Max7310 on I2C */ + +#define AKITA_MAX_ADDR 0x18 +#define SPITZ_WM_ADDRL 0x1b +#define SPITZ_WM_ADDRH 0x1a + +#define SPITZ_GPIO_WM 5 + +static void spitz_wm8750_addr(void *opaque, int line, int level) +{ + I2CSlave *wm = (I2CSlave *) opaque; + if (level) + i2c_slave_set_address(wm, SPITZ_WM_ADDRH); + else + i2c_slave_set_address(wm, SPITZ_WM_ADDRL); +} + +static void spitz_i2c_setup(PXA2xxState *cpu) +{ + /* Attach the CPU on one end of our I2C bus. */ + I2CBus *bus = pxa2xx_i2c_bus(cpu->i2c[0]); + + DeviceState *wm; + + /* Attach a WM8750 to the bus */ + wm = DEVICE(i2c_slave_create_simple(bus, TYPE_WM8750, 0)); + + spitz_wm8750_addr(wm, 0, 0); + qdev_connect_gpio_out(cpu->gpio, SPITZ_GPIO_WM, + qemu_allocate_irq(spitz_wm8750_addr, wm, 0)); + /* .. and to the sound interface. */ + cpu->i2s->opaque = wm; + cpu->i2s->codec_out = wm8750_dac_dat; + cpu->i2s->codec_in = wm8750_adc_dat; + wm8750_data_req_set(wm, cpu->i2s->data_req, cpu->i2s); +} + +static void spitz_akita_i2c_setup(PXA2xxState *cpu) +{ + /* Attach a Max7310 to Akita I2C bus. */ + i2c_slave_create_simple(pxa2xx_i2c_bus(cpu->i2c[0]), "max7310", + AKITA_MAX_ADDR); +} + +/* Other peripherals */ + +/* + * Encapsulation of some miscellaneous GPIO line behaviour for the Spitz boards. + * + * QEMU interface: + * + named GPIO inputs "green-led", "orange-led", "charging", "discharging": + * these currently just print messages that the line has been signalled + * + named GPIO input "adc-temp-on": set to cause the battery-temperature + * value to be passed to the max111x ADC + * + named GPIO output "adc-temp": the ADC value, to be wired up to the max111x + */ +#define TYPE_SPITZ_MISC_GPIO "spitz-misc-gpio" +OBJECT_DECLARE_SIMPLE_TYPE(SpitzMiscGPIOState, SPITZ_MISC_GPIO) + +struct SpitzMiscGPIOState { + SysBusDevice parent_obj; + + qemu_irq adc_value; +}; + +static void spitz_misc_charging(void *opaque, int n, int level) +{ + zaurus_printf("Charging %s.\n", level ? "off" : "on"); +} + +static void spitz_misc_discharging(void *opaque, int n, int level) +{ + zaurus_printf("Discharging %s.\n", level ? "off" : "on"); +} + +static void spitz_misc_green_led(void *opaque, int n, int level) +{ + zaurus_printf("Green LED %s.\n", level ? "off" : "on"); +} + +static void spitz_misc_orange_led(void *opaque, int n, int level) +{ + zaurus_printf("Orange LED %s.\n", level ? "off" : "on"); +} + +static void spitz_misc_adc_temp(void *opaque, int n, int level) +{ + SpitzMiscGPIOState *s = SPITZ_MISC_GPIO(opaque); + int batt_temp = level ? 
SPITZ_BATTERY_TEMP : 0; + + qemu_set_irq(s->adc_value, batt_temp); +} + +static void spitz_misc_gpio_init(Object *obj) +{ + SpitzMiscGPIOState *s = SPITZ_MISC_GPIO(obj); + DeviceState *dev = DEVICE(obj); + + qdev_init_gpio_in_named(dev, spitz_misc_charging, "charging", 1); + qdev_init_gpio_in_named(dev, spitz_misc_discharging, "discharging", 1); + qdev_init_gpio_in_named(dev, spitz_misc_green_led, "green-led", 1); + qdev_init_gpio_in_named(dev, spitz_misc_orange_led, "orange-led", 1); + qdev_init_gpio_in_named(dev, spitz_misc_adc_temp, "adc-temp-on", 1); + + qdev_init_gpio_out_named(dev, &s->adc_value, "adc-temp", 1); +} + +#define SPITZ_SCP_LED_GREEN 1 +#define SPITZ_SCP_JK_B 2 +#define SPITZ_SCP_CHRG_ON 3 +#define SPITZ_SCP_MUTE_L 4 +#define SPITZ_SCP_MUTE_R 5 +#define SPITZ_SCP_CF_POWER 6 +#define SPITZ_SCP_LED_ORANGE 7 +#define SPITZ_SCP_JK_A 8 +#define SPITZ_SCP_ADC_TEMP_ON 9 +#define SPITZ_SCP2_IR_ON 1 +#define SPITZ_SCP2_AKIN_PULLUP 2 +#define SPITZ_SCP2_BACKLIGHT_CONT 7 +#define SPITZ_SCP2_BACKLIGHT_ON 8 +#define SPITZ_SCP2_MIC_BIAS 9 + +static void spitz_scoop_gpio_setup(SpitzMachineState *sms) +{ + DeviceState *miscdev = sysbus_create_simple(TYPE_SPITZ_MISC_GPIO, -1, NULL); + + sms->misc_gpio = miscdev; + + qdev_connect_gpio_out(sms->scp0, SPITZ_SCP_CHRG_ON, + qdev_get_gpio_in_named(miscdev, "charging", 0)); + qdev_connect_gpio_out(sms->scp0, SPITZ_SCP_JK_B, + qdev_get_gpio_in_named(miscdev, "discharging", 0)); + qdev_connect_gpio_out(sms->scp0, SPITZ_SCP_LED_GREEN, + qdev_get_gpio_in_named(miscdev, "green-led", 0)); + qdev_connect_gpio_out(sms->scp0, SPITZ_SCP_LED_ORANGE, + qdev_get_gpio_in_named(miscdev, "orange-led", 0)); + qdev_connect_gpio_out(sms->scp0, SPITZ_SCP_ADC_TEMP_ON, + qdev_get_gpio_in_named(miscdev, "adc-temp-on", 0)); + qdev_connect_gpio_out_named(miscdev, "adc-temp", 0, + qdev_get_gpio_in(sms->max1111, MAX1111_BATT_TEMP)); + + if (sms->scp1) { + qdev_connect_gpio_out(sms->scp1, SPITZ_SCP2_BACKLIGHT_CONT, + qdev_get_gpio_in_named(sms->lcdtg, "bl_bit5", 0)); + qdev_connect_gpio_out(sms->scp1, SPITZ_SCP2_BACKLIGHT_ON, + qdev_get_gpio_in_named(sms->lcdtg, "bl_power", 0)); + } +} + +#define SPITZ_GPIO_HSYNC 22 +#define SPITZ_GPIO_SD_DETECT 9 +#define SPITZ_GPIO_SD_WP 81 +#define SPITZ_GPIO_ON_RESET 89 +#define SPITZ_GPIO_BAT_COVER 90 +#define SPITZ_GPIO_CF1_IRQ 105 +#define SPITZ_GPIO_CF1_CD 94 +#define SPITZ_GPIO_CF2_IRQ 106 +#define SPITZ_GPIO_CF2_CD 93 + +static int spitz_hsync; + +static void spitz_lcd_hsync_handler(void *opaque, int line, int level) +{ + PXA2xxState *cpu = (PXA2xxState *) opaque; + qemu_set_irq(qdev_get_gpio_in(cpu->gpio, SPITZ_GPIO_HSYNC), spitz_hsync); + spitz_hsync ^= 1; +} + +static void spitz_reset(void *opaque, int line, int level) +{ + if (level) { + qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); + } +} + +static void spitz_gpio_setup(PXA2xxState *cpu, int slots) +{ + qemu_irq lcd_hsync; + qemu_irq reset; + + /* + * Bad hack: We toggle the LCD hsync GPIO on every GPIO status + * read to satisfy broken guests that poll-wait for hsync. + * Simulating a real hsync event would be less practical and + * wouldn't guarantee that a guest ever exits the loop. 
+ */ + spitz_hsync = 0; + lcd_hsync = qemu_allocate_irq(spitz_lcd_hsync_handler, cpu, 0); + pxa2xx_gpio_read_notifier(cpu->gpio, lcd_hsync); + pxa2xx_lcd_vsync_notifier(cpu->lcd, lcd_hsync); + + /* MMC/SD host */ + pxa2xx_mmci_handlers(cpu->mmc, + qdev_get_gpio_in(cpu->gpio, SPITZ_GPIO_SD_WP), + qdev_get_gpio_in(cpu->gpio, SPITZ_GPIO_SD_DETECT)); + + /* Battery lock always closed */ + qemu_irq_raise(qdev_get_gpio_in(cpu->gpio, SPITZ_GPIO_BAT_COVER)); + + /* Handle reset */ + reset = qemu_allocate_irq(spitz_reset, cpu, 0); + qdev_connect_gpio_out(cpu->gpio, SPITZ_GPIO_ON_RESET, reset); + + /* PCMCIA signals: card's IRQ and Card-Detect */ + if (slots >= 1) + pxa2xx_pcmcia_set_irq_cb(cpu->pcmcia[0], + qdev_get_gpio_in(cpu->gpio, SPITZ_GPIO_CF1_IRQ), + qdev_get_gpio_in(cpu->gpio, SPITZ_GPIO_CF1_CD)); + if (slots >= 2) + pxa2xx_pcmcia_set_irq_cb(cpu->pcmcia[1], + qdev_get_gpio_in(cpu->gpio, SPITZ_GPIO_CF2_IRQ), + qdev_get_gpio_in(cpu->gpio, SPITZ_GPIO_CF2_CD)); +} + +/* Board init. */ +#define SPITZ_RAM 0x04000000 +#define SPITZ_ROM 0x00800000 + +static struct arm_boot_info spitz_binfo = { + .loader_start = PXA2XX_SDRAM_BASE, + .ram_size = 0x04000000, +}; + +static void spitz_common_init(MachineState *machine) +{ + SpitzMachineClass *smc = SPITZ_MACHINE_GET_CLASS(machine); + SpitzMachineState *sms = SPITZ_MACHINE(machine); + enum spitz_model_e model = smc->model; + PXA2xxState *mpu; + MemoryRegion *address_space_mem = get_system_memory(); + MemoryRegion *rom = g_new(MemoryRegion, 1); + + /* Setup CPU & memory */ + mpu = pxa270_init(address_space_mem, spitz_binfo.ram_size, + machine->cpu_type); + sms->mpu = mpu; + + sl_flash_register(mpu, (model == spitz) ? FLASH_128M : FLASH_1024M); + + memory_region_init_rom(rom, NULL, "spitz.rom", SPITZ_ROM, &error_fatal); + memory_region_add_subregion(address_space_mem, 0, rom); + + /* Setup peripherals */ + spitz_keyboard_register(mpu); + + spitz_ssp_attach(sms); + + sms->scp0 = sysbus_create_simple("scoop", 0x10800000, NULL); + if (model != akita) { + sms->scp1 = sysbus_create_simple("scoop", 0x08800040, NULL); + } else { + sms->scp1 = NULL; + } + + spitz_scoop_gpio_setup(sms); + + spitz_gpio_setup(mpu, (model == akita) ? 1 : 2); + + spitz_i2c_setup(mpu); + + if (model == akita) + spitz_akita_i2c_setup(mpu); + + if (model == terrier) + /* A 6.0 GB microdrive is permanently sitting in CF slot 1. */ + spitz_microdrive_attach(mpu, 1); + else if (model != akita) + /* A 4.0 GB microdrive is permanently sitting in CF slot 0. 
*/ + spitz_microdrive_attach(mpu, 0); + + spitz_binfo.board_id = smc->arm_id; + arm_load_kernel(mpu->cpu, machine, &spitz_binfo); + sl_bootparam_write(SL_PXA_PARAM_BASE); +} + +static void spitz_common_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + + mc->block_default_type = IF_IDE; + mc->ignore_memory_transaction_failures = true; + mc->init = spitz_common_init; +} + +static const TypeInfo spitz_common_info = { + .name = TYPE_SPITZ_MACHINE, + .parent = TYPE_MACHINE, + .abstract = true, + .instance_size = sizeof(SpitzMachineState), + .class_size = sizeof(SpitzMachineClass), + .class_init = spitz_common_class_init, +}; + +static void akitapda_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + SpitzMachineClass *smc = SPITZ_MACHINE_CLASS(oc); + + mc->desc = "Sharp SL-C1000 (Akita) PDA (PXA270)"; + mc->default_cpu_type = ARM_CPU_TYPE_NAME("pxa270-c0"); + smc->model = akita; + smc->arm_id = 0x2e8; +} + +static const TypeInfo akitapda_type = { + .name = MACHINE_TYPE_NAME("akita"), + .parent = TYPE_SPITZ_MACHINE, + .class_init = akitapda_class_init, +}; + +static void spitzpda_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + SpitzMachineClass *smc = SPITZ_MACHINE_CLASS(oc); + + mc->desc = "Sharp SL-C3000 (Spitz) PDA (PXA270)"; + mc->default_cpu_type = ARM_CPU_TYPE_NAME("pxa270-c0"); + smc->model = spitz; + smc->arm_id = 0x2c9; +} + +static const TypeInfo spitzpda_type = { + .name = MACHINE_TYPE_NAME("spitz"), + .parent = TYPE_SPITZ_MACHINE, + .class_init = spitzpda_class_init, +}; + +static void borzoipda_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + SpitzMachineClass *smc = SPITZ_MACHINE_CLASS(oc); + + mc->desc = "Sharp SL-C3100 (Borzoi) PDA (PXA270)"; + mc->default_cpu_type = ARM_CPU_TYPE_NAME("pxa270-c0"); + smc->model = borzoi; + smc->arm_id = 0x33f; +} + +static const TypeInfo borzoipda_type = { + .name = MACHINE_TYPE_NAME("borzoi"), + .parent = TYPE_SPITZ_MACHINE, + .class_init = borzoipda_class_init, +}; + +static void terrierpda_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + SpitzMachineClass *smc = SPITZ_MACHINE_CLASS(oc); + + mc->desc = "Sharp SL-C3200 (Terrier) PDA (PXA270)"; + mc->default_cpu_type = ARM_CPU_TYPE_NAME("pxa270-c5"); + smc->model = terrier; + smc->arm_id = 0x33f; +} + +static const TypeInfo terrierpda_type = { + .name = MACHINE_TYPE_NAME("terrier"), + .parent = TYPE_SPITZ_MACHINE, + .class_init = terrierpda_class_init, +}; + +static void spitz_machine_init(void) +{ + type_register_static(&spitz_common_info); + type_register_static(&akitapda_type); + type_register_static(&spitzpda_type); + type_register_static(&borzoipda_type); + type_register_static(&terrierpda_type); +} + +type_init(spitz_machine_init) + +static bool is_version_0(void *opaque, int version_id) +{ + return version_id == 0; +} + +static const VMStateDescription vmstate_sl_nand_info = { + .name = "sl-nand", + .version_id = 0, + .minimum_version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_UINT8(ctl, SLNANDState), + VMSTATE_STRUCT(ecc, SLNANDState, 0, vmstate_ecc_state, ECCState), + VMSTATE_END_OF_LIST(), + }, +}; + +static Property sl_nand_properties[] = { + DEFINE_PROP_UINT8("manf_id", SLNANDState, manf_id, NAND_MFR_SAMSUNG), + DEFINE_PROP_UINT8("chip_id", SLNANDState, chip_id, 0xf1), + DEFINE_PROP_END_OF_LIST(), +}; + +static void sl_nand_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = 
DEVICE_CLASS(klass); + + dc->vmsd = &vmstate_sl_nand_info; + device_class_set_props(dc, sl_nand_properties); + dc->realize = sl_nand_realize; + /* Reason: init() method uses drive_get() */ + dc->user_creatable = false; +} + +static const TypeInfo sl_nand_info = { + .name = TYPE_SL_NAND, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(SLNANDState), + .instance_init = sl_nand_init, + .class_init = sl_nand_class_init, +}; + +static const VMStateDescription vmstate_spitz_kbd = { + .name = "spitz-keyboard", + .version_id = 1, + .minimum_version_id = 0, + .post_load = spitz_keyboard_post_load, + .fields = (VMStateField[]) { + VMSTATE_UINT16(sense_state, SpitzKeyboardState), + VMSTATE_UINT16(strobe_state, SpitzKeyboardState), + VMSTATE_UNUSED_TEST(is_version_0, 5), + VMSTATE_END_OF_LIST(), + }, +}; + +static void spitz_keyboard_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->vmsd = &vmstate_spitz_kbd; + dc->realize = spitz_keyboard_realize; +} + +static const TypeInfo spitz_keyboard_info = { + .name = TYPE_SPITZ_KEYBOARD, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(SpitzKeyboardState), + .instance_init = spitz_keyboard_init, + .class_init = spitz_keyboard_class_init, +}; + +static const VMStateDescription vmstate_corgi_ssp_regs = { + .name = "corgi-ssp", + .version_id = 2, + .minimum_version_id = 2, + .fields = (VMStateField[]) { + VMSTATE_SSI_PERIPHERAL(ssidev, CorgiSSPState), + VMSTATE_UINT32_ARRAY(enable, CorgiSSPState, 3), + VMSTATE_END_OF_LIST(), + } +}; + +static void corgi_ssp_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + SSIPeripheralClass *k = SSI_PERIPHERAL_CLASS(klass); + + k->realize = corgi_ssp_realize; + k->transfer = corgi_ssp_transfer; + dc->vmsd = &vmstate_corgi_ssp_regs; +} + +static const TypeInfo corgi_ssp_info = { + .name = TYPE_CORGI_SSP, + .parent = TYPE_SSI_PERIPHERAL, + .instance_size = sizeof(CorgiSSPState), + .class_init = corgi_ssp_class_init, +}; + +static const VMStateDescription vmstate_spitz_lcdtg_regs = { + .name = "spitz-lcdtg", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_SSI_PERIPHERAL(ssidev, SpitzLCDTG), + VMSTATE_UINT32(bl_intensity, SpitzLCDTG), + VMSTATE_UINT32(bl_power, SpitzLCDTG), + VMSTATE_END_OF_LIST(), + } +}; + +static void spitz_lcdtg_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + SSIPeripheralClass *k = SSI_PERIPHERAL_CLASS(klass); + + k->realize = spitz_lcdtg_realize; + k->transfer = spitz_lcdtg_transfer; + dc->vmsd = &vmstate_spitz_lcdtg_regs; +} + +static const TypeInfo spitz_lcdtg_info = { + .name = TYPE_SPITZ_LCDTG, + .parent = TYPE_SSI_PERIPHERAL, + .instance_size = sizeof(SpitzLCDTG), + .class_init = spitz_lcdtg_class_init, +}; + +static const TypeInfo spitz_misc_gpio_info = { + .name = TYPE_SPITZ_MISC_GPIO, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(SpitzMiscGPIOState), + .instance_init = spitz_misc_gpio_init, + /* + * No class_init required: device has no internal state so does not + * need to set up reset or vmstate, and does not have a realize method. 
+ */ +}; + +static void spitz_register_types(void) +{ + type_register_static(&corgi_ssp_info); + type_register_static(&spitz_lcdtg_info); + type_register_static(&spitz_keyboard_info); + type_register_static(&sl_nand_info); + type_register_static(&spitz_misc_gpio_info); +} + +type_init(spitz_register_types) diff --git a/hw/arm/stellaris.c b/hw/arm/stellaris.c new file mode 100644 index 000000000..78827ace6 --- /dev/null +++ b/hw/arm/stellaris.c @@ -0,0 +1,1392 @@ +/* + * Luminary Micro Stellaris peripherals + * + * Copyright (c) 2006 CodeSourcery. + * Written by Paul Brook + * + * This code is licensed under the GPL. + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "hw/sysbus.h" +#include "hw/ssi/ssi.h" +#include "hw/arm/boot.h" +#include "qemu/timer.h" +#include "hw/i2c/i2c.h" +#include "net/net.h" +#include "hw/boards.h" +#include "qemu/log.h" +#include "exec/address-spaces.h" +#include "sysemu/sysemu.h" +#include "hw/arm/armv7m.h" +#include "hw/char/pl011.h" +#include "hw/input/gamepad.h" +#include "hw/irq.h" +#include "hw/watchdog/cmsdk-apb-watchdog.h" +#include "migration/vmstate.h" +#include "hw/misc/unimp.h" +#include "hw/timer/stellaris-gptm.h" +#include "hw/qdev-clock.h" +#include "qom/object.h" + +#define GPIO_A 0 +#define GPIO_B 1 +#define GPIO_C 2 +#define GPIO_D 3 +#define GPIO_E 4 +#define GPIO_F 5 +#define GPIO_G 6 + +#define BP_OLED_I2C 0x01 +#define BP_OLED_SSI 0x02 +#define BP_GAMEPAD 0x04 + +#define NUM_IRQ_LINES 64 + +typedef const struct { + const char *name; + uint32_t did0; + uint32_t did1; + uint32_t dc0; + uint32_t dc1; + uint32_t dc2; + uint32_t dc3; + uint32_t dc4; + uint32_t peripherals; +} stellaris_board_info; + +/* System controller. */ + +#define TYPE_STELLARIS_SYS "stellaris-sys" +OBJECT_DECLARE_SIMPLE_TYPE(ssys_state, STELLARIS_SYS) + +struct ssys_state { + SysBusDevice parent_obj; + + MemoryRegion iomem; + uint32_t pborctl; + uint32_t ldopctl; + uint32_t int_status; + uint32_t int_mask; + uint32_t resc; + uint32_t rcc; + uint32_t rcc2; + uint32_t rcgc[3]; + uint32_t scgc[3]; + uint32_t dcgc[3]; + uint32_t clkvclr; + uint32_t ldoarst; + qemu_irq irq; + Clock *sysclk; + /* Properties (all read-only registers) */ + uint32_t user0; + uint32_t user1; + uint32_t did0; + uint32_t did1; + uint32_t dc0; + uint32_t dc1; + uint32_t dc2; + uint32_t dc3; + uint32_t dc4; +}; + +static void ssys_update(ssys_state *s) +{ + qemu_set_irq(s->irq, (s->int_status & s->int_mask) != 0); +} + +static uint32_t pllcfg_sandstorm[16] = { + 0x31c0, /* 1 Mhz */ + 0x1ae0, /* 1.8432 Mhz */ + 0x18c0, /* 2 Mhz */ + 0xd573, /* 2.4576 Mhz */ + 0x37a6, /* 3.57954 Mhz */ + 0x1ae2, /* 3.6864 Mhz */ + 0x0c40, /* 4 Mhz */ + 0x98bc, /* 4.906 Mhz */ + 0x935b, /* 4.9152 Mhz */ + 0x09c0, /* 5 Mhz */ + 0x4dee, /* 5.12 Mhz */ + 0x0c41, /* 6 Mhz */ + 0x75db, /* 6.144 Mhz */ + 0x1ae6, /* 7.3728 Mhz */ + 0x0600, /* 8 Mhz */ + 0x585b /* 8.192 Mhz */ +}; + +static uint32_t pllcfg_fury[16] = { + 0x3200, /* 1 Mhz */ + 0x1b20, /* 1.8432 Mhz */ + 0x1900, /* 2 Mhz */ + 0xf42b, /* 2.4576 Mhz */ + 0x37e3, /* 3.57954 Mhz */ + 0x1b21, /* 3.6864 Mhz */ + 0x0c80, /* 4 Mhz */ + 0x98ee, /* 4.906 Mhz */ + 0xd5b4, /* 4.9152 Mhz */ + 0x0a00, /* 5 Mhz */ + 0x4e27, /* 5.12 Mhz */ + 0x1902, /* 6 Mhz */ + 0xec1c, /* 6.144 Mhz */ + 0x1b23, /* 7.3728 Mhz */ + 0x0640, /* 8 Mhz */ + 0xb11c /* 8.192 Mhz */ +}; + +#define DID0_VER_MASK 0x70000000 +#define DID0_VER_0 0x00000000 +#define DID0_VER_1 0x10000000 + +#define DID0_CLASS_MASK 0x00FF0000 +#define DID0_CLASS_SANDSTORM 0x00000000 +#define DID0_CLASS_FURY 
0x00010000 + +static int ssys_board_class(const ssys_state *s) +{ + uint32_t did0 = s->did0; + switch (did0 & DID0_VER_MASK) { + case DID0_VER_0: + return DID0_CLASS_SANDSTORM; + case DID0_VER_1: + switch (did0 & DID0_CLASS_MASK) { + case DID0_CLASS_SANDSTORM: + case DID0_CLASS_FURY: + return did0 & DID0_CLASS_MASK; + } + /* for unknown classes, fall through */ + default: + /* This can only happen if the hardwired constant did0 value + * in this board's stellaris_board_info struct is wrong. + */ + g_assert_not_reached(); + } +} + +static uint64_t ssys_read(void *opaque, hwaddr offset, + unsigned size) +{ + ssys_state *s = (ssys_state *)opaque; + + switch (offset) { + case 0x000: /* DID0 */ + return s->did0; + case 0x004: /* DID1 */ + return s->did1; + case 0x008: /* DC0 */ + return s->dc0; + case 0x010: /* DC1 */ + return s->dc1; + case 0x014: /* DC2 */ + return s->dc2; + case 0x018: /* DC3 */ + return s->dc3; + case 0x01c: /* DC4 */ + return s->dc4; + case 0x030: /* PBORCTL */ + return s->pborctl; + case 0x034: /* LDOPCTL */ + return s->ldopctl; + case 0x040: /* SRCR0 */ + return 0; + case 0x044: /* SRCR1 */ + return 0; + case 0x048: /* SRCR2 */ + return 0; + case 0x050: /* RIS */ + return s->int_status; + case 0x054: /* IMC */ + return s->int_mask; + case 0x058: /* MISC */ + return s->int_status & s->int_mask; + case 0x05c: /* RESC */ + return s->resc; + case 0x060: /* RCC */ + return s->rcc; + case 0x064: /* PLLCFG */ + { + int xtal; + xtal = (s->rcc >> 6) & 0xf; + switch (ssys_board_class(s)) { + case DID0_CLASS_FURY: + return pllcfg_fury[xtal]; + case DID0_CLASS_SANDSTORM: + return pllcfg_sandstorm[xtal]; + default: + g_assert_not_reached(); + } + } + case 0x070: /* RCC2 */ + return s->rcc2; + case 0x100: /* RCGC0 */ + return s->rcgc[0]; + case 0x104: /* RCGC1 */ + return s->rcgc[1]; + case 0x108: /* RCGC2 */ + return s->rcgc[2]; + case 0x110: /* SCGC0 */ + return s->scgc[0]; + case 0x114: /* SCGC1 */ + return s->scgc[1]; + case 0x118: /* SCGC2 */ + return s->scgc[2]; + case 0x120: /* DCGC0 */ + return s->dcgc[0]; + case 0x124: /* DCGC1 */ + return s->dcgc[1]; + case 0x128: /* DCGC2 */ + return s->dcgc[2]; + case 0x150: /* CLKVCLR */ + return s->clkvclr; + case 0x160: /* LDOARST */ + return s->ldoarst; + case 0x1e0: /* USER0 */ + return s->user0; + case 0x1e4: /* USER1 */ + return s->user1; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "SSYS: read at bad offset 0x%x\n", (int)offset); + return 0; + } +} + +static bool ssys_use_rcc2(ssys_state *s) +{ + return (s->rcc2 >> 31) & 0x1; +} + +/* + * Calculate the system clock period. We only want to propagate + * this change to the rest of the system if we're not being called + * from migration post-load. + */ +static void ssys_calculate_system_clock(ssys_state *s, bool propagate_clock) +{ + int period_ns; + /* + * SYSDIV field specifies divisor: 0 == /1, 1 == /2, etc. Input + * clock is 200MHz, which is a period of 5 ns. Dividing the clock + * frequency by X is the same as multiplying the period by X. 
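+ * As a worked example: a SYSDIV field value of 3 selects a divide-by-4, + * so period_ns = 5 * 4 = 20, i.e. a 50 MHz system clock.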
+ */ + if (ssys_use_rcc2(s)) { + period_ns = 5 * (((s->rcc2 >> 23) & 0x3f) + 1); + } else { + period_ns = 5 * (((s->rcc >> 23) & 0xf) + 1); + } + clock_set_ns(s->sysclk, period_ns); + if (propagate_clock) { + clock_propagate(s->sysclk); + } +} + +static void ssys_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + ssys_state *s = (ssys_state *)opaque; + + switch (offset) { + case 0x030: /* PBORCTL */ + s->pborctl = value & 0xffff; + break; + case 0x034: /* LDOPCTL */ + s->ldopctl = value & 0x1f; + break; + case 0x040: /* SRCR0 */ + case 0x044: /* SRCR1 */ + case 0x048: /* SRCR2 */ + qemu_log_mask(LOG_UNIMP, "Peripheral reset not implemented\n"); + break; + case 0x054: /* IMC */ + s->int_mask = value & 0x7f; + break; + case 0x058: /* MISC */ + s->int_status &= ~value; + break; + case 0x05c: /* RESC */ + s->resc = value & 0x3f; + break; + case 0x060: /* RCC */ + if ((s->rcc & (1 << 13)) != 0 && (value & (1 << 13)) == 0) { + /* PLL enable. */ + s->int_status |= (1 << 6); + } + s->rcc = value; + ssys_calculate_system_clock(s, true); + break; + case 0x070: /* RCC2 */ + if (ssys_board_class(s) == DID0_CLASS_SANDSTORM) { + break; + } + + if ((s->rcc2 & (1 << 13)) != 0 && (value & (1 << 13)) == 0) { + /* PLL enable. */ + s->int_status |= (1 << 6); + } + s->rcc2 = value; + ssys_calculate_system_clock(s, true); + break; + case 0x100: /* RCGC0 */ + s->rcgc[0] = value; + break; + case 0x104: /* RCGC1 */ + s->rcgc[1] = value; + break; + case 0x108: /* RCGC2 */ + s->rcgc[2] = value; + break; + case 0x110: /* SCGC0 */ + s->scgc[0] = value; + break; + case 0x114: /* SCGC1 */ + s->scgc[1] = value; + break; + case 0x118: /* SCGC2 */ + s->scgc[2] = value; + break; + case 0x120: /* DCGC0 */ + s->dcgc[0] = value; + break; + case 0x124: /* DCGC1 */ + s->dcgc[1] = value; + break; + case 0x128: /* DCGC2 */ + s->dcgc[2] = value; + break; + case 0x150: /* CLKVCLR */ + s->clkvclr = value; + break; + case 0x160: /* LDOARST */ + s->ldoarst = value; + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "SSYS: write at bad offset 0x%x\n", (int)offset); + } + ssys_update(s); +} + +static const MemoryRegionOps ssys_ops = { + .read = ssys_read, + .write = ssys_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void stellaris_sys_reset_enter(Object *obj, ResetType type) +{ + ssys_state *s = STELLARIS_SYS(obj); + + s->pborctl = 0x7ffd; + s->rcc = 0x078e3ac0; + + if (ssys_board_class(s) == DID0_CLASS_SANDSTORM) { + s->rcc2 = 0; + } else { + s->rcc2 = 0x07802810; + } + s->rcgc[0] = 1; + s->scgc[0] = 1; + s->dcgc[0] = 1; +} + +static void stellaris_sys_reset_hold(Object *obj) +{ + ssys_state *s = STELLARIS_SYS(obj); + + /* OK to propagate clocks from the hold phase */ + ssys_calculate_system_clock(s, true); +} + +static void stellaris_sys_reset_exit(Object *obj) +{ +} + +static int stellaris_sys_post_load(void *opaque, int version_id) +{ + ssys_state *s = opaque; + + ssys_calculate_system_clock(s, false); + + return 0; +} + +static const VMStateDescription vmstate_stellaris_sys = { + .name = "stellaris_sys", + .version_id = 2, + .minimum_version_id = 1, + .post_load = stellaris_sys_post_load, + .fields = (VMStateField[]) { + VMSTATE_UINT32(pborctl, ssys_state), + VMSTATE_UINT32(ldopctl, ssys_state), + VMSTATE_UINT32(int_mask, ssys_state), + VMSTATE_UINT32(int_status, ssys_state), + VMSTATE_UINT32(resc, ssys_state), + VMSTATE_UINT32(rcc, ssys_state), + VMSTATE_UINT32_V(rcc2, ssys_state, 2), + VMSTATE_UINT32_ARRAY(rcgc, ssys_state, 3), + VMSTATE_UINT32_ARRAY(scgc, ssys_state, 3), + 
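The period arithmetic above is simple enough to check by hand: with a 200 MHz PLL output (5 ns period) and divide-by-(SYSDIV+1), RCC2 merely widens the divisor field. A runnable sketch under those assumptions (a standalone rewrite, not the QEMU code itself):

    #include <stdint.h>
    #include <stdio.h>

    /* Mirror of the divisor logic in ssys_calculate_system_clock():
     * RCC2 takes over when its top bit is set and provides a 6-bit
     * SYSDIV2 instead of RCC's 4-bit SYSDIV. */
    static int sysclk_period_ns(uint32_t rcc, uint32_t rcc2)
    {
        if (rcc2 & (1u << 31)) {
            return 5 * (((rcc2 >> 23) & 0x3f) + 1);
        }
        return 5 * (((rcc >> 23) & 0xf) + 1);
    }

    int main(void)
    {
        /* SYSDIV = 3 -> divide by 4 -> 50 MHz, 20 ns period */
        printf("%d ns\n", sysclk_period_ns(3u << 23, 0));
        /* RCC2 in use, SYSDIV2 = 15 -> divide by 16 -> 80 ns period */
        printf("%d ns\n", sysclk_period_ns(0, (1u << 31) | (15u << 23)));
        return 0;
    }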
VMSTATE_UINT32_ARRAY(dcgc, ssys_state, 3), + VMSTATE_UINT32(clkvclr, ssys_state), + VMSTATE_UINT32(ldoarst, ssys_state), + /* No field for sysclk -- handled in post-load instead */ + VMSTATE_END_OF_LIST() + } +}; + +static Property stellaris_sys_properties[] = { + DEFINE_PROP_UINT32("user0", ssys_state, user0, 0), + DEFINE_PROP_UINT32("user1", ssys_state, user1, 0), + DEFINE_PROP_UINT32("did0", ssys_state, did0, 0), + DEFINE_PROP_UINT32("did1", ssys_state, did1, 0), + DEFINE_PROP_UINT32("dc0", ssys_state, dc0, 0), + DEFINE_PROP_UINT32("dc1", ssys_state, dc1, 0), + DEFINE_PROP_UINT32("dc2", ssys_state, dc2, 0), + DEFINE_PROP_UINT32("dc3", ssys_state, dc3, 0), + DEFINE_PROP_UINT32("dc4", ssys_state, dc4, 0), + DEFINE_PROP_END_OF_LIST() +}; + +static void stellaris_sys_instance_init(Object *obj) +{ + ssys_state *s = STELLARIS_SYS(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(s); + + memory_region_init_io(&s->iomem, obj, &ssys_ops, s, "ssys", 0x00001000); + sysbus_init_mmio(sbd, &s->iomem); + sysbus_init_irq(sbd, &s->irq); + s->sysclk = qdev_init_clock_out(DEVICE(s), "SYSCLK"); +} + +/* I2C controller. */ + +#define TYPE_STELLARIS_I2C "stellaris-i2c" +OBJECT_DECLARE_SIMPLE_TYPE(stellaris_i2c_state, STELLARIS_I2C) + +struct stellaris_i2c_state { + SysBusDevice parent_obj; + + I2CBus *bus; + qemu_irq irq; + MemoryRegion iomem; + uint32_t msa; + uint32_t mcs; + uint32_t mdr; + uint32_t mtpr; + uint32_t mimr; + uint32_t mris; + uint32_t mcr; +}; + +#define STELLARIS_I2C_MCS_BUSY 0x01 +#define STELLARIS_I2C_MCS_ERROR 0x02 +#define STELLARIS_I2C_MCS_ADRACK 0x04 +#define STELLARIS_I2C_MCS_DATACK 0x08 +#define STELLARIS_I2C_MCS_ARBLST 0x10 +#define STELLARIS_I2C_MCS_IDLE 0x20 +#define STELLARIS_I2C_MCS_BUSBSY 0x40 + +static uint64_t stellaris_i2c_read(void *opaque, hwaddr offset, + unsigned size) +{ + stellaris_i2c_state *s = (stellaris_i2c_state *)opaque; + + switch (offset) { + case 0x00: /* MSA */ + return s->msa; + case 0x04: /* MCS */ + /* We don't emulate timing, so the controller is never busy. */ + return s->mcs | STELLARIS_I2C_MCS_IDLE; + case 0x08: /* MDR */ + return s->mdr; + case 0x0c: /* MTPR */ + return s->mtpr; + case 0x10: /* MIMR */ + return s->mimr; + case 0x14: /* MRIS */ + return s->mris; + case 0x18: /* MMIS */ + return s->mris & s->mimr; + case 0x20: /* MCR */ + return s->mcr; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "stellaris_i2c: read at bad offset 0x%x\n", (int)offset); + return 0; + } +} + +static void stellaris_i2c_update(stellaris_i2c_state *s) +{ + int level; + + level = (s->mris & s->mimr) != 0; + qemu_set_irq(s->irq, level); +} + +static void stellaris_i2c_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + stellaris_i2c_state *s = (stellaris_i2c_state *)opaque; + + switch (offset) { + case 0x00: /* MSA */ + s->msa = value & 0xff; + break; + case 0x04: /* MCS */ + if ((s->mcr & 0x10) == 0) { + /* Disabled. Do nothing. */ + break; + } + /* Grab the bus if this is starting a transfer. */ + if ((value & 2) && (s->mcs & STELLARIS_I2C_MCS_BUSBSY) == 0) { + if (i2c_start_transfer(s->bus, s->msa >> 1, s->msa & 1)) { + s->mcs |= STELLARIS_I2C_MCS_ARBLST; + } else { + s->mcs &= ~STELLARIS_I2C_MCS_ARBLST; + s->mcs |= STELLARIS_I2C_MCS_BUSBSY; + } + } + /* If we don't have the bus then indicate an error. */ + if (!i2c_bus_busy(s->bus) + || (s->mcs & STELLARIS_I2C_MCS_BUSBSY) == 0) { + s->mcs |= STELLARIS_I2C_MCS_ERROR; + break; + } + s->mcs &= ~STELLARIS_I2C_MCS_ERROR; + if (value & 1) { + /* Transfer a byte. */ + /* TODO: Handle errors. 
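The MSA register packs the 7-bit target address and the transfer direction into one byte, which is why the MCS handler above calls i2c_start_transfer(s->bus, s->msa >> 1, s->msa & 1). A standalone sketch of that layout (the helper is invented, not patch code):

    #include <stdint.h>
    #include <stdio.h>

    /* MSA layout: bits [7:1] = slave address, bit 0 = direction
     * (1 = master receive, 0 = master transmit). */
    static void msa_decode(uint8_t msa, uint8_t *addr, int *is_recv)
    {
        *addr = msa >> 1;
        *is_recv = msa & 1;
    }

    int main(void)
    {
        uint8_t addr;
        int is_recv;

        msa_decode((0x3d << 1) | 1, &addr, &is_recv); /* read from 0x3d */
        printf("addr=0x%02x recv=%d\n", addr, is_recv);
        return 0;
    }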
*/ + if (s->msa & 1) { + /* Recv */ + s->mdr = i2c_recv(s->bus); + } else { + /* Send */ + i2c_send(s->bus, s->mdr); + } + /* Raise an interrupt. */ + s->mris |= 1; + } + if (value & 4) { + /* Finish transfer. */ + i2c_end_transfer(s->bus); + s->mcs &= ~STELLARIS_I2C_MCS_BUSBSY; + } + break; + case 0x08: /* MDR */ + s->mdr = value & 0xff; + break; + case 0x0c: /* MTPR */ + s->mtpr = value & 0xff; + break; + case 0x10: /* MIMR */ + s->mimr = 1; + break; + case 0x1c: /* MICR */ + s->mris &= ~value; + break; + case 0x20: /* MCR */ + if (value & 1) { + qemu_log_mask(LOG_UNIMP, + "stellaris_i2c: Loopback not implemented\n"); + } + if (value & 0x20) { + qemu_log_mask(LOG_UNIMP, + "stellaris_i2c: Slave mode not implemented\n"); + } + s->mcr = value & 0x31; + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "stellaris_i2c: write at bad offset 0x%x\n", (int)offset); + } + stellaris_i2c_update(s); +} + +static void stellaris_i2c_reset(stellaris_i2c_state *s) +{ + if (s->mcs & STELLARIS_I2C_MCS_BUSBSY) + i2c_end_transfer(s->bus); + + s->msa = 0; + s->mcs = 0; + s->mdr = 0; + s->mtpr = 1; + s->mimr = 0; + s->mris = 0; + s->mcr = 0; + stellaris_i2c_update(s); +} + +static const MemoryRegionOps stellaris_i2c_ops = { + .read = stellaris_i2c_read, + .write = stellaris_i2c_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static const VMStateDescription vmstate_stellaris_i2c = { + .name = "stellaris_i2c", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(msa, stellaris_i2c_state), + VMSTATE_UINT32(mcs, stellaris_i2c_state), + VMSTATE_UINT32(mdr, stellaris_i2c_state), + VMSTATE_UINT32(mtpr, stellaris_i2c_state), + VMSTATE_UINT32(mimr, stellaris_i2c_state), + VMSTATE_UINT32(mris, stellaris_i2c_state), + VMSTATE_UINT32(mcr, stellaris_i2c_state), + VMSTATE_END_OF_LIST() + } +}; + +static void stellaris_i2c_init(Object *obj) +{ + DeviceState *dev = DEVICE(obj); + stellaris_i2c_state *s = STELLARIS_I2C(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + I2CBus *bus; + + sysbus_init_irq(sbd, &s->irq); + bus = i2c_init_bus(dev, "i2c"); + s->bus = bus; + + memory_region_init_io(&s->iomem, obj, &stellaris_i2c_ops, s, + "i2c", 0x1000); + sysbus_init_mmio(sbd, &s->iomem); + /* ??? For now we only implement the master interface. */ + stellaris_i2c_reset(s); +} + +/* Analogue to Digital Converter. This is only partially implemented, + enough for applications that use a combined ADC and timer tick. 
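The SSYS, I2C and ADC blocks all share one interrupt idiom: a raw-status register (RIS) latches events, a mask (IM) gates them, the masked AND drives the IRQ line, and writing 1s to a clear register drops latched bits. A compact sketch of the idiom (standalone, names invented):

    #include <stdint.h>
    #include <stdio.h>

    struct intctl { uint32_t ris, im; };

    /* Line level = masked status, as in the *_update() helpers above. */
    static int irq_level(const struct intctl *c)
    {
        return (c->ris & c->im) != 0;
    }

    int main(void)
    {
        struct intctl c = { .ris = 0, .im = 1u << 3 };

        c.ris |= 1u << 3;                     /* event latched into RIS */
        printf("level=%d\n", irq_level(&c));  /* 1: pending and unmasked */
        c.ris &= ~(1u << 3);                  /* write-1-to-clear */
        printf("level=%d\n", irq_level(&c));  /* back to 0 */
        return 0;
    }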
*/ + +#define STELLARIS_ADC_EM_CONTROLLER 0 +#define STELLARIS_ADC_EM_COMP 1 +#define STELLARIS_ADC_EM_EXTERNAL 4 +#define STELLARIS_ADC_EM_TIMER 5 +#define STELLARIS_ADC_EM_PWM0 6 +#define STELLARIS_ADC_EM_PWM1 7 +#define STELLARIS_ADC_EM_PWM2 8 + +#define STELLARIS_ADC_FIFO_EMPTY 0x0100 +#define STELLARIS_ADC_FIFO_FULL 0x1000 + +#define TYPE_STELLARIS_ADC "stellaris-adc" +typedef struct StellarisADCState stellaris_adc_state; +DECLARE_INSTANCE_CHECKER(stellaris_adc_state, STELLARIS_ADC, + TYPE_STELLARIS_ADC) + +struct StellarisADCState { + SysBusDevice parent_obj; + + MemoryRegion iomem; + uint32_t actss; + uint32_t ris; + uint32_t im; + uint32_t emux; + uint32_t ostat; + uint32_t ustat; + uint32_t sspri; + uint32_t sac; + struct { + uint32_t state; + uint32_t data[16]; + } fifo[4]; + uint32_t ssmux[4]; + uint32_t ssctl[4]; + uint32_t noise; + qemu_irq irq[4]; +}; + +static uint32_t stellaris_adc_fifo_read(stellaris_adc_state *s, int n) +{ + int tail; + + tail = s->fifo[n].state & 0xf; + if (s->fifo[n].state & STELLARIS_ADC_FIFO_EMPTY) { + s->ustat |= 1 << n; + } else { + s->fifo[n].state = (s->fifo[n].state & ~0xf) | ((tail + 1) & 0xf); + s->fifo[n].state &= ~STELLARIS_ADC_FIFO_FULL; + if (tail + 1 == ((s->fifo[n].state >> 4) & 0xf)) + s->fifo[n].state |= STELLARIS_ADC_FIFO_EMPTY; + } + return s->fifo[n].data[tail]; +} + +static void stellaris_adc_fifo_write(stellaris_adc_state *s, int n, + uint32_t value) +{ + int head; + + /* TODO: Real hardware has limited size FIFOs. We have a full 16 entry + FIFO for each sequencer. */ + head = (s->fifo[n].state >> 4) & 0xf; + if (s->fifo[n].state & STELLARIS_ADC_FIFO_FULL) { + s->ostat |= 1 << n; + return; + } + s->fifo[n].data[head] = value; + head = (head + 1) & 0xf; + s->fifo[n].state &= ~STELLARIS_ADC_FIFO_EMPTY; + s->fifo[n].state = (s->fifo[n].state & ~0xf0) | (head << 4); + if ((s->fifo[n].state & 0xf) == head) + s->fifo[n].state |= STELLARIS_ADC_FIFO_FULL; +} + +static void stellaris_adc_update(stellaris_adc_state *s) +{ + int level; + int n; + + for (n = 0; n < 4; n++) { + level = (s->ris & s->im & (1 << n)) != 0; + qemu_set_irq(s->irq[n], level); + } +} + +static void stellaris_adc_trigger(void *opaque, int irq, int level) +{ + stellaris_adc_state *s = (stellaris_adc_state *)opaque; + int n; + + for (n = 0; n < 4; n++) { + if ((s->actss & (1 << n)) == 0) { + continue; + } + + if (((s->emux >> (n * 4)) & 0xff) != 5) { + continue; + } + + /* Some applications use the ADC as a random number source, so introduce + some variation into the signal. */ + s->noise = s->noise * 314159 + 1; + /* ??? Actual inputs not implemented. Return an arbitrary value. */ + stellaris_adc_fifo_write(s, n, 0x200 + ((s->noise >> 16) & 7)); + s->ris |= (1 << n); + stellaris_adc_update(s); + } +} + +static void stellaris_adc_reset(stellaris_adc_state *s) +{ + int n; + + for (n = 0; n < 4; n++) { + s->ssmux[n] = 0; + s->ssctl[n] = 0; + s->fifo[n].state = STELLARIS_ADC_FIFO_EMPTY; + } +} + +static uint64_t stellaris_adc_read(void *opaque, hwaddr offset, + unsigned size) +{ + stellaris_adc_state *s = (stellaris_adc_state *)opaque; + + /* TODO: Implement this. 
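The FIFO bookkeeping above packs an entire ring buffer's metadata into one state word: tail in bits [3:0], head in bits [7:4], plus explicit EMPTY and FULL flags because head == tail is otherwise ambiguous for a 16-entry ring. A self-contained sketch of the same scheme (a standalone rewrite for one FIFO, not the patch code):

    #include <stdint.h>
    #include <stdio.h>

    #define FIFO_EMPTY 0x0100
    #define FIFO_FULL  0x1000

    static uint32_t state = FIFO_EMPTY;  /* tail [3:0], head [7:4], flags */
    static uint32_t data[16];

    static int fifo_push(uint32_t value)
    {
        int head = (state >> 4) & 0xf;

        if (state & FIFO_FULL) {
            return -1;                   /* overflow, cf. OSTAT */
        }
        data[head] = value;
        head = (head + 1) & 0xf;
        state &= ~FIFO_EMPTY;
        state = (state & ~0xf0) | (head << 4);
        if ((state & 0xf) == head) {
            state |= FIFO_FULL;
        }
        return 0;
    }

    static uint32_t fifo_pop(void)
    {
        int tail = state & 0xf;

        if (state & FIFO_EMPTY) {
            return 0;                    /* underflow, cf. USTAT */
        }
        state = (state & ~0xf) | ((tail + 1) & 0xf);
        state &= ~FIFO_FULL;
        if ((state & 0xf) == ((state >> 4) & 0xf)) {
            state |= FIFO_EMPTY;
        }
        return data[tail];
    }

    int main(void)
    {
        fifo_push(0x201);
        fifo_push(0x202);
        unsigned a = (unsigned)fifo_pop();
        unsigned b = (unsigned)fifo_pop();
        printf("0x%x 0x%x\n", a, b);
        return 0;
    }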
*/ + if (offset >= 0x40 && offset < 0xc0) { + int n; + n = (offset - 0x40) >> 5; + switch (offset & 0x1f) { + case 0x00: /* SSMUX */ + return s->ssmux[n]; + case 0x04: /* SSCTL */ + return s->ssctl[n]; + case 0x08: /* SSFIFO */ + return stellaris_adc_fifo_read(s, n); + case 0x0c: /* SSFSTAT */ + return s->fifo[n].state; + default: + break; + } + } + switch (offset) { + case 0x00: /* ACTSS */ + return s->actss; + case 0x04: /* RIS */ + return s->ris; + case 0x08: /* IM */ + return s->im; + case 0x0c: /* ISC */ + return s->ris & s->im; + case 0x10: /* OSTAT */ + return s->ostat; + case 0x14: /* EMUX */ + return s->emux; + case 0x18: /* USTAT */ + return s->ustat; + case 0x20: /* SSPRI */ + return s->sspri; + case 0x30: /* SAC */ + return s->sac; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "stellaris_adc: read at bad offset 0x%x\n", (int)offset); + return 0; + } +} + +static void stellaris_adc_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + stellaris_adc_state *s = (stellaris_adc_state *)opaque; + + /* TODO: Implement this. */ + if (offset >= 0x40 && offset < 0xc0) { + int n; + n = (offset - 0x40) >> 5; + switch (offset & 0x1f) { + case 0x00: /* SSMUX */ + s->ssmux[n] = value & 0x33333333; + return; + case 0x04: /* SSCTL */ + if (value != 6) { + qemu_log_mask(LOG_UNIMP, + "ADC: Unimplemented sequence %" PRIx64 "\n", + value); + } + s->ssctl[n] = value; + return; + default: + break; + } + } + switch (offset) { + case 0x00: /* ACTSS */ + s->actss = value & 0xf; + break; + case 0x08: /* IM */ + s->im = value; + break; + case 0x0c: /* ISC */ + s->ris &= ~value; + break; + case 0x10: /* OSTAT */ + s->ostat &= ~value; + break; + case 0x14: /* EMUX */ + s->emux = value; + break; + case 0x18: /* USTAT */ + s->ustat &= ~value; + break; + case 0x20: /* SSPRI */ + s->sspri = value; + break; + case 0x28: /* PSSI */ + qemu_log_mask(LOG_UNIMP, "ADC: sample initiate unimplemented\n"); + break; + case 0x30: /* SAC */ + s->sac = value; + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "stellaris_adc: write at bad offset 0x%x\n", (int)offset); + } + stellaris_adc_update(s); +} + +static const MemoryRegionOps stellaris_adc_ops = { + .read = stellaris_adc_read, + .write = stellaris_adc_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static const VMStateDescription vmstate_stellaris_adc = { + .name = "stellaris_adc", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(actss, stellaris_adc_state), + VMSTATE_UINT32(ris, stellaris_adc_state), + VMSTATE_UINT32(im, stellaris_adc_state), + VMSTATE_UINT32(emux, stellaris_adc_state), + VMSTATE_UINT32(ostat, stellaris_adc_state), + VMSTATE_UINT32(ustat, stellaris_adc_state), + VMSTATE_UINT32(sspri, stellaris_adc_state), + VMSTATE_UINT32(sac, stellaris_adc_state), + VMSTATE_UINT32(fifo[0].state, stellaris_adc_state), + VMSTATE_UINT32_ARRAY(fifo[0].data, stellaris_adc_state, 16), + VMSTATE_UINT32(ssmux[0], stellaris_adc_state), + VMSTATE_UINT32(ssctl[0], stellaris_adc_state), + VMSTATE_UINT32(fifo[1].state, stellaris_adc_state), + VMSTATE_UINT32_ARRAY(fifo[1].data, stellaris_adc_state, 16), + VMSTATE_UINT32(ssmux[1], stellaris_adc_state), + VMSTATE_UINT32(ssctl[1], stellaris_adc_state), + VMSTATE_UINT32(fifo[2].state, stellaris_adc_state), + VMSTATE_UINT32_ARRAY(fifo[2].data, stellaris_adc_state, 16), + VMSTATE_UINT32(ssmux[2], stellaris_adc_state), + VMSTATE_UINT32(ssctl[2], stellaris_adc_state), + VMSTATE_UINT32(fifo[3].state, stellaris_adc_state), + VMSTATE_UINT32_ARRAY(fifo[3].data, 
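The 0x40..0xbf window handled first in both the read and write paths is four identical 0x20-byte register banks, one per sample sequencer. A sketch of the decode (standalone, helper invented):

    #include <stdint.h>
    #include <stdio.h>

    /* Offsets 0x40..0xbf: four 0x20-byte windows, one per sequencer. */
    static void adc_decode(uint32_t offset, int *seq, unsigned *reg)
    {
        *seq = (offset - 0x40) >> 5;   /* sequencer number, 0..3 */
        *reg = offset & 0x1f;          /* register within the window */
    }

    int main(void)
    {
        int seq;
        unsigned reg;

        adc_decode(0x68, &seq, &reg);  /* sequencer 1's SSFIFO (0x08) */
        printf("seq=%d reg=0x%02x\n", seq, reg);
        return 0;
    }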
stellaris_adc_state, 16), + VMSTATE_UINT32(ssmux[3], stellaris_adc_state), + VMSTATE_UINT32(ssctl[3], stellaris_adc_state), + VMSTATE_UINT32(noise, stellaris_adc_state), + VMSTATE_END_OF_LIST() + } +}; + +static void stellaris_adc_init(Object *obj) +{ + DeviceState *dev = DEVICE(obj); + stellaris_adc_state *s = STELLARIS_ADC(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + int n; + + for (n = 0; n < 4; n++) { + sysbus_init_irq(sbd, &s->irq[n]); + } + + memory_region_init_io(&s->iomem, obj, &stellaris_adc_ops, s, + "adc", 0x1000); + sysbus_init_mmio(sbd, &s->iomem); + stellaris_adc_reset(s); + qdev_init_gpio_in(dev, stellaris_adc_trigger, 1); +} + +/* Board init. */ +static stellaris_board_info stellaris_boards[] = { + { "LM3S811EVB", + 0, + 0x0032000e, + 0x001f001f, /* dc0 */ + 0x001132bf, + 0x01071013, + 0x3f0f01ff, + 0x0000001f, + BP_OLED_I2C + }, + { "LM3S6965EVB", + 0x10010002, + 0x1073402e, + 0x00ff007f, /* dc0 */ + 0x001133ff, + 0x030f5317, + 0x0f0f87ff, + 0x5000007f, + BP_OLED_SSI | BP_GAMEPAD + } +}; + +static void stellaris_init(MachineState *ms, stellaris_board_info *board) +{ + static const int uart_irq[] = {5, 6, 33, 34}; + static const int timer_irq[] = {19, 21, 23, 35}; + static const uint32_t gpio_addr[7] = + { 0x40004000, 0x40005000, 0x40006000, 0x40007000, + 0x40024000, 0x40025000, 0x40026000}; + static const int gpio_irq[7] = {0, 1, 2, 3, 4, 30, 31}; + + /* Memory map of SoC devices, from + * Stellaris LM3S6965 Microcontroller Data Sheet (rev I) + * http://www.ti.com/lit/ds/symlink/lm3s6965.pdf + * + * 40000000 wdtimer + * 40002000 i2c (unimplemented) + * 40004000 GPIO + * 40005000 GPIO + * 40006000 GPIO + * 40007000 GPIO + * 40008000 SSI + * 4000c000 UART + * 4000d000 UART + * 4000e000 UART + * 40020000 i2c + * 40021000 i2c (unimplemented) + * 40024000 GPIO + * 40025000 GPIO + * 40026000 GPIO + * 40028000 PWM (unimplemented) + * 4002c000 QEI (unimplemented) + * 4002d000 QEI (unimplemented) + * 40030000 gptimer + * 40031000 gptimer + * 40032000 gptimer + * 40033000 gptimer + * 40038000 ADC + * 4003c000 analogue comparator (unimplemented) + * 40048000 ethernet + * 400fc000 hibernation module (unimplemented) + * 400fd000 flash memory control (unimplemented) + * 400fe000 system control + */ + + DeviceState *gpio_dev[7], *nvic; + qemu_irq gpio_in[7][8]; + qemu_irq gpio_out[7][8]; + qemu_irq adc; + int sram_size; + int flash_size; + I2CBus *i2c; + DeviceState *dev; + DeviceState *ssys_dev; + int i; + int j; + const uint8_t *macaddr; + + MemoryRegion *sram = g_new(MemoryRegion, 1); + MemoryRegion *flash = g_new(MemoryRegion, 1); + MemoryRegion *system_memory = get_system_memory(); + + flash_size = (((board->dc0 & 0xffff) + 1) << 1) * 1024; + sram_size = ((board->dc0 >> 18) + 1) * 1024; + + /* Flash programming is done via the SCU, so pretend it is ROM. */ + memory_region_init_rom(flash, NULL, "stellaris.flash", flash_size, + &error_fatal); + memory_region_add_subregion(system_memory, 0, flash); + + memory_region_init_ram(sram, NULL, "stellaris.sram", sram_size, + &error_fatal); + memory_region_add_subregion(system_memory, 0x20000000, sram); + + /* + * Create the system-registers object early, because we will + * need its sysclk output. + */ + ssys_dev = qdev_new(TYPE_STELLARIS_SYS); + /* Most devices come preprogrammed with a MAC address in the user data. 
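The two DC0 words in the board table drive the memory sizing just below: the low half encodes flash in 2 KB granules minus one, and the field above bit 16 encodes SRAM (the shift by 18 lands directly on 1 KB units). A sketch of the computation (standalone):

    #include <stdint.h>
    #include <stdio.h>

    static void dc0_sizes(uint32_t dc0, int *flash, int *sram)
    {
        *flash = (((dc0 & 0xffff) + 1) << 1) * 1024; /* 2 KB granules */
        *sram = ((dc0 >> 18) + 1) * 1024;            /* 1 KB granules */
    }

    int main(void)
    {
        int flash, sram;

        dc0_sizes(0x00ff007f, &flash, &sram);  /* the LM3S6965EVB dc0 */
        printf("flash=%d KB sram=%d KB\n", flash / 1024, sram / 1024);
        return 0;
    }

For the LM3S6965EVB entry this yields 256 KB of flash and 64 KB of SRAM, matching the real part.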
*/ + macaddr = nd_table[0].macaddr.a; + qdev_prop_set_uint32(ssys_dev, "user0", + macaddr[0] | (macaddr[1] << 8) | (macaddr[2] << 16)); + qdev_prop_set_uint32(ssys_dev, "user1", + macaddr[3] | (macaddr[4] << 8) | (macaddr[5] << 16)); + qdev_prop_set_uint32(ssys_dev, "did0", board->did0); + qdev_prop_set_uint32(ssys_dev, "did1", board->did1); + qdev_prop_set_uint32(ssys_dev, "dc0", board->dc0); + qdev_prop_set_uint32(ssys_dev, "dc1", board->dc1); + qdev_prop_set_uint32(ssys_dev, "dc2", board->dc2); + qdev_prop_set_uint32(ssys_dev, "dc3", board->dc3); + qdev_prop_set_uint32(ssys_dev, "dc4", board->dc4); + sysbus_realize_and_unref(SYS_BUS_DEVICE(ssys_dev), &error_fatal); + + nvic = qdev_new(TYPE_ARMV7M); + qdev_prop_set_uint32(nvic, "num-irq", NUM_IRQ_LINES); + qdev_prop_set_string(nvic, "cpu-type", ms->cpu_type); + qdev_prop_set_bit(nvic, "enable-bitband", true); + qdev_connect_clock_in(nvic, "cpuclk", + qdev_get_clock_out(ssys_dev, "SYSCLK")); + /* This SoC does not connect the systick reference clock */ + object_property_set_link(OBJECT(nvic), "memory", + OBJECT(get_system_memory()), &error_abort); + /* This will exit with an error if the user passed us a bad cpu_type */ + sysbus_realize_and_unref(SYS_BUS_DEVICE(nvic), &error_fatal); + + /* Now we can wire up the IRQ and MMIO of the system registers */ + sysbus_mmio_map(SYS_BUS_DEVICE(ssys_dev), 0, 0x400fe000); + sysbus_connect_irq(SYS_BUS_DEVICE(ssys_dev), 0, qdev_get_gpio_in(nvic, 28)); + + if (board->dc1 & (1 << 16)) { + dev = sysbus_create_varargs(TYPE_STELLARIS_ADC, 0x40038000, + qdev_get_gpio_in(nvic, 14), + qdev_get_gpio_in(nvic, 15), + qdev_get_gpio_in(nvic, 16), + qdev_get_gpio_in(nvic, 17), + NULL); + adc = qdev_get_gpio_in(dev, 0); + } else { + adc = NULL; + } + for (i = 0; i < 4; i++) { + if (board->dc2 & (0x10000 << i)) { + SysBusDevice *sbd; + + dev = qdev_new(TYPE_STELLARIS_GPTM); + sbd = SYS_BUS_DEVICE(dev); + qdev_connect_clock_in(dev, "clk", + qdev_get_clock_out(ssys_dev, "SYSCLK")); + sysbus_realize_and_unref(sbd, &error_fatal); + sysbus_mmio_map(sbd, 0, 0x40030000 + i * 0x1000); + sysbus_connect_irq(sbd, 0, qdev_get_gpio_in(nvic, timer_irq[i])); + /* TODO: This is incorrect, but we get away with it because + the ADC output is only ever pulsed. 
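USER0 and USER1 each hold three octets of the MAC address, low byte first, matching the qdev_prop_set_uint32() calls above. A sketch of the packing (standalone; the example address is arbitrary):

    #include <stdint.h>
    #include <stdio.h>

    static void mac_to_user(const uint8_t mac[6], uint32_t *u0, uint32_t *u1)
    {
        *u0 = mac[0] | (mac[1] << 8) | (mac[2] << 16);
        *u1 = mac[3] | (mac[4] << 8) | (mac[5] << 16);
    }

    int main(void)
    {
        const uint8_t mac[6] = { 0x00, 0x1a, 0xb6, 0x00, 0x02, 0x74 };
        uint32_t u0, u1;

        mac_to_user(mac, &u0, &u1);
        printf("USER0=0x%06x USER1=0x%06x\n", (unsigned)u0, (unsigned)u1);
        return 0;
    }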
*/ + qdev_connect_gpio_out(dev, 0, adc); + } + } + + if (board->dc1 & (1 << 3)) { /* watchdog present */ + dev = qdev_new(TYPE_LUMINARY_WATCHDOG); + + qdev_connect_clock_in(dev, "WDOGCLK", + qdev_get_clock_out(ssys_dev, "SYSCLK")); + + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(dev), + 0, + 0x40000000u); + sysbus_connect_irq(SYS_BUS_DEVICE(dev), + 0, + qdev_get_gpio_in(nvic, 18)); + } + + + for (i = 0; i < 7; i++) { + if (board->dc4 & (1 << i)) { + gpio_dev[i] = sysbus_create_simple("pl061_luminary", gpio_addr[i], + qdev_get_gpio_in(nvic, + gpio_irq[i])); + for (j = 0; j < 8; j++) { + gpio_in[i][j] = qdev_get_gpio_in(gpio_dev[i], j); + gpio_out[i][j] = NULL; + } + } + } + + if (board->dc2 & (1 << 12)) { + dev = sysbus_create_simple(TYPE_STELLARIS_I2C, 0x40020000, + qdev_get_gpio_in(nvic, 8)); + i2c = (I2CBus *)qdev_get_child_bus(dev, "i2c"); + if (board->peripherals & BP_OLED_I2C) { + i2c_slave_create_simple(i2c, "ssd0303", 0x3d); + } + } + + for (i = 0; i < 4; i++) { + if (board->dc2 & (1 << i)) { + pl011_luminary_create(0x4000c000 + i * 0x1000, + qdev_get_gpio_in(nvic, uart_irq[i]), + serial_hd(i)); + } + } + if (board->dc2 & (1 << 4)) { + dev = sysbus_create_simple("pl022", 0x40008000, + qdev_get_gpio_in(nvic, 7)); + if (board->peripherals & BP_OLED_SSI) { + void *bus; + DeviceState *sddev; + DeviceState *ssddev; + + /* + * Some boards have both an OLED controller and SD card connected to + * the same SSI port, with the SD card chip select connected to a + * GPIO pin. Technically the OLED chip select is connected to the + * SSI Fss pin. We do not bother emulating that as both devices + * should never be selected simultaneously, and our OLED controller + * ignores stray 0xff commands that occur when deselecting the SD + * card. + * + * The h/w wiring is: + * - GPIO pin D0 is wired to the active-low SD card chip select + * - GPIO pin A3 is wired to the active-low OLED chip select + * - The SoC wiring of the PL061 "auxiliary function" for A3 is + * SSI0Fss ("frame signal"), which is an output from the SoC's + * SSI controller. The SSI controller takes SSI0Fss low when it + * transmits a frame, so it can work as a chip-select signal. + * - GPIO A4 is aux-function SSI0Rx, and wired to the SD card Tx + * (the OLED never sends data to the CPU, so no wiring needed) + * - GPIO A5 is aux-function SSI0Tx, and wired to the SD card Rx + * and the OLED display-data-in + * - GPIO A2 is aux-function SSI0Clk, wired to SD card and OLED + * serial-clock input + * So a guest that wants to use the OLED can configure the PL061 + * to make pins A2, A3, A5 aux-function, so they are connected + * directly to the SSI controller. When the SSI controller sends + * data it asserts SSI0Fss which selects the OLED. + * A guest that wants to use the SD card configures A2, A4 and A5 + * as aux-function, but leaves A3 as a software-controlled GPIO + * line. It asserts the SD card chip-select by using the PL061 + * to control pin D0, and lets the SSI controller handle Clk, Tx + * and Rx. (The SSI controller asserts Fss during tx cycles as + * usual, but because A3 is not set to aux-function this is not + * forwarded to the OLED, and so the OLED stays unselected.) 
+ * + * The QEMU implementation instead is: + * - GPIO pin D0 is wired to the active-low SD card chip select, + * and also to the OLED chip-select which is implemented + * as *active-high* + * - SSI controller signals go to the devices regardless of + * whether the guest programs A2, A4, A5 as aux-function or not + * + * The problem with this implementation is if the guest doesn't + * care about the SD card and only uses the OLED. In that case it + * may choose never to do anything with D0 (leaving it in its + * default floating state, which reliably leaves the card disabled + * because an SD card has a pullup on CS within the card itself), + * and only set up A2, A3, A5. This for us would mean the OLED + * never gets the chip-select assert it needs. We work around + * this with a manual raise of D0 here (despite board creation + * code being the wrong place to raise IRQ lines) to put the OLED + * into an initially selected state. + * + * In theory the right way to model this would be: + * - Implement aux-function support in the PL061, with an + * extra set of AFIN and AFOUT GPIO lines (set up so that + * if a GPIO line is in auxfn mode the main GPIO in and out + * track the AFIN and AFOUT lines) + * - Wire the AFOUT for D0 up to either a line from the + * SSI controller that's pulled low around every transmit, + * or at least to an always-0 line here on the board + * - Make the ssd0323 OLED controller chipselect active-low + */ + bus = qdev_get_child_bus(dev, "ssi"); + + sddev = ssi_create_peripheral(bus, "ssi-sd"); + ssddev = ssi_create_peripheral(bus, "ssd0323"); + gpio_out[GPIO_D][0] = qemu_irq_split( + qdev_get_gpio_in_named(sddev, SSI_GPIO_CS, 0), + qdev_get_gpio_in_named(ssddev, SSI_GPIO_CS, 0)); + gpio_out[GPIO_C][7] = qdev_get_gpio_in(ssddev, 0); + + /* Make sure the select pin is high. */ + qemu_irq_raise(gpio_out[GPIO_D][0]); + } + } + if (board->dc4 & (1 << 28)) { + DeviceState *enet; + + qemu_check_nic_model(&nd_table[0], "stellaris"); + + enet = qdev_new("stellaris_enet"); + qdev_set_nic_properties(enet, &nd_table[0]); + sysbus_realize_and_unref(SYS_BUS_DEVICE(enet), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(enet), 0, 0x40048000); + sysbus_connect_irq(SYS_BUS_DEVICE(enet), 0, qdev_get_gpio_in(nvic, 42)); + } + if (board->peripherals & BP_GAMEPAD) { + qemu_irq gpad_irq[5]; + static const int gpad_keycode[5] = { 0xc8, 0xd0, 0xcb, 0xcd, 0x1d }; + + gpad_irq[0] = qemu_irq_invert(gpio_in[GPIO_E][0]); /* up */ + gpad_irq[1] = qemu_irq_invert(gpio_in[GPIO_E][1]); /* down */ + gpad_irq[2] = qemu_irq_invert(gpio_in[GPIO_E][2]); /* left */ + gpad_irq[3] = qemu_irq_invert(gpio_in[GPIO_E][3]); /* right */ + gpad_irq[4] = qemu_irq_invert(gpio_in[GPIO_F][1]); /* select */ + + stellaris_gamepad_init(5, gpad_irq, gpad_keycode); + } + for (i = 0; i < 7; i++) { + if (board->dc4 & (1 << i)) { + for (j = 0; j < 8; j++) { + if (gpio_out[i][j]) { + qdev_connect_gpio_out(gpio_dev[i], j, gpio_out[i][j]); + } + } + } + } + + /* Add dummy regions for the devices we don't implement yet, + * so guest accesses don't cause unlogged crashes. 
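qemu_irq_split() above is what lets one GPIO output drive both chip selects. The behaviour is plain fan-out: setting the split line sets both sinks. A standalone sketch with invented handler names (the real devices are the ssi-sd card, active low, and the ssd0323 OLED, modelled active high):

    #include <stddef.h>
    #include <stdio.h>

    typedef void (*line_handler)(void *opaque, int level);

    struct split { line_handler a, b; void *oa, *ob; };

    static void split_set(struct split *s, int level)
    {
        s->a(s->oa, level);   /* both sinks see every transition */
        s->b(s->ob, level);
    }

    static void sd_cs(void *opaque, int level)
    {
        printf("SD card %s\n", level ? "deselected" : "selected");
    }

    static void oled_cs(void *opaque, int level)
    {
        printf("OLED %s\n", level ? "selected" : "deselected");
    }

    int main(void)
    {
        struct split d0 = { sd_cs, oled_cs, NULL, NULL };

        split_set(&d0, 1);    /* as the board code raises D0 at init */
        return 0;
    }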
+ */ + create_unimplemented_device("i2c-0", 0x40002000, 0x1000); + create_unimplemented_device("i2c-2", 0x40021000, 0x1000); + create_unimplemented_device("PWM", 0x40028000, 0x1000); + create_unimplemented_device("QEI-0", 0x4002c000, 0x1000); + create_unimplemented_device("QEI-1", 0x4002d000, 0x1000); + create_unimplemented_device("analogue-comparator", 0x4003c000, 0x1000); + create_unimplemented_device("hibernation", 0x400fc000, 0x1000); + create_unimplemented_device("flash-control", 0x400fd000, 0x1000); + + armv7m_load_kernel(ARM_CPU(first_cpu), ms->kernel_filename, flash_size); +} + +/* FIXME: Figure out how to generate these from stellaris_boards. */ +static void lm3s811evb_init(MachineState *machine) +{ + stellaris_init(machine, &stellaris_boards[0]); +} + +static void lm3s6965evb_init(MachineState *machine) +{ + stellaris_init(machine, &stellaris_boards[1]); +} + +static void lm3s811evb_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + + mc->desc = "Stellaris LM3S811EVB (Cortex-M3)"; + mc->init = lm3s811evb_init; + mc->ignore_memory_transaction_failures = true; + mc->default_cpu_type = ARM_CPU_TYPE_NAME("cortex-m3"); +} + +static const TypeInfo lm3s811evb_type = { + .name = MACHINE_TYPE_NAME("lm3s811evb"), + .parent = TYPE_MACHINE, + .class_init = lm3s811evb_class_init, +}; + +static void lm3s6965evb_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + + mc->desc = "Stellaris LM3S6965EVB (Cortex-M3)"; + mc->init = lm3s6965evb_init; + mc->ignore_memory_transaction_failures = true; + mc->default_cpu_type = ARM_CPU_TYPE_NAME("cortex-m3"); +} + +static const TypeInfo lm3s6965evb_type = { + .name = MACHINE_TYPE_NAME("lm3s6965evb"), + .parent = TYPE_MACHINE, + .class_init = lm3s6965evb_class_init, +}; + +static void stellaris_machine_init(void) +{ + type_register_static(&lm3s811evb_type); + type_register_static(&lm3s6965evb_type); +} + +type_init(stellaris_machine_init) + +static void stellaris_i2c_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->vmsd = &vmstate_stellaris_i2c; +} + +static const TypeInfo stellaris_i2c_info = { + .name = TYPE_STELLARIS_I2C, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(stellaris_i2c_state), + .instance_init = stellaris_i2c_init, + .class_init = stellaris_i2c_class_init, +}; + +static void stellaris_adc_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->vmsd = &vmstate_stellaris_adc; +} + +static const TypeInfo stellaris_adc_info = { + .name = TYPE_STELLARIS_ADC, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(stellaris_adc_state), + .instance_init = stellaris_adc_init, + .class_init = stellaris_adc_class_init, +}; + +static void stellaris_sys_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + ResettableClass *rc = RESETTABLE_CLASS(klass); + + dc->vmsd = &vmstate_stellaris_sys; + rc->phases.enter = stellaris_sys_reset_enter; + rc->phases.hold = stellaris_sys_reset_hold; + rc->phases.exit = stellaris_sys_reset_exit; + device_class_set_props(dc, stellaris_sys_properties); +} + +static const TypeInfo stellaris_sys_info = { + .name = TYPE_STELLARIS_SYS, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(ssys_state), + .instance_init = stellaris_sys_instance_init, + .class_init = stellaris_sys_class_init, +}; + +static void stellaris_register_types(void) +{ + type_register_static(&stellaris_i2c_info); + 
type_register_static(&stellaris_adc_info); + type_register_static(&stellaris_sys_info); +} + +type_init(stellaris_register_types) diff --git a/hw/arm/stm32f100_soc.c b/hw/arm/stm32f100_soc.c new file mode 100644 index 000000000..f7b344ba9 --- /dev/null +++ b/hw/arm/stm32f100_soc.c @@ -0,0 +1,209 @@ +/* + * STM32F100 SoC + * + * Copyright (c) 2021 Alexandre Iooss + * Copyright (c) 2014 Alistair Francis + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/module.h" +#include "hw/arm/boot.h" +#include "exec/address-spaces.h" +#include "hw/arm/stm32f100_soc.h" +#include "hw/qdev-properties.h" +#include "hw/qdev-clock.h" +#include "hw/misc/unimp.h" +#include "sysemu/sysemu.h" + +/* stm32f100_soc implementation is derived from stm32f205_soc */ + +static const uint32_t usart_addr[STM_NUM_USARTS] = { 0x40013800, 0x40004400, + 0x40004800 }; +static const uint32_t spi_addr[STM_NUM_SPIS] = { 0x40013000, 0x40003800 }; + +static const int usart_irq[STM_NUM_USARTS] = {37, 38, 39}; +static const int spi_irq[STM_NUM_SPIS] = {35, 36}; + +static void stm32f100_soc_initfn(Object *obj) +{ + STM32F100State *s = STM32F100_SOC(obj); + int i; + + object_initialize_child(obj, "armv7m", &s->armv7m, TYPE_ARMV7M); + + for (i = 0; i < STM_NUM_USARTS; i++) { + object_initialize_child(obj, "usart[*]", &s->usart[i], + TYPE_STM32F2XX_USART); + } + + for (i = 0; i < STM_NUM_SPIS; i++) { + object_initialize_child(obj, "spi[*]", &s->spi[i], TYPE_STM32F2XX_SPI); + } + + s->sysclk = qdev_init_clock_in(DEVICE(s), "sysclk", NULL, NULL, 0); + s->refclk = qdev_init_clock_in(DEVICE(s), "refclk", NULL, NULL, 0); +} + +static void stm32f100_soc_realize(DeviceState *dev_soc, Error **errp) +{ + STM32F100State *s = STM32F100_SOC(dev_soc); + DeviceState *dev, *armv7m; + SysBusDevice *busdev; + int i; + + MemoryRegion *system_memory = get_system_memory(); + + /* + * We use s->refclk internally and only define it with qdev_init_clock_in() + * so it is correctly parented and not leaked on an init/deinit; it is not + * intended as an externally exposed clock. + */ + if (clock_has_source(s->refclk)) { + error_setg(errp, "refclk clock must not be wired up by the board code"); + return; + } + + if (!clock_has_source(s->sysclk)) { + error_setg(errp, "sysclk clock must be wired up by the board code"); + return; + } + + /* + * TODO: ideally we should model the SoC RCC and its ability to + * change the sysclk frequency and define different sysclk sources. 
+ */ + + /* The refclk always runs at frequency HCLK / 8 */ + clock_set_mul_div(s->refclk, 8, 1); + clock_set_source(s->refclk, s->sysclk); + + /* + * Init flash region + * Flash starts at 0x08000000 and then is aliased to boot memory at 0x0 + */ + memory_region_init_rom(&s->flash, OBJECT(dev_soc), "STM32F100.flash", + FLASH_SIZE, &error_fatal); + memory_region_init_alias(&s->flash_alias, OBJECT(dev_soc), + "STM32F100.flash.alias", &s->flash, 0, FLASH_SIZE); + memory_region_add_subregion(system_memory, FLASH_BASE_ADDRESS, &s->flash); + memory_region_add_subregion(system_memory, 0, &s->flash_alias); + + /* Init SRAM region */ + memory_region_init_ram(&s->sram, NULL, "STM32F100.sram", SRAM_SIZE, + &error_fatal); + memory_region_add_subregion(system_memory, SRAM_BASE_ADDRESS, &s->sram); + + /* Init ARMv7m */ + armv7m = DEVICE(&s->armv7m); + qdev_prop_set_uint32(armv7m, "num-irq", 61); + qdev_prop_set_string(armv7m, "cpu-type", s->cpu_type); + qdev_prop_set_bit(armv7m, "enable-bitband", true); + qdev_connect_clock_in(armv7m, "cpuclk", s->sysclk); + qdev_connect_clock_in(armv7m, "refclk", s->refclk); + object_property_set_link(OBJECT(&s->armv7m), "memory", + OBJECT(get_system_memory()), &error_abort); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->armv7m), errp)) { + return; + } + + /* Attach UART (uses USART registers) and USART controllers */ + for (i = 0; i < STM_NUM_USARTS; i++) { + dev = DEVICE(&(s->usart[i])); + qdev_prop_set_chr(dev, "chardev", serial_hd(i)); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->usart[i]), errp)) { + return; + } + busdev = SYS_BUS_DEVICE(dev); + sysbus_mmio_map(busdev, 0, usart_addr[i]); + sysbus_connect_irq(busdev, 0, qdev_get_gpio_in(armv7m, usart_irq[i])); + } + + /* SPI 1 and 2 */ + for (i = 0; i < STM_NUM_SPIS; i++) { + dev = DEVICE(&(s->spi[i])); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->spi[i]), errp)) { + return; + } + busdev = SYS_BUS_DEVICE(dev); + sysbus_mmio_map(busdev, 0, spi_addr[i]); + sysbus_connect_irq(busdev, 0, qdev_get_gpio_in(armv7m, spi_irq[i])); + } + + create_unimplemented_device("timer[2]", 0x40000000, 0x400); + create_unimplemented_device("timer[3]", 0x40000400, 0x400); + create_unimplemented_device("timer[4]", 0x40000800, 0x400); + create_unimplemented_device("timer[6]", 0x40001000, 0x400); + create_unimplemented_device("timer[7]", 0x40001400, 0x400); + create_unimplemented_device("RTC", 0x40002800, 0x400); + create_unimplemented_device("WWDG", 0x40002C00, 0x400); + create_unimplemented_device("IWDG", 0x40003000, 0x400); + create_unimplemented_device("I2C1", 0x40005400, 0x400); + create_unimplemented_device("I2C2", 0x40005800, 0x400); + create_unimplemented_device("BKP", 0x40006C00, 0x400); + create_unimplemented_device("PWR", 0x40007000, 0x400); + create_unimplemented_device("DAC", 0x40007400, 0x400); + create_unimplemented_device("CEC", 0x40007800, 0x400); + create_unimplemented_device("AFIO", 0x40010000, 0x400); + create_unimplemented_device("EXTI", 0x40010400, 0x400); + create_unimplemented_device("GPIOA", 0x40010800, 0x400); + create_unimplemented_device("GPIOB", 0x40010C00, 0x400); + create_unimplemented_device("GPIOC", 0x40011000, 0x400); + create_unimplemented_device("GPIOD", 0x40011400, 0x400); + create_unimplemented_device("GPIOE", 0x40011800, 0x400); + create_unimplemented_device("ADC1", 0x40012400, 0x400); + create_unimplemented_device("timer[1]", 0x40012C00, 0x400); + create_unimplemented_device("timer[15]", 0x40014000, 0x400); + create_unimplemented_device("timer[16]", 0x40014400, 0x400); + create_unimplemented_device("timer[17]", 
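clock_set_mul_div(s->refclk, 8, 1) scales the clock period by 8, so the SysTick reference ends up at HCLK / 8 as the comment says. A back-of-the-envelope sketch of that relation (plain arithmetic, not the QEMU clock API):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t sysclk_hz = 24000000;   /* e.g. a 24 MHz STM32F100 part */
        unsigned mul = 8, div = 1;       /* period multiplier/divider */
        uint64_t refclk_hz = sysclk_hz * div / mul;

        printf("refclk = %llu Hz\n", (unsigned long long)refclk_hz);
        return 0;
    }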
0x40014800, 0x400); + create_unimplemented_device("DMA", 0x40020000, 0x400); + create_unimplemented_device("RCC", 0x40021000, 0x400); + create_unimplemented_device("Flash Int", 0x40022000, 0x400); + create_unimplemented_device("CRC", 0x40023000, 0x400); +} + +static Property stm32f100_soc_properties[] = { + DEFINE_PROP_STRING("cpu-type", STM32F100State, cpu_type), + DEFINE_PROP_END_OF_LIST(), +}; + +static void stm32f100_soc_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = stm32f100_soc_realize; + device_class_set_props(dc, stm32f100_soc_properties); +} + +static const TypeInfo stm32f100_soc_info = { + .name = TYPE_STM32F100_SOC, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(STM32F100State), + .instance_init = stm32f100_soc_initfn, + .class_init = stm32f100_soc_class_init, +}; + +static void stm32f100_soc_types(void) +{ + type_register_static(&stm32f100_soc_info); +} + +type_init(stm32f100_soc_types) diff --git a/hw/arm/stm32f205_soc.c b/hw/arm/stm32f205_soc.c new file mode 100644 index 000000000..c6b75a381 --- /dev/null +++ b/hw/arm/stm32f205_soc.c @@ -0,0 +1,230 @@ +/* + * STM32F205 SoC + * + * Copyright (c) 2014 Alistair Francis + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
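Each create_unimplemented_device() call above installs a background MMIO region that only logs accesses and reads as zero, so guest drivers touching the RCC, GPIO banks, etc. keep running before those blocks are modelled. A standalone sketch of that behaviour (handler and struct names invented):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    struct unimp { const char *name; };

    static uint64_t unimp_read(struct unimp *d, uint64_t offset)
    {
        printf("%s: unimplemented read at offset 0x%" PRIx64 "\n",
               d->name, offset);
        return 0;   /* reads as zero instead of faulting */
    }

    static void unimp_write(struct unimp *d, uint64_t offset, uint64_t val)
    {
        printf("%s: unimplemented write of 0x%" PRIx64 " at offset 0x%"
               PRIx64 "\n", d->name, val, offset);
    }

    int main(void)
    {
        struct unimp rcc = { "RCC" };

        unimp_write(&rcc, 0x0, 0x1);   /* e.g. a guest enabling a clock */
        (void)unimp_read(&rcc, 0x0);
        return 0;
    }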
+ */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/module.h" +#include "hw/arm/boot.h" +#include "exec/address-spaces.h" +#include "hw/arm/stm32f205_soc.h" +#include "hw/qdev-properties.h" +#include "hw/qdev-clock.h" +#include "sysemu/sysemu.h" + +/* At the moment only Timer 2 to 5 are modelled */ +static const uint32_t timer_addr[STM_NUM_TIMERS] = { 0x40000000, 0x40000400, + 0x40000800, 0x40000C00 }; +static const uint32_t usart_addr[STM_NUM_USARTS] = { 0x40011000, 0x40004400, + 0x40004800, 0x40004C00, 0x40005000, 0x40011400 }; +static const uint32_t adc_addr[STM_NUM_ADCS] = { 0x40012000, 0x40012100, + 0x40012200 }; +static const uint32_t spi_addr[STM_NUM_SPIS] = { 0x40013000, 0x40003800, + 0x40003C00 }; + +static const int timer_irq[STM_NUM_TIMERS] = {28, 29, 30, 50}; +static const int usart_irq[STM_NUM_USARTS] = {37, 38, 39, 52, 53, 71}; +#define ADC_IRQ 18 +static const int spi_irq[STM_NUM_SPIS] = {35, 36, 51}; + +static void stm32f205_soc_initfn(Object *obj) +{ + STM32F205State *s = STM32F205_SOC(obj); + int i; + + object_initialize_child(obj, "armv7m", &s->armv7m, TYPE_ARMV7M); + + object_initialize_child(obj, "syscfg", &s->syscfg, TYPE_STM32F2XX_SYSCFG); + + for (i = 0; i < STM_NUM_USARTS; i++) { + object_initialize_child(obj, "usart[*]", &s->usart[i], + TYPE_STM32F2XX_USART); + } + + for (i = 0; i < STM_NUM_TIMERS; i++) { + object_initialize_child(obj, "timer[*]", &s->timer[i], + TYPE_STM32F2XX_TIMER); + } + + s->adc_irqs = OR_IRQ(object_new(TYPE_OR_IRQ)); + + for (i = 0; i < STM_NUM_ADCS; i++) { + object_initialize_child(obj, "adc[*]", &s->adc[i], TYPE_STM32F2XX_ADC); + } + + for (i = 0; i < STM_NUM_SPIS; i++) { + object_initialize_child(obj, "spi[*]", &s->spi[i], TYPE_STM32F2XX_SPI); + } + + s->sysclk = qdev_init_clock_in(DEVICE(s), "sysclk", NULL, NULL, 0); + s->refclk = qdev_init_clock_in(DEVICE(s), "refclk", NULL, NULL, 0); +} + +static void stm32f205_soc_realize(DeviceState *dev_soc, Error **errp) +{ + STM32F205State *s = STM32F205_SOC(dev_soc); + DeviceState *dev, *armv7m; + SysBusDevice *busdev; + int i; + + MemoryRegion *system_memory = get_system_memory(); + + /* + * We use s->refclk internally and only define it with qdev_init_clock_in() + * so it is correctly parented and not leaked on an init/deinit; it is not + * intended as an externally exposed clock. + */ + if (clock_has_source(s->refclk)) { + error_setg(errp, "refclk clock must not be wired up by the board code"); + return; + } + + if (!clock_has_source(s->sysclk)) { + error_setg(errp, "sysclk clock must be wired up by the board code"); + return; + } + + /* + * TODO: ideally we should model the SoC RCC and its ability to + * change the sysclk frequency and define different sysclk sources. 
+ */ + + /* The refclk always runs at frequency HCLK / 8 */ + clock_set_mul_div(s->refclk, 8, 1); + clock_set_source(s->refclk, s->sysclk); + + memory_region_init_rom(&s->flash, OBJECT(dev_soc), "STM32F205.flash", + FLASH_SIZE, &error_fatal); + memory_region_init_alias(&s->flash_alias, OBJECT(dev_soc), + "STM32F205.flash.alias", &s->flash, 0, FLASH_SIZE); + + memory_region_add_subregion(system_memory, FLASH_BASE_ADDRESS, &s->flash); + memory_region_add_subregion(system_memory, 0, &s->flash_alias); + + memory_region_init_ram(&s->sram, NULL, "STM32F205.sram", SRAM_SIZE, + &error_fatal); + memory_region_add_subregion(system_memory, SRAM_BASE_ADDRESS, &s->sram); + + armv7m = DEVICE(&s->armv7m); + qdev_prop_set_uint32(armv7m, "num-irq", 96); + qdev_prop_set_string(armv7m, "cpu-type", s->cpu_type); + qdev_prop_set_bit(armv7m, "enable-bitband", true); + qdev_connect_clock_in(armv7m, "cpuclk", s->sysclk); + qdev_connect_clock_in(armv7m, "refclk", s->refclk); + object_property_set_link(OBJECT(&s->armv7m), "memory", + OBJECT(get_system_memory()), &error_abort); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->armv7m), errp)) { + return; + } + + /* System configuration controller */ + dev = DEVICE(&s->syscfg); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->syscfg), errp)) { + return; + } + busdev = SYS_BUS_DEVICE(dev); + sysbus_mmio_map(busdev, 0, 0x40013800); + + /* Attach UART (uses USART registers) and USART controllers */ + for (i = 0; i < STM_NUM_USARTS; i++) { + dev = DEVICE(&(s->usart[i])); + qdev_prop_set_chr(dev, "chardev", serial_hd(i)); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->usart[i]), errp)) { + return; + } + busdev = SYS_BUS_DEVICE(dev); + sysbus_mmio_map(busdev, 0, usart_addr[i]); + sysbus_connect_irq(busdev, 0, qdev_get_gpio_in(armv7m, usart_irq[i])); + } + + /* Timer 2 to 5 */ + for (i = 0; i < STM_NUM_TIMERS; i++) { + dev = DEVICE(&(s->timer[i])); + qdev_prop_set_uint64(dev, "clock-frequency", 1000000000); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->timer[i]), errp)) { + return; + } + busdev = SYS_BUS_DEVICE(dev); + sysbus_mmio_map(busdev, 0, timer_addr[i]); + sysbus_connect_irq(busdev, 0, qdev_get_gpio_in(armv7m, timer_irq[i])); + } + + /* ADC 1 to 3 */ + object_property_set_int(OBJECT(s->adc_irqs), "num-lines", STM_NUM_ADCS, + &error_abort); + if (!qdev_realize(DEVICE(s->adc_irqs), NULL, errp)) { + return; + } + qdev_connect_gpio_out(DEVICE(s->adc_irqs), 0, + qdev_get_gpio_in(armv7m, ADC_IRQ)); + + for (i = 0; i < STM_NUM_ADCS; i++) { + dev = DEVICE(&(s->adc[i])); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->adc[i]), errp)) { + return; + } + busdev = SYS_BUS_DEVICE(dev); + sysbus_mmio_map(busdev, 0, adc_addr[i]); + sysbus_connect_irq(busdev, 0, + qdev_get_gpio_in(DEVICE(s->adc_irqs), i)); + } + + /* SPI 1 and 2 */ + for (i = 0; i < STM_NUM_SPIS; i++) { + dev = DEVICE(&(s->spi[i])); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->spi[i]), errp)) { + return; + } + busdev = SYS_BUS_DEVICE(dev); + sysbus_mmio_map(busdev, 0, spi_addr[i]); + sysbus_connect_irq(busdev, 0, qdev_get_gpio_in(armv7m, spi_irq[i])); + } +} + +static Property stm32f205_soc_properties[] = { + DEFINE_PROP_STRING("cpu-type", STM32F205State, cpu_type), + DEFINE_PROP_END_OF_LIST(), +}; + +static void stm32f205_soc_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = stm32f205_soc_realize; + device_class_set_props(dc, stm32f205_soc_properties); +} + +static const TypeInfo stm32f205_soc_info = { + .name = TYPE_STM32F205_SOC, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = 
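The TYPE_OR_IRQ device realized above is a plain OR gate: the three ADC interrupt outputs are reduced onto the single NVIC input 18 (ADC_IRQ). A sketch of the reduction (standalone):

    #include <stdio.h>

    #define NUM_LINES 3            /* STM_NUM_ADCS on this SoC */

    static int levels[NUM_LINES];

    static int or_gate_output(void)
    {
        int out = 0;

        for (int i = 0; i < NUM_LINES; i++) {
            out |= levels[i];
        }
        return out;
    }

    int main(void)
    {
        levels[1] = 1;             /* the second ADC raises its line */
        printf("NVIC line 18 level = %d\n", or_gate_output());
        return 0;
    }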
sizeof(STM32F205State), + .instance_init = stm32f205_soc_initfn, + .class_init = stm32f205_soc_class_init, +}; + +static void stm32f205_soc_types(void) +{ + type_register_static(&stm32f205_soc_info); +} + +type_init(stm32f205_soc_types) diff --git a/hw/arm/stm32f405_soc.c b/hw/arm/stm32f405_soc.c new file mode 100644 index 000000000..0019b7f47 --- /dev/null +++ b/hw/arm/stm32f405_soc.c @@ -0,0 +1,310 @@ +/* + * STM32F405 SoC + * + * Copyright (c) 2014 Alistair Francis + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu-common.h" +#include "exec/address-spaces.h" +#include "sysemu/sysemu.h" +#include "hw/arm/stm32f405_soc.h" +#include "hw/qdev-clock.h" +#include "hw/misc/unimp.h" + +#define SYSCFG_ADD 0x40013800 +static const uint32_t usart_addr[] = { 0x40011000, 0x40004400, 0x40004800, + 0x40004C00, 0x40005000, 0x40011400, + 0x40007800, 0x40007C00 }; +/* At the moment only Timer 2 to 5 are modelled */ +static const uint32_t timer_addr[] = { 0x40000000, 0x40000400, + 0x40000800, 0x40000C00 }; +static const uint32_t adc_addr[] = { 0x40012000, 0x40012100, 0x40012200, + 0x40012300, 0x40012400, 0x40012500 }; +static const uint32_t spi_addr[] = { 0x40013000, 0x40003800, 0x40003C00, + 0x40013400, 0x40015000, 0x40015400 }; +#define EXTI_ADDR 0x40013C00 + +#define SYSCFG_IRQ 71 +static const int usart_irq[] = { 37, 38, 39, 52, 53, 71, 82, 83 }; +static const int timer_irq[] = { 28, 29, 30, 50 }; +#define ADC_IRQ 18 +static const int spi_irq[] = { 35, 36, 51, 0, 0, 0 }; +static const int exti_irq[] = { 6, 7, 8, 9, 10, 23, 23, 23, 23, 23, 40, + 40, 40, 40, 40, 40} ; + + +static void stm32f405_soc_initfn(Object *obj) +{ + STM32F405State *s = STM32F405_SOC(obj); + int i; + + object_initialize_child(obj, "armv7m", &s->armv7m, TYPE_ARMV7M); + + object_initialize_child(obj, "syscfg", &s->syscfg, TYPE_STM32F4XX_SYSCFG); + + for (i = 0; i < STM_NUM_USARTS; i++) { + object_initialize_child(obj, "usart[*]", &s->usart[i], + TYPE_STM32F2XX_USART); + } + + for (i = 0; i < STM_NUM_TIMERS; i++) { + object_initialize_child(obj, "timer[*]", &s->timer[i], + TYPE_STM32F2XX_TIMER); + } + + for (i = 0; i < STM_NUM_ADCS; i++) { + object_initialize_child(obj, "adc[*]", &s->adc[i], TYPE_STM32F2XX_ADC); + } + + for (i = 0; i < STM_NUM_SPIS; i++) { + object_initialize_child(obj, "spi[*]", &s->spi[i], TYPE_STM32F2XX_SPI); + } + + object_initialize_child(obj, "exti", &s->exti, TYPE_STM32F4XX_EXTI); + + s->sysclk = 
qdev_init_clock_in(DEVICE(s), "sysclk", NULL, NULL, 0); + s->refclk = qdev_init_clock_in(DEVICE(s), "refclk", NULL, NULL, 0); +} + +static void stm32f405_soc_realize(DeviceState *dev_soc, Error **errp) +{ + STM32F405State *s = STM32F405_SOC(dev_soc); + MemoryRegion *system_memory = get_system_memory(); + DeviceState *dev, *armv7m; + SysBusDevice *busdev; + Error *err = NULL; + int i; + + /* + * We use s->refclk internally and only define it with qdev_init_clock_in() + * so it is correctly parented and not leaked on an init/deinit; it is not + * intended as an externally exposed clock. + */ + if (clock_has_source(s->refclk)) { + error_setg(errp, "refclk clock must not be wired up by the board code"); + return; + } + + if (!clock_has_source(s->sysclk)) { + error_setg(errp, "sysclk clock must be wired up by the board code"); + return; + } + + /* + * TODO: ideally we should model the SoC RCC and its ability to + * change the sysclk frequency and define different sysclk sources. + */ + + /* The refclk always runs at frequency HCLK / 8 */ + clock_set_mul_div(s->refclk, 8, 1); + clock_set_source(s->refclk, s->sysclk); + + memory_region_init_rom(&s->flash, OBJECT(dev_soc), "STM32F405.flash", + FLASH_SIZE, &err); + if (err != NULL) { + error_propagate(errp, err); + return; + } + memory_region_init_alias(&s->flash_alias, OBJECT(dev_soc), + "STM32F405.flash.alias", &s->flash, 0, + FLASH_SIZE); + + memory_region_add_subregion(system_memory, FLASH_BASE_ADDRESS, &s->flash); + memory_region_add_subregion(system_memory, 0, &s->flash_alias); + + memory_region_init_ram(&s->sram, NULL, "STM32F405.sram", SRAM_SIZE, + &err); + if (err != NULL) { + error_propagate(errp, err); + return; + } + memory_region_add_subregion(system_memory, SRAM_BASE_ADDRESS, &s->sram); + + armv7m = DEVICE(&s->armv7m); + qdev_prop_set_uint32(armv7m, "num-irq", 96); + qdev_prop_set_string(armv7m, "cpu-type", s->cpu_type); + qdev_prop_set_bit(armv7m, "enable-bitband", true); + qdev_connect_clock_in(armv7m, "cpuclk", s->sysclk); + qdev_connect_clock_in(armv7m, "refclk", s->refclk); + object_property_set_link(OBJECT(&s->armv7m), "memory", + OBJECT(system_memory), &error_abort); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->armv7m), errp)) { + return; + } + + /* System configuration controller */ + dev = DEVICE(&s->syscfg); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->syscfg), errp)) { + return; + } + busdev = SYS_BUS_DEVICE(dev); + sysbus_mmio_map(busdev, 0, SYSCFG_ADD); + sysbus_connect_irq(busdev, 0, qdev_get_gpio_in(armv7m, SYSCFG_IRQ)); + + /* Attach UART (uses USART registers) and USART controllers */ + for (i = 0; i < STM_NUM_USARTS; i++) { + dev = DEVICE(&(s->usart[i])); + qdev_prop_set_chr(dev, "chardev", serial_hd(i)); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->usart[i]), errp)) { + return; + } + busdev = SYS_BUS_DEVICE(dev); + sysbus_mmio_map(busdev, 0, usart_addr[i]); + sysbus_connect_irq(busdev, 0, qdev_get_gpio_in(armv7m, usart_irq[i])); + } + + /* Timer 2 to 5 */ + for (i = 0; i < STM_NUM_TIMERS; i++) { + dev = DEVICE(&(s->timer[i])); + qdev_prop_set_uint64(dev, "clock-frequency", 1000000000); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->timer[i]), errp)) { + return; + } + busdev = SYS_BUS_DEVICE(dev); + sysbus_mmio_map(busdev, 0, timer_addr[i]); + sysbus_connect_irq(busdev, 0, qdev_get_gpio_in(armv7m, timer_irq[i])); + } + + /* ADC device, the IRQs are ORed together */ + if (!object_initialize_child_with_props(OBJECT(s), "adc-orirq", + &s->adc_irqs, sizeof(s->adc_irqs), + TYPE_OR_IRQ, errp, NULL)) { + return; + } + 
object_property_set_int(OBJECT(&s->adc_irqs), "num-lines", STM_NUM_ADCS, + &error_abort); + if (!qdev_realize(DEVICE(&s->adc_irqs), NULL, errp)) { + return; + } + qdev_connect_gpio_out(DEVICE(&s->adc_irqs), 0, + qdev_get_gpio_in(armv7m, ADC_IRQ)); + + for (i = 0; i < STM_NUM_ADCS; i++) { + dev = DEVICE(&(s->adc[i])); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->adc[i]), errp)) { + return; + } + busdev = SYS_BUS_DEVICE(dev); + sysbus_mmio_map(busdev, 0, adc_addr[i]); + sysbus_connect_irq(busdev, 0, + qdev_get_gpio_in(DEVICE(&s->adc_irqs), i)); + } + + /* SPI devices */ + for (i = 0; i < STM_NUM_SPIS; i++) { + dev = DEVICE(&(s->spi[i])); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->spi[i]), errp)) { + return; + } + busdev = SYS_BUS_DEVICE(dev); + sysbus_mmio_map(busdev, 0, spi_addr[i]); + sysbus_connect_irq(busdev, 0, qdev_get_gpio_in(armv7m, spi_irq[i])); + } + + /* EXTI device */ + dev = DEVICE(&s->exti); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->exti), errp)) { + return; + } + busdev = SYS_BUS_DEVICE(dev); + sysbus_mmio_map(busdev, 0, EXTI_ADDR); + for (i = 0; i < 16; i++) { + sysbus_connect_irq(busdev, i, qdev_get_gpio_in(armv7m, exti_irq[i])); + } + for (i = 0; i < 16; i++) { + qdev_connect_gpio_out(DEVICE(&s->syscfg), i, qdev_get_gpio_in(dev, i)); + } + + create_unimplemented_device("timer[7]", 0x40001400, 0x400); + create_unimplemented_device("timer[12]", 0x40001800, 0x400); + create_unimplemented_device("timer[6]", 0x40001000, 0x400); + create_unimplemented_device("timer[13]", 0x40001C00, 0x400); + create_unimplemented_device("timer[14]", 0x40002000, 0x400); + create_unimplemented_device("RTC and BKP", 0x40002800, 0x400); + create_unimplemented_device("WWDG", 0x40002C00, 0x400); + create_unimplemented_device("IWDG", 0x40003000, 0x400); + create_unimplemented_device("I2S2ext", 0x40003000, 0x400); + create_unimplemented_device("I2S3ext", 0x40004000, 0x400); + create_unimplemented_device("I2C1", 0x40005400, 0x400); + create_unimplemented_device("I2C2", 0x40005800, 0x400); + create_unimplemented_device("I2C3", 0x40005C00, 0x400); + create_unimplemented_device("CAN1", 0x40006400, 0x400); + create_unimplemented_device("CAN2", 0x40006800, 0x400); + create_unimplemented_device("PWR", 0x40007000, 0x400); + create_unimplemented_device("DAC", 0x40007400, 0x400); + create_unimplemented_device("timer[1]", 0x40010000, 0x400); + create_unimplemented_device("timer[8]", 0x40010400, 0x400); + create_unimplemented_device("SDIO", 0x40012C00, 0x400); + create_unimplemented_device("timer[9]", 0x40014000, 0x400); + create_unimplemented_device("timer[10]", 0x40014400, 0x400); + create_unimplemented_device("timer[11]", 0x40014800, 0x400); + create_unimplemented_device("GPIOA", 0x40020000, 0x400); + create_unimplemented_device("GPIOB", 0x40020400, 0x400); + create_unimplemented_device("GPIOC", 0x40020800, 0x400); + create_unimplemented_device("GPIOD", 0x40020C00, 0x400); + create_unimplemented_device("GPIOE", 0x40021000, 0x400); + create_unimplemented_device("GPIOF", 0x40021400, 0x400); + create_unimplemented_device("GPIOG", 0x40021800, 0x400); + create_unimplemented_device("GPIOH", 0x40021C00, 0x400); + create_unimplemented_device("GPIOI", 0x40022000, 0x400); + create_unimplemented_device("CRC", 0x40023000, 0x400); + create_unimplemented_device("RCC", 0x40023800, 0x400); + create_unimplemented_device("Flash Int", 0x40023C00, 0x400); + create_unimplemented_device("BKPSRAM", 0x40024000, 0x400); + create_unimplemented_device("DMA1", 0x40026000, 0x400); + create_unimplemented_device("DMA2", 0x40026400, 0x400); + 
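The exti_irq[] table wired up above encodes the usual STM32 fan-in: EXTI lines 0-4 get dedicated NVIC vectors 6-10, lines 5-9 share vector 23 (EXTI9_5) and lines 10-15 share vector 40 (EXTI15_10). A sketch of the mapping (standalone):

    #include <stdio.h>

    static int exti_to_nvic(int line)
    {
        static const int exti_irq[16] = {
            6, 7, 8, 9, 10, 23, 23, 23, 23, 23, 40, 40, 40, 40, 40, 40
        };
        return exti_irq[line];
    }

    int main(void)
    {
        for (int line = 0; line < 16; line += 5) {
            printf("EXTI%d -> NVIC %d\n", line, exti_to_nvic(line));
        }
        return 0;
    }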
create_unimplemented_device("Ethernet", 0x40028000, 0x1400); + create_unimplemented_device("USB OTG HS", 0x40040000, 0x30000); + create_unimplemented_device("USB OTG FS", 0x50000000, 0x31000); + create_unimplemented_device("DCMI", 0x50050000, 0x400); + create_unimplemented_device("RNG", 0x50060800, 0x400); +} + +static Property stm32f405_soc_properties[] = { + DEFINE_PROP_STRING("cpu-type", STM32F405State, cpu_type), + DEFINE_PROP_END_OF_LIST(), +}; + +static void stm32f405_soc_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = stm32f405_soc_realize; + device_class_set_props(dc, stm32f405_soc_properties); + /* No vmstate or reset required: device has no internal state */ +} + +static const TypeInfo stm32f405_soc_info = { + .name = TYPE_STM32F405_SOC, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(STM32F405State), + .instance_init = stm32f405_soc_initfn, + .class_init = stm32f405_soc_class_init, +}; + +static void stm32f405_soc_types(void) +{ + type_register_static(&stm32f405_soc_info); +} + +type_init(stm32f405_soc_types) diff --git a/hw/arm/stm32vldiscovery.c b/hw/arm/stm32vldiscovery.c new file mode 100644 index 000000000..04036da3e --- /dev/null +++ b/hw/arm/stm32vldiscovery.c @@ -0,0 +1,65 @@ +/* + * ST STM32VLDISCOVERY machine + * + * Copyright (c) 2021 Alexandre Iooss + * Copyright (c) 2014 Alistair Francis + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "hw/boards.h"
+#include "hw/qdev-properties.h"
+#include "hw/qdev-clock.h"
+#include "qemu/error-report.h"
+#include "hw/arm/stm32f100_soc.h"
+#include "hw/arm/boot.h"
+
+/* stm32vldiscovery implementation is derived from netduinoplus2 */
+
+/* Main SYSCLK frequency in Hz (24MHz) */
+#define SYSCLK_FRQ 24000000ULL
+
+static void stm32vldiscovery_init(MachineState *machine)
+{
+    DeviceState *dev;
+    Clock *sysclk;
+
+    /* This clock doesn't need migration because it is fixed-frequency */
+    sysclk = clock_new(OBJECT(machine), "SYSCLK");
+    clock_set_hz(sysclk, SYSCLK_FRQ);
+
+    dev = qdev_new(TYPE_STM32F100_SOC);
+    qdev_prop_set_string(dev, "cpu-type", ARM_CPU_TYPE_NAME("cortex-m3"));
+    qdev_connect_clock_in(dev, "sysclk", sysclk);
+    sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
+
+    armv7m_load_kernel(ARM_CPU(first_cpu),
+                       machine->kernel_filename,
+                       FLASH_SIZE);
+}
+
+static void stm32vldiscovery_machine_init(MachineClass *mc)
+{
+    mc->desc = "ST STM32VLDISCOVERY (Cortex-M3)";
+    mc->init = stm32vldiscovery_init;
+}
+
+DEFINE_MACHINE("stm32vldiscovery", stm32vldiscovery_machine_init)
diff --git a/hw/arm/strongarm.c b/hw/arm/strongarm.c
new file mode 100644
index 000000000..939a57dda
--- /dev/null
+++ b/hw/arm/strongarm.c
@@ -0,0 +1,1649 @@
+/*
+ * StrongARM SA-1100/SA-1110 emulation
+ *
+ * Copyright (C) 2011 Dmitry Eremin-Solenikov
+ *
+ * Largely based on StrongARM emulation:
+ * Copyright (c) 2006 Openedhand Ltd.
+ * Written by Andrzej Zaborowski
+ *
+ * UART code based on QEMU 16550A UART emulation
+ * Copyright (c) 2003-2004 Fabrice Bellard
+ * Copyright (c) 2008 Citrix Systems, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Contributions after 2012-01-13 are licensed under the terms of the
+ * GNU GPL, version 2 or (at your option) any later version.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu-common.h"
+#include "cpu.h"
+#include "hw/irq.h"
+#include "hw/qdev-properties.h"
+#include "hw/qdev-properties-system.h"
+#include "hw/sysbus.h"
+#include "migration/vmstate.h"
+#include "strongarm.h"
+#include "qemu/error-report.h"
+#include "hw/arm/boot.h"
+#include "chardev/char-fe.h"
+#include "chardev/char-serial.h"
+#include "sysemu/sysemu.h"
+#include "hw/ssi/ssi.h"
+#include "qapi/error.h"
+#include "qemu/cutils.h"
+#include "qemu/log.h"
+#include "qom/object.h"
+
+//#define DEBUG
+
+/*
+ TODO
+ - Implement cp15, c14 ?
+ - Implement cp15, c15 !!! (idle used in L)
+ - Implement idle mode handling/DIM
+ - Implement sleep mode/Wake sources
+ - Implement reset control
+ - Implement memory control regs
+ - PCMCIA handling
+ - Maybe support MBGNT/MBREQ
+ - DMA channels
+ - GPCLK
+ - IrDA
+ - MCP
+ - Enhance UART with modem signals
+ */
+
+#ifdef DEBUG
+# define DPRINTF(format, ...) printf(format , ## __VA_ARGS__)
+#else
+# define DPRINTF(format, ...)
do { } while (0) +#endif + +static struct { + hwaddr io_base; + int irq; +} sa_serial[] = { + { 0x80010000, SA_PIC_UART1 }, + { 0x80030000, SA_PIC_UART2 }, + { 0x80050000, SA_PIC_UART3 }, + { 0, 0 } +}; + +/* Interrupt Controller */ + +#define TYPE_STRONGARM_PIC "strongarm_pic" +OBJECT_DECLARE_SIMPLE_TYPE(StrongARMPICState, STRONGARM_PIC) + +struct StrongARMPICState { + SysBusDevice parent_obj; + + MemoryRegion iomem; + qemu_irq irq; + qemu_irq fiq; + + uint32_t pending; + uint32_t enabled; + uint32_t is_fiq; + uint32_t int_idle; +}; + +#define ICIP 0x00 +#define ICMR 0x04 +#define ICLR 0x08 +#define ICFP 0x10 +#define ICPR 0x20 +#define ICCR 0x0c + +#define SA_PIC_SRCS 32 + + +static void strongarm_pic_update(void *opaque) +{ + StrongARMPICState *s = opaque; + + /* FIXME: reflect DIM */ + qemu_set_irq(s->fiq, s->pending & s->enabled & s->is_fiq); + qemu_set_irq(s->irq, s->pending & s->enabled & ~s->is_fiq); +} + +static void strongarm_pic_set_irq(void *opaque, int irq, int level) +{ + StrongARMPICState *s = opaque; + + if (level) { + s->pending |= 1 << irq; + } else { + s->pending &= ~(1 << irq); + } + + strongarm_pic_update(s); +} + +static uint64_t strongarm_pic_mem_read(void *opaque, hwaddr offset, + unsigned size) +{ + StrongARMPICState *s = opaque; + + switch (offset) { + case ICIP: + return s->pending & ~s->is_fiq & s->enabled; + case ICMR: + return s->enabled; + case ICLR: + return s->is_fiq; + case ICCR: + return s->int_idle == 0; + case ICFP: + return s->pending & s->is_fiq & s->enabled; + case ICPR: + return s->pending; + default: + printf("%s: Bad register offset 0x" TARGET_FMT_plx "\n", + __func__, offset); + return 0; + } +} + +static void strongarm_pic_mem_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + StrongARMPICState *s = opaque; + + switch (offset) { + case ICMR: + s->enabled = value; + break; + case ICLR: + s->is_fiq = value; + break; + case ICCR: + s->int_idle = (value & 1) ? 
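+            /*
+             * ICCR bit 0 is the "disable idle mask" control: when set,
+             * int_idle is cleared so that only enabled interrupts would
+             * bring the CPU out of idle; the idle behaviour itself is
+             * not modelled (see the FIXME in strongarm_pic_update).
+             */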
0 : ~0; + break; + default: + printf("%s: Bad register offset 0x" TARGET_FMT_plx "\n", + __func__, offset); + break; + } + strongarm_pic_update(s); +} + +static const MemoryRegionOps strongarm_pic_ops = { + .read = strongarm_pic_mem_read, + .write = strongarm_pic_mem_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void strongarm_pic_initfn(Object *obj) +{ + DeviceState *dev = DEVICE(obj); + StrongARMPICState *s = STRONGARM_PIC(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + + qdev_init_gpio_in(dev, strongarm_pic_set_irq, SA_PIC_SRCS); + memory_region_init_io(&s->iomem, obj, &strongarm_pic_ops, s, + "pic", 0x1000); + sysbus_init_mmio(sbd, &s->iomem); + sysbus_init_irq(sbd, &s->irq); + sysbus_init_irq(sbd, &s->fiq); +} + +static int strongarm_pic_post_load(void *opaque, int version_id) +{ + strongarm_pic_update(opaque); + return 0; +} + +static const VMStateDescription vmstate_strongarm_pic_regs = { + .name = "strongarm_pic", + .version_id = 0, + .minimum_version_id = 0, + .post_load = strongarm_pic_post_load, + .fields = (VMStateField[]) { + VMSTATE_UINT32(pending, StrongARMPICState), + VMSTATE_UINT32(enabled, StrongARMPICState), + VMSTATE_UINT32(is_fiq, StrongARMPICState), + VMSTATE_UINT32(int_idle, StrongARMPICState), + VMSTATE_END_OF_LIST(), + }, +}; + +static void strongarm_pic_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->desc = "StrongARM PIC"; + dc->vmsd = &vmstate_strongarm_pic_regs; +} + +static const TypeInfo strongarm_pic_info = { + .name = TYPE_STRONGARM_PIC, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(StrongARMPICState), + .instance_init = strongarm_pic_initfn, + .class_init = strongarm_pic_class_init, +}; + +/* Real-Time Clock */ +#define RTAR 0x00 /* RTC Alarm register */ +#define RCNR 0x04 /* RTC Counter register */ +#define RTTR 0x08 /* RTC Timer Trim register */ +#define RTSR 0x10 /* RTC Status register */ + +#define RTSR_AL (1 << 0) /* RTC Alarm detected */ +#define RTSR_HZ (1 << 1) /* RTC 1Hz detected */ +#define RTSR_ALE (1 << 2) /* RTC Alarm enable */ +#define RTSR_HZE (1 << 3) /* RTC 1Hz enable */ + +/* 16 LSB of RTTR are clockdiv for internal trim logic, + * trim delete isn't emulated, so + * f = 32 768 / (RTTR_trim + 1) */ + +#define TYPE_STRONGARM_RTC "strongarm-rtc" +OBJECT_DECLARE_SIMPLE_TYPE(StrongARMRTCState, STRONGARM_RTC) + +struct StrongARMRTCState { + SysBusDevice parent_obj; + + MemoryRegion iomem; + uint32_t rttr; + uint32_t rtsr; + uint32_t rtar; + uint32_t last_rcnr; + int64_t last_hz; + QEMUTimer *rtc_alarm; + QEMUTimer *rtc_hz; + qemu_irq rtc_irq; + qemu_irq rtc_hz_irq; +}; + +static inline void strongarm_rtc_int_update(StrongARMRTCState *s) +{ + qemu_set_irq(s->rtc_irq, s->rtsr & RTSR_AL); + qemu_set_irq(s->rtc_hz_irq, s->rtsr & RTSR_HZ); +} + +static void strongarm_rtc_hzupdate(StrongARMRTCState *s) +{ + int64_t rt = qemu_clock_get_ms(rtc_clock); + s->last_rcnr += ((rt - s->last_hz) << 15) / + (1000 * ((s->rttr & 0xffff) + 1)); + s->last_hz = rt; +} + +static inline void strongarm_rtc_timer_update(StrongARMRTCState *s) +{ + if ((s->rtsr & RTSR_HZE) && !(s->rtsr & RTSR_HZ)) { + timer_mod(s->rtc_hz, s->last_hz + 1000); + } else { + timer_del(s->rtc_hz); + } + + if ((s->rtsr & RTSR_ALE) && !(s->rtsr & RTSR_AL)) { + timer_mod(s->rtc_alarm, s->last_hz + + (((s->rtar - s->last_rcnr) * 1000 * + ((s->rttr & 0xffff) + 1)) >> 15)); + } else { + timer_del(s->rtc_alarm); + } +} + +static inline void strongarm_rtc_alarm_tick(void *opaque) +{ + StrongARMRTCState *s = opaque; + s->rtsr 
|= RTSR_AL; + strongarm_rtc_timer_update(s); + strongarm_rtc_int_update(s); +} + +static inline void strongarm_rtc_hz_tick(void *opaque) +{ + StrongARMRTCState *s = opaque; + s->rtsr |= RTSR_HZ; + strongarm_rtc_timer_update(s); + strongarm_rtc_int_update(s); +} + +static uint64_t strongarm_rtc_read(void *opaque, hwaddr addr, + unsigned size) +{ + StrongARMRTCState *s = opaque; + + switch (addr) { + case RTTR: + return s->rttr; + case RTSR: + return s->rtsr; + case RTAR: + return s->rtar; + case RCNR: + return s->last_rcnr + + ((qemu_clock_get_ms(rtc_clock) - s->last_hz) << 15) / + (1000 * ((s->rttr & 0xffff) + 1)); + default: + printf("%s: Bad register 0x" TARGET_FMT_plx "\n", __func__, addr); + return 0; + } +} + +static void strongarm_rtc_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + StrongARMRTCState *s = opaque; + uint32_t old_rtsr; + + switch (addr) { + case RTTR: + strongarm_rtc_hzupdate(s); + s->rttr = value; + strongarm_rtc_timer_update(s); + break; + + case RTSR: + old_rtsr = s->rtsr; + s->rtsr = (value & (RTSR_ALE | RTSR_HZE)) | + (s->rtsr & ~(value & (RTSR_AL | RTSR_HZ))); + + if (s->rtsr != old_rtsr) { + strongarm_rtc_timer_update(s); + } + + strongarm_rtc_int_update(s); + break; + + case RTAR: + s->rtar = value; + strongarm_rtc_timer_update(s); + break; + + case RCNR: + strongarm_rtc_hzupdate(s); + s->last_rcnr = value; + strongarm_rtc_timer_update(s); + break; + + default: + printf("%s: Bad register 0x" TARGET_FMT_plx "\n", __func__, addr); + } +} + +static const MemoryRegionOps strongarm_rtc_ops = { + .read = strongarm_rtc_read, + .write = strongarm_rtc_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void strongarm_rtc_init(Object *obj) +{ + StrongARMRTCState *s = STRONGARM_RTC(obj); + SysBusDevice *dev = SYS_BUS_DEVICE(obj); + struct tm tm; + + s->rttr = 0x0; + s->rtsr = 0; + + qemu_get_timedate(&tm, 0); + + s->last_rcnr = (uint32_t) mktimegm(&tm); + s->last_hz = qemu_clock_get_ms(rtc_clock); + + sysbus_init_irq(dev, &s->rtc_irq); + sysbus_init_irq(dev, &s->rtc_hz_irq); + + memory_region_init_io(&s->iomem, obj, &strongarm_rtc_ops, s, + "rtc", 0x10000); + sysbus_init_mmio(dev, &s->iomem); +} + +static void strongarm_rtc_realize(DeviceState *dev, Error **errp) +{ + StrongARMRTCState *s = STRONGARM_RTC(dev); + s->rtc_alarm = timer_new_ms(rtc_clock, strongarm_rtc_alarm_tick, s); + s->rtc_hz = timer_new_ms(rtc_clock, strongarm_rtc_hz_tick, s); +} + +static int strongarm_rtc_pre_save(void *opaque) +{ + StrongARMRTCState *s = opaque; + + strongarm_rtc_hzupdate(s); + + return 0; +} + +static int strongarm_rtc_post_load(void *opaque, int version_id) +{ + StrongARMRTCState *s = opaque; + + strongarm_rtc_timer_update(s); + strongarm_rtc_int_update(s); + + return 0; +} + +static const VMStateDescription vmstate_strongarm_rtc_regs = { + .name = "strongarm-rtc", + .version_id = 0, + .minimum_version_id = 0, + .pre_save = strongarm_rtc_pre_save, + .post_load = strongarm_rtc_post_load, + .fields = (VMStateField[]) { + VMSTATE_UINT32(rttr, StrongARMRTCState), + VMSTATE_UINT32(rtsr, StrongARMRTCState), + VMSTATE_UINT32(rtar, StrongARMRTCState), + VMSTATE_UINT32(last_rcnr, StrongARMRTCState), + VMSTATE_INT64(last_hz, StrongARMRTCState), + VMSTATE_END_OF_LIST(), + }, +}; + +static void strongarm_rtc_sysbus_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->desc = "StrongARM RTC Controller"; + dc->vmsd = &vmstate_strongarm_rtc_regs; + dc->realize = strongarm_rtc_realize; +} + +static const TypeInfo 
strongarm_rtc_sysbus_info = { + .name = TYPE_STRONGARM_RTC, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(StrongARMRTCState), + .instance_init = strongarm_rtc_init, + .class_init = strongarm_rtc_sysbus_class_init, +}; + +/* GPIO */ +#define GPLR 0x00 +#define GPDR 0x04 +#define GPSR 0x08 +#define GPCR 0x0c +#define GRER 0x10 +#define GFER 0x14 +#define GEDR 0x18 +#define GAFR 0x1c + +#define TYPE_STRONGARM_GPIO "strongarm-gpio" +OBJECT_DECLARE_SIMPLE_TYPE(StrongARMGPIOInfo, STRONGARM_GPIO) + +struct StrongARMGPIOInfo { + SysBusDevice busdev; + MemoryRegion iomem; + qemu_irq handler[28]; + qemu_irq irqs[11]; + qemu_irq irqX; + + uint32_t ilevel; + uint32_t olevel; + uint32_t dir; + uint32_t rising; + uint32_t falling; + uint32_t status; + uint32_t gafr; + + uint32_t prev_level; +}; + + +static void strongarm_gpio_irq_update(StrongARMGPIOInfo *s) +{ + int i; + for (i = 0; i < 11; i++) { + qemu_set_irq(s->irqs[i], s->status & (1 << i)); + } + + qemu_set_irq(s->irqX, (s->status & ~0x7ff)); +} + +static void strongarm_gpio_set(void *opaque, int line, int level) +{ + StrongARMGPIOInfo *s = opaque; + uint32_t mask; + + mask = 1 << line; + + if (level) { + s->status |= s->rising & mask & + ~s->ilevel & ~s->dir; + s->ilevel |= mask; + } else { + s->status |= s->falling & mask & + s->ilevel & ~s->dir; + s->ilevel &= ~mask; + } + + if (s->status & mask) { + strongarm_gpio_irq_update(s); + } +} + +static void strongarm_gpio_handler_update(StrongARMGPIOInfo *s) +{ + uint32_t level, diff; + int bit; + + level = s->olevel & s->dir; + + for (diff = s->prev_level ^ level; diff; diff ^= 1 << bit) { + bit = ctz32(diff); + qemu_set_irq(s->handler[bit], (level >> bit) & 1); + } + + s->prev_level = level; +} + +static uint64_t strongarm_gpio_read(void *opaque, hwaddr offset, + unsigned size) +{ + StrongARMGPIOInfo *s = opaque; + + switch (offset) { + case GPDR: /* GPIO Pin-Direction registers */ + return s->dir; + + case GPSR: /* GPIO Pin-Output Set registers */ + qemu_log_mask(LOG_GUEST_ERROR, + "strongarm GPIO: read from write only register GPSR\n"); + return 0; + + case GPCR: /* GPIO Pin-Output Clear registers */ + qemu_log_mask(LOG_GUEST_ERROR, + "strongarm GPIO: read from write only register GPCR\n"); + return 0; + + case GRER: /* GPIO Rising-Edge Detect Enable registers */ + return s->rising; + + case GFER: /* GPIO Falling-Edge Detect Enable registers */ + return s->falling; + + case GAFR: /* GPIO Alternate Function registers */ + return s->gafr; + + case GPLR: /* GPIO Pin-Level registers */ + return (s->olevel & s->dir) | + (s->ilevel & ~s->dir); + + case GEDR: /* GPIO Edge Detect Status registers */ + return s->status; + + default: + printf("%s: Bad offset 0x" TARGET_FMT_plx "\n", __func__, offset); + } + + return 0; +} + +static void strongarm_gpio_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + StrongARMGPIOInfo *s = opaque; + + switch (offset) { + case GPDR: /* GPIO Pin-Direction registers */ + s->dir = value & 0x0fffffff; + strongarm_gpio_handler_update(s); + break; + + case GPSR: /* GPIO Pin-Output Set registers */ + s->olevel |= value & 0x0fffffff; + strongarm_gpio_handler_update(s); + break; + + case GPCR: /* GPIO Pin-Output Clear registers */ + s->olevel &= ~value; + strongarm_gpio_handler_update(s); + break; + + case GRER: /* GPIO Rising-Edge Detect Enable registers */ + s->rising = value; + break; + + case GFER: /* GPIO Falling-Edge Detect Enable registers */ + s->falling = value; + break; + + case GAFR: /* GPIO Alternate Function registers */ + s->gafr = 
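+            /*
+             * The alternate functions themselves are not modelled; the
+             * value is only latched here so that guests read back what
+             * they wrote via the GAFR case in strongarm_gpio_read().
+             */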
value; + break; + + case GEDR: /* GPIO Edge Detect Status registers */ + s->status &= ~value; + strongarm_gpio_irq_update(s); + break; + + default: + printf("%s: Bad offset 0x" TARGET_FMT_plx "\n", __func__, offset); + } +} + +static const MemoryRegionOps strongarm_gpio_ops = { + .read = strongarm_gpio_read, + .write = strongarm_gpio_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static DeviceState *strongarm_gpio_init(hwaddr base, + DeviceState *pic) +{ + DeviceState *dev; + int i; + + dev = qdev_new(TYPE_STRONGARM_GPIO); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base); + for (i = 0; i < 12; i++) + sysbus_connect_irq(SYS_BUS_DEVICE(dev), i, + qdev_get_gpio_in(pic, SA_PIC_GPIO0_EDGE + i)); + + return dev; +} + +static void strongarm_gpio_initfn(Object *obj) +{ + DeviceState *dev = DEVICE(obj); + StrongARMGPIOInfo *s = STRONGARM_GPIO(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + int i; + + qdev_init_gpio_in(dev, strongarm_gpio_set, 28); + qdev_init_gpio_out(dev, s->handler, 28); + + memory_region_init_io(&s->iomem, obj, &strongarm_gpio_ops, s, + "gpio", 0x1000); + + sysbus_init_mmio(sbd, &s->iomem); + for (i = 0; i < 11; i++) { + sysbus_init_irq(sbd, &s->irqs[i]); + } + sysbus_init_irq(sbd, &s->irqX); +} + +static const VMStateDescription vmstate_strongarm_gpio_regs = { + .name = "strongarm-gpio", + .version_id = 0, + .minimum_version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_UINT32(ilevel, StrongARMGPIOInfo), + VMSTATE_UINT32(olevel, StrongARMGPIOInfo), + VMSTATE_UINT32(dir, StrongARMGPIOInfo), + VMSTATE_UINT32(rising, StrongARMGPIOInfo), + VMSTATE_UINT32(falling, StrongARMGPIOInfo), + VMSTATE_UINT32(status, StrongARMGPIOInfo), + VMSTATE_UINT32(gafr, StrongARMGPIOInfo), + VMSTATE_UINT32(prev_level, StrongARMGPIOInfo), + VMSTATE_END_OF_LIST(), + }, +}; + +static void strongarm_gpio_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->desc = "StrongARM GPIO controller"; + dc->vmsd = &vmstate_strongarm_gpio_regs; +} + +static const TypeInfo strongarm_gpio_info = { + .name = TYPE_STRONGARM_GPIO, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(StrongARMGPIOInfo), + .instance_init = strongarm_gpio_initfn, + .class_init = strongarm_gpio_class_init, +}; + +/* Peripheral Pin Controller */ +#define PPDR 0x00 +#define PPSR 0x04 +#define PPAR 0x08 +#define PSDR 0x0c +#define PPFR 0x10 + +#define TYPE_STRONGARM_PPC "strongarm-ppc" +OBJECT_DECLARE_SIMPLE_TYPE(StrongARMPPCInfo, STRONGARM_PPC) + +struct StrongARMPPCInfo { + SysBusDevice parent_obj; + + MemoryRegion iomem; + qemu_irq handler[28]; + + uint32_t ilevel; + uint32_t olevel; + uint32_t dir; + uint32_t ppar; + uint32_t psdr; + uint32_t ppfr; + + uint32_t prev_level; +}; + +static void strongarm_ppc_set(void *opaque, int line, int level) +{ + StrongARMPPCInfo *s = opaque; + + if (level) { + s->ilevel |= 1 << line; + } else { + s->ilevel &= ~(1 << line); + } +} + +static void strongarm_ppc_handler_update(StrongARMPPCInfo *s) +{ + uint32_t level, diff; + int bit; + + level = s->olevel & s->dir; + + for (diff = s->prev_level ^ level; diff; diff ^= 1 << bit) { + bit = ctz32(diff); + qemu_set_irq(s->handler[bit], (level >> bit) & 1); + } + + s->prev_level = level; +} + +static uint64_t strongarm_ppc_read(void *opaque, hwaddr offset, + unsigned size) +{ + StrongARMPPCInfo *s = opaque; + + switch (offset) { + case PPDR: /* PPC Pin Direction registers */ + return s->dir | ~0x3fffff; + + case PPSR: /* PPC Pin State 
registers */ + return (s->olevel & s->dir) | + (s->ilevel & ~s->dir) | + ~0x3fffff; + + case PPAR: + return s->ppar | ~0x41000; + + case PSDR: + return s->psdr; + + case PPFR: + return s->ppfr | ~0x7f001; + + default: + printf("%s: Bad offset 0x" TARGET_FMT_plx "\n", __func__, offset); + } + + return 0; +} + +static void strongarm_ppc_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + StrongARMPPCInfo *s = opaque; + + switch (offset) { + case PPDR: /* PPC Pin Direction registers */ + s->dir = value & 0x3fffff; + strongarm_ppc_handler_update(s); + break; + + case PPSR: /* PPC Pin State registers */ + s->olevel = value & s->dir & 0x3fffff; + strongarm_ppc_handler_update(s); + break; + + case PPAR: + s->ppar = value & 0x41000; + break; + + case PSDR: + s->psdr = value & 0x3fffff; + break; + + case PPFR: + s->ppfr = value & 0x7f001; + break; + + default: + printf("%s: Bad offset 0x" TARGET_FMT_plx "\n", __func__, offset); + } +} + +static const MemoryRegionOps strongarm_ppc_ops = { + .read = strongarm_ppc_read, + .write = strongarm_ppc_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void strongarm_ppc_init(Object *obj) +{ + DeviceState *dev = DEVICE(obj); + StrongARMPPCInfo *s = STRONGARM_PPC(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + + qdev_init_gpio_in(dev, strongarm_ppc_set, 22); + qdev_init_gpio_out(dev, s->handler, 22); + + memory_region_init_io(&s->iomem, obj, &strongarm_ppc_ops, s, + "ppc", 0x1000); + + sysbus_init_mmio(sbd, &s->iomem); +} + +static const VMStateDescription vmstate_strongarm_ppc_regs = { + .name = "strongarm-ppc", + .version_id = 0, + .minimum_version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_UINT32(ilevel, StrongARMPPCInfo), + VMSTATE_UINT32(olevel, StrongARMPPCInfo), + VMSTATE_UINT32(dir, StrongARMPPCInfo), + VMSTATE_UINT32(ppar, StrongARMPPCInfo), + VMSTATE_UINT32(psdr, StrongARMPPCInfo), + VMSTATE_UINT32(ppfr, StrongARMPPCInfo), + VMSTATE_UINT32(prev_level, StrongARMPPCInfo), + VMSTATE_END_OF_LIST(), + }, +}; + +static void strongarm_ppc_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->desc = "StrongARM PPC controller"; + dc->vmsd = &vmstate_strongarm_ppc_regs; +} + +static const TypeInfo strongarm_ppc_info = { + .name = TYPE_STRONGARM_PPC, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(StrongARMPPCInfo), + .instance_init = strongarm_ppc_init, + .class_init = strongarm_ppc_class_init, +}; + +/* UART Ports */ +#define UTCR0 0x00 +#define UTCR1 0x04 +#define UTCR2 0x08 +#define UTCR3 0x0c +#define UTDR 0x14 +#define UTSR0 0x1c +#define UTSR1 0x20 + +#define UTCR0_PE (1 << 0) /* Parity enable */ +#define UTCR0_OES (1 << 1) /* Even parity */ +#define UTCR0_SBS (1 << 2) /* 2 stop bits */ +#define UTCR0_DSS (1 << 3) /* 8-bit data */ + +#define UTCR3_RXE (1 << 0) /* Rx enable */ +#define UTCR3_TXE (1 << 1) /* Tx enable */ +#define UTCR3_BRK (1 << 2) /* Force Break */ +#define UTCR3_RIE (1 << 3) /* Rx int enable */ +#define UTCR3_TIE (1 << 4) /* Tx int enable */ +#define UTCR3_LBM (1 << 5) /* Loopback */ + +#define UTSR0_TFS (1 << 0) /* Tx FIFO nearly empty */ +#define UTSR0_RFS (1 << 1) /* Rx FIFO nearly full */ +#define UTSR0_RID (1 << 2) /* Receiver Idle */ +#define UTSR0_RBB (1 << 3) /* Receiver begin break */ +#define UTSR0_REB (1 << 4) /* Receiver end break */ +#define UTSR0_EIF (1 << 5) /* Error in FIFO */ + +#define UTSR1_RNE (1 << 1) /* Receive FIFO not empty */ +#define UTSR1_TNF (1 << 2) /* Transmit FIFO not full */ +#define UTSR1_PRE (1 << 3) /* Parity error 
*/
+#define UTSR1_FRE (1 << 4) /* Frame error */
+#define UTSR1_ROR (1 << 5) /* Receive Over Run */
+
+#define RX_FIFO_PRE (1 << 8)
+#define RX_FIFO_FRE (1 << 9)
+#define RX_FIFO_ROR (1 << 10)
+
+#define TYPE_STRONGARM_UART "strongarm-uart"
+OBJECT_DECLARE_SIMPLE_TYPE(StrongARMUARTState, STRONGARM_UART)
+
+struct StrongARMUARTState {
+    SysBusDevice parent_obj;
+
+    MemoryRegion iomem;
+    CharBackend chr;
+    qemu_irq irq;
+
+    uint8_t utcr0;
+    uint16_t brd;
+    uint8_t utcr3;
+    uint8_t utsr0;
+    uint8_t utsr1;
+
+    uint8_t tx_fifo[8];
+    uint8_t tx_start;
+    uint8_t tx_len;
+    uint16_t rx_fifo[12]; /* value + error flags in high bits */
+    uint8_t rx_start;
+    uint8_t rx_len;
+
+    uint64_t char_transmit_time; /* time to transmit a char in nanoseconds */
+    bool wait_break_end;
+    QEMUTimer *rx_timeout_timer;
+    QEMUTimer *tx_timer;
+};
+
+static void strongarm_uart_update_status(StrongARMUARTState *s)
+{
+    uint16_t utsr1 = 0;
+
+    if (s->tx_len != 8) {
+        utsr1 |= UTSR1_TNF;
+    }
+
+    if (s->rx_len != 0) {
+        uint16_t ent = s->rx_fifo[s->rx_start];
+
+        utsr1 |= UTSR1_RNE;
+        /*
+         * Accumulate the head-of-FIFO error flags into the value written
+         * back below; ORing them into s->utsr1 directly would lose them
+         * on the final assignment.
+         */
+        if (ent & RX_FIFO_PRE) {
+            utsr1 |= UTSR1_PRE;
+        }
+        if (ent & RX_FIFO_FRE) {
+            utsr1 |= UTSR1_FRE;
+        }
+        if (ent & RX_FIFO_ROR) {
+            utsr1 |= UTSR1_ROR;
+        }
+    }
+
+    s->utsr1 = utsr1;
+}
+
+static void strongarm_uart_update_int_status(StrongARMUARTState *s)
+{
+    uint16_t utsr0 = s->utsr0 &
+            (UTSR0_REB | UTSR0_RBB | UTSR0_RID);
+    int i;
+
+    if ((s->utcr3 & UTCR3_TXE) &&
+        (s->utcr3 & UTCR3_TIE) &&
+        s->tx_len <= 4) {
+        utsr0 |= UTSR0_TFS;
+    }
+
+    if ((s->utcr3 & UTCR3_RXE) &&
+        (s->utcr3 & UTCR3_RIE) &&
+        s->rx_len > 4) {
+        utsr0 |= UTSR0_RFS;
+    }
+
+    for (i = 0; i < s->rx_len && i < 4; i++) {
+        if (s->rx_fifo[(s->rx_start + i) % 12] & ~0xff) {
+            utsr0 |= UTSR0_EIF;
+            break;
+        }
+    }
+
+    s->utsr0 = utsr0;
+    qemu_set_irq(s->irq, utsr0);
+}
+
+static void strongarm_uart_update_parameters(StrongARMUARTState *s)
+{
+    int speed, parity, data_bits, stop_bits, frame_size;
+    QEMUSerialSetParams ssp;
+
+    /* Start bit. */
+    frame_size = 1;
+    if (s->utcr0 & UTCR0_PE) {
+        /* Parity bit. */
+        frame_size++;
+        if (s->utcr0 & UTCR0_OES) {
+            parity = 'E';
+        } else {
+            parity = 'O';
+        }
+    } else {
+        parity = 'N';
+    }
+    if (s->utcr0 & UTCR0_SBS) {
+        stop_bits = 2;
+    } else {
+        stop_bits = 1;
+    }
+
+    data_bits = (s->utcr0 & UTCR0_DSS) ? 8 : 7;
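+    /*
+     * Baud-rate generator: the UART is fed from a fixed 3.6864 MHz clock,
+     * divided by 16 and by (BRD + 1). The reset value BRD = 23 therefore
+     * gives 3686400 / 16 / 24 = 9600 baud, matching the "9600" comment in
+     * strongarm_uart_reset().
+     */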
+    frame_size += data_bits + stop_bits;
+    speed = 3686400 / 16 / (s->brd + 1);
+    ssp.speed = speed;
+    ssp.parity = parity;
+    ssp.data_bits = data_bits;
+    ssp.stop_bits = stop_bits;
+    s->char_transmit_time = (NANOSECONDS_PER_SECOND / speed) * frame_size;
+    qemu_chr_fe_ioctl(&s->chr, CHR_IOCTL_SERIAL_SET_PARAMS, &ssp);
+
+    /* identify via __func__: CharBackend has no directly usable label */
+    DPRINTF("%s speed=%d parity=%c data=%d stop=%d\n", __func__,
+            speed, parity, data_bits, stop_bits);
+}
+
+static void strongarm_uart_rx_to(void *opaque)
+{
+    StrongARMUARTState *s = opaque;
+
+    if (s->rx_len) {
+        s->utsr0 |= UTSR0_RID;
+        strongarm_uart_update_int_status(s);
+    }
+}
+
+static void strongarm_uart_rx_push(StrongARMUARTState *s, uint16_t c)
+{
+    if ((s->utcr3 & UTCR3_RXE) == 0) {
+        /* rx disabled */
+        return;
+    }
+
+    if (s->wait_break_end) {
+        s->utsr0 |= UTSR0_REB;
+        s->wait_break_end = false;
+    }
+
+    if (s->rx_len < 12) {
+        s->rx_fifo[(s->rx_start + s->rx_len) % 12] = c;
+        s->rx_len++;
+    } else {
+        s->rx_fifo[(s->rx_start + 11) % 12] |= RX_FIFO_ROR;
+    }
+}
+
+static int strongarm_uart_can_receive(void *opaque)
+{
+    StrongARMUARTState *s = opaque;
+
+    if (s->rx_len == 12) {
+        return 0;
+    }
+    /* It's best not to get more than 2/3 of RX FIFO, so advertise that much */
+    if (s->rx_len < 8) {
+        return 8 - s->rx_len;
+    }
+    return 1;
+}
+
+static void strongarm_uart_receive(void *opaque, const uint8_t *buf, int size)
+{
+    StrongARMUARTState *s = opaque;
+    int i;
+
+    for (i = 0; i < size; i++) {
+        strongarm_uart_rx_push(s, buf[i]);
+    }
+
+    /* call the timeout receive callback in 3 char transmit time */
+    timer_mod(s->rx_timeout_timer,
+              qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + s->char_transmit_time * 3);
+
+    strongarm_uart_update_status(s);
+    strongarm_uart_update_int_status(s);
+}
+
+static void strongarm_uart_event(void *opaque, QEMUChrEvent event)
+{
+    StrongARMUARTState *s = opaque;
+    if (event == CHR_EVENT_BREAK) {
+        s->utsr0 |= UTSR0_RBB;
+        strongarm_uart_rx_push(s, RX_FIFO_FRE);
+        s->wait_break_end = true;
+        strongarm_uart_update_status(s);
+        strongarm_uart_update_int_status(s);
+    }
+}
+
+static void strongarm_uart_tx(void *opaque)
+{
+    StrongARMUARTState *s = opaque;
+    uint64_t new_xmit_ts = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
+
+    if (s->utcr3 & UTCR3_LBM) /* loopback */ {
+        strongarm_uart_receive(s, &s->tx_fifo[s->tx_start], 1);
+    } else if (qemu_chr_fe_backend_connected(&s->chr)) {
+        /* XXX this blocks entire thread.
Rewrite to use + * qemu_chr_fe_write and background I/O callbacks */ + qemu_chr_fe_write_all(&s->chr, &s->tx_fifo[s->tx_start], 1); + } + + s->tx_start = (s->tx_start + 1) % 8; + s->tx_len--; + if (s->tx_len) { + timer_mod(s->tx_timer, new_xmit_ts + s->char_transmit_time); + } + strongarm_uart_update_status(s); + strongarm_uart_update_int_status(s); +} + +static uint64_t strongarm_uart_read(void *opaque, hwaddr addr, + unsigned size) +{ + StrongARMUARTState *s = opaque; + uint16_t ret; + + switch (addr) { + case UTCR0: + return s->utcr0; + + case UTCR1: + return s->brd >> 8; + + case UTCR2: + return s->brd & 0xff; + + case UTCR3: + return s->utcr3; + + case UTDR: + if (s->rx_len != 0) { + ret = s->rx_fifo[s->rx_start]; + s->rx_start = (s->rx_start + 1) % 12; + s->rx_len--; + strongarm_uart_update_status(s); + strongarm_uart_update_int_status(s); + return ret; + } + return 0; + + case UTSR0: + return s->utsr0; + + case UTSR1: + return s->utsr1; + + default: + printf("%s: Bad register 0x" TARGET_FMT_plx "\n", __func__, addr); + return 0; + } +} + +static void strongarm_uart_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + StrongARMUARTState *s = opaque; + + switch (addr) { + case UTCR0: + s->utcr0 = value & 0x7f; + strongarm_uart_update_parameters(s); + break; + + case UTCR1: + s->brd = (s->brd & 0xff) | ((value & 0xf) << 8); + strongarm_uart_update_parameters(s); + break; + + case UTCR2: + s->brd = (s->brd & 0xf00) | (value & 0xff); + strongarm_uart_update_parameters(s); + break; + + case UTCR3: + s->utcr3 = value & 0x3f; + if ((s->utcr3 & UTCR3_RXE) == 0) { + s->rx_len = 0; + } + if ((s->utcr3 & UTCR3_TXE) == 0) { + s->tx_len = 0; + } + strongarm_uart_update_status(s); + strongarm_uart_update_int_status(s); + break; + + case UTDR: + if ((s->utcr3 & UTCR3_TXE) && s->tx_len != 8) { + s->tx_fifo[(s->tx_start + s->tx_len) % 8] = value; + s->tx_len++; + strongarm_uart_update_status(s); + strongarm_uart_update_int_status(s); + if (s->tx_len == 1) { + strongarm_uart_tx(s); + } + } + break; + + case UTSR0: + s->utsr0 = s->utsr0 & ~(value & + (UTSR0_REB | UTSR0_RBB | UTSR0_RID)); + strongarm_uart_update_int_status(s); + break; + + default: + printf("%s: Bad register 0x" TARGET_FMT_plx "\n", __func__, addr); + } +} + +static const MemoryRegionOps strongarm_uart_ops = { + .read = strongarm_uart_read, + .write = strongarm_uart_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void strongarm_uart_init(Object *obj) +{ + StrongARMUARTState *s = STRONGARM_UART(obj); + SysBusDevice *dev = SYS_BUS_DEVICE(obj); + + memory_region_init_io(&s->iomem, obj, &strongarm_uart_ops, s, + "uart", 0x10000); + sysbus_init_mmio(dev, &s->iomem); + sysbus_init_irq(dev, &s->irq); +} + +static void strongarm_uart_realize(DeviceState *dev, Error **errp) +{ + StrongARMUARTState *s = STRONGARM_UART(dev); + + s->rx_timeout_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, + strongarm_uart_rx_to, + s); + s->tx_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, strongarm_uart_tx, s); + qemu_chr_fe_set_handlers(&s->chr, + strongarm_uart_can_receive, + strongarm_uart_receive, + strongarm_uart_event, + NULL, s, NULL, true); +} + +static void strongarm_uart_reset(DeviceState *dev) +{ + StrongARMUARTState *s = STRONGARM_UART(dev); + + s->utcr0 = UTCR0_DSS; /* 8 data, no parity */ + s->brd = 23; /* 9600 */ + /* enable send & recv - this actually violates spec */ + s->utcr3 = UTCR3_TXE | UTCR3_RXE; + + s->rx_len = s->tx_len = 0; + + strongarm_uart_update_parameters(s); + strongarm_uart_update_status(s); + 
strongarm_uart_update_int_status(s); +} + +static int strongarm_uart_post_load(void *opaque, int version_id) +{ + StrongARMUARTState *s = opaque; + + strongarm_uart_update_parameters(s); + strongarm_uart_update_status(s); + strongarm_uart_update_int_status(s); + + /* tx and restart timer */ + if (s->tx_len) { + strongarm_uart_tx(s); + } + + /* restart rx timeout timer */ + if (s->rx_len) { + timer_mod(s->rx_timeout_timer, + qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + s->char_transmit_time * 3); + } + + return 0; +} + +static const VMStateDescription vmstate_strongarm_uart_regs = { + .name = "strongarm-uart", + .version_id = 0, + .minimum_version_id = 0, + .post_load = strongarm_uart_post_load, + .fields = (VMStateField[]) { + VMSTATE_UINT8(utcr0, StrongARMUARTState), + VMSTATE_UINT16(brd, StrongARMUARTState), + VMSTATE_UINT8(utcr3, StrongARMUARTState), + VMSTATE_UINT8(utsr0, StrongARMUARTState), + VMSTATE_UINT8_ARRAY(tx_fifo, StrongARMUARTState, 8), + VMSTATE_UINT8(tx_start, StrongARMUARTState), + VMSTATE_UINT8(tx_len, StrongARMUARTState), + VMSTATE_UINT16_ARRAY(rx_fifo, StrongARMUARTState, 12), + VMSTATE_UINT8(rx_start, StrongARMUARTState), + VMSTATE_UINT8(rx_len, StrongARMUARTState), + VMSTATE_BOOL(wait_break_end, StrongARMUARTState), + VMSTATE_END_OF_LIST(), + }, +}; + +static Property strongarm_uart_properties[] = { + DEFINE_PROP_CHR("chardev", StrongARMUARTState, chr), + DEFINE_PROP_END_OF_LIST(), +}; + +static void strongarm_uart_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->desc = "StrongARM UART controller"; + dc->reset = strongarm_uart_reset; + dc->vmsd = &vmstate_strongarm_uart_regs; + device_class_set_props(dc, strongarm_uart_properties); + dc->realize = strongarm_uart_realize; +} + +static const TypeInfo strongarm_uart_info = { + .name = TYPE_STRONGARM_UART, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(StrongARMUARTState), + .instance_init = strongarm_uart_init, + .class_init = strongarm_uart_class_init, +}; + +/* Synchronous Serial Ports */ + +#define TYPE_STRONGARM_SSP "strongarm-ssp" +OBJECT_DECLARE_SIMPLE_TYPE(StrongARMSSPState, STRONGARM_SSP) + +struct StrongARMSSPState { + SysBusDevice parent_obj; + + MemoryRegion iomem; + qemu_irq irq; + SSIBus *bus; + + uint16_t sscr[2]; + uint16_t sssr; + + uint16_t rx_fifo[8]; + uint8_t rx_level; + uint8_t rx_start; +}; + +#define SSCR0 0x60 /* SSP Control register 0 */ +#define SSCR1 0x64 /* SSP Control register 1 */ +#define SSDR 0x6c /* SSP Data register */ +#define SSSR 0x74 /* SSP Status register */ + +/* Bitfields for above registers */ +#define SSCR0_SPI(x) (((x) & 0x30) == 0x00) +#define SSCR0_SSP(x) (((x) & 0x30) == 0x10) +#define SSCR0_UWIRE(x) (((x) & 0x30) == 0x20) +#define SSCR0_PSP(x) (((x) & 0x30) == 0x30) +#define SSCR0_SSE (1 << 7) +#define SSCR0_DSS(x) (((x) & 0xf) + 1) +#define SSCR1_RIE (1 << 0) +#define SSCR1_TIE (1 << 1) +#define SSCR1_LBM (1 << 2) +#define SSSR_TNF (1 << 2) +#define SSSR_RNE (1 << 3) +#define SSSR_TFS (1 << 5) +#define SSSR_RFS (1 << 6) +#define SSSR_ROR (1 << 7) +#define SSSR_RW 0x0080 + +static void strongarm_ssp_int_update(StrongARMSSPState *s) +{ + int level = 0; + + level |= (s->sssr & SSSR_ROR); + level |= (s->sssr & SSSR_RFS) && (s->sscr[1] & SSCR1_RIE); + level |= (s->sssr & SSSR_TFS) && (s->sscr[1] & SSCR1_TIE); + qemu_set_irq(s->irq, level); +} + +static void strongarm_ssp_fifo_update(StrongARMSSPState *s) +{ + s->sssr &= ~SSSR_TFS; + s->sssr &= ~SSSR_TNF; + if (s->sscr[0] & SSCR0_SSE) { + if (s->rx_level >= 4) { + s->sssr |= 
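+                /* at or above the 4-entry trigger level: raise the
+                 * Receive FIFO Service request flag
+                 */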
SSSR_RFS; + } else { + s->sssr &= ~SSSR_RFS; + } + if (s->rx_level) { + s->sssr |= SSSR_RNE; + } else { + s->sssr &= ~SSSR_RNE; + } + /* TX FIFO is never filled, so it is always in underrun + condition if SSP is enabled */ + s->sssr |= SSSR_TFS; + s->sssr |= SSSR_TNF; + } + + strongarm_ssp_int_update(s); +} + +static uint64_t strongarm_ssp_read(void *opaque, hwaddr addr, + unsigned size) +{ + StrongARMSSPState *s = opaque; + uint32_t retval; + + switch (addr) { + case SSCR0: + return s->sscr[0]; + case SSCR1: + return s->sscr[1]; + case SSSR: + return s->sssr; + case SSDR: + if (~s->sscr[0] & SSCR0_SSE) { + return 0xffffffff; + } + if (s->rx_level < 1) { + printf("%s: SSP Rx Underrun\n", __func__); + return 0xffffffff; + } + s->rx_level--; + retval = s->rx_fifo[s->rx_start++]; + s->rx_start &= 0x7; + strongarm_ssp_fifo_update(s); + return retval; + default: + printf("%s: Bad register 0x" TARGET_FMT_plx "\n", __func__, addr); + break; + } + return 0; +} + +static void strongarm_ssp_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + StrongARMSSPState *s = opaque; + + switch (addr) { + case SSCR0: + s->sscr[0] = value & 0xffbf; + if ((s->sscr[0] & SSCR0_SSE) && SSCR0_DSS(value) < 4) { + printf("%s: Wrong data size: %i bits\n", __func__, + (int)SSCR0_DSS(value)); + } + if (!(value & SSCR0_SSE)) { + s->sssr = 0; + s->rx_level = 0; + } + strongarm_ssp_fifo_update(s); + break; + + case SSCR1: + s->sscr[1] = value & 0x2f; + if (value & SSCR1_LBM) { + printf("%s: Attempt to use SSP LBM mode\n", __func__); + } + strongarm_ssp_fifo_update(s); + break; + + case SSSR: + s->sssr &= ~(value & SSSR_RW); + strongarm_ssp_int_update(s); + break; + + case SSDR: + if (SSCR0_UWIRE(s->sscr[0])) { + value &= 0xff; + } else + /* Note how 32bits overflow does no harm here */ + value &= (1 << SSCR0_DSS(s->sscr[0])) - 1; + + /* Data goes from here to the Tx FIFO and is shifted out from + * there directly to the slave, no need to buffer it. 
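+         * In loopback mode (SSCR1_LBM) the transfer is short-circuited
+         * below and the written value is echoed straight back into the
+         * RX FIFO instead of going through ssi_transfer().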
+ */ + if (s->sscr[0] & SSCR0_SSE) { + uint32_t readval; + if (s->sscr[1] & SSCR1_LBM) { + readval = value; + } else { + readval = ssi_transfer(s->bus, value); + } + + if (s->rx_level < 0x08) { + s->rx_fifo[(s->rx_start + s->rx_level++) & 0x7] = readval; + } else { + s->sssr |= SSSR_ROR; + } + } + strongarm_ssp_fifo_update(s); + break; + + default: + printf("%s: Bad register 0x" TARGET_FMT_plx "\n", __func__, addr); + break; + } +} + +static const MemoryRegionOps strongarm_ssp_ops = { + .read = strongarm_ssp_read, + .write = strongarm_ssp_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static int strongarm_ssp_post_load(void *opaque, int version_id) +{ + StrongARMSSPState *s = opaque; + + strongarm_ssp_fifo_update(s); + + return 0; +} + +static void strongarm_ssp_init(Object *obj) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + DeviceState *dev = DEVICE(sbd); + StrongARMSSPState *s = STRONGARM_SSP(dev); + + sysbus_init_irq(sbd, &s->irq); + + memory_region_init_io(&s->iomem, obj, &strongarm_ssp_ops, s, + "ssp", 0x1000); + sysbus_init_mmio(sbd, &s->iomem); + + s->bus = ssi_create_bus(dev, "ssi"); +} + +static void strongarm_ssp_reset(DeviceState *dev) +{ + StrongARMSSPState *s = STRONGARM_SSP(dev); + + s->sssr = 0x03; /* 3 bit data, SPI, disabled */ + s->rx_start = 0; + s->rx_level = 0; +} + +static const VMStateDescription vmstate_strongarm_ssp_regs = { + .name = "strongarm-ssp", + .version_id = 0, + .minimum_version_id = 0, + .post_load = strongarm_ssp_post_load, + .fields = (VMStateField[]) { + VMSTATE_UINT16_ARRAY(sscr, StrongARMSSPState, 2), + VMSTATE_UINT16(sssr, StrongARMSSPState), + VMSTATE_UINT16_ARRAY(rx_fifo, StrongARMSSPState, 8), + VMSTATE_UINT8(rx_start, StrongARMSSPState), + VMSTATE_UINT8(rx_level, StrongARMSSPState), + VMSTATE_END_OF_LIST(), + }, +}; + +static void strongarm_ssp_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->desc = "StrongARM SSP controller"; + dc->reset = strongarm_ssp_reset; + dc->vmsd = &vmstate_strongarm_ssp_regs; +} + +static const TypeInfo strongarm_ssp_info = { + .name = TYPE_STRONGARM_SSP, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(StrongARMSSPState), + .instance_init = strongarm_ssp_init, + .class_init = strongarm_ssp_class_init, +}; + +/* Main CPU functions */ +StrongARMState *sa1110_init(const char *cpu_type) +{ + StrongARMState *s; + int i; + + s = g_new0(StrongARMState, 1); + + if (strncmp(cpu_type, "sa1110", 6)) { + error_report("Machine requires a SA1110 processor."); + exit(1); + } + + s->cpu = ARM_CPU(cpu_create(cpu_type)); + + s->pic = sysbus_create_varargs("strongarm_pic", 0x90050000, + qdev_get_gpio_in(DEVICE(s->cpu), ARM_CPU_IRQ), + qdev_get_gpio_in(DEVICE(s->cpu), ARM_CPU_FIQ), + NULL); + + sysbus_create_varargs("pxa25x-timer", 0x90000000, + qdev_get_gpio_in(s->pic, SA_PIC_OSTC0), + qdev_get_gpio_in(s->pic, SA_PIC_OSTC1), + qdev_get_gpio_in(s->pic, SA_PIC_OSTC2), + qdev_get_gpio_in(s->pic, SA_PIC_OSTC3), + NULL); + + sysbus_create_simple(TYPE_STRONGARM_RTC, 0x90010000, + qdev_get_gpio_in(s->pic, SA_PIC_RTC_ALARM)); + + s->gpio = strongarm_gpio_init(0x90040000, s->pic); + + s->ppc = sysbus_create_varargs(TYPE_STRONGARM_PPC, 0x90060000, NULL); + + for (i = 0; sa_serial[i].io_base; i++) { + DeviceState *dev = qdev_new(TYPE_STRONGARM_UART); + qdev_prop_set_chr(dev, "chardev", serial_hd(i)); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, + sa_serial[i].io_base); + sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, + 
qdev_get_gpio_in(s->pic, sa_serial[i].irq));
+    }
+
+    s->ssp = sysbus_create_varargs(TYPE_STRONGARM_SSP, 0x80070000,
+                qdev_get_gpio_in(s->pic, SA_PIC_SSP), NULL);
+    s->ssp_bus = (SSIBus *)qdev_get_child_bus(s->ssp, "ssi");
+
+    return s;
+}
+
+static void strongarm_register_types(void)
+{
+    type_register_static(&strongarm_pic_info);
+    type_register_static(&strongarm_rtc_sysbus_info);
+    type_register_static(&strongarm_gpio_info);
+    type_register_static(&strongarm_ppc_info);
+    type_register_static(&strongarm_uart_info);
+    type_register_static(&strongarm_ssp_info);
+}
+
+type_init(strongarm_register_types)
diff --git a/hw/arm/strongarm.h b/hw/arm/strongarm.h
new file mode 100644
index 000000000..192821f6a
--- /dev/null
+++ b/hw/arm/strongarm.h
@@ -0,0 +1,67 @@
+#ifndef STRONGARM_H
+#define STRONGARM_H
+
+#include "exec/memory.h"
+#include "target/arm/cpu-qom.h"
+
+#define SA_CS0 0x00000000
+#define SA_CS1 0x08000000
+#define SA_CS2 0x10000000
+#define SA_CS3 0x18000000
+#define SA_PCMCIA_CS0 0x20000000
+#define SA_PCMCIA_CS1 0x30000000
+#define SA_CS4 0x40000000
+#define SA_CS5 0x48000000
+/* system registers here */
+#define SA_SDCS0 0xc0000000
+#define SA_SDCS1 0xc8000000
+#define SA_SDCS2 0xd0000000
+#define SA_SDCS3 0xd8000000
+
+enum {
+    SA_PIC_GPIO0_EDGE = 0,
+    SA_PIC_GPIO1_EDGE,
+    SA_PIC_GPIO2_EDGE,
+    SA_PIC_GPIO3_EDGE,
+    SA_PIC_GPIO4_EDGE,
+    SA_PIC_GPIO5_EDGE,
+    SA_PIC_GPIO6_EDGE,
+    SA_PIC_GPIO7_EDGE,
+    SA_PIC_GPIO8_EDGE,
+    SA_PIC_GPIO9_EDGE,
+    SA_PIC_GPIO10_EDGE,
+    SA_PIC_GPIOX_EDGE,
+    SA_PIC_LCD,
+    SA_PIC_UDC,
+    SA_PIC_RSVD1,
+    SA_PIC_UART1,
+    SA_PIC_UART2,
+    SA_PIC_UART3,
+    SA_PIC_MCP,
+    SA_PIC_SSP,
+    SA_PIC_DMA_CH0,
+    SA_PIC_DMA_CH1,
+    SA_PIC_DMA_CH2,
+    SA_PIC_DMA_CH3,
+    SA_PIC_DMA_CH4,
+    SA_PIC_DMA_CH5,
+    SA_PIC_OSTC0,
+    SA_PIC_OSTC1,
+    SA_PIC_OSTC2,
+    SA_PIC_OSTC3,
+    SA_PIC_RTC_HZ,
+    SA_PIC_RTC_ALARM,
+};
+
+typedef struct {
+    ARMCPU *cpu;
+    DeviceState *pic;
+    DeviceState *gpio;
+    DeviceState *ppc;
+    DeviceState *ssp;
+    SSIBus *ssp_bus;
+} StrongARMState;
+
+StrongARMState *sa1110_init(const char *cpu_type);
+
+#endif
diff --git a/hw/arm/sysbus-fdt.c b/hw/arm/sysbus-fdt.c
new file mode 100644
index 000000000..48c5fe9bf
--- /dev/null
+++ b/hw/arm/sysbus-fdt.c
@@ -0,0 +1,571 @@
+/*
+ * ARM Platform Bus device tree generation helpers
+ *
+ * Copyright (c) 2014 Linaro Limited
+ *
+ * Authors:
+ *  Alex Graf
+ *  Eric Auger
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include <libfdt.h>
+#ifdef CONFIG_LINUX
+#include <linux/vfio.h>
+#endif
+#include "hw/arm/sysbus-fdt.h"
+#include "qemu/error-report.h"
+#include "sysemu/device_tree.h"
+#include "sysemu/tpm.h"
+#include "hw/platform-bus.h"
+#include "hw/vfio/vfio-platform.h"
+#include "hw/vfio/vfio-calxeda-xgmac.h"
+#include "hw/vfio/vfio-amd-xgbe.h"
+#include "hw/display/ramfb.h"
+#include "hw/arm/fdt.h"
+
+/*
+ * internal struct that contains the information to create a dynamic
+ * sysbus device node
+ */
+typedef struct PlatformBusFDTData {
+    void *fdt; /* device tree handle */
+    int irq_start; /* index of the first IRQ usable by platform bus devices */
+    const char *pbus_node_name; /* name of the platform bus node */
+    PlatformBusDevice *pbus;
+} PlatformBusFDTData;
+
+/* struct that allows matching a device and creating its FDT node */
+typedef struct BindingEntry {
+    const char *typename;
+    const char *compat;
+    int (*add_fn)(SysBusDevice *sbdev, void *opaque);
+    bool (*match_fn)(SysBusDevice *sbdev, const struct BindingEntry *combo);
+} BindingEntry;
+
+/* helpers */
+
+typedef struct HostProperty {
+    const char *name;
+    bool optional;
+} HostProperty;
+
+#ifdef CONFIG_LINUX
+
+/**
+ * copy_properties_from_host
+ *
+ * copies properties listed in an array from the host device tree to
+ * the guest device tree. If a non-optional property is not found, the
+ * function exits. An optional property is ignored if not found
+ * in the host device tree.
+ * @props: array of HostProperty to copy
+ * @nb_props: number of properties in the array
+ * @host_fdt: host device tree blob
+ * @guest_fdt: guest device tree blob
+ * @node_path: host dt node path where the property is supposed to be
+ *             found
+ * @nodename: guest node name the properties should be added to
+ */
+static void copy_properties_from_host(HostProperty *props, int nb_props,
+                                      void *host_fdt, void *guest_fdt,
+                                      char *node_path, char *nodename)
+{
+    int i, prop_len;
+    const void *r;
+    Error *err = NULL;
+
+    for (i = 0; i < nb_props; i++) {
+        r = qemu_fdt_getprop(host_fdt, node_path,
+                             props[i].name,
+                             &prop_len,
+                             &err);
+        if (r) {
+            qemu_fdt_setprop(guest_fdt, nodename,
+                             props[i].name, r, prop_len);
+        } else {
+            if (props[i].optional && prop_len == -FDT_ERR_NOTFOUND) {
+                /* optional property does not exist */
+                error_free(err);
+            } else {
+                error_report_err(err);
+            }
+            if (!props[i].optional) {
+                /* mandatory property not found: bail out */
+                exit(1);
+            }
+            err = NULL;
+        }
+    }
+}
+
+/* clock properties whose values are copied/pasted from host */
+static HostProperty clock_copied_properties[] = {
+    {"compatible", false},
+    {"#clock-cells", false},
+    {"clock-frequency", true},
+    {"clock-output-names", true},
+};
+
+/**
+ * fdt_build_clock_node
+ *
+ * Build a guest clock node, used as a dependency from a passthrough'ed
+ * device. Most information is retrieved from the host clock node.
+ * Also checks that the host clock is a fixed one.
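+ * The new node is created at the host node's basename and is assigned
+ * @guest_phandle, so that a consumer's "clocks" property can reference
+ * the clock by that phandle.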
+ * + * @host_fdt: host device tree blob from which info are retrieved + * @guest_fdt: guest device tree blob where the clock node is added + * @host_phandle: phandle of the clock in host device tree + * @guest_phandle: phandle to assign to the guest node + */ +static void fdt_build_clock_node(void *host_fdt, void *guest_fdt, + uint32_t host_phandle, + uint32_t guest_phandle) +{ + char *node_path = NULL; + char *nodename; + const void *r; + int ret, node_offset, prop_len, path_len = 16; + + node_offset = fdt_node_offset_by_phandle(host_fdt, host_phandle); + if (node_offset <= 0) { + error_report("not able to locate clock handle %d in host device tree", + host_phandle); + exit(1); + } + node_path = g_malloc(path_len); + while ((ret = fdt_get_path(host_fdt, node_offset, node_path, path_len)) + == -FDT_ERR_NOSPACE) { + path_len += 16; + node_path = g_realloc(node_path, path_len); + } + if (ret < 0) { + error_report("not able to retrieve node path for clock handle %d", + host_phandle); + exit(1); + } + + r = qemu_fdt_getprop(host_fdt, node_path, "compatible", &prop_len, + &error_fatal); + if (strcmp(r, "fixed-clock")) { + error_report("clock handle %d is not a fixed clock", host_phandle); + exit(1); + } + + nodename = strrchr(node_path, '/'); + qemu_fdt_add_subnode(guest_fdt, nodename); + + copy_properties_from_host(clock_copied_properties, + ARRAY_SIZE(clock_copied_properties), + host_fdt, guest_fdt, + node_path, nodename); + + qemu_fdt_setprop_cell(guest_fdt, nodename, "phandle", guest_phandle); + + g_free(node_path); +} + +/** + * sysfs_to_dt_name: convert the name found in sysfs into the node name + * for instance e0900000.xgmac is converted into xgmac@e0900000 + * @sysfs_name: directory name in sysfs + * + * returns the device tree name upon success or NULL in case the sysfs name + * does not match the expected format + */ +static char *sysfs_to_dt_name(const char *sysfs_name) +{ + gchar **substrings = g_strsplit(sysfs_name, ".", 2); + char *dt_name = NULL; + + if (!substrings || !substrings[0] || !substrings[1]) { + goto out; + } + dt_name = g_strdup_printf("%s@%s", substrings[1], substrings[0]); +out: + g_strfreev(substrings); + return dt_name; +} + +/* Device Specific Code */ + +/** + * add_calxeda_midway_xgmac_fdt_node + * + * Generates a simple node with following properties: + * compatible string, regs, interrupts, dma-coherent + */ +static int add_calxeda_midway_xgmac_fdt_node(SysBusDevice *sbdev, void *opaque) +{ + PlatformBusFDTData *data = opaque; + PlatformBusDevice *pbus = data->pbus; + void *fdt = data->fdt; + const char *parent_node = data->pbus_node_name; + int compat_str_len, i; + char *nodename; + uint32_t *irq_attr, *reg_attr; + uint64_t mmio_base, irq_number; + VFIOPlatformDevice *vdev = VFIO_PLATFORM_DEVICE(sbdev); + VFIODevice *vbasedev = &vdev->vbasedev; + + mmio_base = platform_bus_get_mmio_addr(pbus, sbdev, 0); + nodename = g_strdup_printf("%s/%s@%" PRIx64, parent_node, + vbasedev->name, mmio_base); + qemu_fdt_add_subnode(fdt, nodename); + + compat_str_len = strlen(vdev->compat) + 1; + qemu_fdt_setprop(fdt, nodename, "compatible", + vdev->compat, compat_str_len); + + qemu_fdt_setprop(fdt, nodename, "dma-coherent", "", 0); + + reg_attr = g_new(uint32_t, vbasedev->num_regions * 2); + for (i = 0; i < vbasedev->num_regions; i++) { + mmio_base = platform_bus_get_mmio_addr(pbus, sbdev, i); + reg_attr[2 * i] = cpu_to_be32(mmio_base); + reg_attr[2 * i + 1] = cpu_to_be32( + memory_region_size(vdev->regions[i]->mem)); + } + qemu_fdt_setprop(fdt, nodename, "reg", reg_attr, + 
vbasedev->num_regions * 2 * sizeof(uint32_t)); + + irq_attr = g_new(uint32_t, vbasedev->num_irqs * 3); + for (i = 0; i < vbasedev->num_irqs; i++) { + irq_number = platform_bus_get_irqn(pbus, sbdev , i) + + data->irq_start; + irq_attr[3 * i] = cpu_to_be32(GIC_FDT_IRQ_TYPE_SPI); + irq_attr[3 * i + 1] = cpu_to_be32(irq_number); + irq_attr[3 * i + 2] = cpu_to_be32(GIC_FDT_IRQ_FLAGS_LEVEL_HI); + } + qemu_fdt_setprop(fdt, nodename, "interrupts", + irq_attr, vbasedev->num_irqs * 3 * sizeof(uint32_t)); + g_free(irq_attr); + g_free(reg_attr); + g_free(nodename); + return 0; +} + +/* AMD xgbe properties whose values are copied/pasted from host */ +static HostProperty amd_xgbe_copied_properties[] = { + {"compatible", false}, + {"dma-coherent", true}, + {"amd,per-channel-interrupt", true}, + {"phy-mode", false}, + {"mac-address", true}, + {"amd,speed-set", false}, + {"amd,serdes-blwc", true}, + {"amd,serdes-cdr-rate", true}, + {"amd,serdes-pq-skew", true}, + {"amd,serdes-tx-amp", true}, + {"amd,serdes-dfe-tap-config", true}, + {"amd,serdes-dfe-tap-enable", true}, + {"clock-names", false}, +}; + +/** + * add_amd_xgbe_fdt_node + * + * Generates the combined xgbe/phy node following kernel >=4.2 + * binding documentation: + * Documentation/devicetree/bindings/net/amd-xgbe.txt: + * Also 2 clock nodes are created (dma and ptp) + * + * Asserts in case of error + */ +static int add_amd_xgbe_fdt_node(SysBusDevice *sbdev, void *opaque) +{ + PlatformBusFDTData *data = opaque; + PlatformBusDevice *pbus = data->pbus; + VFIOPlatformDevice *vdev = VFIO_PLATFORM_DEVICE(sbdev); + VFIODevice *vbasedev = &vdev->vbasedev; + VFIOINTp *intp; + const char *parent_node = data->pbus_node_name; + char **node_path, *nodename, *dt_name; + void *guest_fdt = data->fdt, *host_fdt; + const void *r; + int i, prop_len; + uint32_t *irq_attr, *reg_attr, *host_clock_phandles; + uint64_t mmio_base, irq_number; + uint32_t guest_clock_phandles[2]; + + host_fdt = load_device_tree_from_sysfs(); + + dt_name = sysfs_to_dt_name(vbasedev->name); + if (!dt_name) { + error_report("%s incorrect sysfs device name %s", + __func__, vbasedev->name); + exit(1); + } + node_path = qemu_fdt_node_path(host_fdt, dt_name, vdev->compat, + &error_fatal); + if (!node_path || !node_path[0]) { + error_report("%s unable to retrieve node path for %s/%s", + __func__, dt_name, vdev->compat); + exit(1); + } + + if (node_path[1]) { + error_report("%s more than one node matching %s/%s!", + __func__, dt_name, vdev->compat); + exit(1); + } + + g_free(dt_name); + + if (vbasedev->num_regions != 5) { + error_report("%s Does the host dt node combine XGBE/PHY?", __func__); + exit(1); + } + + /* generate nodes for DMA_CLK and PTP_CLK */ + r = qemu_fdt_getprop(host_fdt, node_path[0], "clocks", + &prop_len, &error_fatal); + if (prop_len != 8) { + error_report("%s clocks property should contain 2 handles", __func__); + exit(1); + } + host_clock_phandles = (uint32_t *)r; + guest_clock_phandles[0] = qemu_fdt_alloc_phandle(guest_fdt); + guest_clock_phandles[1] = qemu_fdt_alloc_phandle(guest_fdt); + + /** + * clock handles fetched from host dt are in be32 layout whereas + * rest of the code uses cpu layout. Also guest clock handles are + * in cpu layout. 
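+     * For example, a host "clocks" property containing two phandles
+     * arrives here as two big-endian cells, hence the be32_to_cpu()
+     * conversions below before the host nodes are looked up by phandle.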
+ */ + fdt_build_clock_node(host_fdt, guest_fdt, + be32_to_cpu(host_clock_phandles[0]), + guest_clock_phandles[0]); + + fdt_build_clock_node(host_fdt, guest_fdt, + be32_to_cpu(host_clock_phandles[1]), + guest_clock_phandles[1]); + + /* combined XGBE/PHY node */ + mmio_base = platform_bus_get_mmio_addr(pbus, sbdev, 0); + nodename = g_strdup_printf("%s/%s@%" PRIx64, parent_node, + vbasedev->name, mmio_base); + qemu_fdt_add_subnode(guest_fdt, nodename); + + copy_properties_from_host(amd_xgbe_copied_properties, + ARRAY_SIZE(amd_xgbe_copied_properties), + host_fdt, guest_fdt, + node_path[0], nodename); + + qemu_fdt_setprop_cells(guest_fdt, nodename, "clocks", + guest_clock_phandles[0], + guest_clock_phandles[1]); + + reg_attr = g_new(uint32_t, vbasedev->num_regions * 2); + for (i = 0; i < vbasedev->num_regions; i++) { + mmio_base = platform_bus_get_mmio_addr(pbus, sbdev, i); + reg_attr[2 * i] = cpu_to_be32(mmio_base); + reg_attr[2 * i + 1] = cpu_to_be32( + memory_region_size(vdev->regions[i]->mem)); + } + qemu_fdt_setprop(guest_fdt, nodename, "reg", reg_attr, + vbasedev->num_regions * 2 * sizeof(uint32_t)); + + irq_attr = g_new(uint32_t, vbasedev->num_irqs * 3); + for (i = 0; i < vbasedev->num_irqs; i++) { + irq_number = platform_bus_get_irqn(pbus, sbdev , i) + + data->irq_start; + irq_attr[3 * i] = cpu_to_be32(GIC_FDT_IRQ_TYPE_SPI); + irq_attr[3 * i + 1] = cpu_to_be32(irq_number); + /* + * General device interrupt and PCS auto-negotiation interrupts are + * level-sensitive while the 4 per-channel interrupts are edge + * sensitive + */ + QLIST_FOREACH(intp, &vdev->intp_list, next) { + if (intp->pin == i) { + break; + } + } + if (intp->flags & VFIO_IRQ_INFO_AUTOMASKED) { + irq_attr[3 * i + 2] = cpu_to_be32(GIC_FDT_IRQ_FLAGS_LEVEL_HI); + } else { + irq_attr[3 * i + 2] = cpu_to_be32(GIC_FDT_IRQ_FLAGS_EDGE_LO_HI); + } + } + qemu_fdt_setprop(guest_fdt, nodename, "interrupts", + irq_attr, vbasedev->num_irqs * 3 * sizeof(uint32_t)); + + g_free(host_fdt); + g_strfreev(node_path); + g_free(irq_attr); + g_free(reg_attr); + g_free(nodename); + return 0; +} + +/* DT compatible matching */ +static bool vfio_platform_match(SysBusDevice *sbdev, + const BindingEntry *entry) +{ + VFIOPlatformDevice *vdev = VFIO_PLATFORM_DEVICE(sbdev); + const char *compat; + unsigned int n; + + for (n = vdev->num_compat, compat = vdev->compat; n > 0; + n--, compat += strlen(compat) + 1) { + if (!strcmp(entry->compat, compat)) { + return true; + } + } + + return false; +} + +#define VFIO_PLATFORM_BINDING(compat, add_fn) \ + {TYPE_VFIO_PLATFORM, (compat), (add_fn), vfio_platform_match} + +#endif /* CONFIG_LINUX */ + +#ifdef CONFIG_TPM +/* + * add_tpm_tis_fdt_node: Create a DT node for TPM TIS + * + * See kernel documentation: + * Documentation/devicetree/bindings/security/tpm/tpm_tis_mmio.txt + * Optional interrupt for command completion is not exposed + */ +static int add_tpm_tis_fdt_node(SysBusDevice *sbdev, void *opaque) +{ + PlatformBusFDTData *data = opaque; + PlatformBusDevice *pbus = data->pbus; + void *fdt = data->fdt; + const char *parent_node = data->pbus_node_name; + char *nodename; + uint32_t reg_attr[2]; + uint64_t mmio_base; + + mmio_base = platform_bus_get_mmio_addr(pbus, sbdev, 0); + nodename = g_strdup_printf("%s/tpm_tis@%" PRIx64, parent_node, mmio_base); + qemu_fdt_add_subnode(fdt, nodename); + + qemu_fdt_setprop_string(fdt, nodename, "compatible", "tcg,tpm-tis-mmio"); + + reg_attr[0] = cpu_to_be32(mmio_base); + reg_attr[1] = cpu_to_be32(0x5000); + qemu_fdt_setprop(fdt, nodename, "reg", reg_attr, 2 * 
sizeof(uint32_t));
+
+    g_free(nodename);
+    return 0;
+}
+#endif
+
+static int no_fdt_node(SysBusDevice *sbdev, void *opaque)
+{
+    return 0;
+}
+
+/* Device-type-based matching */
+static bool type_match(SysBusDevice *sbdev, const BindingEntry *entry)
+{
+    return !strcmp(object_get_typename(OBJECT(sbdev)), entry->typename);
+}
+
+#define TYPE_BINDING(type, add_fn) {(type), NULL, (add_fn), NULL}
+
+/* list of supported dynamic sysbus bindings */
+static const BindingEntry bindings[] = {
+#ifdef CONFIG_LINUX
+    TYPE_BINDING(TYPE_VFIO_CALXEDA_XGMAC, add_calxeda_midway_xgmac_fdt_node),
+    TYPE_BINDING(TYPE_VFIO_AMD_XGBE, add_amd_xgbe_fdt_node),
+    VFIO_PLATFORM_BINDING("amd,xgbe-seattle-v1a", add_amd_xgbe_fdt_node),
+#endif
+#ifdef CONFIG_TPM
+    TYPE_BINDING(TYPE_TPM_TIS_SYSBUS, add_tpm_tis_fdt_node),
+#endif
+    TYPE_BINDING(TYPE_RAMFB_DEVICE, no_fdt_node),
+    TYPE_BINDING("", NULL), /* last element */
+};
+
+/* Generic Code */
+
+/**
+ * add_fdt_node - add the device tree node of a dynamic sysbus device
+ *
+ * @sbdev: handle to the sysbus device
+ * @opaque: handle to the PlatformBusFDTData
+ *
+ * Checks whether the sysbus device type belongs to the list of
+ * dynamically instantiable device types and, if so, calls the node
+ * creation function.
+ */
+static void add_fdt_node(SysBusDevice *sbdev, void *opaque)
+{
+    int i, ret;
+
+    for (i = 0; i < ARRAY_SIZE(bindings); i++) {
+        const BindingEntry *iter = &bindings[i];
+
+        if (type_match(sbdev, iter)) {
+            if (!iter->match_fn || iter->match_fn(sbdev, iter)) {
+                ret = iter->add_fn(sbdev, opaque);
+                assert(!ret);
+                return;
+            }
+        }
+    }
+    error_report("Device %s cannot be dynamically instantiated",
+                 qdev_fw_name(DEVICE(sbdev)));
+    exit(1);
+}
+
+void platform_bus_add_all_fdt_nodes(void *fdt, const char *intc, hwaddr addr,
+                                    hwaddr bus_size, int irq_start)
+{
+    const char platcomp[] = "qemu,platform\0simple-bus";
+    PlatformBusDevice *pbus;
+    DeviceState *dev;
+    gchar *node;
+
+    assert(fdt);
+
+    node = g_strdup_printf("/platform@%"PRIx64, addr);
+
+    /* Create a /platform node that we can put all devices into */
+    qemu_fdt_add_subnode(fdt, node);
+    qemu_fdt_setprop(fdt, node, "compatible", platcomp, sizeof(platcomp));
+
+    /* Our platform bus region is less than 32 bits, so one cell is enough
+     * for address and size
+     */
+    qemu_fdt_setprop_cells(fdt, node, "#size-cells", 1);
+    qemu_fdt_setprop_cells(fdt, node, "#address-cells", 1);
+    qemu_fdt_setprop_cells(fdt, node, "ranges", 0, addr >> 32, addr, bus_size);
+
+    qemu_fdt_setprop_phandle(fdt, node, "interrupt-parent", intc);
+
+    dev = qdev_find_recursive(sysbus_get_default(), TYPE_PLATFORM_BUS_DEVICE);
+    pbus = PLATFORM_BUS_DEVICE(dev);
+
+    PlatformBusFDTData data = {
+        .fdt = fdt,
+        .irq_start = irq_start,
+        .pbus_node_name = node,
+        .pbus = pbus,
+    };
+
+    /* Loop through all dynamic sysbus devices and create their nodes */
+    foreach_dynamic_sysbus_device(add_fdt_node, &data);
+
+    g_free(node);
+}
diff --git a/hw/arm/tosa.c b/hw/arm/tosa.c
new file mode 100644
index 000000000..d5a6763cf
--- /dev/null
+++ b/hw/arm/tosa.c
@@ -0,0 +1,326 @@
+/* vim:set shiftwidth=4 ts=4 et: */
+/*
+ * PXA255 Sharp Zaurus SL-6000 PDA platform
+ *
+ * Copyright (c) 2008 Dmitry Baryshkov
+ *
+ * Code based on spitz platform by Andrzej Zaborowski
+ * This code is licensed under the GNU GPL v2.
+ *
+ * Contributions after 2012-01-13 are licensed under the terms of the
+ * GNU GPL, version 2 or (at your option) any later version.
+ */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "sysemu/runstate.h" +#include "hw/arm/pxa.h" +#include "hw/arm/boot.h" +#include "hw/arm/sharpsl.h" +#include "hw/pcmcia.h" +#include "hw/boards.h" +#include "hw/display/tc6393xb.h" +#include "hw/i2c/i2c.h" +#include "hw/irq.h" +#include "hw/ssi/ssi.h" +#include "hw/sysbus.h" +#include "hw/misc/led.h" +#include "exec/address-spaces.h" +#include "qom/object.h" + +#define TOSA_RAM 0x04000000 +#define TOSA_ROM 0x00800000 + +#define TOSA_GPIO_USB_IN (5) +#define TOSA_GPIO_nSD_DETECT (9) +#define TOSA_GPIO_ON_RESET (19) +#define TOSA_GPIO_CF_IRQ (21) /* CF slot0 Ready */ +#define TOSA_GPIO_CF_CD (13) +#define TOSA_GPIO_TC6393XB_INT (15) +#define TOSA_GPIO_JC_CF_IRQ (36) /* CF slot1 Ready */ + +#define TOSA_SCOOP_GPIO_BASE 1 +#define TOSA_GPIO_IR_POWERDWN (TOSA_SCOOP_GPIO_BASE + 2) +#define TOSA_GPIO_SD_WP (TOSA_SCOOP_GPIO_BASE + 3) +#define TOSA_GPIO_PWR_ON (TOSA_SCOOP_GPIO_BASE + 4) + +#define TOSA_SCOOP_JC_GPIO_BASE 1 +#define TOSA_GPIO_BT_LED (TOSA_SCOOP_JC_GPIO_BASE + 0) +#define TOSA_GPIO_NOTE_LED (TOSA_SCOOP_JC_GPIO_BASE + 1) +#define TOSA_GPIO_CHRG_ERR_LED (TOSA_SCOOP_JC_GPIO_BASE + 2) +#define TOSA_GPIO_TC6393XB_L3V_ON (TOSA_SCOOP_JC_GPIO_BASE + 5) +#define TOSA_GPIO_WLAN_LED (TOSA_SCOOP_JC_GPIO_BASE + 7) + +#define DAC_BASE 0x4e +#define DAC_CH1 0 +#define DAC_CH2 1 + +static void tosa_microdrive_attach(PXA2xxState *cpu) +{ + PCMCIACardState *md; + DriveInfo *dinfo; + + dinfo = drive_get(IF_IDE, 0, 0); + if (!dinfo || dinfo->media_cd) + return; + md = dscm1xxxx_init(dinfo); + pxa2xx_pcmcia_attach(cpu->pcmcia[0], md); +} + +/* + * Encapsulation of some GPIO line behaviour for the Tosa board + * + * QEMU interface: + * + named GPIO inputs "leds[0..3]": assert to light LEDs + * + named GPIO input "reset": when asserted, resets the system + */ + +#define TYPE_TOSA_MISC_GPIO "tosa-misc-gpio" +OBJECT_DECLARE_SIMPLE_TYPE(TosaMiscGPIOState, TOSA_MISC_GPIO) + +struct TosaMiscGPIOState { + SysBusDevice parent_obj; +}; + +static void tosa_reset(void *opaque, int line, int level) +{ + if (level) { + qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); + } +} + +static void tosa_misc_gpio_init(Object *obj) +{ + DeviceState *dev = DEVICE(obj); + + qdev_init_gpio_in_named(dev, tosa_reset, "reset", 1); +} + +static void tosa_gpio_setup(PXA2xxState *cpu, + DeviceState *scp0, + DeviceState *scp1, + TC6393xbState *tmio) +{ + DeviceState *misc_gpio; + LEDState *led[4]; + + misc_gpio = sysbus_create_simple(TYPE_TOSA_MISC_GPIO, -1, NULL); + + /* MMC/SD host */ + pxa2xx_mmci_handlers(cpu->mmc, + qdev_get_gpio_in(scp0, TOSA_GPIO_SD_WP), + qemu_irq_invert(qdev_get_gpio_in(cpu->gpio, TOSA_GPIO_nSD_DETECT))); + + /* Handle reset */ + qdev_connect_gpio_out(cpu->gpio, TOSA_GPIO_ON_RESET, + qdev_get_gpio_in_named(misc_gpio, "reset", 0)); + + /* PCMCIA signals: card's IRQ and Card-Detect */ + pxa2xx_pcmcia_set_irq_cb(cpu->pcmcia[0], + qdev_get_gpio_in(cpu->gpio, TOSA_GPIO_CF_IRQ), + qdev_get_gpio_in(cpu->gpio, TOSA_GPIO_CF_CD)); + + pxa2xx_pcmcia_set_irq_cb(cpu->pcmcia[1], + qdev_get_gpio_in(cpu->gpio, TOSA_GPIO_JC_CF_IRQ), + NULL); + + led[0] = led_create_simple(OBJECT(misc_gpio), GPIO_POLARITY_ACTIVE_HIGH, + LED_COLOR_BLUE, "bluetooth"); + led[1] = led_create_simple(OBJECT(misc_gpio), GPIO_POLARITY_ACTIVE_HIGH, + LED_COLOR_GREEN, "note"); + led[2] = led_create_simple(OBJECT(misc_gpio), GPIO_POLARITY_ACTIVE_HIGH, + LED_COLOR_AMBER, "charger-error"); + led[3] = led_create_simple(OBJECT(misc_gpio), GPIO_POLARITY_ACTIVE_HIGH, + LED_COLOR_GREEN, 
"wlan"); + + qdev_connect_gpio_out(scp1, TOSA_GPIO_BT_LED, + qdev_get_gpio_in(DEVICE(led[0]), 0)); + qdev_connect_gpio_out(scp1, TOSA_GPIO_NOTE_LED, + qdev_get_gpio_in(DEVICE(led[1]), 0)); + qdev_connect_gpio_out(scp1, TOSA_GPIO_CHRG_ERR_LED, + qdev_get_gpio_in(DEVICE(led[2]), 0)); + qdev_connect_gpio_out(scp1, TOSA_GPIO_WLAN_LED, + qdev_get_gpio_in(DEVICE(led[3]), 0)); + + qdev_connect_gpio_out(scp1, TOSA_GPIO_TC6393XB_L3V_ON, tc6393xb_l3v_get(tmio)); + + /* UDC Vbus */ + qemu_irq_raise(qdev_get_gpio_in(cpu->gpio, TOSA_GPIO_USB_IN)); +} + +static uint32_t tosa_ssp_tansfer(SSIPeripheral *dev, uint32_t value) +{ + fprintf(stderr, "TG: %u %02x\n", value >> 5, value & 0x1f); + return 0; +} + +static void tosa_ssp_realize(SSIPeripheral *dev, Error **errp) +{ + /* Nothing to do. */ +} + +#define TYPE_TOSA_DAC "tosa_dac" +OBJECT_DECLARE_SIMPLE_TYPE(TosaDACState, TOSA_DAC) + +struct TosaDACState { + I2CSlave parent_obj; + + int len; + char buf[3]; +}; + +static int tosa_dac_send(I2CSlave *i2c, uint8_t data) +{ + TosaDACState *s = TOSA_DAC(i2c); + + s->buf[s->len] = data; + if (s->len ++ > 2) { +#ifdef VERBOSE + fprintf(stderr, "%s: message too long (%i bytes)\n", __func__, s->len); +#endif + return 1; + } + + if (s->len == 2) { + fprintf(stderr, "dac: channel %d value 0x%02x\n", + s->buf[0], s->buf[1]); + } + + return 0; +} + +static int tosa_dac_event(I2CSlave *i2c, enum i2c_event event) +{ + TosaDACState *s = TOSA_DAC(i2c); + + s->len = 0; + switch (event) { + case I2C_START_SEND: + break; + case I2C_START_RECV: + printf("%s: recv not supported!!!\n", __func__); + break; + case I2C_FINISH: +#ifdef VERBOSE + if (s->len < 2) + printf("%s: message too short (%i bytes)\n", __func__, s->len); + if (s->len > 2) + printf("%s: message too long\n", __func__); +#endif + break; + default: + break; + } + + return 0; +} + +static uint8_t tosa_dac_recv(I2CSlave *s) +{ + printf("%s: recv not supported!!!\n", __func__); + return 0xff; +} + +static void tosa_tg_init(PXA2xxState *cpu) +{ + I2CBus *bus = pxa2xx_i2c_bus(cpu->i2c[0]); + i2c_slave_create_simple(bus, TYPE_TOSA_DAC, DAC_BASE); + ssi_create_peripheral(cpu->ssp[1], "tosa-ssp"); +} + + +static struct arm_boot_info tosa_binfo = { + .loader_start = PXA2XX_SDRAM_BASE, + .ram_size = 0x04000000, +}; + +static void tosa_init(MachineState *machine) +{ + MemoryRegion *address_space_mem = get_system_memory(); + MemoryRegion *rom = g_new(MemoryRegion, 1); + PXA2xxState *mpu; + TC6393xbState *tmio; + DeviceState *scp0, *scp1; + + mpu = pxa255_init(address_space_mem, tosa_binfo.ram_size); + + memory_region_init_rom(rom, NULL, "tosa.rom", TOSA_ROM, &error_fatal); + memory_region_add_subregion(address_space_mem, 0, rom); + + tmio = tc6393xb_init(address_space_mem, 0x10000000, + qdev_get_gpio_in(mpu->gpio, TOSA_GPIO_TC6393XB_INT)); + + scp0 = sysbus_create_simple("scoop", 0x08800000, NULL); + scp1 = sysbus_create_simple("scoop", 0x14800040, NULL); + + tosa_gpio_setup(mpu, scp0, scp1, tmio); + + tosa_microdrive_attach(mpu); + + tosa_tg_init(mpu); + + tosa_binfo.board_id = 0x208; + arm_load_kernel(mpu->cpu, machine, &tosa_binfo); + sl_bootparam_write(SL_PXA_PARAM_BASE); +} + +static void tosapda_machine_init(MachineClass *mc) +{ + mc->desc = "Sharp SL-6000 (Tosa) PDA (PXA255)"; + mc->init = tosa_init; + mc->block_default_type = IF_IDE; + mc->ignore_memory_transaction_failures = true; +} + +DEFINE_MACHINE("tosa", tosapda_machine_init) + +static void tosa_dac_class_init(ObjectClass *klass, void *data) +{ + I2CSlaveClass *k = I2C_SLAVE_CLASS(klass); + + k->event = 
tosa_dac_event; + k->recv = tosa_dac_recv; + k->send = tosa_dac_send; +} + +static const TypeInfo tosa_dac_info = { + .name = TYPE_TOSA_DAC, + .parent = TYPE_I2C_SLAVE, + .instance_size = sizeof(TosaDACState), + .class_init = tosa_dac_class_init, +}; + +static void tosa_ssp_class_init(ObjectClass *klass, void *data) +{ + SSIPeripheralClass *k = SSI_PERIPHERAL_CLASS(klass); + + k->realize = tosa_ssp_realize; + k->transfer = tosa_ssp_tansfer; +} + +static const TypeInfo tosa_ssp_info = { + .name = "tosa-ssp", + .parent = TYPE_SSI_PERIPHERAL, + .instance_size = sizeof(SSIPeripheral), + .class_init = tosa_ssp_class_init, +}; + +static const TypeInfo tosa_misc_gpio_info = { + .name = TYPE_TOSA_MISC_GPIO, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(TosaMiscGPIOState), + .instance_init = tosa_misc_gpio_init, + /* + * No class init required: device has no internal state so does not + * need to set up reset or vmstate, and has no realize method. + */ +}; + +static void tosa_register_types(void) +{ + type_register_static(&tosa_dac_info); + type_register_static(&tosa_ssp_info); + type_register_static(&tosa_misc_gpio_info); +} + +type_init(tosa_register_types) diff --git a/hw/arm/trace-events b/hw/arm/trace-events new file mode 100644 index 000000000..2dee296c8 --- /dev/null +++ b/hw/arm/trace-events @@ -0,0 +1,55 @@ +# See docs/devel/tracing.rst for syntax documentation. + +# virt-acpi-build.c +virt_acpi_setup(void) "No fw cfg or ACPI disabled. Bailing out." + +# smmu-common.c +smmu_add_mr(const char *name) "%s" +smmu_ptw_level(int level, uint64_t iova, size_t subpage_size, uint64_t baseaddr, uint32_t offset, uint64_t pte) "level=%d iova=0x%"PRIx64" subpage_sz=0x%zx baseaddr=0x%"PRIx64" offset=%d => pte=0x%"PRIx64 +smmu_ptw_invalid_pte(int stage, int level, uint64_t baseaddr, uint64_t pteaddr, uint32_t offset, uint64_t pte) "stage=%d level=%d base@=0x%"PRIx64" pte@=0x%"PRIx64" offset=%d pte=0x%"PRIx64 +smmu_ptw_page_pte(int stage, int level, uint64_t iova, uint64_t baseaddr, uint64_t pteaddr, uint64_t pte, uint64_t address) "stage=%d level=%d iova=0x%"PRIx64" base@=0x%"PRIx64" pte@=0x%"PRIx64" pte=0x%"PRIx64" page address = 0x%"PRIx64 +smmu_ptw_block_pte(int stage, int level, uint64_t baseaddr, uint64_t pteaddr, uint64_t pte, uint64_t iova, uint64_t gpa, int bsize_mb) "stage=%d level=%d base@=0x%"PRIx64" pte@=0x%"PRIx64" pte=0x%"PRIx64" iova=0x%"PRIx64" block address = 0x%"PRIx64" block size = %d MiB" +smmu_get_pte(uint64_t baseaddr, int index, uint64_t pteaddr, uint64_t pte) "baseaddr=0x%"PRIx64" index=0x%x, pteaddr=0x%"PRIx64", pte=0x%"PRIx64 +smmu_iotlb_inv_all(void) "IOTLB invalidate all" +smmu_iotlb_inv_asid(uint16_t asid) "IOTLB invalidate asid=%d" +smmu_iotlb_inv_iova(uint16_t asid, uint64_t addr) "IOTLB invalidate asid=%d addr=0x%"PRIx64 +smmu_inv_notifiers_mr(const char *name) "iommu mr=%s" +smmu_iotlb_lookup_hit(uint16_t asid, uint64_t addr, uint32_t hit, uint32_t miss, uint32_t p) "IOTLB cache HIT asid=%d addr=0x%"PRIx64" hit=%d miss=%d hit rate=%d" +smmu_iotlb_lookup_miss(uint16_t asid, uint64_t addr, uint32_t hit, uint32_t miss, uint32_t p) "IOTLB cache MISS asid=%d addr=0x%"PRIx64" hit=%d miss=%d hit rate=%d" +smmu_iotlb_insert(uint16_t asid, uint64_t addr, uint8_t tg, uint8_t level) "IOTLB ++ asid=%d addr=0x%"PRIx64" tg=%d level=%d" + +# smmuv3.c +smmuv3_read_mmio(uint64_t addr, uint64_t val, unsigned size, uint32_t r) "addr: 0x%"PRIx64" val:0x%"PRIx64" size: 0x%x(%d)" +smmuv3_trigger_irq(int irq) "irq=%d" +smmuv3_write_gerror(uint32_t toggled, uint32_t gerror) 
"toggled=0x%x, new GERROR=0x%x" +smmuv3_write_gerrorn(uint32_t acked, uint32_t gerrorn) "acked=0x%x, new GERRORN=0x%x" +smmuv3_unhandled_cmd(uint32_t type) "Unhandled command type=%d" +smmuv3_cmdq_consume(uint32_t prod, uint32_t cons, uint8_t prod_wrap, uint8_t cons_wrap) "prod=%d cons=%d prod.wrap=%d cons.wrap=%d" +smmuv3_cmdq_opcode(const char *opcode) "<--- %s" +smmuv3_cmdq_consume_out(uint32_t prod, uint32_t cons, uint8_t prod_wrap, uint8_t cons_wrap) "prod:%d, cons:%d, prod_wrap:%d, cons_wrap:%d " +smmuv3_cmdq_consume_error(const char *cmd_name, uint8_t cmd_error) "Error on %s command execution: %d" +smmuv3_write_mmio(uint64_t addr, uint64_t val, unsigned size, uint32_t r) "addr: 0x%"PRIx64" val:0x%"PRIx64" size: 0x%x(%d)" +smmuv3_record_event(const char *type, uint32_t sid) "%s sid=0x%x" +smmuv3_find_ste(uint16_t sid, uint32_t features, uint16_t sid_split) "sid=0x%x features:0x%x, sid_split:0x%x" +smmuv3_find_ste_2lvl(uint64_t strtab_base, uint64_t l1ptr, int l1_ste_offset, uint64_t l2ptr, int l2_ste_offset, int max_l2_ste) "strtab_base:0x%"PRIx64" l1ptr:0x%"PRIx64" l1_off:0x%x, l2ptr:0x%"PRIx64" l2_off:0x%x max_l2_ste:%d" +smmuv3_get_ste(uint64_t addr) "STE addr: 0x%"PRIx64 +smmuv3_translate_disable(const char *n, uint16_t sid, uint64_t addr, bool is_write) "%s sid=0x%x bypass (smmu disabled) iova:0x%"PRIx64" is_write=%d" +smmuv3_translate_bypass(const char *n, uint16_t sid, uint64_t addr, bool is_write) "%s sid=0x%x STE bypass iova:0x%"PRIx64" is_write=%d" +smmuv3_translate_abort(const char *n, uint16_t sid, uint64_t addr, bool is_write) "%s sid=0x%x abort on iova:0x%"PRIx64" is_write=%d" +smmuv3_translate_success(const char *n, uint16_t sid, uint64_t iova, uint64_t translated, int perm) "%s sid=0x%x iova=0x%"PRIx64" translated=0x%"PRIx64" perm=0x%x" +smmuv3_get_cd(uint64_t addr) "CD addr: 0x%"PRIx64 +smmuv3_decode_cd(uint32_t oas) "oas=%d" +smmuv3_decode_cd_tt(int i, uint32_t tsz, uint64_t ttb, uint32_t granule_sz, bool had) "TT[%d]:tsz:%d ttb:0x%"PRIx64" granule_sz:%d had:%d" +smmuv3_cmdq_cfgi_ste(int streamid) "streamid= 0x%x" +smmuv3_cmdq_cfgi_ste_range(int start, int end) "start=0x%x - end=0x%x" +smmuv3_cmdq_cfgi_cd(uint32_t sid) "sid=0x%x" +smmuv3_config_cache_hit(uint32_t sid, uint32_t hits, uint32_t misses, uint32_t perc) "Config cache HIT for sid=0x%x (hits=%d, misses=%d, hit rate=%d)" +smmuv3_config_cache_miss(uint32_t sid, uint32_t hits, uint32_t misses, uint32_t perc) "Config cache MISS for sid=0x%x (hits=%d, misses=%d, hit rate=%d)" +smmuv3_s1_range_inval(int vmid, int asid, uint64_t addr, uint8_t tg, uint64_t num_pages, uint8_t ttl, bool leaf) "vmid=%d asid=%d addr=0x%"PRIx64" tg=%d num_pages=0x%"PRIx64" ttl=%d leaf=%d" +smmuv3_cmdq_tlbi_nh(void) "" +smmuv3_cmdq_tlbi_nh_asid(uint16_t asid) "asid=%d" +smmuv3_config_cache_inv(uint32_t sid) "Config cache INV for sid=0x%x" +smmuv3_notify_flag_add(const char *iommu) "ADD SMMUNotifier node for iommu mr=%s" +smmuv3_notify_flag_del(const char *iommu) "DEL SMMUNotifier node for iommu mr=%s" +smmuv3_inv_notifiers_iova(const char *name, uint16_t asid, uint64_t iova, uint8_t tg, uint64_t num_pages) "iommu mr=%s asid=%d iova=0x%"PRIx64" tg=%d num_pages=0x%"PRIx64 + diff --git a/hw/arm/trace.h b/hw/arm/trace.h new file mode 100644 index 000000000..91337aa6b --- /dev/null +++ b/hw/arm/trace.h @@ -0,0 +1 @@ +#include "trace/trace-hw_arm.h" diff --git a/hw/arm/versatilepb.c b/hw/arm/versatilepb.c new file mode 100644 index 000000000..575399c4f --- /dev/null +++ b/hw/arm/versatilepb.c @@ -0,0 +1,475 @@ +/* + * ARM Versatile 
Platform/Application Baseboard System emulation. + * + * Copyright (c) 2005-2007 CodeSourcery. + * Written by Paul Brook + * + * This code is licensed under the GPL. + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "cpu.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "hw/arm/boot.h" +#include "hw/net/smc91c111.h" +#include "net/net.h" +#include "sysemu/sysemu.h" +#include "hw/pci/pci.h" +#include "hw/i2c/i2c.h" +#include "hw/i2c/arm_sbcon_i2c.h" +#include "hw/irq.h" +#include "hw/boards.h" +#include "hw/block/flash.h" +#include "qemu/error-report.h" +#include "hw/char/pl011.h" +#include "hw/sd/sd.h" +#include "qom/object.h" + +#define VERSATILE_FLASH_ADDR 0x34000000 +#define VERSATILE_FLASH_SIZE (64 * 1024 * 1024) +#define VERSATILE_FLASH_SECT_SIZE (256 * 1024) + +/* Primary interrupt controller. */ + +#define TYPE_VERSATILE_PB_SIC "versatilepb_sic" +OBJECT_DECLARE_SIMPLE_TYPE(vpb_sic_state, VERSATILE_PB_SIC) + +struct vpb_sic_state { + SysBusDevice parent_obj; + + MemoryRegion iomem; + uint32_t level; + uint32_t mask; + uint32_t pic_enable; + qemu_irq parent[32]; + int irq; +}; + +static const VMStateDescription vmstate_vpb_sic = { + .name = "versatilepb_sic", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(level, vpb_sic_state), + VMSTATE_UINT32(mask, vpb_sic_state), + VMSTATE_UINT32(pic_enable, vpb_sic_state), + VMSTATE_END_OF_LIST() + } +}; + +static void vpb_sic_update(vpb_sic_state *s) +{ + uint32_t flags; + + flags = s->level & s->mask; + qemu_set_irq(s->parent[s->irq], flags != 0); +} + +static void vpb_sic_update_pic(vpb_sic_state *s) +{ + int i; + uint32_t mask; + + for (i = 21; i <= 30; i++) { + mask = 1u << i; + if (!(s->pic_enable & mask)) + continue; + qemu_set_irq(s->parent[i], (s->level & mask) != 0); + } +} + +static void vpb_sic_set_irq(void *opaque, int irq, int level) +{ + vpb_sic_state *s = (vpb_sic_state *)opaque; + if (level) + s->level |= 1u << irq; + else + s->level &= ~(1u << irq); + if (s->pic_enable & (1u << irq)) + qemu_set_irq(s->parent[irq], level); + vpb_sic_update(s); +} + +static uint64_t vpb_sic_read(void *opaque, hwaddr offset, + unsigned size) +{ + vpb_sic_state *s = (vpb_sic_state *)opaque; + + switch (offset >> 2) { + case 0: /* STATUS */ + return s->level & s->mask; + case 1: /* RAWSTAT */ + return s->level; + case 2: /* ENABLE */ + return s->mask; + case 4: /* SOFTINT */ + return s->level & 1; + case 8: /* PICENABLE */ + return s->pic_enable; + default: + printf ("vpb_sic_read: Bad register offset 0x%x\n", (int)offset); + return 0; + } +} + +static void vpb_sic_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + vpb_sic_state *s = (vpb_sic_state *)opaque; + + switch (offset >> 2) { + case 2: /* ENSET */ + s->mask |= value; + break; + case 3: /* ENCLR */ + s->mask &= ~value; + break; + case 4: /* SOFTINTSET */ + if (value) + s->mask |= 1; + break; + case 5: /* SOFTINTCLR */ + if (value) + s->mask &= ~1u; + break; + case 8: /* PICENSET */ + s->pic_enable |= (value & 0x7fe00000); + vpb_sic_update_pic(s); + break; + case 9: /* PICENCLR */ + s->pic_enable &= ~value; + vpb_sic_update_pic(s); + break; + default: + printf ("vpb_sic_write: Bad register offset 0x%x\n", (int)offset); + return; + } + vpb_sic_update(s); +} + +static const MemoryRegionOps vpb_sic_ops = { + .read = vpb_sic_read, + .write = vpb_sic_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void vpb_sic_init(Object *obj) +{ + DeviceState *dev = DEVICE(obj); + vpb_sic_state *s = 
VERSATILE_PB_SIC(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + int i; + + qdev_init_gpio_in(dev, vpb_sic_set_irq, 32); + for (i = 0; i < 32; i++) { + sysbus_init_irq(sbd, &s->parent[i]); + } + s->irq = 31; + memory_region_init_io(&s->iomem, obj, &vpb_sic_ops, s, + "vpb-sic", 0x1000); + sysbus_init_mmio(sbd, &s->iomem); +} + +/* Board init. */ + +/* The AB and PB boards both use the same core, just with different + peripherals and expansion busses. For now we emulate a subset of the + PB peripherals and just change the board ID. */ + +static struct arm_boot_info versatile_binfo; + +static void versatile_init(MachineState *machine, int board_id) +{ + Object *cpuobj; + ARMCPU *cpu; + MemoryRegion *sysmem = get_system_memory(); + qemu_irq pic[32]; + qemu_irq sic[32]; + DeviceState *dev, *sysctl; + SysBusDevice *busdev; + DeviceState *pl041; + PCIBus *pci_bus; + NICInfo *nd; + I2CBus *i2c; + int n; + int done_smc = 0; + DriveInfo *dinfo; + + if (machine->ram_size > 0x10000000) { + /* Device starting at address 0x10000000, + * and memory cannot overlap with devices. + * Refuse to run rather than behaving very confusingly. + */ + error_report("versatilepb: memory size must not exceed 256MB"); + exit(1); + } + + cpuobj = object_new(machine->cpu_type); + + /* By default ARM1176 CPUs have EL3 enabled. This board does not + * currently support EL3 so the CPU EL3 property is disabled before + * realization. + */ + if (object_property_find(cpuobj, "has_el3")) { + object_property_set_bool(cpuobj, "has_el3", false, &error_fatal); + } + + qdev_realize(DEVICE(cpuobj), NULL, &error_fatal); + + cpu = ARM_CPU(cpuobj); + + /* ??? RAM should repeat to fill physical memory space. */ + /* SDRAM at address zero. */ + memory_region_add_subregion(sysmem, 0, machine->ram); + + sysctl = qdev_new("realview_sysctl"); + qdev_prop_set_uint32(sysctl, "sys_id", 0x41007004); + qdev_prop_set_uint32(sysctl, "proc_id", 0x02000000); + sysbus_realize_and_unref(SYS_BUS_DEVICE(sysctl), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(sysctl), 0, 0x10000000); + + dev = sysbus_create_varargs("pl190", 0x10140000, + qdev_get_gpio_in(DEVICE(cpu), ARM_CPU_IRQ), + qdev_get_gpio_in(DEVICE(cpu), ARM_CPU_FIQ), + NULL); + for (n = 0; n < 32; n++) { + pic[n] = qdev_get_gpio_in(dev, n); + } + dev = sysbus_create_simple(TYPE_VERSATILE_PB_SIC, 0x10003000, NULL); + for (n = 0; n < 32; n++) { + sysbus_connect_irq(SYS_BUS_DEVICE(dev), n, pic[n]); + sic[n] = qdev_get_gpio_in(dev, n); + } + + sysbus_create_simple("pl050_keyboard", 0x10006000, sic[3]); + sysbus_create_simple("pl050_mouse", 0x10007000, sic[4]); + + dev = qdev_new("versatile_pci"); + busdev = SYS_BUS_DEVICE(dev); + sysbus_realize_and_unref(busdev, &error_fatal); + sysbus_mmio_map(busdev, 0, 0x10001000); /* PCI controller regs */ + sysbus_mmio_map(busdev, 1, 0x41000000); /* PCI self-config */ + sysbus_mmio_map(busdev, 2, 0x42000000); /* PCI config */ + sysbus_mmio_map(busdev, 3, 0x43000000); /* PCI I/O */ + sysbus_mmio_map(busdev, 4, 0x44000000); /* PCI memory window 1 */ + sysbus_mmio_map(busdev, 5, 0x50000000); /* PCI memory window 2 */ + sysbus_mmio_map(busdev, 6, 0x60000000); /* PCI memory window 3 */ + sysbus_connect_irq(busdev, 0, sic[27]); + sysbus_connect_irq(busdev, 1, sic[28]); + sysbus_connect_irq(busdev, 2, sic[29]); + sysbus_connect_irq(busdev, 3, sic[30]); + pci_bus = (PCIBus *)qdev_get_child_bus(dev, "pci"); + + for(n = 0; n < nb_nics; n++) { + nd = &nd_table[n]; + + if (!done_smc && (!nd->model || strcmp(nd->model, "smc91c111") == 0)) { + smc91c111_init(nd, 0x10010000, 
sic[25]); + done_smc = 1; + } else { + pci_nic_init_nofail(nd, pci_bus, "rtl8139", NULL); + } + } + if (machine_usb(machine)) { + pci_create_simple(pci_bus, -1, "pci-ohci"); + } + n = drive_get_max_bus(IF_SCSI); + while (n >= 0) { + dev = DEVICE(pci_create_simple(pci_bus, -1, "lsi53c895a")); + lsi53c8xx_handle_legacy_cmdline(dev); + n--; + } + + pl011_create(0x101f1000, pic[12], serial_hd(0)); + pl011_create(0x101f2000, pic[13], serial_hd(1)); + pl011_create(0x101f3000, pic[14], serial_hd(2)); + pl011_create(0x10009000, sic[6], serial_hd(3)); + + dev = qdev_new("pl080"); + object_property_set_link(OBJECT(dev), "downstream", OBJECT(sysmem), + &error_fatal); + busdev = SYS_BUS_DEVICE(dev); + sysbus_realize_and_unref(busdev, &error_fatal); + sysbus_mmio_map(busdev, 0, 0x10130000); + sysbus_connect_irq(busdev, 0, pic[17]); + + sysbus_create_simple("sp804", 0x101e2000, pic[4]); + sysbus_create_simple("sp804", 0x101e3000, pic[5]); + + sysbus_create_simple("pl061", 0x101e4000, pic[6]); + sysbus_create_simple("pl061", 0x101e5000, pic[7]); + sysbus_create_simple("pl061", 0x101e6000, pic[8]); + sysbus_create_simple("pl061", 0x101e7000, pic[9]); + + /* The versatile/PB actually has a modified Color LCD controller + that includes hardware cursor support from the PL111. */ + dev = sysbus_create_simple("pl110_versatile", 0x10120000, pic[16]); + /* Wire up the mux control signals from the SYS_CLCD register */ + qdev_connect_gpio_out(sysctl, 0, qdev_get_gpio_in(dev, 0)); + + dev = sysbus_create_varargs("pl181", 0x10005000, sic[22], sic[1], NULL); + dinfo = drive_get_next(IF_SD); + if (dinfo) { + DeviceState *card; + + card = qdev_new(TYPE_SD_CARD); + qdev_prop_set_drive_err(card, "drive", blk_by_legacy_dinfo(dinfo), + &error_fatal); + qdev_realize_and_unref(card, qdev_get_child_bus(dev, "sd-bus"), + &error_fatal); + } + + dev = sysbus_create_varargs("pl181", 0x1000b000, sic[23], sic[2], NULL); + dinfo = drive_get_next(IF_SD); + if (dinfo) { + DeviceState *card; + + card = qdev_new(TYPE_SD_CARD); + qdev_prop_set_drive_err(card, "drive", blk_by_legacy_dinfo(dinfo), + &error_fatal); + qdev_realize_and_unref(card, qdev_get_child_bus(dev, "sd-bus"), + &error_fatal); + } + + /* Add PL031 Real Time Clock. */ + sysbus_create_simple("pl031", 0x101e8000, pic[10]); + + dev = sysbus_create_simple(TYPE_VERSATILE_I2C, 0x10002000, NULL); + i2c = (I2CBus *)qdev_get_child_bus(dev, "i2c"); + i2c_slave_create_simple(i2c, "ds1338", 0x68); + + /* Add PL041 AACI Interface to the LM4549 codec */ + pl041 = qdev_new("pl041"); + qdev_prop_set_uint32(pl041, "nc_fifo_depth", 512); + sysbus_realize_and_unref(SYS_BUS_DEVICE(pl041), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(pl041), 0, 0x10004000); + sysbus_connect_irq(SYS_BUS_DEVICE(pl041), 0, sic[24]); + + /* Memory map for Versatile/PB: */ + /* 0x10000000 System registers. */ + /* 0x10001000 PCI controller config registers. */ + /* 0x10002000 Serial bus interface. */ + /* 0x10003000 Secondary interrupt controller. */ + /* 0x10004000 AACI (audio). */ + /* 0x10005000 MMCI0. */ + /* 0x10006000 KMI0 (keyboard). */ + /* 0x10007000 KMI1 (mouse). */ + /* 0x10008000 Character LCD Interface. */ + /* 0x10009000 UART3. */ + /* 0x1000a000 Smart card 1. */ + /* 0x1000b000 MMCI1. */ + /* 0x10010000 Ethernet. */ + /* 0x10020000 USB. */ + /* 0x10100000 SSMC. */ + /* 0x10110000 MPMC. */ + /* 0x10120000 CLCD Controller. */ + /* 0x10130000 DMA Controller. */ + /* 0x10140000 Vectored interrupt controller. */ + /* 0x101d0000 AHB Monitor Interface. */ + /* 0x101e0000 System Controller. 
*/ + /* 0x101e1000 Watchdog Interface. */ + /* 0x101e2000 Timer 0/1. */ + /* 0x101e3000 Timer 2/3. */ + /* 0x101e4000 GPIO port 0. */ + /* 0x101e5000 GPIO port 1. */ + /* 0x101e6000 GPIO port 2. */ + /* 0x101e7000 GPIO port 3. */ + /* 0x101e8000 RTC. */ + /* 0x101f0000 Smart card 0. */ + /* 0x101f1000 UART0. */ + /* 0x101f2000 UART1. */ + /* 0x101f3000 UART2. */ + /* 0x101f4000 SSPI. */ + /* 0x34000000 NOR Flash */ + + dinfo = drive_get(IF_PFLASH, 0, 0); + if (!pflash_cfi01_register(VERSATILE_FLASH_ADDR, "versatile.flash", + VERSATILE_FLASH_SIZE, + dinfo ? blk_by_legacy_dinfo(dinfo) : NULL, + VERSATILE_FLASH_SECT_SIZE, + 4, 0x0089, 0x0018, 0x0000, 0x0, 0)) { + fprintf(stderr, "qemu: Error registering flash memory.\n"); + } + + versatile_binfo.ram_size = machine->ram_size; + versatile_binfo.board_id = board_id; + arm_load_kernel(cpu, machine, &versatile_binfo); +} + +static void vpb_init(MachineState *machine) +{ + versatile_init(machine, 0x183); +} + +static void vab_init(MachineState *machine) +{ + versatile_init(machine, 0x25e); +} + +static void versatilepb_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + + mc->desc = "ARM Versatile/PB (ARM926EJ-S)"; + mc->init = vpb_init; + mc->block_default_type = IF_SCSI; + mc->ignore_memory_transaction_failures = true; + mc->default_cpu_type = ARM_CPU_TYPE_NAME("arm926"); + mc->default_ram_id = "versatile.ram"; +} + +static const TypeInfo versatilepb_type = { + .name = MACHINE_TYPE_NAME("versatilepb"), + .parent = TYPE_MACHINE, + .class_init = versatilepb_class_init, +}; + +static void versatileab_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + + mc->desc = "ARM Versatile/AB (ARM926EJ-S)"; + mc->init = vab_init; + mc->block_default_type = IF_SCSI; + mc->ignore_memory_transaction_failures = true; + mc->default_cpu_type = ARM_CPU_TYPE_NAME("arm926"); + mc->default_ram_id = "versatile.ram"; +} + +static const TypeInfo versatileab_type = { + .name = MACHINE_TYPE_NAME("versatileab"), + .parent = TYPE_MACHINE, + .class_init = versatileab_class_init, +}; + +static void versatile_machine_init(void) +{ + type_register_static(&versatilepb_type); + type_register_static(&versatileab_type); +} + +type_init(versatile_machine_init) + +static void vpb_sic_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->vmsd = &vmstate_vpb_sic; +} + +static const TypeInfo vpb_sic_info = { + .name = TYPE_VERSATILE_PB_SIC, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(vpb_sic_state), + .instance_init = vpb_sic_init, + .class_init = vpb_sic_class_init, +}; + +static void versatilepb_register_types(void) +{ + type_register_static(&vpb_sic_info); +} + +type_init(versatilepb_register_types) diff --git a/hw/arm/vexpress.c b/hw/arm/vexpress.c new file mode 100644 index 000000000..58481c076 --- /dev/null +++ b/hw/arm/vexpress.c @@ -0,0 +1,857 @@ +/* + * ARM Versatile Express emulation. + * + * Copyright (c) 2010 - 2011 B Labs Ltd. + * Copyright (c) 2011 Linaro Limited + * Written by Bahadir Balban, Amit Mahajan, Peter Maydell + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Contributions after 2012-01-13 are licensed under the terms of the
+ * GNU GPL, version 2 or (at your option) any later version.
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "qemu-common.h"
+#include "qemu/datadir.h"
+#include "cpu.h"
+#include "hw/sysbus.h"
+#include "hw/arm/boot.h"
+#include "hw/arm/primecell.h"
+#include "hw/net/lan9118.h"
+#include "hw/i2c/i2c.h"
+#include "net/net.h"
+#include "sysemu/sysemu.h"
+#include "hw/boards.h"
+#include "hw/loader.h"
+#include "hw/block/flash.h"
+#include "sysemu/device_tree.h"
+#include "qemu/error-report.h"
+#include <libfdt.h>
+#include "hw/char/pl011.h"
+#include "hw/cpu/a9mpcore.h"
+#include "hw/cpu/a15mpcore.h"
+#include "hw/i2c/arm_sbcon_i2c.h"
+#include "hw/sd/sd.h"
+#include "qom/object.h"
+
+#define VEXPRESS_BOARD_ID 0x8e0
+#define VEXPRESS_FLASH_SIZE (64 * 1024 * 1024)
+#define VEXPRESS_FLASH_SECT_SIZE (256 * 1024)
+
+/* Number of virtio transports to create (0..8; limited by
+ * number of available IRQ lines).
+ */
+#define NUM_VIRTIO_TRANSPORTS 4
+
+/* Address maps for peripherals:
+ * the Versatile Express motherboard has two possible maps,
+ * the "legacy" one (used for A9) and the "Cortex-A Series"
+ * map (used for newer cores).
+ * Individual daughterboards can also have different maps for
+ * their peripherals.
+ */
+
+enum {
+    VE_SYSREGS,
+    VE_SP810,
+    VE_SERIALPCI,
+    VE_PL041,
+    VE_MMCI,
+    VE_KMI0,
+    VE_KMI1,
+    VE_UART0,
+    VE_UART1,
+    VE_UART2,
+    VE_UART3,
+    VE_WDT,
+    VE_TIMER01,
+    VE_TIMER23,
+    VE_SERIALDVI,
+    VE_RTC,
+    VE_COMPACTFLASH,
+    VE_CLCD,
+    VE_NORFLASH0,
+    VE_NORFLASH1,
+    VE_NORFLASHALIAS,
+    VE_SRAM,
+    VE_VIDEORAM,
+    VE_ETHERNET,
+    VE_USB,
+    VE_DAPROM,
+    VE_VIRTIO,
+};
+
+static hwaddr motherboard_legacy_map[] = {
+    [VE_NORFLASHALIAS] = 0,
+    /* CS7: 0x10000000 .. 0x10020000 */
+    [VE_SYSREGS] = 0x10000000,
+    [VE_SP810] = 0x10001000,
+    [VE_SERIALPCI] = 0x10002000,
+    [VE_PL041] = 0x10004000,
+    [VE_MMCI] = 0x10005000,
+    [VE_KMI0] = 0x10006000,
+    [VE_KMI1] = 0x10007000,
+    [VE_UART0] = 0x10009000,
+    [VE_UART1] = 0x1000a000,
+    [VE_UART2] = 0x1000b000,
+    [VE_UART3] = 0x1000c000,
+    [VE_WDT] = 0x1000f000,
+    [VE_TIMER01] = 0x10011000,
+    [VE_TIMER23] = 0x10012000,
+    [VE_VIRTIO] = 0x10013000,
+    [VE_SERIALDVI] = 0x10016000,
+    [VE_RTC] = 0x10017000,
+    [VE_COMPACTFLASH] = 0x1001a000,
+    [VE_CLCD] = 0x1001f000,
+    /* CS0: 0x40000000 .. 0x44000000 */
+    [VE_NORFLASH0] = 0x40000000,
+    /* CS1: 0x44000000 .. 0x48000000 */
+    [VE_NORFLASH1] = 0x44000000,
+    /* CS2: 0x48000000 .. 0x4a000000 */
+    [VE_SRAM] = 0x48000000,
+    /* CS3: 0x4c000000 .. 0x50000000 */
+    [VE_VIDEORAM] = 0x4c000000,
+    [VE_ETHERNET] = 0x4e000000,
+    [VE_USB] = 0x4f000000,
+};
+
+static hwaddr motherboard_aseries_map[] = {
+    [VE_NORFLASHALIAS] = 0,
+    /* CS0: 0x08000000 .. 0x0c000000 */
+    [VE_NORFLASH0] = 0x08000000,
+    /* CS4: 0x0c000000 .. 0x10000000 */
+    [VE_NORFLASH1] = 0x0c000000,
+    /* CS5: 0x10000000 .. 0x14000000 */
+    /* CS1: 0x14000000 .. 0x18000000 */
+    [VE_SRAM] = 0x14000000,
+    /* CS2: 0x18000000 .. 0x1c000000 */
+    [VE_VIDEORAM] = 0x18000000,
+    [VE_ETHERNET] = 0x1a000000,
+    [VE_USB] = 0x1b000000,
+    /* CS3: 0x1c000000 ..
0x20000000 */ + [VE_DAPROM] = 0x1c000000, + [VE_SYSREGS] = 0x1c010000, + [VE_SP810] = 0x1c020000, + [VE_SERIALPCI] = 0x1c030000, + [VE_PL041] = 0x1c040000, + [VE_MMCI] = 0x1c050000, + [VE_KMI0] = 0x1c060000, + [VE_KMI1] = 0x1c070000, + [VE_UART0] = 0x1c090000, + [VE_UART1] = 0x1c0a0000, + [VE_UART2] = 0x1c0b0000, + [VE_UART3] = 0x1c0c0000, + [VE_WDT] = 0x1c0f0000, + [VE_TIMER01] = 0x1c110000, + [VE_TIMER23] = 0x1c120000, + [VE_VIRTIO] = 0x1c130000, + [VE_SERIALDVI] = 0x1c160000, + [VE_RTC] = 0x1c170000, + [VE_COMPACTFLASH] = 0x1c1a0000, + [VE_CLCD] = 0x1c1f0000, +}; + +/* Structure defining the peculiarities of a specific daughterboard */ + +typedef struct VEDBoardInfo VEDBoardInfo; + +struct VexpressMachineClass { + MachineClass parent; + VEDBoardInfo *daughterboard; +}; + +struct VexpressMachineState { + MachineState parent; + bool secure; + bool virt; +}; + +#define TYPE_VEXPRESS_MACHINE "vexpress" +#define TYPE_VEXPRESS_A9_MACHINE MACHINE_TYPE_NAME("vexpress-a9") +#define TYPE_VEXPRESS_A15_MACHINE MACHINE_TYPE_NAME("vexpress-a15") +OBJECT_DECLARE_TYPE(VexpressMachineState, VexpressMachineClass, VEXPRESS_MACHINE) + +typedef void DBoardInitFn(const VexpressMachineState *machine, + ram_addr_t ram_size, + const char *cpu_type, + qemu_irq *pic); + +struct VEDBoardInfo { + struct arm_boot_info bootinfo; + const hwaddr *motherboard_map; + hwaddr loader_start; + const hwaddr gic_cpu_if_addr; + uint32_t proc_id; + uint32_t num_voltage_sensors; + const uint32_t *voltages; + uint32_t num_clocks; + const uint32_t *clocks; + DBoardInitFn *init; +}; + +static void init_cpus(MachineState *ms, const char *cpu_type, + const char *privdev, hwaddr periphbase, + qemu_irq *pic, bool secure, bool virt) +{ + DeviceState *dev; + SysBusDevice *busdev; + int n; + unsigned int smp_cpus = ms->smp.cpus; + + /* Create the actual CPUs */ + for (n = 0; n < smp_cpus; n++) { + Object *cpuobj = object_new(cpu_type); + + if (!secure) { + object_property_set_bool(cpuobj, "has_el3", false, NULL); + } + if (!virt) { + if (object_property_find(cpuobj, "has_el2")) { + object_property_set_bool(cpuobj, "has_el2", false, NULL); + } + } + + if (object_property_find(cpuobj, "reset-cbar")) { + object_property_set_int(cpuobj, "reset-cbar", periphbase, + &error_abort); + } + qdev_realize(DEVICE(cpuobj), NULL, &error_fatal); + } + + /* Create the private peripheral devices (including the GIC); + * this must happen after the CPUs are created because a15mpcore_priv + * wires itself up to the CPU's generic_timer gpio out lines. + */ + dev = qdev_new(privdev); + qdev_prop_set_uint32(dev, "num-cpu", smp_cpus); + busdev = SYS_BUS_DEVICE(dev); + sysbus_realize_and_unref(busdev, &error_fatal); + sysbus_mmio_map(busdev, 0, periphbase); + + /* Interrupts [42:0] are from the motherboard; + * [47:43] are reserved; [63:48] are daughterboard + * peripherals. Note that some documentation numbers + * external interrupts starting from 32 (because there + * are internal interrupts 0..31). 
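+ * In GIC terms these external lines are SPIs, so pic[n] below is
+ * SPI n, i.e. GIC interrupt ID n + 32.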
+ */ + for (n = 0; n < 64; n++) { + pic[n] = qdev_get_gpio_in(dev, n); + } + + /* Connect the CPUs to the GIC */ + for (n = 0; n < smp_cpus; n++) { + DeviceState *cpudev = DEVICE(qemu_get_cpu(n)); + + sysbus_connect_irq(busdev, n, qdev_get_gpio_in(cpudev, ARM_CPU_IRQ)); + sysbus_connect_irq(busdev, n + smp_cpus, + qdev_get_gpio_in(cpudev, ARM_CPU_FIQ)); + sysbus_connect_irq(busdev, n + 2 * smp_cpus, + qdev_get_gpio_in(cpudev, ARM_CPU_VIRQ)); + sysbus_connect_irq(busdev, n + 3 * smp_cpus, + qdev_get_gpio_in(cpudev, ARM_CPU_VFIQ)); + } +} + +static void a9_daughterboard_init(const VexpressMachineState *vms, + ram_addr_t ram_size, + const char *cpu_type, + qemu_irq *pic) +{ + MachineState *machine = MACHINE(vms); + MemoryRegion *sysmem = get_system_memory(); + MemoryRegion *lowram = g_new(MemoryRegion, 1); + ram_addr_t low_ram_size; + + if (ram_size > 0x40000000) { + /* 1GB is the maximum the address space permits */ + error_report("vexpress-a9: cannot model more than 1GB RAM"); + exit(1); + } + + low_ram_size = ram_size; + if (low_ram_size > 0x4000000) { + low_ram_size = 0x4000000; + } + /* RAM is from 0x60000000 upwards. The bottom 64MB of the + * address space should in theory be remappable to various + * things including ROM or RAM; we always map the RAM there. + */ + memory_region_init_alias(lowram, NULL, "vexpress.lowmem", machine->ram, + 0, low_ram_size); + memory_region_add_subregion(sysmem, 0x0, lowram); + memory_region_add_subregion(sysmem, 0x60000000, machine->ram); + + /* 0x1e000000 A9MPCore (SCU) private memory region */ + init_cpus(machine, cpu_type, TYPE_A9MPCORE_PRIV, 0x1e000000, pic, + vms->secure, vms->virt); + + /* Daughterboard peripherals : 0x10020000 .. 0x20000000 */ + + /* 0x10020000 PL111 CLCD (daughterboard) */ + sysbus_create_simple("pl111", 0x10020000, pic[44]); + + /* 0x10060000 AXI RAM */ + /* 0x100e0000 PL341 Dynamic Memory Controller */ + /* 0x100e1000 PL354 Static Memory Controller */ + /* 0x100e2000 System Configuration Controller */ + + sysbus_create_simple("sp804", 0x100e4000, pic[48]); + /* 0x100e5000 SP805 Watchdog module */ + /* 0x100e6000 BP147 TrustZone Protection Controller */ + /* 0x100e9000 PL301 'Fast' AXI matrix */ + /* 0x100ea000 PL301 'Slow' AXI matrix */ + /* 0x100ec000 TrustZone Address Space Controller */ + /* 0x10200000 CoreSight debug APB */ + /* 0x1e00a000 PL310 L2 Cache Controller */ + sysbus_create_varargs("l2x0", 0x1e00a000, NULL); +} + +/* Voltage values for SYS_CFG_VOLT daughterboard registers; + * values are in microvolts. 
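+ * (e.g. the 1.8V DDR2 supply below is expressed as 1800000)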
+ */ +static const uint32_t a9_voltages[] = { + 1000000, /* VD10 : 1.0V : SoC internal logic voltage */ + 1000000, /* VD10_S2 : 1.0V : PL310, L2 cache, RAM, non-PL310 logic */ + 1000000, /* VD10_S3 : 1.0V : Cortex-A9, cores, MPEs, SCU, PL310 logic */ + 1800000, /* VCC1V8 : 1.8V : DDR2 SDRAM, test chip DDR2 I/O supply */ + 900000, /* DDR2VTT : 0.9V : DDR2 SDRAM VTT termination voltage */ + 3300000, /* VCC3V3 : 3.3V : local board supply for misc external logic */ +}; + +/* Reset values for daughterboard oscillators (in Hz) */ +static const uint32_t a9_clocks[] = { + 45000000, /* AMBA AXI ACLK: 45MHz */ + 23750000, /* daughterboard CLCD clock: 23.75MHz */ + 66670000, /* Test chip reference clock: 66.67MHz */ +}; + +static VEDBoardInfo a9_daughterboard = { + .motherboard_map = motherboard_legacy_map, + .loader_start = 0x60000000, + .gic_cpu_if_addr = 0x1e000100, + .proc_id = 0x0c000191, + .num_voltage_sensors = ARRAY_SIZE(a9_voltages), + .voltages = a9_voltages, + .num_clocks = ARRAY_SIZE(a9_clocks), + .clocks = a9_clocks, + .init = a9_daughterboard_init, +}; + +static void a15_daughterboard_init(const VexpressMachineState *vms, + ram_addr_t ram_size, + const char *cpu_type, + qemu_irq *pic) +{ + MachineState *machine = MACHINE(vms); + MemoryRegion *sysmem = get_system_memory(); + MemoryRegion *sram = g_new(MemoryRegion, 1); + + { + /* We have to use a separate 64 bit variable here to avoid the gcc + * "comparison is always false due to limited range of data type" + * warning if we are on a host where ram_addr_t is 32 bits. + */ + uint64_t rsz = ram_size; + if (rsz > (30ULL * 1024 * 1024 * 1024)) { + error_report("vexpress-a15: cannot model more than 30GB RAM"); + exit(1); + } + } + + /* RAM is from 0x80000000 upwards; there is no low-memory alias for it. 
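+ * (in the A-Series memory map above, the flash alias and the
+ * motherboard peripherals occupy the low addresses; see
+ * motherboard_aseries_map)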
*/ + memory_region_add_subregion(sysmem, 0x80000000, machine->ram); + + /* 0x2c000000 A15MPCore private memory region (GIC) */ + init_cpus(machine, cpu_type, TYPE_A15MPCORE_PRIV, + 0x2c000000, pic, vms->secure, vms->virt); + + /* A15 daughterboard peripherals: */ + + /* 0x20000000: CoreSight interfaces: not modelled */ + /* 0x2a000000: PL301 AXI interconnect: not modelled */ + /* 0x2a420000: SCC: not modelled */ + /* 0x2a430000: system counter: not modelled */ + /* 0x2b000000: HDLCD controller: not modelled */ + /* 0x2b060000: SP805 watchdog: not modelled */ + /* 0x2b0a0000: PL341 dynamic memory controller: not modelled */ + /* 0x2e000000: system SRAM */ + memory_region_init_ram(sram, NULL, "vexpress.a15sram", 0x10000, + &error_fatal); + memory_region_add_subregion(sysmem, 0x2e000000, sram); + + /* 0x7ffb0000: DMA330 DMA controller: not modelled */ + /* 0x7ffd0000: PL354 static memory controller: not modelled */ +} + +static const uint32_t a15_voltages[] = { + 900000, /* Vcore: 0.9V : CPU core voltage */ +}; + +static const uint32_t a15_clocks[] = { + 60000000, /* OSCCLK0: 60MHz : CPU_CLK reference */ + 0, /* OSCCLK1: reserved */ + 0, /* OSCCLK2: reserved */ + 0, /* OSCCLK3: reserved */ + 40000000, /* OSCCLK4: 40MHz : external AXI master clock */ + 23750000, /* OSCCLK5: 23.75MHz : HDLCD PLL reference */ + 50000000, /* OSCCLK6: 50MHz : static memory controller clock */ + 60000000, /* OSCCLK7: 60MHz : SYSCLK reference */ + 40000000, /* OSCCLK8: 40MHz : DDR2 PLL reference */ +}; + +static VEDBoardInfo a15_daughterboard = { + .motherboard_map = motherboard_aseries_map, + .loader_start = 0x80000000, + .gic_cpu_if_addr = 0x2c002000, + .proc_id = 0x14000237, + .num_voltage_sensors = ARRAY_SIZE(a15_voltages), + .voltages = a15_voltages, + .num_clocks = ARRAY_SIZE(a15_clocks), + .clocks = a15_clocks, + .init = a15_daughterboard_init, +}; + +static int add_virtio_mmio_node(void *fdt, uint32_t acells, uint32_t scells, + hwaddr addr, hwaddr size, uint32_t intc, + int irq) +{ + /* Add a virtio_mmio node to the device tree blob: + * virtio_mmio@ADDRESS { + * compatible = "virtio,mmio"; + * reg = ; + * interrupt-parent = <&intc>; + * interrupts = <0, irq, 1>; + * } + * (Note that the format of the interrupts property is dependent on the + * interrupt controller that interrupt-parent points to; these are for + * the ARM GIC and indicate an SPI interrupt, rising-edge-triggered.) + */ + int rc; + char *nodename = g_strdup_printf("/virtio_mmio@%" PRIx64, addr); + + rc = qemu_fdt_add_subnode(fdt, nodename); + rc |= qemu_fdt_setprop_string(fdt, nodename, + "compatible", "virtio,mmio"); + rc |= qemu_fdt_setprop_sized_cells(fdt, nodename, "reg", + acells, addr, scells, size); + qemu_fdt_setprop_cells(fdt, nodename, "interrupt-parent", intc); + qemu_fdt_setprop_cells(fdt, nodename, "interrupts", 0, irq, 1); + qemu_fdt_setprop(fdt, nodename, "dma-coherent", NULL, 0); + g_free(nodename); + if (rc) { + return -1; + } + return 0; +} + +static uint32_t find_int_controller(void *fdt) +{ + /* Find the FDT node corresponding to the interrupt controller + * for virtio-mmio devices. We do this by scanning the fdt for + * a node with the right compatibility, since we know there is + * only one GIC on a vexpress board. + * We return the phandle of the node, or 0 if none was found. 
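+ *
+ * For illustration, the kind of dtb fragment this matches looks
+ * roughly like this (node name and unit address invented here):
+ *   intc: interrupt-controller@2c001000 {
+ *       compatible = "arm,cortex-a9-gic";
+ *       #interrupt-cells = <3>;
+ *       interrupt-controller;
+ *   };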
+ */ + const char *compat = "arm,cortex-a9-gic"; + int offset; + + offset = fdt_node_offset_by_compatible(fdt, -1, compat); + if (offset >= 0) { + return fdt_get_phandle(fdt, offset); + } + return 0; +} + +static void vexpress_modify_dtb(const struct arm_boot_info *info, void *fdt) +{ + uint32_t acells, scells, intc; + const VEDBoardInfo *daughterboard = (const VEDBoardInfo *)info; + + acells = qemu_fdt_getprop_cell(fdt, "/", "#address-cells", + NULL, &error_fatal); + scells = qemu_fdt_getprop_cell(fdt, "/", "#size-cells", + NULL, &error_fatal); + intc = find_int_controller(fdt); + if (!intc) { + /* Not fatal, we just won't provide virtio. This will + * happen with older device tree blobs. + */ + warn_report("couldn't find interrupt controller in " + "dtb; will not include virtio-mmio devices in the dtb"); + } else { + int i; + const hwaddr *map = daughterboard->motherboard_map; + + /* We iterate backwards here because adding nodes + * to the dtb puts them in last-first. + */ + for (i = NUM_VIRTIO_TRANSPORTS - 1; i >= 0; i--) { + add_virtio_mmio_node(fdt, acells, scells, + map[VE_VIRTIO] + 0x200 * i, + 0x200, intc, 40 + i); + } + } +} + + +/* Open code a private version of pflash registration since we + * need to set non-default device width for VExpress platform. + */ +static PFlashCFI01 *ve_pflash_cfi01_register(hwaddr base, const char *name, + DriveInfo *di) +{ + DeviceState *dev = qdev_new(TYPE_PFLASH_CFI01); + + if (di) { + qdev_prop_set_drive(dev, "drive", blk_by_legacy_dinfo(di)); + } + + qdev_prop_set_uint32(dev, "num-blocks", + VEXPRESS_FLASH_SIZE / VEXPRESS_FLASH_SECT_SIZE); + qdev_prop_set_uint64(dev, "sector-length", VEXPRESS_FLASH_SECT_SIZE); + qdev_prop_set_uint8(dev, "width", 4); + qdev_prop_set_uint8(dev, "device-width", 2); + qdev_prop_set_bit(dev, "big-endian", false); + qdev_prop_set_uint16(dev, "id0", 0x89); + qdev_prop_set_uint16(dev, "id1", 0x18); + qdev_prop_set_uint16(dev, "id2", 0x00); + qdev_prop_set_uint16(dev, "id3", 0x00); + qdev_prop_set_string(dev, "name", name); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base); + return PFLASH_CFI01(dev); +} + +static void vexpress_common_init(MachineState *machine) +{ + VexpressMachineState *vms = VEXPRESS_MACHINE(machine); + VexpressMachineClass *vmc = VEXPRESS_MACHINE_GET_CLASS(machine); + VEDBoardInfo *daughterboard = vmc->daughterboard; + DeviceState *dev, *sysctl, *pl041; + qemu_irq pic[64]; + uint32_t sys_id; + DriveInfo *dinfo; + PFlashCFI01 *pflash0; + I2CBus *i2c; + ram_addr_t vram_size, sram_size; + MemoryRegion *sysmem = get_system_memory(); + MemoryRegion *vram = g_new(MemoryRegion, 1); + MemoryRegion *sram = g_new(MemoryRegion, 1); + MemoryRegion *flashalias = g_new(MemoryRegion, 1); + MemoryRegion *flash0mem; + const hwaddr *map = daughterboard->motherboard_map; + int i; + + daughterboard->init(vms, machine->ram_size, machine->cpu_type, pic); + + /* + * If a bios file was provided, attempt to map it into memory + */ + if (machine->firmware) { + char *fn; + int image_size; + + if (drive_get(IF_PFLASH, 0, 0)) { + error_report("The contents of the first flash device may be " + "specified with -bios or with -drive if=pflash... 
" + "but you cannot use both options at once"); + exit(1); + } + fn = qemu_find_file(QEMU_FILE_TYPE_BIOS, machine->firmware); + if (!fn) { + error_report("Could not find ROM image '%s'", machine->firmware); + exit(1); + } + image_size = load_image_targphys(fn, map[VE_NORFLASH0], + VEXPRESS_FLASH_SIZE); + g_free(fn); + if (image_size < 0) { + error_report("Could not load ROM image '%s'", machine->firmware); + exit(1); + } + } + + /* Motherboard peripherals: the wiring is the same but the + * addresses vary between the legacy and A-Series memory maps. + */ + + sys_id = 0x1190f500; + + sysctl = qdev_new("realview_sysctl"); + qdev_prop_set_uint32(sysctl, "sys_id", sys_id); + qdev_prop_set_uint32(sysctl, "proc_id", daughterboard->proc_id); + qdev_prop_set_uint32(sysctl, "len-db-voltage", + daughterboard->num_voltage_sensors); + for (i = 0; i < daughterboard->num_voltage_sensors; i++) { + char *propname = g_strdup_printf("db-voltage[%d]", i); + qdev_prop_set_uint32(sysctl, propname, daughterboard->voltages[i]); + g_free(propname); + } + qdev_prop_set_uint32(sysctl, "len-db-clock", + daughterboard->num_clocks); + for (i = 0; i < daughterboard->num_clocks; i++) { + char *propname = g_strdup_printf("db-clock[%d]", i); + qdev_prop_set_uint32(sysctl, propname, daughterboard->clocks[i]); + g_free(propname); + } + sysbus_realize_and_unref(SYS_BUS_DEVICE(sysctl), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(sysctl), 0, map[VE_SYSREGS]); + + /* VE_SP810: not modelled */ + /* VE_SERIALPCI: not modelled */ + + pl041 = qdev_new("pl041"); + qdev_prop_set_uint32(pl041, "nc_fifo_depth", 512); + sysbus_realize_and_unref(SYS_BUS_DEVICE(pl041), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(pl041), 0, map[VE_PL041]); + sysbus_connect_irq(SYS_BUS_DEVICE(pl041), 0, pic[11]); + + dev = sysbus_create_varargs("pl181", map[VE_MMCI], pic[9], pic[10], NULL); + /* Wire up MMC card detect and read-only signals */ + qdev_connect_gpio_out_named(dev, "card-read-only", 0, + qdev_get_gpio_in(sysctl, ARM_SYSCTL_GPIO_MMC_WPROT)); + qdev_connect_gpio_out_named(dev, "card-inserted", 0, + qdev_get_gpio_in(sysctl, ARM_SYSCTL_GPIO_MMC_CARDIN)); + dinfo = drive_get_next(IF_SD); + if (dinfo) { + DeviceState *card; + + card = qdev_new(TYPE_SD_CARD); + qdev_prop_set_drive_err(card, "drive", blk_by_legacy_dinfo(dinfo), + &error_fatal); + qdev_realize_and_unref(card, qdev_get_child_bus(dev, "sd-bus"), + &error_fatal); + } + + sysbus_create_simple("pl050_keyboard", map[VE_KMI0], pic[12]); + sysbus_create_simple("pl050_mouse", map[VE_KMI1], pic[13]); + + pl011_create(map[VE_UART0], pic[5], serial_hd(0)); + pl011_create(map[VE_UART1], pic[6], serial_hd(1)); + pl011_create(map[VE_UART2], pic[7], serial_hd(2)); + pl011_create(map[VE_UART3], pic[8], serial_hd(3)); + + sysbus_create_simple("sp804", map[VE_TIMER01], pic[2]); + sysbus_create_simple("sp804", map[VE_TIMER23], pic[3]); + + dev = sysbus_create_simple(TYPE_VERSATILE_I2C, map[VE_SERIALDVI], NULL); + i2c = (I2CBus *)qdev_get_child_bus(dev, "i2c"); + i2c_slave_create_simple(i2c, "sii9022", 0x39); + + sysbus_create_simple("pl031", map[VE_RTC], pic[4]); /* RTC */ + + /* VE_COMPACTFLASH: not modelled */ + + sysbus_create_simple("pl111", map[VE_CLCD], pic[14]); + + dinfo = drive_get_next(IF_PFLASH); + pflash0 = ve_pflash_cfi01_register(map[VE_NORFLASH0], "vexpress.flash0", + dinfo); + if (!pflash0) { + error_report("vexpress: error registering flash 0"); + exit(1); + } + + if (map[VE_NORFLASHALIAS] != -1) { + /* Map flash 0 as an alias into low memory */ + flash0mem = 
sysbus_mmio_get_region(SYS_BUS_DEVICE(pflash0), 0); + memory_region_init_alias(flashalias, NULL, "vexpress.flashalias", + flash0mem, 0, VEXPRESS_FLASH_SIZE); + memory_region_add_subregion(sysmem, map[VE_NORFLASHALIAS], flashalias); + } + + dinfo = drive_get_next(IF_PFLASH); + if (!ve_pflash_cfi01_register(map[VE_NORFLASH1], "vexpress.flash1", + dinfo)) { + error_report("vexpress: error registering flash 1"); + exit(1); + } + + sram_size = 0x2000000; + memory_region_init_ram(sram, NULL, "vexpress.sram", sram_size, + &error_fatal); + memory_region_add_subregion(sysmem, map[VE_SRAM], sram); + + vram_size = 0x800000; + memory_region_init_ram(vram, NULL, "vexpress.vram", vram_size, + &error_fatal); + memory_region_add_subregion(sysmem, map[VE_VIDEORAM], vram); + + /* 0x4e000000 LAN9118 Ethernet */ + if (nd_table[0].used) { + lan9118_init(&nd_table[0], map[VE_ETHERNET], pic[15]); + } + + /* VE_USB: not modelled */ + + /* VE_DAPROM: not modelled */ + + /* Create mmio transports, so the user can create virtio backends + * (which will be automatically plugged in to the transports). If + * no backend is created the transport will just sit harmlessly idle. + */ + for (i = 0; i < NUM_VIRTIO_TRANSPORTS; i++) { + sysbus_create_simple("virtio-mmio", map[VE_VIRTIO] + 0x200 * i, + pic[40 + i]); + } + + daughterboard->bootinfo.ram_size = machine->ram_size; + daughterboard->bootinfo.nb_cpus = machine->smp.cpus; + daughterboard->bootinfo.board_id = VEXPRESS_BOARD_ID; + daughterboard->bootinfo.loader_start = daughterboard->loader_start; + daughterboard->bootinfo.smp_loader_start = map[VE_SRAM]; + daughterboard->bootinfo.smp_bootreg_addr = map[VE_SYSREGS] + 0x30; + daughterboard->bootinfo.gic_cpu_if_addr = daughterboard->gic_cpu_if_addr; + daughterboard->bootinfo.modify_dtb = vexpress_modify_dtb; + /* When booting Linux we should be in secure state if the CPU has one. */ + daughterboard->bootinfo.secure_boot = vms->secure; + arm_load_kernel(ARM_CPU(first_cpu), machine, &daughterboard->bootinfo); +} + +static bool vexpress_get_secure(Object *obj, Error **errp) +{ + VexpressMachineState *vms = VEXPRESS_MACHINE(obj); + + return vms->secure; +} + +static void vexpress_set_secure(Object *obj, bool value, Error **errp) +{ + VexpressMachineState *vms = VEXPRESS_MACHINE(obj); + + vms->secure = value; +} + +static bool vexpress_get_virt(Object *obj, Error **errp) +{ + VexpressMachineState *vms = VEXPRESS_MACHINE(obj); + + return vms->virt; +} + +static void vexpress_set_virt(Object *obj, bool value, Error **errp) +{ + VexpressMachineState *vms = VEXPRESS_MACHINE(obj); + + vms->virt = value; +} + +static void vexpress_instance_init(Object *obj) +{ + VexpressMachineState *vms = VEXPRESS_MACHINE(obj); + + /* EL3 is enabled by default on vexpress */ + vms->secure = true; +} + +static void vexpress_a15_instance_init(Object *obj) +{ + VexpressMachineState *vms = VEXPRESS_MACHINE(obj); + + /* + * For the vexpress-a15, EL2 is by default enabled if EL3 is, + * but can also be specifically set to on or off. 
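+ * e.g. "-machine vexpress-a15,virtualization=off"; the property is
+ * registered in vexpress_a15_class_init() below.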
+ */ + vms->virt = true; +} + +static void vexpress_a9_instance_init(Object *obj) +{ + VexpressMachineState *vms = VEXPRESS_MACHINE(obj); + + /* The A9 doesn't have the virt extensions */ + vms->virt = false; +} + +static void vexpress_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + + mc->desc = "ARM Versatile Express"; + mc->init = vexpress_common_init; + mc->max_cpus = 4; + mc->ignore_memory_transaction_failures = true; + mc->default_ram_id = "vexpress.highmem"; + + object_class_property_add_bool(oc, "secure", vexpress_get_secure, + vexpress_set_secure); + object_class_property_set_description(oc, "secure", + "Set on/off to enable/disable the ARM " + "Security Extensions (TrustZone)"); +} + +static void vexpress_a9_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + VexpressMachineClass *vmc = VEXPRESS_MACHINE_CLASS(oc); + + mc->desc = "ARM Versatile Express for Cortex-A9"; + mc->default_cpu_type = ARM_CPU_TYPE_NAME("cortex-a9"); + + vmc->daughterboard = &a9_daughterboard; +} + +static void vexpress_a15_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + VexpressMachineClass *vmc = VEXPRESS_MACHINE_CLASS(oc); + + mc->desc = "ARM Versatile Express for Cortex-A15"; + mc->default_cpu_type = ARM_CPU_TYPE_NAME("cortex-a15"); + + vmc->daughterboard = &a15_daughterboard; + + object_class_property_add_bool(oc, "virtualization", vexpress_get_virt, + vexpress_set_virt); + object_class_property_set_description(oc, "virtualization", + "Set on/off to enable/disable the ARM " + "Virtualization Extensions " + "(defaults to same as 'secure')"); + +} + +static const TypeInfo vexpress_info = { + .name = TYPE_VEXPRESS_MACHINE, + .parent = TYPE_MACHINE, + .abstract = true, + .instance_size = sizeof(VexpressMachineState), + .instance_init = vexpress_instance_init, + .class_size = sizeof(VexpressMachineClass), + .class_init = vexpress_class_init, +}; + +static const TypeInfo vexpress_a9_info = { + .name = TYPE_VEXPRESS_A9_MACHINE, + .parent = TYPE_VEXPRESS_MACHINE, + .class_init = vexpress_a9_class_init, + .instance_init = vexpress_a9_instance_init, +}; + +static const TypeInfo vexpress_a15_info = { + .name = TYPE_VEXPRESS_A15_MACHINE, + .parent = TYPE_VEXPRESS_MACHINE, + .class_init = vexpress_a15_class_init, + .instance_init = vexpress_a15_instance_init, +}; + +static void vexpress_machine_init(void) +{ + type_register_static(&vexpress_info); + type_register_static(&vexpress_a9_info); + type_register_static(&vexpress_a15_info); +} + +type_init(vexpress_machine_init); diff --git a/hw/arm/virt-acpi-build.c b/hw/arm/virt-acpi-build.c new file mode 100644 index 000000000..674f90265 --- /dev/null +++ b/hw/arm/virt-acpi-build.c @@ -0,0 +1,1152 @@ +/* Support for generating ACPI tables and passing them to Guests + * + * ARM virt ACPI generation + * + * Copyright (C) 2008-2010 Kevin O'Connor + * Copyright (C) 2006 Fabrice Bellard + * Copyright (C) 2013 Red Hat Inc + * + * Author: Michael S. Tsirkin + * + * Copyright (c) 2015 HUAWEI TECHNOLOGIES CO.,LTD. + * + * Author: Shannon Zhao + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + + * You should have received a copy of the GNU General Public License along + * with this program; if not, see <http://www.gnu.org/licenses/>. + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/bitmap.h" +#include "trace.h" +#include "hw/core/cpu.h" +#include "target/arm/cpu.h" +#include "hw/acpi/acpi-defs.h" +#include "hw/acpi/acpi.h" +#include "hw/nvram/fw_cfg.h" +#include "hw/acpi/bios-linker-loader.h" +#include "hw/acpi/aml-build.h" +#include "hw/acpi/utils.h" +#include "hw/acpi/pci.h" +#include "hw/acpi/memory_hotplug.h" +#include "hw/acpi/generic_event_device.h" +#include "hw/acpi/tpm.h" +#include "hw/pci/pcie_host.h" +#include "hw/pci/pci.h" +#include "hw/pci/pci_bus.h" +#include "hw/pci-host/gpex.h" +#include "hw/arm/virt.h" +#include "hw/mem/nvdimm.h" +#include "hw/platform-bus.h" +#include "sysemu/numa.h" +#include "sysemu/reset.h" +#include "sysemu/tpm.h" +#include "kvm_arm.h" +#include "migration/vmstate.h" +#include "hw/acpi/ghes.h" + +#define ARM_SPI_BASE 32 + +#define ACPI_BUILD_TABLE_SIZE 0x20000 + +static void acpi_dsdt_add_cpus(Aml *scope, VirtMachineState *vms) +{ + MachineState *ms = MACHINE(vms); + uint16_t i; + + for (i = 0; i < ms->smp.cpus; i++) { + Aml *dev = aml_device("C%.03X", i); + aml_append(dev, aml_name_decl("_HID", aml_string("ACPI0007"))); + aml_append(dev, aml_name_decl("_UID", aml_int(i))); + aml_append(scope, dev); + } +} + +static void acpi_dsdt_add_uart(Aml *scope, const MemMapEntry *uart_memmap, + uint32_t uart_irq) +{ + Aml *dev = aml_device("COM0"); + aml_append(dev, aml_name_decl("_HID", aml_string("ARMH0011"))); + aml_append(dev, aml_name_decl("_UID", aml_int(0))); + + Aml *crs = aml_resource_template(); + aml_append(crs, aml_memory32_fixed(uart_memmap->base, + uart_memmap->size, AML_READ_WRITE)); + aml_append(crs, + aml_interrupt(AML_CONSUMER, AML_LEVEL, AML_ACTIVE_HIGH, + AML_EXCLUSIVE, &uart_irq, 1)); + aml_append(dev, aml_name_decl("_CRS", crs)); + + aml_append(scope, dev); +} + +static void acpi_dsdt_add_fw_cfg(Aml *scope, const MemMapEntry *fw_cfg_memmap) +{ + Aml *dev = aml_device("FWCF"); + aml_append(dev, aml_name_decl("_HID", aml_string("QEMU0002"))); + /* device present, functioning, decoding, not shown in UI */ + aml_append(dev, aml_name_decl("_STA", aml_int(0xB))); + aml_append(dev, aml_name_decl("_CCA", aml_int(1))); + + Aml *crs = aml_resource_template(); + aml_append(crs, aml_memory32_fixed(fw_cfg_memmap->base, + fw_cfg_memmap->size, AML_READ_WRITE)); + aml_append(dev, aml_name_decl("_CRS", crs)); + aml_append(scope, dev); +} + +static void acpi_dsdt_add_flash(Aml *scope, const MemMapEntry *flash_memmap) +{ + Aml *dev, *crs; + hwaddr base = flash_memmap->base; + hwaddr size = flash_memmap->size / 2; + + dev = aml_device("FLS0"); + aml_append(dev, aml_name_decl("_HID", aml_string("LNRO0015"))); + aml_append(dev, aml_name_decl("_UID", aml_int(0))); + + crs = aml_resource_template(); + aml_append(crs, aml_memory32_fixed(base, size, AML_READ_WRITE)); + aml_append(dev, aml_name_decl("_CRS", crs)); + aml_append(scope, dev); + + dev = aml_device("FLS1"); + aml_append(dev, aml_name_decl("_HID", aml_string("LNRO0015"))); + aml_append(dev, aml_name_decl("_UID", aml_int(1))); + crs = aml_resource_template(); + aml_append(crs, aml_memory32_fixed(base + size, size, AML_READ_WRITE)); + aml_append(dev,
aml_name_decl("_CRS", crs)); + aml_append(scope, dev); +} + +static void acpi_dsdt_add_virtio(Aml *scope, + const MemMapEntry *virtio_mmio_memmap, + uint32_t mmio_irq, int num) +{ + hwaddr base = virtio_mmio_memmap->base; + hwaddr size = virtio_mmio_memmap->size; + int i; + + for (i = 0; i < num; i++) { + uint32_t irq = mmio_irq + i; + Aml *dev = aml_device("VR%02u", i); + aml_append(dev, aml_name_decl("_HID", aml_string("LNRO0005"))); + aml_append(dev, aml_name_decl("_UID", aml_int(i))); + aml_append(dev, aml_name_decl("_CCA", aml_int(1))); + + Aml *crs = aml_resource_template(); + aml_append(crs, aml_memory32_fixed(base, size, AML_READ_WRITE)); + aml_append(crs, + aml_interrupt(AML_CONSUMER, AML_LEVEL, AML_ACTIVE_HIGH, + AML_EXCLUSIVE, &irq, 1)); + aml_append(dev, aml_name_decl("_CRS", crs)); + aml_append(scope, dev); + base += size; + } +} + +static void acpi_dsdt_add_pci(Aml *scope, const MemMapEntry *memmap, + uint32_t irq, bool use_highmem, bool highmem_ecam, + VirtMachineState *vms) +{ + int ecam_id = VIRT_ECAM_ID(highmem_ecam); + struct GPEXConfig cfg = { + .mmio32 = memmap[VIRT_PCIE_MMIO], + .pio = memmap[VIRT_PCIE_PIO], + .ecam = memmap[ecam_id], + .irq = irq, + .bus = vms->bus, + }; + + if (use_highmem) { + cfg.mmio64 = memmap[VIRT_HIGH_PCIE_MMIO]; + } + + acpi_dsdt_add_gpex(scope, &cfg); +} + +static void acpi_dsdt_add_gpio(Aml *scope, const MemMapEntry *gpio_memmap, + uint32_t gpio_irq) +{ + Aml *dev = aml_device("GPO0"); + aml_append(dev, aml_name_decl("_HID", aml_string("ARMH0061"))); + aml_append(dev, aml_name_decl("_UID", aml_int(0))); + + Aml *crs = aml_resource_template(); + aml_append(crs, aml_memory32_fixed(gpio_memmap->base, gpio_memmap->size, + AML_READ_WRITE)); + aml_append(crs, aml_interrupt(AML_CONSUMER, AML_LEVEL, AML_ACTIVE_HIGH, + AML_EXCLUSIVE, &gpio_irq, 1)); + aml_append(dev, aml_name_decl("_CRS", crs)); + + Aml *aei = aml_resource_template(); + /* Pin 3 for power button */ + const uint32_t pin_list[1] = {3}; + aml_append(aei, aml_gpio_int(AML_CONSUMER, AML_EDGE, AML_ACTIVE_HIGH, + AML_EXCLUSIVE, AML_PULL_UP, 0, pin_list, 1, + "GPO0", NULL, 0)); + aml_append(dev, aml_name_decl("_AEI", aei)); + + /* _E03 is handle for power button */ + Aml *method = aml_method("_E03", 0, AML_NOTSERIALIZED); + aml_append(method, aml_notify(aml_name(ACPI_POWER_BUTTON_DEVICE), + aml_int(0x80))); + aml_append(dev, method); + aml_append(scope, dev); +} + +#ifdef CONFIG_TPM +static void acpi_dsdt_add_tpm(Aml *scope, VirtMachineState *vms) +{ + PlatformBusDevice *pbus = PLATFORM_BUS_DEVICE(vms->platform_bus_dev); + hwaddr pbus_base = vms->memmap[VIRT_PLATFORM_BUS].base; + SysBusDevice *sbdev = SYS_BUS_DEVICE(tpm_find()); + MemoryRegion *sbdev_mr; + hwaddr tpm_base; + + if (!sbdev) { + return; + } + + tpm_base = platform_bus_get_mmio_addr(pbus, sbdev, 0); + assert(tpm_base != -1); + + tpm_base += pbus_base; + + sbdev_mr = sysbus_mmio_get_region(sbdev, 0); + + Aml *dev = aml_device("TPM0"); + aml_append(dev, aml_name_decl("_HID", aml_string("MSFT0101"))); + aml_append(dev, aml_name_decl("_UID", aml_int(0))); + + Aml *crs = aml_resource_template(); + aml_append(crs, + aml_memory32_fixed(tpm_base, + (uint32_t)memory_region_size(sbdev_mr), + AML_READ_WRITE)); + aml_append(dev, aml_name_decl("_CRS", crs)); + aml_append(scope, dev); +} +#endif + +#define ID_MAPPING_ENTRY_SIZE 20 +#define SMMU_V3_ENTRY_SIZE 68 +#define ROOT_COMPLEX_ENTRY_SIZE 36 +#define IORT_NODE_OFFSET 48 + +static void build_iort_id_mapping(GArray *table_data, uint32_t input_base, + uint32_t id_count, uint32_t out_ref) 
+{ + /* Table 4 ID mapping format */ + build_append_int_noprefix(table_data, input_base, 4); /* Input base */ + build_append_int_noprefix(table_data, id_count, 4); /* Number of IDs */ + build_append_int_noprefix(table_data, input_base, 4); /* Output base */ + build_append_int_noprefix(table_data, out_ref, 4); /* Output Reference */ + /* Flags */ + build_append_int_noprefix(table_data, 0 /* Single mapping (disabled) */, 4); +} + +struct AcpiIortIdMapping { + uint32_t input_base; + uint32_t id_count; +}; +typedef struct AcpiIortIdMapping AcpiIortIdMapping; + +/* Build the iort ID mapping to SMMUv3 for a given PCI host bridge */ +static int +iort_host_bridges(Object *obj, void *opaque) +{ + GArray *idmap_blob = opaque; + + if (object_dynamic_cast(obj, TYPE_PCI_HOST_BRIDGE)) { + PCIBus *bus = PCI_HOST_BRIDGE(obj)->bus; + + if (bus && !pci_bus_bypass_iommu(bus)) { + int min_bus, max_bus; + + pci_bus_range(bus, &min_bus, &max_bus); + + AcpiIortIdMapping idmap = { + .input_base = min_bus << 8, + .id_count = (max_bus - min_bus + 1) << 8, + }; + g_array_append_val(idmap_blob, idmap); + } + } + + return 0; +} + +static int iort_idmap_compare(gconstpointer a, gconstpointer b) +{ + AcpiIortIdMapping *idmap_a = (AcpiIortIdMapping *)a; + AcpiIortIdMapping *idmap_b = (AcpiIortIdMapping *)b; + + return idmap_a->input_base - idmap_b->input_base; +} + +/* + * Input Output Remapping Table (IORT) + * Conforms to "IO Remapping Table System Software on ARM Platforms", + * Document number: ARM DEN 0049E.b, Feb 2021 + */ +static void +build_iort(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms) +{ + int i, nb_nodes, rc_mapping_count; + const uint32_t iort_node_offset = IORT_NODE_OFFSET; + size_t node_size, smmu_offset = 0; + AcpiIortIdMapping *idmap; + uint32_t id = 0; + GArray *smmu_idmaps = g_array_new(false, true, sizeof(AcpiIortIdMapping)); + GArray *its_idmaps = g_array_new(false, true, sizeof(AcpiIortIdMapping)); + + AcpiTable table = { .sig = "IORT", .rev = 3, .oem_id = vms->oem_id, + .oem_table_id = vms->oem_table_id }; + /* Table 2 The IORT */ + acpi_table_begin(&table, table_data); + + if (vms->iommu == VIRT_IOMMU_SMMUV3) { + AcpiIortIdMapping next_range = {0}; + + object_child_foreach_recursive(object_get_root(), + iort_host_bridges, smmu_idmaps); + + /* Sort the smmu idmap by input_base */ + g_array_sort(smmu_idmaps, iort_idmap_compare); + + /* + * Split the whole RIDs by mapping from RC to SMMU, + * build the ID mapping from RC to ITS directly. 
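+ * That is: each RID range claimed by an SMMU idmap is routed + * RC -> SMMU (the SMMU node in turn maps it on to the ITS), while + * the gaps between those ranges are routed RC -> ITS directly.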
+ */ + for (i = 0; i < smmu_idmaps->len; i++) { + idmap = &g_array_index(smmu_idmaps, AcpiIortIdMapping, i); + + if (next_range.input_base < idmap->input_base) { + next_range.id_count = idmap->input_base - next_range.input_base; + g_array_append_val(its_idmaps, next_range); + } + + next_range.input_base = idmap->input_base + idmap->id_count; + } + + /* Append the last RC -> ITS ID mapping */ + if (next_range.input_base < 0xFFFF) { + next_range.id_count = 0xFFFF - next_range.input_base; + g_array_append_val(its_idmaps, next_range); + } + + nb_nodes = 3; /* RC, ITS, SMMUv3 */ + rc_mapping_count = smmu_idmaps->len + its_idmaps->len; + } else { + nb_nodes = 2; /* RC, ITS */ + rc_mapping_count = 1; + } + /* Number of IORT Nodes */ + build_append_int_noprefix(table_data, nb_nodes, 4); + + /* Offset to Array of IORT Nodes */ + build_append_int_noprefix(table_data, IORT_NODE_OFFSET, 4); + build_append_int_noprefix(table_data, 0, 4); /* Reserved */ + + /* Table 12 ITS Group Format */ + build_append_int_noprefix(table_data, 0 /* ITS Group */, 1); /* Type */ + node_size = 20 /* fixed header size */ + 4 /* 1 GIC ITS Identifier */; + build_append_int_noprefix(table_data, node_size, 2); /* Length */ + build_append_int_noprefix(table_data, 1, 1); /* Revision */ + build_append_int_noprefix(table_data, id++, 4); /* Identifier */ + build_append_int_noprefix(table_data, 0, 4); /* Number of ID mappings */ + build_append_int_noprefix(table_data, 0, 4); /* Reference to ID Array */ + build_append_int_noprefix(table_data, 1, 4); /* Number of ITSs */ + /* GIC ITS Identifier Array */ + build_append_int_noprefix(table_data, 0 /* MADT translation_id */, 4); + + if (vms->iommu == VIRT_IOMMU_SMMUV3) { + int irq = vms->irqmap[VIRT_SMMU] + ARM_SPI_BASE; + + smmu_offset = table_data->len - table.table_offset; + /* Table 9 SMMUv3 Format */ + build_append_int_noprefix(table_data, 4 /* SMMUv3 */, 1); /* Type */ + node_size = SMMU_V3_ENTRY_SIZE + ID_MAPPING_ENTRY_SIZE; + build_append_int_noprefix(table_data, node_size, 2); /* Length */ + build_append_int_noprefix(table_data, 4, 1); /* Revision */ + build_append_int_noprefix(table_data, id++, 4); /* Identifier */ + build_append_int_noprefix(table_data, 1, 4); /* Number of ID mappings */ + /* Reference to ID Array */ + build_append_int_noprefix(table_data, SMMU_V3_ENTRY_SIZE, 4); + /* Base address */ + build_append_int_noprefix(table_data, vms->memmap[VIRT_SMMU].base, 8); + /* Flags */ + build_append_int_noprefix(table_data, 1 /* COHACC Override */, 4); + build_append_int_noprefix(table_data, 0, 4); /* Reserved */ + build_append_int_noprefix(table_data, 0, 8); /* VATOS address */ + /* Model */ + build_append_int_noprefix(table_data, 0 /* Generic SMMU-v3 */, 4); + build_append_int_noprefix(table_data, irq, 4); /* Event */ + build_append_int_noprefix(table_data, irq + 1, 4); /* PRI */ + build_append_int_noprefix(table_data, irq + 3, 4); /* GERR */ + build_append_int_noprefix(table_data, irq + 2, 4); /* Sync */ + build_append_int_noprefix(table_data, 0, 4); /* Proximity domain */ + /* DeviceID mapping index (ignored since interrupts are GSIV based) */ + build_append_int_noprefix(table_data, 0, 4); + + /* output IORT node is the ITS group node (the first node) */ + build_iort_id_mapping(table_data, 0, 0xFFFF, IORT_NODE_OFFSET); + } + + /* Table 17 Root Complex Node */ + build_append_int_noprefix(table_data, 2 /* Root complex */, 1); /* Type */ + node_size = ROOT_COMPLEX_ENTRY_SIZE + + ID_MAPPING_ENTRY_SIZE * rc_mapping_count; + build_append_int_noprefix(table_data, node_size, 2); 
/* Length */ + build_append_int_noprefix(table_data, 3, 1); /* Revision */ + build_append_int_noprefix(table_data, id++, 4); /* Identifier */ + /* Number of ID mappings */ + build_append_int_noprefix(table_data, rc_mapping_count, 4); + /* Reference to ID Array */ + build_append_int_noprefix(table_data, ROOT_COMPLEX_ENTRY_SIZE, 4); + + /* Table 14 Memory access properties */ + /* CCA: Cache Coherent Attribute */ + build_append_int_noprefix(table_data, 1 /* fully coherent */, 4); + build_append_int_noprefix(table_data, 0, 1); /* AH: Note Allocation Hints */ + build_append_int_noprefix(table_data, 0, 2); /* Reserved */ + /* Table 15 Memory Access Flags */ + build_append_int_noprefix(table_data, 0x3 /* CCA = CPM = DACS = 1 */, 1); + + build_append_int_noprefix(table_data, 0, 4); /* ATS Attribute */ + /* MCFG pci_segment */ + build_append_int_noprefix(table_data, 0, 4); /* PCI Segment number */ + + /* Memory address size limit */ + build_append_int_noprefix(table_data, 64, 1); + + build_append_int_noprefix(table_data, 0, 3); /* Reserved */ + + /* Output Reference */ + if (vms->iommu == VIRT_IOMMU_SMMUV3) { + AcpiIortIdMapping *range; + + /* translated RIDs connect to SMMUv3 node: RC -> SMMUv3 -> ITS */ + for (i = 0; i < smmu_idmaps->len; i++) { + range = &g_array_index(smmu_idmaps, AcpiIortIdMapping, i); + /* output IORT node is the smmuv3 node */ + build_iort_id_mapping(table_data, range->input_base, + range->id_count, smmu_offset); + } + + /* bypassed RIDs connect to ITS group node directly: RC -> ITS */ + for (i = 0; i < its_idmaps->len; i++) { + range = &g_array_index(its_idmaps, AcpiIortIdMapping, i); + /* output IORT node is the ITS group node (the first node) */ + build_iort_id_mapping(table_data, range->input_base, + range->id_count, iort_node_offset); + } + } else { + /* output IORT node is the ITS group node (the first node) */ + build_iort_id_mapping(table_data, 0, 0xFFFF, IORT_NODE_OFFSET); + } + + acpi_table_end(linker, &table); + g_array_free(smmu_idmaps, true); + g_array_free(its_idmaps, true); +} + +/* + * Serial Port Console Redirection Table (SPCR) + * Rev: 1.07 + */ +static void +build_spcr(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms) +{ + AcpiTable table = { .sig = "SPCR", .rev = 2, .oem_id = vms->oem_id, + .oem_table_id = vms->oem_table_id }; + + acpi_table_begin(&table, table_data); + + /* Interface Type */ + build_append_int_noprefix(table_data, 3, 1); /* ARM PL011 UART */ + build_append_int_noprefix(table_data, 0, 3); /* Reserved */ + /* Base Address */ + build_append_gas(table_data, AML_AS_SYSTEM_MEMORY, 8, 0, 1, + vms->memmap[VIRT_UART].base); + /* Interrupt Type */ + build_append_int_noprefix(table_data, + (1 << 3) /* Bit[3] ARMH GIC interrupt */, 1); + build_append_int_noprefix(table_data, 0, 1); /* IRQ */ + /* Global System Interrupt */ + build_append_int_noprefix(table_data, + vms->irqmap[VIRT_UART] + ARM_SPI_BASE, 4); + build_append_int_noprefix(table_data, 3 /* 9600 */, 1); /* Baud Rate */ + build_append_int_noprefix(table_data, 0 /* No Parity */, 1); /* Parity */ + /* Stop Bits */ + build_append_int_noprefix(table_data, 1 /* 1 Stop bit */, 1); + /* Flow Control */ + build_append_int_noprefix(table_data, + (1 << 1) /* RTS/CTS hardware flow control */, 1); + /* Terminal Type */ + build_append_int_noprefix(table_data, 0 /* VT100 */, 1); + build_append_int_noprefix(table_data, 0, 1); /* Language */ + /* PCI Device ID */ + build_append_int_noprefix(table_data, 0xffff /* not a PCI device*/, 2); + /* PCI Vendor ID */ + 
build_append_int_noprefix(table_data, 0xffff /* not a PCI device*/, 2); + build_append_int_noprefix(table_data, 0, 1); /* PCI Bus Number */ + build_append_int_noprefix(table_data, 0, 1); /* PCI Device Number */ + build_append_int_noprefix(table_data, 0, 1); /* PCI Function Number */ + build_append_int_noprefix(table_data, 0, 4); /* PCI Flags */ + build_append_int_noprefix(table_data, 0, 1); /* PCI Segment */ + build_append_int_noprefix(table_data, 0, 4); /* Reserved */ + + acpi_table_end(linker, &table); +} + +/* + * ACPI spec, Revision 5.1 + * 5.2.16 System Resource Affinity Table (SRAT) + */ +static void +build_srat(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms) +{ + int i; + uint64_t mem_base; + MachineClass *mc = MACHINE_GET_CLASS(vms); + MachineState *ms = MACHINE(vms); + const CPUArchIdList *cpu_list = mc->possible_cpu_arch_ids(ms); + AcpiTable table = { .sig = "SRAT", .rev = 3, .oem_id = vms->oem_id, + .oem_table_id = vms->oem_table_id }; + + acpi_table_begin(&table, table_data); + build_append_int_noprefix(table_data, 1, 4); /* Reserved */ + build_append_int_noprefix(table_data, 0, 8); /* Reserved */ + + for (i = 0; i < cpu_list->len; ++i) { + uint32_t nodeid = cpu_list->cpus[i].props.node_id; + /* + * 5.2.16.4 GICC Affinity Structure + */ + build_append_int_noprefix(table_data, 3, 1); /* Type */ + build_append_int_noprefix(table_data, 18, 1); /* Length */ + build_append_int_noprefix(table_data, nodeid, 4); /* Proximity Domain */ + build_append_int_noprefix(table_data, i, 4); /* ACPI Processor UID */ + /* Flags, Table 5-76 */ + build_append_int_noprefix(table_data, 1 /* Enabled */, 4); + build_append_int_noprefix(table_data, 0, 4); /* Clock Domain */ + } + + mem_base = vms->memmap[VIRT_MEM].base; + for (i = 0; i < ms->numa_state->num_nodes; ++i) { + if (ms->numa_state->nodes[i].node_mem > 0) { + build_srat_memory(table_data, mem_base, + ms->numa_state->nodes[i].node_mem, i, + MEM_AFFINITY_ENABLED); + mem_base += ms->numa_state->nodes[i].node_mem; + } + } + + if (ms->nvdimms_state->is_enabled) { + nvdimm_build_srat(table_data); + } + + if (ms->device_memory) { + build_srat_memory(table_data, ms->device_memory->base, + memory_region_size(&ms->device_memory->mr), + ms->numa_state->num_nodes - 1, + MEM_AFFINITY_HOTPLUGGABLE | MEM_AFFINITY_ENABLED); + } + + acpi_table_end(linker, &table); +} + +/* + * ACPI spec, Revision 5.1 + * 5.2.24 Generic Timer Description Table (GTDT) + */ +static void +build_gtdt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms) +{ + VirtMachineClass *vmc = VIRT_MACHINE_GET_CLASS(vms); + /* + * Table 5-117 Flag Definitions + * set only "Timer interrupt Mode" and assume "Timer Interrupt + * polarity" bit as '0: Interrupt is Active high' + */ + uint32_t irqflags = vmc->claim_edge_triggered_timers ? + 1 : /* Interrupt is Edge triggered */ + 0; /* Interrupt is Level triggered */ + AcpiTable table = { .sig = "GTDT", .rev = 2, .oem_id = vms->oem_id, + .oem_table_id = vms->oem_table_id }; + + acpi_table_begin(&table, table_data); + + /* CntControlBase Physical Address */ + /* FIXME: invalid value, should be 0xFFFFFFFFFFFFFFFF if not impl. ? 
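+ * (Later ACPI revisions spell this out: the field should be + * 0xFFFFFFFFFFFFFFFF when the CNTControlBase frame is not + * implemented. Linux appears not to consume this field, so the + * zero below seems harmless in practice.)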
*/ + build_append_int_noprefix(table_data, 0, 8); + build_append_int_noprefix(table_data, 0, 4); /* Reserved */ + /* + * FIXME: clarify comment: + * The interrupt values are the same with the device tree when adding 16 + */ + /* Secure EL1 timer GSIV */ + build_append_int_noprefix(table_data, ARCH_TIMER_S_EL1_IRQ + 16, 4); + /* Secure EL1 timer Flags */ + build_append_int_noprefix(table_data, irqflags, 4); + /* Non-Secure EL1 timer GSIV */ + build_append_int_noprefix(table_data, ARCH_TIMER_NS_EL1_IRQ + 16, 4); + /* Non-Secure EL1 timer Flags */ + build_append_int_noprefix(table_data, irqflags | + 1UL << 2, /* Always-on Capability */ + 4); + /* Virtual timer GSIV */ + build_append_int_noprefix(table_data, ARCH_TIMER_VIRT_IRQ + 16, 4); + /* Virtual Timer Flags */ + build_append_int_noprefix(table_data, irqflags, 4); + /* Non-Secure EL2 timer GSIV */ + build_append_int_noprefix(table_data, ARCH_TIMER_NS_EL2_IRQ + 16, 4); + /* Non-Secure EL2 timer Flags */ + build_append_int_noprefix(table_data, irqflags, 4); + /* CntReadBase Physical address */ + build_append_int_noprefix(table_data, 0, 8); + /* Platform Timer Count */ + build_append_int_noprefix(table_data, 0, 4); + /* Platform Timer Offset */ + build_append_int_noprefix(table_data, 0, 4); + + acpi_table_end(linker, &table); +} + +/* Debug Port Table 2 (DBG2) */ +static void +build_dbg2(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms) +{ + AcpiTable table = { .sig = "DBG2", .rev = 0, .oem_id = vms->oem_id, + .oem_table_id = vms->oem_table_id }; + int dbg2devicelength; + const char name[] = "COM0"; + const int namespace_length = sizeof(name); + + acpi_table_begin(&table, table_data); + + dbg2devicelength = 22 + /* BaseAddressRegister[] offset */ + 12 + /* BaseAddressRegister[] */ + 4 + /* AddressSize[] */ + namespace_length /* NamespaceString[] */; + + /* OffsetDbgDeviceInfo */ + build_append_int_noprefix(table_data, 44, 4); + /* NumberDbgDeviceInfo */ + build_append_int_noprefix(table_data, 1, 4); + + /* Table 2. 
Debug Device Information structure format */ + build_append_int_noprefix(table_data, 0, 1); /* Revision */ + build_append_int_noprefix(table_data, dbg2devicelength, 2); /* Length */ + /* NumberofGenericAddressRegisters */ + build_append_int_noprefix(table_data, 1, 1); + /* NameSpaceStringLength */ + build_append_int_noprefix(table_data, namespace_length, 2); + build_append_int_noprefix(table_data, 38, 2); /* NameSpaceStringOffset */ + build_append_int_noprefix(table_data, 0, 2); /* OemDataLength */ + /* OemDataOffset (0 means no OEM data) */ + build_append_int_noprefix(table_data, 0, 2); + + /* Port Type */ + build_append_int_noprefix(table_data, 0x8000 /* Serial */, 2); + /* Port Subtype */ + build_append_int_noprefix(table_data, 0x3 /* ARM PL011 UART */, 2); + build_append_int_noprefix(table_data, 0, 2); /* Reserved */ + /* BaseAddressRegisterOffset */ + build_append_int_noprefix(table_data, 22, 2); + /* AddressSizeOffset */ + build_append_int_noprefix(table_data, 34, 2); + + /* BaseAddressRegister[] */ + build_append_gas(table_data, AML_AS_SYSTEM_MEMORY, 8, 0, 1, + vms->memmap[VIRT_UART].base); + + /* AddressSize[] */ + build_append_int_noprefix(table_data, + vms->memmap[VIRT_UART].size, 4); + + /* NamespaceString[] */ + g_array_append_vals(table_data, name, namespace_length); + + acpi_table_end(linker, &table); +}; + +/* + * ACPI spec, Revision 5.1 Errata A + * 5.2.12 Multiple APIC Description Table (MADT) + */ +static void build_append_gicr(GArray *table_data, uint64_t base, uint32_t size) +{ + build_append_int_noprefix(table_data, 0xE, 1); /* Type */ + build_append_int_noprefix(table_data, 16, 1); /* Length */ + build_append_int_noprefix(table_data, 0, 2); /* Reserved */ + /* Discovery Range Base Address */ + build_append_int_noprefix(table_data, base, 8); + build_append_int_noprefix(table_data, size, 4); /* Discovery Range Length */ +} + +static void +build_madt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms) +{ + int i; + VirtMachineClass *vmc = VIRT_MACHINE_GET_CLASS(vms); + const MemMapEntry *memmap = vms->memmap; + AcpiTable table = { .sig = "APIC", .rev = 3, .oem_id = vms->oem_id, + .oem_table_id = vms->oem_table_id }; + + acpi_table_begin(&table, table_data); + /* Local Interrupt Controller Address */ + build_append_int_noprefix(table_data, 0, 4); + build_append_int_noprefix(table_data, 0, 4); /* Flags */ + + /* 5.2.12.15 GIC Distributor Structure */ + build_append_int_noprefix(table_data, 0xC, 1); /* Type */ + build_append_int_noprefix(table_data, 24, 1); /* Length */ + build_append_int_noprefix(table_data, 0, 2); /* Reserved */ + build_append_int_noprefix(table_data, 0, 4); /* GIC ID */ + /* Physical Base Address */ + build_append_int_noprefix(table_data, memmap[VIRT_GIC_DIST].base, 8); + build_append_int_noprefix(table_data, 0, 4); /* System Vector Base */ + /* GIC version */ + build_append_int_noprefix(table_data, vms->gic_version, 1); + build_append_int_noprefix(table_data, 0, 3); /* Reserved */ + + for (i = 0; i < MACHINE(vms)->smp.cpus; i++) { + ARMCPU *armcpu = ARM_CPU(qemu_get_cpu(i)); + uint64_t physical_base_address = 0, gich = 0, gicv = 0; + uint32_t vgic_interrupt = vms->virt ? PPI(ARCH_GIC_MAINT_IRQ) : 0; + uint32_t pmu_interrupt = arm_feature(&armcpu->env, ARM_FEATURE_PMU) ?
+ PPI(VIRTUAL_PMU_IRQ) : 0; + + if (vms->gic_version == 2) { + physical_base_address = memmap[VIRT_GIC_CPU].base; + gicv = memmap[VIRT_GIC_VCPU].base; + gich = memmap[VIRT_GIC_HYP].base; + } + + /* 5.2.12.14 GIC Structure */ + build_append_int_noprefix(table_data, 0xB, 1); /* Type */ + build_append_int_noprefix(table_data, 76, 1); /* Length */ + build_append_int_noprefix(table_data, 0, 2); /* Reserved */ + build_append_int_noprefix(table_data, i, 4); /* GIC ID */ + build_append_int_noprefix(table_data, i, 4); /* ACPI Processor UID */ + /* Flags */ + build_append_int_noprefix(table_data, 1, 4); /* Enabled */ + /* Parking Protocol Version */ + build_append_int_noprefix(table_data, 0, 4); + /* Performance Interrupt GSIV */ + build_append_int_noprefix(table_data, pmu_interrupt, 4); + build_append_int_noprefix(table_data, 0, 8); /* Parked Address */ + /* Physical Base Address */ + build_append_int_noprefix(table_data, physical_base_address, 8); + build_append_int_noprefix(table_data, gicv, 8); /* GICV */ + build_append_int_noprefix(table_data, gich, 8); /* GICH */ + /* VGIC Maintenance interrupt */ + build_append_int_noprefix(table_data, vgic_interrupt, 4); + build_append_int_noprefix(table_data, 0, 8); /* GICR Base Address*/ + /* MPIDR */ + build_append_int_noprefix(table_data, armcpu->mp_affinity, 8); + } + + if (vms->gic_version == 3) { + build_append_gicr(table_data, memmap[VIRT_GIC_REDIST].base, + memmap[VIRT_GIC_REDIST].size); + if (virt_gicv3_redist_region_count(vms) == 2) { + build_append_gicr(table_data, memmap[VIRT_HIGH_GIC_REDIST2].base, + memmap[VIRT_HIGH_GIC_REDIST2].size); + } + + if (its_class_name() && !vmc->no_its) { + /* + * FIXME: Structure is from Revision 6.0 where 'GIC Structure' + * has additional fields on top of implemented 5.1 Errata A, + * to make it consistent with v6.0 we need to bump everything + * to v6.0 + */ + /* + * ACPI spec, Revision 6.0 Errata A + * (original 6.0 definition has invalid Length) + * 5.2.12.18 GIC ITS Structure + */ + build_append_int_noprefix(table_data, 0xF, 1); /* Type */ + build_append_int_noprefix(table_data, 20, 1); /* Length */ + build_append_int_noprefix(table_data, 0, 2); /* Reserved */ + build_append_int_noprefix(table_data, 0, 4); /* GIC ITS ID */ + /* Physical Base Address */ + build_append_int_noprefix(table_data, memmap[VIRT_GIC_ITS].base, 8); + build_append_int_noprefix(table_data, 0, 4); /* Reserved */ + } + } else { + const uint16_t spi_base = vms->irqmap[VIRT_GIC_V2M] + ARM_SPI_BASE; + + /* 5.2.12.16 GIC MSI Frame Structure */ + build_append_int_noprefix(table_data, 0xD, 1); /* Type */ + build_append_int_noprefix(table_data, 24, 1); /* Length */ + build_append_int_noprefix(table_data, 0, 2); /* Reserved */ + build_append_int_noprefix(table_data, 0, 4); /* GIC MSI Frame ID */ + /* Physical Base Address */ + build_append_int_noprefix(table_data, memmap[VIRT_GIC_V2M].base, 8); + build_append_int_noprefix(table_data, 1, 4); /* Flags */ + /* SPI Count */ + build_append_int_noprefix(table_data, NUM_GICV2M_SPIS, 2); + build_append_int_noprefix(table_data, spi_base, 2); /* SPI Base */ + } + acpi_table_end(linker, &table); +} + +/* FADT */ +static void build_fadt_rev5(GArray *table_data, BIOSLinker *linker, + VirtMachineState *vms, unsigned dsdt_tbl_offset) +{ + /* ACPI v5.1 */ + AcpiFadtData fadt = { + .rev = 5, + .minor_ver = 1, + .flags = 1 << ACPI_FADT_F_HW_REDUCED_ACPI, + .xdsdt_tbl_offset = &dsdt_tbl_offset, + }; + + switch (vms->psci_conduit) { + case QEMU_PSCI_CONDUIT_DISABLED: + fadt.arm_boot_arch = 0; + break; + case 
QEMU_PSCI_CONDUIT_HVC: + fadt.arm_boot_arch = ACPI_FADT_ARM_PSCI_COMPLIANT | + ACPI_FADT_ARM_PSCI_USE_HVC; + break; + case QEMU_PSCI_CONDUIT_SMC: + fadt.arm_boot_arch = ACPI_FADT_ARM_PSCI_COMPLIANT; + break; + default: + g_assert_not_reached(); + } + + build_fadt(table_data, linker, &fadt, vms->oem_id, vms->oem_table_id); +} + +/* DSDT */ +static void +build_dsdt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms) +{ + VirtMachineClass *vmc = VIRT_MACHINE_GET_CLASS(vms); + Aml *scope, *dsdt; + MachineState *ms = MACHINE(vms); + const MemMapEntry *memmap = vms->memmap; + const int *irqmap = vms->irqmap; + AcpiTable table = { .sig = "DSDT", .rev = 2, .oem_id = vms->oem_id, + .oem_table_id = vms->oem_table_id }; + + acpi_table_begin(&table, table_data); + dsdt = init_aml_allocator(); + + /* When booting the VM with UEFI, UEFI takes ownership of the RTC hardware. + * While UEFI can use libfdt to disable the RTC device node in the DTB that + * it passes to the OS, it cannot modify AML. Therefore, we won't generate + * the RTC ACPI device at all when using UEFI. + */ + scope = aml_scope("\\_SB"); + acpi_dsdt_add_cpus(scope, vms); + acpi_dsdt_add_uart(scope, &memmap[VIRT_UART], + (irqmap[VIRT_UART] + ARM_SPI_BASE)); + if (vmc->acpi_expose_flash) { + acpi_dsdt_add_flash(scope, &memmap[VIRT_FLASH]); + } + acpi_dsdt_add_fw_cfg(scope, &memmap[VIRT_FW_CFG]); + acpi_dsdt_add_virtio(scope, &memmap[VIRT_MMIO], + (irqmap[VIRT_MMIO] + ARM_SPI_BASE), NUM_VIRTIO_TRANSPORTS); + acpi_dsdt_add_pci(scope, memmap, (irqmap[VIRT_PCIE] + ARM_SPI_BASE), + vms->highmem, vms->highmem_ecam, vms); + if (vms->acpi_dev) { + build_ged_aml(scope, "\\_SB."GED_DEVICE, + HOTPLUG_HANDLER(vms->acpi_dev), + irqmap[VIRT_ACPI_GED] + ARM_SPI_BASE, AML_SYSTEM_MEMORY, + memmap[VIRT_ACPI_GED].base); + } else { + acpi_dsdt_add_gpio(scope, &memmap[VIRT_GPIO], + (irqmap[VIRT_GPIO] + ARM_SPI_BASE)); + } + + if (vms->acpi_dev) { + uint32_t event = object_property_get_uint(OBJECT(vms->acpi_dev), + "ged-event", &error_abort); + + if (event & ACPI_GED_MEM_HOTPLUG_EVT) { + build_memory_hotplug_aml(scope, ms->ram_slots, "\\_SB", NULL, + AML_SYSTEM_MEMORY, + memmap[VIRT_PCDIMM_ACPI].base); + } + } + + acpi_dsdt_add_power_button(scope); +#ifdef CONFIG_TPM + acpi_dsdt_add_tpm(scope, vms); +#endif + + aml_append(dsdt, scope); + + /* copy AML table into ACPI tables blob */ + g_array_append_vals(table_data, dsdt->buf->data, dsdt->buf->len); + + acpi_table_end(linker, &table); + free_aml_allocator(); +} + +typedef +struct AcpiBuildState { + /* Copy of table in RAM (for patching). */ + MemoryRegion *table_mr; + MemoryRegion *rsdp_mr; + MemoryRegion *linker_mr; + /* Is table patched? */ + bool patched; +} AcpiBuildState; + +static void acpi_align_size(GArray *blob, unsigned align) +{ + /* + * Align size to multiple of given size. This reduces the chance + * we need to change size in the future (breaking cross version migration). 
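+ * (For that reason virt_acpi_build() below pads the finished blob to + * a full ACPI_BUILD_TABLE_SIZE.)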
+ */ + g_array_set_size(blob, ROUND_UP(acpi_data_len(blob), align)); +} + +static +void virt_acpi_build(VirtMachineState *vms, AcpiBuildTables *tables) +{ + VirtMachineClass *vmc = VIRT_MACHINE_GET_CLASS(vms); + GArray *table_offsets; + unsigned dsdt, xsdt; + GArray *tables_blob = tables->table_data; + MachineState *ms = MACHINE(vms); + + table_offsets = g_array_new(false, true /* clear */, + sizeof(uint32_t)); + + bios_linker_loader_alloc(tables->linker, + ACPI_BUILD_TABLE_FILE, tables_blob, + 64, false /* high memory */); + + /* DSDT is pointed to by FADT */ + dsdt = tables_blob->len; + build_dsdt(tables_blob, tables->linker, vms); + + /* FADT MADT PPTT GTDT MCFG SPCR DBG2 pointed to by RSDT */ + acpi_add_table(table_offsets, tables_blob); + build_fadt_rev5(tables_blob, tables->linker, vms, dsdt); + + acpi_add_table(table_offsets, tables_blob); + build_madt(tables_blob, tables->linker, vms); + + if (!vmc->no_cpu_topology) { + acpi_add_table(table_offsets, tables_blob); + build_pptt(tables_blob, tables->linker, ms, + vms->oem_id, vms->oem_table_id); + } + + acpi_add_table(table_offsets, tables_blob); + build_gtdt(tables_blob, tables->linker, vms); + + acpi_add_table(table_offsets, tables_blob); + { + AcpiMcfgInfo mcfg = { + .base = vms->memmap[VIRT_ECAM_ID(vms->highmem_ecam)].base, + .size = vms->memmap[VIRT_ECAM_ID(vms->highmem_ecam)].size, + }; + build_mcfg(tables_blob, tables->linker, &mcfg, vms->oem_id, + vms->oem_table_id); + } + + acpi_add_table(table_offsets, tables_blob); + build_spcr(tables_blob, tables->linker, vms); + + acpi_add_table(table_offsets, tables_blob); + build_dbg2(tables_blob, tables->linker, vms); + + if (vms->ras) { + build_ghes_error_table(tables->hardware_errors, tables->linker); + acpi_add_table(table_offsets, tables_blob); + acpi_build_hest(tables_blob, tables->linker, vms->oem_id, + vms->oem_table_id); + } + + if (ms->numa_state->num_nodes > 0) { + acpi_add_table(table_offsets, tables_blob); + build_srat(tables_blob, tables->linker, vms); + if (ms->numa_state->have_numa_distance) { + acpi_add_table(table_offsets, tables_blob); + build_slit(tables_blob, tables->linker, ms, vms->oem_id, + vms->oem_table_id); + } + } + + if (ms->nvdimms_state->is_enabled) { + nvdimm_build_acpi(table_offsets, tables_blob, tables->linker, + ms->nvdimms_state, ms->ram_slots, vms->oem_id, + vms->oem_table_id); + } + + if (its_class_name() && !vmc->no_its) { + acpi_add_table(table_offsets, tables_blob); + build_iort(tables_blob, tables->linker, vms); + } + +#ifdef CONFIG_TPM + if (tpm_get_version(tpm_find()) == TPM_VERSION_2_0) { + acpi_add_table(table_offsets, tables_blob); + build_tpm2(tables_blob, tables->linker, tables->tcpalog, vms->oem_id, + vms->oem_table_id); + } +#endif + + /* XSDT is pointed to by RSDP */ + xsdt = tables_blob->len; + build_xsdt(tables_blob, tables->linker, table_offsets, vms->oem_id, + vms->oem_table_id); + + /* RSDP is in FSEG memory, so allocate it separately */ + { + AcpiRsdpData rsdp_data = { + .revision = 2, + .oem_id = vms->oem_id, + .xsdt_tbl_offset = &xsdt, + .rsdt_tbl_offset = NULL, + }; + build_rsdp(tables->rsdp, tables->linker, &rsdp_data); + } + + /* + * The align size is 128, warn if 64k is not enough therefore + * the align size could be resized. 
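+ * (ACPI_BUILD_TABLE_SIZE is 128KiB, so the check below fires once + * the tables grow past 64KiB.)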
+ */ + if (tables_blob->len > ACPI_BUILD_TABLE_SIZE / 2) { + warn_report("ACPI table size %u exceeds %d bytes," + " migration may not work", + tables_blob->len, ACPI_BUILD_TABLE_SIZE / 2); + error_printf("Try removing CPUs, NUMA nodes, memory slots" + " or PCI bridges."); + } + acpi_align_size(tables_blob, ACPI_BUILD_TABLE_SIZE); + + + /* Cleanup memory that's no longer used. */ + g_array_free(table_offsets, true); +} + +static void acpi_ram_update(MemoryRegion *mr, GArray *data) +{ + uint32_t size = acpi_data_len(data); + + /* Make sure RAM size is correct - in case it got changed + * e.g. by migration */ + memory_region_ram_resize(mr, size, &error_abort); + + memcpy(memory_region_get_ram_ptr(mr), data->data, size); + memory_region_set_dirty(mr, 0, size); +} + +static void virt_acpi_build_update(void *build_opaque) +{ + AcpiBuildState *build_state = build_opaque; + AcpiBuildTables tables; + + /* No state to update or already patched? Nothing to do. */ + if (!build_state || build_state->patched) { + return; + } + build_state->patched = true; + + acpi_build_tables_init(&tables); + + virt_acpi_build(VIRT_MACHINE(qdev_get_machine()), &tables); + + acpi_ram_update(build_state->table_mr, tables.table_data); + acpi_ram_update(build_state->rsdp_mr, tables.rsdp); + acpi_ram_update(build_state->linker_mr, tables.linker->cmd_blob); + + acpi_build_tables_cleanup(&tables, true); +} + +static void virt_acpi_build_reset(void *build_opaque) +{ + AcpiBuildState *build_state = build_opaque; + build_state->patched = false; +} + +static const VMStateDescription vmstate_virt_acpi_build = { + .name = "virt_acpi_build", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_BOOL(patched, AcpiBuildState), + VMSTATE_END_OF_LIST() + }, +}; + +void virt_acpi_setup(VirtMachineState *vms) +{ + AcpiBuildTables tables; + AcpiBuildState *build_state; + AcpiGedState *acpi_ged_state; + + if (!vms->fw_cfg) { + trace_virt_acpi_setup(); + return; + } + + if (!virt_is_acpi_enabled(vms)) { + trace_virt_acpi_setup(); + return; + } + + build_state = g_malloc0(sizeof *build_state); + + acpi_build_tables_init(&tables); + virt_acpi_build(vms, &tables); + + /* Now expose it all to Guest */ + build_state->table_mr = acpi_add_rom_blob(virt_acpi_build_update, + build_state, tables.table_data, + ACPI_BUILD_TABLE_FILE); + assert(build_state->table_mr != NULL); + + build_state->linker_mr = acpi_add_rom_blob(virt_acpi_build_update, + build_state, + tables.linker->cmd_blob, + ACPI_BUILD_LOADER_FILE); + + fw_cfg_add_file(vms->fw_cfg, ACPI_BUILD_TPMLOG_FILE, tables.tcpalog->data, + acpi_data_len(tables.tcpalog)); + + if (vms->ras) { + assert(vms->acpi_dev); + acpi_ged_state = ACPI_GED(vms->acpi_dev); + acpi_ghes_add_fw_cfg(&acpi_ged_state->ghes_state, + vms->fw_cfg, tables.hardware_errors); + } + + build_state->rsdp_mr = acpi_add_rom_blob(virt_acpi_build_update, + build_state, tables.rsdp, + ACPI_BUILD_RSDP_FILE); + + qemu_register_reset(virt_acpi_build_reset, build_state); + virt_acpi_build_reset(build_state); + vmstate_register(NULL, 0, &vmstate_virt_acpi_build, build_state); + + /* Cleanup tables but don't free the memory: we track it + * in build_state. 
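+ * (For example, the TPM log buffer was handed to fw_cfg_add_file() + * above, which stores the pointer rather than copying the data.)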
+ */ + acpi_build_tables_cleanup(&tables, false); +} diff --git a/hw/arm/virt.c b/hw/arm/virt.c new file mode 100644 index 000000000..30da05dfe --- /dev/null +++ b/hw/arm/virt.c @@ -0,0 +1,3033 @@ +/* + * ARM mach-virt emulation + * + * Copyright (c) 2013 Linaro Limited + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. + * + * Emulate a virtual board which works by passing Linux all the information + * it needs about what devices are present via the device tree. + * There are some restrictions about what we can do here: + * + we can only present devices whose Linux drivers will work based + * purely on the device tree with no platform data at all + * + we want to present a very stripped-down minimalist platform, + * both because this reduces the security attack surface from the guest + * and also because it reduces our exposure to being broken when + * the kernel updates its device tree bindings and requires further + * information in a device binding that we aren't providing. + * This is essentially the same approach kvmtool uses. + */ + +#include "qemu/osdep.h" +#include "qemu-common.h" +#include "qemu/datadir.h" +#include "qemu/units.h" +#include "qemu/option.h" +#include "monitor/qdev.h" +#include "qapi/error.h" +#include "hw/sysbus.h" +#include "hw/arm/boot.h" +#include "hw/arm/primecell.h" +#include "hw/arm/virt.h" +#include "hw/block/flash.h" +#include "hw/vfio/vfio-calxeda-xgmac.h" +#include "hw/vfio/vfio-amd-xgbe.h" +#include "hw/display/ramfb.h" +#include "net/net.h" +#include "sysemu/device_tree.h" +#include "sysemu/numa.h" +#include "sysemu/runstate.h" +#include "sysemu/tpm.h" +#include "sysemu/kvm.h" +#include "sysemu/hvf.h" +#include "hw/loader.h" +#include "qapi/error.h" +#include "qemu/bitops.h" +#include "qemu/error-report.h" +#include "qemu/module.h" +#include "hw/pci-host/gpex.h" +#include "hw/virtio/virtio-pci.h" +#include "hw/arm/sysbus-fdt.h" +#include "hw/platform-bus.h" +#include "hw/qdev-properties.h" +#include "hw/arm/fdt.h" +#include "hw/intc/arm_gic.h" +#include "hw/intc/arm_gicv3_common.h" +#include "hw/irq.h" +#include "kvm_arm.h" +#include "hw/firmware/smbios.h" +#include "qapi/visitor.h" +#include "qapi/qapi-visit-common.h" +#include "standard-headers/linux/input.h" +#include "hw/arm/smmuv3.h" +#include "hw/acpi/acpi.h" +#include "target/arm/internals.h" +#include "hw/mem/pc-dimm.h" +#include "hw/mem/nvdimm.h" +#include "hw/acpi/generic_event_device.h" +#include "hw/virtio/virtio-iommu.h" +#include "hw/char/pl011.h" +#include "qemu/guest-random.h" + +#define DEFINE_VIRT_MACHINE_LATEST(major, minor, latest) \ + static void virt_##major##_##minor##_class_init(ObjectClass *oc, \ + void *data) \ + { \ + MachineClass *mc = MACHINE_CLASS(oc); \ + virt_machine_##major##_##minor##_options(mc); \ + mc->desc = "QEMU " # major "." # minor " ARM Virtual Machine"; \ + if (latest) { \ + mc->alias = "virt"; \ + } \ + } \ + static const TypeInfo machvirt_##major##_##minor##_info = { \ + .name = MACHINE_TYPE_NAME("virt-" # major "."
# minor), \ + .parent = TYPE_VIRT_MACHINE, \ + .class_init = virt_##major##_##minor##_class_init, \ + }; \ + static void machvirt_machine_##major##_##minor##_init(void) \ + { \ + type_register_static(&machvirt_##major##_##minor##_info); \ + } \ + type_init(machvirt_machine_##major##_##minor##_init); + +#define DEFINE_VIRT_MACHINE_AS_LATEST(major, minor) \ + DEFINE_VIRT_MACHINE_LATEST(major, minor, true) +#define DEFINE_VIRT_MACHINE(major, minor) \ + DEFINE_VIRT_MACHINE_LATEST(major, minor, false) + + +/* Number of external interrupt lines to configure the GIC with */ +#define NUM_IRQS 256 + +#define PLATFORM_BUS_NUM_IRQS 64 + +/* Legacy RAM limit in GB (< version 4.0) */ +#define LEGACY_RAMLIMIT_GB 255 +#define LEGACY_RAMLIMIT_BYTES (LEGACY_RAMLIMIT_GB * GiB) + +/* Addresses and sizes of our components. + * 0..128MB is space for a flash device so we can run bootrom code such as UEFI. + * 128MB..256MB is used for miscellaneous device I/O. + * 256MB..1GB is reserved for possible future PCI support (ie where the + * PCI memory window will go if we add a PCI host controller). + * 1GB and up is RAM (which may happily spill over into the + * high memory region beyond 4GB). + * This represents a compromise between how much RAM can be given to + * a 32 bit VM and leaving space for expansion and in particular for PCI. + * Note that devices should generally be placed at multiples of 0x10000, + * to accommodate guests using 64K pages. + */ +static const MemMapEntry base_memmap[] = { + /* Space up to 0x8000000 is reserved for a boot ROM */ + [VIRT_FLASH] = { 0, 0x08000000 }, + [VIRT_CPUPERIPHS] = { 0x08000000, 0x00020000 }, + /* GIC distributor and CPU interfaces sit inside the CPU peripheral space */ + [VIRT_GIC_DIST] = { 0x08000000, 0x00010000 }, + [VIRT_GIC_CPU] = { 0x08010000, 0x00010000 }, + [VIRT_GIC_V2M] = { 0x08020000, 0x00001000 }, + [VIRT_GIC_HYP] = { 0x08030000, 0x00010000 }, + [VIRT_GIC_VCPU] = { 0x08040000, 0x00010000 }, + /* The space in between here is reserved for GICv3 CPU/vCPU/HYP */ + [VIRT_GIC_ITS] = { 0x08080000, 0x00020000 }, + /* This redistributor space allows up to 2*64kB*123 CPUs */ + [VIRT_GIC_REDIST] = { 0x080A0000, 0x00F60000 }, + [VIRT_UART] = { 0x09000000, 0x00001000 }, + [VIRT_RTC] = { 0x09010000, 0x00001000 }, + [VIRT_FW_CFG] = { 0x09020000, 0x00000018 }, + [VIRT_GPIO] = { 0x09030000, 0x00001000 }, + [VIRT_SECURE_UART] = { 0x09040000, 0x00001000 }, + [VIRT_SMMU] = { 0x09050000, 0x00020000 }, + [VIRT_PCDIMM_ACPI] = { 0x09070000, MEMORY_HOTPLUG_IO_LEN }, + [VIRT_ACPI_GED] = { 0x09080000, ACPI_GED_EVT_SEL_LEN }, + [VIRT_NVDIMM_ACPI] = { 0x09090000, NVDIMM_ACPI_IO_LEN}, + [VIRT_PVTIME] = { 0x090a0000, 0x00010000 }, + [VIRT_SECURE_GPIO] = { 0x090b0000, 0x00001000 }, + [VIRT_MMIO] = { 0x0a000000, 0x00000200 }, + /* ...repeating for a total of NUM_VIRTIO_TRANSPORTS, each of that size */ + [VIRT_PLATFORM_BUS] = { 0x0c000000, 0x02000000 }, + [VIRT_SECURE_MEM] = { 0x0e000000, 0x01000000 }, + [VIRT_PCIE_MMIO] = { 0x10000000, 0x2eff0000 }, + [VIRT_PCIE_PIO] = { 0x3eff0000, 0x00010000 }, + [VIRT_PCIE_ECAM] = { 0x3f000000, 0x01000000 }, + /* Actual RAM size depends on initial RAM and device memory settings */ + [VIRT_MEM] = { GiB, LEGACY_RAMLIMIT_BYTES }, +}; + +/* + * Highmem IO Regions: This memory map is floating, located after the RAM. + * Each MemMapEntry base (GPA) will be dynamically computed, depending on the + * top of the RAM, so that its base get the same alignment as the size, + * ie. a 512GiB entry will be aligned on a 512GiB boundary. 
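+ * For example, the 512GiB VIRT_HIGH_PCIE_MMIO window below lands on + * the first free 512GiB boundary.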
If there is + * less than 256GiB of RAM, the floating area starts at the 256GiB mark. + * Note the extended_memmap is sized so that it eventually also includes the + * base_memmap entries (VIRT_HIGH_GIC_REDIST2 index is greater than the last + * index of base_memmap). + */ +static MemMapEntry extended_memmap[] = { + /* Additional 64 MB redist region (can contain up to 512 redistributors) */ + [VIRT_HIGH_GIC_REDIST2] = { 0x0, 64 * MiB }, + [VIRT_HIGH_PCIE_ECAM] = { 0x0, 256 * MiB }, + /* Second PCIe window */ + [VIRT_HIGH_PCIE_MMIO] = { 0x0, 512 * GiB }, +}; + +static const int a15irqmap[] = { + [VIRT_UART] = 1, + [VIRT_RTC] = 2, + [VIRT_PCIE] = 3, /* ... to 6 */ + [VIRT_GPIO] = 7, + [VIRT_SECURE_UART] = 8, + [VIRT_ACPI_GED] = 9, + [VIRT_MMIO] = 16, /* ...to 16 + NUM_VIRTIO_TRANSPORTS - 1 */ + [VIRT_GIC_V2M] = 48, /* ...to 48 + NUM_GICV2M_SPIS - 1 */ + [VIRT_SMMU] = 74, /* ...to 74 + NUM_SMMU_IRQS - 1 */ + [VIRT_PLATFORM_BUS] = 112, /* ...to 112 + PLATFORM_BUS_NUM_IRQS -1 */ +}; + +static const char *valid_cpus[] = { + ARM_CPU_TYPE_NAME("cortex-a7"), + ARM_CPU_TYPE_NAME("cortex-a15"), + ARM_CPU_TYPE_NAME("cortex-a53"), + ARM_CPU_TYPE_NAME("cortex-a57"), + ARM_CPU_TYPE_NAME("cortex-a72"), + ARM_CPU_TYPE_NAME("a64fx"), + ARM_CPU_TYPE_NAME("host"), + ARM_CPU_TYPE_NAME("max"), +}; + +static bool cpu_type_valid(const char *cpu) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(valid_cpus); i++) { + if (strcmp(cpu, valid_cpus[i]) == 0) { + return true; + } + } + return false; +} + +static void create_kaslr_seed(MachineState *ms, const char *node) +{ + uint64_t seed; + + if (qemu_guest_getrandom(&seed, sizeof(seed), NULL)) { + return; + } + qemu_fdt_setprop_u64(ms->fdt, node, "kaslr-seed", seed); +} + +static void create_fdt(VirtMachineState *vms) +{ + MachineState *ms = MACHINE(vms); + int nb_numa_nodes = ms->numa_state->num_nodes; + void *fdt = create_device_tree(&vms->fdt_size); + + if (!fdt) { + error_report("create_device_tree() failed"); + exit(1); + } + + ms->fdt = fdt; + + /* Header */ + qemu_fdt_setprop_string(fdt, "/", "compatible", "linux,dummy-virt"); + qemu_fdt_setprop_cell(fdt, "/", "#address-cells", 0x2); + qemu_fdt_setprop_cell(fdt, "/", "#size-cells", 0x2); + + /* /chosen must exist for load_dtb to fill in necessary properties later */ + qemu_fdt_add_subnode(fdt, "/chosen"); + create_kaslr_seed(ms, "/chosen"); + + if (vms->secure) { + qemu_fdt_add_subnode(fdt, "/secure-chosen"); + create_kaslr_seed(ms, "/secure-chosen"); + } + + /* Clock node, for the benefit of the UART. The kernel device tree + * binding documentation claims the PL011 node clock properties are + * optional but in practice if you omit them the kernel refuses to + * probe for the device. 
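+ * The dummy 24MHz "apb-pclk" fixed-clock node created below exists + * purely to satisfy those clock properties.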
+ */ + vms->clock_phandle = qemu_fdt_alloc_phandle(fdt); + qemu_fdt_add_subnode(fdt, "/apb-pclk"); + qemu_fdt_setprop_string(fdt, "/apb-pclk", "compatible", "fixed-clock"); + qemu_fdt_setprop_cell(fdt, "/apb-pclk", "#clock-cells", 0x0); + qemu_fdt_setprop_cell(fdt, "/apb-pclk", "clock-frequency", 24000000); + qemu_fdt_setprop_string(fdt, "/apb-pclk", "clock-output-names", + "clk24mhz"); + qemu_fdt_setprop_cell(fdt, "/apb-pclk", "phandle", vms->clock_phandle); + + if (nb_numa_nodes > 0 && ms->numa_state->have_numa_distance) { + int size = nb_numa_nodes * nb_numa_nodes * 3 * sizeof(uint32_t); + uint32_t *matrix = g_malloc0(size); + int idx, i, j; + + for (i = 0; i < nb_numa_nodes; i++) { + for (j = 0; j < nb_numa_nodes; j++) { + idx = (i * nb_numa_nodes + j) * 3; + matrix[idx + 0] = cpu_to_be32(i); + matrix[idx + 1] = cpu_to_be32(j); + matrix[idx + 2] = + cpu_to_be32(ms->numa_state->nodes[i].distance[j]); + } + } + + qemu_fdt_add_subnode(fdt, "/distance-map"); + qemu_fdt_setprop_string(fdt, "/distance-map", "compatible", + "numa-distance-map-v1"); + qemu_fdt_setprop(fdt, "/distance-map", "distance-matrix", + matrix, size); + g_free(matrix); + } +} + +static void fdt_add_timer_nodes(const VirtMachineState *vms) +{ + /* On real hardware these interrupts are level-triggered. + * On KVM they were edge-triggered before host kernel version 4.4, + * and level-triggered afterwards. + * On emulated QEMU they are level-triggered. + * + * Getting the DTB info about them wrong is awkward for some + * guest kernels: + * pre-4.8 ignore the DT and leave the interrupt configured + * with whatever the GIC reset value (or the bootloader) left it at + * 4.8 before rc6 honour the incorrect data by programming it back + * into the GIC, causing problems + * 4.8rc6 and later ignore the DT and always write "level triggered" + * into the GIC + * + * For backwards-compatibility, virt-2.8 and earlier will continue + * to say these are edge-triggered, but later machines will report + * the correct information. 
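+ * (vmc->claim_edge_triggered_timers, tested just below, carries that + * per-machine-version choice.)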
+ */ + ARMCPU *armcpu; + VirtMachineClass *vmc = VIRT_MACHINE_GET_CLASS(vms); + uint32_t irqflags = GIC_FDT_IRQ_FLAGS_LEVEL_HI; + MachineState *ms = MACHINE(vms); + + if (vmc->claim_edge_triggered_timers) { + irqflags = GIC_FDT_IRQ_FLAGS_EDGE_LO_HI; + } + + if (vms->gic_version == VIRT_GIC_VERSION_2) { + irqflags = deposit32(irqflags, GIC_FDT_IRQ_PPI_CPU_START, + GIC_FDT_IRQ_PPI_CPU_WIDTH, + (1 << MACHINE(vms)->smp.cpus) - 1); + } + + qemu_fdt_add_subnode(ms->fdt, "/timer"); + + armcpu = ARM_CPU(qemu_get_cpu(0)); + if (arm_feature(&armcpu->env, ARM_FEATURE_V8)) { + const char compat[] = "arm,armv8-timer\0arm,armv7-timer"; + qemu_fdt_setprop(ms->fdt, "/timer", "compatible", + compat, sizeof(compat)); + } else { + qemu_fdt_setprop_string(ms->fdt, "/timer", "compatible", + "arm,armv7-timer"); + } + qemu_fdt_setprop(ms->fdt, "/timer", "always-on", NULL, 0); + qemu_fdt_setprop_cells(ms->fdt, "/timer", "interrupts", + GIC_FDT_IRQ_TYPE_PPI, ARCH_TIMER_S_EL1_IRQ, irqflags, + GIC_FDT_IRQ_TYPE_PPI, ARCH_TIMER_NS_EL1_IRQ, irqflags, + GIC_FDT_IRQ_TYPE_PPI, ARCH_TIMER_VIRT_IRQ, irqflags, + GIC_FDT_IRQ_TYPE_PPI, ARCH_TIMER_NS_EL2_IRQ, irqflags); +} + +static void fdt_add_cpu_nodes(const VirtMachineState *vms) +{ + int cpu; + int addr_cells = 1; + const MachineState *ms = MACHINE(vms); + const VirtMachineClass *vmc = VIRT_MACHINE_GET_CLASS(vms); + int smp_cpus = ms->smp.cpus; + + /* + * See Linux Documentation/devicetree/bindings/arm/cpus.yaml + * On ARM v8 64-bit systems value should be set to 2, + * that corresponds to the MPIDR_EL1 register size. + * If MPIDR_EL1[63:32] value is equal to 0 on all CPUs + * in the system, #address-cells can be set to 1, since + * MPIDR_EL1[63:32] bits are not used for CPUs + * identification. + * + * Here we actually don't know whether our system is 32- or 64-bit one. + * The simplest way to go is to examine affinity IDs of all our CPUs. If + * at least one of them has Aff3 populated, we set #address-cells to 2. + */ + for (cpu = 0; cpu < smp_cpus; cpu++) { + ARMCPU *armcpu = ARM_CPU(qemu_get_cpu(cpu)); + + if (armcpu->mp_affinity & ARM_AFF3_MASK) { + addr_cells = 2; + break; + } + } + + qemu_fdt_add_subnode(ms->fdt, "/cpus"); + qemu_fdt_setprop_cell(ms->fdt, "/cpus", "#address-cells", addr_cells); + qemu_fdt_setprop_cell(ms->fdt, "/cpus", "#size-cells", 0x0); + + for (cpu = smp_cpus - 1; cpu >= 0; cpu--) { + char *nodename = g_strdup_printf("/cpus/cpu@%d", cpu); + ARMCPU *armcpu = ARM_CPU(qemu_get_cpu(cpu)); + CPUState *cs = CPU(armcpu); + + qemu_fdt_add_subnode(ms->fdt, nodename); + qemu_fdt_setprop_string(ms->fdt, nodename, "device_type", "cpu"); + qemu_fdt_setprop_string(ms->fdt, nodename, "compatible", + armcpu->dtb_compatible); + + if (vms->psci_conduit != QEMU_PSCI_CONDUIT_DISABLED && smp_cpus > 1) { + qemu_fdt_setprop_string(ms->fdt, nodename, + "enable-method", "psci"); + } + + if (addr_cells == 2) { + qemu_fdt_setprop_u64(ms->fdt, nodename, "reg", + armcpu->mp_affinity); + } else { + qemu_fdt_setprop_cell(ms->fdt, nodename, "reg", + armcpu->mp_affinity); + } + + if (ms->possible_cpus->cpus[cs->cpu_index].props.has_node_id) { + qemu_fdt_setprop_cell(ms->fdt, nodename, "numa-node-id", + ms->possible_cpus->cpus[cs->cpu_index].props.node_id); + } + + if (!vmc->no_cpu_topology) { + qemu_fdt_setprop_cell(ms->fdt, nodename, "phandle", + qemu_fdt_alloc_phandle(ms->fdt)); + } + + g_free(nodename); + } + + if (!vmc->no_cpu_topology) { + /* + * Add vCPU topology description through fdt node cpu-map. 
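+ * The map is derived from the socket/core/thread counts in ms->smp.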
+ * + * See Linux Documentation/devicetree/bindings/cpu/cpu-topology.txt + * In a SMP system, the hierarchy of CPUs can be defined through + * four entities that are used to describe the layout of CPUs in + * the system: socket/cluster/core/thread. + * + * A socket node represents the boundary of system physical package + * and its child nodes must be one or more cluster nodes. A system + * can contain several layers of clustering within a single physical + * package and cluster nodes can be contained in parent cluster nodes. + * + * Given that cluster is not yet supported in the vCPU topology, + * we currently generate one cluster node within each socket node + * by default. + */ + qemu_fdt_add_subnode(ms->fdt, "/cpus/cpu-map"); + + for (cpu = smp_cpus - 1; cpu >= 0; cpu--) { + char *cpu_path = g_strdup_printf("/cpus/cpu@%d", cpu); + char *map_path; + + if (ms->smp.threads > 1) { + map_path = g_strdup_printf( + "/cpus/cpu-map/socket%d/cluster0/core%d/thread%d", + cpu / (ms->smp.cores * ms->smp.threads), + (cpu / ms->smp.threads) % ms->smp.cores, + cpu % ms->smp.threads); + } else { + map_path = g_strdup_printf( + "/cpus/cpu-map/socket%d/cluster0/core%d", + cpu / ms->smp.cores, + cpu % ms->smp.cores); + } + qemu_fdt_add_path(ms->fdt, map_path); + qemu_fdt_setprop_phandle(ms->fdt, map_path, "cpu", cpu_path); + + g_free(map_path); + g_free(cpu_path); + } + } +} + +static void fdt_add_its_gic_node(VirtMachineState *vms) +{ + char *nodename; + MachineState *ms = MACHINE(vms); + + vms->msi_phandle = qemu_fdt_alloc_phandle(ms->fdt); + nodename = g_strdup_printf("/intc/its@%" PRIx64, + vms->memmap[VIRT_GIC_ITS].base); + qemu_fdt_add_subnode(ms->fdt, nodename); + qemu_fdt_setprop_string(ms->fdt, nodename, "compatible", + "arm,gic-v3-its"); + qemu_fdt_setprop(ms->fdt, nodename, "msi-controller", NULL, 0); + qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "reg", + 2, vms->memmap[VIRT_GIC_ITS].base, + 2, vms->memmap[VIRT_GIC_ITS].size); + qemu_fdt_setprop_cell(ms->fdt, nodename, "phandle", vms->msi_phandle); + g_free(nodename); +} + +static void fdt_add_v2m_gic_node(VirtMachineState *vms) +{ + MachineState *ms = MACHINE(vms); + char *nodename; + + nodename = g_strdup_printf("/intc/v2m@%" PRIx64, + vms->memmap[VIRT_GIC_V2M].base); + vms->msi_phandle = qemu_fdt_alloc_phandle(ms->fdt); + qemu_fdt_add_subnode(ms->fdt, nodename); + qemu_fdt_setprop_string(ms->fdt, nodename, "compatible", + "arm,gic-v2m-frame"); + qemu_fdt_setprop(ms->fdt, nodename, "msi-controller", NULL, 0); + qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "reg", + 2, vms->memmap[VIRT_GIC_V2M].base, + 2, vms->memmap[VIRT_GIC_V2M].size); + qemu_fdt_setprop_cell(ms->fdt, nodename, "phandle", vms->msi_phandle); + g_free(nodename); +} + +static void fdt_add_gic_node(VirtMachineState *vms) +{ + MachineState *ms = MACHINE(vms); + char *nodename; + + vms->gic_phandle = qemu_fdt_alloc_phandle(ms->fdt); + qemu_fdt_setprop_cell(ms->fdt, "/", "interrupt-parent", vms->gic_phandle); + + nodename = g_strdup_printf("/intc@%" PRIx64, + vms->memmap[VIRT_GIC_DIST].base); + qemu_fdt_add_subnode(ms->fdt, nodename); + qemu_fdt_setprop_cell(ms->fdt, nodename, "#interrupt-cells", 3); + qemu_fdt_setprop(ms->fdt, nodename, "interrupt-controller", NULL, 0); + qemu_fdt_setprop_cell(ms->fdt, nodename, "#address-cells", 0x2); + qemu_fdt_setprop_cell(ms->fdt, nodename, "#size-cells", 0x2); + qemu_fdt_setprop(ms->fdt, nodename, "ranges", NULL, 0); + if (vms->gic_version == VIRT_GIC_VERSION_3) { + int nb_redist_regions = virt_gicv3_redist_region_count(vms); + + 
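+ /* virt_gicv3_redist_region_count() reports a second redistributor + * region (VIRT_HIGH_GIC_REDIST2) when the first one is too small to + * cover every CPU. */ +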
qemu_fdt_setprop_string(ms->fdt, nodename, "compatible", + "arm,gic-v3"); + + qemu_fdt_setprop_cell(ms->fdt, nodename, + "#redistributor-regions", nb_redist_regions); + + if (nb_redist_regions == 1) { + qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "reg", + 2, vms->memmap[VIRT_GIC_DIST].base, + 2, vms->memmap[VIRT_GIC_DIST].size, + 2, vms->memmap[VIRT_GIC_REDIST].base, + 2, vms->memmap[VIRT_GIC_REDIST].size); + } else { + qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "reg", + 2, vms->memmap[VIRT_GIC_DIST].base, + 2, vms->memmap[VIRT_GIC_DIST].size, + 2, vms->memmap[VIRT_GIC_REDIST].base, + 2, vms->memmap[VIRT_GIC_REDIST].size, + 2, vms->memmap[VIRT_HIGH_GIC_REDIST2].base, + 2, vms->memmap[VIRT_HIGH_GIC_REDIST2].size); + } + + if (vms->virt) { + qemu_fdt_setprop_cells(ms->fdt, nodename, "interrupts", + GIC_FDT_IRQ_TYPE_PPI, ARCH_GIC_MAINT_IRQ, + GIC_FDT_IRQ_FLAGS_LEVEL_HI); + } + } else { + /* 'cortex-a15-gic' means 'GIC v2' */ + qemu_fdt_setprop_string(ms->fdt, nodename, "compatible", + "arm,cortex-a15-gic"); + if (!vms->virt) { + qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "reg", + 2, vms->memmap[VIRT_GIC_DIST].base, + 2, vms->memmap[VIRT_GIC_DIST].size, + 2, vms->memmap[VIRT_GIC_CPU].base, + 2, vms->memmap[VIRT_GIC_CPU].size); + } else { + qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "reg", + 2, vms->memmap[VIRT_GIC_DIST].base, + 2, vms->memmap[VIRT_GIC_DIST].size, + 2, vms->memmap[VIRT_GIC_CPU].base, + 2, vms->memmap[VIRT_GIC_CPU].size, + 2, vms->memmap[VIRT_GIC_HYP].base, + 2, vms->memmap[VIRT_GIC_HYP].size, + 2, vms->memmap[VIRT_GIC_VCPU].base, + 2, vms->memmap[VIRT_GIC_VCPU].size); + qemu_fdt_setprop_cells(ms->fdt, nodename, "interrupts", + GIC_FDT_IRQ_TYPE_PPI, ARCH_GIC_MAINT_IRQ, + GIC_FDT_IRQ_FLAGS_LEVEL_HI); + } + } + + qemu_fdt_setprop_cell(ms->fdt, nodename, "phandle", vms->gic_phandle); + g_free(nodename); +} + +static void fdt_add_pmu_nodes(const VirtMachineState *vms) +{ + ARMCPU *armcpu = ARM_CPU(first_cpu); + uint32_t irqflags = GIC_FDT_IRQ_FLAGS_LEVEL_HI; + MachineState *ms = MACHINE(vms); + + if (!arm_feature(&armcpu->env, ARM_FEATURE_PMU)) { + assert(!object_property_get_bool(OBJECT(armcpu), "pmu", NULL)); + return; + } + + if (vms->gic_version == VIRT_GIC_VERSION_2) { + irqflags = deposit32(irqflags, GIC_FDT_IRQ_PPI_CPU_START, + GIC_FDT_IRQ_PPI_CPU_WIDTH, + (1 << MACHINE(vms)->smp.cpus) - 1); + } + + qemu_fdt_add_subnode(ms->fdt, "/pmu"); + if (arm_feature(&armcpu->env, ARM_FEATURE_V8)) { + const char compat[] = "arm,armv8-pmuv3"; + qemu_fdt_setprop(ms->fdt, "/pmu", "compatible", + compat, sizeof(compat)); + qemu_fdt_setprop_cells(ms->fdt, "/pmu", "interrupts", + GIC_FDT_IRQ_TYPE_PPI, VIRTUAL_PMU_IRQ, irqflags); + } +} + +static inline DeviceState *create_acpi_ged(VirtMachineState *vms) +{ + DeviceState *dev; + MachineState *ms = MACHINE(vms); + int irq = vms->irqmap[VIRT_ACPI_GED]; + uint32_t event = ACPI_GED_PWR_DOWN_EVT; + + if (ms->ram_slots) { + event |= ACPI_GED_MEM_HOTPLUG_EVT; + } + + if (ms->nvdimms_state->is_enabled) { + event |= ACPI_GED_NVDIMM_HOTPLUG_EVT; + } + + dev = qdev_new(TYPE_ACPI_GED); + qdev_prop_set_uint32(dev, "ged-event", event); + + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, vms->memmap[VIRT_ACPI_GED].base); + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 1, vms->memmap[VIRT_PCDIMM_ACPI].base); + sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, qdev_get_gpio_in(vms->gic, irq)); + + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + + return dev; +} + +static void create_its(VirtMachineState *vms) +{ + const char *itsclass = 
its_class_name(); + DeviceState *dev; + + if (!strcmp(itsclass, "arm-gicv3-its")) { + if (!vms->tcg_its) { + itsclass = NULL; + } + } + + if (!itsclass) { + /* Do nothing if not supported */ + return; + } + + dev = qdev_new(itsclass); + + object_property_set_link(OBJECT(dev), "parent-gicv3", OBJECT(vms->gic), + &error_abort); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, vms->memmap[VIRT_GIC_ITS].base); + + fdt_add_its_gic_node(vms); + vms->msi_controller = VIRT_MSI_CTRL_ITS; +} + +static void create_v2m(VirtMachineState *vms) +{ + int i; + int irq = vms->irqmap[VIRT_GIC_V2M]; + DeviceState *dev; + + dev = qdev_new("arm-gicv2m"); + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, vms->memmap[VIRT_GIC_V2M].base); + qdev_prop_set_uint32(dev, "base-spi", irq); + qdev_prop_set_uint32(dev, "num-spi", NUM_GICV2M_SPIS); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + + for (i = 0; i < NUM_GICV2M_SPIS; i++) { + sysbus_connect_irq(SYS_BUS_DEVICE(dev), i, + qdev_get_gpio_in(vms->gic, irq + i)); + } + + fdt_add_v2m_gic_node(vms); + vms->msi_controller = VIRT_MSI_CTRL_GICV2M; +} + +static void create_gic(VirtMachineState *vms, MemoryRegion *mem) +{ + MachineState *ms = MACHINE(vms); + /* We create a standalone GIC */ + SysBusDevice *gicbusdev; + const char *gictype; + int type = vms->gic_version, i; + unsigned int smp_cpus = ms->smp.cpus; + uint32_t nb_redist_regions = 0; + + gictype = (type == 3) ? gicv3_class_name() : gic_class_name(); + + vms->gic = qdev_new(gictype); + qdev_prop_set_uint32(vms->gic, "revision", type); + qdev_prop_set_uint32(vms->gic, "num-cpu", smp_cpus); + /* Note that the num-irq property counts both internal and external + * interrupts; there are always 32 of the former (mandated by GIC spec). 
+ */ + qdev_prop_set_uint32(vms->gic, "num-irq", NUM_IRQS + 32); + if (!kvm_irqchip_in_kernel()) { + qdev_prop_set_bit(vms->gic, "has-security-extensions", vms->secure); + } + + if (type == 3) { + uint32_t redist0_capacity = + vms->memmap[VIRT_GIC_REDIST].size / GICV3_REDIST_SIZE; + uint32_t redist0_count = MIN(smp_cpus, redist0_capacity); + + nb_redist_regions = virt_gicv3_redist_region_count(vms); + + qdev_prop_set_uint32(vms->gic, "len-redist-region-count", + nb_redist_regions); + qdev_prop_set_uint32(vms->gic, "redist-region-count[0]", redist0_count); + + if (!kvm_irqchip_in_kernel()) { + if (vms->tcg_its) { + object_property_set_link(OBJECT(vms->gic), "sysmem", + OBJECT(mem), &error_fatal); + qdev_prop_set_bit(vms->gic, "has-lpi", true); + } + } + + if (nb_redist_regions == 2) { + uint32_t redist1_capacity = + vms->memmap[VIRT_HIGH_GIC_REDIST2].size / GICV3_REDIST_SIZE; + + qdev_prop_set_uint32(vms->gic, "redist-region-count[1]", + MIN(smp_cpus - redist0_count, redist1_capacity)); + } + } else { + if (!kvm_irqchip_in_kernel()) { + qdev_prop_set_bit(vms->gic, "has-virtualization-extensions", + vms->virt); + } + } + gicbusdev = SYS_BUS_DEVICE(vms->gic); + sysbus_realize_and_unref(gicbusdev, &error_fatal); + sysbus_mmio_map(gicbusdev, 0, vms->memmap[VIRT_GIC_DIST].base); + if (type == 3) { + sysbus_mmio_map(gicbusdev, 1, vms->memmap[VIRT_GIC_REDIST].base); + if (nb_redist_regions == 2) { + sysbus_mmio_map(gicbusdev, 2, + vms->memmap[VIRT_HIGH_GIC_REDIST2].base); + } + } else { + sysbus_mmio_map(gicbusdev, 1, vms->memmap[VIRT_GIC_CPU].base); + if (vms->virt) { + sysbus_mmio_map(gicbusdev, 2, vms->memmap[VIRT_GIC_HYP].base); + sysbus_mmio_map(gicbusdev, 3, vms->memmap[VIRT_GIC_VCPU].base); + } + } + + /* Wire the outputs from each CPU's generic timer and the GICv3 + * maintenance interrupt signal to the appropriate GIC PPI inputs, + * and the GIC's IRQ/FIQ/VIRQ/VFIQ interrupt outputs to the CPU's inputs. + */ + for (i = 0; i < smp_cpus; i++) { + DeviceState *cpudev = DEVICE(qemu_get_cpu(i)); + int ppibase = NUM_IRQS + i * GIC_INTERNAL + GIC_NR_SGIS; + int irq; + /* Mapping from the output timer irq lines from the CPU to the + * GIC PPI inputs we use for the virt board. 
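+         * As a sketch of the arithmetic below: CPU i's PPI n is wired to
+         * GIC input ppibase + n, i.e.
+         * NUM_IRQS + i * GIC_INTERNAL + GIC_NR_SGIS + n, so each CPU's
+         * private interrupts sit in their own bank above the shared SPIs.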
+ */ + const int timer_irq[] = { + [GTIMER_PHYS] = ARCH_TIMER_NS_EL1_IRQ, + [GTIMER_VIRT] = ARCH_TIMER_VIRT_IRQ, + [GTIMER_HYP] = ARCH_TIMER_NS_EL2_IRQ, + [GTIMER_SEC] = ARCH_TIMER_S_EL1_IRQ, + }; + + for (irq = 0; irq < ARRAY_SIZE(timer_irq); irq++) { + qdev_connect_gpio_out(cpudev, irq, + qdev_get_gpio_in(vms->gic, + ppibase + timer_irq[irq])); + } + + if (type == 3) { + qemu_irq irq = qdev_get_gpio_in(vms->gic, + ppibase + ARCH_GIC_MAINT_IRQ); + qdev_connect_gpio_out_named(cpudev, "gicv3-maintenance-interrupt", + 0, irq); + } else if (vms->virt) { + qemu_irq irq = qdev_get_gpio_in(vms->gic, + ppibase + ARCH_GIC_MAINT_IRQ); + sysbus_connect_irq(gicbusdev, i + 4 * smp_cpus, irq); + } + + qdev_connect_gpio_out_named(cpudev, "pmu-interrupt", 0, + qdev_get_gpio_in(vms->gic, ppibase + + VIRTUAL_PMU_IRQ)); + + sysbus_connect_irq(gicbusdev, i, qdev_get_gpio_in(cpudev, ARM_CPU_IRQ)); + sysbus_connect_irq(gicbusdev, i + smp_cpus, + qdev_get_gpio_in(cpudev, ARM_CPU_FIQ)); + sysbus_connect_irq(gicbusdev, i + 2 * smp_cpus, + qdev_get_gpio_in(cpudev, ARM_CPU_VIRQ)); + sysbus_connect_irq(gicbusdev, i + 3 * smp_cpus, + qdev_get_gpio_in(cpudev, ARM_CPU_VFIQ)); + } + + fdt_add_gic_node(vms); + + if (type == 3 && vms->its) { + create_its(vms); + } else if (type == 2) { + create_v2m(vms); + } +} + +static void create_uart(const VirtMachineState *vms, int uart, + MemoryRegion *mem, Chardev *chr) +{ + char *nodename; + hwaddr base = vms->memmap[uart].base; + hwaddr size = vms->memmap[uart].size; + int irq = vms->irqmap[uart]; + const char compat[] = "arm,pl011\0arm,primecell"; + const char clocknames[] = "uartclk\0apb_pclk"; + DeviceState *dev = qdev_new(TYPE_PL011); + SysBusDevice *s = SYS_BUS_DEVICE(dev); + MachineState *ms = MACHINE(vms); + + qdev_prop_set_chr(dev, "chardev", chr); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + memory_region_add_subregion(mem, base, + sysbus_mmio_get_region(s, 0)); + sysbus_connect_irq(s, 0, qdev_get_gpio_in(vms->gic, irq)); + + nodename = g_strdup_printf("/pl011@%" PRIx64, base); + qemu_fdt_add_subnode(ms->fdt, nodename); + /* Note that we can't use setprop_string because of the embedded NUL */ + qemu_fdt_setprop(ms->fdt, nodename, "compatible", + compat, sizeof(compat)); + qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "reg", + 2, base, 2, size); + qemu_fdt_setprop_cells(ms->fdt, nodename, "interrupts", + GIC_FDT_IRQ_TYPE_SPI, irq, + GIC_FDT_IRQ_FLAGS_LEVEL_HI); + qemu_fdt_setprop_cells(ms->fdt, nodename, "clocks", + vms->clock_phandle, vms->clock_phandle); + qemu_fdt_setprop(ms->fdt, nodename, "clock-names", + clocknames, sizeof(clocknames)); + + if (uart == VIRT_UART) { + qemu_fdt_setprop_string(ms->fdt, "/chosen", "stdout-path", nodename); + } else { + /* Mark as not usable by the normal world */ + qemu_fdt_setprop_string(ms->fdt, nodename, "status", "disabled"); + qemu_fdt_setprop_string(ms->fdt, nodename, "secure-status", "okay"); + + qemu_fdt_setprop_string(ms->fdt, "/secure-chosen", "stdout-path", + nodename); + } + + g_free(nodename); +} + +static void create_rtc(const VirtMachineState *vms) +{ + char *nodename; + hwaddr base = vms->memmap[VIRT_RTC].base; + hwaddr size = vms->memmap[VIRT_RTC].size; + int irq = vms->irqmap[VIRT_RTC]; + const char compat[] = "arm,pl031\0arm,primecell"; + MachineState *ms = MACHINE(vms); + + sysbus_create_simple("pl031", base, qdev_get_gpio_in(vms->gic, irq)); + + nodename = g_strdup_printf("/pl031@%" PRIx64, base); + qemu_fdt_add_subnode(ms->fdt, nodename); + qemu_fdt_setprop(ms->fdt, nodename, "compatible", 
compat, sizeof(compat)); + qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "reg", + 2, base, 2, size); + qemu_fdt_setprop_cells(ms->fdt, nodename, "interrupts", + GIC_FDT_IRQ_TYPE_SPI, irq, + GIC_FDT_IRQ_FLAGS_LEVEL_HI); + qemu_fdt_setprop_cell(ms->fdt, nodename, "clocks", vms->clock_phandle); + qemu_fdt_setprop_string(ms->fdt, nodename, "clock-names", "apb_pclk"); + g_free(nodename); +} + +static DeviceState *gpio_key_dev; +static void virt_powerdown_req(Notifier *n, void *opaque) +{ + VirtMachineState *s = container_of(n, VirtMachineState, powerdown_notifier); + + if (s->acpi_dev) { + acpi_send_event(s->acpi_dev, ACPI_POWER_DOWN_STATUS); + } else { + /* use gpio Pin 3 for power button event */ + qemu_set_irq(qdev_get_gpio_in(gpio_key_dev, 0), 1); + } +} + +static void create_gpio_keys(char *fdt, DeviceState *pl061_dev, + uint32_t phandle) +{ + gpio_key_dev = sysbus_create_simple("gpio-key", -1, + qdev_get_gpio_in(pl061_dev, 3)); + + qemu_fdt_add_subnode(fdt, "/gpio-keys"); + qemu_fdt_setprop_string(fdt, "/gpio-keys", "compatible", "gpio-keys"); + qemu_fdt_setprop_cell(fdt, "/gpio-keys", "#size-cells", 0); + qemu_fdt_setprop_cell(fdt, "/gpio-keys", "#address-cells", 1); + + qemu_fdt_add_subnode(fdt, "/gpio-keys/poweroff"); + qemu_fdt_setprop_string(fdt, "/gpio-keys/poweroff", + "label", "GPIO Key Poweroff"); + qemu_fdt_setprop_cell(fdt, "/gpio-keys/poweroff", "linux,code", + KEY_POWER); + qemu_fdt_setprop_cells(fdt, "/gpio-keys/poweroff", + "gpios", phandle, 3, 0); +} + +#define SECURE_GPIO_POWEROFF 0 +#define SECURE_GPIO_RESET 1 + +static void create_secure_gpio_pwr(char *fdt, DeviceState *pl061_dev, + uint32_t phandle) +{ + DeviceState *gpio_pwr_dev; + + /* gpio-pwr */ + gpio_pwr_dev = sysbus_create_simple("gpio-pwr", -1, NULL); + + /* connect secure pl061 to gpio-pwr */ + qdev_connect_gpio_out(pl061_dev, SECURE_GPIO_RESET, + qdev_get_gpio_in_named(gpio_pwr_dev, "reset", 0)); + qdev_connect_gpio_out(pl061_dev, SECURE_GPIO_POWEROFF, + qdev_get_gpio_in_named(gpio_pwr_dev, "shutdown", 0)); + + qemu_fdt_add_subnode(fdt, "/gpio-poweroff"); + qemu_fdt_setprop_string(fdt, "/gpio-poweroff", "compatible", + "gpio-poweroff"); + qemu_fdt_setprop_cells(fdt, "/gpio-poweroff", + "gpios", phandle, SECURE_GPIO_POWEROFF, 0); + qemu_fdt_setprop_string(fdt, "/gpio-poweroff", "status", "disabled"); + qemu_fdt_setprop_string(fdt, "/gpio-poweroff", "secure-status", + "okay"); + + qemu_fdt_add_subnode(fdt, "/gpio-restart"); + qemu_fdt_setprop_string(fdt, "/gpio-restart", "compatible", + "gpio-restart"); + qemu_fdt_setprop_cells(fdt, "/gpio-restart", + "gpios", phandle, SECURE_GPIO_RESET, 0); + qemu_fdt_setprop_string(fdt, "/gpio-restart", "status", "disabled"); + qemu_fdt_setprop_string(fdt, "/gpio-restart", "secure-status", + "okay"); +} + +static void create_gpio_devices(const VirtMachineState *vms, int gpio, + MemoryRegion *mem) +{ + char *nodename; + DeviceState *pl061_dev; + hwaddr base = vms->memmap[gpio].base; + hwaddr size = vms->memmap[gpio].size; + int irq = vms->irqmap[gpio]; + const char compat[] = "arm,pl061\0arm,primecell"; + SysBusDevice *s; + MachineState *ms = MACHINE(vms); + + pl061_dev = qdev_new("pl061"); + /* Pull lines down to 0 if not driven by the PL061 */ + qdev_prop_set_uint32(pl061_dev, "pullups", 0); + qdev_prop_set_uint32(pl061_dev, "pulldowns", 0xff); + s = SYS_BUS_DEVICE(pl061_dev); + sysbus_realize_and_unref(s, &error_fatal); + memory_region_add_subregion(mem, base, sysbus_mmio_get_region(s, 0)); + sysbus_connect_irq(s, 0, qdev_get_gpio_in(vms->gic, irq)); + + uint32_t phandle 
= qemu_fdt_alloc_phandle(ms->fdt);
+    nodename = g_strdup_printf("/pl061@%" PRIx64, base);
+    qemu_fdt_add_subnode(ms->fdt, nodename);
+    qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "reg",
+                                 2, base, 2, size);
+    qemu_fdt_setprop(ms->fdt, nodename, "compatible", compat, sizeof(compat));
+    qemu_fdt_setprop_cell(ms->fdt, nodename, "#gpio-cells", 2);
+    qemu_fdt_setprop(ms->fdt, nodename, "gpio-controller", NULL, 0);
+    qemu_fdt_setprop_cells(ms->fdt, nodename, "interrupts",
+                           GIC_FDT_IRQ_TYPE_SPI, irq,
+                           GIC_FDT_IRQ_FLAGS_LEVEL_HI);
+    qemu_fdt_setprop_cell(ms->fdt, nodename, "clocks", vms->clock_phandle);
+    qemu_fdt_setprop_string(ms->fdt, nodename, "clock-names", "apb_pclk");
+    qemu_fdt_setprop_cell(ms->fdt, nodename, "phandle", phandle);
+
+    if (gpio != VIRT_GPIO) {
+        /* Mark as not usable by the normal world */
+        qemu_fdt_setprop_string(ms->fdt, nodename, "status", "disabled");
+        qemu_fdt_setprop_string(ms->fdt, nodename, "secure-status", "okay");
+    }
+    g_free(nodename);
+
+    /* Child gpio devices */
+    if (gpio == VIRT_GPIO) {
+        create_gpio_keys(ms->fdt, pl061_dev, phandle);
+    } else {
+        create_secure_gpio_pwr(ms->fdt, pl061_dev, phandle);
+    }
+}
+
+static void create_virtio_devices(const VirtMachineState *vms)
+{
+    int i;
+    hwaddr size = vms->memmap[VIRT_MMIO].size;
+    MachineState *ms = MACHINE(vms);
+
+    /* We create the transports in forwards order. Since qbus_realize()
+     * prepends (not appends) new child buses, the incrementing loop below
+     * will create a list of virtio-mmio buses with decreasing base addresses.
+     *
+     * When a -device option is processed from the command line,
+     * qbus_find_recursive() picks the next free virtio-mmio bus in forwards
+     * order. The upshot is that -device options in increasing command line
+     * order are mapped to virtio-mmio buses with decreasing base addresses.
+     *
+     * When this code was originally written, that arrangement ensured that
+     * the guest Linux kernel would give the lowest "name" (/dev/vda, eth0,
+     * etc) to the first -device on the command line. (The end-to-end order
+     * is a function of this loop, qbus_realize(), qbus_find_recursive(), and
+     * the guest kernel's name-to-address assignment strategy.)
+     *
+     * Meanwhile, the kernel's traversal seems to have been reversed; see
+     * e.g. the message, if not necessarily the code, of commit 70161ff336.
+     * Therefore the loop now establishes the inverse of the original intent.
+     *
+     * Unfortunately, we can't counteract the kernel change by reversing the
+     * loop; it would break existing command lines.
+     *
+     * In any case, the kernel makes no guarantee about the stability of
+     * enumeration order of virtio devices (as demonstrated by it changing
+     * between kernel versions). For reliable and stable identification
+     * of disks users must use UUIDs or similar mechanisms.
+     */
+    for (i = 0; i < NUM_VIRTIO_TRANSPORTS; i++) {
+        int irq = vms->irqmap[VIRT_MMIO] + i;
+        hwaddr base = vms->memmap[VIRT_MMIO].base + i * size;
+
+        sysbus_create_simple("virtio-mmio", base,
+                             qdev_get_gpio_in(vms->gic, irq));
+    }
+
+    /* We add dtb nodes in reverse order so that they appear in the finished
+     * device tree lowest address first.
+     *
+     * Note that this mapping is independent of the loop above. The previous
+     * loop influences virtio device to virtio transport assignment, whereas
+     * this loop controls how virtio transports are laid out in the dtb.
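+     *
+     * (A sketch of the effect: with N transports of size S at base B, nodes
+     * are created for B + (N-1)*S down to B, so the lowest-address
+     * transport is added last and therefore ends up first in the dtb.)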
+ */ + for (i = NUM_VIRTIO_TRANSPORTS - 1; i >= 0; i--) { + char *nodename; + int irq = vms->irqmap[VIRT_MMIO] + i; + hwaddr base = vms->memmap[VIRT_MMIO].base + i * size; + + nodename = g_strdup_printf("/virtio_mmio@%" PRIx64, base); + qemu_fdt_add_subnode(ms->fdt, nodename); + qemu_fdt_setprop_string(ms->fdt, nodename, + "compatible", "virtio,mmio"); + qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "reg", + 2, base, 2, size); + qemu_fdt_setprop_cells(ms->fdt, nodename, "interrupts", + GIC_FDT_IRQ_TYPE_SPI, irq, + GIC_FDT_IRQ_FLAGS_EDGE_LO_HI); + qemu_fdt_setprop(ms->fdt, nodename, "dma-coherent", NULL, 0); + g_free(nodename); + } +} + +#define VIRT_FLASH_SECTOR_SIZE (256 * KiB) + +static PFlashCFI01 *virt_flash_create1(VirtMachineState *vms, + const char *name, + const char *alias_prop_name) +{ + /* + * Create a single flash device. We use the same parameters as + * the flash devices on the Versatile Express board. + */ + DeviceState *dev = qdev_new(TYPE_PFLASH_CFI01); + + qdev_prop_set_uint64(dev, "sector-length", VIRT_FLASH_SECTOR_SIZE); + qdev_prop_set_uint8(dev, "width", 4); + qdev_prop_set_uint8(dev, "device-width", 2); + qdev_prop_set_bit(dev, "big-endian", false); + qdev_prop_set_uint16(dev, "id0", 0x89); + qdev_prop_set_uint16(dev, "id1", 0x18); + qdev_prop_set_uint16(dev, "id2", 0x00); + qdev_prop_set_uint16(dev, "id3", 0x00); + qdev_prop_set_string(dev, "name", name); + object_property_add_child(OBJECT(vms), name, OBJECT(dev)); + object_property_add_alias(OBJECT(vms), alias_prop_name, + OBJECT(dev), "drive"); + return PFLASH_CFI01(dev); +} + +static void virt_flash_create(VirtMachineState *vms) +{ + vms->flash[0] = virt_flash_create1(vms, "virt.flash0", "pflash0"); + vms->flash[1] = virt_flash_create1(vms, "virt.flash1", "pflash1"); +} + +static void virt_flash_map1(PFlashCFI01 *flash, + hwaddr base, hwaddr size, + MemoryRegion *sysmem) +{ + DeviceState *dev = DEVICE(flash); + + assert(QEMU_IS_ALIGNED(size, VIRT_FLASH_SECTOR_SIZE)); + assert(size / VIRT_FLASH_SECTOR_SIZE <= UINT32_MAX); + qdev_prop_set_uint32(dev, "num-blocks", size / VIRT_FLASH_SECTOR_SIZE); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + + memory_region_add_subregion(sysmem, base, + sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), + 0)); +} + +static void virt_flash_map(VirtMachineState *vms, + MemoryRegion *sysmem, + MemoryRegion *secure_sysmem) +{ + /* + * Map two flash devices to fill the VIRT_FLASH space in the memmap. + * sysmem is the system memory space. secure_sysmem is the secure view + * of the system, and the first flash device should be made visible only + * there. The second flash device is visible to both secure and nonsecure. + * If sysmem == secure_sysmem this means there is no separate Secure + * address space and both flash devices are generally visible. 
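+     * (Sketch: the VIRT_FLASH window is split in half below; flash0 takes
+     * the low half, visible only in the secure view when one exists, and
+     * flash1 takes the high half, visible to both worlds.)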
+ */ + hwaddr flashsize = vms->memmap[VIRT_FLASH].size / 2; + hwaddr flashbase = vms->memmap[VIRT_FLASH].base; + + virt_flash_map1(vms->flash[0], flashbase, flashsize, + secure_sysmem); + virt_flash_map1(vms->flash[1], flashbase + flashsize, flashsize, + sysmem); +} + +static void virt_flash_fdt(VirtMachineState *vms, + MemoryRegion *sysmem, + MemoryRegion *secure_sysmem) +{ + hwaddr flashsize = vms->memmap[VIRT_FLASH].size / 2; + hwaddr flashbase = vms->memmap[VIRT_FLASH].base; + MachineState *ms = MACHINE(vms); + char *nodename; + + if (sysmem == secure_sysmem) { + /* Report both flash devices as a single node in the DT */ + nodename = g_strdup_printf("/flash@%" PRIx64, flashbase); + qemu_fdt_add_subnode(ms->fdt, nodename); + qemu_fdt_setprop_string(ms->fdt, nodename, "compatible", "cfi-flash"); + qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "reg", + 2, flashbase, 2, flashsize, + 2, flashbase + flashsize, 2, flashsize); + qemu_fdt_setprop_cell(ms->fdt, nodename, "bank-width", 4); + g_free(nodename); + } else { + /* + * Report the devices as separate nodes so we can mark one as + * only visible to the secure world. + */ + nodename = g_strdup_printf("/secflash@%" PRIx64, flashbase); + qemu_fdt_add_subnode(ms->fdt, nodename); + qemu_fdt_setprop_string(ms->fdt, nodename, "compatible", "cfi-flash"); + qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "reg", + 2, flashbase, 2, flashsize); + qemu_fdt_setprop_cell(ms->fdt, nodename, "bank-width", 4); + qemu_fdt_setprop_string(ms->fdt, nodename, "status", "disabled"); + qemu_fdt_setprop_string(ms->fdt, nodename, "secure-status", "okay"); + g_free(nodename); + + nodename = g_strdup_printf("/flash@%" PRIx64, flashbase); + qemu_fdt_add_subnode(ms->fdt, nodename); + qemu_fdt_setprop_string(ms->fdt, nodename, "compatible", "cfi-flash"); + qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "reg", + 2, flashbase + flashsize, 2, flashsize); + qemu_fdt_setprop_cell(ms->fdt, nodename, "bank-width", 4); + g_free(nodename); + } +} + +static bool virt_firmware_init(VirtMachineState *vms, + MemoryRegion *sysmem, + MemoryRegion *secure_sysmem) +{ + int i; + const char *bios_name; + BlockBackend *pflash_blk0; + + /* Map legacy -drive if=pflash to machine properties */ + for (i = 0; i < ARRAY_SIZE(vms->flash); i++) { + pflash_cfi01_legacy_drive(vms->flash[i], + drive_get(IF_PFLASH, 0, i)); + } + + virt_flash_map(vms, sysmem, secure_sysmem); + + pflash_blk0 = pflash_cfi01_get_blk(vms->flash[0]); + + bios_name = MACHINE(vms)->firmware; + if (bios_name) { + char *fname; + MemoryRegion *mr; + int image_size; + + if (pflash_blk0) { + error_report("The contents of the first flash device may be " + "specified with -bios or with -drive if=pflash... 
" + "but you cannot use both options at once"); + exit(1); + } + + /* Fall back to -bios */ + + fname = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name); + if (!fname) { + error_report("Could not find ROM image '%s'", bios_name); + exit(1); + } + mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(vms->flash[0]), 0); + image_size = load_image_mr(fname, mr); + g_free(fname); + if (image_size < 0) { + error_report("Could not load ROM image '%s'", bios_name); + exit(1); + } + } + + return pflash_blk0 || bios_name; +} + +static FWCfgState *create_fw_cfg(const VirtMachineState *vms, AddressSpace *as) +{ + MachineState *ms = MACHINE(vms); + hwaddr base = vms->memmap[VIRT_FW_CFG].base; + hwaddr size = vms->memmap[VIRT_FW_CFG].size; + FWCfgState *fw_cfg; + char *nodename; + + fw_cfg = fw_cfg_init_mem_wide(base + 8, base, 8, base + 16, as); + fw_cfg_add_i16(fw_cfg, FW_CFG_NB_CPUS, (uint16_t)ms->smp.cpus); + + nodename = g_strdup_printf("/fw-cfg@%" PRIx64, base); + qemu_fdt_add_subnode(ms->fdt, nodename); + qemu_fdt_setprop_string(ms->fdt, nodename, + "compatible", "qemu,fw-cfg-mmio"); + qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "reg", + 2, base, 2, size); + qemu_fdt_setprop(ms->fdt, nodename, "dma-coherent", NULL, 0); + g_free(nodename); + return fw_cfg; +} + +static void create_pcie_irq_map(const MachineState *ms, + uint32_t gic_phandle, + int first_irq, const char *nodename) +{ + int devfn, pin; + uint32_t full_irq_map[4 * 4 * 10] = { 0 }; + uint32_t *irq_map = full_irq_map; + + for (devfn = 0; devfn <= 0x18; devfn += 0x8) { + for (pin = 0; pin < 4; pin++) { + int irq_type = GIC_FDT_IRQ_TYPE_SPI; + int irq_nr = first_irq + ((pin + PCI_SLOT(devfn)) % PCI_NUM_PINS); + int irq_level = GIC_FDT_IRQ_FLAGS_LEVEL_HI; + int i; + + uint32_t map[] = { + devfn << 8, 0, 0, /* devfn */ + pin + 1, /* PCI pin */ + gic_phandle, 0, 0, irq_type, irq_nr, irq_level }; /* GIC irq */ + + /* Convert map to big endian */ + for (i = 0; i < 10; i++) { + irq_map[i] = cpu_to_be32(map[i]); + } + irq_map += 10; + } + } + + qemu_fdt_setprop(ms->fdt, nodename, "interrupt-map", + full_irq_map, sizeof(full_irq_map)); + + qemu_fdt_setprop_cells(ms->fdt, nodename, "interrupt-map-mask", + cpu_to_be16(PCI_DEVFN(3, 0)), /* Slot 3 */ + 0, 0, + 0x7 /* PCI irq */); +} + +static void create_smmu(const VirtMachineState *vms, + PCIBus *bus) +{ + char *node; + const char compat[] = "arm,smmu-v3"; + int irq = vms->irqmap[VIRT_SMMU]; + int i; + hwaddr base = vms->memmap[VIRT_SMMU].base; + hwaddr size = vms->memmap[VIRT_SMMU].size; + const char irq_names[] = "eventq\0priq\0cmdq-sync\0gerror"; + DeviceState *dev; + MachineState *ms = MACHINE(vms); + + if (vms->iommu != VIRT_IOMMU_SMMUV3 || !vms->iommu_phandle) { + return; + } + + dev = qdev_new("arm-smmuv3"); + + object_property_set_link(OBJECT(dev), "primary-bus", OBJECT(bus), + &error_abort); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base); + for (i = 0; i < NUM_SMMU_IRQS; i++) { + sysbus_connect_irq(SYS_BUS_DEVICE(dev), i, + qdev_get_gpio_in(vms->gic, irq + i)); + } + + node = g_strdup_printf("/smmuv3@%" PRIx64, base); + qemu_fdt_add_subnode(ms->fdt, node); + qemu_fdt_setprop(ms->fdt, node, "compatible", compat, sizeof(compat)); + qemu_fdt_setprop_sized_cells(ms->fdt, node, "reg", 2, base, 2, size); + + qemu_fdt_setprop_cells(ms->fdt, node, "interrupts", + GIC_FDT_IRQ_TYPE_SPI, irq , GIC_FDT_IRQ_FLAGS_EDGE_LO_HI, + GIC_FDT_IRQ_TYPE_SPI, irq + 1, GIC_FDT_IRQ_FLAGS_EDGE_LO_HI, + GIC_FDT_IRQ_TYPE_SPI, irq + 2, GIC_FDT_IRQ_FLAGS_EDGE_LO_HI, 
+ GIC_FDT_IRQ_TYPE_SPI, irq + 3, GIC_FDT_IRQ_FLAGS_EDGE_LO_HI); + + qemu_fdt_setprop(ms->fdt, node, "interrupt-names", irq_names, + sizeof(irq_names)); + + qemu_fdt_setprop_cell(ms->fdt, node, "clocks", vms->clock_phandle); + qemu_fdt_setprop_string(ms->fdt, node, "clock-names", "apb_pclk"); + qemu_fdt_setprop(ms->fdt, node, "dma-coherent", NULL, 0); + + qemu_fdt_setprop_cell(ms->fdt, node, "#iommu-cells", 1); + + qemu_fdt_setprop_cell(ms->fdt, node, "phandle", vms->iommu_phandle); + g_free(node); +} + +static void create_virtio_iommu_dt_bindings(VirtMachineState *vms) +{ + const char compat[] = "virtio,pci-iommu"; + uint16_t bdf = vms->virtio_iommu_bdf; + MachineState *ms = MACHINE(vms); + char *node; + + vms->iommu_phandle = qemu_fdt_alloc_phandle(ms->fdt); + + node = g_strdup_printf("%s/virtio_iommu@%d", vms->pciehb_nodename, bdf); + qemu_fdt_add_subnode(ms->fdt, node); + qemu_fdt_setprop(ms->fdt, node, "compatible", compat, sizeof(compat)); + qemu_fdt_setprop_sized_cells(ms->fdt, node, "reg", + 1, bdf << 8, 1, 0, 1, 0, + 1, 0, 1, 0); + + qemu_fdt_setprop_cell(ms->fdt, node, "#iommu-cells", 1); + qemu_fdt_setprop_cell(ms->fdt, node, "phandle", vms->iommu_phandle); + g_free(node); + + qemu_fdt_setprop_cells(ms->fdt, vms->pciehb_nodename, "iommu-map", + 0x0, vms->iommu_phandle, 0x0, bdf, + bdf + 1, vms->iommu_phandle, bdf + 1, 0xffff - bdf); +} + +static void create_pcie(VirtMachineState *vms) +{ + hwaddr base_mmio = vms->memmap[VIRT_PCIE_MMIO].base; + hwaddr size_mmio = vms->memmap[VIRT_PCIE_MMIO].size; + hwaddr base_mmio_high = vms->memmap[VIRT_HIGH_PCIE_MMIO].base; + hwaddr size_mmio_high = vms->memmap[VIRT_HIGH_PCIE_MMIO].size; + hwaddr base_pio = vms->memmap[VIRT_PCIE_PIO].base; + hwaddr size_pio = vms->memmap[VIRT_PCIE_PIO].size; + hwaddr base_ecam, size_ecam; + hwaddr base = base_mmio; + int nr_pcie_buses; + int irq = vms->irqmap[VIRT_PCIE]; + MemoryRegion *mmio_alias; + MemoryRegion *mmio_reg; + MemoryRegion *ecam_alias; + MemoryRegion *ecam_reg; + DeviceState *dev; + char *nodename; + int i, ecam_id; + PCIHostState *pci; + MachineState *ms = MACHINE(vms); + + dev = qdev_new(TYPE_GPEX_HOST); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + + ecam_id = VIRT_ECAM_ID(vms->highmem_ecam); + base_ecam = vms->memmap[ecam_id].base; + size_ecam = vms->memmap[ecam_id].size; + nr_pcie_buses = size_ecam / PCIE_MMCFG_SIZE_MIN; + /* Map only the first size_ecam bytes of ECAM space */ + ecam_alias = g_new0(MemoryRegion, 1); + ecam_reg = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 0); + memory_region_init_alias(ecam_alias, OBJECT(dev), "pcie-ecam", + ecam_reg, 0, size_ecam); + memory_region_add_subregion(get_system_memory(), base_ecam, ecam_alias); + + /* Map the MMIO window into system address space so as to expose + * the section of PCI MMIO space which starts at the same base address + * (ie 1:1 mapping for that part of PCI MMIO space visible through + * the window). 
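+     * Concretely (a sketch of the alias arithmetic below): the alias
+     * covers [base_mmio, base_mmio + size_mmio) of the PCI MMIO space
+     * and is mapped at base_mmio in system memory, so a guest access at
+     * address X within the window reaches PCI address X.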
+ */ + mmio_alias = g_new0(MemoryRegion, 1); + mmio_reg = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 1); + memory_region_init_alias(mmio_alias, OBJECT(dev), "pcie-mmio", + mmio_reg, base_mmio, size_mmio); + memory_region_add_subregion(get_system_memory(), base_mmio, mmio_alias); + + if (vms->highmem) { + /* Map high MMIO space */ + MemoryRegion *high_mmio_alias = g_new0(MemoryRegion, 1); + + memory_region_init_alias(high_mmio_alias, OBJECT(dev), "pcie-mmio-high", + mmio_reg, base_mmio_high, size_mmio_high); + memory_region_add_subregion(get_system_memory(), base_mmio_high, + high_mmio_alias); + } + + /* Map IO port space */ + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 2, base_pio); + + for (i = 0; i < GPEX_NUM_IRQS; i++) { + sysbus_connect_irq(SYS_BUS_DEVICE(dev), i, + qdev_get_gpio_in(vms->gic, irq + i)); + gpex_set_irq_num(GPEX_HOST(dev), i, irq + i); + } + + pci = PCI_HOST_BRIDGE(dev); + pci->bypass_iommu = vms->default_bus_bypass_iommu; + vms->bus = pci->bus; + if (vms->bus) { + for (i = 0; i < nb_nics; i++) { + NICInfo *nd = &nd_table[i]; + + if (!nd->model) { + nd->model = g_strdup("virtio"); + } + + pci_nic_init_nofail(nd, pci->bus, nd->model, NULL); + } + } + + nodename = vms->pciehb_nodename = g_strdup_printf("/pcie@%" PRIx64, base); + qemu_fdt_add_subnode(ms->fdt, nodename); + qemu_fdt_setprop_string(ms->fdt, nodename, + "compatible", "pci-host-ecam-generic"); + qemu_fdt_setprop_string(ms->fdt, nodename, "device_type", "pci"); + qemu_fdt_setprop_cell(ms->fdt, nodename, "#address-cells", 3); + qemu_fdt_setprop_cell(ms->fdt, nodename, "#size-cells", 2); + qemu_fdt_setprop_cell(ms->fdt, nodename, "linux,pci-domain", 0); + qemu_fdt_setprop_cells(ms->fdt, nodename, "bus-range", 0, + nr_pcie_buses - 1); + qemu_fdt_setprop(ms->fdt, nodename, "dma-coherent", NULL, 0); + + if (vms->msi_phandle) { + qemu_fdt_setprop_cells(ms->fdt, nodename, "msi-parent", + vms->msi_phandle); + } + + qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "reg", + 2, base_ecam, 2, size_ecam); + + if (vms->highmem) { + qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "ranges", + 1, FDT_PCI_RANGE_IOPORT, 2, 0, + 2, base_pio, 2, size_pio, + 1, FDT_PCI_RANGE_MMIO, 2, base_mmio, + 2, base_mmio, 2, size_mmio, + 1, FDT_PCI_RANGE_MMIO_64BIT, + 2, base_mmio_high, + 2, base_mmio_high, 2, size_mmio_high); + } else { + qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "ranges", + 1, FDT_PCI_RANGE_IOPORT, 2, 0, + 2, base_pio, 2, size_pio, + 1, FDT_PCI_RANGE_MMIO, 2, base_mmio, + 2, base_mmio, 2, size_mmio); + } + + qemu_fdt_setprop_cell(ms->fdt, nodename, "#interrupt-cells", 1); + create_pcie_irq_map(ms, vms->gic_phandle, irq, nodename); + + if (vms->iommu) { + vms->iommu_phandle = qemu_fdt_alloc_phandle(ms->fdt); + + switch (vms->iommu) { + case VIRT_IOMMU_SMMUV3: + create_smmu(vms, vms->bus); + qemu_fdt_setprop_cells(ms->fdt, nodename, "iommu-map", + 0x0, vms->iommu_phandle, 0x0, 0x10000); + break; + default: + g_assert_not_reached(); + } + } +} + +static void create_platform_bus(VirtMachineState *vms) +{ + DeviceState *dev; + SysBusDevice *s; + int i; + MemoryRegion *sysmem = get_system_memory(); + + dev = qdev_new(TYPE_PLATFORM_BUS_DEVICE); + dev->id = g_strdup(TYPE_PLATFORM_BUS_DEVICE); + qdev_prop_set_uint32(dev, "num_irqs", PLATFORM_BUS_NUM_IRQS); + qdev_prop_set_uint32(dev, "mmio_size", vms->memmap[VIRT_PLATFORM_BUS].size); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + vms->platform_bus_dev = dev; + + s = SYS_BUS_DEVICE(dev); + for (i = 0; i < PLATFORM_BUS_NUM_IRQS; i++) { + int irq = 
vms->irqmap[VIRT_PLATFORM_BUS] + i; + sysbus_connect_irq(s, i, qdev_get_gpio_in(vms->gic, irq)); + } + + memory_region_add_subregion(sysmem, + vms->memmap[VIRT_PLATFORM_BUS].base, + sysbus_mmio_get_region(s, 0)); +} + +static void create_tag_ram(MemoryRegion *tag_sysmem, + hwaddr base, hwaddr size, + const char *name) +{ + MemoryRegion *tagram = g_new(MemoryRegion, 1); + + memory_region_init_ram(tagram, NULL, name, size / 32, &error_fatal); + memory_region_add_subregion(tag_sysmem, base / 32, tagram); +} + +static void create_secure_ram(VirtMachineState *vms, + MemoryRegion *secure_sysmem, + MemoryRegion *secure_tag_sysmem) +{ + MemoryRegion *secram = g_new(MemoryRegion, 1); + char *nodename; + hwaddr base = vms->memmap[VIRT_SECURE_MEM].base; + hwaddr size = vms->memmap[VIRT_SECURE_MEM].size; + MachineState *ms = MACHINE(vms); + + memory_region_init_ram(secram, NULL, "virt.secure-ram", size, + &error_fatal); + memory_region_add_subregion(secure_sysmem, base, secram); + + nodename = g_strdup_printf("/secram@%" PRIx64, base); + qemu_fdt_add_subnode(ms->fdt, nodename); + qemu_fdt_setprop_string(ms->fdt, nodename, "device_type", "memory"); + qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "reg", 2, base, 2, size); + qemu_fdt_setprop_string(ms->fdt, nodename, "status", "disabled"); + qemu_fdt_setprop_string(ms->fdt, nodename, "secure-status", "okay"); + + if (secure_tag_sysmem) { + create_tag_ram(secure_tag_sysmem, base, size, "mach-virt.secure-tag"); + } + + g_free(nodename); +} + +static void *machvirt_dtb(const struct arm_boot_info *binfo, int *fdt_size) +{ + const VirtMachineState *board = container_of(binfo, VirtMachineState, + bootinfo); + MachineState *ms = MACHINE(board); + + + *fdt_size = board->fdt_size; + return ms->fdt; +} + +static void virt_build_smbios(VirtMachineState *vms) +{ + MachineClass *mc = MACHINE_GET_CLASS(vms); + VirtMachineClass *vmc = VIRT_MACHINE_GET_CLASS(vms); + uint8_t *smbios_tables, *smbios_anchor; + size_t smbios_tables_len, smbios_anchor_len; + const char *product = "QEMU Virtual Machine"; + + if (kvm_enabled()) { + product = "KVM Virtual Machine"; + } + + smbios_set_defaults("QEMU", product, + vmc->smbios_old_sys_ver ? "1.0" : mc->name, false, + true, SMBIOS_ENTRY_POINT_30); + + smbios_get_tables(MACHINE(vms), NULL, 0, + &smbios_tables, &smbios_tables_len, + &smbios_anchor, &smbios_anchor_len, + &error_fatal); + + if (smbios_anchor) { + fw_cfg_add_file(vms->fw_cfg, "etc/smbios/smbios-tables", + smbios_tables, smbios_tables_len); + fw_cfg_add_file(vms->fw_cfg, "etc/smbios/smbios-anchor", + smbios_anchor, smbios_anchor_len); + } +} + +static +void virt_machine_done(Notifier *notifier, void *data) +{ + VirtMachineState *vms = container_of(notifier, VirtMachineState, + machine_done); + MachineState *ms = MACHINE(vms); + ARMCPU *cpu = ARM_CPU(first_cpu); + struct arm_boot_info *info = &vms->bootinfo; + AddressSpace *as = arm_boot_address_space(cpu, info); + + /* + * If the user provided a dtb, we assume the dynamic sysbus nodes + * already are integrated there. This corresponds to a use case where + * the dynamic sysbus nodes are complex and their generation is not yet + * supported. In that case the user can take charge of the guest dt + * while qemu takes charge of the qom stuff. 
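+     * (Flow sketch: only when no -dtb option was given do we splice the
+     * dynamic sysbus nodes into the generated dtb, right before
+     * arm_load_dtb() consumes it below.)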
+ */ + if (info->dtb_filename == NULL) { + platform_bus_add_all_fdt_nodes(ms->fdt, "/intc", + vms->memmap[VIRT_PLATFORM_BUS].base, + vms->memmap[VIRT_PLATFORM_BUS].size, + vms->irqmap[VIRT_PLATFORM_BUS]); + } + if (arm_load_dtb(info->dtb_start, info, info->dtb_limit, as, ms) < 0) { + exit(1); + } + + fw_cfg_add_extra_pci_roots(vms->bus, vms->fw_cfg); + + virt_acpi_setup(vms); + virt_build_smbios(vms); +} + +static uint64_t virt_cpu_mp_affinity(VirtMachineState *vms, int idx) +{ + uint8_t clustersz = ARM_DEFAULT_CPUS_PER_CLUSTER; + VirtMachineClass *vmc = VIRT_MACHINE_GET_CLASS(vms); + + if (!vmc->disallow_affinity_adjustment) { + /* Adjust MPIDR like 64-bit KVM hosts, which incorporate the + * GIC's target-list limitations. 32-bit KVM hosts currently + * always create clusters of 4 CPUs, but that is expected to + * change when they gain support for gicv3. When KVM is enabled + * it will override the changes we make here, therefore our + * purposes are to make TCG consistent (with 64-bit KVM hosts) + * and to improve SGI efficiency. + */ + if (vms->gic_version == VIRT_GIC_VERSION_3) { + clustersz = GICV3_TARGETLIST_BITS; + } else { + clustersz = GIC_TARGETLIST_BITS; + } + } + return arm_cpu_mp_affinity(idx, clustersz); +} + +static void virt_set_memmap(VirtMachineState *vms) +{ + MachineState *ms = MACHINE(vms); + hwaddr base, device_memory_base, device_memory_size; + int i; + + vms->memmap = extended_memmap; + + for (i = 0; i < ARRAY_SIZE(base_memmap); i++) { + vms->memmap[i] = base_memmap[i]; + } + + if (ms->ram_slots > ACPI_MAX_RAM_SLOTS) { + error_report("unsupported number of memory slots: %"PRIu64, + ms->ram_slots); + exit(EXIT_FAILURE); + } + + /* + * We compute the base of the high IO region depending on the + * amount of initial and device memory. The device memory start/size + * is aligned on 1GiB. We never put the high IO region below 256GiB + * so that if maxram_size is < 255GiB we keep the legacy memory map. + * The device region size assumes 1GiB page max alignment per slot. + */ + device_memory_base = + ROUND_UP(vms->memmap[VIRT_MEM].base + ms->ram_size, GiB); + device_memory_size = ms->maxram_size - ms->ram_size + ms->ram_slots * GiB; + + /* Base address of the high IO region */ + base = device_memory_base + ROUND_UP(device_memory_size, GiB); + if (base < device_memory_base) { + error_report("maxmem/slots too huge"); + exit(EXIT_FAILURE); + } + if (base < vms->memmap[VIRT_MEM].base + LEGACY_RAMLIMIT_BYTES) { + base = vms->memmap[VIRT_MEM].base + LEGACY_RAMLIMIT_BYTES; + } + + for (i = VIRT_LOWMEMMAP_LAST; i < ARRAY_SIZE(extended_memmap); i++) { + hwaddr size = extended_memmap[i].size; + + base = ROUND_UP(base, size); + vms->memmap[i].base = base; + vms->memmap[i].size = size; + base += size; + } + vms->highest_gpa = base - 1; + if (device_memory_size > 0) { + ms->device_memory = g_malloc0(sizeof(*ms->device_memory)); + ms->device_memory->base = device_memory_base; + memory_region_init(&ms->device_memory->mr, OBJECT(vms), + "device-memory", device_memory_size); + } +} + +/* + * finalize_gic_version - Determines the final gic_version + * according to the gic-version property + * + * Default GIC type is v2 + */ +static void finalize_gic_version(VirtMachineState *vms) +{ + unsigned int max_cpus = MACHINE(vms)->smp.max_cpus; + + if (kvm_enabled()) { + int probe_bitmap; + + if (!kvm_irqchip_in_kernel()) { + switch (vms->gic_version) { + case VIRT_GIC_VERSION_HOST: + warn_report( + "gic-version=host not relevant with kernel-irqchip=off " + "as only userspace GICv2 is supported. 
Using v2 ...");
+                return;
+            case VIRT_GIC_VERSION_MAX:
+            case VIRT_GIC_VERSION_NOSEL:
+                vms->gic_version = VIRT_GIC_VERSION_2;
+                return;
+            case VIRT_GIC_VERSION_2:
+                return;
+            case VIRT_GIC_VERSION_3:
+                error_report(
+                    "gic-version=3 is not supported with kernel-irqchip=off");
+                exit(1);
+            }
+        }
+
+        probe_bitmap = kvm_arm_vgic_probe();
+        if (!probe_bitmap) {
+            error_report("Unable to determine GIC version supported by host");
+            exit(1);
+        }
+
+        switch (vms->gic_version) {
+        case VIRT_GIC_VERSION_HOST:
+        case VIRT_GIC_VERSION_MAX:
+            if (probe_bitmap & KVM_ARM_VGIC_V3) {
+                vms->gic_version = VIRT_GIC_VERSION_3;
+            } else {
+                vms->gic_version = VIRT_GIC_VERSION_2;
+            }
+            return;
+        case VIRT_GIC_VERSION_NOSEL:
+            if ((probe_bitmap & KVM_ARM_VGIC_V2) && max_cpus <= GIC_NCPU) {
+                vms->gic_version = VIRT_GIC_VERSION_2;
+            } else if (probe_bitmap & KVM_ARM_VGIC_V3) {
+                /*
+                 * If the host does not support v2 in-kernel emulation, or
+                 * the end-user requested more than 8 vCPUs, we default to
+                 * v3; defaulting to v2 would be broken in either case.
+                 */
+                vms->gic_version = VIRT_GIC_VERSION_3;
+            } else if (max_cpus > GIC_NCPU) {
+                error_report("host only supports in-kernel GICv2 emulation "
+                             "but more than 8 vcpus are requested");
+                exit(1);
+            }
+            break;
+        case VIRT_GIC_VERSION_2:
+        case VIRT_GIC_VERSION_3:
+            break;
+        }
+
+        /* Check that the chosen version is actually supported by the host */
+        if (vms->gic_version == VIRT_GIC_VERSION_2 &&
+            !(probe_bitmap & KVM_ARM_VGIC_V2)) {
+            error_report("host does not support in-kernel GICv2 emulation");
+            exit(1);
+        } else if (vms->gic_version == VIRT_GIC_VERSION_3 &&
+                   !(probe_bitmap & KVM_ARM_VGIC_V3)) {
+            error_report("host does not support in-kernel GICv3 emulation");
+            exit(1);
+        }
+        return;
+    }
+
+    /* TCG mode */
+    switch (vms->gic_version) {
+    case VIRT_GIC_VERSION_NOSEL:
+        vms->gic_version = VIRT_GIC_VERSION_2;
+        break;
+    case VIRT_GIC_VERSION_MAX:
+        vms->gic_version = VIRT_GIC_VERSION_3;
+        break;
+    case VIRT_GIC_VERSION_HOST:
+        error_report("gic-version=host requires KVM");
+        exit(1);
+    case VIRT_GIC_VERSION_2:
+    case VIRT_GIC_VERSION_3:
+        break;
+    }
+}
+
+/*
+ * virt_cpu_post_init() must be called after the CPUs have
+ * been realized and the GIC has been created.
+ */
+static void virt_cpu_post_init(VirtMachineState *vms, MemoryRegion *sysmem)
+{
+    int max_cpus = MACHINE(vms)->smp.max_cpus;
+    bool aarch64, pmu, steal_time;
+    CPUState *cpu;
+
+    aarch64 = object_property_get_bool(OBJECT(first_cpu), "aarch64", NULL);
+    pmu = object_property_get_bool(OBJECT(first_cpu), "pmu", NULL);
+    steal_time = object_property_get_bool(OBJECT(first_cpu),
+                                          "kvm-steal-time", NULL);
+
+    if (kvm_enabled()) {
+        hwaddr pvtime_reg_base = vms->memmap[VIRT_PVTIME].base;
+        hwaddr pvtime_reg_size = vms->memmap[VIRT_PVTIME].size;
+
+        if (steal_time) {
+            MemoryRegion *pvtime = g_new(MemoryRegion, 1);
+            hwaddr pvtime_size = max_cpus * PVTIME_SIZE_PER_CPU;
+
+            /* The memory region size must be a multiple of host page size.
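+             * (Illustrative: PVTIME_SIZE_PER_CPU is far smaller than a
+             * page, so even a few vcpus still round up to a full host
+             * page in the alignment below.)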
*/ + pvtime_size = REAL_HOST_PAGE_ALIGN(pvtime_size); + + if (pvtime_size > pvtime_reg_size) { + error_report("pvtime requires a %" HWADDR_PRId + " byte memory region for %d CPUs," + " but only %" HWADDR_PRId " has been reserved", + pvtime_size, max_cpus, pvtime_reg_size); + exit(1); + } + + memory_region_init_ram(pvtime, NULL, "pvtime", pvtime_size, NULL); + memory_region_add_subregion(sysmem, pvtime_reg_base, pvtime); + } + + CPU_FOREACH(cpu) { + if (pmu) { + assert(arm_feature(&ARM_CPU(cpu)->env, ARM_FEATURE_PMU)); + if (kvm_irqchip_in_kernel()) { + kvm_arm_pmu_set_irq(cpu, PPI(VIRTUAL_PMU_IRQ)); + } + kvm_arm_pmu_init(cpu); + } + if (steal_time) { + kvm_arm_pvtime_init(cpu, pvtime_reg_base + + cpu->cpu_index * PVTIME_SIZE_PER_CPU); + } + } + } else { + if (aarch64 && vms->highmem) { + int requested_pa_size = 64 - clz64(vms->highest_gpa); + int pamax = arm_pamax(ARM_CPU(first_cpu)); + + if (pamax < requested_pa_size) { + error_report("VCPU supports less PA bits (%d) than " + "requested by the memory map (%d)", + pamax, requested_pa_size); + exit(1); + } + } + } +} + +static void machvirt_init(MachineState *machine) +{ + VirtMachineState *vms = VIRT_MACHINE(machine); + VirtMachineClass *vmc = VIRT_MACHINE_GET_CLASS(machine); + MachineClass *mc = MACHINE_GET_CLASS(machine); + const CPUArchIdList *possible_cpus; + MemoryRegion *sysmem = get_system_memory(); + MemoryRegion *secure_sysmem = NULL; + MemoryRegion *tag_sysmem = NULL; + MemoryRegion *secure_tag_sysmem = NULL; + int n, virt_max_cpus; + bool firmware_loaded; + bool aarch64 = true; + bool has_ged = !vmc->no_ged; + unsigned int smp_cpus = machine->smp.cpus; + unsigned int max_cpus = machine->smp.max_cpus; + + /* + * In accelerated mode, the memory map is computed earlier in kvm_type() + * to create a VM with the right number of IPA bits. + */ + if (!vms->memmap) { + virt_set_memmap(vms); + } + + /* We can probe only here because during property set + * KVM is not available yet + */ + finalize_gic_version(vms); + + if (!cpu_type_valid(machine->cpu_type)) { + error_report("mach-virt: CPU type %s not supported", machine->cpu_type); + exit(1); + } + + if (vms->secure) { + /* + * The Secure view of the world is the same as the NonSecure, + * but with a few extra devices. Create it as a container region + * containing the system memory at low priority; any secure-only + * devices go in at higher priority and take precedence. + */ + secure_sysmem = g_new(MemoryRegion, 1); + memory_region_init(secure_sysmem, OBJECT(machine), "secure-memory", + UINT64_MAX); + memory_region_add_subregion_overlap(secure_sysmem, 0, sysmem, -1); + } + + firmware_loaded = virt_firmware_init(vms, sysmem, + secure_sysmem ?: sysmem); + + /* If we have an EL3 boot ROM then the assumption is that it will + * implement PSCI itself, so disable QEMU's internal implementation + * so it doesn't get in the way. Instead of starting secondary + * CPUs in PSCI powerdown state we will start them all running and + * let the boot ROM sort them out. + * The usual case is that we do use QEMU's PSCI implementation; + * if the guest has EL2 then we will use SMC as the conduit, + * and otherwise we will use HVC (for backwards compatibility and + * because if we're using KVM then we must use HVC). 
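+     * Summarising the checks below as a sketch:
+     *   secure && firmware loaded  -> PSCI conduit disabled
+     *   virtualization (EL2) on    -> conduit is SMC
+     *   otherwise                  -> conduit is HVC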
+ */ + if (vms->secure && firmware_loaded) { + vms->psci_conduit = QEMU_PSCI_CONDUIT_DISABLED; + } else if (vms->virt) { + vms->psci_conduit = QEMU_PSCI_CONDUIT_SMC; + } else { + vms->psci_conduit = QEMU_PSCI_CONDUIT_HVC; + } + + /* The maximum number of CPUs depends on the GIC version, or on how + * many redistributors we can fit into the memory map. + */ + if (vms->gic_version == VIRT_GIC_VERSION_3) { + virt_max_cpus = + vms->memmap[VIRT_GIC_REDIST].size / GICV3_REDIST_SIZE; + virt_max_cpus += + vms->memmap[VIRT_HIGH_GIC_REDIST2].size / GICV3_REDIST_SIZE; + } else { + virt_max_cpus = GIC_NCPU; + } + + if (max_cpus > virt_max_cpus) { + error_report("Number of SMP CPUs requested (%d) exceeds max CPUs " + "supported by machine 'mach-virt' (%d)", + max_cpus, virt_max_cpus); + exit(1); + } + + if (vms->virt && (kvm_enabled() || hvf_enabled())) { + error_report("mach-virt: %s does not support providing " + "Virtualization extensions to the guest CPU", + kvm_enabled() ? "KVM" : "HVF"); + exit(1); + } + + if (vms->mte && (kvm_enabled() || hvf_enabled())) { + error_report("mach-virt: %s does not support providing " + "MTE to the guest CPU", + kvm_enabled() ? "KVM" : "HVF"); + exit(1); + } + + create_fdt(vms); + + possible_cpus = mc->possible_cpu_arch_ids(machine); + assert(possible_cpus->len == max_cpus); + for (n = 0; n < possible_cpus->len; n++) { + Object *cpuobj; + CPUState *cs; + + if (n >= smp_cpus) { + break; + } + + cpuobj = object_new(possible_cpus->cpus[n].type); + object_property_set_int(cpuobj, "mp-affinity", + possible_cpus->cpus[n].arch_id, NULL); + + cs = CPU(cpuobj); + cs->cpu_index = n; + + numa_cpu_pre_plug(&possible_cpus->cpus[cs->cpu_index], DEVICE(cpuobj), + &error_fatal); + + aarch64 &= object_property_get_bool(cpuobj, "aarch64", NULL); + + if (!vms->secure) { + object_property_set_bool(cpuobj, "has_el3", false, NULL); + } + + if (!vms->virt && object_property_find(cpuobj, "has_el2")) { + object_property_set_bool(cpuobj, "has_el2", false, NULL); + } + + if (vms->psci_conduit != QEMU_PSCI_CONDUIT_DISABLED) { + object_property_set_int(cpuobj, "psci-conduit", vms->psci_conduit, + NULL); + + /* Secondary CPUs start in PSCI powered-down state */ + if (n > 0) { + object_property_set_bool(cpuobj, "start-powered-off", true, + NULL); + } + } + + if (vmc->kvm_no_adjvtime && + object_property_find(cpuobj, "kvm-no-adjvtime")) { + object_property_set_bool(cpuobj, "kvm-no-adjvtime", true, NULL); + } + + if (vmc->no_kvm_steal_time && + object_property_find(cpuobj, "kvm-steal-time")) { + object_property_set_bool(cpuobj, "kvm-steal-time", false, NULL); + } + + if (vmc->no_pmu && object_property_find(cpuobj, "pmu")) { + object_property_set_bool(cpuobj, "pmu", false, NULL); + } + + if (object_property_find(cpuobj, "reset-cbar")) { + object_property_set_int(cpuobj, "reset-cbar", + vms->memmap[VIRT_CPUPERIPHS].base, + &error_abort); + } + + object_property_set_link(cpuobj, "memory", OBJECT(sysmem), + &error_abort); + if (vms->secure) { + object_property_set_link(cpuobj, "secure-memory", + OBJECT(secure_sysmem), &error_abort); + } + + if (vms->mte) { + /* Create the memory region only once, but link to all cpus. */ + if (!tag_sysmem) { + /* + * The property exists only if MemTag is supported. + * If it is, we must allocate the ram to back that up. 
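+                 * (The 1/32 sizing used below mirrors MTE's storage cost
+                 * of 4 tag bits per 16-byte granule; noted here as an
+                 * explanatory assumption, not something this code checks.)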
+ */ + if (!object_property_find(cpuobj, "tag-memory")) { + error_report("MTE requested, but not supported " + "by the guest CPU"); + exit(1); + } + + tag_sysmem = g_new(MemoryRegion, 1); + memory_region_init(tag_sysmem, OBJECT(machine), + "tag-memory", UINT64_MAX / 32); + + if (vms->secure) { + secure_tag_sysmem = g_new(MemoryRegion, 1); + memory_region_init(secure_tag_sysmem, OBJECT(machine), + "secure-tag-memory", UINT64_MAX / 32); + + /* As with ram, secure-tag takes precedence over tag. */ + memory_region_add_subregion_overlap(secure_tag_sysmem, 0, + tag_sysmem, -1); + } + } + + object_property_set_link(cpuobj, "tag-memory", OBJECT(tag_sysmem), + &error_abort); + if (vms->secure) { + object_property_set_link(cpuobj, "secure-tag-memory", + OBJECT(secure_tag_sysmem), + &error_abort); + } + } + + qdev_realize(DEVICE(cpuobj), NULL, &error_fatal); + object_unref(cpuobj); + } + fdt_add_timer_nodes(vms); + fdt_add_cpu_nodes(vms); + + memory_region_add_subregion(sysmem, vms->memmap[VIRT_MEM].base, + machine->ram); + if (machine->device_memory) { + memory_region_add_subregion(sysmem, machine->device_memory->base, + &machine->device_memory->mr); + } + + virt_flash_fdt(vms, sysmem, secure_sysmem ?: sysmem); + + create_gic(vms, sysmem); + + virt_cpu_post_init(vms, sysmem); + + fdt_add_pmu_nodes(vms); + + create_uart(vms, VIRT_UART, sysmem, serial_hd(0)); + + if (vms->secure) { + create_secure_ram(vms, secure_sysmem, secure_tag_sysmem); + create_uart(vms, VIRT_SECURE_UART, secure_sysmem, serial_hd(1)); + } + + if (tag_sysmem) { + create_tag_ram(tag_sysmem, vms->memmap[VIRT_MEM].base, + machine->ram_size, "mach-virt.tag"); + } + + vms->highmem_ecam &= vms->highmem && (!firmware_loaded || aarch64); + + create_rtc(vms); + + create_pcie(vms); + + if (has_ged && aarch64 && firmware_loaded && virt_is_acpi_enabled(vms)) { + vms->acpi_dev = create_acpi_ged(vms); + } else { + create_gpio_devices(vms, VIRT_GPIO, sysmem); + } + + if (vms->secure && !vmc->no_secure_gpio) { + create_gpio_devices(vms, VIRT_SECURE_GPIO, secure_sysmem); + } + + /* connect powerdown request */ + vms->powerdown_notifier.notify = virt_powerdown_req; + qemu_register_powerdown_notifier(&vms->powerdown_notifier); + + /* Create mmio transports, so the user can create virtio backends + * (which will be automatically plugged in to the transports). If + * no backend is created the transport will just sit harmlessly idle. 
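+     * (Usage sketch with an illustrative drive id: "-device
+     * virtio-blk-device,drive=hd0" would then claim one of these
+     * transports.)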
+ */ + create_virtio_devices(vms); + + vms->fw_cfg = create_fw_cfg(vms, &address_space_memory); + rom_set_fw(vms->fw_cfg); + + create_platform_bus(vms); + + if (machine->nvdimms_state->is_enabled) { + const struct AcpiGenericAddress arm_virt_nvdimm_acpi_dsmio = { + .space_id = AML_AS_SYSTEM_MEMORY, + .address = vms->memmap[VIRT_NVDIMM_ACPI].base, + .bit_width = NVDIMM_ACPI_IO_LEN << 3 + }; + + nvdimm_init_acpi_state(machine->nvdimms_state, sysmem, + arm_virt_nvdimm_acpi_dsmio, + vms->fw_cfg, OBJECT(vms)); + } + + vms->bootinfo.ram_size = machine->ram_size; + vms->bootinfo.nb_cpus = smp_cpus; + vms->bootinfo.board_id = -1; + vms->bootinfo.loader_start = vms->memmap[VIRT_MEM].base; + vms->bootinfo.get_dtb = machvirt_dtb; + vms->bootinfo.skip_dtb_autoload = true; + vms->bootinfo.firmware_loaded = firmware_loaded; + arm_load_kernel(ARM_CPU(first_cpu), machine, &vms->bootinfo); + + vms->machine_done.notify = virt_machine_done; + qemu_add_machine_init_done_notifier(&vms->machine_done); +} + +static bool virt_get_secure(Object *obj, Error **errp) +{ + VirtMachineState *vms = VIRT_MACHINE(obj); + + return vms->secure; +} + +static void virt_set_secure(Object *obj, bool value, Error **errp) +{ + VirtMachineState *vms = VIRT_MACHINE(obj); + + vms->secure = value; +} + +static bool virt_get_virt(Object *obj, Error **errp) +{ + VirtMachineState *vms = VIRT_MACHINE(obj); + + return vms->virt; +} + +static void virt_set_virt(Object *obj, bool value, Error **errp) +{ + VirtMachineState *vms = VIRT_MACHINE(obj); + + vms->virt = value; +} + +static bool virt_get_highmem(Object *obj, Error **errp) +{ + VirtMachineState *vms = VIRT_MACHINE(obj); + + return vms->highmem; +} + +static void virt_set_highmem(Object *obj, bool value, Error **errp) +{ + VirtMachineState *vms = VIRT_MACHINE(obj); + + vms->highmem = value; +} + +static bool virt_get_its(Object *obj, Error **errp) +{ + VirtMachineState *vms = VIRT_MACHINE(obj); + + return vms->its; +} + +static void virt_set_its(Object *obj, bool value, Error **errp) +{ + VirtMachineState *vms = VIRT_MACHINE(obj); + + vms->its = value; +} + +static char *virt_get_oem_id(Object *obj, Error **errp) +{ + VirtMachineState *vms = VIRT_MACHINE(obj); + + return g_strdup(vms->oem_id); +} + +static void virt_set_oem_id(Object *obj, const char *value, Error **errp) +{ + VirtMachineState *vms = VIRT_MACHINE(obj); + size_t len = strlen(value); + + if (len > 6) { + error_setg(errp, + "User specified oem-id value is bigger than 6 bytes in size"); + return; + } + + strncpy(vms->oem_id, value, 6); +} + +static char *virt_get_oem_table_id(Object *obj, Error **errp) +{ + VirtMachineState *vms = VIRT_MACHINE(obj); + + return g_strdup(vms->oem_table_id); +} + +static void virt_set_oem_table_id(Object *obj, const char *value, + Error **errp) +{ + VirtMachineState *vms = VIRT_MACHINE(obj); + size_t len = strlen(value); + + if (len > 8) { + error_setg(errp, + "User specified oem-table-id value is bigger than 8 bytes in size"); + return; + } + strncpy(vms->oem_table_id, value, 8); +} + + +bool virt_is_acpi_enabled(VirtMachineState *vms) +{ + if (vms->acpi == ON_OFF_AUTO_OFF) { + return false; + } + return true; +} + +static void virt_get_acpi(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + VirtMachineState *vms = VIRT_MACHINE(obj); + OnOffAuto acpi = vms->acpi; + + visit_type_OnOffAuto(v, name, &acpi, errp); +} + +static void virt_set_acpi(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + VirtMachineState *vms = VIRT_MACHINE(obj); + 
+ visit_type_OnOffAuto(v, name, &vms->acpi, errp); +} + +static bool virt_get_ras(Object *obj, Error **errp) +{ + VirtMachineState *vms = VIRT_MACHINE(obj); + + return vms->ras; +} + +static void virt_set_ras(Object *obj, bool value, Error **errp) +{ + VirtMachineState *vms = VIRT_MACHINE(obj); + + vms->ras = value; +} + +static bool virt_get_mte(Object *obj, Error **errp) +{ + VirtMachineState *vms = VIRT_MACHINE(obj); + + return vms->mte; +} + +static void virt_set_mte(Object *obj, bool value, Error **errp) +{ + VirtMachineState *vms = VIRT_MACHINE(obj); + + vms->mte = value; +} + +static char *virt_get_gic_version(Object *obj, Error **errp) +{ + VirtMachineState *vms = VIRT_MACHINE(obj); + const char *val = vms->gic_version == VIRT_GIC_VERSION_3 ? "3" : "2"; + + return g_strdup(val); +} + +static void virt_set_gic_version(Object *obj, const char *value, Error **errp) +{ + VirtMachineState *vms = VIRT_MACHINE(obj); + + if (!strcmp(value, "3")) { + vms->gic_version = VIRT_GIC_VERSION_3; + } else if (!strcmp(value, "2")) { + vms->gic_version = VIRT_GIC_VERSION_2; + } else if (!strcmp(value, "host")) { + vms->gic_version = VIRT_GIC_VERSION_HOST; /* Will probe later */ + } else if (!strcmp(value, "max")) { + vms->gic_version = VIRT_GIC_VERSION_MAX; /* Will probe later */ + } else { + error_setg(errp, "Invalid gic-version value"); + error_append_hint(errp, "Valid values are 3, 2, host, max.\n"); + } +} + +static char *virt_get_iommu(Object *obj, Error **errp) +{ + VirtMachineState *vms = VIRT_MACHINE(obj); + + switch (vms->iommu) { + case VIRT_IOMMU_NONE: + return g_strdup("none"); + case VIRT_IOMMU_SMMUV3: + return g_strdup("smmuv3"); + default: + g_assert_not_reached(); + } +} + +static void virt_set_iommu(Object *obj, const char *value, Error **errp) +{ + VirtMachineState *vms = VIRT_MACHINE(obj); + + if (!strcmp(value, "smmuv3")) { + vms->iommu = VIRT_IOMMU_SMMUV3; + } else if (!strcmp(value, "none")) { + vms->iommu = VIRT_IOMMU_NONE; + } else { + error_setg(errp, "Invalid iommu value"); + error_append_hint(errp, "Valid values are none, smmuv3.\n"); + } +} + +static bool virt_get_default_bus_bypass_iommu(Object *obj, Error **errp) +{ + VirtMachineState *vms = VIRT_MACHINE(obj); + + return vms->default_bus_bypass_iommu; +} + +static void virt_set_default_bus_bypass_iommu(Object *obj, bool value, + Error **errp) +{ + VirtMachineState *vms = VIRT_MACHINE(obj); + + vms->default_bus_bypass_iommu = value; +} + +static CpuInstanceProperties +virt_cpu_index_to_props(MachineState *ms, unsigned cpu_index) +{ + MachineClass *mc = MACHINE_GET_CLASS(ms); + const CPUArchIdList *possible_cpus = mc->possible_cpu_arch_ids(ms); + + assert(cpu_index < possible_cpus->len); + return possible_cpus->cpus[cpu_index].props; +} + +static int64_t virt_get_default_cpu_node_id(const MachineState *ms, int idx) +{ + return idx % ms->numa_state->num_nodes; +} + +static const CPUArchIdList *virt_possible_cpu_arch_ids(MachineState *ms) +{ + int n; + unsigned int max_cpus = ms->smp.max_cpus; + VirtMachineState *vms = VIRT_MACHINE(ms); + + if (ms->possible_cpus) { + assert(ms->possible_cpus->len == max_cpus); + return ms->possible_cpus; + } + + ms->possible_cpus = g_malloc0(sizeof(CPUArchIdList) + + sizeof(CPUArchId) * max_cpus); + ms->possible_cpus->len = max_cpus; + for (n = 0; n < ms->possible_cpus->len; n++) { + ms->possible_cpus->cpus[n].type = ms->cpu_type; + ms->possible_cpus->cpus[n].arch_id = + virt_cpu_mp_affinity(vms, n); + ms->possible_cpus->cpus[n].props.has_thread_id = true; + 
ms->possible_cpus->cpus[n].props.thread_id = n; + } + return ms->possible_cpus; +} + +static void virt_memory_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev, + Error **errp) +{ + VirtMachineState *vms = VIRT_MACHINE(hotplug_dev); + const MachineState *ms = MACHINE(hotplug_dev); + const bool is_nvdimm = object_dynamic_cast(OBJECT(dev), TYPE_NVDIMM); + + if (!vms->acpi_dev) { + error_setg(errp, + "memory hotplug is not enabled: missing acpi-ged device"); + return; + } + + if (vms->mte) { + error_setg(errp, "memory hotplug is not enabled: MTE is enabled"); + return; + } + + if (is_nvdimm && !ms->nvdimms_state->is_enabled) { + error_setg(errp, "nvdimm is not enabled: add 'nvdimm=on' to '-M'"); + return; + } + + pc_dimm_pre_plug(PC_DIMM(dev), MACHINE(hotplug_dev), NULL, errp); +} + +static void virt_memory_plug(HotplugHandler *hotplug_dev, + DeviceState *dev, Error **errp) +{ + VirtMachineState *vms = VIRT_MACHINE(hotplug_dev); + MachineState *ms = MACHINE(hotplug_dev); + bool is_nvdimm = object_dynamic_cast(OBJECT(dev), TYPE_NVDIMM); + + pc_dimm_plug(PC_DIMM(dev), MACHINE(vms)); + + if (is_nvdimm) { + nvdimm_plug(ms->nvdimms_state); + } + + hotplug_handler_plug(HOTPLUG_HANDLER(vms->acpi_dev), + dev, &error_abort); +} + +static void virt_machine_device_pre_plug_cb(HotplugHandler *hotplug_dev, + DeviceState *dev, Error **errp) +{ + VirtMachineState *vms = VIRT_MACHINE(hotplug_dev); + + if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) { + virt_memory_pre_plug(hotplug_dev, dev, errp); + } else if (object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_IOMMU_PCI)) { + hwaddr db_start = 0, db_end = 0; + char *resv_prop_str; + + switch (vms->msi_controller) { + case VIRT_MSI_CTRL_NONE: + return; + case VIRT_MSI_CTRL_ITS: + /* GITS_TRANSLATER page */ + db_start = base_memmap[VIRT_GIC_ITS].base + 0x10000; + db_end = base_memmap[VIRT_GIC_ITS].base + + base_memmap[VIRT_GIC_ITS].size - 1; + break; + case VIRT_MSI_CTRL_GICV2M: + /* MSI_SETSPI_NS page */ + db_start = base_memmap[VIRT_GIC_V2M].base; + db_end = db_start + base_memmap[VIRT_GIC_V2M].size - 1; + break; + } + resv_prop_str = g_strdup_printf("0x%"PRIx64":0x%"PRIx64":%u", + db_start, db_end, + VIRTIO_IOMMU_RESV_MEM_T_MSI); + + qdev_prop_set_uint32(dev, "len-reserved-regions", 1); + qdev_prop_set_string(dev, "reserved-regions[0]", resv_prop_str); + g_free(resv_prop_str); + } +} + +static void virt_machine_device_plug_cb(HotplugHandler *hotplug_dev, + DeviceState *dev, Error **errp) +{ + VirtMachineState *vms = VIRT_MACHINE(hotplug_dev); + + if (vms->platform_bus_dev) { + MachineClass *mc = MACHINE_GET_CLASS(vms); + + if (device_is_dynamic_sysbus(mc, dev)) { + platform_bus_link_device(PLATFORM_BUS_DEVICE(vms->platform_bus_dev), + SYS_BUS_DEVICE(dev)); + } + } + if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) { + virt_memory_plug(hotplug_dev, dev, errp); + } + if (object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_IOMMU_PCI)) { + PCIDevice *pdev = PCI_DEVICE(dev); + + vms->iommu = VIRT_IOMMU_VIRTIO; + vms->virtio_iommu_bdf = pci_get_bdf(pdev); + create_virtio_iommu_dt_bindings(vms); + } +} + +static void virt_dimm_unplug_request(HotplugHandler *hotplug_dev, + DeviceState *dev, Error **errp) +{ + VirtMachineState *vms = VIRT_MACHINE(hotplug_dev); + Error *local_err = NULL; + + if (!vms->acpi_dev) { + error_setg(&local_err, + "memory hotplug is not enabled: missing acpi-ged device"); + goto out; + } + + if (object_dynamic_cast(OBJECT(dev), TYPE_NVDIMM)) { + error_setg(&local_err, + "nvdimm device hot unplug is not supported yet."); + goto out; + } + + 
hotplug_handler_unplug_request(HOTPLUG_HANDLER(vms->acpi_dev), dev, + &local_err); +out: + error_propagate(errp, local_err); +} + +static void virt_dimm_unplug(HotplugHandler *hotplug_dev, + DeviceState *dev, Error **errp) +{ + VirtMachineState *vms = VIRT_MACHINE(hotplug_dev); + Error *local_err = NULL; + + hotplug_handler_unplug(HOTPLUG_HANDLER(vms->acpi_dev), dev, &local_err); + if (local_err) { + goto out; + } + + pc_dimm_unplug(PC_DIMM(dev), MACHINE(vms)); + qdev_unrealize(dev); + +out: + error_propagate(errp, local_err); +} + +static void virt_machine_device_unplug_request_cb(HotplugHandler *hotplug_dev, + DeviceState *dev, Error **errp) +{ + if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) { + virt_dimm_unplug_request(hotplug_dev, dev, errp); + } else { + error_setg(errp, "device unplug request for unsupported device" + " type: %s", object_get_typename(OBJECT(dev))); + } +} + +static void virt_machine_device_unplug_cb(HotplugHandler *hotplug_dev, + DeviceState *dev, Error **errp) +{ + if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) { + virt_dimm_unplug(hotplug_dev, dev, errp); + } else { + error_setg(errp, "virt: device unplug for unsupported device" + " type: %s", object_get_typename(OBJECT(dev))); + } +} + +static HotplugHandler *virt_machine_get_hotplug_handler(MachineState *machine, + DeviceState *dev) +{ + MachineClass *mc = MACHINE_GET_CLASS(machine); + + if (device_is_dynamic_sysbus(mc, dev) || + (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM))) { + return HOTPLUG_HANDLER(machine); + } + if (object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_IOMMU_PCI)) { + VirtMachineState *vms = VIRT_MACHINE(machine); + + if (!vms->bootinfo.firmware_loaded || !virt_is_acpi_enabled(vms)) { + return HOTPLUG_HANDLER(machine); + } + } + return NULL; +} + +/* + * for arm64 kvm_type [7-0] encodes the requested number of bits + * in the IPA address space + */ +static int virt_kvm_type(MachineState *ms, const char *type_str) +{ + VirtMachineState *vms = VIRT_MACHINE(ms); + int max_vm_pa_size, requested_pa_size; + bool fixed_ipa; + + max_vm_pa_size = kvm_arm_get_max_vm_ipa_size(ms, &fixed_ipa); + + /* we freeze the memory map to compute the highest gpa */ + virt_set_memmap(vms); + + requested_pa_size = 64 - clz64(vms->highest_gpa); + + /* + * KVM requires the IPA size to be at least 32 bits. + */ + if (requested_pa_size < 32) { + requested_pa_size = 32; + } + + if (requested_pa_size > max_vm_pa_size) { + error_report("-m and ,maxmem option values " + "require an IPA range (%d bits) larger than " + "the one supported by the host (%d bits)", + requested_pa_size, max_vm_pa_size); + exit(1); + } + /* + * We return the requested PA log size, unless KVM only supports + * the implicit legacy 40b IPA setting, in which case the kvm_type + * must be 0. + */ + return fixed_ipa ? 0 : requested_pa_size; +} + +static void virt_machine_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(oc); + + mc->init = machvirt_init; + /* Start with max_cpus set to 512, which is the maximum supported by KVM. + * The value may be reduced later when we have more information about the + * configuration of the particular instance. 
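+     * For example, a GICv2-based configuration is architecturally limited
+     * to 8 CPUs.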
+ */ + mc->max_cpus = 512; + machine_class_allow_dynamic_sysbus_dev(mc, TYPE_VFIO_CALXEDA_XGMAC); + machine_class_allow_dynamic_sysbus_dev(mc, TYPE_VFIO_AMD_XGBE); + machine_class_allow_dynamic_sysbus_dev(mc, TYPE_RAMFB_DEVICE); + machine_class_allow_dynamic_sysbus_dev(mc, TYPE_VFIO_PLATFORM); +#ifdef CONFIG_TPM + machine_class_allow_dynamic_sysbus_dev(mc, TYPE_TPM_TIS_SYSBUS); +#endif + mc->block_default_type = IF_VIRTIO; + mc->no_cdrom = 1; + mc->pci_allow_0_address = true; + /* We know we will never create a pre-ARMv7 CPU which needs 1K pages */ + mc->minimum_page_bits = 12; + mc->possible_cpu_arch_ids = virt_possible_cpu_arch_ids; + mc->cpu_index_to_instance_props = virt_cpu_index_to_props; + mc->default_cpu_type = ARM_CPU_TYPE_NAME("cortex-a15"); + mc->get_default_cpu_node_id = virt_get_default_cpu_node_id; + mc->kvm_type = virt_kvm_type; + assert(!mc->get_hotplug_handler); + mc->get_hotplug_handler = virt_machine_get_hotplug_handler; + hc->pre_plug = virt_machine_device_pre_plug_cb; + hc->plug = virt_machine_device_plug_cb; + hc->unplug_request = virt_machine_device_unplug_request_cb; + hc->unplug = virt_machine_device_unplug_cb; + mc->nvdimm_supported = true; + mc->auto_enable_numa_with_memhp = true; + mc->auto_enable_numa_with_memdev = true; + mc->default_ram_id = "mach-virt.ram"; + + object_class_property_add(oc, "acpi", "OnOffAuto", + virt_get_acpi, virt_set_acpi, + NULL, NULL); + object_class_property_set_description(oc, "acpi", + "Enable ACPI"); + object_class_property_add_bool(oc, "secure", virt_get_secure, + virt_set_secure); + object_class_property_set_description(oc, "secure", + "Set on/off to enable/disable the ARM " + "Security Extensions (TrustZone)"); + + object_class_property_add_bool(oc, "virtualization", virt_get_virt, + virt_set_virt); + object_class_property_set_description(oc, "virtualization", + "Set on/off to enable/disable emulating a " + "guest CPU which implements the ARM " + "Virtualization Extensions"); + + object_class_property_add_bool(oc, "highmem", virt_get_highmem, + virt_set_highmem); + object_class_property_set_description(oc, "highmem", + "Set on/off to enable/disable using " + "physical address space above 32 bits"); + + object_class_property_add_str(oc, "gic-version", virt_get_gic_version, + virt_set_gic_version); + object_class_property_set_description(oc, "gic-version", + "Set GIC version. " + "Valid values are 2, 3, host and max"); + + object_class_property_add_str(oc, "iommu", virt_get_iommu, virt_set_iommu); + object_class_property_set_description(oc, "iommu", + "Set the IOMMU type. 
" + "Valid values are none and smmuv3"); + + object_class_property_add_bool(oc, "default-bus-bypass-iommu", + virt_get_default_bus_bypass_iommu, + virt_set_default_bus_bypass_iommu); + object_class_property_set_description(oc, "default-bus-bypass-iommu", + "Set on/off to enable/disable " + "bypass_iommu for default root bus"); + + object_class_property_add_bool(oc, "ras", virt_get_ras, + virt_set_ras); + object_class_property_set_description(oc, "ras", + "Set on/off to enable/disable reporting host memory errors " + "to a KVM guest using ACPI and guest external abort exceptions"); + + object_class_property_add_bool(oc, "mte", virt_get_mte, virt_set_mte); + object_class_property_set_description(oc, "mte", + "Set on/off to enable/disable emulating a " + "guest CPU which implements the ARM " + "Memory Tagging Extension"); + + object_class_property_add_bool(oc, "its", virt_get_its, + virt_set_its); + object_class_property_set_description(oc, "its", + "Set on/off to enable/disable " + "ITS instantiation"); + + object_class_property_add_str(oc, "x-oem-id", + virt_get_oem_id, + virt_set_oem_id); + object_class_property_set_description(oc, "x-oem-id", + "Override the default value of field OEMID " + "in ACPI table header." + "The string may be up to 6 bytes in size"); + + + object_class_property_add_str(oc, "x-oem-table-id", + virt_get_oem_table_id, + virt_set_oem_table_id); + object_class_property_set_description(oc, "x-oem-table-id", + "Override the default value of field OEM Table ID " + "in ACPI table header." + "The string may be up to 8 bytes in size"); + +} + +static void virt_instance_init(Object *obj) +{ + VirtMachineState *vms = VIRT_MACHINE(obj); + VirtMachineClass *vmc = VIRT_MACHINE_GET_CLASS(vms); + + /* EL3 is disabled by default on virt: this makes us consistent + * between KVM and TCG for this board, and it also allows us to + * boot UEFI blobs which assume no TrustZone support. + */ + vms->secure = false; + + /* EL2 is also disabled by default, for similar reasons */ + vms->virt = false; + + /* High memory is enabled by default */ + vms->highmem = true; + vms->gic_version = VIRT_GIC_VERSION_NOSEL; + + vms->highmem_ecam = !vmc->no_highmem_ecam; + + if (vmc->no_its) { + vms->its = false; + } else { + /* Default allows ITS instantiation */ + vms->its = true; + + if (vmc->no_tcg_its) { + vms->tcg_its = false; + } else { + vms->tcg_its = true; + } + } + + /* Default disallows iommu instantiation */ + vms->iommu = VIRT_IOMMU_NONE; + + /* The default root bus is attached to iommu by default */ + vms->default_bus_bypass_iommu = false; + + /* Default disallows RAS instantiation */ + vms->ras = false; + + /* MTE is disabled by default. 
*/ + vms->mte = false; + + vms->irqmap = a15irqmap; + + virt_flash_create(vms); + + vms->oem_id = g_strndup(ACPI_BUILD_APPNAME6, 6); + vms->oem_table_id = g_strndup(ACPI_BUILD_APPNAME8, 8); +} + +static const TypeInfo virt_machine_info = { + .name = TYPE_VIRT_MACHINE, + .parent = TYPE_MACHINE, + .abstract = true, + .instance_size = sizeof(VirtMachineState), + .class_size = sizeof(VirtMachineClass), + .class_init = virt_machine_class_init, + .instance_init = virt_instance_init, + .interfaces = (InterfaceInfo[]) { + { TYPE_HOTPLUG_HANDLER }, + { } + }, +}; + +static void machvirt_machine_init(void) +{ + type_register_static(&virt_machine_info); +} +type_init(machvirt_machine_init); + +static void virt_machine_6_2_options(MachineClass *mc) +{ +} +DEFINE_VIRT_MACHINE_AS_LATEST(6, 2) + +static void virt_machine_6_1_options(MachineClass *mc) +{ + VirtMachineClass *vmc = VIRT_MACHINE_CLASS(OBJECT_CLASS(mc)); + + virt_machine_6_2_options(mc); + compat_props_add(mc->compat_props, hw_compat_6_1, hw_compat_6_1_len); + mc->smp_props.prefer_sockets = true; + vmc->no_cpu_topology = true; + + /* qemu ITS was introduced with 6.2 */ + vmc->no_tcg_its = true; +} +DEFINE_VIRT_MACHINE(6, 1) + +static void virt_machine_6_0_options(MachineClass *mc) +{ + virt_machine_6_1_options(mc); + compat_props_add(mc->compat_props, hw_compat_6_0, hw_compat_6_0_len); +} +DEFINE_VIRT_MACHINE(6, 0) + +static void virt_machine_5_2_options(MachineClass *mc) +{ + VirtMachineClass *vmc = VIRT_MACHINE_CLASS(OBJECT_CLASS(mc)); + + virt_machine_6_0_options(mc); + compat_props_add(mc->compat_props, hw_compat_5_2, hw_compat_5_2_len); + vmc->no_secure_gpio = true; +} +DEFINE_VIRT_MACHINE(5, 2) + +static void virt_machine_5_1_options(MachineClass *mc) +{ + VirtMachineClass *vmc = VIRT_MACHINE_CLASS(OBJECT_CLASS(mc)); + + virt_machine_5_2_options(mc); + compat_props_add(mc->compat_props, hw_compat_5_1, hw_compat_5_1_len); + vmc->no_kvm_steal_time = true; +} +DEFINE_VIRT_MACHINE(5, 1) + +static void virt_machine_5_0_options(MachineClass *mc) +{ + VirtMachineClass *vmc = VIRT_MACHINE_CLASS(OBJECT_CLASS(mc)); + + virt_machine_5_1_options(mc); + compat_props_add(mc->compat_props, hw_compat_5_0, hw_compat_5_0_len); + mc->numa_mem_supported = true; + vmc->acpi_expose_flash = true; + mc->auto_enable_numa_with_memdev = false; +} +DEFINE_VIRT_MACHINE(5, 0) + +static void virt_machine_4_2_options(MachineClass *mc) +{ + VirtMachineClass *vmc = VIRT_MACHINE_CLASS(OBJECT_CLASS(mc)); + + virt_machine_5_0_options(mc); + compat_props_add(mc->compat_props, hw_compat_4_2, hw_compat_4_2_len); + vmc->kvm_no_adjvtime = true; +} +DEFINE_VIRT_MACHINE(4, 2) + +static void virt_machine_4_1_options(MachineClass *mc) +{ + VirtMachineClass *vmc = VIRT_MACHINE_CLASS(OBJECT_CLASS(mc)); + + virt_machine_4_2_options(mc); + compat_props_add(mc->compat_props, hw_compat_4_1, hw_compat_4_1_len); + vmc->no_ged = true; + mc->auto_enable_numa_with_memhp = false; +} +DEFINE_VIRT_MACHINE(4, 1) + +static void virt_machine_4_0_options(MachineClass *mc) +{ + virt_machine_4_1_options(mc); + compat_props_add(mc->compat_props, hw_compat_4_0, hw_compat_4_0_len); +} +DEFINE_VIRT_MACHINE(4, 0) + +static void virt_machine_3_1_options(MachineClass *mc) +{ + virt_machine_4_0_options(mc); + compat_props_add(mc->compat_props, hw_compat_3_1, hw_compat_3_1_len); +} +DEFINE_VIRT_MACHINE(3, 1) + +static void virt_machine_3_0_options(MachineClass *mc) +{ + virt_machine_3_1_options(mc); + compat_props_add(mc->compat_props, hw_compat_3_0, hw_compat_3_0_len); +} +DEFINE_VIRT_MACHINE(3, 0) + +static 
void virt_machine_2_12_options(MachineClass *mc)
+{
+    VirtMachineClass *vmc = VIRT_MACHINE_CLASS(OBJECT_CLASS(mc));
+
+    virt_machine_3_0_options(mc);
+    compat_props_add(mc->compat_props, hw_compat_2_12, hw_compat_2_12_len);
+    vmc->no_highmem_ecam = true;
+    mc->max_cpus = 255;
+}
+DEFINE_VIRT_MACHINE(2, 12)
+
+static void virt_machine_2_11_options(MachineClass *mc)
+{
+    VirtMachineClass *vmc = VIRT_MACHINE_CLASS(OBJECT_CLASS(mc));
+
+    virt_machine_2_12_options(mc);
+    compat_props_add(mc->compat_props, hw_compat_2_11, hw_compat_2_11_len);
+    vmc->smbios_old_sys_ver = true;
+}
+DEFINE_VIRT_MACHINE(2, 11)
+
+static void virt_machine_2_10_options(MachineClass *mc)
+{
+    virt_machine_2_11_options(mc);
+    compat_props_add(mc->compat_props, hw_compat_2_10, hw_compat_2_10_len);
+    /* before 2.11 we never faulted accesses to bad addresses */
+    mc->ignore_memory_transaction_failures = true;
+}
+DEFINE_VIRT_MACHINE(2, 10)
+
+static void virt_machine_2_9_options(MachineClass *mc)
+{
+    virt_machine_2_10_options(mc);
+    compat_props_add(mc->compat_props, hw_compat_2_9, hw_compat_2_9_len);
+}
+DEFINE_VIRT_MACHINE(2, 9)
+
+static void virt_machine_2_8_options(MachineClass *mc)
+{
+    VirtMachineClass *vmc = VIRT_MACHINE_CLASS(OBJECT_CLASS(mc));
+
+    virt_machine_2_9_options(mc);
+    compat_props_add(mc->compat_props, hw_compat_2_8, hw_compat_2_8_len);
+    /* For 2.8 and earlier we falsely claimed in the DT that
+     * our timers were edge-triggered, not level-triggered.
+     */
+    vmc->claim_edge_triggered_timers = true;
+}
+DEFINE_VIRT_MACHINE(2, 8)
+
+static void virt_machine_2_7_options(MachineClass *mc)
+{
+    VirtMachineClass *vmc = VIRT_MACHINE_CLASS(OBJECT_CLASS(mc));
+
+    virt_machine_2_8_options(mc);
+    compat_props_add(mc->compat_props, hw_compat_2_7, hw_compat_2_7_len);
+    /* ITS was introduced with 2.8 */
+    vmc->no_its = true;
+    /* Stick with 1K pages for migration compatibility */
+    mc->minimum_page_bits = 0;
+}
+DEFINE_VIRT_MACHINE(2, 7)
+
+static void virt_machine_2_6_options(MachineClass *mc)
+{
+    VirtMachineClass *vmc = VIRT_MACHINE_CLASS(OBJECT_CLASS(mc));
+
+    virt_machine_2_7_options(mc);
+    compat_props_add(mc->compat_props, hw_compat_2_6, hw_compat_2_6_len);
+    vmc->disallow_affinity_adjustment = true;
+    /* Disable PMU for 2.6 as PMU support was first introduced in 2.7 */
+    vmc->no_pmu = true;
+}
+DEFINE_VIRT_MACHINE(2, 6)
diff --git a/hw/arm/xilinx_zynq.c b/hw/arm/xilinx_zynq.c
new file mode 100644
index 000000000..69c333e91
--- /dev/null
+++ b/hw/arm/xilinx_zynq.c
@@ -0,0 +1,377 @@
+/*
+ * Xilinx Zynq Baseboard System emulation.
+ *
+ * Copyright (c) 2010 Xilinx.
+ * Copyright (c) 2012 Peter A.G. Crosthwaite (peter.croshtwaite@petalogix.com)
+ * Copyright (c) 2012 Petalogix Pty Ltd.
+ * Written by Haibing Ma
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "qapi/error.h" +#include "cpu.h" +#include "hw/sysbus.h" +#include "hw/arm/boot.h" +#include "net/net.h" +#include "sysemu/sysemu.h" +#include "hw/boards.h" +#include "hw/block/flash.h" +#include "hw/loader.h" +#include "hw/adc/zynq-xadc.h" +#include "hw/ssi/ssi.h" +#include "hw/usb/chipidea.h" +#include "qemu/error-report.h" +#include "hw/sd/sdhci.h" +#include "hw/char/cadence_uart.h" +#include "hw/net/cadence_gem.h" +#include "hw/cpu/a9mpcore.h" +#include "hw/qdev-clock.h" +#include "sysemu/reset.h" +#include "qom/object.h" + +#define TYPE_ZYNQ_MACHINE MACHINE_TYPE_NAME("xilinx-zynq-a9") +OBJECT_DECLARE_SIMPLE_TYPE(ZynqMachineState, ZYNQ_MACHINE) + +/* board base frequency: 33.333333 MHz */ +#define PS_CLK_FREQUENCY (100 * 1000 * 1000 / 3) + +#define NUM_SPI_FLASHES 4 +#define NUM_QSPI_FLASHES 2 +#define NUM_QSPI_BUSSES 2 + +#define FLASH_SIZE (64 * 1024 * 1024) +#define FLASH_SECTOR_SIZE (128 * 1024) + +#define IRQ_OFFSET 32 /* pic interrupts start from index 32 */ + +#define MPCORE_PERIPHBASE 0xF8F00000 +#define ZYNQ_BOARD_MIDR 0x413FC090 + +static const int dma_irqs[8] = { + 46, 47, 48, 49, 72, 73, 74, 75 +}; + +#define BOARD_SETUP_ADDR 0x100 + +#define SLCR_LOCK_OFFSET 0x004 +#define SLCR_UNLOCK_OFFSET 0x008 +#define SLCR_ARM_PLL_OFFSET 0x100 + +#define SLCR_XILINX_UNLOCK_KEY 0xdf0d +#define SLCR_XILINX_LOCK_KEY 0x767b + +#define ZYNQ_SDHCI_CAPABILITIES 0x69ec0080 /* Datasheet: UG585 (v1.12.1) */ + +#define ARMV7_IMM16(x) (extract32((x), 0, 12) | \ + extract32((x), 12, 4) << 16) + +/* Write immediate val to address r0 + addr. r0 should contain base offset + * of the SLCR block. Clobbers r1. + */ + +#define SLCR_WRITE(addr, val) \ + 0xe3001000 + ARMV7_IMM16(extract32((val), 0, 16)), /* movw r1 ... */ \ + 0xe3401000 + ARMV7_IMM16(extract32((val), 16, 16)), /* movt r1 ... */ \ + 0xe5801000 + (addr) + +struct ZynqMachineState { + MachineState parent; + Clock *ps_clk; +}; + +static void zynq_write_board_setup(ARMCPU *cpu, + const struct arm_boot_info *info) +{ + int n; + uint32_t board_setup_blob[] = { + 0xe3a004f8, /* mov r0, #0xf8000000 */ + SLCR_WRITE(SLCR_UNLOCK_OFFSET, SLCR_XILINX_UNLOCK_KEY), + SLCR_WRITE(SLCR_ARM_PLL_OFFSET, 0x00014008), + SLCR_WRITE(SLCR_LOCK_OFFSET, SLCR_XILINX_LOCK_KEY), + 0xe12fff1e, /* bx lr */ + }; + for (n = 0; n < ARRAY_SIZE(board_setup_blob); n++) { + board_setup_blob[n] = tswap32(board_setup_blob[n]); + } + rom_add_blob_fixed("board-setup", board_setup_blob, + sizeof(board_setup_blob), BOARD_SETUP_ADDR); +} + +static struct arm_boot_info zynq_binfo = {}; + +static void gem_init(NICInfo *nd, uint32_t base, qemu_irq irq) +{ + DeviceState *dev; + SysBusDevice *s; + + dev = qdev_new(TYPE_CADENCE_GEM); + if (nd->used) { + qemu_check_nic_model(nd, TYPE_CADENCE_GEM); + qdev_set_nic_properties(dev, nd); + } + object_property_set_int(OBJECT(dev), "phy-addr", 7, &error_abort); + s = SYS_BUS_DEVICE(dev); + sysbus_realize_and_unref(s, &error_fatal); + sysbus_mmio_map(s, 0, base); + sysbus_connect_irq(s, 0, irq); +} + +static inline void zynq_init_spi_flashes(uint32_t base_addr, qemu_irq irq, + bool is_qspi) +{ + DeviceState *dev; + SysBusDevice *busdev; + SSIBus *spi; + DeviceState *flash_dev; + int i, j; + int num_busses = is_qspi ? NUM_QSPI_BUSSES : 1; + int num_ss = is_qspi ? NUM_QSPI_FLASHES : NUM_SPI_FLASHES; + + dev = qdev_new(is_qspi ? "xlnx.ps7-qspi" : "xlnx.ps7-spi"); + qdev_prop_set_uint8(dev, "num-txrx-bytes", is_qspi ? 
4 : 1); + qdev_prop_set_uint8(dev, "num-ss-bits", num_ss); + qdev_prop_set_uint8(dev, "num-busses", num_busses); + busdev = SYS_BUS_DEVICE(dev); + sysbus_realize_and_unref(busdev, &error_fatal); + sysbus_mmio_map(busdev, 0, base_addr); + if (is_qspi) { + sysbus_mmio_map(busdev, 1, 0xFC000000); + } + sysbus_connect_irq(busdev, 0, irq); + + for (i = 0; i < num_busses; ++i) { + char bus_name[16]; + qemu_irq cs_line; + + snprintf(bus_name, 16, "spi%d", i); + spi = (SSIBus *)qdev_get_child_bus(dev, bus_name); + + for (j = 0; j < num_ss; ++j) { + DriveInfo *dinfo = drive_get_next(IF_MTD); + flash_dev = qdev_new("n25q128"); + if (dinfo) { + qdev_prop_set_drive_err(flash_dev, "drive", + blk_by_legacy_dinfo(dinfo), + &error_fatal); + } + qdev_realize_and_unref(flash_dev, BUS(spi), &error_fatal); + + cs_line = qdev_get_gpio_in_named(flash_dev, SSI_GPIO_CS, 0); + sysbus_connect_irq(busdev, i * num_ss + j + 1, cs_line); + } + } + +} + +static void zynq_init(MachineState *machine) +{ + ZynqMachineState *zynq_machine = ZYNQ_MACHINE(machine); + ARMCPU *cpu; + MemoryRegion *address_space_mem = get_system_memory(); + MemoryRegion *ocm_ram = g_new(MemoryRegion, 1); + DeviceState *dev, *slcr; + SysBusDevice *busdev; + qemu_irq pic[64]; + int n; + + /* max 2GB ram */ + if (machine->ram_size > 2 * GiB) { + error_report("RAM size more than 2 GiB is not supported"); + exit(EXIT_FAILURE); + } + + cpu = ARM_CPU(object_new(machine->cpu_type)); + + /* By default A9 CPUs have EL3 enabled. This board does not + * currently support EL3 so the CPU EL3 property is disabled before + * realization. + */ + if (object_property_find(OBJECT(cpu), "has_el3")) { + object_property_set_bool(OBJECT(cpu), "has_el3", false, &error_fatal); + } + + object_property_set_int(OBJECT(cpu), "midr", ZYNQ_BOARD_MIDR, + &error_fatal); + object_property_set_int(OBJECT(cpu), "reset-cbar", MPCORE_PERIPHBASE, + &error_fatal); + qdev_realize(DEVICE(cpu), NULL, &error_fatal); + + /* DDR remapped to address zero. */ + memory_region_add_subregion(address_space_mem, 0, machine->ram); + + /* 256K of on-chip memory */ + memory_region_init_ram(ocm_ram, NULL, "zynq.ocm_ram", 256 * KiB, + &error_fatal); + memory_region_add_subregion(address_space_mem, 0xFFFC0000, ocm_ram); + + DriveInfo *dinfo = drive_get(IF_PFLASH, 0, 0); + + /* AMD */ + pflash_cfi02_register(0xe2000000, "zynq.pflash", FLASH_SIZE, + dinfo ? 
blk_by_legacy_dinfo(dinfo) : NULL, + FLASH_SECTOR_SIZE, 1, + 1, 0x0066, 0x0022, 0x0000, 0x0000, 0x0555, 0x2aa, + 0); + + /* Create the main clock source, and feed slcr with it */ + zynq_machine->ps_clk = CLOCK(object_new(TYPE_CLOCK)); + object_property_add_child(OBJECT(zynq_machine), "ps_clk", + OBJECT(zynq_machine->ps_clk)); + object_unref(OBJECT(zynq_machine->ps_clk)); + clock_set_hz(zynq_machine->ps_clk, PS_CLK_FREQUENCY); + + /* Create slcr, keep a pointer to connect clocks */ + slcr = qdev_new("xilinx-zynq_slcr"); + qdev_connect_clock_in(slcr, "ps_clk", zynq_machine->ps_clk); + sysbus_realize_and_unref(SYS_BUS_DEVICE(slcr), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(slcr), 0, 0xF8000000); + + dev = qdev_new(TYPE_A9MPCORE_PRIV); + qdev_prop_set_uint32(dev, "num-cpu", 1); + busdev = SYS_BUS_DEVICE(dev); + sysbus_realize_and_unref(busdev, &error_fatal); + sysbus_mmio_map(busdev, 0, MPCORE_PERIPHBASE); + sysbus_connect_irq(busdev, 0, + qdev_get_gpio_in(DEVICE(cpu), ARM_CPU_IRQ)); + + for (n = 0; n < 64; n++) { + pic[n] = qdev_get_gpio_in(dev, n); + } + + zynq_init_spi_flashes(0xE0006000, pic[58-IRQ_OFFSET], false); + zynq_init_spi_flashes(0xE0007000, pic[81-IRQ_OFFSET], false); + zynq_init_spi_flashes(0xE000D000, pic[51-IRQ_OFFSET], true); + + sysbus_create_simple(TYPE_CHIPIDEA, 0xE0002000, pic[53 - IRQ_OFFSET]); + sysbus_create_simple(TYPE_CHIPIDEA, 0xE0003000, pic[76 - IRQ_OFFSET]); + + dev = qdev_new(TYPE_CADENCE_UART); + busdev = SYS_BUS_DEVICE(dev); + qdev_prop_set_chr(dev, "chardev", serial_hd(0)); + qdev_connect_clock_in(dev, "refclk", + qdev_get_clock_out(slcr, "uart0_ref_clk")); + sysbus_realize_and_unref(busdev, &error_fatal); + sysbus_mmio_map(busdev, 0, 0xE0000000); + sysbus_connect_irq(busdev, 0, pic[59 - IRQ_OFFSET]); + dev = qdev_new(TYPE_CADENCE_UART); + busdev = SYS_BUS_DEVICE(dev); + qdev_prop_set_chr(dev, "chardev", serial_hd(1)); + qdev_connect_clock_in(dev, "refclk", + qdev_get_clock_out(slcr, "uart1_ref_clk")); + sysbus_realize_and_unref(busdev, &error_fatal); + sysbus_mmio_map(busdev, 0, 0xE0001000); + sysbus_connect_irq(busdev, 0, pic[82 - IRQ_OFFSET]); + + sysbus_create_varargs("cadence_ttc", 0xF8001000, + pic[42-IRQ_OFFSET], pic[43-IRQ_OFFSET], pic[44-IRQ_OFFSET], NULL); + sysbus_create_varargs("cadence_ttc", 0xF8002000, + pic[69-IRQ_OFFSET], pic[70-IRQ_OFFSET], pic[71-IRQ_OFFSET], NULL); + + gem_init(&nd_table[0], 0xE000B000, pic[54-IRQ_OFFSET]); + gem_init(&nd_table[1], 0xE000C000, pic[77-IRQ_OFFSET]); + + for (n = 0; n < 2; n++) { + int hci_irq = n ? 79 : 56; + hwaddr hci_addr = n ? 0xE0101000 : 0xE0100000; + DriveInfo *di; + BlockBackend *blk; + DeviceState *carddev; + + /* Compatible with: + * - SD Host Controller Specification Version 2.0 Part A2 + * - SDIO Specification Version 2.0 + * - MMC Specification Version 3.31 + */ + dev = qdev_new(TYPE_SYSBUS_SDHCI); + qdev_prop_set_uint8(dev, "sd-spec-version", 2); + qdev_prop_set_uint64(dev, "capareg", ZYNQ_SDHCI_CAPABILITIES); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, hci_addr); + sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, pic[hci_irq - IRQ_OFFSET]); + + di = drive_get_next(IF_SD); + blk = di ? 
blk_by_legacy_dinfo(di) : NULL; + carddev = qdev_new(TYPE_SD_CARD); + qdev_prop_set_drive_err(carddev, "drive", blk, &error_fatal); + qdev_realize_and_unref(carddev, qdev_get_child_bus(dev, "sd-bus"), + &error_fatal); + } + + dev = qdev_new(TYPE_ZYNQ_XADC); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, 0xF8007100); + sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, pic[39-IRQ_OFFSET]); + + dev = qdev_new("pl330"); + object_property_set_link(OBJECT(dev), "memory", + OBJECT(address_space_mem), + &error_fatal); + qdev_prop_set_uint8(dev, "num_chnls", 8); + qdev_prop_set_uint8(dev, "num_periph_req", 4); + qdev_prop_set_uint8(dev, "num_events", 16); + + qdev_prop_set_uint8(dev, "data_width", 64); + qdev_prop_set_uint8(dev, "wr_cap", 8); + qdev_prop_set_uint8(dev, "wr_q_dep", 16); + qdev_prop_set_uint8(dev, "rd_cap", 8); + qdev_prop_set_uint8(dev, "rd_q_dep", 16); + qdev_prop_set_uint16(dev, "data_buffer_dep", 256); + + busdev = SYS_BUS_DEVICE(dev); + sysbus_realize_and_unref(busdev, &error_fatal); + sysbus_mmio_map(busdev, 0, 0xF8003000); + sysbus_connect_irq(busdev, 0, pic[45-IRQ_OFFSET]); /* abort irq line */ + for (n = 0; n < ARRAY_SIZE(dma_irqs); ++n) { /* event irqs */ + sysbus_connect_irq(busdev, n + 1, pic[dma_irqs[n] - IRQ_OFFSET]); + } + + dev = qdev_new("xlnx.ps7-dev-cfg"); + busdev = SYS_BUS_DEVICE(dev); + sysbus_realize_and_unref(busdev, &error_fatal); + sysbus_connect_irq(busdev, 0, pic[40 - IRQ_OFFSET]); + sysbus_mmio_map(busdev, 0, 0xF8007000); + + zynq_binfo.ram_size = machine->ram_size; + zynq_binfo.nb_cpus = 1; + zynq_binfo.board_id = 0xd32; + zynq_binfo.loader_start = 0; + zynq_binfo.board_setup_addr = BOARD_SETUP_ADDR; + zynq_binfo.write_board_setup = zynq_write_board_setup; + + arm_load_kernel(ARM_CPU(first_cpu), machine, &zynq_binfo); +} + +static void zynq_machine_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + mc->desc = "Xilinx Zynq Platform Baseboard for Cortex-A9"; + mc->init = zynq_init; + mc->max_cpus = 1; + mc->no_sdcard = 1; + mc->ignore_memory_transaction_failures = true; + mc->default_cpu_type = ARM_CPU_TYPE_NAME("cortex-a9"); + mc->default_ram_id = "zynq.ext_ram"; +} + +static const TypeInfo zynq_machine_type = { + .name = TYPE_ZYNQ_MACHINE, + .parent = TYPE_MACHINE, + .class_init = zynq_machine_class_init, + .instance_size = sizeof(ZynqMachineState), +}; + +static void zynq_machine_register_types(void) +{ + type_register_static(&zynq_machine_type); +} + +type_init(zynq_machine_register_types) diff --git a/hw/arm/xlnx-versal-virt.c b/hw/arm/xlnx-versal-virt.c new file mode 100644 index 000000000..d2f55e29b --- /dev/null +++ b/hw/arm/xlnx-versal-virt.c @@ -0,0 +1,726 @@ +/* + * Xilinx Versal Virtual board. + * + * Copyright (c) 2018 Xilinx Inc. + * Written by Edgar E. Iglesias + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 or + * (at your option) any later version. 
+ */ + +#include "qemu/osdep.h" +#include "qemu/error-report.h" +#include "qapi/error.h" +#include "sysemu/device_tree.h" +#include "hw/boards.h" +#include "hw/sysbus.h" +#include "hw/arm/sysbus-fdt.h" +#include "hw/arm/fdt.h" +#include "cpu.h" +#include "hw/qdev-properties.h" +#include "hw/arm/xlnx-versal.h" +#include "qom/object.h" + +#define TYPE_XLNX_VERSAL_VIRT_MACHINE MACHINE_TYPE_NAME("xlnx-versal-virt") +OBJECT_DECLARE_SIMPLE_TYPE(VersalVirt, XLNX_VERSAL_VIRT_MACHINE) + +struct VersalVirt { + MachineState parent_obj; + + Versal soc; + + void *fdt; + int fdt_size; + struct { + uint32_t gic; + uint32_t ethernet_phy[2]; + uint32_t clk_125Mhz; + uint32_t clk_25Mhz; + uint32_t usb; + uint32_t dwc; + } phandle; + struct arm_boot_info binfo; + + struct { + bool secure; + } cfg; +}; + +static void fdt_create(VersalVirt *s) +{ + MachineClass *mc = MACHINE_GET_CLASS(s); + int i; + + s->fdt = create_device_tree(&s->fdt_size); + if (!s->fdt) { + error_report("create_device_tree() failed"); + exit(1); + } + + /* Allocate all phandles. */ + s->phandle.gic = qemu_fdt_alloc_phandle(s->fdt); + for (i = 0; i < ARRAY_SIZE(s->phandle.ethernet_phy); i++) { + s->phandle.ethernet_phy[i] = qemu_fdt_alloc_phandle(s->fdt); + } + s->phandle.clk_25Mhz = qemu_fdt_alloc_phandle(s->fdt); + s->phandle.clk_125Mhz = qemu_fdt_alloc_phandle(s->fdt); + + s->phandle.usb = qemu_fdt_alloc_phandle(s->fdt); + s->phandle.dwc = qemu_fdt_alloc_phandle(s->fdt); + /* Create /chosen node for load_dtb. */ + qemu_fdt_add_subnode(s->fdt, "/chosen"); + + /* Header */ + qemu_fdt_setprop_cell(s->fdt, "/", "interrupt-parent", s->phandle.gic); + qemu_fdt_setprop_cell(s->fdt, "/", "#size-cells", 0x2); + qemu_fdt_setprop_cell(s->fdt, "/", "#address-cells", 0x2); + qemu_fdt_setprop_string(s->fdt, "/", "model", mc->desc); + qemu_fdt_setprop_string(s->fdt, "/", "compatible", "xlnx-versal-virt"); +} + +static void fdt_add_clk_node(VersalVirt *s, const char *name, + unsigned int freq_hz, uint32_t phandle) +{ + qemu_fdt_add_subnode(s->fdt, name); + qemu_fdt_setprop_cell(s->fdt, name, "phandle", phandle); + qemu_fdt_setprop_cell(s->fdt, name, "clock-frequency", freq_hz); + qemu_fdt_setprop_cell(s->fdt, name, "#clock-cells", 0x0); + qemu_fdt_setprop_string(s->fdt, name, "compatible", "fixed-clock"); + qemu_fdt_setprop(s->fdt, name, "u-boot,dm-pre-reloc", NULL, 0); +} + +static void fdt_add_cpu_nodes(VersalVirt *s, uint32_t psci_conduit) +{ + int i; + + qemu_fdt_add_subnode(s->fdt, "/cpus"); + qemu_fdt_setprop_cell(s->fdt, "/cpus", "#size-cells", 0x0); + qemu_fdt_setprop_cell(s->fdt, "/cpus", "#address-cells", 1); + + for (i = XLNX_VERSAL_NR_ACPUS - 1; i >= 0; i--) { + char *name = g_strdup_printf("/cpus/cpu@%d", i); + ARMCPU *armcpu = ARM_CPU(qemu_get_cpu(i)); + + qemu_fdt_add_subnode(s->fdt, name); + qemu_fdt_setprop_cell(s->fdt, name, "reg", armcpu->mp_affinity); + if (psci_conduit != QEMU_PSCI_CONDUIT_DISABLED) { + qemu_fdt_setprop_string(s->fdt, name, "enable-method", "psci"); + } + qemu_fdt_setprop_string(s->fdt, name, "device_type", "cpu"); + qemu_fdt_setprop_string(s->fdt, name, "compatible", + armcpu->dtb_compatible); + g_free(name); + } +} + +static void fdt_add_gic_nodes(VersalVirt *s) +{ + char *nodename; + + nodename = g_strdup_printf("/gic@%x", MM_GIC_APU_DIST_MAIN); + qemu_fdt_add_subnode(s->fdt, nodename); + qemu_fdt_setprop_cell(s->fdt, nodename, "phandle", s->phandle.gic); + qemu_fdt_setprop_cells(s->fdt, nodename, "interrupts", + GIC_FDT_IRQ_TYPE_PPI, VERSAL_GIC_MAINT_IRQ, + GIC_FDT_IRQ_FLAGS_LEVEL_HI); + 
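+    /* The empty "interrupt-controller" property marks this node as such. */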
qemu_fdt_setprop(s->fdt, nodename, "interrupt-controller", NULL, 0); + qemu_fdt_setprop_sized_cells(s->fdt, nodename, "reg", + 2, MM_GIC_APU_DIST_MAIN, + 2, MM_GIC_APU_DIST_MAIN_SIZE, + 2, MM_GIC_APU_REDIST_0, + 2, MM_GIC_APU_REDIST_0_SIZE); + qemu_fdt_setprop_cell(s->fdt, nodename, "#interrupt-cells", 3); + qemu_fdt_setprop_string(s->fdt, nodename, "compatible", "arm,gic-v3"); + g_free(nodename); +} + +static void fdt_add_timer_nodes(VersalVirt *s) +{ + const char compat[] = "arm,armv8-timer"; + uint32_t irqflags = GIC_FDT_IRQ_FLAGS_LEVEL_HI; + + qemu_fdt_add_subnode(s->fdt, "/timer"); + qemu_fdt_setprop_cells(s->fdt, "/timer", "interrupts", + GIC_FDT_IRQ_TYPE_PPI, VERSAL_TIMER_S_EL1_IRQ, irqflags, + GIC_FDT_IRQ_TYPE_PPI, VERSAL_TIMER_NS_EL1_IRQ, irqflags, + GIC_FDT_IRQ_TYPE_PPI, VERSAL_TIMER_VIRT_IRQ, irqflags, + GIC_FDT_IRQ_TYPE_PPI, VERSAL_TIMER_NS_EL2_IRQ, irqflags); + qemu_fdt_setprop(s->fdt, "/timer", "compatible", + compat, sizeof(compat)); +} + +static void fdt_add_usb_xhci_nodes(VersalVirt *s) +{ + const char clocknames[] = "bus_clk\0ref_clk"; + const char irq_name[] = "dwc_usb3"; + const char compatVersalDWC3[] = "xlnx,versal-dwc3"; + const char compatDWC3[] = "snps,dwc3"; + char *name = g_strdup_printf("/usb@%" PRIx32, MM_USB2_CTRL_REGS); + + qemu_fdt_add_subnode(s->fdt, name); + qemu_fdt_setprop(s->fdt, name, "compatible", + compatVersalDWC3, sizeof(compatVersalDWC3)); + qemu_fdt_setprop_sized_cells(s->fdt, name, "reg", + 2, MM_USB2_CTRL_REGS, + 2, MM_USB2_CTRL_REGS_SIZE); + qemu_fdt_setprop(s->fdt, name, "clock-names", + clocknames, sizeof(clocknames)); + qemu_fdt_setprop_cells(s->fdt, name, "clocks", + s->phandle.clk_25Mhz, s->phandle.clk_125Mhz); + qemu_fdt_setprop(s->fdt, name, "ranges", NULL, 0); + qemu_fdt_setprop_cell(s->fdt, name, "#address-cells", 2); + qemu_fdt_setprop_cell(s->fdt, name, "#size-cells", 2); + qemu_fdt_setprop_cell(s->fdt, name, "phandle", s->phandle.usb); + g_free(name); + + name = g_strdup_printf("/usb@%" PRIx32 "/dwc3@%" PRIx32, + MM_USB2_CTRL_REGS, MM_USB_0); + qemu_fdt_add_subnode(s->fdt, name); + qemu_fdt_setprop(s->fdt, name, "compatible", + compatDWC3, sizeof(compatDWC3)); + qemu_fdt_setprop_sized_cells(s->fdt, name, "reg", + 2, MM_USB_0, 2, MM_USB_0_SIZE); + qemu_fdt_setprop(s->fdt, name, "interrupt-names", + irq_name, sizeof(irq_name)); + qemu_fdt_setprop_cells(s->fdt, name, "interrupts", + GIC_FDT_IRQ_TYPE_SPI, VERSAL_USB0_IRQ_0, + GIC_FDT_IRQ_FLAGS_LEVEL_HI); + qemu_fdt_setprop_cell(s->fdt, name, + "snps,quirk-frame-length-adjustment", 0x20); + qemu_fdt_setprop_cells(s->fdt, name, "#stream-id-cells", 1); + qemu_fdt_setprop_string(s->fdt, name, "dr_mode", "host"); + qemu_fdt_setprop_string(s->fdt, name, "phy-names", "usb3-phy"); + qemu_fdt_setprop(s->fdt, name, "snps,dis_u2_susphy_quirk", NULL, 0); + qemu_fdt_setprop(s->fdt, name, "snps,dis_u3_susphy_quirk", NULL, 0); + qemu_fdt_setprop(s->fdt, name, "snps,refclk_fladj", NULL, 0); + qemu_fdt_setprop(s->fdt, name, "snps,mask_phy_reset", NULL, 0); + qemu_fdt_setprop_cell(s->fdt, name, "phandle", s->phandle.dwc); + qemu_fdt_setprop_string(s->fdt, name, "maximum-speed", "high-speed"); + g_free(name); +} + +static void fdt_add_uart_nodes(VersalVirt *s) +{ + uint64_t addrs[] = { MM_UART1, MM_UART0 }; + unsigned int irqs[] = { VERSAL_UART1_IRQ_0, VERSAL_UART0_IRQ_0 }; + const char compat[] = "arm,pl011\0arm,sbsa-uart"; + const char clocknames[] = "uartclk\0apb_pclk"; + int i; + + for (i = 0; i < ARRAY_SIZE(addrs); i++) { + char *name = g_strdup_printf("/uart@%" PRIx64, addrs[i]); + 
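+        /* Each controller is both "arm,pl011" and "arm,sbsa-uart" compatible;
+         * UART0 is also selected as the /chosen stdout-path below. */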
qemu_fdt_add_subnode(s->fdt, name); + qemu_fdt_setprop_cell(s->fdt, name, "current-speed", 115200); + qemu_fdt_setprop_cells(s->fdt, name, "clocks", + s->phandle.clk_125Mhz, s->phandle.clk_125Mhz); + qemu_fdt_setprop(s->fdt, name, "clock-names", + clocknames, sizeof(clocknames)); + + qemu_fdt_setprop_cells(s->fdt, name, "interrupts", + GIC_FDT_IRQ_TYPE_SPI, irqs[i], + GIC_FDT_IRQ_FLAGS_LEVEL_HI); + qemu_fdt_setprop_sized_cells(s->fdt, name, "reg", + 2, addrs[i], 2, 0x1000); + qemu_fdt_setprop(s->fdt, name, "compatible", + compat, sizeof(compat)); + qemu_fdt_setprop(s->fdt, name, "u-boot,dm-pre-reloc", NULL, 0); + + if (addrs[i] == MM_UART0) { + /* Select UART0. */ + qemu_fdt_setprop_string(s->fdt, "/chosen", "stdout-path", name); + } + g_free(name); + } +} + +static void fdt_add_fixed_link_nodes(VersalVirt *s, char *gemname, + uint32_t phandle) +{ + char *name = g_strdup_printf("%s/fixed-link", gemname); + + qemu_fdt_add_subnode(s->fdt, name); + qemu_fdt_setprop_cell(s->fdt, name, "phandle", phandle); + qemu_fdt_setprop(s->fdt, name, "full-duplex", NULL, 0); + qemu_fdt_setprop_cell(s->fdt, name, "speed", 1000); + g_free(name); +} + +static void fdt_add_gem_nodes(VersalVirt *s) +{ + uint64_t addrs[] = { MM_GEM1, MM_GEM0 }; + unsigned int irqs[] = { VERSAL_GEM1_IRQ_0, VERSAL_GEM0_IRQ_0 }; + const char clocknames[] = "pclk\0hclk\0tx_clk\0rx_clk"; + const char compat_gem[] = "cdns,zynqmp-gem\0cdns,gem"; + int i; + + for (i = 0; i < ARRAY_SIZE(addrs); i++) { + char *name = g_strdup_printf("/ethernet@%" PRIx64, addrs[i]); + qemu_fdt_add_subnode(s->fdt, name); + + fdt_add_fixed_link_nodes(s, name, s->phandle.ethernet_phy[i]); + qemu_fdt_setprop_string(s->fdt, name, "phy-mode", "rgmii-id"); + qemu_fdt_setprop_cell(s->fdt, name, "phy-handle", + s->phandle.ethernet_phy[i]); + qemu_fdt_setprop_cells(s->fdt, name, "clocks", + s->phandle.clk_25Mhz, s->phandle.clk_25Mhz, + s->phandle.clk_125Mhz, s->phandle.clk_125Mhz); + qemu_fdt_setprop(s->fdt, name, "clock-names", + clocknames, sizeof(clocknames)); + qemu_fdt_setprop_cells(s->fdt, name, "interrupts", + GIC_FDT_IRQ_TYPE_SPI, irqs[i], + GIC_FDT_IRQ_FLAGS_LEVEL_HI, + GIC_FDT_IRQ_TYPE_SPI, irqs[i], + GIC_FDT_IRQ_FLAGS_LEVEL_HI); + qemu_fdt_setprop_sized_cells(s->fdt, name, "reg", + 2, addrs[i], 2, 0x1000); + qemu_fdt_setprop(s->fdt, name, "compatible", + compat_gem, sizeof(compat_gem)); + qemu_fdt_setprop_cell(s->fdt, name, "#address-cells", 1); + qemu_fdt_setprop_cell(s->fdt, name, "#size-cells", 0); + g_free(name); + } +} + +static void fdt_add_zdma_nodes(VersalVirt *s) +{ + const char clocknames[] = "clk_main\0clk_apb"; + const char compat[] = "xlnx,zynqmp-dma-1.0"; + int i; + + for (i = XLNX_VERSAL_NR_ADMAS - 1; i >= 0; i--) { + uint64_t addr = MM_ADMA_CH0 + MM_ADMA_CH0_SIZE * i; + char *name = g_strdup_printf("/dma@%" PRIx64, addr); + + qemu_fdt_add_subnode(s->fdt, name); + + qemu_fdt_setprop_cell(s->fdt, name, "xlnx,bus-width", 64); + qemu_fdt_setprop_cells(s->fdt, name, "clocks", + s->phandle.clk_25Mhz, s->phandle.clk_25Mhz); + qemu_fdt_setprop(s->fdt, name, "clock-names", + clocknames, sizeof(clocknames)); + qemu_fdt_setprop_cells(s->fdt, name, "interrupts", + GIC_FDT_IRQ_TYPE_SPI, VERSAL_ADMA_IRQ_0 + i, + GIC_FDT_IRQ_FLAGS_LEVEL_HI); + qemu_fdt_setprop_sized_cells(s->fdt, name, "reg", + 2, addr, 2, 0x1000); + qemu_fdt_setprop(s->fdt, name, "compatible", compat, sizeof(compat)); + g_free(name); + } +} + +static void fdt_add_sd_nodes(VersalVirt *s) +{ + const char clocknames[] = "clk_xin\0clk_ahb"; + const char compat[] = "arasan,sdhci-8.9a"; + int 
i; + + for (i = ARRAY_SIZE(s->soc.pmc.iou.sd) - 1; i >= 0; i--) { + uint64_t addr = MM_PMC_SD0 + MM_PMC_SD0_SIZE * i; + char *name = g_strdup_printf("/sdhci@%" PRIx64, addr); + + qemu_fdt_add_subnode(s->fdt, name); + + qemu_fdt_setprop_cells(s->fdt, name, "clocks", + s->phandle.clk_25Mhz, s->phandle.clk_25Mhz); + qemu_fdt_setprop(s->fdt, name, "clock-names", + clocknames, sizeof(clocknames)); + qemu_fdt_setprop_cells(s->fdt, name, "interrupts", + GIC_FDT_IRQ_TYPE_SPI, VERSAL_SD0_IRQ_0 + i * 2, + GIC_FDT_IRQ_FLAGS_LEVEL_HI); + qemu_fdt_setprop_sized_cells(s->fdt, name, "reg", + 2, addr, 2, MM_PMC_SD0_SIZE); + qemu_fdt_setprop(s->fdt, name, "compatible", compat, sizeof(compat)); + g_free(name); + } +} + +static void fdt_add_rtc_node(VersalVirt *s) +{ + const char compat[] = "xlnx,zynqmp-rtc"; + const char interrupt_names[] = "alarm\0sec"; + char *name = g_strdup_printf("/rtc@%x", MM_PMC_RTC); + + qemu_fdt_add_subnode(s->fdt, name); + + qemu_fdt_setprop_cells(s->fdt, name, "interrupts", + GIC_FDT_IRQ_TYPE_SPI, VERSAL_RTC_ALARM_IRQ, + GIC_FDT_IRQ_FLAGS_LEVEL_HI, + GIC_FDT_IRQ_TYPE_SPI, VERSAL_RTC_SECONDS_IRQ, + GIC_FDT_IRQ_FLAGS_LEVEL_HI); + qemu_fdt_setprop(s->fdt, name, "interrupt-names", + interrupt_names, sizeof(interrupt_names)); + qemu_fdt_setprop_sized_cells(s->fdt, name, "reg", + 2, MM_PMC_RTC, 2, MM_PMC_RTC_SIZE); + qemu_fdt_setprop(s->fdt, name, "compatible", compat, sizeof(compat)); + g_free(name); +} + +static void fdt_add_bbram_node(VersalVirt *s) +{ + const char compat[] = TYPE_XLNX_BBRAM; + const char interrupt_names[] = "bbram-error"; + char *name = g_strdup_printf("/bbram@%x", MM_PMC_BBRAM_CTRL); + + qemu_fdt_add_subnode(s->fdt, name); + + qemu_fdt_setprop_cells(s->fdt, name, "interrupts", + GIC_FDT_IRQ_TYPE_SPI, VERSAL_BBRAM_APB_IRQ_0, + GIC_FDT_IRQ_FLAGS_LEVEL_HI); + qemu_fdt_setprop(s->fdt, name, "interrupt-names", + interrupt_names, sizeof(interrupt_names)); + qemu_fdt_setprop_sized_cells(s->fdt, name, "reg", + 2, MM_PMC_BBRAM_CTRL, + 2, MM_PMC_BBRAM_CTRL_SIZE); + qemu_fdt_setprop(s->fdt, name, "compatible", compat, sizeof(compat)); + g_free(name); +} + +static void fdt_add_efuse_ctrl_node(VersalVirt *s) +{ + const char compat[] = TYPE_XLNX_VERSAL_EFUSE_CTRL; + const char interrupt_names[] = "pmc_efuse"; + char *name = g_strdup_printf("/pmc_efuse@%x", MM_PMC_EFUSE_CTRL); + + qemu_fdt_add_subnode(s->fdt, name); + + qemu_fdt_setprop_cells(s->fdt, name, "interrupts", + GIC_FDT_IRQ_TYPE_SPI, VERSAL_EFUSE_IRQ, + GIC_FDT_IRQ_FLAGS_LEVEL_HI); + qemu_fdt_setprop(s->fdt, name, "interrupt-names", + interrupt_names, sizeof(interrupt_names)); + qemu_fdt_setprop_sized_cells(s->fdt, name, "reg", + 2, MM_PMC_EFUSE_CTRL, + 2, MM_PMC_EFUSE_CTRL_SIZE); + qemu_fdt_setprop(s->fdt, name, "compatible", compat, sizeof(compat)); + g_free(name); +} + +static void fdt_add_efuse_cache_node(VersalVirt *s) +{ + const char compat[] = TYPE_XLNX_VERSAL_EFUSE_CACHE; + char *name = g_strdup_printf("/xlnx_pmc_efuse_cache@%x", + MM_PMC_EFUSE_CACHE); + + qemu_fdt_add_subnode(s->fdt, name); + + qemu_fdt_setprop_sized_cells(s->fdt, name, "reg", + 2, MM_PMC_EFUSE_CACHE, + 2, MM_PMC_EFUSE_CACHE_SIZE); + qemu_fdt_setprop(s->fdt, name, "compatible", compat, sizeof(compat)); + g_free(name); +} + +static void fdt_nop_memory_nodes(void *fdt, Error **errp) +{ + Error *err = NULL; + char **node_path; + int n = 0; + + node_path = qemu_fdt_node_unit_path(fdt, "memory", &err); + if (err) { + error_propagate(errp, err); + return; + } + while (node_path[n]) { + if (g_str_has_prefix(node_path[n], "/memory")) { + 
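+            /* NOP any pre-existing /memory node; fdt_add_memory_nodes()
+             * re-adds one matching the configured RAM size. */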
qemu_fdt_nop_node(fdt, node_path[n]); + } + n++; + } + g_strfreev(node_path); +} + +static void fdt_add_memory_nodes(VersalVirt *s, void *fdt, uint64_t ram_size) +{ + /* Describes the various split DDR access regions. */ + static const struct { + uint64_t base; + uint64_t size; + } addr_ranges[] = { + { MM_TOP_DDR, MM_TOP_DDR_SIZE }, + { MM_TOP_DDR_2, MM_TOP_DDR_2_SIZE }, + { MM_TOP_DDR_3, MM_TOP_DDR_3_SIZE }, + { MM_TOP_DDR_4, MM_TOP_DDR_4_SIZE } + }; + uint64_t mem_reg_prop[8] = {0}; + uint64_t size = ram_size; + Error *err = NULL; + char *name; + int i; + + fdt_nop_memory_nodes(fdt, &err); + if (err) { + error_report_err(err); + return; + } + + name = g_strdup_printf("/memory@%x", MM_TOP_DDR); + for (i = 0; i < ARRAY_SIZE(addr_ranges) && size; i++) { + uint64_t mapsize; + + mapsize = size < addr_ranges[i].size ? size : addr_ranges[i].size; + + mem_reg_prop[i * 2] = addr_ranges[i].base; + mem_reg_prop[i * 2 + 1] = mapsize; + size -= mapsize; + } + qemu_fdt_add_subnode(fdt, name); + qemu_fdt_setprop_string(fdt, name, "device_type", "memory"); + + switch (i) { + case 1: + qemu_fdt_setprop_sized_cells(fdt, name, "reg", + 2, mem_reg_prop[0], + 2, mem_reg_prop[1]); + break; + case 2: + qemu_fdt_setprop_sized_cells(fdt, name, "reg", + 2, mem_reg_prop[0], + 2, mem_reg_prop[1], + 2, mem_reg_prop[2], + 2, mem_reg_prop[3]); + break; + case 3: + qemu_fdt_setprop_sized_cells(fdt, name, "reg", + 2, mem_reg_prop[0], + 2, mem_reg_prop[1], + 2, mem_reg_prop[2], + 2, mem_reg_prop[3], + 2, mem_reg_prop[4], + 2, mem_reg_prop[5]); + break; + case 4: + qemu_fdt_setprop_sized_cells(fdt, name, "reg", + 2, mem_reg_prop[0], + 2, mem_reg_prop[1], + 2, mem_reg_prop[2], + 2, mem_reg_prop[3], + 2, mem_reg_prop[4], + 2, mem_reg_prop[5], + 2, mem_reg_prop[6], + 2, mem_reg_prop[7]); + break; + default: + g_assert_not_reached(); + } + g_free(name); +} + +static void versal_virt_modify_dtb(const struct arm_boot_info *binfo, + void *fdt) +{ + VersalVirt *s = container_of(binfo, VersalVirt, binfo); + + fdt_add_memory_nodes(s, fdt, binfo->ram_size); +} + +static void *versal_virt_get_dtb(const struct arm_boot_info *binfo, + int *fdt_size) +{ + const VersalVirt *board = container_of(binfo, VersalVirt, binfo); + + *fdt_size = board->fdt_size; + return board->fdt; +} + +#define NUM_VIRTIO_TRANSPORT 8 +static void create_virtio_regions(VersalVirt *s) +{ + int virtio_mmio_size = 0x200; + int i; + + for (i = 0; i < NUM_VIRTIO_TRANSPORT; i++) { + char *name = g_strdup_printf("virtio%d", i); + hwaddr base = MM_TOP_RSVD + i * virtio_mmio_size; + int irq = VERSAL_RSVD_IRQ_FIRST + i; + MemoryRegion *mr; + DeviceState *dev; + qemu_irq pic_irq; + + pic_irq = qdev_get_gpio_in(DEVICE(&s->soc.fpd.apu.gic), irq); + dev = qdev_new("virtio-mmio"); + object_property_add_child(OBJECT(&s->soc), name, OBJECT(dev)); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, pic_irq); + mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 0); + memory_region_add_subregion(&s->soc.mr_ps, base, mr); + g_free(name); + } + + for (i = 0; i < NUM_VIRTIO_TRANSPORT; i++) { + hwaddr base = MM_TOP_RSVD + i * virtio_mmio_size; + int irq = VERSAL_RSVD_IRQ_FIRST + i; + char *name = g_strdup_printf("/virtio_mmio@%" PRIx64, base); + + qemu_fdt_add_subnode(s->fdt, name); + qemu_fdt_setprop(s->fdt, name, "dma-coherent", NULL, 0); + qemu_fdt_setprop_cells(s->fdt, name, "interrupts", + GIC_FDT_IRQ_TYPE_SPI, irq, + GIC_FDT_IRQ_FLAGS_EDGE_LO_HI); + qemu_fdt_setprop_sized_cells(s->fdt, name, "reg", + 2, base, 2, 
virtio_mmio_size);
+        qemu_fdt_setprop_string(s->fdt, name, "compatible", "virtio,mmio");
+        g_free(name);
+    }
+}
+
+static void bbram_attach_drive(XlnxBBRam *dev)
+{
+    DriveInfo *dinfo;
+    BlockBackend *blk;
+
+    dinfo = drive_get_by_index(IF_PFLASH, 0);
+    blk = dinfo ? blk_by_legacy_dinfo(dinfo) : NULL;
+    if (blk) {
+        qdev_prop_set_drive(DEVICE(dev), "drive", blk);
+    }
+}
+
+static void efuse_attach_drive(XlnxEFuse *dev)
+{
+    DriveInfo *dinfo;
+    BlockBackend *blk;
+
+    dinfo = drive_get_by_index(IF_PFLASH, 1);
+    blk = dinfo ? blk_by_legacy_dinfo(dinfo) : NULL;
+    if (blk) {
+        qdev_prop_set_drive(DEVICE(dev), "drive", blk);
+    }
+}
+
+static void sd_plugin_card(SDHCIState *sd, DriveInfo *di)
+{
+    BlockBackend *blk = di ? blk_by_legacy_dinfo(di) : NULL;
+    DeviceState *card;
+
+    card = qdev_new(TYPE_SD_CARD);
+    object_property_add_child(OBJECT(sd), "card[*]", OBJECT(card));
+    qdev_prop_set_drive_err(card, "drive", blk, &error_fatal);
+    qdev_realize_and_unref(card, qdev_get_child_bus(DEVICE(sd), "sd-bus"),
+                           &error_fatal);
+}
+
+static void versal_virt_init(MachineState *machine)
+{
+    VersalVirt *s = XLNX_VERSAL_VIRT_MACHINE(machine);
+    int psci_conduit = QEMU_PSCI_CONDUIT_DISABLED;
+    int i;
+
+    /*
+     * If the user provides an Operating System to be loaded, we expect them
+     * to use the -kernel command line option.
+     *
+     * Users can load firmware or boot-loaders with the -device loader options.
+     *
+     * When loading an OS, we generate a dtb and let arm_load_kernel() select
+     * where it gets loaded. This dtb will be passed to the kernel in x0.
+     *
+     * If there's no -kernel option, we generate a DTB and place it at 0x1000
+     * for the bootloaders or firmware to pick up.
+     *
+     * If users want to provide their own DTB, they can use the -dtb option.
+     * These dtb's will have their memory nodes modified to match QEMU's
+     * selected ram_size option before they get passed to the kernel or fw.
+     *
+     * When loading an OS, we turn on QEMU's PSCI implementation with SMC
+     * as the PSCI conduit. When there's no -kernel, we assume the user
+     * provides EL3 firmware to handle PSCI.
+     */
+    if (machine->kernel_filename) {
+        psci_conduit = QEMU_PSCI_CONDUIT_SMC;
+    }
+
+    object_initialize_child(OBJECT(machine), "xlnx-versal", &s->soc,
+                            TYPE_XLNX_VERSAL);
+    object_property_set_link(OBJECT(&s->soc), "ddr", OBJECT(machine->ram),
+                             &error_abort);
+    object_property_set_int(OBJECT(&s->soc), "psci-conduit", psci_conduit,
+                            &error_abort);
+    sysbus_realize(SYS_BUS_DEVICE(&s->soc), &error_fatal);
+
+    fdt_create(s);
+    create_virtio_regions(s);
+    fdt_add_gem_nodes(s);
+    fdt_add_uart_nodes(s);
+    fdt_add_gic_nodes(s);
+    fdt_add_timer_nodes(s);
+    fdt_add_zdma_nodes(s);
+    fdt_add_usb_xhci_nodes(s);
+    fdt_add_sd_nodes(s);
+    fdt_add_rtc_node(s);
+    fdt_add_bbram_node(s);
+    fdt_add_efuse_ctrl_node(s);
+    fdt_add_efuse_cache_node(s);
+    fdt_add_cpu_nodes(s, psci_conduit);
+    fdt_add_clk_node(s, "/clk125", 125000000, s->phandle.clk_125Mhz);
+    fdt_add_clk_node(s, "/clk25", 25000000, s->phandle.clk_25Mhz);
+
+    /* Make the APU CPU address space visible to virtio and other
+     * modules unaware of multiple address-spaces. */
+    memory_region_add_subregion_overlap(get_system_memory(),
+                                        0, &s->soc.fpd.apu.mr, 0);
+
+    /* Attach bbram backend, if given */
+    bbram_attach_drive(&s->soc.pmc.bbram);
+
+    /* Attach efuse backend, if given */
+    efuse_attach_drive(&s->soc.pmc.efuse);
+
+    /* Plug in SD cards.
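+     * Each controller is populated from the next '-drive if=sd' backend,
+     * if one was given.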
+     */
+    for (i = 0; i < ARRAY_SIZE(s->soc.pmc.iou.sd); i++) {
+        sd_plugin_card(&s->soc.pmc.iou.sd[i], drive_get_next(IF_SD));
+    }
+
+    s->binfo.ram_size = machine->ram_size;
+    s->binfo.loader_start = 0x0;
+    s->binfo.get_dtb = versal_virt_get_dtb;
+    s->binfo.modify_dtb = versal_virt_modify_dtb;
+    if (machine->kernel_filename) {
+        arm_load_kernel(&s->soc.fpd.apu.cpu[0], machine, &s->binfo);
+    } else {
+        AddressSpace *as = arm_boot_address_space(&s->soc.fpd.apu.cpu[0],
+                                                  &s->binfo);
+        /* Some boot-loaders (e.g. u-boot) don't like blobs at address 0 (NULL).
+         * Offset things by 4K. */
+        s->binfo.loader_start = 0x1000;
+        s->binfo.dtb_limit = 0x1000000;
+        if (arm_load_dtb(s->binfo.loader_start,
+                         &s->binfo, s->binfo.dtb_limit, as, machine) < 0) {
+            exit(EXIT_FAILURE);
+        }
+    }
+}
+
+static void versal_virt_machine_instance_init(Object *obj)
+{
+}
+
+static void versal_virt_machine_class_init(ObjectClass *oc, void *data)
+{
+    MachineClass *mc = MACHINE_CLASS(oc);
+
+    mc->desc = "Xilinx Versal Virtual development board";
+    mc->init = versal_virt_init;
+    mc->min_cpus = XLNX_VERSAL_NR_ACPUS;
+    mc->max_cpus = XLNX_VERSAL_NR_ACPUS;
+    mc->default_cpus = XLNX_VERSAL_NR_ACPUS;
+    mc->no_cdrom = true;
+    mc->default_ram_id = "ddr";
+}
+
+static const TypeInfo versal_virt_machine_init_typeinfo = {
+    .name = TYPE_XLNX_VERSAL_VIRT_MACHINE,
+    .parent = TYPE_MACHINE,
+    .class_init = versal_virt_machine_class_init,
+    .instance_init = versal_virt_machine_instance_init,
+    .instance_size = sizeof(VersalVirt),
+};
+
+static void versal_virt_machine_init_register_types(void)
+{
+    type_register_static(&versal_virt_machine_init_typeinfo);
+}
+
+type_init(versal_virt_machine_init_register_types)
+
diff --git a/hw/arm/xlnx-versal.c b/hw/arm/xlnx-versal.c
new file mode 100644
index 000000000..b2705b692
--- /dev/null
+++ b/hw/arm/xlnx-versal.c
@@ -0,0 +1,510 @@
+/*
+ * Xilinx Versal SoC model.
+ *
+ * Copyright (c) 2018 Xilinx Inc.
+ * Written by Edgar E. Iglesias
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 or
+ * (at your option) any later version.
+ */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "qapi/error.h" +#include "qemu/module.h" +#include "hw/sysbus.h" +#include "net/net.h" +#include "sysemu/sysemu.h" +#include "sysemu/kvm.h" +#include "hw/arm/boot.h" +#include "kvm_arm.h" +#include "hw/misc/unimp.h" +#include "hw/arm/xlnx-versal.h" + +#define XLNX_VERSAL_ACPU_TYPE ARM_CPU_TYPE_NAME("cortex-a72") +#define GEM_REVISION 0x40070106 + +static void versal_create_apu_cpus(Versal *s) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(s->fpd.apu.cpu); i++) { + Object *obj; + + object_initialize_child(OBJECT(s), "apu-cpu[*]", &s->fpd.apu.cpu[i], + XLNX_VERSAL_ACPU_TYPE); + obj = OBJECT(&s->fpd.apu.cpu[i]); + object_property_set_int(obj, "psci-conduit", s->cfg.psci_conduit, + &error_abort); + if (i) { + /* Secondary CPUs start in PSCI powered-down state */ + object_property_set_bool(obj, "start-powered-off", true, + &error_abort); + } + + object_property_set_int(obj, "core-count", ARRAY_SIZE(s->fpd.apu.cpu), + &error_abort); + object_property_set_link(obj, "memory", OBJECT(&s->fpd.apu.mr), + &error_abort); + qdev_realize(DEVICE(obj), NULL, &error_fatal); + } +} + +static void versal_create_apu_gic(Versal *s, qemu_irq *pic) +{ + static const uint64_t addrs[] = { + MM_GIC_APU_DIST_MAIN, + MM_GIC_APU_REDIST_0 + }; + SysBusDevice *gicbusdev; + DeviceState *gicdev; + int nr_apu_cpus = ARRAY_SIZE(s->fpd.apu.cpu); + int i; + + object_initialize_child(OBJECT(s), "apu-gic", &s->fpd.apu.gic, + gicv3_class_name()); + gicbusdev = SYS_BUS_DEVICE(&s->fpd.apu.gic); + gicdev = DEVICE(&s->fpd.apu.gic); + qdev_prop_set_uint32(gicdev, "revision", 3); + qdev_prop_set_uint32(gicdev, "num-cpu", nr_apu_cpus); + qdev_prop_set_uint32(gicdev, "num-irq", XLNX_VERSAL_NR_IRQS + 32); + qdev_prop_set_uint32(gicdev, "len-redist-region-count", 1); + qdev_prop_set_uint32(gicdev, "redist-region-count[0]", nr_apu_cpus); + qdev_prop_set_bit(gicdev, "has-security-extensions", true); + + sysbus_realize(SYS_BUS_DEVICE(&s->fpd.apu.gic), &error_fatal); + + for (i = 0; i < ARRAY_SIZE(addrs); i++) { + MemoryRegion *mr; + + mr = sysbus_mmio_get_region(gicbusdev, i); + memory_region_add_subregion(&s->fpd.apu.mr, addrs[i], mr); + } + + for (i = 0; i < nr_apu_cpus; i++) { + DeviceState *cpudev = DEVICE(&s->fpd.apu.cpu[i]); + int ppibase = XLNX_VERSAL_NR_IRQS + i * GIC_INTERNAL + GIC_NR_SGIS; + qemu_irq maint_irq; + int ti; + /* Mapping from the output timer irq lines from the CPU to the + * GIC PPI inputs. 
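+         * Each CPU's PPI block starts at the 'ppibase' computed above.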
+ */ + const int timer_irq[] = { + [GTIMER_PHYS] = VERSAL_TIMER_NS_EL1_IRQ, + [GTIMER_VIRT] = VERSAL_TIMER_VIRT_IRQ, + [GTIMER_HYP] = VERSAL_TIMER_NS_EL2_IRQ, + [GTIMER_SEC] = VERSAL_TIMER_S_EL1_IRQ, + }; + + for (ti = 0; ti < ARRAY_SIZE(timer_irq); ti++) { + qdev_connect_gpio_out(cpudev, ti, + qdev_get_gpio_in(gicdev, + ppibase + timer_irq[ti])); + } + maint_irq = qdev_get_gpio_in(gicdev, + ppibase + VERSAL_GIC_MAINT_IRQ); + qdev_connect_gpio_out_named(cpudev, "gicv3-maintenance-interrupt", + 0, maint_irq); + sysbus_connect_irq(gicbusdev, i, qdev_get_gpio_in(cpudev, ARM_CPU_IRQ)); + sysbus_connect_irq(gicbusdev, i + nr_apu_cpus, + qdev_get_gpio_in(cpudev, ARM_CPU_FIQ)); + sysbus_connect_irq(gicbusdev, i + 2 * nr_apu_cpus, + qdev_get_gpio_in(cpudev, ARM_CPU_VIRQ)); + sysbus_connect_irq(gicbusdev, i + 3 * nr_apu_cpus, + qdev_get_gpio_in(cpudev, ARM_CPU_VFIQ)); + } + + for (i = 0; i < XLNX_VERSAL_NR_IRQS; i++) { + pic[i] = qdev_get_gpio_in(gicdev, i); + } +} + +static void versal_create_uarts(Versal *s, qemu_irq *pic) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(s->lpd.iou.uart); i++) { + static const int irqs[] = { VERSAL_UART0_IRQ_0, VERSAL_UART1_IRQ_0}; + static const uint64_t addrs[] = { MM_UART0, MM_UART1 }; + char *name = g_strdup_printf("uart%d", i); + DeviceState *dev; + MemoryRegion *mr; + + object_initialize_child(OBJECT(s), name, &s->lpd.iou.uart[i], + TYPE_PL011); + dev = DEVICE(&s->lpd.iou.uart[i]); + qdev_prop_set_chr(dev, "chardev", serial_hd(i)); + sysbus_realize(SYS_BUS_DEVICE(dev), &error_fatal); + + mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 0); + memory_region_add_subregion(&s->mr_ps, addrs[i], mr); + + sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, pic[irqs[i]]); + g_free(name); + } +} + +static void versal_create_usbs(Versal *s, qemu_irq *pic) +{ + DeviceState *dev; + MemoryRegion *mr; + + object_initialize_child(OBJECT(s), "usb2", &s->lpd.iou.usb, + TYPE_XILINX_VERSAL_USB2); + dev = DEVICE(&s->lpd.iou.usb); + + object_property_set_link(OBJECT(dev), "dma", OBJECT(&s->mr_ps), + &error_abort); + qdev_prop_set_uint32(dev, "intrs", 1); + qdev_prop_set_uint32(dev, "slots", 2); + + sysbus_realize(SYS_BUS_DEVICE(dev), &error_fatal); + + mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 0); + memory_region_add_subregion(&s->mr_ps, MM_USB_0, mr); + + sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, pic[VERSAL_USB0_IRQ_0]); + + mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 1); + memory_region_add_subregion(&s->mr_ps, MM_USB2_CTRL_REGS, mr); +} + +static void versal_create_gems(Versal *s, qemu_irq *pic) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(s->lpd.iou.gem); i++) { + static const int irqs[] = { VERSAL_GEM0_IRQ_0, VERSAL_GEM1_IRQ_0}; + static const uint64_t addrs[] = { MM_GEM0, MM_GEM1 }; + char *name = g_strdup_printf("gem%d", i); + NICInfo *nd = &nd_table[i]; + DeviceState *dev; + MemoryRegion *mr; + + object_initialize_child(OBJECT(s), name, &s->lpd.iou.gem[i], + TYPE_CADENCE_GEM); + dev = DEVICE(&s->lpd.iou.gem[i]); + /* FIXME use qdev NIC properties instead of nd_table[] */ + if (nd->used) { + qemu_check_nic_model(nd, "cadence_gem"); + qdev_set_nic_properties(dev, nd); + } + object_property_set_int(OBJECT(dev), "phy-addr", 23, &error_abort); + object_property_set_int(OBJECT(dev), "num-priority-queues", 2, + &error_abort); + object_property_set_link(OBJECT(dev), "dma", OBJECT(&s->mr_ps), + &error_abort); + sysbus_realize(SYS_BUS_DEVICE(dev), &error_fatal); + + mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 0); + memory_region_add_subregion(&s->mr_ps, addrs[i], mr); + + 
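+        /* Route the GEM interrupt to its SPI on the GIC. */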
sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, pic[irqs[i]]); + g_free(name); + } +} + +static void versal_create_admas(Versal *s, qemu_irq *pic) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(s->lpd.iou.adma); i++) { + char *name = g_strdup_printf("adma%d", i); + DeviceState *dev; + MemoryRegion *mr; + + object_initialize_child(OBJECT(s), name, &s->lpd.iou.adma[i], + TYPE_XLNX_ZDMA); + dev = DEVICE(&s->lpd.iou.adma[i]); + object_property_set_int(OBJECT(dev), "bus-width", 128, &error_abort); + object_property_set_link(OBJECT(dev), "dma", + OBJECT(get_system_memory()), &error_fatal); + sysbus_realize(SYS_BUS_DEVICE(dev), &error_fatal); + + mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 0); + memory_region_add_subregion(&s->mr_ps, + MM_ADMA_CH0 + i * MM_ADMA_CH0_SIZE, mr); + + sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, pic[VERSAL_ADMA_IRQ_0 + i]); + g_free(name); + } +} + +#define SDHCI_CAPABILITIES 0x280737ec6481 /* Same as on ZynqMP. */ +static void versal_create_sds(Versal *s, qemu_irq *pic) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(s->pmc.iou.sd); i++) { + DeviceState *dev; + MemoryRegion *mr; + + object_initialize_child(OBJECT(s), "sd[*]", &s->pmc.iou.sd[i], + TYPE_SYSBUS_SDHCI); + dev = DEVICE(&s->pmc.iou.sd[i]); + + object_property_set_uint(OBJECT(dev), "sd-spec-version", 3, + &error_fatal); + object_property_set_uint(OBJECT(dev), "capareg", SDHCI_CAPABILITIES, + &error_fatal); + object_property_set_uint(OBJECT(dev), "uhs", UHS_I, &error_fatal); + sysbus_realize(SYS_BUS_DEVICE(dev), &error_fatal); + + mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 0); + memory_region_add_subregion(&s->mr_ps, + MM_PMC_SD0 + i * MM_PMC_SD0_SIZE, mr); + + sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, + pic[VERSAL_SD0_IRQ_0 + i * 2]); + } +} + +static void versal_create_rtc(Versal *s, qemu_irq *pic) +{ + SysBusDevice *sbd; + MemoryRegion *mr; + + object_initialize_child(OBJECT(s), "rtc", &s->pmc.rtc, + TYPE_XLNX_ZYNQMP_RTC); + sbd = SYS_BUS_DEVICE(&s->pmc.rtc); + sysbus_realize(SYS_BUS_DEVICE(sbd), &error_fatal); + + mr = sysbus_mmio_get_region(sbd, 0); + memory_region_add_subregion(&s->mr_ps, MM_PMC_RTC, mr); + + /* + * TODO: Connect the ALARM and SECONDS interrupts once our RTC model + * supports them. + */ + sysbus_connect_irq(sbd, 1, pic[VERSAL_RTC_APB_ERR_IRQ]); +} + +static void versal_create_xrams(Versal *s, qemu_irq *pic) +{ + int nr_xrams = ARRAY_SIZE(s->lpd.xram.ctrl); + DeviceState *orgate; + int i; + + /* XRAM IRQs get ORed into a single line. 
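+     * The TYPE_OR_IRQ gate below combines one input per XRAM controller.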
*/ + object_initialize_child(OBJECT(s), "xram-irq-orgate", + &s->lpd.xram.irq_orgate, TYPE_OR_IRQ); + orgate = DEVICE(&s->lpd.xram.irq_orgate); + object_property_set_int(OBJECT(orgate), + "num-lines", nr_xrams, &error_fatal); + qdev_realize(orgate, NULL, &error_fatal); + qdev_connect_gpio_out(orgate, 0, pic[VERSAL_XRAM_IRQ_0]); + + for (i = 0; i < ARRAY_SIZE(s->lpd.xram.ctrl); i++) { + SysBusDevice *sbd; + MemoryRegion *mr; + + object_initialize_child(OBJECT(s), "xram[*]", &s->lpd.xram.ctrl[i], + TYPE_XLNX_XRAM_CTRL); + sbd = SYS_BUS_DEVICE(&s->lpd.xram.ctrl[i]); + sysbus_realize(sbd, &error_fatal); + + mr = sysbus_mmio_get_region(sbd, 0); + memory_region_add_subregion(&s->mr_ps, + MM_XRAMC + i * MM_XRAMC_SIZE, mr); + mr = sysbus_mmio_get_region(sbd, 1); + memory_region_add_subregion(&s->mr_ps, MM_XRAM + i * MiB, mr); + + sysbus_connect_irq(sbd, 0, qdev_get_gpio_in(orgate, i)); + } +} + +static void versal_create_bbram(Versal *s, qemu_irq *pic) +{ + SysBusDevice *sbd; + + object_initialize_child_with_props(OBJECT(s), "bbram", &s->pmc.bbram, + sizeof(s->pmc.bbram), TYPE_XLNX_BBRAM, + &error_fatal, + "crc-zpads", "0", + NULL); + sbd = SYS_BUS_DEVICE(&s->pmc.bbram); + + sysbus_realize(sbd, &error_fatal); + memory_region_add_subregion(&s->mr_ps, MM_PMC_BBRAM_CTRL, + sysbus_mmio_get_region(sbd, 0)); + sysbus_connect_irq(sbd, 0, pic[VERSAL_BBRAM_APB_IRQ_0]); +} + +static void versal_realize_efuse_part(Versal *s, Object *dev, hwaddr base) +{ + SysBusDevice *part = SYS_BUS_DEVICE(dev); + + object_property_set_link(OBJECT(part), "efuse", + OBJECT(&s->pmc.efuse), &error_abort); + + sysbus_realize(part, &error_abort); + memory_region_add_subregion(&s->mr_ps, base, + sysbus_mmio_get_region(part, 0)); +} + +static void versal_create_efuse(Versal *s, qemu_irq *pic) +{ + Object *bits = OBJECT(&s->pmc.efuse); + Object *ctrl = OBJECT(&s->pmc.efuse_ctrl); + Object *cache = OBJECT(&s->pmc.efuse_cache); + + object_initialize_child(OBJECT(s), "efuse-ctrl", &s->pmc.efuse_ctrl, + TYPE_XLNX_VERSAL_EFUSE_CTRL); + + object_initialize_child(OBJECT(s), "efuse-cache", &s->pmc.efuse_cache, + TYPE_XLNX_VERSAL_EFUSE_CACHE); + + object_initialize_child_with_props(ctrl, "xlnx-efuse@0", bits, + sizeof(s->pmc.efuse), + TYPE_XLNX_EFUSE, &error_abort, + "efuse-nr", "3", + "efuse-size", "8192", + NULL); + + qdev_realize(DEVICE(bits), NULL, &error_abort); + versal_realize_efuse_part(s, ctrl, MM_PMC_EFUSE_CTRL); + versal_realize_efuse_part(s, cache, MM_PMC_EFUSE_CACHE); + + sysbus_connect_irq(SYS_BUS_DEVICE(ctrl), 0, pic[VERSAL_EFUSE_IRQ]); +} + +/* This takes the board allocated linear DDR memory and creates aliases + * for each split DDR range/aperture on the Versal address map. + */ +static void versal_map_ddr(Versal *s) +{ + uint64_t size = memory_region_size(s->cfg.mr_ddr); + /* Describes the various split DDR access regions. */ + static const struct { + uint64_t base; + uint64_t size; + } addr_ranges[] = { + { MM_TOP_DDR, MM_TOP_DDR_SIZE }, + { MM_TOP_DDR_2, MM_TOP_DDR_2_SIZE }, + { MM_TOP_DDR_3, MM_TOP_DDR_3_SIZE }, + { MM_TOP_DDR_4, MM_TOP_DDR_4_SIZE } + }; + uint64_t offset = 0; + int i; + + assert(ARRAY_SIZE(addr_ranges) == ARRAY_SIZE(s->noc.mr_ddr_ranges)); + for (i = 0; i < ARRAY_SIZE(addr_ranges) && size; i++) { + char *name; + uint64_t mapsize; + + mapsize = size < addr_ranges[i].size ? size : addr_ranges[i].size; + name = g_strdup_printf("noc-ddr-range%d", i); + /* Create the MR alias. 
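+ * Each alias windows the next 'mapsize' bytes of the backing DDR at + * 'offset', so successive apertures expose successive slices of the + * one board-provided DDR region.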
*/ + memory_region_init_alias(&s->noc.mr_ddr_ranges[i], OBJECT(s), + name, s->cfg.mr_ddr, + offset, mapsize); + + /* Map it onto the NoC MR. */ + memory_region_add_subregion(&s->mr_ps, addr_ranges[i].base, + &s->noc.mr_ddr_ranges[i]); + offset += mapsize; + size -= mapsize; + g_free(name); + } +} + +static void versal_unimp_area(Versal *s, const char *name, + MemoryRegion *mr, + hwaddr base, hwaddr size) +{ + DeviceState *dev = qdev_new(TYPE_UNIMPLEMENTED_DEVICE); + MemoryRegion *mr_dev; + + qdev_prop_set_string(dev, "name", name); + qdev_prop_set_uint64(dev, "size", size); + object_property_add_child(OBJECT(s), name, OBJECT(dev)); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + + mr_dev = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 0); + memory_region_add_subregion(mr, base, mr_dev); +} + +static void versal_unimp(Versal *s) +{ + versal_unimp_area(s, "psm", &s->mr_ps, + MM_PSM_START, MM_PSM_END - MM_PSM_START); + versal_unimp_area(s, "crl", &s->mr_ps, + MM_CRL, MM_CRL_SIZE); + versal_unimp_area(s, "crf", &s->mr_ps, + MM_FPD_CRF, MM_FPD_CRF_SIZE); + versal_unimp_area(s, "apu", &s->mr_ps, + MM_FPD_FPD_APU, MM_FPD_FPD_APU_SIZE); + versal_unimp_area(s, "crp", &s->mr_ps, + MM_PMC_CRP, MM_PMC_CRP_SIZE); + versal_unimp_area(s, "iou-scntr", &s->mr_ps, + MM_IOU_SCNTR, MM_IOU_SCNTR_SIZE); + versal_unimp_area(s, "iou-scntr-secure", &s->mr_ps, + MM_IOU_SCNTRS, MM_IOU_SCNTRS_SIZE); +} + +static void versal_realize(DeviceState *dev, Error **errp) +{ + Versal *s = XLNX_VERSAL(dev); + qemu_irq pic[XLNX_VERSAL_NR_IRQS]; + + versal_create_apu_cpus(s); + versal_create_apu_gic(s, pic); + versal_create_uarts(s, pic); + versal_create_usbs(s, pic); + versal_create_gems(s, pic); + versal_create_admas(s, pic); + versal_create_sds(s, pic); + versal_create_rtc(s, pic); + versal_create_xrams(s, pic); + versal_create_bbram(s, pic); + versal_create_efuse(s, pic); + versal_map_ddr(s); + versal_unimp(s); + + /* Create the On Chip Memory (OCM). */ + memory_region_init_ram(&s->lpd.mr_ocm, OBJECT(s), "ocm", + MM_OCM_SIZE, &error_fatal); + + memory_region_add_subregion_overlap(&s->mr_ps, MM_OCM, &s->lpd.mr_ocm, 0); + memory_region_add_subregion_overlap(&s->fpd.apu.mr, 0, &s->mr_ps, 0); +} + +static void versal_init(Object *obj) +{ + Versal *s = XLNX_VERSAL(obj); + + memory_region_init(&s->fpd.apu.mr, obj, "mr-apu", UINT64_MAX); + memory_region_init(&s->mr_ps, obj, "mr-ps-switch", UINT64_MAX); +} + +static Property versal_properties[] = { + DEFINE_PROP_LINK("ddr", Versal, cfg.mr_ddr, TYPE_MEMORY_REGION, + MemoryRegion *), + DEFINE_PROP_UINT32("psci-conduit", Versal, cfg.psci_conduit, 0), + DEFINE_PROP_END_OF_LIST() +}; + +static void versal_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = versal_realize; + device_class_set_props(dc, versal_properties); + /* No VMSD since we haven't got any top-level SoC state to save. 
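+ * All migratable state should live in the child devices, which + * register their own VMSDs.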
*/ +} + +static const TypeInfo versal_info = { + .name = TYPE_XLNX_VERSAL, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(Versal), + .instance_init = versal_init, + .class_init = versal_class_init, +}; + +static void versal_register_types(void) +{ + type_register_static(&versal_info); +} + +type_init(versal_register_types); diff --git a/hw/arm/xlnx-zcu102.c b/hw/arm/xlnx-zcu102.c new file mode 100644 index 000000000..3dc2b5e8c --- /dev/null +++ b/hw/arm/xlnx-zcu102.c @@ -0,0 +1,302 @@ +/* + * Xilinx ZynqMP ZCU102 board + * + * Copyright (C) 2015 Xilinx Inc + * Written by Peter Crosthwaite + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "hw/arm/xlnx-zynqmp.h" +#include "hw/boards.h" +#include "qemu/error-report.h" +#include "qemu/log.h" +#include "sysemu/device_tree.h" +#include "qom/object.h" +#include "net/can_emu.h" + +struct XlnxZCU102 { + MachineState parent_obj; + + XlnxZynqMPState soc; + + bool secure; + bool virt; + + CanBusState *canbus[XLNX_ZYNQMP_NUM_CAN]; + + struct arm_boot_info binfo; +}; + +#define TYPE_ZCU102_MACHINE MACHINE_TYPE_NAME("xlnx-zcu102") +OBJECT_DECLARE_SIMPLE_TYPE(XlnxZCU102, ZCU102_MACHINE) + + +static bool zcu102_get_secure(Object *obj, Error **errp) +{ + XlnxZCU102 *s = ZCU102_MACHINE(obj); + + return s->secure; +} + +static void zcu102_set_secure(Object *obj, bool value, Error **errp) +{ + XlnxZCU102 *s = ZCU102_MACHINE(obj); + + s->secure = value; +} + +static bool zcu102_get_virt(Object *obj, Error **errp) +{ + XlnxZCU102 *s = ZCU102_MACHINE(obj); + + return s->virt; +} + +static void zcu102_set_virt(Object *obj, bool value, Error **errp) +{ + XlnxZCU102 *s = ZCU102_MACHINE(obj); + + s->virt = value; +} + +static void zcu102_modify_dtb(const struct arm_boot_info *binfo, void *fdt) +{ + XlnxZCU102 *s = container_of(binfo, XlnxZCU102, binfo); + bool method_is_hvc; + char **node_path; + const char *r; + int prop_len; + int i; + + /* If EL3 is enabled, we keep all firmware nodes active. */ + if (!s->secure) { + node_path = qemu_fdt_node_path(fdt, NULL, "xlnx,zynqmp-firmware", + &error_fatal); + + for (i = 0; node_path && node_path[i]; i++) { + r = qemu_fdt_getprop(fdt, node_path[i], "method", &prop_len, NULL); + method_is_hvc = r && !strcmp("hvc", r); + + /* Allow HVC based firmware if EL2 is enabled. */ + if (method_is_hvc && s->virt) { + continue; + } + qemu_fdt_setprop_string(fdt, node_path[i], "status", "disabled"); + } + g_strfreev(node_path); + } +} + +static void bbram_attach_drive(XlnxBBRam *dev) +{ + DriveInfo *dinfo; + BlockBackend *blk; + + dinfo = drive_get_by_index(IF_PFLASH, 2); + blk = dinfo ? blk_by_legacy_dinfo(dinfo) : NULL; + if (blk) { + qdev_prop_set_drive(DEVICE(dev), "drive", blk); + } +} + +static void efuse_attach_drive(XlnxEFuse *dev) +{ + DriveInfo *dinfo; + BlockBackend *blk; + + dinfo = drive_get_by_index(IF_PFLASH, 3); + blk = dinfo ? 
blk_by_legacy_dinfo(dinfo) : NULL; + if (blk) { + qdev_prop_set_drive(DEVICE(dev), "drive", blk); + } +} + +static void xlnx_zcu102_init(MachineState *machine) +{ + XlnxZCU102 *s = ZCU102_MACHINE(machine); + int i; + uint64_t ram_size = machine->ram_size; + + /* Create the memory region to pass to the SoC */ + if (ram_size > XLNX_ZYNQMP_MAX_RAM_SIZE) { + error_report("ERROR: RAM size 0x%" PRIx64 " above max supported of " + "0x%llx", ram_size, + XLNX_ZYNQMP_MAX_RAM_SIZE); + exit(1); + } + + if (ram_size < 0x08000000) { + qemu_log("WARNING: RAM size 0x%" PRIx64 " is small for ZCU102", + ram_size); + } + + object_initialize_child(OBJECT(machine), "soc", &s->soc, TYPE_XLNX_ZYNQMP); + + object_property_set_link(OBJECT(&s->soc), "ddr-ram", OBJECT(machine->ram), + &error_abort); + object_property_set_bool(OBJECT(&s->soc), "secure", s->secure, + &error_fatal); + object_property_set_bool(OBJECT(&s->soc), "virtualization", s->virt, + &error_fatal); + + for (i = 0; i < XLNX_ZYNQMP_NUM_CAN; i++) { + gchar *bus_name = g_strdup_printf("canbus%d", i); + + object_property_set_link(OBJECT(&s->soc), bus_name, + OBJECT(s->canbus[i]), &error_fatal); + g_free(bus_name); + } + + qdev_realize(DEVICE(&s->soc), NULL, &error_fatal); + + /* Attach bbram backend, if given */ + bbram_attach_drive(&s->soc.bbram); + + /* Attach efuse backend, if given */ + efuse_attach_drive(&s->soc.efuse); + + /* Create and plug in the SD cards */ + for (i = 0; i < XLNX_ZYNQMP_NUM_SDHCI; i++) { + BusState *bus; + DriveInfo *di = drive_get_next(IF_SD); + BlockBackend *blk = di ? blk_by_legacy_dinfo(di) : NULL; + DeviceState *carddev; + char *bus_name; + + bus_name = g_strdup_printf("sd-bus%d", i); + bus = qdev_get_child_bus(DEVICE(&s->soc), bus_name); + g_free(bus_name); + if (!bus) { + error_report("No SD bus found for SD card %d", i); + exit(1); + } + carddev = qdev_new(TYPE_SD_CARD); + qdev_prop_set_drive_err(carddev, "drive", blk, &error_fatal); + qdev_realize_and_unref(carddev, bus, &error_fatal); + } + + for (i = 0; i < XLNX_ZYNQMP_NUM_SPIS; i++) { + BusState *spi_bus; + DeviceState *flash_dev; + qemu_irq cs_line; + DriveInfo *dinfo = drive_get_next(IF_MTD); + gchar *bus_name = g_strdup_printf("spi%d", i); + + spi_bus = qdev_get_child_bus(DEVICE(&s->soc), bus_name); + g_free(bus_name); + + flash_dev = qdev_new("sst25wf080"); + if (dinfo) { + qdev_prop_set_drive_err(flash_dev, "drive", + blk_by_legacy_dinfo(dinfo), &error_fatal); + } + qdev_realize_and_unref(flash_dev, spi_bus, &error_fatal); + + cs_line = qdev_get_gpio_in_named(flash_dev, SSI_GPIO_CS, 0); + + sysbus_connect_irq(SYS_BUS_DEVICE(&s->soc.spi[i]), 1, cs_line); + } + + for (i = 0; i < XLNX_ZYNQMP_NUM_QSPI_FLASH; i++) { + BusState *spi_bus; + DeviceState *flash_dev; + qemu_irq cs_line; + DriveInfo *dinfo = drive_get_next(IF_MTD); + int bus = i / XLNX_ZYNQMP_NUM_QSPI_BUS_CS; + gchar *bus_name = g_strdup_printf("qspi%d", bus); + + spi_bus = qdev_get_child_bus(DEVICE(&s->soc), bus_name); + g_free(bus_name); + + flash_dev = qdev_new("n25q512a11"); + if (dinfo) { + qdev_prop_set_drive_err(flash_dev, "drive", + blk_by_legacy_dinfo(dinfo), &error_fatal); + } + qdev_realize_and_unref(flash_dev, spi_bus, &error_fatal); + + cs_line = qdev_get_gpio_in_named(flash_dev, SSI_GPIO_CS, 0); + + sysbus_connect_irq(SYS_BUS_DEVICE(&s->soc.qspi), i + 1, cs_line); + } + + /* TODO create and connect IDE devices for ide_drive_get() */ + + s->binfo.ram_size = ram_size; + s->binfo.loader_start = 0; + s->binfo.modify_dtb = zcu102_modify_dtb; + arm_load_kernel(s->soc.boot_cpu_ptr, machine, 
&s->binfo); +} + +static void xlnx_zcu102_machine_instance_init(Object *obj) +{ + XlnxZCU102 *s = ZCU102_MACHINE(obj); + + /* Default to secure mode being disabled */ + s->secure = false; + /* Default to virt (EL2) being disabled */ + s->virt = false; + object_property_add_link(obj, "canbus0", TYPE_CAN_BUS, + (Object **)&s->canbus[0], + object_property_allow_set_link, + 0); + + object_property_add_link(obj, "canbus1", TYPE_CAN_BUS, + (Object **)&s->canbus[1], + object_property_allow_set_link, + 0); +} + +static void xlnx_zcu102_machine_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + + mc->desc = "Xilinx ZynqMP ZCU102 board with 4xA53s and 2xR5Fs based on " \ + "the value of smp"; + mc->init = xlnx_zcu102_init; + mc->block_default_type = IF_IDE; + mc->units_per_default_bus = 1; + mc->ignore_memory_transaction_failures = true; + mc->max_cpus = XLNX_ZYNQMP_NUM_APU_CPUS + XLNX_ZYNQMP_NUM_RPU_CPUS; + mc->default_cpus = XLNX_ZYNQMP_NUM_APU_CPUS; + mc->default_ram_id = "ddr-ram"; + + object_class_property_add_bool(oc, "secure", zcu102_get_secure, + zcu102_set_secure); + object_class_property_set_description(oc, "secure", + "Set on/off to enable/disable the ARM " + "Security Extensions (TrustZone)"); + + object_class_property_add_bool(oc, "virtualization", zcu102_get_virt, + zcu102_set_virt); + object_class_property_set_description(oc, "virtualization", + "Set on/off to enable/disable emulating a " + "guest CPU which implements the ARM " + "Virtualization Extensions"); +} + +static const TypeInfo xlnx_zcu102_machine_init_typeinfo = { + .name = TYPE_ZCU102_MACHINE, + .parent = TYPE_MACHINE, + .class_init = xlnx_zcu102_machine_class_init, + .instance_init = xlnx_zcu102_machine_instance_init, + .instance_size = sizeof(XlnxZCU102), +}; + +static void xlnx_zcu102_machine_init_register_types(void) +{ + type_register_static(&xlnx_zcu102_machine_init_typeinfo); +} + +type_init(xlnx_zcu102_machine_init_register_types) diff --git a/hw/arm/xlnx-zynqmp.c b/hw/arm/xlnx-zynqmp.c new file mode 100644 index 000000000..1c52a575a --- /dev/null +++ b/hw/arm/xlnx-zynqmp.c @@ -0,0 +1,780 @@ +/* + * Xilinx Zynq MPSoC emulation + * + * Copyright (C) 2015 Xilinx Inc + * Written by Peter Crosthwaite + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/module.h" +#include "hw/arm/xlnx-zynqmp.h" +#include "hw/intc/arm_gic_common.h" +#include "hw/misc/unimp.h" +#include "hw/boards.h" +#include "sysemu/kvm.h" +#include "sysemu/sysemu.h" +#include "kvm_arm.h" + +#define GIC_NUM_SPI_INTR 160 + +#define ARM_PHYS_TIMER_PPI 30 +#define ARM_VIRT_TIMER_PPI 27 +#define ARM_HYP_TIMER_PPI 26 +#define ARM_SEC_TIMER_PPI 29 +#define GIC_MAINTENANCE_PPI 25 + +#define GEM_REVISION 0x40070106 + +#define GIC_BASE_ADDR 0xf9000000 +#define GIC_DIST_ADDR 0xf9010000 +#define GIC_CPU_ADDR 0xf9020000 +#define GIC_VIFACE_ADDR 0xf9040000 +#define GIC_VCPU_ADDR 0xf9060000 + +#define SATA_INTR 133 +#define SATA_ADDR 0xFD0C0000 +#define SATA_NUM_PORTS 2 + +#define QSPI_ADDR 0xff0f0000 +#define LQSPI_ADDR 0xc0000000 +#define QSPI_IRQ 15 +#define QSPI_DMA_ADDR 0xff0f0800 + +#define DP_ADDR 0xfd4a0000 +#define DP_IRQ 113 + +#define DPDMA_ADDR 0xfd4c0000 +#define DPDMA_IRQ 116 + +#define APU_ADDR 0xfd5c0000 +#define APU_SIZE 0x100 + +#define IPI_ADDR 0xFF300000 +#define IPI_IRQ 64 + +#define RTC_ADDR 0xffa60000 +#define RTC_IRQ 26 + +#define BBRAM_ADDR 0xffcd0000 +#define BBRAM_IRQ 11 + +#define EFUSE_ADDR 0xffcc0000 +#define EFUSE_IRQ 87 + +#define SDHCI_CAPABILITIES 0x280737ec6481 /* Datasheet: UG1085 (v1.7) */ + +static const uint64_t gem_addr[XLNX_ZYNQMP_NUM_GEMS] = { + 0xFF0B0000, 0xFF0C0000, 0xFF0D0000, 0xFF0E0000, +}; + +static const int gem_intr[XLNX_ZYNQMP_NUM_GEMS] = { + 57, 59, 61, 63, +}; + +static const uint64_t uart_addr[XLNX_ZYNQMP_NUM_UARTS] = { + 0xFF000000, 0xFF010000, +}; + +static const int uart_intr[XLNX_ZYNQMP_NUM_UARTS] = { + 21, 22, +}; + +static const uint64_t can_addr[XLNX_ZYNQMP_NUM_CAN] = { + 0xFF060000, 0xFF070000, +}; + +static const int can_intr[XLNX_ZYNQMP_NUM_CAN] = { + 23, 24, +}; + +static const uint64_t sdhci_addr[XLNX_ZYNQMP_NUM_SDHCI] = { + 0xFF160000, 0xFF170000, +}; + +static const int sdhci_intr[XLNX_ZYNQMP_NUM_SDHCI] = { + 48, 49, +}; + +static const uint64_t spi_addr[XLNX_ZYNQMP_NUM_SPIS] = { + 0xFF040000, 0xFF050000, +}; + +static const int spi_intr[XLNX_ZYNQMP_NUM_SPIS] = { + 19, 20, +}; + +static const uint64_t gdma_ch_addr[XLNX_ZYNQMP_NUM_GDMA_CH] = { + 0xFD500000, 0xFD510000, 0xFD520000, 0xFD530000, + 0xFD540000, 0xFD550000, 0xFD560000, 0xFD570000 +}; + +static const int gdma_ch_intr[XLNX_ZYNQMP_NUM_GDMA_CH] = { + 124, 125, 126, 127, 128, 129, 130, 131 +}; + +static const uint64_t adma_ch_addr[XLNX_ZYNQMP_NUM_ADMA_CH] = { + 0xFFA80000, 0xFFA90000, 0xFFAA0000, 0xFFAB0000, + 0xFFAC0000, 0xFFAD0000, 0xFFAE0000, 0xFFAF0000 +}; + +static const int adma_ch_intr[XLNX_ZYNQMP_NUM_ADMA_CH] = { + 77, 78, 79, 80, 81, 82, 83, 84 +}; + +typedef struct XlnxZynqMPGICRegion { + int region_index; + uint32_t address; + uint32_t offset; + bool virt; +} XlnxZynqMPGICRegion; + +static const XlnxZynqMPGICRegion xlnx_zynqmp_gic_regions[] = { + /* Distributor */ + { + .region_index = 0, + .address = GIC_DIST_ADDR, + .offset = 0, + .virt = false + }, + + /* CPU interface */ + { + .region_index = 1, + .address = GIC_CPU_ADDR, + .offset = 0, + .virt = false + }, + { + .region_index = 1, + .address = GIC_CPU_ADDR + 0x10000, + .offset = 0x1000, + .virt = false + }, + + /* Virtual interface */ + { + .region_index = 2, + .address = GIC_VIFACE_ADDR, + .offset = 0, + .virt = true + }, + + /* Virtual CPU interface */ + { + .region_index = 3, + .address = GIC_VCPU_ADDR, + .offset = 0, + .virt = true + }, + { + .region_index = 3, + .address = GIC_VCPU_ADDR + 0x10000, + .offset = 0x1000, + 
.virt = true + }, +}; + +static inline int arm_gic_ppi_index(int cpu_nr, int ppi_index) +{ + return GIC_NUM_SPI_INTR + cpu_nr * GIC_INTERNAL + ppi_index; +} + +static void xlnx_zynqmp_create_rpu(MachineState *ms, XlnxZynqMPState *s, + const char *boot_cpu, Error **errp) +{ + int i; + int num_rpus = MIN(ms->smp.cpus - XLNX_ZYNQMP_NUM_APU_CPUS, + XLNX_ZYNQMP_NUM_RPU_CPUS); + + if (num_rpus <= 0) { + /* Don't create rpu-cluster object if there's nothing to put in it */ + return; + } + + object_initialize_child(OBJECT(s), "rpu-cluster", &s->rpu_cluster, + TYPE_CPU_CLUSTER); + qdev_prop_set_uint32(DEVICE(&s->rpu_cluster), "cluster-id", 1); + + for (i = 0; i < num_rpus; i++) { + const char *name; + + object_initialize_child(OBJECT(&s->rpu_cluster), "rpu-cpu[*]", + &s->rpu_cpu[i], + ARM_CPU_TYPE_NAME("cortex-r5f")); + + name = object_get_canonical_path_component(OBJECT(&s->rpu_cpu[i])); + if (strcmp(name, boot_cpu)) { + /* Secondary CPUs start in PSCI powered-down state */ + object_property_set_bool(OBJECT(&s->rpu_cpu[i]), + "start-powered-off", true, &error_abort); + } else { + s->boot_cpu_ptr = &s->rpu_cpu[i]; + } + + object_property_set_bool(OBJECT(&s->rpu_cpu[i]), "reset-hivecs", true, + &error_abort); + if (!qdev_realize(DEVICE(&s->rpu_cpu[i]), NULL, errp)) { + return; + } + } + + qdev_realize(DEVICE(&s->rpu_cluster), NULL, &error_fatal); +} + +static void xlnx_zynqmp_create_bbram(XlnxZynqMPState *s, qemu_irq *gic) +{ + SysBusDevice *sbd; + + object_initialize_child_with_props(OBJECT(s), "bbram", &s->bbram, + sizeof(s->bbram), TYPE_XLNX_BBRAM, + &error_fatal, + "crc-zpads", "1", + NULL); + sbd = SYS_BUS_DEVICE(&s->bbram); + + sysbus_realize(sbd, &error_fatal); + sysbus_mmio_map(sbd, 0, BBRAM_ADDR); + sysbus_connect_irq(sbd, 0, gic[BBRAM_IRQ]); +} + +static void xlnx_zynqmp_create_efuse(XlnxZynqMPState *s, qemu_irq *gic) +{ + Object *bits = OBJECT(&s->efuse); + Object *ctrl = OBJECT(&s->efuse_ctrl); + SysBusDevice *sbd; + + object_initialize_child(OBJECT(s), "efuse-ctrl", &s->efuse_ctrl, + TYPE_XLNX_ZYNQMP_EFUSE); + + object_initialize_child_with_props(ctrl, "xlnx-efuse@0", bits, + sizeof(s->efuse), + TYPE_XLNX_EFUSE, &error_abort, + "efuse-nr", "3", + "efuse-size", "2048", + NULL); + + qdev_realize(DEVICE(bits), NULL, &error_abort); + object_property_set_link(ctrl, "efuse", bits, &error_abort); + + sbd = SYS_BUS_DEVICE(ctrl); + sysbus_realize(sbd, &error_abort); + sysbus_mmio_map(sbd, 0, EFUSE_ADDR); + sysbus_connect_irq(sbd, 0, gic[EFUSE_IRQ]); +} + +static void xlnx_zynqmp_create_unimp_mmio(XlnxZynqMPState *s) +{ + static const struct UnimpInfo { + const char *name; + hwaddr base; + hwaddr size; + } unimp_areas[ARRAY_SIZE(s->mr_unimp)] = { + { .name = "apu", APU_ADDR, APU_SIZE }, + }; + unsigned int nr; + + for (nr = 0; nr < ARRAY_SIZE(unimp_areas); nr++) { + const struct UnimpInfo *info = &unimp_areas[nr]; + DeviceState *dev = qdev_new(TYPE_UNIMPLEMENTED_DEVICE); + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + + assert(info->name && info->base && info->size > 0); + qdev_prop_set_string(dev, "name", info->name); + qdev_prop_set_uint64(dev, "size", info->size); + object_property_add_child(OBJECT(s), info->name, OBJECT(dev)); + + sysbus_realize_and_unref(sbd, &error_fatal); + sysbus_mmio_map(sbd, 0, info->base); + } +} + +static void xlnx_zynqmp_init(Object *obj) +{ + MachineState *ms = MACHINE(qdev_get_machine()); + XlnxZynqMPState *s = XLNX_ZYNQMP(obj); + int i; + int num_apus = MIN(ms->smp.cpus, XLNX_ZYNQMP_NUM_APU_CPUS); + + object_initialize_child(obj, "apu-cluster", &s->apu_cluster, + 
TYPE_CPU_CLUSTER); + qdev_prop_set_uint32(DEVICE(&s->apu_cluster), "cluster-id", 0); + + for (i = 0; i < num_apus; i++) { + object_initialize_child(OBJECT(&s->apu_cluster), "apu-cpu[*]", + &s->apu_cpu[i], + ARM_CPU_TYPE_NAME("cortex-a53")); + } + + object_initialize_child(obj, "gic", &s->gic, gic_class_name()); + + for (i = 0; i < XLNX_ZYNQMP_NUM_GEMS; i++) { + object_initialize_child(obj, "gem[*]", &s->gem[i], TYPE_CADENCE_GEM); + } + + for (i = 0; i < XLNX_ZYNQMP_NUM_UARTS; i++) { + object_initialize_child(obj, "uart[*]", &s->uart[i], + TYPE_CADENCE_UART); + } + + for (i = 0; i < XLNX_ZYNQMP_NUM_CAN; i++) { + object_initialize_child(obj, "can[*]", &s->can[i], + TYPE_XLNX_ZYNQMP_CAN); + } + + object_initialize_child(obj, "sata", &s->sata, TYPE_SYSBUS_AHCI); + + for (i = 0; i < XLNX_ZYNQMP_NUM_SDHCI; i++) { + object_initialize_child(obj, "sdhci[*]", &s->sdhci[i], + TYPE_SYSBUS_SDHCI); + } + + for (i = 0; i < XLNX_ZYNQMP_NUM_SPIS; i++) { + object_initialize_child(obj, "spi[*]", &s->spi[i], TYPE_XILINX_SPIPS); + } + + object_initialize_child(obj, "qspi", &s->qspi, TYPE_XLNX_ZYNQMP_QSPIPS); + + object_initialize_child(obj, "xxxdp", &s->dp, TYPE_XLNX_DP); + + object_initialize_child(obj, "dp-dma", &s->dpdma, TYPE_XLNX_DPDMA); + + object_initialize_child(obj, "ipi", &s->ipi, TYPE_XLNX_ZYNQMP_IPI); + + object_initialize_child(obj, "rtc", &s->rtc, TYPE_XLNX_ZYNQMP_RTC); + + for (i = 0; i < XLNX_ZYNQMP_NUM_GDMA_CH; i++) { + object_initialize_child(obj, "gdma[*]", &s->gdma[i], TYPE_XLNX_ZDMA); + } + + for (i = 0; i < XLNX_ZYNQMP_NUM_ADMA_CH; i++) { + object_initialize_child(obj, "adma[*]", &s->adma[i], TYPE_XLNX_ZDMA); + } + + object_initialize_child(obj, "qspi-dma", &s->qspi_dma, TYPE_XLNX_CSU_DMA); +} + +static void xlnx_zynqmp_realize(DeviceState *dev, Error **errp) +{ + MachineState *ms = MACHINE(qdev_get_machine()); + XlnxZynqMPState *s = XLNX_ZYNQMP(dev); + MemoryRegion *system_memory = get_system_memory(); + uint8_t i; + uint64_t ram_size; + int num_apus = MIN(ms->smp.cpus, XLNX_ZYNQMP_NUM_APU_CPUS); + const char *boot_cpu = s->boot_cpu ? s->boot_cpu : "apu-cpu[0]"; + ram_addr_t ddr_low_size, ddr_high_size; + qemu_irq gic_spi[GIC_NUM_SPI_INTR]; + Error *err = NULL; + + ram_size = memory_region_size(s->ddr_ram); + + /* + * Create the DDR Memory Regions. User friendly checks should happen at + * the board level + */ + if (ram_size > XLNX_ZYNQMP_MAX_LOW_RAM_SIZE) { + /* + * The RAM size is above the maximum available for the low DDR. + * Create the high DDR memory region as well. 
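+ * The low region keeps the first XLNX_ZYNQMP_MAX_LOW_RAM_SIZE bytes + * at address 0; the remainder is aliased at XLNX_ZYNQMP_HIGH_RAM_START.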
+ */ + assert(ram_size <= XLNX_ZYNQMP_MAX_RAM_SIZE); + ddr_low_size = XLNX_ZYNQMP_MAX_LOW_RAM_SIZE; + ddr_high_size = ram_size - XLNX_ZYNQMP_MAX_LOW_RAM_SIZE; + + memory_region_init_alias(&s->ddr_ram_high, OBJECT(dev), + "ddr-ram-high", s->ddr_ram, ddr_low_size, + ddr_high_size); + memory_region_add_subregion(get_system_memory(), + XLNX_ZYNQMP_HIGH_RAM_START, + &s->ddr_ram_high); + } else { + /* RAM must be non-zero */ + assert(ram_size); + ddr_low_size = ram_size; + } + + memory_region_init_alias(&s->ddr_ram_low, OBJECT(dev), "ddr-ram-low", + s->ddr_ram, 0, ddr_low_size); + memory_region_add_subregion(get_system_memory(), 0, &s->ddr_ram_low); + + /* Create the four OCM banks */ + for (i = 0; i < XLNX_ZYNQMP_NUM_OCM_BANKS; i++) { + char *ocm_name = g_strdup_printf("zynqmp.ocm_ram_bank_%d", i); + + memory_region_init_ram(&s->ocm_ram[i], NULL, ocm_name, + XLNX_ZYNQMP_OCM_RAM_SIZE, &error_fatal); + memory_region_add_subregion(get_system_memory(), + XLNX_ZYNQMP_OCM_RAM_0_ADDRESS + + i * XLNX_ZYNQMP_OCM_RAM_SIZE, + &s->ocm_ram[i]); + + g_free(ocm_name); + } + + qdev_prop_set_uint32(DEVICE(&s->gic), "num-irq", GIC_NUM_SPI_INTR + 32); + qdev_prop_set_uint32(DEVICE(&s->gic), "revision", 2); + qdev_prop_set_uint32(DEVICE(&s->gic), "num-cpu", num_apus); + qdev_prop_set_bit(DEVICE(&s->gic), "has-security-extensions", s->secure); + qdev_prop_set_bit(DEVICE(&s->gic), + "has-virtualization-extensions", s->virt); + + qdev_realize(DEVICE(&s->apu_cluster), NULL, &error_fatal); + + /* Realize APUs before realizing the GIC. KVM requires this. */ + for (i = 0; i < num_apus; i++) { + const char *name; + + object_property_set_int(OBJECT(&s->apu_cpu[i]), "psci-conduit", + QEMU_PSCI_CONDUIT_SMC, &error_abort); + + name = object_get_canonical_path_component(OBJECT(&s->apu_cpu[i])); + if (strcmp(name, boot_cpu)) { + /* Secondary CPUs start in PSCI powered-down state */ + object_property_set_bool(OBJECT(&s->apu_cpu[i]), + "start-powered-off", true, &error_abort); + } else { + s->boot_cpu_ptr = &s->apu_cpu[i]; + } + + object_property_set_bool(OBJECT(&s->apu_cpu[i]), "has_el3", s->secure, + NULL); + object_property_set_bool(OBJECT(&s->apu_cpu[i]), "has_el2", s->virt, + NULL); + object_property_set_int(OBJECT(&s->apu_cpu[i]), "reset-cbar", + GIC_BASE_ADDR, &error_abort); + object_property_set_int(OBJECT(&s->apu_cpu[i]), "core-count", + num_apus, &error_abort); + if (!qdev_realize(DEVICE(&s->apu_cpu[i]), NULL, errp)) { + return; + } + } + + if (!sysbus_realize(SYS_BUS_DEVICE(&s->gic), errp)) { + return; + } + + assert(ARRAY_SIZE(xlnx_zynqmp_gic_regions) == XLNX_ZYNQMP_GIC_REGIONS); + for (i = 0; i < XLNX_ZYNQMP_GIC_REGIONS; i++) { + SysBusDevice *gic = SYS_BUS_DEVICE(&s->gic); + const XlnxZynqMPGICRegion *r = &xlnx_zynqmp_gic_regions[i]; + MemoryRegion *mr; + uint32_t addr = r->address; + int j; + + if (r->virt && !s->virt) { + continue; + } + + mr = sysbus_mmio_get_region(gic, r->region_index); + for (j = 0; j < XLNX_ZYNQMP_GIC_ALIASES; j++) { + MemoryRegion *alias = &s->gic_mr[i][j]; + + memory_region_init_alias(alias, OBJECT(s), "zynqmp-gic-alias", mr, + r->offset, XLNX_ZYNQMP_GIC_REGION_SIZE); + memory_region_add_subregion(system_memory, addr, alias); + + addr += XLNX_ZYNQMP_GIC_REGION_SIZE; + } + } + + for (i = 0; i < num_apus; i++) { + qemu_irq irq; + + sysbus_connect_irq(SYS_BUS_DEVICE(&s->gic), i, + qdev_get_gpio_in(DEVICE(&s->apu_cpu[i]), + ARM_CPU_IRQ)); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->gic), i + num_apus, + qdev_get_gpio_in(DEVICE(&s->apu_cpu[i]), + ARM_CPU_FIQ)); + 
sysbus_connect_irq(SYS_BUS_DEVICE(&s->gic), i + num_apus * 2, + qdev_get_gpio_in(DEVICE(&s->apu_cpu[i]), + ARM_CPU_VIRQ)); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->gic), i + num_apus * 3, + qdev_get_gpio_in(DEVICE(&s->apu_cpu[i]), + ARM_CPU_VFIQ)); + irq = qdev_get_gpio_in(DEVICE(&s->gic), + arm_gic_ppi_index(i, ARM_PHYS_TIMER_PPI)); + qdev_connect_gpio_out(DEVICE(&s->apu_cpu[i]), GTIMER_PHYS, irq); + irq = qdev_get_gpio_in(DEVICE(&s->gic), + arm_gic_ppi_index(i, ARM_VIRT_TIMER_PPI)); + qdev_connect_gpio_out(DEVICE(&s->apu_cpu[i]), GTIMER_VIRT, irq); + irq = qdev_get_gpio_in(DEVICE(&s->gic), + arm_gic_ppi_index(i, ARM_HYP_TIMER_PPI)); + qdev_connect_gpio_out(DEVICE(&s->apu_cpu[i]), GTIMER_HYP, irq); + irq = qdev_get_gpio_in(DEVICE(&s->gic), + arm_gic_ppi_index(i, ARM_SEC_TIMER_PPI)); + qdev_connect_gpio_out(DEVICE(&s->apu_cpu[i]), GTIMER_SEC, irq); + + if (s->virt) { + irq = qdev_get_gpio_in(DEVICE(&s->gic), + arm_gic_ppi_index(i, GIC_MAINTENANCE_PPI)); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->gic), i + num_apus * 4, irq); + } + } + + xlnx_zynqmp_create_rpu(ms, s, boot_cpu, &err); + if (err) { + error_propagate(errp, err); + return; + } + + if (!s->boot_cpu_ptr) { + error_setg(errp, "ZynqMP Boot cpu %s not found", boot_cpu); + return; + } + + for (i = 0; i < GIC_NUM_SPI_INTR; i++) { + gic_spi[i] = qdev_get_gpio_in(DEVICE(&s->gic), i); + } + + for (i = 0; i < XLNX_ZYNQMP_NUM_GEMS; i++) { + NICInfo *nd = &nd_table[i]; + + /* FIXME use qdev NIC properties instead of nd_table[] */ + if (nd->used) { + qemu_check_nic_model(nd, TYPE_CADENCE_GEM); + qdev_set_nic_properties(DEVICE(&s->gem[i]), nd); + } + object_property_set_int(OBJECT(&s->gem[i]), "revision", GEM_REVISION, + &error_abort); + object_property_set_int(OBJECT(&s->gem[i]), "phy-addr", 23, + &error_abort); + object_property_set_int(OBJECT(&s->gem[i]), "num-priority-queues", 2, + &error_abort); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->gem[i]), errp)) { + return; + } + sysbus_mmio_map(SYS_BUS_DEVICE(&s->gem[i]), 0, gem_addr[i]); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->gem[i]), 0, + gic_spi[gem_intr[i]]); + } + + for (i = 0; i < XLNX_ZYNQMP_NUM_UARTS; i++) { + qdev_prop_set_chr(DEVICE(&s->uart[i]), "chardev", serial_hd(i)); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->uart[i]), errp)) { + return; + } + sysbus_mmio_map(SYS_BUS_DEVICE(&s->uart[i]), 0, uart_addr[i]); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->uart[i]), 0, + gic_spi[uart_intr[i]]); + } + + for (i = 0; i < XLNX_ZYNQMP_NUM_CAN; i++) { + object_property_set_int(OBJECT(&s->can[i]), "ext_clk_freq", + XLNX_ZYNQMP_CAN_REF_CLK, &error_abort); + + object_property_set_link(OBJECT(&s->can[i]), "canbus", + OBJECT(s->canbus[i]), &error_fatal); + + sysbus_realize(SYS_BUS_DEVICE(&s->can[i]), &err); + if (err) { + error_propagate(errp, err); + return; + } + sysbus_mmio_map(SYS_BUS_DEVICE(&s->can[i]), 0, can_addr[i]); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->can[i]), 0, + gic_spi[can_intr[i]]); + } + + object_property_set_int(OBJECT(&s->sata), "num-ports", SATA_NUM_PORTS, + &error_abort); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->sata), errp)) { + return; + } + + sysbus_mmio_map(SYS_BUS_DEVICE(&s->sata), 0, SATA_ADDR); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->sata), 0, gic_spi[SATA_INTR]); + + for (i = 0; i < XLNX_ZYNQMP_NUM_SDHCI; i++) { + char *bus_name; + SysBusDevice *sbd = SYS_BUS_DEVICE(&s->sdhci[i]); + Object *sdhci = OBJECT(&s->sdhci[i]); + + /* + * Compatible with: + * - SD Host Controller Specification Version 3.00 + * - SDIO Specification Version 3.0 + * - eMMC Specification Version 4.51 + */ + 
if (!object_property_set_uint(sdhci, "sd-spec-version", 3, errp)) { + return; + } + if (!object_property_set_uint(sdhci, "capareg", SDHCI_CAPABILITIES, + errp)) { + return; + } + if (!object_property_set_uint(sdhci, "uhs", UHS_I, errp)) { + return; + } + if (!sysbus_realize(SYS_BUS_DEVICE(sdhci), errp)) { + return; + } + sysbus_mmio_map(sbd, 0, sdhci_addr[i]); + sysbus_connect_irq(sbd, 0, gic_spi[sdhci_intr[i]]); + + /* Alias controller SD bus to the SoC itself */ + bus_name = g_strdup_printf("sd-bus%d", i); + object_property_add_alias(OBJECT(s), bus_name, sdhci, "sd-bus"); + g_free(bus_name); + } + + for (i = 0; i < XLNX_ZYNQMP_NUM_SPIS; i++) { + gchar *bus_name; + + if (!sysbus_realize(SYS_BUS_DEVICE(&s->spi[i]), errp)) { + return; + } + + sysbus_mmio_map(SYS_BUS_DEVICE(&s->spi[i]), 0, spi_addr[i]); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->spi[i]), 0, + gic_spi[spi_intr[i]]); + + /* Alias controller SPI bus to the SoC itself */ + bus_name = g_strdup_printf("spi%d", i); + object_property_add_alias(OBJECT(s), bus_name, + OBJECT(&s->spi[i]), "spi0"); + g_free(bus_name); + } + + if (!sysbus_realize(SYS_BUS_DEVICE(&s->dp), errp)) { + return; + } + sysbus_mmio_map(SYS_BUS_DEVICE(&s->dp), 0, DP_ADDR); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->dp), 0, gic_spi[DP_IRQ]); + + if (!sysbus_realize(SYS_BUS_DEVICE(&s->dpdma), errp)) { + return; + } + object_property_set_link(OBJECT(&s->dp), "dpdma", OBJECT(&s->dpdma), + &error_abort); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->dpdma), 0, DPDMA_ADDR); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->dpdma), 0, gic_spi[DPDMA_IRQ]); + + if (!sysbus_realize(SYS_BUS_DEVICE(&s->ipi), errp)) { + return; + } + sysbus_mmio_map(SYS_BUS_DEVICE(&s->ipi), 0, IPI_ADDR); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->ipi), 0, gic_spi[IPI_IRQ]); + + if (!sysbus_realize(SYS_BUS_DEVICE(&s->rtc), errp)) { + return; + } + sysbus_mmio_map(SYS_BUS_DEVICE(&s->rtc), 0, RTC_ADDR); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->rtc), 0, gic_spi[RTC_IRQ]); + + xlnx_zynqmp_create_bbram(s, gic_spi); + xlnx_zynqmp_create_efuse(s, gic_spi); + xlnx_zynqmp_create_unimp_mmio(s); + + for (i = 0; i < XLNX_ZYNQMP_NUM_GDMA_CH; i++) { + if (!object_property_set_uint(OBJECT(&s->gdma[i]), "bus-width", 128, + errp)) { + return; + } + if (!object_property_set_link(OBJECT(&s->gdma[i]), "dma", + OBJECT(system_memory), errp)) { + return; + } + if (!sysbus_realize(SYS_BUS_DEVICE(&s->gdma[i]), errp)) { + return; + } + + sysbus_mmio_map(SYS_BUS_DEVICE(&s->gdma[i]), 0, gdma_ch_addr[i]); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->gdma[i]), 0, + gic_spi[gdma_ch_intr[i]]); + } + + for (i = 0; i < XLNX_ZYNQMP_NUM_ADMA_CH; i++) { + if (!object_property_set_link(OBJECT(&s->adma[i]), "dma", + OBJECT(system_memory), errp)) { + return; + } + if (!sysbus_realize(SYS_BUS_DEVICE(&s->adma[i]), errp)) { + return; + } + + sysbus_mmio_map(SYS_BUS_DEVICE(&s->adma[i]), 0, adma_ch_addr[i]); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->adma[i]), 0, + gic_spi[adma_ch_intr[i]]); + } + + if (!object_property_set_link(OBJECT(&s->qspi_dma), "dma", + OBJECT(system_memory), errp)) { + return; + } + if (!sysbus_realize(SYS_BUS_DEVICE(&s->qspi_dma), errp)) { + return; + } + + sysbus_mmio_map(SYS_BUS_DEVICE(&s->qspi_dma), 0, QSPI_DMA_ADDR); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->qspi_dma), 0, gic_spi[QSPI_IRQ]); + + if (!object_property_set_link(OBJECT(&s->qspi), "stream-connected-dma", + OBJECT(&s->qspi_dma), errp)) { + return; + } + if (!sysbus_realize(SYS_BUS_DEVICE(&s->qspi), errp)) { + return; + } + sysbus_mmio_map(SYS_BUS_DEVICE(&s->qspi), 0, QSPI_ADDR); + 
sysbus_mmio_map(SYS_BUS_DEVICE(&s->qspi), 1, LQSPI_ADDR); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->qspi), 0, gic_spi[QSPI_IRQ]); + + for (i = 0; i < XLNX_ZYNQMP_NUM_QSPI_BUS; i++) { + g_autofree gchar *bus_name = g_strdup_printf("qspi%d", i); + g_autofree gchar *target_bus = g_strdup_printf("spi%d", i); + + /* Alias controller SPI bus to the SoC itself */ + object_property_add_alias(OBJECT(s), bus_name, + OBJECT(&s->qspi), target_bus); + } +} + +static Property xlnx_zynqmp_props[] = { + DEFINE_PROP_STRING("boot-cpu", XlnxZynqMPState, boot_cpu), + DEFINE_PROP_BOOL("secure", XlnxZynqMPState, secure, false), + DEFINE_PROP_BOOL("virtualization", XlnxZynqMPState, virt, false), + DEFINE_PROP_LINK("ddr-ram", XlnxZynqMPState, ddr_ram, TYPE_MEMORY_REGION, + MemoryRegion *), + DEFINE_PROP_LINK("canbus0", XlnxZynqMPState, canbus[0], TYPE_CAN_BUS, + CanBusState *), + DEFINE_PROP_LINK("canbus1", XlnxZynqMPState, canbus[1], TYPE_CAN_BUS, + CanBusState *), + DEFINE_PROP_END_OF_LIST() +}; + +static void xlnx_zynqmp_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + + device_class_set_props(dc, xlnx_zynqmp_props); + dc->realize = xlnx_zynqmp_realize; + /* Reason: Uses serial_hds in realize function, thus can't be used twice */ + dc->user_creatable = false; +} + +static const TypeInfo xlnx_zynqmp_type_info = { + .name = TYPE_XLNX_ZYNQMP, + .parent = TYPE_DEVICE, + .instance_size = sizeof(XlnxZynqMPState), + .instance_init = xlnx_zynqmp_init, + .class_init = xlnx_zynqmp_class_init, +}; + +static void xlnx_zynqmp_register_types(void) +{ + type_register_static(&xlnx_zynqmp_type_info); +} + +type_init(xlnx_zynqmp_register_types) diff --git a/hw/arm/z2.c b/hw/arm/z2.c new file mode 100644 index 000000000..9c1e87620 --- /dev/null +++ b/hw/arm/z2.c @@ -0,0 +1,355 @@ +/* + * PXA270-based Zipit Z2 device + * + * Copyright (c) 2011 by Vasily Khoruzhick + * + * Code is based on mainstone platform. + * + * This code is licensed under the GNU GPL v2. + * + * Contributions after 2012-01-13 are licensed under the terms of the + * GNU GPL, version 2 or (at your option) any later version. + */ + +#include "qemu/osdep.h" +#include "hw/arm/pxa.h" +#include "hw/arm/boot.h" +#include "hw/i2c/i2c.h" +#include "hw/irq.h" +#include "hw/ssi/ssi.h" +#include "migration/vmstate.h" +#include "hw/boards.h" +#include "hw/block/flash.h" +#include "ui/console.h" +#include "hw/audio/wm8750.h" +#include "audio/audio.h" +#include "exec/address-spaces.h" +#include "cpu.h" +#include "qom/object.h" + +#ifdef DEBUG_Z2 +#define DPRINTF(fmt, ...) \ + printf(fmt, ## __VA_ARGS__) +#else +#define DPRINTF(fmt, ...) +#endif + +static const struct keymap map[0x100] = { + [0 ... 
0xff] = { -1, -1 }, + [0x3b] = {0, 0}, /* Option = F1 */ + [0xc8] = {0, 1}, /* Up */ + [0xd0] = {0, 2}, /* Down */ + [0xcb] = {0, 3}, /* Left */ + [0xcd] = {0, 4}, /* Right */ + [0xcf] = {0, 5}, /* End */ + [0x0d] = {0, 6}, /* KPPLUS */ + [0xc7] = {1, 0}, /* Home */ + [0x10] = {1, 1}, /* Q */ + [0x17] = {1, 2}, /* I */ + [0x22] = {1, 3}, /* G */ + [0x2d] = {1, 4}, /* X */ + [0x1c] = {1, 5}, /* Enter */ + [0x0c] = {1, 6}, /* KPMINUS */ + [0xc9] = {2, 0}, /* PageUp */ + [0x11] = {2, 1}, /* W */ + [0x18] = {2, 2}, /* O */ + [0x23] = {2, 3}, /* H */ + [0x2e] = {2, 4}, /* C */ + [0x38] = {2, 5}, /* LeftAlt */ + [0xd1] = {3, 0}, /* PageDown */ + [0x12] = {3, 1}, /* E */ + [0x19] = {3, 2}, /* P */ + [0x24] = {3, 3}, /* J */ + [0x2f] = {3, 4}, /* V */ + [0x2a] = {3, 5}, /* LeftShift */ + [0x01] = {4, 0}, /* Esc */ + [0x13] = {4, 1}, /* R */ + [0x1e] = {4, 2}, /* A */ + [0x25] = {4, 3}, /* K */ + [0x30] = {4, 4}, /* B */ + [0x1d] = {4, 5}, /* LeftCtrl */ + [0x0f] = {5, 0}, /* Tab */ + [0x14] = {5, 1}, /* T */ + [0x1f] = {5, 2}, /* S */ + [0x26] = {5, 3}, /* L */ + [0x31] = {5, 4}, /* N */ + [0x39] = {5, 5}, /* Space */ + [0x3c] = {6, 0}, /* Stop = F2 */ + [0x15] = {6, 1}, /* Y */ + [0x20] = {6, 2}, /* D */ + [0x0e] = {6, 3}, /* Backspace */ + [0x32] = {6, 4}, /* M */ + [0x33] = {6, 5}, /* Comma */ + [0x3d] = {7, 0}, /* Play = F3 */ + [0x16] = {7, 1}, /* U */ + [0x21] = {7, 2}, /* F */ + [0x2c] = {7, 3}, /* Z */ + [0x27] = {7, 4}, /* Semicolon */ + [0x34] = {7, 5}, /* Dot */ +}; + +#define Z2_RAM_SIZE 0x02000000 +#define Z2_FLASH_BASE 0x00000000 +#define Z2_FLASH_SIZE 0x00800000 + +static struct arm_boot_info z2_binfo = { + .loader_start = PXA2XX_SDRAM_BASE, + .ram_size = Z2_RAM_SIZE, +}; + +#define Z2_GPIO_SD_DETECT 96 +#define Z2_GPIO_AC_IN 0 +#define Z2_GPIO_KEY_ON 1 +#define Z2_GPIO_LCD_CS 88 + +struct ZipitLCD { + SSIPeripheral ssidev; + int32_t selected; + int32_t enabled; + uint8_t buf[3]; + uint32_t cur_reg; + int pos; +}; + +#define TYPE_ZIPIT_LCD "zipit-lcd" +OBJECT_DECLARE_SIMPLE_TYPE(ZipitLCD, ZIPIT_LCD) + +static uint32_t zipit_lcd_transfer(SSIPeripheral *dev, uint32_t value) +{ + ZipitLCD *z = ZIPIT_LCD(dev); + uint16_t val; + if (z->selected) { + z->buf[z->pos] = value & 0xff; + z->pos++; + } + if (z->pos == 3) { + switch (z->buf[0]) { + case 0x74: + DPRINTF("%s: reg: 0x%.2x\n", __func__, z->buf[2]); + z->cur_reg = z->buf[2]; + break; + case 0x76: + val = z->buf[1] << 8 | z->buf[2]; + DPRINTF("%s: value: 0x%.4x\n", __func__, val); + if (z->cur_reg == 0x22 && val == 0x0000) { + z->enabled = 1; + printf("%s: LCD enabled\n", __func__); + } else if (z->cur_reg == 0x10 && val == 0x0000) { + z->enabled = 0; + printf("%s: LCD disabled\n", __func__); + } + break; + default: + DPRINTF("%s: unknown command!\n", __func__); + break; + } + z->pos = 0; + } + return 0; +} + +static void z2_lcd_cs(void *opaque, int line, int level) +{ + ZipitLCD *z2_lcd = opaque; + z2_lcd->selected = !level; +} + +static void zipit_lcd_realize(SSIPeripheral *dev, Error **errp) +{ + ZipitLCD *z = ZIPIT_LCD(dev); + z->selected = 0; + z->enabled = 0; + z->pos = 0; +} + +static const VMStateDescription vmstate_zipit_lcd_state = { + .name = "zipit-lcd", + .version_id = 2, + .minimum_version_id = 2, + .fields = (VMStateField[]) { + VMSTATE_SSI_PERIPHERAL(ssidev, ZipitLCD), + VMSTATE_INT32(selected, ZipitLCD), + VMSTATE_INT32(enabled, ZipitLCD), + VMSTATE_BUFFER(buf, ZipitLCD), + VMSTATE_UINT32(cur_reg, ZipitLCD), + VMSTATE_INT32(pos, ZipitLCD), + VMSTATE_END_OF_LIST(), + } +}; + +static void 
zipit_lcd_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + SSIPeripheralClass *k = SSI_PERIPHERAL_CLASS(klass); + + k->realize = zipit_lcd_realize; + k->transfer = zipit_lcd_transfer; + dc->vmsd = &vmstate_zipit_lcd_state; +} + +static const TypeInfo zipit_lcd_info = { + .name = TYPE_ZIPIT_LCD, + .parent = TYPE_SSI_PERIPHERAL, + .instance_size = sizeof(ZipitLCD), + .class_init = zipit_lcd_class_init, +}; + +#define TYPE_AER915 "aer915" +OBJECT_DECLARE_SIMPLE_TYPE(AER915State, AER915) + +struct AER915State { + I2CSlave parent_obj; + + int len; + uint8_t buf[3]; +}; + +static int aer915_send(I2CSlave *i2c, uint8_t data) +{ + AER915State *s = AER915(i2c); + + s->buf[s->len] = data; + if (s->len++ > 2) { + DPRINTF("%s: message too long (%i bytes)\n", + __func__, s->len); + return 1; + } + + if (s->len == 2) { + DPRINTF("%s: reg %d value 0x%02x\n", __func__, + s->buf[0], s->buf[1]); + } + + return 0; +} + +static int aer915_event(I2CSlave *i2c, enum i2c_event event) +{ + AER915State *s = AER915(i2c); + + switch (event) { + case I2C_START_SEND: + s->len = 0; + break; + case I2C_START_RECV: + if (s->len != 1) { + DPRINTF("%s: short message!?\n", __func__); + } + break; + case I2C_FINISH: + break; + default: + break; + } + + return 0; +} + +static uint8_t aer915_recv(I2CSlave *slave) +{ + AER915State *s = AER915(slave); + int retval = 0x00; + + switch (s->buf[0]) { + /* Return hardcoded battery voltage, + * 0xf0 means ~4.1V + */ + case 0x02: + retval = 0xf0; + break; + /* Return 0x00 for other regs, + * we don't know what they are for, + * anyway they return 0x00 on real hardware. + */ + default: + break; + } + + return retval; +} + +static const VMStateDescription vmstate_aer915_state = { + .name = "aer915", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_INT32(len, AER915State), + VMSTATE_BUFFER(buf, AER915State), + VMSTATE_END_OF_LIST(), + } +}; + +static void aer915_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + I2CSlaveClass *k = I2C_SLAVE_CLASS(klass); + + k->event = aer915_event; + k->recv = aer915_recv; + k->send = aer915_send; + dc->vmsd = &vmstate_aer915_state; +} + +static const TypeInfo aer915_info = { + .name = TYPE_AER915, + .parent = TYPE_I2C_SLAVE, + .instance_size = sizeof(AER915State), + .class_init = aer915_class_init, +}; + +static void z2_init(MachineState *machine) +{ + MemoryRegion *address_space_mem = get_system_memory(); + uint32_t sector_len = 0x10000; + PXA2xxState *mpu; + DriveInfo *dinfo; + void *z2_lcd; + I2CBus *bus; + DeviceState *wm; + + /* Setup CPU & memory */ + mpu = pxa270_init(address_space_mem, z2_binfo.ram_size, machine->cpu_type); + + dinfo = drive_get(IF_PFLASH, 0, 0); + if (!pflash_cfi01_register(Z2_FLASH_BASE, "z2.flash0", Z2_FLASH_SIZE, + dinfo ? 
blk_by_legacy_dinfo(dinfo) : NULL, + sector_len, 4, 0, 0, 0, 0, 0)) { + error_report("Error registering flash memory"); + exit(1); + } + + /* setup keypad */ + pxa27x_register_keypad(mpu->kp, map, 0x100); + + /* MMC/SD host */ + pxa2xx_mmci_handlers(mpu->mmc, + NULL, + qdev_get_gpio_in(mpu->gpio, Z2_GPIO_SD_DETECT)); + + type_register_static(&zipit_lcd_info); + type_register_static(&aer915_info); + z2_lcd = ssi_create_peripheral(mpu->ssp[1], TYPE_ZIPIT_LCD); + bus = pxa2xx_i2c_bus(mpu->i2c[0]); + i2c_slave_create_simple(bus, TYPE_AER915, 0x55); + wm = DEVICE(i2c_slave_create_simple(bus, TYPE_WM8750, 0x1b)); + mpu->i2s->opaque = wm; + mpu->i2s->codec_out = wm8750_dac_dat; + mpu->i2s->codec_in = wm8750_adc_dat; + wm8750_data_req_set(wm, mpu->i2s->data_req, mpu->i2s); + + qdev_connect_gpio_out(mpu->gpio, Z2_GPIO_LCD_CS, + qemu_allocate_irq(z2_lcd_cs, z2_lcd, 0)); + + z2_binfo.board_id = 0x6dd; + arm_load_kernel(mpu->cpu, machine, &z2_binfo); +} + +static void z2_machine_init(MachineClass *mc) +{ + mc->desc = "Zipit Z2 (PXA27x)"; + mc->init = z2_init; + mc->ignore_memory_transaction_failures = true; + mc->default_cpu_type = ARM_CPU_TYPE_NAME("pxa270-c5"); +} + +DEFINE_MACHINE("z2", z2_machine_init) diff --git a/hw/audio/Kconfig b/hw/audio/Kconfig new file mode 100644 index 000000000..e9c6fed82 --- /dev/null +++ b/hw/audio/Kconfig @@ -0,0 +1,52 @@ +config SB16 + bool + default y + depends on ISA_BUS + +config ES1370 + bool + default y if PCI_DEVICES + depends on PCI + +config AC97 + bool + default y if PCI_DEVICES + depends on PCI + +config ADLIB + bool + default y + depends on ISA_BUS + +config GUS + bool + default y + depends on ISA_BUS + +config CS4231A + bool + default y + depends on ISA_BUS + +config HDA + bool + default y if PCI_DEVICES + depends on PCI + +config PCSPK + bool + default y + depends on I8254 + +config WM8750 + bool + depends on I2C + +config PL041 + bool + +config CS4231 + bool + +config MARVELL_88W8618 + bool diff --git a/hw/audio/ac97.c b/hw/audio/ac97.c new file mode 100644 index 000000000..3cb813106 --- /dev/null +++ b/hw/audio/ac97.c @@ -0,0 +1,1437 @@ +/* + * Copyright (C) 2006 InnoTek Systemberatung GmbH + * + * This file is part of VirtualBox Open Source Edition (OSE), as + * available from http://www.virtualbox.org. This file is free software; + * you can redistribute it and/or modify it under the terms of the GNU + * General Public License as published by the Free Software Foundation, + * in version 2 as it comes in the "COPYING" file of the VirtualBox OSE + * distribution. VirtualBox OSE is distributed in the hope that it will + * be useful, but WITHOUT ANY WARRANTY of any kind. + * + * If you received this file as part of a commercial VirtualBox + * distribution, then only the terms of your commercial VirtualBox + * license agreement apply instead of the previous paragraph. + * + * Contributions after 2012-01-13 are licensed under the terms of the + * GNU GPL, version 2 or (at your option) any later version. 
+ */ + +#include "qemu/osdep.h" +#include "hw/audio/soundhw.h" +#include "audio/audio.h" +#include "hw/pci/pci.h" +#include "hw/qdev-properties.h" +#include "migration/vmstate.h" +#include "qemu/module.h" +#include "sysemu/dma.h" +#include "qom/object.h" + +enum { + AC97_Reset = 0x00, + AC97_Master_Volume_Mute = 0x02, + AC97_Headphone_Volume_Mute = 0x04, + AC97_Master_Volume_Mono_Mute = 0x06, + AC97_Master_Tone_RL = 0x08, + AC97_PC_BEEP_Volume_Mute = 0x0A, + AC97_Phone_Volume_Mute = 0x0C, + AC97_Mic_Volume_Mute = 0x0E, + AC97_Line_In_Volume_Mute = 0x10, + AC97_CD_Volume_Mute = 0x12, + AC97_Video_Volume_Mute = 0x14, + AC97_Aux_Volume_Mute = 0x16, + AC97_PCM_Out_Volume_Mute = 0x18, + AC97_Record_Select = 0x1A, + AC97_Record_Gain_Mute = 0x1C, + AC97_Record_Gain_Mic_Mute = 0x1E, + AC97_General_Purpose = 0x20, + AC97_3D_Control = 0x22, + AC97_AC_97_RESERVED = 0x24, + AC97_Powerdown_Ctrl_Stat = 0x26, + AC97_Extended_Audio_ID = 0x28, + AC97_Extended_Audio_Ctrl_Stat = 0x2A, + AC97_PCM_Front_DAC_Rate = 0x2C, + AC97_PCM_Surround_DAC_Rate = 0x2E, + AC97_PCM_LFE_DAC_Rate = 0x30, + AC97_PCM_LR_ADC_Rate = 0x32, + AC97_MIC_ADC_Rate = 0x34, + AC97_6Ch_Vol_C_LFE_Mute = 0x36, + AC97_6Ch_Vol_L_R_Surround_Mute = 0x38, + AC97_Vendor_Reserved = 0x58, + AC97_Sigmatel_Analog = 0x6c, /* We emulate a Sigmatel codec */ + AC97_Sigmatel_Dac2Invert = 0x6e, /* We emulate a Sigmatel codec */ + AC97_Vendor_ID1 = 0x7c, + AC97_Vendor_ID2 = 0x7e +}; + +#define SOFT_VOLUME +#define SR_FIFOE 16 /* rwc */ +#define SR_BCIS 8 /* rwc */ +#define SR_LVBCI 4 /* rwc */ +#define SR_CELV 2 /* ro */ +#define SR_DCH 1 /* ro */ +#define SR_VALID_MASK ((1 << 5) - 1) +#define SR_WCLEAR_MASK (SR_FIFOE | SR_BCIS | SR_LVBCI) +#define SR_RO_MASK (SR_DCH | SR_CELV) +#define SR_INT_MASK (SR_FIFOE | SR_BCIS | SR_LVBCI) + +#define CR_IOCE 16 /* rw */ +#define CR_FEIE 8 /* rw */ +#define CR_LVBIE 4 /* rw */ +#define CR_RR 2 /* rw */ +#define CR_RPBM 1 /* rw */ +#define CR_VALID_MASK ((1 << 5) - 1) +#define CR_DONT_CLEAR_MASK (CR_IOCE | CR_FEIE | CR_LVBIE) + +#define GC_WR 4 /* rw */ +#define GC_CR 2 /* rw */ +#define GC_VALID_MASK ((1 << 6) - 1) + +#define GS_MD3 (1<<17) /* rw */ +#define GS_AD3 (1<<16) /* rw */ +#define GS_RCS (1<<15) /* rwc */ +#define GS_B3S12 (1<<14) /* ro */ +#define GS_B2S12 (1<<13) /* ro */ +#define GS_B1S12 (1<<12) /* ro */ +#define GS_S1R1 (1<<11) /* rwc */ +#define GS_S0R1 (1<<10) /* rwc */ +#define GS_S1CR (1<<9) /* ro */ +#define GS_S0CR (1<<8) /* ro */ +#define GS_MINT (1<<7) /* ro */ +#define GS_POINT (1<<6) /* ro */ +#define GS_PIINT (1<<5) /* ro */ +#define GS_RSRVD ((1<<4)|(1<<3)) +#define GS_MOINT (1<<2) /* ro */ +#define GS_MIINT (1<<1) /* ro */ +#define GS_GSCI 1 /* rwc */ +#define GS_RO_MASK (GS_B3S12| \ + GS_B2S12| \ + GS_B1S12| \ + GS_S1CR| \ + GS_S0CR| \ + GS_MINT| \ + GS_POINT| \ + GS_PIINT| \ + GS_RSRVD| \ + GS_MOINT| \ + GS_MIINT) +#define GS_VALID_MASK ((1 << 18) - 1) +#define GS_WCLEAR_MASK (GS_RCS|GS_S1R1|GS_S0R1|GS_GSCI) + +#define BD_IOC (1<<31) +#define BD_BUP (1<<30) + +#define EACS_VRA 1 +#define EACS_VRM 8 + +#define MUTE_SHIFT 15 + +#define TYPE_AC97 "AC97" +OBJECT_DECLARE_SIMPLE_TYPE(AC97LinkState, AC97) + +#define REC_MASK 7 +enum { + REC_MIC = 0, + REC_CD, + REC_VIDEO, + REC_AUX, + REC_LINE_IN, + REC_STEREO_MIX, + REC_MONO_MIX, + REC_PHONE +}; + +typedef struct BD { + uint32_t addr; + uint32_t ctl_len; +} BD; + +typedef struct AC97BusMasterRegs { + uint32_t bdbar; /* rw 0 */ + uint8_t civ; /* ro 0 */ + uint8_t lvi; /* rw 0 */ + uint16_t sr; /* rw 1 */ + uint16_t picb; /* ro 0 */ + uint8_t piv; 
/* ro 0 */ + uint8_t cr; /* rw 0 */ + unsigned int bd_valid; + BD bd; +} AC97BusMasterRegs; + +struct AC97LinkState { + PCIDevice dev; + QEMUSoundCard card; + uint32_t glob_cnt; + uint32_t glob_sta; + uint32_t cas; + uint32_t last_samp; + AC97BusMasterRegs bm_regs[3]; + uint8_t mixer_data[256]; + SWVoiceIn *voice_pi; + SWVoiceOut *voice_po; + SWVoiceIn *voice_mc; + int invalid_freq[3]; + uint8_t silence[128]; + int bup_flag; + MemoryRegion io_nam; + MemoryRegion io_nabm; +}; + +enum { + BUP_SET = 1, + BUP_LAST = 2 +}; + +#ifdef DEBUG_AC97 +#define dolog(...) AUD_log ("ac97", __VA_ARGS__) +#else +#define dolog(...) +#endif + +#define MKREGS(prefix, start) \ +enum { \ + prefix ## _BDBAR = start, \ + prefix ## _CIV = start + 4, \ + prefix ## _LVI = start + 5, \ + prefix ## _SR = start + 6, \ + prefix ## _PICB = start + 8, \ + prefix ## _PIV = start + 10, \ + prefix ## _CR = start + 11 \ +} + +enum { + PI_INDEX = 0, + PO_INDEX, + MC_INDEX, + LAST_INDEX +}; + +MKREGS (PI, PI_INDEX * 16); +MKREGS (PO, PO_INDEX * 16); +MKREGS (MC, MC_INDEX * 16); + +enum { + GLOB_CNT = 0x2c, + GLOB_STA = 0x30, + CAS = 0x34 +}; + +#define GET_BM(index) (((index) >> 4) & 3) + +static void po_callback (void *opaque, int free); +static void pi_callback (void *opaque, int avail); +static void mc_callback (void *opaque, int avail); + +static void warm_reset (AC97LinkState *s) +{ + (void) s; +} + +static void cold_reset (AC97LinkState * s) +{ + (void) s; +} + +static void fetch_bd (AC97LinkState *s, AC97BusMasterRegs *r) +{ + uint8_t b[8]; + + pci_dma_read (&s->dev, r->bdbar + r->civ * 8, b, 8); + r->bd_valid = 1; + r->bd.addr = le32_to_cpu (*(uint32_t *) &b[0]) & ~3; + r->bd.ctl_len = le32_to_cpu (*(uint32_t *) &b[4]); + r->picb = r->bd.ctl_len & 0xffff; + dolog ("bd %2d addr=%#x ctl=%#06x len=%#x(%d bytes)\n", + r->civ, r->bd.addr, r->bd.ctl_len >> 16, + r->bd.ctl_len & 0xffff, + (r->bd.ctl_len & 0xffff) << 1); +} + +static void update_sr (AC97LinkState *s, AC97BusMasterRegs *r, uint32_t new_sr) +{ + int event = 0; + int level = 0; + uint32_t new_mask = new_sr & SR_INT_MASK; + uint32_t old_mask = r->sr & SR_INT_MASK; + uint32_t masks[] = {GS_PIINT, GS_POINT, GS_MINT}; + + if (new_mask ^ old_mask) { + /** @todo is IRQ deasserted when only one of status bits is cleared? */ + if (!new_mask) { + event = 1; + level = 0; + } + else { + if ((new_mask & SR_LVBCI) && (r->cr & CR_LVBIE)) { + event = 1; + level = 1; + } + if ((new_mask & SR_BCIS) && (r->cr & CR_IOCE)) { + event = 1; + level = 1; + } + } + } + + r->sr = new_sr; + + dolog ("IOC%d LVB%d sr=%#x event=%d level=%d\n", + r->sr & SR_BCIS, r->sr & SR_LVBCI, + r->sr, + event, level); + + if (!event) + return; + + if (level) { + s->glob_sta |= masks[r - s->bm_regs]; + dolog ("set irq level=1\n"); + pci_irq_assert(&s->dev); + } + else { + s->glob_sta &= ~masks[r - s->bm_regs]; + dolog ("set irq level=0\n"); + pci_irq_deassert(&s->dev); + } +} + +static void voice_set_active (AC97LinkState *s, int bm_index, int on) +{ + switch (bm_index) { + case PI_INDEX: + AUD_set_active_in (s->voice_pi, on); + break; + + case PO_INDEX: + AUD_set_active_out (s->voice_po, on); + break; + + case MC_INDEX: + AUD_set_active_in (s->voice_mc, on); + break; + + default: + AUD_log ("ac97", "invalid bm_index(%d) in voice_set_active", bm_index); + break; + } +} + +static void reset_bm_regs (AC97LinkState *s, AC97BusMasterRegs *r) +{ + dolog ("reset_bm_regs\n"); + r->bdbar = 0; + r->civ = 0; + r->lvi = 0; + /** todo do we need to do that? 
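+ * (SR defaults to 1, i.e. SR_DCH, per the reset values noted in + * AC97BusMasterRegs above, so forcing the halted state here at least + * matches the documented reset value.)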
*/ + update_sr (s, r, SR_DCH); + r->picb = 0; + r->piv = 0; + r->cr = r->cr & CR_DONT_CLEAR_MASK; + r->bd_valid = 0; + + voice_set_active (s, r - s->bm_regs, 0); + memset (s->silence, 0, sizeof (s->silence)); +} + +static void mixer_store (AC97LinkState *s, uint32_t i, uint16_t v) +{ + if (i + 2 > sizeof (s->mixer_data)) { + dolog ("mixer_store: index %d out of bounds %zd\n", + i, sizeof (s->mixer_data)); + return; + } + + s->mixer_data[i + 0] = v & 0xff; + s->mixer_data[i + 1] = v >> 8; +} + +static uint16_t mixer_load (AC97LinkState *s, uint32_t i) +{ + uint16_t val = 0xffff; + + if (i + 2 > sizeof (s->mixer_data)) { + dolog ("mixer_load: index %d out of bounds %zd\n", + i, sizeof (s->mixer_data)); + } + else { + val = s->mixer_data[i + 0] | (s->mixer_data[i + 1] << 8); + } + + return val; +} + +static void open_voice (AC97LinkState *s, int index, int freq) +{ + struct audsettings as; + + as.freq = freq; + as.nchannels = 2; + as.fmt = AUDIO_FORMAT_S16; + as.endianness = 0; + + if (freq > 0) { + s->invalid_freq[index] = 0; + switch (index) { + case PI_INDEX: + s->voice_pi = AUD_open_in ( + &s->card, + s->voice_pi, + "ac97.pi", + s, + pi_callback, + &as + ); + break; + + case PO_INDEX: + s->voice_po = AUD_open_out ( + &s->card, + s->voice_po, + "ac97.po", + s, + po_callback, + &as + ); + break; + + case MC_INDEX: + s->voice_mc = AUD_open_in ( + &s->card, + s->voice_mc, + "ac97.mc", + s, + mc_callback, + &as + ); + break; + } + } + else { + s->invalid_freq[index] = freq; + switch (index) { + case PI_INDEX: + AUD_close_in (&s->card, s->voice_pi); + s->voice_pi = NULL; + break; + + case PO_INDEX: + AUD_close_out (&s->card, s->voice_po); + s->voice_po = NULL; + break; + + case MC_INDEX: + AUD_close_in (&s->card, s->voice_mc); + s->voice_mc = NULL; + break; + } + } +} + +static void reset_voices (AC97LinkState *s, uint8_t active[LAST_INDEX]) +{ + uint16_t freq; + + freq = mixer_load (s, AC97_PCM_LR_ADC_Rate); + open_voice (s, PI_INDEX, freq); + AUD_set_active_in (s->voice_pi, active[PI_INDEX]); + + freq = mixer_load (s, AC97_PCM_Front_DAC_Rate); + open_voice (s, PO_INDEX, freq); + AUD_set_active_out (s->voice_po, active[PO_INDEX]); + + freq = mixer_load (s, AC97_MIC_ADC_Rate); + open_voice (s, MC_INDEX, freq); + AUD_set_active_in (s->voice_mc, active[MC_INDEX]); +} + +static void get_volume (uint16_t vol, uint16_t mask, int inverse, + int *mute, uint8_t *lvol, uint8_t *rvol) +{ + *mute = (vol >> MUTE_SHIFT) & 1; + *rvol = (255 * (vol & mask)) / mask; + *lvol = (255 * ((vol >> 8) & mask)) / mask; + + if (inverse) { + *rvol = 255 - *rvol; + *lvol = 255 - *lvol; + } +} + +static void update_combined_volume_out (AC97LinkState *s) +{ + uint8_t lvol, rvol, plvol, prvol; + int mute, pmute; + + get_volume (mixer_load (s, AC97_Master_Volume_Mute), 0x3f, 1, + &mute, &lvol, &rvol); + get_volume (mixer_load (s, AC97_PCM_Out_Volume_Mute), 0x1f, 1, + &pmute, &plvol, &prvol); + + mute = mute | pmute; + lvol = (lvol * plvol) / 255; + rvol = (rvol * prvol) / 255; + + AUD_set_volume_out (s->voice_po, mute, lvol, rvol); +} + +static void update_volume_in (AC97LinkState *s) +{ + uint8_t lvol, rvol; + int mute; + + get_volume (mixer_load (s, AC97_Record_Gain_Mute), 0x0f, 0, + &mute, &lvol, &rvol); + + AUD_set_volume_in (s->voice_pi, mute, lvol, rvol); +} + +static void set_volume (AC97LinkState *s, int index, uint32_t val) +{ + switch (index) { + case AC97_Master_Volume_Mute: + val &= 0xbf3f; + mixer_store (s, index, val); + update_combined_volume_out (s); + break; + case AC97_PCM_Out_Volume_Mute: + val &= 0x9f1f; 
+ mixer_store (s, index, val); + update_combined_volume_out (s); + break; + case AC97_Record_Gain_Mute: + val &= 0x8f0f; + mixer_store (s, index, val); + update_volume_in (s); + break; + } +} + +static void record_select (AC97LinkState *s, uint32_t val) +{ + uint8_t rs = val & REC_MASK; + uint8_t ls = (val >> 8) & REC_MASK; + mixer_store (s, AC97_Record_Select, rs | (ls << 8)); +} + +static void mixer_reset (AC97LinkState *s) +{ + uint8_t active[LAST_INDEX]; + + dolog ("mixer_reset\n"); + memset (s->mixer_data, 0, sizeof (s->mixer_data)); + memset (active, 0, sizeof (active)); + mixer_store (s, AC97_Reset , 0x0000); /* 6940 */ + mixer_store (s, AC97_Headphone_Volume_Mute , 0x0000); + mixer_store (s, AC97_Master_Volume_Mono_Mute , 0x0000); + mixer_store (s, AC97_Master_Tone_RL, 0x0000); + mixer_store (s, AC97_PC_BEEP_Volume_Mute , 0x0000); + mixer_store (s, AC97_Phone_Volume_Mute , 0x0000); + mixer_store (s, AC97_Mic_Volume_Mute , 0x0000); + mixer_store (s, AC97_Line_In_Volume_Mute , 0x0000); + mixer_store (s, AC97_CD_Volume_Mute , 0x0000); + mixer_store (s, AC97_Video_Volume_Mute , 0x0000); + mixer_store (s, AC97_Aux_Volume_Mute , 0x0000); + mixer_store (s, AC97_Record_Gain_Mic_Mute , 0x0000); + mixer_store (s, AC97_General_Purpose , 0x0000); + mixer_store (s, AC97_3D_Control , 0x0000); + mixer_store (s, AC97_Powerdown_Ctrl_Stat , 0x000f); + + /* + * Sigmatel 9700 (STAC9700) + */ + mixer_store (s, AC97_Vendor_ID1 , 0x8384); + mixer_store (s, AC97_Vendor_ID2 , 0x7600); /* 7608 */ + + mixer_store (s, AC97_Extended_Audio_ID , 0x0809); + mixer_store (s, AC97_Extended_Audio_Ctrl_Stat, 0x0009); + mixer_store (s, AC97_PCM_Front_DAC_Rate , 0xbb80); + mixer_store (s, AC97_PCM_Surround_DAC_Rate , 0xbb80); + mixer_store (s, AC97_PCM_LFE_DAC_Rate , 0xbb80); + mixer_store (s, AC97_PCM_LR_ADC_Rate , 0xbb80); + mixer_store (s, AC97_MIC_ADC_Rate , 0xbb80); + + record_select (s, 0); + set_volume (s, AC97_Master_Volume_Mute, 0x8000); + set_volume (s, AC97_PCM_Out_Volume_Mute, 0x8808); + set_volume (s, AC97_Record_Gain_Mute, 0x8808); + + reset_voices (s, active); +} + +/** + * Native audio mixer + * I/O Reads + */ +static uint32_t nam_readb (void *opaque, uint32_t addr) +{ + AC97LinkState *s = opaque; + dolog ("U nam readb %#x\n", addr); + s->cas = 0; + return ~0U; +} + +static uint32_t nam_readw (void *opaque, uint32_t addr) +{ + AC97LinkState *s = opaque; + uint32_t index = addr; + s->cas = 0; + return mixer_load(s, index); +} + +static uint32_t nam_readl (void *opaque, uint32_t addr) +{ + AC97LinkState *s = opaque; + dolog ("U nam readl %#x\n", addr); + s->cas = 0; + return ~0U; +} + +/** + * Native audio mixer + * I/O Writes + */ +static void nam_writeb (void *opaque, uint32_t addr, uint32_t val) +{ + AC97LinkState *s = opaque; + dolog ("U nam writeb %#x <- %#x\n", addr, val); + s->cas = 0; +} + +static void nam_writew (void *opaque, uint32_t addr, uint32_t val) +{ + AC97LinkState *s = opaque; + uint32_t index = addr; + s->cas = 0; + switch (index) { + case AC97_Reset: + mixer_reset (s); + break; + case AC97_Powerdown_Ctrl_Stat: + val &= ~0x800f; + val |= mixer_load (s, index) & 0xf; + mixer_store (s, index, val); + break; + case AC97_PCM_Out_Volume_Mute: + case AC97_Master_Volume_Mute: + case AC97_Record_Gain_Mute: + set_volume (s, index, val); + break; + case AC97_Record_Select: + record_select (s, val); + break; + case AC97_Vendor_ID1: + case AC97_Vendor_ID2: + dolog ("Attempt to write vendor ID to %#x\n", val); + break; + case AC97_Extended_Audio_ID: + dolog ("Attempt to write extended audio ID to 
%#x\n", val); + break; + case AC97_Extended_Audio_Ctrl_Stat: + if (!(val & EACS_VRA)) { + mixer_store (s, AC97_PCM_Front_DAC_Rate, 0xbb80); + mixer_store (s, AC97_PCM_LR_ADC_Rate, 0xbb80); + open_voice (s, PI_INDEX, 48000); + open_voice (s, PO_INDEX, 48000); + } + if (!(val & EACS_VRM)) { + mixer_store (s, AC97_MIC_ADC_Rate, 0xbb80); + open_voice (s, MC_INDEX, 48000); + } + dolog ("Setting extended audio control to %#x\n", val); + mixer_store (s, AC97_Extended_Audio_Ctrl_Stat, val); + break; + case AC97_PCM_Front_DAC_Rate: + if (mixer_load (s, AC97_Extended_Audio_Ctrl_Stat) & EACS_VRA) { + mixer_store (s, index, val); + dolog ("Set front DAC rate to %d\n", val); + open_voice (s, PO_INDEX, val); + } + else { + dolog ("Attempt to set front DAC rate to %d, " + "but VRA is not set\n", + val); + } + break; + case AC97_MIC_ADC_Rate: + if (mixer_load (s, AC97_Extended_Audio_Ctrl_Stat) & EACS_VRM) { + mixer_store (s, index, val); + dolog ("Set MIC ADC rate to %d\n", val); + open_voice (s, MC_INDEX, val); + } + else { + dolog ("Attempt to set MIC ADC rate to %d, " + "but VRM is not set\n", + val); + } + break; + case AC97_PCM_LR_ADC_Rate: + if (mixer_load (s, AC97_Extended_Audio_Ctrl_Stat) & EACS_VRA) { + mixer_store (s, index, val); + dolog ("Set front LR ADC rate to %d\n", val); + open_voice (s, PI_INDEX, val); + } + else { + dolog ("Attempt to set LR ADC rate to %d, but VRA is not set\n", + val); + } + break; + case AC97_Headphone_Volume_Mute: + case AC97_Master_Volume_Mono_Mute: + case AC97_Master_Tone_RL: + case AC97_PC_BEEP_Volume_Mute: + case AC97_Phone_Volume_Mute: + case AC97_Mic_Volume_Mute: + case AC97_Line_In_Volume_Mute: + case AC97_CD_Volume_Mute: + case AC97_Video_Volume_Mute: + case AC97_Aux_Volume_Mute: + case AC97_Record_Gain_Mic_Mute: + case AC97_General_Purpose: + case AC97_3D_Control: + case AC97_Sigmatel_Analog: + case AC97_Sigmatel_Dac2Invert: + /* None of the features in these regs are emulated, so they are RO */ + break; + default: + dolog ("U nam writew %#x <- %#x\n", addr, val); + mixer_store (s, index, val); + break; + } +} + +static void nam_writel (void *opaque, uint32_t addr, uint32_t val) +{ + AC97LinkState *s = opaque; + dolog ("U nam writel %#x <- %#x\n", addr, val); + s->cas = 0; +} + +/** + * Native audio bus master + * I/O Reads + */ +static uint32_t nabm_readb (void *opaque, uint32_t addr) +{ + AC97LinkState *s = opaque; + AC97BusMasterRegs *r = NULL; + uint32_t index = addr; + uint32_t val = ~0U; + + switch (index) { + case CAS: + dolog ("CAS %d\n", s->cas); + val = s->cas; + s->cas = 1; + break; + case PI_CIV: + case PO_CIV: + case MC_CIV: + r = &s->bm_regs[GET_BM (index)]; + val = r->civ; + dolog ("CIV[%d] -> %#x\n", GET_BM (index), val); + break; + case PI_LVI: + case PO_LVI: + case MC_LVI: + r = &s->bm_regs[GET_BM (index)]; + val = r->lvi; + dolog ("LVI[%d] -> %#x\n", GET_BM (index), val); + break; + case PI_PIV: + case PO_PIV: + case MC_PIV: + r = &s->bm_regs[GET_BM (index)]; + val = r->piv; + dolog ("PIV[%d] -> %#x\n", GET_BM (index), val); + break; + case PI_CR: + case PO_CR: + case MC_CR: + r = &s->bm_regs[GET_BM (index)]; + val = r->cr; + dolog ("CR[%d] -> %#x\n", GET_BM (index), val); + break; + case PI_SR: + case PO_SR: + case MC_SR: + r = &s->bm_regs[GET_BM (index)]; + val = r->sr & 0xff; + dolog ("SRb[%d] -> %#x\n", GET_BM (index), val); + break; + default: + dolog ("U nabm readb %#x -> %#x\n", addr, val); + break; + } + return val; +} + +static uint32_t nabm_readw (void *opaque, uint32_t addr) +{ + AC97LinkState *s = opaque; + AC97BusMasterRegs 
*r = NULL; + uint32_t index = addr; + uint32_t val = ~0U; + + switch (index) { + case PI_SR: + case PO_SR: + case MC_SR: + r = &s->bm_regs[GET_BM (index)]; + val = r->sr; + dolog ("SR[%d] -> %#x\n", GET_BM (index), val); + break; + case PI_PICB: + case PO_PICB: + case MC_PICB: + r = &s->bm_regs[GET_BM (index)]; + val = r->picb; + dolog ("PICB[%d] -> %#x\n", GET_BM (index), val); + break; + default: + dolog ("U nabm readw %#x -> %#x\n", addr, val); + break; + } + return val; +} + +static uint32_t nabm_readl (void *opaque, uint32_t addr) +{ + AC97LinkState *s = opaque; + AC97BusMasterRegs *r = NULL; + uint32_t index = addr; + uint32_t val = ~0U; + + switch (index) { + case PI_BDBAR: + case PO_BDBAR: + case MC_BDBAR: + r = &s->bm_regs[GET_BM (index)]; + val = r->bdbar; + dolog ("BMADDR[%d] -> %#x\n", GET_BM (index), val); + break; + case PI_CIV: + case PO_CIV: + case MC_CIV: + r = &s->bm_regs[GET_BM (index)]; + val = r->civ | (r->lvi << 8) | (r->sr << 16); + dolog ("CIV LVI SR[%d] -> %#x, %#x, %#x\n", GET_BM (index), + r->civ, r->lvi, r->sr); + break; + case PI_PICB: + case PO_PICB: + case MC_PICB: + r = &s->bm_regs[GET_BM (index)]; + val = r->picb | (r->piv << 16) | (r->cr << 24); + dolog ("PICB PIV CR[%d] -> %#x %#x %#x %#x\n", GET_BM (index), + val, r->picb, r->piv, r->cr); + break; + case GLOB_CNT: + val = s->glob_cnt; + dolog ("glob_cnt -> %#x\n", val); + break; + case GLOB_STA: + val = s->glob_sta | GS_S0CR; + dolog ("glob_sta -> %#x\n", val); + break; + default: + dolog ("U nabm readl %#x -> %#x\n", addr, val); + break; + } + return val; +} + +/** + * Native audio bus master + * I/O Writes + */ +static void nabm_writeb (void *opaque, uint32_t addr, uint32_t val) +{ + AC97LinkState *s = opaque; + AC97BusMasterRegs *r = NULL; + uint32_t index = addr; + switch (index) { + case PI_LVI: + case PO_LVI: + case MC_LVI: + r = &s->bm_regs[GET_BM (index)]; + if ((r->cr & CR_RPBM) && (r->sr & SR_DCH)) { + r->sr &= ~(SR_DCH | SR_CELV); + r->civ = r->piv; + r->piv = (r->piv + 1) % 32; + fetch_bd (s, r); + } + r->lvi = val % 32; + dolog ("LVI[%d] <- %#x\n", GET_BM (index), val); + break; + case PI_CR: + case PO_CR: + case MC_CR: + r = &s->bm_regs[GET_BM (index)]; + if (val & CR_RR) { + reset_bm_regs (s, r); + } + else { + r->cr = val & CR_VALID_MASK; + if (!(r->cr & CR_RPBM)) { + voice_set_active (s, r - s->bm_regs, 0); + r->sr |= SR_DCH; + } + else { + r->civ = r->piv; + r->piv = (r->piv + 1) % 32; + fetch_bd (s, r); + r->sr &= ~SR_DCH; + voice_set_active (s, r - s->bm_regs, 1); + } + } + dolog ("CR[%d] <- %#x (cr %#x)\n", GET_BM (index), val, r->cr); + break; + case PI_SR: + case PO_SR: + case MC_SR: + r = &s->bm_regs[GET_BM (index)]; + r->sr |= val & ~(SR_RO_MASK | SR_WCLEAR_MASK); + update_sr (s, r, r->sr & ~(val & SR_WCLEAR_MASK)); + dolog ("SR[%d] <- %#x (sr %#x)\n", GET_BM (index), val, r->sr); + break; + default: + dolog ("U nabm writeb %#x <- %#x\n", addr, val); + break; + } +} + +static void nabm_writew (void *opaque, uint32_t addr, uint32_t val) +{ + AC97LinkState *s = opaque; + AC97BusMasterRegs *r = NULL; + uint32_t index = addr; + switch (index) { + case PI_SR: + case PO_SR: + case MC_SR: + r = &s->bm_regs[GET_BM (index)]; + r->sr |= val & ~(SR_RO_MASK | SR_WCLEAR_MASK); + update_sr (s, r, r->sr & ~(val & SR_WCLEAR_MASK)); + dolog ("SR[%d] <- %#x (sr %#x)\n", GET_BM (index), val, r->sr); + break; + default: + dolog ("U nabm writew %#x <- %#x\n", addr, val); + break; + } +} + +static void nabm_writel (void *opaque, uint32_t addr, uint32_t val) +{ + AC97LinkState *s = opaque; + 
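+    /* dword writes to the bus master reach the buffer-descriptor base
+     * address registers and the global control/status words below */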
AC97BusMasterRegs *r = NULL; + uint32_t index = addr; + switch (index) { + case PI_BDBAR: + case PO_BDBAR: + case MC_BDBAR: + r = &s->bm_regs[GET_BM (index)]; + r->bdbar = val & ~3; + dolog ("BDBAR[%d] <- %#x (bdbar %#x)\n", + GET_BM (index), val, r->bdbar); + break; + case GLOB_CNT: + if (val & GC_WR) + warm_reset (s); + if (val & GC_CR) + cold_reset (s); + if (!(val & (GC_WR | GC_CR))) + s->glob_cnt = val & GC_VALID_MASK; + dolog ("glob_cnt <- %#x (glob_cnt %#x)\n", val, s->glob_cnt); + break; + case GLOB_STA: + s->glob_sta &= ~(val & GS_WCLEAR_MASK); + s->glob_sta |= (val & ~(GS_WCLEAR_MASK | GS_RO_MASK)) & GS_VALID_MASK; + dolog ("glob_sta <- %#x (glob_sta %#x)\n", val, s->glob_sta); + break; + default: + dolog ("U nabm writel %#x <- %#x\n", addr, val); + break; + } +} + +static int write_audio (AC97LinkState *s, AC97BusMasterRegs *r, + int max, int *stop) +{ + uint8_t tmpbuf[4096]; + uint32_t addr = r->bd.addr; + uint32_t temp = r->picb << 1; + uint32_t written = 0; + int to_copy = 0; + temp = MIN (temp, max); + + if (!temp) { + *stop = 1; + return 0; + } + + while (temp) { + int copied; + to_copy = MIN (temp, sizeof (tmpbuf)); + pci_dma_read (&s->dev, addr, tmpbuf, to_copy); + copied = AUD_write (s->voice_po, tmpbuf, to_copy); + dolog ("write_audio max=%x to_copy=%x copied=%x\n", + max, to_copy, copied); + if (!copied) { + *stop = 1; + break; + } + temp -= copied; + addr += copied; + written += copied; + } + + if (!temp) { + if (to_copy < 4) { + dolog ("whoops\n"); + s->last_samp = 0; + } + else { + s->last_samp = *(uint32_t *) &tmpbuf[to_copy - 4]; + } + } + + r->bd.addr = addr; + return written; +} + +static void write_bup (AC97LinkState *s, int elapsed) +{ + dolog ("write_bup\n"); + if (!(s->bup_flag & BUP_SET)) { + if (s->bup_flag & BUP_LAST) { + int i; + uint8_t *p = s->silence; + for (i = 0; i < sizeof (s->silence) / 4; i++, p += 4) { + *(uint32_t *) p = s->last_samp; + } + } + else { + memset (s->silence, 0, sizeof (s->silence)); + } + s->bup_flag |= BUP_SET; + } + + while (elapsed) { + int temp = MIN (elapsed, sizeof (s->silence)); + while (temp) { + int copied = AUD_write (s->voice_po, s->silence, temp); + if (!copied) + return; + temp -= copied; + elapsed -= copied; + } + } +} + +static int read_audio (AC97LinkState *s, AC97BusMasterRegs *r, + int max, int *stop) +{ + uint8_t tmpbuf[4096]; + uint32_t addr = r->bd.addr; + uint32_t temp = r->picb << 1; + uint32_t nread = 0; + int to_copy = 0; + SWVoiceIn *voice = (r - s->bm_regs) == MC_INDEX ? 
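+        /* the MC (mic capture) engine reads from its own voice; PI
+           (PCM in) and anything else use the line-in voice */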
s->voice_mc : s->voice_pi; + + temp = MIN (temp, max); + + if (!temp) { + *stop = 1; + return 0; + } + + while (temp) { + int acquired; + to_copy = MIN (temp, sizeof (tmpbuf)); + acquired = AUD_read (voice, tmpbuf, to_copy); + if (!acquired) { + *stop = 1; + break; + } + pci_dma_write (&s->dev, addr, tmpbuf, acquired); + temp -= acquired; + addr += acquired; + nread += acquired; + } + + r->bd.addr = addr; + return nread; +} + +static void transfer_audio (AC97LinkState *s, int index, int elapsed) +{ + AC97BusMasterRegs *r = &s->bm_regs[index]; + int stop = 0; + + if (s->invalid_freq[index]) { + AUD_log ("ac97", "attempt to use voice %d with invalid frequency %d\n", + index, s->invalid_freq[index]); + return; + } + + if (r->sr & SR_DCH) { + if (r->cr & CR_RPBM) { + switch (index) { + case PO_INDEX: + write_bup (s, elapsed); + break; + } + } + return; + } + + while ((elapsed >> 1) && !stop) { + int temp; + + if (!r->bd_valid) { + dolog ("invalid bd\n"); + fetch_bd (s, r); + } + + if (!r->picb) { + dolog ("fresh bd %d is empty %#x %#x\n", + r->civ, r->bd.addr, r->bd.ctl_len); + if (r->civ == r->lvi) { + r->sr |= SR_DCH; /* CELV? */ + s->bup_flag = 0; + break; + } + r->sr &= ~SR_CELV; + r->civ = r->piv; + r->piv = (r->piv + 1) % 32; + fetch_bd (s, r); + return; + } + + switch (index) { + case PO_INDEX: + temp = write_audio (s, r, elapsed, &stop); + elapsed -= temp; + r->picb -= (temp >> 1); + break; + + case PI_INDEX: + case MC_INDEX: + temp = read_audio (s, r, elapsed, &stop); + elapsed -= temp; + r->picb -= (temp >> 1); + break; + } + + if (!r->picb) { + uint32_t new_sr = r->sr & ~SR_CELV; + + if (r->bd.ctl_len & BD_IOC) { + new_sr |= SR_BCIS; + } + + if (r->civ == r->lvi) { + dolog ("Underrun civ (%d) == lvi (%d)\n", r->civ, r->lvi); + + new_sr |= SR_LVBCI | SR_DCH | SR_CELV; + stop = 1; + s->bup_flag = (r->bd.ctl_len & BD_BUP) ? 
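+                    /* BD_BUP selects the underrun policy: replay the last
+                       sample (BUP_LAST) instead of silence, see write_bup */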
BUP_LAST : 0; + } + else { + r->civ = r->piv; + r->piv = (r->piv + 1) % 32; + fetch_bd (s, r); + } + + update_sr (s, r, new_sr); + } + } +} + +static void pi_callback (void *opaque, int avail) +{ + transfer_audio (opaque, PI_INDEX, avail); +} + +static void mc_callback (void *opaque, int avail) +{ + transfer_audio (opaque, MC_INDEX, avail); +} + +static void po_callback (void *opaque, int free) +{ + transfer_audio (opaque, PO_INDEX, free); +} + +static const VMStateDescription vmstate_ac97_bm_regs = { + .name = "ac97_bm_regs", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32 (bdbar, AC97BusMasterRegs), + VMSTATE_UINT8 (civ, AC97BusMasterRegs), + VMSTATE_UINT8 (lvi, AC97BusMasterRegs), + VMSTATE_UINT16 (sr, AC97BusMasterRegs), + VMSTATE_UINT16 (picb, AC97BusMasterRegs), + VMSTATE_UINT8 (piv, AC97BusMasterRegs), + VMSTATE_UINT8 (cr, AC97BusMasterRegs), + VMSTATE_UINT32 (bd_valid, AC97BusMasterRegs), + VMSTATE_UINT32 (bd.addr, AC97BusMasterRegs), + VMSTATE_UINT32 (bd.ctl_len, AC97BusMasterRegs), + VMSTATE_END_OF_LIST () + } +}; + +static int ac97_post_load (void *opaque, int version_id) +{ + uint8_t active[LAST_INDEX]; + AC97LinkState *s = opaque; + + record_select (s, mixer_load (s, AC97_Record_Select)); + set_volume (s, AC97_Master_Volume_Mute, + mixer_load (s, AC97_Master_Volume_Mute)); + set_volume (s, AC97_PCM_Out_Volume_Mute, + mixer_load (s, AC97_PCM_Out_Volume_Mute)); + set_volume (s, AC97_Record_Gain_Mute, + mixer_load (s, AC97_Record_Gain_Mute)); + + active[PI_INDEX] = !!(s->bm_regs[PI_INDEX].cr & CR_RPBM); + active[PO_INDEX] = !!(s->bm_regs[PO_INDEX].cr & CR_RPBM); + active[MC_INDEX] = !!(s->bm_regs[MC_INDEX].cr & CR_RPBM); + reset_voices (s, active); + + s->bup_flag = 0; + s->last_samp = 0; + return 0; +} + +static bool is_version_2 (void *opaque, int version_id) +{ + return version_id == 2; +} + +static const VMStateDescription vmstate_ac97 = { + .name = "ac97", + .version_id = 3, + .minimum_version_id = 2, + .post_load = ac97_post_load, + .fields = (VMStateField[]) { + VMSTATE_PCI_DEVICE (dev, AC97LinkState), + VMSTATE_UINT32 (glob_cnt, AC97LinkState), + VMSTATE_UINT32 (glob_sta, AC97LinkState), + VMSTATE_UINT32 (cas, AC97LinkState), + VMSTATE_STRUCT_ARRAY (bm_regs, AC97LinkState, 3, 1, + vmstate_ac97_bm_regs, AC97BusMasterRegs), + VMSTATE_BUFFER (mixer_data, AC97LinkState), + VMSTATE_UNUSED_TEST (is_version_2, 3), + VMSTATE_END_OF_LIST () + } +}; + +static uint64_t nam_read(void *opaque, hwaddr addr, unsigned size) +{ + if ((addr / size) > 256) { + return -1; + } + + switch (size) { + case 1: + return nam_readb(opaque, addr); + case 2: + return nam_readw(opaque, addr); + case 4: + return nam_readl(opaque, addr); + default: + return -1; + } +} + +static void nam_write(void *opaque, hwaddr addr, uint64_t val, + unsigned size) +{ + if ((addr / size) > 256) { + return; + } + + switch (size) { + case 1: + nam_writeb(opaque, addr, val); + break; + case 2: + nam_writew(opaque, addr, val); + break; + case 4: + nam_writel(opaque, addr, val); + break; + } +} + +static const MemoryRegionOps ac97_io_nam_ops = { + .read = nam_read, + .write = nam_write, + .impl = { + .min_access_size = 1, + .max_access_size = 4, + }, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static uint64_t nabm_read(void *opaque, hwaddr addr, unsigned size) +{ + if ((addr / size) > 64) { + return -1; + } + + switch (size) { + case 1: + return nabm_readb(opaque, addr); + case 2: + return nabm_readw(opaque, addr); + case 4: + return nabm_readl(opaque, addr); + default: + return 
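+            /* unsupported access size: read back as all ones, matching
+               the unhandled byte/dword paths above */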
-1; + } +} + +static void nabm_write(void *opaque, hwaddr addr, uint64_t val, + unsigned size) +{ + if ((addr / size) > 64) { + return; + } + + switch (size) { + case 1: + nabm_writeb(opaque, addr, val); + break; + case 2: + nabm_writew(opaque, addr, val); + break; + case 4: + nabm_writel(opaque, addr, val); + break; + } +} + + +static const MemoryRegionOps ac97_io_nabm_ops = { + .read = nabm_read, + .write = nabm_write, + .impl = { + .min_access_size = 1, + .max_access_size = 4, + }, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static void ac97_on_reset (DeviceState *dev) +{ + AC97LinkState *s = container_of(dev, AC97LinkState, dev.qdev); + + reset_bm_regs (s, &s->bm_regs[0]); + reset_bm_regs (s, &s->bm_regs[1]); + reset_bm_regs (s, &s->bm_regs[2]); + + /* + * Reset the mixer too. The Windows XP driver seems to rely on + * this. At least it wants to read the vendor id before it resets + * the codec manually. + */ + mixer_reset (s); +} + +static void ac97_realize(PCIDevice *dev, Error **errp) +{ + AC97LinkState *s = AC97(dev); + uint8_t *c = s->dev.config; + + /* TODO: no need to override */ + c[PCI_COMMAND] = 0x00; /* pcicmd pci command rw, ro */ + c[PCI_COMMAND + 1] = 0x00; + + /* TODO: */ + c[PCI_STATUS] = PCI_STATUS_FAST_BACK; /* pcists pci status rwc, ro */ + c[PCI_STATUS + 1] = PCI_STATUS_DEVSEL_MEDIUM >> 8; + + c[PCI_CLASS_PROG] = 0x00; /* pi programming interface ro */ + + /* TODO set when bar is registered. no need to override. */ + /* nabmar native audio mixer base address rw */ + c[PCI_BASE_ADDRESS_0] = PCI_BASE_ADDRESS_SPACE_IO; + c[PCI_BASE_ADDRESS_0 + 1] = 0x00; + c[PCI_BASE_ADDRESS_0 + 2] = 0x00; + c[PCI_BASE_ADDRESS_0 + 3] = 0x00; + + /* TODO set when bar is registered. no need to override. */ + /* nabmbar native audio bus mastering base address rw */ + c[PCI_BASE_ADDRESS_0 + 4] = PCI_BASE_ADDRESS_SPACE_IO; + c[PCI_BASE_ADDRESS_0 + 5] = 0x00; + c[PCI_BASE_ADDRESS_0 + 6] = 0x00; + c[PCI_BASE_ADDRESS_0 + 7] = 0x00; + + c[PCI_INTERRUPT_LINE] = 0x00; /* intr_ln interrupt line rw */ + c[PCI_INTERRUPT_PIN] = 0x01; /* intr_pn interrupt pin ro */ + + memory_region_init_io (&s->io_nam, OBJECT(s), &ac97_io_nam_ops, s, + "ac97-nam", 1024); + memory_region_init_io (&s->io_nabm, OBJECT(s), &ac97_io_nabm_ops, s, + "ac97-nabm", 256); + pci_register_bar (&s->dev, 0, PCI_BASE_ADDRESS_SPACE_IO, &s->io_nam); + pci_register_bar (&s->dev, 1, PCI_BASE_ADDRESS_SPACE_IO, &s->io_nabm); + AUD_register_card ("ac97", &s->card); + ac97_on_reset(DEVICE(s)); +} + +static void ac97_exit(PCIDevice *dev) +{ + AC97LinkState *s = AC97(dev); + + AUD_close_in(&s->card, s->voice_pi); + AUD_close_out(&s->card, s->voice_po); + AUD_close_in(&s->card, s->voice_mc); + AUD_remove_card(&s->card); +} + +static Property ac97_properties[] = { + DEFINE_AUDIO_PROPERTIES(AC97LinkState, card), + DEFINE_PROP_END_OF_LIST (), +}; + +static void ac97_class_init (ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS (klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS (klass); + + k->realize = ac97_realize; + k->exit = ac97_exit; + k->vendor_id = PCI_VENDOR_ID_INTEL; + k->device_id = PCI_DEVICE_ID_INTEL_82801AA_5; + k->revision = 0x01; + k->class_id = PCI_CLASS_MULTIMEDIA_AUDIO; + set_bit(DEVICE_CATEGORY_SOUND, dc->categories); + dc->desc = "Intel 82801AA AC97 Audio"; + dc->vmsd = &vmstate_ac97; + device_class_set_props(dc, ac97_properties); + dc->reset = ac97_on_reset; +} + +static const TypeInfo ac97_info = { + .name = TYPE_AC97, + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof (AC97LinkState), + .class_init = 
ac97_class_init, + .interfaces = (InterfaceInfo[]) { + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { }, + }, +}; + +static void ac97_register_types (void) +{ + type_register_static (&ac97_info); + deprecated_register_soundhw("ac97", "Intel 82801AA AC97 Audio", + 0, TYPE_AC97); +} + +type_init (ac97_register_types) diff --git a/hw/audio/adlib.c b/hw/audio/adlib.c new file mode 100644 index 000000000..5f979b148 --- /dev/null +++ b/hw/audio/adlib.c @@ -0,0 +1,328 @@ +/* + * QEMU Proxy for OPL2/3 emulation by MAME team + * + * Copyright (c) 2004-2005 Vassili Karpov (malc) + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/module.h" +#include "hw/audio/soundhw.h" +#include "audio/audio.h" +#include "hw/isa/isa.h" +#include "hw/qdev-properties.h" +#include "qom/object.h" + +//#define DEBUG + +#define ADLIB_KILL_TIMERS 1 + +#define ADLIB_DESC "Yamaha YM3812 (OPL2)" + +#ifdef DEBUG +#include "qemu/timer.h" +#endif + +#define dolog(...) AUD_log ("adlib", __VA_ARGS__) +#ifdef DEBUG +#define ldebug(...) dolog (__VA_ARGS__) +#else +#define ldebug(...) 
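+/* without DEBUG, ldebug() expands to nothing */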
+#endif + +#include "fmopl.h" +#define SHIFT 1 + +#define TYPE_ADLIB "adlib" +OBJECT_DECLARE_SIMPLE_TYPE(AdlibState, ADLIB) + +struct AdlibState { + ISADevice parent_obj; + + QEMUSoundCard card; + uint32_t freq; + uint32_t port; + int ticking[2]; + int enabled; + int active; + int bufpos; +#ifdef DEBUG + int64_t exp[2]; +#endif + int16_t *mixbuf; + uint64_t dexp[2]; + SWVoiceOut *voice; + int left, pos, samples; + QEMUAudioTimeStamp ats; + FM_OPL *opl; + PortioList port_list; +}; + +static void adlib_stop_opl_timer (AdlibState *s, size_t n) +{ + OPLTimerOver (s->opl, n); + s->ticking[n] = 0; +} + +static void adlib_kill_timers (AdlibState *s) +{ + size_t i; + + for (i = 0; i < 2; ++i) { + if (s->ticking[i]) { + uint64_t delta; + + delta = AUD_get_elapsed_usec_out (s->voice, &s->ats); + ldebug ( + "delta = %f dexp = %f expired => %d\n", + delta / 1000000.0, + s->dexp[i] / 1000000.0, + delta >= s->dexp[i] + ); + if (ADLIB_KILL_TIMERS || delta >= s->dexp[i]) { + adlib_stop_opl_timer (s, i); + AUD_init_time_stamp_out (s->voice, &s->ats); + } + } + } +} + +static void adlib_write(void *opaque, uint32_t nport, uint32_t val) +{ + AdlibState *s = opaque; + int a = nport & 3; + + s->active = 1; + AUD_set_active_out (s->voice, 1); + + adlib_kill_timers (s); + + OPLWrite (s->opl, a, val); +} + +static uint32_t adlib_read(void *opaque, uint32_t nport) +{ + AdlibState *s = opaque; + int a = nport & 3; + + adlib_kill_timers (s); + return OPLRead (s->opl, a); +} + +static void timer_handler (void *opaque, int c, double interval_Sec) +{ + AdlibState *s = opaque; + unsigned n = c & 1; +#ifdef DEBUG + double interval; + int64_t exp; +#endif + + if (interval_Sec == 0.0) { + s->ticking[n] = 0; + return; + } + + s->ticking[n] = 1; +#ifdef DEBUG + interval = NANOSECONDS_PER_SECOND * interval_Sec; + exp = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + interval; + s->exp[n] = exp; +#endif + + s->dexp[n] = interval_Sec * 1000000.0; + AUD_init_time_stamp_out (s->voice, &s->ats); +} + +static int write_audio (AdlibState *s, int samples) +{ + int net = 0; + int pos = s->pos; + + while (samples) { + int nbytes, wbytes, wsampl; + + nbytes = samples << SHIFT; + wbytes = AUD_write ( + s->voice, + s->mixbuf + (pos << (SHIFT - 1)), + nbytes + ); + + if (wbytes) { + wsampl = wbytes >> SHIFT; + + samples -= wsampl; + pos = (pos + wsampl) % s->samples; + + net += wsampl; + } + else { + break; + } + } + + return net; +} + +static void adlib_callback (void *opaque, int free) +{ + AdlibState *s = opaque; + int samples, to_play, written; + + samples = free >> SHIFT; + if (!(s->active && s->enabled) || !samples) { + return; + } + + to_play = MIN (s->left, samples); + while (to_play) { + written = write_audio (s, to_play); + + if (written) { + s->left -= written; + samples -= written; + to_play -= written; + s->pos = (s->pos + written) % s->samples; + } + else { + return; + } + } + + samples = MIN (samples, s->samples - s->pos); + if (!samples) { + return; + } + + YM3812UpdateOne (s->opl, s->mixbuf + s->pos, samples); + + while (samples) { + written = write_audio (s, samples); + + if (written) { + samples -= written; + s->pos = (s->pos + written) % s->samples; + } + else { + s->left = samples; + return; + } + } +} + +static void Adlib_fini (AdlibState *s) +{ + if (s->opl) { + OPLDestroy (s->opl); + s->opl = NULL; + } + + g_free(s->mixbuf); + + s->active = 0; + s->enabled = 0; + AUD_remove_card (&s->card); +} + +static MemoryRegionPortio adlib_portio_list[] = { + { 0, 4, 1, .read = adlib_read, .write = adlib_write, }, + { 0, 2, 1, .read = 
adlib_read, .write = adlib_write, }, + { 0x388, 4, 1, .read = adlib_read, .write = adlib_write, }, + PORTIO_END_OF_LIST(), +}; + +static void adlib_realizefn (DeviceState *dev, Error **errp) +{ + AdlibState *s = ADLIB(dev); + struct audsettings as; + + s->opl = OPLCreate (3579545, s->freq); + if (!s->opl) { + error_setg (errp, "OPLCreate %d failed", s->freq); + return; + } + else { + OPLSetTimerHandler(s->opl, timer_handler, s); + s->enabled = 1; + } + + as.freq = s->freq; + as.nchannels = SHIFT; + as.fmt = AUDIO_FORMAT_S16; + as.endianness = AUDIO_HOST_ENDIANNESS; + + AUD_register_card ("adlib", &s->card); + + s->voice = AUD_open_out ( + &s->card, + s->voice, + "adlib", + s, + adlib_callback, + &as + ); + if (!s->voice) { + Adlib_fini (s); + error_setg (errp, "Initializing audio voice failed"); + return; + } + + s->samples = AUD_get_buffer_size_out (s->voice) >> SHIFT; + s->mixbuf = g_malloc0 (s->samples << SHIFT); + + adlib_portio_list[0].offset = s->port; + adlib_portio_list[1].offset = s->port + 8; + portio_list_init (&s->port_list, OBJECT(s), adlib_portio_list, s, "adlib"); + portio_list_add (&s->port_list, isa_address_space_io(&s->parent_obj), 0); +} + +static Property adlib_properties[] = { + DEFINE_AUDIO_PROPERTIES(AdlibState, card), + DEFINE_PROP_UINT32 ("iobase", AdlibState, port, 0x220), + DEFINE_PROP_UINT32 ("freq", AdlibState, freq, 44100), + DEFINE_PROP_END_OF_LIST (), +}; + +static void adlib_class_initfn (ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS (klass); + + dc->realize = adlib_realizefn; + set_bit(DEVICE_CATEGORY_SOUND, dc->categories); + dc->desc = ADLIB_DESC; + device_class_set_props(dc, adlib_properties); +} + +static const TypeInfo adlib_info = { + .name = TYPE_ADLIB, + .parent = TYPE_ISA_DEVICE, + .instance_size = sizeof (AdlibState), + .class_init = adlib_class_initfn, +}; + +static void adlib_register_types (void) +{ + type_register_static (&adlib_info); + deprecated_register_soundhw("adlib", ADLIB_DESC, 1, TYPE_ADLIB); +} + +type_init (adlib_register_types) diff --git a/hw/audio/cs4231.c b/hw/audio/cs4231.c new file mode 100644 index 000000000..aefc3edea --- /dev/null +++ b/hw/audio/cs4231.c @@ -0,0 +1,184 @@ +/* + * QEMU Crystal CS4231 audio chip emulation + * + * Copyright (c) 2006 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "qemu/module.h" +#include "trace.h" +#include "qom/object.h" + +/* + * In addition to Crystal CS4231 there is a DMA controller on Sparc. + */ +#define CS_SIZE 0x40 +#define CS_REGS 16 +#define CS_DREGS 32 +#define CS_MAXDREG (CS_DREGS - 1) + +#define TYPE_CS4231 "sun-CS4231" +typedef struct CSState CSState; +DECLARE_INSTANCE_CHECKER(CSState, CS4231, + TYPE_CS4231) + +struct CSState { + SysBusDevice parent_obj; + + MemoryRegion iomem; + qemu_irq irq; + uint32_t regs[CS_REGS]; + uint8_t dregs[CS_DREGS]; +}; + +#define CS_RAP(s) ((s)->regs[0] & CS_MAXDREG) +#define CS_VER 0xa0 +#define CS_CDC_VER 0x8a + +static void cs_reset(DeviceState *d) +{ + CSState *s = CS4231(d); + + memset(s->regs, 0, CS_REGS * 4); + memset(s->dregs, 0, CS_DREGS); + s->dregs[12] = CS_CDC_VER; + s->dregs[25] = CS_VER; +} + +static uint64_t cs_mem_read(void *opaque, hwaddr addr, + unsigned size) +{ + CSState *s = opaque; + uint32_t saddr, ret; + + saddr = addr >> 2; + switch (saddr) { + case 1: + switch (CS_RAP(s)) { + case 3: // Write only + ret = 0; + break; + default: + ret = s->dregs[CS_RAP(s)]; + break; + } + trace_cs4231_mem_readl_dreg(CS_RAP(s), ret); + break; + default: + ret = s->regs[saddr]; + trace_cs4231_mem_readl_reg(saddr, ret); + break; + } + return ret; +} + +static void cs_mem_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + CSState *s = opaque; + uint32_t saddr; + + saddr = addr >> 2; + trace_cs4231_mem_writel_reg(saddr, s->regs[saddr], val); + switch (saddr) { + case 1: + trace_cs4231_mem_writel_dreg(CS_RAP(s), s->dregs[CS_RAP(s)], val); + switch(CS_RAP(s)) { + case 11: + case 25: // Read only + break; + case 12: + val &= 0x40; + val |= CS_CDC_VER; // Codec version + s->dregs[CS_RAP(s)] = val; + break; + default: + s->dregs[CS_RAP(s)] = val; + break; + } + break; + case 2: // Read only + break; + case 4: + if (val & 1) { + cs_reset(DEVICE(s)); + } + val &= 0x7f; + s->regs[saddr] = val; + break; + default: + s->regs[saddr] = val; + break; + } +} + +static const MemoryRegionOps cs_mem_ops = { + .read = cs_mem_read, + .write = cs_mem_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static const VMStateDescription vmstate_cs4231 = { + .name ="cs4231", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32_ARRAY(regs, CSState, CS_REGS), + VMSTATE_UINT8_ARRAY(dregs, CSState, CS_DREGS), + VMSTATE_END_OF_LIST() + } +}; + +static void cs4231_init(Object *obj) +{ + CSState *s = CS4231(obj); + SysBusDevice *dev = SYS_BUS_DEVICE(obj); + + memory_region_init_io(&s->iomem, obj, &cs_mem_ops, s, "cs4321", + CS_SIZE); + sysbus_init_mmio(dev, &s->iomem); + sysbus_init_irq(dev, &s->irq); +} + +static void cs4231_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = cs_reset; + dc->vmsd = &vmstate_cs4231; +} + +static const TypeInfo cs4231_info = { + .name = TYPE_CS4231, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(CSState), + .instance_init = cs4231_init, + .class_init = cs4231_class_init, +}; + +static void cs4231_register_types(void) +{ + type_register_static(&cs4231_info); +} + +type_init(cs4231_register_types) diff --git a/hw/audio/cs4231a.c b/hw/audio/cs4231a.c new file mode 100644 index 000000000..7d60ce6f0 --- /dev/null +++ b/hw/audio/cs4231a.c @@ -0,0 +1,723 @@ +/* + * QEMU Crystal CS4231 audio chip emulation + * + * Copyright (c) 2006 Fabrice Bellard + * + * Permission is hereby granted, free 
of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "hw/audio/soundhw.h" +#include "audio/audio.h" +#include "hw/irq.h" +#include "hw/isa/isa.h" +#include "hw/qdev-properties.h" +#include "migration/vmstate.h" +#include "qemu/module.h" +#include "qemu/timer.h" +#include "qapi/error.h" +#include "qom/object.h" + +/* + Missing features: + ADC + Loopback + Timer + ADPCM + More... +*/ + +/* #define DEBUG */ +/* #define DEBUG_XLAW */ + +static struct { + int aci_counter; +} conf = {1}; + +#ifdef DEBUG +#define dolog(...) AUD_log ("cs4231a", __VA_ARGS__) +#else +#define dolog(...) +#endif + +#define lwarn(...) AUD_log ("cs4231a", "warning: " __VA_ARGS__) +#define lerr(...) AUD_log ("cs4231a", "error: " __VA_ARGS__) + +#define CS_REGS 16 +#define CS_DREGS 32 + +#define TYPE_CS4231A "cs4231a" +typedef struct CSState CSState; +DECLARE_INSTANCE_CHECKER(CSState, CS4231A, + TYPE_CS4231A) + +struct CSState { + ISADevice dev; + QEMUSoundCard card; + MemoryRegion ioports; + qemu_irq pic; + uint32_t regs[CS_REGS]; + uint8_t dregs[CS_DREGS]; + uint32_t irq; + uint32_t dma; + uint32_t port; + IsaDma *isa_dma; + int shift; + int dma_running; + int audio_free; + int transferred; + int aci_counter; + SWVoiceOut *voice; + int16_t *tab; +}; + +#define MODE2 (1 << 6) +#define MCE (1 << 6) +#define PMCE (1 << 4) +#define CMCE (1 << 5) +#define TE (1 << 6) +#define PEN (1 << 0) +#define INT (1 << 0) +#define IEN (1 << 1) +#define PPIO (1 << 6) +#define PI (1 << 4) +#define CI (1 << 5) +#define TI (1 << 6) + +enum { + Index_Address, + Index_Data, + Status, + PIO_Data +}; + +enum { + Left_ADC_Input_Control, + Right_ADC_Input_Control, + Left_AUX1_Input_Control, + Right_AUX1_Input_Control, + Left_AUX2_Input_Control, + Right_AUX2_Input_Control, + Left_DAC_Output_Control, + Right_DAC_Output_Control, + FS_And_Playback_Data_Format, + Interface_Configuration, + Pin_Control, + Error_Status_And_Initialization, + MODE_And_ID, + Loopback_Control, + Playback_Upper_Base_Count, + Playback_Lower_Base_Count, + Alternate_Feature_Enable_I, + Alternate_Feature_Enable_II, + Left_Line_Input_Control, + Right_Line_Input_Control, + Timer_Low_Base, + Timer_High_Base, + RESERVED, + Alternate_Feature_Enable_III, + Alternate_Feature_Status, + Version_Chip_ID, + Mono_Input_And_Output_Control, + RESERVED_2, + Capture_Data_Format, + RESERVED_3, + Capture_Upper_Base_Count, + Capture_Lower_Base_Count +}; + +static int freqs[2][8] = { + { 8000, 16000, 27420, 32000, -1, -1, 48000, 9000 }, + { 5510, 11025, 18900, 22050, 37800, 
44100, 33075, 6620 } +}; + +/* Tables courtesy http://hazelware.luggle.com/tutorials/mulawcompression.html */ +static int16_t MuLawDecompressTable[256] = +{ + -32124,-31100,-30076,-29052,-28028,-27004,-25980,-24956, + -23932,-22908,-21884,-20860,-19836,-18812,-17788,-16764, + -15996,-15484,-14972,-14460,-13948,-13436,-12924,-12412, + -11900,-11388,-10876,-10364, -9852, -9340, -8828, -8316, + -7932, -7676, -7420, -7164, -6908, -6652, -6396, -6140, + -5884, -5628, -5372, -5116, -4860, -4604, -4348, -4092, + -3900, -3772, -3644, -3516, -3388, -3260, -3132, -3004, + -2876, -2748, -2620, -2492, -2364, -2236, -2108, -1980, + -1884, -1820, -1756, -1692, -1628, -1564, -1500, -1436, + -1372, -1308, -1244, -1180, -1116, -1052, -988, -924, + -876, -844, -812, -780, -748, -716, -684, -652, + -620, -588, -556, -524, -492, -460, -428, -396, + -372, -356, -340, -324, -308, -292, -276, -260, + -244, -228, -212, -196, -180, -164, -148, -132, + -120, -112, -104, -96, -88, -80, -72, -64, + -56, -48, -40, -32, -24, -16, -8, 0, + 32124, 31100, 30076, 29052, 28028, 27004, 25980, 24956, + 23932, 22908, 21884, 20860, 19836, 18812, 17788, 16764, + 15996, 15484, 14972, 14460, 13948, 13436, 12924, 12412, + 11900, 11388, 10876, 10364, 9852, 9340, 8828, 8316, + 7932, 7676, 7420, 7164, 6908, 6652, 6396, 6140, + 5884, 5628, 5372, 5116, 4860, 4604, 4348, 4092, + 3900, 3772, 3644, 3516, 3388, 3260, 3132, 3004, + 2876, 2748, 2620, 2492, 2364, 2236, 2108, 1980, + 1884, 1820, 1756, 1692, 1628, 1564, 1500, 1436, + 1372, 1308, 1244, 1180, 1116, 1052, 988, 924, + 876, 844, 812, 780, 748, 716, 684, 652, + 620, 588, 556, 524, 492, 460, 428, 396, + 372, 356, 340, 324, 308, 292, 276, 260, + 244, 228, 212, 196, 180, 164, 148, 132, + 120, 112, 104, 96, 88, 80, 72, 64, + 56, 48, 40, 32, 24, 16, 8, 0 +}; + +static int16_t ALawDecompressTable[256] = +{ + -5504, -5248, -6016, -5760, -4480, -4224, -4992, -4736, + -7552, -7296, -8064, -7808, -6528, -6272, -7040, -6784, + -2752, -2624, -3008, -2880, -2240, -2112, -2496, -2368, + -3776, -3648, -4032, -3904, -3264, -3136, -3520, -3392, + -22016,-20992,-24064,-23040,-17920,-16896,-19968,-18944, + -30208,-29184,-32256,-31232,-26112,-25088,-28160,-27136, + -11008,-10496,-12032,-11520,-8960, -8448, -9984, -9472, + -15104,-14592,-16128,-15616,-13056,-12544,-14080,-13568, + -344, -328, -376, -360, -280, -264, -312, -296, + -472, -456, -504, -488, -408, -392, -440, -424, + -88, -72, -120, -104, -24, -8, -56, -40, + -216, -200, -248, -232, -152, -136, -184, -168, + -1376, -1312, -1504, -1440, -1120, -1056, -1248, -1184, + -1888, -1824, -2016, -1952, -1632, -1568, -1760, -1696, + -688, -656, -752, -720, -560, -528, -624, -592, + -944, -912, -1008, -976, -816, -784, -880, -848, + 5504, 5248, 6016, 5760, 4480, 4224, 4992, 4736, + 7552, 7296, 8064, 7808, 6528, 6272, 7040, 6784, + 2752, 2624, 3008, 2880, 2240, 2112, 2496, 2368, + 3776, 3648, 4032, 3904, 3264, 3136, 3520, 3392, + 22016, 20992, 24064, 23040, 17920, 16896, 19968, 18944, + 30208, 29184, 32256, 31232, 26112, 25088, 28160, 27136, + 11008, 10496, 12032, 11520, 8960, 8448, 9984, 9472, + 15104, 14592, 16128, 15616, 13056, 12544, 14080, 13568, + 344, 328, 376, 360, 280, 264, 312, 296, + 472, 456, 504, 488, 408, 392, 440, 424, + 88, 72, 120, 104, 24, 8, 56, 40, + 216, 200, 248, 232, 152, 136, 184, 168, + 1376, 1312, 1504, 1440, 1120, 1056, 1248, 1184, + 1888, 1824, 2016, 1952, 1632, 1568, 1760, 1696, + 688, 656, 752, 720, 560, 528, 624, 592, + 944, 912, 1008, 976, 816, 784, 880, 848 +}; + +static void cs4231a_reset (DeviceState *dev) +{ + CSState *s 
= CS4231A (dev); + + s->regs[Index_Address] = 0x40; + s->regs[Index_Data] = 0x00; + s->regs[Status] = 0x00; + s->regs[PIO_Data] = 0x00; + + s->dregs[Left_ADC_Input_Control] = 0x00; + s->dregs[Right_ADC_Input_Control] = 0x00; + s->dregs[Left_AUX1_Input_Control] = 0x88; + s->dregs[Right_AUX1_Input_Control] = 0x88; + s->dregs[Left_AUX2_Input_Control] = 0x88; + s->dregs[Right_AUX2_Input_Control] = 0x88; + s->dregs[Left_DAC_Output_Control] = 0x80; + s->dregs[Right_DAC_Output_Control] = 0x80; + s->dregs[FS_And_Playback_Data_Format] = 0x00; + s->dregs[Interface_Configuration] = 0x08; + s->dregs[Pin_Control] = 0x00; + s->dregs[Error_Status_And_Initialization] = 0x00; + s->dregs[MODE_And_ID] = 0x8a; + s->dregs[Loopback_Control] = 0x00; + s->dregs[Playback_Upper_Base_Count] = 0x00; + s->dregs[Playback_Lower_Base_Count] = 0x00; + s->dregs[Alternate_Feature_Enable_I] = 0x00; + s->dregs[Alternate_Feature_Enable_II] = 0x00; + s->dregs[Left_Line_Input_Control] = 0x88; + s->dregs[Right_Line_Input_Control] = 0x88; + s->dregs[Timer_Low_Base] = 0x00; + s->dregs[Timer_High_Base] = 0x00; + s->dregs[RESERVED] = 0x00; + s->dregs[Alternate_Feature_Enable_III] = 0x00; + s->dregs[Alternate_Feature_Status] = 0x00; + s->dregs[Version_Chip_ID] = 0xa0; + s->dregs[Mono_Input_And_Output_Control] = 0xa0; + s->dregs[RESERVED_2] = 0x00; + s->dregs[Capture_Data_Format] = 0x00; + s->dregs[RESERVED_3] = 0x00; + s->dregs[Capture_Upper_Base_Count] = 0x00; + s->dregs[Capture_Lower_Base_Count] = 0x00; +} + +static void cs_audio_callback (void *opaque, int free) +{ + CSState *s = opaque; + s->audio_free = free; +} + +static void cs_reset_voices (CSState *s, uint32_t val) +{ + int xtal; + struct audsettings as; + IsaDmaClass *k = ISADMA_GET_CLASS(s->isa_dma); + +#ifdef DEBUG_XLAW + if (val == 0 || val == 32) + val = (1 << 4) | (1 << 5); +#endif + + xtal = val & 1; + as.freq = freqs[xtal][(val >> 1) & 7]; + + if (as.freq == -1) { + lerr ("unsupported frequency (val=%#x)\n", val); + goto error; + } + + as.nchannels = (val & (1 << 4)) ? 2 : 1; + as.endianness = 0; + s->tab = NULL; + + switch ((val >> 5) & ((s->dregs[MODE_And_ID] & MODE2) ? 
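+            /* MODE2 exposes the full 3-bit format field; in MODE1 only
+               the low two bits are decoded */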
7 : 3)) { + case 0: + as.fmt = AUDIO_FORMAT_U8; + s->shift = as.nchannels == 2; + break; + + case 1: + s->tab = MuLawDecompressTable; + goto x_law; + case 3: + s->tab = ALawDecompressTable; + x_law: + as.fmt = AUDIO_FORMAT_S16; + as.endianness = AUDIO_HOST_ENDIANNESS; + s->shift = as.nchannels == 2; + break; + + case 6: + as.endianness = 1; + /* fall through */ + case 2: + as.fmt = AUDIO_FORMAT_S16; + s->shift = as.nchannels; + break; + + case 7: + case 4: + lerr ("attempt to use reserved format value (%#x)\n", val); + goto error; + + case 5: + lerr ("ADPCM 4 bit IMA compatible format is not supported\n"); + goto error; + } + + s->voice = AUD_open_out ( + &s->card, + s->voice, + "cs4231a", + s, + cs_audio_callback, + &as + ); + + if (s->dregs[Interface_Configuration] & PEN) { + if (!s->dma_running) { + k->hold_DREQ(s->isa_dma, s->dma); + AUD_set_active_out (s->voice, 1); + s->transferred = 0; + } + s->dma_running = 1; + } + else { + if (s->dma_running) { + k->release_DREQ(s->isa_dma, s->dma); + AUD_set_active_out (s->voice, 0); + } + s->dma_running = 0; + } + return; + + error: + if (s->dma_running) { + k->release_DREQ(s->isa_dma, s->dma); + AUD_set_active_out (s->voice, 0); + } +} + +static uint64_t cs_read (void *opaque, hwaddr addr, unsigned size) +{ + CSState *s = opaque; + uint32_t saddr, iaddr, ret; + + saddr = addr; + iaddr = ~0U; + + switch (saddr) { + case Index_Address: + ret = s->regs[saddr] & ~0x80; + break; + + case Index_Data: + if (!(s->dregs[MODE_And_ID] & MODE2)) + iaddr = s->regs[Index_Address] & 0x0f; + else + iaddr = s->regs[Index_Address] & 0x1f; + + ret = s->dregs[iaddr]; + if (iaddr == Error_Status_And_Initialization) { + /* keep SEAL happy */ + if (s->aci_counter) { + ret |= 1 << 5; + s->aci_counter -= 1; + } + } + break; + + default: + ret = s->regs[saddr]; + break; + } + dolog ("read %d:%d -> %d\n", saddr, iaddr, ret); + return ret; +} + +static void cs_write (void *opaque, hwaddr addr, + uint64_t val64, unsigned size) +{ + CSState *s = opaque; + uint32_t saddr, iaddr, val; + + saddr = addr; + val = val64; + + switch (saddr) { + case Index_Address: + if (!(s->regs[Index_Address] & MCE) && (val & MCE) + && (s->dregs[Interface_Configuration] & (3 << 3))) + s->aci_counter = conf.aci_counter; + + s->regs[Index_Address] = val & ~(1 << 7); + break; + + case Index_Data: + if (!(s->dregs[MODE_And_ID] & MODE2)) + iaddr = s->regs[Index_Address] & 0x0f; + else + iaddr = s->regs[Index_Address] & 0x1f; + + switch (iaddr) { + case RESERVED: + case RESERVED_2: + case RESERVED_3: + lwarn ("attempt to write %#x to reserved indirect register %d\n", + val, iaddr); + break; + + case FS_And_Playback_Data_Format: + if (s->regs[Index_Address] & MCE) { + cs_reset_voices (s, val); + } + else { + if (s->dregs[Alternate_Feature_Status] & PMCE) { + val = (val & ~0x0f) | (s->dregs[iaddr] & 0x0f); + cs_reset_voices (s, val); + } + else { + lwarn ("[P]MCE(%#x, %#x) is not set, val=%#x\n", + s->regs[Index_Address], + s->dregs[Alternate_Feature_Status], + val); + break; + } + } + s->dregs[iaddr] = val; + break; + + case Interface_Configuration: + val &= ~(1 << 5); /* D5 is reserved */ + s->dregs[iaddr] = val; + if (val & PPIO) { + lwarn ("PIO is not supported (%#x)\n", val); + break; + } + if (val & PEN) { + if (!s->dma_running) { + cs_reset_voices (s, s->dregs[FS_And_Playback_Data_Format]); + } + } + else { + if (s->dma_running) { + IsaDmaClass *k = ISADMA_GET_CLASS(s->isa_dma); + k->release_DREQ(s->isa_dma, s->dma); + AUD_set_active_out (s->voice, 0); + s->dma_running = 0; + } + } + break; + + 
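+        /* Error_Status_And_Initialization is read-only for the guest;
+           reads fake the calibration-in-progress bit via aci_counter,
+           see cs_read above */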
case Error_Status_And_Initialization: + lwarn ("attempt to write to read only register %d\n", iaddr); + break; + + case MODE_And_ID: + dolog ("val=%#x\n", val); + if (val & MODE2) + s->dregs[iaddr] |= MODE2; + else + s->dregs[iaddr] &= ~MODE2; + break; + + case Alternate_Feature_Enable_I: + if (val & TE) + lerr ("timer is not yet supported\n"); + s->dregs[iaddr] = val; + break; + + case Alternate_Feature_Status: + if ((s->dregs[iaddr] & PI) && !(val & PI)) { + /* XXX: TI CI */ + qemu_irq_lower (s->pic); + s->regs[Status] &= ~INT; + } + s->dregs[iaddr] = val; + break; + + case Version_Chip_ID: + lwarn ("write to Version_Chip_ID register %#x\n", val); + s->dregs[iaddr] = val; + break; + + default: + s->dregs[iaddr] = val; + break; + } + dolog ("written value %#x to indirect register %d\n", val, iaddr); + break; + + case Status: + if (s->regs[Status] & INT) { + qemu_irq_lower (s->pic); + } + s->regs[Status] &= ~INT; + s->dregs[Alternate_Feature_Status] &= ~(PI | CI | TI); + break; + + case PIO_Data: + lwarn ("attempt to write value %#x to PIO register\n", val); + break; + } +} + +static int cs_write_audio (CSState *s, int nchan, int dma_pos, + int dma_len, int len) +{ + int temp, net; + uint8_t tmpbuf[4096]; + IsaDmaClass *k = ISADMA_GET_CLASS(s->isa_dma); + + temp = len; + net = 0; + + while (temp) { + int left = dma_len - dma_pos; + int copied; + size_t to_copy; + + to_copy = MIN (temp, left); + if (to_copy > sizeof (tmpbuf)) { + to_copy = sizeof (tmpbuf); + } + + copied = k->read_memory(s->isa_dma, nchan, tmpbuf, dma_pos, to_copy); + if (s->tab) { + int i; + int16_t linbuf[4096]; + + for (i = 0; i < copied; ++i) + linbuf[i] = s->tab[tmpbuf[i]]; + copied = AUD_write (s->voice, linbuf, copied << 1); + copied >>= 1; + } + else { + copied = AUD_write (s->voice, tmpbuf, copied); + } + + temp -= copied; + dma_pos = (dma_pos + copied) % dma_len; + net += copied; + + if (!copied) { + break; + } + } + + return net; +} + +static int cs_dma_read (void *opaque, int nchan, int dma_pos, int dma_len) +{ + CSState *s = opaque; + int copy, written; + int till = -1; + + copy = s->voice ? 
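+        /* with a mu-law/A-law table active each source byte expands to a
+           16-bit sample, so only half of the free output bytes can be fed */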
(s->audio_free >> (s->tab != NULL)) : dma_len; + + if (s->dregs[Pin_Control] & IEN) { + till = (s->dregs[Playback_Lower_Base_Count] + | (s->dregs[Playback_Upper_Base_Count] << 8)) << s->shift; + till -= s->transferred; + copy = MIN (till, copy); + } + + if ((copy <= 0) || (dma_len <= 0)) { + return dma_pos; + } + + written = cs_write_audio (s, nchan, dma_pos, dma_len, copy); + + dma_pos = (dma_pos + written) % dma_len; + s->audio_free -= (written << (s->tab != NULL)); + + if (written == till) { + s->regs[Status] |= INT; + s->dregs[Alternate_Feature_Status] |= PI; + s->transferred = 0; + qemu_irq_raise (s->pic); + } + else { + s->transferred += written; + } + + return dma_pos; +} + +static int cs4231a_pre_load (void *opaque) +{ + CSState *s = opaque; + + if (s->dma_running) { + IsaDmaClass *k = ISADMA_GET_CLASS(s->isa_dma); + k->release_DREQ(s->isa_dma, s->dma); + AUD_set_active_out (s->voice, 0); + } + s->dma_running = 0; + return 0; +} + +static int cs4231a_post_load (void *opaque, int version_id) +{ + CSState *s = opaque; + + if (s->dma_running && (s->dregs[Interface_Configuration] & PEN)) { + s->dma_running = 0; + cs_reset_voices (s, s->dregs[FS_And_Playback_Data_Format]); + } + return 0; +} + +static const VMStateDescription vmstate_cs4231a = { + .name = "cs4231a", + .version_id = 1, + .minimum_version_id = 1, + .pre_load = cs4231a_pre_load, + .post_load = cs4231a_post_load, + .fields = (VMStateField[]) { + VMSTATE_UINT32_ARRAY (regs, CSState, CS_REGS), + VMSTATE_BUFFER (dregs, CSState), + VMSTATE_INT32 (dma_running, CSState), + VMSTATE_INT32 (audio_free, CSState), + VMSTATE_INT32 (transferred, CSState), + VMSTATE_INT32 (aci_counter, CSState), + VMSTATE_END_OF_LIST () + } +}; + +static const MemoryRegionOps cs_ioport_ops = { + .read = cs_read, + .write = cs_write, + .impl = { + .min_access_size = 1, + .max_access_size = 1, + } +}; + +static void cs4231a_initfn (Object *obj) +{ + CSState *s = CS4231A (obj); + + memory_region_init_io (&s->ioports, OBJECT(s), &cs_ioport_ops, s, + "cs4231a", 4); +} + +static void cs4231a_realizefn (DeviceState *dev, Error **errp) +{ + ISADevice *d = ISA_DEVICE (dev); + CSState *s = CS4231A (dev); + IsaDmaClass *k; + + s->isa_dma = isa_get_dma(isa_bus_from_device(d), s->dma); + if (!s->isa_dma) { + error_setg(errp, "ISA controller does not support DMA"); + return; + } + + isa_init_irq(d, &s->pic, s->irq); + k = ISADMA_GET_CLASS(s->isa_dma); + k->register_channel(s->isa_dma, s->dma, cs_dma_read, s); + + isa_register_ioport (d, &s->ioports, s->port); + + AUD_register_card ("cs4231a", &s->card); +} + +static Property cs4231a_properties[] = { + DEFINE_AUDIO_PROPERTIES(CSState, card), + DEFINE_PROP_UINT32 ("iobase", CSState, port, 0x534), + DEFINE_PROP_UINT32 ("irq", CSState, irq, 9), + DEFINE_PROP_UINT32 ("dma", CSState, dma, 3), + DEFINE_PROP_END_OF_LIST (), +}; + +static void cs4231a_class_initfn (ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS (klass); + + dc->realize = cs4231a_realizefn; + dc->reset = cs4231a_reset; + set_bit(DEVICE_CATEGORY_SOUND, dc->categories); + dc->desc = "Crystal Semiconductor CS4231A"; + dc->vmsd = &vmstate_cs4231a; + device_class_set_props(dc, cs4231a_properties); +} + +static const TypeInfo cs4231a_info = { + .name = TYPE_CS4231A, + .parent = TYPE_ISA_DEVICE, + .instance_size = sizeof (CSState), + .instance_init = cs4231a_initfn, + .class_init = cs4231a_class_initfn, +}; + +static void cs4231a_register_types (void) +{ + type_register_static (&cs4231a_info); + deprecated_register_soundhw("cs4231a", "CS4231A", 1, 
TYPE_CS4231A); +} + +type_init (cs4231a_register_types) diff --git a/hw/audio/es1370.c b/hw/audio/es1370.c new file mode 100644 index 000000000..690458981 --- /dev/null +++ b/hw/audio/es1370.c @@ -0,0 +1,930 @@ +/* + * QEMU ES1370 emulation + * + * Copyright (c) 2005 Vassili Karpov (malc) + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +/* #define DEBUG_ES1370 */ +/* #define VERBOSE_ES1370 */ +#define SILENT_ES1370 + +#include "qemu/osdep.h" +#include "hw/audio/soundhw.h" +#include "audio/audio.h" +#include "hw/pci/pci.h" +#include "migration/vmstate.h" +#include "qemu/module.h" +#include "sysemu/dma.h" +#include "qom/object.h" + +/* Missing stuff: + SCTRL_P[12](END|ST)INC + SCTRL_P1SCTRLD + SCTRL_P2DACSEN + CTRL_DAC_SYNC + MIDI + non looped mode + surely more +*/ + +/* + Following macros and samplerate array were copied verbatim from + Linux kernel 2.4.30: drivers/sound/es1370.c + + Copyright (C) 1998-2001, 2003 Thomas Sailer (t.sailer@alumni.ethz.ch) +*/ + +/* Start blatant GPL violation */ + +#define ES1370_REG_CONTROL 0x00 +#define ES1370_REG_STATUS 0x04 +#define ES1370_REG_UART_DATA 0x08 +#define ES1370_REG_UART_STATUS 0x09 +#define ES1370_REG_UART_CONTROL 0x09 +#define ES1370_REG_UART_TEST 0x0a +#define ES1370_REG_MEMPAGE 0x0c +#define ES1370_REG_CODEC 0x10 +#define ES1370_REG_SERIAL_CONTROL 0x20 +#define ES1370_REG_DAC1_SCOUNT 0x24 +#define ES1370_REG_DAC2_SCOUNT 0x28 +#define ES1370_REG_ADC_SCOUNT 0x2c + +#define ES1370_REG_DAC1_FRAMEADR 0xc30 +#define ES1370_REG_DAC1_FRAMECNT 0xc34 +#define ES1370_REG_DAC2_FRAMEADR 0xc38 +#define ES1370_REG_DAC2_FRAMECNT 0xc3c +#define ES1370_REG_ADC_FRAMEADR 0xd30 +#define ES1370_REG_ADC_FRAMECNT 0xd34 +#define ES1370_REG_PHANTOM_FRAMEADR 0xd38 +#define ES1370_REG_PHANTOM_FRAMECNT 0xd3c + +static const unsigned dac1_samplerate[] = { 5512, 11025, 22050, 44100 }; + +#define DAC2_SRTODIV(x) (((1411200+(x)/2)/(x))-2) +#define DAC2_DIVTOSR(x) (1411200/((x)+2)) + +#define CTRL_ADC_STOP 0x80000000 /* 1 = ADC stopped */ +#define CTRL_XCTL1 0x40000000 /* electret mic bias */ +#define CTRL_OPEN 0x20000000 /* no function, can be read and written */ +#define CTRL_PCLKDIV 0x1fff0000 /* ADC/DAC2 clock divider */ +#define CTRL_SH_PCLKDIV 16 +#define CTRL_MSFMTSEL 0x00008000 /* MPEG serial data fmt: 0 = Sony, 1 = I2S */ +#define CTRL_M_SBB 0x00004000 /* DAC2 clock: 0 = PCLKDIV, 1 = MPEG */ +#define CTRL_WTSRSEL 0x00003000 /* DAC1 clock freq: 0=5512, 1=11025, 2=22050, 3=44100 */ +#define CTRL_SH_WTSRSEL 12 +#define CTRL_DAC_SYNC 
0x00000800 /* 1 = DAC2 runs off DAC1 clock */ +#define CTRL_CCB_INTRM 0x00000400 /* 1 = CCB "voice" ints enabled */ +#define CTRL_M_CB 0x00000200 /* recording source: 0 = ADC, 1 = MPEG */ +#define CTRL_XCTL0 0x00000100 /* 0 = Line in, 1 = Line out */ +#define CTRL_BREQ 0x00000080 /* 1 = test mode (internal mem test) */ +#define CTRL_DAC1_EN 0x00000040 /* enable DAC1 */ +#define CTRL_DAC2_EN 0x00000020 /* enable DAC2 */ +#define CTRL_ADC_EN 0x00000010 /* enable ADC */ +#define CTRL_UART_EN 0x00000008 /* enable MIDI uart */ +#define CTRL_JYSTK_EN 0x00000004 /* enable Joystick port (presumably at address 0x200) */ +#define CTRL_CDC_EN 0x00000002 /* enable serial (CODEC) interface */ +#define CTRL_SERR_DIS 0x00000001 /* 1 = disable PCI SERR signal */ + +#define STAT_INTR 0x80000000 /* wired or of all interrupt bits */ +#define STAT_CSTAT 0x00000400 /* 1 = codec busy or codec write in progress */ +#define STAT_CBUSY 0x00000200 /* 1 = codec busy */ +#define STAT_CWRIP 0x00000100 /* 1 = codec write in progress */ +#define STAT_VC 0x00000060 /* CCB int source, 0=DAC1, 1=DAC2, 2=ADC, 3=undef */ +#define STAT_SH_VC 5 +#define STAT_MCCB 0x00000010 /* CCB int pending */ +#define STAT_UART 0x00000008 /* UART int pending */ +#define STAT_DAC1 0x00000004 /* DAC1 int pending */ +#define STAT_DAC2 0x00000002 /* DAC2 int pending */ +#define STAT_ADC 0x00000001 /* ADC int pending */ + +#define USTAT_RXINT 0x80 /* UART rx int pending */ +#define USTAT_TXINT 0x04 /* UART tx int pending */ +#define USTAT_TXRDY 0x02 /* UART tx ready */ +#define USTAT_RXRDY 0x01 /* UART rx ready */ + +#define UCTRL_RXINTEN 0x80 /* 1 = enable RX ints */ +#define UCTRL_TXINTEN 0x60 /* TX int enable field mask */ +#define UCTRL_ENA_TXINT 0x20 /* enable TX int */ +#define UCTRL_CNTRL 0x03 /* control field */ +#define UCTRL_CNTRL_SWR 0x03 /* software reset command */ + +#define SCTRL_P2ENDINC 0x00380000 /* */ +#define SCTRL_SH_P2ENDINC 19 +#define SCTRL_P2STINC 0x00070000 /* */ +#define SCTRL_SH_P2STINC 16 +#define SCTRL_R1LOOPSEL 0x00008000 /* 0 = loop mode */ +#define SCTRL_P2LOOPSEL 0x00004000 /* 0 = loop mode */ +#define SCTRL_P1LOOPSEL 0x00002000 /* 0 = loop mode */ +#define SCTRL_P2PAUSE 0x00001000 /* 1 = pause mode */ +#define SCTRL_P1PAUSE 0x00000800 /* 1 = pause mode */ +#define SCTRL_R1INTEN 0x00000400 /* enable interrupt */ +#define SCTRL_P2INTEN 0x00000200 /* enable interrupt */ +#define SCTRL_P1INTEN 0x00000100 /* enable interrupt */ +#define SCTRL_P1SCTRLD 0x00000080 /* reload sample count register for DAC1 */ +#define SCTRL_P2DACSEN 0x00000040 /* 1 = DAC2 play back last sample when disabled */ +#define SCTRL_R1SEB 0x00000020 /* 1 = 16bit */ +#define SCTRL_R1SMB 0x00000010 /* 1 = stereo */ +#define SCTRL_R1FMT 0x00000030 /* format mask */ +#define SCTRL_SH_R1FMT 4 +#define SCTRL_P2SEB 0x00000008 /* 1 = 16bit */ +#define SCTRL_P2SMB 0x00000004 /* 1 = stereo */ +#define SCTRL_P2FMT 0x0000000c /* format mask */ +#define SCTRL_SH_P2FMT 2 +#define SCTRL_P1SEB 0x00000002 /* 1 = 16bit */ +#define SCTRL_P1SMB 0x00000001 /* 1 = stereo */ +#define SCTRL_P1FMT 0x00000003 /* format mask */ +#define SCTRL_SH_P1FMT 0 + +/* End blatant GPL violation */ + +#define NB_CHANNELS 3 +#define DAC1_CHANNEL 0 +#define DAC2_CHANNEL 1 +#define ADC_CHANNEL 2 + +static void es1370_dac1_callback (void *opaque, int free); +static void es1370_dac2_callback (void *opaque, int free); +static void es1370_adc_callback (void *opaque, int avail); + +#ifdef DEBUG_ES1370 + +#define ldebug(...) 
AUD_log ("es1370", __VA_ARGS__) + +static void print_ctl (uint32_t val) +{ + char buf[1024]; + + buf[0] = '\0'; +#define a(n) if (val & CTRL_##n) strcat (buf, " "#n) + a (ADC_STOP); + a (XCTL1); + a (OPEN); + a (MSFMTSEL); + a (M_SBB); + a (DAC_SYNC); + a (CCB_INTRM); + a (M_CB); + a (XCTL0); + a (BREQ); + a (DAC1_EN); + a (DAC2_EN); + a (ADC_EN); + a (UART_EN); + a (JYSTK_EN); + a (CDC_EN); + a (SERR_DIS); +#undef a + AUD_log ("es1370", "ctl - PCLKDIV %d(DAC2 freq %d), freq %d,%s\n", + (val & CTRL_PCLKDIV) >> CTRL_SH_PCLKDIV, + DAC2_DIVTOSR ((val & CTRL_PCLKDIV) >> CTRL_SH_PCLKDIV), + dac1_samplerate[(val & CTRL_WTSRSEL) >> CTRL_SH_WTSRSEL], + buf); +} + +static void print_sctl (uint32_t val) +{ + static const char *fmt_names[] = {"8M", "8S", "16M", "16S"}; + char buf[1024]; + + buf[0] = '\0'; + +#define a(n) if (val & SCTRL_##n) strcat (buf, " "#n) +#define b(n) if (!(val & SCTRL_##n)) strcat (buf, " "#n) + b (R1LOOPSEL); + b (P2LOOPSEL); + b (P1LOOPSEL); + a (P2PAUSE); + a (P1PAUSE); + a (R1INTEN); + a (P2INTEN); + a (P1INTEN); + a (P1SCTRLD); + a (P2DACSEN); + if (buf[0]) { + strcat (buf, "\n "); + } + else { + buf[0] = ' '; + buf[1] = '\0'; + } +#undef b +#undef a + AUD_log ("es1370", + "%s" + "p2_end_inc %d, p2_st_inc %d, r1_fmt %s, p2_fmt %s, p1_fmt %s\n", + buf, + (val & SCTRL_P2ENDINC) >> SCTRL_SH_P2ENDINC, + (val & SCTRL_P2STINC) >> SCTRL_SH_P2STINC, + fmt_names [(val >> SCTRL_SH_R1FMT) & 3], + fmt_names [(val >> SCTRL_SH_P2FMT) & 3], + fmt_names [(val >> SCTRL_SH_P1FMT) & 3] + ); +} +#else +#define ldebug(...) +#define print_ctl(...) +#define print_sctl(...) +#endif + +#ifdef VERBOSE_ES1370 +#define dolog(...) AUD_log ("es1370", __VA_ARGS__) +#else +#define dolog(...) +#endif + +#ifndef SILENT_ES1370 +#define lwarn(...) AUD_log ("es1370: warning", __VA_ARGS__) +#else +#define lwarn(...) 
+#endif + +struct chan { + uint32_t shift; + uint32_t leftover; + uint32_t scount; + uint32_t frame_addr; + uint32_t frame_cnt; +}; + +struct ES1370State { + PCIDevice dev; + QEMUSoundCard card; + MemoryRegion io; + struct chan chan[NB_CHANNELS]; + SWVoiceOut *dac_voice[2]; + SWVoiceIn *adc_voice; + + uint32_t ctl; + uint32_t status; + uint32_t mempage; + uint32_t codec; + uint32_t sctl; +}; +typedef struct ES1370State ES1370State; + +struct chan_bits { + uint32_t ctl_en; + uint32_t stat_int; + uint32_t sctl_pause; + uint32_t sctl_inten; + uint32_t sctl_fmt; + uint32_t sctl_sh_fmt; + uint32_t sctl_loopsel; + void (*calc_freq) (ES1370State *s, uint32_t ctl, + uint32_t *old_freq, uint32_t *new_freq); +}; + +#define TYPE_ES1370 "ES1370" +OBJECT_DECLARE_SIMPLE_TYPE(ES1370State, ES1370) + +static void es1370_dac1_calc_freq (ES1370State *s, uint32_t ctl, + uint32_t *old_freq, uint32_t *new_freq); +static void es1370_dac2_and_adc_calc_freq (ES1370State *s, uint32_t ctl, + uint32_t *old_freq, + uint32_t *new_freq); + +static const struct chan_bits es1370_chan_bits[] = { + {CTRL_DAC1_EN, STAT_DAC1, SCTRL_P1PAUSE, SCTRL_P1INTEN, + SCTRL_P1FMT, SCTRL_SH_P1FMT, SCTRL_P1LOOPSEL, + es1370_dac1_calc_freq}, + + {CTRL_DAC2_EN, STAT_DAC2, SCTRL_P2PAUSE, SCTRL_P2INTEN, + SCTRL_P2FMT, SCTRL_SH_P2FMT, SCTRL_P2LOOPSEL, + es1370_dac2_and_adc_calc_freq}, + + {CTRL_ADC_EN, STAT_ADC, 0, SCTRL_R1INTEN, + SCTRL_R1FMT, SCTRL_SH_R1FMT, SCTRL_R1LOOPSEL, + es1370_dac2_and_adc_calc_freq} +}; + +static void es1370_update_status (ES1370State *s, uint32_t new_status) +{ + uint32_t level = new_status & (STAT_DAC1 | STAT_DAC2 | STAT_ADC); + + if (level) { + s->status = new_status | STAT_INTR; + } + else { + s->status = new_status & ~STAT_INTR; + } + pci_set_irq(&s->dev, !!level); +} + +static void es1370_reset (ES1370State *s) +{ + size_t i; + + s->ctl = 1; + s->status = 0x60; + s->mempage = 0; + s->codec = 0; + s->sctl = 0; + + for (i = 0; i < NB_CHANNELS; ++i) { + struct chan *d = &s->chan[i]; + d->scount = 0; + d->leftover = 0; + if (i == ADC_CHANNEL) { + AUD_close_in (&s->card, s->adc_voice); + s->adc_voice = NULL; + } + else { + AUD_close_out (&s->card, s->dac_voice[i]); + s->dac_voice[i] = NULL; + } + } + pci_irq_deassert(&s->dev); +} + +static void es1370_maybe_lower_irq (ES1370State *s, uint32_t sctl) +{ + uint32_t new_status = s->status; + + if (!(sctl & SCTRL_P1INTEN) && (s->sctl & SCTRL_P1INTEN)) { + new_status &= ~STAT_DAC1; + } + + if (!(sctl & SCTRL_P2INTEN) && (s->sctl & SCTRL_P2INTEN)) { + new_status &= ~STAT_DAC2; + } + + if (!(sctl & SCTRL_R1INTEN) && (s->sctl & SCTRL_R1INTEN)) { + new_status &= ~STAT_ADC; + } + + if (new_status != s->status) { + es1370_update_status (s, new_status); + } +} + +static void es1370_dac1_calc_freq (ES1370State *s, uint32_t ctl, + uint32_t *old_freq, uint32_t *new_freq) + +{ + *old_freq = dac1_samplerate[(s->ctl & CTRL_WTSRSEL) >> CTRL_SH_WTSRSEL]; + *new_freq = dac1_samplerate[(ctl & CTRL_WTSRSEL) >> CTRL_SH_WTSRSEL]; +} + +static void es1370_dac2_and_adc_calc_freq (ES1370State *s, uint32_t ctl, + uint32_t *old_freq, + uint32_t *new_freq) + +{ + uint32_t old_pclkdiv, new_pclkdiv; + + new_pclkdiv = (ctl & CTRL_PCLKDIV) >> CTRL_SH_PCLKDIV; + old_pclkdiv = (s->ctl & CTRL_PCLKDIV) >> CTRL_SH_PCLKDIV; + *new_freq = DAC2_DIVTOSR (new_pclkdiv); + *old_freq = DAC2_DIVTOSR (old_pclkdiv); +} + +static void es1370_update_voices (ES1370State *s, uint32_t ctl, uint32_t sctl) +{ + size_t i; + uint32_t old_freq, new_freq, old_fmt, new_fmt; + + for (i = 0; i < NB_CHANNELS; ++i) { + struct chan 
*d = &s->chan[i]; + const struct chan_bits *b = &es1370_chan_bits[i]; + + new_fmt = (sctl & b->sctl_fmt) >> b->sctl_sh_fmt; + old_fmt = (s->sctl & b->sctl_fmt) >> b->sctl_sh_fmt; + + b->calc_freq (s, ctl, &old_freq, &new_freq); + + if ((old_fmt != new_fmt) || (old_freq != new_freq)) { + d->shift = (new_fmt & 1) + (new_fmt >> 1); + ldebug ("channel %zu, freq = %d, nchannels %d, fmt %d, shift %d\n", + i, + new_freq, + 1 << (new_fmt & 1), + (new_fmt & 2) ? AUDIO_FORMAT_S16 : AUDIO_FORMAT_U8, + d->shift); + if (new_freq) { + struct audsettings as; + + as.freq = new_freq; + as.nchannels = 1 << (new_fmt & 1); + as.fmt = (new_fmt & 2) ? AUDIO_FORMAT_S16 : AUDIO_FORMAT_U8; + as.endianness = 0; + + if (i == ADC_CHANNEL) { + s->adc_voice = + AUD_open_in ( + &s->card, + s->adc_voice, + "es1370.adc", + s, + es1370_adc_callback, + &as + ); + } + else { + s->dac_voice[i] = + AUD_open_out ( + &s->card, + s->dac_voice[i], + i ? "es1370.dac2" : "es1370.dac1", + s, + i ? es1370_dac2_callback : es1370_dac1_callback, + &as + ); + } + } + } + + if (((ctl ^ s->ctl) & b->ctl_en) + || ((sctl ^ s->sctl) & b->sctl_pause)) { + int on = (ctl & b->ctl_en) && !(sctl & b->sctl_pause); + + if (i == ADC_CHANNEL) { + AUD_set_active_in (s->adc_voice, on); + } + else { + AUD_set_active_out (s->dac_voice[i], on); + } + } + } + + s->ctl = ctl; + s->sctl = sctl; +} + +static inline uint32_t es1370_fixup (ES1370State *s, uint32_t addr) +{ + addr &= 0xff; + if (addr >= 0x30 && addr <= 0x3f) + addr |= s->mempage << 8; + return addr; +} + +static void es1370_write(void *opaque, hwaddr addr, uint64_t val, unsigned size) +{ + ES1370State *s = opaque; + struct chan *d = &s->chan[0]; + + addr = es1370_fixup (s, addr); + + switch (addr) { + case ES1370_REG_CONTROL: + es1370_update_voices (s, val, s->sctl); + print_ctl (val); + break; + + case ES1370_REG_MEMPAGE: + s->mempage = val & 0xf; + break; + + case ES1370_REG_SERIAL_CONTROL: + es1370_maybe_lower_irq (s, val); + es1370_update_voices (s, s->ctl, val); + print_sctl (val); + break; + + case ES1370_REG_DAC1_SCOUNT: + case ES1370_REG_DAC2_SCOUNT: + case ES1370_REG_ADC_SCOUNT: + d += (addr - ES1370_REG_DAC1_SCOUNT) >> 2; + d->scount = (val & 0xffff) | (d->scount & ~0xffff); + ldebug ("chan %td CURR_SAMP_CT %d, SAMP_CT %d\n", + d - &s->chan[0], val >> 16, (val & 0xffff)); + break; + + case ES1370_REG_ADC_FRAMEADR: + d += 2; + goto frameadr; + case ES1370_REG_DAC1_FRAMEADR: + case ES1370_REG_DAC2_FRAMEADR: + d += (addr - ES1370_REG_DAC1_FRAMEADR) >> 3; + frameadr: + d->frame_addr = val; + ldebug ("chan %td frame address %#x\n", d - &s->chan[0], val); + break; + + case ES1370_REG_PHANTOM_FRAMECNT: + lwarn ("writing to phantom frame count %#x\n", val); + break; + case ES1370_REG_PHANTOM_FRAMEADR: + lwarn ("writing to phantom frame address %#x\n", val); + break; + + case ES1370_REG_ADC_FRAMECNT: + d += 2; + goto framecnt; + case ES1370_REG_DAC1_FRAMECNT: + case ES1370_REG_DAC2_FRAMECNT: + d += (addr - ES1370_REG_DAC1_FRAMECNT) >> 3; + framecnt: + d->frame_cnt = val; + d->leftover = 0; + ldebug ("chan %td frame count %d, buffer size %d\n", + d - &s->chan[0], val >> 16, val & 0xffff); + break; + + default: + lwarn ("writel %#x <- %#x\n", addr, val); + break; + } +} + +static uint64_t es1370_read(void *opaque, hwaddr addr, unsigned size) +{ + ES1370State *s = opaque; + uint32_t val; + struct chan *d = &s->chan[0]; + + addr = es1370_fixup (s, addr); + + switch (addr) { + case ES1370_REG_CONTROL: + val = s->ctl; + break; + case ES1370_REG_STATUS: + val = s->status; + break; + case 
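+    /* the 0xc30-0xd3f frame registers are banked in through MEMPAGE
+       (see es1370_fixup above) */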
ES1370_REG_MEMPAGE: + val = s->mempage; + break; + case ES1370_REG_CODEC: + val = s->codec; + break; + case ES1370_REG_SERIAL_CONTROL: + val = s->sctl; + break; + + case ES1370_REG_DAC1_SCOUNT: + case ES1370_REG_DAC2_SCOUNT: + case ES1370_REG_ADC_SCOUNT: + d += (addr - ES1370_REG_DAC1_SCOUNT) >> 2; + val = d->scount; +#ifdef DEBUG_ES1370 + { + uint32_t curr_count = d->scount >> 16; + uint32_t count = d->scount & 0xffff; + + curr_count <<= d->shift; + count <<= d->shift; + dolog ("read scount curr %d, total %d\n", curr_count, count); + } +#endif + break; + + case ES1370_REG_ADC_FRAMECNT: + d += 2; + goto framecnt; + case ES1370_REG_DAC1_FRAMECNT: + case ES1370_REG_DAC2_FRAMECNT: + d += (addr - ES1370_REG_DAC1_FRAMECNT) >> 3; + framecnt: + val = d->frame_cnt; +#ifdef DEBUG_ES1370 + { + uint32_t size = ((d->frame_cnt & 0xffff) + 1) << 2; + uint32_t curr = ((d->frame_cnt >> 16) + 1) << 2; + if (curr > size) { + dolog ("read framecnt curr %d, size %d %d\n", curr, size, + curr > size); + } + } +#endif + break; + + case ES1370_REG_ADC_FRAMEADR: + d += 2; + goto frameadr; + case ES1370_REG_DAC1_FRAMEADR: + case ES1370_REG_DAC2_FRAMEADR: + d += (addr - ES1370_REG_DAC1_FRAMEADR) >> 3; + frameadr: + val = d->frame_addr; + break; + + case ES1370_REG_PHANTOM_FRAMECNT: + val = ~0U; + lwarn ("reading from phantom frame count\n"); + break; + case ES1370_REG_PHANTOM_FRAMEADR: + val = ~0U; + lwarn ("reading from phantom frame address\n"); + break; + + default: + val = ~0U; + lwarn ("readl %#x -> %#x\n", addr, val); + break; + } + return val; +} + +static void es1370_transfer_audio (ES1370State *s, struct chan *d, int loop_sel, + int max, int *irq) +{ + uint8_t tmpbuf[4096]; + uint32_t addr = d->frame_addr; + int sc = d->scount & 0xffff; + int csc = d->scount >> 16; + int csc_bytes = (csc + 1) << d->shift; + int cnt = d->frame_cnt >> 16; + int size = d->frame_cnt & 0xffff; + if (size < cnt) { + return; + } + int left = ((size - cnt + 1) << 2) + d->leftover; + int transferred = 0; + int temp = MIN (max, MIN (left, csc_bytes)); + int index = d - &s->chan[0]; + + addr += (cnt << 2) + d->leftover; + + if (index == ADC_CHANNEL) { + while (temp > 0) { + int acquired, to_copy; + + to_copy = MIN ((size_t) temp, sizeof (tmpbuf)); + acquired = AUD_read (s->adc_voice, tmpbuf, to_copy); + if (!acquired) + break; + + pci_dma_write (&s->dev, addr, tmpbuf, acquired); + + temp -= acquired; + addr += acquired; + transferred += acquired; + } + } + else { + SWVoiceOut *voice = s->dac_voice[index]; + + while (temp > 0) { + int copied, to_copy; + + to_copy = MIN ((size_t) temp, sizeof (tmpbuf)); + pci_dma_read (&s->dev, addr, tmpbuf, to_copy); + copied = AUD_write (voice, tmpbuf, to_copy); + if (!copied) + break; + temp -= copied; + addr += copied; + transferred += copied; + } + } + + if (csc_bytes == transferred) { + *irq = 1; + d->scount = sc | (sc << 16); + ldebug ("sc = %d, rate = %f\n", + (sc + 1) << d->shift, + (sc + 1) / (double) 44100); + } + else { + *irq = 0; + d->scount = sc | (((csc_bytes - transferred - 1) >> d->shift) << 16); + } + + cnt += (transferred + d->leftover) >> 2; + + if (s->sctl & loop_sel) { + /* Bah, how stupid is that having a 0 represent true value? 
+ i just spent few hours on this shit */ + AUD_log ("es1370: warning", "non looping mode\n"); + } + else { + d->frame_cnt = size; + + if ((uint32_t) cnt <= d->frame_cnt) + d->frame_cnt |= cnt << 16; + } + + d->leftover = (transferred + d->leftover) & 3; +} + +static void es1370_run_channel (ES1370State *s, size_t chan, int free_or_avail) +{ + uint32_t new_status = s->status; + int max_bytes, irq; + struct chan *d = &s->chan[chan]; + const struct chan_bits *b = &es1370_chan_bits[chan]; + + if (!(s->ctl & b->ctl_en) || (s->sctl & b->sctl_pause)) { + return; + } + + max_bytes = free_or_avail; + max_bytes &= ~((1 << d->shift) - 1); + if (!max_bytes) { + return; + } + + es1370_transfer_audio (s, d, b->sctl_loopsel, max_bytes, &irq); + + if (irq) { + if (s->sctl & b->sctl_inten) { + new_status |= b->stat_int; + } + } + + if (new_status != s->status) { + es1370_update_status (s, new_status); + } +} + +static void es1370_dac1_callback (void *opaque, int free) +{ + ES1370State *s = opaque; + + es1370_run_channel (s, DAC1_CHANNEL, free); +} + +static void es1370_dac2_callback (void *opaque, int free) +{ + ES1370State *s = opaque; + + es1370_run_channel (s, DAC2_CHANNEL, free); +} + +static void es1370_adc_callback (void *opaque, int avail) +{ + ES1370State *s = opaque; + + es1370_run_channel (s, ADC_CHANNEL, avail); +} + +static const MemoryRegionOps es1370_io_ops = { + .read = es1370_read, + .write = es1370_write, + .valid = { + .min_access_size = 1, + .max_access_size = 4, + }, + .impl = { + .min_access_size = 4, + .max_access_size = 4, + }, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static const VMStateDescription vmstate_es1370_channel = { + .name = "es1370_channel", + .version_id = 2, + .minimum_version_id = 2, + .fields = (VMStateField[]) { + VMSTATE_UINT32 (shift, struct chan), + VMSTATE_UINT32 (leftover, struct chan), + VMSTATE_UINT32 (scount, struct chan), + VMSTATE_UINT32 (frame_addr, struct chan), + VMSTATE_UINT32 (frame_cnt, struct chan), + VMSTATE_END_OF_LIST () + } +}; + +static int es1370_post_load (void *opaque, int version_id) +{ + uint32_t ctl, sctl; + ES1370State *s = opaque; + size_t i; + + for (i = 0; i < NB_CHANNELS; ++i) { + if (i == ADC_CHANNEL) { + if (s->adc_voice) { + AUD_close_in (&s->card, s->adc_voice); + s->adc_voice = NULL; + } + } + else { + if (s->dac_voice[i]) { + AUD_close_out (&s->card, s->dac_voice[i]); + s->dac_voice[i] = NULL; + } + } + } + + ctl = s->ctl; + sctl = s->sctl; + s->ctl = 0; + s->sctl = 0; + es1370_update_voices (s, ctl, sctl); + return 0; +} + +static const VMStateDescription vmstate_es1370 = { + .name = "es1370", + .version_id = 2, + .minimum_version_id = 2, + .post_load = es1370_post_load, + .fields = (VMStateField[]) { + VMSTATE_PCI_DEVICE (dev, ES1370State), + VMSTATE_STRUCT_ARRAY (chan, ES1370State, NB_CHANNELS, 2, + vmstate_es1370_channel, struct chan), + VMSTATE_UINT32 (ctl, ES1370State), + VMSTATE_UINT32 (status, ES1370State), + VMSTATE_UINT32 (mempage, ES1370State), + VMSTATE_UINT32 (codec, ES1370State), + VMSTATE_UINT32 (sctl, ES1370State), + VMSTATE_END_OF_LIST () + } +}; + +static void es1370_on_reset(DeviceState *dev) +{ + ES1370State *s = container_of(dev, ES1370State, dev.qdev); + es1370_reset (s); +} + +static void es1370_realize(PCIDevice *dev, Error **errp) +{ + ES1370State *s = ES1370(dev); + uint8_t *c = s->dev.config; + + c[PCI_STATUS + 1] = PCI_STATUS_DEVSEL_SLOW >> 8; + +#if 0 + c[PCI_CAPABILITY_LIST] = 0xdc; + c[PCI_INTERRUPT_LINE] = 10; + c[0xdc] = 0x00; +#endif + + c[PCI_INTERRUPT_PIN] = 1; + c[PCI_MIN_GNT] = 0x0c; + 
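+    /* PCI MIN_GNT/MAX_LAT are in units of 250 ns: 0x0c requests 3 us
+       bursts, 0x80 advertises a 32 us latency tolerance */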
c[PCI_MAX_LAT] = 0x80; + + memory_region_init_io (&s->io, OBJECT(s), &es1370_io_ops, s, "es1370", 256); + pci_register_bar (&s->dev, 0, PCI_BASE_ADDRESS_SPACE_IO, &s->io); + + AUD_register_card ("es1370", &s->card); + es1370_reset (s); +} + +static void es1370_exit(PCIDevice *dev) +{ + ES1370State *s = ES1370(dev); + int i; + + for (i = 0; i < 2; ++i) { + AUD_close_out(&s->card, s->dac_voice[i]); + } + + AUD_close_in(&s->card, s->adc_voice); + AUD_remove_card(&s->card); +} + +static Property es1370_properties[] = { + DEFINE_AUDIO_PROPERTIES(ES1370State, card), + DEFINE_PROP_END_OF_LIST(), +}; + +static void es1370_class_init (ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS (klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS (klass); + + k->realize = es1370_realize; + k->exit = es1370_exit; + k->vendor_id = PCI_VENDOR_ID_ENSONIQ; + k->device_id = PCI_DEVICE_ID_ENSONIQ_ES1370; + k->class_id = PCI_CLASS_MULTIMEDIA_AUDIO; + k->subsystem_vendor_id = 0x4942; + k->subsystem_id = 0x4c4c; + set_bit(DEVICE_CATEGORY_SOUND, dc->categories); + dc->desc = "ENSONIQ AudioPCI ES1370"; + dc->vmsd = &vmstate_es1370; + dc->reset = es1370_on_reset; + device_class_set_props(dc, es1370_properties); +} + +static const TypeInfo es1370_info = { + .name = TYPE_ES1370, + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof (ES1370State), + .class_init = es1370_class_init, + .interfaces = (InterfaceInfo[]) { + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { }, + }, +}; + +static void es1370_register_types (void) +{ + type_register_static (&es1370_info); + deprecated_register_soundhw("es1370", "ENSONIQ AudioPCI ES1370", + 0, TYPE_ES1370); +} + +type_init (es1370_register_types) diff --git a/hw/audio/fmopl.c b/hw/audio/fmopl.c new file mode 100644 index 000000000..8a71a569f --- /dev/null +++ b/hw/audio/fmopl.c @@ -0,0 +1,1211 @@ +/* +** +** File: fmopl.c -- software implementation of FM sound generator +** +** Copyright (C) 1999,2000 Tatsuyuki Satoh , MultiArcadeMachineEmurator development +** +** Version 0.37a +** +*/ + +/* + preliminary : + Problem : + note: +*/ + +/* This version of fmopl.c is a fork of the MAME one, relicensed under the LGPL. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ + +#include "qemu/osdep.h" +#include +//#include "driver.h" /* use M.A.M.E. 
*/ +#include "fmopl.h" +#ifndef PI +#define PI 3.14159265358979323846 +#endif + +/* -------------------- for debug --------------------- */ +/* #define OPL_OUTPUT_LOG */ +#ifdef OPL_OUTPUT_LOG +static FILE *opl_dbg_fp = NULL; +static FM_OPL *opl_dbg_opl[16]; +static int opl_dbg_maxchip,opl_dbg_chip; +#endif + +/* -------------------- preliminary define section --------------------- */ +/* attack/decay rate time rate */ +#define OPL_ARRATE 141280 /* RATE 4 = 2826.24ms @ 3.6MHz */ +#define OPL_DRRATE 1956000 /* RATE 4 = 39280.64ms @ 3.6MHz */ + +#define DELTAT_MIXING_LEVEL (1) /* DELTA-T ADPCM MIXING LEVEL */ + +#define FREQ_BITS 24 /* frequency turn */ + +/* counter bits = 20 , octerve 7 */ +#define FREQ_RATE (1<<(FREQ_BITS-20)) +#define TL_BITS (FREQ_BITS+2) + +/* final output shift , limit minimum and maximum */ +#define OPL_OUTSB (TL_BITS+3-16) /* OPL output final shift 16bit */ +#define OPL_MAXOUT (0x7fff<=LOG_LEVEL ) logerror x +#define LOG(n,x) + +/* --------------------- subroutines --------------------- */ + +static inline int Limit( int val, int max, int min ) { + if ( val > max ) + val = max; + else if ( val < min ) + val = min; + + return val; +} + +/* status set and IRQ handling */ +static inline void OPL_STATUS_SET(FM_OPL *OPL,int flag) +{ + /* set status flag */ + OPL->status |= flag; + if(!(OPL->status & 0x80)) + { + if(OPL->status & OPL->statusmask) + { /* IRQ on */ + OPL->status |= 0x80; + } + } +} + +/* status reset and IRQ handling */ +static inline void OPL_STATUS_RESET(FM_OPL *OPL,int flag) +{ + /* reset status flag */ + OPL->status &=~flag; + if((OPL->status & 0x80)) + { + if (!(OPL->status & OPL->statusmask) ) + { + OPL->status &= 0x7f; + } + } +} + +/* IRQ mask set */ +static inline void OPL_STATUSMASK_SET(FM_OPL *OPL,int flag) +{ + OPL->statusmask = flag; + /* IRQ handling check */ + OPL_STATUS_SET(OPL,0); + OPL_STATUS_RESET(OPL,0); +} + +/* ----- key on ----- */ +static inline void OPL_KEYON(OPL_SLOT *SLOT) +{ + /* sin wave restart */ + SLOT->Cnt = 0; + /* set attack */ + SLOT->evm = ENV_MOD_AR; + SLOT->evs = SLOT->evsa; + SLOT->evc = EG_AST; + SLOT->eve = EG_AED; +} +/* ----- key off ----- */ +static inline void OPL_KEYOFF(OPL_SLOT *SLOT) +{ + if( SLOT->evm > ENV_MOD_RR) + { + /* set envelope counter from envleope output */ + SLOT->evm = ENV_MOD_RR; + if( !(SLOT->evc&EG_DST) ) + //SLOT->evc = (ENV_CURVE[SLOT->evc>>ENV_BITS]<evc = EG_DST; + SLOT->eve = EG_DED; + SLOT->evs = SLOT->evsr; + } +} + +/* ---------- calcrate Envelope Generator & Phase Generator ---------- */ +/* return : envelope output */ +static inline uint32_t OPL_CALC_SLOT( OPL_SLOT *SLOT ) +{ + /* calcrate envelope generator */ + if( (SLOT->evc+=SLOT->evs) >= SLOT->eve ) + { + switch( SLOT->evm ){ + case ENV_MOD_AR: /* ATTACK -> DECAY1 */ + /* next DR */ + SLOT->evm = ENV_MOD_DR; + SLOT->evc = EG_DST; + SLOT->eve = SLOT->SL; + SLOT->evs = SLOT->evsd; + break; + case ENV_MOD_DR: /* DECAY -> SL or RR */ + SLOT->evc = SLOT->SL; + SLOT->eve = EG_DED; + if(SLOT->eg_typ) + { + SLOT->evs = 0; + } + else + { + SLOT->evm = ENV_MOD_RR; + SLOT->evs = SLOT->evsr; + } + break; + case ENV_MOD_RR: /* RR -> OFF */ + SLOT->evc = EG_OFF; + SLOT->eve = EG_OFF+1; + SLOT->evs = 0; + break; + } + } + /* calcrate envelope */ + return SLOT->TLL+ENV_CURVE[SLOT->evc>>ENV_BITS]+(SLOT->ams ? ams : 0); +} + +/* set algorithm connection */ +static void set_algorithm( OPL_CH *CH) +{ + int32_t *carrier = &outd[0]; + CH->connect1 = CH->CON ? 
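+                   /* CON=1: slot 1 adds straight into the carrier sum
+                      (additive mode); CON=0: slot 1 drives feedback2 and
+                      phase-modulates slot 2 (FM mode) */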
carrier : &feedback2; + CH->connect2 = carrier; +} + +/* ---------- frequency counter for operater update ---------- */ +static inline void CALC_FCSLOT(OPL_CH *CH,OPL_SLOT *SLOT) +{ + int ksr; + + /* frequency step counter */ + SLOT->Incr = CH->fc * SLOT->mul; + ksr = CH->kcode >> SLOT->KSR; + + if( SLOT->ksr != ksr ) + { + SLOT->ksr = ksr; + /* attack , decay rate recalcration */ + SLOT->evsa = SLOT->AR[ksr]; + SLOT->evsd = SLOT->DR[ksr]; + SLOT->evsr = SLOT->RR[ksr]; + } + SLOT->TLL = SLOT->TL + (CH->ksl_base>>SLOT->ksl); +} + +/* set multi,am,vib,EG-TYP,KSR,mul */ +static inline void set_mul(FM_OPL *OPL,int slot,int v) +{ + OPL_CH *CH = &OPL->P_CH[slot/2]; + OPL_SLOT *SLOT = &CH->SLOT[slot&1]; + + SLOT->mul = MUL_TABLE[v&0x0f]; + SLOT->KSR = (v&0x10) ? 0 : 2; + SLOT->eg_typ = (v&0x20)>>5; + SLOT->vib = (v&0x40); + SLOT->ams = (v&0x80); + CALC_FCSLOT(CH,SLOT); +} + +/* set ksl & tl */ +static inline void set_ksl_tl(FM_OPL *OPL,int slot,int v) +{ + OPL_CH *CH = &OPL->P_CH[slot/2]; + OPL_SLOT *SLOT = &CH->SLOT[slot&1]; + int ksl = v>>6; /* 0 / 1.5 / 3 / 6 db/OCT */ + + SLOT->ksl = ksl ? 3-ksl : 31; + SLOT->TL = (v&0x3f)*(0.75/EG_STEP); /* 0.75db step */ + + if( !(OPL->mode&0x80) ) + { /* not CSM latch total level */ + SLOT->TLL = SLOT->TL + (CH->ksl_base>>SLOT->ksl); + } +} + +/* set attack rate & decay rate */ +static inline void set_ar_dr(FM_OPL *OPL,int slot,int v) +{ + OPL_CH *CH = &OPL->P_CH[slot/2]; + OPL_SLOT *SLOT = &CH->SLOT[slot&1]; + int ar = v>>4; + int dr = v&0x0f; + + SLOT->AR = ar ? &OPL->AR_TABLE[ar<<2] : RATE_0; + SLOT->evsa = SLOT->AR[SLOT->ksr]; + if( SLOT->evm == ENV_MOD_AR ) SLOT->evs = SLOT->evsa; + + SLOT->DR = dr ? &OPL->DR_TABLE[dr<<2] : RATE_0; + SLOT->evsd = SLOT->DR[SLOT->ksr]; + if( SLOT->evm == ENV_MOD_DR ) SLOT->evs = SLOT->evsd; +} + +/* set sustain level & release rate */ +static inline void set_sl_rr(FM_OPL *OPL,int slot,int v) +{ + OPL_CH *CH = &OPL->P_CH[slot/2]; + OPL_SLOT *SLOT = &CH->SLOT[slot&1]; + int sl = v>>4; + int rr = v & 0x0f; + + SLOT->SL = SL_TABLE[sl]; + if( SLOT->evm == ENV_MOD_DR ) SLOT->eve = SLOT->SL; + SLOT->RR = &OPL->DR_TABLE[rr<<2]; + SLOT->evsr = SLOT->RR[SLOT->ksr]; + if( SLOT->evm == ENV_MOD_RR ) SLOT->evs = SLOT->evsr; +} + +/* operator output calcrator */ +#define OP_OUT(slot,env,con) slot->wavetable[((slot->Cnt+con)/(0x1000000/SIN_ENT))&(SIN_ENT-1)][env] +/* ---------- calcrate one of channel ---------- */ +static inline void OPL_CALC_CH( OPL_CH *CH ) +{ + uint32_t env_out; + OPL_SLOT *SLOT; + + feedback2 = 0; + /* SLOT 1 */ + SLOT = &CH->SLOT[SLOT1]; + env_out=OPL_CALC_SLOT(SLOT); + if( env_out < EG_ENT-1 ) + { + /* PG */ + if(SLOT->vib) SLOT->Cnt += (SLOT->Incr*vib/VIB_RATE); + else SLOT->Cnt += SLOT->Incr; + /* connectoion */ + if(CH->FB) + { + int feedback1 = (CH->op1_out[0]+CH->op1_out[1])>>CH->FB; + CH->op1_out[1] = CH->op1_out[0]; + *CH->connect1 += CH->op1_out[0] = OP_OUT(SLOT,env_out,feedback1); + } + else + { + *CH->connect1 += OP_OUT(SLOT,env_out,0); + } + }else + { + CH->op1_out[1] = CH->op1_out[0]; + CH->op1_out[0] = 0; + } + /* SLOT 2 */ + SLOT = &CH->SLOT[SLOT2]; + env_out=OPL_CALC_SLOT(SLOT); + if( env_out < EG_ENT-1 ) + { + /* PG */ + if(SLOT->vib) SLOT->Cnt += (SLOT->Incr*vib/VIB_RATE); + else SLOT->Cnt += SLOT->Incr; + /* connectoion */ + outd[0] += OP_OUT(SLOT,env_out, feedback2); + } +} + +/* ---------- calcrate rhythm block ---------- */ +#define WHITE_NOISE_db 6.0 +static inline void OPL_CALC_RH( OPL_CH *CH ) +{ + uint32_t env_tam,env_sd,env_top,env_hh; + int whitenoise = 
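+    /* one random bit per sample, scaled to WHITE_NOISE_db (6 dB) worth
+       of envelope attenuation steps */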
(rand()&1)*(WHITE_NOISE_db/EG_STEP); + int32_t tone8; + + OPL_SLOT *SLOT; + int env_out; + + /* BD : same as FM serial mode and output level is large */ + feedback2 = 0; + /* SLOT 1 */ + SLOT = &CH[6].SLOT[SLOT1]; + env_out=OPL_CALC_SLOT(SLOT); + if( env_out < EG_ENT-1 ) + { + /* PG */ + if(SLOT->vib) SLOT->Cnt += (SLOT->Incr*vib/VIB_RATE); + else SLOT->Cnt += SLOT->Incr; + /* connectoion */ + if(CH[6].FB) + { + int feedback1 = (CH[6].op1_out[0]+CH[6].op1_out[1])>>CH[6].FB; + CH[6].op1_out[1] = CH[6].op1_out[0]; + feedback2 = CH[6].op1_out[0] = OP_OUT(SLOT,env_out,feedback1); + } + else + { + feedback2 = OP_OUT(SLOT,env_out,0); + } + }else + { + feedback2 = 0; + CH[6].op1_out[1] = CH[6].op1_out[0]; + CH[6].op1_out[0] = 0; + } + /* SLOT 2 */ + SLOT = &CH[6].SLOT[SLOT2]; + env_out=OPL_CALC_SLOT(SLOT); + if( env_out < EG_ENT-1 ) + { + /* PG */ + if(SLOT->vib) SLOT->Cnt += (SLOT->Incr*vib/VIB_RATE); + else SLOT->Cnt += SLOT->Incr; + /* connectoion */ + outd[0] += OP_OUT(SLOT,env_out, feedback2)*2; + } + + // SD (17) = mul14[fnum7] + white noise + // TAM (15) = mul15[fnum8] + // TOP (18) = fnum6(mul18[fnum8]+whitenoise) + // HH (14) = fnum7(mul18[fnum8]+whitenoise) + white noise + env_sd =OPL_CALC_SLOT(SLOT7_2) + whitenoise; + env_tam=OPL_CALC_SLOT(SLOT8_1); + env_top=OPL_CALC_SLOT(SLOT8_2); + env_hh =OPL_CALC_SLOT(SLOT7_1) + whitenoise; + + /* PG */ + if(SLOT7_1->vib) SLOT7_1->Cnt += (2*SLOT7_1->Incr*vib/VIB_RATE); + else SLOT7_1->Cnt += 2*SLOT7_1->Incr; + if(SLOT7_2->vib) SLOT7_2->Cnt += ((CH[7].fc*8)*vib/VIB_RATE); + else SLOT7_2->Cnt += (CH[7].fc*8); + if(SLOT8_1->vib) SLOT8_1->Cnt += (SLOT8_1->Incr*vib/VIB_RATE); + else SLOT8_1->Cnt += SLOT8_1->Incr; + if(SLOT8_2->vib) SLOT8_2->Cnt += ((CH[8].fc*48)*vib/VIB_RATE); + else SLOT8_2->Cnt += (CH[8].fc*48); + + tone8 = OP_OUT(SLOT8_2,whitenoise,0 ); + + /* SD */ + if( env_sd < EG_ENT-1 ) + outd[0] += OP_OUT(SLOT7_1,env_sd, 0)*8; + /* TAM */ + if( env_tam < EG_ENT-1 ) + outd[0] += OP_OUT(SLOT8_1,env_tam, 0)*2; + /* TOP-CY */ + if( env_top < EG_ENT-1 ) + outd[0] += OP_OUT(SLOT7_2,env_top,tone8)*2; + /* HH */ + if( env_hh < EG_ENT-1 ) + outd[0] += OP_OUT(SLOT7_2,env_hh,tone8)*2; +} + +/* ----------- initialize time tabls ----------- */ +static void init_timetables( FM_OPL *OPL , int ARRATE , int DRRATE ) +{ + int i; + double rate; + + /* make attack rate & decay rate tables */ + for (i = 0;i < 4;i++) OPL->AR_TABLE[i] = OPL->DR_TABLE[i] = 0; + for (i = 4;i <= 60;i++){ + rate = OPL->freqbase; /* frequency rate */ + if( i < 60 ) rate *= 1.0+(i&3)*0.25; /* b0-1 : x1 , x1.25 , x1.5 , x1.75 */ + rate *= 1<<((i>>2)-1); /* b2-5 : shift bit */ + rate *= (double)(EG_ENT<AR_TABLE[i] = rate / ARRATE; + OPL->DR_TABLE[i] = rate / DRRATE; + } + for (i = 60; i < ARRAY_SIZE(OPL->AR_TABLE); i++) + { + OPL->AR_TABLE[i] = EG_AED-1; + OPL->DR_TABLE[i] = OPL->DR_TABLE[60]; + } +#if 0 + for (i = 0;i < 64 ;i++){ /* make for overflow area */ + LOG(LOG_WAR, ("rate %2d , ar %f ms , dr %f ms\n", i, + ((double)(EG_ENT<AR_TABLE[i]) * (1000.0 / OPL->rate), + ((double)(EG_ENT<DR_TABLE[i]) * (1000.0 / OPL->rate) )); + } +#endif +} + +/* ---------- generic table initialize ---------- */ +static int OPLOpenTable( void ) +{ + int s,t; + double rate; + int i,j; + double pom; + + /* allocate dynamic tables */ + if( (TL_TABLE = malloc(TL_MAX*2*sizeof(int32_t))) == NULL) + return 0; + if( (SIN_TABLE = malloc(SIN_ENT*4 *sizeof(int32_t *))) == NULL) + { + free(TL_TABLE); + return 0; + } + if( (AMS_TABLE = malloc(AMS_ENT*2 *sizeof(int32_t))) == NULL) + { + free(TL_TABLE); + free(SIN_TABLE); 
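+        /* signal failure; OPL_LockTable() turns this into -1 */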
+ return 0; + } + if( (VIB_TABLE = malloc(VIB_ENT*2 *sizeof(int32_t))) == NULL) + { + free(TL_TABLE); + free(SIN_TABLE); + free(AMS_TABLE); + return 0; + } + ENV_CURVE = g_new(int32_t, 2 * EG_ENT + 1); + /* make total level table */ + for (t = 0;t < EG_ENT-1 ;t++){ + rate = ((1< voltage */ + TL_TABLE[ t] = (int)rate; + TL_TABLE[TL_MAX+t] = -TL_TABLE[t]; +/* LOG(LOG_INF,("TotalLevel(%3d) = %x\n",t,TL_TABLE[t]));*/ + } + /* fill volume off area */ + for ( t = EG_ENT-1; t < TL_MAX ;t++){ + TL_TABLE[t] = TL_TABLE[TL_MAX+t] = 0; + } + + /* make sinwave table (total level offet) */ + /* degree 0 = degree 180 = off */ + SIN_TABLE[0] = SIN_TABLE[SIN_ENT/2] = &TL_TABLE[EG_ENT-1]; + for (s = 1;s <= SIN_ENT/4;s++){ + pom = sin(2*PI*s/SIN_ENT); /* sin */ + pom = 20*log10(1/pom); /* decibel */ + j = pom / EG_STEP; /* TL_TABLE steps */ + + /* degree 0 - 90 , degree 180 - 90 : plus section */ + SIN_TABLE[ s] = SIN_TABLE[SIN_ENT/2-s] = &TL_TABLE[j]; + /* degree 180 - 270 , degree 360 - 270 : minus section */ + SIN_TABLE[SIN_ENT/2+s] = SIN_TABLE[SIN_ENT -s] = &TL_TABLE[TL_MAX+j]; +/* LOG(LOG_INF,("sin(%3d) = %f:%f db\n",s,pom,(double)j * EG_STEP));*/ + } + for (s = 0;s < SIN_ENT;s++) + { + SIN_TABLE[SIN_ENT*1+s] = s<(SIN_ENT/2) ? SIN_TABLE[s] : &TL_TABLE[EG_ENT]; + SIN_TABLE[SIN_ENT*2+s] = SIN_TABLE[s % (SIN_ENT/2)]; + SIN_TABLE[SIN_ENT*3+s] = (s/(SIN_ENT/4))&1 ? &TL_TABLE[EG_ENT] : SIN_TABLE[SIN_ENT*2+s]; + } + + /* envelope counter -> envelope output table */ + for (i=0; i= EG_ENT ) pom = EG_ENT-1; */ + ENV_CURVE[i] = (int)pom; + /* DECAY ,RELEASE curve */ + ENV_CURVE[(EG_DST>>ENV_BITS)+i]= i; + } + /* off */ + ENV_CURVE[EG_OFF>>ENV_BITS]= EG_ENT-1; + /* make LFO ams table */ + for (i=0; iSLOT[SLOT1]; + OPL_SLOT *slot2 = &CH->SLOT[SLOT2]; + /* all key off */ + OPL_KEYOFF(slot1); + OPL_KEYOFF(slot2); + /* total level latch */ + slot1->TLL = slot1->TL + (CH->ksl_base>>slot1->ksl); + slot1->TLL = slot1->TL + (CH->ksl_base>>slot1->ksl); + /* key on */ + CH->op1_out[0] = CH->op1_out[1] = 0; + OPL_KEYON(slot1); + OPL_KEYON(slot2); +} + +/* ---------- opl initialize ---------- */ +static void OPL_initialize(FM_OPL *OPL) +{ + int fn; + + /* frequency base */ + OPL->freqbase = (OPL->rate) ? ((double)OPL->clock / OPL->rate) / 72 : 0; + /* Timer base time */ + OPL->TimerBase = 1.0/((double)OPL->clock / 72.0 ); + /* make time tables */ + init_timetables( OPL , OPL_ARRATE , OPL_DRRATE ); + /* make fnumber -> increment counter table */ + for( fn=0 ; fn < 1024 ; fn++ ) + { + OPL->FN_TABLE[fn] = OPL->freqbase * fn * FREQ_RATE * (1<<7) / 2; + } + /* LFO freq.table */ + OPL->amsIncr = OPL->rate ? (double)AMS_ENT*(1<rate * 3.7 * ((double)OPL->clock/3600000) : 0; + OPL->vibIncr = OPL->rate ? 
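+    /* fixed-point LFO steps: sweep the AM table about 3.7 times and the
+       vibrato table about 6.4 times per second at the 3.6 MHz reference
+       clock used throughout this file */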
(double)VIB_ENT*(1<rate * 6.4 * ((double)OPL->clock/3600000) : 0; +} + +/* ---------- write a OPL registers ---------- */ +static void OPLWriteReg(FM_OPL *OPL, int r, int v) +{ + OPL_CH *CH; + int slot; + int block_fnum; + + switch(r&0xe0) + { + case 0x00: /* 00-1f:control */ + switch(r&0x1f) + { + case 0x01: + /* wave selector enable */ + OPL->wavesel = v&0x20; + if(!OPL->wavesel) + { + /* preset compatible mode */ + int c; + for(c=0;cmax_ch;c++) + { + OPL->P_CH[c].SLOT[SLOT1].wavetable = &SIN_TABLE[0]; + OPL->P_CH[c].SLOT[SLOT2].wavetable = &SIN_TABLE[0]; + } + } + return; + case 0x02: /* Timer 1 */ + OPL->T[0] = (256-v)*4; + break; + case 0x03: /* Timer 2 */ + OPL->T[1] = (256-v)*16; + return; + case 0x04: /* IRQ clear / mask and Timer enable */ + if(v&0x80) + { /* IRQ flag clear */ + OPL_STATUS_RESET(OPL,0x7f); + } + else + { /* set IRQ mask ,timer enable*/ + uint8_t st1 = v&1; + uint8_t st2 = (v>>1)&1; + /* IRQRST,T1MSK,t2MSK,EOSMSK,BRMSK,x,ST2,ST1 */ + OPL_STATUS_RESET(OPL,v&0x78); + OPL_STATUSMASK_SET(OPL,((~v)&0x78)|0x01); + /* timer 2 */ + if(OPL->st[1] != st2) + { + double interval = st2 ? (double)OPL->T[1]*OPL->TimerBase : 0.0; + OPL->st[1] = st2; + if (OPL->TimerHandler) { + (OPL->TimerHandler)(OPL->TimerParam, 1, interval); + } + } + /* timer 1 */ + if(OPL->st[0] != st1) + { + double interval = st1 ? (double)OPL->T[0]*OPL->TimerBase : 0.0; + OPL->st[0] = st1; + if (OPL->TimerHandler) { + (OPL->TimerHandler)(OPL->TimerParam, 0, interval); + } + } + } + return; + } + break; + case 0x20: /* am,vib,ksr,eg type,mul */ + slot = slot_array[r&0x1f]; + if(slot == -1) return; + set_mul(OPL,slot,v); + return; + case 0x40: + slot = slot_array[r&0x1f]; + if(slot == -1) return; + set_ksl_tl(OPL,slot,v); + return; + case 0x60: + slot = slot_array[r&0x1f]; + if(slot == -1) return; + set_ar_dr(OPL,slot,v); + return; + case 0x80: + slot = slot_array[r&0x1f]; + if(slot == -1) return; + set_sl_rr(OPL,slot,v); + return; + case 0xa0: + switch(r) + { + case 0xbd: + /* amsep,vibdep,r,bd,sd,tom,tc,hh */ + { + uint8_t rkey = OPL->rhythm^v; + OPL->ams_table = &AMS_TABLE[v&0x80 ? AMS_ENT : 0]; + OPL->vib_table = &VIB_TABLE[v&0x40 ? 
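+                                    /* bit 6 selects the deep half of the
+                                       vibrato table, just as bit 7 did for
+                                       the AM table above */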
VIB_ENT : 0]; + OPL->rhythm = v&0x3f; + if(OPL->rhythm&0x20) + { +#if 0 + usrintf_showmessage("OPL Rhythm mode select"); +#endif + /* BD key on/off */ + if(rkey&0x10) + { + if(v&0x10) + { + OPL->P_CH[6].op1_out[0] = OPL->P_CH[6].op1_out[1] = 0; + OPL_KEYON(&OPL->P_CH[6].SLOT[SLOT1]); + OPL_KEYON(&OPL->P_CH[6].SLOT[SLOT2]); + } + else + { + OPL_KEYOFF(&OPL->P_CH[6].SLOT[SLOT1]); + OPL_KEYOFF(&OPL->P_CH[6].SLOT[SLOT2]); + } + } + /* SD key on/off */ + if(rkey&0x08) + { + if(v&0x08) OPL_KEYON(&OPL->P_CH[7].SLOT[SLOT2]); + else OPL_KEYOFF(&OPL->P_CH[7].SLOT[SLOT2]); + }/* TAM key on/off */ + if(rkey&0x04) + { + if(v&0x04) OPL_KEYON(&OPL->P_CH[8].SLOT[SLOT1]); + else OPL_KEYOFF(&OPL->P_CH[8].SLOT[SLOT1]); + } + /* TOP-CY key on/off */ + if(rkey&0x02) + { + if(v&0x02) OPL_KEYON(&OPL->P_CH[8].SLOT[SLOT2]); + else OPL_KEYOFF(&OPL->P_CH[8].SLOT[SLOT2]); + } + /* HH key on/off */ + if(rkey&0x01) + { + if(v&0x01) OPL_KEYON(&OPL->P_CH[7].SLOT[SLOT1]); + else OPL_KEYOFF(&OPL->P_CH[7].SLOT[SLOT1]); + } + } + } + return; + } + /* keyon,block,fnum */ + if( (r&0x0f) > 8) return; + CH = &OPL->P_CH[r&0x0f]; + if(!(r&0x10)) + { /* a0-a8 */ + block_fnum = (CH->block_fnum&0x1f00) | v; + } + else + { /* b0-b8 */ + int keyon = (v>>5)&1; + block_fnum = ((v&0x1f)<<8) | (CH->block_fnum&0xff); + if(CH->keyon != keyon) + { + if( (CH->keyon=keyon) ) + { + CH->op1_out[0] = CH->op1_out[1] = 0; + OPL_KEYON(&CH->SLOT[SLOT1]); + OPL_KEYON(&CH->SLOT[SLOT2]); + } + else + { + OPL_KEYOFF(&CH->SLOT[SLOT1]); + OPL_KEYOFF(&CH->SLOT[SLOT2]); + } + } + } + /* update */ + if(CH->block_fnum != block_fnum) + { + int blockRv = 7-(block_fnum>>10); + int fnum = block_fnum&0x3ff; + CH->block_fnum = block_fnum; + + CH->ksl_base = KSL_TABLE[block_fnum>>6]; + CH->fc = OPL->FN_TABLE[fnum]>>blockRv; + CH->kcode = CH->block_fnum>>9; + if( (OPL->mode&0x40) && CH->block_fnum&0x100) CH->kcode |=1; + CALC_FCSLOT(CH,&CH->SLOT[SLOT1]); + CALC_FCSLOT(CH,&CH->SLOT[SLOT2]); + } + return; + case 0xc0: + /* FB,C */ + if( (r&0x0f) > 8) return; + CH = &OPL->P_CH[r&0x0f]; + { + int feedback = (v>>1)&7; + CH->FB = feedback ? 
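+                   /* map register FB=1..7 to a right shift of 8..2 on the
+                      summed op1 output; FB=0 disables self-feedback */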
(8+1) - feedback : 0; + CH->CON = v&1; + set_algorithm(CH); + } + return; + case 0xe0: /* wave type */ + slot = slot_array[r&0x1f]; + if(slot == -1) return; + CH = &OPL->P_CH[slot/2]; + if(OPL->wavesel) + { + /* LOG(LOG_INF,("OPL SLOT %d wave select %d\n",slot,v&3)); */ + CH->SLOT[slot&1].wavetable = &SIN_TABLE[(v&0x03)*SIN_ENT]; + } + return; + } +} + +/* lock/unlock for common table */ +static int OPL_LockTable(void) +{ + num_lock++; + if(num_lock>1) return 0; + /* first time */ + cur_chip = NULL; + /* allocate total level table (128kb space) */ + if( !OPLOpenTable() ) + { + num_lock--; + return -1; + } + return 0; +} + +static void OPL_UnLockTable(void) +{ + if(num_lock) num_lock--; + if(num_lock) return; + /* last time */ + cur_chip = NULL; + OPLCloseTable(); +} + +/*******************************************************************************/ +/* YM3812 local section */ +/*******************************************************************************/ + +/* ---------- update one of chip ----------- */ +void YM3812UpdateOne(FM_OPL *OPL, int16_t *buffer, int length) +{ + int i; + int data; + int16_t *buf = buffer; + uint32_t amsCnt = OPL->amsCnt; + uint32_t vibCnt = OPL->vibCnt; + uint8_t rhythm = OPL->rhythm&0x20; + OPL_CH *CH,*R_CH; + + if( (void *)OPL != cur_chip ){ + cur_chip = (void *)OPL; + /* channel pointers */ + S_CH = OPL->P_CH; + E_CH = &S_CH[9]; + /* rhythm slot */ + SLOT7_1 = &S_CH[7].SLOT[SLOT1]; + SLOT7_2 = &S_CH[7].SLOT[SLOT2]; + SLOT8_1 = &S_CH[8].SLOT[SLOT1]; + SLOT8_2 = &S_CH[8].SLOT[SLOT2]; + /* LFO state */ + amsIncr = OPL->amsIncr; + vibIncr = OPL->vibIncr; + ams_table = OPL->ams_table; + vib_table = OPL->vib_table; + } + R_CH = rhythm ? &S_CH[6] : E_CH; + for( i=0; i < length ; i++ ) + { + /* channel A channel B channel C */ + /* LFO */ + ams = ams_table[(amsCnt+=amsIncr)>>AMS_SHIFT]; + vib = vib_table[(vibCnt+=vibIncr)>>VIB_SHIFT]; + outd[0] = 0; + /* FM part */ + for(CH=S_CH ; CH < R_CH ; CH++) + OPL_CALC_CH(CH); + /* Rythn part */ + if(rhythm) + OPL_CALC_RH(S_CH); + /* limit check */ + data = Limit( outd[0] , OPL_MAXOUT, OPL_MINOUT ); + /* store to sound buffer */ + buf[i] = data >> OPL_OUTSB; + } + + OPL->amsCnt = amsCnt; + OPL->vibCnt = vibCnt; +#ifdef OPL_OUTPUT_LOG + if(opl_dbg_fp) + { + for(opl_dbg_chip=0;opl_dbg_chipmode = 0; /* normal mode */ + OPL_STATUS_RESET(OPL,0x7f); + /* reset with register write */ + OPLWriteReg(OPL,0x01,0); /* wabesel disable */ + OPLWriteReg(OPL,0x02,0); /* Timer1 */ + OPLWriteReg(OPL,0x03,0); /* Timer2 */ + OPLWriteReg(OPL,0x04,0); /* IRQ mask clear */ + for(i = 0xff ; i >= 0x20 ; i-- ) OPLWriteReg(OPL,i,0); + /* reset operator parameter */ + for( c = 0 ; c < OPL->max_ch ; c++ ) + { + OPL_CH *CH = &OPL->P_CH[c]; + /* OPL->P_CH[c].PAN = OPN_CENTER; */ + for(s = 0 ; s < 2 ; s++ ) + { + /* wave table */ + CH->SLOT[s].wavetable = &SIN_TABLE[0]; + /* CH->SLOT[s].evm = ENV_MOD_RR; */ + CH->SLOT[s].evc = EG_OFF; + CH->SLOT[s].eve = EG_OFF+1; + CH->SLOT[s].evs = 0; + } + } +} + +/* ---------- Create one of virtual YM3812 ---------- */ +/* 'rate' is sampling rate and 'bufsiz' is the size of the */ +FM_OPL *OPLCreate(int clock, int rate) +{ + char *ptr; + FM_OPL *OPL; + int state_size; + int max_ch = 9; /* normaly 9 channels */ + + if( OPL_LockTable() ==-1) return NULL; + /* allocate OPL state space */ + state_size = sizeof(FM_OPL); + state_size += sizeof(OPL_CH)*max_ch; + /* allocate memory block */ + ptr = malloc(state_size); + if(ptr==NULL) return NULL; + /* clear */ + memset(ptr,0,state_size); + OPL = (FM_OPL *)ptr; 
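+    /* carve the FM_OPL header and the nine OPL_CH structs out of the
+       single zeroed block allocated above */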
ptr+=sizeof(FM_OPL); + OPL->P_CH = (OPL_CH *)ptr; ptr+=sizeof(OPL_CH)*max_ch; + /* set channel state pointer */ + OPL->clock = clock; + OPL->rate = rate; + OPL->max_ch = max_ch; + /* init grobal tables */ + OPL_initialize(OPL); + /* reset chip */ + OPLResetChip(OPL); +#ifdef OPL_OUTPUT_LOG + if(!opl_dbg_fp) + { + opl_dbg_fp = fopen("opllog.opl","wb"); + opl_dbg_maxchip = 0; + } + if(opl_dbg_fp) + { + opl_dbg_opl[opl_dbg_maxchip] = OPL; + fprintf(opl_dbg_fp,"%c%c%c%c%c%c",0x00+opl_dbg_maxchip, + type, + clock&0xff, + (clock/0x100)&0xff, + (clock/0x10000)&0xff, + (clock/0x1000000)&0xff); + opl_dbg_maxchip++; + } +#endif + return OPL; +} + +/* ---------- Destroy one of virtual YM3812 ---------- */ +void OPLDestroy(FM_OPL *OPL) +{ +#ifdef OPL_OUTPUT_LOG + if(opl_dbg_fp) + { + fclose(opl_dbg_fp); + opl_dbg_fp = NULL; + } +#endif + OPL_UnLockTable(); + free(OPL); +} + +/* ---------- Option handlers ---------- */ + +void OPLSetTimerHandler(FM_OPL *OPL, OPL_TIMERHANDLER TimerHandler, + void *param) +{ + OPL->TimerHandler = TimerHandler; + OPL->TimerParam = param; +} + +/* ---------- YM3812 I/O interface ---------- */ +int OPLWrite(FM_OPL *OPL,int a,int v) +{ + if( !(a&1) ) + { /* address port */ + OPL->address = v & 0xff; + } + else + { /* data port */ +#ifdef OPL_OUTPUT_LOG + if(opl_dbg_fp) + { + for(opl_dbg_chip=0;opl_dbg_chipaddress,v); + } +#endif + OPLWriteReg(OPL,OPL->address,v); + } + return OPL->status>>7; +} + +unsigned char OPLRead(FM_OPL *OPL,int a) +{ + if( !(a&1) ) + { /* status port */ + return OPL->status & (OPL->statusmask|0x80); + } + /* data port */ + switch(OPL->address) + { + case 0x05: /* KeyBoard IN */ + return 0; +#if 0 + case 0x0f: /* ADPCM-DATA */ + return 0; +#endif + case 0x19: /* I/O DATA */ + return 0; + case 0x1a: /* PCM-DATA */ + return 0; + } + return 0; +} + +int OPLTimerOver(FM_OPL *OPL,int c) +{ + if( c ) + { /* Timer B */ + OPL_STATUS_SET(OPL,0x20); + } + else + { /* Timer A */ + OPL_STATUS_SET(OPL,0x40); + /* CSM mode key,TL control */ + if( OPL->mode & 0x80 ) + { /* CSM mode total level latch and auto key on */ + int ch; + for(ch=0;ch<9;ch++) + CSMKeyControll( &OPL->P_CH[ch] ); + } + } + /* reload timer */ + if (OPL->TimerHandler) { + (OPL->TimerHandler)(OPL->TimerParam, c, + (double)OPL->T[c] * OPL->TimerBase); + } + return OPL->status>>7; +} diff --git a/hw/audio/fmopl.h b/hw/audio/fmopl.h new file mode 100644 index 000000000..e008e72d7 --- /dev/null +++ b/hw/audio/fmopl.h @@ -0,0 +1,103 @@ +#ifndef FMOPL_H +#define FMOPL_H + + +typedef void (*OPL_TIMERHANDLER)(void *param, int channel, double interval_Sec); + +/* !!!!! here is private section , do not access there member direct !!!!! 
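+   (drive the chip through the OPLCreate/OPLWrite/OPLRead/YM3812UpdateOne
+   entry points declared in the generic interface section below instead)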
*/ + +/* Saving is necessary for member of the 'R' mark for suspend/resume */ +/* ---------- OPL one of slot ---------- */ +typedef struct fm_opl_slot { + int32_t TL; /* total level :TL << 8 */ + int32_t TLL; /* adjusted now TL */ + uint8_t KSR; /* key scale rate :(shift down bit) */ + int32_t *AR; /* attack rate :&AR_TABLE[AR<<2] */ + int32_t *DR; /* decay rate :&DR_TALBE[DR<<2] */ + int32_t SL; /* sustin level :SL_TALBE[SL] */ + int32_t *RR; /* release rate :&DR_TABLE[RR<<2] */ + uint8_t ksl; /* keyscale level :(shift down bits) */ + uint8_t ksr; /* key scale rate :kcode>>KSR */ + uint32_t mul; /* multiple :ML_TABLE[ML] */ + uint32_t Cnt; /* frequency count : */ + uint32_t Incr; /* frequency step : */ + /* envelope generator state */ + uint8_t eg_typ; /* envelope type flag */ + uint8_t evm; /* envelope phase */ + int32_t evc; /* envelope counter */ + int32_t eve; /* envelope counter end point */ + int32_t evs; /* envelope counter step */ + int32_t evsa; /* envelope step for AR :AR[ksr] */ + int32_t evsd; /* envelope step for DR :DR[ksr] */ + int32_t evsr; /* envelope step for RR :RR[ksr] */ + /* LFO */ + uint8_t ams; /* ams flag */ + uint8_t vib; /* vibrate flag */ + /* wave selector */ + int32_t **wavetable; +}OPL_SLOT; + +/* ---------- OPL one of channel ---------- */ +typedef struct fm_opl_channel { + OPL_SLOT SLOT[2]; + uint8_t CON; /* connection type */ + uint8_t FB; /* feed back :(shift down bit) */ + int32_t *connect1; /* slot1 output pointer */ + int32_t *connect2; /* slot2 output pointer */ + int32_t op1_out[2]; /* slot1 output for selfeedback */ + /* phase generator state */ + uint32_t block_fnum; /* block+fnum : */ + uint8_t kcode; /* key code : KeyScaleCode */ + uint32_t fc; /* Freq. Increment base */ + uint32_t ksl_base; /* KeyScaleLevel Base step */ + uint8_t keyon; /* key on/off flag */ +} OPL_CH; + +/* OPL state */ +typedef struct fm_opl_f { + int clock; /* master clock (Hz) */ + int rate; /* sampling rate (Hz) */ + double freqbase; /* frequency base */ + double TimerBase; /* Timer base time (==sampling time) */ + uint8_t address; /* address register */ + uint8_t status; /* status flag */ + uint8_t statusmask; /* status mask */ + uint32_t mode; /* Reg.08 : CSM , notesel,etc. 
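+                                  (bit 7 = CSM mode, tested as mode&0x80;
+                                   bit 6 = note select, tested as mode&0x40)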
*/ + /* Timer */ + int T[2]; /* timer counter */ + uint8_t st[2]; /* timer enable */ + /* FM channel slots */ + OPL_CH *P_CH; /* pointer of CH */ + int max_ch; /* maximum channel */ + /* Rhythm sention */ + uint8_t rhythm; /* Rhythm mode , key flag */ + /* time tables */ + int32_t AR_TABLE[76]; /* attack rate tables */ + int32_t DR_TABLE[76]; /* decay rate tables */ + uint32_t FN_TABLE[1024]; /* fnumber -> increment counter */ + /* LFO */ + int32_t *ams_table; + int32_t *vib_table; + int32_t amsCnt; + int32_t amsIncr; + int32_t vibCnt; + int32_t vibIncr; + /* wave selector enable flag */ + uint8_t wavesel; + /* external event callback handler */ + OPL_TIMERHANDLER TimerHandler; /* TIMER handler */ + void *TimerParam; /* TIMER parameter */ +} FM_OPL; + +/* ---------- Generic interface section ---------- */ +FM_OPL *OPLCreate(int clock, int rate); +void OPLDestroy(FM_OPL *OPL); +void OPLSetTimerHandler(FM_OPL *OPL, OPL_TIMERHANDLER TimerHandler, + void *param); + +int OPLWrite(FM_OPL *OPL,int a,int v); +unsigned char OPLRead(FM_OPL *OPL,int a); +int OPLTimerOver(FM_OPL *OPL,int c); + +void YM3812UpdateOne(FM_OPL *OPL, int16_t *buffer, int length); +#endif diff --git a/hw/audio/gus.c b/hw/audio/gus.c new file mode 100644 index 000000000..e8719ee11 --- /dev/null +++ b/hw/audio/gus.c @@ -0,0 +1,323 @@ +/* + * QEMU Proxy for Gravis Ultrasound GF1 emulation by Tibor "TS" Schütz + * + * Copyright (c) 2002-2005 Vassili Karpov (malc) + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/module.h" +#include "hw/audio/soundhw.h" +#include "audio/audio.h" +#include "hw/irq.h" +#include "hw/isa/isa.h" +#include "hw/qdev-properties.h" +#include "migration/vmstate.h" +#include "gusemu.h" +#include "gustate.h" +#include "qom/object.h" + +#define dolog(...) AUD_log ("audio", __VA_ARGS__) +#ifdef DEBUG +#define ldebug(...) dolog (__VA_ARGS__) +#else +#define ldebug(...) 
+#endif + +#define TYPE_GUS "gus" +OBJECT_DECLARE_SIMPLE_TYPE(GUSState, GUS) + +struct GUSState { + ISADevice dev; + GUSEmuState emu; + QEMUSoundCard card; + uint32_t freq; + uint32_t port; + int pos, left, shift, irqs; + int16_t *mixbuf; + uint8_t himem[1024 * 1024 + 32 + 4096]; + int samples; + SWVoiceOut *voice; + int64_t last_ticks; + qemu_irq pic; + IsaDma *isa_dma; + PortioList portio_list1; + PortioList portio_list2; +}; + +static uint32_t gus_readb(void *opaque, uint32_t nport) +{ + GUSState *s = opaque; + + return gus_read (&s->emu, nport, 1); +} + +static void gus_writeb(void *opaque, uint32_t nport, uint32_t val) +{ + GUSState *s = opaque; + + gus_write (&s->emu, nport, 1, val); +} + +static int write_audio (GUSState *s, int samples) +{ + int net = 0; + int pos = s->pos; + + while (samples) { + int nbytes, wbytes, wsampl; + + nbytes = samples << s->shift; + wbytes = AUD_write ( + s->voice, + s->mixbuf + (pos << (s->shift - 1)), + nbytes + ); + + if (wbytes) { + wsampl = wbytes >> s->shift; + + samples -= wsampl; + pos = (pos + wsampl) % s->samples; + + net += wsampl; + } + else { + break; + } + } + + return net; +} + +static void GUS_callback (void *opaque, int free) +{ + int samples, to_play, net = 0; + GUSState *s = opaque; + + samples = free >> s->shift; + to_play = MIN (samples, s->left); + + while (to_play) { + int written = write_audio (s, to_play); + + if (!written) { + goto reset; + } + + s->left -= written; + to_play -= written; + samples -= written; + net += written; + } + + samples = MIN (samples, s->samples); + if (samples) { + gus_mixvoices (&s->emu, s->freq, samples, s->mixbuf); + + while (samples) { + int written = write_audio (s, samples); + if (!written) { + break; + } + samples -= written; + net += written; + } + } + s->left = samples; + + reset: + gus_irqgen (&s->emu, (uint64_t)net * 1000000 / s->freq); +} + +int GUS_irqrequest (GUSEmuState *emu, int hwirq, int n) +{ + GUSState *s = emu->opaque; + /* qemu_irq_lower (s->pic); */ + qemu_irq_raise (s->pic); + s->irqs += n; + ldebug ("irqrequest %d %d %d\n", hwirq, n, s->irqs); + return n; +} + +void GUS_irqclear (GUSEmuState *emu, int hwirq) +{ + GUSState *s = emu->opaque; + ldebug ("irqclear %d %d\n", hwirq, s->irqs); + qemu_irq_lower (s->pic); + s->irqs -= 1; +#ifdef IRQ_STORM + if (s->irqs > 0) { + qemu_irq_raise (s->pic[hwirq]); + } +#endif +} + +void GUS_dmarequest (GUSEmuState *emu) +{ + GUSState *s = emu->opaque; + IsaDmaClass *k = ISADMA_GET_CLASS(s->isa_dma); + ldebug ("dma request %d\n", der->gusdma); + k->hold_DREQ(s->isa_dma, s->emu.gusdma); +} + +static int GUS_read_DMA (void *opaque, int nchan, int dma_pos, int dma_len) +{ + GUSState *s = opaque; + IsaDmaClass *k = ISADMA_GET_CLASS(s->isa_dma); + char tmpbuf[4096]; + int pos = dma_pos, mode, left = dma_len - dma_pos; + + ldebug ("read DMA %#x %d\n", dma_pos, dma_len); + mode = k->has_autoinitialization(s->isa_dma, s->emu.gusdma); + while (left) { + int to_copy = MIN ((size_t) left, sizeof (tmpbuf)); + int copied; + + ldebug ("left=%d to_copy=%d pos=%d\n", left, to_copy, pos); + copied = k->read_memory(s->isa_dma, nchan, tmpbuf, pos, to_copy); + gus_dma_transferdata (&s->emu, tmpbuf, copied, left == copied); + left -= copied; + pos += copied; + } + + if (((mode >> 4) & 1) == 0) { + k->release_DREQ(s->isa_dma, s->emu.gusdma); + } + return dma_len; +} + +static const VMStateDescription vmstate_gus = { + .name = "gus", + .version_id = 2, + .minimum_version_id = 2, + .fields = (VMStateField[]) { + VMSTATE_INT32 (pos, GUSState), + VMSTATE_INT32 (left, 
GUSState), + VMSTATE_INT32 (shift, GUSState), + VMSTATE_INT32 (irqs, GUSState), + VMSTATE_INT32 (samples, GUSState), + VMSTATE_INT64 (last_ticks, GUSState), + VMSTATE_BUFFER (himem, GUSState), + VMSTATE_END_OF_LIST () + } +}; + +static const MemoryRegionPortio gus_portio_list1[] = { + {0x000, 1, 1, .write = gus_writeb }, + {0x006, 10, 1, .read = gus_readb, .write = gus_writeb }, + {0x100, 8, 1, .read = gus_readb, .write = gus_writeb }, + PORTIO_END_OF_LIST (), +}; + +static const MemoryRegionPortio gus_portio_list2[] = { + {0, 2, 1, .read = gus_readb }, + PORTIO_END_OF_LIST (), +}; + +static void gus_realizefn (DeviceState *dev, Error **errp) +{ + ISADevice *d = ISA_DEVICE(dev); + GUSState *s = GUS (dev); + IsaDmaClass *k; + struct audsettings as; + + s->isa_dma = isa_get_dma(isa_bus_from_device(d), s->emu.gusdma); + if (!s->isa_dma) { + error_setg(errp, "ISA controller does not support DMA"); + return; + } + + AUD_register_card ("gus", &s->card); + + as.freq = s->freq; + as.nchannels = 2; + as.fmt = AUDIO_FORMAT_S16; + as.endianness = AUDIO_HOST_ENDIANNESS; + + s->voice = AUD_open_out ( + &s->card, + NULL, + "gus", + s, + GUS_callback, + &as + ); + + if (!s->voice) { + AUD_remove_card (&s->card); + error_setg(errp, "No voice"); + return; + } + + s->shift = 2; + s->samples = AUD_get_buffer_size_out (s->voice) >> s->shift; + s->mixbuf = g_malloc0 (s->samples << s->shift); + + isa_register_portio_list(d, &s->portio_list1, s->port, + gus_portio_list1, s, "gus"); + isa_register_portio_list(d, &s->portio_list2, (s->port + 0x100) & 0xf00, + gus_portio_list2, s, "gus"); + + k = ISADMA_GET_CLASS(s->isa_dma); + k->register_channel(s->isa_dma, s->emu.gusdma, GUS_read_DMA, s); + s->emu.himemaddr = s->himem; + s->emu.gusdatapos = s->emu.himemaddr + 1024 * 1024 + 32; + s->emu.opaque = s; + isa_init_irq (d, &s->pic, s->emu.gusirq); + + AUD_set_active_out (s->voice, 1); +} + +static Property gus_properties[] = { + DEFINE_AUDIO_PROPERTIES(GUSState, card), + DEFINE_PROP_UINT32 ("freq", GUSState, freq, 44100), + DEFINE_PROP_UINT32 ("iobase", GUSState, port, 0x240), + DEFINE_PROP_UINT32 ("irq", GUSState, emu.gusirq, 7), + DEFINE_PROP_UINT32 ("dma", GUSState, emu.gusdma, 3), + DEFINE_PROP_END_OF_LIST (), +}; + +static void gus_class_initfn (ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS (klass); + + dc->realize = gus_realizefn; + set_bit(DEVICE_CATEGORY_SOUND, dc->categories); + dc->desc = "Gravis Ultrasound GF1"; + dc->vmsd = &vmstate_gus; + device_class_set_props(dc, gus_properties); +} + +static const TypeInfo gus_info = { + .name = TYPE_GUS, + .parent = TYPE_ISA_DEVICE, + .instance_size = sizeof (GUSState), + .class_init = gus_class_initfn, +}; + +static void gus_register_types (void) +{ + type_register_static (&gus_info); + deprecated_register_soundhw("gus", "Gravis Ultrasound GF1", 1, TYPE_GUS); +} + +type_init (gus_register_types) diff --git a/hw/audio/gusemu.h b/hw/audio/gusemu.h new file mode 100644 index 000000000..ab591eefb --- /dev/null +++ b/hw/audio/gusemu.h @@ -0,0 +1,88 @@ +/* + * GUSEMU32 - API + * + * Copyright (C) 2000-2007 Tibor "TS" Schütz + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to 
the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#ifndef GUSEMU_H +#define GUSEMU_H + +typedef struct _GUSEmuState +{ + uint8_t *himemaddr; /* 1024*1024 bytes used for storing uploaded samples (+32 additional bytes for read padding) */ + uint8_t *gusdatapos; /* (gusdataend-gusdata) bytes used for storing emulated GF1/mixer register states (32*32+4 bytes in initial GUSemu32 version) */ + uint32_t gusirq; + uint32_t gusdma; + unsigned int timer1fraction; + unsigned int timer2fraction; + void *opaque; +} GUSEmuState; + +/* ** Callback functions needed: */ +/* NMI is defined as hwirq=-1 (not supported (yet?)) */ +/* GUS_irqrequest returns the number of IRQs actually scheduled into the virtual machine */ +/* Level triggered IRQ simulations normally return 1 */ +/* Event triggered IRQ simulation can safely ignore GUS_irqclear calls */ +int GUS_irqrequest(GUSEmuState *state, int hwirq, int num);/* needed in both mixer and bus emulation functions. */ +void GUS_irqclear( GUSEmuState *state, int hwirq); /* used by gus_write() only - can be left empty for mixer functions */ +void GUS_dmarequest(GUSEmuState *state); /* used by gus_write() only - can be left empty for mixer functions */ + +/* ** ISA bus interface functions: */ + +/* Port I/O handlers */ +/* support the following ports: */ +/* 2x0,2x6,2x8...2xF,3x0...3x7; */ +/* optional: 388,389 (at least writes should be forwarded or some GUS detection algorithms will fail) */ +/* data is passed in host byte order */ +unsigned int gus_read( GUSEmuState *state, int port, int size); +void gus_write(GUSEmuState *state, int port, int size, unsigned int data); +/* size is given in bytes (1 for byte, 2 for word) */ + +/* DMA data transfer function */ +/* data pointed to is passed in native x86 order */ +void gus_dma_transferdata(GUSEmuState *state, char *dma_addr, unsigned int count, int TC); +/* Called back by GUS_start_DMA as soon as the emulated DMA controller is ready for a transfer to or from GUS */ +/* (might be immediately if the DMA controller was programmed first) */ +/* dma_addr is an already translated address directly pointing to the beginning of the memory block */ +/* do not forget to update DMA states after the call, including the DREQ and TC flags */ +/* it is possible to break down a single transfer into multiple ones, but take care that: */ +/* -dma_count is actually count-1 */ +/* -before and during a transfer, DREQ is set and TC cleared */ +/* -when calling gus_dma_transferdata(), TC is only set true for call transferring the last byte */ +/* -after the last transfer, DREQ is cleared and TC is set */ + +/* ** GF1 mixer emulation functions: */ +/* Usually, gus_irqgen should be called directly after gus_mixvoices if you can meet the recommended ranges. 
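+   (GUS_callback in gus.c follows this pattern: it mixes one block with
+   gus_mixvoices and then hands the elapsed time, in microseconds, to
+   gus_irqgen.)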
*/ +/* If the interrupts are executed immediately (i.e., are synchronous), it may be useful to break this */ +/* down into a sequence of gus_mixvoices();gus_irqgen(); calls while mixing an audio block. */ +/* If the interrupts are asynchronous, it may be necessary to use a separate thread mixing into a temporary */ +/* audio buffer in order to avoid quality loss caused by large numsamples and elapsed_time values. */ + +void gus_mixvoices(GUSEmuState *state, unsigned int playback_freq, unsigned int numsamples, int16_t *bufferpos); +/* recommended range: 10 < numsamples < 100 */ +/* lower values may result in increased rounding error, higher values often cause audible timing delays */ + +void gus_irqgen(GUSEmuState *state, unsigned int elapsed_time); +/* recommended range: 80us < elapsed_time < max(1000us, numsamples/playback_freq) */ +/* lower values won't provide any benefit at all, higher values can cause audible timing delays */ +/* note: masked timers are also calculated by this function, thus it might be needed even without any IRQs in use! */ + +#endif /* GUSEMU_H */ diff --git a/hw/audio/gusemu_hal.c b/hw/audio/gusemu_hal.c new file mode 100644 index 000000000..5b9a14ee2 --- /dev/null +++ b/hw/audio/gusemu_hal.c @@ -0,0 +1,556 @@ +/* + * GUSEMU32 - bus interface part + * + * Copyright (C) 2000-2007 Tibor "TS" Schütz + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +/* + * TODO: check mixer: see 7.20 of sdk for panning pos (applies to all gus models?)? 
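Putting the two mixer-side entry points together, a host mixes a block and then accounts for the emulated time that block covers. A hedged usage sketch; host_mix_block is hypothetical, but the calls and the recommended ordering come straight from the header above:

    #include <stdint.h>
    #include "gusemu.h"

    /* Mix one audio block, then let GUSemu raise any timer/voice IRQs for
     * the elapsed emulated time, as recommended above: gus_irqgen() right
     * after gus_mixvoices(), with 10 < numsamples < 100. */
    static void host_mix_block(GUSEmuState *state, unsigned int playback_freq,
                               int16_t *buf, unsigned int numsamples)
    {
        gus_mixvoices(state, playback_freq, numsamples, buf);
        /* microseconds covered by this block */
        unsigned int elapsed_us =
            (unsigned int)((uint64_t)numsamples * 1000000u / playback_freq);
        gus_irqgen(state, elapsed_us);
    }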
+ */ + +#include "qemu/osdep.h" +#include "gustate.h" +#include "gusemu.h" + +#define GUSregb(position) (* (gusptr+(position))) +#define GUSregw(position) (*(uint16_t *) (gusptr+(position))) +#define GUSregd(position) (*(uint32_t *)(gusptr + (position))) + +/* size given in bytes */ +unsigned int gus_read(GUSEmuState * state, int port, int size) +{ + int value_read = 0; + + uint8_t *gusptr; + gusptr = state->gusdatapos; + GUSregd(portaccesses)++; + + switch (port & 0xff0f) + { + /* MixerCtrlReg (read not supported on GUS classic) */ + /* case 0x200: return GUSregb(MixerCtrlReg2x0); */ + case 0x206: /* IRQstatReg / SB2x6IRQ */ + /* adlib/sb bits set in port handlers */ + /* timer/voice bits set in gus_irqgen() */ + /* dma bit set in gus_dma_transferdata */ + /* midi not implemented yet */ + return GUSregb(IRQStatReg2x6); + /* case 0x308: */ /* AdLib388 */ + case 0x208: + if (GUSregb(GUS45TimerCtrl) & 1) + return GUSregb(TimerStatus2x8); + return GUSregb(AdLibStatus2x8); /* AdLibStatus */ + case 0x309: /* AdLib389 */ + case 0x209: + return GUSregb(AdLibData2x9); /* AdLibData */ + case 0x20A: + return GUSregb(AdLibCommand2xA); /* AdLib2x8_2xA */ + +#if 0 + case 0x20B: /* GUS hidden registers (read not supported on GUS classic) */ + switch (GUSregb(RegCtrl_2xF) & 0x07) + { + case 0: /* IRQ/DMA select */ + if (GUSregb(MixerCtrlReg2x0) & 0x40) + return GUSregb(IRQ_2xB); /* control register select bit */ + else + return GUSregb(DMA_2xB); + /* case 1-5: */ /* general purpose emulation regs */ + /* return ... */ /* + status reset reg (write only) */ + case 6: + return GUSregb(Jumper_2xB); /* Joystick/MIDI enable (JumperReg) */ + default:; + } + break; +#endif + + case 0x20C: /* SB2xCd */ + value_read = GUSregb(SB2xCd); + if (GUSregb(StatRead_2xF) & 0x20) + GUSregb(SB2xCd) ^= 0x80; /* toggle MSB on read */ + return value_read; + /* case 0x20D: */ /* SB2xD is write only -> 2xE writes to it*/ + case 0x20E: + if (GUSregb(RegCtrl_2xF) & 0x80) /* 2xE read IRQ enabled? 
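The switch in gus_read() (and in gus_write() below) masks the port with 0xff0f because the card decodes only the high byte and the low nibble of the address; whatever I/O base is configured, every "2xN"/"3xN" alias folds onto the canonical 0x20N/0x30N case label. A small self-checking illustration:

    #include <assert.h>

    int main(void)
    {
        assert((0x247 & 0xff0f) == 0x207); /* iobase 0x240: port 2x7 -> case 0x207 */
        assert((0x266 & 0xff0f) == 0x206); /* iobase 0x260: port 2x6 -> case 0x206 */
        assert((0x347 & 0xff0f) == 0x307); /* 3x7 register window */
        return 0;
    }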
*/ + { + GUSregb(StatRead_2xF) |= 0x80; + GUS_irqrequest(state, state->gusirq, 1); + } + return GUSregb(SB2xE); /* SB2xE */ + case 0x20F: /* StatRead_2xF */ + /*set/clear fixed bits */ + /*value_read = (GUSregb(StatRead_2xF) & 0xf9)|1; */ /*(LSB not set on GUS classic!)*/ + value_read = (GUSregb(StatRead_2xF) & 0xf9); + if (GUSregb(MixerCtrlReg2x0) & 0x08) + value_read |= 2; /* DMA/IRQ enabled flag */ + return value_read; + /* case 0x300: */ /* MIDI (not implemented) */ + /* case 0x301: */ /* MIDI (not implemented) */ + case 0x302: + return GUSregb(VoiceSelReg3x2); /* VoiceSelReg */ + case 0x303: + return GUSregb(FunkSelReg3x3); /* FunkSelReg */ + case 0x304: /* DataRegLoByte3x4 + DataRegWord3x4 */ + case 0x305: /* DataRegHiByte3x5 */ + switch (GUSregb(FunkSelReg3x3)) + { + /* common functions */ + case 0x41: /* DramDMAContrReg */ + value_read = GUSregb(GUS41DMACtrl); /* &0xfb */ + GUSregb(GUS41DMACtrl) &= 0xbb; + if (state->gusdma >= 4) + value_read |= 0x04; + if (GUSregb(IRQStatReg2x6) & 0x80) + { + value_read |= 0x40; + GUSregb(IRQStatReg2x6) &= 0x7f; + if (!GUSregb(IRQStatReg2x6)) + GUS_irqclear(state, state->gusirq); + } + return (uint8_t) value_read; + /* DramDMAmemPosReg */ + /* case 0x42: value_read=GUSregw(GUS42DMAStart); break;*/ + /* 43h+44h write only */ + case 0x45: + return GUSregb(GUS45TimerCtrl); /* TimerCtrlReg */ + /* 46h+47h write only */ + /* 48h: samp freq - write only */ + case 0x49: + return GUSregb(GUS49SampCtrl) & 0xbf; /* SampCtrlReg */ + /* case 4bh: */ /* joystick trim not supported */ + /* case 0x4c: return GUSregb(GUS4cReset); */ /* GUSreset: write only*/ + /* voice specific functions */ + case 0x80: + case 0x81: + case 0x82: + case 0x83: + case 0x84: + case 0x85: + case 0x86: + case 0x87: + case 0x88: + case 0x89: + case 0x8a: + case 0x8b: + case 0x8c: + case 0x8d: + { + int offset = 2 * (GUSregb(FunkSelReg3x3) & 0x0f); + offset += ((int) GUSregb(VoiceSelReg3x2) & 0x1f) << 5; /* = Voice*32 + Funktion*2 */ + value_read = GUSregw(offset); + } + break; + /* voice unspecific functions */ + case 0x8e: /* NumVoice */ + return GUSregb(NumVoices); + case 0x8f: /* irqstatreg */ + /* (pseudo IRQ-FIFO is processed during a gus_write(0x3X3,0x8f)) */ + return GUSregb(SynVoiceIRQ8f); + default: + return 0xffff; + } + if (size == 1) + { + if ((port & 0xff0f) == 0x305) + value_read = value_read >> 8; + value_read &= 0xff; + } + return (uint16_t) value_read; + /* case 0x306: */ /* Mixer/Version info */ + /* return 0xff; */ /* Pre 3.6 boards, ICS mixer NOT present */ + case 0x307: /* DRAMaccess */ + { + uint8_t *adr; + adr = state->himemaddr + (GUSregd(GUSDRAMPOS24bit) & 0xfffff); + return *adr; + } + default:; + } + return 0xffff; +} + +void gus_write(GUSEmuState * state, int port, int size, unsigned int data) +{ + uint8_t *gusptr; + gusptr = state->gusdatapos; + GUSregd(portaccesses)++; + + switch (port & 0xff0f) + { + case 0x200: /* MixerCtrlReg */ + GUSregb(MixerCtrlReg2x0) = (uint8_t) data; + break; + case 0x206: /* IRQstatReg / SB2x6IRQ */ + if (GUSregb(GUS45TimerCtrl) & 0x20) /* SB IRQ enabled? -> set 2x6IRQ bit */ + { + GUSregb(TimerStatus2x8) |= 0x08; + GUSregb(IRQStatReg2x6) = 0x10; + GUS_irqrequest(state, state->gusirq, 1); + } + break; + case 0x308: /* AdLib 388h */ + case 0x208: /* AdLibCommandReg */ + GUSregb(AdLibCommand2xA) = (uint8_t) data; + break; + case 0x309: /* AdLib 389h */ + case 0x209: /* AdLibDataReg */ + if ((GUSregb(AdLibCommand2xA) == 0x04) && (!(GUSregb(GUS45TimerCtrl) & 1))) /* GUS auto timer mode enabled? 
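Both the read and write paths address the per-voice registers with the same arithmetic: each of the 32 voices owns a 32-byte slot of 16 word-sized registers, so register f of voice v lives at byte offset v*32 + (f & 0x0f)*2. A sketch of that mapping; voice_reg_offset is a hypothetical extraction of the code:

    /* Byte offset of a voice register in the GUS state block, as computed in
     * gus_read()/gus_write() for function selects 0x80..0x8d / 0x00..0x0d. */
    static int voice_reg_offset(int voice_sel, int funk_sel)
    {
        return ((voice_sel & 0x1f) << 5) + 2 * (funk_sel & 0x0f);
    }
    /* e.g. voice 3, function select 0x82 (loop start, high word):
     * 3*32 + 2*2 = 100, i.e. VSRLoopStartHi (offset 4) inside voice 3's slot. */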
*/ + { + if (data & 0x80) + GUSregb(TimerStatus2x8) &= 0x1f; /* AdLib IRQ reset? -> clear maskable adl. timer int regs */ + else + GUSregb(TimerDataReg2x9) = (uint8_t) data; + } + else + { + GUSregb(AdLibData2x9) = (uint8_t) data; + if (GUSregb(GUS45TimerCtrl) & 0x02) + { + GUSregb(TimerStatus2x8) |= 0x01; + GUSregb(IRQStatReg2x6) = 0x10; + GUS_irqrequest(state, state->gusirq, 1); + } + } + break; + case 0x20A: + GUSregb(AdLibStatus2x8) = (uint8_t) data; + break; /* AdLibStatus2x8 */ + case 0x20B: /* GUS hidden registers */ + switch (GUSregb(RegCtrl_2xF) & 0x7) + { + case 0: + if (GUSregb(MixerCtrlReg2x0) & 0x40) + GUSregb(IRQ_2xB) = (uint8_t) data; /* control register select bit */ + else + GUSregb(DMA_2xB) = (uint8_t) data; + break; + /* case 1-4: general purpose emulation regs */ + case 5: /* clear stat reg 2xF */ + GUSregb(StatRead_2xF) = 0; /* ToDo: is this identical to GUS classic? */ + if (!GUSregb(IRQStatReg2x6)) + GUS_irqclear(state, state->gusirq); + break; + case 6: /* Jumper reg (Joystick/MIDI enable) */ + GUSregb(Jumper_2xB) = (uint8_t) data; + break; + default:; + } + break; + case 0x20C: /* SB2xCd */ + if (GUSregb(GUS45TimerCtrl) & 0x20) + { + GUSregb(TimerStatus2x8) |= 0x10; /* SB IRQ enabled? -> set 2xCIRQ bit */ + GUSregb(IRQStatReg2x6) = 0x10; + GUS_irqrequest(state, state->gusirq, 1); + } + /* fall through */ + case 0x20D: /* SB2xCd no IRQ */ + GUSregb(SB2xCd) = (uint8_t) data; + break; + case 0x20E: /* SB2xE */ + GUSregb(SB2xE) = (uint8_t) data; + break; + case 0x20F: + GUSregb(RegCtrl_2xF) = (uint8_t) data; + break; /* CtrlReg2xF */ + case 0x302: /* VoiceSelReg */ + GUSregb(VoiceSelReg3x2) = (uint8_t) data; + break; + case 0x303: /* FunkSelReg */ + GUSregb(FunkSelReg3x3) = (uint8_t) data; + if ((uint8_t) data == 0x8f) /* set irqstatreg, get voicereg and clear IRQ */ + { + int voice; + if (GUSregd(voicewavetableirq)) /* WavetableIRQ */ + { + for (voice = 0; voice < 31; voice++) + { + if (GUSregd(voicewavetableirq) & (1 << voice)) + { + GUSregd(voicewavetableirq) ^= (1 << voice); /* clear IRQ bit */ + GUSregb(voice << 5) &= 0x7f; /* clear voice reg irq bit */ + if (!GUSregd(voicewavetableirq)) + GUSregb(IRQStatReg2x6) &= 0xdf; + if (!GUSregb(IRQStatReg2x6)) + GUS_irqclear(state, state->gusirq); + GUSregb(SynVoiceIRQ8f) = voice | 0x60; /* (bit==0 => IRQ pending) */ + return; + } + } + } + else if (GUSregd(voicevolrampirq)) /* VolRamp IRQ */ + { + for (voice = 0; voice < 31; voice++) + { + if (GUSregd(voicevolrampirq) & (1 << voice)) + { + GUSregd(voicevolrampirq) ^= (1 << voice); /* clear IRQ bit */ + GUSregb((voice << 5) + VSRVolRampControl) &= 0x7f; /* clear voice volume reg irq bit */ + if (!GUSregd(voicevolrampirq)) + GUSregb(IRQStatReg2x6) &= 0xbf; + if (!GUSregb(IRQStatReg2x6)) + GUS_irqclear(state, state->gusirq); + GUSregb(SynVoiceIRQ8f) = voice | 0x80; /* (bit==0 => IRQ pending) */ + return; + } + } + } + GUSregb(SynVoiceIRQ8f) = 0xe8; /* no IRQ pending */ + } + break; + case 0x304: + case 0x305: + { + uint16_t writedata = (uint16_t) data; + uint16_t readmask = 0x0000; + if (size == 1) + { + readmask = 0xff00; + writedata &= 0xff; + if ((port & 0xff0f) == 0x305) + { + writedata = (uint16_t) (writedata << 8); + readmask = 0x00ff; + } + } + switch (GUSregb(FunkSelReg3x3)) + { + /* voice specific functions */ + case 0x00: + case 0x01: + case 0x02: + case 0x03: + case 0x04: + case 0x05: + case 0x06: + case 0x07: + case 0x08: + case 0x09: + case 0x0a: + case 0x0b: + case 0x0c: + case 0x0d: + { + int offset; + if (!(GUSregb(GUS4cReset) & 0x01)) + break; /* reset 
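The IRQ status byte at 2x6 is manipulated with bare masks throughout this file; collected in one place, the bits in use are the following (the names are ours, the values are taken from the set/clear masks in the code):

    enum {
        GUS_IRQ_TIMER1    = 0x04, /* timer 1 pending */
        GUS_IRQ_TIMER2    = 0x08, /* timer 2 pending */
        GUS_IRQ_SB        = 0x10, /* SB/AdLib compatibility paths */
        GUS_IRQ_WAVETABLE = 0x20, /* voice wavetable IRQ, cleared with &= 0xdf */
        GUS_IRQ_VOLRAMP   = 0x40, /* volume-ramp IRQ, cleared with &= 0xbf */
        GUS_IRQ_DMA_TC    = 0x80, /* DMA terminal count, cleared with &= 0x7f */
    };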
flag active? */ + offset = 2 * (GUSregb(FunkSelReg3x3) & 0x0f); + offset += (GUSregb(VoiceSelReg3x2) & 0x1f) << 5; /* = Voice*32 + Funktion*2 */ + GUSregw(offset) = (uint16_t) ((GUSregw(offset) & readmask) | writedata); + } + break; + /* voice unspecific functions */ + case 0x0e: /* NumVoices */ + GUSregb(NumVoices) = (uint8_t) data; + break; + /* case 0x0f: */ /* read only */ + /* common functions */ + case 0x41: /* DramDMAContrReg */ + GUSregb(GUS41DMACtrl) = (uint8_t) data; + if (data & 0x01) + GUS_dmarequest(state); + break; + case 0x42: /* DramDMAmemPosReg */ + GUSregw(GUS42DMAStart) = (GUSregw(GUS42DMAStart) & readmask) | writedata; + GUSregb(GUS50DMAHigh) &= 0xf; /* compatibility stuff... */ + break; + case 0x43: /* DRAMaddrLo */ + GUSregd(GUSDRAMPOS24bit) = + (GUSregd(GUSDRAMPOS24bit) & (readmask | 0xff0000)) | writedata; + break; + case 0x44: /* DRAMaddrHi */ + GUSregd(GUSDRAMPOS24bit) = + (GUSregd(GUSDRAMPOS24bit) & 0xffff) | ((data & 0x0f) << 16); + break; + case 0x45: /* TCtrlReg */ + GUSregb(GUS45TimerCtrl) = (uint8_t) data; + if (!(data & 0x20)) + GUSregb(TimerStatus2x8) &= 0xe7; /* sb IRQ dis? -> clear 2x8/2xC sb IRQ flags */ + if (!(data & 0x02)) + GUSregb(TimerStatus2x8) &= 0xfe; /* adlib data IRQ dis? -> clear 2x8 adlib IRQ flag */ + if (!(GUSregb(TimerStatus2x8) & 0x19)) + GUSregb(IRQStatReg2x6) &= 0xef; /* 0xe6; $$clear IRQ if both IRQ bits are inactive or cleared */ + /* catch up delayed timer IRQs: */ + if ((GUSregw(TimerIRQs) > 1) && (GUSregb(TimerDataReg2x9) & 3)) + { + if (GUSregb(TimerDataReg2x9) & 1) /* start timer 1 (80us decrement rate) */ + { + if (!(GUSregb(TimerDataReg2x9) & 0x40)) + GUSregb(TimerStatus2x8) |= 0xc0; /* maskable bits */ + if (data & 4) /* timer1 irq enable */ + { + GUSregb(TimerStatus2x8) |= 4; /* nonmaskable bit */ + GUSregb(IRQStatReg2x6) |= 4; /* timer 1 irq pending */ + } + } + if (GUSregb(TimerDataReg2x9) & 2) /* start timer 2 (320us decrement rate) */ + { + if (!(GUSregb(TimerDataReg2x9) & 0x20)) + GUSregb(TimerStatus2x8) |= 0xa0; /* maskable bits */ + if (data & 8) /* timer2 irq enable */ + { + GUSregb(TimerStatus2x8) |= 2; /* nonmaskable bit */ + GUSregb(IRQStatReg2x6) |= 8; /* timer 2 irq pending */ + } + } + GUSregw(TimerIRQs)--; + if (GUSregw(BusyTimerIRQs) > 1) + GUSregw(BusyTimerIRQs)--; + else + GUSregw(BusyTimerIRQs) = + GUS_irqrequest(state, state->gusirq, GUSregw(TimerIRQs)); + } + else + GUSregw(TimerIRQs) = 0; + + if (!(data & 0x04)) + { + GUSregb(TimerStatus2x8) &= 0xfb; /* clear non-maskable timer1 bit */ + GUSregb(IRQStatReg2x6) &= 0xfb; + } + if (!(data & 0x08)) + { + GUSregb(TimerStatus2x8) &= 0xfd; /* clear non-maskable timer2 bit */ + GUSregb(IRQStatReg2x6) &= 0xf7; + } + if (!GUSregb(IRQStatReg2x6)) + GUS_irqclear(state, state->gusirq); + break; + case 0x46: /* Counter1 */ + GUSregb(GUS46Counter1) = (uint8_t) data; + break; + case 0x47: /* Counter2 */ + GUSregb(GUS47Counter2) = (uint8_t) data; + break; + /* case 0x48: */ /* sampling freq reg not emulated (same as interwave) */ + case 0x49: /* SampCtrlReg */ + GUSregb(GUS49SampCtrl) = (uint8_t) data; + break; + /* case 0x4b: */ /* joystick trim not emulated */ + case 0x4c: /* GUSreset */ + GUSregb(GUS4cReset) = (uint8_t) data; + if (!(GUSregb(GUS4cReset) & 1)) /* reset... 
*/ + { + GUSregd(voicewavetableirq) = 0; + GUSregd(voicevolrampirq) = 0; + GUSregw(TimerIRQs) = 0; + GUSregw(BusyTimerIRQs) = 0; + GUSregb(NumVoices) = 0xcd; + GUSregb(IRQStatReg2x6) = 0; + GUSregb(TimerStatus2x8) = 0; + GUSregb(AdLibData2x9) = 0; + GUSregb(TimerDataReg2x9) = 0; + GUSregb(GUS41DMACtrl) = 0; + GUSregb(GUS45TimerCtrl) = 0; + GUSregb(GUS49SampCtrl) = 0; + GUSregb(GUS4cReset) &= 0xf9; /* clear IRQ and DAC enable bits */ + GUS_irqclear(state, state->gusirq); + } + /* IRQ enable bit checked elsewhere */ + /* EnableDAC bit may be used by external callers */ + break; + } + } + break; + case 0x307: /* DRAMaccess */ + { + uint8_t *adr; + adr = state->himemaddr + (GUSregd(GUSDRAMPOS24bit) & 0xfffff); + *adr = (uint8_t) data; + } + break; + } +} + +/* Attention when breaking up a single DMA transfer to multiple ones: + * it may lead to multiple terminal count interrupts and broken transfers: + * + * 1. Whenever you transfer a piece of data, the gusemu callback is invoked + * 2. The callback may generate a TC irq (if the register was set up to do so) + * 3. The irq may result in the program using the GUS to reprogram the GUS + * + * Some programs also decide to upload by just checking if TC occurs + * (via interrupt or a cleared GUS dma flag) + * and then start the next transfer, without checking DMA state + * + * Thus: Always make sure to set the TC flag correctly! + * + * Note that the genuine GUS had a granularity of 16 bytes/words for low/high DMA + * while later cards had atomic granularity provided by an additional GUS50DMAHigh register + * GUSemu also uses this register to support byte-granular transfers for better compatibility + * with emulators other than GUSemu32 + */ + +void gus_dma_transferdata(GUSEmuState * state, char *dma_addr, unsigned int count, int TC) +{ + /* this function gets called by the callback function as soon as a DMA transfer is about to start + * dma_addr is a translated address within accessible memory, not the physical one, + * count is (real dma count register)+1 + * note that the amount of bytes transferred is fully determined by values in the DMA registers + * do not forget to update DMA states after transferring the entire block: + * DREQ cleared & TC asserted after the _whole_ transfer */ + + char *srcaddr; + char *destaddr; + char msbmask = 0; + uint8_t *gusptr; + gusptr = state->gusdatapos; + + srcaddr = dma_addr; /* system memory address */ + { + int offset = (GUSregw(GUS42DMAStart) << 4) + (GUSregb(GUS50DMAHigh) & 0xf); + if (state->gusdma >= 4) + offset = (offset & 0xc0000) + (2 * (offset & 0x1fff0)); /* 16 bit address translation */ + destaddr = (char *) state->himemaddr + offset; /* wavetable RAM address */ + } + + GUSregw(GUS42DMAStart) += (uint16_t) (count >> 4); /* ToDo: add 16bit GUS page limit? */ + GUSregb(GUS50DMAHigh) = (uint8_t) ((count + GUSregb(GUS50DMAHigh)) & 0xf); /* ToDo: add 16bit GUS page limit? 
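The wavetable-RAM offset used by gus_dma_transferdata() deserves a worked example. For 16-bit DMA channels (gusdma >= 4) the ISA controller moves words, so the low 17 bits of the 20-bit GUS address are doubled inside a 256 KiB bank. A sketch with sample numbers; gus_dma_offset is a hypothetical extraction of the code above:

    static unsigned int gus_dma_offset(unsigned int dma_start /* GUS42DMAStart */,
                                       unsigned int dma_high  /* GUS50DMAHigh */,
                                       int gusdma)
    {
        unsigned int offset = (dma_start << 4) + (dma_high & 0xf);
        if (gusdma >= 4) {
            offset = (offset & 0xc0000) + 2 * (offset & 0x1fff0); /* word channel */
        }
        return offset;
    }
    /* e.g. dma_start = 0x1000, dma_high = 0, gusdma = 5:
     * linear offset 0x10000 -> (0 & 0xc0000) + 2 * 0x10000 = 0x20000 */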
*/ + + if (GUSregb(GUS41DMACtrl) & 0x02) /* direction, 0 := sysram->gusram */ + { + char *tmpaddr = destaddr; + destaddr = srcaddr; + srcaddr = tmpaddr; + } + + if ((GUSregb(GUS41DMACtrl) & 0x80) && (!(GUSregb(GUS41DMACtrl) & 0x02))) + msbmask = (const char) 0x80; /* invert MSB */ + for (; count > 0; count--) + { + if (GUSregb(GUS41DMACtrl) & 0x40) + *(destaddr++) = *(srcaddr++); /* 16 bit lobyte */ + else + *(destaddr++) = (msbmask ^ (*(srcaddr++))); /* 8 bit */ + if (state->gusdma >= 4) + *(destaddr++) = (msbmask ^ (*(srcaddr++))); /* 16 bit hibyte */ + } + + if (TC) + { + (GUSregb(GUS41DMACtrl)) &= 0xfe; /* clear DMA request bit */ + if (GUSregb(GUS41DMACtrl) & 0x20) /* DMA terminal count IRQ */ + { + GUSregb(IRQStatReg2x6) |= 0x80; + GUS_irqrequest(state, state->gusirq, 1); + } + } +} diff --git a/hw/audio/gusemu_mixer.c b/hw/audio/gusemu_mixer.c new file mode 100644 index 000000000..56300de77 --- /dev/null +++ b/hw/audio/gusemu_mixer.c @@ -0,0 +1,241 @@ +/* + * GUSEMU32 - mixing engine (similar to Interwave GF1 compatibility) + * + * Copyright (C) 2000-2007 Tibor "TS" Schütz + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "gusemu.h" +#include "gustate.h" + +#define GUSregb(position) (* (gusptr+(position))) +#define GUSregw(position) (*(uint16_t *) (gusptr+(position))) +#define GUSregd(position) (*(uint32_t *)(gusptr + (position))) + +#define GUSvoice(position) (*(uint16_t *)(voiceptr+(position))) + +/* samples are always 16bit stereo (4 bytes each, first right then left interleaved) */ +void gus_mixvoices(GUSEmuState * state, unsigned int playback_freq, unsigned int numsamples, + int16_t *bufferpos) +{ + /* note that byte registers are stored in the upper half of each voice register! */ + uint8_t *gusptr; + int Voice; + uint16_t *voiceptr; + + unsigned int count; + for (count = 0; count < numsamples * 2; count++) + *(bufferpos + count) = 0; /* clear */ + + gusptr = state->gusdatapos; + voiceptr = (uint16_t *) gusptr; + if (!(GUSregb(GUS4cReset) & 0x01)) /* reset flag active? 
*/ + return; + + for (Voice = 0; Voice <= (GUSregb(NumVoices) & 31); Voice++) + { + if (GUSvoice(wVSRControl) & 0x200) + GUSvoice(wVSRControl) |= 0x100; /* voice stop request */ + if (GUSvoice(wVSRVolRampControl) & 0x200) + GUSvoice(wVSRVolRampControl) |= 0x100; /* Volume ramp stop request */ + if (!(GUSvoice(wVSRControl) & GUSvoice(wVSRVolRampControl) & 0x100)) /* neither voice nor volume calculation active - save some time here ;) */ + { + unsigned int sample; + + unsigned int LoopStart = (GUSvoice(wVSRLoopStartHi) << 16) | GUSvoice(wVSRLoopStartLo); /* 23.9 format */ + unsigned int LoopEnd = (GUSvoice(wVSRLoopEndHi) << 16) | GUSvoice(wVSRLoopEndLo); /* 23.9 format */ + unsigned int CurrPos = (GUSvoice(wVSRCurrPosHi) << 16) | GUSvoice(wVSRCurrPosLo); /* 23.9 format */ + int VoiceIncrement = ((((unsigned long) GUSvoice(wVSRFreq) * 44100) / playback_freq) * (14 >> 1)) / + ((GUSregb(NumVoices) & 31) + 1); /* 6.10 increment/frame to 23.9 increment/sample */ + + int PanningPos = (GUSvoice(wVSRPanning) >> 8) & 0xf; + + unsigned int Volume32 = 32 * GUSvoice(wVSRCurrVol); /* 32 times larger than original gus for maintaining precision while ramping */ + unsigned int StartVol32 = (GUSvoice(wVSRVolRampStartVol) & 0xff00) * 32; + unsigned int EndVol32 = (GUSvoice(wVSRVolRampEndVol) & 0xff00) * 32; + int VolumeIncrement32 = (32 * 16 * (GUSvoice(wVSRVolRampRate) & 0x3f00) >> 8) >> ((((GUSvoice(wVSRVolRampRate) & 0xc000) >> 8) >> 6) * 3); /* including 1/8/64/512 volume speed divisor */ + VolumeIncrement32 = (((VolumeIncrement32 * 44100 / 2) / playback_freq) * 14) / ((GUSregb(NumVoices) & 31) + 1); /* adjust ramping speed to playback speed */ + + if (GUSvoice(wVSRControl) & 0x4000) + VoiceIncrement = -VoiceIncrement; /* reverse playback */ + if (GUSvoice(wVSRVolRampControl) & 0x4000) + VolumeIncrement32 = -VolumeIncrement32; /* reverse ramping */ + + for (sample = 0; sample < numsamples; sample++) + { + int sample1, sample2, Volume; + if (GUSvoice(wVSRControl) & 0x400) /* 16bit */ + { + int offset = ((CurrPos >> 9) & 0xc0000) + (((CurrPos >> 9) & 0x1ffff) << 1); + int8_t *adr; + adr = (int8_t *) state->himemaddr + offset; + sample1 = (*adr & 0xff) + (*(adr + 1) * 256); + sample2 = (*(adr + 2) & 0xff) + (*(adr + 2 + 1) * 256); + } + else /* 8bit */ + { + int offset = (CurrPos >> 9) & 0xfffff; + int8_t *adr; + adr = (int8_t *) state->himemaddr + offset; + sample1 = (*adr) * 256; + sample2 = (*(adr + 1)) * 256; + } + + Volume = ((((Volume32 >> (4 + 5)) & 0xff) + 256) << (Volume32 >> ((4 + 8) + 5))) / 512; /* semi-logarithmic volume, +5 due to additional precision */ + sample1 = (((sample1 * Volume) >> 16) * (512 - (CurrPos % 512))) / 512; + sample2 = (((sample2 * Volume) >> 16) * (CurrPos % 512)) / 512; + sample1 += sample2; + + if (!(GUSvoice(wVSRVolRampControl) & 0x100)) + { + Volume32 += VolumeIncrement32; + if ((GUSvoice(wVSRVolRampControl) & 0x4000) ? (Volume32 <= StartVol32) : (Volume32 >= EndVol32)) /* ramp up boundary cross */ + { + if (GUSvoice(wVSRVolRampControl) & 0x2000) + GUSvoice(wVSRVolRampControl) |= 0x8000; /* volramp IRQ enabled? -> IRQ wait flag */ + if (GUSvoice(wVSRVolRampControl) & 0x800) /* loop enabled */ + { + if (GUSvoice(wVSRVolRampControl) & 0x1000) /* bidir. loop */ + { + GUSvoice(wVSRVolRampControl) ^= 0x4000; /* toggle dir */ + VolumeIncrement32 = -VolumeIncrement32; + } + else + Volume32 = (GUSvoice(wVSRVolRampControl) & 0x4000) ? EndVol32 : StartVol32; /* unidir. 
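The two-sample blend in the inner loop above is plain linear interpolation: positions are 23.9 fixed point, so pos >> 9 addresses a sample and pos & 511 is the fraction between neighbours. Isolated, and with the volume multiply left out for clarity, the step looks like this hypothetical helper:

    #include <stdint.h>

    /* Linear interpolation between two neighbouring samples using the 9
     * fractional bits of a 23.9 fixed-point position. */
    static int gf1_interpolate(int s1, int s2, unsigned int pos)
    {
        unsigned int frac = pos % 512;
        return (s1 * (int)(512 - frac) + s2 * (int)frac) / 512;
    }
    /* e.g. s1 = 1000, s2 = 2000, frac = 256 (half-way) -> 1500 */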
loop ramp */ + } + else + { + GUSvoice(wVSRVolRampControl) |= 0x100; + Volume32 = + (GUSvoice(wVSRVolRampControl) & 0x4000) ? StartVol32 : EndVol32; + } + } + } + if ((GUSvoice(wVSRVolRampControl) & 0xa000) == 0xa000) /* volramp IRQ set and enabled? */ + { + GUSregd(voicevolrampirq) |= 1 << Voice; /* set irq slot */ + } + else + { + GUSregd(voicevolrampirq) &= (~(1 << Voice)); /* clear irq slot */ + GUSvoice(wVSRVolRampControl) &= 0x7f00; + } + + if (!(GUSvoice(wVSRControl) & 0x100)) + { + CurrPos += VoiceIncrement; + if ((GUSvoice(wVSRControl) & 0x4000) ? (CurrPos <= LoopStart) : (CurrPos >= LoopEnd)) /* playback boundary cross */ + { + if (GUSvoice(wVSRControl) & 0x2000) + GUSvoice(wVSRControl) |= 0x8000; /* voice IRQ enabled -> IRQ wait flag */ + if (GUSvoice(wVSRControl) & 0x800) /* loop enabled */ + { + if (GUSvoice(wVSRControl) & 0x1000) /* pingpong loop */ + { + GUSvoice(wVSRControl) ^= 0x4000; /* toggle dir */ + VoiceIncrement = -VoiceIncrement; + } + else + CurrPos = (GUSvoice(wVSRControl) & 0x4000) ? LoopEnd : LoopStart; /* unidir. loop */ + } + else if (!(GUSvoice(wVSRVolRampControl) & 0x400)) + GUSvoice(wVSRControl) |= 0x100; /* loop disabled, rollover check */ + } + } + if ((GUSvoice(wVSRControl) & 0xa000) == 0xa000) /* wavetable IRQ set and enabled? */ + { + GUSregd(voicewavetableirq) |= 1 << Voice; /* set irq slot */ + } + else + { + GUSregd(voicewavetableirq) &= (~(1 << Voice)); /* clear irq slot */ + GUSvoice(wVSRControl) &= 0x7f00; + } + + /* mix samples into buffer */ + *(bufferpos + 2 * sample) += (int16_t) ((sample1 * PanningPos) >> 4); /* right */ + *(bufferpos + 2 * sample + 1) += (int16_t) ((sample1 * (15 - PanningPos)) >> 4); /* left */ + } + /* write back voice and volume */ + GUSvoice(wVSRCurrVol) = Volume32 / 32; + GUSvoice(wVSRCurrPosHi) = CurrPos >> 16; + GUSvoice(wVSRCurrPosLo) = CurrPos & 0xffff; + } + voiceptr += 16; /* next voice */ + } +} + +void gus_irqgen(GUSEmuState * state, unsigned int elapsed_time) +/* time given in microseconds */ +{ + int requestedIRQs = 0; + uint8_t *gusptr; + gusptr = state->gusdatapos; + if (GUSregb(TimerDataReg2x9) & 1) /* start timer 1 (80us decrement rate) */ + { + unsigned int timer1fraction = state->timer1fraction; + int newtimerirqs; + newtimerirqs = (elapsed_time + timer1fraction) / (80 * (256 - GUSregb(GUS46Counter1))); + state->timer1fraction = (elapsed_time + timer1fraction) % (80 * (256 - GUSregb(GUS46Counter1))); + if (newtimerirqs) + { + if (!(GUSregb(TimerDataReg2x9) & 0x40)) + GUSregb(TimerStatus2x8) |= 0xc0; /* maskable bits */ + if (GUSregb(GUS45TimerCtrl) & 4) /* timer1 irq enable */ + { + GUSregb(TimerStatus2x8) |= 4; /* nonmaskable bit */ + GUSregb(IRQStatReg2x6) |= 4; /* timer 1 irq pending */ + GUSregw(TimerIRQs) += newtimerirqs; + requestedIRQs += newtimerirqs; + } + } + } + if (GUSregb(TimerDataReg2x9) & 2) /* start timer 2 (320us decrement rate) */ + { + unsigned int timer2fraction = state->timer2fraction; + int newtimerirqs; + newtimerirqs = (elapsed_time + timer2fraction) / (320 * (256 - GUSregb(GUS47Counter2))); + state->timer2fraction = (elapsed_time + timer2fraction) % (320 * (256 - GUSregb(GUS47Counter2))); + if (newtimerirqs) + { + if (!(GUSregb(TimerDataReg2x9) & 0x20)) + GUSregb(TimerStatus2x8) |= 0xa0; /* maskable bits */ + if (GUSregb(GUS45TimerCtrl) & 8) /* timer2 irq enable */ + { + GUSregb(TimerStatus2x8) |= 2; /* nonmaskable bit */ + GUSregb(IRQStatReg2x6) |= 8; /* timer 2 irq pending */ + GUSregw(TimerIRQs) += newtimerirqs; + requestedIRQs += newtimerirqs; + } + } + } + if 
(GUSregb(GUS4cReset) & 0x4) /* synth IRQ enable */ + { + if (GUSregd(voicewavetableirq)) + GUSregb(IRQStatReg2x6) |= 0x20; + if (GUSregd(voicevolrampirq)) + GUSregb(IRQStatReg2x6) |= 0x40; + } + if ((!requestedIRQs) && GUSregb(IRQStatReg2x6)) + requestedIRQs++; + if (GUSregb(IRQStatReg2x6)) + GUSregw(BusyTimerIRQs) = GUS_irqrequest(state, state->gusirq, requestedIRQs); +} diff --git a/hw/audio/gustate.h b/hw/audio/gustate.h new file mode 100644 index 000000000..d16297110 --- /dev/null +++ b/hw/audio/gustate.h @@ -0,0 +1,132 @@ +/* + * GUSEMU32 - persistent GUS register state + * + * Copyright (C) 2000-2007 Tibor "TS" Schütz + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#ifndef GUSTATE_H +#define GUSTATE_H + +/*state block offset*/ +#define gusdata (0) + +/* data stored using this structure is in host byte order! 
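The offsets defined below are easy to sanity-check against the size quoted for gusdatapos in gusemu.h ("32*32+4 bytes"): 32 voices times 16 word-sized registers is 1024 bytes of voice state, and the trailing portaccesses counter adds 4. A self-checking sketch:

    #include <assert.h>

    int main(void)
    {
        int vsregs_end = 32 * (16 * 2);  /* VSRegsEnd: voice register file */
        int gusdataend = vsregs_end + 4; /* + portaccesses counter */
        assert(gusdataend == 32 * 32 + 4); /* == 1028, matches gusemu.h */
        return 0;
    }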
*/ + +/*access type*/ +#define PortRead (0) +#define PortWrite (1) + +#define Port8Bitacc (0) +#define Port16Bitacc (1) + +/*voice register offsets (in bytes)*/ +#define VSRegs (0) +#define VSRControl (0) +#define VSRegsEnd (VSRControl+VSRegs + 32*(16*2)) +#define VSRFreq (2) +#define VSRLoopStartHi (4) +#define VSRLoopStartLo (6) +#define VSRLoopEndHi (8) +#define VSRLoopEndLo (10) +#define VSRVolRampRate (12) +#define VSRVolRampStartVol (14) +#define VSRVolRampEndVol (16) +#define VSRCurrVol (18) +#define VSRCurrPosHi (20) +#define VSRCurrPosLo (22) +#define VSRPanning (24) +#define VSRVolRampControl (26) + +/*voice register offsets (in words)*/ +#define wVSRegs (0) +#define wVSRControl (0) +#define wVSRegsEnd (wVSRControl+wVSRegs + 32*(16)) +#define wVSRFreq (1) +#define wVSRLoopStartHi (2) +#define wVSRLoopStartLo (3) +#define wVSRLoopEndHi (4) +#define wVSRLoopEndLo (5) +#define wVSRVolRampRate (6) +#define wVSRVolRampStartVol (7) +#define wVSRVolRampEndVol (8) +#define wVSRCurrVol (9) +#define wVSRCurrPosHi (10) +#define wVSRCurrPosLo (11) +#define wVSRPanning (12) +#define wVSRVolRampControl (13) + +/*GUS register state block: 32 voices, padding filled with remaining registers*/ +#define DataRegLoByte3x4 (VSRVolRampControl+2) +#define DataRegWord3x4 (DataRegLoByte3x4) +#define DataRegHiByte3x5 (VSRVolRampControl+2 +1) +#define DMA_2xB (VSRVolRampControl+2+2) +#define IRQ_2xB (VSRVolRampControl+2+3) + +#define RegCtrl_2xF (VSRVolRampControl+2+(16*2)) +#define Jumper_2xB (VSRVolRampControl+2+(16*2)+1) +#define GUS42DMAStart (VSRVolRampControl+2+(16*2)+2) + +#define GUS43DRAMIOlo (VSRVolRampControl+2+(16*2)*2) +#define GUSDRAMPOS24bit (GUS43DRAMIOlo) +#define GUS44DRAMIOhi (VSRVolRampControl+2+(16*2)*2+2) + +#define voicewavetableirq (VSRVolRampControl+2+(16*2)*3) /* voice IRQ pseudoqueue: 1 bit per voice */ + +#define voicevolrampirq (VSRVolRampControl+2+(16*2)*4) /* voice IRQ pseudoqueue: 1 bit per voice */ + +#define startvoices (VSRVolRampControl+2+(16*2)*5) /* statistics / optimizations */ + +#define IRQStatReg2x6 (VSRVolRampControl+2+(16*2)*6) +#define TimerStatus2x8 (VSRVolRampControl+2+(16*2)*6+1) +#define TimerDataReg2x9 (VSRVolRampControl+2+(16*2)*6+2) +#define MixerCtrlReg2x0 (VSRVolRampControl+2+(16*2)*6+3) + +#define VoiceSelReg3x2 (VSRVolRampControl+2+(16*2)*7) +#define FunkSelReg3x3 (VSRVolRampControl+2+(16*2)*7+1) +#define AdLibStatus2x8 (VSRVolRampControl+2+(16*2)*7+2) +#define StatRead_2xF (VSRVolRampControl+2+(16*2)*7+3) + +#define GUS48SampSpeed (VSRVolRampControl+2+(16*2)*8) +#define GUS41DMACtrl (VSRVolRampControl+2+(16*2)*8+1) +#define GUS45TimerCtrl (VSRVolRampControl+2+(16*2)*8+2) +#define GUS46Counter1 (VSRVolRampControl+2+(16*2)*8+3) + +#define GUS47Counter2 (VSRVolRampControl+2+(16*2)*9) +#define GUS49SampCtrl (VSRVolRampControl+2+(16*2)*9+1) +#define GUS4cReset (VSRVolRampControl+2+(16*2)*9+2) +#define NumVoices (VSRVolRampControl+2+(16*2)*9+3) + +#define TimerIRQs (VSRVolRampControl+2+(16*2)*10) /* delayed IRQ, statistics */ +#define BusyTimerIRQs (VSRVolRampControl+2+(16*2)*10+2) /* delayed IRQ, statistics */ + +#define AdLibCommand2xA (VSRVolRampControl+2+(16*2)*11) +#define AdLibData2x9 (VSRVolRampControl+2+(16*2)*11+1) +#define SB2xCd (VSRVolRampControl+2+(16*2)*11+2) +#define SB2xE (VSRVolRampControl+2+(16*2)*11+3) + +#define SynVoiceIRQ8f (VSRVolRampControl+2+(16*2)*12) +#define GUS50DMAHigh (VSRVolRampControl+2+(16*2)*12+1) + +#define portaccesses (VSRegsEnd) /* statistics / suspend mode */ + +#define gusdataend (VSRegsEnd+4) + +#endif /* GUSTATE_H 
*/ diff --git a/hw/audio/hda-codec-common.h b/hw/audio/hda-codec-common.h new file mode 100644 index 000000000..b4fdb51e8 --- /dev/null +++ b/hw/audio/hda-codec-common.h @@ -0,0 +1,456 @@ +/* + * Common code to disable/enable mixer emulation at run time + * + * Copyright (C) 2013 Red Hat, Inc. + * + * Written by Bandan Das + * with important bits picked up from hda-codec.c + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 or + * (at your option) version 3 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +/* + * HDA codec descriptions + */ + +#ifdef HDA_MIXER +#define QEMU_HDA_ID_OUTPUT ((QEMU_HDA_ID_VENDOR << 16) | 0x12) +#define QEMU_HDA_ID_DUPLEX ((QEMU_HDA_ID_VENDOR << 16) | 0x22) +#define QEMU_HDA_ID_MICRO ((QEMU_HDA_ID_VENDOR << 16) | 0x32) +#define QEMU_HDA_AMP_CAPS \ + (AC_AMPCAP_MUTE | \ + (QEMU_HDA_AMP_STEPS << AC_AMPCAP_OFFSET_SHIFT) | \ + (QEMU_HDA_AMP_STEPS << AC_AMPCAP_NUM_STEPS_SHIFT) | \ + (3 << AC_AMPCAP_STEP_SIZE_SHIFT)) +#else +#define QEMU_HDA_ID_OUTPUT ((QEMU_HDA_ID_VENDOR << 16) | 0x11) +#define QEMU_HDA_ID_DUPLEX ((QEMU_HDA_ID_VENDOR << 16) | 0x21) +#define QEMU_HDA_ID_MICRO ((QEMU_HDA_ID_VENDOR << 16) | 0x31) +#define QEMU_HDA_AMP_CAPS QEMU_HDA_AMP_NONE +#endif + + +/* common: audio output widget */ +static const desc_param glue(common_params_audio_dac_, PARAM)[] = { + { + .id = AC_PAR_AUDIO_WIDGET_CAP, + .val = ((AC_WID_AUD_OUT << AC_WCAP_TYPE_SHIFT) | + AC_WCAP_FORMAT_OVRD | + AC_WCAP_AMP_OVRD | + AC_WCAP_OUT_AMP | + AC_WCAP_STEREO), + },{ + .id = AC_PAR_PCM, + .val = QEMU_HDA_PCM_FORMATS, + },{ + .id = AC_PAR_STREAM, + .val = AC_SUPFMT_PCM, + },{ + .id = AC_PAR_AMP_IN_CAP, + .val = QEMU_HDA_AMP_NONE, + },{ + .id = AC_PAR_AMP_OUT_CAP, + .val = QEMU_HDA_AMP_CAPS, + }, +}; + +/* common: audio input widget */ +static const desc_param glue(common_params_audio_adc_, PARAM)[] = { + { + .id = AC_PAR_AUDIO_WIDGET_CAP, + .val = ((AC_WID_AUD_IN << AC_WCAP_TYPE_SHIFT) | + AC_WCAP_CONN_LIST | + AC_WCAP_FORMAT_OVRD | + AC_WCAP_AMP_OVRD | + AC_WCAP_IN_AMP | + AC_WCAP_STEREO), + },{ + .id = AC_PAR_CONNLIST_LEN, + .val = 1, + },{ + .id = AC_PAR_PCM, + .val = QEMU_HDA_PCM_FORMATS, + },{ + .id = AC_PAR_STREAM, + .val = AC_SUPFMT_PCM, + },{ + .id = AC_PAR_AMP_IN_CAP, + .val = QEMU_HDA_AMP_CAPS, + },{ + .id = AC_PAR_AMP_OUT_CAP, + .val = QEMU_HDA_AMP_NONE, + }, +}; + +/* common: pin widget (line-out) */ +static const desc_param glue(common_params_audio_lineout_, PARAM)[] = { + { + .id = AC_PAR_AUDIO_WIDGET_CAP, + .val = ((AC_WID_PIN << AC_WCAP_TYPE_SHIFT) | + AC_WCAP_CONN_LIST | + AC_WCAP_STEREO), + },{ + .id = AC_PAR_PIN_CAP, + .val = AC_PINCAP_OUT, + },{ + .id = AC_PAR_CONNLIST_LEN, + .val = 1, + },{ + .id = AC_PAR_AMP_IN_CAP, + .val = QEMU_HDA_AMP_NONE, + },{ + .id = AC_PAR_AMP_OUT_CAP, + .val = QEMU_HDA_AMP_NONE, + }, +}; + +/* common: pin widget (line-in) */ +static const desc_param glue(common_params_audio_linein_, PARAM)[] = { + { + .id = AC_PAR_AUDIO_WIDGET_CAP, + .val = ((AC_WID_PIN << AC_WCAP_TYPE_SHIFT) | + AC_WCAP_STEREO), + },{ + .id = AC_PAR_PIN_CAP, + .val = AC_PINCAP_IN, + },{ + .id 
= AC_PAR_AMP_IN_CAP, + .val = QEMU_HDA_AMP_NONE, + },{ + .id = AC_PAR_AMP_OUT_CAP, + .val = QEMU_HDA_AMP_NONE, + }, +}; + +/* output: root node */ +static const desc_param glue(output_params_root_, PARAM)[] = { + { + .id = AC_PAR_VENDOR_ID, + .val = QEMU_HDA_ID_OUTPUT, + },{ + .id = AC_PAR_SUBSYSTEM_ID, + .val = QEMU_HDA_ID_OUTPUT, + },{ + .id = AC_PAR_REV_ID, + .val = 0x00100101, + },{ + .id = AC_PAR_NODE_COUNT, + .val = 0x00010001, + }, +}; + +/* output: audio function */ +static const desc_param glue(output_params_audio_func_, PARAM)[] = { + { + .id = AC_PAR_FUNCTION_TYPE, + .val = AC_GRP_AUDIO_FUNCTION, + },{ + .id = AC_PAR_SUBSYSTEM_ID, + .val = QEMU_HDA_ID_OUTPUT, + },{ + .id = AC_PAR_NODE_COUNT, + .val = 0x00020002, + },{ + .id = AC_PAR_PCM, + .val = QEMU_HDA_PCM_FORMATS, + },{ + .id = AC_PAR_STREAM, + .val = AC_SUPFMT_PCM, + },{ + .id = AC_PAR_AMP_IN_CAP, + .val = QEMU_HDA_AMP_NONE, + },{ + .id = AC_PAR_AMP_OUT_CAP, + .val = QEMU_HDA_AMP_NONE, + },{ + .id = AC_PAR_GPIO_CAP, + .val = 0, + },{ + .id = AC_PAR_AUDIO_FG_CAP, + .val = 0x00000808, + },{ + .id = AC_PAR_POWER_STATE, + .val = 0, + }, +}; + +/* output: nodes */ +static const desc_node glue(output_nodes_, PARAM)[] = { + { + .nid = AC_NODE_ROOT, + .name = "root", + .params = glue(output_params_root_, PARAM), + .nparams = ARRAY_SIZE(glue(output_params_root_, PARAM)), + },{ + .nid = 1, + .name = "func", + .params = glue(output_params_audio_func_, PARAM), + .nparams = ARRAY_SIZE(glue(output_params_audio_func_, PARAM)), + },{ + .nid = 2, + .name = "dac", + .params = glue(common_params_audio_dac_, PARAM), + .nparams = ARRAY_SIZE(glue(common_params_audio_dac_, PARAM)), + .stindex = 0, + },{ + .nid = 3, + .name = "out", + .params = glue(common_params_audio_lineout_, PARAM), + .nparams = ARRAY_SIZE(glue(common_params_audio_lineout_, PARAM)), + .config = ((AC_JACK_PORT_COMPLEX << AC_DEFCFG_PORT_CONN_SHIFT) | + (AC_JACK_LINE_OUT << AC_DEFCFG_DEVICE_SHIFT) | + (AC_JACK_CONN_UNKNOWN << AC_DEFCFG_CONN_TYPE_SHIFT) | + (AC_JACK_COLOR_GREEN << AC_DEFCFG_COLOR_SHIFT) | + 0x10), + .pinctl = AC_PINCTL_OUT_EN, + .conn = (uint32_t[]) { 2 }, + } +}; + +/* output: codec */ +static const desc_codec glue(output_, PARAM) = { + .name = "output", + .iid = QEMU_HDA_ID_OUTPUT, + .nodes = glue(output_nodes_, PARAM), + .nnodes = ARRAY_SIZE(glue(output_nodes_, PARAM)), +}; + +/* duplex: root node */ +static const desc_param glue(duplex_params_root_, PARAM)[] = { + { + .id = AC_PAR_VENDOR_ID, + .val = QEMU_HDA_ID_DUPLEX, + },{ + .id = AC_PAR_SUBSYSTEM_ID, + .val = QEMU_HDA_ID_DUPLEX, + },{ + .id = AC_PAR_REV_ID, + .val = 0x00100101, + },{ + .id = AC_PAR_NODE_COUNT, + .val = 0x00010001, + }, +}; + +/* duplex: audio function */ +static const desc_param glue(duplex_params_audio_func_, PARAM)[] = { + { + .id = AC_PAR_FUNCTION_TYPE, + .val = AC_GRP_AUDIO_FUNCTION, + },{ + .id = AC_PAR_SUBSYSTEM_ID, + .val = QEMU_HDA_ID_DUPLEX, + },{ + .id = AC_PAR_NODE_COUNT, + .val = 0x00020004, + },{ + .id = AC_PAR_PCM, + .val = QEMU_HDA_PCM_FORMATS, + },{ + .id = AC_PAR_STREAM, + .val = AC_SUPFMT_PCM, + },{ + .id = AC_PAR_AMP_IN_CAP, + .val = QEMU_HDA_AMP_NONE, + },{ + .id = AC_PAR_AMP_OUT_CAP, + .val = QEMU_HDA_AMP_NONE, + },{ + .id = AC_PAR_GPIO_CAP, + .val = 0, + },{ + .id = AC_PAR_AUDIO_FG_CAP, + .val = 0x00000808, + },{ + .id = AC_PAR_POWER_STATE, + .val = 0, + }, +}; + +/* duplex: nodes */ +static const desc_node glue(duplex_nodes_, PARAM)[] = { + { + .nid = AC_NODE_ROOT, + .name = "root", + .params = glue(duplex_params_root_, PARAM), + .nparams = 
ARRAY_SIZE(glue(duplex_params_root_, PARAM)), + },{ + .nid = 1, + .name = "func", + .params = glue(duplex_params_audio_func_, PARAM), + .nparams = ARRAY_SIZE(glue(duplex_params_audio_func_, PARAM)), + },{ + .nid = 2, + .name = "dac", + .params = glue(common_params_audio_dac_, PARAM), + .nparams = ARRAY_SIZE(glue(common_params_audio_dac_, PARAM)), + .stindex = 0, + },{ + .nid = 3, + .name = "out", + .params = glue(common_params_audio_lineout_, PARAM), + .nparams = ARRAY_SIZE(glue(common_params_audio_lineout_, PARAM)), + .config = ((AC_JACK_PORT_COMPLEX << AC_DEFCFG_PORT_CONN_SHIFT) | + (AC_JACK_LINE_OUT << AC_DEFCFG_DEVICE_SHIFT) | + (AC_JACK_CONN_UNKNOWN << AC_DEFCFG_CONN_TYPE_SHIFT) | + (AC_JACK_COLOR_GREEN << AC_DEFCFG_COLOR_SHIFT) | + 0x10), + .pinctl = AC_PINCTL_OUT_EN, + .conn = (uint32_t[]) { 2 }, + },{ + .nid = 4, + .name = "adc", + .params = glue(common_params_audio_adc_, PARAM), + .nparams = ARRAY_SIZE(glue(common_params_audio_adc_, PARAM)), + .stindex = 1, + .conn = (uint32_t[]) { 5 }, + },{ + .nid = 5, + .name = "in", + .params = glue(common_params_audio_linein_, PARAM), + .nparams = ARRAY_SIZE(glue(common_params_audio_linein_, PARAM)), + .config = ((AC_JACK_PORT_COMPLEX << AC_DEFCFG_PORT_CONN_SHIFT) | + (AC_JACK_LINE_IN << AC_DEFCFG_DEVICE_SHIFT) | + (AC_JACK_CONN_UNKNOWN << AC_DEFCFG_CONN_TYPE_SHIFT) | + (AC_JACK_COLOR_RED << AC_DEFCFG_COLOR_SHIFT) | + 0x20), + .pinctl = AC_PINCTL_IN_EN, + } +}; + +/* duplex: codec */ +static const desc_codec glue(duplex_, PARAM) = { + .name = "duplex", + .iid = QEMU_HDA_ID_DUPLEX, + .nodes = glue(duplex_nodes_, PARAM), + .nnodes = ARRAY_SIZE(glue(duplex_nodes_, PARAM)), +}; + +/* micro: root node */ +static const desc_param glue(micro_params_root_, PARAM)[] = { + { + .id = AC_PAR_VENDOR_ID, + .val = QEMU_HDA_ID_MICRO, + },{ + .id = AC_PAR_SUBSYSTEM_ID, + .val = QEMU_HDA_ID_MICRO, + },{ + .id = AC_PAR_REV_ID, + .val = 0x00100101, + },{ + .id = AC_PAR_NODE_COUNT, + .val = 0x00010001, + }, +}; + +/* micro: audio function */ +static const desc_param glue(micro_params_audio_func_, PARAM)[] = { + { + .id = AC_PAR_FUNCTION_TYPE, + .val = AC_GRP_AUDIO_FUNCTION, + },{ + .id = AC_PAR_SUBSYSTEM_ID, + .val = QEMU_HDA_ID_MICRO, + },{ + .id = AC_PAR_NODE_COUNT, + .val = 0x00020004, + },{ + .id = AC_PAR_PCM, + .val = QEMU_HDA_PCM_FORMATS, + },{ + .id = AC_PAR_STREAM, + .val = AC_SUPFMT_PCM, + },{ + .id = AC_PAR_AMP_IN_CAP, + .val = QEMU_HDA_AMP_NONE, + },{ + .id = AC_PAR_AMP_OUT_CAP, + .val = QEMU_HDA_AMP_NONE, + },{ + .id = AC_PAR_GPIO_CAP, + .val = 0, + },{ + .id = AC_PAR_AUDIO_FG_CAP, + .val = 0x00000808, + },{ + .id = AC_PAR_POWER_STATE, + .val = 0, + }, +}; + +/* micro: nodes */ +static const desc_node glue(micro_nodes_, PARAM)[] = { + { + .nid = AC_NODE_ROOT, + .name = "root", + .params = glue(micro_params_root_, PARAM), + .nparams = ARRAY_SIZE(glue(micro_params_root_, PARAM)), + },{ + .nid = 1, + .name = "func", + .params = glue(micro_params_audio_func_, PARAM), + .nparams = ARRAY_SIZE(glue(micro_params_audio_func_, PARAM)), + },{ + .nid = 2, + .name = "dac", + .params = glue(common_params_audio_dac_, PARAM), + .nparams = ARRAY_SIZE(glue(common_params_audio_dac_, PARAM)), + .stindex = 0, + },{ + .nid = 3, + .name = "out", + .params = glue(common_params_audio_lineout_, PARAM), + .nparams = ARRAY_SIZE(glue(common_params_audio_lineout_, PARAM)), + .config = ((AC_JACK_PORT_COMPLEX << AC_DEFCFG_PORT_CONN_SHIFT) | + (AC_JACK_SPEAKER << AC_DEFCFG_DEVICE_SHIFT) | + (AC_JACK_CONN_UNKNOWN << AC_DEFCFG_CONN_TYPE_SHIFT) | + (AC_JACK_COLOR_GREEN << 
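In these tables AC_PAR_NODE_COUNT packs (first sub-node id << 16) | sub-node count, which is how the widget trees hang together: the root points at the single audio function group at nid 1, and the function group covers its widgets starting at nid 2. A self-checking illustration of the packed values used here:

    #include <assert.h>

    int main(void)
    {
        /* root: one sub-node, the audio function group at nid 1 */
        assert((0x00010001 >> 16) == 1 && (0x00010001 & 0xffff) == 1);
        /* duplex/micro function group: four widgets, nids 2..5 */
        assert((0x00020004 >> 16) == 2 && (0x00020004 & 0xffff) == 4);
        /* output function group: two widgets, dac (2) and out (3) */
        assert((0x00020002 & 0xffff) == 2);
        return 0;
    }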
AC_DEFCFG_COLOR_SHIFT) | + 0x10), + .pinctl = AC_PINCTL_OUT_EN, + .conn = (uint32_t[]) { 2 }, + },{ + .nid = 4, + .name = "adc", + .params = glue(common_params_audio_adc_, PARAM), + .nparams = ARRAY_SIZE(glue(common_params_audio_adc_, PARAM)), + .stindex = 1, + .conn = (uint32_t[]) { 5 }, + },{ + .nid = 5, + .name = "in", + .params = glue(common_params_audio_linein_, PARAM), + .nparams = ARRAY_SIZE(glue(common_params_audio_linein_, PARAM)), + .config = ((AC_JACK_PORT_COMPLEX << AC_DEFCFG_PORT_CONN_SHIFT) | + (AC_JACK_MIC_IN << AC_DEFCFG_DEVICE_SHIFT) | + (AC_JACK_CONN_UNKNOWN << AC_DEFCFG_CONN_TYPE_SHIFT) | + (AC_JACK_COLOR_RED << AC_DEFCFG_COLOR_SHIFT) | + 0x20), + .pinctl = AC_PINCTL_IN_EN, + } +}; + +/* micro: codec */ +static const desc_codec glue(micro_, PARAM) = { + .name = "micro", + .iid = QEMU_HDA_ID_MICRO, + .nodes = glue(micro_nodes_, PARAM), + .nnodes = ARRAY_SIZE(glue(micro_nodes_, PARAM)), +}; + +#undef PARAM +#undef HDA_MIXER +#undef QEMU_HDA_ID_OUTPUT +#undef QEMU_HDA_ID_DUPLEX +#undef QEMU_HDA_ID_MICRO +#undef QEMU_HDA_AMP_CAPS diff --git a/hw/audio/hda-codec.c b/hw/audio/hda-codec.c new file mode 100644 index 000000000..feb8f9e2b --- /dev/null +++ b/hw/audio/hda-codec.c @@ -0,0 +1,960 @@ +/* + * Copyright (C) 2010 Red Hat, Inc. + * + * written by Gerd Hoffmann + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 or + * (at your option) version 3 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +#include "qemu/osdep.h" +#include "hw/pci/pci.h" +#include "hw/qdev-properties.h" +#include "intel-hda.h" +#include "migration/vmstate.h" +#include "qemu/module.h" +#include "intel-hda-defs.h" +#include "audio/audio.h" +#include "trace.h" +#include "qom/object.h" + +/* -------------------------------------------------------------------------- */ + +typedef struct desc_param { + uint32_t id; + uint32_t val; +} desc_param; + +typedef struct desc_node { + uint32_t nid; + const char *name; + const desc_param *params; + uint32_t nparams; + uint32_t config; + uint32_t pinctl; + uint32_t *conn; + uint32_t stindex; +} desc_node; + +typedef struct desc_codec { + const char *name; + uint32_t iid; + const desc_node *nodes; + uint32_t nnodes; +} desc_codec; + +static const desc_param* hda_codec_find_param(const desc_node *node, uint32_t id) +{ + int i; + + for (i = 0; i < node->nparams; i++) { + if (node->params[i].id == id) { + return &node->params[i]; + } + } + return NULL; +} + +static const desc_node* hda_codec_find_node(const desc_codec *codec, uint32_t nid) +{ + int i; + + for (i = 0; i < codec->nnodes; i++) { + if (codec->nodes[i].nid == nid) { + return &codec->nodes[i]; + } + } + return NULL; +} + +static void hda_codec_parse_fmt(uint32_t format, struct audsettings *as) +{ + if (format & AC_FMT_TYPE_NON_PCM) { + return; + } + + as->freq = (format & AC_FMT_BASE_44K) ? 
44100 : 48000; + + switch ((format & AC_FMT_MULT_MASK) >> AC_FMT_MULT_SHIFT) { + case 1: as->freq *= 2; break; + case 2: as->freq *= 3; break; + case 3: as->freq *= 4; break; + } + + switch ((format & AC_FMT_DIV_MASK) >> AC_FMT_DIV_SHIFT) { + case 1: as->freq /= 2; break; + case 2: as->freq /= 3; break; + case 3: as->freq /= 4; break; + case 4: as->freq /= 5; break; + case 5: as->freq /= 6; break; + case 6: as->freq /= 7; break; + case 7: as->freq /= 8; break; + } + + switch (format & AC_FMT_BITS_MASK) { + case AC_FMT_BITS_8: as->fmt = AUDIO_FORMAT_S8; break; + case AC_FMT_BITS_16: as->fmt = AUDIO_FORMAT_S16; break; + case AC_FMT_BITS_32: as->fmt = AUDIO_FORMAT_S32; break; + } + + as->nchannels = ((format & AC_FMT_CHAN_MASK) >> AC_FMT_CHAN_SHIFT) + 1; +} + +/* -------------------------------------------------------------------------- */ +/* + * HDA codec descriptions + */ + +/* some defines */ + +#define QEMU_HDA_ID_VENDOR 0x1af4 +#define QEMU_HDA_PCM_FORMATS (AC_SUPPCM_BITS_16 | \ + 0x1fc /* 16 -> 96 kHz */) +#define QEMU_HDA_AMP_NONE (0) +#define QEMU_HDA_AMP_STEPS 0x4a + +#define PARAM mixemu +#define HDA_MIXER +#include "hda-codec-common.h" + +#define PARAM nomixemu +#include "hda-codec-common.h" + +#define HDA_TIMER_TICKS (SCALE_MS) +#define B_SIZE sizeof(st->buf) +#define B_MASK (sizeof(st->buf) - 1) + +/* -------------------------------------------------------------------------- */ + +static const char *fmt2name[] = { + [ AUDIO_FORMAT_U8 ] = "PCM-U8", + [ AUDIO_FORMAT_S8 ] = "PCM-S8", + [ AUDIO_FORMAT_U16 ] = "PCM-U16", + [ AUDIO_FORMAT_S16 ] = "PCM-S16", + [ AUDIO_FORMAT_U32 ] = "PCM-U32", + [ AUDIO_FORMAT_S32 ] = "PCM-S32", +}; + +typedef struct HDAAudioState HDAAudioState; +typedef struct HDAAudioStream HDAAudioStream; + +struct HDAAudioStream { + HDAAudioState *state; + const desc_node *node; + bool output, running; + uint32_t stream; + uint32_t channel; + uint32_t format; + uint32_t gain_left, gain_right; + bool mute_left, mute_right; + struct audsettings as; + union { + SWVoiceIn *in; + SWVoiceOut *out; + } voice; + uint8_t compat_buf[HDA_BUFFER_SIZE]; + uint32_t compat_bpos; + uint8_t buf[8192]; /* size must be power of two */ + int64_t rpos; + int64_t wpos; + QEMUTimer *buft; + int64_t buft_start; +}; + +#define TYPE_HDA_AUDIO "hda-audio" +OBJECT_DECLARE_SIMPLE_TYPE(HDAAudioState, HDA_AUDIO) + +struct HDAAudioState { + HDACodecDevice hda; + const char *name; + + QEMUSoundCard card; + const desc_codec *desc; + HDAAudioStream st[4]; + bool running_compat[16]; + bool running_real[2 * 16]; + + /* properties */ + uint32_t debug; + bool mixer; + bool use_timer; +}; + +static inline int64_t hda_bytes_per_second(HDAAudioStream *st) +{ + return 2LL * st->as.nchannels * st->as.freq; +} + +static inline void hda_timer_sync_adjust(HDAAudioStream *st, int64_t target_pos) +{ + int64_t limit = B_SIZE / 8; + int64_t corr = 0; + + if (target_pos > limit) { + corr = HDA_TIMER_TICKS; + } + if (target_pos < -limit) { + corr = -HDA_TIMER_TICKS; + } + if (target_pos < -(2 * limit)) { + corr = -(4 * HDA_TIMER_TICKS); + } + if (corr == 0) { + return; + } + + trace_hda_audio_adjust(st->node->name, target_pos); + st->buft_start += corr; +} + +static void hda_audio_input_timer(void *opaque) +{ + HDAAudioStream *st = opaque; + + int64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + + int64_t buft_start = st->buft_start; + int64_t wpos = st->wpos; + int64_t rpos = st->rpos; + + int64_t wanted_rpos = hda_bytes_per_second(st) * (now - buft_start) + / NANOSECONDS_PER_SECOND; + wanted_rpos &= -4; /* 
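hda-codec-common.h is deliberately included twice here, once with HDA_MIXER defined, and uses glue(..., PARAM) so that each pass stamps out a parallel family of tables (output_mixemu/duplex_mixemu/... and the nomixemu variants). The trick in miniature, with hypothetical names:

    /* stamp.h: expands one table per inclusion, keyed by PARAM (which the
     * header consumes with #undef, just like hda-codec-common.h does). */
    #define glue2(a, b) a##b
    #define glue(a, b)  glue2(a, b)
    static const int glue(table_, PARAM)[] = { 1, 2, 3 };
    #undef PARAM

    /* user.c: */
    #define PARAM mixemu
    #include "stamp.h" /* defines table_mixemu */
    #define PARAM nomixemu
    #include "stamp.h" /* defines table_nomixemu */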
IMPORTANT! clip to frames */ + + if (wanted_rpos <= rpos) { + /* we already transmitted the data */ + goto out_timer; + } + + int64_t to_transfer = MIN(wpos - rpos, wanted_rpos - rpos); + while (to_transfer) { + uint32_t start = (rpos & B_MASK); + uint32_t chunk = MIN(B_SIZE - start, to_transfer); + int rc = hda_codec_xfer( + &st->state->hda, st->stream, false, st->buf + start, chunk); + if (!rc) { + break; + } + rpos += chunk; + to_transfer -= chunk; + st->rpos += chunk; + } + +out_timer: + + if (st->running) { + timer_mod_anticipate_ns(st->buft, now + HDA_TIMER_TICKS); + } +} + +static void hda_audio_input_cb(void *opaque, int avail) +{ + HDAAudioStream *st = opaque; + + int64_t wpos = st->wpos; + int64_t rpos = st->rpos; + + int64_t to_transfer = MIN(B_SIZE - (wpos - rpos), avail); + + while (to_transfer) { + uint32_t start = (uint32_t) (wpos & B_MASK); + uint32_t chunk = (uint32_t) MIN(B_SIZE - start, to_transfer); + uint32_t read = AUD_read(st->voice.in, st->buf + start, chunk); + wpos += read; + to_transfer -= read; + st->wpos += read; + if (chunk != read) { + break; + } + } + + hda_timer_sync_adjust(st, -((wpos - rpos) - (B_SIZE >> 1))); +} + +static void hda_audio_output_timer(void *opaque) +{ + HDAAudioStream *st = opaque; + + int64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + + int64_t buft_start = st->buft_start; + int64_t wpos = st->wpos; + int64_t rpos = st->rpos; + + int64_t wanted_wpos = hda_bytes_per_second(st) * (now - buft_start) + / NANOSECONDS_PER_SECOND; + wanted_wpos &= -4; /* IMPORTANT! clip to frames */ + + if (wanted_wpos <= wpos) { + /* we already received the data */ + goto out_timer; + } + + int64_t to_transfer = MIN(B_SIZE - (wpos - rpos), wanted_wpos - wpos); + while (to_transfer) { + uint32_t start = (wpos & B_MASK); + uint32_t chunk = MIN(B_SIZE - start, to_transfer); + int rc = hda_codec_xfer( + &st->state->hda, st->stream, true, st->buf + start, chunk); + if (!rc) { + break; + } + wpos += chunk; + to_transfer -= chunk; + st->wpos += chunk; + } + +out_timer: + + if (st->running) { + timer_mod_anticipate_ns(st->buft, now + HDA_TIMER_TICKS); + } +} + +static void hda_audio_output_cb(void *opaque, int avail) +{ + HDAAudioStream *st = opaque; + + int64_t wpos = st->wpos; + int64_t rpos = st->rpos; + + int64_t to_transfer = MIN(wpos - rpos, avail); + + if (wpos - rpos == B_SIZE) { + /* drop buffer, reset timer adjust */ + st->rpos = 0; + st->wpos = 0; + st->buft_start = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + trace_hda_audio_overrun(st->node->name); + return; + } + + while (to_transfer) { + uint32_t start = (uint32_t) (rpos & B_MASK); + uint32_t chunk = (uint32_t) MIN(B_SIZE - start, to_transfer); + uint32_t written = AUD_write(st->voice.out, st->buf + start, chunk); + rpos += written; + to_transfer -= written; + st->rpos += written; + if (chunk != written) { + break; + } + } + + hda_timer_sync_adjust(st, (wpos - rpos) - (B_SIZE >> 1)); +} + +static void hda_audio_compat_input_cb(void *opaque, int avail) +{ + HDAAudioStream *st = opaque; + int recv = 0; + int len; + bool rc; + + while (avail - recv >= sizeof(st->compat_buf)) { + if (st->compat_bpos != sizeof(st->compat_buf)) { + len = AUD_read(st->voice.in, st->compat_buf + st->compat_bpos, + sizeof(st->compat_buf) - st->compat_bpos); + st->compat_bpos += len; + recv += len; + if (st->compat_bpos != sizeof(st->compat_buf)) { + break; + } + } + rc = hda_codec_xfer(&st->state->hda, st->stream, false, + st->compat_buf, sizeof(st->compat_buf)); + if (!rc) { + break; + } + st->compat_bpos = 0; + } +} + +static 
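The timer-driven paths above all share one ring-buffer convention: rpos/wpos are monotonically increasing 64-bit byte positions, the index into buf is pos & B_MASK, and the fill level is simply wpos - rpos, which is why the buffer size must be a power of two. Reduced to a sketch with hypothetical names:

    #include <stdint.h>

    #define RB_SIZE 8192u          /* must be a power of two */
    #define RB_MASK (RB_SIZE - 1)

    static uint32_t rb_index(int64_t pos)
    {
        return (uint32_t)(pos & RB_MASK);   /* wrap into the buffer */
    }
    static int64_t rb_fill(int64_t wpos, int64_t rpos)
    {
        return wpos - rpos;                 /* bytes queued */
    }
    /* e.g. wpos = 10000, rpos = 9000: fill 1000, write index 10000 & 8191 = 1808 */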
void hda_audio_compat_output_cb(void *opaque, int avail) +{ + HDAAudioStream *st = opaque; + int sent = 0; + int len; + bool rc; + + while (avail - sent >= sizeof(st->compat_buf)) { + if (st->compat_bpos == sizeof(st->compat_buf)) { + rc = hda_codec_xfer(&st->state->hda, st->stream, true, + st->compat_buf, sizeof(st->compat_buf)); + if (!rc) { + break; + } + st->compat_bpos = 0; + } + len = AUD_write(st->voice.out, st->compat_buf + st->compat_bpos, + sizeof(st->compat_buf) - st->compat_bpos); + st->compat_bpos += len; + sent += len; + if (st->compat_bpos != sizeof(st->compat_buf)) { + break; + } + } +} + +static void hda_audio_set_running(HDAAudioStream *st, bool running) +{ + if (st->node == NULL) { + return; + } + if (st->running == running) { + return; + } + st->running = running; + trace_hda_audio_running(st->node->name, st->stream, st->running); + if (st->state->use_timer) { + if (running) { + int64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + st->rpos = 0; + st->wpos = 0; + st->buft_start = now; + timer_mod_anticipate_ns(st->buft, now + HDA_TIMER_TICKS); + } else { + timer_del(st->buft); + } + } + if (st->output) { + AUD_set_active_out(st->voice.out, st->running); + } else { + AUD_set_active_in(st->voice.in, st->running); + } +} + +static void hda_audio_set_amp(HDAAudioStream *st) +{ + bool muted; + uint32_t left, right; + + if (st->node == NULL) { + return; + } + + muted = st->mute_left && st->mute_right; + left = st->mute_left ? 0 : st->gain_left; + right = st->mute_right ? 0 : st->gain_right; + + left = left * 255 / QEMU_HDA_AMP_STEPS; + right = right * 255 / QEMU_HDA_AMP_STEPS; + + if (!st->state->mixer) { + return; + } + if (st->output) { + AUD_set_volume_out(st->voice.out, muted, left, right); + } else { + AUD_set_volume_in(st->voice.in, muted, left, right); + } +} + +static void hda_audio_setup(HDAAudioStream *st) +{ + bool use_timer = st->state->use_timer; + audio_callback_fn cb; + + if (st->node == NULL) { + return; + } + + trace_hda_audio_format(st->node->name, st->as.nchannels, + fmt2name[st->as.fmt], st->as.freq); + + if (st->output) { + if (use_timer) { + cb = hda_audio_output_cb; + st->buft = timer_new_ns(QEMU_CLOCK_VIRTUAL, + hda_audio_output_timer, st); + } else { + cb = hda_audio_compat_output_cb; + } + st->voice.out = AUD_open_out(&st->state->card, st->voice.out, + st->node->name, st, cb, &st->as); + } else { + if (use_timer) { + cb = hda_audio_input_cb; + st->buft = timer_new_ns(QEMU_CLOCK_VIRTUAL, + hda_audio_input_timer, st); + } else { + cb = hda_audio_compat_input_cb; + } + st->voice.in = AUD_open_in(&st->state->card, st->voice.in, + st->node->name, st, cb, &st->as); + } +} + +static void hda_audio_command(HDACodecDevice *hda, uint32_t nid, uint32_t data) +{ + HDAAudioState *a = HDA_AUDIO(hda); + HDAAudioStream *st; + const desc_node *node = NULL; + const desc_param *param; + uint32_t verb, payload, response, count, shift; + + if ((data & 0x70000) == 0x70000) { + /* 12/8 id/payload */ + verb = (data >> 8) & 0xfff; + payload = data & 0x00ff; + } else { + /* 4/16 id/payload */ + verb = (data >> 8) & 0xf00; + payload = data & 0xffff; + } + + node = hda_codec_find_node(a->desc, nid); + if (node == NULL) { + goto fail; + } + dprint(a, 2, "%s: nid %d (%s), verb 0x%x, payload 0x%x\n", + __func__, nid, node->name, verb, payload); + + switch (verb) { + /* all nodes */ + case AC_VERB_PARAMETERS: + param = hda_codec_find_param(node, payload); + if (param == NULL) { + goto fail; + } + hda_codec_response(hda, true, param->val); + break; + case AC_VERB_GET_SUBSYSTEM_ID: 
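hda_audio_command() starts by undoing the two HDA verb encodings: "short" verbs carry a 12-bit id with an 8-bit payload, everything else a 4-bit id with a 16-bit payload. The decode step in isolation; decode_verb is a hypothetical extraction of the code below:

    #include <stdint.h>

    static void decode_verb(uint32_t data, uint32_t *verb, uint32_t *payload)
    {
        if ((data & 0x70000) == 0x70000) { /* 12-bit id / 8-bit payload */
            *verb = (data >> 8) & 0xfff;
            *payload = data & 0xff;
        } else {                           /* 4-bit id / 16-bit payload */
            *verb = (data >> 8) & 0xf00;
            *payload = data & 0xffff;
        }
    }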
+ hda_codec_response(hda, true, a->desc->iid); + break; + + /* all functions */ + case AC_VERB_GET_CONNECT_LIST: + param = hda_codec_find_param(node, AC_PAR_CONNLIST_LEN); + count = param ? param->val : 0; + response = 0; + shift = 0; + while (payload < count && shift < 32) { + response |= node->conn[payload] << shift; + payload++; + shift += 8; + } + hda_codec_response(hda, true, response); + break; + + /* pin widget */ + case AC_VERB_GET_CONFIG_DEFAULT: + hda_codec_response(hda, true, node->config); + break; + case AC_VERB_GET_PIN_WIDGET_CONTROL: + hda_codec_response(hda, true, node->pinctl); + break; + case AC_VERB_SET_PIN_WIDGET_CONTROL: + if (node->pinctl != payload) { + dprint(a, 1, "unhandled pin control bit\n"); + } + hda_codec_response(hda, true, 0); + break; + + /* audio in/out widget */ + case AC_VERB_SET_CHANNEL_STREAMID: + st = a->st + node->stindex; + if (st->node == NULL) { + goto fail; + } + hda_audio_set_running(st, false); + st->stream = (payload >> 4) & 0x0f; + st->channel = payload & 0x0f; + dprint(a, 2, "%s: stream %d, channel %d\n", + st->node->name, st->stream, st->channel); + hda_audio_set_running(st, a->running_real[st->output * 16 + st->stream]); + hda_codec_response(hda, true, 0); + break; + case AC_VERB_GET_CONV: + st = a->st + node->stindex; + if (st->node == NULL) { + goto fail; + } + response = st->stream << 4 | st->channel; + hda_codec_response(hda, true, response); + break; + case AC_VERB_SET_STREAM_FORMAT: + st = a->st + node->stindex; + if (st->node == NULL) { + goto fail; + } + st->format = payload; + hda_codec_parse_fmt(st->format, &st->as); + hda_audio_setup(st); + hda_codec_response(hda, true, 0); + break; + case AC_VERB_GET_STREAM_FORMAT: + st = a->st + node->stindex; + if (st->node == NULL) { + goto fail; + } + hda_codec_response(hda, true, st->format); + break; + case AC_VERB_GET_AMP_GAIN_MUTE: + st = a->st + node->stindex; + if (st->node == NULL) { + goto fail; + } + if (payload & AC_AMP_GET_LEFT) { + response = st->gain_left | (st->mute_left ? AC_AMP_MUTE : 0); + } else { + response = st->gain_right | (st->mute_right ? AC_AMP_MUTE : 0); + } + hda_codec_response(hda, true, response); + break; + case AC_VERB_SET_AMP_GAIN_MUTE: + st = a->st + node->stindex; + if (st->node == NULL) { + goto fail; + } + dprint(a, 1, "amp (%s): %s%s%s%s index %d gain %3d %s\n", + st->node->name, + (payload & AC_AMP_SET_OUTPUT) ? "o" : "-", + (payload & AC_AMP_SET_INPUT) ? "i" : "-", + (payload & AC_AMP_SET_LEFT) ? "l" : "-", + (payload & AC_AMP_SET_RIGHT) ? "r" : "-", + (payload & AC_AMP_SET_INDEX) >> AC_AMP_SET_INDEX_SHIFT, + (payload & AC_AMP_GAIN), + (payload & AC_AMP_MUTE) ? "muted" : ""); + if (payload & AC_AMP_SET_LEFT) { + st->gain_left = payload & AC_AMP_GAIN; + st->mute_left = payload & AC_AMP_MUTE; + } + if (payload & AC_AMP_SET_RIGHT) { + st->gain_right = payload & AC_AMP_GAIN; + st->mute_right = payload & AC_AMP_MUTE; + } + hda_audio_set_amp(st); + hda_codec_response(hda, true, 0); + break; + + /* not supported */ + case AC_VERB_SET_POWER_STATE: + case AC_VERB_GET_POWER_STATE: + case AC_VERB_GET_SDI_SELECT: + hda_codec_response(hda, true, 0); + break; + default: + goto fail; + } + return; + +fail: + dprint(a, 1, "%s: not handled: nid %d (%s), verb 0x%x, payload 0x%x\n", + __func__, nid, node ? 
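+           /* node is NULL here when the nid lookup failed; unhandled
+            * verbs on a valid node also land on this fail path */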
node->name : "?", verb, payload); + hda_codec_response(hda, true, 0); +} + +static void hda_audio_stream(HDACodecDevice *hda, uint32_t stnr, bool running, bool output) +{ + HDAAudioState *a = HDA_AUDIO(hda); + int s; + + a->running_compat[stnr] = running; + a->running_real[output * 16 + stnr] = running; + for (s = 0; s < ARRAY_SIZE(a->st); s++) { + if (a->st[s].node == NULL) { + continue; + } + if (a->st[s].output != output) { + continue; + } + if (a->st[s].stream != stnr) { + continue; + } + hda_audio_set_running(&a->st[s], running); + } +} + +static int hda_audio_init(HDACodecDevice *hda, const struct desc_codec *desc) +{ + HDAAudioState *a = HDA_AUDIO(hda); + HDAAudioStream *st; + const desc_node *node; + const desc_param *param; + uint32_t i, type; + + a->desc = desc; + a->name = object_get_typename(OBJECT(a)); + dprint(a, 1, "%s: cad %d\n", __func__, a->hda.cad); + + AUD_register_card("hda", &a->card); + for (i = 0; i < a->desc->nnodes; i++) { + node = a->desc->nodes + i; + param = hda_codec_find_param(node, AC_PAR_AUDIO_WIDGET_CAP); + if (param == NULL) { + continue; + } + type = (param->val & AC_WCAP_TYPE) >> AC_WCAP_TYPE_SHIFT; + switch (type) { + case AC_WID_AUD_OUT: + case AC_WID_AUD_IN: + assert(node->stindex < ARRAY_SIZE(a->st)); + st = a->st + node->stindex; + st->state = a; + st->node = node; + if (type == AC_WID_AUD_OUT) { + /* unmute output by default */ + st->gain_left = QEMU_HDA_AMP_STEPS; + st->gain_right = QEMU_HDA_AMP_STEPS; + st->compat_bpos = sizeof(st->compat_buf); + st->output = true; + } else { + st->output = false; + } + st->format = AC_FMT_TYPE_PCM | AC_FMT_BITS_16 | + (1 << AC_FMT_CHAN_SHIFT); + hda_codec_parse_fmt(st->format, &st->as); + hda_audio_setup(st); + break; + } + } + return 0; +} + +static void hda_audio_exit(HDACodecDevice *hda) +{ + HDAAudioState *a = HDA_AUDIO(hda); + HDAAudioStream *st; + int i; + + dprint(a, 1, "%s\n", __func__); + for (i = 0; i < ARRAY_SIZE(a->st); i++) { + st = a->st + i; + if (st->node == NULL) { + continue; + } + if (a->use_timer) { + timer_del(st->buft); + } + if (st->output) { + AUD_close_out(&a->card, st->voice.out); + } else { + AUD_close_in(&a->card, st->voice.in); + } + } + AUD_remove_card(&a->card); +} + +static int hda_audio_post_load(void *opaque, int version) +{ + HDAAudioState *a = opaque; + HDAAudioStream *st; + int i; + + dprint(a, 1, "%s\n", __func__); + if (version == 1) { + /* assume running_compat[] is for output streams */ + for (i = 0; i < ARRAY_SIZE(a->running_compat); i++) + a->running_real[16 + i] = a->running_compat[i]; + } + + for (i = 0; i < ARRAY_SIZE(a->st); i++) { + st = a->st + i; + if (st->node == NULL) + continue; + hda_codec_parse_fmt(st->format, &st->as); + hda_audio_setup(st); + hda_audio_set_amp(st); + hda_audio_set_running(st, a->running_real[st->output * 16 + st->stream]); + } + return 0; +} + +static void hda_audio_reset(DeviceState *dev) +{ + HDAAudioState *a = HDA_AUDIO(dev); + HDAAudioStream *st; + int i; + + dprint(a, 1, "%s\n", __func__); + for (i = 0; i < ARRAY_SIZE(a->st); i++) { + st = a->st + i; + if (st->node != NULL) { + hda_audio_set_running(st, false); + } + } +} + +static bool vmstate_hda_audio_stream_buf_needed(void *opaque) +{ + HDAAudioStream *st = opaque; + return st->state && st->state->use_timer; +} + +static const VMStateDescription vmstate_hda_audio_stream_buf = { + .name = "hda-audio-stream/buffer", + .version_id = 1, + .needed = vmstate_hda_audio_stream_buf_needed, + .fields = (VMStateField[]) { + VMSTATE_BUFFER(buf, HDAAudioStream), + VMSTATE_INT64(rpos, 
HDAAudioStream), + VMSTATE_INT64(wpos, HDAAudioStream), + VMSTATE_TIMER_PTR(buft, HDAAudioStream), + VMSTATE_INT64(buft_start, HDAAudioStream), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_hda_audio_stream = { + .name = "hda-audio-stream", + .version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(stream, HDAAudioStream), + VMSTATE_UINT32(channel, HDAAudioStream), + VMSTATE_UINT32(format, HDAAudioStream), + VMSTATE_UINT32(gain_left, HDAAudioStream), + VMSTATE_UINT32(gain_right, HDAAudioStream), + VMSTATE_BOOL(mute_left, HDAAudioStream), + VMSTATE_BOOL(mute_right, HDAAudioStream), + VMSTATE_UINT32(compat_bpos, HDAAudioStream), + VMSTATE_BUFFER(compat_buf, HDAAudioStream), + VMSTATE_END_OF_LIST() + }, + .subsections = (const VMStateDescription * []) { + &vmstate_hda_audio_stream_buf, + NULL + } +}; + +static const VMStateDescription vmstate_hda_audio = { + .name = "hda-audio", + .version_id = 2, + .post_load = hda_audio_post_load, + .fields = (VMStateField[]) { + VMSTATE_STRUCT_ARRAY(st, HDAAudioState, 4, 0, + vmstate_hda_audio_stream, + HDAAudioStream), + VMSTATE_BOOL_ARRAY(running_compat, HDAAudioState, 16), + VMSTATE_BOOL_ARRAY_V(running_real, HDAAudioState, 2 * 16, 2), + VMSTATE_END_OF_LIST() + } +}; + +static Property hda_audio_properties[] = { + DEFINE_AUDIO_PROPERTIES(HDAAudioState, card), + DEFINE_PROP_UINT32("debug", HDAAudioState, debug, 0), + DEFINE_PROP_BOOL("mixer", HDAAudioState, mixer, true), + DEFINE_PROP_BOOL("use-timer", HDAAudioState, use_timer, true), + DEFINE_PROP_END_OF_LIST(), +}; + +static int hda_audio_init_output(HDACodecDevice *hda) +{ + HDAAudioState *a = HDA_AUDIO(hda); + + if (!a->mixer) { + return hda_audio_init(hda, &output_nomixemu); + } else { + return hda_audio_init(hda, &output_mixemu); + } +} + +static int hda_audio_init_duplex(HDACodecDevice *hda) +{ + HDAAudioState *a = HDA_AUDIO(hda); + + if (!a->mixer) { + return hda_audio_init(hda, &duplex_nomixemu); + } else { + return hda_audio_init(hda, &duplex_mixemu); + } +} + +static int hda_audio_init_micro(HDACodecDevice *hda) +{ + HDAAudioState *a = HDA_AUDIO(hda); + + if (!a->mixer) { + return hda_audio_init(hda, &micro_nomixemu); + } else { + return hda_audio_init(hda, &micro_mixemu); + } +} + +static void hda_audio_base_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + HDACodecDeviceClass *k = HDA_CODEC_DEVICE_CLASS(klass); + + k->exit = hda_audio_exit; + k->command = hda_audio_command; + k->stream = hda_audio_stream; + set_bit(DEVICE_CATEGORY_SOUND, dc->categories); + dc->reset = hda_audio_reset; + dc->vmsd = &vmstate_hda_audio; + device_class_set_props(dc, hda_audio_properties); +} + +static const TypeInfo hda_audio_info = { + .name = TYPE_HDA_AUDIO, + .parent = TYPE_HDA_CODEC_DEVICE, + .instance_size = sizeof(HDAAudioState), + .class_init = hda_audio_base_class_init, + .abstract = true, +}; + +static void hda_audio_output_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + HDACodecDeviceClass *k = HDA_CODEC_DEVICE_CLASS(klass); + + k->init = hda_audio_init_output; + dc->desc = "HDA Audio Codec, output-only (line-out)"; +} + +static const TypeInfo hda_audio_output_info = { + .name = "hda-output", + .parent = TYPE_HDA_AUDIO, + .class_init = hda_audio_output_class_init, +}; + +static void hda_audio_duplex_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + HDACodecDeviceClass *k = HDA_CODEC_DEVICE_CLASS(klass); + + k->init = hda_audio_init_duplex; + dc->desc = "HDA
Audio Codec, duplex (line-out, line-in)"; +} + +static const TypeInfo hda_audio_duplex_info = { + .name = "hda-duplex", + .parent = TYPE_HDA_AUDIO, + .class_init = hda_audio_duplex_class_init, +}; + +static void hda_audio_micro_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + HDACodecDeviceClass *k = HDA_CODEC_DEVICE_CLASS(klass); + + k->init = hda_audio_init_micro; + dc->desc = "HDA Audio Codec, duplex (speaker, microphone)"; +} + +static const TypeInfo hda_audio_micro_info = { + .name = "hda-micro", + .parent = TYPE_HDA_AUDIO, + .class_init = hda_audio_micro_class_init, +}; + +static void hda_audio_register_types(void) +{ + type_register_static(&hda_audio_info); + type_register_static(&hda_audio_output_info); + type_register_static(&hda_audio_duplex_info); + type_register_static(&hda_audio_micro_info); +} + +type_init(hda_audio_register_types) diff --git a/hw/audio/intel-hda-defs.h b/hw/audio/intel-hda-defs.h new file mode 100644 index 000000000..2e37e5b87 --- /dev/null +++ b/hw/audio/intel-hda-defs.h @@ -0,0 +1,717 @@ +#ifndef HW_INTEL_HDA_DEFS_H +#define HW_INTEL_HDA_DEFS_H + +/* qemu */ +#define HDA_BUFFER_SIZE 256 + +/* --------------------------------------------------------------------- */ +/* from linux/sound/pci/hda/hda_intel.c */ + +/* + * registers + */ +#define ICH6_REG_GCAP 0x00 +#define ICH6_GCAP_64OK (1 << 0) /* 64bit address support */ +#define ICH6_GCAP_NSDO (3 << 1) /* # of serial data out signals */ +#define ICH6_GCAP_BSS (31 << 3) /* # of bidirectional streams */ +#define ICH6_GCAP_ISS (15 << 8) /* # of input streams */ +#define ICH6_GCAP_OSS (15 << 12) /* # of output streams */ +#define ICH6_REG_VMIN 0x02 +#define ICH6_REG_VMAJ 0x03 +#define ICH6_REG_OUTPAY 0x04 +#define ICH6_REG_INPAY 0x06 +#define ICH6_REG_GCTL 0x08 +#define ICH6_GCTL_RESET (1 << 0) /* controller reset */ +#define ICH6_GCTL_FCNTRL (1 << 1) /* flush control */ +#define ICH6_GCTL_UNSOL (1 << 8) /* accept unsol. 
response enable */ +#define ICH6_REG_WAKEEN 0x0c +#define ICH6_REG_STATESTS 0x0e +#define ICH6_REG_GSTS 0x10 +#define ICH6_GSTS_FSTS (1 << 1) /* flush status */ +#define ICH6_REG_INTCTL 0x20 +#define ICH6_REG_INTSTS 0x24 +#define ICH6_REG_WALLCLK 0x30 /* 24Mhz source */ +#define ICH6_REG_SYNC 0x34 +#define ICH6_REG_CORBLBASE 0x40 +#define ICH6_REG_CORBUBASE 0x44 +#define ICH6_REG_CORBWP 0x48 +#define ICH6_REG_CORBRP 0x4a +#define ICH6_CORBRP_RST (1 << 15) /* read pointer reset */ +#define ICH6_REG_CORBCTL 0x4c +#define ICH6_CORBCTL_RUN (1 << 1) /* enable DMA */ +#define ICH6_CORBCTL_CMEIE (1 << 0) /* enable memory error irq */ +#define ICH6_REG_CORBSTS 0x4d +#define ICH6_CORBSTS_CMEI (1 << 0) /* memory error indication */ +#define ICH6_REG_CORBSIZE 0x4e + +#define ICH6_REG_RIRBLBASE 0x50 +#define ICH6_REG_RIRBUBASE 0x54 +#define ICH6_REG_RIRBWP 0x58 +#define ICH6_RIRBWP_RST (1 << 15) /* write pointer reset */ +#define ICH6_REG_RINTCNT 0x5a +#define ICH6_REG_RIRBCTL 0x5c +#define ICH6_RBCTL_IRQ_EN (1 << 0) /* enable IRQ */ +#define ICH6_RBCTL_DMA_EN (1 << 1) /* enable DMA */ +#define ICH6_RBCTL_OVERRUN_EN (1 << 2) /* enable overrun irq */ +#define ICH6_REG_RIRBSTS 0x5d +#define ICH6_RBSTS_IRQ (1 << 0) /* response irq */ +#define ICH6_RBSTS_OVERRUN (1 << 2) /* overrun irq */ +#define ICH6_REG_RIRBSIZE 0x5e + +#define ICH6_REG_IC 0x60 +#define ICH6_REG_IR 0x64 +#define ICH6_REG_IRS 0x68 +#define ICH6_IRS_VALID (1<<1) +#define ICH6_IRS_BUSY (1<<0) + +#define ICH6_REG_DPLBASE 0x70 +#define ICH6_REG_DPUBASE 0x74 +#define ICH6_DPLBASE_ENABLE 0x1 /* Enable position buffer */ + +/* SD offset: SDI0=0x80, SDI1=0xa0, ... SDO3=0x160 */ +enum { SDI0, SDI1, SDI2, SDI3, SDO0, SDO1, SDO2, SDO3 }; + +/* stream register offsets from stream base */ +#define ICH6_REG_SD_CTL 0x00 +#define ICH6_REG_SD_STS 0x03 +#define ICH6_REG_SD_LPIB 0x04 +#define ICH6_REG_SD_CBL 0x08 +#define ICH6_REG_SD_LVI 0x0c +#define ICH6_REG_SD_FIFOW 0x0e +#define ICH6_REG_SD_FIFOSIZE 0x10 +#define ICH6_REG_SD_FORMAT 0x12 +#define ICH6_REG_SD_BDLPL 0x18 +#define ICH6_REG_SD_BDLPU 0x1c + +/* PCI space */ +#define ICH6_PCIREG_TCSEL 0x44 + +/* + * other constants + */ + +/* max number of SDs */ +/* ICH, ATI and VIA have 4 playback and 4 capture */ +#define ICH6_NUM_CAPTURE 4 +#define ICH6_NUM_PLAYBACK 4 + +/* ULI has 6 playback and 5 capture */ +#define ULI_NUM_CAPTURE 5 +#define ULI_NUM_PLAYBACK 6 + +/* ATI HDMI has 1 playback and 0 capture */ +#define ATIHDMI_NUM_CAPTURE 0 +#define ATIHDMI_NUM_PLAYBACK 1 + +/* TERA has 4 playback and 3 capture */ +#define TERA_NUM_CAPTURE 3 +#define TERA_NUM_PLAYBACK 4 + +/* this number is statically defined for simplicity */ +#define MAX_AZX_DEV 16 + +/* max number of fragments - we may use more if allocating more pages for BDL */ +#define BDL_SIZE 4096 +#define AZX_MAX_BDL_ENTRIES (BDL_SIZE / 16) +#define AZX_MAX_FRAG 32 +/* max buffer size - no h/w limit, you can increase as you like */ +#define AZX_MAX_BUF_SIZE (1024*1024*1024) + +/* RIRB int mask: overrun[2], response[0] */ +#define RIRB_INT_RESPONSE 0x01 +#define RIRB_INT_OVERRUN 0x04 +#define RIRB_INT_MASK 0x05 + +/* STATESTS int mask: S3,SD2,SD1,SD0 */ +#define AZX_MAX_CODECS 8 +#define AZX_DEFAULT_CODECS 4 +#define STATESTS_INT_MASK ((1 << AZX_MAX_CODECS) - 1) + +/* SD_CTL bits */ +#define SD_CTL_STREAM_RESET 0x01 /* stream reset bit */ +#define SD_CTL_DMA_START 0x02 /* stream DMA start bit */ +#define SD_CTL_STRIPE (3 << 16) /* stripe control */ +#define SD_CTL_TRAFFIC_PRIO (1 << 18) /* traffic priority */ +#define SD_CTL_DIR (1 << 19) /* 
bi-directional stream */ +#define SD_CTL_STREAM_TAG_MASK (0xf << 20) +#define SD_CTL_STREAM_TAG_SHIFT 20 + +/* SD_CTL and SD_STS */ +#define SD_INT_DESC_ERR 0x10 /* descriptor error interrupt */ +#define SD_INT_FIFO_ERR 0x08 /* FIFO error interrupt */ +#define SD_INT_COMPLETE 0x04 /* completion interrupt */ +#define SD_INT_MASK (SD_INT_DESC_ERR|SD_INT_FIFO_ERR|\ + SD_INT_COMPLETE) + +/* SD_STS */ +#define SD_STS_FIFO_READY 0x20 /* FIFO ready */ + +/* INTCTL and INTSTS */ +#define ICH6_INT_ALL_STREAM 0xff /* all stream interrupts */ +#define ICH6_INT_CTRL_EN 0x40000000 /* controller interrupt enable bit */ +#define ICH6_INT_GLOBAL_EN 0x80000000 /* global interrupt enable bit */ + +/* below are so far hardcoded - should read registers in future */ +#define ICH6_MAX_CORB_ENTRIES 256 +#define ICH6_MAX_RIRB_ENTRIES 256 + +/* position fix mode */ +enum { + POS_FIX_AUTO, + POS_FIX_LPIB, + POS_FIX_POSBUF, +}; + +/* Defines for ATI HD Audio support in SB450 south bridge */ +#define ATI_SB450_HDAUDIO_MISC_CNTR2_ADDR 0x42 +#define ATI_SB450_HDAUDIO_ENABLE_SNOOP 0x02 + +/* Defines for Nvidia HDA support */ +#define NVIDIA_HDA_TRANSREG_ADDR 0x4e +#define NVIDIA_HDA_ENABLE_COHBITS 0x0f +#define NVIDIA_HDA_ISTRM_COH 0x4d +#define NVIDIA_HDA_OSTRM_COH 0x4c +#define NVIDIA_HDA_ENABLE_COHBIT 0x01 + +/* Defines for Intel SCH HDA snoop control */ +#define INTEL_SCH_HDA_DEVC 0x78 +#define INTEL_SCH_HDA_DEVC_NOSNOOP (0x1<<11) + +/* Define IN stream 0 FIFO size offset in VIA controller */ +#define VIA_IN_STREAM0_FIFO_SIZE_OFFSET 0x90 +/* Define VIA HD Audio Device ID*/ +#define VIA_HDAC_DEVICE_ID 0x3288 + +/* HD Audio class code */ +#define PCI_CLASS_MULTIMEDIA_HD_AUDIO 0x0403 + +/* --------------------------------------------------------------------- */ +/* from linux/sound/pci/hda/hda_codec.h */ + +/* + * nodes + */ +#define AC_NODE_ROOT 0x00 + +/* + * function group types + */ +enum { + AC_GRP_AUDIO_FUNCTION = 0x01, + AC_GRP_MODEM_FUNCTION = 0x02, +}; + +/* + * widget types + */ +enum { + AC_WID_AUD_OUT, /* Audio Out */ + AC_WID_AUD_IN, /* Audio In */ + AC_WID_AUD_MIX, /* Audio Mixer */ + AC_WID_AUD_SEL, /* Audio Selector */ + AC_WID_PIN, /* Pin Complex */ + AC_WID_POWER, /* Power */ + AC_WID_VOL_KNB, /* Volume Knob */ + AC_WID_BEEP, /* Beep Generator */ + AC_WID_VENDOR = 0x0f /* Vendor specific */ +}; + +/* + * GET verbs + */ +#define AC_VERB_GET_STREAM_FORMAT 0x0a00 +#define AC_VERB_GET_AMP_GAIN_MUTE 0x0b00 +#define AC_VERB_GET_PROC_COEF 0x0c00 +#define AC_VERB_GET_COEF_INDEX 0x0d00 +#define AC_VERB_PARAMETERS 0x0f00 +#define AC_VERB_GET_CONNECT_SEL 0x0f01 +#define AC_VERB_GET_CONNECT_LIST 0x0f02 +#define AC_VERB_GET_PROC_STATE 0x0f03 +#define AC_VERB_GET_SDI_SELECT 0x0f04 +#define AC_VERB_GET_POWER_STATE 0x0f05 +#define AC_VERB_GET_CONV 0x0f06 +#define AC_VERB_GET_PIN_WIDGET_CONTROL 0x0f07 +#define AC_VERB_GET_UNSOLICITED_RESPONSE 0x0f08 +#define AC_VERB_GET_PIN_SENSE 0x0f09 +#define AC_VERB_GET_BEEP_CONTROL 0x0f0a +#define AC_VERB_GET_EAPD_BTLENABLE 0x0f0c +#define AC_VERB_GET_DIGI_CONVERT_1 0x0f0d +#define AC_VERB_GET_DIGI_CONVERT_2 0x0f0e /* unused */ +#define AC_VERB_GET_VOLUME_KNOB_CONTROL 0x0f0f +/* f10-f1a: GPIO */ +#define AC_VERB_GET_GPIO_DATA 0x0f15 +#define AC_VERB_GET_GPIO_MASK 0x0f16 +#define AC_VERB_GET_GPIO_DIRECTION 0x0f17 +#define AC_VERB_GET_GPIO_WAKE_MASK 0x0f18 +#define AC_VERB_GET_GPIO_UNSOLICITED_RSP_MASK 0x0f19 +#define AC_VERB_GET_GPIO_STICKY_MASK 0x0f1a +#define AC_VERB_GET_CONFIG_DEFAULT 0x0f1c +/* f20: AFG/MFG */ +#define AC_VERB_GET_SUBSYSTEM_ID 0x0f20 +#define 
AC_VERB_GET_CVT_CHAN_COUNT 0x0f2d +#define AC_VERB_GET_HDMI_DIP_SIZE 0x0f2e +#define AC_VERB_GET_HDMI_ELDD 0x0f2f +#define AC_VERB_GET_HDMI_DIP_INDEX 0x0f30 +#define AC_VERB_GET_HDMI_DIP_DATA 0x0f31 +#define AC_VERB_GET_HDMI_DIP_XMIT 0x0f32 +#define AC_VERB_GET_HDMI_CP_CTRL 0x0f33 +#define AC_VERB_GET_HDMI_CHAN_SLOT 0x0f34 + +/* + * SET verbs + */ +#define AC_VERB_SET_STREAM_FORMAT 0x200 +#define AC_VERB_SET_AMP_GAIN_MUTE 0x300 +#define AC_VERB_SET_PROC_COEF 0x400 +#define AC_VERB_SET_COEF_INDEX 0x500 +#define AC_VERB_SET_CONNECT_SEL 0x701 +#define AC_VERB_SET_PROC_STATE 0x703 +#define AC_VERB_SET_SDI_SELECT 0x704 +#define AC_VERB_SET_POWER_STATE 0x705 +#define AC_VERB_SET_CHANNEL_STREAMID 0x706 +#define AC_VERB_SET_PIN_WIDGET_CONTROL 0x707 +#define AC_VERB_SET_UNSOLICITED_ENABLE 0x708 +#define AC_VERB_SET_PIN_SENSE 0x709 +#define AC_VERB_SET_BEEP_CONTROL 0x70a +#define AC_VERB_SET_EAPD_BTLENABLE 0x70c +#define AC_VERB_SET_DIGI_CONVERT_1 0x70d +#define AC_VERB_SET_DIGI_CONVERT_2 0x70e +#define AC_VERB_SET_VOLUME_KNOB_CONTROL 0x70f +#define AC_VERB_SET_GPIO_DATA 0x715 +#define AC_VERB_SET_GPIO_MASK 0x716 +#define AC_VERB_SET_GPIO_DIRECTION 0x717 +#define AC_VERB_SET_GPIO_WAKE_MASK 0x718 +#define AC_VERB_SET_GPIO_UNSOLICITED_RSP_MASK 0x719 +#define AC_VERB_SET_GPIO_STICKY_MASK 0x71a +#define AC_VERB_SET_CONFIG_DEFAULT_BYTES_0 0x71c +#define AC_VERB_SET_CONFIG_DEFAULT_BYTES_1 0x71d +#define AC_VERB_SET_CONFIG_DEFAULT_BYTES_2 0x71e +#define AC_VERB_SET_CONFIG_DEFAULT_BYTES_3 0x71f +#define AC_VERB_SET_EAPD 0x788 +#define AC_VERB_SET_CODEC_RESET 0x7ff +#define AC_VERB_SET_CVT_CHAN_COUNT 0x72d +#define AC_VERB_SET_HDMI_DIP_INDEX 0x730 +#define AC_VERB_SET_HDMI_DIP_DATA 0x731 +#define AC_VERB_SET_HDMI_DIP_XMIT 0x732 +#define AC_VERB_SET_HDMI_CP_CTRL 0x733 +#define AC_VERB_SET_HDMI_CHAN_SLOT 0x734 + +/* + * Parameter IDs + */ +#define AC_PAR_VENDOR_ID 0x00 +#define AC_PAR_SUBSYSTEM_ID 0x01 +#define AC_PAR_REV_ID 0x02 +#define AC_PAR_NODE_COUNT 0x04 +#define AC_PAR_FUNCTION_TYPE 0x05 +#define AC_PAR_AUDIO_FG_CAP 0x08 +#define AC_PAR_AUDIO_WIDGET_CAP 0x09 +#define AC_PAR_PCM 0x0a +#define AC_PAR_STREAM 0x0b +#define AC_PAR_PIN_CAP 0x0c +#define AC_PAR_AMP_IN_CAP 0x0d +#define AC_PAR_CONNLIST_LEN 0x0e +#define AC_PAR_POWER_STATE 0x0f +#define AC_PAR_PROC_CAP 0x10 +#define AC_PAR_GPIO_CAP 0x11 +#define AC_PAR_AMP_OUT_CAP 0x12 +#define AC_PAR_VOL_KNB_CAP 0x13 +#define AC_PAR_HDMI_LPCM_CAP 0x20 + +/* + * AC_VERB_PARAMETERS results (32bit) + */ + +/* Function Group Type */ +#define AC_FGT_TYPE (0xff<<0) +#define AC_FGT_TYPE_SHIFT 0 +#define AC_FGT_UNSOL_CAP (1<<8) + +/* Audio Function Group Capabilities */ +#define AC_AFG_OUT_DELAY (0xf<<0) +#define AC_AFG_IN_DELAY (0xf<<8) +#define AC_AFG_BEEP_GEN (1<<16) + +/* Audio Widget Capabilities */ +#define AC_WCAP_STEREO (1<<0) /* stereo I/O */ +#define AC_WCAP_IN_AMP (1<<1) /* AMP-in present */ +#define AC_WCAP_OUT_AMP (1<<2) /* AMP-out present */ +#define AC_WCAP_AMP_OVRD (1<<3) /* AMP-parameter override */ +#define AC_WCAP_FORMAT_OVRD (1<<4) /* format override */ +#define AC_WCAP_STRIPE (1<<5) /* stripe */ +#define AC_WCAP_PROC_WID (1<<6) /* Proc Widget */ +#define AC_WCAP_UNSOL_CAP (1<<7) /* Unsol capable */ +#define AC_WCAP_CONN_LIST (1<<8) /* connection list */ +#define AC_WCAP_DIGITAL (1<<9) /* digital I/O */ +#define AC_WCAP_POWER (1<<10) /* power control */ +#define AC_WCAP_LR_SWAP (1<<11) /* L/R swap */ +#define AC_WCAP_CP_CAPS (1<<12) /* content protection */ +#define AC_WCAP_CHAN_CNT_EXT (7<<13) /* channel count ext */ +#define AC_WCAP_DELAY 
(0xf<<16) +#define AC_WCAP_DELAY_SHIFT 16 +#define AC_WCAP_TYPE (0xf<<20) +#define AC_WCAP_TYPE_SHIFT 20 + +/* supported PCM rates and bits */ +#define AC_SUPPCM_RATES (0xfff << 0) +#define AC_SUPPCM_BITS_8 (1<<16) +#define AC_SUPPCM_BITS_16 (1<<17) +#define AC_SUPPCM_BITS_20 (1<<18) +#define AC_SUPPCM_BITS_24 (1<<19) +#define AC_SUPPCM_BITS_32 (1<<20) + +/* supported PCM stream format */ +#define AC_SUPFMT_PCM (1<<0) +#define AC_SUPFMT_FLOAT32 (1<<1) +#define AC_SUPFMT_AC3 (1<<2) + +/* GP I/O count */ +#define AC_GPIO_IO_COUNT (0xff<<0) +#define AC_GPIO_O_COUNT (0xff<<8) +#define AC_GPIO_O_COUNT_SHIFT 8 +#define AC_GPIO_I_COUNT (0xff<<16) +#define AC_GPIO_I_COUNT_SHIFT 16 +#define AC_GPIO_UNSOLICITED (1<<30) +#define AC_GPIO_WAKE (1<<31) + +/* Converter stream, channel */ +#define AC_CONV_CHANNEL (0xf<<0) +#define AC_CONV_STREAM (0xf<<4) +#define AC_CONV_STREAM_SHIFT 4 + +/* Input converter SDI select */ +#define AC_SDI_SELECT (0xf<<0) + +/* stream format id */ +#define AC_FMT_CHAN_SHIFT 0 +#define AC_FMT_CHAN_MASK (0x0f << 0) +#define AC_FMT_BITS_SHIFT 4 +#define AC_FMT_BITS_MASK (7 << 4) +#define AC_FMT_BITS_8 (0 << 4) +#define AC_FMT_BITS_16 (1 << 4) +#define AC_FMT_BITS_20 (2 << 4) +#define AC_FMT_BITS_24 (3 << 4) +#define AC_FMT_BITS_32 (4 << 4) +#define AC_FMT_DIV_SHIFT 8 +#define AC_FMT_DIV_MASK (7 << 8) +#define AC_FMT_MULT_SHIFT 11 +#define AC_FMT_MULT_MASK (7 << 11) +#define AC_FMT_BASE_SHIFT 14 +#define AC_FMT_BASE_48K (0 << 14) +#define AC_FMT_BASE_44K (1 << 14) +#define AC_FMT_TYPE_SHIFT 15 +#define AC_FMT_TYPE_PCM (0 << 15) +#define AC_FMT_TYPE_NON_PCM (1 << 15) + +/* Unsolicited response control */ +#define AC_UNSOL_TAG (0x3f<<0) +#define AC_UNSOL_ENABLED (1<<7) +#define AC_USRSP_EN AC_UNSOL_ENABLED + +/* Unsolicited responses */ +#define AC_UNSOL_RES_TAG (0x3f<<26) +#define AC_UNSOL_RES_TAG_SHIFT 26 +#define AC_UNSOL_RES_SUBTAG (0x1f<<21) +#define AC_UNSOL_RES_SUBTAG_SHIFT 21 +#define AC_UNSOL_RES_ELDV (1<<1) /* ELD Data valid (for HDMI) */ +#define AC_UNSOL_RES_PD (1<<0) /* pinsense detect */ +#define AC_UNSOL_RES_CP_STATE (1<<1) /* content protection */ +#define AC_UNSOL_RES_CP_READY (1<<0) /* content protection */ + +/* Pin widget capabilities */ +#define AC_PINCAP_IMP_SENSE (1<<0) /* impedance sense capable */ +#define AC_PINCAP_TRIG_REQ (1<<1) /* trigger required */ +#define AC_PINCAP_PRES_DETECT (1<<2) /* presence detect capable */ +#define AC_PINCAP_HP_DRV (1<<3) /* headphone drive capable */ +#define AC_PINCAP_OUT (1<<4) /* output capable */ +#define AC_PINCAP_IN (1<<5) /* input capable */ +#define AC_PINCAP_BALANCE (1<<6) /* balanced I/O capable */ +/* Note: This LR_SWAP pincap is defined in the Realtek ALC883 specification, + * but is marked reserved in the Intel HDA specification.
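+ * (The defines below keep both readings of bit 7: AC_PINCAP_LR_SWAP for
+ * codecs that follow the Realtek usage, and AC_PINCAP_HDMI for pins that
+ * report HDMI capability; which one applies depends on the codec.)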
+ */ +#define AC_PINCAP_LR_SWAP (1<<7) /* L/R swap */ +/* Note: The same bit as LR_SWAP is newly defined as HDMI capability + * in HD-audio specification + */ +#define AC_PINCAP_HDMI (1<<7) /* HDMI pin */ +#define AC_PINCAP_DP (1<<24) /* DisplayPort pin, can + * coexist with AC_PINCAP_HDMI + */ +#define AC_PINCAP_VREF (0x37<<8) +#define AC_PINCAP_VREF_SHIFT 8 +#define AC_PINCAP_EAPD (1<<16) /* EAPD capable */ +#define AC_PINCAP_HBR (1<<27) /* High Bit Rate */ +/* Vref status (used in pin cap) */ +#define AC_PINCAP_VREF_HIZ (1<<0) /* Hi-Z */ +#define AC_PINCAP_VREF_50 (1<<1) /* 50% */ +#define AC_PINCAP_VREF_GRD (1<<2) /* ground */ +#define AC_PINCAP_VREF_80 (1<<4) /* 80% */ +#define AC_PINCAP_VREF_100 (1<<5) /* 100% */ + +/* Amplifier capabilities */ +#define AC_AMPCAP_OFFSET (0x7f<<0) /* 0dB offset */ +#define AC_AMPCAP_OFFSET_SHIFT 0 +#define AC_AMPCAP_NUM_STEPS (0x7f<<8) /* number of steps */ +#define AC_AMPCAP_NUM_STEPS_SHIFT 8 +#define AC_AMPCAP_STEP_SIZE (0x7f<<16) /* step size 0-32dB + * in 0.25dB + */ +#define AC_AMPCAP_STEP_SIZE_SHIFT 16 +#define AC_AMPCAP_MUTE (1<<31) /* mute capable */ +#define AC_AMPCAP_MUTE_SHIFT 31 + +/* Connection list */ +#define AC_CLIST_LENGTH (0x7f<<0) +#define AC_CLIST_LONG (1<<7) + +/* Supported power status */ +#define AC_PWRST_D0SUP (1<<0) +#define AC_PWRST_D1SUP (1<<1) +#define AC_PWRST_D2SUP (1<<2) +#define AC_PWRST_D3SUP (1<<3) +#define AC_PWRST_D3COLDSUP (1<<4) +#define AC_PWRST_S3D3COLDSUP (1<<29) +#define AC_PWRST_CLKSTOP (1<<30) +#define AC_PWRST_EPSS (1U<<31) + +/* Power state values */ +#define AC_PWRST_SETTING (0xf<<0) +#define AC_PWRST_ACTUAL (0xf<<4) +#define AC_PWRST_ACTUAL_SHIFT 4 +#define AC_PWRST_D0 0x00 +#define AC_PWRST_D1 0x01 +#define AC_PWRST_D2 0x02 +#define AC_PWRST_D3 0x03 + +/* Processing capabilities */ +#define AC_PCAP_BENIGN (1<<0) +#define AC_PCAP_NUM_COEF (0xff<<8) +#define AC_PCAP_NUM_COEF_SHIFT 8 + +/* Volume knobs capabilities */ +#define AC_KNBCAP_NUM_STEPS (0x7f<<0) +#define AC_KNBCAP_DELTA (1<<7) + +/* HDMI LPCM capabilities */ +#define AC_LPCMCAP_48K_CP_CHNS (0x0f<<0) /* max channels w/ CP-on */ +#define AC_LPCMCAP_48K_NO_CHNS (0x0f<<4) /* max channels w/o CP-on */ +#define AC_LPCMCAP_48K_20BIT (1<<8) /* 20b bitrate supported */ +#define AC_LPCMCAP_48K_24BIT (1<<9) /* 24b bitrate supported */ +#define AC_LPCMCAP_96K_CP_CHNS (0x0f<<10) /* max channels w/ CP-on */ +#define AC_LPCMCAP_96K_NO_CHNS (0x0f<<14) /* max channels w/o CP-on */ +#define AC_LPCMCAP_96K_20BIT (1<<18) /* 20b bitrate supported */ +#define AC_LPCMCAP_96K_24BIT (1<<19) /* 24b bitrate supported */ +#define AC_LPCMCAP_192K_CP_CHNS (0x0f<<20) /* max channels w/ CP-on */ +#define AC_LPCMCAP_192K_NO_CHNS (0x0f<<24) /* max channels w/o CP-on */ +#define AC_LPCMCAP_192K_20BIT (1<<28) /* 20b bitrate supported */ +#define AC_LPCMCAP_192K_24BIT (1<<29) /* 24b bitrate supported */ +#define AC_LPCMCAP_44K (1<<30) /* 44.1kHz support */ +#define AC_LPCMCAP_44K_MS (1<<31) /* 44.1kHz-multiplies support */ + +/* + * Control Parameters + */ + +/* Amp gain/mute */ +#define AC_AMP_MUTE (1<<7) +#define AC_AMP_GAIN (0x7f) +#define AC_AMP_GET_INDEX (0xf<<0) + +#define AC_AMP_GET_LEFT (1<<13) +#define AC_AMP_GET_RIGHT (0<<13) +#define AC_AMP_GET_OUTPUT (1<<15) +#define AC_AMP_GET_INPUT (0<<15) + +#define AC_AMP_SET_INDEX (0xf<<8) +#define AC_AMP_SET_INDEX_SHIFT 8 +#define AC_AMP_SET_RIGHT (1<<12) +#define AC_AMP_SET_LEFT (1<<13) +#define AC_AMP_SET_INPUT (1<<14) +#define AC_AMP_SET_OUTPUT (1<<15) + +/* DIGITAL1 bits */ +#define AC_DIG1_ENABLE (1<<0) +#define AC_DIG1_V
(1<<1) +#define AC_DIG1_VCFG (1<<2) +#define AC_DIG1_EMPHASIS (1<<3) +#define AC_DIG1_COPYRIGHT (1<<4) +#define AC_DIG1_NONAUDIO (1<<5) +#define AC_DIG1_PROFESSIONAL (1<<6) +#define AC_DIG1_LEVEL (1<<7) + +/* DIGITAL2 bits */ +#define AC_DIG2_CC (0x7f<<0) + +/* Pin widget control - 8bit */ +#define AC_PINCTL_EPT (0x3<<0) +#define AC_PINCTL_EPT_NATIVE 0 +#define AC_PINCTL_EPT_HBR 3 +#define AC_PINCTL_VREFEN (0x7<<0) +#define AC_PINCTL_VREF_HIZ 0 /* Hi-Z */ +#define AC_PINCTL_VREF_50 1 /* 50% */ +#define AC_PINCTL_VREF_GRD 2 /* ground */ +#define AC_PINCTL_VREF_80 4 /* 80% */ +#define AC_PINCTL_VREF_100 5 /* 100% */ +#define AC_PINCTL_IN_EN (1<<5) +#define AC_PINCTL_OUT_EN (1<<6) +#define AC_PINCTL_HP_EN (1<<7) + +/* Pin sense - 32bit */ +#define AC_PINSENSE_IMPEDANCE_MASK (0x7fffffff) +#define AC_PINSENSE_PRESENCE (1<<31) +#define AC_PINSENSE_ELDV (1<<30) /* ELD valid (HDMI) */ + +/* EAPD/BTL enable - 32bit */ +#define AC_EAPDBTL_BALANCED (1<<0) +#define AC_EAPDBTL_EAPD (1<<1) +#define AC_EAPDBTL_LR_SWAP (1<<2) + +/* HDMI ELD data */ +#define AC_ELDD_ELD_VALID (1<<31) +#define AC_ELDD_ELD_DATA 0xff + +/* HDMI DIP size */ +#define AC_DIPSIZE_ELD_BUF (1<<3) /* ELD buf size of packet size */ +#define AC_DIPSIZE_PACK_IDX (0x07<<0) /* packet index */ + +/* HDMI DIP index */ +#define AC_DIPIDX_PACK_IDX (0x07<<5) /* packet index */ +#define AC_DIPIDX_BYTE_IDX (0x1f<<0) /* byte index */ + +/* HDMI DIP xmit (transmit) control */ +#define AC_DIPXMIT_MASK (0x3<<6) +#define AC_DIPXMIT_DISABLE (0x0<<6) /* disable xmit */ +#define AC_DIPXMIT_ONCE (0x2<<6) /* xmit once then disable */ +#define AC_DIPXMIT_BEST (0x3<<6) /* best effort */ + +/* HDMI content protection (CP) control */ +#define AC_CPCTRL_CES (1<<9) /* current encryption state */ +#define AC_CPCTRL_READY (1<<8) /* ready bit */ +#define AC_CPCTRL_SUBTAG (0x1f<<3) /* subtag for unsol-resp */ +#define AC_CPCTRL_STATE (3<<0) /* current CP request state */ + +/* Converter channel <-> HDMI slot mapping */ +#define AC_CVTMAP_HDMI_SLOT (0xf<<0) /* HDMI slot number */ +#define AC_CVTMAP_CHAN (0xf<<4) /* converter channel number */ + +/* configuration default - 32bit */ +#define AC_DEFCFG_SEQUENCE (0xf<<0) +#define AC_DEFCFG_DEF_ASSOC (0xf<<4) +#define AC_DEFCFG_ASSOC_SHIFT 4 +#define AC_DEFCFG_MISC (0xf<<8) +#define AC_DEFCFG_MISC_SHIFT 8 +#define AC_DEFCFG_MISC_NO_PRESENCE (1<<0) +#define AC_DEFCFG_COLOR (0xf<<12) +#define AC_DEFCFG_COLOR_SHIFT 12 +#define AC_DEFCFG_CONN_TYPE (0xf<<16) +#define AC_DEFCFG_CONN_TYPE_SHIFT 16 +#define AC_DEFCFG_DEVICE (0xf<<20) +#define AC_DEFCFG_DEVICE_SHIFT 20 +#define AC_DEFCFG_LOCATION (0x3f<<24) +#define AC_DEFCFG_LOCATION_SHIFT 24 +#define AC_DEFCFG_PORT_CONN (0x3<<30) +#define AC_DEFCFG_PORT_CONN_SHIFT 30 + +/* device types (0x0-0xf) */ +enum { + AC_JACK_LINE_OUT, + AC_JACK_SPEAKER, + AC_JACK_HP_OUT, + AC_JACK_CD, + AC_JACK_SPDIF_OUT, + AC_JACK_DIG_OTHER_OUT, + AC_JACK_MODEM_LINE_SIDE, + AC_JACK_MODEM_HAND_SIDE, + AC_JACK_LINE_IN, + AC_JACK_AUX, + AC_JACK_MIC_IN, + AC_JACK_TELEPHONY, + AC_JACK_SPDIF_IN, + AC_JACK_DIG_OTHER_IN, + AC_JACK_OTHER = 0xf, +}; + +/* jack connection types (0x0-0xf) */ +enum { + AC_JACK_CONN_UNKNOWN, + AC_JACK_CONN_1_8, + AC_JACK_CONN_1_4, + AC_JACK_CONN_ATAPI, + AC_JACK_CONN_RCA, + AC_JACK_CONN_OPTICAL, + AC_JACK_CONN_OTHER_DIGITAL, + AC_JACK_CONN_OTHER_ANALOG, + AC_JACK_CONN_DIN, + AC_JACK_CONN_XLR, + AC_JACK_CONN_RJ11, + AC_JACK_CONN_COMB, + AC_JACK_CONN_OTHER = 0xf, +}; + +/* jack colors (0x0-0xf) */ +enum { + AC_JACK_COLOR_UNKNOWN, + AC_JACK_COLOR_BLACK, + AC_JACK_COLOR_GREY, +
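+    /* the colors continue in spec order (BLUE = 0x3 ... PINK = 0x9);
+     * 0xa-0xd are reserved in the HDA spec, which is presumably why
+     * WHITE below is pinned to 0xe */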
AC_JACK_COLOR_BLUE, + AC_JACK_COLOR_GREEN, + AC_JACK_COLOR_RED, + AC_JACK_COLOR_ORANGE, + AC_JACK_COLOR_YELLOW, + AC_JACK_COLOR_PURPLE, + AC_JACK_COLOR_PINK, + AC_JACK_COLOR_WHITE = 0xe, + AC_JACK_COLOR_OTHER, +}; + +/* Jack location (0x0-0x3f) */ +/* common case */ +enum { + AC_JACK_LOC_NONE, + AC_JACK_LOC_REAR, + AC_JACK_LOC_FRONT, + AC_JACK_LOC_LEFT, + AC_JACK_LOC_RIGHT, + AC_JACK_LOC_TOP, + AC_JACK_LOC_BOTTOM, +}; +/* bits 4-5 */ +enum { + AC_JACK_LOC_EXTERNAL = 0x00, + AC_JACK_LOC_INTERNAL = 0x10, + AC_JACK_LOC_SEPARATE = 0x20, + AC_JACK_LOC_OTHER = 0x30, +}; +enum { + /* external on primary chassis */ + AC_JACK_LOC_REAR_PANEL = 0x07, + AC_JACK_LOC_DRIVE_BAY, + /* internal */ + AC_JACK_LOC_RISER = 0x17, + AC_JACK_LOC_HDMI, + AC_JACK_LOC_ATAPI, + /* others */ + AC_JACK_LOC_MOBILE_IN = 0x37, + AC_JACK_LOC_MOBILE_OUT, +}; + +/* Port connectivity (0-3) */ +enum { + AC_JACK_PORT_COMPLEX, + AC_JACK_PORT_NONE, + AC_JACK_PORT_FIXED, + AC_JACK_PORT_BOTH, +}; + +/* max. connections to a widget */ +#define HDA_MAX_CONNECTIONS 32 + +/* max. codec address */ +#define HDA_MAX_CODEC_ADDRESS 0x0f + +/* max number of PCM devices per card */ +#define HDA_MAX_PCMS 10 + +/* --------------------------------------------------------------------- */ + +#endif diff --git a/hw/audio/intel-hda.c b/hw/audio/intel-hda.c new file mode 100644 index 000000000..8ce9df64e --- /dev/null +++ b/hw/audio/intel-hda.c @@ -0,0 +1,1331 @@ +/* + * Copyright (C) 2010 Red Hat, Inc. + * + * written by Gerd Hoffmann <kraxel@redhat.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 or + * (at your option) version 3 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */ + +#include "qemu/osdep.h" +#include "hw/pci/pci.h" +#include "hw/qdev-properties.h" +#include "hw/pci/msi.h" +#include "qemu/timer.h" +#include "qemu/bitops.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "qemu/error-report.h" +#include "hw/audio/soundhw.h" +#include "intel-hda.h" +#include "migration/vmstate.h" +#include "intel-hda-defs.h" +#include "sysemu/dma.h" +#include "qapi/error.h" +#include "qom/object.h" + +/* --------------------------------------------------------------------- */ +/* hda bus */ + +static Property hda_props[] = { + DEFINE_PROP_UINT32("cad", HDACodecDevice, cad, -1), + DEFINE_PROP_END_OF_LIST() +}; + +static const TypeInfo hda_codec_bus_info = { + .name = TYPE_HDA_BUS, + .parent = TYPE_BUS, + .instance_size = sizeof(HDACodecBus), +}; + +void hda_codec_bus_init(DeviceState *dev, HDACodecBus *bus, size_t bus_size, + hda_codec_response_func response, + hda_codec_xfer_func xfer) +{ + qbus_init(bus, bus_size, TYPE_HDA_BUS, dev, NULL); + bus->response = response; + bus->xfer = xfer; +} + +static void hda_codec_dev_realize(DeviceState *qdev, Error **errp) +{ + HDACodecBus *bus = HDA_BUS(qdev->parent_bus); + HDACodecDevice *dev = HDA_CODEC_DEVICE(qdev); + HDACodecDeviceClass *cdc = HDA_CODEC_DEVICE_GET_CLASS(dev); + + if (dev->cad == -1) { + dev->cad = bus->next_cad; + } + if (dev->cad >= 15) { + error_setg(errp, "HDA audio codec address is full"); + return; + } + bus->next_cad = dev->cad + 1; + if (cdc->init(dev) != 0) { + error_setg(errp, "HDA audio init failed"); + } +} + +static void hda_codec_dev_unrealize(DeviceState *qdev) +{ + HDACodecDevice *dev = HDA_CODEC_DEVICE(qdev); + HDACodecDeviceClass *cdc = HDA_CODEC_DEVICE_GET_CLASS(dev); + + if (cdc->exit) { + cdc->exit(dev); + } +} + +HDACodecDevice *hda_codec_find(HDACodecBus *bus, uint32_t cad) +{ + BusChild *kid; + HDACodecDevice *cdev; + + QTAILQ_FOREACH(kid, &bus->qbus.children, sibling) { + DeviceState *qdev = kid->child; + cdev = HDA_CODEC_DEVICE(qdev); + if (cdev->cad == cad) { + return cdev; + } + } + return NULL; +} + +void hda_codec_response(HDACodecDevice *dev, bool solicited, uint32_t response) +{ + HDACodecBus *bus = HDA_BUS(dev->qdev.parent_bus); + bus->response(dev, solicited, response); +} + +bool hda_codec_xfer(HDACodecDevice *dev, uint32_t stnr, bool output, + uint8_t *buf, uint32_t len) +{ + HDACodecBus *bus = HDA_BUS(dev->qdev.parent_bus); + return bus->xfer(dev, stnr, output, buf, len); +} + +/* --------------------------------------------------------------------- */ +/* intel hda emulation */ + +typedef struct IntelHDAStream IntelHDAStream; +typedef struct IntelHDAState IntelHDAState; +typedef struct IntelHDAReg IntelHDAReg; + +typedef struct bpl { + uint64_t addr; + uint32_t len; + uint32_t flags; +} bpl; + +struct IntelHDAStream { + /* registers */ + uint32_t ctl; + uint32_t lpib; + uint32_t cbl; + uint32_t lvi; + uint32_t fmt; + uint32_t bdlp_lbase; + uint32_t bdlp_ubase; + + /* state */ + bpl *bpl; + uint32_t bentries; + uint32_t bsize, be, bp; +}; + +struct IntelHDAState { + PCIDevice pci; + const char *name; + HDACodecBus codecs; + + /* registers */ + uint32_t g_ctl; + uint32_t wake_en; + uint32_t state_sts; + uint32_t int_ctl; + uint32_t int_sts; + uint32_t wall_clk; + + uint32_t corb_lbase; + uint32_t corb_ubase; + uint32_t corb_rp; + uint32_t corb_wp; + uint32_t corb_ctl; + uint32_t corb_sts; + uint32_t corb_size; + + uint32_t rirb_lbase; + uint32_t rirb_ubase; + uint32_t rirb_wp; + uint32_t rirb_cnt; + uint32_t rirb_ctl; + uint32_t rirb_sts; + uint32_t rirb_size; + 
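+    /* DMA position buffer: the guest enables it by setting bit 0 of the
+     * low base register (ICH6_DPLBASE_ENABLE); intel_hda_xfer() below
+     * then writes each stream's current position to base + 8 * stream
+     * index */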
+ uint32_t dp_lbase; + uint32_t dp_ubase; + + uint32_t icw; + uint32_t irr; + uint32_t ics; + + /* streams */ + IntelHDAStream st[8]; + + /* state */ + MemoryRegion container; + MemoryRegion mmio; + MemoryRegion alias; + uint32_t rirb_count; + int64_t wall_base_ns; + + /* debug logging */ + const IntelHDAReg *last_reg; + uint32_t last_val; + uint32_t last_write; + uint32_t last_sec; + uint32_t repeat_count; + + /* properties */ + uint32_t debug; + OnOffAuto msi; + bool old_msi_addr; +}; + +#define TYPE_INTEL_HDA_GENERIC "intel-hda-generic" + +DECLARE_INSTANCE_CHECKER(IntelHDAState, INTEL_HDA, + TYPE_INTEL_HDA_GENERIC) + +struct IntelHDAReg { + const char *name; /* register name */ + uint32_t size; /* size in bytes */ + uint32_t reset; /* reset value */ + uint32_t wmask; /* write mask */ + uint32_t wclear; /* write 1 to clear bits */ + uint32_t offset; /* location in IntelHDAState */ + uint32_t shift; /* byte access entries for dwords */ + uint32_t stream; + void (*whandler)(IntelHDAState *d, const IntelHDAReg *reg, uint32_t old); + void (*rhandler)(IntelHDAState *d, const IntelHDAReg *reg); +}; + +static void intel_hda_reset(DeviceState *dev); + +/* --------------------------------------------------------------------- */ + +static hwaddr intel_hda_addr(uint32_t lbase, uint32_t ubase) +{ + return ((uint64_t)ubase << 32) | lbase; +} + +static void intel_hda_update_int_sts(IntelHDAState *d) +{ + uint32_t sts = 0; + uint32_t i; + + /* update controller status */ + if (d->rirb_sts & ICH6_RBSTS_IRQ) { + sts |= (1 << 30); + } + if (d->rirb_sts & ICH6_RBSTS_OVERRUN) { + sts |= (1 << 30); + } + if (d->state_sts & d->wake_en) { + sts |= (1 << 30); + } + + /* update stream status */ + for (i = 0; i < 8; i++) { + /* buffer completion interrupt */ + if (d->st[i].ctl & (1 << 26)) { + sts |= (1 << i); + } + } + + /* update global status */ + if (sts & d->int_ctl) { + sts |= (1U << 31); + } + + d->int_sts = sts; +} + +static void intel_hda_update_irq(IntelHDAState *d) +{ + bool msi = msi_enabled(&d->pci); + int level; + + intel_hda_update_int_sts(d); + if (d->int_sts & (1U << 31) && d->int_ctl & (1U << 31)) { + level = 1; + } else { + level = 0; + } + dprint(d, 2, "%s: level %d [%s]\n", __func__, + level, msi ? 
"msi" : "intx"); + if (msi) { + if (level) { + msi_notify(&d->pci, 0); + } + } else { + pci_set_irq(&d->pci, level); + } +} + +static int intel_hda_send_command(IntelHDAState *d, uint32_t verb) +{ + uint32_t cad, nid, data; + HDACodecDevice *codec; + HDACodecDeviceClass *cdc; + + cad = (verb >> 28) & 0x0f; + if (verb & (1 << 27)) { + /* indirect node addressing, not specified in HDA 1.0 */ + dprint(d, 1, "%s: indirect node addressing (guest bug?)\n", __func__); + return -1; + } + nid = (verb >> 20) & 0x7f; + data = verb & 0xfffff; + + codec = hda_codec_find(&d->codecs, cad); + if (codec == NULL) { + dprint(d, 1, "%s: addressed non-existing codec\n", __func__); + return -1; + } + cdc = HDA_CODEC_DEVICE_GET_CLASS(codec); + cdc->command(codec, nid, data); + return 0; +} + +static void intel_hda_corb_run(IntelHDAState *d) +{ + hwaddr addr; + uint32_t rp, verb; + + if (d->ics & ICH6_IRS_BUSY) { + dprint(d, 2, "%s: [icw] verb 0x%08x\n", __func__, d->icw); + intel_hda_send_command(d, d->icw); + return; + } + + for (;;) { + if (!(d->corb_ctl & ICH6_CORBCTL_RUN)) { + dprint(d, 2, "%s: !run\n", __func__); + return; + } + if ((d->corb_rp & 0xff) == d->corb_wp) { + dprint(d, 2, "%s: corb ring empty\n", __func__); + return; + } + if (d->rirb_count == d->rirb_cnt) { + dprint(d, 2, "%s: rirb count reached\n", __func__); + return; + } + + rp = (d->corb_rp + 1) & 0xff; + addr = intel_hda_addr(d->corb_lbase, d->corb_ubase); + verb = ldl_le_pci_dma(&d->pci, addr + 4*rp); + d->corb_rp = rp; + + dprint(d, 2, "%s: [rp 0x%x] verb 0x%08x\n", __func__, rp, verb); + intel_hda_send_command(d, verb); + } +} + +static void intel_hda_response(HDACodecDevice *dev, bool solicited, uint32_t response) +{ + HDACodecBus *bus = HDA_BUS(dev->qdev.parent_bus); + IntelHDAState *d = container_of(bus, IntelHDAState, codecs); + hwaddr addr; + uint32_t wp, ex; + + if (d->ics & ICH6_IRS_BUSY) { + dprint(d, 2, "%s: [irr] response 0x%x, cad 0x%x\n", + __func__, response, dev->cad); + d->irr = response; + d->ics &= ~(ICH6_IRS_BUSY | 0xf0); + d->ics |= (ICH6_IRS_VALID | (dev->cad << 4)); + return; + } + + if (!(d->rirb_ctl & ICH6_RBCTL_DMA_EN)) { + dprint(d, 1, "%s: rirb dma disabled, drop codec response\n", __func__); + return; + } + + ex = (solicited ? 0 : (1 << 4)) | dev->cad; + wp = (d->rirb_wp + 1) & 0xff; + addr = intel_hda_addr(d->rirb_lbase, d->rirb_ubase); + stl_le_pci_dma(&d->pci, addr + 8*wp, response); + stl_le_pci_dma(&d->pci, addr + 8*wp + 4, ex); + d->rirb_wp = wp; + + dprint(d, 2, "%s: [wp 0x%x] response 0x%x, extra 0x%x\n", + __func__, wp, response, ex); + + d->rirb_count++; + if (d->rirb_count == d->rirb_cnt) { + dprint(d, 2, "%s: rirb count reached (%d)\n", __func__, d->rirb_count); + if (d->rirb_ctl & ICH6_RBCTL_IRQ_EN) { + d->rirb_sts |= ICH6_RBSTS_IRQ; + intel_hda_update_irq(d); + } + } else if ((d->corb_rp & 0xff) == d->corb_wp) { + dprint(d, 2, "%s: corb ring empty (%d/%d)\n", __func__, + d->rirb_count, d->rirb_cnt); + if (d->rirb_ctl & ICH6_RBCTL_IRQ_EN) { + d->rirb_sts |= ICH6_RBSTS_IRQ; + intel_hda_update_irq(d); + } + } +} + +static bool intel_hda_xfer(HDACodecDevice *dev, uint32_t stnr, bool output, + uint8_t *buf, uint32_t len) +{ + HDACodecBus *bus = HDA_BUS(dev->qdev.parent_bus); + IntelHDAState *d = container_of(bus, IntelHDAState, codecs); + hwaddr addr; + uint32_t s, copy, left; + IntelHDAStream *st; + bool irq = false; + + st = output ? 
d->st + 4 : d->st; + for (s = 0; s < 4; s++) { + if (stnr == ((st[s].ctl >> 20) & 0x0f)) { + st = st + s; + break; + } + } + if (s == 4) { + return false; + } + if (st->bpl == NULL) { + return false; + } + + left = len; + s = st->bentries; + while (left > 0 && s-- > 0) { + copy = left; + if (copy > st->bsize - st->lpib) + copy = st->bsize - st->lpib; + if (copy > st->bpl[st->be].len - st->bp) + copy = st->bpl[st->be].len - st->bp; + + dprint(d, 3, "dma: entry %d, pos %d/%d, copy %d\n", + st->be, st->bp, st->bpl[st->be].len, copy); + + pci_dma_rw(&d->pci, st->bpl[st->be].addr + st->bp, buf, copy, !output); + st->lpib += copy; + st->bp += copy; + buf += copy; + left -= copy; + + if (st->bpl[st->be].len == st->bp) { + /* bpl entry filled */ + if (st->bpl[st->be].flags & 0x01) { + irq = true; + } + st->bp = 0; + st->be++; + if (st->be == st->bentries) { + /* bpl wrap around */ + st->be = 0; + st->lpib = 0; + } + } + } + if (d->dp_lbase & 0x01) { + s = st - d->st; + addr = intel_hda_addr(d->dp_lbase & ~0x01, d->dp_ubase); + stl_le_pci_dma(&d->pci, addr + 8*s, st->lpib); + } + dprint(d, 3, "dma: --\n"); + + if (irq) { + st->ctl |= (1 << 26); /* buffer completion interrupt */ + intel_hda_update_irq(d); + } + return true; +} + +static void intel_hda_parse_bdl(IntelHDAState *d, IntelHDAStream *st) +{ + hwaddr addr; + uint8_t buf[16]; + uint32_t i; + + addr = intel_hda_addr(st->bdlp_lbase, st->bdlp_ubase); + st->bentries = st->lvi +1; + g_free(st->bpl); + st->bpl = g_malloc(sizeof(bpl) * st->bentries); + for (i = 0; i < st->bentries; i++, addr += 16) { + pci_dma_read(&d->pci, addr, buf, 16); + st->bpl[i].addr = le64_to_cpu(*(uint64_t *)buf); + st->bpl[i].len = le32_to_cpu(*(uint32_t *)(buf + 8)); + st->bpl[i].flags = le32_to_cpu(*(uint32_t *)(buf + 12)); + dprint(d, 1, "bdl/%d: 0x%" PRIx64 " +0x%x, 0x%x\n", + i, st->bpl[i].addr, st->bpl[i].len, st->bpl[i].flags); + } + + st->bsize = st->cbl; + st->lpib = 0; + st->be = 0; + st->bp = 0; +} + +static void intel_hda_notify_codecs(IntelHDAState *d, uint32_t stream, bool running, bool output) +{ + BusChild *kid; + HDACodecDevice *cdev; + + QTAILQ_FOREACH(kid, &d->codecs.qbus.children, sibling) { + DeviceState *qdev = kid->child; + HDACodecDeviceClass *cdc; + + cdev = HDA_CODEC_DEVICE(qdev); + cdc = HDA_CODEC_DEVICE_GET_CLASS(cdev); + if (cdc->stream) { + cdc->stream(cdev, stream, running, output); + } + } +} + +/* --------------------------------------------------------------------- */ + +static void intel_hda_set_g_ctl(IntelHDAState *d, const IntelHDAReg *reg, uint32_t old) +{ + if ((d->g_ctl & ICH6_GCTL_RESET) == 0) { + intel_hda_reset(DEVICE(d)); + } +} + +static void intel_hda_set_wake_en(IntelHDAState *d, const IntelHDAReg *reg, uint32_t old) +{ + intel_hda_update_irq(d); +} + +static void intel_hda_set_state_sts(IntelHDAState *d, const IntelHDAReg *reg, uint32_t old) +{ + intel_hda_update_irq(d); +} + +static void intel_hda_set_int_ctl(IntelHDAState *d, const IntelHDAReg *reg, uint32_t old) +{ + intel_hda_update_irq(d); +} + +static void intel_hda_get_wall_clk(IntelHDAState *d, const IntelHDAReg *reg) +{ + int64_t ns; + + ns = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - d->wall_base_ns; + d->wall_clk = (uint32_t)(ns * 24 / 1000); /* 24 MHz */ +} + +static void intel_hda_set_corb_wp(IntelHDAState *d, const IntelHDAReg *reg, uint32_t old) +{ + intel_hda_corb_run(d); +} + +static void intel_hda_set_corb_ctl(IntelHDAState *d, const IntelHDAReg *reg, uint32_t old) +{ + intel_hda_corb_run(d); +} + +static void intel_hda_set_rirb_wp(IntelHDAState *d, const 
IntelHDAReg *reg, uint32_t old) +{ + if (d->rirb_wp & ICH6_RIRBWP_RST) { + d->rirb_wp = 0; + } +} + +static void intel_hda_set_rirb_sts(IntelHDAState *d, const IntelHDAReg *reg, uint32_t old) +{ + intel_hda_update_irq(d); + + if ((old & ICH6_RBSTS_IRQ) && !(d->rirb_sts & ICH6_RBSTS_IRQ)) { + /* cleared ICH6_RBSTS_IRQ */ + d->rirb_count = 0; + intel_hda_corb_run(d); + } +} + +static void intel_hda_set_ics(IntelHDAState *d, const IntelHDAReg *reg, uint32_t old) +{ + if (d->ics & ICH6_IRS_BUSY) { + intel_hda_corb_run(d); + } +} + +static void intel_hda_set_st_ctl(IntelHDAState *d, const IntelHDAReg *reg, uint32_t old) +{ + bool output = reg->stream >= 4; + IntelHDAStream *st = d->st + reg->stream; + + if (st->ctl & 0x01) { + /* reset */ + dprint(d, 1, "st #%d: reset\n", reg->stream); + st->ctl = SD_STS_FIFO_READY << 24; + } + if ((st->ctl & 0x02) != (old & 0x02)) { + uint32_t stnr = (st->ctl >> 20) & 0x0f; + /* run bit flipped */ + if (st->ctl & 0x02) { + /* start */ + dprint(d, 1, "st #%d: start %d (ring buf %d bytes)\n", + reg->stream, stnr, st->cbl); + intel_hda_parse_bdl(d, st); + intel_hda_notify_codecs(d, stnr, true, output); + } else { + /* stop */ + dprint(d, 1, "st #%d: stop %d\n", reg->stream, stnr); + intel_hda_notify_codecs(d, stnr, false, output); + } + } + intel_hda_update_irq(d); +} + +/* --------------------------------------------------------------------- */ + +#define ST_REG(_n, _o) (0x80 + (_n) * 0x20 + (_o)) + +static const struct IntelHDAReg regtab[] = { + /* global */ + [ ICH6_REG_GCAP ] = { + .name = "GCAP", + .size = 2, + .reset = 0x4401, + }, + [ ICH6_REG_VMIN ] = { + .name = "VMIN", + .size = 1, + }, + [ ICH6_REG_VMAJ ] = { + .name = "VMAJ", + .size = 1, + .reset = 1, + }, + [ ICH6_REG_OUTPAY ] = { + .name = "OUTPAY", + .size = 2, + .reset = 0x3c, + }, + [ ICH6_REG_INPAY ] = { + .name = "INPAY", + .size = 2, + .reset = 0x1d, + }, + [ ICH6_REG_GCTL ] = { + .name = "GCTL", + .size = 4, + .wmask = 0x0103, + .offset = offsetof(IntelHDAState, g_ctl), + .whandler = intel_hda_set_g_ctl, + }, + [ ICH6_REG_WAKEEN ] = { + .name = "WAKEEN", + .size = 2, + .wmask = 0x7fff, + .offset = offsetof(IntelHDAState, wake_en), + .whandler = intel_hda_set_wake_en, + }, + [ ICH6_REG_STATESTS ] = { + .name = "STATESTS", + .size = 2, + .wmask = 0x7fff, + .wclear = 0x7fff, + .offset = offsetof(IntelHDAState, state_sts), + .whandler = intel_hda_set_state_sts, + }, + + /* interrupts */ + [ ICH6_REG_INTCTL ] = { + .name = "INTCTL", + .size = 4, + .wmask = 0xc00000ff, + .offset = offsetof(IntelHDAState, int_ctl), + .whandler = intel_hda_set_int_ctl, + }, + [ ICH6_REG_INTSTS ] = { + .name = "INTSTS", + .size = 4, + .wmask = 0xc00000ff, + .wclear = 0xc00000ff, + .offset = offsetof(IntelHDAState, int_sts), + }, + + /* misc */ + [ ICH6_REG_WALLCLK ] = { + .name = "WALLCLK", + .size = 4, + .offset = offsetof(IntelHDAState, wall_clk), + .rhandler = intel_hda_get_wall_clk, + }, + + /* dma engine */ + [ ICH6_REG_CORBLBASE ] = { + .name = "CORBLBASE", + .size = 4, + .wmask = 0xffffff80, + .offset = offsetof(IntelHDAState, corb_lbase), + }, + [ ICH6_REG_CORBUBASE ] = { + .name = "CORBUBASE", + .size = 4, + .wmask = 0xffffffff, + .offset = offsetof(IntelHDAState, corb_ubase), + }, + [ ICH6_REG_CORBWP ] = { + .name = "CORBWP", + .size = 2, + .wmask = 0xff, + .offset = offsetof(IntelHDAState, corb_wp), + .whandler = intel_hda_set_corb_wp, + }, + [ ICH6_REG_CORBRP ] = { + .name = "CORBRP", + .size = 2, + .wmask = 0x80ff, + .offset = offsetof(IntelHDAState, corb_rp), + }, + [ ICH6_REG_CORBCTL ] = { + .name = 
"CORBCTL", + .size = 1, + .wmask = 0x03, + .offset = offsetof(IntelHDAState, corb_ctl), + .whandler = intel_hda_set_corb_ctl, + }, + [ ICH6_REG_CORBSTS ] = { + .name = "CORBSTS", + .size = 1, + .wmask = 0x01, + .wclear = 0x01, + .offset = offsetof(IntelHDAState, corb_sts), + }, + [ ICH6_REG_CORBSIZE ] = { + .name = "CORBSIZE", + .size = 1, + .reset = 0x42, + .offset = offsetof(IntelHDAState, corb_size), + }, + [ ICH6_REG_RIRBLBASE ] = { + .name = "RIRBLBASE", + .size = 4, + .wmask = 0xffffff80, + .offset = offsetof(IntelHDAState, rirb_lbase), + }, + [ ICH6_REG_RIRBUBASE ] = { + .name = "RIRBUBASE", + .size = 4, + .wmask = 0xffffffff, + .offset = offsetof(IntelHDAState, rirb_ubase), + }, + [ ICH6_REG_RIRBWP ] = { + .name = "RIRBWP", + .size = 2, + .wmask = 0x8000, + .offset = offsetof(IntelHDAState, rirb_wp), + .whandler = intel_hda_set_rirb_wp, + }, + [ ICH6_REG_RINTCNT ] = { + .name = "RINTCNT", + .size = 2, + .wmask = 0xff, + .offset = offsetof(IntelHDAState, rirb_cnt), + }, + [ ICH6_REG_RIRBCTL ] = { + .name = "RIRBCTL", + .size = 1, + .wmask = 0x07, + .offset = offsetof(IntelHDAState, rirb_ctl), + }, + [ ICH6_REG_RIRBSTS ] = { + .name = "RIRBSTS", + .size = 1, + .wmask = 0x05, + .wclear = 0x05, + .offset = offsetof(IntelHDAState, rirb_sts), + .whandler = intel_hda_set_rirb_sts, + }, + [ ICH6_REG_RIRBSIZE ] = { + .name = "RIRBSIZE", + .size = 1, + .reset = 0x42, + .offset = offsetof(IntelHDAState, rirb_size), + }, + + [ ICH6_REG_DPLBASE ] = { + .name = "DPLBASE", + .size = 4, + .wmask = 0xffffff81, + .offset = offsetof(IntelHDAState, dp_lbase), + }, + [ ICH6_REG_DPUBASE ] = { + .name = "DPUBASE", + .size = 4, + .wmask = 0xffffffff, + .offset = offsetof(IntelHDAState, dp_ubase), + }, + + [ ICH6_REG_IC ] = { + .name = "ICW", + .size = 4, + .wmask = 0xffffffff, + .offset = offsetof(IntelHDAState, icw), + }, + [ ICH6_REG_IR ] = { + .name = "IRR", + .size = 4, + .offset = offsetof(IntelHDAState, irr), + }, + [ ICH6_REG_IRS ] = { + .name = "ICS", + .size = 2, + .wmask = 0x0003, + .wclear = 0x0002, + .offset = offsetof(IntelHDAState, ics), + .whandler = intel_hda_set_ics, + }, + +#define HDA_STREAM(_t, _i) \ + [ ST_REG(_i, ICH6_REG_SD_CTL) ] = { \ + .stream = _i, \ + .name = _t stringify(_i) " CTL", \ + .size = 4, \ + .wmask = 0x1cff001f, \ + .offset = offsetof(IntelHDAState, st[_i].ctl), \ + .whandler = intel_hda_set_st_ctl, \ + }, \ + [ ST_REG(_i, ICH6_REG_SD_CTL) + 2] = { \ + .stream = _i, \ + .name = _t stringify(_i) " CTL(stnr)", \ + .size = 1, \ + .shift = 16, \ + .wmask = 0x00ff0000, \ + .offset = offsetof(IntelHDAState, st[_i].ctl), \ + .whandler = intel_hda_set_st_ctl, \ + }, \ + [ ST_REG(_i, ICH6_REG_SD_STS)] = { \ + .stream = _i, \ + .name = _t stringify(_i) " CTL(sts)", \ + .size = 1, \ + .shift = 24, \ + .wmask = 0x1c000000, \ + .wclear = 0x1c000000, \ + .offset = offsetof(IntelHDAState, st[_i].ctl), \ + .whandler = intel_hda_set_st_ctl, \ + .reset = SD_STS_FIFO_READY << 24 \ + }, \ + [ ST_REG(_i, ICH6_REG_SD_LPIB) ] = { \ + .stream = _i, \ + .name = _t stringify(_i) " LPIB", \ + .size = 4, \ + .offset = offsetof(IntelHDAState, st[_i].lpib), \ + }, \ + [ ST_REG(_i, ICH6_REG_SD_CBL) ] = { \ + .stream = _i, \ + .name = _t stringify(_i) " CBL", \ + .size = 4, \ + .wmask = 0xffffffff, \ + .offset = offsetof(IntelHDAState, st[_i].cbl), \ + }, \ + [ ST_REG(_i, ICH6_REG_SD_LVI) ] = { \ + .stream = _i, \ + .name = _t stringify(_i) " LVI", \ + .size = 2, \ + .wmask = 0x00ff, \ + .offset = offsetof(IntelHDAState, st[_i].lvi), \ + }, \ + [ ST_REG(_i, ICH6_REG_SD_FIFOSIZE) ] = { \ + .stream = 
_i, \ + .name = _t stringify(_i) " FIFOS", \ + .size = 2, \ + .reset = HDA_BUFFER_SIZE, \ + }, \ + [ ST_REG(_i, ICH6_REG_SD_FORMAT) ] = { \ + .stream = _i, \ + .name = _t stringify(_i) " FMT", \ + .size = 2, \ + .wmask = 0x7f7f, \ + .offset = offsetof(IntelHDAState, st[_i].fmt), \ + }, \ + [ ST_REG(_i, ICH6_REG_SD_BDLPL) ] = { \ + .stream = _i, \ + .name = _t stringify(_i) " BDLPL", \ + .size = 4, \ + .wmask = 0xffffff80, \ + .offset = offsetof(IntelHDAState, st[_i].bdlp_lbase), \ + }, \ + [ ST_REG(_i, ICH6_REG_SD_BDLPU) ] = { \ + .stream = _i, \ + .name = _t stringify(_i) " BDLPU", \ + .size = 4, \ + .wmask = 0xffffffff, \ + .offset = offsetof(IntelHDAState, st[_i].bdlp_ubase), \ + }, \ + + HDA_STREAM("IN", 0) + HDA_STREAM("IN", 1) + HDA_STREAM("IN", 2) + HDA_STREAM("IN", 3) + + HDA_STREAM("OUT", 4) + HDA_STREAM("OUT", 5) + HDA_STREAM("OUT", 6) + HDA_STREAM("OUT", 7) + +}; + +static const IntelHDAReg *intel_hda_reg_find(IntelHDAState *d, hwaddr addr) +{ + const IntelHDAReg *reg; + + if (addr >= ARRAY_SIZE(regtab)) { + goto noreg; + } + reg = regtab+addr; + if (reg->name == NULL) { + goto noreg; + } + return reg; + +noreg: + dprint(d, 1, "unknown register, addr 0x%x\n", (int) addr); + return NULL; +} + +static uint32_t *intel_hda_reg_addr(IntelHDAState *d, const IntelHDAReg *reg) +{ + uint8_t *addr = (void*)d; + + addr += reg->offset; + return (uint32_t*)addr; +} + +static void intel_hda_reg_write(IntelHDAState *d, const IntelHDAReg *reg, uint32_t val, + uint32_t wmask) +{ + uint32_t *addr; + uint32_t old; + + if (!reg) { + return; + } + if (!reg->wmask) { + qemu_log_mask(LOG_GUEST_ERROR, "intel-hda: write to r/o reg %s\n", + reg->name); + return; + } + + if (d->debug) { + time_t now = time(NULL); + if (d->last_write && d->last_reg == reg && d->last_val == val) { + d->repeat_count++; + if (d->last_sec != now) { + dprint(d, 2, "previous register op repeated %d times\n", d->repeat_count); + d->last_sec = now; + d->repeat_count = 0; + } + } else { + if (d->repeat_count) { + dprint(d, 2, "previous register op repeated %d times\n", d->repeat_count); + } + dprint(d, 2, "write %-16s: 0x%x (%x)\n", reg->name, val, wmask); + d->last_write = 1; + d->last_reg = reg; + d->last_val = val; + d->last_sec = now; + d->repeat_count = 0; + } + } + assert(reg->offset != 0); + + addr = intel_hda_reg_addr(d, reg); + old = *addr; + + if (reg->shift) { + val <<= reg->shift; + wmask <<= reg->shift; + } + wmask &= reg->wmask; + *addr &= ~wmask; + *addr |= wmask & val; + *addr &= ~(val & reg->wclear); + + if (reg->whandler) { + reg->whandler(d, reg, old); + } +} + +static uint32_t intel_hda_reg_read(IntelHDAState *d, const IntelHDAReg *reg, + uint32_t rmask) +{ + uint32_t *addr, ret; + + if (!reg) { + return 0; + } + + if (reg->rhandler) { + reg->rhandler(d, reg); + } + + if (reg->offset == 0) { + /* constant read-only register */ + ret = reg->reset; + } else { + addr = intel_hda_reg_addr(d, reg); + ret = *addr; + if (reg->shift) { + ret >>= reg->shift; + } + ret &= rmask; + } + if (d->debug) { + time_t now = time(NULL); + if (!d->last_write && d->last_reg == reg && d->last_val == ret) { + d->repeat_count++; + if (d->last_sec != now) { + dprint(d, 2, "previous register op repeated %d times\n", d->repeat_count); + d->last_sec = now; + d->repeat_count = 0; + } + } else { + if (d->repeat_count) { + dprint(d, 2, "previous register op repeated %d times\n", d->repeat_count); + } + dprint(d, 2, "read %-16s: 0x%x (%x)\n", reg->name, ret, rmask); + d->last_write = 0; + d->last_reg = reg; + d->last_val = ret; + d->last_sec = 
now; + d->repeat_count = 0; + } + } + return ret; +} + +static void intel_hda_regs_reset(IntelHDAState *d) +{ + uint32_t *addr; + int i; + + for (i = 0; i < ARRAY_SIZE(regtab); i++) { + if (regtab[i].name == NULL) { + continue; + } + if (regtab[i].offset == 0) { + continue; + } + addr = intel_hda_reg_addr(d, regtab + i); + *addr = regtab[i].reset; + } +} + +/* --------------------------------------------------------------------- */ + +static void intel_hda_mmio_write(void *opaque, hwaddr addr, uint64_t val, + unsigned size) +{ + IntelHDAState *d = opaque; + const IntelHDAReg *reg = intel_hda_reg_find(d, addr); + + intel_hda_reg_write(d, reg, val, MAKE_64BIT_MASK(0, size * 8)); +} + +static uint64_t intel_hda_mmio_read(void *opaque, hwaddr addr, unsigned size) +{ + IntelHDAState *d = opaque; + const IntelHDAReg *reg = intel_hda_reg_find(d, addr); + + return intel_hda_reg_read(d, reg, MAKE_64BIT_MASK(0, size * 8)); +} + +static const MemoryRegionOps intel_hda_mmio_ops = { + .read = intel_hda_mmio_read, + .write = intel_hda_mmio_write, + .impl = { + .min_access_size = 1, + .max_access_size = 4, + }, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +/* --------------------------------------------------------------------- */ + +static void intel_hda_reset(DeviceState *dev) +{ + BusChild *kid; + IntelHDAState *d = INTEL_HDA(dev); + HDACodecDevice *cdev; + + intel_hda_regs_reset(d); + d->wall_base_ns = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + + /* reset codecs */ + QTAILQ_FOREACH(kid, &d->codecs.qbus.children, sibling) { + DeviceState *qdev = kid->child; + cdev = HDA_CODEC_DEVICE(qdev); + device_legacy_reset(DEVICE(cdev)); + d->state_sts |= (1 << cdev->cad); + } + intel_hda_update_irq(d); +} + +static void intel_hda_realize(PCIDevice *pci, Error **errp) +{ + IntelHDAState *d = INTEL_HDA(pci); + uint8_t *conf = d->pci.config; + Error *err = NULL; + int ret; + + d->name = object_get_typename(OBJECT(d)); + + pci_config_set_interrupt_pin(conf, 1); + + /* HDCTL off 0x40 bit 0 selects signaling mode (1-HDA, 0 - Ac97) 18.1.19 */ + conf[0x40] = 0x01; + + if (d->msi != ON_OFF_AUTO_OFF) { + ret = msi_init(&d->pci, d->old_msi_addr ? 
0x50 : 0x60, + 1, true, false, &err); + /* Any error other than -ENOTSUP(board's MSI support is broken) + * is a programming error */ + assert(!ret || ret == -ENOTSUP); + if (ret && d->msi == ON_OFF_AUTO_ON) { + /* Can't satisfy user's explicit msi=on request, fail */ + error_append_hint(&err, "You have to use msi=auto (default) or " + "msi=off with this machine type.\n"); + error_propagate(errp, err); + return; + } + assert(!err || d->msi == ON_OFF_AUTO_AUTO); + /* With msi=auto, we fall back to MSI off silently */ + error_free(err); + } + + memory_region_init(&d->container, OBJECT(d), + "intel-hda-container", 0x4000); + memory_region_init_io(&d->mmio, OBJECT(d), &intel_hda_mmio_ops, d, + "intel-hda", 0x2000); + memory_region_add_subregion(&d->container, 0x0000, &d->mmio); + memory_region_init_alias(&d->alias, OBJECT(d), "intel-hda-alias", + &d->mmio, 0, 0x2000); + memory_region_add_subregion(&d->container, 0x2000, &d->alias); + pci_register_bar(&d->pci, 0, 0, &d->container); + + hda_codec_bus_init(DEVICE(pci), &d->codecs, sizeof(d->codecs), + intel_hda_response, intel_hda_xfer); +} + +static void intel_hda_exit(PCIDevice *pci) +{ + IntelHDAState *d = INTEL_HDA(pci); + + msi_uninit(&d->pci); +} + +static int intel_hda_post_load(void *opaque, int version) +{ + IntelHDAState* d = opaque; + int i; + + dprint(d, 1, "%s\n", __func__); + for (i = 0; i < ARRAY_SIZE(d->st); i++) { + if (d->st[i].ctl & 0x02) { + intel_hda_parse_bdl(d, &d->st[i]); + } + } + intel_hda_update_irq(d); + return 0; +} + +static const VMStateDescription vmstate_intel_hda_stream = { + .name = "intel-hda-stream", + .version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(ctl, IntelHDAStream), + VMSTATE_UINT32(lpib, IntelHDAStream), + VMSTATE_UINT32(cbl, IntelHDAStream), + VMSTATE_UINT32(lvi, IntelHDAStream), + VMSTATE_UINT32(fmt, IntelHDAStream), + VMSTATE_UINT32(bdlp_lbase, IntelHDAStream), + VMSTATE_UINT32(bdlp_ubase, IntelHDAStream), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_intel_hda = { + .name = "intel-hda", + .version_id = 1, + .post_load = intel_hda_post_load, + .fields = (VMStateField[]) { + VMSTATE_PCI_DEVICE(pci, IntelHDAState), + + /* registers */ + VMSTATE_UINT32(g_ctl, IntelHDAState), + VMSTATE_UINT32(wake_en, IntelHDAState), + VMSTATE_UINT32(state_sts, IntelHDAState), + VMSTATE_UINT32(int_ctl, IntelHDAState), + VMSTATE_UINT32(int_sts, IntelHDAState), + VMSTATE_UINT32(wall_clk, IntelHDAState), + VMSTATE_UINT32(corb_lbase, IntelHDAState), + VMSTATE_UINT32(corb_ubase, IntelHDAState), + VMSTATE_UINT32(corb_rp, IntelHDAState), + VMSTATE_UINT32(corb_wp, IntelHDAState), + VMSTATE_UINT32(corb_ctl, IntelHDAState), + VMSTATE_UINT32(corb_sts, IntelHDAState), + VMSTATE_UINT32(corb_size, IntelHDAState), + VMSTATE_UINT32(rirb_lbase, IntelHDAState), + VMSTATE_UINT32(rirb_ubase, IntelHDAState), + VMSTATE_UINT32(rirb_wp, IntelHDAState), + VMSTATE_UINT32(rirb_cnt, IntelHDAState), + VMSTATE_UINT32(rirb_ctl, IntelHDAState), + VMSTATE_UINT32(rirb_sts, IntelHDAState), + VMSTATE_UINT32(rirb_size, IntelHDAState), + VMSTATE_UINT32(dp_lbase, IntelHDAState), + VMSTATE_UINT32(dp_ubase, IntelHDAState), + VMSTATE_UINT32(icw, IntelHDAState), + VMSTATE_UINT32(irr, IntelHDAState), + VMSTATE_UINT32(ics, IntelHDAState), + VMSTATE_STRUCT_ARRAY(st, IntelHDAState, 8, 0, + vmstate_intel_hda_stream, + IntelHDAStream), + + /* additional state info */ + VMSTATE_UINT32(rirb_count, IntelHDAState), + VMSTATE_INT64(wall_base_ns, IntelHDAState), + + VMSTATE_END_OF_LIST() + } +}; + +static Property 
intel_hda_properties[] = { + DEFINE_PROP_UINT32("debug", IntelHDAState, debug, 0), + DEFINE_PROP_ON_OFF_AUTO("msi", IntelHDAState, msi, ON_OFF_AUTO_AUTO), + DEFINE_PROP_BOOL("old_msi_addr", IntelHDAState, old_msi_addr, false), + DEFINE_PROP_END_OF_LIST(), +}; + +static void intel_hda_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + + k->realize = intel_hda_realize; + k->exit = intel_hda_exit; + k->vendor_id = PCI_VENDOR_ID_INTEL; + k->class_id = PCI_CLASS_MULTIMEDIA_HD_AUDIO; + dc->reset = intel_hda_reset; + dc->vmsd = &vmstate_intel_hda; + device_class_set_props(dc, intel_hda_properties); +} + +static void intel_hda_class_init_ich6(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + + k->device_id = 0x2668; + k->revision = 1; + set_bit(DEVICE_CATEGORY_SOUND, dc->categories); + dc->desc = "Intel HD Audio Controller (ich6)"; +} + +static void intel_hda_class_init_ich9(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + + k->device_id = 0x293e; + k->revision = 3; + set_bit(DEVICE_CATEGORY_SOUND, dc->categories); + dc->desc = "Intel HD Audio Controller (ich9)"; +} + +static const TypeInfo intel_hda_info = { + .name = TYPE_INTEL_HDA_GENERIC, + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(IntelHDAState), + .class_init = intel_hda_class_init, + .abstract = true, + .interfaces = (InterfaceInfo[]) { + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { }, + }, +}; + +static const TypeInfo intel_hda_info_ich6 = { + .name = "intel-hda", + .parent = TYPE_INTEL_HDA_GENERIC, + .class_init = intel_hda_class_init_ich6, +}; + +static const TypeInfo intel_hda_info_ich9 = { + .name = "ich9-intel-hda", + .parent = TYPE_INTEL_HDA_GENERIC, + .class_init = intel_hda_class_init_ich9, +}; + +static void hda_codec_device_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *k = DEVICE_CLASS(klass); + k->realize = hda_codec_dev_realize; + k->unrealize = hda_codec_dev_unrealize; + set_bit(DEVICE_CATEGORY_SOUND, k->categories); + k->bus_type = TYPE_HDA_BUS; + device_class_set_props(k, hda_props); +} + +static const TypeInfo hda_codec_device_type_info = { + .name = TYPE_HDA_CODEC_DEVICE, + .parent = TYPE_DEVICE, + .instance_size = sizeof(HDACodecDevice), + .abstract = true, + .class_size = sizeof(HDACodecDeviceClass), + .class_init = hda_codec_device_class_init, +}; + +/* + * create intel hda controller with codec attached to it, + * so '-soundhw hda' works. 
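+ * (equivalent to the '-device intel-hda -device hda-duplex' invocation named in the deprecation warning below)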
+ */ +static int intel_hda_and_codec_init(PCIBus *bus) +{ + DeviceState *controller; + BusState *hdabus; + DeviceState *codec; + + warn_report("'-soundhw hda' is deprecated, " + "please use '-device intel-hda -device hda-duplex' instead"); + controller = DEVICE(pci_create_simple(bus, -1, "intel-hda")); + hdabus = QLIST_FIRST(&controller->child_bus); + codec = qdev_new("hda-duplex"); + qdev_realize_and_unref(codec, hdabus, &error_fatal); + return 0; +} + +static void intel_hda_register_types(void) +{ + type_register_static(&hda_codec_bus_info); + type_register_static(&intel_hda_info); + type_register_static(&intel_hda_info_ich6); + type_register_static(&intel_hda_info_ich9); + type_register_static(&hda_codec_device_type_info); + pci_register_soundhw("hda", "Intel HD Audio", intel_hda_and_codec_init); +} + +type_init(intel_hda_register_types) diff --git a/hw/audio/intel-hda.h b/hw/audio/intel-hda.h new file mode 100644 index 000000000..f78c1833e --- /dev/null +++ b/hw/audio/intel-hda.h @@ -0,0 +1,66 @@ +#ifndef HW_INTEL_HDA_H +#define HW_INTEL_HDA_H + +#include "hw/qdev-core.h" +#include "qom/object.h" + +/* --------------------------------------------------------------------- */ +/* hda bus */ + +#define TYPE_HDA_CODEC_DEVICE "hda-codec" +OBJECT_DECLARE_TYPE(HDACodecDevice, HDACodecDeviceClass, + HDA_CODEC_DEVICE) + +#define TYPE_HDA_BUS "HDA" +OBJECT_DECLARE_SIMPLE_TYPE(HDACodecBus, HDA_BUS) + + +typedef void (*hda_codec_response_func)(HDACodecDevice *dev, + bool solicited, uint32_t response); +typedef bool (*hda_codec_xfer_func)(HDACodecDevice *dev, + uint32_t stnr, bool output, + uint8_t *buf, uint32_t len); + +struct HDACodecBus { + BusState qbus; + uint32_t next_cad; + hda_codec_response_func response; + hda_codec_xfer_func xfer; +}; + +struct HDACodecDeviceClass { + DeviceClass parent_class; + + int (*init)(HDACodecDevice *dev); + void (*exit)(HDACodecDevice *dev); + void (*command)(HDACodecDevice *dev, uint32_t nid, uint32_t data); + void (*stream)(HDACodecDevice *dev, uint32_t stnr, bool running, bool output); +}; + +struct HDACodecDevice { + DeviceState qdev; + uint32_t cad; /* codec address */ +}; + +void hda_codec_bus_init(DeviceState *dev, HDACodecBus *bus, size_t bus_size, + hda_codec_response_func response, + hda_codec_xfer_func xfer); +HDACodecDevice *hda_codec_find(HDACodecBus *bus, uint32_t cad); + +void hda_codec_response(HDACodecDevice *dev, bool solicited, uint32_t response); +bool hda_codec_xfer(HDACodecDevice *dev, uint32_t stnr, bool output, + uint8_t *buf, uint32_t len); + +/* --------------------------------------------------------------------- */ + +#define dprint(_dev, _level, _fmt, ...) \ + do { \ + if (_dev->debug >= _level) { \ + fprintf(stderr, "%s: ", _dev->name); \ + fprintf(stderr, _fmt, ## __VA_ARGS__); \ + } \ + } while (0) + +/* --------------------------------------------------------------------- */ + +#endif diff --git a/hw/audio/lm4549.c b/hw/audio/lm4549.c new file mode 100644 index 000000000..32b1481b5 --- /dev/null +++ b/hw/audio/lm4549.c @@ -0,0 +1,336 @@ +/* + * LM4549 Audio Codec Interface + * + * Copyright (c) 2011 + * Written by Mathieu Sonet - www.elasticsheep.com + * + * This code is licensed under the GPL. + * + * ***************************************************************** + * + * This driver emulates the LM4549 codec. + * + * It supports only one playback voice and no record voice. 
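+ * The register set follows the standard AC'97 codec register map.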
+ */ + +#include "qemu/osdep.h" +#include "hw/hw.h" +#include "audio/audio.h" +#include "lm4549.h" +#include "migration/vmstate.h" + +#if 0 +#define LM4549_DEBUG 1 +#endif + +#if 0 +#define LM4549_DUMP_DAC_INPUT 1 +#endif + +#ifdef LM4549_DEBUG +#define DPRINTF(fmt, ...) \ +do { printf("lm4549: " fmt , ## __VA_ARGS__); } while (0) +#else +#define DPRINTF(fmt, ...) do {} while (0) +#endif + +#if defined(LM4549_DUMP_DAC_INPUT) +static FILE *fp_dac_input; +#endif + +/* LM4549 register list */ +enum { + LM4549_Reset = 0x00, + LM4549_Master_Volume = 0x02, + LM4549_Line_Out_Volume = 0x04, + LM4549_Master_Volume_Mono = 0x06, + LM4549_PC_Beep_Volume = 0x0A, + LM4549_Phone_Volume = 0x0C, + LM4549_Mic_Volume = 0x0E, + LM4549_Line_In_Volume = 0x10, + LM4549_CD_Volume = 0x12, + LM4549_Video_Volume = 0x14, + LM4549_Aux_Volume = 0x16, + LM4549_PCM_Out_Volume = 0x18, + LM4549_Record_Select = 0x1A, + LM4549_Record_Gain = 0x1C, + LM4549_General_Purpose = 0x20, + LM4549_3D_Control = 0x22, + LM4549_Powerdown_Ctrl_Stat = 0x26, + LM4549_Ext_Audio_ID = 0x28, + LM4549_Ext_Audio_Stat_Ctrl = 0x2A, + LM4549_PCM_Front_DAC_Rate = 0x2C, + LM4549_PCM_ADC_Rate = 0x32, + LM4549_Vendor_ID1 = 0x7C, + LM4549_Vendor_ID2 = 0x7E +}; + +static void lm4549_reset(lm4549_state *s) +{ + uint16_t *regfile = s->regfile; + + regfile[LM4549_Reset] = 0x0d50; + regfile[LM4549_Master_Volume] = 0x8008; + regfile[LM4549_Line_Out_Volume] = 0x8000; + regfile[LM4549_Master_Volume_Mono] = 0x8000; + regfile[LM4549_PC_Beep_Volume] = 0x0000; + regfile[LM4549_Phone_Volume] = 0x8008; + regfile[LM4549_Mic_Volume] = 0x8008; + regfile[LM4549_Line_In_Volume] = 0x8808; + regfile[LM4549_CD_Volume] = 0x8808; + regfile[LM4549_Video_Volume] = 0x8808; + regfile[LM4549_Aux_Volume] = 0x8808; + regfile[LM4549_PCM_Out_Volume] = 0x8808; + regfile[LM4549_Record_Select] = 0x0000; + regfile[LM4549_Record_Gain] = 0x8000; + regfile[LM4549_General_Purpose] = 0x0000; + regfile[LM4549_3D_Control] = 0x0101; + regfile[LM4549_Powerdown_Ctrl_Stat] = 0x000f; + regfile[LM4549_Ext_Audio_ID] = 0x0001; + regfile[LM4549_Ext_Audio_Stat_Ctrl] = 0x0000; + regfile[LM4549_PCM_Front_DAC_Rate] = 0xbb80; + regfile[LM4549_PCM_ADC_Rate] = 0xbb80; + regfile[LM4549_Vendor_ID1] = 0x4e53; + regfile[LM4549_Vendor_ID2] = 0x4331; +} + +static void lm4549_audio_transfer(lm4549_state *s) +{ + uint32_t written_bytes, written_samples; + uint32_t i; + + /* Activate the voice */ + AUD_set_active_out(s->voice, 1); + s->voice_is_active = 1; + + /* Try to write the buffer content */ + written_bytes = AUD_write(s->voice, s->buffer, + s->buffer_level * sizeof(uint16_t)); + written_samples = written_bytes >> 1; + +#if defined(LM4549_DUMP_DAC_INPUT) + fwrite(s->buffer, sizeof(uint8_t), written_bytes, fp_dac_input); +#endif + + s->buffer_level -= written_samples; + + if (s->buffer_level > 0) { + /* Move the data back to the start of the buffer */ + for (i = 0; i < s->buffer_level; i++) { + s->buffer[i] = s->buffer[i + written_samples]; + } + } +} + +static void lm4549_audio_out_callback(void *opaque, int free) +{ + lm4549_state *s = (lm4549_state *)opaque; + static uint32_t prev_buffer_level; + +#ifdef LM4549_DEBUG + int size = AUD_get_buffer_size_out(s->voice); + DPRINTF("audio_out_callback size = %i free = %i\n", size, free); +#endif + + /* Detect that no data are consumed + => disable the voice */ + if (s->buffer_level == prev_buffer_level) { + AUD_set_active_out(s->voice, 0); + s->voice_is_active = 0; + } + prev_buffer_level = s->buffer_level; + + /* Check if a buffer transfer is pending */ + if 
(s->buffer_level == LM4549_BUFFER_SIZE) { + lm4549_audio_transfer(s); + + /* Request more data */ + if (s->data_req_cb != NULL) { + (s->data_req_cb)(s->opaque); + } + } +} + +uint32_t lm4549_read(lm4549_state *s, hwaddr offset) +{ + uint16_t *regfile = s->regfile; + uint32_t value = 0; + + /* Read the stored value */ + assert(offset < 128); + value = regfile[offset]; + + DPRINTF("read [0x%02x] = 0x%04x\n", offset, value); + + return value; +} + +void lm4549_write(lm4549_state *s, + hwaddr offset, uint32_t value) +{ + uint16_t *regfile = s->regfile; + + assert(offset < 128); + DPRINTF("write [0x%02x] = 0x%04x\n", offset, value); + + switch (offset) { + case LM4549_Reset: + lm4549_reset(s); + break; + + case LM4549_PCM_Front_DAC_Rate: + regfile[LM4549_PCM_Front_DAC_Rate] = value; + DPRINTF("DAC rate change = %i\n", value); + + /* Re-open a voice with the new sample rate */ + struct audsettings as; + as.freq = value; + as.nchannels = 2; + as.fmt = AUDIO_FORMAT_S16; + as.endianness = 0; + + s->voice = AUD_open_out( + &s->card, + s->voice, + "lm4549.out", + s, + lm4549_audio_out_callback, + &as + ); + break; + + case LM4549_Powerdown_Ctrl_Stat: + value &= ~0xf; + value |= regfile[LM4549_Powerdown_Ctrl_Stat] & 0xf; + regfile[LM4549_Powerdown_Ctrl_Stat] = value; + break; + + case LM4549_Ext_Audio_ID: + case LM4549_Vendor_ID1: + case LM4549_Vendor_ID2: + DPRINTF("Write to read-only register 0x%x\n", (int)offset); + break; + + default: + /* Store the new value */ + regfile[offset] = value; + break; + } +} + +uint32_t lm4549_write_samples(lm4549_state *s, uint32_t left, uint32_t right) +{ + /* The left and right samples are in 20-bit resolution. + The LM4549 has 18-bit resolution and only uses the bits [19:2]. + This model supports 16-bit playback. + */ + + if (s->buffer_level > LM4549_BUFFER_SIZE - 2) { + DPRINTF("write_sample Buffer full\n"); + return 0; + } + + /* Store 16-bit samples in the buffer */ + s->buffer[s->buffer_level++] = (left >> 4); + s->buffer[s->buffer_level++] = (right >> 4); + + if (s->buffer_level == LM4549_BUFFER_SIZE) { + /* Trigger the transfer of the buffer to the audio host */ + lm4549_audio_transfer(s); + } + + return 1; +} + +static int lm4549_post_load(void *opaque, int version_id) +{ + lm4549_state *s = (lm4549_state *)opaque; + uint16_t *regfile = s->regfile; + + /* Re-open a voice with the current sample rate */ + uint32_t freq = regfile[LM4549_PCM_Front_DAC_Rate]; + + DPRINTF("post_load freq = %i\n", freq); + DPRINTF("post_load voice_is_active = %i\n", s->voice_is_active); + + struct audsettings as; + as.freq = freq; + as.nchannels = 2; + as.fmt = AUDIO_FORMAT_S16; + as.endianness = 0; + + s->voice = AUD_open_out( + &s->card, + s->voice, + "lm4549.out", + s, + lm4549_audio_out_callback, + &as + ); + + /* Request data */ + if (s->voice_is_active == 1) { + lm4549_audio_out_callback(s, AUD_get_buffer_size_out(s->voice)); + } + + return 0; +} + +void lm4549_init(lm4549_state *s, lm4549_callback data_req_cb, void* opaque) +{ + struct audsettings as; + + /* Store the callback and opaque pointer */ + s->data_req_cb = data_req_cb; + s->opaque = opaque; + + /* Init the registers */ + lm4549_reset(s); + + /* Register an audio card */ + AUD_register_card("lm4549", &s->card); + + /* Open a default voice */ + as.freq = 48000; + as.nchannels = 2; + as.fmt = AUDIO_FORMAT_S16; + as.endianness = 0; + + s->voice = AUD_open_out( + &s->card, + s->voice, + "lm4549.out", + s, + lm4549_audio_out_callback, + &as + ); + + AUD_set_volume_out(s->voice, 0, 255, 255); + + s->voice_is_active = 
0; + + /* Reset the input buffer */ + memset(s->buffer, 0x00, sizeof(s->buffer)); + s->buffer_level = 0; + +#if defined(LM4549_DUMP_DAC_INPUT) + fp_dac_input = fopen("lm4549_dac_input.pcm", "wb"); + if (!fp_dac_input) { + hw_error("Unable to open lm4549_dac_input.pcm for writing\n"); + } +#endif +} + +const VMStateDescription vmstate_lm4549_state = { + .name = "lm4549_state", + .version_id = 1, + .minimum_version_id = 1, + .post_load = lm4549_post_load, + .fields = (VMStateField[]) { + VMSTATE_UINT32(voice_is_active, lm4549_state), + VMSTATE_UINT16_ARRAY(regfile, lm4549_state, 128), + VMSTATE_UINT16_ARRAY(buffer, lm4549_state, LM4549_BUFFER_SIZE), + VMSTATE_UINT32(buffer_level, lm4549_state), + VMSTATE_END_OF_LIST() + } +}; diff --git a/hw/audio/lm4549.h b/hw/audio/lm4549.h new file mode 100644 index 000000000..aba9bb5b0 --- /dev/null +++ b/hw/audio/lm4549.h @@ -0,0 +1,44 @@ +/* + * LM4549 Audio Codec Interface + * + * Copyright (c) 2011 + * Written by Mathieu Sonet - www.elasticsheep.com + * + * This code is licensed under the GPL. + * + * ***************************************************************** + */ + +#ifndef HW_LM4549_H +#define HW_LM4549_H + +#include "audio/audio.h" +#include "exec/hwaddr.h" + +typedef void (*lm4549_callback)(void *opaque); + +#define LM4549_BUFFER_SIZE (512 * 2) /* 512 16-bit stereo samples */ + + +typedef struct { + QEMUSoundCard card; + SWVoiceOut *voice; + uint32_t voice_is_active; + + uint16_t regfile[128]; + lm4549_callback data_req_cb; + void *opaque; + + uint16_t buffer[LM4549_BUFFER_SIZE]; + uint32_t buffer_level; +} lm4549_state; + +extern const VMStateDescription vmstate_lm4549_state; + + +void lm4549_init(lm4549_state *s, lm4549_callback data_req, void *opaque); +uint32_t lm4549_read(lm4549_state *s, hwaddr offset); +void lm4549_write(lm4549_state *s, hwaddr offset, uint32_t value); +uint32_t lm4549_write_samples(lm4549_state *s, uint32_t left, uint32_t right); + +#endif /* HW_LM4549_H */ diff --git a/hw/audio/marvell_88w8618.c b/hw/audio/marvell_88w8618.c new file mode 100644 index 000000000..e6c09bdb8 --- /dev/null +++ b/hw/audio/marvell_88w8618.c @@ -0,0 +1,313 @@ +/* + * Marvell 88w8618 audio emulation extracted from + * Marvell MV88w8618 / Freecom MusicPal emulation. + * + * Copyright (c) 2008 Jan Kiszka + * + * This code is licensed under the GNU GPL v2. + * + * Contributions after 2012-01-13 are licensed under the terms of the + * GNU GPL, version 2 or (at your option) any later version. 
+ */ + +#include "qemu/osdep.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "hw/irq.h" +#include "hw/qdev-properties.h" +#include "hw/audio/wm8750.h" +#include "audio/audio.h" +#include "qapi/error.h" +#include "qemu/module.h" +#include "qom/object.h" + +#define MP_AUDIO_SIZE 0x00001000 + +/* Audio register offsets */ +#define MP_AUDIO_PLAYBACK_MODE 0x00 +#define MP_AUDIO_CLOCK_DIV 0x18 +#define MP_AUDIO_IRQ_STATUS 0x20 +#define MP_AUDIO_IRQ_ENABLE 0x24 +#define MP_AUDIO_TX_START_LO 0x28 +#define MP_AUDIO_TX_THRESHOLD 0x2C +#define MP_AUDIO_TX_STATUS 0x38 +#define MP_AUDIO_TX_START_HI 0x40 + +/* Status register and IRQ enable bits */ +#define MP_AUDIO_TX_HALF (1 << 6) +#define MP_AUDIO_TX_FULL (1 << 7) + +/* Playback mode bits */ +#define MP_AUDIO_16BIT_SAMPLE (1 << 0) +#define MP_AUDIO_PLAYBACK_EN (1 << 7) +#define MP_AUDIO_CLOCK_24MHZ (1 << 9) +#define MP_AUDIO_MONO (1 << 14) + +OBJECT_DECLARE_SIMPLE_TYPE(mv88w8618_audio_state, MV88W8618_AUDIO) + +struct mv88w8618_audio_state { + SysBusDevice parent_obj; + + MemoryRegion iomem; + qemu_irq irq; + uint32_t playback_mode; + uint32_t status; + uint32_t irq_enable; + uint32_t phys_buf; + uint32_t target_buffer; + uint32_t threshold; + uint32_t play_pos; + uint32_t last_free; + uint32_t clock_div; + void *wm; +}; + +static void mv88w8618_audio_callback(void *opaque, int free_out, int free_in) +{ + mv88w8618_audio_state *s = opaque; + int16_t *codec_buffer; + int8_t buf[4096]; + int8_t *mem_buffer; + int pos, block_size; + + if (!(s->playback_mode & MP_AUDIO_PLAYBACK_EN)) { + return; + } + if (s->playback_mode & MP_AUDIO_16BIT_SAMPLE) { + free_out <<= 1; + } + if (!(s->playback_mode & MP_AUDIO_MONO)) { + free_out <<= 1; + } + block_size = s->threshold / 2; + if (free_out - s->last_free < block_size) { + return; + } + if (block_size > 4096) { + return; + } + cpu_physical_memory_read(s->target_buffer + s->play_pos, buf, block_size); + mem_buffer = buf; + if (s->playback_mode & MP_AUDIO_16BIT_SAMPLE) { + if (s->playback_mode & MP_AUDIO_MONO) { + codec_buffer = wm8750_dac_buffer(s->wm, block_size >> 1); + for (pos = 0; pos < block_size; pos += 2) { + *codec_buffer++ = *(int16_t *)mem_buffer; + *codec_buffer++ = *(int16_t *)mem_buffer; + mem_buffer += 2; + } + } else { + memcpy(wm8750_dac_buffer(s->wm, block_size >> 2), + (uint32_t *)mem_buffer, block_size); + } + } else { + if (s->playback_mode & MP_AUDIO_MONO) { + codec_buffer = wm8750_dac_buffer(s->wm, block_size); + for (pos = 0; pos < block_size; pos++) { + *codec_buffer++ = cpu_to_le16(256 * *mem_buffer); + *codec_buffer++ = cpu_to_le16(256 * *mem_buffer++); + } + } else { + codec_buffer = wm8750_dac_buffer(s->wm, block_size >> 1); + for (pos = 0; pos < block_size; pos += 2) { + *codec_buffer++ = cpu_to_le16(256 * *mem_buffer++); + *codec_buffer++ = cpu_to_le16(256 * *mem_buffer++); + } + } + } + wm8750_dac_commit(s->wm); + + s->last_free = free_out - block_size; + + if (s->play_pos == 0) { + s->status |= MP_AUDIO_TX_HALF; + s->play_pos = block_size; + } else { + s->status |= MP_AUDIO_TX_FULL; + s->play_pos = 0; + } + + if (s->status & s->irq_enable) { + qemu_irq_raise(s->irq); + } +} + +static void mv88w8618_audio_clock_update(mv88w8618_audio_state *s) +{ + int rate; + + if (s->playback_mode & MP_AUDIO_CLOCK_24MHZ) { + rate = 24576000 / 64; /* 24.576MHz */ + } else { + rate = 11289600 / 64; /* 11.2896MHz */ + } + rate /= ((s->clock_div >> 8) & 0xff) + 1; + + wm8750_set_bclk_in(s->wm, rate); +} + +static uint64_t mv88w8618_audio_read(void *opaque, hwaddr offset, + 
unsigned size) +{ + mv88w8618_audio_state *s = opaque; + + switch (offset) { + case MP_AUDIO_PLAYBACK_MODE: + return s->playback_mode; + + case MP_AUDIO_CLOCK_DIV: + return s->clock_div; + + case MP_AUDIO_IRQ_STATUS: + return s->status; + + case MP_AUDIO_IRQ_ENABLE: + return s->irq_enable; + + case MP_AUDIO_TX_STATUS: + return s->play_pos >> 2; + + default: + return 0; + } +} + +static void mv88w8618_audio_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + mv88w8618_audio_state *s = opaque; + + switch (offset) { + case MP_AUDIO_PLAYBACK_MODE: + if (value & MP_AUDIO_PLAYBACK_EN && + !(s->playback_mode & MP_AUDIO_PLAYBACK_EN)) { + s->status = 0; + s->last_free = 0; + s->play_pos = 0; + } + s->playback_mode = value; + mv88w8618_audio_clock_update(s); + break; + + case MP_AUDIO_CLOCK_DIV: + s->clock_div = value; + s->last_free = 0; + s->play_pos = 0; + mv88w8618_audio_clock_update(s); + break; + + case MP_AUDIO_IRQ_STATUS: + s->status &= ~value; + break; + + case MP_AUDIO_IRQ_ENABLE: + s->irq_enable = value; + if (s->status & s->irq_enable) { + qemu_irq_raise(s->irq); + } + break; + + case MP_AUDIO_TX_START_LO: + s->phys_buf = (s->phys_buf & 0xFFFF0000) | (value & 0xFFFF); + s->target_buffer = s->phys_buf; + s->play_pos = 0; + s->last_free = 0; + break; + + case MP_AUDIO_TX_THRESHOLD: + s->threshold = (value + 1) * 4; + break; + + case MP_AUDIO_TX_START_HI: + s->phys_buf = (s->phys_buf & 0xFFFF) | (value << 16); + s->target_buffer = s->phys_buf; + s->play_pos = 0; + s->last_free = 0; + break; + } +} + +static void mv88w8618_audio_reset(DeviceState *d) +{ + mv88w8618_audio_state *s = MV88W8618_AUDIO(d); + + s->playback_mode = 0; + s->status = 0; + s->irq_enable = 0; + s->clock_div = 0; + s->threshold = 0; + s->phys_buf = 0; +} + +static const MemoryRegionOps mv88w8618_audio_ops = { + .read = mv88w8618_audio_read, + .write = mv88w8618_audio_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void mv88w8618_audio_init(Object *obj) +{ + SysBusDevice *dev = SYS_BUS_DEVICE(obj); + mv88w8618_audio_state *s = MV88W8618_AUDIO(dev); + + sysbus_init_irq(dev, &s->irq); + + memory_region_init_io(&s->iomem, obj, &mv88w8618_audio_ops, s, + "audio", MP_AUDIO_SIZE); + sysbus_init_mmio(dev, &s->iomem); + + object_property_add_link(OBJECT(dev), "wm8750", TYPE_WM8750, + (Object **) &s->wm, + qdev_prop_allow_set_link_before_realize, + 0); +} + +static void mv88w8618_audio_realize(DeviceState *dev, Error **errp) +{ + mv88w8618_audio_state *s = MV88W8618_AUDIO(dev); + + wm8750_data_req_set(s->wm, mv88w8618_audio_callback, s); +} + +static const VMStateDescription mv88w8618_audio_vmsd = { + .name = "mv88w8618_audio", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(playback_mode, mv88w8618_audio_state), + VMSTATE_UINT32(status, mv88w8618_audio_state), + VMSTATE_UINT32(irq_enable, mv88w8618_audio_state), + VMSTATE_UINT32(phys_buf, mv88w8618_audio_state), + VMSTATE_UINT32(target_buffer, mv88w8618_audio_state), + VMSTATE_UINT32(threshold, mv88w8618_audio_state), + VMSTATE_UINT32(play_pos, mv88w8618_audio_state), + VMSTATE_UINT32(last_free, mv88w8618_audio_state), + VMSTATE_UINT32(clock_div, mv88w8618_audio_state), + VMSTATE_END_OF_LIST() + } +}; + +static void mv88w8618_audio_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = mv88w8618_audio_realize; + dc->reset = mv88w8618_audio_reset; + dc->vmsd = &mv88w8618_audio_vmsd; + dc->user_creatable = false; +} + +static const TypeInfo 
mv88w8618_audio_info = { + .name = TYPE_MV88W8618_AUDIO, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(mv88w8618_audio_state), + .instance_init = mv88w8618_audio_init, + .class_init = mv88w8618_audio_class_init, +}; + +static void mv88w8618_register_types(void) +{ + type_register_static(&mv88w8618_audio_info); +} + +type_init(mv88w8618_register_types) diff --git a/hw/audio/meson.build b/hw/audio/meson.build new file mode 100644 index 000000000..e48a9fc73 --- /dev/null +++ b/hw/audio/meson.build @@ -0,0 +1,14 @@ +softmmu_ss.add(files('soundhw.c')) +softmmu_ss.add(when: 'CONFIG_AC97', if_true: files('ac97.c')) +softmmu_ss.add(when: 'CONFIG_ADLIB', if_true: files('fmopl.c', 'adlib.c')) +softmmu_ss.add(when: 'CONFIG_CS4231', if_true: files('cs4231.c')) +softmmu_ss.add(when: 'CONFIG_CS4231A', if_true: files('cs4231a.c')) +softmmu_ss.add(when: 'CONFIG_ES1370', if_true: files('es1370.c')) +softmmu_ss.add(when: 'CONFIG_GUS', if_true: files('gus.c', 'gusemu_hal.c', 'gusemu_mixer.c')) +softmmu_ss.add(when: 'CONFIG_HDA', if_true: files('intel-hda.c', 'hda-codec.c')) +softmmu_ss.add(when: 'CONFIG_MARVELL_88W8618', if_true: files('marvell_88w8618.c')) +softmmu_ss.add(when: 'CONFIG_PCSPK', if_true: files('pcspk.c')) +softmmu_ss.add(when: 'CONFIG_PL041', if_true: files('pl041.c', 'lm4549.c')) +softmmu_ss.add(when: 'CONFIG_SB16', if_true: files('sb16.c')) +softmmu_ss.add(when: 'CONFIG_VT82C686', if_true: files('via-ac97.c')) +softmmu_ss.add(when: 'CONFIG_WM8750', if_true: files('wm8750.c')) diff --git a/hw/audio/pcspk.c b/hw/audio/pcspk.c new file mode 100644 index 000000000..b056c0538 --- /dev/null +++ b/hw/audio/pcspk.c @@ -0,0 +1,263 @@ +/* + * QEMU PC speaker emulation + * + * Copyright (c) 2006 Joachim Henke + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include "hw/isa/isa.h" +#include "hw/audio/soundhw.h" +#include "audio/audio.h" +#include "qemu/module.h" +#include "qemu/timer.h" +#include "qemu/error-report.h" +#include "hw/timer/i8254.h" +#include "migration/vmstate.h" +#include "hw/audio/pcspk.h" +#include "qapi/error.h" +#include "qom/object.h" + +#define PCSPK_BUF_LEN 1792 +#define PCSPK_SAMPLE_RATE 32000 +#define PCSPK_MAX_FREQ (PCSPK_SAMPLE_RATE >> 1) +#define PCSPK_MIN_COUNT DIV_ROUND_UP(PIT_FREQ, PCSPK_MAX_FREQ) + +OBJECT_DECLARE_SIMPLE_TYPE(PCSpkState, PC_SPEAKER) + +struct PCSpkState { + ISADevice parent_obj; + + MemoryRegion ioport; + uint32_t iobase; + uint8_t sample_buf[PCSPK_BUF_LEN]; + QEMUSoundCard card; + SWVoiceOut *voice; + void *pit; + unsigned int pit_count; + unsigned int samples; + unsigned int play_pos; + uint8_t data_on; + uint8_t dummy_refresh_clock; + bool migrate; +}; + +static const char *s_spk = "pcspk"; +static PCSpkState *pcspk_state; + +static inline void generate_samples(PCSpkState *s) +{ + unsigned int i; + + if (s->pit_count) { + const uint32_t m = PCSPK_SAMPLE_RATE * s->pit_count; + const uint32_t n = ((uint64_t)PIT_FREQ << 32) / m; + + /* multiple of wavelength for gapless looping */ + s->samples = (QEMU_ALIGN_DOWN(PCSPK_BUF_LEN * PIT_FREQ, m) / (PIT_FREQ >> 1) + 1) >> 1; + for (i = 0; i < s->samples; ++i) + s->sample_buf[i] = (64 & (n * i >> 25)) - 32; + } else { + s->samples = PCSPK_BUF_LEN; + for (i = 0; i < PCSPK_BUF_LEN; ++i) + s->sample_buf[i] = 128; /* silence */ + } +} + +static void pcspk_callback(void *opaque, int free) +{ + PCSpkState *s = opaque; + PITChannelInfo ch; + unsigned int n; + + pit_get_channel_info(s->pit, 2, &ch); + + if (ch.mode != 3) { + return; + } + + n = ch.initial_count; + /* avoid frequencies that are not reproducible with sample rate */ + if (n < PCSPK_MIN_COUNT) + n = 0; + + if (s->pit_count != n) { + s->pit_count = n; + s->play_pos = 0; + generate_samples(s); + } + + while (free > 0) { + n = MIN(s->samples - s->play_pos, (unsigned int)free); + n = AUD_write(s->voice, &s->sample_buf[s->play_pos], n); + if (!n) + break; + s->play_pos = (s->play_pos + n) % s->samples; + free -= n; + } +} + +static int pcspk_audio_init(PCSpkState *s) +{ + struct audsettings as = {PCSPK_SAMPLE_RATE, 1, AUDIO_FORMAT_U8, 0}; + + if (s->voice) { + /* already initialized */ + return 0; + } + + AUD_register_card(s_spk, &s->card); + + s->voice = AUD_open_out(&s->card, s->voice, s_spk, s, pcspk_callback, &as); + if (!s->voice) { + AUD_log(s_spk, "Could not open voice\n"); + return -1; + } + + return 0; +} + +static uint64_t pcspk_io_read(void *opaque, hwaddr addr, + unsigned size) +{ + PCSpkState *s = opaque; + PITChannelInfo ch; + + pit_get_channel_info(s->pit, 2, &ch); + + s->dummy_refresh_clock ^= (1 << 4); + + return ch.gate | (s->data_on << 1) | s->dummy_refresh_clock | + (ch.out << 5); +} + +static void pcspk_io_write(void *opaque, hwaddr addr, uint64_t val, + unsigned size) +{ + PCSpkState *s = opaque; + const int gate = val & 1; + + s->data_on = (val >> 1) & 1; + pit_set_gate(s->pit, 2, gate); + if (s->voice) { + if (gate) /* restart */ + s->play_pos = 0; + AUD_set_active_out(s->voice, gate & s->data_on); + } +} + +static const MemoryRegionOps pcspk_io_ops = { + .read = pcspk_io_read, + .write = pcspk_io_write, + .impl = { + .min_access_size = 1, + .max_access_size = 1, + }, +}; + +static void pcspk_initfn(Object *obj) +{ + PCSpkState *s = PC_SPEAKER(obj); + + memory_region_init_io(&s->ioport, OBJECT(s), &pcspk_io_ops, s, "pcspk", 1); + + 
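/* Machine code sets this link to the PIT whose channel 2 drives the speaker (see pcspk_callback() and the port I/O handlers above). */ +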
object_property_add_link(obj, "pit", TYPE_PIT_COMMON, + (Object **)&s->pit, + qdev_prop_allow_set_link_before_realize, + 0); +} + +static void pcspk_realizefn(DeviceState *dev, Error **errp) +{ + ISADevice *isadev = ISA_DEVICE(dev); + PCSpkState *s = PC_SPEAKER(dev); + + isa_register_ioport(isadev, &s->ioport, s->iobase); + + if (s->card.state) { + pcspk_audio_init(s); + } + + pcspk_state = s; +} + +static bool migrate_needed(void *opaque) +{ + PCSpkState *s = opaque; + + return s->migrate; +} + +static const VMStateDescription vmstate_spk = { + .name = "pcspk", + .version_id = 1, + .minimum_version_id = 1, + .minimum_version_id_old = 1, + .needed = migrate_needed, + .fields = (VMStateField[]) { + VMSTATE_UINT8(data_on, PCSpkState), + VMSTATE_UINT8(dummy_refresh_clock, PCSpkState), + VMSTATE_END_OF_LIST() + } +}; + +static Property pcspk_properties[] = { + DEFINE_AUDIO_PROPERTIES(PCSpkState, card), + DEFINE_PROP_UINT32("iobase", PCSpkState, iobase, 0x61), + DEFINE_PROP_BOOL("migrate", PCSpkState, migrate, true), + DEFINE_PROP_END_OF_LIST(), +}; + +static void pcspk_class_initfn(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = pcspk_realizefn; + set_bit(DEVICE_CATEGORY_SOUND, dc->categories); + dc->vmsd = &vmstate_spk; + device_class_set_props(dc, pcspk_properties); + /* Reason: realize sets global pcspk_state */ + /* Reason: pit object link */ + dc->user_creatable = false; +} + +static const TypeInfo pcspk_info = { + .name = TYPE_PC_SPEAKER, + .parent = TYPE_ISA_DEVICE, + .instance_size = sizeof(PCSpkState), + .instance_init = pcspk_initfn, + .class_init = pcspk_class_initfn, +}; + +static int pcspk_audio_init_soundhw(ISABus *bus) +{ + PCSpkState *s = pcspk_state; + + warn_report("'-soundhw pcspk' is deprecated, " + "please set a backend using '-machine pcspk-audiodev=' instead"); + return pcspk_audio_init(s); +} + +static void pcspk_register(void) +{ + type_register_static(&pcspk_info); + isa_register_soundhw("pcspk", "PC speaker", pcspk_audio_init_soundhw); +} +type_init(pcspk_register) diff --git a/hw/audio/pl041.c b/hw/audio/pl041.c new file mode 100644 index 000000000..03acd4fe3 --- /dev/null +++ b/hw/audio/pl041.c @@ -0,0 +1,660 @@ +/* + * Arm PrimeCell PL041 Advanced Audio Codec Interface + * + * Copyright (c) 2011 + * Written by Mathieu Sonet - www.elasticsheep.com + * + * This code is licensed under the GPL. + * + * ***************************************************************** + * + * This driver emulates the ARM AACI interface + * connected to a LM4549 codec. + * + * Limitations: + * - Supports only a playback on one channel (Versatile/Vexpress) + * - Supports only one TX FIFO in compact-mode or non-compact mode. + * - Supports playback of 12, 16, 18 and 20 bits samples. + * - Record is not supported. + * - The PL041 is hardwired to a LM4549 codec. + * + */ + +#include "qemu/osdep.h" +#include "hw/irq.h" +#include "hw/qdev-properties.h" +#include "hw/sysbus.h" +#include "qemu/log.h" +#include "qemu/module.h" + +#include "pl041.h" +#include "lm4549.h" +#include "migration/vmstate.h" +#include "qom/object.h" + +#if 0 +#define PL041_DEBUG_LEVEL 1 +#endif + +#if defined(PL041_DEBUG_LEVEL) && (PL041_DEBUG_LEVEL >= 1) +#define DBG_L1(fmt, ...) \ +do { printf("pl041: " fmt , ## __VA_ARGS__); } while (0) +#else +#define DBG_L1(fmt, ...) \ +do { } while (0) +#endif + +#if defined(PL041_DEBUG_LEVEL) && (PL041_DEBUG_LEVEL >= 2) +#define DBG_L2(fmt, ...) 
\ +do { printf("pl041: " fmt , ## __VA_ARGS__); } while (0) +#else +#define DBG_L2(fmt, ...) \ +do { } while (0) +#endif + + +#define MAX_FIFO_DEPTH (1024) +#define DEFAULT_FIFO_DEPTH (8) + +#define SLOT1_RW (1 << 19) + +/* This FIFO only stores 20-bit samples on 32-bit words. + So its level is independent of the selected mode */ +typedef struct { + uint32_t level; + uint32_t data[MAX_FIFO_DEPTH]; +} pl041_fifo; + +typedef struct { + pl041_fifo tx_fifo; + uint8_t tx_enabled; + uint8_t tx_compact_mode; + uint8_t tx_sample_size; + + pl041_fifo rx_fifo; + uint8_t rx_enabled; + uint8_t rx_compact_mode; + uint8_t rx_sample_size; +} pl041_channel; + +#define TYPE_PL041 "pl041" +OBJECT_DECLARE_SIMPLE_TYPE(PL041State, PL041) + +struct PL041State { + SysBusDevice parent_obj; + + MemoryRegion iomem; + qemu_irq irq; + + uint32_t fifo_depth; /* FIFO depth in non-compact mode */ + + pl041_regfile regs; + pl041_channel fifo1; + lm4549_state codec; +}; + + +static const unsigned char pl041_default_id[8] = { + 0x41, 0x10, 0x04, 0x00, 0x0d, 0xf0, 0x05, 0xb1 +}; + +#if defined(PL041_DEBUG_LEVEL) +#define REGISTER(name, offset) #name, +static const char *pl041_regs_name[] = { + #include "pl041.hx" +}; +#undef REGISTER +#endif + + +#if defined(PL041_DEBUG_LEVEL) +static const char *get_reg_name(hwaddr offset) +{ + if (offset <= PL041_dr1_7) { + return pl041_regs_name[offset >> 2]; + } + + return "unknown"; +} +#endif + +static uint8_t pl041_compute_periphid3(PL041State *s) +{ + uint8_t id3 = 1; /* One channel */ + + /* Add the fifo depth information */ + switch (s->fifo_depth) { + case 8: + id3 |= 0 << 3; + break; + case 32: + id3 |= 1 << 3; + break; + case 64: + id3 |= 2 << 3; + break; + case 128: + id3 |= 3 << 3; + break; + case 256: + id3 |= 4 << 3; + break; + case 512: + id3 |= 5 << 3; + break; + case 1024: + id3 |= 6 << 3; + break; + case 2048: + id3 |= 7 << 3; + break; + } + + return id3; +} + +static void pl041_reset(PL041State *s) +{ + DBG_L1("pl041_reset\n"); + + memset(&s->regs, 0x00, sizeof(pl041_regfile)); + + s->regs.slfr = SL1TXEMPTY | SL2TXEMPTY | SL12TXEMPTY; + s->regs.sr1 = TXFE | RXFE | TXHE; + s->regs.isr1 = 0; + + memset(&s->fifo1, 0x00, sizeof(s->fifo1)); +} + + +static void pl041_fifo1_write(PL041State *s, uint32_t value) +{ + pl041_channel *channel = &s->fifo1; + pl041_fifo *fifo = &s->fifo1.tx_fifo; + + /* Push the value in the FIFO */ + if (channel->tx_compact_mode == 0) { + /* Non-compact mode */ + + if (fifo->level < s->fifo_depth) { + /* Pad the value with 0 to obtain a 20-bit sample */ + switch (channel->tx_sample_size) { + case 12: + value = (value << 8) & 0xFFFFF; + break; + case 16: + value = (value << 4) & 0xFFFFF; + break; + case 18: + value = (value << 2) & 0xFFFFF; + break; + case 20: + default: + break; + } + + /* Store the sample in the FIFO */ + fifo->data[fifo->level++] = value; + } +#if defined(PL041_DEBUG_LEVEL) + else { + DBG_L1("fifo1 write: overrun\n"); + } +#endif + } else { + /* Compact mode */ + + if ((fifo->level + 2) < s->fifo_depth) { + uint32_t i = 0; + uint32_t sample = 0; + + for (i = 0; i < 2; i++) { + sample = value & 0xFFFF; + value = value >> 16; + + /* Pad each sample with 0 to obtain a 20-bit sample */ + switch (channel->tx_sample_size) { + case 12: + sample = sample << 8; + break; + case 16: + default: + sample = sample << 4; + break; + } + + /* Store the sample in the FIFO */ + fifo->data[fifo->level++] = sample; + } + } +#if defined(PL041_DEBUG_LEVEL) + else { + DBG_L1("fifo1 write: overrun\n"); + } +#endif + } + + /* Update the status register */ 
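+ /* TXFE, TXHE and TXFF (defined in pl041.h) track the transmit FIFO's empty, half-empty and full thresholds. */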
+ if (fifo->level > 0) { + s->regs.sr1 &= ~(TXUNDERRUN | TXFE); + } + + if (fifo->level >= (s->fifo_depth / 2)) { + s->regs.sr1 &= ~TXHE; + } + + if (fifo->level >= s->fifo_depth) { + s->regs.sr1 |= TXFF; + } + + DBG_L2("fifo1_push sr1 = 0x%08x\n", s->regs.sr1); +} + +static void pl041_fifo1_transmit(PL041State *s) +{ + pl041_channel *channel = &s->fifo1; + pl041_fifo *fifo = &s->fifo1.tx_fifo; + uint32_t slots = s->regs.txcr1 & TXSLOT_MASK; + uint32_t written_samples; + + /* Check if FIFO1 transmit is enabled */ + if ((channel->tx_enabled) && (slots & (TXSLOT3 | TXSLOT4))) { + if (fifo->level >= (s->fifo_depth / 2)) { + int i; + + DBG_L1("Transfer FIFO level = %i\n", fifo->level); + + /* Try to transfer the whole FIFO */ + for (i = 0; i < (fifo->level / 2); i++) { + uint32_t left = fifo->data[i * 2]; + uint32_t right = fifo->data[i * 2 + 1]; + + /* Transmit two 20-bit samples to the codec */ + if (lm4549_write_samples(&s->codec, left, right) == 0) { + DBG_L1("Codec buffer full\n"); + break; + } + } + + written_samples = i * 2; + if (written_samples > 0) { + /* Update the FIFO level */ + fifo->level -= written_samples; + + /* Move back the pending samples to the start of the FIFO */ + for (i = 0; i < fifo->level; i++) { + fifo->data[i] = fifo->data[written_samples + i]; + } + + /* Update the status register */ + s->regs.sr1 &= ~TXFF; + + if (fifo->level <= (s->fifo_depth / 2)) { + s->regs.sr1 |= TXHE; + } + + if (fifo->level == 0) { + s->regs.sr1 |= TXFE | TXUNDERRUN; + DBG_L1("Empty FIFO\n"); + } + } + } + } +} + +static void pl041_isr1_update(PL041State *s) +{ + /* Update ISR1 */ + if (s->regs.sr1 & TXUNDERRUN) { + s->regs.isr1 |= URINTR; + } else { + s->regs.isr1 &= ~URINTR; + } + + if (s->regs.sr1 & TXHE) { + s->regs.isr1 |= TXINTR; + } else { + s->regs.isr1 &= ~TXINTR; + } + + if (!(s->regs.sr1 & TXBUSY) && (s->regs.sr1 & TXFE)) { + s->regs.isr1 |= TXCINTR; + } else { + s->regs.isr1 &= ~TXCINTR; + } + + /* Update the irq state */ + qemu_set_irq(s->irq, ((s->regs.isr1 & s->regs.ie1) > 0) ? 
1 : 0); + DBG_L2("Set interrupt sr1 = 0x%08x isr1 = 0x%08x masked = 0x%08x\n", + s->regs.sr1, s->regs.isr1, s->regs.isr1 & s->regs.ie1); +} + +static void pl041_request_data(void *opaque) +{ + PL041State *s = (PL041State *)opaque; + + /* Trigger pending transfers */ + pl041_fifo1_transmit(s); + pl041_isr1_update(s); +} + +static uint64_t pl041_read(void *opaque, hwaddr offset, + unsigned size) +{ + PL041State *s = (PL041State *)opaque; + int value; + + if ((offset >= PL041_periphid0) && (offset <= PL041_pcellid3)) { + if (offset == PL041_periphid3) { + value = pl041_compute_periphid3(s); + } else { + value = pl041_default_id[(offset - PL041_periphid0) >> 2]; + } + + DBG_L1("pl041_read [0x%08x] => 0x%08x\n", offset, value); + return value; + } else if (offset <= PL041_dr4_7) { + value = *((uint32_t *)&s->regs + (offset >> 2)); + } else { + DBG_L1("pl041_read: Reserved offset %x\n", (int)offset); + return 0; + } + + switch (offset) { + case PL041_allints: + value = s->regs.isr1 & 0x7F; + break; + } + + DBG_L1("pl041_read [0x%08x] %s => 0x%08x\n", offset, + get_reg_name(offset), value); + + return value; +} + +static void pl041_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + PL041State *s = (PL041State *)opaque; + uint16_t control, data; + uint32_t result; + + DBG_L1("pl041_write [0x%08x] %s <= 0x%08x\n", offset, + get_reg_name(offset), (unsigned int)value); + + /* Write the register */ + if (offset <= PL041_dr4_7) { + *((uint32_t *)&s->regs + (offset >> 2)) = value; + } else { + DBG_L1("pl041_write: Reserved offset %x\n", (int)offset); + return; + } + + /* Execute the actions */ + switch (offset) { + case PL041_txcr1: + { + pl041_channel *channel = &s->fifo1; + + uint32_t txen = s->regs.txcr1 & TXEN; + uint32_t tsize = (s->regs.txcr1 & TSIZE_MASK) >> TSIZE_MASK_BIT; + uint32_t compact_mode = (s->regs.txcr1 & TXCOMPACT) ? 1 : 0; +#if defined(PL041_DEBUG_LEVEL) + uint32_t slots = (s->regs.txcr1 & TXSLOT_MASK) >> TXSLOT_MASK_BIT; + uint32_t txfen = (s->regs.txcr1 & TXFEN) > 0 ? 
1 : 0; +#endif + + DBG_L1("=> txen = %i slots = 0x%01x tsize = %i compact = %i " + "txfen = %i\n", txen, slots, tsize, compact_mode, txfen); + + channel->tx_enabled = txen; + channel->tx_compact_mode = compact_mode; + + switch (tsize) { + case 0: + channel->tx_sample_size = 16; + break; + case 1: + channel->tx_sample_size = 18; + break; + case 2: + channel->tx_sample_size = 20; + break; + case 3: + channel->tx_sample_size = 12; + break; + } + + DBG_L1("TX enabled = %i\n", channel->tx_enabled); + DBG_L1("TX compact mode = %i\n", channel->tx_compact_mode); + DBG_L1("TX sample width = %i\n", channel->tx_sample_size); + + /* Check if compact mode is allowed with selected tsize */ + if (channel->tx_compact_mode == 1) { + if ((channel->tx_sample_size == 18) || + (channel->tx_sample_size == 20)) { + channel->tx_compact_mode = 0; + DBG_L1("Compact mode not allowed with 18/20-bit sample size\n"); + } + } + + break; + } + case PL041_sl1tx: + s->regs.slfr &= ~SL1TXEMPTY; + + control = (s->regs.sl1tx >> 12) & 0x7F; + data = (s->regs.sl2tx >> 4) & 0xFFFF; + + if ((s->regs.sl1tx & SLOT1_RW) == 0) { + /* Write operation */ + lm4549_write(&s->codec, control, data); + } else { + /* Read operation */ + result = lm4549_read(&s->codec, control); + + /* Store the returned value */ + s->regs.sl1rx = s->regs.sl1tx & ~SLOT1_RW; + s->regs.sl2rx = result << 4; + + s->regs.slfr &= ~(SL1RXBUSY | SL2RXBUSY); + s->regs.slfr |= SL1RXVALID | SL2RXVALID; + } + break; + + case PL041_sl2tx: + s->regs.sl2tx = value; + s->regs.slfr &= ~SL2TXEMPTY; + break; + + case PL041_intclr: + DBG_L1("=> Clear interrupt intclr = 0x%08x isr1 = 0x%08x\n", + s->regs.intclr, s->regs.isr1); + + if (s->regs.intclr & TXUEC1) { + s->regs.sr1 &= ~TXUNDERRUN; + } + break; + + case PL041_maincr: + { +#if defined(PL041_DEBUG_LEVEL) + char debug[] = " AACIFE SL1RXEN SL1TXEN"; + if (!(value & AACIFE)) { + debug[0] = '!'; + } + if (!(value & SL1RXEN)) { + debug[8] = '!'; + } + if (!(value & SL1TXEN)) { + debug[17] = '!'; + } + DBG_L1("%s\n", debug); +#endif + + if ((s->regs.maincr & AACIFE) == 0) { + pl041_reset(s); + } + break; + } + + case PL041_dr1_0: + case PL041_dr1_1: + case PL041_dr1_2: + case PL041_dr1_3: + pl041_fifo1_write(s, value); + break; + } + + /* Transmit the FIFO content */ + pl041_fifo1_transmit(s); + + /* Update the ISR1 register */ + pl041_isr1_update(s); +} + +static void pl041_device_reset(DeviceState *d) +{ + PL041State *s = PL041(d); + + pl041_reset(s); +} + +static const MemoryRegionOps pl041_ops = { + .read = pl041_read, + .write = pl041_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void pl041_init(Object *obj) +{ + SysBusDevice *dev = SYS_BUS_DEVICE(obj); + PL041State *s = PL041(dev); + + DBG_L1("pl041_init 0x%08x\n", (uint32_t)s); + + /* Connect the device to the sysbus */ + memory_region_init_io(&s->iomem, obj, &pl041_ops, s, "pl041", 0x1000); + sysbus_init_mmio(dev, &s->iomem); + sysbus_init_irq(dev, &s->irq); +} + +static void pl041_realize(DeviceState *dev, Error **errp) +{ + PL041State *s = PL041(dev); + + /* Check the device properties */ + switch (s->fifo_depth) { + case 8: + case 32: + case 64: + case 128: + case 256: + case 512: + case 1024: + case 2048: + break; + case 16: + default: + /* NC FIFO depth of 16 is not allowed because its id bits in + AACIPERIPHID3 overlap with the id for the default NC FIFO depth */ + qemu_log_mask(LOG_UNIMP, + "pl041: unsupported non-compact fifo depth [%i]\n", + s->fifo_depth); + } + + /* Init the codec */ + lm4549_init(&s->codec, &pl041_request_data, (void *)s); +} + 
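+/*
+ * Editor's sketch (illustrative only, not part of the original file): how a
+ * board model would typically instantiate the PL041 with a non-default FIFO
+ * depth. The MMIO base address is hypothetical (boards such as versatilepb
+ * map the AACI at their own addresses), and &error_fatal would additionally
+ * require "qapi/error.h".
+ */
+#if 0
+static void example_create_pl041(qemu_irq irq)
+{
+    DeviceState *dev = qdev_new(TYPE_PL041);
+
+    /* Must be one of the depths accepted by pl041_realize() above. */
+    qdev_prop_set_uint32(dev, "nc_fifo_depth", 512);
+    sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
+    sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, 0x10004000);
+    sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, irq);
+}
+#endif
+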
+static const VMStateDescription vmstate_pl041_regfile = { + .name = "pl041_regfile", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { +#define REGISTER(name, offset) VMSTATE_UINT32(name, pl041_regfile), + #include "pl041.hx" +#undef REGISTER + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_pl041_fifo = { + .name = "pl041_fifo", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(level, pl041_fifo), + VMSTATE_UINT32_ARRAY(data, pl041_fifo, MAX_FIFO_DEPTH), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_pl041_channel = { + .name = "pl041_channel", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_STRUCT(tx_fifo, pl041_channel, 0, + vmstate_pl041_fifo, pl041_fifo), + VMSTATE_UINT8(tx_enabled, pl041_channel), + VMSTATE_UINT8(tx_compact_mode, pl041_channel), + VMSTATE_UINT8(tx_sample_size, pl041_channel), + VMSTATE_STRUCT(rx_fifo, pl041_channel, 0, + vmstate_pl041_fifo, pl041_fifo), + VMSTATE_UINT8(rx_enabled, pl041_channel), + VMSTATE_UINT8(rx_compact_mode, pl041_channel), + VMSTATE_UINT8(rx_sample_size, pl041_channel), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_pl041 = { + .name = "pl041", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(fifo_depth, PL041State), + VMSTATE_STRUCT(regs, PL041State, 0, + vmstate_pl041_regfile, pl041_regfile), + VMSTATE_STRUCT(fifo1, PL041State, 0, + vmstate_pl041_channel, pl041_channel), + VMSTATE_STRUCT(codec, PL041State, 0, + vmstate_lm4549_state, lm4549_state), + VMSTATE_END_OF_LIST() + } +}; + +static Property pl041_device_properties[] = { + DEFINE_AUDIO_PROPERTIES(PL041State, codec.card), + /* Non-compact FIFO depth property */ + DEFINE_PROP_UINT32("nc_fifo_depth", PL041State, fifo_depth, + DEFAULT_FIFO_DEPTH), + DEFINE_PROP_END_OF_LIST(), +}; + +static void pl041_device_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = pl041_realize; + set_bit(DEVICE_CATEGORY_SOUND, dc->categories); + dc->reset = pl041_device_reset; + dc->vmsd = &vmstate_pl041; + device_class_set_props(dc, pl041_device_properties); +} + +static const TypeInfo pl041_device_info = { + .name = TYPE_PL041, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(PL041State), + .instance_init = pl041_init, + .class_init = pl041_device_class_init, +}; + +static void pl041_register_types(void) +{ + type_register_static(&pl041_device_info); +} + +type_init(pl041_register_types) diff --git a/hw/audio/pl041.h b/hw/audio/pl041.h new file mode 100644 index 000000000..515db4756 --- /dev/null +++ b/hw/audio/pl041.h @@ -0,0 +1,135 @@ +/* + * Arm PrimeCell PL041 Advanced Audio Codec Interface + * + * Copyright (c) 2011 + * Written by Mathieu Sonet - www.elasticsheep.com + * + * This code is licensed under the GPL. 
+ * + * ***************************************************************** + */ + +#ifndef HW_PL041_H +#define HW_PL041_H + +/* Register file */ +#define REGISTER(name, offset) uint32_t name; +typedef struct { + #include "pl041.hx" +} pl041_regfile; +#undef REGISTER + +/* Register addresses */ +#define REGISTER(name, offset) PL041_##name = offset, +enum { + #include "pl041.hx" + + PL041_periphid0 = 0xFE0, + PL041_periphid1 = 0xFE4, + PL041_periphid2 = 0xFE8, + PL041_periphid3 = 0xFEC, + PL041_pcellid0 = 0xFF0, + PL041_pcellid1 = 0xFF4, + PL041_pcellid2 = 0xFF8, + PL041_pcellid3 = 0xFFC, +}; +#undef REGISTER + +/* Register bits */ + +/* IEx */ +#define TXCIE (1 << 0) +#define RXTIE (1 << 1) +#define TXIE (1 << 2) +#define RXIE (1 << 3) +#define RXOIE (1 << 4) +#define TXUIE (1 << 5) +#define RXTOIE (1 << 6) + +/* TXCRx */ +#define TXEN (1 << 0) +#define TXSLOT1 (1 << 1) +#define TXSLOT2 (1 << 2) +#define TXSLOT3 (1 << 3) +#define TXSLOT4 (1 << 4) +#define TXCOMPACT (1 << 15) +#define TXFEN (1 << 16) + +#define TXSLOT_MASK_BIT (1) +#define TXSLOT_MASK (0xFFF << TXSLOT_MASK_BIT) + +#define TSIZE_MASK_BIT (13) +#define TSIZE_MASK (0x3 << TSIZE_MASK_BIT) + +#define TSIZE_16BITS (0x0 << TSIZE_MASK_BIT) +#define TSIZE_18BITS (0x1 << TSIZE_MASK_BIT) +#define TSIZE_20BITS (0x2 << TSIZE_MASK_BIT) +#define TSIZE_12BITS (0x3 << TSIZE_MASK_BIT) + +/* SRx */ +#define RXFE (1 << 0) +#define TXFE (1 << 1) +#define RXHF (1 << 2) +#define TXHE (1 << 3) +#define RXFF (1 << 4) +#define TXFF (1 << 5) +#define RXBUSY (1 << 6) +#define TXBUSY (1 << 7) +#define RXOVERRUN (1 << 8) +#define TXUNDERRUN (1 << 9) +#define RXTIMEOUT (1 << 10) +#define RXTOFE (1 << 11) + +/* ISRx */ +#define TXCINTR (1 << 0) +#define RXTOINTR (1 << 1) +#define TXINTR (1 << 2) +#define RXINTR (1 << 3) +#define ORINTR (1 << 4) +#define URINTR (1 << 5) +#define RXTOFEINTR (1 << 6) + +/* SLFR */ +#define SL1RXBUSY (1 << 0) +#define SL1TXBUSY (1 << 1) +#define SL2RXBUSY (1 << 2) +#define SL2TXBUSY (1 << 3) +#define SL12RXBUSY (1 << 4) +#define SL12TXBUSY (1 << 5) +#define SL1RXVALID (1 << 6) +#define SL1TXEMPTY (1 << 7) +#define SL2RXVALID (1 << 8) +#define SL2TXEMPTY (1 << 9) +#define SL12RXVALID (1 << 10) +#define SL12TXEMPTY (1 << 11) +#define RAWGPIOINT (1 << 12) +#define RWIS (1 << 13) + +/* MAINCR */ +#define AACIFE (1 << 0) +#define LOOPBACK (1 << 1) +#define LOWPOWER (1 << 2) +#define SL1RXEN (1 << 3) +#define SL1TXEN (1 << 4) +#define SL2RXEN (1 << 5) +#define SL2TXEN (1 << 6) +#define SL12RXEN (1 << 7) +#define SL12TXEN (1 << 8) +#define DMAENABLE (1 << 9) + +/* INTCLR */ +#define WISC (1 << 0) +#define RXOEC1 (1 << 1) +#define RXOEC2 (1 << 2) +#define RXOEC3 (1 << 3) +#define RXOEC4 (1 << 4) +#define TXUEC1 (1 << 5) +#define TXUEC2 (1 << 6) +#define TXUEC3 (1 << 7) +#define TXUEC4 (1 << 8) +#define RXTOFEC1 (1 << 9) +#define RXTOFEC2 (1 << 10) +#define RXTOFEC3 (1 << 11) +#define RXTOFEC4 (1 << 12) + +#endif /* HW_PL041_H */ diff --git a/hw/audio/pl041.hx b/hw/audio/pl041.hx new file mode 100644 index 000000000..dd7188cbc --- /dev/null +++ b/hw/audio/pl041.hx @@ -0,0 +1,81 @@ +/* + * Arm PrimeCell PL041 Advanced Audio Codec Interface + * + * Copyright (c) 2011 + * Written by Mathieu Sonet - www.elasticsheep.com + * + * This code is licensed under the GPL. 
+ * + * ***************************************************************** + */ + +/* PL041 register file description */ + +REGISTER( rxcr1, 0x00 ) +REGISTER( txcr1, 0x04 ) +REGISTER( sr1, 0x08 ) +REGISTER( isr1, 0x0C ) +REGISTER( ie1, 0x10 ) +REGISTER( rxcr2, 0x14 ) +REGISTER( txcr2, 0x18 ) +REGISTER( sr2, 0x1C ) +REGISTER( isr2, 0x20 ) +REGISTER( ie2, 0x24 ) +REGISTER( rxcr3, 0x28 ) +REGISTER( txcr3, 0x2C ) +REGISTER( sr3, 0x30 ) +REGISTER( isr3, 0x34 ) +REGISTER( ie3, 0x38 ) +REGISTER( rxcr4, 0x3C ) +REGISTER( txcr4, 0x40 ) +REGISTER( sr4, 0x44 ) +REGISTER( isr4, 0x48 ) +REGISTER( ie4, 0x4C ) +REGISTER( sl1rx, 0x50 ) +REGISTER( sl1tx, 0x54 ) +REGISTER( sl2rx, 0x58 ) +REGISTER( sl2tx, 0x5C ) +REGISTER( sl12rx, 0x60 ) +REGISTER( sl12tx, 0x64 ) +REGISTER( slfr, 0x68 ) +REGISTER( slistat, 0x6C ) +REGISTER( slien, 0x70 ) +REGISTER( intclr, 0x74 ) +REGISTER( maincr, 0x78 ) +REGISTER( reset, 0x7C ) +REGISTER( sync, 0x80 ) +REGISTER( allints, 0x84 ) +REGISTER( mainfr, 0x88 ) +REGISTER( unused, 0x8C ) +REGISTER( dr1_0, 0x90 ) +REGISTER( dr1_1, 0x94 ) +REGISTER( dr1_2, 0x98 ) +REGISTER( dr1_3, 0x9C ) +REGISTER( dr1_4, 0xA0 ) +REGISTER( dr1_5, 0xA4 ) +REGISTER( dr1_6, 0xA8 ) +REGISTER( dr1_7, 0xAC ) +REGISTER( dr2_0, 0xB0 ) +REGISTER( dr2_1, 0xB4 ) +REGISTER( dr2_2, 0xB8 ) +REGISTER( dr2_3, 0xBC ) +REGISTER( dr2_4, 0xC0 ) +REGISTER( dr2_5, 0xC4 ) +REGISTER( dr2_6, 0xC8 ) +REGISTER( dr2_7, 0xCC ) +REGISTER( dr3_0, 0xD0 ) +REGISTER( dr3_1, 0xD4 ) +REGISTER( dr3_2, 0xD8 ) +REGISTER( dr3_3, 0xDC ) +REGISTER( dr3_4, 0xE0 ) +REGISTER( dr3_5, 0xE4 ) +REGISTER( dr3_6, 0xE8 ) +REGISTER( dr3_7, 0xEC ) +REGISTER( dr4_0, 0xF0 ) +REGISTER( dr4_1, 0xF4 ) +REGISTER( dr4_2, 0xF8 ) +REGISTER( dr4_3, 0xFC ) +REGISTER( dr4_4, 0x100 ) +REGISTER( dr4_5, 0x104 ) +REGISTER( dr4_6, 0x108 ) +REGISTER( dr4_7, 0x10C ) diff --git a/hw/audio/sb16.c b/hw/audio/sb16.c new file mode 100644 index 000000000..60f1f75e3 --- /dev/null +++ b/hw/audio/sb16.c @@ -0,0 +1,1476 @@ +/* + * QEMU Soundblaster 16 emulation + * + * Copyright (c) 2003-2005 Vassili Karpov (malc) + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "hw/audio/soundhw.h" +#include "audio/audio.h" +#include "hw/irq.h" +#include "hw/isa/isa.h" +#include "hw/qdev-properties.h" +#include "migration/vmstate.h" +#include "qemu/timer.h" +#include "qemu/host-utils.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "qapi/error.h" +#include "qom/object.h" + +#define dolog(...) 
AUD_log ("sb16", __VA_ARGS__) + +/* #define DEBUG */ +/* #define DEBUG_SB16_MOST */ + +#ifdef DEBUG +#define ldebug(...) dolog (__VA_ARGS__) +#else +#define ldebug(...) +#endif + +static const char e3[] = "COPYRIGHT (C) CREATIVE TECHNOLOGY LTD, 1992."; + +#define TYPE_SB16 "sb16" +OBJECT_DECLARE_SIMPLE_TYPE(SB16State, SB16) + +struct SB16State { + ISADevice parent_obj; + + QEMUSoundCard card; + qemu_irq pic; + uint32_t irq; + uint32_t dma; + uint32_t hdma; + uint32_t port; + uint32_t ver; + IsaDma *isa_dma; + IsaDma *isa_hdma; + + int in_index; + int out_data_len; + int fmt_stereo; + int fmt_signed; + int fmt_bits; + AudioFormat fmt; + int dma_auto; + int block_size; + int fifo; + int freq; + int time_const; + int speaker; + int needed_bytes; + int cmd; + int use_hdma; + int highspeed; + int can_write; + + int v2x6; + + uint8_t csp_param; + uint8_t csp_value; + uint8_t csp_mode; + uint8_t csp_regs[256]; + uint8_t csp_index; + uint8_t csp_reg83[4]; + int csp_reg83r; + int csp_reg83w; + + uint8_t in2_data[10]; + uint8_t out_data[50]; + uint8_t test_reg; + uint8_t last_read_byte; + int nzero; + + int left_till_irq; + + int dma_running; + int bytes_per_second; + int align; + int audio_free; + SWVoiceOut *voice; + + QEMUTimer *aux_ts; + /* mixer state */ + int mixer_nreg; + uint8_t mixer_regs[256]; + PortioList portio_list; +}; + +#define SAMPLE_RATE_MIN 5000 +#define SAMPLE_RATE_MAX 45000 + +static void SB_audio_callback (void *opaque, int free); + +static int magic_of_irq (int irq) +{ + switch (irq) { + case 5: + return 2; + case 7: + return 4; + case 9: + return 1; + case 10: + return 8; + default: + qemu_log_mask(LOG_GUEST_ERROR, "bad irq %d\n", irq); + return 2; + } +} + +static int irq_of_magic (int magic) +{ + switch (magic) { + case 1: + return 9; + case 2: + return 5; + case 4: + return 7; + case 8: + return 10; + default: + qemu_log_mask(LOG_GUEST_ERROR, "bad irq magic %d\n", magic); + return -1; + } +} + +#if 0 +static void log_dsp (SB16State *dsp) +{ + ldebug ("%s:%s:%d:%s:dmasize=%d:freq=%d:const=%d:speaker=%d\n", + dsp->fmt_stereo ? "Stereo" : "Mono", + dsp->fmt_signed ? "Signed" : "Unsigned", + dsp->fmt_bits, + dsp->dma_auto ? "Auto" : "Single", + dsp->block_size, + dsp->freq, + dsp->time_const, + dsp->speaker); +} +#endif + +static void speaker (SB16State *s, int on) +{ + s->speaker = on; + /* AUD_enable (s->voice, on); */ +} + +static void control (SB16State *s, int hold) +{ + int dma = s->use_hdma ? s->hdma : s->dma; + IsaDma *isa_dma = s->use_hdma ? 
s->isa_hdma : s->isa_dma; + IsaDmaClass *k = ISADMA_GET_CLASS(isa_dma); + s->dma_running = hold; + + ldebug ("hold %d high %d dma %d\n", hold, s->use_hdma, dma); + + if (hold) { + k->hold_DREQ(isa_dma, dma); + AUD_set_active_out (s->voice, 1); + } + else { + k->release_DREQ(isa_dma, dma); + AUD_set_active_out (s->voice, 0); + } +} + +static void aux_timer (void *opaque) +{ + SB16State *s = opaque; + s->can_write = 1; + qemu_irq_raise (s->pic); +} + +#define DMA8_AUTO 1 +#define DMA8_HIGH 2 + +static void continue_dma8 (SB16State *s) +{ + if (s->freq > 0) { + struct audsettings as; + + s->audio_free = 0; + + as.freq = s->freq; + as.nchannels = 1 << s->fmt_stereo; + as.fmt = s->fmt; + as.endianness = 0; + + s->voice = AUD_open_out ( + &s->card, + s->voice, + "sb16", + s, + SB_audio_callback, + &as + ); + } + + control (s, 1); +} + +static inline int restrict_sampling_rate(int freq) +{ + if (freq < SAMPLE_RATE_MIN) { + qemu_log_mask(LOG_GUEST_ERROR, + "sampling range too low: %d, increasing to %u\n", + freq, SAMPLE_RATE_MIN); + return SAMPLE_RATE_MIN; + } else if (freq > SAMPLE_RATE_MAX) { + qemu_log_mask(LOG_GUEST_ERROR, + "sampling range too high: %d, decreasing to %u\n", + freq, SAMPLE_RATE_MAX); + return SAMPLE_RATE_MAX; + } else { + return freq; + } +} + +static void dma_cmd8 (SB16State *s, int mask, int dma_len) +{ + s->fmt = AUDIO_FORMAT_U8; + s->use_hdma = 0; + s->fmt_bits = 8; + s->fmt_signed = 0; + s->fmt_stereo = (s->mixer_regs[0x0e] & 2) != 0; + if (-1 == s->time_const) { + if (s->freq <= 0) + s->freq = 11025; + } + else { + int tmp = (256 - s->time_const); + s->freq = (1000000 + (tmp / 2)) / tmp; + } + s->freq = restrict_sampling_rate(s->freq); + + if (dma_len != -1) { + s->block_size = dma_len << s->fmt_stereo; + } + else { + /* This is apparently the only way to make both Act1/PL + and SecondReality/FC work + + Act1 sets block size via command 0x48 and it's an odd number + SR does the same with even number + Both use stereo, and Creatives own documentation states that + 0x48 sets block size in bytes less one.. 
go figure */ + s->block_size &= ~s->fmt_stereo; + } + + s->freq >>= s->fmt_stereo; + s->left_till_irq = s->block_size; + s->bytes_per_second = (s->freq << s->fmt_stereo); + /* s->highspeed = (mask & DMA8_HIGH) != 0; */ + s->dma_auto = (mask & DMA8_AUTO) != 0; + s->align = (1 << s->fmt_stereo) - 1; + + if (s->block_size & s->align) { + qemu_log_mask(LOG_GUEST_ERROR, "warning: misaligned block size %d," + " alignment %d\n", s->block_size, s->align + 1); + } + + ldebug ("freq %d, stereo %d, sign %d, bits %d, " + "dma %d, auto %d, fifo %d, high %d\n", + s->freq, s->fmt_stereo, s->fmt_signed, s->fmt_bits, + s->block_size, s->dma_auto, s->fifo, s->highspeed); + + continue_dma8 (s); + speaker (s, 1); +} + +static void dma_cmd (SB16State *s, uint8_t cmd, uint8_t d0, int dma_len) +{ + s->use_hdma = cmd < 0xc0; + s->fifo = (cmd >> 1) & 1; + s->dma_auto = (cmd >> 2) & 1; + s->fmt_signed = (d0 >> 4) & 1; + s->fmt_stereo = (d0 >> 5) & 1; + + switch (cmd >> 4) { + case 11: + s->fmt_bits = 16; + break; + + case 12: + s->fmt_bits = 8; + break; + } + + if (-1 != s->time_const) { +#if 1 + int tmp = 256 - s->time_const; + s->freq = (1000000 + (tmp / 2)) / tmp; +#else + /* s->freq = 1000000 / ((255 - s->time_const) << s->fmt_stereo); */ + s->freq = 1000000 / ((255 - s->time_const)); +#endif + s->time_const = -1; + } + + s->block_size = dma_len + 1; + s->block_size <<= (s->fmt_bits == 16); + if (!s->dma_auto) { + /* It is clear that for DOOM and auto-init this value + shouldn't take stereo into account, while Miles Sound Systems + setsound.exe with single transfer mode wouldn't work without it + wonders of SB16 yet again */ + s->block_size <<= s->fmt_stereo; + } + + ldebug ("freq %d, stereo %d, sign %d, bits %d, " + "dma %d, auto %d, fifo %d, high %d\n", + s->freq, s->fmt_stereo, s->fmt_signed, s->fmt_bits, + s->block_size, s->dma_auto, s->fifo, s->highspeed); + + if (16 == s->fmt_bits) { + if (s->fmt_signed) { + s->fmt = AUDIO_FORMAT_S16; + } + else { + s->fmt = AUDIO_FORMAT_U16; + } + } + else { + if (s->fmt_signed) { + s->fmt = AUDIO_FORMAT_S8; + } + else { + s->fmt = AUDIO_FORMAT_U8; + } + } + + s->left_till_irq = s->block_size; + + s->bytes_per_second = (s->freq << s->fmt_stereo) << (s->fmt_bits == 16); + s->highspeed = 0; + s->align = (1 << (s->fmt_stereo + (s->fmt_bits == 16))) - 1; + if (s->block_size & s->align) { + qemu_log_mask(LOG_GUEST_ERROR, "warning: misaligned block size %d," + " alignment %d\n", s->block_size, s->align + 1); + } + + if (s->freq) { + struct audsettings as; + + s->audio_free = 0; + + as.freq = s->freq; + as.nchannels = 1 << s->fmt_stereo; + as.fmt = s->fmt; + as.endianness = 0; + + s->voice = AUD_open_out ( + &s->card, + s->voice, + "sb16", + s, + SB_audio_callback, + &as + ); + } + + control (s, 1); + speaker (s, 1); +} + +static inline void dsp_out_data (SB16State *s, uint8_t val) +{ + ldebug ("outdata %#x\n", val); + if ((size_t) s->out_data_len < sizeof (s->out_data)) { + s->out_data[s->out_data_len++] = val; + } +} + +static inline uint8_t dsp_get_data (SB16State *s) +{ + if (s->in_index) { + return s->in2_data[--s->in_index]; + } + else { + dolog ("buffer underflow\n"); + return 0; + } +} + +static void command (SB16State *s, uint8_t cmd) +{ + ldebug ("command %#x\n", cmd); + + if (cmd > 0xaf && cmd < 0xd0) { + if (cmd & 8) { + qemu_log_mask(LOG_UNIMP, "ADC not yet supported (command %#x)\n", + cmd); + } + + switch (cmd >> 4) { + case 11: + case 12: + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, "%#x wrong bits\n", cmd); + } + s->needed_bytes = 3; + } + else { + 
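+        /*
+         * Commands outside the 0xb0..0xcf range are decoded case by
+         * case.  needed_bytes counts the argument bytes the command
+         * still expects on the data port; once they have all arrived,
+         * complete() finishes the command.  For example, command 0x40
+         * (set time constant) leaves needed_bytes = 1, so the next
+         * byte written to the DSP is consumed as its argument.
+         */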
s->needed_bytes = 0; + + switch (cmd) { + case 0x03: + dsp_out_data (s, 0x10); /* s->csp_param); */ + goto warn; + + case 0x04: + s->needed_bytes = 1; + goto warn; + + case 0x05: + s->needed_bytes = 2; + goto warn; + + case 0x08: + /* __asm__ ("int3"); */ + goto warn; + + case 0x0e: + s->needed_bytes = 2; + goto warn; + + case 0x09: + dsp_out_data (s, 0xf8); + goto warn; + + case 0x0f: + s->needed_bytes = 1; + goto warn; + + case 0x10: + s->needed_bytes = 1; + goto warn; + + case 0x14: + s->needed_bytes = 2; + s->block_size = 0; + break; + + case 0x1c: /* Auto-Initialize DMA DAC, 8-bit */ + dma_cmd8 (s, DMA8_AUTO, -1); + break; + + case 0x20: /* Direct ADC, Juice/PL */ + dsp_out_data (s, 0xff); + goto warn; + + case 0x35: + qemu_log_mask(LOG_UNIMP, "0x35 - MIDI command not implemented\n"); + break; + + case 0x40: + s->freq = -1; + s->time_const = -1; + s->needed_bytes = 1; + break; + + case 0x41: + s->freq = -1; + s->time_const = -1; + s->needed_bytes = 2; + break; + + case 0x42: + s->freq = -1; + s->time_const = -1; + s->needed_bytes = 2; + goto warn; + + case 0x45: + dsp_out_data (s, 0xaa); + goto warn; + + case 0x47: /* Continue Auto-Initialize DMA 16bit */ + break; + + case 0x48: + s->needed_bytes = 2; + break; + + case 0x74: + s->needed_bytes = 2; /* DMA DAC, 4-bit ADPCM */ + qemu_log_mask(LOG_UNIMP, "0x74 - DMA DAC, 4-bit ADPCM not" + " implemented\n"); + break; + + case 0x75: /* DMA DAC, 4-bit ADPCM Reference */ + s->needed_bytes = 2; + qemu_log_mask(LOG_UNIMP, "0x75 - DMA DAC, 4-bit ADPCM Reference not" + " implemented\n"); + break; + + case 0x76: /* DMA DAC, 2.6-bit ADPCM */ + s->needed_bytes = 2; + qemu_log_mask(LOG_UNIMP, "0x76 - DMA DAC, 2.6-bit ADPCM not" + " implemented\n"); + break; + + case 0x77: /* DMA DAC, 2.6-bit ADPCM Reference */ + s->needed_bytes = 2; + qemu_log_mask(LOG_UNIMP, "0x77 - DMA DAC, 2.6-bit ADPCM Reference" + " not implemented\n"); + break; + + case 0x7d: + qemu_log_mask(LOG_UNIMP, "0x7d - Auto-Initialize DMA DAC, 4-bit" + " ADPCM Reference\n"); + qemu_log_mask(LOG_UNIMP, "not implemented\n"); + break; + + case 0x7f: + qemu_log_mask(LOG_UNIMP, "0x7f - Auto-Initialize DMA DAC, 2.6-bit" + " ADPCM Reference\n"); + qemu_log_mask(LOG_UNIMP, "not implemented\n"); + break; + + case 0x80: + s->needed_bytes = 2; + break; + + case 0x90: + case 0x91: + dma_cmd8 (s, ((cmd & 1) == 0) | DMA8_HIGH, -1); + break; + + case 0xd0: /* halt DMA operation. 8bit */ + control (s, 0); + break; + + case 0xd1: /* speaker on */ + speaker (s, 1); + break; + + case 0xd3: /* speaker off */ + speaker (s, 0); + break; + + case 0xd4: /* continue DMA operation. 8bit */ + /* KQ6 (or maybe Sierra's audblst.drv in general) resets + the frequency between halt/continue */ + continue_dma8 (s); + break; + + case 0xd5: /* halt DMA operation. 16bit */ + control (s, 0); + break; + + case 0xd6: /* continue DMA operation. 16bit */ + control (s, 1); + break; + + case 0xd9: /* exit auto-init DMA after this block. 16bit */ + s->dma_auto = 0; + break; + + case 0xda: /* exit auto-init DMA after this block.
8bit */ + s->dma_auto = 0; + break; + + case 0xe0: /* DSP identification */ + s->needed_bytes = 1; + break; + + case 0xe1: + dsp_out_data (s, s->ver & 0xff); + dsp_out_data (s, s->ver >> 8); + break; + + case 0xe2: + s->needed_bytes = 1; + goto warn; + + case 0xe3: + { + int i; + for (i = sizeof (e3) - 1; i >= 0; --i) + dsp_out_data (s, e3[i]); + } + break; + + case 0xe4: /* write test reg */ + s->needed_bytes = 1; + break; + + case 0xe7: + qemu_log_mask(LOG_UNIMP, "Attempt to probe for ESS (0xe7)?\n"); + break; + + case 0xe8: /* read test reg */ + dsp_out_data (s, s->test_reg); + break; + + case 0xf2: + case 0xf3: + dsp_out_data (s, 0xaa); + s->mixer_regs[0x82] |= (cmd == 0xf2) ? 1 : 2; + qemu_irq_raise (s->pic); + break; + + case 0xf9: + s->needed_bytes = 1; + goto warn; + + case 0xfa: + dsp_out_data (s, 0); + goto warn; + + case 0xfc: /* FIXME */ + dsp_out_data (s, 0); + goto warn; + + default: + qemu_log_mask(LOG_UNIMP, "Unrecognized command %#x\n", cmd); + break; + } + } + + if (!s->needed_bytes) { + ldebug ("\n"); + } + + exit: + if (!s->needed_bytes) { + s->cmd = -1; + } + else { + s->cmd = cmd; + } + return; + + warn: + qemu_log_mask(LOG_UNIMP, "warning: command %#x,%d is not truly understood" + " yet\n", cmd, s->needed_bytes); + goto exit; + +} + +static uint16_t dsp_get_lohi (SB16State *s) +{ + uint8_t hi = dsp_get_data (s); + uint8_t lo = dsp_get_data (s); + return (hi << 8) | lo; +} + +static uint16_t dsp_get_hilo (SB16State *s) +{ + uint8_t lo = dsp_get_data (s); + uint8_t hi = dsp_get_data (s); + return (hi << 8) | lo; +} + +static void complete (SB16State *s) +{ + int d0, d1, d2; + ldebug ("complete command %#x, in_index %d, needed_bytes %d\n", + s->cmd, s->in_index, s->needed_bytes); + + if (s->cmd > 0xaf && s->cmd < 0xd0) { + d2 = dsp_get_data (s); + d1 = dsp_get_data (s); + d0 = dsp_get_data (s); + + if (s->cmd & 8) { + dolog ("ADC params cmd = %#x d0 = %d, d1 = %d, d2 = %d\n", + s->cmd, d0, d1, d2); + } + else { + ldebug ("cmd = %#x d0 = %d, d1 = %d, d2 = %d\n", + s->cmd, d0, d1, d2); + dma_cmd (s, s->cmd, d0, d1 + (d2 << 8)); + } + } + else { + switch (s->cmd) { + case 0x04: + s->csp_mode = dsp_get_data (s); + s->csp_reg83r = 0; + s->csp_reg83w = 0; + ldebug ("CSP command 0x04: mode=%#x\n", s->csp_mode); + break; + + case 0x05: + s->csp_param = dsp_get_data (s); + s->csp_value = dsp_get_data (s); + ldebug ("CSP command 0x05: param=%#x value=%#x\n", + s->csp_param, + s->csp_value); + break; + + case 0x0e: + d0 = dsp_get_data (s); + d1 = dsp_get_data (s); + ldebug ("write CSP register %d <- %#x\n", d1, d0); + if (d1 == 0x83) { + ldebug ("0x83[%d] <- %#x\n", s->csp_reg83r, d0); + s->csp_reg83[s->csp_reg83r % 4] = d0; + s->csp_reg83r += 1; + } + else { + s->csp_regs[d1] = d0; + } + break; + + case 0x0f: + d0 = dsp_get_data (s); + ldebug ("read CSP register %#x -> %#x, mode=%#x\n", + d0, s->csp_regs[d0], s->csp_mode); + if (d0 == 0x83) { + ldebug ("0x83[%d] -> %#x\n", + s->csp_reg83w, + s->csp_reg83[s->csp_reg83w % 4]); + dsp_out_data (s, s->csp_reg83[s->csp_reg83w % 4]); + s->csp_reg83w += 1; + } + else { + dsp_out_data (s, s->csp_regs[d0]); + } + break; + + case 0x10: + d0 = dsp_get_data (s); + dolog ("cmd 0x10 d0=%#x\n", d0); + break; + + case 0x14: + dma_cmd8 (s, 0, dsp_get_lohi (s) + 1); + break; + + case 0x40: + s->time_const = dsp_get_data (s); + ldebug ("set time const %d\n", s->time_const); + break; + + case 0x41: + case 0x42: + /* + * 0x41 is documented as setting the output sample rate, + * and 0x42 the input sample rate, but in fact SB16 hardware + * seems to 
have only a single sample rate under the hood, + * and FT2 sets output freq with this (go figure). Compare: + * http://homepages.cae.wisc.edu/~brodskye/sb16doc/sb16doc.html#SamplingRate + */ + s->freq = restrict_sampling_rate(dsp_get_hilo(s)); + ldebug ("set freq %d\n", s->freq); + break; + + case 0x48: + s->block_size = dsp_get_lohi (s) + 1; + ldebug ("set dma block len %d\n", s->block_size); + break; + + case 0x74: + case 0x75: + case 0x76: + case 0x77: + /* ADPCM stuff, ignore */ + break; + + case 0x80: + { + int freq, samples, bytes; + int64_t ticks; + + freq = s->freq > 0 ? s->freq : 11025; + samples = dsp_get_lohi (s) + 1; + bytes = samples << s->fmt_stereo << (s->fmt_bits == 16); + ticks = muldiv64(bytes, NANOSECONDS_PER_SECOND, freq); + if (ticks < NANOSECONDS_PER_SECOND / 1024) { + qemu_irq_raise (s->pic); + } + else { + if (s->aux_ts) { + timer_mod ( + s->aux_ts, + qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + ticks + ); + } + } + ldebug ("mix silence %d %d %" PRId64 "\n", samples, bytes, ticks); + } + break; + + case 0xe0: + d0 = dsp_get_data (s); + s->out_data_len = 0; + ldebug ("E0 data = %#x\n", d0); + dsp_out_data (s, ~d0); + break; + + case 0xe2: +#ifdef DEBUG + d0 = dsp_get_data (s); + dolog ("E2 = %#x\n", d0); +#endif + break; + + case 0xe4: + s->test_reg = dsp_get_data (s); + break; + + case 0xf9: + d0 = dsp_get_data (s); + ldebug ("command 0xf9 with %#x\n", d0); + switch (d0) { + case 0x0e: + dsp_out_data (s, 0xff); + break; + + case 0x0f: + dsp_out_data (s, 0x07); + break; + + case 0x37: + dsp_out_data (s, 0x38); + break; + + default: + dsp_out_data (s, 0x00); + break; + } + break; + + default: + qemu_log_mask(LOG_UNIMP, "complete: unrecognized command %#x\n", + s->cmd); + return; + } + } + + ldebug ("\n"); + s->cmd = -1; +} + +static void legacy_reset (SB16State *s) +{ + struct audsettings as; + + s->freq = 11025; + s->fmt_signed = 0; + s->fmt_bits = 8; + s->fmt_stereo = 0; + + as.freq = s->freq; + as.nchannels = 1; + as.fmt = AUDIO_FORMAT_U8; + as.endianness = 0; + + s->voice = AUD_open_out ( + &s->card, + s->voice, + "sb16", + s, + SB_audio_callback, + &as + ); + + /* Not sure about that... 
*/ + /* AUD_set_active_out (s->voice, 1); */ +} + +static void reset (SB16State *s) +{ + qemu_irq_lower (s->pic); + if (s->dma_auto) { + qemu_irq_raise (s->pic); + qemu_irq_lower (s->pic); + } + + s->mixer_regs[0x82] = 0; + s->dma_auto = 0; + s->in_index = 0; + s->out_data_len = 0; + s->left_till_irq = 0; + s->needed_bytes = 0; + s->block_size = -1; + s->nzero = 0; + s->highspeed = 0; + s->v2x6 = 0; + s->cmd = -1; + + dsp_out_data (s, 0xaa); + speaker (s, 0); + control (s, 0); + legacy_reset (s); +} + +static void dsp_write(void *opaque, uint32_t nport, uint32_t val) +{ + SB16State *s = opaque; + int iport; + + iport = nport - s->port; + + ldebug ("write %#x <- %#x\n", nport, val); + switch (iport) { + case 0x06: + switch (val) { + case 0x00: + if (s->v2x6 == 1) { + reset (s); + } + s->v2x6 = 0; + break; + + case 0x01: + case 0x03: /* FreeBSD kludge */ + s->v2x6 = 1; + break; + + case 0xc6: + s->v2x6 = 0; /* Prince of Persia, csp.sys, diagnose.exe */ + break; + + case 0xb8: /* Panic */ + reset (s); + break; + + case 0x39: + dsp_out_data (s, 0x38); + reset (s); + s->v2x6 = 0x39; + break; + + default: + s->v2x6 = val; + break; + } + break; + + case 0x0c: /* write data or command | write status */ +/* if (s->highspeed) */ +/* break; */ + + if (s->needed_bytes == 0) { + command (s, val); +#if 0 + if (0 == s->needed_bytes) { + log_dsp (s); + } +#endif + } + else { + if (s->in_index == sizeof (s->in2_data)) { + dolog ("in data overrun\n"); + } + else { + s->in2_data[s->in_index++] = val; + if (s->in_index == s->needed_bytes) { + s->needed_bytes = 0; + complete (s); +#if 0 + log_dsp (s); +#endif + } + } + } + break; + + default: + ldebug ("(nport=%#x, val=%#x)\n", nport, val); + break; + } +} + +static uint32_t dsp_read(void *opaque, uint32_t nport) +{ + SB16State *s = opaque; + int iport, retval, ack = 0; + + iport = nport - s->port; + + switch (iport) { + case 0x06: /* reset */ + retval = 0xff; + break; + + case 0x0a: /* read data */ + if (s->out_data_len) { + retval = s->out_data[--s->out_data_len]; + s->last_read_byte = retval; + } + else { + if (s->cmd != -1) { + dolog ("empty output buffer for command %#x\n", + s->cmd); + } + retval = s->last_read_byte; + /* goto error; */ + } + break; + + case 0x0c: /* 0 can write */ + retval = s->can_write ? 0 : 0x80; + break; + + case 0x0d: /* timer interrupt clear */ + /* dolog ("timer interrupt clear\n"); */ + retval = 0; + break; + + case 0x0e: /* data available status | irq 8 ack */ + retval = (!s->out_data_len || s->highspeed) ? 
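+        /*
+         * Bit 7 of this status port means "output data available";
+         * reading the port also acks a pending 8-bit IRQ via mixer
+         * register 0x82 just below.
+         */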
0 : 0x80; + if (s->mixer_regs[0x82] & 1) { + ack = 1; + s->mixer_regs[0x82] &= ~1; + qemu_irq_lower (s->pic); + } + break; + + case 0x0f: /* irq 16 ack */ + retval = 0xff; + if (s->mixer_regs[0x82] & 2) { + ack = 1; + s->mixer_regs[0x82] &= ~2; + qemu_irq_lower (s->pic); + } + break; + + default: + goto error; + } + + if (!ack) { + ldebug ("read %#x -> %#x\n", nport, retval); + } + + return retval; + + error: + dolog ("warning: dsp_read %#x error\n", nport); + return 0xff; +} + +static void reset_mixer (SB16State *s) +{ + int i; + + memset (s->mixer_regs, 0xff, 0x7f); + memset (s->mixer_regs + 0x83, 0xff, sizeof (s->mixer_regs) - 0x83); + + s->mixer_regs[0x02] = 4; /* master volume 3bits */ + s->mixer_regs[0x06] = 4; /* MIDI volume 3bits */ + s->mixer_regs[0x08] = 0; /* CD volume 3bits */ + s->mixer_regs[0x0a] = 0; /* voice volume 2bits */ + + /* d5=input filt, d3=lowpass filt, d1,d2=input source */ + s->mixer_regs[0x0c] = 0; + + /* d5=output filt, d1=stereo switch */ + s->mixer_regs[0x0e] = 0; + + /* voice volume L d5,d7, R d1,d3 */ + s->mixer_regs[0x04] = (4 << 5) | (4 << 1); + /* master ... */ + s->mixer_regs[0x22] = (4 << 5) | (4 << 1); + /* MIDI ... */ + s->mixer_regs[0x26] = (4 << 5) | (4 << 1); + + for (i = 0x30; i < 0x48; i++) { + s->mixer_regs[i] = 0x20; + } +} + +static void mixer_write_indexb(void *opaque, uint32_t nport, uint32_t val) +{ + SB16State *s = opaque; + (void) nport; + s->mixer_nreg = val; +} + +static void mixer_write_datab(void *opaque, uint32_t nport, uint32_t val) +{ + SB16State *s = opaque; + + (void) nport; + ldebug ("mixer_write [%#x] <- %#x\n", s->mixer_nreg, val); + + switch (s->mixer_nreg) { + case 0x00: + reset_mixer (s); + break; + + case 0x80: + { + int irq = irq_of_magic (val); + ldebug ("setting irq to %d (val=%#x)\n", irq, val); + if (irq > 0) { + s->irq = irq; + } + } + break; + + case 0x81: + { + int dma, hdma; + + dma = ctz32 (val & 0xf); + hdma = ctz32 (val & 0xf0); + if (dma != s->dma || hdma != s->hdma) { + qemu_log_mask(LOG_GUEST_ERROR, "attempt to change DMA 8bit" + " %d(%d), 16bit %d(%d) (val=%#x)\n", dma, s->dma, + hdma, s->hdma, val); + } +#if 0 + s->dma = dma; + s->hdma = hdma; +#endif + } + break; + + case 0x82: + qemu_log_mask(LOG_GUEST_ERROR, "attempt to write into IRQ status" + " register (val=%#x)\n", val); + return; + + default: + if (s->mixer_nreg >= 0x80) { + ldebug ("attempt to write mixer[%#x] <- %#x\n", s->mixer_nreg, val); + } + break; + } + + s->mixer_regs[s->mixer_nreg] = val; +} + +static uint32_t mixer_read(void *opaque, uint32_t nport) +{ + SB16State *s = opaque; + + (void) nport; +#ifndef DEBUG_SB16_MOST + if (s->mixer_nreg != 0x82) { + ldebug ("mixer_read[%#x] -> %#x\n", + s->mixer_nreg, s->mixer_regs[s->mixer_nreg]); + } +#else + ldebug ("mixer_read[%#x] -> %#x\n", + s->mixer_nreg, s->mixer_regs[s->mixer_nreg]); +#endif + return s->mixer_regs[s->mixer_nreg]; +} + +static int write_audio (SB16State *s, int nchan, int dma_pos, + int dma_len, int len) +{ + IsaDma *isa_dma = nchan == s->dma ? 
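+    /*
+     * nchan is the ISA channel that requested service, so it selects
+     * the matching controller.  The loop below bounces data through
+     * tmpbuf: read from guest DMA memory, hand the bytes to the audio
+     * backend, and wrap at dma_len since the DMA buffer is cyclic.
+     */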
s->isa_dma : s->isa_hdma; + IsaDmaClass *k = ISADMA_GET_CLASS(isa_dma); + int temp, net; + uint8_t tmpbuf[4096]; + + temp = len; + net = 0; + + while (temp) { + int left = dma_len - dma_pos; + int copied; + size_t to_copy; + + to_copy = MIN (temp, left); + if (to_copy > sizeof (tmpbuf)) { + to_copy = sizeof (tmpbuf); + } + + copied = k->read_memory(isa_dma, nchan, tmpbuf, dma_pos, to_copy); + copied = AUD_write (s->voice, tmpbuf, copied); + + temp -= copied; + dma_pos = (dma_pos + copied) % dma_len; + net += copied; + + if (!copied) { + break; + } + } + + return net; +} + +static int SB_read_DMA (void *opaque, int nchan, int dma_pos, int dma_len) +{ + SB16State *s = opaque; + int till, copy, written, free; + + if (s->block_size <= 0) { + qemu_log_mask(LOG_GUEST_ERROR, "invalid block size=%d nchan=%d" + " dma_pos=%d dma_len=%d\n", s->block_size, nchan, + dma_pos, dma_len); + return dma_pos; + } + + if (s->left_till_irq < 0) { + s->left_till_irq = s->block_size; + } + + if (s->voice) { + free = s->audio_free & ~s->align; + if ((free <= 0) || !dma_len) { + return dma_pos; + } + } + else { + free = dma_len; + } + + copy = free; + till = s->left_till_irq; + +#ifdef DEBUG_SB16_MOST + dolog ("pos:%06d %d till:%d len:%d\n", + dma_pos, free, till, dma_len); +#endif + + if (till <= copy) { + if (s->dma_auto == 0) { + copy = till; + } + } + + written = write_audio (s, nchan, dma_pos, dma_len, copy); + dma_pos = (dma_pos + written) % dma_len; + s->left_till_irq -= written; + + if (s->left_till_irq <= 0) { + s->mixer_regs[0x82] |= (nchan & 4) ? 2 : 1; + qemu_irq_raise (s->pic); + if (s->dma_auto == 0) { + control (s, 0); + speaker (s, 0); + } + } + +#ifdef DEBUG_SB16_MOST + ldebug ("pos %5d free %5d size %5d till % 5d copy %5d written %5d size %5d\n", + dma_pos, free, dma_len, s->left_till_irq, copy, written, + s->block_size); +#endif + + while (s->left_till_irq <= 0) { + s->left_till_irq = s->block_size + s->left_till_irq; + } + + return dma_pos; +} + +static void SB_audio_callback (void *opaque, int free) +{ + SB16State *s = opaque; + s->audio_free = free; +} + +static int sb16_post_load (void *opaque, int version_id) +{ + SB16State *s = opaque; + + if (s->voice) { + AUD_close_out (&s->card, s->voice); + s->voice = NULL; + } + + if (s->dma_running) { + if (s->freq) { + struct audsettings as; + + s->audio_free = 0; + + as.freq = s->freq; + as.nchannels = 1 << s->fmt_stereo; + as.fmt = s->fmt; + as.endianness = 0; + + s->voice = AUD_open_out ( + &s->card, + s->voice, + "sb16", + s, + SB_audio_callback, + &as + ); + } + + control (s, 1); + speaker (s, s->speaker); + } + return 0; +} + +static const VMStateDescription vmstate_sb16 = { + .name = "sb16", + .version_id = 1, + .minimum_version_id = 1, + .post_load = sb16_post_load, + .fields = (VMStateField[]) { + VMSTATE_UINT32 (irq, SB16State), + VMSTATE_UINT32 (dma, SB16State), + VMSTATE_UINT32 (hdma, SB16State), + VMSTATE_UINT32 (port, SB16State), + VMSTATE_UINT32 (ver, SB16State), + VMSTATE_INT32 (in_index, SB16State), + VMSTATE_INT32 (out_data_len, SB16State), + VMSTATE_INT32 (fmt_stereo, SB16State), + VMSTATE_INT32 (fmt_signed, SB16State), + VMSTATE_INT32 (fmt_bits, SB16State), + VMSTATE_UINT32 (fmt, SB16State), + VMSTATE_INT32 (dma_auto, SB16State), + VMSTATE_INT32 (block_size, SB16State), + VMSTATE_INT32 (fifo, SB16State), + VMSTATE_INT32 (freq, SB16State), + VMSTATE_INT32 (time_const, SB16State), + VMSTATE_INT32 (speaker, SB16State), + VMSTATE_INT32 (needed_bytes, SB16State), + VMSTATE_INT32 (cmd, SB16State), + VMSTATE_INT32 (use_hdma, SB16State), 
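+        /*
+         * The order and width of these entries define the migration
+         * wire format and must not be rearranged.  Note that
+         * csp_param is apparently serialized twice below; the
+         * duplicate is kept because dropping it would break streams
+         * from existing versions.
+         */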
+ VMSTATE_INT32 (highspeed, SB16State), + VMSTATE_INT32 (can_write, SB16State), + VMSTATE_INT32 (v2x6, SB16State), + + VMSTATE_UINT8 (csp_param, SB16State), + VMSTATE_UINT8 (csp_value, SB16State), + VMSTATE_UINT8 (csp_mode, SB16State), + VMSTATE_UINT8 (csp_param, SB16State), + VMSTATE_BUFFER (csp_regs, SB16State), + VMSTATE_UINT8 (csp_index, SB16State), + VMSTATE_BUFFER (csp_reg83, SB16State), + VMSTATE_INT32 (csp_reg83r, SB16State), + VMSTATE_INT32 (csp_reg83w, SB16State), + + VMSTATE_BUFFER (in2_data, SB16State), + VMSTATE_BUFFER (out_data, SB16State), + VMSTATE_UINT8 (test_reg, SB16State), + VMSTATE_UINT8 (last_read_byte, SB16State), + + VMSTATE_INT32 (nzero, SB16State), + VMSTATE_INT32 (left_till_irq, SB16State), + VMSTATE_INT32 (dma_running, SB16State), + VMSTATE_INT32 (bytes_per_second, SB16State), + VMSTATE_INT32 (align, SB16State), + + VMSTATE_INT32 (mixer_nreg, SB16State), + VMSTATE_BUFFER (mixer_regs, SB16State), + + VMSTATE_END_OF_LIST () + } +}; + +static const MemoryRegionPortio sb16_ioport_list[] = { + { 4, 1, 1, .write = mixer_write_indexb }, + { 5, 1, 1, .read = mixer_read, .write = mixer_write_datab }, + { 6, 1, 1, .read = dsp_read, .write = dsp_write }, + { 10, 1, 1, .read = dsp_read }, + { 12, 1, 1, .write = dsp_write }, + { 12, 4, 1, .read = dsp_read }, + PORTIO_END_OF_LIST (), +}; + + +static void sb16_initfn (Object *obj) +{ + SB16State *s = SB16 (obj); + + s->cmd = -1; +} + +static void sb16_realizefn (DeviceState *dev, Error **errp) +{ + ISADevice *isadev = ISA_DEVICE (dev); + SB16State *s = SB16 (dev); + IsaDmaClass *k; + + s->isa_hdma = isa_get_dma(isa_bus_from_device(isadev), s->hdma); + s->isa_dma = isa_get_dma(isa_bus_from_device(isadev), s->dma); + if (!s->isa_dma || !s->isa_hdma) { + error_setg(errp, "ISA controller does not support DMA"); + return; + } + + isa_init_irq (isadev, &s->pic, s->irq); + + s->mixer_regs[0x80] = magic_of_irq (s->irq); + s->mixer_regs[0x81] = (1 << s->dma) | (1 << s->hdma); + s->mixer_regs[0x82] = 2 << 5; + + s->csp_regs[5] = 1; + s->csp_regs[9] = 0xf8; + + reset_mixer (s); + s->aux_ts = timer_new_ns(QEMU_CLOCK_VIRTUAL, aux_timer, s); + if (!s->aux_ts) { + error_setg(errp, "warning: Could not create auxiliary timer"); + } + + isa_register_portio_list(isadev, &s->portio_list, s->port, + sb16_ioport_list, s, "sb16"); + + k = ISADMA_GET_CLASS(s->isa_hdma); + k->register_channel(s->isa_hdma, s->hdma, SB_read_DMA, s); + + k = ISADMA_GET_CLASS(s->isa_dma); + k->register_channel(s->isa_dma, s->dma, SB_read_DMA, s); + + s->can_write = 1; + + AUD_register_card ("sb16", &s->card); +} + +static Property sb16_properties[] = { + DEFINE_AUDIO_PROPERTIES(SB16State, card), + DEFINE_PROP_UINT32 ("version", SB16State, ver, 0x0405), /* 4.5 */ + DEFINE_PROP_UINT32 ("iobase", SB16State, port, 0x220), + DEFINE_PROP_UINT32 ("irq", SB16State, irq, 5), + DEFINE_PROP_UINT32 ("dma", SB16State, dma, 1), + DEFINE_PROP_UINT32 ("dma16", SB16State, hdma, 5), + DEFINE_PROP_END_OF_LIST (), +}; + +static void sb16_class_initfn (ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS (klass); + + dc->realize = sb16_realizefn; + set_bit(DEVICE_CATEGORY_SOUND, dc->categories); + dc->desc = "Creative Sound Blaster 16"; + dc->vmsd = &vmstate_sb16; + device_class_set_props(dc, sb16_properties); +} + +static const TypeInfo sb16_info = { + .name = TYPE_SB16, + .parent = TYPE_ISA_DEVICE, + .instance_size = sizeof (SB16State), + .instance_init = sb16_initfn, + .class_init = sb16_class_initfn, +}; + +static void sb16_register_types (void) +{ + type_register_static 
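+    /*
+     * deprecated_register_soundhw() keeps the legacy "-soundhw sb16"
+     * option working: soundhw.c (further down in this patch) maps it
+     * onto "-device sb16" and prints a deprecation warning.
+     */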
(&sb16_info); + deprecated_register_soundhw("sb16", "Creative Sound Blaster 16", + 1, TYPE_SB16); +} + +type_init (sb16_register_types) diff --git a/hw/audio/soundhw.c b/hw/audio/soundhw.c new file mode 100644 index 000000000..173b674ff --- /dev/null +++ b/hw/audio/soundhw.c @@ -0,0 +1,177 @@ +/* + * QEMU System Emulator + * + * Copyright (c) 2003-2008 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ +#include "qemu/osdep.h" +#include "qemu/option.h" +#include "qemu/help_option.h" +#include "qemu/error-report.h" +#include "qom/object.h" +#include "hw/isa/isa.h" +#include "hw/pci/pci.h" +#include "hw/audio/soundhw.h" + +struct soundhw { + const char *name; + const char *descr; + const char *typename; + int enabled; + int isa; + union { + int (*init_isa) (ISABus *bus); + int (*init_pci) (PCIBus *bus); + } init; +}; + +static struct soundhw soundhw[9]; +static int soundhw_count; + +void isa_register_soundhw(const char *name, const char *descr, + int (*init_isa)(ISABus *bus)) +{ + assert(soundhw_count < ARRAY_SIZE(soundhw) - 1); + soundhw[soundhw_count].name = name; + soundhw[soundhw_count].descr = descr; + soundhw[soundhw_count].isa = 1; + soundhw[soundhw_count].init.init_isa = init_isa; + soundhw_count++; +} + +void pci_register_soundhw(const char *name, const char *descr, + int (*init_pci)(PCIBus *bus)) +{ + assert(soundhw_count < ARRAY_SIZE(soundhw) - 1); + soundhw[soundhw_count].name = name; + soundhw[soundhw_count].descr = descr; + soundhw[soundhw_count].isa = 0; + soundhw[soundhw_count].init.init_pci = init_pci; + soundhw_count++; +} + +void deprecated_register_soundhw(const char *name, const char *descr, + int isa, const char *typename) +{ + assert(soundhw_count < ARRAY_SIZE(soundhw) - 1); + soundhw[soundhw_count].name = name; + soundhw[soundhw_count].descr = descr; + soundhw[soundhw_count].isa = isa; + soundhw[soundhw_count].typename = typename; + soundhw_count++; +} + +void select_soundhw(const char *optarg) +{ + struct soundhw *c; + + if (is_help_option(optarg)) { + show_valid_cards: + + if (soundhw_count) { + printf("Valid sound card names (comma separated):\n"); + for (c = soundhw; c->name; ++c) { + printf ("%-11s %s\n", c->name, c->descr); + } + printf("\n-soundhw all will enable all of the above\n"); + } else { + printf("Machine has no user-selectable audio hardware " + "(it may or may not have always-present audio hardware).\n"); + } + exit(!is_help_option(optarg)); + } + else { + size_t l; + const char *p; + char *e; + int bad_card 
= 0; + + if (!strcmp(optarg, "all")) { + for (c = soundhw; c->name; ++c) { + c->enabled = 1; + } + return; + } + + p = optarg; + while (*p) { + e = strchr(p, ','); + l = !e ? strlen(p) : (size_t) (e - p); + + for (c = soundhw; c->name; ++c) { + if (!strncmp(c->name, p, l) && !c->name[l]) { + c->enabled = 1; + break; + } + } + + if (!c->name) { + if (l > 80) { + error_report("Unknown sound card name (too big to show)"); + } + else { + error_report("Unknown sound card name `%.*s'", + (int) l, p); + } + bad_card = 1; + } + p += l + (e != NULL); + } + + if (bad_card) { + goto show_valid_cards; + } + } +} + +void soundhw_init(void) +{ + struct soundhw *c; + ISABus *isa_bus = (ISABus *) object_resolve_path_type("", TYPE_ISA_BUS, NULL); + PCIBus *pci_bus = (PCIBus *) object_resolve_path_type("", TYPE_PCI_BUS, NULL); + + for (c = soundhw; c->name; ++c) { + if (c->enabled) { + if (c->typename) { + warn_report("'-soundhw %s' is deprecated, " + "please use '-device %s' instead", + c->name, c->typename); + if (c->isa) { + isa_create_simple(isa_bus, c->typename); + } else { + pci_create_simple(pci_bus, -1, c->typename); + } + } else if (c->isa) { + if (!isa_bus) { + error_report("ISA bus not available for %s", c->name); + exit(1); + } + c->init.init_isa(isa_bus); + } else { + if (!pci_bus) { + error_report("PCI bus not available for %s", c->name); + exit(1); + } + c->init.init_pci(pci_bus); + } + } + } +} + diff --git a/hw/audio/trace-events b/hw/audio/trace-events new file mode 100644 index 000000000..e0e71cd9b --- /dev/null +++ b/hw/audio/trace-events @@ -0,0 +1,13 @@ +# See docs/devel/tracing.rst for syntax documentation. + +# cs4231.c +cs4231_mem_readl_dreg(uint32_t reg, uint32_t ret) "read dreg %d: 0x%02x" +cs4231_mem_readl_reg(uint32_t reg, uint32_t ret) "read reg %d: 0x%08x" +cs4231_mem_writel_reg(uint32_t reg, uint32_t old, uint32_t val) "write reg %d: 0x%08x -> 0x%08x" +cs4231_mem_writel_dreg(uint32_t reg, uint32_t old, uint32_t val) "write dreg %d: 0x%02x -> 0x%02x" + +# hda-codec.c +hda_audio_running(const char *stream, int nr, bool running) "st %s, nr %d, run %d" +hda_audio_format(const char *stream, int chan, const char *fmt, int freq) "st %s, %d x %s @ %d Hz" +hda_audio_adjust(const char *stream, int pos) "st %s, pos %d" +hda_audio_overrun(const char *stream) "st %s" diff --git a/hw/audio/trace.h b/hw/audio/trace.h new file mode 100644 index 000000000..5c7516a4d --- /dev/null +++ b/hw/audio/trace.h @@ -0,0 +1 @@ +#include "trace/trace-hw_audio.h" diff --git a/hw/audio/via-ac97.c b/hw/audio/via-ac97.c new file mode 100644 index 000000000..6d556f74f --- /dev/null +++ b/hw/audio/via-ac97.c @@ -0,0 +1,93 @@ +/* + * VIA south bridges sound support + * + * This work is licensed under the GNU GPL license version 2 or later. + */ + +/* + * TODO: This is entirely boiler plate just registering empty PCI devices + * with the right ID guests expect, functionality should be added here. 
+ */ + +#include "qemu/osdep.h" +#include "hw/isa/vt82c686.h" +#include "hw/pci/pci.h" + +static void via_ac97_realize(PCIDevice *pci_dev, Error **errp) +{ + pci_set_word(pci_dev->config + PCI_COMMAND, + PCI_COMMAND_INVALIDATE | PCI_COMMAND_PARITY); + pci_set_word(pci_dev->config + PCI_STATUS, + PCI_STATUS_CAP_LIST | PCI_STATUS_DEVSEL_MEDIUM); + pci_set_long(pci_dev->config + PCI_INTERRUPT_PIN, 0x03); +} + +static void via_ac97_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + + k->realize = via_ac97_realize; + k->vendor_id = PCI_VENDOR_ID_VIA; + k->device_id = PCI_DEVICE_ID_VIA_AC97; + k->revision = 0x50; + k->class_id = PCI_CLASS_MULTIMEDIA_AUDIO; + set_bit(DEVICE_CATEGORY_SOUND, dc->categories); + dc->desc = "VIA AC97"; + /* Reason: Part of a south bridge chip */ + dc->user_creatable = false; +} + +static const TypeInfo via_ac97_info = { + .name = TYPE_VIA_AC97, + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(PCIDevice), + .class_init = via_ac97_class_init, + .interfaces = (InterfaceInfo[]) { + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { }, + }, +}; + +static void via_mc97_realize(PCIDevice *pci_dev, Error **errp) +{ + pci_set_word(pci_dev->config + PCI_COMMAND, + PCI_COMMAND_INVALIDATE | PCI_COMMAND_VGA_PALETTE); + pci_set_word(pci_dev->config + PCI_STATUS, PCI_STATUS_DEVSEL_MEDIUM); + pci_set_long(pci_dev->config + PCI_INTERRUPT_PIN, 0x03); +} + +static void via_mc97_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + + k->realize = via_mc97_realize; + k->vendor_id = PCI_VENDOR_ID_VIA; + k->device_id = PCI_DEVICE_ID_VIA_MC97; + k->class_id = PCI_CLASS_COMMUNICATION_OTHER; + k->revision = 0x30; + set_bit(DEVICE_CATEGORY_NETWORK, dc->categories); + dc->desc = "VIA MC97"; + /* Reason: Part of a south bridge chip */ + dc->user_creatable = false; +} + +static const TypeInfo via_mc97_info = { + .name = TYPE_VIA_MC97, + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(PCIDevice), + .class_init = via_mc97_class_init, + .interfaces = (InterfaceInfo[]) { + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { }, + }, +}; + +static void via_ac97_register_types(void) +{ + type_register_static(&via_ac97_info); + type_register_static(&via_mc97_info); +} + +type_init(via_ac97_register_types) diff --git a/hw/audio/wm8750.c b/hw/audio/wm8750.c new file mode 100644 index 000000000..b5722b37c --- /dev/null +++ b/hw/audio/wm8750.c @@ -0,0 +1,736 @@ +/* + * WM8750 audio CODEC. + * + * Copyright (c) 2006 Openedhand Ltd. + * Written by Andrzej Zaborowski + * + * This file is licensed under GNU GPL. 
+ */ + +#include "qemu/osdep.h" +#include "hw/i2c/i2c.h" +#include "migration/vmstate.h" +#include "qemu/module.h" +#include "hw/audio/wm8750.h" +#include "audio/audio.h" +#include "qom/object.h" + +#define IN_PORT_N 3 +#define OUT_PORT_N 3 + +#define CODEC "wm8750" + +typedef struct { + int adc; + int adc_hz; + int dac; + int dac_hz; +} WMRate; + +OBJECT_DECLARE_SIMPLE_TYPE(WM8750State, WM8750) + +struct WM8750State { + I2CSlave parent_obj; + + uint8_t i2c_data[2]; + int i2c_len; + QEMUSoundCard card; + SWVoiceIn *adc_voice[IN_PORT_N]; + SWVoiceOut *dac_voice[OUT_PORT_N]; + int enable; + void (*data_req)(void *, int, int); + void *opaque; + uint8_t data_in[4096]; + uint8_t data_out[4096]; + int idx_in, req_in; + int idx_out, req_out; + + SWVoiceOut **out[2]; + uint8_t outvol[7], outmute[2]; + SWVoiceIn **in[2]; + uint8_t invol[4], inmute[2]; + + uint8_t diff[2], pol, ds, monomix[2], alc, mute; + uint8_t path[4], mpath[2], power, format; + const WMRate *rate; + uint8_t rate_vmstate; + int adc_hz, dac_hz, ext_adc_hz, ext_dac_hz, master; +}; + +/* pow(10.0, -i / 20.0) * 255, i = 0..42 */ +static const uint8_t wm8750_vol_db_table[] = { + 255, 227, 203, 181, 161, 143, 128, 114, 102, 90, 81, 72, 64, 57, 51, 45, + 40, 36, 32, 29, 26, 23, 20, 18, 16, 14, 13, 11, 10, 9, 8, 7, 6, 6, 5, 5, + 4, 4, 3, 3, 3, 2, 2 +}; + +#define WM8750_OUTVOL_TRANSFORM(x) wm8750_vol_db_table[(0x7f - x) / 3] +#define WM8750_INVOL_TRANSFORM(x) (x << 2) + +static inline void wm8750_in_load(WM8750State *s) +{ + if (s->idx_in + s->req_in <= sizeof(s->data_in)) + return; + s->idx_in = MAX(0, (int) sizeof(s->data_in) - s->req_in); + AUD_read(*s->in[0], s->data_in + s->idx_in, + sizeof(s->data_in) - s->idx_in); +} + +static inline void wm8750_out_flush(WM8750State *s) +{ + int sent = 0; + while (sent < s->idx_out) + sent += AUD_write(*s->out[0], s->data_out + sent, s->idx_out - sent) + ?: s->idx_out; + s->idx_out = 0; +} + +static void wm8750_audio_in_cb(void *opaque, int avail_b) +{ + WM8750State *s = (WM8750State *) opaque; + s->req_in = avail_b; + s->data_req(s->opaque, s->req_out >> 2, avail_b >> 2); +} + +static void wm8750_audio_out_cb(void *opaque, int free_b) +{ + WM8750State *s = (WM8750State *) opaque; + + if (s->idx_out >= free_b) { + s->idx_out = free_b; + s->req_out = 0; + wm8750_out_flush(s); + } else + s->req_out = free_b - s->idx_out; + + s->data_req(s->opaque, s->req_out >> 2, s->req_in >> 2); +} + +static const WMRate wm_rate_table[] = { + { 256, 48000, 256, 48000 }, /* SR: 00000 */ + { 384, 48000, 384, 48000 }, /* SR: 00001 */ + { 256, 48000, 1536, 8000 }, /* SR: 00010 */ + { 384, 48000, 2304, 8000 }, /* SR: 00011 */ + { 1536, 8000, 256, 48000 }, /* SR: 00100 */ + { 2304, 8000, 384, 48000 }, /* SR: 00101 */ + { 1536, 8000, 1536, 8000 }, /* SR: 00110 */ + { 2304, 8000, 2304, 8000 }, /* SR: 00111 */ + { 1024, 12000, 1024, 12000 }, /* SR: 01000 */ + { 1526, 12000, 1536, 12000 }, /* SR: 01001 */ + { 768, 16000, 768, 16000 }, /* SR: 01010 */ + { 1152, 16000, 1152, 16000 }, /* SR: 01011 */ + { 384, 32000, 384, 32000 }, /* SR: 01100 */ + { 576, 32000, 576, 32000 }, /* SR: 01101 */ + { 128, 96000, 128, 96000 }, /* SR: 01110 */ + { 192, 96000, 192, 96000 }, /* SR: 01111 */ + { 256, 44100, 256, 44100 }, /* SR: 10000 */ + { 384, 44100, 384, 44100 }, /* SR: 10001 */ + { 256, 44100, 1408, 8018 }, /* SR: 10010 */ + { 384, 44100, 2112, 8018 }, /* SR: 10011 */ + { 1408, 8018, 256, 44100 }, /* SR: 10100 */ + { 2112, 8018, 384, 44100 }, /* SR: 10101 */ + { 1408, 8018, 1408, 8018 }, /* SR: 10110 */ + { 2112, 8018, 2112, 8018 
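+    /*
+     * Each row maps the five SR bits of register R8 to a master-clock
+     * divider and the resulting sample rate, separately for ADC and
+     * DAC, e.g. 256 * 48000 = 12.288 MHz and 2112 * 8018 ~= 16.9344
+     * MHz.  Only the *_hz fields are consumed (by wm8750_clk_update()
+     * below); the 1526 divider in the SR 01001 row is presumably
+     * meant to read 1536 (18.432 MHz / 12 kHz).
+     */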
}, /* SR: 10111 */ + { 1024, 11025, 1024, 11025 }, /* SR: 11000 */ + { 1536, 11025, 1536, 11025 }, /* SR: 11001 */ + { 512, 22050, 512, 22050 }, /* SR: 11010 */ + { 768, 22050, 768, 22050 }, /* SR: 11011 */ + { 512, 24000, 512, 24000 }, /* SR: 11100 */ + { 768, 24000, 768, 24000 }, /* SR: 11101 */ + { 128, 88200, 128, 88200 }, /* SR: 11110 */ + { 192, 88200, 192, 88200 }, /* SR: 11111 */ +}; + +static void wm8750_vol_update(WM8750State *s) +{ + /* FIXME: multiply all volumes by s->invol[2], s->invol[3] */ + + AUD_set_volume_in(s->adc_voice[0], s->mute, + s->inmute[0] ? 0 : WM8750_INVOL_TRANSFORM(s->invol[0]), + s->inmute[1] ? 0 : WM8750_INVOL_TRANSFORM(s->invol[1])); + AUD_set_volume_in(s->adc_voice[1], s->mute, + s->inmute[0] ? 0 : WM8750_INVOL_TRANSFORM(s->invol[0]), + s->inmute[1] ? 0 : WM8750_INVOL_TRANSFORM(s->invol[1])); + AUD_set_volume_in(s->adc_voice[2], s->mute, + s->inmute[0] ? 0 : WM8750_INVOL_TRANSFORM(s->invol[0]), + s->inmute[1] ? 0 : WM8750_INVOL_TRANSFORM(s->invol[1])); + + /* FIXME: multiply all volumes by s->outvol[0], s->outvol[1] */ + + /* Speaker: LOUT2VOL ROUT2VOL */ + AUD_set_volume_out(s->dac_voice[0], s->mute, + s->outmute[0] ? 0 : WM8750_OUTVOL_TRANSFORM(s->outvol[4]), + s->outmute[1] ? 0 : WM8750_OUTVOL_TRANSFORM(s->outvol[5])); + + /* Headphone: LOUT1VOL ROUT1VOL */ + AUD_set_volume_out(s->dac_voice[1], s->mute, + s->outmute[0] ? 0 : WM8750_OUTVOL_TRANSFORM(s->outvol[2]), + s->outmute[1] ? 0 : WM8750_OUTVOL_TRANSFORM(s->outvol[3])); + + /* MONOOUT: MONOVOL MONOVOL */ + AUD_set_volume_out(s->dac_voice[2], s->mute, + s->outmute[0] ? 0 : WM8750_OUTVOL_TRANSFORM(s->outvol[6]), + s->outmute[1] ? 0 : WM8750_OUTVOL_TRANSFORM(s->outvol[6])); +} + +static void wm8750_set_format(WM8750State *s) +{ + int i; + struct audsettings in_fmt; + struct audsettings out_fmt; + + wm8750_out_flush(s); + + if (s->in[0] && *s->in[0]) + AUD_set_active_in(*s->in[0], 0); + if (s->out[0] && *s->out[0]) + AUD_set_active_out(*s->out[0], 0); + + for (i = 0; i < IN_PORT_N; i ++) + if (s->adc_voice[i]) { + AUD_close_in(&s->card, s->adc_voice[i]); + s->adc_voice[i] = NULL; + } + for (i = 0; i < OUT_PORT_N; i ++) + if (s->dac_voice[i]) { + AUD_close_out(&s->card, s->dac_voice[i]); + s->dac_voice[i] = NULL; + } + + if (!s->enable) + return; + + /* Setup input */ + in_fmt.endianness = 0; + in_fmt.nchannels = 2; + in_fmt.freq = s->adc_hz; + in_fmt.fmt = AUDIO_FORMAT_S16; + + s->adc_voice[0] = AUD_open_in(&s->card, s->adc_voice[0], + CODEC ".input1", s, wm8750_audio_in_cb, &in_fmt); + s->adc_voice[1] = AUD_open_in(&s->card, s->adc_voice[1], + CODEC ".input2", s, wm8750_audio_in_cb, &in_fmt); + s->adc_voice[2] = AUD_open_in(&s->card, s->adc_voice[2], + CODEC ".input3", s, wm8750_audio_in_cb, &in_fmt); + + /* Setup output */ + out_fmt.endianness = 0; + out_fmt.nchannels = 2; + out_fmt.freq = s->dac_hz; + out_fmt.fmt = AUDIO_FORMAT_S16; + + s->dac_voice[0] = AUD_open_out(&s->card, s->dac_voice[0], + CODEC ".speaker", s, wm8750_audio_out_cb, &out_fmt); + s->dac_voice[1] = AUD_open_out(&s->card, s->dac_voice[1], + CODEC ".headphone", s, wm8750_audio_out_cb, &out_fmt); + /* MONOMIX is also in stereo for simplicity */ + s->dac_voice[2] = AUD_open_out(&s->card, s->dac_voice[2], + CODEC ".monomix", s, wm8750_audio_out_cb, &out_fmt); + /* no sense emulating OUT3 which is a mix of other outputs */ + + wm8750_vol_update(s); + + /* We should connect the left and right channels to their + * respective inputs/outputs but we have completely no need + * for mixing or combining paths to different ports, so we + * 
connect both channels to where the left channel is routed. */ + if (s->in[0] && *s->in[0]) + AUD_set_active_in(*s->in[0], 1); + if (s->out[0] && *s->out[0]) + AUD_set_active_out(*s->out[0], 1); +} + +static void wm8750_clk_update(WM8750State *s, int ext) +{ + if (s->master || !s->ext_dac_hz) + s->dac_hz = s->rate->dac_hz; + else + s->dac_hz = s->ext_dac_hz; + + if (s->master || !s->ext_adc_hz) + s->adc_hz = s->rate->adc_hz; + else + s->adc_hz = s->ext_adc_hz; + + if (s->master || (!s->ext_dac_hz && !s->ext_adc_hz)) { + if (!ext) + wm8750_set_format(s); + } else { + if (ext) + wm8750_set_format(s); + } +} + +static void wm8750_reset(I2CSlave *i2c) +{ + WM8750State *s = WM8750(i2c); + + s->rate = &wm_rate_table[0]; + s->enable = 0; + wm8750_clk_update(s, 1); + s->diff[0] = 0; + s->diff[1] = 0; + s->ds = 0; + s->alc = 0; + s->in[0] = &s->adc_voice[0]; + s->invol[0] = 0x17; + s->invol[1] = 0x17; + s->invol[2] = 0xc3; + s->invol[3] = 0xc3; + s->out[0] = &s->dac_voice[0]; + s->outvol[0] = 0xff; + s->outvol[1] = 0xff; + s->outvol[2] = 0x79; + s->outvol[3] = 0x79; + s->outvol[4] = 0x79; + s->outvol[5] = 0x79; + s->outvol[6] = 0x79; + s->inmute[0] = 0; + s->inmute[1] = 0; + s->outmute[0] = 0; + s->outmute[1] = 0; + s->mute = 1; + s->path[0] = 0; + s->path[1] = 0; + s->path[2] = 0; + s->path[3] = 0; + s->mpath[0] = 0; + s->mpath[1] = 0; + s->format = 0x0a; + s->idx_in = sizeof(s->data_in); + s->req_in = 0; + s->idx_out = 0; + s->req_out = 0; + wm8750_vol_update(s); + s->i2c_len = 0; +} + +static int wm8750_event(I2CSlave *i2c, enum i2c_event event) +{ + WM8750State *s = WM8750(i2c); + + switch (event) { + case I2C_START_SEND: + s->i2c_len = 0; + break; + case I2C_FINISH: +#ifdef VERBOSE + if (s->i2c_len < 2) + printf("%s: message too short (%i bytes)\n", + __func__, s->i2c_len); +#endif + break; + default: + break; + } + + return 0; +} + +#define WM8750_LINVOL 0x00 +#define WM8750_RINVOL 0x01 +#define WM8750_LOUT1V 0x02 +#define WM8750_ROUT1V 0x03 +#define WM8750_ADCDAC 0x05 +#define WM8750_IFACE 0x07 +#define WM8750_SRATE 0x08 +#define WM8750_LDAC 0x0a +#define WM8750_RDAC 0x0b +#define WM8750_BASS 0x0c +#define WM8750_TREBLE 0x0d +#define WM8750_RESET 0x0f +#define WM8750_3D 0x10 +#define WM8750_ALC1 0x11 +#define WM8750_ALC2 0x12 +#define WM8750_ALC3 0x13 +#define WM8750_NGATE 0x14 +#define WM8750_LADC 0x15 +#define WM8750_RADC 0x16 +#define WM8750_ADCTL1 0x17 +#define WM8750_ADCTL2 0x18 +#define WM8750_PWR1 0x19 +#define WM8750_PWR2 0x1a +#define WM8750_ADCTL3 0x1b +#define WM8750_ADCIN 0x1f +#define WM8750_LADCIN 0x20 +#define WM8750_RADCIN 0x21 +#define WM8750_LOUTM1 0x22 +#define WM8750_LOUTM2 0x23 +#define WM8750_ROUTM1 0x24 +#define WM8750_ROUTM2 0x25 +#define WM8750_MOUTM1 0x26 +#define WM8750_MOUTM2 0x27 +#define WM8750_LOUT2V 0x28 +#define WM8750_ROUT2V 0x29 +#define WM8750_MOUTV 0x2a + +static int wm8750_tx(I2CSlave *i2c, uint8_t data) +{ + WM8750State *s = WM8750(i2c); + uint8_t cmd; + uint16_t value; + + if (s->i2c_len >= 2) { +#ifdef VERBOSE + printf("%s: long message (%i bytes)\n", __func__, s->i2c_len); +#endif + return 1; + } + s->i2c_data[s->i2c_len ++] = data; + if (s->i2c_len != 2) + return 0; + + cmd = s->i2c_data[0] >> 1; + value = ((s->i2c_data[0] << 8) | s->i2c_data[1]) & 0x1ff; + + switch (cmd) { + case WM8750_LADCIN: /* ADC Signal Path Control (Left) */ + s->diff[0] = (((value >> 6) & 3) == 3); /* LINSEL */ + if (s->diff[0]) + s->in[0] = &s->adc_voice[0 + s->ds * 1]; + else + s->in[0] = &s->adc_voice[((value >> 6) & 3) * 1 + 0]; + break; + + case WM8750_RADCIN: /* ADC 
Signal Path Control (Right) */ + s->diff[1] = (((value >> 6) & 3) == 3); /* RINSEL */ + if (s->diff[1]) + s->in[1] = &s->adc_voice[0 + s->ds * 1]; + else + s->in[1] = &s->adc_voice[((value >> 6) & 3) * 1 + 0]; + break; + + case WM8750_ADCIN: /* ADC Input Mode */ + s->ds = (value >> 8) & 1; /* DS */ + if (s->diff[0]) + s->in[0] = &s->adc_voice[0 + s->ds * 1]; + if (s->diff[1]) + s->in[1] = &s->adc_voice[0 + s->ds * 1]; + s->monomix[0] = (value >> 6) & 3; /* MONOMIX */ + break; + + case WM8750_ADCTL1: /* Additional Control (1) */ + s->monomix[1] = (value >> 1) & 1; /* DMONOMIX */ + break; + + case WM8750_PWR1: /* Power Management (1) */ + s->enable = ((value >> 6) & 7) == 3; /* VMIDSEL, VREF */ + wm8750_set_format(s); + break; + + case WM8750_LINVOL: /* Left Channel PGA */ + s->invol[0] = value & 0x3f; /* LINVOL */ + s->inmute[0] = (value >> 7) & 1; /* LINMUTE */ + wm8750_vol_update(s); + break; + + case WM8750_RINVOL: /* Right Channel PGA */ + s->invol[1] = value & 0x3f; /* RINVOL */ + s->inmute[1] = (value >> 7) & 1; /* RINMUTE */ + wm8750_vol_update(s); + break; + + case WM8750_ADCDAC: /* ADC and DAC Control */ + s->pol = (value >> 5) & 3; /* ADCPOL */ + s->mute = (value >> 3) & 1; /* DACMU */ + wm8750_vol_update(s); + break; + + case WM8750_ADCTL3: /* Additional Control (3) */ + break; + + case WM8750_LADC: /* Left ADC Digital Volume */ + s->invol[2] = value & 0xff; /* LADCVOL */ + wm8750_vol_update(s); + break; + + case WM8750_RADC: /* Right ADC Digital Volume */ + s->invol[3] = value & 0xff; /* RADCVOL */ + wm8750_vol_update(s); + break; + + case WM8750_ALC1: /* ALC Control (1) */ + s->alc = (value >> 7) & 3; /* ALCSEL */ + break; + + case WM8750_NGATE: /* Noise Gate Control */ + case WM8750_3D: /* 3D enhance */ + break; + + case WM8750_LDAC: /* Left Channel Digital Volume */ + s->outvol[0] = value & 0xff; /* LDACVOL */ + wm8750_vol_update(s); + break; + + case WM8750_RDAC: /* Right Channel Digital Volume */ + s->outvol[1] = value & 0xff; /* RDACVOL */ + wm8750_vol_update(s); + break; + + case WM8750_BASS: /* Bass Control */ + break; + + case WM8750_LOUTM1: /* Left Mixer Control (1) */ + s->path[0] = (value >> 8) & 1; /* LD2LO */ + /* TODO: mute/unmute respective paths */ + wm8750_vol_update(s); + break; + + case WM8750_LOUTM2: /* Left Mixer Control (2) */ + s->path[1] = (value >> 8) & 1; /* RD2LO */ + /* TODO: mute/unmute respective paths */ + wm8750_vol_update(s); + break; + + case WM8750_ROUTM1: /* Right Mixer Control (1) */ + s->path[2] = (value >> 8) & 1; /* LD2RO */ + /* TODO: mute/unmute respective paths */ + wm8750_vol_update(s); + break; + + case WM8750_ROUTM2: /* Right Mixer Control (2) */ + s->path[3] = (value >> 8) & 1; /* RD2RO */ + /* TODO: mute/unmute respective paths */ + wm8750_vol_update(s); + break; + + case WM8750_MOUTM1: /* Mono Mixer Control (1) */ + s->mpath[0] = (value >> 8) & 1; /* LD2MO */ + /* TODO: mute/unmute respective paths */ + wm8750_vol_update(s); + break; + + case WM8750_MOUTM2: /* Mono Mixer Control (2) */ + s->mpath[1] = (value >> 8) & 1; /* RD2MO */ + /* TODO: mute/unmute respective paths */ + wm8750_vol_update(s); + break; + + case WM8750_LOUT1V: /* LOUT1 Volume */ + s->outvol[2] = value & 0x7f; /* LOUT1VOL */ + wm8750_vol_update(s); + break; + + case WM8750_LOUT2V: /* LOUT2 Volume */ + s->outvol[4] = value & 0x7f; /* LOUT2VOL */ + wm8750_vol_update(s); + break; + + case WM8750_ROUT1V: /* ROUT1 Volume */ + s->outvol[3] = value & 0x7f; /* ROUT1VOL */ + wm8750_vol_update(s); + break; + + case WM8750_ROUT2V: /* ROUT2 Volume */ + s->outvol[5] = value 
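+    /*
+     * Only the low seven bits encode the volume; on real hardware
+     * bit 8 is a volume-update strobe, which this model ignores,
+     * applying every write immediately instead.
+     */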
& 0x7f; /* ROUT2VOL */ + wm8750_vol_update(s); + break; + + case WM8750_MOUTV: /* MONOOUT Volume */ + s->outvol[6] = value & 0x7f; /* MONOOUTVOL */ + wm8750_vol_update(s); + break; + + case WM8750_ADCTL2: /* Additional Control (2) */ + break; + + case WM8750_PWR2: /* Power Management (2) */ + s->power = value & 0x7e; + /* TODO: mute/unmute respective paths */ + wm8750_vol_update(s); + break; + + case WM8750_IFACE: /* Digital Audio Interface Format */ + s->format = value; + s->master = (value >> 6) & 1; /* MS */ + wm8750_clk_update(s, s->master); + break; + + case WM8750_SRATE: /* Clocking and Sample Rate Control */ + s->rate = &wm_rate_table[(value >> 1) & 0x1f]; + wm8750_clk_update(s, 0); + break; + + case WM8750_RESET: /* Reset */ + wm8750_reset(I2C_SLAVE(s)); + break; + +#ifdef VERBOSE + default: + printf("%s: unknown register %02x\n", __func__, cmd); +#endif + } + + return 0; +} + +static uint8_t wm8750_rx(I2CSlave *i2c) +{ + return 0x00; +} + +static int wm8750_pre_save(void *opaque) +{ + WM8750State *s = opaque; + + s->rate_vmstate = s->rate - wm_rate_table; + + return 0; +} + +static int wm8750_post_load(void *opaque, int version_id) +{ + WM8750State *s = opaque; + + s->rate = &wm_rate_table[s->rate_vmstate & 0x1f]; + return 0; +} + +static const VMStateDescription vmstate_wm8750 = { + .name = CODEC, + .version_id = 0, + .minimum_version_id = 0, + .pre_save = wm8750_pre_save, + .post_load = wm8750_post_load, + .fields = (VMStateField[]) { + VMSTATE_UINT8_ARRAY(i2c_data, WM8750State, 2), + VMSTATE_INT32(i2c_len, WM8750State), + VMSTATE_INT32(enable, WM8750State), + VMSTATE_INT32(idx_in, WM8750State), + VMSTATE_INT32(req_in, WM8750State), + VMSTATE_INT32(idx_out, WM8750State), + VMSTATE_INT32(req_out, WM8750State), + VMSTATE_UINT8_ARRAY(outvol, WM8750State, 7), + VMSTATE_UINT8_ARRAY(outmute, WM8750State, 2), + VMSTATE_UINT8_ARRAY(invol, WM8750State, 4), + VMSTATE_UINT8_ARRAY(inmute, WM8750State, 2), + VMSTATE_UINT8_ARRAY(diff, WM8750State, 2), + VMSTATE_UINT8(pol, WM8750State), + VMSTATE_UINT8(ds, WM8750State), + VMSTATE_UINT8_ARRAY(monomix, WM8750State, 2), + VMSTATE_UINT8(alc, WM8750State), + VMSTATE_UINT8(mute, WM8750State), + VMSTATE_UINT8_ARRAY(path, WM8750State, 4), + VMSTATE_UINT8_ARRAY(mpath, WM8750State, 2), + VMSTATE_UINT8(format, WM8750State), + VMSTATE_UINT8(power, WM8750State), + VMSTATE_UINT8(rate_vmstate, WM8750State), + VMSTATE_I2C_SLAVE(parent_obj, WM8750State), + VMSTATE_END_OF_LIST() + } +}; + +static void wm8750_realize(DeviceState *dev, Error **errp) +{ + WM8750State *s = WM8750(dev); + + AUD_register_card(CODEC, &s->card); + wm8750_reset(I2C_SLAVE(s)); +} + +#if 0 +static void wm8750_fini(I2CSlave *i2c) +{ + WM8750State *s = WM8750(i2c); + + wm8750_reset(I2C_SLAVE(s)); + AUD_remove_card(&s->card); + g_free(s); +} +#endif + +void wm8750_data_req_set(DeviceState *dev, data_req_cb *data_req, void *opaque) +{ + WM8750State *s = WM8750(dev); + + s->data_req = data_req; + s->opaque = opaque; +} + +void wm8750_dac_dat(void *opaque, uint32_t sample) +{ + WM8750State *s = (WM8750State *) opaque; + + *(uint32_t *) &s->data_out[s->idx_out] = sample; + s->req_out -= 4; + s->idx_out += 4; + if (s->idx_out >= sizeof(s->data_out) || s->req_out <= 0) + wm8750_out_flush(s); +} + +void *wm8750_dac_buffer(void *opaque, int samples) +{ + WM8750State *s = (WM8750State *) opaque; + /* XXX: Should check if there are samples free samples available */ + void *ret = s->data_out + s->idx_out; + + s->idx_out += samples << 2; + s->req_out -= samples << 2; + return ret; +} + +void 
wm8750_dac_commit(void *opaque) +{ + WM8750State *s = (WM8750State *) opaque; + + wm8750_out_flush(s); +} + +uint32_t wm8750_adc_dat(void *opaque) +{ + WM8750State *s = (WM8750State *) opaque; + uint32_t *data; + + if (s->idx_in >= sizeof(s->data_in)) { + wm8750_in_load(s); + if (s->idx_in >= sizeof(s->data_in)) { + return 0x80008000; /* silence in AUDIO_FORMAT_S16 sample format */ + } + } + + data = (uint32_t *) &s->data_in[s->idx_in]; + s->req_in -= 4; + s->idx_in += 4; + return *data; +} + +void wm8750_set_bclk_in(void *opaque, int new_hz) +{ + WM8750State *s = (WM8750State *) opaque; + + s->ext_adc_hz = new_hz; + s->ext_dac_hz = new_hz; + wm8750_clk_update(s, 1); +} + +static Property wm8750_properties[] = { + DEFINE_AUDIO_PROPERTIES(WM8750State, card), + DEFINE_PROP_END_OF_LIST(), +}; + +static void wm8750_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + I2CSlaveClass *sc = I2C_SLAVE_CLASS(klass); + + dc->realize = wm8750_realize; + sc->event = wm8750_event; + sc->recv = wm8750_rx; + sc->send = wm8750_tx; + dc->vmsd = &vmstate_wm8750; + device_class_set_props(dc, wm8750_properties); +} + +static const TypeInfo wm8750_info = { + .name = TYPE_WM8750, + .parent = TYPE_I2C_SLAVE, + .instance_size = sizeof(WM8750State), + .class_init = wm8750_class_init, +}; + +static void wm8750_register_types(void) +{ + type_register_static(&wm8750_info); +} + +type_init(wm8750_register_types) diff --git a/hw/avr/Kconfig b/hw/avr/Kconfig new file mode 100644 index 000000000..d31298c3c --- /dev/null +++ b/hw/avr/Kconfig @@ -0,0 +1,9 @@ +config AVR_ATMEGA_MCU + bool + select AVR_TIMER16 + select AVR_USART + select AVR_POWER + +config ARDUINO + select AVR_ATMEGA_MCU + select UNIMP diff --git a/hw/avr/arduino.c b/hw/avr/arduino.c new file mode 100644 index 000000000..48ef47834 --- /dev/null +++ b/hw/avr/arduino.c @@ -0,0 +1,159 @@ +/* + * QEMU Arduino boards + * + * Copyright (c) 2019-2020 Philippe Mathieu-Daudé + * + * This work is licensed under the terms of the GNU GPLv2 or later. + * See the COPYING file in the top-level directory. 
+ * SPDX-License-Identifier: GPL-2.0-or-later + */ + +/* TODO: Implement the use of EXTRAM */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "atmega.h" +#include "boot.h" +#include "qom/object.h" + +struct ArduinoMachineState { + /*< private >*/ + MachineState parent_obj; + /*< public >*/ + AtmegaMcuState mcu; +}; +typedef struct ArduinoMachineState ArduinoMachineState; + +struct ArduinoMachineClass { + /*< private >*/ + MachineClass parent_class; + /*< public >*/ + const char *mcu_type; + uint64_t xtal_hz; +}; +typedef struct ArduinoMachineClass ArduinoMachineClass; + +#define TYPE_ARDUINO_MACHINE \ + MACHINE_TYPE_NAME("arduino") +DECLARE_OBJ_CHECKERS(ArduinoMachineState, ArduinoMachineClass, + ARDUINO_MACHINE, TYPE_ARDUINO_MACHINE) + +static void arduino_machine_init(MachineState *machine) +{ + ArduinoMachineClass *amc = ARDUINO_MACHINE_GET_CLASS(machine); + ArduinoMachineState *ams = ARDUINO_MACHINE(machine); + + object_initialize_child(OBJECT(machine), "mcu", &ams->mcu, amc->mcu_type); + object_property_set_uint(OBJECT(&ams->mcu), "xtal-frequency-hz", + amc->xtal_hz, &error_abort); + sysbus_realize(SYS_BUS_DEVICE(&ams->mcu), &error_abort); + + if (machine->firmware) { + if (!avr_load_firmware(&ams->mcu.cpu, machine, + &ams->mcu.flash, machine->firmware)) { + exit(1); + } + } +} + +static void arduino_machine_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + + mc->init = arduino_machine_init; + mc->default_cpus = 1; + mc->min_cpus = mc->default_cpus; + mc->max_cpus = mc->default_cpus; + mc->no_floppy = 1; + mc->no_cdrom = 1; + mc->no_parallel = 1; +} + +static void arduino_duemilanove_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + ArduinoMachineClass *amc = ARDUINO_MACHINE_CLASS(oc); + + /* + * https://www.arduino.cc/en/Main/ArduinoBoardDuemilanove + * https://www.arduino.cc/en/uploads/Main/arduino-duemilanove-schematic.pdf + */ + mc->desc = "Arduino Duemilanove (ATmega168)"; + mc->alias = "2009"; + amc->mcu_type = TYPE_ATMEGA168_MCU; + amc->xtal_hz = 16 * 1000 * 1000; +}; + +static void arduino_uno_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + ArduinoMachineClass *amc = ARDUINO_MACHINE_CLASS(oc); + + /* + * https://store.arduino.cc/arduino-uno-rev3 + * https://www.arduino.cc/en/uploads/Main/arduino-uno-schematic.pdf + */ + mc->desc = "Arduino UNO (ATmega328P)"; + mc->alias = "uno"; + amc->mcu_type = TYPE_ATMEGA328_MCU; + amc->xtal_hz = 16 * 1000 * 1000; +}; + +static void arduino_mega_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + ArduinoMachineClass *amc = ARDUINO_MACHINE_CLASS(oc); + + /* + * https://www.arduino.cc/en/Main/ArduinoBoardMega + * https://www.arduino.cc/en/uploads/Main/arduino-mega2560-schematic.pdf + */ + mc->desc = "Arduino Mega (ATmega1280)"; + mc->alias = "mega"; + amc->mcu_type = TYPE_ATMEGA1280_MCU; + amc->xtal_hz = 16 * 1000 * 1000; +}; + +static void arduino_mega2560_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + ArduinoMachineClass *amc = ARDUINO_MACHINE_CLASS(oc); + + /* + * https://store.arduino.cc/arduino-mega-2560-rev3 + * https://www.arduino.cc/en/uploads/Main/arduino-mega2560_R3-sch.pdf + */ + mc->desc = "Arduino Mega 2560 (ATmega2560)"; + mc->alias = "mega2560"; + amc->mcu_type = TYPE_ATMEGA2560_MCU; + amc->xtal_hz = 16 * 1000 * 1000; /* CSTCE16M0V53-R0 */ +}; + +static const TypeInfo arduino_machine_types[] = { + { + .name =
MACHINE_TYPE_NAME("arduino-duemilanove"), + .parent = TYPE_ARDUINO_MACHINE, + .class_init = arduino_duemilanove_class_init, + }, { + .name = MACHINE_TYPE_NAME("arduino-uno"), + .parent = TYPE_ARDUINO_MACHINE, + .class_init = arduino_uno_class_init, + }, { + .name = MACHINE_TYPE_NAME("arduino-mega"), + .parent = TYPE_ARDUINO_MACHINE, + .class_init = arduino_mega_class_init, + }, { + .name = MACHINE_TYPE_NAME("arduino-mega-2560-v3"), + .parent = TYPE_ARDUINO_MACHINE, + .class_init = arduino_mega2560_class_init, + }, { + .name = TYPE_ARDUINO_MACHINE, + .parent = TYPE_MACHINE, + .instance_size = sizeof(ArduinoMachineState), + .class_size = sizeof(ArduinoMachineClass), + .class_init = arduino_machine_class_init, + .abstract = true, + } +}; + +DEFINE_TYPES(arduino_machine_types) diff --git a/hw/avr/atmega.c b/hw/avr/atmega.c new file mode 100644 index 000000000..0608e2d47 --- /dev/null +++ b/hw/avr/atmega.c @@ -0,0 +1,457 @@ +/* + * QEMU ATmega MCU + * + * Copyright (c) 2019-2020 Philippe Mathieu-Daudé + * + * This work is licensed under the terms of the GNU GPLv2 or later. + * See the COPYING file in the top-level directory. + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "qemu/module.h" +#include "qemu/units.h" +#include "qapi/error.h" +#include "exec/memory.h" +#include "exec/address-spaces.h" +#include "sysemu/sysemu.h" +#include "hw/qdev-properties.h" +#include "hw/sysbus.h" +#include "qom/object.h" +#include "hw/misc/unimp.h" +#include "atmega.h" + +enum AtmegaPeripheral { + POWER0, POWER1, + GPIOA, GPIOB, GPIOC, GPIOD, GPIOE, GPIOF, + GPIOG, GPIOH, GPIOI, GPIOJ, GPIOK, GPIOL, + USART0, USART1, USART2, USART3, + TIMER0, TIMER1, TIMER2, TIMER3, TIMER4, TIMER5, + PERIFMAX +}; + +#define GPIO(n) (n + GPIOA) +#define USART(n) (n + USART0) +#define TIMER(n) (n + TIMER0) +#define POWER(n) (n + POWER0) + +typedef struct { + uint16_t addr; + enum AtmegaPeripheral power_index; + uint8_t power_bit; + /* timer specific */ + uint16_t intmask_addr; + uint16_t intflag_addr; + bool is_timer16; +} peripheral_cfg; + +struct AtmegaMcuClass { + /*< private >*/ + SysBusDeviceClass parent_class; + /*< public >*/ + const char *uc_name; + const char *cpu_type; + size_t flash_size; + size_t eeprom_size; + size_t sram_size; + size_t io_size; + size_t gpio_count; + size_t adc_count; + const uint8_t *irq; + const peripheral_cfg *dev; +}; +typedef struct AtmegaMcuClass AtmegaMcuClass; + +DECLARE_CLASS_CHECKERS(AtmegaMcuClass, ATMEGA_MCU, + TYPE_ATMEGA_MCU) + +static const peripheral_cfg dev168_328[PERIFMAX] = { + [USART0] = { 0xc0, POWER0, 1 }, + [TIMER2] = { 0xb0, POWER0, 6, 0x70, 0x37, false }, + [TIMER1] = { 0x80, POWER0, 3, 0x6f, 0x36, true }, + [POWER0] = { 0x64 }, + [TIMER0] = { 0x44, POWER0, 5, 0x6e, 0x35, false }, + [GPIOD] = { 0x29 }, + [GPIOC] = { 0x26 }, + [GPIOB] = { 0x23 }, +}, dev1280_2560[PERIFMAX] = { + [USART3] = { 0x130, POWER1, 2 }, + [TIMER5] = { 0x120, POWER1, 5, 0x73, 0x3a, true }, + [GPIOL] = { 0x109 }, + [GPIOK] = { 0x106 }, + [GPIOJ] = { 0x103 }, + [GPIOH] = { 0x100 }, + [USART2] = { 0xd0, POWER1, 1 }, + [USART1] = { 0xc8, POWER1, 0 }, + [USART0] = { 0xc0, POWER0, 1 }, + [TIMER2] = { 0xb0, POWER0, 6, 0x70, 0x37, false }, /* TODO async */ + [TIMER4] = { 0xa0, POWER1, 4, 0x72, 0x39, true }, + [TIMER3] = { 0x90, POWER1, 3, 0x71, 0x38, true }, + [TIMER1] = { 0x80, POWER0, 3, 0x6f, 0x36, true }, + [POWER1] = { 0x65 }, + [POWER0] = { 0x64 }, + [TIMER0] = { 0x44, POWER0, 5, 0x6e, 0x35, false }, + [GPIOG] = { 0x32 }, + [GPIOF] = { 0x2f }, + [GPIOE] = { 0x2c }, 
+ [GPIOD] = { 0x29 }, + [GPIOC] = { 0x26 }, + [GPIOB] = { 0x23 }, + [GPIOA] = { 0x20 }, +}; + +enum AtmegaIrq { + USART0_RXC_IRQ, USART0_DRE_IRQ, USART0_TXC_IRQ, + USART1_RXC_IRQ, USART1_DRE_IRQ, USART1_TXC_IRQ, + USART2_RXC_IRQ, USART2_DRE_IRQ, USART2_TXC_IRQ, + USART3_RXC_IRQ, USART3_DRE_IRQ, USART3_TXC_IRQ, + TIMER0_CAPT_IRQ, TIMER0_COMPA_IRQ, TIMER0_COMPB_IRQ, + TIMER0_COMPC_IRQ, TIMER0_OVF_IRQ, + TIMER1_CAPT_IRQ, TIMER1_COMPA_IRQ, TIMER1_COMPB_IRQ, + TIMER1_COMPC_IRQ, TIMER1_OVF_IRQ, + TIMER2_CAPT_IRQ, TIMER2_COMPA_IRQ, TIMER2_COMPB_IRQ, + TIMER2_COMPC_IRQ, TIMER2_OVF_IRQ, + TIMER3_CAPT_IRQ, TIMER3_COMPA_IRQ, TIMER3_COMPB_IRQ, + TIMER3_COMPC_IRQ, TIMER3_OVF_IRQ, + TIMER4_CAPT_IRQ, TIMER4_COMPA_IRQ, TIMER4_COMPB_IRQ, + TIMER4_COMPC_IRQ, TIMER4_OVF_IRQ, + TIMER5_CAPT_IRQ, TIMER5_COMPA_IRQ, TIMER5_COMPB_IRQ, + TIMER5_COMPC_IRQ, TIMER5_OVF_IRQ, + IRQ_COUNT +}; + +#define USART_IRQ_COUNT 3 +#define USART_RXC_IRQ(n) (n * USART_IRQ_COUNT + USART0_RXC_IRQ) +#define USART_DRE_IRQ(n) (n * USART_IRQ_COUNT + USART0_DRE_IRQ) +#define USART_TXC_IRQ(n) (n * USART_IRQ_COUNT + USART0_TXC_IRQ) +#define TIMER_IRQ_COUNT 5 +#define TIMER_CAPT_IRQ(n) (n * TIMER_IRQ_COUNT + TIMER0_CAPT_IRQ) +#define TIMER_COMPA_IRQ(n) (n * TIMER_IRQ_COUNT + TIMER0_COMPA_IRQ) +#define TIMER_COMPB_IRQ(n) (n * TIMER_IRQ_COUNT + TIMER0_COMPB_IRQ) +#define TIMER_COMPC_IRQ(n) (n * TIMER_IRQ_COUNT + TIMER0_COMPC_IRQ) +#define TIMER_OVF_IRQ(n) (n * TIMER_IRQ_COUNT + TIMER0_OVF_IRQ) + +static const uint8_t irq168_328[IRQ_COUNT] = { + [TIMER2_COMPA_IRQ] = 8, + [TIMER2_COMPB_IRQ] = 9, + [TIMER2_OVF_IRQ] = 10, + [TIMER1_CAPT_IRQ] = 11, + [TIMER1_COMPA_IRQ] = 12, + [TIMER1_COMPB_IRQ] = 13, + [TIMER1_OVF_IRQ] = 14, + [TIMER0_COMPA_IRQ] = 15, + [TIMER0_COMPB_IRQ] = 16, + [TIMER0_OVF_IRQ] = 17, + [USART0_RXC_IRQ] = 19, + [USART0_DRE_IRQ] = 20, + [USART0_TXC_IRQ] = 21, +}, irq1280_2560[IRQ_COUNT] = { + [TIMER2_COMPA_IRQ] = 14, + [TIMER2_COMPB_IRQ] = 15, + [TIMER2_OVF_IRQ] = 16, + [TIMER1_CAPT_IRQ] = 17, + [TIMER1_COMPA_IRQ] = 18, + [TIMER1_COMPB_IRQ] = 19, + [TIMER1_COMPC_IRQ] = 20, + [TIMER1_OVF_IRQ] = 21, + [TIMER0_COMPA_IRQ] = 22, + [TIMER0_COMPB_IRQ] = 23, + [TIMER0_OVF_IRQ] = 24, + [USART0_RXC_IRQ] = 26, + [USART0_DRE_IRQ] = 27, + [USART0_TXC_IRQ] = 28, + [TIMER3_CAPT_IRQ] = 32, + [TIMER3_COMPA_IRQ] = 33, + [TIMER3_COMPB_IRQ] = 34, + [TIMER3_COMPC_IRQ] = 35, + [TIMER3_OVF_IRQ] = 36, + [USART1_RXC_IRQ] = 37, + [USART1_DRE_IRQ] = 38, + [USART1_TXC_IRQ] = 39, + [TIMER4_CAPT_IRQ] = 42, + [TIMER4_COMPA_IRQ] = 43, + [TIMER4_COMPB_IRQ] = 44, + [TIMER4_COMPC_IRQ] = 45, + [TIMER4_OVF_IRQ] = 46, + [TIMER5_CAPT_IRQ] = 47, + [TIMER5_COMPA_IRQ] = 48, + [TIMER5_COMPB_IRQ] = 49, + [TIMER5_COMPC_IRQ] = 50, + [TIMER5_OVF_IRQ] = 51, + [USART2_RXC_IRQ] = 52, + [USART2_DRE_IRQ] = 53, + [USART2_TXC_IRQ] = 54, + [USART3_RXC_IRQ] = 55, + [USART3_DRE_IRQ] = 56, + [USART3_TXC_IRQ] = 57, +}; + +static void connect_peripheral_irq(const AtmegaMcuClass *k, + SysBusDevice *dev, int dev_irqn, + DeviceState *cpu, + unsigned peripheral_index) +{ + int cpu_irq = k->irq[peripheral_index]; + + if (!cpu_irq) { + return; + } + /* FIXME move that to avr_cpu_set_int() once 'sample' board is removed */ + assert(cpu_irq >= 2); + cpu_irq -= 2; + + sysbus_connect_irq(dev, dev_irqn, qdev_get_gpio_in(cpu, cpu_irq)); +} + +static void connect_power_reduction_gpio(AtmegaMcuState *s, + const AtmegaMcuClass *k, + DeviceState *cpu, + unsigned peripheral_index) +{ + unsigned power_index = k->dev[peripheral_index].power_index; + assert(k->dev[power_index].addr); + 
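/* + * Note: 'cpu' here is really the peripheral being wired up. Output line + * 'power_bit' of the PRRx mask device (TYPE_AVR_MASK) drives the + * peripheral's GPIO input 0, which the USART and timer models use as + * their power/clock enable. + */ +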
sysbus_connect_irq(SYS_BUS_DEVICE(&s->pwr[power_index - POWER0]), + k->dev[peripheral_index].power_bit, + qdev_get_gpio_in(cpu, 0)); +} + +static void atmega_realize(DeviceState *dev, Error **errp) +{ + AtmegaMcuState *s = ATMEGA_MCU(dev); + const AtmegaMcuClass *mc = ATMEGA_MCU_GET_CLASS(dev); + DeviceState *cpudev; + SysBusDevice *sbd; + char *devname; + size_t i; + + assert(mc->io_size <= 0x200); + + if (!s->xtal_freq_hz) { + error_setg(errp, "\"xtal-frequency-hz\" property must be provided."); + return; + } + + /* CPU */ + object_initialize_child(OBJECT(dev), "cpu", &s->cpu, mc->cpu_type); + object_property_set_bool(OBJECT(&s->cpu), "realized", true, &error_abort); + cpudev = DEVICE(&s->cpu); + + /* SRAM */ + memory_region_init_ram(&s->sram, OBJECT(dev), "sram", mc->sram_size, + &error_abort); + memory_region_add_subregion(get_system_memory(), + OFFSET_DATA + mc->io_size, &s->sram); + + /* Flash */ + memory_region_init_rom(&s->flash, OBJECT(dev), + "flash", mc->flash_size, &error_fatal); + memory_region_add_subregion(get_system_memory(), OFFSET_CODE, &s->flash); + + /* + * I/O + * + * 0x00 - 0x1f: Registers + * 0x20 - 0x5f: I/O memory + * 0x60 - 0xff: Extended I/O + */ + s->io = qdev_new(TYPE_UNIMPLEMENTED_DEVICE); + qdev_prop_set_string(s->io, "name", "I/O"); + qdev_prop_set_uint64(s->io, "size", mc->io_size); + sysbus_realize_and_unref(SYS_BUS_DEVICE(s->io), &error_fatal); + sysbus_mmio_map_overlap(SYS_BUS_DEVICE(s->io), 0, OFFSET_DATA, -1234); + + /* Power Reduction */ + for (i = 0; i < POWER_MAX; i++) { + int idx = POWER(i); + if (!mc->dev[idx].addr) { + continue; + } + devname = g_strdup_printf("power%zu", i); + object_initialize_child(OBJECT(dev), devname, &s->pwr[i], + TYPE_AVR_MASK); + sysbus_realize(SYS_BUS_DEVICE(&s->pwr[i]), &error_abort); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->pwr[i]), 0, + OFFSET_DATA + mc->dev[idx].addr); + g_free(devname); + } + + /* GPIO */ + for (i = 0; i < GPIO_MAX; i++) { + int idx = GPIO(i); + if (!mc->dev[idx].addr) { + continue; + } + devname = g_strdup_printf("atmega-gpio-%c", 'a' + (char)i); + create_unimplemented_device(devname, + OFFSET_DATA + mc->dev[idx].addr, 3); + g_free(devname); + } + + /* USART */ + for (i = 0; i < USART_MAX; i++) { + int idx = USART(i); + if (!mc->dev[idx].addr) { + continue; + } + devname = g_strdup_printf("usart%zu", i); + object_initialize_child(OBJECT(dev), devname, &s->usart[i], + TYPE_AVR_USART); + qdev_prop_set_chr(DEVICE(&s->usart[i]), "chardev", serial_hd(i)); + sbd = SYS_BUS_DEVICE(&s->usart[i]); + sysbus_realize(sbd, &error_abort); + sysbus_mmio_map(sbd, 0, OFFSET_DATA + mc->dev[USART(i)].addr); + connect_peripheral_irq(mc, sbd, 0, cpudev, USART_RXC_IRQ(i)); + connect_peripheral_irq(mc, sbd, 1, cpudev, USART_DRE_IRQ(i)); + connect_peripheral_irq(mc, sbd, 2, cpudev, USART_TXC_IRQ(i)); + connect_power_reduction_gpio(s, mc, DEVICE(&s->usart[i]), idx); + g_free(devname); + } + + /* Timer */ + for (i = 0; i < TIMER_MAX; i++) { + int idx = TIMER(i); + if (!mc->dev[idx].addr) { + continue; + } + if (!mc->dev[idx].is_timer16) { + create_unimplemented_device("avr-timer8", + OFFSET_DATA + mc->dev[idx].addr, 5); + create_unimplemented_device("avr-timer8-intmask", + OFFSET_DATA + + mc->dev[idx].intmask_addr, 1); + create_unimplemented_device("avr-timer8-intflag", + OFFSET_DATA + + mc->dev[idx].intflag_addr, 1); + continue; + } + devname = g_strdup_printf("timer%zu", i); + object_initialize_child(OBJECT(dev), devname, &s->timer[i], + TYPE_AVR_TIMER16); + object_property_set_uint(OBJECT(&s->timer[i]), "cpu-frequency-hz", + 
s->xtal_freq_hz, &error_abort); + sbd = SYS_BUS_DEVICE(&s->timer[i]); + sysbus_realize(sbd, &error_abort); + sysbus_mmio_map(sbd, 0, OFFSET_DATA + mc->dev[idx].addr); + sysbus_mmio_map(sbd, 1, OFFSET_DATA + mc->dev[idx].intmask_addr); + sysbus_mmio_map(sbd, 2, OFFSET_DATA + mc->dev[idx].intflag_addr); + connect_peripheral_irq(mc, sbd, 0, cpudev, TIMER_CAPT_IRQ(i)); + connect_peripheral_irq(mc, sbd, 1, cpudev, TIMER_COMPA_IRQ(i)); + connect_peripheral_irq(mc, sbd, 2, cpudev, TIMER_COMPB_IRQ(i)); + connect_peripheral_irq(mc, sbd, 3, cpudev, TIMER_COMPC_IRQ(i)); + connect_peripheral_irq(mc, sbd, 4, cpudev, TIMER_OVF_IRQ(i)); + connect_power_reduction_gpio(s, mc, DEVICE(&s->timer[i]), idx); + g_free(devname); + } + + create_unimplemented_device("avr-twi", OFFSET_DATA + 0x0b8, 6); + create_unimplemented_device("avr-adc", OFFSET_DATA + 0x078, 8); + create_unimplemented_device("avr-ext-mem-ctrl", OFFSET_DATA + 0x074, 2); + create_unimplemented_device("avr-watchdog", OFFSET_DATA + 0x060, 1); + create_unimplemented_device("avr-spi", OFFSET_DATA + 0x04c, 3); + create_unimplemented_device("avr-eeprom", OFFSET_DATA + 0x03f, 3); +} + +static Property atmega_props[] = { + DEFINE_PROP_UINT64("xtal-frequency-hz", AtmegaMcuState, + xtal_freq_hz, 0), + DEFINE_PROP_END_OF_LIST() +}; + +static void atmega_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + + dc->realize = atmega_realize; + device_class_set_props(dc, atmega_props); + /* Reason: Mapped at fixed location on the system bus */ + dc->user_creatable = false; +} + +static void atmega168_class_init(ObjectClass *oc, void *data) +{ + AtmegaMcuClass *amc = ATMEGA_MCU_CLASS(oc); + + amc->cpu_type = AVR_CPU_TYPE_NAME("avr5"); + amc->flash_size = 16 * KiB; + amc->eeprom_size = 512; + amc->sram_size = 1 * KiB; + amc->io_size = 256; + amc->gpio_count = 23; + amc->adc_count = 6; + amc->irq = irq168_328; + amc->dev = dev168_328; +}; + +static void atmega328_class_init(ObjectClass *oc, void *data) +{ + AtmegaMcuClass *amc = ATMEGA_MCU_CLASS(oc); + + amc->cpu_type = AVR_CPU_TYPE_NAME("avr5"); + amc->flash_size = 32 * KiB; + amc->eeprom_size = 1 * KiB; + amc->sram_size = 2 * KiB; + amc->io_size = 256; + amc->gpio_count = 23; + amc->adc_count = 6; + amc->irq = irq168_328; + amc->dev = dev168_328; +}; + +static void atmega1280_class_init(ObjectClass *oc, void *data) +{ + AtmegaMcuClass *amc = ATMEGA_MCU_CLASS(oc); + + amc->cpu_type = AVR_CPU_TYPE_NAME("avr51"); + amc->flash_size = 128 * KiB; + amc->eeprom_size = 4 * KiB; + amc->sram_size = 8 * KiB; + amc->io_size = 512; + amc->gpio_count = 86; + amc->adc_count = 16; + amc->irq = irq1280_2560; + amc->dev = dev1280_2560; +}; + +static void atmega2560_class_init(ObjectClass *oc, void *data) +{ + AtmegaMcuClass *amc = ATMEGA_MCU_CLASS(oc); + + amc->cpu_type = AVR_CPU_TYPE_NAME("avr6"); + amc->flash_size = 256 * KiB; + amc->eeprom_size = 4 * KiB; + amc->sram_size = 8 * KiB; + amc->io_size = 512; + amc->gpio_count = 54; + amc->adc_count = 16; + amc->irq = irq1280_2560; + amc->dev = dev1280_2560; +}; + +static const TypeInfo atmega_mcu_types[] = { + { + .name = TYPE_ATMEGA168_MCU, + .parent = TYPE_ATMEGA_MCU, + .class_init = atmega168_class_init, + }, { + .name = TYPE_ATMEGA328_MCU, + .parent = TYPE_ATMEGA_MCU, + .class_init = atmega328_class_init, + }, { + .name = TYPE_ATMEGA1280_MCU, + .parent = TYPE_ATMEGA_MCU, + .class_init = atmega1280_class_init, + }, { + .name = TYPE_ATMEGA2560_MCU, + .parent = TYPE_ATMEGA_MCU, + .class_init = atmega2560_class_init, + }, { + .name = TYPE_ATMEGA_MCU, + 
.parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(AtmegaMcuState), + .class_size = sizeof(AtmegaMcuClass), + .class_init = atmega_class_init, + .abstract = true, + } +}; + +DEFINE_TYPES(atmega_mcu_types) diff --git a/hw/avr/atmega.h b/hw/avr/atmega.h new file mode 100644 index 000000000..a99ee15c7 --- /dev/null +++ b/hw/avr/atmega.h @@ -0,0 +1,51 @@ +/* + * QEMU ATmega MCU + * + * Copyright (c) 2019-2020 Philippe Mathieu-Daudé + * + * This work is licensed under the terms of the GNU GPLv2 or later. + * See the COPYING file in the top-level directory. + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef HW_AVR_ATMEGA_H +#define HW_AVR_ATMEGA_H + +#include "hw/char/avr_usart.h" +#include "hw/timer/avr_timer16.h" +#include "hw/misc/avr_power.h" +#include "target/avr/cpu.h" +#include "qom/object.h" + +#define TYPE_ATMEGA_MCU "ATmega" +#define TYPE_ATMEGA168_MCU "ATmega168" +#define TYPE_ATMEGA328_MCU "ATmega328" +#define TYPE_ATMEGA1280_MCU "ATmega1280" +#define TYPE_ATMEGA2560_MCU "ATmega2560" + +typedef struct AtmegaMcuState AtmegaMcuState; +DECLARE_INSTANCE_CHECKER(AtmegaMcuState, ATMEGA_MCU, + TYPE_ATMEGA_MCU) + +#define POWER_MAX 2 +#define USART_MAX 4 +#define TIMER_MAX 6 +#define GPIO_MAX 12 + +struct AtmegaMcuState { + /*< private >*/ + SysBusDevice parent_obj; + /*< public >*/ + + AVRCPU cpu; + MemoryRegion flash; + MemoryRegion eeprom; + MemoryRegion sram; + DeviceState *io; + AVRMaskState pwr[POWER_MAX]; + AVRUsartState usart[USART_MAX]; + AVRTimer16State timer[TIMER_MAX]; + uint64_t xtal_freq_hz; +}; + +#endif /* HW_AVR_ATMEGA_H */ diff --git a/hw/avr/boot.c b/hw/avr/boot.c new file mode 100644 index 000000000..cbede775c --- /dev/null +++ b/hw/avr/boot.c @@ -0,0 +1,116 @@ +/* + * AVR loader helpers + * + * Copyright (c) 2019-2020 Philippe Mathieu-Daudé + * + * This work is licensed under the terms of the GNU GPLv2 or later. + * See the COPYING file in the top-level directory. 
+ * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "qemu-common.h" +#include "qemu/datadir.h" +#include "hw/loader.h" +#include "elf.h" +#include "boot.h" +#include "qemu/error-report.h" + +static const char *avr_elf_e_flags_to_cpu_type(uint32_t flags) +{ + switch (flags & EF_AVR_MACH) { + case bfd_mach_avr1: + return AVR_CPU_TYPE_NAME("avr1"); + case bfd_mach_avr2: + return AVR_CPU_TYPE_NAME("avr2"); + case bfd_mach_avr25: + return AVR_CPU_TYPE_NAME("avr25"); + case bfd_mach_avr3: + return AVR_CPU_TYPE_NAME("avr3"); + case bfd_mach_avr31: + return AVR_CPU_TYPE_NAME("avr31"); + case bfd_mach_avr35: + return AVR_CPU_TYPE_NAME("avr35"); + case bfd_mach_avr4: + return AVR_CPU_TYPE_NAME("avr4"); + case bfd_mach_avr5: + return AVR_CPU_TYPE_NAME("avr5"); + case bfd_mach_avr51: + return AVR_CPU_TYPE_NAME("avr51"); + case bfd_mach_avr6: + return AVR_CPU_TYPE_NAME("avr6"); + case bfd_mach_avrtiny: + return AVR_CPU_TYPE_NAME("avrtiny"); + case bfd_mach_avrxmega2: + return AVR_CPU_TYPE_NAME("xmega2"); + case bfd_mach_avrxmega3: + return AVR_CPU_TYPE_NAME("xmega3"); + case bfd_mach_avrxmega4: + return AVR_CPU_TYPE_NAME("xmega4"); + case bfd_mach_avrxmega5: + return AVR_CPU_TYPE_NAME("xmega5"); + case bfd_mach_avrxmega6: + return AVR_CPU_TYPE_NAME("xmega6"); + case bfd_mach_avrxmega7: + return AVR_CPU_TYPE_NAME("xmega7"); + default: + return NULL; + } +} + +bool avr_load_firmware(AVRCPU *cpu, MachineState *ms, + MemoryRegion *program_mr, const char *firmware) +{ + g_autofree char *filename = NULL; + int bytes_loaded; + uint64_t entry; + uint32_t e_flags; + + filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, firmware); + if (filename == NULL) { + error_report("Unable to find %s", firmware); + return false; + } + + bytes_loaded = load_elf_ram_sym(filename, + NULL, NULL, NULL, + &entry, NULL, NULL, + &e_flags, 0, EM_AVR, 0, 0, + NULL, true, NULL); + if (bytes_loaded >= 0) { + /* If ELF file is provided, determine CPU type reading ELF e_flags. */ + const char *elf_cpu = avr_elf_e_flags_to_cpu_type(e_flags); + const char *mcu_cpu_type = object_get_typename(OBJECT(cpu)); + int cpu_len = strlen(mcu_cpu_type) - strlen(AVR_CPU_TYPE_SUFFIX); + + if (entry) { + error_report("BIOS entry_point must be 0x0000 " + "(ELF image '%s' has entry_point 0x%04" PRIx64 ")", + firmware, entry); + return false; + } + if (!elf_cpu) { + warn_report("Could not determine CPU type for ELF image '%s', " + "assuming '%.*s' CPU", + firmware, cpu_len, mcu_cpu_type); + return true; + } + if (strcmp(elf_cpu, mcu_cpu_type)) { + error_report("Current machine: %s with '%.*s' CPU", + MACHINE_GET_CLASS(ms)->desc, cpu_len, mcu_cpu_type); + error_report("ELF image '%s' is for '%.*s' CPU", + firmware, + (int)(strlen(elf_cpu) - strlen(AVR_CPU_TYPE_SUFFIX)), + elf_cpu); + return false; + } + } else { + bytes_loaded = load_image_mr(filename, program_mr); + } + if (bytes_loaded < 0) { + error_report("Unable to load firmware image %s as ELF or raw binary", + firmware); + return false; + } + return true; +} diff --git a/hw/avr/boot.h b/hw/avr/boot.h new file mode 100644 index 000000000..684d55332 --- /dev/null +++ b/hw/avr/boot.h @@ -0,0 +1,33 @@ +/* + * AVR loader helpers + * + * Copyright (c) 2019-2020 Philippe Mathieu-Daudé + * + * This work is licensed under the terms of the GNU GPLv2 or later. + * See the COPYING file in the top-level directory. 
+ * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef HW_AVR_BOOT_H +#define HW_AVR_BOOT_H + +#include "hw/boards.h" +#include "cpu.h" + +/** + * avr_load_firmware: load an image into a memory region + * + * @cpu: Handle an AVR CPU object + * @ms: A MachineState + * @mr: Memory Region to load into + * @firmware: Path to the firmware file (raw binary or ELF format) + * + * Load a firmware supplied by the machine or by the user with the + * '-bios' command line option, and put it in target memory. + * + * Returns: true on success, false on error. + */ +bool avr_load_firmware(AVRCPU *cpu, MachineState *ms, + MemoryRegion *mr, const char *firmware); + +#endif diff --git a/hw/avr/meson.build b/hw/avr/meson.build new file mode 100644 index 000000000..46d53fb17 --- /dev/null +++ b/hw/avr/meson.build @@ -0,0 +1,6 @@ +avr_ss = ss.source_set() +avr_ss.add(files('boot.c')) +avr_ss.add(when: 'CONFIG_AVR_ATMEGA_MCU', if_true: files('atmega.c')) +avr_ss.add(when: 'CONFIG_ARDUINO', if_true: files('arduino.c')) + +hw_arch += {'avr': avr_ss} diff --git a/hw/block/Kconfig b/hw/block/Kconfig new file mode 100644 index 000000000..9e8f28f98 --- /dev/null +++ b/hw/block/Kconfig @@ -0,0 +1,46 @@ +config FDC + bool + +config FDC_ISA + bool + depends on ISA_BUS + select FDC + +config FDC_SYSBUS + bool + select FDC + +config SSI_M25P80 + bool + +config NAND + bool + +config PFLASH_CFI01 + bool + +config PFLASH_CFI02 + bool + +config ECC + bool + +config ONENAND + bool + +config TC58128 + bool + +config VIRTIO_BLK + bool + default y + depends on VIRTIO + +config VHOST_USER_BLK + bool + # Only PCI devices are provided for now + default y if VIRTIO_PCI + depends on VIRTIO && VHOST_USER && LINUX + +config SWIM + bool diff --git a/hw/block/block.c b/hw/block/block.c new file mode 100644 index 000000000..d47ebf005 --- /dev/null +++ b/hw/block/block.c @@ -0,0 +1,238 @@ +/* + * Common code for block device models + * + * Copyright (C) 2012 Red Hat, Inc. + * + * This work is licensed under the terms of the GNU GPL, version 2 or + * later. See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "sysemu/blockdev.h" +#include "sysemu/block-backend.h" +#include "hw/block/block.h" +#include "qapi/error.h" +#include "qapi/qapi-types-block.h" + +/* + * Read the entire contents of @blk into @buf. + * @blk's contents must be @size bytes, and @size must be at most + * BDRV_REQUEST_MAX_BYTES. + * On success, return true. + * On failure, store an error through @errp and return false. + * Note that the error messages do not identify the block backend. + * TODO Since callers don't either, this can result in confusing + * errors. + * This function is not intended for actual block devices, which read on + * demand. It's for things like memory devices that (ab)use a block + * backend to provide persistence. + */ +bool blk_check_size_and_read_all(BlockBackend *blk, void *buf, hwaddr size, + Error **errp) +{ + int64_t blk_len; + int ret; + + blk_len = blk_getlength(blk); + if (blk_len < 0) { + error_setg_errno(errp, -blk_len, + "can't get size of block backend"); + return false; + } + if (blk_len != size) { + error_setg(errp, "device requires %" HWADDR_PRIu " bytes, " + "block backend provides %" PRIu64 " bytes", + size, blk_len); + return false; + } + + /* + * We could loop for @size > BDRV_REQUEST_MAX_BYTES, but if we + * ever get to the point we want to read *gigabytes* here, we + * should probably rework the device to be more like an actual + * block device and read only on demand.
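+ * (BDRV_REQUEST_MAX_BYTES is the block layer's bound on a single I/O + * request, which is why the lone blk_pread() call below is enough.)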
+ */ + assert(size <= BDRV_REQUEST_MAX_BYTES); + ret = blk_pread(blk, 0, buf, size); + if (ret < 0) { + error_setg_errno(errp, -ret, "can't read block backend"); + return false; + } + return true; +} + +bool blkconf_blocksizes(BlockConf *conf, Error **errp) +{ + BlockBackend *blk = conf->blk; + BlockSizes blocksizes; + BlockDriverState *bs; + bool use_blocksizes; + bool use_bs; + + switch (conf->backend_defaults) { + case ON_OFF_AUTO_AUTO: + use_blocksizes = !blk_probe_blocksizes(blk, &blocksizes); + use_bs = false; + break; + + case ON_OFF_AUTO_ON: + use_blocksizes = !blk_probe_blocksizes(blk, &blocksizes); + bs = blk_bs(blk); + use_bs = bs; + break; + + case ON_OFF_AUTO_OFF: + use_blocksizes = false; + use_bs = false; + break; + + default: + abort(); + } + + /* fill in detected values if they are not defined via qemu command line */ + if (!conf->physical_block_size) { + if (use_blocksizes) { + conf->physical_block_size = blocksizes.phys; + } else { + conf->physical_block_size = BDRV_SECTOR_SIZE; + } + } + if (!conf->logical_block_size) { + if (use_blocksizes) { + conf->logical_block_size = blocksizes.log; + } else { + conf->logical_block_size = BDRV_SECTOR_SIZE; + } + } + if (use_bs) { + if (!conf->opt_io_size) { + conf->opt_io_size = bs->bl.opt_transfer; + } + if (conf->discard_granularity == -1) { + if (bs->bl.pdiscard_alignment) { + conf->discard_granularity = bs->bl.pdiscard_alignment; + } else if (bs->bl.request_alignment != 1) { + conf->discard_granularity = bs->bl.request_alignment; + } + } + } + + if (conf->logical_block_size > conf->physical_block_size) { + error_setg(errp, + "logical_block_size > physical_block_size not supported"); + return false; + } + + if (!QEMU_IS_ALIGNED(conf->min_io_size, conf->logical_block_size)) { + error_setg(errp, + "min_io_size must be a multiple of logical_block_size"); + return false; + } + + /* + * all devices which support min_io_size (scsi and virtio-blk) expose it to + * the guest as a uint16_t in units of logical blocks + */ + if (conf->min_io_size / conf->logical_block_size > UINT16_MAX) { + error_setg(errp, "min_io_size must not exceed %u logical blocks", + UINT16_MAX); + return false; + } + + if (!QEMU_IS_ALIGNED(conf->opt_io_size, conf->logical_block_size)) { + error_setg(errp, + "opt_io_size must be a multiple of logical_block_size"); + return false; + } + + if (conf->discard_granularity != -1 && + !QEMU_IS_ALIGNED(conf->discard_granularity, + conf->logical_block_size)) { + error_setg(errp, "discard_granularity must be " + "a multiple of logical_block_size"); + return false; + } + + return true; +} + +bool blkconf_apply_backend_options(BlockConf *conf, bool readonly, + bool resizable, Error **errp) +{ + BlockBackend *blk = conf->blk; + BlockdevOnError rerror, werror; + uint64_t perm, shared_perm; + bool wce; + int ret; + + perm = BLK_PERM_CONSISTENT_READ; + if (!readonly) { + perm |= BLK_PERM_WRITE; + } + + shared_perm = BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE_UNCHANGED | + BLK_PERM_GRAPH_MOD; + if (resizable) { + shared_perm |= BLK_PERM_RESIZE; + } + if (conf->share_rw) { + shared_perm |= BLK_PERM_WRITE; + } + + ret = blk_set_perm(blk, perm, shared_perm, errp); + if (ret < 0) { + return false; + } + + switch (conf->wce) { + case ON_OFF_AUTO_ON: wce = true; break; + case ON_OFF_AUTO_OFF: wce = false; break; + case ON_OFF_AUTO_AUTO: wce = blk_enable_write_cache(blk); break; + default: + abort(); + } + + rerror = conf->rerror; + if (rerror == BLOCKDEV_ON_ERROR_AUTO) { + rerror = blk_get_on_error(blk, true); + } + + werror = 
conf->werror; + if (werror == BLOCKDEV_ON_ERROR_AUTO) { + werror = blk_get_on_error(blk, false); + } + + blk_set_enable_write_cache(blk, wce); + blk_set_on_error(blk, rerror, werror); + + return true; +} + +bool blkconf_geometry(BlockConf *conf, int *ptrans, + unsigned cyls_max, unsigned heads_max, unsigned secs_max, + Error **errp) +{ + if (!conf->cyls && !conf->heads && !conf->secs) { + hd_geometry_guess(conf->blk, + &conf->cyls, &conf->heads, &conf->secs, + ptrans); + } else if (ptrans && *ptrans == BIOS_ATA_TRANSLATION_AUTO) { + *ptrans = hd_bios_chs_auto_trans(conf->cyls, conf->heads, conf->secs); + } + if (conf->cyls || conf->heads || conf->secs) { + if (conf->cyls < 1 || conf->cyls > cyls_max) { + error_setg(errp, "cyls must be between 1 and %u", cyls_max); + return false; + } + if (conf->heads < 1 || conf->heads > heads_max) { + error_setg(errp, "heads must be between 1 and %u", heads_max); + return false; + } + if (conf->secs < 1 || conf->secs > secs_max) { + error_setg(errp, "secs must be between 1 and %u", secs_max); + return false; + } + } + return true; +} diff --git a/hw/block/cdrom.c b/hw/block/cdrom.c new file mode 100644 index 000000000..c6bfa50ad --- /dev/null +++ b/hw/block/cdrom.c @@ -0,0 +1,155 @@ +/* + * QEMU ATAPI CD-ROM Emulator + * + * Copyright (c) 2006 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +/* ??? Most of the ATAPI emulation is still in ide.c. It should be moved + here. */ + +#include "qemu/osdep.h" +#include "hw/scsi/scsi.h" + +static void lba_to_msf(uint8_t *buf, int lba) +{ + lba += 150; + buf[0] = (lba / 75) / 60; + buf[1] = (lba / 75) % 60; + buf[2] = lba % 75; +} + +/* same toc as bochs. 
Return -1 if error or the toc length */ +/* XXX: check this */ +int cdrom_read_toc(int nb_sectors, uint8_t *buf, int msf, int start_track) +{ + uint8_t *q; + int len; + + if (start_track > 1 && start_track != 0xaa) + return -1; + q = buf + 2; + *q++ = 1; /* first session */ + *q++ = 1; /* last session */ + if (start_track <= 1) { + *q++ = 0; /* reserved */ + *q++ = 0x14; /* ADR, control */ + *q++ = 1; /* track number */ + *q++ = 0; /* reserved */ + if (msf) { + *q++ = 0; /* reserved */ + lba_to_msf(q, 0); + q += 3; + } else { + /* sector 0 */ + stl_be_p(q, 0); + q += 4; + } + } + /* lead out track */ + *q++ = 0; /* reserved */ + *q++ = 0x16; /* ADR, control */ + *q++ = 0xaa; /* track number */ + *q++ = 0; /* reserved */ + if (msf) { + *q++ = 0; /* reserved */ + lba_to_msf(q, nb_sectors); + q += 3; + } else { + stl_be_p(q, nb_sectors); + q += 4; + } + len = q - buf; + stw_be_p(buf, len - 2); + return len; +} + +/* mostly same info as PearPc */ +int cdrom_read_toc_raw(int nb_sectors, uint8_t *buf, int msf, int session_num) +{ + uint8_t *q; + int len; + + q = buf + 2; + *q++ = 1; /* first session */ + *q++ = 1; /* last session */ + + *q++ = 1; /* session number */ + *q++ = 0x14; /* data track */ + *q++ = 0; /* track number */ + *q++ = 0xa0; /* lead-in */ + *q++ = 0; /* min */ + *q++ = 0; /* sec */ + *q++ = 0; /* frame */ + *q++ = 0; + *q++ = 1; /* first track */ + *q++ = 0x00; /* disk type */ + *q++ = 0x00; + + *q++ = 1; /* session number */ + *q++ = 0x14; /* data track */ + *q++ = 0; /* track number */ + *q++ = 0xa1; + *q++ = 0; /* min */ + *q++ = 0; /* sec */ + *q++ = 0; /* frame */ + *q++ = 0; + *q++ = 1; /* last track */ + *q++ = 0x00; + *q++ = 0x00; + + *q++ = 1; /* session number */ + *q++ = 0x14; /* data track */ + *q++ = 0; /* track number */ + *q++ = 0xa2; /* lead-out */ + *q++ = 0; /* min */ + *q++ = 0; /* sec */ + *q++ = 0; /* frame */ + if (msf) { + *q++ = 0; /* reserved */ + lba_to_msf(q, nb_sectors); + q += 3; + } else { + stl_be_p(q, nb_sectors); + q += 4; + } + + *q++ = 1; /* session number */ + *q++ = 0x14; /* ADR, control */ + *q++ = 0; /* track number */ + *q++ = 1; /* point */ + *q++ = 0; /* min */ + *q++ = 0; /* sec */ + *q++ = 0; /* frame */ + if (msf) { + *q++ = 0; + lba_to_msf(q, 0); + q += 3; + } else { + *q++ = 0; + *q++ = 0; + *q++ = 0; + *q++ = 0; + } + + len = q - buf; + stw_be_p(buf, len - 2); + return len; +} diff --git a/hw/block/dataplane/meson.build b/hw/block/dataplane/meson.build new file mode 100644 index 000000000..12c6a264f --- /dev/null +++ b/hw/block/dataplane/meson.build @@ -0,0 +1,2 @@ +specific_ss.add(when: 'CONFIG_VIRTIO_BLK', if_true: files('virtio-blk.c')) +specific_ss.add(when: 'CONFIG_XEN', if_true: files('xen-block.c')) diff --git a/hw/block/dataplane/trace-events b/hw/block/dataplane/trace-events new file mode 100644 index 000000000..38fc3e750 --- /dev/null +++ b/hw/block/dataplane/trace-events @@ -0,0 +1,5 @@ +# See docs/devel/tracing.rst for syntax documentation. 
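+# Each entry is: trace_point_name(typed arguments) "printf-style format string"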
+ +# virtio-blk.c +virtio_blk_data_plane_start(void *s) "dataplane %p" +virtio_blk_data_plane_stop(void *s) "dataplane %p" diff --git a/hw/block/dataplane/trace.h b/hw/block/dataplane/trace.h new file mode 100644 index 000000000..240cc5983 --- /dev/null +++ b/hw/block/dataplane/trace.h @@ -0,0 +1 @@ +#include "trace/trace-hw_block_dataplane.h" diff --git a/hw/block/dataplane/virtio-blk.c b/hw/block/dataplane/virtio-blk.c new file mode 100644 index 000000000..ee5a5352d --- /dev/null +++ b/hw/block/dataplane/virtio-blk.c @@ -0,0 +1,369 @@ +/* + * Dedicated thread for virtio-blk I/O processing + * + * Copyright 2012 IBM, Corp. + * Copyright 2012 Red Hat, Inc. and/or its affiliates + * + * Authors: + * Stefan Hajnoczi + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "trace.h" +#include "qemu/iov.h" +#include "qemu/main-loop.h" +#include "qemu/thread.h" +#include "qemu/error-report.h" +#include "hw/virtio/virtio-access.h" +#include "hw/virtio/virtio-blk.h" +#include "virtio-blk.h" +#include "block/aio.h" +#include "hw/virtio/virtio-bus.h" +#include "qom/object_interfaces.h" + +struct VirtIOBlockDataPlane { + bool starting; + bool stopping; + + VirtIOBlkConf *conf; + VirtIODevice *vdev; + QEMUBH *bh; /* bh for guest notification */ + unsigned long *batch_notify_vqs; + bool batch_notifications; + + /* Note that these EventNotifiers are assigned by value. This is + * fine as long as you do not call event_notifier_cleanup on them + * (because you don't own the file descriptor or handle; you just + * use it). + */ + IOThread *iothread; + AioContext *ctx; +}; + +/* Raise an interrupt to signal guest, if necessary */ +void virtio_blk_data_plane_notify(VirtIOBlockDataPlane *s, VirtQueue *vq) +{ + if (s->batch_notifications) { + set_bit(virtio_get_queue_index(vq), s->batch_notify_vqs); + qemu_bh_schedule(s->bh); + } else { + virtio_notify_irqfd(s->vdev, vq); + } +} + +static void notify_guest_bh(void *opaque) +{ + VirtIOBlockDataPlane *s = opaque; + unsigned nvqs = s->conf->num_queues; + unsigned long bitmap[BITS_TO_LONGS(nvqs)]; + unsigned j; + + memcpy(bitmap, s->batch_notify_vqs, sizeof(bitmap)); + memset(s->batch_notify_vqs, 0, sizeof(bitmap)); + + for (j = 0; j < nvqs; j += BITS_PER_LONG) { + unsigned long bits = bitmap[j / BITS_PER_LONG]; + + while (bits != 0) { + unsigned i = j + ctzl(bits); + VirtQueue *vq = virtio_get_queue(s->vdev, i); + + virtio_notify_irqfd(s->vdev, vq); + + bits &= bits - 1; /* clear right-most bit */ + } + } +} + +/* Context: QEMU global mutex held */ +bool virtio_blk_data_plane_create(VirtIODevice *vdev, VirtIOBlkConf *conf, + VirtIOBlockDataPlane **dataplane, + Error **errp) +{ + VirtIOBlockDataPlane *s; + BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev))); + VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); + + *dataplane = NULL; + + if (conf->iothread) { + if (!k->set_guest_notifiers || !k->ioeventfd_assign) { + error_setg(errp, + "device is incompatible with iothread " + "(transport does not support notifiers)"); + return false; + } + if (!virtio_device_ioeventfd_enabled(vdev)) { + error_setg(errp, "ioeventfd is required for iothread"); + return false; + } + + /* If dataplane is (re-)enabled while the guest is running there could + * be block jobs that can conflict. 
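+ * The blk_op_is_blocked() check below fails the setup cleanly in that + * case instead of racing with an active job.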
+ */ + if (blk_op_is_blocked(conf->conf.blk, BLOCK_OP_TYPE_DATAPLANE, errp)) { + error_prepend(errp, "cannot start virtio-blk dataplane: "); + return false; + } + } + /* Don't try if transport does not support notifiers. */ + if (!virtio_device_ioeventfd_enabled(vdev)) { + return false; + } + + s = g_new0(VirtIOBlockDataPlane, 1); + s->vdev = vdev; + s->conf = conf; + + if (conf->iothread) { + s->iothread = conf->iothread; + object_ref(OBJECT(s->iothread)); + s->ctx = iothread_get_aio_context(s->iothread); + } else { + s->ctx = qemu_get_aio_context(); + } + s->bh = aio_bh_new(s->ctx, notify_guest_bh, s); + s->batch_notify_vqs = bitmap_new(conf->num_queues); + + *dataplane = s; + + return true; +} + +/* Context: QEMU global mutex held */ +void virtio_blk_data_plane_destroy(VirtIOBlockDataPlane *s) +{ + VirtIOBlock *vblk; + + if (!s) { + return; + } + + vblk = VIRTIO_BLK(s->vdev); + assert(!vblk->dataplane_started); + g_free(s->batch_notify_vqs); + qemu_bh_delete(s->bh); + if (s->iothread) { + object_unref(OBJECT(s->iothread)); + } + g_free(s); +} + +static bool virtio_blk_data_plane_handle_output(VirtIODevice *vdev, + VirtQueue *vq) +{ + VirtIOBlock *s = (VirtIOBlock *)vdev; + + assert(s->dataplane); + assert(s->dataplane_started); + + return virtio_blk_handle_vq(s, vq); +} + +/* Context: QEMU global mutex held */ +int virtio_blk_data_plane_start(VirtIODevice *vdev) +{ + VirtIOBlock *vblk = VIRTIO_BLK(vdev); + VirtIOBlockDataPlane *s = vblk->dataplane; + BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vblk))); + VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); + AioContext *old_context; + unsigned i; + unsigned nvqs = s->conf->num_queues; + Error *local_err = NULL; + int r; + + if (vblk->dataplane_started || s->starting) { + return 0; + } + + s->starting = true; + + if (!virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) { + s->batch_notifications = true; + } else { + s->batch_notifications = false; + } + + /* Set up guest notifier (irq) */ + r = k->set_guest_notifiers(qbus->parent, nvqs, true); + if (r != 0) { + error_report("virtio-blk failed to set guest notifier (%d), " + "ensure -accel kvm is set.", r); + goto fail_guest_notifiers; + } + + /* + * Batch all the host notifiers in a single transaction to avoid + * quadratic time complexity in address_space_update_ioeventfds(). + */ + memory_region_transaction_begin(); + + /* Set up virtqueue notify */ + for (i = 0; i < nvqs; i++) { + r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), i, true); + if (r != 0) { + int j = i; + + fprintf(stderr, "virtio-blk failed to set host notifier (%d)\n", r); + while (i--) { + virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), i, false); + } + + /* + * The transaction expects the ioeventfds to be open when it + * commits. Do it now, before the cleanup loop. 
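+ * ('j' preserves the failing index because the disable loop above + * consumes 'i'; the cleanup loop below then closes those notifiers.)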
+ */ + memory_region_transaction_commit(); + + while (j--) { + virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), j); + } + goto fail_host_notifiers; + } + } + + memory_region_transaction_commit(); + + s->starting = false; + vblk->dataplane_started = true; + trace_virtio_blk_data_plane_start(s); + + old_context = blk_get_aio_context(s->conf->conf.blk); + aio_context_acquire(old_context); + r = blk_set_aio_context(s->conf->conf.blk, s->ctx, &local_err); + aio_context_release(old_context); + if (r < 0) { + error_report_err(local_err); + goto fail_aio_context; + } + + /* Process queued requests before the ones in vring */ + virtio_blk_process_queued_requests(vblk, false); + + /* Kick right away to begin processing requests already in vring */ + for (i = 0; i < nvqs; i++) { + VirtQueue *vq = virtio_get_queue(s->vdev, i); + + event_notifier_set(virtio_queue_get_host_notifier(vq)); + } + + /* Get this show started by hooking up our callbacks */ + aio_context_acquire(s->ctx); + for (i = 0; i < nvqs; i++) { + VirtQueue *vq = virtio_get_queue(s->vdev, i); + + virtio_queue_aio_set_host_notifier_handler(vq, s->ctx, + virtio_blk_data_plane_handle_output); + } + aio_context_release(s->ctx); + return 0; + + fail_aio_context: + memory_region_transaction_begin(); + + for (i = 0; i < nvqs; i++) { + virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), i, false); + } + + memory_region_transaction_commit(); + + for (i = 0; i < nvqs; i++) { + virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), i); + } + fail_host_notifiers: + k->set_guest_notifiers(qbus->parent, nvqs, false); + fail_guest_notifiers: + /* + * If we failed to set up the guest notifiers queued requests will be + * processed on the main context. + */ + virtio_blk_process_queued_requests(vblk, false); + vblk->dataplane_disabled = true; + s->starting = false; + vblk->dataplane_started = true; + return -ENOSYS; +} + +/* Stop notifications for new requests from guest. + * + * Context: BH in IOThread + */ +static void virtio_blk_data_plane_stop_bh(void *opaque) +{ + VirtIOBlockDataPlane *s = opaque; + unsigned i; + + for (i = 0; i < s->conf->num_queues; i++) { + VirtQueue *vq = virtio_get_queue(s->vdev, i); + + virtio_queue_aio_set_host_notifier_handler(vq, s->ctx, NULL); + } +} + +/* Context: QEMU global mutex held */ +void virtio_blk_data_plane_stop(VirtIODevice *vdev) +{ + VirtIOBlock *vblk = VIRTIO_BLK(vdev); + VirtIOBlockDataPlane *s = vblk->dataplane; + BusState *qbus = qdev_get_parent_bus(DEVICE(vblk)); + VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); + unsigned i; + unsigned nvqs = s->conf->num_queues; + + if (!vblk->dataplane_started || s->stopping) { + return; + } + + /* Better luck next time. */ + if (vblk->dataplane_disabled) { + vblk->dataplane_disabled = false; + vblk->dataplane_started = false; + return; + } + s->stopping = true; + trace_virtio_blk_data_plane_stop(s); + + aio_context_acquire(s->ctx); + aio_wait_bh_oneshot(s->ctx, virtio_blk_data_plane_stop_bh, s); + + /* Drain and try to switch bs back to the QEMU main loop. If other users + * keep the BlockBackend in the iothread, that's ok */ + blk_set_aio_context(s->conf->conf.blk, qemu_get_aio_context(), NULL); + + aio_context_release(s->ctx); + + /* + * Batch all the host notifiers in a single transaction to avoid + * quadratic time complexity in address_space_update_ioeventfds(). 
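+ * A single begin/commit pair covers disabling the notifiers for all + * nvqs queues.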
+ */ + memory_region_transaction_begin(); + + for (i = 0; i < nvqs; i++) { + virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), i, false); + } + + /* + * The transaction expects the ioeventfds to be open when it + * commits. Do it now, before the cleanup loop. + */ + memory_region_transaction_commit(); + + for (i = 0; i < nvqs; i++) { + virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), i); + } + + qemu_bh_cancel(s->bh); + notify_guest_bh(s); /* final chance to notify guest */ + + /* Clean up guest notifier (irq) */ + k->set_guest_notifiers(qbus->parent, nvqs, false); + + vblk->dataplane_started = false; + s->stopping = false; +} diff --git a/hw/block/dataplane/virtio-blk.h b/hw/block/dataplane/virtio-blk.h new file mode 100644 index 000000000..5e18bb99a --- /dev/null +++ b/hw/block/dataplane/virtio-blk.h @@ -0,0 +1,31 @@ +/* + * Dedicated thread for virtio-blk I/O processing + * + * Copyright 2012 IBM, Corp. + * Copyright 2012 Red Hat, Inc. and/or its affiliates + * + * Authors: + * Stefan Hajnoczi + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +#ifndef HW_DATAPLANE_VIRTIO_BLK_H +#define HW_DATAPLANE_VIRTIO_BLK_H + +#include "hw/virtio/virtio.h" + +typedef struct VirtIOBlockDataPlane VirtIOBlockDataPlane; + +bool virtio_blk_data_plane_create(VirtIODevice *vdev, VirtIOBlkConf *conf, + VirtIOBlockDataPlane **dataplane, + Error **errp); +void virtio_blk_data_plane_destroy(VirtIOBlockDataPlane *s); +void virtio_blk_data_plane_notify(VirtIOBlockDataPlane *s, VirtQueue *vq); + +int virtio_blk_data_plane_start(VirtIODevice *vdev); +void virtio_blk_data_plane_stop(VirtIODevice *vdev); + +#endif /* HW_DATAPLANE_VIRTIO_BLK_H */ diff --git a/hw/block/dataplane/xen-block.c b/hw/block/dataplane/xen-block.c new file mode 100644 index 000000000..860787580 --- /dev/null +++ b/hw/block/dataplane/xen-block.c @@ -0,0 +1,828 @@ +/* + * Copyright (c) 2018 Citrix Systems Inc. + * (c) Gerd Hoffmann + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; under version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . + * + * Contributions after 2012-01-13 are licensed under the terms of the + * GNU GPL, version 2 or (at your option) any later version. 
+ */ + +#include "qemu/osdep.h" +#include "qemu/error-report.h" +#include "qemu/main-loop.h" +#include "qapi/error.h" +#include "hw/xen/xen_common.h" +#include "hw/block/xen_blkif.h" +#include "sysemu/block-backend.h" +#include "sysemu/iothread.h" +#include "xen-block.h" + +typedef struct XenBlockRequest { + blkif_request_t req; + int16_t status; + off_t start; + QEMUIOVector v; + void *buf; + size_t size; + int presync; + int aio_inflight; + int aio_errors; + XenBlockDataPlane *dataplane; + QLIST_ENTRY(XenBlockRequest) list; + BlockAcctCookie acct; +} XenBlockRequest; + +struct XenBlockDataPlane { + XenDevice *xendev; + XenEventChannel *event_channel; + unsigned int *ring_ref; + unsigned int nr_ring_ref; + void *sring; + int protocol; + blkif_back_rings_t rings; + int more_work; + QLIST_HEAD(inflight_head, XenBlockRequest) inflight; + QLIST_HEAD(freelist_head, XenBlockRequest) freelist; + int requests_total; + int requests_inflight; + unsigned int max_requests; + BlockBackend *blk; + unsigned int sector_size; + QEMUBH *bh; + IOThread *iothread; + AioContext *ctx; +}; + +static int xen_block_send_response(XenBlockRequest *request); + +static void reset_request(XenBlockRequest *request) +{ + memset(&request->req, 0, sizeof(request->req)); + request->status = 0; + request->start = 0; + request->size = 0; + request->presync = 0; + + request->aio_inflight = 0; + request->aio_errors = 0; + + request->dataplane = NULL; + memset(&request->list, 0, sizeof(request->list)); + memset(&request->acct, 0, sizeof(request->acct)); + + qemu_iovec_reset(&request->v); +} + +static XenBlockRequest *xen_block_start_request(XenBlockDataPlane *dataplane) +{ + XenBlockRequest *request = NULL; + + if (QLIST_EMPTY(&dataplane->freelist)) { + if (dataplane->requests_total >= dataplane->max_requests) { + goto out; + } + /* allocate new struct */ + request = g_malloc0(sizeof(*request)); + request->dataplane = dataplane; + /* + * We cannot need more pages per request than this, and since we + * re-use requests, allocate the memory once here. It will be freed + * in xen_block_dataplane_destroy() when the request list is freed.
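+ * The buffer spans BLKIF_MAX_SEGMENTS_PER_REQUEST pages, the most a + * single blkif request can reference.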
+ */ + request->buf = qemu_memalign(XC_PAGE_SIZE, + BLKIF_MAX_SEGMENTS_PER_REQUEST * + XC_PAGE_SIZE); + dataplane->requests_total++; + qemu_iovec_init(&request->v, 1); + } else { + /* get one from freelist */ + request = QLIST_FIRST(&dataplane->freelist); + QLIST_REMOVE(request, list); + } + QLIST_INSERT_HEAD(&dataplane->inflight, request, list); + dataplane->requests_inflight++; + +out: + return request; +} + +static void xen_block_complete_request(XenBlockRequest *request) +{ + XenBlockDataPlane *dataplane = request->dataplane; + + if (xen_block_send_response(request)) { + Error *local_err = NULL; + + xen_device_notify_event_channel(dataplane->xendev, + dataplane->event_channel, + &local_err); + if (local_err) { + error_report_err(local_err); + } + } + + QLIST_REMOVE(request, list); + dataplane->requests_inflight--; + reset_request(request); + request->dataplane = dataplane; + QLIST_INSERT_HEAD(&dataplane->freelist, request, list); +} + +/* + * translate request into iovec + start offset + * do sanity checks along the way + */ +static int xen_block_parse_request(XenBlockRequest *request) +{ + XenBlockDataPlane *dataplane = request->dataplane; + size_t len; + int i; + + switch (request->req.operation) { + case BLKIF_OP_READ: + break; + case BLKIF_OP_FLUSH_DISKCACHE: + request->presync = 1; + if (!request->req.nr_segments) { + return 0; + } + /* fall through */ + case BLKIF_OP_WRITE: + break; + case BLKIF_OP_DISCARD: + return 0; + default: + error_report("error: unknown operation (%d)", request->req.operation); + goto err; + }; + + if (request->req.operation != BLKIF_OP_READ && + !blk_is_writable(dataplane->blk)) { + error_report("error: write req for ro device"); + goto err; + } + + request->start = request->req.sector_number * dataplane->sector_size; + for (i = 0; i < request->req.nr_segments; i++) { + if (i == BLKIF_MAX_SEGMENTS_PER_REQUEST) { + error_report("error: nr_segments too big"); + goto err; + } + if (request->req.seg[i].first_sect > request->req.seg[i].last_sect) { + error_report("error: first > last sector"); + goto err; + } + if (request->req.seg[i].last_sect * dataplane->sector_size >= + XC_PAGE_SIZE) { + error_report("error: page crossing"); + goto err; + } + + len = (request->req.seg[i].last_sect - + request->req.seg[i].first_sect + 1) * dataplane->sector_size; + request->size += len; + } + if (request->start + request->size > blk_getlength(dataplane->blk)) { + error_report("error: access beyond end of file"); + goto err; + } + return 0; + +err: + request->status = BLKIF_RSP_ERROR; + return -1; +} + +static int xen_block_copy_request(XenBlockRequest *request) +{ + XenBlockDataPlane *dataplane = request->dataplane; + XenDevice *xendev = dataplane->xendev; + XenDeviceGrantCopySegment segs[BLKIF_MAX_SEGMENTS_PER_REQUEST]; + int i, count; + bool to_domain = (request->req.operation == BLKIF_OP_READ); + void *virt = request->buf; + Error *local_err = NULL; + + if (request->req.nr_segments == 0) { + return 0; + } + + count = request->req.nr_segments; + + for (i = 0; i < count; i++) { + if (to_domain) { + segs[i].dest.foreign.ref = request->req.seg[i].gref; + segs[i].dest.foreign.offset = request->req.seg[i].first_sect * + dataplane->sector_size; + segs[i].source.virt = virt; + } else { + segs[i].source.foreign.ref = request->req.seg[i].gref; + segs[i].source.foreign.offset = request->req.seg[i].first_sect * + dataplane->sector_size; + segs[i].dest.virt = virt; + } + segs[i].len = (request->req.seg[i].last_sect - + request->req.seg[i].first_sect + 1) * + dataplane->sector_size; + 
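/* step the bounce-buffer cursor past this segment */ +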
virt += segs[i].len; + } + + xen_device_copy_grant_refs(xendev, to_domain, segs, count, &local_err); + + if (local_err) { + error_reportf_err(local_err, "failed to copy data: "); + + request->aio_errors++; + return -1; + } + + return 0; +} + +static int xen_block_do_aio(XenBlockRequest *request); + +static void xen_block_complete_aio(void *opaque, int ret) +{ + XenBlockRequest *request = opaque; + XenBlockDataPlane *dataplane = request->dataplane; + + aio_context_acquire(dataplane->ctx); + + if (ret != 0) { + error_report("%s I/O error", + request->req.operation == BLKIF_OP_READ ? + "read" : "write"); + request->aio_errors++; + } + + request->aio_inflight--; + if (request->presync) { + request->presync = 0; + xen_block_do_aio(request); + goto done; + } + if (request->aio_inflight > 0) { + goto done; + } + + switch (request->req.operation) { + case BLKIF_OP_READ: + /* in case of failure request->aio_errors is increased */ + if (ret == 0) { + xen_block_copy_request(request); + } + break; + case BLKIF_OP_WRITE: + case BLKIF_OP_FLUSH_DISKCACHE: + default: + break; + } + + request->status = request->aio_errors ? BLKIF_RSP_ERROR : BLKIF_RSP_OKAY; + + switch (request->req.operation) { + case BLKIF_OP_WRITE: + case BLKIF_OP_FLUSH_DISKCACHE: + if (!request->req.nr_segments) { + break; + } + /* fall through */ + case BLKIF_OP_READ: + if (request->status == BLKIF_RSP_OKAY) { + block_acct_done(blk_get_stats(dataplane->blk), &request->acct); + } else { + block_acct_failed(blk_get_stats(dataplane->blk), &request->acct); + } + break; + case BLKIF_OP_DISCARD: + default: + break; + } + + xen_block_complete_request(request); + + if (dataplane->more_work) { + qemu_bh_schedule(dataplane->bh); + } + +done: + aio_context_release(dataplane->ctx); +} + +static bool xen_block_split_discard(XenBlockRequest *request, + blkif_sector_t sector_number, + uint64_t nr_sectors) +{ + XenBlockDataPlane *dataplane = request->dataplane; + int64_t byte_offset; + int byte_chunk; + uint64_t byte_remaining; + uint64_t sec_start = sector_number; + uint64_t sec_count = nr_sectors; + + /* Wrap around, or overflowing byte limit? */ + if (sec_start + sec_count < sec_count || + sec_start + sec_count > INT64_MAX / dataplane->sector_size) { + return false; + } + + byte_offset = sec_start * dataplane->sector_size; + byte_remaining = sec_count * dataplane->sector_size; + + do { + byte_chunk = byte_remaining > BDRV_REQUEST_MAX_BYTES ? 
+ BDRV_REQUEST_MAX_BYTES : byte_remaining; + request->aio_inflight++; + blk_aio_pdiscard(dataplane->blk, byte_offset, byte_chunk, + xen_block_complete_aio, request); + byte_remaining -= byte_chunk; + byte_offset += byte_chunk; + } while (byte_remaining > 0); + + return true; +} + +static int xen_block_do_aio(XenBlockRequest *request) +{ + XenBlockDataPlane *dataplane = request->dataplane; + + if (request->req.nr_segments && + (request->req.operation == BLKIF_OP_WRITE || + request->req.operation == BLKIF_OP_FLUSH_DISKCACHE) && + xen_block_copy_request(request)) { + goto err; + } + + request->aio_inflight++; + if (request->presync) { + blk_aio_flush(request->dataplane->blk, xen_block_complete_aio, + request); + return 0; + } + + switch (request->req.operation) { + case BLKIF_OP_READ: + qemu_iovec_add(&request->v, request->buf, request->size); + block_acct_start(blk_get_stats(dataplane->blk), &request->acct, + request->v.size, BLOCK_ACCT_READ); + request->aio_inflight++; + blk_aio_preadv(dataplane->blk, request->start, &request->v, 0, + xen_block_complete_aio, request); + break; + case BLKIF_OP_WRITE: + case BLKIF_OP_FLUSH_DISKCACHE: + if (!request->req.nr_segments) { + break; + } + + qemu_iovec_add(&request->v, request->buf, request->size); + block_acct_start(blk_get_stats(dataplane->blk), &request->acct, + request->v.size, + request->req.operation == BLKIF_OP_WRITE ? + BLOCK_ACCT_WRITE : BLOCK_ACCT_FLUSH); + request->aio_inflight++; + blk_aio_pwritev(dataplane->blk, request->start, &request->v, 0, + xen_block_complete_aio, request); + break; + case BLKIF_OP_DISCARD: + { + struct blkif_request_discard *req = (void *)&request->req; + if (!xen_block_split_discard(request, req->sector_number, + req->nr_sectors)) { + goto err; + } + break; + } + default: + /* unknown operation (shouldn't happen -- parse catches this) */ + goto err; + } + + xen_block_complete_aio(request, 0); + + return 0; + +err: + request->status = BLKIF_RSP_ERROR; + xen_block_complete_request(request); + return -1; +} + +static int xen_block_send_response(XenBlockRequest *request) +{ + XenBlockDataPlane *dataplane = request->dataplane; + int send_notify = 0; + int have_requests = 0; + blkif_response_t *resp; + + /* Place on the response ring for the relevant domain. */ + switch (dataplane->protocol) { + case BLKIF_PROTOCOL_NATIVE: + resp = (blkif_response_t *)RING_GET_RESPONSE( + &dataplane->rings.native, + dataplane->rings.native.rsp_prod_pvt); + break; + case BLKIF_PROTOCOL_X86_32: + resp = (blkif_response_t *)RING_GET_RESPONSE( + &dataplane->rings.x86_32_part, + dataplane->rings.x86_32_part.rsp_prod_pvt); + break; + case BLKIF_PROTOCOL_X86_64: + resp = (blkif_response_t *)RING_GET_RESPONSE( + &dataplane->rings.x86_64_part, + dataplane->rings.x86_64_part.rsp_prod_pvt); + break; + default: + return 0; + } + + resp->id = request->req.id; + resp->operation = request->req.operation; + resp->status = request->status; + + dataplane->rings.common.rsp_prod_pvt++; + + RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&dataplane->rings.common, + send_notify); + if (dataplane->rings.common.rsp_prod_pvt == + dataplane->rings.common.req_cons) { + /* + * Tail check for pending requests. Allows frontend to avoid + * notifications if requests are already in flight (lower + * overheads and promotes batching). 
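+ * RING_FINAL_CHECK_FOR_REQUESTS re-arms the ring's event index and + * re-checks it, closing the race with a frontend that queued a + * request in the meantime.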
+ */ + RING_FINAL_CHECK_FOR_REQUESTS(&dataplane->rings.common, + have_requests); + } else if (RING_HAS_UNCONSUMED_REQUESTS(&dataplane->rings.common)) { + have_requests = 1; + } + + if (have_requests) { + dataplane->more_work++; + } + return send_notify; +} + +static int xen_block_get_request(XenBlockDataPlane *dataplane, + XenBlockRequest *request, RING_IDX rc) +{ + switch (dataplane->protocol) { + case BLKIF_PROTOCOL_NATIVE: { + blkif_request_t *req = + RING_GET_REQUEST(&dataplane->rings.native, rc); + + memcpy(&request->req, req, sizeof(request->req)); + break; + } + case BLKIF_PROTOCOL_X86_32: { + blkif_x86_32_request_t *req = + RING_GET_REQUEST(&dataplane->rings.x86_32_part, rc); + + blkif_get_x86_32_req(&request->req, req); + break; + } + case BLKIF_PROTOCOL_X86_64: { + blkif_x86_64_request_t *req = + RING_GET_REQUEST(&dataplane->rings.x86_64_part, rc); + + blkif_get_x86_64_req(&request->req, req); + break; + } + } + /* Prevent the compiler from accessing the on-ring fields instead. */ + barrier(); + return 0; +} + +/* + * Threshold of in-flight requests above which we will start using + * blk_io_plug()/blk_io_unplug() to batch requests. + */ +#define IO_PLUG_THRESHOLD 1 + +static bool xen_block_handle_requests(XenBlockDataPlane *dataplane) +{ + RING_IDX rc, rp; + XenBlockRequest *request; + int inflight_atstart = dataplane->requests_inflight; + int batched = 0; + bool done_something = false; + + dataplane->more_work = 0; + + rc = dataplane->rings.common.req_cons; + rp = dataplane->rings.common.sring->req_prod; + xen_rmb(); /* Ensure we see queued requests up to 'rp'. */ + + /* + * If there were more than IO_PLUG_THRESHOLD requests in flight + * when we got here, this is an indication that the bottleneck + * is below us, so it's worth beginning to batch up I/O requests + * rather than submitting them immediately. The maximum number + * of requests we're willing to batch is the number already in + * flight, so it can grow up to max_requests when the bottleneck + * is below us.
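+ *
+ * For example, with eight requests already in flight we plug, queue
+ * roughly eight more submissions as one batch, unplug to flush the
+ * batch down to the block layer, then start the next batch; with
+ * zero or one requests in flight every request is submitted
+ * immediately.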
+ */ + if (inflight_atstart > IO_PLUG_THRESHOLD) { + blk_io_plug(dataplane->blk); + } + while (rc != rp) { + /* pull request from ring */ + if (RING_REQUEST_CONS_OVERFLOW(&dataplane->rings.common, rc)) { + break; + } + request = xen_block_start_request(dataplane); + if (request == NULL) { + dataplane->more_work++; + break; + } + xen_block_get_request(dataplane, request, rc); + dataplane->rings.common.req_cons = ++rc; + done_something = true; + + /* parse them */ + if (xen_block_parse_request(request) != 0) { + switch (request->req.operation) { + case BLKIF_OP_READ: + block_acct_invalid(blk_get_stats(dataplane->blk), + BLOCK_ACCT_READ); + break; + case BLKIF_OP_WRITE: + block_acct_invalid(blk_get_stats(dataplane->blk), + BLOCK_ACCT_WRITE); + break; + case BLKIF_OP_FLUSH_DISKCACHE: + block_acct_invalid(blk_get_stats(dataplane->blk), + BLOCK_ACCT_FLUSH); + break; + default: + break; + } + + xen_block_complete_request(request); + continue; + } + + if (inflight_atstart > IO_PLUG_THRESHOLD && + batched >= inflight_atstart) { + blk_io_unplug(dataplane->blk); + } + xen_block_do_aio(request); + if (inflight_atstart > IO_PLUG_THRESHOLD) { + if (batched >= inflight_atstart) { + blk_io_plug(dataplane->blk); + batched = 0; + } else { + batched++; + } + } + } + if (inflight_atstart > IO_PLUG_THRESHOLD) { + blk_io_unplug(dataplane->blk); + } + + return done_something; +} + +static void xen_block_dataplane_bh(void *opaque) +{ + XenBlockDataPlane *dataplane = opaque; + + aio_context_acquire(dataplane->ctx); + xen_block_handle_requests(dataplane); + aio_context_release(dataplane->ctx); +} + +static bool xen_block_dataplane_event(void *opaque) +{ + XenBlockDataPlane *dataplane = opaque; + + return xen_block_handle_requests(dataplane); +} + +XenBlockDataPlane *xen_block_dataplane_create(XenDevice *xendev, + BlockBackend *blk, + unsigned int sector_size, + IOThread *iothread) +{ + XenBlockDataPlane *dataplane = g_new0(XenBlockDataPlane, 1); + + dataplane->xendev = xendev; + dataplane->blk = blk; + dataplane->sector_size = sector_size; + + QLIST_INIT(&dataplane->inflight); + QLIST_INIT(&dataplane->freelist); + + if (iothread) { + dataplane->iothread = iothread; + object_ref(OBJECT(dataplane->iothread)); + dataplane->ctx = iothread_get_aio_context(dataplane->iothread); + } else { + dataplane->ctx = qemu_get_aio_context(); + } + dataplane->bh = aio_bh_new(dataplane->ctx, xen_block_dataplane_bh, + dataplane); + + return dataplane; +} + +void xen_block_dataplane_destroy(XenBlockDataPlane *dataplane) +{ + XenBlockRequest *request; + + if (!dataplane) { + return; + } + + while (!QLIST_EMPTY(&dataplane->freelist)) { + request = QLIST_FIRST(&dataplane->freelist); + QLIST_REMOVE(request, list); + qemu_iovec_destroy(&request->v); + qemu_vfree(request->buf); + g_free(request); + } + + qemu_bh_delete(dataplane->bh); + if (dataplane->iothread) { + object_unref(OBJECT(dataplane->iothread)); + } + + g_free(dataplane); +} + +void xen_block_dataplane_stop(XenBlockDataPlane *dataplane) +{ + XenDevice *xendev; + + if (!dataplane) { + return; + } + + xendev = dataplane->xendev; + + aio_context_acquire(dataplane->ctx); + if (dataplane->event_channel) { + /* Only reason for failure is a NULL channel */ + xen_device_set_event_channel_context(xendev, dataplane->event_channel, + qemu_get_aio_context(), + &error_abort); + } + /* Xen doesn't have multiple users for nodes, so this can't fail */ + blk_set_aio_context(dataplane->blk, qemu_get_aio_context(), &error_abort); + aio_context_release(dataplane->ctx); + + /* + * Now that the context has
been moved onto the main thread, cancel + * further processing. + */ + qemu_bh_cancel(dataplane->bh); + + if (dataplane->event_channel) { + Error *local_err = NULL; + + xen_device_unbind_event_channel(xendev, dataplane->event_channel, + &local_err); + dataplane->event_channel = NULL; + + if (local_err) { + error_report_err(local_err); + } + } + + if (dataplane->sring) { + Error *local_err = NULL; + + xen_device_unmap_grant_refs(xendev, dataplane->sring, + dataplane->nr_ring_ref, &local_err); + dataplane->sring = NULL; + + if (local_err) { + error_report_err(local_err); + } + } + + g_free(dataplane->ring_ref); + dataplane->ring_ref = NULL; +} + +void xen_block_dataplane_start(XenBlockDataPlane *dataplane, + const unsigned int ring_ref[], + unsigned int nr_ring_ref, + unsigned int event_channel, + unsigned int protocol, + Error **errp) +{ + ERRP_GUARD(); + XenDevice *xendev = dataplane->xendev; + AioContext *old_context; + unsigned int ring_size; + unsigned int i; + + dataplane->nr_ring_ref = nr_ring_ref; + dataplane->ring_ref = g_new(unsigned int, nr_ring_ref); + + for (i = 0; i < nr_ring_ref; i++) { + dataplane->ring_ref[i] = ring_ref[i]; + } + + dataplane->protocol = protocol; + + ring_size = XC_PAGE_SIZE * dataplane->nr_ring_ref; + switch (dataplane->protocol) { + case BLKIF_PROTOCOL_NATIVE: + { + dataplane->max_requests = __CONST_RING_SIZE(blkif, ring_size); + break; + } + case BLKIF_PROTOCOL_X86_32: + { + dataplane->max_requests = __CONST_RING_SIZE(blkif_x86_32, ring_size); + break; + } + case BLKIF_PROTOCOL_X86_64: + { + dataplane->max_requests = __CONST_RING_SIZE(blkif_x86_64, ring_size); + break; + } + default: + error_setg(errp, "unknown protocol %u", dataplane->protocol); + return; + } + + xen_device_set_max_grant_refs(xendev, dataplane->nr_ring_ref, + errp); + if (*errp) { + goto stop; + } + + dataplane->sring = xen_device_map_grant_refs(xendev, + dataplane->ring_ref, + dataplane->nr_ring_ref, + PROT_READ | PROT_WRITE, + errp); + if (*errp) { + goto stop; + } + + switch (dataplane->protocol) { + case BLKIF_PROTOCOL_NATIVE: + { + blkif_sring_t *sring_native = dataplane->sring; + + BACK_RING_INIT(&dataplane->rings.native, sring_native, ring_size); + break; + } + case BLKIF_PROTOCOL_X86_32: + { + blkif_x86_32_sring_t *sring_x86_32 = dataplane->sring; + + BACK_RING_INIT(&dataplane->rings.x86_32_part, sring_x86_32, + ring_size); + break; + } + case BLKIF_PROTOCOL_X86_64: + { + blkif_x86_64_sring_t *sring_x86_64 = dataplane->sring; + + BACK_RING_INIT(&dataplane->rings.x86_64_part, sring_x86_64, + ring_size); + break; + } + } + + dataplane->event_channel = + xen_device_bind_event_channel(xendev, event_channel, + xen_block_dataplane_event, dataplane, + errp); + if (*errp) { + goto stop; + } + + old_context = blk_get_aio_context(dataplane->blk); + aio_context_acquire(old_context); + /* If other users keep the BlockBackend in the iothread, that's ok */ + blk_set_aio_context(dataplane->blk, dataplane->ctx, NULL); + aio_context_release(old_context); + + /* Only reason for failure is a NULL channel */ + aio_context_acquire(dataplane->ctx); + xen_device_set_event_channel_context(xendev, dataplane->event_channel, + dataplane->ctx, &error_abort); + aio_context_release(dataplane->ctx); + + return; + +stop: + xen_block_dataplane_stop(dataplane); +} diff --git a/hw/block/dataplane/xen-block.h b/hw/block/dataplane/xen-block.h new file mode 100644 index 000000000..76dcd51c3 --- /dev/null +++ b/hw/block/dataplane/xen-block.h @@ -0,0 +1,30 @@ +/* + * Copyright (c) 2018 Citrix Systems Inc. 
+ * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#ifndef HW_BLOCK_DATAPLANE_XEN_BLOCK_H +#define HW_BLOCK_DATAPLANE_XEN_BLOCK_H + +#include "hw/block/block.h" +#include "hw/xen/xen-bus.h" +#include "sysemu/iothread.h" + +typedef struct XenBlockDataPlane XenBlockDataPlane; + +XenBlockDataPlane *xen_block_dataplane_create(XenDevice *xendev, + BlockBackend *blk, + unsigned int sector_size, + IOThread *iothread); +void xen_block_dataplane_destroy(XenBlockDataPlane *dataplane); +void xen_block_dataplane_start(XenBlockDataPlane *dataplane, + const unsigned int ring_ref[], + unsigned int nr_ring_ref, + unsigned int event_channel, + unsigned int protocol, + Error **errp); +void xen_block_dataplane_stop(XenBlockDataPlane *dataplane); + +#endif /* HW_BLOCK_DATAPLANE_XEN_BLOCK_H */ diff --git a/hw/block/ecc.c b/hw/block/ecc.c new file mode 100644 index 000000000..6e0d63842 --- /dev/null +++ b/hw/block/ecc.c @@ -0,0 +1,91 @@ +/* + * Calculate Error-correcting Codes. Used by NAND Flash controllers + * (not by NAND chips). + * + * Copyright (c) 2006 Openedhand Ltd. + * Written by Andrzej Zaborowski + * + * This code is licensed under the GNU GPL v2. + * + * Contributions after 2012-01-13 are licensed under the terms of the + * GNU GPL, version 2 or (at your option) any later version. + */ + +#include "qemu/osdep.h" +#include "migration/vmstate.h" +#include "hw/block/flash.h" + +/* + * Pre-calculated 256-way 1 byte column parity. Table borrowed from Linux. + */ +static const uint8_t nand_ecc_precalc_table[] = { + 0x00, 0x55, 0x56, 0x03, 0x59, 0x0c, 0x0f, 0x5a, + 0x5a, 0x0f, 0x0c, 0x59, 0x03, 0x56, 0x55, 0x00, + 0x65, 0x30, 0x33, 0x66, 0x3c, 0x69, 0x6a, 0x3f, + 0x3f, 0x6a, 0x69, 0x3c, 0x66, 0x33, 0x30, 0x65, + 0x66, 0x33, 0x30, 0x65, 0x3f, 0x6a, 0x69, 0x3c, + 0x3c, 0x69, 0x6a, 0x3f, 0x65, 0x30, 0x33, 0x66, + 0x03, 0x56, 0x55, 0x00, 0x5a, 0x0f, 0x0c, 0x59, + 0x59, 0x0c, 0x0f, 0x5a, 0x00, 0x55, 0x56, 0x03, + 0x69, 0x3c, 0x3f, 0x6a, 0x30, 0x65, 0x66, 0x33, + 0x33, 0x66, 0x65, 0x30, 0x6a, 0x3f, 0x3c, 0x69, + 0x0c, 0x59, 0x5a, 0x0f, 0x55, 0x00, 0x03, 0x56, + 0x56, 0x03, 0x00, 0x55, 0x0f, 0x5a, 0x59, 0x0c, + 0x0f, 0x5a, 0x59, 0x0c, 0x56, 0x03, 0x00, 0x55, + 0x55, 0x00, 0x03, 0x56, 0x0c, 0x59, 0x5a, 0x0f, + 0x6a, 0x3f, 0x3c, 0x69, 0x33, 0x66, 0x65, 0x30, + 0x30, 0x65, 0x66, 0x33, 0x69, 0x3c, 0x3f, 0x6a, + 0x6a, 0x3f, 0x3c, 0x69, 0x33, 0x66, 0x65, 0x30, + 0x30, 0x65, 0x66, 0x33, 0x69, 0x3c, 0x3f, 0x6a, + 0x0f, 0x5a, 0x59, 0x0c, 0x56, 0x03, 0x00, 0x55, + 0x55, 0x00, 0x03, 0x56, 0x0c, 0x59, 0x5a, 0x0f, + 0x0c, 0x59, 0x5a, 0x0f, 0x55, 0x00, 0x03, 0x56, + 0x56, 0x03, 0x00, 0x55, 0x0f, 0x5a, 0x59, 0x0c, + 0x69, 0x3c, 0x3f, 0x6a, 0x30, 0x65, 0x66, 0x33, + 0x33, 0x66, 0x65, 0x30, 0x6a, 0x3f, 0x3c, 0x69, + 0x03, 0x56, 0x55, 0x00, 0x5a, 0x0f, 0x0c, 0x59, + 0x59, 0x0c, 0x0f, 0x5a, 0x00, 0x55, 0x56, 0x03, + 0x66, 0x33, 0x30, 0x65, 0x3f, 0x6a, 0x69, 0x3c, + 0x3c, 0x69, 0x6a, 0x3f, 0x65, 0x30, 0x33, 0x66, + 0x65, 0x30, 0x33, 0x66, 0x3c, 0x69, 0x6a, 0x3f, + 0x3f, 0x6a, 0x69, 0x3c, 0x66, 0x33, 0x30, 0x65, + 0x00, 0x55, 0x56, 0x03, 0x59, 0x0c, 0x0f, 0x5a, + 0x5a, 0x0f, 0x0c, 0x59, 0x03, 0x56, 0x55, 0x00, +}; + +/* Update ECC parity count. */ +uint8_t ecc_digest(ECCState *s, uint8_t sample) +{ + uint8_t idx = nand_ecc_precalc_table[sample]; + + s->cp ^= idx & 0x3f; + if (idx & 0x40) { + s->lp[0] ^= ~s->count; + s->lp[1] ^= s->count; + } + s->count ++; + + return sample; +} + +/* Reinitialise the counters. 
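+ * Callers are expected to do this before streaming each fresh block
+ * of data through ecc_digest(), so the line and column parity
+ * accumulators start from a known state.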
*/ +void ecc_reset(ECCState *s) +{ + s->lp[0] = 0x0000; + s->lp[1] = 0x0000; + s->cp = 0x00; + s->count = 0; +} + +/* Save/restore */ +const VMStateDescription vmstate_ecc_state = { + .name = "ecc-state", + .version_id = 0, + .minimum_version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_UINT8(cp, ECCState), + VMSTATE_UINT16_ARRAY(lp, ECCState, 2), + VMSTATE_UINT16(count, ECCState), + VMSTATE_END_OF_LIST(), + }, +}; diff --git a/hw/block/fdc-internal.h b/hw/block/fdc-internal.h new file mode 100644 index 000000000..036392e9f --- /dev/null +++ b/hw/block/fdc-internal.h @@ -0,0 +1,158 @@ +/* + * QEMU Floppy disk emulator (Intel 82078) + * + * Copyright (c) 2003, 2007 Jocelyn Mayer + * Copyright (c) 2008 Hervé Poussineau + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ +#ifndef HW_BLOCK_FDC_INTERNAL_H +#define HW_BLOCK_FDC_INTERNAL_H + +#include "exec/memory.h" +#include "exec/ioport.h" +#include "hw/block/block.h" +#include "hw/block/fdc.h" +#include "qapi/qapi-types-block.h" + +typedef struct FDCtrl FDCtrl; + +/* Floppy bus emulation */ + +typedef struct FloppyBus { + BusState bus; + FDCtrl *fdc; +} FloppyBus; + +/* Floppy disk drive emulation */ + +typedef enum FDriveRate { + FDRIVE_RATE_500K = 0x00, /* 500 Kbps */ + FDRIVE_RATE_300K = 0x01, /* 300 Kbps */ + FDRIVE_RATE_250K = 0x02, /* 250 Kbps */ + FDRIVE_RATE_1M = 0x03, /* 1 Mbps */ +} FDriveRate; + +typedef enum FDriveSize { + FDRIVE_SIZE_UNKNOWN, + FDRIVE_SIZE_350, + FDRIVE_SIZE_525, +} FDriveSize; + +typedef struct FDFormat { + FloppyDriveType drive; + uint8_t last_sect; + uint8_t max_track; + uint8_t max_head; + FDriveRate rate; +} FDFormat; + +typedef enum FDiskFlags { + FDISK_DBL_SIDES = 0x01, +} FDiskFlags; + +typedef struct FDrive { + FDCtrl *fdctrl; + BlockBackend *blk; + BlockConf *conf; + /* Drive status */ + FloppyDriveType drive; /* CMOS drive type */ + uint8_t perpendicular; /* 2.88 MB access mode */ + /* Position */ + uint8_t head; + uint8_t track; + uint8_t sect; + /* Media */ + FloppyDriveType disk; /* Current disk type */ + FDiskFlags flags; + uint8_t last_sect; /* Nb sector per track */ + uint8_t max_track; /* Nb of tracks */ + uint16_t bps; /* Bytes per sector */ + uint8_t ro; /* Is read-only */ + uint8_t media_changed; /* Is media changed */ + uint8_t media_rate; /* Data rate of medium */ + + bool media_validated; /* Have we validated the media? 
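+                              * Cleared on media change; set once
+                              * pick_geometry() accepts the medium.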
*/ +} FDrive; + +struct FDCtrl { + MemoryRegion iomem; + qemu_irq irq; + /* Controller state */ + QEMUTimer *result_timer; + int dma_chann; + uint8_t phase; + IsaDma *dma; + /* Controller's identification */ + uint8_t version; + /* HW */ + uint8_t sra; + uint8_t srb; + uint8_t dor; + uint8_t dor_vmstate; /* only used as temp during vmstate */ + uint8_t tdr; + uint8_t dsr; + uint8_t msr; + uint8_t cur_drv; + uint8_t status0; + uint8_t status1; + uint8_t status2; + /* Command FIFO */ + uint8_t *fifo; + int32_t fifo_size; + uint32_t data_pos; + uint32_t data_len; + uint8_t data_state; + uint8_t data_dir; + uint8_t eot; /* last wanted sector */ + /* States kept only to be returned back */ + /* precompensation */ + uint8_t precomp_trk; + uint8_t config; + uint8_t lock; + /* Power down config (also with status regB access mode) */ + uint8_t pwrd; + /* Floppy drives */ + FloppyBus bus; + uint8_t num_floppies; + FDrive drives[MAX_FD]; + struct { + FloppyDriveType type; + } qdev_for_drives[MAX_FD]; + int reset_sensei; + FloppyDriveType fallback; /* type=auto failure fallback */ + /* Timers state */ + uint8_t timer0; + uint8_t timer1; + PortioList portio_list; +}; + +extern const FDFormat fd_formats[]; +extern const VMStateDescription vmstate_fdc; + +uint32_t fdctrl_read(void *opaque, uint32_t reg); +void fdctrl_write(void *opaque, uint32_t reg, uint32_t value); +void fdctrl_reset(FDCtrl *fdctrl, int do_irq); +void fdctrl_realize_common(DeviceState *dev, FDCtrl *fdctrl, Error **errp); + +int fdctrl_transfer_handler(void *opaque, int nchan, int dma_pos, int dma_len); + +void fdctrl_init_drives(FloppyBus *bus, DriveInfo **fds); + +#endif diff --git a/hw/block/fdc-isa.c b/hw/block/fdc-isa.c new file mode 100644 index 000000000..3bf64e066 --- /dev/null +++ b/hw/block/fdc-isa.c @@ -0,0 +1,320 @@ +/* + * QEMU Floppy disk emulator (Intel 82078) + * + * Copyright (c) 2003, 2007 Jocelyn Mayer + * Copyright (c) 2008 Hervé Poussineau + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ +/* + * The controller is used in Sun4m systems in a slightly different + * way. There are changes in DOR register and DMA is not available.
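+ *
+ * The ISA variant below decodes ports base + 1..5 (SRB, DOR, TDR,
+ * MSR/DSR and FIFO) plus base + 7 (DIR/CCR); the port offset is
+ * passed to fdctrl_read()/fdctrl_write() as the register number.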
+ */ + +#include "qemu/osdep.h" +#include "hw/block/fdc.h" +#include "qapi/error.h" +#include "qemu/error-report.h" +#include "qemu/timer.h" +#include "hw/acpi/aml-build.h" +#include "hw/irq.h" +#include "hw/isa/isa.h" +#include "hw/qdev-properties.h" +#include "hw/qdev-properties-system.h" +#include "migration/vmstate.h" +#include "hw/block/block.h" +#include "sysemu/block-backend.h" +#include "sysemu/blockdev.h" +#include "sysemu/sysemu.h" +#include "qemu/log.h" +#include "qemu/main-loop.h" +#include "qemu/module.h" +#include "trace.h" +#include "qom/object.h" +#include "fdc-internal.h" + +OBJECT_DECLARE_SIMPLE_TYPE(FDCtrlISABus, ISA_FDC) + +struct FDCtrlISABus { + /*< private >*/ + ISADevice parent_obj; + /*< public >*/ + + uint32_t iobase; + uint32_t irq; + uint32_t dma; + struct FDCtrl state; + int32_t bootindexA; + int32_t bootindexB; +}; + +static void fdctrl_external_reset_isa(DeviceState *d) +{ + FDCtrlISABus *isa = ISA_FDC(d); + FDCtrl *s = &isa->state; + + fdctrl_reset(s, 0); +} + +void isa_fdc_init_drives(ISADevice *fdc, DriveInfo **fds) +{ + fdctrl_init_drives(&ISA_FDC(fdc)->state.bus, fds); +} + +static const MemoryRegionPortio fdc_portio_list[] = { + { 1, 5, 1, .read = fdctrl_read, .write = fdctrl_write }, + { 7, 1, 1, .read = fdctrl_read, .write = fdctrl_write }, + PORTIO_END_OF_LIST(), +}; + +static void isabus_fdc_realize(DeviceState *dev, Error **errp) +{ + ISADevice *isadev = ISA_DEVICE(dev); + FDCtrlISABus *isa = ISA_FDC(dev); + FDCtrl *fdctrl = &isa->state; + Error *err = NULL; + + isa_register_portio_list(isadev, &fdctrl->portio_list, + isa->iobase, fdc_portio_list, fdctrl, + "fdc"); + + isa_init_irq(isadev, &fdctrl->irq, isa->irq); + fdctrl->dma_chann = isa->dma; + if (fdctrl->dma_chann != -1) { + IsaDmaClass *k; + fdctrl->dma = isa_get_dma(isa_bus_from_device(isadev), isa->dma); + if (!fdctrl->dma) { + error_setg(errp, "ISA controller does not support DMA"); + return; + } + k = ISADMA_GET_CLASS(fdctrl->dma); + k->register_channel(fdctrl->dma, fdctrl->dma_chann, + &fdctrl_transfer_handler, fdctrl); + } + + qdev_set_legacy_instance_id(dev, isa->iobase, 2); + + fdctrl_realize_common(dev, fdctrl, &err); + if (err != NULL) { + error_propagate(errp, err); + return; + } +} + +FloppyDriveType isa_fdc_get_drive_type(ISADevice *fdc, int i) +{ + FDCtrlISABus *isa = ISA_FDC(fdc); + + return isa->state.drives[i].drive; +} + +static void isa_fdc_get_drive_max_chs(FloppyDriveType type, uint8_t *maxc, + uint8_t *maxh, uint8_t *maxs) +{ + const FDFormat *fdf; + + *maxc = *maxh = *maxs = 0; + for (fdf = fd_formats; fdf->drive != FLOPPY_DRIVE_TYPE_NONE; fdf++) { + if (fdf->drive != type) { + continue; + } + if (*maxc < fdf->max_track) { + *maxc = fdf->max_track; + } + if (*maxh < fdf->max_head) { + *maxh = fdf->max_head; + } + if (*maxs < fdf->last_sect) { + *maxs = fdf->last_sect; + } + } + (*maxc)--; +} + +static Aml *build_fdinfo_aml(int idx, FloppyDriveType type) +{ + Aml *dev, *fdi; + uint8_t maxc, maxh, maxs; + + isa_fdc_get_drive_max_chs(type, &maxc, &maxh, &maxs); + + dev = aml_device("FLP%c", 'A' + idx); + + aml_append(dev, aml_name_decl("_ADR", aml_int(idx))); + + fdi = aml_package(16); + aml_append(fdi, aml_int(idx)); /* Drive Number */ + aml_append(fdi, + aml_int(cmos_get_fd_drive_type(type))); /* Device Type */ + /* + * the values below are the limits of the drive, and are thus independent + * of the inserted media + */ + aml_append(fdi, aml_int(maxc)); /* Maximum Cylinder Number */ + aml_append(fdi, aml_int(maxs)); /* Maximum Sector Number */ + aml_append(fdi, 
aml_int(maxh)); /* Maximum Head Number */ + /* + * SeaBIOS returns the below values for int 0x13 func 0x08 regardless of + * the drive type, so shall we + */ + aml_append(fdi, aml_int(0xAF)); /* disk_specify_1 */ + aml_append(fdi, aml_int(0x02)); /* disk_specify_2 */ + aml_append(fdi, aml_int(0x25)); /* disk_motor_wait */ + aml_append(fdi, aml_int(0x02)); /* disk_sector_siz */ + aml_append(fdi, aml_int(0x12)); /* disk_eot */ + aml_append(fdi, aml_int(0x1B)); /* disk_rw_gap */ + aml_append(fdi, aml_int(0xFF)); /* disk_dtl */ + aml_append(fdi, aml_int(0x6C)); /* disk_formt_gap */ + aml_append(fdi, aml_int(0xF6)); /* disk_fill */ + aml_append(fdi, aml_int(0x0F)); /* disk_head_sttl */ + aml_append(fdi, aml_int(0x08)); /* disk_motor_strt */ + + aml_append(dev, aml_name_decl("_FDI", fdi)); + return dev; +} + +int cmos_get_fd_drive_type(FloppyDriveType fd0) +{ + int val; + + switch (fd0) { + case FLOPPY_DRIVE_TYPE_144: + /* 1.44 Mb 3"5 drive */ + val = 4; + break; + case FLOPPY_DRIVE_TYPE_288: + /* 2.88 Mb 3"5 drive */ + val = 5; + break; + case FLOPPY_DRIVE_TYPE_120: + /* 1.2 Mb 5"25 drive */ + val = 2; + break; + case FLOPPY_DRIVE_TYPE_NONE: + default: + val = 0; + break; + } + return val; +} + +static void fdc_isa_build_aml(ISADevice *isadev, Aml *scope) +{ + Aml *dev; + Aml *crs; + int i; + +#define ACPI_FDE_MAX_FD 4 + uint32_t fde_buf[5] = { + 0, 0, 0, 0, /* presence of floppy drives #0 - #3 */ + cpu_to_le32(2) /* tape presence (2 == never present) */ + }; + + crs = aml_resource_template(); + aml_append(crs, aml_io(AML_DECODE16, 0x03F2, 0x03F2, 0x00, 0x04)); + aml_append(crs, aml_io(AML_DECODE16, 0x03F7, 0x03F7, 0x00, 0x01)); + aml_append(crs, aml_irq_no_flags(6)); + aml_append(crs, + aml_dma(AML_COMPATIBILITY, AML_NOTBUSMASTER, AML_TRANSFER8, 2)); + + dev = aml_device("FDC0"); + aml_append(dev, aml_name_decl("_HID", aml_eisaid("PNP0700"))); + aml_append(dev, aml_name_decl("_CRS", crs)); + + for (i = 0; i < MIN(MAX_FD, ACPI_FDE_MAX_FD); i++) { + FloppyDriveType type = isa_fdc_get_drive_type(isadev, i); + + if (type < FLOPPY_DRIVE_TYPE_NONE) { + fde_buf[i] = cpu_to_le32(1); /* drive present */ + aml_append(dev, build_fdinfo_aml(i, type)); + } + } + aml_append(dev, aml_name_decl("_FDE", + aml_buffer(sizeof(fde_buf), (uint8_t *)fde_buf))); + + aml_append(scope, dev); +} + +static const VMStateDescription vmstate_isa_fdc = { + .name = "fdc", + .version_id = 2, + .minimum_version_id = 2, + .fields = (VMStateField[]) { + VMSTATE_STRUCT(state, FDCtrlISABus, 0, vmstate_fdc, FDCtrl), + VMSTATE_END_OF_LIST() + } +}; + +static Property isa_fdc_properties[] = { + DEFINE_PROP_UINT32("iobase", FDCtrlISABus, iobase, 0x3f0), + DEFINE_PROP_UINT32("irq", FDCtrlISABus, irq, 6), + DEFINE_PROP_UINT32("dma", FDCtrlISABus, dma, 2), + DEFINE_PROP_SIGNED("fdtypeA", FDCtrlISABus, state.qdev_for_drives[0].type, + FLOPPY_DRIVE_TYPE_AUTO, qdev_prop_fdc_drive_type, + FloppyDriveType), + DEFINE_PROP_SIGNED("fdtypeB", FDCtrlISABus, state.qdev_for_drives[1].type, + FLOPPY_DRIVE_TYPE_AUTO, qdev_prop_fdc_drive_type, + FloppyDriveType), + DEFINE_PROP_SIGNED("fallback", FDCtrlISABus, state.fallback, + FLOPPY_DRIVE_TYPE_288, qdev_prop_fdc_drive_type, + FloppyDriveType), + DEFINE_PROP_END_OF_LIST(), +}; + +static void isabus_fdc_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + ISADeviceClass *isa = ISA_DEVICE_CLASS(klass); + + dc->desc = "virtual floppy controller"; + dc->realize = isabus_fdc_realize; + dc->fw_name = "fdc"; + dc->reset = fdctrl_external_reset_isa; + dc->vmsd =
&vmstate_isa_fdc; + isa->build_aml = fdc_isa_build_aml; + device_class_set_props(dc, isa_fdc_properties); + set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); +} + +static void isabus_fdc_instance_init(Object *obj) +{ + FDCtrlISABus *isa = ISA_FDC(obj); + + device_add_bootindex_property(obj, &isa->bootindexA, + "bootindexA", "/floppy@0", + DEVICE(obj)); + device_add_bootindex_property(obj, &isa->bootindexB, + "bootindexB", "/floppy@1", + DEVICE(obj)); +} + +static const TypeInfo isa_fdc_info = { + .name = TYPE_ISA_FDC, + .parent = TYPE_ISA_DEVICE, + .instance_size = sizeof(FDCtrlISABus), + .class_init = isabus_fdc_class_init, + .instance_init = isabus_fdc_instance_init, +}; + +static void isa_fdc_register_types(void) +{ + type_register_static(&isa_fdc_info); +} + +type_init(isa_fdc_register_types) diff --git a/hw/block/fdc-sysbus.c b/hw/block/fdc-sysbus.c new file mode 100644 index 000000000..57fc8773f --- /dev/null +++ b/hw/block/fdc-sysbus.c @@ -0,0 +1,251 @@ +/* + * QEMU Floppy disk emulator (Intel 82078) + * + * Copyright (c) 2003, 2007 Jocelyn Mayer + * Copyright (c) 2008 Hervé Poussineau + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qom/object.h" +#include "hw/sysbus.h" +#include "hw/block/fdc.h" +#include "migration/vmstate.h" +#include "fdc-internal.h" +#include "trace.h" + +#define TYPE_SYSBUS_FDC "base-sysbus-fdc" +typedef struct FDCtrlSysBusClass FDCtrlSysBusClass; +typedef struct FDCtrlSysBus FDCtrlSysBus; +DECLARE_OBJ_CHECKERS(FDCtrlSysBus, FDCtrlSysBusClass, + SYSBUS_FDC, TYPE_SYSBUS_FDC) + +struct FDCtrlSysBusClass { + /*< private >*/ + SysBusDeviceClass parent_class; + /*< public >*/ + + bool use_strict_io; +}; + +struct FDCtrlSysBus { + /*< private >*/ + SysBusDevice parent_obj; + /*< public >*/ + + struct FDCtrl state; +}; + +static uint64_t fdctrl_read_mem(void *opaque, hwaddr reg, unsigned size) +{ + return fdctrl_read(opaque, (uint32_t)reg); +} + +static void fdctrl_write_mem(void *opaque, hwaddr reg, + uint64_t value, unsigned size) +{ + fdctrl_write(opaque, (uint32_t)reg, value); +} + +static const MemoryRegionOps fdctrl_mem_ops = { + .read = fdctrl_read_mem, + .write = fdctrl_write_mem, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static const MemoryRegionOps fdctrl_mem_strict_ops = { + .read = fdctrl_read_mem, + .write = fdctrl_write_mem, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + .min_access_size = 1, + .max_access_size = 1, + }, +}; + +static void fdctrl_external_reset_sysbus(DeviceState *d) +{ + FDCtrlSysBus *sys = SYSBUS_FDC(d); + FDCtrl *s = &sys->state; + + fdctrl_reset(s, 0); +} + +static void fdctrl_handle_tc(void *opaque, int irq, int level) +{ + trace_fdctrl_tc_pulse(level); +} + +void fdctrl_init_sysbus(qemu_irq irq, int dma_chann, + hwaddr mmio_base, DriveInfo **fds) +{ + FDCtrl *fdctrl; + DeviceState *dev; + SysBusDevice *sbd; + FDCtrlSysBus *sys; + + dev = qdev_new("sysbus-fdc"); + sys = SYSBUS_FDC(dev); + fdctrl = &sys->state; + fdctrl->dma_chann = dma_chann; /* FIXME */ + sbd = SYS_BUS_DEVICE(dev); + sysbus_realize_and_unref(sbd, &error_fatal); + sysbus_connect_irq(sbd, 0, irq); + sysbus_mmio_map(sbd, 0, mmio_base); + + fdctrl_init_drives(&sys->state.bus, fds); +} + +void sun4m_fdctrl_init(qemu_irq irq, hwaddr io_base, + DriveInfo **fds, qemu_irq *fdc_tc) +{ + DeviceState *dev; + FDCtrlSysBus *sys; + + dev = qdev_new("sun-fdtwo"); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + sys = SYSBUS_FDC(dev); + sysbus_connect_irq(SYS_BUS_DEVICE(sys), 0, irq); + sysbus_mmio_map(SYS_BUS_DEVICE(sys), 0, io_base); + *fdc_tc = qdev_get_gpio_in(dev, 0); + + fdctrl_init_drives(&sys->state.bus, fds); +} + +static void sysbus_fdc_common_instance_init(Object *obj) +{ + DeviceState *dev = DEVICE(obj); + FDCtrlSysBusClass *sbdc = SYSBUS_FDC_GET_CLASS(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + FDCtrlSysBus *sys = SYSBUS_FDC(obj); + FDCtrl *fdctrl = &sys->state; + + qdev_set_legacy_instance_id(dev, 0 /* io */, 2); /* FIXME */ + + memory_region_init_io(&fdctrl->iomem, obj, + sbdc->use_strict_io ?
&fdctrl_mem_strict_ops + : &fdctrl_mem_ops, + fdctrl, "fdc", 0x08); + sysbus_init_mmio(sbd, &fdctrl->iomem); + + sysbus_init_irq(sbd, &fdctrl->irq); + qdev_init_gpio_in(dev, fdctrl_handle_tc, 1); +} + +static void sysbus_fdc_realize(DeviceState *dev, Error **errp) +{ + FDCtrlSysBus *sys = SYSBUS_FDC(dev); + FDCtrl *fdctrl = &sys->state; + + fdctrl_realize_common(dev, fdctrl, errp); +} + +static const VMStateDescription vmstate_sysbus_fdc = { + .name = "fdc", + .version_id = 2, + .minimum_version_id = 2, + .fields = (VMStateField[]) { + VMSTATE_STRUCT(state, FDCtrlSysBus, 0, vmstate_fdc, FDCtrl), + VMSTATE_END_OF_LIST() + } +}; + +static void sysbus_fdc_common_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = sysbus_fdc_realize; + dc->reset = fdctrl_external_reset_sysbus; + dc->vmsd = &vmstate_sysbus_fdc; + set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); +} + +static const TypeInfo sysbus_fdc_common_typeinfo = { + .name = TYPE_SYSBUS_FDC, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(FDCtrlSysBus), + .instance_init = sysbus_fdc_common_instance_init, + .abstract = true, + .class_init = sysbus_fdc_common_class_init, + .class_size = sizeof(FDCtrlSysBusClass), +}; + +static Property sysbus_fdc_properties[] = { + DEFINE_PROP_SIGNED("fdtypeA", FDCtrlSysBus, state.qdev_for_drives[0].type, + FLOPPY_DRIVE_TYPE_AUTO, qdev_prop_fdc_drive_type, + FloppyDriveType), + DEFINE_PROP_SIGNED("fdtypeB", FDCtrlSysBus, state.qdev_for_drives[1].type, + FLOPPY_DRIVE_TYPE_AUTO, qdev_prop_fdc_drive_type, + FloppyDriveType), + DEFINE_PROP_SIGNED("fallback", FDCtrlSysBus, state.fallback, + FLOPPY_DRIVE_TYPE_144, qdev_prop_fdc_drive_type, + FloppyDriveType), + DEFINE_PROP_END_OF_LIST(), +}; + +static void sysbus_fdc_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->desc = "virtual floppy controller"; + device_class_set_props(dc, sysbus_fdc_properties); +} + +static const TypeInfo sysbus_fdc_typeinfo = { + .name = "sysbus-fdc", + .parent = TYPE_SYSBUS_FDC, + .class_init = sysbus_fdc_class_init, +}; + +static Property sun4m_fdc_properties[] = { + DEFINE_PROP_SIGNED("fdtype", FDCtrlSysBus, state.qdev_for_drives[0].type, + FLOPPY_DRIVE_TYPE_AUTO, qdev_prop_fdc_drive_type, + FloppyDriveType), + DEFINE_PROP_SIGNED("fallback", FDCtrlSysBus, state.fallback, + FLOPPY_DRIVE_TYPE_144, qdev_prop_fdc_drive_type, + FloppyDriveType), + DEFINE_PROP_END_OF_LIST(), +}; + +static void sun4m_fdc_class_init(ObjectClass *klass, void *data) +{ + FDCtrlSysBusClass *sbdc = SYSBUS_FDC_CLASS(klass); + DeviceClass *dc = DEVICE_CLASS(klass); + + sbdc->use_strict_io = true; + dc->desc = "virtual floppy controller"; + device_class_set_props(dc, sun4m_fdc_properties); +} + +static const TypeInfo sun4m_fdc_typeinfo = { + .name = "sun-fdtwo", + .parent = TYPE_SYSBUS_FDC, + .class_init = sun4m_fdc_class_init, +}; + +static void sysbus_fdc_register_types(void) +{ + type_register_static(&sysbus_fdc_common_typeinfo); + type_register_static(&sysbus_fdc_typeinfo); + type_register_static(&sun4m_fdc_typeinfo); +} + +type_init(sysbus_fdc_register_types) diff --git a/hw/block/fdc.c b/hw/block/fdc.c new file mode 100644 index 000000000..21d18ac2e --- /dev/null +++ b/hw/block/fdc.c @@ -0,0 +1,2384 @@ +/* + * QEMU Floppy disk emulator (Intel 82078) + * + * Copyright (c) 2003, 2007 Jocelyn Mayer + * Copyright (c) 2008 Hervé Poussineau + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated 
documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ +/* + * The controller is used in Sun4m systems in a slightly different + * way. There are changes in DOR register and DMA is not available. + */ + +#include "qemu/osdep.h" +#include "hw/block/fdc.h" +#include "qapi/error.h" +#include "qemu/error-report.h" +#include "qemu/timer.h" +#include "hw/irq.h" +#include "hw/isa/isa.h" +#include "hw/qdev-properties.h" +#include "hw/qdev-properties-system.h" +#include "migration/vmstate.h" +#include "hw/block/block.h" +#include "sysemu/block-backend.h" +#include "sysemu/blockdev.h" +#include "sysemu/sysemu.h" +#include "qemu/log.h" +#include "qemu/main-loop.h" +#include "qemu/module.h" +#include "trace.h" +#include "qom/object.h" +#include "fdc-internal.h" + +/********************************************************/ +/* debug Floppy devices */ + +#define DEBUG_FLOPPY 0 + +#define FLOPPY_DPRINTF(fmt, ...) \ + do { \ + if (DEBUG_FLOPPY) { \ + fprintf(stderr, "FLOPPY: " fmt , ## __VA_ARGS__); \ + } \ + } while (0) + + +/* Anonymous BlockBackend for empty drive */ +static BlockBackend *blk_create_empty_drive(void) +{ + return blk_new(qemu_get_aio_context(), 0, BLK_PERM_ALL); +} + +/********************************************************/ +/* qdev floppy bus */ + +#define TYPE_FLOPPY_BUS "floppy-bus" +OBJECT_DECLARE_SIMPLE_TYPE(FloppyBus, FLOPPY_BUS) + +static FDrive *get_drv(FDCtrl *fdctrl, int unit); + +static const TypeInfo floppy_bus_info = { + .name = TYPE_FLOPPY_BUS, + .parent = TYPE_BUS, + .instance_size = sizeof(FloppyBus), +}; + +static void floppy_bus_create(FDCtrl *fdc, FloppyBus *bus, DeviceState *dev) +{ + qbus_init(bus, sizeof(FloppyBus), TYPE_FLOPPY_BUS, dev, NULL); + bus->fdc = fdc; +} + + +/********************************************************/ +/* Floppy drive emulation */ + +/* In many cases, the total sector size of a format is enough to uniquely + * identify it. However, there are some total sector collisions between + * formats of different physical size, and these are noted below by + * highlighting the total sector size for entries with collisions. 
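+ *
+ * For example, a 2880-sector raw image matches both the 3.5" 18x80x2
+ * entry and the 5.25" 18x80x2 entry below; pick_geometry() breaks
+ * such ties using the configured drive type.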
*/ +const FDFormat fd_formats[] = { + /* First entry is default format */ + /* 1.44 MB 3"1/2 floppy disks */ + { FLOPPY_DRIVE_TYPE_144, 18, 80, 1, FDRIVE_RATE_500K, }, /* 3.5" 2880 */ + { FLOPPY_DRIVE_TYPE_144, 20, 80, 1, FDRIVE_RATE_500K, }, /* 3.5" 3200 */ + { FLOPPY_DRIVE_TYPE_144, 21, 80, 1, FDRIVE_RATE_500K, }, + { FLOPPY_DRIVE_TYPE_144, 21, 82, 1, FDRIVE_RATE_500K, }, + { FLOPPY_DRIVE_TYPE_144, 21, 83, 1, FDRIVE_RATE_500K, }, + { FLOPPY_DRIVE_TYPE_144, 22, 80, 1, FDRIVE_RATE_500K, }, + { FLOPPY_DRIVE_TYPE_144, 23, 80, 1, FDRIVE_RATE_500K, }, + { FLOPPY_DRIVE_TYPE_144, 24, 80, 1, FDRIVE_RATE_500K, }, + /* 2.88 MB 3"1/2 floppy disks */ + { FLOPPY_DRIVE_TYPE_288, 36, 80, 1, FDRIVE_RATE_1M, }, + { FLOPPY_DRIVE_TYPE_288, 39, 80, 1, FDRIVE_RATE_1M, }, + { FLOPPY_DRIVE_TYPE_288, 40, 80, 1, FDRIVE_RATE_1M, }, + { FLOPPY_DRIVE_TYPE_288, 44, 80, 1, FDRIVE_RATE_1M, }, + { FLOPPY_DRIVE_TYPE_288, 48, 80, 1, FDRIVE_RATE_1M, }, + /* 720 kB 3"1/2 floppy disks */ + { FLOPPY_DRIVE_TYPE_144, 9, 80, 1, FDRIVE_RATE_250K, }, /* 3.5" 1440 */ + { FLOPPY_DRIVE_TYPE_144, 10, 80, 1, FDRIVE_RATE_250K, }, + { FLOPPY_DRIVE_TYPE_144, 10, 82, 1, FDRIVE_RATE_250K, }, + { FLOPPY_DRIVE_TYPE_144, 10, 83, 1, FDRIVE_RATE_250K, }, + { FLOPPY_DRIVE_TYPE_144, 13, 80, 1, FDRIVE_RATE_250K, }, + { FLOPPY_DRIVE_TYPE_144, 14, 80, 1, FDRIVE_RATE_250K, }, + /* 1.2 MB 5"1/4 floppy disks */ + { FLOPPY_DRIVE_TYPE_120, 15, 80, 1, FDRIVE_RATE_500K, }, + { FLOPPY_DRIVE_TYPE_120, 18, 80, 1, FDRIVE_RATE_500K, }, /* 5.25" 2880 */ + { FLOPPY_DRIVE_TYPE_120, 18, 82, 1, FDRIVE_RATE_500K, }, + { FLOPPY_DRIVE_TYPE_120, 18, 83, 1, FDRIVE_RATE_500K, }, + { FLOPPY_DRIVE_TYPE_120, 20, 80, 1, FDRIVE_RATE_500K, }, /* 5.25" 3200 */ + /* 720 kB 5"1/4 floppy disks */ + { FLOPPY_DRIVE_TYPE_120, 9, 80, 1, FDRIVE_RATE_250K, }, /* 5.25" 1440 */ + { FLOPPY_DRIVE_TYPE_120, 11, 80, 1, FDRIVE_RATE_250K, }, + /* 360 kB 5"1/4 floppy disks */ + { FLOPPY_DRIVE_TYPE_120, 9, 40, 1, FDRIVE_RATE_300K, }, /* 5.25" 720 */ + { FLOPPY_DRIVE_TYPE_120, 9, 40, 0, FDRIVE_RATE_300K, }, + { FLOPPY_DRIVE_TYPE_120, 10, 41, 1, FDRIVE_RATE_300K, }, + { FLOPPY_DRIVE_TYPE_120, 10, 42, 1, FDRIVE_RATE_300K, }, + /* 320 kB 5"1/4 floppy disks */ + { FLOPPY_DRIVE_TYPE_120, 8, 40, 1, FDRIVE_RATE_250K, }, + { FLOPPY_DRIVE_TYPE_120, 8, 40, 0, FDRIVE_RATE_250K, }, + /* 360 kB must match 5"1/4 better than 3"1/2... */ + { FLOPPY_DRIVE_TYPE_144, 9, 80, 0, FDRIVE_RATE_250K, }, /* 3.5" 720 */ + /* end */ + { FLOPPY_DRIVE_TYPE_NONE, -1, -1, 0, 0, }, +}; + +static FDriveSize drive_size(FloppyDriveType drive) +{ + switch (drive) { + case FLOPPY_DRIVE_TYPE_120: + return FDRIVE_SIZE_525; + case FLOPPY_DRIVE_TYPE_144: + case FLOPPY_DRIVE_TYPE_288: + return FDRIVE_SIZE_350; + default: + return FDRIVE_SIZE_UNKNOWN; + } +} + +#define GET_CUR_DRV(fdctrl) ((fdctrl)->cur_drv) +#define SET_CUR_DRV(fdctrl, drive) ((fdctrl)->cur_drv = (drive)) + +/* Will always be a fixed parameter for us */ +#define FD_SECTOR_LEN 512 +#define FD_SECTOR_SC 2 /* Sector size code */ +#define FD_RESET_SENSEI_COUNT 4 /* Number of sense interrupts on RESET */ + + +static FloppyDriveType get_fallback_drive_type(FDrive *drv); + +/* Hack: FD_SEEK is expected to work on empty drives. However, QEMU + * currently goes through some pains to keep seeks within the bounds + * established by last_sect and max_track. Correcting this is difficult, + * as refactoring FDC code tends to expose nasty bugs in the Linux kernel. 
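+ *
+ * (Guest drivers, Linux's included, probe geometry by seeking even
+ * when no medium is inserted, so failing those seeks outright is not
+ * an option.)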
+ * + * For now: allow empty drives to have large bounds so we can seek around, + * with the understanding that when a diskette is inserted, the bounds will + * properly tighten to match the geometry of that inserted medium. + */ +static void fd_empty_seek_hack(FDrive *drv) +{ + drv->last_sect = 0xFF; + drv->max_track = 0xFF; +} + +static void fd_init(FDrive *drv) +{ + /* Drive */ + drv->perpendicular = 0; + /* Disk */ + drv->disk = FLOPPY_DRIVE_TYPE_NONE; + drv->last_sect = 0; + drv->max_track = 0; + drv->ro = true; + drv->media_changed = 1; +} + +#define NUM_SIDES(drv) ((drv)->flags & FDISK_DBL_SIDES ? 2 : 1) + +static int fd_sector_calc(uint8_t head, uint8_t track, uint8_t sect, + uint8_t last_sect, uint8_t num_sides) +{ + return (((track * num_sides) + head) * last_sect) + sect - 1; +} + +/* Returns current position, in sectors, for given drive */ +static int fd_sector(FDrive *drv) +{ + return fd_sector_calc(drv->head, drv->track, drv->sect, drv->last_sect, + NUM_SIDES(drv)); +} + +/* Returns current position, in bytes, for given drive */ +static int fd_offset(FDrive *drv) +{ + g_assert(fd_sector(drv) < INT_MAX >> BDRV_SECTOR_BITS); + return fd_sector(drv) << BDRV_SECTOR_BITS; +} + +/* Seek to a new position: + * returns 0 if already on right track + * returns 1 if track changed + * returns 2 if track is invalid + * returns 3 if sector is invalid + * returns 4 if seek is disabled + */ +static int fd_seek(FDrive *drv, uint8_t head, uint8_t track, uint8_t sect, + int enable_seek) +{ + uint32_t sector; + int ret; + + if (track > drv->max_track || + (head != 0 && (drv->flags & FDISK_DBL_SIDES) == 0)) { + FLOPPY_DPRINTF("try to read %d %02x %02x (max=%d %d %02x %02x)\n", + head, track, sect, 1, + (drv->flags & FDISK_DBL_SIDES) == 0 ? 0 : 1, + drv->max_track, drv->last_sect); + return 2; + } + if (sect > drv->last_sect) { + FLOPPY_DPRINTF("try to read %d %02x %02x (max=%d %d %02x %02x)\n", + head, track, sect, 1, + (drv->flags & FDISK_DBL_SIDES) == 0 ? 0 : 1, + drv->max_track, drv->last_sect); + return 3; + } + sector = fd_sector_calc(head, track, sect, drv->last_sect, NUM_SIDES(drv)); + ret = 0; + if (sector != fd_sector(drv)) { +#if 0 + if (!enable_seek) { + FLOPPY_DPRINTF("error: no implicit seek %d %02x %02x" + " (max=%d %02x %02x)\n", + head, track, sect, 1, drv->max_track, + drv->last_sect); + return 4; + } +#endif + drv->head = head; + if (drv->track != track) { + if (drv->blk != NULL && blk_is_inserted(drv->blk)) { + drv->media_changed = 0; + } + ret = 1; + } + drv->track = track; + drv->sect = sect; + } + + if (drv->blk == NULL || !blk_is_inserted(drv->blk)) { + ret = 2; + } + + return ret; +} + +/* Set drive back to track 0 */ +static void fd_recalibrate(FDrive *drv) +{ + FLOPPY_DPRINTF("recalibrate\n"); + fd_seek(drv, 0, 0, 1, 1); +} + +/** + * Determine geometry based on inserted diskette. + * Will not operate on an empty drive. + * + * @return: 0 on success, -1 if the drive is empty. + */ +static int pick_geometry(FDrive *drv) +{ + BlockBackend *blk = drv->blk; + const FDFormat *parse; + uint64_t nb_sectors, size; + int i; + int match, size_match, type_match; + bool magic = drv->drive == FLOPPY_DRIVE_TYPE_AUTO; + + /* We can only pick a geometry if we have a diskette. */ + if (!drv->blk || !blk_is_inserted(drv->blk) || + drv->drive == FLOPPY_DRIVE_TYPE_NONE) + { + return -1; + } + + /* We need to determine the likely geometry of the inserted medium. 
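+ * blk_get_geometry() gives us the medium's total sector count, which
+ * we match against the table above.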
+ * In order of preference, we look for: + * (1) The same drive type and number of sectors, + * (2) The same diskette size and number of sectors, + * (3) The same drive type. + * + * In all cases, matches that occur higher in the drive table will take + * precedence over matches that occur later in the table. + */ + blk_get_geometry(blk, &nb_sectors); + match = size_match = type_match = -1; + for (i = 0; ; i++) { + parse = &fd_formats[i]; + if (parse->drive == FLOPPY_DRIVE_TYPE_NONE) { + break; + } + size = (parse->max_head + 1) * parse->max_track * parse->last_sect; + if (nb_sectors == size) { + if (magic || parse->drive == drv->drive) { + /* (1) perfect match -- nb_sectors and drive type */ + goto out; + } else if (drive_size(parse->drive) == drive_size(drv->drive)) { + /* (2) size match -- nb_sectors and physical medium size */ + match = (match == -1) ? i : match; + } else { + /* This is suspicious -- Did the user misconfigure? */ + size_match = (size_match == -1) ? i : size_match; + } + } else if (type_match == -1) { + if ((parse->drive == drv->drive) || + (magic && (parse->drive == get_fallback_drive_type(drv)))) { + /* (3) type match -- nb_sectors mismatch, but matches the type + * specified explicitly by the user, or matches the fallback + * default type when using the drive autodetect mechanism */ + type_match = i; + } + } + } + + /* No exact match found */ + if (match == -1) { + if (size_match != -1) { + parse = &fd_formats[size_match]; + FLOPPY_DPRINTF("User requested floppy drive type '%s', " + "but inserted medium appears to be a " + "%"PRId64" sector '%s' type\n", + FloppyDriveType_str(drv->drive), + nb_sectors, + FloppyDriveType_str(parse->drive)); + } + assert(type_match != -1 && "misconfigured fd_format"); + match = type_match; + } + parse = &(fd_formats[match]); + + out: + if (parse->max_head == 0) { + drv->flags &= ~FDISK_DBL_SIDES; + } else { + drv->flags |= FDISK_DBL_SIDES; + } + drv->max_track = parse->max_track; + drv->last_sect = parse->last_sect; + drv->disk = parse->drive; + drv->media_rate = parse->rate; + return 0; +} + +static void pick_drive_type(FDrive *drv) +{ + if (drv->drive != FLOPPY_DRIVE_TYPE_AUTO) { + return; + } + + if (pick_geometry(drv) == 0) { + drv->drive = drv->disk; + } else { + drv->drive = get_fallback_drive_type(drv); + } + + g_assert(drv->drive != FLOPPY_DRIVE_TYPE_AUTO); +} + +/* Revalidate a disk drive after a disk change */ +static void fd_revalidate(FDrive *drv) +{ + int rc; + + FLOPPY_DPRINTF("revalidate\n"); + if (drv->blk != NULL) { + drv->ro = !blk_is_writable(drv->blk); + if (!blk_is_inserted(drv->blk)) { + FLOPPY_DPRINTF("No disk in drive\n"); + drv->disk = FLOPPY_DRIVE_TYPE_NONE; + fd_empty_seek_hack(drv); + } else if (!drv->media_validated) { + rc = pick_geometry(drv); + if (rc) { + FLOPPY_DPRINTF("Could not validate floppy drive media"); + } else { + drv->media_validated = true; + FLOPPY_DPRINTF("Floppy disk (%d h %d t %d s) %s\n", + (drv->flags & FDISK_DBL_SIDES) ? 2 : 1, + drv->max_track, drv->last_sect, + drv->ro ? 
"ro" : "rw"); + } + } + } else { + FLOPPY_DPRINTF("No drive connected\n"); + drv->last_sect = 0; + drv->max_track = 0; + drv->flags &= ~FDISK_DBL_SIDES; + drv->drive = FLOPPY_DRIVE_TYPE_NONE; + drv->disk = FLOPPY_DRIVE_TYPE_NONE; + } +} + +static void fd_change_cb(void *opaque, bool load, Error **errp) +{ + FDrive *drive = opaque; + + if (!load) { + blk_set_perm(drive->blk, 0, BLK_PERM_ALL, &error_abort); + } else { + if (!blkconf_apply_backend_options(drive->conf, + !blk_supports_write_perm(drive->blk), + false, errp)) { + return; + } + } + + drive->media_changed = 1; + drive->media_validated = false; + fd_revalidate(drive); +} + +static const BlockDevOps fd_block_ops = { + .change_media_cb = fd_change_cb, +}; + + +#define TYPE_FLOPPY_DRIVE "floppy" +OBJECT_DECLARE_SIMPLE_TYPE(FloppyDrive, FLOPPY_DRIVE) + +struct FloppyDrive { + DeviceState qdev; + uint32_t unit; + BlockConf conf; + FloppyDriveType type; +}; + +static Property floppy_drive_properties[] = { + DEFINE_PROP_UINT32("unit", FloppyDrive, unit, -1), + DEFINE_BLOCK_PROPERTIES(FloppyDrive, conf), + DEFINE_PROP_SIGNED("drive-type", FloppyDrive, type, + FLOPPY_DRIVE_TYPE_AUTO, qdev_prop_fdc_drive_type, + FloppyDriveType), + DEFINE_PROP_END_OF_LIST(), +}; + +static void floppy_drive_realize(DeviceState *qdev, Error **errp) +{ + FloppyDrive *dev = FLOPPY_DRIVE(qdev); + FloppyBus *bus = FLOPPY_BUS(qdev->parent_bus); + FDrive *drive; + bool read_only; + int ret; + + if (dev->unit == -1) { + for (dev->unit = 0; dev->unit < MAX_FD; dev->unit++) { + drive = get_drv(bus->fdc, dev->unit); + if (!drive->blk) { + break; + } + } + } + + if (dev->unit >= MAX_FD) { + error_setg(errp, "Can't create floppy unit %d, bus supports " + "only %d units", dev->unit, MAX_FD); + return; + } + + drive = get_drv(bus->fdc, dev->unit); + if (drive->blk) { + error_setg(errp, "Floppy unit %d is in use", dev->unit); + return; + } + + if (!dev->conf.blk) { + dev->conf.blk = blk_create_empty_drive(); + ret = blk_attach_dev(dev->conf.blk, qdev); + assert(ret == 0); + + /* Don't take write permissions on an empty drive to allow attaching a + * read-only node later */ + read_only = true; + } else { + read_only = !blk_bs(dev->conf.blk) || + !blk_supports_write_perm(dev->conf.blk); + } + + if (!blkconf_blocksizes(&dev->conf, errp)) { + return; + } + + if (dev->conf.logical_block_size != 512 || + dev->conf.physical_block_size != 512) + { + error_setg(errp, "Physical and logical block size must " + "be 512 for floppy"); + return; + } + + /* rerror/werror aren't supported by fdc and therefore not even registered + * with qdev. So set the defaults manually before they are used in + * blkconf_apply_backend_options(). */ + dev->conf.rerror = BLOCKDEV_ON_ERROR_AUTO; + dev->conf.werror = BLOCKDEV_ON_ERROR_AUTO; + + if (!blkconf_apply_backend_options(&dev->conf, read_only, false, errp)) { + return; + } + + /* 'enospc' is the default for -drive, 'report' is what blk_new() gives us + * for empty drives. 
*/ + if (blk_get_on_error(dev->conf.blk, 0) != BLOCKDEV_ON_ERROR_ENOSPC && + blk_get_on_error(dev->conf.blk, 0) != BLOCKDEV_ON_ERROR_REPORT) { + error_setg(errp, "fdc doesn't support drive option werror"); + return; + } + if (blk_get_on_error(dev->conf.blk, 1) != BLOCKDEV_ON_ERROR_REPORT) { + error_setg(errp, "fdc doesn't support drive option rerror"); + return; + } + + drive->conf = &dev->conf; + drive->blk = dev->conf.blk; + drive->fdctrl = bus->fdc; + + fd_init(drive); + blk_set_dev_ops(drive->blk, &fd_block_ops, drive); + + /* Keep 'type' qdev property and FDrive->drive in sync */ + drive->drive = dev->type; + pick_drive_type(drive); + dev->type = drive->drive; + + fd_revalidate(drive); +} + +static void floppy_drive_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *k = DEVICE_CLASS(klass); + k->realize = floppy_drive_realize; + set_bit(DEVICE_CATEGORY_STORAGE, k->categories); + k->bus_type = TYPE_FLOPPY_BUS; + device_class_set_props(k, floppy_drive_properties); + k->desc = "virtual floppy drive"; +} + +static const TypeInfo floppy_drive_info = { + .name = TYPE_FLOPPY_DRIVE, + .parent = TYPE_DEVICE, + .instance_size = sizeof(FloppyDrive), + .class_init = floppy_drive_class_init, +}; + +/********************************************************/ +/* Intel 82078 floppy disk controller emulation */ + +static void fdctrl_to_command_phase(FDCtrl *fdctrl); +static void fdctrl_raise_irq(FDCtrl *fdctrl); +static FDrive *get_cur_drv(FDCtrl *fdctrl); + +static uint32_t fdctrl_read_statusA(FDCtrl *fdctrl); +static uint32_t fdctrl_read_statusB(FDCtrl *fdctrl); +static uint32_t fdctrl_read_dor(FDCtrl *fdctrl); +static void fdctrl_write_dor(FDCtrl *fdctrl, uint32_t value); +static uint32_t fdctrl_read_tape(FDCtrl *fdctrl); +static void fdctrl_write_tape(FDCtrl *fdctrl, uint32_t value); +static uint32_t fdctrl_read_main_status(FDCtrl *fdctrl); +static void fdctrl_write_rate(FDCtrl *fdctrl, uint32_t value); +static uint32_t fdctrl_read_data(FDCtrl *fdctrl); +static void fdctrl_write_data(FDCtrl *fdctrl, uint32_t value); +static uint32_t fdctrl_read_dir(FDCtrl *fdctrl); +static void fdctrl_write_ccr(FDCtrl *fdctrl, uint32_t value); + +enum { + FD_DIR_WRITE = 0, + FD_DIR_READ = 1, + FD_DIR_SCANE = 2, + FD_DIR_SCANL = 3, + FD_DIR_SCANH = 4, + FD_DIR_VERIFY = 5, +}; + +enum { + FD_STATE_MULTI = 0x01, /* multi track flag */ + FD_STATE_FORMAT = 0x02, /* format flag */ +}; + +enum { + FD_REG_SRA = 0x00, + FD_REG_SRB = 0x01, + FD_REG_DOR = 0x02, + FD_REG_TDR = 0x03, + FD_REG_MSR = 0x04, + FD_REG_DSR = 0x04, + FD_REG_FIFO = 0x05, + FD_REG_DIR = 0x07, + FD_REG_CCR = 0x07, +}; + +enum { + FD_CMD_READ_TRACK = 0x02, + FD_CMD_SPECIFY = 0x03, + FD_CMD_SENSE_DRIVE_STATUS = 0x04, + FD_CMD_WRITE = 0x05, + FD_CMD_READ = 0x06, + FD_CMD_RECALIBRATE = 0x07, + FD_CMD_SENSE_INTERRUPT_STATUS = 0x08, + FD_CMD_WRITE_DELETED = 0x09, + FD_CMD_READ_ID = 0x0a, + FD_CMD_READ_DELETED = 0x0c, + FD_CMD_FORMAT_TRACK = 0x0d, + FD_CMD_DUMPREG = 0x0e, + FD_CMD_SEEK = 0x0f, + FD_CMD_VERSION = 0x10, + FD_CMD_SCAN_EQUAL = 0x11, + FD_CMD_PERPENDICULAR_MODE = 0x12, + FD_CMD_CONFIGURE = 0x13, + FD_CMD_LOCK = 0x14, + FD_CMD_VERIFY = 0x16, + FD_CMD_POWERDOWN_MODE = 0x17, + FD_CMD_PART_ID = 0x18, + FD_CMD_SCAN_LOW_OR_EQUAL = 0x19, + FD_CMD_SCAN_HIGH_OR_EQUAL = 0x1d, + FD_CMD_SAVE = 0x2e, + FD_CMD_OPTION = 0x33, + FD_CMD_RESTORE = 0x4e, + FD_CMD_DRIVE_SPECIFICATION_COMMAND = 0x8e, + FD_CMD_RELATIVE_SEEK_OUT = 0x8f, + FD_CMD_FORMAT_AND_WRITE = 0xcd, + FD_CMD_RELATIVE_SEEK_IN = 0xcf, +}; + +enum { + FD_CONFIG_PRETRK = 0xff, /* 
Pre-compensation set to track 0 */ + FD_CONFIG_FIFOTHR = 0x0f, /* FIFO threshold set to 1 byte */ + FD_CONFIG_POLL = 0x10, /* Poll enabled */ + FD_CONFIG_EFIFO = 0x20, /* FIFO disabled */ + FD_CONFIG_EIS = 0x40, /* No implied seeks */ +}; + +enum { + FD_SR0_DS0 = 0x01, + FD_SR0_DS1 = 0x02, + FD_SR0_HEAD = 0x04, + FD_SR0_EQPMT = 0x10, + FD_SR0_SEEK = 0x20, + FD_SR0_ABNTERM = 0x40, + FD_SR0_INVCMD = 0x80, + FD_SR0_RDYCHG = 0xc0, +}; + +enum { + FD_SR1_MA = 0x01, /* Missing address mark */ + FD_SR1_NW = 0x02, /* Not writable */ + FD_SR1_EC = 0x80, /* End of cylinder */ +}; + +enum { + FD_SR2_SNS = 0x04, /* Scan not satisfied */ + FD_SR2_SEH = 0x08, /* Scan equal hit */ +}; + +enum { + FD_SRA_DIR = 0x01, + FD_SRA_nWP = 0x02, + FD_SRA_nINDX = 0x04, + FD_SRA_HDSEL = 0x08, + FD_SRA_nTRK0 = 0x10, + FD_SRA_STEP = 0x20, + FD_SRA_nDRV2 = 0x40, + FD_SRA_INTPEND = 0x80, +}; + +enum { + FD_SRB_MTR0 = 0x01, + FD_SRB_MTR1 = 0x02, + FD_SRB_WGATE = 0x04, + FD_SRB_RDATA = 0x08, + FD_SRB_WDATA = 0x10, + FD_SRB_DR0 = 0x20, +}; + +enum { +#if MAX_FD == 4 + FD_DOR_SELMASK = 0x03, +#else + FD_DOR_SELMASK = 0x01, +#endif + FD_DOR_nRESET = 0x04, + FD_DOR_DMAEN = 0x08, + FD_DOR_MOTEN0 = 0x10, + FD_DOR_MOTEN1 = 0x20, + FD_DOR_MOTEN2 = 0x40, + FD_DOR_MOTEN3 = 0x80, +}; + +enum { +#if MAX_FD == 4 + FD_TDR_BOOTSEL = 0x0c, +#else + FD_TDR_BOOTSEL = 0x04, +#endif +}; + +enum { + FD_DSR_DRATEMASK= 0x03, + FD_DSR_PWRDOWN = 0x40, + FD_DSR_SWRESET = 0x80, +}; + +enum { + FD_MSR_DRV0BUSY = 0x01, + FD_MSR_DRV1BUSY = 0x02, + FD_MSR_DRV2BUSY = 0x04, + FD_MSR_DRV3BUSY = 0x08, + FD_MSR_CMDBUSY = 0x10, + FD_MSR_NONDMA = 0x20, + FD_MSR_DIO = 0x40, + FD_MSR_RQM = 0x80, +}; + +enum { + FD_DIR_DSKCHG = 0x80, +}; + +/* + * See chapter 5.0 "Controller phases" of the spec: + * + * Command phase: + * The host writes a command and its parameters into the FIFO. The command + * phase is completed when all parameters for the command have been supplied, + * and execution phase is entered. + * + * Execution phase: + * Data transfers, either DMA or non-DMA. For non-DMA transfers, the FIFO + * contains the payload now, otherwise it's unused. When all bytes of the + * required data have been transferred, the state is switched to either result + * phase (if the command produces status bytes) or directly back into the + * command phase for the next command. + * + * Result phase: + * The host reads out the FIFO, which contains one or more result bytes now. 
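+ *
+ * A non-DMA read, for instance, passes through all three phases: the
+ * nine command bytes are written into the FIFO (command phase), the
+ * sector payload is read back out of it (execution phase), and the
+ * seven status bytes ST0/ST1/ST2/C/H/R/N follow (result phase).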
+ */ +enum { + /* Only for migration: reconstruct phase from registers like qemu 2.3 */ + FD_PHASE_RECONSTRUCT = 0, + + FD_PHASE_COMMAND = 1, + FD_PHASE_EXECUTION = 2, + FD_PHASE_RESULT = 3, +}; + +#define FD_MULTI_TRACK(state) ((state) & FD_STATE_MULTI) +#define FD_FORMAT_CMD(state) ((state) & FD_STATE_FORMAT) + +static FloppyDriveType get_fallback_drive_type(FDrive *drv) +{ + return drv->fdctrl->fallback; +} + +uint32_t fdctrl_read(void *opaque, uint32_t reg) +{ + FDCtrl *fdctrl = opaque; + uint32_t retval; + + reg &= 7; + switch (reg) { + case FD_REG_SRA: + retval = fdctrl_read_statusA(fdctrl); + break; + case FD_REG_SRB: + retval = fdctrl_read_statusB(fdctrl); + break; + case FD_REG_DOR: + retval = fdctrl_read_dor(fdctrl); + break; + case FD_REG_TDR: + retval = fdctrl_read_tape(fdctrl); + break; + case FD_REG_MSR: + retval = fdctrl_read_main_status(fdctrl); + break; + case FD_REG_FIFO: + retval = fdctrl_read_data(fdctrl); + break; + case FD_REG_DIR: + retval = fdctrl_read_dir(fdctrl); + break; + default: + retval = (uint32_t)(-1); + break; + } + trace_fdc_ioport_read(reg, retval); + + return retval; +} + +void fdctrl_write(void *opaque, uint32_t reg, uint32_t value) +{ + FDCtrl *fdctrl = opaque; + + reg &= 7; + trace_fdc_ioport_write(reg, value); + switch (reg) { + case FD_REG_DOR: + fdctrl_write_dor(fdctrl, value); + break; + case FD_REG_TDR: + fdctrl_write_tape(fdctrl, value); + break; + case FD_REG_DSR: + fdctrl_write_rate(fdctrl, value); + break; + case FD_REG_FIFO: + fdctrl_write_data(fdctrl, value); + break; + case FD_REG_CCR: + fdctrl_write_ccr(fdctrl, value); + break; + default: + break; + } +} + +static bool fdrive_media_changed_needed(void *opaque) +{ + FDrive *drive = opaque; + + return (drive->blk != NULL && drive->media_changed != 1); +} + +static const VMStateDescription vmstate_fdrive_media_changed = { + .name = "fdrive/media_changed", + .version_id = 1, + .minimum_version_id = 1, + .needed = fdrive_media_changed_needed, + .fields = (VMStateField[]) { + VMSTATE_UINT8(media_changed, FDrive), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_fdrive_media_rate = { + .name = "fdrive/media_rate", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT8(media_rate, FDrive), + VMSTATE_END_OF_LIST() + } +}; + +static bool fdrive_perpendicular_needed(void *opaque) +{ + FDrive *drive = opaque; + + return drive->perpendicular != 0; +} + +static const VMStateDescription vmstate_fdrive_perpendicular = { + .name = "fdrive/perpendicular", + .version_id = 1, + .minimum_version_id = 1, + .needed = fdrive_perpendicular_needed, + .fields = (VMStateField[]) { + VMSTATE_UINT8(perpendicular, FDrive), + VMSTATE_END_OF_LIST() + } +}; + +static int fdrive_post_load(void *opaque, int version_id) +{ + fd_revalidate(opaque); + return 0; +} + +static const VMStateDescription vmstate_fdrive = { + .name = "fdrive", + .version_id = 1, + .minimum_version_id = 1, + .post_load = fdrive_post_load, + .fields = (VMStateField[]) { + VMSTATE_UINT8(head, FDrive), + VMSTATE_UINT8(track, FDrive), + VMSTATE_UINT8(sect, FDrive), + VMSTATE_END_OF_LIST() + }, + .subsections = (const VMStateDescription*[]) { + &vmstate_fdrive_media_changed, + &vmstate_fdrive_media_rate, + &vmstate_fdrive_perpendicular, + NULL + } +}; + +/* + * Reconstructs the phase from register values according to the logic that was + * implemented in qemu 2.3. This is the default value that is used if the phase + * subsection is not present on migration. 
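+ *
+ * For example, an incoming qemu 2.3 stream with FD_MSR_RQM clear is
+ * taken to be in the middle of a DMA transfer and is therefore put
+ * into the execution phase.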
+ * + * Don't change this function to reflect newer qemu versions, it is part of + * the migration ABI. + */ +static int reconstruct_phase(FDCtrl *fdctrl) +{ + if (fdctrl->msr & FD_MSR_NONDMA) { + return FD_PHASE_EXECUTION; + } else if ((fdctrl->msr & FD_MSR_RQM) == 0) { + /* qemu 2.3 disabled RQM only during DMA transfers */ + return FD_PHASE_EXECUTION; + } else if (fdctrl->msr & FD_MSR_DIO) { + return FD_PHASE_RESULT; + } else { + return FD_PHASE_COMMAND; + } +} + +static int fdc_pre_save(void *opaque) +{ + FDCtrl *s = opaque; + + s->dor_vmstate = s->dor | GET_CUR_DRV(s); + + return 0; +} + +static int fdc_pre_load(void *opaque) +{ + FDCtrl *s = opaque; + s->phase = FD_PHASE_RECONSTRUCT; + return 0; +} + +static int fdc_post_load(void *opaque, int version_id) +{ + FDCtrl *s = opaque; + + SET_CUR_DRV(s, s->dor_vmstate & FD_DOR_SELMASK); + s->dor = s->dor_vmstate & ~FD_DOR_SELMASK; + + if (s->phase == FD_PHASE_RECONSTRUCT) { + s->phase = reconstruct_phase(s); + } + + return 0; +} + +static bool fdc_reset_sensei_needed(void *opaque) +{ + FDCtrl *s = opaque; + + return s->reset_sensei != 0; +} + +static const VMStateDescription vmstate_fdc_reset_sensei = { + .name = "fdc/reset_sensei", + .version_id = 1, + .minimum_version_id = 1, + .needed = fdc_reset_sensei_needed, + .fields = (VMStateField[]) { + VMSTATE_INT32(reset_sensei, FDCtrl), + VMSTATE_END_OF_LIST() + } +}; + +static bool fdc_result_timer_needed(void *opaque) +{ + FDCtrl *s = opaque; + + return timer_pending(s->result_timer); +} + +static const VMStateDescription vmstate_fdc_result_timer = { + .name = "fdc/result_timer", + .version_id = 1, + .minimum_version_id = 1, + .needed = fdc_result_timer_needed, + .fields = (VMStateField[]) { + VMSTATE_TIMER_PTR(result_timer, FDCtrl), + VMSTATE_END_OF_LIST() + } +}; + +static bool fdc_phase_needed(void *opaque) +{ + FDCtrl *fdctrl = opaque; + + return reconstruct_phase(fdctrl) != fdctrl->phase; +} + +static const VMStateDescription vmstate_fdc_phase = { + .name = "fdc/phase", + .version_id = 1, + .minimum_version_id = 1, + .needed = fdc_phase_needed, + .fields = (VMStateField[]) { + VMSTATE_UINT8(phase, FDCtrl), + VMSTATE_END_OF_LIST() + } +}; + +const VMStateDescription vmstate_fdc = { + .name = "fdc", + .version_id = 2, + .minimum_version_id = 2, + .pre_save = fdc_pre_save, + .pre_load = fdc_pre_load, + .post_load = fdc_post_load, + .fields = (VMStateField[]) { + /* Controller State */ + VMSTATE_UINT8(sra, FDCtrl), + VMSTATE_UINT8(srb, FDCtrl), + VMSTATE_UINT8(dor_vmstate, FDCtrl), + VMSTATE_UINT8(tdr, FDCtrl), + VMSTATE_UINT8(dsr, FDCtrl), + VMSTATE_UINT8(msr, FDCtrl), + VMSTATE_UINT8(status0, FDCtrl), + VMSTATE_UINT8(status1, FDCtrl), + VMSTATE_UINT8(status2, FDCtrl), + /* Command FIFO */ + VMSTATE_VARRAY_INT32(fifo, FDCtrl, fifo_size, 0, vmstate_info_uint8, + uint8_t), + VMSTATE_UINT32(data_pos, FDCtrl), + VMSTATE_UINT32(data_len, FDCtrl), + VMSTATE_UINT8(data_state, FDCtrl), + VMSTATE_UINT8(data_dir, FDCtrl), + VMSTATE_UINT8(eot, FDCtrl), + /* States kept only to be returned back */ + VMSTATE_UINT8(timer0, FDCtrl), + VMSTATE_UINT8(timer1, FDCtrl), + VMSTATE_UINT8(precomp_trk, FDCtrl), + VMSTATE_UINT8(config, FDCtrl), + VMSTATE_UINT8(lock, FDCtrl), + VMSTATE_UINT8(pwrd, FDCtrl), + VMSTATE_UINT8_EQUAL(num_floppies, FDCtrl, NULL), + VMSTATE_STRUCT_ARRAY(drives, FDCtrl, MAX_FD, 1, + vmstate_fdrive, FDrive), + VMSTATE_END_OF_LIST() + }, + .subsections = (const VMStateDescription*[]) { + &vmstate_fdc_reset_sensei, + &vmstate_fdc_result_timer, + &vmstate_fdc_phase, + NULL + } +}; + +/* Change 
IRQ state */ +static void fdctrl_reset_irq(FDCtrl *fdctrl) +{ + fdctrl->status0 = 0; + if (!(fdctrl->sra & FD_SRA_INTPEND)) + return; + FLOPPY_DPRINTF("Reset interrupt\n"); + qemu_set_irq(fdctrl->irq, 0); + fdctrl->sra &= ~FD_SRA_INTPEND; +} + +static void fdctrl_raise_irq(FDCtrl *fdctrl) +{ + if (!(fdctrl->sra & FD_SRA_INTPEND)) { + qemu_set_irq(fdctrl->irq, 1); + fdctrl->sra |= FD_SRA_INTPEND; + } + + fdctrl->reset_sensei = 0; + FLOPPY_DPRINTF("Set interrupt status to 0x%02x\n", fdctrl->status0); +} + +/* Reset controller */ +void fdctrl_reset(FDCtrl *fdctrl, int do_irq) +{ + int i; + + FLOPPY_DPRINTF("reset controller\n"); + fdctrl_reset_irq(fdctrl); + /* Initialise controller */ + fdctrl->sra = 0; + fdctrl->srb = 0xc0; + if (!fdctrl->drives[1].blk) { + fdctrl->sra |= FD_SRA_nDRV2; + } + fdctrl->cur_drv = 0; + fdctrl->dor = FD_DOR_nRESET; + fdctrl->dor |= (fdctrl->dma_chann != -1) ? FD_DOR_DMAEN : 0; + fdctrl->msr = FD_MSR_RQM; + fdctrl->reset_sensei = 0; + timer_del(fdctrl->result_timer); + /* FIFO state */ + fdctrl->data_pos = 0; + fdctrl->data_len = 0; + fdctrl->data_state = 0; + fdctrl->data_dir = FD_DIR_WRITE; + for (i = 0; i < MAX_FD; i++) + fd_recalibrate(&fdctrl->drives[i]); + fdctrl_to_command_phase(fdctrl); + if (do_irq) { + fdctrl->status0 |= FD_SR0_RDYCHG; + fdctrl_raise_irq(fdctrl); + fdctrl->reset_sensei = FD_RESET_SENSEI_COUNT; + } +} + +static inline FDrive *drv0(FDCtrl *fdctrl) +{ + return &fdctrl->drives[(fdctrl->tdr & FD_TDR_BOOTSEL) >> 2]; +} + +static inline FDrive *drv1(FDCtrl *fdctrl) +{ + if ((fdctrl->tdr & FD_TDR_BOOTSEL) < (1 << 2)) + return &fdctrl->drives[1]; + else + return &fdctrl->drives[0]; +} + +#if MAX_FD == 4 +static inline FDrive *drv2(FDCtrl *fdctrl) +{ + if ((fdctrl->tdr & FD_TDR_BOOTSEL) < (2 << 2)) + return &fdctrl->drives[2]; + else + return &fdctrl->drives[1]; +} + +static inline FDrive *drv3(FDCtrl *fdctrl) +{ + if ((fdctrl->tdr & FD_TDR_BOOTSEL) < (3 << 2)) + return &fdctrl->drives[3]; + else + return &fdctrl->drives[2]; +} +#endif + +static FDrive *get_drv(FDCtrl *fdctrl, int unit) +{ + switch (unit) { + case 0: return drv0(fdctrl); + case 1: return drv1(fdctrl); +#if MAX_FD == 4 + case 2: return drv2(fdctrl); + case 3: return drv3(fdctrl); +#endif + default: return NULL; + } +} + +static FDrive *get_cur_drv(FDCtrl *fdctrl) +{ + FDrive *cur_drv = get_drv(fdctrl, fdctrl->cur_drv); + + if (!cur_drv->blk) { + /* + * Kludge: empty drive line selected. Create an anonymous + * BlockBackend to avoid NULL deref with various BlockBackend + * API calls within this model (CVE-2021-20196). + * Due to the controller QOM model limitations, we don't + * attach the created BlockBackend to the controller device.
+ */ + cur_drv->blk = blk_create_empty_drive(); + } + return cur_drv; +} + +/* Status A register : 0x00 (read-only) */ +static uint32_t fdctrl_read_statusA(FDCtrl *fdctrl) +{ + uint32_t retval = fdctrl->sra; + + FLOPPY_DPRINTF("status register A: 0x%02x\n", retval); + + return retval; +} + +/* Status B register : 0x01 (read-only) */ +static uint32_t fdctrl_read_statusB(FDCtrl *fdctrl) +{ + uint32_t retval = fdctrl->srb; + + FLOPPY_DPRINTF("status register B: 0x%02x\n", retval); + + return retval; +} + +/* Digital output register : 0x02 */ +static uint32_t fdctrl_read_dor(FDCtrl *fdctrl) +{ + uint32_t retval = fdctrl->dor; + + /* Selected drive */ + retval |= fdctrl->cur_drv; + FLOPPY_DPRINTF("digital output register: 0x%02x\n", retval); + + return retval; +} + +static void fdctrl_write_dor(FDCtrl *fdctrl, uint32_t value) +{ + FLOPPY_DPRINTF("digital output register set to 0x%02x\n", value); + + /* Motors */ + if (value & FD_DOR_MOTEN0) + fdctrl->srb |= FD_SRB_MTR0; + else + fdctrl->srb &= ~FD_SRB_MTR0; + if (value & FD_DOR_MOTEN1) + fdctrl->srb |= FD_SRB_MTR1; + else + fdctrl->srb &= ~FD_SRB_MTR1; + + /* Drive */ + if (value & 1) + fdctrl->srb |= FD_SRB_DR0; + else + fdctrl->srb &= ~FD_SRB_DR0; + + /* Reset */ + if (!(value & FD_DOR_nRESET)) { + if (fdctrl->dor & FD_DOR_nRESET) { + FLOPPY_DPRINTF("controller enter RESET state\n"); + } + } else { + if (!(fdctrl->dor & FD_DOR_nRESET)) { + FLOPPY_DPRINTF("controller out of RESET state\n"); + fdctrl_reset(fdctrl, 1); + fdctrl->dsr &= ~FD_DSR_PWRDOWN; + } + } + /* Selected drive */ + fdctrl->cur_drv = value & FD_DOR_SELMASK; + + fdctrl->dor = value; +} + +/* Tape drive register : 0x03 */ +static uint32_t fdctrl_read_tape(FDCtrl *fdctrl) +{ + uint32_t retval = fdctrl->tdr; + + FLOPPY_DPRINTF("tape drive register: 0x%02x\n", retval); + + return retval; +} + +static void fdctrl_write_tape(FDCtrl *fdctrl, uint32_t value) +{ + /* Reset mode */ + if (!(fdctrl->dor & FD_DOR_nRESET)) { + FLOPPY_DPRINTF("Floppy controller in RESET state !\n"); + return; + } + FLOPPY_DPRINTF("tape drive register set to 0x%02x\n", value); + /* Disk boot selection indicator */ + fdctrl->tdr = value & FD_TDR_BOOTSEL; + /* Tape indicators: never allow */ +} + +/* Main status register : 0x04 (read) */ +static uint32_t fdctrl_read_main_status(FDCtrl *fdctrl) +{ + uint32_t retval = fdctrl->msr; + + fdctrl->dsr &= ~FD_DSR_PWRDOWN; + fdctrl->dor |= FD_DOR_nRESET; + + FLOPPY_DPRINTF("main status register: 0x%02x\n", retval); + + return retval; +} + +/* Data select rate register : 0x04 (write) */ +static void fdctrl_write_rate(FDCtrl *fdctrl, uint32_t value) +{ + /* Reset mode */ + if (!(fdctrl->dor & FD_DOR_nRESET)) { + FLOPPY_DPRINTF("Floppy controller in RESET state !\n"); + return; + } + FLOPPY_DPRINTF("select rate register set to 0x%02x\n", value); + /* Reset: autoclear */ + if (value & FD_DSR_SWRESET) { + fdctrl->dor &= ~FD_DOR_nRESET; + fdctrl_reset(fdctrl, 1); + fdctrl->dor |= FD_DOR_nRESET; + } + if (value & FD_DSR_PWRDOWN) { + fdctrl_reset(fdctrl, 1); + } + fdctrl->dsr = value; +} + +/* Configuration control register: 0x07 (write) */ +static void fdctrl_write_ccr(FDCtrl *fdctrl, uint32_t value) +{ + /* Reset mode */ + if (!(fdctrl->dor & FD_DOR_nRESET)) { + FLOPPY_DPRINTF("Floppy controller in RESET state !\n"); + return; + } + FLOPPY_DPRINTF("configuration control register set to 0x%02x\n", value); + + /* Only the rate selection bits are used in AT mode, and we + * store those in the DSR.
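+ * (The encoding is the same as the DSR's low two bits; on the 82077/82078 + * family, 0 selects 500 kbit/s, 1 selects 300 kbit/s, 2 selects 250 kbit/s + * and 3 selects 1 Mbit/s.)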
+ */ + fdctrl->dsr = (fdctrl->dsr & ~FD_DSR_DRATEMASK) | + (value & FD_DSR_DRATEMASK); +} + +static int fdctrl_media_changed(FDrive *drv) +{ + return drv->media_changed; +} + +/* Digital input register : 0x07 (read-only) */ +static uint32_t fdctrl_read_dir(FDCtrl *fdctrl) +{ + uint32_t retval = 0; + + if (fdctrl_media_changed(get_cur_drv(fdctrl))) { + retval |= FD_DIR_DSKCHG; + } + if (retval != 0) { + FLOPPY_DPRINTF("Floppy digital input register: 0x%02x\n", retval); + } + + return retval; +} + +/* Clear the FIFO and update the state for receiving the next command */ +static void fdctrl_to_command_phase(FDCtrl *fdctrl) +{ + fdctrl->phase = FD_PHASE_COMMAND; + fdctrl->data_dir = FD_DIR_WRITE; + fdctrl->data_pos = 0; + fdctrl->data_len = 1; /* Accept command byte, adjust for params later */ + fdctrl->msr &= ~(FD_MSR_CMDBUSY | FD_MSR_DIO); + fdctrl->msr |= FD_MSR_RQM; +} + +/* Update the state to allow the guest to read out the command status. + * @fifo_len is the number of result bytes to be read out. */ +static void fdctrl_to_result_phase(FDCtrl *fdctrl, int fifo_len) +{ + fdctrl->phase = FD_PHASE_RESULT; + fdctrl->data_dir = FD_DIR_READ; + fdctrl->data_len = fifo_len; + fdctrl->data_pos = 0; + fdctrl->msr |= FD_MSR_CMDBUSY | FD_MSR_RQM | FD_MSR_DIO; +} + +/* Set an error: unimplemented/unknown command */ +static void fdctrl_unimplemented(FDCtrl *fdctrl, int direction) +{ + qemu_log_mask(LOG_UNIMP, "fdc: unimplemented command 0x%02x\n", + fdctrl->fifo[0]); + fdctrl->fifo[0] = FD_SR0_INVCMD; + fdctrl_to_result_phase(fdctrl, 1); +} + +/* Seek to next sector + * returns 0 when end of track reached (for DBL_SIDES on head 1) + * otherwise returns 1 + */ +static int fdctrl_seek_to_next_sect(FDCtrl *fdctrl, FDrive *cur_drv) +{ + FLOPPY_DPRINTF("seek to next sector (%d %02x %02x => %d)\n", + cur_drv->head, cur_drv->track, cur_drv->sect, + fd_sector(cur_drv)); + /* XXX: cur_drv->sect >= cur_drv->last_sect should be an + error in fact */ + uint8_t new_head = cur_drv->head; + uint8_t new_track = cur_drv->track; + uint8_t new_sect = cur_drv->sect; + + int ret = 1; + + if (new_sect >= cur_drv->last_sect || + new_sect == fdctrl->eot) { + new_sect = 1; + if (FD_MULTI_TRACK(fdctrl->data_state)) { + if (new_head == 0 && + (cur_drv->flags & FDISK_DBL_SIDES) != 0) { + new_head = 1; + } else { + new_head = 0; + new_track++; + fdctrl->status0 |= FD_SR0_SEEK; + if ((cur_drv->flags & FDISK_DBL_SIDES) == 0) { + ret = 0; + } + } + } else { + fdctrl->status0 |= FD_SR0_SEEK; + new_track++; + ret = 0; + } + if (ret == 1) { + FLOPPY_DPRINTF("seek to next track (%d %02x %02x => %d)\n", + new_head, new_track, new_sect, fd_sector(cur_drv)); + } + } else { + new_sect++; + } + fd_seek(cur_drv, new_head, new_track, new_sect, 1); + return ret; +} + +/* Callback for transfer end (stop or abort) */ +static void fdctrl_stop_transfer(FDCtrl *fdctrl, uint8_t status0, + uint8_t status1, uint8_t status2) +{ + FDrive *cur_drv; + cur_drv = get_cur_drv(fdctrl); + + fdctrl->status0 &= ~(FD_SR0_DS0 | FD_SR0_DS1 | FD_SR0_HEAD); + fdctrl->status0 |= GET_CUR_DRV(fdctrl); + if (cur_drv->head) { + fdctrl->status0 |= FD_SR0_HEAD; + } + fdctrl->status0 |= status0; + + FLOPPY_DPRINTF("transfer status: %02x %02x %02x (%02x)\n", + status0, status1, status2, fdctrl->status0); + fdctrl->fifo[0] = fdctrl->status0; + fdctrl->fifo[1] = status1; + fdctrl->fifo[2] = status2; + fdctrl->fifo[3] = cur_drv->track; + fdctrl->fifo[4] = cur_drv->head; + fdctrl->fifo[5] = cur_drv->sect; + fdctrl->fifo[6] = FD_SECTOR_SC; + fdctrl->data_dir = FD_DIR_READ; + if 
(fdctrl->dma_chann != -1 && !(fdctrl->msr & FD_MSR_NONDMA)) { + IsaDmaClass *k = ISADMA_GET_CLASS(fdctrl->dma); + k->release_DREQ(fdctrl->dma, fdctrl->dma_chann); + } + fdctrl->msr |= FD_MSR_RQM | FD_MSR_DIO; + fdctrl->msr &= ~FD_MSR_NONDMA; + + fdctrl_to_result_phase(fdctrl, 7); + fdctrl_raise_irq(fdctrl); +} + +/* Prepare a data transfer (either DMA or FIFO) */ +static void fdctrl_start_transfer(FDCtrl *fdctrl, int direction) +{ + FDrive *cur_drv; + uint8_t kh, kt, ks; + + SET_CUR_DRV(fdctrl, fdctrl->fifo[1] & FD_DOR_SELMASK); + cur_drv = get_cur_drv(fdctrl); + kt = fdctrl->fifo[2]; + kh = fdctrl->fifo[3]; + ks = fdctrl->fifo[4]; + FLOPPY_DPRINTF("Start transfer at %d %d %02x %02x (%d)\n", + GET_CUR_DRV(fdctrl), kh, kt, ks, + fd_sector_calc(kh, kt, ks, cur_drv->last_sect, + NUM_SIDES(cur_drv))); + switch (fd_seek(cur_drv, kh, kt, ks, fdctrl->config & FD_CONFIG_EIS)) { + case 2: + /* sect too big */ + fdctrl_stop_transfer(fdctrl, FD_SR0_ABNTERM, 0x00, 0x00); + fdctrl->fifo[3] = kt; + fdctrl->fifo[4] = kh; + fdctrl->fifo[5] = ks; + return; + case 3: + /* track too big */ + fdctrl_stop_transfer(fdctrl, FD_SR0_ABNTERM, FD_SR1_EC, 0x00); + fdctrl->fifo[3] = kt; + fdctrl->fifo[4] = kh; + fdctrl->fifo[5] = ks; + return; + case 4: + /* No seek enabled */ + fdctrl_stop_transfer(fdctrl, FD_SR0_ABNTERM, 0x00, 0x00); + fdctrl->fifo[3] = kt; + fdctrl->fifo[4] = kh; + fdctrl->fifo[5] = ks; + return; + case 1: + fdctrl->status0 |= FD_SR0_SEEK; + break; + default: + break; + } + + /* Check the data rate. If the programmed data rate does not match + * the currently inserted medium, the operation has to fail. */ + if ((fdctrl->dsr & FD_DSR_DRATEMASK) != cur_drv->media_rate) { + FLOPPY_DPRINTF("data rate mismatch (fdc=%d, media=%d)\n", + fdctrl->dsr & FD_DSR_DRATEMASK, cur_drv->media_rate); + fdctrl_stop_transfer(fdctrl, FD_SR0_ABNTERM, FD_SR1_MA, 0x00); + fdctrl->fifo[3] = kt; + fdctrl->fifo[4] = kh; + fdctrl->fifo[5] = ks; + return; + } + + /* Set the FIFO state */ + fdctrl->data_dir = direction; + fdctrl->data_pos = 0; + assert(fdctrl->msr & FD_MSR_CMDBUSY); + if (fdctrl->fifo[0] & 0x80) + fdctrl->data_state |= FD_STATE_MULTI; + else + fdctrl->data_state &= ~FD_STATE_MULTI; + if (fdctrl->fifo[5] == 0) { + fdctrl->data_len = fdctrl->fifo[8]; + } else { + int tmp; + fdctrl->data_len = 128 << (fdctrl->fifo[5] > 7 ? 7 : fdctrl->fifo[5]); + tmp = (fdctrl->fifo[6] - ks + 1); + if (fdctrl->fifo[0] & 0x80) + tmp += fdctrl->fifo[6]; + fdctrl->data_len *= tmp; + } + fdctrl->eot = fdctrl->fifo[6]; + if (fdctrl->dor & FD_DOR_DMAEN) { + /* DMA transfer is enabled. */ + IsaDmaClass *k = ISADMA_GET_CLASS(fdctrl->dma); + + FLOPPY_DPRINTF("direction=%d (%d - %d)\n", + direction, (128 << fdctrl->fifo[5]) * + (cur_drv->last_sect - ks + 1), fdctrl->data_len); + + /* No access is allowed until DMA transfer has completed */ + fdctrl->msr &= ~FD_MSR_RQM; + if (direction != FD_DIR_VERIFY) { + /* + * Now, we just have to wait for the DMA controller to + * recall us... 
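+ * (it does so by calling fdctrl_transfer_handler(), which the bus-specific + * FDC code registers as the handler for this DMA channel)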
+ */ + k->hold_DREQ(fdctrl->dma, fdctrl->dma_chann); + k->schedule(fdctrl->dma); + } else { + /* Start transfer */ + fdctrl_transfer_handler(fdctrl, fdctrl->dma_chann, 0, + fdctrl->data_len); + } + return; + } + FLOPPY_DPRINTF("start non-DMA transfer\n"); + fdctrl->msr |= FD_MSR_NONDMA | FD_MSR_RQM; + if (direction != FD_DIR_WRITE) + fdctrl->msr |= FD_MSR_DIO; + /* IO based transfer: calculate len */ + fdctrl_raise_irq(fdctrl); +} + +/* Prepare a transfer of deleted data */ +static void fdctrl_start_transfer_del(FDCtrl *fdctrl, int direction) +{ + qemu_log_mask(LOG_UNIMP, "fdctrl_start_transfer_del() unimplemented\n"); + + /* We don't handle deleted data, + * so we don't return *ANYTHING* + */ + fdctrl_stop_transfer(fdctrl, FD_SR0_ABNTERM | FD_SR0_SEEK, 0x00, 0x00); +} + +/* handlers for DMA transfers */ +int fdctrl_transfer_handler(void *opaque, int nchan, int dma_pos, int dma_len) +{ + FDCtrl *fdctrl; + FDrive *cur_drv; + int len, start_pos, rel_pos; + uint8_t status0 = 0x00, status1 = 0x00, status2 = 0x00; + IsaDmaClass *k; + + fdctrl = opaque; + if (fdctrl->msr & FD_MSR_RQM) { + FLOPPY_DPRINTF("Not in DMA transfer mode !\n"); + return 0; + } + k = ISADMA_GET_CLASS(fdctrl->dma); + cur_drv = get_cur_drv(fdctrl); + if (fdctrl->data_dir == FD_DIR_SCANE || fdctrl->data_dir == FD_DIR_SCANL || + fdctrl->data_dir == FD_DIR_SCANH) + status2 = FD_SR2_SNS; + if (dma_len > fdctrl->data_len) + dma_len = fdctrl->data_len; + if (cur_drv->blk == NULL) { + if (fdctrl->data_dir == FD_DIR_WRITE) + fdctrl_stop_transfer(fdctrl, FD_SR0_ABNTERM | FD_SR0_SEEK, 0x00, 0x00); + else + fdctrl_stop_transfer(fdctrl, FD_SR0_ABNTERM, 0x00, 0x00); + len = 0; + goto transfer_error; + } + rel_pos = fdctrl->data_pos % FD_SECTOR_LEN; + for (start_pos = fdctrl->data_pos; fdctrl->data_pos < dma_len;) { + len = dma_len - fdctrl->data_pos; + if (len + rel_pos > FD_SECTOR_LEN) + len = FD_SECTOR_LEN - rel_pos; + FLOPPY_DPRINTF("copy %d bytes (%d %d %d) %d pos %d %02x " + "(%d-0x%08x 0x%08x)\n", len, dma_len, fdctrl->data_pos, + fdctrl->data_len, GET_CUR_DRV(fdctrl), cur_drv->head, + cur_drv->track, cur_drv->sect, fd_sector(cur_drv), + fd_sector(cur_drv) * FD_SECTOR_LEN); + if (fdctrl->data_dir != FD_DIR_WRITE || + len < FD_SECTOR_LEN || rel_pos != 0) { + /* READ & SCAN commands and realign to a sector for WRITE */ + if (blk_pread(cur_drv->blk, fd_offset(cur_drv), + fdctrl->fifo, BDRV_SECTOR_SIZE) < 0) { + FLOPPY_DPRINTF("Floppy: error getting sector %d\n", + fd_sector(cur_drv)); + /* Sure, image size is too small... */ + memset(fdctrl->fifo, 0, FD_SECTOR_LEN); + } + } + switch (fdctrl->data_dir) { + case FD_DIR_READ: + /* READ commands */ + k->write_memory(fdctrl->dma, nchan, fdctrl->fifo + rel_pos, + fdctrl->data_pos, len); + break; + case FD_DIR_WRITE: + /* WRITE commands */ + if (cur_drv->ro) { + /* Handle readonly medium early, no need to do DMA, touch the + * LED or attempt any writes. A real floppy doesn't attempt + * to write to readonly media either. 
*/ + fdctrl_stop_transfer(fdctrl, + FD_SR0_ABNTERM | FD_SR0_SEEK, FD_SR1_NW, + 0x00); + goto transfer_error; + } + + k->read_memory(fdctrl->dma, nchan, fdctrl->fifo + rel_pos, + fdctrl->data_pos, len); + if (blk_pwrite(cur_drv->blk, fd_offset(cur_drv), + fdctrl->fifo, BDRV_SECTOR_SIZE, 0) < 0) { + FLOPPY_DPRINTF("error writing sector %d\n", + fd_sector(cur_drv)); + fdctrl_stop_transfer(fdctrl, FD_SR0_ABNTERM | FD_SR0_SEEK, 0x00, 0x00); + goto transfer_error; + } + break; + case FD_DIR_VERIFY: + /* VERIFY commands */ + break; + default: + /* SCAN commands */ + { + uint8_t tmpbuf[FD_SECTOR_LEN]; + int ret; + k->read_memory(fdctrl->dma, nchan, tmpbuf, fdctrl->data_pos, + len); + ret = memcmp(tmpbuf, fdctrl->fifo + rel_pos, len); + if (ret == 0) { + status2 = FD_SR2_SEH; + goto end_transfer; + } + if ((ret < 0 && fdctrl->data_dir == FD_DIR_SCANL) || + (ret > 0 && fdctrl->data_dir == FD_DIR_SCANH)) { + status2 = 0x00; + goto end_transfer; + } + } + break; + } + fdctrl->data_pos += len; + rel_pos = fdctrl->data_pos % FD_SECTOR_LEN; + if (rel_pos == 0) { + /* Seek to next sector */ + if (!fdctrl_seek_to_next_sect(fdctrl, cur_drv)) + break; + } + } + end_transfer: + len = fdctrl->data_pos - start_pos; + FLOPPY_DPRINTF("end transfer %d %d %d\n", + fdctrl->data_pos, len, fdctrl->data_len); + if (fdctrl->data_dir == FD_DIR_SCANE || + fdctrl->data_dir == FD_DIR_SCANL || + fdctrl->data_dir == FD_DIR_SCANH) + status2 = FD_SR2_SEH; + fdctrl->data_len -= len; + fdctrl_stop_transfer(fdctrl, status0, status1, status2); + transfer_error: + + return len; +} + +/* Data register : 0x05 */ +static uint32_t fdctrl_read_data(FDCtrl *fdctrl) +{ + FDrive *cur_drv; + uint32_t retval = 0; + uint32_t pos; + + cur_drv = get_cur_drv(fdctrl); + fdctrl->dsr &= ~FD_DSR_PWRDOWN; + if (!(fdctrl->msr & FD_MSR_RQM) || !(fdctrl->msr & FD_MSR_DIO)) { + FLOPPY_DPRINTF("error: controller not ready for reading\n"); + return 0; + } + + /* If data_len spans multiple sectors, the current position in the FIFO + * wraps around while fdctrl->data_pos is the real position in the whole + * request. */ + pos = fdctrl->data_pos; + pos %= FD_SECTOR_LEN; + + switch (fdctrl->phase) { + case FD_PHASE_EXECUTION: + assert(fdctrl->msr & FD_MSR_NONDMA); + if (pos == 0) { + if (fdctrl->data_pos != 0) + if (!fdctrl_seek_to_next_sect(fdctrl, cur_drv)) { + FLOPPY_DPRINTF("error seeking to next sector %d\n", + fd_sector(cur_drv)); + return 0; + } + if (blk_pread(cur_drv->blk, fd_offset(cur_drv), fdctrl->fifo, + BDRV_SECTOR_SIZE) + < 0) { + FLOPPY_DPRINTF("error getting sector %d\n", + fd_sector(cur_drv)); + /* Sure, image size is too small... 
*/ + memset(fdctrl->fifo, 0, FD_SECTOR_LEN); + } + } + + if (++fdctrl->data_pos == fdctrl->data_len) { + fdctrl->msr &= ~FD_MSR_RQM; + fdctrl_stop_transfer(fdctrl, 0x00, 0x00, 0x00); + } + break; + + case FD_PHASE_RESULT: + assert(!(fdctrl->msr & FD_MSR_NONDMA)); + if (++fdctrl->data_pos == fdctrl->data_len) { + fdctrl->msr &= ~FD_MSR_RQM; + fdctrl_to_command_phase(fdctrl); + fdctrl_reset_irq(fdctrl); + } + break; + + case FD_PHASE_COMMAND: + default: + abort(); + } + + retval = fdctrl->fifo[pos]; + FLOPPY_DPRINTF("data register: 0x%02x\n", retval); + + return retval; +} + +static void fdctrl_format_sector(FDCtrl *fdctrl) +{ + FDrive *cur_drv; + uint8_t kh, kt, ks; + + SET_CUR_DRV(fdctrl, fdctrl->fifo[1] & FD_DOR_SELMASK); + cur_drv = get_cur_drv(fdctrl); + kt = fdctrl->fifo[6]; + kh = fdctrl->fifo[7]; + ks = fdctrl->fifo[8]; + FLOPPY_DPRINTF("format sector at %d %d %02x %02x (%d)\n", + GET_CUR_DRV(fdctrl), kh, kt, ks, + fd_sector_calc(kh, kt, ks, cur_drv->last_sect, + NUM_SIDES(cur_drv))); + switch (fd_seek(cur_drv, kh, kt, ks, fdctrl->config & FD_CONFIG_EIS)) { + case 2: + /* sect too big */ + fdctrl_stop_transfer(fdctrl, FD_SR0_ABNTERM, 0x00, 0x00); + fdctrl->fifo[3] = kt; + fdctrl->fifo[4] = kh; + fdctrl->fifo[5] = ks; + return; + case 3: + /* track too big */ + fdctrl_stop_transfer(fdctrl, FD_SR0_ABNTERM, FD_SR1_EC, 0x00); + fdctrl->fifo[3] = kt; + fdctrl->fifo[4] = kh; + fdctrl->fifo[5] = ks; + return; + case 4: + /* No seek enabled */ + fdctrl_stop_transfer(fdctrl, FD_SR0_ABNTERM, 0x00, 0x00); + fdctrl->fifo[3] = kt; + fdctrl->fifo[4] = kh; + fdctrl->fifo[5] = ks; + return; + case 1: + fdctrl->status0 |= FD_SR0_SEEK; + break; + default: + break; + } + memset(fdctrl->fifo, 0, FD_SECTOR_LEN); + if (cur_drv->blk == NULL || + blk_pwrite(cur_drv->blk, fd_offset(cur_drv), fdctrl->fifo, + BDRV_SECTOR_SIZE, 0) < 0) { + FLOPPY_DPRINTF("error formatting sector %d\n", fd_sector(cur_drv)); + fdctrl_stop_transfer(fdctrl, FD_SR0_ABNTERM | FD_SR0_SEEK, 0x00, 0x00); + } else { + if (cur_drv->sect == cur_drv->last_sect) { + fdctrl->data_state &= ~FD_STATE_FORMAT; + /* Last sector done */ + fdctrl_stop_transfer(fdctrl, 0x00, 0x00, 0x00); + } else { + /* More to do */ + fdctrl->data_pos = 0; + fdctrl->data_len = 4; + } + } +} + +static void fdctrl_handle_lock(FDCtrl *fdctrl, int direction) +{ + fdctrl->lock = (fdctrl->fifo[0] & 0x80) ? 1 : 0; + fdctrl->fifo[0] = fdctrl->lock << 4; + fdctrl_to_result_phase(fdctrl, 1); +} + +static void fdctrl_handle_dumpreg(FDCtrl *fdctrl, int direction) +{ + FDrive *cur_drv = get_cur_drv(fdctrl); + + /* Drives position */ + fdctrl->fifo[0] = drv0(fdctrl)->track; + fdctrl->fifo[1] = drv1(fdctrl)->track; +#if MAX_FD == 4 + fdctrl->fifo[2] = drv2(fdctrl)->track; + fdctrl->fifo[3] = drv3(fdctrl)->track; +#else + fdctrl->fifo[2] = 0; + fdctrl->fifo[3] = 0; +#endif + /* timers */ + fdctrl->fifo[4] = fdctrl->timer0; + fdctrl->fifo[5] = (fdctrl->timer1 << 1) | (fdctrl->dor & FD_DOR_DMAEN ? 
1 : 0); + fdctrl->fifo[6] = cur_drv->last_sect; + fdctrl->fifo[7] = (fdctrl->lock << 7) | + (cur_drv->perpendicular << 2); + fdctrl->fifo[8] = fdctrl->config; + fdctrl->fifo[9] = fdctrl->precomp_trk; + fdctrl_to_result_phase(fdctrl, 10); +} + +static void fdctrl_handle_version(FDCtrl *fdctrl, int direction) +{ + /* Controller's version */ + fdctrl->fifo[0] = fdctrl->version; + fdctrl_to_result_phase(fdctrl, 1); +} + +static void fdctrl_handle_partid(FDCtrl *fdctrl, int direction) +{ + fdctrl->fifo[0] = 0x41; /* Stepping 1 */ + fdctrl_to_result_phase(fdctrl, 1); +} + +static void fdctrl_handle_restore(FDCtrl *fdctrl, int direction) +{ + FDrive *cur_drv = get_cur_drv(fdctrl); + + /* Drives position */ + drv0(fdctrl)->track = fdctrl->fifo[3]; + drv1(fdctrl)->track = fdctrl->fifo[4]; +#if MAX_FD == 4 + drv2(fdctrl)->track = fdctrl->fifo[5]; + drv3(fdctrl)->track = fdctrl->fifo[6]; +#endif + /* timers */ + fdctrl->timer0 = fdctrl->fifo[7]; + fdctrl->timer1 = fdctrl->fifo[8]; + cur_drv->last_sect = fdctrl->fifo[9]; + fdctrl->lock = fdctrl->fifo[10] >> 7; + cur_drv->perpendicular = (fdctrl->fifo[10] >> 2) & 0xF; + fdctrl->config = fdctrl->fifo[11]; + fdctrl->precomp_trk = fdctrl->fifo[12]; + fdctrl->pwrd = fdctrl->fifo[13]; + fdctrl_to_command_phase(fdctrl); +} + +static void fdctrl_handle_save(FDCtrl *fdctrl, int direction) +{ + FDrive *cur_drv = get_cur_drv(fdctrl); + + fdctrl->fifo[0] = 0; + fdctrl->fifo[1] = 0; + /* Drives position */ + fdctrl->fifo[2] = drv0(fdctrl)->track; + fdctrl->fifo[3] = drv1(fdctrl)->track; +#if MAX_FD == 4 + fdctrl->fifo[4] = drv2(fdctrl)->track; + fdctrl->fifo[5] = drv3(fdctrl)->track; +#else + fdctrl->fifo[4] = 0; + fdctrl->fifo[5] = 0; +#endif + /* timers */ + fdctrl->fifo[6] = fdctrl->timer0; + fdctrl->fifo[7] = fdctrl->timer1; + fdctrl->fifo[8] = cur_drv->last_sect; + fdctrl->fifo[9] = (fdctrl->lock << 7) | + (cur_drv->perpendicular << 2); + fdctrl->fifo[10] = fdctrl->config; + fdctrl->fifo[11] = fdctrl->precomp_trk; + fdctrl->fifo[12] = fdctrl->pwrd; + fdctrl->fifo[13] = 0; + fdctrl->fifo[14] = 0; + fdctrl_to_result_phase(fdctrl, 15); +} + +static void fdctrl_handle_readid(FDCtrl *fdctrl, int direction) +{ + FDrive *cur_drv = get_cur_drv(fdctrl); + + cur_drv->head = (fdctrl->fifo[1] >> 2) & 1; + timer_mod(fdctrl->result_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + + (NANOSECONDS_PER_SECOND / 50)); +} + +static void fdctrl_handle_format_track(FDCtrl *fdctrl, int direction) +{ + FDrive *cur_drv; + + SET_CUR_DRV(fdctrl, fdctrl->fifo[1] & FD_DOR_SELMASK); + cur_drv = get_cur_drv(fdctrl); + fdctrl->data_state |= FD_STATE_FORMAT; + if (fdctrl->fifo[0] & 0x80) + fdctrl->data_state |= FD_STATE_MULTI; + else + fdctrl->data_state &= ~FD_STATE_MULTI; + cur_drv->bps = + fdctrl->fifo[2] > 7 ? 16384 : 128 << fdctrl->fifo[2]; +#if 0 + cur_drv->last_sect = + cur_drv->flags & FDISK_DBL_SIDES ? 
fdctrl->fifo[3] : + fdctrl->fifo[3] / 2; +#else + cur_drv->last_sect = fdctrl->fifo[3]; +#endif + /* TODO: implement format using DMA expected by the Bochs BIOS + * and Linux fdformat (read 3 bytes per sector via DMA and fill + * the sector with the specified fill byte) + */ + fdctrl->data_state &= ~FD_STATE_FORMAT; + fdctrl_stop_transfer(fdctrl, 0x00, 0x00, 0x00); +} + +static void fdctrl_handle_specify(FDCtrl *fdctrl, int direction) +{ + fdctrl->timer0 = (fdctrl->fifo[1] >> 4) & 0xF; + fdctrl->timer1 = fdctrl->fifo[2] >> 1; + if (fdctrl->fifo[2] & 1) + fdctrl->dor &= ~FD_DOR_DMAEN; + else + fdctrl->dor |= FD_DOR_DMAEN; + /* No result back */ + fdctrl_to_command_phase(fdctrl); +} + +static void fdctrl_handle_sense_drive_status(FDCtrl *fdctrl, int direction) +{ + FDrive *cur_drv; + + SET_CUR_DRV(fdctrl, fdctrl->fifo[1] & FD_DOR_SELMASK); + cur_drv = get_cur_drv(fdctrl); + cur_drv->head = (fdctrl->fifo[1] >> 2) & 1; + /* 1 Byte status back */ + fdctrl->fifo[0] = (cur_drv->ro << 6) | + (cur_drv->track == 0 ? 0x10 : 0x00) | + (cur_drv->head << 2) | + GET_CUR_DRV(fdctrl) | + 0x28; + fdctrl_to_result_phase(fdctrl, 1); +} + +static void fdctrl_handle_recalibrate(FDCtrl *fdctrl, int direction) +{ + FDrive *cur_drv; + + SET_CUR_DRV(fdctrl, fdctrl->fifo[1] & FD_DOR_SELMASK); + cur_drv = get_cur_drv(fdctrl); + fd_recalibrate(cur_drv); + fdctrl_to_command_phase(fdctrl); + /* Raise Interrupt */ + fdctrl->status0 |= FD_SR0_SEEK; + fdctrl_raise_irq(fdctrl); +} + +static void fdctrl_handle_sense_interrupt_status(FDCtrl *fdctrl, int direction) +{ + FDrive *cur_drv = get_cur_drv(fdctrl); + + if (fdctrl->reset_sensei > 0) { + fdctrl->fifo[0] = + FD_SR0_RDYCHG + FD_RESET_SENSEI_COUNT - fdctrl->reset_sensei; + fdctrl->reset_sensei--; + } else if (!(fdctrl->sra & FD_SRA_INTPEND)) { + fdctrl->fifo[0] = FD_SR0_INVCMD; + fdctrl_to_result_phase(fdctrl, 1); + return; + } else { + fdctrl->fifo[0] = + (fdctrl->status0 & ~(FD_SR0_HEAD | FD_SR0_DS1 | FD_SR0_DS0)) + | GET_CUR_DRV(fdctrl); + } + + fdctrl->fifo[1] = cur_drv->track; + fdctrl_to_result_phase(fdctrl, 2); + fdctrl_reset_irq(fdctrl); + fdctrl->status0 = FD_SR0_RDYCHG; +} + +static void fdctrl_handle_seek(FDCtrl *fdctrl, int direction) +{ + FDrive *cur_drv; + + SET_CUR_DRV(fdctrl, fdctrl->fifo[1] & FD_DOR_SELMASK); + cur_drv = get_cur_drv(fdctrl); + fdctrl_to_command_phase(fdctrl); + /* The seek command just sends step pulses to the drive and doesn't care if + * there is a medium inserted or if it's banging the head against the drive.
+ */ + fd_seek(cur_drv, cur_drv->head, fdctrl->fifo[2], cur_drv->sect, 1); + /* Raise Interrupt */ + fdctrl->status0 |= FD_SR0_SEEK; + fdctrl_raise_irq(fdctrl); +} + +static void fdctrl_handle_perpendicular_mode(FDCtrl *fdctrl, int direction) +{ + FDrive *cur_drv = get_cur_drv(fdctrl); + + if (fdctrl->fifo[1] & 0x80) + cur_drv->perpendicular = fdctrl->fifo[1] & 0x7; + /* No result back */ + fdctrl_to_command_phase(fdctrl); +} + +static void fdctrl_handle_configure(FDCtrl *fdctrl, int direction) +{ + fdctrl->config = fdctrl->fifo[2]; + fdctrl->precomp_trk = fdctrl->fifo[3]; + /* No result back */ + fdctrl_to_command_phase(fdctrl); +} + +static void fdctrl_handle_powerdown_mode(FDCtrl *fdctrl, int direction) +{ + fdctrl->pwrd = fdctrl->fifo[1]; + fdctrl->fifo[0] = fdctrl->fifo[1]; + fdctrl_to_result_phase(fdctrl, 1); +} + +static void fdctrl_handle_option(FDCtrl *fdctrl, int direction) +{ + /* No result back */ + fdctrl_to_command_phase(fdctrl); +} + +static void fdctrl_handle_drive_specification_command(FDCtrl *fdctrl, int direction) +{ + FDrive *cur_drv = get_cur_drv(fdctrl); + uint32_t pos; + + pos = fdctrl->data_pos - 1; + pos %= FD_SECTOR_LEN; + if (fdctrl->fifo[pos] & 0x80) { + /* Command parameters done */ + if (fdctrl->fifo[pos] & 0x40) { + fdctrl->fifo[0] = fdctrl->fifo[1]; + fdctrl->fifo[2] = 0; + fdctrl->fifo[3] = 0; + fdctrl_to_result_phase(fdctrl, 4); + } else { + fdctrl_to_command_phase(fdctrl); + } + } else if (fdctrl->data_len > 7) { + /* ERROR */ + fdctrl->fifo[0] = 0x80 | + (cur_drv->head << 2) | GET_CUR_DRV(fdctrl); + fdctrl_to_result_phase(fdctrl, 1); + } +} + +static void fdctrl_handle_relative_seek_in(FDCtrl *fdctrl, int direction) +{ + FDrive *cur_drv; + + SET_CUR_DRV(fdctrl, fdctrl->fifo[1] & FD_DOR_SELMASK); + cur_drv = get_cur_drv(fdctrl); + if (fdctrl->fifo[2] + cur_drv->track >= cur_drv->max_track) { + fd_seek(cur_drv, cur_drv->head, cur_drv->max_track - 1, + cur_drv->sect, 1); + } else { + fd_seek(cur_drv, cur_drv->head, + cur_drv->track + fdctrl->fifo[2], cur_drv->sect, 1); + } + fdctrl_to_command_phase(fdctrl); + /* Raise Interrupt */ + fdctrl->status0 |= FD_SR0_SEEK; + fdctrl_raise_irq(fdctrl); +} + +static void fdctrl_handle_relative_seek_out(FDCtrl *fdctrl, int direction) +{ + FDrive *cur_drv; + + SET_CUR_DRV(fdctrl, fdctrl->fifo[1] & FD_DOR_SELMASK); + cur_drv = get_cur_drv(fdctrl); + if (fdctrl->fifo[2] > cur_drv->track) { + fd_seek(cur_drv, cur_drv->head, 0, cur_drv->sect, 1); + } else { + fd_seek(cur_drv, cur_drv->head, + cur_drv->track - fdctrl->fifo[2], cur_drv->sect, 1); + } + fdctrl_to_command_phase(fdctrl); + /* Raise Interrupt */ + fdctrl->status0 |= FD_SR0_SEEK; + fdctrl_raise_irq(fdctrl); +} + +/* + * Handlers for the execution phase of each command + */ +typedef struct FDCtrlCommand { + uint8_t value; + uint8_t mask; + const char* name; + int parameters; + void (*handler)(FDCtrl *fdctrl, int direction); + int direction; +} FDCtrlCommand; + +static const FDCtrlCommand handlers[] = { + { FD_CMD_READ, 0x1f, "READ", 8, fdctrl_start_transfer, FD_DIR_READ }, + { FD_CMD_WRITE, 0x3f, "WRITE", 8, fdctrl_start_transfer, FD_DIR_WRITE }, + { FD_CMD_SEEK, 0xff, "SEEK", 2, fdctrl_handle_seek }, + { FD_CMD_SENSE_INTERRUPT_STATUS, 0xff, "SENSE INTERRUPT STATUS", 0, fdctrl_handle_sense_interrupt_status }, + { FD_CMD_RECALIBRATE, 0xff, "RECALIBRATE", 1, fdctrl_handle_recalibrate }, + { FD_CMD_FORMAT_TRACK, 0xbf, "FORMAT TRACK", 5, fdctrl_handle_format_track }, + { FD_CMD_READ_TRACK, 0xbf, "READ TRACK", 8, fdctrl_start_transfer, FD_DIR_READ }, + { FD_CMD_RESTORE, 
0xff, "RESTORE", 17, fdctrl_handle_restore }, /* part of READ DELETED DATA */ + { FD_CMD_SAVE, 0xff, "SAVE", 0, fdctrl_handle_save }, /* part of READ DELETED DATA */ + { FD_CMD_READ_DELETED, 0x1f, "READ DELETED DATA", 8, fdctrl_start_transfer_del, FD_DIR_READ }, + { FD_CMD_SCAN_EQUAL, 0x1f, "SCAN EQUAL", 8, fdctrl_start_transfer, FD_DIR_SCANE }, + { FD_CMD_VERIFY, 0x1f, "VERIFY", 8, fdctrl_start_transfer, FD_DIR_VERIFY }, + { FD_CMD_SCAN_LOW_OR_EQUAL, 0x1f, "SCAN LOW OR EQUAL", 8, fdctrl_start_transfer, FD_DIR_SCANL }, + { FD_CMD_SCAN_HIGH_OR_EQUAL, 0x1f, "SCAN HIGH OR EQUAL", 8, fdctrl_start_transfer, FD_DIR_SCANH }, + { FD_CMD_WRITE_DELETED, 0x3f, "WRITE DELETED DATA", 8, fdctrl_start_transfer_del, FD_DIR_WRITE }, + { FD_CMD_READ_ID, 0xbf, "READ ID", 1, fdctrl_handle_readid }, + { FD_CMD_SPECIFY, 0xff, "SPECIFY", 2, fdctrl_handle_specify }, + { FD_CMD_SENSE_DRIVE_STATUS, 0xff, "SENSE DRIVE STATUS", 1, fdctrl_handle_sense_drive_status }, + { FD_CMD_PERPENDICULAR_MODE, 0xff, "PERPENDICULAR MODE", 1, fdctrl_handle_perpendicular_mode }, + { FD_CMD_CONFIGURE, 0xff, "CONFIGURE", 3, fdctrl_handle_configure }, + { FD_CMD_POWERDOWN_MODE, 0xff, "POWERDOWN MODE", 2, fdctrl_handle_powerdown_mode }, + { FD_CMD_OPTION, 0xff, "OPTION", 1, fdctrl_handle_option }, + { FD_CMD_DRIVE_SPECIFICATION_COMMAND, 0xff, "DRIVE SPECIFICATION COMMAND", 5, fdctrl_handle_drive_specification_command }, + { FD_CMD_RELATIVE_SEEK_OUT, 0xff, "RELATIVE SEEK OUT", 2, fdctrl_handle_relative_seek_out }, + { FD_CMD_FORMAT_AND_WRITE, 0xff, "FORMAT AND WRITE", 10, fdctrl_unimplemented }, + { FD_CMD_RELATIVE_SEEK_IN, 0xff, "RELATIVE SEEK IN", 2, fdctrl_handle_relative_seek_in }, + { FD_CMD_LOCK, 0x7f, "LOCK", 0, fdctrl_handle_lock }, + { FD_CMD_DUMPREG, 0xff, "DUMPREG", 0, fdctrl_handle_dumpreg }, + { FD_CMD_VERSION, 0xff, "VERSION", 0, fdctrl_handle_version }, + { FD_CMD_PART_ID, 0xff, "PART ID", 0, fdctrl_handle_partid }, + { FD_CMD_WRITE, 0x1f, "WRITE (BeOS)", 8, fdctrl_start_transfer, FD_DIR_WRITE }, /* not in specification ; BeOS 4.5 bug */ + { 0, 0, "unknown", 0, fdctrl_unimplemented }, /* default handler */ +}; +/* Associate command to an index in the 'handlers' array */ +static uint8_t command_to_handler[256]; + +static const FDCtrlCommand *get_command(uint8_t cmd) +{ + int idx; + + idx = command_to_handler[cmd]; + FLOPPY_DPRINTF("%s command\n", handlers[idx].name); + return &handlers[idx]; +} + +static void fdctrl_write_data(FDCtrl *fdctrl, uint32_t value) +{ + FDrive *cur_drv; + const FDCtrlCommand *cmd; + uint32_t pos; + + /* Reset mode */ + if (!(fdctrl->dor & FD_DOR_nRESET)) { + FLOPPY_DPRINTF("Floppy controller in RESET state !\n"); + return; + } + if (!(fdctrl->msr & FD_MSR_RQM) || (fdctrl->msr & FD_MSR_DIO)) { + FLOPPY_DPRINTF("error: controller not ready for writing\n"); + return; + } + fdctrl->dsr &= ~FD_DSR_PWRDOWN; + + FLOPPY_DPRINTF("%s: %02x\n", __func__, value); + + /* If data_len spans multiple sectors, the current position in the FIFO + * wraps around while fdctrl->data_pos is the real position in the whole + * request. */ + pos = fdctrl->data_pos++; + pos %= FD_SECTOR_LEN; + fdctrl->fifo[pos] = value; + + if (fdctrl->data_pos == fdctrl->data_len) { + fdctrl->msr &= ~FD_MSR_RQM; + } + + switch (fdctrl->phase) { + case FD_PHASE_EXECUTION: + /* For DMA requests, RQM should be cleared during execution phase, so + * we would have errored out above. 
*/ + assert(fdctrl->msr & FD_MSR_NONDMA); + + /* FIFO data write */ + if (pos == FD_SECTOR_LEN - 1 || + fdctrl->data_pos == fdctrl->data_len) { + cur_drv = get_cur_drv(fdctrl); + if (blk_pwrite(cur_drv->blk, fd_offset(cur_drv), fdctrl->fifo, + BDRV_SECTOR_SIZE, 0) < 0) { + FLOPPY_DPRINTF("error writing sector %d\n", + fd_sector(cur_drv)); + break; + } + if (!fdctrl_seek_to_next_sect(fdctrl, cur_drv)) { + FLOPPY_DPRINTF("error seeking to next sector %d\n", + fd_sector(cur_drv)); + break; + } + } + + /* Switch to result phase when done with the transfer */ + if (fdctrl->data_pos == fdctrl->data_len) { + fdctrl_stop_transfer(fdctrl, 0x00, 0x00, 0x00); + } + break; + + case FD_PHASE_COMMAND: + assert(!(fdctrl->msr & FD_MSR_NONDMA)); + assert(fdctrl->data_pos < FD_SECTOR_LEN); + + if (pos == 0) { + /* The first byte specifies the command. Now we start reading + * as many parameters as this command requires. */ + cmd = get_command(value); + fdctrl->data_len = cmd->parameters + 1; + if (cmd->parameters) { + fdctrl->msr |= FD_MSR_RQM; + } + fdctrl->msr |= FD_MSR_CMDBUSY; + } + + if (fdctrl->data_pos == fdctrl->data_len) { + /* We have all parameters now, execute the command */ + fdctrl->phase = FD_PHASE_EXECUTION; + + if (fdctrl->data_state & FD_STATE_FORMAT) { + fdctrl_format_sector(fdctrl); + break; + } + + cmd = get_command(fdctrl->fifo[0]); + FLOPPY_DPRINTF("Calling handler for '%s'\n", cmd->name); + cmd->handler(fdctrl, cmd->direction); + } + break; + + case FD_PHASE_RESULT: + default: + abort(); + } +} + +static void fdctrl_result_timer(void *opaque) +{ + FDCtrl *fdctrl = opaque; + FDrive *cur_drv = get_cur_drv(fdctrl); + + /* Pretend we are spinning. + * This is needed for Coherent, which uses READ ID to check for + * sector interleaving. + */ + if (cur_drv->last_sect != 0) { + cur_drv->sect = (cur_drv->sect % cur_drv->last_sect) + 1; + } + /* READ_ID can't automatically succeed! 
*/ + if ((fdctrl->dsr & FD_DSR_DRATEMASK) != cur_drv->media_rate) { + FLOPPY_DPRINTF("read id rate mismatch (fdc=%d, media=%d)\n", + fdctrl->dsr & FD_DSR_DRATEMASK, cur_drv->media_rate); + fdctrl_stop_transfer(fdctrl, FD_SR0_ABNTERM, FD_SR1_MA, 0x00); + } else { + fdctrl_stop_transfer(fdctrl, 0x00, 0x00, 0x00); + } +} + +/* Init functions */ + +void fdctrl_init_drives(FloppyBus *bus, DriveInfo **fds) +{ + DeviceState *dev; + int i; + + for (i = 0; i < MAX_FD; i++) { + if (fds[i]) { + dev = qdev_new("floppy"); + qdev_prop_set_uint32(dev, "unit", i); + qdev_prop_set_enum(dev, "drive-type", FLOPPY_DRIVE_TYPE_AUTO); + qdev_prop_set_drive_err(dev, "drive", blk_by_legacy_dinfo(fds[i]), + &error_fatal); + qdev_realize_and_unref(dev, &bus->bus, &error_fatal); + } + } +} + +void fdctrl_realize_common(DeviceState *dev, FDCtrl *fdctrl, Error **errp) +{ + int i, j; + FDrive *drive; + static int command_tables_inited = 0; + + if (fdctrl->fallback == FLOPPY_DRIVE_TYPE_AUTO) { + error_setg(errp, "Cannot choose a fallback FDrive type of 'auto'"); + return; + } + + /* Fill the 'command_to_handler' lookup table; walk the handlers table + * backwards so that entries earlier in it take precedence, with the + * catch-all default entry written first and then overwritten. */ + if (!command_tables_inited) { + command_tables_inited = 1; + for (i = ARRAY_SIZE(handlers) - 1; i >= 0; i--) { + for (j = 0; j < sizeof(command_to_handler); j++) { + if ((j & handlers[i].mask) == handlers[i].value) { + command_to_handler[j] = i; + } + } + } + } + + FLOPPY_DPRINTF("init controller\n"); + fdctrl->fifo = qemu_memalign(512, FD_SECTOR_LEN); + memset(fdctrl->fifo, 0, FD_SECTOR_LEN); + fdctrl->fifo_size = 512; + fdctrl->result_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, + fdctrl_result_timer, fdctrl); + + fdctrl->version = 0x90; /* Intel 82078 controller */ + fdctrl->config = FD_CONFIG_EIS | FD_CONFIG_EFIFO; /* Implied seek enabled, FIFO disabled */ + fdctrl->num_floppies = MAX_FD; + + floppy_bus_create(fdctrl, &fdctrl->bus, dev); + + for (i = 0; i < MAX_FD; i++) { + drive = &fdctrl->drives[i]; + drive->fdctrl = fdctrl; + fd_init(drive); + fd_revalidate(drive); + } +} + +static void fdc_register_types(void) +{ + type_register_static(&floppy_bus_info); + type_register_static(&floppy_drive_info); +} + +type_init(fdc_register_types) diff --git a/hw/block/hd-geometry.c b/hw/block/hd-geometry.c new file mode 100644 index 000000000..dcbccee29 --- /dev/null +++ b/hw/block/hd-geometry.c @@ -0,0 +1,163 @@ +/* + * Hard disk geometry utilities + * + * Copyright (C) 2012 Red Hat, Inc. + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + * This file incorporates work covered by the following copyright and + * permission notice: + * + * Copyright (c) 2003 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "sysemu/block-backend.h" +#include "qapi/qapi-types-block.h" +#include "qemu/bswap.h" +#include "hw/block/block.h" +#include "trace.h" + +struct partition { + uint8_t boot_ind; /* 0x80 - active */ + uint8_t head; /* starting head */ + uint8_t sector; /* starting sector */ + uint8_t cyl; /* starting cylinder */ + uint8_t sys_ind; /* What partition type */ + uint8_t end_head; /* end head */ + uint8_t end_sector; /* end sector */ + uint8_t end_cyl; /* end cylinder */ + uint32_t start_sect; /* starting sector counting from 0 */ + uint32_t nr_sects; /* nr of sectors in partition */ +} QEMU_PACKED; + +/* try to guess the disk logical geometry from the MSDOS partition table. + Return 0 if OK, -1 if could not guess */ +static int guess_disk_lchs(BlockBackend *blk, + int *pcylinders, int *pheads, int *psectors) +{ + uint8_t buf[BDRV_SECTOR_SIZE]; + int i, heads, sectors, cylinders; + struct partition *p; + uint32_t nr_sects; + uint64_t nb_sectors; + + blk_get_geometry(blk, &nb_sectors); + + if (blk_pread(blk, 0, buf, BDRV_SECTOR_SIZE) < 0) { + return -1; + } + /* test msdos magic */ + if (buf[510] != 0x55 || buf[511] != 0xaa) { + return -1; + } + for (i = 0; i < 4; i++) { + p = ((struct partition *)(buf + 0x1be)) + i; + nr_sects = le32_to_cpu(p->nr_sects); + if (nr_sects && p->end_head) { + /* We make the assumption that the partition terminates on + a cylinder boundary */ + heads = p->end_head + 1; + sectors = p->end_sector & 63; + if (sectors == 0) { + continue; + } + cylinders = nb_sectors / (heads * sectors); + if (cylinders < 1 || cylinders > 16383) { + continue; + } + *pheads = heads; + *psectors = sectors; + *pcylinders = cylinders; + trace_hd_geometry_lchs_guess(blk, cylinders, heads, sectors); + return 0; + } + } + return -1; +} + +static void guess_chs_for_size(BlockBackend *blk, + uint32_t *pcyls, uint32_t *pheads, uint32_t *psecs) +{ + uint64_t nb_sectors; + int cylinders; + + blk_get_geometry(blk, &nb_sectors); + + cylinders = nb_sectors / (16 * 63); + if (cylinders > 16383) { + cylinders = 16383; + } else if (cylinders < 2) { + cylinders = 2; + } + *pcyls = cylinders; + *pheads = 16; + *psecs = 63; +} + +void hd_geometry_guess(BlockBackend *blk, + uint32_t *pcyls, uint32_t *pheads, uint32_t *psecs, + int *ptrans) +{ + int cylinders, heads, secs, translation; + HDGeometry geo; + + /* Try to probe the backing device geometry, otherwise fallback + to the old logic. (as of 12/2014 probing only succeeds on DASDs) */ + if (blk_probe_geometry(blk, &geo) == 0) { + *pcyls = geo.cylinders; + *psecs = geo.sectors; + *pheads = geo.heads; + translation = BIOS_ATA_TRANSLATION_NONE; + } else if (guess_disk_lchs(blk, &cylinders, &heads, &secs) < 0) { + /* no LCHS guess: use a standard physical disk geometry */ + guess_chs_for_size(blk, pcyls, pheads, psecs); + translation = hd_bios_chs_auto_trans(*pcyls, *pheads, *psecs); + } else if (heads > 16) { + /* LCHS guess with heads > 16 means that a BIOS LBA + translation was active, so a standard physical disk + geometry is OK */ + guess_chs_for_size(blk, pcyls, pheads, psecs); + translation = *pcyls * *pheads <= 131072 + ? 
BIOS_ATA_TRANSLATION_LARGE + : BIOS_ATA_TRANSLATION_LBA; + } else { + /* LCHS guess with heads <= 16: use as physical geometry */ + *pcyls = cylinders; + *pheads = heads; + *psecs = secs; + /* disable any translation to be in sync with + the logical geometry */ + translation = BIOS_ATA_TRANSLATION_NONE; + } + if (ptrans) { + *ptrans = translation; + } + trace_hd_geometry_guess(blk, *pcyls, *pheads, *psecs, translation); +} + +int hd_bios_chs_auto_trans(uint32_t cyls, uint32_t heads, uint32_t secs) +{ + return cyls <= 1024 && heads <= 16 && secs <= 63 + ? BIOS_ATA_TRANSLATION_NONE + : BIOS_ATA_TRANSLATION_LBA; +} diff --git a/hw/block/m25p80.c b/hw/block/m25p80.c new file mode 100644 index 000000000..b77503dc8 --- /dev/null +++ b/hw/block/m25p80.c @@ -0,0 +1,1661 @@ +/* + * ST M25P80 emulator. Emulate all SPI flash devices based on the m25p80 command + * set. Known devices table current as of Jun/2012 and taken from linux. + * See drivers/mtd/devices/m25p80.c. + * + * Copyright (C) 2011 Edgar E. Iglesias + * Copyright (C) 2012 Peter A. G. Crosthwaite + * Copyright (C) 2012 PetaLogix + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 or + * (at your option) a later version of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see <http://www.gnu.org/licenses/>. + */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "sysemu/block-backend.h" +#include "hw/qdev-properties.h" +#include "hw/qdev-properties-system.h" +#include "hw/ssi/ssi.h" +#include "migration/vmstate.h" +#include "qemu/bitops.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "qemu/error-report.h" +#include "qapi/error.h" +#include "trace.h" +#include "qom/object.h" + +/* Fields for FlashPartInfo->flags */ + +/* erase capabilities */ +#define ER_4K 1 +#define ER_32K 2 +/* set to allow the page program command to write 0s back to 1. Useful for + * modelling EEPROM with SPI flash command set + */ +#define EEPROM 0x100 + +/* 16 MiB max in 3 byte address mode */ +#define MAX_3BYTES_SIZE 0x1000000 + +#define SPI_NOR_MAX_ID_LEN 6 + +typedef struct FlashPartInfo { + const char *part_name; + /* + * This array stores the ID bytes. + * The first three bytes are the JEDEC ID. + * JEDEC ID zero means "no ID" (mostly older chips). + */ + uint8_t id[SPI_NOR_MAX_ID_LEN]; + uint8_t id_len; + /* there is confusion between manufacturers as to what a sector is. In this + * device model, a "sector" is the size that is erased by the ERASE_SECTOR + * command (opcode 0xd8). + */ + uint32_t sector_size; + uint32_t n_sectors; + uint32_t page_size; + uint16_t flags; + /* + * Big SPI NOR flashes are often stacked devices and thus sometimes + * replace chip erase with die erase. + * This field tells how many dies are in the chip.
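+ * For example, the "n25q00" entry in the table below declares four stacked + * dies (die_cnt = 4), so a DIE_ERASE command covers a quarter of the device; + * a die_cnt of 0 marks a monolithic part without die erase support.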
+ */ + uint8_t die_cnt; +} FlashPartInfo; + +/* adapted from linux */ +/* Used when the "_ext_id" is two bytes at most */ +#define INFO(_part_name, _jedec_id, _ext_id, _sector_size, _n_sectors, _flags)\ + .part_name = _part_name,\ + .id = {\ + ((_jedec_id) >> 16) & 0xff,\ + ((_jedec_id) >> 8) & 0xff,\ + (_jedec_id) & 0xff,\ + ((_ext_id) >> 8) & 0xff,\ + (_ext_id) & 0xff,\ + },\ + .id_len = (!(_jedec_id) ? 0 : (3 + ((_ext_id) ? 2 : 0))),\ + .sector_size = (_sector_size),\ + .n_sectors = (_n_sectors),\ + .page_size = 256,\ + .flags = (_flags),\ + .die_cnt = 0 + +#define INFO6(_part_name, _jedec_id, _ext_id, _sector_size, _n_sectors, _flags)\ + .part_name = _part_name,\ + .id = {\ + ((_jedec_id) >> 16) & 0xff,\ + ((_jedec_id) >> 8) & 0xff,\ + (_jedec_id) & 0xff,\ + ((_ext_id) >> 16) & 0xff,\ + ((_ext_id) >> 8) & 0xff,\ + (_ext_id) & 0xff,\ + },\ + .id_len = 6,\ + .sector_size = (_sector_size),\ + .n_sectors = (_n_sectors),\ + .page_size = 256,\ + .flags = (_flags),\ + .die_cnt = 0 + +#define INFO_STACKED(_part_name, _jedec_id, _ext_id, _sector_size, _n_sectors,\ + _flags, _die_cnt)\ + .part_name = _part_name,\ + .id = {\ + ((_jedec_id) >> 16) & 0xff,\ + ((_jedec_id) >> 8) & 0xff,\ + (_jedec_id) & 0xff,\ + ((_ext_id) >> 8) & 0xff,\ + (_ext_id) & 0xff,\ + },\ + .id_len = (!(_jedec_id) ? 0 : (3 + ((_ext_id) ? 2 : 0))),\ + .sector_size = (_sector_size),\ + .n_sectors = (_n_sectors),\ + .page_size = 256,\ + .flags = (_flags),\ + .die_cnt = _die_cnt + +#define JEDEC_NUMONYX 0x20 +#define JEDEC_WINBOND 0xEF +#define JEDEC_SPANSION 0x01 + +/* Numonyx (Micron) Configuration register macros */ +#define VCFG_DUMMY 0x1 +#define VCFG_WRAP_SEQUENTIAL 0x2 +#define NVCFG_XIP_MODE_DISABLED (7 << 9) +#define NVCFG_XIP_MODE_MASK (7 << 9) +#define VCFG_XIP_MODE_DISABLED (1 << 3) +#define CFG_DUMMY_CLK_LEN 4 +#define NVCFG_DUMMY_CLK_POS 12 +#define VCFG_DUMMY_CLK_POS 4 +#define EVCFG_OUT_DRIVER_STRENGTH_DEF 7 +#define EVCFG_VPP_ACCELERATOR (1 << 3) +#define EVCFG_RESET_HOLD_ENABLED (1 << 4) +#define NVCFG_DUAL_IO_MASK (1 << 2) +#define EVCFG_DUAL_IO_DISABLED (1 << 6) +#define NVCFG_QUAD_IO_MASK (1 << 3) +#define EVCFG_QUAD_IO_DISABLED (1 << 7) +#define NVCFG_4BYTE_ADDR_MASK (1 << 0) +#define NVCFG_LOWER_SEGMENT_MASK (1 << 1) + +/* Numonyx (Micron) Flag Status Register macros */ +#define FSR_4BYTE_ADDR_MODE_ENABLED 0x1 +#define FSR_FLASH_READY (1 << 7) + +/* Spansion configuration registers macros. */ +#define SPANSION_QUAD_CFG_POS 0 +#define SPANSION_QUAD_CFG_LEN 1 +#define SPANSION_DUMMY_CLK_POS 0 +#define SPANSION_DUMMY_CLK_LEN 4 +#define SPANSION_ADDR_LEN_POS 7 +#define SPANSION_ADDR_LEN_LEN 1 + +/* + * Spansion read mode command length in bytes, + * the mode is currently not supported. 
+*/ + +#define SPANSION_CONTINUOUS_READ_MODE_CMD_LEN 1 +#define WINBOND_CONTINUOUS_READ_MODE_CMD_LEN 1 + +static const FlashPartInfo known_devices[] = { + /* Atmel -- some are (confusingly) marketed as "DataFlash" */ + { INFO("at25fs010", 0x1f6601, 0, 32 << 10, 4, ER_4K) }, + { INFO("at25fs040", 0x1f6604, 0, 64 << 10, 8, ER_4K) }, + + { INFO("at25df041a", 0x1f4401, 0, 64 << 10, 8, ER_4K) }, + { INFO("at25df321a", 0x1f4701, 0, 64 << 10, 64, ER_4K) }, + { INFO("at25df641", 0x1f4800, 0, 64 << 10, 128, ER_4K) }, + + { INFO("at26f004", 0x1f0400, 0, 64 << 10, 8, ER_4K) }, + { INFO("at26df081a", 0x1f4501, 0, 64 << 10, 16, ER_4K) }, + { INFO("at26df161a", 0x1f4601, 0, 64 << 10, 32, ER_4K) }, + { INFO("at26df321", 0x1f4700, 0, 64 << 10, 64, ER_4K) }, + + { INFO("at45db081d", 0x1f2500, 0, 64 << 10, 16, ER_4K) }, + + /* Atmel EEPROMs - it is assumed that the don't-care bit in the command + * is set to 0. Block protection is not supported. + */ + { INFO("at25128a-nonjedec", 0x0, 0, 1, 131072, EEPROM) }, + { INFO("at25256a-nonjedec", 0x0, 0, 1, 262144, EEPROM) }, + + /* EON -- en25xxx */ + { INFO("en25f32", 0x1c3116, 0, 64 << 10, 64, ER_4K) }, + { INFO("en25p32", 0x1c2016, 0, 64 << 10, 64, 0) }, + { INFO("en25q32b", 0x1c3016, 0, 64 << 10, 64, 0) }, + { INFO("en25p64", 0x1c2017, 0, 64 << 10, 128, 0) }, + { INFO("en25q64", 0x1c3017, 0, 64 << 10, 128, ER_4K) }, + + /* GigaDevice */ + { INFO("gd25q32", 0xc84016, 0, 64 << 10, 64, ER_4K) }, + { INFO("gd25q64", 0xc84017, 0, 64 << 10, 128, ER_4K) }, + + /* Intel/Numonyx -- xxxs33b */ + { INFO("160s33b", 0x898911, 0, 64 << 10, 32, 0) }, + { INFO("320s33b", 0x898912, 0, 64 << 10, 64, 0) }, + { INFO("640s33b", 0x898913, 0, 64 << 10, 128, 0) }, + { INFO("n25q064", 0x20ba17, 0, 64 << 10, 128, 0) }, + + /* ISSI */ + { INFO("is25lq040b", 0x9d4013, 0, 64 << 10, 8, ER_4K) }, + { INFO("is25lp080d", 0x9d6014, 0, 64 << 10, 16, ER_4K) }, + { INFO("is25lp016d", 0x9d6015, 0, 64 << 10, 32, ER_4K) }, + { INFO("is25lp032", 0x9d6016, 0, 64 << 10, 64, ER_4K) }, + { INFO("is25lp064", 0x9d6017, 0, 64 << 10, 128, ER_4K) }, + { INFO("is25lp128", 0x9d6018, 0, 64 << 10, 256, ER_4K) }, + { INFO("is25lp256", 0x9d6019, 0, 64 << 10, 512, ER_4K) }, + { INFO("is25wp032", 0x9d7016, 0, 64 << 10, 64, ER_4K) }, + { INFO("is25wp064", 0x9d7017, 0, 64 << 10, 128, ER_4K) }, + { INFO("is25wp128", 0x9d7018, 0, 64 << 10, 256, ER_4K) }, + { INFO("is25wp256", 0x9d7019, 0, 64 << 10, 512, ER_4K) }, + + /* Macronix */ + { INFO("mx25l2005a", 0xc22012, 0, 64 << 10, 4, ER_4K) }, + { INFO("mx25l4005a", 0xc22013, 0, 64 << 10, 8, ER_4K) }, + { INFO("mx25l8005", 0xc22014, 0, 64 << 10, 16, 0) }, + { INFO("mx25l1606e", 0xc22015, 0, 64 << 10, 32, ER_4K) }, + { INFO("mx25l3205d", 0xc22016, 0, 64 << 10, 64, 0) }, + { INFO("mx25l6405d", 0xc22017, 0, 64 << 10, 128, 0) }, + { INFO("mx25l12805d", 0xc22018, 0, 64 << 10, 256, 0) }, + { INFO("mx25l12855e", 0xc22618, 0, 64 << 10, 256, 0) }, + { INFO6("mx25l25635e", 0xc22019, 0xc22019, 64 << 10, 512, 0) }, + { INFO("mx25l25655e", 0xc22619, 0, 64 << 10, 512, 0) }, + { INFO("mx66l51235f", 0xc2201a, 0, 64 << 10, 1024, ER_4K | ER_32K) }, + { INFO("mx66u51235f", 0xc2253a, 0, 64 << 10, 1024, ER_4K | ER_32K) }, + { INFO("mx66u1g45g", 0xc2253b, 0, 64 << 10, 2048, ER_4K | ER_32K) }, + { INFO("mx66l1g45g", 0xc2201b, 0, 64 << 10, 2048, ER_4K | ER_32K) }, + + /* Micron */ + { INFO("n25q032a11", 0x20bb16, 0, 64 << 10, 64, ER_4K) }, + { INFO("n25q032a13", 0x20ba16, 0, 64 << 10, 64, ER_4K) }, + { INFO("n25q064a11", 0x20bb17, 0, 64 << 10, 128, ER_4K) }, + { INFO("n25q064a13", 0x20ba17, 0, 64 << 10,
128, ER_4K) }, + { INFO("n25q128a11", 0x20bb18, 0, 64 << 10, 256, ER_4K) }, + { INFO("n25q128a13", 0x20ba18, 0, 64 << 10, 256, ER_4K) }, + { INFO("n25q256a11", 0x20bb19, 0, 64 << 10, 512, ER_4K) }, + { INFO("n25q256a13", 0x20ba19, 0, 64 << 10, 512, ER_4K) }, + { INFO("n25q512a11", 0x20bb20, 0, 64 << 10, 1024, ER_4K) }, + { INFO("n25q512a13", 0x20ba20, 0, 64 << 10, 1024, ER_4K) }, + { INFO("n25q128", 0x20ba18, 0, 64 << 10, 256, 0) }, + { INFO("n25q256a", 0x20ba19, 0, 64 << 10, 512, ER_4K) }, + { INFO("n25q512a", 0x20ba20, 0, 64 << 10, 1024, ER_4K) }, + { INFO("n25q512ax3", 0x20ba20, 0x1000, 64 << 10, 1024, ER_4K) }, + { INFO("mt25ql512ab", 0x20ba20, 0x1044, 64 << 10, 1024, ER_4K | ER_32K) }, + { INFO_STACKED("n25q00", 0x20ba21, 0x1000, 64 << 10, 2048, ER_4K, 4) }, + { INFO_STACKED("n25q00a", 0x20bb21, 0x1000, 64 << 10, 2048, ER_4K, 4) }, + { INFO_STACKED("mt25ql01g", 0x20ba21, 0x1040, 64 << 10, 2048, ER_4K, 2) }, + { INFO_STACKED("mt25qu01g", 0x20bb21, 0x1040, 64 << 10, 2048, ER_4K, 2) }, + { INFO_STACKED("mt25ql02g", 0x20ba22, 0x1040, 64 << 10, 4096, ER_4K | ER_32K, 2) }, + { INFO_STACKED("mt25qu02g", 0x20bb22, 0x1040, 64 << 10, 4096, ER_4K | ER_32K, 2) }, + + /* Spansion -- single (large) sector size only, at least + * for the chips listed here (without boot sectors). + */ + { INFO("s25sl032p", 0x010215, 0x4d00, 64 << 10, 64, ER_4K) }, + { INFO("s25sl064p", 0x010216, 0x4d00, 64 << 10, 128, ER_4K) }, + { INFO("s25fl256s0", 0x010219, 0x4d00, 256 << 10, 128, 0) }, + { INFO("s25fl256s1", 0x010219, 0x4d01, 64 << 10, 512, 0) }, + { INFO6("s25fl512s", 0x010220, 0x4d0080, 256 << 10, 256, 0) }, + { INFO6("s70fl01gs", 0x010221, 0x4d0080, 256 << 10, 512, 0) }, + { INFO("s25sl12800", 0x012018, 0x0300, 256 << 10, 64, 0) }, + { INFO("s25sl12801", 0x012018, 0x0301, 64 << 10, 256, 0) }, + { INFO("s25fl129p0", 0x012018, 0x4d00, 256 << 10, 64, 0) }, + { INFO("s25fl129p1", 0x012018, 0x4d01, 64 << 10, 256, 0) }, + { INFO("s25sl004a", 0x010212, 0, 64 << 10, 8, 0) }, + { INFO("s25sl008a", 0x010213, 0, 64 << 10, 16, 0) }, + { INFO("s25sl016a", 0x010214, 0, 64 << 10, 32, 0) }, + { INFO("s25sl032a", 0x010215, 0, 64 << 10, 64, 0) }, + { INFO("s25sl064a", 0x010216, 0, 64 << 10, 128, 0) }, + { INFO("s25fl016k", 0xef4015, 0, 64 << 10, 32, ER_4K | ER_32K) }, + { INFO("s25fl064k", 0xef4017, 0, 64 << 10, 128, ER_4K | ER_32K) }, + + /* Spansion -- boot sectors support */ + { INFO6("s25fs512s", 0x010220, 0x4d0081, 256 << 10, 256, 0) }, + { INFO6("s70fs01gs", 0x010221, 0x4d0081, 256 << 10, 512, 0) }, + + /* SST -- large erase sizes are "overlays", "sectors" are 4<< 10 */ + { INFO("sst25vf040b", 0xbf258d, 0, 64 << 10, 8, ER_4K) }, + { INFO("sst25vf080b", 0xbf258e, 0, 64 << 10, 16, ER_4K) }, + { INFO("sst25vf016b", 0xbf2541, 0, 64 << 10, 32, ER_4K) }, + { INFO("sst25vf032b", 0xbf254a, 0, 64 << 10, 64, ER_4K) }, + { INFO("sst25wf512", 0xbf2501, 0, 64 << 10, 1, ER_4K) }, + { INFO("sst25wf010", 0xbf2502, 0, 64 << 10, 2, ER_4K) }, + { INFO("sst25wf020", 0xbf2503, 0, 64 << 10, 4, ER_4K) }, + { INFO("sst25wf040", 0xbf2504, 0, 64 << 10, 8, ER_4K) }, + { INFO("sst25wf080", 0xbf2505, 0, 64 << 10, 16, ER_4K) }, + + /* ST Microelectronics -- newer production may have feature updates */ + { INFO("m25p05", 0x202010, 0, 32 << 10, 2, 0) }, + { INFO("m25p10", 0x202011, 0, 32 << 10, 4, 0) }, + { INFO("m25p20", 0x202012, 0, 64 << 10, 4, 0) }, + { INFO("m25p40", 0x202013, 0, 64 << 10, 8, 0) }, + { INFO("m25p80", 0x202014, 0, 64 << 10, 16, 0) }, + { INFO("m25p16", 0x202015, 0, 64 << 10, 32, 0) }, + { INFO("m25p32", 0x202016, 0, 64 << 10, 64, 0) 
}, + { INFO("m25p64", 0x202017, 0, 64 << 10, 128, 0) }, + { INFO("m25p128", 0x202018, 0, 256 << 10, 64, 0) }, + { INFO("n25q032", 0x20ba16, 0, 64 << 10, 64, 0) }, + + { INFO("m45pe10", 0x204011, 0, 64 << 10, 2, 0) }, + { INFO("m45pe80", 0x204014, 0, 64 << 10, 16, 0) }, + { INFO("m45pe16", 0x204015, 0, 64 << 10, 32, 0) }, + + { INFO("m25pe20", 0x208012, 0, 64 << 10, 4, 0) }, + { INFO("m25pe80", 0x208014, 0, 64 << 10, 16, 0) }, + { INFO("m25pe16", 0x208015, 0, 64 << 10, 32, ER_4K) }, + + { INFO("m25px32", 0x207116, 0, 64 << 10, 64, ER_4K) }, + { INFO("m25px32-s0", 0x207316, 0, 64 << 10, 64, ER_4K) }, + { INFO("m25px32-s1", 0x206316, 0, 64 << 10, 64, ER_4K) }, + { INFO("m25px64", 0x207117, 0, 64 << 10, 128, 0) }, + + /* Winbond -- w25x "blocks" are 64k, "sectors" are 4KiB */ + { INFO("w25x10", 0xef3011, 0, 64 << 10, 2, ER_4K) }, + { INFO("w25x20", 0xef3012, 0, 64 << 10, 4, ER_4K) }, + { INFO("w25x40", 0xef3013, 0, 64 << 10, 8, ER_4K) }, + { INFO("w25x80", 0xef3014, 0, 64 << 10, 16, ER_4K) }, + { INFO("w25x16", 0xef3015, 0, 64 << 10, 32, ER_4K) }, + { INFO("w25x32", 0xef3016, 0, 64 << 10, 64, ER_4K) }, + { INFO("w25q32", 0xef4016, 0, 64 << 10, 64, ER_4K) }, + { INFO("w25q32dw", 0xef6016, 0, 64 << 10, 64, ER_4K) }, + { INFO("w25x64", 0xef3017, 0, 64 << 10, 128, ER_4K) }, + { INFO("w25q64", 0xef4017, 0, 64 << 10, 128, ER_4K) }, + { INFO("w25q80", 0xef5014, 0, 64 << 10, 16, ER_4K) }, + { INFO("w25q80bl", 0xef4014, 0, 64 << 10, 16, ER_4K) }, + { INFO("w25q256", 0xef4019, 0, 64 << 10, 512, ER_4K) }, + { INFO("w25q512jv", 0xef4020, 0, 64 << 10, 1024, ER_4K) }, +}; + +typedef enum { + NOP = 0, + WRSR = 0x1, + WRDI = 0x4, + RDSR = 0x5, + WREN = 0x6, + BRRD = 0x16, + BRWR = 0x17, + JEDEC_READ = 0x9f, + BULK_ERASE_60 = 0x60, + BULK_ERASE = 0xc7, + READ_FSR = 0x70, + RDCR = 0x15, + + READ = 0x03, + READ4 = 0x13, + FAST_READ = 0x0b, + FAST_READ4 = 0x0c, + DOR = 0x3b, + DOR4 = 0x3c, + QOR = 0x6b, + QOR4 = 0x6c, + DIOR = 0xbb, + DIOR4 = 0xbc, + QIOR = 0xeb, + QIOR4 = 0xec, + + PP = 0x02, + PP4 = 0x12, + PP4_4 = 0x3e, + DPP = 0xa2, + QPP = 0x32, + QPP_4 = 0x34, + RDID_90 = 0x90, + RDID_AB = 0xab, + AAI_WP = 0xad, + + ERASE_4K = 0x20, + ERASE4_4K = 0x21, + ERASE_32K = 0x52, + ERASE4_32K = 0x5c, + ERASE_SECTOR = 0xd8, + ERASE4_SECTOR = 0xdc, + + EN_4BYTE_ADDR = 0xB7, + EX_4BYTE_ADDR = 0xE9, + + EXTEND_ADDR_READ = 0xC8, + EXTEND_ADDR_WRITE = 0xC5, + + RESET_ENABLE = 0x66, + RESET_MEMORY = 0x99, + + /* + * Micron: 0x35 - enable QPI + * Spansion: 0x35 - read control register + */ + RDCR_EQIO = 0x35, + RSTQIO = 0xf5, + + RNVCR = 0xB5, + WNVCR = 0xB1, + + RVCR = 0x85, + WVCR = 0x81, + + REVCR = 0x65, + WEVCR = 0x61, + + DIE_ERASE = 0xC4, +} FlashCMD; + +typedef enum { + STATE_IDLE, + STATE_PAGE_PROGRAM, + STATE_READ, + STATE_COLLECTING_DATA, + STATE_COLLECTING_VAR_LEN_DATA, + STATE_READING_DATA, +} CMDState; + +typedef enum { + MAN_SPANSION, + MAN_MACRONIX, + MAN_NUMONYX, + MAN_WINBOND, + MAN_SST, + MAN_ISSI, + MAN_GENERIC, +} Manufacturer; + +typedef enum { + MODE_STD = 0, + MODE_DIO = 1, + MODE_QIO = 2 +} SPIMode; + +#define M25P80_INTERNAL_DATA_BUFFER_SZ 16 + +struct Flash { + SSIPeripheral parent_obj; + + BlockBackend *blk; + + uint8_t *storage; + uint32_t size; + int page_size; + + uint8_t state; + uint8_t data[M25P80_INTERNAL_DATA_BUFFER_SZ]; + uint32_t len; + uint32_t pos; + bool data_read_loop; + uint8_t needed_bytes; + uint8_t cmd_in_progress; + uint32_t cur_addr; + uint32_t nonvolatile_cfg; + /* Configuration register for Macronix */ + uint32_t volatile_cfg; + uint32_t enh_volatile_cfg; + /* Spansion cfg 
registers. */ + uint8_t spansion_cr1nv; + uint8_t spansion_cr2nv; + uint8_t spansion_cr3nv; + uint8_t spansion_cr4nv; + uint8_t spansion_cr1v; + uint8_t spansion_cr2v; + uint8_t spansion_cr3v; + uint8_t spansion_cr4v; + bool write_enable; + bool four_bytes_address_mode; + bool reset_enable; + bool quad_enable; + bool aai_enable; + uint8_t ear; + + int64_t dirty_page; + + const FlashPartInfo *pi; + +}; + +struct M25P80Class { + SSIPeripheralClass parent_class; + FlashPartInfo *pi; +}; + +#define TYPE_M25P80 "m25p80-generic" +OBJECT_DECLARE_TYPE(Flash, M25P80Class, M25P80) + +static inline Manufacturer get_man(Flash *s) +{ + switch (s->pi->id[0]) { + case 0x20: + return MAN_NUMONYX; + case 0xEF: + return MAN_WINBOND; + case 0x01: + return MAN_SPANSION; + case 0xC2: + return MAN_MACRONIX; + case 0xBF: + return MAN_SST; + case 0x9D: + return MAN_ISSI; + default: + return MAN_GENERIC; + } +} + +static void blk_sync_complete(void *opaque, int ret) +{ + QEMUIOVector *iov = opaque; + + qemu_iovec_destroy(iov); + g_free(iov); + + /* do nothing. Masters do not directly interact with the backing store, + * only the working copy so no mutexing required. + */ +} + +static void flash_sync_page(Flash *s, int page) +{ + QEMUIOVector *iov; + + if (!s->blk || !blk_is_writable(s->blk)) { + return; + } + + iov = g_new(QEMUIOVector, 1); + qemu_iovec_init(iov, 1); + qemu_iovec_add(iov, s->storage + page * s->pi->page_size, + s->pi->page_size); + blk_aio_pwritev(s->blk, page * s->pi->page_size, iov, 0, + blk_sync_complete, iov); +} + +static inline void flash_sync_area(Flash *s, int64_t off, int64_t len) +{ + QEMUIOVector *iov; + + if (!s->blk || !blk_is_writable(s->blk)) { + return; + } + + assert(!(len % BDRV_SECTOR_SIZE)); + iov = g_new(QEMUIOVector, 1); + qemu_iovec_init(iov, 1); + qemu_iovec_add(iov, s->storage + off, len); + blk_aio_pwritev(s->blk, off, iov, 0, blk_sync_complete, iov); +} + +static void flash_erase(Flash *s, int offset, FlashCMD cmd) +{ + uint32_t len; + uint8_t capa_to_assert = 0; + + switch (cmd) { + case ERASE_4K: + case ERASE4_4K: + len = 4 * KiB; + capa_to_assert = ER_4K; + break; + case ERASE_32K: + case ERASE4_32K: + len = 32 * KiB; + capa_to_assert = ER_32K; + break; + case ERASE_SECTOR: + case ERASE4_SECTOR: + len = s->pi->sector_size; + break; + case BULK_ERASE: + len = s->size; + break; + case DIE_ERASE: + if (s->pi->die_cnt) { + len = s->size / s->pi->die_cnt; + offset = offset & (~(len - 1)); + } else { + qemu_log_mask(LOG_GUEST_ERROR, "M25P80: die erase is not supported" + " by device\n"); + return; + } + break; + default: + abort(); + } + + trace_m25p80_flash_erase(s, offset, len); + + if ((s->pi->flags & capa_to_assert) != capa_to_assert) { + qemu_log_mask(LOG_GUEST_ERROR, "M25P80: %d erase size not supported by" + " device\n", len); + } + + if (!s->write_enable) { + qemu_log_mask(LOG_GUEST_ERROR, "M25P80: erase with write protect!\n"); + return; + } + memset(s->storage + offset, 0xff, len); + flash_sync_area(s, offset, len); +} + +static inline void flash_sync_dirty(Flash *s, int64_t newpage) +{ + if (s->dirty_page >= 0 && s->dirty_page != newpage) { + flash_sync_page(s, s->dirty_page); + s->dirty_page = newpage; + } +} + +static inline +void flash_write8(Flash *s, uint32_t addr, uint8_t data) +{ + uint32_t page = addr / s->pi->page_size; + uint8_t prev = s->storage[s->cur_addr]; + + if (!s->write_enable) { + qemu_log_mask(LOG_GUEST_ERROR, "M25P80: write with write protect!\n"); + return; + } + + if ((prev ^ data) & data) { + trace_m25p80_programming_zero_to_one(s, addr, 
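+ /*
+  * Illustrative note (an addition, not from the original patch): as on
+  * real NOR flash, programming can only clear bits -- the store is
+  * "storage &= data" -- so writing 0xF0 over existing 0x0F leaves
+  * 0x00; any attempted 0 -> 1 transition is reported by this trace
+  * point and silently dropped by the AND.  Bits return to 1 only via
+  * flash_erase().
+  */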
prev, data); + } + + if (s->pi->flags & EEPROM) { + s->storage[s->cur_addr] = data; + } else { + s->storage[s->cur_addr] &= data; + } + + flash_sync_dirty(s, page); + s->dirty_page = page; +} + +static inline int get_addr_length(Flash *s) +{ + /* check if eeprom is in use */ + if (s->pi->flags == EEPROM) { + return 2; + } + + switch (s->cmd_in_progress) { + case PP4: + case PP4_4: + case QPP_4: + case READ4: + case QIOR4: + case ERASE4_4K: + case ERASE4_32K: + case ERASE4_SECTOR: + case FAST_READ4: + case DOR4: + case QOR4: + case DIOR4: + return 4; + default: + return s->four_bytes_address_mode ? 4 : 3; + } +} + +static void complete_collecting_data(Flash *s) +{ + int i, n; + + n = get_addr_length(s); + s->cur_addr = (n == 3 ? s->ear : 0); + for (i = 0; i < n; ++i) { + s->cur_addr <<= 8; + s->cur_addr |= s->data[i]; + } + + s->cur_addr &= s->size - 1; + + s->state = STATE_IDLE; + + trace_m25p80_complete_collecting(s, s->cmd_in_progress, n, s->ear, + s->cur_addr); + + switch (s->cmd_in_progress) { + case DPP: + case QPP: + case QPP_4: + case PP: + case PP4: + case PP4_4: + s->state = STATE_PAGE_PROGRAM; + break; + case AAI_WP: + /* AAI programming starts from the even address */ + s->cur_addr &= ~BIT(0); + s->state = STATE_PAGE_PROGRAM; + break; + case READ: + case READ4: + case FAST_READ: + case FAST_READ4: + case DOR: + case DOR4: + case QOR: + case QOR4: + case DIOR: + case DIOR4: + case QIOR: + case QIOR4: + s->state = STATE_READ; + break; + case ERASE_4K: + case ERASE4_4K: + case ERASE_32K: + case ERASE4_32K: + case ERASE_SECTOR: + case ERASE4_SECTOR: + case DIE_ERASE: + flash_erase(s, s->cur_addr, s->cmd_in_progress); + break; + case WRSR: + switch (get_man(s)) { + case MAN_SPANSION: + s->quad_enable = !!(s->data[1] & 0x02); + break; + case MAN_ISSI: + s->quad_enable = extract32(s->data[0], 6, 1); + break; + case MAN_MACRONIX: + s->quad_enable = extract32(s->data[0], 6, 1); + if (s->len > 1) { + s->volatile_cfg = s->data[1]; + s->four_bytes_address_mode = extract32(s->data[1], 5, 1); + } + break; + default: + break; + } + if (s->write_enable) { + s->write_enable = false; + } + break; + case BRWR: + case EXTEND_ADDR_WRITE: + s->ear = s->data[0]; + break; + case WNVCR: + s->nonvolatile_cfg = s->data[0] | (s->data[1] << 8); + break; + case WVCR: + s->volatile_cfg = s->data[0]; + break; + case WEVCR: + s->enh_volatile_cfg = s->data[0]; + break; + case RDID_90: + case RDID_AB: + if (get_man(s) == MAN_SST) { + if (s->cur_addr <= 1) { + if (s->cur_addr) { + s->data[0] = s->pi->id[2]; + s->data[1] = s->pi->id[0]; + } else { + s->data[0] = s->pi->id[0]; + s->data[1] = s->pi->id[2]; + } + s->pos = 0; + s->len = 2; + s->data_read_loop = true; + s->state = STATE_READING_DATA; + } else { + qemu_log_mask(LOG_GUEST_ERROR, + "M25P80: Invalid read id address\n"); + } + } else { + qemu_log_mask(LOG_GUEST_ERROR, + "M25P80: Read id (command 0x90/0xAB) is not supported" + " by device\n"); + } + break; + default: + break; + } +} + +static void reset_memory(Flash *s) +{ + s->cmd_in_progress = NOP; + s->cur_addr = 0; + s->ear = 0; + s->four_bytes_address_mode = false; + s->len = 0; + s->needed_bytes = 0; + s->pos = 0; + s->state = STATE_IDLE; + s->write_enable = false; + s->reset_enable = false; + s->quad_enable = false; + s->aai_enable = false; + + switch (get_man(s)) { + case MAN_NUMONYX: + s->volatile_cfg = 0; + s->volatile_cfg |= VCFG_DUMMY; + s->volatile_cfg |= VCFG_WRAP_SEQUENTIAL; + if ((s->nonvolatile_cfg & NVCFG_XIP_MODE_MASK) + == NVCFG_XIP_MODE_DISABLED) { + s->volatile_cfg |= 
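+ /*
+  * Illustrative note (an addition, not from the original patch): on
+  * Numonyx parts the volatile configuration register is re-seeded
+  * from the non-volatile one at reset -- e.g. with the default
+  * "nonvolatile-cfg" property value of 0x8FFF the XIP field reads
+  * back as disabled and the dummy-clock count is copied across
+  * unchanged.
+  */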
VCFG_XIP_MODE_DISABLED; + } + s->volatile_cfg |= deposit32(s->volatile_cfg, + VCFG_DUMMY_CLK_POS, + CFG_DUMMY_CLK_LEN, + extract32(s->nonvolatile_cfg, + NVCFG_DUMMY_CLK_POS, + CFG_DUMMY_CLK_LEN) + ); + + s->enh_volatile_cfg = 0; + s->enh_volatile_cfg |= EVCFG_OUT_DRIVER_STRENGTH_DEF; + s->enh_volatile_cfg |= EVCFG_VPP_ACCELERATOR; + s->enh_volatile_cfg |= EVCFG_RESET_HOLD_ENABLED; + if (s->nonvolatile_cfg & NVCFG_DUAL_IO_MASK) { + s->enh_volatile_cfg |= EVCFG_DUAL_IO_DISABLED; + } + if (s->nonvolatile_cfg & NVCFG_QUAD_IO_MASK) { + s->enh_volatile_cfg |= EVCFG_QUAD_IO_DISABLED; + } + if (!(s->nonvolatile_cfg & NVCFG_4BYTE_ADDR_MASK)) { + s->four_bytes_address_mode = true; + } + if (!(s->nonvolatile_cfg & NVCFG_LOWER_SEGMENT_MASK)) { + s->ear = s->size / MAX_3BYTES_SIZE - 1; + } + break; + case MAN_MACRONIX: + s->volatile_cfg = 0x7; + break; + case MAN_SPANSION: + s->spansion_cr1v = s->spansion_cr1nv; + s->spansion_cr2v = s->spansion_cr2nv; + s->spansion_cr3v = s->spansion_cr3nv; + s->spansion_cr4v = s->spansion_cr4nv; + s->quad_enable = extract32(s->spansion_cr1v, + SPANSION_QUAD_CFG_POS, + SPANSION_QUAD_CFG_LEN + ); + s->four_bytes_address_mode = extract32(s->spansion_cr2v, + SPANSION_ADDR_LEN_POS, + SPANSION_ADDR_LEN_LEN + ); + break; + default: + break; + } + + trace_m25p80_reset_done(s); +} + +static uint8_t numonyx_mode(Flash *s) +{ + if (!(s->enh_volatile_cfg & EVCFG_QUAD_IO_DISABLED)) { + return MODE_QIO; + } else if (!(s->enh_volatile_cfg & EVCFG_DUAL_IO_DISABLED)) { + return MODE_DIO; + } else { + return MODE_STD; + } +} + +static uint8_t numonyx_extract_cfg_num_dummies(Flash *s) +{ + uint8_t num_dummies; + uint8_t mode; + assert(get_man(s) == MAN_NUMONYX); + + mode = numonyx_mode(s); + num_dummies = extract32(s->volatile_cfg, 4, 4); + + if (num_dummies == 0x0 || num_dummies == 0xf) { + switch (s->cmd_in_progress) { + case QIOR: + case QIOR4: + num_dummies = 10; + break; + default: + num_dummies = (mode == MODE_QIO) ? 10 : 8; + break; + } + } + + return num_dummies; +} + +static void decode_fast_read_cmd(Flash *s) +{ + s->needed_bytes = get_addr_length(s); + switch (get_man(s)) { + /* Dummy cycles - modeled with bytes writes instead of bits */ + case MAN_SST: + s->needed_bytes += 1; + break; + case MAN_WINBOND: + s->needed_bytes += 8; + break; + case MAN_NUMONYX: + s->needed_bytes += numonyx_extract_cfg_num_dummies(s); + break; + case MAN_MACRONIX: + if (extract32(s->volatile_cfg, 6, 2) == 1) { + s->needed_bytes += 6; + } else { + s->needed_bytes += 8; + } + break; + case MAN_SPANSION: + s->needed_bytes += extract32(s->spansion_cr2v, + SPANSION_DUMMY_CLK_POS, + SPANSION_DUMMY_CLK_LEN + ); + break; + case MAN_ISSI: + /* + * The Fast Read instruction code is followed by address bytes and + * dummy cycles, transmitted via the SI line. + * + * The number of dummy cycles is configurable but this is currently + * unmodeled, hence the default value 8 is used. + * + * QPI (Quad Peripheral Interface) mode has different default value + * of dummy cycles, but this is unsupported at the time being. 
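+ *
+ * (Illustrative addition, not from the original patch.) Because dummy
+ * cycles are modeled as whole dummy bytes, a guest FAST_READ on a
+ * 3-byte-address ISSI part clocks out: 0x0B, addr[23:16], addr[15:8],
+ * addr[7:0], one dummy byte, then data -- i.e. needed_bytes here is
+ * get_addr_length(s) + 1.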
+ */ + s->needed_bytes += 1; + break; + default: + break; + } + s->pos = 0; + s->len = 0; + s->state = STATE_COLLECTING_DATA; +} + +static void decode_dio_read_cmd(Flash *s) +{ + s->needed_bytes = get_addr_length(s); + /* Dummy cycles modeled with bytes writes instead of bits */ + switch (get_man(s)) { + case MAN_WINBOND: + s->needed_bytes += WINBOND_CONTINUOUS_READ_MODE_CMD_LEN; + break; + case MAN_SPANSION: + s->needed_bytes += SPANSION_CONTINUOUS_READ_MODE_CMD_LEN; + s->needed_bytes += extract32(s->spansion_cr2v, + SPANSION_DUMMY_CLK_POS, + SPANSION_DUMMY_CLK_LEN + ); + break; + case MAN_NUMONYX: + s->needed_bytes += numonyx_extract_cfg_num_dummies(s); + break; + case MAN_MACRONIX: + switch (extract32(s->volatile_cfg, 6, 2)) { + case 1: + s->needed_bytes += 6; + break; + case 2: + s->needed_bytes += 8; + break; + default: + s->needed_bytes += 4; + break; + } + break; + case MAN_ISSI: + /* + * The Fast Read Dual I/O instruction code is followed by address bytes + * and dummy cycles, transmitted via the IO1 and IO0 line. + * + * The number of dummy cycles is configurable but this is currently + * unmodeled, hence the default value 4 is used. + */ + s->needed_bytes += 1; + break; + default: + break; + } + s->pos = 0; + s->len = 0; + s->state = STATE_COLLECTING_DATA; +} + +static void decode_qio_read_cmd(Flash *s) +{ + s->needed_bytes = get_addr_length(s); + /* Dummy cycles modeled with bytes writes instead of bits */ + switch (get_man(s)) { + case MAN_WINBOND: + s->needed_bytes += WINBOND_CONTINUOUS_READ_MODE_CMD_LEN; + s->needed_bytes += 4; + break; + case MAN_SPANSION: + s->needed_bytes += SPANSION_CONTINUOUS_READ_MODE_CMD_LEN; + s->needed_bytes += extract32(s->spansion_cr2v, + SPANSION_DUMMY_CLK_POS, + SPANSION_DUMMY_CLK_LEN + ); + break; + case MAN_NUMONYX: + s->needed_bytes += numonyx_extract_cfg_num_dummies(s); + break; + case MAN_MACRONIX: + switch (extract32(s->volatile_cfg, 6, 2)) { + case 1: + s->needed_bytes += 4; + break; + case 2: + s->needed_bytes += 8; + break; + default: + s->needed_bytes += 6; + break; + } + break; + case MAN_ISSI: + /* + * The Fast Read Quad I/O instruction code is followed by address bytes + * and dummy cycles, transmitted via the IO3, IO2, IO1 and IO0 line. + * + * The number of dummy cycles is configurable but this is currently + * unmodeled, hence the default value 6 is used. + * + * QPI (Quad Peripheral Interface) mode has different default value + * of dummy cycles, but this is unsupported at the time being. 
+ */ + s->needed_bytes += 3; + break; + default: + break; + } + s->pos = 0; + s->len = 0; + s->state = STATE_COLLECTING_DATA; +} + +static bool is_valid_aai_cmd(uint32_t cmd) +{ + return cmd == AAI_WP || cmd == WRDI || cmd == RDSR; +} + +static void decode_new_cmd(Flash *s, uint32_t value) +{ + int i; + + s->cmd_in_progress = value; + trace_m25p80_command_decoded(s, value); + + if (value != RESET_MEMORY) { + s->reset_enable = false; + } + + if (get_man(s) == MAN_SST && s->aai_enable && !is_valid_aai_cmd(value)) { + qemu_log_mask(LOG_GUEST_ERROR, + "M25P80: Invalid cmd within AAI programming sequence"); + } + + switch (value) { + + case ERASE_4K: + case ERASE4_4K: + case ERASE_32K: + case ERASE4_32K: + case ERASE_SECTOR: + case ERASE4_SECTOR: + case PP: + case PP4: + case DIE_ERASE: + case RDID_90: + case RDID_AB: + s->needed_bytes = get_addr_length(s); + s->pos = 0; + s->len = 0; + s->state = STATE_COLLECTING_DATA; + break; + case READ: + case READ4: + if (get_man(s) != MAN_NUMONYX || numonyx_mode(s) == MODE_STD) { + s->needed_bytes = get_addr_length(s); + s->pos = 0; + s->len = 0; + s->state = STATE_COLLECTING_DATA; + } else { + qemu_log_mask(LOG_GUEST_ERROR, "M25P80: Cannot execute cmd %x in " + "DIO or QIO mode\n", s->cmd_in_progress); + } + break; + case DPP: + if (get_man(s) != MAN_NUMONYX || numonyx_mode(s) != MODE_QIO) { + s->needed_bytes = get_addr_length(s); + s->pos = 0; + s->len = 0; + s->state = STATE_COLLECTING_DATA; + } else { + qemu_log_mask(LOG_GUEST_ERROR, "M25P80: Cannot execute cmd %x in " + "QIO mode\n", s->cmd_in_progress); + } + break; + case QPP: + case QPP_4: + case PP4_4: + if (get_man(s) != MAN_NUMONYX || numonyx_mode(s) != MODE_DIO) { + s->needed_bytes = get_addr_length(s); + s->pos = 0; + s->len = 0; + s->state = STATE_COLLECTING_DATA; + } else { + qemu_log_mask(LOG_GUEST_ERROR, "M25P80: Cannot execute cmd %x in " + "DIO mode\n", s->cmd_in_progress); + } + break; + + case FAST_READ: + case FAST_READ4: + decode_fast_read_cmd(s); + break; + case DOR: + case DOR4: + if (get_man(s) != MAN_NUMONYX || numonyx_mode(s) != MODE_QIO) { + decode_fast_read_cmd(s); + } else { + qemu_log_mask(LOG_GUEST_ERROR, "M25P80: Cannot execute cmd %x in " + "QIO mode\n", s->cmd_in_progress); + } + break; + case QOR: + case QOR4: + if (get_man(s) != MAN_NUMONYX || numonyx_mode(s) != MODE_DIO) { + decode_fast_read_cmd(s); + } else { + qemu_log_mask(LOG_GUEST_ERROR, "M25P80: Cannot execute cmd %x in " + "DIO mode\n", s->cmd_in_progress); + } + break; + + case DIOR: + case DIOR4: + if (get_man(s) != MAN_NUMONYX || numonyx_mode(s) != MODE_QIO) { + decode_dio_read_cmd(s); + } else { + qemu_log_mask(LOG_GUEST_ERROR, "M25P80: Cannot execute cmd %x in " + "QIO mode\n", s->cmd_in_progress); + } + break; + + case QIOR: + case QIOR4: + if (get_man(s) != MAN_NUMONYX || numonyx_mode(s) != MODE_DIO) { + decode_qio_read_cmd(s); + } else { + qemu_log_mask(LOG_GUEST_ERROR, "M25P80: Cannot execute cmd %x in " + "DIO mode\n", s->cmd_in_progress); + } + break; + + case WRSR: + if (s->write_enable) { + switch (get_man(s)) { + case MAN_SPANSION: + s->needed_bytes = 2; + s->state = STATE_COLLECTING_DATA; + break; + case MAN_MACRONIX: + s->needed_bytes = 2; + s->state = STATE_COLLECTING_VAR_LEN_DATA; + break; + default: + s->needed_bytes = 1; + s->state = STATE_COLLECTING_DATA; + } + s->pos = 0; + } + break; + + case WRDI: + s->write_enable = false; + if (get_man(s) == MAN_SST) { + s->aai_enable = false; + } + break; + case WREN: + s->write_enable = true; + break; + + case RDSR: + s->data[0] = 
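+ /*
+  * Illustrative note (an addition, not from the original patch): in
+  * the modeled status byte, bit 1 is WEL (the write-enable latch);
+  * bit 6 doubles as QE (quad enable) on Macronix/ISSI parts and as
+  * the AAI-mode flag on SST parts, which is what the ORs below
+  * encode.
+  */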
(!!s->write_enable) << 1; + if (get_man(s) == MAN_MACRONIX || get_man(s) == MAN_ISSI) { + s->data[0] |= (!!s->quad_enable) << 6; + } + if (get_man(s) == MAN_SST) { + s->data[0] |= (!!s->aai_enable) << 6; + } + + s->pos = 0; + s->len = 1; + s->data_read_loop = true; + s->state = STATE_READING_DATA; + break; + + case READ_FSR: + s->data[0] = FSR_FLASH_READY; + if (s->four_bytes_address_mode) { + s->data[0] |= FSR_4BYTE_ADDR_MODE_ENABLED; + } + s->pos = 0; + s->len = 1; + s->data_read_loop = true; + s->state = STATE_READING_DATA; + break; + + case JEDEC_READ: + if (get_man(s) != MAN_NUMONYX || numonyx_mode(s) == MODE_STD) { + trace_m25p80_populated_jedec(s); + for (i = 0; i < s->pi->id_len; i++) { + s->data[i] = s->pi->id[i]; + } + for (; i < SPI_NOR_MAX_ID_LEN; i++) { + s->data[i] = 0; + } + + s->len = SPI_NOR_MAX_ID_LEN; + s->pos = 0; + s->state = STATE_READING_DATA; + } else { + qemu_log_mask(LOG_GUEST_ERROR, "M25P80: Cannot execute JEDEC read " + "in DIO or QIO mode\n"); + } + break; + + case RDCR: + s->data[0] = s->volatile_cfg & 0xFF; + s->data[0] |= (!!s->four_bytes_address_mode) << 5; + s->pos = 0; + s->len = 1; + s->state = STATE_READING_DATA; + break; + + case BULK_ERASE_60: + case BULK_ERASE: + if (s->write_enable) { + trace_m25p80_chip_erase(s); + flash_erase(s, 0, BULK_ERASE); + } else { + qemu_log_mask(LOG_GUEST_ERROR, "M25P80: chip erase with write " + "protect!\n"); + } + break; + case NOP: + break; + case EN_4BYTE_ADDR: + s->four_bytes_address_mode = true; + break; + case EX_4BYTE_ADDR: + s->four_bytes_address_mode = false; + break; + case BRRD: + case EXTEND_ADDR_READ: + s->data[0] = s->ear; + s->pos = 0; + s->len = 1; + s->state = STATE_READING_DATA; + break; + case BRWR: + case EXTEND_ADDR_WRITE: + if (s->write_enable) { + s->needed_bytes = 1; + s->pos = 0; + s->len = 0; + s->state = STATE_COLLECTING_DATA; + } + break; + case RNVCR: + s->data[0] = s->nonvolatile_cfg & 0xFF; + s->data[1] = (s->nonvolatile_cfg >> 8) & 0xFF; + s->pos = 0; + s->len = 2; + s->state = STATE_READING_DATA; + break; + case WNVCR: + if (s->write_enable && get_man(s) == MAN_NUMONYX) { + s->needed_bytes = 2; + s->pos = 0; + s->len = 0; + s->state = STATE_COLLECTING_DATA; + } + break; + case RVCR: + s->data[0] = s->volatile_cfg & 0xFF; + s->pos = 0; + s->len = 1; + s->state = STATE_READING_DATA; + break; + case WVCR: + if (s->write_enable) { + s->needed_bytes = 1; + s->pos = 0; + s->len = 0; + s->state = STATE_COLLECTING_DATA; + } + break; + case REVCR: + s->data[0] = s->enh_volatile_cfg & 0xFF; + s->pos = 0; + s->len = 1; + s->state = STATE_READING_DATA; + break; + case WEVCR: + if (s->write_enable) { + s->needed_bytes = 1; + s->pos = 0; + s->len = 0; + s->state = STATE_COLLECTING_DATA; + } + break; + case RESET_ENABLE: + s->reset_enable = true; + break; + case RESET_MEMORY: + if (s->reset_enable) { + reset_memory(s); + } + break; + case RDCR_EQIO: + switch (get_man(s)) { + case MAN_SPANSION: + s->data[0] = (!!s->quad_enable) << 1; + s->pos = 0; + s->len = 1; + s->state = STATE_READING_DATA; + break; + case MAN_MACRONIX: + s->quad_enable = true; + break; + default: + break; + } + break; + case RSTQIO: + s->quad_enable = false; + break; + case AAI_WP: + if (get_man(s) == MAN_SST) { + if (s->write_enable) { + if (s->aai_enable) { + s->state = STATE_PAGE_PROGRAM; + } else { + s->aai_enable = true; + s->needed_bytes = get_addr_length(s); + s->state = STATE_COLLECTING_DATA; + } + } else { + qemu_log_mask(LOG_GUEST_ERROR, + "M25P80: AAI_WP with write protect\n"); + } + } else { + 
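+ /*
+  * Illustrative note (an addition, not from the original patch):
+  * AAI_WP (0xAD, auto-address-increment program) is SST-specific:
+  * the guest sends opcode + address + data once, then repeats
+  * opcode + data while the device increments the address itself,
+  * until WRDI ends the sequence.  On other manufacturers it falls
+  * through as an unknown command below.
+  */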
qemu_log_mask(LOG_GUEST_ERROR, "M25P80: Unknown cmd %x\n", value); + } + break; + default: + s->pos = 0; + s->len = 1; + s->state = STATE_READING_DATA; + s->data_read_loop = true; + s->data[0] = 0; + qemu_log_mask(LOG_GUEST_ERROR, "M25P80: Unknown cmd %x\n", value); + break; + } +} + +static int m25p80_cs(SSIPeripheral *ss, bool select) +{ + Flash *s = M25P80(ss); + + if (select) { + if (s->state == STATE_COLLECTING_VAR_LEN_DATA) { + complete_collecting_data(s); + } + s->len = 0; + s->pos = 0; + s->state = STATE_IDLE; + flash_sync_dirty(s, -1); + s->data_read_loop = false; + } + + trace_m25p80_select(s, select ? "de" : ""); + + return 0; +} + +static uint32_t m25p80_transfer8(SSIPeripheral *ss, uint32_t tx) +{ + Flash *s = M25P80(ss); + uint32_t r = 0; + + trace_m25p80_transfer(s, s->state, s->len, s->needed_bytes, s->pos, + s->cur_addr, (uint8_t)tx); + + switch (s->state) { + + case STATE_PAGE_PROGRAM: + trace_m25p80_page_program(s, s->cur_addr, (uint8_t)tx); + flash_write8(s, s->cur_addr, (uint8_t)tx); + s->cur_addr = (s->cur_addr + 1) & (s->size - 1); + + if (get_man(s) == MAN_SST && s->aai_enable && s->cur_addr == 0) { + /* + * There is no wrap mode during AAI programming once the highest + * unprotected memory address is reached. The Write-Enable-Latch + * bit is automatically reset, and AAI programming mode aborts. + */ + s->write_enable = false; + s->aai_enable = false; + } + + break; + + case STATE_READ: + r = s->storage[s->cur_addr]; + trace_m25p80_read_byte(s, s->cur_addr, (uint8_t)r); + s->cur_addr = (s->cur_addr + 1) & (s->size - 1); + break; + + case STATE_COLLECTING_DATA: + case STATE_COLLECTING_VAR_LEN_DATA: + + if (s->len >= M25P80_INTERNAL_DATA_BUFFER_SZ) { + qemu_log_mask(LOG_GUEST_ERROR, + "M25P80: Write overrun internal data buffer. " + "SPI controller (QEMU emulator or guest driver) " + "is misbehaving\n"); + s->len = s->pos = 0; + s->state = STATE_IDLE; + break; + } + + s->data[s->len] = (uint8_t)tx; + s->len++; + + if (s->len == s->needed_bytes) { + complete_collecting_data(s); + } + break; + + case STATE_READING_DATA: + + if (s->pos >= M25P80_INTERNAL_DATA_BUFFER_SZ) { + qemu_log_mask(LOG_GUEST_ERROR, + "M25P80: Read overrun internal data buffer. " + "SPI controller (QEMU emulator or guest driver) " + "is misbehaving\n"); + s->len = s->pos = 0; + s->state = STATE_IDLE; + break; + } + + r = s->data[s->pos]; + trace_m25p80_read_data(s, s->pos, (uint8_t)r); + s->pos++; + if (s->pos == s->len) { + s->pos = 0; + if (!s->data_read_loop) { + s->state = STATE_IDLE; + } + } + break; + + default: + case STATE_IDLE: + decode_new_cmd(s, (uint8_t)tx); + break; + } + + return r; +} + +static void m25p80_realize(SSIPeripheral *ss, Error **errp) +{ + Flash *s = M25P80(ss); + M25P80Class *mc = M25P80_GET_CLASS(s); + int ret; + + s->pi = mc->pi; + + s->size = s->pi->sector_size * s->pi->n_sectors; + s->dirty_page = -1; + + if (s->blk) { + uint64_t perm = BLK_PERM_CONSISTENT_READ | + (blk_supports_write_perm(s->blk) ? 
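+ /*
+  * Illustrative note (an addition, not from the original patch):
+  * boards typically bind an IF_MTD drive here, e.g.
+  * "-drive if=mtd,format=raw,file=flash.img"; with no drive attached
+  * the fallback below is a blank all-0xFF in-memory image.
+  */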
BLK_PERM_WRITE : 0); + ret = blk_set_perm(s->blk, perm, BLK_PERM_ALL, errp); + if (ret < 0) { + return; + } + + trace_m25p80_binding(s); + s->storage = blk_blockalign(s->blk, s->size); + + if (blk_pread(s->blk, 0, s->storage, s->size) != s->size) { + error_setg(errp, "failed to read the initial flash content"); + return; + } + } else { + trace_m25p80_binding_no_bdrv(s); + s->storage = blk_blockalign(NULL, s->size); + memset(s->storage, 0xFF, s->size); + } +} + +static void m25p80_reset(DeviceState *d) +{ + Flash *s = M25P80(d); + + reset_memory(s); +} + +static int m25p80_pre_save(void *opaque) +{ + flash_sync_dirty((Flash *)opaque, -1); + + return 0; +} + +static Property m25p80_properties[] = { + /* This is default value for Micron flash */ + DEFINE_PROP_UINT32("nonvolatile-cfg", Flash, nonvolatile_cfg, 0x8FFF), + DEFINE_PROP_UINT8("spansion-cr1nv", Flash, spansion_cr1nv, 0x0), + DEFINE_PROP_UINT8("spansion-cr2nv", Flash, spansion_cr2nv, 0x8), + DEFINE_PROP_UINT8("spansion-cr3nv", Flash, spansion_cr3nv, 0x2), + DEFINE_PROP_UINT8("spansion-cr4nv", Flash, spansion_cr4nv, 0x10), + DEFINE_PROP_DRIVE("drive", Flash, blk), + DEFINE_PROP_END_OF_LIST(), +}; + +static int m25p80_pre_load(void *opaque) +{ + Flash *s = (Flash *)opaque; + + s->data_read_loop = false; + return 0; +} + +static bool m25p80_data_read_loop_needed(void *opaque) +{ + Flash *s = (Flash *)opaque; + + return s->data_read_loop; +} + +static const VMStateDescription vmstate_m25p80_data_read_loop = { + .name = "m25p80/data_read_loop", + .version_id = 1, + .minimum_version_id = 1, + .needed = m25p80_data_read_loop_needed, + .fields = (VMStateField[]) { + VMSTATE_BOOL(data_read_loop, Flash), + VMSTATE_END_OF_LIST() + } +}; + +static bool m25p80_aai_enable_needed(void *opaque) +{ + Flash *s = (Flash *)opaque; + + return s->aai_enable; +} + +static const VMStateDescription vmstate_m25p80_aai_enable = { + .name = "m25p80/aai_enable", + .version_id = 1, + .minimum_version_id = 1, + .needed = m25p80_aai_enable_needed, + .fields = (VMStateField[]) { + VMSTATE_BOOL(aai_enable, Flash), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_m25p80 = { + .name = "m25p80", + .version_id = 0, + .minimum_version_id = 0, + .pre_save = m25p80_pre_save, + .pre_load = m25p80_pre_load, + .fields = (VMStateField[]) { + VMSTATE_UINT8(state, Flash), + VMSTATE_UINT8_ARRAY(data, Flash, M25P80_INTERNAL_DATA_BUFFER_SZ), + VMSTATE_UINT32(len, Flash), + VMSTATE_UINT32(pos, Flash), + VMSTATE_UINT8(needed_bytes, Flash), + VMSTATE_UINT8(cmd_in_progress, Flash), + VMSTATE_UINT32(cur_addr, Flash), + VMSTATE_BOOL(write_enable, Flash), + VMSTATE_BOOL(reset_enable, Flash), + VMSTATE_UINT8(ear, Flash), + VMSTATE_BOOL(four_bytes_address_mode, Flash), + VMSTATE_UINT32(nonvolatile_cfg, Flash), + VMSTATE_UINT32(volatile_cfg, Flash), + VMSTATE_UINT32(enh_volatile_cfg, Flash), + VMSTATE_BOOL(quad_enable, Flash), + VMSTATE_UINT8(spansion_cr1nv, Flash), + VMSTATE_UINT8(spansion_cr2nv, Flash), + VMSTATE_UINT8(spansion_cr3nv, Flash), + VMSTATE_UINT8(spansion_cr4nv, Flash), + VMSTATE_END_OF_LIST() + }, + .subsections = (const VMStateDescription * []) { + &vmstate_m25p80_data_read_loop, + &vmstate_m25p80_aai_enable, + NULL + } +}; + +static void m25p80_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + SSIPeripheralClass *k = SSI_PERIPHERAL_CLASS(klass); + M25P80Class *mc = M25P80_CLASS(klass); + + k->realize = m25p80_realize; + k->transfer = m25p80_transfer8; + k->set_cs = m25p80_cs; + k->cs_polarity = SSI_CS_LOW; + 
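+ /*
+  * Illustrative note (an addition, not from the original patch):
+  * "data" is the known_devices[] row passed as class_data by
+  * m25p80_register_types(), so each flash part becomes its own QOM
+  * type.  A board might wire one up roughly as follows (a sketch;
+  * helper names vary by board):
+  *
+  *   dev = qdev_new("n25q256a");
+  *   qdev_prop_set_drive(dev, "drive", blk);
+  *   qdev_realize_and_unref(dev, BUS(spi_bus), &error_fatal);
+  */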
dc->vmsd = &vmstate_m25p80; + device_class_set_props(dc, m25p80_properties); + dc->reset = m25p80_reset; + mc->pi = data; +} + +static const TypeInfo m25p80_info = { + .name = TYPE_M25P80, + .parent = TYPE_SSI_PERIPHERAL, + .instance_size = sizeof(Flash), + .class_size = sizeof(M25P80Class), + .abstract = true, +}; + +static void m25p80_register_types(void) +{ + int i; + + type_register_static(&m25p80_info); + for (i = 0; i < ARRAY_SIZE(known_devices); ++i) { + TypeInfo ti = { + .name = known_devices[i].part_name, + .parent = TYPE_M25P80, + .class_init = m25p80_class_init, + .class_data = (void *)&known_devices[i], + }; + type_register(&ti); + } +} + +type_init(m25p80_register_types) diff --git a/hw/block/meson.build b/hw/block/meson.build new file mode 100644 index 000000000..238932611 --- /dev/null +++ b/hw/block/meson.build @@ -0,0 +1,22 @@ +softmmu_ss.add(files( + 'block.c', + 'cdrom.c', + 'hd-geometry.c' +)) +softmmu_ss.add(when: 'CONFIG_ECC', if_true: files('ecc.c')) +softmmu_ss.add(when: 'CONFIG_FDC', if_true: files('fdc.c')) +softmmu_ss.add(when: 'CONFIG_FDC_ISA', if_true: files('fdc-isa.c')) +softmmu_ss.add(when: 'CONFIG_FDC_SYSBUS', if_true: files('fdc-sysbus.c')) +softmmu_ss.add(when: 'CONFIG_NAND', if_true: files('nand.c')) +softmmu_ss.add(when: 'CONFIG_ONENAND', if_true: files('onenand.c')) +softmmu_ss.add(when: 'CONFIG_PFLASH_CFI01', if_true: files('pflash_cfi01.c')) +softmmu_ss.add(when: 'CONFIG_PFLASH_CFI02', if_true: files('pflash_cfi02.c')) +softmmu_ss.add(when: 'CONFIG_SSI_M25P80', if_true: files('m25p80.c')) +softmmu_ss.add(when: 'CONFIG_SWIM', if_true: files('swim.c')) +softmmu_ss.add(when: 'CONFIG_XEN', if_true: files('xen-block.c')) +softmmu_ss.add(when: 'CONFIG_TC58128', if_true: files('tc58128.c')) + +specific_ss.add(when: 'CONFIG_VIRTIO_BLK', if_true: files('virtio-blk.c')) +specific_ss.add(when: 'CONFIG_VHOST_USER_BLK', if_true: files('vhost-user-blk.c')) + +subdir('dataplane') diff --git a/hw/block/nand.c b/hw/block/nand.c new file mode 100644 index 000000000..8bc80e351 --- /dev/null +++ b/hw/block/nand.c @@ -0,0 +1,814 @@ +/* + * Flash NAND memory emulation. Based on "16M x 8 Bit NAND Flash + * Memory" datasheet for the KM29U128AT / K9F2808U0A chips from + * Samsung Electronic. + * + * Copyright (c) 2006 Openedhand Ltd. + * Written by Andrzej Zaborowski + * + * Support for additional features based on "MT29F2G16ABCWP 2Gx16" + * datasheet from Micron Technology and "NAND02G-B2C" datasheet + * from ST Microelectronics. + * + * This code is licensed under the GNU GPL v2. + * + * Contributions after 2012-01-13 are licensed under the terms of the + * GNU GPL, version 2 or (at your option) any later version. 
+ */ + +#ifndef NAND_IO + +#include "qemu/osdep.h" +#include "hw/hw.h" +#include "hw/qdev-properties.h" +#include "hw/qdev-properties-system.h" +#include "hw/block/flash.h" +#include "sysemu/block-backend.h" +#include "migration/vmstate.h" +#include "qapi/error.h" +#include "qemu/error-report.h" +#include "qemu/module.h" +#include "qom/object.h" + +# define NAND_CMD_READ0 0x00 +# define NAND_CMD_READ1 0x01 +# define NAND_CMD_READ2 0x50 +# define NAND_CMD_LPREAD2 0x30 +# define NAND_CMD_NOSERIALREAD2 0x35 +# define NAND_CMD_RANDOMREAD1 0x05 +# define NAND_CMD_RANDOMREAD2 0xe0 +# define NAND_CMD_READID 0x90 +# define NAND_CMD_RESET 0xff +# define NAND_CMD_PAGEPROGRAM1 0x80 +# define NAND_CMD_PAGEPROGRAM2 0x10 +# define NAND_CMD_CACHEPROGRAM2 0x15 +# define NAND_CMD_BLOCKERASE1 0x60 +# define NAND_CMD_BLOCKERASE2 0xd0 +# define NAND_CMD_READSTATUS 0x70 +# define NAND_CMD_COPYBACKPRG1 0x85 + +# define NAND_IOSTATUS_ERROR (1 << 0) +# define NAND_IOSTATUS_PLANE0 (1 << 1) +# define NAND_IOSTATUS_PLANE1 (1 << 2) +# define NAND_IOSTATUS_PLANE2 (1 << 3) +# define NAND_IOSTATUS_PLANE3 (1 << 4) +# define NAND_IOSTATUS_READY (1 << 6) +# define NAND_IOSTATUS_UNPROTCT (1 << 7) + +# define MAX_PAGE 0x800 +# define MAX_OOB 0x40 + +typedef struct NANDFlashState NANDFlashState; +struct NANDFlashState { + DeviceState parent_obj; + + uint8_t manf_id, chip_id; + uint8_t buswidth; /* in BYTES */ + int size, pages; + int page_shift, oob_shift, erase_shift, addr_shift; + uint8_t *storage; + BlockBackend *blk; + int mem_oob; + + uint8_t cle, ale, ce, wp, gnd; + + uint8_t io[MAX_PAGE + MAX_OOB + 0x400]; + uint8_t *ioaddr; + int iolen; + + uint32_t cmd; + uint64_t addr; + int addrlen; + int status; + int offset; + + void (*blk_write)(NANDFlashState *s); + void (*blk_erase)(NANDFlashState *s); + void (*blk_load)(NANDFlashState *s, uint64_t addr, int offset); + + uint32_t ioaddr_vmstate; +}; + +#define TYPE_NAND "nand" + +OBJECT_DECLARE_SIMPLE_TYPE(NANDFlashState, NAND) + +static void mem_and(uint8_t *dest, const uint8_t *src, size_t n) +{ + /* Like memcpy() but we logical-AND the data into the destination */ + int i; + for (i = 0; i < n; i++) { + dest[i] &= src[i]; + } +} + +# define NAND_NO_AUTOINCR 0x00000001 +# define NAND_BUSWIDTH_16 0x00000002 +# define NAND_NO_PADDING 0x00000004 +# define NAND_CACHEPRG 0x00000008 +# define NAND_COPYBACK 0x00000010 +# define NAND_IS_AND 0x00000020 +# define NAND_4PAGE_ARRAY 0x00000040 +# define NAND_NO_READRDY 0x00000100 +# define NAND_SAMSUNG_LP (NAND_NO_PADDING | NAND_COPYBACK) + +# define NAND_IO + +# define PAGE(addr) ((addr) >> ADDR_SHIFT) +# define PAGE_START(page) (PAGE(page) * (NAND_PAGE_SIZE + OOB_SIZE)) +# define PAGE_MASK ((1 << ADDR_SHIFT) - 1) +# define OOB_SHIFT (PAGE_SHIFT - 5) +# define OOB_SIZE (1 << OOB_SHIFT) +# define SECTOR(addr) ((addr) >> (9 + ADDR_SHIFT - PAGE_SHIFT)) +# define SECTOR_OFFSET(addr) ((addr) & ((511 >> PAGE_SHIFT) << 8)) + +# define NAND_PAGE_SIZE 256 +# define PAGE_SHIFT 8 +# define PAGE_SECTORS 1 +# define ADDR_SHIFT 8 +# include "nand.c" +# define NAND_PAGE_SIZE 512 +# define PAGE_SHIFT 9 +# define PAGE_SECTORS 1 +# define ADDR_SHIFT 8 +# include "nand.c" +# define NAND_PAGE_SIZE 2048 +# define PAGE_SHIFT 11 +# define PAGE_SECTORS 4 +# define ADDR_SHIFT 16 +# include "nand.c" + +/* Information based on Linux drivers/mtd/nand/raw/nand_ids.c */ +static const struct { + int size; + int width; + int page_shift; + int erase_shift; + uint32_t options; +} nand_flash_ids[0x100] = { + [0 ... 
0xff] = { 0 }, + + [0x6b] = { 4, 8, 9, 4, 0 }, + [0xe3] = { 4, 8, 9, 4, 0 }, + [0xe5] = { 4, 8, 9, 4, 0 }, + [0xd6] = { 8, 8, 9, 4, 0 }, + [0xe6] = { 8, 8, 9, 4, 0 }, + + [0x33] = { 16, 8, 9, 5, 0 }, + [0x73] = { 16, 8, 9, 5, 0 }, + [0x43] = { 16, 16, 9, 5, NAND_BUSWIDTH_16 }, + [0x53] = { 16, 16, 9, 5, NAND_BUSWIDTH_16 }, + + [0x35] = { 32, 8, 9, 5, 0 }, + [0x75] = { 32, 8, 9, 5, 0 }, + [0x45] = { 32, 16, 9, 5, NAND_BUSWIDTH_16 }, + [0x55] = { 32, 16, 9, 5, NAND_BUSWIDTH_16 }, + + [0x36] = { 64, 8, 9, 5, 0 }, + [0x76] = { 64, 8, 9, 5, 0 }, + [0x46] = { 64, 16, 9, 5, NAND_BUSWIDTH_16 }, + [0x56] = { 64, 16, 9, 5, NAND_BUSWIDTH_16 }, + + [0x78] = { 128, 8, 9, 5, 0 }, + [0x39] = { 128, 8, 9, 5, 0 }, + [0x79] = { 128, 8, 9, 5, 0 }, + [0x72] = { 128, 16, 9, 5, NAND_BUSWIDTH_16 }, + [0x49] = { 128, 16, 9, 5, NAND_BUSWIDTH_16 }, + [0x74] = { 128, 16, 9, 5, NAND_BUSWIDTH_16 }, + [0x59] = { 128, 16, 9, 5, NAND_BUSWIDTH_16 }, + + [0x71] = { 256, 8, 9, 5, 0 }, + + /* + * These are the new chips with large page size. The pagesize and the + * erasesize is determined from the extended id bytes + */ +# define LP_OPTIONS (NAND_SAMSUNG_LP | NAND_NO_READRDY | NAND_NO_AUTOINCR) +# define LP_OPTIONS16 (LP_OPTIONS | NAND_BUSWIDTH_16) + + /* 512 Megabit */ + [0xa2] = { 64, 8, 0, 0, LP_OPTIONS }, + [0xf2] = { 64, 8, 0, 0, LP_OPTIONS }, + [0xb2] = { 64, 16, 0, 0, LP_OPTIONS16 }, + [0xc2] = { 64, 16, 0, 0, LP_OPTIONS16 }, + + /* 1 Gigabit */ + [0xa1] = { 128, 8, 0, 0, LP_OPTIONS }, + [0xf1] = { 128, 8, 0, 0, LP_OPTIONS }, + [0xb1] = { 128, 16, 0, 0, LP_OPTIONS16 }, + [0xc1] = { 128, 16, 0, 0, LP_OPTIONS16 }, + + /* 2 Gigabit */ + [0xaa] = { 256, 8, 0, 0, LP_OPTIONS }, + [0xda] = { 256, 8, 0, 0, LP_OPTIONS }, + [0xba] = { 256, 16, 0, 0, LP_OPTIONS16 }, + [0xca] = { 256, 16, 0, 0, LP_OPTIONS16 }, + + /* 4 Gigabit */ + [0xac] = { 512, 8, 0, 0, LP_OPTIONS }, + [0xdc] = { 512, 8, 0, 0, LP_OPTIONS }, + [0xbc] = { 512, 16, 0, 0, LP_OPTIONS16 }, + [0xcc] = { 512, 16, 0, 0, LP_OPTIONS16 }, + + /* 8 Gigabit */ + [0xa3] = { 1024, 8, 0, 0, LP_OPTIONS }, + [0xd3] = { 1024, 8, 0, 0, LP_OPTIONS }, + [0xb3] = { 1024, 16, 0, 0, LP_OPTIONS16 }, + [0xc3] = { 1024, 16, 0, 0, LP_OPTIONS16 }, + + /* 16 Gigabit */ + [0xa5] = { 2048, 8, 0, 0, LP_OPTIONS }, + [0xd5] = { 2048, 8, 0, 0, LP_OPTIONS }, + [0xb5] = { 2048, 16, 0, 0, LP_OPTIONS16 }, + [0xc5] = { 2048, 16, 0, 0, LP_OPTIONS16 }, +}; + +static void nand_reset(DeviceState *dev) +{ + NANDFlashState *s = NAND(dev); + s->cmd = NAND_CMD_READ0; + s->addr = 0; + s->addrlen = 0; + s->iolen = 0; + s->offset = 0; + s->status &= NAND_IOSTATUS_UNPROTCT; + s->status |= NAND_IOSTATUS_READY; +} + +static inline void nand_pushio_byte(NANDFlashState *s, uint8_t value) +{ + s->ioaddr[s->iolen++] = value; + for (value = s->buswidth; --value;) { + s->ioaddr[s->iolen++] = 0; + } +} + +static void nand_command(NANDFlashState *s) +{ + unsigned int offset; + switch (s->cmd) { + case NAND_CMD_READ0: + s->iolen = 0; + break; + + case NAND_CMD_READID: + s->ioaddr = s->io; + s->iolen = 0; + nand_pushio_byte(s, s->manf_id); + nand_pushio_byte(s, s->chip_id); + nand_pushio_byte(s, 'Q'); /* Don't-care byte (often 0xa5) */ + if (nand_flash_ids[s->chip_id].options & NAND_SAMSUNG_LP) { + /* Page Size, Block Size, Spare Size; bit 6 indicates + * 8 vs 16 bit width NAND. + */ + nand_pushio_byte(s, (s->buswidth == 2) ? 
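+ /*
+  * Illustrative decode (an addition, not from the original patch),
+  * per the Samsung-style 4th ID byte: bits [1:0] page size
+  * (01 -> 2 KiB), bits [5:4] block size (01 -> 128 KiB), bit 6 bus
+  * width -- hence 0x15 for x8 parts and 0x55 for x16 parts.
+  */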
0x55 : 0x15); + } else { + nand_pushio_byte(s, 0xc0); /* Multi-plane */ + } + break; + + case NAND_CMD_RANDOMREAD2: + case NAND_CMD_NOSERIALREAD2: + if (!(nand_flash_ids[s->chip_id].options & NAND_SAMSUNG_LP)) + break; + offset = s->addr & ((1 << s->addr_shift) - 1); + s->blk_load(s, s->addr, offset); + if (s->gnd) + s->iolen = (1 << s->page_shift) - offset; + else + s->iolen = (1 << s->page_shift) + (1 << s->oob_shift) - offset; + break; + + case NAND_CMD_RESET: + nand_reset(DEVICE(s)); + break; + + case NAND_CMD_PAGEPROGRAM1: + s->ioaddr = s->io; + s->iolen = 0; + break; + + case NAND_CMD_PAGEPROGRAM2: + if (s->wp) { + s->blk_write(s); + } + break; + + case NAND_CMD_BLOCKERASE1: + break; + + case NAND_CMD_BLOCKERASE2: + s->addr &= (1ull << s->addrlen * 8) - 1; + s->addr <<= nand_flash_ids[s->chip_id].options & NAND_SAMSUNG_LP ? + 16 : 8; + + if (s->wp) { + s->blk_erase(s); + } + break; + + case NAND_CMD_READSTATUS: + s->ioaddr = s->io; + s->iolen = 0; + nand_pushio_byte(s, s->status); + break; + + default: + printf("%s: Unknown NAND command 0x%02x\n", __func__, s->cmd); + } +} + +static int nand_pre_save(void *opaque) +{ + NANDFlashState *s = NAND(opaque); + + s->ioaddr_vmstate = s->ioaddr - s->io; + + return 0; +} + +static int nand_post_load(void *opaque, int version_id) +{ + NANDFlashState *s = NAND(opaque); + + if (s->ioaddr_vmstate > sizeof(s->io)) { + return -EINVAL; + } + s->ioaddr = s->io + s->ioaddr_vmstate; + + return 0; +} + +static const VMStateDescription vmstate_nand = { + .name = "nand", + .version_id = 1, + .minimum_version_id = 1, + .pre_save = nand_pre_save, + .post_load = nand_post_load, + .fields = (VMStateField[]) { + VMSTATE_UINT8(cle, NANDFlashState), + VMSTATE_UINT8(ale, NANDFlashState), + VMSTATE_UINT8(ce, NANDFlashState), + VMSTATE_UINT8(wp, NANDFlashState), + VMSTATE_UINT8(gnd, NANDFlashState), + VMSTATE_BUFFER(io, NANDFlashState), + VMSTATE_UINT32(ioaddr_vmstate, NANDFlashState), + VMSTATE_INT32(iolen, NANDFlashState), + VMSTATE_UINT32(cmd, NANDFlashState), + VMSTATE_UINT64(addr, NANDFlashState), + VMSTATE_INT32(addrlen, NANDFlashState), + VMSTATE_INT32(status, NANDFlashState), + VMSTATE_INT32(offset, NANDFlashState), + /* XXX: do we want to save s->storage too? 
*/ + VMSTATE_END_OF_LIST() + } +}; + +static void nand_realize(DeviceState *dev, Error **errp) +{ + int pagesize; + NANDFlashState *s = NAND(dev); + int ret; + + + s->buswidth = nand_flash_ids[s->chip_id].width >> 3; + s->size = nand_flash_ids[s->chip_id].size << 20; + if (nand_flash_ids[s->chip_id].options & NAND_SAMSUNG_LP) { + s->page_shift = 11; + s->erase_shift = 6; + } else { + s->page_shift = nand_flash_ids[s->chip_id].page_shift; + s->erase_shift = nand_flash_ids[s->chip_id].erase_shift; + } + + switch (1 << s->page_shift) { + case 256: + nand_init_256(s); + break; + case 512: + nand_init_512(s); + break; + case 2048: + nand_init_2048(s); + break; + default: + error_setg(errp, "Unsupported NAND block size %#x", + 1 << s->page_shift); + return; + } + + pagesize = 1 << s->oob_shift; + s->mem_oob = 1; + if (s->blk) { + if (!blk_supports_write_perm(s->blk)) { + error_setg(errp, "Can't use a read-only drive"); + return; + } + ret = blk_set_perm(s->blk, BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE, + BLK_PERM_ALL, errp); + if (ret < 0) { + return; + } + if (blk_getlength(s->blk) >= + (s->pages << s->page_shift) + (s->pages << s->oob_shift)) { + pagesize = 0; + s->mem_oob = 0; + } + } else { + pagesize += 1 << s->page_shift; + } + if (pagesize) { + s->storage = (uint8_t *) memset(g_malloc(s->pages * pagesize), + 0xff, s->pages * pagesize); + } + /* Give s->ioaddr a sane value in case we save state before it is used. */ + s->ioaddr = s->io; +} + +static Property nand_properties[] = { + DEFINE_PROP_UINT8("manufacturer_id", NANDFlashState, manf_id, 0), + DEFINE_PROP_UINT8("chip_id", NANDFlashState, chip_id, 0), + DEFINE_PROP_DRIVE("drive", NANDFlashState, blk), + DEFINE_PROP_END_OF_LIST(), +}; + +static void nand_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = nand_realize; + dc->reset = nand_reset; + dc->vmsd = &vmstate_nand; + device_class_set_props(dc, nand_properties); + set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); +} + +static const TypeInfo nand_info = { + .name = TYPE_NAND, + .parent = TYPE_DEVICE, + .instance_size = sizeof(NANDFlashState), + .class_init = nand_class_init, +}; + +static void nand_register_types(void) +{ + type_register_static(&nand_info); +} + +/* + * Chip inputs are CLE, ALE, CE, WP, GND and eight I/O pins. Chip + * outputs are R/B and eight I/O pins. + * + * CE, WP and R/B are active low. 
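+ *
+ * Illustrative usage (an addition, not from the original patch): a
+ * board model polls the status register roughly like this:
+ *
+ *   nand_setpins(dev, 1, 0, 0, 1, 0);     (CLE high: command cycle)
+ *   nand_setio(dev, NAND_CMD_READSTATUS);
+ *   nand_setpins(dev, 0, 0, 0, 1, 0);     (CLE low: data cycles)
+ *   status = nand_getio(dev);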
+ */ +void nand_setpins(DeviceState *dev, uint8_t cle, uint8_t ale, + uint8_t ce, uint8_t wp, uint8_t gnd) +{ + NANDFlashState *s = NAND(dev); + + s->cle = cle; + s->ale = ale; + s->ce = ce; + s->wp = wp; + s->gnd = gnd; + if (wp) { + s->status |= NAND_IOSTATUS_UNPROTCT; + } else { + s->status &= ~NAND_IOSTATUS_UNPROTCT; + } +} + +void nand_getpins(DeviceState *dev, int *rb) +{ + *rb = 1; +} + +void nand_setio(DeviceState *dev, uint32_t value) +{ + int i; + NANDFlashState *s = NAND(dev); + + if (!s->ce && s->cle) { + if (nand_flash_ids[s->chip_id].options & NAND_SAMSUNG_LP) { + if (s->cmd == NAND_CMD_READ0 && value == NAND_CMD_LPREAD2) + return; + if (value == NAND_CMD_RANDOMREAD1) { + s->addr &= ~((1 << s->addr_shift) - 1); + s->addrlen = 0; + return; + } + } + if (value == NAND_CMD_READ0) { + s->offset = 0; + } else if (value == NAND_CMD_READ1) { + s->offset = 0x100; + value = NAND_CMD_READ0; + } else if (value == NAND_CMD_READ2) { + s->offset = 1 << s->page_shift; + value = NAND_CMD_READ0; + } + + s->cmd = value; + + if (s->cmd == NAND_CMD_READSTATUS || + s->cmd == NAND_CMD_PAGEPROGRAM2 || + s->cmd == NAND_CMD_BLOCKERASE1 || + s->cmd == NAND_CMD_BLOCKERASE2 || + s->cmd == NAND_CMD_NOSERIALREAD2 || + s->cmd == NAND_CMD_RANDOMREAD2 || + s->cmd == NAND_CMD_RESET) { + nand_command(s); + } + + if (s->cmd != NAND_CMD_RANDOMREAD2) { + s->addrlen = 0; + } + } + + if (s->ale) { + unsigned int shift = s->addrlen * 8; + uint64_t mask = ~(0xffull << shift); + uint64_t v = (uint64_t)value << shift; + + s->addr = (s->addr & mask) | v; + s->addrlen ++; + + switch (s->addrlen) { + case 1: + if (s->cmd == NAND_CMD_READID) { + nand_command(s); + } + break; + case 2: /* fix cache address as a byte address */ + s->addr <<= (s->buswidth - 1); + break; + case 3: + if (!(nand_flash_ids[s->chip_id].options & NAND_SAMSUNG_LP) && + (s->cmd == NAND_CMD_READ0 || + s->cmd == NAND_CMD_PAGEPROGRAM1)) { + nand_command(s); + } + break; + case 4: + if ((nand_flash_ids[s->chip_id].options & NAND_SAMSUNG_LP) && + nand_flash_ids[s->chip_id].size < 256 && /* 1Gb or less */ + (s->cmd == NAND_CMD_READ0 || + s->cmd == NAND_CMD_PAGEPROGRAM1)) { + nand_command(s); + } + break; + case 5: + if ((nand_flash_ids[s->chip_id].options & NAND_SAMSUNG_LP) && + nand_flash_ids[s->chip_id].size >= 256 && /* 2Gb or more */ + (s->cmd == NAND_CMD_READ0 || + s->cmd == NAND_CMD_PAGEPROGRAM1)) { + nand_command(s); + } + break; + default: + break; + } + } + + if (!s->cle && !s->ale && s->cmd == NAND_CMD_PAGEPROGRAM1) { + if (s->iolen < (1 << s->page_shift) + (1 << s->oob_shift)) { + for (i = s->buswidth; i--; value >>= 8) { + s->io[s->iolen ++] = (uint8_t) (value & 0xff); + } + } + } else if (!s->cle && !s->ale && s->cmd == NAND_CMD_COPYBACKPRG1) { + if ((s->addr & ((1 << s->addr_shift) - 1)) < + (1 << s->page_shift) + (1 << s->oob_shift)) { + for (i = s->buswidth; i--; s->addr++, value >>= 8) { + s->io[s->iolen + (s->addr & ((1 << s->addr_shift) - 1))] = + (uint8_t) (value & 0xff); + } + } + } +} + +uint32_t nand_getio(DeviceState *dev) +{ + int offset; + uint32_t x = 0; + NANDFlashState *s = NAND(dev); + + /* Allow sequential reading */ + if (!s->iolen && s->cmd == NAND_CMD_READ0) { + offset = (int) (s->addr & ((1 << s->addr_shift) - 1)) + s->offset; + s->offset = 0; + + s->blk_load(s, s->addr, offset); + if (s->gnd) + s->iolen = (1 << s->page_shift) - offset; + else + s->iolen = (1 << s->page_shift) + (1 << s->oob_shift) - offset; + } + + if (s->ce || s->iolen <= 0) { + return 0; + } + + for (offset = s->buswidth; offset--;) { + x |= 
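+ /*
+  * Illustrative note (an addition, not from the original patch):
+  * this assembles one bus-width word LSB-first ("offset << 3" is
+  * offset * 8), so a x16 chip (buswidth == 2) returns
+  * io[0] | (io[1] << 8) per call.
+  */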
s->ioaddr[offset] << (offset << 3); + } + /* after receiving READ STATUS command all subsequent reads will + * return the status register value until another command is issued + */ + if (s->cmd != NAND_CMD_READSTATUS) { + s->addr += s->buswidth; + s->ioaddr += s->buswidth; + s->iolen -= s->buswidth; + } + return x; +} + +uint32_t nand_getbuswidth(DeviceState *dev) +{ + NANDFlashState *s = (NANDFlashState *) dev; + return s->buswidth << 3; +} + +DeviceState *nand_init(BlockBackend *blk, int manf_id, int chip_id) +{ + DeviceState *dev; + + if (nand_flash_ids[chip_id].size == 0) { + hw_error("%s: Unsupported NAND chip ID.\n", __func__); + } + dev = qdev_new(TYPE_NAND); + qdev_prop_set_uint8(dev, "manufacturer_id", manf_id); + qdev_prop_set_uint8(dev, "chip_id", chip_id); + if (blk) { + qdev_prop_set_drive_err(dev, "drive", blk, &error_fatal); + } + + qdev_realize(dev, NULL, &error_fatal); + return dev; +} + +type_init(nand_register_types) + +#else + +/* Program a single page */ +static void glue(nand_blk_write_, NAND_PAGE_SIZE)(NANDFlashState *s) +{ + uint64_t off, page, sector, soff; + uint8_t iobuf[(PAGE_SECTORS + 2) * 0x200]; + if (PAGE(s->addr) >= s->pages) + return; + + if (!s->blk) { + mem_and(s->storage + PAGE_START(s->addr) + (s->addr & PAGE_MASK) + + s->offset, s->io, s->iolen); + } else if (s->mem_oob) { + sector = SECTOR(s->addr); + off = (s->addr & PAGE_MASK) + s->offset; + soff = SECTOR_OFFSET(s->addr); + if (blk_pread(s->blk, sector << BDRV_SECTOR_BITS, iobuf, + PAGE_SECTORS << BDRV_SECTOR_BITS) < 0) { + printf("%s: read error in sector %" PRIu64 "\n", __func__, sector); + return; + } + + mem_and(iobuf + (soff | off), s->io, MIN(s->iolen, NAND_PAGE_SIZE - off)); + if (off + s->iolen > NAND_PAGE_SIZE) { + page = PAGE(s->addr); + mem_and(s->storage + (page << OOB_SHIFT), s->io + NAND_PAGE_SIZE - off, + MIN(OOB_SIZE, off + s->iolen - NAND_PAGE_SIZE)); + } + + if (blk_pwrite(s->blk, sector << BDRV_SECTOR_BITS, iobuf, + PAGE_SECTORS << BDRV_SECTOR_BITS, 0) < 0) { + printf("%s: write error in sector %" PRIu64 "\n", __func__, sector); + } + } else { + off = PAGE_START(s->addr) + (s->addr & PAGE_MASK) + s->offset; + sector = off >> 9; + soff = off & 0x1ff; + if (blk_pread(s->blk, sector << BDRV_SECTOR_BITS, iobuf, + (PAGE_SECTORS + 2) << BDRV_SECTOR_BITS) < 0) { + printf("%s: read error in sector %" PRIu64 "\n", __func__, sector); + return; + } + + mem_and(iobuf + soff, s->io, s->iolen); + + if (blk_pwrite(s->blk, sector << BDRV_SECTOR_BITS, iobuf, + (PAGE_SECTORS + 2) << BDRV_SECTOR_BITS, 0) < 0) { + printf("%s: write error in sector %" PRIu64 "\n", __func__, sector); + } + } + s->offset = 0; +} + +/* Erase a single block */ +static void glue(nand_blk_erase_, NAND_PAGE_SIZE)(NANDFlashState *s) +{ + uint64_t i, page, addr; + uint8_t iobuf[0x200] = { [0 ... 
0x1ff] = 0xff, }; + addr = s->addr & ~((1 << (ADDR_SHIFT + s->erase_shift)) - 1); + + if (PAGE(addr) >= s->pages) { + return; + } + + if (!s->blk) { + memset(s->storage + PAGE_START(addr), + 0xff, (NAND_PAGE_SIZE + OOB_SIZE) << s->erase_shift); + } else if (s->mem_oob) { + memset(s->storage + (PAGE(addr) << OOB_SHIFT), + 0xff, OOB_SIZE << s->erase_shift); + i = SECTOR(addr); + page = SECTOR(addr + (1 << (ADDR_SHIFT + s->erase_shift))); + for (; i < page; i ++) + if (blk_pwrite(s->blk, i << BDRV_SECTOR_BITS, iobuf, + BDRV_SECTOR_SIZE, 0) < 0) { + printf("%s: write error in sector %" PRIu64 "\n", __func__, i); + } + } else { + addr = PAGE_START(addr); + page = addr >> 9; + if (blk_pread(s->blk, page << BDRV_SECTOR_BITS, iobuf, + BDRV_SECTOR_SIZE) < 0) { + printf("%s: read error in sector %" PRIu64 "\n", __func__, page); + } + memset(iobuf + (addr & 0x1ff), 0xff, (~addr & 0x1ff) + 1); + if (blk_pwrite(s->blk, page << BDRV_SECTOR_BITS, iobuf, + BDRV_SECTOR_SIZE, 0) < 0) { + printf("%s: write error in sector %" PRIu64 "\n", __func__, page); + } + + memset(iobuf, 0xff, 0x200); + i = (addr & ~0x1ff) + 0x200; + for (addr += ((NAND_PAGE_SIZE + OOB_SIZE) << s->erase_shift) - 0x200; + i < addr; i += 0x200) { + if (blk_pwrite(s->blk, i, iobuf, BDRV_SECTOR_SIZE, 0) < 0) { + printf("%s: write error in sector %" PRIu64 "\n", + __func__, i >> 9); + } + } + + page = i >> 9; + if (blk_pread(s->blk, page << BDRV_SECTOR_BITS, iobuf, + BDRV_SECTOR_SIZE) < 0) { + printf("%s: read error in sector %" PRIu64 "\n", __func__, page); + } + memset(iobuf, 0xff, ((addr - 1) & 0x1ff) + 1); + if (blk_pwrite(s->blk, page << BDRV_SECTOR_BITS, iobuf, + BDRV_SECTOR_SIZE, 0) < 0) { + printf("%s: write error in sector %" PRIu64 "\n", __func__, page); + } + } +} + +static void glue(nand_blk_load_, NAND_PAGE_SIZE)(NANDFlashState *s, + uint64_t addr, int offset) +{ + if (PAGE(addr) >= s->pages) { + return; + } + + if (s->blk) { + if (s->mem_oob) { + if (blk_pread(s->blk, SECTOR(addr) << BDRV_SECTOR_BITS, s->io, + PAGE_SECTORS << BDRV_SECTOR_BITS) < 0) { + printf("%s: read error in sector %" PRIu64 "\n", + __func__, SECTOR(addr)); + } + memcpy(s->io + SECTOR_OFFSET(s->addr) + NAND_PAGE_SIZE, + s->storage + (PAGE(s->addr) << OOB_SHIFT), + OOB_SIZE); + s->ioaddr = s->io + SECTOR_OFFSET(s->addr) + offset; + } else { + if (blk_pread(s->blk, PAGE_START(addr), s->io, + (PAGE_SECTORS + 2) << BDRV_SECTOR_BITS) < 0) { + printf("%s: read error in sector %" PRIu64 "\n", + __func__, PAGE_START(addr) >> 9); + } + s->ioaddr = s->io + (PAGE_START(addr) & 0x1ff) + offset; + } + } else { + memcpy(s->io, s->storage + PAGE_START(s->addr) + + offset, NAND_PAGE_SIZE + OOB_SIZE - offset); + s->ioaddr = s->io; + } +} + +static void glue(nand_init_, NAND_PAGE_SIZE)(NANDFlashState *s) +{ + s->oob_shift = PAGE_SHIFT - 5; + s->pages = s->size >> PAGE_SHIFT; + s->addr_shift = ADDR_SHIFT; + + s->blk_erase = glue(nand_blk_erase_, NAND_PAGE_SIZE); + s->blk_write = glue(nand_blk_write_, NAND_PAGE_SIZE); + s->blk_load = glue(nand_blk_load_, NAND_PAGE_SIZE); +} + +# undef NAND_PAGE_SIZE +# undef PAGE_SHIFT +# undef PAGE_SECTORS +# undef ADDR_SHIFT +#endif /* NAND_IO */ diff --git a/hw/block/onenand.c b/hw/block/onenand.c new file mode 100644 index 000000000..afc0cd3a0 --- /dev/null +++ b/hw/block/onenand.c @@ -0,0 +1,872 @@ +/* + * OneNAND flash memories emulation. 
+ * + * Copyright (C) 2008 Nokia Corporation + * Written by Andrzej Zaborowski + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 or + * (at your option) version 3 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see <http://www.gnu.org/licenses/>. + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "hw/hw.h" +#include "hw/block/flash.h" +#include "hw/irq.h" +#include "hw/qdev-properties.h" +#include "hw/qdev-properties-system.h" +#include "sysemu/block-backend.h" +#include "exec/memory.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "qemu/error-report.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "qom/object.h" + +/* 11 for 2kB-page OneNAND ("2nd generation") and 10 for 1kB-page chips */ +#define PAGE_SHIFT 11 + +/* Fixed */ +#define BLOCK_SHIFT (PAGE_SHIFT + 6) + +#define TYPE_ONE_NAND "onenand" +OBJECT_DECLARE_SIMPLE_TYPE(OneNANDState, ONE_NAND) + +struct OneNANDState { + SysBusDevice parent_obj; + + struct { + uint16_t man; + uint16_t dev; + uint16_t ver; + } id; + int shift; + hwaddr base; + qemu_irq intr; + qemu_irq rdy; + BlockBackend *blk; + BlockBackend *blk_cur; + uint8_t *image; + uint8_t *otp; + uint8_t *current; + MemoryRegion ram; + MemoryRegion mapped_ram; + uint8_t current_direction; + uint8_t *boot[2]; + uint8_t *data[2][2]; + MemoryRegion iomem; + MemoryRegion container; + int cycle; + int otpmode; + + uint16_t addr[8]; + uint16_t unladdr[8]; + int bufaddr; + int count; + uint16_t command; + uint16_t config[2]; + uint16_t status; + uint16_t intstatus; + uint16_t wpstatus; + + ECCState ecc; + + int density_mask; + int secs; + int secs_cur; + int blocks; + uint8_t *blockwp; +}; + +enum { + ONEN_BUF_BLOCK = 0, + ONEN_BUF_BLOCK2 = 1, + ONEN_BUF_DEST_BLOCK = 2, + ONEN_BUF_DEST_PAGE = 3, + ONEN_BUF_PAGE = 7, +}; + +enum { + ONEN_ERR_CMD = 1 << 10, + ONEN_ERR_ERASE = 1 << 11, + ONEN_ERR_PROG = 1 << 12, + ONEN_ERR_LOAD = 1 << 13, +}; + +enum { + ONEN_INT_RESET = 1 << 4, + ONEN_INT_ERASE = 1 << 5, + ONEN_INT_PROG = 1 << 6, + ONEN_INT_LOAD = 1 << 7, + ONEN_INT = 1 << 15, +}; + +enum { + ONEN_LOCK_LOCKTIGHTEN = 1 << 0, + ONEN_LOCK_LOCKED = 1 << 1, + ONEN_LOCK_UNLOCKED = 1 << 2, +}; + +static void onenand_mem_setup(OneNANDState *s) +{ + /* XXX: We should use IO_MEM_ROMD but we broke it earlier... + * Both 0x0000 ... 0x01ff and 0x8000 ... 0x800f can be used to + * write boot commands. Also take note of the BWPS bit.
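+ *
+ * Illustrative layout (an addition, not from the original patch), in
+ * device words, scaled to bytes by s->shift:
+ *   0x0000 ... 0x01ff  BootRAM / register window (s->iomem)
+ *   0x0200 ... 0xbfff  alias of s->ram ("onenand-mapped-ram")
+ * The container spans 0x10000 words, e.g. 128 KiB for a 16-bit part
+ * with shift == 1.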
*/ + memory_region_init(&s->container, OBJECT(s), "onenand", + 0x10000 << s->shift); + memory_region_add_subregion(&s->container, 0, &s->iomem); + memory_region_init_alias(&s->mapped_ram, OBJECT(s), "onenand-mapped-ram", + &s->ram, 0x0200 << s->shift, + 0xbe00 << s->shift); + memory_region_add_subregion_overlap(&s->container, + 0x0200 << s->shift, + &s->mapped_ram, + 1); +} + +static void onenand_intr_update(OneNANDState *s) +{ + qemu_set_irq(s->intr, ((s->intstatus >> 15) ^ (~s->config[0] >> 6)) & 1); +} + +static int onenand_pre_save(void *opaque) +{ + OneNANDState *s = opaque; + if (s->current == s->otp) { + s->current_direction = 1; + } else if (s->current == s->image) { + s->current_direction = 2; + } else { + s->current_direction = 0; + } + + return 0; +} + +static int onenand_post_load(void *opaque, int version_id) +{ + OneNANDState *s = opaque; + switch (s->current_direction) { + case 0: + break; + case 1: + s->current = s->otp; + break; + case 2: + s->current = s->image; + break; + default: + return -1; + } + onenand_intr_update(s); + return 0; +} + +static const VMStateDescription vmstate_onenand = { + .name = "onenand", + .version_id = 1, + .minimum_version_id = 1, + .pre_save = onenand_pre_save, + .post_load = onenand_post_load, + .fields = (VMStateField[]) { + VMSTATE_UINT8(current_direction, OneNANDState), + VMSTATE_INT32(cycle, OneNANDState), + VMSTATE_INT32(otpmode, OneNANDState), + VMSTATE_UINT16_ARRAY(addr, OneNANDState, 8), + VMSTATE_UINT16_ARRAY(unladdr, OneNANDState, 8), + VMSTATE_INT32(bufaddr, OneNANDState), + VMSTATE_INT32(count, OneNANDState), + VMSTATE_UINT16(command, OneNANDState), + VMSTATE_UINT16_ARRAY(config, OneNANDState, 2), + VMSTATE_UINT16(status, OneNANDState), + VMSTATE_UINT16(intstatus, OneNANDState), + VMSTATE_UINT16(wpstatus, OneNANDState), + VMSTATE_INT32(secs_cur, OneNANDState), + VMSTATE_PARTIAL_VBUFFER(blockwp, OneNANDState, blocks), + VMSTATE_UINT8(ecc.cp, OneNANDState), + VMSTATE_UINT16_ARRAY(ecc.lp, OneNANDState, 2), + VMSTATE_UINT16(ecc.count, OneNANDState), + VMSTATE_BUFFER_POINTER_UNSAFE(otp, OneNANDState, 0, + ((64 + 2) << PAGE_SHIFT)), + VMSTATE_END_OF_LIST() + } +}; + +/* Hot reset (Reset OneNAND command) or warm reset (RP pin low) */ +static void onenand_reset(OneNANDState *s, int cold) +{ + memset(&s->addr, 0, sizeof(s->addr)); + s->command = 0; + s->count = 1; + s->bufaddr = 0; + s->config[0] = 0x40c0; + s->config[1] = 0x0000; + onenand_intr_update(s); + qemu_irq_raise(s->rdy); + s->status = 0x0000; + s->intstatus = cold ? 
0x8080 : 0x8010; + s->unladdr[0] = 0; + s->unladdr[1] = 0; + s->wpstatus = 0x0002; + s->cycle = 0; + s->otpmode = 0; + s->blk_cur = s->blk; + s->current = s->image; + s->secs_cur = s->secs; + + if (cold) { + /* Lock the whole flash */ + memset(s->blockwp, ONEN_LOCK_LOCKED, s->blocks); + + if (s->blk_cur && blk_pread(s->blk_cur, 0, s->boot[0], + 8 << BDRV_SECTOR_BITS) < 0) { + hw_error("%s: Loading the BootRAM failed.\n", __func__); + } + } +} + +static void onenand_system_reset(DeviceState *dev) +{ + OneNANDState *s = ONE_NAND(dev); + + onenand_reset(s, 1); +} + +static inline int onenand_load_main(OneNANDState *s, int sec, int secn, + void *dest) +{ + assert(UINT32_MAX >> BDRV_SECTOR_BITS > sec); + assert(UINT32_MAX >> BDRV_SECTOR_BITS > secn); + if (s->blk_cur) { + return blk_pread(s->blk_cur, sec << BDRV_SECTOR_BITS, dest, + secn << BDRV_SECTOR_BITS) < 0; + } else if (sec + secn > s->secs_cur) { + return 1; + } + + memcpy(dest, s->current + (sec << 9), secn << 9); + + return 0; +} + +static inline int onenand_prog_main(OneNANDState *s, int sec, int secn, + void *src) +{ + int result = 0; + + if (secn > 0) { + uint32_t size = secn << BDRV_SECTOR_BITS; + uint32_t offset = sec << BDRV_SECTOR_BITS; + assert(UINT32_MAX >> BDRV_SECTOR_BITS > sec); + assert(UINT32_MAX >> BDRV_SECTOR_BITS > secn); + const uint8_t *sp = (const uint8_t *)src; + uint8_t *dp = 0; + if (s->blk_cur) { + dp = g_malloc(size); + if (!dp || blk_pread(s->blk_cur, offset, dp, size) < 0) { + result = 1; + } + } else { + if (sec + secn > s->secs_cur) { + result = 1; + } else { + dp = (uint8_t *)s->current + offset; + } + } + if (!result) { + uint32_t i; + for (i = 0; i < size; i++) { + dp[i] &= sp[i]; + } + if (s->blk_cur) { + result = blk_pwrite(s->blk_cur, offset, dp, size, 0) < 0; + } + } + if (dp && s->blk_cur) { + g_free(dp); + } + } + + return result; +} + +static inline int onenand_load_spare(OneNANDState *s, int sec, int secn, + void *dest) +{ + uint8_t buf[512]; + + if (s->blk_cur) { + uint32_t offset = (s->secs_cur + (sec >> 5)) << BDRV_SECTOR_BITS; + if (blk_pread(s->blk_cur, offset, buf, BDRV_SECTOR_SIZE) < 0) { + return 1; + } + memcpy(dest, buf + ((sec & 31) << 4), secn << 4); + } else if (sec + secn > s->secs_cur) { + return 1; + } else { + memcpy(dest, s->current + (s->secs_cur << 9) + (sec << 4), secn << 4); + } + + return 0; +} + +static inline int onenand_prog_spare(OneNANDState *s, int sec, int secn, + void *src) +{ + int result = 0; + if (secn > 0) { + const uint8_t *sp = (const uint8_t *)src; + uint8_t *dp = 0, *dpp = 0; + uint32_t offset = (s->secs_cur + (sec >> 5)) << BDRV_SECTOR_BITS; + assert(UINT32_MAX >> BDRV_SECTOR_BITS > s->secs_cur + (sec >> 5)); + if (s->blk_cur) { + dp = g_malloc(512); + if (!dp + || blk_pread(s->blk_cur, offset, dp, BDRV_SECTOR_SIZE) < 0) { + result = 1; + } else { + dpp = dp + ((sec & 31) << 4); + } + } else { + if (sec + secn > s->secs_cur) { + result = 1; + } else { + dpp = s->current + (s->secs_cur << 9) + (sec << 4); + } + } + if (!result) { + uint32_t i; + for (i = 0; i < (secn << 4); i++) { + dpp[i] &= sp[i]; + } + if (s->blk_cur) { + result = blk_pwrite(s->blk_cur, offset, dp, + BDRV_SECTOR_SIZE, 0) < 0; + } + } + g_free(dp); + } + return result; +} + +static inline int onenand_erase(OneNANDState *s, int sec, int num) +{ + uint8_t *blankbuf, *tmpbuf; + + blankbuf = g_malloc(512); + tmpbuf = g_malloc(512); + memset(blankbuf, 0xff, 512); + for (; num > 0; num--, sec++) { + if (s->blk_cur) { + int erasesec = s->secs_cur + (sec >> 5); + if (blk_pwrite(s->blk_cur, sec << 
BDRV_SECTOR_BITS, blankbuf,
+                           BDRV_SECTOR_SIZE, 0) < 0) {
+                goto fail;
+            }
+            if (blk_pread(s->blk_cur, erasesec << BDRV_SECTOR_BITS, tmpbuf,
+                          BDRV_SECTOR_SIZE) < 0) {
+                goto fail;
+            }
+            memcpy(tmpbuf + ((sec & 31) << 4), blankbuf, 1 << 4);
+            if (blk_pwrite(s->blk_cur, erasesec << BDRV_SECTOR_BITS, tmpbuf,
+                           BDRV_SECTOR_SIZE, 0) < 0) {
+                goto fail;
+            }
+        } else {
+            if (sec + 1 > s->secs_cur) {
+                goto fail;
+            }
+            memcpy(s->current + (sec << 9), blankbuf, 512);
+            memcpy(s->current + (s->secs_cur << 9) + (sec << 4),
+                   blankbuf, 1 << 4);
+        }
+    }
+
+    g_free(tmpbuf);
+    g_free(blankbuf);
+    return 0;
+
+fail:
+    g_free(tmpbuf);
+    g_free(blankbuf);
+    return 1;
+}
+
+static void onenand_command(OneNANDState *s)
+{
+    int b;
+    int sec;
+    void *buf;
+#define SETADDR(block, page)                                        \
+    sec = (s->addr[page] & 3) +                                     \
+          ((((s->addr[page] >> 2) & 0x3f) +                         \
+            (((s->addr[block] & 0xfff) |                            \
+              (s->addr[block] >> 15 ?                               \
+               s->density_mask : 0)) << 6)) << (PAGE_SHIFT - 9));
+#define SETBUF_M()                                                  \
+    buf = (s->bufaddr & 8) ?                                        \
+          s->data[(s->bufaddr >> 2) & 1][0] : s->boot[0];           \
+    buf += (s->bufaddr & 3) << 9;
+#define SETBUF_S()                                                  \
+    buf = (s->bufaddr & 8) ?                                        \
+          s->data[(s->bufaddr >> 2) & 1][1] : s->boot[1];           \
+    buf += (s->bufaddr & 3) << 4;
+
+    switch (s->command) {
+    case 0x00:  /* Load single/multiple sector data unit into buffer */
+        SETADDR(ONEN_BUF_BLOCK, ONEN_BUF_PAGE)
+
+        SETBUF_M()
+        if (onenand_load_main(s, sec, s->count, buf))
+            s->status |= ONEN_ERR_CMD | ONEN_ERR_LOAD;
+
+#if 0
+        SETBUF_S()
+        if (onenand_load_spare(s, sec, s->count, buf))
+            s->status |= ONEN_ERR_CMD | ONEN_ERR_LOAD;
+#endif
+
+        /* TODO: if (s->bufaddr & 3) + s->count was > 4 (2k-pages)
+         * or if (s->bufaddr & 1) + s->count was > 2 (1k-pages)
+         * then we need to split the read/write into two chunks.
+         */
+        s->intstatus |= ONEN_INT | ONEN_INT_LOAD;
+        break;
+    case 0x13:  /* Load single/multiple spare sector into buffer */
+        SETADDR(ONEN_BUF_BLOCK, ONEN_BUF_PAGE)
+
+        SETBUF_S()
+        if (onenand_load_spare(s, sec, s->count, buf))
+            s->status |= ONEN_ERR_CMD | ONEN_ERR_LOAD;
+
+        /* TODO: if (s->bufaddr & 3) + s->count was > 4 (2k-pages)
+         * or if (s->bufaddr & 1) + s->count was > 2 (1k-pages)
+         * then we need to split the read/write into two chunks.
+         */
+        s->intstatus |= ONEN_INT | ONEN_INT_LOAD;
+        break;
+    case 0x80:  /* Program single/multiple sector data unit from buffer */
+        SETADDR(ONEN_BUF_BLOCK, ONEN_BUF_PAGE)
+
+        SETBUF_M()
+        if (onenand_prog_main(s, sec, s->count, buf))
+            s->status |= ONEN_ERR_CMD | ONEN_ERR_PROG;
+
+#if 0
+        SETBUF_S()
+        if (onenand_prog_spare(s, sec, s->count, buf))
+            s->status |= ONEN_ERR_CMD | ONEN_ERR_PROG;
+#endif
+
+        /* TODO: if (s->bufaddr & 3) + s->count was > 4 (2k-pages)
+         * or if (s->bufaddr & 1) + s->count was > 2 (1k-pages)
+         * then we need to split the read/write into two chunks.
+         */
+        s->intstatus |= ONEN_INT | ONEN_INT_PROG;
+        break;
+    case 0x1a:  /* Program single/multiple spare area sector from buffer */
+        SETADDR(ONEN_BUF_BLOCK, ONEN_BUF_PAGE)
+
+        SETBUF_S()
+        if (onenand_prog_spare(s, sec, s->count, buf))
+            s->status |= ONEN_ERR_CMD | ONEN_ERR_PROG;
+
+        /* TODO: if (s->bufaddr & 3) + s->count was > 4 (2k-pages)
+         * or if (s->bufaddr & 1) + s->count was > 2 (1k-pages)
+         * then we need to split the read/write into two chunks.
+ */ + s->intstatus |= ONEN_INT | ONEN_INT_PROG; + break; + case 0x1b: /* Copy-back program */ + SETBUF_S() + + SETADDR(ONEN_BUF_BLOCK, ONEN_BUF_PAGE) + if (onenand_load_main(s, sec, s->count, buf)) + s->status |= ONEN_ERR_CMD | ONEN_ERR_PROG; + + SETADDR(ONEN_BUF_DEST_BLOCK, ONEN_BUF_DEST_PAGE) + if (onenand_prog_main(s, sec, s->count, buf)) + s->status |= ONEN_ERR_CMD | ONEN_ERR_PROG; + + /* TODO: spare areas */ + + s->intstatus |= ONEN_INT | ONEN_INT_PROG; + break; + + case 0x23: /* Unlock NAND array block(s) */ + s->intstatus |= ONEN_INT; + + /* XXX the previous (?) area should be locked automatically */ + for (b = s->unladdr[0]; b <= s->unladdr[1]; b ++) { + if (b >= s->blocks) { + s->status |= ONEN_ERR_CMD; + break; + } + if (s->blockwp[b] == ONEN_LOCK_LOCKTIGHTEN) + break; + + s->wpstatus = s->blockwp[b] = ONEN_LOCK_UNLOCKED; + } + break; + case 0x27: /* Unlock All NAND array blocks */ + s->intstatus |= ONEN_INT; + + for (b = 0; b < s->blocks; b ++) { + if (s->blockwp[b] == ONEN_LOCK_LOCKTIGHTEN) + break; + + s->wpstatus = s->blockwp[b] = ONEN_LOCK_UNLOCKED; + } + break; + + case 0x2a: /* Lock NAND array block(s) */ + s->intstatus |= ONEN_INT; + + for (b = s->unladdr[0]; b <= s->unladdr[1]; b ++) { + if (b >= s->blocks) { + s->status |= ONEN_ERR_CMD; + break; + } + if (s->blockwp[b] == ONEN_LOCK_LOCKTIGHTEN) + break; + + s->wpstatus = s->blockwp[b] = ONEN_LOCK_LOCKED; + } + break; + case 0x2c: /* Lock-tight NAND array block(s) */ + s->intstatus |= ONEN_INT; + + for (b = s->unladdr[0]; b <= s->unladdr[1]; b ++) { + if (b >= s->blocks) { + s->status |= ONEN_ERR_CMD; + break; + } + if (s->blockwp[b] == ONEN_LOCK_UNLOCKED) + continue; + + s->wpstatus = s->blockwp[b] = ONEN_LOCK_LOCKTIGHTEN; + } + break; + + case 0x71: /* Erase-Verify-Read */ + s->intstatus |= ONEN_INT; + break; + case 0x95: /* Multi-block erase */ + qemu_irq_pulse(s->intr); + /* Fall through. */ + case 0x94: /* Block erase */ + sec = ((s->addr[ONEN_BUF_BLOCK] & 0xfff) | + (s->addr[ONEN_BUF_BLOCK] >> 15 ? s->density_mask : 0)) + << (BLOCK_SHIFT - 9); + if (onenand_erase(s, sec, 1 << (BLOCK_SHIFT - 9))) + s->status |= ONEN_ERR_CMD | ONEN_ERR_ERASE; + + s->intstatus |= ONEN_INT | ONEN_INT_ERASE; + break; + case 0xb0: /* Erase suspend */ + break; + case 0x30: /* Erase resume */ + s->intstatus |= ONEN_INT | ONEN_INT_ERASE; + break; + + case 0xf0: /* Reset NAND Flash core */ + onenand_reset(s, 0); + break; + case 0xf3: /* Reset OneNAND */ + onenand_reset(s, 0); + break; + + case 0x65: /* OTP Access */ + s->intstatus |= ONEN_INT; + s->blk_cur = NULL; + s->current = s->otp; + s->secs_cur = 1 << (BLOCK_SHIFT - 9); + s->addr[ONEN_BUF_BLOCK] = 0; + s->otpmode = 1; + break; + + default: + s->status |= ONEN_ERR_CMD; + s->intstatus |= ONEN_INT; + qemu_log_mask(LOG_GUEST_ERROR, "unknown OneNAND command %x\n", + s->command); + } + + onenand_intr_update(s); +} + +static uint64_t onenand_read(void *opaque, hwaddr addr, + unsigned size) +{ + OneNANDState *s = (OneNANDState *) opaque; + int offset = addr >> s->shift; + + switch (offset) { + case 0x0000 ... 0xbffe: + return lduw_le_p(s->boot[0] + addr); + + case 0xf000: /* Manufacturer ID */ + return s->id.man; + case 0xf001: /* Device ID */ + return s->id.dev; + case 0xf002: /* Version ID */ + return s->id.ver; + /* TODO: get the following values from a real chip! 
+     */
+    case 0xf003:    /* Data Buffer size */
+        return 1 << PAGE_SHIFT;
+    case 0xf004:    /* Boot Buffer size */
+        return 0x200;
+    case 0xf005:    /* Amount of buffers */
+        return 1 | (2 << 8);
+    case 0xf006:    /* Technology */
+        return 0;
+
+    case 0xf100 ... 0xf107: /* Start addresses */
+        return s->addr[offset - 0xf100];
+
+    case 0xf200:    /* Start buffer */
+        return (s->bufaddr << 8) | ((s->count - 1) & (1 << (PAGE_SHIFT - 10)));
+
+    case 0xf220:    /* Command */
+        return s->command;
+    case 0xf221:    /* System Configuration 1 */
+        return s->config[0] & 0xffe0;
+    case 0xf222:    /* System Configuration 2 */
+        return s->config[1];
+
+    case 0xf240:    /* Controller Status */
+        return s->status;
+    case 0xf241:    /* Interrupt */
+        return s->intstatus;
+    case 0xf24c:    /* Unlock Start Block Address */
+        return s->unladdr[0];
+    case 0xf24d:    /* Unlock End Block Address */
+        return s->unladdr[1];
+    case 0xf24e:    /* Write Protection Status */
+        return s->wpstatus;
+
+    case 0xff00:    /* ECC Status */
+        return 0x00;
+    case 0xff01:    /* ECC Result of main area data */
+    case 0xff02:    /* ECC Result of spare area data */
+    case 0xff03:    /* ECC Result of main area data */
+    case 0xff04:    /* ECC Result of spare area data */
+        qemu_log_mask(LOG_UNIMP,
+                      "onenand: ECC result registers unimplemented\n");
+        return 0x0000;
+    }
+
+    qemu_log_mask(LOG_GUEST_ERROR, "read of unknown OneNAND register 0x%x\n",
+                  offset);
+    return 0;
+}
+
+static void onenand_write(void *opaque, hwaddr addr,
+                          uint64_t value, unsigned size)
+{
+    OneNANDState *s = (OneNANDState *) opaque;
+    int offset = addr >> s->shift;
+    int sec;
+
+    switch (offset) {
+    case 0x0000 ... 0x01ff:
+    case 0x8000 ... 0x800f:
+        if (s->cycle) {
+            s->cycle = 0;
+
+            if (value == 0x0000) {
+                SETADDR(ONEN_BUF_BLOCK, ONEN_BUF_PAGE)
+                onenand_load_main(s, sec,
+                                  1 << (PAGE_SHIFT - 9), s->data[0][0]);
+                s->addr[ONEN_BUF_PAGE] += 4;
+                s->addr[ONEN_BUF_PAGE] &= 0xff;
+            }
+            break;
+        }
+
+        switch (value) {
+        case 0x00f0:    /* Reset OneNAND */
+            onenand_reset(s, 0);
+            break;
+
+        case 0x00e0:    /* Load Data into Buffer */
+            s->cycle = 1;
+            break;
+
+        case 0x0090:    /* Read Identification Data */
+            memset(s->boot[0], 0, 3 << s->shift);
+            s->boot[0][0 << s->shift] = s->id.man & 0xff;
+            s->boot[0][1 << s->shift] = s->id.dev & 0xff;
+            s->boot[0][2 << s->shift] = s->wpstatus & 0xff;
+            break;
+
+        default:
+            qemu_log_mask(LOG_GUEST_ERROR,
+                          "unknown OneNAND boot command %" PRIx64 "\n",
+                          value);
+        }
+        break;
+
+    case 0xf100 ... 0xf107: /* Start addresses */
+        s->addr[offset - 0xf100] = value;
+        break;
+
+    case 0xf200:    /* Start buffer */
+        s->bufaddr = (value >> 8) & 0xf;
+        if (PAGE_SHIFT == 11)
+            s->count = (value & 3) ?: 4;
+        else if (PAGE_SHIFT == 10)
+            s->count = (value & 1) ?: 2;
+        break;
+
+    case 0xf220:    /* Command */
+        if (s->intstatus & (1 << 15))
+            break;
+        s->command = value;
+        onenand_command(s);
+        break;
+    case 0xf221:    /* System Configuration 1 */
+        s->config[0] = value;
+        onenand_intr_update(s);
+        qemu_set_irq(s->rdy, (s->config[0] >> 7) & 1);
+        break;
+    case 0xf222:    /* System Configuration 2 */
+        s->config[1] = value;
+        break;
+
+    case 0xf241:    /* Interrupt */
+        s->intstatus &= value;
+        if ((1 << 15) & ~s->intstatus)
+            s->status &= ~(ONEN_ERR_CMD | ONEN_ERR_ERASE |
+                           ONEN_ERR_PROG | ONEN_ERR_LOAD);
+        onenand_intr_update(s);
+        break;
+    case 0xf24c:    /* Unlock Start Block Address */
+        s->unladdr[0] = value & (s->blocks - 1);
+        /* For some reason we have to set the end address to be the same
+         * as the start by default, because the software forgets to write
+         * anything there.
*/ + s->unladdr[1] = value & (s->blocks - 1); + break; + case 0xf24d: /* Unlock End Block Address */ + s->unladdr[1] = value & (s->blocks - 1); + break; + + default: + qemu_log_mask(LOG_GUEST_ERROR, + "write to unknown OneNAND register 0x%x\n", + offset); + } +} + +static const MemoryRegionOps onenand_ops = { + .read = onenand_read, + .write = onenand_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void onenand_realize(DeviceState *dev, Error **errp) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + OneNANDState *s = ONE_NAND(dev); + uint32_t size = 1 << (24 + ((s->id.dev >> 4) & 7)); + void *ram; + Error *local_err = NULL; + + s->base = (hwaddr)-1; + s->rdy = NULL; + s->blocks = size >> BLOCK_SHIFT; + s->secs = size >> 9; + s->blockwp = g_malloc(s->blocks); + s->density_mask = (s->id.dev & 0x08) + ? (1 << (6 + ((s->id.dev >> 4) & 7))) : 0; + memory_region_init_io(&s->iomem, OBJECT(s), &onenand_ops, s, "onenand", + 0x10000 << s->shift); + if (!s->blk) { + s->image = memset(g_malloc(size + (size >> 5)), + 0xff, size + (size >> 5)); + } else { + if (!blk_supports_write_perm(s->blk)) { + error_setg(errp, "Can't use a read-only drive"); + return; + } + blk_set_perm(s->blk, BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE, + BLK_PERM_ALL, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + s->blk_cur = s->blk; + } + s->otp = memset(g_malloc((64 + 2) << PAGE_SHIFT), + 0xff, (64 + 2) << PAGE_SHIFT); + memory_region_init_ram_nomigrate(&s->ram, OBJECT(s), "onenand.ram", + 0xc000 << s->shift, &error_fatal); + vmstate_register_ram_global(&s->ram); + ram = memory_region_get_ram_ptr(&s->ram); + s->boot[0] = ram + (0x0000 << s->shift); + s->boot[1] = ram + (0x8000 << s->shift); + s->data[0][0] = ram + ((0x0200 + (0 << (PAGE_SHIFT - 1))) << s->shift); + s->data[0][1] = ram + ((0x8010 + (0 << (PAGE_SHIFT - 6))) << s->shift); + s->data[1][0] = ram + ((0x0200 + (1 << (PAGE_SHIFT - 1))) << s->shift); + s->data[1][1] = ram + ((0x8010 + (1 << (PAGE_SHIFT - 6))) << s->shift); + onenand_mem_setup(s); + sysbus_init_irq(sbd, &s->intr); + sysbus_init_mmio(sbd, &s->container); + vmstate_register(VMSTATE_IF(dev), + ((s->shift & 0x7f) << 24) + | ((s->id.man & 0xff) << 16) + | ((s->id.dev & 0xff) << 8) + | (s->id.ver & 0xff), + &vmstate_onenand, s); +} + +static Property onenand_properties[] = { + DEFINE_PROP_UINT16("manufacturer_id", OneNANDState, id.man, 0), + DEFINE_PROP_UINT16("device_id", OneNANDState, id.dev, 0), + DEFINE_PROP_UINT16("version_id", OneNANDState, id.ver, 0), + DEFINE_PROP_INT32("shift", OneNANDState, shift, 0), + DEFINE_PROP_DRIVE("drive", OneNANDState, blk), + DEFINE_PROP_END_OF_LIST(), +}; + +static void onenand_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = onenand_realize; + dc->reset = onenand_system_reset; + device_class_set_props(dc, onenand_properties); +} + +static const TypeInfo onenand_info = { + .name = TYPE_ONE_NAND, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(OneNANDState), + .class_init = onenand_class_init, +}; + +static void onenand_register_types(void) +{ + type_register_static(&onenand_info); +} + +void *onenand_raw_otp(DeviceState *onenand_device) +{ + OneNANDState *s = ONE_NAND(onenand_device); + + return s->otp; +} + +type_init(onenand_register_types) diff --git a/hw/block/pflash_cfi01.c b/hw/block/pflash_cfi01.c new file mode 100644 index 000000000..81f9f971d --- /dev/null +++ b/hw/block/pflash_cfi01.c @@ -0,0 +1,1043 @@ +/* + * CFI parallel flash with Intel command set 
emulation
+ *
+ * Copyright (c) 2006 Thorsten Zitterell
+ * Copyright (c) 2005 Jocelyn Mayer
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * For now, this code can emulate flashes of 1, 2 or 4 bytes width.
+ * Supported commands/modes are:
+ * - flash read
+ * - flash write
+ * - flash ID read
+ * - sector erase
+ * - CFI queries
+ *
+ * It does not support timings
+ * It does not support flash interleaving
+ * It does not implement software data protection as found in many real chips
+ * It does not implement erase suspend/resume commands
+ * It does not implement multiple sectors erase
+ *
+ * It does not implement much more ...
+ */
+
+#include "qemu/osdep.h"
+#include "hw/block/block.h"
+#include "hw/block/flash.h"
+#include "hw/qdev-properties.h"
+#include "hw/qdev-properties-system.h"
+#include "sysemu/block-backend.h"
+#include "qapi/error.h"
+#include "qemu/error-report.h"
+#include "qemu/bitops.h"
+#include "qemu/host-utils.h"
+#include "qemu/log.h"
+#include "qemu/module.h"
+#include "qemu/option.h"
+#include "hw/sysbus.h"
+#include "migration/vmstate.h"
+#include "sysemu/blockdev.h"
+#include "sysemu/runstate.h"
+#include "trace.h"
+
+#define PFLASH_BE      0
+#define PFLASH_SECURE  1
+
+struct PFlashCFI01 {
+    /*< private >*/
+    SysBusDevice parent_obj;
+    /*< public >*/
+
+    BlockBackend *blk;
+    uint32_t nb_blocs;
+    uint64_t sector_len;
+    uint8_t bank_width;
+    uint8_t device_width; /* If 0, device width not specified. */
+    uint8_t max_device_width; /* max device width in bytes */
+    uint32_t features;
+    uint8_t wcycle; /* if 0, the flash is read normally */
+    bool ro;
+    uint8_t cmd;
+    uint8_t status;
+    uint16_t ident0;
+    uint16_t ident1;
+    uint16_t ident2;
+    uint16_t ident3;
+    uint8_t cfi_table[0x52];
+    uint64_t counter;
+    unsigned int writeblock_size;
+    MemoryRegion mem;
+    char *name;
+    void *storage;
+    VMChangeStateEntry *vmstate;
+    bool old_multiple_chip_handling;
+};
+
+static int pflash_post_load(void *opaque, int version_id);
+
+static const VMStateDescription vmstate_pflash = {
+    .name = "pflash_cfi01",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .post_load = pflash_post_load,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT8(wcycle, PFlashCFI01),
+        VMSTATE_UINT8(cmd, PFlashCFI01),
+        VMSTATE_UINT8(status, PFlashCFI01),
+        VMSTATE_UINT64(counter, PFlashCFI01),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+/*
+ * Perform a CFI query based on the bank width of the flash.
+ * If this code is called we know we have a device_width set for
+ * this flash.
+ */
+static uint32_t pflash_cfi_query(PFlashCFI01 *pfl, hwaddr offset)
+{
+    int i;
+    uint32_t resp = 0;
+    hwaddr boff;
+
+    /*
+     * Adjust incoming offset to match expected device-width
+     * addressing. CFI query addresses are always specified in terms of
+     * the maximum supported width of the device. This means that x8
+     * devices and x8/x16 devices in x8 mode behave differently. For
+     * devices that are not used at their max width, we will be
+     * provided with addresses that use higher address bits than
+     * expected (based on the max width), so we will shift them lower
+     * so that they will match the addresses used when
+     * device_width==max_device_width.
+     */
+    boff = offset >> (ctz32(pfl->bank_width) +
+                      ctz32(pfl->max_device_width) - ctz32(pfl->device_width));
+
+    if (boff >= sizeof(pfl->cfi_table)) {
+        return 0;
+    }
+    /*
+     * Now we will construct the CFI response generated by a single
+     * device, then replicate that for all devices that make up the
+     * bus. For wide parts used in x8 mode, CFI query responses
+     * are different than native byte-wide parts.
+     */
+    resp = pfl->cfi_table[boff];
+    if (pfl->device_width != pfl->max_device_width) {
+        /* The only case currently supported is x8 mode for a
+         * wider part.
+         */
+        if (pfl->device_width != 1 || pfl->bank_width > 4) {
+            trace_pflash_unsupported_device_configuration(pfl->name,
+                                    pfl->device_width, pfl->max_device_width);
+            return 0;
+        }
+        /* CFI query data is repeated, rather than zero padded for
+         * wide devices used in x8 mode.
+         */
+        for (i = 1; i < pfl->max_device_width; i++) {
+            resp = deposit32(resp, 8 * i, 8, pfl->cfi_table[boff]);
+        }
+    }
+    /* Replicate responses for each device in bank. */
+    if (pfl->device_width < pfl->bank_width) {
+        for (i = pfl->device_width;
+             i < pfl->bank_width; i += pfl->device_width) {
+            resp = deposit32(resp, 8 * i, 8 * pfl->device_width, resp);
+        }
+    }
+
+    return resp;
+}
+
+
+
+/* Perform a device id query based on the bank width of the flash. */
+static uint32_t pflash_devid_query(PFlashCFI01 *pfl, hwaddr offset)
+{
+    int i;
+    uint32_t resp;
+    hwaddr boff;
+
+    /*
+     * Adjust incoming offset to match expected device-width
+     * addressing. Device ID read addresses are always specified in
+     * terms of the maximum supported width of the device. This means
+     * that x8 devices and x8/x16 devices in x8 mode behave
+     * differently. For devices that are not used at their max width,
+     * we will be provided with addresses that use higher address bits
+     * than expected (based on the max width), so we will shift them
+     * lower so that they will match the addresses used when
+     * device_width==max_device_width.
+     */
+    boff = offset >> (ctz32(pfl->bank_width) +
+                      ctz32(pfl->max_device_width) - ctz32(pfl->device_width));
+
+    /*
+     * Mask off upper bits which may be used to query block
+     * or sector lock status at other addresses.
+     * Offsets 2/3 are block lock status, which is not emulated.
+     */
+    switch (boff & 0xFF) {
+    case 0:
+        resp = pfl->ident0;
+        trace_pflash_manufacturer_id(pfl->name, resp);
+        break;
+    case 1:
+        resp = pfl->ident1;
+        trace_pflash_device_id(pfl->name, resp);
+        break;
+    default:
+        trace_pflash_device_info(pfl->name, offset);
+        return 0;
+    }
+    /* Replicate responses for each device in bank. */
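+    /*
+     * For example, two x16 devices (device_width == 2) forming a 32-bit
+     * bank (bank_width == 4) return the same 16-bit answer in both
+     * halves of the 32-bit response.
+     */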
+    if (pfl->device_width < pfl->bank_width) {
+        for (i = pfl->device_width;
+             i < pfl->bank_width; i += pfl->device_width) {
+            resp = deposit32(resp, 8 * i, 8 * pfl->device_width, resp);
+        }
+    }
+
+    return resp;
+}
+
+static uint32_t pflash_data_read(PFlashCFI01 *pfl, hwaddr offset,
+                                 int width, int be)
+{
+    uint8_t *p;
+    uint32_t ret;
+
+    p = pfl->storage;
+    switch (width) {
+    case 1:
+        ret = p[offset];
+        break;
+    case 2:
+        if (be) {
+            ret = p[offset] << 8;
+            ret |= p[offset + 1];
+        } else {
+            ret = p[offset];
+            ret |= p[offset + 1] << 8;
+        }
+        break;
+    case 4:
+        if (be) {
+            ret = p[offset] << 24;
+            ret |= p[offset + 1] << 16;
+            ret |= p[offset + 2] << 8;
+            ret |= p[offset + 3];
+        } else {
+            ret = p[offset];
+            ret |= p[offset + 1] << 8;
+            ret |= p[offset + 2] << 16;
+            ret |= p[offset + 3] << 24;
+        }
+        break;
+    default:
+        abort();
+    }
+    trace_pflash_data_read(pfl->name, offset, width, ret);
+    return ret;
+}
+
+static uint32_t pflash_read(PFlashCFI01 *pfl, hwaddr offset,
+                            int width, int be)
+{
+    hwaddr boff;
+    uint32_t ret;
+
+    ret = -1;
+    switch (pfl->cmd) {
+    default:
+        /* This should never happen : reset state & treat it as a read */
+        trace_pflash_read_unknown_state(pfl->name, pfl->cmd);
+        pfl->wcycle = 0;
+        /*
+         * The command 0x00 is not assigned by the CFI open standard,
+         * but QEMU historically uses it for the READ_ARRAY command (0xff).
+         */
+        pfl->cmd = 0x00;
+        /* fall through to read code */
+    case 0x00: /* This model reset value for READ_ARRAY (not CFI compliant) */
+        /* Flash area read */
+        ret = pflash_data_read(pfl, offset, width, be);
+        break;
+    case 0x10: /* Single byte program */
+    case 0x20: /* Block erase */
+    case 0x28: /* Block erase */
+    case 0x40: /* single byte program */
+    case 0x50: /* Clear status register */
+    case 0x60: /* Block (un)lock */
+    case 0x70: /* Status Register */
+    case 0xe8: /* Write block */
+        /*
+         * Status register read. Return status from each device in
+         * bank.
+         */
+        ret = pfl->status;
+        if (pfl->device_width && width > pfl->device_width) {
+            int shift = pfl->device_width * 8;
+            while (shift + pfl->device_width * 8 <= width * 8) {
+                ret |= pfl->status << shift;
+                shift += pfl->device_width * 8;
+            }
+        } else if (!pfl->device_width && width > 2) {
+            /*
+             * Handle 32 bit flash cases where device width is not
+             * set. (Existing behavior before device width added.)
+             */
+            ret |= pfl->status << 16;
+        }
+        trace_pflash_read_status(pfl->name, ret);
+        break;
+    case 0x90:
+        if (!pfl->device_width) {
+            /* Preserve old behavior if device width not specified */
+            boff = offset & 0xFF;
+            if (pfl->bank_width == 2) {
+                boff = boff >> 1;
+            } else if (pfl->bank_width == 4) {
+                boff = boff >> 2;
+            }
+
+            switch (boff) {
+            case 0:
+                ret = pfl->ident0 << 8 | pfl->ident1;
+                trace_pflash_manufacturer_id(pfl->name, ret);
+                break;
+            case 1:
+                ret = pfl->ident2 << 8 | pfl->ident3;
+                trace_pflash_device_id(pfl->name, ret);
+                break;
+            default:
+                trace_pflash_device_info(pfl->name, boff);
+                ret = 0;
+                break;
+            }
+        } else {
+            /*
+             * If we have a read larger than the bank_width, combine multiple
+             * manufacturer/device ID queries into a single response.
+ */ + int i; + for (i = 0; i < width; i += pfl->bank_width) { + ret = deposit32(ret, i * 8, pfl->bank_width * 8, + pflash_devid_query(pfl, + offset + i * pfl->bank_width)); + } + } + break; + case 0x98: /* Query mode */ + if (!pfl->device_width) { + /* Preserve old behavior if device width not specified */ + boff = offset & 0xFF; + if (pfl->bank_width == 2) { + boff = boff >> 1; + } else if (pfl->bank_width == 4) { + boff = boff >> 2; + } + + if (boff < sizeof(pfl->cfi_table)) { + ret = pfl->cfi_table[boff]; + } else { + ret = 0; + } + } else { + /* + * If we have a read larger than the bank_width, combine multiple + * CFI queries into a single response. + */ + int i; + for (i = 0; i < width; i += pfl->bank_width) { + ret = deposit32(ret, i * 8, pfl->bank_width * 8, + pflash_cfi_query(pfl, + offset + i * pfl->bank_width)); + } + } + + break; + } + trace_pflash_io_read(pfl->name, offset, width, ret, pfl->cmd, pfl->wcycle); + + return ret; +} + +/* update flash content on disk */ +static void pflash_update(PFlashCFI01 *pfl, int offset, + int size) +{ + int offset_end; + int ret; + if (pfl->blk) { + offset_end = offset + size; + /* widen to sector boundaries */ + offset = QEMU_ALIGN_DOWN(offset, BDRV_SECTOR_SIZE); + offset_end = QEMU_ALIGN_UP(offset_end, BDRV_SECTOR_SIZE); + ret = blk_pwrite(pfl->blk, offset, pfl->storage + offset, + offset_end - offset, 0); + if (ret < 0) { + /* TODO set error bit in status */ + error_report("Could not update PFLASH: %s", strerror(-ret)); + } + } +} + +static inline void pflash_data_write(PFlashCFI01 *pfl, hwaddr offset, + uint32_t value, int width, int be) +{ + uint8_t *p = pfl->storage; + + trace_pflash_data_write(pfl->name, offset, width, value, pfl->counter); + switch (width) { + case 1: + p[offset] = value; + break; + case 2: + if (be) { + p[offset] = value >> 8; + p[offset + 1] = value; + } else { + p[offset] = value; + p[offset + 1] = value >> 8; + } + break; + case 4: + if (be) { + p[offset] = value >> 24; + p[offset + 1] = value >> 16; + p[offset + 2] = value >> 8; + p[offset + 3] = value; + } else { + p[offset] = value; + p[offset + 1] = value >> 8; + p[offset + 2] = value >> 16; + p[offset + 3] = value >> 24; + } + break; + } + +} + +static void pflash_write(PFlashCFI01 *pfl, hwaddr offset, + uint32_t value, int width, int be) +{ + uint8_t *p; + uint8_t cmd; + + cmd = value; + + trace_pflash_io_write(pfl->name, offset, width, value, pfl->wcycle); + if (!pfl->wcycle) { + /* Set the device in I/O access mode */ + memory_region_rom_device_set_romd(&pfl->mem, false); + } + + switch (pfl->wcycle) { + case 0: + /* read mode */ + switch (cmd) { + case 0x00: /* This model reset value for READ_ARRAY (not CFI) */ + goto mode_read_array; + case 0x10: /* Single Byte Program */ + case 0x40: /* Single Byte Program */ + trace_pflash_write(pfl->name, "single byte program (0)"); + break; + case 0x20: /* Block erase */ + p = pfl->storage; + offset &= ~(pfl->sector_len - 1); + + trace_pflash_write_block_erase(pfl->name, offset, pfl->sector_len); + + if (!pfl->ro) { + memset(p + offset, 0xff, pfl->sector_len); + pflash_update(pfl, offset, pfl->sector_len); + } else { + pfl->status |= 0x20; /* Block erase error */ + } + pfl->status |= 0x80; /* Ready! 
*/ + break; + case 0x50: /* Clear status bits */ + trace_pflash_write(pfl->name, "clear status bits"); + pfl->status = 0x0; + goto mode_read_array; + case 0x60: /* Block (un)lock */ + trace_pflash_write(pfl->name, "block unlock"); + break; + case 0x70: /* Status Register */ + trace_pflash_write(pfl->name, "read status register"); + pfl->cmd = cmd; + return; + case 0x90: /* Read Device ID */ + trace_pflash_write(pfl->name, "read device information"); + pfl->cmd = cmd; + return; + case 0x98: /* CFI query */ + trace_pflash_write(pfl->name, "CFI query"); + break; + case 0xe8: /* Write to buffer */ + trace_pflash_write(pfl->name, "write to buffer"); + /* FIXME should save @offset, @width for case 1+ */ + qemu_log_mask(LOG_UNIMP, + "%s: Write to buffer emulation is flawed\n", + __func__); + pfl->status |= 0x80; /* Ready! */ + break; + case 0xf0: /* Probe for AMD flash */ + trace_pflash_write(pfl->name, "probe for AMD flash"); + goto mode_read_array; + case 0xff: /* Read Array */ + trace_pflash_write(pfl->name, "read array mode"); + goto mode_read_array; + default: + goto error_flash; + } + pfl->wcycle++; + pfl->cmd = cmd; + break; + case 1: + switch (pfl->cmd) { + case 0x10: /* Single Byte Program */ + case 0x40: /* Single Byte Program */ + trace_pflash_write(pfl->name, "single byte program (1)"); + if (!pfl->ro) { + pflash_data_write(pfl, offset, value, width, be); + pflash_update(pfl, offset, width); + } else { + pfl->status |= 0x10; /* Programming error */ + } + pfl->status |= 0x80; /* Ready! */ + pfl->wcycle = 0; + break; + case 0x20: /* Block erase */ + case 0x28: + if (cmd == 0xd0) { /* confirm */ + pfl->wcycle = 0; + pfl->status |= 0x80; + } else if (cmd == 0xff) { /* Read Array */ + goto mode_read_array; + } else + goto error_flash; + + break; + case 0xe8: + /* + * Mask writeblock size based on device width, or bank width if + * device width not specified. + */ + /* FIXME check @offset, @width */ + if (pfl->device_width) { + value = extract32(value, 0, pfl->device_width * 8); + } else { + value = extract32(value, 0, pfl->bank_width * 8); + } + trace_pflash_write_block(pfl->name, value); + pfl->counter = value; + pfl->wcycle++; + break; + case 0x60: + if (cmd == 0xd0) { + pfl->wcycle = 0; + pfl->status |= 0x80; + } else if (cmd == 0x01) { + pfl->wcycle = 0; + pfl->status |= 0x80; + } else if (cmd == 0xff) { /* Read Array */ + goto mode_read_array; + } else { + trace_pflash_write(pfl->name, "unknown (un)locking command"); + goto mode_read_array; + } + break; + case 0x98: + if (cmd == 0xff) { /* Read Array */ + goto mode_read_array; + } else { + trace_pflash_write(pfl->name, "leaving query mode"); + } + break; + default: + goto error_flash; + } + break; + case 2: + switch (pfl->cmd) { + case 0xe8: /* Block write */ + /* FIXME check @offset, @width */ + if (!pfl->ro) { + /* + * FIXME writing straight to memory is *wrong*. We + * should write to a buffer, and flush it to memory + * only on confirm command (see below). + */ + pflash_data_write(pfl, offset, value, width, be); + } else { + pfl->status |= 0x10; /* Programming error */ + } + + pfl->status |= 0x80; + + if (!pfl->counter) { + hwaddr mask = pfl->writeblock_size - 1; + mask = ~mask; + + trace_pflash_write(pfl->name, "block write finished"); + pfl->wcycle++; + if (!pfl->ro) { + /* Flush the entire write buffer onto backing storage. */ + /* FIXME premature! 
*/ + pflash_update(pfl, offset & mask, pfl->writeblock_size); + } else { + pfl->status |= 0x10; /* Programming error */ + } + } + + pfl->counter--; + break; + default: + goto error_flash; + } + break; + case 3: /* Confirm mode */ + switch (pfl->cmd) { + case 0xe8: /* Block write */ + if (cmd == 0xd0) { + /* FIXME this is where we should write out the buffer */ + pfl->wcycle = 0; + pfl->status |= 0x80; + } else { + qemu_log_mask(LOG_UNIMP, + "%s: Aborting write to buffer not implemented," + " the data is already written to storage!\n" + "Flash device reset into READ mode.\n", + __func__); + goto mode_read_array; + } + break; + default: + goto error_flash; + } + break; + default: + /* Should never happen */ + trace_pflash_write(pfl->name, "invalid write state"); + goto mode_read_array; + } + return; + + error_flash: + qemu_log_mask(LOG_UNIMP, "%s: Unimplemented flash cmd sequence " + "(offset " TARGET_FMT_plx ", wcycle 0x%x cmd 0x%x value 0x%x)" + "\n", __func__, offset, pfl->wcycle, pfl->cmd, value); + + mode_read_array: + trace_pflash_mode_read_array(pfl->name); + memory_region_rom_device_set_romd(&pfl->mem, true); + pfl->wcycle = 0; + pfl->cmd = 0x00; /* This model reset value for READ_ARRAY (not CFI) */ +} + + +static MemTxResult pflash_mem_read_with_attrs(void *opaque, hwaddr addr, uint64_t *value, + unsigned len, MemTxAttrs attrs) +{ + PFlashCFI01 *pfl = opaque; + bool be = !!(pfl->features & (1 << PFLASH_BE)); + + if ((pfl->features & (1 << PFLASH_SECURE)) && !attrs.secure) { + *value = pflash_data_read(opaque, addr, len, be); + } else { + *value = pflash_read(opaque, addr, len, be); + } + return MEMTX_OK; +} + +static MemTxResult pflash_mem_write_with_attrs(void *opaque, hwaddr addr, uint64_t value, + unsigned len, MemTxAttrs attrs) +{ + PFlashCFI01 *pfl = opaque; + bool be = !!(pfl->features & (1 << PFLASH_BE)); + + if ((pfl->features & (1 << PFLASH_SECURE)) && !attrs.secure) { + return MEMTX_ERROR; + } else { + pflash_write(opaque, addr, value, len, be); + return MEMTX_OK; + } +} + +static const MemoryRegionOps pflash_cfi01_ops = { + .read_with_attrs = pflash_mem_read_with_attrs, + .write_with_attrs = pflash_mem_write_with_attrs, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void pflash_cfi01_fill_cfi_table(PFlashCFI01 *pfl) +{ + uint64_t blocks_per_device, sector_len_per_device, device_len; + int num_devices; + + /* + * These are only used to expose the parameters of each device + * in the cfi_table[]. + */ + num_devices = pfl->device_width ? 
(pfl->bank_width / pfl->device_width) : 1; + if (pfl->old_multiple_chip_handling) { + blocks_per_device = pfl->nb_blocs / num_devices; + sector_len_per_device = pfl->sector_len; + } else { + blocks_per_device = pfl->nb_blocs; + sector_len_per_device = pfl->sector_len / num_devices; + } + device_len = sector_len_per_device * blocks_per_device; + + /* Hardcoded CFI table */ + /* Standard "QRY" string */ + pfl->cfi_table[0x10] = 'Q'; + pfl->cfi_table[0x11] = 'R'; + pfl->cfi_table[0x12] = 'Y'; + /* Command set (Intel) */ + pfl->cfi_table[0x13] = 0x01; + pfl->cfi_table[0x14] = 0x00; + /* Primary extended table address (none) */ + pfl->cfi_table[0x15] = 0x31; + pfl->cfi_table[0x16] = 0x00; + /* Alternate command set (none) */ + pfl->cfi_table[0x17] = 0x00; + pfl->cfi_table[0x18] = 0x00; + /* Alternate extended table (none) */ + pfl->cfi_table[0x19] = 0x00; + pfl->cfi_table[0x1A] = 0x00; + /* Vcc min */ + pfl->cfi_table[0x1B] = 0x45; + /* Vcc max */ + pfl->cfi_table[0x1C] = 0x55; + /* Vpp min (no Vpp pin) */ + pfl->cfi_table[0x1D] = 0x00; + /* Vpp max (no Vpp pin) */ + pfl->cfi_table[0x1E] = 0x00; + /* Reserved */ + pfl->cfi_table[0x1F] = 0x07; + /* Timeout for min size buffer write */ + pfl->cfi_table[0x20] = 0x07; + /* Typical timeout for block erase */ + pfl->cfi_table[0x21] = 0x0a; + /* Typical timeout for full chip erase (4096 ms) */ + pfl->cfi_table[0x22] = 0x00; + /* Reserved */ + pfl->cfi_table[0x23] = 0x04; + /* Max timeout for buffer write */ + pfl->cfi_table[0x24] = 0x04; + /* Max timeout for block erase */ + pfl->cfi_table[0x25] = 0x04; + /* Max timeout for chip erase */ + pfl->cfi_table[0x26] = 0x00; + /* Device size */ + pfl->cfi_table[0x27] = ctz32(device_len); /* + 1; */ + /* Flash device interface (8 & 16 bits) */ + pfl->cfi_table[0x28] = 0x02; + pfl->cfi_table[0x29] = 0x00; + /* Max number of bytes in multi-bytes write */ + if (pfl->bank_width == 1) { + pfl->cfi_table[0x2A] = 0x08; + } else { + pfl->cfi_table[0x2A] = 0x0B; + } + pfl->writeblock_size = 1 << pfl->cfi_table[0x2A]; + if (!pfl->old_multiple_chip_handling && num_devices > 1) { + pfl->writeblock_size *= num_devices; + } + + pfl->cfi_table[0x2B] = 0x00; + /* Number of erase block regions (uniform) */ + pfl->cfi_table[0x2C] = 0x01; + /* Erase block region 1 */ + pfl->cfi_table[0x2D] = blocks_per_device - 1; + pfl->cfi_table[0x2E] = (blocks_per_device - 1) >> 8; + pfl->cfi_table[0x2F] = sector_len_per_device >> 8; + pfl->cfi_table[0x30] = sector_len_per_device >> 16; + + /* Extended */ + pfl->cfi_table[0x31] = 'P'; + pfl->cfi_table[0x32] = 'R'; + pfl->cfi_table[0x33] = 'I'; + + pfl->cfi_table[0x34] = '1'; + pfl->cfi_table[0x35] = '0'; + + pfl->cfi_table[0x36] = 0x00; + pfl->cfi_table[0x37] = 0x00; + pfl->cfi_table[0x38] = 0x00; + pfl->cfi_table[0x39] = 0x00; + + pfl->cfi_table[0x3a] = 0x00; + + pfl->cfi_table[0x3b] = 0x00; + pfl->cfi_table[0x3c] = 0x00; + + pfl->cfi_table[0x3f] = 0x01; /* Number of protection fields */ +} + +static void pflash_cfi01_realize(DeviceState *dev, Error **errp) +{ + ERRP_GUARD(); + PFlashCFI01 *pfl = PFLASH_CFI01(dev); + uint64_t total_len; + int ret; + + if (pfl->sector_len == 0) { + error_setg(errp, "attribute \"sector-length\" not specified or zero."); + return; + } + if (pfl->nb_blocs == 0) { + error_setg(errp, "attribute \"num-blocks\" not specified or zero."); + return; + } + if (pfl->name == NULL) { + error_setg(errp, "attribute \"name\" not specified."); + return; + } + + total_len = pfl->sector_len * pfl->nb_blocs; + + memory_region_init_rom_device( + &pfl->mem, OBJECT(dev), + 
&pflash_cfi01_ops,
+        pfl,
+        pfl->name, total_len, errp);
+    if (*errp) {
+        return;
+    }
+
+    pfl->storage = memory_region_get_ram_ptr(&pfl->mem);
+    sysbus_init_mmio(SYS_BUS_DEVICE(dev), &pfl->mem);
+
+    if (pfl->blk) {
+        uint64_t perm;
+        pfl->ro = !blk_supports_write_perm(pfl->blk);
+        perm = BLK_PERM_CONSISTENT_READ | (pfl->ro ? 0 : BLK_PERM_WRITE);
+        ret = blk_set_perm(pfl->blk, perm, BLK_PERM_ALL, errp);
+        if (ret < 0) {
+            return;
+        }
+    } else {
+        pfl->ro = false;
+    }
+
+    if (pfl->blk) {
+        if (!blk_check_size_and_read_all(pfl->blk, pfl->storage, total_len,
+                                         errp)) {
+            vmstate_unregister_ram(&pfl->mem, DEVICE(pfl));
+            return;
+        }
+    }
+
+    /*
+     * Default to devices being used at their maximum device width. This was
+     * assumed before the device_width support was added.
+     */
+    if (!pfl->max_device_width) {
+        pfl->max_device_width = pfl->device_width;
+    }
+
+    pfl->wcycle = 0;
+    /*
+     * The command 0x00 is not assigned by the CFI open standard,
+     * but QEMU historically uses it for the READ_ARRAY command (0xff).
+     */
+    pfl->cmd = 0x00;
+    pfl->status = 0x80; /* WSM ready */
+    pflash_cfi01_fill_cfi_table(pfl);
+}
+
+static void pflash_cfi01_system_reset(DeviceState *dev)
+{
+    PFlashCFI01 *pfl = PFLASH_CFI01(dev);
+
+    trace_pflash_reset(pfl->name);
+    /*
+     * The command 0x00 is not assigned by the CFI open standard,
+     * but QEMU historically uses it for the READ_ARRAY command (0xff).
+     */
+    pfl->cmd = 0x00;
+    pfl->wcycle = 0;
+    memory_region_rom_device_set_romd(&pfl->mem, true);
+    /*
+     * The WSM ready timer occurs at most 150ns after system reset.
+     * This model deliberately ignores this delay.
+     */
+    pfl->status = 0x80;
+}
+
+static Property pflash_cfi01_properties[] = {
+    DEFINE_PROP_DRIVE("drive", PFlashCFI01, blk),
+    /* num-blocks is the number of blocks actually visible to the guest,
+     * i.e. the total size of the device divided by the sector length.
+     * If we're emulating flash devices wired in parallel, the actual
+     * number of blocks per individual device will differ.
+     */
+    DEFINE_PROP_UINT32("num-blocks", PFlashCFI01, nb_blocs, 0),
+    DEFINE_PROP_UINT64("sector-length", PFlashCFI01, sector_len, 0),
+    /* width here is the overall width of this QEMU device in bytes.
+     * The QEMU device may be emulating a number of flash devices
+     * wired up in parallel; the width of each individual flash
+     * device should be specified via device-width. If the individual
+     * devices have a maximum width which is greater than the width
+     * they are being used for, this maximum width should be set via
+     * max-device-width (which otherwise defaults to device-width).
+     * So for instance a 32-bit wide QEMU flash device made from four
+     * 16-bit flash devices used in 8-bit wide mode would be configured
+     * with width = 4, device-width = 1, max-device-width = 2.
+     *
+     * If device-width is not specified we default to backwards
+     * compatible behaviour which is a bad emulation of two
+     * 16 bit devices making up a 32 bit wide QEMU device. This
+     * is deprecated for new uses of this device.
+ */ + DEFINE_PROP_UINT8("width", PFlashCFI01, bank_width, 0), + DEFINE_PROP_UINT8("device-width", PFlashCFI01, device_width, 0), + DEFINE_PROP_UINT8("max-device-width", PFlashCFI01, max_device_width, 0), + DEFINE_PROP_BIT("big-endian", PFlashCFI01, features, PFLASH_BE, 0), + DEFINE_PROP_BIT("secure", PFlashCFI01, features, PFLASH_SECURE, 0), + DEFINE_PROP_UINT16("id0", PFlashCFI01, ident0, 0), + DEFINE_PROP_UINT16("id1", PFlashCFI01, ident1, 0), + DEFINE_PROP_UINT16("id2", PFlashCFI01, ident2, 0), + DEFINE_PROP_UINT16("id3", PFlashCFI01, ident3, 0), + DEFINE_PROP_STRING("name", PFlashCFI01, name), + DEFINE_PROP_BOOL("old-multiple-chip-handling", PFlashCFI01, + old_multiple_chip_handling, false), + DEFINE_PROP_END_OF_LIST(), +}; + +static void pflash_cfi01_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = pflash_cfi01_system_reset; + dc->realize = pflash_cfi01_realize; + device_class_set_props(dc, pflash_cfi01_properties); + dc->vmsd = &vmstate_pflash; + set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); +} + + +static const TypeInfo pflash_cfi01_info = { + .name = TYPE_PFLASH_CFI01, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(PFlashCFI01), + .class_init = pflash_cfi01_class_init, +}; + +static void pflash_cfi01_register_types(void) +{ + type_register_static(&pflash_cfi01_info); +} + +type_init(pflash_cfi01_register_types) + +PFlashCFI01 *pflash_cfi01_register(hwaddr base, + const char *name, + hwaddr size, + BlockBackend *blk, + uint32_t sector_len, + int bank_width, + uint16_t id0, uint16_t id1, + uint16_t id2, uint16_t id3, + int be) +{ + DeviceState *dev = qdev_new(TYPE_PFLASH_CFI01); + + if (blk) { + qdev_prop_set_drive(dev, "drive", blk); + } + assert(QEMU_IS_ALIGNED(size, sector_len)); + qdev_prop_set_uint32(dev, "num-blocks", size / sector_len); + qdev_prop_set_uint64(dev, "sector-length", sector_len); + qdev_prop_set_uint8(dev, "width", bank_width); + qdev_prop_set_bit(dev, "big-endian", !!be); + qdev_prop_set_uint16(dev, "id0", id0); + qdev_prop_set_uint16(dev, "id1", id1); + qdev_prop_set_uint16(dev, "id2", id2); + qdev_prop_set_uint16(dev, "id3", id3); + qdev_prop_set_string(dev, "name", name); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base); + return PFLASH_CFI01(dev); +} + +BlockBackend *pflash_cfi01_get_blk(PFlashCFI01 *fl) +{ + return fl->blk; +} + +MemoryRegion *pflash_cfi01_get_memory(PFlashCFI01 *fl) +{ + return &fl->mem; +} + +/* + * Handle -drive if=pflash for machines that use properties. + * If @dinfo is null, do nothing. + * Else if @fl's property "drive" is already set, fatal error. + * Else set it to the BlockBackend with @dinfo. + */ +void pflash_cfi01_legacy_drive(PFlashCFI01 *fl, DriveInfo *dinfo) +{ + Location loc; + + if (!dinfo) { + return; + } + + loc_push_none(&loc); + qemu_opts_loc_restore(dinfo->opts); + if (fl->blk) { + error_report("clashes with -machine"); + exit(1); + } + qdev_prop_set_drive_err(DEVICE(fl), "drive", blk_by_legacy_dinfo(dinfo), + &error_fatal); + loc_pop(&loc); +} + +static void postload_update_cb(void *opaque, bool running, RunState state) +{ + PFlashCFI01 *pfl = opaque; + + /* This is called after bdrv_invalidate_cache_all. 
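+     * The flash contents themselves live in the device's RAM-backed
+     * memory region (pfl->storage), so once migration has completed we
+     * flush the whole array back to the block backend to bring the
+     * backing image in sync.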
+     */
+    qemu_del_vm_change_state_handler(pfl->vmstate);
+    pfl->vmstate = NULL;
+
+    trace_pflash_postload_cb(pfl->name);
+    pflash_update(pfl, 0, pfl->sector_len * pfl->nb_blocs);
+}
+
+static int pflash_post_load(void *opaque, int version_id)
+{
+    PFlashCFI01 *pfl = opaque;
+
+    if (!pfl->ro) {
+        pfl->vmstate = qemu_add_vm_change_state_handler(postload_update_cb,
+                                                        pfl);
+    }
+    return 0;
+}
diff --git a/hw/block/pflash_cfi02.c b/hw/block/pflash_cfi02.c
new file mode 100644
index 000000000..02c514fb6
--- /dev/null
+++ b/hw/block/pflash_cfi02.c
@@ -0,0 +1,1031 @@
+/*
+ * CFI parallel flash with AMD command set emulation
+ *
+ * Copyright (c) 2005 Jocelyn Mayer
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * For now, this code can emulate flashes of 1, 2 or 4 bytes width.
+ * Supported commands/modes are:
+ * - flash read
+ * - flash write
+ * - flash ID read
+ * - sector erase
+ * - chip erase
+ * - unlock bypass command
+ * - CFI queries
+ *
+ * It does not support flash interleaving.
+ * It does not implement software data protection as found in many real chips
+ */
+
+#include "qemu/osdep.h"
+#include "hw/block/block.h"
+#include "hw/block/flash.h"
+#include "hw/qdev-properties.h"
+#include "hw/qdev-properties-system.h"
+#include "qapi/error.h"
+#include "qemu/error-report.h"
+#include "qemu/bitmap.h"
+#include "qemu/timer.h"
+#include "sysemu/block-backend.h"
+#include "qemu/host-utils.h"
+#include "qemu/module.h"
+#include "hw/sysbus.h"
+#include "migration/vmstate.h"
+#include "trace.h"
+
+#define PFLASH_LAZY_ROMD_THRESHOLD 42
+
+/*
+ * The size of the cfi_table indirectly depends on this and the start of the
+ * PRI table directly depends on it. 4 is the maximum size (and also what
+ * seems common) without changing the PRI table address.
+ */
+#define PFLASH_MAX_ERASE_REGIONS 4
+
+/* Special write cycles for CFI queries. */
+enum {
+    WCYCLE_CFI            = 7,
+    WCYCLE_AUTOSELECT_CFI = 8,
+};
+
+struct PFlashCFI02 {
+    /*< private >*/
+    SysBusDevice parent_obj;
+    /*< public >*/
+
+    BlockBackend *blk;
+    uint32_t uniform_nb_blocs;
+    uint32_t uniform_sector_len;
+    uint32_t total_sectors;
+    uint32_t nb_blocs[PFLASH_MAX_ERASE_REGIONS];
+    uint32_t sector_len[PFLASH_MAX_ERASE_REGIONS];
+    uint32_t chip_len;
+    uint8_t mappings;
+    uint8_t width;
+    uint8_t be;
+    int wcycle; /* if 0, the flash is read normally */
+    int bypass;
+    int ro;
+    uint8_t cmd;
+    uint8_t status;
+    /* FIXME: implement array device properties */
+    uint16_t ident0;
+    uint16_t ident1;
+    uint16_t ident2;
+    uint16_t ident3;
+    uint16_t unlock_addr0;
+    uint16_t unlock_addr1;
+    uint8_t cfi_table[0x4d];
+    QEMUTimer timer;
+    /*
+     * The device replicates the flash memory across its memory space. Emulate
+     * that by having a container (.mem) filled with an array of aliases
+     * (.mem_mappings) pointing to the flash memory (.orig_mem).
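+     * With mappings == 2, for instance, the guest sees the same flash
+     * contents twice within the device's address range (see
+     * pflash_setup_mappings() below).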
+ */ + MemoryRegion mem; + MemoryRegion *mem_mappings; /* array; one per mapping */ + MemoryRegion orig_mem; + bool rom_mode; + int read_counter; /* used for lazy switch-back to rom mode */ + int sectors_to_erase; + uint64_t erase_time_remaining; + unsigned long *sector_erase_map; + char *name; + void *storage; +}; + +/* + * Toggle status bit DQ7. + */ +static inline void toggle_dq7(PFlashCFI02 *pfl) +{ + pfl->status ^= 0x80; +} + +/* + * Set status bit DQ7 to bit 7 of value. + */ +static inline void set_dq7(PFlashCFI02 *pfl, uint8_t value) +{ + pfl->status &= 0x7F; + pfl->status |= value & 0x80; +} + +/* + * Toggle status bit DQ6. + */ +static inline void toggle_dq6(PFlashCFI02 *pfl) +{ + pfl->status ^= 0x40; +} + +/* + * Turn on DQ3. + */ +static inline void assert_dq3(PFlashCFI02 *pfl) +{ + pfl->status |= 0x08; +} + +/* + * Turn off DQ3. + */ +static inline void reset_dq3(PFlashCFI02 *pfl) +{ + pfl->status &= ~0x08; +} + +/* + * Toggle status bit DQ2. + */ +static inline void toggle_dq2(PFlashCFI02 *pfl) +{ + pfl->status ^= 0x04; +} + +/* + * Set up replicated mappings of the same region. + */ +static void pflash_setup_mappings(PFlashCFI02 *pfl) +{ + unsigned i; + hwaddr size = memory_region_size(&pfl->orig_mem); + + memory_region_init(&pfl->mem, OBJECT(pfl), "pflash", pfl->mappings * size); + pfl->mem_mappings = g_new(MemoryRegion, pfl->mappings); + for (i = 0; i < pfl->mappings; ++i) { + memory_region_init_alias(&pfl->mem_mappings[i], OBJECT(pfl), + "pflash-alias", &pfl->orig_mem, 0, size); + memory_region_add_subregion(&pfl->mem, i * size, &pfl->mem_mappings[i]); + } +} + +static void pflash_reset_state_machine(PFlashCFI02 *pfl) +{ + trace_pflash_reset(pfl->name); + pfl->cmd = 0x00; + pfl->wcycle = 0; +} + +static void pflash_mode_read_array(PFlashCFI02 *pfl) +{ + trace_pflash_mode_read_array(pfl->name); + pflash_reset_state_machine(pfl); + pfl->rom_mode = true; + memory_region_rom_device_set_romd(&pfl->orig_mem, true); +} + +static size_t pflash_regions_count(PFlashCFI02 *pfl) +{ + return pfl->cfi_table[0x2c]; +} + +/* + * Returns the time it takes to erase the number of sectors scheduled for + * erasure based on CFI address 0x21 which is "Typical timeout per individual + * block erase 2^N ms." + */ +static uint64_t pflash_erase_time(PFlashCFI02 *pfl) +{ + /* + * If there are no sectors to erase (which can happen if all of the sectors + * to be erased are protected), then erase takes 100 us. Protected sectors + * aren't supported so this should never happen. + */ + return ((1ULL << pfl->cfi_table[0x21]) * pfl->sectors_to_erase) * SCALE_US; +} + +/* + * Returns true if the device is currently in erase suspend mode. + */ +static inline bool pflash_erase_suspend_mode(PFlashCFI02 *pfl) +{ + return pfl->erase_time_remaining > 0; +} + +static void pflash_timer(void *opaque) +{ + PFlashCFI02 *pfl = opaque; + + trace_pflash_timer_expired(pfl->name, pfl->cmd); + if (pfl->cmd == 0x30) { + /* + * Sector erase. If DQ3 is 0 when the timer expires, then the 50 + * us erase timeout has expired so we need to start the timer for the + * sector erase algorithm. Otherwise, the erase completed and we should + * go back to read array mode. 
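+         * (While DQ3 is still 0, additional sector erase commands may be
+         * queued; see pflash_sector_erase(), which arms the 50 us timer.)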
+ */ + if ((pfl->status & 0x08) == 0) { + assert_dq3(pfl); + uint64_t timeout = pflash_erase_time(pfl); + timer_mod(&pfl->timer, + qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + timeout); + trace_pflash_erase_timeout(pfl->name, pfl->sectors_to_erase); + return; + } + trace_pflash_erase_complete(pfl->name); + bitmap_zero(pfl->sector_erase_map, pfl->total_sectors); + pfl->sectors_to_erase = 0; + reset_dq3(pfl); + } + + /* Reset flash */ + toggle_dq7(pfl); + if (pfl->bypass) { + pfl->wcycle = 2; + pfl->cmd = 0; + } else { + pflash_mode_read_array(pfl); + } +} + +/* + * Read data from flash. + */ +static uint64_t pflash_data_read(PFlashCFI02 *pfl, hwaddr offset, + unsigned int width) +{ + uint8_t *p = (uint8_t *)pfl->storage + offset; + uint64_t ret = pfl->be ? ldn_be_p(p, width) : ldn_le_p(p, width); + trace_pflash_data_read(pfl->name, offset, width, ret); + return ret; +} + +typedef struct { + uint32_t len; + uint32_t num; +} SectorInfo; + +/* + * offset should be a byte offset of the QEMU device and _not_ a device + * offset. + */ +static SectorInfo pflash_sector_info(PFlashCFI02 *pfl, hwaddr offset) +{ + assert(offset < pfl->chip_len); + hwaddr addr = 0; + uint32_t sector_num = 0; + for (int i = 0; i < pflash_regions_count(pfl); ++i) { + uint64_t region_size = (uint64_t)pfl->nb_blocs[i] * pfl->sector_len[i]; + if (addr <= offset && offset < addr + region_size) { + return (SectorInfo) { + .len = pfl->sector_len[i], + .num = sector_num + (offset - addr) / pfl->sector_len[i], + }; + } + sector_num += pfl->nb_blocs[i]; + addr += region_size; + } + abort(); +} + +/* + * Returns true if the offset refers to a flash sector that is currently being + * erased. + */ +static bool pflash_sector_is_erasing(PFlashCFI02 *pfl, hwaddr offset) +{ + long sector_num = pflash_sector_info(pfl, offset).num; + return test_bit(sector_num, pfl->sector_erase_map); +} + +static uint64_t pflash_read(void *opaque, hwaddr offset, unsigned int width) +{ + PFlashCFI02 *pfl = opaque; + hwaddr boff; + uint64_t ret; + + /* Lazy reset to ROMD mode after a certain amount of read accesses */ + if (!pfl->rom_mode && pfl->wcycle == 0 && + ++pfl->read_counter > PFLASH_LAZY_ROMD_THRESHOLD) { + pflash_mode_read_array(pfl); + } + offset &= pfl->chip_len - 1; + boff = offset & 0xFF; + if (pfl->width == 2) { + boff = boff >> 1; + } else if (pfl->width == 4) { + boff = boff >> 2; + } + switch (pfl->cmd) { + default: + /* This should never happen : reset state & treat it as a read*/ + trace_pflash_read_unknown_state(pfl->name, pfl->cmd); + pflash_reset_state_machine(pfl); + /* fall through to the read code */ + case 0x80: /* Erase (unlock) */ + /* We accept reads during second unlock sequence... */ + case 0x00: + if (pflash_erase_suspend_mode(pfl) && + pflash_sector_is_erasing(pfl, offset)) { + /* Toggle bit 2, but not 6. */ + toggle_dq2(pfl); + /* Status register read */ + ret = pfl->status; + trace_pflash_read_status(pfl->name, ret); + break; + } + /* Flash area read */ + ret = pflash_data_read(pfl, offset, width); + break; + case 0x90: /* flash ID read */ + switch (boff) { + case 0x00: + case 0x01: + ret = boff & 0x01 ? pfl->ident1 : pfl->ident0; + break; + case 0x02: + ret = 0x00; /* Pretend all sectors are unprotected */ + break; + case 0x0E: + case 0x0F: + ret = boff & 0x01 ? pfl->ident3 : pfl->ident2; + if (ret != (uint8_t)-1) { + break; + } + /* Fall through to data read. 
*/ + default: + ret = pflash_data_read(pfl, offset, width); + } + trace_pflash_read_done(pfl->name, boff, ret); + break; + case 0x10: /* Chip Erase */ + case 0x30: /* Sector Erase */ + /* Toggle bit 2 during erase, but not program. */ + toggle_dq2(pfl); + /* fall through */ + case 0xA0: /* Program */ + /* Toggle bit 6 */ + toggle_dq6(pfl); + /* Status register read */ + ret = pfl->status; + trace_pflash_read_status(pfl->name, ret); + break; + case 0x98: + /* CFI query mode */ + if (boff < sizeof(pfl->cfi_table)) { + ret = pfl->cfi_table[boff]; + } else { + ret = 0; + } + break; + } + trace_pflash_io_read(pfl->name, offset, width, ret, pfl->cmd, pfl->wcycle); + + return ret; +} + +/* update flash content on disk */ +static void pflash_update(PFlashCFI02 *pfl, int offset, int size) +{ + int offset_end; + int ret; + if (pfl->blk) { + offset_end = offset + size; + /* widen to sector boundaries */ + offset = QEMU_ALIGN_DOWN(offset, BDRV_SECTOR_SIZE); + offset_end = QEMU_ALIGN_UP(offset_end, BDRV_SECTOR_SIZE); + ret = blk_pwrite(pfl->blk, offset, pfl->storage + offset, + offset_end - offset, 0); + if (ret < 0) { + /* TODO set error bit in status */ + error_report("Could not update PFLASH: %s", strerror(-ret)); + } + } +} + +static void pflash_sector_erase(PFlashCFI02 *pfl, hwaddr offset) +{ + SectorInfo sector_info = pflash_sector_info(pfl, offset); + uint64_t sector_len = sector_info.len; + offset &= ~(sector_len - 1); + trace_pflash_sector_erase_start(pfl->name, pfl->width * 2, offset, + pfl->width * 2, offset + sector_len - 1); + if (!pfl->ro) { + uint8_t *p = pfl->storage; + memset(p + offset, 0xff, sector_len); + pflash_update(pfl, offset, sector_len); + } + set_dq7(pfl, 0x00); + ++pfl->sectors_to_erase; + set_bit(sector_info.num, pfl->sector_erase_map); + /* Set (or reset) the 50 us timer for additional erase commands. */ + timer_mod(&pfl->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 50000); +} + +static void pflash_write(void *opaque, hwaddr offset, uint64_t value, + unsigned int width) +{ + PFlashCFI02 *pfl = opaque; + hwaddr boff; + uint8_t *p; + uint8_t cmd; + + trace_pflash_io_write(pfl->name, offset, width, value, pfl->wcycle); + cmd = value; + if (pfl->cmd != 0xA0) { + /* Reset does nothing during chip erase and sector erase. */ + if (cmd == 0xF0 && pfl->cmd != 0x10 && pfl->cmd != 0x30) { + if (pfl->wcycle == WCYCLE_AUTOSELECT_CFI) { + /* Return to autoselect mode. */ + pfl->wcycle = 3; + pfl->cmd = 0x90; + return; + } + goto reset_flash; + } + } + offset &= pfl->chip_len - 1; + + boff = offset; + if (pfl->width == 2) { + boff = boff >> 1; + } else if (pfl->width == 4) { + boff = boff >> 2; + } + /* Only the least-significant 11 bits are used in most cases. */ + boff &= 0x7FF; + switch (pfl->wcycle) { + case 0: + /* Set the device in I/O access mode if required */ + if (pfl->rom_mode) { + pfl->rom_mode = false; + memory_region_rom_device_set_romd(&pfl->orig_mem, false); + } + pfl->read_counter = 0; + /* We're in read mode */ + check_unlock0: + if (boff == 0x55 && cmd == 0x98) { + /* Enter CFI query mode */ + pfl->wcycle = WCYCLE_CFI; + pfl->cmd = 0x98; + return; + } + /* Handle erase resume in erase suspend mode, otherwise reset. */ + if (cmd == 0x30) { /* Erase Resume */ + if (pflash_erase_suspend_mode(pfl)) { + /* Resume the erase. 
*/ + timer_mod(&pfl->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + + pfl->erase_time_remaining); + pfl->erase_time_remaining = 0; + pfl->wcycle = 6; + pfl->cmd = 0x30; + set_dq7(pfl, 0x00); + assert_dq3(pfl); + return; + } + goto reset_flash; + } + /* Ignore erase suspend. */ + if (cmd == 0xB0) { /* Erase Suspend */ + return; + } + if (boff != pfl->unlock_addr0 || cmd != 0xAA) { + trace_pflash_unlock0_failed(pfl->name, boff, + cmd, pfl->unlock_addr0); + goto reset_flash; + } + trace_pflash_write(pfl->name, "unlock sequence started"); + break; + case 1: + /* We started an unlock sequence */ + check_unlock1: + if (boff != pfl->unlock_addr1 || cmd != 0x55) { + trace_pflash_unlock1_failed(pfl->name, boff, cmd); + goto reset_flash; + } + trace_pflash_write(pfl->name, "unlock sequence done"); + break; + case 2: + /* We finished an unlock sequence */ + if (!pfl->bypass && boff != pfl->unlock_addr0) { + trace_pflash_write_failed(pfl->name, boff, cmd); + goto reset_flash; + } + switch (cmd) { + case 0x20: + pfl->bypass = 1; + goto do_bypass; + case 0x80: /* Erase */ + case 0x90: /* Autoselect */ + case 0xA0: /* Program */ + pfl->cmd = cmd; + trace_pflash_write_start(pfl->name, cmd); + break; + default: + trace_pflash_write_unknown(pfl->name, cmd); + goto reset_flash; + } + break; + case 3: + switch (pfl->cmd) { + case 0x80: /* Erase */ + /* We need another unlock sequence */ + goto check_unlock0; + case 0xA0: /* Program */ + if (pflash_erase_suspend_mode(pfl) && + pflash_sector_is_erasing(pfl, offset)) { + /* Ignore writes to erasing sectors. */ + if (pfl->bypass) { + goto do_bypass; + } + goto reset_flash; + } + trace_pflash_data_write(pfl->name, offset, width, value, 0); + if (!pfl->ro) { + p = (uint8_t *)pfl->storage + offset; + if (pfl->be) { + uint64_t current = ldn_be_p(p, width); + stn_be_p(p, width, current & value); + } else { + uint64_t current = ldn_le_p(p, width); + stn_le_p(p, width, current & value); + } + pflash_update(pfl, offset, width); + } + /* + * While programming, status bit DQ7 should hold the opposite + * value from how it was programmed. + */ + set_dq7(pfl, ~value); + /* Let's pretend write is immediate */ + if (pfl->bypass) + goto do_bypass; + goto reset_flash; + case 0x90: /* Autoselect */ + if (pfl->bypass && cmd == 0x00) { + /* Unlock bypass reset */ + goto reset_flash; + } + /* + * We can enter CFI query mode from autoselect mode, but we must + * return to autoselect mode after a reset. + */ + if (boff == 0x55 && cmd == 0x98) { + /* Enter autoselect CFI query mode */ + pfl->wcycle = WCYCLE_AUTOSELECT_CFI; + pfl->cmd = 0x98; + return; + } + /* fall through */ + default: + trace_pflash_write_invalid(pfl->name, pfl->cmd); + goto reset_flash; + } + case 4: + switch (pfl->cmd) { + case 0xA0: /* Program */ + /* Ignore writes while flash data write is occurring */ + /* As we suppose write is immediate, this should never happen */ + return; + case 0x80: /* Erase */ + goto check_unlock1; + default: + /* Should never happen */ + trace_pflash_write_invalid_state(pfl->name, pfl->cmd, 5); + goto reset_flash; + } + break; + case 5: + if (pflash_erase_suspend_mode(pfl)) { + /* Erasing is not supported in erase suspend mode. 
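+             * Only read and program are permitted while an erase is
+             * suspended, so a fresh erase request at this point resets
+             * the state machine instead.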
*/ + goto reset_flash; + } + switch (cmd) { + case 0x10: /* Chip Erase */ + if (boff != pfl->unlock_addr0) { + trace_pflash_chip_erase_invalid(pfl->name, offset); + goto reset_flash; + } + /* Chip erase */ + trace_pflash_chip_erase_start(pfl->name); + if (!pfl->ro) { + memset(pfl->storage, 0xff, pfl->chip_len); + pflash_update(pfl, 0, pfl->chip_len); + } + set_dq7(pfl, 0x00); + /* Wait the time specified at CFI address 0x22. */ + timer_mod(&pfl->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + + (1ULL << pfl->cfi_table[0x22]) * SCALE_MS); + break; + case 0x30: /* Sector erase */ + pflash_sector_erase(pfl, offset); + break; + default: + trace_pflash_write_invalid_command(pfl->name, cmd); + goto reset_flash; + } + pfl->cmd = cmd; + break; + case 6: + switch (pfl->cmd) { + case 0x10: /* Chip Erase */ + /* Ignore writes during chip erase */ + return; + case 0x30: /* Sector erase */ + if (cmd == 0xB0) { + /* + * If erase suspend happens during the erase timeout (so DQ3 is + * 0), then the device suspends erasing immediately. Set the + * remaining time to be the total time to erase. Otherwise, + * there is a maximum amount of time it can take to enter + * suspend mode. Let's ignore that and suspend immediately and + * set the remaining time to the actual time remaining on the + * timer. + */ + if ((pfl->status & 0x08) == 0) { + pfl->erase_time_remaining = pflash_erase_time(pfl); + } else { + int64_t delta = timer_expire_time_ns(&pfl->timer) - + qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + /* Make sure we have a positive time remaining. */ + pfl->erase_time_remaining = delta <= 0 ? 1 : delta; + } + reset_dq3(pfl); + timer_del(&pfl->timer); + pflash_reset_state_machine(pfl); + return; + } + /* + * If DQ3 is 0, additional sector erase commands can be + * written and anything else (other than an erase suspend) resets + * the device. + */ + if ((pfl->status & 0x08) == 0) { + if (cmd == 0x30) { + pflash_sector_erase(pfl, offset); + } else { + goto reset_flash; + } + } + /* Ignore writes during the actual erase. 
*/ + return; + default: + /* Should never happen */ + trace_pflash_write_invalid_state(pfl->name, pfl->cmd, 6); + goto reset_flash; + } + break; + /* Special values for CFI queries */ + case WCYCLE_CFI: + case WCYCLE_AUTOSELECT_CFI: + trace_pflash_write(pfl->name, "invalid write in CFI query mode"); + goto reset_flash; + default: + /* Should never happen */ + trace_pflash_write(pfl->name, "invalid write state (wc 7)"); + goto reset_flash; + } + pfl->wcycle++; + + return; + + /* Reset flash */ + reset_flash: + pfl->bypass = 0; + pflash_reset_state_machine(pfl); + return; + + do_bypass: + pfl->wcycle = 2; + pfl->cmd = 0; +} + +static const MemoryRegionOps pflash_cfi02_ops = { + .read = pflash_read, + .write = pflash_write, + .valid.min_access_size = 1, + .valid.max_access_size = 4, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void pflash_cfi02_fill_cfi_table(PFlashCFI02 *pfl, int nb_regions) +{ + /* Hardcoded CFI table (mostly from SG29 Spansion flash) */ + const uint16_t pri_ofs = 0x40; + /* Standard "QRY" string */ + pfl->cfi_table[0x10] = 'Q'; + pfl->cfi_table[0x11] = 'R'; + pfl->cfi_table[0x12] = 'Y'; + /* Command set (AMD/Fujitsu) */ + pfl->cfi_table[0x13] = 0x02; + pfl->cfi_table[0x14] = 0x00; + /* Primary extended table address */ + pfl->cfi_table[0x15] = pri_ofs; + pfl->cfi_table[0x16] = pri_ofs >> 8; + /* Alternate command set (none) */ + pfl->cfi_table[0x17] = 0x00; + pfl->cfi_table[0x18] = 0x00; + /* Alternate extended table (none) */ + pfl->cfi_table[0x19] = 0x00; + pfl->cfi_table[0x1A] = 0x00; + /* Vcc min */ + pfl->cfi_table[0x1B] = 0x27; + /* Vcc max */ + pfl->cfi_table[0x1C] = 0x36; + /* Vpp min (no Vpp pin) */ + pfl->cfi_table[0x1D] = 0x00; + /* Vpp max (no Vpp pin) */ + pfl->cfi_table[0x1E] = 0x00; + /* Timeout per single byte/word write (128 ms) */ + pfl->cfi_table[0x1F] = 0x07; + /* Timeout for min size buffer write (NA) */ + pfl->cfi_table[0x20] = 0x00; + /* Typical timeout for block erase (512 ms) */ + pfl->cfi_table[0x21] = 0x09; + /* Typical timeout for full chip erase (4096 ms) */ + pfl->cfi_table[0x22] = 0x0C; + /* Reserved */ + pfl->cfi_table[0x23] = 0x01; + /* Max timeout for buffer write (NA) */ + pfl->cfi_table[0x24] = 0x00; + /* Max timeout for block erase */ + pfl->cfi_table[0x25] = 0x0A; + /* Max timeout for chip erase */ + pfl->cfi_table[0x26] = 0x0D; + /* Device size */ + pfl->cfi_table[0x27] = ctz32(pfl->chip_len); + /* Flash device interface (8 & 16 bits) */ + pfl->cfi_table[0x28] = 0x02; + pfl->cfi_table[0x29] = 0x00; + /* Max number of bytes in multi-bytes write */ + /* + * XXX: disable buffered write as it's not supported + * pfl->cfi_table[0x2A] = 0x05; + */ + pfl->cfi_table[0x2A] = 0x00; + pfl->cfi_table[0x2B] = 0x00; + /* Number of erase block regions */ + pfl->cfi_table[0x2c] = nb_regions; + /* Erase block regions */ + for (int i = 0; i < nb_regions; ++i) { + uint32_t sector_len_per_device = pfl->sector_len[i]; + pfl->cfi_table[0x2d + 4 * i] = pfl->nb_blocs[i] - 1; + pfl->cfi_table[0x2e + 4 * i] = (pfl->nb_blocs[i] - 1) >> 8; + pfl->cfi_table[0x2f + 4 * i] = sector_len_per_device >> 8; + pfl->cfi_table[0x30 + 4 * i] = sector_len_per_device >> 16; + } + assert(0x2c + 4 * nb_regions < pri_ofs); + + /* Extended */ + pfl->cfi_table[0x00 + pri_ofs] = 'P'; + pfl->cfi_table[0x01 + pri_ofs] = 'R'; + pfl->cfi_table[0x02 + pri_ofs] = 'I'; + + /* Extended version 1.0 */ + pfl->cfi_table[0x03 + pri_ofs] = '1'; + pfl->cfi_table[0x04 + pri_ofs] = '0'; + + /* Address sensitive unlock required. 
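+     * In other words, the guest must issue the 0xAA/0x55 unlock cycles
+     * at the configured "unlock-addr0"/"unlock-addr1" addresses before
+     * each command sequence, unless unlock bypass (command 0x20) is
+     * active.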
+     */
+    pfl->cfi_table[0x05 + pri_ofs] = 0x00;
+    /* Erase suspend to read/write. */
+    pfl->cfi_table[0x06 + pri_ofs] = 0x02;
+    /* Sector protect not supported. */
+    pfl->cfi_table[0x07 + pri_ofs] = 0x00;
+    /* Temporary sector unprotect not supported. */
+    pfl->cfi_table[0x08 + pri_ofs] = 0x00;
+
+    /* Sector protect/unprotect scheme. */
+    pfl->cfi_table[0x09 + pri_ofs] = 0x00;
+
+    /* Simultaneous operation not supported. */
+    pfl->cfi_table[0x0a + pri_ofs] = 0x00;
+    /* Burst mode not supported. */
+    pfl->cfi_table[0x0b + pri_ofs] = 0x00;
+    /* Page mode not supported. */
+    pfl->cfi_table[0x0c + pri_ofs] = 0x00;
+    assert(0x0c + pri_ofs < ARRAY_SIZE(pfl->cfi_table));
+}
+
+static void pflash_cfi02_realize(DeviceState *dev, Error **errp)
+{
+    ERRP_GUARD();
+    PFlashCFI02 *pfl = PFLASH_CFI02(dev);
+    int ret;
+
+    if (pfl->uniform_sector_len == 0 && pfl->sector_len[0] == 0) {
+        error_setg(errp, "attribute \"sector-length\" not specified or zero.");
+        return;
+    }
+    if (pfl->uniform_nb_blocs == 0 && pfl->nb_blocs[0] == 0) {
+        error_setg(errp, "attribute \"num-blocks\" not specified or zero.");
+        return;
+    }
+    if (pfl->name == NULL) {
+        error_setg(errp, "attribute \"name\" not specified.");
+        return;
+    }
+
+    int nb_regions;
+    pfl->chip_len = 0;
+    pfl->total_sectors = 0;
+    for (nb_regions = 0; nb_regions < PFLASH_MAX_ERASE_REGIONS; ++nb_regions) {
+        if (pfl->nb_blocs[nb_regions] == 0) {
+            break;
+        }
+        pfl->total_sectors += pfl->nb_blocs[nb_regions];
+        uint64_t sector_len_per_device = pfl->sector_len[nb_regions];
+
+        /*
+         * The size of each flash sector must be a power of 2 and it must be
+         * aligned at the same power of 2.
+         */
+        if (sector_len_per_device & 0xff ||
+            sector_len_per_device >= (1 << 24) ||
+            !is_power_of_2(sector_len_per_device))
+        {
+            error_setg(errp, "unsupported configuration: "
+                       "sector length[%d] per device = %" PRIx64 ".",
+                       nb_regions, sector_len_per_device);
+            return;
+        }
+        if (pfl->chip_len & (sector_len_per_device - 1)) {
+            error_setg(errp, "unsupported configuration: "
+                       "flash region %d not correctly aligned.",
+                       nb_regions);
+            return;
+        }
+
+        pfl->chip_len += (uint64_t)pfl->sector_len[nb_regions] *
+                         pfl->nb_blocs[nb_regions];
+    }
+
+    uint64_t uniform_len = (uint64_t)pfl->uniform_nb_blocs *
+                           pfl->uniform_sector_len;
+    if (nb_regions == 0) {
+        nb_regions = 1;
+        pfl->nb_blocs[0] = pfl->uniform_nb_blocs;
+        pfl->sector_len[0] = pfl->uniform_sector_len;
+        pfl->chip_len = uniform_len;
+        pfl->total_sectors = pfl->uniform_nb_blocs;
+    } else if (uniform_len != 0 && uniform_len != pfl->chip_len) {
+        error_setg(errp, "\"num-blocks\"*\"sector-length\" "
+                   "different from \"num-blocks0\"*\"sector-length0\" + ... + "
+                   "\"num-blocks3\"*\"sector-length3\"");
+        return;
+    }
+
+    memory_region_init_rom_device(&pfl->orig_mem, OBJECT(pfl),
+                                  &pflash_cfi02_ops, pfl, pfl->name,
+                                  pfl->chip_len, errp);
+    if (*errp) {
+        return;
+    }
+
+    pfl->storage = memory_region_get_ram_ptr(&pfl->orig_mem);
+
+    if (pfl->blk) {
+        uint64_t perm;
+        pfl->ro = !blk_supports_write_perm(pfl->blk);
+        perm = BLK_PERM_CONSISTENT_READ | (pfl->ro ? 0 : BLK_PERM_WRITE);
+        ret = blk_set_perm(pfl->blk, perm, BLK_PERM_ALL, errp);
+        if (ret < 0) {
+            return;
+        }
+    } else {
+        pfl->ro = 0;
+    }
+
+    if (pfl->blk) {
+        if (!blk_check_size_and_read_all(pfl->blk, pfl->storage,
+                                         pfl->chip_len, errp)) {
+            vmstate_unregister_ram(&pfl->orig_mem, DEVICE(pfl));
+            return;
+        }
+    }
+
+    /* Only 11 bits are used in the comparison.
*/ + pfl->unlock_addr0 &= 0x7FF; + pfl->unlock_addr1 &= 0x7FF; + + /* Allocate memory for a bitmap for sectors being erased. */ + pfl->sector_erase_map = bitmap_new(pfl->total_sectors); + + pfl->rom_mode = true; + if (pfl->mappings > 1) { + pflash_setup_mappings(pfl); + sysbus_init_mmio(SYS_BUS_DEVICE(dev), &pfl->mem); + } else { + sysbus_init_mmio(SYS_BUS_DEVICE(dev), &pfl->orig_mem); + } + + timer_init_ns(&pfl->timer, QEMU_CLOCK_VIRTUAL, pflash_timer, pfl); + pfl->status = 0; + + pflash_cfi02_fill_cfi_table(pfl, nb_regions); +} + +static void pflash_cfi02_reset(DeviceState *dev) +{ + PFlashCFI02 *pfl = PFLASH_CFI02(dev); + + pflash_reset_state_machine(pfl); +} + +static Property pflash_cfi02_properties[] = { + DEFINE_PROP_DRIVE("drive", PFlashCFI02, blk), + DEFINE_PROP_UINT32("num-blocks", PFlashCFI02, uniform_nb_blocs, 0), + DEFINE_PROP_UINT32("sector-length", PFlashCFI02, uniform_sector_len, 0), + DEFINE_PROP_UINT32("num-blocks0", PFlashCFI02, nb_blocs[0], 0), + DEFINE_PROP_UINT32("sector-length0", PFlashCFI02, sector_len[0], 0), + DEFINE_PROP_UINT32("num-blocks1", PFlashCFI02, nb_blocs[1], 0), + DEFINE_PROP_UINT32("sector-length1", PFlashCFI02, sector_len[1], 0), + DEFINE_PROP_UINT32("num-blocks2", PFlashCFI02, nb_blocs[2], 0), + DEFINE_PROP_UINT32("sector-length2", PFlashCFI02, sector_len[2], 0), + DEFINE_PROP_UINT32("num-blocks3", PFlashCFI02, nb_blocs[3], 0), + DEFINE_PROP_UINT32("sector-length3", PFlashCFI02, sector_len[3], 0), + DEFINE_PROP_UINT8("width", PFlashCFI02, width, 0), + DEFINE_PROP_UINT8("mappings", PFlashCFI02, mappings, 0), + DEFINE_PROP_UINT8("big-endian", PFlashCFI02, be, 0), + DEFINE_PROP_UINT16("id0", PFlashCFI02, ident0, 0), + DEFINE_PROP_UINT16("id1", PFlashCFI02, ident1, 0), + DEFINE_PROP_UINT16("id2", PFlashCFI02, ident2, 0), + DEFINE_PROP_UINT16("id3", PFlashCFI02, ident3, 0), + DEFINE_PROP_UINT16("unlock-addr0", PFlashCFI02, unlock_addr0, 0), + DEFINE_PROP_UINT16("unlock-addr1", PFlashCFI02, unlock_addr1, 0), + DEFINE_PROP_STRING("name", PFlashCFI02, name), + DEFINE_PROP_END_OF_LIST(), +}; + +static void pflash_cfi02_unrealize(DeviceState *dev) +{ + PFlashCFI02 *pfl = PFLASH_CFI02(dev); + timer_del(&pfl->timer); + g_free(pfl->sector_erase_map); +} + +static void pflash_cfi02_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = pflash_cfi02_realize; + dc->reset = pflash_cfi02_reset; + dc->unrealize = pflash_cfi02_unrealize; + device_class_set_props(dc, pflash_cfi02_properties); + set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); +} + +static const TypeInfo pflash_cfi02_info = { + .name = TYPE_PFLASH_CFI02, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(PFlashCFI02), + .class_init = pflash_cfi02_class_init, +}; + +static void pflash_cfi02_register_types(void) +{ + type_register_static(&pflash_cfi02_info); +} + +type_init(pflash_cfi02_register_types) + +PFlashCFI02 *pflash_cfi02_register(hwaddr base, + const char *name, + hwaddr size, + BlockBackend *blk, + uint32_t sector_len, + int nb_mappings, int width, + uint16_t id0, uint16_t id1, + uint16_t id2, uint16_t id3, + uint16_t unlock_addr0, + uint16_t unlock_addr1, + int be) +{ + DeviceState *dev = qdev_new(TYPE_PFLASH_CFI02); + + if (blk) { + qdev_prop_set_drive(dev, "drive", blk); + } + assert(QEMU_IS_ALIGNED(size, sector_len)); + qdev_prop_set_uint32(dev, "num-blocks", size / sector_len); + qdev_prop_set_uint32(dev, "sector-length", sector_len); + qdev_prop_set_uint8(dev, "width", width); + qdev_prop_set_uint8(dev, "mappings", nb_mappings); + 
qdev_prop_set_uint8(dev, "big-endian", !!be); + qdev_prop_set_uint16(dev, "id0", id0); + qdev_prop_set_uint16(dev, "id1", id1); + qdev_prop_set_uint16(dev, "id2", id2); + qdev_prop_set_uint16(dev, "id3", id3); + qdev_prop_set_uint16(dev, "unlock-addr0", unlock_addr0); + qdev_prop_set_uint16(dev, "unlock-addr1", unlock_addr1); + qdev_prop_set_string(dev, "name", name); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base); + return PFLASH_CFI02(dev); +} diff --git a/hw/block/swim.c b/hw/block/swim.c new file mode 100644 index 000000000..333da08ce --- /dev/null +++ b/hw/block/swim.c @@ -0,0 +1,491 @@ +/* + * QEMU Macintosh floppy disk controller emulator (SWIM) + * + * Copyright (c) 2014-2018 Laurent Vivier + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + * + * Only the basic support: it allows to switch from IWM (Integrated WOZ + * Machine) mode to the SWIM mode and makes the linux driver happy. + */ + +#include "qemu/osdep.h" +#include "qemu/main-loop.h" +#include "qapi/error.h" +#include "sysemu/block-backend.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "hw/block/block.h" +#include "hw/block/swim.h" +#include "hw/qdev-properties.h" + +/* IWM registers */ + +#define IWM_PH0L 0 +#define IWM_PH0H 1 +#define IWM_PH1L 2 +#define IWM_PH1H 3 +#define IWM_PH2L 4 +#define IWM_PH2H 5 +#define IWM_PH3L 6 +#define IWM_PH3H 7 +#define IWM_MTROFF 8 +#define IWM_MTRON 9 +#define IWM_INTDRIVE 10 +#define IWM_EXTDRIVE 11 +#define IWM_Q6L 12 +#define IWM_Q6H 13 +#define IWM_Q7L 14 +#define IWM_Q7H 15 + +/* SWIM registers */ + +#define SWIM_WRITE_DATA 0 +#define SWIM_WRITE_MARK 1 +#define SWIM_WRITE_CRC 2 +#define SWIM_WRITE_PARAMETER 3 +#define SWIM_WRITE_PHASE 4 +#define SWIM_WRITE_SETUP 5 +#define SWIM_WRITE_MODE0 6 +#define SWIM_WRITE_MODE1 7 + +#define SWIM_READ_DATA 8 +#define SWIM_READ_MARK 9 +#define SWIM_READ_ERROR 10 +#define SWIM_READ_PARAMETER 11 +#define SWIM_READ_PHASE 12 +#define SWIM_READ_SETUP 13 +#define SWIM_READ_STATUS 14 +#define SWIM_READ_HANDSHAKE 15 + +#define REG_SHIFT 9 + +#define SWIM_MODE_IWM 0 +#define SWIM_MODE_SWIM 1 + +/* bits in phase register */ + +#define SWIM_SEEK_NEGATIVE 0x074 +#define SWIM_STEP 0x071 +#define SWIM_MOTOR_ON 0x072 +#define SWIM_MOTOR_OFF 0x076 +#define SWIM_INDEX 0x073 +#define SWIM_EJECT 0x077 +#define SWIM_SETMFM 0x171 +#define SWIM_SETGCR 0x175 +#define SWIM_RELAX 0x033 +#define SWIM_LSTRB 0x008 +#define SWIM_CA_MASK 0x077 + +/* Select values for swim_select and swim_readbit */ + +#define SWIM_READ_DATA_0 0x074 +#define SWIM_TWOMEG_DRIVE 0x075 +#define SWIM_SINGLE_SIDED 0x076 +#define SWIM_DRIVE_PRESENT 0x077 +#define SWIM_DISK_IN 0x170 +#define SWIM_WRITE_PROT 0x171 +#define SWIM_TRACK_ZERO 0x172 +#define SWIM_TACHO 0x173 +#define SWIM_READ_DATA_1 0x174 +#define SWIM_MFM_MODE 0x175 +#define SWIM_SEEK_COMPLETE 0x176 +#define SWIM_ONEMEG_MEDIA 0x177 + +/* Bits in handshake register */ + +#define SWIM_MARK_BYTE 0x01 +#define SWIM_CRC_ZERO 0x02 +#define SWIM_RDDATA 0x04 +#define SWIM_SENSE 0x08 +#define SWIM_MOTEN 0x10 +#define SWIM_ERROR 0x20 +#define SWIM_DAT2BYTE 0x40 +#define SWIM_DAT1BYTE 0x80 + +/* bits in setup register */ + +#define SWIM_S_INV_WDATA 0x01 +#define SWIM_S_3_5_SELECT 0x02 +#define SWIM_S_GCR 0x04 +#define SWIM_S_FCLK_DIV2 0x08 +#define SWIM_S_ERROR_CORR 0x10 +#define SWIM_S_IBM_DRIVE 0x20 +#define SWIM_S_GCR_WRITE 0x40 +#define SWIM_S_TIMEOUT 0x80 + +/* bits in mode 
register */ + +#define SWIM_CLFIFO 0x01 +#define SWIM_ENBL1 0x02 +#define SWIM_ENBL2 0x04 +#define SWIM_ACTION 0x08 +#define SWIM_WRITE_MODE 0x10 +#define SWIM_HEDSEL 0x20 +#define SWIM_MOTON 0x80 + +static void fd_recalibrate(FDrive *drive) +{ +} + +static void swim_change_cb(void *opaque, bool load, Error **errp) +{ + FDrive *drive = opaque; + + if (!load) { + blk_set_perm(drive->blk, 0, BLK_PERM_ALL, &error_abort); + } else { + if (!blkconf_apply_backend_options(drive->conf, + !blk_supports_write_perm(drive->blk), + false, errp)) { + return; + } + } +} + +static const BlockDevOps swim_block_ops = { + .change_media_cb = swim_change_cb, +}; + +static Property swim_drive_properties[] = { + DEFINE_PROP_INT32("unit", SWIMDrive, unit, -1), + DEFINE_BLOCK_PROPERTIES(SWIMDrive, conf), + DEFINE_PROP_END_OF_LIST(), +}; + +static void swim_drive_realize(DeviceState *qdev, Error **errp) +{ + SWIMDrive *dev = SWIM_DRIVE(qdev); + SWIMBus *bus = SWIM_BUS(qdev->parent_bus); + FDrive *drive; + int ret; + + if (dev->unit == -1) { + for (dev->unit = 0; dev->unit < SWIM_MAX_FD; dev->unit++) { + drive = &bus->ctrl->drives[dev->unit]; + if (!drive->blk) { + break; + } + } + } + + if (dev->unit >= SWIM_MAX_FD) { + error_setg(errp, "Can't create floppy unit %d, bus supports " + "only %d units", dev->unit, SWIM_MAX_FD); + return; + } + + drive = &bus->ctrl->drives[dev->unit]; + if (drive->blk) { + error_setg(errp, "Floppy unit %d is in use", dev->unit); + return; + } + + if (!dev->conf.blk) { + /* Anonymous BlockBackend for an empty drive */ + dev->conf.blk = blk_new(qemu_get_aio_context(), 0, BLK_PERM_ALL); + ret = blk_attach_dev(dev->conf.blk, qdev); + assert(ret == 0); + } + + if (!blkconf_blocksizes(&dev->conf, errp)) { + return; + } + + if (dev->conf.logical_block_size != 512 || + dev->conf.physical_block_size != 512) + { + error_setg(errp, "Physical and logical block size must " + "be 512 for floppy"); + return; + } + + /* + * rerror/werror aren't supported by fdc and therefore not even registered + * with qdev. So set the defaults manually before they are used in + * blkconf_apply_backend_options(). + */ + dev->conf.rerror = BLOCKDEV_ON_ERROR_AUTO; + dev->conf.werror = BLOCKDEV_ON_ERROR_AUTO; + + if (!blkconf_apply_backend_options(&dev->conf, + !blk_supports_write_perm(dev->conf.blk), + false, errp)) { + return; + } + + /* + * 'enospc' is the default for -drive, 'report' is what blk_new() gives us + * for empty drives. 
+ */ + if (blk_get_on_error(dev->conf.blk, 0) != BLOCKDEV_ON_ERROR_ENOSPC && + blk_get_on_error(dev->conf.blk, 0) != BLOCKDEV_ON_ERROR_REPORT) { + error_setg(errp, "fdc doesn't support drive option werror"); + return; + } + if (blk_get_on_error(dev->conf.blk, 1) != BLOCKDEV_ON_ERROR_REPORT) { + error_setg(errp, "fdc doesn't support drive option rerror"); + return; + } + + drive->conf = &dev->conf; + drive->blk = dev->conf.blk; + drive->swimctrl = bus->ctrl; + + blk_set_dev_ops(drive->blk, &swim_block_ops, drive); +} + +static void swim_drive_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *k = DEVICE_CLASS(klass); + k->realize = swim_drive_realize; + set_bit(DEVICE_CATEGORY_STORAGE, k->categories); + k->bus_type = TYPE_SWIM_BUS; + device_class_set_props(k, swim_drive_properties); + k->desc = "virtual SWIM drive"; +} + +static const TypeInfo swim_drive_info = { + .name = TYPE_SWIM_DRIVE, + .parent = TYPE_DEVICE, + .instance_size = sizeof(SWIMDrive), + .class_init = swim_drive_class_init, +}; + +static const TypeInfo swim_bus_info = { + .name = TYPE_SWIM_BUS, + .parent = TYPE_BUS, + .instance_size = sizeof(SWIMBus), +}; + +static void iwmctrl_write(void *opaque, hwaddr reg, uint64_t value, + unsigned size) +{ + SWIMCtrl *swimctrl = opaque; + + reg >>= REG_SHIFT; + + swimctrl->regs[reg >> 1] = reg & 1; + + if (swimctrl->regs[IWM_Q6] && + swimctrl->regs[IWM_Q7]) { + if (swimctrl->regs[IWM_MTR]) { + /* data register */ + swimctrl->iwm_data = value; + } else { + /* mode register */ + swimctrl->iwm_mode = value; + /* detect sequence to switch from IWM mode to SWIM mode */ + switch (swimctrl->iwm_switch) { + case 0: + if (value == 0x57) { + swimctrl->iwm_switch++; + } + break; + case 1: + if (value == 0x17) { + swimctrl->iwm_switch++; + } + break; + case 2: + if (value == 0x57) { + swimctrl->iwm_switch++; + } + break; + case 3: + if (value == 0x57) { + swimctrl->mode = SWIM_MODE_SWIM; + swimctrl->iwm_switch = 0; + } + break; + } + } + } +} + +static uint64_t iwmctrl_read(void *opaque, hwaddr reg, unsigned size) +{ + SWIMCtrl *swimctrl = opaque; + + reg >>= REG_SHIFT; + + swimctrl->regs[reg >> 1] = reg & 1; + + return 0; +} + +static void swimctrl_write(void *opaque, hwaddr reg, uint64_t value, + unsigned size) +{ + SWIMCtrl *swimctrl = opaque; + + if (swimctrl->mode == SWIM_MODE_IWM) { + iwmctrl_write(opaque, reg, value, size); + return; + } + + reg >>= REG_SHIFT; + + switch (reg) { + case SWIM_WRITE_PHASE: + swimctrl->swim_phase = value; + break; + case SWIM_WRITE_MODE0: + swimctrl->swim_mode &= ~value; + break; + case SWIM_WRITE_MODE1: + swimctrl->swim_mode |= value; + break; + case SWIM_WRITE_DATA: + case SWIM_WRITE_MARK: + case SWIM_WRITE_CRC: + case SWIM_WRITE_PARAMETER: + case SWIM_WRITE_SETUP: + break; + } +} + +static uint64_t swimctrl_read(void *opaque, hwaddr reg, unsigned size) +{ + SWIMCtrl *swimctrl = opaque; + uint32_t value = 0; + + if (swimctrl->mode == SWIM_MODE_IWM) { + return iwmctrl_read(opaque, reg, size); + } + + reg >>= REG_SHIFT; + + switch (reg) { + case SWIM_READ_PHASE: + value = swimctrl->swim_phase; + break; + case SWIM_READ_HANDSHAKE: + if (swimctrl->swim_phase == SWIM_DRIVE_PRESENT) { + /* always answer "no drive present" */ + value = SWIM_SENSE; + } + break; + case SWIM_READ_DATA: + case SWIM_READ_MARK: + case SWIM_READ_ERROR: + case SWIM_READ_PARAMETER: + case SWIM_READ_SETUP: + case SWIM_READ_STATUS: + break; + } + + return value; +} + +static const MemoryRegionOps swimctrl_mem_ops = { + .write = swimctrl_write, + .read = swimctrl_read, + .endianness = 
DEVICE_NATIVE_ENDIAN, +}; + +static void sysbus_swim_reset(DeviceState *d) +{ + Swim *sys = SWIM(d); + SWIMCtrl *ctrl = &sys->ctrl; + int i; + + ctrl->mode = 0; + ctrl->iwm_switch = 0; + for (i = 0; i < 8; i++) { + ctrl->regs[i] = 0; + } + ctrl->iwm_data = 0; + ctrl->iwm_mode = 0; + ctrl->swim_phase = 0; + ctrl->swim_mode = 0; + for (i = 0; i < SWIM_MAX_FD; i++) { + fd_recalibrate(&ctrl->drives[i]); + } +} + +static void sysbus_swim_init(Object *obj) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + Swim *sbs = SWIM(obj); + SWIMCtrl *swimctrl = &sbs->ctrl; + + memory_region_init_io(&swimctrl->iomem, obj, &swimctrl_mem_ops, swimctrl, + "swim", 0x2000); + sysbus_init_mmio(sbd, &swimctrl->iomem); +} + +static void sysbus_swim_realize(DeviceState *dev, Error **errp) +{ + Swim *sys = SWIM(dev); + SWIMCtrl *swimctrl = &sys->ctrl; + + qbus_init(&swimctrl->bus, sizeof(SWIMBus), TYPE_SWIM_BUS, dev, NULL); + swimctrl->bus.ctrl = swimctrl; +} + +static const VMStateDescription vmstate_fdrive = { + .name = "fdrive", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_END_OF_LIST() + }, +}; + +static const VMStateDescription vmstate_swim = { + .name = "swim", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_INT32(mode, SWIMCtrl), + /* IWM mode */ + VMSTATE_INT32(iwm_switch, SWIMCtrl), + VMSTATE_UINT16_ARRAY(regs, SWIMCtrl, 8), + VMSTATE_UINT8(iwm_data, SWIMCtrl), + VMSTATE_UINT8(iwm_mode, SWIMCtrl), + /* SWIM mode */ + VMSTATE_UINT8(swim_phase, SWIMCtrl), + VMSTATE_UINT8(swim_mode, SWIMCtrl), + /* Drives */ + VMSTATE_STRUCT_ARRAY(drives, SWIMCtrl, SWIM_MAX_FD, 1, + vmstate_fdrive, FDrive), + VMSTATE_END_OF_LIST() + }, +}; + +static const VMStateDescription vmstate_sysbus_swim = { + .name = "SWIM", + .version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_STRUCT(ctrl, Swim, 0, vmstate_swim, SWIMCtrl), + VMSTATE_END_OF_LIST() + } +}; + +static void sysbus_swim_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + + dc->realize = sysbus_swim_realize; + dc->reset = sysbus_swim_reset; + dc->vmsd = &vmstate_sysbus_swim; +} + +static const TypeInfo sysbus_swim_info = { + .name = TYPE_SWIM, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(Swim), + .instance_init = sysbus_swim_init, + .class_init = sysbus_swim_class_init, +}; + +static void swim_register_types(void) +{ + type_register_static(&sysbus_swim_info); + type_register_static(&swim_bus_info); + type_register_static(&swim_drive_info); +} + +type_init(swim_register_types) diff --git a/hw/block/tc58128.c b/hw/block/tc58128.c new file mode 100644 index 000000000..bfc27ad89 --- /dev/null +++ b/hw/block/tc58128.c @@ -0,0 +1,208 @@ +/* + * TC58128 NAND EEPROM emulation + * + * Copyright (c) 2005 Samuel Tardieu + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + * + * SPDX-License-Identifier: MIT + */ +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "hw/sh4/sh.h" +#include "hw/loader.h" +#include "sysemu/qtest.h" +#include "qemu/error-report.h" + +#define CE1 0x0100 +#define CE2 0x0200 +#define RE 0x0400 +#define WE 0x0800 +#define ALE 0x1000 +#define CLE 0x2000 +#define RDY1 0x4000 +#define RDY2 0x8000 +#define RDY(n) ((n) == 0 ? RDY1 : RDY2) + +typedef enum { WAIT, READ1, READ2, READ3 } state_t; + +typedef struct { + uint8_t *flash_contents; + state_t state; + uint32_t address; + uint8_t address_cycle; +} tc58128_dev; + +static tc58128_dev tc58128_devs[2]; + +#define FLASH_SIZE (16 * MiB) + +static void init_dev(tc58128_dev * dev, const char *filename) +{ + int ret, blocks; + + dev->state = WAIT; + dev->flash_contents = g_malloc(FLASH_SIZE); + memset(dev->flash_contents, 0xff, FLASH_SIZE); + if (filename) { + /* Load flash image skipping the first block */ + ret = load_image_size(filename, dev->flash_contents + 528 * 32, + FLASH_SIZE - 528 * 32); + if (ret < 0) { + if (!qtest_enabled()) { + error_report("Could not load flash image %s", filename); + exit(1); + } + } else { + /* Build first block with number of blocks */ + blocks = DIV_ROUND_UP(ret, 528 * 32); + dev->flash_contents[0] = blocks & 0xff; + dev->flash_contents[1] = (blocks >> 8) & 0xff; + dev->flash_contents[2] = (blocks >> 16) & 0xff; + dev->flash_contents[3] = (blocks >> 24) & 0xff; + fprintf(stderr, "loaded %d bytes for %s into flash\n", ret, + filename); + } + } +} + +static void handle_command(tc58128_dev * dev, uint8_t command) +{ + switch (command) { + case 0xff: + fprintf(stderr, "reset flash device\n"); + dev->state = WAIT; + break; + case 0x00: + fprintf(stderr, "read mode 1\n"); + dev->state = READ1; + dev->address_cycle = 0; + break; + case 0x01: + fprintf(stderr, "read mode 2\n"); + dev->state = READ2; + dev->address_cycle = 0; + break; + case 0x50: + fprintf(stderr, "read mode 3\n"); + dev->state = READ3; + dev->address_cycle = 0; + break; + default: + fprintf(stderr, "unknown flash command 0x%02x\n", command); + abort(); + } +} + +static void handle_address(tc58128_dev * dev, uint8_t data) +{ + switch (dev->state) { + case READ1: + case READ2: + case READ3: + switch (dev->address_cycle) { + case 0: + dev->address = data; + if (dev->state == READ2) + dev->address |= 0x100; + else if (dev->state == READ3) + dev->address |= 0x200; + break; + case 1: + dev->address += data * 528 * 0x100; + break; + case 2: + dev->address += data * 528; + fprintf(stderr, "address pointer in flash: 0x%08x\n", + dev->address); + break; + default: + /* Invalid data */ + abort(); + } + dev->address_cycle++; + break; + default: + abort(); + } +} + +static uint8_t handle_read(tc58128_dev * dev) +{ +#if 0 + if (dev->address % 0x100000 == 0) + fprintf(stderr, "reading flash at address 0x%08x\n", dev->address); +#endif + return dev->flash_contents[dev->address++]; +} + +/* We never mark the device as busy, so interrupts cannot be triggered + XXXXX */ + +static int tc58128_cb(uint16_t porta, uint16_t 
portb, + uint16_t * periph_pdtra, uint16_t * periph_portadir, + uint16_t * periph_pdtrb, uint16_t * periph_portbdir) +{ + int dev; + + if ((porta & CE1) == 0) + dev = 0; + else if ((porta & CE2) == 0) + dev = 1; + else + return 0; /* No device selected */ + + if ((porta & RE) && (porta & WE)) { + /* Nothing to do, assert ready and return to input state */ + *periph_portadir &= 0xff00; + *periph_portadir |= RDY(dev); + *periph_pdtra |= RDY(dev); + return 1; + } + + if (porta & CLE) { + /* Command */ + assert((porta & WE) == 0); + handle_command(&tc58128_devs[dev], porta & 0x00ff); + } else if (porta & ALE) { + assert((porta & WE) == 0); + handle_address(&tc58128_devs[dev], porta & 0x00ff); + } else if ((porta & RE) == 0) { + *periph_portadir |= 0x00ff; + *periph_pdtra &= 0xff00; + *periph_pdtra |= handle_read(&tc58128_devs[dev]); + } else { + abort(); + } + return 1; +} + +static sh7750_io_device tc58128 = { + RE | WE, /* Port A triggers */ + 0, /* Port B triggers */ + tc58128_cb /* Callback */ +}; + +int tc58128_init(struct SH7750State *s, const char *zone1, const char *zone2) +{ + init_dev(&tc58128_devs[0], zone1); + init_dev(&tc58128_devs[1], zone2); + return sh7750_register_io_device(s, &tc58128); +} diff --git a/hw/block/trace-events b/hw/block/trace-events new file mode 100644 index 000000000..d86b53520 --- /dev/null +++ b/hw/block/trace-events @@ -0,0 +1,84 @@ +# See docs/devel/tracing.rst for syntax documentation. + +# fdc.c +fdc_ioport_read(uint8_t reg, uint8_t value) "read reg 0x%02x val 0x%02x" +fdc_ioport_write(uint8_t reg, uint8_t value) "write reg 0x%02x val 0x%02x" + +# fdc-sysbus.c +fdctrl_tc_pulse(int level) "TC pulse: %u" + +# pflash_cfi01.c +# pflash_cfi02.c +pflash_chip_erase_invalid(const char *name, uint64_t offset) "%s: chip erase: invalid address 0x%" PRIx64 +pflash_chip_erase_start(const char *name) "%s: start chip erase" +pflash_data_read(const char *name, uint64_t offset, unsigned size, uint32_t value) "%s: data offset:0x%04"PRIx64" size:%u value:0x%04x" +pflash_data_write(const char *name, uint64_t offset, unsigned size, uint32_t value, uint64_t counter) "%s: data offset:0x%04"PRIx64" size:%u value:0x%04x counter:0x%016"PRIx64 +pflash_device_id(const char *name, uint16_t id) "%s: read device ID: 0x%04x" +pflash_device_info(const char *name, uint64_t offset) "%s: read device information offset:0x%04" PRIx64 +pflash_erase_complete(const char *name) "%s: sector erase complete" +pflash_erase_timeout(const char *name, int count) "%s: erase timeout fired; erasing %d sectors" +pflash_io_read(const char *name, uint64_t offset, unsigned int size, uint32_t value, uint8_t cmd, uint8_t wcycle) "%s: offset:0x%04" PRIx64 " size:%u value:0x%04x cmd:0x%02x wcycle:%u" +pflash_io_write(const char *name, uint64_t offset, unsigned int size, uint32_t value, uint8_t wcycle) "%s: offset:0x%04"PRIx64" size:%u value:0x%04x wcycle:%u" +pflash_manufacturer_id(const char *name, uint16_t id) "%s: read manufacturer ID: 0x%04x" +pflash_mode_read_array(const char *name) "%s: read array mode" +pflash_postload_cb(const char *name) "%s: updating bdrv" +pflash_read_done(const char *name, uint64_t offset, uint64_t ret) "%s: ID:0x%" PRIx64 " ret:0x%" PRIx64 +pflash_read_status(const char *name, uint32_t ret) "%s: status:0x%x" +pflash_read_unknown_state(const char *name, uint8_t cmd) "%s: unknown command state:0x%x" +pflash_reset(const char *name) "%s: reset" +pflash_sector_erase_start(const char *name, int width1, uint64_t start, int width2, uint64_t end) "%s: start sector erase at: 0x%0*" PRIx64 
"-0x%0*" PRIx64 +pflash_timer_expired(const char *name, uint8_t cmd) "%s: command 0x%02x done" +pflash_unlock0_failed(const char *name, uint64_t offset, uint8_t cmd, uint16_t addr0) "%s: unlock0 failed 0x%" PRIx64 " 0x%02x 0x%04x" +pflash_unlock1_failed(const char *name, uint64_t offset, uint8_t cmd) "%s: unlock0 failed 0x%" PRIx64 " 0x%02x" +pflash_unsupported_device_configuration(const char *name, uint8_t width, uint8_t max) "%s: unsupported device configuration: device_width:%d max_device_width:%d" +pflash_write(const char *name, const char *str) "%s: %s" +pflash_write_block(const char *name, uint32_t value) "%s: block write: bytes:0x%x" +pflash_write_block_erase(const char *name, uint64_t offset, uint64_t len) "%s: block erase offset:0x%" PRIx64 " bytes:0x%" PRIx64 +pflash_write_failed(const char *name, uint64_t offset, uint8_t cmd) "%s: command failed 0x%" PRIx64 " 0x%02x" +pflash_write_invalid(const char *name, uint8_t cmd) "%s: invalid write for command 0x%02x" +pflash_write_invalid_command(const char *name, uint8_t cmd) "%s: invalid command 0x%02x (wc 5)" +pflash_write_invalid_state(const char *name, uint8_t cmd, int wc) "%s: invalid command state 0x%02x (wc %d)" +pflash_write_start(const char *name, uint8_t cmd) "%s: starting command 0x%02x" +pflash_write_unknown(const char *name, uint8_t cmd) "%s: unknown command 0x%02x" + +# virtio-blk.c +virtio_blk_req_complete(void *vdev, void *req, int status) "vdev %p req %p status %d" +virtio_blk_rw_complete(void *vdev, void *req, int ret) "vdev %p req %p ret %d" +virtio_blk_handle_write(void *vdev, void *req, uint64_t sector, size_t nsectors) "vdev %p req %p sector %"PRIu64" nsectors %zu" +virtio_blk_handle_read(void *vdev, void *req, uint64_t sector, size_t nsectors) "vdev %p req %p sector %"PRIu64" nsectors %zu" +virtio_blk_submit_multireq(void *vdev, void *mrb, int start, int num_reqs, uint64_t offset, size_t size, bool is_write) "vdev %p mrb %p start %d num_reqs %d offset %"PRIu64" size %zu is_write %d" + +# hd-geometry.c +hd_geometry_lchs_guess(void *blk, int cyls, int heads, int secs) "blk %p LCHS %d %d %d" +hd_geometry_guess(void *blk, uint32_t cyls, uint32_t heads, uint32_t secs, int trans) "blk %p CHS %u %u %u trans %d" + +# xen-block.c +xen_block_realize(const char *type, uint32_t disk, uint32_t partition) "%s d%up%u" +xen_block_connect(const char *type, uint32_t disk, uint32_t partition) "%s d%up%u" +xen_block_disconnect(const char *type, uint32_t disk, uint32_t partition) "%s d%up%u" +xen_block_unrealize(const char *type, uint32_t disk, uint32_t partition) "%s d%up%u" +xen_block_size(const char *type, uint32_t disk, uint32_t partition, int64_t sectors) "%s d%up%u %"PRIi64 +xen_disk_realize(void) "" +xen_disk_unrealize(void) "" +xen_cdrom_realize(void) "" +xen_cdrom_unrealize(void) "" +xen_block_blockdev_add(char *str) "%s" +xen_block_blockdev_del(const char *node_name) "%s" +xen_block_device_create(unsigned int number) "%u" +xen_block_device_destroy(unsigned int number) "%u" + +# m25p80.c +m25p80_flash_erase(void *s, int offset, uint32_t len) "[%p] offset = 0x%"PRIx32", len = %u" +m25p80_programming_zero_to_one(void *s, uint32_t addr, uint8_t prev, uint8_t data) "[%p] programming zero to one! addr=0x%"PRIx32" 0x%"PRIx8" -> 0x%"PRIx8 +m25p80_reset_done(void *s) "[%p] Reset done." 
+m25p80_command_decoded(void *s, uint32_t cmd) "[%p] new command:0x%"PRIx32 +m25p80_complete_collecting(void *s, uint32_t cmd, int n, uint8_t ear, uint32_t cur_addr) "[%p] decode cmd: 0x%"PRIx32" len %d ear 0x%"PRIx8" addr 0x%"PRIx32 +m25p80_populated_jedec(void *s) "[%p] populated jedec code" +m25p80_chip_erase(void *s) "[%p] chip erase" +m25p80_select(void *s, const char *what) "[%p] %sselect" +m25p80_page_program(void *s, uint32_t addr, uint8_t tx) "[%p] page program cur_addr=0x%"PRIx32" data=0x%"PRIx8 +m25p80_transfer(void *s, uint8_t state, uint32_t len, uint8_t needed, uint32_t pos, uint32_t cur_addr, uint8_t t) "[%p] Transfer state 0x%"PRIx8" len 0x%"PRIx32" needed 0x%"PRIx8" pos 0x%"PRIx32" addr 0x%"PRIx32" tx 0x%"PRIx8 +m25p80_read_byte(void *s, uint32_t addr, uint8_t v) "[%p] Read byte 0x%"PRIx32"=0x%"PRIx8 +m25p80_read_data(void *s, uint32_t pos, uint8_t v) "[%p] Read data 0x%"PRIx32"=0x%"PRIx8 +m25p80_binding(void *s) "[%p] Binding to IF_MTD drive" +m25p80_binding_no_bdrv(void *s) "[%p] No BDRV - binding to RAM" diff --git a/hw/block/trace.h b/hw/block/trace.h new file mode 100644 index 000000000..cde210ae6 --- /dev/null +++ b/hw/block/trace.h @@ -0,0 +1 @@ +#include "trace/trace-hw_block.h" diff --git a/hw/block/vhost-user-blk.c b/hw/block/vhost-user-blk.c new file mode 100644 index 000000000..ba13cb87e --- /dev/null +++ b/hw/block/vhost-user-blk.c @@ -0,0 +1,620 @@ +/* + * vhost-user-blk host device + * + * Copyright(C) 2017 Intel Corporation. + * + * Authors: + * Changpeng Liu + * + * Largely based on the "vhost-user-scsi.c" and "vhost-scsi.c" implemented by: + * Felipe Franciosi + * Stefan Hajnoczi + * Nicholas Bellinger + * + * This work is licensed under the terms of the GNU LGPL, version 2 or later. + * See the COPYING.LIB file in the top-level directory. 
+ * + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/error-report.h" +#include "qemu/cutils.h" +#include "hw/qdev-core.h" +#include "hw/qdev-properties.h" +#include "hw/qdev-properties-system.h" +#include "hw/virtio/vhost.h" +#include "hw/virtio/vhost-user-blk.h" +#include "hw/virtio/virtio.h" +#include "hw/virtio/virtio-bus.h" +#include "hw/virtio/virtio-access.h" +#include "sysemu/sysemu.h" +#include "sysemu/runstate.h" + +#define REALIZE_CONNECTION_RETRIES 3 + +static const int user_feature_bits[] = { + VIRTIO_BLK_F_SIZE_MAX, + VIRTIO_BLK_F_SEG_MAX, + VIRTIO_BLK_F_GEOMETRY, + VIRTIO_BLK_F_BLK_SIZE, + VIRTIO_BLK_F_TOPOLOGY, + VIRTIO_BLK_F_MQ, + VIRTIO_BLK_F_RO, + VIRTIO_BLK_F_FLUSH, + VIRTIO_BLK_F_CONFIG_WCE, + VIRTIO_BLK_F_DISCARD, + VIRTIO_BLK_F_WRITE_ZEROES, + VIRTIO_F_VERSION_1, + VIRTIO_RING_F_INDIRECT_DESC, + VIRTIO_RING_F_EVENT_IDX, + VIRTIO_F_NOTIFY_ON_EMPTY, + VIRTIO_F_RING_PACKED, + VIRTIO_F_IOMMU_PLATFORM, + VHOST_INVALID_FEATURE_BIT +}; + +static void vhost_user_blk_event(void *opaque, QEMUChrEvent event); + +static void vhost_user_blk_update_config(VirtIODevice *vdev, uint8_t *config) +{ + VHostUserBlk *s = VHOST_USER_BLK(vdev); + + /* Our num_queues overrides the device backend */ + virtio_stw_p(vdev, &s->blkcfg.num_queues, s->num_queues); + + memcpy(config, &s->blkcfg, sizeof(struct virtio_blk_config)); +} + +static void vhost_user_blk_set_config(VirtIODevice *vdev, const uint8_t *config) +{ + VHostUserBlk *s = VHOST_USER_BLK(vdev); + struct virtio_blk_config *blkcfg = (struct virtio_blk_config *)config; + int ret; + + if (blkcfg->wce == s->blkcfg.wce) { + return; + } + + ret = vhost_dev_set_config(&s->dev, &blkcfg->wce, + offsetof(struct virtio_blk_config, wce), + sizeof(blkcfg->wce), + VHOST_SET_CONFIG_TYPE_MASTER); + if (ret) { + error_report("set device config space failed"); + return; + } + + s->blkcfg.wce = blkcfg->wce; +} + +static int vhost_user_blk_handle_config_change(struct vhost_dev *dev) +{ + int ret; + struct virtio_blk_config blkcfg; + VHostUserBlk *s = VHOST_USER_BLK(dev->vdev); + Error *local_err = NULL; + + ret = vhost_dev_get_config(dev, (uint8_t *)&blkcfg, + sizeof(struct virtio_blk_config), + &local_err); + if (ret < 0) { + error_report_err(local_err); + return -1; + } + + /* valid for resize only */ + if (blkcfg.capacity != s->blkcfg.capacity) { + s->blkcfg.capacity = blkcfg.capacity; + memcpy(dev->vdev->config, &s->blkcfg, sizeof(struct virtio_blk_config)); + virtio_notify_config(dev->vdev); + } + + return 0; +} + +const VhostDevConfigOps blk_ops = { + .vhost_dev_config_notifier = vhost_user_blk_handle_config_change, +}; + +static int vhost_user_blk_start(VirtIODevice *vdev, Error **errp) +{ + VHostUserBlk *s = VHOST_USER_BLK(vdev); + BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev))); + VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); + int i, ret; + + if (!k->set_guest_notifiers) { + error_setg(errp, "binding does not support guest notifiers"); + return -ENOSYS; + } + + ret = vhost_dev_enable_notifiers(&s->dev, vdev); + if (ret < 0) { + error_setg_errno(errp, -ret, "Error enabling host notifiers"); + return ret; + } + + ret = k->set_guest_notifiers(qbus->parent, s->dev.nvqs, true); + if (ret < 0) { + error_setg_errno(errp, -ret, "Error binding guest notifier"); + goto err_host_notifiers; + } + + s->dev.acked_features = vdev->guest_features; + + ret = vhost_dev_prepare_inflight(&s->dev, vdev); + if (ret < 0) { + error_setg_errno(errp, -ret, "Error setting inflight format"); + goto err_guest_notifiers; + } + + if 
(!s->inflight->addr) { + ret = vhost_dev_get_inflight(&s->dev, s->queue_size, s->inflight); + if (ret < 0) { + error_setg_errno(errp, -ret, "Error getting inflight"); + goto err_guest_notifiers; + } + } + + ret = vhost_dev_set_inflight(&s->dev, s->inflight); + if (ret < 0) { + error_setg_errno(errp, -ret, "Error setting inflight"); + goto err_guest_notifiers; + } + + ret = vhost_dev_start(&s->dev, vdev); + if (ret < 0) { + error_setg_errno(errp, -ret, "Error starting vhost"); + goto err_guest_notifiers; + } + s->started_vu = true; + + /* guest_notifier_mask/pending not used yet, so just unmask + * everything here. virtio-pci will do the right thing by + * enabling/disabling irqfd. + */ + for (i = 0; i < s->dev.nvqs; i++) { + vhost_virtqueue_mask(&s->dev, vdev, i, false); + } + + return ret; + +err_guest_notifiers: + k->set_guest_notifiers(qbus->parent, s->dev.nvqs, false); +err_host_notifiers: + vhost_dev_disable_notifiers(&s->dev, vdev); + return ret; +} + +static void vhost_user_blk_stop(VirtIODevice *vdev) +{ + VHostUserBlk *s = VHOST_USER_BLK(vdev); + BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev))); + VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); + int ret; + + if (!s->started_vu) { + return; + } + s->started_vu = false; + + if (!k->set_guest_notifiers) { + return; + } + + vhost_dev_stop(&s->dev, vdev); + + ret = k->set_guest_notifiers(qbus->parent, s->dev.nvqs, false); + if (ret < 0) { + error_report("vhost guest notifier cleanup failed: %d", ret); + return; + } + + vhost_dev_disable_notifiers(&s->dev, vdev); +} + +static void vhost_user_blk_set_status(VirtIODevice *vdev, uint8_t status) +{ + VHostUserBlk *s = VHOST_USER_BLK(vdev); + bool should_start = virtio_device_started(vdev, status); + Error *local_err = NULL; + int ret; + + if (!vdev->vm_running) { + should_start = false; + } + + if (!s->connected) { + return; + } + + if (s->dev.started == should_start) { + return; + } + + if (should_start) { + ret = vhost_user_blk_start(vdev, &local_err); + if (ret < 0) { + error_reportf_err(local_err, "vhost-user-blk: vhost start failed: "); + qemu_chr_fe_disconnect(&s->chardev); + } + } else { + vhost_user_blk_stop(vdev); + } + +} + +static uint64_t vhost_user_blk_get_features(VirtIODevice *vdev, + uint64_t features, + Error **errp) +{ + VHostUserBlk *s = VHOST_USER_BLK(vdev); + + /* Turn on pre-defined features */ + virtio_add_feature(&features, VIRTIO_BLK_F_SEG_MAX); + virtio_add_feature(&features, VIRTIO_BLK_F_GEOMETRY); + virtio_add_feature(&features, VIRTIO_BLK_F_TOPOLOGY); + virtio_add_feature(&features, VIRTIO_BLK_F_BLK_SIZE); + virtio_add_feature(&features, VIRTIO_BLK_F_FLUSH); + virtio_add_feature(&features, VIRTIO_BLK_F_RO); + virtio_add_feature(&features, VIRTIO_BLK_F_DISCARD); + virtio_add_feature(&features, VIRTIO_BLK_F_WRITE_ZEROES); + + if (s->config_wce) { + virtio_add_feature(&features, VIRTIO_BLK_F_CONFIG_WCE); + } + if (s->num_queues > 1) { + virtio_add_feature(&features, VIRTIO_BLK_F_MQ); + } + + return vhost_get_features(&s->dev, user_feature_bits, features); +} + +static void vhost_user_blk_handle_output(VirtIODevice *vdev, VirtQueue *vq) +{ + VHostUserBlk *s = VHOST_USER_BLK(vdev); + Error *local_err = NULL; + int i, ret; + + if (!vdev->start_on_kick) { + return; + } + + if (!s->connected) { + return; + } + + if (s->dev.started) { + return; + } + + /* Some guests kick before setting VIRTIO_CONFIG_S_DRIVER_OK so start + * vhost here instead of waiting for .set_status(). 
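+     * (The virtio core sets start_on_kick for legacy guests that do not
+     * negotiate VIRTIO_F_VERSION_1; those are the ones that may kick
+     * before DRIVER_OK.)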
+ */ + ret = vhost_user_blk_start(vdev, &local_err); + if (ret < 0) { + error_reportf_err(local_err, "vhost-user-blk: vhost start failed: "); + qemu_chr_fe_disconnect(&s->chardev); + return; + } + + /* Kick right away to begin processing requests already in vring */ + for (i = 0; i < s->dev.nvqs; i++) { + VirtQueue *kick_vq = virtio_get_queue(vdev, i); + + if (!virtio_queue_get_desc_addr(vdev, i)) { + continue; + } + event_notifier_set(virtio_queue_get_host_notifier(kick_vq)); + } +} + +static void vhost_user_blk_reset(VirtIODevice *vdev) +{ + VHostUserBlk *s = VHOST_USER_BLK(vdev); + + vhost_dev_free_inflight(s->inflight); +} + +static int vhost_user_blk_connect(DeviceState *dev, Error **errp) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(dev); + VHostUserBlk *s = VHOST_USER_BLK(vdev); + int ret = 0; + + if (s->connected) { + return 0; + } + s->connected = true; + + s->dev.num_queues = s->num_queues; + s->dev.nvqs = s->num_queues; + s->dev.vqs = s->vhost_vqs; + s->dev.vq_index = 0; + s->dev.backend_features = 0; + + vhost_dev_set_config_notifier(&s->dev, &blk_ops); + + ret = vhost_dev_init(&s->dev, &s->vhost_user, VHOST_BACKEND_TYPE_USER, 0, + errp); + if (ret < 0) { + return ret; + } + + /* restore vhost state */ + if (virtio_device_started(vdev, vdev->status)) { + ret = vhost_user_blk_start(vdev, errp); + if (ret < 0) { + return ret; + } + } + + return 0; +} + +static void vhost_user_blk_disconnect(DeviceState *dev) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(dev); + VHostUserBlk *s = VHOST_USER_BLK(vdev); + + if (!s->connected) { + return; + } + s->connected = false; + + vhost_user_blk_stop(vdev); + + vhost_dev_cleanup(&s->dev); +} + +static void vhost_user_blk_chr_closed_bh(void *opaque) +{ + DeviceState *dev = opaque; + VirtIODevice *vdev = VIRTIO_DEVICE(dev); + VHostUserBlk *s = VHOST_USER_BLK(vdev); + + vhost_user_blk_disconnect(dev); + qemu_chr_fe_set_handlers(&s->chardev, NULL, NULL, vhost_user_blk_event, + NULL, opaque, NULL, true); +} + +static void vhost_user_blk_event(void *opaque, QEMUChrEvent event) +{ + DeviceState *dev = opaque; + VirtIODevice *vdev = VIRTIO_DEVICE(dev); + VHostUserBlk *s = VHOST_USER_BLK(vdev); + Error *local_err = NULL; + + switch (event) { + case CHR_EVENT_OPENED: + if (vhost_user_blk_connect(dev, &local_err) < 0) { + error_report_err(local_err); + qemu_chr_fe_disconnect(&s->chardev); + return; + } + break; + case CHR_EVENT_CLOSED: + if (!runstate_check(RUN_STATE_SHUTDOWN)) { + /* + * A close event may happen during a read/write, but vhost + * code assumes the vhost_dev remains setup, so delay the + * stop & clear. + */ + AioContext *ctx = qemu_get_current_aio_context(); + + qemu_chr_fe_set_handlers(&s->chardev, NULL, NULL, NULL, NULL, + NULL, NULL, false); + aio_bh_schedule_oneshot(ctx, vhost_user_blk_chr_closed_bh, opaque); + + /* + * Move vhost device to the stopped state. The vhost-user device + * will be clean up and disconnected in BH. This can be useful in + * the vhost migration code. If disconnect was caught there is an + * option for the general vhost code to get the dev state without + * knowing its type (in this case vhost-user). 
+ */ + s->dev.started = false; + } + break; + case CHR_EVENT_BREAK: + case CHR_EVENT_MUX_IN: + case CHR_EVENT_MUX_OUT: + /* Ignore */ + break; + } +} + +static int vhost_user_blk_realize_connect(VHostUserBlk *s, Error **errp) +{ + DeviceState *dev = &s->parent_obj.parent_obj; + int ret; + + s->connected = false; + + ret = qemu_chr_fe_wait_connected(&s->chardev, errp); + if (ret < 0) { + return ret; + } + + ret = vhost_user_blk_connect(dev, errp); + if (ret < 0) { + qemu_chr_fe_disconnect(&s->chardev); + return ret; + } + assert(s->connected); + + ret = vhost_dev_get_config(&s->dev, (uint8_t *)&s->blkcfg, + sizeof(struct virtio_blk_config), errp); + if (ret < 0) { + qemu_chr_fe_disconnect(&s->chardev); + vhost_dev_cleanup(&s->dev); + return ret; + } + + return 0; +} + +static void vhost_user_blk_device_realize(DeviceState *dev, Error **errp) +{ + ERRP_GUARD(); + VirtIODevice *vdev = VIRTIO_DEVICE(dev); + VHostUserBlk *s = VHOST_USER_BLK(vdev); + int retries; + int i, ret; + + if (!s->chardev.chr) { + error_setg(errp, "chardev is mandatory"); + return; + } + + if (s->num_queues == VHOST_USER_BLK_AUTO_NUM_QUEUES) { + s->num_queues = 1; + } + if (!s->num_queues || s->num_queues > VIRTIO_QUEUE_MAX) { + error_setg(errp, "invalid number of IO queues"); + return; + } + + if (!s->queue_size) { + error_setg(errp, "queue size must be non-zero"); + return; + } + if (s->queue_size > VIRTQUEUE_MAX_SIZE) { + error_setg(errp, "queue size must not exceed %d", + VIRTQUEUE_MAX_SIZE); + return; + } + + if (!vhost_user_init(&s->vhost_user, &s->chardev, errp)) { + return; + } + + virtio_init(vdev, "virtio-blk", VIRTIO_ID_BLOCK, + sizeof(struct virtio_blk_config)); + + s->virtqs = g_new(VirtQueue *, s->num_queues); + for (i = 0; i < s->num_queues; i++) { + s->virtqs[i] = virtio_add_queue(vdev, s->queue_size, + vhost_user_blk_handle_output); + } + + s->inflight = g_new0(struct vhost_inflight, 1); + s->vhost_vqs = g_new0(struct vhost_virtqueue, s->num_queues); + + retries = REALIZE_CONNECTION_RETRIES; + assert(!*errp); + do { + if (*errp) { + error_prepend(errp, "Reconnecting after error: "); + error_report_err(*errp); + *errp = NULL; + } + ret = vhost_user_blk_realize_connect(s, errp); + } while (ret == -EPROTO && retries--); + + if (ret < 0) { + goto virtio_err; + } + + /* we're fully initialized, now we can operate, so add the handler */ + qemu_chr_fe_set_handlers(&s->chardev, NULL, NULL, + vhost_user_blk_event, NULL, (void *)dev, + NULL, true); + return; + +virtio_err: + g_free(s->vhost_vqs); + s->vhost_vqs = NULL; + g_free(s->inflight); + s->inflight = NULL; + for (i = 0; i < s->num_queues; i++) { + virtio_delete_queue(s->virtqs[i]); + } + g_free(s->virtqs); + virtio_cleanup(vdev); + vhost_user_cleanup(&s->vhost_user); +} + +static void vhost_user_blk_device_unrealize(DeviceState *dev) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(dev); + VHostUserBlk *s = VHOST_USER_BLK(dev); + int i; + + virtio_set_status(vdev, 0); + qemu_chr_fe_set_handlers(&s->chardev, NULL, NULL, NULL, + NULL, NULL, NULL, false); + vhost_dev_cleanup(&s->dev); + vhost_dev_free_inflight(s->inflight); + g_free(s->vhost_vqs); + s->vhost_vqs = NULL; + g_free(s->inflight); + s->inflight = NULL; + + for (i = 0; i < s->num_queues; i++) { + virtio_delete_queue(s->virtqs[i]); + } + g_free(s->virtqs); + virtio_cleanup(vdev); + vhost_user_cleanup(&s->vhost_user); +} + +static void vhost_user_blk_instance_init(Object *obj) +{ + VHostUserBlk *s = VHOST_USER_BLK(obj); + + device_add_bootindex_property(obj, &s->bootindex, "bootindex", + "/disk@0,0", 
DEVICE(obj)); +} + +static const VMStateDescription vmstate_vhost_user_blk = { + .name = "vhost-user-blk", + .minimum_version_id = 1, + .version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_VIRTIO_DEVICE, + VMSTATE_END_OF_LIST() + }, +}; + +static Property vhost_user_blk_properties[] = { + DEFINE_PROP_CHR("chardev", VHostUserBlk, chardev), + DEFINE_PROP_UINT16("num-queues", VHostUserBlk, num_queues, + VHOST_USER_BLK_AUTO_NUM_QUEUES), + DEFINE_PROP_UINT32("queue-size", VHostUserBlk, queue_size, 128), + DEFINE_PROP_BIT("config-wce", VHostUserBlk, config_wce, 0, true), + DEFINE_PROP_END_OF_LIST(), +}; + +static void vhost_user_blk_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass); + + device_class_set_props(dc, vhost_user_blk_properties); + dc->vmsd = &vmstate_vhost_user_blk; + set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); + vdc->realize = vhost_user_blk_device_realize; + vdc->unrealize = vhost_user_blk_device_unrealize; + vdc->get_config = vhost_user_blk_update_config; + vdc->set_config = vhost_user_blk_set_config; + vdc->get_features = vhost_user_blk_get_features; + vdc->set_status = vhost_user_blk_set_status; + vdc->reset = vhost_user_blk_reset; +} + +static const TypeInfo vhost_user_blk_info = { + .name = TYPE_VHOST_USER_BLK, + .parent = TYPE_VIRTIO_DEVICE, + .instance_size = sizeof(VHostUserBlk), + .instance_init = vhost_user_blk_instance_init, + .class_init = vhost_user_blk_class_init, +}; + +static void virtio_register_types(void) +{ + type_register_static(&vhost_user_blk_info); +} + +type_init(virtio_register_types) diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c new file mode 100644 index 000000000..f139cd7cc --- /dev/null +++ b/hw/block/virtio-blk.c @@ -0,0 +1,1353 @@ +/* + * Virtio Block Device + * + * Copyright IBM, Corp. 2007 + * + * Authors: + * Anthony Liguori + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + * + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/iov.h" +#include "qemu/module.h" +#include "qemu/error-report.h" +#include "qemu/main-loop.h" +#include "trace.h" +#include "hw/block/block.h" +#include "hw/qdev-properties.h" +#include "sysemu/blockdev.h" +#include "sysemu/sysemu.h" +#include "sysemu/runstate.h" +#include "hw/virtio/virtio-blk.h" +#include "dataplane/virtio-blk.h" +#include "scsi/constants.h" +#ifdef __linux__ +# include +#endif +#include "hw/virtio/virtio-bus.h" +#include "migration/qemu-file-types.h" +#include "hw/virtio/virtio-access.h" + +/* Config size before the discard support (hide associated config fields) */ +#define VIRTIO_BLK_CFG_SIZE offsetof(struct virtio_blk_config, \ + max_discard_sectors) +/* + * Starting from the discard feature, we can use this array to properly + * set the config size depending on the features enabled. 
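+ * For example, with VIRTIO_BLK_F_DISCARD enabled but
+ * VIRTIO_BLK_F_WRITE_ZEROES disabled, config_size extends only to
+ * endof(struct virtio_blk_config, discard_sector_alignment), so the
+ * write-zeroes fields stay hidden from the guest.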
+ */ +static const VirtIOFeature feature_sizes[] = { + {.flags = 1ULL << VIRTIO_BLK_F_DISCARD, + .end = endof(struct virtio_blk_config, discard_sector_alignment)}, + {.flags = 1ULL << VIRTIO_BLK_F_WRITE_ZEROES, + .end = endof(struct virtio_blk_config, write_zeroes_may_unmap)}, + {} +}; + +static void virtio_blk_set_config_size(VirtIOBlock *s, uint64_t host_features) +{ + s->config_size = MAX(VIRTIO_BLK_CFG_SIZE, + virtio_feature_get_config_size(feature_sizes, host_features)); + + assert(s->config_size <= sizeof(struct virtio_blk_config)); +} + +static void virtio_blk_init_request(VirtIOBlock *s, VirtQueue *vq, + VirtIOBlockReq *req) +{ + req->dev = s; + req->vq = vq; + req->qiov.size = 0; + req->in_len = 0; + req->next = NULL; + req->mr_next = NULL; +} + +static void virtio_blk_free_request(VirtIOBlockReq *req) +{ + g_free(req); +} + +static void virtio_blk_req_complete(VirtIOBlockReq *req, unsigned char status) +{ + VirtIOBlock *s = req->dev; + VirtIODevice *vdev = VIRTIO_DEVICE(s); + + trace_virtio_blk_req_complete(vdev, req, status); + + stb_p(&req->in->status, status); + iov_discard_undo(&req->inhdr_undo); + iov_discard_undo(&req->outhdr_undo); + virtqueue_push(req->vq, &req->elem, req->in_len); + if (s->dataplane_started && !s->dataplane_disabled) { + virtio_blk_data_plane_notify(s->dataplane, req->vq); + } else { + virtio_notify(vdev, req->vq); + } +} + +static int virtio_blk_handle_rw_error(VirtIOBlockReq *req, int error, + bool is_read, bool acct_failed) +{ + VirtIOBlock *s = req->dev; + BlockErrorAction action = blk_get_error_action(s->blk, is_read, error); + + if (action == BLOCK_ERROR_ACTION_STOP) { + /* Break the link as the next request is going to be parsed from the + * ring again. Otherwise we may end up doing a double completion! */ + req->mr_next = NULL; + req->next = s->rq; + s->rq = req; + } else if (action == BLOCK_ERROR_ACTION_REPORT) { + virtio_blk_req_complete(req, VIRTIO_BLK_S_IOERR); + if (acct_failed) { + block_acct_failed(blk_get_stats(s->blk), &req->acct); + } + virtio_blk_free_request(req); + } + + blk_error_action(s->blk, action, is_read, error); + return action != BLOCK_ERROR_ACTION_IGNORE; +} + +static void virtio_blk_rw_complete(void *opaque, int ret) +{ + VirtIOBlockReq *next = opaque; + VirtIOBlock *s = next->dev; + VirtIODevice *vdev = VIRTIO_DEVICE(s); + + aio_context_acquire(blk_get_aio_context(s->conf.conf.blk)); + while (next) { + VirtIOBlockReq *req = next; + next = req->mr_next; + trace_virtio_blk_rw_complete(vdev, req, ret); + + if (req->qiov.nalloc != -1) { + /* If nalloc is != -1 req->qiov is a local copy of the original + * external iovec. It was allocated in submit_requests to be + * able to merge requests. */ + qemu_iovec_destroy(&req->qiov); + } + + if (ret) { + int p = virtio_ldl_p(VIRTIO_DEVICE(s), &req->out.type); + bool is_read = !(p & VIRTIO_BLK_T_OUT); + /* Note that memory may be dirtied on read failure. If the + * virtio request is not completed here, as is the case for + * BLOCK_ERROR_ACTION_STOP, the memory may not be copied + * correctly during live migration. While this is ugly, + * it is acceptable because the device is free to write to + * the memory until the request is completed (which will + * happen on the other side of the migration). 
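+ * In practice this means a failed read that took BLOCK_ERROR_ACTION_STOP
+ * sits on the s->rq list and is re-submitted once the VM resumes, at
+ * which point the guest memory is rewritten with consistent data.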
+ */ + if (virtio_blk_handle_rw_error(req, -ret, is_read, true)) { + continue; + } + } + + virtio_blk_req_complete(req, VIRTIO_BLK_S_OK); + block_acct_done(blk_get_stats(s->blk), &req->acct); + virtio_blk_free_request(req); + } + aio_context_release(blk_get_aio_context(s->conf.conf.blk)); +} + +static void virtio_blk_flush_complete(void *opaque, int ret) +{ + VirtIOBlockReq *req = opaque; + VirtIOBlock *s = req->dev; + + aio_context_acquire(blk_get_aio_context(s->conf.conf.blk)); + if (ret) { + if (virtio_blk_handle_rw_error(req, -ret, 0, true)) { + goto out; + } + } + + virtio_blk_req_complete(req, VIRTIO_BLK_S_OK); + block_acct_done(blk_get_stats(s->blk), &req->acct); + virtio_blk_free_request(req); + +out: + aio_context_release(blk_get_aio_context(s->conf.conf.blk)); +} + +static void virtio_blk_discard_write_zeroes_complete(void *opaque, int ret) +{ + VirtIOBlockReq *req = opaque; + VirtIOBlock *s = req->dev; + bool is_write_zeroes = (virtio_ldl_p(VIRTIO_DEVICE(s), &req->out.type) & + ~VIRTIO_BLK_T_BARRIER) == VIRTIO_BLK_T_WRITE_ZEROES; + + aio_context_acquire(blk_get_aio_context(s->conf.conf.blk)); + if (ret) { + if (virtio_blk_handle_rw_error(req, -ret, false, is_write_zeroes)) { + goto out; + } + } + + virtio_blk_req_complete(req, VIRTIO_BLK_S_OK); + if (is_write_zeroes) { + block_acct_done(blk_get_stats(s->blk), &req->acct); + } + virtio_blk_free_request(req); + +out: + aio_context_release(blk_get_aio_context(s->conf.conf.blk)); +} + +#ifdef __linux__ + +typedef struct { + VirtIOBlockReq *req; + struct sg_io_hdr hdr; +} VirtIOBlockIoctlReq; + +static void virtio_blk_ioctl_complete(void *opaque, int status) +{ + VirtIOBlockIoctlReq *ioctl_req = opaque; + VirtIOBlockReq *req = ioctl_req->req; + VirtIOBlock *s = req->dev; + VirtIODevice *vdev = VIRTIO_DEVICE(s); + struct virtio_scsi_inhdr *scsi; + struct sg_io_hdr *hdr; + + scsi = (void *)req->elem.in_sg[req->elem.in_num - 2].iov_base; + + if (status) { + status = VIRTIO_BLK_S_UNSUPP; + virtio_stl_p(vdev, &scsi->errors, 255); + goto out; + } + + hdr = &ioctl_req->hdr; + /* + * From SCSI-Generic-HOWTO: "Some lower level drivers (e.g. ide-scsi) + * clear the masked_status field [hence status gets cleared too, see + * block/scsi_ioctl.c] even when a CHECK_CONDITION or COMMAND_TERMINATED + * status has occurred. However they do set DRIVER_SENSE in driver_status + * field. Also a (sb_len_wr > 0) indicates there is a sense buffer. 
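+ * As a worked example of the packing done below: a CHECK_CONDITION
+ * status (0x02) with DRIVER_SENSE (0x08) in driver_status would be
+ * reported to the guest as errors = 0x08000002, i.e. status in the low
+ * byte, then msg_status, host_status and driver_status.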
+ */ + if (hdr->status == 0 && hdr->sb_len_wr > 0) { + hdr->status = CHECK_CONDITION; + } + + virtio_stl_p(vdev, &scsi->errors, + hdr->status | (hdr->msg_status << 8) | + (hdr->host_status << 16) | (hdr->driver_status << 24)); + virtio_stl_p(vdev, &scsi->residual, hdr->resid); + virtio_stl_p(vdev, &scsi->sense_len, hdr->sb_len_wr); + virtio_stl_p(vdev, &scsi->data_len, hdr->dxfer_len); + +out: + aio_context_acquire(blk_get_aio_context(s->conf.conf.blk)); + virtio_blk_req_complete(req, status); + virtio_blk_free_request(req); + aio_context_release(blk_get_aio_context(s->conf.conf.blk)); + g_free(ioctl_req); +} + +#endif + +static VirtIOBlockReq *virtio_blk_get_request(VirtIOBlock *s, VirtQueue *vq) +{ + VirtIOBlockReq *req = virtqueue_pop(vq, sizeof(VirtIOBlockReq)); + + if (req) { + virtio_blk_init_request(s, vq, req); + } + return req; +} + +static int virtio_blk_handle_scsi_req(VirtIOBlockReq *req) +{ + int status = VIRTIO_BLK_S_OK; + struct virtio_scsi_inhdr *scsi = NULL; + VirtIOBlock *blk = req->dev; + VirtIODevice *vdev = VIRTIO_DEVICE(blk); + VirtQueueElement *elem = &req->elem; + +#ifdef __linux__ + int i; + VirtIOBlockIoctlReq *ioctl_req; + BlockAIOCB *acb; +#endif + + /* + * We require at least one output segment each for the virtio_blk_outhdr + * and the SCSI command block. + * + * We also at least require the virtio_blk_inhdr, the virtio_scsi_inhdr + * and the sense buffer pointer in the input segments. + */ + if (elem->out_num < 2 || elem->in_num < 3) { + status = VIRTIO_BLK_S_IOERR; + goto fail; + } + + /* + * The scsi inhdr is placed in the second-to-last input segment, just + * before the regular inhdr. + */ + scsi = (void *)elem->in_sg[elem->in_num - 2].iov_base; + + if (!virtio_has_feature(blk->host_features, VIRTIO_BLK_F_SCSI)) { + status = VIRTIO_BLK_S_UNSUPP; + goto fail; + } + + /* + * No support for bidirection commands yet. + */ + if (elem->out_num > 2 && elem->in_num > 3) { + status = VIRTIO_BLK_S_UNSUPP; + goto fail; + } + +#ifdef __linux__ + ioctl_req = g_new0(VirtIOBlockIoctlReq, 1); + ioctl_req->req = req; + ioctl_req->hdr.interface_id = 'S'; + ioctl_req->hdr.cmd_len = elem->out_sg[1].iov_len; + ioctl_req->hdr.cmdp = elem->out_sg[1].iov_base; + ioctl_req->hdr.dxfer_len = 0; + + if (elem->out_num > 2) { + /* + * If there are more than the minimally required 2 output segments + * there is write payload starting from the third iovec. + */ + ioctl_req->hdr.dxfer_direction = SG_DXFER_TO_DEV; + ioctl_req->hdr.iovec_count = elem->out_num - 2; + + for (i = 0; i < ioctl_req->hdr.iovec_count; i++) { + ioctl_req->hdr.dxfer_len += elem->out_sg[i + 2].iov_len; + } + + ioctl_req->hdr.dxferp = elem->out_sg + 2; + + } else if (elem->in_num > 3) { + /* + * If we have more than 3 input segments the guest wants to actually + * read data. + */ + ioctl_req->hdr.dxfer_direction = SG_DXFER_FROM_DEV; + ioctl_req->hdr.iovec_count = elem->in_num - 3; + for (i = 0; i < ioctl_req->hdr.iovec_count; i++) { + ioctl_req->hdr.dxfer_len += elem->in_sg[i].iov_len; + } + + ioctl_req->hdr.dxferp = elem->in_sg; + } else { + /* + * Some SCSI commands don't actually transfer any data. 
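+ * (TEST UNIT READY is a typical example, hence SG_DXFER_NONE.)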
+ */ + ioctl_req->hdr.dxfer_direction = SG_DXFER_NONE; + } + + ioctl_req->hdr.sbp = elem->in_sg[elem->in_num - 3].iov_base; + ioctl_req->hdr.mx_sb_len = elem->in_sg[elem->in_num - 3].iov_len; + + acb = blk_aio_ioctl(blk->blk, SG_IO, &ioctl_req->hdr, + virtio_blk_ioctl_complete, ioctl_req); + if (!acb) { + g_free(ioctl_req); + status = VIRTIO_BLK_S_UNSUPP; + goto fail; + } + return -EINPROGRESS; +#else + abort(); +#endif + +fail: + /* Just put anything nonzero so that the ioctl fails in the guest. */ + if (scsi) { + virtio_stl_p(vdev, &scsi->errors, 255); + } + return status; +} + +static void virtio_blk_handle_scsi(VirtIOBlockReq *req) +{ + int status; + + status = virtio_blk_handle_scsi_req(req); + if (status != -EINPROGRESS) { + virtio_blk_req_complete(req, status); + virtio_blk_free_request(req); + } +} + +static inline void submit_requests(BlockBackend *blk, MultiReqBuffer *mrb, + int start, int num_reqs, int niov) +{ + QEMUIOVector *qiov = &mrb->reqs[start]->qiov; + int64_t sector_num = mrb->reqs[start]->sector_num; + bool is_write = mrb->is_write; + + if (num_reqs > 1) { + int i; + struct iovec *tmp_iov = qiov->iov; + int tmp_niov = qiov->niov; + + /* mrb->reqs[start]->qiov was initialized from external so we can't + * modify it here. We need to initialize it locally and then add the + * external iovecs. */ + qemu_iovec_init(qiov, niov); + + for (i = 0; i < tmp_niov; i++) { + qemu_iovec_add(qiov, tmp_iov[i].iov_base, tmp_iov[i].iov_len); + } + + for (i = start + 1; i < start + num_reqs; i++) { + qemu_iovec_concat(qiov, &mrb->reqs[i]->qiov, 0, + mrb->reqs[i]->qiov.size); + mrb->reqs[i - 1]->mr_next = mrb->reqs[i]; + } + + trace_virtio_blk_submit_multireq(VIRTIO_DEVICE(mrb->reqs[start]->dev), + mrb, start, num_reqs, + sector_num << BDRV_SECTOR_BITS, + qiov->size, is_write); + block_acct_merge_done(blk_get_stats(blk), + is_write ? BLOCK_ACCT_WRITE : BLOCK_ACCT_READ, + num_reqs - 1); + } + + if (is_write) { + blk_aio_pwritev(blk, sector_num << BDRV_SECTOR_BITS, qiov, 0, + virtio_blk_rw_complete, mrb->reqs[start]); + } else { + blk_aio_preadv(blk, sector_num << BDRV_SECTOR_BITS, qiov, 0, + virtio_blk_rw_complete, mrb->reqs[start]); + } +} + +static int multireq_compare(const void *a, const void *b) +{ + const VirtIOBlockReq *req1 = *(VirtIOBlockReq **)a, + *req2 = *(VirtIOBlockReq **)b; + + /* + * Note that we can't simply subtract sector_num1 from sector_num2 + * here as that could overflow the return value. + */ + if (req1->sector_num > req2->sector_num) { + return 1; + } else if (req1->sector_num < req2->sector_num) { + return -1; + } else { + return 0; + } +} + +static void virtio_blk_submit_multireq(BlockBackend *blk, MultiReqBuffer *mrb) +{ + int i = 0, start = 0, num_reqs = 0, niov = 0, nb_sectors = 0; + uint32_t max_transfer; + int64_t sector_num = 0; + + if (mrb->num_reqs == 1) { + submit_requests(blk, mrb, 0, 1, -1); + mrb->num_reqs = 0; + return; + } + + max_transfer = blk_get_max_transfer(mrb->reqs[0]->dev->blk); + + qsort(mrb->reqs, mrb->num_reqs, sizeof(*mrb->reqs), + &multireq_compare); + + for (i = 0; i < mrb->num_reqs; i++) { + VirtIOBlockReq *req = mrb->reqs[i]; + if (num_reqs > 0) { + /* + * NOTE: We cannot merge the requests in below situations: + * 1. requests are not sequential + * 2. merge would exceed maximum number of IOVs + * 3. 
merge would exceed maximum transfer length of backend device + */ + if (sector_num + nb_sectors != req->sector_num || + niov > blk_get_max_iov(blk) - req->qiov.niov || + req->qiov.size > max_transfer || + nb_sectors > (max_transfer - + req->qiov.size) / BDRV_SECTOR_SIZE) { + submit_requests(blk, mrb, start, num_reqs, niov); + num_reqs = 0; + } + } + + if (num_reqs == 0) { + sector_num = req->sector_num; + nb_sectors = niov = 0; + start = i; + } + + nb_sectors += req->qiov.size / BDRV_SECTOR_SIZE; + niov += req->qiov.niov; + num_reqs++; + } + + submit_requests(blk, mrb, start, num_reqs, niov); + mrb->num_reqs = 0; +} + +static void virtio_blk_handle_flush(VirtIOBlockReq *req, MultiReqBuffer *mrb) +{ + VirtIOBlock *s = req->dev; + + block_acct_start(blk_get_stats(s->blk), &req->acct, 0, + BLOCK_ACCT_FLUSH); + + /* + * Make sure all outstanding writes are posted to the backing device. + */ + if (mrb->is_write && mrb->num_reqs > 0) { + virtio_blk_submit_multireq(s->blk, mrb); + } + blk_aio_flush(s->blk, virtio_blk_flush_complete, req); +} + +static bool virtio_blk_sect_range_ok(VirtIOBlock *dev, + uint64_t sector, size_t size) +{ + uint64_t nb_sectors = size >> BDRV_SECTOR_BITS; + uint64_t total_sectors; + + if (nb_sectors > BDRV_REQUEST_MAX_SECTORS) { + return false; + } + if (sector & dev->sector_mask) { + return false; + } + if (size % dev->conf.conf.logical_block_size) { + return false; + } + blk_get_geometry(dev->blk, &total_sectors); + if (sector > total_sectors || nb_sectors > total_sectors - sector) { + return false; + } + return true; +} + +static uint8_t virtio_blk_handle_discard_write_zeroes(VirtIOBlockReq *req, + struct virtio_blk_discard_write_zeroes *dwz_hdr, bool is_write_zeroes) +{ + VirtIOBlock *s = req->dev; + VirtIODevice *vdev = VIRTIO_DEVICE(s); + uint64_t sector; + uint32_t num_sectors, flags, max_sectors; + uint8_t err_status; + int bytes; + + sector = virtio_ldq_p(vdev, &dwz_hdr->sector); + num_sectors = virtio_ldl_p(vdev, &dwz_hdr->num_sectors); + flags = virtio_ldl_p(vdev, &dwz_hdr->flags); + max_sectors = is_write_zeroes ? s->conf.max_write_zeroes_sectors : + s->conf.max_discard_sectors; + + /* + * max_sectors is at most BDRV_REQUEST_MAX_SECTORS, this check + * make us sure that "num_sectors << BDRV_SECTOR_BITS" can fit in + * the integer variable. + */ + if (unlikely(num_sectors > max_sectors)) { + err_status = VIRTIO_BLK_S_IOERR; + goto err; + } + + bytes = num_sectors << BDRV_SECTOR_BITS; + + if (unlikely(!virtio_blk_sect_range_ok(s, sector, bytes))) { + err_status = VIRTIO_BLK_S_IOERR; + goto err; + } + + /* + * The device MUST set the status byte to VIRTIO_BLK_S_UNSUPP for discard + * and write zeroes commands if any unknown flag is set. + */ + if (unlikely(flags & ~VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP)) { + err_status = VIRTIO_BLK_S_UNSUPP; + goto err; + } + + if (is_write_zeroes) { /* VIRTIO_BLK_T_WRITE_ZEROES */ + int blk_aio_flags = 0; + + if (flags & VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP) { + blk_aio_flags |= BDRV_REQ_MAY_UNMAP; + } + + block_acct_start(blk_get_stats(s->blk), &req->acct, bytes, + BLOCK_ACCT_WRITE); + + blk_aio_pwrite_zeroes(s->blk, sector << BDRV_SECTOR_BITS, + bytes, blk_aio_flags, + virtio_blk_discard_write_zeroes_complete, req); + } else { /* VIRTIO_BLK_T_DISCARD */ + /* + * The device MUST set the status byte to VIRTIO_BLK_S_UNSUPP for + * discard commands if the unmap flag is set. 
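+ * (The flag is only defined for write zeroes requests, so a discard
+ * carrying it is rejected rather than silently ignored.)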
+ */ + if (unlikely(flags & VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP)) { + err_status = VIRTIO_BLK_S_UNSUPP; + goto err; + } + + blk_aio_pdiscard(s->blk, sector << BDRV_SECTOR_BITS, bytes, + virtio_blk_discard_write_zeroes_complete, req); + } + + return VIRTIO_BLK_S_OK; + +err: + if (is_write_zeroes) { + block_acct_invalid(blk_get_stats(s->blk), BLOCK_ACCT_WRITE); + } + return err_status; +} + +static int virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb) +{ + uint32_t type; + struct iovec *in_iov = req->elem.in_sg; + struct iovec *out_iov = req->elem.out_sg; + unsigned in_num = req->elem.in_num; + unsigned out_num = req->elem.out_num; + VirtIOBlock *s = req->dev; + VirtIODevice *vdev = VIRTIO_DEVICE(s); + + if (req->elem.out_num < 1 || req->elem.in_num < 1) { + virtio_error(vdev, "virtio-blk missing headers"); + return -1; + } + + if (unlikely(iov_to_buf(out_iov, out_num, 0, &req->out, + sizeof(req->out)) != sizeof(req->out))) { + virtio_error(vdev, "virtio-blk request outhdr too short"); + return -1; + } + + iov_discard_front_undoable(&out_iov, &out_num, sizeof(req->out), + &req->outhdr_undo); + + if (in_iov[in_num - 1].iov_len < sizeof(struct virtio_blk_inhdr)) { + virtio_error(vdev, "virtio-blk request inhdr too short"); + iov_discard_undo(&req->outhdr_undo); + return -1; + } + + /* We always touch the last byte, so just see how big in_iov is. */ + req->in_len = iov_size(in_iov, in_num); + req->in = (void *)in_iov[in_num - 1].iov_base + + in_iov[in_num - 1].iov_len + - sizeof(struct virtio_blk_inhdr); + iov_discard_back_undoable(in_iov, &in_num, sizeof(struct virtio_blk_inhdr), + &req->inhdr_undo); + + type = virtio_ldl_p(vdev, &req->out.type); + + /* VIRTIO_BLK_T_OUT defines the command direction. VIRTIO_BLK_T_BARRIER + * is an optional flag. Although a guest should not send this flag if + * not negotiated we ignored it in the past. So keep ignoring it. */ + switch (type & ~(VIRTIO_BLK_T_OUT | VIRTIO_BLK_T_BARRIER)) { + case VIRTIO_BLK_T_IN: + { + bool is_write = type & VIRTIO_BLK_T_OUT; + req->sector_num = virtio_ldq_p(vdev, &req->out.sector); + + if (is_write) { + qemu_iovec_init_external(&req->qiov, out_iov, out_num); + trace_virtio_blk_handle_write(vdev, req, req->sector_num, + req->qiov.size / BDRV_SECTOR_SIZE); + } else { + qemu_iovec_init_external(&req->qiov, in_iov, in_num); + trace_virtio_blk_handle_read(vdev, req, req->sector_num, + req->qiov.size / BDRV_SECTOR_SIZE); + } + + if (!virtio_blk_sect_range_ok(s, req->sector_num, req->qiov.size)) { + virtio_blk_req_complete(req, VIRTIO_BLK_S_IOERR); + block_acct_invalid(blk_get_stats(s->blk), + is_write ? BLOCK_ACCT_WRITE : BLOCK_ACCT_READ); + virtio_blk_free_request(req); + return 0; + } + + block_acct_start(blk_get_stats(s->blk), &req->acct, req->qiov.size, + is_write ? BLOCK_ACCT_WRITE : BLOCK_ACCT_READ); + + /* merge would exceed maximum number of requests or IO direction + * changes */ + if (mrb->num_reqs > 0 && (mrb->num_reqs == VIRTIO_BLK_MAX_MERGE_REQS || + is_write != mrb->is_write || + !s->conf.request_merging)) { + virtio_blk_submit_multireq(s->blk, mrb); + } + + assert(mrb->num_reqs < VIRTIO_BLK_MAX_MERGE_REQS); + mrb->reqs[mrb->num_reqs++] = req; + mrb->is_write = is_write; + break; + } + case VIRTIO_BLK_T_FLUSH: + virtio_blk_handle_flush(req, mrb); + break; + case VIRTIO_BLK_T_SCSI_CMD: + virtio_blk_handle_scsi(req); + break; + case VIRTIO_BLK_T_GET_ID: + { + /* + * NB: per existing s/n string convention the string is + * terminated by '\0' only when shorter than buffer. 
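+ * E.g. a serial that exactly fills the VIRTIO_BLK_ID_BYTES buffer is
+ * copied without a trailing NUL, while a shorter one such as "sn1" is
+ * copied as four bytes including its terminator.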
+ */ + const char *serial = s->conf.serial ? s->conf.serial : ""; + size_t size = MIN(strlen(serial) + 1, + MIN(iov_size(in_iov, in_num), + VIRTIO_BLK_ID_BYTES)); + iov_from_buf(in_iov, in_num, 0, serial, size); + virtio_blk_req_complete(req, VIRTIO_BLK_S_OK); + virtio_blk_free_request(req); + break; + } + /* + * VIRTIO_BLK_T_DISCARD and VIRTIO_BLK_T_WRITE_ZEROES are defined with + * VIRTIO_BLK_T_OUT flag set. We masked this flag in the switch statement, + * so we must mask it for these requests, then we will check if it is set. + */ + case VIRTIO_BLK_T_DISCARD & ~VIRTIO_BLK_T_OUT: + case VIRTIO_BLK_T_WRITE_ZEROES & ~VIRTIO_BLK_T_OUT: + { + struct virtio_blk_discard_write_zeroes dwz_hdr; + size_t out_len = iov_size(out_iov, out_num); + bool is_write_zeroes = (type & ~VIRTIO_BLK_T_BARRIER) == + VIRTIO_BLK_T_WRITE_ZEROES; + uint8_t err_status; + + /* + * Unsupported if VIRTIO_BLK_T_OUT is not set or the request contains + * more than one segment. + */ + if (unlikely(!(type & VIRTIO_BLK_T_OUT) || + out_len > sizeof(dwz_hdr))) { + virtio_blk_req_complete(req, VIRTIO_BLK_S_UNSUPP); + virtio_blk_free_request(req); + return 0; + } + + if (unlikely(iov_to_buf(out_iov, out_num, 0, &dwz_hdr, + sizeof(dwz_hdr)) != sizeof(dwz_hdr))) { + iov_discard_undo(&req->inhdr_undo); + iov_discard_undo(&req->outhdr_undo); + virtio_error(vdev, "virtio-blk discard/write_zeroes header" + " too short"); + return -1; + } + + err_status = virtio_blk_handle_discard_write_zeroes(req, &dwz_hdr, + is_write_zeroes); + if (err_status != VIRTIO_BLK_S_OK) { + virtio_blk_req_complete(req, err_status); + virtio_blk_free_request(req); + } + + break; + } + default: + virtio_blk_req_complete(req, VIRTIO_BLK_S_UNSUPP); + virtio_blk_free_request(req); + } + return 0; +} + +bool virtio_blk_handle_vq(VirtIOBlock *s, VirtQueue *vq) +{ + VirtIOBlockReq *req; + MultiReqBuffer mrb = {}; + bool suppress_notifications = virtio_queue_get_notification(vq); + bool progress = false; + + aio_context_acquire(blk_get_aio_context(s->blk)); + blk_io_plug(s->blk); + + do { + if (suppress_notifications) { + virtio_queue_set_notification(vq, 0); + } + + while ((req = virtio_blk_get_request(s, vq))) { + progress = true; + if (virtio_blk_handle_request(req, &mrb)) { + virtqueue_detach_element(req->vq, &req->elem, 0); + virtio_blk_free_request(req); + break; + } + } + + if (suppress_notifications) { + virtio_queue_set_notification(vq, 1); + } + } while (!virtio_queue_empty(vq)); + + if (mrb.num_reqs) { + virtio_blk_submit_multireq(s->blk, &mrb); + } + + blk_io_unplug(s->blk); + aio_context_release(blk_get_aio_context(s->blk)); + return progress; +} + +static void virtio_blk_handle_output_do(VirtIOBlock *s, VirtQueue *vq) +{ + virtio_blk_handle_vq(s, vq); +} + +static void virtio_blk_handle_output(VirtIODevice *vdev, VirtQueue *vq) +{ + VirtIOBlock *s = (VirtIOBlock *)vdev; + + if (s->dataplane) { + /* Some guests kick before setting VIRTIO_CONFIG_S_DRIVER_OK so start + * dataplane here instead of waiting for .set_status(). + */ + virtio_device_start_ioeventfd(vdev); + if (!s->dataplane_disabled) { + return; + } + } + virtio_blk_handle_output_do(s, vq); +} + +void virtio_blk_process_queued_requests(VirtIOBlock *s, bool is_bh) +{ + VirtIOBlockReq *req = s->rq; + MultiReqBuffer mrb = {}; + + s->rq = NULL; + + aio_context_acquire(blk_get_aio_context(s->conf.conf.blk)); + while (req) { + VirtIOBlockReq *next = req->next; + if (virtio_blk_handle_request(req, &mrb)) { + /* Device is now broken and won't do any processing until it gets + * reset. 
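+ * (virtio_error() has already marked the device broken and, for
+ * VERSION_1 devices, set VIRTIO_CONFIG_S_NEEDS_RESET.)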
Already queued requests will be lost: let's purge them. + */ + while (req) { + next = req->next; + virtqueue_detach_element(req->vq, &req->elem, 0); + virtio_blk_free_request(req); + req = next; + } + break; + } + req = next; + } + + if (mrb.num_reqs) { + virtio_blk_submit_multireq(s->blk, &mrb); + } + if (is_bh) { + blk_dec_in_flight(s->conf.conf.blk); + } + aio_context_release(blk_get_aio_context(s->conf.conf.blk)); +} + +static void virtio_blk_dma_restart_bh(void *opaque) +{ + VirtIOBlock *s = opaque; + + qemu_bh_delete(s->bh); + s->bh = NULL; + + virtio_blk_process_queued_requests(s, true); +} + +static void virtio_blk_dma_restart_cb(void *opaque, bool running, + RunState state) +{ + VirtIOBlock *s = opaque; + BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(s))); + VirtioBusState *bus = VIRTIO_BUS(qbus); + + if (!running) { + return; + } + + /* + * If ioeventfd is enabled, don't schedule the BH here as queued + * requests will be processed while starting the data plane. + */ + if (!s->bh && !virtio_bus_ioeventfd_enabled(bus)) { + s->bh = aio_bh_new(blk_get_aio_context(s->conf.conf.blk), + virtio_blk_dma_restart_bh, s); + blk_inc_in_flight(s->conf.conf.blk); + qemu_bh_schedule(s->bh); + } +} + +static void virtio_blk_reset(VirtIODevice *vdev) +{ + VirtIOBlock *s = VIRTIO_BLK(vdev); + AioContext *ctx; + VirtIOBlockReq *req; + + ctx = blk_get_aio_context(s->blk); + aio_context_acquire(ctx); + blk_drain(s->blk); + + /* We drop queued requests after blk_drain() because blk_drain() itself can + * produce them. */ + while (s->rq) { + req = s->rq; + s->rq = req->next; + virtqueue_detach_element(req->vq, &req->elem, 0); + virtio_blk_free_request(req); + } + + aio_context_release(ctx); + + assert(!s->dataplane_started); + blk_set_enable_write_cache(s->blk, s->original_wce); +} + +/* coalesce internal state, copy to pci i/o region 0 + */ +static void virtio_blk_update_config(VirtIODevice *vdev, uint8_t *config) +{ + VirtIOBlock *s = VIRTIO_BLK(vdev); + BlockConf *conf = &s->conf.conf; + struct virtio_blk_config blkcfg; + uint64_t capacity; + int64_t length; + int blk_size = conf->logical_block_size; + + blk_get_geometry(s->blk, &capacity); + memset(&blkcfg, 0, sizeof(blkcfg)); + virtio_stq_p(vdev, &blkcfg.capacity, capacity); + virtio_stl_p(vdev, &blkcfg.seg_max, + s->conf.seg_max_adjust ? s->conf.queue_size - 2 : 128 - 2); + virtio_stw_p(vdev, &blkcfg.geometry.cylinders, conf->cyls); + virtio_stl_p(vdev, &blkcfg.blk_size, blk_size); + virtio_stw_p(vdev, &blkcfg.min_io_size, conf->min_io_size / blk_size); + virtio_stl_p(vdev, &blkcfg.opt_io_size, conf->opt_io_size / blk_size); + blkcfg.geometry.heads = conf->heads; + /* + * We must ensure that the block device capacity is a multiple of + * the logical block size. If that is not the case, let's use + * sector_mask to adopt the geometry to have a correct picture. + * For those devices where the capacity is ok for the given geometry + * we don't touch the sector value of the geometry, since some devices + * (like s390 dasd) need a specific value. Here the capacity is already + * cyls*heads*secs*blk_size and the sector value is not block size + * divided by 512 - instead it is the amount of blk_size blocks + * per track (cylinder). 
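+ * As an illustration with made-up numbers: for a 4 KiB DASD reporting
+ * heads=15 and secs=12, "secs" means 12 blocks of blk_size per track,
+ * and the masking below only kicks in when length / heads / secs is not
+ * an exact multiple of blk_size.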
+ */ + length = blk_getlength(s->blk); + if (length > 0 && length / conf->heads / conf->secs % blk_size) { + blkcfg.geometry.sectors = conf->secs & ~s->sector_mask; + } else { + blkcfg.geometry.sectors = conf->secs; + } + blkcfg.size_max = 0; + blkcfg.physical_block_exp = get_physical_block_exp(conf); + blkcfg.alignment_offset = 0; + blkcfg.wce = blk_enable_write_cache(s->blk); + virtio_stw_p(vdev, &blkcfg.num_queues, s->conf.num_queues); + if (virtio_has_feature(s->host_features, VIRTIO_BLK_F_DISCARD)) { + uint32_t discard_granularity = conf->discard_granularity; + if (discard_granularity == -1 || !s->conf.report_discard_granularity) { + discard_granularity = blk_size; + } + virtio_stl_p(vdev, &blkcfg.max_discard_sectors, + s->conf.max_discard_sectors); + virtio_stl_p(vdev, &blkcfg.discard_sector_alignment, + discard_granularity >> BDRV_SECTOR_BITS); + /* + * We support only one segment per request since multiple segments + * are not widely used and there are no userspace APIs that allow + * applications to submit multiple segments in a single call. + */ + virtio_stl_p(vdev, &blkcfg.max_discard_seg, 1); + } + if (virtio_has_feature(s->host_features, VIRTIO_BLK_F_WRITE_ZEROES)) { + virtio_stl_p(vdev, &blkcfg.max_write_zeroes_sectors, + s->conf.max_write_zeroes_sectors); + blkcfg.write_zeroes_may_unmap = 1; + virtio_stl_p(vdev, &blkcfg.max_write_zeroes_seg, 1); + } + memcpy(config, &blkcfg, s->config_size); +} + +static void virtio_blk_set_config(VirtIODevice *vdev, const uint8_t *config) +{ + VirtIOBlock *s = VIRTIO_BLK(vdev); + struct virtio_blk_config blkcfg; + + memcpy(&blkcfg, config, s->config_size); + + aio_context_acquire(blk_get_aio_context(s->blk)); + blk_set_enable_write_cache(s->blk, blkcfg.wce != 0); + aio_context_release(blk_get_aio_context(s->blk)); +} + +static uint64_t virtio_blk_get_features(VirtIODevice *vdev, uint64_t features, + Error **errp) +{ + VirtIOBlock *s = VIRTIO_BLK(vdev); + + /* Firstly sync all virtio-blk possible supported features */ + features |= s->host_features; + + virtio_add_feature(&features, VIRTIO_BLK_F_SEG_MAX); + virtio_add_feature(&features, VIRTIO_BLK_F_GEOMETRY); + virtio_add_feature(&features, VIRTIO_BLK_F_TOPOLOGY); + virtio_add_feature(&features, VIRTIO_BLK_F_BLK_SIZE); + if (virtio_has_feature(features, VIRTIO_F_VERSION_1)) { + if (virtio_has_feature(s->host_features, VIRTIO_BLK_F_SCSI)) { + error_setg(errp, "Please set scsi=off for virtio-blk devices in order to use virtio 1.0"); + return 0; + } + } else { + virtio_clear_feature(&features, VIRTIO_F_ANY_LAYOUT); + virtio_add_feature(&features, VIRTIO_BLK_F_SCSI); + } + + if (blk_enable_write_cache(s->blk) || + (s->conf.x_enable_wce_if_config_wce && + virtio_has_feature(features, VIRTIO_BLK_F_CONFIG_WCE))) { + virtio_add_feature(&features, VIRTIO_BLK_F_WCE); + } + if (!blk_is_writable(s->blk)) { + virtio_add_feature(&features, VIRTIO_BLK_F_RO); + } + if (s->conf.num_queues > 1) { + virtio_add_feature(&features, VIRTIO_BLK_F_MQ); + } + + return features; +} + +static void virtio_blk_set_status(VirtIODevice *vdev, uint8_t status) +{ + VirtIOBlock *s = VIRTIO_BLK(vdev); + + if (!(status & (VIRTIO_CONFIG_S_DRIVER | VIRTIO_CONFIG_S_DRIVER_OK))) { + assert(!s->dataplane_started); + } + + if (!(status & VIRTIO_CONFIG_S_DRIVER_OK)) { + return; + } + + /* A guest that supports VIRTIO_BLK_F_CONFIG_WCE must be able to send + * cache flushes. Thus, the "auto writethrough" behavior is never + * necessary for guests that support the VIRTIO_BLK_F_CONFIG_WCE feature. 
+ * Leaving it enabled would break the following sequence: + * + * Guest started with "-drive cache=writethrough" + * Guest sets status to 0 + * Guest sets DRIVER bit in status field + * Guest reads host features (WCE=0, CONFIG_WCE=1) + * Guest writes guest features (WCE=0, CONFIG_WCE=1) + * Guest writes 1 to the WCE configuration field (writeback mode) + * Guest sets DRIVER_OK bit in status field + * + * s->blk would erroneously be placed in writethrough mode. + */ + if (!virtio_vdev_has_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE)) { + aio_context_acquire(blk_get_aio_context(s->blk)); + blk_set_enable_write_cache(s->blk, + virtio_vdev_has_feature(vdev, + VIRTIO_BLK_F_WCE)); + aio_context_release(blk_get_aio_context(s->blk)); + } +} + +static void virtio_blk_save_device(VirtIODevice *vdev, QEMUFile *f) +{ + VirtIOBlock *s = VIRTIO_BLK(vdev); + VirtIOBlockReq *req = s->rq; + + while (req) { + qemu_put_sbyte(f, 1); + + if (s->conf.num_queues > 1) { + qemu_put_be32(f, virtio_get_queue_index(req->vq)); + } + + qemu_put_virtqueue_element(vdev, f, &req->elem); + req = req->next; + } + qemu_put_sbyte(f, 0); +} + +static int virtio_blk_load_device(VirtIODevice *vdev, QEMUFile *f, + int version_id) +{ + VirtIOBlock *s = VIRTIO_BLK(vdev); + + while (qemu_get_sbyte(f)) { + unsigned nvqs = s->conf.num_queues; + unsigned vq_idx = 0; + VirtIOBlockReq *req; + + if (nvqs > 1) { + vq_idx = qemu_get_be32(f); + + if (vq_idx >= nvqs) { + error_report("Invalid virtqueue index in request list: %#x", + vq_idx); + return -EINVAL; + } + } + + req = qemu_get_virtqueue_element(vdev, f, sizeof(VirtIOBlockReq)); + virtio_blk_init_request(s, virtio_get_queue(vdev, vq_idx), req); + req->next = s->rq; + s->rq = req; + } + + return 0; +} + +static void virtio_resize_cb(void *opaque) +{ + VirtIODevice *vdev = opaque; + + assert(qemu_get_current_aio_context() == qemu_get_aio_context()); + virtio_notify_config(vdev); +} + +static void virtio_blk_resize(void *opaque) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(opaque); + + /* + * virtio_notify_config() needs to acquire the global mutex, + * so it can't be called from an iothread. Instead, schedule + * it to be run in the main context BH. 
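+ * The BH runs in qemu_get_aio_context(), which virtio_resize_cb()
+ * asserts before raising the config-change notification.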
+ */ + aio_bh_schedule_oneshot(qemu_get_aio_context(), virtio_resize_cb, vdev); +} + +static const BlockDevOps virtio_block_ops = { + .resize_cb = virtio_blk_resize, +}; + +static void virtio_blk_device_realize(DeviceState *dev, Error **errp) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(dev); + VirtIOBlock *s = VIRTIO_BLK(dev); + VirtIOBlkConf *conf = &s->conf; + Error *err = NULL; + unsigned i; + + if (!conf->conf.blk) { + error_setg(errp, "drive property not set"); + return; + } + if (!blk_is_inserted(conf->conf.blk)) { + error_setg(errp, "Device needs media, but drive is empty"); + return; + } + if (conf->num_queues == VIRTIO_BLK_AUTO_NUM_QUEUES) { + conf->num_queues = 1; + } + if (!conf->num_queues) { + error_setg(errp, "num-queues property must be larger than 0"); + return; + } + if (conf->queue_size <= 2) { + error_setg(errp, "invalid queue-size property (%" PRIu16 "), " + "must be > 2", conf->queue_size); + return; + } + if (!is_power_of_2(conf->queue_size) || + conf->queue_size > VIRTQUEUE_MAX_SIZE) { + error_setg(errp, "invalid queue-size property (%" PRIu16 "), " + "must be a power of 2 (max %d)", + conf->queue_size, VIRTQUEUE_MAX_SIZE); + return; + } + + if (!blkconf_apply_backend_options(&conf->conf, + !blk_supports_write_perm(conf->conf.blk), + true, errp)) { + return; + } + s->original_wce = blk_enable_write_cache(conf->conf.blk); + if (!blkconf_geometry(&conf->conf, NULL, 65535, 255, 255, errp)) { + return; + } + + if (!blkconf_blocksizes(&conf->conf, errp)) { + return; + } + + if (virtio_has_feature(s->host_features, VIRTIO_BLK_F_DISCARD) && + (!conf->max_discard_sectors || + conf->max_discard_sectors > BDRV_REQUEST_MAX_SECTORS)) { + error_setg(errp, "invalid max-discard-sectors property (%" PRIu32 ")" + ", must be between 1 and %d", + conf->max_discard_sectors, (int)BDRV_REQUEST_MAX_SECTORS); + return; + } + + if (virtio_has_feature(s->host_features, VIRTIO_BLK_F_WRITE_ZEROES) && + (!conf->max_write_zeroes_sectors || + conf->max_write_zeroes_sectors > BDRV_REQUEST_MAX_SECTORS)) { + error_setg(errp, "invalid max-write-zeroes-sectors property (%" PRIu32 + "), must be between 1 and %d", + conf->max_write_zeroes_sectors, + (int)BDRV_REQUEST_MAX_SECTORS); + return; + } + + virtio_blk_set_config_size(s, s->host_features); + + virtio_init(vdev, "virtio-blk", VIRTIO_ID_BLOCK, s->config_size); + + s->blk = conf->conf.blk; + s->rq = NULL; + s->sector_mask = (s->conf.conf.logical_block_size / BDRV_SECTOR_SIZE) - 1; + + for (i = 0; i < conf->num_queues; i++) { + virtio_add_queue(vdev, conf->queue_size, virtio_blk_handle_output); + } + virtio_blk_data_plane_create(vdev, conf, &s->dataplane, &err); + if (err != NULL) { + error_propagate(errp, err); + for (i = 0; i < conf->num_queues; i++) { + virtio_del_queue(vdev, i); + } + virtio_cleanup(vdev); + return; + } + + s->change = qemu_add_vm_change_state_handler(virtio_blk_dma_restart_cb, s); + blk_set_dev_ops(s->blk, &virtio_block_ops, s); + blk_set_guest_block_size(s->blk, s->conf.conf.logical_block_size); + + blk_iostatus_enable(s->blk); + + add_boot_device_lchs(dev, "/disk@0,0", + conf->conf.lcyls, + conf->conf.lheads, + conf->conf.lsecs); +} + +static void virtio_blk_device_unrealize(DeviceState *dev) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(dev); + VirtIOBlock *s = VIRTIO_BLK(dev); + VirtIOBlkConf *conf = &s->conf; + unsigned i; + + blk_drain(s->blk); + del_boot_device_lchs(dev, "/disk@0,0"); + virtio_blk_data_plane_destroy(s->dataplane); + s->dataplane = NULL; + for (i = 0; i < conf->num_queues; i++) { + virtio_del_queue(vdev, i); + } + 
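+    /* Unwind what realize set up: the vm-change-state handler, the
+     * auto-delete bookkeeping and the transport-level state. */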
qemu_del_vm_change_state_handler(s->change); + blockdev_mark_auto_del(s->blk); + virtio_cleanup(vdev); +} + +static void virtio_blk_instance_init(Object *obj) +{ + VirtIOBlock *s = VIRTIO_BLK(obj); + + device_add_bootindex_property(obj, &s->conf.conf.bootindex, + "bootindex", "/disk@0,0", + DEVICE(obj)); +} + +static const VMStateDescription vmstate_virtio_blk = { + .name = "virtio-blk", + .minimum_version_id = 2, + .version_id = 2, + .fields = (VMStateField[]) { + VMSTATE_VIRTIO_DEVICE, + VMSTATE_END_OF_LIST() + }, +}; + +static Property virtio_blk_properties[] = { + DEFINE_BLOCK_PROPERTIES(VirtIOBlock, conf.conf), + DEFINE_BLOCK_ERROR_PROPERTIES(VirtIOBlock, conf.conf), + DEFINE_BLOCK_CHS_PROPERTIES(VirtIOBlock, conf.conf), + DEFINE_PROP_STRING("serial", VirtIOBlock, conf.serial), + DEFINE_PROP_BIT64("config-wce", VirtIOBlock, host_features, + VIRTIO_BLK_F_CONFIG_WCE, true), +#ifdef __linux__ + DEFINE_PROP_BIT64("scsi", VirtIOBlock, host_features, + VIRTIO_BLK_F_SCSI, false), +#endif + DEFINE_PROP_BIT("request-merging", VirtIOBlock, conf.request_merging, 0, + true), + DEFINE_PROP_UINT16("num-queues", VirtIOBlock, conf.num_queues, + VIRTIO_BLK_AUTO_NUM_QUEUES), + DEFINE_PROP_UINT16("queue-size", VirtIOBlock, conf.queue_size, 256), + DEFINE_PROP_BOOL("seg-max-adjust", VirtIOBlock, conf.seg_max_adjust, true), + DEFINE_PROP_LINK("iothread", VirtIOBlock, conf.iothread, TYPE_IOTHREAD, + IOThread *), + DEFINE_PROP_BIT64("discard", VirtIOBlock, host_features, + VIRTIO_BLK_F_DISCARD, true), + DEFINE_PROP_BOOL("report-discard-granularity", VirtIOBlock, + conf.report_discard_granularity, true), + DEFINE_PROP_BIT64("write-zeroes", VirtIOBlock, host_features, + VIRTIO_BLK_F_WRITE_ZEROES, true), + DEFINE_PROP_UINT32("max-discard-sectors", VirtIOBlock, + conf.max_discard_sectors, BDRV_REQUEST_MAX_SECTORS), + DEFINE_PROP_UINT32("max-write-zeroes-sectors", VirtIOBlock, + conf.max_write_zeroes_sectors, BDRV_REQUEST_MAX_SECTORS), + DEFINE_PROP_BOOL("x-enable-wce-if-config-wce", VirtIOBlock, + conf.x_enable_wce_if_config_wce, true), + DEFINE_PROP_END_OF_LIST(), +}; + +static void virtio_blk_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass); + + device_class_set_props(dc, virtio_blk_properties); + dc->vmsd = &vmstate_virtio_blk; + set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); + vdc->realize = virtio_blk_device_realize; + vdc->unrealize = virtio_blk_device_unrealize; + vdc->get_config = virtio_blk_update_config; + vdc->set_config = virtio_blk_set_config; + vdc->get_features = virtio_blk_get_features; + vdc->set_status = virtio_blk_set_status; + vdc->reset = virtio_blk_reset; + vdc->save = virtio_blk_save_device; + vdc->load = virtio_blk_load_device; + vdc->start_ioeventfd = virtio_blk_data_plane_start; + vdc->stop_ioeventfd = virtio_blk_data_plane_stop; +} + +static const TypeInfo virtio_blk_info = { + .name = TYPE_VIRTIO_BLK, + .parent = TYPE_VIRTIO_DEVICE, + .instance_size = sizeof(VirtIOBlock), + .instance_init = virtio_blk_instance_init, + .class_init = virtio_blk_class_init, +}; + +static void virtio_register_types(void) +{ + type_register_static(&virtio_blk_info); +} + +type_init(virtio_register_types) diff --git a/hw/block/xen-block.c b/hw/block/xen-block.c new file mode 100644 index 000000000..674953f1a --- /dev/null +++ b/hw/block/xen-block.c @@ -0,0 +1,1024 @@ +/* + * Copyright (c) 2018 Citrix Systems Inc. + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. 
+ * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "qemu/cutils.h" +#include "qemu/main-loop.h" +#include "qemu/module.h" +#include "qemu/option.h" +#include "qapi/error.h" +#include "qapi/qapi-commands-block-core.h" +#include "qapi/qapi-commands-qom.h" +#include "qapi/qapi-visit-block-core.h" +#include "qapi/qobject-input-visitor.h" +#include "qapi/visitor.h" +#include "qapi/qmp/qdict.h" +#include "qapi/qmp/qstring.h" +#include "qom/object_interfaces.h" +#include "hw/xen/xen_common.h" +#include "hw/block/xen_blkif.h" +#include "hw/qdev-properties.h" +#include "hw/xen/xen-block.h" +#include "hw/xen/xen-backend.h" +#include "sysemu/blockdev.h" +#include "sysemu/block-backend.h" +#include "sysemu/iothread.h" +#include "dataplane/xen-block.h" +#include "trace.h" + +static char *xen_block_get_name(XenDevice *xendev, Error **errp) +{ + XenBlockDevice *blockdev = XEN_BLOCK_DEVICE(xendev); + XenBlockVdev *vdev = &blockdev->props.vdev; + + return g_strdup_printf("%lu", vdev->number); +} + +static void xen_block_disconnect(XenDevice *xendev, Error **errp) +{ + XenBlockDevice *blockdev = XEN_BLOCK_DEVICE(xendev); + const char *type = object_get_typename(OBJECT(blockdev)); + XenBlockVdev *vdev = &blockdev->props.vdev; + + trace_xen_block_disconnect(type, vdev->disk, vdev->partition); + + xen_block_dataplane_stop(blockdev->dataplane); +} + +static void xen_block_connect(XenDevice *xendev, Error **errp) +{ + XenBlockDevice *blockdev = XEN_BLOCK_DEVICE(xendev); + const char *type = object_get_typename(OBJECT(blockdev)); + XenBlockVdev *vdev = &blockdev->props.vdev; + BlockConf *conf = &blockdev->props.conf; + unsigned int feature_large_sector_size; + unsigned int order, nr_ring_ref, *ring_ref, event_channel, protocol; + char *str; + + trace_xen_block_connect(type, vdev->disk, vdev->partition); + + if (xen_device_frontend_scanf(xendev, "feature-large-sector-size", "%u", + &feature_large_sector_size) != 1) { + feature_large_sector_size = 0; + } + + if (feature_large_sector_size != 1 && + conf->logical_block_size != XEN_BLKIF_SECTOR_SIZE) { + error_setg(errp, "logical_block_size != %u not supported by frontend", + XEN_BLKIF_SECTOR_SIZE); + return; + } + + if (xen_device_frontend_scanf(xendev, "ring-page-order", "%u", + &order) != 1) { + nr_ring_ref = 1; + ring_ref = g_new(unsigned int, nr_ring_ref); + + if (xen_device_frontend_scanf(xendev, "ring-ref", "%u", + &ring_ref[0]) != 1) { + error_setg(errp, "failed to read ring-ref"); + g_free(ring_ref); + return; + } + } else if (order <= blockdev->props.max_ring_page_order) { + unsigned int i; + + nr_ring_ref = 1 << order; + ring_ref = g_new(unsigned int, nr_ring_ref); + + for (i = 0; i < nr_ring_ref; i++) { + const char *key = g_strdup_printf("ring-ref%u", i); + + if (xen_device_frontend_scanf(xendev, key, "%u", + &ring_ref[i]) != 1) { + error_setg(errp, "failed to read %s", key); + g_free((gpointer)key); + g_free(ring_ref); + return; + } + + g_free((gpointer)key); + } + } else { + error_setg(errp, "invalid ring-page-order (%d)", order); + return; + } + + if (xen_device_frontend_scanf(xendev, "event-channel", "%u", + &event_channel) != 1) { + error_setg(errp, "failed to read event-channel"); + g_free(ring_ref); + return; + } + + if (xen_device_frontend_scanf(xendev, "protocol", "%ms", + &str) != 1) { + protocol = BLKIF_PROTOCOL_NATIVE; + } else { + if (strcmp(str, XEN_IO_PROTO_ABI_X86_32) == 0) { + protocol = BLKIF_PROTOCOL_X86_32; + } else if (strcmp(str, XEN_IO_PROTO_ABI_X86_64) == 0) { + protocol = 
BLKIF_PROTOCOL_X86_64; + } else { + protocol = BLKIF_PROTOCOL_NATIVE; + } + + free(str); + } + + xen_block_dataplane_start(blockdev->dataplane, ring_ref, nr_ring_ref, + event_channel, protocol, errp); + + g_free(ring_ref); +} + +static void xen_block_unrealize(XenDevice *xendev) +{ + XenBlockDevice *blockdev = XEN_BLOCK_DEVICE(xendev); + XenBlockDeviceClass *blockdev_class = + XEN_BLOCK_DEVICE_GET_CLASS(xendev); + const char *type = object_get_typename(OBJECT(blockdev)); + XenBlockVdev *vdev = &blockdev->props.vdev; + + if (vdev->type == XEN_BLOCK_VDEV_TYPE_INVALID) { + return; + } + + trace_xen_block_unrealize(type, vdev->disk, vdev->partition); + + /* Disconnect from the frontend in case this has not already happened */ + xen_block_disconnect(xendev, NULL); + + xen_block_dataplane_destroy(blockdev->dataplane); + blockdev->dataplane = NULL; + + if (blockdev_class->unrealize) { + blockdev_class->unrealize(blockdev); + } +} + +static void xen_block_set_size(XenBlockDevice *blockdev) +{ + const char *type = object_get_typename(OBJECT(blockdev)); + XenBlockVdev *vdev = &blockdev->props.vdev; + BlockConf *conf = &blockdev->props.conf; + int64_t sectors = blk_getlength(conf->blk) / conf->logical_block_size; + XenDevice *xendev = XEN_DEVICE(blockdev); + + trace_xen_block_size(type, vdev->disk, vdev->partition, sectors); + + xen_device_backend_printf(xendev, "sectors", "%"PRIi64, sectors); +} + +static void xen_block_resize_cb(void *opaque) +{ + XenBlockDevice *blockdev = opaque; + XenDevice *xendev = XEN_DEVICE(blockdev); + enum xenbus_state state = xen_device_backend_get_state(xendev); + + xen_block_set_size(blockdev); + + /* + * Mimic the behaviour of Linux xen-blkback and re-write the state + * to trigger the frontend watch. + */ + xen_device_backend_printf(xendev, "state", "%u", state); +} + +static const BlockDevOps xen_block_dev_ops = { + .resize_cb = xen_block_resize_cb, +}; + +static void xen_block_realize(XenDevice *xendev, Error **errp) +{ + ERRP_GUARD(); + XenBlockDevice *blockdev = XEN_BLOCK_DEVICE(xendev); + XenBlockDeviceClass *blockdev_class = + XEN_BLOCK_DEVICE_GET_CLASS(xendev); + const char *type = object_get_typename(OBJECT(blockdev)); + XenBlockVdev *vdev = &blockdev->props.vdev; + BlockConf *conf = &blockdev->props.conf; + BlockBackend *blk = conf->blk; + + if (vdev->type == XEN_BLOCK_VDEV_TYPE_INVALID) { + error_setg(errp, "vdev property not set"); + return; + } + + trace_xen_block_realize(type, vdev->disk, vdev->partition); + + if (blockdev_class->realize) { + blockdev_class->realize(blockdev, errp); + if (*errp) { + return; + } + } + + /* + * The blkif protocol does not deal with removable media, so it must + * always be present, even for CDRom devices. 
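+ * xen_cdrom_realize() below therefore attaches an empty BlockBackend
+ * when no drive is configured, which keeps the assertion that follows
+ * valid for CD-ROMs too.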
+ */ + assert(blk); + if (!blk_is_inserted(blk)) { + error_setg(errp, "device needs media, but drive is empty"); + return; + } + + if (!blkconf_apply_backend_options(conf, blockdev->info & VDISK_READONLY, + true, errp)) { + return; + } + + if (!(blockdev->info & VDISK_CDROM) && + !blkconf_geometry(conf, NULL, 65535, 255, 255, errp)) { + return; + } + + if (!blkconf_blocksizes(conf, errp)) { + return; + } + + blk_set_dev_ops(blk, &xen_block_dev_ops, blockdev); + blk_set_guest_block_size(blk, conf->logical_block_size); + + if (conf->discard_granularity == -1) { + conf->discard_granularity = conf->physical_block_size; + } + + if (blk_get_flags(blk) & BDRV_O_UNMAP) { + xen_device_backend_printf(xendev, "feature-discard", "%u", 1); + xen_device_backend_printf(xendev, "discard-granularity", "%u", + conf->discard_granularity); + xen_device_backend_printf(xendev, "discard-alignment", "%u", 0); + } + + xen_device_backend_printf(xendev, "feature-flush-cache", "%u", 1); + xen_device_backend_printf(xendev, "max-ring-page-order", "%u", + blockdev->props.max_ring_page_order); + xen_device_backend_printf(xendev, "info", "%u", blockdev->info); + + xen_device_frontend_printf(xendev, "virtual-device", "%lu", + vdev->number); + xen_device_frontend_printf(xendev, "device-type", "%s", + blockdev->device_type); + + xen_device_backend_printf(xendev, "sector-size", "%u", + conf->logical_block_size); + + xen_block_set_size(blockdev); + + blockdev->dataplane = + xen_block_dataplane_create(xendev, blk, conf->logical_block_size, + blockdev->props.iothread); +} + +static void xen_block_frontend_changed(XenDevice *xendev, + enum xenbus_state frontend_state, + Error **errp) +{ + ERRP_GUARD(); + enum xenbus_state backend_state = xen_device_backend_get_state(xendev); + + switch (frontend_state) { + case XenbusStateInitialised: + case XenbusStateConnected: + if (backend_state == XenbusStateConnected) { + break; + } + + xen_block_disconnect(xendev, errp); + if (*errp) { + break; + } + + xen_block_connect(xendev, errp); + if (*errp) { + break; + } + + xen_device_backend_set_state(xendev, XenbusStateConnected); + break; + + case XenbusStateClosing: + xen_device_backend_set_state(xendev, XenbusStateClosing); + break; + + case XenbusStateClosed: + case XenbusStateUnknown: + xen_block_disconnect(xendev, errp); + if (*errp) { + break; + } + + xen_device_backend_set_state(xendev, XenbusStateClosed); + break; + + default: + break; + } +} + +static char *disk_to_vbd_name(unsigned int disk) +{ + char *name, *prefix = (disk >= 26) ? + disk_to_vbd_name((disk / 26) - 1) : g_strdup(""); + + name = g_strdup_printf("%s%c", prefix, 'a' + disk % 26); + g_free(prefix); + + return name; +} + +static void xen_block_get_vdev(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + Property *prop = opaque; + XenBlockVdev *vdev = object_field_prop_ptr(obj, prop); + char *str; + + switch (vdev->type) { + case XEN_BLOCK_VDEV_TYPE_DP: + str = g_strdup_printf("d%lup%lu", vdev->disk, vdev->partition); + break; + + case XEN_BLOCK_VDEV_TYPE_XVD: + case XEN_BLOCK_VDEV_TYPE_HD: + case XEN_BLOCK_VDEV_TYPE_SD: { + char *name = disk_to_vbd_name(vdev->disk); + + str = g_strdup_printf("%s%s%lu", + (vdev->type == XEN_BLOCK_VDEV_TYPE_XVD) ? + "xvd" : + (vdev->type == XEN_BLOCK_VDEV_TYPE_HD) ? 
+ "hd" : + "sd", + name, vdev->partition); + g_free(name); + break; + } + default: + error_setg(errp, "invalid vdev type"); + return; + } + + visit_type_str(v, name, &str, errp); + g_free(str); +} + +static int vbd_name_to_disk(const char *name, const char **endp, + unsigned long *disk) +{ + unsigned int n = 0; + + while (*name != '\0') { + if (!g_ascii_isalpha(*name) || !g_ascii_islower(*name)) { + break; + } + + n *= 26; + n += *name++ - 'a' + 1; + } + *endp = name; + + if (!n) { + return -1; + } + + *disk = n - 1; + + return 0; +} + +static void xen_block_set_vdev(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + Property *prop = opaque; + XenBlockVdev *vdev = object_field_prop_ptr(obj, prop); + char *str, *p; + const char *end; + + if (!visit_type_str(v, name, &str, errp)) { + return; + } + + p = strchr(str, 'd'); + if (!p) { + goto invalid; + } + + *p++ = '\0'; + if (*str == '\0') { + vdev->type = XEN_BLOCK_VDEV_TYPE_DP; + } else if (strcmp(str, "xv") == 0) { + vdev->type = XEN_BLOCK_VDEV_TYPE_XVD; + } else if (strcmp(str, "h") == 0) { + vdev->type = XEN_BLOCK_VDEV_TYPE_HD; + } else if (strcmp(str, "s") == 0) { + vdev->type = XEN_BLOCK_VDEV_TYPE_SD; + } else { + goto invalid; + } + + if (vdev->type == XEN_BLOCK_VDEV_TYPE_DP) { + if (qemu_strtoul(p, &end, 10, &vdev->disk)) { + goto invalid; + } + + if (*end == 'p') { + if (*(++end) == '\0') { + goto invalid; + } + } + } else { + if (vbd_name_to_disk(p, &end, &vdev->disk)) { + goto invalid; + } + } + + if (*end != '\0') { + p = (char *)end; + + if (qemu_strtoul(p, &end, 10, &vdev->partition)) { + goto invalid; + } + + if (*end != '\0') { + goto invalid; + } + } else { + vdev->partition = 0; + } + + switch (vdev->type) { + case XEN_BLOCK_VDEV_TYPE_DP: + case XEN_BLOCK_VDEV_TYPE_XVD: + if (vdev->disk < (1 << 4) && vdev->partition < (1 << 4)) { + vdev->number = (202 << 8) | (vdev->disk << 4) | + vdev->partition; + } else if (vdev->disk < (1 << 20) && vdev->partition < (1 << 8)) { + vdev->number = (1 << 28) | (vdev->disk << 8) | + vdev->partition; + } else { + goto invalid; + } + break; + + case XEN_BLOCK_VDEV_TYPE_HD: + if ((vdev->disk == 0 || vdev->disk == 1) && + vdev->partition < (1 << 6)) { + vdev->number = (3 << 8) | (vdev->disk << 6) | vdev->partition; + } else if ((vdev->disk == 2 || vdev->disk == 3) && + vdev->partition < (1 << 6)) { + vdev->number = (22 << 8) | ((vdev->disk - 2) << 6) | + vdev->partition; + } else { + goto invalid; + } + break; + + case XEN_BLOCK_VDEV_TYPE_SD: + if (vdev->disk < (1 << 4) && vdev->partition < (1 << 4)) { + vdev->number = (8 << 8) | (vdev->disk << 4) | vdev->partition; + } else { + goto invalid; + } + break; + + default: + goto invalid; + } + + g_free(str); + return; + +invalid: + error_setg(errp, "invalid virtual disk specifier"); + + vdev->type = XEN_BLOCK_VDEV_TYPE_INVALID; + g_free(str); +} + +/* + * This property deals with 'vdev' names adhering to the Xen VBD naming + * scheme described in: + * + * https://xenbits.xen.org/docs/unstable/man/xen-vbd-interface.7.html + */ +const PropertyInfo xen_block_prop_vdev = { + .name = "str", + .description = "Virtual Disk specifier: d*p*/xvd*/hd*/sd*", + .get = xen_block_get_vdev, + .set = xen_block_set_vdev, +}; + +static Property xen_block_props[] = { + DEFINE_PROP("vdev", XenBlockDevice, props.vdev, + xen_block_prop_vdev, XenBlockVdev), + DEFINE_BLOCK_PROPERTIES(XenBlockDevice, props.conf), + DEFINE_PROP_UINT32("max-ring-page-order", XenBlockDevice, + props.max_ring_page_order, 4), + DEFINE_PROP_LINK("iothread", 
XenBlockDevice, props.iothread, + TYPE_IOTHREAD, IOThread *), + DEFINE_PROP_END_OF_LIST() +}; + +static void xen_block_class_init(ObjectClass *class, void *data) +{ + DeviceClass *dev_class = DEVICE_CLASS(class); + XenDeviceClass *xendev_class = XEN_DEVICE_CLASS(class); + + xendev_class->backend = "qdisk"; + xendev_class->device = "vbd"; + xendev_class->get_name = xen_block_get_name; + xendev_class->realize = xen_block_realize; + xendev_class->frontend_changed = xen_block_frontend_changed; + xendev_class->unrealize = xen_block_unrealize; + + device_class_set_props(dev_class, xen_block_props); +} + +static const TypeInfo xen_block_type_info = { + .name = TYPE_XEN_BLOCK_DEVICE, + .parent = TYPE_XEN_DEVICE, + .instance_size = sizeof(XenBlockDevice), + .abstract = true, + .class_size = sizeof(XenBlockDeviceClass), + .class_init = xen_block_class_init, +}; + +static void xen_disk_unrealize(XenBlockDevice *blockdev) +{ + trace_xen_disk_unrealize(); +} + +static void xen_disk_realize(XenBlockDevice *blockdev, Error **errp) +{ + BlockConf *conf = &blockdev->props.conf; + + trace_xen_disk_realize(); + + blockdev->device_type = "disk"; + + if (!conf->blk) { + error_setg(errp, "drive property not set"); + return; + } + + blockdev->info = blk_supports_write_perm(conf->blk) ? 0 : VDISK_READONLY; +} + +static void xen_disk_class_init(ObjectClass *class, void *data) +{ + DeviceClass *dev_class = DEVICE_CLASS(class); + XenBlockDeviceClass *blockdev_class = XEN_BLOCK_DEVICE_CLASS(class); + + blockdev_class->realize = xen_disk_realize; + blockdev_class->unrealize = xen_disk_unrealize; + + dev_class->desc = "Xen Disk Device"; +} + +static const TypeInfo xen_disk_type_info = { + .name = TYPE_XEN_DISK_DEVICE, + .parent = TYPE_XEN_BLOCK_DEVICE, + .instance_size = sizeof(XenDiskDevice), + .class_init = xen_disk_class_init, +}; + +static void xen_cdrom_unrealize(XenBlockDevice *blockdev) +{ + trace_xen_cdrom_unrealize(); +} + +static void xen_cdrom_realize(XenBlockDevice *blockdev, Error **errp) +{ + BlockConf *conf = &blockdev->props.conf; + + trace_xen_cdrom_realize(); + + blockdev->device_type = "cdrom"; + + if (!conf->blk) { + int rc; + + /* Set up an empty drive */ + conf->blk = blk_new(qemu_get_aio_context(), 0, BLK_PERM_ALL); + + rc = blk_attach_dev(conf->blk, DEVICE(blockdev)); + if (!rc) { + error_setg_errno(errp, -rc, "failed to create drive"); + return; + } + } + + blockdev->info = VDISK_READONLY | VDISK_CDROM; +} + +static void xen_cdrom_class_init(ObjectClass *class, void *data) +{ + DeviceClass *dev_class = DEVICE_CLASS(class); + XenBlockDeviceClass *blockdev_class = XEN_BLOCK_DEVICE_CLASS(class); + + blockdev_class->realize = xen_cdrom_realize; + blockdev_class->unrealize = xen_cdrom_unrealize; + + dev_class->desc = "Xen CD-ROM Device"; +} + +static const TypeInfo xen_cdrom_type_info = { + .name = TYPE_XEN_CDROM_DEVICE, + .parent = TYPE_XEN_BLOCK_DEVICE, + .instance_size = sizeof(XenCDRomDevice), + .class_init = xen_cdrom_class_init, +}; + +static void xen_block_register_types(void) +{ + type_register_static(&xen_block_type_info); + type_register_static(&xen_disk_type_info); + type_register_static(&xen_cdrom_type_info); +} + +type_init(xen_block_register_types) + +static void xen_block_blockdev_del(const char *node_name, Error **errp) +{ + trace_xen_block_blockdev_del(node_name); + + qmp_blockdev_del(node_name, errp); +} + +static char *xen_block_blockdev_add(const char *id, QDict *qdict, + Error **errp) +{ + ERRP_GUARD(); + const char *driver = qdict_get_try_str(qdict, "driver"); + BlockdevOptions 
*options = NULL; + char *node_name; + Visitor *v; + + if (!driver) { + error_setg(errp, "no 'driver' parameter"); + return NULL; + } + + node_name = g_strdup_printf("%s-%s", id, driver); + qdict_put_str(qdict, "node-name", node_name); + + trace_xen_block_blockdev_add(node_name); + + v = qobject_input_visitor_new(QOBJECT(qdict)); + visit_type_BlockdevOptions(v, NULL, &options, errp); + visit_free(v); + if (!options) { + goto fail; + } + + qmp_blockdev_add(options, errp); + + if (*errp) { + goto fail; + } + + qapi_free_BlockdevOptions(options); + + return node_name; + +fail: + if (options) { + qapi_free_BlockdevOptions(options); + } + g_free(node_name); + + return NULL; +} + +static void xen_block_drive_destroy(XenBlockDrive *drive, Error **errp) +{ + ERRP_GUARD(); + char *node_name = drive->node_name; + + if (node_name) { + xen_block_blockdev_del(node_name, errp); + if (*errp) { + return; + } + g_free(node_name); + drive->node_name = NULL; + } + g_free(drive->id); + g_free(drive); +} + +static XenBlockDrive *xen_block_drive_create(const char *id, + const char *device_type, + QDict *opts, Error **errp) +{ + ERRP_GUARD(); + const char *params = qdict_get_try_str(opts, "params"); + const char *mode = qdict_get_try_str(opts, "mode"); + const char *direct_io_safe = qdict_get_try_str(opts, "direct-io-safe"); + const char *discard_enable = qdict_get_try_str(opts, "discard-enable"); + char *driver = NULL; + char *filename = NULL; + XenBlockDrive *drive = NULL; + QDict *file_layer; + QDict *driver_layer; + struct stat st; + int rc; + + if (params) { + char **v = g_strsplit(params, ":", 2); + + if (v[1] == NULL) { + filename = g_strdup(v[0]); + driver = g_strdup("raw"); + } else { + if (strcmp(v[0], "aio") == 0) { + driver = g_strdup("raw"); + } else if (strcmp(v[0], "vhd") == 0) { + driver = g_strdup("vpc"); + } else { + driver = g_strdup(v[0]); + } + filename = g_strdup(v[1]); + } + + g_strfreev(v); + } else { + error_setg(errp, "no params"); + goto done; + } + + assert(filename); + assert(driver); + + drive = g_new0(XenBlockDrive, 1); + drive->id = g_strdup(id); + + file_layer = qdict_new(); + driver_layer = qdict_new(); + + rc = stat(filename, &st); + if (rc) { + error_setg_errno(errp, errno, "Could not stat file '%s'", filename); + goto done; + } + if (S_ISBLK(st.st_mode)) { + qdict_put_str(file_layer, "driver", "host_device"); + } else { + qdict_put_str(file_layer, "driver", "file"); + } + + qdict_put_str(file_layer, "filename", filename); + g_free(filename); + + if (mode && *mode != 'w') { + qdict_put_bool(file_layer, "read-only", true); + } + + if (direct_io_safe) { + unsigned long value; + + if (!qemu_strtoul(direct_io_safe, NULL, 2, &value) && !!value) { + QDict *cache_qdict = qdict_new(); + + qdict_put_bool(cache_qdict, "direct", true); + qdict_put(file_layer, "cache", cache_qdict); + + qdict_put_str(file_layer, "aio", "native"); + } + } + + if (discard_enable) { + unsigned long value; + + if (!qemu_strtoul(discard_enable, NULL, 2, &value) && !!value) { + qdict_put_str(file_layer, "discard", "unmap"); + qdict_put_str(driver_layer, "discard", "unmap"); + } + } + + /* + * It is necessary to turn file locking off as an emulated device + * may have already opened the same image file. 
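+ * (e.g. when the guest unplugs an emulated IDE disk and the same image
+ * is then handed over to this PV backend).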
+ */ + qdict_put_str(file_layer, "locking", "off"); + + qdict_put_str(driver_layer, "driver", driver); + g_free(driver); + + qdict_put(driver_layer, "file", file_layer); + + g_assert(!drive->node_name); + drive->node_name = xen_block_blockdev_add(drive->id, driver_layer, + errp); + + qobject_unref(driver_layer); + +done: + if (*errp) { + xen_block_drive_destroy(drive, NULL); + return NULL; + } + + return drive; +} + +static const char *xen_block_drive_get_node_name(XenBlockDrive *drive) +{ + return drive->node_name ? drive->node_name : ""; +} + +static void xen_block_iothread_destroy(XenBlockIOThread *iothread, + Error **errp) +{ + qmp_object_del(iothread->id, errp); + + g_free(iothread->id); + g_free(iothread); +} + +static XenBlockIOThread *xen_block_iothread_create(const char *id, + Error **errp) +{ + ERRP_GUARD(); + XenBlockIOThread *iothread = g_new(XenBlockIOThread, 1); + ObjectOptions *opts; + + iothread->id = g_strdup(id); + + opts = g_new(ObjectOptions, 1); + *opts = (ObjectOptions) { + .qom_type = OBJECT_TYPE_IOTHREAD, + .id = g_strdup(id), + }; + qmp_object_add(opts, errp); + qapi_free_ObjectOptions(opts); + + if (*errp) { + g_free(iothread->id); + g_free(iothread); + return NULL; + } + + return iothread; +} + +static void xen_block_device_create(XenBackendInstance *backend, + QDict *opts, Error **errp) +{ + ERRP_GUARD(); + XenBus *xenbus = xen_backend_get_bus(backend); + const char *name = xen_backend_get_name(backend); + unsigned long number; + const char *vdev, *device_type; + XenBlockDrive *drive = NULL; + XenBlockIOThread *iothread = NULL; + XenDevice *xendev = NULL; + const char *type; + XenBlockDevice *blockdev; + + if (qemu_strtoul(name, NULL, 10, &number)) { + error_setg(errp, "failed to parse name '%s'", name); + goto fail; + } + + trace_xen_block_device_create(number); + + vdev = qdict_get_try_str(opts, "dev"); + if (!vdev) { + error_setg(errp, "no dev parameter"); + goto fail; + } + + device_type = qdict_get_try_str(opts, "device-type"); + if (!device_type) { + error_setg(errp, "no device-type parameter"); + goto fail; + } + + if (!strcmp(device_type, "disk")) { + type = TYPE_XEN_DISK_DEVICE; + } else if (!strcmp(device_type, "cdrom")) { + type = TYPE_XEN_CDROM_DEVICE; + } else { + error_setg(errp, "invalid device-type parameter '%s'", device_type); + goto fail; + } + + drive = xen_block_drive_create(vdev, device_type, opts, errp); + if (!drive) { + error_prepend(errp, "failed to create drive: "); + goto fail; + } + + iothread = xen_block_iothread_create(vdev, errp); + if (*errp) { + error_prepend(errp, "failed to create iothread: "); + goto fail; + } + + xendev = XEN_DEVICE(qdev_new(type)); + blockdev = XEN_BLOCK_DEVICE(xendev); + + if (!object_property_set_str(OBJECT(xendev), "vdev", vdev, + errp)) { + error_prepend(errp, "failed to set 'vdev': "); + goto fail; + } + + if (!object_property_set_str(OBJECT(xendev), "drive", + xen_block_drive_get_node_name(drive), + errp)) { + error_prepend(errp, "failed to set 'drive': "); + goto fail; + } + + if (!object_property_set_str(OBJECT(xendev), "iothread", iothread->id, + errp)) { + error_prepend(errp, "failed to set 'iothread': "); + goto fail; + } + + blockdev->iothread = iothread; + blockdev->drive = drive; + + if (!qdev_realize_and_unref(DEVICE(xendev), BUS(xenbus), errp)) { + error_prepend(errp, "realization of device %s failed: ", type); + goto fail; + } + + xen_backend_set_device(backend, xendev); + return; + +fail: + if (xendev) { + object_unparent(OBJECT(xendev)); + } + + if (iothread) { + 
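/* Roll back; secondary errors are ignored, errp already holds the original failure. */ +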
xen_block_iothread_destroy(iothread, NULL); + } + + if (drive) { + xen_block_drive_destroy(drive, NULL); + } +} + +static void xen_block_device_destroy(XenBackendInstance *backend, + Error **errp) +{ + ERRP_GUARD(); + XenDevice *xendev = xen_backend_get_device(backend); + XenBlockDevice *blockdev = XEN_BLOCK_DEVICE(xendev); + XenBlockVdev *vdev = &blockdev->props.vdev; + XenBlockDrive *drive = blockdev->drive; + XenBlockIOThread *iothread = blockdev->iothread; + + trace_xen_block_device_destroy(vdev->number); + + object_unparent(OBJECT(xendev)); + + /* + * Drain all pending RCU callbacks as object_unparent() frees `xendev' + * in a RCU callback. + * And due to the property "drive" still existing in `xendev', we + * can't destroy the XenBlockDrive associated with `xendev' with + * xen_block_drive_destroy() below. + */ + drain_call_rcu(); + + if (iothread) { + xen_block_iothread_destroy(iothread, errp); + if (*errp) { + error_prepend(errp, "failed to destroy iothread: "); + return; + } + } + + if (drive) { + xen_block_drive_destroy(drive, errp); + if (*errp) { + error_prepend(errp, "failed to destroy drive: "); + return; + } + } +} + +static const XenBackendInfo xen_block_backend_info = { + .type = "qdisk", + .create = xen_block_device_create, + .destroy = xen_block_device_destroy, +}; + +static void xen_block_register_backend(void) +{ + xen_backend_register(&xen_block_backend_info); +} + +xen_backend_init(xen_block_register_backend); diff --git a/hw/block/xen_blkif.h b/hw/block/xen_blkif.h new file mode 100644 index 000000000..99733529c --- /dev/null +++ b/hw/block/xen_blkif.h @@ -0,0 +1,147 @@ +#ifndef XEN_BLKIF_H +#define XEN_BLKIF_H + +#include "hw/xen/interface/io/blkif.h" +#include "hw/xen/interface/io/protocols.h" + +/* + * Not a real protocol. Used to generate ring structs which contain + * the elements common to all protocols only. This way we get a + * compiler-checkable way to use common struct elements, so we can + * avoid using switch(protocol) in a number of places. + */ +struct blkif_common_request { + char dummy; +}; +struct blkif_common_response { + char dummy; +}; + +/* i386 protocol version */ +#pragma pack(push, 4) +struct blkif_x86_32_request { + uint8_t operation; /* BLKIF_OP_??? */ + uint8_t nr_segments; /* number of segments */ + blkif_vdev_t handle; /* only for read/write requests */ + uint64_t id; /* private guest value, echoed in resp */ + blkif_sector_t sector_number; /* start sector idx on disk (r/w only) */ + struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST]; +}; +struct blkif_x86_32_request_discard { + uint8_t operation; /* BLKIF_OP_DISCARD */ + uint8_t flag; /* nr_segments in request struct */ + blkif_vdev_t handle; /* only for read/write requests */ + uint64_t id; /* private guest value, echoed in resp */ + blkif_sector_t sector_number; /* start sector idx on disk (r/w only) */ + uint64_t nr_sectors; /* # of contiguous sectors to discard */ +}; +struct blkif_x86_32_response { + uint64_t id; /* copied from request */ + uint8_t operation; /* copied from request */ + int16_t status; /* BLKIF_RSP_??? */ +}; +typedef struct blkif_x86_32_request blkif_x86_32_request_t; +typedef struct blkif_x86_32_response blkif_x86_32_response_t; +#pragma pack(pop) + +/* x86_64 protocol version */ +struct blkif_x86_64_request { + uint8_t operation; /* BLKIF_OP_??? 
*/ + uint8_t nr_segments; /* number of segments */ + blkif_vdev_t handle; /* only for read/write requests */ + uint64_t __attribute__((__aligned__(8))) id; + blkif_sector_t sector_number; /* start sector idx on disk (r/w only) */ + struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST]; +}; +struct blkif_x86_64_request_discard { + uint8_t operation; /* BLKIF_OP_DISCARD */ + uint8_t flag; /* nr_segments in request struct */ + blkif_vdev_t handle; /* only for read/write requests */ + uint64_t __attribute__((__aligned__(8))) id; + blkif_sector_t sector_number; /* start sector idx on disk (r/w only) */ + uint64_t nr_sectors; /* # of contiguous sectors to discard */ +}; +struct blkif_x86_64_response { + uint64_t __attribute__((__aligned__(8))) id; + uint8_t operation; /* copied from request */ + int16_t status; /* BLKIF_RSP_??? */ +}; +typedef struct blkif_x86_64_request blkif_x86_64_request_t; +typedef struct blkif_x86_64_response blkif_x86_64_response_t; + +DEFINE_RING_TYPES(blkif_common, struct blkif_common_request, + struct blkif_common_response); +DEFINE_RING_TYPES(blkif_x86_32, struct blkif_x86_32_request, + struct blkif_x86_32_response); +DEFINE_RING_TYPES(blkif_x86_64, struct blkif_x86_64_request, + struct blkif_x86_64_response); + +union blkif_back_rings { + blkif_back_ring_t native; + blkif_common_back_ring_t common; + blkif_x86_32_back_ring_t x86_32_part; + blkif_x86_64_back_ring_t x86_64_part; +}; +typedef union blkif_back_rings blkif_back_rings_t; + +enum blkif_protocol { + BLKIF_PROTOCOL_NATIVE = 1, + BLKIF_PROTOCOL_X86_32 = 2, + BLKIF_PROTOCOL_X86_64 = 3, +}; + +static inline void blkif_get_x86_32_req(blkif_request_t *dst, + blkif_x86_32_request_t *src) +{ + int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST; + + dst->operation = src->operation; + dst->nr_segments = src->nr_segments; + dst->handle = src->handle; + dst->id = src->id; + dst->sector_number = src->sector_number; + /* Prevent the compiler from using src->... instead. */ + barrier(); + if (dst->operation == BLKIF_OP_DISCARD) { + struct blkif_x86_32_request_discard *s = (void *)src; + struct blkif_request_discard *d = (void *)dst; + d->nr_sectors = s->nr_sectors; + return; + } + if (n > dst->nr_segments) { + n = dst->nr_segments; + } + for (i = 0; i < n; i++) { + dst->seg[i] = src->seg[i]; + } +} + +static inline void blkif_get_x86_64_req(blkif_request_t *dst, + blkif_x86_64_request_t *src) +{ + int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST; + + dst->operation = src->operation; + dst->nr_segments = src->nr_segments; + dst->handle = src->handle; + dst->id = src->id; + dst->sector_number = src->sector_number; + /* Prevent the compiler from using src->... instead. 
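The request sits in a ring shared with the guest, so re-reading src after these copies would be an exploitable double fetch.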
*/ + barrier(); + if (dst->operation == BLKIF_OP_DISCARD) { + struct blkif_x86_64_request_discard *s = (void *)src; + struct blkif_request_discard *d = (void *)dst; + d->nr_sectors = s->nr_sectors; + return; + } + if (n > dst->nr_segments) { + n = dst->nr_segments; + } + for (i = 0; i < n; i++) { + dst->seg[i] = src->seg[i]; + } +} + +#define XEN_BLKIF_SECTOR_SIZE 512 + +#endif /* XEN_BLKIF_H */ diff --git a/hw/char/Kconfig b/hw/char/Kconfig new file mode 100644 index 000000000..6b6cf2fc1 --- /dev/null +++ b/hw/char/Kconfig @@ -0,0 +1,73 @@ +config ESCC + bool + +config HTIF + bool + +config PARALLEL + bool + default y + depends on ISA_BUS + +config PL011 + bool + +config SERIAL + bool + +config SERIAL_ISA + bool + default y + depends on ISA_BUS + select SERIAL + +config SERIAL_PCI + bool + default y if PCI_DEVICES + depends on PCI + select SERIAL + +config SERIAL_PCI_MULTI + bool + default y if PCI_DEVICES + depends on PCI + select SERIAL + +config VIRTIO_SERIAL + bool + default y + depends on VIRTIO + +config STM32F2XX_USART + bool + +config CMSDK_APB_UART + bool + +config SCLPCONSOLE + bool + +config TERMINAL3270 + bool + +config SH_SCI + bool + +config RENESAS_SCI + bool + +config AVR_USART + bool + +config MCHP_PFSOC_MMUART + bool + select SERIAL + +config SIFIVE_UART + bool + +config GOLDFISH_TTY + bool + +config SHAKTI_UART + bool diff --git a/hw/char/avr_usart.c b/hw/char/avr_usart.c new file mode 100644 index 000000000..5bcf9db0b --- /dev/null +++ b/hw/char/avr_usart.c @@ -0,0 +1,321 @@ +/* + * AVR USART + * + * Copyright (c) 2018 University of Kent + * Author: Sarah Harris + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see + * + */ + +#include "qemu/osdep.h" +#include "hw/char/avr_usart.h" +#include "qemu/log.h" +#include "hw/irq.h" +#include "hw/qdev-properties.h" +#include "hw/qdev-properties-system.h" + +static int avr_usart_can_receive(void *opaque) +{ + AVRUsartState *usart = opaque; + + if (usart->data_valid || !(usart->csrb & USART_CSRB_RXEN)) { + return 0; + } + return 1; +} + +static void avr_usart_receive(void *opaque, const uint8_t *buffer, int size) +{ + AVRUsartState *usart = opaque; + assert(size == 1); + assert(!usart->data_valid); + usart->data = buffer[0]; + usart->data_valid = true; + usart->csra |= USART_CSRA_RXC; + if (usart->csrb & USART_CSRB_RXCIE) { + qemu_set_irq(usart->rxc_irq, 1); + } +} + +static void update_char_mask(AVRUsartState *usart) +{ + uint8_t mode = ((usart->csrc & USART_CSRC_CSZ0) ? 1 : 0) | + ((usart->csrc & USART_CSRC_CSZ1) ? 2 : 0) | + ((usart->csrb & USART_CSRB_CSZ2) ? 4 : 0); + switch (mode) { + case 0: + usart->char_mask = 0b11111; + break; + case 1: + usart->char_mask = 0b111111; + break; + case 2: + usart->char_mask = 0b1111111; + break; + case 3: + usart->char_mask = 0b11111111; + break; + case 4: + /* Fallthrough. */ + case 5: + /* Fallthrough. 
*/ + case 6: + qemu_log_mask( + LOG_GUEST_ERROR, + "%s: Reserved character size 0x%x\n", + __func__, + mode); + break; + case 7: + qemu_log_mask( + LOG_GUEST_ERROR, + "%s: Nine bit character size not supported (forcing eight)\n", + __func__); + usart->char_mask = 0b11111111; + break; + default: + assert(0); + } +} + +static void avr_usart_reset(DeviceState *dev) +{ + AVRUsartState *usart = AVR_USART(dev); + usart->data_valid = false; + usart->csra = 0b00100000; + usart->csrb = 0b00000000; + usart->csrc = 0b00000110; + usart->brrl = 0; + usart->brrh = 0; + update_char_mask(usart); + qemu_set_irq(usart->rxc_irq, 0); + qemu_set_irq(usart->txc_irq, 0); + qemu_set_irq(usart->dre_irq, 0); +} + +static uint64_t avr_usart_read(void *opaque, hwaddr addr, unsigned int size) +{ + AVRUsartState *usart = opaque; + uint8_t data; + assert(size == 1); + + if (!usart->enabled) { + return 0; + } + + switch (addr) { + case USART_DR: + if (!(usart->csrb & USART_CSRB_RXEN)) { + /* Receiver disabled, ignore. */ + return 0; + } + if (usart->data_valid) { + data = usart->data & usart->char_mask; + usart->data_valid = false; + } else { + data = 0; + } + usart->csra &= 0xff ^ USART_CSRA_RXC; + qemu_set_irq(usart->rxc_irq, 0); + qemu_chr_fe_accept_input(&usart->chr); + return data; + case USART_CSRA: + return usart->csra; + case USART_CSRB: + return usart->csrb; + case USART_CSRC: + return usart->csrc; + case USART_BRRL: + return usart->brrl; + case USART_BRRH: + return usart->brrh; + default: + qemu_log_mask( + LOG_GUEST_ERROR, + "%s: Bad offset 0x%"HWADDR_PRIx"\n", + __func__, + addr); + } + return 0; +} + +static void avr_usart_write(void *opaque, hwaddr addr, uint64_t value, + unsigned int size) +{ + AVRUsartState *usart = opaque; + uint8_t mask; + uint8_t data; + assert((value & 0xff) == value); + assert(size == 1); + + if (!usart->enabled) { + return; + } + + switch (addr) { + case USART_DR: + if (!(usart->csrb & USART_CSRB_TXEN)) { + /* Transmitter disabled, ignore. */ + return; + } + usart->csra |= USART_CSRA_TXC; + usart->csra |= USART_CSRA_DRE; + if (usart->csrb & USART_CSRB_TXCIE) { + qemu_set_irq(usart->txc_irq, 1); + usart->csra &= 0xff ^ USART_CSRA_TXC; + } + if (usart->csrb & USART_CSRB_DREIE) { + qemu_set_irq(usart->dre_irq, 1); + } + data = value; + qemu_chr_fe_write_all(&usart->chr, &data, 1); + break; + case USART_CSRA: + mask = 0b01000011; + /* Mask read-only bits. */ + value = (value & mask) | (usart->csra & (0xff ^ mask)); + usart->csra = value; + if (value & USART_CSRA_TXC) { + usart->csra ^= USART_CSRA_TXC; + qemu_set_irq(usart->txc_irq, 0); + } + if (value & USART_CSRA_MPCM) { + qemu_log_mask( + LOG_GUEST_ERROR, + "%s: MPCM not supported by USART\n", + __func__); + } + break; + case USART_CSRB: + mask = 0b11111101; + /* Mask read-only bits. */ + value = (value & mask) | (usart->csrb & (0xff ^ mask)); + usart->csrb = value; + if (!(value & USART_CSRB_RXEN)) { + /* Receiver disabled, flush input buffer. */ + usart->data_valid = false; + } + qemu_set_irq(usart->rxc_irq, + ((value & USART_CSRB_RXCIE) && + (usart->csra & USART_CSRA_RXC)) ? 1 : 0); + qemu_set_irq(usart->txc_irq, + ((value & USART_CSRB_TXCIE) && + (usart->csra & USART_CSRA_TXC)) ? 1 : 0); + qemu_set_irq(usart->dre_irq, + ((value & USART_CSRB_DREIE) && + (usart->csra & USART_CSRA_DRE)) ? 
1 : 0); + update_char_mask(usart); + break; + case USART_CSRC: + usart->csrc = value; + if ((value & USART_CSRC_MSEL1) && (value & USART_CSRC_MSEL0)) { + qemu_log_mask( + LOG_GUEST_ERROR, + "%s: SPI mode not supported by USART\n", + __func__); + } + if ((value & USART_CSRC_MSEL1) && !(value & USART_CSRC_MSEL0)) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad USART mode\n", __func__); + } + if (!(value & USART_CSRC_PM1) && (value & USART_CSRC_PM0)) { + qemu_log_mask( + LOG_GUEST_ERROR, + "%s: Bad USART parity mode\n", + __func__); + } + update_char_mask(usart); + break; + case USART_BRRL: + usart->brrl = value; + break; + case USART_BRRH: + usart->brrh = value & 0b00001111; + break; + default: + qemu_log_mask( + LOG_GUEST_ERROR, + "%s: Bad offset 0x%"HWADDR_PRIx"\n", + __func__, + addr); + } +} + +static const MemoryRegionOps avr_usart_ops = { + .read = avr_usart_read, + .write = avr_usart_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .impl = {.min_access_size = 1, .max_access_size = 1} +}; + +static Property avr_usart_properties[] = { + DEFINE_PROP_CHR("chardev", AVRUsartState, chr), + DEFINE_PROP_END_OF_LIST(), +}; + +static void avr_usart_pr(void *opaque, int irq, int level) +{ + AVRUsartState *s = AVR_USART(opaque); + + s->enabled = !level; + + if (!s->enabled) { + avr_usart_reset(DEVICE(s)); + } +} + +static void avr_usart_init(Object *obj) +{ + AVRUsartState *s = AVR_USART(obj); + sysbus_init_irq(SYS_BUS_DEVICE(obj), &s->rxc_irq); + sysbus_init_irq(SYS_BUS_DEVICE(obj), &s->dre_irq); + sysbus_init_irq(SYS_BUS_DEVICE(obj), &s->txc_irq); + memory_region_init_io(&s->mmio, obj, &avr_usart_ops, s, TYPE_AVR_USART, 7); + sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->mmio); + qdev_init_gpio_in(DEVICE(s), avr_usart_pr, 1); + s->enabled = true; +} + +static void avr_usart_realize(DeviceState *dev, Error **errp) +{ + AVRUsartState *s = AVR_USART(dev); + qemu_chr_fe_set_handlers(&s->chr, avr_usart_can_receive, + avr_usart_receive, NULL, NULL, + s, NULL, true); + avr_usart_reset(dev); +} + +static void avr_usart_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = avr_usart_reset; + device_class_set_props(dc, avr_usart_properties); + dc->realize = avr_usart_realize; +} + +static const TypeInfo avr_usart_info = { + .name = TYPE_AVR_USART, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(AVRUsartState), + .instance_init = avr_usart_init, + .class_init = avr_usart_class_init, +}; + +static void avr_usart_register_types(void) +{ + type_register_static(&avr_usart_info); +} + +type_init(avr_usart_register_types) diff --git a/hw/char/bcm2835_aux.c b/hw/char/bcm2835_aux.c new file mode 100644 index 000000000..96410b1ff --- /dev/null +++ b/hw/char/bcm2835_aux.c @@ -0,0 +1,321 @@ +/* + * BCM2835 (Raspberry Pi / Pi 2) Aux block (mini UART and SPI). + * Copyright (c) 2015, Microsoft + * Written by Andrew Baumann + * Based on pl011.c, copyright terms below: + * + * Arm PrimeCell PL011 UART + * + * Copyright (c) 2006 CodeSourcery. + * Written by Paul Brook + * + * This code is licensed under the GPL. + * + * At present only the core UART functions (data path for tx/rx) are + * implemented. 
The following features/registers are unimplemented: + * - Line/modem control + * - Scratch register + * - Extra control + * - Baudrate + * - SPI interfaces + */ + +#include "qemu/osdep.h" +#include "hw/char/bcm2835_aux.h" +#include "hw/irq.h" +#include "hw/qdev-properties.h" +#include "hw/qdev-properties-system.h" +#include "migration/vmstate.h" +#include "qemu/log.h" +#include "qemu/module.h" + +#define AUX_IRQ 0x0 +#define AUX_ENABLES 0x4 +#define AUX_MU_IO_REG 0x40 +#define AUX_MU_IER_REG 0x44 +#define AUX_MU_IIR_REG 0x48 +#define AUX_MU_LCR_REG 0x4c +#define AUX_MU_MCR_REG 0x50 +#define AUX_MU_LSR_REG 0x54 +#define AUX_MU_MSR_REG 0x58 +#define AUX_MU_SCRATCH 0x5c +#define AUX_MU_CNTL_REG 0x60 +#define AUX_MU_STAT_REG 0x64 +#define AUX_MU_BAUD_REG 0x68 + +/* bits in IER/IIR registers */ +#define RX_INT 0x1 +#define TX_INT 0x2 + +static void bcm2835_aux_update(BCM2835AuxState *s) +{ + /* signal an interrupt if either: + * 1. rx interrupt is enabled and we have a non-empty rx fifo, or + * 2. the tx interrupt is enabled (since we instantly drain the tx fifo) + */ + s->iir = 0; + if ((s->ier & RX_INT) && s->read_count != 0) { + s->iir |= RX_INT; + } + if (s->ier & TX_INT) { + s->iir |= TX_INT; + } + qemu_set_irq(s->irq, s->iir != 0); +} + +static uint64_t bcm2835_aux_read(void *opaque, hwaddr offset, unsigned size) +{ + BCM2835AuxState *s = opaque; + uint32_t c, res; + + switch (offset) { + case AUX_IRQ: + return s->iir != 0; + + case AUX_ENABLES: + return 1; /* mini UART permanently enabled */ + + case AUX_MU_IO_REG: + /* "DLAB bit set means access baudrate register" is NYI */ + c = s->read_fifo[s->read_pos]; + if (s->read_count > 0) { + s->read_count--; + if (++s->read_pos == BCM2835_AUX_RX_FIFO_LEN) { + s->read_pos = 0; + } + } + qemu_chr_fe_accept_input(&s->chr); + bcm2835_aux_update(s); + return c; + + case AUX_MU_IER_REG: + /* "DLAB bit set means access baudrate register" is NYI */ + return 0xc0 | s->ier; /* FIFO enables always read 1 */ + + case AUX_MU_IIR_REG: + res = 0xc0; /* FIFO enables */ + /* The spec is unclear on what happens when both tx and rx + * interrupts are active, besides that this cannot occur. At + * present, we choose to prioritise the rx interrupt, since + * the tx fifo is always empty. 
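+ * The value reads like a 16550 IIR: bit 0 set means no interrupt is
+ * pending, 0x4 flags received data and 0x2 an empty transmitter.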
*/ + if (s->read_count != 0) { + res |= 0x4; + } else { + res |= 0x2; + } + if (s->iir == 0) { + res |= 0x1; + } + return res; + + case AUX_MU_LCR_REG: + qemu_log_mask(LOG_UNIMP, "%s: AUX_MU_LCR_REG unsupported\n", __func__); + return 0; + + case AUX_MU_MCR_REG: + qemu_log_mask(LOG_UNIMP, "%s: AUX_MU_MCR_REG unsupported\n", __func__); + return 0; + + case AUX_MU_LSR_REG: + res = 0x60; /* tx idle, empty */ + if (s->read_count != 0) { + res |= 0x1; + } + return res; + + case AUX_MU_MSR_REG: + qemu_log_mask(LOG_UNIMP, "%s: AUX_MU_MSR_REG unsupported\n", __func__); + return 0; + + case AUX_MU_SCRATCH: + qemu_log_mask(LOG_UNIMP, "%s: AUX_MU_SCRATCH unsupported\n", __func__); + return 0; + + case AUX_MU_CNTL_REG: + return 0x3; /* tx, rx enabled */ + + case AUX_MU_STAT_REG: + res = 0x30e; /* space in the output buffer, empty tx fifo, idle tx/rx */ + if (s->read_count > 0) { + res |= 0x1; /* data in input buffer */ + assert(s->read_count < BCM2835_AUX_RX_FIFO_LEN); + res |= ((uint32_t)s->read_count) << 16; /* rx fifo fill level */ + } + return res; + + case AUX_MU_BAUD_REG: + qemu_log_mask(LOG_UNIMP, "%s: AUX_MU_BAUD_REG unsupported\n", __func__); + return 0; + + default: + qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset %"HWADDR_PRIx"\n", + __func__, offset); + return 0; + } +} + +static void bcm2835_aux_write(void *opaque, hwaddr offset, uint64_t value, + unsigned size) +{ + BCM2835AuxState *s = opaque; + unsigned char ch; + + switch (offset) { + case AUX_ENABLES: + if (value != 1) { + qemu_log_mask(LOG_UNIMP, "%s: unsupported attempt to enable SPI" + " or disable UART: 0x%"PRIx64"\n", + __func__, value); + } + break; + + case AUX_MU_IO_REG: + /* "DLAB bit set means access baudrate register" is NYI */ + ch = value; + /* XXX this blocks entire thread. Rewrite to use + * qemu_chr_fe_write and background I/O callbacks */ + qemu_chr_fe_write_all(&s->chr, &ch, 1); + break; + + case AUX_MU_IER_REG: + /* "DLAB bit set means access baudrate register" is NYI */ + s->ier = value & (TX_INT | RX_INT); + bcm2835_aux_update(s); + break; + + case AUX_MU_IIR_REG: + if (value & 0x2) { + s->read_count = 0; + } + break; + + case AUX_MU_LCR_REG: + qemu_log_mask(LOG_UNIMP, "%s: AUX_MU_LCR_REG unsupported\n", __func__); + break; + + case AUX_MU_MCR_REG: + qemu_log_mask(LOG_UNIMP, "%s: AUX_MU_MCR_REG unsupported\n", __func__); + break; + + case AUX_MU_SCRATCH: + qemu_log_mask(LOG_UNIMP, "%s: AUX_MU_SCRATCH unsupported\n", __func__); + break; + + case AUX_MU_CNTL_REG: + qemu_log_mask(LOG_UNIMP, "%s: AUX_MU_CNTL_REG unsupported\n", __func__); + break; + + case AUX_MU_BAUD_REG: + qemu_log_mask(LOG_UNIMP, "%s: AUX_MU_BAUD_REG unsupported\n", __func__); + break; + + default: + qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset %"HWADDR_PRIx"\n", + __func__, offset); + } + + bcm2835_aux_update(s); +} + +static int bcm2835_aux_can_receive(void *opaque) +{ + BCM2835AuxState *s = opaque; + + return s->read_count < BCM2835_AUX_RX_FIFO_LEN; +} + +static void bcm2835_aux_put_fifo(void *opaque, uint8_t value) +{ + BCM2835AuxState *s = opaque; + int slot; + + slot = s->read_pos + s->read_count; + if (slot >= BCM2835_AUX_RX_FIFO_LEN) { + slot -= BCM2835_AUX_RX_FIFO_LEN; + } + s->read_fifo[slot] = value; + s->read_count++; + if (s->read_count == BCM2835_AUX_RX_FIFO_LEN) { + /* buffer full */ + } + bcm2835_aux_update(s); +} + +static void bcm2835_aux_receive(void *opaque, const uint8_t *buf, int size) +{ + bcm2835_aux_put_fifo(opaque, *buf); +} + +static const MemoryRegionOps bcm2835_aux_ops = { + .read = bcm2835_aux_read, + .write = 
bcm2835_aux_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .impl.min_access_size = 4, + .impl.max_access_size = 4, + .valid.min_access_size = 1, + .valid.max_access_size = 4, +}; + +static const VMStateDescription vmstate_bcm2835_aux = { + .name = TYPE_BCM2835_AUX, + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT8_ARRAY(read_fifo, BCM2835AuxState, + BCM2835_AUX_RX_FIFO_LEN), + VMSTATE_UINT8(read_pos, BCM2835AuxState), + VMSTATE_UINT8(read_count, BCM2835AuxState), + VMSTATE_UINT8(ier, BCM2835AuxState), + VMSTATE_UINT8(iir, BCM2835AuxState), + VMSTATE_END_OF_LIST() + } +}; + +static void bcm2835_aux_init(Object *obj) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + BCM2835AuxState *s = BCM2835_AUX(obj); + + memory_region_init_io(&s->iomem, OBJECT(s), &bcm2835_aux_ops, s, + TYPE_BCM2835_AUX, 0x100); + sysbus_init_mmio(sbd, &s->iomem); + sysbus_init_irq(sbd, &s->irq); +} + +static void bcm2835_aux_realize(DeviceState *dev, Error **errp) +{ + BCM2835AuxState *s = BCM2835_AUX(dev); + + qemu_chr_fe_set_handlers(&s->chr, bcm2835_aux_can_receive, + bcm2835_aux_receive, NULL, NULL, s, NULL, true); +} + +static Property bcm2835_aux_props[] = { + DEFINE_PROP_CHR("chardev", BCM2835AuxState, chr), + DEFINE_PROP_END_OF_LIST(), +}; + +static void bcm2835_aux_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + + dc->realize = bcm2835_aux_realize; + dc->vmsd = &vmstate_bcm2835_aux; + set_bit(DEVICE_CATEGORY_INPUT, dc->categories); + device_class_set_props(dc, bcm2835_aux_props); +} + +static const TypeInfo bcm2835_aux_info = { + .name = TYPE_BCM2835_AUX, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(BCM2835AuxState), + .instance_init = bcm2835_aux_init, + .class_init = bcm2835_aux_class_init, +}; + +static void bcm2835_aux_register_types(void) +{ + type_register_static(&bcm2835_aux_info); +} + +type_init(bcm2835_aux_register_types) diff --git a/hw/char/cadence_uart.c b/hw/char/cadence_uart.c new file mode 100644 index 000000000..c069a3084 --- /dev/null +++ b/hw/char/cadence_uart.c @@ -0,0 +1,648 @@ +/* + * Device model for Cadence UART + * + * Reference: Xilinx Zynq 7000 reference manual + * - http://www.xilinx.com/support/documentation/user_guides/ug585-Zynq-7000-TRM.pdf + * - Chapter 19 UART Controller + * - Appendix B for Register details + * + * Copyright (c) 2010 Xilinx Inc. + * Copyright (c) 2012 Peter A.G. Crosthwaite (peter.crosthwaite@petalogix.com) + * Copyright (c) 2012 PetaLogix Pty Ltd. + * Written by Haibing Ma + * M.Habib + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . + */ + +#include "qemu/osdep.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "chardev/char-fe.h" +#include "chardev/char-serial.h" +#include "qemu/timer.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "hw/char/cadence_uart.h" +#include "hw/irq.h" +#include "hw/qdev-clock.h" +#include "hw/qdev-properties-system.h" +#include "trace.h" + +#ifdef CADENCE_UART_ERR_DEBUG +#define DB_PRINT(...) do { \ + fprintf(stderr, ": %s: ", __func__); \ + fprintf(stderr, ## __VA_ARGS__); \ + } while (0) +#else + #define DB_PRINT(...) 
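+/* DB_PRINT() expands to nothing unless CADENCE_UART_ERR_DEBUG is defined. */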
+#endif + +#define UART_SR_INTR_RTRIG 0x00000001 +#define UART_SR_INTR_REMPTY 0x00000002 +#define UART_SR_INTR_RFUL 0x00000004 +#define UART_SR_INTR_TEMPTY 0x00000008 +#define UART_SR_INTR_TFUL 0x00000010 +/* somewhat awkwardly, TTRIG is misaligned between SR and ISR */ +#define UART_SR_TTRIG 0x00002000 +#define UART_INTR_TTRIG 0x00000400 +/* bits fields in CSR that correlate to CISR. If any of these bits are set in + * SR, then the same bit in CISR is set high too */ +#define UART_SR_TO_CISR_MASK 0x0000001F + +#define UART_INTR_ROVR 0x00000020 +#define UART_INTR_FRAME 0x00000040 +#define UART_INTR_PARE 0x00000080 +#define UART_INTR_TIMEOUT 0x00000100 +#define UART_INTR_DMSI 0x00000200 +#define UART_INTR_TOVR 0x00001000 + +#define UART_SR_RACTIVE 0x00000400 +#define UART_SR_TACTIVE 0x00000800 +#define UART_SR_FDELT 0x00001000 + +#define UART_CR_RXRST 0x00000001 +#define UART_CR_TXRST 0x00000002 +#define UART_CR_RX_EN 0x00000004 +#define UART_CR_RX_DIS 0x00000008 +#define UART_CR_TX_EN 0x00000010 +#define UART_CR_TX_DIS 0x00000020 +#define UART_CR_RST_TO 0x00000040 +#define UART_CR_STARTBRK 0x00000080 +#define UART_CR_STOPBRK 0x00000100 + +#define UART_MR_CLKS 0x00000001 +#define UART_MR_CHRL 0x00000006 +#define UART_MR_CHRL_SH 1 +#define UART_MR_PAR 0x00000038 +#define UART_MR_PAR_SH 3 +#define UART_MR_NBSTOP 0x000000C0 +#define UART_MR_NBSTOP_SH 6 +#define UART_MR_CHMODE 0x00000300 +#define UART_MR_CHMODE_SH 8 +#define UART_MR_UCLKEN 0x00000400 +#define UART_MR_IRMODE 0x00000800 + +#define UART_DATA_BITS_6 (0x3 << UART_MR_CHRL_SH) +#define UART_DATA_BITS_7 (0x2 << UART_MR_CHRL_SH) +#define UART_PARITY_ODD (0x1 << UART_MR_PAR_SH) +#define UART_PARITY_EVEN (0x0 << UART_MR_PAR_SH) +#define UART_STOP_BITS_1 (0x3 << UART_MR_NBSTOP_SH) +#define UART_STOP_BITS_2 (0x2 << UART_MR_NBSTOP_SH) +#define NORMAL_MODE (0x0 << UART_MR_CHMODE_SH) +#define ECHO_MODE (0x1 << UART_MR_CHMODE_SH) +#define LOCAL_LOOPBACK (0x2 << UART_MR_CHMODE_SH) +#define REMOTE_LOOPBACK (0x3 << UART_MR_CHMODE_SH) + +#define UART_DEFAULT_REF_CLK (50 * 1000 * 1000) + +#define R_CR (0x00/4) +#define R_MR (0x04/4) +#define R_IER (0x08/4) +#define R_IDR (0x0C/4) +#define R_IMR (0x10/4) +#define R_CISR (0x14/4) +#define R_BRGR (0x18/4) +#define R_RTOR (0x1C/4) +#define R_RTRIG (0x20/4) +#define R_MCR (0x24/4) +#define R_MSR (0x28/4) +#define R_SR (0x2C/4) +#define R_TX_RX (0x30/4) +#define R_BDIV (0x34/4) +#define R_FDEL (0x38/4) +#define R_PMIN (0x3C/4) +#define R_PWID (0x40/4) +#define R_TTRIG (0x44/4) + + +static void uart_update_status(CadenceUARTState *s) +{ + s->r[R_SR] = 0; + + s->r[R_SR] |= s->rx_count == CADENCE_UART_RX_FIFO_SIZE ? UART_SR_INTR_RFUL + : 0; + s->r[R_SR] |= !s->rx_count ? UART_SR_INTR_REMPTY : 0; + s->r[R_SR] |= s->rx_count >= s->r[R_RTRIG] ? UART_SR_INTR_RTRIG : 0; + + s->r[R_SR] |= s->tx_count == CADENCE_UART_TX_FIFO_SIZE ? UART_SR_INTR_TFUL + : 0; + s->r[R_SR] |= !s->tx_count ? UART_SR_INTR_TEMPTY : 0; + s->r[R_SR] |= s->tx_count >= s->r[R_TTRIG] ? UART_SR_TTRIG : 0; + + s->r[R_CISR] |= s->r[R_SR] & UART_SR_TO_CISR_MASK; + s->r[R_CISR] |= s->r[R_SR] & UART_SR_TTRIG ? 
UART_INTR_TTRIG : 0; + qemu_set_irq(s->irq, !!(s->r[R_IMR] & s->r[R_CISR])); +} + +static void fifo_trigger_update(void *opaque) +{ + CadenceUARTState *s = opaque; + + if (s->r[R_RTOR]) { + s->r[R_CISR] |= UART_INTR_TIMEOUT; + uart_update_status(s); + } +} + +static void uart_rx_reset(CadenceUARTState *s) +{ + s->rx_wpos = 0; + s->rx_count = 0; + qemu_chr_fe_accept_input(&s->chr); +} + +static void uart_tx_reset(CadenceUARTState *s) +{ + s->tx_count = 0; +} + +static void uart_send_breaks(CadenceUARTState *s) +{ + int break_enabled = 1; + + qemu_chr_fe_ioctl(&s->chr, CHR_IOCTL_SERIAL_SET_BREAK, + &break_enabled); +} + +static void uart_parameters_setup(CadenceUARTState *s) +{ + QEMUSerialSetParams ssp; + unsigned int baud_rate, packet_size, input_clk; + input_clk = clock_get_hz(s->refclk); + + baud_rate = (s->r[R_MR] & UART_MR_CLKS) ? input_clk / 8 : input_clk; + baud_rate /= (s->r[R_BRGR] * (s->r[R_BDIV] + 1)); + trace_cadence_uart_baudrate(baud_rate); + + ssp.speed = baud_rate; + + packet_size = 1; + + switch (s->r[R_MR] & UART_MR_PAR) { + case UART_PARITY_EVEN: + ssp.parity = 'E'; + packet_size++; + break; + case UART_PARITY_ODD: + ssp.parity = 'O'; + packet_size++; + break; + default: + ssp.parity = 'N'; + break; + } + + switch (s->r[R_MR] & UART_MR_CHRL) { + case UART_DATA_BITS_6: + ssp.data_bits = 6; + break; + case UART_DATA_BITS_7: + ssp.data_bits = 7; + break; + default: + ssp.data_bits = 8; + break; + } + + switch (s->r[R_MR] & UART_MR_NBSTOP) { + case UART_STOP_BITS_1: + ssp.stop_bits = 1; + break; + default: + ssp.stop_bits = 2; + break; + } + + packet_size += ssp.data_bits + ssp.stop_bits; + if (ssp.speed == 0) { + /* + * Avoid division-by-zero below. + * TODO: find something better + */ + ssp.speed = 1; + } + s->char_tx_time = (NANOSECONDS_PER_SECOND / ssp.speed) * packet_size; + qemu_chr_fe_ioctl(&s->chr, CHR_IOCTL_SERIAL_SET_PARAMS, &ssp); +} + +static int uart_can_receive(void *opaque) +{ + CadenceUARTState *s = opaque; + int ret; + uint32_t ch_mode; + + /* ignore characters when unclocked or in reset */ + if (!clock_is_enabled(s->refclk) || device_is_in_reset(DEVICE(s))) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: uart is unclocked or in reset\n", + __func__); + return 0; + } + + ret = MAX(CADENCE_UART_RX_FIFO_SIZE, CADENCE_UART_TX_FIFO_SIZE); + ch_mode = s->r[R_MR] & UART_MR_CHMODE; + + if (ch_mode == NORMAL_MODE || ch_mode == ECHO_MODE) { + ret = MIN(ret, CADENCE_UART_RX_FIFO_SIZE - s->rx_count); + } + if (ch_mode == REMOTE_LOOPBACK || ch_mode == ECHO_MODE) { + ret = MIN(ret, CADENCE_UART_TX_FIFO_SIZE - s->tx_count); + } + return ret; +} + +static void uart_ctrl_update(CadenceUARTState *s) +{ + if (s->r[R_CR] & UART_CR_TXRST) { + uart_tx_reset(s); + } + + if (s->r[R_CR] & UART_CR_RXRST) { + uart_rx_reset(s); + } + + s->r[R_CR] &= ~(UART_CR_TXRST | UART_CR_RXRST); + + if (s->r[R_CR] & UART_CR_STARTBRK && !(s->r[R_CR] & UART_CR_STOPBRK)) { + uart_send_breaks(s); + } +} + +static void uart_write_rx_fifo(void *opaque, const uint8_t *buf, int size) +{ + CadenceUARTState *s = opaque; + uint64_t new_rx_time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + int i; + + if ((s->r[R_CR] & UART_CR_RX_DIS) || !(s->r[R_CR] & UART_CR_RX_EN)) { + return; + } + + if (s->rx_count == CADENCE_UART_RX_FIFO_SIZE) { + s->r[R_CISR] |= UART_INTR_ROVR; + } else { + for (i = 0; i < size; i++) { + s->rx_fifo[s->rx_wpos] = buf[i]; + s->rx_wpos = (s->rx_wpos + 1) % CADENCE_UART_RX_FIFO_SIZE; + s->rx_count++; + } + timer_mod(s->fifo_trigger_handle, new_rx_time + + (s->char_tx_time * 4)); + } + 
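/* Reception (re)arms the timeout above: fifo_trigger_update() raises UART_INTR_TIMEOUT if nothing else arrives within ~4 character times. */ +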
uart_update_status(s); +} + +static gboolean cadence_uart_xmit(void *do_not_use, GIOCondition cond, + void *opaque) +{ + CadenceUARTState *s = opaque; + int ret; + + /* instant drain the fifo when there's no back-end */ + if (!qemu_chr_fe_backend_connected(&s->chr)) { + s->tx_count = 0; + return FALSE; + } + + if (!s->tx_count) { + return FALSE; + } + + ret = qemu_chr_fe_write(&s->chr, s->tx_fifo, s->tx_count); + + if (ret >= 0) { + s->tx_count -= ret; + memmove(s->tx_fifo, s->tx_fifo + ret, s->tx_count); + } + + if (s->tx_count) { + guint r = qemu_chr_fe_add_watch(&s->chr, G_IO_OUT | G_IO_HUP, + cadence_uart_xmit, s); + if (!r) { + s->tx_count = 0; + return FALSE; + } + } + + uart_update_status(s); + return FALSE; +} + +static void uart_write_tx_fifo(CadenceUARTState *s, const uint8_t *buf, + int size) +{ + if ((s->r[R_CR] & UART_CR_TX_DIS) || !(s->r[R_CR] & UART_CR_TX_EN)) { + return; + } + + if (size > CADENCE_UART_TX_FIFO_SIZE - s->tx_count) { + size = CADENCE_UART_TX_FIFO_SIZE - s->tx_count; + /* + * This can only be a guest error via a bad tx fifo register push, + * as can_receive() should stop remote loop and echo modes ever getting + * us to here. + */ + qemu_log_mask(LOG_GUEST_ERROR, "cadence_uart: TxFIFO overflow"); + s->r[R_CISR] |= UART_INTR_ROVR; + } + + memcpy(s->tx_fifo + s->tx_count, buf, size); + s->tx_count += size; + + cadence_uart_xmit(NULL, G_IO_OUT, s); +} + +static void uart_receive(void *opaque, const uint8_t *buf, int size) +{ + CadenceUARTState *s = opaque; + uint32_t ch_mode = s->r[R_MR] & UART_MR_CHMODE; + + if (ch_mode == NORMAL_MODE || ch_mode == ECHO_MODE) { + uart_write_rx_fifo(opaque, buf, size); + } + if (ch_mode == REMOTE_LOOPBACK || ch_mode == ECHO_MODE) { + uart_write_tx_fifo(s, buf, size); + } +} + +static void uart_event(void *opaque, QEMUChrEvent event) +{ + CadenceUARTState *s = opaque; + uint8_t buf = '\0'; + + /* ignore characters when unclocked or in reset */ + if (!clock_is_enabled(s->refclk) || device_is_in_reset(DEVICE(s))) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: uart is unclocked or in reset\n", + __func__); + return; + } + + if (event == CHR_EVENT_BREAK) { + uart_write_rx_fifo(opaque, &buf, 1); + } + + uart_update_status(s); +} + +static void uart_read_rx_fifo(CadenceUARTState *s, uint32_t *c) +{ + if ((s->r[R_CR] & UART_CR_RX_DIS) || !(s->r[R_CR] & UART_CR_RX_EN)) { + return; + } + + if (s->rx_count) { + uint32_t rx_rpos = (CADENCE_UART_RX_FIFO_SIZE + s->rx_wpos - + s->rx_count) % CADENCE_UART_RX_FIFO_SIZE; + *c = s->rx_fifo[rx_rpos]; + s->rx_count--; + + qemu_chr_fe_accept_input(&s->chr); + } else { + *c = 0; + } + + uart_update_status(s); +} + +static MemTxResult uart_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size, MemTxAttrs attrs) +{ + CadenceUARTState *s = opaque; + + /* ignore access when unclocked or in reset */ + if (!clock_is_enabled(s->refclk) || device_is_in_reset(DEVICE(s))) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: uart is unclocked or in reset\n", + __func__); + return MEMTX_ERROR; + } + + DB_PRINT(" offset:%x data:%08x\n", (unsigned)offset, (unsigned)value); + offset >>= 2; + if (offset >= CADENCE_UART_R_MAX) { + return MEMTX_DECODE_ERROR; + } + switch (offset) { + case R_IER: /* ier (wts imr) */ + s->r[R_IMR] |= value; + break; + case R_IDR: /* idr (wtc imr) */ + s->r[R_IMR] &= ~value; + break; + case R_IMR: /* imr (read only) */ + break; + case R_CISR: /* cisr (wtc) */ + s->r[R_CISR] &= ~value; + break; + case R_TX_RX: /* UARTDR */ + switch (s->r[R_MR] & UART_MR_CHMODE) { + case NORMAL_MODE: + 
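/* Normal mode sends the byte out through the tx FIFO; local loopback feeds it straight back into the rx FIFO. */ +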
uart_write_tx_fifo(s, (uint8_t *) &value, 1); + break; + case LOCAL_LOOPBACK: + uart_write_rx_fifo(opaque, (uint8_t *) &value, 1); + break; + } + break; + case R_BRGR: /* Baud rate generator */ + if (value >= 0x01) { + s->r[offset] = value & 0xFFFF; + } + break; + case R_BDIV: /* Baud rate divider */ + if (value >= 0x04) { + s->r[offset] = value & 0xFF; + } + break; + default: + s->r[offset] = value; + } + + switch (offset) { + case R_CR: + uart_ctrl_update(s); + break; + case R_MR: + uart_parameters_setup(s); + break; + } + uart_update_status(s); + + return MEMTX_OK; +} + +static MemTxResult uart_read(void *opaque, hwaddr offset, + uint64_t *value, unsigned size, MemTxAttrs attrs) +{ + CadenceUARTState *s = opaque; + uint32_t c = 0; + + /* ignore access when unclocked or in reset */ + if (!clock_is_enabled(s->refclk) || device_is_in_reset(DEVICE(s))) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: uart is unclocked or in reset\n", + __func__); + return MEMTX_ERROR; + } + + offset >>= 2; + if (offset >= CADENCE_UART_R_MAX) { + return MEMTX_DECODE_ERROR; + } + if (offset == R_TX_RX) { + uart_read_rx_fifo(s, &c); + } else { + c = s->r[offset]; + } + + DB_PRINT(" offset:%x data:%08x\n", (unsigned)(offset << 2), (unsigned)c); + *value = c; + return MEMTX_OK; +} + +static const MemoryRegionOps uart_ops = { + .read_with_attrs = uart_read, + .write_with_attrs = uart_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void cadence_uart_reset_init(Object *obj, ResetType type) +{ + CadenceUARTState *s = CADENCE_UART(obj); + + s->r[R_CR] = 0x00000128; + s->r[R_IMR] = 0; + s->r[R_CISR] = 0; + s->r[R_RTRIG] = 0x00000020; + s->r[R_BRGR] = 0x0000028B; + s->r[R_BDIV] = 0x0000000F; + s->r[R_TTRIG] = 0x00000020; +} + +static void cadence_uart_reset_hold(Object *obj) +{ + CadenceUARTState *s = CADENCE_UART(obj); + + uart_rx_reset(s); + uart_tx_reset(s); + + uart_update_status(s); +} + +static void cadence_uart_realize(DeviceState *dev, Error **errp) +{ + CadenceUARTState *s = CADENCE_UART(dev); + + s->fifo_trigger_handle = timer_new_ns(QEMU_CLOCK_VIRTUAL, + fifo_trigger_update, s); + + qemu_chr_fe_set_handlers(&s->chr, uart_can_receive, uart_receive, + uart_event, NULL, s, NULL, true); +} + +static void cadence_uart_refclk_update(void *opaque, ClockEvent event) +{ + CadenceUARTState *s = opaque; + + /* recompute uart's speed on clock change */ + uart_parameters_setup(s); +} + +static void cadence_uart_init(Object *obj) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + CadenceUARTState *s = CADENCE_UART(obj); + + memory_region_init_io(&s->iomem, obj, &uart_ops, s, "uart", 0x1000); + sysbus_init_mmio(sbd, &s->iomem); + sysbus_init_irq(sbd, &s->irq); + + s->refclk = qdev_init_clock_in(DEVICE(obj), "refclk", + cadence_uart_refclk_update, s, ClockUpdate); + /* initialize the frequency in case the clock remains unconnected */ + clock_set_hz(s->refclk, UART_DEFAULT_REF_CLK); + + s->char_tx_time = (NANOSECONDS_PER_SECOND / 9600) * 10; +} + +static int cadence_uart_pre_load(void *opaque) +{ + CadenceUARTState *s = opaque; + + /* the frequency will be overriden if the refclk field is present */ + clock_set_hz(s->refclk, UART_DEFAULT_REF_CLK); + return 0; +} + +static int cadence_uart_post_load(void *opaque, int version_id) +{ + CadenceUARTState *s = opaque; + + /* Ensure these two aren't invalid numbers */ + if (s->r[R_BRGR] < 1 || s->r[R_BRGR] & ~0xFFFF || + s->r[R_BDIV] <= 3 || s->r[R_BDIV] & ~0xFF) { + /* Value is invalid, abort */ + return 1; + } + + uart_parameters_setup(s); + uart_update_status(s); + return 0; +} + 
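+/*
+ * Worked example for uart_parameters_setup(): with the reset-time
+ * values (R_BRGR = 0x28B = 651, R_BDIV = 0xF) and the default 50 MHz
+ * refclk, the line speed is 50000000 / (651 * (15 + 1)), i.e. roughly
+ * 4800 baud.
+ */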
+static const VMStateDescription vmstate_cadence_uart = { + .name = "cadence_uart", + .version_id = 3, + .minimum_version_id = 2, + .pre_load = cadence_uart_pre_load, + .post_load = cadence_uart_post_load, + .fields = (VMStateField[]) { + VMSTATE_UINT32_ARRAY(r, CadenceUARTState, CADENCE_UART_R_MAX), + VMSTATE_UINT8_ARRAY(rx_fifo, CadenceUARTState, + CADENCE_UART_RX_FIFO_SIZE), + VMSTATE_UINT8_ARRAY(tx_fifo, CadenceUARTState, + CADENCE_UART_TX_FIFO_SIZE), + VMSTATE_UINT32(rx_count, CadenceUARTState), + VMSTATE_UINT32(tx_count, CadenceUARTState), + VMSTATE_UINT32(rx_wpos, CadenceUARTState), + VMSTATE_TIMER_PTR(fifo_trigger_handle, CadenceUARTState), + VMSTATE_CLOCK_V(refclk, CadenceUARTState, 3), + VMSTATE_END_OF_LIST() + }, +}; + +static Property cadence_uart_properties[] = { + DEFINE_PROP_CHR("chardev", CadenceUARTState, chr), + DEFINE_PROP_END_OF_LIST(), +}; + +static void cadence_uart_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + ResettableClass *rc = RESETTABLE_CLASS(klass); + + dc->realize = cadence_uart_realize; + dc->vmsd = &vmstate_cadence_uart; + rc->phases.enter = cadence_uart_reset_init; + rc->phases.hold = cadence_uart_reset_hold; + device_class_set_props(dc, cadence_uart_properties); + } + +static const TypeInfo cadence_uart_info = { + .name = TYPE_CADENCE_UART, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(CadenceUARTState), + .instance_init = cadence_uart_init, + .class_init = cadence_uart_class_init, +}; + +static void cadence_uart_register_types(void) +{ + type_register_static(&cadence_uart_info); +} + +type_init(cadence_uart_register_types) diff --git a/hw/char/cmsdk-apb-uart.c b/hw/char/cmsdk-apb-uart.c new file mode 100644 index 000000000..f8dc89ee3 --- /dev/null +++ b/hw/char/cmsdk-apb-uart.c @@ -0,0 +1,409 @@ +/* + * ARM CMSDK APB UART emulation + * + * Copyright (c) 2017 Linaro Limited + * Written by Peter Maydell + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 or + * (at your option) any later version. 
+ */ + +/* This is a model of the "APB UART" which is part of the Cortex-M + * System Design Kit (CMSDK) and documented in the Cortex-M System + * Design Kit Technical Reference Manual (ARM DDI0479C): + * https://developer.arm.com/products/system-design/system-design-kits/cortex-m-system-design-kit + */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "qapi/error.h" +#include "trace.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "hw/registerfields.h" +#include "chardev/char-fe.h" +#include "chardev/char-serial.h" +#include "hw/char/cmsdk-apb-uart.h" +#include "hw/irq.h" +#include "hw/qdev-properties-system.h" + +REG32(DATA, 0) +REG32(STATE, 4) + FIELD(STATE, TXFULL, 0, 1) + FIELD(STATE, RXFULL, 1, 1) + FIELD(STATE, TXOVERRUN, 2, 1) + FIELD(STATE, RXOVERRUN, 3, 1) +REG32(CTRL, 8) + FIELD(CTRL, TX_EN, 0, 1) + FIELD(CTRL, RX_EN, 1, 1) + FIELD(CTRL, TX_INTEN, 2, 1) + FIELD(CTRL, RX_INTEN, 3, 1) + FIELD(CTRL, TXO_INTEN, 4, 1) + FIELD(CTRL, RXO_INTEN, 5, 1) + FIELD(CTRL, HSTEST, 6, 1) +REG32(INTSTATUS, 0xc) + FIELD(INTSTATUS, TX, 0, 1) + FIELD(INTSTATUS, RX, 1, 1) + FIELD(INTSTATUS, TXO, 2, 1) + FIELD(INTSTATUS, RXO, 3, 1) +REG32(BAUDDIV, 0x10) +REG32(PID4, 0xFD0) +REG32(PID5, 0xFD4) +REG32(PID6, 0xFD8) +REG32(PID7, 0xFDC) +REG32(PID0, 0xFE0) +REG32(PID1, 0xFE4) +REG32(PID2, 0xFE8) +REG32(PID3, 0xFEC) +REG32(CID0, 0xFF0) +REG32(CID1, 0xFF4) +REG32(CID2, 0xFF8) +REG32(CID3, 0xFFC) + +/* PID/CID values */ +static const int uart_id[] = { + 0x04, 0x00, 0x00, 0x00, /* PID4..PID7 */ + 0x21, 0xb8, 0x1b, 0x00, /* PID0..PID3 */ + 0x0d, 0xf0, 0x05, 0xb1, /* CID0..CID3 */ +}; + +static bool uart_baudrate_ok(CMSDKAPBUART *s) +{ + /* The minimum permitted bauddiv setting is 16, so we just ignore + * settings below that (usually this means the device has just + * been reset and not yet programmed). + */ + return s->bauddiv >= 16 && s->bauddiv <= s->pclk_frq; +} + +static void uart_update_parameters(CMSDKAPBUART *s) +{ + QEMUSerialSetParams ssp; + + /* This UART is always 8N1 but the baud rate is programmable. */ + if (!uart_baudrate_ok(s)) { + return; + } + + ssp.data_bits = 8; + ssp.parity = 'N'; + ssp.stop_bits = 1; + ssp.speed = s->pclk_frq / s->bauddiv; + qemu_chr_fe_ioctl(&s->chr, CHR_IOCTL_SERIAL_SET_PARAMS, &ssp); + trace_cmsdk_apb_uart_set_params(ssp.speed); +} + +static void cmsdk_apb_uart_update(CMSDKAPBUART *s) +{ + /* update outbound irqs, including handling the way the rxo and txo + * interrupt status bits are just logical AND of the overrun bit in + * STATE and the overrun interrupt enable bit in CTRL. 
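+ * CTRL.TXO_INTEN and CTRL.RXO_INTEN sit at bits 4 and 5, two bits
+ * above the shared TXOVERRUN/RXOVERRUN positions (2 and 3) used by
+ * both STATE and INTSTATUS, which is why "s->ctrl >> 2" lines the
+ * enables up with the status bits.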
+ */ + uint32_t omask = (R_INTSTATUS_RXO_MASK | R_INTSTATUS_TXO_MASK); + s->intstatus &= ~omask; + s->intstatus |= (s->state & (s->ctrl >> 2) & omask); + + qemu_set_irq(s->txint, !!(s->intstatus & R_INTSTATUS_TX_MASK)); + qemu_set_irq(s->rxint, !!(s->intstatus & R_INTSTATUS_RX_MASK)); + qemu_set_irq(s->txovrint, !!(s->intstatus & R_INTSTATUS_TXO_MASK)); + qemu_set_irq(s->rxovrint, !!(s->intstatus & R_INTSTATUS_RXO_MASK)); + qemu_set_irq(s->uartint, !!(s->intstatus)); +} + +static int uart_can_receive(void *opaque) +{ + CMSDKAPBUART *s = CMSDK_APB_UART(opaque); + + /* We can take a char if RX is enabled and the buffer is empty */ + if (s->ctrl & R_CTRL_RX_EN_MASK && !(s->state & R_STATE_RXFULL_MASK)) { + return 1; + } + return 0; +} + +static void uart_receive(void *opaque, const uint8_t *buf, int size) +{ + CMSDKAPBUART *s = CMSDK_APB_UART(opaque); + + trace_cmsdk_apb_uart_receive(*buf); + + /* In fact uart_can_receive() ensures that we can't be + * called unless RX is enabled and the buffer is empty, + * but we include this logic as documentation of what the + * hardware does if a character arrives in these circumstances. + */ + if (!(s->ctrl & R_CTRL_RX_EN_MASK)) { + /* Just drop the character on the floor */ + return; + } + + if (s->state & R_STATE_RXFULL_MASK) { + s->state |= R_STATE_RXOVERRUN_MASK; + } + + s->rxbuf = *buf; + s->state |= R_STATE_RXFULL_MASK; + if (s->ctrl & R_CTRL_RX_INTEN_MASK) { + s->intstatus |= R_INTSTATUS_RX_MASK; + } + cmsdk_apb_uart_update(s); +} + +static uint64_t uart_read(void *opaque, hwaddr offset, unsigned size) +{ + CMSDKAPBUART *s = CMSDK_APB_UART(opaque); + uint64_t r; + + switch (offset) { + case A_DATA: + r = s->rxbuf; + s->state &= ~R_STATE_RXFULL_MASK; + cmsdk_apb_uart_update(s); + qemu_chr_fe_accept_input(&s->chr); + break; + case A_STATE: + r = s->state; + break; + case A_CTRL: + r = s->ctrl; + break; + case A_INTSTATUS: + r = s->intstatus; + break; + case A_BAUDDIV: + r = s->bauddiv; + break; + case A_PID4 ... A_CID3: + r = uart_id[(offset - A_PID4) / 4]; + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "CMSDK APB UART read: bad offset %x\n", (int) offset); + r = 0; + break; + } + trace_cmsdk_apb_uart_read(offset, r, size); + return r; +} + +/* Try to send tx data, and arrange to be called back later if + * we can't (ie the char backend is busy/blocking). + */ +static gboolean uart_transmit(void *do_not_use, GIOCondition cond, void *opaque) +{ + CMSDKAPBUART *s = CMSDK_APB_UART(opaque); + int ret; + + s->watch_tag = 0; + + if (!(s->ctrl & R_CTRL_TX_EN_MASK) || !(s->state & R_STATE_TXFULL_MASK)) { + return FALSE; + } + + ret = qemu_chr_fe_write(&s->chr, &s->txbuf, 1); + if (ret <= 0) { + s->watch_tag = qemu_chr_fe_add_watch(&s->chr, G_IO_OUT | G_IO_HUP, + uart_transmit, s); + if (!s->watch_tag) { + /* Most common reason to be here is "no chardev backend": + * just insta-drain the buffer, so the serial output + * goes into a void, rather than blocking the guest. 
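+ * (qemu_chr_fe_add_watch() returns 0 when there is no chardev
+ * backend capable of being watched.)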
+ */ + goto buffer_drained; + } + /* Transmit pending */ + trace_cmsdk_apb_uart_tx_pending(); + return FALSE; + } + +buffer_drained: + /* Character successfully sent */ + trace_cmsdk_apb_uart_tx(s->txbuf); + s->state &= ~R_STATE_TXFULL_MASK; + /* Going from TXFULL set to clear triggers the tx interrupt */ + if (s->ctrl & R_CTRL_TX_INTEN_MASK) { + s->intstatus |= R_INTSTATUS_TX_MASK; + } + cmsdk_apb_uart_update(s); + return FALSE; +} + +static void uart_cancel_transmit(CMSDKAPBUART *s) +{ + if (s->watch_tag) { + g_source_remove(s->watch_tag); + s->watch_tag = 0; + } +} + +static void uart_write(void *opaque, hwaddr offset, uint64_t value, + unsigned size) +{ + CMSDKAPBUART *s = CMSDK_APB_UART(opaque); + + trace_cmsdk_apb_uart_write(offset, value, size); + + switch (offset) { + case A_DATA: + s->txbuf = value; + if (s->state & R_STATE_TXFULL_MASK) { + /* Buffer already full -- note the overrun and let the + * existing pending transmit callback handle the new char. + */ + s->state |= R_STATE_TXOVERRUN_MASK; + cmsdk_apb_uart_update(s); + } else { + s->state |= R_STATE_TXFULL_MASK; + uart_transmit(NULL, G_IO_OUT, s); + } + break; + case A_STATE: + /* Bits 0 and 1 are read only; bits 2 and 3 are W1C */ + s->state &= ~(value & + (R_STATE_TXOVERRUN_MASK | R_STATE_RXOVERRUN_MASK)); + cmsdk_apb_uart_update(s); + break; + case A_CTRL: + s->ctrl = value & 0x7f; + if ((s->ctrl & R_CTRL_TX_EN_MASK) && !uart_baudrate_ok(s)) { + qemu_log_mask(LOG_GUEST_ERROR, + "CMSDK APB UART: Tx enabled with invalid baudrate\n"); + } + cmsdk_apb_uart_update(s); + break; + case A_INTSTATUS: + /* All bits are W1C. Clearing the overrun interrupt bits really + * clears the overrun status bits in the STATE register (which + * is then reflected into the intstatus value by the update function). + */ + s->state &= ~(value & (R_INTSTATUS_TXO_MASK | R_INTSTATUS_RXO_MASK)); + s->intstatus &= ~value; + cmsdk_apb_uart_update(s); + break; + case A_BAUDDIV: + s->bauddiv = value & 0xFFFFF; + uart_update_parameters(s); + break; + case A_PID4 ... A_CID3: + qemu_log_mask(LOG_GUEST_ERROR, + "CMSDK APB UART write: write to RO offset 0x%x\n", + (int)offset); + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "CMSDK APB UART write: bad offset 0x%x\n", (int) offset); + break; + } +} + +static const MemoryRegionOps uart_ops = { + .read = uart_read, + .write = uart_write, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static void cmsdk_apb_uart_reset(DeviceState *dev) +{ + CMSDKAPBUART *s = CMSDK_APB_UART(dev); + + trace_cmsdk_apb_uart_reset(); + uart_cancel_transmit(s); + s->state = 0; + s->ctrl = 0; + s->intstatus = 0; + s->bauddiv = 0; + s->txbuf = 0; + s->rxbuf = 0; +} + +static void cmsdk_apb_uart_init(Object *obj) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + CMSDKAPBUART *s = CMSDK_APB_UART(obj); + + memory_region_init_io(&s->iomem, obj, &uart_ops, s, "uart", 0x1000); + sysbus_init_mmio(sbd, &s->iomem); + sysbus_init_irq(sbd, &s->txint); + sysbus_init_irq(sbd, &s->rxint); + sysbus_init_irq(sbd, &s->txovrint); + sysbus_init_irq(sbd, &s->rxovrint); + sysbus_init_irq(sbd, &s->uartint); +} + +static void cmsdk_apb_uart_realize(DeviceState *dev, Error **errp) +{ + CMSDKAPBUART *s = CMSDK_APB_UART(dev); + + if (s->pclk_frq == 0) { + error_setg(errp, "CMSDK APB UART: pclk-frq property must be set"); + return; + } + + /* This UART has no flow control, so we do not need to register + * an event handler to deal with CHR_EVENT_BREAK. 
+ */ + qemu_chr_fe_set_handlers(&s->chr, uart_can_receive, uart_receive, + NULL, NULL, s, NULL, true); +} + +static int cmsdk_apb_uart_post_load(void *opaque, int version_id) +{ + CMSDKAPBUART *s = CMSDK_APB_UART(opaque); + + /* If we have a pending character, arrange to resend it. */ + if (s->state & R_STATE_TXFULL_MASK) { + s->watch_tag = qemu_chr_fe_add_watch(&s->chr, G_IO_OUT | G_IO_HUP, + uart_transmit, s); + } + uart_update_parameters(s); + return 0; +} + +static const VMStateDescription cmsdk_apb_uart_vmstate = { + .name = "cmsdk-apb-uart", + .version_id = 1, + .minimum_version_id = 1, + .post_load = cmsdk_apb_uart_post_load, + .fields = (VMStateField[]) { + VMSTATE_UINT32(state, CMSDKAPBUART), + VMSTATE_UINT32(ctrl, CMSDKAPBUART), + VMSTATE_UINT32(intstatus, CMSDKAPBUART), + VMSTATE_UINT32(bauddiv, CMSDKAPBUART), + VMSTATE_UINT8(txbuf, CMSDKAPBUART), + VMSTATE_UINT8(rxbuf, CMSDKAPBUART), + VMSTATE_END_OF_LIST() + } +}; + +static Property cmsdk_apb_uart_properties[] = { + DEFINE_PROP_CHR("chardev", CMSDKAPBUART, chr), + DEFINE_PROP_UINT32("pclk-frq", CMSDKAPBUART, pclk_frq, 0), + DEFINE_PROP_END_OF_LIST(), +}; + +static void cmsdk_apb_uart_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = cmsdk_apb_uart_realize; + dc->vmsd = &cmsdk_apb_uart_vmstate; + dc->reset = cmsdk_apb_uart_reset; + device_class_set_props(dc, cmsdk_apb_uart_properties); +} + +static const TypeInfo cmsdk_apb_uart_info = { + .name = TYPE_CMSDK_APB_UART, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(CMSDKAPBUART), + .instance_init = cmsdk_apb_uart_init, + .class_init = cmsdk_apb_uart_class_init, +}; + +static void cmsdk_apb_uart_register_types(void) +{ + type_register_static(&cmsdk_apb_uart_info); +} + +type_init(cmsdk_apb_uart_register_types); diff --git a/hw/char/debugcon.c b/hw/char/debugcon.c new file mode 100644 index 000000000..fdb04fee0 --- /dev/null +++ b/hw/char/debugcon.c @@ -0,0 +1,145 @@ +/* + * QEMU Bochs-style debug console ("port E9") emulation + * + * Copyright (c) 2003-2004 Fabrice Bellard + * Copyright (c) 2008 Citrix Systems, Inc. + * Copyright (c) Intel Corporation; author: H. Peter Anvin + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/module.h" +#include "chardev/char-fe.h" +#include "hw/isa/isa.h" +#include "hw/qdev-properties.h" +#include "hw/qdev-properties-system.h" +#include "qom/object.h" + +#define TYPE_ISA_DEBUGCON_DEVICE "isa-debugcon" +OBJECT_DECLARE_SIMPLE_TYPE(ISADebugconState, ISA_DEBUGCON_DEVICE) + +//#define DEBUG_DEBUGCON + +typedef struct DebugconState { + MemoryRegion io; + CharBackend chr; + uint32_t readback; +} DebugconState; + +struct ISADebugconState { + ISADevice parent_obj; + + uint32_t iobase; + DebugconState state; +}; + +static void debugcon_ioport_write(void *opaque, hwaddr addr, uint64_t val, + unsigned width) +{ + DebugconState *s = opaque; + unsigned char ch = val; + +#ifdef DEBUG_DEBUGCON + printf(" [debugcon: write addr=0x%04" HWADDR_PRIx " val=0x%02" PRIx64 "]\n", addr, val); +#endif + + /* XXX this blocks entire thread. Rewrite to use + * qemu_chr_fe_write and background I/O callbacks */ + qemu_chr_fe_write_all(&s->chr, &ch, 1); +} + + +static uint64_t debugcon_ioport_read(void *opaque, hwaddr addr, unsigned width) +{ + DebugconState *s = opaque; + +#ifdef DEBUG_DEBUGCON + printf("debugcon: read addr=0x%04" HWADDR_PRIx "\n", addr); +#endif + + return s->readback; +} + +static const MemoryRegionOps debugcon_ops = { + .read = debugcon_ioport_read, + .write = debugcon_ioport_write, + .valid.min_access_size = 1, + .valid.max_access_size = 1, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static void debugcon_realize_core(DebugconState *s, Error **errp) +{ + if (!qemu_chr_fe_backend_connected(&s->chr)) { + error_setg(errp, "Can't create debugcon device, empty char device"); + return; + } + + qemu_chr_fe_set_handlers(&s->chr, NULL, NULL, NULL, NULL, s, NULL, true); +} + +static void debugcon_isa_realizefn(DeviceState *dev, Error **errp) +{ + ISADevice *d = ISA_DEVICE(dev); + ISADebugconState *isa = ISA_DEBUGCON_DEVICE(dev); + DebugconState *s = &isa->state; + Error *err = NULL; + + debugcon_realize_core(s, &err); + if (err != NULL) { + error_propagate(errp, err); + return; + } + memory_region_init_io(&s->io, OBJECT(dev), &debugcon_ops, s, + TYPE_ISA_DEBUGCON_DEVICE, 1); + memory_region_add_subregion(isa_address_space_io(d), + isa->iobase, &s->io); +} + +static Property debugcon_isa_properties[] = { + DEFINE_PROP_UINT32("iobase", ISADebugconState, iobase, 0xe9), + DEFINE_PROP_CHR("chardev", ISADebugconState, state.chr), + DEFINE_PROP_UINT32("readback", ISADebugconState, state.readback, 0xe9), + DEFINE_PROP_END_OF_LIST(), +}; + +static void debugcon_isa_class_initfn(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = debugcon_isa_realizefn; + device_class_set_props(dc, debugcon_isa_properties); + set_bit(DEVICE_CATEGORY_MISC, dc->categories); +} + +static const TypeInfo debugcon_isa_info = { + .name = TYPE_ISA_DEBUGCON_DEVICE, + .parent = TYPE_ISA_DEVICE, + .instance_size = sizeof(ISADebugconState), + .class_init = debugcon_isa_class_initfn, +}; + +static void debugcon_register_types(void) +{ + type_register_static(&debugcon_isa_info); +} + +type_init(debugcon_register_types) diff --git a/hw/char/digic-uart.c b/hw/char/digic-uart.c new file mode 100644 index 000000000..00e5df551 --- /dev/null +++ b/hw/char/digic-uart.c @@ -0,0 +1,203 @@ +/* + * QEMU model of the Canon DIGIC UART block. 
+ * + * Copyright (C) 2013 Antony Pavlov + * + * This model is based on reverse engineering efforts + * made by CHDK (http://chdk.wikia.com) and + * Magic Lantern (http://www.magiclantern.fm) projects + * contributors. + * + * See "Serial terminal" docs here: + * http://magiclantern.wikia.com/wiki/Register_Map#Misc_Registers + * + * The QEMU model of the Milkymist UART block by Michael Walle + * is used as a template. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include "qemu/osdep.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "chardev/char-fe.h" +#include "qemu/log.h" +#include "qemu/module.h" + +#include "hw/char/digic-uart.h" +#include "hw/qdev-properties.h" +#include "hw/qdev-properties-system.h" + +enum { + ST_RX_RDY = (1 << 0), + ST_TX_RDY = (1 << 1), +}; + +static uint64_t digic_uart_read(void *opaque, hwaddr addr, + unsigned size) +{ + DigicUartState *s = opaque; + uint64_t ret = 0; + + addr >>= 2; + + switch (addr) { + case R_RX: + s->reg_st &= ~(ST_RX_RDY); + ret = s->reg_rx; + break; + + case R_ST: + ret = s->reg_st; + break; + + default: + qemu_log_mask(LOG_UNIMP, + "digic-uart: read access to unknown register 0x" + TARGET_FMT_plx "\n", addr << 2); + } + + return ret; +} + +static void digic_uart_write(void *opaque, hwaddr addr, uint64_t value, + unsigned size) +{ + DigicUartState *s = opaque; + unsigned char ch = value; + + addr >>= 2; + + switch (addr) { + case R_TX: + /* XXX this blocks entire thread. Rewrite to use + * qemu_chr_fe_write and background I/O callbacks */ + qemu_chr_fe_write_all(&s->chr, &ch, 1); + break; + + case R_ST: + /* + * Ignore write to R_ST. + * + * The point is that this register is actively used + * during receiving and transmitting symbols, + * but we don't know the function of most of bits. + * + * Ignoring writes to R_ST is only a simplification + * of the model. It has no perceptible side effects + * for existing guests. 
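+ * (The transmitter is modelled as permanently ready: digic_uart_reset()
+ * sets ST_TX_RDY and nothing ever clears it, so a guest that polls
+ * R_ST before writing R_TX always makes progress.)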
+ */ + break; + + default: + qemu_log_mask(LOG_UNIMP, + "digic-uart: write access to unknown register 0x" + TARGET_FMT_plx "\n", addr << 2); + } +} + +static const MemoryRegionOps uart_mmio_ops = { + .read = digic_uart_read, + .write = digic_uart_write, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + }, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static int uart_can_rx(void *opaque) +{ + DigicUartState *s = opaque; + + return !(s->reg_st & ST_RX_RDY); +} + +static void uart_rx(void *opaque, const uint8_t *buf, int size) +{ + DigicUartState *s = opaque; + + assert(uart_can_rx(opaque)); + + s->reg_st |= ST_RX_RDY; + s->reg_rx = *buf; +} + +static void uart_event(void *opaque, QEMUChrEvent event) +{ +} + +static void digic_uart_reset(DeviceState *d) +{ + DigicUartState *s = DIGIC_UART(d); + + s->reg_rx = 0; + s->reg_st = ST_TX_RDY; +} + +static void digic_uart_realize(DeviceState *dev, Error **errp) +{ + DigicUartState *s = DIGIC_UART(dev); + + qemu_chr_fe_set_handlers(&s->chr, uart_can_rx, uart_rx, + uart_event, NULL, s, NULL, true); +} + +static void digic_uart_init(Object *obj) +{ + DigicUartState *s = DIGIC_UART(obj); + + memory_region_init_io(&s->regs_region, OBJECT(s), &uart_mmio_ops, s, + TYPE_DIGIC_UART, 0x18); + sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->regs_region); +} + +static const VMStateDescription vmstate_digic_uart = { + .name = "digic-uart", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(reg_rx, DigicUartState), + VMSTATE_UINT32(reg_st, DigicUartState), + VMSTATE_END_OF_LIST() + } +}; + +static Property digic_uart_properties[] = { + DEFINE_PROP_CHR("chardev", DigicUartState, chr), + DEFINE_PROP_END_OF_LIST(), +}; + +static void digic_uart_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = digic_uart_realize; + dc->reset = digic_uart_reset; + dc->vmsd = &vmstate_digic_uart; + device_class_set_props(dc, digic_uart_properties); +} + +static const TypeInfo digic_uart_info = { + .name = TYPE_DIGIC_UART, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(DigicUartState), + .instance_init = digic_uart_init, + .class_init = digic_uart_class_init, +}; + +static void digic_uart_register_types(void) +{ + type_register_static(&digic_uart_info); +} + +type_init(digic_uart_register_types) diff --git a/hw/char/escc.c b/hw/char/escc.c new file mode 100644 index 000000000..8755d8d34 --- /dev/null +++ b/hw/char/escc.c @@ -0,0 +1,1006 @@ +/* + * QEMU ESCC (Z8030/Z8530/Z85C30/SCC/ESCC) serial port emulation + * + * Copyright (c) 2003-2005 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "hw/irq.h" +#include "hw/qdev-properties.h" +#include "hw/qdev-properties-system.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "qemu/module.h" +#include "hw/char/escc.h" +#include "ui/console.h" +#include "trace.h" + +/* + * Chipset docs: + * "Z80C30/Z85C30/Z80230/Z85230/Z85233 SCC/ESCC User Manual", + * http://www.zilog.com/docs/serial/scc_escc_um.pdf + * + * On Sparc32 this is the serial port, mouse and keyboard part of chip STP2001 + * (Slave I/O), also produced as NCR89C105. See + * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR89C105.txt + * + * The serial ports implement full AMD AM8530 or Zilog Z8530 chips, + * mouse and keyboard ports don't implement all functions and they are + * only asynchronous. There is no DMA. + * + * Z85C30 is also used on PowerMacs and m68k Macs. + * + * There are some small differences between Sparc version (sunzilog) + * and PowerMac (pmac): + * Offset between control and data registers + * There is some kind of lockup bug, but we can ignore it + * CTS is inverted + * DMA on pmac using DBDMA chip + * pmac can do IRDA and faster rates, sunzilog can only do 38400 + * pmac baud rate generator clock is 3.6864 MHz, sunzilog 4.9152 MHz + * + * Linux driver for m68k Macs is the same as for PowerMac (pmac_zilog), + * but registers are grouped by type and not by channel: + * channel is selected by bit 0 of the address (instead of bit 1) + * and register is selected by bit 1 of the address (instead of bit 0). + */ + +/* + * Modifications: + * 2006-Aug-10 Igor Kovalenko : Renamed KBDQueue to SERIOQueue, implemented + * serial mouse queue. + * Implemented serial mouse protocol. + * + * 2010-May-23 Artyom Tarasenko: Reworked IUS logic + */ + +#define CHN_C(s) ((s)->chn == escc_chn_b ? 
'b' : 'a') + +#define SERIAL_CTRL 0 +#define SERIAL_DATA 1 + +#define W_CMD 0 +#define CMD_PTR_MASK 0x07 +#define CMD_CMD_MASK 0x38 +#define CMD_HI 0x08 +#define CMD_CLR_TXINT 0x28 +#define CMD_CLR_IUS 0x38 +#define W_INTR 1 +#define INTR_INTALL 0x01 +#define INTR_TXINT 0x02 +#define INTR_PAR_SPEC 0x04 +#define INTR_RXMODEMSK 0x18 +#define INTR_RXINT1ST 0x08 +#define INTR_RXINTALL 0x10 +#define INTR_WTRQ_TXRX 0x20 +#define W_IVEC 2 +#define W_RXCTRL 3 +#define RXCTRL_RXEN 0x01 +#define RXCTRL_HUNT 0x10 +#define W_TXCTRL1 4 +#define TXCTRL1_PAREN 0x01 +#define TXCTRL1_PAREV 0x02 +#define TXCTRL1_1STOP 0x04 +#define TXCTRL1_1HSTOP 0x08 +#define TXCTRL1_2STOP 0x0c +#define TXCTRL1_STPMSK 0x0c +#define TXCTRL1_CLK1X 0x00 +#define TXCTRL1_CLK16X 0x40 +#define TXCTRL1_CLK32X 0x80 +#define TXCTRL1_CLK64X 0xc0 +#define TXCTRL1_CLKMSK 0xc0 +#define W_TXCTRL2 5 +#define TXCTRL2_TXCRC 0x01 +#define TXCTRL2_TXEN 0x08 +#define TXCTRL2_BITMSK 0x60 +#define TXCTRL2_5BITS 0x00 +#define TXCTRL2_7BITS 0x20 +#define TXCTRL2_6BITS 0x40 +#define TXCTRL2_8BITS 0x60 +#define W_SYNC1 6 +#define W_SYNC2 7 +#define W_TXBUF 8 +#define W_MINTR 9 +#define MINTR_VIS 0x01 +#define MINTR_NV 0x02 +#define MINTR_STATUSHI 0x10 +#define MINTR_SOFTIACK 0x20 +#define MINTR_RST_MASK 0xc0 +#define MINTR_RST_B 0x40 +#define MINTR_RST_A 0x80 +#define MINTR_RST_ALL 0xc0 +#define W_MISC1 10 +#define MISC1_ENC_MASK 0x60 +#define W_CLOCK 11 +#define CLOCK_TRXC 0x08 +#define W_BRGLO 12 +#define W_BRGHI 13 +#define W_MISC2 14 +#define MISC2_BRG_EN 0x01 +#define MISC2_BRG_SRC 0x02 +#define MISC2_LCL_LOOP 0x10 +#define MISC2_PLLCMD0 0x20 +#define MISC2_PLLCMD1 0x40 +#define MISC2_PLLCMD2 0x80 +#define W_EXTINT 15 +#define EXTINT_DCD 0x08 +#define EXTINT_SYNCINT 0x10 +#define EXTINT_CTSINT 0x20 +#define EXTINT_TXUNDRN 0x40 +#define EXTINT_BRKINT 0x80 + +#define R_STATUS 0 +#define STATUS_RXAV 0x01 +#define STATUS_ZERO 0x02 +#define STATUS_TXEMPTY 0x04 +#define STATUS_DCD 0x08 +#define STATUS_SYNC 0x10 +#define STATUS_CTS 0x20 +#define STATUS_TXUNDRN 0x40 +#define STATUS_BRK 0x80 +#define R_SPEC 1 +#define SPEC_ALLSENT 0x01 +#define SPEC_BITS8 0x06 +#define R_IVEC 2 +#define IVEC_TXINTB 0x00 +#define IVEC_LONOINT 0x06 +#define IVEC_LORXINTA 0x0c +#define IVEC_LORXINTB 0x04 +#define IVEC_LOTXINTA 0x08 +#define IVEC_HINOINT 0x60 +#define IVEC_HIRXINTA 0x30 +#define IVEC_HIRXINTB 0x20 +#define IVEC_HITXINTA 0x10 +#define R_INTR 3 +#define INTR_EXTINTB 0x01 +#define INTR_TXINTB 0x02 +#define INTR_RXINTB 0x04 +#define INTR_EXTINTA 0x08 +#define INTR_TXINTA 0x10 +#define INTR_RXINTA 0x20 +#define R_IPEN 4 +#define R_TXCTRL1 5 +#define R_TXCTRL2 6 +#define R_BC 7 +#define R_RXBUF 8 +#define R_RXCTRL 9 +#define R_MISC 10 +#define MISC_2CLKMISS 0x40 +#define R_MISC1 11 +#define R_BRGLO 12 +#define R_BRGHI 13 +#define R_MISC1I 14 +#define R_EXTINT 15 + +static void handle_kbd_command(ESCCChannelState *s, int val); +static int serial_can_receive(void *opaque); +static void serial_receive_byte(ESCCChannelState *s, int ch); + +static int reg_shift(ESCCState *s) +{ + return s->bit_swap ? s->it_shift + 1 : s->it_shift; +} + +static int chn_shift(ESCCState *s) +{ + return s->bit_swap ? 
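+ /* bit-swapped layout (m68k Macs): channel select is the low address bit */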
s->it_shift : s->it_shift + 1; +} + +static void clear_queue(void *opaque) +{ + ESCCChannelState *s = opaque; + ESCCSERIOQueue *q = &s->queue; + q->rptr = q->wptr = q->count = 0; +} + +static void put_queue(void *opaque, int b) +{ + ESCCChannelState *s = opaque; + ESCCSERIOQueue *q = &s->queue; + + trace_escc_put_queue(CHN_C(s), b); + if (q->count >= ESCC_SERIO_QUEUE_SIZE) { + return; + } + q->data[q->wptr] = b; + if (++q->wptr == ESCC_SERIO_QUEUE_SIZE) { + q->wptr = 0; + } + q->count++; + serial_receive_byte(s, 0); +} + +static uint32_t get_queue(void *opaque) +{ + ESCCChannelState *s = opaque; + ESCCSERIOQueue *q = &s->queue; + int val; + + if (q->count == 0) { + return 0; + } else { + val = q->data[q->rptr]; + if (++q->rptr == ESCC_SERIO_QUEUE_SIZE) { + q->rptr = 0; + } + q->count--; + } + trace_escc_get_queue(CHN_C(s), val); + if (q->count > 0) { + serial_receive_byte(s, 0); + } + return val; +} + +static int escc_update_irq_chn(ESCCChannelState *s) +{ + if ((((s->wregs[W_INTR] & INTR_TXINT) && (s->txint == 1)) || + /* tx ints enabled, pending */ + ((((s->wregs[W_INTR] & INTR_RXMODEMSK) == INTR_RXINT1ST) || + ((s->wregs[W_INTR] & INTR_RXMODEMSK) == INTR_RXINTALL)) && + s->rxint == 1) || + /* rx ints enabled, pending */ + ((s->wregs[W_EXTINT] & EXTINT_BRKINT) && + (s->rregs[R_STATUS] & STATUS_BRK)))) { + /* break int e&p */ + return 1; + } + return 0; +} + +static void escc_update_irq(ESCCChannelState *s) +{ + int irq; + + irq = escc_update_irq_chn(s); + irq |= escc_update_irq_chn(s->otherchn); + + trace_escc_update_irq(irq); + qemu_set_irq(s->irq, irq); +} + +static void escc_reset_chn(ESCCChannelState *s) +{ + s->reg = 0; + s->rx = s->tx = 0; + s->rxint = s->txint = 0; + s->rxint_under_svc = s->txint_under_svc = 0; + s->e0_mode = s->led_mode = s->caps_lock_mode = s->num_lock_mode = 0; + clear_queue(s); +} + +static void escc_soft_reset_chn(ESCCChannelState *s) +{ + escc_reset_chn(s); + + s->wregs[W_CMD] = 0; + s->wregs[W_INTR] &= INTR_PAR_SPEC | INTR_WTRQ_TXRX; + s->wregs[W_RXCTRL] &= ~RXCTRL_RXEN; + /* 1 stop bit */ + s->wregs[W_TXCTRL1] |= TXCTRL1_1STOP; + s->wregs[W_TXCTRL2] &= TXCTRL2_TXCRC | TXCTRL2_8BITS; + s->wregs[W_MINTR] &= ~MINTR_SOFTIACK; + s->wregs[W_MISC1] &= MISC1_ENC_MASK; + /* PLL disabled */ + s->wregs[W_MISC2] &= MISC2_BRG_EN | MISC2_BRG_SRC | + MISC2_PLLCMD1 | MISC2_PLLCMD2; + s->wregs[W_MISC2] |= MISC2_PLLCMD0; + /* Enable most interrupts */ + s->wregs[W_EXTINT] = EXTINT_DCD | EXTINT_SYNCINT | EXTINT_CTSINT | + EXTINT_TXUNDRN | EXTINT_BRKINT; + + s->rregs[R_STATUS] &= STATUS_DCD | STATUS_SYNC | STATUS_CTS | STATUS_BRK; + s->rregs[R_STATUS] |= STATUS_TXEMPTY | STATUS_TXUNDRN; + if (s->disabled) { + s->rregs[R_STATUS] |= STATUS_DCD | STATUS_SYNC | STATUS_CTS; + } + s->rregs[R_SPEC] &= SPEC_ALLSENT; + s->rregs[R_SPEC] |= SPEC_BITS8; + s->rregs[R_INTR] = 0; + s->rregs[R_MISC] &= MISC_2CLKMISS; +} + +static void escc_hard_reset_chn(ESCCChannelState *s) +{ + escc_soft_reset_chn(s); + + /* + * Hard reset is almost identical to soft reset above, except that the + * values of WR9 (W_MINTR), WR10 (W_MISC1), WR11 (W_CLOCK) and WR14 + * (W_MISC2) have extra bits forced to 0/1 + */ + s->wregs[W_MINTR] &= MINTR_VIS | MINTR_NV; + s->wregs[W_MINTR] |= MINTR_RST_B | MINTR_RST_A; + s->wregs[W_MISC1] = 0; + s->wregs[W_CLOCK] = CLOCK_TRXC; + s->wregs[W_MISC2] &= MISC2_PLLCMD1 | MISC2_PLLCMD2; + s->wregs[W_MISC2] |= MISC2_LCL_LOOP | MISC2_PLLCMD0; +} + +static void escc_reset(DeviceState *d) +{ + ESCCState *s = ESCC(d); + int i, j; + + for (i = 0; i < 2; i++) { + ESCCChannelState *cs 
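+ /* chn[0] is channel B, chn[1] is channel A (cf. escc_properties) */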
= &s->chn[i]; + + /* + * According to the ESCC datasheet "Miscellaneous Questions" section + * on page 384, the values of the ESCC registers are not guaranteed on + * power-on until an explicit hardware or software reset has been + * issued. For now we zero the registers so that a device reset always + * returns the emulated device to a fixed state. + */ + for (j = 0; j < ESCC_SERIAL_REGS; j++) { + cs->rregs[j] = 0; + cs->wregs[j] = 0; + } + + /* + * ...but there is an exception. The "Transmit Interrupts and Transmit + * Buffer Empty Bit" section on page 50 of the ESCC datasheet says of + * the STATUS_TXEMPTY bit in R_STATUS: "After a hardware reset + * (including a hardware reset by software), or a channel reset, this + * bit is set to 1". The Sun PROM checks this bit early on startup and + * gets stuck in an infinite loop if it is not set. + */ + cs->rregs[R_STATUS] |= STATUS_TXEMPTY; + + escc_reset_chn(cs); + } +} + +static inline void set_rxint(ESCCChannelState *s) +{ + s->rxint = 1; + /* + * XXX: missing daisy chaining: escc_chn_b rx should have a lower priority + * than chn_a rx/tx/special_condition service + */ + s->rxint_under_svc = 1; + if (s->chn == escc_chn_a) { + s->rregs[R_INTR] |= INTR_RXINTA; + if (s->wregs[W_MINTR] & MINTR_STATUSHI) { + s->otherchn->rregs[R_IVEC] = IVEC_HIRXINTA; + } else { + s->otherchn->rregs[R_IVEC] = IVEC_LORXINTA; + } + } else { + s->otherchn->rregs[R_INTR] |= INTR_RXINTB; + if (s->wregs[W_MINTR] & MINTR_STATUSHI) { + s->rregs[R_IVEC] = IVEC_HIRXINTB; + } else { + s->rregs[R_IVEC] = IVEC_LORXINTB; + } + } + escc_update_irq(s); +} + +static inline void set_txint(ESCCChannelState *s) +{ + s->txint = 1; + if (!s->rxint_under_svc) { + s->txint_under_svc = 1; + if (s->chn == escc_chn_a) { + if (s->wregs[W_INTR] & INTR_TXINT) { + s->rregs[R_INTR] |= INTR_TXINTA; + } + if (s->wregs[W_MINTR] & MINTR_STATUSHI) { + s->otherchn->rregs[R_IVEC] = IVEC_HITXINTA; + } else { + s->otherchn->rregs[R_IVEC] = IVEC_LOTXINTA; + } + } else { + s->rregs[R_IVEC] = IVEC_TXINTB; + if (s->wregs[W_INTR] & INTR_TXINT) { + s->otherchn->rregs[R_INTR] |= INTR_TXINTB; + } + } + escc_update_irq(s); + } +} + +static inline void clr_rxint(ESCCChannelState *s) +{ + s->rxint = 0; + s->rxint_under_svc = 0; + if (s->chn == escc_chn_a) { + if (s->wregs[W_MINTR] & MINTR_STATUSHI) { + s->otherchn->rregs[R_IVEC] = IVEC_HINOINT; + } else { + s->otherchn->rregs[R_IVEC] = IVEC_LONOINT; + } + s->rregs[R_INTR] &= ~INTR_RXINTA; + } else { + if (s->wregs[W_MINTR] & MINTR_STATUSHI) { + s->rregs[R_IVEC] = IVEC_HINOINT; + } else { + s->rregs[R_IVEC] = IVEC_LONOINT; + } + s->otherchn->rregs[R_INTR] &= ~INTR_RXINTB; + } + if (s->txint) { + set_txint(s); + } + escc_update_irq(s); +} + +static inline void clr_txint(ESCCChannelState *s) +{ + s->txint = 0; + s->txint_under_svc = 0; + if (s->chn == escc_chn_a) { + if (s->wregs[W_MINTR] & MINTR_STATUSHI) { + s->otherchn->rregs[R_IVEC] = IVEC_HINOINT; + } else { + s->otherchn->rregs[R_IVEC] = IVEC_LONOINT; + } + s->rregs[R_INTR] &= ~INTR_TXINTA; + } else { + s->otherchn->rregs[R_INTR] &= ~INTR_TXINTB; + if (s->wregs[W_MINTR] & MINTR_STATUSHI) { + s->rregs[R_IVEC] = IVEC_HINOINT; + } else { + s->rregs[R_IVEC] = IVEC_LONOINT; + } + s->otherchn->rregs[R_INTR] &= ~INTR_TXINTB; + } + if (s->rxint) { + set_rxint(s); + } + escc_update_irq(s); +} + +static void escc_update_parameters(ESCCChannelState *s) +{ + int speed, parity, data_bits, stop_bits; + QEMUSerialSetParams ssp; + + if (!qemu_chr_fe_backend_connected(&s->chr) || s->type != escc_serial) { + return; + } + + if 
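+ /* decode WR4: bit 0 enables parity, bit 1 selects even parity */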
(s->wregs[W_TXCTRL1] & TXCTRL1_PAREN) { + if (s->wregs[W_TXCTRL1] & TXCTRL1_PAREV) { + parity = 'E'; + } else { + parity = 'O'; + } + } else { + parity = 'N'; + } + if ((s->wregs[W_TXCTRL1] & TXCTRL1_STPMSK) == TXCTRL1_2STOP) { + stop_bits = 2; + } else { + stop_bits = 1; + } + switch (s->wregs[W_TXCTRL2] & TXCTRL2_BITMSK) { + case TXCTRL2_5BITS: + data_bits = 5; + break; + case TXCTRL2_7BITS: + data_bits = 7; + break; + case TXCTRL2_6BITS: + data_bits = 6; + break; + default: + case TXCTRL2_8BITS: + data_bits = 8; + break; + } + speed = s->clock / ((s->wregs[W_BRGLO] | (s->wregs[W_BRGHI] << 8)) + 2); + switch (s->wregs[W_TXCTRL1] & TXCTRL1_CLKMSK) { + case TXCTRL1_CLK1X: + break; + case TXCTRL1_CLK16X: + speed /= 16; + break; + case TXCTRL1_CLK32X: + speed /= 32; + break; + default: + case TXCTRL1_CLK64X: + speed /= 64; + break; + } + ssp.speed = speed; + ssp.parity = parity; + ssp.data_bits = data_bits; + ssp.stop_bits = stop_bits; + trace_escc_update_parameters(CHN_C(s), speed, parity, data_bits, stop_bits); + qemu_chr_fe_ioctl(&s->chr, CHR_IOCTL_SERIAL_SET_PARAMS, &ssp); +} + +static void escc_mem_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + ESCCState *serial = opaque; + ESCCChannelState *s; + uint32_t saddr; + int newreg, channel; + + val &= 0xff; + saddr = (addr >> reg_shift(serial)) & 1; + channel = (addr >> chn_shift(serial)) & 1; + s = &serial->chn[channel]; + switch (saddr) { + case SERIAL_CTRL: + trace_escc_mem_writeb_ctrl(CHN_C(s), s->reg, val & 0xff); + newreg = 0; + switch (s->reg) { + case W_CMD: + newreg = val & CMD_PTR_MASK; + val &= CMD_CMD_MASK; + switch (val) { + case CMD_HI: + newreg |= CMD_HI; + break; + case CMD_CLR_TXINT: + clr_txint(s); + break; + case CMD_CLR_IUS: + if (s->rxint_under_svc) { + s->rxint_under_svc = 0; + if (s->txint) { + set_txint(s); + } + } else if (s->txint_under_svc) { + s->txint_under_svc = 0; + } + escc_update_irq(s); + break; + default: + break; + } + break; + case W_RXCTRL: + s->wregs[s->reg] = val; + if (val & RXCTRL_HUNT) { + s->rregs[R_STATUS] |= STATUS_SYNC; + } + break; + case W_INTR ... W_IVEC: + case W_SYNC1 ... W_TXBUF: + case W_MISC1 ... W_CLOCK: + case W_MISC2 ... W_EXTINT: + s->wregs[s->reg] = val; + break; + case W_TXCTRL1: + s->wregs[s->reg] = val; + /* + * The ESCC datasheet states that SPEC_ALLSENT is always set in + * sync mode, and set in async mode when all characters have + * cleared the transmitter. Since writes to SERIAL_DATA use the + * blocking qemu_chr_fe_write_all() function to write each + * character, the guest can never see the state when async data + * is in the process of being transmitted so we can set this bit + * unconditionally regardless of the state of the W_TXCTRL1 mode + * bits. 
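+ * (A model with truly asynchronous transmission would instead have to
+ * set SPEC_ALLSENT from a transmit-completion callback.)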
+ */ + s->rregs[R_SPEC] |= SPEC_ALLSENT; + escc_update_parameters(s); + break; + case W_TXCTRL2: + s->wregs[s->reg] = val; + escc_update_parameters(s); + break; + case W_BRGLO: + case W_BRGHI: + s->wregs[s->reg] = val; + s->rregs[s->reg] = val; + escc_update_parameters(s); + break; + case W_MINTR: + switch (val & MINTR_RST_MASK) { + case 0: + default: + break; + case MINTR_RST_B: + trace_escc_soft_reset_chn(CHN_C(&serial->chn[0])); + escc_soft_reset_chn(&serial->chn[0]); + return; + case MINTR_RST_A: + trace_escc_soft_reset_chn(CHN_C(&serial->chn[1])); + escc_soft_reset_chn(&serial->chn[1]); + return; + case MINTR_RST_ALL: + trace_escc_hard_reset(); + escc_hard_reset_chn(&serial->chn[0]); + escc_hard_reset_chn(&serial->chn[1]); + return; + } + break; + default: + break; + } + if (s->reg == 0) { + s->reg = newreg; + } else { + s->reg = 0; + } + break; + case SERIAL_DATA: + trace_escc_mem_writeb_data(CHN_C(s), val); + /* + * Lower the irq when data is written to the Tx buffer and no other + * interrupts are currently pending. The irq will be raised again once + * the Tx buffer becomes empty below. + */ + s->txint = 0; + escc_update_irq(s); + s->tx = val; + if (s->wregs[W_TXCTRL2] & TXCTRL2_TXEN) { /* tx enabled */ + if (qemu_chr_fe_backend_connected(&s->chr)) { + /* + * XXX this blocks entire thread. Rewrite to use + * qemu_chr_fe_write and background I/O callbacks + */ + qemu_chr_fe_write_all(&s->chr, &s->tx, 1); + } else if (s->type == escc_kbd && !s->disabled) { + handle_kbd_command(s, val); + } + } + s->rregs[R_STATUS] |= STATUS_TXEMPTY; /* Tx buffer empty */ + s->rregs[R_SPEC] |= SPEC_ALLSENT; /* All sent */ + set_txint(s); + break; + default: + break; + } +} + +static uint64_t escc_mem_read(void *opaque, hwaddr addr, + unsigned size) +{ + ESCCState *serial = opaque; + ESCCChannelState *s; + uint32_t saddr; + uint32_t ret; + int channel; + + saddr = (addr >> reg_shift(serial)) & 1; + channel = (addr >> chn_shift(serial)) & 1; + s = &serial->chn[channel]; + switch (saddr) { + case SERIAL_CTRL: + trace_escc_mem_readb_ctrl(CHN_C(s), s->reg, s->rregs[s->reg]); + ret = s->rregs[s->reg]; + s->reg = 0; + return ret; + case SERIAL_DATA: + s->rregs[R_STATUS] &= ~STATUS_RXAV; + clr_rxint(s); + if (s->type == escc_kbd || s->type == escc_mouse) { + ret = get_queue(s); + } else { + ret = s->rx; + } + trace_escc_mem_readb_data(CHN_C(s), ret); + qemu_chr_fe_accept_input(&s->chr); + return ret; + default: + break; + } + return 0; +} + +static const MemoryRegionOps escc_mem_ops = { + .read = escc_mem_read, + .write = escc_mem_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + .min_access_size = 1, + .max_access_size = 1, + }, +}; + +static int serial_can_receive(void *opaque) +{ + ESCCChannelState *s = opaque; + int ret; + + if (((s->wregs[W_RXCTRL] & RXCTRL_RXEN) == 0) /* Rx not enabled */ + || ((s->rregs[R_STATUS] & STATUS_RXAV) == STATUS_RXAV)) { + /* char already available */ + ret = 0; + } else { + ret = 1; + } + return ret; +} + +static void serial_receive_byte(ESCCChannelState *s, int ch) +{ + trace_escc_serial_receive_byte(CHN_C(s), ch); + s->rregs[R_STATUS] |= STATUS_RXAV; + s->rx = ch; + set_rxint(s); +} + +static void serial_receive_break(ESCCChannelState *s) +{ + s->rregs[R_STATUS] |= STATUS_BRK; + escc_update_irq(s); +} + +static void serial_receive1(void *opaque, const uint8_t *buf, int size) +{ + ESCCChannelState *s = opaque; + serial_receive_byte(s, buf[0]); +} + +static void serial_event(void *opaque, QEMUChrEvent event) +{ + ESCCChannelState *s = opaque; + if (event == 
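+ /* only break events are forwarded; other chardev events are ignored */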
CHR_EVENT_BREAK) { + serial_receive_break(s); + } +} + +static const VMStateDescription vmstate_escc_chn = { + .name = "escc_chn", + .version_id = 2, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(vmstate_dummy, ESCCChannelState), + VMSTATE_UINT32(reg, ESCCChannelState), + VMSTATE_UINT32(rxint, ESCCChannelState), + VMSTATE_UINT32(txint, ESCCChannelState), + VMSTATE_UINT32(rxint_under_svc, ESCCChannelState), + VMSTATE_UINT32(txint_under_svc, ESCCChannelState), + VMSTATE_UINT8(rx, ESCCChannelState), + VMSTATE_UINT8(tx, ESCCChannelState), + VMSTATE_BUFFER(wregs, ESCCChannelState), + VMSTATE_BUFFER(rregs, ESCCChannelState), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_escc = { + .name = "escc", + .version_id = 2, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_STRUCT_ARRAY(chn, ESCCState, 2, 2, vmstate_escc_chn, + ESCCChannelState), + VMSTATE_END_OF_LIST() + } +}; + +static void sunkbd_handle_event(DeviceState *dev, QemuConsole *src, + InputEvent *evt) +{ + ESCCChannelState *s = (ESCCChannelState *)dev; + int qcode, keycode; + InputKeyEvent *key; + + assert(evt->type == INPUT_EVENT_KIND_KEY); + key = evt->u.key.data; + qcode = qemu_input_key_value_to_qcode(key->key); + trace_escc_sunkbd_event_in(qcode, QKeyCode_str(qcode), + key->down); + + if (qcode == Q_KEY_CODE_CAPS_LOCK) { + if (key->down) { + s->caps_lock_mode ^= 1; + if (s->caps_lock_mode == 2) { + return; /* Drop second press */ + } + } else { + s->caps_lock_mode ^= 2; + if (s->caps_lock_mode == 3) { + return; /* Drop first release */ + } + } + } + + if (qcode == Q_KEY_CODE_NUM_LOCK) { + if (key->down) { + s->num_lock_mode ^= 1; + if (s->num_lock_mode == 2) { + return; /* Drop second press */ + } + } else { + s->num_lock_mode ^= 2; + if (s->num_lock_mode == 3) { + return; /* Drop first release */ + } + } + } + + if (qcode > qemu_input_map_qcode_to_sun_len) { + return; + } + + keycode = qemu_input_map_qcode_to_sun[qcode]; + if (!key->down) { + keycode |= 0x80; + } + trace_escc_sunkbd_event_out(keycode); + put_queue(s, keycode); +} + +static QemuInputHandler sunkbd_handler = { + .name = "sun keyboard", + .mask = INPUT_EVENT_MASK_KEY, + .event = sunkbd_handle_event, +}; + +static void handle_kbd_command(ESCCChannelState *s, int val) +{ + trace_escc_kbd_command(val); + if (s->led_mode) { /* Ignore led byte */ + s->led_mode = 0; + return; + } + switch (val) { + case 1: /* Reset, return type code */ + clear_queue(s); + put_queue(s, 0xff); + put_queue(s, 4); /* Type 4 */ + put_queue(s, 0x7f); + break; + case 0xe: /* Set leds */ + s->led_mode = 1; + break; + case 7: /* Query layout */ + case 0xf: + clear_queue(s); + put_queue(s, 0xfe); + put_queue(s, 0x21); /* en-us layout */ + break; + default: + break; + } +} + +static void sunmouse_event(void *opaque, + int dx, int dy, int dz, int buttons_state) +{ + ESCCChannelState *s = opaque; + int ch; + + trace_escc_sunmouse_event(dx, dy, buttons_state); + ch = 0x80 | 0x7; /* protocol start byte, no buttons pressed */ + + if (buttons_state & MOUSE_EVENT_LBUTTON) { + ch ^= 0x4; + } + if (buttons_state & MOUSE_EVENT_MBUTTON) { + ch ^= 0x2; + } + if (buttons_state & MOUSE_EVENT_RBUTTON) { + ch ^= 0x1; + } + + put_queue(s, ch); + + ch = dx; + + if (ch > 127) { + ch = 127; + } else if (ch < -127) { + ch = -127; + } + + put_queue(s, ch & 0xff); + + ch = -dy; + + if (ch > 127) { + ch = 127; + } else if (ch < -127) { + ch = -127; + } + + put_queue(s, ch & 0xff); + + /* MSC protocol specifies two extra motion bytes */ + + put_queue(s, 
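+ /* always zero: all motion was already reported in the first two bytes */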
0); + put_queue(s, 0); +} + +static void escc_init1(Object *obj) +{ + ESCCState *s = ESCC(obj); + SysBusDevice *dev = SYS_BUS_DEVICE(obj); + unsigned int i; + + for (i = 0; i < 2; i++) { + sysbus_init_irq(dev, &s->chn[i].irq); + s->chn[i].chn = 1 - i; + } + s->chn[0].otherchn = &s->chn[1]; + s->chn[1].otherchn = &s->chn[0]; + + sysbus_init_mmio(dev, &s->mmio); +} + +static void escc_realize(DeviceState *dev, Error **errp) +{ + ESCCState *s = ESCC(dev); + unsigned int i; + + s->chn[0].disabled = s->disabled; + s->chn[1].disabled = s->disabled; + + memory_region_init_io(&s->mmio, OBJECT(dev), &escc_mem_ops, s, "escc", + ESCC_SIZE << s->it_shift); + + for (i = 0; i < 2; i++) { + if (qemu_chr_fe_backend_connected(&s->chn[i].chr)) { + s->chn[i].clock = s->frequency / 2; + qemu_chr_fe_set_handlers(&s->chn[i].chr, serial_can_receive, + serial_receive1, serial_event, NULL, + &s->chn[i], NULL, true); + } + } + + if (s->chn[0].type == escc_mouse) { + qemu_add_mouse_event_handler(sunmouse_event, &s->chn[0], 0, + "QEMU Sun Mouse"); + } + if (s->chn[1].type == escc_kbd) { + s->chn[1].hs = qemu_input_handler_register((DeviceState *)(&s->chn[1]), + &sunkbd_handler); + } +} + +static Property escc_properties[] = { + DEFINE_PROP_UINT32("frequency", ESCCState, frequency, 0), + DEFINE_PROP_UINT32("it_shift", ESCCState, it_shift, 0), + DEFINE_PROP_BOOL("bit_swap", ESCCState, bit_swap, false), + DEFINE_PROP_UINT32("disabled", ESCCState, disabled, 0), + DEFINE_PROP_UINT32("chnBtype", ESCCState, chn[0].type, 0), + DEFINE_PROP_UINT32("chnAtype", ESCCState, chn[1].type, 0), + DEFINE_PROP_CHR("chrB", ESCCState, chn[0].chr), + DEFINE_PROP_CHR("chrA", ESCCState, chn[1].chr), + DEFINE_PROP_END_OF_LIST(), +}; + +static void escc_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = escc_reset; + dc->realize = escc_realize; + dc->vmsd = &vmstate_escc; + device_class_set_props(dc, escc_properties); + set_bit(DEVICE_CATEGORY_INPUT, dc->categories); +} + +static const TypeInfo escc_info = { + .name = TYPE_ESCC, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(ESCCState), + .instance_init = escc_init1, + .class_init = escc_class_init, +}; + +static void escc_register_types(void) +{ + type_register_static(&escc_info); +} + +type_init(escc_register_types) diff --git a/hw/char/etraxfs_ser.c b/hw/char/etraxfs_ser.c new file mode 100644 index 000000000..e8c301772 --- /dev/null +++ b/hw/char/etraxfs_ser.c @@ -0,0 +1,267 @@ +/* + * QEMU ETRAX System Emulator + * + * Copyright (c) 2007 Edgar E. Iglesias, Axis Communications AB. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "hw/irq.h" +#include "hw/qdev-properties.h" +#include "hw/qdev-properties-system.h" +#include "hw/sysbus.h" +#include "chardev/char-fe.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "qom/object.h" + +#define D(x) + +#define RW_TR_CTRL (0x00 / 4) +#define RW_TR_DMA_EN (0x04 / 4) +#define RW_REC_CTRL (0x08 / 4) +#define RW_DOUT (0x1c / 4) +#define RS_STAT_DIN (0x20 / 4) +#define R_STAT_DIN (0x24 / 4) +#define RW_INTR_MASK (0x2c / 4) +#define RW_ACK_INTR (0x30 / 4) +#define R_INTR (0x34 / 4) +#define R_MASKED_INTR (0x38 / 4) +#define R_MAX (0x3c / 4) + +#define STAT_DAV 16 +#define STAT_TR_IDLE 22 +#define STAT_TR_RDY 24 + +#define TYPE_ETRAX_FS_SERIAL "etraxfs-serial" +typedef struct ETRAXSerial ETRAXSerial; +DECLARE_INSTANCE_CHECKER(ETRAXSerial, ETRAX_SERIAL, + TYPE_ETRAX_FS_SERIAL) + +struct ETRAXSerial { + SysBusDevice parent_obj; + + MemoryRegion mmio; + CharBackend chr; + qemu_irq irq; + + int pending_tx; + + uint8_t rx_fifo[16]; + unsigned int rx_fifo_pos; + unsigned int rx_fifo_len; + + /* Control registers. */ + uint32_t regs[R_MAX]; +}; + +static void ser_update_irq(ETRAXSerial *s) +{ + + if (s->rx_fifo_len) { + s->regs[R_INTR] |= 8; + } else { + s->regs[R_INTR] &= ~8; + } + + s->regs[R_MASKED_INTR] = s->regs[R_INTR] & s->regs[RW_INTR_MASK]; + qemu_set_irq(s->irq, !!s->regs[R_MASKED_INTR]); +} + +static uint64_t +ser_read(void *opaque, hwaddr addr, unsigned int size) +{ + ETRAXSerial *s = opaque; + uint32_t r = 0; + + addr >>= 2; + switch (addr) + { + case R_STAT_DIN: + r = s->rx_fifo[(s->rx_fifo_pos - s->rx_fifo_len) & 15]; + if (s->rx_fifo_len) { + r |= 1 << STAT_DAV; + } + r |= 1 << STAT_TR_RDY; + r |= 1 << STAT_TR_IDLE; + break; + case RS_STAT_DIN: + r = s->rx_fifo[(s->rx_fifo_pos - s->rx_fifo_len) & 15]; + if (s->rx_fifo_len) { + r |= 1 << STAT_DAV; + s->rx_fifo_len--; + } + r |= 1 << STAT_TR_RDY; + r |= 1 << STAT_TR_IDLE; + break; + default: + r = s->regs[addr]; + D(qemu_log("%s " TARGET_FMT_plx "=%x\n", __func__, addr, r)); + break; + } + return r; +} + +static void +ser_write(void *opaque, hwaddr addr, + uint64_t val64, unsigned int size) +{ + ETRAXSerial *s = opaque; + uint32_t value = val64; + unsigned char ch = val64; + + D(qemu_log("%s " TARGET_FMT_plx "=%x\n", __func__, addr, value)); + addr >>= 2; + switch (addr) + { + case RW_DOUT: + /* XXX this blocks entire thread. 
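+ * A slow chardev backend stalls the vCPU performing this MMIO write.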
Rewrite to use + * qemu_chr_fe_write and background I/O callbacks */ + qemu_chr_fe_write_all(&s->chr, &ch, 1); + s->regs[R_INTR] |= 3; + s->pending_tx = 1; + s->regs[addr] = value; + break; + case RW_ACK_INTR: + if (s->pending_tx) { + value &= ~1; + s->pending_tx = 0; + D(qemu_log("fixedup value=%x r_intr=%x\n", + value, s->regs[R_INTR])); + } + s->regs[addr] = value; + s->regs[R_INTR] &= ~value; + D(printf("r_intr=%x\n", s->regs[R_INTR])); + break; + default: + s->regs[addr] = value; + break; + } + ser_update_irq(s); +} + +static const MemoryRegionOps ser_ops = { + .read = ser_read, + .write = ser_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4 + } +}; + +static Property etraxfs_ser_properties[] = { + DEFINE_PROP_CHR("chardev", ETRAXSerial, chr), + DEFINE_PROP_END_OF_LIST(), +}; + +static void serial_receive(void *opaque, const uint8_t *buf, int size) +{ + ETRAXSerial *s = opaque; + int i; + + /* Got a byte. */ + if (s->rx_fifo_len >= 16) { + D(qemu_log("WARNING: UART dropped char.\n")); + return; + } + + for (i = 0; i < size; i++) { + s->rx_fifo[s->rx_fifo_pos] = buf[i]; + s->rx_fifo_pos++; + s->rx_fifo_pos &= 15; + s->rx_fifo_len++; + } + + ser_update_irq(s); +} + +static int serial_can_receive(void *opaque) +{ + ETRAXSerial *s = opaque; + + /* Is the receiver enabled? */ + if (!(s->regs[RW_REC_CTRL] & (1 << 3))) { + return 0; + } + + return sizeof(s->rx_fifo) - s->rx_fifo_len; +} + +static void serial_event(void *opaque, QEMUChrEvent event) +{ + +} + +static void etraxfs_ser_reset(DeviceState *d) +{ + ETRAXSerial *s = ETRAX_SERIAL(d); + + /* transmitter begins ready and idle. */ + s->regs[RS_STAT_DIN] |= (1 << STAT_TR_RDY); + s->regs[RS_STAT_DIN] |= (1 << STAT_TR_IDLE); + + s->regs[RW_REC_CTRL] = 0x10000; + +} + +static void etraxfs_ser_init(Object *obj) +{ + ETRAXSerial *s = ETRAX_SERIAL(obj); + SysBusDevice *dev = SYS_BUS_DEVICE(obj); + + sysbus_init_irq(dev, &s->irq); + memory_region_init_io(&s->mmio, obj, &ser_ops, s, + "etraxfs-serial", R_MAX * 4); + sysbus_init_mmio(dev, &s->mmio); +} + +static void etraxfs_ser_realize(DeviceState *dev, Error **errp) +{ + ETRAXSerial *s = ETRAX_SERIAL(dev); + + qemu_chr_fe_set_handlers(&s->chr, + serial_can_receive, serial_receive, + serial_event, NULL, s, NULL, true); +} + +static void etraxfs_ser_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = etraxfs_ser_reset; + device_class_set_props(dc, etraxfs_ser_properties); + dc->realize = etraxfs_ser_realize; +} + +static const TypeInfo etraxfs_ser_info = { + .name = TYPE_ETRAX_FS_SERIAL, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(ETRAXSerial), + .instance_init = etraxfs_ser_init, + .class_init = etraxfs_ser_class_init, +}; + +static void etraxfs_serial_register_types(void) +{ + type_register_static(&etraxfs_ser_info); +} + +type_init(etraxfs_serial_register_types) diff --git a/hw/char/exynos4210_uart.c b/hw/char/exynos4210_uart.c new file mode 100644 index 000000000..80d401a37 --- /dev/null +++ b/hw/char/exynos4210_uart.c @@ -0,0 +1,738 @@ +/* + * Exynos4210 UART Emulation + * + * Copyright (C) 2011 Samsung Electronics Co Ltd. + * Maksim Kozlov, + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . + * + */ + +#include "qemu/osdep.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "qapi/error.h" +#include "qemu/error-report.h" +#include "qemu/module.h" +#include "qemu/timer.h" +#include "chardev/char-fe.h" +#include "chardev/char-serial.h" + +#include "hw/arm/exynos4210.h" +#include "hw/irq.h" +#include "hw/qdev-properties.h" +#include "hw/qdev-properties-system.h" + +#include "trace.h" +#include "qom/object.h" + +/* + * Offsets for UART registers relative to SFR base address + * for UARTn + * + */ +#define ULCON 0x0000 /* Line Control */ +#define UCON 0x0004 /* Control */ +#define UFCON 0x0008 /* FIFO Control */ +#define UMCON 0x000C /* Modem Control */ +#define UTRSTAT 0x0010 /* Tx/Rx Status */ +#define UERSTAT 0x0014 /* UART Error Status */ +#define UFSTAT 0x0018 /* FIFO Status */ +#define UMSTAT 0x001C /* Modem Status */ +#define UTXH 0x0020 /* Transmit Buffer */ +#define URXH 0x0024 /* Receive Buffer */ +#define UBRDIV 0x0028 /* Baud Rate Divisor */ +#define UFRACVAL 0x002C /* Divisor Fractional Value */ +#define UINTP 0x0030 /* Interrupt Pending */ +#define UINTSP 0x0034 /* Interrupt Source Pending */ +#define UINTM 0x0038 /* Interrupt Mask */ + +/* + * for indexing register in the uint32_t array + * + * 'reg' - register offset (see offsets definitions above) + * + */ +#define I_(reg) (reg / sizeof(uint32_t)) + +typedef struct Exynos4210UartReg { + const char *name; /* the only reason is the debug output */ + hwaddr offset; + uint32_t reset_value; +} Exynos4210UartReg; + +static const Exynos4210UartReg exynos4210_uart_regs[] = { + {"ULCON", ULCON, 0x00000000}, + {"UCON", UCON, 0x00003000}, + {"UFCON", UFCON, 0x00000000}, + {"UMCON", UMCON, 0x00000000}, + {"UTRSTAT", UTRSTAT, 0x00000006}, /* RO */ + {"UERSTAT", UERSTAT, 0x00000000}, /* RO */ + {"UFSTAT", UFSTAT, 0x00000000}, /* RO */ + {"UMSTAT", UMSTAT, 0x00000000}, /* RO */ + {"UTXH", UTXH, 0x5c5c5c5c}, /* WO, undefined reset value*/ + {"URXH", URXH, 0x00000000}, /* RO */ + {"UBRDIV", UBRDIV, 0x00000000}, + {"UFRACVAL", UFRACVAL, 0x00000000}, + {"UINTP", UINTP, 0x00000000}, + {"UINTSP", UINTSP, 0x00000000}, + {"UINTM", UINTM, 0x00000000}, +}; + +#define EXYNOS4210_UART_REGS_MEM_SIZE 0x3C + +/* UART FIFO Control */ +#define UFCON_FIFO_ENABLE 0x1 +#define UFCON_Rx_FIFO_RESET 0x2 +#define UFCON_Tx_FIFO_RESET 0x4 +#define UFCON_Tx_FIFO_TRIGGER_LEVEL_SHIFT 8 +#define UFCON_Tx_FIFO_TRIGGER_LEVEL (7 << UFCON_Tx_FIFO_TRIGGER_LEVEL_SHIFT) +#define UFCON_Rx_FIFO_TRIGGER_LEVEL_SHIFT 4 +#define UFCON_Rx_FIFO_TRIGGER_LEVEL (7 << UFCON_Rx_FIFO_TRIGGER_LEVEL_SHIFT) + +/* Uart FIFO Status */ +#define UFSTAT_Rx_FIFO_COUNT 0xff +#define UFSTAT_Rx_FIFO_FULL 0x100 +#define UFSTAT_Rx_FIFO_ERROR 0x200 +#define UFSTAT_Tx_FIFO_COUNT_SHIFT 16 +#define UFSTAT_Tx_FIFO_COUNT (0xff << UFSTAT_Tx_FIFO_COUNT_SHIFT) +#define UFSTAT_Tx_FIFO_FULL_SHIFT 24 +#define UFSTAT_Tx_FIFO_FULL (1 << UFSTAT_Tx_FIFO_FULL_SHIFT) + +/* UART Interrupt Source Pending */ +#define UINTSP_RXD 0x1 /* Receive interrupt */ +#define UINTSP_ERROR 0x2 /* Error interrupt */ +#define UINTSP_TXD 0x4 /* Transmit interrupt */ +#define UINTSP_MODEM 0x8 /* Modem interrupt */ + +/* UART Line Control */ +#define 
ULCON_IR_MODE_SHIFT 6 +#define ULCON_PARITY_SHIFT 3 +#define ULCON_STOP_BIT_SHIFT 1 + +/* UART Tx/Rx Status */ +#define UTRSTAT_Rx_TIMEOUT 0x8 +#define UTRSTAT_TRANSMITTER_EMPTY 0x4 +#define UTRSTAT_Tx_BUFFER_EMPTY 0x2 +#define UTRSTAT_Rx_BUFFER_DATA_READY 0x1 + +/* UART Error Status */ +#define UERSTAT_OVERRUN 0x1 +#define UERSTAT_PARITY 0x2 +#define UERSTAT_FRAME 0x4 +#define UERSTAT_BREAK 0x8 + +typedef struct { + uint8_t *data; + uint32_t sp, rp; /* store and retrieve pointers */ + uint32_t size; +} Exynos4210UartFIFO; + +#define TYPE_EXYNOS4210_UART "exynos4210.uart" +OBJECT_DECLARE_SIMPLE_TYPE(Exynos4210UartState, EXYNOS4210_UART) + +struct Exynos4210UartState { + SysBusDevice parent_obj; + + MemoryRegion iomem; + + uint32_t reg[EXYNOS4210_UART_REGS_MEM_SIZE / sizeof(uint32_t)]; + Exynos4210UartFIFO rx; + Exynos4210UartFIFO tx; + + QEMUTimer *fifo_timeout_timer; + uint64_t wordtime; /* word time in ns */ + + CharBackend chr; + qemu_irq irq; + qemu_irq dmairq; + + uint32_t channel; + +}; + + +/* Used only for tracing */ +static const char *exynos4210_uart_regname(hwaddr offset) +{ + + int i; + + for (i = 0; i < ARRAY_SIZE(exynos4210_uart_regs); i++) { + if (offset == exynos4210_uart_regs[i].offset) { + return exynos4210_uart_regs[i].name; + } + } + + return NULL; +} + + +static void fifo_store(Exynos4210UartFIFO *q, uint8_t ch) +{ + q->data[q->sp] = ch; + q->sp = (q->sp + 1) % q->size; +} + +static uint8_t fifo_retrieve(Exynos4210UartFIFO *q) +{ + uint8_t ret = q->data[q->rp]; + q->rp = (q->rp + 1) % q->size; + return ret; +} + +static int fifo_elements_number(const Exynos4210UartFIFO *q) +{ + if (q->sp < q->rp) { + return q->size - q->rp + q->sp; + } + + return q->sp - q->rp; +} + +static int fifo_empty_elements_number(const Exynos4210UartFIFO *q) +{ + return q->size - fifo_elements_number(q); +} + +static void fifo_reset(Exynos4210UartFIFO *q) +{ + g_free(q->data); + q->data = NULL; + + q->data = (uint8_t *)g_malloc0(q->size); + + q->sp = 0; + q->rp = 0; +} + +static uint32_t exynos4210_uart_FIFO_trigger_level(uint32_t channel, + uint32_t reg) +{ + uint32_t level; + + switch (channel) { + case 0: + level = reg * 32; + break; + case 1: + case 4: + level = reg * 8; + break; + case 2: + case 3: + level = reg * 2; + break; + default: + level = 0; + trace_exynos_uart_channel_error(channel); + break; + } + return level; +} + +static uint32_t +exynos4210_uart_Tx_FIFO_trigger_level(const Exynos4210UartState *s) +{ + uint32_t reg; + + reg = (s->reg[I_(UFCON)] & UFCON_Tx_FIFO_TRIGGER_LEVEL) >> + UFCON_Tx_FIFO_TRIGGER_LEVEL_SHIFT; + + return exynos4210_uart_FIFO_trigger_level(s->channel, reg); +} + +static uint32_t +exynos4210_uart_Rx_FIFO_trigger_level(const Exynos4210UartState *s) +{ + uint32_t reg; + + reg = ((s->reg[I_(UFCON)] & UFCON_Rx_FIFO_TRIGGER_LEVEL) >> + UFCON_Rx_FIFO_TRIGGER_LEVEL_SHIFT) + 1; + + return exynos4210_uart_FIFO_trigger_level(s->channel, reg); +} + +/* + * Update Rx DMA busy signal if Rx DMA is enabled. For simplicity, + * mark DMA as busy if DMA is enabled and the receive buffer is empty. 
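+ * (UCON[1:0] == 2 selects DMA request mode for the Rx path, which is
+ * the condition checked below.)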
+ */ +static void exynos4210_uart_update_dmabusy(Exynos4210UartState *s) +{ + bool rx_dma_enabled = (s->reg[I_(UCON)] & 0x03) == 0x02; + uint32_t count = fifo_elements_number(&s->rx); + + if (rx_dma_enabled && !count) { + qemu_irq_raise(s->dmairq); + trace_exynos_uart_dmabusy(s->channel); + } else { + qemu_irq_lower(s->dmairq); + trace_exynos_uart_dmaready(s->channel); + } +} + +static void exynos4210_uart_update_irq(Exynos4210UartState *s) +{ + /* + * The Tx interrupt is always requested if the number of data in the + * transmit FIFO is smaller than the trigger level. + */ + if (s->reg[I_(UFCON)] & UFCON_FIFO_ENABLE) { + uint32_t count = (s->reg[I_(UFSTAT)] & UFSTAT_Tx_FIFO_COUNT) >> + UFSTAT_Tx_FIFO_COUNT_SHIFT; + + if (count <= exynos4210_uart_Tx_FIFO_trigger_level(s)) { + s->reg[I_(UINTSP)] |= UINTSP_TXD; + } + + /* + * Rx interrupt if trigger level is reached or if rx timeout + * interrupt is disabled and there is data in the receive buffer + */ + count = fifo_elements_number(&s->rx); + if ((count && !(s->reg[I_(UCON)] & 0x80)) || + count >= exynos4210_uart_Rx_FIFO_trigger_level(s)) { + exynos4210_uart_update_dmabusy(s); + s->reg[I_(UINTSP)] |= UINTSP_RXD; + timer_del(s->fifo_timeout_timer); + } + } else if (s->reg[I_(UTRSTAT)] & UTRSTAT_Rx_BUFFER_DATA_READY) { + exynos4210_uart_update_dmabusy(s); + s->reg[I_(UINTSP)] |= UINTSP_RXD; + } + + s->reg[I_(UINTP)] = s->reg[I_(UINTSP)] & ~s->reg[I_(UINTM)]; + + if (s->reg[I_(UINTP)]) { + qemu_irq_raise(s->irq); + trace_exynos_uart_irq_raised(s->channel, s->reg[I_(UINTP)]); + } else { + qemu_irq_lower(s->irq); + trace_exynos_uart_irq_lowered(s->channel); + } +} + +static void exynos4210_uart_timeout_int(void *opaque) +{ + Exynos4210UartState *s = opaque; + + trace_exynos_uart_rx_timeout(s->channel, s->reg[I_(UTRSTAT)], + s->reg[I_(UINTSP)]); + + if ((s->reg[I_(UTRSTAT)] & UTRSTAT_Rx_BUFFER_DATA_READY) || + (s->reg[I_(UCON)] & (1 << 11))) { + s->reg[I_(UINTSP)] |= UINTSP_RXD; + s->reg[I_(UTRSTAT)] |= UTRSTAT_Rx_TIMEOUT; + exynos4210_uart_update_dmabusy(s); + exynos4210_uart_update_irq(s); + } +} + +static void exynos4210_uart_update_parameters(Exynos4210UartState *s) +{ + int speed, parity, data_bits, stop_bits; + QEMUSerialSetParams ssp; + uint64_t uclk_rate; + + if (s->reg[I_(UBRDIV)] == 0) { + return; + } + + if (s->reg[I_(ULCON)] & 0x20) { + if (s->reg[I_(ULCON)] & 0x28) { + parity = 'E'; + } else { + parity = 'O'; + } + } else { + parity = 'N'; + } + + if (s->reg[I_(ULCON)] & 0x4) { + stop_bits = 2; + } else { + stop_bits = 1; + } + + data_bits = (s->reg[I_(ULCON)] & 0x3) + 5; + + uclk_rate = 24000000; + + speed = uclk_rate / ((16 * (s->reg[I_(UBRDIV)]) & 0xffff) + + (s->reg[I_(UFRACVAL)] & 0x7) + 16); + + ssp.speed = speed; + ssp.parity = parity; + ssp.data_bits = data_bits; + ssp.stop_bits = stop_bits; + + s->wordtime = NANOSECONDS_PER_SECOND * (data_bits + stop_bits + 1) / speed; + + qemu_chr_fe_ioctl(&s->chr, CHR_IOCTL_SERIAL_SET_PARAMS, &ssp); + + trace_exynos_uart_update_params( + s->channel, speed, parity, data_bits, stop_bits, s->wordtime); +} + +static void exynos4210_uart_rx_timeout_set(Exynos4210UartState *s) +{ + if (s->reg[I_(UCON)] & 0x80) { + uint32_t timeout = ((s->reg[I_(UCON)] >> 12) & 0x0f) * s->wordtime; + + timer_mod(s->fifo_timeout_timer, + qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + timeout); + } else { + timer_del(s->fifo_timeout_timer); + } +} + +static void exynos4210_uart_write(void *opaque, hwaddr offset, + uint64_t val, unsigned size) +{ + Exynos4210UartState *s = (Exynos4210UartState *)opaque; + uint8_t ch; + + 
trace_exynos_uart_write(s->channel, offset, + exynos4210_uart_regname(offset), val); + + switch (offset) { + case ULCON: + case UBRDIV: + case UFRACVAL: + s->reg[I_(offset)] = val; + exynos4210_uart_update_parameters(s); + break; + case UFCON: + s->reg[I_(UFCON)] = val; + if (val & UFCON_Rx_FIFO_RESET) { + fifo_reset(&s->rx); + s->reg[I_(UFCON)] &= ~UFCON_Rx_FIFO_RESET; + trace_exynos_uart_rx_fifo_reset(s->channel); + } + if (val & UFCON_Tx_FIFO_RESET) { + fifo_reset(&s->tx); + s->reg[I_(UFCON)] &= ~UFCON_Tx_FIFO_RESET; + trace_exynos_uart_tx_fifo_reset(s->channel); + } + break; + + case UTXH: + if (qemu_chr_fe_backend_connected(&s->chr)) { + s->reg[I_(UTRSTAT)] &= ~(UTRSTAT_TRANSMITTER_EMPTY | + UTRSTAT_Tx_BUFFER_EMPTY); + ch = (uint8_t)val; + /* XXX this blocks entire thread. Rewrite to use + * qemu_chr_fe_write and background I/O callbacks */ + qemu_chr_fe_write_all(&s->chr, &ch, 1); + trace_exynos_uart_tx(s->channel, ch); + s->reg[I_(UTRSTAT)] |= UTRSTAT_TRANSMITTER_EMPTY | + UTRSTAT_Tx_BUFFER_EMPTY; + s->reg[I_(UINTSP)] |= UINTSP_TXD; + exynos4210_uart_update_irq(s); + } + break; + + case UINTP: + s->reg[I_(UINTP)] &= ~val; + s->reg[I_(UINTSP)] &= ~val; + trace_exynos_uart_intclr(s->channel, s->reg[I_(UINTP)]); + exynos4210_uart_update_irq(s); + break; + case UTRSTAT: + if (val & UTRSTAT_Rx_TIMEOUT) { + s->reg[I_(UTRSTAT)] &= ~UTRSTAT_Rx_TIMEOUT; + } + break; + case UERSTAT: + case UFSTAT: + case UMSTAT: + case URXH: + trace_exynos_uart_ro_write( + s->channel, exynos4210_uart_regname(offset), offset); + break; + case UINTSP: + s->reg[I_(UINTSP)] &= ~val; + break; + case UINTM: + s->reg[I_(UINTM)] = val; + exynos4210_uart_update_irq(s); + break; + case UCON: + case UMCON: + default: + s->reg[I_(offset)] = val; + break; + } +} + +static uint64_t exynos4210_uart_read(void *opaque, hwaddr offset, + unsigned size) +{ + Exynos4210UartState *s = (Exynos4210UartState *)opaque; + uint32_t res; + + switch (offset) { + case UERSTAT: /* Read Only */ + res = s->reg[I_(UERSTAT)]; + s->reg[I_(UERSTAT)] = 0; + trace_exynos_uart_read(s->channel, offset, + exynos4210_uart_regname(offset), res); + return res; + case UFSTAT: /* Read Only */ + s->reg[I_(UFSTAT)] = fifo_elements_number(&s->rx) & 0xff; + if (fifo_empty_elements_number(&s->rx) == 0) { + s->reg[I_(UFSTAT)] |= UFSTAT_Rx_FIFO_FULL; + s->reg[I_(UFSTAT)] &= ~0xff; + } + trace_exynos_uart_read(s->channel, offset, + exynos4210_uart_regname(offset), + s->reg[I_(UFSTAT)]); + return s->reg[I_(UFSTAT)]; + case URXH: + if (s->reg[I_(UFCON)] & UFCON_FIFO_ENABLE) { + if (fifo_elements_number(&s->rx)) { + res = fifo_retrieve(&s->rx); + trace_exynos_uart_rx(s->channel, res); + if (!fifo_elements_number(&s->rx)) { + s->reg[I_(UTRSTAT)] &= ~UTRSTAT_Rx_BUFFER_DATA_READY; + } else { + s->reg[I_(UTRSTAT)] |= UTRSTAT_Rx_BUFFER_DATA_READY; + } + } else { + trace_exynos_uart_rx_error(s->channel); + s->reg[I_(UINTSP)] |= UINTSP_ERROR; + exynos4210_uart_update_irq(s); + res = 0; + } + } else { + s->reg[I_(UTRSTAT)] &= ~UTRSTAT_Rx_BUFFER_DATA_READY; + res = s->reg[I_(URXH)]; + } + qemu_chr_fe_accept_input(&s->chr); + exynos4210_uart_update_dmabusy(s); + trace_exynos_uart_read(s->channel, offset, + exynos4210_uart_regname(offset), res); + return res; + case UTXH: + trace_exynos_uart_wo_read(s->channel, exynos4210_uart_regname(offset), + offset); + break; + default: + trace_exynos_uart_read(s->channel, offset, + exynos4210_uart_regname(offset), + s->reg[I_(offset)]); + return s->reg[I_(offset)]; + } + + trace_exynos_uart_read(s->channel, offset, 
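+ /* only the write-only UTXH case falls through to here */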
exynos4210_uart_regname(offset), + 0); + return 0; +} + +static const MemoryRegionOps exynos4210_uart_ops = { + .read = exynos4210_uart_read, + .write = exynos4210_uart_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + .max_access_size = 4, + .unaligned = false + }, +}; + +static int exynos4210_uart_can_receive(void *opaque) +{ + Exynos4210UartState *s = (Exynos4210UartState *)opaque; + + if (s->reg[I_(UFCON)] & UFCON_FIFO_ENABLE) { + return fifo_empty_elements_number(&s->rx); + } else { + return !(s->reg[I_(UTRSTAT)] & UTRSTAT_Rx_BUFFER_DATA_READY); + } +} + +static void exynos4210_uart_receive(void *opaque, const uint8_t *buf, int size) +{ + Exynos4210UartState *s = (Exynos4210UartState *)opaque; + int i; + + if (s->reg[I_(UFCON)] & UFCON_FIFO_ENABLE) { + if (fifo_empty_elements_number(&s->rx) < size) { + size = fifo_empty_elements_number(&s->rx); + s->reg[I_(UINTSP)] |= UINTSP_ERROR; + } + for (i = 0; i < size; i++) { + fifo_store(&s->rx, buf[i]); + } + exynos4210_uart_rx_timeout_set(s); + } else { + s->reg[I_(URXH)] = buf[0]; + } + s->reg[I_(UTRSTAT)] |= UTRSTAT_Rx_BUFFER_DATA_READY; + + exynos4210_uart_update_irq(s); +} + + +static void exynos4210_uart_event(void *opaque, QEMUChrEvent event) +{ + Exynos4210UartState *s = (Exynos4210UartState *)opaque; + + if (event == CHR_EVENT_BREAK) { + /* When the RxDn is held in logic 0, then a null byte is pushed into the + * fifo */ + fifo_store(&s->rx, '\0'); + s->reg[I_(UERSTAT)] |= UERSTAT_BREAK; + exynos4210_uart_update_irq(s); + } +} + + +static void exynos4210_uart_reset(DeviceState *dev) +{ + Exynos4210UartState *s = EXYNOS4210_UART(dev); + int i; + + for (i = 0; i < ARRAY_SIZE(exynos4210_uart_regs); i++) { + s->reg[I_(exynos4210_uart_regs[i].offset)] = + exynos4210_uart_regs[i].reset_value; + } + + fifo_reset(&s->rx); + fifo_reset(&s->tx); + + trace_exynos_uart_rxsize(s->channel, s->rx.size); +} + +static int exynos4210_uart_post_load(void *opaque, int version_id) +{ + Exynos4210UartState *s = (Exynos4210UartState *)opaque; + + exynos4210_uart_update_parameters(s); + exynos4210_uart_rx_timeout_set(s); + + return 0; +} + +static const VMStateDescription vmstate_exynos4210_uart_fifo = { + .name = "exynos4210.uart.fifo", + .version_id = 1, + .minimum_version_id = 1, + .post_load = exynos4210_uart_post_load, + .fields = (VMStateField[]) { + VMSTATE_UINT32(sp, Exynos4210UartFIFO), + VMSTATE_UINT32(rp, Exynos4210UartFIFO), + VMSTATE_VBUFFER_UINT32(data, Exynos4210UartFIFO, 1, NULL, size), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_exynos4210_uart = { + .name = "exynos4210.uart", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_STRUCT(rx, Exynos4210UartState, 1, + vmstate_exynos4210_uart_fifo, Exynos4210UartFIFO), + VMSTATE_UINT32_ARRAY(reg, Exynos4210UartState, + EXYNOS4210_UART_REGS_MEM_SIZE / sizeof(uint32_t)), + VMSTATE_END_OF_LIST() + } +}; + +DeviceState *exynos4210_uart_create(hwaddr addr, + int fifo_size, + int channel, + Chardev *chr, + qemu_irq irq) +{ + DeviceState *dev; + SysBusDevice *bus; + + dev = qdev_new(TYPE_EXYNOS4210_UART); + + qdev_prop_set_chr(dev, "chardev", chr); + qdev_prop_set_uint32(dev, "channel", channel); + qdev_prop_set_uint32(dev, "rx-size", fifo_size); + qdev_prop_set_uint32(dev, "tx-size", fifo_size); + + bus = SYS_BUS_DEVICE(dev); + sysbus_realize_and_unref(bus, &error_fatal); + if (addr != (hwaddr)-1) { + sysbus_mmio_map(bus, 0, addr); + } + sysbus_connect_irq(bus, 0, irq); + + return dev; +} + +static void 
exynos4210_uart_init(Object *obj) +{ + SysBusDevice *dev = SYS_BUS_DEVICE(obj); + Exynos4210UartState *s = EXYNOS4210_UART(dev); + + s->wordtime = NANOSECONDS_PER_SECOND * 10 / 9600; + + /* memory mapping */ + memory_region_init_io(&s->iomem, obj, &exynos4210_uart_ops, s, + "exynos4210.uart", EXYNOS4210_UART_REGS_MEM_SIZE); + sysbus_init_mmio(dev, &s->iomem); + + sysbus_init_irq(dev, &s->irq); + sysbus_init_irq(dev, &s->dmairq); +} + +static void exynos4210_uart_realize(DeviceState *dev, Error **errp) +{ + Exynos4210UartState *s = EXYNOS4210_UART(dev); + + s->fifo_timeout_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, + exynos4210_uart_timeout_int, s); + + qemu_chr_fe_set_handlers(&s->chr, exynos4210_uart_can_receive, + exynos4210_uart_receive, exynos4210_uart_event, + NULL, s, NULL, true); +} + +static Property exynos4210_uart_properties[] = { + DEFINE_PROP_CHR("chardev", Exynos4210UartState, chr), + DEFINE_PROP_UINT32("channel", Exynos4210UartState, channel, 0), + DEFINE_PROP_UINT32("rx-size", Exynos4210UartState, rx.size, 16), + DEFINE_PROP_UINT32("tx-size", Exynos4210UartState, tx.size, 16), + DEFINE_PROP_END_OF_LIST(), +}; + +static void exynos4210_uart_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = exynos4210_uart_realize; + dc->reset = exynos4210_uart_reset; + device_class_set_props(dc, exynos4210_uart_properties); + dc->vmsd = &vmstate_exynos4210_uart; +} + +static const TypeInfo exynos4210_uart_info = { + .name = TYPE_EXYNOS4210_UART, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(Exynos4210UartState), + .instance_init = exynos4210_uart_init, + .class_init = exynos4210_uart_class_init, +}; + +static void exynos4210_uart_register(void) +{ + type_register_static(&exynos4210_uart_info); +} + +type_init(exynos4210_uart_register) diff --git a/hw/char/goldfish_tty.c b/hw/char/goldfish_tty.c new file mode 100644 index 000000000..20b77885c --- /dev/null +++ b/hw/char/goldfish_tty.c @@ -0,0 +1,285 @@ +/* + * SPDX-License-Identifier: GPL-2.0-or-later + * + * Goldfish TTY + * + * (c) 2020 Laurent Vivier + * + */ + +#include "qemu/osdep.h" +#include "hw/irq.h" +#include "hw/qdev-properties-system.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "chardev/char-fe.h" +#include "qemu/log.h" +#include "trace.h" +#include "exec/address-spaces.h" +#include "hw/char/goldfish_tty.h" + +#define GOLDFISH_TTY_VERSION 1 + +/* registers */ + +enum { + REG_PUT_CHAR = 0x00, + REG_BYTES_READY = 0x04, + REG_CMD = 0x08, + REG_DATA_PTR = 0x10, + REG_DATA_LEN = 0x14, + REG_DATA_PTR_HIGH = 0x18, + REG_VERSION = 0x20, +}; + +/* commands */ + +enum { + CMD_INT_DISABLE = 0x00, + CMD_INT_ENABLE = 0x01, + CMD_WRITE_BUFFER = 0x02, + CMD_READ_BUFFER = 0x03, +}; + +static uint64_t goldfish_tty_read(void *opaque, hwaddr addr, + unsigned size) +{ + GoldfishTTYState *s = opaque; + uint64_t value = 0; + + switch (addr) { + case REG_BYTES_READY: + value = fifo8_num_used(&s->rx_fifo); + break; + case REG_VERSION: + value = GOLDFISH_TTY_VERSION; + break; + default: + qemu_log_mask(LOG_UNIMP, + "%s: unimplemented register read 0x%02"HWADDR_PRIx"\n", + __func__, addr); + break; + } + + trace_goldfish_tty_read(s, addr, size, value); + + return value; +} + +static void goldfish_tty_cmd(GoldfishTTYState *s, uint32_t cmd) +{ + uint32_t to_copy; + uint8_t *buf; + uint8_t data_out[GOLFISH_TTY_BUFFER_SIZE]; + int len; + uint64_t ptr; + + switch (cmd) { + case CMD_INT_DISABLE: + if (s->int_enabled) { + if (!fifo8_is_empty(&s->rx_fifo)) { + 
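+ /* RX data is still pending, so the IRQ line raised for it + * is high; lower it while interrupts are masked. */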
qemu_set_irq(s->irq, 0); + } + s->int_enabled = false; + } + break; + case CMD_INT_ENABLE: + if (!s->int_enabled) { + if (!fifo8_is_empty(&s->rx_fifo)) { + qemu_set_irq(s->irq, 1); + } + s->int_enabled = true; + } + break; + case CMD_WRITE_BUFFER: + len = s->data_len; + ptr = s->data_ptr; + while (len) { + to_copy = MIN(GOLFISH_TTY_BUFFER_SIZE, len); + + address_space_rw(&address_space_memory, ptr, + MEMTXATTRS_UNSPECIFIED, data_out, to_copy, 0); + qemu_chr_fe_write_all(&s->chr, data_out, to_copy); + + len -= to_copy; + ptr += to_copy; + } + break; + case CMD_READ_BUFFER: + len = s->data_len; + ptr = s->data_ptr; + while (len && !fifo8_is_empty(&s->rx_fifo)) { + buf = (uint8_t *)fifo8_pop_buf(&s->rx_fifo, len, &to_copy); + address_space_rw(&address_space_memory, ptr, + MEMTXATTRS_UNSPECIFIED, buf, to_copy, 1); + + len -= to_copy; + ptr += to_copy; + } + if (s->int_enabled && fifo8_is_empty(&s->rx_fifo)) { + qemu_set_irq(s->irq, 0); + } + break; + } +} + +static void goldfish_tty_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + GoldfishTTYState *s = opaque; + unsigned char c; + + trace_goldfish_tty_write(s, addr, size, value); + + switch (addr) { + case REG_PUT_CHAR: + c = value; + qemu_chr_fe_write_all(&s->chr, &c, sizeof(c)); + break; + case REG_CMD: + goldfish_tty_cmd(s, value); + break; + case REG_DATA_PTR: + s->data_ptr = value; + break; + case REG_DATA_PTR_HIGH: + s->data_ptr = deposit64(s->data_ptr, 32, 32, value); + break; + case REG_DATA_LEN: + s->data_len = value; + break; + default: + qemu_log_mask(LOG_UNIMP, + "%s: unimplemented register write 0x%02"HWADDR_PRIx"\n", + __func__, addr); + break; + } +} + +static const MemoryRegionOps goldfish_tty_ops = { + .read = goldfish_tty_read, + .write = goldfish_tty_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid.max_access_size = 4, + .impl.max_access_size = 4, + .impl.min_access_size = 4, +}; + +static int goldfish_tty_can_receive(void *opaque) +{ + GoldfishTTYState *s = opaque; + int available = fifo8_num_free(&s->rx_fifo); + + trace_goldfish_tty_can_receive(s, available); + + return available; +} + +static void goldfish_tty_receive(void *opaque, const uint8_t *buffer, int size) +{ + GoldfishTTYState *s = opaque; + + trace_goldfish_tty_receive(s, size); + + g_assert(size <= fifo8_num_free(&s->rx_fifo)); + + fifo8_push_all(&s->rx_fifo, buffer, size); + + if (s->int_enabled && !fifo8_is_empty(&s->rx_fifo)) { + qemu_set_irq(s->irq, 1); + } +} + +static void goldfish_tty_reset(DeviceState *dev) +{ + GoldfishTTYState *s = GOLDFISH_TTY(dev); + + trace_goldfish_tty_reset(s); + + fifo8_reset(&s->rx_fifo); + s->int_enabled = false; + s->data_ptr = 0; + s->data_len = 0; +} + +static void goldfish_tty_realize(DeviceState *dev, Error **errp) +{ + GoldfishTTYState *s = GOLDFISH_TTY(dev); + + trace_goldfish_tty_realize(s); + + fifo8_create(&s->rx_fifo, GOLFISH_TTY_BUFFER_SIZE); + memory_region_init_io(&s->iomem, OBJECT(s), &goldfish_tty_ops, s, + "goldfish_tty", 0x24); + + if (qemu_chr_fe_backend_connected(&s->chr)) { + qemu_chr_fe_set_handlers(&s->chr, goldfish_tty_can_receive, + goldfish_tty_receive, NULL, NULL, + s, NULL, true); + } +} + +static void goldfish_tty_unrealize(DeviceState *dev) +{ + GoldfishTTYState *s = GOLDFISH_TTY(dev); + + trace_goldfish_tty_unrealize(s); + + fifo8_destroy(&s->rx_fifo); +} + +static const VMStateDescription vmstate_goldfish_tty = { + .name = "goldfish_tty", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(data_len, GoldfishTTYState), + 
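+ /* 64-bit guest buffer address, assembled from REG_DATA_PTR + * and REG_DATA_PTR_HIGH. */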
VMSTATE_UINT64(data_ptr, GoldfishTTYState), + VMSTATE_BOOL(int_enabled, GoldfishTTYState), + VMSTATE_FIFO8(rx_fifo, GoldfishTTYState), + VMSTATE_END_OF_LIST() + } +}; + +static Property goldfish_tty_properties[] = { + DEFINE_PROP_CHR("chardev", GoldfishTTYState, chr), + DEFINE_PROP_END_OF_LIST(), +}; + +static void goldfish_tty_instance_init(Object *obj) +{ + SysBusDevice *dev = SYS_BUS_DEVICE(obj); + GoldfishTTYState *s = GOLDFISH_TTY(obj); + + trace_goldfish_tty_instance_init(s); + + sysbus_init_mmio(dev, &s->iomem); + sysbus_init_irq(dev, &s->irq); +} + +static void goldfish_tty_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + + device_class_set_props(dc, goldfish_tty_properties); + dc->reset = goldfish_tty_reset; + dc->realize = goldfish_tty_realize; + dc->unrealize = goldfish_tty_unrealize; + dc->vmsd = &vmstate_goldfish_tty; + set_bit(DEVICE_CATEGORY_INPUT, dc->categories); +} + +static const TypeInfo goldfish_tty_info = { + .name = TYPE_GOLDFISH_TTY, + .parent = TYPE_SYS_BUS_DEVICE, + .class_init = goldfish_tty_class_init, + .instance_init = goldfish_tty_instance_init, + .instance_size = sizeof(GoldfishTTYState), +}; + +static void goldfish_tty_register_types(void) +{ + type_register_static(&goldfish_tty_info); +} + +type_init(goldfish_tty_register_types) diff --git a/hw/char/grlib_apbuart.c b/hw/char/grlib_apbuart.c new file mode 100644 index 000000000..82ff40a53 --- /dev/null +++ b/hw/char/grlib_apbuart.c @@ -0,0 +1,304 @@ +/* + * QEMU GRLIB APB UART Emulator + * + * Copyright (c) 2010-2019 AdaCore + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include "hw/irq.h" +#include "hw/qdev-properties.h" +#include "hw/qdev-properties-system.h" +#include "hw/sparc/grlib.h" +#include "hw/sysbus.h" +#include "qemu/module.h" +#include "chardev/char-fe.h" + +#include "trace.h" +#include "qom/object.h" + +#define UART_REG_SIZE 20 /* Size of memory mapped registers */ + +/* UART status register fields */ +#define UART_DATA_READY (1 << 0) +#define UART_TRANSMIT_SHIFT_EMPTY (1 << 1) +#define UART_TRANSMIT_FIFO_EMPTY (1 << 2) +#define UART_BREAK_RECEIVED (1 << 3) +#define UART_OVERRUN (1 << 4) +#define UART_PARITY_ERROR (1 << 5) +#define UART_FRAMING_ERROR (1 << 6) +#define UART_TRANSMIT_FIFO_HALF (1 << 7) +#define UART_RECEIVE_FIFO_HALF (1 << 8) +#define UART_TRANSMIT_FIFO_FULL (1 << 9) +#define UART_RECEIVE_FIFO_FULL (1 << 10) + +/* UART control register fields */ +#define UART_RECEIVE_ENABLE (1 << 0) +#define UART_TRANSMIT_ENABLE (1 << 1) +#define UART_RECEIVE_INTERRUPT (1 << 2) +#define UART_TRANSMIT_INTERRUPT (1 << 3) +#define UART_PARITY_SELECT (1 << 4) +#define UART_PARITY_ENABLE (1 << 5) +#define UART_FLOW_CONTROL (1 << 6) +#define UART_LOOPBACK (1 << 7) +#define UART_EXTERNAL_CLOCK (1 << 8) +#define UART_RECEIVE_FIFO_INTERRUPT (1 << 9) +#define UART_TRANSMIT_FIFO_INTERRUPT (1 << 10) +#define UART_FIFO_DEBUG_MODE (1 << 11) +#define UART_OUTPUT_ENABLE (1 << 12) +#define UART_FIFO_AVAILABLE (1 << 31) + +/* Memory mapped register offsets */ +#define DATA_OFFSET 0x00 +#define STATUS_OFFSET 0x04 +#define CONTROL_OFFSET 0x08 +#define SCALER_OFFSET 0x0C /* not supported */ +#define FIFO_DEBUG_OFFSET 0x10 /* not supported */ + +#define FIFO_LENGTH 1024 + +OBJECT_DECLARE_SIMPLE_TYPE(UART, GRLIB_APB_UART) + +struct UART { + SysBusDevice parent_obj; + + MemoryRegion iomem; + qemu_irq irq; + + CharBackend chr; + + /* registers */ + uint32_t status; + uint32_t control; + + /* FIFO */ + char buffer[FIFO_LENGTH]; + int len; + int current; +}; + +static int uart_data_to_read(UART *uart) +{ + return uart->current < uart->len; +} + +static char uart_pop(UART *uart) +{ + char ret; + + if (uart->len == 0) { + uart->status &= ~UART_DATA_READY; + return 0; + } + + ret = uart->buffer[uart->current++]; + + if (uart->current >= uart->len) { + /* Flush */ + uart->len = 0; + uart->current = 0; + } + + if (!uart_data_to_read(uart)) { + uart->status &= ~UART_DATA_READY; + } + + return ret; +} + +static void uart_add_to_fifo(UART *uart, + const uint8_t *buffer, + int length) +{ + if (uart->len + length > FIFO_LENGTH) { + abort(); + } + memcpy(uart->buffer + uart->len, buffer, length); + uart->len += length; +} + +static int grlib_apbuart_can_receive(void *opaque) +{ + UART *uart = opaque; + + return FIFO_LENGTH - uart->len; +} + +static void grlib_apbuart_receive(void *opaque, const uint8_t *buf, int size) +{ + UART *uart = opaque; + + if (uart->control & UART_RECEIVE_ENABLE) { + uart_add_to_fifo(uart, buf, size); + + uart->status |= UART_DATA_READY; + + if (uart->control & UART_RECEIVE_INTERRUPT) { + qemu_irq_pulse(uart->irq); + } + } +} + +static void grlib_apbuart_event(void *opaque, QEMUChrEvent event) +{ + trace_grlib_apbuart_event(event); +} + + +static uint64_t grlib_apbuart_read(void *opaque, hwaddr addr, + unsigned size) +{ + UART *uart = opaque; + + addr &= 0xff; + + /* Unit registers */ + switch (addr) { + case DATA_OFFSET: + case DATA_OFFSET + 3: /* when only one byte read */ + return uart_pop(uart); + + case STATUS_OFFSET: + /* Read Only */ + return uart->status; + + case CONTROL_OFFSET: + return uart->control; + + case 
SCALER_OFFSET: + /* Not supported */ + return 0; + + default: + trace_grlib_apbuart_readl_unknown(addr); + return 0; + } +} + +static void grlib_apbuart_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + UART *uart = opaque; + unsigned char c = 0; + + addr &= 0xff; + + /* Unit registers */ + switch (addr) { + case DATA_OFFSET: + case DATA_OFFSET + 3: /* When only one byte write */ + /* Transmit when character device available and transmitter enabled */ + if (qemu_chr_fe_backend_connected(&uart->chr) && + (uart->control & UART_TRANSMIT_ENABLE)) { + c = value & 0xFF; + /* XXX this blocks entire thread. Rewrite to use + * qemu_chr_fe_write and background I/O callbacks */ + qemu_chr_fe_write_all(&uart->chr, &c, 1); + /* Generate interrupt */ + if (uart->control & UART_TRANSMIT_INTERRUPT) { + qemu_irq_pulse(uart->irq); + } + } + return; + + case STATUS_OFFSET: + /* Read Only */ + return; + + case CONTROL_OFFSET: + uart->control = value; + return; + + case SCALER_OFFSET: + /* Not supported */ + return; + + default: + break; + } + + trace_grlib_apbuart_writel_unknown(addr, value); +} + +static const MemoryRegionOps grlib_apbuart_ops = { + .write = grlib_apbuart_write, + .read = grlib_apbuart_read, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void grlib_apbuart_realize(DeviceState *dev, Error **errp) +{ + UART *uart = GRLIB_APB_UART(dev); + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + + qemu_chr_fe_set_handlers(&uart->chr, + grlib_apbuart_can_receive, + grlib_apbuart_receive, + grlib_apbuart_event, + NULL, uart, NULL, true); + + sysbus_init_irq(sbd, &uart->irq); + + memory_region_init_io(&uart->iomem, OBJECT(uart), &grlib_apbuart_ops, uart, + "uart", UART_REG_SIZE); + + sysbus_init_mmio(sbd, &uart->iomem); +} + +static void grlib_apbuart_reset(DeviceState *d) +{ + UART *uart = GRLIB_APB_UART(d); + + /* Transmitter FIFO and shift registers are always empty in QEMU */ + uart->status = UART_TRANSMIT_FIFO_EMPTY | UART_TRANSMIT_SHIFT_EMPTY; + /* Everything is off */ + uart->control = 0; + /* Flush receive FIFO */ + uart->len = 0; + uart->current = 0; +} + +static Property grlib_apbuart_properties[] = { + DEFINE_PROP_CHR("chrdev", UART, chr), + DEFINE_PROP_END_OF_LIST(), +}; + +static void grlib_apbuart_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = grlib_apbuart_realize; + dc->reset = grlib_apbuart_reset; + device_class_set_props(dc, grlib_apbuart_properties); +} + +static const TypeInfo grlib_apbuart_info = { + .name = TYPE_GRLIB_APB_UART, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(UART), + .class_init = grlib_apbuart_class_init, +}; + +static void grlib_apbuart_register_types(void) +{ + type_register_static(&grlib_apbuart_info); +} + +type_init(grlib_apbuart_register_types) diff --git a/hw/char/ibex_uart.c b/hw/char/ibex_uart.c new file mode 100644 index 000000000..e58181fcf --- /dev/null +++ b/hw/char/ibex_uart.c @@ -0,0 +1,569 @@ +/* + * QEMU lowRISC Ibex UART device + * + * Copyright (c) 2020 Western Digital + * + * For details check the documentation here: + * https://docs.opentitan.org/hw/ip/uart/doc/ + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * 
furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "hw/char/ibex_uart.h" +#include "hw/irq.h" +#include "hw/qdev-clock.h" +#include "hw/qdev-properties.h" +#include "hw/qdev-properties-system.h" +#include "migration/vmstate.h" +#include "qemu/log.h" +#include "qemu/module.h" + +REG32(INTR_STATE, 0x00) + FIELD(INTR_STATE, TX_WATERMARK, 0, 1) + FIELD(INTR_STATE, RX_WATERMARK, 1, 1) + FIELD(INTR_STATE, TX_EMPTY, 2, 1) + FIELD(INTR_STATE, RX_OVERFLOW, 3, 1) +REG32(INTR_ENABLE, 0x04) +REG32(INTR_TEST, 0x08) +REG32(ALERT_TEST, 0x0C) +REG32(CTRL, 0x10) + FIELD(CTRL, TX_ENABLE, 0, 1) + FIELD(CTRL, RX_ENABLE, 1, 1) + FIELD(CTRL, NF, 2, 1) + FIELD(CTRL, SLPBK, 4, 1) + FIELD(CTRL, LLPBK, 5, 1) + FIELD(CTRL, PARITY_EN, 6, 1) + FIELD(CTRL, PARITY_ODD, 7, 1) + FIELD(CTRL, RXBLVL, 8, 2) + FIELD(CTRL, NCO, 16, 16) +REG32(STATUS, 0x14) + FIELD(STATUS, TXFULL, 0, 1) + FIELD(STATUS, RXFULL, 1, 1) + FIELD(STATUS, TXEMPTY, 2, 1) + FIELD(STATUS, RXIDLE, 4, 1) + FIELD(STATUS, RXEMPTY, 5, 1) +REG32(RDATA, 0x18) +REG32(WDATA, 0x1C) +REG32(FIFO_CTRL, 0x20) + FIELD(FIFO_CTRL, RXRST, 0, 1) + FIELD(FIFO_CTRL, TXRST, 1, 1) + FIELD(FIFO_CTRL, RXILVL, 2, 3) + FIELD(FIFO_CTRL, TXILVL, 5, 2) +REG32(FIFO_STATUS, 0x24) + FIELD(FIFO_STATUS, TXLVL, 0, 5) + FIELD(FIFO_STATUS, RXLVL, 16, 5) +REG32(OVRD, 0x28) +REG32(VAL, 0x2C) +REG32(TIMEOUT_CTRL, 0x30) + +static void ibex_uart_update_irqs(IbexUartState *s) +{ + if (s->uart_intr_state & s->uart_intr_enable & R_INTR_STATE_TX_WATERMARK_MASK) { + qemu_set_irq(s->tx_watermark, 1); + } else { + qemu_set_irq(s->tx_watermark, 0); + } + + if (s->uart_intr_state & s->uart_intr_enable & R_INTR_STATE_RX_WATERMARK_MASK) { + qemu_set_irq(s->rx_watermark, 1); + } else { + qemu_set_irq(s->rx_watermark, 0); + } + + if (s->uart_intr_state & s->uart_intr_enable & R_INTR_STATE_TX_EMPTY_MASK) { + qemu_set_irq(s->tx_empty, 1); + } else { + qemu_set_irq(s->tx_empty, 0); + } + + if (s->uart_intr_state & s->uart_intr_enable & R_INTR_STATE_RX_OVERFLOW_MASK) { + qemu_set_irq(s->rx_overflow, 1); + } else { + qemu_set_irq(s->rx_overflow, 0); + } +} + +static int ibex_uart_can_receive(void *opaque) +{ + IbexUartState *s = opaque; + + if ((s->uart_ctrl & R_CTRL_RX_ENABLE_MASK) + && !(s->uart_status & R_STATUS_RXFULL_MASK)) { + return 1; + } + + return 0; +} + +static void ibex_uart_receive(void *opaque, const uint8_t *buf, int size) +{ + IbexUartState *s = opaque; + uint8_t rx_fifo_level = (s->uart_fifo_ctrl & R_FIFO_CTRL_RXILVL_MASK) + >> R_FIFO_CTRL_RXILVL_SHIFT; + + s->uart_rdata = *buf; + + s->uart_status &= ~R_STATUS_RXIDLE_MASK; + s->uart_status &= ~R_STATUS_RXEMPTY_MASK; + /* The RXFULL is set after receiving a single byte + * as the FIFO buffers are not yet implemented. 
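+ * Reading RDATA clears RXFULL again (see ibex_uart_read), so the + * character backend is throttled to one byte per guest read.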
+ */ + s->uart_status |= R_STATUS_RXFULL_MASK; + s->rx_level += 1; + + if (size > rx_fifo_level) { + s->uart_intr_state |= R_INTR_STATE_RX_WATERMARK_MASK; + } + + ibex_uart_update_irqs(s); +} + +static gboolean ibex_uart_xmit(void *do_not_use, GIOCondition cond, + void *opaque) +{ + IbexUartState *s = opaque; + uint8_t tx_fifo_level = (s->uart_fifo_ctrl & R_FIFO_CTRL_TXILVL_MASK) + >> R_FIFO_CTRL_TXILVL_SHIFT; + int ret; + + /* instantly drain the FIFO when there's no back-end */ + if (!qemu_chr_fe_backend_connected(&s->chr)) { + s->tx_level = 0; + return FALSE; + } + + if (!s->tx_level) { + s->uart_status &= ~R_STATUS_TXFULL_MASK; + s->uart_status |= R_STATUS_TXEMPTY_MASK; + s->uart_intr_state |= R_INTR_STATE_TX_EMPTY_MASK; + s->uart_intr_state &= ~R_INTR_STATE_TX_WATERMARK_MASK; + ibex_uart_update_irqs(s); + return FALSE; + } + + ret = qemu_chr_fe_write(&s->chr, s->tx_fifo, s->tx_level); + + if (ret >= 0) { + s->tx_level -= ret; + memmove(s->tx_fifo, s->tx_fifo + ret, s->tx_level); + } + + if (s->tx_level) { + guint r = qemu_chr_fe_add_watch(&s->chr, G_IO_OUT | G_IO_HUP, + ibex_uart_xmit, s); + if (!r) { + s->tx_level = 0; + return FALSE; + } + } + + /* Clear the TX Full bit */ + if (s->tx_level != IBEX_UART_TX_FIFO_SIZE) { + s->uart_status &= ~R_STATUS_TXFULL_MASK; + } + + /* Disable the TX_WATERMARK IRQ */ + if (s->tx_level < tx_fifo_level) { + s->uart_intr_state &= ~R_INTR_STATE_TX_WATERMARK_MASK; + } + + /* Set TX empty */ + if (s->tx_level == 0) { + s->uart_status |= R_STATUS_TXEMPTY_MASK; + s->uart_intr_state |= R_INTR_STATE_TX_EMPTY_MASK; + } + + ibex_uart_update_irqs(s); + return FALSE; +} + +static void uart_write_tx_fifo(IbexUartState *s, const uint8_t *buf, + int size) +{ + uint64_t current_time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + uint8_t tx_fifo_level = (s->uart_fifo_ctrl & R_FIFO_CTRL_TXILVL_MASK) + >> R_FIFO_CTRL_TXILVL_SHIFT; + + if (size > IBEX_UART_TX_FIFO_SIZE - s->tx_level) { + size = IBEX_UART_TX_FIFO_SIZE - s->tx_level; + qemu_log_mask(LOG_GUEST_ERROR, "ibex_uart: TX FIFO overflow"); + } + + memcpy(s->tx_fifo + s->tx_level, buf, size); + s->tx_level += size; + + if (s->tx_level > 0) { + s->uart_status &= ~R_STATUS_TXEMPTY_MASK; + } + + if (s->tx_level >= tx_fifo_level) { + s->uart_intr_state |= R_INTR_STATE_TX_WATERMARK_MASK; + ibex_uart_update_irqs(s); + } + + if (s->tx_level == IBEX_UART_TX_FIFO_SIZE) { + s->uart_status |= R_STATUS_TXFULL_MASK; + } + + timer_mod(s->fifo_trigger_handle, current_time + + (s->char_tx_time * 4)); +} + +static void ibex_uart_reset(DeviceState *dev) +{ + IbexUartState *s = IBEX_UART(dev); + + s->uart_intr_state = 0x00000000; + s->uart_intr_enable = 0x00000000; + s->uart_ctrl = 0x00000000; + s->uart_status = 0x0000003c; + s->uart_rdata = 0x00000000; + s->uart_fifo_ctrl = 0x00000000; + s->uart_fifo_status = 0x00000000; + s->uart_ovrd = 0x00000000; + s->uart_val = 0x00000000; + s->uart_timeout_ctrl = 0x00000000; + + s->tx_level = 0; + s->rx_level = 0; + + s->char_tx_time = (NANOSECONDS_PER_SECOND / 230400) * 10; + + ibex_uart_update_irqs(s); +} + +static uint64_t ibex_uart_get_baud(IbexUartState *s) +{ + uint64_t baud; + + baud = ((s->uart_ctrl & R_CTRL_NCO_MASK) >> 16); + baud *= clock_get_hz(s->f_clk); + baud >>= 20; + + return baud; +} + +static uint64_t ibex_uart_read(void *opaque, hwaddr addr, + unsigned int size) +{ + IbexUartState *s = opaque; + uint64_t retvalue = 0; + + switch (addr >> 2) { + case R_INTR_STATE: + retvalue = s->uart_intr_state; + break; + case R_INTR_ENABLE: + retvalue =
s->uart_intr_enable; + break; + case R_INTR_TEST: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: wdata is write only\n", __func__); + break; + + case R_CTRL: + retvalue = s->uart_ctrl; + break; + case R_STATUS: + retvalue = s->uart_status; + break; + + case R_RDATA: + retvalue = s->uart_rdata; + if ((s->uart_ctrl & R_CTRL_RX_ENABLE_MASK) && (s->rx_level > 0)) { + qemu_chr_fe_accept_input(&s->chr); + + s->rx_level -= 1; + s->uart_status &= ~R_STATUS_RXFULL_MASK; + if (s->rx_level == 0) { + s->uart_status |= R_STATUS_RXIDLE_MASK; + s->uart_status |= R_STATUS_RXEMPTY_MASK; + } + } + break; + case R_WDATA: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: wdata is write only\n", __func__); + break; + + case R_FIFO_CTRL: + retvalue = s->uart_fifo_ctrl; + break; + case R_FIFO_STATUS: + retvalue = s->uart_fifo_status; + + retvalue |= (s->rx_level & 0x1F) << R_FIFO_STATUS_RXLVL_SHIFT; + retvalue |= (s->tx_level & 0x1F) << R_FIFO_STATUS_TXLVL_SHIFT; + + qemu_log_mask(LOG_UNIMP, + "%s: RX fifos are not supported\n", __func__); + break; + + case R_OVRD: + retvalue = s->uart_ovrd; + qemu_log_mask(LOG_UNIMP, + "%s: ovrd is not supported\n", __func__); + break; + case R_VAL: + retvalue = s->uart_val; + qemu_log_mask(LOG_UNIMP, + "%s: val is not supported\n", __func__); + break; + case R_TIMEOUT_CTRL: + retvalue = s->uart_timeout_ctrl; + qemu_log_mask(LOG_UNIMP, + "%s: timeout_ctrl is not supported\n", __func__); + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad offset 0x%"HWADDR_PRIx"\n", __func__, addr); + return 0; + } + + return retvalue; +} + +static void ibex_uart_write(void *opaque, hwaddr addr, + uint64_t val64, unsigned int size) +{ + IbexUartState *s = opaque; + uint32_t value = val64; + + switch (addr >> 2) { + case R_INTR_STATE: + /* Write 1 clear */ + s->uart_intr_state &= ~value; + ibex_uart_update_irqs(s); + break; + case R_INTR_ENABLE: + s->uart_intr_enable = value; + ibex_uart_update_irqs(s); + break; + case R_INTR_TEST: + s->uart_intr_state |= value; + ibex_uart_update_irqs(s); + break; + + case R_CTRL: + s->uart_ctrl = value; + + if (value & R_CTRL_NF_MASK) { + qemu_log_mask(LOG_UNIMP, + "%s: UART_CTRL_NF is not supported\n", __func__); + } + if (value & R_CTRL_SLPBK_MASK) { + qemu_log_mask(LOG_UNIMP, + "%s: UART_CTRL_SLPBK is not supported\n", __func__); + } + if (value & R_CTRL_LLPBK_MASK) { + qemu_log_mask(LOG_UNIMP, + "%s: UART_CTRL_LLPBK is not supported\n", __func__); + } + if (value & R_CTRL_PARITY_EN_MASK) { + qemu_log_mask(LOG_UNIMP, + "%s: UART_CTRL_PARITY_EN is not supported\n", + __func__); + } + if (value & R_CTRL_PARITY_ODD_MASK) { + qemu_log_mask(LOG_UNIMP, + "%s: UART_CTRL_PARITY_ODD is not supported\n", + __func__); + } + if (value & R_CTRL_RXBLVL_MASK) { + qemu_log_mask(LOG_UNIMP, + "%s: UART_CTRL_RXBLVL is not supported\n", __func__); + } + if (value & R_CTRL_NCO_MASK) { + uint64_t baud = ibex_uart_get_baud(s); + + s->char_tx_time = (NANOSECONDS_PER_SECOND / baud) * 10; + } + break; + case R_STATUS: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: status is read only\n", __func__); + break; + + case R_RDATA: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: rdata is read only\n", __func__); + break; + case R_WDATA: + uart_write_tx_fifo(s, (uint8_t *) &value, 1); + break; + + case R_FIFO_CTRL: + s->uart_fifo_ctrl = value; + + if (value & R_FIFO_CTRL_RXRST_MASK) { + s->rx_level = 0; + qemu_log_mask(LOG_UNIMP, + "%s: RX fifos are not supported\n", __func__); + } + if (value & R_FIFO_CTRL_TXRST_MASK) { + s->tx_level = 0; + } + break; + case R_FIFO_STATUS: + qemu_log_mask(LOG_GUEST_ERROR, + 
"%s: fifo_status is read only\n", __func__); + break; + + case R_OVRD: + s->uart_ovrd = value; + qemu_log_mask(LOG_UNIMP, + "%s: ovrd is not supported\n", __func__); + break; + case R_VAL: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: val is read only\n", __func__); + break; + case R_TIMEOUT_CTRL: + s->uart_timeout_ctrl = value; + qemu_log_mask(LOG_UNIMP, + "%s: timeout_ctrl is not supported\n", __func__); + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad offset 0x%"HWADDR_PRIx"\n", __func__, addr); + } +} + +static void ibex_uart_clk_update(void *opaque, ClockEvent event) +{ + IbexUartState *s = opaque; + + /* recompute uart's speed on clock change */ + uint64_t baud = ibex_uart_get_baud(s); + + s->char_tx_time = (NANOSECONDS_PER_SECOND / baud) * 10; +} + +static void fifo_trigger_update(void *opaque) +{ + IbexUartState *s = opaque; + + if (s->uart_ctrl & R_CTRL_TX_ENABLE_MASK) { + ibex_uart_xmit(NULL, G_IO_OUT, s); + } +} + +static const MemoryRegionOps ibex_uart_ops = { + .read = ibex_uart_read, + .write = ibex_uart_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .impl.min_access_size = 4, + .impl.max_access_size = 4, +}; + +static int ibex_uart_post_load(void *opaque, int version_id) +{ + IbexUartState *s = opaque; + + ibex_uart_update_irqs(s); + return 0; +} + +static const VMStateDescription vmstate_ibex_uart = { + .name = TYPE_IBEX_UART, + .version_id = 1, + .minimum_version_id = 1, + .post_load = ibex_uart_post_load, + .fields = (VMStateField[]) { + VMSTATE_UINT8_ARRAY(tx_fifo, IbexUartState, + IBEX_UART_TX_FIFO_SIZE), + VMSTATE_UINT32(tx_level, IbexUartState), + VMSTATE_UINT64(char_tx_time, IbexUartState), + VMSTATE_TIMER_PTR(fifo_trigger_handle, IbexUartState), + VMSTATE_UINT32(uart_intr_state, IbexUartState), + VMSTATE_UINT32(uart_intr_enable, IbexUartState), + VMSTATE_UINT32(uart_ctrl, IbexUartState), + VMSTATE_UINT32(uart_status, IbexUartState), + VMSTATE_UINT32(uart_rdata, IbexUartState), + VMSTATE_UINT32(uart_fifo_ctrl, IbexUartState), + VMSTATE_UINT32(uart_fifo_status, IbexUartState), + VMSTATE_UINT32(uart_ovrd, IbexUartState), + VMSTATE_UINT32(uart_val, IbexUartState), + VMSTATE_UINT32(uart_timeout_ctrl, IbexUartState), + VMSTATE_END_OF_LIST() + } +}; + +static Property ibex_uart_properties[] = { + DEFINE_PROP_CHR("chardev", IbexUartState, chr), + DEFINE_PROP_END_OF_LIST(), +}; + +static void ibex_uart_init(Object *obj) +{ + IbexUartState *s = IBEX_UART(obj); + + s->f_clk = qdev_init_clock_in(DEVICE(obj), "f_clock", + ibex_uart_clk_update, s, ClockUpdate); + clock_set_hz(s->f_clk, IBEX_UART_CLOCK); + + sysbus_init_irq(SYS_BUS_DEVICE(obj), &s->tx_watermark); + sysbus_init_irq(SYS_BUS_DEVICE(obj), &s->rx_watermark); + sysbus_init_irq(SYS_BUS_DEVICE(obj), &s->tx_empty); + sysbus_init_irq(SYS_BUS_DEVICE(obj), &s->rx_overflow); + + memory_region_init_io(&s->mmio, obj, &ibex_uart_ops, s, + TYPE_IBEX_UART, 0x400); + sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->mmio); +} + +static void ibex_uart_realize(DeviceState *dev, Error **errp) +{ + IbexUartState *s = IBEX_UART(dev); + + s->fifo_trigger_handle = timer_new_ns(QEMU_CLOCK_VIRTUAL, + fifo_trigger_update, s); + + qemu_chr_fe_set_handlers(&s->chr, ibex_uart_can_receive, + ibex_uart_receive, NULL, NULL, + s, NULL, true); +} + +static void ibex_uart_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = ibex_uart_reset; + dc->realize = ibex_uart_realize; + dc->vmsd = &vmstate_ibex_uart; + device_class_set_props(dc, ibex_uart_properties); + set_bit(DEVICE_CATEGORY_INPUT, 
dc->categories); +} + +static const TypeInfo ibex_uart_info = { + .name = TYPE_IBEX_UART, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(IbexUartState), + .instance_init = ibex_uart_init, + .class_init = ibex_uart_class_init, +}; + +static void ibex_uart_register_types(void) +{ + type_register_static(&ibex_uart_info); +} + +type_init(ibex_uart_register_types) diff --git a/hw/char/imx_serial.c b/hw/char/imx_serial.c new file mode 100644 index 000000000..ee1375e26 --- /dev/null +++ b/hw/char/imx_serial.c @@ -0,0 +1,392 @@ +/* + * IMX31 UARTS + * + * Copyright (c) 2008 OKL + * Originally Written by Hans Jiang + * Copyright (c) 2011 NICTA Pty Ltd. + * Updated by Jean-Christophe Dubois + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + * This is a `bare-bones' implementation of the IMX series serial ports. + * TODO: + * -- implement FIFOs. The real hardware has 32 word transmit + * and receive FIFOs; we currently use a 1-char buffer + * -- implement DMA + * -- implement BAUD-rate and modem lines, for when the backend + * is a real serial device. + */ + +#include "qemu/osdep.h" +#include "hw/char/imx_serial.h" +#include "hw/irq.h" +#include "hw/qdev-properties.h" +#include "hw/qdev-properties-system.h" +#include "migration/vmstate.h" +#include "qemu/log.h" +#include "qemu/module.h" + +#ifndef DEBUG_IMX_UART +#define DEBUG_IMX_UART 0 +#endif + +#define DPRINTF(fmt, args...) \ + do { \ + if (DEBUG_IMX_UART) { \ + fprintf(stderr, "[%s]%s: " fmt , TYPE_IMX_SERIAL, \ + __func__, ##args); \ + } \ + } while (0) + +static const VMStateDescription vmstate_imx_serial = { + .name = TYPE_IMX_SERIAL, + .version_id = 2, + .minimum_version_id = 2, + .fields = (VMStateField[]) { + VMSTATE_INT32(readbuff, IMXSerialState), + VMSTATE_UINT32(usr1, IMXSerialState), + VMSTATE_UINT32(usr2, IMXSerialState), + VMSTATE_UINT32(ucr1, IMXSerialState), + VMSTATE_UINT32(uts1, IMXSerialState), + VMSTATE_UINT32(onems, IMXSerialState), + VMSTATE_UINT32(ufcr, IMXSerialState), + VMSTATE_UINT32(ubmr, IMXSerialState), + VMSTATE_UINT32(ubrc, IMXSerialState), + VMSTATE_UINT32(ucr3, IMXSerialState), + VMSTATE_UINT32(ucr4, IMXSerialState), + VMSTATE_END_OF_LIST() + }, +}; + +static void imx_update(IMXSerialState *s) +{ + uint32_t usr1; + uint32_t usr2; + uint32_t mask; + + /* + * Lucky for us TRDY and RRDY have the same offset in both USR1 and + * UCR1, so we can get away with something as simple as the + * following: + */ + usr1 = s->usr1 & s->ucr1 & (USR1_TRDY | USR1_RRDY); + /* + * Bits that we want in USR2 are not as conveniently laid out, + * unfortunately. + */ + mask = (s->ucr1 & UCR1_TXMPTYEN) ? USR2_TXFE : 0; + /* + * TCEN and TXDC are both bit 3 + * RDR and DREN are both bit 0 + */ + mask |= s->ucr4 & (UCR4_TCEN | UCR4_DREN); + + usr2 = s->usr2 & mask; + + qemu_set_irq(s->irq, usr1 || usr2); +} + +static void imx_serial_reset(IMXSerialState *s) +{ + + s->usr1 = USR1_TRDY | USR1_RXDS; + /* + * Fake attachment of a terminal: assert RTS. + */ + s->usr1 |= USR1_RTSS; + s->usr2 = USR2_TXFE | USR2_TXDC | USR2_DCDIN; + s->uts1 = UTS1_RXEMPTY | UTS1_TXEMPTY; + s->ucr1 = 0; + s->ucr2 = UCR2_SRST; + s->ucr3 = 0x700; + s->ubmr = 0; + s->ubrc = 4; + s->readbuff = URXD_ERR; +} + +static void imx_serial_reset_at_boot(DeviceState *dev) +{ + IMXSerialState *s = IMX_SERIAL(dev); + + imx_serial_reset(s); + + /* + * enable the UART on boot, so messages from the Linux decompressor + * are visible.
On real hardware this is done by the boot rom + * before anything else is loaded. + */ + s->ucr1 = UCR1_UARTEN; + s->ucr2 = UCR2_TXEN; + +} + +static uint64_t imx_serial_read(void *opaque, hwaddr offset, + unsigned size) +{ + IMXSerialState *s = (IMXSerialState *)opaque; + uint32_t c; + + DPRINTF("read(offset=0x%" HWADDR_PRIx ")\n", offset); + + switch (offset >> 2) { + case 0x0: /* URXD */ + c = s->readbuff; + if (!(s->uts1 & UTS1_RXEMPTY)) { + /* Character is valid */ + c |= URXD_CHARRDY; + s->usr1 &= ~USR1_RRDY; + s->usr2 &= ~USR2_RDR; + s->uts1 |= UTS1_RXEMPTY; + imx_update(s); + qemu_chr_fe_accept_input(&s->chr); + } + return c; + + case 0x20: /* UCR1 */ + return s->ucr1; + + case 0x21: /* UCR2 */ + return s->ucr2; + + case 0x25: /* USR1 */ + return s->usr1; + + case 0x26: /* USR2 */ + return s->usr2; + + case 0x2A: /* BRM Modulator */ + return s->ubmr; + + case 0x2B: /* Baud Rate Count */ + return s->ubrc; + + case 0x2d: /* Test register */ + return s->uts1; + + case 0x24: /* UFCR */ + return s->ufcr; + + case 0x2c: + return s->onems; + + case 0x22: /* UCR3 */ + return s->ucr3; + + case 0x23: /* UCR4 */ + return s->ucr4; + + case 0x29: /* BRM Incremental */ + return 0x0; /* TODO */ + + default: + qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Bad register at offset 0x%" + HWADDR_PRIx "\n", TYPE_IMX_SERIAL, __func__, offset); + return 0; + } +} + +static void imx_serial_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + IMXSerialState *s = (IMXSerialState *)opaque; + Chardev *chr = qemu_chr_fe_get_driver(&s->chr); + unsigned char ch; + + DPRINTF("write(offset=0x%" HWADDR_PRIx ", value = 0x%x) to %s\n", + offset, (unsigned int)value, chr ? chr->label : "NODEV"); + + switch (offset >> 2) { + case 0x10: /* UTXD */ + ch = value; + if (s->ucr2 & UCR2_TXEN) { + /* XXX this blocks entire thread. Rewrite to use + * qemu_chr_fe_write and background I/O callbacks */ + qemu_chr_fe_write_all(&s->chr, &ch, 1); + s->usr1 &= ~USR1_TRDY; + s->usr2 &= ~USR2_TXDC; + imx_update(s); + s->usr1 |= USR1_TRDY; + s->usr2 |= USR2_TXDC; + imx_update(s); + } + break; + + case 0x20: /* UCR1 */ + s->ucr1 = value & 0xffff; + + DPRINTF("write(ucr1=%x)\n", (unsigned int)value); + + imx_update(s); + break; + + case 0x21: /* UCR2 */ + /* + * Only a few bits in control register 2 are implemented as yet. + * If it's intended to use a real serial device as a back-end, this + * register will have to be implemented more fully. 
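+ * For now only SRST and RXEN have side effects below; TXEN is + * honoured separately in the UTXD transmit path above.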
+ */ + if (!(value & UCR2_SRST)) { + imx_serial_reset(s); + imx_update(s); + value |= UCR2_SRST; + } + if (value & UCR2_RXEN) { + if (!(s->ucr2 & UCR2_RXEN)) { + qemu_chr_fe_accept_input(&s->chr); + } + } + s->ucr2 = value & 0xffff; + break; + + case 0x25: /* USR1 */ + value &= USR1_AWAKE | USR1_AIRINT | USR1_DTRD | USR1_AGTIM | + USR1_FRAMERR | USR1_ESCF | USR1_RTSD | USR1_PARTYER; + s->usr1 &= ~value; + break; + + case 0x26: /* USR2 */ + /* + * Writing 1 to some bits clears them; all other + * values are ignored + */ + value &= USR2_ADET | USR2_DTRF | USR2_IDLE | USR2_ACST | + USR2_RIDELT | USR2_IRINT | USR2_WAKE | + USR2_DCDDELT | USR2_RTSF | USR2_BRCD | USR2_ORE; + s->usr2 &= ~value; + break; + + /* + * Linux expects to see what it writes to these registers + * We don't currently alter the baud rate + */ + case 0x29: /* UBIR */ + s->ubrc = value & 0xffff; + break; + + case 0x2a: /* UBMR */ + s->ubmr = value & 0xffff; + break; + + case 0x2c: /* One ms reg */ + s->onems = value & 0xffff; + break; + + case 0x24: /* FIFO control register */ + s->ufcr = value & 0xffff; + break; + + case 0x22: /* UCR3 */ + s->ucr3 = value & 0xffff; + break; + + case 0x23: /* UCR4 */ + s->ucr4 = value & 0xffff; + imx_update(s); + break; + + case 0x2d: /* UTS1 */ + qemu_log_mask(LOG_UNIMP, "[%s]%s: Unimplemented reg 0x%" + HWADDR_PRIx "\n", TYPE_IMX_SERIAL, __func__, offset); + /* TODO */ + break; + + default: + qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Bad register at offset 0x%" + HWADDR_PRIx "\n", TYPE_IMX_SERIAL, __func__, offset); + } +} + +static int imx_can_receive(void *opaque) +{ + IMXSerialState *s = (IMXSerialState *)opaque; + return !(s->usr1 & USR1_RRDY); +} + +static void imx_put_data(void *opaque, uint32_t value) +{ + IMXSerialState *s = (IMXSerialState *)opaque; + + DPRINTF("received char\n"); + + s->usr1 |= USR1_RRDY; + s->usr2 |= USR2_RDR; + s->uts1 &= ~UTS1_RXEMPTY; + s->readbuff = value; + if (value & URXD_BRK) { + s->usr2 |= USR2_BRCD; + } + imx_update(s); +} + +static void imx_receive(void *opaque, const uint8_t *buf, int size) +{ + imx_put_data(opaque, *buf); +} + +static void imx_event(void *opaque, QEMUChrEvent event) +{ + if (event == CHR_EVENT_BREAK) { + imx_put_data(opaque, URXD_BRK | URXD_FRMERR | URXD_ERR); + } +} + + +static const struct MemoryRegionOps imx_serial_ops = { + .read = imx_serial_read, + .write = imx_serial_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void imx_serial_realize(DeviceState *dev, Error **errp) +{ + IMXSerialState *s = IMX_SERIAL(dev); + + DPRINTF("char dev for uart: %p\n", qemu_chr_fe_get_driver(&s->chr)); + + qemu_chr_fe_set_handlers(&s->chr, imx_can_receive, imx_receive, + imx_event, NULL, s, NULL, true); +} + +static void imx_serial_init(Object *obj) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + IMXSerialState *s = IMX_SERIAL(obj); + + memory_region_init_io(&s->iomem, obj, &imx_serial_ops, s, + TYPE_IMX_SERIAL, 0x1000); + sysbus_init_mmio(sbd, &s->iomem); + sysbus_init_irq(sbd, &s->irq); +} + +static Property imx_serial_properties[] = { + DEFINE_PROP_CHR("chardev", IMXSerialState, chr), + DEFINE_PROP_END_OF_LIST(), +}; + +static void imx_serial_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = imx_serial_realize; + dc->vmsd = &vmstate_imx_serial; + dc->reset = imx_serial_reset_at_boot; + set_bit(DEVICE_CATEGORY_INPUT, dc->categories); + dc->desc = "i.MX series UART"; + device_class_set_props(dc, imx_serial_properties); +} + +static const TypeInfo imx_serial_info = { + .name = 
TYPE_IMX_SERIAL, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(IMXSerialState), + .instance_init = imx_serial_init, + .class_init = imx_serial_class_init, +}; + +static void imx_serial_register_types(void) +{ + type_register_static(&imx_serial_info); +} + +type_init(imx_serial_register_types) diff --git a/hw/char/ipoctal232.c b/hw/char/ipoctal232.c new file mode 100644 index 000000000..3311e0872 --- /dev/null +++ b/hw/char/ipoctal232.c @@ -0,0 +1,608 @@ +/* + * QEMU GE IP-Octal 232 IndustryPack emulation + * + * Copyright (C) 2012 Igalia, S.L. + * Author: Alberto Garcia + * + * This code is licensed under the GNU GPL v2 or (at your option) any + * later version. + */ + +#include "qemu/osdep.h" +#include "hw/ipack/ipack.h" +#include "hw/irq.h" +#include "hw/qdev-properties.h" +#include "hw/qdev-properties-system.h" +#include "migration/vmstate.h" +#include "qemu/bitops.h" +#include "qemu/module.h" +#include "chardev/char-fe.h" +#include "qom/object.h" + +/* #define DEBUG_IPOCTAL */ + +#ifdef DEBUG_IPOCTAL +#define DPRINTF2(fmt, ...) \ + do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0) +#else +#define DPRINTF2(fmt, ...) do { } while (0) +#endif + +#define DPRINTF(fmt, ...) DPRINTF2("IP-Octal: " fmt, ## __VA_ARGS__) + +#define RX_FIFO_SIZE 3 + +/* The IP-Octal has 8 channels (a-h) + divided into 4 blocks (A-D) */ +#define N_CHANNELS 8 +#define N_BLOCKS 4 + +#define REG_MRa 0x01 +#define REG_MRb 0x11 +#define REG_SRa 0x03 +#define REG_SRb 0x13 +#define REG_CSRa 0x03 +#define REG_CSRb 0x13 +#define REG_CRa 0x05 +#define REG_CRb 0x15 +#define REG_RHRa 0x07 +#define REG_RHRb 0x17 +#define REG_THRa 0x07 +#define REG_THRb 0x17 +#define REG_ACR 0x09 +#define REG_ISR 0x0B +#define REG_IMR 0x0B +#define REG_OPCR 0x1B + +#define CR_ENABLE_RX BIT(0) +#define CR_DISABLE_RX BIT(1) +#define CR_ENABLE_TX BIT(2) +#define CR_DISABLE_TX BIT(3) +#define CR_CMD(cr) ((cr) >> 4) +#define CR_NO_OP 0 +#define CR_RESET_MR 1 +#define CR_RESET_RX 2 +#define CR_RESET_TX 3 +#define CR_RESET_ERR 4 +#define CR_RESET_BRKINT 5 +#define CR_START_BRK 6 +#define CR_STOP_BRK 7 +#define CR_ASSERT_RTSN 8 +#define CR_NEGATE_RTSN 9 +#define CR_TIMEOUT_ON 10 +#define CR_TIMEOUT_OFF 12 + +#define SR_RXRDY BIT(0) +#define SR_FFULL BIT(1) +#define SR_TXRDY BIT(2) +#define SR_TXEMT BIT(3) +#define SR_OVERRUN BIT(4) +#define SR_PARITY BIT(5) +#define SR_FRAMING BIT(6) +#define SR_BREAK BIT(7) + +#define ISR_TXRDYA BIT(0) +#define ISR_RXRDYA BIT(1) +#define ISR_BREAKA BIT(2) +#define ISR_CNTRDY BIT(3) +#define ISR_TXRDYB BIT(4) +#define ISR_RXRDYB BIT(5) +#define ISR_BREAKB BIT(6) +#define ISR_MPICHG BIT(7) +#define ISR_TXRDY(CH) (((CH) & 1) ? BIT(4) : BIT(0)) +#define ISR_RXRDY(CH) (((CH) & 1) ? BIT(5) : BIT(1)) +#define ISR_BREAK(CH) (((CH) & 1) ? 
BIT(6) : BIT(2)) + +typedef struct IPOctalState IPOctalState; +typedef struct SCC2698Channel SCC2698Channel; +typedef struct SCC2698Block SCC2698Block; + +struct SCC2698Channel { + IPOctalState *ipoctal; + CharBackend dev; + bool rx_enabled; + uint8_t mr[2]; + uint8_t mr_idx; + uint8_t sr; + uint8_t rhr[RX_FIFO_SIZE]; + uint8_t rhr_idx; + uint8_t rx_pending; +}; + +struct SCC2698Block { + uint8_t imr; + uint8_t isr; +}; + +struct IPOctalState { + IPackDevice parent_obj; + + SCC2698Channel ch[N_CHANNELS]; + SCC2698Block blk[N_BLOCKS]; + uint8_t irq_vector; +}; + +#define TYPE_IPOCTAL "ipoctal232" + +OBJECT_DECLARE_SIMPLE_TYPE(IPOctalState, IPOCTAL) + +static const VMStateDescription vmstate_scc2698_channel = { + .name = "scc2698_channel", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_BOOL(rx_enabled, SCC2698Channel), + VMSTATE_UINT8_ARRAY(mr, SCC2698Channel, 2), + VMSTATE_UINT8(mr_idx, SCC2698Channel), + VMSTATE_UINT8(sr, SCC2698Channel), + VMSTATE_UINT8_ARRAY(rhr, SCC2698Channel, RX_FIFO_SIZE), + VMSTATE_UINT8(rhr_idx, SCC2698Channel), + VMSTATE_UINT8(rx_pending, SCC2698Channel), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_scc2698_block = { + .name = "scc2698_block", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT8(imr, SCC2698Block), + VMSTATE_UINT8(isr, SCC2698Block), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_ipoctal = { + .name = "ipoctal232", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_IPACK_DEVICE(parent_obj, IPOctalState), + VMSTATE_STRUCT_ARRAY(ch, IPOctalState, N_CHANNELS, 1, + vmstate_scc2698_channel, SCC2698Channel), + VMSTATE_STRUCT_ARRAY(blk, IPOctalState, N_BLOCKS, 1, + vmstate_scc2698_block, SCC2698Block), + VMSTATE_UINT8(irq_vector, IPOctalState), + VMSTATE_END_OF_LIST() + } +}; + +/* data[10] is 0x0C, not 0x0B as the doc says */ +static const uint8_t id_prom_data[] = { + 0x49, 0x50, 0x41, 0x43, 0xF0, 0x22, + 0xA1, 0x00, 0x00, 0x00, 0x0C, 0xCC +}; + +static void update_irq(IPOctalState *dev, unsigned block) +{ + IPackDevice *idev = IPACK_DEVICE(dev); + /* Blocks A and B interrupt on INT0#, C and D on INT1#. + Thus, to get the status we have to check two blocks. 
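+ blk[block ^ 1] is the partner block sharing that line (A pairs + with B, C with D), and block / 2 selects the INT# number.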
*/ + SCC2698Block *blk0 = &dev->blk[block]; + SCC2698Block *blk1 = &dev->blk[block^1]; + unsigned intno = block / 2; + + if ((blk0->isr & blk0->imr) || (blk1->isr & blk1->imr)) { + qemu_irq_raise(idev->irq[intno]); + } else { + qemu_irq_lower(idev->irq[intno]); + } +} + +static void write_cr(IPOctalState *dev, unsigned channel, uint8_t val) +{ + SCC2698Channel *ch = &dev->ch[channel]; + SCC2698Block *blk = &dev->blk[channel / 2]; + + DPRINTF("Write CR%c %u: ", channel + 'a', val); + + /* The lower 4 bits are used to enable and disable Tx and Rx */ + if (val & CR_ENABLE_RX) { + DPRINTF2("Rx on, "); + ch->rx_enabled = true; + } + if (val & CR_DISABLE_RX) { + DPRINTF2("Rx off, "); + ch->rx_enabled = false; + } + if (val & CR_ENABLE_TX) { + DPRINTF2("Tx on, "); + ch->sr |= SR_TXRDY | SR_TXEMT; + blk->isr |= ISR_TXRDY(channel); + } + if (val & CR_DISABLE_TX) { + DPRINTF2("Tx off, "); + ch->sr &= ~(SR_TXRDY | SR_TXEMT); + blk->isr &= ~ISR_TXRDY(channel); + } + + DPRINTF2("cmd: "); + + /* The rest of the bits implement different commands */ + switch (CR_CMD(val)) { + case CR_NO_OP: + DPRINTF2("none"); + break; + case CR_RESET_MR: + DPRINTF2("reset MR"); + ch->mr_idx = 0; + break; + case CR_RESET_RX: + DPRINTF2("reset Rx"); + ch->rx_enabled = false; + ch->rx_pending = 0; + ch->sr &= ~SR_RXRDY; + blk->isr &= ~ISR_RXRDY(channel); + break; + case CR_RESET_TX: + DPRINTF2("reset Tx"); + ch->sr &= ~(SR_TXRDY | SR_TXEMT); + blk->isr &= ~ISR_TXRDY(channel); + break; + case CR_RESET_ERR: + DPRINTF2("reset err"); + ch->sr &= ~(SR_OVERRUN | SR_PARITY | SR_FRAMING | SR_BREAK); + break; + case CR_RESET_BRKINT: + DPRINTF2("reset brk ch int"); + blk->isr &= ~(ISR_BREAKA | ISR_BREAKB); + break; + default: + DPRINTF2("unsupported 0x%x", CR_CMD(val)); + } + + DPRINTF2("\n"); +} + +static uint16_t io_read(IPackDevice *ip, uint8_t addr) +{ + IPOctalState *dev = IPOCTAL(ip); + uint16_t ret = 0; + /* addr[7:6]: block (A-D) + addr[7:5]: channel (a-h) + addr[5:0]: register */ + unsigned block = addr >> 5; + unsigned channel = addr >> 4; + /* Big endian, accessed using 8-bit bytes at odd locations */ + unsigned offset = (addr & 0x1F) ^ 1; + SCC2698Channel *ch = &dev->ch[channel]; + SCC2698Block *blk = &dev->blk[block]; + uint8_t old_isr = blk->isr; + + switch (offset) { + + case REG_MRa: + case REG_MRb: + ret = ch->mr[ch->mr_idx]; + DPRINTF("Read MR%u%c: 0x%x\n", ch->mr_idx + 1, channel + 'a', ret); + ch->mr_idx = 1; + break; + + case REG_SRa: + case REG_SRb: + ret = ch->sr; + DPRINTF("Read SR%c: 0x%x\n", channel + 'a', ret); + break; + + case REG_RHRa: + case REG_RHRb: + ret = ch->rhr[ch->rhr_idx]; + if (ch->rx_pending > 0) { + ch->rx_pending--; + if (ch->rx_pending == 0) { + ch->sr &= ~SR_RXRDY; + blk->isr &= ~ISR_RXRDY(channel); + qemu_chr_fe_accept_input(&ch->dev); + } else { + ch->rhr_idx = (ch->rhr_idx + 1) % RX_FIFO_SIZE; + } + if (ch->sr & SR_BREAK) { + ch->sr &= ~SR_BREAK; + blk->isr |= ISR_BREAK(channel); + } + } + DPRINTF("Read RHR%c (0x%x)\n", channel + 'a', ret); + break; + + case REG_ISR: + ret = blk->isr; + DPRINTF("Read ISR%c: 0x%x\n", block + 'A', ret); + break; + + default: + DPRINTF("Read unknown/unsupported register 0x%02x\n", offset); + } + + if (old_isr != blk->isr) { + update_irq(dev, block); + } + + return ret; +} + +static void io_write(IPackDevice *ip, uint8_t addr, uint16_t val) +{ + IPOctalState *dev = IPOCTAL(ip); + unsigned reg = val & 0xFF; + /* addr[7:6]: block (A-D) + addr[7:5]: channel (a-h) + addr[5:0]: register */ + unsigned block = addr >> 5; + unsigned channel = addr >> 4; + /* Big 
endian, accessed using 8-bit bytes at odd locations */ + unsigned offset = (addr & 0x1F) ^ 1; + SCC2698Channel *ch = &dev->ch[channel]; + SCC2698Block *blk = &dev->blk[block]; + uint8_t old_isr = blk->isr; + uint8_t old_imr = blk->imr; + + switch (offset) { + + case REG_MRa: + case REG_MRb: + ch->mr[ch->mr_idx] = reg; + DPRINTF("Write MR%u%c 0x%x\n", ch->mr_idx + 1, channel + 'a', reg); + ch->mr_idx = 1; + break; + + /* Not implemented */ + case REG_CSRa: + case REG_CSRb: + DPRINTF("Write CSR%c: 0x%x\n", channel + 'a', reg); + break; + + case REG_CRa: + case REG_CRb: + write_cr(dev, channel, reg); + break; + + case REG_THRa: + case REG_THRb: + if (ch->sr & SR_TXRDY) { + uint8_t thr = reg; + DPRINTF("Write THR%c (0x%x)\n", channel + 'a', reg); + /* XXX this blocks entire thread. Rewrite to use + * qemu_chr_fe_write and background I/O callbacks */ + qemu_chr_fe_write_all(&ch->dev, &thr, 1); + } else { + DPRINTF("Write THR%c (0x%x), Tx disabled\n", channel + 'a', reg); + } + break; + + /* Not implemented */ + case REG_ACR: + DPRINTF("Write ACR%c 0x%x\n", block + 'A', val); + break; + + case REG_IMR: + DPRINTF("Write IMR%c 0x%x\n", block + 'A', val); + blk->imr = reg; + break; + + /* Not implemented */ + case REG_OPCR: + DPRINTF("Write OPCR%c 0x%x\n", block + 'A', val); + break; + + default: + DPRINTF("Write unknown/unsupported register 0x%02x %u\n", offset, val); + } + + if (old_isr != blk->isr || old_imr != blk->imr) { + update_irq(dev, block); + } +} + +static uint16_t id_read(IPackDevice *ip, uint8_t addr) +{ + uint16_t ret = 0; + unsigned pos = addr / 2; /* The ID PROM data is stored every other byte */ + + if (pos < ARRAY_SIZE(id_prom_data)) { + ret = id_prom_data[pos]; + } else { + DPRINTF("Attempt to read unavailable PROM data at 0x%x\n", addr); + } + + return ret; +} + +static void id_write(IPackDevice *ip, uint8_t addr, uint16_t val) +{ + IPOctalState *dev = IPOCTAL(ip); + if (addr == 1) { + DPRINTF("Write IRQ vector: %u\n", (unsigned) val); + dev->irq_vector = val; /* Undocumented, but the hw works like that */ + } else { + DPRINTF("Attempt to write 0x%x to 0x%x\n", val, addr); + } +} + +static uint16_t int_read(IPackDevice *ip, uint8_t addr) +{ + IPOctalState *dev = IPOCTAL(ip); + /* Read address 0 to ACK INT0# and address 2 to ACK INT1# */ + if (addr != 0 && addr != 2) { + DPRINTF("Attempt to read from 0x%x\n", addr); + return 0; + } else { + /* Update interrupts if necessary */ + update_irq(dev, addr); + return dev->irq_vector; + } +} + +static void int_write(IPackDevice *ip, uint8_t addr, uint16_t val) +{ + DPRINTF("Attempt to write 0x%x to 0x%x\n", val, addr); +} + +static uint16_t mem_read16(IPackDevice *ip, uint32_t addr) +{ + DPRINTF("Attempt to read from 0x%x\n", addr); + return 0; +} + +static void mem_write16(IPackDevice *ip, uint32_t addr, uint16_t val) +{ + DPRINTF("Attempt to write 0x%x to 0x%x\n", val, addr); +} + +static uint8_t mem_read8(IPackDevice *ip, uint32_t addr) +{ + DPRINTF("Attempt to read from 0x%x\n", addr); + return 0; +} + +static void mem_write8(IPackDevice *ip, uint32_t addr, uint8_t val) +{ + IPOctalState *dev = IPOCTAL(ip); + if (addr == 1) { + DPRINTF("Write IRQ vector: %u\n", (unsigned) val); + dev->irq_vector = val; + } else { + DPRINTF("Attempt to write 0x%x to 0x%x\n", val, addr); + } +} + +static int hostdev_can_receive(void *opaque) +{ + SCC2698Channel *ch = opaque; + int available_bytes = RX_FIFO_SIZE - ch->rx_pending; + return ch->rx_enabled ? 
available_bytes : 0; +} + +static void hostdev_receive(void *opaque, const uint8_t *buf, int size) +{ + SCC2698Channel *ch = opaque; + IPOctalState *dev = ch->ipoctal; + unsigned pos = ch->rhr_idx + ch->rx_pending; + int i; + + assert(size + ch->rx_pending <= RX_FIFO_SIZE); + + /* Copy data to the RxFIFO */ + for (i = 0; i < size; i++) { + pos %= RX_FIFO_SIZE; + ch->rhr[pos++] = buf[i]; + } + + ch->rx_pending += size; + + /* If the RxFIFO was empty raise an interrupt */ + if (!(ch->sr & SR_RXRDY)) { + unsigned block, channel = 0; + /* Find channel number to update the ISR register */ + while (&dev->ch[channel] != ch) { + channel++; + } + block = channel / 2; + dev->blk[block].isr |= ISR_RXRDY(channel); + ch->sr |= SR_RXRDY; + update_irq(dev, block); + } +} + +static void hostdev_event(void *opaque, QEMUChrEvent event) +{ + SCC2698Channel *ch = opaque; + switch (event) { + case CHR_EVENT_OPENED: + DPRINTF("Device %s opened\n", ch->dev->label); + break; + case CHR_EVENT_BREAK: { + uint8_t zero = 0; + DPRINTF("Device %s received break\n", ch->dev->label); + + if (!(ch->sr & SR_BREAK)) { + IPOctalState *dev = ch->ipoctal; + unsigned block, channel = 0; + + while (&dev->ch[channel] != ch) { + channel++; + } + block = channel / 2; + + ch->sr |= SR_BREAK; + dev->blk[block].isr |= ISR_BREAK(channel); + } + + /* Put a zero character in the buffer */ + hostdev_receive(ch, &zero, 1); + } + break; + default: + DPRINTF("Device %s received event %d\n", ch->dev->label, event); + } +} + +static void ipoctal_realize(DeviceState *dev, Error **errp) +{ + IPOctalState *s = IPOCTAL(dev); + unsigned i; + + for (i = 0; i < N_CHANNELS; i++) { + SCC2698Channel *ch = &s->ch[i]; + ch->ipoctal = s; + + /* Redirect IP-Octal channels to host character devices */ + if (qemu_chr_fe_backend_connected(&ch->dev)) { + qemu_chr_fe_set_handlers(&ch->dev, hostdev_can_receive, + hostdev_receive, hostdev_event, + NULL, ch, NULL, true); + DPRINTF("Redirecting channel %u to %s\n", i, ch->dev->label); + } else { + DPRINTF("Could not redirect channel %u, no chardev set\n", i); + } + } +} + +static Property ipoctal_properties[] = { + DEFINE_PROP_CHR("chardev0", IPOctalState, ch[0].dev), + DEFINE_PROP_CHR("chardev1", IPOctalState, ch[1].dev), + DEFINE_PROP_CHR("chardev2", IPOctalState, ch[2].dev), + DEFINE_PROP_CHR("chardev3", IPOctalState, ch[3].dev), + DEFINE_PROP_CHR("chardev4", IPOctalState, ch[4].dev), + DEFINE_PROP_CHR("chardev5", IPOctalState, ch[5].dev), + DEFINE_PROP_CHR("chardev6", IPOctalState, ch[6].dev), + DEFINE_PROP_CHR("chardev7", IPOctalState, ch[7].dev), + DEFINE_PROP_END_OF_LIST(), +}; + +static void ipoctal_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + IPackDeviceClass *ic = IPACK_DEVICE_CLASS(klass); + + ic->realize = ipoctal_realize; + ic->io_read = io_read; + ic->io_write = io_write; + ic->id_read = id_read; + ic->id_write = id_write; + ic->int_read = int_read; + ic->int_write = int_write; + ic->mem_read16 = mem_read16; + ic->mem_write16 = mem_write16; + ic->mem_read8 = mem_read8; + ic->mem_write8 = mem_write8; + + set_bit(DEVICE_CATEGORY_INPUT, dc->categories); + dc->desc = "GE IP-Octal 232 8-channel RS-232 IndustryPack"; + device_class_set_props(dc, ipoctal_properties); + dc->vmsd = &vmstate_ipoctal; +} + +static const TypeInfo ipoctal_info = { + .name = TYPE_IPOCTAL, + .parent = TYPE_IPACK_DEVICE, + .instance_size = sizeof(IPOctalState), + .class_init = ipoctal_class_init, +}; + +static void ipoctal_register_types(void) +{ + type_register_static(&ipoctal_info); +} + 
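+/* Hook ipoctal_register_types() into module init so the ipoctal232 + * type is registered at startup. */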
+type_init(ipoctal_register_types) diff --git a/hw/char/mcf_uart.c b/hw/char/mcf_uart.c new file mode 100644 index 000000000..6fa4ac502 --- /dev/null +++ b/hw/char/mcf_uart.c @@ -0,0 +1,366 @@ +/* + * ColdFire UART emulation. + * + * Copyright (c) 2007 CodeSourcery. + * + * This code is licensed under the GPL + */ + +#include "qemu/osdep.h" +#include "hw/irq.h" +#include "hw/sysbus.h" +#include "qemu/module.h" +#include "qapi/error.h" +#include "hw/m68k/mcf.h" +#include "hw/qdev-properties.h" +#include "hw/qdev-properties-system.h" +#include "chardev/char-fe.h" +#include "qom/object.h" + +struct mcf_uart_state { + SysBusDevice parent_obj; + + MemoryRegion iomem; + uint8_t mr[2]; + uint8_t sr; + uint8_t isr; + uint8_t imr; + uint8_t bg1; + uint8_t bg2; + uint8_t fifo[4]; + uint8_t tb; + int current_mr; + int fifo_len; + int tx_enabled; + int rx_enabled; + qemu_irq irq; + CharBackend chr; +}; + +#define TYPE_MCF_UART "mcf-uart" +OBJECT_DECLARE_SIMPLE_TYPE(mcf_uart_state, MCF_UART) + +/* UART Status Register bits. */ +#define MCF_UART_RxRDY 0x01 +#define MCF_UART_FFULL 0x02 +#define MCF_UART_TxRDY 0x04 +#define MCF_UART_TxEMP 0x08 +#define MCF_UART_OE 0x10 +#define MCF_UART_PE 0x20 +#define MCF_UART_FE 0x40 +#define MCF_UART_RB 0x80 + +/* Interrupt flags. */ +#define MCF_UART_TxINT 0x01 +#define MCF_UART_RxINT 0x02 +#define MCF_UART_DBINT 0x04 +#define MCF_UART_COSINT 0x80 + +/* UMR1 flags. */ +#define MCF_UART_BC0 0x01 +#define MCF_UART_BC1 0x02 +#define MCF_UART_PT 0x04 +#define MCF_UART_PM0 0x08 +#define MCF_UART_PM1 0x10 +#define MCF_UART_ERR 0x20 +#define MCF_UART_RxIRQ 0x40 +#define MCF_UART_RxRTS 0x80 + +static void mcf_uart_update(mcf_uart_state *s) +{ + s->isr &= ~(MCF_UART_TxINT | MCF_UART_RxINT); + if (s->sr & MCF_UART_TxRDY) + s->isr |= MCF_UART_TxINT; + if ((s->sr & ((s->mr[0] & MCF_UART_RxIRQ) + ? MCF_UART_FFULL : MCF_UART_RxRDY)) != 0) + s->isr |= MCF_UART_RxINT; + + qemu_set_irq(s->irq, (s->isr & s->imr) != 0); +} + +uint64_t mcf_uart_read(void *opaque, hwaddr addr, + unsigned size) +{ + mcf_uart_state *s = (mcf_uart_state *)opaque; + switch (addr & 0x3f) { + case 0x00: + return s->mr[s->current_mr]; + case 0x04: + return s->sr; + case 0x0c: + { + uint8_t val; + int i; + + if (s->fifo_len == 0) + return 0; + + val = s->fifo[0]; + s->fifo_len--; + for (i = 0; i < s->fifo_len; i++) + s->fifo[i] = s->fifo[i + 1]; + s->sr &= ~MCF_UART_FFULL; + if (s->fifo_len == 0) + s->sr &= ~MCF_UART_RxRDY; + mcf_uart_update(s); + qemu_chr_fe_accept_input(&s->chr); + return val; + } + case 0x10: + /* TODO: Implement IPCR. */ + return 0; + case 0x14: + return s->isr; + case 0x18: + return s->bg1; + case 0x1c: + return s->bg2; + default: + return 0; + } +} + +/* Update TxRDY flag and set data if present and enabled. */ +static void mcf_uart_do_tx(mcf_uart_state *s) +{ + if (s->tx_enabled && (s->sr & MCF_UART_TxEMP) == 0) { + /* XXX this blocks entire thread. Rewrite to use + * qemu_chr_fe_write and background I/O callbacks */ + qemu_chr_fe_write_all(&s->chr, (unsigned char *)&s->tb, 1); + s->sr |= MCF_UART_TxEMP; + } + if (s->tx_enabled) { + s->sr |= MCF_UART_TxRDY; + } else { + s->sr &= ~MCF_UART_TxRDY; + } +} + +static void mcf_do_command(mcf_uart_state *s, uint8_t cmd) +{ + /* Misc command. */ + switch ((cmd >> 4) & 7) { + case 0: /* No-op. */ + break; + case 1: /* Reset mode register pointer. */ + s->current_mr = 0; + break; + case 2: /* Reset receiver. */ + s->rx_enabled = 0; + s->fifo_len = 0; + s->sr &= ~(MCF_UART_RxRDY | MCF_UART_FFULL); + break; + case 3: /* Reset transmitter. 
*/ + s->tx_enabled = 0; + s->sr |= MCF_UART_TxEMP; + s->sr &= ~MCF_UART_TxRDY; + break; + case 4: /* Reset error status. */ + break; + case 5: /* Reset break-change interrupt. */ + s->isr &= ~MCF_UART_DBINT; + break; + case 6: /* Start break. */ + case 7: /* Stop break. */ + break; + } + + /* Transmitter command. */ + switch ((cmd >> 2) & 3) { + case 0: /* No-op. */ + break; + case 1: /* Enable. */ + s->tx_enabled = 1; + mcf_uart_do_tx(s); + break; + case 2: /* Disable. */ + s->tx_enabled = 0; + mcf_uart_do_tx(s); + break; + case 3: /* Reserved. */ + fprintf(stderr, "mcf_uart: Bad TX command\n"); + break; + } + + /* Receiver command. */ + switch (cmd & 3) { + case 0: /* No-op. */ + break; + case 1: /* Enable. */ + s->rx_enabled = 1; + break; + case 2: + s->rx_enabled = 0; + break; + case 3: /* Reserved. */ + fprintf(stderr, "mcf_uart: Bad RX command\n"); + break; + } +} + +void mcf_uart_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + mcf_uart_state *s = (mcf_uart_state *)opaque; + switch (addr & 0x3f) { + case 0x00: + s->mr[s->current_mr] = val; + s->current_mr = 1; + break; + case 0x04: + /* CSR is ignored. */ + break; + case 0x08: /* Command Register. */ + mcf_do_command(s, val); + break; + case 0x0c: /* Transmit Buffer. */ + s->sr &= ~MCF_UART_TxEMP; + s->tb = val; + mcf_uart_do_tx(s); + break; + case 0x10: + /* ACR is ignored. */ + break; + case 0x14: + s->imr = val; + break; + default: + break; + } + mcf_uart_update(s); +} + +static void mcf_uart_reset(DeviceState *dev) +{ + mcf_uart_state *s = MCF_UART(dev); + + s->fifo_len = 0; + s->mr[0] = 0; + s->mr[1] = 0; + s->sr = MCF_UART_TxEMP; + s->tx_enabled = 0; + s->rx_enabled = 0; + s->isr = 0; + s->imr = 0; +} + +static void mcf_uart_push_byte(mcf_uart_state *s, uint8_t data) +{ + /* Break events overwrite the last byte if the fifo is full. 
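This is an emulation choice so the break's NUL byte is never lost: the newest queued byte is dropped in its favour (real hardware may differ).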
*/ + if (s->fifo_len == 4) + s->fifo_len--; + + s->fifo[s->fifo_len] = data; + s->fifo_len++; + s->sr |= MCF_UART_RxRDY; + if (s->fifo_len == 4) + s->sr |= MCF_UART_FFULL; + + mcf_uart_update(s); +} + +static void mcf_uart_event(void *opaque, QEMUChrEvent event) +{ + mcf_uart_state *s = (mcf_uart_state *)opaque; + + switch (event) { + case CHR_EVENT_BREAK: + s->isr |= MCF_UART_DBINT; + mcf_uart_push_byte(s, 0); + break; + default: + break; + } +} + +static int mcf_uart_can_receive(void *opaque) +{ + mcf_uart_state *s = (mcf_uart_state *)opaque; + + return s->rx_enabled && (s->sr & MCF_UART_FFULL) == 0; +} + +static void mcf_uart_receive(void *opaque, const uint8_t *buf, int size) +{ + mcf_uart_state *s = (mcf_uart_state *)opaque; + + mcf_uart_push_byte(s, buf[0]); +} + +static const MemoryRegionOps mcf_uart_ops = { + .read = mcf_uart_read, + .write = mcf_uart_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void mcf_uart_instance_init(Object *obj) +{ + SysBusDevice *dev = SYS_BUS_DEVICE(obj); + mcf_uart_state *s = MCF_UART(dev); + + memory_region_init_io(&s->iomem, obj, &mcf_uart_ops, s, "uart", 0x40); + sysbus_init_mmio(dev, &s->iomem); + + sysbus_init_irq(dev, &s->irq); +} + +static void mcf_uart_realize(DeviceState *dev, Error **errp) +{ + mcf_uart_state *s = MCF_UART(dev); + + qemu_chr_fe_set_handlers(&s->chr, mcf_uart_can_receive, mcf_uart_receive, + mcf_uart_event, NULL, s, NULL, true); +} + +static Property mcf_uart_properties[] = { + DEFINE_PROP_CHR("chardev", mcf_uart_state, chr), + DEFINE_PROP_END_OF_LIST(), +}; + +static void mcf_uart_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + + dc->realize = mcf_uart_realize; + dc->reset = mcf_uart_reset; + device_class_set_props(dc, mcf_uart_properties); + set_bit(DEVICE_CATEGORY_INPUT, dc->categories); +} + +static const TypeInfo mcf_uart_info = { + .name = TYPE_MCF_UART, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(mcf_uart_state), + .instance_init = mcf_uart_instance_init, + .class_init = mcf_uart_class_init, +}; + +static void mcf_uart_register(void) +{ + type_register_static(&mcf_uart_info); +} + +type_init(mcf_uart_register) + +void *mcf_uart_init(qemu_irq irq, Chardev *chrdrv) +{ + DeviceState *dev; + + dev = qdev_new(TYPE_MCF_UART); + if (chrdrv) { + qdev_prop_set_chr(dev, "chardev", chrdrv); + } + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + + sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, irq); + + return dev; +} + +void mcf_uart_mm_init(hwaddr base, qemu_irq irq, Chardev *chrdrv) +{ + DeviceState *dev; + + dev = mcf_uart_init(irq, chrdrv); + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base); +} diff --git a/hw/char/mchp_pfsoc_mmuart.c b/hw/char/mchp_pfsoc_mmuart.c new file mode 100644 index 000000000..22f3e78eb --- /dev/null +++ b/hw/char/mchp_pfsoc_mmuart.c @@ -0,0 +1,163 @@ +/* + * Microchip PolarFire SoC MMUART emulation + * + * Copyright (c) 2020 Wind River Systems, Inc. + * + * Author: + * Bin Meng + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 or + * (at your option) version 3 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . + */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "qapi/error.h" +#include "migration/vmstate.h" +#include "hw/char/mchp_pfsoc_mmuart.h" +#include "hw/qdev-properties.h" + +#define REGS_OFFSET 0x20 + +static uint64_t mchp_pfsoc_mmuart_read(void *opaque, hwaddr addr, unsigned size) +{ + MchpPfSoCMMUartState *s = opaque; + + addr >>= 2; + if (addr >= MCHP_PFSOC_MMUART_REG_COUNT) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: read: addr=0x%" HWADDR_PRIx "\n", + __func__, addr << 2); + return 0; + } + + return s->reg[addr]; +} + +static void mchp_pfsoc_mmuart_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + MchpPfSoCMMUartState *s = opaque; + uint32_t val32 = (uint32_t)value; + + addr >>= 2; + if (addr >= MCHP_PFSOC_MMUART_REG_COUNT) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: bad write: addr=0x%" HWADDR_PRIx + " v=0x%x\n", __func__, addr << 2, val32); + return; + } + + s->reg[addr] = val32; +} + +static const MemoryRegionOps mchp_pfsoc_mmuart_ops = { + .read = mchp_pfsoc_mmuart_read, + .write = mchp_pfsoc_mmuart_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .impl = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +static void mchp_pfsoc_mmuart_reset(DeviceState *dev) +{ + MchpPfSoCMMUartState *s = MCHP_PFSOC_UART(dev); + + memset(s->reg, 0, sizeof(s->reg)); + device_cold_reset(DEVICE(&s->serial_mm)); +} + +static void mchp_pfsoc_mmuart_init(Object *obj) +{ + MchpPfSoCMMUartState *s = MCHP_PFSOC_UART(obj); + + object_initialize_child(obj, "serial-mm", &s->serial_mm, TYPE_SERIAL_MM); + object_property_add_alias(obj, "chardev", OBJECT(&s->serial_mm), "chardev"); +} + +static void mchp_pfsoc_mmuart_realize(DeviceState *dev, Error **errp) +{ + MchpPfSoCMMUartState *s = MCHP_PFSOC_UART(dev); + + qdev_prop_set_uint8(DEVICE(&s->serial_mm), "regshift", 2); + qdev_prop_set_uint32(DEVICE(&s->serial_mm), "baudbase", 399193); + qdev_prop_set_uint8(DEVICE(&s->serial_mm), "endianness", + DEVICE_LITTLE_ENDIAN); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->serial_mm), errp)) { + return; + } + + sysbus_pass_irq(SYS_BUS_DEVICE(dev), SYS_BUS_DEVICE(&s->serial_mm)); + + memory_region_init(&s->container, OBJECT(s), "mchp.pfsoc.mmuart", 0x1000); + sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->container); + + memory_region_add_subregion(&s->container, 0, + sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->serial_mm), 0)); + + memory_region_init_io(&s->iomem, OBJECT(s), &mchp_pfsoc_mmuart_ops, s, + "mchp.pfsoc.mmuart.regs", 0x1000 - REGS_OFFSET); + memory_region_add_subregion(&s->container, REGS_OFFSET, &s->iomem); +} + +static const VMStateDescription mchp_pfsoc_mmuart_vmstate = { + .name = "mchp.pfsoc.uart", + .version_id = 0, + .minimum_version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_UINT32_ARRAY(reg, MchpPfSoCMMUartState, + MCHP_PFSOC_MMUART_REG_COUNT), + VMSTATE_END_OF_LIST() + } +}; + +static void mchp_pfsoc_mmuart_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + + dc->realize = mchp_pfsoc_mmuart_realize; + dc->reset = mchp_pfsoc_mmuart_reset; + dc->vmsd = &mchp_pfsoc_mmuart_vmstate; + set_bit(DEVICE_CATEGORY_INPUT, dc->categories); +} + +static const TypeInfo mchp_pfsoc_mmuart_info = { + .name = TYPE_MCHP_PFSOC_UART, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(MchpPfSoCMMUartState), + .instance_init = mchp_pfsoc_mmuart_init, + .class_init = mchp_pfsoc_mmuart_class_init, +}; + +static void 
mchp_pfsoc_mmuart_register_types(void) +{ + type_register_static(&mchp_pfsoc_mmuart_info); +} + +type_init(mchp_pfsoc_mmuart_register_types) + +MchpPfSoCMMUartState *mchp_pfsoc_mmuart_create(MemoryRegion *sysmem, + hwaddr base, + qemu_irq irq, Chardev *chr) +{ + DeviceState *dev = qdev_new(TYPE_MCHP_PFSOC_UART); + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + + qdev_prop_set_chr(dev, "chardev", chr); + sysbus_realize(sbd, &error_fatal); + + memory_region_add_subregion(sysmem, base, sysbus_mmio_get_region(sbd, 0)); + sysbus_connect_irq(sbd, 0, irq); + + return MCHP_PFSOC_UART(dev); +} diff --git a/hw/char/meson.build b/hw/char/meson.build new file mode 100644 index 000000000..7b594f51b --- /dev/null +++ b/hw/char/meson.build @@ -0,0 +1,41 @@ +softmmu_ss.add(when: 'CONFIG_CADENCE', if_true: files('cadence_uart.c')) +softmmu_ss.add(when: 'CONFIG_CMSDK_APB_UART', if_true: files('cmsdk-apb-uart.c')) +softmmu_ss.add(when: 'CONFIG_ESCC', if_true: files('escc.c')) +softmmu_ss.add(when: 'CONFIG_ETRAXFS', if_true: files('etraxfs_ser.c')) +softmmu_ss.add(when: 'CONFIG_GRLIB', if_true: files('grlib_apbuart.c')) +softmmu_ss.add(when: 'CONFIG_IBEX', if_true: files('ibex_uart.c')) +softmmu_ss.add(when: 'CONFIG_IMX', if_true: files('imx_serial.c')) +softmmu_ss.add(when: 'CONFIG_IPACK', if_true: files('ipoctal232.c')) +softmmu_ss.add(when: 'CONFIG_ISA_BUS', if_true: files('parallel-isa.c')) +softmmu_ss.add(when: 'CONFIG_ISA_DEBUG', if_true: files('debugcon.c')) +softmmu_ss.add(when: 'CONFIG_NRF51_SOC', if_true: files('nrf51_uart.c')) +softmmu_ss.add(when: 'CONFIG_PARALLEL', if_true: files('parallel.c')) +softmmu_ss.add(when: 'CONFIG_PL011', if_true: files('pl011.c')) +softmmu_ss.add(when: 'CONFIG_SCLPCONSOLE', if_true: files('sclpconsole.c', 'sclpconsole-lm.c')) +softmmu_ss.add(when: 'CONFIG_SERIAL', if_true: files('serial.c')) +softmmu_ss.add(when: 'CONFIG_SERIAL_ISA', if_true: files('serial-isa.c')) +softmmu_ss.add(when: 'CONFIG_SERIAL_PCI', if_true: files('serial-pci.c')) +softmmu_ss.add(when: 'CONFIG_SERIAL_PCI_MULTI', if_true: files('serial-pci-multi.c')) +softmmu_ss.add(when: 'CONFIG_SHAKTI_UART', if_true: files('shakti_uart.c')) +softmmu_ss.add(when: 'CONFIG_VIRTIO_SERIAL', if_true: files('virtio-console.c')) +softmmu_ss.add(when: 'CONFIG_XEN', if_true: files('xen_console.c')) +softmmu_ss.add(when: 'CONFIG_XILINX', if_true: files('xilinx_uartlite.c')) + +softmmu_ss.add(when: 'CONFIG_AVR_USART', if_true: files('avr_usart.c')) +softmmu_ss.add(when: 'CONFIG_COLDFIRE', if_true: files('mcf_uart.c')) +softmmu_ss.add(when: 'CONFIG_DIGIC', if_true: files('digic-uart.c')) +softmmu_ss.add(when: 'CONFIG_EXYNOS4', if_true: files('exynos4210_uart.c')) +softmmu_ss.add(when: 'CONFIG_OMAP', if_true: files('omap_uart.c')) +softmmu_ss.add(when: 'CONFIG_RASPI', if_true: files('bcm2835_aux.c')) +softmmu_ss.add(when: 'CONFIG_RENESAS_SCI', if_true: files('renesas_sci.c')) +softmmu_ss.add(when: 'CONFIG_SIFIVE_UART', if_true: files('sifive_uart.c')) +softmmu_ss.add(when: 'CONFIG_SH_SCI', if_true: files('sh_serial.c')) +softmmu_ss.add(when: 'CONFIG_STM32F2XX_USART', if_true: files('stm32f2xx_usart.c')) +softmmu_ss.add(when: 'CONFIG_MCHP_PFSOC_MMUART', if_true: files('mchp_pfsoc_mmuart.c')) + +specific_ss.add(when: 'CONFIG_HTIF', if_true: files('riscv_htif.c')) +specific_ss.add(when: 'CONFIG_TERMINAL3270', if_true: files('terminal3270.c')) +specific_ss.add(when: 'CONFIG_VIRTIO', if_true: files('virtio-serial-bus.c')) +specific_ss.add(when: 'CONFIG_PSERIES', if_true: files('spapr_vty.c')) + +specific_ss.add(when: 
'CONFIG_GOLDFISH_TTY', if_true: files('goldfish_tty.c')) diff --git a/hw/char/nrf51_uart.c b/hw/char/nrf51_uart.c new file mode 100644 index 000000000..3c6f982de --- /dev/null +++ b/hw/char/nrf51_uart.c @@ -0,0 +1,335 @@ +/* + * nRF51 SoC UART emulation + * + * See nRF51 Series Reference Manual, "29 Universal Asynchronous + * Receiver/Transmitter" for hardware specifications: + * http://infocenter.nordicsemi.com/pdf/nRF51_RM_v3.0.pdf + * + * Copyright (c) 2018 Julia Suvorova + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 or + * (at your option) any later version. + */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "hw/char/nrf51_uart.h" +#include "hw/irq.h" +#include "hw/qdev-properties.h" +#include "hw/qdev-properties-system.h" +#include "migration/vmstate.h" +#include "trace.h" + +static void nrf51_uart_update_irq(NRF51UARTState *s) +{ + bool irq = false; + + irq |= (s->reg[R_UART_RXDRDY] && + (s->reg[R_UART_INTEN] & R_UART_INTEN_RXDRDY_MASK)); + irq |= (s->reg[R_UART_TXDRDY] && + (s->reg[R_UART_INTEN] & R_UART_INTEN_TXDRDY_MASK)); + irq |= (s->reg[R_UART_ERROR] && + (s->reg[R_UART_INTEN] & R_UART_INTEN_ERROR_MASK)); + irq |= (s->reg[R_UART_RXTO] && + (s->reg[R_UART_INTEN] & R_UART_INTEN_RXTO_MASK)); + + qemu_set_irq(s->irq, irq); +} + +static uint64_t uart_read(void *opaque, hwaddr addr, unsigned int size) +{ + NRF51UARTState *s = NRF51_UART(opaque); + uint64_t r; + + if (!s->enabled) { + return 0; + } + + switch (addr) { + case A_UART_RXD: + r = s->rx_fifo[s->rx_fifo_pos]; + if (s->rx_started && s->rx_fifo_len) { + s->rx_fifo_pos = (s->rx_fifo_pos + 1) % UART_FIFO_LENGTH; + s->rx_fifo_len--; + if (s->rx_fifo_len) { + s->reg[R_UART_RXDRDY] = 1; + nrf51_uart_update_irq(s); + } + qemu_chr_fe_accept_input(&s->chr); + } + break; + case A_UART_INTENSET: + case A_UART_INTENCLR: + case A_UART_INTEN: + r = s->reg[R_UART_INTEN]; + break; + default: + r = s->reg[addr / 4]; + break; + } + + trace_nrf51_uart_read(addr, r, size); + + return r; +} + +static gboolean uart_transmit(void *do_not_use, GIOCondition cond, void *opaque) +{ + NRF51UARTState *s = NRF51_UART(opaque); + int r; + uint8_t c = s->reg[R_UART_TXD]; + + s->watch_tag = 0; + + r = qemu_chr_fe_write(&s->chr, &c, 1); + if (r <= 0) { + s->watch_tag = qemu_chr_fe_add_watch(&s->chr, G_IO_OUT | G_IO_HUP, + uart_transmit, s); + if (!s->watch_tag) { + /* The hardware has no transmit error reporting, + * so silently drop the byte + */ + goto buffer_drained; + } + return FALSE; + } + +buffer_drained: + s->reg[R_UART_TXDRDY] = 1; + s->pending_tx_byte = false; + return FALSE; +} + +static void uart_cancel_transmit(NRF51UARTState *s) +{ + if (s->watch_tag) { + g_source_remove(s->watch_tag); + s->watch_tag = 0; + } +} + +static void uart_write(void *opaque, hwaddr addr, + uint64_t value, unsigned int size) +{ + NRF51UARTState *s = NRF51_UART(opaque); + + trace_nrf51_uart_write(addr, value, size); + + if (!s->enabled && (addr != A_UART_ENABLE)) { + return; + } + + switch (addr) { + case A_UART_TXD: + if (!s->pending_tx_byte && s->tx_started) { + s->reg[R_UART_TXD] = value; + s->pending_tx_byte = true; + uart_transmit(NULL, G_IO_OUT, s); + } + break; + case A_UART_INTEN: + s->reg[R_UART_INTEN] = value; + break; + case A_UART_INTENSET: + s->reg[R_UART_INTEN] |= value; + break; + case A_UART_INTENCLR: + s->reg[R_UART_INTEN] &= ~value; + break; + case A_UART_TXDRDY ... 
A_UART_RXTO: + s->reg[addr / 4] = value; + break; + case A_UART_ERRORSRC: + s->reg[addr / 4] &= ~value; + break; + case A_UART_RXD: + break; + case A_UART_RXDRDY: + if (value == 0) { + s->reg[R_UART_RXDRDY] = 0; + } + break; + case A_UART_STARTTX: + if (value == 1) { + s->tx_started = true; + } + break; + case A_UART_STARTRX: + if (value == 1) { + s->rx_started = true; + } + break; + case A_UART_ENABLE: + if (value) { + if (value == 4) { + s->enabled = true; + } + break; + } + s->enabled = false; + value = 1; + /* fall through */ + case A_UART_SUSPEND: + case A_UART_STOPTX: + if (value == 1) { + s->tx_started = false; + } + /* fall through */ + case A_UART_STOPRX: + if (addr != A_UART_STOPTX && value == 1) { + s->rx_started = false; + s->reg[R_UART_RXTO] = 1; + } + break; + default: + s->reg[addr / 4] = value; + break; + } + nrf51_uart_update_irq(s); +} + +static const MemoryRegionOps uart_ops = { + .read = uart_read, + .write = uart_write, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static void nrf51_uart_reset(DeviceState *dev) +{ + NRF51UARTState *s = NRF51_UART(dev); + + s->pending_tx_byte = 0; + + uart_cancel_transmit(s); + + memset(s->reg, 0, sizeof(s->reg)); + + s->reg[R_UART_PSELRTS] = 0xFFFFFFFF; + s->reg[R_UART_PSELTXD] = 0xFFFFFFFF; + s->reg[R_UART_PSELCTS] = 0xFFFFFFFF; + s->reg[R_UART_PSELRXD] = 0xFFFFFFFF; + s->reg[R_UART_BAUDRATE] = 0x4000000; + + s->rx_fifo_len = 0; + s->rx_fifo_pos = 0; + s->rx_started = false; + s->tx_started = false; + s->enabled = false; +} + +static void uart_receive(void *opaque, const uint8_t *buf, int size) +{ + + NRF51UARTState *s = NRF51_UART(opaque); + int i; + + if (size == 0 || s->rx_fifo_len >= UART_FIFO_LENGTH) { + return; + } + + for (i = 0; i < size; i++) { + uint32_t pos = (s->rx_fifo_pos + s->rx_fifo_len) % UART_FIFO_LENGTH; + s->rx_fifo[pos] = buf[i]; + s->rx_fifo_len++; + } + + s->reg[R_UART_RXDRDY] = 1; + nrf51_uart_update_irq(s); +} + +static int uart_can_receive(void *opaque) +{ + NRF51UARTState *s = NRF51_UART(opaque); + + return s->rx_started ? 
(UART_FIFO_LENGTH - s->rx_fifo_len) : 0; +} + +static void uart_event(void *opaque, QEMUChrEvent event) +{ + NRF51UARTState *s = NRF51_UART(opaque); + + if (event == CHR_EVENT_BREAK) { + s->reg[R_UART_ERRORSRC] |= 3; + s->reg[R_UART_ERROR] = 1; + nrf51_uart_update_irq(s); + } +} + +static void nrf51_uart_realize(DeviceState *dev, Error **errp) +{ + NRF51UARTState *s = NRF51_UART(dev); + + qemu_chr_fe_set_handlers(&s->chr, uart_can_receive, uart_receive, + uart_event, NULL, s, NULL, true); +} + +static void nrf51_uart_init(Object *obj) +{ + NRF51UARTState *s = NRF51_UART(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + + memory_region_init_io(&s->iomem, obj, &uart_ops, s, + "nrf51_soc.uart", UART_SIZE); + sysbus_init_mmio(sbd, &s->iomem); + sysbus_init_irq(sbd, &s->irq); +} + +static int nrf51_uart_post_load(void *opaque, int version_id) +{ + NRF51UARTState *s = NRF51_UART(opaque); + + if (s->pending_tx_byte) { + s->watch_tag = qemu_chr_fe_add_watch(&s->chr, G_IO_OUT | G_IO_HUP, + uart_transmit, s); + } + + return 0; +} + +static const VMStateDescription nrf51_uart_vmstate = { + .name = "nrf51_soc.uart", + .post_load = nrf51_uart_post_load, + .fields = (VMStateField[]) { + VMSTATE_UINT32_ARRAY(reg, NRF51UARTState, 0x56C), + VMSTATE_UINT8_ARRAY(rx_fifo, NRF51UARTState, UART_FIFO_LENGTH), + VMSTATE_UINT32(rx_fifo_pos, NRF51UARTState), + VMSTATE_UINT32(rx_fifo_len, NRF51UARTState), + VMSTATE_BOOL(rx_started, NRF51UARTState), + VMSTATE_BOOL(tx_started, NRF51UARTState), + VMSTATE_BOOL(pending_tx_byte, NRF51UARTState), + VMSTATE_BOOL(enabled, NRF51UARTState), + VMSTATE_END_OF_LIST() + } +}; + +static Property nrf51_uart_properties[] = { + DEFINE_PROP_CHR("chardev", NRF51UARTState, chr), + DEFINE_PROP_END_OF_LIST(), +}; + +static void nrf51_uart_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = nrf51_uart_reset; + dc->realize = nrf51_uart_realize; + device_class_set_props(dc, nrf51_uart_properties); + dc->vmsd = &nrf51_uart_vmstate; +} + +static const TypeInfo nrf51_uart_info = { + .name = TYPE_NRF51_UART, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(NRF51UARTState), + .instance_init = nrf51_uart_init, + .class_init = nrf51_uart_class_init +}; + +static void nrf51_uart_register_types(void) +{ + type_register_static(&nrf51_uart_info); +} + +type_init(nrf51_uart_register_types) diff --git a/hw/char/omap_uart.c b/hw/char/omap_uart.c new file mode 100644 index 000000000..e8da93337 --- /dev/null +++ b/hw/char/omap_uart.c @@ -0,0 +1,187 @@ +/* + * TI OMAP processors UART emulation. + * + * Copyright (C) 2006-2008 Andrzej Zaborowski + * Copyright (C) 2007-2009 Nokia Corporation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 or + * (at your option) version 3 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . 
+ */ +#include "qemu/osdep.h" +#include "chardev/char.h" +#include "hw/arm/omap.h" +#include "hw/char/serial.h" +#include "exec/address-spaces.h" + +/* UARTs */ +struct omap_uart_s { + MemoryRegion iomem; + hwaddr base; + SerialMM *serial; /* TODO */ + struct omap_target_agent_s *ta; + omap_clk fclk; + qemu_irq irq; + + uint8_t eblr; + uint8_t syscontrol; + uint8_t wkup; + uint8_t cfps; + uint8_t mdr[2]; + uint8_t scr; + uint8_t clksel; +}; + +void omap_uart_reset(struct omap_uart_s *s) +{ + s->eblr = 0x00; + s->syscontrol = 0; + s->wkup = 0x3f; + s->cfps = 0x69; + s->clksel = 0; +} + +struct omap_uart_s *omap_uart_init(hwaddr base, + qemu_irq irq, omap_clk fclk, omap_clk iclk, + qemu_irq txdma, qemu_irq rxdma, + const char *label, Chardev *chr) +{ + struct omap_uart_s *s = g_new0(struct omap_uart_s, 1); + + s->base = base; + s->fclk = fclk; + s->irq = irq; + s->serial = serial_mm_init(get_system_memory(), base, 2, irq, + omap_clk_getrate(fclk)/16, + chr ?: qemu_chr_new(label, "null", NULL), + DEVICE_NATIVE_ENDIAN); + return s; +} + +static uint64_t omap_uart_read(void *opaque, hwaddr addr, + unsigned size) +{ + struct omap_uart_s *s = (struct omap_uart_s *) opaque; + + if (size == 4) { + return omap_badwidth_read8(opaque, addr); + } + + switch (addr) { + case 0x20: /* MDR1 */ + return s->mdr[0]; + case 0x24: /* MDR2 */ + return s->mdr[1]; + case 0x40: /* SCR */ + return s->scr; + case 0x44: /* SSR */ + return 0x0; + case 0x48: /* EBLR (OMAP2) */ + return s->eblr; + case 0x4C: /* OSC_12M_SEL (OMAP1) */ + return s->clksel; + case 0x50: /* MVR */ + return 0x30; + case 0x54: /* SYSC (OMAP2) */ + return s->syscontrol; + case 0x58: /* SYSS (OMAP2) */ + return 1; + case 0x5c: /* WER (OMAP2) */ + return s->wkup; + case 0x60: /* CFPS (OMAP2) */ + return s->cfps; + } + + OMAP_BAD_REG(addr); + return 0; +} + +static void omap_uart_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + struct omap_uart_s *s = (struct omap_uart_s *) opaque; + + if (size == 4) { + omap_badwidth_write8(opaque, addr, value); + return; + } + + switch (addr) { + case 0x20: /* MDR1 */ + s->mdr[0] = value & 0x7f; + break; + case 0x24: /* MDR2 */ + s->mdr[1] = value & 0xff; + break; + case 0x40: /* SCR */ + s->scr = value & 0xff; + break; + case 0x48: /* EBLR (OMAP2) */ + s->eblr = value & 0xff; + break; + case 0x4C: /* OSC_12M_SEL (OMAP1) */ + s->clksel = value & 1; + break; + case 0x44: /* SSR */ + case 0x50: /* MVR */ + case 0x58: /* SYSS (OMAP2) */ + OMAP_RO_REG(addr); + break; + case 0x54: /* SYSC (OMAP2) */ + s->syscontrol = value & 0x1d; + if (value & 2) + omap_uart_reset(s); + break; + case 0x5c: /* WER (OMAP2) */ + s->wkup = value & 0x7f; + break; + case 0x60: /* CFPS (OMAP2) */ + s->cfps = value & 0xff; + break; + default: + OMAP_BAD_REG(addr); + } +} + +static const MemoryRegionOps omap_uart_ops = { + .read = omap_uart_read, + .write = omap_uart_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +struct omap_uart_s *omap2_uart_init(MemoryRegion *sysmem, + struct omap_target_agent_s *ta, + qemu_irq irq, omap_clk fclk, omap_clk iclk, + qemu_irq txdma, qemu_irq rxdma, + const char *label, Chardev *chr) +{ + hwaddr base = omap_l4_attach(ta, 0, NULL); + struct omap_uart_s *s = omap_uart_init(base, irq, + fclk, iclk, txdma, rxdma, label, chr); + + memory_region_init_io(&s->iomem, NULL, &omap_uart_ops, s, "omap.uart", 0x100); + + s->ta = ta; + + memory_region_add_subregion(sysmem, base + 0x20, &s->iomem); + + return s; +} + +void omap_uart_attach(struct omap_uart_s *s, Chardev *chr) +{ + /* TODO: Should 
reuse or destroy current s->serial */ + s->serial = serial_mm_init(get_system_memory(), s->base, 2, s->irq, + omap_clk_getrate(s->fclk) / 16, + chr ?: qemu_chr_new("null", "null", NULL), + DEVICE_NATIVE_ENDIAN); +} diff --git a/hw/char/parallel-isa.c b/hw/char/parallel-isa.c new file mode 100644 index 000000000..1ccbb96e7 --- /dev/null +++ b/hw/char/parallel-isa.c @@ -0,0 +1,42 @@ +/* + * QEMU Parallel PORT (ISA bus helpers) + * + * These functions reside in a separate file since they also might be + * required for linking when compiling QEMU without CONFIG_PARALLEL. + * + * Copyright (c) 2003 Fabrice Bellard + * + * SPDX-License-Identifier: MIT + */ + +#include "qemu/osdep.h" +#include "sysemu/sysemu.h" +#include "hw/isa/isa.h" +#include "hw/qdev-properties.h" +#include "hw/char/parallel.h" +#include "qapi/error.h" + +static void parallel_init(ISABus *bus, int index, Chardev *chr) +{ + DeviceState *dev; + ISADevice *isadev; + + isadev = isa_new("isa-parallel"); + dev = DEVICE(isadev); + qdev_prop_set_uint32(dev, "index", index); + qdev_prop_set_chr(dev, "chardev", chr); + isa_realize_and_unref(isadev, bus, &error_fatal); +} + +void parallel_hds_isa_init(ISABus *bus, int n) +{ + int i; + + assert(n <= MAX_PARALLEL_PORTS); + + for (i = 0; i < n; i++) { + if (parallel_hds[i]) { + parallel_init(bus, i, parallel_hds[i]); + } + } +} diff --git a/hw/char/parallel.c b/hw/char/parallel.c new file mode 100644 index 000000000..b45e67bfb --- /dev/null +++ b/hw/char/parallel.c @@ -0,0 +1,669 @@ +/* + * QEMU Parallel PORT emulation + * + * Copyright (c) 2003-2005 Fabrice Bellard + * Copyright (c) 2007 Marko Kohtala + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/module.h" +#include "chardev/char-parallel.h" +#include "chardev/char-fe.h" +#include "hw/acpi/aml-build.h" +#include "hw/irq.h" +#include "hw/isa/isa.h" +#include "hw/qdev-properties.h" +#include "hw/qdev-properties-system.h" +#include "migration/vmstate.h" +#include "hw/char/parallel.h" +#include "sysemu/reset.h" +#include "sysemu/sysemu.h" +#include "trace.h" +#include "qom/object.h" + +//#define DEBUG_PARALLEL + +#ifdef DEBUG_PARALLEL +#define pdebug(fmt, ...) printf("pp: " fmt, ## __VA_ARGS__) +#else +#define pdebug(fmt, ...) 
((void)0) +#endif + +#define PARA_REG_DATA 0 +#define PARA_REG_STS 1 +#define PARA_REG_CTR 2 +#define PARA_REG_EPP_ADDR 3 +#define PARA_REG_EPP_DATA 4 + +/* + * These are the definitions for the Printer Status Register + */ +#define PARA_STS_BUSY 0x80 /* Busy complement */ +#define PARA_STS_ACK 0x40 /* Acknowledge */ +#define PARA_STS_PAPER 0x20 /* Out of paper */ +#define PARA_STS_ONLINE 0x10 /* Online */ +#define PARA_STS_ERROR 0x08 /* Error complement */ +#define PARA_STS_TMOUT 0x01 /* EPP timeout */ + +/* + * These are the definitions for the Printer Control Register + */ +#define PARA_CTR_DIR 0x20 /* Direction (1=read, 0=write) */ +#define PARA_CTR_INTEN 0x10 /* IRQ Enable */ +#define PARA_CTR_SELECT 0x08 /* Select In complement */ +#define PARA_CTR_INIT 0x04 /* Initialize Printer complement */ +#define PARA_CTR_AUTOLF 0x02 /* Auto linefeed complement */ +#define PARA_CTR_STROBE 0x01 /* Strobe complement */ + +#define PARA_CTR_SIGNAL (PARA_CTR_SELECT|PARA_CTR_INIT|PARA_CTR_AUTOLF|PARA_CTR_STROBE) + +typedef struct ParallelState { + MemoryRegion iomem; + uint8_t dataw; + uint8_t datar; + uint8_t status; + uint8_t control; + qemu_irq irq; + int irq_pending; + CharBackend chr; + int hw_driver; + int epp_timeout; + uint32_t last_read_offset; /* For debugging */ + /* Memory-mapped interface */ + int it_shift; + PortioList portio_list; +} ParallelState; + +#define TYPE_ISA_PARALLEL "isa-parallel" +OBJECT_DECLARE_SIMPLE_TYPE(ISAParallelState, ISA_PARALLEL) + +struct ISAParallelState { + ISADevice parent_obj; + + uint32_t index; + uint32_t iobase; + uint32_t isairq; + ParallelState state; +}; + +static void parallel_update_irq(ParallelState *s) +{ + if (s->irq_pending) + qemu_irq_raise(s->irq); + else + qemu_irq_lower(s->irq); +} + +static void +parallel_ioport_write_sw(void *opaque, uint32_t addr, uint32_t val) +{ + ParallelState *s = opaque; + + addr &= 7; + trace_parallel_ioport_write("SW", addr, val); + switch(addr) { + case PARA_REG_DATA: + s->dataw = val; + parallel_update_irq(s); + break; + case PARA_REG_CTR: + val |= 0xc0; + if ((val & PARA_CTR_INIT) == 0 ) { + s->status = PARA_STS_BUSY; + s->status |= PARA_STS_ACK; + s->status |= PARA_STS_ONLINE; + s->status |= PARA_STS_ERROR; + } + else if (val & PARA_CTR_SELECT) { + if (val & PARA_CTR_STROBE) { + s->status &= ~PARA_STS_BUSY; + if ((s->control & PARA_CTR_STROBE) == 0) + /* XXX this blocks entire thread. Rewrite to use + * qemu_chr_fe_write and background I/O callbacks */ + qemu_chr_fe_write_all(&s->chr, &s->dataw, 1); + } else { + if (s->control & PARA_CTR_INTEN) { + s->irq_pending = 1; + } + } + } + parallel_update_irq(s); + s->control = val; + break; + } +} + +static void parallel_ioport_write_hw(void *opaque, uint32_t addr, uint32_t val) +{ + ParallelState *s = opaque; + uint8_t parm = val; + int dir; + + /* Sometimes programs do several writes for timing purposes on old + HW. Take care not to waste time on writes that do nothing. 
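The DATA and CTR cases below therefore return early when the value written matches the cached one.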
*/ + + s->last_read_offset = ~0U; + + addr &= 7; + trace_parallel_ioport_write("HW", addr, val); + switch(addr) { + case PARA_REG_DATA: + if (s->dataw == val) + return; + pdebug("wd%02x\n", val); + qemu_chr_fe_ioctl(&s->chr, CHR_IOCTL_PP_WRITE_DATA, &parm); + s->dataw = val; + break; + case PARA_REG_STS: + pdebug("ws%02x\n", val); + if (val & PARA_STS_TMOUT) + s->epp_timeout = 0; + break; + case PARA_REG_CTR: + val |= 0xc0; + if (s->control == val) + return; + pdebug("wc%02x\n", val); + + if ((val & PARA_CTR_DIR) != (s->control & PARA_CTR_DIR)) { + if (val & PARA_CTR_DIR) { + dir = 1; + } else { + dir = 0; + } + qemu_chr_fe_ioctl(&s->chr, CHR_IOCTL_PP_DATA_DIR, &dir); + parm &= ~PARA_CTR_DIR; + } + + qemu_chr_fe_ioctl(&s->chr, CHR_IOCTL_PP_WRITE_CONTROL, &parm); + s->control = val; + break; + case PARA_REG_EPP_ADDR: + if ((s->control & (PARA_CTR_DIR|PARA_CTR_SIGNAL)) != PARA_CTR_INIT) + /* Controls not correct for EPP address cycle, so do nothing */ + pdebug("wa%02x s\n", val); + else { + struct ParallelIOArg ioarg = { .buffer = &parm, .count = 1 }; + if (qemu_chr_fe_ioctl(&s->chr, + CHR_IOCTL_PP_EPP_WRITE_ADDR, &ioarg)) { + s->epp_timeout = 1; + pdebug("wa%02x t\n", val); + } + else + pdebug("wa%02x\n", val); + } + break; + case PARA_REG_EPP_DATA: + if ((s->control & (PARA_CTR_DIR|PARA_CTR_SIGNAL)) != PARA_CTR_INIT) + /* Controls not correct for EPP data cycle, so do nothing */ + pdebug("we%02x s\n", val); + else { + struct ParallelIOArg ioarg = { .buffer = &parm, .count = 1 }; + if (qemu_chr_fe_ioctl(&s->chr, CHR_IOCTL_PP_EPP_WRITE, &ioarg)) { + s->epp_timeout = 1; + pdebug("we%02x t\n", val); + } + else + pdebug("we%02x\n", val); + } + break; + } +} + +static void +parallel_ioport_eppdata_write_hw2(void *opaque, uint32_t addr, uint32_t val) +{ + ParallelState *s = opaque; + uint16_t eppdata = cpu_to_le16(val); + int err; + struct ParallelIOArg ioarg = { + .buffer = &eppdata, .count = sizeof(eppdata) + }; + + trace_parallel_ioport_write("EPP", addr, val); + if ((s->control & (PARA_CTR_DIR|PARA_CTR_SIGNAL)) != PARA_CTR_INIT) { + /* Controls not correct for EPP data cycle, so do nothing */ + pdebug("we%04x s\n", val); + return; + } + err = qemu_chr_fe_ioctl(&s->chr, CHR_IOCTL_PP_EPP_WRITE, &ioarg); + if (err) { + s->epp_timeout = 1; + pdebug("we%04x t\n", val); + } + else + pdebug("we%04x\n", val); +} + +static void +parallel_ioport_eppdata_write_hw4(void *opaque, uint32_t addr, uint32_t val) +{ + ParallelState *s = opaque; + uint32_t eppdata = cpu_to_le32(val); + int err; + struct ParallelIOArg ioarg = { + .buffer = &eppdata, .count = sizeof(eppdata) + }; + + trace_parallel_ioport_write("EPP", addr, val); + if ((s->control & (PARA_CTR_DIR|PARA_CTR_SIGNAL)) != PARA_CTR_INIT) { + /* Controls not correct for EPP data cycle, so do nothing */ + pdebug("we%08x s\n", val); + return; + } + err = qemu_chr_fe_ioctl(&s->chr, CHR_IOCTL_PP_EPP_WRITE, &ioarg); + if (err) { + s->epp_timeout = 1; + pdebug("we%08x t\n", val); + } + else + pdebug("we%08x\n", val); +} + +static uint32_t parallel_ioport_read_sw(void *opaque, uint32_t addr) +{ + ParallelState *s = opaque; + uint32_t ret = 0xff; + + addr &= 7; + switch(addr) { + case PARA_REG_DATA: + if (s->control & PARA_CTR_DIR) + ret = s->datar; + else + ret = s->dataw; + break; + case PARA_REG_STS: + ret = s->status; + s->irq_pending = 0; + if ((s->status & PARA_STS_BUSY) == 0 && (s->control & PARA_CTR_STROBE) == 0) { + /* XXX Fixme: wait 5 microseconds */ + if (s->status & PARA_STS_ACK) + s->status &= ~PARA_STS_ACK; + else { + /* XXX Fixme: wait 5 
microseconds */ + s->status |= PARA_STS_ACK; + s->status |= PARA_STS_BUSY; + } + } + parallel_update_irq(s); + break; + case PARA_REG_CTR: + ret = s->control; + break; + } + trace_parallel_ioport_read("SW", addr, ret); + return ret; +} + +static uint32_t parallel_ioport_read_hw(void *opaque, uint32_t addr) +{ + ParallelState *s = opaque; + uint8_t ret = 0xff; + addr &= 7; + switch(addr) { + case PARA_REG_DATA: + qemu_chr_fe_ioctl(&s->chr, CHR_IOCTL_PP_READ_DATA, &ret); + if (s->last_read_offset != addr || s->datar != ret) + pdebug("rd%02x\n", ret); + s->datar = ret; + break; + case PARA_REG_STS: + qemu_chr_fe_ioctl(&s->chr, CHR_IOCTL_PP_READ_STATUS, &ret); + ret &= ~PARA_STS_TMOUT; + if (s->epp_timeout) + ret |= PARA_STS_TMOUT; + if (s->last_read_offset != addr || s->status != ret) + pdebug("rs%02x\n", ret); + s->status = ret; + break; + case PARA_REG_CTR: + /* s->control has some bits fixed to 1. It is zero only when + it has not been yet written to. */ + if (s->control == 0) { + qemu_chr_fe_ioctl(&s->chr, CHR_IOCTL_PP_READ_CONTROL, &ret); + if (s->last_read_offset != addr) + pdebug("rc%02x\n", ret); + s->control = ret; + } + else { + ret = s->control; + if (s->last_read_offset != addr) + pdebug("rc%02x\n", ret); + } + break; + case PARA_REG_EPP_ADDR: + if ((s->control & (PARA_CTR_DIR | PARA_CTR_SIGNAL)) != + (PARA_CTR_DIR | PARA_CTR_INIT)) + /* Controls not correct for EPP addr cycle, so do nothing */ + pdebug("ra%02x s\n", ret); + else { + struct ParallelIOArg ioarg = { .buffer = &ret, .count = 1 }; + if (qemu_chr_fe_ioctl(&s->chr, + CHR_IOCTL_PP_EPP_READ_ADDR, &ioarg)) { + s->epp_timeout = 1; + pdebug("ra%02x t\n", ret); + } + else + pdebug("ra%02x\n", ret); + } + break; + case PARA_REG_EPP_DATA: + if ((s->control & (PARA_CTR_DIR | PARA_CTR_SIGNAL)) != + (PARA_CTR_DIR | PARA_CTR_INIT)) + /* Controls not correct for EPP data cycle, so do nothing */ + pdebug("re%02x s\n", ret); + else { + struct ParallelIOArg ioarg = { .buffer = &ret, .count = 1 }; + if (qemu_chr_fe_ioctl(&s->chr, CHR_IOCTL_PP_EPP_READ, &ioarg)) { + s->epp_timeout = 1; + pdebug("re%02x t\n", ret); + } + else + pdebug("re%02x\n", ret); + } + break; + } + trace_parallel_ioport_read("HW", addr, ret); + s->last_read_offset = addr; + return ret; +} + +static uint32_t +parallel_ioport_eppdata_read_hw2(void *opaque, uint32_t addr) +{ + ParallelState *s = opaque; + uint32_t ret; + uint16_t eppdata = ~0; + int err; + struct ParallelIOArg ioarg = { + .buffer = &eppdata, .count = sizeof(eppdata) + }; + if ((s->control & (PARA_CTR_DIR|PARA_CTR_SIGNAL)) != (PARA_CTR_DIR|PARA_CTR_INIT)) { + /* Controls not correct for EPP data cycle, so do nothing */ + pdebug("re%04x s\n", eppdata); + return eppdata; + } + err = qemu_chr_fe_ioctl(&s->chr, CHR_IOCTL_PP_EPP_READ, &ioarg); + ret = le16_to_cpu(eppdata); + + if (err) { + s->epp_timeout = 1; + pdebug("re%04x t\n", ret); + } + else + pdebug("re%04x\n", ret); + trace_parallel_ioport_read("EPP", addr, ret); + return ret; +} + +static uint32_t +parallel_ioport_eppdata_read_hw4(void *opaque, uint32_t addr) +{ + ParallelState *s = opaque; + uint32_t ret; + uint32_t eppdata = ~0U; + int err; + struct ParallelIOArg ioarg = { + .buffer = &eppdata, .count = sizeof(eppdata) + }; + if ((s->control & (PARA_CTR_DIR|PARA_CTR_SIGNAL)) != (PARA_CTR_DIR|PARA_CTR_INIT)) { + /* Controls not correct for EPP data cycle, so do nothing */ + pdebug("re%08x s\n", eppdata); + return eppdata; + } + err = qemu_chr_fe_ioctl(&s->chr, CHR_IOCTL_PP_EPP_READ, &ioarg); + ret = le32_to_cpu(eppdata); + + if (err) { + 
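+ /* An ioctl failure is surfaced to the guest as an EPP timeout (PARA_STS_TMOUT). */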
s->epp_timeout = 1; + pdebug("re%08x t\n", ret); + } + else + pdebug("re%08x\n", ret); + trace_parallel_ioport_read("EPP", addr, ret); + return ret; +} + +static void parallel_ioport_ecp_write(void *opaque, uint32_t addr, uint32_t val) +{ + trace_parallel_ioport_write("ECP", addr & 7, val); + pdebug("wecp%d=%02x\n", addr & 7, val); +} + +static uint32_t parallel_ioport_ecp_read(void *opaque, uint32_t addr) +{ + uint8_t ret = 0xff; + + trace_parallel_ioport_read("ECP", addr & 7, ret); + pdebug("recp%d:%02x\n", addr & 7, ret); + return ret; +} + +static void parallel_reset(void *opaque) +{ + ParallelState *s = opaque; + + s->datar = ~0; + s->dataw = ~0; + s->status = PARA_STS_BUSY; + s->status |= PARA_STS_ACK; + s->status |= PARA_STS_ONLINE; + s->status |= PARA_STS_ERROR; + s->status |= PARA_STS_TMOUT; + s->control = PARA_CTR_SELECT; + s->control |= PARA_CTR_INIT; + s->control |= 0xc0; + s->irq_pending = 0; + s->hw_driver = 0; + s->epp_timeout = 0; + s->last_read_offset = ~0U; +} + +static const int isa_parallel_io[MAX_PARALLEL_PORTS] = { 0x378, 0x278, 0x3bc }; + +static const MemoryRegionPortio isa_parallel_portio_hw_list[] = { + { 0, 8, 1, + .read = parallel_ioport_read_hw, + .write = parallel_ioport_write_hw }, + { 4, 1, 2, + .read = parallel_ioport_eppdata_read_hw2, + .write = parallel_ioport_eppdata_write_hw2 }, + { 4, 1, 4, + .read = parallel_ioport_eppdata_read_hw4, + .write = parallel_ioport_eppdata_write_hw4 }, + { 0x400, 8, 1, + .read = parallel_ioport_ecp_read, + .write = parallel_ioport_ecp_write }, + PORTIO_END_OF_LIST(), +}; + +static const MemoryRegionPortio isa_parallel_portio_sw_list[] = { + { 0, 8, 1, + .read = parallel_ioport_read_sw, + .write = parallel_ioport_write_sw }, + PORTIO_END_OF_LIST(), +}; + + +static const VMStateDescription vmstate_parallel_isa = { + .name = "parallel_isa", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT8(state.dataw, ISAParallelState), + VMSTATE_UINT8(state.datar, ISAParallelState), + VMSTATE_UINT8(state.status, ISAParallelState), + VMSTATE_UINT8(state.control, ISAParallelState), + VMSTATE_INT32(state.irq_pending, ISAParallelState), + VMSTATE_INT32(state.epp_timeout, ISAParallelState), + VMSTATE_END_OF_LIST() + } +}; + +static int parallel_can_receive(void *opaque) +{ + return 1; +} + +static void parallel_isa_realizefn(DeviceState *dev, Error **errp) +{ + static int index; + ISADevice *isadev = ISA_DEVICE(dev); + ISAParallelState *isa = ISA_PARALLEL(dev); + ParallelState *s = &isa->state; + int base; + uint8_t dummy; + + if (!qemu_chr_fe_backend_connected(&s->chr)) { + error_setg(errp, "Can't create parallel device, empty char device"); + return; + } + + if (isa->index == -1) { + isa->index = index; + } + if (isa->index >= MAX_PARALLEL_PORTS) { + error_setg(errp, "Max. supported number of parallel ports is %d.", + MAX_PARALLEL_PORTS); + return; + } + if (isa->iobase == -1) { + isa->iobase = isa_parallel_io[isa->index]; + } + index++; + + base = isa->iobase; + isa_init_irq(isadev, &s->irq, isa->isairq); + qemu_register_reset(parallel_reset, s); + + qemu_chr_fe_set_handlers(&s->chr, parallel_can_receive, NULL, + NULL, NULL, s, NULL, true); + if (qemu_chr_fe_ioctl(&s->chr, CHR_IOCTL_PP_READ_STATUS, &dummy) == 0) { + s->hw_driver = 1; + s->status = dummy; + } + + isa_register_portio_list(isadev, &s->portio_list, base, + (s->hw_driver + ? 
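/* hw_driver set means a host parport is driven directly via ioctls */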
&isa_parallel_portio_hw_list[0] + : &isa_parallel_portio_sw_list[0]), + s, "parallel"); +} + +static void parallel_isa_build_aml(ISADevice *isadev, Aml *scope) +{ + ISAParallelState *isa = ISA_PARALLEL(isadev); + Aml *dev; + Aml *crs; + + crs = aml_resource_template(); + aml_append(crs, aml_io(AML_DECODE16, isa->iobase, isa->iobase, 0x08, 0x08)); + aml_append(crs, aml_irq_no_flags(isa->isairq)); + + dev = aml_device("LPT%d", isa->index + 1); + aml_append(dev, aml_name_decl("_HID", aml_eisaid("PNP0400"))); + aml_append(dev, aml_name_decl("_UID", aml_int(isa->index + 1))); + aml_append(dev, aml_name_decl("_STA", aml_int(0xf))); + aml_append(dev, aml_name_decl("_CRS", crs)); + + aml_append(scope, dev); +} + +/* Memory mapped interface */ +static uint64_t parallel_mm_readfn(void *opaque, hwaddr addr, unsigned size) +{ + ParallelState *s = opaque; + + return parallel_ioport_read_sw(s, addr >> s->it_shift) & + MAKE_64BIT_MASK(0, size * 8); +} + +static void parallel_mm_writefn(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + ParallelState *s = opaque; + + parallel_ioport_write_sw(s, addr >> s->it_shift, + value & MAKE_64BIT_MASK(0, size * 8)); +} + +static const MemoryRegionOps parallel_mm_ops = { + .read = parallel_mm_readfn, + .write = parallel_mm_writefn, + .valid.min_access_size = 1, + .valid.max_access_size = 4, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +/* If fd is zero, it means that the parallel device uses the console */ +bool parallel_mm_init(MemoryRegion *address_space, + hwaddr base, int it_shift, qemu_irq irq, + Chardev *chr) +{ + ParallelState *s; + + s = g_malloc0(sizeof(ParallelState)); + s->irq = irq; + qemu_chr_fe_init(&s->chr, chr, &error_abort); + s->it_shift = it_shift; + qemu_register_reset(parallel_reset, s); + + memory_region_init_io(&s->iomem, NULL, &parallel_mm_ops, s, + "parallel", 8 << it_shift); + memory_region_add_subregion(address_space, base, &s->iomem); + return true; +} + +static Property parallel_isa_properties[] = { + DEFINE_PROP_UINT32("index", ISAParallelState, index, -1), + DEFINE_PROP_UINT32("iobase", ISAParallelState, iobase, -1), + DEFINE_PROP_UINT32("irq", ISAParallelState, isairq, 7), + DEFINE_PROP_CHR("chardev", ISAParallelState, state.chr), + DEFINE_PROP_END_OF_LIST(), +}; + +static void parallel_isa_class_initfn(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + ISADeviceClass *isa = ISA_DEVICE_CLASS(klass); + + dc->realize = parallel_isa_realizefn; + dc->vmsd = &vmstate_parallel_isa; + isa->build_aml = parallel_isa_build_aml; + device_class_set_props(dc, parallel_isa_properties); + set_bit(DEVICE_CATEGORY_INPUT, dc->categories); +} + +static const TypeInfo parallel_isa_info = { + .name = TYPE_ISA_PARALLEL, + .parent = TYPE_ISA_DEVICE, + .instance_size = sizeof(ISAParallelState), + .class_init = parallel_isa_class_initfn, +}; + +static void parallel_register_types(void) +{ + type_register_static(&parallel_isa_info); +} + +type_init(parallel_register_types) diff --git a/hw/char/pl011.c b/hw/char/pl011.c new file mode 100644 index 000000000..6e2d7f750 --- /dev/null +++ b/hw/char/pl011.c @@ -0,0 +1,451 @@ +/* + * Arm PrimeCell PL011 UART + * + * Copyright (c) 2006 CodeSourcery. + * Written by Paul Brook + * + * This code is licensed under the GPL. 
+ */ + +/* + * QEMU interface: + * + sysbus MMIO region 0: device registers + * + sysbus IRQ 0: UARTINTR (combined interrupt line) + * + sysbus IRQ 1: UARTRXINTR (receive FIFO interrupt line) + * + sysbus IRQ 2: UARTTXINTR (transmit FIFO interrupt line) + * + sysbus IRQ 3: UARTRTINTR (receive timeout interrupt line) + * + sysbus IRQ 4: UARTMSINTR (modem status interrupt line) + * + sysbus IRQ 5: UARTEINTR (error interrupt line) + */ + +#include "qemu/osdep.h" +#include "hw/char/pl011.h" +#include "hw/irq.h" +#include "hw/sysbus.h" +#include "hw/qdev-clock.h" +#include "hw/qdev-properties-system.h" +#include "migration/vmstate.h" +#include "chardev/char-fe.h" +#include "chardev/char-serial.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "trace.h" + +#define PL011_INT_TX 0x20 +#define PL011_INT_RX 0x10 + +#define PL011_FLAG_TXFE 0x80 +#define PL011_FLAG_RXFF 0x40 +#define PL011_FLAG_TXFF 0x20 +#define PL011_FLAG_RXFE 0x10 + +/* Interrupt status bits in UARTRIS, UARTMIS, UARTIMSC */ +#define INT_OE (1 << 10) +#define INT_BE (1 << 9) +#define INT_PE (1 << 8) +#define INT_FE (1 << 7) +#define INT_RT (1 << 6) +#define INT_TX (1 << 5) +#define INT_RX (1 << 4) +#define INT_DSR (1 << 3) +#define INT_DCD (1 << 2) +#define INT_CTS (1 << 1) +#define INT_RI (1 << 0) +#define INT_E (INT_OE | INT_BE | INT_PE | INT_FE) +#define INT_MS (INT_RI | INT_DSR | INT_DCD | INT_CTS) + +static const unsigned char pl011_id_arm[8] = + { 0x11, 0x10, 0x14, 0x00, 0x0d, 0xf0, 0x05, 0xb1 }; +static const unsigned char pl011_id_luminary[8] = + { 0x11, 0x00, 0x18, 0x01, 0x0d, 0xf0, 0x05, 0xb1 }; + +/* Which bits in the interrupt status matter for each outbound IRQ line ? */ +static const uint32_t irqmask[] = { + INT_E | INT_MS | INT_RT | INT_TX | INT_RX, /* combined IRQ */ + INT_RX, + INT_TX, + INT_RT, + INT_MS, + INT_E, +}; + +static void pl011_update(PL011State *s) +{ + uint32_t flags; + int i; + + flags = s->int_level & s->int_enabled; + trace_pl011_irq_state(flags != 0); + for (i = 0; i < ARRAY_SIZE(s->irq); i++) { + qemu_set_irq(s->irq[i], (flags & irqmask[i]) != 0); + } +} + +static uint64_t pl011_read(void *opaque, hwaddr offset, + unsigned size) +{ + PL011State *s = (PL011State *)opaque; + uint32_t c; + uint64_t r; + + switch (offset >> 2) { + case 0: /* UARTDR */ + s->flags &= ~PL011_FLAG_RXFF; + c = s->read_fifo[s->read_pos]; + if (s->read_count > 0) { + s->read_count--; + if (++s->read_pos == 16) + s->read_pos = 0; + } + if (s->read_count == 0) { + s->flags |= PL011_FLAG_RXFE; + } + if (s->read_count == s->read_trigger - 1) + s->int_level &= ~ PL011_INT_RX; + trace_pl011_read_fifo(s->read_count); + s->rsr = c >> 8; + pl011_update(s); + qemu_chr_fe_accept_input(&s->chr); + r = c; + break; + case 1: /* UARTRSR */ + r = s->rsr; + break; + case 6: /* UARTFR */ + r = s->flags; + break; + case 8: /* UARTILPR */ + r = s->ilpr; + break; + case 9: /* UARTIBRD */ + r = s->ibrd; + break; + case 10: /* UARTFBRD */ + r = s->fbrd; + break; + case 11: /* UARTLCR_H */ + r = s->lcr; + break; + case 12: /* UARTCR */ + r = s->cr; + break; + case 13: /* UARTIFLS */ + r = s->ifl; + break; + case 14: /* UARTIMSC */ + r = s->int_enabled; + break; + case 15: /* UARTRIS */ + r = s->int_level; + break; + case 16: /* UARTMIS */ + r = s->int_level & s->int_enabled; + break; + case 18: /* UARTDMACR */ + r = s->dmacr; + break; + case 0x3f8 ... 
0x400: + r = s->id[(offset - 0xfe0) >> 2]; + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "pl011_read: Bad offset 0x%x\n", (int)offset); + r = 0; + break; + } + + trace_pl011_read(offset, r); + return r; +} + +static void pl011_set_read_trigger(PL011State *s) +{ +#if 0 + /* The docs say the RX interrupt is triggered when the FIFO exceeds + the threshold. However linux only reads the FIFO in response to an + interrupt. Triggering the interrupt when the FIFO is non-empty seems + to make things work. */ + if (s->lcr & 0x10) + s->read_trigger = (s->ifl >> 1) & 0x1c; + else +#endif + s->read_trigger = 1; +} + +static unsigned int pl011_get_baudrate(const PL011State *s) +{ + uint64_t clk; + + if (s->fbrd == 0) { + return 0; + } + + clk = clock_get_hz(s->clk); + return (clk / ((s->ibrd << 6) + s->fbrd)) << 2; +} + +static void pl011_trace_baudrate_change(const PL011State *s) +{ + trace_pl011_baudrate_change(pl011_get_baudrate(s), + clock_get_hz(s->clk), + s->ibrd, s->fbrd); +} + +static void pl011_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + PL011State *s = (PL011State *)opaque; + unsigned char ch; + + trace_pl011_write(offset, value); + + switch (offset >> 2) { + case 0: /* UARTDR */ + /* ??? Check if transmitter is enabled. */ + ch = value; + /* XXX this blocks entire thread. Rewrite to use + * qemu_chr_fe_write and background I/O callbacks */ + qemu_chr_fe_write_all(&s->chr, &ch, 1); + s->int_level |= PL011_INT_TX; + pl011_update(s); + break; + case 1: /* UARTRSR/UARTECR */ + s->rsr = 0; + break; + case 6: /* UARTFR */ + /* Writes to Flag register are ignored. */ + break; + case 8: /* UARTILPR */ + s->ilpr = value; + break; + case 9: /* UARTIBRD */ + s->ibrd = value; + pl011_trace_baudrate_change(s); + break; + case 10: /* UARTFBRD */ + s->fbrd = value; + pl011_trace_baudrate_change(s); + break; + case 11: /* UARTLCR_H */ + /* Reset the FIFO state on FIFO enable or disable */ + if ((s->lcr ^ value) & 0x10) { + s->read_count = 0; + s->read_pos = 0; + } + if ((s->lcr ^ value) & 0x1) { + int break_enable = value & 0x1; + qemu_chr_fe_ioctl(&s->chr, CHR_IOCTL_SERIAL_SET_BREAK, + &break_enable); + } + s->lcr = value; + pl011_set_read_trigger(s); + break; + case 12: /* UARTCR */ + /* ??? Need to implement the enable and loopback bits. 
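At minimum UARTEN (bit 0), LBE (bit 7) and TXE/RXE (bits 8/9) would need checking; for now the value is only latched for reads.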
*/ + s->cr = value; + break; + case 13: /* UARTIFLS */ + s->ifl = value; + pl011_set_read_trigger(s); + break; + case 14: /* UARTIMSC */ + s->int_enabled = value; + pl011_update(s); + break; + case 17: /* UARTICR */ + s->int_level &= ~value; + pl011_update(s); + break; + case 18: /* UARTDMACR */ + s->dmacr = value; + if (value & 3) { + qemu_log_mask(LOG_UNIMP, "pl011: DMA not implemented\n"); + } + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "pl011_write: Bad offset 0x%x\n", (int)offset); + } +} + +static int pl011_can_receive(void *opaque) +{ + PL011State *s = (PL011State *)opaque; + int r; + + if (s->lcr & 0x10) { + r = s->read_count < 16; + } else { + r = s->read_count < 1; + } + trace_pl011_can_receive(s->lcr, s->read_count, r); + return r; +} + +static void pl011_put_fifo(void *opaque, uint32_t value) +{ + PL011State *s = (PL011State *)opaque; + int slot; + + slot = s->read_pos + s->read_count; + if (slot >= 16) + slot -= 16; + s->read_fifo[slot] = value; + s->read_count++; + s->flags &= ~PL011_FLAG_RXFE; + trace_pl011_put_fifo(value, s->read_count); + if (!(s->lcr & 0x10) || s->read_count == 16) { + trace_pl011_put_fifo_full(); + s->flags |= PL011_FLAG_RXFF; + } + if (s->read_count == s->read_trigger) { + s->int_level |= PL011_INT_RX; + pl011_update(s); + } +} + +static void pl011_receive(void *opaque, const uint8_t *buf, int size) +{ + pl011_put_fifo(opaque, *buf); +} + +static void pl011_event(void *opaque, QEMUChrEvent event) +{ + if (event == CHR_EVENT_BREAK) + pl011_put_fifo(opaque, 0x400); +} + +static void pl011_clock_update(void *opaque, ClockEvent event) +{ + PL011State *s = PL011(opaque); + + pl011_trace_baudrate_change(s); +} + +static const MemoryRegionOps pl011_ops = { + .read = pl011_read, + .write = pl011_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static bool pl011_clock_needed(void *opaque) +{ + PL011State *s = PL011(opaque); + + return s->migrate_clk; +} + +static const VMStateDescription vmstate_pl011_clock = { + .name = "pl011/clock", + .version_id = 1, + .minimum_version_id = 1, + .needed = pl011_clock_needed, + .fields = (VMStateField[]) { + VMSTATE_CLOCK(clk, PL011State), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_pl011 = { + .name = "pl011", + .version_id = 2, + .minimum_version_id = 2, + .fields = (VMStateField[]) { + VMSTATE_UINT32(readbuff, PL011State), + VMSTATE_UINT32(flags, PL011State), + VMSTATE_UINT32(lcr, PL011State), + VMSTATE_UINT32(rsr, PL011State), + VMSTATE_UINT32(cr, PL011State), + VMSTATE_UINT32(dmacr, PL011State), + VMSTATE_UINT32(int_enabled, PL011State), + VMSTATE_UINT32(int_level, PL011State), + VMSTATE_UINT32_ARRAY(read_fifo, PL011State, 16), + VMSTATE_UINT32(ilpr, PL011State), + VMSTATE_UINT32(ibrd, PL011State), + VMSTATE_UINT32(fbrd, PL011State), + VMSTATE_UINT32(ifl, PL011State), + VMSTATE_INT32(read_pos, PL011State), + VMSTATE_INT32(read_count, PL011State), + VMSTATE_INT32(read_trigger, PL011State), + VMSTATE_END_OF_LIST() + }, + .subsections = (const VMStateDescription * []) { + &vmstate_pl011_clock, + NULL + } +}; + +static Property pl011_properties[] = { + DEFINE_PROP_CHR("chardev", PL011State, chr), + DEFINE_PROP_BOOL("migrate-clk", PL011State, migrate_clk, true), + DEFINE_PROP_END_OF_LIST(), +}; + +static void pl011_init(Object *obj) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + PL011State *s = PL011(obj); + int i; + + memory_region_init_io(&s->iomem, OBJECT(s), &pl011_ops, s, "pl011", 0x1000); + sysbus_init_mmio(sbd, &s->iomem); + for (i = 0; i < ARRAY_SIZE(s->irq); i++) { + 
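+ /* irq[0] is the combined UARTINTR; irq[1..5] follow the per-source order of irqmask[] above. */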
sysbus_init_irq(sbd, &s->irq[i]); + } + + s->clk = qdev_init_clock_in(DEVICE(obj), "clk", pl011_clock_update, s, + ClockUpdate); + + s->read_trigger = 1; + s->ifl = 0x12; + s->cr = 0x300; + s->flags = 0x90; + + s->id = pl011_id_arm; +} + +static void pl011_realize(DeviceState *dev, Error **errp) +{ + PL011State *s = PL011(dev); + + qemu_chr_fe_set_handlers(&s->chr, pl011_can_receive, pl011_receive, + pl011_event, NULL, s, NULL, true); +} + +static void pl011_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + + dc->realize = pl011_realize; + dc->vmsd = &vmstate_pl011; + device_class_set_props(dc, pl011_properties); +} + +static const TypeInfo pl011_arm_info = { + .name = TYPE_PL011, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(PL011State), + .instance_init = pl011_init, + .class_init = pl011_class_init, +}; + +static void pl011_luminary_init(Object *obj) +{ + PL011State *s = PL011(obj); + + s->id = pl011_id_luminary; +} + +static const TypeInfo pl011_luminary_info = { + .name = TYPE_PL011_LUMINARY, + .parent = TYPE_PL011, + .instance_init = pl011_luminary_init, +}; + +static void pl011_register_types(void) +{ + type_register_static(&pl011_arm_info); + type_register_static(&pl011_luminary_info); +} + +type_init(pl011_register_types) diff --git a/hw/char/renesas_sci.c b/hw/char/renesas_sci.c new file mode 100644 index 000000000..1c6346729 --- /dev/null +++ b/hw/char/renesas_sci.c @@ -0,0 +1,351 @@ +/* + * Renesas Serial Communication Interface + * + * Datasheet: RX62N Group, RX621 Group User's Manual: Hardware + * (Rev.1.40 R01UH0033EJ0140) + * + * Copyright (c) 2019 Yoshinori Sato + * + * SPDX-License-Identifier: GPL-2.0-or-later + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . 
+ */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "hw/irq.h" +#include "hw/registerfields.h" +#include "hw/qdev-properties.h" +#include "hw/qdev-properties-system.h" +#include "hw/char/renesas_sci.h" +#include "migration/vmstate.h" + +/* SCI register map */ +REG8(SMR, 0) + FIELD(SMR, CKS, 0, 2) + FIELD(SMR, MP, 2, 1) + FIELD(SMR, STOP, 3, 1) + FIELD(SMR, PM, 4, 1) + FIELD(SMR, PE, 5, 1) + FIELD(SMR, CHR, 6, 1) + FIELD(SMR, CM, 7, 1) +REG8(BRR, 1) +REG8(SCR, 2) + FIELD(SCR, CKE, 0, 2) + FIELD(SCR, TEIE, 2, 1) + FIELD(SCR, MPIE, 3, 1) + FIELD(SCR, RE, 4, 1) + FIELD(SCR, TE, 5, 1) + FIELD(SCR, RIE, 6, 1) + FIELD(SCR, TIE, 7, 1) +REG8(TDR, 3) +REG8(SSR, 4) + FIELD(SSR, MPBT, 0, 1) + FIELD(SSR, MPB, 1, 1) + FIELD(SSR, TEND, 2, 1) + FIELD(SSR, ERR, 3, 3) + FIELD(SSR, PER, 3, 1) + FIELD(SSR, FER, 4, 1) + FIELD(SSR, ORER, 5, 1) + FIELD(SSR, RDRF, 6, 1) + FIELD(SSR, TDRE, 7, 1) +REG8(RDR, 5) +REG8(SCMR, 6) + FIELD(SCMR, SMIF, 0, 1) + FIELD(SCMR, SINV, 2, 1) + FIELD(SCMR, SDIR, 3, 1) + FIELD(SCMR, BCP2, 7, 1) +REG8(SEMR, 7) + FIELD(SEMR, ACS0, 0, 1) + FIELD(SEMR, ABCS, 4, 1) + +static int can_receive(void *opaque) +{ + RSCIState *sci = RSCI(opaque); + if (sci->rx_next > qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL)) { + return 0; + } else { + return FIELD_EX8(sci->scr, SCR, RE); + } +} + +static void receive(void *opaque, const uint8_t *buf, int size) +{ + RSCIState *sci = RSCI(opaque); + sci->rx_next = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + sci->trtime; + if (FIELD_EX8(sci->ssr, SSR, RDRF) || size > 1) { + sci->ssr = FIELD_DP8(sci->ssr, SSR, ORER, 1); + if (FIELD_EX8(sci->scr, SCR, RIE)) { + qemu_set_irq(sci->irq[ERI], 1); + } + } else { + sci->rdr = buf[0]; + sci->ssr = FIELD_DP8(sci->ssr, SSR, RDRF, 1); + if (FIELD_EX8(sci->scr, SCR, RIE)) { + qemu_irq_pulse(sci->irq[RXI]); + } + } +} + +static void send_byte(RSCIState *sci) +{ + if (qemu_chr_fe_backend_connected(&sci->chr)) { + qemu_chr_fe_write_all(&sci->chr, &sci->tdr, 1); + } + timer_mod(&sci->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + sci->trtime); + sci->ssr = FIELD_DP8(sci->ssr, SSR, TEND, 0); + sci->ssr = FIELD_DP8(sci->ssr, SSR, TDRE, 1); + qemu_set_irq(sci->irq[TEI], 0); + if (FIELD_EX8(sci->scr, SCR, TIE)) { + qemu_irq_pulse(sci->irq[TXI]); + } +} + +static void txend(void *opaque) +{ + RSCIState *sci = RSCI(opaque); + if (!FIELD_EX8(sci->ssr, SSR, TDRE)) { + send_byte(sci); + } else { + sci->ssr = FIELD_DP8(sci->ssr, SSR, TEND, 1); + if (FIELD_EX8(sci->scr, SCR, TEIE)) { + qemu_set_irq(sci->irq[TEI], 1); + } + } +} + +static void update_trtime(RSCIState *sci) +{ + /* char per bits */ + sci->trtime = 8 - FIELD_EX8(sci->smr, SMR, CHR); + sci->trtime += FIELD_EX8(sci->smr, SMR, PE); + sci->trtime += FIELD_EX8(sci->smr, SMR, STOP) + 1; + /* x bit transmit time (32 * divrate * brr) / base freq */ + sci->trtime *= 32 * sci->brr; + sci->trtime *= 1 << (2 * FIELD_EX8(sci->smr, SMR, CKS)); + sci->trtime *= NANOSECONDS_PER_SECOND; + sci->trtime /= sci->input_freq; +} + +static bool sci_is_tr_enabled(RSCIState *sci) +{ + return FIELD_EX8(sci->scr, SCR, TE) || FIELD_EX8(sci->scr, SCR, RE); +} + +static void sci_write(void *opaque, hwaddr offset, uint64_t val, unsigned size) +{ + RSCIState *sci = RSCI(opaque); + + switch (offset) { + case A_SMR: + if (!sci_is_tr_enabled(sci)) { + sci->smr = val; + update_trtime(sci); + } + break; + case A_BRR: + if (!sci_is_tr_enabled(sci)) { + sci->brr = val; + update_trtime(sci); + } + break; + case A_SCR: + sci->scr = val; + if (FIELD_EX8(sci->scr, SCR, TE)) { + sci->ssr = FIELD_DP8(sci->ssr, SSR, 
TDRE, 1);
+            sci->ssr = FIELD_DP8(sci->ssr, SSR, TEND, 1);
+            if (FIELD_EX8(sci->scr, SCR, TIE)) {
+                qemu_irq_pulse(sci->irq[TXI]);
+            }
+        }
+        if (!FIELD_EX8(sci->scr, SCR, TEIE)) {
+            qemu_set_irq(sci->irq[TEI], 0);
+        }
+        if (!FIELD_EX8(sci->scr, SCR, RIE)) {
+            qemu_set_irq(sci->irq[ERI], 0);
+        }
+        break;
+    case A_TDR:
+        sci->tdr = val;
+        if (FIELD_EX8(sci->ssr, SSR, TEND)) {
+            send_byte(sci);
+        } else {
+            sci->ssr = FIELD_DP8(sci->ssr, SSR, TDRE, 0);
+        }
+        break;
+    case A_SSR:
+        sci->ssr = FIELD_DP8(sci->ssr, SSR, MPBT,
+                             FIELD_EX8(val, SSR, MPBT));
+        sci->ssr = FIELD_DP8(sci->ssr, SSR, ERR,
+                             FIELD_EX8(val, SSR, ERR) & 0x07);
+        if (FIELD_EX8(sci->read_ssr, SSR, ERR) &&
+            FIELD_EX8(sci->ssr, SSR, ERR) == 0) {
+            qemu_set_irq(sci->irq[ERI], 0);
+        }
+        break;
+    case A_RDR:
+        qemu_log_mask(LOG_GUEST_ERROR, "renesas_sci: RDR is read only.\n");
+        break;
+    case A_SCMR:
+        sci->scmr = val;
+        break;
+    case A_SEMR:
+        sci->semr = val;
+        break;
+    default:
+        qemu_log_mask(LOG_UNIMP, "renesas_sci: Register 0x%" HWADDR_PRIX " "
+                      "not implemented.\n",
+                      offset);
+    }
+}
+
+static uint64_t sci_read(void *opaque, hwaddr offset, unsigned size)
+{
+    RSCIState *sci = RSCI(opaque);
+
+    switch (offset) {
+    case A_SMR:
+        return sci->smr;
+    case A_BRR:
+        return sci->brr;
+    case A_SCR:
+        return sci->scr;
+    case A_TDR:
+        return sci->tdr;
+    case A_SSR:
+        sci->read_ssr = sci->ssr;
+        return sci->ssr;
+    case A_RDR:
+        sci->ssr = FIELD_DP8(sci->ssr, SSR, RDRF, 0);
+        return sci->rdr;
+    case A_SCMR:
+        return sci->scmr;
+    case A_SEMR:
+        return sci->semr;
+    default:
+        qemu_log_mask(LOG_UNIMP, "renesas_sci: Register 0x%" HWADDR_PRIX
+                      " not implemented.\n", offset);
+    }
+    return UINT64_MAX;
+}
+
+static const MemoryRegionOps sci_ops = {
+    .write = sci_write,
+    .read = sci_read,
+    .endianness = DEVICE_NATIVE_ENDIAN,
+    .impl.max_access_size = 1,
+    .valid.max_access_size = 1,
+};
+
+static void rsci_reset(DeviceState *dev)
+{
+    RSCIState *sci = RSCI(dev);
+    sci->smr = sci->scr = 0x00;
+    sci->brr = 0xff;
+    sci->tdr = 0xff;
+    sci->rdr = 0x00;
+    sci->ssr = 0x84;
+    sci->scmr = 0x00;
+    sci->semr = 0x00;
+    sci->rx_next = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
+}
+
+static void sci_event(void *opaque, QEMUChrEvent event)
+{
+    RSCIState *sci = RSCI(opaque);
+    if (event == CHR_EVENT_BREAK) {
+        sci->ssr = FIELD_DP8(sci->ssr, SSR, FER, 1);
+        if (FIELD_EX8(sci->scr, SCR, RIE)) {
+            qemu_set_irq(sci->irq[ERI], 1);
+        }
+    }
+}
+
+static void rsci_realize(DeviceState *dev, Error **errp)
+{
+    RSCIState *sci = RSCI(dev);
+
+    if (sci->input_freq == 0) {
+        qemu_log_mask(LOG_GUEST_ERROR,
+                      "renesas_sci: input-freq property must be set.\n");
+        return;
+    }
+    qemu_chr_fe_set_handlers(&sci->chr, can_receive, receive,
+                             sci_event, NULL, sci, NULL, true);
+}
+
+static void rsci_init(Object *obj)
+{
+    SysBusDevice *d = SYS_BUS_DEVICE(obj);
+    RSCIState *sci = RSCI(obj);
+    int i;
+
+    memory_region_init_io(&sci->memory, OBJECT(sci), &sci_ops,
+                          sci, "renesas-sci", 0x8);
+    sysbus_init_mmio(d, &sci->memory);
+
+    for (i = 0; i < SCI_NR_IRQ; i++) {
+        sysbus_init_irq(d, &sci->irq[i]);
+    }
+    timer_init_ns(&sci->timer, QEMU_CLOCK_VIRTUAL, txend, sci);
+}
+
+static const VMStateDescription vmstate_rsci = {
+    .name = "renesas-sci",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .fields = (VMStateField[]) {
+        VMSTATE_INT64(trtime, RSCIState),
+        VMSTATE_INT64(rx_next, RSCIState),
+        VMSTATE_UINT8(smr, RSCIState),
+        VMSTATE_UINT8(brr, RSCIState),
+        VMSTATE_UINT8(scr, RSCIState),
+        VMSTATE_UINT8(tdr, RSCIState),
+        VMSTATE_UINT8(ssr, RSCIState),
+        VMSTATE_UINT8(rdr,
RSCIState), + VMSTATE_UINT8(scmr, RSCIState), + VMSTATE_UINT8(semr, RSCIState), + VMSTATE_UINT8(read_ssr, RSCIState), + VMSTATE_TIMER(timer, RSCIState), + VMSTATE_END_OF_LIST() + } +}; + +static Property rsci_properties[] = { + DEFINE_PROP_UINT64("input-freq", RSCIState, input_freq, 0), + DEFINE_PROP_CHR("chardev", RSCIState, chr), + DEFINE_PROP_END_OF_LIST(), +}; + +static void rsci_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = rsci_realize; + dc->vmsd = &vmstate_rsci; + dc->reset = rsci_reset; + device_class_set_props(dc, rsci_properties); +} + +static const TypeInfo rsci_info = { + .name = TYPE_RENESAS_SCI, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(RSCIState), + .instance_init = rsci_init, + .class_init = rsci_class_init, +}; + +static void rsci_register_types(void) +{ + type_register_static(&rsci_info); +} + +type_init(rsci_register_types) diff --git a/hw/char/riscv_htif.c b/hw/char/riscv_htif.c new file mode 100644 index 000000000..ddae738d5 --- /dev/null +++ b/hw/char/riscv_htif.c @@ -0,0 +1,260 @@ +/* + * QEMU RISC-V Host Target Interface (HTIF) Emulation + * + * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu + * Copyright (c) 2017-2018 SiFive, Inc. + * + * This provides HTIF device emulation for QEMU. At the moment this allows + * for identical copies of bbl/linux to run on both spike and QEMU. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/log.h" +#include "hw/char/riscv_htif.h" +#include "hw/char/serial.h" +#include "chardev/char.h" +#include "chardev/char-fe.h" +#include "qemu/timer.h" +#include "qemu/error-report.h" + +#define RISCV_DEBUG_HTIF 0 +#define HTIF_DEBUG(fmt, ...) \ + do { \ + if (RISCV_DEBUG_HTIF) { \ + qemu_log_mask(LOG_TRACE, "%s: " fmt "\n", __func__, ##__VA_ARGS__);\ + } \ + } while (0) + +static uint64_t fromhost_addr, tohost_addr; +static int address_symbol_set; + +void htif_symbol_callback(const char *st_name, int st_info, uint64_t st_value, + uint64_t st_size) +{ + if (strcmp("fromhost", st_name) == 0) { + address_symbol_set |= 1; + fromhost_addr = st_value; + if (st_size != 8) { + error_report("HTIF fromhost must be 8 bytes"); + exit(1); + } + } else if (strcmp("tohost", st_name) == 0) { + address_symbol_set |= 2; + tohost_addr = st_value; + if (st_size != 8) { + error_report("HTIF tohost must be 8 bytes"); + exit(1); + } + } +} + +/* + * Called by the char dev to see if HTIF is ready to accept input. + */ +static int htif_can_recv(void *opaque) +{ + return 1; +} + +/* + * Called by the char dev to supply input to HTIF console. + * We assume that we will receive one character at a time. + */ +static void htif_recv(void *opaque, const uint8_t *buf, int size) +{ + HTIFState *htifstate = opaque; + + if (size != 1) { + return; + } + + /* TODO - we need to check whether mfromhost is zero which indicates + the device is ready to receive. 
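+       A non-zero mfromhost just means the guest has not yet consumed
+       the previous event, so a complete model would queue the byte
+       until the register is cleared. As assembled below, fromhost
+       keeps the device/command of the pending read in bits 63:48 and
+       carries (0x100 | byte) in bits 47:0.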
The current implementation + will drop characters */ + + uint64_t val_written = htifstate->pending_read; + uint64_t resp = 0x100 | *buf; + + htifstate->env->mfromhost = (val_written >> 48 << 48) | (resp << 16 >> 16); +} + +/* + * Called by the char dev to supply special events to the HTIF console. + * Not used for HTIF. + */ +static void htif_event(void *opaque, QEMUChrEvent event) +{ + +} + +static int htif_be_change(void *opaque) +{ + HTIFState *s = opaque; + + qemu_chr_fe_set_handlers(&s->chr, htif_can_recv, htif_recv, htif_event, + htif_be_change, s, NULL, true); + + return 0; +} + +static void htif_handle_tohost_write(HTIFState *htifstate, uint64_t val_written) +{ + uint8_t device = val_written >> 56; + uint8_t cmd = val_written >> 48; + uint64_t payload = val_written & 0xFFFFFFFFFFFFULL; + int resp = 0; + + HTIF_DEBUG("mtohost write: device: %d cmd: %d what: %02" PRIx64 + " -payload: %016" PRIx64 "\n", device, cmd, payload & 0xFF, payload); + + /* + * Currently, there is a fixed mapping of devices: + * 0: riscv-tests Pass/Fail Reporting Only (no syscall proxy) + * 1: Console + */ + if (unlikely(device == 0x0)) { + /* frontend syscall handler, shutdown and exit code support */ + if (cmd == 0x0) { + if (payload & 0x1) { + /* exit code */ + int exit_code = payload >> 1; + exit(exit_code); + } else { + qemu_log_mask(LOG_UNIMP, "pk syscall proxy not supported\n"); + } + } else { + qemu_log("HTIF device %d: unknown command\n", device); + } + } else if (likely(device == 0x1)) { + /* HTIF Console */ + if (cmd == 0x0) { + /* this should be a queue, but not yet implemented as such */ + htifstate->pending_read = val_written; + htifstate->env->mtohost = 0; /* clear to indicate we read */ + return; + } else if (cmd == 0x1) { + qemu_chr_fe_write(&htifstate->chr, (uint8_t *)&payload, 1); + resp = 0x100 | (uint8_t)payload; + } else { + qemu_log("HTIF device %d: unknown command\n", device); + } + } else { + qemu_log("HTIF unknown device or command\n"); + HTIF_DEBUG("device: %d cmd: %d what: %02" PRIx64 + " payload: %016" PRIx64, device, cmd, payload & 0xFF, payload); + } + /* + * - latest bbl does not set fromhost to 0 if there is a value in tohost + * - with this code enabled, qemu hangs waiting for fromhost to go to 0 + * - with this code disabled, qemu works with bbl priv v1.9.1 and v1.10 + * - HTIF needs protocol documentation and a more complete state machine + + while (!htifstate->fromhost_inprogress && + htifstate->env->mfromhost != 0x0) { + } + */ + htifstate->env->mfromhost = (val_written >> 48 << 48) | (resp << 16 >> 16); + htifstate->env->mtohost = 0; /* clear to indicate we read */ +} + +#define TOHOST_OFFSET1 (htifstate->tohost_offset) +#define TOHOST_OFFSET2 (htifstate->tohost_offset + 4) +#define FROMHOST_OFFSET1 (htifstate->fromhost_offset) +#define FROMHOST_OFFSET2 (htifstate->fromhost_offset + 4) + +/* CPU wants to read an HTIF register */ +static uint64_t htif_mm_read(void *opaque, hwaddr addr, unsigned size) +{ + HTIFState *htifstate = opaque; + if (addr == TOHOST_OFFSET1) { + return htifstate->env->mtohost & 0xFFFFFFFF; + } else if (addr == TOHOST_OFFSET2) { + return (htifstate->env->mtohost >> 32) & 0xFFFFFFFF; + } else if (addr == FROMHOST_OFFSET1) { + return htifstate->env->mfromhost & 0xFFFFFFFF; + } else if (addr == FROMHOST_OFFSET2) { + return (htifstate->env->mfromhost >> 32) & 0xFFFFFFFF; + } else { + qemu_log("Invalid htif read: address %016" PRIx64 "\n", + (uint64_t)addr); + return 0; + } +} + +/* CPU wrote to an HTIF register */ +static void htif_mm_write(void *opaque, 
hwaddr addr, + uint64_t value, unsigned size) +{ + HTIFState *htifstate = opaque; + if (addr == TOHOST_OFFSET1) { + if (htifstate->env->mtohost == 0x0) { + htifstate->allow_tohost = 1; + htifstate->env->mtohost = value & 0xFFFFFFFF; + } else { + htifstate->allow_tohost = 0; + } + } else if (addr == TOHOST_OFFSET2) { + if (htifstate->allow_tohost) { + htifstate->env->mtohost |= value << 32; + htif_handle_tohost_write(htifstate, htifstate->env->mtohost); + } + } else if (addr == FROMHOST_OFFSET1) { + htifstate->fromhost_inprogress = 1; + htifstate->env->mfromhost = value & 0xFFFFFFFF; + } else if (addr == FROMHOST_OFFSET2) { + htifstate->env->mfromhost |= value << 32; + htifstate->fromhost_inprogress = 0; + } else { + qemu_log("Invalid htif write: address %016" PRIx64 "\n", + (uint64_t)addr); + } +} + +static const MemoryRegionOps htif_mm_ops = { + .read = htif_mm_read, + .write = htif_mm_write, +}; + +HTIFState *htif_mm_init(MemoryRegion *address_space, MemoryRegion *main_mem, + CPURISCVState *env, Chardev *chr) +{ + uint64_t base = MIN(tohost_addr, fromhost_addr); + uint64_t size = MAX(tohost_addr + 8, fromhost_addr + 8) - base; + uint64_t tohost_offset = tohost_addr - base; + uint64_t fromhost_offset = fromhost_addr - base; + + HTIFState *s = g_malloc0(sizeof(HTIFState)); + s->address_space = address_space; + s->main_mem = main_mem; + s->main_mem_ram_ptr = memory_region_get_ram_ptr(main_mem); + s->env = env; + s->tohost_offset = tohost_offset; + s->fromhost_offset = fromhost_offset; + s->pending_read = 0; + s->allow_tohost = 0; + s->fromhost_inprogress = 0; + qemu_chr_fe_init(&s->chr, chr, &error_abort); + qemu_chr_fe_set_handlers(&s->chr, htif_can_recv, htif_recv, htif_event, + htif_be_change, s, NULL, true); + if (address_symbol_set == 3) { + memory_region_init_io(&s->mmio, NULL, &htif_mm_ops, s, + TYPE_HTIF_UART, size); + memory_region_add_subregion_overlap(address_space, base, + &s->mmio, 1); + } + + return s; +} diff --git a/hw/char/sclpconsole-lm.c b/hw/char/sclpconsole-lm.c new file mode 100644 index 000000000..b9e9b2d45 --- /dev/null +++ b/hw/char/sclpconsole-lm.c @@ -0,0 +1,373 @@ +/* + * SCLP event types + * Operations Command - Line Mode input + * Message - Line Mode output + * + * Copyright IBM, Corp. 2013 + * + * Authors: + * Heinz Graalfs + * + * This work is licensed under the terms of the GNU GPL, version 2 or (at your + * option) any later version. See the COPYING file in the top-level directory. 
+ * + */ + +#include "qemu/osdep.h" +#include "qemu/thread.h" +#include "qemu/error-report.h" +#include "qemu/module.h" +#include "chardev/char-fe.h" + +#include "hw/s390x/sclp.h" +#include "migration/vmstate.h" +#include "hw/s390x/event-facility.h" +#include "hw/qdev-properties.h" +#include "hw/qdev-properties-system.h" +#include "hw/s390x/ebcdic.h" +#include "qom/object.h" + +#define SIZE_BUFFER 4096 +#define NEWLINE "\n" + +typedef struct OprtnsCommand { + EventBufferHeader header; + MDMSU message_unit; + char data[]; +} QEMU_PACKED OprtnsCommand; + +/* max size for line-mode data in 4K SCCB page */ +#define SIZE_CONSOLE_BUFFER (SCCB_DATA_LEN - sizeof(OprtnsCommand)) + +struct SCLPConsoleLM { + SCLPEvent event; + CharBackend chr; + bool echo; /* immediate echo of input if true */ + uint32_t write_errors; /* errors writing to char layer */ + uint32_t length; /* length of byte stream in buffer */ + uint8_t buf[SIZE_CONSOLE_BUFFER]; +}; +typedef struct SCLPConsoleLM SCLPConsoleLM; + +#define TYPE_SCLPLM_CONSOLE "sclplmconsole" +DECLARE_INSTANCE_CHECKER(SCLPConsoleLM, SCLPLM_CONSOLE, + TYPE_SCLPLM_CONSOLE) + +/* +* Character layer call-back functions + * + * Allow 1 character at a time + * + * Accumulate bytes from character layer in console buffer, + * event_pending is set when a newline character is encountered + * + * The maximum command line length is limited by the maximum + * space available in an SCCB. Line mode console input is sent + * truncated to the guest in case it doesn't fit into the SCCB. + */ + +static int chr_can_read(void *opaque) +{ + SCLPConsoleLM *scon = opaque; + + if (scon->event.event_pending) { + return 0; + } + return 1; +} + +static void chr_read(void *opaque, const uint8_t *buf, int size) +{ + SCLPConsoleLM *scon = opaque; + + assert(size == 1); + + if (*buf == '\r' || *buf == '\n') { + scon->event.event_pending = true; + sclp_service_interrupt(0); + return; + } + if (scon->length == SIZE_CONSOLE_BUFFER) { + /* Eat the character, but still process CR and LF. */ + return; + } + scon->buf[scon->length] = *buf; + scon->length += 1; + if (scon->echo) { + /* XXX this blocks entire thread. 
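+         * (qemu_chr_fe_write_all() keeps retrying until the backend
+         * has accepted every byte, so a stalled chardev stalls the
+         * whole main loop.)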
Rewrite to use + * qemu_chr_fe_write and background I/O callbacks */ + qemu_chr_fe_write_all(&scon->chr, buf, size); + } +} + +/* functions to be called by event facility */ + +static bool can_handle_event(uint8_t type) +{ + return type == SCLP_EVENT_MESSAGE || type == SCLP_EVENT_PMSGCMD; +} + +static sccb_mask_t send_mask(void) +{ + return SCLP_EVENT_MASK_OP_CMD | SCLP_EVENT_MASK_PMSGCMD; +} + +static sccb_mask_t receive_mask(void) +{ + return SCLP_EVENT_MASK_MSG | SCLP_EVENT_MASK_PMSGCMD; +} + +/* + * Triggered by SCLP's read_event_data + * - convert ASCII byte stream to EBCDIC and + * - copy converted data into provided (SCLP) buffer + */ +static int get_console_data(SCLPEvent *event, uint8_t *buf, size_t *size, + int avail) +{ + int len; + + SCLPConsoleLM *cons = SCLPLM_CONSOLE(event); + + len = cons->length; + /* data need to fit into provided SCLP buffer */ + if (len > avail) { + return 1; + } + + ebcdic_put(buf, (char *)&cons->buf, len); + *size = len; + cons->length = 0; + /* data provided and no more data pending */ + event->event_pending = false; + qemu_notify_event(); + return 0; +} + +static int read_event_data(SCLPEvent *event, EventBufferHeader *evt_buf_hdr, + int *slen) +{ + int avail, rc; + size_t src_len; + uint8_t *to; + OprtnsCommand *oc = (OprtnsCommand *) evt_buf_hdr; + + if (!event->event_pending) { + /* no data pending */ + return 0; + } + + to = (uint8_t *)&oc->data; + avail = *slen - sizeof(OprtnsCommand); + rc = get_console_data(event, to, &src_len, avail); + if (rc) { + /* data didn't fit, try next SCCB */ + return 1; + } + + oc->message_unit.mdmsu.gds_id = GDS_ID_MDSMU; + oc->message_unit.mdmsu.length = cpu_to_be16(sizeof(struct MDMSU)); + + oc->message_unit.cpmsu.gds_id = GDS_ID_CPMSU; + oc->message_unit.cpmsu.length = + cpu_to_be16(sizeof(struct MDMSU) - sizeof(GdsVector)); + + oc->message_unit.text_command.gds_id = GDS_ID_TEXTCMD; + oc->message_unit.text_command.length = + cpu_to_be16(sizeof(struct MDMSU) - (2 * sizeof(GdsVector))); + + oc->message_unit.self_def_text_message.key = GDS_KEY_SELFDEFTEXTMSG; + oc->message_unit.self_def_text_message.length = + cpu_to_be16(sizeof(struct MDMSU) - (3 * sizeof(GdsVector))); + + oc->message_unit.text_message.key = GDS_KEY_TEXTMSG; + oc->message_unit.text_message.length = + cpu_to_be16(sizeof(GdsSubvector) + src_len); + + oc->header.length = cpu_to_be16(sizeof(OprtnsCommand) + src_len); + oc->header.type = SCLP_EVENT_OPRTNS_COMMAND; + *slen = avail - src_len; + + return 1; +} + +/* + * Triggered by SCLP's write_event_data + * - write console data to character layer + * returns < 0 if an error occurred + */ +static int write_console_data(SCLPEvent *event, const uint8_t *buf, int len) +{ + SCLPConsoleLM *scon = SCLPLM_CONSOLE(event); + + if (!qemu_chr_fe_backend_connected(&scon->chr)) { + /* If there's no backend, we can just say we consumed all data. */ + return len; + } + + /* XXX this blocks entire thread. 
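+     * (The caller, write_event_data(), merely counts a failure here in
+     * write_errors and still reports normal SCLP completion.)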
Rewrite to use + * qemu_chr_fe_write and background I/O callbacks */ + return qemu_chr_fe_write_all(&scon->chr, buf, len); +} + +static int process_mdb(SCLPEvent *event, MDBO *mdbo) +{ + int rc; + int len; + uint8_t buffer[SIZE_BUFFER]; + + len = be16_to_cpu(mdbo->length); + len -= sizeof(mdbo->length) + sizeof(mdbo->type) + + sizeof(mdbo->mto.line_type_flags) + + sizeof(mdbo->mto.alarm_control) + + sizeof(mdbo->mto._reserved); + + assert(len <= SIZE_BUFFER); + + /* convert EBCDIC SCLP contents to ASCII console message */ + ascii_put(buffer, mdbo->mto.message, len); + rc = write_console_data(event, (uint8_t *)NEWLINE, 1); + if (rc < 0) { + return rc; + } + return write_console_data(event, buffer, len); +} + +static int write_event_data(SCLPEvent *event, EventBufferHeader *ebh) +{ + int len; + int written; + int errors = 0; + MDBO *mdbo; + SclpMsg *data = (SclpMsg *) ebh; + SCLPConsoleLM *scon = SCLPLM_CONSOLE(event); + + len = be16_to_cpu(data->mdb.header.length); + if (len < sizeof(data->mdb.header)) { + return SCLP_RC_INCONSISTENT_LENGTHS; + } + len -= sizeof(data->mdb.header); + + /* first check message buffers */ + mdbo = data->mdb.mdbo; + while (len > 0) { + if (be16_to_cpu(mdbo->length) > len + || be16_to_cpu(mdbo->length) == 0) { + return SCLP_RC_INCONSISTENT_LENGTHS; + } + len -= be16_to_cpu(mdbo->length); + mdbo = (void *) mdbo + be16_to_cpu(mdbo->length); + } + + /* then execute */ + len = be16_to_cpu(data->mdb.header.length) - sizeof(data->mdb.header); + mdbo = data->mdb.mdbo; + while (len > 0) { + switch (be16_to_cpu(mdbo->type)) { + case MESSAGE_TEXT: + /* message text object */ + written = process_mdb(event, mdbo); + if (written < 0) { + /* character layer error */ + errors++; + } + break; + default: /* ignore */ + break; + } + len -= be16_to_cpu(mdbo->length); + mdbo = (void *) mdbo + be16_to_cpu(mdbo->length); + } + if (errors) { + scon->write_errors += errors; + } + data->header.flags = SCLP_EVENT_BUFFER_ACCEPTED; + + return SCLP_RC_NORMAL_COMPLETION; +} + +/* functions for live migration */ + +static const VMStateDescription vmstate_sclplmconsole = { + .name = "sclplmconsole", + .version_id = 0, + .minimum_version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_BOOL(event.event_pending, SCLPConsoleLM), + VMSTATE_UINT32(write_errors, SCLPConsoleLM), + VMSTATE_UINT32(length, SCLPConsoleLM), + VMSTATE_UINT8_ARRAY(buf, SCLPConsoleLM, SIZE_CONSOLE_BUFFER), + VMSTATE_END_OF_LIST() + } +}; + +/* qemu object creation and initialization functions */ + +/* tell character layer our call-back functions */ + +static int console_init(SCLPEvent *event) +{ + static bool console_available; + + SCLPConsoleLM *scon = SCLPLM_CONSOLE(event); + + if (console_available) { + error_report("Multiple line-mode operator consoles are not supported"); + return -1; + } + console_available = true; + + qemu_chr_fe_set_handlers(&scon->chr, chr_can_read, + chr_read, NULL, NULL, scon, NULL, true); + + return 0; +} + +static void console_reset(DeviceState *dev) +{ + SCLPEvent *event = SCLP_EVENT(dev); + SCLPConsoleLM *scon = SCLPLM_CONSOLE(event); + + event->event_pending = false; + scon->length = 0; + scon->write_errors = 0; +} + +static Property console_properties[] = { + DEFINE_PROP_CHR("chardev", SCLPConsoleLM, chr), + DEFINE_PROP_UINT32("write_errors", SCLPConsoleLM, write_errors, 0), + DEFINE_PROP_BOOL("echo", SCLPConsoleLM, echo, true), + DEFINE_PROP_END_OF_LIST(), +}; + +static void console_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + SCLPEventClass *ec = 
SCLP_EVENT_CLASS(klass); + + device_class_set_props(dc, console_properties); + dc->reset = console_reset; + dc->vmsd = &vmstate_sclplmconsole; + ec->init = console_init; + ec->get_send_mask = send_mask; + ec->get_receive_mask = receive_mask; + ec->can_handle_event = can_handle_event; + ec->read_event_data = read_event_data; + ec->write_event_data = write_event_data; + set_bit(DEVICE_CATEGORY_INPUT, dc->categories); +} + +static const TypeInfo sclp_console_info = { + .name = TYPE_SCLPLM_CONSOLE, + .parent = TYPE_SCLP_EVENT, + .instance_size = sizeof(SCLPConsoleLM), + .class_init = console_class_init, + .class_size = sizeof(SCLPEventClass), +}; + +static void register_types(void) +{ + type_register_static(&sclp_console_info); +} + +type_init(register_types) diff --git a/hw/char/sclpconsole.c b/hw/char/sclpconsole.c new file mode 100644 index 000000000..c36b57222 --- /dev/null +++ b/hw/char/sclpconsole.c @@ -0,0 +1,289 @@ +/* + * SCLP event type + * Ascii Console Data (VT220 Console) + * + * Copyright IBM, Corp. 2012 + * + * Authors: + * Heinz Graalfs + * + * This work is licensed under the terms of the GNU GPL, version 2 or (at your + * option) any later version. See the COPYING file in the top-level directory. + * + */ + +#include "qemu/osdep.h" +#include "qemu/thread.h" +#include "qemu/error-report.h" +#include "qemu/module.h" + +#include "hw/s390x/sclp.h" +#include "migration/vmstate.h" +#include "hw/qdev-properties.h" +#include "hw/qdev-properties-system.h" +#include "hw/s390x/event-facility.h" +#include "chardev/char-fe.h" +#include "qom/object.h" + +typedef struct ASCIIConsoleData { + EventBufferHeader ebh; + char data[]; +} QEMU_PACKED ASCIIConsoleData; + +/* max size for ASCII data in 4K SCCB page */ +#define SIZE_BUFFER_VT220 4080 + +struct SCLPConsole { + SCLPEvent event; + CharBackend chr; + uint8_t iov[SIZE_BUFFER_VT220]; + uint32_t iov_sclp; /* offset in buf for SCLP read operation */ + uint32_t iov_bs; /* offset in buf for char layer read operation */ + uint32_t iov_data_len; /* length of byte stream in buffer */ + uint32_t iov_sclp_rest; /* length of byte stream not read via SCLP */ + bool notify; /* qemu_notify_event() req'd if true */ +}; +typedef struct SCLPConsole SCLPConsole; + +#define TYPE_SCLP_CONSOLE "sclpconsole" +DECLARE_INSTANCE_CHECKER(SCLPConsole, SCLP_CONSOLE, + TYPE_SCLP_CONSOLE) + +/* character layer call-back functions */ + +/* Return number of bytes that fit into iov buffer */ +static int chr_can_read(void *opaque) +{ + SCLPConsole *scon = opaque; + int avail = SIZE_BUFFER_VT220 - scon->iov_data_len; + + if (avail == 0) { + scon->notify = true; + } + return avail; +} + +/* Send data from a char device over to the guest */ +static void chr_read(void *opaque, const uint8_t *buf, int size) +{ + SCLPConsole *scon = opaque; + + assert(scon); + /* read data must fit into current buffer */ + assert(size <= SIZE_BUFFER_VT220 - scon->iov_data_len); + + /* put byte-stream from character layer into buffer */ + memcpy(&scon->iov[scon->iov_bs], buf, size); + scon->iov_data_len += size; + scon->iov_sclp_rest += size; + scon->iov_bs += size; + scon->event.event_pending = true; + sclp_service_interrupt(0); +} + +/* functions to be called by event facility */ + +static bool can_handle_event(uint8_t type) +{ + return type == SCLP_EVENT_ASCII_CONSOLE_DATA; +} + +static sccb_mask_t send_mask(void) +{ + return SCLP_EVENT_MASK_MSG_ASCII; +} + +static sccb_mask_t receive_mask(void) +{ + return SCLP_EVENT_MASK_MSG_ASCII; +} + +/* triggered by SCLP's read_event_data - + * copy 
console data byte-stream into provided (SCLP) buffer + */ +static void get_console_data(SCLPEvent *event, uint8_t *buf, size_t *size, + int avail) +{ + SCLPConsole *cons = SCLP_CONSOLE(event); + + /* first byte is hex 0 saying an ascii string follows */ + *buf++ = '\0'; + avail--; + /* if all data fit into provided SCLP buffer */ + if (avail >= cons->iov_sclp_rest) { + /* copy character byte-stream to SCLP buffer */ + memcpy(buf, &cons->iov[cons->iov_sclp], cons->iov_sclp_rest); + *size = cons->iov_sclp_rest + 1; + cons->iov_sclp = 0; + cons->iov_bs = 0; + cons->iov_data_len = 0; + cons->iov_sclp_rest = 0; + event->event_pending = false; + /* data provided and no more data pending */ + } else { + /* if provided buffer is too small, just copy part */ + memcpy(buf, &cons->iov[cons->iov_sclp], avail); + *size = avail + 1; + cons->iov_sclp_rest -= avail; + cons->iov_sclp += avail; + /* more data pending */ + } + if (cons->notify) { + cons->notify = false; + qemu_notify_event(); + } +} + +static int read_event_data(SCLPEvent *event, EventBufferHeader *evt_buf_hdr, + int *slen) +{ + int avail; + size_t src_len; + uint8_t *to; + ASCIIConsoleData *acd = (ASCIIConsoleData *) evt_buf_hdr; + + if (!event->event_pending) { + /* no data pending */ + return 0; + } + + to = (uint8_t *)&acd->data; + avail = *slen - sizeof(ASCIIConsoleData); + get_console_data(event, to, &src_len, avail); + + acd->ebh.length = cpu_to_be16(sizeof(ASCIIConsoleData) + src_len); + acd->ebh.type = SCLP_EVENT_ASCII_CONSOLE_DATA; + acd->ebh.flags |= SCLP_EVENT_BUFFER_ACCEPTED; + *slen = avail - src_len; + + return 1; +} + +/* triggered by SCLP's write_event_data + * - write console data to character layer + * returns < 0 if an error occurred + */ +static ssize_t write_console_data(SCLPEvent *event, const uint8_t *buf, + size_t len) +{ + SCLPConsole *scon = SCLP_CONSOLE(event); + + if (!qemu_chr_fe_backend_connected(&scon->chr)) { + /* If there's no backend, we can just say we consumed all data. */ + return len; + } + + /* XXX this blocks entire thread. 
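+     * (The stall is at least bounded: a single event buffer never
+     * carries more payload than fits in one 4K SCCB page.)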
Rewrite to use + * qemu_chr_fe_write and background I/O callbacks */ + return qemu_chr_fe_write_all(&scon->chr, buf, len); +} + +static int write_event_data(SCLPEvent *event, EventBufferHeader *evt_buf_hdr) +{ + int rc; + int length; + ssize_t written; + ASCIIConsoleData *acd = (ASCIIConsoleData *) evt_buf_hdr; + + length = be16_to_cpu(evt_buf_hdr->length) - sizeof(EventBufferHeader); + written = write_console_data(event, (uint8_t *)acd->data, length); + + rc = SCLP_RC_NORMAL_COMPLETION; + /* set event buffer accepted flag */ + evt_buf_hdr->flags |= SCLP_EVENT_BUFFER_ACCEPTED; + + /* written will be zero if a pty is not connected - don't treat as error */ + if (written < 0) { + /* event buffer not accepted due to error in character layer */ + evt_buf_hdr->flags &= ~(SCLP_EVENT_BUFFER_ACCEPTED); + rc = SCLP_RC_CONTAINED_EQUIPMENT_CHECK; + } + + return rc; +} + +static const VMStateDescription vmstate_sclpconsole = { + .name = "sclpconsole", + .version_id = 0, + .minimum_version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_BOOL(event.event_pending, SCLPConsole), + VMSTATE_UINT8_ARRAY(iov, SCLPConsole, SIZE_BUFFER_VT220), + VMSTATE_UINT32(iov_sclp, SCLPConsole), + VMSTATE_UINT32(iov_bs, SCLPConsole), + VMSTATE_UINT32(iov_data_len, SCLPConsole), + VMSTATE_UINT32(iov_sclp_rest, SCLPConsole), + VMSTATE_END_OF_LIST() + } +}; + +/* qemu object creation and initialization functions */ + +/* tell character layer our call-back functions */ + +static int console_init(SCLPEvent *event) +{ + static bool console_available; + + SCLPConsole *scon = SCLP_CONSOLE(event); + + if (console_available) { + error_report("Multiple VT220 operator consoles are not supported"); + return -1; + } + console_available = true; + qemu_chr_fe_set_handlers(&scon->chr, chr_can_read, + chr_read, NULL, NULL, scon, NULL, true); + + return 0; +} + +static void console_reset(DeviceState *dev) +{ + SCLPEvent *event = SCLP_EVENT(dev); + SCLPConsole *scon = SCLP_CONSOLE(event); + + event->event_pending = false; + scon->iov_sclp = 0; + scon->iov_bs = 0; + scon->iov_data_len = 0; + scon->iov_sclp_rest = 0; + scon->notify = false; +} + +static Property console_properties[] = { + DEFINE_PROP_CHR("chardev", SCLPConsole, chr), + DEFINE_PROP_END_OF_LIST(), +}; + +static void console_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + SCLPEventClass *ec = SCLP_EVENT_CLASS(klass); + + device_class_set_props(dc, console_properties); + dc->reset = console_reset; + dc->vmsd = &vmstate_sclpconsole; + ec->init = console_init; + ec->get_send_mask = send_mask; + ec->get_receive_mask = receive_mask; + ec->can_handle_event = can_handle_event; + ec->read_event_data = read_event_data; + ec->write_event_data = write_event_data; + set_bit(DEVICE_CATEGORY_INPUT, dc->categories); +} + +static const TypeInfo sclp_console_info = { + .name = TYPE_SCLP_CONSOLE, + .parent = TYPE_SCLP_EVENT, + .instance_size = sizeof(SCLPConsole), + .class_init = console_class_init, + .class_size = sizeof(SCLPEventClass), +}; + +static void register_types(void) +{ + type_register_static(&sclp_console_info); +} + +type_init(register_types) diff --git a/hw/char/serial-isa.c b/hw/char/serial-isa.c new file mode 100644 index 000000000..1b8b30307 --- /dev/null +++ b/hw/char/serial-isa.c @@ -0,0 +1,182 @@ +/* + * QEMU 16550A UART emulation + * + * Copyright (c) 2003-2004 Fabrice Bellard + * Copyright (c) 2008 Citrix Systems, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/module.h" +#include "sysemu/sysemu.h" +#include "hw/acpi/aml-build.h" +#include "hw/char/serial.h" +#include "hw/isa/isa.h" +#include "hw/qdev-properties.h" +#include "migration/vmstate.h" +#include "qom/object.h" + +OBJECT_DECLARE_SIMPLE_TYPE(ISASerialState, ISA_SERIAL) + +struct ISASerialState { + ISADevice parent_obj; + + uint32_t index; + uint32_t iobase; + uint32_t isairq; + SerialState state; +}; + +static const int isa_serial_io[MAX_ISA_SERIAL_PORTS] = { + 0x3f8, 0x2f8, 0x3e8, 0x2e8 +}; +static const int isa_serial_irq[MAX_ISA_SERIAL_PORTS] = { + 4, 3, 4, 3 +}; + +static void serial_isa_realizefn(DeviceState *dev, Error **errp) +{ + static int index; + ISADevice *isadev = ISA_DEVICE(dev); + ISASerialState *isa = ISA_SERIAL(dev); + SerialState *s = &isa->state; + + if (isa->index == -1) { + isa->index = index; + } + if (isa->index >= MAX_ISA_SERIAL_PORTS) { + error_setg(errp, "Max. 
supported number of ISA serial ports is %d.", + MAX_ISA_SERIAL_PORTS); + return; + } + if (isa->iobase == -1) { + isa->iobase = isa_serial_io[isa->index]; + } + if (isa->isairq == -1) { + isa->isairq = isa_serial_irq[isa->index]; + } + index++; + + isa_init_irq(isadev, &s->irq, isa->isairq); + qdev_realize(DEVICE(s), NULL, errp); + qdev_set_legacy_instance_id(dev, isa->iobase, 3); + + memory_region_init_io(&s->io, OBJECT(isa), &serial_io_ops, s, "serial", 8); + isa_register_ioport(isadev, &s->io, isa->iobase); +} + +static void serial_isa_build_aml(ISADevice *isadev, Aml *scope) +{ + ISASerialState *isa = ISA_SERIAL(isadev); + Aml *dev; + Aml *crs; + + crs = aml_resource_template(); + aml_append(crs, aml_io(AML_DECODE16, isa->iobase, isa->iobase, 0x00, 0x08)); + aml_append(crs, aml_irq_no_flags(isa->isairq)); + + dev = aml_device("COM%d", isa->index + 1); + aml_append(dev, aml_name_decl("_HID", aml_eisaid("PNP0501"))); + aml_append(dev, aml_name_decl("_UID", aml_int(isa->index + 1))); + aml_append(dev, aml_name_decl("_STA", aml_int(0xf))); + aml_append(dev, aml_name_decl("_CRS", crs)); + + aml_append(scope, dev); +} + +static const VMStateDescription vmstate_isa_serial = { + .name = "serial", + .version_id = 3, + .minimum_version_id = 2, + .fields = (VMStateField[]) { + VMSTATE_STRUCT(state, ISASerialState, 0, vmstate_serial, SerialState), + VMSTATE_END_OF_LIST() + } +}; + +static Property serial_isa_properties[] = { + DEFINE_PROP_UINT32("index", ISASerialState, index, -1), + DEFINE_PROP_UINT32("iobase", ISASerialState, iobase, -1), + DEFINE_PROP_UINT32("irq", ISASerialState, isairq, -1), + DEFINE_PROP_END_OF_LIST(), +}; + +static void serial_isa_class_initfn(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + ISADeviceClass *isa = ISA_DEVICE_CLASS(klass); + + dc->realize = serial_isa_realizefn; + dc->vmsd = &vmstate_isa_serial; + isa->build_aml = serial_isa_build_aml; + device_class_set_props(dc, serial_isa_properties); + set_bit(DEVICE_CATEGORY_INPUT, dc->categories); +} + +static void serial_isa_initfn(Object *o) +{ + ISASerialState *self = ISA_SERIAL(o); + + object_initialize_child(o, "serial", &self->state, TYPE_SERIAL); + + qdev_alias_all_properties(DEVICE(&self->state), o); +} + +static const TypeInfo serial_isa_info = { + .name = TYPE_ISA_SERIAL, + .parent = TYPE_ISA_DEVICE, + .instance_size = sizeof(ISASerialState), + .instance_init = serial_isa_initfn, + .class_init = serial_isa_class_initfn, +}; + +static void serial_register_types(void) +{ + type_register_static(&serial_isa_info); +} + +type_init(serial_register_types) + +static void serial_isa_init(ISABus *bus, int index, Chardev *chr) +{ + DeviceState *dev; + ISADevice *isadev; + + isadev = isa_new(TYPE_ISA_SERIAL); + dev = DEVICE(isadev); + qdev_prop_set_uint32(dev, "index", index); + qdev_prop_set_chr(dev, "chardev", chr); + isa_realize_and_unref(isadev, bus, &error_fatal); +} + +void serial_hds_isa_init(ISABus *bus, int from, int to) +{ + int i; + + assert(from >= 0); + assert(to <= MAX_ISA_SERIAL_PORTS); + + for (i = from; i < to; ++i) { + if (serial_hd(i)) { + serial_isa_init(bus, i, serial_hd(i)); + } + } +} diff --git a/hw/char/serial-pci-multi.c b/hw/char/serial-pci-multi.c new file mode 100644 index 000000000..3a9f96c2d --- /dev/null +++ b/hw/char/serial-pci-multi.c @@ -0,0 +1,222 @@ +/* + * QEMU 16550A multi UART emulation + * + * SPDX-License-Identifier: MIT + * + * Copyright (c) 2003-2004 Fabrice Bellard + * Copyright (c) 2008 Citrix Systems, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +/* see docs/specs/pci-serial.txt */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "hw/char/serial.h" +#include "hw/irq.h" +#include "hw/pci/pci.h" +#include "hw/qdev-properties.h" +#include "hw/qdev-properties-system.h" +#include "migration/vmstate.h" + +#define PCI_SERIAL_MAX_PORTS 4 + +typedef struct PCIMultiSerialState { + PCIDevice dev; + MemoryRegion iobar; + uint32_t ports; + char *name[PCI_SERIAL_MAX_PORTS]; + SerialState state[PCI_SERIAL_MAX_PORTS]; + uint32_t level[PCI_SERIAL_MAX_PORTS]; + qemu_irq *irqs; + uint8_t prog_if; +} PCIMultiSerialState; + +static void multi_serial_pci_exit(PCIDevice *dev) +{ + PCIMultiSerialState *pci = DO_UPCAST(PCIMultiSerialState, dev, dev); + SerialState *s; + int i; + + for (i = 0; i < pci->ports; i++) { + s = pci->state + i; + qdev_unrealize(DEVICE(s)); + memory_region_del_subregion(&pci->iobar, &s->io); + g_free(pci->name[i]); + } + qemu_free_irqs(pci->irqs, pci->ports); +} + +static void multi_serial_irq_mux(void *opaque, int n, int level) +{ + PCIMultiSerialState *pci = opaque; + int i, pending = 0; + + pci->level[n] = level; + for (i = 0; i < pci->ports; i++) { + if (pci->level[i]) { + pending = 1; + } + } + pci_set_irq(&pci->dev, pending); +} + +static size_t multi_serial_get_port_count(PCIDeviceClass *pc) +{ + switch (pc->device_id) { + case 0x0003: + return 2; + case 0x0004: + return 4; + } + + g_assert_not_reached(); +} + + +static void multi_serial_pci_realize(PCIDevice *dev, Error **errp) +{ + PCIDeviceClass *pc = PCI_DEVICE_GET_CLASS(dev); + PCIMultiSerialState *pci = DO_UPCAST(PCIMultiSerialState, dev, dev); + SerialState *s; + size_t i, nports = multi_serial_get_port_count(pc); + + pci->dev.config[PCI_CLASS_PROG] = pci->prog_if; + pci->dev.config[PCI_INTERRUPT_PIN] = 0x01; + memory_region_init(&pci->iobar, OBJECT(pci), "multiserial", 8 * nports); + pci_register_bar(&pci->dev, 0, PCI_BASE_ADDRESS_SPACE_IO, &pci->iobar); + pci->irqs = qemu_allocate_irqs(multi_serial_irq_mux, pci, nports); + + for (i = 0; i < nports; i++) { + s = pci->state + i; + if (!qdev_realize(DEVICE(s), NULL, errp)) { + multi_serial_pci_exit(dev); + return; + } + s->irq = pci->irqs[i]; + pci->name[i] = g_strdup_printf("uart #%zu", i + 1); + memory_region_init_io(&s->io, OBJECT(pci), &serial_io_ops, s, + pci->name[i], 8); + memory_region_add_subregion(&pci->iobar, 8 * i, &s->io); + pci->ports++; + } +} + +static const VMStateDescription vmstate_pci_multi_serial = { + 
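+    /*
+     * Shared by the 2x and 4x variants: all PCI_SERIAL_MAX_PORTS
+     * state/level slots are migrated even when only two ports are
+     * realized, so both devices use a single stream format.
+     */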
.name = "pci-serial-multi", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_PCI_DEVICE(dev, PCIMultiSerialState), + VMSTATE_STRUCT_ARRAY(state, PCIMultiSerialState, PCI_SERIAL_MAX_PORTS, + 0, vmstate_serial, SerialState), + VMSTATE_UINT32_ARRAY(level, PCIMultiSerialState, PCI_SERIAL_MAX_PORTS), + VMSTATE_END_OF_LIST() + } +}; + +static Property multi_2x_serial_pci_properties[] = { + DEFINE_PROP_CHR("chardev1", PCIMultiSerialState, state[0].chr), + DEFINE_PROP_CHR("chardev2", PCIMultiSerialState, state[1].chr), + DEFINE_PROP_UINT8("prog_if", PCIMultiSerialState, prog_if, 0x02), + DEFINE_PROP_END_OF_LIST(), +}; + +static Property multi_4x_serial_pci_properties[] = { + DEFINE_PROP_CHR("chardev1", PCIMultiSerialState, state[0].chr), + DEFINE_PROP_CHR("chardev2", PCIMultiSerialState, state[1].chr), + DEFINE_PROP_CHR("chardev3", PCIMultiSerialState, state[2].chr), + DEFINE_PROP_CHR("chardev4", PCIMultiSerialState, state[3].chr), + DEFINE_PROP_UINT8("prog_if", PCIMultiSerialState, prog_if, 0x02), + DEFINE_PROP_END_OF_LIST(), +}; + +static void multi_2x_serial_pci_class_initfn(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *pc = PCI_DEVICE_CLASS(klass); + pc->realize = multi_serial_pci_realize; + pc->exit = multi_serial_pci_exit; + pc->vendor_id = PCI_VENDOR_ID_REDHAT; + pc->device_id = PCI_DEVICE_ID_REDHAT_SERIAL2; + pc->revision = 1; + pc->class_id = PCI_CLASS_COMMUNICATION_SERIAL; + dc->vmsd = &vmstate_pci_multi_serial; + device_class_set_props(dc, multi_2x_serial_pci_properties); + set_bit(DEVICE_CATEGORY_INPUT, dc->categories); +} + +static void multi_4x_serial_pci_class_initfn(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *pc = PCI_DEVICE_CLASS(klass); + pc->realize = multi_serial_pci_realize; + pc->exit = multi_serial_pci_exit; + pc->vendor_id = PCI_VENDOR_ID_REDHAT; + pc->device_id = PCI_DEVICE_ID_REDHAT_SERIAL4; + pc->revision = 1; + pc->class_id = PCI_CLASS_COMMUNICATION_SERIAL; + dc->vmsd = &vmstate_pci_multi_serial; + device_class_set_props(dc, multi_4x_serial_pci_properties); + set_bit(DEVICE_CATEGORY_INPUT, dc->categories); +} + +static void multi_serial_init(Object *o) +{ + PCIDevice *dev = PCI_DEVICE(o); + PCIMultiSerialState *pms = DO_UPCAST(PCIMultiSerialState, dev, dev); + size_t i, nports = multi_serial_get_port_count(PCI_DEVICE_GET_CLASS(dev)); + + for (i = 0; i < nports; i++) { + object_initialize_child(o, "serial[*]", &pms->state[i], TYPE_SERIAL); + } +} + +static const TypeInfo multi_2x_serial_pci_info = { + .name = "pci-serial-2x", + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(PCIMultiSerialState), + .instance_init = multi_serial_init, + .class_init = multi_2x_serial_pci_class_initfn, + .interfaces = (InterfaceInfo[]) { + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { }, + }, +}; + +static const TypeInfo multi_4x_serial_pci_info = { + .name = "pci-serial-4x", + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(PCIMultiSerialState), + .instance_init = multi_serial_init, + .class_init = multi_4x_serial_pci_class_initfn, + .interfaces = (InterfaceInfo[]) { + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { }, + }, +}; + +static void multi_serial_pci_register_types(void) +{ + type_register_static(&multi_2x_serial_pci_info); + type_register_static(&multi_4x_serial_pci_info); +} + +type_init(multi_serial_pci_register_types) diff --git a/hw/char/serial-pci.c b/hw/char/serial-pci.c new file mode 100644 index 000000000..93d6f9924 --- /dev/null +++ 
b/hw/char/serial-pci.c @@ -0,0 +1,130 @@ +/* + * QEMU 16550A UART emulation + * + * Copyright (c) 2003-2004 Fabrice Bellard + * Copyright (c) 2008 Citrix Systems, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +/* see docs/specs/pci-serial.txt */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/module.h" +#include "hw/char/serial.h" +#include "hw/irq.h" +#include "hw/pci/pci.h" +#include "hw/qdev-properties.h" +#include "migration/vmstate.h" +#include "qom/object.h" + +struct PCISerialState { + PCIDevice dev; + SerialState state; + uint8_t prog_if; +}; + +#define TYPE_PCI_SERIAL "pci-serial" +OBJECT_DECLARE_SIMPLE_TYPE(PCISerialState, PCI_SERIAL) + +static void serial_pci_realize(PCIDevice *dev, Error **errp) +{ + PCISerialState *pci = DO_UPCAST(PCISerialState, dev, dev); + SerialState *s = &pci->state; + + if (!qdev_realize(DEVICE(s), NULL, errp)) { + return; + } + + pci->dev.config[PCI_CLASS_PROG] = pci->prog_if; + pci->dev.config[PCI_INTERRUPT_PIN] = 0x01; + s->irq = pci_allocate_irq(&pci->dev); + + memory_region_init_io(&s->io, OBJECT(pci), &serial_io_ops, s, "serial", 8); + pci_register_bar(&pci->dev, 0, PCI_BASE_ADDRESS_SPACE_IO, &s->io); +} + +static void serial_pci_exit(PCIDevice *dev) +{ + PCISerialState *pci = DO_UPCAST(PCISerialState, dev, dev); + SerialState *s = &pci->state; + + qdev_unrealize(DEVICE(s)); + qemu_free_irq(s->irq); +} + +static const VMStateDescription vmstate_pci_serial = { + .name = "pci-serial", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_PCI_DEVICE(dev, PCISerialState), + VMSTATE_STRUCT(state, PCISerialState, 0, vmstate_serial, SerialState), + VMSTATE_END_OF_LIST() + } +}; + +static Property serial_pci_properties[] = { + DEFINE_PROP_UINT8("prog_if", PCISerialState, prog_if, 0x02), + DEFINE_PROP_END_OF_LIST(), +}; + +static void serial_pci_class_initfn(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *pc = PCI_DEVICE_CLASS(klass); + pc->realize = serial_pci_realize; + pc->exit = serial_pci_exit; + pc->vendor_id = PCI_VENDOR_ID_REDHAT; + pc->device_id = PCI_DEVICE_ID_REDHAT_SERIAL; + pc->revision = 1; + pc->class_id = PCI_CLASS_COMMUNICATION_SERIAL; + dc->vmsd = &vmstate_pci_serial; + device_class_set_props(dc, serial_pci_properties); + set_bit(DEVICE_CATEGORY_INPUT, dc->categories); +} + +static void serial_pci_init(Object *o) +{ + PCISerialState *ps = PCI_SERIAL(o); + + object_initialize_child(o, "serial", 
&ps->state, TYPE_SERIAL); + + qdev_alias_all_properties(DEVICE(&ps->state), o); +} + +static const TypeInfo serial_pci_info = { + .name = TYPE_PCI_SERIAL, + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(PCISerialState), + .instance_init = serial_pci_init, + .class_init = serial_pci_class_initfn, + .interfaces = (InterfaceInfo[]) { + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { }, + }, +}; + +static void serial_pci_register_types(void) +{ + type_register_static(&serial_pci_info); +} + +type_init(serial_pci_register_types) diff --git a/hw/char/serial.c b/hw/char/serial.c new file mode 100644 index 000000000..7061aacbc --- /dev/null +++ b/hw/char/serial.c @@ -0,0 +1,1127 @@ +/* + * QEMU 16550A UART emulation + * + * Copyright (c) 2003-2004 Fabrice Bellard + * Copyright (c) 2008 Citrix Systems, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "qemu/bitops.h" +#include "hw/char/serial.h" +#include "hw/irq.h" +#include "migration/vmstate.h" +#include "chardev/char-serial.h" +#include "qapi/error.h" +#include "qemu/timer.h" +#include "sysemu/reset.h" +#include "sysemu/runstate.h" +#include "qemu/error-report.h" +#include "trace.h" +#include "hw/qdev-properties.h" +#include "hw/qdev-properties-system.h" + +#define UART_LCR_DLAB 0x80 /* Divisor latch access bit */ + +#define UART_IER_MSI 0x08 /* Enable Modem status interrupt */ +#define UART_IER_RLSI 0x04 /* Enable receiver line status interrupt */ +#define UART_IER_THRI 0x02 /* Enable Transmitter holding register int. 
*/
+#define UART_IER_RDI        0x01    /* Enable receiver data interrupt */
+
+#define UART_IIR_NO_INT     0x01    /* No interrupts pending */
+#define UART_IIR_ID         0x06    /* Mask for the interrupt ID */
+
+#define UART_IIR_MSI        0x00    /* Modem status interrupt */
+#define UART_IIR_THRI       0x02    /* Transmitter holding register empty */
+#define UART_IIR_RDI        0x04    /* Receiver data interrupt */
+#define UART_IIR_RLSI       0x06    /* Receiver line status interrupt */
+#define UART_IIR_CTI        0x0C    /* Character Timeout Indication */
+
+#define UART_IIR_FENF       0x80    /* FIFO enabled, but not functioning */
+#define UART_IIR_FE         0xC0    /* FIFO enabled */
+
+/*
+ * These are the definitions for the Modem Control Register
+ */
+#define UART_MCR_LOOP       0x10    /* Enable loopback test mode */
+#define UART_MCR_OUT2       0x08    /* Out2 complement */
+#define UART_MCR_OUT1       0x04    /* Out1 complement */
+#define UART_MCR_RTS        0x02    /* RTS complement */
+#define UART_MCR_DTR        0x01    /* DTR complement */
+
+/*
+ * These are the definitions for the Modem Status Register
+ */
+#define UART_MSR_DCD        0x80    /* Data Carrier Detect */
+#define UART_MSR_RI         0x40    /* Ring Indicator */
+#define UART_MSR_DSR        0x20    /* Data Set Ready */
+#define UART_MSR_CTS        0x10    /* Clear to Send */
+#define UART_MSR_DDCD       0x08    /* Delta DCD */
+#define UART_MSR_TERI       0x04    /* Trailing edge ring indicator */
+#define UART_MSR_DDSR       0x02    /* Delta DSR */
+#define UART_MSR_DCTS       0x01    /* Delta CTS */
+#define UART_MSR_ANY_DELTA  0x0F    /* Any of the delta bits! */
+
+#define UART_LSR_TEMT       0x40    /* Transmitter empty */
+#define UART_LSR_THRE       0x20    /* Transmit-hold-register empty */
+#define UART_LSR_BI         0x10    /* Break interrupt indicator */
+#define UART_LSR_FE         0x08    /* Frame error indicator */
+#define UART_LSR_PE         0x04    /* Parity error indicator */
+#define UART_LSR_OE         0x02    /* Overrun error indicator */
+#define UART_LSR_DR         0x01    /* Receiver data ready */
+#define UART_LSR_INT_ANY    0x1E    /* Any of the lsr-interrupt-triggering status bits */
+
+/*
+ * Interrupt trigger levels. The byte-counts are for 16550A - in newer
+ * UARTs the byte-count for each ITL is higher.
+ */
+
+#define UART_FCR_ITL_1      0x00    /* 1 byte ITL */
+#define UART_FCR_ITL_2      0x40    /* 4 bytes ITL */
+#define UART_FCR_ITL_3      0x80    /* 8 bytes ITL */
+#define UART_FCR_ITL_4      0xC0    /* 14 bytes ITL */
+
+#define UART_FCR_DMS        0x08    /* DMA Mode Select */
+#define UART_FCR_XFR        0x04    /* XMIT Fifo Reset */
+#define UART_FCR_RFR        0x02    /* RCVR Fifo Reset */
+#define UART_FCR_FE         0x01    /* FIFO Enable */
+
+#define MAX_XMIT_RETRY      4
+
+static void serial_receive1(void *opaque, const uint8_t *buf, int size);
+static void serial_xmit(SerialState *s);
+
+static inline void recv_fifo_put(SerialState *s, uint8_t chr)
+{
+    /* Receive overruns do not overwrite FIFO contents. */
+    if (!fifo8_is_full(&s->recv_fifo)) {
+        fifo8_push(&s->recv_fifo, chr);
+    } else {
+        s->lsr |= UART_LSR_OE;
+    }
+}
+
+static void serial_update_irq(SerialState *s)
+{
+    uint8_t tmp_iir = UART_IIR_NO_INT;
+
+    if ((s->ier & UART_IER_RLSI) && (s->lsr & UART_LSR_INT_ANY)) {
+        tmp_iir = UART_IIR_RLSI;
+    } else if ((s->ier & UART_IER_RDI) && s->timeout_ipending) {
+        /* Note that (s->ier & UART_IER_RDI) can mask this interrupt;
+         * this is not in the specification but is observed on existing
+         * hardware.
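+         * This if/else chain also encodes the 16550's fixed interrupt
+         * priority: line status first, then received data (CTI/RDI),
+         * then THRE, and modem status last.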
*/ + tmp_iir = UART_IIR_CTI; + } else if ((s->ier & UART_IER_RDI) && (s->lsr & UART_LSR_DR) && + (!(s->fcr & UART_FCR_FE) || + s->recv_fifo.num >= s->recv_fifo_itl)) { + tmp_iir = UART_IIR_RDI; + } else if ((s->ier & UART_IER_THRI) && s->thr_ipending) { + tmp_iir = UART_IIR_THRI; + } else if ((s->ier & UART_IER_MSI) && (s->msr & UART_MSR_ANY_DELTA)) { + tmp_iir = UART_IIR_MSI; + } + + s->iir = tmp_iir | (s->iir & 0xF0); + + if (tmp_iir != UART_IIR_NO_INT) { + qemu_irq_raise(s->irq); + } else { + qemu_irq_lower(s->irq); + } +} + +static void serial_update_parameters(SerialState *s) +{ + float speed; + int parity, data_bits, stop_bits, frame_size; + QEMUSerialSetParams ssp; + + /* Start bit. */ + frame_size = 1; + if (s->lcr & 0x08) { + /* Parity bit. */ + frame_size++; + if (s->lcr & 0x10) + parity = 'E'; + else + parity = 'O'; + } else { + parity = 'N'; + } + if (s->lcr & 0x04) { + stop_bits = 2; + } else { + stop_bits = 1; + } + + data_bits = (s->lcr & 0x03) + 5; + frame_size += data_bits + stop_bits; + /* Zero divisor should give about 3500 baud */ + speed = (s->divider == 0) ? 3500 : (float) s->baudbase / s->divider; + ssp.speed = speed; + ssp.parity = parity; + ssp.data_bits = data_bits; + ssp.stop_bits = stop_bits; + s->char_transmit_time = (NANOSECONDS_PER_SECOND / speed) * frame_size; + qemu_chr_fe_ioctl(&s->chr, CHR_IOCTL_SERIAL_SET_PARAMS, &ssp); + trace_serial_update_parameters(speed, parity, data_bits, stop_bits); +} + +static void serial_update_msl(SerialState *s) +{ + uint8_t omsr; + int flags; + + timer_del(s->modem_status_poll); + + if (qemu_chr_fe_ioctl(&s->chr, CHR_IOCTL_SERIAL_GET_TIOCM, + &flags) == -ENOTSUP) { + s->poll_msl = -1; + return; + } + + omsr = s->msr; + + s->msr = (flags & CHR_TIOCM_CTS) ? s->msr | UART_MSR_CTS : s->msr & ~UART_MSR_CTS; + s->msr = (flags & CHR_TIOCM_DSR) ? s->msr | UART_MSR_DSR : s->msr & ~UART_MSR_DSR; + s->msr = (flags & CHR_TIOCM_CAR) ? s->msr | UART_MSR_DCD : s->msr & ~UART_MSR_DCD; + s->msr = (flags & CHR_TIOCM_RI) ? s->msr | UART_MSR_RI : s->msr & ~UART_MSR_RI; + + if (s->msr != omsr) { + /* Set delta bits */ + s->msr = s->msr | ((s->msr >> 4) ^ (omsr >> 4)); + /* UART_MSR_TERI only if change was from 1 -> 0 */ + if ((s->msr & UART_MSR_TERI) && !(omsr & UART_MSR_RI)) + s->msr &= ~UART_MSR_TERI; + serial_update_irq(s); + } + + /* The real 16550A apparently has a 250ns response latency to line status changes. 
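+       Modelling that latency faithfully would mean nanosecond-scale
+       timers, which is not worth it for status lines.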
+ We'll be lazy and poll only every 10ms, and only poll it at all if MSI interrupts are turned on */ + + if (s->poll_msl) { + timer_mod(s->modem_status_poll, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + + NANOSECONDS_PER_SECOND / 100); + } +} + +static gboolean serial_watch_cb(void *do_not_use, GIOCondition cond, + void *opaque) +{ + SerialState *s = opaque; + s->watch_tag = 0; + serial_xmit(s); + return FALSE; +} + +static void serial_xmit(SerialState *s) +{ + do { + assert(!(s->lsr & UART_LSR_TEMT)); + if (s->tsr_retry == 0) { + assert(!(s->lsr & UART_LSR_THRE)); + + if (s->fcr & UART_FCR_FE) { + assert(!fifo8_is_empty(&s->xmit_fifo)); + s->tsr = fifo8_pop(&s->xmit_fifo); + if (!s->xmit_fifo.num) { + s->lsr |= UART_LSR_THRE; + } + } else { + s->tsr = s->thr; + s->lsr |= UART_LSR_THRE; + } + if ((s->lsr & UART_LSR_THRE) && !s->thr_ipending) { + s->thr_ipending = 1; + serial_update_irq(s); + } + } + + if (s->mcr & UART_MCR_LOOP) { + /* in loopback mode, say that we just received a char */ + serial_receive1(s, &s->tsr, 1); + } else { + int rc = qemu_chr_fe_write(&s->chr, &s->tsr, 1); + + if ((rc == 0 || + (rc == -1 && errno == EAGAIN)) && + s->tsr_retry < MAX_XMIT_RETRY) { + assert(s->watch_tag == 0); + s->watch_tag = + qemu_chr_fe_add_watch(&s->chr, G_IO_OUT | G_IO_HUP, + serial_watch_cb, s); + if (s->watch_tag > 0) { + s->tsr_retry++; + return; + } + } + } + s->tsr_retry = 0; + + /* Transmit another byte if it is already available. It is only + possible when FIFO is enabled and not empty. */ + } while (!(s->lsr & UART_LSR_THRE)); + + s->last_xmit_ts = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + s->lsr |= UART_LSR_TEMT; +} + +/* Setter for FCR. + is_load flag means, that value is set while loading VM state + and interrupt should not be invoked */ +static void serial_write_fcr(SerialState *s, uint8_t val) +{ + /* Set fcr - val only has the bits that are supposed to "stick" */ + s->fcr = val; + + if (val & UART_FCR_FE) { + s->iir |= UART_IIR_FE; + /* Set recv_fifo trigger Level */ + switch (val & 0xC0) { + case UART_FCR_ITL_1: + s->recv_fifo_itl = 1; + break; + case UART_FCR_ITL_2: + s->recv_fifo_itl = 4; + break; + case UART_FCR_ITL_3: + s->recv_fifo_itl = 8; + break; + case UART_FCR_ITL_4: + s->recv_fifo_itl = 14; + break; + } + } else { + s->iir &= ~UART_IIR_FE; + } +} + +static void serial_update_tiocm(SerialState *s) +{ + int flags; + + qemu_chr_fe_ioctl(&s->chr, CHR_IOCTL_SERIAL_GET_TIOCM, &flags); + + flags &= ~(CHR_TIOCM_RTS | CHR_TIOCM_DTR); + + if (s->mcr & UART_MCR_RTS) { + flags |= CHR_TIOCM_RTS; + } + if (s->mcr & UART_MCR_DTR) { + flags |= CHR_TIOCM_DTR; + } + + qemu_chr_fe_ioctl(&s->chr, CHR_IOCTL_SERIAL_SET_TIOCM, &flags); +} + +static void serial_ioport_write(void *opaque, hwaddr addr, uint64_t val, + unsigned size) +{ + SerialState *s = opaque; + + assert(size == 1 && addr < 8); + trace_serial_write(addr, val); + switch(addr) { + default: + case 0: + if (s->lcr & UART_LCR_DLAB) { + s->divider = deposit32(s->divider, 8 * addr, 8, val); + serial_update_parameters(s); + } else { + s->thr = (uint8_t) val; + if(s->fcr & UART_FCR_FE) { + /* xmit overruns overwrite data, so make space if needed */ + if (fifo8_is_full(&s->xmit_fifo)) { + fifo8_pop(&s->xmit_fifo); + } + fifo8_push(&s->xmit_fifo, s->thr); + } + s->thr_ipending = 0; + s->lsr &= ~UART_LSR_THRE; + s->lsr &= ~UART_LSR_TEMT; + serial_update_irq(s); + if (s->tsr_retry == 0) { + serial_xmit(s); + } + } + break; + case 1: + if (s->lcr & UART_LCR_DLAB) { + s->divider = deposit32(s->divider, 8 * addr, 8, val); + 
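+            /*
+             * serial_update_parameters() below recomputes the line
+             * speed as baudbase / divider; e.g. a divider of 12 with
+             * the common 115200 baudbase gives 9600 baud.
+             */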
serial_update_parameters(s); + } else { + uint8_t changed = (s->ier ^ val) & 0x0f; + s->ier = val & 0x0f; + /* If the backend device is a real serial port, turn polling of the modem + * status lines on physical port on or off depending on UART_IER_MSI state. + */ + if ((changed & UART_IER_MSI) && s->poll_msl >= 0) { + if (s->ier & UART_IER_MSI) { + s->poll_msl = 1; + serial_update_msl(s); + } else { + timer_del(s->modem_status_poll); + s->poll_msl = 0; + } + } + + /* Turning on the THRE interrupt on IER can trigger the interrupt + * if LSR.THRE=1, even if it had been masked before by reading IIR. + * This is not in the datasheet, but Windows relies on it. It is + * unclear if THRE has to be resampled every time THRI becomes + * 1, or only on the rising edge. Bochs does the latter, and Windows + * always toggles IER to all zeroes and back to all ones, so do the + * same. + * + * If IER.THRI is zero, thr_ipending is not used. Set it to zero + * so that the thr_ipending subsection is not migrated. + */ + if (changed & UART_IER_THRI) { + if ((s->ier & UART_IER_THRI) && (s->lsr & UART_LSR_THRE)) { + s->thr_ipending = 1; + } else { + s->thr_ipending = 0; + } + } + + if (changed) { + serial_update_irq(s); + } + } + break; + case 2: + /* Did the enable/disable flag change? If so, make sure FIFOs get flushed */ + if ((val ^ s->fcr) & UART_FCR_FE) { + val |= UART_FCR_XFR | UART_FCR_RFR; + } + + /* FIFO clear */ + + if (val & UART_FCR_RFR) { + s->lsr &= ~(UART_LSR_DR | UART_LSR_BI); + timer_del(s->fifo_timeout_timer); + s->timeout_ipending = 0; + fifo8_reset(&s->recv_fifo); + } + + if (val & UART_FCR_XFR) { + s->lsr |= UART_LSR_THRE; + s->thr_ipending = 1; + fifo8_reset(&s->xmit_fifo); + } + + serial_write_fcr(s, val & 0xC9); + serial_update_irq(s); + break; + case 3: + { + int break_enable; + s->lcr = val; + serial_update_parameters(s); + break_enable = (val >> 6) & 1; + if (break_enable != s->last_break_enable) { + s->last_break_enable = break_enable; + qemu_chr_fe_ioctl(&s->chr, CHR_IOCTL_SERIAL_SET_BREAK, + &break_enable); + } + } + break; + case 4: + { + int old_mcr = s->mcr; + s->mcr = val & 0x1f; + if (val & UART_MCR_LOOP) + break; + + if (s->poll_msl >= 0 && old_mcr != s->mcr) { + serial_update_tiocm(s); + /* Update the modem status after a one-character-send wait-time, since there may be a response + from the device/computer at the other end of the serial line */ + timer_mod(s->modem_status_poll, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + s->char_transmit_time); + } + } + break; + case 5: + break; + case 6: + break; + case 7: + s->scr = val; + break; + } +} + +static uint64_t serial_ioport_read(void *opaque, hwaddr addr, unsigned size) +{ + SerialState *s = opaque; + uint32_t ret; + + assert(size == 1 && addr < 8); + switch(addr) { + default: + case 0: + if (s->lcr & UART_LCR_DLAB) { + ret = extract16(s->divider, 8 * addr, 8); + } else { + if(s->fcr & UART_FCR_FE) { + ret = fifo8_is_empty(&s->recv_fifo) ? 
+ 0 : fifo8_pop(&s->recv_fifo); + if (s->recv_fifo.num == 0) { + s->lsr &= ~(UART_LSR_DR | UART_LSR_BI); + } else { + timer_mod(s->fifo_timeout_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + s->char_transmit_time * 4); + } + s->timeout_ipending = 0; + } else { + ret = s->rbr; + s->lsr &= ~(UART_LSR_DR | UART_LSR_BI); + } + serial_update_irq(s); + if (!(s->mcr & UART_MCR_LOOP)) { + /* in loopback mode, don't receive any data */ + qemu_chr_fe_accept_input(&s->chr); + } + } + break; + case 1: + if (s->lcr & UART_LCR_DLAB) { + ret = extract16(s->divider, 8 * addr, 8); + } else { + ret = s->ier; + } + break; + case 2: + ret = s->iir; + if ((ret & UART_IIR_ID) == UART_IIR_THRI) { + s->thr_ipending = 0; + serial_update_irq(s); + } + break; + case 3: + ret = s->lcr; + break; + case 4: + ret = s->mcr; + break; + case 5: + ret = s->lsr; + /* Clear break and overrun interrupts */ + if (s->lsr & (UART_LSR_BI|UART_LSR_OE)) { + s->lsr &= ~(UART_LSR_BI|UART_LSR_OE); + serial_update_irq(s); + } + break; + case 6: + if (s->mcr & UART_MCR_LOOP) { + /* in loopback, the modem output pins are connected to the + inputs */ + ret = (s->mcr & 0x0c) << 4; + ret |= (s->mcr & 0x02) << 3; + ret |= (s->mcr & 0x01) << 5; + } else { + if (s->poll_msl >= 0) + serial_update_msl(s); + ret = s->msr; + /* Clear delta bits & msr int after read, if they were set */ + if (s->msr & UART_MSR_ANY_DELTA) { + s->msr &= 0xF0; + serial_update_irq(s); + } + } + break; + case 7: + ret = s->scr; + break; + } + trace_serial_read(addr, ret); + return ret; +} + +static int serial_can_receive(SerialState *s) +{ + if(s->fcr & UART_FCR_FE) { + if (s->recv_fifo.num < UART_FIFO_LENGTH) { + /* + * Advertise (fifo.itl - fifo.count) bytes when count < ITL, and 1 + * if above. If UART_FIFO_LENGTH - fifo.count is advertised the + * effect will be to almost always fill the fifo completely before + * the guest has a chance to respond, effectively overriding the ITL + * that the guest has set. + */ + return (s->recv_fifo.num <= s->recv_fifo_itl) ? 
+                s->recv_fifo_itl - s->recv_fifo.num : 1;
+        } else {
+            return 0;
+        }
+    } else {
+        return !(s->lsr & UART_LSR_DR);
+    }
+}
+
+static void serial_receive_break(SerialState *s)
+{
+    s->rbr = 0;
+    /* When LSR_DR is set, a null byte is pushed into the FIFO */
+    recv_fifo_put(s, '\0');
+    s->lsr |= UART_LSR_BI | UART_LSR_DR;
+    serial_update_irq(s);
+}
+
+/* There's data in recv_fifo and s->rbr has not been read for 4 char
+   transmit times */
+static void fifo_timeout_int(void *opaque)
+{
+    SerialState *s = opaque;
+    if (s->recv_fifo.num) {
+        s->timeout_ipending = 1;
+        serial_update_irq(s);
+    }
+}
+
+static int serial_can_receive1(void *opaque)
+{
+    SerialState *s = opaque;
+    return serial_can_receive(s);
+}
+
+static void serial_receive1(void *opaque, const uint8_t *buf, int size)
+{
+    SerialState *s = opaque;
+
+    if (s->wakeup) {
+        qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER, NULL);
+    }
+    if (s->fcr & UART_FCR_FE) {
+        int i;
+        for (i = 0; i < size; i++) {
+            recv_fifo_put(s, buf[i]);
+        }
+        s->lsr |= UART_LSR_DR;
+        /* call the timeout receive callback in 4 char transmit time */
+        timer_mod(s->fifo_timeout_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
+                  s->char_transmit_time * 4);
+    } else {
+        if (s->lsr & UART_LSR_DR) {
+            s->lsr |= UART_LSR_OE;
+        }
+        s->rbr = buf[0];
+        s->lsr |= UART_LSR_DR;
+    }
+    serial_update_irq(s);
+}
+
+static void serial_event(void *opaque, QEMUChrEvent event)
+{
+    SerialState *s = opaque;
+    if (event == CHR_EVENT_BREAK) {
+        serial_receive_break(s);
+    }
+}
+
+static int serial_pre_save(void *opaque)
+{
+    SerialState *s = opaque;
+    s->fcr_vmstate = s->fcr;
+
+    return 0;
+}
+
+static int serial_pre_load(void *opaque)
+{
+    SerialState *s = opaque;
+    s->thr_ipending = -1;
+    s->poll_msl = -1;
+    return 0;
+}
+
+static int serial_post_load(void *opaque, int version_id)
+{
+    SerialState *s = opaque;
+
+    if (version_id < 3) {
+        s->fcr_vmstate = 0;
+    }
+    if (s->thr_ipending == -1) {
+        s->thr_ipending = ((s->iir & UART_IIR_ID) == UART_IIR_THRI);
+    }
+
+    if (s->tsr_retry > 0) {
+        /* tsr_retry > 0 implies LSR.TEMT = 0 (transmitter not empty). */
+        if (s->lsr & UART_LSR_TEMT) {
+            error_report("inconsistent state in serial device "
+                         "(tsr empty, tsr_retry=%d)", s->tsr_retry);
+            return -1;
+        }
+
+        if (s->tsr_retry > MAX_XMIT_RETRY) {
+            s->tsr_retry = MAX_XMIT_RETRY;
+        }
+
+        assert(s->watch_tag == 0);
+        s->watch_tag = qemu_chr_fe_add_watch(&s->chr, G_IO_OUT | G_IO_HUP,
+                                             serial_watch_cb, s);
+    } else {
+        /* tsr_retry == 0 implies LSR.TEMT = 1 (transmitter empty). */
+        if (!(s->lsr & UART_LSR_TEMT)) {
+            error_report("inconsistent state in serial device "
+                         "(tsr not empty, tsr_retry=0)");
+            return -1;
+        }
+    }
+
+    s->last_break_enable = (s->lcr >> 6) & 1;
+    /* Initialize fcr via setter to perform essential side-effects */
+    serial_write_fcr(s, s->fcr_vmstate);
+    serial_update_parameters(s);
+    return 0;
+}
+
+static bool serial_thr_ipending_needed(void *opaque)
+{
+    SerialState *s = opaque;
+
+    if (s->ier & UART_IER_THRI) {
+        bool expected_value = ((s->iir & UART_IIR_ID) == UART_IIR_THRI);
+        return s->thr_ipending != expected_value;
+    } else {
+        /* LSR.THRE will be sampled again when the interrupt is
+         * enabled. thr_ipending is not used in this case, do
+         * not migrate it.
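+         * serial_pre_load() primes thr_ipending with -1 so that
+         * serial_post_load() can tell whether this subsection was in the
+         * stream and, if it was not, re-derive the flag from IIR.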
+ */ + return false; + } +} + +static const VMStateDescription vmstate_serial_thr_ipending = { + .name = "serial/thr_ipending", + .version_id = 1, + .minimum_version_id = 1, + .needed = serial_thr_ipending_needed, + .fields = (VMStateField[]) { + VMSTATE_INT32(thr_ipending, SerialState), + VMSTATE_END_OF_LIST() + } +}; + +static bool serial_tsr_needed(void *opaque) +{ + SerialState *s = (SerialState *)opaque; + return s->tsr_retry != 0; +} + +static const VMStateDescription vmstate_serial_tsr = { + .name = "serial/tsr", + .version_id = 1, + .minimum_version_id = 1, + .needed = serial_tsr_needed, + .fields = (VMStateField[]) { + VMSTATE_UINT32(tsr_retry, SerialState), + VMSTATE_UINT8(thr, SerialState), + VMSTATE_UINT8(tsr, SerialState), + VMSTATE_END_OF_LIST() + } +}; + +static bool serial_recv_fifo_needed(void *opaque) +{ + SerialState *s = (SerialState *)opaque; + return !fifo8_is_empty(&s->recv_fifo); + +} + +static const VMStateDescription vmstate_serial_recv_fifo = { + .name = "serial/recv_fifo", + .version_id = 1, + .minimum_version_id = 1, + .needed = serial_recv_fifo_needed, + .fields = (VMStateField[]) { + VMSTATE_STRUCT(recv_fifo, SerialState, 1, vmstate_fifo8, Fifo8), + VMSTATE_END_OF_LIST() + } +}; + +static bool serial_xmit_fifo_needed(void *opaque) +{ + SerialState *s = (SerialState *)opaque; + return !fifo8_is_empty(&s->xmit_fifo); +} + +static const VMStateDescription vmstate_serial_xmit_fifo = { + .name = "serial/xmit_fifo", + .version_id = 1, + .minimum_version_id = 1, + .needed = serial_xmit_fifo_needed, + .fields = (VMStateField[]) { + VMSTATE_STRUCT(xmit_fifo, SerialState, 1, vmstate_fifo8, Fifo8), + VMSTATE_END_OF_LIST() + } +}; + +static bool serial_fifo_timeout_timer_needed(void *opaque) +{ + SerialState *s = (SerialState *)opaque; + return timer_pending(s->fifo_timeout_timer); +} + +static const VMStateDescription vmstate_serial_fifo_timeout_timer = { + .name = "serial/fifo_timeout_timer", + .version_id = 1, + .minimum_version_id = 1, + .needed = serial_fifo_timeout_timer_needed, + .fields = (VMStateField[]) { + VMSTATE_TIMER_PTR(fifo_timeout_timer, SerialState), + VMSTATE_END_OF_LIST() + } +}; + +static bool serial_timeout_ipending_needed(void *opaque) +{ + SerialState *s = (SerialState *)opaque; + return s->timeout_ipending != 0; +} + +static const VMStateDescription vmstate_serial_timeout_ipending = { + .name = "serial/timeout_ipending", + .version_id = 1, + .minimum_version_id = 1, + .needed = serial_timeout_ipending_needed, + .fields = (VMStateField[]) { + VMSTATE_INT32(timeout_ipending, SerialState), + VMSTATE_END_OF_LIST() + } +}; + +static bool serial_poll_needed(void *opaque) +{ + SerialState *s = (SerialState *)opaque; + return s->poll_msl >= 0; +} + +static const VMStateDescription vmstate_serial_poll = { + .name = "serial/poll", + .version_id = 1, + .needed = serial_poll_needed, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_INT32(poll_msl, SerialState), + VMSTATE_TIMER_PTR(modem_status_poll, SerialState), + VMSTATE_END_OF_LIST() + } +}; + +const VMStateDescription vmstate_serial = { + .name = "serial", + .version_id = 3, + .minimum_version_id = 2, + .pre_save = serial_pre_save, + .pre_load = serial_pre_load, + .post_load = serial_post_load, + .fields = (VMStateField[]) { + VMSTATE_UINT16_V(divider, SerialState, 2), + VMSTATE_UINT8(rbr, SerialState), + VMSTATE_UINT8(ier, SerialState), + VMSTATE_UINT8(iir, SerialState), + VMSTATE_UINT8(lcr, SerialState), + VMSTATE_UINT8(mcr, SerialState), + VMSTATE_UINT8(lsr, SerialState), + 
VMSTATE_UINT8(msr, SerialState), + VMSTATE_UINT8(scr, SerialState), + VMSTATE_UINT8_V(fcr_vmstate, SerialState, 3), + VMSTATE_END_OF_LIST() + }, + .subsections = (const VMStateDescription*[]) { + &vmstate_serial_thr_ipending, + &vmstate_serial_tsr, + &vmstate_serial_recv_fifo, + &vmstate_serial_xmit_fifo, + &vmstate_serial_fifo_timeout_timer, + &vmstate_serial_timeout_ipending, + &vmstate_serial_poll, + NULL + } +}; + +static void serial_reset(void *opaque) +{ + SerialState *s = opaque; + + if (s->watch_tag > 0) { + g_source_remove(s->watch_tag); + s->watch_tag = 0; + } + + s->rbr = 0; + s->ier = 0; + s->iir = UART_IIR_NO_INT; + s->lcr = 0; + s->lsr = UART_LSR_TEMT | UART_LSR_THRE; + s->msr = UART_MSR_DCD | UART_MSR_DSR | UART_MSR_CTS; + /* Default to 9600 baud, 1 start bit, 8 data bits, 1 stop bit, no parity. */ + s->divider = 0x0C; + s->mcr = UART_MCR_OUT2; + s->scr = 0; + s->tsr_retry = 0; + s->char_transmit_time = (NANOSECONDS_PER_SECOND / 9600) * 10; + s->poll_msl = 0; + + s->timeout_ipending = 0; + timer_del(s->fifo_timeout_timer); + timer_del(s->modem_status_poll); + + fifo8_reset(&s->recv_fifo); + fifo8_reset(&s->xmit_fifo); + + s->last_xmit_ts = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + + s->thr_ipending = 0; + s->last_break_enable = 0; + qemu_irq_lower(s->irq); + + serial_update_msl(s); + s->msr &= ~UART_MSR_ANY_DELTA; +} + +static int serial_be_change(void *opaque) +{ + SerialState *s = opaque; + + qemu_chr_fe_set_handlers(&s->chr, serial_can_receive1, serial_receive1, + serial_event, serial_be_change, s, NULL, true); + + serial_update_parameters(s); + + qemu_chr_fe_ioctl(&s->chr, CHR_IOCTL_SERIAL_SET_BREAK, + &s->last_break_enable); + + s->poll_msl = (s->ier & UART_IER_MSI) ? 1 : 0; + serial_update_msl(s); + + if (s->poll_msl >= 0 && !(s->mcr & UART_MCR_LOOP)) { + serial_update_tiocm(s); + } + + if (s->watch_tag > 0) { + g_source_remove(s->watch_tag); + s->watch_tag = qemu_chr_fe_add_watch(&s->chr, G_IO_OUT | G_IO_HUP, + serial_watch_cb, s); + } + + return 0; +} + +static void serial_realize(DeviceState *dev, Error **errp) +{ + SerialState *s = SERIAL(dev); + + s->modem_status_poll = timer_new_ns(QEMU_CLOCK_VIRTUAL, (QEMUTimerCB *) serial_update_msl, s); + + s->fifo_timeout_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, (QEMUTimerCB *) fifo_timeout_int, s); + qemu_register_reset(serial_reset, s); + + qemu_chr_fe_set_handlers(&s->chr, serial_can_receive1, serial_receive1, + serial_event, serial_be_change, s, NULL, true); + fifo8_create(&s->recv_fifo, UART_FIFO_LENGTH); + fifo8_create(&s->xmit_fifo, UART_FIFO_LENGTH); + serial_reset(s); +} + +static void serial_unrealize(DeviceState *dev) +{ + SerialState *s = SERIAL(dev); + + qemu_chr_fe_deinit(&s->chr, false); + + timer_free(s->modem_status_poll); + + timer_free(s->fifo_timeout_timer); + + fifo8_destroy(&s->recv_fifo); + fifo8_destroy(&s->xmit_fifo); + + qemu_unregister_reset(serial_reset, s); +} + +/* Change the main reference oscillator frequency. 
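Boards whose
+   UART input clock differs from the PC-standard base call this after
+   creation; serial_update_parameters() then recomputes the effective
+   baud rate and char_transmit_time from the new baudbase.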
*/ +void serial_set_frequency(SerialState *s, uint32_t frequency) +{ + s->baudbase = frequency; + serial_update_parameters(s); +} + +const MemoryRegionOps serial_io_ops = { + .read = serial_ioport_read, + .write = serial_ioport_write, + .impl = { + .min_access_size = 1, + .max_access_size = 1, + }, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static Property serial_properties[] = { + DEFINE_PROP_CHR("chardev", SerialState, chr), + DEFINE_PROP_UINT32("baudbase", SerialState, baudbase, 115200), + DEFINE_PROP_BOOL("wakeup", SerialState, wakeup, false), + DEFINE_PROP_END_OF_LIST(), +}; + +static void serial_class_init(ObjectClass *klass, void* data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + /* internal device for serialio/serialmm, not user-creatable */ + dc->user_creatable = false; + dc->realize = serial_realize; + dc->unrealize = serial_unrealize; + device_class_set_props(dc, serial_properties); +} + +static const TypeInfo serial_info = { + .name = TYPE_SERIAL, + .parent = TYPE_DEVICE, + .instance_size = sizeof(SerialState), + .class_init = serial_class_init, +}; + +/* Memory mapped interface */ +static uint64_t serial_mm_read(void *opaque, hwaddr addr, + unsigned size) +{ + SerialMM *s = SERIAL_MM(opaque); + return serial_ioport_read(&s->serial, addr >> s->regshift, 1); +} + +static void serial_mm_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + SerialMM *s = SERIAL_MM(opaque); + value &= 255; + serial_ioport_write(&s->serial, addr >> s->regshift, value, 1); +} + +static const MemoryRegionOps serial_mm_ops[3] = { + [DEVICE_NATIVE_ENDIAN] = { + .read = serial_mm_read, + .write = serial_mm_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid.max_access_size = 8, + .impl.max_access_size = 8, + }, + [DEVICE_LITTLE_ENDIAN] = { + .read = serial_mm_read, + .write = serial_mm_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid.max_access_size = 8, + .impl.max_access_size = 8, + }, + [DEVICE_BIG_ENDIAN] = { + .read = serial_mm_read, + .write = serial_mm_write, + .endianness = DEVICE_BIG_ENDIAN, + .valid.max_access_size = 8, + .impl.max_access_size = 8, + }, +}; + +static void serial_mm_realize(DeviceState *dev, Error **errp) +{ + SerialMM *smm = SERIAL_MM(dev); + SerialState *s = &smm->serial; + + if (!qdev_realize(DEVICE(s), NULL, errp)) { + return; + } + + memory_region_init_io(&s->io, OBJECT(dev), + &serial_mm_ops[smm->endianness], smm, "serial", + 8 << smm->regshift); + sysbus_init_mmio(SYS_BUS_DEVICE(smm), &s->io); + sysbus_init_irq(SYS_BUS_DEVICE(smm), &smm->serial.irq); +} + +static const VMStateDescription vmstate_serial_mm = { + .name = "serial", + .version_id = 3, + .minimum_version_id = 2, + .fields = (VMStateField[]) { + VMSTATE_STRUCT(serial, SerialMM, 0, vmstate_serial, SerialState), + VMSTATE_END_OF_LIST() + } +}; + +SerialMM *serial_mm_init(MemoryRegion *address_space, + hwaddr base, int regshift, + qemu_irq irq, int baudbase, + Chardev *chr, enum device_endian end) +{ + SerialMM *smm = SERIAL_MM(qdev_new(TYPE_SERIAL_MM)); + MemoryRegion *mr; + + qdev_prop_set_uint8(DEVICE(smm), "regshift", regshift); + qdev_prop_set_uint32(DEVICE(smm), "baudbase", baudbase); + qdev_prop_set_chr(DEVICE(smm), "chardev", chr); + qdev_set_legacy_instance_id(DEVICE(smm), base, 2); + qdev_prop_set_uint8(DEVICE(smm), "endianness", end); + sysbus_realize_and_unref(SYS_BUS_DEVICE(smm), &error_fatal); + + sysbus_connect_irq(SYS_BUS_DEVICE(smm), 0, irq); + mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(smm), 0); + memory_region_add_subregion(address_space, base, mr); + + return smm; 
+} + +static void serial_mm_instance_init(Object *o) +{ + SerialMM *smm = SERIAL_MM(o); + + object_initialize_child(o, "serial", &smm->serial, TYPE_SERIAL); + + qdev_alias_all_properties(DEVICE(&smm->serial), o); +} + +static Property serial_mm_properties[] = { + /* + * Set the spacing between adjacent memory-mapped UART registers. + * Each register will be at (1 << regshift) bytes after the + * previous one. + */ + DEFINE_PROP_UINT8("regshift", SerialMM, regshift, 0), + DEFINE_PROP_UINT8("endianness", SerialMM, endianness, DEVICE_NATIVE_ENDIAN), + DEFINE_PROP_END_OF_LIST(), +}; + +static void serial_mm_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + + device_class_set_props(dc, serial_mm_properties); + dc->realize = serial_mm_realize; + dc->vmsd = &vmstate_serial_mm; +} + +static const TypeInfo serial_mm_info = { + .name = TYPE_SERIAL_MM, + .parent = TYPE_SYS_BUS_DEVICE, + .class_init = serial_mm_class_init, + .instance_init = serial_mm_instance_init, + .instance_size = sizeof(SerialMM), +}; + +static void serial_register_types(void) +{ + type_register_static(&serial_info); + type_register_static(&serial_mm_info); +} + +type_init(serial_register_types) diff --git a/hw/char/sh_serial.c b/hw/char/sh_serial.c new file mode 100644 index 000000000..355886ee3 --- /dev/null +++ b/hw/char/sh_serial.c @@ -0,0 +1,465 @@ +/* + * QEMU SCI/SCIF serial port emulation + * + * Copyright (c) 2007 Magnus Damm + * + * Based on serial.c - QEMU 16450 UART emulation + * Copyright (c) 2003-2004 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include "hw/sysbus.h" +#include "hw/irq.h" +#include "hw/qdev-core.h" +#include "hw/qdev-properties.h" +#include "hw/qdev-properties-system.h" +#include "hw/sh4/sh.h" +#include "chardev/char-fe.h" +#include "qapi/error.h" +#include "qemu/timer.h" +#include "qemu/log.h" +#include "trace.h" + +#define SH_SERIAL_FLAG_TEND (1 << 0) +#define SH_SERIAL_FLAG_TDE (1 << 1) +#define SH_SERIAL_FLAG_RDF (1 << 2) +#define SH_SERIAL_FLAG_BRK (1 << 3) +#define SH_SERIAL_FLAG_DR (1 << 4) + +#define SH_RX_FIFO_LENGTH (16) + +OBJECT_DECLARE_SIMPLE_TYPE(SHSerialState, SH_SERIAL) + +struct SHSerialState { + SysBusDevice parent; + uint8_t smr; + uint8_t brr; + uint8_t scr; + uint8_t dr; /* ftdr / tdr */ + uint8_t sr; /* fsr / ssr */ + uint16_t fcr; + uint8_t sptr; + + uint8_t rx_fifo[SH_RX_FIFO_LENGTH]; /* frdr / rdr */ + uint8_t rx_cnt; + uint8_t rx_tail; + uint8_t rx_head; + + uint8_t feat; + int flags; + int rtrg; + + CharBackend chr; + QEMUTimer fifo_timeout_timer; + uint64_t etu; /* Elementary Time Unit (ns) */ + + qemu_irq eri; + qemu_irq rxi; + qemu_irq txi; + qemu_irq tei; + qemu_irq bri; +}; + +typedef struct {} SHSerialStateClass; + +OBJECT_DEFINE_TYPE(SHSerialState, sh_serial, SH_SERIAL, SYS_BUS_DEVICE) + +static void sh_serial_clear_fifo(SHSerialState *s) +{ + memset(s->rx_fifo, 0, SH_RX_FIFO_LENGTH); + s->rx_cnt = 0; + s->rx_head = 0; + s->rx_tail = 0; +} + +static void sh_serial_write(void *opaque, hwaddr offs, + uint64_t val, unsigned size) +{ + SHSerialState *s = opaque; + DeviceState *d = DEVICE(s); + unsigned char ch; + + trace_sh_serial_write(d->id, size, offs, val); + switch (offs) { + case 0x00: /* SMR */ + s->smr = val & ((s->feat & SH_SERIAL_FEAT_SCIF) ? 0x7b : 0xff); + return; + case 0x04: /* BRR */ + s->brr = val; + return; + case 0x08: /* SCR */ + /* TODO : For SH7751, SCIF mask should be 0xfb. */ + s->scr = val & ((s->feat & SH_SERIAL_FEAT_SCIF) ? 0xfa : 0xff); + if (!(val & (1 << 5))) { + s->flags |= SH_SERIAL_FLAG_TEND; + } + if ((s->feat & SH_SERIAL_FEAT_SCIF) && s->txi) { + qemu_set_irq(s->txi, val & (1 << 7)); + } + if (!(val & (1 << 6))) { + qemu_set_irq(s->rxi, 0); + } + return; + case 0x0c: /* FTDR / TDR */ + if (qemu_chr_fe_backend_connected(&s->chr)) { + ch = val; + /* + * XXX this blocks entire thread. 
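(qemu_chr_fe_write_all()
+             * retries internally until the backend has accepted every
+             * byte, so a slow chardev can stall the calling thread.)
+             *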
Rewrite to use + * qemu_chr_fe_write and background I/O callbacks + */ + qemu_chr_fe_write_all(&s->chr, &ch, 1); + } + s->dr = val; + s->flags &= ~SH_SERIAL_FLAG_TDE; + return; +#if 0 + case 0x14: /* FRDR / RDR */ + ret = 0; + break; +#endif + } + if (s->feat & SH_SERIAL_FEAT_SCIF) { + switch (offs) { + case 0x10: /* FSR */ + if (!(val & (1 << 6))) { + s->flags &= ~SH_SERIAL_FLAG_TEND; + } + if (!(val & (1 << 5))) { + s->flags &= ~SH_SERIAL_FLAG_TDE; + } + if (!(val & (1 << 4))) { + s->flags &= ~SH_SERIAL_FLAG_BRK; + } + if (!(val & (1 << 1))) { + s->flags &= ~SH_SERIAL_FLAG_RDF; + } + if (!(val & (1 << 0))) { + s->flags &= ~SH_SERIAL_FLAG_DR; + } + + if (!(val & (1 << 1)) || !(val & (1 << 0))) { + if (s->rxi) { + qemu_set_irq(s->rxi, 0); + } + } + return; + case 0x18: /* FCR */ + s->fcr = val; + switch ((val >> 6) & 3) { + case 0: + s->rtrg = 1; + break; + case 1: + s->rtrg = 4; + break; + case 2: + s->rtrg = 8; + break; + case 3: + s->rtrg = 14; + break; + } + if (val & (1 << 1)) { + sh_serial_clear_fifo(s); + s->sr &= ~(1 << 1); + } + + return; + case 0x20: /* SPTR */ + s->sptr = val & 0xf3; + return; + case 0x24: /* LSR */ + return; + } + } else { + switch (offs) { +#if 0 + case 0x0c: + ret = s->dr; + break; + case 0x10: + ret = 0; + break; +#endif + case 0x1c: + s->sptr = val & 0x8f; + return; + } + } + qemu_log_mask(LOG_GUEST_ERROR, + "%s: unsupported write to 0x%02" HWADDR_PRIx "\n", + __func__, offs); +} + +static uint64_t sh_serial_read(void *opaque, hwaddr offs, + unsigned size) +{ + SHSerialState *s = opaque; + DeviceState *d = DEVICE(s); + uint32_t ret = UINT32_MAX; + +#if 0 + switch (offs) { + case 0x00: + ret = s->smr; + break; + case 0x04: + ret = s->brr; + break; + case 0x08: + ret = s->scr; + break; + case 0x14: + ret = 0; + break; + } +#endif + if (s->feat & SH_SERIAL_FEAT_SCIF) { + switch (offs) { + case 0x00: /* SMR */ + ret = s->smr; + break; + case 0x08: /* SCR */ + ret = s->scr; + break; + case 0x10: /* FSR */ + ret = 0; + if (s->flags & SH_SERIAL_FLAG_TEND) { + ret |= (1 << 6); + } + if (s->flags & SH_SERIAL_FLAG_TDE) { + ret |= (1 << 5); + } + if (s->flags & SH_SERIAL_FLAG_BRK) { + ret |= (1 << 4); + } + if (s->flags & SH_SERIAL_FLAG_RDF) { + ret |= (1 << 1); + } + if (s->flags & SH_SERIAL_FLAG_DR) { + ret |= (1 << 0); + } + + if (s->scr & (1 << 5)) { + s->flags |= SH_SERIAL_FLAG_TDE | SH_SERIAL_FLAG_TEND; + } + + break; + case 0x14: + if (s->rx_cnt > 0) { + ret = s->rx_fifo[s->rx_tail++]; + s->rx_cnt--; + if (s->rx_tail == SH_RX_FIFO_LENGTH) { + s->rx_tail = 0; + } + if (s->rx_cnt < s->rtrg) { + s->flags &= ~SH_SERIAL_FLAG_RDF; + } + } + break; + case 0x18: + ret = s->fcr; + break; + case 0x1c: + ret = s->rx_cnt; + break; + case 0x20: + ret = s->sptr; + break; + case 0x24: + ret = 0; + break; + } + } else { + switch (offs) { +#if 0 + case 0x0c: + ret = s->dr; + break; + case 0x10: + ret = 0; + break; + case 0x14: + ret = s->rx_fifo[0]; + break; +#endif + case 0x1c: + ret = s->sptr; + break; + } + } + trace_sh_serial_read(d->id, size, offs, ret); + + if (ret > UINT16_MAX) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: unsupported read from 0x%02" HWADDR_PRIx "\n", + __func__, offs); + ret = 0; + } + + return ret; +} + +static int sh_serial_can_receive(SHSerialState *s) +{ + return s->scr & (1 << 4); +} + +static void sh_serial_receive_break(SHSerialState *s) +{ + if (s->feat & SH_SERIAL_FEAT_SCIF) { + s->sr |= (1 << 4); + } +} + +static int sh_serial_can_receive1(void *opaque) +{ + SHSerialState *s = opaque; + return sh_serial_can_receive(s); +} + +static void 
sh_serial_timeout_int(void *opaque) +{ + SHSerialState *s = opaque; + + s->flags |= SH_SERIAL_FLAG_RDF; + if (s->scr & (1 << 6) && s->rxi) { + qemu_set_irq(s->rxi, 1); + } +} + +static void sh_serial_receive1(void *opaque, const uint8_t *buf, int size) +{ + SHSerialState *s = opaque; + + if (s->feat & SH_SERIAL_FEAT_SCIF) { + int i; + for (i = 0; i < size; i++) { + if (s->rx_cnt < SH_RX_FIFO_LENGTH) { + s->rx_fifo[s->rx_head++] = buf[i]; + if (s->rx_head == SH_RX_FIFO_LENGTH) { + s->rx_head = 0; + } + s->rx_cnt++; + if (s->rx_cnt >= s->rtrg) { + s->flags |= SH_SERIAL_FLAG_RDF; + if (s->scr & (1 << 6) && s->rxi) { + timer_del(&s->fifo_timeout_timer); + qemu_set_irq(s->rxi, 1); + } + } else { + timer_mod(&s->fifo_timeout_timer, + qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 15 * s->etu); + } + } + } + } else { + s->rx_fifo[0] = buf[0]; + } +} + +static void sh_serial_event(void *opaque, QEMUChrEvent event) +{ + SHSerialState *s = opaque; + if (event == CHR_EVENT_BREAK) { + sh_serial_receive_break(s); + } +} + +static const MemoryRegionOps sh_serial_ops = { + .read = sh_serial_read, + .write = sh_serial_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void sh_serial_reset(DeviceState *dev) +{ + SHSerialState *s = SH_SERIAL(dev); + + s->flags = SH_SERIAL_FLAG_TEND | SH_SERIAL_FLAG_TDE; + s->rtrg = 1; + + s->smr = 0; + s->brr = 0xff; + s->scr = 1 << 5; /* pretend that TX is enabled so early printk works */ + s->sptr = 0; + + if (s->feat & SH_SERIAL_FEAT_SCIF) { + s->fcr = 0; + } else { + s->dr = 0xff; + } + + sh_serial_clear_fifo(s); +} + +static void sh_serial_realize(DeviceState *d, Error **errp) +{ + SHSerialState *s = SH_SERIAL(d); + MemoryRegion *iomem = g_malloc(sizeof(*iomem)); + + assert(d->id); + memory_region_init_io(iomem, OBJECT(d), &sh_serial_ops, s, d->id, 0x28); + sysbus_init_mmio(SYS_BUS_DEVICE(d), iomem); + qdev_init_gpio_out_named(d, &s->eri, "eri", 1); + qdev_init_gpio_out_named(d, &s->rxi, "rxi", 1); + qdev_init_gpio_out_named(d, &s->txi, "txi", 1); + qdev_init_gpio_out_named(d, &s->tei, "tei", 1); + qdev_init_gpio_out_named(d, &s->bri, "bri", 1); + + if (qemu_chr_fe_backend_connected(&s->chr)) { + qemu_chr_fe_set_handlers(&s->chr, sh_serial_can_receive1, + sh_serial_receive1, + sh_serial_event, NULL, s, NULL, true); + } + + timer_init_ns(&s->fifo_timeout_timer, QEMU_CLOCK_VIRTUAL, + sh_serial_timeout_int, s); + s->etu = NANOSECONDS_PER_SECOND / 9600; +} + +static void sh_serial_finalize(Object *obj) +{ + SHSerialState *s = SH_SERIAL(obj); + + timer_del(&s->fifo_timeout_timer); +} + +static void sh_serial_init(Object *obj) +{ +} + +static Property sh_serial_properties[] = { + DEFINE_PROP_CHR("chardev", SHSerialState, chr), + DEFINE_PROP_UINT8("features", SHSerialState, feat, 0), + DEFINE_PROP_END_OF_LIST() +}; + +static void sh_serial_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + + device_class_set_props(dc, sh_serial_properties); + dc->realize = sh_serial_realize; + dc->reset = sh_serial_reset; + /* Reason: part of SuperH CPU/SoC, needs to be wired up */ + dc->user_creatable = false; +} diff --git a/hw/char/shakti_uart.c b/hw/char/shakti_uart.c new file mode 100644 index 000000000..98b142c7d --- /dev/null +++ b/hw/char/shakti_uart.c @@ -0,0 +1,186 @@ +/* + * SHAKTI UART + * + * Copyright (c) 2021 Vijai Kumar K + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without 
limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "hw/char/shakti_uart.h" +#include "hw/qdev-properties.h" +#include "hw/qdev-properties-system.h" +#include "qemu/log.h" + +static uint64_t shakti_uart_read(void *opaque, hwaddr addr, unsigned size) +{ + ShaktiUartState *s = opaque; + + switch (addr) { + case SHAKTI_UART_BAUD: + return s->uart_baud; + case SHAKTI_UART_RX: + qemu_chr_fe_accept_input(&s->chr); + s->uart_status &= ~SHAKTI_UART_STATUS_RX_NOT_EMPTY; + return s->uart_rx; + case SHAKTI_UART_STATUS: + return s->uart_status; + case SHAKTI_UART_DELAY: + return s->uart_delay; + case SHAKTI_UART_CONTROL: + return s->uart_control; + case SHAKTI_UART_INT_EN: + return s->uart_interrupt; + case SHAKTI_UART_IQ_CYCLES: + return s->uart_iq_cycles; + case SHAKTI_UART_RX_THRES: + return s->uart_rx_threshold; + default: + /* Also handles TX REG which is write only */ + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad offset 0x%"HWADDR_PRIx"\n", __func__, addr); + } + + return 0; +} + +static void shakti_uart_write(void *opaque, hwaddr addr, + uint64_t data, unsigned size) +{ + ShaktiUartState *s = opaque; + uint32_t value = data; + uint8_t ch; + + switch (addr) { + case SHAKTI_UART_BAUD: + s->uart_baud = value; + break; + case SHAKTI_UART_TX: + ch = value; + qemu_chr_fe_write_all(&s->chr, &ch, 1); + s->uart_status &= ~SHAKTI_UART_STATUS_TX_FULL; + break; + case SHAKTI_UART_STATUS: + s->uart_status = value; + break; + case SHAKTI_UART_DELAY: + s->uart_delay = value; + break; + case SHAKTI_UART_CONTROL: + s->uart_control = value; + break; + case SHAKTI_UART_INT_EN: + s->uart_interrupt = value; + break; + case SHAKTI_UART_IQ_CYCLES: + s->uart_iq_cycles = value; + break; + case SHAKTI_UART_RX_THRES: + s->uart_rx_threshold = value; + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad offset 0x%"HWADDR_PRIx"\n", __func__, addr); + } +} + +static const MemoryRegionOps shakti_uart_ops = { + .read = shakti_uart_read, + .write = shakti_uart_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .impl = {.min_access_size = 1, .max_access_size = 4}, + .valid = {.min_access_size = 1, .max_access_size = 4}, +}; + +static void shakti_uart_reset(DeviceState *dev) +{ + ShaktiUartState *s = SHAKTI_UART(dev); + + s->uart_baud = SHAKTI_UART_BAUD_DEFAULT; + s->uart_tx = 0x0; + s->uart_rx = 0x0; + s->uart_status = 0x0000; + s->uart_delay = 0x0000; + s->uart_control = SHAKTI_UART_CONTROL_DEFAULT; + s->uart_interrupt = 0x0000; + s->uart_iq_cycles = 0x00; + s->uart_rx_threshold = 0x00; +} + +static int shakti_uart_can_receive(void *opaque) +{ + ShaktiUartState *s = opaque; + + return !(s->uart_status & SHAKTI_UART_STATUS_RX_NOT_EMPTY); +} + +static void shakti_uart_receive(void 
*opaque, const uint8_t *buf, int size) +{ + ShaktiUartState *s = opaque; + + s->uart_rx = *buf; + s->uart_status |= SHAKTI_UART_STATUS_RX_NOT_EMPTY; +} + +static void shakti_uart_realize(DeviceState *dev, Error **errp) +{ + ShaktiUartState *sus = SHAKTI_UART(dev); + qemu_chr_fe_set_handlers(&sus->chr, shakti_uart_can_receive, + shakti_uart_receive, NULL, NULL, sus, NULL, true); +} + +static void shakti_uart_instance_init(Object *obj) +{ + ShaktiUartState *sus = SHAKTI_UART(obj); + memory_region_init_io(&sus->mmio, + obj, + &shakti_uart_ops, + sus, + TYPE_SHAKTI_UART, + 0x1000); + sysbus_init_mmio(SYS_BUS_DEVICE(obj), &sus->mmio); +} + +static Property shakti_uart_properties[] = { + DEFINE_PROP_CHR("chardev", ShaktiUartState, chr), + DEFINE_PROP_END_OF_LIST(), +}; + +static void shakti_uart_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + dc->reset = shakti_uart_reset; + dc->realize = shakti_uart_realize; + device_class_set_props(dc, shakti_uart_properties); + set_bit(DEVICE_CATEGORY_INPUT, dc->categories); +} + +static const TypeInfo shakti_uart_info = { + .name = TYPE_SHAKTI_UART, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(ShaktiUartState), + .class_init = shakti_uart_class_init, + .instance_init = shakti_uart_instance_init, +}; + +static void shakti_uart_register_types(void) +{ + type_register_static(&shakti_uart_info); +} +type_init(shakti_uart_register_types) diff --git a/hw/char/sifive_uart.c b/hw/char/sifive_uart.c new file mode 100644 index 000000000..1c75f792b --- /dev/null +++ b/hw/char/sifive_uart.c @@ -0,0 +1,289 @@ +/* + * QEMU model of the UART on the SiFive E300 and U500 series SOCs. + * + * Copyright (c) 2016 Stefan O'Rear + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . 
+ */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/log.h" +#include "migration/vmstate.h" +#include "chardev/char.h" +#include "chardev/char-fe.h" +#include "hw/irq.h" +#include "hw/char/sifive_uart.h" +#include "hw/qdev-properties-system.h" + +/* + * Not yet implemented: + * + * Transmit FIFO using "qemu/fifo8.h" + */ + +/* Returns the state of the IP (interrupt pending) register */ +static uint64_t sifive_uart_ip(SiFiveUARTState *s) +{ + uint64_t ret = 0; + + uint64_t txcnt = SIFIVE_UART_GET_TXCNT(s->txctrl); + uint64_t rxcnt = SIFIVE_UART_GET_RXCNT(s->rxctrl); + + if (txcnt != 0) { + ret |= SIFIVE_UART_IP_TXWM; + } + if (s->rx_fifo_len > rxcnt) { + ret |= SIFIVE_UART_IP_RXWM; + } + + return ret; +} + +static void sifive_uart_update_irq(SiFiveUARTState *s) +{ + int cond = 0; + if ((s->ie & SIFIVE_UART_IE_TXWM) || + ((s->ie & SIFIVE_UART_IE_RXWM) && s->rx_fifo_len)) { + cond = 1; + } + if (cond) { + qemu_irq_raise(s->irq); + } else { + qemu_irq_lower(s->irq); + } +} + +static uint64_t +sifive_uart_read(void *opaque, hwaddr addr, unsigned int size) +{ + SiFiveUARTState *s = opaque; + unsigned char r; + switch (addr) { + case SIFIVE_UART_RXFIFO: + if (s->rx_fifo_len) { + r = s->rx_fifo[0]; + memmove(s->rx_fifo, s->rx_fifo + 1, s->rx_fifo_len - 1); + s->rx_fifo_len--; + qemu_chr_fe_accept_input(&s->chr); + sifive_uart_update_irq(s); + return r; + } + return 0x80000000; + + case SIFIVE_UART_TXFIFO: + return 0; /* Should check tx fifo */ + case SIFIVE_UART_IE: + return s->ie; + case SIFIVE_UART_IP: + return sifive_uart_ip(s); + case SIFIVE_UART_TXCTRL: + return s->txctrl; + case SIFIVE_UART_RXCTRL: + return s->rxctrl; + case SIFIVE_UART_DIV: + return s->div; + } + + qemu_log_mask(LOG_GUEST_ERROR, "%s: bad read: addr=0x%x\n", + __func__, (int)addr); + return 0; +} + +static void +sifive_uart_write(void *opaque, hwaddr addr, + uint64_t val64, unsigned int size) +{ + SiFiveUARTState *s = opaque; + uint32_t value = val64; + unsigned char ch = value; + + switch (addr) { + case SIFIVE_UART_TXFIFO: + qemu_chr_fe_write(&s->chr, &ch, 1); + sifive_uart_update_irq(s); + return; + case SIFIVE_UART_IE: + s->ie = val64; + sifive_uart_update_irq(s); + return; + case SIFIVE_UART_TXCTRL: + s->txctrl = val64; + return; + case SIFIVE_UART_RXCTRL: + s->rxctrl = val64; + return; + case SIFIVE_UART_DIV: + s->div = val64; + return; + } + qemu_log_mask(LOG_GUEST_ERROR, "%s: bad write: addr=0x%x v=0x%x\n", + __func__, (int)addr, (int)value); +} + +static const MemoryRegionOps sifive_uart_ops = { + .read = sifive_uart_read, + .write = sifive_uart_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4 + } +}; + +static void sifive_uart_rx(void *opaque, const uint8_t *buf, int size) +{ + SiFiveUARTState *s = opaque; + + /* Got a byte. 
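If the chardev layer respected
+     * sifive_uart_can_rx() there is room; otherwise the byte is simply
+     * dropped below, as this model has no overrun flag to raise.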
*/ + if (s->rx_fifo_len >= sizeof(s->rx_fifo)) { + printf("WARNING: UART dropped char.\n"); + return; + } + s->rx_fifo[s->rx_fifo_len++] = *buf; + + sifive_uart_update_irq(s); +} + +static int sifive_uart_can_rx(void *opaque) +{ + SiFiveUARTState *s = opaque; + + return s->rx_fifo_len < sizeof(s->rx_fifo); +} + +static void sifive_uart_event(void *opaque, QEMUChrEvent event) +{ +} + +static int sifive_uart_be_change(void *opaque) +{ + SiFiveUARTState *s = opaque; + + qemu_chr_fe_set_handlers(&s->chr, sifive_uart_can_rx, sifive_uart_rx, + sifive_uart_event, sifive_uart_be_change, s, + NULL, true); + + return 0; +} + +static Property sifive_uart_properties[] = { + DEFINE_PROP_CHR("chardev", SiFiveUARTState, chr), + DEFINE_PROP_END_OF_LIST(), +}; + +static void sifive_uart_init(Object *obj) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + SiFiveUARTState *s = SIFIVE_UART(obj); + + memory_region_init_io(&s->mmio, OBJECT(s), &sifive_uart_ops, s, + TYPE_SIFIVE_UART, SIFIVE_UART_MAX); + sysbus_init_mmio(sbd, &s->mmio); + sysbus_init_irq(sbd, &s->irq); +} + +static void sifive_uart_realize(DeviceState *dev, Error **errp) +{ + SiFiveUARTState *s = SIFIVE_UART(dev); + + qemu_chr_fe_set_handlers(&s->chr, sifive_uart_can_rx, sifive_uart_rx, + sifive_uart_event, sifive_uart_be_change, s, + NULL, true); + +} + +static void sifive_uart_reset_enter(Object *obj, ResetType type) +{ + SiFiveUARTState *s = SIFIVE_UART(obj); + s->ie = 0; + s->ip = 0; + s->txctrl = 0; + s->rxctrl = 0; + s->div = 0; + s->rx_fifo_len = 0; +} + +static void sifive_uart_reset_hold(Object *obj) +{ + SiFiveUARTState *s = SIFIVE_UART(obj); + qemu_irq_lower(s->irq); +} + +static const VMStateDescription vmstate_sifive_uart = { + .name = TYPE_SIFIVE_UART, + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT8_ARRAY(rx_fifo, SiFiveUARTState, + SIFIVE_UART_RX_FIFO_SIZE), + VMSTATE_UINT8(rx_fifo_len, SiFiveUARTState), + VMSTATE_UINT32(ie, SiFiveUARTState), + VMSTATE_UINT32(ip, SiFiveUARTState), + VMSTATE_UINT32(txctrl, SiFiveUARTState), + VMSTATE_UINT32(rxctrl, SiFiveUARTState), + VMSTATE_UINT32(div, SiFiveUARTState), + VMSTATE_END_OF_LIST() + }, +}; + + +static void sifive_uart_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + ResettableClass *rc = RESETTABLE_CLASS(oc); + + dc->realize = sifive_uart_realize; + dc->vmsd = &vmstate_sifive_uart; + rc->phases.enter = sifive_uart_reset_enter; + rc->phases.hold = sifive_uart_reset_hold; + device_class_set_props(dc, sifive_uart_properties); + set_bit(DEVICE_CATEGORY_INPUT, dc->categories); +} + +static const TypeInfo sifive_uart_info = { + .name = TYPE_SIFIVE_UART, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(SiFiveUARTState), + .instance_init = sifive_uart_init, + .class_init = sifive_uart_class_init, +}; + +static void sifive_uart_register_types(void) +{ + type_register_static(&sifive_uart_info); +} + +type_init(sifive_uart_register_types) + +/* + * Create UART device. 
+ */ +SiFiveUARTState *sifive_uart_create(MemoryRegion *address_space, hwaddr base, + Chardev *chr, qemu_irq irq) +{ + DeviceState *dev; + SysBusDevice *s; + SiFiveUARTState *r; + + dev = qdev_new("riscv.sifive.uart"); + s = SYS_BUS_DEVICE(dev); + qdev_prop_set_chr(dev, "chardev", chr); + sysbus_realize_and_unref(s, &error_fatal); + memory_region_add_subregion(address_space, base, + sysbus_mmio_get_region(s, 0)); + sysbus_connect_irq(s, 0, irq); + + r = SIFIVE_UART(dev); + return r; +} diff --git a/hw/char/spapr_vty.c b/hw/char/spapr_vty.c new file mode 100644 index 000000000..91eae1a59 --- /dev/null +++ b/hw/char/spapr_vty.c @@ -0,0 +1,272 @@ +#include "qemu/osdep.h" +#include "qemu/error-report.h" +#include "qemu/module.h" +#include "qapi/error.h" +#include "migration/vmstate.h" +#include "chardev/char-fe.h" +#include "hw/ppc/spapr.h" +#include "hw/ppc/spapr_vio.h" +#include "hw/qdev-properties.h" +#include "hw/qdev-properties-system.h" +#include "qom/object.h" + +#define VTERM_BUFSIZE 16 + +struct SpaprVioVty { + SpaprVioDevice sdev; + CharBackend chardev; + uint32_t in, out; + uint8_t buf[VTERM_BUFSIZE]; +}; + +#define TYPE_VIO_SPAPR_VTY_DEVICE "spapr-vty" +OBJECT_DECLARE_SIMPLE_TYPE(SpaprVioVty, VIO_SPAPR_VTY_DEVICE) + +static int vty_can_receive(void *opaque) +{ + SpaprVioVty *dev = VIO_SPAPR_VTY_DEVICE(opaque); + + return VTERM_BUFSIZE - (dev->in - dev->out); +} + +static void vty_receive(void *opaque, const uint8_t *buf, int size) +{ + SpaprVioVty *dev = VIO_SPAPR_VTY_DEVICE(opaque); + int i; + + if ((dev->in == dev->out) && size) { + /* toggle line to simulate edge interrupt */ + spapr_vio_irq_pulse(&dev->sdev); + } + for (i = 0; i < size; i++) { + if (dev->in - dev->out >= VTERM_BUFSIZE) { + static bool reported; + if (!reported) { + error_report("VTY input buffer exhausted - characters dropped." + " (input size = %i)", size); + reported = true; + } + break; + } + dev->buf[dev->in++ % VTERM_BUFSIZE] = buf[i]; + } +} + +static int vty_getchars(SpaprVioDevice *sdev, uint8_t *buf, int max) +{ + SpaprVioVty *dev = VIO_SPAPR_VTY_DEVICE(sdev); + int n = 0; + + while ((n < max) && (dev->out != dev->in)) { + /* + * Long ago, PowerVM's vty implementation had a bug where it + * inserted a \0 after every \r going to the guest. Existing + * guests have a workaround for this which removes every \0 + * immediately following a \r. To avoid triggering this + * workaround, we stop before inserting a \0 if the preceding + * character in the output buffer is a \r. + */ + if (n > 0 && (buf[n - 1] == '\r') && + (dev->buf[dev->out % VTERM_BUFSIZE] == '\0')) { + break; + } + buf[n++] = dev->buf[dev->out++ % VTERM_BUFSIZE]; + } + + qemu_chr_fe_accept_input(&dev->chardev); + + return n; +} + +void vty_putchars(SpaprVioDevice *sdev, uint8_t *buf, int len) +{ + SpaprVioVty *dev = VIO_SPAPR_VTY_DEVICE(sdev); + + /* XXX this blocks entire thread. 
Rewrite to use
+     * qemu_chr_fe_write and background I/O callbacks */
+    qemu_chr_fe_write_all(&dev->chardev, buf, len);
+}
+
+static void spapr_vty_realize(SpaprVioDevice *sdev, Error **errp)
+{
+    SpaprVioVty *dev = VIO_SPAPR_VTY_DEVICE(sdev);
+
+    if (!qemu_chr_fe_backend_connected(&dev->chardev)) {
+        error_setg(errp, "chardev property not set");
+        return;
+    }
+
+    qemu_chr_fe_set_handlers(&dev->chardev, vty_can_receive,
+                             vty_receive, NULL, NULL, dev, NULL, true);
+}
+
+static target_ulong h_put_term_char(PowerPCCPU *cpu, SpaprMachineState *spapr,
+                                    target_ulong opcode, target_ulong *args)
+{
+    target_ulong reg = args[0];
+    target_ulong len = args[1];
+    target_ulong char0_7 = args[2];
+    target_ulong char8_15 = args[3];
+    SpaprVioDevice *sdev;
+    uint8_t buf[16];
+
+    sdev = vty_lookup(spapr, reg);
+    if (!sdev) {
+        return H_PARAMETER;
+    }
+
+    if (len > 16) {
+        return H_PARAMETER;
+    }
+
+    *((uint64_t *)buf) = cpu_to_be64(char0_7);
+    *((uint64_t *)buf + 1) = cpu_to_be64(char8_15);
+
+    vty_putchars(sdev, buf, len);
+
+    return H_SUCCESS;
+}
+
+static target_ulong h_get_term_char(PowerPCCPU *cpu, SpaprMachineState *spapr,
+                                    target_ulong opcode, target_ulong *args)
+{
+    target_ulong reg = args[0];
+    target_ulong *len = args + 0;
+    target_ulong *char0_7 = args + 1;
+    target_ulong *char8_15 = args + 2;
+    SpaprVioDevice *sdev;
+    uint8_t buf[16];
+
+    sdev = vty_lookup(spapr, reg);
+    if (!sdev) {
+        return H_PARAMETER;
+    }
+
+    *len = vty_getchars(sdev, buf, sizeof(buf));
+    if (*len < 16) {
+        memset(buf + *len, 0, 16 - *len);
+    }
+
+    *char0_7 = be64_to_cpu(*((uint64_t *)buf));
+    *char8_15 = be64_to_cpu(*((uint64_t *)buf + 1));
+
+    return H_SUCCESS;
+}
+
+void spapr_vty_create(SpaprVioBus *bus, Chardev *chardev)
+{
+    DeviceState *dev;
+
+    dev = qdev_new("spapr-vty");
+    qdev_prop_set_chr(dev, "chardev", chardev);
+    qdev_realize_and_unref(dev, &bus->bus, &error_fatal);
+}
+
+static Property spapr_vty_properties[] = {
+    DEFINE_SPAPR_PROPERTIES(SpaprVioVty, sdev),
+    DEFINE_PROP_CHR("chardev", SpaprVioVty, chardev),
+    DEFINE_PROP_END_OF_LIST(),
+};
+
+static const VMStateDescription vmstate_spapr_vty = {
+    .name = "spapr_vty",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .fields = (VMStateField[]) {
+        VMSTATE_SPAPR_VIO(sdev, SpaprVioVty),
+        VMSTATE_UINT32(in, SpaprVioVty),
+        VMSTATE_UINT32(out, SpaprVioVty),
+        VMSTATE_BUFFER(buf, SpaprVioVty),
+        VMSTATE_END_OF_LIST()
+    },
+};
+
+static void spapr_vty_class_init(ObjectClass *klass, void *data)
+{
+    DeviceClass *dc = DEVICE_CLASS(klass);
+    SpaprVioDeviceClass *k = VIO_SPAPR_DEVICE_CLASS(klass);
+
+    k->realize = spapr_vty_realize;
+    k->dt_name = "vty";
+    k->dt_type = "serial";
+    k->dt_compatible = "hvterm1";
+    set_bit(DEVICE_CATEGORY_INPUT, dc->categories);
+    device_class_set_props(dc, spapr_vty_properties);
+    dc->vmsd = &vmstate_spapr_vty;
+}
+
+static const TypeInfo spapr_vty_info = {
+    .name = TYPE_VIO_SPAPR_VTY_DEVICE,
+    .parent = TYPE_VIO_SPAPR_DEVICE,
+    .instance_size = sizeof(SpaprVioVty),
+    .class_init = spapr_vty_class_init,
+};
+
+SpaprVioDevice *spapr_vty_get_default(SpaprVioBus *bus)
+{
+    SpaprVioDevice *sdev, *selected;
+    BusChild *kid;
+
+    /*
+     * To avoid the console bouncing around we want one VTY to be
+     * the "default". We haven't really got anything to go on, so
+     * arbitrarily choose the one with the lowest reg value.
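+     * (The reg value is the VIO unit address from the device
+     * configuration, so the choice is stable for a given setup.)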
+     */
+
+    selected = NULL;
+    QTAILQ_FOREACH(kid, &bus->bus.children, sibling) {
+        DeviceState *iter = kid->child;
+
+        /* Only look at VTY devices */
+        if (!object_dynamic_cast(OBJECT(iter), TYPE_VIO_SPAPR_VTY_DEVICE)) {
+            continue;
+        }
+
+        sdev = VIO_SPAPR_DEVICE(iter);
+
+        /* First VTY we've found, so it is selected for now */
+        if (!selected) {
+            selected = sdev;
+            continue;
+        }
+
+        /* Choose VTY with lowest reg value */
+        if (sdev->reg < selected->reg) {
+            selected = sdev;
+        }
+    }
+
+    return selected;
+}
+
+SpaprVioDevice *vty_lookup(SpaprMachineState *spapr, target_ulong reg)
+{
+    SpaprVioDevice *sdev;
+
+    sdev = spapr_vio_find_by_reg(spapr->vio_bus, reg);
+    if (!sdev && reg == 0) {
+        /* Hack for kernel early debug, which always specifies reg==0.
+         * We search all VIO devices, and grab the vty with the lowest
+         * reg. This attempts to mimic existing PowerVM behaviour
+         * (early debug does work there, despite having no vty with
+         * reg==0). */
+        return spapr_vty_get_default(spapr->vio_bus);
+    }
+
+    if (!object_dynamic_cast(OBJECT(sdev), TYPE_VIO_SPAPR_VTY_DEVICE)) {
+        return NULL;
+    }
+
+    return sdev;
+}
+
+static void spapr_vty_register_types(void)
+{
+    spapr_register_hypercall(H_PUT_TERM_CHAR, h_put_term_char);
+    spapr_register_hypercall(H_GET_TERM_CHAR, h_get_term_char);
+    type_register_static(&spapr_vty_info);
+}
+
+type_init(spapr_vty_register_types)
diff --git a/hw/char/stm32f2xx_usart.c b/hw/char/stm32f2xx_usart.c
new file mode 100644
index 000000000..8df083242
--- /dev/null
+++ b/hw/char/stm32f2xx_usart.c
@@ -0,0 +1,243 @@
+/*
+ * STM32F2XX USART
+ *
+ * Copyright (c) 2014 Alistair Francis
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/char/stm32f2xx_usart.h"
+#include "hw/irq.h"
+#include "hw/qdev-properties.h"
+#include "hw/qdev-properties-system.h"
+#include "qemu/log.h"
+#include "qemu/module.h"
+
+#ifndef STM_USART_ERR_DEBUG
+#define STM_USART_ERR_DEBUG 0
+#endif
+
+#define DB_PRINT_L(lvl, fmt, args...) do { \
+    if (STM_USART_ERR_DEBUG >= lvl) { \
+        qemu_log("%s: " fmt, __func__, ## args); \
+    } \
+} while (0)
+
+#define DB_PRINT(fmt, args...)
DB_PRINT_L(1, fmt, ## args) + +static int stm32f2xx_usart_can_receive(void *opaque) +{ + STM32F2XXUsartState *s = opaque; + + if (!(s->usart_sr & USART_SR_RXNE)) { + return 1; + } + + return 0; +} + +static void stm32f2xx_usart_receive(void *opaque, const uint8_t *buf, int size) +{ + STM32F2XXUsartState *s = opaque; + + if (!(s->usart_cr1 & USART_CR1_UE && s->usart_cr1 & USART_CR1_RE)) { + /* USART not enabled - drop the chars */ + DB_PRINT("Dropping the chars\n"); + return; + } + + s->usart_dr = *buf; + s->usart_sr |= USART_SR_RXNE; + + if (s->usart_cr1 & USART_CR1_RXNEIE) { + qemu_set_irq(s->irq, 1); + } + + DB_PRINT("Receiving: %c\n", s->usart_dr); +} + +static void stm32f2xx_usart_reset(DeviceState *dev) +{ + STM32F2XXUsartState *s = STM32F2XX_USART(dev); + + s->usart_sr = USART_SR_RESET; + s->usart_dr = 0x00000000; + s->usart_brr = 0x00000000; + s->usart_cr1 = 0x00000000; + s->usart_cr2 = 0x00000000; + s->usart_cr3 = 0x00000000; + s->usart_gtpr = 0x00000000; + + qemu_set_irq(s->irq, 0); +} + +static uint64_t stm32f2xx_usart_read(void *opaque, hwaddr addr, + unsigned int size) +{ + STM32F2XXUsartState *s = opaque; + uint64_t retvalue; + + DB_PRINT("Read 0x%"HWADDR_PRIx"\n", addr); + + switch (addr) { + case USART_SR: + retvalue = s->usart_sr; + qemu_chr_fe_accept_input(&s->chr); + return retvalue; + case USART_DR: + DB_PRINT("Value: 0x%" PRIx32 ", %c\n", s->usart_dr, (char) s->usart_dr); + s->usart_sr &= ~USART_SR_RXNE; + qemu_chr_fe_accept_input(&s->chr); + qemu_set_irq(s->irq, 0); + return s->usart_dr & 0x3FF; + case USART_BRR: + return s->usart_brr; + case USART_CR1: + return s->usart_cr1; + case USART_CR2: + return s->usart_cr2; + case USART_CR3: + return s->usart_cr3; + case USART_GTPR: + return s->usart_gtpr; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad offset 0x%"HWADDR_PRIx"\n", __func__, addr); + return 0; + } + + return 0; +} + +static void stm32f2xx_usart_write(void *opaque, hwaddr addr, + uint64_t val64, unsigned int size) +{ + STM32F2XXUsartState *s = opaque; + uint32_t value = val64; + unsigned char ch; + + DB_PRINT("Write 0x%" PRIx32 ", 0x%"HWADDR_PRIx"\n", value, addr); + + switch (addr) { + case USART_SR: + if (value <= 0x3FF) { + /* I/O being synchronous, TXE is always set. In addition, it may + only be set by hardware, so keep it set here. */ + s->usart_sr = value | USART_SR_TXE; + } else { + s->usart_sr &= value; + } + if (!(s->usart_sr & USART_SR_RXNE)) { + qemu_set_irq(s->irq, 0); + } + return; + case USART_DR: + if (value < 0xF000) { + ch = value; + /* XXX this blocks entire thread. Rewrite to use + * qemu_chr_fe_write and background I/O callbacks */ + qemu_chr_fe_write_all(&s->chr, &ch, 1); + /* XXX I/O are currently synchronous, making it impossible for + software to observe transient states where TXE or TC aren't + set. Unlike TXE however, which is read-only, software may + clear TC by writing 0 to the SR register, so set it again + on each write. 
*/ + s->usart_sr |= USART_SR_TC; + } + return; + case USART_BRR: + s->usart_brr = value; + return; + case USART_CR1: + s->usart_cr1 = value; + if (s->usart_cr1 & USART_CR1_RXNEIE && + s->usart_sr & USART_SR_RXNE) { + qemu_set_irq(s->irq, 1); + } + return; + case USART_CR2: + s->usart_cr2 = value; + return; + case USART_CR3: + s->usart_cr3 = value; + return; + case USART_GTPR: + s->usart_gtpr = value; + return; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad offset 0x%"HWADDR_PRIx"\n", __func__, addr); + } +} + +static const MemoryRegionOps stm32f2xx_usart_ops = { + .read = stm32f2xx_usart_read, + .write = stm32f2xx_usart_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static Property stm32f2xx_usart_properties[] = { + DEFINE_PROP_CHR("chardev", STM32F2XXUsartState, chr), + DEFINE_PROP_END_OF_LIST(), +}; + +static void stm32f2xx_usart_init(Object *obj) +{ + STM32F2XXUsartState *s = STM32F2XX_USART(obj); + + sysbus_init_irq(SYS_BUS_DEVICE(obj), &s->irq); + + memory_region_init_io(&s->mmio, obj, &stm32f2xx_usart_ops, s, + TYPE_STM32F2XX_USART, 0x400); + sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->mmio); +} + +static void stm32f2xx_usart_realize(DeviceState *dev, Error **errp) +{ + STM32F2XXUsartState *s = STM32F2XX_USART(dev); + + qemu_chr_fe_set_handlers(&s->chr, stm32f2xx_usart_can_receive, + stm32f2xx_usart_receive, NULL, NULL, + s, NULL, true); +} + +static void stm32f2xx_usart_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = stm32f2xx_usart_reset; + device_class_set_props(dc, stm32f2xx_usart_properties); + dc->realize = stm32f2xx_usart_realize; +} + +static const TypeInfo stm32f2xx_usart_info = { + .name = TYPE_STM32F2XX_USART, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(STM32F2XXUsartState), + .instance_init = stm32f2xx_usart_init, + .class_init = stm32f2xx_usart_class_init, +}; + +static void stm32f2xx_usart_register_types(void) +{ + type_register_static(&stm32f2xx_usart_info); +} + +type_init(stm32f2xx_usart_register_types) diff --git a/hw/char/terminal3270.c b/hw/char/terminal3270.c new file mode 100644 index 000000000..82e85fac2 --- /dev/null +++ b/hw/char/terminal3270.c @@ -0,0 +1,321 @@ +/* + * Terminal 3270 implementation + * + * Copyright 2017 IBM Corp. + * + * Authors: Yang Chen + * Jing Liu + * + * This work is licensed under the terms of the GNU GPL, version 2 or (at + * your option) any later version. See the COPYING file in the top-level + * directory. + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/module.h" +#include "chardev/char-fe.h" +#include "hw/qdev-properties.h" +#include "hw/qdev-properties-system.h" +#include "hw/s390x/3270-ccw.h" +#include "qom/object.h" + +/* Enough spaces for different window sizes. */ +#define INPUT_BUFFER_SIZE 1000 +/* + * 1 for header, 1024*2 for datastream, 2 for tail + * Reserve enough spaces for telnet IAC escape. 
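+ * In the worst case every datastream byte is 0xff (IAC) and must be
+ * doubled on the wire, hence 1 + 1024 * 2 + 2 = 2051 bytes.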
+ */ +#define OUTPUT_BUFFER_SIZE 2051 + +struct Terminal3270 { + EmulatedCcw3270Device cdev; + CharBackend chr; + uint8_t inv[INPUT_BUFFER_SIZE]; + uint8_t outv[OUTPUT_BUFFER_SIZE]; + int in_len; + bool handshake_done; + guint timer_tag; +}; +typedef struct Terminal3270 Terminal3270; + +#define TYPE_TERMINAL_3270 "x-terminal3270" +DECLARE_INSTANCE_CHECKER(Terminal3270, TERMINAL_3270, + TYPE_TERMINAL_3270) + +static int terminal_can_read(void *opaque) +{ + Terminal3270 *t = opaque; + + return INPUT_BUFFER_SIZE - t->in_len; +} + +static void terminal_timer_cancel(Terminal3270 *t) +{ + if (t->timer_tag) { + g_source_remove(t->timer_tag); + t->timer_tag = 0; + } +} + +/* + * Protocol handshake done, + * signal guest by an unsolicited DE irq. + */ +static void TN3270_handshake_done(Terminal3270 *t) +{ + CcwDevice *ccw_dev = CCW_DEVICE(t); + SubchDev *sch = ccw_dev->sch; + + t->handshake_done = true; + sch->curr_status.scsw.dstat = SCSW_DSTAT_DEVICE_END; + css_conditional_io_interrupt(sch); +} + +/* + * Called when the interval is timeout to detect + * if the client is still alive by Timing Mark. + */ +static gboolean send_timing_mark_cb(gpointer opaque) +{ + Terminal3270 *t = opaque; + const uint8_t timing[] = {0xff, 0xfd, 0x06}; + + qemu_chr_fe_write_all(&t->chr, timing, sizeof(timing)); + return true; +} + +/* + * Receive inbound data from socket. + * For data given to guest, drop the data boundary IAC, IAC_EOR. + * TODO: + * Using "Reset" key on x3270 may result multiple commands in one packet. + * This usually happens when the user meets a poor traffic of the network. + * As of now, for such case, we simply terminate the connection, + * and we should come back here later with a better solution. + */ +static void terminal_read(void *opaque, const uint8_t *buf, int size) +{ + Terminal3270 *t = opaque; + CcwDevice *ccw_dev = CCW_DEVICE(t); + SubchDev *sch = ccw_dev->sch; + int end; + + assert(size <= (INPUT_BUFFER_SIZE - t->in_len)); + + terminal_timer_cancel(t); + t->timer_tag = g_timeout_add_seconds(600, send_timing_mark_cb, t); + memcpy(&t->inv[t->in_len], buf, size); + t->in_len += size; + if (t->in_len < 2) { + return; + } + + if (!t->handshake_done) { + /* + * Receiving Terminal Type is the last step of handshake. + * The data format: IAC SB Terminal-Type IS IAC SE + * The code for Terminal-Type is 0x18, for IS is 0. + * Simply check the data format and mark handshake_done. + */ + if (t->in_len > 6 && t->inv[2] == 0x18 && t->inv[3] == 0x0 && + t->inv[t->in_len - 2] == IAC && t->inv[t->in_len - 1] == IAC_SE) { + TN3270_handshake_done(t); + t->in_len = 0; + } + return; + } + + for (end = 0; end < t->in_len - 1; end++) { + if (t->inv[end] == IAC && t->inv[end + 1] == IAC_EOR) { + break; + } + } + if (end == t->in_len - 2) { + /* Data is valid for consuming. */ + t->in_len -= 2; + sch->curr_status.scsw.dstat = SCSW_DSTAT_ATTENTION; + css_conditional_io_interrupt(sch); + } else if (end < t->in_len - 2) { + /* "Reset" key is used. */ + qemu_chr_fe_disconnect(&t->chr); + } else { + /* Gathering data. */ + return; + } +} + +static void chr_event(void *opaque, QEMUChrEvent event) +{ + Terminal3270 *t = opaque; + CcwDevice *ccw_dev = CCW_DEVICE(t); + SubchDev *sch = ccw_dev->sch; + + /* Ensure the initial status correct, always reset them. */ + t->in_len = 0; + t->handshake_done = false; + terminal_timer_cancel(t); + + switch (event) { + case CHR_EVENT_OPENED: + /* + * 3270 does handshake firstly by the negotiate options in + * char-socket.c. 
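+ * (The option negotiation itself lives in the chardev layer; only the
+ * final Terminal-Type reply is parsed here, by terminal_read().)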
Once qemu receives the terminal-type of the + * client, mark handshake done and trigger everything rolling again. + */ + t->timer_tag = g_timeout_add_seconds(600, send_timing_mark_cb, t); + break; + case CHR_EVENT_CLOSED: + sch->curr_status.scsw.dstat = SCSW_DSTAT_DEVICE_END; + css_conditional_io_interrupt(sch); + break; + case CHR_EVENT_BREAK: + case CHR_EVENT_MUX_IN: + case CHR_EVENT_MUX_OUT: + /* Ignore */ + break; + } +} + +static void terminal_init(EmulatedCcw3270Device *dev, Error **errp) +{ + Terminal3270 *t = TERMINAL_3270(dev); + static bool terminal_available; + + if (terminal_available) { + error_setg(errp, "Multiple 3270 terminals are not supported."); + return; + } + terminal_available = true; + qemu_chr_fe_set_handlers(&t->chr, terminal_can_read, + terminal_read, chr_event, NULL, t, NULL, true); +} + +static inline CcwDataStream *get_cds(Terminal3270 *t) +{ + return &(CCW_DEVICE(&t->cdev)->sch->cds); +} + +static int read_payload_3270(EmulatedCcw3270Device *dev) +{ + Terminal3270 *t = TERMINAL_3270(dev); + int len; + int ret; + + len = MIN(ccw_dstream_avail(get_cds(t)), t->in_len); + ret = ccw_dstream_write_buf(get_cds(t), t->inv, len); + if (ret < 0) { + return ret; + } + t->in_len -= len; + + return len; +} + +/* TN3270 uses binary transmission, which needs escape IAC to IAC IAC */ +static int insert_IAC_escape_char(uint8_t *outv, int out_len) +{ + int IAC_num = 0, new_out_len, i, j; + + for (i = 0; i < out_len; i++) { + if (outv[i] == IAC) { + IAC_num++; + } + } + if (IAC_num == 0) { + return out_len; + } + new_out_len = out_len + IAC_num; + for (i = out_len - 1, j = new_out_len - 1; j > i && i >= 0; i--, j--) { + outv[j] = outv[i]; + if (outv[i] == IAC) { + outv[--j] = IAC; + } + } + return new_out_len; +} + +/* + * Write 3270 outbound to socket. + * Return the count of 3270 data field if succeeded, zero if failed. + */ +static int write_payload_3270(EmulatedCcw3270Device *dev, uint8_t cmd) +{ + Terminal3270 *t = TERMINAL_3270(dev); + int retval = 0; + int count = ccw_dstream_avail(get_cds(t)); + int bound = (OUTPUT_BUFFER_SIZE - 3) / 2; + int len = MIN(count, bound); + int out_len = 0; + + if (!t->handshake_done) { + if (!(t->outv[0] == IAC && t->outv[1] != IAC)) { + /* + * Before having finished 3270 negotiation, + * sending outbound data except protocol options is prohibited. + */ + return 0; + } + } + if (!qemu_chr_fe_backend_connected(&t->chr)) { + /* We just say we consumed all data if there's no backend. */ + return count; + } + + t->outv[out_len++] = cmd; + do { + retval = ccw_dstream_read_buf(get_cds(t), &t->outv[out_len], len); + if (retval < 0) { + return retval; + } + count = ccw_dstream_avail(get_cds(t)); + out_len += len; + + out_len = insert_IAC_escape_char(t->outv, out_len); + if (!count) { + t->outv[out_len++] = IAC; + t->outv[out_len++] = IAC_EOR; + } + retval = qemu_chr_fe_write_all(&t->chr, t->outv, out_len); + len = MIN(count, bound); + out_len = 0; + } while (len && retval >= 0); + return (retval <= 0) ? 
0 : get_cds(t)->count; +} + +static Property terminal_properties[] = { + DEFINE_PROP_CHR("chardev", Terminal3270, chr), + DEFINE_PROP_END_OF_LIST(), +}; + +static const VMStateDescription terminal3270_vmstate = { + .name = TYPE_TERMINAL_3270, + .unmigratable = 1, +}; + +static void terminal_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + EmulatedCcw3270Class *ck = EMULATED_CCW_3270_CLASS(klass); + + device_class_set_props(dc, terminal_properties); + dc->vmsd = &terminal3270_vmstate; + ck->init = terminal_init; + ck->read_payload_3270 = read_payload_3270; + ck->write_payload_3270 = write_payload_3270; +} + +static const TypeInfo ccw_terminal_info = { + .name = TYPE_TERMINAL_3270, + .parent = TYPE_EMULATED_CCW_3270, + .instance_size = sizeof(Terminal3270), + .class_init = terminal_class_init, + .class_size = sizeof(EmulatedCcw3270Class), +}; + +static void register_types(void) +{ + type_register_static(&ccw_terminal_info); +} + +type_init(register_types) diff --git a/hw/char/trace-events b/hw/char/trace-events new file mode 100644 index 000000000..2ecb36232 --- /dev/null +++ b/hw/char/trace-events @@ -0,0 +1,107 @@ +# See docs/devel/tracing.rst for syntax documentation. + +# parallel.c +parallel_ioport_read(const char *desc, uint16_t addr, uint8_t value) "read [%s] addr 0x%02x val 0x%02x" +parallel_ioport_write(const char *desc, uint16_t addr, uint8_t value) "write [%s] addr 0x%02x val 0x%02x" + +# serial.c +serial_read(uint16_t addr, uint8_t value) "read addr 0x%02x val 0x%02x" +serial_write(uint16_t addr, uint8_t value) "write addr 0x%02x val 0x%02x" +serial_update_parameters(uint64_t baudrate, char parity, int data_bits, int stop_bits) "baudrate=%"PRIu64" parity='%c' data=%d stop=%d" + +# virtio-serial-bus.c +virtio_serial_send_control_event(unsigned int port, uint16_t event, uint16_t value) "port %u, event %u, value %u" +virtio_serial_throttle_port(unsigned int port, bool throttle) "port %u, throttle %d" +virtio_serial_handle_control_message(uint16_t event, uint16_t value) "event %u, value %u" +virtio_serial_handle_control_message_port(unsigned int port) "port %u" + +# virtio-console.c +virtio_console_flush_buf(unsigned int port, size_t len, ssize_t ret) "port %u, in_len %zu, out_len %zd" +virtio_console_chr_read(unsigned int port, int size) "port %u, size %d" +virtio_console_chr_event(unsigned int port, int event) "port %u, event %d" + +# goldfish_tty.c +goldfish_tty_read(void *dev, unsigned int addr, unsigned int size, uint64_t value) "tty: %p reg: 0x%02x size: %d value: 0x%"PRIx64 +goldfish_tty_write(void *dev, unsigned int addr, unsigned int size, uint64_t value) "tty: %p reg: 0x%02x size: %d value: 0x%"PRIx64 +goldfish_tty_can_receive(void *dev, unsigned int available) "tty: %p available: %u" +goldfish_tty_receive(void *dev, unsigned int size) "tty: %p size: %u" +goldfish_tty_reset(void *dev) "tty: %p" +goldfish_tty_realize(void *dev) "tty: %p" +goldfish_tty_unrealize(void *dev) "tty: %p" +goldfish_tty_instance_init(void *dev) "tty: %p" + +# grlib_apbuart.c +grlib_apbuart_event(int event) "event:%d" +grlib_apbuart_writel_unknown(uint64_t addr, uint32_t value) "addr 0x%"PRIx64" value 0x%x" +grlib_apbuart_readl_unknown(uint64_t addr) "addr 0x%"PRIx64 + +# escc.c +escc_hard_reset(void) "hard reset" +escc_soft_reset_chn(char channel) "soft reset channel %c" +escc_put_queue(char channel, int b) "channel %c put: 0x%02x" +escc_get_queue(char channel, int val) "channel %c get 0x%02x" +escc_update_irq(int irq) "IRQ = %d" +escc_update_parameters(char 
channel, int speed, int parity, int data_bits, int stop_bits) "channel %c: speed=%d parity=%c data=%d stop=%d" +escc_mem_writeb_ctrl(char channel, uint32_t reg, uint32_t val) "Write channel %c, reg[%d] = 0x%2.2x" +escc_mem_writeb_data(char channel, uint32_t val) "Write channel %c, ch %d" +escc_mem_readb_ctrl(char channel, uint32_t reg, uint8_t val) "Read channel %c, reg[%d] = 0x%2.2x" +escc_mem_readb_data(char channel, uint32_t ret) "Read channel %c, ch %d" +escc_serial_receive_byte(char channel, int ch) "channel %c put ch %d" +escc_sunkbd_event_in(int ch, const char *name, int down) "QKeyCode 0x%2.2x [%s], down %d" +escc_sunkbd_event_out(int ch) "Translated keycode 0x%2.2x" +escc_kbd_command(int val) "Command %d" +escc_sunmouse_event(int dx, int dy, int buttons_state) "dx=%d dy=%d buttons=0x%01x" + +# pl011.c +pl011_irq_state(int level) "irq state %d" +pl011_read(uint32_t addr, uint32_t value) "addr 0x%08x value 0x%08x" +pl011_read_fifo(int read_count) "FIFO read, read_count now %d" +pl011_write(uint32_t addr, uint32_t value) "addr 0x%08x value 0x%08x" +pl011_can_receive(uint32_t lcr, int read_count, int r) "LCR 0x%08x read_count %d returning %d" +pl011_put_fifo(uint32_t c, int read_count) "new char 0x%x read_count now %d" +pl011_put_fifo_full(void) "FIFO now full, RXFF set" +pl011_baudrate_change(unsigned int baudrate, uint64_t clock, uint32_t ibrd, uint32_t fbrd) "new baudrate %u (clk: %" PRIu64 "hz, ibrd: %" PRIu32 ", fbrd: %" PRIu32 ")" + +# cmsdk-apb-uart.c +cmsdk_apb_uart_read(uint64_t offset, uint64_t data, unsigned size) "CMSDK APB UART read: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u" +cmsdk_apb_uart_write(uint64_t offset, uint64_t data, unsigned size) "CMSDK APB UART write: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u" +cmsdk_apb_uart_reset(void) "CMSDK APB UART: reset" +cmsdk_apb_uart_receive(uint8_t c) "CMSDK APB UART: got character 0x%x from backend" +cmsdk_apb_uart_tx_pending(void) "CMSDK APB UART: character send to backend pending" +cmsdk_apb_uart_tx(uint8_t c) "CMSDK APB UART: character 0x%x sent to backend" +cmsdk_apb_uart_set_params(int speed) "CMSDK APB UART: params set to %d 8N1" + +# nrf51_uart.c +nrf51_uart_read(uint64_t addr, uint64_t r, unsigned int size) "addr 0x%" PRIx64 " value 0x%" PRIx64 " size %u" +nrf51_uart_write(uint64_t addr, uint64_t value, unsigned int size) "addr 0x%" PRIx64 " value 0x%" PRIx64 " size %u" + +# shakti_uart.c +shakti_uart_read(uint64_t addr, uint16_t r, unsigned int size) "addr 0x%" PRIx64 " value 0x%" PRIx16 " size %u" +shakti_uart_write(uint64_t addr, uint64_t value, unsigned int size) "addr 0x%" PRIx64 " value 0x%" PRIx64 " size %u" + +# exynos4210_uart.c +exynos_uart_dmabusy(uint32_t channel) "UART%d: DMA busy (Rx buffer empty)" +exynos_uart_dmaready(uint32_t channel) "UART%d: DMA ready" +exynos_uart_irq_raised(uint32_t channel, uint32_t reg) "UART%d: IRQ raised: 0x%08"PRIx32 +exynos_uart_irq_lowered(uint32_t channel) "UART%d: IRQ lowered" +exynos_uart_update_params(uint32_t channel, int speed, uint8_t parity, int data, int stop, uint64_t wordtime) "UART%d: speed: %d, parity: %c, data bits: %d, stop bits: %d wordtime: %"PRId64"ns" +exynos_uart_write(uint32_t channel, uint32_t offset, const char *name, uint64_t val) "UART%d: <0x%04x> %s <- 0x%" PRIx64 +exynos_uart_read(uint32_t channel, uint32_t offset, const char *name, uint64_t val) "UART%d: <0x%04x> %s -> 0x%" PRIx64 +exynos_uart_rx_fifo_reset(uint32_t channel) "UART%d: Rx FIFO Reset" +exynos_uart_tx_fifo_reset(uint32_t channel) "UART%d: Tx FIFO Reset" 
+exynos_uart_tx(uint32_t channel, uint8_t ch) "UART%d: Tx 0x%02"PRIx32 +exynos_uart_intclr(uint32_t channel, uint32_t reg) "UART%d: interrupts cleared: 0x%08"PRIx32 +exynos_uart_ro_write(uint32_t channel, const char *name, uint32_t reg) "UART%d: Trying to write into RO register: %s [0x%04"PRIx32"]" +exynos_uart_rx(uint32_t channel, uint8_t ch) "UART%d: Rx 0x%02"PRIx32 +exynos_uart_rx_error(uint32_t channel) "UART%d: Rx error" +exynos_uart_wo_read(uint32_t channel, const char *name, uint32_t reg) "UART%d: Trying to read from WO register: %s [0x%04"PRIx32"]" +exynos_uart_rxsize(uint32_t channel, uint32_t size) "UART%d: Rx FIFO size: %d" +exynos_uart_channel_error(uint32_t channel) "Wrong UART channel number: %d" +exynos_uart_rx_timeout(uint32_t channel, uint32_t stat, uint32_t intsp) "UART%d: Rx timeout stat=0x%x intsp=0x%x" + +# cadence_uart.c +cadence_uart_baudrate(unsigned baudrate) "baudrate %u" + +# sh_serial.c +sh_serial_read(char *id, unsigned size, uint64_t offs, uint64_t val) " %s size %d offs 0x%02" PRIx64 " -> 0x%02" PRIx64 +sh_serial_write(char *id, unsigned size, uint64_t offs, uint64_t val) "%s size %d offs 0x%02" PRIx64 " <- 0x%02" PRIx64 diff --git a/hw/char/trace.h b/hw/char/trace.h new file mode 100644 index 000000000..c2df66af2 --- /dev/null +++ b/hw/char/trace.h @@ -0,0 +1 @@ +#include "trace/trace-hw_char.h" diff --git a/hw/char/virtio-console.c b/hw/char/virtio-console.c new file mode 100644 index 000000000..dd5a02e33 --- /dev/null +++ b/hw/char/virtio-console.c @@ -0,0 +1,309 @@ +/* + * Virtio Console and Generic Serial Port Devices + * + * Copyright Red Hat, Inc. 2009, 2010 + * + * Authors: + * Amit Shah + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "chardev/char-fe.h" +#include "qemu/error-report.h" +#include "qemu/module.h" +#include "trace.h" +#include "hw/qdev-properties.h" +#include "hw/qdev-properties-system.h" +#include "hw/virtio/virtio-serial.h" +#include "qapi/error.h" +#include "qapi/qapi-events-char.h" +#include "qom/object.h" + +#define TYPE_VIRTIO_CONSOLE_SERIAL_PORT "virtserialport" +typedef struct VirtConsole VirtConsole; +DECLARE_INSTANCE_CHECKER(VirtConsole, VIRTIO_CONSOLE, + TYPE_VIRTIO_CONSOLE_SERIAL_PORT) + +struct VirtConsole { + VirtIOSerialPort parent_obj; + + CharBackend chr; + guint watch; +}; + +/* + * Callback function that's called from chardevs when backend becomes + * writable. + */ +static gboolean chr_write_unblocked(void *do_not_use, GIOCondition cond, + void *opaque) +{ + VirtConsole *vcon = opaque; + + vcon->watch = 0; + virtio_serial_throttle_port(VIRTIO_SERIAL_PORT(vcon), false); + return FALSE; +} + +/* Callback function that's called when the guest sends us data */ +static ssize_t flush_buf(VirtIOSerialPort *port, + const uint8_t *buf, ssize_t len) +{ + VirtConsole *vcon = VIRTIO_CONSOLE(port); + ssize_t ret; + + if (!qemu_chr_fe_backend_connected(&vcon->chr)) { + /* If there's no backend, we can just say we consumed all data. */ + return len; + } + + ret = qemu_chr_fe_write(&vcon->chr, buf, len); + trace_virtio_console_flush_buf(port->id, len, ret); + + if (ret < len) { + VirtIOSerialPortClass *k = VIRTIO_SERIAL_PORT_GET_CLASS(port); + + /* + * Ideally we'd get a better error code than just -1, but + * that's what the chardev interface gives us right now. If + * we had a finer-grained message, like -EPIPE, we could close + * this connection. 
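+ * As it is, a negative return is folded into "wrote 0 bytes" just
+ * below, and the unwritten data falls through to the throttling (or,
+ * for console ports, dropping) logic that follows.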
+ */ + if (ret < 0) + ret = 0; + + /* XXX we should be queuing data to send later for the + * console devices too rather than silently dropping + * console data on EAGAIN. The Linux virtio-console + * hvc driver though does sends with spinlocks held, + * so if we enable throttling that'll stall the entire + * guest kernel, not merely the process writing to the + * console. + * + * While we could queue data for later write without + * enabling throttling, this would result in the guest + * being able to trigger arbitrary memory usage in QEMU + * buffering data for later writes. + * + * So fixing this problem likely requires fixing the + * Linux virtio-console hvc driver to not hold spinlocks + * while writing, and instead merely block the process + * that's writing. QEMU would then need some way to detect + * if the guest had the fixed driver too, before we can + * use throttling on host side. + */ + if (!k->is_console) { + virtio_serial_throttle_port(port, true); + if (!vcon->watch) { + vcon->watch = qemu_chr_fe_add_watch(&vcon->chr, + G_IO_OUT|G_IO_HUP, + chr_write_unblocked, vcon); + } + } + } + return ret; +} + +/* Callback function that's called when the guest opens/closes the port */ +static void set_guest_connected(VirtIOSerialPort *port, int guest_connected) +{ + VirtConsole *vcon = VIRTIO_CONSOLE(port); + DeviceState *dev = DEVICE(port); + VirtIOSerialPortClass *k = VIRTIO_SERIAL_PORT_GET_CLASS(port); + + if (!k->is_console) { + qemu_chr_fe_set_open(&vcon->chr, guest_connected); + } + + if (dev->id) { + qapi_event_send_vserport_change(dev->id, guest_connected); + } +} + +static void guest_writable(VirtIOSerialPort *port) +{ + VirtConsole *vcon = VIRTIO_CONSOLE(port); + + qemu_chr_fe_accept_input(&vcon->chr); +} + +/* Readiness of the guest to accept data on a port */ +static int chr_can_read(void *opaque) +{ + VirtConsole *vcon = opaque; + + return virtio_serial_guest_ready(VIRTIO_SERIAL_PORT(vcon)); +} + +/* Send data from a char device over to the guest */ +static void chr_read(void *opaque, const uint8_t *buf, int size) +{ + VirtConsole *vcon = opaque; + VirtIOSerialPort *port = VIRTIO_SERIAL_PORT(vcon); + + trace_virtio_console_chr_read(port->id, size); + virtio_serial_write(port, buf, size); +} + +static void chr_event(void *opaque, QEMUChrEvent event) +{ + VirtConsole *vcon = opaque; + VirtIOSerialPort *port = VIRTIO_SERIAL_PORT(vcon); + + trace_virtio_console_chr_event(port->id, event); + switch (event) { + case CHR_EVENT_OPENED: + virtio_serial_open(port); + break; + case CHR_EVENT_CLOSED: + if (vcon->watch) { + g_source_remove(vcon->watch); + vcon->watch = 0; + } + virtio_serial_close(port); + break; + case CHR_EVENT_BREAK: + case CHR_EVENT_MUX_IN: + case CHR_EVENT_MUX_OUT: + /* Ignore */ + break; + } +} + +static int chr_be_change(void *opaque) +{ + VirtConsole *vcon = opaque; + VirtIOSerialPort *port = VIRTIO_SERIAL_PORT(vcon); + VirtIOSerialPortClass *k = VIRTIO_SERIAL_PORT_GET_CLASS(port); + + if (k->is_console) { + qemu_chr_fe_set_handlers(&vcon->chr, chr_can_read, chr_read, + NULL, chr_be_change, vcon, NULL, true); + } else { + qemu_chr_fe_set_handlers(&vcon->chr, chr_can_read, chr_read, + chr_event, chr_be_change, vcon, NULL, false); + } + + if (vcon->watch) { + g_source_remove(vcon->watch); + vcon->watch = qemu_chr_fe_add_watch(&vcon->chr, + G_IO_OUT | G_IO_HUP, + chr_write_unblocked, vcon); + } + + return 0; +} + +static void virtconsole_enable_backend(VirtIOSerialPort *port, bool enable) +{ + VirtConsole *vcon = VIRTIO_CONSOLE(port); + + if 
(!qemu_chr_fe_backend_connected(&vcon->chr)) { + return; + } + + if (enable) { + VirtIOSerialPortClass *k = VIRTIO_SERIAL_PORT_GET_CLASS(port); + + qemu_chr_fe_set_handlers(&vcon->chr, chr_can_read, chr_read, + k->is_console ? NULL : chr_event, + chr_be_change, vcon, NULL, false); + } else { + qemu_chr_fe_set_handlers(&vcon->chr, NULL, NULL, NULL, + NULL, NULL, NULL, false); + } +} + +static void virtconsole_realize(DeviceState *dev, Error **errp) +{ + VirtIOSerialPort *port = VIRTIO_SERIAL_PORT(dev); + VirtConsole *vcon = VIRTIO_CONSOLE(dev); + VirtIOSerialPortClass *k = VIRTIO_SERIAL_PORT_GET_CLASS(dev); + + if (port->id == 0 && !k->is_console) { + error_setg(errp, "Port number 0 on virtio-serial devices reserved " + "for virtconsole devices for backward compatibility."); + return; + } + + if (qemu_chr_fe_backend_connected(&vcon->chr)) { + /* + * For consoles we don't block guest data transfer just + * because nothing is connected - we'll just let it go + * whetherever the chardev wants - /dev/null probably. + * + * For serial ports we need 100% reliable data transfer + * so we use the opened/closed signals from chardev to + * trigger open/close of the device + */ + if (k->is_console) { + qemu_chr_fe_set_handlers(&vcon->chr, chr_can_read, chr_read, + NULL, chr_be_change, + vcon, NULL, true); + virtio_serial_open(port); + } else { + qemu_chr_fe_set_handlers(&vcon->chr, chr_can_read, chr_read, + chr_event, chr_be_change, + vcon, NULL, false); + } + } +} + +static void virtconsole_unrealize(DeviceState *dev) +{ + VirtConsole *vcon = VIRTIO_CONSOLE(dev); + + if (vcon->watch) { + g_source_remove(vcon->watch); + } +} + +static void virtconsole_class_init(ObjectClass *klass, void *data) +{ + VirtIOSerialPortClass *k = VIRTIO_SERIAL_PORT_CLASS(klass); + + k->is_console = true; +} + +static const TypeInfo virtconsole_info = { + .name = "virtconsole", + .parent = TYPE_VIRTIO_CONSOLE_SERIAL_PORT, + .class_init = virtconsole_class_init, +}; + +static Property virtserialport_properties[] = { + DEFINE_PROP_CHR("chardev", VirtConsole, chr), + DEFINE_PROP_END_OF_LIST(), +}; + +static void virtserialport_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + VirtIOSerialPortClass *k = VIRTIO_SERIAL_PORT_CLASS(klass); + + k->realize = virtconsole_realize; + k->unrealize = virtconsole_unrealize; + k->have_data = flush_buf; + k->set_guest_connected = set_guest_connected; + k->enable_backend = virtconsole_enable_backend; + k->guest_writable = guest_writable; + device_class_set_props(dc, virtserialport_properties); +} + +static const TypeInfo virtserialport_info = { + .name = TYPE_VIRTIO_CONSOLE_SERIAL_PORT, + .parent = TYPE_VIRTIO_SERIAL_PORT, + .instance_size = sizeof(VirtConsole), + .class_init = virtserialport_class_init, +}; + +static void virtconsole_register_types(void) +{ + type_register_static(&virtserialport_info); + type_register_static(&virtconsole_info); +} + +type_init(virtconsole_register_types) diff --git a/hw/char/virtio-serial-bus.c b/hw/char/virtio-serial-bus.c new file mode 100644 index 000000000..f01ec2137 --- /dev/null +++ b/hw/char/virtio-serial-bus.c @@ -0,0 +1,1209 @@ +/* + * A bus for connecting virtio serial and console ports + * + * Copyright (C) 2009, 2010 Red Hat, Inc. + * + * Author(s): + * Amit Shah + * + * Some earlier parts are: + * Copyright IBM, Corp. 2008 + * authored by + * Christian Ehrhardt + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. 
+ * + * Contributions after 2012-01-13 are licensed under the terms of the + * GNU GPL, version 2 or (at your option) any later version. + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/iov.h" +#include "qemu/main-loop.h" +#include "qemu/module.h" +#include "migration/qemu-file-types.h" +#include "monitor/monitor.h" +#include "qemu/error-report.h" +#include "qemu/queue.h" +#include "hw/qdev-properties.h" +#include "trace.h" +#include "hw/virtio/virtio-serial.h" +#include "hw/virtio/virtio-access.h" + +static struct VirtIOSerialDevices { + QLIST_HEAD(, VirtIOSerial) devices; +} vserdevices; + +static VirtIOSerialPort *find_port_by_id(VirtIOSerial *vser, uint32_t id) +{ + VirtIOSerialPort *port; + + if (id == VIRTIO_CONSOLE_BAD_ID) { + return NULL; + } + + QTAILQ_FOREACH(port, &vser->ports, next) { + if (port->id == id) + return port; + } + return NULL; +} + +static VirtIOSerialPort *find_port_by_vq(VirtIOSerial *vser, VirtQueue *vq) +{ + VirtIOSerialPort *port; + + QTAILQ_FOREACH(port, &vser->ports, next) { + if (port->ivq == vq || port->ovq == vq) + return port; + } + return NULL; +} + +static VirtIOSerialPort *find_port_by_name(char *name) +{ + VirtIOSerial *vser; + + QLIST_FOREACH(vser, &vserdevices.devices, next) { + VirtIOSerialPort *port; + + QTAILQ_FOREACH(port, &vser->ports, next) { + if (port->name && !strcmp(port->name, name)) { + return port; + } + } + } + return NULL; +} + +static VirtIOSerialPort *find_first_connected_console(VirtIOSerial *vser) +{ + VirtIOSerialPort *port; + + QTAILQ_FOREACH(port, &vser->ports, next) { + VirtIOSerialPortClass const *vsc = VIRTIO_SERIAL_PORT_GET_CLASS(port); + if (vsc->is_console && port->host_connected) { + return port; + } + } + return NULL; +} + +static bool use_multiport(VirtIOSerial *vser) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(vser); + return virtio_vdev_has_feature(vdev, VIRTIO_CONSOLE_F_MULTIPORT); +} + +static size_t write_to_port(VirtIOSerialPort *port, + const uint8_t *buf, size_t size) +{ + VirtQueueElement *elem; + VirtQueue *vq; + size_t offset; + + vq = port->ivq; + if (!virtio_queue_ready(vq)) { + return 0; + } + + offset = 0; + while (offset < size) { + size_t len; + + elem = virtqueue_pop(vq, sizeof(VirtQueueElement)); + if (!elem) { + break; + } + + len = iov_from_buf(elem->in_sg, elem->in_num, 0, + buf + offset, size - offset); + offset += len; + + virtqueue_push(vq, elem, len); + g_free(elem); + } + + virtio_notify(VIRTIO_DEVICE(port->vser), vq); + return offset; +} + +static void discard_vq_data(VirtQueue *vq, VirtIODevice *vdev) +{ + VirtQueueElement *elem; + + if (!virtio_queue_ready(vq)) { + return; + } + for (;;) { + elem = virtqueue_pop(vq, sizeof(VirtQueueElement)); + if (!elem) { + break; + } + virtqueue_push(vq, elem, 0); + g_free(elem); + } + virtio_notify(vdev, vq); +} + +static void discard_throttle_data(VirtIOSerialPort *port) +{ + if (port->elem) { + virtqueue_detach_element(port->ovq, port->elem, 0); + g_free(port->elem); + port->elem = NULL; + } +} + +static void do_flush_queued_data(VirtIOSerialPort *port, VirtQueue *vq, + VirtIODevice *vdev) +{ + VirtIOSerialPortClass *vsc; + + assert(port); + assert(virtio_queue_ready(vq)); + + vsc = VIRTIO_SERIAL_PORT_GET_CLASS(port); + + while (!port->throttled) { + unsigned int i; + + /* Pop an elem only if we haven't left off a previous one mid-way */ + if (!port->elem) { + port->elem = virtqueue_pop(vq, sizeof(VirtQueueElement)); + if (!port->elem) { + break; + } + port->iov_idx = 0; + port->iov_offset = 0; + } + + for (i = port->iov_idx; i < 
port->elem->out_num; i++) { + size_t buf_size; + ssize_t ret; + + buf_size = port->elem->out_sg[i].iov_len - port->iov_offset; + ret = vsc->have_data(port, + port->elem->out_sg[i].iov_base + + port->iov_offset, + buf_size); + if (!port->elem) { /* bail if we got disconnected */ + return; + } + if (port->throttled) { + port->iov_idx = i; + if (ret > 0) { + port->iov_offset += ret; + } + break; + } + port->iov_offset = 0; + } + if (port->throttled) { + break; + } + virtqueue_push(vq, port->elem, 0); + g_free(port->elem); + port->elem = NULL; + } + virtio_notify(vdev, vq); +} + +static void flush_queued_data(VirtIOSerialPort *port) +{ + assert(port); + + if (!virtio_queue_ready(port->ovq)) { + return; + } + do_flush_queued_data(port, port->ovq, VIRTIO_DEVICE(port->vser)); +} + +static size_t send_control_msg(VirtIOSerial *vser, void *buf, size_t len) +{ + VirtQueueElement *elem; + VirtQueue *vq; + + vq = vser->c_ivq; + if (!virtio_queue_ready(vq)) { + return 0; + } + + elem = virtqueue_pop(vq, sizeof(VirtQueueElement)); + if (!elem) { + return 0; + } + + /* TODO: detect a buffer that's too short, set NEEDS_RESET */ + iov_from_buf(elem->in_sg, elem->in_num, 0, buf, len); + + virtqueue_push(vq, elem, len); + virtio_notify(VIRTIO_DEVICE(vser), vq); + g_free(elem); + + return len; +} + +static size_t send_control_event(VirtIOSerial *vser, uint32_t port_id, + uint16_t event, uint16_t value) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(vser); + struct virtio_console_control cpkt; + + virtio_stl_p(vdev, &cpkt.id, port_id); + virtio_stw_p(vdev, &cpkt.event, event); + virtio_stw_p(vdev, &cpkt.value, value); + + trace_virtio_serial_send_control_event(port_id, event, value); + return send_control_msg(vser, &cpkt, sizeof(cpkt)); +} + +/* Functions for use inside qemu to open and read from/write to ports */ +int virtio_serial_open(VirtIOSerialPort *port) +{ + /* Don't allow opening an already-open port */ + if (port->host_connected) { + return 0; + } + /* Send port open notification to the guest */ + port->host_connected = true; + send_control_event(port->vser, port->id, VIRTIO_CONSOLE_PORT_OPEN, 1); + + return 0; +} + +int virtio_serial_close(VirtIOSerialPort *port) +{ + port->host_connected = false; + /* + * If there's any data the guest sent which the app didn't + * consume, reset the throttling flag and discard the data. + */ + port->throttled = false; + discard_throttle_data(port); + discard_vq_data(port->ovq, VIRTIO_DEVICE(port->vser)); + + send_control_event(port->vser, port->id, VIRTIO_CONSOLE_PORT_OPEN, 0); + + return 0; +} + +/* Individual ports/apps call this function to write to the guest. */ +ssize_t virtio_serial_write(VirtIOSerialPort *port, const uint8_t *buf, + size_t size) +{ + if (!port || !port->host_connected || !port->guest_connected) { + return 0; + } + return write_to_port(port, buf, size); +} + +/* + * Readiness of the guest to accept data on a port. + * Returns max. 
data the guest can receive + */ +size_t virtio_serial_guest_ready(VirtIOSerialPort *port) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(port->vser); + VirtQueue *vq = port->ivq; + unsigned int bytes; + + if (!virtio_queue_ready(vq) || + !(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK) || + virtio_queue_empty(vq)) { + return 0; + } + if (use_multiport(port->vser) && !port->guest_connected) { + return 0; + } + virtqueue_get_avail_bytes(vq, &bytes, NULL, 4096, 0); + return bytes; +} + +static void flush_queued_data_bh(void *opaque) +{ + VirtIOSerialPort *port = opaque; + + flush_queued_data(port); +} + +void virtio_serial_throttle_port(VirtIOSerialPort *port, bool throttle) +{ + if (!port) { + return; + } + + trace_virtio_serial_throttle_port(port->id, throttle); + port->throttled = throttle; + if (throttle) { + return; + } + qemu_bh_schedule(port->bh); +} + +/* Guest wants to notify us of some event */ +static void handle_control_message(VirtIOSerial *vser, void *buf, size_t len) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(vser); + struct VirtIOSerialPort *port; + VirtIOSerialPortClass *vsc; + struct virtio_console_control cpkt, *gcpkt; + uint8_t *buffer; + size_t buffer_len; + + gcpkt = buf; + + if (len < sizeof(cpkt)) { + /* The guest sent an invalid control packet */ + return; + } + + cpkt.event = virtio_lduw_p(vdev, &gcpkt->event); + cpkt.value = virtio_lduw_p(vdev, &gcpkt->value); + + trace_virtio_serial_handle_control_message(cpkt.event, cpkt.value); + + if (cpkt.event == VIRTIO_CONSOLE_DEVICE_READY) { + if (!cpkt.value) { + error_report("virtio-serial-bus: Guest failure in adding device %s", + vser->bus.qbus.name); + return; + } + /* + * The device is up, we can now tell the device about all the + * ports we have here. + */ + QTAILQ_FOREACH(port, &vser->ports, next) { + send_control_event(vser, port->id, VIRTIO_CONSOLE_PORT_ADD, 1); + } + return; + } + + port = find_port_by_id(vser, virtio_ldl_p(vdev, &gcpkt->id)); + if (!port) { + error_report("virtio-serial-bus: Unexpected port id %u for device %s", + virtio_ldl_p(vdev, &gcpkt->id), vser->bus.qbus.name); + return; + } + + trace_virtio_serial_handle_control_message_port(port->id); + + vsc = VIRTIO_SERIAL_PORT_GET_CLASS(port); + + switch(cpkt.event) { + case VIRTIO_CONSOLE_PORT_READY: + if (!cpkt.value) { + error_report("virtio-serial-bus: Guest failure in adding port %u for device %s", + port->id, vser->bus.qbus.name); + break; + } + /* + * Now that we know the guest asked for the port name, we're + * sure the guest has initialised whatever state is necessary + * for this port. Now's a good time to let the guest know if + * this port is a console port so that the guest can hook it + * up to hvc. + */ + if (vsc->is_console) { + send_control_event(vser, port->id, VIRTIO_CONSOLE_CONSOLE_PORT, 1); + } + + if (port->name) { + virtio_stl_p(vdev, &cpkt.id, port->id); + virtio_stw_p(vdev, &cpkt.event, VIRTIO_CONSOLE_PORT_NAME); + virtio_stw_p(vdev, &cpkt.value, 1); + + buffer_len = sizeof(cpkt) + strlen(port->name) + 1; + buffer = g_malloc(buffer_len); + + memcpy(buffer, &cpkt, sizeof(cpkt)); + memcpy(buffer + sizeof(cpkt), port->name, strlen(port->name)); + buffer[buffer_len - 1] = 0; + + send_control_msg(vser, buffer, buffer_len); + g_free(buffer); + } + + if (port->host_connected) { + send_control_event(vser, port->id, VIRTIO_CONSOLE_PORT_OPEN, 1); + } + + /* + * When the guest has asked us for this information it means + * the guest is all setup and has its virtqueues + * initialised. 
If some app is interested in knowing about + * this event, let it know. + */ + if (vsc->guest_ready) { + vsc->guest_ready(port); + } + break; + + case VIRTIO_CONSOLE_PORT_OPEN: + port->guest_connected = cpkt.value; + if (vsc->set_guest_connected) { + /* Send the guest opened notification if an app is interested */ + vsc->set_guest_connected(port, cpkt.value); + } + break; + } +} + +static void control_in(VirtIODevice *vdev, VirtQueue *vq) +{ +} + +static void control_out(VirtIODevice *vdev, VirtQueue *vq) +{ + VirtQueueElement *elem; + VirtIOSerial *vser; + uint8_t *buf; + size_t len; + + vser = VIRTIO_SERIAL(vdev); + + len = 0; + buf = NULL; + for (;;) { + size_t cur_len; + + elem = virtqueue_pop(vq, sizeof(VirtQueueElement)); + if (!elem) { + break; + } + + cur_len = iov_size(elem->out_sg, elem->out_num); + /* + * Allocate a new buf only if we didn't have one previously or + * if the size of the buf differs + */ + if (cur_len > len) { + g_free(buf); + + buf = g_malloc(cur_len); + len = cur_len; + } + iov_to_buf(elem->out_sg, elem->out_num, 0, buf, cur_len); + + handle_control_message(vser, buf, cur_len); + virtqueue_push(vq, elem, 0); + g_free(elem); + } + g_free(buf); + virtio_notify(vdev, vq); +} + +/* Guest wrote something to some port. */ +static void handle_output(VirtIODevice *vdev, VirtQueue *vq) +{ + VirtIOSerial *vser; + VirtIOSerialPort *port; + + vser = VIRTIO_SERIAL(vdev); + port = find_port_by_vq(vser, vq); + + if (!port || !port->host_connected) { + discard_vq_data(vq, vdev); + return; + } + + if (!port->throttled) { + do_flush_queued_data(port, vq, vdev); + return; + } +} + +static void handle_input(VirtIODevice *vdev, VirtQueue *vq) +{ + /* + * Users of virtio-serial would like to know when guest becomes + * writable again -- i.e. if a vq had stuff queued up and the + * guest wasn't reading at all, the host would not be able to + * write to the vq anymore. Once the guest reads off something, + * we can start queueing things up again. However, this call is + * made for each buffer addition by the guest -- even though free + * buffers existed prior to the current buffer addition. This is + * done so as not to maintain previous state, which will need + * additional live-migration-related changes. + */ + VirtIOSerial *vser; + VirtIOSerialPort *port; + VirtIOSerialPortClass *vsc; + + vser = VIRTIO_SERIAL(vdev); + port = find_port_by_vq(vser, vq); + + if (!port) { + return; + } + vsc = VIRTIO_SERIAL_PORT_GET_CLASS(port); + + /* + * If guest_connected is false, this call is being made by the + * early-boot queueing up of descriptors, which is just noise for + * the host apps -- don't disturb them in that case. 
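+ * Hence the guest_connected/host_connected checks gating the
+ * callback below.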
+ */ + if (port->guest_connected && port->host_connected && vsc->guest_writable) { + vsc->guest_writable(port); + } +} + +static uint64_t get_features(VirtIODevice *vdev, uint64_t features, + Error **errp) +{ + VirtIOSerial *vser; + + vser = VIRTIO_SERIAL(vdev); + + features |= vser->host_features; + if (vser->bus.max_nr_ports > 1) { + virtio_add_feature(&features, VIRTIO_CONSOLE_F_MULTIPORT); + } + return features; +} + +/* Guest requested config info */ +static void get_config(VirtIODevice *vdev, uint8_t *config_data) +{ + VirtIOSerial *vser = VIRTIO_SERIAL(vdev); + struct virtio_console_config *config = + (struct virtio_console_config *)config_data; + + config->cols = 0; + config->rows = 0; + config->max_nr_ports = virtio_tswap32(vdev, + vser->serial.max_virtserial_ports); +} + +/* Guest sent new config info */ +static void set_config(VirtIODevice *vdev, const uint8_t *config_data) +{ + VirtIOSerial *vser = VIRTIO_SERIAL(vdev); + struct virtio_console_config *config = + (struct virtio_console_config *)config_data; + VirtIOSerialPort *port = find_first_connected_console(vser); + VirtIOSerialPortClass *vsc; + uint8_t emerg_wr_lo; + + if (!virtio_has_feature(vser->host_features, + VIRTIO_CONSOLE_F_EMERG_WRITE) || !config->emerg_wr) { + return; + } + + emerg_wr_lo = le32_to_cpu(config->emerg_wr); + /* Make sure we don't misdetect an emergency write when the guest + * does a short config write after an emergency write. */ + config->emerg_wr = 0; + if (!port) { + return; + } + vsc = VIRTIO_SERIAL_PORT_GET_CLASS(port); + (void)vsc->have_data(port, &emerg_wr_lo, 1); +} + +static void guest_reset(VirtIOSerial *vser) +{ + VirtIOSerialPort *port; + VirtIOSerialPortClass *vsc; + + QTAILQ_FOREACH(port, &vser->ports, next) { + vsc = VIRTIO_SERIAL_PORT_GET_CLASS(port); + + discard_throttle_data(port); + + if (port->guest_connected) { + port->guest_connected = false; + if (vsc->set_guest_connected) { + vsc->set_guest_connected(port, false); + } + } + } +} + +static void set_status(VirtIODevice *vdev, uint8_t status) +{ + VirtIOSerial *vser; + VirtIOSerialPort *port; + + vser = VIRTIO_SERIAL(vdev); + port = find_port_by_id(vser, 0); + + if (port && !use_multiport(port->vser) + && (status & VIRTIO_CONFIG_S_DRIVER_OK)) { + /* + * Non-multiport guests won't be able to tell us guest + * open/close status. Such guests can only have a port at id + * 0, so set guest_connected for such ports as soon as guest + * is up. 
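+ * (Such guests never use the control queue, so no
+ * VIRTIO_CONSOLE_PORT_OPEN event will ever arrive to set it later.)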
+ */ + port->guest_connected = true; + } + if (!(status & VIRTIO_CONFIG_S_DRIVER_OK)) { + guest_reset(vser); + } + + QTAILQ_FOREACH(port, &vser->ports, next) { + VirtIOSerialPortClass *vsc = VIRTIO_SERIAL_PORT_GET_CLASS(port); + if (vsc->enable_backend) { + vsc->enable_backend(port, vdev->vm_running); + } + } +} + +static void vser_reset(VirtIODevice *vdev) +{ + VirtIOSerial *vser; + + vser = VIRTIO_SERIAL(vdev); + guest_reset(vser); +} + +static void virtio_serial_save_device(VirtIODevice *vdev, QEMUFile *f) +{ + VirtIOSerial *s = VIRTIO_SERIAL(vdev); + VirtIOSerialPort *port; + uint32_t nr_active_ports; + unsigned int i, max_nr_ports; + struct virtio_console_config config; + + /* The config space (ignored on the far end in current versions) */ + get_config(vdev, (uint8_t *)&config); + qemu_put_be16(f, config.cols); + qemu_put_be16(f, config.rows); + qemu_put_be32(f, config.max_nr_ports); + + /* The ports map */ + max_nr_ports = s->serial.max_virtserial_ports; + for (i = 0; i < DIV_ROUND_UP(max_nr_ports, 32); i++) { + qemu_put_be32s(f, &s->ports_map[i]); + } + + /* Ports */ + + nr_active_ports = 0; + QTAILQ_FOREACH(port, &s->ports, next) { + nr_active_ports++; + } + + qemu_put_be32s(f, &nr_active_ports); + + /* + * Items in struct VirtIOSerialPort. + */ + QTAILQ_FOREACH(port, &s->ports, next) { + uint32_t elem_popped; + + qemu_put_be32s(f, &port->id); + qemu_put_byte(f, port->guest_connected); + qemu_put_byte(f, port->host_connected); + + elem_popped = 0; + if (port->elem) { + elem_popped = 1; + } + qemu_put_be32s(f, &elem_popped); + if (elem_popped) { + qemu_put_be32s(f, &port->iov_idx); + qemu_put_be64s(f, &port->iov_offset); + qemu_put_virtqueue_element(vdev, f, port->elem); + } + } +} + +static void virtio_serial_post_load_timer_cb(void *opaque) +{ + uint32_t i; + VirtIOSerial *s = VIRTIO_SERIAL(opaque); + VirtIOSerialPort *port; + uint8_t host_connected; + VirtIOSerialPortClass *vsc; + + if (!s->post_load) { + return; + } + for (i = 0 ; i < s->post_load->nr_active_ports; ++i) { + port = s->post_load->connected[i].port; + host_connected = s->post_load->connected[i].host_connected; + if (host_connected != port->host_connected) { + /* + * We have to let the guest know of the host connection + * status change + */ + send_control_event(s, port->id, VIRTIO_CONSOLE_PORT_OPEN, + port->host_connected); + } + vsc = VIRTIO_SERIAL_PORT_GET_CLASS(port); + if (vsc->set_guest_connected) { + vsc->set_guest_connected(port, port->guest_connected); + } + } + g_free(s->post_load->connected); + timer_free(s->post_load->timer); + g_free(s->post_load); + s->post_load = NULL; +} + +static int fetch_active_ports_list(QEMUFile *f, + VirtIOSerial *s, uint32_t nr_active_ports) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(s); + uint32_t i; + + s->post_load = g_malloc0(sizeof(*s->post_load)); + s->post_load->nr_active_ports = nr_active_ports; + s->post_load->connected = + g_malloc0(sizeof(*s->post_load->connected) * nr_active_ports); + + s->post_load->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, + virtio_serial_post_load_timer_cb, + s); + + /* Items in struct VirtIOSerialPort */ + for (i = 0; i < nr_active_ports; i++) { + VirtIOSerialPort *port; + uint32_t elem_popped; + uint32_t id; + + id = qemu_get_be32(f); + port = find_port_by_id(s, id); + if (!port) { + return -EINVAL; + } + + port->guest_connected = qemu_get_byte(f); + s->post_load->connected[i].port = port; + s->post_load->connected[i].host_connected = qemu_get_byte(f); + + qemu_get_be32s(f, &elem_popped); + if (elem_popped) { + qemu_get_be32s(f, &port->iov_idx); 
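+            /*
+             * Together with iov_offset below, this records how far
+             * do_flush_queued_data() had progressed through the element
+             * on the source, so the flush resumes mid-buffer instead of
+             * replaying data the backend already consumed.
+             */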
+ qemu_get_be64s(f, &port->iov_offset); + + port->elem = + qemu_get_virtqueue_element(vdev, f, sizeof(VirtQueueElement)); + + /* + * Port was throttled on source machine. Let's + * unthrottle it here so data starts flowing again. + */ + virtio_serial_throttle_port(port, false); + } + } + timer_mod(s->post_load->timer, 1); + return 0; +} + +static int virtio_serial_load_device(VirtIODevice *vdev, QEMUFile *f, + int version_id) +{ + VirtIOSerial *s = VIRTIO_SERIAL(vdev); + uint32_t max_nr_ports, nr_active_ports, ports_map; + unsigned int i; + int ret; + uint32_t tmp; + + /* Unused */ + qemu_get_be16s(f, (uint16_t *) &tmp); + qemu_get_be16s(f, (uint16_t *) &tmp); + qemu_get_be32s(f, &tmp); + + max_nr_ports = s->serial.max_virtserial_ports; + for (i = 0; i < DIV_ROUND_UP(max_nr_ports, 32); i++) { + qemu_get_be32s(f, &ports_map); + + if (ports_map != s->ports_map[i]) { + /* + * Ports active on source and destination don't + * match. Fail migration. + */ + return -EINVAL; + } + } + + qemu_get_be32s(f, &nr_active_ports); + + if (nr_active_ports) { + ret = fetch_active_ports_list(f, s, nr_active_ports); + if (ret) { + return ret; + } + } + return 0; +} + +static void virtser_bus_dev_print(Monitor *mon, DeviceState *qdev, int indent); + +static Property virtser_props[] = { + DEFINE_PROP_UINT32("nr", VirtIOSerialPort, id, VIRTIO_CONSOLE_BAD_ID), + DEFINE_PROP_STRING("name", VirtIOSerialPort, name), + DEFINE_PROP_END_OF_LIST() +}; + +static void virtser_bus_class_init(ObjectClass *klass, void *data) +{ + BusClass *k = BUS_CLASS(klass); + k->print_dev = virtser_bus_dev_print; +} + +static const TypeInfo virtser_bus_info = { + .name = TYPE_VIRTIO_SERIAL_BUS, + .parent = TYPE_BUS, + .instance_size = sizeof(VirtIOSerialBus), + .class_init = virtser_bus_class_init, +}; + +static void virtser_bus_dev_print(Monitor *mon, DeviceState *qdev, int indent) +{ + VirtIOSerialPort *port = VIRTIO_SERIAL_PORT(qdev); + + monitor_printf(mon, "%*sport %d, guest %s, host %s, throttle %s\n", + indent, "", port->id, + port->guest_connected ? "on" : "off", + port->host_connected ? "on" : "off", + port->throttled ? "on" : "off"); +} + +/* This function is only used if a port id is not provided by the user */ +static uint32_t find_free_port_id(VirtIOSerial *vser) +{ + unsigned int i, max_nr_ports; + + max_nr_ports = vser->serial.max_virtserial_ports; + for (i = 0; i < DIV_ROUND_UP(max_nr_ports, 32); i++) { + uint32_t map, zeroes; + + map = vser->ports_map[i]; + zeroes = ctz32(~map); + if (zeroes != 32) { + return zeroes + i * 32; + } + } + return VIRTIO_CONSOLE_BAD_ID; +} + +static void mark_port_added(VirtIOSerial *vser, uint32_t port_id) +{ + unsigned int i; + + i = port_id / 32; + vser->ports_map[i] |= 1U << (port_id % 32); +} + +static void add_port(VirtIOSerial *vser, uint32_t port_id) +{ + mark_port_added(vser, port_id); + send_control_event(vser, port_id, VIRTIO_CONSOLE_PORT_ADD, 1); +} + +static void remove_port(VirtIOSerial *vser, uint32_t port_id) +{ + VirtIOSerialPort *port; + + /* + * Don't mark port 0 removed -- we explicitly reserve it for + * backward compat with older guests, ensure a virtconsole device + * unplug retains the reservation. + */ + if (port_id) { + unsigned int i; + + i = port_id / 32; + vser->ports_map[i] &= ~(1U << (port_id % 32)); + } + + port = find_port_by_id(vser, port_id); + /* + * This function is only called from qdev's unplug callback; if we + * get a NULL port here, we're in trouble. 
+ */ + assert(port); + + /* Flush out any unconsumed buffers first */ + discard_throttle_data(port); + discard_vq_data(port->ovq, VIRTIO_DEVICE(port->vser)); + + send_control_event(vser, port->id, VIRTIO_CONSOLE_PORT_REMOVE, 1); +} + +static void virtser_port_device_realize(DeviceState *dev, Error **errp) +{ + VirtIOSerialPort *port = VIRTIO_SERIAL_PORT(dev); + VirtIOSerialPortClass *vsc = VIRTIO_SERIAL_PORT_GET_CLASS(port); + VirtIOSerialBus *bus = VIRTIO_SERIAL_BUS(qdev_get_parent_bus(dev)); + int max_nr_ports; + bool plugging_port0; + Error *err = NULL; + + port->vser = bus->vser; + + assert(vsc->have_data); + + /* + * Is the first console port we're seeing? If so, put it up at + * location 0. This is done for backward compatibility (old + * kernel, new qemu). + */ + plugging_port0 = vsc->is_console && !find_port_by_id(port->vser, 0); + + if (find_port_by_id(port->vser, port->id)) { + error_setg(errp, "virtio-serial-bus: A port already exists at id %u", + port->id); + return; + } + + if (port->name != NULL && find_port_by_name(port->name)) { + error_setg(errp, "virtio-serial-bus: A port already exists by name %s", + port->name); + return; + } + + if (port->id == VIRTIO_CONSOLE_BAD_ID) { + if (plugging_port0) { + port->id = 0; + } else { + port->id = find_free_port_id(port->vser); + if (port->id == VIRTIO_CONSOLE_BAD_ID) { + error_setg(errp, "virtio-serial-bus: Maximum port limit for " + "this device reached"); + return; + } + } + } + + max_nr_ports = port->vser->serial.max_virtserial_ports; + if (port->id >= max_nr_ports) { + error_setg(errp, "virtio-serial-bus: Out-of-range port id specified, " + "max. allowed: %u", max_nr_ports - 1); + return; + } + + vsc->realize(dev, &err); + if (err != NULL) { + error_propagate(errp, err); + return; + } + + port->bh = qemu_bh_new(flush_queued_data_bh, port); + port->elem = NULL; +} + +static void virtser_port_device_plug(HotplugHandler *hotplug_dev, + DeviceState *dev, Error **errp) +{ + VirtIOSerialPort *port = VIRTIO_SERIAL_PORT(dev); + + QTAILQ_INSERT_TAIL(&port->vser->ports, port, next); + port->ivq = port->vser->ivqs[port->id]; + port->ovq = port->vser->ovqs[port->id]; + + add_port(port->vser, port->id); + + /* Send an update to the guest about this new port added */ + virtio_notify_config(VIRTIO_DEVICE(hotplug_dev)); +} + +static void virtser_port_device_unrealize(DeviceState *dev) +{ + VirtIOSerialPort *port = VIRTIO_SERIAL_PORT(dev); + VirtIOSerialPortClass *vsc = VIRTIO_SERIAL_PORT_GET_CLASS(dev); + VirtIOSerial *vser = port->vser; + + qemu_bh_delete(port->bh); + remove_port(port->vser, port->id); + + QTAILQ_REMOVE(&vser->ports, port, next); + + if (vsc->unrealize) { + vsc->unrealize(dev); + } +} + +static void virtio_serial_device_realize(DeviceState *dev, Error **errp) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(dev); + VirtIOSerial *vser = VIRTIO_SERIAL(dev); + uint32_t i, max_supported_ports; + size_t config_size = sizeof(struct virtio_console_config); + + if (!vser->serial.max_virtserial_ports) { + error_setg(errp, "Maximum number of serial ports not specified"); + return; + } + + /* Each port takes 2 queues, and one pair is for the control queue */ + max_supported_ports = VIRTIO_QUEUE_MAX / 2 - 1; + + if (vser->serial.max_virtserial_ports > max_supported_ports) { + error_setg(errp, "maximum ports supported: %u", max_supported_ports); + return; + } + + if (!virtio_has_feature(vser->host_features, + VIRTIO_CONSOLE_F_EMERG_WRITE)) { + config_size = offsetof(struct virtio_console_config, emerg_wr); + } + virtio_init(vdev, "virtio-serial", 
VIRTIO_ID_CONSOLE, + config_size); + + /* Spawn a new virtio-serial bus on which the ports will ride as devices */ + qbus_init(&vser->bus, sizeof(vser->bus), TYPE_VIRTIO_SERIAL_BUS, + dev, vdev->bus_name); + qbus_set_hotplug_handler(BUS(&vser->bus), OBJECT(vser)); + vser->bus.vser = vser; + QTAILQ_INIT(&vser->ports); + + vser->bus.max_nr_ports = vser->serial.max_virtserial_ports; + vser->ivqs = g_malloc(vser->serial.max_virtserial_ports + * sizeof(VirtQueue *)); + vser->ovqs = g_malloc(vser->serial.max_virtserial_ports + * sizeof(VirtQueue *)); + + /* Add a queue for host to guest transfers for port 0 (backward compat) */ + vser->ivqs[0] = virtio_add_queue(vdev, 128, handle_input); + /* Add a queue for guest to host transfers for port 0 (backward compat) */ + vser->ovqs[0] = virtio_add_queue(vdev, 128, handle_output); + + /* TODO: host to guest notifications can get dropped + * if the queue fills up. Implement queueing in host, + * this might also make it possible to reduce the control + * queue size: as guest preposts buffers there, + * this will save 4Kbyte of guest memory per entry. */ + + /* control queue: host to guest */ + vser->c_ivq = virtio_add_queue(vdev, 32, control_in); + /* control queue: guest to host */ + vser->c_ovq = virtio_add_queue(vdev, 32, control_out); + + for (i = 1; i < vser->bus.max_nr_ports; i++) { + /* Add a per-port queue for host to guest transfers */ + vser->ivqs[i] = virtio_add_queue(vdev, 128, handle_input); + /* Add a per-per queue for guest to host transfers */ + vser->ovqs[i] = virtio_add_queue(vdev, 128, handle_output); + } + + vser->ports_map = g_malloc0((DIV_ROUND_UP(vser->serial.max_virtserial_ports, 32)) + * sizeof(vser->ports_map[0])); + /* + * Reserve location 0 for a console port for backward compat + * (old kernel, new qemu) + */ + mark_port_added(vser, 0); + + vser->post_load = NULL; + + QLIST_INSERT_HEAD(&vserdevices.devices, vser, next); +} + +static void virtio_serial_port_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *k = DEVICE_CLASS(klass); + + set_bit(DEVICE_CATEGORY_INPUT, k->categories); + k->bus_type = TYPE_VIRTIO_SERIAL_BUS; + k->realize = virtser_port_device_realize; + k->unrealize = virtser_port_device_unrealize; + device_class_set_props(k, virtser_props); +} + +static const TypeInfo virtio_serial_port_type_info = { + .name = TYPE_VIRTIO_SERIAL_PORT, + .parent = TYPE_DEVICE, + .instance_size = sizeof(VirtIOSerialPort), + .abstract = true, + .class_size = sizeof(VirtIOSerialPortClass), + .class_init = virtio_serial_port_class_init, +}; + +static void virtio_serial_device_unrealize(DeviceState *dev) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(dev); + VirtIOSerial *vser = VIRTIO_SERIAL(dev); + int i; + + QLIST_REMOVE(vser, next); + + virtio_delete_queue(vser->c_ivq); + virtio_delete_queue(vser->c_ovq); + for (i = 0; i < vser->bus.max_nr_ports; i++) { + virtio_delete_queue(vser->ivqs[i]); + virtio_delete_queue(vser->ovqs[i]); + } + + g_free(vser->ivqs); + g_free(vser->ovqs); + g_free(vser->ports_map); + if (vser->post_load) { + g_free(vser->post_load->connected); + timer_free(vser->post_load->timer); + g_free(vser->post_load); + } + + qbus_set_hotplug_handler(BUS(&vser->bus), NULL); + + virtio_cleanup(vdev); +} + +/* Note: 'console' is used for backwards compatibility */ +static const VMStateDescription vmstate_virtio_console = { + .name = "virtio-console", + .minimum_version_id = 3, + .version_id = 3, + .fields = (VMStateField[]) { + VMSTATE_VIRTIO_DEVICE, + VMSTATE_END_OF_LIST() + }, +}; + +static Property 
virtio_serial_properties[] = {
+    DEFINE_PROP_UINT32("max_ports", VirtIOSerial, serial.max_virtserial_ports,
+                       31),
+    DEFINE_PROP_BIT64("emergency-write", VirtIOSerial, host_features,
+                      VIRTIO_CONSOLE_F_EMERG_WRITE, true),
+    DEFINE_PROP_END_OF_LIST(),
+};
+
+static void virtio_serial_class_init(ObjectClass *klass, void *data)
+{
+    DeviceClass *dc = DEVICE_CLASS(klass);
+    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
+    HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(klass);
+
+    QLIST_INIT(&vserdevices.devices);
+
+    device_class_set_props(dc, virtio_serial_properties);
+    dc->vmsd = &vmstate_virtio_console;
+    set_bit(DEVICE_CATEGORY_INPUT, dc->categories);
+    vdc->realize = virtio_serial_device_realize;
+    vdc->unrealize = virtio_serial_device_unrealize;
+    vdc->get_features = get_features;
+    vdc->get_config = get_config;
+    vdc->set_config = set_config;
+    vdc->set_status = set_status;
+    vdc->reset = vser_reset;
+    vdc->save = virtio_serial_save_device;
+    vdc->load = virtio_serial_load_device;
+    hc->plug = virtser_port_device_plug;
+    hc->unplug = qdev_simple_device_unplug_cb;
+}
+
+static const TypeInfo virtio_device_info = {
+    .name = TYPE_VIRTIO_SERIAL,
+    .parent = TYPE_VIRTIO_DEVICE,
+    .instance_size = sizeof(VirtIOSerial),
+    .class_init = virtio_serial_class_init,
+    .interfaces = (InterfaceInfo[]) {
+        { TYPE_HOTPLUG_HANDLER },
+        { }
+    }
+};
+
+static void virtio_serial_register_types(void)
+{
+    type_register_static(&virtser_bus_info);
+    type_register_static(&virtio_serial_port_type_info);
+    type_register_static(&virtio_device_info);
+}
+
+type_init(virtio_serial_register_types)
diff --git a/hw/char/xen_console.c b/hw/char/xen_console.c
new file mode 100644
index 000000000..63153dfde
--- /dev/null
+++ b/hw/char/xen_console.c
@@ -0,0 +1,298 @@
+/*
+ * Copyright (C) International Business Machines Corp., 2005
+ * Author(s): Anthony Liguori
+ *
+ * Copyright (C) Red Hat 2007
+ *
+ * Xen Console
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; under version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include <sys/select.h>
+#include <termios.h>
+
+#include "qapi/error.h"
+#include "sysemu/sysemu.h"
+#include "chardev/char-fe.h"
+#include "hw/xen/xen-legacy-backend.h"
+
+#include "hw/xen/interface/io/console.h"
+
+struct buffer {
+    uint8_t *data;
+    size_t consumed;
+    size_t size;
+    size_t capacity;
+    size_t max_capacity;
+};
+
+struct XenConsole {
+    struct XenLegacyDevice xendev;  /* must be first */
+    struct buffer buffer;
+    char console[XEN_BUFSIZE];
+    int ring_ref;
+    void *sring;
+    CharBackend chr;
+    int backlog;
+};
+
+static void buffer_append(struct XenConsole *con)
+{
+    struct buffer *buffer = &con->buffer;
+    XENCONS_RING_IDX cons, prod, size;
+    struct xencons_interface *intf = con->sring;
+
+    cons = intf->out_cons;
+    prod = intf->out_prod;
+    xen_mb();
+
+    size = prod - cons;
+    if ((size == 0) || (size > sizeof(intf->out)))
+        return;
+
+    if ((buffer->capacity - buffer->size) < size) {
+        buffer->capacity += (size + 1024);
+        buffer->data = g_realloc(buffer->data, buffer->capacity);
+    }
+
+    while (cons != prod)
+        buffer->data[buffer->size++] = intf->out[
+            MASK_XENCONS_IDX(cons++, intf->out)];
+
+    xen_mb();
+    intf->out_cons = cons;
+    xen_pv_send_notify(&con->xendev);
+
+    if (buffer->max_capacity &&
+        buffer->size > buffer->max_capacity) {
+        /* Discard the middle of the data. */
+
+        size_t over = buffer->size - buffer->max_capacity;
+        uint8_t *maxpos = buffer->data + buffer->max_capacity;
+
+        memmove(maxpos - over, maxpos, over);
+        buffer->data = g_realloc(buffer->data, buffer->max_capacity);
+        buffer->size = buffer->capacity = buffer->max_capacity;
+
+        if (buffer->consumed > buffer->max_capacity - over)
+            buffer->consumed = buffer->max_capacity - over;
+    }
+}
+
+static void buffer_advance(struct buffer *buffer, size_t len)
+{
+    buffer->consumed += len;
+    if (buffer->consumed == buffer->size) {
+        buffer->consumed = 0;
+        buffer->size = 0;
+    }
+}
+
+static int ring_free_bytes(struct XenConsole *con)
+{
+    struct xencons_interface *intf = con->sring;
+    XENCONS_RING_IDX cons, prod, space;
+
+    cons = intf->in_cons;
+    prod = intf->in_prod;
+    xen_mb();
+
+    space = prod - cons;
+    if (space > sizeof(intf->in))
+        return 0; /* ring is screwed: ignore it */
+
+    return (sizeof(intf->in) - space);
+}
+
+static int xencons_can_receive(void *opaque)
+{
+    struct XenConsole *con = opaque;
+    return ring_free_bytes(con);
+}
+
+static void xencons_receive(void *opaque, const uint8_t *buf, int len)
+{
+    struct XenConsole *con = opaque;
+    struct xencons_interface *intf = con->sring;
+    XENCONS_RING_IDX prod;
+    int i, max;
+
+    max = ring_free_bytes(con);
+    /* The can_receive() func limits this, but check again anyway */
+    if (max < len)
+        len = max;
+
+    prod = intf->in_prod;
+    for (i = 0; i < len; i++) {
+        intf->in[MASK_XENCONS_IDX(prod++, intf->in)] =
+            buf[i];
+    }
+    xen_wmb();
+    intf->in_prod = prod;
+    xen_pv_send_notify(&con->xendev);
+}
+
+static void xencons_send(struct XenConsole *con)
+{
+    ssize_t len, size;
+
+    size = con->buffer.size - con->buffer.consumed;
+    if (qemu_chr_fe_backend_connected(&con->chr)) {
+        len = qemu_chr_fe_write(&con->chr,
+                                con->buffer.data + con->buffer.consumed,
+                                size);
+    } else {
+        len = size;
+    }
+    if (len < 1) {
+        if (!con->backlog) {
+            con->backlog = 1;
+            xen_pv_printf(&con->xendev, 1,
+                          "backlog piling up, nobody listening?\n");
+        }
+    } else {
+        buffer_advance(&con->buffer, len);
+        if (con->backlog && len == size) {
+            con->backlog = 0;
+            xen_pv_printf(&con->xendev, 1, "backlog is gone\n");
+        }
+    }
+}
+
+/*
-------------------------------------------------------------------- */ + +static int con_init(struct XenLegacyDevice *xendev) +{ + struct XenConsole *con = container_of(xendev, struct XenConsole, xendev); + char *type, *dom, label[32]; + int ret = 0; + const char *output; + + /* setup */ + dom = xs_get_domain_path(xenstore, con->xendev.dom); + if (!xendev->dev) { + snprintf(con->console, sizeof(con->console), "%s/console", dom); + } else { + snprintf(con->console, sizeof(con->console), "%s/device/console/%d", dom, xendev->dev); + } + free(dom); + + type = xenstore_read_str(con->console, "type"); + if (!type || strcmp(type, "ioemu") != 0) { + xen_pv_printf(xendev, 1, "not for me (type=%s)\n", type); + ret = -1; + goto out; + } + + output = xenstore_read_str(con->console, "output"); + + /* no Xen override, use qemu output device */ + if (output == NULL) { + if (con->xendev.dev) { + qemu_chr_fe_init(&con->chr, serial_hd(con->xendev.dev), + &error_abort); + } + } else { + snprintf(label, sizeof(label), "xencons%d", con->xendev.dev); + qemu_chr_fe_init(&con->chr, + /* + * FIXME: sure we want to support implicit + * muxed monitors here? + */ + qemu_chr_new_mux_mon(label, output, NULL), + &error_abort); + } + + xenstore_store_pv_console_info(con->xendev.dev, + qemu_chr_fe_get_driver(&con->chr)); + +out: + g_free(type); + return ret; +} + +static int con_initialise(struct XenLegacyDevice *xendev) +{ + struct XenConsole *con = container_of(xendev, struct XenConsole, xendev); + int limit; + + if (xenstore_read_int(con->console, "ring-ref", &con->ring_ref) == -1) + return -1; + if (xenstore_read_int(con->console, "port", &con->xendev.remote_port) == -1) + return -1; + if (xenstore_read_int(con->console, "limit", &limit) == 0) + con->buffer.max_capacity = limit; + + if (!xendev->dev) { + xen_pfn_t mfn = con->ring_ref; + con->sring = xenforeignmemory_map(xen_fmem, con->xendev.dom, + PROT_READ | PROT_WRITE, + 1, &mfn, NULL); + } else { + con->sring = xen_be_map_grant_ref(xendev, con->ring_ref, + PROT_READ | PROT_WRITE); + } + if (!con->sring) + return -1; + + xen_be_bind_evtchn(&con->xendev); + qemu_chr_fe_set_handlers(&con->chr, xencons_can_receive, + xencons_receive, NULL, NULL, con, NULL, true); + + xen_pv_printf(xendev, 1, + "ring mfn %d, remote port %d, local port %d, limit %zd\n", + con->ring_ref, + con->xendev.remote_port, + con->xendev.local_port, + con->buffer.max_capacity); + return 0; +} + +static void con_disconnect(struct XenLegacyDevice *xendev) +{ + struct XenConsole *con = container_of(xendev, struct XenConsole, xendev); + + qemu_chr_fe_deinit(&con->chr, false); + xen_pv_unbind_evtchn(&con->xendev); + + if (con->sring) { + if (!xendev->dev) { + xenforeignmemory_unmap(xen_fmem, con->sring, 1); + } else { + xen_be_unmap_grant_ref(xendev, con->sring); + } + con->sring = NULL; + } +} + +static void con_event(struct XenLegacyDevice *xendev) +{ + struct XenConsole *con = container_of(xendev, struct XenConsole, xendev); + + buffer_append(con); + if (con->buffer.size - con->buffer.consumed) + xencons_send(con); +} + +/* -------------------------------------------------------------------- */ + +struct XenDevOps xen_console_ops = { + .size = sizeof(struct XenConsole), + .flags = DEVOPS_FLAG_IGNORE_STATE|DEVOPS_FLAG_NEED_GNTDEV, + .init = con_init, + .initialise = con_initialise, + .event = con_event, + .disconnect = con_disconnect, +}; diff --git a/hw/char/xilinx_uartlite.c b/hw/char/xilinx_uartlite.c new file mode 100644 index 000000000..99b9a6f85 --- /dev/null +++ b/hw/char/xilinx_uartlite.c @@ 
-0,0 +1,257 @@ +/* + * QEMU model of Xilinx uartlite. + * + * Copyright (c) 2009 Edgar E. Iglesias. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "hw/irq.h" +#include "hw/qdev-properties.h" +#include "hw/qdev-properties-system.h" +#include "hw/sysbus.h" +#include "qemu/module.h" +#include "chardev/char-fe.h" +#include "qom/object.h" + +#define DUART(x) + +#define R_RX 0 +#define R_TX 1 +#define R_STATUS 2 +#define R_CTRL 3 +#define R_MAX 4 + +#define STATUS_RXVALID 0x01 +#define STATUS_RXFULL 0x02 +#define STATUS_TXEMPTY 0x04 +#define STATUS_TXFULL 0x08 +#define STATUS_IE 0x10 +#define STATUS_OVERRUN 0x20 +#define STATUS_FRAME 0x40 +#define STATUS_PARITY 0x80 + +#define CONTROL_RST_TX 0x01 +#define CONTROL_RST_RX 0x02 +#define CONTROL_IE 0x10 + +#define TYPE_XILINX_UARTLITE "xlnx.xps-uartlite" +OBJECT_DECLARE_SIMPLE_TYPE(XilinxUARTLite, XILINX_UARTLITE) + +struct XilinxUARTLite { + SysBusDevice parent_obj; + + MemoryRegion mmio; + CharBackend chr; + qemu_irq irq; + + uint8_t rx_fifo[8]; + unsigned int rx_fifo_pos; + unsigned int rx_fifo_len; + + uint32_t regs[R_MAX]; +}; + +static void uart_update_irq(XilinxUARTLite *s) +{ + unsigned int irq; + + if (s->rx_fifo_len) + s->regs[R_STATUS] |= STATUS_IE; + + irq = (s->regs[R_STATUS] & STATUS_IE) && (s->regs[R_CTRL] & CONTROL_IE); + qemu_set_irq(s->irq, irq); +} + +static void uart_update_status(XilinxUARTLite *s) +{ + uint32_t r; + + r = s->regs[R_STATUS]; + r &= ~7; + r |= 1 << 2; /* Tx fifo is always empty. 
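(Editor's note: uart_write() below transmits synchronously with qemu_chr_fe_write_all(), so the TX FIFO can never back up.)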
We are fast :) */ + r |= (s->rx_fifo_len == sizeof (s->rx_fifo)) << 1; + r |= (!!s->rx_fifo_len); + s->regs[R_STATUS] = r; +} + +static void xilinx_uartlite_reset(DeviceState *dev) +{ + uart_update_status(XILINX_UARTLITE(dev)); +} + +static uint64_t +uart_read(void *opaque, hwaddr addr, unsigned int size) +{ + XilinxUARTLite *s = opaque; + uint32_t r = 0; + addr >>= 2; + switch (addr) + { + case R_RX: + r = s->rx_fifo[(s->rx_fifo_pos - s->rx_fifo_len) & 7]; + if (s->rx_fifo_len) + s->rx_fifo_len--; + uart_update_status(s); + uart_update_irq(s); + qemu_chr_fe_accept_input(&s->chr); + break; + + default: + if (addr < ARRAY_SIZE(s->regs)) + r = s->regs[addr]; + DUART(qemu_log("%s addr=%x v=%x\n", __func__, addr, r)); + break; + } + return r; +} + +static void +uart_write(void *opaque, hwaddr addr, + uint64_t val64, unsigned int size) +{ + XilinxUARTLite *s = opaque; + uint32_t value = val64; + unsigned char ch = value; + + addr >>= 2; + switch (addr) + { + case R_STATUS: + qemu_log_mask(LOG_GUEST_ERROR, "%s: write to UART STATUS\n", + __func__); + break; + + case R_CTRL: + if (value & CONTROL_RST_RX) { + s->rx_fifo_pos = 0; + s->rx_fifo_len = 0; + } + s->regs[addr] = value; + break; + + case R_TX: + /* XXX this blocks entire thread. Rewrite to use + * qemu_chr_fe_write and background I/O callbacks */ + qemu_chr_fe_write_all(&s->chr, &ch, 1); + s->regs[addr] = value; + + /* hax. */ + s->regs[R_STATUS] |= STATUS_IE; + break; + + default: + DUART(printf("%s addr=%x v=%x\n", __func__, addr, value)); + if (addr < ARRAY_SIZE(s->regs)) + s->regs[addr] = value; + break; + } + uart_update_status(s); + uart_update_irq(s); +} + +static const MemoryRegionOps uart_ops = { + .read = uart_read, + .write = uart_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + .min_access_size = 1, + .max_access_size = 4 + } +}; + +static Property xilinx_uartlite_properties[] = { + DEFINE_PROP_CHR("chardev", XilinxUARTLite, chr), + DEFINE_PROP_END_OF_LIST(), +}; + +static void uart_rx(void *opaque, const uint8_t *buf, int size) +{ + XilinxUARTLite *s = opaque; + + /* Got a byte. 
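(Editor's note: if the 8-entry RX FIFO is already full, the byte is silently dropped below; real hardware would latch the overrun bit, STATUS_OVERRUN, which this model defines but never sets.)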
*/ + if (s->rx_fifo_len >= 8) { + printf("WARNING: UART dropped char.\n"); + return; + } + s->rx_fifo[s->rx_fifo_pos] = *buf; + s->rx_fifo_pos++; + s->rx_fifo_pos &= 0x7; + s->rx_fifo_len++; + + uart_update_status(s); + uart_update_irq(s); +} + +static int uart_can_rx(void *opaque) +{ + XilinxUARTLite *s = opaque; + + return s->rx_fifo_len < sizeof(s->rx_fifo); +} + +static void uart_event(void *opaque, QEMUChrEvent event) +{ + +} + +static void xilinx_uartlite_realize(DeviceState *dev, Error **errp) +{ + XilinxUARTLite *s = XILINX_UARTLITE(dev); + + qemu_chr_fe_set_handlers(&s->chr, uart_can_rx, uart_rx, + uart_event, NULL, s, NULL, true); +} + +static void xilinx_uartlite_init(Object *obj) +{ + XilinxUARTLite *s = XILINX_UARTLITE(obj); + + sysbus_init_irq(SYS_BUS_DEVICE(obj), &s->irq); + + memory_region_init_io(&s->mmio, obj, &uart_ops, s, + "xlnx.xps-uartlite", R_MAX * 4); + sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->mmio); +} + +static void xilinx_uartlite_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = xilinx_uartlite_reset; + dc->realize = xilinx_uartlite_realize; + device_class_set_props(dc, xilinx_uartlite_properties); +} + +static const TypeInfo xilinx_uartlite_info = { + .name = TYPE_XILINX_UARTLITE, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(XilinxUARTLite), + .instance_init = xilinx_uartlite_init, + .class_init = xilinx_uartlite_class_init, +}; + +static void xilinx_uart_register_types(void) +{ + type_register_static(&xilinx_uartlite_info); +} + +type_init(xilinx_uart_register_types) diff --git a/hw/core/Kconfig b/hw/core/Kconfig new file mode 100644 index 000000000..939750365 --- /dev/null +++ b/hw/core/Kconfig @@ -0,0 +1,29 @@ +config EMPTY_SLOT + bool + +config PTIMER + bool + +config FITLOADER + bool + +config GENERIC_LOADER + bool + default y + +config GUEST_LOADER + bool + default y + depends on TCG + +config OR_IRQ + bool + +config PLATFORM_BUS + bool + +config REGISTER + bool + +config SPLIT_IRQ + bool diff --git a/hw/core/bus.c b/hw/core/bus.c new file mode 100644 index 000000000..c7831b529 --- /dev/null +++ b/hw/core/bus.c @@ -0,0 +1,341 @@ +/* + * Dynamic device configuration and creation -- buses. + * + * Copyright (c) 2009 CodeSourcery + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . 
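<http://www.gnu.org/licenses/>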
+ */ + +#include "qemu/osdep.h" +#include "hw/qdev-properties.h" +#include "qemu/ctype.h" +#include "qemu/module.h" +#include "qapi/error.h" + +void qbus_set_hotplug_handler(BusState *bus, Object *handler) +{ + object_property_set_link(OBJECT(bus), QDEV_HOTPLUG_HANDLER_PROPERTY, + handler, &error_abort); +} + +void qbus_set_bus_hotplug_handler(BusState *bus) +{ + qbus_set_hotplug_handler(bus, OBJECT(bus)); +} + +int qbus_walk_children(BusState *bus, + qdev_walkerfn *pre_devfn, qbus_walkerfn *pre_busfn, + qdev_walkerfn *post_devfn, qbus_walkerfn *post_busfn, + void *opaque) +{ + BusChild *kid; + int err; + + if (pre_busfn) { + err = pre_busfn(bus, opaque); + if (err) { + return err; + } + } + + WITH_RCU_READ_LOCK_GUARD() { + QTAILQ_FOREACH_RCU(kid, &bus->children, sibling) { + err = qdev_walk_children(kid->child, + pre_devfn, pre_busfn, + post_devfn, post_busfn, opaque); + if (err < 0) { + return err; + } + } + } + + if (post_busfn) { + err = post_busfn(bus, opaque); + if (err) { + return err; + } + } + + return 0; +} + +void bus_cold_reset(BusState *bus) +{ + resettable_reset(OBJECT(bus), RESET_TYPE_COLD); +} + +bool bus_is_in_reset(BusState *bus) +{ + return resettable_is_in_reset(OBJECT(bus)); +} + +static ResettableState *bus_get_reset_state(Object *obj) +{ + BusState *bus = BUS(obj); + return &bus->reset; +} + +static void bus_reset_child_foreach(Object *obj, ResettableChildCallback cb, + void *opaque, ResetType type) +{ + BusState *bus = BUS(obj); + BusChild *kid; + + WITH_RCU_READ_LOCK_GUARD() { + QTAILQ_FOREACH_RCU(kid, &bus->children, sibling) { + cb(OBJECT(kid->child), opaque, type); + } + } +} + +static void qbus_init_internal(BusState *bus, DeviceState *parent, + const char *name) +{ + const char *typename = object_get_typename(OBJECT(bus)); + BusClass *bc; + int i, bus_id; + + bus->parent = parent; + + if (name) { + bus->name = g_strdup(name); + } else if (bus->parent && bus->parent->id) { + /* parent device has id -> use it plus parent-bus-id for bus name */ + bus_id = bus->parent->num_child_bus; + bus->name = g_strdup_printf("%s.%d", bus->parent->id, bus_id); + } else { + /* no id -> use lowercase bus type plus global bus-id for bus name */ + bc = BUS_GET_CLASS(bus); + bus_id = bc->automatic_ids++; + bus->name = g_strdup_printf("%s.%d", typename, bus_id); + for (i = 0; bus->name[i]; i++) { + bus->name[i] = qemu_tolower(bus->name[i]); + } + } + + if (bus->parent) { + QLIST_INSERT_HEAD(&bus->parent->child_bus, bus, sibling); + bus->parent->num_child_bus++; + object_property_add_child(OBJECT(bus->parent), bus->name, OBJECT(bus)); + object_unref(OBJECT(bus)); + } else { + /* The only bus without a parent is the main system bus */ + assert(bus == sysbus_get_default()); + } +} + +static void bus_unparent(Object *obj) +{ + BusState *bus = BUS(obj); + BusChild *kid; + + /* Only the main system bus has no parent, and that bus is never freed */ + assert(bus->parent); + + while ((kid = QTAILQ_FIRST(&bus->children)) != NULL) { + DeviceState *dev = kid->child; + object_unparent(OBJECT(dev)); + } + QLIST_REMOVE(bus, sibling); + bus->parent->num_child_bus--; + bus->parent = NULL; +} + +void qbus_init(void *bus, size_t size, const char *typename, + DeviceState *parent, const char *name) +{ + object_initialize(bus, size, typename); + qbus_init_internal(bus, parent, name); +} + +BusState *qbus_new(const char *typename, DeviceState *parent, const char *name) +{ + BusState *bus; + + bus = BUS(object_new(typename)); + qbus_init_internal(bus, parent, name); + + return bus; +} + +bool 
qbus_realize(BusState *bus, Error **errp) +{ + return object_property_set_bool(OBJECT(bus), "realized", true, errp); +} + +void qbus_unrealize(BusState *bus) +{ + object_property_set_bool(OBJECT(bus), "realized", false, &error_abort); +} + +static bool bus_get_realized(Object *obj, Error **errp) +{ + BusState *bus = BUS(obj); + + return bus->realized; +} + +static void bus_set_realized(Object *obj, bool value, Error **errp) +{ + BusState *bus = BUS(obj); + BusClass *bc = BUS_GET_CLASS(bus); + BusChild *kid; + + if (value && !bus->realized) { + if (bc->realize) { + bc->realize(bus, errp); + } + + /* TODO: recursive realization */ + } else if (!value && bus->realized) { + WITH_RCU_READ_LOCK_GUARD() { + QTAILQ_FOREACH_RCU(kid, &bus->children, sibling) { + DeviceState *dev = kid->child; + qdev_unrealize(dev); + } + } + if (bc->unrealize) { + bc->unrealize(bus); + } + } + + bus->realized = value; +} + +static void qbus_initfn(Object *obj) +{ + BusState *bus = BUS(obj); + + QTAILQ_INIT(&bus->children); + object_property_add_link(obj, QDEV_HOTPLUG_HANDLER_PROPERTY, + TYPE_HOTPLUG_HANDLER, + (Object **)&bus->hotplug_handler, + object_property_allow_set_link, + 0); + object_property_add_bool(obj, "realized", + bus_get_realized, bus_set_realized); +} + +static char *default_bus_get_fw_dev_path(DeviceState *dev) +{ + return g_strdup(object_get_typename(OBJECT(dev))); +} + +/** + * bus_phases_reset: + * Transition reset method for buses to allow moving + * smoothly from legacy reset method to multi-phases + */ +static void bus_phases_reset(BusState *bus) +{ + ResettableClass *rc = RESETTABLE_GET_CLASS(bus); + + if (rc->phases.enter) { + rc->phases.enter(OBJECT(bus), RESET_TYPE_COLD); + } + if (rc->phases.hold) { + rc->phases.hold(OBJECT(bus)); + } + if (rc->phases.exit) { + rc->phases.exit(OBJECT(bus)); + } +} + +static void bus_transitional_reset(Object *obj) +{ + BusClass *bc = BUS_GET_CLASS(obj); + + /* + * This will call either @bus_phases_reset (for multi-phases transitioned + * buses) or a bus's specific method for not-yet transitioned buses. + * In both case, it does not reset children. + */ + if (bc->reset) { + bc->reset(BUS(obj)); + } +} + +/** + * bus_get_transitional_reset: + * check if the bus's class is ready for multi-phase + */ +static ResettableTrFunction bus_get_transitional_reset(Object *obj) +{ + BusClass *dc = BUS_GET_CLASS(obj); + if (dc->reset != bus_phases_reset) { + /* + * dc->reset has been overridden by a subclass, + * the bus is not ready for multi phase yet. + */ + return bus_transitional_reset; + } + return NULL; +} + +static void bus_class_init(ObjectClass *class, void *data) +{ + BusClass *bc = BUS_CLASS(class); + ResettableClass *rc = RESETTABLE_CLASS(class); + + class->unparent = bus_unparent; + bc->get_fw_dev_path = default_bus_get_fw_dev_path; + + rc->get_state = bus_get_reset_state; + rc->child_foreach = bus_reset_child_foreach; + + /* + * @bus_phases_reset is put as the default reset method below, allowing + * to do the multi-phase transition from base classes to leaf classes. It + * allows a legacy-reset Bus class to extend a multi-phases-reset + * Bus class for the following reason: + * + If a base class B has been moved to multi-phase, then it does not + * override this default reset method and may have defined phase methods. + * + A child class C (extending class B) which uses + * bus_class_set_parent_reset() (or similar means) to override the + * reset method will still work as expected. 
@bus_phases_reset function + * will be registered as the parent reset method and effectively call + * parent reset phases. + */ + bc->reset = bus_phases_reset; + rc->get_transitional_function = bus_get_transitional_reset; +} + +static void qbus_finalize(Object *obj) +{ + BusState *bus = BUS(obj); + + g_free(bus->name); +} + +static const TypeInfo bus_info = { + .name = TYPE_BUS, + .parent = TYPE_OBJECT, + .instance_size = sizeof(BusState), + .abstract = true, + .class_size = sizeof(BusClass), + .instance_init = qbus_initfn, + .instance_finalize = qbus_finalize, + .class_init = bus_class_init, + .interfaces = (InterfaceInfo[]) { + { TYPE_RESETTABLE_INTERFACE }, + { } + }, +}; + +static void bus_register_types(void) +{ + type_register_static(&bus_info); +} + +type_init(bus_register_types) diff --git a/hw/core/clock-vmstate.c b/hw/core/clock-vmstate.c new file mode 100644 index 000000000..9d9174ffb --- /dev/null +++ b/hw/core/clock-vmstate.c @@ -0,0 +1,63 @@ +/* + * Clock migration structure + * + * Copyright GreenSocs 2019-2020 + * + * Authors: + * Damien Hedde + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "migration/vmstate.h" +#include "hw/clock.h" + +static bool muldiv_needed(void *opaque) +{ + Clock *clk = opaque; + + return clk->multiplier != 1 || clk->divider != 1; +} + +static int clock_pre_load(void *opaque) +{ + Clock *clk = opaque; + /* + * The initial out-of-reset settings of the Clock might have been + * configured by the device to be different from what we set + * in clock_initfn(), so we must here set the default values to + * be used if they are not in the inbound migration state. + */ + clk->multiplier = 1; + clk->divider = 1; + + return 0; +} + +const VMStateDescription vmstate_muldiv = { + .name = "clock/muldiv", + .version_id = 1, + .minimum_version_id = 1, + .needed = muldiv_needed, + .fields = (VMStateField[]) { + VMSTATE_UINT32(multiplier, Clock), + VMSTATE_UINT32(divider, Clock), + VMSTATE_END_OF_LIST() + }, +}; + +const VMStateDescription vmstate_clock = { + .name = "clock", + .version_id = 0, + .minimum_version_id = 0, + .pre_load = clock_pre_load, + .fields = (VMStateField[]) { + VMSTATE_UINT64(period, Clock), + VMSTATE_END_OF_LIST() + }, + .subsections = (const VMStateDescription*[]) { + &vmstate_muldiv, + NULL + }, +}; diff --git a/hw/core/clock.c b/hw/core/clock.c new file mode 100644 index 000000000..916875e07 --- /dev/null +++ b/hw/core/clock.c @@ -0,0 +1,195 @@ +/* + * Hardware Clocks + * + * Copyright GreenSocs 2016-2020 + * + * Authors: + * Frederic Konrad + * Damien Hedde + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ */ + +#include "qemu/osdep.h" +#include "qemu/cutils.h" +#include "hw/clock.h" +#include "trace.h" + +#define CLOCK_PATH(_clk) (_clk->canonical_path) + +void clock_setup_canonical_path(Clock *clk) +{ + g_free(clk->canonical_path); + clk->canonical_path = object_get_canonical_path(OBJECT(clk)); +} + +Clock *clock_new(Object *parent, const char *name) +{ + Object *obj; + Clock *clk; + + obj = object_new(TYPE_CLOCK); + object_property_add_child(parent, name, obj); + object_unref(obj); + + clk = CLOCK(obj); + clock_setup_canonical_path(clk); + + return clk; +} + +void clock_set_callback(Clock *clk, ClockCallback *cb, void *opaque, + unsigned int events) +{ + clk->callback = cb; + clk->callback_opaque = opaque; + clk->callback_events = events; +} + +void clock_clear_callback(Clock *clk) +{ + clock_set_callback(clk, NULL, NULL, 0); +} + +bool clock_set(Clock *clk, uint64_t period) +{ + if (clk->period == period) { + return false; + } + trace_clock_set(CLOCK_PATH(clk), CLOCK_PERIOD_TO_HZ(clk->period), + CLOCK_PERIOD_TO_HZ(period)); + clk->period = period; + + return true; +} + +static uint64_t clock_get_child_period(Clock *clk) +{ + /* + * Return the period to be used for child clocks, which is the parent + * clock period adjusted for for multiplier and divider effects. + */ + return muldiv64(clk->period, clk->multiplier, clk->divider); +} + +static void clock_call_callback(Clock *clk, ClockEvent event) +{ + /* + * Call the Clock's callback for this event, if it has one and + * is interested in this event. + */ + if (clk->callback && (clk->callback_events & event)) { + clk->callback(clk->callback_opaque, event); + } +} + +static void clock_propagate_period(Clock *clk, bool call_callbacks) +{ + Clock *child; + uint64_t child_period = clock_get_child_period(clk); + + QLIST_FOREACH(child, &clk->children, sibling) { + if (child->period != child_period) { + if (call_callbacks) { + clock_call_callback(child, ClockPreUpdate); + } + child->period = child_period; + trace_clock_update(CLOCK_PATH(child), CLOCK_PATH(clk), + CLOCK_PERIOD_TO_HZ(child->period), + call_callbacks); + if (call_callbacks) { + clock_call_callback(child, ClockUpdate); + } + clock_propagate_period(child, call_callbacks); + } + } +} + +void clock_propagate(Clock *clk) +{ + assert(clk->source == NULL); + trace_clock_propagate(CLOCK_PATH(clk)); + clock_propagate_period(clk, true); +} + +void clock_set_source(Clock *clk, Clock *src) +{ + /* changing clock source is not supported */ + assert(!clk->source); + + trace_clock_set_source(CLOCK_PATH(clk), CLOCK_PATH(src)); + + clk->period = clock_get_child_period(src); + QLIST_INSERT_HEAD(&src->children, clk, sibling); + clk->source = src; + clock_propagate_period(clk, false); +} + +static void clock_disconnect(Clock *clk) +{ + if (clk->source == NULL) { + return; + } + + trace_clock_disconnect(CLOCK_PATH(clk)); + + clk->source = NULL; + QLIST_REMOVE(clk, sibling); +} + +char *clock_display_freq(Clock *clk) +{ + return freq_to_str(clock_get_hz(clk)); +} + +void clock_set_mul_div(Clock *clk, uint32_t multiplier, uint32_t divider) +{ + assert(divider != 0); + + trace_clock_set_mul_div(CLOCK_PATH(clk), clk->multiplier, multiplier, + clk->divider, divider); + clk->multiplier = multiplier; + clk->divider = divider; +} + +static void clock_initfn(Object *obj) +{ + Clock *clk = CLOCK(obj); + + clk->multiplier = 1; + clk->divider = 1; + + QLIST_INIT(&clk->children); +} + +static void clock_finalizefn(Object *obj) +{ + Clock *clk = CLOCK(obj); + Clock *child, *next; + + /* clear our list of children */ 
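/* (editor's note: clock_disconnect() only unlinks a child from this clock; the child Clock object itself stays alive and is freed by its own QOM parent) */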
+ QLIST_FOREACH_SAFE(child, &clk->children, sibling, next) { + clock_disconnect(child); + } + + /* remove us from source's children list */ + clock_disconnect(clk); + + g_free(clk->canonical_path); +} + +static const TypeInfo clock_info = { + .name = TYPE_CLOCK, + .parent = TYPE_OBJECT, + .instance_size = sizeof(Clock), + .instance_init = clock_initfn, + .instance_finalize = clock_finalizefn, +}; + +static void clock_register_types(void) +{ + type_register_static(&clock_info); +} + +type_init(clock_register_types) diff --git a/hw/core/cpu-common.c b/hw/core/cpu-common.c new file mode 100644 index 000000000..9e3241b43 --- /dev/null +++ b/hw/core/cpu-common.c @@ -0,0 +1,298 @@ +/* + * QEMU CPU model + * + * Copyright (c) 2012-2014 SUSE LINUX Products GmbH + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see + * + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "hw/core/cpu.h" +#include "sysemu/hw_accel.h" +#include "qemu/notify.h" +#include "qemu/log.h" +#include "qemu/main-loop.h" +#include "exec/log.h" +#include "exec/cpu-common.h" +#include "qemu/error-report.h" +#include "qemu/qemu-print.h" +#include "sysemu/tcg.h" +#include "hw/boards.h" +#include "hw/qdev-properties.h" +#include "trace/trace-root.h" +#include "qemu/plugin.h" + +CPUState *cpu_by_arch_id(int64_t id) +{ + CPUState *cpu; + + CPU_FOREACH(cpu) { + CPUClass *cc = CPU_GET_CLASS(cpu); + + if (cc->get_arch_id(cpu) == id) { + return cpu; + } + } + return NULL; +} + +bool cpu_exists(int64_t id) +{ + return !!cpu_by_arch_id(id); +} + +CPUState *cpu_create(const char *typename) +{ + Error *err = NULL; + CPUState *cpu = CPU(object_new(typename)); + if (!qdev_realize(DEVICE(cpu), NULL, &err)) { + error_report_err(err); + object_unref(OBJECT(cpu)); + exit(EXIT_FAILURE); + } + return cpu; +} + +/* Resetting the IRQ comes from across the code base so we take the + * BQL here if we need to. cpu_interrupt assumes it is held.*/ +void cpu_reset_interrupt(CPUState *cpu, int mask) +{ + bool need_lock = !qemu_mutex_iothread_locked(); + + if (need_lock) { + qemu_mutex_lock_iothread(); + } + cpu->interrupt_request &= ~mask; + if (need_lock) { + qemu_mutex_unlock_iothread(); + } +} + +void cpu_exit(CPUState *cpu) +{ + qatomic_set(&cpu->exit_request, 1); + /* Ensure cpu_exec will see the exit request after TCG has exited. 
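(Editor's note: the smp_wmb() below orders the exit_request store before the icount_decr store; setting icount_decr.u16.high to -1 is what actually forces a running TCG vCPU off its fast path at the next interrupt check.)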
*/ + smp_wmb(); + qatomic_set(&cpu->icount_decr_ptr->u16.high, -1); +} + +static int cpu_common_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg) +{ + return 0; +} + +static int cpu_common_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg) +{ + return 0; +} + +void cpu_dump_state(CPUState *cpu, FILE *f, int flags) +{ + CPUClass *cc = CPU_GET_CLASS(cpu); + + if (cc->dump_state) { + cpu_synchronize_state(cpu); + cc->dump_state(cpu, f, flags); + } +} + +void cpu_reset(CPUState *cpu) +{ + device_cold_reset(DEVICE(cpu)); + + trace_guest_cpu_reset(cpu); +} + +static void cpu_common_reset(DeviceState *dev) +{ + CPUState *cpu = CPU(dev); + CPUClass *cc = CPU_GET_CLASS(cpu); + + if (qemu_loglevel_mask(CPU_LOG_RESET)) { + qemu_log("CPU Reset (CPU %d)\n", cpu->cpu_index); + log_cpu_state(cpu, cc->reset_dump_flags); + } + + cpu->interrupt_request = 0; + cpu->halted = cpu->start_powered_off; + cpu->mem_io_pc = 0; + cpu->icount_extra = 0; + qatomic_set(&cpu->icount_decr_ptr->u32, 0); + cpu->can_do_io = 1; + cpu->exception_index = -1; + cpu->crash_occurred = false; + cpu->cflags_next_tb = -1; + + if (tcg_enabled()) { + cpu_tb_jmp_cache_clear(cpu); + + tcg_flush_softmmu_tlb(cpu); + } +} + +static bool cpu_common_has_work(CPUState *cs) +{ + return false; +} + +ObjectClass *cpu_class_by_name(const char *typename, const char *cpu_model) +{ + CPUClass *cc = CPU_CLASS(object_class_by_name(typename)); + + assert(cpu_model && cc->class_by_name); + return cc->class_by_name(cpu_model); +} + +static void cpu_common_parse_features(const char *typename, char *features, + Error **errp) +{ + char *val; + static bool cpu_globals_initialized; + /* Single "key=value" string being parsed */ + char *featurestr = features ? strtok(features, ",") : NULL; + + /* should be called only once, catch invalid users */ + assert(!cpu_globals_initialized); + cpu_globals_initialized = true; + + while (featurestr) { + val = strchr(featurestr, '='); + if (val) { + GlobalProperty *prop = g_new0(typeof(*prop), 1); + *val = 0; + val++; + prop->driver = typename; + prop->property = g_strdup(featurestr); + prop->value = g_strdup(val); + qdev_prop_register_global(prop); + } else { + error_setg(errp, "Expected key=value format, found %s.", + featurestr); + return; + } + featurestr = strtok(NULL, ","); + } +} + +static void cpu_common_realizefn(DeviceState *dev, Error **errp) +{ + CPUState *cpu = CPU(dev); + Object *machine = qdev_get_machine(); + + /* qdev_get_machine() can return something that's not TYPE_MACHINE + * if this is one of the user-only emulators; in that case there's + * no need to check the ignore_memory_transaction_failures board flag. 
+ */ + if (object_dynamic_cast(machine, TYPE_MACHINE)) { + ObjectClass *oc = object_get_class(machine); + MachineClass *mc = MACHINE_CLASS(oc); + + if (mc) { + cpu->ignore_memory_transaction_failures = + mc->ignore_memory_transaction_failures; + } + } + + if (dev->hotplugged) { + cpu_synchronize_post_init(cpu); + cpu_resume(cpu); + } + + /* NOTE: latest generic point where the cpu is fully realized */ + trace_init_vcpu(cpu); +} + +static void cpu_common_unrealizefn(DeviceState *dev) +{ + CPUState *cpu = CPU(dev); + + /* NOTE: latest generic point before the cpu is fully unrealized */ + trace_fini_vcpu(cpu); + cpu_exec_unrealizefn(cpu); +} + +static void cpu_common_initfn(Object *obj) +{ + CPUState *cpu = CPU(obj); + CPUClass *cc = CPU_GET_CLASS(obj); + + cpu->cpu_index = UNASSIGNED_CPU_INDEX; + cpu->cluster_index = UNASSIGNED_CLUSTER_INDEX; + cpu->gdb_num_regs = cpu->gdb_num_g_regs = cc->gdb_num_core_regs; + /* *-user doesn't have configurable SMP topology */ + /* the default value is changed by qemu_init_vcpu() for softmmu */ + cpu->nr_cores = 1; + cpu->nr_threads = 1; + + qemu_mutex_init(&cpu->work_mutex); + QSIMPLEQ_INIT(&cpu->work_list); + QTAILQ_INIT(&cpu->breakpoints); + QTAILQ_INIT(&cpu->watchpoints); + + cpu_exec_initfn(cpu); +} + +static void cpu_common_finalize(Object *obj) +{ + CPUState *cpu = CPU(obj); + + qemu_mutex_destroy(&cpu->work_mutex); +} + +static int64_t cpu_common_get_arch_id(CPUState *cpu) +{ + return cpu->cpu_index; +} + +static void cpu_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + CPUClass *k = CPU_CLASS(klass); + + k->parse_features = cpu_common_parse_features; + k->get_arch_id = cpu_common_get_arch_id; + k->has_work = cpu_common_has_work; + k->gdb_read_register = cpu_common_gdb_read_register; + k->gdb_write_register = cpu_common_gdb_write_register; + set_bit(DEVICE_CATEGORY_CPU, dc->categories); + dc->realize = cpu_common_realizefn; + dc->unrealize = cpu_common_unrealizefn; + dc->reset = cpu_common_reset; + cpu_class_init_props(dc); + /* + * Reason: CPUs still need special care by board code: wiring up + * IRQs, adding reset handlers, halting non-first CPUs, ... + */ + dc->user_creatable = false; +} + +static const TypeInfo cpu_type_info = { + .name = TYPE_CPU, + .parent = TYPE_DEVICE, + .instance_size = sizeof(CPUState), + .instance_init = cpu_common_initfn, + .instance_finalize = cpu_common_finalize, + .abstract = true, + .class_size = sizeof(CPUClass), + .class_init = cpu_class_init, +}; + +static void cpu_register_types(void) +{ + type_register_static(&cpu_type_info); +} + +type_init(cpu_register_types) diff --git a/hw/core/cpu-sysemu.c b/hw/core/cpu-sysemu.c new file mode 100644 index 000000000..00253f892 --- /dev/null +++ b/hw/core/cpu-sysemu.c @@ -0,0 +1,145 @@ +/* + * QEMU CPU model (system emulation specific) + * + * Copyright (c) 2012-2014 SUSE LINUX Products GmbH + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see + * + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "hw/core/cpu.h" +#include "hw/core/sysemu-cpu-ops.h" + +bool cpu_paging_enabled(const CPUState *cpu) +{ + CPUClass *cc = CPU_GET_CLASS(cpu); + + if (cc->sysemu_ops->get_paging_enabled) { + return cc->sysemu_ops->get_paging_enabled(cpu); + } + + return false; +} + +void cpu_get_memory_mapping(CPUState *cpu, MemoryMappingList *list, + Error **errp) +{ + CPUClass *cc = CPU_GET_CLASS(cpu); + + if (cc->sysemu_ops->get_memory_mapping) { + cc->sysemu_ops->get_memory_mapping(cpu, list, errp); + return; + } + + error_setg(errp, "Obtaining memory mappings is unsupported on this CPU."); +} + +hwaddr cpu_get_phys_page_attrs_debug(CPUState *cpu, vaddr addr, + MemTxAttrs *attrs) +{ + CPUClass *cc = CPU_GET_CLASS(cpu); + + if (cc->sysemu_ops->get_phys_page_attrs_debug) { + return cc->sysemu_ops->get_phys_page_attrs_debug(cpu, addr, attrs); + } + /* Fallback for CPUs which don't implement the _attrs_ hook */ + *attrs = MEMTXATTRS_UNSPECIFIED; + return cc->sysemu_ops->get_phys_page_debug(cpu, addr); +} + +hwaddr cpu_get_phys_page_debug(CPUState *cpu, vaddr addr) +{ + MemTxAttrs attrs = {}; + + return cpu_get_phys_page_attrs_debug(cpu, addr, &attrs); +} + +int cpu_asidx_from_attrs(CPUState *cpu, MemTxAttrs attrs) +{ + CPUClass *cc = CPU_GET_CLASS(cpu); + int ret = 0; + + if (cc->sysemu_ops->asidx_from_attrs) { + ret = cc->sysemu_ops->asidx_from_attrs(cpu, attrs); + assert(ret < cpu->num_ases && ret >= 0); + } + return ret; +} + +int cpu_write_elf32_qemunote(WriteCoreDumpFunction f, CPUState *cpu, + void *opaque) +{ + CPUClass *cc = CPU_GET_CLASS(cpu); + + if (!cc->sysemu_ops->write_elf32_qemunote) { + return 0; + } + return (*cc->sysemu_ops->write_elf32_qemunote)(f, cpu, opaque); +} + +int cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cpu, + int cpuid, void *opaque) +{ + CPUClass *cc = CPU_GET_CLASS(cpu); + + if (!cc->sysemu_ops->write_elf32_note) { + return -1; + } + return (*cc->sysemu_ops->write_elf32_note)(f, cpu, cpuid, opaque); +} + +int cpu_write_elf64_qemunote(WriteCoreDumpFunction f, CPUState *cpu, + void *opaque) +{ + CPUClass *cc = CPU_GET_CLASS(cpu); + + if (!cc->sysemu_ops->write_elf64_qemunote) { + return 0; + } + return (*cc->sysemu_ops->write_elf64_qemunote)(f, cpu, opaque); +} + +int cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cpu, + int cpuid, void *opaque) +{ + CPUClass *cc = CPU_GET_CLASS(cpu); + + if (!cc->sysemu_ops->write_elf64_note) { + return -1; + } + return (*cc->sysemu_ops->write_elf64_note)(f, cpu, cpuid, opaque); +} + +bool cpu_virtio_is_big_endian(CPUState *cpu) +{ + CPUClass *cc = CPU_GET_CLASS(cpu); + + if (cc->sysemu_ops->virtio_is_big_endian) { + return cc->sysemu_ops->virtio_is_big_endian(cpu); + } + return target_words_bigendian(); +} + +GuestPanicInformation *cpu_get_crash_info(CPUState *cpu) +{ + CPUClass *cc = CPU_GET_CLASS(cpu); + GuestPanicInformation *res = NULL; + + if (cc->sysemu_ops->get_crash_info) { + res = cc->sysemu_ops->get_crash_info(cpu); + } + return res; +} diff --git a/hw/core/fw-path-provider.c b/hw/core/fw-path-provider.c new file mode 100644 index 000000000..4840faefd --- /dev/null +++ b/hw/core/fw-path-provider.c @@ -0,0 +1,54 @@ +/* + * Firmware path provider class and helpers. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +#include "qemu/osdep.h" +#include "hw/fw-path-provider.h" +#include "qemu/module.h" + +char *fw_path_provider_get_dev_path(FWPathProvider *p, BusState *bus, + DeviceState *dev) +{ + FWPathProviderClass *k = FW_PATH_PROVIDER_GET_CLASS(p); + + return k->get_dev_path(p, bus, dev); +} + +char *fw_path_provider_try_get_dev_path(Object *o, BusState *bus, + DeviceState *dev) +{ + FWPathProvider *p = (FWPathProvider *) + object_dynamic_cast(o, TYPE_FW_PATH_PROVIDER); + + if (p) { + return fw_path_provider_get_dev_path(p, bus, dev); + } + + return NULL; +} + +static const TypeInfo fw_path_provider_info = { + .name = TYPE_FW_PATH_PROVIDER, + .parent = TYPE_INTERFACE, + .class_size = sizeof(FWPathProviderClass), +}; + +static void fw_path_provider_register_types(void) +{ + type_register_static(&fw_path_provider_info); +} + +type_init(fw_path_provider_register_types) diff --git a/hw/core/generic-loader.c b/hw/core/generic-loader.c new file mode 100644 index 000000000..d14f932ee --- /dev/null +++ b/hw/core/generic-loader.c @@ -0,0 +1,221 @@ +/* + * Generic Loader + * + * Copyright (C) 2014 Li Guang + * Copyright (C) 2016 Xilinx Inc. + * Written by Li Guang + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + */ + +/* + * Internally inside QEMU this is a device. It is a strange device that + * provides no hardware interface but allows QEMU to monkey patch memory + * specified when it is created. To be able to do this it has a reset + * callback that does the memory operations. + + * This device allows the user to monkey patch memory. To be able to do + * this it needs a backend to manage the datas, the same as other + * memory-related devices. In this case as the backend is so trivial we + * have merged it with the frontend instead of creating and maintaining a + * separate backend. 
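 *
 * (Editor's note, not part of the patch: the properties defined below map
 * onto the documented command-line forms of this device, i.e. either
 *   -device loader,file=<file>[,addr=<addr>][,cpu-num=<n>][,force-raw=<bool>]
 * to load an image, or
 *   -device loader,addr=<addr>,data=<value>,data-len=<len>[,data-be=<bool>][,cpu-num=<n>]
 * to poke up to 8 bytes of data into guest memory at reset.)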
+ */ + +#include "qemu/osdep.h" +#include "hw/core/cpu.h" +#include "sysemu/dma.h" +#include "sysemu/reset.h" +#include "hw/boards.h" +#include "hw/loader.h" +#include "hw/qdev-properties.h" +#include "qapi/error.h" +#include "qemu/module.h" +#include "hw/core/generic-loader.h" + +#define CPU_NONE 0xFFFFFFFF + +static void generic_loader_reset(void *opaque) +{ + GenericLoaderState *s = GENERIC_LOADER(opaque); + + if (s->set_pc) { + CPUClass *cc = CPU_GET_CLASS(s->cpu); + cpu_reset(s->cpu); + if (cc) { + cc->set_pc(s->cpu, s->addr); + } + } + + if (s->data_len) { + assert(s->data_len < sizeof(s->data)); + dma_memory_write(s->cpu->as, s->addr, &s->data, s->data_len); + } +} + +static void generic_loader_realize(DeviceState *dev, Error **errp) +{ + GenericLoaderState *s = GENERIC_LOADER(dev); + hwaddr entry; + int big_endian; + int size = 0; + + s->set_pc = false; + + /* Perform some error checking on the user's options */ + if (s->data || s->data_len || s->data_be) { + /* User is loading memory values */ + if (s->file) { + error_setg(errp, "Specifying a file is not supported when loading " + "memory values"); + return; + } else if (s->force_raw) { + error_setg(errp, "Specifying force-raw is not supported when " + "loading memory values"); + return; + } else if (!s->data_len) { + /* We can't check for !data here as a value of 0 is still valid. */ + error_setg(errp, "Both data and data-len must be specified"); + return; + } else if (s->data_len > 8) { + error_setg(errp, "data-len cannot be greater then 8 bytes"); + return; + } + } else if (s->file || s->force_raw) { + /* User is loading an image */ + if (s->data || s->data_len || s->data_be) { + error_setg(errp, "data can not be specified when loading an " + "image"); + return; + } + /* The user specified a file, only set the PC if they also specified + * a CPU to use. + */ + if (s->cpu_num != CPU_NONE) { + s->set_pc = true; + } + } else if (s->addr) { + /* User is setting the PC */ + if (s->data || s->data_len || s->data_be) { + error_setg(errp, "data can not be specified when setting a " + "program counter"); + return; + } else if (s->cpu_num == CPU_NONE) { + error_setg(errp, "cpu_num must be specified when setting a " + "program counter"); + return; + } + s->set_pc = true; + } else { + /* Did the user specify anything? */ + error_setg(errp, "please include valid arguments"); + return; + } + + qemu_register_reset(generic_loader_reset, dev); + + if (s->cpu_num != CPU_NONE) { + s->cpu = qemu_get_cpu(s->cpu_num); + if (!s->cpu) { + error_setg(errp, "Specified boot CPU#%d is nonexistent", + s->cpu_num); + return; + } + } else { + s->cpu = first_cpu; + } + + big_endian = target_words_bigendian(); + + if (s->file) { + AddressSpace *as = s->cpu ? 
s->cpu->as : NULL; + + if (!s->force_raw) { + size = load_elf_as(s->file, NULL, NULL, NULL, &entry, NULL, NULL, + NULL, big_endian, 0, 0, 0, as); + + if (size < 0) { + size = load_uimage_as(s->file, &entry, NULL, NULL, NULL, NULL, + as); + } + + if (size < 0) { + size = load_targphys_hex_as(s->file, &entry, as); + } + } + + if (size < 0 || s->force_raw) { + /* Default to the maximum size being the machine's ram size */ + size = load_image_targphys_as(s->file, s->addr, current_machine->ram_size, as); + } else { + s->addr = entry; + } + + if (size < 0) { + error_setg(errp, "Cannot load specified image %s", s->file); + return; + } + } + + /* Convert the data endiannes */ + if (s->data_be) { + s->data = cpu_to_be64(s->data); + } else { + s->data = cpu_to_le64(s->data); + } +} + +static void generic_loader_unrealize(DeviceState *dev) +{ + qemu_unregister_reset(generic_loader_reset, dev); +} + +static Property generic_loader_props[] = { + DEFINE_PROP_UINT64("addr", GenericLoaderState, addr, 0), + DEFINE_PROP_UINT64("data", GenericLoaderState, data, 0), + DEFINE_PROP_UINT8("data-len", GenericLoaderState, data_len, 0), + DEFINE_PROP_BOOL("data-be", GenericLoaderState, data_be, false), + DEFINE_PROP_UINT32("cpu-num", GenericLoaderState, cpu_num, CPU_NONE), + DEFINE_PROP_BOOL("force-raw", GenericLoaderState, force_raw, false), + DEFINE_PROP_STRING("file", GenericLoaderState, file), + DEFINE_PROP_END_OF_LIST(), +}; + +static void generic_loader_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + /* The reset function is not registered here and is instead registered in + * the realize function to allow this device to be added via the device_add + * command in the QEMU monitor. + * TODO: Improve the device_add functionality to allow resets to be + * connected + */ + dc->realize = generic_loader_realize; + dc->unrealize = generic_loader_unrealize; + device_class_set_props(dc, generic_loader_props); + dc->desc = "Generic Loader"; + set_bit(DEVICE_CATEGORY_MISC, dc->categories); +} + +static TypeInfo generic_loader_info = { + .name = TYPE_GENERIC_LOADER, + .parent = TYPE_DEVICE, + .instance_size = sizeof(GenericLoaderState), + .class_init = generic_loader_class_init, +}; + +static void generic_loader_register_type(void) +{ + type_register_static(&generic_loader_info); +} + +type_init(generic_loader_register_type) diff --git a/hw/core/gpio.c b/hw/core/gpio.c new file mode 100644 index 000000000..8e6b4f5ed --- /dev/null +++ b/hw/core/gpio.c @@ -0,0 +1,197 @@ +/* + * qdev GPIO helpers + * + * Copyright (c) 2009 CodeSourcery + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ + +#include "qemu/osdep.h" +#include "hw/qdev-core.h" +#include "hw/irq.h" +#include "qapi/error.h" + +static NamedGPIOList *qdev_get_named_gpio_list(DeviceState *dev, + const char *name) +{ + NamedGPIOList *ngl; + + QLIST_FOREACH(ngl, &dev->gpios, node) { + /* NULL is a valid and matchable name. 
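(Editor's note: g_strcmp0() treats two NULL arguments as equal, so the anonymous GPIO list is matched by a NULL lookup as well.)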
*/ + if (g_strcmp0(name, ngl->name) == 0) { + return ngl; + } + } + + ngl = g_malloc0(sizeof(*ngl)); + ngl->name = g_strdup(name); + QLIST_INSERT_HEAD(&dev->gpios, ngl, node); + return ngl; +} + +void qdev_init_gpio_in_named_with_opaque(DeviceState *dev, + qemu_irq_handler handler, + void *opaque, + const char *name, int n) +{ + int i; + NamedGPIOList *gpio_list = qdev_get_named_gpio_list(dev, name); + + assert(gpio_list->num_out == 0 || !name); + gpio_list->in = qemu_extend_irqs(gpio_list->in, gpio_list->num_in, handler, + opaque, n); + + if (!name) { + name = "unnamed-gpio-in"; + } + for (i = gpio_list->num_in; i < gpio_list->num_in + n; i++) { + gchar *propname = g_strdup_printf("%s[%u]", name, i); + + object_property_add_child(OBJECT(dev), propname, + OBJECT(gpio_list->in[i])); + g_free(propname); + } + + gpio_list->num_in += n; +} + +void qdev_init_gpio_in(DeviceState *dev, qemu_irq_handler handler, int n) +{ + qdev_init_gpio_in_named(dev, handler, NULL, n); +} + +void qdev_init_gpio_out_named(DeviceState *dev, qemu_irq *pins, + const char *name, int n) +{ + int i; + NamedGPIOList *gpio_list = qdev_get_named_gpio_list(dev, name); + + assert(gpio_list->num_in == 0 || !name); + + if (!name) { + name = "unnamed-gpio-out"; + } + memset(pins, 0, sizeof(*pins) * n); + for (i = 0; i < n; ++i) { + gchar *propname = g_strdup_printf("%s[%u]", name, + gpio_list->num_out + i); + + object_property_add_link(OBJECT(dev), propname, TYPE_IRQ, + (Object **)&pins[i], + object_property_allow_set_link, + OBJ_PROP_LINK_STRONG); + g_free(propname); + } + gpio_list->num_out += n; +} + +void qdev_init_gpio_out(DeviceState *dev, qemu_irq *pins, int n) +{ + qdev_init_gpio_out_named(dev, pins, NULL, n); +} + +qemu_irq qdev_get_gpio_in_named(DeviceState *dev, const char *name, int n) +{ + NamedGPIOList *gpio_list = qdev_get_named_gpio_list(dev, name); + + assert(n >= 0 && n < gpio_list->num_in); + return gpio_list->in[n]; +} + +qemu_irq qdev_get_gpio_in(DeviceState *dev, int n) +{ + return qdev_get_gpio_in_named(dev, NULL, n); +} + +void qdev_connect_gpio_out_named(DeviceState *dev, const char *name, int n, + qemu_irq pin) +{ + char *propname = g_strdup_printf("%s[%d]", + name ? name : "unnamed-gpio-out", n); + if (pin && !OBJECT(pin)->parent) { + /* We need a name for object_property_set_link to work */ + object_property_add_child(container_get(qdev_get_machine(), + "/unattached"), + "non-qdev-gpio[*]", OBJECT(pin)); + } + object_property_set_link(OBJECT(dev), propname, OBJECT(pin), &error_abort); + g_free(propname); +} + +qemu_irq qdev_get_gpio_out_connector(DeviceState *dev, const char *name, int n) +{ + g_autofree char *propname = g_strdup_printf("%s[%d]", + name ? name : "unnamed-gpio-out", n); + + qemu_irq ret = (qemu_irq)object_property_get_link(OBJECT(dev), propname, + NULL); + + return ret; +} + +/* disconnect a GPIO output, returning the disconnected input (if any) */ + +static qemu_irq qdev_disconnect_gpio_out_named(DeviceState *dev, + const char *name, int n) +{ + char *propname = g_strdup_printf("%s[%d]", + name ? 
name : "unnamed-gpio-out", n); + + qemu_irq ret = (qemu_irq)object_property_get_link(OBJECT(dev), propname, + NULL); + if (ret) { + object_property_set_link(OBJECT(dev), propname, NULL, NULL); + } + g_free(propname); + return ret; +} + +qemu_irq qdev_intercept_gpio_out(DeviceState *dev, qemu_irq icpt, + const char *name, int n) +{ + qemu_irq disconnected = qdev_disconnect_gpio_out_named(dev, name, n); + qdev_connect_gpio_out_named(dev, name, n, icpt); + return disconnected; +} + +void qdev_connect_gpio_out(DeviceState *dev, int n, qemu_irq pin) +{ + qdev_connect_gpio_out_named(dev, NULL, n, pin); +} + +void qdev_pass_gpios(DeviceState *dev, DeviceState *container, + const char *name) +{ + int i; + NamedGPIOList *ngl = qdev_get_named_gpio_list(dev, name); + + for (i = 0; i < ngl->num_in; i++) { + const char *nm = ngl->name ? ngl->name : "unnamed-gpio-in"; + char *propname = g_strdup_printf("%s[%d]", nm, i); + + object_property_add_alias(OBJECT(container), propname, + OBJECT(dev), propname); + g_free(propname); + } + for (i = 0; i < ngl->num_out; i++) { + const char *nm = ngl->name ? ngl->name : "unnamed-gpio-out"; + char *propname = g_strdup_printf("%s[%d]", nm, i); + + object_property_add_alias(OBJECT(container), propname, + OBJECT(dev), propname); + g_free(propname); + } + QLIST_REMOVE(ngl, node); + QLIST_INSERT_HEAD(&container->gpios, ngl, node); +} diff --git a/hw/core/guest-loader.c b/hw/core/guest-loader.c new file mode 100644 index 000000000..d3f9d1a06 --- /dev/null +++ b/hw/core/guest-loader.c @@ -0,0 +1,144 @@ +/* + * Guest Loader + * + * Copyright (C) 2020 Linaro + * Written by Alex Bennée + * (based on the generic-loader by Li Guang ) + * + * SPDX-License-Identifier: GPL-2.0-or-later + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +/* + * Much like the generic-loader this is treated as a special device + * inside QEMU. However unlike the generic-loader this device is used + * to load guest images for hypervisors. As part of that process the + * hypervisor needs to have platform information passed to it by the + * lower levels of the stack (e.g. firmware/bootloader). If you boot + * the hypervisor directly you use the guest-loader to load the Dom0 + * or equivalent guest images in the right place in the same way a + * boot loader would. + * + * This is only relevant for full system emulation. + */ + +#include "qemu/osdep.h" +#include "hw/core/cpu.h" +#include "sysemu/dma.h" +#include "hw/loader.h" +#include "hw/qdev-properties.h" +#include "qapi/error.h" +#include "qemu/module.h" +#include "guest-loader.h" +#include "sysemu/device_tree.h" +#include "hw/boards.h" + +/* + * Insert some FDT nodes for the loaded blob. 
+ */ +static void loader_insert_platform_data(GuestLoaderState *s, int size, + Error **errp) +{ + MachineState *machine = MACHINE(qdev_get_machine()); + void *fdt = machine->fdt; + g_autofree char *node = g_strdup_printf("/chosen/module@0x%08" PRIx64, + s->addr); + uint64_t reg_attr[2] = {cpu_to_be64(s->addr), cpu_to_be64(size)}; + + if (!fdt) { + error_setg(errp, "Cannot modify FDT fields if the machine has none"); + return; + } + + qemu_fdt_add_subnode(fdt, node); + qemu_fdt_setprop(fdt, node, "reg", ®_attr, sizeof(reg_attr)); + + if (s->kernel) { + const char *compat[2] = { "multiboot,module", "multiboot,kernel" }; + if (qemu_fdt_setprop_string_array(fdt, node, "compatible", + (char **) &compat, + ARRAY_SIZE(compat)) < 0) { + error_setg(errp, "couldn't set %s/compatible", node); + return; + } + if (s->args) { + if (qemu_fdt_setprop_string(fdt, node, "bootargs", s->args) < 0) { + error_setg(errp, "couldn't set %s/bootargs", node); + } + } + } else if (s->initrd) { + const char *compat[2] = { "multiboot,module", "multiboot,ramdisk" }; + if (qemu_fdt_setprop_string_array(fdt, node, "compatible", + (char **) &compat, + ARRAY_SIZE(compat)) < 0) { + error_setg(errp, "couldn't set %s/compatible", node); + return; + } + } +} + +static void guest_loader_realize(DeviceState *dev, Error **errp) +{ + GuestLoaderState *s = GUEST_LOADER(dev); + char *file = s->kernel ? s->kernel : s->initrd; + int size = 0; + + /* Perform some error checking on the user's options */ + if (s->kernel && s->initrd) { + error_setg(errp, "Cannot specify a kernel and initrd in same stanza"); + return; + } else if (!s->kernel && !s->initrd) { + error_setg(errp, "Need to specify a kernel or initrd image"); + return; + } else if (!s->addr) { + error_setg(errp, "Need to specify the address of guest blob"); + return; + } else if (s->args && !s->kernel) { + error_setg(errp, "Boot args only relevant to kernel blobs"); + } + + /* Default to the maximum size being the machine's ram size */ + size = load_image_targphys_as(file, s->addr, current_machine->ram_size, + NULL); + if (size < 0) { + error_setg(errp, "Cannot load specified image %s", file); + return; + } + + /* Now the image is loaded we need to update the platform data */ + loader_insert_platform_data(s, size, errp); +} + +static Property guest_loader_props[] = { + DEFINE_PROP_UINT64("addr", GuestLoaderState, addr, 0), + DEFINE_PROP_STRING("kernel", GuestLoaderState, kernel), + DEFINE_PROP_STRING("bootargs", GuestLoaderState, args), + DEFINE_PROP_STRING("initrd", GuestLoaderState, initrd), + DEFINE_PROP_END_OF_LIST(), +}; + +static void guest_loader_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = guest_loader_realize; + device_class_set_props(dc, guest_loader_props); + dc->desc = "Guest Loader"; + set_bit(DEVICE_CATEGORY_MISC, dc->categories); +} + +static TypeInfo guest_loader_info = { + .name = TYPE_GUEST_LOADER, + .parent = TYPE_DEVICE, + .instance_size = sizeof(GuestLoaderState), + .class_init = guest_loader_class_init, +}; + +static void guest_loader_register_type(void) +{ + type_register_static(&guest_loader_info); +} + +type_init(guest_loader_register_type) diff --git a/hw/core/guest-loader.h b/hw/core/guest-loader.h new file mode 100644 index 000000000..07f4b4884 --- /dev/null +++ b/hw/core/guest-loader.h @@ -0,0 +1,34 @@ +/* + * Guest Loader + * + * Copyright (C) 2020 Linaro + * Written by Alex Bennée + * (based on the generic-loader by Li Guang ) + * + * SPDX-License-Identifier: GPL-2.0-or-later + * + * This 
work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#ifndef GUEST_LOADER_H +#define GUEST_LOADER_H + +#include "hw/qdev-core.h" +#include "qom/object.h" + +struct GuestLoaderState { + /* */ + DeviceState parent_obj; + + /* */ + uint64_t addr; + char *kernel; + char *args; + char *initrd; +}; + +#define TYPE_GUEST_LOADER "guest-loader" +OBJECT_DECLARE_SIMPLE_TYPE(GuestLoaderState, GUEST_LOADER) + +#endif diff --git a/hw/core/hotplug-stubs.c b/hw/core/hotplug-stubs.c new file mode 100644 index 000000000..7aadaa29b --- /dev/null +++ b/hw/core/hotplug-stubs.c @@ -0,0 +1,34 @@ +/* + * Hotplug handler stubs + * + * Copyright (c) Red Hat + * + * Authors: + * Philippe Mathieu-Daudé , + * + * SPDX-License-Identifier: GPL-2.0-or-later + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ +#include "qemu/osdep.h" +#include "hw/qdev-core.h" + +HotplugHandler *qdev_get_hotplug_handler(DeviceState *dev) +{ + return NULL; +} + +void hotplug_handler_pre_plug(HotplugHandler *plug_handler, + DeviceState *plugged_dev, + Error **errp) +{ + g_assert_not_reached(); +} + +void hotplug_handler_plug(HotplugHandler *plug_handler, + DeviceState *plugged_dev, + Error **errp) +{ + g_assert_not_reached(); +} diff --git a/hw/core/hotplug.c b/hw/core/hotplug.c new file mode 100644 index 000000000..17ac98668 --- /dev/null +++ b/hw/core/hotplug.c @@ -0,0 +1,71 @@ +/* + * Hotplug handler interface. + * + * Copyright (c) 2014 Red Hat Inc. + * + * Authors: + * Igor Mammedov , + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ +#include "qemu/osdep.h" +#include "hw/hotplug.h" +#include "qemu/module.h" + +void hotplug_handler_pre_plug(HotplugHandler *plug_handler, + DeviceState *plugged_dev, + Error **errp) +{ + HotplugHandlerClass *hdc = HOTPLUG_HANDLER_GET_CLASS(plug_handler); + + if (hdc->pre_plug) { + hdc->pre_plug(plug_handler, plugged_dev, errp); + } +} + +void hotplug_handler_plug(HotplugHandler *plug_handler, + DeviceState *plugged_dev, + Error **errp) +{ + HotplugHandlerClass *hdc = HOTPLUG_HANDLER_GET_CLASS(plug_handler); + + if (hdc->plug) { + hdc->plug(plug_handler, plugged_dev, errp); + } +} + +void hotplug_handler_unplug_request(HotplugHandler *plug_handler, + DeviceState *plugged_dev, + Error **errp) +{ + HotplugHandlerClass *hdc = HOTPLUG_HANDLER_GET_CLASS(plug_handler); + + if (hdc->unplug_request) { + hdc->unplug_request(plug_handler, plugged_dev, errp); + } +} + +void hotplug_handler_unplug(HotplugHandler *plug_handler, + DeviceState *plugged_dev, + Error **errp) +{ + HotplugHandlerClass *hdc = HOTPLUG_HANDLER_GET_CLASS(plug_handler); + + if (hdc->unplug) { + hdc->unplug(plug_handler, plugged_dev, errp); + } +} + +static const TypeInfo hotplug_handler_info = { + .name = TYPE_HOTPLUG_HANDLER, + .parent = TYPE_INTERFACE, + .class_size = sizeof(HotplugHandlerClass), +}; + +static void hotplug_handler_register_types(void) +{ + type_register_static(&hotplug_handler_info); +} + +type_init(hotplug_handler_register_types) diff --git a/hw/core/irq.c b/hw/core/irq.c new file mode 100644 index 000000000..8a9cbdd55 --- /dev/null +++ b/hw/core/irq.c @@ -0,0 +1,146 @@ +/* + * QEMU IRQ/GPIO common code. + * + * Copyright (c) 2007 CodeSourcery. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ +#include "qemu/osdep.h" +#include "qemu/main-loop.h" +#include "hw/irq.h" +#include "qom/object.h" + +DECLARE_INSTANCE_CHECKER(struct IRQState, IRQ, + TYPE_IRQ) + +struct IRQState { + Object parent_obj; + + qemu_irq_handler handler; + void *opaque; + int n; +}; + +void qemu_set_irq(qemu_irq irq, int level) +{ + if (!irq) + return; + + irq->handler(irq->opaque, irq->n, level); +} + +qemu_irq *qemu_extend_irqs(qemu_irq *old, int n_old, qemu_irq_handler handler, + void *opaque, int n) +{ + qemu_irq *s; + int i; + + if (!old) { + n_old = 0; + } + s = old ? g_renew(qemu_irq, old, n + n_old) : g_new(qemu_irq, n); + for (i = n_old; i < n + n_old; i++) { + s[i] = qemu_allocate_irq(handler, opaque, i); + } + return s; +} + +qemu_irq *qemu_allocate_irqs(qemu_irq_handler handler, void *opaque, int n) +{ + return qemu_extend_irqs(NULL, 0, handler, opaque, n); +} + +qemu_irq qemu_allocate_irq(qemu_irq_handler handler, void *opaque, int n) +{ + struct IRQState *irq; + + irq = IRQ(object_new(TYPE_IRQ)); + irq->handler = handler; + irq->opaque = opaque; + irq->n = n; + + return irq; +} + +void qemu_free_irqs(qemu_irq *s, int n) +{ + int i; + for (i = 0; i < n; i++) { + qemu_free_irq(s[i]); + } + g_free(s); +} + +void qemu_free_irq(qemu_irq irq) +{ + object_unref(OBJECT(irq)); +} + +static void qemu_notirq(void *opaque, int line, int level) +{ + struct IRQState *irq = opaque; + + irq->handler(irq->opaque, irq->n, !level); +} + +qemu_irq qemu_irq_invert(qemu_irq irq) +{ + /* The default state for IRQs is low, so raise the output now. 
*/
+    qemu_irq_raise(irq);
+    return qemu_allocate_irq(qemu_notirq, irq, 0);
+}
+
+static void qemu_splitirq(void *opaque, int line, int level)
+{
+    struct IRQState **irq = opaque;
+    irq[0]->handler(irq[0]->opaque, irq[0]->n, level);
+    irq[1]->handler(irq[1]->opaque, irq[1]->n, level);
+}
+
+qemu_irq qemu_irq_split(qemu_irq irq1, qemu_irq irq2)
+{
+    qemu_irq *s = g_malloc0(2 * sizeof(qemu_irq));
+    s[0] = irq1;
+    s[1] = irq2;
+    return qemu_allocate_irq(qemu_splitirq, s, 0);
+}
+
+void qemu_irq_intercept_in(qemu_irq *gpio_in, qemu_irq_handler handler, int n)
+{
+    int i;
+    qemu_irq *old_irqs = qemu_allocate_irqs(NULL, NULL, n);
+    for (i = 0; i < n; i++) {
+        *old_irqs[i] = *gpio_in[i];
+        gpio_in[i]->handler = handler;
+        gpio_in[i]->opaque = &old_irqs[i];
+    }
+}
+
+static const TypeInfo irq_type_info = {
+    .name = TYPE_IRQ,
+    .parent = TYPE_OBJECT,
+    .instance_size = sizeof(struct IRQState),
+};
+
+static void irq_register_types(void)
+{
+    type_register_static(&irq_type_info);
+}
+
+type_init(irq_register_types)
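[Editor's note: a self-contained sketch, not part of this patch, of the typical
consumption pattern for the helpers above; the handler and function names are
hypothetical:]

    #include "qemu/osdep.h"
    #include "hw/irq.h"

    /* 'n' is the line number supplied at allocation time */
    static void demo_gpio_handler(void *opaque, int n, int level)
    {
        int *rising = opaque;
        rising[n] += level;        /* count how often each line goes high */
    }

    static void demo_wire_gpio(void)
    {
        static int rising[4];
        /* four numbered input lines fanned into one handler */
        qemu_irq *lines = qemu_allocate_irqs(demo_gpio_handler, rising, 4);

        qemu_set_irq(lines[2], 1); /* drive line 2 high */
        qemu_set_irq(lines[2], 0); /* and low again */

        qemu_free_irqs(lines, 4);
    }

[Each qemu_irq allocated by qemu_allocate_irqs() remembers its index, so one
handler can service a whole bank of lines, which is how most device models use
this API.]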
diff --git a/hw/core/loader-fit.c b/hw/core/loader-fit.c
new file mode 100644
index 000000000..b7c7b3ba9
--- /dev/null
+++ b/hw/core/loader-fit.c
@@ -0,0 +1,335 @@
+/*
+ * Flattened Image Tree loader.
+ *
+ * Copyright (c) 2016 Imagination Technologies
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "qemu/units.h"
+#include "exec/memory.h"
+#include "hw/loader.h"
+#include "hw/loader-fit.h"
+#include "qemu/cutils.h"
+#include "qemu/error-report.h"
+#include "sysemu/device_tree.h"
+
+#include <libfdt.h>
+#include <zlib.h>
+
+#define FIT_LOADER_MAX_PATH (128)
+
+static const void *fit_load_image_alloc(const void *itb, const char *name,
+                                        int *poff, size_t *psz, Error **errp)
+{
+    const void *data;
+    const char *comp;
+    void *uncomp_data;
+    char path[FIT_LOADER_MAX_PATH];
+    int off, sz;
+    ssize_t uncomp_len;
+
+    snprintf(path, sizeof(path), "/images/%s", name);
+
+    off = fdt_path_offset(itb, path);
+    if (off < 0) {
+        error_setg(errp, "can't find node %s", path);
+        return NULL;
+    }
+    if (poff) {
+        *poff = off;
+    }
+
+    data = fdt_getprop(itb, off, "data", &sz);
+    if (!data) {
+        error_setg(errp, "can't get %s/data", path);
+        return NULL;
+    }
+
+    comp = fdt_getprop(itb, off, "compression", NULL);
+    if (!comp || !strcmp(comp, "none")) {
+        if (psz) {
+            *psz = sz;
+        }
+        uncomp_data = g_malloc(sz);
+        memmove(uncomp_data, data, sz);
+        return uncomp_data;
+    }
+
+    if (!strcmp(comp, "gzip")) {
+        uncomp_len = UBOOT_MAX_GUNZIP_BYTES;
+        uncomp_data = g_malloc(uncomp_len);
+
+        uncomp_len = gunzip(uncomp_data, uncomp_len, (void *) data, sz);
+        if (uncomp_len < 0) {
+            error_setg(errp, "unable to decompress %s image", name);
+            g_free(uncomp_data);
+            return NULL;
+        }
+
+        data = g_realloc(uncomp_data, uncomp_len);
+        if (psz) {
+            *psz = uncomp_len;
+        }
+        return data;
+    }
+
+    error_setg(errp, "unknown compression '%s'", comp);
+    return NULL;
+}
+
+static int fit_image_addr(const void *itb, int img, const char *name,
+                          hwaddr *addr, Error **errp)
+{
+    const void *prop;
+    int len;
+
+    prop = fdt_getprop(itb, img, name, &len);
+    if (!prop) {
+        error_setg(errp, "can't find %s address", name);
+        return -ENOENT;
+    }
+
+    switch (len) {
+    case 4:
+        *addr = fdt32_to_cpu(*(fdt32_t *)prop);
+        return 0;
+    case 8:
+        *addr = fdt64_to_cpu(*(fdt64_t *)prop);
+        return 0;
+    default:
+        error_setg(errp, "invalid %s address length %d", name, len);
+        return -EINVAL;
+    }
+}
+
+static int fit_load_kernel(const struct fit_loader *ldr, const void *itb,
+                           int cfg, void *opaque, hwaddr *pend,
+                           Error **errp)
+{
+    const char *name;
+    const void *data;
+    const void *load_data;
+    hwaddr load_addr, entry_addr;
+    int img_off, err;
+    size_t sz;
+    int ret;
+
+    name = fdt_getprop(itb, cfg, "kernel", NULL);
+    if (!name) {
+        error_setg(errp, "no kernel specified by FIT configuration");
+        return -EINVAL;
+    }
+
+    load_data = data = fit_load_image_alloc(itb, name, &img_off, &sz, errp);
+    if (!data) {
+        error_prepend(errp, "unable to load kernel image from FIT: ");
+        return -EINVAL;
+    }
+
+    err = fit_image_addr(itb, img_off, "load", &load_addr, errp);
+    if (err) {
+        error_prepend(errp, "unable to read kernel load address from FIT: ");
+        ret = err;
+        goto out;
+    }
+
+    err = fit_image_addr(itb, img_off, "entry", &entry_addr, errp);
+    if (err) {
+        error_prepend(errp, "unable to read kernel entry address from FIT: ");
+        ret = err;
+        goto out;
+    }
+
+    if (ldr->kernel_filter) {
+        load_data = ldr->kernel_filter(opaque, data, &load_addr, &entry_addr);
+    }
+
+    if (pend) {
+        *pend = load_addr + sz;
+    }
+
+    load_addr = ldr->addr_to_phys(opaque, load_addr);
+    rom_add_blob_fixed(name, load_data, sz, load_addr);
+
+    ret = 0;
+out:
+    g_free((void *) data);
+    if (data != load_data) {
+        g_free((void *) load_data);
+    }
+    return ret;
+}
+
+static int fit_load_fdt(const struct fit_loader *ldr, const void *itb,
+                        int cfg,
void *opaque, const void *match_data, + hwaddr kernel_end, Error **errp) +{ + Error *err = NULL; + const char *name; + const void *data; + const void *load_data; + hwaddr load_addr; + int img_off; + size_t sz; + int ret; + + name = fdt_getprop(itb, cfg, "fdt", NULL); + if (!name) { + return 0; + } + + load_data = data = fit_load_image_alloc(itb, name, &img_off, &sz, errp); + if (!data) { + error_prepend(errp, "unable to load FDT image from FIT: "); + return -EINVAL; + } + + ret = fit_image_addr(itb, img_off, "load", &load_addr, &err); + if (ret == -ENOENT) { + load_addr = ROUND_UP(kernel_end, 64 * KiB) + (10 * MiB); + error_free(err); + } else if (ret) { + error_propagate_prepend(errp, err, + "unable to read FDT load address from FIT: "); + goto out; + } + + if (ldr->fdt_filter) { + load_data = ldr->fdt_filter(opaque, data, match_data, &load_addr); + } + + load_addr = ldr->addr_to_phys(opaque, load_addr); + sz = fdt_totalsize(load_data); + rom_add_blob_fixed(name, load_data, sz, load_addr); + + ret = 0; +out: + g_free((void *) data); + if (data != load_data) { + g_free((void *) load_data); + } + return ret; +} + +static bool fit_cfg_compatible(const void *itb, int cfg, const char *compat) +{ + const void *fdt; + const char *fdt_name; + bool ret; + + fdt_name = fdt_getprop(itb, cfg, "fdt", NULL); + if (!fdt_name) { + return false; + } + + fdt = fit_load_image_alloc(itb, fdt_name, NULL, NULL, NULL); + if (!fdt) { + return false; + } + + if (fdt_check_header(fdt)) { + ret = false; + goto out; + } + + if (fdt_node_check_compatible(fdt, 0, compat)) { + ret = false; + goto out; + } + + ret = true; +out: + g_free((void *) fdt); + return ret; +} + +int load_fit(const struct fit_loader *ldr, const char *filename, void *opaque) +{ + Error *err = NULL; + const struct fit_loader_match *match; + const void *itb, *match_data = NULL; + const char *def_cfg_name; + char path[FIT_LOADER_MAX_PATH]; + int itb_size, configs, cfg_off, off; + hwaddr kernel_end; + int ret; + + itb = load_device_tree(filename, &itb_size); + if (!itb) { + return -EINVAL; + } + + configs = fdt_path_offset(itb, "/configurations"); + if (configs < 0) { + error_report("can't find node /configurations"); + ret = configs; + goto out; + } + + cfg_off = -FDT_ERR_NOTFOUND; + + if (ldr->matches) { + for (match = ldr->matches; match->compatible; match++) { + off = fdt_first_subnode(itb, configs); + while (off >= 0) { + if (fit_cfg_compatible(itb, off, match->compatible)) { + cfg_off = off; + match_data = match->data; + break; + } + + off = fdt_next_subnode(itb, off); + } + + if (cfg_off >= 0) { + break; + } + } + } + + if (cfg_off < 0) { + def_cfg_name = fdt_getprop(itb, configs, "default", NULL); + if (def_cfg_name) { + snprintf(path, sizeof(path), "/configurations/%s", def_cfg_name); + cfg_off = fdt_path_offset(itb, path); + } + } + + if (cfg_off < 0) { + error_report("can't find configuration"); + ret = cfg_off; + goto out; + } + + ret = fit_load_kernel(ldr, itb, cfg_off, opaque, &kernel_end, &err); + if (ret) { + error_report_err(err); + goto out; + } + + ret = fit_load_fdt(ldr, itb, cfg_off, opaque, match_data, kernel_end, + &err); + if (ret) { + error_report_err(err); + goto out; + } + + ret = 0; +out: + g_free((void *) itb); + return ret; +} diff --git a/hw/core/loader.c b/hw/core/loader.c new file mode 100644 index 000000000..052a0fd71 --- /dev/null +++ b/hw/core/loader.c @@ -0,0 +1,1760 @@ +/* + * QEMU Executable loader + * + * Copyright (c) 2006 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person 
obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ * Gunzip functionality in this file is derived from u-boot:
+ *
+ * (C) Copyright 2008 Semihalf
+ *
+ * (C) Copyright 2000-2005
+ * Wolfgang Denk, DENX Software Engineering, wd@denx.de.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu-common.h"
+#include "qemu/datadir.h"
+#include "qapi/error.h"
+#include "qapi/qapi-commands-machine.h"
+#include "qapi/type-helpers.h"
+#include "trace.h"
+#include "hw/hw.h"
+#include "disas/disas.h"
+#include "migration/vmstate.h"
+#include "monitor/monitor.h"
+#include "sysemu/reset.h"
+#include "sysemu/sysemu.h"
+#include "uboot_image.h"
+#include "hw/loader.h"
+#include "hw/nvram/fw_cfg.h"
+#include "exec/memory.h"
+#include "hw/boards.h"
+#include "qemu/cutils.h"
+#include "sysemu/runstate.h"
+
+#include <zlib.h>
+
+static int roms_loaded;
+
+/* return the size or -1 if error */
+int64_t get_image_size(const char *filename)
+{
+    int fd;
+    int64_t size;
+    fd = open(filename, O_RDONLY | O_BINARY);
+    if (fd < 0)
+        return -1;
+    size = lseek(fd, 0, SEEK_END);
+    close(fd);
+    return size;
+}
+
+/* return the size or -1 if error */
+ssize_t load_image_size(const char *filename, void *addr, size_t size)
+{
+    int fd;
+    ssize_t actsize, l = 0;
+
+    fd = open(filename, O_RDONLY | O_BINARY);
+    if (fd < 0) {
+        return -1;
+    }
+
+    while ((actsize = read(fd, addr + l, size - l)) > 0) {
+        l += actsize;
+    }
+
+    close(fd);
+
+    return actsize < 0 ? -1 : l;
+}
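[Editor's note: a short sketch, not part of this patch, of the calling pattern
the two helpers above are designed for; the path handling and helper name are
hypothetical. Size the file first, then read it into a caller-owned buffer,
treating a negative or short result as failure:]

    static uint8_t *demo_read_blob(const char *path, int64_t *len_out)
    {
        int64_t len = get_image_size(path);
        uint8_t *buf;

        if (len < 0) {
            return NULL;                /* open or seek failed */
        }
        buf = g_malloc(len);
        if (load_image_size(path, buf, len) != len) {
            g_free(buf);                /* short read or I/O error */
            return NULL;
        }
        *len_out = len;
        return buf;
    }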
+/* read()-like version */
+ssize_t read_targphys(const char *name,
+                      int fd, hwaddr dst_addr, size_t nbytes)
+{
+    uint8_t *buf;
+    ssize_t did;
+
+    buf = g_malloc(nbytes);
+    did = read(fd, buf, nbytes);
+    if (did > 0)
+        rom_add_blob_fixed("read", buf, did, dst_addr);
+    g_free(buf);
+    return did;
+}
+
+int load_image_targphys(const char *filename,
+                        hwaddr addr, uint64_t max_sz)
+{
+    return load_image_targphys_as(filename, addr, max_sz, NULL);
+}
+
+/* return the size or -1 if error */
+int load_image_targphys_as(const char *filename,
+                           hwaddr addr, uint64_t max_sz, AddressSpace *as)
+{
+    int size;
+
+    size = get_image_size(filename);
+    if (size < 0 || size > max_sz) {
+        return -1;
+    }
+    if (size > 0) {
+        if (rom_add_file_fixed_as(filename, addr, -1, as) < 0) {
+            return -1;
+        }
+    }
+    return size;
+}
+
+int load_image_mr(const char *filename, MemoryRegion *mr)
+{
+    int size;
+
+    if (!memory_access_is_direct(mr, false)) {
+        /* Can only load an image into RAM or ROM */
+        return -1;
+    }
+
+    size = get_image_size(filename);
+
+    if (size < 0 || size > memory_region_size(mr)) {
+        return -1;
+    }
+    if (size > 0) {
+        if (rom_add_file_mr(filename, mr, -1) < 0) {
+            return -1;
+        }
+    }
+    return size;
+}
+
+void pstrcpy_targphys(const char *name, hwaddr dest, int buf_size,
+                      const char *source)
+{
+    const char *nulp;
+    char *ptr;
+
+    if (buf_size <= 0) return;
+    nulp = memchr(source, 0, buf_size);
+    if (nulp) {
+        rom_add_blob_fixed(name, source, (nulp - source) + 1, dest);
+    } else {
+        rom_add_blob_fixed(name, source, buf_size, dest);
+        ptr = rom_ptr(dest + buf_size - 1, sizeof(*ptr));
+        *ptr = 0;
+    }
+}
+
+/* A.OUT loader */
+
+struct exec
+{
+    uint32_t a_info;   /* Use macros N_MAGIC, etc for access */
+    uint32_t a_text;   /* length of text, in bytes */
+    uint32_t a_data;   /* length of data, in bytes */
+    uint32_t a_bss;    /* length of uninitialized data area, in bytes */
+    uint32_t a_syms;   /* length of symbol table data in file, in bytes */
+    uint32_t a_entry;  /* start address */
+    uint32_t a_trsize; /* length of relocation info for text, in bytes */
+    uint32_t a_drsize; /* length of relocation info for data, in bytes */
+};
+
+static void bswap_ahdr(struct exec *e)
+{
+    bswap32s(&e->a_info);
+    bswap32s(&e->a_text);
+    bswap32s(&e->a_data);
+    bswap32s(&e->a_bss);
+    bswap32s(&e->a_syms);
+    bswap32s(&e->a_entry);
+    bswap32s(&e->a_trsize);
+    bswap32s(&e->a_drsize);
+}
+
+#define N_MAGIC(exec) ((exec).a_info & 0xffff)
+#define OMAGIC 0407
+#define NMAGIC 0410
+#define ZMAGIC 0413
+#define QMAGIC 0314
+#define _N_HDROFF(x) (1024 - sizeof (struct exec))
+#define N_TXTOFF(x) \
+    (N_MAGIC(x) == ZMAGIC ? _N_HDROFF((x)) + sizeof (struct exec) : \
+     (N_MAGIC(x) == QMAGIC ? 0 : sizeof (struct exec)))
+#define N_TXTADDR(x, target_page_size) (N_MAGIC(x) == QMAGIC ? target_page_size : 0)
+#define _N_SEGMENT_ROUND(x, target_page_size) (((x) + target_page_size - 1) & ~(target_page_size - 1))
+
+#define _N_TXTENDADDR(x, target_page_size) (N_TXTADDR(x, target_page_size)+(x).a_text)
+
+#define N_DATADDR(x, target_page_size) \
+    (N_MAGIC(x)==OMAGIC?
(_N_TXTENDADDR(x, target_page_size)) \ + : (_N_SEGMENT_ROUND (_N_TXTENDADDR(x, target_page_size), target_page_size))) + + +int load_aout(const char *filename, hwaddr addr, int max_sz, + int bswap_needed, hwaddr target_page_size) +{ + int fd; + ssize_t size, ret; + struct exec e; + uint32_t magic; + + fd = open(filename, O_RDONLY | O_BINARY); + if (fd < 0) + return -1; + + size = read(fd, &e, sizeof(e)); + if (size < 0) + goto fail; + + if (bswap_needed) { + bswap_ahdr(&e); + } + + magic = N_MAGIC(e); + switch (magic) { + case ZMAGIC: + case QMAGIC: + case OMAGIC: + if (e.a_text + e.a_data > max_sz) + goto fail; + lseek(fd, N_TXTOFF(e), SEEK_SET); + size = read_targphys(filename, fd, addr, e.a_text + e.a_data); + if (size < 0) + goto fail; + break; + case NMAGIC: + if (N_DATADDR(e, target_page_size) + e.a_data > max_sz) + goto fail; + lseek(fd, N_TXTOFF(e), SEEK_SET); + size = read_targphys(filename, fd, addr, e.a_text); + if (size < 0) + goto fail; + ret = read_targphys(filename, fd, addr + N_DATADDR(e, target_page_size), + e.a_data); + if (ret < 0) + goto fail; + size += ret; + break; + default: + goto fail; + } + close(fd); + return size; + fail: + close(fd); + return -1; +} + +/* ELF loader */ + +static void *load_at(int fd, off_t offset, size_t size) +{ + void *ptr; + if (lseek(fd, offset, SEEK_SET) < 0) + return NULL; + ptr = g_malloc(size); + if (read(fd, ptr, size) != size) { + g_free(ptr); + return NULL; + } + return ptr; +} + +#ifdef ELF_CLASS +#undef ELF_CLASS +#endif + +#define ELF_CLASS ELFCLASS32 +#include "elf.h" + +#define SZ 32 +#define elf_word uint32_t +#define elf_sword int32_t +#define bswapSZs bswap32s +#include "hw/elf_ops.h" + +#undef elfhdr +#undef elf_phdr +#undef elf_shdr +#undef elf_sym +#undef elf_rela +#undef elf_note +#undef elf_word +#undef elf_sword +#undef bswapSZs +#undef SZ +#define elfhdr elf64_hdr +#define elf_phdr elf64_phdr +#define elf_note elf64_note +#define elf_shdr elf64_shdr +#define elf_sym elf64_sym +#define elf_rela elf64_rela +#define elf_word uint64_t +#define elf_sword int64_t +#define bswapSZs bswap64s +#define SZ 64 +#include "hw/elf_ops.h" + +const char *load_elf_strerror(ssize_t error) +{ + switch (error) { + case 0: + return "No error"; + case ELF_LOAD_FAILED: + return "Failed to load ELF"; + case ELF_LOAD_NOT_ELF: + return "The image is not ELF"; + case ELF_LOAD_WRONG_ARCH: + return "The image is from incompatible architecture"; + case ELF_LOAD_WRONG_ENDIAN: + return "The image has incorrect endianness"; + case ELF_LOAD_TOO_BIG: + return "The image segments are too big to load"; + default: + return "Unknown error"; + } +} + +void load_elf_hdr(const char *filename, void *hdr, bool *is64, Error **errp) +{ + int fd; + uint8_t e_ident_local[EI_NIDENT]; + uint8_t *e_ident; + size_t hdr_size, off; + bool is64l; + + if (!hdr) { + hdr = e_ident_local; + } + e_ident = hdr; + + fd = open(filename, O_RDONLY | O_BINARY); + if (fd < 0) { + error_setg_errno(errp, errno, "Failed to open file: %s", filename); + return; + } + if (read(fd, hdr, EI_NIDENT) != EI_NIDENT) { + error_setg_errno(errp, errno, "Failed to read file: %s", filename); + goto fail; + } + if (e_ident[0] != ELFMAG0 || + e_ident[1] != ELFMAG1 || + e_ident[2] != ELFMAG2 || + e_ident[3] != ELFMAG3) { + error_setg(errp, "Bad ELF magic"); + goto fail; + } + + is64l = e_ident[EI_CLASS] == ELFCLASS64; + hdr_size = is64l ? 
sizeof(Elf64_Ehdr) : sizeof(Elf32_Ehdr); + if (is64) { + *is64 = is64l; + } + + off = EI_NIDENT; + while (hdr != e_ident_local && off < hdr_size) { + size_t br = read(fd, hdr + off, hdr_size - off); + switch (br) { + case 0: + error_setg(errp, "File too short: %s", filename); + goto fail; + case -1: + error_setg_errno(errp, errno, "Failed to read file: %s", + filename); + goto fail; + } + off += br; + } + +fail: + close(fd); +} + +/* return < 0 if error, otherwise the number of bytes loaded in memory */ +ssize_t load_elf(const char *filename, + uint64_t (*elf_note_fn)(void *, void *, bool), + uint64_t (*translate_fn)(void *, uint64_t), + void *translate_opaque, uint64_t *pentry, uint64_t *lowaddr, + uint64_t *highaddr, uint32_t *pflags, int big_endian, + int elf_machine, int clear_lsb, int data_swab) +{ + return load_elf_as(filename, elf_note_fn, translate_fn, translate_opaque, + pentry, lowaddr, highaddr, pflags, big_endian, + elf_machine, clear_lsb, data_swab, NULL); +} + +/* return < 0 if error, otherwise the number of bytes loaded in memory */ +ssize_t load_elf_as(const char *filename, + uint64_t (*elf_note_fn)(void *, void *, bool), + uint64_t (*translate_fn)(void *, uint64_t), + void *translate_opaque, uint64_t *pentry, uint64_t *lowaddr, + uint64_t *highaddr, uint32_t *pflags, int big_endian, + int elf_machine, int clear_lsb, int data_swab, + AddressSpace *as) +{ + return load_elf_ram(filename, elf_note_fn, translate_fn, translate_opaque, + pentry, lowaddr, highaddr, pflags, big_endian, + elf_machine, clear_lsb, data_swab, as, true); +} + +/* return < 0 if error, otherwise the number of bytes loaded in memory */ +ssize_t load_elf_ram(const char *filename, + uint64_t (*elf_note_fn)(void *, void *, bool), + uint64_t (*translate_fn)(void *, uint64_t), + void *translate_opaque, uint64_t *pentry, + uint64_t *lowaddr, uint64_t *highaddr, uint32_t *pflags, + int big_endian, int elf_machine, int clear_lsb, + int data_swab, AddressSpace *as, bool load_rom) +{ + return load_elf_ram_sym(filename, elf_note_fn, + translate_fn, translate_opaque, + pentry, lowaddr, highaddr, pflags, big_endian, + elf_machine, clear_lsb, data_swab, as, + load_rom, NULL); +} + +/* return < 0 if error, otherwise the number of bytes loaded in memory */ +ssize_t load_elf_ram_sym(const char *filename, + uint64_t (*elf_note_fn)(void *, void *, bool), + uint64_t (*translate_fn)(void *, uint64_t), + void *translate_opaque, uint64_t *pentry, + uint64_t *lowaddr, uint64_t *highaddr, + uint32_t *pflags, int big_endian, int elf_machine, + int clear_lsb, int data_swab, + AddressSpace *as, bool load_rom, symbol_fn_t sym_cb) +{ + int fd, data_order, target_data_order, must_swab; + ssize_t ret = ELF_LOAD_FAILED; + uint8_t e_ident[EI_NIDENT]; + + fd = open(filename, O_RDONLY | O_BINARY); + if (fd < 0) { + perror(filename); + return -1; + } + if (read(fd, e_ident, sizeof(e_ident)) != sizeof(e_ident)) + goto fail; + if (e_ident[0] != ELFMAG0 || + e_ident[1] != ELFMAG1 || + e_ident[2] != ELFMAG2 || + e_ident[3] != ELFMAG3) { + ret = ELF_LOAD_NOT_ELF; + goto fail; + } +#ifdef HOST_WORDS_BIGENDIAN + data_order = ELFDATA2MSB; +#else + data_order = ELFDATA2LSB; +#endif + must_swab = data_order != e_ident[EI_DATA]; + if (big_endian) { + target_data_order = ELFDATA2MSB; + } else { + target_data_order = ELFDATA2LSB; + } + + if (target_data_order != e_ident[EI_DATA]) { + ret = ELF_LOAD_WRONG_ENDIAN; + goto fail; + } + + lseek(fd, 0, SEEK_SET); + if (e_ident[EI_CLASS] == ELFCLASS64) { + ret = load_elf64(filename, fd, elf_note_fn, + 
translate_fn, translate_opaque, must_swab, + pentry, lowaddr, highaddr, pflags, elf_machine, + clear_lsb, data_swab, as, load_rom, sym_cb); + } else { + ret = load_elf32(filename, fd, elf_note_fn, + translate_fn, translate_opaque, must_swab, + pentry, lowaddr, highaddr, pflags, elf_machine, + clear_lsb, data_swab, as, load_rom, sym_cb); + } + + fail: + close(fd); + return ret; +} + +static void bswap_uboot_header(uboot_image_header_t *hdr) +{ +#ifndef HOST_WORDS_BIGENDIAN + bswap32s(&hdr->ih_magic); + bswap32s(&hdr->ih_hcrc); + bswap32s(&hdr->ih_time); + bswap32s(&hdr->ih_size); + bswap32s(&hdr->ih_load); + bswap32s(&hdr->ih_ep); + bswap32s(&hdr->ih_dcrc); +#endif +} + + +#define ZALLOC_ALIGNMENT 16 + +static void *zalloc(void *x, unsigned items, unsigned size) +{ + void *p; + + size *= items; + size = (size + ZALLOC_ALIGNMENT - 1) & ~(ZALLOC_ALIGNMENT - 1); + + p = g_malloc(size); + + return (p); +} + +static void zfree(void *x, void *addr) +{ + g_free(addr); +} + + +#define HEAD_CRC 2 +#define EXTRA_FIELD 4 +#define ORIG_NAME 8 +#define COMMENT 0x10 +#define RESERVED 0xe0 + +#define DEFLATED 8 + +ssize_t gunzip(void *dst, size_t dstlen, uint8_t *src, size_t srclen) +{ + z_stream s; + ssize_t dstbytes; + int r, i, flags; + + /* skip header */ + i = 10; + if (srclen < 4) { + goto toosmall; + } + flags = src[3]; + if (src[2] != DEFLATED || (flags & RESERVED) != 0) { + puts ("Error: Bad gzipped data\n"); + return -1; + } + if ((flags & EXTRA_FIELD) != 0) { + if (srclen < 12) { + goto toosmall; + } + i = 12 + src[10] + (src[11] << 8); + } + if ((flags & ORIG_NAME) != 0) { + while (i < srclen && src[i++] != 0) { + /* do nothing */ + } + } + if ((flags & COMMENT) != 0) { + while (i < srclen && src[i++] != 0) { + /* do nothing */ + } + } + if ((flags & HEAD_CRC) != 0) { + i += 2; + } + if (i >= srclen) { + goto toosmall; + } + + s.zalloc = zalloc; + s.zfree = zfree; + + r = inflateInit2(&s, -MAX_WBITS); + if (r != Z_OK) { + printf ("Error: inflateInit2() returned %d\n", r); + return (-1); + } + s.next_in = src + i; + s.avail_in = srclen - i; + s.next_out = dst; + s.avail_out = dstlen; + r = inflate(&s, Z_FINISH); + if (r != Z_OK && r != Z_STREAM_END) { + printf ("Error: inflate() returned %d\n", r); + return -1; + } + dstbytes = s.next_out - (unsigned char *) dst; + inflateEnd(&s); + + return dstbytes; + +toosmall: + puts("Error: gunzip out of data in header\n"); + return -1; +} + +/* Load a U-Boot image. */ +static int load_uboot_image(const char *filename, hwaddr *ep, hwaddr *loadaddr, + int *is_linux, uint8_t image_type, + uint64_t (*translate_fn)(void *, uint64_t), + void *translate_opaque, AddressSpace *as) +{ + int fd; + int size; + hwaddr address; + uboot_image_header_t h; + uboot_image_header_t *hdr = &h; + uint8_t *data = NULL; + int ret = -1; + int do_uncompress = 0; + + fd = open(filename, O_RDONLY | O_BINARY); + if (fd < 0) + return -1; + + size = read(fd, hdr, sizeof(uboot_image_header_t)); + if (size < sizeof(uboot_image_header_t)) { + goto out; + } + + bswap_uboot_header(hdr); + + if (hdr->ih_magic != IH_MAGIC) + goto out; + + if (hdr->ih_type != image_type) { + if (!(image_type == IH_TYPE_KERNEL && + hdr->ih_type == IH_TYPE_KERNEL_NOLOAD)) { + fprintf(stderr, "Wrong image type %d, expected %d\n", hdr->ih_type, + image_type); + goto out; + } + } + + /* TODO: Implement other image types. 
*/ + switch (hdr->ih_type) { + case IH_TYPE_KERNEL_NOLOAD: + if (!loadaddr || *loadaddr == LOAD_UIMAGE_LOADADDR_INVALID) { + fprintf(stderr, "this image format (kernel_noload) cannot be " + "loaded on this machine type"); + goto out; + } + + hdr->ih_load = *loadaddr + sizeof(*hdr); + hdr->ih_ep += hdr->ih_load; + /* fall through */ + case IH_TYPE_KERNEL: + address = hdr->ih_load; + if (translate_fn) { + address = translate_fn(translate_opaque, address); + } + if (loadaddr) { + *loadaddr = hdr->ih_load; + } + + switch (hdr->ih_comp) { + case IH_COMP_NONE: + break; + case IH_COMP_GZIP: + do_uncompress = 1; + break; + default: + fprintf(stderr, + "Unable to load u-boot images with compression type %d\n", + hdr->ih_comp); + goto out; + } + + if (ep) { + *ep = hdr->ih_ep; + } + + /* TODO: Check CPU type. */ + if (is_linux) { + if (hdr->ih_os == IH_OS_LINUX) { + *is_linux = 1; + } else { + *is_linux = 0; + } + } + + break; + case IH_TYPE_RAMDISK: + address = *loadaddr; + break; + default: + fprintf(stderr, "Unsupported u-boot image type %d\n", hdr->ih_type); + goto out; + } + + data = g_malloc(hdr->ih_size); + + if (read(fd, data, hdr->ih_size) != hdr->ih_size) { + fprintf(stderr, "Error reading file\n"); + goto out; + } + + if (do_uncompress) { + uint8_t *compressed_data; + size_t max_bytes; + ssize_t bytes; + + compressed_data = data; + max_bytes = UBOOT_MAX_GUNZIP_BYTES; + data = g_malloc(max_bytes); + + bytes = gunzip(data, max_bytes, compressed_data, hdr->ih_size); + g_free(compressed_data); + if (bytes < 0) { + fprintf(stderr, "Unable to decompress gzipped image!\n"); + goto out; + } + hdr->ih_size = bytes; + } + + rom_add_blob_fixed_as(filename, data, hdr->ih_size, address, as); + + ret = hdr->ih_size; + +out: + g_free(data); + close(fd); + return ret; +} + +int load_uimage(const char *filename, hwaddr *ep, hwaddr *loadaddr, + int *is_linux, + uint64_t (*translate_fn)(void *, uint64_t), + void *translate_opaque) +{ + return load_uboot_image(filename, ep, loadaddr, is_linux, IH_TYPE_KERNEL, + translate_fn, translate_opaque, NULL); +} + +int load_uimage_as(const char *filename, hwaddr *ep, hwaddr *loadaddr, + int *is_linux, + uint64_t (*translate_fn)(void *, uint64_t), + void *translate_opaque, AddressSpace *as) +{ + return load_uboot_image(filename, ep, loadaddr, is_linux, IH_TYPE_KERNEL, + translate_fn, translate_opaque, as); +} + +/* Load a ramdisk. */ +int load_ramdisk(const char *filename, hwaddr addr, uint64_t max_sz) +{ + return load_ramdisk_as(filename, addr, max_sz, NULL); +} + +int load_ramdisk_as(const char *filename, hwaddr addr, uint64_t max_sz, + AddressSpace *as) +{ + return load_uboot_image(filename, NULL, &addr, NULL, IH_TYPE_RAMDISK, + NULL, NULL, as); +} + +/* Load a gzip-compressed kernel to a dynamically allocated buffer. */ +int load_image_gzipped_buffer(const char *filename, uint64_t max_sz, + uint8_t **buffer) +{ + uint8_t *compressed_data = NULL; + uint8_t *data = NULL; + gsize len; + ssize_t bytes; + int ret = -1; + + if (!g_file_get_contents(filename, (char **) &compressed_data, &len, + NULL)) { + goto out; + } + + /* Is it a gzip-compressed file? 
*/ + if (len < 2 || + compressed_data[0] != 0x1f || + compressed_data[1] != 0x8b) { + goto out; + } + + if (max_sz > LOAD_IMAGE_MAX_GUNZIP_BYTES) { + max_sz = LOAD_IMAGE_MAX_GUNZIP_BYTES; + } + + data = g_malloc(max_sz); + bytes = gunzip(data, max_sz, compressed_data, len); + if (bytes < 0) { + fprintf(stderr, "%s: unable to decompress gzipped kernel file\n", + filename); + goto out; + } + + /* trim to actual size and return to caller */ + *buffer = g_realloc(data, bytes); + ret = bytes; + /* ownership has been transferred to caller */ + data = NULL; + + out: + g_free(compressed_data); + g_free(data); + return ret; +} + +/* Load a gzip-compressed kernel. */ +int load_image_gzipped(const char *filename, hwaddr addr, uint64_t max_sz) +{ + int bytes; + uint8_t *data; + + bytes = load_image_gzipped_buffer(filename, max_sz, &data); + if (bytes != -1) { + rom_add_blob_fixed(filename, data, bytes, addr); + g_free(data); + } + return bytes; +} + +/* + * Functions for reboot-persistent memory regions. + * - used for vga bios and option roms. + * - also linux kernel (-kernel / -initrd). + */ + +typedef struct Rom Rom; + +struct Rom { + char *name; + char *path; + + /* datasize is the amount of memory allocated in "data". If datasize is less + * than romsize, it means that the area from datasize to romsize is filled + * with zeros. + */ + size_t romsize; + size_t datasize; + + uint8_t *data; + MemoryRegion *mr; + AddressSpace *as; + int isrom; + char *fw_dir; + char *fw_file; + GMappedFile *mapped_file; + + bool committed; + + hwaddr addr; + QTAILQ_ENTRY(Rom) next; +}; + +static FWCfgState *fw_cfg; +static QTAILQ_HEAD(, Rom) roms = QTAILQ_HEAD_INITIALIZER(roms); + +/* + * rom->data can be heap-allocated or memory-mapped (e.g. when added with + * rom_add_elf_program()) + */ +static void rom_free_data(Rom *rom) +{ + if (rom->mapped_file) { + g_mapped_file_unref(rom->mapped_file); + rom->mapped_file = NULL; + } else { + g_free(rom->data); + } + + rom->data = NULL; +} + +static void rom_free(Rom *rom) +{ + rom_free_data(rom); + g_free(rom->path); + g_free(rom->name); + g_free(rom->fw_dir); + g_free(rom->fw_file); + g_free(rom); +} + +static inline bool rom_order_compare(Rom *rom, Rom *item) +{ + return ((uintptr_t)(void *)rom->as > (uintptr_t)(void *)item->as) || + (rom->as == item->as && rom->addr >= item->addr); +} + +static void rom_insert(Rom *rom) +{ + Rom *item; + + if (roms_loaded) { + hw_error ("ROM images must be loaded at startup\n"); + } + + /* The user didn't specify an address space, this is the default */ + if (!rom->as) { + rom->as = &address_space_memory; + } + + rom->committed = false; + + /* List is ordered by load address in the same address space */ + QTAILQ_FOREACH(item, &roms, next) { + if (rom_order_compare(rom, item)) { + continue; + } + QTAILQ_INSERT_BEFORE(item, rom, next); + return; + } + QTAILQ_INSERT_TAIL(&roms, rom, next); +} + +static void fw_cfg_resized(const char *id, uint64_t length, void *host) +{ + if (fw_cfg) { + fw_cfg_modify_file(fw_cfg, id + strlen("/rom@"), host, length); + } +} + +static void *rom_set_mr(Rom *rom, Object *owner, const char *name, bool ro) +{ + void *data; + + rom->mr = g_malloc(sizeof(*rom->mr)); + memory_region_init_resizeable_ram(rom->mr, owner, name, + rom->datasize, rom->romsize, + fw_cfg_resized, + &error_fatal); + memory_region_set_readonly(rom->mr, ro); + vmstate_register_ram_global(rom->mr); + + data = memory_region_get_ram_ptr(rom->mr); + memcpy(data, rom->data, rom->datasize); + + return data; +} + +int rom_add_file(const char *file, 
const char *fw_dir, + hwaddr addr, int32_t bootindex, + bool option_rom, MemoryRegion *mr, + AddressSpace *as) +{ + MachineClass *mc = MACHINE_GET_CLASS(qdev_get_machine()); + Rom *rom; + int rc, fd = -1; + char devpath[100]; + + if (as && mr) { + fprintf(stderr, "Specifying an Address Space and Memory Region is " \ + "not valid when loading a rom\n"); + /* We haven't allocated anything so we don't need any cleanup */ + return -1; + } + + rom = g_malloc0(sizeof(*rom)); + rom->name = g_strdup(file); + rom->path = qemu_find_file(QEMU_FILE_TYPE_BIOS, rom->name); + rom->as = as; + if (rom->path == NULL) { + rom->path = g_strdup(file); + } + + fd = open(rom->path, O_RDONLY | O_BINARY); + if (fd == -1) { + fprintf(stderr, "Could not open option rom '%s': %s\n", + rom->path, strerror(errno)); + goto err; + } + + if (fw_dir) { + rom->fw_dir = g_strdup(fw_dir); + rom->fw_file = g_strdup(file); + } + rom->addr = addr; + rom->romsize = lseek(fd, 0, SEEK_END); + if (rom->romsize == -1) { + fprintf(stderr, "rom: file %-20s: get size error: %s\n", + rom->name, strerror(errno)); + goto err; + } + + rom->datasize = rom->romsize; + rom->data = g_malloc0(rom->datasize); + lseek(fd, 0, SEEK_SET); + rc = read(fd, rom->data, rom->datasize); + if (rc != rom->datasize) { + fprintf(stderr, "rom: file %-20s: read error: rc=%d (expected %zd)\n", + rom->name, rc, rom->datasize); + goto err; + } + close(fd); + rom_insert(rom); + if (rom->fw_file && fw_cfg) { + const char *basename; + char fw_file_name[FW_CFG_MAX_FILE_PATH]; + void *data; + + basename = strrchr(rom->fw_file, '/'); + if (basename) { + basename++; + } else { + basename = rom->fw_file; + } + snprintf(fw_file_name, sizeof(fw_file_name), "%s/%s", rom->fw_dir, + basename); + snprintf(devpath, sizeof(devpath), "/rom@%s", fw_file_name); + + if ((!option_rom || mc->option_rom_has_mr) && mc->rom_file_has_mr) { + data = rom_set_mr(rom, OBJECT(fw_cfg), devpath, true); + } else { + data = rom->data; + } + + fw_cfg_add_file(fw_cfg, fw_file_name, data, rom->romsize); + } else { + if (mr) { + rom->mr = mr; + snprintf(devpath, sizeof(devpath), "/rom@%s", file); + } else { + snprintf(devpath, sizeof(devpath), "/rom@" TARGET_FMT_plx, addr); + } + } + + add_boot_device_path(bootindex, NULL, devpath); + return 0; + +err: + if (fd != -1) + close(fd); + + rom_free(rom); + return -1; +} + +MemoryRegion *rom_add_blob(const char *name, const void *blob, size_t len, + size_t max_len, hwaddr addr, const char *fw_file_name, + FWCfgCallback fw_callback, void *callback_opaque, + AddressSpace *as, bool read_only) +{ + MachineClass *mc = MACHINE_GET_CLASS(qdev_get_machine()); + Rom *rom; + MemoryRegion *mr = NULL; + + rom = g_malloc0(sizeof(*rom)); + rom->name = g_strdup(name); + rom->as = as; + rom->addr = addr; + rom->romsize = max_len ? 
max_len : len; + rom->datasize = len; + g_assert(rom->romsize >= rom->datasize); + rom->data = g_malloc0(rom->datasize); + memcpy(rom->data, blob, len); + rom_insert(rom); + if (fw_file_name && fw_cfg) { + char devpath[100]; + void *data; + + if (read_only) { + snprintf(devpath, sizeof(devpath), "/rom@%s", fw_file_name); + } else { + snprintf(devpath, sizeof(devpath), "/ram@%s", fw_file_name); + } + + if (mc->rom_file_has_mr) { + data = rom_set_mr(rom, OBJECT(fw_cfg), devpath, read_only); + mr = rom->mr; + } else { + data = rom->data; + } + + fw_cfg_add_file_callback(fw_cfg, fw_file_name, + fw_callback, NULL, callback_opaque, + data, rom->datasize, read_only); + } + return mr; +} + +/* This function is specific for elf program because we don't need to allocate + * all the rom. We just allocate the first part and the rest is just zeros. This + * is why romsize and datasize are different. Also, this function takes its own + * reference to "mapped_file", so we don't have to allocate and copy the buffer. + */ +int rom_add_elf_program(const char *name, GMappedFile *mapped_file, void *data, + size_t datasize, size_t romsize, hwaddr addr, + AddressSpace *as) +{ + Rom *rom; + + rom = g_malloc0(sizeof(*rom)); + rom->name = g_strdup(name); + rom->addr = addr; + rom->datasize = datasize; + rom->romsize = romsize; + rom->data = data; + rom->as = as; + + if (mapped_file && data) { + g_mapped_file_ref(mapped_file); + rom->mapped_file = mapped_file; + } + + rom_insert(rom); + return 0; +} + +int rom_add_vga(const char *file) +{ + return rom_add_file(file, "vgaroms", 0, -1, true, NULL, NULL); +} + +int rom_add_option(const char *file, int32_t bootindex) +{ + return rom_add_file(file, "genroms", 0, bootindex, true, NULL, NULL); +} + +static void rom_reset(void *unused) +{ + Rom *rom; + + QTAILQ_FOREACH(rom, &roms, next) { + if (rom->fw_file) { + continue; + } + /* + * We don't need to fill in the RAM with ROM data because we'll fill + * the data in during the next incoming migration in all cases. Note + * that some of those RAMs can actually be modified by the guest. + */ + if (runstate_check(RUN_STATE_INMIGRATE)) { + if (rom->data && rom->isrom) { + /* + * Free it so that a rom_reset after migration doesn't + * overwrite a potentially modified 'rom'. + */ + rom_free_data(rom); + } + continue; + } + + if (rom->data == NULL) { + continue; + } + if (rom->mr) { + void *host = memory_region_get_ram_ptr(rom->mr); + memcpy(host, rom->data, rom->datasize); + } else { + address_space_write_rom(rom->as, rom->addr, MEMTXATTRS_UNSPECIFIED, + rom->data, rom->datasize); + } + if (rom->isrom) { + /* rom needs to be written only once */ + rom_free_data(rom); + } + /* + * The rom loader is really on the same level as firmware in the guest + * shadowing a ROM into RAM. Such a shadowing mechanism needs to ensure + * that the instruction cache for that new region is clear, so that the + * CPU definitely fetches its instructions from the just written data. + */ + cpu_flush_icache_range(rom->addr, rom->datasize); + + trace_loader_write_rom(rom->name, rom->addr, rom->datasize, rom->isrom); + } +} + +/* Return true if two consecutive ROMs in the ROM list overlap */ +static bool roms_overlap(Rom *last_rom, Rom *this_rom) +{ + if (!last_rom) { + return false; + } + return last_rom->as == this_rom->as && + last_rom->addr + last_rom->romsize > this_rom->addr; +} + +static const char *rom_as_name(Rom *rom) +{ + const char *name = rom->as ? 
rom->as->name : NULL; + return name ?: "anonymous"; +} + +static void rom_print_overlap_error_header(void) +{ + error_report("Some ROM regions are overlapping"); + error_printf( + "These ROM regions might have been loaded by " + "direct user request or by default.\n" + "They could be BIOS/firmware images, a guest kernel, " + "initrd or some other file loaded into guest memory.\n" + "Check whether you intended to load all this guest code, and " + "whether it has been built to load to the correct addresses.\n"); +} + +static void rom_print_one_overlap_error(Rom *last_rom, Rom *rom) +{ + error_printf( + "\nThe following two regions overlap (in the %s address space):\n", + rom_as_name(rom)); + error_printf( + " %s (addresses 0x" TARGET_FMT_plx " - 0x" TARGET_FMT_plx ")\n", + last_rom->name, last_rom->addr, last_rom->addr + last_rom->romsize); + error_printf( + " %s (addresses 0x" TARGET_FMT_plx " - 0x" TARGET_FMT_plx ")\n", + rom->name, rom->addr, rom->addr + rom->romsize); +} + +int rom_check_and_register_reset(void) +{ + MemoryRegionSection section; + Rom *rom, *last_rom = NULL; + bool found_overlap = false; + + QTAILQ_FOREACH(rom, &roms, next) { + if (rom->fw_file) { + continue; + } + if (!rom->mr) { + if (roms_overlap(last_rom, rom)) { + if (!found_overlap) { + found_overlap = true; + rom_print_overlap_error_header(); + } + rom_print_one_overlap_error(last_rom, rom); + /* Keep going through the list so we report all overlaps */ + } + last_rom = rom; + } + section = memory_region_find(rom->mr ? rom->mr : get_system_memory(), + rom->addr, 1); + rom->isrom = int128_nz(section.size) && memory_region_is_rom(section.mr); + memory_region_unref(section.mr); + } + if (found_overlap) { + return -1; + } + + qemu_register_reset(rom_reset, NULL); + roms_loaded = 1; + return 0; +} + +void rom_set_fw(FWCfgState *f) +{ + fw_cfg = f; +} + +void rom_set_order_override(int order) +{ + if (!fw_cfg) + return; + fw_cfg_set_order_override(fw_cfg, order); +} + +void rom_reset_order_override(void) +{ + if (!fw_cfg) + return; + fw_cfg_reset_order_override(fw_cfg); +} + +void rom_transaction_begin(void) +{ + Rom *rom; + + /* Ignore ROMs added without the transaction API */ + QTAILQ_FOREACH(rom, &roms, next) { + rom->committed = true; + } +} + +void rom_transaction_end(bool commit) +{ + Rom *rom; + Rom *tmp; + + QTAILQ_FOREACH_SAFE(rom, &roms, next, tmp) { + if (rom->committed) { + continue; + } + if (commit) { + rom->committed = true; + } else { + QTAILQ_REMOVE(&roms, rom, next); + rom_free(rom); + } + } +} + +static Rom *find_rom(hwaddr addr, size_t size) +{ + Rom *rom; + + QTAILQ_FOREACH(rom, &roms, next) { + if (rom->fw_file) { + continue; + } + if (rom->mr) { + continue; + } + if (rom->addr > addr) { + continue; + } + if (rom->addr + rom->romsize < addr + size) { + continue; + } + return rom; + } + return NULL; +} + +/* + * Copies memory from registered ROMs to dest. Any memory that is contained in + * a ROM between addr and addr + size is copied. Note that this can involve + * multiple ROMs, which need not start at addr and need not end at addr + size. 
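+ *
+ * Editor's sketch (hypothetical caller and values, not part of this patch):
+ * a board model wanting a private copy of a 64 KiB firmware window could do
+ *
+ *     uint8_t buf[0x10000];
+ *     int copied = rom_copy(buf, 0xffff0000, sizeof(buf));
+ *
+ * and treat copied < sizeof(buf) as "window not fully covered by ROM blobs".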
+ */ +int rom_copy(uint8_t *dest, hwaddr addr, size_t size) +{ + hwaddr end = addr + size; + uint8_t *s, *d = dest; + size_t l = 0; + Rom *rom; + + QTAILQ_FOREACH(rom, &roms, next) { + if (rom->fw_file) { + continue; + } + if (rom->mr) { + continue; + } + if (rom->addr + rom->romsize < addr) { + continue; + } + if (rom->addr > end || rom->addr < addr) { + break; + } + + d = dest + (rom->addr - addr); + s = rom->data; + l = rom->datasize; + + if ((d + l) > (dest + size)) { + l = dest - d; + } + + if (l > 0) { + memcpy(d, s, l); + } + + if (rom->romsize > rom->datasize) { + /* If datasize is less than romsize, it means that we didn't + * allocate all the ROM because the trailing data are only zeros. + */ + + d += l; + l = rom->romsize - rom->datasize; + + if ((d + l) > (dest + size)) { + /* Rom size doesn't fit in the destination area. Adjust to avoid + * overflow. + */ + l = dest - d; + } + + if (l > 0) { + memset(d, 0x0, l); + } + } + } + + return (d + l) - dest; +} + +void *rom_ptr(hwaddr addr, size_t size) +{ + Rom *rom; + + rom = find_rom(addr, size); + if (!rom || !rom->data) + return NULL; + return rom->data + (addr - rom->addr); +} + +typedef struct FindRomCBData { + size_t size; /* Amount of data we want from ROM, in bytes */ + MemoryRegion *mr; /* MR at the unaliased guest addr */ + hwaddr xlat; /* Offset of addr within mr */ + void *rom; /* Output: rom data pointer, if found */ +} FindRomCBData; + +static bool find_rom_cb(Int128 start, Int128 len, const MemoryRegion *mr, + hwaddr offset_in_region, void *opaque) +{ + FindRomCBData *cbdata = opaque; + hwaddr alias_addr; + + if (mr != cbdata->mr) { + return false; + } + + alias_addr = int128_get64(start) + cbdata->xlat - offset_in_region; + cbdata->rom = rom_ptr(alias_addr, cbdata->size); + if (!cbdata->rom) { + return false; + } + /* Found a match, stop iterating */ + return true; +} + +void *rom_ptr_for_as(AddressSpace *as, hwaddr addr, size_t size) +{ + /* + * Find any ROM data for the given guest address range. If there + * is a ROM blob then return a pointer to the host memory + * corresponding to 'addr'; otherwise return NULL. + * + * We look not only for ROM blobs that were loaded directly to + * addr, but also for ROM blobs that were loaded to aliases of + * that memory at other addresses within the AddressSpace. + * + * Note that we do not check @as against the 'as' member in the + * 'struct Rom' returned by rom_ptr(). The Rom::as is the + * AddressSpace which the rom blob should be written to, whereas + * our @as argument is the AddressSpace which we are (effectively) + * reading from, and the same underlying RAM will often be visible + * in multiple AddressSpaces. (A common example is a ROM blob + * written to the 'system' address space but then read back via a + * CPU's cpu->as pointer.) This does mean we might potentially + * return a false-positive match if a ROM blob was loaded into an + * AS which is entirely separate and distinct from the one we're + * querying, but this issue exists also for rom_ptr() and hasn't + * caused any problems in practice. 
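+ *
+ * Editor's sketch (hypothetical caller and values, not part of this
+ * patch): a CPU reset hook reading a vector that firmware loaded at an
+ * aliased address might do
+ *
+ *     uint32_t *p = rom_ptr_for_as(cs->as, vector_addr, sizeof(*p));
+ *     if (p) {
+ *         initial_pc = ldl_p(p);
+ *     }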
+ */ + FlatView *fv; + void *rom; + hwaddr len_unused; + FindRomCBData cbdata = {}; + + /* Easy case: there's data at the actual address */ + rom = rom_ptr(addr, size); + if (rom) { + return rom; + } + + RCU_READ_LOCK_GUARD(); + + fv = address_space_to_flatview(as); + cbdata.mr = flatview_translate(fv, addr, &cbdata.xlat, &len_unused, + false, MEMTXATTRS_UNSPECIFIED); + if (!cbdata.mr) { + /* Nothing at this address, so there can't be any aliasing */ + return NULL; + } + cbdata.size = size; + flatview_for_each_range(fv, find_rom_cb, &cbdata); + return cbdata.rom; +} + +HumanReadableText *qmp_x_query_roms(Error **errp) +{ + Rom *rom; + g_autoptr(GString) buf = g_string_new(""); + + QTAILQ_FOREACH(rom, &roms, next) { + if (rom->mr) { + g_string_append_printf(buf, "%s" + " size=0x%06zx name=\"%s\"\n", + memory_region_name(rom->mr), + rom->romsize, + rom->name); + } else if (!rom->fw_file) { + g_string_append_printf(buf, "addr=" TARGET_FMT_plx + " size=0x%06zx mem=%s name=\"%s\"\n", + rom->addr, rom->romsize, + rom->isrom ? "rom" : "ram", + rom->name); + } else { + g_string_append_printf(buf, "fw=%s/%s" + " size=0x%06zx name=\"%s\"\n", + rom->fw_dir, + rom->fw_file, + rom->romsize, + rom->name); + } + } + + return human_readable_text_from_str(buf); +} + +typedef enum HexRecord HexRecord; +enum HexRecord { + DATA_RECORD = 0, + EOF_RECORD, + EXT_SEG_ADDR_RECORD, + START_SEG_ADDR_RECORD, + EXT_LINEAR_ADDR_RECORD, + START_LINEAR_ADDR_RECORD, +}; + +/* Each record contains a 16-bit address which is combined with the upper 16 + * bits of the implicit "next address" to form a 32-bit address. + */ +#define NEXT_ADDR_MASK 0xffff0000 + +#define DATA_FIELD_MAX_LEN 0xff +#define LEN_EXCEPT_DATA 0x5 +/* 0x5 = sizeof(byte_count) + sizeof(address) + sizeof(record_type) + + * sizeof(checksum) */ +typedef struct { + uint8_t byte_count; + uint16_t address; + uint8_t record_type; + uint8_t data[DATA_FIELD_MAX_LEN]; + uint8_t checksum; +} HexLine; + +/* return 0 or -1 if error */ +static bool parse_record(HexLine *line, uint8_t *our_checksum, const uint8_t c, + uint32_t *index, const bool in_process) +{ + /* +-------+---------------+-------+---------------------+--------+ + * | byte | |record | | | + * | count | address | type | data |checksum| + * +-------+---------------+-------+---------------------+--------+ + * ^ ^ ^ ^ ^ ^ + * |1 byte | 2 bytes |1 byte | 0-255 bytes | 1 byte | + */ + uint8_t value = 0; + uint32_t idx = *index; + /* ignore space */ + if (g_ascii_isspace(c)) { + return true; + } + if (!g_ascii_isxdigit(c) || !in_process) { + return false; + } + value = g_ascii_xdigit_value(c); + value = (idx & 0x1) ? 
(value & 0xf) : (value << 4); + if (idx < 2) { + line->byte_count |= value; + } else if (2 <= idx && idx < 6) { + line->address <<= 4; + line->address += g_ascii_xdigit_value(c); + } else if (6 <= idx && idx < 8) { + line->record_type |= value; + } else if (8 <= idx && idx < 8 + 2 * line->byte_count) { + line->data[(idx - 8) >> 1] |= value; + } else if (8 + 2 * line->byte_count <= idx && + idx < 10 + 2 * line->byte_count) { + line->checksum |= value; + } else { + return false; + } + *our_checksum += value; + ++(*index); + return true; +} + +typedef struct { + const char *filename; + HexLine line; + uint8_t *bin_buf; + hwaddr *start_addr; + int total_size; + uint32_t next_address_to_write; + uint32_t current_address; + uint32_t current_rom_index; + uint32_t rom_start_address; + AddressSpace *as; + bool complete; +} HexParser; + +/* return size or -1 if error */ +static int handle_record_type(HexParser *parser) +{ + HexLine *line = &(parser->line); + switch (line->record_type) { + case DATA_RECORD: + parser->current_address = + (parser->next_address_to_write & NEXT_ADDR_MASK) | line->address; + /* verify this is a contiguous block of memory */ + if (parser->current_address != parser->next_address_to_write) { + if (parser->current_rom_index != 0) { + rom_add_blob_fixed_as(parser->filename, parser->bin_buf, + parser->current_rom_index, + parser->rom_start_address, parser->as); + } + parser->rom_start_address = parser->current_address; + parser->current_rom_index = 0; + } + + /* copy from line buffer to output bin_buf */ + memcpy(parser->bin_buf + parser->current_rom_index, line->data, + line->byte_count); + parser->current_rom_index += line->byte_count; + parser->total_size += line->byte_count; + /* save next address to write */ + parser->next_address_to_write = + parser->current_address + line->byte_count; + break; + + case EOF_RECORD: + if (parser->current_rom_index != 0) { + rom_add_blob_fixed_as(parser->filename, parser->bin_buf, + parser->current_rom_index, + parser->rom_start_address, parser->as); + } + parser->complete = true; + return parser->total_size; + case EXT_SEG_ADDR_RECORD: + case EXT_LINEAR_ADDR_RECORD: + if (line->byte_count != 2 && line->address != 0) { + return -1; + } + + if (parser->current_rom_index != 0) { + rom_add_blob_fixed_as(parser->filename, parser->bin_buf, + parser->current_rom_index, + parser->rom_start_address, parser->as); + } + + /* save next address to write, + * in case of non-contiguous block of memory */ + parser->next_address_to_write = (line->data[0] << 12) | + (line->data[1] << 4); + if (line->record_type == EXT_LINEAR_ADDR_RECORD) { + parser->next_address_to_write <<= 12; + } + + parser->rom_start_address = parser->next_address_to_write; + parser->current_rom_index = 0; + break; + + case START_SEG_ADDR_RECORD: + if (line->byte_count != 4 && line->address != 0) { + return -1; + } + + /* x86 16-bit CS:IP segmented addressing */ + *(parser->start_addr) = (((line->data[0] << 8) | line->data[1]) << 4) + + ((line->data[2] << 8) | line->data[3]); + break; + + case START_LINEAR_ADDR_RECORD: + if (line->byte_count != 4 && line->address != 0) { + return -1; + } + + *(parser->start_addr) = ldl_be_p(line->data); + break; + + default: + return -1; + } + + return parser->total_size; +} + +/* return size or -1 if error */ +static int parse_hex_blob(const char *filename, hwaddr *addr, uint8_t *hex_blob, + size_t hex_blob_size, AddressSpace *as) +{ + bool in_process = false; /* avoid re-enter and + * check whether record begin with ':' */ + uint8_t *end = hex_blob + 
hex_blob_size; + uint8_t our_checksum = 0; + uint32_t record_index = 0; + HexParser parser = { + .filename = filename, + .bin_buf = g_malloc(hex_blob_size), + .start_addr = addr, + .as = as, + .complete = false + }; + + rom_transaction_begin(); + + for (; hex_blob < end && !parser.complete; ++hex_blob) { + switch (*hex_blob) { + case '\r': + case '\n': + if (!in_process) { + break; + } + + in_process = false; + if ((LEN_EXCEPT_DATA + parser.line.byte_count) * 2 != + record_index || + our_checksum != 0) { + parser.total_size = -1; + goto out; + } + + if (handle_record_type(&parser) == -1) { + parser.total_size = -1; + goto out; + } + break; + + /* start of a new record. */ + case ':': + memset(&parser.line, 0, sizeof(HexLine)); + in_process = true; + record_index = 0; + break; + + /* decoding lines */ + default: + if (!parse_record(&parser.line, &our_checksum, *hex_blob, + &record_index, in_process)) { + parser.total_size = -1; + goto out; + } + break; + } + } + +out: + g_free(parser.bin_buf); + rom_transaction_end(parser.total_size != -1); + return parser.total_size; +} + +/* return size or -1 if error */ +int load_targphys_hex_as(const char *filename, hwaddr *entry, AddressSpace *as) +{ + gsize hex_blob_size; + gchar *hex_blob; + int total_size = 0; + + if (!g_file_get_contents(filename, &hex_blob, &hex_blob_size, NULL)) { + return -1; + } + + total_size = parse_hex_blob(filename, entry, (uint8_t *)hex_blob, + hex_blob_size, as); + + g_free(hex_blob); + return total_size; +} diff --git a/hw/core/machine-hmp-cmds.c b/hw/core/machine-hmp-cmds.c new file mode 100644 index 000000000..4e2f319ae --- /dev/null +++ b/hw/core/machine-hmp-cmds.c @@ -0,0 +1,132 @@ +/* + * HMP commands related to machines and CPUs + * + * Copyright IBM, Corp. 2011 + * + * Authors: + * Anthony Liguori + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + * + * Contributions after 2012-01-13 are licensed under the terms of the + * GNU GPL, version 2 or (at your option) any later version. 
+ */ + +#include "qemu/osdep.h" +#include "monitor/hmp.h" +#include "monitor/monitor.h" +#include "qapi/error.h" +#include "qapi/qapi-builtin-visit.h" +#include "qapi/qapi-commands-machine.h" +#include "qapi/qmp/qdict.h" +#include "qapi/string-output-visitor.h" +#include "qemu/error-report.h" +#include "sysemu/numa.h" +#include "hw/boards.h" + +void hmp_info_cpus(Monitor *mon, const QDict *qdict) +{ + CpuInfoFastList *cpu_list, *cpu; + + cpu_list = qmp_query_cpus_fast(NULL); + + for (cpu = cpu_list; cpu; cpu = cpu->next) { + int active = ' '; + + if (cpu->value->cpu_index == monitor_get_cpu_index(mon)) { + active = '*'; + } + + monitor_printf(mon, "%c CPU #%" PRId64 ":", active, + cpu->value->cpu_index); + monitor_printf(mon, " thread_id=%" PRId64 "\n", cpu->value->thread_id); + } + + qapi_free_CpuInfoFastList(cpu_list); +} + +void hmp_hotpluggable_cpus(Monitor *mon, const QDict *qdict) +{ + Error *err = NULL; + HotpluggableCPUList *l = qmp_query_hotpluggable_cpus(&err); + HotpluggableCPUList *saved = l; + CpuInstanceProperties *c; + + if (hmp_handle_error(mon, err)) { + return; + } + + monitor_printf(mon, "Hotpluggable CPUs:\n"); + while (l) { + monitor_printf(mon, " type: \"%s\"\n", l->value->type); + monitor_printf(mon, " vcpus_count: \"%" PRIu64 "\"\n", + l->value->vcpus_count); + if (l->value->has_qom_path) { + monitor_printf(mon, " qom_path: \"%s\"\n", l->value->qom_path); + } + + c = l->value->props; + monitor_printf(mon, " CPUInstance Properties:\n"); + if (c->has_node_id) { + monitor_printf(mon, " node-id: \"%" PRIu64 "\"\n", c->node_id); + } + if (c->has_socket_id) { + monitor_printf(mon, " socket-id: \"%" PRIu64 "\"\n", c->socket_id); + } + if (c->has_die_id) { + monitor_printf(mon, " die-id: \"%" PRIu64 "\"\n", c->die_id); + } + if (c->has_core_id) { + monitor_printf(mon, " core-id: \"%" PRIu64 "\"\n", c->core_id); + } + if (c->has_thread_id) { + monitor_printf(mon, " thread-id: \"%" PRIu64 "\"\n", c->thread_id); + } + + l = l->next; + } + + qapi_free_HotpluggableCPUList(saved); +} + +void hmp_info_memdev(Monitor *mon, const QDict *qdict) +{ + Error *err = NULL; + MemdevList *memdev_list = qmp_query_memdev(&err); + MemdevList *m = memdev_list; + Visitor *v; + char *str; + + while (m) { + v = string_output_visitor_new(false, &str); + visit_type_uint16List(v, NULL, &m->value->host_nodes, &error_abort); + monitor_printf(mon, "memory backend: %s\n", m->value->id); + monitor_printf(mon, " size: %" PRId64 "\n", m->value->size); + monitor_printf(mon, " merge: %s\n", + m->value->merge ? "true" : "false"); + monitor_printf(mon, " dump: %s\n", + m->value->dump ? "true" : "false"); + monitor_printf(mon, " prealloc: %s\n", + m->value->prealloc ? "true" : "false"); + monitor_printf(mon, " share: %s\n", + m->value->share ? "true" : "false"); + if (m->value->has_reserve) { + monitor_printf(mon, " reserve: %s\n", + m->value->reserve ? 
"true" : "false"); + } + monitor_printf(mon, " policy: %s\n", + HostMemPolicy_str(m->value->policy)); + visit_complete(v, &str); + monitor_printf(mon, " host nodes: %s\n", str); + + g_free(str); + visit_free(v); + m = m->next; + } + + monitor_printf(mon, "\n"); + + qapi_free_MemdevList(memdev_list); + hmp_handle_error(mon, err); +} diff --git a/hw/core/machine-qmp-cmds.c b/hw/core/machine-qmp-cmds.c new file mode 100644 index 000000000..4f4ab30f8 --- /dev/null +++ b/hw/core/machine-qmp-cmds.c @@ -0,0 +1,246 @@ +/* + * QMP commands related to machines and CPUs + * + * Copyright (C) 2014 Red Hat Inc + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "hw/boards.h" +#include "qapi/error.h" +#include "qapi/qapi-builtin-visit.h" +#include "qapi/qapi-commands-machine.h" +#include "qapi/qmp/qerror.h" +#include "qapi/qmp/qobject.h" +#include "qapi/qobject-input-visitor.h" +#include "qapi/type-helpers.h" +#include "qemu/main-loop.h" +#include "qom/qom-qobject.h" +#include "sysemu/hostmem.h" +#include "sysemu/hw_accel.h" +#include "sysemu/numa.h" +#include "sysemu/runstate.h" + +static void cpustate_to_cpuinfo_s390(CpuInfoS390 *info, const CPUState *cpu) +{ +#ifdef TARGET_S390X + S390CPU *s390_cpu = S390_CPU(cpu); + CPUS390XState *env = &s390_cpu->env; + + info->cpu_state = env->cpu_state; +#else + abort(); +#endif +} + +/* + * fast means: we NEVER interrupt vCPU threads to retrieve + * information from KVM. + */ +CpuInfoFastList *qmp_query_cpus_fast(Error **errp) +{ + MachineState *ms = MACHINE(qdev_get_machine()); + MachineClass *mc = MACHINE_GET_CLASS(ms); + CpuInfoFastList *head = NULL, **tail = &head; + SysEmuTarget target = qapi_enum_parse(&SysEmuTarget_lookup, TARGET_NAME, + -1, &error_abort); + CPUState *cpu; + + CPU_FOREACH(cpu) { + CpuInfoFast *value = g_malloc0(sizeof(*value)); + + value->cpu_index = cpu->cpu_index; + value->qom_path = object_get_canonical_path(OBJECT(cpu)); + value->thread_id = cpu->thread_id; + + value->has_props = !!mc->cpu_index_to_instance_props; + if (value->has_props) { + CpuInstanceProperties *props; + props = g_malloc0(sizeof(*props)); + *props = mc->cpu_index_to_instance_props(ms, cpu->cpu_index); + value->props = props; + } + + value->target = target; + if (target == SYS_EMU_TARGET_S390X) { + cpustate_to_cpuinfo_s390(&value->u.s390x, cpu); + } + + QAPI_LIST_APPEND(tail, value); + } + + return head; +} + +MachineInfoList *qmp_query_machines(Error **errp) +{ + GSList *el, *machines = object_class_get_list(TYPE_MACHINE, false); + MachineInfoList *mach_list = NULL; + + for (el = machines; el; el = el->next) { + MachineClass *mc = el->data; + MachineInfo *info; + + info = g_malloc0(sizeof(*info)); + if (mc->is_default) { + info->has_is_default = true; + info->is_default = true; + } + + if (mc->alias) { + info->has_alias = true; + info->alias = g_strdup(mc->alias); + } + + info->name = g_strdup(mc->name); + info->cpu_max = !mc->max_cpus ? 
1 : mc->max_cpus; + info->hotpluggable_cpus = mc->has_hotpluggable_cpus; + info->numa_mem_supported = mc->numa_mem_supported; + info->deprecated = !!mc->deprecation_reason; + if (mc->default_cpu_type) { + info->default_cpu_type = g_strdup(mc->default_cpu_type); + info->has_default_cpu_type = true; + } + if (mc->default_ram_id) { + info->default_ram_id = g_strdup(mc->default_ram_id); + info->has_default_ram_id = true; + } + + QAPI_LIST_PREPEND(mach_list, info); + } + + g_slist_free(machines); + return mach_list; +} + +CurrentMachineParams *qmp_query_current_machine(Error **errp) +{ + CurrentMachineParams *params = g_malloc0(sizeof(*params)); + params->wakeup_suspend_support = qemu_wakeup_suspend_enabled(); + + return params; +} + +TargetInfo *qmp_query_target(Error **errp) +{ + TargetInfo *info = g_malloc0(sizeof(*info)); + + info->arch = qapi_enum_parse(&SysEmuTarget_lookup, TARGET_NAME, -1, + &error_abort); + + return info; +} + +HotpluggableCPUList *qmp_query_hotpluggable_cpus(Error **errp) +{ + MachineState *ms = MACHINE(qdev_get_machine()); + MachineClass *mc = MACHINE_GET_CLASS(ms); + + if (!mc->has_hotpluggable_cpus) { + error_setg(errp, QERR_FEATURE_DISABLED, "query-hotpluggable-cpus"); + return NULL; + } + + return machine_query_hotpluggable_cpus(ms); +} + +void qmp_set_numa_node(NumaOptions *cmd, Error **errp) +{ + if (phase_check(PHASE_MACHINE_INITIALIZED)) { + error_setg(errp, "The command is permitted only before the machine has been created"); + return; + } + + set_numa_options(MACHINE(qdev_get_machine()), cmd, errp); +} + +static int query_memdev(Object *obj, void *opaque) +{ + Error *err = NULL; + MemdevList **list = opaque; + Memdev *m; + QObject *host_nodes; + Visitor *v; + + if (object_dynamic_cast(obj, TYPE_MEMORY_BACKEND)) { + m = g_malloc0(sizeof(*m)); + + m->id = g_strdup(object_get_canonical_path_component(obj)); + m->has_id = !!m->id; + + m->size = object_property_get_uint(obj, "size", &error_abort); + m->merge = object_property_get_bool(obj, "merge", &error_abort); + m->dump = object_property_get_bool(obj, "dump", &error_abort); + m->prealloc = object_property_get_bool(obj, "prealloc", &error_abort); + m->share = object_property_get_bool(obj, "share", &error_abort); + m->reserve = object_property_get_bool(obj, "reserve", &err); + if (err) { + error_free_or_abort(&err); + } else { + m->has_reserve = true; + } + m->policy = object_property_get_enum(obj, "policy", "HostMemPolicy", + &error_abort); + host_nodes = object_property_get_qobject(obj, + "host-nodes", + &error_abort); + v = qobject_input_visitor_new(host_nodes); + visit_type_uint16List(v, NULL, &m->host_nodes, &error_abort); + visit_free(v); + qobject_unref(host_nodes); + + QAPI_LIST_PREPEND(*list, m); + } + + return 0; +} + +MemdevList *qmp_query_memdev(Error **errp) +{ + Object *obj = object_get_objects_root(); + MemdevList *list = NULL; + + object_child_foreach(obj, query_memdev, &list); + return list; +} + +HumanReadableText *qmp_x_query_numa(Error **errp) +{ + g_autoptr(GString) buf = g_string_new(""); + int i, nb_numa_nodes; + NumaNodeMem *node_mem; + CpuInfoFastList *cpu_list, *cpu; + MachineState *ms = MACHINE(qdev_get_machine()); + + nb_numa_nodes = ms->numa_state ? 
ms->numa_state->num_nodes : 0; + g_string_append_printf(buf, "%d nodes\n", nb_numa_nodes); + if (!nb_numa_nodes) { + goto done; + } + + cpu_list = qmp_query_cpus_fast(&error_abort); + node_mem = g_new0(NumaNodeMem, nb_numa_nodes); + + query_numa_node_mem(node_mem, ms); + for (i = 0; i < nb_numa_nodes; i++) { + g_string_append_printf(buf, "node %d cpus:", i); + for (cpu = cpu_list; cpu; cpu = cpu->next) { + if (cpu->value->has_props && cpu->value->props->has_node_id && + cpu->value->props->node_id == i) { + g_string_append_printf(buf, " %" PRIi64, cpu->value->cpu_index); + } + } + g_string_append_printf(buf, "\n"); + g_string_append_printf(buf, "node %d size: %" PRId64 " MB\n", i, + node_mem[i].node_mem >> 20); + g_string_append_printf(buf, "node %d plugged: %" PRId64 " MB\n", i, + node_mem[i].node_plugged_mem >> 20); + } + qapi_free_CpuInfoFastList(cpu_list); + g_free(node_mem); + + done: + return human_readable_text_from_str(buf); +} diff --git a/hw/core/machine-smp.c b/hw/core/machine-smp.c new file mode 100644 index 000000000..116a0cbbf --- /dev/null +++ b/hw/core/machine-smp.c @@ -0,0 +1,181 @@ +/* + * QEMU Machine core (related to -smp parsing) + * + * Copyright (c) 2021 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +#include "qemu/osdep.h" +#include "hw/boards.h" +#include "qapi/error.h" + +/* + * Report information of a machine's supported CPU topology hierarchy. + * Topology members will be ordered from the largest to the smallest + * in the string. + */ +static char *cpu_hierarchy_to_string(MachineState *ms) +{ + MachineClass *mc = MACHINE_GET_CLASS(ms); + GString *s = g_string_new(NULL); + + g_string_append_printf(s, "sockets (%u)", ms->smp.sockets); + + if (mc->smp_props.dies_supported) { + g_string_append_printf(s, " * dies (%u)", ms->smp.dies); + } + + g_string_append_printf(s, " * cores (%u)", ms->smp.cores); + g_string_append_printf(s, " * threads (%u)", ms->smp.threads); + + return g_string_free(s, false); +} + +/* + * smp_parse - Generic function used to parse the given SMP configuration + * + * Any missing parameter in "cpus/maxcpus/sockets/cores/threads" will be + * automatically computed based on the provided ones. + * + * In the calculation of omitted sockets/cores/threads: we prefer sockets + * over cores over threads before 6.2, while preferring cores over sockets + * over threads since 6.2. + * + * In the calculation of cpus/maxcpus: When both maxcpus and cpus are omitted, + * maxcpus will be computed from the given parameters and cpus will be set + * equal to maxcpus. When only one of maxcpus and cpus is given then the + * omitted one will be set to its given counterpart's value. Both maxcpus and + * cpus may be specified, but maxcpus must be equal to or greater than cpus. 
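+ *
+ * A worked example (illustrative only, following the rules above with the
+ * post-6.2 preference of cores over sockets):
+ *   -smp 8,sockets=2  =>  maxcpus = cpus = 8, dies = 1, threads = 1,
+ *                         cores = 8 / (2 * 1 * 1) = 4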
+ * + * For compatibility, apart from the parameters that will be computed, newly + * introduced topology members which are likely to be target specific should + * be directly set as 1 if they are omitted (e.g. dies for PC since 4.1). + */ +void smp_parse(MachineState *ms, SMPConfiguration *config, Error **errp) +{ + MachineClass *mc = MACHINE_GET_CLASS(ms); + unsigned cpus = config->has_cpus ? config->cpus : 0; + unsigned sockets = config->has_sockets ? config->sockets : 0; + unsigned dies = config->has_dies ? config->dies : 0; + unsigned cores = config->has_cores ? config->cores : 0; + unsigned threads = config->has_threads ? config->threads : 0; + unsigned maxcpus = config->has_maxcpus ? config->maxcpus : 0; + + /* + * Specified CPU topology parameters must be greater than zero, + * explicit configuration like "cpus=0" is not allowed. + */ + if ((config->has_cpus && config->cpus == 0) || + (config->has_sockets && config->sockets == 0) || + (config->has_dies && config->dies == 0) || + (config->has_cores && config->cores == 0) || + (config->has_threads && config->threads == 0) || + (config->has_maxcpus && config->maxcpus == 0)) { + warn_report("Deprecated CPU topology (considered invalid): " + "CPU topology parameters must be greater than zero"); + } + + /* + * If not supported by the machine, a topology parameter must be + * omitted or specified equal to 1. + */ + if (!mc->smp_props.dies_supported && dies > 1) { + error_setg(errp, "dies not supported by this machine's CPU topology"); + return; + } + + dies = dies > 0 ? dies : 1; + + /* compute missing values based on the provided ones */ + if (cpus == 0 && maxcpus == 0) { + sockets = sockets > 0 ? sockets : 1; + cores = cores > 0 ? cores : 1; + threads = threads > 0 ? threads : 1; + } else { + maxcpus = maxcpus > 0 ? maxcpus : cpus; + + if (mc->smp_props.prefer_sockets) { + /* prefer sockets over cores before 6.2 */ + if (sockets == 0) { + cores = cores > 0 ? cores : 1; + threads = threads > 0 ? threads : 1; + sockets = maxcpus / (dies * cores * threads); + } else if (cores == 0) { + threads = threads > 0 ? threads : 1; + cores = maxcpus / (sockets * dies * threads); + } + } else { + /* prefer cores over sockets since 6.2 */ + if (cores == 0) { + sockets = sockets > 0 ? sockets : 1; + threads = threads > 0 ? threads : 1; + cores = maxcpus / (sockets * dies * threads); + } else if (sockets == 0) { + threads = threads > 0 ? threads : 1; + sockets = maxcpus / (dies * cores * threads); + } + } + + /* try to calculate omitted threads at last */ + if (threads == 0) { + threads = maxcpus / (sockets * dies * cores); + } + } + + maxcpus = maxcpus > 0 ? maxcpus : sockets * dies * cores * threads; + cpus = cpus > 0 ? cpus : maxcpus; + + ms->smp.cpus = cpus; + ms->smp.sockets = sockets; + ms->smp.dies = dies; + ms->smp.cores = cores; + ms->smp.threads = threads; + ms->smp.max_cpus = maxcpus; + + /* sanity-check of the computed topology */ + if (sockets * dies * cores * threads != maxcpus) { + g_autofree char *topo_msg = cpu_hierarchy_to_string(ms); + error_setg(errp, "Invalid CPU topology: " + "product of the hierarchy must match maxcpus: " + "%s != maxcpus (%u)", + topo_msg, maxcpus); + return; + } + + if (maxcpus < cpus) { + g_autofree char *topo_msg = cpu_hierarchy_to_string(ms); + error_setg(errp, "Invalid CPU topology: " + "maxcpus must be equal to or greater than smp: " + "%s == maxcpus (%u) < smp_cpus (%u)", + topo_msg, maxcpus, cpus); + return; + } + + if (ms->smp.cpus < mc->min_cpus) { + error_setg(errp, "Invalid SMP CPUs %d. 
The min CPUs " + "supported by machine '%s' is %d", + ms->smp.cpus, + mc->name, mc->min_cpus); + return; + } + + if (ms->smp.max_cpus > mc->max_cpus) { + error_setg(errp, "Invalid SMP CPUs %d. The max CPUs " + "supported by machine '%s' is %d", + ms->smp.max_cpus, + mc->name, mc->max_cpus); + return; + } +} diff --git a/hw/core/machine.c b/hw/core/machine.c new file mode 100644 index 000000000..53a99abc5 --- /dev/null +++ b/hw/core/machine.c @@ -0,0 +1,1262 @@ +/* + * QEMU Machine + * + * Copyright (C) 2014 Red Hat Inc + * + * Authors: + * Marcel Apfelbaum + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "qemu/option.h" +#include "qapi/qmp/qerror.h" +#include "sysemu/replay.h" +#include "qemu/units.h" +#include "hw/boards.h" +#include "hw/loader.h" +#include "qapi/error.h" +#include "qapi/qapi-visit-common.h" +#include "qapi/qapi-visit-machine.h" +#include "qapi/visitor.h" +#include "hw/sysbus.h" +#include "sysemu/cpus.h" +#include "sysemu/sysemu.h" +#include "sysemu/reset.h" +#include "sysemu/runstate.h" +#include "sysemu/numa.h" +#include "qemu/error-report.h" +#include "sysemu/qtest.h" +#include "hw/pci/pci.h" +#include "hw/mem/nvdimm.h" +#include "migration/global_state.h" +#include "migration/vmstate.h" +#include "exec/confidential-guest-support.h" +#include "hw/virtio/virtio.h" +#include "hw/virtio/virtio-pci.h" + +GlobalProperty hw_compat_6_1[] = { + { "vhost-user-vsock-device", "seqpacket", "off" }, + { "nvme-ns", "shared", "off" }, +}; +const size_t hw_compat_6_1_len = G_N_ELEMENTS(hw_compat_6_1); + +GlobalProperty hw_compat_6_0[] = { + { "gpex-pcihost", "allow-unmapped-accesses", "false" }, + { "i8042", "extended-state", "false"}, + { "nvme-ns", "eui64-default", "off"}, + { "e1000", "init-vet", "off" }, + { "e1000e", "init-vet", "off" }, + { "vhost-vsock-device", "seqpacket", "off" }, +}; +const size_t hw_compat_6_0_len = G_N_ELEMENTS(hw_compat_6_0); + +GlobalProperty hw_compat_5_2[] = { + { "ICH9-LPC", "smm-compat", "on"}, + { "PIIX4_PM", "smm-compat", "on"}, + { "virtio-blk-device", "report-discard-granularity", "off" }, + { "virtio-net-pci-base", "vectors", "3"}, +}; +const size_t hw_compat_5_2_len = G_N_ELEMENTS(hw_compat_5_2); + +GlobalProperty hw_compat_5_1[] = { + { "vhost-scsi", "num_queues", "1"}, + { "vhost-user-blk", "num-queues", "1"}, + { "vhost-user-scsi", "num_queues", "1"}, + { "virtio-blk-device", "num-queues", "1"}, + { "virtio-scsi-device", "num_queues", "1"}, + { "nvme", "use-intel-id", "on"}, + { "pvpanic", "events", "1"}, /* PVPANIC_PANICKED */ + { "pl011", "migrate-clk", "off" }, + { "virtio-pci", "x-ats-page-aligned", "off"}, +}; +const size_t hw_compat_5_1_len = G_N_ELEMENTS(hw_compat_5_1); + +GlobalProperty hw_compat_5_0[] = { + { "pci-host-bridge", "x-config-reg-migration-enabled", "off" }, + { "virtio-balloon-device", "page-poison", "false" }, + { "vmport", "x-read-set-eax", "off" }, + { "vmport", "x-signal-unsupported-cmd", "off" }, + { "vmport", "x-report-vmx-type", "off" }, + { "vmport", "x-cmds-v2", "off" }, + { "virtio-device", "x-disable-legacy-check", "true" }, +}; +const size_t hw_compat_5_0_len = G_N_ELEMENTS(hw_compat_5_0); + +GlobalProperty hw_compat_4_2[] = { + { "virtio-blk-device", "queue-size", "128"}, + { "virtio-scsi-device", "virtqueue_size", "128"}, + { "virtio-blk-device", "x-enable-wce-if-config-wce", "off" }, + { "virtio-blk-device", "seg-max-adjust", "off"}, + { "virtio-scsi-device", "seg_max_adjust", 
"off"}, + { "vhost-blk-device", "seg_max_adjust", "off"}, + { "usb-host", "suppress-remote-wake", "off" }, + { "usb-redir", "suppress-remote-wake", "off" }, + { "qxl", "revision", "4" }, + { "qxl-vga", "revision", "4" }, + { "fw_cfg", "acpi-mr-restore", "false" }, + { "virtio-device", "use-disabled-flag", "false" }, +}; +const size_t hw_compat_4_2_len = G_N_ELEMENTS(hw_compat_4_2); + +GlobalProperty hw_compat_4_1[] = { + { "virtio-pci", "x-pcie-flr-init", "off" }, +}; +const size_t hw_compat_4_1_len = G_N_ELEMENTS(hw_compat_4_1); + +GlobalProperty hw_compat_4_0[] = { + { "VGA", "edid", "false" }, + { "secondary-vga", "edid", "false" }, + { "bochs-display", "edid", "false" }, + { "virtio-vga", "edid", "false" }, + { "virtio-gpu-device", "edid", "false" }, + { "virtio-device", "use-started", "false" }, + { "virtio-balloon-device", "qemu-4-0-config-size", "true" }, + { "pl031", "migrate-tick-offset", "false" }, +}; +const size_t hw_compat_4_0_len = G_N_ELEMENTS(hw_compat_4_0); + +GlobalProperty hw_compat_3_1[] = { + { "pcie-root-port", "x-speed", "2_5" }, + { "pcie-root-port", "x-width", "1" }, + { "memory-backend-file", "x-use-canonical-path-for-ramblock-id", "true" }, + { "memory-backend-memfd", "x-use-canonical-path-for-ramblock-id", "true" }, + { "tpm-crb", "ppi", "false" }, + { "tpm-tis", "ppi", "false" }, + { "usb-kbd", "serial", "42" }, + { "usb-mouse", "serial", "42" }, + { "usb-tablet", "serial", "42" }, + { "virtio-blk-device", "discard", "false" }, + { "virtio-blk-device", "write-zeroes", "false" }, + { "virtio-balloon-device", "qemu-4-0-config-size", "false" }, + { "pcie-root-port-base", "disable-acs", "true" }, /* Added in 4.1 */ +}; +const size_t hw_compat_3_1_len = G_N_ELEMENTS(hw_compat_3_1); + +GlobalProperty hw_compat_3_0[] = {}; +const size_t hw_compat_3_0_len = G_N_ELEMENTS(hw_compat_3_0); + +GlobalProperty hw_compat_2_12[] = { + { "migration", "decompress-error-check", "off" }, + { "hda-audio", "use-timer", "false" }, + { "cirrus-vga", "global-vmstate", "true" }, + { "VGA", "global-vmstate", "true" }, + { "vmware-svga", "global-vmstate", "true" }, + { "qxl-vga", "global-vmstate", "true" }, +}; +const size_t hw_compat_2_12_len = G_N_ELEMENTS(hw_compat_2_12); + +GlobalProperty hw_compat_2_11[] = { + { "hpet", "hpet-offset-saved", "false" }, + { "virtio-blk-pci", "vectors", "2" }, + { "vhost-user-blk-pci", "vectors", "2" }, + { "e1000", "migrate_tso_props", "off" }, +}; +const size_t hw_compat_2_11_len = G_N_ELEMENTS(hw_compat_2_11); + +GlobalProperty hw_compat_2_10[] = { + { "virtio-mouse-device", "wheel-axis", "false" }, + { "virtio-tablet-device", "wheel-axis", "false" }, +}; +const size_t hw_compat_2_10_len = G_N_ELEMENTS(hw_compat_2_10); + +GlobalProperty hw_compat_2_9[] = { + { "pci-bridge", "shpc", "off" }, + { "intel-iommu", "pt", "off" }, + { "virtio-net-device", "x-mtu-bypass-backend", "off" }, + { "pcie-root-port", "x-migrate-msix", "false" }, +}; +const size_t hw_compat_2_9_len = G_N_ELEMENTS(hw_compat_2_9); + +GlobalProperty hw_compat_2_8[] = { + { "fw_cfg_mem", "x-file-slots", "0x10" }, + { "fw_cfg_io", "x-file-slots", "0x10" }, + { "pflash_cfi01", "old-multiple-chip-handling", "on" }, + { "pci-bridge", "shpc", "on" }, + { TYPE_PCI_DEVICE, "x-pcie-extcap-init", "off" }, + { "virtio-pci", "x-pcie-deverr-init", "off" }, + { "virtio-pci", "x-pcie-lnkctl-init", "off" }, + { "virtio-pci", "x-pcie-pm-init", "off" }, + { "cirrus-vga", "vgamem_mb", "8" }, + { "isa-cirrus-vga", "vgamem_mb", "8" }, +}; +const size_t hw_compat_2_8_len = G_N_ELEMENTS(hw_compat_2_8); + 
+GlobalProperty hw_compat_2_7[] = { + { "virtio-pci", "page-per-vq", "on" }, + { "virtio-serial-device", "emergency-write", "off" }, + { "ioapic", "version", "0x11" }, + { "intel-iommu", "x-buggy-eim", "true" }, + { "virtio-pci", "x-ignore-backend-features", "on" }, +}; +const size_t hw_compat_2_7_len = G_N_ELEMENTS(hw_compat_2_7); + +GlobalProperty hw_compat_2_6[] = { + { "virtio-mmio", "format_transport_address", "off" }, + /* Optional because not all virtio-pci devices support legacy mode */ + { "virtio-pci", "disable-modern", "on", .optional = true }, + { "virtio-pci", "disable-legacy", "off", .optional = true }, +}; +const size_t hw_compat_2_6_len = G_N_ELEMENTS(hw_compat_2_6); + +GlobalProperty hw_compat_2_5[] = { + { "isa-fdc", "fallback", "144" }, + { "pvscsi", "x-old-pci-configuration", "on" }, + { "pvscsi", "x-disable-pcie", "on" }, + { "vmxnet3", "x-old-msi-offsets", "on" }, + { "vmxnet3", "x-disable-pcie", "on" }, +}; +const size_t hw_compat_2_5_len = G_N_ELEMENTS(hw_compat_2_5); + +GlobalProperty hw_compat_2_4[] = { + /* Optional because the 'scsi' property is Linux-only */ + { "virtio-blk-device", "scsi", "true", .optional = true }, + { "e1000", "extra_mac_registers", "off" }, + { "virtio-pci", "x-disable-pcie", "on" }, + { "virtio-pci", "migrate-extra", "off" }, + { "fw_cfg_mem", "dma_enabled", "off" }, + { "fw_cfg_io", "dma_enabled", "off" } +}; +const size_t hw_compat_2_4_len = G_N_ELEMENTS(hw_compat_2_4); + +GlobalProperty hw_compat_2_3[] = { + { "virtio-blk-pci", "any_layout", "off" }, + { "virtio-balloon-pci", "any_layout", "off" }, + { "virtio-serial-pci", "any_layout", "off" }, + { "virtio-9p-pci", "any_layout", "off" }, + { "virtio-rng-pci", "any_layout", "off" }, + { TYPE_PCI_DEVICE, "x-pcie-lnksta-dllla", "off" }, + { "migration", "send-configuration", "off" }, + { "migration", "send-section-footer", "off" }, + { "migration", "store-global-state", "off" }, +}; +const size_t hw_compat_2_3_len = G_N_ELEMENTS(hw_compat_2_3); + +GlobalProperty hw_compat_2_2[] = {}; +const size_t hw_compat_2_2_len = G_N_ELEMENTS(hw_compat_2_2); + +GlobalProperty hw_compat_2_1[] = { + { "intel-hda", "old_msi_addr", "on" }, + { "VGA", "qemu-extended-regs", "off" }, + { "secondary-vga", "qemu-extended-regs", "off" }, + { "virtio-scsi-pci", "any_layout", "off" }, + { "usb-mouse", "usb_version", "1" }, + { "usb-kbd", "usb_version", "1" }, + { "virtio-pci", "virtio-pci-bus-master-bug-migration", "on" }, +}; +const size_t hw_compat_2_1_len = G_N_ELEMENTS(hw_compat_2_1); + +MachineState *current_machine; + +static char *machine_get_kernel(Object *obj, Error **errp) +{ + MachineState *ms = MACHINE(obj); + + return g_strdup(ms->kernel_filename); +} + +static void machine_set_kernel(Object *obj, const char *value, Error **errp) +{ + MachineState *ms = MACHINE(obj); + + g_free(ms->kernel_filename); + ms->kernel_filename = g_strdup(value); +} + +static char *machine_get_initrd(Object *obj, Error **errp) +{ + MachineState *ms = MACHINE(obj); + + return g_strdup(ms->initrd_filename); +} + +static void machine_set_initrd(Object *obj, const char *value, Error **errp) +{ + MachineState *ms = MACHINE(obj); + + g_free(ms->initrd_filename); + ms->initrd_filename = g_strdup(value); +} + +static char *machine_get_append(Object *obj, Error **errp) +{ + MachineState *ms = MACHINE(obj); + + return g_strdup(ms->kernel_cmdline); +} + +static void machine_set_append(Object *obj, const char *value, Error **errp) +{ + MachineState *ms = MACHINE(obj); + + g_free(ms->kernel_cmdline); + ms->kernel_cmdline = 
g_strdup(value); +} + +static char *machine_get_dtb(Object *obj, Error **errp) +{ + MachineState *ms = MACHINE(obj); + + return g_strdup(ms->dtb); +} + +static void machine_set_dtb(Object *obj, const char *value, Error **errp) +{ + MachineState *ms = MACHINE(obj); + + g_free(ms->dtb); + ms->dtb = g_strdup(value); +} + +static char *machine_get_dumpdtb(Object *obj, Error **errp) +{ + MachineState *ms = MACHINE(obj); + + return g_strdup(ms->dumpdtb); +} + +static void machine_set_dumpdtb(Object *obj, const char *value, Error **errp) +{ + MachineState *ms = MACHINE(obj); + + g_free(ms->dumpdtb); + ms->dumpdtb = g_strdup(value); +} + +static void machine_get_phandle_start(Object *obj, Visitor *v, + const char *name, void *opaque, + Error **errp) +{ + MachineState *ms = MACHINE(obj); + int64_t value = ms->phandle_start; + + visit_type_int(v, name, &value, errp); +} + +static void machine_set_phandle_start(Object *obj, Visitor *v, + const char *name, void *opaque, + Error **errp) +{ + MachineState *ms = MACHINE(obj); + int64_t value; + + if (!visit_type_int(v, name, &value, errp)) { + return; + } + + ms->phandle_start = value; +} + +static char *machine_get_dt_compatible(Object *obj, Error **errp) +{ + MachineState *ms = MACHINE(obj); + + return g_strdup(ms->dt_compatible); +} + +static void machine_set_dt_compatible(Object *obj, const char *value, Error **errp) +{ + MachineState *ms = MACHINE(obj); + + g_free(ms->dt_compatible); + ms->dt_compatible = g_strdup(value); +} + +static bool machine_get_dump_guest_core(Object *obj, Error **errp) +{ + MachineState *ms = MACHINE(obj); + + return ms->dump_guest_core; +} + +static void machine_set_dump_guest_core(Object *obj, bool value, Error **errp) +{ + MachineState *ms = MACHINE(obj); + + ms->dump_guest_core = value; +} + +static bool machine_get_mem_merge(Object *obj, Error **errp) +{ + MachineState *ms = MACHINE(obj); + + return ms->mem_merge; +} + +static void machine_set_mem_merge(Object *obj, bool value, Error **errp) +{ + MachineState *ms = MACHINE(obj); + + ms->mem_merge = value; +} + +static bool machine_get_usb(Object *obj, Error **errp) +{ + MachineState *ms = MACHINE(obj); + + return ms->usb; +} + +static void machine_set_usb(Object *obj, bool value, Error **errp) +{ + MachineState *ms = MACHINE(obj); + + ms->usb = value; + ms->usb_disabled = !value; +} + +static bool machine_get_graphics(Object *obj, Error **errp) +{ + MachineState *ms = MACHINE(obj); + + return ms->enable_graphics; +} + +static void machine_set_graphics(Object *obj, bool value, Error **errp) +{ + MachineState *ms = MACHINE(obj); + + ms->enable_graphics = value; +} + +static char *machine_get_firmware(Object *obj, Error **errp) +{ + MachineState *ms = MACHINE(obj); + + return g_strdup(ms->firmware); +} + +static void machine_set_firmware(Object *obj, const char *value, Error **errp) +{ + MachineState *ms = MACHINE(obj); + + g_free(ms->firmware); + ms->firmware = g_strdup(value); +} + +static void machine_set_suppress_vmdesc(Object *obj, bool value, Error **errp) +{ + MachineState *ms = MACHINE(obj); + + ms->suppress_vmdesc = value; +} + +static bool machine_get_suppress_vmdesc(Object *obj, Error **errp) +{ + MachineState *ms = MACHINE(obj); + + return ms->suppress_vmdesc; +} + +static char *machine_get_memory_encryption(Object *obj, Error **errp) +{ + MachineState *ms = MACHINE(obj); + + if (ms->cgs) { + return g_strdup(object_get_canonical_path_component(OBJECT(ms->cgs))); + } + + return NULL; +} + +static void machine_set_memory_encryption(Object *obj, const char *value, 
+ Error **errp) +{ + Object *cgs = + object_resolve_path_component(object_get_objects_root(), value); + + if (!cgs) { + error_setg(errp, "No such memory encryption object '%s'", value); + return; + } + + object_property_set_link(obj, "confidential-guest-support", cgs, errp); +} + +static void machine_check_confidential_guest_support(const Object *obj, + const char *name, + Object *new_target, + Error **errp) +{ + /* + * So far the only constraint is that the target has the + * TYPE_CONFIDENTIAL_GUEST_SUPPORT interface, and that's checked + * by the QOM core + */ +} + +static bool machine_get_nvdimm(Object *obj, Error **errp) +{ + MachineState *ms = MACHINE(obj); + + return ms->nvdimms_state->is_enabled; +} + +static void machine_set_nvdimm(Object *obj, bool value, Error **errp) +{ + MachineState *ms = MACHINE(obj); + + ms->nvdimms_state->is_enabled = value; +} + +static bool machine_get_hmat(Object *obj, Error **errp) +{ + MachineState *ms = MACHINE(obj); + + return ms->numa_state->hmat_enabled; +} + +static void machine_set_hmat(Object *obj, bool value, Error **errp) +{ + MachineState *ms = MACHINE(obj); + + ms->numa_state->hmat_enabled = value; +} + +static char *machine_get_nvdimm_persistence(Object *obj, Error **errp) +{ + MachineState *ms = MACHINE(obj); + + return g_strdup(ms->nvdimms_state->persistence_string); +} + +static void machine_set_nvdimm_persistence(Object *obj, const char *value, + Error **errp) +{ + MachineState *ms = MACHINE(obj); + NVDIMMState *nvdimms_state = ms->nvdimms_state; + + if (strcmp(value, "cpu") == 0) { + nvdimms_state->persistence = 3; + } else if (strcmp(value, "mem-ctrl") == 0) { + nvdimms_state->persistence = 2; + } else { + error_setg(errp, "-machine nvdimm-persistence=%s: unsupported option", + value); + return; + } + + g_free(nvdimms_state->persistence_string); + nvdimms_state->persistence_string = g_strdup(value); +} + +void machine_class_allow_dynamic_sysbus_dev(MachineClass *mc, const char *type) +{ + QAPI_LIST_PREPEND(mc->allowed_dynamic_sysbus_devices, g_strdup(type)); +} + +bool device_is_dynamic_sysbus(MachineClass *mc, DeviceState *dev) +{ + Object *obj = OBJECT(dev); + + if (!object_dynamic_cast(obj, TYPE_SYS_BUS_DEVICE)) { + return false; + } + + return device_type_is_dynamic_sysbus(mc, object_get_typename(obj)); +} + +bool device_type_is_dynamic_sysbus(MachineClass *mc, const char *type) +{ + bool allowed = false; + strList *wl; + ObjectClass *klass = object_class_by_name(type); + + for (wl = mc->allowed_dynamic_sysbus_devices; + !allowed && wl; + wl = wl->next) { + allowed |= !!object_class_dynamic_cast(klass, wl->value); + } + + return allowed; +} + +static char *machine_get_memdev(Object *obj, Error **errp) +{ + MachineState *ms = MACHINE(obj); + + return g_strdup(ms->ram_memdev_id); +} + +static void machine_set_memdev(Object *obj, const char *value, Error **errp) +{ + MachineState *ms = MACHINE(obj); + + g_free(ms->ram_memdev_id); + ms->ram_memdev_id = g_strdup(value); +} + +HotpluggableCPUList *machine_query_hotpluggable_cpus(MachineState *machine) +{ + int i; + HotpluggableCPUList *head = NULL; + MachineClass *mc = MACHINE_GET_CLASS(machine); + + /* force board to initialize possible_cpus if it hasn't been done yet */ + mc->possible_cpu_arch_ids(machine); + + for (i = 0; i < machine->possible_cpus->len; i++) { + Object *cpu; + HotpluggableCPU *cpu_item = g_new0(typeof(*cpu_item), 1); + + cpu_item->type = g_strdup(machine->possible_cpus->cpus[i].type); + cpu_item->vcpus_count = machine->possible_cpus->cpus[i].vcpus_count; + 
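+        /*
+         * Note: CpuInstanceProperties holds only scalar members and their
+         * 'has_*' flags, so the shallow g_memdup() copy that follows is a
+         * complete copy.
+         */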
cpu_item->props = g_memdup(&machine->possible_cpus->cpus[i].props, + sizeof(*cpu_item->props)); + + cpu = machine->possible_cpus->cpus[i].cpu; + if (cpu) { + cpu_item->has_qom_path = true; + cpu_item->qom_path = object_get_canonical_path(cpu); + } + QAPI_LIST_PREPEND(head, cpu_item); + } + return head; +} + +/** + * machine_set_cpu_numa_node: + * @machine: machine object to modify + * @props: specifies which cpu objects to assign to + * numa node specified by @props.node_id + * @errp: if an error occurs, a pointer to an area to store the error + * + * Associate NUMA node specified by @props.node_id with cpu slots that + * match socket/core/thread-ids specified by @props. It's recommended to use + * query-hotpluggable-cpus.props values to specify affected cpu slots, + * which would lead to exact 1:1 mapping of cpu slots to NUMA node. + * + * However for CLI convenience it's possible to pass in subset of properties, + * which would affect all cpu slots that match it. + * Ex for pc machine: + * -smp 4,cores=2,sockets=2 -numa node,nodeid=0 -numa node,nodeid=1 \ + * -numa cpu,node-id=0,socket_id=0 \ + * -numa cpu,node-id=1,socket_id=1 + * will assign all child cores of socket 0 to node 0 and + * of socket 1 to node 1. + * + * On attempt of reassigning (already assigned) cpu slot to another NUMA node, + * return error. + * Empty subset is disallowed and function will return with error in this case. + */ +void machine_set_cpu_numa_node(MachineState *machine, + const CpuInstanceProperties *props, Error **errp) +{ + MachineClass *mc = MACHINE_GET_CLASS(machine); + NodeInfo *numa_info = machine->numa_state->nodes; + bool match = false; + int i; + + if (!mc->possible_cpu_arch_ids) { + error_setg(errp, "mapping of CPUs to NUMA node is not supported"); + return; + } + + /* disabling node mapping is not supported, forbid it */ + assert(props->has_node_id); + + /* force board to initialize possible_cpus if it hasn't been done yet */ + mc->possible_cpu_arch_ids(machine); + + for (i = 0; i < machine->possible_cpus->len; i++) { + CPUArchId *slot = &machine->possible_cpus->cpus[i]; + + /* reject unsupported by board properties */ + if (props->has_thread_id && !slot->props.has_thread_id) { + error_setg(errp, "thread-id is not supported"); + return; + } + + if (props->has_core_id && !slot->props.has_core_id) { + error_setg(errp, "core-id is not supported"); + return; + } + + if (props->has_socket_id && !slot->props.has_socket_id) { + error_setg(errp, "socket-id is not supported"); + return; + } + + if (props->has_die_id && !slot->props.has_die_id) { + error_setg(errp, "die-id is not supported"); + return; + } + + /* skip slots with explicit mismatch */ + if (props->has_thread_id && props->thread_id != slot->props.thread_id) { + continue; + } + + if (props->has_core_id && props->core_id != slot->props.core_id) { + continue; + } + + if (props->has_die_id && props->die_id != slot->props.die_id) { + continue; + } + + if (props->has_socket_id && props->socket_id != slot->props.socket_id) { + continue; + } + + /* reject assignment if slot is already assigned, for compatibility + * of legacy cpu_index mapping with SPAPR core based mapping do not + * error out if cpu thread and matched core have the same node-id */ + if (slot->props.has_node_id && + slot->props.node_id != props->node_id) { + error_setg(errp, "CPU is already assigned to node-id: %" PRId64, + slot->props.node_id); + return; + } + + /* assign slot to node as it's matched '-numa cpu' key */ + match = true; + slot->props.node_id = props->node_id; + 
slot->props.has_node_id = props->has_node_id; + + if (machine->numa_state->hmat_enabled) { + if ((numa_info[props->node_id].initiator < MAX_NODES) && + (props->node_id != numa_info[props->node_id].initiator)) { + error_setg(errp, "The initiator of CPU NUMA node %" PRId64 + " should be itself (got %" PRIu16 ")", + props->node_id, numa_info[props->node_id].initiator); + return; + } + numa_info[props->node_id].has_cpu = true; + numa_info[props->node_id].initiator = props->node_id; + } + } + + if (!match) { + error_setg(errp, "no match found"); + } +} + +static void machine_get_smp(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + MachineState *ms = MACHINE(obj); + SMPConfiguration *config = &(SMPConfiguration){ + .has_cpus = true, .cpus = ms->smp.cpus, + .has_sockets = true, .sockets = ms->smp.sockets, + .has_dies = true, .dies = ms->smp.dies, + .has_cores = true, .cores = ms->smp.cores, + .has_threads = true, .threads = ms->smp.threads, + .has_maxcpus = true, .maxcpus = ms->smp.max_cpus, + }; + if (!visit_type_SMPConfiguration(v, name, &config, &error_abort)) { + return; + } +} + +static void machine_set_smp(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + MachineState *ms = MACHINE(obj); + g_autoptr(SMPConfiguration) config = NULL; + + if (!visit_type_SMPConfiguration(v, name, &config, errp)) { + return; + } + + smp_parse(ms, config, errp); +} + +static void machine_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + + /* Default 128 MB as guest ram size */ + mc->default_ram_size = 128 * MiB; + mc->rom_file_has_mr = true; + + /* numa node memory size aligned on 8MB by default. + * On Linux, each node's border has to be 8MB aligned + */ + mc->numa_mem_align_shift = 23; + + object_class_property_add_str(oc, "kernel", + machine_get_kernel, machine_set_kernel); + object_class_property_set_description(oc, "kernel", + "Linux kernel image file"); + + object_class_property_add_str(oc, "initrd", + machine_get_initrd, machine_set_initrd); + object_class_property_set_description(oc, "initrd", + "Linux initial ramdisk file"); + + object_class_property_add_str(oc, "append", + machine_get_append, machine_set_append); + object_class_property_set_description(oc, "append", + "Linux kernel command line"); + + object_class_property_add_str(oc, "dtb", + machine_get_dtb, machine_set_dtb); + object_class_property_set_description(oc, "dtb", + "Linux kernel device tree file"); + + object_class_property_add_str(oc, "dumpdtb", + machine_get_dumpdtb, machine_set_dumpdtb); + object_class_property_set_description(oc, "dumpdtb", + "Dump current dtb to a file and quit"); + + object_class_property_add(oc, "smp", "SMPConfiguration", + machine_get_smp, machine_set_smp, + NULL, NULL); + object_class_property_set_description(oc, "smp", + "CPU topology"); + + object_class_property_add(oc, "phandle-start", "int", + machine_get_phandle_start, machine_set_phandle_start, + NULL, NULL); + object_class_property_set_description(oc, "phandle-start", + "The first phandle ID we may generate dynamically"); + + object_class_property_add_str(oc, "dt-compatible", + machine_get_dt_compatible, machine_set_dt_compatible); + object_class_property_set_description(oc, "dt-compatible", + "Overrides the \"compatible\" property of the dt root node"); + + object_class_property_add_bool(oc, "dump-guest-core", + machine_get_dump_guest_core, machine_set_dump_guest_core); + object_class_property_set_description(oc, "dump-guest-core", + "Include guest memory in a 
core dump");
+
+    object_class_property_add_bool(oc, "mem-merge",
+        machine_get_mem_merge, machine_set_mem_merge);
+    object_class_property_set_description(oc, "mem-merge",
+        "Enable/disable memory merge support");
+
+    object_class_property_add_bool(oc, "usb",
+        machine_get_usb, machine_set_usb);
+    object_class_property_set_description(oc, "usb",
+        "Set on/off to enable/disable usb");
+
+    object_class_property_add_bool(oc, "graphics",
+        machine_get_graphics, machine_set_graphics);
+    object_class_property_set_description(oc, "graphics",
+        "Set on/off to enable/disable graphics emulation");
+
+    object_class_property_add_str(oc, "firmware",
+        machine_get_firmware, machine_set_firmware);
+    object_class_property_set_description(oc, "firmware",
+        "Firmware image");
+
+    object_class_property_add_bool(oc, "suppress-vmdesc",
+        machine_get_suppress_vmdesc, machine_set_suppress_vmdesc);
+    object_class_property_set_description(oc, "suppress-vmdesc",
+        "Set on to disable self-describing migration");
+
+    object_class_property_add_link(oc, "confidential-guest-support",
+                                   TYPE_CONFIDENTIAL_GUEST_SUPPORT,
+                                   offsetof(MachineState, cgs),
+                                   machine_check_confidential_guest_support,
+                                   OBJ_PROP_LINK_STRONG);
+    object_class_property_set_description(oc, "confidential-guest-support",
+        "Set confidential guest scheme to support");
+
+    /* For compatibility */
+    object_class_property_add_str(oc, "memory-encryption",
+        machine_get_memory_encryption, machine_set_memory_encryption);
+    object_class_property_set_description(oc, "memory-encryption",
+        "Set memory encryption object to use");
+
+    object_class_property_add_str(oc, "memory-backend",
+        machine_get_memdev, machine_set_memdev);
+    object_class_property_set_description(oc, "memory-backend",
+        "Set RAM backend. "
+        "Valid value is ID of a hostmem-based backend");
+}
+
+static void machine_class_base_init(ObjectClass *oc, void *data)
+{
+    MachineClass *mc = MACHINE_CLASS(oc);
+    mc->max_cpus = mc->max_cpus ?: 1;
+    mc->min_cpus = mc->min_cpus ?: 1;
+    mc->default_cpus = mc->default_cpus ?: 1;
+
+    if (!object_class_is_abstract(oc)) {
+        const char *cname = object_class_get_name(oc);
+        assert(g_str_has_suffix(cname, TYPE_MACHINE_SUFFIX));
+        mc->name = g_strndup(cname,
+                             strlen(cname) - strlen(TYPE_MACHINE_SUFFIX));
+        mc->compat_props = g_ptr_array_new();
+    }
+}
+
+static void machine_initfn(Object *obj)
+{
+    MachineState *ms = MACHINE(obj);
+    MachineClass *mc = MACHINE_GET_CLASS(obj);
+
+    container_get(obj, "/peripheral");
+    container_get(obj, "/peripheral-anon");
+
+    ms->dump_guest_core = true;
+    ms->mem_merge = true;
+    ms->enable_graphics = true;
+    ms->kernel_cmdline = g_strdup("");
+
+    if (mc->nvdimm_supported) {
+        Object *obj = OBJECT(ms);
+
+        ms->nvdimms_state = g_new0(NVDIMMState, 1);
+        object_property_add_bool(obj, "nvdimm",
+                                 machine_get_nvdimm, machine_set_nvdimm);
+        object_property_set_description(obj, "nvdimm",
+                                        "Set on/off to enable/disable "
+                                        "NVDIMM instantiation");
+
+        object_property_add_str(obj, "nvdimm-persistence",
+                                machine_get_nvdimm_persistence,
+                                machine_set_nvdimm_persistence);
+        object_property_set_description(obj, "nvdimm-persistence",
+                                        "Set NVDIMM persistence. "
+                                        "Valid values are cpu, mem-ctrl");
+    }
+
+    if (mc->cpu_index_to_instance_props && mc->get_default_cpu_node_id) {
+        ms->numa_state = g_new0(NumaState, 1);
+        object_property_add_bool(obj, "hmat",
+                                 machine_get_hmat, machine_set_hmat);
+        object_property_set_description(obj, "hmat",
+                                        "Set on/off to enable/disable "
+                                        "ACPI Heterogeneous Memory Attribute "
+                                        "Table (HMAT)");
+    }
+
+    /* default
to mc->default_cpus */ + ms->smp.cpus = mc->default_cpus; + ms->smp.max_cpus = mc->default_cpus; + ms->smp.sockets = 1; + ms->smp.dies = 1; + ms->smp.cores = 1; + ms->smp.threads = 1; +} + +static void machine_finalize(Object *obj) +{ + MachineState *ms = MACHINE(obj); + + g_free(ms->kernel_filename); + g_free(ms->initrd_filename); + g_free(ms->kernel_cmdline); + g_free(ms->dtb); + g_free(ms->dumpdtb); + g_free(ms->dt_compatible); + g_free(ms->firmware); + g_free(ms->device_memory); + g_free(ms->nvdimms_state); + g_free(ms->numa_state); +} + +bool machine_usb(MachineState *machine) +{ + return machine->usb; +} + +int machine_phandle_start(MachineState *machine) +{ + return machine->phandle_start; +} + +bool machine_dump_guest_core(MachineState *machine) +{ + return machine->dump_guest_core; +} + +bool machine_mem_merge(MachineState *machine) +{ + return machine->mem_merge; +} + +static char *cpu_slot_to_string(const CPUArchId *cpu) +{ + GString *s = g_string_new(NULL); + if (cpu->props.has_socket_id) { + g_string_append_printf(s, "socket-id: %"PRId64, cpu->props.socket_id); + } + if (cpu->props.has_die_id) { + if (s->len) { + g_string_append_printf(s, ", "); + } + g_string_append_printf(s, "die-id: %"PRId64, cpu->props.die_id); + } + if (cpu->props.has_core_id) { + if (s->len) { + g_string_append_printf(s, ", "); + } + g_string_append_printf(s, "core-id: %"PRId64, cpu->props.core_id); + } + if (cpu->props.has_thread_id) { + if (s->len) { + g_string_append_printf(s, ", "); + } + g_string_append_printf(s, "thread-id: %"PRId64, cpu->props.thread_id); + } + return g_string_free(s, false); +} + +static void numa_validate_initiator(NumaState *numa_state) +{ + int i; + NodeInfo *numa_info = numa_state->nodes; + + for (i = 0; i < numa_state->num_nodes; i++) { + if (numa_info[i].initiator == MAX_NODES) { + error_report("The initiator of NUMA node %d is missing, use " + "'-numa node,initiator' option to declare it", i); + exit(1); + } + + if (!numa_info[numa_info[i].initiator].present) { + error_report("NUMA node %" PRIu16 " is missing, use " + "'-numa node' option to declare it first", + numa_info[i].initiator); + exit(1); + } + + if (!numa_info[numa_info[i].initiator].has_cpu) { + error_report("The initiator of NUMA node %d is invalid", i); + exit(1); + } + } +} + +static void machine_numa_finish_cpu_init(MachineState *machine) +{ + int i; + bool default_mapping; + GString *s = g_string_new(NULL); + MachineClass *mc = MACHINE_GET_CLASS(machine); + const CPUArchIdList *possible_cpus = mc->possible_cpu_arch_ids(machine); + + assert(machine->numa_state->num_nodes); + for (i = 0; i < possible_cpus->len; i++) { + if (possible_cpus->cpus[i].props.has_node_id) { + break; + } + } + default_mapping = (i == possible_cpus->len); + + for (i = 0; i < possible_cpus->len; i++) { + const CPUArchId *cpu_slot = &possible_cpus->cpus[i]; + + if (!cpu_slot->props.has_node_id) { + /* fetch default mapping from board and enable it */ + CpuInstanceProperties props = cpu_slot->props; + + props.node_id = mc->get_default_cpu_node_id(machine, i); + if (!default_mapping) { + /* record slots with not set mapping, + * TODO: make it hard error in future */ + char *cpu_str = cpu_slot_to_string(cpu_slot); + g_string_append_printf(s, "%sCPU %d [%s]", + s->len ? 
", " : "", i, cpu_str); + g_free(cpu_str); + + /* non mapped cpus used to fallback to node 0 */ + props.node_id = 0; + } + + props.has_node_id = true; + machine_set_cpu_numa_node(machine, &props, &error_fatal); + } + } + + if (machine->numa_state->hmat_enabled) { + numa_validate_initiator(machine->numa_state); + } + + if (s->len && !qtest_enabled()) { + warn_report("CPU(s) not present in any NUMA nodes: %s", + s->str); + warn_report("All CPU(s) up to maxcpus should be described " + "in NUMA config, ability to start up with partial NUMA " + "mappings is obsoleted and will be removed in future"); + } + g_string_free(s, true); +} + +MemoryRegion *machine_consume_memdev(MachineState *machine, + HostMemoryBackend *backend) +{ + MemoryRegion *ret = host_memory_backend_get_memory(backend); + + if (memory_region_is_mapped(ret)) { + error_report("memory backend %s can't be used multiple times.", + object_get_canonical_path_component(OBJECT(backend))); + exit(EXIT_FAILURE); + } + host_memory_backend_set_mapped(backend, true); + vmstate_register_ram_global(ret); + return ret; +} + +void machine_run_board_init(MachineState *machine) +{ + MachineClass *machine_class = MACHINE_GET_CLASS(machine); + ObjectClass *oc = object_class_by_name(machine->cpu_type); + CPUClass *cc; + + /* This checkpoint is required by replay to separate prior clock + reading from the other reads, because timer polling functions query + clock values from the log. */ + replay_checkpoint(CHECKPOINT_INIT); + + if (machine->ram_memdev_id) { + Object *o; + o = object_resolve_path_type(machine->ram_memdev_id, + TYPE_MEMORY_BACKEND, NULL); + machine->ram = machine_consume_memdev(machine, MEMORY_BACKEND(o)); + } + + if (machine->numa_state) { + numa_complete_configuration(machine); + if (machine->numa_state->num_nodes) { + machine_numa_finish_cpu_init(machine); + } + } + + /* If the machine supports the valid_cpu_types check and the user + * specified a CPU with -cpu check here that the user CPU is supported. + */ + if (machine_class->valid_cpu_types && machine->cpu_type) { + int i; + + for (i = 0; machine_class->valid_cpu_types[i]; i++) { + if (object_class_dynamic_cast(oc, + machine_class->valid_cpu_types[i])) { + /* The user specificed CPU is in the valid field, we are + * good to go. + */ + break; + } + } + + if (!machine_class->valid_cpu_types[i]) { + /* The user specified CPU is not valid */ + error_report("Invalid CPU type: %s", machine->cpu_type); + error_printf("The valid types are: %s", + machine_class->valid_cpu_types[0]); + for (i = 1; machine_class->valid_cpu_types[i]; i++) { + error_printf(", %s", machine_class->valid_cpu_types[i]); + } + error_printf("\n"); + + exit(1); + } + } + + /* Check if CPU type is deprecated and warn if so */ + cc = CPU_CLASS(oc); + if (cc && cc->deprecation_note) { + warn_report("CPU model %s is deprecated -- %s", machine->cpu_type, + cc->deprecation_note); + } + + if (machine->cgs) { + /* + * With confidential guests, the host can't see the real + * contents of RAM, so there's no point in it trying to merge + * areas. + */ + machine_set_mem_merge(OBJECT(machine), false, &error_abort); + + /* + * Virtio devices can't count on directly accessing guest + * memory, so they need iommu_platform=on to use normal DMA + * mechanisms. That requires also disabling legacy virtio + * support for those virtio pci devices which allow it. 
+ */ + object_register_sugar_prop(TYPE_VIRTIO_PCI, "disable-legacy", + "on", true); + object_register_sugar_prop(TYPE_VIRTIO_DEVICE, "iommu_platform", + "on", false); + } + + accel_init_interfaces(ACCEL_GET_CLASS(machine->accelerator)); + machine_class->init(machine); + phase_advance(PHASE_MACHINE_INITIALIZED); +} + +static NotifierList machine_init_done_notifiers = + NOTIFIER_LIST_INITIALIZER(machine_init_done_notifiers); + +void qemu_add_machine_init_done_notifier(Notifier *notify) +{ + notifier_list_add(&machine_init_done_notifiers, notify); + if (phase_check(PHASE_MACHINE_READY)) { + notify->notify(notify, NULL); + } +} + +void qemu_remove_machine_init_done_notifier(Notifier *notify) +{ + notifier_remove(notify); +} + +void qdev_machine_creation_done(void) +{ + cpu_synchronize_all_post_init(); + + if (current_machine->boot_once) { + qemu_boot_set(current_machine->boot_once, &error_fatal); + qemu_register_reset(restore_boot_order, g_strdup(current_machine->boot_order)); + } + + /* + * ok, initial machine setup is done, starting from now we can + * only create hotpluggable devices + */ + phase_advance(PHASE_MACHINE_READY); + qdev_assert_realized_properly(); + + /* TODO: once all bus devices are qdevified, this should be done + * when bus is created by qdev.c */ + /* + * TODO: If we had a main 'reset container' that the whole system + * lived in, we could reset that using the multi-phase reset + * APIs. For the moment, we just reset the sysbus, which will cause + * all devices hanging off it (and all their child buses, recursively) + * to be reset. Note that this will *not* reset any Device objects + * which are not attached to some part of the qbus tree! + */ + qemu_register_reset(resettable_cold_reset_fn, sysbus_get_default()); + + notifier_list_notify(&machine_init_done_notifiers, NULL); + + if (rom_check_and_register_reset() != 0) { + exit(1); + } + + replay_start(); + + /* This checkpoint is required by replay to separate prior clock + reading from the other reads, because timer polling functions query + clock values from the log. 
*/ + replay_checkpoint(CHECKPOINT_RESET); + qemu_system_reset(SHUTDOWN_CAUSE_NONE); + register_global_state(); +} + +static const TypeInfo machine_info = { + .name = TYPE_MACHINE, + .parent = TYPE_OBJECT, + .abstract = true, + .class_size = sizeof(MachineClass), + .class_init = machine_class_init, + .class_base_init = machine_class_base_init, + .instance_size = sizeof(MachineState), + .instance_init = machine_initfn, + .instance_finalize = machine_finalize, +}; + +static void machine_register_types(void) +{ + type_register_static(&machine_info); +} + +type_init(machine_register_types) diff --git a/hw/core/meson.build b/hw/core/meson.build new file mode 100644 index 000000000..0f884d6fd --- /dev/null +++ b/hw/core/meson.build @@ -0,0 +1,56 @@ +# core qdev-related obj files, also used by *-user and unit tests +hwcore_ss.add(files( + 'bus.c', + 'qdev-properties.c', + 'qdev.c', + 'reset.c', + 'resettable.c', + 'vmstate-if.c', + # irq.c needed for qdev GPIO handling: + 'irq.c', + 'clock.c', + 'qdev-clock.c', +)) +if have_system + hwcore_ss.add(files( + 'hotplug.c', + 'qdev-hotplug.c', + )) +else + hwcore_ss.add(files( + 'hotplug-stubs.c', + )) +endif + +common_ss.add(files('cpu-common.c')) +common_ss.add(files('machine-smp.c')) +softmmu_ss.add(when: 'CONFIG_FITLOADER', if_true: files('loader-fit.c')) +softmmu_ss.add(when: 'CONFIG_GENERIC_LOADER', if_true: files('generic-loader.c')) +softmmu_ss.add(when: ['CONFIG_GUEST_LOADER', fdt], if_true: files('guest-loader.c')) +softmmu_ss.add(when: 'CONFIG_OR_IRQ', if_true: files('or-irq.c')) +softmmu_ss.add(when: 'CONFIG_PLATFORM_BUS', if_true: files('platform-bus.c')) +softmmu_ss.add(when: 'CONFIG_PTIMER', if_true: files('ptimer.c')) +softmmu_ss.add(when: 'CONFIG_REGISTER', if_true: files('register.c')) +softmmu_ss.add(when: 'CONFIG_SPLIT_IRQ', if_true: files('split-irq.c')) +softmmu_ss.add(when: 'CONFIG_XILINX_AXI', if_true: files('stream.c')) + +softmmu_ss.add(files( + 'cpu-sysemu.c', + 'fw-path-provider.c', + 'gpio.c', + 'loader.c', + 'machine-hmp-cmds.c', + 'machine.c', + 'nmi.c', + 'null-machine.c', + 'qdev-fw.c', + 'qdev-properties-system.c', + 'sysbus.c', + 'vm-change-state-handler.c', + 'clock-vmstate.c', +)) + +specific_ss.add(when: 'CONFIG_SOFTMMU', if_true: files( + 'machine-qmp-cmds.c', + 'numa.c', +)) diff --git a/hw/core/nmi.c b/hw/core/nmi.c new file mode 100644 index 000000000..481c4b3c7 --- /dev/null +++ b/hw/core/nmi.c @@ -0,0 +1,88 @@ +/* + * NMI monitor handler class and helpers. + * + * Copyright IBM Corp., 2014 + * + * Author: Alexey Kardashevskiy + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . 
+ */ + +#include "qemu/osdep.h" +#include "hw/nmi.h" +#include "qapi/error.h" +#include "qapi/qmp/qerror.h" +#include "qemu/module.h" +#include "monitor/monitor.h" + +struct do_nmi_s { + int cpu_index; + Error *err; + bool handled; +}; + +static void nmi_children(Object *o, struct do_nmi_s *ns); + +static int do_nmi(Object *o, void *opaque) +{ + struct do_nmi_s *ns = opaque; + NMIState *n = (NMIState *) object_dynamic_cast(o, TYPE_NMI); + + if (n) { + NMIClass *nc = NMI_GET_CLASS(n); + + ns->handled = true; + nc->nmi_monitor_handler(n, ns->cpu_index, &ns->err); + if (ns->err) { + return -1; + } + } + nmi_children(o, ns); + + return 0; +} + +static void nmi_children(Object *o, struct do_nmi_s *ns) +{ + object_child_foreach(o, do_nmi, ns); +} + +void nmi_monitor_handle(int cpu_index, Error **errp) +{ + struct do_nmi_s ns = { + .cpu_index = cpu_index, + .err = NULL, + .handled = false + }; + + nmi_children(object_get_root(), &ns); + if (ns.handled) { + error_propagate(errp, ns.err); + } else { + error_setg(errp, QERR_UNSUPPORTED); + } +} + +static const TypeInfo nmi_info = { + .name = TYPE_NMI, + .parent = TYPE_INTERFACE, + .class_size = sizeof(NMIClass), +}; + +static void nmi_register_types(void) +{ + type_register_static(&nmi_info); +} + +type_init(nmi_register_types) diff --git a/hw/core/null-machine.c b/hw/core/null-machine.c new file mode 100644 index 000000000..f586a4bef --- /dev/null +++ b/hw/core/null-machine.c @@ -0,0 +1,59 @@ +/* + * Empty machine + * + * Copyright IBM, Corp. 2012 + * + * Authors: + * Anthony Liguori + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +#include "qemu/osdep.h" +#include "qemu/error-report.h" +#include "hw/boards.h" +#include "exec/address-spaces.h" +#include "hw/core/cpu.h" + +static void machine_none_init(MachineState *mch) +{ + CPUState *cpu = NULL; + + /* Initialize CPU (if user asked for it) */ + if (mch->cpu_type) { + cpu = cpu_create(mch->cpu_type); + if (!cpu) { + error_report("Unable to initialize CPU"); + exit(1); + } + } + + /* RAM at address zero */ + if (mch->ram) { + memory_region_add_subregion(get_system_memory(), 0, mch->ram); + } + + if (mch->kernel_filename) { + error_report("The -kernel parameter is not supported " + "(use the generic 'loader' device instead)."); + exit(1); + } +} + +static void machine_none_machine_init(MachineClass *mc) +{ + mc->desc = "empty machine"; + mc->init = machine_none_init; + mc->max_cpus = 1; + mc->default_ram_size = 0; + mc->default_ram_id = "ram"; + mc->no_serial = 1; + mc->no_parallel = 1; + mc->no_floppy = 1; + mc->no_cdrom = 1; + mc->no_sdcard = 1; +} + +DEFINE_MACHINE("none", machine_none_machine_init) diff --git a/hw/core/numa.c b/hw/core/numa.c new file mode 100644 index 000000000..e6050b227 --- /dev/null +++ b/hw/core/numa.c @@ -0,0 +1,872 @@ +/* + * NUMA parameter parsing routines + * + * Copyright (c) 2014 Fujitsu Ltd. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "sysemu/hostmem.h" +#include "sysemu/numa.h" +#include "exec/cpu-common.h" +#include "exec/ramlist.h" +#include "qemu/bitmap.h" +#include "qemu/error-report.h" +#include "qapi/error.h" +#include "qapi/opts-visitor.h" +#include "qapi/qapi-visit-machine.h" +#include "sysemu/qtest.h" +#include "hw/core/cpu.h" +#include "hw/mem/pc-dimm.h" +#include "migration/vmstate.h" +#include "hw/boards.h" +#include "hw/mem/memory-device.h" +#include "qemu/option.h" +#include "qemu/config-file.h" +#include "qemu/cutils.h" + +QemuOptsList qemu_numa_opts = { + .name = "numa", + .implied_opt_name = "type", + .head = QTAILQ_HEAD_INITIALIZER(qemu_numa_opts.head), + .desc = { { 0 } } /* validated with OptsVisitor */ +}; + +static int have_memdevs; +bool numa_uses_legacy_mem(void) +{ + return !have_memdevs; +} + +static int have_mem; +static int max_numa_nodeid; /* Highest specified NUMA node ID, plus one. + * For all nodes, nodeid < max_numa_nodeid + */ + +static void parse_numa_node(MachineState *ms, NumaNodeOptions *node, + Error **errp) +{ + Error *err = NULL; + uint16_t nodenr; + uint16List *cpus = NULL; + MachineClass *mc = MACHINE_GET_CLASS(ms); + unsigned int max_cpus = ms->smp.max_cpus; + NodeInfo *numa_info = ms->numa_state->nodes; + + if (node->has_nodeid) { + nodenr = node->nodeid; + } else { + nodenr = ms->numa_state->num_nodes; + } + + if (nodenr >= MAX_NODES) { + error_setg(errp, "Max number of NUMA nodes reached: %" + PRIu16 "", nodenr); + return; + } + + if (numa_info[nodenr].present) { + error_setg(errp, "Duplicate NUMA nodeid: %" PRIu16, nodenr); + return; + } + + /* + * If not set the initiator, set it to MAX_NODES. And if + * HMAT is enabled and this node has no cpus, QEMU will raise error. 
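+ *
+ * (Illustrative command line, assuming "-machine hmat=on":
+ *  -numa node,nodeid=0,cpus=0-1 -numa node,nodeid=1,initiator=0)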
+ */ + numa_info[nodenr].initiator = MAX_NODES; + if (node->has_initiator) { + if (!ms->numa_state->hmat_enabled) { + error_setg(errp, "ACPI Heterogeneous Memory Attribute Table " + "(HMAT) is disabled, enable it with -machine hmat=on " + "before using any of hmat specific options"); + return; + } + + if (node->initiator >= MAX_NODES) { + error_report("The initiator id %" PRIu16 " expects an integer " + "between 0 and %d", node->initiator, + MAX_NODES - 1); + return; + } + + numa_info[nodenr].initiator = node->initiator; + } + + for (cpus = node->cpus; cpus; cpus = cpus->next) { + CpuInstanceProperties props; + if (cpus->value >= max_cpus) { + error_setg(errp, + "CPU index (%" PRIu16 ")" + " should be smaller than maxcpus (%d)", + cpus->value, max_cpus); + return; + } + props = mc->cpu_index_to_instance_props(ms, cpus->value); + props.node_id = nodenr; + props.has_node_id = true; + machine_set_cpu_numa_node(ms, &props, &err); + if (err) { + error_propagate(errp, err); + return; + } + } + + have_memdevs = have_memdevs ? : node->has_memdev; + have_mem = have_mem ? : node->has_mem; + if ((node->has_mem && have_memdevs) || (node->has_memdev && have_mem)) { + error_setg(errp, "numa configuration should use either mem= or memdev=," + "mixing both is not allowed"); + return; + } + + if (node->has_mem) { + if (!mc->numa_mem_supported) { + error_setg(errp, "Parameter -numa node,mem is not supported by this" + " machine type"); + error_append_hint(errp, "Use -numa node,memdev instead\n"); + return; + } + + numa_info[nodenr].node_mem = node->mem; + if (!qtest_enabled()) { + warn_report("Parameter -numa node,mem is deprecated," + " use -numa node,memdev instead"); + } + } + if (node->has_memdev) { + Object *o; + o = object_resolve_path_type(node->memdev, TYPE_MEMORY_BACKEND, NULL); + if (!o) { + error_setg(errp, "memdev=%s is ambiguous", node->memdev); + return; + } + + object_ref(o); + numa_info[nodenr].node_mem = object_property_get_uint(o, "size", NULL); + numa_info[nodenr].node_memdev = MEMORY_BACKEND(o); + } + + numa_info[nodenr].present = true; + max_numa_nodeid = MAX(max_numa_nodeid, nodenr + 1); + ms->numa_state->num_nodes++; +} + +static +void parse_numa_distance(MachineState *ms, NumaDistOptions *dist, Error **errp) +{ + uint16_t src = dist->src; + uint16_t dst = dist->dst; + uint8_t val = dist->val; + NodeInfo *numa_info = ms->numa_state->nodes; + + if (src >= MAX_NODES || dst >= MAX_NODES) { + error_setg(errp, "Parameter '%s' expects an integer between 0 and %d", + src >= MAX_NODES ? "src" : "dst", MAX_NODES - 1); + return; + } + + if (!numa_info[src].present || !numa_info[dst].present) { + error_setg(errp, "Source/Destination NUMA node is missing. 
" + "Please use '-numa node' option to declare it first."); + return; + } + + if (val < NUMA_DISTANCE_MIN) { + error_setg(errp, "NUMA distance (%" PRIu8 ") is invalid, " + "it shouldn't be less than %d.", + val, NUMA_DISTANCE_MIN); + return; + } + + if (src == dst && val != NUMA_DISTANCE_MIN) { + error_setg(errp, "Local distance of node %d should be %d.", + src, NUMA_DISTANCE_MIN); + return; + } + + numa_info[src].distance[dst] = val; + ms->numa_state->have_numa_distance = true; +} + +void parse_numa_hmat_lb(NumaState *numa_state, NumaHmatLBOptions *node, + Error **errp) +{ + int i, first_bit, last_bit; + uint64_t max_entry, temp_base, bitmap_copy; + NodeInfo *numa_info = numa_state->nodes; + HMAT_LB_Info *hmat_lb = + numa_state->hmat_lb[node->hierarchy][node->data_type]; + HMAT_LB_Data lb_data = {}; + HMAT_LB_Data *lb_temp; + + /* Error checking */ + if (node->initiator > numa_state->num_nodes) { + error_setg(errp, "Invalid initiator=%d, it should be less than %d", + node->initiator, numa_state->num_nodes); + return; + } + if (node->target > numa_state->num_nodes) { + error_setg(errp, "Invalid target=%d, it should be less than %d", + node->target, numa_state->num_nodes); + return; + } + if (!numa_info[node->initiator].has_cpu) { + error_setg(errp, "Invalid initiator=%d, it isn't an " + "initiator proximity domain", node->initiator); + return; + } + if (!numa_info[node->target].present) { + error_setg(errp, "The target=%d should point to an existing node", + node->target); + return; + } + + if (!hmat_lb) { + hmat_lb = g_malloc0(sizeof(*hmat_lb)); + numa_state->hmat_lb[node->hierarchy][node->data_type] = hmat_lb; + hmat_lb->list = g_array_new(false, true, sizeof(HMAT_LB_Data)); + } + hmat_lb->hierarchy = node->hierarchy; + hmat_lb->data_type = node->data_type; + lb_data.initiator = node->initiator; + lb_data.target = node->target; + + if (node->data_type <= HMATLB_DATA_TYPE_WRITE_LATENCY) { + /* Input latency data */ + + if (!node->has_latency) { + error_setg(errp, "Missing 'latency' option"); + return; + } + if (node->has_bandwidth) { + error_setg(errp, "Invalid option 'bandwidth' since " + "the data type is latency"); + return; + } + + /* Detect duplicate configuration */ + for (i = 0; i < hmat_lb->list->len; i++) { + lb_temp = &g_array_index(hmat_lb->list, HMAT_LB_Data, i); + + if (node->initiator == lb_temp->initiator && + node->target == lb_temp->target) { + error_setg(errp, "Duplicate configuration of the latency for " + "initiator=%d and target=%d", node->initiator, + node->target); + return; + } + } + + hmat_lb->base = hmat_lb->base ? 
hmat_lb->base : UINT64_MAX; + + if (node->latency) { + /* Calculate the temporary base and compressed latency */ + max_entry = node->latency; + temp_base = 1; + while (QEMU_IS_ALIGNED(max_entry, 10)) { + max_entry /= 10; + temp_base *= 10; + } + + /* Calculate the max compressed latency */ + temp_base = MIN(hmat_lb->base, temp_base); + max_entry = node->latency / hmat_lb->base; + max_entry = MAX(hmat_lb->range_bitmap, max_entry); + + /* + * For latency hmat_lb->range_bitmap record the max compressed + * latency which should be less than 0xFFFF (UINT16_MAX) + */ + if (max_entry >= UINT16_MAX) { + error_setg(errp, "Latency %" PRIu64 " between initiator=%d and " + "target=%d should not differ from previously entered " + "min or max values on more than %d", node->latency, + node->initiator, node->target, UINT16_MAX - 1); + return; + } else { + hmat_lb->base = temp_base; + hmat_lb->range_bitmap = max_entry; + } + + /* + * Set lb_info_provided bit 0 as 1, + * latency information is provided + */ + numa_info[node->target].lb_info_provided |= BIT(0); + } + lb_data.data = node->latency; + } else if (node->data_type >= HMATLB_DATA_TYPE_ACCESS_BANDWIDTH) { + /* Input bandwidth data */ + if (!node->has_bandwidth) { + error_setg(errp, "Missing 'bandwidth' option"); + return; + } + if (node->has_latency) { + error_setg(errp, "Invalid option 'latency' since " + "the data type is bandwidth"); + return; + } + if (!QEMU_IS_ALIGNED(node->bandwidth, MiB)) { + error_setg(errp, "Bandwidth %" PRIu64 " between initiator=%d and " + "target=%d should be 1MB aligned", node->bandwidth, + node->initiator, node->target); + return; + } + + /* Detect duplicate configuration */ + for (i = 0; i < hmat_lb->list->len; i++) { + lb_temp = &g_array_index(hmat_lb->list, HMAT_LB_Data, i); + + if (node->initiator == lb_temp->initiator && + node->target == lb_temp->target) { + error_setg(errp, "Duplicate configuration of the bandwidth for " + "initiator=%d and target=%d", node->initiator, + node->target); + return; + } + } + + hmat_lb->base = hmat_lb->base ? hmat_lb->base : 1; + + if (node->bandwidth) { + /* Keep bitmap unchanged when bandwidth out of range */ + bitmap_copy = hmat_lb->range_bitmap; + bitmap_copy |= node->bandwidth; + first_bit = ctz64(bitmap_copy); + temp_base = UINT64_C(1) << first_bit; + max_entry = node->bandwidth / temp_base; + last_bit = 64 - clz64(bitmap_copy); + + /* + * For bandwidth, first_bit record the base unit of bandwidth bits, + * last_bit record the last bit of the max bandwidth. 
The max + * compressed bandwidth should be less than 0xFFFF (UINT16_MAX) + */ + if ((last_bit - first_bit) > UINT16_BITS || + max_entry >= UINT16_MAX) { + error_setg(errp, "Bandwidth %" PRIu64 " between initiator=%d " + "and target=%d should not differ from previously " + "entered values on more than %d", node->bandwidth, + node->initiator, node->target, UINT16_MAX - 1); + return; + } else { + hmat_lb->base = temp_base; + hmat_lb->range_bitmap = bitmap_copy; + } + + /* + * Set lb_info_provided bit 1 as 1, + * bandwidth information is provided + */ + numa_info[node->target].lb_info_provided |= BIT(1); + } + lb_data.data = node->bandwidth; + } else { + assert(0); + } + + g_array_append_val(hmat_lb->list, lb_data); +} + +void parse_numa_hmat_cache(MachineState *ms, NumaHmatCacheOptions *node, + Error **errp) +{ + int nb_numa_nodes = ms->numa_state->num_nodes; + NodeInfo *numa_info = ms->numa_state->nodes; + NumaHmatCacheOptions *hmat_cache = NULL; + + if (node->node_id >= nb_numa_nodes) { + error_setg(errp, "Invalid node-id=%" PRIu32 ", it should be less " + "than %d", node->node_id, nb_numa_nodes); + return; + } + + if (numa_info[node->node_id].lb_info_provided != (BIT(0) | BIT(1))) { + error_setg(errp, "The latency and bandwidth information of " + "node-id=%" PRIu32 " should be provided before memory side " + "cache attributes", node->node_id); + return; + } + + if (node->level < 1 || node->level >= HMAT_LB_LEVELS) { + error_setg(errp, "Invalid level=%" PRIu8 ", it should be larger than 0 " + "and less than or equal to %d", node->level, + HMAT_LB_LEVELS - 1); + return; + } + + assert(node->associativity < HMAT_CACHE_ASSOCIATIVITY__MAX); + assert(node->policy < HMAT_CACHE_WRITE_POLICY__MAX); + if (ms->numa_state->hmat_cache[node->node_id][node->level]) { + error_setg(errp, "Duplicate configuration of the side cache for " + "node-id=%" PRIu32 " and level=%" PRIu8, + node->node_id, node->level); + return; + } + + if ((node->level > 1) && + ms->numa_state->hmat_cache[node->node_id][node->level - 1] == NULL) { + error_setg(errp, "Cache level=%u shall be defined first", + node->level - 1); + return; + } + + if ((node->level > 1) && + (node->size <= + ms->numa_state->hmat_cache[node->node_id][node->level - 1]->size)) { + error_setg(errp, "Invalid size=%" PRIu64 ", the size of level=%" PRIu8 + " should be larger than the size(%" PRIu64 ") of " + "level=%u", node->size, node->level, + ms->numa_state->hmat_cache[node->node_id] + [node->level - 1]->size, + node->level - 1); + return; + } + + if ((node->level < HMAT_LB_LEVELS - 1) && + ms->numa_state->hmat_cache[node->node_id][node->level + 1] && + (node->size >= + ms->numa_state->hmat_cache[node->node_id][node->level + 1]->size)) { + error_setg(errp, "Invalid size=%" PRIu64 ", the size of level=%" PRIu8 + " should be less than the size(%" PRIu64 ") of " + "level=%u", node->size, node->level, + ms->numa_state->hmat_cache[node->node_id] + [node->level + 1]->size, + node->level + 1); + return; + } + + hmat_cache = g_malloc0(sizeof(*hmat_cache)); + memcpy(hmat_cache, node, sizeof(*hmat_cache)); + ms->numa_state->hmat_cache[node->node_id][node->level] = hmat_cache; +} + +void set_numa_options(MachineState *ms, NumaOptions *object, Error **errp) +{ + if (!ms->numa_state) { + error_setg(errp, "NUMA is not supported by this machine-type"); + return; + } + + switch (object->type) { + case NUMA_OPTIONS_TYPE_NODE: + parse_numa_node(ms, &object->u.node, errp); + break; + case NUMA_OPTIONS_TYPE_DIST: + parse_numa_distance(ms, &object->u.dist, errp); + break; + 
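+    /*
+     * Illustrative use (not from this patch): -numa cpu,node-id=0,socket-id=1
+     * binds the CPUs of socket 1 to node 0; the node itself must have been
+     * declared with -numa node first, which the check below enforces.
+     */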
case NUMA_OPTIONS_TYPE_CPU: + if (!object->u.cpu.has_node_id) { + error_setg(errp, "Missing mandatory node-id property"); + return; + } + if (!ms->numa_state->nodes[object->u.cpu.node_id].present) { + error_setg(errp, "Invalid node-id=%" PRId64 ", NUMA node must be " + "defined with -numa node,nodeid=ID before it's used with " + "-numa cpu,node-id=ID", object->u.cpu.node_id); + return; + } + + machine_set_cpu_numa_node(ms, + qapi_NumaCpuOptions_base(&object->u.cpu), + errp); + break; + case NUMA_OPTIONS_TYPE_HMAT_LB: + if (!ms->numa_state->hmat_enabled) { + error_setg(errp, "ACPI Heterogeneous Memory Attribute Table " + "(HMAT) is disabled, enable it with -machine hmat=on " + "before using any of hmat specific options"); + return; + } + + parse_numa_hmat_lb(ms->numa_state, &object->u.hmat_lb, errp); + break; + case NUMA_OPTIONS_TYPE_HMAT_CACHE: + if (!ms->numa_state->hmat_enabled) { + error_setg(errp, "ACPI Heterogeneous Memory Attribute Table " + "(HMAT) is disabled, enable it with -machine hmat=on " + "before using any of hmat specific options"); + return; + } + + parse_numa_hmat_cache(ms, &object->u.hmat_cache, errp); + break; + default: + abort(); + } +} + +static int parse_numa(void *opaque, QemuOpts *opts, Error **errp) +{ + NumaOptions *object = NULL; + MachineState *ms = MACHINE(opaque); + Error *err = NULL; + Visitor *v = opts_visitor_new(opts); + + visit_type_NumaOptions(v, NULL, &object, errp); + visit_free(v); + if (!object) { + return -1; + } + + /* Fix up legacy suffix-less format */ + if ((object->type == NUMA_OPTIONS_TYPE_NODE) && object->u.node.has_mem) { + const char *mem_str = qemu_opt_get(opts, "mem"); + qemu_strtosz_MiB(mem_str, NULL, &object->u.node.mem); + } + + set_numa_options(ms, object, &err); + + qapi_free_NumaOptions(object); + if (err) { + error_propagate(errp, err); + return -1; + } + + return 0; +} + +/* If all node pair distances are symmetric, then only distances + * in one direction are enough. If there is even one asymmetric + * pair, though, then all distances must be provided. The + * distance from a node to itself is always NUMA_DISTANCE_MIN, + * so providing it is never necessary. 
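+ *
+ * For a symmetric two-node setup a single value is therefore enough
+ * (illustrative command line only):
+ *   -numa dist,src=0,dst=1,val=20
+ * QEMU then derives dst->src = 20 and the two local distances of 10 itself.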
+ */ +static void validate_numa_distance(MachineState *ms) +{ + int src, dst; + bool is_asymmetrical = false; + int nb_numa_nodes = ms->numa_state->num_nodes; + NodeInfo *numa_info = ms->numa_state->nodes; + + for (src = 0; src < nb_numa_nodes; src++) { + for (dst = src; dst < nb_numa_nodes; dst++) { + if (numa_info[src].distance[dst] == 0 && + numa_info[dst].distance[src] == 0) { + if (src != dst) { + error_report("The distance between node %d and %d is " + "missing, at least one distance value " + "between each nodes should be provided.", + src, dst); + exit(EXIT_FAILURE); + } + } + + if (numa_info[src].distance[dst] != 0 && + numa_info[dst].distance[src] != 0 && + numa_info[src].distance[dst] != + numa_info[dst].distance[src]) { + is_asymmetrical = true; + } + } + } + + if (is_asymmetrical) { + for (src = 0; src < nb_numa_nodes; src++) { + for (dst = 0; dst < nb_numa_nodes; dst++) { + if (src != dst && numa_info[src].distance[dst] == 0) { + error_report("At least one asymmetrical pair of " + "distances is given, please provide distances " + "for both directions of all node pairs."); + exit(EXIT_FAILURE); + } + } + } + } +} + +static void complete_init_numa_distance(MachineState *ms) +{ + int src, dst; + NodeInfo *numa_info = ms->numa_state->nodes; + + /* Fixup NUMA distance by symmetric policy because if it is an + * asymmetric distance table, it should be a complete table and + * there would not be any missing distance except local node, which + * is verified by validate_numa_distance above. + */ + for (src = 0; src < ms->numa_state->num_nodes; src++) { + for (dst = 0; dst < ms->numa_state->num_nodes; dst++) { + if (numa_info[src].distance[dst] == 0) { + if (src == dst) { + numa_info[src].distance[dst] = NUMA_DISTANCE_MIN; + } else { + numa_info[src].distance[dst] = numa_info[dst].distance[src]; + } + } + } + } +} + +static void numa_init_memdev_container(MachineState *ms, MemoryRegion *ram) +{ + int i; + uint64_t addr = 0; + + for (i = 0; i < ms->numa_state->num_nodes; i++) { + uint64_t size = ms->numa_state->nodes[i].node_mem; + HostMemoryBackend *backend = ms->numa_state->nodes[i].node_memdev; + if (!backend) { + continue; + } + MemoryRegion *seg = machine_consume_memdev(ms, backend); + memory_region_add_subregion(ram, addr, seg); + addr += size; + } +} + +void numa_complete_configuration(MachineState *ms) +{ + int i; + MachineClass *mc = MACHINE_GET_CLASS(ms); + NodeInfo *numa_info = ms->numa_state->nodes; + + /* + * If memory hotplug is enabled (slot > 0) or memory devices are enabled + * (ms->maxram_size > ms->ram_size) but without '-numa' options explicitly on + * CLI, guests will break. + * + * Windows: won't enable memory hotplug without SRAT table at all + * + * Linux: if QEMU is started with initial memory all below 4Gb + * and no SRAT table present, guest kernel will use nommu DMA ops, + * which breaks 32bit hw drivers when memory is hotplugged and + * guest tries to use it with that drivers. + * + * Enable NUMA implicitly by adding a new NUMA node automatically. + * + * Or if MachineClass::auto_enable_numa is true and no NUMA nodes, + * assume there is just one node with whole RAM. 
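+     *
+     * Concretely (illustrative; behaviour depends on the machine type):
+     * starting a machine that sets auto_enable_numa_with_memhp with
+     *   -m 2G,slots=2,maxmem=4G
+     * and no -numa options ends up with a single node 0 owning all 2G.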
+ */ + if (ms->numa_state->num_nodes == 0 && + ((ms->ram_slots && mc->auto_enable_numa_with_memhp) || + (ms->maxram_size > ms->ram_size && mc->auto_enable_numa_with_memdev) || + mc->auto_enable_numa)) { + NumaNodeOptions node = { }; + parse_numa_node(ms, &node, &error_abort); + numa_info[0].node_mem = ms->ram_size; + } + + assert(max_numa_nodeid <= MAX_NODES); + + /* No support for sparse NUMA node IDs yet: */ + for (i = max_numa_nodeid - 1; i >= 0; i--) { + /* Report large node IDs first, to make mistakes easier to spot */ + if (!numa_info[i].present) { + error_report("numa: Node ID missing: %d", i); + exit(1); + } + } + + /* This must be always true if all nodes are present: */ + assert(ms->numa_state->num_nodes == max_numa_nodeid); + + if (ms->numa_state->num_nodes > 0) { + uint64_t numa_total; + + numa_total = 0; + for (i = 0; i < ms->numa_state->num_nodes; i++) { + numa_total += numa_info[i].node_mem; + } + if (numa_total != ms->ram_size) { + error_report("total memory for NUMA nodes (0x%" PRIx64 ")" + " should equal RAM size (0x" RAM_ADDR_FMT ")", + numa_total, ms->ram_size); + exit(1); + } + + if (!numa_uses_legacy_mem() && mc->default_ram_id) { + if (ms->ram_memdev_id) { + error_report("'-machine memory-backend' and '-numa memdev'" + " properties are mutually exclusive"); + exit(1); + } + ms->ram = g_new(MemoryRegion, 1); + memory_region_init(ms->ram, OBJECT(ms), mc->default_ram_id, + ms->ram_size); + numa_init_memdev_container(ms, ms->ram); + } + /* QEMU needs at least all unique node pair distances to build + * the whole NUMA distance table. QEMU treats the distance table + * as symmetric by default, i.e. distance A->B == distance B->A. + * Thus, QEMU is able to complete the distance table + * initialization even though only distance A->B is provided and + * distance B->A is not. QEMU knows the distance of a node to + * itself is always 10, so A->A distances may be omitted. When + * the distances of two nodes of a pair differ, i.e. distance + * A->B != distance B->A, then that means the distance table is + * asymmetric. In this case, the distances for both directions + * of all node pairs are required. + */ + if (ms->numa_state->have_numa_distance) { + /* Validate enough NUMA distance information was provided. */ + validate_numa_distance(ms); + + /* Validation succeeded, now fill in any missing distances. 
*/ + complete_init_numa_distance(ms); + } + } +} + +void parse_numa_opts(MachineState *ms) +{ + qemu_opts_foreach(qemu_find_opts("numa"), parse_numa, ms, &error_fatal); +} + +void numa_cpu_pre_plug(const CPUArchId *slot, DeviceState *dev, Error **errp) +{ + int node_id = object_property_get_int(OBJECT(dev), "node-id", &error_abort); + + if (node_id == CPU_UNSET_NUMA_NODE_ID) { + /* due to bug in libvirt, it doesn't pass node-id from props on + * device_add as expected, so we have to fix it up here */ + if (slot->props.has_node_id) { + object_property_set_int(OBJECT(dev), "node-id", + slot->props.node_id, errp); + } + } else if (node_id != slot->props.node_id) { + error_setg(errp, "invalid node-id, must be %"PRId64, + slot->props.node_id); + } +} + +static void numa_stat_memory_devices(NumaNodeMem node_mem[]) +{ + MemoryDeviceInfoList *info_list = qmp_memory_device_list(); + MemoryDeviceInfoList *info; + PCDIMMDeviceInfo *pcdimm_info; + VirtioPMEMDeviceInfo *vpi; + VirtioMEMDeviceInfo *vmi; + SgxEPCDeviceInfo *se; + + for (info = info_list; info; info = info->next) { + MemoryDeviceInfo *value = info->value; + + if (value) { + switch (value->type) { + case MEMORY_DEVICE_INFO_KIND_DIMM: + case MEMORY_DEVICE_INFO_KIND_NVDIMM: + pcdimm_info = value->type == MEMORY_DEVICE_INFO_KIND_DIMM ? + value->u.dimm.data : value->u.nvdimm.data; + node_mem[pcdimm_info->node].node_mem += pcdimm_info->size; + node_mem[pcdimm_info->node].node_plugged_mem += + pcdimm_info->size; + break; + case MEMORY_DEVICE_INFO_KIND_VIRTIO_PMEM: + vpi = value->u.virtio_pmem.data; + /* TODO: once we support numa, assign to right node */ + node_mem[0].node_mem += vpi->size; + node_mem[0].node_plugged_mem += vpi->size; + break; + case MEMORY_DEVICE_INFO_KIND_VIRTIO_MEM: + vmi = value->u.virtio_mem.data; + node_mem[vmi->node].node_mem += vmi->size; + node_mem[vmi->node].node_plugged_mem += vmi->size; + break; + case MEMORY_DEVICE_INFO_KIND_SGX_EPC: + se = value->u.sgx_epc.data; + /* TODO: once we support numa, assign to right node */ + node_mem[0].node_mem += se->size; + node_mem[0].node_plugged_mem += se->size; + break; + default: + g_assert_not_reached(); + } + } + } + qapi_free_MemoryDeviceInfoList(info_list); +} + +void query_numa_node_mem(NumaNodeMem node_mem[], MachineState *ms) +{ + int i; + + if (ms->numa_state == NULL || ms->numa_state->num_nodes <= 0) { + return; + } + + numa_stat_memory_devices(node_mem); + for (i = 0; i < ms->numa_state->num_nodes; i++) { + node_mem[i].node_mem += ms->numa_state->nodes[i].node_mem; + } +} + +static int ram_block_notify_add_single(RAMBlock *rb, void *opaque) +{ + const ram_addr_t max_size = qemu_ram_get_max_length(rb); + const ram_addr_t size = qemu_ram_get_used_length(rb); + void *host = qemu_ram_get_host_addr(rb); + RAMBlockNotifier *notifier = opaque; + + if (host) { + notifier->ram_block_added(notifier, host, size, max_size); + } + return 0; +} + +void ram_block_notifier_add(RAMBlockNotifier *n) +{ + QLIST_INSERT_HEAD(&ram_list.ramblock_notifiers, n, next); + + /* Notify about all existing ram blocks. 
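+     *
+     * A caller typically fills in only the hooks it needs and registers
+     * once (sketch; my_ram_block_added is an assumed callback):
+     *
+     *   static RAMBlockNotifier notifier = {
+     *       .ram_block_added = my_ram_block_added,
+     *   };
+     *   ram_block_notifier_add(&notifier);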
*/ + if (n->ram_block_added) { + qemu_ram_foreach_block(ram_block_notify_add_single, n); + } +} + +void ram_block_notifier_remove(RAMBlockNotifier *n) +{ + QLIST_REMOVE(n, next); +} + +void ram_block_notify_add(void *host, size_t size, size_t max_size) +{ + RAMBlockNotifier *notifier; + + QLIST_FOREACH(notifier, &ram_list.ramblock_notifiers, next) { + if (notifier->ram_block_added) { + notifier->ram_block_added(notifier, host, size, max_size); + } + } +} + +void ram_block_notify_remove(void *host, size_t size, size_t max_size) +{ + RAMBlockNotifier *notifier; + + QLIST_FOREACH(notifier, &ram_list.ramblock_notifiers, next) { + if (notifier->ram_block_removed) { + notifier->ram_block_removed(notifier, host, size, max_size); + } + } +} + +void ram_block_notify_resize(void *host, size_t old_size, size_t new_size) +{ + RAMBlockNotifier *notifier; + + QLIST_FOREACH(notifier, &ram_list.ramblock_notifiers, next) { + if (notifier->ram_block_resized) { + notifier->ram_block_resized(notifier, host, old_size, new_size); + } + } +} diff --git a/hw/core/or-irq.c b/hw/core/or-irq.c new file mode 100644 index 000000000..d8f3754e9 --- /dev/null +++ b/hw/core/or-irq.c @@ -0,0 +1,149 @@ +/* + * QEMU IRQ/GPIO common code. + * + * Copyright (c) 2016 Alistair Francis . + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "hw/irq.h" +#include "hw/or-irq.h" +#include "hw/qdev-properties.h" +#include "migration/vmstate.h" +#include "qemu/module.h" + +static void or_irq_handler(void *opaque, int n, int level) +{ + qemu_or_irq *s = OR_IRQ(opaque); + int or_level = 0; + int i; + + s->levels[n] = level; + + for (i = 0; i < s->num_lines; i++) { + or_level |= s->levels[i]; + } + + qemu_set_irq(s->out_irq, or_level); +} + +static void or_irq_reset(DeviceState *dev) +{ + qemu_or_irq *s = OR_IRQ(dev); + int i; + + for (i = 0; i < MAX_OR_LINES; i++) { + s->levels[i] = false; + } +} + +static void or_irq_realize(DeviceState *dev, Error **errp) +{ + qemu_or_irq *s = OR_IRQ(dev); + + assert(s->num_lines <= MAX_OR_LINES); + + qdev_init_gpio_in(dev, or_irq_handler, s->num_lines); +} + +static void or_irq_init(Object *obj) +{ + qemu_or_irq *s = OR_IRQ(obj); + + qdev_init_gpio_out(DEVICE(obj), &s->out_irq, 1); +} + +/* The original version of this device had a fixed 16 entries in its + * VMState array; devices with more inputs than this need to + * migrate the extra lines via a subsection. 
+ * The subsection migrates as much of the levels[] array as is needed + * (including repeating the first 16 elements), to avoid the awkwardness + * of splitting it in two to meet the requirements of VMSTATE_VARRAY_UINT16. + */ +#define OLD_MAX_OR_LINES 16 +#if MAX_OR_LINES < OLD_MAX_OR_LINES +#error MAX_OR_LINES must be at least 16 for migration compatibility +#endif + +static bool vmstate_extras_needed(void *opaque) +{ + qemu_or_irq *s = OR_IRQ(opaque); + + return s->num_lines >= OLD_MAX_OR_LINES; +} + +static const VMStateDescription vmstate_or_irq_extras = { + .name = "or-irq-extras", + .version_id = 1, + .minimum_version_id = 1, + .needed = vmstate_extras_needed, + .fields = (VMStateField[]) { + VMSTATE_VARRAY_UINT16_UNSAFE(levels, qemu_or_irq, num_lines, 0, + vmstate_info_bool, bool), + VMSTATE_END_OF_LIST(), + }, +}; + +static const VMStateDescription vmstate_or_irq = { + .name = TYPE_OR_IRQ, + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_BOOL_SUB_ARRAY(levels, qemu_or_irq, 0, OLD_MAX_OR_LINES), + VMSTATE_END_OF_LIST(), + }, + .subsections = (const VMStateDescription*[]) { + &vmstate_or_irq_extras, + NULL + }, +}; + +static Property or_irq_properties[] = { + DEFINE_PROP_UINT16("num-lines", qemu_or_irq, num_lines, 1), + DEFINE_PROP_END_OF_LIST(), +}; + +static void or_irq_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = or_irq_reset; + device_class_set_props(dc, or_irq_properties); + dc->realize = or_irq_realize; + dc->vmsd = &vmstate_or_irq; + + /* Reason: Needs to be wired up to work, e.g. see stm32f205_soc.c */ + dc->user_creatable = false; +} + +static const TypeInfo or_irq_type_info = { + .name = TYPE_OR_IRQ, + .parent = TYPE_DEVICE, + .instance_size = sizeof(qemu_or_irq), + .instance_init = or_irq_init, + .class_init = or_irq_class_init, +}; + +static void or_irq_register_types(void) +{ + type_register_static(&or_irq_type_info); +} + +type_init(or_irq_register_types) diff --git a/hw/core/platform-bus.c b/hw/core/platform-bus.c new file mode 100644 index 000000000..b8487b26b --- /dev/null +++ b/hw/core/platform-bus.c @@ -0,0 +1,230 @@ +/* + * Platform Bus device to support dynamic Sysbus devices + * + * Copyright (C) 2014 Freescale Semiconductor, Inc. All rights reserved. + * + * Author: Alexander Graf, + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ + +#include "qemu/osdep.h" +#include "hw/platform-bus.h" +#include "hw/qdev-properties.h" +#include "qapi/error.h" +#include "qemu/error-report.h" +#include "qemu/module.h" + + +/* + * Returns the PlatformBus IRQ number for a SysBusDevice irq number or -1 if + * the IRQ is not mapped on this Platform bus. 
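+ *
+ * Machine code can use the returned index e.g. to compute the interrupt
+ * number advertised in a generated device tree node (sketch; the platform
+ * bus base IRQ is a machine-specific constant assumed here):
+ *   irqn = platform_bus_get_irqn(pbus, sbdev, 0);
+ *   fdt_irq = machine_platform_bus_first_irq + irqn;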
+ */ +int platform_bus_get_irqn(PlatformBusDevice *pbus, SysBusDevice *sbdev, + int n) +{ + qemu_irq sbirq = sysbus_get_connected_irq(sbdev, n); + int i; + + for (i = 0; i < pbus->num_irqs; i++) { + if (pbus->irqs[i] == sbirq) { + return i; + } + } + + /* IRQ not mapped on platform bus */ + return -1; +} + +/* + * Returns the PlatformBus MMIO region offset for Region n of a SysBusDevice or + * -1 if the region is not mapped on this Platform bus. + */ +hwaddr platform_bus_get_mmio_addr(PlatformBusDevice *pbus, SysBusDevice *sbdev, + int n) +{ + MemoryRegion *pbus_mr = &pbus->mmio; + MemoryRegion *sbdev_mr = sysbus_mmio_get_region(sbdev, n); + Object *pbus_mr_obj = OBJECT(pbus_mr); + Object *parent_mr; + + if (!memory_region_is_mapped(sbdev_mr)) { + /* Region is not mapped? */ + return -1; + } + + parent_mr = object_property_get_link(OBJECT(sbdev_mr), "container", + &error_abort); + if (parent_mr != pbus_mr_obj) { + /* MMIO region is not mapped on platform bus */ + return -1; + } + + return object_property_get_uint(OBJECT(sbdev_mr), "addr", NULL); +} + +static void platform_bus_count_irqs(SysBusDevice *sbdev, void *opaque) +{ + PlatformBusDevice *pbus = opaque; + qemu_irq sbirq; + int n, i; + + for (n = 0; ; n++) { + if (!sysbus_has_irq(sbdev, n)) { + break; + } + + sbirq = sysbus_get_connected_irq(sbdev, n); + for (i = 0; i < pbus->num_irqs; i++) { + if (pbus->irqs[i] == sbirq) { + bitmap_set(pbus->used_irqs, i, 1); + break; + } + } + } +} + +/* + * Loop through all sysbus devices and look for unassigned IRQ lines as well as + * unassociated MMIO regions. Connect them to the platform bus if available. + */ +static void plaform_bus_refresh_irqs(PlatformBusDevice *pbus) +{ + bitmap_zero(pbus->used_irqs, pbus->num_irqs); + foreach_dynamic_sysbus_device(platform_bus_count_irqs, pbus); +} + +static void platform_bus_map_irq(PlatformBusDevice *pbus, SysBusDevice *sbdev, + int n) +{ + int max_irqs = pbus->num_irqs; + int irqn; + + if (sysbus_is_irq_connected(sbdev, n)) { + /* IRQ is already mapped, nothing to do */ + return; + } + + irqn = find_first_zero_bit(pbus->used_irqs, max_irqs); + if (irqn >= max_irqs) { + error_report("Platform Bus: Can not fit IRQ line"); + exit(1); + } + + set_bit(irqn, pbus->used_irqs); + sysbus_connect_irq(sbdev, n, pbus->irqs[irqn]); +} + +static void platform_bus_map_mmio(PlatformBusDevice *pbus, SysBusDevice *sbdev, + int n) +{ + MemoryRegion *sbdev_mr = sysbus_mmio_get_region(sbdev, n); + uint64_t size = memory_region_size(sbdev_mr); + uint64_t alignment = (1ULL << (63 - clz64(size + size - 1))); + uint64_t off; + bool found_region = false; + + if (memory_region_is_mapped(sbdev_mr)) { + /* Region is already mapped, nothing to do */ + return; + } + + /* + * Look for empty space in the MMIO space that is naturally aligned with + * the target device's memory region + */ + for (off = 0; off < pbus->mmio_size; off += alignment) { + if (!memory_region_find(&pbus->mmio, off, size).mr) { + found_region = true; + break; + } + } + + if (!found_region) { + error_report("Platform Bus: Can not fit MMIO region of size %"PRIx64, + size); + exit(1); + } + + /* Map the device's region into our Platform Bus MMIO space */ + memory_region_add_subregion(&pbus->mmio, off, sbdev_mr); +} + +/* + * Look for unassigned IRQ lines as well as unassociated MMIO regions. + * Connect them to the platform bus if available. 
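+ *
+ * A machine typically creates one platform bus and links every dynamic
+ * sysbus device to it (sketch; the property values are illustrative):
+ *   DeviceState *dev = qdev_new(TYPE_PLATFORM_BUS_DEVICE);
+ *   qdev_prop_set_uint32(dev, "num_irqs", 64);
+ *   qdev_prop_set_uint32(dev, "mmio_size", 0x2000000);
+ *   sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
+ *   platform_bus_link_device(PLATFORM_BUS_DEVICE(dev), sbdev);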
+ */ +void platform_bus_link_device(PlatformBusDevice *pbus, SysBusDevice *sbdev) +{ + int i; + + for (i = 0; sysbus_has_irq(sbdev, i); i++) { + platform_bus_map_irq(pbus, sbdev, i); + } + + for (i = 0; sysbus_has_mmio(sbdev, i); i++) { + platform_bus_map_mmio(pbus, sbdev, i); + } +} + +static void platform_bus_realize(DeviceState *dev, Error **errp) +{ + PlatformBusDevice *pbus; + SysBusDevice *d; + int i; + + d = SYS_BUS_DEVICE(dev); + pbus = PLATFORM_BUS_DEVICE(dev); + + memory_region_init(&pbus->mmio, OBJECT(dev), "platform bus", + pbus->mmio_size); + sysbus_init_mmio(d, &pbus->mmio); + + pbus->used_irqs = bitmap_new(pbus->num_irqs); + pbus->irqs = g_new0(qemu_irq, pbus->num_irqs); + for (i = 0; i < pbus->num_irqs; i++) { + sysbus_init_irq(d, &pbus->irqs[i]); + } + + /* some devices might be initialized before so update used IRQs map */ + plaform_bus_refresh_irqs(pbus); +} + +static Property platform_bus_properties[] = { + DEFINE_PROP_UINT32("num_irqs", PlatformBusDevice, num_irqs, 0), + DEFINE_PROP_UINT32("mmio_size", PlatformBusDevice, mmio_size, 0), + DEFINE_PROP_END_OF_LIST() +}; + +static void platform_bus_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = platform_bus_realize; + device_class_set_props(dc, platform_bus_properties); +} + +static const TypeInfo platform_bus_info = { + .name = TYPE_PLATFORM_BUS_DEVICE, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(PlatformBusDevice), + .class_init = platform_bus_class_init, +}; + +static void platform_bus_register_types(void) +{ + type_register_static(&platform_bus_info); +} + +type_init(platform_bus_register_types) diff --git a/hw/core/ptimer.c b/hw/core/ptimer.c new file mode 100644 index 000000000..6ba19fd96 --- /dev/null +++ b/hw/core/ptimer.c @@ -0,0 +1,486 @@ +/* + * General purpose implementation of a simple periodic countdown timer. + * + * Copyright (c) 2007 CodeSourcery. + * + * This code is licensed under the GNU LGPL. + */ + +#include "qemu/osdep.h" +#include "hw/ptimer.h" +#include "migration/vmstate.h" +#include "qemu/host-utils.h" +#include "sysemu/replay.h" +#include "sysemu/cpu-timers.h" +#include "sysemu/qtest.h" +#include "block/aio.h" +#include "sysemu/cpus.h" +#include "hw/clock.h" + +#define DELTA_ADJUST 1 +#define DELTA_NO_ADJUST -1 + +struct ptimer_state +{ + uint8_t enabled; /* 0 = disabled, 1 = periodic, 2 = oneshot. */ + uint64_t limit; + uint64_t delta; + uint32_t period_frac; + int64_t period; + int64_t last_event; + int64_t next_event; + uint8_t policy_mask; + QEMUTimer *timer; + ptimer_cb callback; + void *callback_opaque; + /* + * These track whether we're in a transaction block, and if we + * need to do a timer reload when the block finishes. They don't + * need to be migrated because migration can never happen in the + * middle of a transaction block. + */ + bool in_transaction; + bool need_reload; +}; + +/* Use a bottom-half routine to avoid reentrancy issues. */ +static void ptimer_trigger(ptimer_state *s) +{ + s->callback(s->callback_opaque); +} + +static void ptimer_reload(ptimer_state *s, int delta_adjust) +{ + uint32_t period_frac; + uint64_t period; + uint64_t delta; + bool suppress_trigger = false; + + /* + * Note that if delta_adjust is 0 then we must be here because of + * a count register write or timer start, not because of timer expiry. + * In that case the policy might require us to suppress the timer trigger + * that we would otherwise generate for a zero delta. 
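+     *
+     * E.g. with PTIMER_POLICY_TRIGGER_ONLY_ON_DECREMENT a guest writing 0
+     * to the count register must not fire the callback; only a real
+     * 1 -> 0 decrement on expiry may.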
+ */ + if (delta_adjust == 0 && + (s->policy_mask & PTIMER_POLICY_TRIGGER_ONLY_ON_DECREMENT)) { + suppress_trigger = true; + } + if (s->delta == 0 && !(s->policy_mask & PTIMER_POLICY_NO_IMMEDIATE_TRIGGER) + && !suppress_trigger) { + ptimer_trigger(s); + } + + /* + * Note that ptimer_trigger() might call the device callback function, + * which can then modify timer state, so we must not cache any fields + * from ptimer_state until after we have called it. + */ + delta = s->delta; + period = s->period; + period_frac = s->period_frac; + + if (delta == 0 && !(s->policy_mask & PTIMER_POLICY_NO_IMMEDIATE_RELOAD)) { + delta = s->delta = s->limit; + } + + if (s->period == 0) { + if (!qtest_enabled()) { + fprintf(stderr, "Timer with period zero, disabling\n"); + } + timer_del(s->timer); + s->enabled = 0; + return; + } + + if (s->policy_mask & PTIMER_POLICY_WRAP_AFTER_ONE_PERIOD) { + if (delta_adjust != DELTA_NO_ADJUST) { + delta += delta_adjust; + } + } + + if (delta == 0 && (s->policy_mask & PTIMER_POLICY_CONTINUOUS_TRIGGER)) { + if (s->enabled == 1 && s->limit == 0) { + delta = 1; + } + } + + if (delta == 0 && (s->policy_mask & PTIMER_POLICY_NO_IMMEDIATE_TRIGGER)) { + if (delta_adjust != DELTA_NO_ADJUST) { + delta = 1; + } + } + + if (delta == 0 && (s->policy_mask & PTIMER_POLICY_NO_IMMEDIATE_RELOAD)) { + if (s->enabled == 1 && s->limit != 0) { + delta = 1; + } + } + + if (delta == 0) { + if (s->enabled == 0) { + /* trigger callback disabled the timer already */ + return; + } + if (!qtest_enabled()) { + fprintf(stderr, "Timer with delta zero, disabling\n"); + } + timer_del(s->timer); + s->enabled = 0; + return; + } + + /* + * Artificially limit timeout rate to something + * achievable under QEMU. Otherwise, QEMU spends all + * its time generating timer interrupts, and there + * is no forward progress. + * About ten microseconds is the fastest that really works + * on the current generation of host machines. + */ + + if (s->enabled == 1 && (delta * period < 10000) && + !icount_enabled() && !qtest_enabled()) { + period = 10000 / delta; + period_frac = 0; + } + + s->last_event = s->next_event; + s->next_event = s->last_event + delta * period; + if (period_frac) { + s->next_event += ((int64_t)period_frac * delta) >> 32; + } + timer_mod(s->timer, s->next_event); +} + +static void ptimer_tick(void *opaque) +{ + ptimer_state *s = (ptimer_state *)opaque; + bool trigger = true; + + /* + * We perform all the tick actions within a begin/commit block + * because the callback function that ptimer_trigger() calls + * might make calls into the ptimer APIs that provoke another + * trigger, and we want that to cause the callback function + * to be called iteratively, not recursively. + */ + ptimer_transaction_begin(s); + + if (s->enabled == 2) { + s->delta = 0; + s->enabled = 0; + } else { + int delta_adjust = DELTA_ADJUST; + + if (s->delta == 0 || s->limit == 0) { + /* If a "continuous trigger" policy is not used and limit == 0, + we should error out. delta == 0 means that this tick is + caused by a "no immediate reload" policy, so it shouldn't + be adjusted. */ + delta_adjust = DELTA_NO_ADJUST; + } + + if (!(s->policy_mask & PTIMER_POLICY_NO_IMMEDIATE_TRIGGER)) { + /* Avoid re-trigger on deferred reload if "no immediate trigger" + policy isn't used. 
*/ + trigger = (delta_adjust == DELTA_ADJUST); + } + + s->delta = s->limit; + + ptimer_reload(s, delta_adjust); + } + + if (trigger) { + ptimer_trigger(s); + } + + ptimer_transaction_commit(s); +} + +uint64_t ptimer_get_count(ptimer_state *s) +{ + uint64_t counter; + + if (s->enabled && s->delta != 0) { + int64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + int64_t next = s->next_event; + int64_t last = s->last_event; + bool expired = (now - next >= 0); + bool oneshot = (s->enabled == 2); + + /* Figure out the current counter value. */ + if (expired) { + /* Prevent timer underflowing if it should already have + triggered. */ + counter = 0; + } else { + uint64_t rem; + uint64_t div; + int clz1, clz2; + int shift; + uint32_t period_frac = s->period_frac; + uint64_t period = s->period; + + if (!oneshot && (s->delta * period < 10000) && + !icount_enabled() && !qtest_enabled()) { + period = 10000 / s->delta; + period_frac = 0; + } + + /* We need to divide time by period, where time is stored in + rem (64-bit integer) and period is stored in period/period_frac + (64.32 fixed point). + + Doing full precision division is hard, so scale values and + do a 64-bit division. The result should be rounded down, + so that the rounding error never causes the timer to go + backwards. + */ + + rem = next - now; + div = period; + + clz1 = clz64(rem); + clz2 = clz64(div); + shift = clz1 < clz2 ? clz1 : clz2; + + rem <<= shift; + div <<= shift; + if (shift >= 32) { + div |= ((uint64_t)period_frac << (shift - 32)); + } else { + if (shift != 0) + div |= (period_frac >> (32 - shift)); + /* Look at remaining bits of period_frac and round div up if + necessary. */ + if ((uint32_t)(period_frac << shift)) + div += 1; + } + counter = rem / div; + + if (s->policy_mask & PTIMER_POLICY_WRAP_AFTER_ONE_PERIOD) { + /* Before wrapping around, timer should stay with counter = 0 + for a one period. */ + if (!oneshot && s->delta == s->limit) { + if (now == last) { + /* Counter == delta here, check whether it was + adjusted and if it was, then right now it is + that "one period". */ + if (counter == s->limit + DELTA_ADJUST) { + return 0; + } + } else if (counter == s->limit) { + /* Since the counter is rounded down and now != last, + the counter == limit means that delta was adjusted + by +1 and right now it is that adjusted period. */ + return 0; + } + } + } + } + + if (s->policy_mask & PTIMER_POLICY_NO_COUNTER_ROUND_DOWN) { + /* If now == last then delta == limit, i.e. the counter already + represents the correct value. It would be rounded down a 1ns + later. */ + if (now != last) { + counter += 1; + } + } + } else { + counter = s->delta; + } + return counter; +} + +void ptimer_set_count(ptimer_state *s, uint64_t count) +{ + assert(s->in_transaction); + s->delta = count; + if (s->enabled) { + s->need_reload = true; + } +} + +void ptimer_run(ptimer_state *s, int oneshot) +{ + bool was_disabled = !s->enabled; + + assert(s->in_transaction); + + if (was_disabled && s->period == 0) { + if (!qtest_enabled()) { + fprintf(stderr, "Timer with period zero, disabling\n"); + } + return; + } + s->enabled = oneshot ? 2 : 1; + if (was_disabled) { + s->need_reload = true; + } +} + +/* Pause a timer. Note that this may cause it to "lose" time, even if it + is immediately restarted. */ +void ptimer_stop(ptimer_state *s) +{ + assert(s->in_transaction); + + if (!s->enabled) + return; + + s->delta = ptimer_get_count(s); + timer_del(s->timer); + s->enabled = 0; + s->need_reload = false; +} + +/* Set counter increment interval in nanoseconds. 
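+ *
+ * Like the other setters below, this must be called inside a transaction
+ * block (sketch; the values are illustrative):
+ *   ptimer_transaction_begin(s);
+ *   ptimer_set_period(s, 100);     // 100 ns per tick
+ *   ptimer_set_limit(s, 1000, 1);  // reload value, count set too
+ *   ptimer_run(s, 0);              // 0 selects periodic mode
+ *   ptimer_transaction_commit(s);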
*/ +void ptimer_set_period(ptimer_state *s, int64_t period) +{ + assert(s->in_transaction); + s->delta = ptimer_get_count(s); + s->period = period; + s->period_frac = 0; + if (s->enabled) { + s->need_reload = true; + } +} + +/* Set counter increment interval from a Clock */ +void ptimer_set_period_from_clock(ptimer_state *s, const Clock *clk, + unsigned int divisor) +{ + /* + * The raw clock period is a 64-bit value in units of 2^-32 ns; + * put another way it's a 32.32 fixed-point ns value. Our internal + * representation of the period is 64.32 fixed point ns, so + * the conversion is simple. + */ + uint64_t raw_period = clock_get(clk); + uint64_t period_frac; + + assert(s->in_transaction); + s->delta = ptimer_get_count(s); + s->period = extract64(raw_period, 32, 32); + period_frac = extract64(raw_period, 0, 32); + /* + * divisor specifies a possible frequency divisor between the + * clock and the timer, so it is a multiplier on the period. + * We do the multiply after splitting the raw period out into + * period and frac to avoid having to do a 32*64->96 multiply. + */ + s->period *= divisor; + period_frac *= divisor; + s->period += extract64(period_frac, 32, 32); + s->period_frac = (uint32_t)period_frac; + + if (s->enabled) { + s->need_reload = true; + } +} + +/* Set counter frequency in Hz. */ +void ptimer_set_freq(ptimer_state *s, uint32_t freq) +{ + assert(s->in_transaction); + s->delta = ptimer_get_count(s); + s->period = 1000000000ll / freq; + s->period_frac = (1000000000ll << 32) / freq; + if (s->enabled) { + s->need_reload = true; + } +} + +/* Set the initial countdown value. If reload is nonzero then also set + count = limit. */ +void ptimer_set_limit(ptimer_state *s, uint64_t limit, int reload) +{ + assert(s->in_transaction); + s->limit = limit; + if (reload) + s->delta = limit; + if (s->enabled && reload) { + s->need_reload = true; + } +} + +uint64_t ptimer_get_limit(ptimer_state *s) +{ + return s->limit; +} + +void ptimer_transaction_begin(ptimer_state *s) +{ + assert(!s->in_transaction); + s->in_transaction = true; + s->need_reload = false; +} + +void ptimer_transaction_commit(ptimer_state *s) +{ + assert(s->in_transaction); + /* + * We must loop here because ptimer_reload() can call the callback + * function, which might then update ptimer state in a way that + * means we need to do another reload and possibly another callback. + * A disabled timer never needs reloading (and if we don't check + * this then we loop forever if ptimer_reload() disables the timer). + */ + while (s->need_reload && s->enabled) { + s->need_reload = false; + s->next_event = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + ptimer_reload(s, 0); + } + /* Now we've finished reload we can leave the transaction block. */ + s->in_transaction = false; +} + +const VMStateDescription vmstate_ptimer = { + .name = "ptimer", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT8(enabled, ptimer_state), + VMSTATE_UINT64(limit, ptimer_state), + VMSTATE_UINT64(delta, ptimer_state), + VMSTATE_UINT32(period_frac, ptimer_state), + VMSTATE_INT64(period, ptimer_state), + VMSTATE_INT64(last_event, ptimer_state), + VMSTATE_INT64(next_event, ptimer_state), + VMSTATE_TIMER_PTR(timer, ptimer_state), + VMSTATE_END_OF_LIST() + } +}; + +ptimer_state *ptimer_init(ptimer_cb callback, void *callback_opaque, + uint8_t policy_mask) +{ + ptimer_state *s; + + /* The callback function is mandatory. 
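+     *
+     * Note that the callback is invoked from inside a transaction block
+     * (see ptimer_tick() and ptimer_transaction_commit() above), so it
+     * may call the ptimer_set_*() APIs directly without beginning its
+     * own transaction.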
*/ + assert(callback); + + s = g_new0(ptimer_state, 1); + s->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, ptimer_tick, s); + s->policy_mask = policy_mask; + s->callback = callback; + s->callback_opaque = callback_opaque; + + /* + * These two policies are incompatible -- trigger-on-decrement implies + * a timer trigger when the count becomes 0, but no-immediate-trigger + * implies a trigger when the count stops being 0. + */ + assert(!((policy_mask & PTIMER_POLICY_TRIGGER_ONLY_ON_DECREMENT) && + (policy_mask & PTIMER_POLICY_NO_IMMEDIATE_TRIGGER))); + return s; +} + +void ptimer_free(ptimer_state *s) +{ + timer_free(s->timer); + g_free(s); +} diff --git a/hw/core/qdev-clock.c b/hw/core/qdev-clock.c new file mode 100644 index 000000000..117f4c6ea --- /dev/null +++ b/hw/core/qdev-clock.c @@ -0,0 +1,212 @@ +/* + * Device's clock input and output + * + * Copyright GreenSocs 2016-2020 + * + * Authors: + * Frederic Konrad + * Damien Hedde + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "qemu/error-report.h" +#include "hw/qdev-clock.h" +#include "hw/qdev-core.h" +#include "qapi/error.h" + +/* + * qdev_init_clocklist: + * Add a new clock in a device + */ +static NamedClockList *qdev_init_clocklist(DeviceState *dev, const char *name, + bool output, Clock *clk) +{ + NamedClockList *ncl; + + /* + * Clock must be added before realize() so that we can compute the + * clock's canonical path during device_realize(). + */ + assert(!dev->realized); + + /* + * The ncl structure is freed by qdev_finalize_clocklist() which will + * be called during @dev's device_finalize(). + */ + ncl = g_new0(NamedClockList, 1); + ncl->name = g_strdup(name); + ncl->output = output; + ncl->alias = (clk != NULL); + + /* + * Trying to create a clock whose name clashes with some other + * clock or property is a bug in the caller and we will abort(). + */ + if (clk == NULL) { + clk = CLOCK(object_new(TYPE_CLOCK)); + object_property_add_child(OBJECT(dev), name, OBJECT(clk)); + if (output) { + /* + * Remove object_new()'s initial reference. + * Note that for inputs, the reference created by object_new() + * will be deleted in qdev_finalize_clocklist(). + */ + object_unref(OBJECT(clk)); + } + } else { + object_property_add_link(OBJECT(dev), name, + object_get_typename(OBJECT(clk)), + (Object **) &ncl->clock, + NULL, OBJ_PROP_LINK_STRONG); + /* + * Since the link property has the OBJ_PROP_LINK_STRONG flag, the clk + * object reference count gets decremented on property deletion. + * However object_property_add_link does not increment it since it + * doesn't know the linked object. Increment it here to ensure the + * aliased clock stays alive during this device life-time. + */ + object_ref(OBJECT(clk)); + } + + ncl->clock = clk; + + QLIST_INSERT_HEAD(&dev->clocks, ncl, node); + return ncl; +} + +void qdev_finalize_clocklist(DeviceState *dev) +{ + /* called by @dev's device_finalize() */ + NamedClockList *ncl, *ncl_next; + + QLIST_FOREACH_SAFE(ncl, &dev->clocks, node, ncl_next) { + QLIST_REMOVE(ncl, node); + if (!ncl->output && !ncl->alias) { + /* + * We kept a reference on the input clock to ensure it lives up to + * this point so we can safely remove the callback. + * It avoids having a callback to a deleted object if ncl->clock + * is still referenced somewhere else (eg: by a clock output). 
+ */ + clock_clear_callback(ncl->clock); + object_unref(OBJECT(ncl->clock)); + } + g_free(ncl->name); + g_free(ncl); + } +} + +Clock *qdev_init_clock_out(DeviceState *dev, const char *name) +{ + NamedClockList *ncl; + + assert(name); + + ncl = qdev_init_clocklist(dev, name, true, NULL); + + return ncl->clock; +} + +Clock *qdev_init_clock_in(DeviceState *dev, const char *name, + ClockCallback *callback, void *opaque, + unsigned int events) +{ + NamedClockList *ncl; + + assert(name); + + ncl = qdev_init_clocklist(dev, name, false, NULL); + + if (callback) { + clock_set_callback(ncl->clock, callback, opaque, events); + } + return ncl->clock; +} + +void qdev_init_clocks(DeviceState *dev, const ClockPortInitArray clocks) +{ + const struct ClockPortInitElem *elem; + + for (elem = &clocks[0]; elem->name != NULL; elem++) { + Clock **clkp; + /* offset cannot be inside the DeviceState part */ + assert(elem->offset > sizeof(DeviceState)); + clkp = (Clock **)(((void *) dev) + elem->offset); + if (elem->is_output) { + *clkp = qdev_init_clock_out(dev, elem->name); + } else { + *clkp = qdev_init_clock_in(dev, elem->name, elem->callback, dev, + elem->callback_events); + } + } +} + +static NamedClockList *qdev_get_clocklist(DeviceState *dev, const char *name) +{ + NamedClockList *ncl; + + QLIST_FOREACH(ncl, &dev->clocks, node) { + if (strcmp(name, ncl->name) == 0) { + return ncl; + } + } + + return NULL; +} + +Clock *qdev_get_clock_in(DeviceState *dev, const char *name) +{ + NamedClockList *ncl; + + assert(name); + + ncl = qdev_get_clocklist(dev, name); + if (!ncl) { + error_report("Can not find clock-in '%s' for device type '%s'", + name, object_get_typename(OBJECT(dev))); + abort(); + } + assert(!ncl->output); + + return ncl->clock; +} + +Clock *qdev_get_clock_out(DeviceState *dev, const char *name) +{ + NamedClockList *ncl; + + assert(name); + + ncl = qdev_get_clocklist(dev, name); + if (!ncl) { + error_report("Can not find clock-out '%s' for device type '%s'", + name, object_get_typename(OBJECT(dev))); + abort(); + } + assert(ncl->output); + + return ncl->clock; +} + +Clock *qdev_alias_clock(DeviceState *dev, const char *name, + DeviceState *alias_dev, const char *alias_name) +{ + NamedClockList *ncl; + + assert(name && alias_name); + + ncl = qdev_get_clocklist(dev, name); + + qdev_init_clocklist(alias_dev, alias_name, ncl->output, ncl->clock); + + return ncl->clock; +} + +void qdev_connect_clock_in(DeviceState *dev, const char *name, Clock *source) +{ + assert(!dev->realized); + clock_set_source(qdev_get_clock_in(dev, name), source); +} diff --git a/hw/core/qdev-fw.c b/hw/core/qdev-fw.c new file mode 100644 index 000000000..a31958355 --- /dev/null +++ b/hw/core/qdev-fw.c @@ -0,0 +1,96 @@ +/* + * qdev fw helpers + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . 
+ */ + +#include "qemu/osdep.h" +#include "hw/fw-path-provider.h" +#include "hw/qdev-core.h" + +const char *qdev_fw_name(DeviceState *dev) +{ + DeviceClass *dc = DEVICE_GET_CLASS(dev); + + if (dc->fw_name) { + return dc->fw_name; + } + + return object_get_typename(OBJECT(dev)); +} + +static char *bus_get_fw_dev_path(BusState *bus, DeviceState *dev) +{ + BusClass *bc = BUS_GET_CLASS(bus); + + if (bc->get_fw_dev_path) { + return bc->get_fw_dev_path(dev); + } + + return NULL; +} + +static char *qdev_get_fw_dev_path_from_handler(BusState *bus, DeviceState *dev) +{ + Object *obj = OBJECT(dev); + char *d = NULL; + + while (!d && obj->parent) { + obj = obj->parent; + d = fw_path_provider_try_get_dev_path(obj, bus, dev); + } + return d; +} + +char *qdev_get_own_fw_dev_path_from_handler(BusState *bus, DeviceState *dev) +{ + Object *obj = OBJECT(dev); + + return fw_path_provider_try_get_dev_path(obj, bus, dev); +} + +static int qdev_get_fw_dev_path_helper(DeviceState *dev, char *p, int size) +{ + int l = 0; + + if (dev && dev->parent_bus) { + char *d; + l = qdev_get_fw_dev_path_helper(dev->parent_bus->parent, p, size); + d = qdev_get_fw_dev_path_from_handler(dev->parent_bus, dev); + if (!d) { + d = bus_get_fw_dev_path(dev->parent_bus, dev); + } + if (d) { + l += snprintf(p + l, size - l, "%s", d); + g_free(d); + } else { + return l; + } + } + l += snprintf(p + l , size - l, "/"); + + return l; +} + +char *qdev_get_fw_dev_path(DeviceState *dev) +{ + char path[128]; + int l; + + l = qdev_get_fw_dev_path_helper(dev, path, 128); + + path[l - 1] = '\0'; + + return g_strdup(path); +} diff --git a/hw/core/qdev-hotplug.c b/hw/core/qdev-hotplug.c new file mode 100644 index 000000000..d495d0e9c --- /dev/null +++ b/hw/core/qdev-hotplug.c @@ -0,0 +1,73 @@ +/* + * QDev Hotplug handlers + * + * Copyright (c) Red Hat + * + * SPDX-License-Identifier: GPL-2.0-or-later + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ */ + +#include "qemu/osdep.h" +#include "hw/qdev-core.h" +#include "hw/boards.h" + +HotplugHandler *qdev_get_machine_hotplug_handler(DeviceState *dev) +{ + MachineState *machine; + MachineClass *mc; + Object *m_obj = qdev_get_machine(); + + if (object_dynamic_cast(m_obj, TYPE_MACHINE)) { + machine = MACHINE(m_obj); + mc = MACHINE_GET_CLASS(machine); + if (mc->get_hotplug_handler) { + return mc->get_hotplug_handler(machine, dev); + } + } + + return NULL; +} + +bool qdev_hotplug_allowed(DeviceState *dev, Error **errp) +{ + MachineState *machine; + MachineClass *mc; + Object *m_obj = qdev_get_machine(); + + if (object_dynamic_cast(m_obj, TYPE_MACHINE)) { + machine = MACHINE(m_obj); + mc = MACHINE_GET_CLASS(machine); + if (mc->hotplug_allowed) { + return mc->hotplug_allowed(machine, dev, errp); + } + } + + return true; +} + +HotplugHandler *qdev_get_bus_hotplug_handler(DeviceState *dev) +{ + if (dev->parent_bus) { + return dev->parent_bus->hotplug_handler; + } + return NULL; +} + +HotplugHandler *qdev_get_hotplug_handler(DeviceState *dev) +{ + HotplugHandler *hotplug_ctrl = qdev_get_machine_hotplug_handler(dev); + + if (hotplug_ctrl == NULL && dev->parent_bus) { + hotplug_ctrl = qdev_get_bus_hotplug_handler(dev); + } + return hotplug_ctrl; +} + +/* can be used as ->unplug() callback for the simple cases */ +void qdev_simple_device_unplug_cb(HotplugHandler *hotplug_dev, + DeviceState *dev, Error **errp) +{ + qdev_unrealize(dev); +} diff --git a/hw/core/qdev-prop-internal.h b/hw/core/qdev-prop-internal.h new file mode 100644 index 000000000..d7b77844f --- /dev/null +++ b/hw/core/qdev-prop-internal.h @@ -0,0 +1,28 @@ +/* + * qdev property parsing + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#ifndef HW_CORE_QDEV_PROP_INTERNAL_H +#define HW_CORE_QDEV_PROP_INTERNAL_H + +void qdev_propinfo_get_enum(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp); +void qdev_propinfo_set_enum(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp); + +void qdev_propinfo_set_default_value_enum(ObjectProperty *op, + const Property *prop); +void qdev_propinfo_set_default_value_int(ObjectProperty *op, + const Property *prop); +void qdev_propinfo_set_default_value_uint(ObjectProperty *op, + const Property *prop); + +void qdev_propinfo_get_int32(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp); +void qdev_propinfo_get_size32(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp); + +#endif diff --git a/hw/core/qdev-properties-system.c b/hw/core/qdev-properties-system.c new file mode 100644 index 000000000..a91f60567 --- /dev/null +++ b/hw/core/qdev-properties-system.c @@ -0,0 +1,1121 @@ +/* + * qdev property parsing + * (parts specific for qemu-system-*) + * + * This file is based on code from hw/qdev-properties.c from + * commit 074a86fccd185616469dfcdc0e157f438aebba18, + * Copyright (c) Gerd Hoffmann and other contributors. + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ */ + +#include "qemu/osdep.h" +#include "hw/qdev-properties.h" +#include "hw/qdev-properties-system.h" +#include "qapi/error.h" +#include "qapi/visitor.h" +#include "qapi/qapi-types-block.h" +#include "qapi/qapi-types-machine.h" +#include "qapi/qapi-types-migration.h" +#include "qapi/qmp/qerror.h" +#include "qemu/ctype.h" +#include "qemu/cutils.h" +#include "qemu/units.h" +#include "qemu/uuid.h" +#include "qemu/error-report.h" +#include "qdev-prop-internal.h" + +#include "audio/audio.h" +#include "chardev/char-fe.h" +#include "sysemu/block-backend.h" +#include "sysemu/blockdev.h" +#include "net/net.h" +#include "hw/pci/pci.h" +#include "util/block-helpers.h" + +static bool check_prop_still_unset(Object *obj, const char *name, + const void *old_val, const char *new_val, + bool allow_override, Error **errp) +{ + const GlobalProperty *prop = qdev_find_global_prop(obj, name); + + if (!old_val || (!prop && allow_override)) { + return true; + } + + if (prop) { + error_setg(errp, "-global %s.%s=... conflicts with %s=%s", + prop->driver, prop->property, name, new_val); + } else { + /* Error message is vague, but a better one would be hard */ + error_setg(errp, "%s=%s conflicts, and override is not implemented", + name, new_val); + } + return false; +} + + +/* --- drive --- */ + +static void get_drive(Object *obj, Visitor *v, const char *name, void *opaque, + Error **errp) +{ + Property *prop = opaque; + void **ptr = object_field_prop_ptr(obj, prop); + const char *value; + char *p; + + if (*ptr) { + value = blk_name(*ptr); + if (!*value) { + BlockDriverState *bs = blk_bs(*ptr); + if (bs) { + value = bdrv_get_node_name(bs); + } + } + } else { + value = ""; + } + + p = g_strdup(value); + visit_type_str(v, name, &p, errp); + g_free(p); +} + +static void set_drive_helper(Object *obj, Visitor *v, const char *name, + void *opaque, bool iothread, Error **errp) +{ + DeviceState *dev = DEVICE(obj); + Property *prop = opaque; + void **ptr = object_field_prop_ptr(obj, prop); + char *str; + BlockBackend *blk; + bool blk_created = false; + int ret; + BlockDriverState *bs; + AioContext *ctx; + + if (!visit_type_str(v, name, &str, errp)) { + return; + } + + if (!check_prop_still_unset(obj, name, *ptr, str, true, errp)) { + return; + } + + if (*ptr) { + /* BlockBackend alread exists. So, we want to change attached node */ + blk = *ptr; + ctx = blk_get_aio_context(blk); + bs = bdrv_lookup_bs(NULL, str, errp); + if (!bs) { + return; + } + + if (ctx != bdrv_get_aio_context(bs)) { + error_setg(errp, "Different aio context is not supported for new " + "node"); + } + + aio_context_acquire(ctx); + blk_replace_bs(blk, bs, errp); + aio_context_release(ctx); + return; + } + + if (!*str) { + g_free(str); + *ptr = NULL; + return; + } + + blk = blk_by_name(str); + if (!blk) { + bs = bdrv_lookup_bs(NULL, str, NULL); + if (bs) { + /* + * If the device supports iothreads, it will make sure to move the + * block node to the right AioContext if necessary (or fail if this + * isn't possible because of other users). Devices that are not + * aware of iothreads require their BlockBackends to be in the main + * AioContext. + */ + ctx = iothread ? 
bdrv_get_aio_context(bs) : qemu_get_aio_context(); + blk = blk_new(ctx, 0, BLK_PERM_ALL); + blk_created = true; + + ret = blk_insert_bs(blk, bs, errp); + if (ret < 0) { + goto fail; + } + } + } + if (!blk) { + error_setg(errp, "Property '%s.%s' can't find value '%s'", + object_get_typename(OBJECT(dev)), name, str); + goto fail; + } + if (blk_attach_dev(blk, dev) < 0) { + DriveInfo *dinfo = blk_legacy_dinfo(blk); + + if (dinfo && dinfo->type != IF_NONE) { + error_setg(errp, "Drive '%s' is already in use because " + "it has been automatically connected to another " + "device (did you need 'if=none' in the drive options?)", + str); + } else { + error_setg(errp, "Drive '%s' is already in use by another device", + str); + } + goto fail; + } + + *ptr = blk; + +fail: + if (blk_created) { + /* If we need to keep a reference, blk_attach_dev() took it */ + blk_unref(blk); + } + + g_free(str); +} + +static void set_drive(Object *obj, Visitor *v, const char *name, void *opaque, + Error **errp) +{ + set_drive_helper(obj, v, name, opaque, false, errp); +} + +static void set_drive_iothread(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + set_drive_helper(obj, v, name, opaque, true, errp); +} + +static void release_drive(Object *obj, const char *name, void *opaque) +{ + DeviceState *dev = DEVICE(obj); + Property *prop = opaque; + BlockBackend **ptr = object_field_prop_ptr(obj, prop); + + if (*ptr) { + AioContext *ctx = blk_get_aio_context(*ptr); + + aio_context_acquire(ctx); + blockdev_auto_del(*ptr); + blk_detach_dev(*ptr, dev); + aio_context_release(ctx); + } +} + +const PropertyInfo qdev_prop_drive = { + .name = "str", + .description = "Node name or ID of a block device to use as a backend", + .realized_set_allowed = true, + .get = get_drive, + .set = set_drive, + .release = release_drive, +}; + +const PropertyInfo qdev_prop_drive_iothread = { + .name = "str", + .description = "Node name or ID of a block device to use as a backend", + .realized_set_allowed = true, + .get = get_drive, + .set = set_drive_iothread, + .release = release_drive, +}; + +/* --- character device --- */ + +static void get_chr(Object *obj, Visitor *v, const char *name, void *opaque, + Error **errp) +{ + CharBackend *be = object_field_prop_ptr(obj, opaque); + char *p; + + p = g_strdup(be->chr && be->chr->label ? be->chr->label : ""); + visit_type_str(v, name, &p, errp); + g_free(p); +} + +static void set_chr(Object *obj, Visitor *v, const char *name, void *opaque, + Error **errp) +{ + Property *prop = opaque; + CharBackend *be = object_field_prop_ptr(obj, prop); + Chardev *s; + char *str; + + if (!visit_type_str(v, name, &str, errp)) { + return; + } + + /* + * TODO Should this really be an error? If no, the old value + * needs to be released before we store the new one. 
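From a board or device model, the chardev property defined here is usually set through the convenience wrappers further down in this file; a sketch, with an invented device type and a chardev id that would come from the command line:

    Chardev *chr = qemu_chr_find("serial0");   /* e.g. -chardev socket,id=serial0,... */
    DeviceState *dev = qdev_new("my-uart");

    qdev_prop_set_chr(dev, "chardev", chr);    /* aborts on error */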
+ */ + if (!check_prop_still_unset(obj, name, be->chr, str, false, errp)) { + return; + } + + if (!*str) { + g_free(str); + be->chr = NULL; + return; + } + + s = qemu_chr_find(str); + if (s == NULL) { + error_setg(errp, "Property '%s.%s' can't find value '%s'", + object_get_typename(obj), name, str); + } else if (!qemu_chr_fe_init(be, s, errp)) { + error_prepend(errp, "Property '%s.%s' can't take value '%s': ", + object_get_typename(obj), name, str); + } + g_free(str); +} + +static void release_chr(Object *obj, const char *name, void *opaque) +{ + Property *prop = opaque; + CharBackend *be = object_field_prop_ptr(obj, prop); + + qemu_chr_fe_deinit(be, false); +} + +const PropertyInfo qdev_prop_chr = { + .name = "str", + .description = "ID of a chardev to use as a backend", + .get = get_chr, + .set = set_chr, + .release = release_chr, +}; + +/* --- mac address --- */ + +/* + * accepted syntax versions: + * 01:02:03:04:05:06 + * 01-02-03-04-05-06 + */ +static void get_mac(Object *obj, Visitor *v, const char *name, void *opaque, + Error **errp) +{ + Property *prop = opaque; + MACAddr *mac = object_field_prop_ptr(obj, prop); + char buffer[2 * 6 + 5 + 1]; + char *p = buffer; + + snprintf(buffer, sizeof(buffer), "%02x:%02x:%02x:%02x:%02x:%02x", + mac->a[0], mac->a[1], mac->a[2], + mac->a[3], mac->a[4], mac->a[5]); + + visit_type_str(v, name, &p, errp); +} + +static void set_mac(Object *obj, Visitor *v, const char *name, void *opaque, + Error **errp) +{ + Property *prop = opaque; + MACAddr *mac = object_field_prop_ptr(obj, prop); + int i, pos; + char *str; + const char *p; + + if (!visit_type_str(v, name, &str, errp)) { + return; + } + + for (i = 0, pos = 0; i < 6; i++, pos += 3) { + long val; + + if (!qemu_isxdigit(str[pos])) { + goto inval; + } + if (!qemu_isxdigit(str[pos + 1])) { + goto inval; + } + if (i == 5) { + if (str[pos + 2] != '\0') { + goto inval; + } + } else { + if (str[pos + 2] != ':' && str[pos + 2] != '-') { + goto inval; + } + } + if (qemu_strtol(str + pos, &p, 16, &val) < 0 || val > 0xff) { + goto inval; + } + mac->a[i] = val; + } + g_free(str); + return; + +inval: + error_set_from_qdev_prop_error(errp, EINVAL, obj, name, str); + g_free(str); +} + +const PropertyInfo qdev_prop_macaddr = { + .name = "str", + .description = "Ethernet 6-byte MAC Address, example: 52:54:00:12:34:56", + .get = get_mac, + .set = set_mac, +}; + +void qdev_prop_set_macaddr(DeviceState *dev, const char *name, + const uint8_t *value) +{ + char str[2 * 6 + 5 + 1]; + snprintf(str, sizeof(str), "%02x:%02x:%02x:%02x:%02x:%02x", + value[0], value[1], value[2], value[3], value[4], value[5]); + + object_property_set_str(OBJECT(dev), name, str, &error_abort); +} + +/* --- netdev device --- */ +static void get_netdev(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + Property *prop = opaque; + NICPeers *peers_ptr = object_field_prop_ptr(obj, prop); + char *p = g_strdup(peers_ptr->ncs[0] ? 
peers_ptr->ncs[0]->name : ""); + + visit_type_str(v, name, &p, errp); + g_free(p); +} + +static void set_netdev(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + Property *prop = opaque; + NICPeers *peers_ptr = object_field_prop_ptr(obj, prop); + NetClientState **ncs = peers_ptr->ncs; + NetClientState *peers[MAX_QUEUE_NUM]; + int queues, err = 0, i = 0; + char *str; + + if (!visit_type_str(v, name, &str, errp)) { + return; + } + + queues = qemu_find_net_clients_except(str, peers, + NET_CLIENT_DRIVER_NIC, + MAX_QUEUE_NUM); + if (queues == 0) { + err = -ENOENT; + goto out; + } + + if (queues > MAX_QUEUE_NUM) { + error_setg(errp, "queues of backend '%s'(%d) exceeds QEMU limitation(%d)", + str, queues, MAX_QUEUE_NUM); + goto out; + } + + for (i = 0; i < queues; i++) { + if (peers[i]->peer) { + err = -EEXIST; + goto out; + } + + /* + * TODO Should this really be an error? If no, the old value + * needs to be released before we store the new one. + */ + if (!check_prop_still_unset(obj, name, ncs[i], str, false, errp)) { + goto out; + } + + if (peers[i]->info->check_peer_type) { + if (!peers[i]->info->check_peer_type(peers[i], obj->class, errp)) { + goto out; + } + } + + ncs[i] = peers[i]; + ncs[i]->queue_index = i; + } + + peers_ptr->queues = queues; + +out: + error_set_from_qdev_prop_error(errp, err, obj, name, str); + g_free(str); +} + +const PropertyInfo qdev_prop_netdev = { + .name = "str", + .description = "ID of a netdev to use as a backend", + .get = get_netdev, + .set = set_netdev, +}; + + +/* --- audiodev --- */ +static void get_audiodev(Object *obj, Visitor *v, const char* name, + void *opaque, Error **errp) +{ + Property *prop = opaque; + QEMUSoundCard *card = object_field_prop_ptr(obj, prop); + char *p = g_strdup(audio_get_id(card)); + + visit_type_str(v, name, &p, errp); + g_free(p); +} + +static void set_audiodev(Object *obj, Visitor *v, const char* name, + void *opaque, Error **errp) +{ + Property *prop = opaque; + QEMUSoundCard *card = object_field_prop_ptr(obj, prop); + AudioState *state; + int err = 0; + char *str; + + if (!visit_type_str(v, name, &str, errp)) { + return; + } + + state = audio_state_by_name(str); + + if (!state) { + err = -ENOENT; + goto out; + } + card->state = state; + +out: + error_set_from_qdev_prop_error(errp, err, obj, name, str); + g_free(str); +} + +const PropertyInfo qdev_prop_audiodev = { + .name = "str", + .description = "ID of an audiodev to use as a backend", + /* release done on shutdown */ + .get = get_audiodev, + .set = set_audiodev, +}; + +bool qdev_prop_set_drive_err(DeviceState *dev, const char *name, + BlockBackend *value, Error **errp) +{ + const char *ref = ""; + + if (value) { + ref = blk_name(value); + if (!*ref) { + const BlockDriverState *bs = blk_bs(value); + if (bs) { + ref = bdrv_get_node_name(bs); + } + } + } + + return object_property_set_str(OBJECT(dev), name, ref, errp); +} + +void qdev_prop_set_drive(DeviceState *dev, const char *name, + BlockBackend *value) +{ + qdev_prop_set_drive_err(dev, name, value, &error_abort); +} + +void qdev_prop_set_chr(DeviceState *dev, const char *name, + Chardev *value) +{ + assert(!value || value->label); + object_property_set_str(OBJECT(dev), name, value ? value->label : "", + &error_abort); +} + +void qdev_prop_set_netdev(DeviceState *dev, const char *name, + NetClientState *value) +{ + assert(!value || value->name); + object_property_set_str(OBJECT(dev), name, value ? 
value->name : "", + &error_abort); +} + +void qdev_set_nic_properties(DeviceState *dev, NICInfo *nd) +{ + qdev_prop_set_macaddr(dev, "mac", nd->macaddr.a); + if (nd->netdev) { + qdev_prop_set_netdev(dev, "netdev", nd->netdev); + } + if (nd->nvectors != DEV_NVECTORS_UNSPECIFIED && + object_property_find(OBJECT(dev), "vectors")) { + qdev_prop_set_uint32(dev, "vectors", nd->nvectors); + } + nd->instantiated = 1; +} + +/* --- lost tick policy --- */ + +QEMU_BUILD_BUG_ON(sizeof(LostTickPolicy) != sizeof(int)); + +const PropertyInfo qdev_prop_losttickpolicy = { + .name = "LostTickPolicy", + .enum_table = &LostTickPolicy_lookup, + .get = qdev_propinfo_get_enum, + .set = qdev_propinfo_set_enum, + .set_default_value = qdev_propinfo_set_default_value_enum, +}; + +/* --- blocksize --- */ + +static void set_blocksize(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + DeviceState *dev = DEVICE(obj); + Property *prop = opaque; + uint32_t *ptr = object_field_prop_ptr(obj, prop); + uint64_t value; + Error *local_err = NULL; + + if (!visit_type_size(v, name, &value, errp)) { + return; + } + check_block_size(dev->id ? : "", name, value, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + *ptr = value; +} + +const PropertyInfo qdev_prop_blocksize = { + .name = "size", + .description = "A power of two between " MIN_BLOCK_SIZE_STR + " and " MAX_BLOCK_SIZE_STR, + .get = qdev_propinfo_get_size32, + .set = set_blocksize, + .set_default_value = qdev_propinfo_set_default_value_uint, +}; + +/* --- Block device error handling policy --- */ + +QEMU_BUILD_BUG_ON(sizeof(BlockdevOnError) != sizeof(int)); + +const PropertyInfo qdev_prop_blockdev_on_error = { + .name = "BlockdevOnError", + .description = "Error handling policy, " + "report/ignore/enospc/stop/auto", + .enum_table = &BlockdevOnError_lookup, + .get = qdev_propinfo_get_enum, + .set = qdev_propinfo_set_enum, + .set_default_value = qdev_propinfo_set_default_value_enum, +}; + +/* --- BIOS CHS translation */ + +QEMU_BUILD_BUG_ON(sizeof(BiosAtaTranslation) != sizeof(int)); + +const PropertyInfo qdev_prop_bios_chs_trans = { + .name = "BiosAtaTranslation", + .description = "Logical CHS translation algorithm, " + "auto/none/lba/large/rechs", + .enum_table = &BiosAtaTranslation_lookup, + .get = qdev_propinfo_get_enum, + .set = qdev_propinfo_set_enum, + .set_default_value = qdev_propinfo_set_default_value_enum, +}; + +/* --- FDC default drive types */ + +const PropertyInfo qdev_prop_fdc_drive_type = { + .name = "FdcDriveType", + .description = "FDC drive type, " + "144/288/120/none/auto", + .enum_table = &FloppyDriveType_lookup, + .get = qdev_propinfo_get_enum, + .set = qdev_propinfo_set_enum, + .set_default_value = qdev_propinfo_set_default_value_enum, +}; + +/* --- MultiFDCompression --- */ + +const PropertyInfo qdev_prop_multifd_compression = { + .name = "MultiFDCompression", + .description = "multifd_compression values, " + "none/zlib/zstd", + .enum_table = &MultiFDCompression_lookup, + .get = qdev_propinfo_get_enum, + .set = qdev_propinfo_set_enum, + .set_default_value = qdev_propinfo_set_default_value_enum, +}; + +/* --- Reserved Region --- */ + +/* + * Accepted syntax: + * :: + * where low/high addresses are uint64_t in hexadecimal + * and type is a non-negative decimal integer + */ +static void get_reserved_region(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + Property *prop = opaque; + ReservedRegion *rr = object_field_prop_ptr(obj, prop); + char buffer[64]; + char *p = 
buffer; + int rc; + + rc = snprintf(buffer, sizeof(buffer), "0x%"PRIx64":0x%"PRIx64":%u", + rr->low, rr->high, rr->type); + assert(rc < sizeof(buffer)); + + visit_type_str(v, name, &p, errp); +} + +static void set_reserved_region(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + Property *prop = opaque; + ReservedRegion *rr = object_field_prop_ptr(obj, prop); + Error *local_err = NULL; + const char *endptr; + char *str; + int ret; + + visit_type_str(v, name, &str, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + + ret = qemu_strtou64(str, &endptr, 16, &rr->low); + if (ret) { + error_setg(errp, "start address of '%s'" + " must be a hexadecimal integer", name); + goto out; + } + if (*endptr != ':') { + goto separator_error; + } + + ret = qemu_strtou64(endptr + 1, &endptr, 16, &rr->high); + if (ret) { + error_setg(errp, "end address of '%s'" + " must be a hexadecimal integer", name); + goto out; + } + if (*endptr != ':') { + goto separator_error; + } + + ret = qemu_strtoui(endptr + 1, &endptr, 10, &rr->type); + if (ret) { + error_setg(errp, "type of '%s'" + " must be a non-negative decimal integer", name); + } + goto out; + +separator_error: + error_setg(errp, "reserved region fields must be separated with ':'"); +out: + g_free(str); + return; +} + +const PropertyInfo qdev_prop_reserved_region = { + .name = "reserved_region", + .description = "Reserved Region, example: 0xFEE00000:0xFEEFFFFF:0", + .get = get_reserved_region, + .set = set_reserved_region, +}; + +/* --- pci address --- */ + +/* + * bus-local address, i.e. "$slot" or "$slot.$fn" + */ +static void set_pci_devfn(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + Property *prop = opaque; + int32_t value, *ptr = object_field_prop_ptr(obj, prop); + unsigned int slot, fn, n; + char *str; + + if (!visit_type_str(v, name, &str, NULL)) { + if (!visit_type_int32(v, name, &value, errp)) { + return; + } + if (value < -1 || value > 255) { + error_setg(errp, QERR_INVALID_PARAMETER_VALUE, + name ? name : "null", "a value between -1 and 255"); + return; + } + *ptr = value; + return; + } + + if (sscanf(str, "%x.%x%n", &slot, &fn, &n) != 2) { + fn = 0; + if (sscanf(str, "%x%n", &slot, &n) != 1) { + goto invalid; + } + } + if (str[n] != '\0' || fn > 7 || slot > 31) { + goto invalid; + } + *ptr = slot << 3 | fn; + g_free(str); + return; + +invalid: + error_set_from_qdev_prop_error(errp, EINVAL, obj, name, str); + g_free(str); +} + +static int print_pci_devfn(Object *obj, Property *prop, char *dest, + size_t len) +{ + int32_t *ptr = object_field_prop_ptr(obj, prop); + + if (*ptr == -1) { + return snprintf(dest, len, ""); + } else { + return snprintf(dest, len, "%02x.%x", *ptr >> 3, *ptr & 7); + } +} + +const PropertyInfo qdev_prop_pci_devfn = { + .name = "int32", + .description = "Slot and optional function number, example: 06.0 or 06", + .print = print_pci_devfn, + .get = qdev_propinfo_get_int32, + .set = set_pci_devfn, + .set_default_value = qdev_propinfo_set_default_value_int, +}; + +/* --- pci host address --- */ + +static void get_pci_host_devaddr(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + Property *prop = opaque; + PCIHostDeviceAddress *addr = object_field_prop_ptr(obj, prop); + char buffer[] = "ffff:ff:ff.f"; + char *p = buffer; + int rc = 0; + + /* + * Catch "invalid" device reference from vfio-pci and allow the + * default buffer representing the non-existent device to be used. 
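The devfn encoding that set_pci_devfn() above produces packs the 5-bit slot and the 3-bit function into a single integer, matching the PCI configuration-address layout. A worked example of the round trip:

    /* "06.1" parses to slot 0x06, function 1 */
    unsigned int slot = 0x06, fn = 1;
    int32_t devfn = slot << 3 | fn;        /* == 0x31 */
    /* print_pci_devfn() formats 0x31 back as "06.1" */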
+ */ + if (~addr->domain || ~addr->bus || ~addr->slot || ~addr->function) { + rc = snprintf(buffer, sizeof(buffer), "%04x:%02x:%02x.%0d", + addr->domain, addr->bus, addr->slot, addr->function); + assert(rc == sizeof(buffer) - 1); + } + + visit_type_str(v, name, &p, errp); +} + +/* + * Parse [<domain>:]<bus>:<slot>.<func> + * if <domain> is not supplied, it's assumed to be 0. + */ +static void set_pci_host_devaddr(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + Property *prop = opaque; + PCIHostDeviceAddress *addr = object_field_prop_ptr(obj, prop); + char *str, *p; + char *e; + unsigned long val; + unsigned long dom = 0, bus = 0; + unsigned int slot = 0, func = 0; + + if (!visit_type_str(v, name, &str, errp)) { + return; + } + + p = str; + val = strtoul(p, &e, 16); + if (e == p || *e != ':') { + goto inval; + } + bus = val; + + p = e + 1; + val = strtoul(p, &e, 16); + if (e == p) { + goto inval; + } + if (*e == ':') { + dom = bus; + bus = val; + p = e + 1; + val = strtoul(p, &e, 16); + if (e == p) { + goto inval; + } + } + slot = val; + + if (*e != '.') { + goto inval; + } + p = e + 1; + val = strtoul(p, &e, 10); + if (e == p) { + goto inval; + } + func = val; + + if (dom > 0xffff || bus > 0xff || slot > 0x1f || func > 7) { + goto inval; + } + + if (*e) { + goto inval; + } + + addr->domain = dom; + addr->bus = bus; + addr->slot = slot; + addr->function = func; + + g_free(str); + return; + +inval: + error_set_from_qdev_prop_error(errp, EINVAL, obj, name, str); + g_free(str); +} + +const PropertyInfo qdev_prop_pci_host_devaddr = { + .name = "str", + .description = "Address (bus/device/function) of " + "the host device, example: 04:10.0", + .get = get_pci_host_devaddr, + .set = set_pci_host_devaddr, +}; + +/* --- OffAutoPCIBAR off/auto/bar0/bar1/bar2/bar3/bar4/bar5 --- */ + +const PropertyInfo qdev_prop_off_auto_pcibar = { + .name = "OffAutoPCIBAR", + .description = "off/auto/bar0/bar1/bar2/bar3/bar4/bar5", + .enum_table = &OffAutoPCIBAR_lookup, + .get = qdev_propinfo_get_enum, + .set = qdev_propinfo_set_enum, + .set_default_value = qdev_propinfo_set_default_value_enum, +}; + +/* --- PCIELinkSpeed 2_5/5/8/16 -- */ + +static void get_prop_pcielinkspeed(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + Property *prop = opaque; + PCIExpLinkSpeed *p = object_field_prop_ptr(obj, prop); + int speed; + + switch (*p) { + case QEMU_PCI_EXP_LNK_2_5GT: + speed = PCIE_LINK_SPEED_2_5; + break; + case QEMU_PCI_EXP_LNK_5GT: + speed = PCIE_LINK_SPEED_5; + break; + case QEMU_PCI_EXP_LNK_8GT: + speed = PCIE_LINK_SPEED_8; + break; + case QEMU_PCI_EXP_LNK_16GT: + speed = PCIE_LINK_SPEED_16; + break; + default: + /* Unreachable */ + abort(); + } + + visit_type_enum(v, name, &speed, prop->info->enum_table, errp); +} + +static void set_prop_pcielinkspeed(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + Property *prop = opaque; + PCIExpLinkSpeed *p = object_field_prop_ptr(obj, prop); + int speed; + + if (!visit_type_enum(v, name, &speed, prop->info->enum_table, + errp)) { + return; + } + + switch (speed) { + case PCIE_LINK_SPEED_2_5: + *p = QEMU_PCI_EXP_LNK_2_5GT; + break; + case PCIE_LINK_SPEED_5: + *p = QEMU_PCI_EXP_LNK_5GT; + break; + case PCIE_LINK_SPEED_8: + *p = QEMU_PCI_EXP_LNK_8GT; + break; + case PCIE_LINK_SPEED_16: + *p = QEMU_PCI_EXP_LNK_16GT; + break; + default: + /* Unreachable */ + abort(); + } +} + +const PropertyInfo qdev_prop_pcie_link_speed = { + .name = "PCIELinkSpeed", + .description = "2_5/5/8/16", + .enum_table = &PCIELinkSpeed_lookup, + .get
= get_prop_pcielinkspeed, + .set = set_prop_pcielinkspeed, + .set_default_value = qdev_propinfo_set_default_value_enum, +}; + +/* --- PCIELinkWidth 1/2/4/8/12/16/32 -- */ + +static void get_prop_pcielinkwidth(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + Property *prop = opaque; + PCIExpLinkWidth *p = object_field_prop_ptr(obj, prop); + int width; + + switch (*p) { + case QEMU_PCI_EXP_LNK_X1: + width = PCIE_LINK_WIDTH_1; + break; + case QEMU_PCI_EXP_LNK_X2: + width = PCIE_LINK_WIDTH_2; + break; + case QEMU_PCI_EXP_LNK_X4: + width = PCIE_LINK_WIDTH_4; + break; + case QEMU_PCI_EXP_LNK_X8: + width = PCIE_LINK_WIDTH_8; + break; + case QEMU_PCI_EXP_LNK_X12: + width = PCIE_LINK_WIDTH_12; + break; + case QEMU_PCI_EXP_LNK_X16: + width = PCIE_LINK_WIDTH_16; + break; + case QEMU_PCI_EXP_LNK_X32: + width = PCIE_LINK_WIDTH_32; + break; + default: + /* Unreachable */ + abort(); + } + + visit_type_enum(v, name, &width, prop->info->enum_table, errp); +} + +static void set_prop_pcielinkwidth(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + Property *prop = opaque; + PCIExpLinkWidth *p = object_field_prop_ptr(obj, prop); + int width; + + if (!visit_type_enum(v, name, &width, prop->info->enum_table, + errp)) { + return; + } + + switch (width) { + case PCIE_LINK_WIDTH_1: + *p = QEMU_PCI_EXP_LNK_X1; + break; + case PCIE_LINK_WIDTH_2: + *p = QEMU_PCI_EXP_LNK_X2; + break; + case PCIE_LINK_WIDTH_4: + *p = QEMU_PCI_EXP_LNK_X4; + break; + case PCIE_LINK_WIDTH_8: + *p = QEMU_PCI_EXP_LNK_X8; + break; + case PCIE_LINK_WIDTH_12: + *p = QEMU_PCI_EXP_LNK_X12; + break; + case PCIE_LINK_WIDTH_16: + *p = QEMU_PCI_EXP_LNK_X16; + break; + case PCIE_LINK_WIDTH_32: + *p = QEMU_PCI_EXP_LNK_X32; + break; + default: + /* Unreachable */ + abort(); + } +} + +const PropertyInfo qdev_prop_pcie_link_width = { + .name = "PCIELinkWidth", + .description = "1/2/4/8/12/16/32", + .enum_table = &PCIELinkWidth_lookup, + .get = get_prop_pcielinkwidth, + .set = set_prop_pcielinkwidth, + .set_default_value = qdev_propinfo_set_default_value_enum, +}; + +/* --- UUID --- */ + +static void get_uuid(Object *obj, Visitor *v, const char *name, void *opaque, + Error **errp) +{ + Property *prop = opaque; + QemuUUID *uuid = object_field_prop_ptr(obj, prop); + char buffer[UUID_FMT_LEN + 1]; + char *p = buffer; + + qemu_uuid_unparse(uuid, buffer); + + visit_type_str(v, name, &p, errp); +} + +#define UUID_VALUE_AUTO "auto" + +static void set_uuid(Object *obj, Visitor *v, const char *name, void *opaque, + Error **errp) +{ + Property *prop = opaque; + QemuUUID *uuid = object_field_prop_ptr(obj, prop); + char *str; + + if (!visit_type_str(v, name, &str, errp)) { + return; + } + + if (!strcmp(str, UUID_VALUE_AUTO)) { + qemu_uuid_generate(uuid); + } else if (qemu_uuid_parse(str, uuid) < 0) { + error_set_from_qdev_prop_error(errp, EINVAL, obj, name, str); + } + g_free(str); +} + +static void set_default_uuid_auto(ObjectProperty *op, const Property *prop) +{ + object_property_set_default_str(op, UUID_VALUE_AUTO); +} + +const PropertyInfo qdev_prop_uuid = { + .name = "str", + .description = "UUID (aka GUID) or \"" UUID_VALUE_AUTO + "\" for random value (default)", + .get = get_uuid, + .set = set_uuid, + .set_default_value = set_default_uuid_auto, +}; diff --git a/hw/core/qdev-properties.c b/hw/core/qdev-properties.c new file mode 100644 index 000000000..c34aac6eb --- /dev/null +++ b/hw/core/qdev-properties.c @@ -0,0 +1,955 @@ +#include "qemu/osdep.h" +#include "hw/qdev-properties.h" +#include 
"qapi/error.h" +#include "qapi/qapi-types-misc.h" +#include "qapi/qmp/qerror.h" +#include "qemu/ctype.h" +#include "qemu/error-report.h" +#include "qapi/visitor.h" +#include "qemu/units.h" +#include "qemu/cutils.h" +#include "qdev-prop-internal.h" + +void qdev_prop_set_after_realize(DeviceState *dev, const char *name, + Error **errp) +{ + if (dev->id) { + error_setg(errp, "Attempt to set property '%s' on device '%s' " + "(type '%s') after it was realized", name, dev->id, + object_get_typename(OBJECT(dev))); + } else { + error_setg(errp, "Attempt to set property '%s' on anonymous device " + "(type '%s') after it was realized", name, + object_get_typename(OBJECT(dev))); + } +} + +/* returns: true if property is allowed to be set, false otherwise */ +static bool qdev_prop_allow_set(Object *obj, const char *name, + const PropertyInfo *info, Error **errp) +{ + DeviceState *dev = DEVICE(obj); + + if (dev->realized && !info->realized_set_allowed) { + qdev_prop_set_after_realize(dev, name, errp); + return false; + } + return true; +} + +void qdev_prop_allow_set_link_before_realize(const Object *obj, + const char *name, + Object *val, Error **errp) +{ + DeviceState *dev = DEVICE(obj); + + if (dev->realized) { + error_setg(errp, "Attempt to set link property '%s' on device '%s' " + "(type '%s') after it was realized", + name, dev->id, object_get_typename(obj)); + } +} + +void *object_field_prop_ptr(Object *obj, Property *prop) +{ + void *ptr = obj; + ptr += prop->offset; + return ptr; +} + +static void field_prop_get(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + Property *prop = opaque; + return prop->info->get(obj, v, name, opaque, errp); +} + +/** + * field_prop_getter: Return getter function to be used for property + * + * Return value can be NULL if @info has no getter function. + */ +static ObjectPropertyAccessor *field_prop_getter(const PropertyInfo *info) +{ + return info->get ? field_prop_get : NULL; +} + +static void field_prop_set(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + Property *prop = opaque; + + if (!qdev_prop_allow_set(obj, name, prop->info, errp)) { + return; + } + + return prop->info->set(obj, v, name, opaque, errp); +} + +/** + * field_prop_setter: Return setter function to be used for property + * + * Return value can be NULL if @info has not setter function. + */ +static ObjectPropertyAccessor *field_prop_setter(const PropertyInfo *info) +{ + return info->set ? 
field_prop_set : NULL; +} + +void qdev_propinfo_get_enum(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + Property *prop = opaque; + int *ptr = object_field_prop_ptr(obj, prop); + + visit_type_enum(v, name, ptr, prop->info->enum_table, errp); +} + +void qdev_propinfo_set_enum(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + Property *prop = opaque; + int *ptr = object_field_prop_ptr(obj, prop); + + visit_type_enum(v, name, ptr, prop->info->enum_table, errp); +} + +void qdev_propinfo_set_default_value_enum(ObjectProperty *op, + const Property *prop) +{ + object_property_set_default_str(op, + qapi_enum_lookup(prop->info->enum_table, prop->defval.i)); +} + +const PropertyInfo qdev_prop_enum = { + .name = "enum", + .get = qdev_propinfo_get_enum, + .set = qdev_propinfo_set_enum, + .set_default_value = qdev_propinfo_set_default_value_enum, +}; + +/* Bit */ + +static uint32_t qdev_get_prop_mask(Property *prop) +{ + assert(prop->info == &qdev_prop_bit); + return 0x1 << prop->bitnr; +} + +static void bit_prop_set(Object *obj, Property *props, bool val) +{ + uint32_t *p = object_field_prop_ptr(obj, props); + uint32_t mask = qdev_get_prop_mask(props); + if (val) { + *p |= mask; + } else { + *p &= ~mask; + } +} + +static void prop_get_bit(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + Property *prop = opaque; + uint32_t *p = object_field_prop_ptr(obj, prop); + bool value = (*p & qdev_get_prop_mask(prop)) != 0; + + visit_type_bool(v, name, &value, errp); +} + +static void prop_set_bit(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + Property *prop = opaque; + bool value; + + if (!visit_type_bool(v, name, &value, errp)) { + return; + } + bit_prop_set(obj, prop, value); +} + +static void set_default_value_bool(ObjectProperty *op, const Property *prop) +{ + object_property_set_default_bool(op, prop->defval.u); +} + +const PropertyInfo qdev_prop_bit = { + .name = "bool", + .description = "on/off", + .get = prop_get_bit, + .set = prop_set_bit, + .set_default_value = set_default_value_bool, +}; + +/* Bit64 */ + +static uint64_t qdev_get_prop_mask64(Property *prop) +{ + assert(prop->info == &qdev_prop_bit64); + return 0x1ull << prop->bitnr; +} + +static void bit64_prop_set(Object *obj, Property *props, bool val) +{ + uint64_t *p = object_field_prop_ptr(obj, props); + uint64_t mask = qdev_get_prop_mask64(props); + if (val) { + *p |= mask; + } else { + *p &= ~mask; + } +} + +static void prop_get_bit64(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + Property *prop = opaque; + uint64_t *p = object_field_prop_ptr(obj, prop); + bool value = (*p & qdev_get_prop_mask64(prop)) != 0; + + visit_type_bool(v, name, &value, errp); +} + +static void prop_set_bit64(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + Property *prop = opaque; + bool value; + + if (!visit_type_bool(v, name, &value, errp)) { + return; + } + bit64_prop_set(obj, prop, value); +} + +const PropertyInfo qdev_prop_bit64 = { + .name = "bool", + .description = "on/off", + .get = prop_get_bit64, + .set = prop_set_bit64, + .set_default_value = set_default_value_bool, +}; + +/* --- bool --- */ + +static void get_bool(Object *obj, Visitor *v, const char *name, void *opaque, + Error **errp) +{ + Property *prop = opaque; + bool *ptr = object_field_prop_ptr(obj, prop); + + visit_type_bool(v, name, ptr, errp); +} + +static void set_bool(Object *obj, Visitor *v, const char *name, void 
*opaque, + Error **errp) +{ + Property *prop = opaque; + bool *ptr = object_field_prop_ptr(obj, prop); + + visit_type_bool(v, name, ptr, errp); +} + +const PropertyInfo qdev_prop_bool = { + .name = "bool", + .get = get_bool, + .set = set_bool, + .set_default_value = set_default_value_bool, +}; + +/* --- 8bit integer --- */ + +static void get_uint8(Object *obj, Visitor *v, const char *name, void *opaque, + Error **errp) +{ + Property *prop = opaque; + uint8_t *ptr = object_field_prop_ptr(obj, prop); + + visit_type_uint8(v, name, ptr, errp); +} + +static void set_uint8(Object *obj, Visitor *v, const char *name, void *opaque, + Error **errp) +{ + Property *prop = opaque; + uint8_t *ptr = object_field_prop_ptr(obj, prop); + + visit_type_uint8(v, name, ptr, errp); +} + +void qdev_propinfo_set_default_value_int(ObjectProperty *op, + const Property *prop) +{ + object_property_set_default_int(op, prop->defval.i); +} + +void qdev_propinfo_set_default_value_uint(ObjectProperty *op, + const Property *prop) +{ + object_property_set_default_uint(op, prop->defval.u); +} + +const PropertyInfo qdev_prop_uint8 = { + .name = "uint8", + .get = get_uint8, + .set = set_uint8, + .set_default_value = qdev_propinfo_set_default_value_uint, +}; + +/* --- 16bit integer --- */ + +static void get_uint16(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + Property *prop = opaque; + uint16_t *ptr = object_field_prop_ptr(obj, prop); + + visit_type_uint16(v, name, ptr, errp); +} + +static void set_uint16(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + Property *prop = opaque; + uint16_t *ptr = object_field_prop_ptr(obj, prop); + + visit_type_uint16(v, name, ptr, errp); +} + +const PropertyInfo qdev_prop_uint16 = { + .name = "uint16", + .get = get_uint16, + .set = set_uint16, + .set_default_value = qdev_propinfo_set_default_value_uint, +}; + +/* --- 32bit integer --- */ + +static void get_uint32(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + Property *prop = opaque; + uint32_t *ptr = object_field_prop_ptr(obj, prop); + + visit_type_uint32(v, name, ptr, errp); +} + +static void set_uint32(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + Property *prop = opaque; + uint32_t *ptr = object_field_prop_ptr(obj, prop); + + visit_type_uint32(v, name, ptr, errp); +} + +void qdev_propinfo_get_int32(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + Property *prop = opaque; + int32_t *ptr = object_field_prop_ptr(obj, prop); + + visit_type_int32(v, name, ptr, errp); +} + +static void set_int32(Object *obj, Visitor *v, const char *name, void *opaque, + Error **errp) +{ + Property *prop = opaque; + int32_t *ptr = object_field_prop_ptr(obj, prop); + + visit_type_int32(v, name, ptr, errp); +} + +const PropertyInfo qdev_prop_uint32 = { + .name = "uint32", + .get = get_uint32, + .set = set_uint32, + .set_default_value = qdev_propinfo_set_default_value_uint, +}; + +const PropertyInfo qdev_prop_int32 = { + .name = "int32", + .get = qdev_propinfo_get_int32, + .set = set_int32, + .set_default_value = qdev_propinfo_set_default_value_int, +}; + +/* --- 64bit integer --- */ + +static void get_uint64(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + Property *prop = opaque; + uint64_t *ptr = object_field_prop_ptr(obj, prop); + + visit_type_uint64(v, name, ptr, errp); +} + +static void set_uint64(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + Property 
*prop = opaque; + uint64_t *ptr = object_field_prop_ptr(obj, prop); + + visit_type_uint64(v, name, ptr, errp); +} + +static void get_int64(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + Property *prop = opaque; + int64_t *ptr = object_field_prop_ptr(obj, prop); + + visit_type_int64(v, name, ptr, errp); +} + +static void set_int64(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + Property *prop = opaque; + int64_t *ptr = object_field_prop_ptr(obj, prop); + + visit_type_int64(v, name, ptr, errp); +} + +const PropertyInfo qdev_prop_uint64 = { + .name = "uint64", + .get = get_uint64, + .set = set_uint64, + .set_default_value = qdev_propinfo_set_default_value_uint, +}; + +const PropertyInfo qdev_prop_int64 = { + .name = "int64", + .get = get_int64, + .set = set_int64, + .set_default_value = qdev_propinfo_set_default_value_int, +}; + +/* --- string --- */ + +static void release_string(Object *obj, const char *name, void *opaque) +{ + Property *prop = opaque; + g_free(*(char **)object_field_prop_ptr(obj, prop)); +} + +static void get_string(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + Property *prop = opaque; + char **ptr = object_field_prop_ptr(obj, prop); + + if (!*ptr) { + char *str = (char *)""; + visit_type_str(v, name, &str, errp); + } else { + visit_type_str(v, name, ptr, errp); + } +} + +static void set_string(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + Property *prop = opaque; + char **ptr = object_field_prop_ptr(obj, prop); + char *str; + + if (!visit_type_str(v, name, &str, errp)) { + return; + } + g_free(*ptr); + *ptr = str; +} + +const PropertyInfo qdev_prop_string = { + .name = "str", + .release = release_string, + .get = get_string, + .set = set_string, +}; + +/* --- on/off/auto --- */ + +const PropertyInfo qdev_prop_on_off_auto = { + .name = "OnOffAuto", + .description = "on/off/auto", + .enum_table = &OnOffAuto_lookup, + .get = qdev_propinfo_get_enum, + .set = qdev_propinfo_set_enum, + .set_default_value = qdev_propinfo_set_default_value_enum, +}; + +/* --- 32bit unsigned int 'size' type --- */ + +void qdev_propinfo_get_size32(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + Property *prop = opaque; + uint32_t *ptr = object_field_prop_ptr(obj, prop); + uint64_t value = *ptr; + + visit_type_size(v, name, &value, errp); +} + +static void set_size32(Object *obj, Visitor *v, const char *name, void *opaque, + Error **errp) +{ + Property *prop = opaque; + uint32_t *ptr = object_field_prop_ptr(obj, prop); + uint64_t value; + + if (!visit_type_size(v, name, &value, errp)) { + return; + } + + if (value > UINT32_MAX) { + error_setg(errp, + "Property %s.%s doesn't take value %" PRIu64 + " (maximum: %u)", + object_get_typename(obj), name, value, UINT32_MAX); + return; + } + + *ptr = value; +} + +const PropertyInfo qdev_prop_size32 = { + .name = "size", + .get = qdev_propinfo_get_size32, + .set = set_size32, + .set_default_value = qdev_propinfo_set_default_value_uint, +}; + +/* --- support for array properties --- */ + +/* Used as an opaque for the object properties we add for each + * array element. Note that the struct Property must be first + * in the struct so that a pointer to this works as the opaque + * for the underlying element's property hooks as well as for + * our own release callback. 
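On the consumer side, a device declares such a variable-length array with the DEFINE_PROP_ARRAY macro from "hw/qdev-properties.h"; the struct and property names below are invented. Setting "len-regs" then materializes "regs[0]" through "regs[n-1]" as individual properties:

    typedef struct MyDevState {
        DeviceState parent_obj;
        uint32_t num_regs;   /* written by the "len-regs" length property */
        uint32_t *regs;      /* allocated when the length is set */
    } MyDevState;

    static Property my_dev_props[] = {
        DEFINE_PROP_ARRAY("regs", MyDevState, num_regs, regs,
                          qdev_prop_uint32, uint32_t),
        DEFINE_PROP_END_OF_LIST(),
    };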
+ */ +typedef struct { + struct Property prop; + char *propname; + ObjectPropertyRelease *release; +} ArrayElementProperty; + +/* object property release callback for array element properties: + * we call the underlying element's property release hook, and + * then free the memory we allocated when we added the property. + */ +static void array_element_release(Object *obj, const char *name, void *opaque) +{ + ArrayElementProperty *p = opaque; + if (p->release) { + p->release(obj, name, opaque); + } + g_free(p->propname); + g_free(p); +} + +static void set_prop_arraylen(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + /* Setter for the property which defines the length of a + * variable-sized property array. As well as actually setting the + * array-length field in the device struct, we have to create the + * array itself and dynamically add the corresponding properties. + */ + Property *prop = opaque; + uint32_t *alenptr = object_field_prop_ptr(obj, prop); + void **arrayptr = (void *)obj + prop->arrayoffset; + void *eltptr; + const char *arrayname; + int i; + + if (*alenptr) { + error_setg(errp, "array size property %s may not be set more than once", + name); + return; + } + if (!visit_type_uint32(v, name, alenptr, errp)) { + return; + } + if (!*alenptr) { + return; + } + + /* DEFINE_PROP_ARRAY guarantees that name should start with this prefix; + * strip it off so we can get the name of the array itself. + */ + assert(strncmp(name, PROP_ARRAY_LEN_PREFIX, + strlen(PROP_ARRAY_LEN_PREFIX)) == 0); + arrayname = name + strlen(PROP_ARRAY_LEN_PREFIX); + + /* Note that it is the responsibility of the individual device's deinit + * to free the array proper. + */ + *arrayptr = eltptr = g_malloc0(*alenptr * prop->arrayfieldsize); + for (i = 0; i < *alenptr; i++, eltptr += prop->arrayfieldsize) { + char *propname = g_strdup_printf("%s[%d]", arrayname, i); + ArrayElementProperty *arrayprop = g_new0(ArrayElementProperty, 1); + arrayprop->release = prop->arrayinfo->release; + arrayprop->propname = propname; + arrayprop->prop.info = prop->arrayinfo; + arrayprop->prop.name = propname; + /* This ugly piece of pointer arithmetic sets up the offset so + * that when the underlying get/set hooks call qdev_get_prop_ptr + * they get the right answer despite the array element not actually + * being inside the device struct. 
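A toy illustration of why storing (element pointer minus object pointer) works: the generic accessors only ever compute base plus offset, so the "field" may live anywhere, not just inside the struct. This sketch relies on the same flat pointer arithmetic the code itself assumes:

    char obj[64];                          /* stands in for the device struct */
    uint32_t *elts = g_new0(uint32_t, 4);  /* heap array, outside the struct  */

    ptrdiff_t off = (void *)&elts[2] - (void *)obj;
    assert((void *)obj + off == (void *)&elts[2]);   /* the accessor's view */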
+ */ + arrayprop->prop.offset = eltptr - (void *)obj; + assert(object_field_prop_ptr(obj, &arrayprop->prop) == eltptr); + object_property_add(obj, propname, + arrayprop->prop.info->name, + field_prop_getter(arrayprop->prop.info), + field_prop_setter(arrayprop->prop.info), + array_element_release, + arrayprop); + } +} + +const PropertyInfo qdev_prop_arraylen = { + .name = "uint32", + .get = get_uint32, + .set = set_prop_arraylen, + .set_default_value = qdev_propinfo_set_default_value_uint, +}; + +/* --- public helpers --- */ + +static Property *qdev_prop_walk(Property *props, const char *name) +{ + if (!props) { + return NULL; + } + while (props->name) { + if (strcmp(props->name, name) == 0) { + return props; + } + props++; + } + return NULL; +} + +static Property *qdev_prop_find(DeviceState *dev, const char *name) +{ + ObjectClass *class; + Property *prop; + + /* device properties */ + class = object_get_class(OBJECT(dev)); + do { + prop = qdev_prop_walk(DEVICE_CLASS(class)->props_, name); + if (prop) { + return prop; + } + class = object_class_get_parent(class); + } while (class != object_class_by_name(TYPE_DEVICE)); + + return NULL; +} + +void error_set_from_qdev_prop_error(Error **errp, int ret, Object *obj, + const char *name, const char *value) +{ + switch (ret) { + case -EEXIST: + error_setg(errp, "Property '%s.%s' can't take value '%s', it's in use", + object_get_typename(obj), name, value); + break; + default: + case -EINVAL: + error_setg(errp, QERR_PROPERTY_VALUE_BAD, + object_get_typename(obj), name, value); + break; + case -ENOENT: + error_setg(errp, "Property '%s.%s' can't find value '%s'", + object_get_typename(obj), name, value); + break; + case 0: + break; + } +} + +void qdev_prop_set_bit(DeviceState *dev, const char *name, bool value) +{ + object_property_set_bool(OBJECT(dev), name, value, &error_abort); +} + +void qdev_prop_set_uint8(DeviceState *dev, const char *name, uint8_t value) +{ + object_property_set_int(OBJECT(dev), name, value, &error_abort); +} + +void qdev_prop_set_uint16(DeviceState *dev, const char *name, uint16_t value) +{ + object_property_set_int(OBJECT(dev), name, value, &error_abort); +} + +void qdev_prop_set_uint32(DeviceState *dev, const char *name, uint32_t value) +{ + object_property_set_int(OBJECT(dev), name, value, &error_abort); +} + +void qdev_prop_set_int32(DeviceState *dev, const char *name, int32_t value) +{ + object_property_set_int(OBJECT(dev), name, value, &error_abort); +} + +void qdev_prop_set_uint64(DeviceState *dev, const char *name, uint64_t value) +{ + object_property_set_int(OBJECT(dev), name, value, &error_abort); +} + +void qdev_prop_set_string(DeviceState *dev, const char *name, const char *value) +{ + object_property_set_str(OBJECT(dev), name, value, &error_abort); +} + +void qdev_prop_set_enum(DeviceState *dev, const char *name, int value) +{ + Property *prop; + + prop = qdev_prop_find(dev, name); + object_property_set_str(OBJECT(dev), name, + qapi_enum_lookup(prop->info->enum_table, value), + &error_abort); +} + +static GPtrArray *global_props(void) +{ + static GPtrArray *gp; + + if (!gp) { + gp = g_ptr_array_new(); + } + + return gp; +} + +void qdev_prop_register_global(GlobalProperty *prop) +{ + g_ptr_array_add(global_props(), prop); +} + +const GlobalProperty *qdev_find_global_prop(Object *obj, + const char *name) +{ + GPtrArray *props = global_props(); + const GlobalProperty *p; + int i; + + for (i = 0; i < props->len; i++) { + p = g_ptr_array_index(props, i); + if (object_dynamic_cast(obj, p->driver) + && 
!strcmp(p->property, name)) { + return p; + } + } + return NULL; +} + +int qdev_prop_check_globals(void) +{ + int i, ret = 0; + + for (i = 0; i < global_props()->len; i++) { + GlobalProperty *prop; + ObjectClass *oc; + DeviceClass *dc; + + prop = g_ptr_array_index(global_props(), i); + if (prop->used) { + continue; + } + oc = object_class_by_name(prop->driver); + oc = object_class_dynamic_cast(oc, TYPE_DEVICE); + if (!oc) { + warn_report("global %s.%s has invalid class name", + prop->driver, prop->property); + ret = 1; + continue; + } + dc = DEVICE_CLASS(oc); + if (!dc->hotpluggable && !prop->used) { + warn_report("global %s.%s=%s not used", + prop->driver, prop->property, prop->value); + ret = 1; + continue; + } + } + return ret; +} + +void qdev_prop_set_globals(DeviceState *dev) +{ + object_apply_global_props(OBJECT(dev), global_props(), + dev->hotplugged ? NULL : &error_fatal); +} + +/* --- 64bit unsigned int 'size' type --- */ + +static void get_size(Object *obj, Visitor *v, const char *name, void *opaque, + Error **errp) +{ + Property *prop = opaque; + uint64_t *ptr = object_field_prop_ptr(obj, prop); + + visit_type_size(v, name, ptr, errp); +} + +static void set_size(Object *obj, Visitor *v, const char *name, void *opaque, + Error **errp) +{ + Property *prop = opaque; + uint64_t *ptr = object_field_prop_ptr(obj, prop); + + visit_type_size(v, name, ptr, errp); +} + +const PropertyInfo qdev_prop_size = { + .name = "size", + .get = get_size, + .set = set_size, + .set_default_value = qdev_propinfo_set_default_value_uint, +}; + +/* --- object link property --- */ + +static ObjectProperty *create_link_property(ObjectClass *oc, const char *name, + Property *prop) +{ + return object_class_property_add_link(oc, name, prop->link_type, + prop->offset, + qdev_prop_allow_set_link_before_realize, + OBJ_PROP_LINK_STRONG); +} + +const PropertyInfo qdev_prop_link = { + .name = "link", + .create = create_link_property, +}; + +void qdev_property_add_static(DeviceState *dev, Property *prop) +{ + Object *obj = OBJECT(dev); + ObjectProperty *op; + + assert(!prop->info->create); + + op = object_property_add(obj, prop->name, prop->info->name, + field_prop_getter(prop->info), + field_prop_setter(prop->info), + prop->info->release, + prop); + + object_property_set_description(obj, prop->name, + prop->info->description); + + if (prop->set_default) { + prop->info->set_default_value(op, prop); + if (op->init) { + op->init(obj, op); + } + } +} + +static void qdev_class_add_property(DeviceClass *klass, const char *name, + Property *prop) +{ + ObjectClass *oc = OBJECT_CLASS(klass); + ObjectProperty *op; + + if (prop->info->create) { + op = prop->info->create(oc, name, prop); + } else { + op = object_class_property_add(oc, + name, prop->info->name, + field_prop_getter(prop->info), + field_prop_setter(prop->info), + prop->info->release, + prop); + } + if (prop->set_default) { + prop->info->set_default_value(op, prop); + } + object_class_property_set_description(oc, name, prop->info->description); +} + +/** + * Legacy property handling + */ + +static void qdev_get_legacy_property(Object *obj, Visitor *v, + const char *name, void *opaque, + Error **errp) +{ + Property *prop = opaque; + + char buffer[1024]; + char *ptr = buffer; + + prop->info->print(obj, prop, buffer, sizeof(buffer)); + visit_type_str(v, name, &ptr, errp); +} + +/** + * qdev_class_add_legacy_property: + * @dev: Device to add the property to. + * @prop: The qdev property definition. + * + * Add a legacy QOM property to @dev for qdev property @prop. 
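Both the legacy wrapper and the regular class property are installed by device_class_set_props(), defined just below; a device typically calls it from class_init. A hypothetical example (struct, field, and default value are invented):

    static Property my_dev_properties[] = {
        /* assumes a MyDevState with a uint32_t 'level' field */
        DEFINE_PROP_UINT32("level", MyDevState, level, 0),
        DEFINE_PROP_END_OF_LIST(),
    };

    static void my_dev_class_init(ObjectClass *klass, void *data)
    {
        device_class_set_props(DEVICE_CLASS(klass), my_dev_properties);
    }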
+ * + * Legacy properties are string versions of QOM properties. The format of + * the string depends on the property type. Legacy properties are only + * needed for "info qtree". + * + * Do not use this in new code! QOM Properties added through this interface + * will be given names in the "legacy" namespace. + */ +static void qdev_class_add_legacy_property(DeviceClass *dc, Property *prop) +{ + g_autofree char *name = NULL; + + /* Register pointer properties as legacy properties */ + if (!prop->info->print && prop->info->get) { + return; + } + + name = g_strdup_printf("legacy-%s", prop->name); + object_class_property_add(OBJECT_CLASS(dc), name, "str", + prop->info->print ? qdev_get_legacy_property : prop->info->get, + NULL, NULL, prop); +} + +void device_class_set_props(DeviceClass *dc, Property *props) +{ + Property *prop; + + dc->props_ = props; + for (prop = props; prop && prop->name; prop++) { + qdev_class_add_legacy_property(dc, prop); + qdev_class_add_property(dc, prop->name, prop); + } +} + +void qdev_alias_all_properties(DeviceState *target, Object *source) +{ + ObjectClass *class; + Property *prop; + + class = object_get_class(OBJECT(target)); + do { + DeviceClass *dc = DEVICE_CLASS(class); + + for (prop = dc->props_; prop && prop->name; prop++) { + object_property_add_alias(source, prop->name, + OBJECT(target), prop->name); + } + class = object_class_get_parent(class); + } while (class != object_class_by_name(TYPE_DEVICE)); +} diff --git a/hw/core/qdev.c b/hw/core/qdev.c new file mode 100644 index 000000000..84f301944 --- /dev/null +++ b/hw/core/qdev.c @@ -0,0 +1,947 @@ +/* + * Dynamic device configuration and creation. + * + * Copyright (c) 2009 CodeSourcery + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ + +/* The theory here is that it should be possible to create a machine without + knowledge of specific devices. Historically board init routines have + passed a bunch of arguments to each device, requiring the board to know + exactly which device it is dealing with. This file provides an abstract + API for device configuration and initialization. Devices will generally + inherit from a particular bus (e.g. PCI or I2C) rather than + this API directly.
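In practice the flow this comment describes is the qdev_new()/qdev_realize() sequence defined in this file; the device name, property, and bus below are illustrative:

    DeviceState *dev = qdev_new("my-device");        /* create           */
    qdev_prop_set_uint32(dev, "level", 3);           /* configure        */
    qdev_realize_and_unref(dev, bus, &error_fatal);  /* plug and realize */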
*/ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qapi/qapi-events-qdev.h" +#include "qapi/qmp/qdict.h" +#include "qapi/qmp/qerror.h" +#include "qapi/visitor.h" +#include "qemu/error-report.h" +#include "qemu/option.h" +#include "hw/irq.h" +#include "hw/qdev-properties.h" +#include "hw/boards.h" +#include "hw/sysbus.h" +#include "hw/qdev-clock.h" +#include "migration/vmstate.h" +#include "trace.h" + +static bool qdev_hot_added = false; +bool qdev_hot_removed = false; + +const VMStateDescription *qdev_get_vmsd(DeviceState *dev) +{ + DeviceClass *dc = DEVICE_GET_CLASS(dev); + return dc->vmsd; +} + +static void bus_free_bus_child(BusChild *kid) +{ + object_unref(OBJECT(kid->child)); + g_free(kid); +} + +static void bus_remove_child(BusState *bus, DeviceState *child) +{ + BusChild *kid; + + QTAILQ_FOREACH(kid, &bus->children, sibling) { + if (kid->child == child) { + char name[32]; + + snprintf(name, sizeof(name), "child[%d]", kid->index); + QTAILQ_REMOVE_RCU(&bus->children, kid, sibling); + + bus->num_children--; + + /* This gives back ownership of kid->child back to us. */ + object_property_del(OBJECT(bus), name); + + /* free the bus kid, when it is safe to do so*/ + call_rcu(kid, bus_free_bus_child, rcu); + break; + } + } +} + +static void bus_add_child(BusState *bus, DeviceState *child) +{ + char name[32]; + BusChild *kid = g_malloc0(sizeof(*kid)); + + bus->num_children++; + kid->index = bus->max_index++; + kid->child = child; + object_ref(OBJECT(kid->child)); + + QTAILQ_INSERT_HEAD_RCU(&bus->children, kid, sibling); + + /* This transfers ownership of kid->child to the property. */ + snprintf(name, sizeof(name), "child[%d]", kid->index); + object_property_add_link(OBJECT(bus), name, + object_get_typename(OBJECT(child)), + (Object **)&kid->child, + NULL, /* read-only property */ + 0); +} + +static bool bus_check_address(BusState *bus, DeviceState *child, Error **errp) +{ + BusClass *bc = BUS_GET_CLASS(bus); + return !bc->check_address || bc->check_address(bus, child, errp); +} + +bool qdev_set_parent_bus(DeviceState *dev, BusState *bus, Error **errp) +{ + BusState *old_parent_bus = dev->parent_bus; + DeviceClass *dc = DEVICE_GET_CLASS(dev); + + assert(dc->bus_type && object_dynamic_cast(OBJECT(bus), dc->bus_type)); + + if (!bus_check_address(bus, dev, errp)) { + return false; + } + + if (old_parent_bus) { + trace_qdev_update_parent_bus(dev, object_get_typename(OBJECT(dev)), + old_parent_bus, object_get_typename(OBJECT(old_parent_bus)), + OBJECT(bus), object_get_typename(OBJECT(bus))); + /* + * Keep a reference to the device while it's not plugged into + * any bus, to avoid it potentially evaporating when it is + * dereffed in bus_remove_child(). + * Also keep the ref of the parent bus until the end, so that + * we can safely call resettable_change_parent() below. 
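For completeness, the vmsd hook that qdev_get_vmsd() at the top of this file hands to migration is normally installed in class_init; a hypothetical device's description, using the macros from "migration/vmstate.h" (names invented, and a MyDevState with a uint32_t 'level' field is assumed):

    static const VMStateDescription vmstate_my_dev = {
        .name = "my-dev",
        .version_id = 1,
        .minimum_version_id = 1,
        .fields = (VMStateField[]) {
            VMSTATE_UINT32(level, MyDevState),
            VMSTATE_END_OF_LIST()
        },
    };

    static void my_dev_class_init(ObjectClass *klass, void *data)
    {
        DEVICE_CLASS(klass)->vmsd = &vmstate_my_dev;
    }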
+ */ + object_ref(OBJECT(dev)); + bus_remove_child(dev->parent_bus, dev); + } + dev->parent_bus = bus; + object_ref(OBJECT(bus)); + bus_add_child(bus, dev); + if (dev->realized) { + resettable_change_parent(OBJECT(dev), OBJECT(bus), + OBJECT(old_parent_bus)); + } + if (old_parent_bus) { + object_unref(OBJECT(old_parent_bus)); + object_unref(OBJECT(dev)); + } + return true; +} + +DeviceState *qdev_new(const char *name) +{ + if (!object_class_by_name(name)) { + module_load_qom_one(name); + } + return DEVICE(object_new(name)); +} + +DeviceState *qdev_try_new(const char *name) +{ + if (!module_object_class_by_name(name)) { + return NULL; + } + return DEVICE(object_new(name)); +} + +static QTAILQ_HEAD(, DeviceListener) device_listeners + = QTAILQ_HEAD_INITIALIZER(device_listeners); + +enum ListenerDirection { Forward, Reverse }; + +#define DEVICE_LISTENER_CALL(_callback, _direction, _args...) \ + do { \ + DeviceListener *_listener; \ + \ + switch (_direction) { \ + case Forward: \ + QTAILQ_FOREACH(_listener, &device_listeners, link) { \ + if (_listener->_callback) { \ + _listener->_callback(_listener, ##_args); \ + } \ + } \ + break; \ + case Reverse: \ + QTAILQ_FOREACH_REVERSE(_listener, &device_listeners, \ + link) { \ + if (_listener->_callback) { \ + _listener->_callback(_listener, ##_args); \ + } \ + } \ + break; \ + default: \ + abort(); \ + } \ + } while (0) + +static int device_listener_add(DeviceState *dev, void *opaque) +{ + DEVICE_LISTENER_CALL(realize, Forward, dev); + + return 0; +} + +void device_listener_register(DeviceListener *listener) +{ + QTAILQ_INSERT_TAIL(&device_listeners, listener, link); + + qbus_walk_children(sysbus_get_default(), NULL, NULL, device_listener_add, + NULL, NULL); +} + +void device_listener_unregister(DeviceListener *listener) +{ + QTAILQ_REMOVE(&device_listeners, listener, link); +} + +bool qdev_should_hide_device(const QDict *opts, bool from_json, Error **errp) +{ + ERRP_GUARD(); + DeviceListener *listener; + + QTAILQ_FOREACH(listener, &device_listeners, link) { + if (listener->hide_device) { + if (listener->hide_device(listener, opts, from_json, errp)) { + return true; + } else if (*errp) { + return false; + } + } + } + + return false; +} + +void qdev_set_legacy_instance_id(DeviceState *dev, int alias_id, + int required_for_version) +{ + assert(!dev->realized); + dev->instance_id_alias = alias_id; + dev->alias_required_for_version = required_for_version; +} + +static int qdev_prereset(DeviceState *dev, void *opaque) +{ + trace_qdev_reset_tree(dev, object_get_typename(OBJECT(dev))); + return 0; +} + +static int qbus_prereset(BusState *bus, void *opaque) +{ + trace_qbus_reset_tree(bus, object_get_typename(OBJECT(bus))); + return 0; +} + +static int qdev_reset_one(DeviceState *dev, void *opaque) +{ + device_legacy_reset(dev); + + return 0; +} + +static int qbus_reset_one(BusState *bus, void *opaque) +{ + BusClass *bc = BUS_GET_CLASS(bus); + trace_qbus_reset(bus, object_get_typename(OBJECT(bus))); + if (bc->reset) { + bc->reset(bus); + } + return 0; +} + +void qdev_reset_all(DeviceState *dev) +{ + trace_qdev_reset_all(dev, object_get_typename(OBJECT(dev))); + qdev_walk_children(dev, qdev_prereset, qbus_prereset, + qdev_reset_one, qbus_reset_one, NULL); +} + +void qdev_reset_all_fn(void *opaque) +{ + qdev_reset_all(DEVICE(opaque)); +} + +void qbus_reset_all(BusState *bus) +{ + trace_qbus_reset_all(bus, object_get_typename(OBJECT(bus))); + qbus_walk_children(bus, qdev_prereset, qbus_prereset, + qdev_reset_one, qbus_reset_one, NULL); +} + +void 
qbus_reset_all_fn(void *opaque) +{ + BusState *bus = opaque; + qbus_reset_all(bus); +} + +void device_cold_reset(DeviceState *dev) +{ + resettable_reset(OBJECT(dev), RESET_TYPE_COLD); +} + +bool device_is_in_reset(DeviceState *dev) +{ + return resettable_is_in_reset(OBJECT(dev)); +} + +static ResettableState *device_get_reset_state(Object *obj) +{ + DeviceState *dev = DEVICE(obj); + return &dev->reset; +} + +static void device_reset_child_foreach(Object *obj, ResettableChildCallback cb, + void *opaque, ResetType type) +{ + DeviceState *dev = DEVICE(obj); + BusState *bus; + + QLIST_FOREACH(bus, &dev->child_bus, sibling) { + cb(OBJECT(bus), opaque, type); + } +} + +bool qdev_realize(DeviceState *dev, BusState *bus, Error **errp) +{ + assert(!dev->realized && !dev->parent_bus); + + if (bus) { + if (!qdev_set_parent_bus(dev, bus, errp)) { + return false; + } + } else { + assert(!DEVICE_GET_CLASS(dev)->bus_type); + } + + return object_property_set_bool(OBJECT(dev), "realized", true, errp); +} + +bool qdev_realize_and_unref(DeviceState *dev, BusState *bus, Error **errp) +{ + bool ret; + + ret = qdev_realize(dev, bus, errp); + object_unref(OBJECT(dev)); + return ret; +} + +void qdev_unrealize(DeviceState *dev) +{ + object_property_set_bool(OBJECT(dev), "realized", false, &error_abort); +} + +static int qdev_assert_realized_properly_cb(Object *obj, void *opaque) +{ + DeviceState *dev = DEVICE(object_dynamic_cast(obj, TYPE_DEVICE)); + DeviceClass *dc; + + if (dev) { + dc = DEVICE_GET_CLASS(dev); + assert(dev->realized); + assert(dev->parent_bus || !dc->bus_type); + } + return 0; +} + +void qdev_assert_realized_properly(void) +{ + object_child_foreach_recursive(object_get_root(), + qdev_assert_realized_properly_cb, NULL); +} + +bool qdev_machine_modified(void) +{ + return qdev_hot_added || qdev_hot_removed; +} + +BusState *qdev_get_parent_bus(DeviceState *dev) +{ + return dev->parent_bus; +} + +BusState *qdev_get_child_bus(DeviceState *dev, const char *name) +{ + BusState *bus; + Object *child = object_resolve_path_component(OBJECT(dev), name); + + bus = (BusState *)object_dynamic_cast(child, TYPE_BUS); + if (bus) { + return bus; + } + + QLIST_FOREACH(bus, &dev->child_bus, sibling) { + if (strcmp(name, bus->name) == 0) { + return bus; + } + } + return NULL; +} + +int qdev_walk_children(DeviceState *dev, + qdev_walkerfn *pre_devfn, qbus_walkerfn *pre_busfn, + qdev_walkerfn *post_devfn, qbus_walkerfn *post_busfn, + void *opaque) +{ + BusState *bus; + int err; + + if (pre_devfn) { + err = pre_devfn(dev, opaque); + if (err) { + return err; + } + } + + QLIST_FOREACH(bus, &dev->child_bus, sibling) { + err = qbus_walk_children(bus, pre_devfn, pre_busfn, + post_devfn, post_busfn, opaque); + if (err < 0) { + return err; + } + } + + if (post_devfn) { + err = post_devfn(dev, opaque); + if (err) { + return err; + } + } + + return 0; +} + +DeviceState *qdev_find_recursive(BusState *bus, const char *id) +{ + BusChild *kid; + DeviceState *ret; + BusState *child; + + WITH_RCU_READ_LOCK_GUARD() { + QTAILQ_FOREACH_RCU(kid, &bus->children, sibling) { + DeviceState *dev = kid->child; + + if (dev->id && strcmp(dev->id, id) == 0) { + return dev; + } + + QLIST_FOREACH(child, &dev->child_bus, sibling) { + ret = qdev_find_recursive(child, id); + if (ret) { + return ret; + } + } + } + } + return NULL; +} + +char *qdev_get_dev_path(DeviceState *dev) +{ + BusClass *bc; + + if (!dev || !dev->parent_bus) { + return NULL; + } + + bc = BUS_GET_CLASS(dev->parent_bus); + if (bc->get_dev_path) { + return bc->get_dev_path(dev); + } + 
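+ /* No bus-specific path encoding is available; callers must handle NULL. */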
+ return NULL; +} + +static bool device_get_realized(Object *obj, Error **errp) +{ + DeviceState *dev = DEVICE(obj); + return dev->realized; +} + +static bool check_only_migratable(Object *obj, Error **errp) +{ + DeviceClass *dc = DEVICE_GET_CLASS(obj); + + if (!vmstate_check_only_migratable(dc->vmsd)) { + error_setg(errp, "Device %s is not migratable, but " + "--only-migratable was specified", + object_get_typename(obj)); + return false; + } + + return true; +} + +static void device_set_realized(Object *obj, bool value, Error **errp) +{ + DeviceState *dev = DEVICE(obj); + DeviceClass *dc = DEVICE_GET_CLASS(dev); + HotplugHandler *hotplug_ctrl; + BusState *bus; + NamedClockList *ncl; + Error *local_err = NULL; + bool unattached_parent = false; + static int unattached_count; + + if (dev->hotplugged && !dc->hotpluggable) { + error_setg(errp, QERR_DEVICE_NO_HOTPLUG, object_get_typename(obj)); + return; + } + + if (value && !dev->realized) { + if (!check_only_migratable(obj, errp)) { + goto fail; + } + + if (!obj->parent) { + gchar *name = g_strdup_printf("device[%d]", unattached_count++); + + object_property_add_child(container_get(qdev_get_machine(), + "/unattached"), + name, obj); + unattached_parent = true; + g_free(name); + } + + hotplug_ctrl = qdev_get_hotplug_handler(dev); + if (hotplug_ctrl) { + hotplug_handler_pre_plug(hotplug_ctrl, dev, &local_err); + if (local_err != NULL) { + goto fail; + } + } + + if (dc->realize) { + dc->realize(dev, &local_err); + if (local_err != NULL) { + goto fail; + } + } + + DEVICE_LISTENER_CALL(realize, Forward, dev); + + /* + * always free/re-initialize here since the value cannot be cleaned up + * in device_unrealize due to its usage later on in the unplug path + */ + g_free(dev->canonical_path); + dev->canonical_path = object_get_canonical_path(OBJECT(dev)); + QLIST_FOREACH(ncl, &dev->clocks, node) { + if (ncl->alias) { + continue; + } else { + clock_setup_canonical_path(ncl->clock); + } + } + + if (qdev_get_vmsd(dev)) { + if (vmstate_register_with_alias_id(VMSTATE_IF(dev), + VMSTATE_INSTANCE_ID_ANY, + qdev_get_vmsd(dev), dev, + dev->instance_id_alias, + dev->alias_required_for_version, + &local_err) < 0) { + goto post_realize_fail; + } + } + + /* + * Clear the reset state, in case the object was previously unrealized + * with a dirty state. + */ + resettable_state_clear(&dev->reset); + + QLIST_FOREACH(bus, &dev->child_bus, sibling) { + if (!qbus_realize(bus, errp)) { + goto child_realize_fail; + } + } + if (dev->hotplugged) { + /* + * Reset the device, as well as its subtree which, at this point, + * should be realized too. + */ + resettable_assert_reset(OBJECT(dev), RESET_TYPE_COLD); + resettable_change_parent(OBJECT(dev), OBJECT(dev->parent_bus), + NULL); + resettable_release_reset(OBJECT(dev), RESET_TYPE_COLD); + } + dev->pending_deleted_event = false; + + if (hotplug_ctrl) { + hotplug_handler_plug(hotplug_ctrl, dev, &local_err); + if (local_err != NULL) { + goto child_realize_fail; + } + } + + qatomic_store_release(&dev->realized, value); + + } else if (!value && dev->realized) { + + /* + * Change the value so that any concurrent users are aware + * that the device is going to be unrealized + * + * TODO: change .realized property to enum that states + * each phase of the device realization/unrealization + */ + + qatomic_set(&dev->realized, value); + /* + * Ensure that concurrent users see this update prior to + * any other changes done by unrealize. 
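+ * (A reader that still observes realized == true may also rely on
+ * state that unrealize is about to tear down; the barrier keeps the
+ * teardown writes from becoming visible before the flag change.)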
+ */ + smp_wmb(); + + QLIST_FOREACH(bus, &dev->child_bus, sibling) { + qbus_unrealize(bus); + } + if (qdev_get_vmsd(dev)) { + vmstate_unregister(VMSTATE_IF(dev), qdev_get_vmsd(dev), dev); + } + if (dc->unrealize) { + dc->unrealize(dev); + } + dev->pending_deleted_event = true; + DEVICE_LISTENER_CALL(unrealize, Reverse, dev); + } + + assert(local_err == NULL); + return; + +child_realize_fail: + QLIST_FOREACH(bus, &dev->child_bus, sibling) { + qbus_unrealize(bus); + } + + if (qdev_get_vmsd(dev)) { + vmstate_unregister(VMSTATE_IF(dev), qdev_get_vmsd(dev), dev); + } + +post_realize_fail: + g_free(dev->canonical_path); + dev->canonical_path = NULL; + if (dc->unrealize) { + dc->unrealize(dev); + } + +fail: + error_propagate(errp, local_err); + if (unattached_parent) { + /* + * Beware, this doesn't just revert + * object_property_add_child(), it also runs bus_remove()! + */ + object_unparent(OBJECT(dev)); + unattached_count--; + } +} + +static bool device_get_hotpluggable(Object *obj, Error **errp) +{ + DeviceClass *dc = DEVICE_GET_CLASS(obj); + DeviceState *dev = DEVICE(obj); + + return dc->hotpluggable && (dev->parent_bus == NULL || + qbus_is_hotpluggable(dev->parent_bus)); +} + +static bool device_get_hotplugged(Object *obj, Error **errp) +{ + DeviceState *dev = DEVICE(obj); + + return dev->hotplugged; +} + +static void device_initfn(Object *obj) +{ + DeviceState *dev = DEVICE(obj); + + if (phase_check(PHASE_MACHINE_READY)) { + dev->hotplugged = 1; + qdev_hot_added = true; + } + + dev->instance_id_alias = -1; + dev->realized = false; + dev->allow_unplug_during_migration = false; + + QLIST_INIT(&dev->gpios); + QLIST_INIT(&dev->clocks); +} + +static void device_post_init(Object *obj) +{ + /* + * Note: ordered so that the user's global properties take + * precedence. + */ + object_apply_compat_props(obj); + qdev_prop_set_globals(DEVICE(obj)); +} + +/* Unlink device from bus and free the structure. */ +static void device_finalize(Object *obj) +{ + NamedGPIOList *ngl, *next; + + DeviceState *dev = DEVICE(obj); + + QLIST_FOREACH_SAFE(ngl, &dev->gpios, node, next) { + QLIST_REMOVE(ngl, node); + qemu_free_irqs(ngl->in, ngl->num_in); + g_free(ngl->name); + g_free(ngl); + /* ngl->out irqs are owned by the other end and should not be freed + * here + */ + } + + qdev_finalize_clocklist(dev); + + /* Only send event if the device had been completely realized */ + if (dev->pending_deleted_event) { + g_assert(dev->canonical_path); + + qapi_event_send_device_deleted(!!dev->id, dev->id, dev->canonical_path); + g_free(dev->canonical_path); + dev->canonical_path = NULL; + } + + qobject_unref(dev->opts); + g_free(dev->id); +} + +static void device_class_base_init(ObjectClass *class, void *data) +{ + DeviceClass *klass = DEVICE_CLASS(class); + + /* We explicitly look up properties in the superclasses, + * so do not propagate them to the subclasses. 
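+ * Without this, a subclass would inherit the parent's props_ pointer
+ * through the copied class struct, and a walk up the class hierarchy
+ * would then see the same property array twice.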
+ */ + klass->props_ = NULL; +} + +static void device_unparent(Object *obj) +{ + DeviceState *dev = DEVICE(obj); + BusState *bus; + + if (dev->realized) { + qdev_unrealize(dev); + } + while (dev->num_child_bus) { + bus = QLIST_FIRST(&dev->child_bus); + object_unparent(OBJECT(bus)); + } + if (dev->parent_bus) { + bus_remove_child(dev->parent_bus, dev); + object_unref(OBJECT(dev->parent_bus)); + dev->parent_bus = NULL; + } +} + +static char * +device_vmstate_if_get_id(VMStateIf *obj) +{ + DeviceState *dev = DEVICE(obj); + + return qdev_get_dev_path(dev); +} + +/** + * device_phases_reset: + * Transition reset method for devices to allow moving + * smoothly from legacy reset method to multi-phases + */ +static void device_phases_reset(DeviceState *dev) +{ + ResettableClass *rc = RESETTABLE_GET_CLASS(dev); + + if (rc->phases.enter) { + rc->phases.enter(OBJECT(dev), RESET_TYPE_COLD); + } + if (rc->phases.hold) { + rc->phases.hold(OBJECT(dev)); + } + if (rc->phases.exit) { + rc->phases.exit(OBJECT(dev)); + } +} + +static void device_transitional_reset(Object *obj) +{ + DeviceClass *dc = DEVICE_GET_CLASS(obj); + + /* + * This will call either @device_phases_reset (for multi-phases transitioned + * devices) or a device's specific method for not-yet transitioned devices. + * In both case, it does not reset children. + */ + if (dc->reset) { + dc->reset(DEVICE(obj)); + } +} + +/** + * device_get_transitional_reset: + * check if the device's class is ready for multi-phase + */ +static ResettableTrFunction device_get_transitional_reset(Object *obj) +{ + DeviceClass *dc = DEVICE_GET_CLASS(obj); + if (dc->reset != device_phases_reset) { + /* + * dc->reset has been overridden by a subclass, + * the device is not ready for multi phase yet. + */ + return device_transitional_reset; + } + return NULL; +} + +static void device_class_init(ObjectClass *class, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(class); + VMStateIfClass *vc = VMSTATE_IF_CLASS(class); + ResettableClass *rc = RESETTABLE_CLASS(class); + + class->unparent = device_unparent; + + /* by default all devices were considered as hotpluggable, + * so with intent to check it in generic qdev_unplug() / + * device_set_realized() functions make every device + * hotpluggable. Devices that shouldn't be hotpluggable, + * should override it in their class_init() + */ + dc->hotpluggable = true; + dc->user_creatable = true; + vc->get_id = device_vmstate_if_get_id; + rc->get_state = device_get_reset_state; + rc->child_foreach = device_reset_child_foreach; + + /* + * @device_phases_reset is put as the default reset method below, allowing + * to do the multi-phase transition from base classes to leaf classes. It + * allows a legacy-reset Device class to extend a multi-phases-reset + * Device class for the following reason: + * + If a base class B has been moved to multi-phase, then it does not + * override this default reset method and may have defined phase methods. + * + A child class C (extending class B) which uses + * device_class_set_parent_reset() (or similar means) to override the + * reset method will still work as expected. @device_phases_reset function + * will be registered as the parent reset method and effectively call + * parent reset phases. 
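+ *
+ * A rough sketch (hypothetical class C extending a multi-phase B):
+ *
+ *   static void c_reset(DeviceState *dev)
+ *   {
+ *       CClass *cc = C_GET_CLASS(dev);
+ *       ... C-specific reset ...
+ *       cc->parent_reset(dev);   (resolves to device_phases_reset)
+ *   }
+ *
+ *   device_class_set_parent_reset(dc, c_reset, &cc->parent_reset);
+ *
+ * so C's legacy reset method still runs B's reset phases.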
+ */ + dc->reset = device_phases_reset; + rc->get_transitional_function = device_get_transitional_reset; + + object_class_property_add_bool(class, "realized", + device_get_realized, device_set_realized); + object_class_property_add_bool(class, "hotpluggable", + device_get_hotpluggable, NULL); + object_class_property_add_bool(class, "hotplugged", + device_get_hotplugged, NULL); + object_class_property_add_link(class, "parent_bus", TYPE_BUS, + offsetof(DeviceState, parent_bus), NULL, 0); +} + +void device_class_set_parent_reset(DeviceClass *dc, + DeviceReset dev_reset, + DeviceReset *parent_reset) +{ + *parent_reset = dc->reset; + dc->reset = dev_reset; +} + +void device_class_set_parent_realize(DeviceClass *dc, + DeviceRealize dev_realize, + DeviceRealize *parent_realize) +{ + *parent_realize = dc->realize; + dc->realize = dev_realize; +} + +void device_class_set_parent_unrealize(DeviceClass *dc, + DeviceUnrealize dev_unrealize, + DeviceUnrealize *parent_unrealize) +{ + *parent_unrealize = dc->unrealize; + dc->unrealize = dev_unrealize; +} + +void device_legacy_reset(DeviceState *dev) +{ + DeviceClass *klass = DEVICE_GET_CLASS(dev); + + trace_qdev_reset(dev, object_get_typename(OBJECT(dev))); + if (klass->reset) { + klass->reset(dev); + } +} + +Object *qdev_get_machine(void) +{ + static Object *dev; + + if (dev == NULL) { + dev = container_get(object_get_root(), "/machine"); + } + + return dev; +} + +static MachineInitPhase machine_phase; + +bool phase_check(MachineInitPhase phase) +{ + return machine_phase >= phase; +} + +void phase_advance(MachineInitPhase phase) +{ + assert(machine_phase == phase - 1); + machine_phase = phase; +} + +static const TypeInfo device_type_info = { + .name = TYPE_DEVICE, + .parent = TYPE_OBJECT, + .instance_size = sizeof(DeviceState), + .instance_init = device_initfn, + .instance_post_init = device_post_init, + .instance_finalize = device_finalize, + .class_base_init = device_class_base_init, + .class_init = device_class_init, + .abstract = true, + .class_size = sizeof(DeviceClass), + .interfaces = (InterfaceInfo[]) { + { TYPE_VMSTATE_IF }, + { TYPE_RESETTABLE_INTERFACE }, + { } + } +}; + +static void qdev_register_types(void) +{ + type_register_static(&device_type_info); +} + +type_init(qdev_register_types) diff --git a/hw/core/register.c b/hw/core/register.c new file mode 100644 index 000000000..95b0150c0 --- /dev/null +++ b/hw/core/register.c @@ -0,0 +1,342 @@ +/* + * Register Definition API + * + * Copyright (c) 2016 Xilinx Inc. + * Copyright (c) 2013 Peter Crosthwaite + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ */ + +#include "qemu/osdep.h" +#include "hw/register.h" +#include "qemu/log.h" +#include "qemu/module.h" + +static inline void register_write_val(RegisterInfo *reg, uint64_t val) +{ + g_assert(reg->data); + + switch (reg->data_size) { + case 1: + *(uint8_t *)reg->data = val; + break; + case 2: + *(uint16_t *)reg->data = val; + break; + case 4: + *(uint32_t *)reg->data = val; + break; + case 8: + *(uint64_t *)reg->data = val; + break; + default: + g_assert_not_reached(); + } +} + +static inline uint64_t register_read_val(RegisterInfo *reg) +{ + switch (reg->data_size) { + case 1: + return *(uint8_t *)reg->data; + case 2: + return *(uint16_t *)reg->data; + case 4: + return *(uint32_t *)reg->data; + case 8: + return *(uint64_t *)reg->data; + default: + g_assert_not_reached(); + } + return 0; /* unreachable */ +} + +static inline uint64_t register_enabled_mask(int data_size, unsigned size) +{ + if (data_size < size) { + size = data_size; + } + + return MAKE_64BIT_MASK(0, size * 8); +} + +void register_write(RegisterInfo *reg, uint64_t val, uint64_t we, + const char *prefix, bool debug) +{ + uint64_t old_val, new_val, test, no_w_mask; + const RegisterAccessInfo *ac; + + assert(reg); + + ac = reg->access; + + if (!ac || !ac->name) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: write to undefined device state " + "(written value: 0x%" PRIx64 ")\n", prefix, val); + return; + } + + old_val = reg->data ? register_read_val(reg) : ac->reset; + + test = (old_val ^ val) & ac->rsvd; + if (test) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: change of value in reserved bit" + "fields: 0x%" PRIx64 ")\n", prefix, test); + } + + test = val & ac->unimp; + if (test) { + qemu_log_mask(LOG_UNIMP, + "%s:%s writing 0x%" PRIx64 " to unimplemented bits:" \ + " 0x%" PRIx64 "\n", + prefix, reg->access->name, val, ac->unimp); + } + + /* Create the no write mask based on the read only, write to clear and + * reserved bit masks. + */ + no_w_mask = ac->ro | ac->w1c | ac->rsvd | ~we; + new_val = (val & ~no_w_mask) | (old_val & no_w_mask); + new_val &= ~(val & ac->w1c); + + if (ac->pre_write) { + new_val = ac->pre_write(reg, new_val); + } + + if (debug) { + qemu_log("%s:%s: write of value 0x%" PRIx64 "\n", prefix, ac->name, + new_val); + } + + register_write_val(reg, new_val); + + if (ac->post_write) { + ac->post_write(reg, new_val); + } +} + +uint64_t register_read(RegisterInfo *reg, uint64_t re, const char* prefix, + bool debug) +{ + uint64_t ret; + const RegisterAccessInfo *ac; + + assert(reg); + + ac = reg->access; + if (!ac || !ac->name) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: read from undefined device state\n", + prefix); + return 0; + } + + ret = reg->data ? 
register_read_val(reg) : ac->reset; + + register_write_val(reg, ret & ~(ac->cor & re)); + + /* Mask based on the read enable size */ + ret &= re; + + if (ac->post_read) { + ret = ac->post_read(reg, ret); + } + + if (debug) { + qemu_log("%s:%s: read of value 0x%" PRIx64 "\n", prefix, + ac->name, ret); + } + + return ret; +} + +void register_reset(RegisterInfo *reg) +{ + const RegisterAccessInfo *ac; + + g_assert(reg); + + if (!reg->data || !reg->access) { + return; + } + + ac = reg->access; + + register_write_val(reg, reg->access->reset); + + if (ac->post_write) { + ac->post_write(reg, reg->access->reset); + } +} + +void register_write_memory(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + RegisterInfoArray *reg_array = opaque; + RegisterInfo *reg = NULL; + uint64_t we; + int i; + + for (i = 0; i < reg_array->num_elements; i++) { + if (reg_array->r[i]->access->addr == addr) { + reg = reg_array->r[i]; + break; + } + } + + if (!reg) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: write to unimplemented register " \ + "at address: 0x%" PRIx64 "\n", reg_array->prefix, addr); + return; + } + + /* Generate appropriate write enable mask */ + we = register_enabled_mask(reg->data_size, size); + + register_write(reg, value, we, reg_array->prefix, + reg_array->debug); +} + +uint64_t register_read_memory(void *opaque, hwaddr addr, + unsigned size) +{ + RegisterInfoArray *reg_array = opaque; + RegisterInfo *reg = NULL; + uint64_t read_val; + uint64_t re; + int i; + + for (i = 0; i < reg_array->num_elements; i++) { + if (reg_array->r[i]->access->addr == addr) { + reg = reg_array->r[i]; + break; + } + } + + if (!reg) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: read to unimplemented register " \ + "at address: 0x%" PRIx64 "\n", reg_array->prefix, addr); + return 0; + } + + /* Generate appropriate read enable mask */ + re = register_enabled_mask(reg->data_size, size); + + read_val = register_read(reg, re, reg_array->prefix, + reg_array->debug); + + return extract64(read_val, 0, size * 8); +} + +static RegisterInfoArray *register_init_block(DeviceState *owner, + const RegisterAccessInfo *rae, + int num, RegisterInfo *ri, + void *data, + const MemoryRegionOps *ops, + bool debug_enabled, + uint64_t memory_size, + size_t data_size_bits) +{ + const char *device_prefix = object_get_typename(OBJECT(owner)); + RegisterInfoArray *r_array = g_new0(RegisterInfoArray, 1); + int data_size = data_size_bits >> 3; + int i; + + r_array->r = g_new0(RegisterInfo *, num); + r_array->num_elements = num; + r_array->debug = debug_enabled; + r_array->prefix = device_prefix; + + for (i = 0; i < num; i++) { + int index = rae[i].addr / data_size; + RegisterInfo *r = &ri[index]; + + /* Init the register, this will zero it. 
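+ * Note the slot is selected by register address: index is
+ * rae[i].addr / data_size, so the caller's ri[] array must span the
+ * device's whole register address range.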
*/ + object_initialize((void *)r, sizeof(*r), TYPE_REGISTER); + + /* Set the properties of the register */ + r->data = data + data_size * index; + r->data_size = data_size; + r->access = &rae[i]; + r->opaque = owner; + + r_array->r[i] = r; + } + + memory_region_init_io(&r_array->mem, OBJECT(owner), ops, r_array, + device_prefix, memory_size); + + return r_array; +} + +RegisterInfoArray *register_init_block8(DeviceState *owner, + const RegisterAccessInfo *rae, + int num, RegisterInfo *ri, + uint8_t *data, + const MemoryRegionOps *ops, + bool debug_enabled, + uint64_t memory_size) +{ + return register_init_block(owner, rae, num, ri, (void *) + data, ops, debug_enabled, memory_size, 8); +} + +RegisterInfoArray *register_init_block32(DeviceState *owner, + const RegisterAccessInfo *rae, + int num, RegisterInfo *ri, + uint32_t *data, + const MemoryRegionOps *ops, + bool debug_enabled, + uint64_t memory_size) +{ + return register_init_block(owner, rae, num, ri, (void *) + data, ops, debug_enabled, memory_size, 32); +} + +RegisterInfoArray *register_init_block64(DeviceState *owner, + const RegisterAccessInfo *rae, + int num, RegisterInfo *ri, + uint64_t *data, + const MemoryRegionOps *ops, + bool debug_enabled, + uint64_t memory_size) +{ + return register_init_block(owner, rae, num, ri, (void *) + data, ops, debug_enabled, memory_size, 64); +} + +void register_finalize_block(RegisterInfoArray *r_array) +{ + object_unparent(OBJECT(&r_array->mem)); + g_free(r_array->r); + g_free(r_array); +} + +static void register_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + + /* Reason: needs to be wired up to work */ + dc->user_creatable = false; +} + +static const TypeInfo register_info = { + .name = TYPE_REGISTER, + .parent = TYPE_DEVICE, + .class_init = register_class_init, + .instance_size = sizeof(RegisterInfo), +}; + +static void register_register_types(void) +{ + type_register_static(®ister_info); +} + +type_init(register_register_types) diff --git a/hw/core/reset.c b/hw/core/reset.c new file mode 100644 index 000000000..9c477f2bf --- /dev/null +++ b/hw/core/reset.c @@ -0,0 +1,72 @@ +/* + * Reset handlers. + * + * Copyright (c) 2003-2008 Fabrice Bellard + * Copyright (c) 2016 Red Hat, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include "qemu/queue.h" +#include "sysemu/reset.h" + +/* reset/shutdown handler */ + +typedef struct QEMUResetEntry { + QTAILQ_ENTRY(QEMUResetEntry) entry; + QEMUResetHandler *func; + void *opaque; +} QEMUResetEntry; + +static QTAILQ_HEAD(, QEMUResetEntry) reset_handlers = + QTAILQ_HEAD_INITIALIZER(reset_handlers); + +void qemu_register_reset(QEMUResetHandler *func, void *opaque) +{ + QEMUResetEntry *re = g_malloc0(sizeof(QEMUResetEntry)); + + re->func = func; + re->opaque = opaque; + QTAILQ_INSERT_TAIL(&reset_handlers, re, entry); +} + +void qemu_unregister_reset(QEMUResetHandler *func, void *opaque) +{ + QEMUResetEntry *re; + + QTAILQ_FOREACH(re, &reset_handlers, entry) { + if (re->func == func && re->opaque == opaque) { + QTAILQ_REMOVE(&reset_handlers, re, entry); + g_free(re); + return; + } + } +} + +void qemu_devices_reset(void) +{ + QEMUResetEntry *re, *nre; + + /* reset all devices */ + QTAILQ_FOREACH_SAFE(re, &reset_handlers, entry, nre) { + re->func(re->opaque); + } +} + diff --git a/hw/core/resettable.c b/hw/core/resettable.c new file mode 100644 index 000000000..96a99ce39 --- /dev/null +++ b/hw/core/resettable.c @@ -0,0 +1,301 @@ +/* + * Resettable interface. + * + * Copyright (c) 2019 GreenSocs SAS + * + * Authors: + * Damien Hedde + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "qemu/module.h" +#include "hw/resettable.h" +#include "trace.h" + +/** + * resettable_phase_enter/hold/exit: + * Function executing a phase recursively in a resettable object and its + * children. + */ +static void resettable_phase_enter(Object *obj, void *opaque, ResetType type); +static void resettable_phase_hold(Object *obj, void *opaque, ResetType type); +static void resettable_phase_exit(Object *obj, void *opaque, ResetType type); + +/** + * enter_phase_in_progress: + * True if we are currently in reset enter phase. + * + * exit_phase_in_progress: + * count the number of exit phase we are in. + * + * Note: These flags are only used to guarantee (using asserts) that the reset + * API is used correctly. We can use global variables because we rely on the + * iothread mutex to ensure only one reset operation is in a progress at a + * given time. 
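+ *
+ * A complete reset therefore decomposes as:
+ *   resettable_assert_reset()  -> enter phase, then hold phase
+ *   resettable_release_reset() -> exit phase
+ * with resettable_reset() simply running the two back to back.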
+ */ +static bool enter_phase_in_progress; +static unsigned exit_phase_in_progress; + +void resettable_reset(Object *obj, ResetType type) +{ + trace_resettable_reset(obj, type); + resettable_assert_reset(obj, type); + resettable_release_reset(obj, type); +} + +void resettable_assert_reset(Object *obj, ResetType type) +{ + /* TODO: change this assert when adding support for other reset types */ + assert(type == RESET_TYPE_COLD); + trace_resettable_reset_assert_begin(obj, type); + assert(!enter_phase_in_progress); + + enter_phase_in_progress = true; + resettable_phase_enter(obj, NULL, type); + enter_phase_in_progress = false; + + resettable_phase_hold(obj, NULL, type); + + trace_resettable_reset_assert_end(obj); +} + +void resettable_release_reset(Object *obj, ResetType type) +{ + /* TODO: change this assert when adding support for other reset types */ + assert(type == RESET_TYPE_COLD); + trace_resettable_reset_release_begin(obj, type); + assert(!enter_phase_in_progress); + + exit_phase_in_progress += 1; + resettable_phase_exit(obj, NULL, type); + exit_phase_in_progress -= 1; + + trace_resettable_reset_release_end(obj); +} + +bool resettable_is_in_reset(Object *obj) +{ + ResettableClass *rc = RESETTABLE_GET_CLASS(obj); + ResettableState *s = rc->get_state(obj); + + return s->count > 0; +} + +/** + * resettable_child_foreach: + * helper to avoid checking the existence of the method. + */ +static void resettable_child_foreach(ResettableClass *rc, Object *obj, + ResettableChildCallback cb, + void *opaque, ResetType type) +{ + if (rc->child_foreach) { + rc->child_foreach(obj, cb, opaque, type); + } +} + +/** + * resettable_get_tr_func: + * helper to fetch transitional reset callback if any. + */ +static ResettableTrFunction resettable_get_tr_func(ResettableClass *rc, + Object *obj) +{ + ResettableTrFunction tr_func = NULL; + if (rc->get_transitional_function) { + tr_func = rc->get_transitional_function(obj); + } + return tr_func; +} + +static void resettable_phase_enter(Object *obj, void *opaque, ResetType type) +{ + ResettableClass *rc = RESETTABLE_GET_CLASS(obj); + ResettableState *s = rc->get_state(obj); + const char *obj_typename = object_get_typename(obj); + bool action_needed = false; + + /* exit phase has to finish properly before entering back in reset */ + assert(!s->exit_phase_in_progress); + + trace_resettable_phase_enter_begin(obj, obj_typename, s->count, type); + + /* Only take action if we really enter reset for the 1st time. */ + /* + * TODO: if adding more ResetType support, some additional checks + * are probably needed here. + */ + if (s->count++ == 0) { + action_needed = true; + } + /* + * We limit the count to an arbitrary "big" value. The value is big + * enough not to be triggered normally. + * The assert will stop an infinite loop if there is a cycle in the + * reset tree. The loop goes through resettable_foreach_child below + * which at some point will call us again. 
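+ * For example, a buggy device that listed one of its ancestors as a
+ * reset child would re-enter this function indefinitely; the assert
+ * below turns such a cycle into an immediate failure.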
+ */ + assert(s->count <= 50); + + /* + * handle the children even if action_needed is at false so that + * child counts are incremented too + */ + resettable_child_foreach(rc, obj, resettable_phase_enter, NULL, type); + + /* execute enter phase for the object if needed */ + if (action_needed) { + trace_resettable_phase_enter_exec(obj, obj_typename, type, + !!rc->phases.enter); + if (rc->phases.enter && !resettable_get_tr_func(rc, obj)) { + rc->phases.enter(obj, type); + } + s->hold_phase_pending = true; + } + trace_resettable_phase_enter_end(obj, obj_typename, s->count); +} + +static void resettable_phase_hold(Object *obj, void *opaque, ResetType type) +{ + ResettableClass *rc = RESETTABLE_GET_CLASS(obj); + ResettableState *s = rc->get_state(obj); + const char *obj_typename = object_get_typename(obj); + + /* exit phase has to finish properly before entering back in reset */ + assert(!s->exit_phase_in_progress); + + trace_resettable_phase_hold_begin(obj, obj_typename, s->count, type); + + /* handle children first */ + resettable_child_foreach(rc, obj, resettable_phase_hold, NULL, type); + + /* exec hold phase */ + if (s->hold_phase_pending) { + s->hold_phase_pending = false; + ResettableTrFunction tr_func = resettable_get_tr_func(rc, obj); + trace_resettable_phase_hold_exec(obj, obj_typename, !!rc->phases.hold); + if (tr_func) { + trace_resettable_transitional_function(obj, obj_typename); + tr_func(obj); + } else if (rc->phases.hold) { + rc->phases.hold(obj); + } + } + trace_resettable_phase_hold_end(obj, obj_typename, s->count); +} + +static void resettable_phase_exit(Object *obj, void *opaque, ResetType type) +{ + ResettableClass *rc = RESETTABLE_GET_CLASS(obj); + ResettableState *s = rc->get_state(obj); + const char *obj_typename = object_get_typename(obj); + + assert(!s->exit_phase_in_progress); + trace_resettable_phase_exit_begin(obj, obj_typename, s->count, type); + + /* exit_phase_in_progress ensures this phase is 'atomic' */ + s->exit_phase_in_progress = true; + resettable_child_foreach(rc, obj, resettable_phase_exit, NULL, type); + + assert(s->count > 0); + if (s->count == 1) { + trace_resettable_phase_exit_exec(obj, obj_typename, !!rc->phases.exit); + if (rc->phases.exit && !resettable_get_tr_func(rc, obj)) { + rc->phases.exit(obj); + } + s->count = 0; + } + s->exit_phase_in_progress = false; + trace_resettable_phase_exit_end(obj, obj_typename, s->count); +} + +/* + * resettable_get_count: + * Get the count of the Resettable object @obj. Return 0 if @obj is NULL. + */ +static unsigned resettable_get_count(Object *obj) +{ + if (obj) { + ResettableClass *rc = RESETTABLE_GET_CLASS(obj); + return rc->get_state(obj)->count; + } + return 0; +} + +void resettable_change_parent(Object *obj, Object *newp, Object *oldp) +{ + ResettableClass *rc = RESETTABLE_GET_CLASS(obj); + ResettableState *s = rc->get_state(obj); + unsigned newp_count = resettable_get_count(newp); + unsigned oldp_count = resettable_get_count(oldp); + + /* + * Ensure we do not change parent when in enter or exit phase. + * During these phases, the reset subtree being updated is partly in reset + * and partly not in reset (it depends on the actual position in + * resettable_child_foreach()s). We are not able to tell in which part is a + * leaving or arriving device. Thus we cannot set the reset count of the + * moving device to the proper value. 
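+ * Callers such as the hotplug path in device_set_realized() instead
+ * reparent between resettable_assert_reset() and
+ * resettable_release_reset(), while no phase walk is running.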
+ */ + assert(!enter_phase_in_progress && !exit_phase_in_progress); + trace_resettable_change_parent(obj, oldp, oldp_count, newp, newp_count); + + /* + * At most one of the two 'for' loops will be executed below + * in order to cope with the difference between the two counts. + */ + /* if newp is more reset than oldp */ + for (unsigned i = oldp_count; i < newp_count; i++) { + resettable_assert_reset(obj, RESET_TYPE_COLD); + } + /* + * if obj is leaving a bus under reset, we need to ensure + * hold phase is not pending. + */ + if (oldp_count && s->hold_phase_pending) { + resettable_phase_hold(obj, NULL, RESET_TYPE_COLD); + } + /* if oldp is more reset than newp */ + for (unsigned i = newp_count; i < oldp_count; i++) { + resettable_release_reset(obj, RESET_TYPE_COLD); + } +} + +void resettable_cold_reset_fn(void *opaque) +{ + resettable_reset((Object *) opaque, RESET_TYPE_COLD); +} + +void resettable_class_set_parent_phases(ResettableClass *rc, + ResettableEnterPhase enter, + ResettableHoldPhase hold, + ResettableExitPhase exit, + ResettablePhases *parent_phases) +{ + *parent_phases = rc->phases; + if (enter) { + rc->phases.enter = enter; + } + if (hold) { + rc->phases.hold = hold; + } + if (exit) { + rc->phases.exit = exit; + } +} + +static const TypeInfo resettable_interface_info = { + .name = TYPE_RESETTABLE_INTERFACE, + .parent = TYPE_INTERFACE, + .class_size = sizeof(ResettableClass), +}; + +static void reset_register_types(void) +{ + type_register_static(&resettable_interface_info); +} + +type_init(reset_register_types) diff --git a/hw/core/split-irq.c b/hw/core/split-irq.c new file mode 100644 index 000000000..3b90af2e8 --- /dev/null +++ b/hw/core/split-irq.c @@ -0,0 +1,92 @@ +/* + * IRQ splitter device. + * + * Copyright (c) 2018 Linaro Limited. + * Written by Peter Maydell + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include "hw/core/split-irq.h" +#include "hw/irq.h" +#include "hw/qdev-properties.h" +#include "qapi/error.h" +#include "qemu/module.h" + +static void split_irq_handler(void *opaque, int n, int level) +{ + SplitIRQ *s = SPLIT_IRQ(opaque); + int i; + + for (i = 0; i < s->num_lines; i++) { + qemu_set_irq(s->out_irq[i], level); + } +} + +static void split_irq_init(Object *obj) +{ + qdev_init_gpio_in(DEVICE(obj), split_irq_handler, 1); +} + +static void split_irq_realize(DeviceState *dev, Error **errp) +{ + SplitIRQ *s = SPLIT_IRQ(dev); + + if (s->num_lines < 1 || s->num_lines >= MAX_SPLIT_LINES) { + error_setg(errp, + "IRQ splitter number of lines %d is not between 1 and %d", + s->num_lines, MAX_SPLIT_LINES); + return; + } + + qdev_init_gpio_out(dev, s->out_irq, s->num_lines); +} + +static Property split_irq_properties[] = { + DEFINE_PROP_UINT16("num-lines", SplitIRQ, num_lines, 1), + DEFINE_PROP_END_OF_LIST(), +}; + +static void split_irq_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + /* No state to reset or migrate */ + device_class_set_props(dc, split_irq_properties); + dc->realize = split_irq_realize; + + /* Reason: Needs to be wired up to work */ + dc->user_creatable = false; +} + +static const TypeInfo split_irq_type_info = { + .name = TYPE_SPLIT_IRQ, + .parent = TYPE_DEVICE, + .instance_size = sizeof(SplitIRQ), + .instance_init = split_irq_init, + .class_init = split_irq_class_init, +}; + +static void split_irq_register_types(void) +{ + type_register_static(&split_irq_type_info); +} + +type_init(split_irq_register_types) diff --git a/hw/core/stream.c b/hw/core/stream.c new file mode 100644 index 000000000..19477d0f2 --- /dev/null +++ b/hw/core/stream.c @@ -0,0 +1,34 @@ +#include "qemu/osdep.h" +#include "hw/stream.h" +#include "qemu/module.h" + +size_t +stream_push(StreamSink *sink, uint8_t *buf, size_t len, bool eop) +{ + StreamSinkClass *k = STREAM_SINK_GET_CLASS(sink); + + return k->push(sink, buf, len, eop); +} + +bool +stream_can_push(StreamSink *sink, StreamCanPushNotifyFn notify, + void *notify_opaque) +{ + StreamSinkClass *k = STREAM_SINK_GET_CLASS(sink); + + return k->can_push ? k->can_push(sink, notify, notify_opaque) : true; +} + +static const TypeInfo stream_sink_info = { + .name = TYPE_STREAM_SINK, + .parent = TYPE_INTERFACE, + .class_size = sizeof(StreamSinkClass), +}; + + +static void stream_sink_register_types(void) +{ + type_register_static(&stream_sink_info); +} + +type_init(stream_sink_register_types) diff --git a/hw/core/sysbus.c b/hw/core/sysbus.c new file mode 100644 index 000000000..05c1da3d3 --- /dev/null +++ b/hw/core/sysbus.c @@ -0,0 +1,367 @@ +/* + * System (CPU) Bus device support code + * + * Copyright (c) 2009 CodeSourcery + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . 
+ */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/module.h" +#include "hw/sysbus.h" +#include "monitor/monitor.h" +#include "exec/address-spaces.h" + +static void sysbus_dev_print(Monitor *mon, DeviceState *dev, int indent); +static char *sysbus_get_fw_dev_path(DeviceState *dev); + +typedef struct SysBusFind { + void *opaque; + FindSysbusDeviceFunc *func; +} SysBusFind; + +/* Run func() for every sysbus device, traverse the tree for everything else */ +static int find_sysbus_device(Object *obj, void *opaque) +{ + SysBusFind *find = opaque; + Object *dev; + SysBusDevice *sbdev; + + dev = object_dynamic_cast(obj, TYPE_SYS_BUS_DEVICE); + sbdev = (SysBusDevice *)dev; + + if (!sbdev) { + /* Container, traverse it for children */ + return object_child_foreach(obj, find_sysbus_device, opaque); + } + + find->func(sbdev, find->opaque); + + return 0; +} + +/* + * Loop through all dynamically created sysbus devices and call + * func() for each instance. + */ +void foreach_dynamic_sysbus_device(FindSysbusDeviceFunc *func, void *opaque) +{ + Object *container; + SysBusFind find = { + .func = func, + .opaque = opaque, + }; + + /* Loop through all sysbus devices that were spawned outside the machine */ + container = container_get(qdev_get_machine(), "/peripheral"); + find_sysbus_device(container, &find); + container = container_get(qdev_get_machine(), "/peripheral-anon"); + find_sysbus_device(container, &find); +} + + +static void system_bus_class_init(ObjectClass *klass, void *data) +{ + BusClass *k = BUS_CLASS(klass); + + k->print_dev = sysbus_dev_print; + k->get_fw_dev_path = sysbus_get_fw_dev_path; +} + +static const TypeInfo system_bus_info = { + .name = TYPE_SYSTEM_BUS, + .parent = TYPE_BUS, + .instance_size = sizeof(BusState), + .class_init = system_bus_class_init, +}; + +/* Check whether an IRQ source exists */ +bool sysbus_has_irq(SysBusDevice *dev, int n) +{ + char *prop = g_strdup_printf("%s[%d]", SYSBUS_DEVICE_GPIO_IRQ, n); + ObjectProperty *r; + + r = object_property_find(OBJECT(dev), prop); + g_free(prop); + + return (r != NULL); +} + +bool sysbus_is_irq_connected(SysBusDevice *dev, int n) +{ + return !!sysbus_get_connected_irq(dev, n); +} + +qemu_irq sysbus_get_connected_irq(SysBusDevice *dev, int n) +{ + DeviceState *d = DEVICE(dev); + return qdev_get_gpio_out_connector(d, SYSBUS_DEVICE_GPIO_IRQ, n); +} + +void sysbus_connect_irq(SysBusDevice *dev, int n, qemu_irq irq) +{ + SysBusDeviceClass *sbd = SYS_BUS_DEVICE_GET_CLASS(dev); + + qdev_connect_gpio_out_named(DEVICE(dev), SYSBUS_DEVICE_GPIO_IRQ, n, irq); + + if (sbd->connect_irq_notifier) { + sbd->connect_irq_notifier(dev, irq); + } +} + +/* Check whether an MMIO region exists */ +bool sysbus_has_mmio(SysBusDevice *dev, unsigned int n) +{ + return (n < dev->num_mmio); +} + +static void sysbus_mmio_map_common(SysBusDevice *dev, int n, hwaddr addr, + bool may_overlap, int priority) +{ + assert(n >= 0 && n < dev->num_mmio); + + if (dev->mmio[n].addr == addr) { + /* ??? region already mapped here. */ + return; + } + if (dev->mmio[n].addr != (hwaddr)-1) { + /* Unregister previous mapping. 
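+ * sysbus_mmio_map() may legitimately be called again to move a
+ * region; the stale subregion is deleted here before the new
+ * mapping is added below.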
*/ + memory_region_del_subregion(get_system_memory(), dev->mmio[n].memory); + } + dev->mmio[n].addr = addr; + if (may_overlap) { + memory_region_add_subregion_overlap(get_system_memory(), + addr, + dev->mmio[n].memory, + priority); + } + else { + memory_region_add_subregion(get_system_memory(), + addr, + dev->mmio[n].memory); + } +} + +void sysbus_mmio_unmap(SysBusDevice *dev, int n) +{ + assert(n >= 0 && n < dev->num_mmio); + + if (dev->mmio[n].addr != (hwaddr)-1) { + memory_region_del_subregion(get_system_memory(), dev->mmio[n].memory); + dev->mmio[n].addr = (hwaddr)-1; + } +} + +void sysbus_mmio_map(SysBusDevice *dev, int n, hwaddr addr) +{ + sysbus_mmio_map_common(dev, n, addr, false, 0); +} + +void sysbus_mmio_map_overlap(SysBusDevice *dev, int n, hwaddr addr, + int priority) +{ + sysbus_mmio_map_common(dev, n, addr, true, priority); +} + +/* Request an IRQ source. The actual IRQ object may be populated later. */ +void sysbus_init_irq(SysBusDevice *dev, qemu_irq *p) +{ + qdev_init_gpio_out_named(DEVICE(dev), p, SYSBUS_DEVICE_GPIO_IRQ, 1); +} + +/* Pass IRQs from a target device. */ +void sysbus_pass_irq(SysBusDevice *dev, SysBusDevice *target) +{ + qdev_pass_gpios(DEVICE(target), DEVICE(dev), SYSBUS_DEVICE_GPIO_IRQ); +} + +void sysbus_init_mmio(SysBusDevice *dev, MemoryRegion *memory) +{ + int n; + + assert(dev->num_mmio < QDEV_MAX_MMIO); + n = dev->num_mmio++; + dev->mmio[n].addr = -1; + dev->mmio[n].memory = memory; +} + +MemoryRegion *sysbus_mmio_get_region(SysBusDevice *dev, int n) +{ + assert(n >= 0 && n < QDEV_MAX_MMIO); + return dev->mmio[n].memory; +} + +void sysbus_init_ioports(SysBusDevice *dev, uint32_t ioport, uint32_t size) +{ + uint32_t i; + + for (i = 0; i < size; i++) { + assert(dev->num_pio < QDEV_MAX_PIO); + dev->pio[dev->num_pio++] = ioport++; + } +} + +/* The purpose of preserving this empty realize function + * is to prevent the parent_realize field of some subclasses + * from being set to NULL to break the normal init/realize + * of some devices. + */ +static void sysbus_device_realize(DeviceState *dev, Error **errp) +{ +} + +DeviceState *sysbus_create_varargs(const char *name, + hwaddr addr, ...) 
+{ + DeviceState *dev; + SysBusDevice *s; + va_list va; + qemu_irq irq; + int n; + + dev = qdev_new(name); + s = SYS_BUS_DEVICE(dev); + sysbus_realize_and_unref(s, &error_fatal); + if (addr != (hwaddr)-1) { + sysbus_mmio_map(s, 0, addr); + } + va_start(va, addr); + n = 0; + while (1) { + irq = va_arg(va, qemu_irq); + if (!irq) { + break; + } + sysbus_connect_irq(s, n, irq); + n++; + } + va_end(va); + return dev; +} + +bool sysbus_realize(SysBusDevice *dev, Error **errp) +{ + return qdev_realize(DEVICE(dev), sysbus_get_default(), errp); +} + +bool sysbus_realize_and_unref(SysBusDevice *dev, Error **errp) +{ + return qdev_realize_and_unref(DEVICE(dev), sysbus_get_default(), errp); +} + +static void sysbus_dev_print(Monitor *mon, DeviceState *dev, int indent) +{ + SysBusDevice *s = SYS_BUS_DEVICE(dev); + hwaddr size; + int i; + + for (i = 0; i < s->num_mmio; i++) { + size = memory_region_size(s->mmio[i].memory); + monitor_printf(mon, "%*smmio " TARGET_FMT_plx "/" TARGET_FMT_plx "\n", + indent, "", s->mmio[i].addr, size); + } +} + +static char *sysbus_get_fw_dev_path(DeviceState *dev) +{ + SysBusDevice *s = SYS_BUS_DEVICE(dev); + SysBusDeviceClass *sbc = SYS_BUS_DEVICE_GET_CLASS(s); + char *addr, *fw_dev_path; + + if (sbc->explicit_ofw_unit_address) { + addr = sbc->explicit_ofw_unit_address(s); + if (addr) { + fw_dev_path = g_strdup_printf("%s@%s", qdev_fw_name(dev), addr); + g_free(addr); + return fw_dev_path; + } + } + if (s->num_mmio) { + return g_strdup_printf("%s@" TARGET_FMT_plx, qdev_fw_name(dev), + s->mmio[0].addr); + } + if (s->num_pio) { + return g_strdup_printf("%s@i%04x", qdev_fw_name(dev), s->pio[0]); + } + return g_strdup(qdev_fw_name(dev)); +} + +void sysbus_add_io(SysBusDevice *dev, hwaddr addr, + MemoryRegion *mem) +{ + memory_region_add_subregion(get_system_io(), addr, mem); +} + +MemoryRegion *sysbus_address_space(SysBusDevice *dev) +{ + return get_system_memory(); +} + +static void sysbus_device_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *k = DEVICE_CLASS(klass); + k->realize = sysbus_device_realize; + k->bus_type = TYPE_SYSTEM_BUS; + /* + * device_add plugs devices into a suitable bus. For "real" buses, + * that actually connects the device. For sysbus, the connections + * need to be made separately, and device_add can't do that. The + * device would be left unconnected, and will probably not work + * + * However, a few machines can handle device_add/-device with + * a few specific sysbus devices. In those cases, the device + * subclass needs to override it and set user_creatable=true. 
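+ * Such subclasses simply set user_creatable back to true in their
+ * own class_init(), overriding the default below.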
+ */ + k->user_creatable = false; +} + +static const TypeInfo sysbus_device_type_info = { + .name = TYPE_SYS_BUS_DEVICE, + .parent = TYPE_DEVICE, + .instance_size = sizeof(SysBusDevice), + .abstract = true, + .class_size = sizeof(SysBusDeviceClass), + .class_init = sysbus_device_class_init, +}; + +static BusState *main_system_bus; + +static void main_system_bus_create(void) +{ + /* + * assign main_system_bus before qbus_init() + * in order to make "if (bus != sysbus_get_default())" work + */ + main_system_bus = g_malloc0(system_bus_info.instance_size); + qbus_init(main_system_bus, system_bus_info.instance_size, + TYPE_SYSTEM_BUS, NULL, "main-system-bus"); + OBJECT(main_system_bus)->free = g_free; +} + +BusState *sysbus_get_default(void) +{ + if (!main_system_bus) { + main_system_bus_create(); + } + return main_system_bus; +} + +static void sysbus_register_types(void) +{ + type_register_static(&system_bus_info); + type_register_static(&sysbus_device_type_info); +} + +type_init(sysbus_register_types) diff --git a/hw/core/trace-events b/hw/core/trace-events new file mode 100644 index 000000000..9b3ecce3b --- /dev/null +++ b/hw/core/trace-events @@ -0,0 +1,37 @@ +# loader.c +loader_write_rom(const char *name, uint64_t gpa, uint64_t size, bool isrom) "%s: @0x%"PRIx64" size=0x%"PRIx64" ROM=%d" + +# qdev.c +qdev_reset(void *obj, const char *objtype) "obj=%p(%s)" +qdev_reset_all(void *obj, const char *objtype) "obj=%p(%s)" +qdev_reset_tree(void *obj, const char *objtype) "obj=%p(%s)" +qbus_reset(void *obj, const char *objtype) "obj=%p(%s)" +qbus_reset_all(void *obj, const char *objtype) "obj=%p(%s)" +qbus_reset_tree(void *obj, const char *objtype) "obj=%p(%s)" +qdev_update_parent_bus(void *obj, const char *objtype, void *oldp, const char *oldptype, void *newp, const char *newptype) "obj=%p(%s) old_parent=%p(%s) new_parent=%p(%s)" + +# resettable.c +resettable_reset(void *obj, int cold) "obj=%p cold=%d" +resettable_reset_assert_begin(void *obj, int cold) "obj=%p cold=%d" +resettable_reset_assert_end(void *obj) "obj=%p" +resettable_reset_release_begin(void *obj, int cold) "obj=%p cold=%d" +resettable_reset_release_end(void *obj) "obj=%p" +resettable_change_parent(void *obj, void *o, unsigned oc, void *n, unsigned nc) "obj=%p from=%p(%d) to=%p(%d)" +resettable_phase_enter_begin(void *obj, const char *objtype, unsigned count, int type) "obj=%p(%s) count=%d type=%d" +resettable_phase_enter_exec(void *obj, const char *objtype, int type, int has_method) "obj=%p(%s) type=%d method=%d" +resettable_phase_enter_end(void *obj, const char *objtype, unsigned count) "obj=%p(%s) count=%d" +resettable_phase_hold_begin(void *obj, const char *objtype, unsigned count, int type) "obj=%p(%s) count=%d type=%d" +resettable_phase_hold_exec(void *obj, const char *objtype, int has_method) "obj=%p(%s) method=%d" +resettable_phase_hold_end(void *obj, const char *objtype, unsigned count) "obj=%p(%s) count=%d" +resettable_phase_exit_begin(void *obj, const char *objtype, unsigned count, int type) "obj=%p(%s) count=%d type=%d" +resettable_phase_exit_exec(void *obj, const char *objtype, int has_method) "obj=%p(%s) method=%d" +resettable_phase_exit_end(void *obj, const char *objtype, unsigned count) "obj=%p(%s) count=%d" +resettable_transitional_function(void *obj, const char *objtype) "obj=%p(%s)" + +# clock.c +clock_set_source(const char *clk, const char *src) "'%s', src='%s'" +clock_disconnect(const char *clk) "'%s'" +clock_set(const char *clk, uint64_t old, uint64_t new) "'%s', %"PRIu64"Hz->%"PRIu64"Hz" +clock_propagate(const 
char *clk) "'%s'" +clock_update(const char *clk, const char *src, uint64_t hz, int cb) "'%s', src='%s', val=%"PRIu64"Hz cb=%d" +clock_set_mul_div(const char *clk, uint32_t oldmul, uint32_t mul, uint32_t olddiv, uint32_t div) "'%s', mul: %u -> %u, div: %u -> %u" diff --git a/hw/core/trace.h b/hw/core/trace.h new file mode 100644 index 000000000..23dfd61c4 --- /dev/null +++ b/hw/core/trace.h @@ -0,0 +1 @@ +#include "trace/trace-hw_core.h" diff --git a/hw/core/uboot_image.h b/hw/core/uboot_image.h new file mode 100644 index 000000000..608022de6 --- /dev/null +++ b/hw/core/uboot_image.h @@ -0,0 +1,159 @@ +/* + * (C) Copyright 2000-2005 + * Wolfgang Denk, DENX Software Engineering, wd@denx.de. + * + * See file CREDITS for list of people who contributed to this + * project. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . + * + ******************************************************************** + * NOTE: This header file defines an interface to U-Boot. Including + * this (unmodified) header file in another file is considered normal + * use of U-Boot, and does *not* fall under the heading of "derived + * work". + ******************************************************************** + */ + +#ifndef UBOOT_IMAGE_H +#define UBOOT_IMAGE_H + +/* + * Operating System Codes + */ +#define IH_OS_INVALID 0 /* Invalid OS */ +#define IH_OS_OPENBSD 1 /* OpenBSD */ +#define IH_OS_NETBSD 2 /* NetBSD */ +#define IH_OS_FREEBSD 3 /* FreeBSD */ +#define IH_OS_4_4BSD 4 /* 4.4BSD */ +#define IH_OS_LINUX 5 /* Linux */ +#define IH_OS_SVR4 6 /* SVR4 */ +#define IH_OS_ESIX 7 /* Esix */ +#define IH_OS_SOLARIS 8 /* Solaris */ +#define IH_OS_IRIX 9 /* Irix */ +#define IH_OS_SCO 10 /* SCO */ +#define IH_OS_DELL 11 /* Dell */ +#define IH_OS_NCR 12 /* NCR */ +#define IH_OS_LYNXOS 13 /* LynxOS */ +#define IH_OS_VXWORKS 14 /* VxWorks */ +#define IH_OS_PSOS 15 /* pSOS */ +#define IH_OS_QNX 16 /* QNX */ +#define IH_OS_U_BOOT 17 /* Firmware */ +#define IH_OS_RTEMS 18 /* RTEMS */ +#define IH_OS_ARTOS 19 /* ARTOS */ +#define IH_OS_UNITY 20 /* Unity OS */ + +/* + * CPU Architecture Codes (supported by Linux) + */ +#define IH_CPU_INVALID 0 /* Invalid CPU */ +#define IH_CPU_ALPHA 1 /* Alpha */ +#define IH_CPU_ARM 2 /* ARM */ +#define IH_CPU_I386 3 /* Intel x86 */ +#define IH_CPU_IA64 4 /* IA64 */ +#define IH_CPU_MIPS 5 /* MIPS */ +#define IH_CPU_MIPS64 6 /* MIPS 64 Bit */ +#define IH_CPU_PPC 7 /* PowerPC */ +#define IH_CPU_S390 8 /* IBM S390 */ +#define IH_CPU_SH 9 /* SuperH */ +#define IH_CPU_SPARC 10 /* Sparc */ +#define IH_CPU_SPARC64 11 /* Sparc 64 Bit */ +#define IH_CPU_M68K 12 /* M68K */ +#define IH_CPU_NIOS 13 /* Nios-32 */ +#define IH_CPU_MICROBLAZE 14 /* MicroBlaze */ +#define IH_CPU_NIOS2 15 /* Nios-II */ +#define IH_CPU_BLACKFIN 16 /* Blackfin */ +#define IH_CPU_AVR32 17 /* AVR32 */ + +/* + * Image Types + * + * "Standalone Programs" are directly runnable in the environment + * provided by U-Boot; it is expected that (if they behave + * well) you can 
continue to work in U-Boot after return from + * the Standalone Program. + * "OS Kernel Images" are usually images of some Embedded OS which + * will take over control completely. Usually these programs + * will install their own set of exception handlers, device + * drivers, set up the MMU, etc. - this means, that you cannot + * expect to re-enter U-Boot except by resetting the CPU. + * "RAMDisk Images" are more or less just data blocks, and their + * parameters (address, size) are passed to an OS kernel that is + * being started. + * "Multi-File Images" contain several images, typically an OS + * (Linux) kernel image and one or more data images like + * RAMDisks. This construct is useful for instance when you want + * to boot over the network using BOOTP etc., where the boot + * server provides just a single image file, but you want to get + * for instance an OS kernel and a RAMDisk image. + * + * "Multi-File Images" start with a list of image sizes, each + * image size (in bytes) specified by an "uint32_t" in network + * byte order. This list is terminated by an "(uint32_t)0". + * Immediately after the terminating 0 follow the images, one by + * one, all aligned on "uint32_t" boundaries (size rounded up to + * a multiple of 4 bytes - except for the last file). + * + * "Firmware Images" are binary images containing firmware (like + * U-Boot or FPGA images) which usually will be programmed to + * flash memory. + * + * "Script files" are command sequences that will be executed by + * U-Boot's command interpreter; this feature is especially + * useful when you configure U-Boot to use a real shell (hush) + * as command interpreter (=> Shell Scripts). + */ + +#define IH_TYPE_INVALID 0 /* Invalid Image */ +#define IH_TYPE_STANDALONE 1 /* Standalone Program */ +#define IH_TYPE_KERNEL 2 /* OS Kernel Image */ +#define IH_TYPE_RAMDISK 3 /* RAMDisk Image */ +#define IH_TYPE_MULTI 4 /* Multi-File Image */ +#define IH_TYPE_FIRMWARE 5 /* Firmware Image */ +#define IH_TYPE_SCRIPT 6 /* Script file */ +#define IH_TYPE_FILESYSTEM 7 /* Filesystem Image (any type) */ +#define IH_TYPE_FLATDT 8 /* Binary Flat Device Tree Blob */ +#define IH_TYPE_KERNEL_NOLOAD 14 /* OS Kernel Image (noload) */ + +/* + * Compression Types + */ +#define IH_COMP_NONE 0 /* No Compression Used */ +#define IH_COMP_GZIP 1 /* gzip Compression Used */ +#define IH_COMP_BZIP2 2 /* bzip2 Compression Used */ + +#define IH_MAGIC 0x27051956 /* Image Magic Number */ +#define IH_NMLEN 32 /* Image Name Length */ + +/* + * all data in network byte order (aka natural aka bigendian) + */ + +typedef struct uboot_image_header { + uint32_t ih_magic; /* Image Header Magic Number */ + uint32_t ih_hcrc; /* Image Header CRC Checksum */ + uint32_t ih_time; /* Image Creation Timestamp */ + uint32_t ih_size; /* Image Data Size */ + uint32_t ih_load; /* Data Load Address */ + uint32_t ih_ep; /* Entry Point Address */ + uint32_t ih_dcrc; /* Image Data CRC Checksum */ + uint8_t ih_os; /* Operating System */ + uint8_t ih_arch; /* CPU architecture */ + uint8_t ih_type; /* Image Type */ + uint8_t ih_comp; /* Compression Type */ + uint8_t ih_name[IH_NMLEN]; /* Image Name */ +} uboot_image_header_t; + + +#endif /* UBOOT_IMAGE_H */ diff --git a/hw/core/vm-change-state-handler.c b/hw/core/vm-change-state-handler.c new file mode 100644 index 000000000..1f3630986 --- /dev/null +++ b/hw/core/vm-change-state-handler.c @@ -0,0 +1,62 @@ +/* + * qdev vm change state handlers + * + * This program is free software; you can redistribute it and/or modify + * it under the terms 
of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see <http://www.gnu.org/licenses/>. + */ + +#include "qemu/osdep.h" +#include "hw/qdev-core.h" +#include "sysemu/runstate.h" + +static int qdev_get_dev_tree_depth(DeviceState *dev) +{ + int depth; + + for (depth = 0; dev; depth++) { + BusState *bus = dev->parent_bus; + + if (!bus) { + break; + } + + dev = bus->parent; + } + + return depth; +} + +/** + * qdev_add_vm_change_state_handler: + * @dev: the device that owns this handler + * @cb: the callback function to be invoked + * @opaque: user data passed to the callback function + * + * This function works like qemu_add_vm_change_state_handler() except callbacks + * are invoked in qdev tree depth order. Ordering is desirable when callbacks + * of children depend on their parent's callback having completed first. + * + * For example, when qdev_add_vm_change_state_handler() is used, a host + * controller's callback is invoked before the children on its bus when the VM + * starts running. The order is reversed when the VM stops running. + * + * Returns: an entry to be freed with qemu_del_vm_change_state_handler() + */ +VMChangeStateEntry *qdev_add_vm_change_state_handler(DeviceState *dev, + VMChangeStateHandler *cb, + void *opaque) +{ + int depth = qdev_get_dev_tree_depth(dev); + + return qemu_add_vm_change_state_handler_prio(cb, opaque, depth); +} diff --git a/hw/core/vmstate-if.c b/hw/core/vmstate-if.c new file mode 100644 index 000000000..bf453620f --- /dev/null +++ b/hw/core/vmstate-if.c @@ -0,0 +1,23 @@ +/* + * VMState interface + * + * Copyright (c) 2009-2019 Red Hat Inc + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "hw/vmstate-if.h" + +static const TypeInfo vmstate_if_info = { + .name = TYPE_VMSTATE_IF, + .parent = TYPE_INTERFACE, + .class_size = sizeof(VMStateIfClass), +}; + +static void vmstate_register_types(void) +{ + type_register_static(&vmstate_if_info); +} + +type_init(vmstate_register_types); diff --git a/hw/cpu/Kconfig b/hw/cpu/Kconfig new file mode 100644 index 000000000..1767d028a --- /dev/null +++ b/hw/cpu/Kconfig @@ -0,0 +1,8 @@ +config ARM11MPCORE + bool + +config A9MPCORE + bool + +config A15MPCORE + bool diff --git a/hw/cpu/a15mpcore.c b/hw/cpu/a15mpcore.c new file mode 100644 index 000000000..774ca9987 --- /dev/null +++ b/hw/cpu/a15mpcore.c @@ -0,0 +1,180 @@ +/* + * Cortex-A15MPCore internal peripheral emulation. + * + * Copyright (c) 2012 Linaro Limited. + * Written by Peter Maydell. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details.
+ * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see <http://www.gnu.org/licenses/>. + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/module.h" +#include "hw/cpu/a15mpcore.h" +#include "hw/irq.h" +#include "hw/qdev-properties.h" +#include "sysemu/kvm.h" +#include "kvm_arm.h" + +static void a15mp_priv_set_irq(void *opaque, int irq, int level) +{ + A15MPPrivState *s = (A15MPPrivState *)opaque; + + qemu_set_irq(qdev_get_gpio_in(DEVICE(&s->gic), irq), level); +} + +static void a15mp_priv_initfn(Object *obj) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + A15MPPrivState *s = A15MPCORE_PRIV(obj); + + memory_region_init(&s->container, obj, "a15mp-priv-container", 0x8000); + sysbus_init_mmio(sbd, &s->container); + + object_initialize_child(obj, "gic", &s->gic, gic_class_name()); + qdev_prop_set_uint32(DEVICE(&s->gic), "revision", 2); +} + +static void a15mp_priv_realize(DeviceState *dev, Error **errp) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + A15MPPrivState *s = A15MPCORE_PRIV(dev); + DeviceState *gicdev; + SysBusDevice *busdev; + int i; + bool has_el3; + bool has_el2 = false; + Object *cpuobj; + + gicdev = DEVICE(&s->gic); + qdev_prop_set_uint32(gicdev, "num-cpu", s->num_cpu); + qdev_prop_set_uint32(gicdev, "num-irq", s->num_irq); + + if (!kvm_irqchip_in_kernel()) { + /* Make the GIC's TZ support match the CPUs. We assume that + * either all the CPUs have TZ, or none do. + */ + cpuobj = OBJECT(qemu_get_cpu(0)); + has_el3 = object_property_find(cpuobj, "has_el3") && + object_property_get_bool(cpuobj, "has_el3", &error_abort); + qdev_prop_set_bit(gicdev, "has-security-extensions", has_el3); + /* Similarly for virtualization support */ + has_el2 = object_property_find(cpuobj, "has_el2") && + object_property_get_bool(cpuobj, "has_el2", &error_abort); + qdev_prop_set_bit(gicdev, "has-virtualization-extensions", has_el2); + } + + if (!sysbus_realize(SYS_BUS_DEVICE(&s->gic), errp)) { + return; + } + busdev = SYS_BUS_DEVICE(&s->gic); + + /* Pass through outbound IRQ lines from the GIC */ + sysbus_pass_irq(sbd, busdev); + + /* Pass through inbound GPIO lines to the GIC */ + qdev_init_gpio_in(dev, a15mp_priv_set_irq, s->num_irq - 32); + + /* Wire the outputs from each CPU's generic timer to the + * appropriate GIC PPI inputs + */ + for (i = 0; i < s->num_cpu; i++) { + DeviceState *cpudev = DEVICE(qemu_get_cpu(i)); + int ppibase = s->num_irq - 32 + i * 32; + int irq; + /* Mapping from the output timer irq lines from the CPU to the + * GIC PPI inputs used on the A15: + */ + const int timer_irq[] = { + [GTIMER_PHYS] = 30, + [GTIMER_VIRT] = 27, + [GTIMER_HYP] = 26, + [GTIMER_SEC] = 29, + }; + for (irq = 0; irq < ARRAY_SIZE(timer_irq); irq++) { + qdev_connect_gpio_out(cpudev, irq, + qdev_get_gpio_in(gicdev, + ppibase + timer_irq[irq])); + } + if (has_el2) { + /* Connect the GIC maintenance interrupt to PPI ID 25 */ + sysbus_connect_irq(SYS_BUS_DEVICE(gicdev), i + 4 * s->num_cpu, + qdev_get_gpio_in(gicdev, ppibase + 25)); + } + } + + /* Memory map (addresses are offsets from PERIPHBASE): + * 0x0000-0x0fff -- reserved + * 0x1000-0x1fff -- GIC Distributor + * 0x2000-0x3fff -- GIC CPU interface + * 0x4000-0x4fff -- GIC virtual interface control for this CPU + * 0x5000-0x51ff -- GIC virtual interface control for CPU 0 + * 0x5200-0x53ff -- GIC virtual interface control for CPU 1 + * 0x5400-0x55ff -- GIC virtual interface control for CPU 2 + * 0x5600-0x57ff -- GIC virtual interface control for CPU 3 + * 0x6000-0x7fff -- GIC virtual CPU interface + */ + 
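/* + * Descriptive note (not in the original patch): the sysbus MMIO region + * indices used below follow the GIC model's layout: 0 is the distributor + * and 1 the "current CPU" interface; with virtualization enabled, 2 and + * 3 are the "current CPU" virtual interface control and virtual CPU + * interface, the per-CPU interfaces follow, and the per-CPU virtual + * interface control banks come last, hence index 4 + num_cpu + i below. + */ + 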
memory_region_add_subregion(&s->container, 0x1000, + sysbus_mmio_get_region(busdev, 0)); + memory_region_add_subregion(&s->container, 0x2000, + sysbus_mmio_get_region(busdev, 1)); + if (has_el2) { + memory_region_add_subregion(&s->container, 0x4000, + sysbus_mmio_get_region(busdev, 2)); + memory_region_add_subregion(&s->container, 0x6000, + sysbus_mmio_get_region(busdev, 3)); + for (i = 0; i < s->num_cpu; i++) { + hwaddr base = 0x5000 + i * 0x200; + MemoryRegion *mr = sysbus_mmio_get_region(busdev, + 4 + s->num_cpu + i); + memory_region_add_subregion(&s->container, base, mr); + } + } +} + +static Property a15mp_priv_properties[] = { + DEFINE_PROP_UINT32("num-cpu", A15MPPrivState, num_cpu, 1), + /* The Cortex-A15MP may have anything from 0 to 224 external interrupt + * IRQ lines (with another 32 internal). We default to 128+32, which + * is the number provided by the Cortex-A15MP test chip in the + * Versatile Express A15 development board. + * Other boards may differ and should set this property appropriately. + */ + DEFINE_PROP_UINT32("num-irq", A15MPPrivState, num_irq, 160), + DEFINE_PROP_END_OF_LIST(), +}; + +static void a15mp_priv_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = a15mp_priv_realize; + device_class_set_props(dc, a15mp_priv_properties); + /* We currently have no savable state */ +} + +static const TypeInfo a15mp_priv_info = { + .name = TYPE_A15MPCORE_PRIV, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(A15MPPrivState), + .instance_init = a15mp_priv_initfn, + .class_init = a15mp_priv_class_init, +}; + +static void a15mp_register_types(void) +{ + type_register_static(&a15mp_priv_info); +} + +type_init(a15mp_register_types) diff --git a/hw/cpu/a9mpcore.c b/hw/cpu/a9mpcore.c new file mode 100644 index 000000000..d03f57e57 --- /dev/null +++ b/hw/cpu/a9mpcore.c @@ -0,0 +1,194 @@ +/* + * Cortex-A9MPCore internal peripheral emulation. + * + * Copyright (c) 2009 CodeSourcery. + * Copyright (c) 2011 Linaro Limited. + * Written by Paul Brook, Peter Maydell. + * + * This code is licensed under the GPL. 
+ */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/module.h" +#include "hw/cpu/a9mpcore.h" +#include "hw/irq.h" +#include "hw/qdev-properties.h" +#include "hw/core/cpu.h" +#include "cpu.h" + +#define A9_GIC_NUM_PRIORITY_BITS 5 + +static void a9mp_priv_set_irq(void *opaque, int irq, int level) +{ + A9MPPrivState *s = (A9MPPrivState *)opaque; + + qemu_set_irq(qdev_get_gpio_in(DEVICE(&s->gic), irq), level); +} + +static void a9mp_priv_initfn(Object *obj) +{ + A9MPPrivState *s = A9MPCORE_PRIV(obj); + + memory_region_init(&s->container, obj, "a9mp-priv-container", 0x2000); + sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->container); + + object_initialize_child(obj, "scu", &s->scu, TYPE_A9_SCU); + + object_initialize_child(obj, "gic", &s->gic, TYPE_ARM_GIC); + + object_initialize_child(obj, "gtimer", &s->gtimer, TYPE_A9_GTIMER); + + object_initialize_child(obj, "mptimer", &s->mptimer, TYPE_ARM_MPTIMER); + + object_initialize_child(obj, "wdt", &s->wdt, TYPE_ARM_MPTIMER); +} + +static void a9mp_priv_realize(DeviceState *dev, Error **errp) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + A9MPPrivState *s = A9MPCORE_PRIV(dev); + DeviceState *scudev, *gicdev, *gtimerdev, *mptimerdev, *wdtdev; + SysBusDevice *scubusdev, *gicbusdev, *gtimerbusdev, *mptimerbusdev, + *wdtbusdev; + int i; + bool has_el3; + CPUState *cpu0; + Object *cpuobj; + + cpu0 = qemu_get_cpu(0); + cpuobj = OBJECT(cpu0); + if (strcmp(object_get_typename(cpuobj), ARM_CPU_TYPE_NAME("cortex-a9"))) { + /* We might allow Cortex-A5 once we model it */ + error_setg(errp, + "Cortex-A9MPCore peripheral can only use Cortex-A9 CPU"); + return; + } + + scudev = DEVICE(&s->scu); + qdev_prop_set_uint32(scudev, "num-cpu", s->num_cpu); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->scu), errp)) { + return; + } + scubusdev = SYS_BUS_DEVICE(&s->scu); + + gicdev = DEVICE(&s->gic); + qdev_prop_set_uint32(gicdev, "num-cpu", s->num_cpu); + qdev_prop_set_uint32(gicdev, "num-irq", s->num_irq); + qdev_prop_set_uint32(gicdev, "num-priority-bits", + A9_GIC_NUM_PRIORITY_BITS); + + /* Make the GIC's TZ support match the CPUs. We assume that + * either all the CPUs have TZ, or none do. 
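+ * (Only CPU 0 is probed below; a board mixing TZ and non-TZ cores + * would leave the GIC misconfigured.)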
+ */ + has_el3 = object_property_find(cpuobj, "has_el3") && + object_property_get_bool(cpuobj, "has_el3", &error_abort); + qdev_prop_set_bit(gicdev, "has-security-extensions", has_el3); + + if (!sysbus_realize(SYS_BUS_DEVICE(&s->gic), errp)) { + return; + } + gicbusdev = SYS_BUS_DEVICE(&s->gic); + + /* Pass through outbound IRQ lines from the GIC */ + sysbus_pass_irq(sbd, gicbusdev); + + /* Pass through inbound GPIO lines to the GIC */ + qdev_init_gpio_in(dev, a9mp_priv_set_irq, s->num_irq - 32); + + gtimerdev = DEVICE(&s->gtimer); + qdev_prop_set_uint32(gtimerdev, "num-cpu", s->num_cpu); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->gtimer), errp)) { + return; + } + gtimerbusdev = SYS_BUS_DEVICE(&s->gtimer); + + mptimerdev = DEVICE(&s->mptimer); + qdev_prop_set_uint32(mptimerdev, "num-cpu", s->num_cpu); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->mptimer), errp)) { + return; + } + mptimerbusdev = SYS_BUS_DEVICE(&s->mptimer); + + wdtdev = DEVICE(&s->wdt); + qdev_prop_set_uint32(wdtdev, "num-cpu", s->num_cpu); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->wdt), errp)) { + return; + } + wdtbusdev = SYS_BUS_DEVICE(&s->wdt); + + /* Memory map (addresses are offsets from PERIPHBASE): + * 0x0000-0x00ff -- Snoop Control Unit + * 0x0100-0x01ff -- GIC CPU interface + * 0x0200-0x02ff -- Global Timer + * 0x0300-0x05ff -- nothing + * 0x0600-0x06ff -- private timers and watchdogs + * 0x0700-0x0fff -- nothing + * 0x1000-0x1fff -- GIC Distributor + */ + memory_region_add_subregion(&s->container, 0, + sysbus_mmio_get_region(scubusdev, 0)); + /* GIC CPU interface */ + memory_region_add_subregion(&s->container, 0x100, + sysbus_mmio_get_region(gicbusdev, 1)); + memory_region_add_subregion(&s->container, 0x200, + sysbus_mmio_get_region(gtimerbusdev, 0)); + /* Note that the A9 exposes only the "timer/watchdog for this core" + * memory region, not the "timer/watchdog for core X" ones 11MPcore has. + */ + memory_region_add_subregion(&s->container, 0x600, + sysbus_mmio_get_region(mptimerbusdev, 0)); + memory_region_add_subregion(&s->container, 0x620, + sysbus_mmio_get_region(wdtbusdev, 0)); + memory_region_add_subregion(&s->container, 0x1000, + sysbus_mmio_get_region(gicbusdev, 0)); + + /* Wire up the interrupt from each watchdog and timer. + * For each core the global timer is PPI 27, the private + * timer is PPI 29 and the watchdog PPI 30. + */ + for (i = 0; i < s->num_cpu; i++) { + int ppibase = (s->num_irq - 32) + i * 32; + sysbus_connect_irq(gtimerbusdev, i, + qdev_get_gpio_in(gicdev, ppibase + 27)); + sysbus_connect_irq(mptimerbusdev, i, + qdev_get_gpio_in(gicdev, ppibase + 29)); + sysbus_connect_irq(wdtbusdev, i, + qdev_get_gpio_in(gicdev, ppibase + 30)); + } +} + +static Property a9mp_priv_properties[] = { + DEFINE_PROP_UINT32("num-cpu", A9MPPrivState, num_cpu, 1), + /* The Cortex-A9MP may have anything from 0 to 224 external interrupt + * IRQ lines (with another 32 internal). We default to 64+32, which + * is the number provided by the Cortex-A9MP test chip in the + * Realview PBX-A9 and Versatile Express A9 development boards. + * Other boards may differ and should set this property appropriately. 
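+ * The inbound GPIO lines created at realize time map onto the GIC's + * shared peripheral interrupt inputs, so board-level line n raises + * interrupt ID n + 32.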
+ */ + DEFINE_PROP_UINT32("num-irq", A9MPPrivState, num_irq, 96), + DEFINE_PROP_END_OF_LIST(), +}; + +static void a9mp_priv_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = a9mp_priv_realize; + device_class_set_props(dc, a9mp_priv_properties); +} + +static const TypeInfo a9mp_priv_info = { + .name = TYPE_A9MPCORE_PRIV, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(A9MPPrivState), + .instance_init = a9mp_priv_initfn, + .class_init = a9mp_priv_class_init, +}; + +static void a9mp_register_types(void) +{ + type_register_static(&a9mp_priv_info); +} + +type_init(a9mp_register_types) diff --git a/hw/cpu/arm11mpcore.c b/hw/cpu/arm11mpcore.c new file mode 100644 index 000000000..89c4e3514 --- /dev/null +++ b/hw/cpu/arm11mpcore.c @@ -0,0 +1,169 @@ +/* + * ARM11MPCore internal peripheral emulation. + * + * Copyright (c) 2006-2007 CodeSourcery. + * Written by Paul Brook + * + * This code is licensed under the GPL. + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/module.h" +#include "hw/cpu/arm11mpcore.h" +#include "hw/intc/realview_gic.h" +#include "hw/irq.h" +#include "hw/qdev-properties.h" + +#define ARM11MPCORE_NUM_GIC_PRIORITY_BITS 4 + +static void mpcore_priv_set_irq(void *opaque, int irq, int level) +{ + ARM11MPCorePriveState *s = (ARM11MPCorePriveState *)opaque; + + qemu_set_irq(qdev_get_gpio_in(DEVICE(&s->gic), irq), level); +} + +static void mpcore_priv_map_setup(ARM11MPCorePriveState *s) +{ + int i; + SysBusDevice *scubusdev = SYS_BUS_DEVICE(&s->scu); + DeviceState *gicdev = DEVICE(&s->gic); + SysBusDevice *gicbusdev = SYS_BUS_DEVICE(&s->gic); + SysBusDevice *timerbusdev = SYS_BUS_DEVICE(&s->mptimer); + SysBusDevice *wdtbusdev = SYS_BUS_DEVICE(&s->wdtimer); + + memory_region_add_subregion(&s->container, 0, + sysbus_mmio_get_region(scubusdev, 0)); + /* GIC CPU interfaces: "current CPU" at 0x100, then specific CPUs + * at 0x200, 0x300... + */ + for (i = 0; i < (s->num_cpu + 1); i++) { + hwaddr offset = 0x100 + (i * 0x100); + memory_region_add_subregion(&s->container, offset, + sysbus_mmio_get_region(gicbusdev, i + 1)); + } + /* Add the regions for timer and watchdog for "current CPU" and + * for each specific CPU. + */ + for (i = 0; i < (s->num_cpu + 1); i++) { + /* Timers at 0x600, 0x700, ...; watchdogs at 0x620, 0x720, ... */ + hwaddr offset = 0x600 + i * 0x100; + memory_region_add_subregion(&s->container, offset, + sysbus_mmio_get_region(timerbusdev, i)); + memory_region_add_subregion(&s->container, offset + 0x20, + sysbus_mmio_get_region(wdtbusdev, i)); + } + memory_region_add_subregion(&s->container, 0x1000, + sysbus_mmio_get_region(gicbusdev, 0)); + /* Wire up the interrupt from each watchdog and timer. + * For each core the timer is PPI 29 and the watchdog PPI 30. 
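+ * Equivalently: core i's banked PPIs start at GIC input + * (num_irq - 32) + i * 32 (the ppibase computed below), so its timer + * is input ppibase + 29 and its watchdog input ppibase + 30.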
+ */ + for (i = 0; i < s->num_cpu; i++) { + int ppibase = (s->num_irq - 32) + i * 32; + sysbus_connect_irq(timerbusdev, i, + qdev_get_gpio_in(gicdev, ppibase + 29)); + sysbus_connect_irq(wdtbusdev, i, + qdev_get_gpio_in(gicdev, ppibase + 30)); + } +} + +static void mpcore_priv_realize(DeviceState *dev, Error **errp) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + ARM11MPCorePriveState *s = ARM11MPCORE_PRIV(dev); + DeviceState *scudev = DEVICE(&s->scu); + DeviceState *gicdev = DEVICE(&s->gic); + DeviceState *mptimerdev = DEVICE(&s->mptimer); + DeviceState *wdtimerdev = DEVICE(&s->wdtimer); + + qdev_prop_set_uint32(scudev, "num-cpu", s->num_cpu); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->scu), errp)) { + return; + } + + qdev_prop_set_uint32(gicdev, "num-cpu", s->num_cpu); + qdev_prop_set_uint32(gicdev, "num-irq", s->num_irq); + qdev_prop_set_uint32(gicdev, "num-priority-bits", + ARM11MPCORE_NUM_GIC_PRIORITY_BITS); + + + if (!sysbus_realize(SYS_BUS_DEVICE(&s->gic), errp)) { + return; + } + + /* Pass through outbound IRQ lines from the GIC */ + sysbus_pass_irq(sbd, SYS_BUS_DEVICE(&s->gic)); + + /* Pass through inbound GPIO lines to the GIC */ + qdev_init_gpio_in(dev, mpcore_priv_set_irq, s->num_irq - 32); + + qdev_prop_set_uint32(mptimerdev, "num-cpu", s->num_cpu); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->mptimer), errp)) { + return; + } + + qdev_prop_set_uint32(wdtimerdev, "num-cpu", s->num_cpu); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->wdtimer), errp)) { + return; + } + + mpcore_priv_map_setup(s); +} + +static void mpcore_priv_initfn(Object *obj) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + ARM11MPCorePriveState *s = ARM11MPCORE_PRIV(obj); + + memory_region_init(&s->container, OBJECT(s), + "mpcore-priv-container", 0x2000); + sysbus_init_mmio(sbd, &s->container); + + object_initialize_child(obj, "scu", &s->scu, TYPE_ARM11_SCU); + + object_initialize_child(obj, "gic", &s->gic, TYPE_ARM_GIC); + /* Request the legacy 11MPCore GIC behaviour: */ + qdev_prop_set_uint32(DEVICE(&s->gic), "revision", 0); + + object_initialize_child(obj, "mptimer", &s->mptimer, TYPE_ARM_MPTIMER); + + object_initialize_child(obj, "wdtimer", &s->wdtimer, TYPE_ARM_MPTIMER); +} + +static Property mpcore_priv_properties[] = { + DEFINE_PROP_UINT32("num-cpu", ARM11MPCorePriveState, num_cpu, 1), + /* The ARM11 MPCORE TRM says the on-chip controller may have + * anything from 0 to 224 external interrupt IRQ lines (with another + * 32 internal). We default to 32+32, which is the number provided by + * the ARM11 MPCore test chip in the Realview Versatile Express + * coretile. Other boards may differ and should set this property + * appropriately. Some Linux kernels may not boot if the hardware + * has more IRQ lines than the kernel expects. 
+ */ + DEFINE_PROP_UINT32("num-irq", ARM11MPCorePriveState, num_irq, 64), + DEFINE_PROP_END_OF_LIST(), +}; + +static void mpcore_priv_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = mpcore_priv_realize; + device_class_set_props(dc, mpcore_priv_properties); +} + +static const TypeInfo mpcore_priv_info = { + .name = TYPE_ARM11MPCORE_PRIV, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(ARM11MPCorePriveState), + .instance_init = mpcore_priv_initfn, + .class_init = mpcore_priv_class_init, +}; + +static void arm11mpcore_register_types(void) +{ + type_register_static(&mpcore_priv_info); +} + +type_init(arm11mpcore_register_types) diff --git a/hw/cpu/cluster.c b/hw/cpu/cluster.c new file mode 100644 index 000000000..e444b7c29 --- /dev/null +++ b/hw/cpu/cluster.c @@ -0,0 +1,100 @@ +/* + * QEMU CPU cluster + * + * Copyright (c) 2018 GreenSocs SAS + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see + * <http://www.gnu.org/licenses/> + */ + +#include "qemu/osdep.h" +#include "hw/cpu/cluster.h" +#include "hw/qdev-properties.h" +#include "hw/core/cpu.h" +#include "qapi/error.h" +#include "qemu/module.h" +#include "qemu/cutils.h" + +static Property cpu_cluster_properties[] = { + DEFINE_PROP_UINT32("cluster-id", CPUClusterState, cluster_id, 0), + DEFINE_PROP_END_OF_LIST() +}; + +typedef struct CallbackData { + CPUClusterState *cluster; + int cpu_count; +} CallbackData; + +static int add_cpu_to_cluster(Object *obj, void *opaque) +{ + CallbackData *cbdata = opaque; + CPUState *cpu = (CPUState *)object_dynamic_cast(obj, TYPE_CPU); + + if (cpu) { + cpu->cluster_index = cbdata->cluster->cluster_id; + cbdata->cpu_count++; + } + return 0; +} + +static void cpu_cluster_realize(DeviceState *dev, Error **errp) +{ + /* Iterate through all our CPU children and set their cluster_index */ + CPUClusterState *cluster = CPU_CLUSTER(dev); + Object *cluster_obj = OBJECT(dev); + CallbackData cbdata = { + .cluster = cluster, + .cpu_count = 0, + }; + + if (cluster->cluster_id >= MAX_CLUSTERS) { + error_setg(errp, "cluster-id must be less than %d", MAX_CLUSTERS); + return; + } + + object_child_foreach_recursive(cluster_obj, add_cpu_to_cluster, &cbdata); + + /* + * A cluster with no CPUs is a bug in the board/SoC code that created it; + * if you hit this during development of new code, check that you have + * created the CPUs and parented them into the cluster object before + * realizing the cluster object.
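+ * (add_cpu_to_cluster() above counted the CPU children it tagged, so + * an empty cluster trips the assertion that follows.)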
+ */ + assert(cbdata.cpu_count > 0); +} + +static void cpu_cluster_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + device_class_set_props(dc, cpu_cluster_properties); + dc->realize = cpu_cluster_realize; + + /* This is not directly for users, CPU children must be attached by code */ + dc->user_creatable = false; +} + +static const TypeInfo cpu_cluster_type_info = { + .name = TYPE_CPU_CLUSTER, + .parent = TYPE_DEVICE, + .instance_size = sizeof(CPUClusterState), + .class_init = cpu_cluster_class_init, +}; + +static void cpu_cluster_register_types(void) +{ + type_register_static(&cpu_cluster_type_info); +} + +type_init(cpu_cluster_register_types) diff --git a/hw/cpu/core.c b/hw/cpu/core.c new file mode 100644 index 000000000..987607515 --- /dev/null +++ b/hw/cpu/core.c @@ -0,0 +1,106 @@ +/* + * CPU core abstract device + * + * Copyright (C) 2016 Bharata B Rao + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "hw/cpu/core.h" +#include "qapi/visitor.h" +#include "qemu/module.h" +#include "qapi/error.h" +#include "sysemu/cpus.h" +#include "hw/boards.h" + +static void core_prop_get_core_id(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + CPUCore *core = CPU_CORE(obj); + int64_t value = core->core_id; + + visit_type_int(v, name, &value, errp); +} + +static void core_prop_set_core_id(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + CPUCore *core = CPU_CORE(obj); + int64_t value; + + if (!visit_type_int(v, name, &value, errp)) { + return; + } + + if (value < 0) { + error_setg(errp, "Invalid core id %"PRId64, value); + return; + } + + core->core_id = value; +} + +static void core_prop_get_nr_threads(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + CPUCore *core = CPU_CORE(obj); + int64_t value = core->nr_threads; + + visit_type_int(v, name, &value, errp); +} + +static void core_prop_set_nr_threads(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + CPUCore *core = CPU_CORE(obj); + int64_t value; + + if (!visit_type_int(v, name, &value, errp)) { + return; + } + + core->nr_threads = value; +} + +static void cpu_core_instance_init(Object *obj) +{ + CPUCore *core = CPU_CORE(obj); + + /* + * Only '-device something-cpu-core,help' can get us there before + * the machine has been created. We don't care to set nr_threads + * in this case since it isn't used afterwards. 
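+ * (current_machine is still NULL at that point, which is what the + * check below guards against.)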
+ */ + if (current_machine) { + core->nr_threads = current_machine->smp.threads; + } +} + +static void cpu_core_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + + set_bit(DEVICE_CATEGORY_CPU, dc->categories); + object_class_property_add(oc, "core-id", "int", core_prop_get_core_id, + core_prop_set_core_id, NULL, NULL); + object_class_property_add(oc, "nr-threads", "int", core_prop_get_nr_threads, + core_prop_set_nr_threads, NULL, NULL); +} + +static const TypeInfo cpu_core_type_info = { + .name = TYPE_CPU_CORE, + .parent = TYPE_DEVICE, + .abstract = true, + .class_init = cpu_core_class_init, + .instance_size = sizeof(CPUCore), + .instance_init = cpu_core_instance_init, +}; + +static void cpu_core_register_types(void) +{ + type_register_static(&cpu_core_type_info); +} + +type_init(cpu_core_register_types) diff --git a/hw/cpu/meson.build b/hw/cpu/meson.build new file mode 100644 index 000000000..9e52fee9e --- /dev/null +++ b/hw/cpu/meson.build @@ -0,0 +1,6 @@ +softmmu_ss.add(files('core.c', 'cluster.c')) + +specific_ss.add(when: 'CONFIG_ARM11MPCORE', if_true: files('arm11mpcore.c')) +specific_ss.add(when: 'CONFIG_REALVIEW', if_true: files('realview_mpcore.c')) +specific_ss.add(when: 'CONFIG_A9MPCORE', if_true: files('a9mpcore.c')) +specific_ss.add(when: 'CONFIG_A15MPCORE', if_true: files('a15mpcore.c')) diff --git a/hw/cpu/realview_mpcore.c b/hw/cpu/realview_mpcore.c new file mode 100644 index 000000000..72c792eef --- /dev/null +++ b/hw/cpu/realview_mpcore.c @@ -0,0 +1,137 @@ +/* + * RealView ARM11MPCore internal peripheral emulation + * + * Copyright (c) 2006-2007 CodeSourcery. + * Copyright (c) 2013 SUSE LINUX Products GmbH + * Written by Paul Brook and Andreas Färber + * + * This code is licensed under the GPL. + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/module.h" +#include "hw/cpu/arm11mpcore.h" +#include "hw/intc/realview_gic.h" +#include "hw/irq.h" +#include "hw/qdev-properties.h" +#include "qom/object.h" + +#define TYPE_REALVIEW_MPCORE_RIRQ "realview_mpcore" +OBJECT_DECLARE_SIMPLE_TYPE(mpcore_rirq_state, REALVIEW_MPCORE_RIRQ) + +/* Dummy PIC to route IRQ lines. The baseboard has 4 independent IRQ + controllers. The output of these, plus some of the raw input lines + are fed into a single SMP-aware interrupt controller on the CPU. */ +struct mpcore_rirq_state { + SysBusDevice parent_obj; + + qemu_irq cpuic[32]; + qemu_irq rvic[4][64]; + uint32_t num_cpu; + + ARM11MPCorePriveState priv; + RealViewGICState gic[4]; +}; + +/* Map baseboard IRQs onto CPU IRQ lines. 
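+ Entries of -1 have no dedicated CPU IRQ input; those lines reach the + CPU only through one of the four RealView GICs.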
*/ +static const int mpcore_irq_map[32] = { + -1, -1, -1, -1, 1, 2, -1, -1, + -1, -1, 6, -1, 4, 5, -1, -1, + -1, 14, 15, 0, 7, 8, -1, -1, + -1, -1, -1, -1, 9, 3, -1, -1, +}; + +static void mpcore_rirq_set_irq(void *opaque, int irq, int level) +{ + mpcore_rirq_state *s = (mpcore_rirq_state *)opaque; + int i; + + for (i = 0; i < 4; i++) { + qemu_set_irq(s->rvic[i][irq], level); + } + if (irq < 32) { + irq = mpcore_irq_map[irq]; + if (irq >= 0) { + qemu_set_irq(s->cpuic[irq], level); + } + } +} + +static void realview_mpcore_realize(DeviceState *dev, Error **errp) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + mpcore_rirq_state *s = REALVIEW_MPCORE_RIRQ(dev); + DeviceState *priv = DEVICE(&s->priv); + DeviceState *gic; + SysBusDevice *gicbusdev; + int n; + int i; + + qdev_prop_set_uint32(priv, "num-cpu", s->num_cpu); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->priv), errp)) { + return; + } + sysbus_pass_irq(sbd, SYS_BUS_DEVICE(&s->priv)); + for (i = 0; i < 32; i++) { + s->cpuic[i] = qdev_get_gpio_in(priv, i); + } + /* ??? IRQ routing is hardcoded to "normal" mode. */ + for (n = 0; n < 4; n++) { + if (!sysbus_realize(SYS_BUS_DEVICE(&s->gic[n]), errp)) { + return; + } + gic = DEVICE(&s->gic[n]); + gicbusdev = SYS_BUS_DEVICE(&s->gic[n]); + sysbus_mmio_map(gicbusdev, 0, 0x10040000 + n * 0x10000); + sysbus_connect_irq(gicbusdev, 0, s->cpuic[10 + n]); + for (i = 0; i < 64; i++) { + s->rvic[n][i] = qdev_get_gpio_in(gic, i); + } + } + qdev_init_gpio_in(dev, mpcore_rirq_set_irq, 64); +} + +static void mpcore_rirq_init(Object *obj) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + mpcore_rirq_state *s = REALVIEW_MPCORE_RIRQ(obj); + SysBusDevice *privbusdev; + int i; + + object_initialize_child(obj, "a11priv", &s->priv, TYPE_ARM11MPCORE_PRIV); + privbusdev = SYS_BUS_DEVICE(&s->priv); + sysbus_init_mmio(sbd, sysbus_mmio_get_region(privbusdev, 0)); + + for (i = 0; i < 4; i++) { + object_initialize_child(obj, "gic[*]", &s->gic[i], TYPE_REALVIEW_GIC); + } +} + +static Property mpcore_rirq_properties[] = { + DEFINE_PROP_UINT32("num-cpu", mpcore_rirq_state, num_cpu, 1), + DEFINE_PROP_END_OF_LIST(), +}; + +static void mpcore_rirq_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = realview_mpcore_realize; + device_class_set_props(dc, mpcore_rirq_properties); +} + +static const TypeInfo mpcore_rirq_info = { + .name = TYPE_REALVIEW_MPCORE_RIRQ, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(mpcore_rirq_state), + .instance_init = mpcore_rirq_init, + .class_init = mpcore_rirq_class_init, +}; + +static void realview_mpcore_register_types(void) +{ + type_register_static(&mpcore_rirq_info); +} + +type_init(realview_mpcore_register_types) diff --git a/hw/cris/Kconfig b/hw/cris/Kconfig new file mode 100644 index 000000000..884ad2cbc --- /dev/null +++ b/hw/cris/Kconfig @@ -0,0 +1,9 @@ +config AXIS + bool + select ETRAXFS + select PFLASH_CFI02 + select NAND + +config ETRAXFS + bool + select PTIMER diff --git a/hw/cris/axis_dev88.c b/hw/cris/axis_dev88.c new file mode 100644 index 000000000..d82050d92 --- /dev/null +++ b/hw/cris/axis_dev88.c @@ -0,0 +1,352 @@ +/* + * QEMU model for the AXIS devboard 88. + * + * Copyright (c) 2009 Edgar E. Iglesias, Axis Communications AB. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "qapi/error.h" +#include "cpu.h" +#include "hw/sysbus.h" +#include "net/net.h" +#include "hw/block/flash.h" +#include "hw/boards.h" +#include "hw/cris/etraxfs.h" +#include "hw/loader.h" +#include "elf.h" +#include "boot.h" +#include "sysemu/qtest.h" +#include "sysemu/sysemu.h" + +#define D(x) +#define DNAND(x) + +struct nand_state_t +{ + DeviceState *nand; + MemoryRegion iomem; + unsigned int rdy:1; + unsigned int ale:1; + unsigned int cle:1; + unsigned int ce:1; +}; + +static struct nand_state_t nand_state; +static uint64_t nand_read(void *opaque, hwaddr addr, unsigned size) +{ + struct nand_state_t *s = opaque; + uint32_t r; + int rdy; + + r = nand_getio(s->nand); + nand_getpins(s->nand, &rdy); + s->rdy = rdy; + + DNAND(printf("%s addr=%x r=%x\n", __func__, addr, r)); + return r; +} + +static void +nand_write(void *opaque, hwaddr addr, uint64_t value, + unsigned size) +{ + struct nand_state_t *s = opaque; + int rdy; + + DNAND(printf("%s addr=%x v=%x\n", __func__, addr, (unsigned)value)); + nand_setpins(s->nand, s->cle, s->ale, s->ce, 1, 0); + nand_setio(s->nand, value); + nand_getpins(s->nand, &rdy); + s->rdy = rdy; +} + +static const MemoryRegionOps nand_ops = { + .read = nand_read, + .write = nand_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +struct tempsensor_t +{ + unsigned int shiftreg; + unsigned int count; + enum { + ST_OUT, ST_IN, ST_Z + } state; + + uint16_t regs[3]; +}; + +static void tempsensor_clkedge(struct tempsensor_t *s, + unsigned int clk, unsigned int data_in) +{ + D(printf("%s clk=%d state=%d sr=%x\n", __func__, + clk, s->state, s->shiftreg)); + if (s->count == 0) { + s->count = 16; + s->state = ST_OUT; + } + switch (s->state) { + case ST_OUT: + /* Output reg is clocked at negedge. */ + if (!clk) { + s->count--; + s->shiftreg <<= 1; + if (s->count == 0) { + s->shiftreg = 0; + s->state = ST_IN; + s->count = 16; + } + } + break; + case ST_Z: + if (clk) { + s->count--; + if (s->count == 0) { + s->shiftreg = 0; + s->state = ST_OUT; + s->count = 16; + } + } + break; + case ST_IN: + /* Indata is sampled at posedge. */ + if (clk) { + s->count--; + s->shiftreg <<= 1; + s->shiftreg |= data_in & 1; + if (s->count == 0) { + D(printf("%s cfgreg=%x\n", __func__, s->shiftreg)); + s->regs[0] = s->shiftreg; + s->state = ST_OUT; + s->count = 16; + + if ((s->regs[0] & 0xff) == 0) { + /* 25 degrees celsius. 
*/ + s->shiftreg = 0x0b9f; + } else if ((s->regs[0] & 0xff) == 0xff) { + /* Sensor ID, 0x8100 LM70. */ + s->shiftreg = 0x8100; + } else + printf("Invalid tempsens state %x\n", s->regs[0]); + } + } + break; + } +} + + +#define RW_PA_DOUT 0x00 +#define R_PA_DIN 0x01 +#define RW_PA_OE 0x02 +#define RW_PD_DOUT 0x10 +#define R_PD_DIN 0x11 +#define RW_PD_OE 0x12 + +static struct gpio_state_t +{ + MemoryRegion iomem; + struct nand_state_t *nand; + struct tempsensor_t tempsensor; + uint32_t regs[0x5c / 4]; +} gpio_state; + +static uint64_t gpio_read(void *opaque, hwaddr addr, unsigned size) +{ + struct gpio_state_t *s = opaque; + uint32_t r = 0; + + addr >>= 2; + switch (addr) + { + case R_PA_DIN: + r = s->regs[RW_PA_DOUT] & s->regs[RW_PA_OE]; + + /* Encode pins from the nand. */ + r |= s->nand->rdy << 7; + break; + case R_PD_DIN: + r = s->regs[RW_PD_DOUT] & s->regs[RW_PD_OE]; + + /* Encode temp sensor pins. */ + r |= (!!(s->tempsensor.shiftreg & 0x10000)) << 4; + break; + + default: + r = s->regs[addr]; + break; + } + /* Debug print moved before the return so it is not dead code. */ + D(printf("%s %x=%x\n", __func__, addr, r)); + return r; +} + +static void gpio_write(void *opaque, hwaddr addr, uint64_t value, + unsigned size) +{ + struct gpio_state_t *s = opaque; + D(printf("%s %x=%x\n", __func__, addr, (unsigned)value)); + + addr >>= 2; + switch (addr) + { + case RW_PA_DOUT: + /* Decode nand pins. */ + s->nand->ale = !!(value & (1 << 6)); + s->nand->cle = !!(value & (1 << 5)); + s->nand->ce = !!(value & (1 << 4)); + + s->regs[addr] = value; + break; + + case RW_PD_DOUT: + /* Temp sensor clk. */ + if ((s->regs[addr] ^ value) & 2) + tempsensor_clkedge(&s->tempsensor, !!(value & 2), + !!(value & 16)); + s->regs[addr] = value; + break; + + default: + s->regs[addr] = value; + break; + } +} + +static const MemoryRegionOps gpio_ops = { + .read = gpio_read, + .write = gpio_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +#define INTMEM_SIZE (128 * KiB) + +static struct cris_load_info li; + +static +void axisdev88_init(MachineState *machine) +{ + const char *kernel_filename = machine->kernel_filename; + const char *kernel_cmdline = machine->kernel_cmdline; + CRISCPU *cpu; + DeviceState *dev; + SysBusDevice *s; + DriveInfo *nand; + qemu_irq irq[30], nmi[2]; + void *etraxfs_dmac; + struct etraxfs_dma_client *dma_eth; + int i; + MemoryRegion *address_space_mem = get_system_memory(); + MemoryRegion *phys_intmem = g_new(MemoryRegion, 1); + + /* init CPUs */ + cpu = CRIS_CPU(cpu_create(machine->cpu_type)); + + memory_region_add_subregion(address_space_mem, 0x40000000, machine->ram); + + /* The ETRAX-FS has 128 KiB of on-chip RAM; the docs refer to it as the + internal memory. */ + memory_region_init_ram(phys_intmem, NULL, "axisdev88.chipram", + INTMEM_SIZE, &error_fatal); + memory_region_add_subregion(address_space_mem, 0x38000000, phys_intmem); + + /* Attach a NAND flash to CS1. */ + nand = drive_get(IF_MTD, 0, 0); + nand_state.nand = nand_init(nand ?
blk_by_legacy_dinfo(nand) : NULL, + NAND_MFR_STMICRO, 0x39); + memory_region_init_io(&nand_state.iomem, NULL, &nand_ops, &nand_state, + "nand", 0x05000000); + memory_region_add_subregion(address_space_mem, 0x10000000, + &nand_state.iomem); + + gpio_state.nand = &nand_state; + memory_region_init_io(&gpio_state.iomem, NULL, &gpio_ops, &gpio_state, + "gpio", 0x5c); + memory_region_add_subregion(address_space_mem, 0x3001a000, + &gpio_state.iomem); + + + dev = qdev_new("etraxfs-pic"); + s = SYS_BUS_DEVICE(dev); + sysbus_realize_and_unref(s, &error_fatal); + sysbus_mmio_map(s, 0, 0x3001c000); + sysbus_connect_irq(s, 0, qdev_get_gpio_in(DEVICE(cpu), CRIS_CPU_IRQ)); + sysbus_connect_irq(s, 1, qdev_get_gpio_in(DEVICE(cpu), CRIS_CPU_NMI)); + for (i = 0; i < 30; i++) { + irq[i] = qdev_get_gpio_in(dev, i); + } + nmi[0] = qdev_get_gpio_in(dev, 30); + nmi[1] = qdev_get_gpio_in(dev, 31); + + etraxfs_dmac = etraxfs_dmac_init(0x30000000, 10); + for (i = 0; i < 10; i++) { + /* On ETRAX, odd numbered channels are inputs. */ + etraxfs_dmac_connect(etraxfs_dmac, i, irq + 7 + i, i & 1); + } + + /* Add the two ethernet blocks. */ + dma_eth = g_malloc0(sizeof dma_eth[0] * 4); /* Allocate 4 channels. */ + etraxfs_eth_init(&nd_table[0], 0x30034000, 1, &dma_eth[0], &dma_eth[1]); + if (nb_nics > 1) { + etraxfs_eth_init(&nd_table[1], 0x30036000, 2, &dma_eth[2], &dma_eth[3]); + } + + /* The DMA Connector block is missing, hardwire things for now. */ + etraxfs_dmac_connect_client(etraxfs_dmac, 0, &dma_eth[0]); + etraxfs_dmac_connect_client(etraxfs_dmac, 1, &dma_eth[1]); + if (nb_nics > 1) { + etraxfs_dmac_connect_client(etraxfs_dmac, 6, &dma_eth[2]); + etraxfs_dmac_connect_client(etraxfs_dmac, 7, &dma_eth[3]); + } + + /* 2 timers. */ + sysbus_create_varargs("etraxfs-timer", 0x3001e000, irq[0x1b], nmi[1], NULL); + sysbus_create_varargs("etraxfs-timer", 0x3005e000, irq[0x1b], nmi[1], NULL); + + for (i = 0; i < 4; i++) { + etraxfs_ser_create(0x30026000 + i * 0x2000, irq[0x14 + i], serial_hd(i)); + } + + if (kernel_filename) { + li.image_filename = kernel_filename; + li.cmdline = kernel_cmdline; + li.ram_size = machine->ram_size; + cris_load_image(cpu, &li); + } else if (!qtest_enabled()) { + fprintf(stderr, "Kernel image must be specified\n"); + exit(1); + } +} + +static void axisdev88_machine_init(MachineClass *mc) +{ + mc->desc = "AXIS devboard 88"; + mc->init = axisdev88_init; + mc->is_default = true; + mc->default_cpu_type = CRIS_CPU_TYPE_NAME("crisv32"); + mc->default_ram_id = "axisdev88.ram"; +} + +DEFINE_MACHINE("axis-dev88", axisdev88_machine_init) diff --git a/hw/cris/boot.c b/hw/cris/boot.c new file mode 100644 index 000000000..9fa09cfd8 --- /dev/null +++ b/hw/cris/boot.c @@ -0,0 +1,102 @@ +/* + * CRIS image loading. + * + * Copyright (c) 2010 Edgar E. Iglesias, Axis Communications AB. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "hw/loader.h" +#include "elf.h" +#include "boot.h" +#include "qemu/cutils.h" +#include "sysemu/reset.h" + +static void main_cpu_reset(void *opaque) +{ + CRISCPU *cpu = opaque; + CPUCRISState *env = &cpu->env; + struct cris_load_info *li; + + li = env->load_info; + + cpu_reset(CPU(cpu)); + + if (!li) { + /* nothing more to do. */ + return; + } + + env->pc = li->entry; + + if (li->image_filename) { + env->regs[8] = 0x56902387; /* RAM boot magic. */ + env->regs[9] = 0x40004000 + li->image_size; + } + + if (li->cmdline) { + /* Let the kernel know we are modifying the cmdline. */ + env->regs[10] = 0x87109563; + env->regs[11] = 0x40000000; + } +} + +static uint64_t translate_kernel_address(void *opaque, uint64_t addr) +{ + return addr - 0x80000000LL; +} + +void cris_load_image(CRISCPU *cpu, struct cris_load_info *li) +{ + CPUCRISState *env = &cpu->env; + uint64_t entry; + int kcmdline_len; + int image_size; + + env->load_info = li; + /* Boots a kernel elf binary, os/linux-2.6/vmlinux from the axis + devboard SDK. */ + image_size = load_elf(li->image_filename, NULL, + translate_kernel_address, NULL, + &entry, NULL, NULL, NULL, 0, EM_CRIS, 0, 0); + li->entry = entry; + if (image_size < 0) { + /* Takes a kimage from the axis devboard SDK. 
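+ Raw images are loaded and entered at 0x40004000; main_cpu_reset() + above advertises the image end to the kernel via R9 + (0x40004000 + image_size).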
*/ + image_size = load_image_targphys(li->image_filename, 0x40004000, + li->ram_size); + li->entry = 0x40004000; + } + + if (image_size < 0) { + fprintf(stderr, "qemu: could not load kernel '%s'\n", + li->image_filename); + exit(1); + } + + if (li->cmdline && (kcmdline_len = strlen(li->cmdline))) { + if (kcmdline_len > 256) { + fprintf(stderr, "Too long CRIS kernel cmdline (max 256)\n"); + exit(1); + } + pstrcpy_targphys("cmdline", 0x40000000, 256, li->cmdline); + } + qemu_register_reset(main_cpu_reset, cpu); +} diff --git a/hw/cris/boot.h b/hw/cris/boot.h new file mode 100644 index 000000000..9f1e0e340 --- /dev/null +++ b/hw/cris/boot.h @@ -0,0 +1,16 @@ +#ifndef HW_CRIS_BOOT_H +#define HW_CRIS_BOOT_H + +struct cris_load_info +{ + const char *image_filename; + const char *cmdline; + int image_size; + ram_addr_t ram_size; + + hwaddr entry; +}; + +void cris_load_image(CRISCPU *cpu, struct cris_load_info *li); + +#endif diff --git a/hw/cris/meson.build b/hw/cris/meson.build new file mode 100644 index 000000000..dc808a4e0 --- /dev/null +++ b/hw/cris/meson.build @@ -0,0 +1,5 @@ +cris_ss = ss.source_set() +cris_ss.add(files('boot.c')) +cris_ss.add(when: 'CONFIG_AXIS', if_true: files('axis_dev88.c')) + +hw_arch += {'cris': cris_ss} diff --git a/hw/display/Kconfig b/hw/display/Kconfig new file mode 100644 index 000000000..a2306b67d --- /dev/null +++ b/hw/display/Kconfig @@ -0,0 +1,136 @@ +config DDC + bool + depends on I2C + select EDID + +config EDID + bool + +config FW_CFG_DMA + bool + +config VGA_CIRRUS + bool + default y if PCI_DEVICES + depends on PCI + select VGA + +config G364FB + bool + +config JAZZ_LED + bool + +config PL110 + bool + select FRAMEBUFFER + +config SII9022 + bool + depends on I2C + select DDC + +config SSD0303 + bool + depends on I2C + +config SSD0323 + bool + +config VGA_PCI + bool + default y if PCI_DEVICES + depends on PCI + select VGA + select EDID + +config VGA_ISA + bool + depends on ISA_BUS + select VGA + +config VGA_ISA_MM + bool + select VGA + +config VMWARE_VGA + bool + default y if PCI_DEVICES + depends on PCI + select VGA + +config BOCHS_DISPLAY + bool + default y if PCI_DEVICES + depends on PCI + select VGA + select EDID + +config BLIZZARD + bool + +config FRAMEBUFFER + bool + +config SM501 + bool + select I2C + select DDC + select SERIAL + +config TCX + bool + +config CG3 + bool + +config ARTIST + bool + select FRAMEBUFFER + +config VGA + bool + +config QXL + bool + depends on SPICE && PCI + select VGA + +config VIRTIO_GPU + bool + default y + depends on VIRTIO + select EDID + +config VIRTIO_VGA + bool + # defaults to "N", enabled by specific boards + depends on VIRTIO_PCI + select VGA + +config VHOST_USER_GPU + bool + default y + depends on VIRTIO_GPU && VHOST_USER + +config VHOST_USER_VGA + bool + default y + depends on VIRTIO_VGA && VHOST_USER_GPU + +config DPCD + bool + select AUX + +config ATI_VGA + bool + default y if PCI_DEVICES + depends on PCI + select VGA + select BITBANG_I2C + select DDC + +config MACFB + bool + select FRAMEBUFFER + depends on NUBUS diff --git a/hw/display/artist.c b/hw/display/artist.c new file mode 100644 index 000000000..21b7fd1b4 --- /dev/null +++ b/hw/display/artist.c @@ -0,0 +1,1500 @@ +/* + * QEMU HP Artist Emulation + * + * Copyright (c) 2019 Sven Schnelle + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. 
+ */ + +#include "qemu/osdep.h" +#include "qemu-common.h" +#include "qemu/error-report.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "qemu/units.h" +#include "qapi/error.h" +#include "hw/sysbus.h" +#include "hw/loader.h" +#include "hw/qdev-core.h" +#include "hw/qdev-properties.h" +#include "migration/vmstate.h" +#include "ui/console.h" +#include "trace.h" +#include "framebuffer.h" +#include "qom/object.h" + +#define TYPE_ARTIST "artist" +OBJECT_DECLARE_SIMPLE_TYPE(ARTISTState, ARTIST) + +#ifdef HOST_WORDS_BIGENDIAN +#define ROP8OFF(_i) (3 - (_i)) +#else +#define ROP8OFF +#endif + +struct vram_buffer { + MemoryRegion mr; + uint8_t *data; + unsigned int size; + unsigned int width; + unsigned int height; +}; + +struct ARTISTState { + SysBusDevice parent_obj; + + QemuConsole *con; + MemoryRegion vram_mem; + MemoryRegion mem_as_root; + MemoryRegion reg; + MemoryRegionSection fbsection; + + void *vram_int_mr; + AddressSpace as; + + struct vram_buffer vram_buffer[16]; + + uint16_t width; + uint16_t height; + uint16_t depth; + + uint32_t fg_color; + uint32_t bg_color; + + uint32_t vram_char_y; + uint32_t vram_bitmask; + + uint32_t vram_start; + uint32_t vram_pos; + + uint32_t vram_size; + + uint32_t blockmove_source; + uint32_t blockmove_dest; + uint32_t blockmove_size; + + uint32_t line_size; + uint32_t line_end; + uint32_t line_xy; + uint32_t line_pattern_start; + uint32_t line_pattern_skip; + + uint32_t cursor_pos; + + uint32_t cursor_height; + uint32_t cursor_width; + + uint32_t plane_mask; + + uint32_t reg_100080; + uint32_t reg_300200; + uint32_t reg_300208; + uint32_t reg_300218; + + uint32_t cmap_bm_access; + uint32_t dst_bm_access; + uint32_t src_bm_access; + uint32_t control_plane; + uint32_t transfer_data; + uint32_t image_bitmap_op; + + uint32_t font_write1; + uint32_t font_write2; + uint32_t font_write_pos_y; + + int draw_line_pattern; +}; + +typedef enum { + ARTIST_BUFFER_AP = 1, + ARTIST_BUFFER_OVERLAY = 2, + ARTIST_BUFFER_CURSOR1 = 6, + ARTIST_BUFFER_CURSOR2 = 7, + ARTIST_BUFFER_ATTRIBUTE = 13, + ARTIST_BUFFER_CMAP = 15, +} artist_buffer_t; + +typedef enum { + VRAM_IDX = 0x1004a0, + VRAM_BITMASK = 0x1005a0, + VRAM_WRITE_INCR_X = 0x100600, + VRAM_WRITE_INCR_X2 = 0x100604, + VRAM_WRITE_INCR_Y = 0x100620, + VRAM_START = 0x100800, + BLOCK_MOVE_SIZE = 0x100804, + BLOCK_MOVE_SOURCE = 0x100808, + TRANSFER_DATA = 0x100820, + FONT_WRITE_INCR_Y = 0x1008a0, + VRAM_START_TRIGGER = 0x100a00, + VRAM_SIZE_TRIGGER = 0x100a04, + FONT_WRITE_START = 0x100aa0, + BLOCK_MOVE_DEST_TRIGGER = 0x100b00, + BLOCK_MOVE_SIZE_TRIGGER = 0x100b04, + LINE_XY = 0x100ccc, + PATTERN_LINE_START = 0x100ecc, + LINE_SIZE = 0x100e04, + LINE_END = 0x100e44, + CMAP_BM_ACCESS = 0x118000, + DST_BM_ACCESS = 0x118004, + SRC_BM_ACCESS = 0x118008, + CONTROL_PLANE = 0x11800c, + FG_COLOR = 0x118010, + BG_COLOR = 0x118014, + PLANE_MASK = 0x118018, + IMAGE_BITMAP_OP = 0x11801c, + CURSOR_POS = 0x300100, + CURSOR_CTRL = 0x300104, +} artist_reg_t; + +typedef enum { + ARTIST_ROP_CLEAR = 0, + ARTIST_ROP_COPY = 3, + ARTIST_ROP_XOR = 6, + ARTIST_ROP_NOT_DST = 10, + ARTIST_ROP_SET = 15, +} artist_rop_t; + +#define REG_NAME(_x) case _x: return " "#_x; +static const char *artist_reg_name(uint64_t addr) +{ + switch ((artist_reg_t)addr) { + REG_NAME(VRAM_IDX); + REG_NAME(VRAM_BITMASK); + REG_NAME(VRAM_WRITE_INCR_X); + REG_NAME(VRAM_WRITE_INCR_X2); + REG_NAME(VRAM_WRITE_INCR_Y); + REG_NAME(VRAM_START); + REG_NAME(BLOCK_MOVE_SIZE); + REG_NAME(BLOCK_MOVE_SOURCE); + REG_NAME(FG_COLOR); + REG_NAME(BG_COLOR); + REG_NAME(PLANE_MASK); + 
REG_NAME(VRAM_START_TRIGGER); + REG_NAME(VRAM_SIZE_TRIGGER); + REG_NAME(BLOCK_MOVE_DEST_TRIGGER); + REG_NAME(BLOCK_MOVE_SIZE_TRIGGER); + REG_NAME(TRANSFER_DATA); + REG_NAME(CONTROL_PLANE); + REG_NAME(IMAGE_BITMAP_OP); + REG_NAME(CMAP_BM_ACCESS); + REG_NAME(DST_BM_ACCESS); + REG_NAME(SRC_BM_ACCESS); + REG_NAME(CURSOR_POS); + REG_NAME(CURSOR_CTRL); + REG_NAME(LINE_XY); + REG_NAME(PATTERN_LINE_START); + REG_NAME(LINE_SIZE); + REG_NAME(LINE_END); + REG_NAME(FONT_WRITE_INCR_Y); + REG_NAME(FONT_WRITE_START); + } + return ""; +} +#undef REG_NAME + +/* artist has a fixed line length of 2048 bytes. */ +#define ADDR_TO_Y(addr) extract32(addr, 11, 11) +#define ADDR_TO_X(addr) extract32(addr, 0, 11) + +static int16_t artist_get_x(uint32_t reg) +{ + return reg >> 16; +} + +static int16_t artist_get_y(uint32_t reg) +{ + return reg & 0xffff; +} + +static void artist_invalidate_lines(struct vram_buffer *buf, + int starty, int height) +{ + int start = starty * buf->width; + int size; + + if (starty + height > buf->height) + height = buf->height - starty; + + size = height * buf->width; + + if (start + size <= buf->size) { + memory_region_set_dirty(&buf->mr, start, size); + } +} + +static int vram_write_pix_per_transfer(ARTISTState *s) +{ + if (s->cmap_bm_access) { + return 1 << ((s->cmap_bm_access >> 27) & 0x0f); + } else { + return 1 << ((s->dst_bm_access >> 27) & 0x0f); + } +} + +static int vram_pixel_length(ARTISTState *s) +{ + if (s->cmap_bm_access) { + return (s->cmap_bm_access >> 24) & 0x07; + } else { + return (s->dst_bm_access >> 24) & 0x07; + } +} + +static int vram_write_bufidx(ARTISTState *s) +{ + if (s->cmap_bm_access) { + return (s->cmap_bm_access >> 12) & 0x0f; + } else { + return (s->dst_bm_access >> 12) & 0x0f; + } +} + +static int vram_read_bufidx(ARTISTState *s) +{ + if (s->cmap_bm_access) { + return (s->cmap_bm_access >> 12) & 0x0f; + } else { + return (s->src_bm_access >> 12) & 0x0f; + } +} + +static struct vram_buffer *vram_read_buffer(ARTISTState *s) +{ + return &s->vram_buffer[vram_read_bufidx(s)]; +} + +static struct vram_buffer *vram_write_buffer(ARTISTState *s) +{ + return &s->vram_buffer[vram_write_bufidx(s)]; +} + +static uint8_t artist_get_color(ARTISTState *s) +{ + if (s->image_bitmap_op & 2) { + return s->fg_color; + } else { + return s->bg_color; + } +} + +static artist_rop_t artist_get_op(ARTISTState *s) +{ + return (s->image_bitmap_op >> 8) & 0xf; +} + +static void artist_rop8(ARTISTState *s, struct vram_buffer *buf, + unsigned int offset, uint8_t val) +{ + const artist_rop_t op = artist_get_op(s); + uint8_t plane_mask; + uint8_t *dst; + + if (offset >= buf->size) { + qemu_log_mask(LOG_GUEST_ERROR, + "rop8 offset:%u bufsize:%u\n", offset, buf->size); + return; + } + dst = buf->data + offset; + plane_mask = s->plane_mask & 0xff; + + switch (op) { + case ARTIST_ROP_CLEAR: + *dst &= ~plane_mask; + break; + + case ARTIST_ROP_COPY: + *dst = (*dst & ~plane_mask) | (val & plane_mask); + break; + + case ARTIST_ROP_XOR: + *dst ^= val & plane_mask; + break; + + case ARTIST_ROP_NOT_DST: + *dst ^= plane_mask; + break; + + case ARTIST_ROP_SET: + *dst |= plane_mask; + break; + + default: + qemu_log_mask(LOG_UNIMP, "%s: unsupported rop %d\n", __func__, op); + break; + } +} + +static void artist_get_cursor_pos(ARTISTState *s, int *x, int *y) +{ + /* + * Don't know whether these magic offset values are configurable via + * some register. They are the same for all resolutions, so don't + * bother about it. 
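+ * As a worked example: a cursor_pos with x = 338 and y = 0x47a decodes + * to screen position (0, 0).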
+ */ + + *y = 0x47a - artist_get_y(s->cursor_pos); + *x = ((artist_get_x(s->cursor_pos) - 338) / 2); + + if (*x > s->width) { + *x = 0; + } + + if (*y > s->height) { + *y = 0; + } +} + +static void artist_invalidate_cursor(ARTISTState *s) +{ + int x, y; + artist_get_cursor_pos(s, &x, &y); + artist_invalidate_lines(&s->vram_buffer[ARTIST_BUFFER_AP], + y, s->cursor_height); +} + +static void vram_bit_write(ARTISTState *s, int posy, bool incr_x, + int size, uint32_t data) +{ + struct vram_buffer *buf; + uint32_t vram_bitmask = s->vram_bitmask; + int mask, i, pix_count, pix_length; + unsigned int posx, offset, width; + uint8_t *data8, *p; + + pix_count = vram_write_pix_per_transfer(s); + pix_length = vram_pixel_length(s); + + buf = vram_write_buffer(s); + width = buf->width; + + if (s->cmap_bm_access) { + offset = s->vram_pos; + } else { + posx = ADDR_TO_X(s->vram_pos >> 2); + posy += ADDR_TO_Y(s->vram_pos >> 2); + offset = posy * width + posx; + } + + if (!buf->size || offset >= buf->size) { + return; + } + + p = buf->data; + + if (pix_count > size * 8) { + pix_count = size * 8; + } + + switch (pix_length) { + case 0: + if (s->image_bitmap_op & 0x20000000) { + data &= vram_bitmask; + } + + for (i = 0; i < pix_count; i++) { + uint32_t off = offset + pix_count - 1 - i; + if (off < buf->size) { + artist_rop8(s, buf, off, + (data & 1) ? (s->plane_mask >> 24) : 0); + } + data >>= 1; + } + memory_region_set_dirty(&buf->mr, offset, pix_count); + break; + + case 3: + if (s->cmap_bm_access) { + if (offset + 3 < buf->size) { + *(uint32_t *)(p + offset) = data; + } + break; + } + data8 = (uint8_t *)&data; + + for (i = 3; i >= 0; i--) { + if (!(s->image_bitmap_op & 0x20000000) || + s->vram_bitmask & (1 << (28 + i))) { + uint32_t off = offset + 3 - i; + if (off < buf->size) { + artist_rop8(s, buf, off, data8[ROP8OFF(i)]); + } + } + } + memory_region_set_dirty(&buf->mr, offset, 3); + break; + + case 6: + switch (size) { + default: + case 4: + vram_bitmask = s->vram_bitmask; + break; + + case 2: + vram_bitmask = s->vram_bitmask >> 16; + break; + + case 1: + vram_bitmask = s->vram_bitmask >> 24; + break; + } + + for (i = 0; i < pix_count && offset + i < buf->size; i++) { + mask = 1 << (pix_count - 1 - i); + + if (!(s->image_bitmap_op & 0x20000000) || + (vram_bitmask & mask)) { + if (data & mask) { + artist_rop8(s, buf, offset + i, s->fg_color); + } else { + if (!(s->image_bitmap_op & 0x10000002)) { + artist_rop8(s, buf, offset + i, s->bg_color); + } + } + } + } + memory_region_set_dirty(&buf->mr, offset, pix_count); + break; + + default: + qemu_log_mask(LOG_UNIMP, "%s: unknown pixel length %d\n", + __func__, pix_length); + break; + } + + if (incr_x) { + if (s->cmap_bm_access) { + s->vram_pos += 4; + } else { + s->vram_pos += pix_count << 2; + } + } + + if (vram_write_bufidx(s) == ARTIST_BUFFER_CURSOR1 || + vram_write_bufidx(s) == ARTIST_BUFFER_CURSOR2) { + artist_invalidate_cursor(s); + } +} + +static void block_move(ARTISTState *s, + unsigned int source_x, unsigned int source_y, + unsigned int dest_x, unsigned int dest_y, + unsigned int width, unsigned int height) +{ + struct vram_buffer *buf; + int line, endline, lineincr, startcolumn, endcolumn, columnincr, column; + unsigned int dst, src; + + trace_artist_block_move(source_x, source_y, dest_x, dest_y, width, height); + + if (s->control_plane != 0) { + /* We don't support CONTROL_PLANE accesses */ + qemu_log_mask(LOG_UNIMP, "%s: CONTROL_PLANE: %08x\n", __func__, + s->control_plane); + return; + } + + buf = &s->vram_buffer[ARTIST_BUFFER_AP]; + if (height > 
buf->height) {
+        height = buf->height;
+    }
+    if (width > buf->width) {
+        width = buf->width;
+    }
+
+    if (dest_y > source_y) {
+        /* move down */
+        line = height - 1;
+        endline = -1;
+        lineincr = -1;
+    } else {
+        /* move up */
+        line = 0;
+        endline = height;
+        lineincr = 1;
+    }
+
+    if (dest_x > source_x) {
+        /* move right */
+        startcolumn = width - 1;
+        endcolumn = -1;
+        columnincr = -1;
+    } else {
+        /* move left */
+        startcolumn = 0;
+        endcolumn = width;
+        columnincr = 1;
+    }
+
+    for ( ; line != endline; line += lineincr) {
+        src = source_x + ((line + source_y) * buf->width) + startcolumn;
+        dst = dest_x + ((line + dest_y) * buf->width) + startcolumn;
+
+        for (column = startcolumn; column != endcolumn; column += columnincr) {
+            if (dst >= buf->size || src >= buf->size) {
+                continue;
+            }
+            artist_rop8(s, buf, dst, buf->data[src]);
+            src += columnincr;
+            dst += columnincr;
+        }
+    }
+
+    artist_invalidate_lines(buf, dest_y, height);
+}
+
+static void fill_window(ARTISTState *s,
+                        unsigned int startx, unsigned int starty,
+                        unsigned int width, unsigned int height)
+{
+    unsigned int offset;
+    uint8_t color = artist_get_color(s);
+    struct vram_buffer *buf;
+    int x, y;
+
+    trace_artist_fill_window(startx, starty, width, height,
+                             s->image_bitmap_op, s->control_plane);
+
+    if (s->control_plane != 0) {
+        /* We don't support CONTROL_PLANE accesses */
+        qemu_log_mask(LOG_UNIMP, "%s: CONTROL_PLANE: %08x\n", __func__,
+                      s->control_plane);
+        return;
+    }
+
+    if (s->reg_100080 == 0x7d) {
+        /*
+         * Not sure what this register really does, but
+         * 0x7d seems to enable autoincrement of the Y axis
+         * by the current block move height.
+         */
+        height = artist_get_y(s->blockmove_size);
+        s->vram_start += height;
+    }
+
+    buf = &s->vram_buffer[ARTIST_BUFFER_AP];
+
+    for (y = starty; y < starty + height; y++) {
+        offset = y * s->width;
+
+        for (x = startx; x < startx + width; x++) {
+            artist_rop8(s, buf, offset + x, color);
+        }
+    }
+    artist_invalidate_lines(buf, starty, height);
+}
+
+static void draw_line(ARTISTState *s,
+                      unsigned int x1, unsigned int y1,
+                      unsigned int x2, unsigned int y2,
+                      bool update_start, int skip_pix, int max_pix)
+{
+    struct vram_buffer *buf = &s->vram_buffer[ARTIST_BUFFER_AP];
+    uint8_t color;
+    int dx, dy, t, e, x, y, incy, diago, horiz;
+    bool c1;
+
+    trace_artist_draw_line(x1, y1, x2, y2);
+
+    if ((x1 >= buf->width && x2 >= buf->width) ||
+        (y1 >= buf->height && y2 >= buf->height)) {
+        return;
+    }
+
+    if (update_start) {
+        s->vram_start = (x2 << 16) | y2;
+    }
+
+    if (x2 > x1) {
+        dx = x2 - x1;
+    } else {
+        dx = x1 - x2;
+    }
+    if (y2 > y1) {
+        dy = y2 - y1;
+    } else {
+        dy = y1 - y2;
+    }
+
+    c1 = false;
+    if (dy > dx) {
+        t = y2;
+        y2 = x2;
+        x2 = t;
+
+        t = y1;
+        y1 = x1;
+        x1 = t;
+
+        t = dx;
+        dx = dy;
+        dy = t;
+
+        c1 = true;
+    }
+
+    if (x1 > x2) {
+        t = y2;
+        y2 = y1;
+        y1 = t;
+
+        t = x1;
+        x1 = x2;
+        x2 = t;
+    }
+
+    horiz = dy << 1;
+    diago = (dy - dx) << 1;
+    e = (dy << 1) - dx;
+
+    if (y1 <= y2) {
+        incy = 1;
+    } else {
+        incy = -1;
+    }
+    x = x1;
+    y = y1;
+    color = artist_get_color(s);
+
+    do {
+        unsigned int ofs;
+
+        if (c1) {
+            ofs = x * s->width + y;
+        } else {
+            ofs = y * s->width + x;
+        }
+
+        if (skip_pix > 0) {
+            skip_pix--;
+        } else {
+            artist_rop8(s, buf, ofs, color);
+        }
+
+        if (e > 0) {
+            y += incy;
+            e += diago;
+        } else {
+            e += horiz;
+        }
+        x++;
+    } while (x <= x2 && (max_pix == -1 || --max_pix > 0));
+    if (c1) {
+        artist_invalidate_lines(buf, x, dy + 1);
+    } else {
+        artist_invalidate_lines(buf, y, dx + 1);
+    }
+}
+
+static void draw_line_pattern_start(ARTISTState *s)
+{
+
+    int
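+    /*
+     * Patterned lines: PATTERN_LINE_START latches how many pixels to
+     * draw for the first segment; each following LINE_XY write continues
+     * the line and toggles image_bitmap_op bit 1, which appears to flip
+     * between foreground and background color for alternating segments.
+     */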
startx = artist_get_x(s->vram_start); + int starty = artist_get_y(s->vram_start); + int endx = artist_get_x(s->blockmove_size); + int endy = artist_get_y(s->blockmove_size); + int pstart = s->line_pattern_start >> 16; + + draw_line(s, startx, starty, endx, endy, false, -1, pstart); + s->line_pattern_skip = pstart; +} + +static void draw_line_pattern_next(ARTISTState *s) +{ + + int startx = artist_get_x(s->vram_start); + int starty = artist_get_y(s->vram_start); + int endx = artist_get_x(s->blockmove_size); + int endy = artist_get_y(s->blockmove_size); + int line_xy = s->line_xy >> 16; + + draw_line(s, startx, starty, endx, endy, false, s->line_pattern_skip, + s->line_pattern_skip + line_xy); + s->line_pattern_skip += line_xy; + s->image_bitmap_op ^= 2; +} + +static void draw_line_size(ARTISTState *s, bool update_start) +{ + + int startx = artist_get_x(s->vram_start); + int starty = artist_get_y(s->vram_start); + int endx = artist_get_x(s->line_size); + int endy = artist_get_y(s->line_size); + + draw_line(s, startx, starty, endx, endy, update_start, -1, -1); +} + +static void draw_line_xy(ARTISTState *s, bool update_start) +{ + + int startx = artist_get_x(s->vram_start); + int starty = artist_get_y(s->vram_start); + int sizex = artist_get_x(s->blockmove_size); + int sizey = artist_get_y(s->blockmove_size); + int linexy = s->line_xy >> 16; + int endx, endy; + + endx = startx; + endy = starty; + + if (sizex > 0) { + endx = startx + linexy; + } + + if (sizex < 0) { + endx = startx; + startx -= linexy; + } + + if (sizey > 0) { + endy = starty + linexy; + } + + if (sizey < 0) { + endy = starty; + starty -= linexy; + } + + if (startx < 0) { + startx = 0; + } + + if (endx < 0) { + endx = 0; + } + + if (starty < 0) { + starty = 0; + } + + if (endy < 0) { + endy = 0; + } + + draw_line(s, startx, starty, endx, endy, false, -1, -1); +} + +static void draw_line_end(ARTISTState *s, bool update_start) +{ + + int startx = artist_get_x(s->vram_start); + int starty = artist_get_y(s->vram_start); + int endx = artist_get_x(s->line_end); + int endy = artist_get_y(s->line_end); + + draw_line(s, startx, starty, endx, endy, update_start, -1, -1); +} + +static void font_write16(ARTISTState *s, uint16_t val) +{ + struct vram_buffer *buf; + uint32_t color = (s->image_bitmap_op & 2) ? s->fg_color : s->bg_color; + uint16_t mask; + int i; + + unsigned int startx = artist_get_x(s->vram_start); + unsigned int starty = artist_get_y(s->vram_start) + s->font_write_pos_y; + unsigned int offset = starty * s->width + startx; + + buf = &s->vram_buffer[ARTIST_BUFFER_AP]; + + if (startx >= buf->width || starty >= buf->height || + offset + 16 >= buf->size) { + return; + } + + for (i = 0; i < 16; i++) { + mask = 1 << (15 - i); + if (val & mask) { + artist_rop8(s, buf, offset + i, color); + } else { + if (!(s->image_bitmap_op & 0x20000000)) { + artist_rop8(s, buf, offset + i, s->bg_color); + } + } + } + artist_invalidate_lines(buf, starty, 1); +} + +static void font_write(ARTISTState *s, uint32_t val) +{ + font_write16(s, val >> 16); + if (++s->font_write_pos_y == artist_get_y(s->blockmove_size)) { + s->vram_start += (s->blockmove_size & 0xffff0000); + return; + } + + font_write16(s, val & 0xffff); + if (++s->font_write_pos_y == artist_get_y(s->blockmove_size)) { + s->vram_start += (s->blockmove_size & 0xffff0000); + return; + } +} + +static void combine_write_reg(hwaddr addr, uint64_t val, int size, void *out) +{ + /* + * FIXME: is there a qemu helper for this? 
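+ * What is open-coded below is a big-endian register file on a
+ * little-endian host: the guest's lowest byte address lands in the
+ * register's most significant byte.  An equivalent, untested sketch
+ * using the generic bswap helpers would be
+ *
+ *     uint32_t be = cpu_to_be32(*(uint32_t *)out);
+ *     stn_be_p((uint8_t *)&be + (addr & (4 - size)), size, val);
+ *     *(uint32_t *)out = be32_to_cpu(be);
+ *
+ * which would also cover big-endian hosts without the #ifdef.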
+ */ + +#ifndef HOST_WORDS_BIGENDIAN + addr ^= 3; +#endif + + switch (size) { + case 1: + *(uint8_t *)(out + (addr & 3)) = val; + break; + + case 2: + *(uint16_t *)(out + (addr & 2)) = val; + break; + + case 4: + *(uint32_t *)out = val; + break; + + default: + qemu_log_mask(LOG_UNIMP, "unsupported write size: %d\n", size); + } +} + +static void artist_reg_write(void *opaque, hwaddr addr, uint64_t val, + unsigned size) +{ + ARTISTState *s = opaque; + int width, height; + + trace_artist_reg_write(size, addr, artist_reg_name(addr & ~3ULL), val); + + switch (addr & ~3ULL) { + case 0x100080: + combine_write_reg(addr, val, size, &s->reg_100080); + break; + + case FG_COLOR: + combine_write_reg(addr, val, size, &s->fg_color); + break; + + case BG_COLOR: + combine_write_reg(addr, val, size, &s->bg_color); + break; + + case VRAM_BITMASK: + combine_write_reg(addr, val, size, &s->vram_bitmask); + break; + + case VRAM_WRITE_INCR_Y: + vram_bit_write(s, s->vram_char_y++, false, size, val); + break; + + case VRAM_WRITE_INCR_X: + case VRAM_WRITE_INCR_X2: + vram_bit_write(s, s->vram_char_y, true, size, val); + break; + + case VRAM_IDX: + combine_write_reg(addr, val, size, &s->vram_pos); + s->vram_char_y = 0; + s->draw_line_pattern = 0; + break; + + case VRAM_START: + combine_write_reg(addr, val, size, &s->vram_start); + s->draw_line_pattern = 0; + break; + + case VRAM_START_TRIGGER: + combine_write_reg(addr, val, size, &s->vram_start); + fill_window(s, artist_get_x(s->vram_start), + artist_get_y(s->vram_start), + artist_get_x(s->blockmove_size), + artist_get_y(s->blockmove_size)); + break; + + case VRAM_SIZE_TRIGGER: + combine_write_reg(addr, val, size, &s->vram_size); + + if (size == 2 && !(addr & 2)) { + height = artist_get_y(s->blockmove_size); + } else { + height = artist_get_y(s->vram_size); + } + + if (size == 2 && (addr & 2)) { + width = artist_get_x(s->blockmove_size); + } else { + width = artist_get_x(s->vram_size); + } + + fill_window(s, artist_get_x(s->vram_start), + artist_get_y(s->vram_start), + width, height); + break; + + case LINE_XY: + combine_write_reg(addr, val, size, &s->line_xy); + if (s->draw_line_pattern) { + draw_line_pattern_next(s); + } else { + draw_line_xy(s, true); + } + break; + + case PATTERN_LINE_START: + combine_write_reg(addr, val, size, &s->line_pattern_start); + s->draw_line_pattern = 1; + draw_line_pattern_start(s); + break; + + case LINE_SIZE: + combine_write_reg(addr, val, size, &s->line_size); + draw_line_size(s, true); + break; + + case LINE_END: + combine_write_reg(addr, val, size, &s->line_end); + draw_line_end(s, true); + break; + + case BLOCK_MOVE_SIZE: + combine_write_reg(addr, val, size, &s->blockmove_size); + break; + + case BLOCK_MOVE_SOURCE: + combine_write_reg(addr, val, size, &s->blockmove_source); + break; + + case BLOCK_MOVE_DEST_TRIGGER: + combine_write_reg(addr, val, size, &s->blockmove_dest); + + block_move(s, artist_get_x(s->blockmove_source), + artist_get_y(s->blockmove_source), + artist_get_x(s->blockmove_dest), + artist_get_y(s->blockmove_dest), + artist_get_x(s->blockmove_size), + artist_get_y(s->blockmove_size)); + break; + + case BLOCK_MOVE_SIZE_TRIGGER: + combine_write_reg(addr, val, size, &s->blockmove_size); + + block_move(s, + artist_get_x(s->blockmove_source), + artist_get_y(s->blockmove_source), + artist_get_x(s->vram_start), + artist_get_y(s->vram_start), + artist_get_x(s->blockmove_size), + artist_get_y(s->blockmove_size)); + break; + + case PLANE_MASK: + combine_write_reg(addr, val, size, &s->plane_mask); + break; + + case 
CMAP_BM_ACCESS:
+        combine_write_reg(addr, val, size, &s->cmap_bm_access);
+        break;
+
+    case DST_BM_ACCESS:
+        combine_write_reg(addr, val, size, &s->dst_bm_access);
+        s->cmap_bm_access = 0;
+        break;
+
+    case SRC_BM_ACCESS:
+        combine_write_reg(addr, val, size, &s->src_bm_access);
+        s->cmap_bm_access = 0;
+        break;
+
+    case CONTROL_PLANE:
+        combine_write_reg(addr, val, size, &s->control_plane);
+        break;
+
+    case TRANSFER_DATA:
+        combine_write_reg(addr, val, size, &s->transfer_data);
+        break;
+
+    case 0x300200:
+        combine_write_reg(addr, val, size, &s->reg_300200);
+        break;
+
+    case 0x300208:
+        combine_write_reg(addr, val, size, &s->reg_300208);
+        break;
+
+    case 0x300218:
+        combine_write_reg(addr, val, size, &s->reg_300218);
+        break;
+
+    case CURSOR_POS:
+        artist_invalidate_cursor(s);
+        combine_write_reg(addr, val, size, &s->cursor_pos);
+        artist_invalidate_cursor(s);
+        break;
+
+    case CURSOR_CTRL:
+        break;
+
+    case IMAGE_BITMAP_OP:
+        combine_write_reg(addr, val, size, &s->image_bitmap_op);
+        break;
+
+    case FONT_WRITE_INCR_Y:
+        combine_write_reg(addr, val, size, &s->font_write1);
+        font_write(s, s->font_write1);
+        break;
+
+    case FONT_WRITE_START:
+        combine_write_reg(addr, val, size, &s->font_write2);
+        s->font_write_pos_y = 0;
+        font_write(s, s->font_write2);
+        break;
+
+    case 0x300104:    /* unknown register (hex assumed); writes ignored */
+        break;
+
+    default:
+        qemu_log_mask(LOG_UNIMP, "%s: unknown register: reg=%08" HWADDR_PRIx
+                      " val=%08" PRIx64 " size=%d\n",
+                      __func__, addr, val, size);
+        break;
+    }
+}
+
+static uint64_t combine_read_reg(hwaddr addr, int size, void *in)
+{
+    /*
+     * FIXME: is there a qemu helper for this?
+     */
+
+#ifndef HOST_WORDS_BIGENDIAN
+    addr ^= 3;
+#endif
+
+    switch (size) {
+    case 1:
+        return *(uint8_t *)(in + (addr & 3));
+
+    case 2:
+        return *(uint16_t *)(in + (addr & 2));
+
+    case 4:
+        return *(uint32_t *)in;
+
+    default:
+        qemu_log_mask(LOG_UNIMP, "unsupported read size: %d\n", size);
+        return 0;
+    }
+}
+
+static uint64_t artist_reg_read(void *opaque, hwaddr addr, unsigned size)
+{
+    ARTISTState *s = opaque;
+    uint32_t val = 0;
+
+    switch (addr & ~3ULL) {
+    /* Unknown status registers */
+    case 0:
+        break;
+
+    case 0x211110:
+        val = (s->width << 16) | s->height;
+        if (s->depth == 1) {
+            val |= 1u << 31;
+        }
+        break;
+
+    case 0x100000:
+    case 0x300000:
+    case 0x300004:
+    case 0x300308:
+    case 0x380000:
+        break;
+
+    case 0x300008:
+    case 0x380008:
+        /*
+         * FIFO ready flag.
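+         * All drawing operations in this device model complete
+         * synchronously inside the register write that triggers them,
+         * so reporting bit 4 (0x10, "ready") unconditionally is safe: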
we're not emulating the FIFOs + * so we're always ready + */ + val = 0x10; + break; + + case 0x300200: + val = s->reg_300200; + break; + + case 0x300208: + val = s->reg_300208; + break; + + case 0x300218: + val = s->reg_300218; + break; + + case 0x30023c: + val = 0xac4ffdac; + break; + + case 0x380004: + /* 0x02000000 Buserror */ + val = 0x6dc20006; + break; + + default: + qemu_log_mask(LOG_UNIMP, "%s: unknown register: %08" HWADDR_PRIx + " size %d\n", __func__, addr, size); + break; + } + val = combine_read_reg(addr, size, &val); + trace_artist_reg_read(size, addr, artist_reg_name(addr & ~3ULL), val); + return val; +} + +static void artist_vram_write(void *opaque, hwaddr addr, uint64_t val, + unsigned size) +{ + ARTISTState *s = opaque; + struct vram_buffer *buf; + unsigned int posy, posx; + unsigned int offset; + trace_artist_vram_write(size, addr, val); + + if (s->cmap_bm_access) { + buf = &s->vram_buffer[ARTIST_BUFFER_CMAP]; + if (addr + 3 < buf->size) { + *(uint32_t *)(buf->data + addr) = val; + } + return; + } + + buf = vram_write_buffer(s); + posy = ADDR_TO_Y(addr >> 2); + posx = ADDR_TO_X(addr >> 2); + + if (!buf->size) { + return; + } + + if (posy > buf->height || posx > buf->width) { + return; + } + + offset = posy * buf->width + posx; + if (offset >= buf->size) { + return; + } + + switch (size) { + case 4: + if (offset + 3 < buf->size) { + *(uint32_t *)(buf->data + offset) = be32_to_cpu(val); + memory_region_set_dirty(&buf->mr, offset, 4); + } + break; + case 2: + if (offset + 1 < buf->size) { + *(uint16_t *)(buf->data + offset) = be16_to_cpu(val); + memory_region_set_dirty(&buf->mr, offset, 2); + } + break; + case 1: + if (offset < buf->size) { + *(uint8_t *)(buf->data + offset) = val; + memory_region_set_dirty(&buf->mr, offset, 1); + } + break; + default: + break; + } +} + +static uint64_t artist_vram_read(void *opaque, hwaddr addr, unsigned size) +{ + ARTISTState *s = opaque; + struct vram_buffer *buf; + uint64_t val; + unsigned int posy, posx; + + if (s->cmap_bm_access) { + buf = &s->vram_buffer[ARTIST_BUFFER_CMAP]; + val = 0; + if (addr < buf->size && addr + 3 < buf->size) { + val = *(uint32_t *)(buf->data + addr); + } + trace_artist_vram_read(size, addr, 0, 0, val); + return val; + } + + buf = vram_read_buffer(s); + if (!buf->size) { + return 0; + } + + posy = ADDR_TO_Y(addr >> 2); + posx = ADDR_TO_X(addr >> 2); + + if (posy > buf->height || posx > buf->width) { + return 0; + } + + val = cpu_to_be32(*(uint32_t *)(buf->data + posy * buf->width + posx)); + trace_artist_vram_read(size, addr, posx, posy, val); + return val; +} + +static const MemoryRegionOps artist_reg_ops = { + .read = artist_reg_read, + .write = artist_reg_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .impl.min_access_size = 1, + .impl.max_access_size = 4, +}; + +static const MemoryRegionOps artist_vram_ops = { + .read = artist_vram_read, + .write = artist_vram_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .impl.min_access_size = 1, + .impl.max_access_size = 4, +}; + +static void artist_draw_cursor(ARTISTState *s) +{ + DisplaySurface *surface = qemu_console_surface(s->con); + uint32_t *data = (uint32_t *)surface_data(surface); + struct vram_buffer *cursor0, *cursor1 , *buf; + int cx, cy, cursor_pos_x, cursor_pos_y; + + cursor0 = &s->vram_buffer[ARTIST_BUFFER_CURSOR1]; + cursor1 = &s->vram_buffer[ARTIST_BUFFER_CURSOR2]; + buf = &s->vram_buffer[ARTIST_BUFFER_AP]; + + artist_get_cursor_pos(s, &cursor_pos_x, &cursor_pos_y); + + for (cy = 0; cy < s->cursor_height; cy++) { + + for (cx = 0; cx < s->cursor_width; 
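+        /*
+         * The cursor is made of two monochrome planes: a set bit in
+         * CURSOR1 paints the pixel black, otherwise a set bit in CURSOR2
+         * paints it white, and if neither is set the frame buffer shows
+         * through (transparent).
+         */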
cx++) {
+
+            if (cursor_pos_y + cy < 0 ||
+                cursor_pos_x + cx < 0 ||
+                cursor_pos_y + cy >= buf->height ||
+                cursor_pos_x + cx >= buf->width) {
+                continue;
+            }
+
+            int dstoffset = (cursor_pos_y + cy) * s->width +
+                (cursor_pos_x + cx);
+
+            if (cursor0->data[cy * cursor0->width + cx]) {
+                data[dstoffset] = 0;
+            } else {
+                if (cursor1->data[cy * cursor1->width + cx]) {
+                    data[dstoffset] = 0xffffff;
+                }
+            }
+        }
+    }
+}
+
+static void artist_draw_line(void *opaque, uint8_t *d, const uint8_t *src,
+                             int width, int pitch)
+{
+    ARTISTState *s = ARTIST(opaque);
+    uint32_t *cmap, *data = (uint32_t *)d;
+    int x;
+
+    cmap = (uint32_t *)(s->vram_buffer[ARTIST_BUFFER_CMAP].data + 0x400);
+
+    for (x = 0; x < s->width; x++) {
+        *data++ = cmap[*src++];
+    }
+}
+
+static void artist_update_display(void *opaque)
+{
+    ARTISTState *s = opaque;
+    DisplaySurface *surface = qemu_console_surface(s->con);
+    int first = 0, last;
+
+    framebuffer_update_display(surface, &s->fbsection, s->width, s->height,
+                               s->width, s->width * 4, 0, 0, artist_draw_line,
+                               s, &first, &last);
+
+    artist_draw_cursor(s);
+
+    dpy_gfx_update(s->con, 0, 0, s->width, s->height);
+}
+
+static void artist_invalidate(void *opaque)
+{
+    ARTISTState *s = ARTIST(opaque);
+    struct vram_buffer *buf = &s->vram_buffer[ARTIST_BUFFER_AP];
+    memory_region_set_dirty(&buf->mr, 0, buf->size);
+}
+
+static const GraphicHwOps artist_ops = {
+    .invalidate = artist_invalidate,
+    .gfx_update = artist_update_display,
+};
+
+static void artist_initfn(Object *obj)
+{
+    SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
+    ARTISTState *s = ARTIST(obj);
+
+    memory_region_init_io(&s->reg, obj, &artist_reg_ops, s, "artist.reg",
+                          4 * MiB);
+    memory_region_init_io(&s->vram_mem, obj, &artist_vram_ops, s, "artist.vram",
+                          8 * MiB);
+    sysbus_init_mmio(sbd, &s->reg);
+    sysbus_init_mmio(sbd, &s->vram_mem);
+}
+
+static void artist_create_buffer(ARTISTState *s, const char *name,
+                                 hwaddr *offset, unsigned int idx,
+                                 int width, int height)
+{
+    struct vram_buffer *buf = s->vram_buffer + idx;
+
+    memory_region_init_ram(&buf->mr, NULL, name, width * height,
+                           &error_fatal);
+    memory_region_add_subregion_overlap(&s->mem_as_root, *offset, &buf->mr, 0);
+
+    buf->data = memory_region_get_ram_ptr(&buf->mr);
+    buf->size = height * width;
+    buf->width = width;
+    buf->height = height;
+
+    *offset += buf->size;
+}
+
+static void artist_realizefn(DeviceState *dev, Error **errp)
+{
+    ARTISTState *s = ARTIST(dev);
+    struct vram_buffer *buf;
+    hwaddr offset = 0;
+
+    if (s->width > 2048 || s->height > 2048) {
+        error_report("artist: screen size cannot exceed 2048 x 2048 pixels.");
+        s->width = MIN(s->width, 2048);
+        s->height = MIN(s->height, 2048);
+    }
+
+    if (s->width < 640 || s->height < 480) {
+        error_report("artist: minimum screen size is 640 x 480 pixels.");
+        s->width = MAX(s->width, 640);
+        s->height = MAX(s->height, 480);
+    }
+
+    memory_region_init(&s->mem_as_root, OBJECT(dev), "artist", ~0ull);
+    address_space_init(&s->as, &s->mem_as_root, "artist");
+
+    artist_create_buffer(s, "cmap", &offset, ARTIST_BUFFER_CMAP, 2048, 4);
+    artist_create_buffer(s, "ap", &offset, ARTIST_BUFFER_AP,
+                         s->width, s->height);
+    artist_create_buffer(s, "cursor1", &offset, ARTIST_BUFFER_CURSOR1, 64, 64);
+    artist_create_buffer(s, "cursor2", &offset, ARTIST_BUFFER_CURSOR2, 64, 64);
+    artist_create_buffer(s, "attribute", &offset, ARTIST_BUFFER_ATTRIBUTE,
+                         64, 64);
+
+    buf = &s->vram_buffer[ARTIST_BUFFER_AP];
+    framebuffer_update_memory_section(&s->fbsection, &buf->mr, 0,
+                                      buf->width, buf->height);
+    /*
+     * no idea
whether the cursor is fixed size or not, so assume 32x32 which + * seems sufficient for HP-UX X11. + */ + s->cursor_height = 32; + s->cursor_width = 32; + + s->con = graphic_console_init(dev, 0, &artist_ops, s); + qemu_console_resize(s->con, s->width, s->height); +} + +static int vmstate_artist_post_load(void *opaque, int version_id) +{ + artist_invalidate(opaque); + return 0; +} + +static const VMStateDescription vmstate_artist = { + .name = "artist", + .version_id = 1, + .minimum_version_id = 1, + .post_load = vmstate_artist_post_load, + .fields = (VMStateField[]) { + VMSTATE_UINT16(height, ARTISTState), + VMSTATE_UINT16(width, ARTISTState), + VMSTATE_UINT16(depth, ARTISTState), + VMSTATE_UINT32(fg_color, ARTISTState), + VMSTATE_UINT32(bg_color, ARTISTState), + VMSTATE_UINT32(vram_char_y, ARTISTState), + VMSTATE_UINT32(vram_bitmask, ARTISTState), + VMSTATE_UINT32(vram_start, ARTISTState), + VMSTATE_UINT32(vram_pos, ARTISTState), + VMSTATE_UINT32(vram_size, ARTISTState), + VMSTATE_UINT32(blockmove_source, ARTISTState), + VMSTATE_UINT32(blockmove_dest, ARTISTState), + VMSTATE_UINT32(blockmove_size, ARTISTState), + VMSTATE_UINT32(line_size, ARTISTState), + VMSTATE_UINT32(line_end, ARTISTState), + VMSTATE_UINT32(line_xy, ARTISTState), + VMSTATE_UINT32(cursor_pos, ARTISTState), + VMSTATE_UINT32(cursor_height, ARTISTState), + VMSTATE_UINT32(cursor_width, ARTISTState), + VMSTATE_UINT32(plane_mask, ARTISTState), + VMSTATE_UINT32(reg_100080, ARTISTState), + VMSTATE_UINT32(reg_300200, ARTISTState), + VMSTATE_UINT32(reg_300208, ARTISTState), + VMSTATE_UINT32(reg_300218, ARTISTState), + VMSTATE_UINT32(cmap_bm_access, ARTISTState), + VMSTATE_UINT32(dst_bm_access, ARTISTState), + VMSTATE_UINT32(src_bm_access, ARTISTState), + VMSTATE_UINT32(control_plane, ARTISTState), + VMSTATE_UINT32(transfer_data, ARTISTState), + VMSTATE_UINT32(image_bitmap_op, ARTISTState), + VMSTATE_UINT32(font_write1, ARTISTState), + VMSTATE_UINT32(font_write2, ARTISTState), + VMSTATE_UINT32(font_write_pos_y, ARTISTState), + VMSTATE_END_OF_LIST() + } +}; + +static Property artist_properties[] = { + DEFINE_PROP_UINT16("width", ARTISTState, width, 1280), + DEFINE_PROP_UINT16("height", ARTISTState, height, 1024), + DEFINE_PROP_UINT16("depth", ARTISTState, depth, 8), + DEFINE_PROP_END_OF_LIST(), +}; + +static void artist_reset(DeviceState *qdev) +{ +} + +static void artist_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = artist_realizefn; + dc->vmsd = &vmstate_artist; + dc->reset = artist_reset; + device_class_set_props(dc, artist_properties); +} + +static const TypeInfo artist_info = { + .name = TYPE_ARTIST, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(ARTISTState), + .instance_init = artist_initfn, + .class_init = artist_class_init, +}; + +static void artist_register_types(void) +{ + type_register_static(&artist_info); +} + +type_init(artist_register_types) diff --git a/hw/display/ati.c b/hw/display/ati.c new file mode 100644 index 000000000..31f22754d --- /dev/null +++ b/hw/display/ati.c @@ -0,0 +1,1052 @@ +/* + * QEMU ATI SVGA emulation + * + * Copyright (c) 2019 BALATON Zoltan + * + * This work is licensed under the GNU GPL license version 2 or later. + */ + +/* + * WARNING: + * This is very incomplete and only enough for Linux console and some + * unaccelerated X output at the moment. + * Currently it's little more than a frame buffer with minimal functions, + * other more advanced features of the hardware are yet to be implemented. 
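+ * The device can be tried with e.g.:
+ *
+ *     -device ati-vga,model=rage128p      (the default)
+ *     -device ati-vga,model=rv100
+ *
+ * (assuming the usual "ati-vga" QOM type name; the model property is
+ * defined at the bottom of this file).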
+ * We only aim for Rage 128 Pro (and some RV100) and 2D only at first, + * No 3D at all yet (maybe after 2D works, but feel free to improve it) + */ + +#include "qemu/osdep.h" +#include "ati_int.h" +#include "ati_regs.h" +#include "vga-access.h" +#include "hw/qdev-properties.h" +#include "vga_regs.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "qemu/error-report.h" +#include "qapi/error.h" +#include "ui/console.h" +#include "hw/display/i2c-ddc.h" +#include "trace.h" + +#define ATI_DEBUG_HW_CURSOR 0 + +static const struct { + const char *name; + uint16_t dev_id; +} ati_model_aliases[] = { + { "rage128p", PCI_DEVICE_ID_ATI_RAGE128_PF }, + { "rv100", PCI_DEVICE_ID_ATI_RADEON_QY }, +}; + +enum { VGA_MODE, EXT_MODE }; + +static void ati_vga_switch_mode(ATIVGAState *s) +{ + DPRINTF("%d -> %d\n", + s->mode, !!(s->regs.crtc_gen_cntl & CRTC2_EXT_DISP_EN)); + if (s->regs.crtc_gen_cntl & CRTC2_EXT_DISP_EN) { + /* Extended mode enabled */ + s->mode = EXT_MODE; + if (s->regs.crtc_gen_cntl & CRTC2_EN) { + /* CRT controller enabled, use CRTC values */ + /* FIXME Should these be the same as VGA CRTC regs? */ + uint32_t offs = s->regs.crtc_offset & 0x07ffffff; + int stride = (s->regs.crtc_pitch & 0x7ff) * 8; + int bpp = 0; + int h, v; + + if (s->regs.crtc_h_total_disp == 0) { + s->regs.crtc_h_total_disp = ((640 / 8) - 1) << 16; + } + if (s->regs.crtc_v_total_disp == 0) { + s->regs.crtc_v_total_disp = (480 - 1) << 16; + } + h = ((s->regs.crtc_h_total_disp >> 16) + 1) * 8; + v = (s->regs.crtc_v_total_disp >> 16) + 1; + switch (s->regs.crtc_gen_cntl & CRTC_PIX_WIDTH_MASK) { + case CRTC_PIX_WIDTH_4BPP: + bpp = 4; + break; + case CRTC_PIX_WIDTH_8BPP: + bpp = 8; + break; + case CRTC_PIX_WIDTH_15BPP: + bpp = 15; + break; + case CRTC_PIX_WIDTH_16BPP: + bpp = 16; + break; + case CRTC_PIX_WIDTH_24BPP: + bpp = 24; + break; + case CRTC_PIX_WIDTH_32BPP: + bpp = 32; + break; + default: + qemu_log_mask(LOG_UNIMP, "Unsupported bpp value\n"); + return; + } + DPRINTF("Switching to %dx%d %d %d @ %x\n", h, v, stride, bpp, offs); + vbe_ioport_write_index(&s->vga, 0, VBE_DISPI_INDEX_ENABLE); + vbe_ioport_write_data(&s->vga, 0, VBE_DISPI_DISABLED); + s->vga.big_endian_fb = (s->regs.config_cntl & APER_0_ENDIAN || + s->regs.config_cntl & APER_1_ENDIAN ? + true : false); + /* reset VBE regs then set up mode */ + s->vga.vbe_regs[VBE_DISPI_INDEX_XRES] = h; + s->vga.vbe_regs[VBE_DISPI_INDEX_YRES] = v; + s->vga.vbe_regs[VBE_DISPI_INDEX_BPP] = bpp; + /* enable mode via ioport so it updates vga regs */ + vbe_ioport_write_index(&s->vga, 0, VBE_DISPI_INDEX_ENABLE); + vbe_ioport_write_data(&s->vga, 0, VBE_DISPI_ENABLED | + VBE_DISPI_LFB_ENABLED | VBE_DISPI_NOCLEARMEM | + (s->regs.dac_cntl & DAC_8BIT_EN ? 
VBE_DISPI_8BIT_DAC : 0)); + /* now set offset and stride after enable as that resets these */ + if (stride) { + int bypp = DIV_ROUND_UP(bpp, BITS_PER_BYTE); + + vbe_ioport_write_index(&s->vga, 0, VBE_DISPI_INDEX_VIRT_WIDTH); + vbe_ioport_write_data(&s->vga, 0, stride); + stride *= bypp; + if (offs % stride) { + DPRINTF("CRTC offset is not multiple of pitch\n"); + vbe_ioport_write_index(&s->vga, 0, + VBE_DISPI_INDEX_X_OFFSET); + vbe_ioport_write_data(&s->vga, 0, offs % stride / bypp); + } + vbe_ioport_write_index(&s->vga, 0, VBE_DISPI_INDEX_Y_OFFSET); + vbe_ioport_write_data(&s->vga, 0, offs / stride); + DPRINTF("VBE offset (%d,%d), vbe_start_addr=%x\n", + s->vga.vbe_regs[VBE_DISPI_INDEX_X_OFFSET], + s->vga.vbe_regs[VBE_DISPI_INDEX_Y_OFFSET], + s->vga.vbe_start_addr); + } + } + } else { + /* VGA mode enabled */ + s->mode = VGA_MODE; + vbe_ioport_write_index(&s->vga, 0, VBE_DISPI_INDEX_ENABLE); + vbe_ioport_write_data(&s->vga, 0, VBE_DISPI_DISABLED); + } +} + +/* Used by host side hardware cursor */ +static void ati_cursor_define(ATIVGAState *s) +{ + uint8_t data[1024]; + uint32_t srcoff; + int i, j, idx = 0; + + if ((s->regs.cur_offset & BIT(31)) || s->cursor_guest_mode) { + return; /* Do not update cursor if locked or rendered by guest */ + } + /* FIXME handle cur_hv_offs correctly */ + srcoff = s->regs.cur_offset - + (s->regs.cur_hv_offs >> 16) - (s->regs.cur_hv_offs & 0xffff) * 16; + for (i = 0; i < 64; i++) { + for (j = 0; j < 8; j++, idx++) { + data[idx] = vga_read_byte(&s->vga, srcoff + i * 16 + j); + data[512 + idx] = vga_read_byte(&s->vga, srcoff + i * 16 + j + 8); + } + } + if (!s->cursor) { + s->cursor = cursor_alloc(64, 64); + } + cursor_set_mono(s->cursor, s->regs.cur_color1, s->regs.cur_color0, + &data[512], 1, &data[0]); + dpy_cursor_define(s->vga.con, s->cursor); +} + +/* Alternatively support guest rendered hardware cursor */ +static void ati_cursor_invalidate(VGACommonState *vga) +{ + ATIVGAState *s = container_of(vga, ATIVGAState, vga); + int size = (s->regs.crtc_gen_cntl & CRTC2_CUR_EN) ? 
64 : 0; + + if (s->regs.cur_offset & BIT(31)) { + return; /* Do not update cursor if locked */ + } + if (s->cursor_size != size || + vga->hw_cursor_x != s->regs.cur_hv_pos >> 16 || + vga->hw_cursor_y != (s->regs.cur_hv_pos & 0xffff) || + s->cursor_offset != s->regs.cur_offset - (s->regs.cur_hv_offs >> 16) - + (s->regs.cur_hv_offs & 0xffff) * 16) { + /* Remove old cursor then update and show new one if needed */ + vga_invalidate_scanlines(vga, vga->hw_cursor_y, vga->hw_cursor_y + 63); + vga->hw_cursor_x = s->regs.cur_hv_pos >> 16; + vga->hw_cursor_y = s->regs.cur_hv_pos & 0xffff; + s->cursor_offset = s->regs.cur_offset - (s->regs.cur_hv_offs >> 16) - + (s->regs.cur_hv_offs & 0xffff) * 16; + s->cursor_size = size; + if (size) { + vga_invalidate_scanlines(vga, + vga->hw_cursor_y, vga->hw_cursor_y + 63); + } + } +} + +static void ati_cursor_draw_line(VGACommonState *vga, uint8_t *d, int scr_y) +{ + ATIVGAState *s = container_of(vga, ATIVGAState, vga); + uint32_t srcoff; + uint32_t *dp = (uint32_t *)d; + int i, j, h; + + if (!(s->regs.crtc_gen_cntl & CRTC2_CUR_EN) || + scr_y < vga->hw_cursor_y || scr_y >= vga->hw_cursor_y + 64 || + scr_y > s->regs.crtc_v_total_disp >> 16) { + return; + } + /* FIXME handle cur_hv_offs correctly */ + srcoff = s->cursor_offset + (scr_y - vga->hw_cursor_y) * 16; + dp = &dp[vga->hw_cursor_x]; + h = ((s->regs.crtc_h_total_disp >> 16) + 1) * 8; + for (i = 0; i < 8; i++) { + uint32_t color; + uint8_t abits = vga_read_byte(vga, srcoff + i); + uint8_t xbits = vga_read_byte(vga, srcoff + i + 8); + for (j = 0; j < 8; j++, abits <<= 1, xbits <<= 1) { + if (abits & BIT(7)) { + if (xbits & BIT(7)) { + color = dp[i * 8 + j] ^ 0xffffffff; /* complement */ + } else { + continue; /* transparent, no change */ + } + } else { + color = (xbits & BIT(7) ? s->regs.cur_color1 : + s->regs.cur_color0) | 0xff000000; + } + if (vga->hw_cursor_x + i * 8 + j >= h) { + return; /* end of screen, don't span to next line */ + } + dp[i * 8 + j] = color; + } + } +} + +static uint64_t ati_i2c(bitbang_i2c_interface *i2c, uint64_t data, int base) +{ + bool c = (data & BIT(base + 17) ? !!(data & BIT(base + 1)) : 1); + bool d = (data & BIT(base + 16) ? !!(data & BIT(base)) : 1); + + bitbang_i2c_set(i2c, BITBANG_I2C_SCL, c); + d = bitbang_i2c_set(i2c, BITBANG_I2C_SDA, d); + + data &= ~0xf00ULL; + if (c) { + data |= BIT(base + 9); + } + if (d) { + data |= BIT(base + 8); + } + return data; +} + +static void ati_vga_update_irq(ATIVGAState *s) +{ + pci_set_irq(&s->dev, !!(s->regs.gen_int_status & s->regs.gen_int_cntl)); +} + +static void ati_vga_vblank_irq(void *opaque) +{ + ATIVGAState *s = opaque; + + timer_mod(&s->vblank_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + + NANOSECONDS_PER_SECOND / 60); + s->regs.gen_int_status |= CRTC_VBLANK_INT; + ati_vga_update_irq(s); +} + +static inline uint64_t ati_reg_read_offs(uint32_t reg, int offs, + unsigned int size) +{ + if (offs == 0 && size == 4) { + return reg; + } else { + return extract32(reg, offs * BITS_PER_BYTE, size * BITS_PER_BYTE); + } +} + +static uint64_t ati_mm_read(void *opaque, hwaddr addr, unsigned int size) +{ + ATIVGAState *s = opaque; + uint64_t val = 0; + + switch (addr) { + case MM_INDEX: + val = s->regs.mm_index; + break; + case MM_DATA ... 
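+    /*
+     * MM_INDEX/MM_DATA pair: with bit 31 of MM_INDEX set the index is a
+     * byte offset into VRAM, otherwise it names another register which
+     * is accessed recursively; small indices that would recurse into
+     * MM_DATA itself are rejected as guest errors.
+     */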
MM_DATA + 3: + /* indexed access to regs or memory */ + if (s->regs.mm_index & BIT(31)) { + uint32_t idx = s->regs.mm_index & ~BIT(31); + if (idx <= s->vga.vram_size - size) { + val = ldn_le_p(s->vga.vram_ptr + idx, size); + } + } else if (s->regs.mm_index > MM_DATA + 3) { + val = ati_mm_read(s, s->regs.mm_index + addr - MM_DATA, size); + } else { + qemu_log_mask(LOG_GUEST_ERROR, + "ati_mm_read: mm_index too small: %u\n", s->regs.mm_index); + } + break; + case BIOS_0_SCRATCH ... BUS_CNTL - 1: + { + int i = (addr - BIOS_0_SCRATCH) / 4; + if (s->dev_id == PCI_DEVICE_ID_ATI_RAGE128_PF && i > 3) { + break; + } + val = ati_reg_read_offs(s->regs.bios_scratch[i], + addr - (BIOS_0_SCRATCH + i * 4), size); + break; + } + case GEN_INT_CNTL: + val = s->regs.gen_int_cntl; + break; + case GEN_INT_STATUS: + val = s->regs.gen_int_status; + break; + case CRTC_GEN_CNTL ... CRTC_GEN_CNTL + 3: + val = ati_reg_read_offs(s->regs.crtc_gen_cntl, + addr - CRTC_GEN_CNTL, size); + break; + case CRTC_EXT_CNTL ... CRTC_EXT_CNTL + 3: + val = ati_reg_read_offs(s->regs.crtc_ext_cntl, + addr - CRTC_EXT_CNTL, size); + break; + case DAC_CNTL: + val = s->regs.dac_cntl; + break; + case GPIO_VGA_DDC: + val = s->regs.gpio_vga_ddc; + break; + case GPIO_DVI_DDC: + val = s->regs.gpio_dvi_ddc; + break; + case GPIO_MONID ... GPIO_MONID + 3: + val = ati_reg_read_offs(s->regs.gpio_monid, + addr - GPIO_MONID, size); + break; + case PALETTE_INDEX: + /* FIXME unaligned access */ + val = vga_ioport_read(&s->vga, VGA_PEL_IR) << 16; + val |= vga_ioport_read(&s->vga, VGA_PEL_IW) & 0xff; + break; + case PALETTE_DATA: + val = vga_ioport_read(&s->vga, VGA_PEL_D); + break; + case CNFG_CNTL: + val = s->regs.config_cntl; + break; + case CNFG_MEMSIZE: + val = s->vga.vram_size; + break; + case CONFIG_APER_0_BASE: + case CONFIG_APER_1_BASE: + val = pci_default_read_config(&s->dev, + PCI_BASE_ADDRESS_0, size) & 0xfffffff0; + break; + case CONFIG_APER_SIZE: + val = s->vga.vram_size; + break; + case CONFIG_REG_1_BASE: + val = pci_default_read_config(&s->dev, + PCI_BASE_ADDRESS_2, size) & 0xfffffff0; + break; + case CONFIG_REG_APER_SIZE: + val = memory_region_size(&s->mm); + break; + case MC_STATUS: + val = 5; + break; + case MEM_SDRAM_MODE_REG: + if (s->dev_id != PCI_DEVICE_ID_ATI_RAGE128_PF) { + val = BIT(28) | BIT(20); + } + break; + case RBBM_STATUS: + case GUI_STAT: + val = 64; /* free CMDFIFO entries */ + break; + case CRTC_H_TOTAL_DISP: + val = s->regs.crtc_h_total_disp; + break; + case CRTC_H_SYNC_STRT_WID: + val = s->regs.crtc_h_sync_strt_wid; + break; + case CRTC_V_TOTAL_DISP: + val = s->regs.crtc_v_total_disp; + break; + case CRTC_V_SYNC_STRT_WID: + val = s->regs.crtc_v_sync_strt_wid; + break; + case CRTC_OFFSET: + val = s->regs.crtc_offset; + break; + case CRTC_OFFSET_CNTL: + val = s->regs.crtc_offset_cntl; + break; + case CRTC_PITCH: + val = s->regs.crtc_pitch; + break; + case 0xf00 ... 0xfff: + val = pci_default_read_config(&s->dev, addr - 0xf00, size); + break; + case CUR_OFFSET ... CUR_OFFSET + 3: + val = ati_reg_read_offs(s->regs.cur_offset, addr - CUR_OFFSET, size); + break; + case CUR_HORZ_VERT_POSN ... CUR_HORZ_VERT_POSN + 3: + val = ati_reg_read_offs(s->regs.cur_hv_pos, + addr - CUR_HORZ_VERT_POSN, size); + if (addr + size > CUR_HORZ_VERT_POSN + 3) { + val |= (s->regs.cur_offset & BIT(31)) >> (4 - size); + } + break; + case CUR_HORZ_VERT_OFF ... 
CUR_HORZ_VERT_OFF + 3: + val = ati_reg_read_offs(s->regs.cur_hv_offs, + addr - CUR_HORZ_VERT_OFF, size); + if (addr + size > CUR_HORZ_VERT_OFF + 3) { + val |= (s->regs.cur_offset & BIT(31)) >> (4 - size); + } + break; + case CUR_CLR0 ... CUR_CLR0 + 3: + val = ati_reg_read_offs(s->regs.cur_color0, addr - CUR_CLR0, size); + break; + case CUR_CLR1 ... CUR_CLR1 + 3: + val = ati_reg_read_offs(s->regs.cur_color1, addr - CUR_CLR1, size); + break; + case DST_OFFSET: + val = s->regs.dst_offset; + break; + case DST_PITCH: + val = s->regs.dst_pitch; + if (s->dev_id == PCI_DEVICE_ID_ATI_RAGE128_PF) { + val &= s->regs.dst_tile << 16; + } + break; + case DST_WIDTH: + val = s->regs.dst_width; + break; + case DST_HEIGHT: + val = s->regs.dst_height; + break; + case SRC_X: + val = s->regs.src_x; + break; + case SRC_Y: + val = s->regs.src_y; + break; + case DST_X: + val = s->regs.dst_x; + break; + case DST_Y: + val = s->regs.dst_y; + break; + case DP_GUI_MASTER_CNTL: + val = s->regs.dp_gui_master_cntl; + break; + case SRC_OFFSET: + val = s->regs.src_offset; + break; + case SRC_PITCH: + val = s->regs.src_pitch; + if (s->dev_id == PCI_DEVICE_ID_ATI_RAGE128_PF) { + val &= s->regs.src_tile << 16; + } + break; + case DP_BRUSH_BKGD_CLR: + val = s->regs.dp_brush_bkgd_clr; + break; + case DP_BRUSH_FRGD_CLR: + val = s->regs.dp_brush_frgd_clr; + break; + case DP_SRC_FRGD_CLR: + val = s->regs.dp_src_frgd_clr; + break; + case DP_SRC_BKGD_CLR: + val = s->regs.dp_src_bkgd_clr; + break; + case DP_CNTL: + val = s->regs.dp_cntl; + break; + case DP_DATATYPE: + val = s->regs.dp_datatype; + break; + case DP_MIX: + val = s->regs.dp_mix; + break; + case DP_WRITE_MASK: + val = s->regs.dp_write_mask; + break; + case DEFAULT_OFFSET: + val = s->regs.default_offset; + if (s->dev_id != PCI_DEVICE_ID_ATI_RAGE128_PF) { + val >>= 10; + val |= s->regs.default_pitch << 16; + val |= s->regs.default_tile << 30; + } + break; + case DEFAULT_PITCH: + val = s->regs.default_pitch; + val |= s->regs.default_tile << 16; + break; + case DEFAULT_SC_BOTTOM_RIGHT: + val = s->regs.default_sc_bottom_right; + break; + default: + break; + } + if (addr < CUR_OFFSET || addr > CUR_CLR1 || ATI_DEBUG_HW_CURSOR) { + trace_ati_mm_read(size, addr, ati_reg_name(addr & ~3ULL), val); + } + return val; +} + +static inline void ati_reg_write_offs(uint32_t *reg, int offs, + uint64_t data, unsigned int size) +{ + if (offs == 0 && size == 4) { + *reg = data; + } else { + *reg = deposit32(*reg, offs * BITS_PER_BYTE, size * BITS_PER_BYTE, + data); + } +} + +static void ati_mm_write(void *opaque, hwaddr addr, + uint64_t data, unsigned int size) +{ + ATIVGAState *s = opaque; + + if (addr < CUR_OFFSET || addr > CUR_CLR1 || ATI_DEBUG_HW_CURSOR) { + trace_ati_mm_write(size, addr, ati_reg_name(addr & ~3ULL), data); + } + switch (addr) { + case MM_INDEX: + s->regs.mm_index = data & ~3; + break; + case MM_DATA ... MM_DATA + 3: + /* indexed access to regs or memory */ + if (s->regs.mm_index & BIT(31)) { + uint32_t idx = s->regs.mm_index & ~BIT(31); + if (idx <= s->vga.vram_size - size) { + stn_le_p(s->vga.vram_ptr + idx, size, data); + } + } else if (s->regs.mm_index > MM_DATA + 3) { + ati_mm_write(s, s->regs.mm_index + addr - MM_DATA, data, size); + } else { + qemu_log_mask(LOG_GUEST_ERROR, + "ati_mm_write: mm_index too small: %u\n", s->regs.mm_index); + } + break; + case BIOS_0_SCRATCH ... 
BUS_CNTL - 1: + { + int i = (addr - BIOS_0_SCRATCH) / 4; + if (s->dev_id == PCI_DEVICE_ID_ATI_RAGE128_PF && i > 3) { + break; + } + ati_reg_write_offs(&s->regs.bios_scratch[i], + addr - (BIOS_0_SCRATCH + i * 4), data, size); + break; + } + case GEN_INT_CNTL: + s->regs.gen_int_cntl = data; + if (data & CRTC_VBLANK_INT) { + ati_vga_vblank_irq(s); + } else { + timer_del(&s->vblank_timer); + ati_vga_update_irq(s); + } + break; + case GEN_INT_STATUS: + data &= (s->dev_id == PCI_DEVICE_ID_ATI_RAGE128_PF ? + 0x000f040fUL : 0xfc080effUL); + s->regs.gen_int_status &= ~data; + ati_vga_update_irq(s); + break; + case CRTC_GEN_CNTL ... CRTC_GEN_CNTL + 3: + { + uint32_t val = s->regs.crtc_gen_cntl; + ati_reg_write_offs(&s->regs.crtc_gen_cntl, + addr - CRTC_GEN_CNTL, data, size); + if ((val & CRTC2_CUR_EN) != (s->regs.crtc_gen_cntl & CRTC2_CUR_EN)) { + if (s->cursor_guest_mode) { + s->vga.force_shadow = !!(s->regs.crtc_gen_cntl & CRTC2_CUR_EN); + } else { + if (s->regs.crtc_gen_cntl & CRTC2_CUR_EN) { + ati_cursor_define(s); + } + dpy_mouse_set(s->vga.con, s->regs.cur_hv_pos >> 16, + s->regs.cur_hv_pos & 0xffff, + (s->regs.crtc_gen_cntl & CRTC2_CUR_EN) != 0); + } + } + if ((val & (CRTC2_EXT_DISP_EN | CRTC2_EN)) != + (s->regs.crtc_gen_cntl & (CRTC2_EXT_DISP_EN | CRTC2_EN))) { + ati_vga_switch_mode(s); + } + break; + } + case CRTC_EXT_CNTL ... CRTC_EXT_CNTL + 3: + { + uint32_t val = s->regs.crtc_ext_cntl; + ati_reg_write_offs(&s->regs.crtc_ext_cntl, + addr - CRTC_EXT_CNTL, data, size); + if (s->regs.crtc_ext_cntl & CRT_CRTC_DISPLAY_DIS) { + DPRINTF("Display disabled\n"); + s->vga.ar_index &= ~BIT(5); + } else { + DPRINTF("Display enabled\n"); + s->vga.ar_index |= BIT(5); + ati_vga_switch_mode(s); + } + if ((val & CRT_CRTC_DISPLAY_DIS) != + (s->regs.crtc_ext_cntl & CRT_CRTC_DISPLAY_DIS)) { + ati_vga_switch_mode(s); + } + break; + } + case DAC_CNTL: + s->regs.dac_cntl = data & 0xffffe3ff; + s->vga.dac_8bit = !!(data & DAC_8BIT_EN); + break; + case GPIO_VGA_DDC: + if (s->dev_id != PCI_DEVICE_ID_ATI_RAGE128_PF) { + /* FIXME: Maybe add a property to select VGA or DVI port? */ + } + break; + case GPIO_DVI_DDC: + if (s->dev_id != PCI_DEVICE_ID_ATI_RAGE128_PF) { + s->regs.gpio_dvi_ddc = ati_i2c(&s->bbi2c, data, 0); + } + break; + case GPIO_MONID ... GPIO_MONID + 3: + /* FIXME What does Radeon have here? */ + if (s->dev_id == PCI_DEVICE_ID_ATI_RAGE128_PF) { + ati_reg_write_offs(&s->regs.gpio_monid, + addr - GPIO_MONID, data, size); + /* + * Rage128p accesses DDC used to get EDID via these bits. + * Because some drivers access this via multiple byte writes + * we have to be careful when we send bits to avoid spurious + * changes in bitbang_i2c state. So only do it when mask is set + * and either the enable bits are changed or output bits changed + * while enabled. + */ + if ((s->regs.gpio_monid & BIT(25)) && + ((addr <= GPIO_MONID + 2 && addr + size > GPIO_MONID + 2) || + (addr == GPIO_MONID && (s->regs.gpio_monid & 0x60000)))) { + s->regs.gpio_monid = ati_i2c(&s->bbi2c, s->regs.gpio_monid, 1); + } + } + break; + case PALETTE_INDEX ... PALETTE_INDEX + 3: + if (size == 4) { + vga_ioport_write(&s->vga, VGA_PEL_IR, (data >> 16) & 0xff); + vga_ioport_write(&s->vga, VGA_PEL_IW, data & 0xff); + } else { + if (addr == PALETTE_INDEX) { + vga_ioport_write(&s->vga, VGA_PEL_IW, data & 0xff); + } else { + vga_ioport_write(&s->vga, VGA_PEL_IR, data & 0xff); + } + } + break; + case PALETTE_DATA ... 
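+    /*
+     * The shift/bswap dance below turns a (possibly byte-sized) write to
+     * the 0x00RRGGBB palette register into the R, G, B byte sequence the
+     * VGA DAC expects, e.g. a 32-bit write of 0x00112233 becomes the
+     * three VGA_PEL_D writes 0x11, 0x22, 0x33.
+     */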
PALETTE_DATA + 3: + data <<= addr - PALETTE_DATA; + data = bswap32(data) >> 8; + vga_ioport_write(&s->vga, VGA_PEL_D, data & 0xff); + data >>= 8; + vga_ioport_write(&s->vga, VGA_PEL_D, data & 0xff); + data >>= 8; + vga_ioport_write(&s->vga, VGA_PEL_D, data & 0xff); + break; + case CNFG_CNTL: + s->regs.config_cntl = data; + break; + case CRTC_H_TOTAL_DISP: + s->regs.crtc_h_total_disp = data & 0x07ff07ff; + break; + case CRTC_H_SYNC_STRT_WID: + s->regs.crtc_h_sync_strt_wid = data & 0x17bf1fff; + break; + case CRTC_V_TOTAL_DISP: + s->regs.crtc_v_total_disp = data & 0x0fff0fff; + break; + case CRTC_V_SYNC_STRT_WID: + s->regs.crtc_v_sync_strt_wid = data & 0x9f0fff; + break; + case CRTC_OFFSET: + s->regs.crtc_offset = data & 0xc7ffffff; + break; + case CRTC_OFFSET_CNTL: + s->regs.crtc_offset_cntl = data; /* FIXME */ + break; + case CRTC_PITCH: + s->regs.crtc_pitch = data & 0x07ff07ff; + break; + case 0xf00 ... 0xfff: + /* read-only copy of PCI config space so ignore writes */ + break; + case CUR_OFFSET ... CUR_OFFSET + 3: + { + uint32_t t = s->regs.cur_offset; + + ati_reg_write_offs(&t, addr - CUR_OFFSET, data, size); + t &= 0x87fffff0; + if (s->regs.cur_offset != t) { + s->regs.cur_offset = t; + ati_cursor_define(s); + } + break; + } + case CUR_HORZ_VERT_POSN ... CUR_HORZ_VERT_POSN + 3: + { + uint32_t t = s->regs.cur_hv_pos | (s->regs.cur_offset & BIT(31)); + + ati_reg_write_offs(&t, addr - CUR_HORZ_VERT_POSN, data, size); + s->regs.cur_hv_pos = t & 0x3fff0fff; + if (t & BIT(31)) { + s->regs.cur_offset |= t & BIT(31); + } else if (s->regs.cur_offset & BIT(31)) { + s->regs.cur_offset &= ~BIT(31); + ati_cursor_define(s); + } + if (!s->cursor_guest_mode && + (s->regs.crtc_gen_cntl & CRTC2_CUR_EN) && !(t & BIT(31))) { + dpy_mouse_set(s->vga.con, s->regs.cur_hv_pos >> 16, + s->regs.cur_hv_pos & 0xffff, 1); + } + break; + } + case CUR_HORZ_VERT_OFF: + { + uint32_t t = s->regs.cur_hv_offs | (s->regs.cur_offset & BIT(31)); + + ati_reg_write_offs(&t, addr - CUR_HORZ_VERT_OFF, data, size); + s->regs.cur_hv_offs = t & 0x3f003f; + if (t & BIT(31)) { + s->regs.cur_offset |= t & BIT(31); + } else if (s->regs.cur_offset & BIT(31)) { + s->regs.cur_offset &= ~BIT(31); + ati_cursor_define(s); + } + break; + } + case CUR_CLR0 ... CUR_CLR0 + 3: + { + uint32_t t = s->regs.cur_color0; + + ati_reg_write_offs(&t, addr - CUR_CLR0, data, size); + t &= 0xffffff; + if (s->regs.cur_color0 != t) { + s->regs.cur_color0 = t; + ati_cursor_define(s); + } + break; + } + case CUR_CLR1 ... 
CUR_CLR1 + 3: + /* + * Update cursor unconditionally here because some clients set up + * other registers before actually writing cursor data to memory at + * offset so we would miss cursor change unless always updating here + */ + ati_reg_write_offs(&s->regs.cur_color1, addr - CUR_CLR1, data, size); + s->regs.cur_color1 &= 0xffffff; + ati_cursor_define(s); + break; + case DST_OFFSET: + if (s->dev_id == PCI_DEVICE_ID_ATI_RAGE128_PF) { + s->regs.dst_offset = data & 0xfffffff0; + } else { + s->regs.dst_offset = data & 0xfffffc00; + } + break; + case DST_PITCH: + if (s->dev_id == PCI_DEVICE_ID_ATI_RAGE128_PF) { + s->regs.dst_pitch = data & 0x3fff; + s->regs.dst_tile = (data >> 16) & 1; + } else { + s->regs.dst_pitch = data & 0x3ff0; + } + break; + case DST_TILE: + if (s->dev_id == PCI_DEVICE_ID_ATI_RADEON_QY) { + s->regs.dst_tile = data & 3; + } + break; + case DST_WIDTH: + s->regs.dst_width = data & 0x3fff; + ati_2d_blt(s); + break; + case DST_HEIGHT: + s->regs.dst_height = data & 0x3fff; + break; + case SRC_X: + s->regs.src_x = data & 0x3fff; + break; + case SRC_Y: + s->regs.src_y = data & 0x3fff; + break; + case DST_X: + s->regs.dst_x = data & 0x3fff; + break; + case DST_Y: + s->regs.dst_y = data & 0x3fff; + break; + case SRC_PITCH_OFFSET: + if (s->dev_id == PCI_DEVICE_ID_ATI_RAGE128_PF) { + s->regs.src_offset = (data & 0x1fffff) << 5; + s->regs.src_pitch = (data & 0x7fe00000) >> 21; + s->regs.src_tile = data >> 31; + } else { + s->regs.src_offset = (data & 0x3fffff) << 10; + s->regs.src_pitch = (data & 0x3fc00000) >> 16; + s->regs.src_tile = (data >> 30) & 1; + } + break; + case DST_PITCH_OFFSET: + if (s->dev_id == PCI_DEVICE_ID_ATI_RAGE128_PF) { + s->regs.dst_offset = (data & 0x1fffff) << 5; + s->regs.dst_pitch = (data & 0x7fe00000) >> 21; + s->regs.dst_tile = data >> 31; + } else { + s->regs.dst_offset = (data & 0x3fffff) << 10; + s->regs.dst_pitch = (data & 0x3fc00000) >> 16; + s->regs.dst_tile = data >> 30; + } + break; + case SRC_Y_X: + s->regs.src_x = data & 0x3fff; + s->regs.src_y = (data >> 16) & 0x3fff; + break; + case DST_Y_X: + s->regs.dst_x = data & 0x3fff; + s->regs.dst_y = (data >> 16) & 0x3fff; + break; + case DST_HEIGHT_WIDTH: + s->regs.dst_width = data & 0x3fff; + s->regs.dst_height = (data >> 16) & 0x3fff; + ati_2d_blt(s); + break; + case DP_GUI_MASTER_CNTL: + s->regs.dp_gui_master_cntl = data & 0xf800000f; + s->regs.dp_datatype = (data & 0x0f00) >> 8 | (data & 0x30f0) << 4 | + (data & 0x4000) << 16; + s->regs.dp_mix = (data & GMC_ROP3_MASK) | (data & 0x7000000) >> 16; + break; + case DST_WIDTH_X: + s->regs.dst_x = data & 0x3fff; + s->regs.dst_width = (data >> 16) & 0x3fff; + ati_2d_blt(s); + break; + case SRC_X_Y: + s->regs.src_y = data & 0x3fff; + s->regs.src_x = (data >> 16) & 0x3fff; + break; + case DST_X_Y: + s->regs.dst_y = data & 0x3fff; + s->regs.dst_x = (data >> 16) & 0x3fff; + break; + case DST_WIDTH_HEIGHT: + s->regs.dst_height = data & 0x3fff; + s->regs.dst_width = (data >> 16) & 0x3fff; + ati_2d_blt(s); + break; + case DST_HEIGHT_Y: + s->regs.dst_y = data & 0x3fff; + s->regs.dst_height = (data >> 16) & 0x3fff; + break; + case SRC_OFFSET: + if (s->dev_id == PCI_DEVICE_ID_ATI_RAGE128_PF) { + s->regs.src_offset = data & 0xfffffff0; + } else { + s->regs.src_offset = data & 0xfffffc00; + } + break; + case SRC_PITCH: + if (s->dev_id == PCI_DEVICE_ID_ATI_RAGE128_PF) { + s->regs.src_pitch = data & 0x3fff; + s->regs.src_tile = (data >> 16) & 1; + } else { + s->regs.src_pitch = data & 0x3ff0; + } + break; + case DP_BRUSH_BKGD_CLR: + s->regs.dp_brush_bkgd_clr = data; + 
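/*
+         * The brush colors set here feed the 2D engine: ati_2d_blt() in
+         * ati_2d.c uses dp_brush_frgd_clr as the fill color for
+         * ROP3_PATCOPY solid fills.
+         */
+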
break; + case DP_BRUSH_FRGD_CLR: + s->regs.dp_brush_frgd_clr = data; + break; + case DP_CNTL: + s->regs.dp_cntl = data; + break; + case DP_DATATYPE: + s->regs.dp_datatype = data & 0xe0070f0f; + break; + case DP_MIX: + s->regs.dp_mix = data & 0x00ff0700; + break; + case DP_WRITE_MASK: + s->regs.dp_write_mask = data; + break; + case DEFAULT_OFFSET: + if (s->dev_id == PCI_DEVICE_ID_ATI_RAGE128_PF) { + s->regs.default_offset = data & 0xfffffff0; + } else { + /* Radeon has DEFAULT_PITCH_OFFSET here like DST_PITCH_OFFSET */ + s->regs.default_offset = (data & 0x3fffff) << 10; + s->regs.default_pitch = (data & 0x3fc00000) >> 16; + s->regs.default_tile = data >> 30; + } + break; + case DEFAULT_PITCH: + if (s->dev_id == PCI_DEVICE_ID_ATI_RAGE128_PF) { + s->regs.default_pitch = data & 0x3fff; + s->regs.default_tile = (data >> 16) & 1; + } + break; + case DEFAULT_SC_BOTTOM_RIGHT: + s->regs.default_sc_bottom_right = data & 0x3fff3fff; + break; + default: + break; + } +} + +static const MemoryRegionOps ati_mm_ops = { + .read = ati_mm_read, + .write = ati_mm_write, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static void ati_vga_realize(PCIDevice *dev, Error **errp) +{ + ATIVGAState *s = ATI_VGA(dev); + VGACommonState *vga = &s->vga; + + if (s->model) { + int i; + for (i = 0; i < ARRAY_SIZE(ati_model_aliases); i++) { + if (!strcmp(s->model, ati_model_aliases[i].name)) { + s->dev_id = ati_model_aliases[i].dev_id; + break; + } + } + if (i >= ARRAY_SIZE(ati_model_aliases)) { + warn_report("Unknown ATI VGA model name, " + "using default rage128p"); + } + } + if (s->dev_id != PCI_DEVICE_ID_ATI_RAGE128_PF && + s->dev_id != PCI_DEVICE_ID_ATI_RADEON_QY) { + error_setg(errp, "Unknown ATI VGA device id, " + "only 0x5046 and 0x5159 are supported"); + return; + } + pci_set_word(dev->config + PCI_DEVICE_ID, s->dev_id); + + if (s->dev_id == PCI_DEVICE_ID_ATI_RADEON_QY && + s->vga.vram_size_mb < 16) { + warn_report("Too small video memory for device id"); + s->vga.vram_size_mb = 16; + } + + /* init vga bits */ + vga_common_init(vga, OBJECT(s)); + vga_init(vga, OBJECT(s), pci_address_space(dev), + pci_address_space_io(dev), true); + vga->con = graphic_console_init(DEVICE(s), 0, s->vga.hw_ops, &s->vga); + if (s->cursor_guest_mode) { + vga->cursor_invalidate = ati_cursor_invalidate; + vga->cursor_draw_line = ati_cursor_draw_line; + } + + /* ddc, edid */ + I2CBus *i2cbus = i2c_init_bus(DEVICE(s), "ati-vga.ddc"); + bitbang_i2c_init(&s->bbi2c, i2cbus); + I2CSlave *i2cddc = I2C_SLAVE(qdev_new(TYPE_I2CDDC)); + i2c_slave_set_address(i2cddc, 0x50); + qdev_realize_and_unref(DEVICE(i2cddc), BUS(i2cbus), &error_abort); + + /* mmio register space */ + memory_region_init_io(&s->mm, OBJECT(s), &ati_mm_ops, s, + "ati.mmregs", 0x4000); + /* io space is alias to beginning of mmregs */ + memory_region_init_alias(&s->io, OBJECT(s), "ati.io", &s->mm, 0, 0x100); + + pci_register_bar(dev, 0, PCI_BASE_ADDRESS_MEM_PREFETCH, &vga->vram); + pci_register_bar(dev, 1, PCI_BASE_ADDRESS_SPACE_IO, &s->io); + pci_register_bar(dev, 2, PCI_BASE_ADDRESS_SPACE_MEMORY, &s->mm); + + /* most interrupts are not yet emulated but MacOS needs at least VBlank */ + dev->config[PCI_INTERRUPT_PIN] = 1; + timer_init_ns(&s->vblank_timer, QEMU_CLOCK_VIRTUAL, ati_vga_vblank_irq, s); +} + +static void ati_vga_reset(DeviceState *dev) +{ + ATIVGAState *s = ATI_VGA(dev); + + timer_del(&s->vblank_timer); + ati_vga_update_irq(s); + + /* reset vga */ + vga_common_reset(&s->vga); + s->mode = VGA_MODE; +} + +static void ati_vga_exit(PCIDevice *dev) +{ + ATIVGAState *s = 
ATI_VGA(dev); + + timer_del(&s->vblank_timer); + graphic_console_close(s->vga.con); +} + +static Property ati_vga_properties[] = { + DEFINE_PROP_UINT32("vgamem_mb", ATIVGAState, vga.vram_size_mb, 16), + DEFINE_PROP_STRING("model", ATIVGAState, model), + DEFINE_PROP_UINT16("x-device-id", ATIVGAState, dev_id, + PCI_DEVICE_ID_ATI_RAGE128_PF), + DEFINE_PROP_BOOL("guest_hwcursor", ATIVGAState, cursor_guest_mode, false), + DEFINE_PROP_END_OF_LIST() +}; + +static void ati_vga_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + + dc->reset = ati_vga_reset; + device_class_set_props(dc, ati_vga_properties); + dc->hotpluggable = false; + set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories); + + k->class_id = PCI_CLASS_DISPLAY_VGA; + k->vendor_id = PCI_VENDOR_ID_ATI; + k->device_id = PCI_DEVICE_ID_ATI_RAGE128_PF; + k->romfile = "vgabios-ati.bin"; + k->realize = ati_vga_realize; + k->exit = ati_vga_exit; +} + +static const TypeInfo ati_vga_info = { + .name = TYPE_ATI_VGA, + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(ATIVGAState), + .class_init = ati_vga_class_init, + .interfaces = (InterfaceInfo[]) { + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { }, + }, +}; + +static void ati_vga_register_types(void) +{ + type_register_static(&ati_vga_info); +} + +type_init(ati_vga_register_types) diff --git a/hw/display/ati_2d.c b/hw/display/ati_2d.c new file mode 100644 index 000000000..4dc10ea79 --- /dev/null +++ b/hw/display/ati_2d.c @@ -0,0 +1,206 @@ +/* + * QEMU ATI SVGA emulation + * 2D engine functions + * + * Copyright (c) 2019 BALATON Zoltan + * + * This work is licensed under the GNU GPL license version 2 or later. + */ + +#include "qemu/osdep.h" +#include "ati_int.h" +#include "ati_regs.h" +#include "qemu/log.h" +#include "ui/pixel_ops.h" + +/* + * NOTE: + * This is 2D _acceleration_ and supposed to be fast. Therefore, don't try to + * reinvent the wheel (unlikely to get better with a naive implementation than + * existing libraries) and avoid (poorly) reimplementing gfx primitives. + * That is unnecessary and would become a performance problem. Instead, try to + * map to and reuse existing optimised facilities (e.g. pixman) wherever + * possible. + */ + +static int ati_bpp_from_datatype(ATIVGAState *s) +{ + switch (s->regs.dp_datatype & 0xf) { + case 2: + return 8; + case 3: + case 4: + return 16; + case 5: + return 24; + case 6: + return 32; + default: + qemu_log_mask(LOG_UNIMP, "Unknown dst datatype %d\n", + s->regs.dp_datatype & 0xf); + return 0; + } +} + +#define DEFAULT_CNTL (s->regs.dp_gui_master_cntl & GMC_DST_PITCH_OFFSET_CNTL) + +void ati_2d_blt(ATIVGAState *s) +{ + /* FIXME it is probably more complex than this and may need to be */ + /* rewritten but for now as a start just to get some output: */ + DisplaySurface *ds = qemu_console_surface(s->vga.con); + DPRINTF("%p %u ds: %p %d %d rop: %x\n", s->vga.vram_ptr, + s->vga.vbe_start_addr, surface_data(ds), surface_stride(ds), + surface_bits_per_pixel(ds), + (s->regs.dp_mix & GMC_ROP3_MASK) >> 16); + unsigned dst_x = (s->regs.dp_cntl & DST_X_LEFT_TO_RIGHT ? + s->regs.dst_x : s->regs.dst_x + 1 - s->regs.dst_width); + unsigned dst_y = (s->regs.dp_cntl & DST_Y_TOP_TO_BOTTOM ? + s->regs.dst_y : s->regs.dst_y + 1 - s->regs.dst_height); + int bpp = ati_bpp_from_datatype(s); + if (!bpp) { + qemu_log_mask(LOG_GUEST_ERROR, "Invalid bpp\n"); + return; + } + int dst_stride = DEFAULT_CNTL ? 
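+    /*
+     * GMC_DST_PITCH_OFFSET_CNTL in DP_GUI_MASTER_CNTL selects whether
+     * the per-operation DST_PITCH/DST_OFFSET registers or the engine's
+     * DEFAULT_PITCH/DEFAULT_OFFSET describe the destination surface:
+     */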
s->regs.dst_pitch : s->regs.default_pitch; + if (!dst_stride) { + qemu_log_mask(LOG_GUEST_ERROR, "Zero dest pitch\n"); + return; + } + uint8_t *dst_bits = s->vga.vram_ptr + (DEFAULT_CNTL ? + s->regs.dst_offset : s->regs.default_offset); + + if (s->dev_id == PCI_DEVICE_ID_ATI_RAGE128_PF) { + dst_bits += s->regs.crtc_offset & 0x07ffffff; + dst_stride *= bpp; + } + uint8_t *end = s->vga.vram_ptr + s->vga.vram_size; + if (dst_x > 0x3fff || dst_y > 0x3fff || dst_bits >= end + || dst_bits + dst_x + + (dst_y + s->regs.dst_height) * dst_stride >= end) { + qemu_log_mask(LOG_UNIMP, "blt outside vram not implemented\n"); + return; + } + DPRINTF("%d %d %d, %d %d %d, (%d,%d) -> (%d,%d) %dx%d %c %c\n", + s->regs.src_offset, s->regs.dst_offset, s->regs.default_offset, + s->regs.src_pitch, s->regs.dst_pitch, s->regs.default_pitch, + s->regs.src_x, s->regs.src_y, s->regs.dst_x, s->regs.dst_y, + s->regs.dst_width, s->regs.dst_height, + (s->regs.dp_cntl & DST_X_LEFT_TO_RIGHT ? '>' : '<'), + (s->regs.dp_cntl & DST_Y_TOP_TO_BOTTOM ? 'v' : '^')); + switch (s->regs.dp_mix & GMC_ROP3_MASK) { + case ROP3_SRCCOPY: + { + unsigned src_x = (s->regs.dp_cntl & DST_X_LEFT_TO_RIGHT ? + s->regs.src_x : s->regs.src_x + 1 - s->regs.dst_width); + unsigned src_y = (s->regs.dp_cntl & DST_Y_TOP_TO_BOTTOM ? + s->regs.src_y : s->regs.src_y + 1 - s->regs.dst_height); + int src_stride = DEFAULT_CNTL ? + s->regs.src_pitch : s->regs.default_pitch; + if (!src_stride) { + qemu_log_mask(LOG_GUEST_ERROR, "Zero source pitch\n"); + return; + } + uint8_t *src_bits = s->vga.vram_ptr + (DEFAULT_CNTL ? + s->regs.src_offset : s->regs.default_offset); + + if (s->dev_id == PCI_DEVICE_ID_ATI_RAGE128_PF) { + src_bits += s->regs.crtc_offset & 0x07ffffff; + src_stride *= bpp; + } + if (src_x > 0x3fff || src_y > 0x3fff || src_bits >= end + || src_bits + src_x + + (src_y + s->regs.dst_height) * src_stride >= end) { + qemu_log_mask(LOG_UNIMP, "blt outside vram not implemented\n"); + return; + } + + src_stride /= sizeof(uint32_t); + dst_stride /= sizeof(uint32_t); + DPRINTF("pixman_blt(%p, %p, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d)\n", + src_bits, dst_bits, src_stride, dst_stride, bpp, bpp, + src_x, src_y, dst_x, dst_y, + s->regs.dst_width, s->regs.dst_height); + if (s->regs.dp_cntl & DST_X_LEFT_TO_RIGHT && + s->regs.dp_cntl & DST_Y_TOP_TO_BOTTOM) { + pixman_blt((uint32_t *)src_bits, (uint32_t *)dst_bits, + src_stride, dst_stride, bpp, bpp, + src_x, src_y, dst_x, dst_y, + s->regs.dst_width, s->regs.dst_height); + } else { + /* FIXME: We only really need a temporary if src and dst overlap */ + int llb = s->regs.dst_width * (bpp / 8); + int tmp_stride = DIV_ROUND_UP(llb, sizeof(uint32_t)); + uint32_t *tmp = g_malloc(tmp_stride * sizeof(uint32_t) * + s->regs.dst_height); + pixman_blt((uint32_t *)src_bits, tmp, + src_stride, tmp_stride, bpp, bpp, + src_x, src_y, 0, 0, + s->regs.dst_width, s->regs.dst_height); + pixman_blt(tmp, (uint32_t *)dst_bits, + tmp_stride, dst_stride, bpp, bpp, + 0, 0, dst_x, dst_y, + s->regs.dst_width, s->regs.dst_height); + g_free(tmp); + } + if (dst_bits >= s->vga.vram_ptr + s->vga.vbe_start_addr && + dst_bits < s->vga.vram_ptr + s->vga.vbe_start_addr + + s->vga.vbe_regs[VBE_DISPI_INDEX_YRES] * s->vga.vbe_line_offset) { + memory_region_set_dirty(&s->vga.vram, s->vga.vbe_start_addr + + s->regs.dst_offset + + dst_y * surface_stride(ds), + s->regs.dst_height * surface_stride(ds)); + } + s->regs.dst_x = (s->regs.dp_cntl & DST_X_LEFT_TO_RIGHT ? 
+ dst_x + s->regs.dst_width : dst_x); + s->regs.dst_y = (s->regs.dp_cntl & DST_Y_TOP_TO_BOTTOM ? + dst_y + s->regs.dst_height : dst_y); + break; + } + case ROP3_PATCOPY: + case ROP3_BLACKNESS: + case ROP3_WHITENESS: + { + uint32_t filler = 0; + + switch (s->regs.dp_mix & GMC_ROP3_MASK) { + case ROP3_PATCOPY: + filler = s->regs.dp_brush_frgd_clr; + break; + case ROP3_BLACKNESS: + filler = 0xffUL << 24 | rgb_to_pixel32(s->vga.palette[0], + s->vga.palette[1], s->vga.palette[2]); + break; + case ROP3_WHITENESS: + filler = 0xffUL << 24 | rgb_to_pixel32(s->vga.palette[3], + s->vga.palette[4], s->vga.palette[5]); + break; + } + + dst_stride /= sizeof(uint32_t); + DPRINTF("pixman_fill(%p, %d, %d, %d, %d, %d, %d, %x)\n", + dst_bits, dst_stride, bpp, + s->regs.dst_x, s->regs.dst_y, + s->regs.dst_width, s->regs.dst_height, + filler); + pixman_fill((uint32_t *)dst_bits, dst_stride, bpp, + s->regs.dst_x, s->regs.dst_y, + s->regs.dst_width, s->regs.dst_height, + filler); + if (dst_bits >= s->vga.vram_ptr + s->vga.vbe_start_addr && + dst_bits < s->vga.vram_ptr + s->vga.vbe_start_addr + + s->vga.vbe_regs[VBE_DISPI_INDEX_YRES] * s->vga.vbe_line_offset) { + memory_region_set_dirty(&s->vga.vram, s->vga.vbe_start_addr + + s->regs.dst_offset + + dst_y * surface_stride(ds), + s->regs.dst_height * surface_stride(ds)); + } + s->regs.dst_y = (s->regs.dp_cntl & DST_Y_TOP_TO_BOTTOM ? + dst_y + s->regs.dst_height : dst_y); + break; + } + default: + qemu_log_mask(LOG_UNIMP, "Unimplemented ati_2d blt op %x\n", + (s->regs.dp_mix & GMC_ROP3_MASK) >> 16); + } +} diff --git a/hw/display/ati_dbg.c b/hw/display/ati_dbg.c new file mode 100644 index 000000000..bd0ecd48c --- /dev/null +++ b/hw/display/ati_dbg.c @@ -0,0 +1,273 @@ +#include "qemu/osdep.h" +#include "ati_int.h" + +#ifdef DEBUG_ATI +struct ati_regdesc { + const char *name; + int num; +}; + +static struct ati_regdesc ati_reg_names[] = { + {"MM_INDEX", 0x0000}, + {"MM_DATA", 0x0004}, + {"CLOCK_CNTL_INDEX", 0x0008}, + {"CLOCK_CNTL_DATA", 0x000c}, + {"BIOS_0_SCRATCH", 0x0010}, + {"BUS_CNTL", 0x0030}, + {"BUS_CNTL1", 0x0034}, + {"GEN_INT_CNTL", 0x0040}, + {"GEN_INT_STATUS", 0x0044}, + {"CRTC_GEN_CNTL", 0x0050}, + {"CRTC_EXT_CNTL", 0x0054}, + {"DAC_CNTL", 0x0058}, + {"GPIO_VGA_DDC", 0x0060}, + {"GPIO_DVI_DDC", 0x0064}, + {"GPIO_MONID", 0x0068}, + {"I2C_CNTL_1", 0x0094}, + {"AMCGPIO_MASK_MIR", 0x009c}, + {"AMCGPIO_A_MIR", 0x00a0}, + {"AMCGPIO_Y_MIR", 0x00a4}, + {"AMCGPIO_EN_MIR", 0x00a8}, + {"PALETTE_INDEX", 0x00b0}, + {"PALETTE_DATA", 0x00b4}, + {"CNFG_CNTL", 0x00e0}, + {"GEN_RESET_CNTL", 0x00f0}, + {"CNFG_MEMSIZE", 0x00f8}, + {"CONFIG_APER_0_BASE", 0x0100}, + {"CONFIG_APER_1_BASE", 0x0104}, + {"CONFIG_APER_SIZE", 0x0108}, + {"CONFIG_REG_1_BASE", 0x010c}, + {"CONFIG_REG_APER_SIZE", 0x0110}, + {"MEM_CNTL", 0x0140}, + {"MC_FB_LOCATION", 0x0148}, + {"MC_AGP_LOCATION", 0x014C}, + {"MC_STATUS", 0x0150}, + {"MEM_SDRAM_MODE_REG", 0x0158}, + {"MEM_POWER_MISC", 0x015c}, + {"AGP_BASE", 0x0170}, + {"AGP_CNTL", 0x0174}, + {"AGP_APER_OFFSET", 0x0178}, + {"PCI_GART_PAGE", 0x017c}, + {"PC_NGUI_MODE", 0x0180}, + {"PC_NGUI_CTLSTAT", 0x0184}, + {"MPP_TB_CONFIG", 0x01C0}, + {"MPP_GP_CONFIG", 0x01C8}, + {"VIPH_CONTROL", 0x01D0}, + {"CRTC_H_TOTAL_DISP", 0x0200}, + {"CRTC_H_SYNC_STRT_WID", 0x0204}, + {"CRTC_V_TOTAL_DISP", 0x0208}, + {"CRTC_V_SYNC_STRT_WID", 0x020c}, + {"CRTC_VLINE_CRNT_VLINE", 0x0210}, + {"CRTC_CRNT_FRAME", 0x0214}, + {"CRTC_GUI_TRIG_VLINE", 0x0218}, + {"CRTC_OFFSET", 0x0224}, + {"CRTC_OFFSET_CNTL", 0x0228}, + {"CRTC_PITCH", 0x022c}, + {"OVR_CLR", 0x0230}, + 
{"OVR_WID_LEFT_RIGHT", 0x0234}, + {"OVR_WID_TOP_BOTTOM", 0x0238}, + {"CUR_OFFSET", 0x0260}, + {"CUR_HORZ_VERT_POSN", 0x0264}, + {"CUR_HORZ_VERT_OFF", 0x0268}, + {"CUR_CLR0", 0x026c}, + {"CUR_CLR1", 0x0270}, + {"LVDS_GEN_CNTL", 0x02d0}, + {"DDA_CONFIG", 0x02e0}, + {"DDA_ON_OFF", 0x02e4}, + {"VGA_DDA_CONFIG", 0x02e8}, + {"VGA_DDA_ON_OFF", 0x02ec}, + {"CRTC2_H_TOTAL_DISP", 0x0300}, + {"CRTC2_H_SYNC_STRT_WID", 0x0304}, + {"CRTC2_V_TOTAL_DISP", 0x0308}, + {"CRTC2_V_SYNC_STRT_WID", 0x030c}, + {"CRTC2_VLINE_CRNT_VLINE", 0x0310}, + {"CRTC2_CRNT_FRAME", 0x0314}, + {"CRTC2_GUI_TRIG_VLINE", 0x0318}, + {"CRTC2_OFFSET", 0x0324}, + {"CRTC2_OFFSET_CNTL", 0x0328}, + {"CRTC2_PITCH", 0x032c}, + {"DDA2_CONFIG", 0x03e0}, + {"DDA2_ON_OFF", 0x03e4}, + {"CRTC2_GEN_CNTL", 0x03f8}, + {"CRTC2_STATUS", 0x03fc}, + {"OV0_SCALE_CNTL", 0x0420}, + {"SUBPIC_CNTL", 0x0540}, + {"PM4_BUFFER_OFFSET", 0x0700}, + {"PM4_BUFFER_CNTL", 0x0704}, + {"PM4_BUFFER_WM_CNTL", 0x0708}, + {"PM4_BUFFER_DL_RPTR_ADDR", 0x070c}, + {"PM4_BUFFER_DL_RPTR", 0x0710}, + {"PM4_BUFFER_DL_WPTR", 0x0714}, + {"PM4_VC_FPU_SETUP", 0x071c}, + {"PM4_FPU_CNTL", 0x0720}, + {"PM4_VC_FORMAT", 0x0724}, + {"PM4_VC_CNTL", 0x0728}, + {"PM4_VC_I01", 0x072c}, + {"PM4_VC_VLOFF", 0x0730}, + {"PM4_VC_VLSIZE", 0x0734}, + {"PM4_IW_INDOFF", 0x0738}, + {"PM4_IW_INDSIZE", 0x073c}, + {"PM4_FPU_FPX0", 0x0740}, + {"PM4_FPU_FPY0", 0x0744}, + {"PM4_FPU_FPX1", 0x0748}, + {"PM4_FPU_FPY1", 0x074c}, + {"PM4_FPU_FPX2", 0x0750}, + {"PM4_FPU_FPY2", 0x0754}, + {"PM4_FPU_FPY3", 0x0758}, + {"PM4_FPU_FPY4", 0x075c}, + {"PM4_FPU_FPY5", 0x0760}, + {"PM4_FPU_FPY6", 0x0764}, + {"PM4_FPU_FPR", 0x0768}, + {"PM4_FPU_FPG", 0x076c}, + {"PM4_FPU_FPB", 0x0770}, + {"PM4_FPU_FPA", 0x0774}, + {"PM4_FPU_INTXY0", 0x0780}, + {"PM4_FPU_INTXY1", 0x0784}, + {"PM4_FPU_INTXY2", 0x0788}, + {"PM4_FPU_INTARGB", 0x078c}, + {"PM4_FPU_FPTWICEAREA", 0x0790}, + {"PM4_FPU_DMAJOR01", 0x0794}, + {"PM4_FPU_DMAJOR12", 0x0798}, + {"PM4_FPU_DMAJOR02", 0x079c}, + {"PM4_FPU_STAT", 0x07a0}, + {"PM4_STAT", 0x07b8}, + {"PM4_TEST_CNTL", 0x07d0}, + {"PM4_MICROCODE_ADDR", 0x07d4}, + {"PM4_MICROCODE_RADDR", 0x07d8}, + {"PM4_MICROCODE_DATAH", 0x07dc}, + {"PM4_MICROCODE_DATAL", 0x07e0}, + {"PM4_CMDFIFO_ADDR", 0x07e4}, + {"PM4_CMDFIFO_DATAH", 0x07e8}, + {"PM4_CMDFIFO_DATAL", 0x07ec}, + {"PM4_BUFFER_ADDR", 0x07f0}, + {"PM4_BUFFER_DATAH", 0x07f4}, + {"PM4_BUFFER_DATAL", 0x07f8}, + {"PM4_MICRO_CNTL", 0x07fc}, + {"CAP0_TRIG_CNTL", 0x0950}, + {"CAP1_TRIG_CNTL", 0x09c0}, + {"RBBM_STATUS", 0x0e40}, + {"PM4_FIFO_DATA_EVEN", 0x1000}, + {"PM4_FIFO_DATA_ODD", 0x1004}, + {"DST_OFFSET", 0x1404}, + {"DST_PITCH", 0x1408}, + {"DST_WIDTH", 0x140c}, + {"DST_HEIGHT", 0x1410}, + {"SRC_X", 0x1414}, + {"SRC_Y", 0x1418}, + {"DST_X", 0x141c}, + {"DST_Y", 0x1420}, + {"SRC_PITCH_OFFSET", 0x1428}, + {"DST_PITCH_OFFSET", 0x142c}, + {"SRC_Y_X", 0x1434}, + {"DST_Y_X", 0x1438}, + {"DST_HEIGHT_WIDTH", 0x143c}, + {"DP_GUI_MASTER_CNTL", 0x146c}, + {"BRUSH_SCALE", 0x1470}, + {"BRUSH_Y_X", 0x1474}, + {"DP_BRUSH_BKGD_CLR", 0x1478}, + {"DP_BRUSH_FRGD_CLR", 0x147c}, + {"DST_WIDTH_X", 0x1588}, + {"DST_HEIGHT_WIDTH_8", 0x158c}, + {"SRC_X_Y", 0x1590}, + {"DST_X_Y", 0x1594}, + {"DST_WIDTH_HEIGHT", 0x1598}, + {"DST_WIDTH_X_INCY", 0x159c}, + {"DST_HEIGHT_Y", 0x15a0}, + {"DST_X_SUB", 0x15a4}, + {"DST_Y_SUB", 0x15a8}, + {"SRC_OFFSET", 0x15ac}, + {"SRC_PITCH", 0x15b0}, + {"DST_HEIGHT_WIDTH_BW", 0x15b4}, + {"CLR_CMP_CNTL", 0x15c0}, + {"CLR_CMP_CLR_SRC", 0x15c4}, + {"CLR_CMP_CLR_DST", 0x15c8}, + {"CLR_CMP_MASK", 0x15cc}, + {"DP_SRC_FRGD_CLR", 0x15d8}, + {"DP_SRC_BKGD_CLR", 0x15dc}, + 
{"DST_BRES_ERR", 0x1628}, + {"DST_BRES_INC", 0x162c}, + {"DST_BRES_DEC", 0x1630}, + {"DST_BRES_LNTH", 0x1634}, + {"DST_BRES_LNTH_SUB", 0x1638}, + {"SC_LEFT", 0x1640}, + {"SC_RIGHT", 0x1644}, + {"SC_TOP", 0x1648}, + {"SC_BOTTOM", 0x164c}, + {"SRC_SC_RIGHT", 0x1654}, + {"SRC_SC_BOTTOM", 0x165c}, + {"GUI_DEBUG0", 0x16a0}, + {"GUI_DEBUG1", 0x16a4}, + {"GUI_TIMEOUT", 0x16b0}, + {"GUI_TIMEOUT0", 0x16b4}, + {"GUI_TIMEOUT1", 0x16b8}, + {"GUI_PROBE", 0x16bc}, + {"DP_CNTL", 0x16c0}, + {"DP_DATATYPE", 0x16c4}, + {"DP_MIX", 0x16c8}, + {"DP_WRITE_MASK", 0x16cc}, + {"DP_CNTL_XDIR_YDIR_YMAJOR", 0x16d0}, + {"DEFAULT_OFFSET", 0x16e0}, + {"DEFAULT_PITCH", 0x16e4}, + {"DEFAULT_SC_BOTTOM_RIGHT", 0x16e8}, + {"SC_TOP_LEFT", 0x16ec}, + {"SC_BOTTOM_RIGHT", 0x16f0}, + {"SRC_SC_BOTTOM_RIGHT", 0x16f4}, + {"DST_TILE", 0x1700}, + {"WAIT_UNTIL", 0x1720}, + {"CACHE_CNTL", 0x1724}, + {"GUI_STAT", 0x1740}, + {"PC_GUI_MODE", 0x1744}, + {"PC_GUI_CTLSTAT", 0x1748}, + {"PC_DEBUG_MODE", 0x1760}, + {"BRES_DST_ERR_DEC", 0x1780}, + {"TRAIL_BRES_T12_ERR_DEC", 0x1784}, + {"TRAIL_BRES_T12_INC", 0x1788}, + {"DP_T12_CNTL", 0x178c}, + {"DST_BRES_T1_LNTH", 0x1790}, + {"DST_BRES_T2_LNTH", 0x1794}, + {"SCALE_SRC_HEIGHT_WIDTH", 0x1994}, + {"SCALE_OFFSET_0", 0x1998}, + {"SCALE_PITCH", 0x199c}, + {"SCALE_X_INC", 0x19a0}, + {"SCALE_Y_INC", 0x19a4}, + {"SCALE_HACC", 0x19a8}, + {"SCALE_VACC", 0x19ac}, + {"SCALE_DST_X_Y", 0x19b0}, + {"SCALE_DST_HEIGHT_WIDTH", 0x19b4}, + {"SCALE_3D_CNTL", 0x1a00}, + {"SCALE_3D_DATATYPE", 0x1a20}, + {"SETUP_CNTL", 0x1bc4}, + {"SOLID_COLOR", 0x1bc8}, + {"WINDOW_XY_OFFSET", 0x1bcc}, + {"DRAW_LINE_POINT", 0x1bd0}, + {"SETUP_CNTL_PM4", 0x1bd4}, + {"DST_PITCH_OFFSET_C", 0x1c80}, + {"DP_GUI_MASTER_CNTL_C", 0x1c84}, + {"SC_TOP_LEFT_C", 0x1c88}, + {"SC_BOTTOM_RIGHT_C", 0x1c8c}, + {"CLR_CMP_MASK_3D", 0x1A28}, + {"MISC_3D_STATE_CNTL_REG", 0x1CA0}, + {"MC_SRC1_CNTL", 0x19D8}, + {"TEX_CNTL", 0x1800}, + {"RAGE128_MPP_TB_CONFIG", 0x01c0}, + {NULL, -1} +}; + +const char *ati_reg_name(int num) +{ + int i; + + num &= ~3; + for (i = 0; ati_reg_names[i].name; i++) { + if (ati_reg_names[i].num == num) { + return ati_reg_names[i].name; + } + } + return "unknown"; +} +#else +const char *ati_reg_name(int num) +{ + return ""; +} +#endif diff --git a/hw/display/ati_int.h b/hw/display/ati_int.h new file mode 100644 index 000000000..8acb9c746 --- /dev/null +++ b/hw/display/ati_int.h @@ -0,0 +1,107 @@ +/* + * QEMU ATI SVGA emulation + * + * Copyright (c) 2019 BALATON Zoltan + * + * This work is licensed under the GNU GPL license version 2 or later. + */ + +#ifndef ATI_INT_H +#define ATI_INT_H + +#include "qemu/timer.h" +#include "hw/pci/pci.h" +#include "hw/i2c/bitbang_i2c.h" +#include "vga_int.h" +#include "qom/object.h" + +/*#define DEBUG_ATI*/ + +#ifdef DEBUG_ATI +#define DPRINTF(fmt, ...) printf("%s: " fmt, __func__, ## __VA_ARGS__) +#else +#define DPRINTF(fmt, ...) 
do {} while (0) +#endif + +#define PCI_VENDOR_ID_ATI 0x1002 +/* Rage128 Pro GL */ +#define PCI_DEVICE_ID_ATI_RAGE128_PF 0x5046 +/* Radeon RV100 (VE) */ +#define PCI_DEVICE_ID_ATI_RADEON_QY 0x5159 + +#define TYPE_ATI_VGA "ati-vga" +OBJECT_DECLARE_SIMPLE_TYPE(ATIVGAState, ATI_VGA) + +typedef struct ATIVGARegs { + uint32_t mm_index; + uint32_t bios_scratch[8]; + uint32_t gen_int_cntl; + uint32_t gen_int_status; + uint32_t crtc_gen_cntl; + uint32_t crtc_ext_cntl; + uint32_t dac_cntl; + uint32_t gpio_vga_ddc; + uint32_t gpio_dvi_ddc; + uint32_t gpio_monid; + uint32_t config_cntl; + uint32_t crtc_h_total_disp; + uint32_t crtc_h_sync_strt_wid; + uint32_t crtc_v_total_disp; + uint32_t crtc_v_sync_strt_wid; + uint32_t crtc_offset; + uint32_t crtc_offset_cntl; + uint32_t crtc_pitch; + uint32_t cur_offset; + uint32_t cur_hv_pos; + uint32_t cur_hv_offs; + uint32_t cur_color0; + uint32_t cur_color1; + uint32_t dst_offset; + uint32_t dst_pitch; + uint32_t dst_tile; + uint32_t dst_width; + uint32_t dst_height; + uint32_t src_offset; + uint32_t src_pitch; + uint32_t src_tile; + uint32_t src_x; + uint32_t src_y; + uint32_t dst_x; + uint32_t dst_y; + uint32_t dp_gui_master_cntl; + uint32_t dp_brush_bkgd_clr; + uint32_t dp_brush_frgd_clr; + uint32_t dp_src_frgd_clr; + uint32_t dp_src_bkgd_clr; + uint32_t dp_cntl; + uint32_t dp_datatype; + uint32_t dp_mix; + uint32_t dp_write_mask; + uint32_t default_offset; + uint32_t default_pitch; + uint32_t default_tile; + uint32_t default_sc_bottom_right; +} ATIVGARegs; + +struct ATIVGAState { + PCIDevice dev; + VGACommonState vga; + char *model; + uint16_t dev_id; + uint8_t mode; + bool cursor_guest_mode; + uint16_t cursor_size; + uint32_t cursor_offset; + QEMUCursor *cursor; + QEMUTimer vblank_timer; + bitbang_i2c_interface bbi2c; + MemoryRegion io; + MemoryRegion mm; + ATIVGARegs regs; +}; + +const char *ati_reg_name(int num); + +void ati_2d_blt(ATIVGAState *s); + +#endif /* ATI_INT_H */ diff --git a/hw/display/ati_regs.h b/hw/display/ati_regs.h new file mode 100644 index 000000000..d6282b2ef --- /dev/null +++ b/hw/display/ati_regs.h @@ -0,0 +1,481 @@ +/* + * ATI VGA register definitions + * + * based on: + * linux/include/video/aty128.h + * Register definitions for ATI Rage128 boards + * Anthony Tong , 1999 + * Brad Douglas , 2000 + * + * and linux/include/video/radeon.h + * + * This work is licensed under the GNU GPL license version 2. 
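
ati_int.h above leans on OBJECT_DECLARE_SIMPLE_TYPE(ATIVGAState, ATI_VGA) for the QOM boilerplate; roughly speaking, the macro emits the struct typedef plus the checked-cast helper that ati.c uses as ATI_VGA(dev). A sketch of the generated shape (not the verbatim expansion):

    typedef struct ATIVGAState ATIVGAState;

    /* Checked downcast: asserts at runtime that obj really is an ati-vga. */
    static inline ATIVGAState *ATI_VGA(const void *obj)
    {
        return OBJECT_CHECK(ATIVGAState, obj, TYPE_ATI_VGA);
    }
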
+ */ + +/* + * Register mapping: + * 0x0000-0x00ff Misc regs also accessible via io and mmio space + * 0x0100-0x0eff Misc regs only accessible via mmio + * 0x0f00-0x0fff Read-only copy of PCI config regs + * 0x1000-0x13ff Concurrent Command Engine (CCE) regs + * 0x1400-0x1fff GUI (drawing engine) regs + */ + +#ifndef ATI_REGS_H +#define ATI_REGS_H + +#undef DEFAULT_PITCH /* needed for mingw builds */ + +#define MM_INDEX 0x0000 +#define MM_DATA 0x0004 +#define CLOCK_CNTL_INDEX 0x0008 +#define CLOCK_CNTL_DATA 0x000c +#define BIOS_0_SCRATCH 0x0010 +#define BUS_CNTL 0x0030 +#define BUS_CNTL1 0x0034 +#define GEN_INT_CNTL 0x0040 +#define GEN_INT_STATUS 0x0044 +#define CRTC_GEN_CNTL 0x0050 +#define CRTC_EXT_CNTL 0x0054 +#define DAC_CNTL 0x0058 +#define GPIO_VGA_DDC 0x0060 +#define GPIO_DVI_DDC 0x0064 +#define GPIO_MONID 0x0068 +#define I2C_CNTL_1 0x0094 +#define AMCGPIO_MASK_MIR 0x009c +#define AMCGPIO_A_MIR 0x00a0 +#define AMCGPIO_Y_MIR 0x00a4 +#define AMCGPIO_EN_MIR 0x00a8 +#define PALETTE_INDEX 0x00b0 +#define PALETTE_DATA 0x00b4 +#define CNFG_CNTL 0x00e0 +#define GEN_RESET_CNTL 0x00f0 +#define CNFG_MEMSIZE 0x00f8 +#define CONFIG_APER_0_BASE 0x0100 +#define CONFIG_APER_1_BASE 0x0104 +#define CONFIG_APER_SIZE 0x0108 +#define CONFIG_REG_1_BASE 0x010c +#define CONFIG_REG_APER_SIZE 0x0110 +#define MEM_CNTL 0x0140 +#define MC_FB_LOCATION 0x0148 +#define MC_AGP_LOCATION 0x014C +#define MC_STATUS 0x0150 +#define MEM_SDRAM_MODE_REG 0x0158 +#define MEM_POWER_MISC 0x015c +#define AGP_BASE 0x0170 +#define AGP_CNTL 0x0174 +#define AGP_APER_OFFSET 0x0178 +#define PCI_GART_PAGE 0x017c +#define PC_NGUI_MODE 0x0180 +#define PC_NGUI_CTLSTAT 0x0184 +#define MPP_TB_CONFIG 0x01C0 +#define MPP_GP_CONFIG 0x01C8 +#define VIPH_CONTROL 0x01D0 +#define CRTC_H_TOTAL_DISP 0x0200 +#define CRTC_H_SYNC_STRT_WID 0x0204 +#define CRTC_V_TOTAL_DISP 0x0208 +#define CRTC_V_SYNC_STRT_WID 0x020c +#define CRTC_VLINE_CRNT_VLINE 0x0210 +#define CRTC_CRNT_FRAME 0x0214 +#define CRTC_GUI_TRIG_VLINE 0x0218 +#define CRTC_OFFSET 0x0224 +#define CRTC_OFFSET_CNTL 0x0228 +#define CRTC_PITCH 0x022c +#define OVR_CLR 0x0230 +#define OVR_WID_LEFT_RIGHT 0x0234 +#define OVR_WID_TOP_BOTTOM 0x0238 +#define CUR_OFFSET 0x0260 +#define CUR_HORZ_VERT_POSN 0x0264 +#define CUR_HORZ_VERT_OFF 0x0268 +#define CUR_CLR0 0x026c +#define CUR_CLR1 0x0270 +#define LVDS_GEN_CNTL 0x02d0 +#define DDA_CONFIG 0x02e0 +#define DDA_ON_OFF 0x02e4 +#define VGA_DDA_CONFIG 0x02e8 +#define VGA_DDA_ON_OFF 0x02ec +#define CRTC2_H_TOTAL_DISP 0x0300 +#define CRTC2_H_SYNC_STRT_WID 0x0304 +#define CRTC2_V_TOTAL_DISP 0x0308 +#define CRTC2_V_SYNC_STRT_WID 0x030c +#define CRTC2_VLINE_CRNT_VLINE 0x0310 +#define CRTC2_CRNT_FRAME 0x0314 +#define CRTC2_GUI_TRIG_VLINE 0x0318 +#define CRTC2_OFFSET 0x0324 +#define CRTC2_OFFSET_CNTL 0x0328 +#define CRTC2_PITCH 0x032c +#define DDA2_CONFIG 0x03e0 +#define DDA2_ON_OFF 0x03e4 +#define CRTC2_GEN_CNTL 0x03f8 +#define CRTC2_STATUS 0x03fc +#define OV0_SCALE_CNTL 0x0420 +#define SUBPIC_CNTL 0x0540 +#define PM4_BUFFER_OFFSET 0x0700 +#define PM4_BUFFER_CNTL 0x0704 +#define PM4_BUFFER_WM_CNTL 0x0708 +#define PM4_BUFFER_DL_RPTR_ADDR 0x070c +#define PM4_BUFFER_DL_RPTR 0x0710 +#define PM4_BUFFER_DL_WPTR 0x0714 +#define PM4_VC_FPU_SETUP 0x071c +#define PM4_FPU_CNTL 0x0720 +#define PM4_VC_FORMAT 0x0724 +#define PM4_VC_CNTL 0x0728 +#define PM4_VC_I01 0x072c +#define PM4_VC_VLOFF 0x0730 +#define PM4_VC_VLSIZE 0x0734 +#define PM4_IW_INDOFF 0x0738 +#define PM4_IW_INDSIZE 0x073c +#define PM4_FPU_FPX0 0x0740 +#define PM4_FPU_FPY0 0x0744 +#define PM4_FPU_FPX1 0x0748 
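
Per the register-mapping comment at the top of this header, only 0x0000-0x00ff is visible through both the I/O and mmio apertures; the MM_INDEX/MM_DATA pair at offsets 0/4 is what lets a guest reach the mmio-only range indirectly. A sketch of that access pattern, assuming hypothetical reg_write32()/reg_read32() helpers for the shared window:

    /* Read an mmio-only register (e.g. MC_STATUS at 0x0150) through the
     * index/data pair that sits inside the shared 0x0000-0x00ff window. */
    static uint32_t ati_read_indirect(void *win, uint32_t reg)
    {
        reg_write32(win, MM_INDEX, reg); /* select the target register */
        return reg_read32(win, MM_DATA); /* data port yields its value */
    }
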
+#define PM4_FPU_FPY1 0x074c +#define PM4_FPU_FPX2 0x0750 +#define PM4_FPU_FPY2 0x0754 +#define PM4_FPU_FPY3 0x0758 +#define PM4_FPU_FPY4 0x075c +#define PM4_FPU_FPY5 0x0760 +#define PM4_FPU_FPY6 0x0764 +#define PM4_FPU_FPR 0x0768 +#define PM4_FPU_FPG 0x076c +#define PM4_FPU_FPB 0x0770 +#define PM4_FPU_FPA 0x0774 +#define PM4_FPU_INTXY0 0x0780 +#define PM4_FPU_INTXY1 0x0784 +#define PM4_FPU_INTXY2 0x0788 +#define PM4_FPU_INTARGB 0x078c +#define PM4_FPU_FPTWICEAREA 0x0790 +#define PM4_FPU_DMAJOR01 0x0794 +#define PM4_FPU_DMAJOR12 0x0798 +#define PM4_FPU_DMAJOR02 0x079c +#define PM4_FPU_STAT 0x07a0 +#define PM4_STAT 0x07b8 +#define PM4_TEST_CNTL 0x07d0 +#define PM4_MICROCODE_ADDR 0x07d4 +#define PM4_MICROCODE_RADDR 0x07d8 +#define PM4_MICROCODE_DATAH 0x07dc +#define PM4_MICROCODE_DATAL 0x07e0 +#define PM4_CMDFIFO_ADDR 0x07e4 +#define PM4_CMDFIFO_DATAH 0x07e8 +#define PM4_CMDFIFO_DATAL 0x07ec +#define PM4_BUFFER_ADDR 0x07f0 +#define PM4_BUFFER_DATAH 0x07f4 +#define PM4_BUFFER_DATAL 0x07f8 +#define PM4_MICRO_CNTL 0x07fc +#define CAP0_TRIG_CNTL 0x0950 +#define CAP1_TRIG_CNTL 0x09c0 + +#define RBBM_STATUS 0x0e40 + +/* + * GUI Block Memory Mapped Registers + * These registers are FIFOed. + */ +#define PM4_FIFO_DATA_EVEN 0x1000 +#define PM4_FIFO_DATA_ODD 0x1004 + +#define DST_OFFSET 0x1404 +#define DST_PITCH 0x1408 +#define DST_WIDTH 0x140c +#define DST_HEIGHT 0x1410 +#define SRC_X 0x1414 +#define SRC_Y 0x1418 +#define DST_X 0x141c +#define DST_Y 0x1420 +#define SRC_PITCH_OFFSET 0x1428 +#define DST_PITCH_OFFSET 0x142c +#define SRC_Y_X 0x1434 +#define DST_Y_X 0x1438 +#define DST_HEIGHT_WIDTH 0x143c +#define DP_GUI_MASTER_CNTL 0x146c +#define BRUSH_SCALE 0x1470 +#define BRUSH_Y_X 0x1474 +#define DP_BRUSH_BKGD_CLR 0x1478 +#define DP_BRUSH_FRGD_CLR 0x147c +#define DST_WIDTH_X 0x1588 +#define DST_HEIGHT_WIDTH_8 0x158c +#define SRC_X_Y 0x1590 +#define DST_X_Y 0x1594 +#define DST_WIDTH_HEIGHT 0x1598 +#define DST_WIDTH_X_INCY 0x159c +#define DST_HEIGHT_Y 0x15a0 +#define DST_X_SUB 0x15a4 +#define DST_Y_SUB 0x15a8 +#define SRC_OFFSET 0x15ac +#define SRC_PITCH 0x15b0 +#define DST_HEIGHT_WIDTH_BW 0x15b4 +#define CLR_CMP_CNTL 0x15c0 +#define CLR_CMP_CLR_SRC 0x15c4 +#define CLR_CMP_CLR_DST 0x15c8 +#define CLR_CMP_MASK 0x15cc +#define DP_SRC_FRGD_CLR 0x15d8 +#define DP_SRC_BKGD_CLR 0x15dc +#define DST_BRES_ERR 0x1628 +#define DST_BRES_INC 0x162c +#define DST_BRES_DEC 0x1630 +#define DST_BRES_LNTH 0x1634 +#define DST_BRES_LNTH_SUB 0x1638 +#define SC_LEFT 0x1640 +#define SC_RIGHT 0x1644 +#define SC_TOP 0x1648 +#define SC_BOTTOM 0x164c +#define SRC_SC_RIGHT 0x1654 +#define SRC_SC_BOTTOM 0x165c +#define GUI_DEBUG0 0x16a0 +#define GUI_DEBUG1 0x16a4 +#define GUI_TIMEOUT 0x16b0 +#define GUI_TIMEOUT0 0x16b4 +#define GUI_TIMEOUT1 0x16b8 +#define GUI_PROBE 0x16bc +#define DP_CNTL 0x16c0 +#define DP_DATATYPE 0x16c4 +#define DP_MIX 0x16c8 +#define DP_WRITE_MASK 0x16cc +#define DP_CNTL_XDIR_YDIR_YMAJOR 0x16d0 +#define DEFAULT_OFFSET 0x16e0 +#define DEFAULT_PITCH 0x16e4 +#define DEFAULT_SC_BOTTOM_RIGHT 0x16e8 +#define SC_TOP_LEFT 0x16ec +#define SC_BOTTOM_RIGHT 0x16f0 +#define SRC_SC_BOTTOM_RIGHT 0x16f4 +#define DST_TILE 0x1700 +#define WAIT_UNTIL 0x1720 +#define CACHE_CNTL 0x1724 +#define GUI_STAT 0x1740 +#define PC_GUI_MODE 0x1744 +#define PC_GUI_CTLSTAT 0x1748 +#define PC_DEBUG_MODE 0x1760 +#define BRES_DST_ERR_DEC 0x1780 +#define TRAIL_BRES_T12_ERR_DEC 0x1784 +#define TRAIL_BRES_T12_INC 0x1788 +#define DP_T12_CNTL 0x178c +#define DST_BRES_T1_LNTH 0x1790 +#define DST_BRES_T2_LNTH 0x1794 +#define SCALE_SRC_HEIGHT_WIDTH 
0x1994 +#define SCALE_OFFSET_0 0x1998 +#define SCALE_PITCH 0x199c +#define SCALE_X_INC 0x19a0 +#define SCALE_Y_INC 0x19a4 +#define SCALE_HACC 0x19a8 +#define SCALE_VACC 0x19ac +#define SCALE_DST_X_Y 0x19b0 +#define SCALE_DST_HEIGHT_WIDTH 0x19b4 +#define SCALE_3D_CNTL 0x1a00 +#define SCALE_3D_DATATYPE 0x1a20 +#define SETUP_CNTL 0x1bc4 +#define SOLID_COLOR 0x1bc8 +#define WINDOW_XY_OFFSET 0x1bcc +#define DRAW_LINE_POINT 0x1bd0 +#define SETUP_CNTL_PM4 0x1bd4 +#define DST_PITCH_OFFSET_C 0x1c80 +#define DP_GUI_MASTER_CNTL_C 0x1c84 +#define SC_TOP_LEFT_C 0x1c88 +#define SC_BOTTOM_RIGHT_C 0x1c8c + +#define CLR_CMP_MASK_3D 0x1A28 +#define MISC_3D_STATE_CNTL_REG 0x1CA0 +#define MC_SRC1_CNTL 0x19D8 +#define TEX_CNTL 0x1800 + +/* CONSTANTS */ +#define GUI_ACTIVE 0x80000000 +#define ENGINE_IDLE 0x0 + +#define PLL_WR_EN 0x00000080 + +#define CLK_PIN_CNTL 0x01 +#define PPLL_CNTL 0x02 +#define PPLL_REF_DIV 0x03 +#define PPLL_DIV_0 0x04 +#define PPLL_DIV_1 0x05 +#define PPLL_DIV_2 0x06 +#define PPLL_DIV_3 0x07 +#define VCLK_ECP_CNTL 0x08 +#define HTOTAL_CNTL 0x09 +#define X_MPLL_REF_FB_DIV 0x0a +#define XPLL_CNTL 0x0b +#define XDLL_CNTL 0x0c +#define XCLK_CNTL 0x0d +#define MPLL_CNTL 0x0e +#define MCLK_CNTL 0x0f +#define AGP_PLL_CNTL 0x10 +#define FCP_CNTL 0x12 +#define PLL_TEST_CNTL 0x13 +#define P2PLL_CNTL 0x2a +#define P2PLL_REF_DIV 0x2b +#define P2PLL_DIV_0 0x2b +#define POWER_MANAGEMENT 0x2f + +#define PPLL_RESET 0x00000001 +#define PPLL_ATOMIC_UPDATE_EN 0x00010000 +#define PPLL_VGA_ATOMIC_UPDATE_EN 0x00020000 +#define PPLL_REF_DIV_MASK 0x000003FF +#define PPLL_FB3_DIV_MASK 0x000007FF +#define PPLL_POST3_DIV_MASK 0x00070000 +#define PPLL_ATOMIC_UPDATE_R 0x00008000 +#define PPLL_ATOMIC_UPDATE_W 0x00008000 +#define MEM_CFG_TYPE_MASK 0x00000003 +#define XCLK_SRC_SEL_MASK 0x00000007 +#define XPLL_FB_DIV_MASK 0x0000FF00 +#define X_MPLL_REF_DIV_MASK 0x000000FF + +/* GEN_INT_CNTL */ +#define CRTC_VBLANK_INT 0x00000001 +#define CRTC_VLINE_INT 0x00000002 +#define CRTC_VSYNC_INT 0x00000004 + +/* Config control values (CONFIG_CNTL) */ +#define APER_0_ENDIAN 0x00000003 +#define APER_1_ENDIAN 0x0000000c +#define CFG_VGA_IO_DIS 0x00000400 + +/* CRTC control values (CRTC_GEN_CNTL) */ +#define CRTC_CSYNC_EN 0x00000010 + +#define CRTC2_DBL_SCAN_EN 0x00000001 +#define CRTC2_DISPLAY_DIS 0x00800000 +#define CRTC2_FIFO_EXTSENSE 0x00200000 +#define CRTC2_ICON_EN 0x00100000 +#define CRTC2_CUR_EN 0x00010000 +#define CRTC2_EXT_DISP_EN 0x01000000 +#define CRTC2_EN 0x02000000 +#define CRTC2_DISP_REQ_EN_B 0x04000000 + +#define CRTC_PIX_WIDTH_MASK 0x00000700 +#define CRTC_PIX_WIDTH_4BPP 0x00000100 +#define CRTC_PIX_WIDTH_8BPP 0x00000200 +#define CRTC_PIX_WIDTH_15BPP 0x00000300 +#define CRTC_PIX_WIDTH_16BPP 0x00000400 +#define CRTC_PIX_WIDTH_24BPP 0x00000500 +#define CRTC_PIX_WIDTH_32BPP 0x00000600 + +/* DAC_CNTL bit constants */ +#define DAC_8BIT_EN 0x00000100 +#define DAC_MASK 0xFF000000 +#define DAC_BLANKING 0x00000004 +#define DAC_RANGE_CNTL 0x00000003 +#define DAC_CLK_SEL 0x00000010 +#define DAC_PALETTE_ACCESS_CNTL 0x00000020 +#define DAC_PALETTE2_SNOOP_EN 0x00000040 +#define DAC_PDWN 0x00008000 + +/* CRTC_EXT_CNTL */ +#define CRT_CRTC_DISPLAY_DIS 0x00000400 +#define CRT_CRTC_ON 0x00008000 + +/* GEN_RESET_CNTL bit constants */ +#define SOFT_RESET_GUI 0x00000001 +#define SOFT_RESET_VCLK 0x00000100 +#define SOFT_RESET_PCLK 0x00000200 +#define SOFT_RESET_ECP 0x00000400 +#define SOFT_RESET_DISPENG_XCLK 0x00000800 + +/* PC_GUI_CTLSTAT bit constants */ +#define PC_BUSY_INIT 0x10000000 +#define PC_BUSY_GUI 0x20000000 +#define
PC_BUSY_NGUI 0x40000000 +#define PC_BUSY 0x80000000 + +#define BUS_MASTER_DIS 0x00000040 +#define PM4_BUFFER_CNTL_NONPM4 0x00000000 + +/* DP_DATATYPE bit constants */ +#define DST_8BPP 0x00000002 +#define DST_15BPP 0x00000003 +#define DST_16BPP 0x00000004 +#define DST_24BPP 0x00000005 +#define DST_32BPP 0x00000006 + +#define BRUSH_SOLIDCOLOR 0x00000d00 + +/* DP_GUI_MASTER_CNTL bit constants */ +#define GMC_SRC_PITCH_OFFSET_CNTL 0x00000001 +#define GMC_DST_PITCH_OFFSET_CNTL 0x00000002 +#define GMC_SRC_CLIP_DEFAULT 0x00000000 +#define GMC_DST_CLIP_DEFAULT 0x00000000 +#define GMC_BRUSH_SOLIDCOLOR 0x000000d0 +#define GMC_SRC_DSTCOLOR 0x00003000 +#define GMC_BYTE_ORDER_MSB_TO_LSB 0x00000000 +#define GMC_DP_SRC_RECT 0x02000000 +#define GMC_3D_FCN_EN_CLR 0x00000000 +#define GMC_AUX_CLIP_CLEAR 0x20000000 +#define GMC_DST_CLR_CMP_FCN_CLEAR 0x10000000 +#define GMC_WRITE_MASK_SET 0x40000000 +#define GMC_DP_CONVERSION_TEMP_6500 0x00000000 + +/* DP_GUI_MASTER_CNTL ROP3 named constants */ +#define GMC_ROP3_MASK 0x00ff0000 +#define ROP3_BLACKNESS 0x00000000 +#define ROP3_SRCCOPY 0x00cc0000 +#define ROP3_PATCOPY 0x00f00000 +#define ROP3_WHITENESS 0x00ff0000 + +#define SRC_DSTCOLOR 0x00030000 + +/* DP_CNTL bit constants */ +#define DST_X_RIGHT_TO_LEFT 0x00000000 +#define DST_X_LEFT_TO_RIGHT 0x00000001 +#define DST_Y_BOTTOM_TO_TOP 0x00000000 +#define DST_Y_TOP_TO_BOTTOM 0x00000002 +#define DST_X_MAJOR 0x00000000 +#define DST_Y_MAJOR 0x00000004 +#define DST_X_TILE 0x00000008 +#define DST_Y_TILE 0x00000010 +#define DST_LAST_PEL 0x00000020 +#define DST_TRAIL_X_RIGHT_TO_LEFT 0x00000000 +#define DST_TRAIL_X_LEFT_TO_RIGHT 0x00000040 +#define DST_TRAP_FILL_RIGHT_TO_LEFT 0x00000000 +#define DST_TRAP_FILL_LEFT_TO_RIGHT 0x00000080 +#define DST_BRES_SIGN 0x00000100 +#define DST_HOST_BIG_ENDIAN_EN 0x00000200 +#define DST_POLYLINE_NONLAST 0x00008000 +#define DST_RASTER_STALL 0x00010000 +#define DST_POLY_EDGE 0x00040000 + +/* DP_MIX bit constants */ +#define DP_SRC_RECT 0x00000200 +#define DP_SRC_HOST 0x00000300 +#define DP_SRC_HOST_BYTEALIGN 0x00000400 + +/* LVDS_GEN_CNTL constants */ +#define LVDS_BL_MOD_LEVEL_MASK 0x0000ff00 +#define LVDS_BL_MOD_LEVEL_SHIFT 8 +#define LVDS_BL_MOD_EN 0x00010000 +#define LVDS_DIGION 0x00040000 +#define LVDS_BLON 0x00080000 +#define LVDS_ON 0x00000001 +#define LVDS_DISPLAY_DIS 0x00000002 +#define LVDS_PANEL_TYPE_2PIX_PER_CLK 0x00000004 +#define LVDS_PANEL_24BITS_TFT 0x00000008 +#define LVDS_FRAME_MOD_NO 0x00000000 +#define LVDS_FRAME_MOD_2_LEVELS 0x00000010 +#define LVDS_FRAME_MOD_4_LEVELS 0x00000020 +#define LVDS_RST_FM 0x00000040 +#define LVDS_EN 0x00000080 + +/* CRTC2_GEN_CNTL constants */ +#define CRTC2_EN 0x02000000 + +/* POWER_MANAGEMENT constants */ +#define PWR_MGT_ON 0x00000001 +#define PWR_MGT_MODE_MASK 0x00000006 +#define PWR_MGT_MODE_PIN 0x00000000 +#define PWR_MGT_MODE_REGISTER 0x00000002 +#define PWR_MGT_MODE_TIMER 0x00000004 +#define PWR_MGT_MODE_PCI 0x00000006 +#define PWR_MGT_AUTO_PWR_UP_EN 0x00000008 +#define PWR_MGT_ACTIVITY_PIN_ON 0x00000010 +#define PWR_MGT_STANDBY_POL 0x00000020 +#define PWR_MGT_SUSPEND_POL 0x00000040 +#define PWR_MGT_SELF_REFRESH 0x00000080 +#define PWR_MGT_ACTIVITY_PIN_EN 0x00000100 +#define PWR_MGT_KEYBD_SNOOP 0x00000200 +#define PWR_MGT_TRISTATE_MEM_EN 0x00000800 +#define PWR_MGT_SELW4MS 0x00001000 +#define PWR_MGT_SLOWDOWN_MCLK 0x00002000 + +#define PMI_PMSCR_REG 0x60 + +/* used by ATI bug fix for hardware ROM */ +#define RAGE128_MPP_TB_CONFIG 0x01c0 + +#endif /* ATI_REGS_H */ diff --git a/hw/display/bcm2835_fb.c b/hw/display/bcm2835_fb.c new 
file mode 100644 index 000000000..2be77bdd3 --- /dev/null +++ b/hw/display/bcm2835_fb.c @@ -0,0 +1,470 @@ +/* + * Raspberry Pi emulation (c) 2012 Gregory Estrade + * Refactoring for Pi2 Copyright (c) 2015, Microsoft. Written by Andrew Baumann. + * + * Heavily based on milkymist-vgafb.c, copyright terms below: + * QEMU model of the Milkymist VGA framebuffer. + * + * Copyright (c) 2010-2012 Michael Walle + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + * + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "hw/display/bcm2835_fb.h" +#include "hw/hw.h" +#include "hw/irq.h" +#include "framebuffer.h" +#include "ui/pixel_ops.h" +#include "hw/misc/bcm2835_mbox_defs.h" +#include "hw/qdev-properties.h" +#include "migration/vmstate.h" +#include "qemu/log.h" +#include "qemu/module.h" + +#define DEFAULT_VCRAM_SIZE 0x4000000 +#define BCM2835_FB_OFFSET 0x00100000 + +/* Maximum permitted framebuffer size; experimentally determined on an rpi2 */ +#define XRES_MAX 3840 +#define YRES_MAX 2560 +/* Framebuffer size used if guest requests zero size */ +#define XRES_SMALL 592 +#define YRES_SMALL 488 + +static void fb_invalidate_display(void *opaque) +{ + BCM2835FBState *s = BCM2835_FB(opaque); + + s->invalidate = true; +} + +static void draw_line_src16(void *opaque, uint8_t *dst, const uint8_t *src, + int width, int deststep) +{ + BCM2835FBState *s = opaque; + uint16_t rgb565; + uint32_t rgb888; + uint8_t r, g, b; + DisplaySurface *surface = qemu_console_surface(s->con); + int bpp = surface_bits_per_pixel(surface); + + while (width--) { + switch (s->config.bpp) { + case 8: + /* lookup palette starting at video ram base + * TODO: cache translation, rather than doing this each time! 
+ */ + rgb888 = ldl_le_phys(&s->dma_as, s->vcram_base + (*src << 2)); + r = (rgb888 >> 0) & 0xff; + g = (rgb888 >> 8) & 0xff; + b = (rgb888 >> 16) & 0xff; + src++; + break; + case 16: + rgb565 = lduw_le_p(src); + r = ((rgb565 >> 11) & 0x1f) << 3; + g = ((rgb565 >> 5) & 0x3f) << 2; + b = ((rgb565 >> 0) & 0x1f) << 3; + src += 2; + break; + case 24: + rgb888 = ldl_le_p(src); + r = (rgb888 >> 0) & 0xff; + g = (rgb888 >> 8) & 0xff; + b = (rgb888 >> 16) & 0xff; + src += 3; + break; + case 32: + rgb888 = ldl_le_p(src); + r = (rgb888 >> 0) & 0xff; + g = (rgb888 >> 8) & 0xff; + b = (rgb888 >> 16) & 0xff; + src += 4; + break; + default: + r = 0; + g = 0; + b = 0; + break; + } + + if (s->config.pixo == 0) { + /* swap to BGR pixel format */ + uint8_t tmp = r; + r = b; + b = tmp; + } + + switch (bpp) { + case 8: + *dst++ = rgb_to_pixel8(r, g, b); + break; + case 15: + *(uint16_t *)dst = rgb_to_pixel15(r, g, b); + dst += 2; + break; + case 16: + *(uint16_t *)dst = rgb_to_pixel16(r, g, b); + dst += 2; + break; + case 24: + rgb888 = rgb_to_pixel24(r, g, b); + *dst++ = rgb888 & 0xff; + *dst++ = (rgb888 >> 8) & 0xff; + *dst++ = (rgb888 >> 16) & 0xff; + break; + case 32: + *(uint32_t *)dst = rgb_to_pixel32(r, g, b); + dst += 4; + break; + default: + return; + } + } +} + +static bool fb_use_offsets(BCM2835FBConfig *config) +{ + /* + * Return true if we should use the viewport offsets. + * Experimentally, the hardware seems to do this only if the + * viewport size is larger than the physical screen. (It doesn't + * prevent the guest setting this silly viewport setting, though...) + */ + return config->xres_virtual > config->xres && + config->yres_virtual > config->yres; +} + +static void fb_update_display(void *opaque) +{ + BCM2835FBState *s = opaque; + DisplaySurface *surface = qemu_console_surface(s->con); + int first = 0; + int last = 0; + int src_width = 0; + int dest_width = 0; + uint32_t xoff = 0, yoff = 0; + + if (s->lock || !s->config.xres) { + return; + } + + src_width = bcm2835_fb_get_pitch(&s->config); + if (fb_use_offsets(&s->config)) { + xoff = s->config.xoffset; + yoff = s->config.yoffset; + } + + dest_width = s->config.xres; + + switch (surface_bits_per_pixel(surface)) { + case 0: + return; + case 8: + break; + case 15: + dest_width *= 2; + break; + case 16: + dest_width *= 2; + break; + case 24: + dest_width *= 3; + break; + case 32: + dest_width *= 4; + break; + default: + hw_error("bcm2835_fb: bad color depth\n"); + break; + } + + if (s->invalidate) { + hwaddr base = s->config.base + xoff + (hwaddr)yoff * src_width; + framebuffer_update_memory_section(&s->fbsection, s->dma_mr, + base, + s->config.yres, src_width); + } + + framebuffer_update_display(surface, &s->fbsection, + s->config.xres, s->config.yres, + src_width, dest_width, 0, s->invalidate, + draw_line_src16, s, &first, &last); + + if (first >= 0) { + dpy_gfx_update(s->con, 0, first, s->config.xres, + last - first + 1); + } + + s->invalidate = false; +} + +void bcm2835_fb_validate_config(BCM2835FBConfig *config) +{ + /* + * Validate the config, and clip any bogus values into range, + * as the hardware does. Note that fb_update_display() relies on + * this happening to prevent it from performing out-of-range + * accesses on redraw. + */ + config->xres = MIN(config->xres, XRES_MAX); + config->xres_virtual = MIN(config->xres_virtual, XRES_MAX); + config->yres = MIN(config->yres, YRES_MAX); + config->yres_virtual = MIN(config->yres_virtual, YRES_MAX); + + /* + * These are not minima: a 40x40 framebuffer will be accepted. 
+ * They're only used as defaults if the guest asks for zero size. + */ + if (config->xres == 0) { + config->xres = XRES_SMALL; + } + if (config->yres == 0) { + config->yres = YRES_SMALL; + } + if (config->xres_virtual == 0) { + config->xres_virtual = config->xres; + } + if (config->yres_virtual == 0) { + config->yres_virtual = config->yres; + } + + if (fb_use_offsets(config)) { + /* Clip the offsets so the viewport is within the physical screen */ + config->xoffset = MIN(config->xoffset, + config->xres_virtual - config->xres); + config->yoffset = MIN(config->yoffset, + config->yres_virtual - config->yres); + } +} + +void bcm2835_fb_reconfigure(BCM2835FBState *s, BCM2835FBConfig *newconfig) +{ + s->lock = true; + + s->config = *newconfig; + + s->invalidate = true; + qemu_console_resize(s->con, s->config.xres, s->config.yres); + s->lock = false; +} + +static void bcm2835_fb_mbox_push(BCM2835FBState *s, uint32_t value) +{ + uint32_t pitch; + uint32_t size; + BCM2835FBConfig newconf; + + value &= ~0xf; + + newconf.xres = ldl_le_phys(&s->dma_as, value); + newconf.yres = ldl_le_phys(&s->dma_as, value + 4); + newconf.xres_virtual = ldl_le_phys(&s->dma_as, value + 8); + newconf.yres_virtual = ldl_le_phys(&s->dma_as, value + 12); + newconf.bpp = ldl_le_phys(&s->dma_as, value + 20); + newconf.xoffset = ldl_le_phys(&s->dma_as, value + 24); + newconf.yoffset = ldl_le_phys(&s->dma_as, value + 28); + + newconf.base = s->vcram_base | (value & 0xc0000000); + newconf.base += BCM2835_FB_OFFSET; + + /* Copy fields which we don't want to change from the existing config */ + newconf.pixo = s->config.pixo; + newconf.alpha = s->config.alpha; + + bcm2835_fb_validate_config(&newconf); + + pitch = bcm2835_fb_get_pitch(&newconf); + size = bcm2835_fb_get_size(&newconf); + + stl_le_phys(&s->dma_as, value + 16, pitch); + stl_le_phys(&s->dma_as, value + 32, newconf.base); + stl_le_phys(&s->dma_as, value + 36, size); + + bcm2835_fb_reconfigure(s, &newconf); +} + +static uint64_t bcm2835_fb_read(void *opaque, hwaddr offset, unsigned size) +{ + BCM2835FBState *s = opaque; + uint32_t res = 0; + + switch (offset) { + case MBOX_AS_DATA: + res = MBOX_CHAN_FB; + s->pending = false; + qemu_set_irq(s->mbox_irq, 0); + break; + + case MBOX_AS_PENDING: + res = s->pending; + break; + + default: + qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset %"HWADDR_PRIx"\n", + __func__, offset); + return 0; + } + + return res; +} + +static void bcm2835_fb_write(void *opaque, hwaddr offset, uint64_t value, + unsigned size) +{ + BCM2835FBState *s = opaque; + + switch (offset) { + case MBOX_AS_DATA: + /* bcm2835_mbox should check our pending status before pushing */ + assert(!s->pending); + s->pending = true; + bcm2835_fb_mbox_push(s, value); + qemu_set_irq(s->mbox_irq, 1); + break; + + default: + qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset %"HWADDR_PRIx"\n", + __func__, offset); + return; + } +} + +static const MemoryRegionOps bcm2835_fb_ops = { + .read = bcm2835_fb_read, + .write = bcm2835_fb_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid.min_access_size = 4, + .valid.max_access_size = 4, +}; + +static const VMStateDescription vmstate_bcm2835_fb = { + .name = TYPE_BCM2835_FB, + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_BOOL(lock, BCM2835FBState), + VMSTATE_BOOL(invalidate, BCM2835FBState), + VMSTATE_BOOL(pending, BCM2835FBState), + VMSTATE_UINT32(config.xres, BCM2835FBState), + VMSTATE_UINT32(config.yres, BCM2835FBState), + VMSTATE_UINT32(config.xres_virtual, BCM2835FBState), + 
VMSTATE_UINT32(config.yres_virtual, BCM2835FBState), + VMSTATE_UINT32(config.xoffset, BCM2835FBState), + VMSTATE_UINT32(config.yoffset, BCM2835FBState), + VMSTATE_UINT32(config.bpp, BCM2835FBState), + VMSTATE_UINT32(config.base, BCM2835FBState), + VMSTATE_UNUSED(8), /* Was pitch and size */ + VMSTATE_UINT32(config.pixo, BCM2835FBState), + VMSTATE_UINT32(config.alpha, BCM2835FBState), + VMSTATE_END_OF_LIST() + } +}; + +static const GraphicHwOps vgafb_ops = { + .invalidate = fb_invalidate_display, + .gfx_update = fb_update_display, +}; + +static void bcm2835_fb_init(Object *obj) +{ + BCM2835FBState *s = BCM2835_FB(obj); + + memory_region_init_io(&s->iomem, obj, &bcm2835_fb_ops, s, TYPE_BCM2835_FB, + 0x10); + sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->iomem); + sysbus_init_irq(SYS_BUS_DEVICE(s), &s->mbox_irq); +} + +static void bcm2835_fb_reset(DeviceState *dev) +{ + BCM2835FBState *s = BCM2835_FB(dev); + + s->pending = false; + + s->config = s->initial_config; + + s->invalidate = true; + s->lock = false; +} + +static void bcm2835_fb_realize(DeviceState *dev, Error **errp) +{ + BCM2835FBState *s = BCM2835_FB(dev); + Object *obj; + + if (s->vcram_base == 0) { + error_setg(errp, "%s: required vcram-base property not set", __func__); + return; + } + + obj = object_property_get_link(OBJECT(dev), "dma-mr", &error_abort); + + /* Fill in the parts of initial_config that are not set by QOM properties */ + s->initial_config.xres_virtual = s->initial_config.xres; + s->initial_config.yres_virtual = s->initial_config.yres; + s->initial_config.xoffset = 0; + s->initial_config.yoffset = 0; + s->initial_config.base = s->vcram_base + BCM2835_FB_OFFSET; + + s->dma_mr = MEMORY_REGION(obj); + address_space_init(&s->dma_as, s->dma_mr, TYPE_BCM2835_FB "-memory"); + + bcm2835_fb_reset(dev); + + s->con = graphic_console_init(dev, 0, &vgafb_ops, s); + qemu_console_resize(s->con, s->config.xres, s->config.yres); +} + +static Property bcm2835_fb_props[] = { + DEFINE_PROP_UINT32("vcram-base", BCM2835FBState, vcram_base, 0),/*required*/ + DEFINE_PROP_UINT32("vcram-size", BCM2835FBState, vcram_size, + DEFAULT_VCRAM_SIZE), + DEFINE_PROP_UINT32("xres", BCM2835FBState, initial_config.xres, 640), + DEFINE_PROP_UINT32("yres", BCM2835FBState, initial_config.yres, 480), + DEFINE_PROP_UINT32("bpp", BCM2835FBState, initial_config.bpp, 16), + DEFINE_PROP_UINT32("pixo", BCM2835FBState, + initial_config.pixo, 1), /* 1=RGB, 0=BGR */ + DEFINE_PROP_UINT32("alpha", BCM2835FBState, + initial_config.alpha, 2), /* alpha ignored */ + DEFINE_PROP_END_OF_LIST() +}; + +static void bcm2835_fb_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + device_class_set_props(dc, bcm2835_fb_props); + dc->realize = bcm2835_fb_realize; + dc->reset = bcm2835_fb_reset; + dc->vmsd = &vmstate_bcm2835_fb; +} + +static TypeInfo bcm2835_fb_info = { + .name = TYPE_BCM2835_FB, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(BCM2835FBState), + .class_init = bcm2835_fb_class_init, + .instance_init = bcm2835_fb_init, +}; + +static void bcm2835_fb_register_types(void) +{ + type_register_static(&bcm2835_fb_info); +} + +type_init(bcm2835_fb_register_types) diff --git a/hw/display/blizzard.c b/hw/display/blizzard.c new file mode 100644 index 000000000..105241577 --- /dev/null +++ b/hw/display/blizzard.c @@ -0,0 +1,1026 @@ +/* + * Epson S1D13744/S1D13745 (Blizzard/Hailstorm/Tornado) LCD/TV controller. 
+ * + * Copyright (C) 2008 Nokia Corporation + * Written by Andrzej Zaborowski + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 or + * (at your option) version 3 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . + */ + +#include "qemu/osdep.h" +#include "qemu/bitops.h" +#include "ui/console.h" +#include "hw/display/blizzard.h" +#include "ui/pixel_ops.h" + +typedef void (*blizzard_fn_t)(uint8_t *, const uint8_t *, unsigned int); + +typedef struct { + uint8_t reg; + uint32_t addr; + int swallow; + + int pll; + int pll_range; + int pll_ctrl; + uint8_t pll_mode; + uint8_t clksel; + int memenable; + int memrefresh; + uint8_t timing[3]; + int priority; + + uint8_t lcd_config; + int x; + int y; + int skipx; + int skipy; + uint8_t hndp; + uint8_t vndp; + uint8_t hsync; + uint8_t vsync; + uint8_t pclk; + uint8_t u; + uint8_t v; + uint8_t yrc[2]; + int ix[2]; + int iy[2]; + int ox[2]; + int oy[2]; + + int enable; + int blank; + int bpp; + int invalidate; + int mx[2]; + int my[2]; + uint8_t mode; + uint8_t effect; + uint8_t iformat; + uint8_t source; + QemuConsole *con; + blizzard_fn_t *line_fn_tab[2]; + void *fb; + + uint8_t hssi_config[3]; + uint8_t tv_config; + uint8_t tv_timing[4]; + uint8_t vbi; + uint8_t tv_x; + uint8_t tv_y; + uint8_t tv_test; + uint8_t tv_filter_config; + uint8_t tv_filter_idx; + uint8_t tv_filter_coeff[0x20]; + uint8_t border_r; + uint8_t border_g; + uint8_t border_b; + uint8_t gamma_config; + uint8_t gamma_idx; + uint8_t gamma_lut[0x100]; + uint8_t matrix_ena; + uint8_t matrix_coeff[0x12]; + uint8_t matrix_r; + uint8_t matrix_g; + uint8_t matrix_b; + uint8_t pm; + uint8_t status; + uint8_t rgbgpio_dir; + uint8_t rgbgpio; + uint8_t gpio_dir; + uint8_t gpio; + uint8_t gpio_edge[2]; + uint8_t gpio_irq; + uint8_t gpio_pdown; + + struct { + int x; + int y; + int dx; + int dy; + int len; + int buflen; + void *buf; + void *data; + uint16_t *ptr; + int angle; + int pitch; + blizzard_fn_t line_fn; + } data; +} BlizzardState; + +/* Bytes(!) 
per pixel */ +static const int blizzard_iformat_bpp[0x10] = { + 0, + 2, /* RGB 5:6:5*/ + 3, /* RGB 6:6:6 mode 1 */ + 3, /* RGB 8:8:8 mode 1 */ + 0, 0, + 4, /* RGB 6:6:6 mode 2 */ + 4, /* RGB 8:8:8 mode 2 */ + 0, /* YUV 4:2:2 */ + 0, /* YUV 4:2:0 */ + 0, 0, 0, 0, 0, 0, +}; + +static void blizzard_window(BlizzardState *s) +{ + DisplaySurface *surface = qemu_console_surface(s->con); + uint8_t *src, *dst; + int bypp[2]; + int bypl[3]; + int y; + blizzard_fn_t fn = s->data.line_fn; + + if (!fn) + return; + if (s->mx[0] > s->data.x) + s->mx[0] = s->data.x; + if (s->my[0] > s->data.y) + s->my[0] = s->data.y; + if (s->mx[1] < s->data.x + s->data.dx) + s->mx[1] = s->data.x + s->data.dx; + if (s->my[1] < s->data.y + s->data.dy) + s->my[1] = s->data.y + s->data.dy; + + bypp[0] = s->bpp; + bypp[1] = surface_bytes_per_pixel(surface); + bypl[0] = bypp[0] * s->data.pitch; + bypl[1] = bypp[1] * s->x; + bypl[2] = bypp[0] * s->data.dx; + + src = s->data.data; + dst = s->fb + bypl[1] * s->data.y + bypp[1] * s->data.x; + for (y = s->data.dy; y > 0; y --, src += bypl[0], dst += bypl[1]) + fn(dst, src, bypl[2]); +} + +static int blizzard_transfer_setup(BlizzardState *s) +{ + if (s->source > 3 || !s->bpp || + s->ix[1] < s->ix[0] || s->iy[1] < s->iy[0]) + return 0; + + s->data.angle = s->effect & 3; + s->data.line_fn = s->line_fn_tab[!!s->data.angle][s->iformat]; + s->data.x = s->ix[0]; + s->data.y = s->iy[0]; + s->data.dx = s->ix[1] - s->ix[0] + 1; + s->data.dy = s->iy[1] - s->iy[0] + 1; + s->data.len = s->bpp * s->data.dx * s->data.dy; + s->data.pitch = s->data.dx; + if (s->data.len > s->data.buflen) { + s->data.buf = g_realloc(s->data.buf, s->data.len); + s->data.buflen = s->data.len; + } + s->data.ptr = s->data.buf; + s->data.data = s->data.buf; + s->data.len /= 2; + return 1; +} + +static void blizzard_reset(BlizzardState *s) +{ + s->reg = 0; + s->swallow = 0; + + s->pll = 9; + s->pll_range = 1; + s->pll_ctrl = 0x14; + s->pll_mode = 0x32; + s->clksel = 0x00; + s->memenable = 0; + s->memrefresh = 0x25c; + s->timing[0] = 0x3f; + s->timing[1] = 0x13; + s->timing[2] = 0x21; + s->priority = 0; + + s->lcd_config = 0x74; + s->x = 8; + s->y = 1; + s->skipx = 0; + s->skipy = 0; + s->hndp = 3; + s->vndp = 2; + s->hsync = 1; + s->vsync = 1; + s->pclk = 0x80; + + s->ix[0] = 0; + s->ix[1] = 0; + s->iy[0] = 0; + s->iy[1] = 0; + s->ox[0] = 0; + s->ox[1] = 0; + s->oy[0] = 0; + s->oy[1] = 0; + + s->yrc[0] = 0x00; + s->yrc[1] = 0x30; + s->u = 0; + s->v = 0; + + s->iformat = 3; + s->source = 0; + s->bpp = blizzard_iformat_bpp[s->iformat]; + + s->hssi_config[0] = 0x00; + s->hssi_config[1] = 0x00; + s->hssi_config[2] = 0x01; + s->tv_config = 0x00; + s->tv_timing[0] = 0x00; + s->tv_timing[1] = 0x00; + s->tv_timing[2] = 0x00; + s->tv_timing[3] = 0x00; + s->vbi = 0x10; + s->tv_x = 0x14; + s->tv_y = 0x03; + s->tv_test = 0x00; + s->tv_filter_config = 0x80; + s->tv_filter_idx = 0x00; + s->border_r = 0x10; + s->border_g = 0x80; + s->border_b = 0x80; + s->gamma_config = 0x00; + s->gamma_idx = 0x00; + s->matrix_ena = 0x00; + memset(&s->matrix_coeff, 0, sizeof(s->matrix_coeff)); + s->matrix_r = 0x00; + s->matrix_g = 0x00; + s->matrix_b = 0x00; + s->pm = 0x02; + s->status = 0x00; + s->rgbgpio_dir = 0x00; + s->gpio_dir = 0x00; + s->gpio_edge[0] = 0x00; + s->gpio_edge[1] = 0x00; + s->gpio_irq = 0x00; + s->gpio_pdown = 0xff; +} + +static inline void blizzard_invalidate_display(void *opaque) { + BlizzardState *s = (BlizzardState *) opaque; + + s->invalidate = 1; +} + +static uint16_t blizzard_reg_read(void *opaque, uint8_t reg) +{ + 
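
blizzard_transfer_setup() above sizes a window transfer in 16-bit units: data.len starts out in bytes (bpp * dx * dy) and is then halved, because the display memory data port (reg 0x90) is fed one uint16_t per write and blizzard_window() fires when the count reaches zero. A worked example for a 16 bpp window with ix = {0, 9}, iy = {0, 9}:

    dx  = 9 - 0 + 1;   /* 10 pixels wide */
    dy  = 9 - 0 + 1;   /* 10 pixels tall */
    len = 2 * dx * dy; /* 200 bytes at 2 bytes/pixel */
    len /= 2;          /* 100 data-port writes until the window is drawn */
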
BlizzardState *s = (BlizzardState *) opaque; + + switch (reg) { + case 0x00: /* Revision Code */ + return 0xa5; + + case 0x02: /* Configuration Readback */ + return 0x83; /* Macrovision OK, CNF[2:0] = 3 */ + + case 0x04: /* PLL M-Divider */ + return (s->pll - 1) | (1 << 7); + case 0x06: /* PLL Lock Range Control */ + return s->pll_range; + case 0x08: /* PLL Lock Synthesis Control 0 */ + return s->pll_ctrl & 0xff; + case 0x0a: /* PLL Lock Synthesis Control 1 */ + return s->pll_ctrl >> 8; + case 0x0c: /* PLL Mode Control 0 */ + return s->pll_mode; + + case 0x0e: /* Clock-Source Select */ + return s->clksel; + + case 0x10: /* Memory Controller Activate */ + case 0x14: /* Memory Controller Bank 0 Status Flag */ + return s->memenable; + + case 0x18: /* Auto-Refresh Interval Setting 0 */ + return s->memrefresh & 0xff; + case 0x1a: /* Auto-Refresh Interval Setting 1 */ + return s->memrefresh >> 8; + + case 0x1c: /* Power-On Sequence Timing Control */ + return s->timing[0]; + case 0x1e: /* Timing Control 0 */ + return s->timing[1]; + case 0x20: /* Timing Control 1 */ + return s->timing[2]; + + case 0x24: /* Arbitration Priority Control */ + return s->priority; + + case 0x28: /* LCD Panel Configuration */ + return s->lcd_config; + + case 0x2a: /* LCD Horizontal Display Width */ + return s->x >> 3; + case 0x2c: /* LCD Horizontal Non-display Period */ + return s->hndp; + case 0x2e: /* LCD Vertical Display Height 0 */ + return s->y & 0xff; + case 0x30: /* LCD Vertical Display Height 1 */ + return s->y >> 8; + case 0x32: /* LCD Vertical Non-display Period */ + return s->vndp; + case 0x34: /* LCD HS Pulse-width */ + return s->hsync; + case 0x36: /* LCD HS Pulse Start Position */ + return s->skipx >> 3; + case 0x38: /* LCD VS Pulse-width */ + return s->vsync; + case 0x3a: /* LCD VS Pulse Start Position */ + return s->skipy; + + case 0x3c: /* PCLK Polarity */ + return s->pclk; + + case 0x3e: /* High-speed Serial Interface Tx Configuration Port 0 */ + return s->hssi_config[0]; + case 0x40: /* High-speed Serial Interface Tx Configuration Port 1 */ + return s->hssi_config[1]; + case 0x42: /* High-speed Serial Interface Tx Mode */ + return s->hssi_config[2]; + case 0x44: /* TV Display Configuration */ + return s->tv_config; + case 0x46 ...
0x4c: /* TV Vertical Blanking Interval Data bits */ + return s->tv_timing[(reg - 0x46) >> 1]; + case 0x4e: /* VBI: Closed Caption / XDS Control / Status */ + return s->vbi; + case 0x50: /* TV Horizontal Start Position */ + return s->tv_x; + case 0x52: /* TV Vertical Start Position */ + return s->tv_y; + case 0x54: /* TV Test Pattern Setting */ + return s->tv_test; + case 0x56: /* TV Filter Setting */ + return s->tv_filter_config; + case 0x58: /* TV Filter Coefficient Index */ + return s->tv_filter_idx; + case 0x5a: /* TV Filter Coefficient Data */ + if (s->tv_filter_idx < 0x20) + return s->tv_filter_coeff[s->tv_filter_idx ++]; + return 0; + + case 0x60: /* Input YUV/RGB Translate Mode 0 */ + return s->yrc[0]; + case 0x62: /* Input YUV/RGB Translate Mode 1 */ + return s->yrc[1]; + case 0x64: /* U Data Fix */ + return s->u; + case 0x66: /* V Data Fix */ + return s->v; + + case 0x68: /* Display Mode */ + return s->mode; + + case 0x6a: /* Special Effects */ + return s->effect; + + case 0x6c: /* Input Window X Start Position 0 */ + return s->ix[0] & 0xff; + case 0x6e: /* Input Window X Start Position 1 */ + return s->ix[0] >> 3; + case 0x70: /* Input Window Y Start Position 0 */ + return s->iy[0] & 0xff; + case 0x72: /* Input Window Y Start Position 1 */ + return s->iy[0] >> 3; + case 0x74: /* Input Window X End Position 0 */ + return s->ix[1] & 0xff; + case 0x76: /* Input Window X End Position 1 */ + return s->ix[1] >> 3; + case 0x78: /* Input Window Y End Position 0 */ + return s->iy[1] & 0xff; + case 0x7a: /* Input Window Y End Position 1 */ + return s->iy[1] >> 3; + case 0x7c: /* Output Window X Start Position 0 */ + return s->ox[0] & 0xff; + case 0x7e: /* Output Window X Start Position 1 */ + return s->ox[0] >> 3; + case 0x80: /* Output Window Y Start Position 0 */ + return s->oy[0] & 0xff; + case 0x82: /* Output Window Y Start Position 1 */ + return s->oy[0] >> 3; + case 0x84: /* Output Window X End Position 0 */ + return s->ox[1] & 0xff; + case 0x86: /* Output Window X End Position 1 */ + return s->ox[1] >> 3; + case 0x88: /* Output Window Y End Position 0 */ + return s->oy[1] & 0xff; + case 0x8a: /* Output Window Y End Position 1 */ + return s->oy[1] >> 3; + + case 0x8c: /* Input Data Format */ + return s->iformat; + case 0x8e: /* Data Source Select */ + return s->source; + case 0x90: /* Display Memory Data Port */ + return 0; + + case 0xa8: /* Border Color 0 */ + return s->border_r; + case 0xaa: /* Border Color 1 */ + return s->border_g; + case 0xac: /* Border Color 2 */ + return s->border_b; + + case 0xb4: /* Gamma Correction Enable */ + return s->gamma_config; + case 0xb6: /* Gamma Correction Table Index */ + return s->gamma_idx; + case 0xb8: /* Gamma Correction Table Data */ + return s->gamma_lut[s->gamma_idx ++]; + + case 0xba: /* 3x3 Matrix Enable */ + return s->matrix_ena; + case 0xbc ...
0xde: /* Coefficient Registers */ + return s->matrix_coeff[(reg - 0xbc) >> 1]; + case 0xe0: /* 3x3 Matrix Red Offset */ + return s->matrix_r; + case 0xe2: /* 3x3 Matrix Green Offset */ + return s->matrix_g; + case 0xe4: /* 3x3 Matrix Blue Offset */ + return s->matrix_b; + + case 0xe6: /* Power-save */ + return s->pm; + case 0xe8: /* Non-display Period Control / Status */ + return s->status | (1 << 5); + case 0xea: /* RGB Interface Control */ + return s->rgbgpio_dir; + case 0xec: /* RGB Interface Status */ + return s->rgbgpio; + case 0xee: /* General-purpose IO Pins Configuration */ + return s->gpio_dir; + case 0xf0: /* General-purpose IO Pins Status / Control */ + return s->gpio; + case 0xf2: /* GPIO Positive Edge Interrupt Trigger */ + return s->gpio_edge[0]; + case 0xf4: /* GPIO Negative Edge Interrupt Trigger */ + return s->gpio_edge[1]; + case 0xf6: /* GPIO Interrupt Status */ + return s->gpio_irq; + case 0xf8: /* GPIO Pull-down Control */ + return s->gpio_pdown; + + default: + fprintf(stderr, "%s: unknown register %02x\n", __func__, reg); + return 0; + } +} + +static void blizzard_reg_write(void *opaque, uint8_t reg, uint16_t value) +{ + BlizzardState *s = (BlizzardState *) opaque; + + switch (reg) { + case 0x04: /* PLL M-Divider */ + s->pll = (value & 0x3f) + 1; + break; + case 0x06: /* PLL Lock Range Control */ + s->pll_range = value & 3; + break; + case 0x08: /* PLL Lock Synthesis Control 0 */ + s->pll_ctrl &= 0xf00; + s->pll_ctrl |= (value << 0) & 0x0ff; + break; + case 0x0a: /* PLL Lock Synthesis Control 1 */ + s->pll_ctrl &= 0x0ff; + s->pll_ctrl |= (value << 8) & 0xf00; + break; + case 0x0c: /* PLL Mode Control 0 */ + s->pll_mode = value & 0x77; + if ((value & 3) == 0 || (value & 3) == 3) + fprintf(stderr, "%s: wrong PLL Control bits (%i)\n", + __func__, value & 3); + break; + + case 0x0e: /* Clock-Source Select */ + s->clksel = value & 0xff; + break; + + case 0x10: /* Memory Controller Activate */ + s->memenable = value & 1; + break; + case 0x14: /* Memory Controller Bank 0 Status Flag */ + break; + + case 0x18: /* Auto-Refresh Interval Setting 0 */ + s->memrefresh &= 0xf00; + s->memrefresh |= (value << 0) & 0x0ff; + break; + case 0x1a: /* Auto-Refresh Interval Setting 1 */ + s->memrefresh &= 0x0ff; + s->memrefresh |= (value << 8) & 0xf00; + break; + + case 0x1c: /* Power-On Sequence Timing Control */ + s->timing[0] = value & 0x7f; + break; + case 0x1e: /* Timing Control 0 */ + s->timing[1] = value & 0x17; + break; + case 0x20: /* Timing Control 1 */ + s->timing[2] = value & 0x35; + break; + + case 0x24: /* Arbitration Priority Control */ + s->priority = value & 1; + break; + + case 0x28: /* LCD Panel Configuration */ + s->lcd_config = value & 0xff; + if (value & (1 << 7)) + fprintf(stderr, "%s: data swap not supported!\n", __func__); + break; + + case 0x2a: /* LCD Horizontal Display Width */ + s->x = value << 3; + break; + case 0x2c: /* LCD Horizontal Non-display Period */ + s->hndp = value & 0xff; + break; + case 0x2e: /* LCD Vertical Display Height 0 */ + s->y &= 0x300; + s->y |= (value << 0) & 0x0ff; + break; + case 0x30: /* LCD Vertical Display Height 1 */ + s->y &= 0x0ff; + s->y |= (value << 8) & 0x300; + break; + case 0x32: /* LCD Vertical Non-display Period */ + s->vndp = value & 0xff; + break; + case 0x34: /* LCD HS Pulse-width */ + s->hsync = value & 0xff; + break; + case 0x36: /* LCD HS Pulse Start Position */ + s->skipx = value & 0xff; + break; + case 0x38: /* LCD VS Pulse-width */ + s->vsync = value & 0xbf; + break; + case 0x3a: /* LCD VS Pulse Start Position */ + 
s->skipy = value & 0xff; + break; + + case 0x3c: /* PCLK Polarity */ + s->pclk = value & 0x82; + /* Affects calculation of s->hndp, s->hsync and s->skipx. */ + break; + + case 0x3e: /* High-speed Serial Interface Tx Configuration Port 0 */ + s->hssi_config[0] = value; + break; + case 0x40: /* High-speed Serial Interface Tx Configuration Port 1 */ + s->hssi_config[1] = value; + if (((value >> 4) & 3) == 3) + fprintf(stderr, "%s: Illegal active-data-links value\n", + __func__); + break; + case 0x42: /* High-speed Serial Interface Tx Mode */ + s->hssi_config[2] = value & 0xbd; + break; + + case 0x44: /* TV Display Configuration */ + s->tv_config = value & 0xfe; + break; + case 0x46 ... 0x4c: /* TV Vertical Blanking Interval Data bits 0 */ + s->tv_timing[(reg - 0x46) >> 1] = value; + break; + case 0x4e: /* VBI: Closed Caption / XDS Control / Status */ + s->vbi = value; + break; + case 0x50: /* TV Horizontal Start Position */ + s->tv_x = value; + break; + case 0x52: /* TV Vertical Start Position */ + s->tv_y = value & 0x7f; + break; + case 0x54: /* TV Test Pattern Setting */ + s->tv_test = value; + break; + case 0x56: /* TV Filter Setting */ + s->tv_filter_config = value & 0xbf; + break; + case 0x58: /* TV Filter Coefficient Index */ + s->tv_filter_idx = value & 0x1f; + break; + case 0x5a: /* TV Filter Coefficient Data */ + if (s->tv_filter_idx < 0x20) + s->tv_filter_coeff[s->tv_filter_idx ++] = value; + break; + + case 0x60: /* Input YUV/RGB Translate Mode 0 */ + s->yrc[0] = value & 0xb0; + break; + case 0x62: /* Input YUV/RGB Translate Mode 1 */ + s->yrc[1] = value & 0x30; + break; + case 0x64: /* U Data Fix */ + s->u = value & 0xff; + break; + case 0x66: /* V Data Fix */ + s->v = value & 0xff; + break; + + case 0x68: /* Display Mode */ + if ((s->mode ^ value) & 3) + s->invalidate = 1; + s->mode = value & 0xb7; + s->enable = value & 1; + s->blank = (value >> 1) & 1; + if (value & (1 << 4)) + fprintf(stderr, "%s: Macrovision enable attempt!\n", __func__); + break; + + case 0x6a: /* Special Effects */ + s->effect = value & 0xfb; + break; + + case 0x6c: /* Input Window X Start Position 0 */ + s->ix[0] &= 0x300; + s->ix[0] |= (value << 0) & 0x0ff; + break; + case 0x6e: /* Input Window X Start Position 1 */ + s->ix[0] &= 0x0ff; + s->ix[0] |= (value << 8) & 0x300; + break; + case 0x70: /* Input Window Y Start Position 0 */ + s->iy[0] &= 0x300; + s->iy[0] |= (value << 0) & 0x0ff; + break; + case 0x72: /* Input Window Y Start Position 1 */ + s->iy[0] &= 0x0ff; + s->iy[0] |= (value << 8) & 0x300; + break; + case 0x74: /* Input Window X End Position 0 */ + s->ix[1] &= 0x300; + s->ix[1] |= (value << 0) & 0x0ff; + break; + case 0x76: /* Input Window X End Position 1 */ + s->ix[1] &= 0x0ff; + s->ix[1] |= (value << 8) & 0x300; + break; + case 0x78: /* Input Window Y End Position 0 */ + s->iy[1] &= 0x300; + s->iy[1] |= (value << 0) & 0x0ff; + break; + case 0x7a: /* Input Window Y End Position 1 */ + s->iy[1] &= 0x0ff; + s->iy[1] |= (value << 8) & 0x300; + break; + case 0x7c: /* Output Window X Start Position 0 */ + s->ox[0] &= 0x300; + s->ox[0] |= (value << 0) & 0x0ff; + break; + case 0x7e: /* Output Window X Start Position 1 */ + s->ox[0] &= 0x0ff; + s->ox[0] |= (value << 8) & 0x300; + break; + case 0x80: /* Output Window Y Start Position 0 */ + s->oy[0] &= 0x300; + s->oy[0] |= (value << 0) & 0x0ff; + break; + case 0x82: /* Output Window Y Start Position 1 */ + s->oy[0] &= 0x0ff; + s->oy[0] |= (value << 8) & 0x300; + break; + case 0x84: /* Output Window X End Position 0 */ + s->ox[1] &= 0x300; + s->ox[1] 
|= (value << 0) & 0x0ff; + break; + case 0x86: /* Output Window X End Position 1 */ + s->ox[1] &= 0x0ff; + s->ox[1] |= (value << 8) & 0x300; + break; + case 0x88: /* Output Window Y End Position 0 */ + s->oy[1] &= 0x300; + s->oy[1] |= (value << 0) & 0x0ff; + break; + case 0x8a: /* Output Window Y End Position 1 */ + s->oy[1] &= 0x0ff; + s->oy[1] |= (value << 8) & 0x300; + break; + + case 0x8c: /* Input Data Format */ + s->iformat = value & 0xf; + s->bpp = blizzard_iformat_bpp[s->iformat]; + if (!s->bpp) + fprintf(stderr, "%s: Illegal or unsupported input format %x\n", + __func__, s->iformat); + break; + case 0x8e: /* Data Source Select */ + s->source = value & 7; + /* Currently all windows will be "destructive overlays". */ + if ((!(s->effect & (1 << 3)) && (s->ix[0] != s->ox[0] || + s->iy[0] != s->oy[0] || + s->ix[1] != s->ox[1] || + s->iy[1] != s->oy[1])) || + !((s->ix[1] - s->ix[0]) & (s->iy[1] - s->iy[0]) & + (s->ox[1] - s->ox[0]) & (s->oy[1] - s->oy[0]) & 1)) + fprintf(stderr, "%s: Illegal input/output window positions\n", + __func__); + + blizzard_transfer_setup(s); + break; + + case 0x90: /* Display Memory Data Port */ + if (!s->data.len && !blizzard_transfer_setup(s)) + break; + + *s->data.ptr ++ = value; + if (-- s->data.len == 0) + blizzard_window(s); + break; + + case 0xa8: /* Border Color 0 */ + s->border_r = value; + break; + case 0xaa: /* Border Color 1 */ + s->border_g = value; + break; + case 0xac: /* Border Color 2 */ + s->border_b = value; + break; + + case 0xb4: /* Gamma Correction Enable */ + s->gamma_config = value & 0x87; + break; + case 0xb6: /* Gamma Correction Table Index */ + s->gamma_idx = value; + break; + case 0xb8: /* Gamma Correction Table Data */ + s->gamma_lut[s->gamma_idx ++] = value; + break; + + case 0xba: /* 3x3 Matrix Enable */ + s->matrix_ena = value & 1; + break; + case 0xbc ... 0xde: /* Coefficient Registers */ + s->matrix_coeff[(reg - 0xbc) >> 1] = value & ((reg & 2) ? 
0x80 : 0xff); + break; + case 0xe0: /* 3x3 Matrix Red Offset */ + s->matrix_r = value; + break; + case 0xe2: /* 3x3 Matrix Green Offset */ + s->matrix_g = value; + break; + case 0xe4: /* 3x3 Matrix Blue Offset */ + s->matrix_b = value; + break; + + case 0xe6: /* Power-save */ + s->pm = value & 0x83; + if (value & s->mode & 1) + fprintf(stderr, "%s: The display must be disabled before entering " + "Standby Mode\n", __func__); + break; + case 0xe8: /* Non-display Period Control / Status */ + s->status = value & 0x1b; + break; + case 0xea: /* RGB Interface Control */ + s->rgbgpio_dir = value & 0x8f; + break; + case 0xec: /* RGB Interface Status */ + s->rgbgpio = value & 0xcf; + break; + case 0xee: /* General-purpose IO Pins Configuration */ + s->gpio_dir = value; + break; + case 0xf0: /* General-purpose IO Pins Status / Control */ + s->gpio = value; + break; + case 0xf2: /* GPIO Positive Edge Interrupt Trigger */ + s->gpio_edge[0] = value; + break; + case 0xf4: /* GPIO Negative Edge Interrupt Trigger */ + s->gpio_edge[1] = value; + break; + case 0xf6: /* GPIO Interrupt Status */ + s->gpio_irq &= value; + break; + case 0xf8: /* GPIO Pull-down Control */ + s->gpio_pdown = value; + break; + + default: + fprintf(stderr, "%s: unknown register %02x\n", __func__, reg); + break; + } +} + +uint16_t s1d13745_read(void *opaque, int dc) +{ + BlizzardState *s = (BlizzardState *) opaque; + uint16_t value = blizzard_reg_read(s, s->reg); + + if (s->swallow -- > 0) + return 0; + if (dc) + s->reg ++; + + return value; +} + +void s1d13745_write(void *opaque, int dc, uint16_t value) +{ + BlizzardState *s = (BlizzardState *) opaque; + + if (s->swallow -- > 0) + return; + if (dc) { + blizzard_reg_write(s, s->reg, value); + + if (s->reg != 0x90 && s->reg != 0x5a && s->reg != 0xb8) + s->reg += 2; + } else + s->reg = value & 0xff; +} + +void s1d13745_write_block(void *opaque, int dc, + void *buf, size_t len, int pitch) +{ + BlizzardState *s = (BlizzardState *) opaque; + + while (len > 0) { + if (s->reg == 0x90 && dc && + (s->data.len || blizzard_transfer_setup(s)) && + len >= (s->data.len << 1)) { + len -= s->data.len << 1; + s->data.len = 0; + s->data.data = buf; + if (pitch) + s->data.pitch = pitch; + blizzard_window(s); + s->data.data = s->data.buf; + continue; + } + + s1d13745_write(opaque, dc, *(uint16_t *) buf); + len -= 2; + buf += 2; + } +} + +static void blizzard_update_display(void *opaque) +{ + BlizzardState *s = (BlizzardState *) opaque; + DisplaySurface *surface = qemu_console_surface(s->con); + int y, bypp, bypl, bwidth; + uint8_t *src, *dst; + + if (!s->enable) + return; + + if (s->x != surface_width(surface) || s->y != surface_height(surface)) { + s->invalidate = 1; + qemu_console_resize(s->con, s->x, s->y); + surface = qemu_console_surface(s->con); + } + + if (s->invalidate) { + s->invalidate = 0; + + if (s->blank) { + bypp = surface_bytes_per_pixel(surface); + memset(surface_data(surface), 0, bypp * s->x * s->y); + return; + } + + s->mx[0] = 0; + s->mx[1] = s->x; + s->my[0] = 0; + s->my[1] = s->y; + } + + if (s->mx[1] <= s->mx[0]) + return; + + bypp = surface_bytes_per_pixel(surface); + bypl = bypp * s->x; + bwidth = bypp * (s->mx[1] - s->mx[0]); + y = s->my[0]; + src = s->fb + bypl * y + bypp * s->mx[0]; + dst = surface_data(surface) + bypl * y + bypp * s->mx[0]; + for (; y < s->my[1]; y ++, src += bypl, dst += bypl) + memcpy(dst, src, bwidth); + + dpy_gfx_update(s->con, s->mx[0], s->my[0], + s->mx[1] - s->mx[0], y - s->my[0]); + + s->mx[0] = s->x; + s->mx[1] = 0; + s->my[0] = s->y; + s->my[1] = 0; 
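+    /* Editor's note (annotation, not part of the original patch): the
+     * dirty-rectangle extents are deliberately reset to an empty
+     * (inverted) state here -- minima at the full screen size, maxima
+     * at zero -- so that later window transfers can grow them again
+     * with plain min/max updates; the "mx[1] <= mx[0]" check above
+     * treats this inverted state as "nothing to redraw". */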
+} + +static void blizzard_draw_line16_32(uint32_t *dest, + const uint16_t *src, unsigned int width) +{ + uint16_t data; + unsigned int r, g, b; + const uint16_t *end = (const void *) src + width; + while (src < end) { + data = *src ++; + b = extract16(data, 0, 5) << 3; + g = extract16(data, 5, 6) << 2; + r = extract16(data, 11, 5) << 3; + *dest++ = rgb_to_pixel32(r, g, b); + } +} + +static void blizzard_draw_line24mode1_32(uint32_t *dest, + const uint8_t *src, unsigned int width) +{ + /* TODO: check whether SDL 24-bit planes use the same layout; + * if they do, use memcpy */ + unsigned int r[2], g[2], b[2]; + const uint8_t *end = src + width; + while (src < end) { + g[0] = *src ++; + r[0] = *src ++; + r[1] = *src ++; + b[0] = *src ++; + *dest++ = rgb_to_pixel32(r[0], g[0], b[0]); + b[1] = *src ++; + g[1] = *src ++; + *dest++ = rgb_to_pixel32(r[1], g[1], b[1]); + } +} + +static void blizzard_draw_line24mode2_32(uint32_t *dest, + const uint8_t *src, unsigned int width) +{ + unsigned int r, g, b; + const uint8_t *end = src + width; + while (src < end) { + r = *src ++; + src ++; + b = *src ++; + g = *src ++; + *dest++ = rgb_to_pixel32(r, g, b); + } +} + +/* No rotation */ +static blizzard_fn_t blizzard_draw_fn_32[0x10] = { + NULL, + /* RGB 5:6:5 */ + (blizzard_fn_t) blizzard_draw_line16_32, + /* RGB 6:6:6 mode 1 */ + (blizzard_fn_t) blizzard_draw_line24mode1_32, + /* RGB 8:8:8 mode 1 */ + (blizzard_fn_t) blizzard_draw_line24mode1_32, + NULL, NULL, + /* RGB 6:6:6 mode 2 */ + (blizzard_fn_t) blizzard_draw_line24mode2_32, + /* RGB 8:8:8 mode 2 */ + (blizzard_fn_t) blizzard_draw_line24mode2_32, + /* YUV 4:2:2 */ + NULL, + /* YUV 4:2:0 */ + NULL, + NULL, NULL, NULL, NULL, NULL, NULL, +}; + +/* 90deg, 180deg and 270deg rotation */ +static blizzard_fn_t blizzard_draw_fn_r_32[0x10] = { + /* TODO */ + [0 ... 0xf] = NULL, +}; + +static const GraphicHwOps blizzard_ops = { + .invalidate = blizzard_invalidate_display, + .gfx_update = blizzard_update_display, +}; + +void *s1d13745_init(qemu_irq gpio_int) +{ + BlizzardState *s = (BlizzardState *) g_malloc0(sizeof(*s)); + DisplaySurface *surface; + + s->fb = g_malloc(0x180000); + + s->con = graphic_console_init(NULL, 0, &blizzard_ops, s); + surface = qemu_console_surface(s->con); + + assert(surface_bits_per_pixel(surface) == 32); + + s->line_fn_tab[0] = blizzard_draw_fn_32; + s->line_fn_tab[1] = blizzard_draw_fn_r_32; + + blizzard_reset(s); + + return s; +} diff --git a/hw/display/bochs-display.c b/hw/display/bochs-display.c new file mode 100644 index 000000000..8ed734b19 --- /dev/null +++ b/hw/display/bochs-display.c @@ -0,0 +1,390 @@ +/* + * QEMU PCI bochs display adapter. + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory.
+ */ + +#include "qemu/osdep.h" +#include "qemu/module.h" +#include "qemu/units.h" +#include "hw/pci/pci.h" +#include "hw/qdev-properties.h" +#include "migration/vmstate.h" +#include "hw/display/bochs-vbe.h" +#include "hw/display/edid.h" + +#include "qapi/error.h" + +#include "ui/console.h" +#include "ui/qemu-pixman.h" +#include "qom/object.h" + +typedef struct BochsDisplayMode { + pixman_format_code_t format; + uint32_t bytepp; + uint32_t width; + uint32_t height; + uint32_t stride; + uint64_t offset; + uint64_t size; +} BochsDisplayMode; + +struct BochsDisplayState { + /* parent */ + PCIDevice pci; + + /* device elements */ + QemuConsole *con; + MemoryRegion vram; + MemoryRegion mmio; + MemoryRegion vbe; + MemoryRegion qext; + MemoryRegion edid; + + /* device config */ + uint64_t vgamem; + bool enable_edid; + qemu_edid_info edid_info; + uint8_t edid_blob[256]; + + /* device registers */ + uint16_t vbe_regs[VBE_DISPI_INDEX_NB]; + bool big_endian_fb; + + /* device state */ + BochsDisplayMode mode; +}; + +#define TYPE_BOCHS_DISPLAY "bochs-display" +OBJECT_DECLARE_SIMPLE_TYPE(BochsDisplayState, BOCHS_DISPLAY) + +static const VMStateDescription vmstate_bochs_display = { + .name = "bochs-display", + .fields = (VMStateField[]) { + VMSTATE_PCI_DEVICE(pci, BochsDisplayState), + VMSTATE_UINT16_ARRAY(vbe_regs, BochsDisplayState, VBE_DISPI_INDEX_NB), + VMSTATE_BOOL(big_endian_fb, BochsDisplayState), + VMSTATE_END_OF_LIST() + } +}; + +static uint64_t bochs_display_vbe_read(void *ptr, hwaddr addr, + unsigned size) +{ + BochsDisplayState *s = ptr; + unsigned int index = addr >> 1; + + switch (index) { + case VBE_DISPI_INDEX_ID: + return VBE_DISPI_ID5; + case VBE_DISPI_INDEX_VIDEO_MEMORY_64K: + return s->vgamem / (64 * KiB); + } + + if (index >= ARRAY_SIZE(s->vbe_regs)) { + return -1; + } + return s->vbe_regs[index]; +} + +static void bochs_display_vbe_write(void *ptr, hwaddr addr, + uint64_t val, unsigned size) +{ + BochsDisplayState *s = ptr; + unsigned int index = addr >> 1; + + if (index >= ARRAY_SIZE(s->vbe_regs)) { + return; + } + s->vbe_regs[index] = val; +} + +static const MemoryRegionOps bochs_display_vbe_ops = { + .read = bochs_display_vbe_read, + .write = bochs_display_vbe_write, + .valid.min_access_size = 1, + .valid.max_access_size = 4, + .impl.min_access_size = 2, + .impl.max_access_size = 2, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static uint64_t bochs_display_qext_read(void *ptr, hwaddr addr, + unsigned size) +{ + BochsDisplayState *s = ptr; + + switch (addr) { + case PCI_VGA_QEXT_REG_SIZE: + return PCI_VGA_QEXT_SIZE; + case PCI_VGA_QEXT_REG_BYTEORDER: + return s->big_endian_fb ? 
+ PCI_VGA_QEXT_BIG_ENDIAN : PCI_VGA_QEXT_LITTLE_ENDIAN; + default: + return 0; + } +} + +static void bochs_display_qext_write(void *ptr, hwaddr addr, + uint64_t val, unsigned size) +{ + BochsDisplayState *s = ptr; + + switch (addr) { + case PCI_VGA_QEXT_REG_BYTEORDER: + if (val == PCI_VGA_QEXT_BIG_ENDIAN) { + s->big_endian_fb = true; + } + if (val == PCI_VGA_QEXT_LITTLE_ENDIAN) { + s->big_endian_fb = false; + } + break; + } +} + +static const MemoryRegionOps bochs_display_qext_ops = { + .read = bochs_display_qext_read, + .write = bochs_display_qext_write, + .valid.min_access_size = 4, + .valid.max_access_size = 4, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static int bochs_display_get_mode(BochsDisplayState *s, + BochsDisplayMode *mode) +{ + uint16_t *vbe = s->vbe_regs; + uint32_t virt_width; + + if (!(vbe[VBE_DISPI_INDEX_ENABLE] & VBE_DISPI_ENABLED)) { + return -1; + } + + memset(mode, 0, sizeof(*mode)); + switch (vbe[VBE_DISPI_INDEX_BPP]) { + case 16: + /* best effort: support native endianness only */ + mode->format = PIXMAN_r5g6b5; + mode->bytepp = 2; + break; + case 32: + mode->format = s->big_endian_fb + ? PIXMAN_BE_x8r8g8b8 + : PIXMAN_LE_x8r8g8b8; + mode->bytepp = 4; + break; + default: + return -1; + } + + mode->width = vbe[VBE_DISPI_INDEX_XRES]; + mode->height = vbe[VBE_DISPI_INDEX_YRES]; + virt_width = vbe[VBE_DISPI_INDEX_VIRT_WIDTH]; + if (virt_width < mode->width) { + virt_width = mode->width; + } + mode->stride = virt_width * mode->bytepp; + mode->size = (uint64_t)mode->stride * mode->height; + mode->offset = ((uint64_t)vbe[VBE_DISPI_INDEX_X_OFFSET] * mode->bytepp + + (uint64_t)vbe[VBE_DISPI_INDEX_Y_OFFSET] * mode->stride); + + if (mode->width < 64 || mode->height < 64) { + return -1; + } + if (mode->offset + mode->size > s->vgamem) { + return -1; + } + return 0; +} + +static void bochs_display_update(void *opaque) +{ + BochsDisplayState *s = opaque; + DirtyBitmapSnapshot *snap = NULL; + bool full_update = false; + BochsDisplayMode mode; + DisplaySurface *ds; + uint8_t *ptr; + bool dirty; + int y, ys, ret; + + ret = bochs_display_get_mode(s, &mode); + if (ret < 0) { + /* no (valid) video mode */ + return; + } + + if (memcmp(&s->mode, &mode, sizeof(mode)) != 0) { + /* video mode switch */ + s->mode = mode; + ptr = memory_region_get_ram_ptr(&s->vram); + ds = qemu_create_displaysurface_from(mode.width, + mode.height, + mode.format, + mode.stride, + ptr + mode.offset); + dpy_gfx_replace_surface(s->con, ds); + full_update = true; + } + + if (full_update) { + dpy_gfx_update_full(s->con); + } else { + snap = memory_region_snapshot_and_clear_dirty(&s->vram, + mode.offset, mode.size, + DIRTY_MEMORY_VGA); + ys = -1; + for (y = 0; y < mode.height; y++) { + dirty = memory_region_snapshot_get_dirty(&s->vram, snap, + mode.offset + mode.stride * y, + mode.stride); + if (dirty && ys < 0) { + ys = y; + } + if (!dirty && ys >= 0) { + dpy_gfx_update(s->con, 0, ys, + mode.width, y - ys); + ys = -1; + } + } + if (ys >= 0) { + dpy_gfx_update(s->con, 0, ys, + mode.width, y - ys); + } + + g_free(snap); + } +} + +static const GraphicHwOps bochs_display_gfx_ops = { + .gfx_update = bochs_display_update, +}; + +static void bochs_display_realize(PCIDevice *dev, Error **errp) +{ + BochsDisplayState *s = BOCHS_DISPLAY(dev); + Object *obj = OBJECT(dev); + int ret; + + if (s->vgamem < 4 * MiB) { + error_setg(errp, "bochs-display: video memory too small"); + return; + } + if (s->vgamem > 256 * MiB) { + error_setg(errp, "bochs-display: video memory too big"); + return; + } + s->vgamem = pow2ceil(s->vgamem); +
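+    /* Editor's note (annotation, not part of the original patch):
+     * pow2ceil() rounds the configured size up to the next power of
+     * two, since PCI BAR sizes must be powers of two. E.g. a 20 MiB
+     * "vgamem" property yields a 32 MiB BAR 0, while the 16 MiB
+     * default is already a power of two and stays as-is. */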
+ s->con = graphic_console_init(DEVICE(dev), 0, &bochs_display_gfx_ops, s); + + memory_region_init_ram(&s->vram, obj, "bochs-display-vram", s->vgamem, + &error_fatal); + memory_region_init_io(&s->vbe, obj, &bochs_display_vbe_ops, s, + "bochs dispi interface", PCI_VGA_BOCHS_SIZE); + memory_region_init_io(&s->qext, obj, &bochs_display_qext_ops, s, + "qemu extended regs", PCI_VGA_QEXT_SIZE); + + memory_region_init_io(&s->mmio, obj, &unassigned_io_ops, NULL, + "bochs-display-mmio", PCI_VGA_MMIO_SIZE); + memory_region_add_subregion(&s->mmio, PCI_VGA_BOCHS_OFFSET, &s->vbe); + memory_region_add_subregion(&s->mmio, PCI_VGA_QEXT_OFFSET, &s->qext); + + pci_set_byte(&s->pci.config[PCI_REVISION_ID], 2); + pci_register_bar(&s->pci, 0, PCI_BASE_ADDRESS_MEM_PREFETCH, &s->vram); + pci_register_bar(&s->pci, 2, PCI_BASE_ADDRESS_SPACE_MEMORY, &s->mmio); + + if (s->enable_edid) { + qemu_edid_generate(s->edid_blob, sizeof(s->edid_blob), &s->edid_info); + qemu_edid_region_io(&s->edid, obj, s->edid_blob, sizeof(s->edid_blob)); + memory_region_add_subregion(&s->mmio, 0, &s->edid); + } + + if (pci_bus_is_express(pci_get_bus(dev))) { + ret = pcie_endpoint_cap_init(dev, 0x80); + assert(ret > 0); + } else { + dev->cap_present &= ~QEMU_PCI_CAP_EXPRESS; + } + + memory_region_set_log(&s->vram, true, DIRTY_MEMORY_VGA); +} + +static bool bochs_display_get_big_endian_fb(Object *obj, Error **errp) +{ + BochsDisplayState *s = BOCHS_DISPLAY(obj); + + return s->big_endian_fb; +} + +static void bochs_display_set_big_endian_fb(Object *obj, bool value, + Error **errp) +{ + BochsDisplayState *s = BOCHS_DISPLAY(obj); + + s->big_endian_fb = value; +} + +static void bochs_display_init(Object *obj) +{ + PCIDevice *dev = PCI_DEVICE(obj); + + /* Expose framebuffer byteorder via QOM */ + object_property_add_bool(obj, "big-endian-framebuffer", + bochs_display_get_big_endian_fb, + bochs_display_set_big_endian_fb); + + dev->cap_present |= QEMU_PCI_CAP_EXPRESS; +} + +static void bochs_display_exit(PCIDevice *dev) +{ + BochsDisplayState *s = BOCHS_DISPLAY(dev); + + graphic_console_close(s->con); +} + +static Property bochs_display_properties[] = { + DEFINE_PROP_SIZE("vgamem", BochsDisplayState, vgamem, 16 * MiB), + DEFINE_PROP_BOOL("edid", BochsDisplayState, enable_edid, true), + DEFINE_EDID_PROPERTIES(BochsDisplayState, edid_info), + DEFINE_PROP_END_OF_LIST(), +}; + +static void bochs_display_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + + k->class_id = PCI_CLASS_DISPLAY_OTHER; + k->vendor_id = PCI_VENDOR_ID_QEMU; + k->device_id = PCI_DEVICE_ID_QEMU_VGA; + + k->realize = bochs_display_realize; + k->romfile = "vgabios-bochs-display.bin"; + k->exit = bochs_display_exit; + dc->vmsd = &vmstate_bochs_display; + device_class_set_props(dc, bochs_display_properties); + set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories); +} + +static const TypeInfo bochs_display_type_info = { + .name = TYPE_BOCHS_DISPLAY, + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(BochsDisplayState), + .instance_init = bochs_display_init, + .class_init = bochs_display_class_init, + .interfaces = (InterfaceInfo[]) { + { INTERFACE_PCIE_DEVICE }, + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { }, + }, +}; + +static void bochs_display_register_types(void) +{ + type_register_static(&bochs_display_type_info); +} + +type_init(bochs_display_register_types) diff --git a/hw/display/cg3.c b/hw/display/cg3.c new file mode 100644 index 000000000..4b7e78d91 --- /dev/null +++ b/hw/display/cg3.c @@ -0,0 
+1,396 @@ +/* + * QEMU CG3 Frame buffer + * + * Copyright (c) 2012 Bob Breuer + * Copyright (c) 2013 Mark Cave-Ayland + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "qemu-common.h" +#include "qemu/datadir.h" +#include "qapi/error.h" +#include "qemu/error-report.h" +#include "ui/console.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "hw/irq.h" +#include "hw/loader.h" +#include "hw/qdev-properties.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "trace.h" +#include "qom/object.h" + +/* Change to 1 to enable debugging */ +#define DEBUG_CG3 0 + +#define CG3_ROM_FILE "QEMU,cgthree.bin" +#define FCODE_MAX_ROM_SIZE 0x10000 + +#define CG3_REG_SIZE 0x20 + +#define CG3_REG_BT458_ADDR 0x0 +#define CG3_REG_BT458_COLMAP 0x4 +#define CG3_REG_FBC_CTRL 0x10 +#define CG3_REG_FBC_STATUS 0x11 +#define CG3_REG_FBC_CURSTART 0x12 +#define CG3_REG_FBC_CUREND 0x13 +#define CG3_REG_FBC_VCTRL 0x14 + +/* Control register flags */ +#define CG3_CR_ENABLE_INTS 0x80 + +/* Status register flags */ +#define CG3_SR_PENDING_INT 0x80 +#define CG3_SR_1152_900_76_B 0x60 +#define CG3_SR_ID_COLOR 0x01 + +#define CG3_VRAM_SIZE 0x100000 +#define CG3_VRAM_OFFSET 0x800000 + +#define TYPE_CG3 "cgthree" +OBJECT_DECLARE_SIMPLE_TYPE(CG3State, CG3) + +struct CG3State { + SysBusDevice parent_obj; + + QemuConsole *con; + qemu_irq irq; + hwaddr prom_addr; + MemoryRegion vram_mem; + MemoryRegion rom; + MemoryRegion reg; + uint32_t vram_size; + int full_update; + uint8_t regs[16]; + uint8_t r[256], g[256], b[256]; + uint16_t width, height, depth; + uint8_t dac_index, dac_state; +}; + +static void cg3_update_display(void *opaque) +{ + CG3State *s = opaque; + DisplaySurface *surface = qemu_console_surface(s->con); + const uint8_t *pix; + uint32_t *data; + uint32_t dval; + int x, y, y_start; + unsigned int width, height; + ram_addr_t page; + DirtyBitmapSnapshot *snap = NULL; + + if (surface_bits_per_pixel(surface) != 32) { + return; + } + width = s->width; + height = s->height; + + y_start = -1; + pix = memory_region_get_ram_ptr(&s->vram_mem); + data = (uint32_t *)surface_data(surface); + + if (!s->full_update) { + snap = memory_region_snapshot_and_clear_dirty(&s->vram_mem, 0x0, + memory_region_size(&s->vram_mem), + DIRTY_MEMORY_VGA); + } + + for (y = 0; y < height; y++) { + int update; + + page = (ram_addr_t)y * width; + + if (s->full_update) { + update = 1; + } else { + update = memory_region_snapshot_get_dirty(&s->vram_mem, snap, page, + 
width); + } + + if (update) { + if (y_start < 0) { + y_start = y; + } + + for (x = 0; x < width; x++) { + dval = *pix++; + dval = (s->r[dval] << 16) | (s->g[dval] << 8) | s->b[dval]; + *data++ = dval; + } + } else { + if (y_start >= 0) { + dpy_gfx_update(s->con, 0, y_start, width, y - y_start); + y_start = -1; + } + pix += width; + data += width; + } + } + s->full_update = 0; + if (y_start >= 0) { + dpy_gfx_update(s->con, 0, y_start, width, y - y_start); + } + /* vsync interrupt? */ + if (s->regs[0] & CG3_CR_ENABLE_INTS) { + s->regs[1] |= CG3_SR_PENDING_INT; + qemu_irq_raise(s->irq); + } + g_free(snap); +} + +static void cg3_invalidate_display(void *opaque) +{ + CG3State *s = opaque; + + memory_region_set_dirty(&s->vram_mem, 0, CG3_VRAM_SIZE); +} + +static uint64_t cg3_reg_read(void *opaque, hwaddr addr, unsigned size) +{ + CG3State *s = opaque; + int val; + + switch (addr) { + case CG3_REG_BT458_ADDR: + case CG3_REG_BT458_COLMAP: + val = 0; + break; + case CG3_REG_FBC_CTRL: + val = s->regs[0]; + break; + case CG3_REG_FBC_STATUS: + /* monitor ID 6, board type = 1 (color) */ + val = s->regs[1] | CG3_SR_1152_900_76_B | CG3_SR_ID_COLOR; + break; + case CG3_REG_FBC_CURSTART ... CG3_REG_SIZE - 1: + val = s->regs[addr - 0x10]; + break; + default: + qemu_log_mask(LOG_UNIMP, + "cg3: Unimplemented register read " + "reg 0x%" HWADDR_PRIx " size 0x%x\n", + addr, size); + val = 0; + break; + } + trace_cg3_read(addr, val, size); + + return val; +} + +static void cg3_reg_write(void *opaque, hwaddr addr, uint64_t val, + unsigned size) +{ + CG3State *s = opaque; + uint8_t regval; + int i; + + trace_cg3_write(addr, val, size); + switch (addr) { + case CG3_REG_BT458_ADDR: + s->dac_index = val; + s->dac_state = 0; + break; + case CG3_REG_BT458_COLMAP: + /* This register can be written to as either a long word or a byte */ + if (size == 1) { + val <<= 24; + } + + for (i = 0; i < size; i++) { + regval = val >> 24; + + switch (s->dac_state) { + case 0: + s->r[s->dac_index] = regval; + s->dac_state++; + break; + case 1: + s->g[s->dac_index] = regval; + s->dac_state++; + break; + case 2: + s->b[s->dac_index] = regval; + /* Index autoincrement */ + s->dac_index = (s->dac_index + 1) & 0xff; + /* fall through */ + default: + s->dac_state = 0; + break; + } + val <<= 8; + } + s->full_update = 1; + break; + case CG3_REG_FBC_CTRL: + s->regs[0] = val; + break; + case CG3_REG_FBC_STATUS: + if (s->regs[1] & CG3_SR_PENDING_INT) { + /* clear interrupt */ + s->regs[1] &= ~CG3_SR_PENDING_INT; + qemu_irq_lower(s->irq); + } + break; + case CG3_REG_FBC_CURSTART ... 
CG3_REG_SIZE - 1: + s->regs[addr - 0x10] = val; + break; + default: + qemu_log_mask(LOG_UNIMP, + "cg3: Unimplemented register write " + "reg 0x%" HWADDR_PRIx " size 0x%x value 0x%" PRIx64 "\n", + addr, size, val); + break; + } +} + +static const MemoryRegionOps cg3_reg_ops = { + .read = cg3_reg_read, + .write = cg3_reg_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + .min_access_size = 1, + .max_access_size = 4, + }, +}; + +static const GraphicHwOps cg3_ops = { + .invalidate = cg3_invalidate_display, + .gfx_update = cg3_update_display, +}; + +static void cg3_initfn(Object *obj) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + CG3State *s = CG3(obj); + + memory_region_init_rom_nomigrate(&s->rom, obj, "cg3.prom", + FCODE_MAX_ROM_SIZE, &error_fatal); + sysbus_init_mmio(sbd, &s->rom); + + memory_region_init_io(&s->reg, obj, &cg3_reg_ops, s, "cg3.reg", + CG3_REG_SIZE); + sysbus_init_mmio(sbd, &s->reg); +} + +static void cg3_realizefn(DeviceState *dev, Error **errp) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + CG3State *s = CG3(dev); + int ret; + char *fcode_filename; + + /* FCode ROM */ + vmstate_register_ram_global(&s->rom); + fcode_filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, CG3_ROM_FILE); + if (fcode_filename) { + ret = load_image_mr(fcode_filename, &s->rom); + g_free(fcode_filename); + if (ret < 0 || ret > FCODE_MAX_ROM_SIZE) { + warn_report("cg3: could not load prom '%s'", CG3_ROM_FILE); + } + } + + memory_region_init_ram(&s->vram_mem, NULL, "cg3.vram", s->vram_size, + &error_fatal); + memory_region_set_log(&s->vram_mem, true, DIRTY_MEMORY_VGA); + sysbus_init_mmio(sbd, &s->vram_mem); + + sysbus_init_irq(sbd, &s->irq); + + s->con = graphic_console_init(dev, 0, &cg3_ops, s); + qemu_console_resize(s->con, s->width, s->height); +} + +static int vmstate_cg3_post_load(void *opaque, int version_id) +{ + CG3State *s = opaque; + + cg3_invalidate_display(s); + + return 0; +} + +static const VMStateDescription vmstate_cg3 = { + .name = "cg3", + .version_id = 1, + .minimum_version_id = 1, + .post_load = vmstate_cg3_post_load, + .fields = (VMStateField[]) { + VMSTATE_UINT16(height, CG3State), + VMSTATE_UINT16(width, CG3State), + VMSTATE_UINT16(depth, CG3State), + VMSTATE_BUFFER(r, CG3State), + VMSTATE_BUFFER(g, CG3State), + VMSTATE_BUFFER(b, CG3State), + VMSTATE_UINT8(dac_index, CG3State), + VMSTATE_UINT8(dac_state, CG3State), + VMSTATE_END_OF_LIST() + } +}; + +static void cg3_reset(DeviceState *d) +{ + CG3State *s = CG3(d); + + /* Initialize palette */ + memset(s->r, 0, 256); + memset(s->g, 0, 256); + memset(s->b, 0, 256); + + s->dac_state = 0; + s->full_update = 1; + qemu_irq_lower(s->irq); +} + +static Property cg3_properties[] = { + DEFINE_PROP_UINT32("vram-size", CG3State, vram_size, -1), + DEFINE_PROP_UINT16("width", CG3State, width, -1), + DEFINE_PROP_UINT16("height", CG3State, height, -1), + DEFINE_PROP_UINT16("depth", CG3State, depth, -1), + DEFINE_PROP_END_OF_LIST(), +}; + +static void cg3_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = cg3_realizefn; + dc->reset = cg3_reset; + dc->vmsd = &vmstate_cg3; + device_class_set_props(dc, cg3_properties); +} + +static const TypeInfo cg3_info = { + .name = TYPE_CG3, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(CG3State), + .instance_init = cg3_initfn, + .class_init = cg3_class_init, +}; + +static void cg3_register_types(void) +{ + type_register_static(&cg3_info); +} + +type_init(cg3_register_types) diff --git a/hw/display/cirrus_vga.c b/hw/display/cirrus_vga.c new 
file mode 100644 index 000000000..fdca6ca65 --- /dev/null +++ b/hw/display/cirrus_vga.c @@ -0,0 +1,3024 @@ +/* + * QEMU Cirrus CLGD 54xx VGA Emulator. + * + * Copyright (c) 2004 Fabrice Bellard + * Copyright (c) 2004 Makoto Suzuki (suzu) + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ +/* + * Reference: Finn Thogersons' VGADOC4b: + * + * http://web.archive.org/web/20021019054927/http://home.worldonline.dk/finth/ + * + * VGADOC4b.ZIP content available at: + * + * https://pdos.csail.mit.edu/6.828/2005/readings/hardware/vgadoc + */ + +#include "qemu/osdep.h" +#include "qemu/module.h" +#include "qemu/units.h" +#include "qemu/log.h" +#include "sysemu/reset.h" +#include "qapi/error.h" +#include "trace.h" +#include "hw/pci/pci.h" +#include "hw/qdev-properties.h" +#include "migration/vmstate.h" +#include "ui/pixel_ops.h" +#include "cirrus_vga_internal.h" +#include "qom/object.h" + +/* + * TODO: + * - destination write mask support not complete (bits 5..7) + * - optimize linear mappings + * - optimize bitblt functions + */ + +//#define DEBUG_CIRRUS + +/*************************************** + * + * definitions + * + ***************************************/ + +// sequencer 0x07 +#define CIRRUS_SR7_BPP_VGA 0x00 +#define CIRRUS_SR7_BPP_SVGA 0x01 +#define CIRRUS_SR7_BPP_MASK 0x0e +#define CIRRUS_SR7_BPP_8 0x00 +#define CIRRUS_SR7_BPP_16_DOUBLEVCLK 0x02 +#define CIRRUS_SR7_BPP_24 0x04 +#define CIRRUS_SR7_BPP_16 0x06 +#define CIRRUS_SR7_BPP_32 0x08 +#define CIRRUS_SR7_ISAADDR_MASK 0xe0 + +// sequencer 0x0f +#define CIRRUS_MEMSIZE_512k 0x08 +#define CIRRUS_MEMSIZE_1M 0x10 +#define CIRRUS_MEMSIZE_2M 0x18 +#define CIRRUS_MEMFLAGS_BANKSWITCH 0x80 // bank switching is enabled. + +// sequencer 0x12 +#define CIRRUS_CURSOR_SHOW 0x01 +#define CIRRUS_CURSOR_HIDDENPEL 0x02 +#define CIRRUS_CURSOR_LARGE 0x04 // 64x64 if set, 32x32 if clear + +// sequencer 0x17 +#define CIRRUS_BUSTYPE_VLBFAST 0x10 +#define CIRRUS_BUSTYPE_PCI 0x20 +#define CIRRUS_BUSTYPE_VLBSLOW 0x30 +#define CIRRUS_BUSTYPE_ISA 0x38 +#define CIRRUS_MMIO_ENABLE 0x04 +#define CIRRUS_MMIO_USE_PCIADDR 0x40 // 0xb8000 if cleared. 
+#define CIRRUS_MEMSIZEEXT_DOUBLE 0x80 + +// control 0x0b +#define CIRRUS_BANKING_DUAL 0x01 +#define CIRRUS_BANKING_GRANULARITY_16K 0x20 // set:16k, clear:4k + +// control 0x30 +#define CIRRUS_BLTMODE_BACKWARDS 0x01 +#define CIRRUS_BLTMODE_MEMSYSDEST 0x02 +#define CIRRUS_BLTMODE_MEMSYSSRC 0x04 +#define CIRRUS_BLTMODE_TRANSPARENTCOMP 0x08 +#define CIRRUS_BLTMODE_PATTERNCOPY 0x40 +#define CIRRUS_BLTMODE_COLOREXPAND 0x80 +#define CIRRUS_BLTMODE_PIXELWIDTHMASK 0x30 +#define CIRRUS_BLTMODE_PIXELWIDTH8 0x00 +#define CIRRUS_BLTMODE_PIXELWIDTH16 0x10 +#define CIRRUS_BLTMODE_PIXELWIDTH24 0x20 +#define CIRRUS_BLTMODE_PIXELWIDTH32 0x30 + +// control 0x31 +#define CIRRUS_BLT_BUSY 0x01 +#define CIRRUS_BLT_START 0x02 +#define CIRRUS_BLT_RESET 0x04 +#define CIRRUS_BLT_FIFOUSED 0x10 +#define CIRRUS_BLT_AUTOSTART 0x80 + +// control 0x32 +#define CIRRUS_ROP_0 0x00 +#define CIRRUS_ROP_SRC_AND_DST 0x05 +#define CIRRUS_ROP_NOP 0x06 +#define CIRRUS_ROP_SRC_AND_NOTDST 0x09 +#define CIRRUS_ROP_NOTDST 0x0b +#define CIRRUS_ROP_SRC 0x0d +#define CIRRUS_ROP_1 0x0e +#define CIRRUS_ROP_NOTSRC_AND_DST 0x50 +#define CIRRUS_ROP_SRC_XOR_DST 0x59 +#define CIRRUS_ROP_SRC_OR_DST 0x6d +#define CIRRUS_ROP_NOTSRC_OR_NOTDST 0x90 +#define CIRRUS_ROP_SRC_NOTXOR_DST 0x95 +#define CIRRUS_ROP_SRC_OR_NOTDST 0xad +#define CIRRUS_ROP_NOTSRC 0xd0 +#define CIRRUS_ROP_NOTSRC_OR_DST 0xd6 +#define CIRRUS_ROP_NOTSRC_AND_NOTDST 0xda + +#define CIRRUS_ROP_NOP_INDEX 2 +#define CIRRUS_ROP_SRC_INDEX 5 + +// control 0x33 +#define CIRRUS_BLTMODEEXT_SOLIDFILL 0x04 +#define CIRRUS_BLTMODEEXT_COLOREXPINV 0x02 +#define CIRRUS_BLTMODEEXT_DWORDGRANULARITY 0x01 + +// memory-mapped IO +#define CIRRUS_MMIO_BLTBGCOLOR 0x00 // dword +#define CIRRUS_MMIO_BLTFGCOLOR 0x04 // dword +#define CIRRUS_MMIO_BLTWIDTH 0x08 // word +#define CIRRUS_MMIO_BLTHEIGHT 0x0a // word +#define CIRRUS_MMIO_BLTDESTPITCH 0x0c // word +#define CIRRUS_MMIO_BLTSRCPITCH 0x0e // word +#define CIRRUS_MMIO_BLTDESTADDR 0x10 // dword +#define CIRRUS_MMIO_BLTSRCADDR 0x14 // dword +#define CIRRUS_MMIO_BLTWRITEMASK 0x17 // byte +#define CIRRUS_MMIO_BLTMODE 0x18 // byte +#define CIRRUS_MMIO_BLTROP 0x1a // byte +#define CIRRUS_MMIO_BLTMODEEXT 0x1b // byte +#define CIRRUS_MMIO_BLTTRANSPARENTCOLOR 0x1c // word? +#define CIRRUS_MMIO_BLTTRANSPARENTCOLORMASK 0x20 // word? +#define CIRRUS_MMIO_LINEARDRAW_START_X 0x24 // word +#define CIRRUS_MMIO_LINEARDRAW_START_Y 0x26 // word +#define CIRRUS_MMIO_LINEARDRAW_END_X 0x28 // word +#define CIRRUS_MMIO_LINEARDRAW_END_Y 0x2a // word +#define CIRRUS_MMIO_LINEARDRAW_LINESTYLE_INC 0x2c // byte +#define CIRRUS_MMIO_LINEARDRAW_LINESTYLE_ROLLOVER 0x2d // byte +#define CIRRUS_MMIO_LINEARDRAW_LINESTYLE_MASK 0x2e // byte +#define CIRRUS_MMIO_LINEARDRAW_LINESTYLE_ACCUM 0x2f // byte +#define CIRRUS_MMIO_BRESENHAM_K1 0x30 // word +#define CIRRUS_MMIO_BRESENHAM_K3 0x32 // word +#define CIRRUS_MMIO_BRESENHAM_ERROR 0x34 // word +#define CIRRUS_MMIO_BRESENHAM_DELTA_MAJOR 0x36 // word +#define CIRRUS_MMIO_BRESENHAM_DIRECTION 0x38 // byte +#define CIRRUS_MMIO_LINEDRAW_MODE 0x39 // byte +#define CIRRUS_MMIO_BLTSTATUS 0x40 // byte + +#define CIRRUS_PNPMMIO_SIZE 0x1000 + +typedef void (*cirrus_fill_t)(struct CirrusVGAState *s, + uint32_t dstaddr, int dst_pitch, + int width, int height); + +struct PCICirrusVGAState { + PCIDevice dev; + CirrusVGAState cirrus_vga; +}; + +#define TYPE_PCI_CIRRUS_VGA "cirrus-vga" +OBJECT_DECLARE_SIMPLE_TYPE(PCICirrusVGAState, PCI_CIRRUS_VGA) + +static uint8_t rop_to_index[256]; + +/*************************************** + * + * prototypes. 
+ * + ***************************************/ + + +static void cirrus_bitblt_reset(CirrusVGAState *s); +static void cirrus_update_memory_access(CirrusVGAState *s); + +/*************************************** + * + * raster operations + * + ***************************************/ + +static bool blit_region_is_unsafe(struct CirrusVGAState *s, + int32_t pitch, int32_t addr) +{ + if (!pitch) { + return true; + } + if (pitch < 0) { + int64_t min = addr + + ((int64_t)s->cirrus_blt_height - 1) * pitch + - s->cirrus_blt_width; + if (min < -1 || addr >= s->vga.vram_size) { + return true; + } + } else { + int64_t max = addr + + ((int64_t)s->cirrus_blt_height-1) * pitch + + s->cirrus_blt_width; + if (max > s->vga.vram_size) { + return true; + } + } + return false; +} + +static bool blit_is_unsafe(struct CirrusVGAState *s, bool dst_only) +{ + /* should be the case, see cirrus_bitblt_start */ + assert(s->cirrus_blt_width > 0); + assert(s->cirrus_blt_height > 0); + + if (s->cirrus_blt_width > CIRRUS_BLTBUFSIZE) { + return true; + } + + if (blit_region_is_unsafe(s, s->cirrus_blt_dstpitch, + s->cirrus_blt_dstaddr)) { + return true; + } + if (dst_only) { + return false; + } + if (blit_region_is_unsafe(s, s->cirrus_blt_srcpitch, + s->cirrus_blt_srcaddr)) { + return true; + } + + return false; +} + +static void cirrus_bitblt_rop_nop(CirrusVGAState *s, + uint32_t dstaddr, uint32_t srcaddr, + int dstpitch,int srcpitch, + int bltwidth,int bltheight) +{ +} + +static void cirrus_bitblt_fill_nop(CirrusVGAState *s, + uint32_t dstaddr, + int dstpitch, int bltwidth,int bltheight) +{ +} + +static inline uint8_t cirrus_src(CirrusVGAState *s, uint32_t srcaddr) +{ + if (s->cirrus_srccounter) { + /* cputovideo */ + return s->cirrus_bltbuf[srcaddr & (CIRRUS_BLTBUFSIZE - 1)]; + } else { + /* videotovideo */ + return s->vga.vram_ptr[srcaddr & s->cirrus_addr_mask]; + } +} + +static inline uint16_t cirrus_src16(CirrusVGAState *s, uint32_t srcaddr) +{ + uint16_t *src; + + if (s->cirrus_srccounter) { + /* cputovideo */ + src = (void *)&s->cirrus_bltbuf[srcaddr & (CIRRUS_BLTBUFSIZE - 1) & ~1]; + } else { + /* videotovideo */ + src = (void *)&s->vga.vram_ptr[srcaddr & s->cirrus_addr_mask & ~1]; + } + return *src; +} + +static inline uint32_t cirrus_src32(CirrusVGAState *s, uint32_t srcaddr) +{ + uint32_t *src; + + if (s->cirrus_srccounter) { + /* cputovideo */ + src = (void *)&s->cirrus_bltbuf[srcaddr & (CIRRUS_BLTBUFSIZE - 1) & ~3]; + } else { + /* videotovideo */ + src = (void *)&s->vga.vram_ptr[srcaddr & s->cirrus_addr_mask & ~3]; + } + return *src; +} + +#define ROP_NAME 0 +#define ROP_FN(d, s) 0 +#include "cirrus_vga_rop.h" + +#define ROP_NAME src_and_dst +#define ROP_FN(d, s) (s) & (d) +#include "cirrus_vga_rop.h" + +#define ROP_NAME src_and_notdst +#define ROP_FN(d, s) (s) & (~(d)) +#include "cirrus_vga_rop.h" + +#define ROP_NAME notdst +#define ROP_FN(d, s) ~(d) +#include "cirrus_vga_rop.h" + +#define ROP_NAME src +#define ROP_FN(d, s) s +#include "cirrus_vga_rop.h" + +#define ROP_NAME 1 +#define ROP_FN(d, s) ~0 +#include "cirrus_vga_rop.h" + +#define ROP_NAME notsrc_and_dst +#define ROP_FN(d, s) (~(s)) & (d) +#include "cirrus_vga_rop.h" + +#define ROP_NAME src_xor_dst +#define ROP_FN(d, s) (s) ^ (d) +#include "cirrus_vga_rop.h" + +#define ROP_NAME src_or_dst +#define ROP_FN(d, s) (s) | (d) +#include "cirrus_vga_rop.h" + +#define ROP_NAME notsrc_or_notdst +#define ROP_FN(d, s) (~(s)) | (~(d)) +#include "cirrus_vga_rop.h" + +#define ROP_NAME src_notxor_dst +#define ROP_FN(d, s) ~((s) ^ (d)) +#include "cirrus_vga_rop.h" + 
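+/* Editor's note (a sketch of the idiom, not part of the original
+ * patch): cirrus_vga_rop.h is a template header. Each ROP_NAME/ROP_FN
+ * pair defined before an include stamps out forward and backward
+ * blitter variants named after ROP_NAME; the header is expected to
+ * #undef both macros when done, which is why the pair can be redefined
+ * for the next include. Conceptually, each generated loop performs
+ *
+ *     dst[i] = ROP_FN(dst[i], src[i]);
+ *
+ * e.g. "d = d ^ s" for the src_xor_dst instantiation above. */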
+#define ROP_NAME src_or_notdst +#define ROP_FN(d, s) (s) | (~(d)) +#include "cirrus_vga_rop.h" + +#define ROP_NAME notsrc +#define ROP_FN(d, s) (~(s)) +#include "cirrus_vga_rop.h" + +#define ROP_NAME notsrc_or_dst +#define ROP_FN(d, s) (~(s)) | (d) +#include "cirrus_vga_rop.h" + +#define ROP_NAME notsrc_and_notdst +#define ROP_FN(d, s) (~(s)) & (~(d)) +#include "cirrus_vga_rop.h" + +static const cirrus_bitblt_rop_t cirrus_fwd_rop[16] = { + cirrus_bitblt_rop_fwd_0, + cirrus_bitblt_rop_fwd_src_and_dst, + cirrus_bitblt_rop_nop, + cirrus_bitblt_rop_fwd_src_and_notdst, + cirrus_bitblt_rop_fwd_notdst, + cirrus_bitblt_rop_fwd_src, + cirrus_bitblt_rop_fwd_1, + cirrus_bitblt_rop_fwd_notsrc_and_dst, + cirrus_bitblt_rop_fwd_src_xor_dst, + cirrus_bitblt_rop_fwd_src_or_dst, + cirrus_bitblt_rop_fwd_notsrc_or_notdst, + cirrus_bitblt_rop_fwd_src_notxor_dst, + cirrus_bitblt_rop_fwd_src_or_notdst, + cirrus_bitblt_rop_fwd_notsrc, + cirrus_bitblt_rop_fwd_notsrc_or_dst, + cirrus_bitblt_rop_fwd_notsrc_and_notdst, +}; + +static const cirrus_bitblt_rop_t cirrus_bkwd_rop[16] = { + cirrus_bitblt_rop_bkwd_0, + cirrus_bitblt_rop_bkwd_src_and_dst, + cirrus_bitblt_rop_nop, + cirrus_bitblt_rop_bkwd_src_and_notdst, + cirrus_bitblt_rop_bkwd_notdst, + cirrus_bitblt_rop_bkwd_src, + cirrus_bitblt_rop_bkwd_1, + cirrus_bitblt_rop_bkwd_notsrc_and_dst, + cirrus_bitblt_rop_bkwd_src_xor_dst, + cirrus_bitblt_rop_bkwd_src_or_dst, + cirrus_bitblt_rop_bkwd_notsrc_or_notdst, + cirrus_bitblt_rop_bkwd_src_notxor_dst, + cirrus_bitblt_rop_bkwd_src_or_notdst, + cirrus_bitblt_rop_bkwd_notsrc, + cirrus_bitblt_rop_bkwd_notsrc_or_dst, + cirrus_bitblt_rop_bkwd_notsrc_and_notdst, +}; + +#define TRANSP_ROP(name) {\ + name ## _8,\ + name ## _16,\ + } +#define TRANSP_NOP(func) {\ + func,\ + func,\ + } + +static const cirrus_bitblt_rop_t cirrus_fwd_transp_rop[16][2] = { + TRANSP_ROP(cirrus_bitblt_rop_fwd_transp_0), + TRANSP_ROP(cirrus_bitblt_rop_fwd_transp_src_and_dst), + TRANSP_NOP(cirrus_bitblt_rop_nop), + TRANSP_ROP(cirrus_bitblt_rop_fwd_transp_src_and_notdst), + TRANSP_ROP(cirrus_bitblt_rop_fwd_transp_notdst), + TRANSP_ROP(cirrus_bitblt_rop_fwd_transp_src), + TRANSP_ROP(cirrus_bitblt_rop_fwd_transp_1), + TRANSP_ROP(cirrus_bitblt_rop_fwd_transp_notsrc_and_dst), + TRANSP_ROP(cirrus_bitblt_rop_fwd_transp_src_xor_dst), + TRANSP_ROP(cirrus_bitblt_rop_fwd_transp_src_or_dst), + TRANSP_ROP(cirrus_bitblt_rop_fwd_transp_notsrc_or_notdst), + TRANSP_ROP(cirrus_bitblt_rop_fwd_transp_src_notxor_dst), + TRANSP_ROP(cirrus_bitblt_rop_fwd_transp_src_or_notdst), + TRANSP_ROP(cirrus_bitblt_rop_fwd_transp_notsrc), + TRANSP_ROP(cirrus_bitblt_rop_fwd_transp_notsrc_or_dst), + TRANSP_ROP(cirrus_bitblt_rop_fwd_transp_notsrc_and_notdst), +}; + +static const cirrus_bitblt_rop_t cirrus_bkwd_transp_rop[16][2] = { + TRANSP_ROP(cirrus_bitblt_rop_bkwd_transp_0), + TRANSP_ROP(cirrus_bitblt_rop_bkwd_transp_src_and_dst), + TRANSP_NOP(cirrus_bitblt_rop_nop), + TRANSP_ROP(cirrus_bitblt_rop_bkwd_transp_src_and_notdst), + TRANSP_ROP(cirrus_bitblt_rop_bkwd_transp_notdst), + TRANSP_ROP(cirrus_bitblt_rop_bkwd_transp_src), + TRANSP_ROP(cirrus_bitblt_rop_bkwd_transp_1), + TRANSP_ROP(cirrus_bitblt_rop_bkwd_transp_notsrc_and_dst), + TRANSP_ROP(cirrus_bitblt_rop_bkwd_transp_src_xor_dst), + TRANSP_ROP(cirrus_bitblt_rop_bkwd_transp_src_or_dst), + TRANSP_ROP(cirrus_bitblt_rop_bkwd_transp_notsrc_or_notdst), + TRANSP_ROP(cirrus_bitblt_rop_bkwd_transp_src_notxor_dst), + TRANSP_ROP(cirrus_bitblt_rop_bkwd_transp_src_or_notdst), + TRANSP_ROP(cirrus_bitblt_rop_bkwd_transp_notsrc), + 
TRANSP_ROP(cirrus_bitblt_rop_bkwd_transp_notsrc_or_dst), + TRANSP_ROP(cirrus_bitblt_rop_bkwd_transp_notsrc_and_notdst), +}; + +#define ROP2(name) {\ + name ## _8,\ + name ## _16,\ + name ## _24,\ + name ## _32,\ + } + +#define ROP_NOP2(func) {\ + func,\ + func,\ + func,\ + func,\ + } + +static const cirrus_bitblt_rop_t cirrus_patternfill[16][4] = { + ROP2(cirrus_patternfill_0), + ROP2(cirrus_patternfill_src_and_dst), + ROP_NOP2(cirrus_bitblt_rop_nop), + ROP2(cirrus_patternfill_src_and_notdst), + ROP2(cirrus_patternfill_notdst), + ROP2(cirrus_patternfill_src), + ROP2(cirrus_patternfill_1), + ROP2(cirrus_patternfill_notsrc_and_dst), + ROP2(cirrus_patternfill_src_xor_dst), + ROP2(cirrus_patternfill_src_or_dst), + ROP2(cirrus_patternfill_notsrc_or_notdst), + ROP2(cirrus_patternfill_src_notxor_dst), + ROP2(cirrus_patternfill_src_or_notdst), + ROP2(cirrus_patternfill_notsrc), + ROP2(cirrus_patternfill_notsrc_or_dst), + ROP2(cirrus_patternfill_notsrc_and_notdst), +}; + +static const cirrus_bitblt_rop_t cirrus_colorexpand_transp[16][4] = { + ROP2(cirrus_colorexpand_transp_0), + ROP2(cirrus_colorexpand_transp_src_and_dst), + ROP_NOP2(cirrus_bitblt_rop_nop), + ROP2(cirrus_colorexpand_transp_src_and_notdst), + ROP2(cirrus_colorexpand_transp_notdst), + ROP2(cirrus_colorexpand_transp_src), + ROP2(cirrus_colorexpand_transp_1), + ROP2(cirrus_colorexpand_transp_notsrc_and_dst), + ROP2(cirrus_colorexpand_transp_src_xor_dst), + ROP2(cirrus_colorexpand_transp_src_or_dst), + ROP2(cirrus_colorexpand_transp_notsrc_or_notdst), + ROP2(cirrus_colorexpand_transp_src_notxor_dst), + ROP2(cirrus_colorexpand_transp_src_or_notdst), + ROP2(cirrus_colorexpand_transp_notsrc), + ROP2(cirrus_colorexpand_transp_notsrc_or_dst), + ROP2(cirrus_colorexpand_transp_notsrc_and_notdst), +}; + +static const cirrus_bitblt_rop_t cirrus_colorexpand[16][4] = { + ROP2(cirrus_colorexpand_0), + ROP2(cirrus_colorexpand_src_and_dst), + ROP_NOP2(cirrus_bitblt_rop_nop), + ROP2(cirrus_colorexpand_src_and_notdst), + ROP2(cirrus_colorexpand_notdst), + ROP2(cirrus_colorexpand_src), + ROP2(cirrus_colorexpand_1), + ROP2(cirrus_colorexpand_notsrc_and_dst), + ROP2(cirrus_colorexpand_src_xor_dst), + ROP2(cirrus_colorexpand_src_or_dst), + ROP2(cirrus_colorexpand_notsrc_or_notdst), + ROP2(cirrus_colorexpand_src_notxor_dst), + ROP2(cirrus_colorexpand_src_or_notdst), + ROP2(cirrus_colorexpand_notsrc), + ROP2(cirrus_colorexpand_notsrc_or_dst), + ROP2(cirrus_colorexpand_notsrc_and_notdst), +}; + +static const cirrus_bitblt_rop_t cirrus_colorexpand_pattern_transp[16][4] = { + ROP2(cirrus_colorexpand_pattern_transp_0), + ROP2(cirrus_colorexpand_pattern_transp_src_and_dst), + ROP_NOP2(cirrus_bitblt_rop_nop), + ROP2(cirrus_colorexpand_pattern_transp_src_and_notdst), + ROP2(cirrus_colorexpand_pattern_transp_notdst), + ROP2(cirrus_colorexpand_pattern_transp_src), + ROP2(cirrus_colorexpand_pattern_transp_1), + ROP2(cirrus_colorexpand_pattern_transp_notsrc_and_dst), + ROP2(cirrus_colorexpand_pattern_transp_src_xor_dst), + ROP2(cirrus_colorexpand_pattern_transp_src_or_dst), + ROP2(cirrus_colorexpand_pattern_transp_notsrc_or_notdst), + ROP2(cirrus_colorexpand_pattern_transp_src_notxor_dst), + ROP2(cirrus_colorexpand_pattern_transp_src_or_notdst), + ROP2(cirrus_colorexpand_pattern_transp_notsrc), + ROP2(cirrus_colorexpand_pattern_transp_notsrc_or_dst), + ROP2(cirrus_colorexpand_pattern_transp_notsrc_and_notdst), +}; + +static const cirrus_bitblt_rop_t cirrus_colorexpand_pattern[16][4] = { + ROP2(cirrus_colorexpand_pattern_0), + 
ROP2(cirrus_colorexpand_pattern_src_and_dst), + ROP_NOP2(cirrus_bitblt_rop_nop), + ROP2(cirrus_colorexpand_pattern_src_and_notdst), + ROP2(cirrus_colorexpand_pattern_notdst), + ROP2(cirrus_colorexpand_pattern_src), + ROP2(cirrus_colorexpand_pattern_1), + ROP2(cirrus_colorexpand_pattern_notsrc_and_dst), + ROP2(cirrus_colorexpand_pattern_src_xor_dst), + ROP2(cirrus_colorexpand_pattern_src_or_dst), + ROP2(cirrus_colorexpand_pattern_notsrc_or_notdst), + ROP2(cirrus_colorexpand_pattern_src_notxor_dst), + ROP2(cirrus_colorexpand_pattern_src_or_notdst), + ROP2(cirrus_colorexpand_pattern_notsrc), + ROP2(cirrus_colorexpand_pattern_notsrc_or_dst), + ROP2(cirrus_colorexpand_pattern_notsrc_and_notdst), +}; + +static const cirrus_fill_t cirrus_fill[16][4] = { + ROP2(cirrus_fill_0), + ROP2(cirrus_fill_src_and_dst), + ROP_NOP2(cirrus_bitblt_fill_nop), + ROP2(cirrus_fill_src_and_notdst), + ROP2(cirrus_fill_notdst), + ROP2(cirrus_fill_src), + ROP2(cirrus_fill_1), + ROP2(cirrus_fill_notsrc_and_dst), + ROP2(cirrus_fill_src_xor_dst), + ROP2(cirrus_fill_src_or_dst), + ROP2(cirrus_fill_notsrc_or_notdst), + ROP2(cirrus_fill_src_notxor_dst), + ROP2(cirrus_fill_src_or_notdst), + ROP2(cirrus_fill_notsrc), + ROP2(cirrus_fill_notsrc_or_dst), + ROP2(cirrus_fill_notsrc_and_notdst), +}; + +static inline void cirrus_bitblt_fgcol(CirrusVGAState *s) +{ + unsigned int color; + switch (s->cirrus_blt_pixelwidth) { + case 1: + s->cirrus_blt_fgcol = s->cirrus_shadow_gr1; + break; + case 2: + color = s->cirrus_shadow_gr1 | (s->vga.gr[0x11] << 8); + s->cirrus_blt_fgcol = le16_to_cpu(color); + break; + case 3: + s->cirrus_blt_fgcol = s->cirrus_shadow_gr1 | + (s->vga.gr[0x11] << 8) | (s->vga.gr[0x13] << 16); + break; + default: + case 4: + color = s->cirrus_shadow_gr1 | (s->vga.gr[0x11] << 8) | + (s->vga.gr[0x13] << 16) | (s->vga.gr[0x15] << 24); + s->cirrus_blt_fgcol = le32_to_cpu(color); + break; + } +} + +static inline void cirrus_bitblt_bgcol(CirrusVGAState *s) +{ + unsigned int color; + switch (s->cirrus_blt_pixelwidth) { + case 1: + s->cirrus_blt_bgcol = s->cirrus_shadow_gr0; + break; + case 2: + color = s->cirrus_shadow_gr0 | (s->vga.gr[0x10] << 8); + s->cirrus_blt_bgcol = le16_to_cpu(color); + break; + case 3: + s->cirrus_blt_bgcol = s->cirrus_shadow_gr0 | + (s->vga.gr[0x10] << 8) | (s->vga.gr[0x12] << 16); + break; + default: + case 4: + color = s->cirrus_shadow_gr0 | (s->vga.gr[0x10] << 8) | + (s->vga.gr[0x12] << 16) | (s->vga.gr[0x14] << 24); + s->cirrus_blt_bgcol = le32_to_cpu(color); + break; + } +} + +static void cirrus_invalidate_region(CirrusVGAState * s, int off_begin, + int off_pitch, int bytesperline, + int lines) +{ + int y; + int off_cur; + int off_cur_end; + + if (off_pitch < 0) { + off_begin -= bytesperline - 1; + } + + for (y = 0; y < lines; y++) { + off_cur = off_begin & s->cirrus_addr_mask; + off_cur_end = ((off_cur + bytesperline - 1) & s->cirrus_addr_mask) + 1; + if (off_cur_end >= off_cur) { + memory_region_set_dirty(&s->vga.vram, off_cur, off_cur_end - off_cur); + } else { + /* wraparound */ + memory_region_set_dirty(&s->vga.vram, off_cur, + s->cirrus_addr_mask + 1 - off_cur); + memory_region_set_dirty(&s->vga.vram, 0, off_cur_end); + } + off_begin += off_pitch; + } +} + +static int cirrus_bitblt_common_patterncopy(CirrusVGAState *s) +{ + uint32_t patternsize; + bool videosrc = !s->cirrus_srccounter; + + if (videosrc) { + switch (s->vga.get_bpp(&s->vga)) { + case 8: + patternsize = 64; + break; + case 15: + case 16: + patternsize = 128; + break; + case 24: + case 32: + default: + patternsize = 256; + 
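+            /* Editor's note (annotation, not part of the original
+             * patch): the pattern is an 8x8 pixel tile, so the sizes
+             * above are 8 * 8 * bytes-per-pixel: 64 bytes at 8bpp, 128
+             * at 15/16bpp, and 256 at 32bpp (24bpp is apparently
+             * rounded up and handled like 32bpp here). */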
break; + } + s->cirrus_blt_srcaddr &= ~(patternsize - 1); + if (s->cirrus_blt_srcaddr + patternsize > s->vga.vram_size) { + return 0; + } + } + + if (blit_is_unsafe(s, true)) { + return 0; + } + + (*s->cirrus_rop) (s, s->cirrus_blt_dstaddr, + videosrc ? s->cirrus_blt_srcaddr : 0, + s->cirrus_blt_dstpitch, 0, + s->cirrus_blt_width, s->cirrus_blt_height); + cirrus_invalidate_region(s, s->cirrus_blt_dstaddr, + s->cirrus_blt_dstpitch, s->cirrus_blt_width, + s->cirrus_blt_height); + return 1; +} + +/* fill */ + +static int cirrus_bitblt_solidfill(CirrusVGAState *s, int blt_rop) +{ + cirrus_fill_t rop_func; + + if (blit_is_unsafe(s, true)) { + return 0; + } + rop_func = cirrus_fill[rop_to_index[blt_rop]][s->cirrus_blt_pixelwidth - 1]; + rop_func(s, s->cirrus_blt_dstaddr, + s->cirrus_blt_dstpitch, + s->cirrus_blt_width, s->cirrus_blt_height); + cirrus_invalidate_region(s, s->cirrus_blt_dstaddr, + s->cirrus_blt_dstpitch, s->cirrus_blt_width, + s->cirrus_blt_height); + cirrus_bitblt_reset(s); + return 1; +} + +/*************************************** + * + * bitblt (video-to-video) + * + ***************************************/ + +static int cirrus_bitblt_videotovideo_patterncopy(CirrusVGAState * s) +{ + return cirrus_bitblt_common_patterncopy(s); +} + +static int cirrus_do_copy(CirrusVGAState *s, int dst, int src, int w, int h) +{ + int sx = 0, sy = 0; + int dx = 0, dy = 0; + int depth = 0; + int notify = 0; + + /* make sure to only copy if it's a plain copy ROP */ + if (*s->cirrus_rop == cirrus_bitblt_rop_fwd_src || + *s->cirrus_rop == cirrus_bitblt_rop_bkwd_src) { + + int width, height; + + depth = s->vga.get_bpp(&s->vga) / 8; + if (!depth) { + return 0; + } + s->vga.get_resolution(&s->vga, &width, &height); + + /* extra x, y */ + sx = (src % ABS(s->cirrus_blt_srcpitch)) / depth; + sy = (src / ABS(s->cirrus_blt_srcpitch)); + dx = (dst % ABS(s->cirrus_blt_dstpitch)) / depth; + dy = (dst / ABS(s->cirrus_blt_dstpitch)); + + /* normalize width */ + w /= depth; + + /* if we're doing a backward copy, we have to adjust + our x/y to be the upper left corner (instead of the lower + right corner) */ + if (s->cirrus_blt_dstpitch < 0) { + sx -= (s->cirrus_blt_width / depth) - 1; + dx -= (s->cirrus_blt_width / depth) - 1; + sy -= s->cirrus_blt_height - 1; + dy -= s->cirrus_blt_height - 1; + } + + /* are we in the visible portion of memory? 
*/ + if (sx >= 0 && sy >= 0 && dx >= 0 && dy >= 0 && + (sx + w) <= width && (sy + h) <= height && + (dx + w) <= width && (dy + h) <= height) { + notify = 1; + } + } + + (*s->cirrus_rop) (s, s->cirrus_blt_dstaddr, + s->cirrus_blt_srcaddr, + s->cirrus_blt_dstpitch, s->cirrus_blt_srcpitch, + s->cirrus_blt_width, s->cirrus_blt_height); + + if (notify) { + dpy_gfx_update(s->vga.con, dx, dy, + s->cirrus_blt_width / depth, + s->cirrus_blt_height); + } + + /* we don't have to notify the display that this portion has + changed since qemu_console_copy implies this */ + + cirrus_invalidate_region(s, s->cirrus_blt_dstaddr, + s->cirrus_blt_dstpitch, s->cirrus_blt_width, + s->cirrus_blt_height); + + return 1; +} + +static int cirrus_bitblt_videotovideo_copy(CirrusVGAState * s) +{ + if (blit_is_unsafe(s, false)) + return 0; + + return cirrus_do_copy(s, s->cirrus_blt_dstaddr - s->vga.start_addr, + s->cirrus_blt_srcaddr - s->vga.start_addr, + s->cirrus_blt_width, s->cirrus_blt_height); +} + +/*************************************** + * + * bitblt (cpu-to-video) + * + ***************************************/ + +static void cirrus_bitblt_cputovideo_next(CirrusVGAState * s) +{ + int copy_count; + uint8_t *end_ptr; + + if (s->cirrus_srccounter > 0) { + if (s->cirrus_blt_mode & CIRRUS_BLTMODE_PATTERNCOPY) { + cirrus_bitblt_common_patterncopy(s); + the_end: + s->cirrus_srccounter = 0; + cirrus_bitblt_reset(s); + } else { + /* at least one scan line */ + do { + (*s->cirrus_rop)(s, s->cirrus_blt_dstaddr, + 0, 0, 0, s->cirrus_blt_width, 1); + cirrus_invalidate_region(s, s->cirrus_blt_dstaddr, 0, + s->cirrus_blt_width, 1); + s->cirrus_blt_dstaddr += s->cirrus_blt_dstpitch; + s->cirrus_srccounter -= s->cirrus_blt_srcpitch; + if (s->cirrus_srccounter <= 0) + goto the_end; + /* more bytes than needed can be transferred because of + word alignment, so we keep them for the next line */ + /* XXX: keep alignment to speed up transfer */ + end_ptr = s->cirrus_bltbuf + s->cirrus_blt_srcpitch; + copy_count = s->cirrus_srcptr_end - end_ptr; + memmove(s->cirrus_bltbuf, end_ptr, copy_count); + s->cirrus_srcptr = s->cirrus_bltbuf + copy_count; + s->cirrus_srcptr_end = s->cirrus_bltbuf + s->cirrus_blt_srcpitch; + } while (s->cirrus_srcptr >= s->cirrus_srcptr_end); + } + } +} + +/*************************************** + * + * bitblt wrapper + * + ***************************************/ + +static void cirrus_bitblt_reset(CirrusVGAState * s) +{ + int need_update; + + s->vga.gr[0x31] &= + ~(CIRRUS_BLT_START | CIRRUS_BLT_BUSY | CIRRUS_BLT_FIFOUSED); + need_update = s->cirrus_srcptr != &s->cirrus_bltbuf[0] + || s->cirrus_srcptr_end != &s->cirrus_bltbuf[0]; + s->cirrus_srcptr = &s->cirrus_bltbuf[0]; + s->cirrus_srcptr_end = &s->cirrus_bltbuf[0]; + s->cirrus_srccounter = 0; + if (!need_update) + return; + cirrus_update_memory_access(s); +} + +static int cirrus_bitblt_cputovideo(CirrusVGAState * s) +{ + int w; + + if (blit_is_unsafe(s, true)) { + return 0; + } + + s->cirrus_blt_mode &= ~CIRRUS_BLTMODE_MEMSYSSRC; + s->cirrus_srcptr = &s->cirrus_bltbuf[0]; + s->cirrus_srcptr_end = &s->cirrus_bltbuf[0]; + + if (s->cirrus_blt_mode & CIRRUS_BLTMODE_PATTERNCOPY) { + if (s->cirrus_blt_mode & CIRRUS_BLTMODE_COLOREXPAND) { + s->cirrus_blt_srcpitch = 8; + } else { + /* XXX: check for 24 bpp */ + s->cirrus_blt_srcpitch = 8 * 8 * s->cirrus_blt_pixelwidth; + } + s->cirrus_srccounter = s->cirrus_blt_srcpitch; + } else { + if (s->cirrus_blt_mode & CIRRUS_BLTMODE_COLOREXPAND) { + w = s->cirrus_blt_width / s->cirrus_blt_pixelwidth; + if 
(s->cirrus_blt_modeext & CIRRUS_BLTMODEEXT_DWORDGRANULARITY) + s->cirrus_blt_srcpitch = ((w + 31) >> 5); + else + s->cirrus_blt_srcpitch = ((w + 7) >> 3); + } else { + /* always align input size to 32 bits */ + s->cirrus_blt_srcpitch = (s->cirrus_blt_width + 3) & ~3; + } + s->cirrus_srccounter = s->cirrus_blt_srcpitch * s->cirrus_blt_height; + } + + /* the blit_is_unsafe call above should catch this */ + assert(s->cirrus_blt_srcpitch <= CIRRUS_BLTBUFSIZE); + + s->cirrus_srcptr = s->cirrus_bltbuf; + s->cirrus_srcptr_end = s->cirrus_bltbuf + s->cirrus_blt_srcpitch; + cirrus_update_memory_access(s); + return 1; +} + +static int cirrus_bitblt_videotocpu(CirrusVGAState * s) +{ + /* XXX */ + qemu_log_mask(LOG_UNIMP, + "cirrus: bitblt (video to cpu) is not implemented\n"); + return 0; +} + +static int cirrus_bitblt_videotovideo(CirrusVGAState * s) +{ + int ret; + + if (s->cirrus_blt_mode & CIRRUS_BLTMODE_PATTERNCOPY) { + ret = cirrus_bitblt_videotovideo_patterncopy(s); + } else { + ret = cirrus_bitblt_videotovideo_copy(s); + } + if (ret) + cirrus_bitblt_reset(s); + return ret; +} + +static void cirrus_bitblt_start(CirrusVGAState * s) +{ + uint8_t blt_rop; + + if (!s->enable_blitter) { + goto bitblt_ignore; + } + + s->vga.gr[0x31] |= CIRRUS_BLT_BUSY; + + s->cirrus_blt_width = (s->vga.gr[0x20] | (s->vga.gr[0x21] << 8)) + 1; + s->cirrus_blt_height = (s->vga.gr[0x22] | (s->vga.gr[0x23] << 8)) + 1; + s->cirrus_blt_dstpitch = (s->vga.gr[0x24] | (s->vga.gr[0x25] << 8)); + s->cirrus_blt_srcpitch = (s->vga.gr[0x26] | (s->vga.gr[0x27] << 8)); + s->cirrus_blt_dstaddr = + (s->vga.gr[0x28] | (s->vga.gr[0x29] << 8) | (s->vga.gr[0x2a] << 16)); + s->cirrus_blt_srcaddr = + (s->vga.gr[0x2c] | (s->vga.gr[0x2d] << 8) | (s->vga.gr[0x2e] << 16)); + s->cirrus_blt_mode = s->vga.gr[0x30]; + s->cirrus_blt_modeext = s->vga.gr[0x33]; + blt_rop = s->vga.gr[0x32]; + + s->cirrus_blt_dstaddr &= s->cirrus_addr_mask; + s->cirrus_blt_srcaddr &= s->cirrus_addr_mask; + + trace_vga_cirrus_bitblt_start(blt_rop, + s->cirrus_blt_mode, + s->cirrus_blt_modeext, + s->cirrus_blt_width, + s->cirrus_blt_height, + s->cirrus_blt_dstpitch, + s->cirrus_blt_srcpitch, + s->cirrus_blt_dstaddr, + s->cirrus_blt_srcaddr, + s->vga.gr[0x2f]); + + switch (s->cirrus_blt_mode & CIRRUS_BLTMODE_PIXELWIDTHMASK) { + case CIRRUS_BLTMODE_PIXELWIDTH8: + s->cirrus_blt_pixelwidth = 1; + break; + case CIRRUS_BLTMODE_PIXELWIDTH16: + s->cirrus_blt_pixelwidth = 2; + break; + case CIRRUS_BLTMODE_PIXELWIDTH24: + s->cirrus_blt_pixelwidth = 3; + break; + case CIRRUS_BLTMODE_PIXELWIDTH32: + s->cirrus_blt_pixelwidth = 4; + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "cirrus: bitblt - pixel width is unknown\n"); + goto bitblt_ignore; + } + s->cirrus_blt_mode &= ~CIRRUS_BLTMODE_PIXELWIDTHMASK; + + if ((s-> + cirrus_blt_mode & (CIRRUS_BLTMODE_MEMSYSSRC | + CIRRUS_BLTMODE_MEMSYSDEST)) + == (CIRRUS_BLTMODE_MEMSYSSRC | CIRRUS_BLTMODE_MEMSYSDEST)) { + qemu_log_mask(LOG_UNIMP, + "cirrus: bitblt - memory-to-memory copy requested\n"); + goto bitblt_ignore; + } + + if ((s->cirrus_blt_modeext & CIRRUS_BLTMODEEXT_SOLIDFILL) && + (s->cirrus_blt_mode & (CIRRUS_BLTMODE_MEMSYSDEST | + CIRRUS_BLTMODE_TRANSPARENTCOMP | + CIRRUS_BLTMODE_PATTERNCOPY | + CIRRUS_BLTMODE_COLOREXPAND)) == + (CIRRUS_BLTMODE_PATTERNCOPY | CIRRUS_BLTMODE_COLOREXPAND)) { + cirrus_bitblt_fgcol(s); + cirrus_bitblt_solidfill(s, blt_rop); + } else { + if ((s->cirrus_blt_mode & (CIRRUS_BLTMODE_COLOREXPAND | + CIRRUS_BLTMODE_PATTERNCOPY)) == + CIRRUS_BLTMODE_COLOREXPAND) { + + if (s->cirrus_blt_mode & 
CIRRUS_BLTMODE_TRANSPARENTCOMP) { + if (s->cirrus_blt_modeext & CIRRUS_BLTMODEEXT_COLOREXPINV) + cirrus_bitblt_bgcol(s); + else + cirrus_bitblt_fgcol(s); + s->cirrus_rop = cirrus_colorexpand_transp[rop_to_index[blt_rop]][s->cirrus_blt_pixelwidth - 1]; + } else { + cirrus_bitblt_fgcol(s); + cirrus_bitblt_bgcol(s); + s->cirrus_rop = cirrus_colorexpand[rop_to_index[blt_rop]][s->cirrus_blt_pixelwidth - 1]; + } + } else if (s->cirrus_blt_mode & CIRRUS_BLTMODE_PATTERNCOPY) { + if (s->cirrus_blt_mode & CIRRUS_BLTMODE_COLOREXPAND) { + if (s->cirrus_blt_mode & CIRRUS_BLTMODE_TRANSPARENTCOMP) { + if (s->cirrus_blt_modeext & CIRRUS_BLTMODEEXT_COLOREXPINV) + cirrus_bitblt_bgcol(s); + else + cirrus_bitblt_fgcol(s); + s->cirrus_rop = cirrus_colorexpand_pattern_transp[rop_to_index[blt_rop]][s->cirrus_blt_pixelwidth - 1]; + } else { + cirrus_bitblt_fgcol(s); + cirrus_bitblt_bgcol(s); + s->cirrus_rop = cirrus_colorexpand_pattern[rop_to_index[blt_rop]][s->cirrus_blt_pixelwidth - 1]; + } + } else { + s->cirrus_rop = cirrus_patternfill[rop_to_index[blt_rop]][s->cirrus_blt_pixelwidth - 1]; + } + } else { + if (s->cirrus_blt_mode & CIRRUS_BLTMODE_TRANSPARENTCOMP) { + if (s->cirrus_blt_pixelwidth > 2) { + qemu_log_mask(LOG_GUEST_ERROR, + "cirrus: src transparent without colorexpand " + "must be 8bpp or 16bpp\n"); + goto bitblt_ignore; + } + if (s->cirrus_blt_mode & CIRRUS_BLTMODE_BACKWARDS) { + s->cirrus_blt_dstpitch = -s->cirrus_blt_dstpitch; + s->cirrus_blt_srcpitch = -s->cirrus_blt_srcpitch; + s->cirrus_rop = cirrus_bkwd_transp_rop[rop_to_index[blt_rop]][s->cirrus_blt_pixelwidth - 1]; + } else { + s->cirrus_rop = cirrus_fwd_transp_rop[rop_to_index[blt_rop]][s->cirrus_blt_pixelwidth - 1]; + } + } else { + if (s->cirrus_blt_mode & CIRRUS_BLTMODE_BACKWARDS) { + s->cirrus_blt_dstpitch = -s->cirrus_blt_dstpitch; + s->cirrus_blt_srcpitch = -s->cirrus_blt_srcpitch; + s->cirrus_rop = cirrus_bkwd_rop[rop_to_index[blt_rop]]; + } else { + s->cirrus_rop = cirrus_fwd_rop[rop_to_index[blt_rop]]; + } + } + } + // setup bitblt engine. 
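+ /* Dispatch on the remaining direction bits of the BLT mode register: + a memory-system source streams guest writes through cirrus_bltbuf + (cirrus_bitblt_cputovideo), a memory-system destination is not + implemented (cirrus_bitblt_videotocpu), and everything else is a + video-to-video blit that runs to completion immediately + (cirrus_bitblt_videotovideo). */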
+ if (s->cirrus_blt_mode & CIRRUS_BLTMODE_MEMSYSSRC) { + if (!cirrus_bitblt_cputovideo(s)) + goto bitblt_ignore; + } else if (s->cirrus_blt_mode & CIRRUS_BLTMODE_MEMSYSDEST) { + if (!cirrus_bitblt_videotocpu(s)) + goto bitblt_ignore; + } else { + if (!cirrus_bitblt_videotovideo(s)) + goto bitblt_ignore; + } + } + return; + bitblt_ignore:; + cirrus_bitblt_reset(s); +} + +static void cirrus_write_bitblt(CirrusVGAState * s, unsigned reg_value) +{ + unsigned old_value; + + old_value = s->vga.gr[0x31]; + s->vga.gr[0x31] = reg_value; + + if (((old_value & CIRRUS_BLT_RESET) != 0) && + ((reg_value & CIRRUS_BLT_RESET) == 0)) { + cirrus_bitblt_reset(s); + } else if (((old_value & CIRRUS_BLT_START) == 0) && + ((reg_value & CIRRUS_BLT_START) != 0)) { + cirrus_bitblt_start(s); + } +} + + +/*************************************** + * + * basic parameters + * + ***************************************/ + +static void cirrus_get_offsets(VGACommonState *s1, + uint32_t *pline_offset, + uint32_t *pstart_addr, + uint32_t *pline_compare) +{ + CirrusVGAState * s = container_of(s1, CirrusVGAState, vga); + uint32_t start_addr, line_offset, line_compare; + + line_offset = s->vga.cr[0x13] + | ((s->vga.cr[0x1b] & 0x10) << 4); + line_offset <<= 3; + *pline_offset = line_offset; + + start_addr = (s->vga.cr[0x0c] << 8) + | s->vga.cr[0x0d] + | ((s->vga.cr[0x1b] & 0x01) << 16) + | ((s->vga.cr[0x1b] & 0x0c) << 15) + | ((s->vga.cr[0x1d] & 0x80) << 12); + *pstart_addr = start_addr; + + line_compare = s->vga.cr[0x18] | + ((s->vga.cr[0x07] & 0x10) << 4) | + ((s->vga.cr[0x09] & 0x40) << 3); + *pline_compare = line_compare; +} + +static uint32_t cirrus_get_bpp16_depth(CirrusVGAState * s) +{ + uint32_t ret = 16; + + switch (s->cirrus_hidden_dac_data & 0xf) { + case 0: + ret = 15; + break; /* Sierra HiColor */ + case 1: + ret = 16; + break; /* XGA HiColor */ + default: + qemu_log_mask(LOG_GUEST_ERROR, + "cirrus: invalid DAC value 0x%x in 16bpp\n", + (s->cirrus_hidden_dac_data & 0xf)); + ret = 15; /* XXX */ + break; + } + return ret; +} + +static int cirrus_get_bpp(VGACommonState *s1) +{ + CirrusVGAState * s = container_of(s1, CirrusVGAState, vga); + uint32_t ret = 8; + + if ((s->vga.sr[0x07] & 0x01) != 0) { + /* Cirrus SVGA */ + switch (s->vga.sr[0x07] & CIRRUS_SR7_BPP_MASK) { + case CIRRUS_SR7_BPP_8: + ret = 8; + break; + case CIRRUS_SR7_BPP_16_DOUBLEVCLK: + ret = cirrus_get_bpp16_depth(s); + break; + case CIRRUS_SR7_BPP_24: + ret = 24; + break; + case CIRRUS_SR7_BPP_16: + ret = cirrus_get_bpp16_depth(s); + break; + case CIRRUS_SR7_BPP_32: + ret = 32; + break; + default: +#ifdef DEBUG_CIRRUS + printf("cirrus: unknown bpp - sr7=%x\n", s->vga.sr[0x7]); +#endif + ret = 8; + break; + } + } else { + /* VGA */ + ret = 0; + } + + return ret; +} + +static void cirrus_get_resolution(VGACommonState *s, int *pwidth, int *pheight) +{ + int width, height; + + width = (s->cr[0x01] + 1) * 8; + height = s->cr[0x12] | + ((s->cr[0x07] & 0x02) << 7) | + ((s->cr[0x07] & 0x40) << 3); + height = (height + 1); + /* interlace support */ + if (s->cr[0x1a] & 0x01) + height = height * 2; + *pwidth = width; + *pheight = height; +} + +/*************************************** + * + * bank memory + * + ***************************************/ + +static void cirrus_update_bank_ptr(CirrusVGAState * s, unsigned bank_index) +{ + unsigned offset; + unsigned limit; + + if ((s->vga.gr[0x0b] & 0x01) != 0) /* dual bank */ + offset = s->vga.gr[0x09 + bank_index]; + else /* single bank */ + offset = s->vga.gr[0x09]; + + if ((s->vga.gr[0x0b] & 0x20) != 0) + offset <<= 14; 
+ else + offset <<= 12; + + if (s->real_vram_size <= offset) + limit = 0; + else + limit = s->real_vram_size - offset; + + if (((s->vga.gr[0x0b] & 0x01) == 0) && (bank_index != 0)) { + if (limit > 0x8000) { + offset += 0x8000; + limit -= 0x8000; + } else { + limit = 0; + } + } + + if (limit > 0) { + s->cirrus_bank_base[bank_index] = offset; + s->cirrus_bank_limit[bank_index] = limit; + } else { + s->cirrus_bank_base[bank_index] = 0; + s->cirrus_bank_limit[bank_index] = 0; + } +} + +/*************************************** + * + * I/O access between 0x3c4-0x3c5 + * + ***************************************/ + +static int cirrus_vga_read_sr(CirrusVGAState * s) +{ + switch (s->vga.sr_index) { + case 0x00: // Standard VGA + case 0x01: // Standard VGA + case 0x02: // Standard VGA + case 0x03: // Standard VGA + case 0x04: // Standard VGA + return s->vga.sr[s->vga.sr_index]; + case 0x06: // Unlock Cirrus extensions + return s->vga.sr[s->vga.sr_index]; + case 0x10: + case 0x30: + case 0x50: + case 0x70: // Graphics Cursor X + case 0x90: + case 0xb0: + case 0xd0: + case 0xf0: // Graphics Cursor X + return s->vga.sr[0x10]; + case 0x11: + case 0x31: + case 0x51: + case 0x71: // Graphics Cursor Y + case 0x91: + case 0xb1: + case 0xd1: + case 0xf1: // Graphics Cursor Y + return s->vga.sr[0x11]; + case 0x05: // ??? + case 0x07: // Extended Sequencer Mode + case 0x08: // EEPROM Control + case 0x09: // Scratch Register 0 + case 0x0a: // Scratch Register 1 + case 0x0b: // VCLK 0 + case 0x0c: // VCLK 1 + case 0x0d: // VCLK 2 + case 0x0e: // VCLK 3 + case 0x0f: // DRAM Control + case 0x12: // Graphics Cursor Attribute + case 0x13: // Graphics Cursor Pattern Address + case 0x14: // Scratch Register 2 + case 0x15: // Scratch Register 3 + case 0x16: // Performance Tuning Register + case 0x17: // Configuration Readback and Extended Control + case 0x18: // Signature Generator Control + case 0x19: // Signature Generator Result + case 0x1a: // Signature Generator Result + case 0x1b: // VCLK 0 Denominator & Post + case 0x1c: // VCLK 1 Denominator & Post + case 0x1d: // VCLK 2 Denominator & Post + case 0x1e: // VCLK 3 Denominator & Post + case 0x1f: // BIOS Write Enable and MCLK select +#ifdef DEBUG_CIRRUS + printf("cirrus: handled inport sr_index %02x\n", s->vga.sr_index); +#endif + return s->vga.sr[s->vga.sr_index]; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "cirrus: inport sr_index 0x%02x\n", s->vga.sr_index); + return 0xff; + } +} + +static void cirrus_vga_write_sr(CirrusVGAState * s, uint32_t val) +{ + switch (s->vga.sr_index) { + case 0x00: // Standard VGA + case 0x01: // Standard VGA + case 0x02: // Standard VGA + case 0x03: // Standard VGA + case 0x04: // Standard VGA + s->vga.sr[s->vga.sr_index] = val & sr_mask[s->vga.sr_index]; + if (s->vga.sr_index == 1) + s->vga.update_retrace_info(&s->vga); + break; + case 0x06: // Unlock Cirrus extensions + val &= 0x17; + if (val == 0x12) { + s->vga.sr[s->vga.sr_index] = 0x12; + } else { + s->vga.sr[s->vga.sr_index] = 0x0f; + } + break; + case 0x10: + case 0x30: + case 0x50: + case 0x70: // Graphics Cursor X + case 0x90: + case 0xb0: + case 0xd0: + case 0xf0: // Graphics Cursor X + s->vga.sr[0x10] = val; + s->vga.hw_cursor_x = (val << 3) | (s->vga.sr_index >> 5); + break; + case 0x11: + case 0x31: + case 0x51: + case 0x71: // Graphics Cursor Y + case 0x91: + case 0xb1: + case 0xd1: + case 0xf1: // Graphics Cursor Y + s->vga.sr[0x11] = val; + s->vga.hw_cursor_y = (val << 3) | (s->vga.sr_index >> 5); + break; + case 0x07: // Extended Sequencer Mode +
cirrus_update_memory_access(s); + /* fall through */ + case 0x08: // EEPROM Control + case 0x09: // Scratch Register 0 + case 0x0a: // Scratch Register 1 + case 0x0b: // VCLK 0 + case 0x0c: // VCLK 1 + case 0x0d: // VCLK 2 + case 0x0e: // VCLK 3 + case 0x0f: // DRAM Control + case 0x13: // Graphics Cursor Pattern Address + case 0x14: // Scratch Register 2 + case 0x15: // Scratch Register 3 + case 0x16: // Performance Tuning Register + case 0x18: // Signature Generator Control + case 0x19: // Signature Generator Result + case 0x1a: // Signature Generator Result + case 0x1b: // VCLK 0 Denominator & Post + case 0x1c: // VCLK 1 Denominator & Post + case 0x1d: // VCLK 2 Denominator & Post + case 0x1e: // VCLK 3 Denominator & Post + case 0x1f: // BIOS Write Enable and MCLK select + s->vga.sr[s->vga.sr_index] = val; +#ifdef DEBUG_CIRRUS + printf("cirrus: handled outport sr_index %02x, sr_value %02x\n", + s->vga.sr_index, val); +#endif + break; + case 0x12: // Graphics Cursor Attribute + s->vga.sr[0x12] = val; + s->vga.force_shadow = !!(val & CIRRUS_CURSOR_SHOW); +#ifdef DEBUG_CIRRUS + printf("cirrus: cursor ctl SR12=%02x (force shadow: %d)\n", + val, s->vga.force_shadow); +#endif + break; + case 0x17: // Configuration Readback and Extended Control + s->vga.sr[s->vga.sr_index] = (s->vga.sr[s->vga.sr_index] & 0x38) + | (val & 0xc7); + cirrus_update_memory_access(s); + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "cirrus: outport sr_index 0x%02x, sr_value 0x%02x\n", + s->vga.sr_index, val); + break; + } +} + +/*************************************** + * + * I/O access at 0x3c6 + * + ***************************************/ + +static int cirrus_read_hidden_dac(CirrusVGAState * s) +{ + if (++s->cirrus_hidden_dac_lockindex == 5) { + s->cirrus_hidden_dac_lockindex = 0; + return s->cirrus_hidden_dac_data; + } + return 0xff; +} + +static void cirrus_write_hidden_dac(CirrusVGAState * s, int reg_value) +{ + if (s->cirrus_hidden_dac_lockindex == 4) { + s->cirrus_hidden_dac_data = reg_value; +#if defined(DEBUG_CIRRUS) + printf("cirrus: outport hidden DAC, value %02x\n", reg_value); +#endif + } + s->cirrus_hidden_dac_lockindex = 0; +} + +/*************************************** + * + * I/O access at 0x3c9 + * + ***************************************/ + +static int cirrus_vga_read_palette(CirrusVGAState * s) +{ + int val; + + if ((s->vga.sr[0x12] & CIRRUS_CURSOR_HIDDENPEL)) { + val = s->cirrus_hidden_palette[(s->vga.dac_read_index & 0x0f) * 3 + + s->vga.dac_sub_index]; + } else { + val = s->vga.palette[s->vga.dac_read_index * 3 + s->vga.dac_sub_index]; + } + if (++s->vga.dac_sub_index == 3) { + s->vga.dac_sub_index = 0; + s->vga.dac_read_index++; + } + return val; +} + +static void cirrus_vga_write_palette(CirrusVGAState * s, int reg_value) +{ + s->vga.dac_cache[s->vga.dac_sub_index] = reg_value; + if (++s->vga.dac_sub_index == 3) { + if ((s->vga.sr[0x12] & CIRRUS_CURSOR_HIDDENPEL)) { + memcpy(&s->cirrus_hidden_palette[(s->vga.dac_write_index & 0x0f) * 3], + s->vga.dac_cache, 3); + } else { + memcpy(&s->vga.palette[s->vga.dac_write_index * 3], s->vga.dac_cache, 3); + } + /* XXX update cursor */ + s->vga.dac_sub_index = 0; + s->vga.dac_write_index++; + } +} + +/*************************************** + * + * I/O access between 0x3ce-0x3cf + * + ***************************************/ + +static int cirrus_vga_read_gr(CirrusVGAState * s, unsigned reg_index) +{ + switch (reg_index) { + case 0x00: // Standard VGA, BGCOLOR 0x000000ff + return s->cirrus_shadow_gr0; + case 0x01: // Standard VGA, FGCOLOR 
0x000000ff + return s->cirrus_shadow_gr1; + case 0x02: // Standard VGA + case 0x03: // Standard VGA + case 0x04: // Standard VGA + case 0x06: // Standard VGA + case 0x07: // Standard VGA + case 0x08: // Standard VGA + return s->vga.gr[s->vga.gr_index]; + case 0x05: // Standard VGA, Cirrus extended mode + default: + break; + } + + if (reg_index < 0x3a) { + return s->vga.gr[reg_index]; + } else { + qemu_log_mask(LOG_GUEST_ERROR, + "cirrus: inport gr_index 0x%02x\n", reg_index); + return 0xff; + } +} + +static void +cirrus_vga_write_gr(CirrusVGAState * s, unsigned reg_index, int reg_value) +{ + trace_vga_cirrus_write_gr(reg_index, reg_value); + switch (reg_index) { + case 0x00: // Standard VGA, BGCOLOR 0x000000ff + s->vga.gr[reg_index] = reg_value & gr_mask[reg_index]; + s->cirrus_shadow_gr0 = reg_value; + break; + case 0x01: // Standard VGA, FGCOLOR 0x000000ff + s->vga.gr[reg_index] = reg_value & gr_mask[reg_index]; + s->cirrus_shadow_gr1 = reg_value; + break; + case 0x02: // Standard VGA + case 0x03: // Standard VGA + case 0x04: // Standard VGA + case 0x06: // Standard VGA + case 0x07: // Standard VGA + case 0x08: // Standard VGA + s->vga.gr[reg_index] = reg_value & gr_mask[reg_index]; + break; + case 0x05: // Standard VGA, Cirrus extended mode + s->vga.gr[reg_index] = reg_value & 0x7f; + cirrus_update_memory_access(s); + break; + case 0x09: // bank offset #0 + case 0x0A: // bank offset #1 + s->vga.gr[reg_index] = reg_value; + cirrus_update_bank_ptr(s, 0); + cirrus_update_bank_ptr(s, 1); + cirrus_update_memory_access(s); + break; + case 0x0B: + s->vga.gr[reg_index] = reg_value; + cirrus_update_bank_ptr(s, 0); + cirrus_update_bank_ptr(s, 1); + cirrus_update_memory_access(s); + break; + case 0x10: // BGCOLOR 0x0000ff00 + case 0x11: // FGCOLOR 0x0000ff00 + case 0x12: // BGCOLOR 0x00ff0000 + case 0x13: // FGCOLOR 0x00ff0000 + case 0x14: // BGCOLOR 0xff000000 + case 0x15: // FGCOLOR 0xff000000 + case 0x20: // BLT WIDTH 0x0000ff + case 0x22: // BLT HEIGHT 0x0000ff + case 0x24: // BLT DEST PITCH 0x0000ff + case 0x26: // BLT SRC PITCH 0x0000ff + case 0x28: // BLT DEST ADDR 0x0000ff + case 0x29: // BLT DEST ADDR 0x00ff00 + case 0x2c: // BLT SRC ADDR 0x0000ff + case 0x2d: // BLT SRC ADDR 0x00ff00 + case 0x2f: // BLT WRITEMASK + case 0x30: // BLT MODE + case 0x32: // RASTER OP + case 0x33: // BLT MODEEXT + case 0x34: // BLT TRANSPARENT COLOR 0x00ff + case 0x35: // BLT TRANSPARENT COLOR 0xff00 + case 0x38: // BLT TRANSPARENT COLOR MASK 0x00ff + case 0x39: // BLT TRANSPARENT COLOR MASK 0xff00 + s->vga.gr[reg_index] = reg_value; + break; + case 0x21: // BLT WIDTH 0x001f00 + case 0x23: // BLT HEIGHT 0x001f00 + case 0x25: // BLT DEST PITCH 0x001f00 + case 0x27: // BLT SRC PITCH 0x001f00 + s->vga.gr[reg_index] = reg_value & 0x1f; + break; + case 0x2a: // BLT DEST ADDR 0x3f0000 + s->vga.gr[reg_index] = reg_value & 0x3f; + /* in auto-start mode this write kicks off the bitblt immediately */ + if (s->vga.gr[0x31] & CIRRUS_BLT_AUTOSTART) { + cirrus_bitblt_start(s); + } + break; + case 0x2e: // BLT SRC ADDR 0x3f0000 + s->vga.gr[reg_index] = reg_value & 0x3f; + break; + case 0x31: // BLT STATUS/START + cirrus_write_bitblt(s, reg_value); + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "cirrus: outport gr_index 0x%02x, gr_value 0x%02x\n", + reg_index, reg_value); + break; + } +} + +/*************************************** + * + * I/O access between 0x3d4-0x3d5 + * + ***************************************/ + +static int cirrus_vga_read_cr(CirrusVGAState * s, unsigned reg_index) +{ + switch (reg_index) { + case 0x00: // Standard VGA
+ case 0x01: // Standard VGA + case 0x02: // Standard VGA + case 0x03: // Standard VGA + case 0x04: // Standard VGA + case 0x05: // Standard VGA + case 0x06: // Standard VGA + case 0x07: // Standard VGA + case 0x08: // Standard VGA + case 0x09: // Standard VGA + case 0x0a: // Standard VGA + case 0x0b: // Standard VGA + case 0x0c: // Standard VGA + case 0x0d: // Standard VGA + case 0x0e: // Standard VGA + case 0x0f: // Standard VGA + case 0x10: // Standard VGA + case 0x11: // Standard VGA + case 0x12: // Standard VGA + case 0x13: // Standard VGA + case 0x14: // Standard VGA + case 0x15: // Standard VGA + case 0x16: // Standard VGA + case 0x17: // Standard VGA + case 0x18: // Standard VGA + return s->vga.cr[s->vga.cr_index]; + case 0x24: // Attribute Controller Toggle Readback (R) + return (s->vga.ar_flip_flop << 7); + case 0x19: // Interlace End + case 0x1a: // Miscellaneous Control + case 0x1b: // Extended Display Control + case 0x1c: // Sync Adjust and Genlock + case 0x1d: // Overlay Extended Control + case 0x22: // Graphics Data Latches Readback (R) + case 0x25: // Part Status + case 0x27: // Part ID (R) + return s->vga.cr[s->vga.cr_index]; + case 0x26: // Attribute Controller Index Readback (R) + return s->vga.ar_index & 0x3f; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "cirrus: inport cr_index 0x%02x\n", reg_index); + return 0xff; + } +} + +static void cirrus_vga_write_cr(CirrusVGAState * s, int reg_value) +{ + switch (s->vga.cr_index) { + case 0x00: // Standard VGA + case 0x01: // Standard VGA + case 0x02: // Standard VGA + case 0x03: // Standard VGA + case 0x04: // Standard VGA + case 0x05: // Standard VGA + case 0x06: // Standard VGA + case 0x07: // Standard VGA + case 0x08: // Standard VGA + case 0x09: // Standard VGA + case 0x0a: // Standard VGA + case 0x0b: // Standard VGA + case 0x0c: // Standard VGA + case 0x0d: // Standard VGA + case 0x0e: // Standard VGA + case 0x0f: // Standard VGA + case 0x10: // Standard VGA + case 0x11: // Standard VGA + case 0x12: // Standard VGA + case 0x13: // Standard VGA + case 0x14: // Standard VGA + case 0x15: // Standard VGA + case 0x16: // Standard VGA + case 0x17: // Standard VGA + case 0x18: // Standard VGA + /* handle CR0-7 protection */ + if ((s->vga.cr[0x11] & 0x80) && s->vga.cr_index <= 7) { + /* can always write bit 4 of CR7 */ + if (s->vga.cr_index == 7) + s->vga.cr[7] = (s->vga.cr[7] & ~0x10) | (reg_value & 0x10); + return; + } + s->vga.cr[s->vga.cr_index] = reg_value; + switch(s->vga.cr_index) { + case 0x00: + case 0x04: + case 0x05: + case 0x06: + case 0x07: + case 0x11: + case 0x17: + s->vga.update_retrace_info(&s->vga); + break; + } + break; + case 0x19: // Interlace End + case 0x1a: // Miscellaneous Control + case 0x1b: // Extended Display Control + case 0x1c: // Sync Adjust and Genlock + case 0x1d: // Overlay Extended Control + s->vga.cr[s->vga.cr_index] = reg_value; +#ifdef DEBUG_CIRRUS + printf("cirrus: handled outport cr_index %02x, cr_value %02x\n", + s->vga.cr_index, reg_value); +#endif + break; + case 0x22: // Graphics Data Latches Readback (R) + case 0x24: // Attribute Controller Toggle Readback (R) + case 0x26: // Attribute Controller Index Readback (R) + case 0x27: // Part ID (R) + break; + case 0x25: // Part Status + default: + qemu_log_mask(LOG_GUEST_ERROR, + "cirrus: outport cr_index 0x%02x, cr_value 0x%02x\n", + s->vga.cr_index, reg_value); + break; + } +} + +/*************************************** + * + * memory-mapped I/O (bitblt) + * + ***************************************/ + +static uint8_t 
cirrus_mmio_blt_read(CirrusVGAState * s, unsigned address) +{ + int value = 0xff; + + switch (address) { + case (CIRRUS_MMIO_BLTBGCOLOR + 0): + value = cirrus_vga_read_gr(s, 0x00); + break; + case (CIRRUS_MMIO_BLTBGCOLOR + 1): + value = cirrus_vga_read_gr(s, 0x10); + break; + case (CIRRUS_MMIO_BLTBGCOLOR + 2): + value = cirrus_vga_read_gr(s, 0x12); + break; + case (CIRRUS_MMIO_BLTBGCOLOR + 3): + value = cirrus_vga_read_gr(s, 0x14); + break; + case (CIRRUS_MMIO_BLTFGCOLOR + 0): + value = cirrus_vga_read_gr(s, 0x01); + break; + case (CIRRUS_MMIO_BLTFGCOLOR + 1): + value = cirrus_vga_read_gr(s, 0x11); + break; + case (CIRRUS_MMIO_BLTFGCOLOR + 2): + value = cirrus_vga_read_gr(s, 0x13); + break; + case (CIRRUS_MMIO_BLTFGCOLOR + 3): + value = cirrus_vga_read_gr(s, 0x15); + break; + case (CIRRUS_MMIO_BLTWIDTH + 0): + value = cirrus_vga_read_gr(s, 0x20); + break; + case (CIRRUS_MMIO_BLTWIDTH + 1): + value = cirrus_vga_read_gr(s, 0x21); + break; + case (CIRRUS_MMIO_BLTHEIGHT + 0): + value = cirrus_vga_read_gr(s, 0x22); + break; + case (CIRRUS_MMIO_BLTHEIGHT + 1): + value = cirrus_vga_read_gr(s, 0x23); + break; + case (CIRRUS_MMIO_BLTDESTPITCH + 0): + value = cirrus_vga_read_gr(s, 0x24); + break; + case (CIRRUS_MMIO_BLTDESTPITCH + 1): + value = cirrus_vga_read_gr(s, 0x25); + break; + case (CIRRUS_MMIO_BLTSRCPITCH + 0): + value = cirrus_vga_read_gr(s, 0x26); + break; + case (CIRRUS_MMIO_BLTSRCPITCH + 1): + value = cirrus_vga_read_gr(s, 0x27); + break; + case (CIRRUS_MMIO_BLTDESTADDR + 0): + value = cirrus_vga_read_gr(s, 0x28); + break; + case (CIRRUS_MMIO_BLTDESTADDR + 1): + value = cirrus_vga_read_gr(s, 0x29); + break; + case (CIRRUS_MMIO_BLTDESTADDR + 2): + value = cirrus_vga_read_gr(s, 0x2a); + break; + case (CIRRUS_MMIO_BLTSRCADDR + 0): + value = cirrus_vga_read_gr(s, 0x2c); + break; + case (CIRRUS_MMIO_BLTSRCADDR + 1): + value = cirrus_vga_read_gr(s, 0x2d); + break; + case (CIRRUS_MMIO_BLTSRCADDR + 2): + value = cirrus_vga_read_gr(s, 0x2e); + break; + case CIRRUS_MMIO_BLTWRITEMASK: + value = cirrus_vga_read_gr(s, 0x2f); + break; + case CIRRUS_MMIO_BLTMODE: + value = cirrus_vga_read_gr(s, 0x30); + break; + case CIRRUS_MMIO_BLTROP: + value = cirrus_vga_read_gr(s, 0x32); + break; + case CIRRUS_MMIO_BLTMODEEXT: + value = cirrus_vga_read_gr(s, 0x33); + break; + case (CIRRUS_MMIO_BLTTRANSPARENTCOLOR + 0): + value = cirrus_vga_read_gr(s, 0x34); + break; + case (CIRRUS_MMIO_BLTTRANSPARENTCOLOR + 1): + value = cirrus_vga_read_gr(s, 0x35); + break; + case (CIRRUS_MMIO_BLTTRANSPARENTCOLORMASK + 0): + value = cirrus_vga_read_gr(s, 0x38); + break; + case (CIRRUS_MMIO_BLTTRANSPARENTCOLORMASK + 1): + value = cirrus_vga_read_gr(s, 0x39); + break; + case CIRRUS_MMIO_BLTSTATUS: + value = cirrus_vga_read_gr(s, 0x31); + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "cirrus: mmio read - address 0x%04x\n", address); + break; + } + + trace_vga_cirrus_write_blt(address, value); + return (uint8_t) value; +} + +static void cirrus_mmio_blt_write(CirrusVGAState * s, unsigned address, + uint8_t value) +{ + trace_vga_cirrus_write_blt(address, value); + switch (address) { + case (CIRRUS_MMIO_BLTBGCOLOR + 0): + cirrus_vga_write_gr(s, 0x00, value); + break; + case (CIRRUS_MMIO_BLTBGCOLOR + 1): + cirrus_vga_write_gr(s, 0x10, value); + break; + case (CIRRUS_MMIO_BLTBGCOLOR + 2): + cirrus_vga_write_gr(s, 0x12, value); + break; + case (CIRRUS_MMIO_BLTBGCOLOR + 3): + cirrus_vga_write_gr(s, 0x14, value); + break; + case (CIRRUS_MMIO_BLTFGCOLOR + 0): + cirrus_vga_write_gr(s, 0x01, value); + break; + case (CIRRUS_MMIO_BLTFGCOLOR 
+ 1): + cirrus_vga_write_gr(s, 0x11, value); + break; + case (CIRRUS_MMIO_BLTFGCOLOR + 2): + cirrus_vga_write_gr(s, 0x13, value); + break; + case (CIRRUS_MMIO_BLTFGCOLOR + 3): + cirrus_vga_write_gr(s, 0x15, value); + break; + case (CIRRUS_MMIO_BLTWIDTH + 0): + cirrus_vga_write_gr(s, 0x20, value); + break; + case (CIRRUS_MMIO_BLTWIDTH + 1): + cirrus_vga_write_gr(s, 0x21, value); + break; + case (CIRRUS_MMIO_BLTHEIGHT + 0): + cirrus_vga_write_gr(s, 0x22, value); + break; + case (CIRRUS_MMIO_BLTHEIGHT + 1): + cirrus_vga_write_gr(s, 0x23, value); + break; + case (CIRRUS_MMIO_BLTDESTPITCH + 0): + cirrus_vga_write_gr(s, 0x24, value); + break; + case (CIRRUS_MMIO_BLTDESTPITCH + 1): + cirrus_vga_write_gr(s, 0x25, value); + break; + case (CIRRUS_MMIO_BLTSRCPITCH + 0): + cirrus_vga_write_gr(s, 0x26, value); + break; + case (CIRRUS_MMIO_BLTSRCPITCH + 1): + cirrus_vga_write_gr(s, 0x27, value); + break; + case (CIRRUS_MMIO_BLTDESTADDR + 0): + cirrus_vga_write_gr(s, 0x28, value); + break; + case (CIRRUS_MMIO_BLTDESTADDR + 1): + cirrus_vga_write_gr(s, 0x29, value); + break; + case (CIRRUS_MMIO_BLTDESTADDR + 2): + cirrus_vga_write_gr(s, 0x2a, value); + break; + case (CIRRUS_MMIO_BLTDESTADDR + 3): + /* ignored */ + break; + case (CIRRUS_MMIO_BLTSRCADDR + 0): + cirrus_vga_write_gr(s, 0x2c, value); + break; + case (CIRRUS_MMIO_BLTSRCADDR + 1): + cirrus_vga_write_gr(s, 0x2d, value); + break; + case (CIRRUS_MMIO_BLTSRCADDR + 2): + cirrus_vga_write_gr(s, 0x2e, value); + break; + case CIRRUS_MMIO_BLTWRITEMASK: + cirrus_vga_write_gr(s, 0x2f, value); + break; + case CIRRUS_MMIO_BLTMODE: + cirrus_vga_write_gr(s, 0x30, value); + break; + case CIRRUS_MMIO_BLTROP: + cirrus_vga_write_gr(s, 0x32, value); + break; + case CIRRUS_MMIO_BLTMODEEXT: + cirrus_vga_write_gr(s, 0x33, value); + break; + case (CIRRUS_MMIO_BLTTRANSPARENTCOLOR + 0): + cirrus_vga_write_gr(s, 0x34, value); + break; + case (CIRRUS_MMIO_BLTTRANSPARENTCOLOR + 1): + cirrus_vga_write_gr(s, 0x35, value); + break; + case (CIRRUS_MMIO_BLTTRANSPARENTCOLORMASK + 0): + cirrus_vga_write_gr(s, 0x38, value); + break; + case (CIRRUS_MMIO_BLTTRANSPARENTCOLORMASK + 1): + cirrus_vga_write_gr(s, 0x39, value); + break; + case CIRRUS_MMIO_BLTSTATUS: + cirrus_vga_write_gr(s, 0x31, value); + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "cirrus: mmio write - addr 0x%04x val 0x%02x (ignored)\n", + address, value); + break; + } +} + +/*************************************** + * + * write mode 4/5 + * + ***************************************/ + +static void cirrus_mem_writeb_mode4and5_8bpp(CirrusVGAState * s, + unsigned mode, + unsigned offset, + uint32_t mem_value) +{ + int x; + unsigned val = mem_value; + uint8_t *dst; + + for (x = 0; x < 8; x++) { + dst = s->vga.vram_ptr + ((offset + x) & s->cirrus_addr_mask); + if (val & 0x80) { + *dst = s->cirrus_shadow_gr1; + } else if (mode == 5) { + *dst = s->cirrus_shadow_gr0; + } + val <<= 1; + } + memory_region_set_dirty(&s->vga.vram, offset, 8); +} + +static void cirrus_mem_writeb_mode4and5_16bpp(CirrusVGAState * s, + unsigned mode, + unsigned offset, + uint32_t mem_value) +{ + int x; + unsigned val = mem_value; + uint8_t *dst; + + for (x = 0; x < 8; x++) { + dst = s->vga.vram_ptr + ((offset + 2 * x) & s->cirrus_addr_mask & ~1); + if (val & 0x80) { + *dst = s->cirrus_shadow_gr1; + *(dst + 1) = s->vga.gr[0x11]; + } else if (mode == 5) { + *dst = s->cirrus_shadow_gr0; + *(dst + 1) = s->vga.gr[0x10]; + } + val <<= 1; + } + memory_region_set_dirty(&s->vga.vram, offset, 16); +} + +/*************************************** + * + * 
memory access between 0xa0000-0xbffff + * + ***************************************/ + +static uint64_t cirrus_vga_mem_read(void *opaque, + hwaddr addr, + uint32_t size) +{ + CirrusVGAState *s = opaque; + unsigned bank_index; + unsigned bank_offset; + uint32_t val; + + if ((s->vga.sr[0x07] & 0x01) == 0) { + return vga_mem_readb(&s->vga, addr); + } + + if (addr < 0x10000) { + /* XXX handle bitblt */ + /* video memory */ + bank_index = addr >> 15; + bank_offset = addr & 0x7fff; + if (bank_offset < s->cirrus_bank_limit[bank_index]) { + bank_offset += s->cirrus_bank_base[bank_index]; + if ((s->vga.gr[0x0B] & 0x14) == 0x14) { + bank_offset <<= 4; + } else if (s->vga.gr[0x0B] & 0x02) { + bank_offset <<= 3; + } + bank_offset &= s->cirrus_addr_mask; + val = *(s->vga.vram_ptr + bank_offset); + } else + val = 0xff; + } else if (addr >= 0x18000 && addr < 0x18100) { + /* memory-mapped I/O */ + val = 0xff; + if ((s->vga.sr[0x17] & 0x44) == 0x04) { + val = cirrus_mmio_blt_read(s, addr & 0xff); + } + } else { + val = 0xff; + qemu_log_mask(LOG_GUEST_ERROR, + "cirrus: mem_readb 0x" TARGET_FMT_plx "\n", addr); + } + return val; +} + +static void cirrus_vga_mem_write(void *opaque, + hwaddr addr, + uint64_t mem_value, + uint32_t size) +{ + CirrusVGAState *s = opaque; + unsigned bank_index; + unsigned bank_offset; + unsigned mode; + + if ((s->vga.sr[0x07] & 0x01) == 0) { + vga_mem_writeb(&s->vga, addr, mem_value); + return; + } + + if (addr < 0x10000) { + if (s->cirrus_srcptr != s->cirrus_srcptr_end) { + /* bitblt */ + *s->cirrus_srcptr++ = (uint8_t) mem_value; + if (s->cirrus_srcptr >= s->cirrus_srcptr_end) { + cirrus_bitblt_cputovideo_next(s); + } + } else { + /* video memory */ + bank_index = addr >> 15; + bank_offset = addr & 0x7fff; + if (bank_offset < s->cirrus_bank_limit[bank_index]) { + bank_offset += s->cirrus_bank_base[bank_index]; + if ((s->vga.gr[0x0B] & 0x14) == 0x14) { + bank_offset <<= 4; + } else if (s->vga.gr[0x0B] & 0x02) { + bank_offset <<= 3; + } + bank_offset &= s->cirrus_addr_mask; + mode = s->vga.gr[0x05] & 0x7; + if (mode < 4 || mode > 5 || ((s->vga.gr[0x0B] & 0x4) == 0)) { + *(s->vga.vram_ptr + bank_offset) = mem_value; + memory_region_set_dirty(&s->vga.vram, bank_offset, + sizeof(mem_value)); + } else { + if ((s->vga.gr[0x0B] & 0x14) != 0x14) { + cirrus_mem_writeb_mode4and5_8bpp(s, mode, + bank_offset, + mem_value); + } else { + cirrus_mem_writeb_mode4and5_16bpp(s, mode, + bank_offset, + mem_value); + } + } + } + } + } else if (addr >= 0x18000 && addr < 0x18100) { + /* memory-mapped I/O */ + if ((s->vga.sr[0x17] & 0x44) == 0x04) { + cirrus_mmio_blt_write(s, addr & 0xff, mem_value); + } + } else { + qemu_log_mask(LOG_GUEST_ERROR, + "cirrus: mem_writeb 0x" TARGET_FMT_plx " " + "value 0x%02" PRIx64 "\n", addr, mem_value); + } +} + +static const MemoryRegionOps cirrus_vga_mem_ops = { + .read = cirrus_vga_mem_read, + .write = cirrus_vga_mem_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .impl = { + .min_access_size = 1, + .max_access_size = 1, + }, +}; + +/*************************************** + * + * hardware cursor + * + ***************************************/ + +static inline void invalidate_cursor1(CirrusVGAState *s) +{ + if (s->last_hw_cursor_size) { + vga_invalidate_scanlines(&s->vga, + s->last_hw_cursor_y + s->last_hw_cursor_y_start, + s->last_hw_cursor_y + s->last_hw_cursor_y_end); + } +} + +static inline void cirrus_cursor_compute_yrange(CirrusVGAState *s) +{ + const uint8_t *src; + uint32_t content; + int y, y_min, y_max; + + src = s->vga.vram_ptr + s->real_vram_size - 16 * 
KiB; + if (s->vga.sr[0x12] & CIRRUS_CURSOR_LARGE) { + src += (s->vga.sr[0x13] & 0x3c) * 256; + y_min = 64; + y_max = -1; + for(y = 0; y < 64; y++) { + content = ((uint32_t *)src)[0] | + ((uint32_t *)src)[1] | + ((uint32_t *)src)[2] | + ((uint32_t *)src)[3]; + if (content) { + if (y < y_min) + y_min = y; + if (y > y_max) + y_max = y; + } + src += 16; + } + } else { + src += (s->vga.sr[0x13] & 0x3f) * 256; + y_min = 32; + y_max = -1; + for(y = 0; y < 32; y++) { + content = ((uint32_t *)src)[0] | + ((uint32_t *)(src + 128))[0]; + if (content) { + if (y < y_min) + y_min = y; + if (y > y_max) + y_max = y; + } + src += 4; + } + } + if (y_min > y_max) { + s->last_hw_cursor_y_start = 0; + s->last_hw_cursor_y_end = 0; + } else { + s->last_hw_cursor_y_start = y_min; + s->last_hw_cursor_y_end = y_max + 1; + } +} + +/* NOTE: we do not currently handle the cursor bitmap change, so we + update the cursor only if it moves. */ +static void cirrus_cursor_invalidate(VGACommonState *s1) +{ + CirrusVGAState *s = container_of(s1, CirrusVGAState, vga); + int size; + + if (!(s->vga.sr[0x12] & CIRRUS_CURSOR_SHOW)) { + size = 0; + } else { + if (s->vga.sr[0x12] & CIRRUS_CURSOR_LARGE) + size = 64; + else + size = 32; + } + /* invalidate last cursor and new cursor if any change */ + if (s->last_hw_cursor_size != size || + s->last_hw_cursor_x != s->vga.hw_cursor_x || + s->last_hw_cursor_y != s->vga.hw_cursor_y) { + + invalidate_cursor1(s); + + s->last_hw_cursor_size = size; + s->last_hw_cursor_x = s->vga.hw_cursor_x; + s->last_hw_cursor_y = s->vga.hw_cursor_y; + /* compute the real cursor min and max y */ + cirrus_cursor_compute_yrange(s); + invalidate_cursor1(s); + } +} + +static void vga_draw_cursor_line(uint8_t *d1, + const uint8_t *src1, + int poffset, int w, + unsigned int color0, + unsigned int color1, + unsigned int color_xor) +{ + const uint8_t *plane0, *plane1; + int x, b0, b1; + uint8_t *d; + + d = d1; + plane0 = src1; + plane1 = src1 + poffset; + for (x = 0; x < w; x++) { + b0 = (plane0[x >> 3] >> (7 - (x & 7))) & 1; + b1 = (plane1[x >> 3] >> (7 - (x & 7))) & 1; + switch (b0 | (b1 << 1)) { + case 0: + break; + case 1: + ((uint32_t *)d)[0] ^= color_xor; + break; + case 2: + ((uint32_t *)d)[0] = color0; + break; + case 3: + ((uint32_t *)d)[0] = color1; + break; + } + d += 4; + } +} + +static void cirrus_cursor_draw_line(VGACommonState *s1, uint8_t *d1, int scr_y) +{ + CirrusVGAState *s = container_of(s1, CirrusVGAState, vga); + int w, h, x1, x2, poffset; + unsigned int color0, color1; + const uint8_t *palette, *src; + uint32_t content; + + if (!(s->vga.sr[0x12] & CIRRUS_CURSOR_SHOW)) + return; + /* fast test to see if the cursor intersects with the scan line */ + if (s->vga.sr[0x12] & CIRRUS_CURSOR_LARGE) { + h = 64; + } else { + h = 32; + } + if (scr_y < s->vga.hw_cursor_y || + scr_y >= (s->vga.hw_cursor_y + h)) { + return; + } + + src = s->vga.vram_ptr + s->real_vram_size - 16 * KiB; + if (s->vga.sr[0x12] & CIRRUS_CURSOR_LARGE) { + src += (s->vga.sr[0x13] & 0x3c) * 256; + src += (scr_y - s->vga.hw_cursor_y) * 16; + poffset = 8; + content = ((uint32_t *)src)[0] | + ((uint32_t *)src)[1] | + ((uint32_t *)src)[2] | + ((uint32_t *)src)[3]; + } else { + src += (s->vga.sr[0x13] & 0x3f) * 256; + src += (scr_y - s->vga.hw_cursor_y) * 4; + + + poffset = 128; + content = ((uint32_t *)src)[0] | + ((uint32_t *)(src + 128))[0]; + } + /* if nothing to draw, no need to continue */ + if (!content) + return; + w = h; + + x1 = s->vga.hw_cursor_x; + if (x1 >= s->vga.last_scr_width) + return; + x2 = s->vga.hw_cursor_x + w; + 
if (x2 > s->vga.last_scr_width) + x2 = s->vga.last_scr_width; + w = x2 - x1; + palette = s->cirrus_hidden_palette; + color0 = rgb_to_pixel32(c6_to_8(palette[0x0 * 3]), + c6_to_8(palette[0x0 * 3 + 1]), + c6_to_8(palette[0x0 * 3 + 2])); + color1 = rgb_to_pixel32(c6_to_8(palette[0xf * 3]), + c6_to_8(palette[0xf * 3 + 1]), + c6_to_8(palette[0xf * 3 + 2])); + d1 += x1 * 4; + vga_draw_cursor_line(d1, src, poffset, w, color0, color1, 0xffffff); +} + +/*************************************** + * + * LFB memory access + * + ***************************************/ + +static uint64_t cirrus_linear_read(void *opaque, hwaddr addr, + unsigned size) +{ + CirrusVGAState *s = opaque; + uint32_t ret; + + addr &= s->cirrus_addr_mask; + + if (((s->vga.sr[0x17] & 0x44) == 0x44) && + ((addr & s->linear_mmio_mask) == s->linear_mmio_mask)) { + /* memory-mapped I/O */ + ret = cirrus_mmio_blt_read(s, addr & 0xff); + } else if (0) { + /* XXX handle bitblt */ + ret = 0xff; + } else { + /* video memory */ + if ((s->vga.gr[0x0B] & 0x14) == 0x14) { + addr <<= 4; + } else if (s->vga.gr[0x0B] & 0x02) { + addr <<= 3; + } + addr &= s->cirrus_addr_mask; + ret = *(s->vga.vram_ptr + addr); + } + + return ret; +} + +static void cirrus_linear_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + CirrusVGAState *s = opaque; + unsigned mode; + + addr &= s->cirrus_addr_mask; + + if (((s->vga.sr[0x17] & 0x44) == 0x44) && + ((addr & s->linear_mmio_mask) == s->linear_mmio_mask)) { + /* memory-mapped I/O */ + cirrus_mmio_blt_write(s, addr & 0xff, val); + } else if (s->cirrus_srcptr != s->cirrus_srcptr_end) { + /* bitblt */ + *s->cirrus_srcptr++ = (uint8_t) val; + if (s->cirrus_srcptr >= s->cirrus_srcptr_end) { + cirrus_bitblt_cputovideo_next(s); + } + } else { + /* video memory */ + if ((s->vga.gr[0x0B] & 0x14) == 0x14) { + addr <<= 4; + } else if (s->vga.gr[0x0B] & 0x02) { + addr <<= 3; + } + addr &= s->cirrus_addr_mask; + + mode = s->vga.gr[0x05] & 0x7; + if (mode < 4 || mode > 5 || ((s->vga.gr[0x0B] & 0x4) == 0)) { + *(s->vga.vram_ptr + addr) = (uint8_t) val; + memory_region_set_dirty(&s->vga.vram, addr, 1); + } else { + if ((s->vga.gr[0x0B] & 0x14) != 0x14) { + cirrus_mem_writeb_mode4and5_8bpp(s, mode, addr, val); + } else { + cirrus_mem_writeb_mode4and5_16bpp(s, mode, addr, val); + } + } + } +} + +/*************************************** + * + * system to screen memory access + * + ***************************************/ + + +static uint64_t cirrus_linear_bitblt_read(void *opaque, + hwaddr addr, + unsigned size) +{ + CirrusVGAState *s = opaque; + + /* XXX handle bitblt */ + (void)s; + qemu_log_mask(LOG_UNIMP, + "cirrus: linear bitblt is not implemented\n"); + + return 0xff; +} + +static void cirrus_linear_bitblt_write(void *opaque, + hwaddr addr, + uint64_t val, + unsigned size) +{ + CirrusVGAState *s = opaque; + + if (s->cirrus_srcptr != s->cirrus_srcptr_end) { + /* bitblt */ + *s->cirrus_srcptr++ = (uint8_t) val; + if (s->cirrus_srcptr >= s->cirrus_srcptr_end) { + cirrus_bitblt_cputovideo_next(s); + } + } +} + +static const MemoryRegionOps cirrus_linear_bitblt_io_ops = { + .read = cirrus_linear_bitblt_read, + .write = cirrus_linear_bitblt_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .impl = { + .min_access_size = 1, + .max_access_size = 1, + }, +}; + +static void map_linear_vram_bank(CirrusVGAState *s, unsigned bank) +{ + MemoryRegion *mr = &s->cirrus_bank[bank]; + bool enabled = !(s->cirrus_srcptr != s->cirrus_srcptr_end) + && !((s->vga.sr[0x07] & 0x01) == 0) + && !((s->vga.gr[0x0B] & 0x14) == 0x14) + && 
!(s->vga.gr[0x0B] & 0x02); + + memory_region_set_enabled(mr, enabled); + memory_region_set_alias_offset(mr, s->cirrus_bank_base[bank]); +} + +static void map_linear_vram(CirrusVGAState *s) +{ + if (s->bustype == CIRRUS_BUSTYPE_PCI && !s->linear_vram) { + s->linear_vram = true; + memory_region_add_subregion_overlap(&s->pci_bar, 0, &s->vga.vram, 1); + } + map_linear_vram_bank(s, 0); + map_linear_vram_bank(s, 1); +} + +static void unmap_linear_vram(CirrusVGAState *s) +{ + if (s->bustype == CIRRUS_BUSTYPE_PCI && s->linear_vram) { + s->linear_vram = false; + memory_region_del_subregion(&s->pci_bar, &s->vga.vram); + } + memory_region_set_enabled(&s->cirrus_bank[0], false); + memory_region_set_enabled(&s->cirrus_bank[1], false); +} + +/* Compute the memory access functions */ +static void cirrus_update_memory_access(CirrusVGAState *s) +{ + unsigned mode; + + memory_region_transaction_begin(); + if ((s->vga.sr[0x17] & 0x44) == 0x44) { + goto generic_io; + } else if (s->cirrus_srcptr != s->cirrus_srcptr_end) { + goto generic_io; + } else { + if ((s->vga.gr[0x0B] & 0x14) == 0x14) { + goto generic_io; + } else if (s->vga.gr[0x0B] & 0x02) { + goto generic_io; + } + + mode = s->vga.gr[0x05] & 0x7; + if (mode < 4 || mode > 5 || ((s->vga.gr[0x0B] & 0x4) == 0)) { + map_linear_vram(s); + } else { + generic_io: + unmap_linear_vram(s); + } + } + memory_region_transaction_commit(); +} + + +/* I/O ports */ + +static uint64_t cirrus_vga_ioport_read(void *opaque, hwaddr addr, + unsigned size) +{ + CirrusVGAState *c = opaque; + VGACommonState *s = &c->vga; + int val, index; + + addr += 0x3b0; + + if (vga_ioport_invalid(s, addr)) { + val = 0xff; + } else { + switch (addr) { + case 0x3c0: + if (s->ar_flip_flop == 0) { + val = s->ar_index; + } else { + val = 0; + } + break; + case 0x3c1: + index = s->ar_index & 0x1f; + if (index < 21) + val = s->ar[index]; + else + val = 0; + break; + case 0x3c2: + val = s->st00; + break; + case 0x3c4: + val = s->sr_index; + break; + case 0x3c5: + val = cirrus_vga_read_sr(c); + break; + case 0x3c6: + val = cirrus_read_hidden_dac(c); + break; + case 0x3c7: + val = s->dac_state; + break; + case 0x3c8: + val = s->dac_write_index; + c->cirrus_hidden_dac_lockindex = 0; + break; + case 0x3c9: + val = cirrus_vga_read_palette(c); + break; + case 0x3ca: + val = s->fcr; + break; + case 0x3cc: + val = s->msr; + break; + case 0x3ce: + val = s->gr_index; + break; + case 0x3cf: + val = cirrus_vga_read_gr(c, s->gr_index); + break; + case 0x3b4: + case 0x3d4: + val = s->cr_index; + break; + case 0x3b5: + case 0x3d5: + val = cirrus_vga_read_cr(c, s->cr_index); + break; + case 0x3ba: + case 0x3da: + /* just toggle to fool polling */ + val = s->st01 = s->retrace(s); + s->ar_flip_flop = 0; + break; + default: + val = 0x00; + break; + } + } + trace_vga_cirrus_read_io(addr, val); + return val; +} + +static void cirrus_vga_ioport_write(void *opaque, hwaddr addr, uint64_t val, + unsigned size) +{ + CirrusVGAState *c = opaque; + VGACommonState *s = &c->vga; + int index; + + addr += 0x3b0; + + /* check port range access depending on color/monochrome mode */ + if (vga_ioport_invalid(s, addr)) { + return; + } + trace_vga_cirrus_write_io(addr, val); + + switch (addr) { + case 0x3c0: + if (s->ar_flip_flop == 0) { + val &= 0x3f; + s->ar_index = val; + } else { + index = s->ar_index & 0x1f; + switch (index) { + case 0x00 ...
0x0f: + s->ar[index] = val & 0x3f; + break; + case 0x10: + s->ar[index] = val & ~0x10; + break; + case 0x11: + s->ar[index] = val; + break; + case 0x12: + s->ar[index] = val & ~0xc0; + break; + case 0x13: + s->ar[index] = val & ~0xf0; + break; + case 0x14: + s->ar[index] = val & ~0xf0; + break; + default: + break; + } + } + s->ar_flip_flop ^= 1; + break; + case 0x3c2: + s->msr = val & ~0x10; + s->update_retrace_info(s); + break; + case 0x3c4: + s->sr_index = val; + break; + case 0x3c5: + cirrus_vga_write_sr(c, val); + break; + case 0x3c6: + cirrus_write_hidden_dac(c, val); + break; + case 0x3c7: + s->dac_read_index = val; + s->dac_sub_index = 0; + s->dac_state = 3; + break; + case 0x3c8: + s->dac_write_index = val; + s->dac_sub_index = 0; + s->dac_state = 0; + break; + case 0x3c9: + cirrus_vga_write_palette(c, val); + break; + case 0x3ce: + s->gr_index = val; + break; + case 0x3cf: + cirrus_vga_write_gr(c, s->gr_index, val); + break; + case 0x3b4: + case 0x3d4: + s->cr_index = val; + break; + case 0x3b5: + case 0x3d5: + cirrus_vga_write_cr(c, val); + break; + case 0x3ba: + case 0x3da: + s->fcr = val & 0x10; + break; + } +} + +/*************************************** + * + * memory-mapped I/O access + * + ***************************************/ + +static uint64_t cirrus_mmio_read(void *opaque, hwaddr addr, + unsigned size) +{ + CirrusVGAState *s = opaque; + + if (addr >= 0x100) { + return cirrus_mmio_blt_read(s, addr - 0x100); + } else { + return cirrus_vga_ioport_read(s, addr + 0x10, size); + } +} + +static void cirrus_mmio_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + CirrusVGAState *s = opaque; + + if (addr >= 0x100) { + cirrus_mmio_blt_write(s, addr - 0x100, val); + } else { + cirrus_vga_ioport_write(s, addr + 0x10, val, size); + } +} + +static const MemoryRegionOps cirrus_mmio_io_ops = { + .read = cirrus_mmio_read, + .write = cirrus_mmio_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .impl = { + .min_access_size = 1, + .max_access_size = 1, + }, +}; + +/* load/save state */ + +static int cirrus_post_load(void *opaque, int version_id) +{ + CirrusVGAState *s = opaque; + + s->vga.gr[0x00] = s->cirrus_shadow_gr0 & 0x0f; + s->vga.gr[0x01] = s->cirrus_shadow_gr1 & 0x0f; + + cirrus_update_bank_ptr(s, 0); + cirrus_update_bank_ptr(s, 1); + cirrus_update_memory_access(s); + /* force refresh */ + s->vga.graphic_mode = -1; + + return 0; +} + +const VMStateDescription vmstate_cirrus_vga = { + .name = "cirrus_vga", + .version_id = 2, + .minimum_version_id = 1, + .post_load = cirrus_post_load, + .fields = (VMStateField[]) { + VMSTATE_UINT32(vga.latch, CirrusVGAState), + VMSTATE_UINT8(vga.sr_index, CirrusVGAState), + VMSTATE_BUFFER(vga.sr, CirrusVGAState), + VMSTATE_UINT8(vga.gr_index, CirrusVGAState), + VMSTATE_UINT8(cirrus_shadow_gr0, CirrusVGAState), + VMSTATE_UINT8(cirrus_shadow_gr1, CirrusVGAState), + VMSTATE_BUFFER_START_MIDDLE(vga.gr, CirrusVGAState, 2), + VMSTATE_UINT8(vga.ar_index, CirrusVGAState), + VMSTATE_BUFFER(vga.ar, CirrusVGAState), + VMSTATE_INT32(vga.ar_flip_flop, CirrusVGAState), + VMSTATE_UINT8(vga.cr_index, CirrusVGAState), + VMSTATE_BUFFER(vga.cr, CirrusVGAState), + VMSTATE_UINT8(vga.msr, CirrusVGAState), + VMSTATE_UINT8(vga.fcr, CirrusVGAState), + VMSTATE_UINT8(vga.st00, CirrusVGAState), + VMSTATE_UINT8(vga.st01, CirrusVGAState), + VMSTATE_UINT8(vga.dac_state, CirrusVGAState), + VMSTATE_UINT8(vga.dac_sub_index, CirrusVGAState), + VMSTATE_UINT8(vga.dac_read_index, CirrusVGAState), + VMSTATE_UINT8(vga.dac_write_index, CirrusVGAState), + 
VMSTATE_BUFFER(vga.dac_cache, CirrusVGAState), + VMSTATE_BUFFER(vga.palette, CirrusVGAState), + VMSTATE_INT32(vga.bank_offset, CirrusVGAState), + VMSTATE_UINT8(cirrus_hidden_dac_lockindex, CirrusVGAState), + VMSTATE_UINT8(cirrus_hidden_dac_data, CirrusVGAState), + VMSTATE_UINT32(vga.hw_cursor_x, CirrusVGAState), + VMSTATE_UINT32(vga.hw_cursor_y, CirrusVGAState), + /* XXX: we do not save the bitblt state - we assume we do not save + the state when the blitter is active */ + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_pci_cirrus_vga = { + .name = "cirrus_vga", + .version_id = 2, + .minimum_version_id = 2, + .fields = (VMStateField[]) { + VMSTATE_PCI_DEVICE(dev, PCICirrusVGAState), + VMSTATE_STRUCT(cirrus_vga, PCICirrusVGAState, 0, + vmstate_cirrus_vga, CirrusVGAState), + VMSTATE_END_OF_LIST() + } +}; + +/*************************************** + * + * initialize + * + ***************************************/ + +static void cirrus_reset(void *opaque) +{ + CirrusVGAState *s = opaque; + + vga_common_reset(&s->vga); + unmap_linear_vram(s); + s->vga.sr[0x06] = 0x0f; + if (s->device_id == CIRRUS_ID_CLGD5446) { + /* 4MB 64 bit memory config, always PCI */ + s->vga.sr[0x1F] = 0x2d; // MemClock + s->vga.gr[0x18] = 0x0f; // fastest memory configuration + s->vga.sr[0x0f] = 0x98; + s->vga.sr[0x17] = 0x20; + s->vga.sr[0x15] = 0x04; /* memory size, 3=2MB, 4=4MB */ + } else { + s->vga.sr[0x1F] = 0x22; // MemClock + s->vga.sr[0x0F] = CIRRUS_MEMSIZE_2M; + s->vga.sr[0x17] = s->bustype; + s->vga.sr[0x15] = 0x03; /* memory size, 3=2MB, 4=4MB */ + } + s->vga.cr[0x27] = s->device_id; + + s->cirrus_hidden_dac_lockindex = 5; + s->cirrus_hidden_dac_data = 0; +} + +static const MemoryRegionOps cirrus_linear_io_ops = { + .read = cirrus_linear_read, + .write = cirrus_linear_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .impl = { + .min_access_size = 1, + .max_access_size = 1, + }, +}; + +static const MemoryRegionOps cirrus_vga_io_ops = { + .read = cirrus_vga_ioport_read, + .write = cirrus_vga_ioport_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .impl = { + .min_access_size = 1, + .max_access_size = 1, + }, +}; + +void cirrus_init_common(CirrusVGAState *s, Object *owner, + int device_id, int is_pci, + MemoryRegion *system_memory, MemoryRegion *system_io) +{ + int i; + static int inited; + + if (!inited) { + inited = 1; + for(i = 0;i < 256; i++) + rop_to_index[i] = CIRRUS_ROP_NOP_INDEX; /* nop rop */ + rop_to_index[CIRRUS_ROP_0] = 0; + rop_to_index[CIRRUS_ROP_SRC_AND_DST] = 1; + rop_to_index[CIRRUS_ROP_NOP] = 2; + rop_to_index[CIRRUS_ROP_SRC_AND_NOTDST] = 3; + rop_to_index[CIRRUS_ROP_NOTDST] = 4; + rop_to_index[CIRRUS_ROP_SRC] = 5; + rop_to_index[CIRRUS_ROP_1] = 6; + rop_to_index[CIRRUS_ROP_NOTSRC_AND_DST] = 7; + rop_to_index[CIRRUS_ROP_SRC_XOR_DST] = 8; + rop_to_index[CIRRUS_ROP_SRC_OR_DST] = 9; + rop_to_index[CIRRUS_ROP_NOTSRC_OR_NOTDST] = 10; + rop_to_index[CIRRUS_ROP_SRC_NOTXOR_DST] = 11; + rop_to_index[CIRRUS_ROP_SRC_OR_NOTDST] = 12; + rop_to_index[CIRRUS_ROP_NOTSRC] = 13; + rop_to_index[CIRRUS_ROP_NOTSRC_OR_DST] = 14; + rop_to_index[CIRRUS_ROP_NOTSRC_AND_NOTDST] = 15; + } + + /* per-instance state, set for every device and not just the first */ + s->device_id = device_id; + if (is_pci) + s->bustype = CIRRUS_BUSTYPE_PCI; + else + s->bustype = CIRRUS_BUSTYPE_ISA; + + /* Register ioport 0x3b0 - 0x3df */ + memory_region_init_io(&s->cirrus_vga_io, owner, &cirrus_vga_io_ops, s, + "cirrus-io", 0x30); + memory_region_set_flush_coalesced(&s->cirrus_vga_io); + memory_region_add_subregion(system_io, 0x3b0, &s->cirrus_vga_io); + +
memory_region_init(&s->low_mem_container, owner, + "cirrus-lowmem-container", + 0x20000); + + memory_region_init_io(&s->low_mem, owner, &cirrus_vga_mem_ops, s, + "cirrus-low-memory", 0x20000); + memory_region_add_subregion(&s->low_mem_container, 0, &s->low_mem); + for (i = 0; i < 2; ++i) { + static const char *names[] = { "vga.bank0", "vga.bank1" }; + MemoryRegion *bank = &s->cirrus_bank[i]; + memory_region_init_alias(bank, owner, names[i], &s->vga.vram, + 0, 0x8000); + memory_region_set_enabled(bank, false); + memory_region_add_subregion_overlap(&s->low_mem_container, i * 0x8000, + bank, 1); + } + memory_region_add_subregion_overlap(system_memory, + 0x000a0000, + &s->low_mem_container, + 1); + memory_region_set_coalescing(&s->low_mem); + + /* I/O handler for LFB */ + memory_region_init_io(&s->cirrus_linear_io, owner, &cirrus_linear_io_ops, s, + "cirrus-linear-io", s->vga.vram_size_mb * MiB); + memory_region_set_flush_coalesced(&s->cirrus_linear_io); + + /* I/O handler for the LFB bitblt aperture */ + memory_region_init_io(&s->cirrus_linear_bitblt_io, owner, + &cirrus_linear_bitblt_io_ops, + s, + "cirrus-bitblt-mmio", + 0x400000); + memory_region_set_flush_coalesced(&s->cirrus_linear_bitblt_io); + + /* I/O handler for memory-mapped I/O */ + memory_region_init_io(&s->cirrus_mmio_io, owner, &cirrus_mmio_io_ops, s, + "cirrus-mmio", CIRRUS_PNPMMIO_SIZE); + memory_region_set_flush_coalesced(&s->cirrus_mmio_io); + + s->real_vram_size = + (s->device_id == CIRRUS_ID_CLGD5446) ? 4 * MiB : 2 * MiB; + + /* XXX: s->vga.vram_size must be a power of two */ + s->cirrus_addr_mask = s->real_vram_size - 1; + s->linear_mmio_mask = s->real_vram_size - 256; + + s->vga.get_bpp = cirrus_get_bpp; + s->vga.get_offsets = cirrus_get_offsets; + s->vga.get_resolution = cirrus_get_resolution; + s->vga.cursor_invalidate = cirrus_cursor_invalidate; + s->vga.cursor_draw_line = cirrus_cursor_draw_line; + + qemu_register_reset(cirrus_reset, s); +} + +/*************************************** + * + * PCI bus support + * + ***************************************/ + +static void pci_cirrus_vga_realize(PCIDevice *dev, Error **errp) +{ + PCICirrusVGAState *d = PCI_CIRRUS_VGA(dev); + CirrusVGAState *s = &d->cirrus_vga; + PCIDeviceClass *pc = PCI_DEVICE_GET_CLASS(dev); + int16_t device_id = pc->device_id; + + /* Follow real hardware: the emulated Cirrus card has 4 MB of video + memory. Also accept 8 MB/16 MB for backward compatibility. 
*/ + if (s->vga.vram_size_mb != 4 && s->vga.vram_size_mb != 8 && + s->vga.vram_size_mb != 16) { + error_setg(errp, "Invalid cirrus_vga ram size '%u'", + s->vga.vram_size_mb); + return; + } + /* setup VGA */ + vga_common_init(&s->vga, OBJECT(dev)); + cirrus_init_common(s, OBJECT(dev), device_id, 1, pci_address_space(dev), + pci_address_space_io(dev)); + s->vga.con = graphic_console_init(DEVICE(dev), 0, s->vga.hw_ops, &s->vga); + + /* setup PCI */ + + memory_region_init(&s->pci_bar, OBJECT(dev), "cirrus-pci-bar0", 0x2000000); + + /* XXX: add byte swapping apertures */ + memory_region_add_subregion(&s->pci_bar, 0, &s->cirrus_linear_io); + memory_region_add_subregion(&s->pci_bar, 0x1000000, + &s->cirrus_linear_bitblt_io); + + /* setup memory space */ + /* memory #0 LFB */ + /* memory #1 memory-mapped I/O */ + /* XXX: s->vga.vram_size must be a power of two */ + pci_register_bar(&d->dev, 0, PCI_BASE_ADDRESS_MEM_PREFETCH, &s->pci_bar); + if (device_id == CIRRUS_ID_CLGD5446) { + pci_register_bar(&d->dev, 1, 0, &s->cirrus_mmio_io); + } +} + +static Property pci_vga_cirrus_properties[] = { + DEFINE_PROP_UINT32("vgamem_mb", struct PCICirrusVGAState, + cirrus_vga.vga.vram_size_mb, 4), + DEFINE_PROP_BOOL("blitter", struct PCICirrusVGAState, + cirrus_vga.enable_blitter, true), + DEFINE_PROP_BOOL("global-vmstate", struct PCICirrusVGAState, + cirrus_vga.vga.global_vmstate, false), + DEFINE_PROP_END_OF_LIST(), +}; + +static void cirrus_vga_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + + k->realize = pci_cirrus_vga_realize; + k->romfile = VGABIOS_CIRRUS_FILENAME; + k->vendor_id = PCI_VENDOR_ID_CIRRUS; + k->device_id = CIRRUS_ID_CLGD5446; + k->class_id = PCI_CLASS_DISPLAY_VGA; + set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories); + dc->desc = "Cirrus CLGD 54xx VGA"; + dc->vmsd = &vmstate_pci_cirrus_vga; + device_class_set_props(dc, pci_vga_cirrus_properties); + dc->hotpluggable = false; +} + +static const TypeInfo cirrus_vga_info = { + .name = TYPE_PCI_CIRRUS_VGA, + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(PCICirrusVGAState), + .class_init = cirrus_vga_class_init, + .interfaces = (InterfaceInfo[]) { + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { }, + }, +}; + +static void cirrus_vga_register_types(void) +{ + type_register_static(&cirrus_vga_info); +} + +type_init(cirrus_vga_register_types) diff --git a/hw/display/cirrus_vga_internal.h b/hw/display/cirrus_vga_internal.h new file mode 100644 index 000000000..a78ebbd92 --- /dev/null +++ b/hw/display/cirrus_vga_internal.h @@ -0,0 +1,103 @@ +/* + * QEMU Cirrus CLGD 54xx VGA Emulator, shared internal declarations + * + * Copyright (c) 2004 Fabrice Bellard + * Copyright (c) 2004 Makoto Suzuki (suzu) + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#ifndef CIRRUS_VGA_INTERNAL_H +#define CIRRUS_VGA_INTERNAL_H + +#include "vga_int.h" + +/* IDs */ +#define CIRRUS_ID_CLGD5422 (0x23 << 2) +#define CIRRUS_ID_CLGD5426 (0x24 << 2) +#define CIRRUS_ID_CLGD5424 (0x25 << 2) +#define CIRRUS_ID_CLGD5428 (0x26 << 2) +#define CIRRUS_ID_CLGD5430 (0x28 << 2) +#define CIRRUS_ID_CLGD5434 (0x2A << 2) +#define CIRRUS_ID_CLGD5436 (0x2B << 2) +#define CIRRUS_ID_CLGD5446 (0x2E << 2) + +extern const VMStateDescription vmstate_cirrus_vga; + +struct CirrusVGAState; +typedef void (*cirrus_bitblt_rop_t)(struct CirrusVGAState *s, + uint32_t dstaddr, uint32_t srcaddr, + int dstpitch, int srcpitch, + int bltwidth, int bltheight); + +typedef struct CirrusVGAState { + VGACommonState vga; + + MemoryRegion cirrus_vga_io; + MemoryRegion cirrus_linear_io; + MemoryRegion cirrus_linear_bitblt_io; + MemoryRegion cirrus_mmio_io; + MemoryRegion pci_bar; + bool linear_vram; /* vga.vram mapped over cirrus_linear_io */ + MemoryRegion low_mem_container; /* container for 0xa0000-0xc0000 */ + MemoryRegion low_mem; /* always mapped, overridden by: */ + MemoryRegion cirrus_bank[2]; /* aliases at 0xa0000-0xb0000 */ + uint32_t cirrus_addr_mask; + uint32_t linear_mmio_mask; + uint8_t cirrus_shadow_gr0; + uint8_t cirrus_shadow_gr1; + uint8_t cirrus_hidden_dac_lockindex; + uint8_t cirrus_hidden_dac_data; + uint32_t cirrus_bank_base[2]; + uint32_t cirrus_bank_limit[2]; + uint8_t cirrus_hidden_palette[48]; + bool enable_blitter; + int cirrus_blt_pixelwidth; + int cirrus_blt_width; + int cirrus_blt_height; + int cirrus_blt_dstpitch; + int cirrus_blt_srcpitch; + uint32_t cirrus_blt_fgcol; + uint32_t cirrus_blt_bgcol; + uint32_t cirrus_blt_dstaddr; + uint32_t cirrus_blt_srcaddr; + uint8_t cirrus_blt_mode; + uint8_t cirrus_blt_modeext; + cirrus_bitblt_rop_t cirrus_rop; +#define CIRRUS_BLTBUFSIZE (2048 * 4) /* one line width */ + uint8_t cirrus_bltbuf[CIRRUS_BLTBUFSIZE]; + uint8_t *cirrus_srcptr; + uint8_t *cirrus_srcptr_end; + uint32_t cirrus_srccounter; + /* hwcursor display state */ + int last_hw_cursor_size; + int last_hw_cursor_x; + int last_hw_cursor_y; + int last_hw_cursor_y_start; + int last_hw_cursor_y_end; + int real_vram_size; /* XXX: suppress that */ + int device_id; + int bustype; +} CirrusVGAState; + +void cirrus_init_common(CirrusVGAState *s, Object *owner, + int device_id, int is_pci, + MemoryRegion *system_memory, MemoryRegion *system_io); + +#endif diff --git a/hw/display/cirrus_vga_isa.c b/hw/display/cirrus_vga_isa.c new file mode 100644 index 000000000..4f6fb1af3 --- /dev/null +++ b/hw/display/cirrus_vga_isa.c @@ -0,0 +1,99 @@ +/* + * QEMU Cirrus CLGD 54xx VGA Emulator, ISA bus support + * + * Copyright (c) 2004 Fabrice Bellard + * Copyright (c) 2004 Makoto Suzuki (suzu) + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or 
sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/module.h" +#include "hw/loader.h" +#include "hw/qdev-properties.h" +#include "hw/isa/isa.h" +#include "cirrus_vga_internal.h" +#include "qom/object.h" + +#define TYPE_ISA_CIRRUS_VGA "isa-cirrus-vga" +OBJECT_DECLARE_SIMPLE_TYPE(ISACirrusVGAState, ISA_CIRRUS_VGA) + +struct ISACirrusVGAState { + ISADevice parent_obj; + + CirrusVGAState cirrus_vga; +}; + +static void isa_cirrus_vga_realizefn(DeviceState *dev, Error **errp) +{ + ISADevice *isadev = ISA_DEVICE(dev); + ISACirrusVGAState *d = ISA_CIRRUS_VGA(dev); + VGACommonState *s = &d->cirrus_vga.vga; + + /* follow real hardware, cirrus card emulated has 4 MB video memory. + Also accept 8 MB/16 MB for backward compatibility. */ + if (s->vram_size_mb != 4 && s->vram_size_mb != 8 && + s->vram_size_mb != 16) { + error_setg(errp, "Invalid cirrus_vga ram size '%u'", + s->vram_size_mb); + return; + } + s->global_vmstate = true; + vga_common_init(s, OBJECT(dev)); + cirrus_init_common(&d->cirrus_vga, OBJECT(dev), CIRRUS_ID_CLGD5430, 0, + isa_address_space(isadev), + isa_address_space_io(isadev)); + s->con = graphic_console_init(dev, 0, s->hw_ops, s); + rom_add_vga(VGABIOS_CIRRUS_FILENAME); + /* XXX ISA-LFB support */ + /* FIXME not qdev yet */ +} + +static Property isa_cirrus_vga_properties[] = { + DEFINE_PROP_UINT32("vgamem_mb", struct ISACirrusVGAState, + cirrus_vga.vga.vram_size_mb, 4), + DEFINE_PROP_BOOL("blitter", struct ISACirrusVGAState, + cirrus_vga.enable_blitter, true), + DEFINE_PROP_END_OF_LIST(), +}; + +static void isa_cirrus_vga_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->vmsd = &vmstate_cirrus_vga; + dc->realize = isa_cirrus_vga_realizefn; + device_class_set_props(dc, isa_cirrus_vga_properties); + set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories); +} + +static const TypeInfo isa_cirrus_vga_info = { + .name = TYPE_ISA_CIRRUS_VGA, + .parent = TYPE_ISA_DEVICE, + .instance_size = sizeof(ISACirrusVGAState), + .class_init = isa_cirrus_vga_class_init, +}; + +static void cirrus_vga_isa_register_types(void) +{ + type_register_static(&isa_cirrus_vga_info); +} + +type_init(cirrus_vga_isa_register_types) diff --git a/hw/display/cirrus_vga_rop.h b/hw/display/cirrus_vga_rop.h new file mode 100644 index 000000000..0841b9efa --- /dev/null +++ b/hw/display/cirrus_vga_rop.h @@ -0,0 +1,246 @@ +/* + * QEMU Cirrus CLGD 54xx VGA Emulator. 
+ * + * Copyright (c) 2004 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +static inline void glue(rop_8_, ROP_NAME)(CirrusVGAState *s, + uint32_t dstaddr, uint8_t src) +{ + uint8_t *dst = &s->vga.vram_ptr[dstaddr & s->cirrus_addr_mask]; + *dst = ROP_FN(*dst, src); +} + +static inline void glue(rop_tr_8_, ROP_NAME)(CirrusVGAState *s, + uint32_t dstaddr, uint8_t src, + uint8_t transp) +{ + uint8_t *dst = &s->vga.vram_ptr[dstaddr & s->cirrus_addr_mask]; + uint8_t pixel = ROP_FN(*dst, src); + if (pixel != transp) { + *dst = pixel; + } +} + +static inline void glue(rop_16_, ROP_NAME)(CirrusVGAState *s, + uint32_t dstaddr, uint16_t src) +{ + uint16_t *dst = (uint16_t *) + (&s->vga.vram_ptr[dstaddr & s->cirrus_addr_mask & ~1]); + *dst = ROP_FN(*dst, src); +} + +static inline void glue(rop_tr_16_, ROP_NAME)(CirrusVGAState *s, + uint32_t dstaddr, uint16_t src, + uint16_t transp) +{ + uint16_t *dst = (uint16_t *) + (&s->vga.vram_ptr[dstaddr & s->cirrus_addr_mask & ~1]); + uint16_t pixel = ROP_FN(*dst, src); + if (pixel != transp) { + *dst = pixel; + } +} + +static inline void glue(rop_32_, ROP_NAME)(CirrusVGAState *s, + uint32_t dstaddr, uint32_t src) +{ + uint32_t *dst = (uint32_t *) + (&s->vga.vram_ptr[dstaddr & s->cirrus_addr_mask & ~3]); + *dst = ROP_FN(*dst, src); +} + +#define ROP_OP(st, d, s) glue(rop_8_, ROP_NAME)(st, d, s) +#define ROP_OP_TR(st, d, s, t) glue(rop_tr_8_, ROP_NAME)(st, d, s, t) +#define ROP_OP_16(st, d, s) glue(rop_16_, ROP_NAME)(st, d, s) +#define ROP_OP_TR_16(st, d, s, t) glue(rop_tr_16_, ROP_NAME)(st, d, s, t) +#define ROP_OP_32(st, d, s) glue(rop_32_, ROP_NAME)(st, d, s) +#undef ROP_FN + +static void +glue(cirrus_bitblt_rop_fwd_, ROP_NAME)(CirrusVGAState *s, + uint32_t dstaddr, + uint32_t srcaddr, + int dstpitch, int srcpitch, + int bltwidth, int bltheight) +{ + int x,y; + dstpitch -= bltwidth; + srcpitch -= bltwidth; + + if (bltheight > 1 && (dstpitch < 0 || srcpitch < 0)) { + return; + } + + for (y = 0; y < bltheight; y++) { + for (x = 0; x < bltwidth; x++) { + ROP_OP(s, dstaddr, cirrus_src(s, srcaddr)); + dstaddr++; + srcaddr++; + } + dstaddr += dstpitch; + srcaddr += srcpitch; + } +} + +static void +glue(cirrus_bitblt_rop_bkwd_, ROP_NAME)(CirrusVGAState *s, + uint32_t dstaddr, + uint32_t srcaddr, + int dstpitch, int srcpitch, + int bltwidth, int bltheight) +{ + int x,y; + dstpitch += bltwidth; + srcpitch += bltwidth; + for (y = 0; y < bltheight; y++) { + for (x = 0; x < bltwidth; x++) { + ROP_OP(s, dstaddr, 
cirrus_src(s, srcaddr)); + dstaddr--; + srcaddr--; + } + dstaddr += dstpitch; + srcaddr += srcpitch; + } +} + +static void +glue(glue(cirrus_bitblt_rop_fwd_transp_, ROP_NAME),_8)(CirrusVGAState *s, + uint32_t dstaddr, + uint32_t srcaddr, + int dstpitch, + int srcpitch, + int bltwidth, + int bltheight) +{ + int x,y; + uint8_t transp = s->vga.gr[0x34]; + dstpitch -= bltwidth; + srcpitch -= bltwidth; + + if (bltheight > 1 && (dstpitch < 0 || srcpitch < 0)) { + return; + } + + for (y = 0; y < bltheight; y++) { + for (x = 0; x < bltwidth; x++) { + ROP_OP_TR(s, dstaddr, cirrus_src(s, srcaddr), transp); + dstaddr++; + srcaddr++; + } + dstaddr += dstpitch; + srcaddr += srcpitch; + } +} + +static void +glue(glue(cirrus_bitblt_rop_bkwd_transp_, ROP_NAME),_8)(CirrusVGAState *s, + uint32_t dstaddr, + uint32_t srcaddr, + int dstpitch, + int srcpitch, + int bltwidth, + int bltheight) +{ + int x,y; + uint8_t transp = s->vga.gr[0x34]; + dstpitch += bltwidth; + srcpitch += bltwidth; + for (y = 0; y < bltheight; y++) { + for (x = 0; x < bltwidth; x++) { + ROP_OP_TR(s, dstaddr, cirrus_src(s, srcaddr), transp); + dstaddr--; + srcaddr--; + } + dstaddr += dstpitch; + srcaddr += srcpitch; + } +} + +static void +glue(glue(cirrus_bitblt_rop_fwd_transp_, ROP_NAME),_16)(CirrusVGAState *s, + uint32_t dstaddr, + uint32_t srcaddr, + int dstpitch, + int srcpitch, + int bltwidth, + int bltheight) +{ + int x,y; + uint16_t transp = s->vga.gr[0x34] | (uint16_t)s->vga.gr[0x35] << 8; + dstpitch -= bltwidth; + srcpitch -= bltwidth; + + if (bltheight > 1 && (dstpitch < 0 || srcpitch < 0)) { + return; + } + + for (y = 0; y < bltheight; y++) { + for (x = 0; x < bltwidth; x+=2) { + ROP_OP_TR_16(s, dstaddr, cirrus_src16(s, srcaddr), transp); + dstaddr += 2; + srcaddr += 2; + } + dstaddr += dstpitch; + srcaddr += srcpitch; + } +} + +static void +glue(glue(cirrus_bitblt_rop_bkwd_transp_, ROP_NAME),_16)(CirrusVGAState *s, + uint32_t dstaddr, + uint32_t srcaddr, + int dstpitch, + int srcpitch, + int bltwidth, + int bltheight) +{ + int x,y; + uint16_t transp = s->vga.gr[0x34] | (uint16_t)s->vga.gr[0x35] << 8; + dstpitch += bltwidth; + srcpitch += bltwidth; + for (y = 0; y < bltheight; y++) { + for (x = 0; x < bltwidth; x+=2) { + ROP_OP_TR_16(s, dstaddr - 1, cirrus_src16(s, srcaddr - 1), transp); + dstaddr -= 2; + srcaddr -= 2; + } + dstaddr += dstpitch; + srcaddr += srcpitch; + } +} + +#define DEPTH 8 +#include "cirrus_vga_rop2.h" + +#define DEPTH 16 +#include "cirrus_vga_rop2.h" + +#define DEPTH 24 +#include "cirrus_vga_rop2.h" + +#define DEPTH 32 +#include "cirrus_vga_rop2.h" + +#undef ROP_NAME +#undef ROP_OP +#undef ROP_OP_16 +#undef ROP_OP_32 diff --git a/hw/display/cirrus_vga_rop2.h b/hw/display/cirrus_vga_rop2.h new file mode 100644 index 000000000..b208b7348 --- /dev/null +++ b/hw/display/cirrus_vga_rop2.h @@ -0,0 +1,284 @@ +/* + * QEMU Cirrus CLGD 54xx VGA Emulator. + * + * Copyright (c) 2004 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#if DEPTH == 8 +#define PUTPIXEL(s, a, c) ROP_OP(s, a, c) +#elif DEPTH == 16 +#define PUTPIXEL(s, a, c) ROP_OP_16(s, a, c) +#elif DEPTH == 24 +#define PUTPIXEL(s, a, c) do { \ + ROP_OP(s, a, c); \ + ROP_OP(s, a + 1, (c >> 8)); \ + ROP_OP(s, a + 2, (c >> 16)); \ + } while (0) +#elif DEPTH == 32 +#define PUTPIXEL(s, a, c) ROP_OP_32(s, a, c) +#else +#error unsupported DEPTH +#endif + +static void +glue(glue(glue(cirrus_patternfill_, ROP_NAME), _),DEPTH) + (CirrusVGAState *s, uint32_t dstaddr, + uint32_t srcaddr, + int dstpitch, int srcpitch, + int bltwidth, int bltheight) +{ + uint32_t addr; + int x, y, pattern_y, pattern_pitch, pattern_x; + unsigned int col; + uint32_t src1addr; +#if DEPTH == 24 + int skipleft = s->vga.gr[0x2f] & 0x1f; +#else + int skipleft = (s->vga.gr[0x2f] & 0x07) * (DEPTH / 8); +#endif + +#if DEPTH == 8 + pattern_pitch = 8; +#elif DEPTH == 16 + pattern_pitch = 16; +#else + pattern_pitch = 32; +#endif + pattern_y = s->cirrus_blt_srcaddr & 7; + for(y = 0; y < bltheight; y++) { + pattern_x = skipleft; + addr = dstaddr + skipleft; + src1addr = srcaddr + pattern_y * pattern_pitch; + for (x = skipleft; x < bltwidth; x += (DEPTH / 8)) { +#if DEPTH == 8 + col = cirrus_src(s, src1addr + pattern_x); + pattern_x = (pattern_x + 1) & 7; +#elif DEPTH == 16 + col = cirrus_src16(s, src1addr + pattern_x); + pattern_x = (pattern_x + 2) & 15; +#elif DEPTH == 24 + { + uint32_t src2addr = src1addr + pattern_x * 3; + col = cirrus_src(s, src2addr) | + (cirrus_src(s, src2addr + 1) << 8) | + (cirrus_src(s, src2addr + 2) << 16); + pattern_x = (pattern_x + 1) & 7; + } +#else + col = cirrus_src32(s, src1addr + pattern_x); + pattern_x = (pattern_x + 4) & 31; +#endif + PUTPIXEL(s, addr, col); + addr += (DEPTH / 8); + } + pattern_y = (pattern_y + 1) & 7; + dstaddr += dstpitch; + } +} + +/* NOTE: srcpitch is ignored */ +static void +glue(glue(glue(cirrus_colorexpand_transp_, ROP_NAME), _),DEPTH) + (CirrusVGAState *s, uint32_t dstaddr, + uint32_t srcaddr, + int dstpitch, int srcpitch, + int bltwidth, int bltheight) +{ + uint32_t addr; + int x, y; + unsigned bits, bits_xor; + unsigned int col; + unsigned bitmask; + unsigned index; +#if DEPTH == 24 + int dstskipleft = s->vga.gr[0x2f] & 0x1f; + int srcskipleft = dstskipleft / 3; +#else + int srcskipleft = s->vga.gr[0x2f] & 0x07; + int dstskipleft = srcskipleft * (DEPTH / 8); +#endif + + if (s->cirrus_blt_modeext & CIRRUS_BLTMODEEXT_COLOREXPINV) { + bits_xor = 0xff; + col = s->cirrus_blt_bgcol; + } else { + bits_xor = 0x00; + col = s->cirrus_blt_fgcol; + } + + for(y = 0; y < bltheight; y++) { + bitmask = 0x80 >> srcskipleft; + bits = cirrus_src(s, srcaddr++) ^ bits_xor; + addr = dstaddr + dstskipleft; + for (x = dstskipleft; x < bltwidth; x += (DEPTH / 8)) { + if ((bitmask & 0xff) == 0) { + bitmask = 0x80; + bits = cirrus_src(s, srcaddr++) ^ bits_xor; + } + index = (bits & bitmask); + if (index) { + PUTPIXEL(s, addr, col); + } + addr += (DEPTH / 8); + bitmask >>= 1; + } + dstaddr += dstpitch; + } +} + +static void +glue(glue(glue(cirrus_colorexpand_, ROP_NAME), 
_),DEPTH) + (CirrusVGAState *s, uint32_t dstaddr, + uint32_t srcaddr, + int dstpitch, int srcpitch, + int bltwidth, int bltheight) +{ + uint32_t colors[2]; + uint32_t addr; + int x, y; + unsigned bits; + unsigned int col; + unsigned bitmask; + int srcskipleft = s->vga.gr[0x2f] & 0x07; + int dstskipleft = srcskipleft * (DEPTH / 8); + + colors[0] = s->cirrus_blt_bgcol; + colors[1] = s->cirrus_blt_fgcol; + for(y = 0; y < bltheight; y++) { + bitmask = 0x80 >> srcskipleft; + bits = cirrus_src(s, srcaddr++); + addr = dstaddr + dstskipleft; + for (x = dstskipleft; x < bltwidth; x += (DEPTH / 8)) { + if ((bitmask & 0xff) == 0) { + bitmask = 0x80; + bits = cirrus_src(s, srcaddr++); + } + col = colors[!!(bits & bitmask)]; + PUTPIXEL(s, addr, col); + addr += (DEPTH / 8); + bitmask >>= 1; + } + dstaddr += dstpitch; + } +} + +static void +glue(glue(glue(cirrus_colorexpand_pattern_transp_, ROP_NAME), _),DEPTH) + (CirrusVGAState *s, uint32_t dstaddr, + uint32_t srcaddr, + int dstpitch, int srcpitch, + int bltwidth, int bltheight) +{ + uint32_t addr; + int x, y, bitpos, pattern_y; + unsigned int bits, bits_xor; + unsigned int col; +#if DEPTH == 24 + int dstskipleft = s->vga.gr[0x2f] & 0x1f; + int srcskipleft = dstskipleft / 3; +#else + int srcskipleft = s->vga.gr[0x2f] & 0x07; + int dstskipleft = srcskipleft * (DEPTH / 8); +#endif + + if (s->cirrus_blt_modeext & CIRRUS_BLTMODEEXT_COLOREXPINV) { + bits_xor = 0xff; + col = s->cirrus_blt_bgcol; + } else { + bits_xor = 0x00; + col = s->cirrus_blt_fgcol; + } + pattern_y = s->cirrus_blt_srcaddr & 7; + + for(y = 0; y < bltheight; y++) { + bits = cirrus_src(s, srcaddr + pattern_y) ^ bits_xor; + bitpos = 7 - srcskipleft; + addr = dstaddr + dstskipleft; + for (x = dstskipleft; x < bltwidth; x += (DEPTH / 8)) { + if ((bits >> bitpos) & 1) { + PUTPIXEL(s, addr, col); + } + addr += (DEPTH / 8); + bitpos = (bitpos - 1) & 7; + } + pattern_y = (pattern_y + 1) & 7; + dstaddr += dstpitch; + } +} + +static void +glue(glue(glue(cirrus_colorexpand_pattern_, ROP_NAME), _),DEPTH) + (CirrusVGAState *s, uint32_t dstaddr, + uint32_t srcaddr, + int dstpitch, int srcpitch, + int bltwidth, int bltheight) +{ + uint32_t colors[2]; + uint32_t addr; + int x, y, bitpos, pattern_y; + unsigned int bits; + unsigned int col; + int srcskipleft = s->vga.gr[0x2f] & 0x07; + int dstskipleft = srcskipleft * (DEPTH / 8); + + colors[0] = s->cirrus_blt_bgcol; + colors[1] = s->cirrus_blt_fgcol; + pattern_y = s->cirrus_blt_srcaddr & 7; + + for(y = 0; y < bltheight; y++) { + bits = cirrus_src(s, srcaddr + pattern_y); + bitpos = 7 - srcskipleft; + addr = dstaddr + dstskipleft; + for (x = dstskipleft; x < bltwidth; x += (DEPTH / 8)) { + col = colors[(bits >> bitpos) & 1]; + PUTPIXEL(s, addr, col); + addr += (DEPTH / 8); + bitpos = (bitpos - 1) & 7; + } + pattern_y = (pattern_y + 1) & 7; + dstaddr += dstpitch; + } +} + +static void +glue(glue(glue(cirrus_fill_, ROP_NAME), _),DEPTH) + (CirrusVGAState *s, + uint32_t dstaddr, int dst_pitch, + int width, int height) +{ + uint32_t addr; + uint32_t col; + int x, y; + + col = s->cirrus_blt_fgcol; + + for(y = 0; y < height; y++) { + addr = dstaddr; + for(x = 0; x < width; x += (DEPTH / 8)) { + PUTPIXEL(s, addr, col); + addr += (DEPTH / 8); + } + dstaddr += dst_pitch; + } +} + +#undef DEPTH +#undef PUTPIXEL diff --git a/hw/display/dpcd.c b/hw/display/dpcd.c new file mode 100644 index 000000000..64463654a --- /dev/null +++ b/hw/display/dpcd.c @@ -0,0 +1,165 @@ +/* + * Xilinx Display Port Control Data + * + * Copyright (C) 2015 : GreenSocs Ltd + * 
http://www.greensocs.com/ , email: info@greensocs.com
+ *
+ * Developed by :
+ * Frederic Konrad
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+/*
+ * This is a simple AUX slave which emulates a connected screen.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/log.h"
+#include "qemu/module.h"
+#include "hw/misc/auxbus.h"
+#include "migration/vmstate.h"
+#include "hw/display/dpcd.h"
+#include "trace.h"
+
+#define DPCD_READABLE_AREA 0x600
+
+struct DPCDState {
+    /*< private >*/
+    AUXSlave parent_obj;
+
+    /*< public >*/
+    /*
+     * The DPCD is 0x7FFFF bytes long but reads as 0 after offset 0x5FF.
+     */
+    uint8_t dpcd_info[DPCD_READABLE_AREA];
+
+    MemoryRegion iomem;
+};
+
+static uint64_t dpcd_read(void *opaque, hwaddr offset, unsigned size)
+{
+    uint8_t ret;
+    DPCDState *e = DPCD(opaque);
+
+    if (offset < DPCD_READABLE_AREA) {
+        ret = e->dpcd_info[offset];
+    } else {
+        qemu_log_mask(LOG_GUEST_ERROR, "dpcd: Bad offset 0x%" HWADDR_PRIX "\n",
+                      offset);
+        ret = 0;
+    }
+    trace_dpcd_read(offset, ret);
+
+    return ret;
+}
+
+static void dpcd_write(void *opaque, hwaddr offset, uint64_t value,
+                       unsigned size)
+{
+    DPCDState *e = DPCD(opaque);
+
+    trace_dpcd_write(offset, value);
+    if (offset < DPCD_READABLE_AREA) {
+        e->dpcd_info[offset] = value;
+    } else {
+        qemu_log_mask(LOG_GUEST_ERROR, "dpcd: Bad offset 0x%" HWADDR_PRIX "\n",
+                      offset);
+    }
+}
+
+static const MemoryRegionOps aux_ops = {
+    .read = dpcd_read,
+    .write = dpcd_write,
+    .valid = {
+        .min_access_size = 1,
+        .max_access_size = 1,
+    },
+    .impl = {
+        .min_access_size = 1,
+        .max_access_size = 1,
+    },
+};
+
+static void dpcd_reset(DeviceState *dev)
+{
+    DPCDState *s = DPCD(dev);
+
+    memset(&(s->dpcd_info), 0, sizeof(s->dpcd_info));
+
+    s->dpcd_info[DPCD_REVISION] = DPCD_REV_1_0;
+    s->dpcd_info[DPCD_MAX_LINK_RATE] = DPCD_5_4GBPS;
+    s->dpcd_info[DPCD_MAX_LANE_COUNT] = DPCD_FOUR_LANES;
+    s->dpcd_info[DPCD_RECEIVE_PORT0_CAP_0] = DPCD_EDID_PRESENT;
+    /* buffer size */
+    s->dpcd_info[DPCD_RECEIVE_PORT0_CAP_1] = 0xFF;
+
+    s->dpcd_info[DPCD_LANE0_1_STATUS] = DPCD_LANE0_CR_DONE
+                                      | DPCD_LANE0_CHANNEL_EQ_DONE
+                                      | DPCD_LANE0_SYMBOL_LOCKED
+                                      | DPCD_LANE1_CR_DONE
+                                      | DPCD_LANE1_CHANNEL_EQ_DONE
+                                      | DPCD_LANE1_SYMBOL_LOCKED;
+    s->dpcd_info[DPCD_LANE2_3_STATUS] = DPCD_LANE2_CR_DONE
+                                      | DPCD_LANE2_CHANNEL_EQ_DONE
+                                      | DPCD_LANE2_SYMBOL_LOCKED
+                                      | DPCD_LANE3_CR_DONE
+                                      | DPCD_LANE3_CHANNEL_EQ_DONE
+                                      | DPCD_LANE3_SYMBOL_LOCKED;
+
+    s->dpcd_info[DPCD_LANE_ALIGN_STATUS_UPDATED] = DPCD_INTERLANE_ALIGN_DONE;
+    s->dpcd_info[DPCD_SINK_STATUS] = DPCD_RECEIVE_PORT_0_STATUS;
+}
+
+static void dpcd_init(Object *obj)
+{
+    DPCDState *s = DPCD(obj);
+
+    memory_region_init_io(&s->iomem, obj, &aux_ops, s, TYPE_DPCD, 0x80000);
+    aux_init_mmio(AUX_SLAVE(obj), &s->iomem);
+}
+
+static const VMStateDescription vmstate_dpcd = {
+    .name = TYPE_DPCD,
+    .version_id = 0,
+    .minimum_version_id = 0,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT8_ARRAY_V(dpcd_info, DPCDState, DPCD_READABLE_AREA, 0),
+
VMSTATE_END_OF_LIST() + } +}; + +static void dpcd_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + + dc->reset = dpcd_reset; + dc->vmsd = &vmstate_dpcd; +} + +static const TypeInfo dpcd_info = { + .name = TYPE_DPCD, + .parent = TYPE_AUX_SLAVE, + .instance_size = sizeof(DPCDState), + .class_init = dpcd_class_init, + .instance_init = dpcd_init, +}; + +static void dpcd_register_types(void) +{ + type_register_static(&dpcd_info); +} + +type_init(dpcd_register_types) diff --git a/hw/display/edid-generate.c b/hw/display/edid-generate.c new file mode 100644 index 000000000..f2b874d5e --- /dev/null +++ b/hw/display/edid-generate.c @@ -0,0 +1,563 @@ +/* + * QEMU EDID generator. + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ +#include "qemu/osdep.h" +#include "qemu/bswap.h" +#include "hw/display/edid.h" + +static const struct edid_mode { + uint32_t xres; + uint32_t yres; + uint32_t byte; + uint32_t xtra3; + uint32_t bit; + uint32_t dta; +} modes[] = { + /* dea/dta extension timings (all @ 50 Hz) */ + { .xres = 5120, .yres = 2160, .dta = 125 }, + { .xres = 4096, .yres = 2160, .dta = 101 }, + { .xres = 3840, .yres = 2160, .dta = 96 }, + { .xres = 2560, .yres = 1080, .dta = 89 }, + { .xres = 2048, .yres = 1152 }, + { .xres = 1920, .yres = 1080, .dta = 31 }, + + /* additional standard timings 3 (all @ 60Hz) */ + { .xres = 1920, .yres = 1200, .xtra3 = 10, .bit = 0 }, + { .xres = 1600, .yres = 1200, .xtra3 = 9, .bit = 2 }, + { .xres = 1680, .yres = 1050, .xtra3 = 9, .bit = 5 }, + { .xres = 1440, .yres = 900, .xtra3 = 8, .bit = 5 }, + { .xres = 1280, .yres = 1024, .xtra3 = 7, .bit = 1 }, + { .xres = 1280, .yres = 960, .xtra3 = 7, .bit = 3 }, + { .xres = 1280, .yres = 768, .xtra3 = 7, .bit = 6 }, + + { .xres = 1920, .yres = 1440, .xtra3 = 11, .bit = 5 }, + { .xres = 1856, .yres = 1392, .xtra3 = 10, .bit = 3 }, + { .xres = 1792, .yres = 1344, .xtra3 = 10, .bit = 5 }, + { .xres = 1440, .yres = 1050, .xtra3 = 8, .bit = 1 }, + { .xres = 1360, .yres = 768, .xtra3 = 8, .bit = 7 }, + + /* established timings (all @ 60Hz) */ + { .xres = 1024, .yres = 768, .byte = 36, .bit = 3 }, + { .xres = 800, .yres = 600, .byte = 35, .bit = 0 }, + { .xres = 640, .yres = 480, .byte = 35, .bit = 5 }, +}; + +typedef struct Timings { + uint32_t xfront; + uint32_t xsync; + uint32_t xblank; + + uint32_t yfront; + uint32_t ysync; + uint32_t yblank; + + uint64_t clock; +} Timings; + +static void generate_timings(Timings *timings, uint32_t refresh_rate, + uint32_t xres, uint32_t yres) +{ + /* pull some realistic looking timings out of thin air */ + timings->xfront = xres * 25 / 100; + timings->xsync = xres * 3 / 100; + timings->xblank = xres * 35 / 100; + + timings->yfront = yres * 5 / 1000; + timings->ysync = yres * 5 / 1000; + timings->yblank = yres * 35 / 1000; + + timings->clock = ((uint64_t)refresh_rate * + (xres + timings->xblank) * + (yres + timings->yblank)) / 10000000; +} + +static void edid_ext_dta(uint8_t *dta) +{ + dta[0] = 0x02; + dta[1] = 0x03; + dta[2] = 0x05; + dta[3] = 0x00; + + /* video data block */ + dta[4] = 0x40; +} + +static void edid_ext_dta_mode(uint8_t *dta, uint8_t nr) +{ + dta[dta[2]] = nr; + dta[2]++; + dta[4]++; +} + +static int edid_std_mode(uint8_t *mode, uint32_t xres, uint32_t yres) +{ + uint32_t aspect; + + if (xres == 0 || yres == 0) { + mode[0] = 0x01; + mode[1] = 0x01; + return 0; + + } else if (xres * 10 == yres * 16) { + aspect = 0; + } else if (xres * 3 == yres * 4) { + 
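+        /*
+         * The second standard-timing byte (mode[1]) encodes the aspect
+         * ratio in bits 7..6 (0 = 16:10, 1 = 4:3, 2 = 5:4, 3 = 16:9)
+         * and (vrefresh - 60) in bits 5..0, so the entries emitted
+         * here are fixed at 60 Hz.
+         */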
aspect = 1; + } else if (xres * 4 == yres * 5) { + aspect = 2; + } else if (xres * 9 == yres * 16) { + aspect = 3; + } else { + return -1; + } + + if ((xres / 8) - 31 > 255) { + return -1; + } + + mode[0] = (xres / 8) - 31; + mode[1] = ((aspect << 6) | (60 - 60)); + return 0; +} + +static void edid_fill_modes(uint8_t *edid, uint8_t *xtra3, uint8_t *dta, + uint32_t maxx, uint32_t maxy) +{ + const struct edid_mode *mode; + int std = 38; + int rc, i; + + for (i = 0; i < ARRAY_SIZE(modes); i++) { + mode = modes + i; + + if ((maxx && mode->xres > maxx) || + (maxy && mode->yres > maxy)) { + continue; + } + + if (mode->byte) { + edid[mode->byte] |= (1 << mode->bit); + } else if (std < 54) { + rc = edid_std_mode(edid + std, mode->xres, mode->yres); + if (rc == 0) { + std += 2; + } + } else if (mode->xtra3 && xtra3) { + xtra3[mode->xtra3] |= (1 << mode->bit); + } + + if (dta && mode->dta) { + edid_ext_dta_mode(dta, mode->dta); + } + } + + while (std < 54) { + edid_std_mode(edid + std, 0, 0); + std += 2; + } +} + +static void edid_checksum(uint8_t *edid, size_t len) +{ + uint32_t sum = 0; + int i; + + for (i = 0; i < len; i++) { + sum += edid[i]; + } + sum &= 0xff; + if (sum) { + edid[len] = 0x100 - sum; + } +} + +static uint8_t *edid_desc_next(uint8_t *edid, uint8_t *dta, uint8_t *desc) +{ + if (desc == NULL) { + return NULL; + } + if (desc + 18 + 18 < edid + 127) { + return desc + 18; + } + if (dta) { + if (desc < edid + 127) { + return dta + dta[2]; + } + if (desc + 18 + 18 < dta + 127) { + return desc + 18; + } + } + return NULL; +} + +static void edid_desc_type(uint8_t *desc, uint8_t type) +{ + desc[0] = 0; + desc[1] = 0; + desc[2] = 0; + desc[3] = type; + desc[4] = 0; +} + +static void edid_desc_text(uint8_t *desc, uint8_t type, + const char *text) +{ + size_t len; + + edid_desc_type(desc, type); + memset(desc + 5, ' ', 13); + + len = strlen(text); + if (len > 12) { + len = 12; + } + memcpy(desc + 5, text, len); + desc[5 + len] = '\n'; +} + +static void edid_desc_ranges(uint8_t *desc) +{ + edid_desc_type(desc, 0xfd); + + /* vertical (50 -> 125 Hz) */ + desc[5] = 50; + desc[6] = 125; + + /* horizontal (30 -> 160 kHz) */ + desc[7] = 30; + desc[8] = 160; + + /* max dot clock (2550 MHz) */ + desc[9] = 2550 / 10; + + /* no extended timing information */ + desc[10] = 0x01; + + /* padding */ + desc[11] = '\n'; + memset(desc + 12, ' ', 6); +} + +/* additional standard timings 3 */ +static void edid_desc_xtra3_std(uint8_t *desc) +{ + edid_desc_type(desc, 0xf7); + desc[5] = 10; +} + +static void edid_desc_dummy(uint8_t *desc) +{ + edid_desc_type(desc, 0x10); +} + +static void edid_desc_timing(uint8_t *desc, uint32_t refresh_rate, + uint32_t xres, uint32_t yres, + uint32_t xmm, uint32_t ymm) +{ + Timings timings; + generate_timings(&timings, refresh_rate, xres, yres); + stl_le_p(desc, timings.clock); + + desc[2] = xres & 0xff; + desc[3] = timings.xblank & 0xff; + desc[4] = (((xres & 0xf00) >> 4) | + ((timings.xblank & 0xf00) >> 8)); + + desc[5] = yres & 0xff; + desc[6] = timings.yblank & 0xff; + desc[7] = (((yres & 0xf00) >> 4) | + ((timings.yblank & 0xf00) >> 8)); + + desc[8] = timings.xfront & 0xff; + desc[9] = timings.xsync & 0xff; + + desc[10] = (((timings.yfront & 0x00f) << 4) | + ((timings.ysync & 0x00f) << 0)); + desc[11] = (((timings.xfront & 0x300) >> 2) | + ((timings.xsync & 0x300) >> 4) | + ((timings.yfront & 0x030) >> 2) | + ((timings.ysync & 0x030) >> 4)); + + desc[12] = xmm & 0xff; + desc[13] = ymm & 0xff; + desc[14] = (((xmm & 0xf00) >> 4) | + ((ymm & 0xf00) >> 8)); + + desc[17] = 0x18; +} 
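+
+/*
+ * Illustrative sketch, not part of the original patch: the 18-byte
+ * detailed timing descriptor packed by edid_desc_timing() above can be
+ * sanity-checked by decoding it again.  Bytes 0-1 hold the pixel clock
+ * in 10 kHz units (little endian); each field keeps its low 8 bits in a
+ * byte of its own, while the high 4 bits are packed into the shared
+ * nibble bytes desc[4] and desc[7].
+ */
+#if 0 /* example only */
+static void edid_desc_timing_decode(const uint8_t *desc)
+{
+    uint32_t clock  = desc[0] | (desc[1] << 8);           /* 10 kHz units */
+    uint32_t xres   = desc[2] | ((desc[4] & 0xf0) << 4);
+    uint32_t xblank = desc[3] | ((desc[4] & 0x0f) << 8);
+    uint32_t yres   = desc[5] | ((desc[7] & 0xf0) << 4);
+    uint32_t yblank = desc[6] | ((desc[7] & 0x0f) << 8);
+    uint64_t vrefresh = (uint64_t)clock * 10000 /
+        ((xres + xblank) * (yres + yblank));              /* truncated Hz */
+
+    printf("%ux%u, %u.%02u MHz dot clock, ~%u Hz\n", xres, yres,
+           clock / 100, clock % 100, (unsigned)vrefresh);
+}
+#endif
+/*
+ * With the defaults (1024x768 and refresh_rate = 75000, i.e. 75 Hz in
+ * millihertz units), generate_timings() yields xblank = 358 and
+ * yblank = 26, so the stored clock is 75000 * 1382 * 794 / 10^7 = 8229,
+ * i.e. an 82.29 MHz dot clock.
+ */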
+ +static uint32_t edid_to_10bit(float value) +{ + return (uint32_t)(value * 1024 + 0.5); +} + +static void edid_colorspace(uint8_t *edid, + float rx, float ry, + float gx, float gy, + float bx, float by, + float wx, float wy) +{ + uint32_t red_x = edid_to_10bit(rx); + uint32_t red_y = edid_to_10bit(ry); + uint32_t green_x = edid_to_10bit(gx); + uint32_t green_y = edid_to_10bit(gy); + uint32_t blue_x = edid_to_10bit(bx); + uint32_t blue_y = edid_to_10bit(by); + uint32_t white_x = edid_to_10bit(wx); + uint32_t white_y = edid_to_10bit(wy); + + edid[25] = (((red_x & 0x03) << 6) | + ((red_y & 0x03) << 4) | + ((green_x & 0x03) << 2) | + ((green_y & 0x03) << 0)); + edid[26] = (((blue_x & 0x03) << 6) | + ((blue_y & 0x03) << 4) | + ((white_x & 0x03) << 2) | + ((white_y & 0x03) << 0)); + edid[27] = red_x >> 2; + edid[28] = red_y >> 2; + edid[29] = green_x >> 2; + edid[30] = green_y >> 2; + edid[31] = blue_x >> 2; + edid[32] = blue_y >> 2; + edid[33] = white_x >> 2; + edid[34] = white_y >> 2; +} + +static uint32_t qemu_edid_dpi_from_mm(uint32_t mm, uint32_t res) +{ + return res * 254 / 10 / mm; +} + +uint32_t qemu_edid_dpi_to_mm(uint32_t dpi, uint32_t res) +{ + return res * 254 / 10 / dpi; +} + +static void init_displayid(uint8_t *did) +{ + did[0] = 0x70; /* display id extension */ + did[1] = 0x13; /* version 1.3 */ + did[2] = 4; /* length */ + did[3] = 0x03; /* product type (0x03 == standalone display device) */ + edid_checksum(did + 1, did[2] + 4); +} + +static void qemu_displayid_generate(uint8_t *did, uint32_t refresh_rate, + uint32_t xres, uint32_t yres, + uint32_t xmm, uint32_t ymm) +{ + Timings timings; + generate_timings(&timings, refresh_rate, xres, yres); + + did[0] = 0x70; /* display id extension */ + did[1] = 0x13; /* version 1.3 */ + did[2] = 23; /* length */ + did[3] = 0x03; /* product type (0x03 == standalone display device) */ + + did[5] = 0x03; /* Detailed Timings Data Block */ + did[6] = 0x00; /* revision */ + did[7] = 0x14; /* block length */ + + did[8] = timings.clock & 0xff; + did[9] = (timings.clock & 0xff00) >> 8; + did[10] = (timings.clock & 0xff0000) >> 16; + + did[11] = 0x88; /* leave aspect ratio undefined */ + + stw_le_p(did + 12, 0xffff & (xres - 1)); + stw_le_p(did + 14, 0xffff & (timings.xblank - 1)); + stw_le_p(did + 16, 0xffff & (timings.xfront - 1)); + stw_le_p(did + 18, 0xffff & (timings.xsync - 1)); + + stw_le_p(did + 20, 0xffff & (yres - 1)); + stw_le_p(did + 22, 0xffff & (timings.yblank - 1)); + stw_le_p(did + 24, 0xffff & (timings.yfront - 1)); + stw_le_p(did + 26, 0xffff & (timings.ysync - 1)); + + edid_checksum(did + 1, did[2] + 4); +} + +void qemu_edid_generate(uint8_t *edid, size_t size, + qemu_edid_info *info) +{ + uint8_t *desc = edid + 54; + uint8_t *xtra3 = NULL; + uint8_t *dta = NULL; + uint8_t *did = NULL; + uint32_t width_mm, height_mm; + uint32_t refresh_rate = info->refresh_rate ? 
info->refresh_rate : 75000;
+    uint32_t dpi = 100; /* if no width_mm/height_mm */
+    uint32_t large_screen = 0;
+
+    /* =============== set defaults =============== */
+
+    if (!info->vendor || strlen(info->vendor) != 3) {
+        info->vendor = "RHT";
+    }
+    if (!info->name) {
+        info->name = "QEMU Monitor";
+    }
+    if (!info->prefx) {
+        info->prefx = 1024;
+    }
+    if (!info->prefy) {
+        info->prefy = 768;
+    }
+    if (info->prefx >= 4096 || info->prefy >= 4096) {
+        large_screen = 1;
+    }
+    if (info->width_mm && info->height_mm) {
+        width_mm = info->width_mm;
+        height_mm = info->height_mm;
+        dpi = qemu_edid_dpi_from_mm(width_mm, info->prefx);
+    } else {
+        width_mm = qemu_edid_dpi_to_mm(dpi, info->prefx);
+        height_mm = qemu_edid_dpi_to_mm(dpi, info->prefy);
+    }
+
+    /* =============== extensions =============== */
+
+    if (size >= 256) {
+        dta = edid + 128;
+        edid[126]++;
+        edid_ext_dta(dta);
+    }
+
+    if (size >= 384 && large_screen) {
+        did = edid + 256;
+        edid[126]++;
+        init_displayid(did);
+    }
+
+    /* =============== header information =============== */
+
+    /* fixed */
+    edid[0] = 0x00;
+    edid[1] = 0xff;
+    edid[2] = 0xff;
+    edid[3] = 0xff;
+    edid[4] = 0xff;
+    edid[5] = 0xff;
+    edid[6] = 0xff;
+    edid[7] = 0x00;
+
+    /* manufacturer id, product code, serial number */
+    uint16_t vendor_id = ((((info->vendor[0] - '@') & 0x1f) << 10) |
+                          (((info->vendor[1] - '@') & 0x1f) << 5) |
+                          (((info->vendor[2] - '@') & 0x1f) << 0));
+    uint16_t model_nr = 0x1234;
+    uint32_t serial_nr = info->serial ? atoi(info->serial) : 0;
+    stw_be_p(edid + 8, vendor_id);
+    stw_le_p(edid + 10, model_nr);
+    stl_le_p(edid + 12, serial_nr);
+
+    /* manufacture week and year */
+    edid[16] = 42;
+    edid[17] = 2014 - 1990;
+
+    /* edid version */
+    edid[18] = 1;
+    edid[19] = 4;
+
+
+    /* =============== basic display parameters =============== */
+
+    /* video input: digital, 8bpc, displayport */
+    edid[20] = 0xa5;
+
+    /* screen size in cm */
+    edid[21] = width_mm / 10;
+    edid[22] = height_mm / 10;
+
+    /* display gamma: 2.2 */
+    edid[23] = 220 - 100;
+
+    /* supported features bitmap: std sRGB, preferred timing */
+    edid[24] = 0x06;
+
+
+    /* =============== chromaticity coordinates =============== */
+
+    /* standard sRGB colorspace */
+    edid_colorspace(edid,
+                    0.6400, 0.3300, /* red */
+                    0.3000, 0.6000, /* green */
+                    0.1500, 0.0600, /* blue */
+                    0.3127, 0.3290); /* white point */
+
+    /* =============== established timing bitmap =============== */
+    /* =============== standard timing information =============== */
+
+    /* both filled by edid_fill_modes() */
+
+
+    /* =============== descriptor blocks =============== */
+
+    if (!large_screen) {
+        /* The DTD section has only 12 bits to store the resolution */
+        edid_desc_timing(desc, refresh_rate, info->prefx, info->prefy,
+                         width_mm, height_mm);
+        desc = edid_desc_next(edid, dta, desc);
+    }
+
+    xtra3 = desc;
+    edid_desc_xtra3_std(xtra3);
+    desc = edid_desc_next(edid, dta, desc);
+    edid_fill_modes(edid, xtra3, dta, info->maxx, info->maxy);
+    /*
+     * dta video data block is finished at this point,
+     * so dta descriptor offsets don't move any more.
+     */
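+    /*
+     * The remaining descriptor slots are filled in a fixed order below:
+     * the display range limits block first, then (when a slot is still
+     * free) the monitor name and the serial number, with any leftover
+     * slots padded by dummy descriptors.
+     */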
+
+    edid_desc_ranges(desc);
+    desc = edid_desc_next(edid, dta, desc);
+
+    if (desc && info->name) {
+        edid_desc_text(desc, 0xfc, info->name);
+        desc = edid_desc_next(edid, dta, desc);
+    }
+
+    if (desc && info->serial) {
+        edid_desc_text(desc, 0xff, info->serial);
+        desc = edid_desc_next(edid, dta, desc);
+    }
+
+    while (desc) {
+        edid_desc_dummy(desc);
+        desc = edid_desc_next(edid, dta, desc);
+    }
+
+    /* =============== display id extensions =============== */
+
+    if (did && large_screen) {
+        qemu_displayid_generate(did, refresh_rate, info->prefx, info->prefy,
+                                width_mm, height_mm);
+    }
+
+    /* =============== finish up =============== */
+
+    edid_checksum(edid, 127);
+    if (dta) {
+        edid_checksum(dta, 127);
+    }
+    if (did) {
+        edid_checksum(did, 127);
+    }
+}
+
+size_t qemu_edid_size(uint8_t *edid)
+{
+    uint32_t exts;
+
+    if (edid[0] != 0x00 ||
+        edid[1] != 0xff) {
+        /* doesn't look like a valid edid block */
+        return 0;
+    }
+
+    exts = edid[126];
+    return 128 * (exts + 1);
+}
diff --git a/hw/display/edid-region.c b/hw/display/edid-region.c
new file mode 100644
index 000000000..675429dc1
--- /dev/null
+++ b/hw/display/edid-region.c
@@ -0,0 +1,33 @@
+#include "qemu/osdep.h"
+#include "exec/memory.h"
+#include "hw/display/edid.h"
+
+static uint64_t edid_region_read(void *ptr, hwaddr addr, unsigned size)
+{
+    uint8_t *edid = ptr;
+
+    return edid[addr];
+}
+
+static void edid_region_write(void *ptr, hwaddr addr,
+                              uint64_t val, unsigned size)
+{
+    /* read only */
+}
+
+static const MemoryRegionOps edid_region_ops = {
+    .read = edid_region_read,
+    .write = edid_region_write,
+    .valid.min_access_size = 1,
+    .valid.max_access_size = 4,
+    .impl.min_access_size = 1,
+    .impl.max_access_size = 1,
+    .endianness = DEVICE_LITTLE_ENDIAN,
+};
+
+void qemu_edid_region_io(MemoryRegion *region, Object *owner,
+                         uint8_t *edid, size_t size)
+{
+    memory_region_init_io(region, owner, &edid_region_ops,
+                          edid, "edid", size);
+}
diff --git a/hw/display/exynos4210_fimd.c b/hw/display/exynos4210_fimd.c
new file mode 100644
index 000000000..34a960a97
--- /dev/null
+++ b/hw/display/exynos4210_fimd.c
@@ -0,0 +1,1972 @@
+/*
+ * Samsung exynos4210 Display Controller (FIMD)
+ *
+ * Copyright (c) 2000 - 2011 Samsung Electronics Co., Ltd.
+ * All rights reserved.
+ * Based on LCD controller for Samsung S5PC1xx-based board emulation
+ * by Kirill Batuzov
+ *
+ * Contributed by Mitsyanko Igor
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/hw.h"
+#include "hw/irq.h"
+#include "hw/sysbus.h"
+#include "migration/vmstate.h"
+#include "ui/console.h"
+#include "ui/pixel_ops.h"
+#include "qemu/bswap.h"
+#include "qemu/module.h"
+#include "qemu/log.h"
+#include "qom/object.h"
+
+/* Debug messages configuration */
+#define EXYNOS4210_FIMD_DEBUG 0
+#define EXYNOS4210_FIMD_MODE_TRACE 0
+
+#if EXYNOS4210_FIMD_DEBUG == 0
+    #define DPRINT_L1(fmt, args...) do { } while (0)
+    #define DPRINT_L2(fmt, args...) do { } while (0)
+#elif EXYNOS4210_FIMD_DEBUG == 1
+    #define DPRINT_L1(fmt, args...) \
+        do {fprintf(stderr, "QEMU FIMD: "fmt, ## args); } while (0)
+    #define DPRINT_L2(fmt, args...) do { } while (0)
+#else
+    #define DPRINT_L1(fmt, args...) \
+        do {fprintf(stderr, "QEMU FIMD: "fmt, ## args); } while (0)
+    #define DPRINT_L2(fmt, args...) \
+        do {fprintf(stderr, "QEMU FIMD: "fmt, ## args); } while (0)
+#endif
+
+#if EXYNOS4210_FIMD_MODE_TRACE == 0
+    #define DPRINT_TRACE(fmt, args...) do { } while (0)
+#else
+    #define DPRINT_TRACE(fmt, args...) \
+        do {fprintf(stderr, "QEMU FIMD: "fmt, ## args); } while (0)
+#endif
+
+#define NUM_OF_WINDOWS 5
+#define FIMD_REGS_SIZE 0x4114
+
+/* Video main control registers */
+#define FIMD_VIDCON0 0x0000
+#define FIMD_VIDCON1 0x0004
+#define FIMD_VIDCON2 0x0008
+#define FIMD_VIDCON3 0x000C
+#define FIMD_VIDCON0_ENVID_F (1 << 0)
+#define FIMD_VIDCON0_ENVID (1 << 1)
+#define FIMD_VIDCON0_ENVID_MASK ((1 << 0) | (1 << 1))
+#define FIMD_VIDCON1_ROMASK 0x07FFE000
+
+/* Video time control registers */
+#define FIMD_VIDTCON_START 0x10
+#define FIMD_VIDTCON_END 0x1C
+#define FIMD_VIDTCON2_SIZE_MASK 0x07FF
+#define FIMD_VIDTCON2_HOR_SHIFT 0
+#define FIMD_VIDTCON2_VER_SHIFT 11
+
+/* Window control registers */
+#define FIMD_WINCON_START 0x0020
+#define FIMD_WINCON_END 0x0030
+#define FIMD_WINCON_ROMASK 0x82200000
+#define FIMD_WINCON_ENWIN (1 << 0)
+#define FIMD_WINCON_BLD_PIX (1 << 6)
+#define FIMD_WINCON_ALPHA_MUL (1 << 7)
+#define FIMD_WINCON_ALPHA_SEL (1 << 1)
+#define FIMD_WINCON_SWAP 0x078000
+#define FIMD_WINCON_SWAP_SHIFT 15
+#define FIMD_WINCON_SWAP_WORD 0x1
+#define FIMD_WINCON_SWAP_HWORD 0x2
+#define FIMD_WINCON_SWAP_BYTE 0x4
+#define FIMD_WINCON_SWAP_BITS 0x8
+#define FIMD_WINCON_BUFSTAT_L (1 << 21)
+#define FIMD_WINCON_BUFSTAT_H (1 << 31)
+#define FIMD_WINCON_BUFSTATUS ((1 << 21) | (1 << 31))
+#define FIMD_WINCON_BUF0_STAT ((0 << 21) | (0 << 31))
+#define FIMD_WINCON_BUF1_STAT ((1 << 21) | (0 << 31))
+#define FIMD_WINCON_BUF2_STAT ((0 << 21) | (1U << 31))
+#define FIMD_WINCON_BUFSELECT ((1 << 20) | (1 << 30))
+#define FIMD_WINCON_BUF0_SEL ((0 << 20) | (0 << 30))
+#define FIMD_WINCON_BUF1_SEL ((1 << 20) | (0 << 30))
+#define FIMD_WINCON_BUF2_SEL ((0 << 20) | (1 << 30))
+#define FIMD_WINCON_BUFMODE (1 << 14)
+#define IS_PALETTIZED_MODE(w) (w->wincon & 0xC)
+#define PAL_MODE_WITH_ALPHA(x) ((x) == 7)
+#define WIN_BPP_MODE(w) ((w->wincon >> 2) & 0xF)
+#define WIN_BPP_MODE_WITH_ALPHA(w) \
+    (WIN_BPP_MODE(w) == 0xD || WIN_BPP_MODE(w) == 0xE)
+
+/* Shadow control register */
+#define FIMD_SHADOWCON 0x0034
+#define FIMD_WINDOW_PROTECTED(s, w) ((s) & (1 << (10 + (w))))
+/* Channel mapping control register */
+#define FIMD_WINCHMAP 0x003C
+
+/* Window position control registers */
+#define FIMD_VIDOSD_START 0x0040
+#define FIMD_VIDOSD_END 0x0088
+#define FIMD_VIDOSD_COORD_MASK 0x07FF
+#define FIMD_VIDOSD_HOR_SHIFT 11
+#define FIMD_VIDOSD_VER_SHIFT 0
+#define FIMD_VIDOSD_ALPHA_AEN0 0xFFF000
+#define FIMD_VIDOSD_AEN0_SHIFT 12
+#define FIMD_VIDOSD_ALPHA_AEN1 0x000FFF
+
+/* Frame buffer address registers */
+#define FIMD_VIDWADD0_START 0x00A0
+#define FIMD_VIDWADD0_END 0x00C4
+#define FIMD_VIDWADD1_START 0x00D0
+#define FIMD_VIDWADD1_END 0x00F4
+#define FIMD_VIDWADD2_START 0x0100
+#define FIMD_VIDWADD2_END 0x0110
+#define FIMD_VIDWADD2_PAGEWIDTH 0x1FFF
+#define FIMD_VIDWADD2_OFFSIZE 0x1FFF
+#define FIMD_VIDWADD2_OFFSIZE_SHIFT 13
+#define FIMD_VIDW0ADD0_B2 0x20A0
+#define FIMD_VIDW4ADD0_B2 0x20C0
+
+/* Video interrupt control registers */
+#define FIMD_VIDINTCON0 0x130 +#define FIMD_VIDINTCON1 0x134 + +/* Window color key registers */ +#define FIMD_WKEYCON_START 0x140 +#define FIMD_WKEYCON_END 0x15C +#define FIMD_WKEYCON0_COMPKEY 0x00FFFFFF +#define FIMD_WKEYCON0_CTL_SHIFT 24 +#define FIMD_WKEYCON0_DIRCON (1 << 24) +#define FIMD_WKEYCON0_KEYEN (1 << 25) +#define FIMD_WKEYCON0_KEYBLEN (1 << 26) +/* Window color key alpha control register */ +#define FIMD_WKEYALPHA_START 0x160 +#define FIMD_WKEYALPHA_END 0x16C + +/* Dithering control register */ +#define FIMD_DITHMODE 0x170 + +/* Window alpha control registers */ +#define FIMD_VIDALPHA_ALPHA_LOWER 0x000F0F0F +#define FIMD_VIDALPHA_ALPHA_UPPER 0x00F0F0F0 +#define FIMD_VIDWALPHA_START 0x21C +#define FIMD_VIDWALPHA_END 0x240 + +/* Window color map registers */ +#define FIMD_WINMAP_START 0x180 +#define FIMD_WINMAP_END 0x190 +#define FIMD_WINMAP_EN (1 << 24) +#define FIMD_WINMAP_COLOR_MASK 0x00FFFFFF + +/* Window palette control registers */ +#define FIMD_WPALCON_HIGH 0x019C +#define FIMD_WPALCON_LOW 0x01A0 +#define FIMD_WPALCON_UPDATEEN (1 << 9) +#define FIMD_WPAL_W0PAL_L 0x07 +#define FIMD_WPAL_W0PAL_L_SHT 0 +#define FIMD_WPAL_W1PAL_L 0x07 +#define FIMD_WPAL_W1PAL_L_SHT 3 +#define FIMD_WPAL_W2PAL_L 0x01 +#define FIMD_WPAL_W2PAL_L_SHT 6 +#define FIMD_WPAL_W2PAL_H 0x06 +#define FIMD_WPAL_W2PAL_H_SHT 8 +#define FIMD_WPAL_W3PAL_L 0x01 +#define FIMD_WPAL_W3PAL_L_SHT 7 +#define FIMD_WPAL_W3PAL_H 0x06 +#define FIMD_WPAL_W3PAL_H_SHT 12 +#define FIMD_WPAL_W4PAL_L 0x01 +#define FIMD_WPAL_W4PAL_L_SHT 8 +#define FIMD_WPAL_W4PAL_H 0x06 +#define FIMD_WPAL_W4PAL_H_SHT 16 + +/* Trigger control registers */ +#define FIMD_TRIGCON 0x01A4 +#define FIMD_TRIGCON_ROMASK 0x00000004 + +/* LCD I80 Interface Control */ +#define FIMD_I80IFCON_START 0x01B0 +#define FIMD_I80IFCON_END 0x01BC +/* Color gain control register */ +#define FIMD_COLORGAINCON 0x01C0 +/* LCD i80 Interface Command Control */ +#define FIMD_LDI_CMDCON0 0x01D0 +#define FIMD_LDI_CMDCON1 0x01D4 +/* I80 System Interface Manual Command Control */ +#define FIMD_SIFCCON0 0x01E0 +#define FIMD_SIFCCON2 0x01E8 + +/* Hue Control Registers */ +#define FIMD_HUECOEFCR_START 0x01EC +#define FIMD_HUECOEFCR_END 0x01F4 +#define FIMD_HUECOEFCB_START 0x01FC +#define FIMD_HUECOEFCB_END 0x0208 +#define FIMD_HUEOFFSET 0x020C + +/* Video interrupt control registers */ +#define FIMD_VIDINT_INTFIFOPEND (1 << 0) +#define FIMD_VIDINT_INTFRMPEND (1 << 1) +#define FIMD_VIDINT_INTI80PEND (1 << 2) +#define FIMD_VIDINT_INTEN (1 << 0) +#define FIMD_VIDINT_INTFIFOEN (1 << 1) +#define FIMD_VIDINT_INTFRMEN (1 << 12) +#define FIMD_VIDINT_I80IFDONE (1 << 17) + +/* Window blend equation control registers */ +#define FIMD_BLENDEQ_START 0x0244 +#define FIMD_BLENDEQ_END 0x0250 +#define FIMD_BLENDCON 0x0260 +#define FIMD_ALPHA_8BIT (1 << 0) +#define FIMD_BLENDEQ_COEF_MASK 0xF + +/* Window RTQOS Control Registers */ +#define FIMD_WRTQOSCON_START 0x0264 +#define FIMD_WRTQOSCON_END 0x0274 + +/* LCD I80 Interface Command */ +#define FIMD_I80IFCMD_START 0x0280 +#define FIMD_I80IFCMD_END 0x02AC + +/* Shadow windows control registers */ +#define FIMD_SHD_ADD0_START 0x40A0 +#define FIMD_SHD_ADD0_END 0x40C0 +#define FIMD_SHD_ADD1_START 0x40D0 +#define FIMD_SHD_ADD1_END 0x40F0 +#define FIMD_SHD_ADD2_START 0x4100 +#define FIMD_SHD_ADD2_END 0x4110 + +/* Palette memory */ +#define FIMD_PAL_MEM_START 0x2400 +#define FIMD_PAL_MEM_END 0x37FC +/* Palette memory aliases for windows 0 and 1 */ +#define FIMD_PALMEM_AL_START 0x0400 +#define FIMD_PALMEM_AL_END 0x0BFC + +typedef struct { + uint8_t r, 
g, b; + /* D[31..24]dummy, D[23..16]rAlpha, D[15..8]gAlpha, D[7..0]bAlpha */ + uint32_t a; +} rgba; +#define RGBA_SIZE 7 + +typedef void pixel_to_rgb_func(uint32_t pixel, rgba *p); +typedef struct Exynos4210fimdWindow Exynos4210fimdWindow; + +struct Exynos4210fimdWindow { + uint32_t wincon; /* Window control register */ + uint32_t buf_start[3]; /* Start address for video frame buffer */ + uint32_t buf_end[3]; /* End address for video frame buffer */ + uint32_t keycon[2]; /* Window color key registers */ + uint32_t keyalpha; /* Color key alpha control register */ + uint32_t winmap; /* Window color map register */ + uint32_t blendeq; /* Window blending equation control register */ + uint32_t rtqoscon; /* Window RTQOS Control Registers */ + uint32_t palette[256]; /* Palette RAM */ + uint32_t shadow_buf_start; /* Start address of shadow frame buffer */ + uint32_t shadow_buf_end; /* End address of shadow frame buffer */ + uint32_t shadow_buf_size; /* Virtual shadow screen width */ + + pixel_to_rgb_func *pixel_to_rgb; + void (*draw_line)(Exynos4210fimdWindow *w, uint8_t *src, uint8_t *dst, + bool blend); + uint32_t (*get_alpha)(Exynos4210fimdWindow *w, uint32_t pix_a); + uint16_t lefttop_x, lefttop_y; /* VIDOSD0 register */ + uint16_t rightbot_x, rightbot_y; /* VIDOSD1 register */ + uint32_t osdsize; /* VIDOSD2&3 register */ + uint32_t alpha_val[2]; /* VIDOSD2&3, VIDWALPHA registers */ + uint16_t virtpage_width; /* VIDWADD2 register */ + uint16_t virtpage_offsize; /* VIDWADD2 register */ + MemoryRegionSection mem_section; /* RAM fragment containing framebuffer */ + uint8_t *host_fb_addr; /* Host pointer to window's framebuffer */ + hwaddr fb_len; /* Framebuffer length */ +}; + +#define TYPE_EXYNOS4210_FIMD "exynos4210.fimd" +OBJECT_DECLARE_SIMPLE_TYPE(Exynos4210fimdState, EXYNOS4210_FIMD) + +struct Exynos4210fimdState { + SysBusDevice parent_obj; + + MemoryRegion iomem; + QemuConsole *console; + qemu_irq irq[3]; + + uint32_t vidcon[4]; /* Video main control registers 0-3 */ + uint32_t vidtcon[4]; /* Video time control registers 0-3 */ + uint32_t shadowcon; /* Window shadow control register */ + uint32_t winchmap; /* Channel mapping control register */ + uint32_t vidintcon[2]; /* Video interrupt control registers */ + uint32_t dithmode; /* Dithering control register */ + uint32_t wpalcon[2]; /* Window palette control registers */ + uint32_t trigcon; /* Trigger control register */ + uint32_t i80ifcon[4]; /* I80 interface control registers */ + uint32_t colorgaincon; /* Color gain control register */ + uint32_t ldi_cmdcon[2]; /* LCD I80 interface command control */ + uint32_t sifccon[3]; /* I80 System Interface Manual Command Control */ + uint32_t huecoef_cr[4]; /* Hue control registers */ + uint32_t huecoef_cb[4]; /* Hue control registers */ + uint32_t hueoffset; /* Hue offset control register */ + uint32_t blendcon; /* Blending control register */ + uint32_t i80ifcmd[12]; /* LCD I80 Interface Command */ + + Exynos4210fimdWindow window[5]; /* Window-specific registers */ + uint8_t *ifb; /* Internal frame buffer */ + bool invalidate; /* Image needs to be redrawn */ + bool enabled; /* Display controller is enabled */ +}; + +/* Perform byte/halfword/word swap of data according to WINCON */ +static inline void fimd_swap_data(unsigned int swap_ctl, uint64_t *data) +{ + int i; + uint64_t res; + uint64_t x = *data; + + if (swap_ctl & FIMD_WINCON_SWAP_BITS) { + res = 0; + for (i = 0; i < 64; i++) { + if (x & (1ULL << (63 - i))) { + res |= (1ULL << i); + } + } + x = res; + } + + if (swap_ctl & 
FIMD_WINCON_SWAP_BYTE) {
+        x = bswap64(x);
+    }
+
+    if (swap_ctl & FIMD_WINCON_SWAP_HWORD) {
+        x = ((x & 0x000000000000FFFFULL) << 48) |
+            ((x & 0x00000000FFFF0000ULL) << 16) |
+            ((x & 0x0000FFFF00000000ULL) >> 16) |
+            ((x & 0xFFFF000000000000ULL) >> 48);
+    }
+
+    if (swap_ctl & FIMD_WINCON_SWAP_WORD) {
+        x = ((x & 0x00000000FFFFFFFFULL) << 32) |
+            ((x & 0xFFFFFFFF00000000ULL) >> 32);
+    }
+
+    *data = x;
+}
+
+/* Conversion routines of pixel data from the frame buffer area to the
+ * internal RGBA pixel representation.
+ * Every color component is internally represented as an 8-bit value. If the
+ * original data has fewer than 8 bits per component, the data is extended to
+ * 8 bits. For example, if the blue component has only two possible values,
+ * 0 and 1, it will be extended to 0 and 0xFF */
+
+/* One bit for alpha representation */
+#define DEF_PIXEL_TO_RGB_A1(N, R, G, B) \
+static void N(uint32_t pixel, rgba *p) \
+{ \
+    p->b = ((pixel & ((1 << (B)) - 1)) << (8 - (B))) | \
+        ((pixel >> (2 * (B) - 8)) & ((1 << (8 - (B))) - 1)); \
+    pixel >>= (B); \
+    p->g = (pixel & ((1 << (G)) - 1)) << (8 - (G)) | \
+        ((pixel >> (2 * (G) - 8)) & ((1 << (8 - (G))) - 1)); \
+    pixel >>= (G); \
+    p->r = (pixel & ((1 << (R)) - 1)) << (8 - (R)) | \
+        ((pixel >> (2 * (R) - 8)) & ((1 << (8 - (R))) - 1)); \
+    pixel >>= (R); \
+    p->a = (pixel & 0x1); \
+}
+
+DEF_PIXEL_TO_RGB_A1(pixel_a444_to_rgb, 4, 4, 4)
+DEF_PIXEL_TO_RGB_A1(pixel_a555_to_rgb, 5, 5, 5)
+DEF_PIXEL_TO_RGB_A1(pixel_a666_to_rgb, 6, 6, 6)
+DEF_PIXEL_TO_RGB_A1(pixel_a665_to_rgb, 6, 6, 5)
+DEF_PIXEL_TO_RGB_A1(pixel_a888_to_rgb, 8, 8, 8)
+DEF_PIXEL_TO_RGB_A1(pixel_a887_to_rgb, 8, 8, 7)
+
+/* Alpha component is always zero */
+#define DEF_PIXEL_TO_RGB_A0(N, R, G, B) \
+static void N(uint32_t pixel, rgba *p) \
+{ \
+    p->b = ((pixel & ((1 << (B)) - 1)) << (8 - (B))) | \
+        ((pixel >> (2 * (B) - 8)) & ((1 << (8 - (B))) - 1)); \
+    pixel >>= (B); \
+    p->g = (pixel & ((1 << (G)) - 1)) << (8 - (G)) | \
+        ((pixel >> (2 * (G) - 8)) & ((1 << (8 - (G))) - 1)); \
+    pixel >>= (G); \
+    p->r = (pixel & ((1 << (R)) - 1)) << (8 - (R)) | \
+        ((pixel >> (2 * (R) - 8)) & ((1 << (8 - (R))) - 1)); \
+    p->a = 0x0; \
+}
+
+DEF_PIXEL_TO_RGB_A0(pixel_565_to_rgb, 5, 6, 5)
+DEF_PIXEL_TO_RGB_A0(pixel_555_to_rgb, 5, 5, 5)
+DEF_PIXEL_TO_RGB_A0(pixel_666_to_rgb, 6, 6, 6)
+DEF_PIXEL_TO_RGB_A0(pixel_888_to_rgb, 8, 8, 8)
+
+/* Alpha component has some meaningful value */
+#define DEF_PIXEL_TO_RGB_A(N, R, G, B, A) \
+static void N(uint32_t pixel, rgba *p) \
+{ \
+    p->b = ((pixel & ((1 << (B)) - 1)) << (8 - (B))) | \
+        ((pixel >> (2 * (B) - 8)) & ((1 << (8 - (B))) - 1)); \
+    pixel >>= (B); \
+    p->g = (pixel & ((1 << (G)) - 1)) << (8 - (G)) | \
+        ((pixel >> (2 * (G) - 8)) & ((1 << (8 - (G))) - 1)); \
+    pixel >>= (G); \
+    p->r = (pixel & ((1 << (R)) - 1)) << (8 - (R)) | \
+        ((pixel >> (2 * (R) - 8)) & ((1 << (8 - (R))) - 1)); \
+    pixel >>= (R); \
+    p->a = (pixel & ((1 << (A)) - 1)) << (8 - (A)) | \
+        ((pixel >> (2 * (A) - 8)) & ((1 << (8 - (A))) - 1)); \
+    p->a = p->a | (p->a << 8) | (p->a << 16); \
+}
+
+DEF_PIXEL_TO_RGB_A(pixel_4444_to_rgb, 4, 4, 4, 4)
+DEF_PIXEL_TO_RGB_A(pixel_8888_to_rgb, 8, 8, 8, 8)
+
+/* Lookup table to extend a 2-bit color component to 8 bits */
+static const uint8_t pixel_lutable_2b[4] = {
+    0x0, 0x55, 0xAA, 0xFF
+};
+/* Lookup table to extend a 3-bit color component to 8 bits */
+static const uint8_t pixel_lutable_3b[8] = {
+    0x0, 0x24, 0x49, 0x6D, 0x92, 0xB6, 0xDB, 0xFF
+};
+/* Special case for a232 bpp mode */
+static void pixel_a232_to_rgb(uint32_t pixel, rgba *p)
+{
+    p->b = pixel_lutable_2b[(pixel 
& 0x3)]; + pixel >>= 2; + p->g = pixel_lutable_3b[(pixel & 0x7)]; + pixel >>= 3; + p->r = pixel_lutable_2b[(pixel & 0x3)]; + pixel >>= 2; + p->a = (pixel & 0x1); +} + +/* Special case for (5+1, 5+1, 5+1) mode. Data bit 15 is common LSB + * for all three color components */ +static void pixel_1555_to_rgb(uint32_t pixel, rgba *p) +{ + uint8_t comm = (pixel >> 15) & 1; + p->b = ((((pixel & 0x1F) << 1) | comm) << 2) | ((pixel >> 3) & 0x3); + pixel >>= 5; + p->g = ((((pixel & 0x1F) << 1) | comm) << 2) | ((pixel >> 3) & 0x3); + pixel >>= 5; + p->r = ((((pixel & 0x1F) << 1) | comm) << 2) | ((pixel >> 3) & 0x3); + p->a = 0x0; +} + +/* Put/get pixel to/from internal LCD Controller framebuffer */ + +static int put_pixel_ifb(const rgba p, uint8_t *d) +{ + *(uint8_t *)d++ = p.r; + *(uint8_t *)d++ = p.g; + *(uint8_t *)d++ = p.b; + *(uint32_t *)d = p.a; + return RGBA_SIZE; +} + +static int get_pixel_ifb(const uint8_t *s, rgba *p) +{ + p->r = *(uint8_t *)s++; + p->g = *(uint8_t *)s++; + p->b = *(uint8_t *)s++; + p->a = (*(uint32_t *)s) & 0x00FFFFFF; + return RGBA_SIZE; +} + +static pixel_to_rgb_func *palette_data_format[8] = { + [0] = pixel_565_to_rgb, + [1] = pixel_a555_to_rgb, + [2] = pixel_666_to_rgb, + [3] = pixel_a665_to_rgb, + [4] = pixel_a666_to_rgb, + [5] = pixel_888_to_rgb, + [6] = pixel_a888_to_rgb, + [7] = pixel_8888_to_rgb +}; + +/* Returns Index in palette data formats table for given window number WINDOW */ +static uint32_t +exynos4210_fimd_palette_format(Exynos4210fimdState *s, int window) +{ + uint32_t ret; + + switch (window) { + case 0: + ret = (s->wpalcon[1] >> FIMD_WPAL_W0PAL_L_SHT) & FIMD_WPAL_W0PAL_L; + if (ret != 7) { + ret = 6 - ret; + } + break; + case 1: + ret = (s->wpalcon[1] >> FIMD_WPAL_W1PAL_L_SHT) & FIMD_WPAL_W1PAL_L; + if (ret != 7) { + ret = 6 - ret; + } + break; + case 2: + ret = ((s->wpalcon[0] >> FIMD_WPAL_W2PAL_H_SHT) & FIMD_WPAL_W2PAL_H) | + ((s->wpalcon[1] >> FIMD_WPAL_W2PAL_L_SHT) & FIMD_WPAL_W2PAL_L); + break; + case 3: + ret = ((s->wpalcon[0] >> FIMD_WPAL_W3PAL_H_SHT) & FIMD_WPAL_W3PAL_H) | + ((s->wpalcon[1] >> FIMD_WPAL_W3PAL_L_SHT) & FIMD_WPAL_W3PAL_L); + break; + case 4: + ret = ((s->wpalcon[0] >> FIMD_WPAL_W4PAL_H_SHT) & FIMD_WPAL_W4PAL_H) | + ((s->wpalcon[1] >> FIMD_WPAL_W4PAL_L_SHT) & FIMD_WPAL_W4PAL_L); + break; + default: + hw_error("exynos4210.fimd: incorrect window number %d\n", window); + ret = 0; + break; + } + return ret; +} + +#define FIMD_1_MINUS_COLOR(x) \ + ((0xFF - ((x) & 0xFF)) | (0xFF00 - ((x) & 0xFF00)) | \ + (0xFF0000 - ((x) & 0xFF0000))) +#define EXTEND_LOWER_HALFBYTE(x) (((x) & 0xF0F0F) | (((x) << 4) & 0xF0F0F0)) +#define EXTEND_UPPER_HALFBYTE(x) (((x) & 0xF0F0F0) | (((x) >> 4) & 0xF0F0F)) + +/* Multiply three lower bytes of two 32-bit words with each other. + * Each byte with values 0-255 is considered as a number with possible values + * in a range [0 - 1] */ +static inline uint32_t fimd_mult_each_byte(uint32_t a, uint32_t b) +{ + uint32_t tmp; + uint32_t ret; + + ret = ((tmp = (((a & 0xFF) * (b & 0xFF)) / 0xFF)) > 0xFF) ? 0xFF : tmp; + ret |= ((tmp = ((((a >> 8) & 0xFF) * ((b >> 8) & 0xFF)) / 0xFF)) > 0xFF) ? + 0xFF00 : tmp << 8; + ret |= ((tmp = ((((a >> 16) & 0xFF) * ((b >> 16) & 0xFF)) / 0xFF)) > 0xFF) ? + 0xFF0000 : tmp << 16; + return ret; +} + +/* For each corresponding bytes of two 32-bit words: (a*b + c*d) + * Byte values 0-255 are mapped to a range [0 .. 
1] */ +static inline uint32_t +fimd_mult_and_sum_each_byte(uint32_t a, uint32_t b, uint32_t c, uint32_t d) +{ + uint32_t tmp; + uint32_t ret; + + ret = ((tmp = (((a & 0xFF) * (b & 0xFF) + (c & 0xFF) * (d & 0xFF)) / 0xFF)) + > 0xFF) ? 0xFF : tmp; + ret |= ((tmp = ((((a >> 8) & 0xFF) * ((b >> 8) & 0xFF) + ((c >> 8) & 0xFF) * + ((d >> 8) & 0xFF)) / 0xFF)) > 0xFF) ? 0xFF00 : tmp << 8; + ret |= ((tmp = ((((a >> 16) & 0xFF) * ((b >> 16) & 0xFF) + + ((c >> 16) & 0xFF) * ((d >> 16) & 0xFF)) / 0xFF)) > 0xFF) ? + 0xFF0000 : tmp << 16; + return ret; +} + +/* These routines cover all possible sources of window's transparent factor + * used in blending equation. Choice of routine is affected by WPALCON + * registers, BLENDCON register and window's WINCON register */ + +static uint32_t fimd_get_alpha_pix(Exynos4210fimdWindow *w, uint32_t pix_a) +{ + return pix_a; +} + +static uint32_t +fimd_get_alpha_pix_extlow(Exynos4210fimdWindow *w, uint32_t pix_a) +{ + return EXTEND_LOWER_HALFBYTE(pix_a); +} + +static uint32_t +fimd_get_alpha_pix_exthigh(Exynos4210fimdWindow *w, uint32_t pix_a) +{ + return EXTEND_UPPER_HALFBYTE(pix_a); +} + +static uint32_t fimd_get_alpha_mult(Exynos4210fimdWindow *w, uint32_t pix_a) +{ + return fimd_mult_each_byte(pix_a, w->alpha_val[0]); +} + +static uint32_t fimd_get_alpha_mult_ext(Exynos4210fimdWindow *w, uint32_t pix_a) +{ + return fimd_mult_each_byte(EXTEND_LOWER_HALFBYTE(pix_a), + EXTEND_UPPER_HALFBYTE(w->alpha_val[0])); +} + +static uint32_t fimd_get_alpha_aen(Exynos4210fimdWindow *w, uint32_t pix_a) +{ + return w->alpha_val[pix_a]; +} + +static uint32_t fimd_get_alpha_aen_ext(Exynos4210fimdWindow *w, uint32_t pix_a) +{ + return EXTEND_UPPER_HALFBYTE(w->alpha_val[pix_a]); +} + +static uint32_t fimd_get_alpha_sel(Exynos4210fimdWindow *w, uint32_t pix_a) +{ + return w->alpha_val[(w->wincon & FIMD_WINCON_ALPHA_SEL) ? 1 : 0]; +} + +static uint32_t fimd_get_alpha_sel_ext(Exynos4210fimdWindow *w, uint32_t pix_a) +{ + return EXTEND_UPPER_HALFBYTE(w->alpha_val[(w->wincon & + FIMD_WINCON_ALPHA_SEL) ? 1 : 0]); +} + +/* Updates currently active alpha value get function for specified window */ +static void fimd_update_get_alpha(Exynos4210fimdState *s, int win) +{ + Exynos4210fimdWindow *w = &s->window[win]; + const bool alpha_is_8bit = s->blendcon & FIMD_ALPHA_8BIT; + + if (w->wincon & FIMD_WINCON_BLD_PIX) { + if ((w->wincon & FIMD_WINCON_ALPHA_SEL) && WIN_BPP_MODE_WITH_ALPHA(w)) { + /* In this case, alpha component contains meaningful value */ + if (w->wincon & FIMD_WINCON_ALPHA_MUL) { + w->get_alpha = alpha_is_8bit ? + fimd_get_alpha_mult : fimd_get_alpha_mult_ext; + } else { + w->get_alpha = alpha_is_8bit ? + fimd_get_alpha_pix : fimd_get_alpha_pix_extlow; + } + } else { + if (IS_PALETTIZED_MODE(w) && + PAL_MODE_WITH_ALPHA(exynos4210_fimd_palette_format(s, win))) { + /* Alpha component has 8-bit numeric value */ + w->get_alpha = alpha_is_8bit ? + fimd_get_alpha_pix : fimd_get_alpha_pix_exthigh; + } else { + /* Alpha has only two possible values (AEN) */ + w->get_alpha = alpha_is_8bit ? + fimd_get_alpha_aen : fimd_get_alpha_aen_ext; + } + } + } else { + w->get_alpha = alpha_is_8bit ? 
fimd_get_alpha_sel : + fimd_get_alpha_sel_ext; + } +} + +/* Blends current window's (w) pixel (foreground pixel *ret) with background + * window (w_blend) pixel p_bg according to formula: + * NEW_COLOR = a_coef x FG_PIXEL_COLOR + b_coef x BG_PIXEL_COLOR + * NEW_ALPHA = p_coef x FG_ALPHA + q_coef x BG_ALPHA + */ +static void +exynos4210_fimd_blend_pixel(Exynos4210fimdWindow *w, rgba p_bg, rgba *ret) +{ + rgba p_fg = *ret; + uint32_t bg_color = ((p_bg.r & 0xFF) << 16) | ((p_bg.g & 0xFF) << 8) | + (p_bg.b & 0xFF); + uint32_t fg_color = ((p_fg.r & 0xFF) << 16) | ((p_fg.g & 0xFF) << 8) | + (p_fg.b & 0xFF); + uint32_t alpha_fg = p_fg.a; + int i; + /* It is possible that blending equation parameters a and b do not + * depend on window BLENEQ register. Account for this with first_coef */ + enum { A_COEF = 0, B_COEF = 1, P_COEF = 2, Q_COEF = 3, COEF_NUM = 4}; + uint32_t first_coef = A_COEF; + uint32_t blend_param[COEF_NUM]; + + if (w->keycon[0] & FIMD_WKEYCON0_KEYEN) { + uint32_t colorkey = (w->keycon[1] & + ~(w->keycon[0] & FIMD_WKEYCON0_COMPKEY)) & FIMD_WKEYCON0_COMPKEY; + + if ((w->keycon[0] & FIMD_WKEYCON0_DIRCON) && + (bg_color & ~(w->keycon[0] & FIMD_WKEYCON0_COMPKEY)) == colorkey) { + /* Foreground pixel is displayed */ + if (w->keycon[0] & FIMD_WKEYCON0_KEYBLEN) { + alpha_fg = w->keyalpha; + blend_param[A_COEF] = alpha_fg; + blend_param[B_COEF] = FIMD_1_MINUS_COLOR(alpha_fg); + } else { + alpha_fg = 0; + blend_param[A_COEF] = 0xFFFFFF; + blend_param[B_COEF] = 0x0; + } + first_coef = P_COEF; + } else if ((w->keycon[0] & FIMD_WKEYCON0_DIRCON) == 0 && + (fg_color & ~(w->keycon[0] & FIMD_WKEYCON0_COMPKEY)) == colorkey) { + /* Background pixel is displayed */ + if (w->keycon[0] & FIMD_WKEYCON0_KEYBLEN) { + alpha_fg = w->keyalpha; + blend_param[A_COEF] = alpha_fg; + blend_param[B_COEF] = FIMD_1_MINUS_COLOR(alpha_fg); + } else { + alpha_fg = 0; + blend_param[A_COEF] = 0x0; + blend_param[B_COEF] = 0xFFFFFF; + } + first_coef = P_COEF; + } + } + + for (i = first_coef; i < COEF_NUM; i++) { + switch ((w->blendeq >> i * 6) & FIMD_BLENDEQ_COEF_MASK) { + case 0: + blend_param[i] = 0; + break; + case 1: + blend_param[i] = 0xFFFFFF; + break; + case 2: + blend_param[i] = alpha_fg; + break; + case 3: + blend_param[i] = FIMD_1_MINUS_COLOR(alpha_fg); + break; + case 4: + blend_param[i] = p_bg.a; + break; + case 5: + blend_param[i] = FIMD_1_MINUS_COLOR(p_bg.a); + break; + case 6: + blend_param[i] = w->alpha_val[0]; + break; + case 10: + blend_param[i] = fg_color; + break; + case 11: + blend_param[i] = FIMD_1_MINUS_COLOR(fg_color); + break; + case 12: + blend_param[i] = bg_color; + break; + case 13: + blend_param[i] = FIMD_1_MINUS_COLOR(bg_color); + break; + default: + hw_error("exynos4210.fimd: blend equation coef illegal value\n"); + break; + } + } + + fg_color = fimd_mult_and_sum_each_byte(bg_color, blend_param[B_COEF], + fg_color, blend_param[A_COEF]); + ret->b = fg_color & 0xFF; + fg_color >>= 8; + ret->g = fg_color & 0xFF; + fg_color >>= 8; + ret->r = fg_color & 0xFF; + ret->a = fimd_mult_and_sum_each_byte(alpha_fg, blend_param[P_COEF], + p_bg.a, blend_param[Q_COEF]); +} + +/* These routines read data from video frame buffer in system RAM, convert + * this data to display controller internal representation, if necessary, + * perform pixel blending with data, currently presented in internal buffer. + * Result is stored in display controller internal frame buffer. 
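+ * + * As a worked illustration (example numbers chosen here for exposition, not + * taken from the hardware documentation): with BLENDEQ choosing the + * foreground alpha as A_COEF and its complement as B_COEF, a foreground red + * byte 0xFF at alpha 0x80 over a background red byte 0x00 comes out as + * (0x00 * 0x7F + 0xFF * 0x80) / 0xFF = 0x80, i.e. half-intensity red; this + * is exactly the per-byte product-sum that fimd_mult_and_sum_each_byte() + * computes for each color component. 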
*/ + +/* Draw line with index in palette table in RAM frame buffer data */ +#define DEF_DRAW_LINE_PALETTE(N) \ +static void glue(draw_line_palette_, N)(Exynos4210fimdWindow *w, uint8_t *src, \ + uint8_t *dst, bool blend) \ +{ \ + int width = w->rightbot_x - w->lefttop_x + 1; \ + uint8_t *ifb = dst; \ + uint8_t swap = (w->wincon & FIMD_WINCON_SWAP) >> FIMD_WINCON_SWAP_SHIFT; \ + uint64_t data; \ + rgba p, p_old; \ + int i; \ + do { \ + memcpy(&data, src, sizeof(data)); \ + src += 8; \ + fimd_swap_data(swap, &data); \ + for (i = (64 / (N) - 1); i >= 0; i--) { \ + w->pixel_to_rgb(w->palette[(data >> ((N) * i)) & \ + ((1ULL << (N)) - 1)], &p); \ + p.a = w->get_alpha(w, p.a); \ + if (blend) { \ + ifb += get_pixel_ifb(ifb, &p_old); \ + exynos4210_fimd_blend_pixel(w, p_old, &p); \ + } \ + dst += put_pixel_ifb(p, dst); \ + } \ + width -= (64 / (N)); \ + } while (width > 0); \ +} + +/* Draw line with direct color value in RAM frame buffer data */ +#define DEF_DRAW_LINE_NOPALETTE(N) \ +static void glue(draw_line_, N)(Exynos4210fimdWindow *w, uint8_t *src, \ + uint8_t *dst, bool blend) \ +{ \ + int width = w->rightbot_x - w->lefttop_x + 1; \ + uint8_t *ifb = dst; \ + uint8_t swap = (w->wincon & FIMD_WINCON_SWAP) >> FIMD_WINCON_SWAP_SHIFT; \ + uint64_t data; \ + rgba p, p_old; \ + int i; \ + do { \ + memcpy(&data, src, sizeof(data)); \ + src += 8; \ + fimd_swap_data(swap, &data); \ + for (i = (64 / (N) - 1); i >= 0; i--) { \ + w->pixel_to_rgb((data >> ((N) * i)) & ((1ULL << (N)) - 1), &p); \ + p.a = w->get_alpha(w, p.a); \ + if (blend) { \ + ifb += get_pixel_ifb(ifb, &p_old); \ + exynos4210_fimd_blend_pixel(w, p_old, &p); \ + } \ + dst += put_pixel_ifb(p, dst); \ + } \ + width -= (64 / (N)); \ + } while (width > 0); \ +} + +DEF_DRAW_LINE_PALETTE(1) +DEF_DRAW_LINE_PALETTE(2) +DEF_DRAW_LINE_PALETTE(4) +DEF_DRAW_LINE_PALETTE(8) +DEF_DRAW_LINE_NOPALETTE(8) /* 8bpp mode has palette and non-palette versions */ +DEF_DRAW_LINE_NOPALETTE(16) +DEF_DRAW_LINE_NOPALETTE(32) + +/* Special draw line routine for window color map case */ +static void draw_line_mapcolor(Exynos4210fimdWindow *w, uint8_t *src, + uint8_t *dst, bool blend) +{ + rgba p, p_old; + uint8_t *ifb = dst; + int width = w->rightbot_x - w->lefttop_x + 1; + uint32_t map_color = w->winmap & FIMD_WINMAP_COLOR_MASK; + + do { + pixel_888_to_rgb(map_color, &p); + p.a = w->get_alpha(w, p.a); + if (blend) { + ifb += get_pixel_ifb(ifb, &p_old); + exynos4210_fimd_blend_pixel(w, p_old, &p); + } + dst += put_pixel_ifb(p, dst); + } while (--width); +} + +/* Write RGB to QEMU's GraphicConsole framebuffer */ + +static int put_to_qemufb_pixel8(const rgba p, uint8_t *d) +{ + uint32_t pixel = rgb_to_pixel8(p.r, p.g, p.b); + *(uint8_t *)d = pixel; + return 1; +} + +static int put_to_qemufb_pixel15(const rgba p, uint8_t *d) +{ + uint32_t pixel = rgb_to_pixel15(p.r, p.g, p.b); + *(uint16_t *)d = pixel; + return 2; +} + +static int put_to_qemufb_pixel16(const rgba p, uint8_t *d) +{ + uint32_t pixel = rgb_to_pixel16(p.r, p.g, p.b); + *(uint16_t *)d = pixel; + return 2; +} + +static int put_to_qemufb_pixel24(const rgba p, uint8_t *d) +{ + uint32_t pixel = rgb_to_pixel24(p.r, p.g, p.b); + *(uint8_t *)d++ = (pixel >> 0) & 0xFF; + *(uint8_t *)d++ = (pixel >> 8) & 0xFF; + *(uint8_t *)d++ = (pixel >> 16) & 0xFF; + return 3; +} + +static int put_to_qemufb_pixel32(const rgba p, uint8_t *d) +{ + uint32_t pixel = rgb_to_pixel24(p.r, p.g, p.b); + *(uint32_t *)d = pixel; + return 4; +} + +/* Routine to copy pixel from internal buffer to QEMU buffer */ +static int 
(*put_pixel_toqemu)(const rgba p, uint8_t *pixel); +static inline void fimd_update_putpix_qemu(int bpp) +{ + switch (bpp) { + case 8: + put_pixel_toqemu = put_to_qemufb_pixel8; + break; + case 15: + put_pixel_toqemu = put_to_qemufb_pixel15; + break; + case 16: + put_pixel_toqemu = put_to_qemufb_pixel16; + break; + case 24: + put_pixel_toqemu = put_to_qemufb_pixel24; + break; + case 32: + put_pixel_toqemu = put_to_qemufb_pixel32; + break; + default: + hw_error("exynos4210.fimd: unsupported BPP (%d)", bpp); + break; + } +} + +/* Routine to copy a line from internal frame buffer to QEMU display */ +static void fimd_copy_line_toqemu(int width, uint8_t *src, uint8_t *dst) +{ + rgba p; + + do { + src += get_pixel_ifb(src, &p); + dst += put_pixel_toqemu(p, dst); + } while (--width); +} + +/* Parse BPPMODE_F = WINCON1[5:2] bits */ +static void exynos4210_fimd_update_win_bppmode(Exynos4210fimdState *s, int win) +{ + Exynos4210fimdWindow *w = &s->window[win]; + + if (w->winmap & FIMD_WINMAP_EN) { + w->draw_line = draw_line_mapcolor; + return; + } + + switch (WIN_BPP_MODE(w)) { + case 0: + w->draw_line = draw_line_palette_1; + w->pixel_to_rgb = + palette_data_format[exynos4210_fimd_palette_format(s, win)]; + break; + case 1: + w->draw_line = draw_line_palette_2; + w->pixel_to_rgb = + palette_data_format[exynos4210_fimd_palette_format(s, win)]; + break; + case 2: + w->draw_line = draw_line_palette_4; + w->pixel_to_rgb = + palette_data_format[exynos4210_fimd_palette_format(s, win)]; + break; + case 3: + w->draw_line = draw_line_palette_8; + w->pixel_to_rgb = + palette_data_format[exynos4210_fimd_palette_format(s, win)]; + break; + case 4: + w->draw_line = draw_line_8; + w->pixel_to_rgb = pixel_a232_to_rgb; + break; + case 5: + w->draw_line = draw_line_16; + w->pixel_to_rgb = pixel_565_to_rgb; + break; + case 6: + w->draw_line = draw_line_16; + w->pixel_to_rgb = pixel_a555_to_rgb; + break; + case 7: + w->draw_line = draw_line_16; + w->pixel_to_rgb = pixel_1555_to_rgb; + break; + case 8: + w->draw_line = draw_line_32; + w->pixel_to_rgb = pixel_666_to_rgb; + break; + case 9: + w->draw_line = draw_line_32; + w->pixel_to_rgb = pixel_a665_to_rgb; + break; + case 10: + w->draw_line = draw_line_32; + w->pixel_to_rgb = pixel_a666_to_rgb; + break; + case 11: + w->draw_line = draw_line_32; + w->pixel_to_rgb = pixel_888_to_rgb; + break; + case 12: + w->draw_line = draw_line_32; + w->pixel_to_rgb = pixel_a887_to_rgb; + break; + case 13: + w->draw_line = draw_line_32; + if ((w->wincon & FIMD_WINCON_BLD_PIX) && (w->wincon & + FIMD_WINCON_ALPHA_SEL)) { + w->pixel_to_rgb = pixel_8888_to_rgb; + } else { + w->pixel_to_rgb = pixel_a888_to_rgb; + } + break; + case 14: + w->draw_line = draw_line_16; + if ((w->wincon & FIMD_WINCON_BLD_PIX) && (w->wincon & + FIMD_WINCON_ALPHA_SEL)) { + w->pixel_to_rgb = pixel_4444_to_rgb; + } else { + w->pixel_to_rgb = pixel_a444_to_rgb; + } + break; + case 15: + w->draw_line = draw_line_16; + w->pixel_to_rgb = pixel_555_to_rgb; + break; + } +} + +#if EXYNOS4210_FIMD_MODE_TRACE > 0 +static const char *exynos4210_fimd_get_bppmode(int mode_code) +{ + switch (mode_code) { + case 0: + return "1 bpp"; + case 1: + return "2 bpp"; + case 2: + return "4 bpp"; + case 3: + return "8 bpp (palettized)"; + case 4: + return "8 bpp (non-palettized, A:1-R:2-G:3-B:2)"; + case 5: + return "16 bpp (non-palettized, R:5-G:6-B:5)"; + case 6: + return "16 bpp (non-palettized, A:1-R:5-G:5-B:5)"; + case 7: + return "16 bpp (non-palettized, I:1-R:5-G:5-B:5)"; + case 8: + return "Unpacked 18 bpp (non-palettized, R:6-G:6-B:6)"; + case 9: + return "Unpacked 18 bpp (non-palettized, A:1-R:6-G:6-B:5)"; + case 10: + return "Unpacked 19 bpp (non-palettized, A:1-R:6-G:6-B:6)"; + case 11: + return "Unpacked 24 bpp (non-palettized, R:8-G:8-B:8)"; + case 12: + return "Unpacked 24 bpp (non-palettized, A:1-R:8-G:8-B:7)"; + case 13: + return "Unpacked 25 bpp (non-palettized, A:1-R:8-G:8-B:8)"; + case 14: + return "Unpacked 13 bpp (non-palettized, A:1-R:4-G:4-B:4)"; + case 15: + return "Unpacked 15 bpp (non-palettized, R:5-G:5-B:5)"; + default: + return "Non-existent bpp mode"; + } +} + +static inline void exynos4210_fimd_trace_bppmode(Exynos4210fimdState *s, + int win_num, uint32_t val) +{ + Exynos4210fimdWindow *w = &s->window[win_num]; + + if (w->winmap & FIMD_WINMAP_EN) { + printf("QEMU FIMD: Window %d is mapped with MAPCOLOR=0x%x\n", + win_num, w->winmap & 0xFFFFFF); + return; + } + + if ((val != 0xFFFFFFFF) && ((w->wincon >> 2) & 0xF) == ((val >> 2) & 0xF)) { + return; + } + printf("QEMU FIMD: Window %d BPP mode set to %s\n", win_num, + exynos4210_fimd_get_bppmode((val >> 2) & 0xF)); +} +#else +static inline void exynos4210_fimd_trace_bppmode(Exynos4210fimdState *s, + int win_num, uint32_t val) +{ + +} +#endif + +static inline int fimd_get_buffer_id(Exynos4210fimdWindow *w) +{ + switch (w->wincon & FIMD_WINCON_BUFSTATUS) { + case FIMD_WINCON_BUF0_STAT: + return 0; + case FIMD_WINCON_BUF1_STAT: + return 1; + case FIMD_WINCON_BUF2_STAT: + return 2; + default: + qemu_log_mask(LOG_GUEST_ERROR, "FIMD: Non-existent buffer index\n"); + return 0; + } +} + +static void exynos4210_fimd_invalidate(void *opaque) +{ + Exynos4210fimdState *s = (Exynos4210fimdState *)opaque; + s->invalidate = true; +} + +/* Updates specified window's MemoryRegionSection based on values of WINCON, + * VIDOSDA, VIDOSDB, VIDWADDx and SHADOWCON registers */ +static void fimd_update_memory_section(Exynos4210fimdState *s, unsigned win) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(s); + Exynos4210fimdWindow *w = &s->window[win]; + hwaddr fb_start_addr, fb_mapped_len; + + if (!s->enabled || !(w->wincon & FIMD_WINCON_ENWIN) || + FIMD_WINDOW_PROTECTED(s->shadowcon, win)) { + return; + } + + if (w->host_fb_addr) { + cpu_physical_memory_unmap(w->host_fb_addr, w->fb_len, 0, 0); + w->host_fb_addr = NULL; + w->fb_len = 0; + } + + fb_start_addr = w->buf_start[fimd_get_buffer_id(w)]; + /* Total number of bytes of virtual screen used by current window */ + w->fb_len = fb_mapped_len = (w->virtpage_width + w->virtpage_offsize) * + (w->rightbot_y - w->lefttop_y + 1); + + /* TODO: add .exit and unref the region there. Not needed yet since sysbus + * does not support hot-unplug. 
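+ * + * Sizing note (illustrative numbers only, not from the datasheet): a window + * covering 480 lines whose virtual page is 1024 bytes wide with a zero + * offsize maps 1024 * 480 = 0x78000 bytes of guest memory below. 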
+ */ + if (w->mem_section.mr) { + memory_region_set_log(w->mem_section.mr, false, DIRTY_MEMORY_VGA); + memory_region_unref(w->mem_section.mr); + } + + w->mem_section = memory_region_find(sysbus_address_space(sbd), + fb_start_addr, w->fb_len); + assert(w->mem_section.mr); + assert(w->mem_section.offset_within_address_space == fb_start_addr); + DPRINT_TRACE("Window %u framebuffer changed: address=0x%08x, len=0x%x\n", + win, fb_start_addr, w->fb_len); + + if (int128_get64(w->mem_section.size) != w->fb_len || + !memory_region_is_ram(w->mem_section.mr)) { + qemu_log_mask(LOG_GUEST_ERROR, + "FIMD: Failed to find window %u framebuffer region\n", + win); + goto error_return; + } + + w->host_fb_addr = cpu_physical_memory_map(fb_start_addr, &fb_mapped_len, + false); + if (!w->host_fb_addr) { + qemu_log_mask(LOG_GUEST_ERROR, + "FIMD: Failed to map window %u framebuffer\n", win); + goto error_return; + } + + if (fb_mapped_len != w->fb_len) { + qemu_log_mask(LOG_GUEST_ERROR, + "FIMD: Window %u mapped framebuffer length is less than " + "expected\n", win); + cpu_physical_memory_unmap(w->host_fb_addr, fb_mapped_len, 0, 0); + goto error_return; + } + memory_region_set_log(w->mem_section.mr, true, DIRTY_MEMORY_VGA); + exynos4210_fimd_invalidate(s); + return; + +error_return: + memory_region_unref(w->mem_section.mr); + w->mem_section.mr = NULL; + w->mem_section.size = int128_zero(); + w->host_fb_addr = NULL; + w->fb_len = 0; +} + +static void exynos4210_fimd_enable(Exynos4210fimdState *s, bool enabled) +{ + if (enabled && !s->enabled) { + unsigned w; + s->enabled = true; + for (w = 0; w < NUM_OF_WINDOWS; w++) { + fimd_update_memory_section(s, w); + } + } + s->enabled = enabled; + DPRINT_TRACE("display controller %s\n", enabled ? "enabled" : "disabled"); +} + +static inline uint32_t unpack_upper_4(uint32_t x) +{ + return ((x & 0xF00) << 12) | ((x & 0xF0) << 8) | ((x & 0xF) << 4); +} + +static inline uint32_t pack_upper_4(uint32_t x) +{ + return (((x & 0xF00000) >> 12) | ((x & 0xF000) >> 8) | + ((x & 0xF0) >> 4)) & 0xFFF; +} + +static void exynos4210_fimd_update_irq(Exynos4210fimdState *s) +{ + if (!(s->vidintcon[0] & FIMD_VIDINT_INTEN)) { + qemu_irq_lower(s->irq[0]); + qemu_irq_lower(s->irq[1]); + qemu_irq_lower(s->irq[2]); + return; + } + if ((s->vidintcon[0] & FIMD_VIDINT_INTFIFOEN) && + (s->vidintcon[1] & FIMD_VIDINT_INTFIFOPEND)) { + qemu_irq_raise(s->irq[0]); + } else { + qemu_irq_lower(s->irq[0]); + } + if ((s->vidintcon[0] & FIMD_VIDINT_INTFRMEN) && + (s->vidintcon[1] & FIMD_VIDINT_INTFRMPEND)) { + qemu_irq_raise(s->irq[1]); + } else { + qemu_irq_lower(s->irq[1]); + } + if ((s->vidintcon[0] & FIMD_VIDINT_I80IFDONE) && + (s->vidintcon[1] & FIMD_VIDINT_INTI80PEND)) { + qemu_irq_raise(s->irq[2]); + } else { + qemu_irq_lower(s->irq[2]); + } +} + +static void exynos4210_update_resolution(Exynos4210fimdState *s) +{ + DisplaySurface *surface = qemu_console_surface(s->console); + + /* LCD resolution is stored in VIDEO TIME CONTROL REGISTER 2 */ + uint32_t width = ((s->vidtcon[2] >> FIMD_VIDTCON2_HOR_SHIFT) & + FIMD_VIDTCON2_SIZE_MASK) + 1; + uint32_t height = ((s->vidtcon[2] >> FIMD_VIDTCON2_VER_SHIFT) & + FIMD_VIDTCON2_SIZE_MASK) + 1; + + if (s->ifb == NULL || surface_width(surface) != width || + surface_height(surface) != height) { + DPRINT_L1("Resolution changed from %ux%u to %ux%u\n", + surface_width(surface), surface_height(surface), width, height); + qemu_console_resize(s->console, width, height); + s->ifb = g_realloc(s->ifb, width * height * RGBA_SIZE + 1); + memset(s->ifb, 0, width * height * 
RGBA_SIZE + 1); + exynos4210_fimd_invalidate(s); + } +} + +static void exynos4210_fimd_update(void *opaque) +{ + Exynos4210fimdState *s = (Exynos4210fimdState *)opaque; + DisplaySurface *surface; + Exynos4210fimdWindow *w; + DirtyBitmapSnapshot *snap; + int i, line; + hwaddr fb_line_addr, inc_size; + int scrn_height; + int first_line = -1, last_line = -1, scrn_width; + bool blend = false; + uint8_t *host_fb_addr; + bool is_dirty = false; + int global_width; + + if (!s || !s->console || !s->enabled || + surface_bits_per_pixel(qemu_console_surface(s->console)) == 0) { + return; + } + + global_width = (s->vidtcon[2] & FIMD_VIDTCON2_SIZE_MASK) + 1; + exynos4210_update_resolution(s); + surface = qemu_console_surface(s->console); + + for (i = 0; i < NUM_OF_WINDOWS; i++) { + w = &s->window[i]; + if ((w->wincon & FIMD_WINCON_ENWIN) && w->host_fb_addr) { + scrn_height = w->rightbot_y - w->lefttop_y + 1; + scrn_width = w->virtpage_width; + /* Total width of virtual screen page in bytes */ + inc_size = scrn_width + w->virtpage_offsize; + host_fb_addr = w->host_fb_addr; + fb_line_addr = w->mem_section.offset_within_region; + snap = memory_region_snapshot_and_clear_dirty(w->mem_section.mr, + fb_line_addr, inc_size * scrn_height, DIRTY_MEMORY_VGA); + + for (line = 0; line < scrn_height; line++) { + is_dirty = memory_region_snapshot_get_dirty(w->mem_section.mr, + snap, fb_line_addr, scrn_width); + + if (s->invalidate || is_dirty) { + if (first_line == -1) { + first_line = line; + } + last_line = line; + w->draw_line(w, host_fb_addr, s->ifb + + w->lefttop_x * RGBA_SIZE + (w->lefttop_y + line) * + global_width * RGBA_SIZE, blend); + } + host_fb_addr += inc_size; + fb_line_addr += inc_size; + } + g_free(snap); + blend = true; + } + } + + /* Copy resulting image to QEMU_CONSOLE. 
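+ * Only the band of lines [first_line, last_line] found dirty (or + * invalidated) above is converted; e.g. if the guest touched a single + * scanline, exactly one ifb line is pushed through fimd_copy_line_toqemu(). 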
*/ + if (first_line >= 0) { + uint8_t *d; + int bpp; + + bpp = surface_bits_per_pixel(surface); + fimd_update_putpix_qemu(bpp); + bpp = (bpp + 1) >> 3; + d = surface_data(surface); + for (line = first_line; line <= last_line; line++) { + fimd_copy_line_toqemu(global_width, s->ifb + global_width * line * + RGBA_SIZE, d + global_width * line * bpp); + } + dpy_gfx_update_full(s->console); + } + s->invalidate = false; + s->vidintcon[1] |= FIMD_VIDINT_INTFRMPEND; + if ((s->vidcon[0] & FIMD_VIDCON0_ENVID_F) == 0) { + exynos4210_fimd_enable(s, false); + } + exynos4210_fimd_update_irq(s); +} + +static void exynos4210_fimd_reset(DeviceState *d) +{ + Exynos4210fimdState *s = EXYNOS4210_FIMD(d); + unsigned w; + + DPRINT_TRACE("Display controller reset\n"); + /* Set all display controller registers to 0 */ + memset(&s->vidcon, 0, (uint8_t *)&s->window - (uint8_t *)&s->vidcon); + for (w = 0; w < NUM_OF_WINDOWS; w++) { + memset(&s->window[w], 0, sizeof(Exynos4210fimdWindow)); + s->window[w].blendeq = 0xC2; + exynos4210_fimd_update_win_bppmode(s, w); + exynos4210_fimd_trace_bppmode(s, w, 0xFFFFFFFF); + fimd_update_get_alpha(s, w); + } + + g_free(s->ifb); + s->ifb = NULL; + + exynos4210_fimd_invalidate(s); + exynos4210_fimd_enable(s, false); + /* Some registers have non-zero initial values */ + s->winchmap = 0x7D517D51; + s->colorgaincon = 0x10040100; + s->huecoef_cr[0] = s->huecoef_cr[3] = 0x01000100; + s->huecoef_cb[0] = s->huecoef_cb[3] = 0x01000100; + s->hueoffset = 0x01800080; +} + +static void exynos4210_fimd_write(void *opaque, hwaddr offset, + uint64_t val, unsigned size) +{ + Exynos4210fimdState *s = (Exynos4210fimdState *)opaque; + unsigned w, i; + uint32_t old_value; + + DPRINT_L2("write offset 0x%08x, value=%llu(0x%08llx)\n", offset, + (long long unsigned int)val, (long long unsigned int)val); + + switch (offset) { + case FIMD_VIDCON0: + if ((val & FIMD_VIDCON0_ENVID_MASK) == FIMD_VIDCON0_ENVID_MASK) { + exynos4210_fimd_enable(s, true); + } else { + if ((val & FIMD_VIDCON0_ENVID) == 0) { + exynos4210_fimd_enable(s, false); + } + } + s->vidcon[0] = val; + break; + case FIMD_VIDCON1: + /* Leave read-only bits as is */ + val = (val & (~FIMD_VIDCON1_ROMASK)) | + (s->vidcon[1] & FIMD_VIDCON1_ROMASK); + s->vidcon[1] = val; + break; + case FIMD_VIDCON2 ... FIMD_VIDCON3: + s->vidcon[(offset) >> 2] = val; + break; + case FIMD_VIDTCON_START ... FIMD_VIDTCON_END: + s->vidtcon[(offset - FIMD_VIDTCON_START) >> 2] = val; + break; + case FIMD_WINCON_START ... 
FIMD_WINCON_END: + w = (offset - FIMD_WINCON_START) >> 2; + /* Window's current buffer ID */ + i = fimd_get_buffer_id(&s->window[w]); + old_value = s->window[w].wincon; + val = (val & ~FIMD_WINCON_ROMASK) | + (s->window[w].wincon & FIMD_WINCON_ROMASK); + if (w == 0) { + /* Window 0 wincon ALPHA_MUL bit must always be 0 */ + val &= ~FIMD_WINCON_ALPHA_MUL; + } + exynos4210_fimd_trace_bppmode(s, w, val); + switch (val & FIMD_WINCON_BUFSELECT) { + case FIMD_WINCON_BUF0_SEL: + val &= ~FIMD_WINCON_BUFSTATUS; + break; + case FIMD_WINCON_BUF1_SEL: + val = (val & ~FIMD_WINCON_BUFSTAT_H) | FIMD_WINCON_BUFSTAT_L; + break; + case FIMD_WINCON_BUF2_SEL: + if (val & FIMD_WINCON_BUFMODE) { + val = (val & ~FIMD_WINCON_BUFSTAT_L) | FIMD_WINCON_BUFSTAT_H; + } + break; + default: + break; + } + s->window[w].wincon = val; + exynos4210_fimd_update_win_bppmode(s, w); + fimd_update_get_alpha(s, w); + if ((i != fimd_get_buffer_id(&s->window[w])) || + (!(old_value & FIMD_WINCON_ENWIN) && (s->window[w].wincon & + FIMD_WINCON_ENWIN))) { + fimd_update_memory_section(s, w); + } + break; + case FIMD_SHADOWCON: + old_value = s->shadowcon; + s->shadowcon = val; + for (w = 0; w < NUM_OF_WINDOWS; w++) { + if (FIMD_WINDOW_PROTECTED(old_value, w) && + !FIMD_WINDOW_PROTECTED(s->shadowcon, w)) { + fimd_update_memory_section(s, w); + } + } + break; + case FIMD_WINCHMAP: + s->winchmap = val; + break; + case FIMD_VIDOSD_START ... FIMD_VIDOSD_END: + w = (offset - FIMD_VIDOSD_START) >> 4; + i = ((offset - FIMD_VIDOSD_START) & 0xF) >> 2; + switch (i) { + case 0: + old_value = s->window[w].lefttop_y; + s->window[w].lefttop_x = (val >> FIMD_VIDOSD_HOR_SHIFT) & + FIMD_VIDOSD_COORD_MASK; + s->window[w].lefttop_y = (val >> FIMD_VIDOSD_VER_SHIFT) & + FIMD_VIDOSD_COORD_MASK; + if (s->window[w].lefttop_y != old_value) { + fimd_update_memory_section(s, w); + } + break; + case 1: + old_value = s->window[w].rightbot_y; + s->window[w].rightbot_x = (val >> FIMD_VIDOSD_HOR_SHIFT) & + FIMD_VIDOSD_COORD_MASK; + s->window[w].rightbot_y = (val >> FIMD_VIDOSD_VER_SHIFT) & + FIMD_VIDOSD_COORD_MASK; + if (s->window[w].rightbot_y != old_value) { + fimd_update_memory_section(s, w); + } + break; + case 2: + if (w == 0) { + s->window[w].osdsize = val; + } else { + s->window[w].alpha_val[0] = + unpack_upper_4((val & FIMD_VIDOSD_ALPHA_AEN0) >> + FIMD_VIDOSD_AEN0_SHIFT) | + (s->window[w].alpha_val[0] & FIMD_VIDALPHA_ALPHA_LOWER); + s->window[w].alpha_val[1] = + unpack_upper_4(val & FIMD_VIDOSD_ALPHA_AEN1) | + (s->window[w].alpha_val[1] & FIMD_VIDALPHA_ALPHA_LOWER); + } + break; + case 3: + if (w != 1 && w != 2) { + qemu_log_mask(LOG_GUEST_ERROR, + "FIMD: Bad write offset 0x%08"HWADDR_PRIx"\n", + offset); + return; + } + s->window[w].osdsize = val; + break; + } + break; + case FIMD_VIDWADD0_START ... FIMD_VIDWADD0_END: + w = (offset - FIMD_VIDWADD0_START) >> 3; + i = ((offset - FIMD_VIDWADD0_START) >> 2) & 1; + if (i == fimd_get_buffer_id(&s->window[w]) && + s->window[w].buf_start[i] != val) { + s->window[w].buf_start[i] = val; + fimd_update_memory_section(s, w); + break; + } + s->window[w].buf_start[i] = val; + break; + case FIMD_VIDWADD1_START ... FIMD_VIDWADD1_END: + w = (offset - FIMD_VIDWADD1_START) >> 3; + i = ((offset - FIMD_VIDWADD1_START) >> 2) & 1; + s->window[w].buf_end[i] = val; + break; + case FIMD_VIDWADD2_START ... 
FIMD_VIDWADD2_END: + w = (offset - FIMD_VIDWADD2_START) >> 2; + if (((val & FIMD_VIDWADD2_PAGEWIDTH) != s->window[w].virtpage_width) || + (((val >> FIMD_VIDWADD2_OFFSIZE_SHIFT) & FIMD_VIDWADD2_OFFSIZE) != + s->window[w].virtpage_offsize)) { + s->window[w].virtpage_width = val & FIMD_VIDWADD2_PAGEWIDTH; + s->window[w].virtpage_offsize = + (val >> FIMD_VIDWADD2_OFFSIZE_SHIFT) & FIMD_VIDWADD2_OFFSIZE; + fimd_update_memory_section(s, w); + } + break; + case FIMD_VIDINTCON0: + s->vidintcon[0] = val; + break; + case FIMD_VIDINTCON1: + s->vidintcon[1] &= ~(val & 7); + exynos4210_fimd_update_irq(s); + break; + case FIMD_WKEYCON_START ... FIMD_WKEYCON_END: + w = ((offset - FIMD_WKEYCON_START) >> 3) + 1; + i = ((offset - FIMD_WKEYCON_START) >> 2) & 1; + s->window[w].keycon[i] = val; + break; + case FIMD_WKEYALPHA_START ... FIMD_WKEYALPHA_END: + w = ((offset - FIMD_WKEYALPHA_START) >> 2) + 1; + s->window[w].keyalpha = val; + break; + case FIMD_DITHMODE: + s->dithmode = val; + break; + case FIMD_WINMAP_START ... FIMD_WINMAP_END: + w = (offset - FIMD_WINMAP_START) >> 2; + old_value = s->window[w].winmap; + s->window[w].winmap = val; + if ((val & FIMD_WINMAP_EN) ^ (old_value & FIMD_WINMAP_EN)) { + exynos4210_fimd_invalidate(s); + exynos4210_fimd_update_win_bppmode(s, w); + exynos4210_fimd_trace_bppmode(s, w, 0xFFFFFFFF); + exynos4210_fimd_update(s); + } + break; + case FIMD_WPALCON_HIGH ... FIMD_WPALCON_LOW: + i = (offset - FIMD_WPALCON_HIGH) >> 2; + s->wpalcon[i] = val; + if (s->wpalcon[1] & FIMD_WPALCON_UPDATEEN) { + for (w = 0; w < NUM_OF_WINDOWS; w++) { + exynos4210_fimd_update_win_bppmode(s, w); + fimd_update_get_alpha(s, w); + } + } + break; + case FIMD_TRIGCON: + val = (val & ~FIMD_TRIGCON_ROMASK) | (s->trigcon & FIMD_TRIGCON_ROMASK); + s->trigcon = val; + break; + case FIMD_I80IFCON_START ... FIMD_I80IFCON_END: + s->i80ifcon[(offset - FIMD_I80IFCON_START) >> 2] = val; + break; + case FIMD_COLORGAINCON: + s->colorgaincon = val; + break; + case FIMD_LDI_CMDCON0 ... FIMD_LDI_CMDCON1: + s->ldi_cmdcon[(offset - FIMD_LDI_CMDCON0) >> 2] = val; + break; + case FIMD_SIFCCON0 ... FIMD_SIFCCON2: + i = (offset - FIMD_SIFCCON0) >> 2; + if (i != 2) { + s->sifccon[i] = val; + } + break; + case FIMD_HUECOEFCR_START ... FIMD_HUECOEFCR_END: + i = (offset - FIMD_HUECOEFCR_START) >> 2; + s->huecoef_cr[i] = val; + break; + case FIMD_HUECOEFCB_START ... FIMD_HUECOEFCB_END: + i = (offset - FIMD_HUECOEFCB_START) >> 2; + s->huecoef_cb[i] = val; + break; + case FIMD_HUEOFFSET: + s->hueoffset = val; + break; + case FIMD_VIDWALPHA_START ... FIMD_VIDWALPHA_END: + w = ((offset - FIMD_VIDWALPHA_START) >> 3); + i = ((offset - FIMD_VIDWALPHA_START) >> 2) & 1; + if (w == 0) { + s->window[w].alpha_val[i] = val; + } else { + s->window[w].alpha_val[i] = (val & FIMD_VIDALPHA_ALPHA_LOWER) | + (s->window[w].alpha_val[i] & FIMD_VIDALPHA_ALPHA_UPPER); + } + break; + case FIMD_BLENDEQ_START ... FIMD_BLENDEQ_END: + s->window[(offset - FIMD_BLENDEQ_START) >> 2].blendeq = val; + break; + case FIMD_BLENDCON: + old_value = s->blendcon; + s->blendcon = val; + if ((s->blendcon & FIMD_ALPHA_8BIT) != (old_value & FIMD_ALPHA_8BIT)) { + for (w = 0; w < NUM_OF_WINDOWS; w++) { + fimd_update_get_alpha(s, w); + } + } + break; + case FIMD_WRTQOSCON_START ... FIMD_WRTQOSCON_END: + s->window[(offset - FIMD_WRTQOSCON_START) >> 2].rtqoscon = val; + break; + case FIMD_I80IFCMD_START ... FIMD_I80IFCMD_END: + s->i80ifcmd[(offset - FIMD_I80IFCMD_START) >> 2] = val; + break; + case FIMD_VIDW0ADD0_B2 ... 
FIMD_VIDW4ADD0_B2: + if (offset & 0x0004) { + qemu_log_mask(LOG_GUEST_ERROR, + "FIMD: bad write offset 0x%08"HWADDR_PRIx"\n", + offset); + break; + } + w = (offset - FIMD_VIDW0ADD0_B2) >> 3; + if (fimd_get_buffer_id(&s->window[w]) == 2 && + s->window[w].buf_start[2] != val) { + s->window[w].buf_start[2] = val; + fimd_update_memory_section(s, w); + break; + } + s->window[w].buf_start[2] = val; + break; + case FIMD_SHD_ADD0_START ... FIMD_SHD_ADD0_END: + if (offset & 0x0004) { + qemu_log_mask(LOG_GUEST_ERROR, + "FIMD: bad write offset 0x%08"HWADDR_PRIx"\n", + offset); + break; + } + s->window[(offset - FIMD_SHD_ADD0_START) >> 3].shadow_buf_start = val; + break; + case FIMD_SHD_ADD1_START ... FIMD_SHD_ADD1_END: + if (offset & 0x0004) { + qemu_log_mask(LOG_GUEST_ERROR, + "FIMD: bad write offset 0x%08"HWADDR_PRIx"\n", + offset); + break; + } + s->window[(offset - FIMD_SHD_ADD1_START) >> 3].shadow_buf_end = val; + break; + case FIMD_SHD_ADD2_START ... FIMD_SHD_ADD2_END: + s->window[(offset - FIMD_SHD_ADD2_START) >> 2].shadow_buf_size = val; + break; + case FIMD_PAL_MEM_START ... FIMD_PAL_MEM_END: + w = (offset - FIMD_PAL_MEM_START) >> 10; + i = ((offset - FIMD_PAL_MEM_START) >> 2) & 0xFF; + s->window[w].palette[i] = val; + break; + case FIMD_PALMEM_AL_START ... FIMD_PALMEM_AL_END: + /* Palette memory aliases for windows 0 and 1 */ + w = (offset - FIMD_PALMEM_AL_START) >> 10; + i = ((offset - FIMD_PALMEM_AL_START) >> 2) & 0xFF; + s->window[w].palette[i] = val; + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "FIMD: bad write offset 0x%08"HWADDR_PRIx"\n", offset); + break; + } +} + +static uint64_t exynos4210_fimd_read(void *opaque, hwaddr offset, + unsigned size) +{ + Exynos4210fimdState *s = (Exynos4210fimdState *)opaque; + int w, i; + uint32_t ret = 0; + + DPRINT_L2("read offset 0x%08x\n", offset); + + switch (offset) { + case FIMD_VIDCON0 ... FIMD_VIDCON3: + return s->vidcon[(offset - FIMD_VIDCON0) >> 2]; + case FIMD_VIDTCON_START ... FIMD_VIDTCON_END: + return s->vidtcon[(offset - FIMD_VIDTCON_START) >> 2]; + case FIMD_WINCON_START ... FIMD_WINCON_END: + return s->window[(offset - FIMD_WINCON_START) >> 2].wincon; + case FIMD_SHADOWCON: + return s->shadowcon; + case FIMD_WINCHMAP: + return s->winchmap; + case FIMD_VIDOSD_START ... FIMD_VIDOSD_END: + w = (offset - FIMD_VIDOSD_START) >> 4; + i = ((offset - FIMD_VIDOSD_START) & 0xF) >> 2; + switch (i) { + case 0: + ret = ((s->window[w].lefttop_x & FIMD_VIDOSD_COORD_MASK) << + FIMD_VIDOSD_HOR_SHIFT) | + (s->window[w].lefttop_y & FIMD_VIDOSD_COORD_MASK); + break; + case 1: + ret = ((s->window[w].rightbot_x & FIMD_VIDOSD_COORD_MASK) << + FIMD_VIDOSD_HOR_SHIFT) | + (s->window[w].rightbot_y & FIMD_VIDOSD_COORD_MASK); + break; + case 2: + if (w == 0) { + ret = s->window[w].osdsize; + } else { + ret = (pack_upper_4(s->window[w].alpha_val[0]) << + FIMD_VIDOSD_AEN0_SHIFT) | + pack_upper_4(s->window[w].alpha_val[1]); + } + break; + case 3: + if (w != 1 && w != 2) { + qemu_log_mask(LOG_GUEST_ERROR, + "FIMD: bad read offset 0x%08"HWADDR_PRIx"\n", + offset); + return 0xBAADBAAD; + } + ret = s->window[w].osdsize; + break; + } + return ret; + case FIMD_VIDWADD0_START ... FIMD_VIDWADD0_END: + w = (offset - FIMD_VIDWADD0_START) >> 3; + i = ((offset - FIMD_VIDWADD0_START) >> 2) & 1; + return s->window[w].buf_start[i]; + case FIMD_VIDWADD1_START ... FIMD_VIDWADD1_END: + w = (offset - FIMD_VIDWADD1_START) >> 3; + i = ((offset - FIMD_VIDWADD1_START) >> 2) & 1; + return s->window[w].buf_end[i]; + case FIMD_VIDWADD2_START ... 
FIMD_VIDWADD2_END: + w = (offset - FIMD_VIDWADD2_START) >> 2; + return s->window[w].virtpage_width | (s->window[w].virtpage_offsize << + FIMD_VIDWADD2_OFFSIZE_SHIFT); + case FIMD_VIDINTCON0 ... FIMD_VIDINTCON1: + return s->vidintcon[(offset - FIMD_VIDINTCON0) >> 2]; + case FIMD_WKEYCON_START ... FIMD_WKEYCON_END: + w = ((offset - FIMD_WKEYCON_START) >> 3) + 1; + i = ((offset - FIMD_WKEYCON_START) >> 2) & 1; + return s->window[w].keycon[i]; + case FIMD_WKEYALPHA_START ... FIMD_WKEYALPHA_END: + w = ((offset - FIMD_WKEYALPHA_START) >> 2) + 1; + return s->window[w].keyalpha; + case FIMD_DITHMODE: + return s->dithmode; + case FIMD_WINMAP_START ... FIMD_WINMAP_END: + return s->window[(offset - FIMD_WINMAP_START) >> 2].winmap; + case FIMD_WPALCON_HIGH ... FIMD_WPALCON_LOW: + return s->wpalcon[(offset - FIMD_WPALCON_HIGH) >> 2]; + case FIMD_TRIGCON: + return s->trigcon; + case FIMD_I80IFCON_START ... FIMD_I80IFCON_END: + return s->i80ifcon[(offset - FIMD_I80IFCON_START) >> 2]; + case FIMD_COLORGAINCON: + return s->colorgaincon; + case FIMD_LDI_CMDCON0 ... FIMD_LDI_CMDCON1: + return s->ldi_cmdcon[(offset - FIMD_LDI_CMDCON0) >> 2]; + case FIMD_SIFCCON0 ... FIMD_SIFCCON2: + i = (offset - FIMD_SIFCCON0) >> 2; + return s->sifccon[i]; + case FIMD_HUECOEFCR_START ... FIMD_HUECOEFCR_END: + i = (offset - FIMD_HUECOEFCR_START) >> 2; + return s->huecoef_cr[i]; + case FIMD_HUECOEFCB_START ... FIMD_HUECOEFCB_END: + i = (offset - FIMD_HUECOEFCB_START) >> 2; + return s->huecoef_cb[i]; + case FIMD_HUEOFFSET: + return s->hueoffset; + case FIMD_VIDWALPHA_START ... FIMD_VIDWALPHA_END: + w = ((offset - FIMD_VIDWALPHA_START) >> 3); + i = ((offset - FIMD_VIDWALPHA_START) >> 2) & 1; + return s->window[w].alpha_val[i] & + (w == 0 ? 0xFFFFFF : FIMD_VIDALPHA_ALPHA_LOWER); + case FIMD_BLENDEQ_START ... FIMD_BLENDEQ_END: + return s->window[(offset - FIMD_BLENDEQ_START) >> 2].blendeq; + case FIMD_BLENDCON: + return s->blendcon; + case FIMD_WRTQOSCON_START ... FIMD_WRTQOSCON_END: + return s->window[(offset - FIMD_WRTQOSCON_START) >> 2].rtqoscon; + case FIMD_I80IFCMD_START ... FIMD_I80IFCMD_END: + return s->i80ifcmd[(offset - FIMD_I80IFCMD_START) >> 2]; + case FIMD_VIDW0ADD0_B2 ... FIMD_VIDW4ADD0_B2: + if (offset & 0x0004) { + break; + } + return s->window[(offset - FIMD_VIDW0ADD0_B2) >> 3].buf_start[2]; + case FIMD_SHD_ADD0_START ... FIMD_SHD_ADD0_END: + if (offset & 0x0004) { + break; + } + return s->window[(offset - FIMD_SHD_ADD0_START) >> 3].shadow_buf_start; + case FIMD_SHD_ADD1_START ... FIMD_SHD_ADD1_END: + if (offset & 0x0004) { + break; + } + return s->window[(offset - FIMD_SHD_ADD1_START) >> 3].shadow_buf_end; + case FIMD_SHD_ADD2_START ... FIMD_SHD_ADD2_END: + return s->window[(offset - FIMD_SHD_ADD2_START) >> 2].shadow_buf_size; + case FIMD_PAL_MEM_START ... FIMD_PAL_MEM_END: + w = (offset - FIMD_PAL_MEM_START) >> 10; + i = ((offset - FIMD_PAL_MEM_START) >> 2) & 0xFF; + return s->window[w].palette[i]; + case FIMD_PALMEM_AL_START ... 
FIMD_PALMEM_AL_END: + /* Palette aliases for win 0,1 */ + w = (offset - FIMD_PALMEM_AL_START) >> 10; + i = ((offset - FIMD_PALMEM_AL_START) >> 2) & 0xFF; + return s->window[w].palette[i]; + } + + qemu_log_mask(LOG_GUEST_ERROR, + "FIMD: bad read offset 0x%08"HWADDR_PRIx"\n", offset); + return 0xBAADBAAD; +} + +static const MemoryRegionOps exynos4210_fimd_mmio_ops = { + .read = exynos4210_fimd_read, + .write = exynos4210_fimd_write, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + .unaligned = false + }, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static int exynos4210_fimd_load(void *opaque, int version_id) +{ + Exynos4210fimdState *s = (Exynos4210fimdState *)opaque; + int w; + + if (version_id != 1) { + return -EINVAL; + } + + for (w = 0; w < NUM_OF_WINDOWS; w++) { + exynos4210_fimd_update_win_bppmode(s, w); + fimd_update_get_alpha(s, w); + fimd_update_memory_section(s, w); + } + + /* Redraw the whole screen */ + exynos4210_update_resolution(s); + exynos4210_fimd_invalidate(s); + exynos4210_fimd_enable(s, (s->vidcon[0] & FIMD_VIDCON0_ENVID_MASK) == + FIMD_VIDCON0_ENVID_MASK); + return 0; +} + +static const VMStateDescription exynos4210_fimd_window_vmstate = { + .name = "exynos4210.fimd_window", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(wincon, Exynos4210fimdWindow), + VMSTATE_UINT32_ARRAY(buf_start, Exynos4210fimdWindow, 3), + VMSTATE_UINT32_ARRAY(buf_end, Exynos4210fimdWindow, 3), + VMSTATE_UINT32_ARRAY(keycon, Exynos4210fimdWindow, 2), + VMSTATE_UINT32(keyalpha, Exynos4210fimdWindow), + VMSTATE_UINT32(winmap, Exynos4210fimdWindow), + VMSTATE_UINT32(blendeq, Exynos4210fimdWindow), + VMSTATE_UINT32(rtqoscon, Exynos4210fimdWindow), + VMSTATE_UINT32_ARRAY(palette, Exynos4210fimdWindow, 256), + VMSTATE_UINT32(shadow_buf_start, Exynos4210fimdWindow), + VMSTATE_UINT32(shadow_buf_end, Exynos4210fimdWindow), + VMSTATE_UINT32(shadow_buf_size, Exynos4210fimdWindow), + VMSTATE_UINT16(lefttop_x, Exynos4210fimdWindow), + VMSTATE_UINT16(lefttop_y, Exynos4210fimdWindow), + VMSTATE_UINT16(rightbot_x, Exynos4210fimdWindow), + VMSTATE_UINT16(rightbot_y, Exynos4210fimdWindow), + VMSTATE_UINT32(osdsize, Exynos4210fimdWindow), + VMSTATE_UINT32_ARRAY(alpha_val, Exynos4210fimdWindow, 2), + VMSTATE_UINT16(virtpage_width, Exynos4210fimdWindow), + VMSTATE_UINT16(virtpage_offsize, Exynos4210fimdWindow), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription exynos4210_fimd_vmstate = { + .name = "exynos4210.fimd", + .version_id = 1, + .minimum_version_id = 1, + .post_load = exynos4210_fimd_load, + .fields = (VMStateField[]) { + VMSTATE_UINT32_ARRAY(vidcon, Exynos4210fimdState, 4), + VMSTATE_UINT32_ARRAY(vidtcon, Exynos4210fimdState, 4), + VMSTATE_UINT32(shadowcon, Exynos4210fimdState), + VMSTATE_UINT32(winchmap, Exynos4210fimdState), + VMSTATE_UINT32_ARRAY(vidintcon, Exynos4210fimdState, 2), + VMSTATE_UINT32(dithmode, Exynos4210fimdState), + VMSTATE_UINT32_ARRAY(wpalcon, Exynos4210fimdState, 2), + VMSTATE_UINT32(trigcon, Exynos4210fimdState), + VMSTATE_UINT32_ARRAY(i80ifcon, Exynos4210fimdState, 4), + VMSTATE_UINT32(colorgaincon, Exynos4210fimdState), + VMSTATE_UINT32_ARRAY(ldi_cmdcon, Exynos4210fimdState, 2), + VMSTATE_UINT32_ARRAY(sifccon, Exynos4210fimdState, 3), + VMSTATE_UINT32_ARRAY(huecoef_cr, Exynos4210fimdState, 4), + VMSTATE_UINT32_ARRAY(huecoef_cb, Exynos4210fimdState, 4), + VMSTATE_UINT32(hueoffset, Exynos4210fimdState), + VMSTATE_UINT32_ARRAY(i80ifcmd, Exynos4210fimdState, 12), + VMSTATE_UINT32(blendcon, Exynos4210fimdState), 
+ VMSTATE_STRUCT_ARRAY(window, Exynos4210fimdState, 5, 1, + exynos4210_fimd_window_vmstate, Exynos4210fimdWindow), + VMSTATE_END_OF_LIST() + } +}; + +static const GraphicHwOps exynos4210_fimd_ops = { + .invalidate = exynos4210_fimd_invalidate, + .gfx_update = exynos4210_fimd_update, +}; + +static void exynos4210_fimd_init(Object *obj) +{ + Exynos4210fimdState *s = EXYNOS4210_FIMD(obj); + SysBusDevice *dev = SYS_BUS_DEVICE(obj); + + s->ifb = NULL; + + sysbus_init_irq(dev, &s->irq[0]); + sysbus_init_irq(dev, &s->irq[1]); + sysbus_init_irq(dev, &s->irq[2]); + + memory_region_init_io(&s->iomem, obj, &exynos4210_fimd_mmio_ops, s, + "exynos4210.fimd", FIMD_REGS_SIZE); + sysbus_init_mmio(dev, &s->iomem); +} + +static void exynos4210_fimd_realize(DeviceState *dev, Error **errp) +{ + Exynos4210fimdState *s = EXYNOS4210_FIMD(dev); + + s->console = graphic_console_init(dev, 0, &exynos4210_fimd_ops, s); +} + +static void exynos4210_fimd_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->vmsd = &exynos4210_fimd_vmstate; + dc->reset = exynos4210_fimd_reset; + dc->realize = exynos4210_fimd_realize; +} + +static const TypeInfo exynos4210_fimd_info = { + .name = TYPE_EXYNOS4210_FIMD, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(Exynos4210fimdState), + .instance_init = exynos4210_fimd_init, + .class_init = exynos4210_fimd_class_init, +}; + +static void exynos4210_fimd_register_types(void) +{ + type_register_static(&exynos4210_fimd_info); +} + +type_init(exynos4210_fimd_register_types) diff --git a/hw/display/framebuffer.c b/hw/display/framebuffer.c new file mode 100644 index 000000000..4485aa335 --- /dev/null +++ b/hw/display/framebuffer.c @@ -0,0 +1,122 @@ +/* + * Framebuffer device helper routines + * + * Copyright (c) 2009 CodeSourcery + * Written by Paul Brook + * + * This code is licensed under the GNU GPLv2. + * + * Contributions after 2012-01-13 are licensed under the terms of the + * GNU GPL, version 2 or (at your option) any later version. + */ + +/* TODO: + - Do something similar for framebuffers with local ram + - Handle rotation here instead of hacking dest_pitch + - Use common pixel conversion routines instead of per-device drawfn + - Remove all DisplayState knowledge from devices. + */ + +#include "qemu/osdep.h" +#include "ui/console.h" +#include "framebuffer.h" + +void framebuffer_update_memory_section( + MemoryRegionSection *mem_section, + MemoryRegion *root, + hwaddr base, + unsigned rows, + unsigned src_width) +{ + hwaddr src_len = (hwaddr)rows * src_width; + + if (mem_section->mr) { + memory_region_set_log(mem_section->mr, false, DIRTY_MEMORY_VGA); + memory_region_unref(mem_section->mr); + mem_section->mr = NULL; + } + + *mem_section = memory_region_find(root, base, src_len); + if (!mem_section->mr) { + return; + } + + if (int128_get64(mem_section->size) < src_len || + !memory_region_is_ram(mem_section->mr)) { + memory_region_unref(mem_section->mr); + mem_section->mr = NULL; + return; + } + + memory_region_set_log(mem_section->mr, true, DIRTY_MEMORY_VGA); +} + +/* Render an image from a shared memory framebuffer. */ +void framebuffer_update_display( + DisplaySurface *ds, + MemoryRegionSection *mem_section, + int cols, /* Width in pixels. */ + int rows, /* Height in pixels. */ + int src_width, /* Length of source line, in bytes. */ + int dest_row_pitch, /* Bytes between adjacent horizontal output pixels. */ + int dest_col_pitch, /* Bytes between adjacent vertical output pixels. 
*/ + int invalidate, /* nonzero to redraw the whole image. */ + drawfn fn, + void *opaque, + int *first_row, /* Input and output. */ + int *last_row /* Output only */) +{ + DirtyBitmapSnapshot *snap; + uint8_t *dest; + uint8_t *src; + int first, last = 0; + int dirty; + int i; + ram_addr_t addr; + MemoryRegion *mem; + + i = *first_row; + *first_row = -1; + + mem = mem_section->mr; + if (!mem) { + return; + } + + addr = mem_section->offset_within_region; + src = memory_region_get_ram_ptr(mem) + addr; + + dest = surface_data(ds); + if (dest_col_pitch < 0) { + dest -= dest_col_pitch * (cols - 1); + } + if (dest_row_pitch < 0) { + dest -= dest_row_pitch * (rows - 1); + } + first = -1; + + addr += i * src_width; + src += i * src_width; + dest += i * dest_row_pitch; + + snap = memory_region_snapshot_and_clear_dirty(mem, addr, src_width * rows, + DIRTY_MEMORY_VGA); + for (; i < rows; i++) { + dirty = memory_region_snapshot_get_dirty(mem, snap, addr, src_width); + if (dirty || invalidate) { + fn(opaque, dest, src, cols, dest_col_pitch); + if (first == -1) { + first = i; + } + last = i; + } + addr += src_width; + src += src_width; + dest += dest_row_pitch; + } + g_free(snap); + if (first < 0) { + return; + } + *first_row = first; + *last_row = last; +} diff --git a/hw/display/framebuffer.h b/hw/display/framebuffer.h new file mode 100644 index 000000000..38fa0dcec --- /dev/null +++ b/hw/display/framebuffer.h @@ -0,0 +1,65 @@ +#ifndef QEMU_FRAMEBUFFER_H +#define QEMU_FRAMEBUFFER_H + +#include "exec/memory.h" + +/* Framebuffer device helper routines. */ + +typedef void (*drawfn)(void *, uint8_t *, const uint8_t *, int, int); + +/* framebuffer_update_memory_section: Update framebuffer + * #MemoryRegionSection, for example if the framebuffer is switched to + * a different memory area. + * + * @mem_section: Output #MemoryRegionSection, to be passed to + * framebuffer_update_display(). + * @root: #MemoryRegion within which the framebuffer lies. + * @base: Base address of the framebuffer within @root. + * @rows: Height of the screen. + * @src_width: Number of bytes in framebuffer memory between two rows. + */ +void framebuffer_update_memory_section( + MemoryRegionSection *mem_section, + MemoryRegion *root, + hwaddr base, + unsigned rows, + unsigned src_width); + +/* framebuffer_update_display: Draw the framebuffer on a surface. + * + * @ds: #DisplaySurface to draw to. + * @mem_section: #MemoryRegionSection provided by + * framebuffer_update_memory_section(). + * @cols: Width of the screen. + * @rows: Height of the screen. + * @src_width: Number of bytes in framebuffer memory between two rows. + * @dest_row_pitch: Number of bytes in the surface data between two rows. + * Negative if the surface rows are stored in the opposite order (e.g. + * bottom-to-top) compared to the framebuffer. + * @dest_col_pitch: Number of bytes in the surface data between two pixels. + * Negative if the surface pixels are stored in the opposite order (e.g. + * right-to-left) compared to the framebuffer. + * @invalidate: True if the function should redraw the whole screen + * without checking the DIRTY_MEMORY_VGA dirty bitmap. + * @fn: Drawing function to be called for each row that has to be drawn. + * @opaque: Opaque pointer passed to @fn. + * @first_row: Pointer to an integer, receives the number of the first row + * that was drawn (either the first dirty row, or 0 if @invalidate is true). + * @last_row: Pointer to an integer, receives the number of the last row that + * was drawn (either the last dirty row, or @rows-1 if @invalidate is true). 
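+ * + * Minimal calling sketch (illustrative; the device fields and draw function + * named here are hypothetical, not part of this API), run from a device's + * gfx_update handler: + * + * int first = 0, last; + * + * framebuffer_update_display(surface, &s->fbsection, cols, rows, + * src_width, dest_row_pitch, dest_col_pitch, + * s->invalidate, my_draw_line_fn, s, + * &first, &last); + * if (first >= 0) { + * dpy_gfx_update(s->con, 0, first, cols, last - first + 1); + * } + * + * Note that @first_row is also read on entry as the first row to scan, so + * callers normally reset it to 0 (and clear their invalidate flag) on each + * frame. 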
+ */ +void framebuffer_update_display( + DisplaySurface *ds, + MemoryRegionSection *mem_section, + int cols, + int rows, + int src_width, + int dest_row_pitch, + int dest_col_pitch, + int invalidate, + drawfn fn, + void *opaque, + int *first_row, + int *last_row); + +#endif diff --git a/hw/display/g364fb.c b/hw/display/g364fb.c new file mode 100644 index 000000000..caca86d77 --- /dev/null +++ b/hw/display/g364fb.c @@ -0,0 +1,554 @@ +/* + * QEMU G364 framebuffer Emulator. + * + * Copyright (c) 2007-2011 Herve Poussineau + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . + */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "hw/hw.h" +#include "hw/irq.h" +#include "hw/qdev-properties.h" +#include "qapi/error.h" +#include "qemu/error-report.h" +#include "qemu/module.h" +#include "ui/console.h" +#include "ui/pixel_ops.h" +#include "trace.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "qom/object.h" + +typedef struct G364State { + /* hardware */ + uint32_t vram_size; + qemu_irq irq; + MemoryRegion mem_vram; + MemoryRegion mem_ctrl; + /* registers */ + uint8_t color_palette[256][3]; + uint8_t cursor_palette[3][3]; + uint16_t cursor[512]; + uint32_t cursor_position; + uint32_t ctla; + uint32_t top_of_screen; + uint32_t width, height; /* in pixels */ + /* display refresh support */ + QemuConsole *con; + int depth; + int blanked; +} G364State; + +#define REG_BOOT 0x000000 +#define REG_DISPLAY 0x000118 +#define REG_VDISPLAY 0x000150 +#define REG_CTLA 0x000300 +#define REG_TOP 0x000400 +#define REG_CURS_PAL 0x000508 +#define REG_CURS_POS 0x000638 +#define REG_CLR_PAL 0x000800 +#define REG_CURS_PAT 0x001000 +#define REG_RESET 0x100000 + +#define CTLA_FORCE_BLANK 0x00000400 +#define CTLA_NO_CURSOR 0x00800000 + +#define G364_PAGE_SIZE 4096 + +static inline int check_dirty(G364State *s, DirtyBitmapSnapshot *snap, ram_addr_t page) +{ + return memory_region_snapshot_get_dirty(&s->mem_vram, snap, page, G364_PAGE_SIZE); +} + +static void g364fb_draw_graphic8(G364State *s) +{ + DisplaySurface *surface = qemu_console_surface(s->con); + DirtyBitmapSnapshot *snap; + int i, w; + uint8_t *vram; + uint8_t *data_display, *dd; + ram_addr_t page; + int x, y; + int xmin, xmax; + int ymin, ymax; + int xcursor, ycursor; + unsigned int (*rgb_to_pixel)(unsigned int r, unsigned int g, unsigned int b); + + switch (surface_bits_per_pixel(surface)) { + case 8: + rgb_to_pixel = rgb_to_pixel8; + w = 1; + break; + case 15: + rgb_to_pixel = rgb_to_pixel15; + w = 2; + break; + case 16: + rgb_to_pixel = rgb_to_pixel16; + w = 2; + break; + case 32: + rgb_to_pixel = rgb_to_pixel32; + w = 4; + break; + default: + hw_error("g364: unknown host depth %d", + surface_bits_per_pixel(surface)); + return; + } + + page = 0; + + x = y = 0; + xmin = s->width; + xmax = 0; + ymin = s->height; + ymax = 0; + + if (!(s->ctla & CTLA_NO_CURSOR)) { + xcursor = s->cursor_position >> 12; + ycursor = s->cursor_position & 0xfff; + } else { + xcursor = 
ycursor = -65; + } + + vram = memory_region_get_ram_ptr(&s->mem_vram) + s->top_of_screen; + /* XXX: out of range in vram? */ + data_display = dd = surface_data(surface); + snap = memory_region_snapshot_and_clear_dirty(&s->mem_vram, 0, s->vram_size, + DIRTY_MEMORY_VGA); + while (y < s->height) { + if (check_dirty(s, snap, page)) { + if (y < ymin) + ymin = ymax = y; + if (x < xmin) + xmin = x; + for (i = 0; i < G364_PAGE_SIZE; i++) { + uint8_t index; + unsigned int color; + if (unlikely((y >= ycursor && y < ycursor + 64) && + (x >= xcursor && x < xcursor + 64))) { + /* pointer area */ + int xdiff = x - xcursor; + uint16_t curs = s->cursor[(y - ycursor) * 8 + xdiff / 8]; + int op = (curs >> ((xdiff & 7) * 2)) & 3; + if (likely(op == 0)) { + /* transparent */ + index = *vram; + color = (*rgb_to_pixel)( + s->color_palette[index][0], + s->color_palette[index][1], + s->color_palette[index][2]); + } else { + /* get cursor color */ + index = op - 1; + color = (*rgb_to_pixel)( + s->cursor_palette[index][0], + s->cursor_palette[index][1], + s->cursor_palette[index][2]); + } + } else { + /* normal area */ + index = *vram; + color = (*rgb_to_pixel)( + s->color_palette[index][0], + s->color_palette[index][1], + s->color_palette[index][2]); + } + memcpy(dd, &color, w); + dd += w; + x++; + vram++; + if (x == s->width) { + xmax = s->width - 1; + y++; + if (y == s->height) { + ymax = s->height - 1; + goto done; + } + data_display = dd = data_display + surface_stride(surface); + xmin = 0; + x = 0; + } + } + if (x > xmax) + xmax = x; + if (y > ymax) + ymax = y; + } else { + int dy; + if (xmax || ymax) { + dpy_gfx_update(s->con, xmin, ymin, + xmax - xmin + 1, ymax - ymin + 1); + xmin = s->width; + xmax = 0; + ymin = s->height; + ymax = 0; + } + x += G364_PAGE_SIZE; + dy = x / s->width; + x = x % s->width; + y += dy; + vram += G364_PAGE_SIZE; + data_display += dy * surface_stride(surface); + dd = data_display + x * w; + } + page += G364_PAGE_SIZE; + } + +done: + if (xmax || ymax) { + dpy_gfx_update(s->con, xmin, ymin, xmax - xmin + 1, ymax - ymin + 1); + } + g_free(snap); +} + +static void g364fb_draw_blank(G364State *s) +{ + DisplaySurface *surface = qemu_console_surface(s->con); + int i, w; + uint8_t *d; + + if (s->blanked) { + /* Screen is already blank. 
No need to redraw it */ + return; + } + + w = s->width * surface_bytes_per_pixel(surface); + d = surface_data(surface); + for (i = 0; i < s->height; i++) { + memset(d, 0, w); + d += surface_stride(surface); + } + + dpy_gfx_update_full(s->con); + s->blanked = 1; +} + +static void g364fb_update_display(void *opaque) +{ + G364State *s = opaque; + DisplaySurface *surface = qemu_console_surface(s->con); + + qemu_flush_coalesced_mmio_buffer(); + + if (s->width == 0 || s->height == 0) + return; + + if (s->width != surface_width(surface) || + s->height != surface_height(surface)) { + qemu_console_resize(s->con, s->width, s->height); + } + + if (s->ctla & CTLA_FORCE_BLANK) { + g364fb_draw_blank(s); + } else if (s->depth == 8) { + g364fb_draw_graphic8(s); + } else { + error_report("g364: unknown guest depth %d", s->depth); + } + + qemu_irq_raise(s->irq); +} + +static inline void g364fb_invalidate_display(void *opaque) +{ + G364State *s = opaque; + + s->blanked = 0; + memory_region_set_dirty(&s->mem_vram, 0, s->vram_size); +} + +static void g364fb_reset(G364State *s) +{ + uint8_t *vram = memory_region_get_ram_ptr(&s->mem_vram); + + qemu_irq_lower(s->irq); + + memset(s->color_palette, 0, sizeof(s->color_palette)); + memset(s->cursor_palette, 0, sizeof(s->cursor_palette)); + memset(s->cursor, 0, sizeof(s->cursor)); + s->cursor_position = 0; + s->ctla = 0; + s->top_of_screen = 0; + s->width = s->height = 0; + memset(vram, 0, s->vram_size); + g364fb_invalidate_display(s); +} + +/* called for accesses to io ports */ +static uint64_t g364fb_ctrl_read(void *opaque, + hwaddr addr, + unsigned int size) +{ + G364State *s = opaque; + uint32_t val; + + if (addr >= REG_CURS_PAT && addr < REG_CURS_PAT + 0x1000) { + /* cursor pattern */ + int idx = (addr - REG_CURS_PAT) >> 3; + val = s->cursor[idx]; + } else if (addr >= REG_CURS_PAL && addr < REG_CURS_PAL + 0x18) { + /* cursor palette */ + int idx = (addr - REG_CURS_PAL) >> 3; + val = ((uint32_t)s->cursor_palette[idx][0] << 16); + val |= ((uint32_t)s->cursor_palette[idx][1] << 8); + val |= ((uint32_t)s->cursor_palette[idx][2] << 0); + } else { + switch (addr) { + case REG_DISPLAY: + val = s->width / 4; + break; + case REG_VDISPLAY: + val = s->height * 2; + break; + case REG_CTLA: + val = s->ctla; + break; + default: + { + error_report("g364: invalid read at [" TARGET_FMT_plx "]", + addr); + val = 0; + break; + } + } + } + + trace_g364fb_read(addr, val); + + return val; +} + +static void g364fb_update_depth(G364State *s) +{ + static const int depths[8] = { 1, 2, 4, 8, 15, 16, 0 }; + s->depth = depths[(s->ctla & 0x00700000) >> 20]; +} + +static void g364_invalidate_cursor_position(G364State *s) +{ + DisplaySurface *surface = qemu_console_surface(s->con); + int ymin, ymax, start, end; + + /* invalidate only near the cursor */ + ymin = s->cursor_position & 0xfff; + ymax = MIN(s->height, ymin + 64); + start = ymin * surface_stride(surface); + end = (ymax + 1) * surface_stride(surface); + + memory_region_set_dirty(&s->mem_vram, start, end - start); +} + +static void g364fb_ctrl_write(void *opaque, + hwaddr addr, + uint64_t val, + unsigned int size) +{ + G364State *s = opaque; + + trace_g364fb_write(addr, val); + + if (addr >= REG_CLR_PAL && addr < REG_CLR_PAL + 0x800) { + /* color palette */ + int idx = (addr - REG_CLR_PAL) >> 3; + s->color_palette[idx][0] = (val >> 16) & 0xff; + s->color_palette[idx][1] = (val >> 8) & 0xff; + s->color_palette[idx][2] = val & 0xff; + g364fb_invalidate_display(s); + } else if (addr >= REG_CURS_PAT && addr < REG_CURS_PAT + 0x1000) { + /* 
cursor pattern */ + int idx = (addr - REG_CURS_PAT) >> 3; + s->cursor[idx] = val; + g364fb_invalidate_display(s); + } else if (addr >= REG_CURS_PAL && addr < REG_CURS_PAL + 0x18) { + /* cursor palette */ + int idx = (addr - REG_CURS_PAL) >> 3; + s->cursor_palette[idx][0] = (val >> 16) & 0xff; + s->cursor_palette[idx][1] = (val >> 8) & 0xff; + s->cursor_palette[idx][2] = val & 0xff; + g364fb_invalidate_display(s); + } else { + switch (addr) { + case REG_BOOT: /* Boot timing */ + case 0x00108: /* Line timing: half sync */ + case 0x00110: /* Line timing: back porch */ + case 0x00120: /* Line timing: short display */ + case 0x00128: /* Frame timing: broad pulse */ + case 0x00130: /* Frame timing: v sync */ + case 0x00138: /* Frame timing: v preequalise */ + case 0x00140: /* Frame timing: v postequalise */ + case 0x00148: /* Frame timing: v blank */ + case 0x00158: /* Line timing: line time */ + case 0x00160: /* Frame store: line start */ + case 0x00168: /* vram cycle: mem init */ + case 0x00170: /* vram cycle: transfer delay */ + case 0x00200: /* vram cycle: mask register */ + /* ignore */ + break; + case REG_TOP: + s->top_of_screen = val; + g364fb_invalidate_display(s); + break; + case REG_DISPLAY: + s->width = val * 4; + break; + case REG_VDISPLAY: + s->height = val / 2; + break; + case REG_CTLA: + s->ctla = val; + g364fb_update_depth(s); + g364fb_invalidate_display(s); + break; + case REG_CURS_POS: + g364_invalidate_cursor_position(s); + s->cursor_position = val; + g364_invalidate_cursor_position(s); + break; + case REG_RESET: + g364fb_reset(s); + break; + default: + error_report("g364: invalid write of 0x%" PRIx64 + " at [" TARGET_FMT_plx "]", val, addr); + break; + } + } + qemu_irq_lower(s->irq); +} + +static const MemoryRegionOps g364fb_ctrl_ops = { + .read = g364fb_ctrl_read, + .write = g364fb_ctrl_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .impl.min_access_size = 4, + .impl.max_access_size = 4, +}; + +static int g364fb_post_load(void *opaque, int version_id) +{ + G364State *s = opaque; + + /* force refresh */ + g364fb_update_depth(s); + g364fb_invalidate_display(s); + + return 0; +} + +static const VMStateDescription vmstate_g364fb = { + .name = "g364fb", + .version_id = 2, + .minimum_version_id = 2, + .post_load = g364fb_post_load, + .fields = (VMStateField[]) { + VMSTATE_BUFFER_UNSAFE(color_palette, G364State, 0, 256 * 3), + VMSTATE_BUFFER_UNSAFE(cursor_palette, G364State, 0, 9), + VMSTATE_UINT16_ARRAY(cursor, G364State, 512), + VMSTATE_UINT32(cursor_position, G364State), + VMSTATE_UINT32(ctla, G364State), + VMSTATE_UINT32(top_of_screen, G364State), + VMSTATE_UINT32(width, G364State), + VMSTATE_UINT32(height, G364State), + VMSTATE_END_OF_LIST() + } +}; + +static const GraphicHwOps g364fb_ops = { + .invalidate = g364fb_invalidate_display, + .gfx_update = g364fb_update_display, +}; + +static void g364fb_init(DeviceState *dev, G364State *s) +{ + s->con = graphic_console_init(dev, 0, &g364fb_ops, s); + + memory_region_init_io(&s->mem_ctrl, OBJECT(dev), &g364fb_ctrl_ops, s, + "ctrl", 0x180000); + memory_region_init_ram(&s->mem_vram, NULL, "g364fb.vram", s->vram_size, + &error_fatal); + memory_region_set_log(&s->mem_vram, true, DIRTY_MEMORY_VGA); +} + +#define TYPE_G364 "sysbus-g364" +OBJECT_DECLARE_SIMPLE_TYPE(G364SysBusState, G364) + +struct G364SysBusState { + SysBusDevice parent_obj; + + G364State g364; +}; + +static void g364fb_sysbus_realize(DeviceState *dev, Error **errp) +{ + G364SysBusState *sbs = G364(dev); + G364State *s = &sbs->g364; + SysBusDevice *sbd = 
SYS_BUS_DEVICE(dev); + + g364fb_init(dev, s); + sysbus_init_irq(sbd, &s->irq); + sysbus_init_mmio(sbd, &s->mem_ctrl); + sysbus_init_mmio(sbd, &s->mem_vram); +} + +static void g364fb_sysbus_reset(DeviceState *d) +{ + G364SysBusState *s = G364(d); + + g364fb_reset(&s->g364); +} + +static Property g364fb_sysbus_properties[] = { + DEFINE_PROP_UINT32("vram_size", G364SysBusState, g364.vram_size, 8 * MiB), + DEFINE_PROP_END_OF_LIST(), +}; + +static const VMStateDescription vmstate_g364fb_sysbus = { + .name = "g364fb-sysbus", + .version_id = 2, + .minimum_version_id = 2, + .fields = (VMStateField[]) { + VMSTATE_STRUCT(g364, G364SysBusState, 2, vmstate_g364fb, G364State), + VMSTATE_END_OF_LIST() + } +}; + +static void g364fb_sysbus_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = g364fb_sysbus_realize; + set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories); + dc->desc = "G364 framebuffer"; + dc->reset = g364fb_sysbus_reset; + dc->vmsd = &vmstate_g364fb_sysbus; + device_class_set_props(dc, g364fb_sysbus_properties); +} + +static const TypeInfo g364fb_sysbus_info = { + .name = TYPE_G364, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(G364SysBusState), + .class_init = g364fb_sysbus_class_init, +}; + +static void g364fb_register_types(void) +{ + type_register_static(&g364fb_sysbus_info); +} + +type_init(g364fb_register_types) diff --git a/hw/display/i2c-ddc.c b/hw/display/i2c-ddc.c new file mode 100644 index 000000000..13eb529fc --- /dev/null +++ b/hw/display/i2c-ddc.c @@ -0,0 +1,129 @@ +/* A simple I2C slave for returning monitor EDID data via DDC. + * + * Copyright (c) 2011 Linaro Limited + * Written by Peter Maydell <peter.maydell@linaro.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see <http://www.gnu.org/licenses/>. + */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "hw/i2c/i2c.h" +#include "hw/qdev-properties.h" +#include "migration/vmstate.h" +#include "hw/display/i2c-ddc.h" + +#ifndef DEBUG_I2CDDC +#define DEBUG_I2CDDC 0 +#endif + +#define DPRINTF(fmt, ...) 
do { \ + if (DEBUG_I2CDDC) { \ + qemu_log("i2c-ddc: " fmt , ## __VA_ARGS__); \ + } \ +} while (0) + +static void i2c_ddc_reset(DeviceState *ds) +{ + I2CDDCState *s = I2CDDC(ds); + + s->firstbyte = false; + s->reg = 0; +} + +static int i2c_ddc_event(I2CSlave *i2c, enum i2c_event event) +{ + I2CDDCState *s = I2CDDC(i2c); + + if (event == I2C_START_SEND) { + s->firstbyte = true; + } + + return 0; +} + +static uint8_t i2c_ddc_rx(I2CSlave *i2c) +{ + I2CDDCState *s = I2CDDC(i2c); + + int value; + value = s->edid_blob[s->reg % sizeof(s->edid_blob)]; + s->reg++; + return value; +} + +static int i2c_ddc_tx(I2CSlave *i2c, uint8_t data) +{ + I2CDDCState *s = I2CDDC(i2c); + if (s->firstbyte) { + s->reg = data; + s->firstbyte = false; + DPRINTF("[EDID] Written new pointer: %u\n", data); + return 0; + } + + /* Ignore all writes */ + s->reg++; + return 0; +} + +static void i2c_ddc_init(Object *obj) +{ + I2CDDCState *s = I2CDDC(obj); + + qemu_edid_generate(s->edid_blob, sizeof(s->edid_blob), &s->edid_info); +} + +static const VMStateDescription vmstate_i2c_ddc = { + .name = TYPE_I2CDDC, + .version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_BOOL(firstbyte, I2CDDCState), + VMSTATE_UINT8(reg, I2CDDCState), + VMSTATE_END_OF_LIST() + } +}; + +static Property i2c_ddc_properties[] = { + DEFINE_EDID_PROPERTIES(I2CDDCState, edid_info), + DEFINE_PROP_END_OF_LIST(), +}; + +static void i2c_ddc_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + I2CSlaveClass *isc = I2C_SLAVE_CLASS(oc); + + dc->reset = i2c_ddc_reset; + dc->vmsd = &vmstate_i2c_ddc; + device_class_set_props(dc, i2c_ddc_properties); + isc->event = i2c_ddc_event; + isc->recv = i2c_ddc_rx; + isc->send = i2c_ddc_tx; +} + +static TypeInfo i2c_ddc_info = { + .name = TYPE_I2CDDC, + .parent = TYPE_I2C_SLAVE, + .instance_size = sizeof(I2CDDCState), + .instance_init = i2c_ddc_init, + .class_init = i2c_ddc_class_init +}; + +static void ddc_register_devices(void) +{ + type_register_static(&i2c_ddc_info); +} + +type_init(ddc_register_devices); diff --git a/hw/display/jazz_led.c b/hw/display/jazz_led.c new file mode 100644 index 000000000..dd5f4696c --- /dev/null +++ b/hw/display/jazz_led.c @@ -0,0 +1,320 @@ +/* + * QEMU JAZZ LED emulator. + * + * Copyright (c) 2007-2012 Herve Poussineau + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include "qemu/module.h" +#include "ui/console.h" +#include "ui/pixel_ops.h" +#include "trace.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "qom/object.h" + +typedef enum { + REDRAW_NONE = 0, REDRAW_SEGMENTS = 1, REDRAW_BACKGROUND = 2, +} screen_state_t; + +#define TYPE_JAZZ_LED "jazz-led" +OBJECT_DECLARE_SIMPLE_TYPE(LedState, JAZZ_LED) + +struct LedState { + SysBusDevice parent_obj; + + MemoryRegion iomem; + uint8_t segments; + QemuConsole *con; + screen_state_t state; +}; + +static uint64_t jazz_led_read(void *opaque, hwaddr addr, + unsigned int size) +{ + LedState *s = opaque; + uint8_t val; + + val = s->segments; + trace_jazz_led_read(addr, val); + + return val; +} + +static void jazz_led_write(void *opaque, hwaddr addr, + uint64_t val, unsigned int size) +{ + LedState *s = opaque; + uint8_t new_val = val & 0xff; + + trace_jazz_led_write(addr, new_val); + + s->segments = new_val; + s->state |= REDRAW_SEGMENTS; +} + +static const MemoryRegionOps led_ops = { + .read = jazz_led_read, + .write = jazz_led_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .impl.min_access_size = 1, + .impl.max_access_size = 1, +}; + +/***********************************************************/ +/* jazz_led display */ + +static void draw_horizontal_line(DisplaySurface *ds, + int posy, int posx1, int posx2, + uint32_t color) +{ + uint8_t *d; + int x, bpp; + + bpp = (surface_bits_per_pixel(ds) + 7) >> 3; + d = surface_data(ds) + surface_stride(ds) * posy + bpp * posx1; + switch (bpp) { + case 1: + for (x = posx1; x <= posx2; x++) { + *((uint8_t *)d) = color; + d++; + } + break; + case 2: + for (x = posx1; x <= posx2; x++) { + *((uint16_t *)d) = color; + d += 2; + } + break; + case 4: + for (x = posx1; x <= posx2; x++) { + *((uint32_t *)d) = color; + d += 4; + } + break; + } +} + +static void draw_vertical_line(DisplaySurface *ds, + int posx, int posy1, int posy2, + uint32_t color) +{ + uint8_t *d; + int y, bpp; + + bpp = (surface_bits_per_pixel(ds) + 7) >> 3; + d = surface_data(ds) + surface_stride(ds) * posy1 + bpp * posx; + switch (bpp) { + case 1: + for (y = posy1; y <= posy2; y++) { + *((uint8_t *)d) = color; + d += surface_stride(ds); + } + break; + case 2: + for (y = posy1; y <= posy2; y++) { + *((uint16_t *)d) = color; + d += surface_stride(ds); + } + break; + case 4: + for (y = posy1; y <= posy2; y++) { + *((uint32_t *)d) = color; + d += surface_stride(ds); + } + break; + } +} + +static void jazz_led_update_display(void *opaque) +{ + LedState *s = opaque; + DisplaySurface *surface = qemu_console_surface(s->con); + uint8_t *d1; + uint32_t color_segment, color_led; + int y, bpp; + + if (s->state & REDRAW_BACKGROUND) { + /* clear screen */ + bpp = (surface_bits_per_pixel(surface) + 7) >> 3; + d1 = surface_data(surface); + for (y = 0; y < surface_height(surface); y++) { + memset(d1, 0x00, surface_width(surface) * bpp); + d1 += surface_stride(surface); + } + } + + if (s->state & REDRAW_SEGMENTS) { + /* set colors according to bpp */ + switch (surface_bits_per_pixel(surface)) { + case 8: + color_segment = rgb_to_pixel8(0xaa, 0xaa, 0xaa); + color_led = rgb_to_pixel8(0x00, 0xff, 0x00); + break; + case 15: + color_segment = rgb_to_pixel15(0xaa, 0xaa, 0xaa); + color_led = rgb_to_pixel15(0x00, 0xff, 0x00); + break; + case 16: + color_segment = rgb_to_pixel16(0xaa, 0xaa, 0xaa); + color_led = rgb_to_pixel16(0x00, 0xff, 0x00); + break; + case 24: + color_segment = rgb_to_pixel24(0xaa, 0xaa, 0xaa); + color_led = rgb_to_pixel24(0x00, 0xff, 0x00); + break; + case 32: 
+ color_segment = rgb_to_pixel32(0xaa, 0xaa, 0xaa); + color_led = rgb_to_pixel32(0x00, 0xff, 0x00); + break; + default: + return; + } + + /* display segments */ + draw_horizontal_line(surface, 40, 10, 40, + (s->segments & 0x02) ? color_segment : 0); + draw_vertical_line(surface, 10, 10, 40, + (s->segments & 0x04) ? color_segment : 0); + draw_vertical_line(surface, 10, 40, 70, + (s->segments & 0x08) ? color_segment : 0); + draw_horizontal_line(surface, 70, 10, 40, + (s->segments & 0x10) ? color_segment : 0); + draw_vertical_line(surface, 40, 40, 70, + (s->segments & 0x20) ? color_segment : 0); + draw_vertical_line(surface, 40, 10, 40, + (s->segments & 0x40) ? color_segment : 0); + draw_horizontal_line(surface, 10, 10, 40, + (s->segments & 0x80) ? color_segment : 0); + + /* display led */ + if (!(s->segments & 0x01)) { + color_led = 0; /* black */ + } + draw_horizontal_line(surface, 68, 50, 50, color_led); + draw_horizontal_line(surface, 69, 49, 51, color_led); + draw_horizontal_line(surface, 70, 48, 52, color_led); + draw_horizontal_line(surface, 71, 49, 51, color_led); + draw_horizontal_line(surface, 72, 50, 50, color_led); + } + + s->state = REDRAW_NONE; + dpy_gfx_update_full(s->con); +} + +static void jazz_led_invalidate_display(void *opaque) +{ + LedState *s = opaque; + s->state |= REDRAW_SEGMENTS | REDRAW_BACKGROUND; +} + +static void jazz_led_text_update(void *opaque, console_ch_t *chardata) +{ + LedState *s = opaque; + char buf[3]; + + dpy_text_cursor(s->con, -1, -1); + qemu_console_resize(s->con, 2, 1); + + /* TODO: draw the segments */ + snprintf(buf, 3, "%02hhx", s->segments); + console_write_ch(chardata++, ATTR2CHTYPE(buf[0], QEMU_COLOR_BLUE, + QEMU_COLOR_BLACK, 1)); + console_write_ch(chardata++, ATTR2CHTYPE(buf[1], QEMU_COLOR_BLUE, + QEMU_COLOR_BLACK, 1)); + + dpy_text_update(s->con, 0, 0, 2, 1); +} + +static int jazz_led_post_load(void *opaque, int version_id) +{ + /* force refresh */ + jazz_led_invalidate_display(opaque); + + return 0; +} + +static const VMStateDescription vmstate_jazz_led = { + .name = "jazz-led", + .version_id = 0, + .minimum_version_id = 0, + .post_load = jazz_led_post_load, + .fields = (VMStateField[]) { + VMSTATE_UINT8(segments, LedState), + VMSTATE_END_OF_LIST() + } +}; + +static const GraphicHwOps jazz_led_ops = { + .invalidate = jazz_led_invalidate_display, + .gfx_update = jazz_led_update_display, + .text_update = jazz_led_text_update, +}; + +static void jazz_led_init(Object *obj) +{ + LedState *s = JAZZ_LED(obj); + SysBusDevice *dev = SYS_BUS_DEVICE(obj); + + memory_region_init_io(&s->iomem, obj, &led_ops, s, "led", 1); + sysbus_init_mmio(dev, &s->iomem); +} + +static void jazz_led_realize(DeviceState *dev, Error **errp) +{ + LedState *s = JAZZ_LED(dev); + + s->con = graphic_console_init(dev, 0, &jazz_led_ops, s); +} + +static void jazz_led_reset(DeviceState *d) +{ + LedState *s = JAZZ_LED(d); + + s->segments = 0; + s->state = REDRAW_SEGMENTS | REDRAW_BACKGROUND; + qemu_console_resize(s->con, 60, 80); +} + +static void jazz_led_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->desc = "Jazz LED display"; + dc->vmsd = &vmstate_jazz_led; + dc->reset = jazz_led_reset; + dc->realize = jazz_led_realize; +} + +static const TypeInfo jazz_led_info = { + .name = TYPE_JAZZ_LED, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(LedState), + .instance_init = jazz_led_init, + .class_init = jazz_led_class_init, +}; + +static void jazz_led_register(void) +{ + type_register_static(&jazz_led_info); +} + 
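+/*
+ * Segment map, inferred from the draw_* calls in jazz_led_update_display()
+ * above rather than from JAZZ hardware documentation: bit 0 drives the
+ * small status LED, bit 1 the middle bar, bits 2/3 the upper/lower left
+ * bars, bit 4 the bottom bar, bits 5/6 the lower/upper right bars, and
+ * bit 7 the top bar of the seven-segment digit.
+ */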
+type_init(jazz_led_register); diff --git a/hw/display/macfb.c b/hw/display/macfb.c new file mode 100644 index 000000000..277d3e663 --- /dev/null +++ b/hw/display/macfb.c @@ -0,0 +1,807 @@ +/* + * QEMU Motorola 680x0 Macintosh Video Card Emulation + * Copyright (c) 2012-2018 Laurent Vivier + * + * some parts from QEMU G364 framebuffer Emulator. + * Copyright (c) 2007-2011 Herve Poussineau + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "hw/sysbus.h" +#include "ui/console.h" +#include "ui/pixel_ops.h" +#include "hw/nubus/nubus.h" +#include "hw/display/macfb.h" +#include "qapi/error.h" +#include "hw/qdev-properties.h" +#include "migration/vmstate.h" +#include "trace.h" + +#define VIDEO_BASE 0x0 +#define DAFB_BASE 0x00800000 + +#define MACFB_PAGE_SIZE 4096 +#define MACFB_VRAM_SIZE (4 * MiB) + +#define DAFB_MODE_VADDR1 0x0 +#define DAFB_MODE_VADDR2 0x4 +#define DAFB_MODE_CTRL1 0x8 +#define DAFB_MODE_CTRL2 0xc +#define DAFB_MODE_SENSE 0x1c +#define DAFB_INTR_MASK 0x104 +#define DAFB_INTR_STAT 0x108 +#define DAFB_INTR_CLEAR 0x10c +#define DAFB_RESET 0x200 +#define DAFB_LUT 0x213 + +#define DAFB_INTR_VBL 0x4 + +/* Vertical Blank period (60.15Hz) */ +#define DAFB_INTR_VBL_PERIOD_NS 16625800 + +/* + * Quadra sense codes taken from Apple Technical Note HW26: + * "Macintosh Quadra Built-In Video". The sense codes and + * extended sense codes have different meanings: + * + * Sense: + * bit 2: SENSE2 (pin 10) + * bit 1: SENSE1 (pin 7) + * bit 0: SENSE0 (pin 4) + * + * 0 = pin tied to ground + * 1 = pin unconnected + * + * Extended Sense: + * bit 2: pins 4-10 + * bit 1: pins 10-7 + * bit 0: pins 7-4 + * + * 0 = pins tied together + * 1 = pins unconnected + * + * Reads from the sense register appear to be active low, i.e. a 1 indicates + * that the pin is tied to ground, a 0 indicates the pin is disconnected. + * + * Writes to the sense register appear to activate pulldowns i.e. a 1 enables + * a pulldown on a particular pin. + * + * The MacOS toolbox appears to use a series of reads and writes to first + * determine if extended sense is to be used, and then check which pins are + * tied together in order to determine the display type. 
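+ *
+ * As an illustrative example (read off the sense table below rather than
+ * from hardware measurements): the Apple Portrait display has sense code
+ * 0x1, i.e. SENSE0 left unconnected with SENSE1 and SENSE2 tied to
+ * ground, so it can be identified from the plain sense code alone,
+ * whereas the displays that report 0x7 (all pins unconnected) need the
+ * extended sense code to be told apart.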
+ */ + +typedef struct MacFbSense { + uint8_t type; + uint8_t sense; + uint8_t ext_sense; +} MacFbSense; + +static MacFbSense macfb_sense_table[] = { + { MACFB_DISPLAY_APPLE_21_COLOR, 0x0, 0 }, + { MACFB_DISPLAY_APPLE_PORTRAIT, 0x1, 0 }, + { MACFB_DISPLAY_APPLE_12_RGB, 0x2, 0 }, + { MACFB_DISPLAY_APPLE_2PAGE_MONO, 0x3, 0 }, + { MACFB_DISPLAY_NTSC_UNDERSCAN, 0x4, 0 }, + { MACFB_DISPLAY_NTSC_OVERSCAN, 0x4, 0 }, + { MACFB_DISPLAY_APPLE_12_MONO, 0x6, 0 }, + { MACFB_DISPLAY_APPLE_13_RGB, 0x6, 0 }, + { MACFB_DISPLAY_16_COLOR, 0x7, 0x3 }, + { MACFB_DISPLAY_PAL1_UNDERSCAN, 0x7, 0x0 }, + { MACFB_DISPLAY_PAL1_OVERSCAN, 0x7, 0x0 }, + { MACFB_DISPLAY_PAL2_UNDERSCAN, 0x7, 0x6 }, + { MACFB_DISPLAY_PAL2_OVERSCAN, 0x7, 0x6 }, + { MACFB_DISPLAY_VGA, 0x7, 0x5 }, + { MACFB_DISPLAY_SVGA, 0x7, 0x5 }, +}; + +static MacFbMode macfb_mode_table[] = { + { MACFB_DISPLAY_VGA, 1, 0x100, 0x71e, 640, 480, 0x400, 0x1000 }, + { MACFB_DISPLAY_VGA, 2, 0x100, 0x70e, 640, 480, 0x400, 0x1000 }, + { MACFB_DISPLAY_VGA, 4, 0x100, 0x706, 640, 480, 0x400, 0x1000 }, + { MACFB_DISPLAY_VGA, 8, 0x100, 0x702, 640, 480, 0x400, 0x1000 }, + { MACFB_DISPLAY_VGA, 24, 0x100, 0x7ff, 640, 480, 0x1000, 0x1000 }, + { MACFB_DISPLAY_VGA, 1, 0xd0 , 0x70e, 800, 600, 0x340, 0xe00 }, + { MACFB_DISPLAY_VGA, 2, 0xd0 , 0x706, 800, 600, 0x340, 0xe00 }, + { MACFB_DISPLAY_VGA, 4, 0xd0 , 0x702, 800, 600, 0x340, 0xe00 }, + { MACFB_DISPLAY_VGA, 8, 0xd0, 0x700, 800, 600, 0x340, 0xe00 }, + { MACFB_DISPLAY_VGA, 24, 0x340, 0x100, 800, 600, 0xd00, 0xe00 }, + { MACFB_DISPLAY_APPLE_21_COLOR, 1, 0x90, 0x506, 1152, 870, 0x240, 0x80 }, + { MACFB_DISPLAY_APPLE_21_COLOR, 2, 0x90, 0x502, 1152, 870, 0x240, 0x80 }, + { MACFB_DISPLAY_APPLE_21_COLOR, 4, 0x90, 0x500, 1152, 870, 0x240, 0x80 }, + { MACFB_DISPLAY_APPLE_21_COLOR, 8, 0x120, 0x5ff, 1152, 870, 0x480, 0x80 }, +}; + +typedef void macfb_draw_line_func(MacfbState *s, uint8_t *d, uint32_t addr, + int width); + +static inline uint8_t macfb_read_byte(MacfbState *s, uint32_t addr) +{ + return s->vram[addr & s->vram_bit_mask]; +} + +/* 1-bit color */ +static void macfb_draw_line1(MacfbState *s, uint8_t *d, uint32_t addr, + int width) +{ + uint8_t r, g, b; + int x; + + for (x = 0; x < width; x++) { + int bit = x & 7; + int idx = (macfb_read_byte(s, addr) >> (7 - bit)) & 1; + r = s->color_palette[idx * 3]; + g = s->color_palette[idx * 3 + 1]; + b = s->color_palette[idx * 3 + 2]; + addr += (bit == 7); + + *(uint32_t *)d = rgb_to_pixel32(r, g, b); + d += 4; + } +} + +/* 2-bit color */ +static void macfb_draw_line2(MacfbState *s, uint8_t *d, uint32_t addr, + int width) +{ + uint8_t r, g, b; + int x; + + for (x = 0; x < width; x++) { + int bit = (x & 3); + int idx = (macfb_read_byte(s, addr) >> ((3 - bit) << 1)) & 3; + r = s->color_palette[idx * 3]; + g = s->color_palette[idx * 3 + 1]; + b = s->color_palette[idx * 3 + 2]; + addr += (bit == 3); + + *(uint32_t *)d = rgb_to_pixel32(r, g, b); + d += 4; + } +} + +/* 4-bit color */ +static void macfb_draw_line4(MacfbState *s, uint8_t *d, uint32_t addr, + int width) +{ + uint8_t r, g, b; + int x; + + for (x = 0; x < width; x++) { + int bit = x & 1; + int idx = (macfb_read_byte(s, addr) >> ((1 - bit) << 2)) & 15; + r = s->color_palette[idx * 3]; + g = s->color_palette[idx * 3 + 1]; + b = s->color_palette[idx * 3 + 2]; + addr += (bit == 1); + + *(uint32_t *)d = rgb_to_pixel32(r, g, b); + d += 4; + } +} + +/* 8-bit color */ +static void macfb_draw_line8(MacfbState *s, uint8_t *d, uint32_t addr, + int width) +{ + uint8_t r, g, b; + int x; + + for (x = 0; x < width; x++) { + r = 
s->color_palette[macfb_read_byte(s, addr) * 3]; + g = s->color_palette[macfb_read_byte(s, addr) * 3 + 1]; + b = s->color_palette[macfb_read_byte(s, addr) * 3 + 2]; + addr++; + + *(uint32_t *)d = rgb_to_pixel32(r, g, b); + d += 4; + } +} + +/* 16-bit color */ +static void macfb_draw_line16(MacfbState *s, uint8_t *d, uint32_t addr, + int width) +{ + uint8_t r, g, b; + int x; + + for (x = 0; x < width; x++) { + uint16_t pixel; + pixel = (macfb_read_byte(s, addr) << 8) | macfb_read_byte(s, addr + 1); + r = ((pixel >> 10) & 0x1f) << 3; + g = ((pixel >> 5) & 0x1f) << 3; + b = (pixel & 0x1f) << 3; + addr += 2; + + *(uint32_t *)d = rgb_to_pixel32(r, g, b); + d += 4; + } +} + +/* 24-bit color */ +static void macfb_draw_line24(MacfbState *s, uint8_t *d, uint32_t addr, + int width) +{ + uint8_t r, g, b; + int x; + + for (x = 0; x < width; x++) { + r = macfb_read_byte(s, addr + 1); + g = macfb_read_byte(s, addr + 2); + b = macfb_read_byte(s, addr + 3); + addr += 4; + + *(uint32_t *)d = rgb_to_pixel32(r, g, b); + d += 4; + } +} + + +enum { + MACFB_DRAW_LINE1, + MACFB_DRAW_LINE2, + MACFB_DRAW_LINE4, + MACFB_DRAW_LINE8, + MACFB_DRAW_LINE16, + MACFB_DRAW_LINE24, + MACFB_DRAW_LINE_NB, +}; + +static macfb_draw_line_func * const + macfb_draw_line_table[MACFB_DRAW_LINE_NB] = { + macfb_draw_line1, + macfb_draw_line2, + macfb_draw_line4, + macfb_draw_line8, + macfb_draw_line16, + macfb_draw_line24, +}; + +static int macfb_check_dirty(MacfbState *s, DirtyBitmapSnapshot *snap, + ram_addr_t addr, int len) +{ + return memory_region_snapshot_get_dirty(&s->mem_vram, snap, addr, len); +} + +static void macfb_draw_graphic(MacfbState *s) +{ + DisplaySurface *surface = qemu_console_surface(s->con); + DirtyBitmapSnapshot *snap = NULL; + ram_addr_t page; + uint32_t v = 0; + int y, ymin; + int macfb_stride = s->mode->stride; + macfb_draw_line_func *macfb_draw_line; + + switch (s->depth) { + case 1: + v = MACFB_DRAW_LINE1; + break; + case 2: + v = MACFB_DRAW_LINE2; + break; + case 4: + v = MACFB_DRAW_LINE4; + break; + case 8: + v = MACFB_DRAW_LINE8; + break; + case 16: + v = MACFB_DRAW_LINE16; + break; + case 24: + v = MACFB_DRAW_LINE24; + break; + } + + macfb_draw_line = macfb_draw_line_table[v]; + assert(macfb_draw_line != NULL); + + snap = memory_region_snapshot_and_clear_dirty(&s->mem_vram, 0x0, + memory_region_size(&s->mem_vram), + DIRTY_MEMORY_VGA); + + ymin = -1; + page = s->mode->offset; + for (y = 0; y < s->height; y++, page += macfb_stride) { + if (macfb_check_dirty(s, snap, page, macfb_stride)) { + uint8_t *data_display; + + data_display = surface_data(surface) + y * surface_stride(surface); + macfb_draw_line(s, data_display, page, s->width); + + if (ymin < 0) { + ymin = y; + } + } else { + if (ymin >= 0) { + dpy_gfx_update(s->con, 0, ymin, s->width, y - ymin); + ymin = -1; + } + } + } + + if (ymin >= 0) { + dpy_gfx_update(s->con, 0, ymin, s->width, y - ymin); + } + + g_free(snap); +} + +static void macfb_invalidate_display(void *opaque) +{ + MacfbState *s = opaque; + + memory_region_set_dirty(&s->mem_vram, 0, MACFB_VRAM_SIZE); +} + +static uint32_t macfb_sense_read(MacfbState *s) +{ + MacFbSense *macfb_sense; + uint8_t sense; + + assert(s->type < ARRAY_SIZE(macfb_sense_table)); + macfb_sense = &macfb_sense_table[s->type]; + if (macfb_sense->sense == 0x7) { + /* Extended sense */ + sense = 0; + if (!(macfb_sense->ext_sense & 1)) { + /* Pins 7-4 together */ + if (~s->regs[DAFB_MODE_SENSE >> 2] & 3) { + sense = (~s->regs[DAFB_MODE_SENSE >> 2] & 7) | 3; + } + } + if (!(macfb_sense->ext_sense & 2)) { + /* Pins 10-7 
together */ + if (~s->regs[DAFB_MODE_SENSE >> 2] & 6) { + sense = (~s->regs[DAFB_MODE_SENSE >> 2] & 7) | 6; + } + } + if (!(macfb_sense->ext_sense & 4)) { + /* Pins 4-10 together */ + if (~s->regs[DAFB_MODE_SENSE >> 2] & 5) { + sense = (~s->regs[DAFB_MODE_SENSE >> 2] & 7) | 5; + } + } + } else { + /* Normal sense */ + sense = (~macfb_sense->sense & 7) | + (~s->regs[DAFB_MODE_SENSE >> 2] & 7); + } + + trace_macfb_sense_read(sense); + return sense; +} + +static void macfb_sense_write(MacfbState *s, uint32_t val) +{ + s->regs[DAFB_MODE_SENSE >> 2] = val; + + trace_macfb_sense_write(val); + return; +} + +static void macfb_update_mode(MacfbState *s) +{ + s->width = s->mode->width; + s->height = s->mode->height; + s->depth = s->mode->depth; + + trace_macfb_update_mode(s->width, s->height, s->depth); + macfb_invalidate_display(s); +} + +static void macfb_mode_write(MacfbState *s) +{ + MacFbMode *macfb_mode; + int i; + + for (i = 0; i < ARRAY_SIZE(macfb_mode_table); i++) { + macfb_mode = &macfb_mode_table[i]; + + if (s->type != macfb_mode->type) { + continue; + } + + if ((s->regs[DAFB_MODE_CTRL1 >> 2] & 0xff) == + (macfb_mode->mode_ctrl1 & 0xff) && + (s->regs[DAFB_MODE_CTRL2 >> 2] & 0xff) == + (macfb_mode->mode_ctrl2 & 0xff)) { + s->mode = macfb_mode; + macfb_update_mode(s); + break; + } + } +} + +static MacFbMode *macfb_find_mode(MacfbDisplayType display_type, + uint16_t width, uint16_t height, + uint8_t depth) +{ + MacFbMode *macfb_mode; + int i; + + for (i = 0; i < ARRAY_SIZE(macfb_mode_table); i++) { + macfb_mode = &macfb_mode_table[i]; + + if (display_type == macfb_mode->type && width == macfb_mode->width && + height == macfb_mode->height && depth == macfb_mode->depth) { + return macfb_mode; + } + } + + return NULL; +} + +static gchar *macfb_mode_list(void) +{ + GString *list = g_string_new(""); + MacFbMode *macfb_mode; + int i; + + for (i = 0; i < ARRAY_SIZE(macfb_mode_table); i++) { + macfb_mode = &macfb_mode_table[i]; + + g_string_append_printf(list, " %dx%dx%d\n", macfb_mode->width, + macfb_mode->height, macfb_mode->depth); + } + + return g_string_free(list, FALSE); +} + + +static void macfb_update_display(void *opaque) +{ + MacfbState *s = opaque; + DisplaySurface *surface = qemu_console_surface(s->con); + + qemu_flush_coalesced_mmio_buffer(); + + if (s->width == 0 || s->height == 0) { + return; + } + + if (s->width != surface_width(surface) || + s->height != surface_height(surface)) { + qemu_console_resize(s->con, s->width, s->height); + } + + macfb_draw_graphic(s); +} + +static void macfb_update_irq(MacfbState *s) +{ + uint32_t irq_state = s->irq_state & s->irq_mask; + + if (irq_state) { + qemu_irq_raise(s->irq); + } else { + qemu_irq_lower(s->irq); + } +} + +static int64_t macfb_next_vbl(void) +{ + return (qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + DAFB_INTR_VBL_PERIOD_NS) / + DAFB_INTR_VBL_PERIOD_NS * DAFB_INTR_VBL_PERIOD_NS; +} + +static void macfb_vbl_timer(void *opaque) +{ + MacfbState *s = opaque; + int64_t next_vbl; + + s->irq_state |= DAFB_INTR_VBL; + macfb_update_irq(s); + + /* 60 Hz irq */ + next_vbl = macfb_next_vbl(); + timer_mod(s->vbl_timer, next_vbl); +} + +static void macfb_reset(MacfbState *s) +{ + int i; + + s->palette_current = 0; + for (i = 0; i < 256; i++) { + s->color_palette[i * 3] = 255 - i; + s->color_palette[i * 3 + 1] = 255 - i; + s->color_palette[i * 3 + 2] = 255 - i; + } + memset(s->vram, 0, MACFB_VRAM_SIZE); + macfb_invalidate_display(s); +} + +static uint64_t macfb_ctrl_read(void *opaque, + hwaddr addr, + unsigned int size) +{ + MacfbState *s = opaque; + 
uint64_t val = 0; + + switch (addr) { + case DAFB_MODE_VADDR1: + case DAFB_MODE_VADDR2: + case DAFB_MODE_CTRL1: + case DAFB_MODE_CTRL2: + val = s->regs[addr >> 2]; + break; + case DAFB_INTR_STAT: + val = s->irq_state; + break; + case DAFB_MODE_SENSE: + val = macfb_sense_read(s); + break; + } + + trace_macfb_ctrl_read(addr, val, size); + return val; +} + +static void macfb_ctrl_write(void *opaque, + hwaddr addr, + uint64_t val, + unsigned int size) +{ + MacfbState *s = opaque; + int64_t next_vbl; + + switch (addr) { + case DAFB_MODE_VADDR1: + case DAFB_MODE_VADDR2: + s->regs[addr >> 2] = val; + break; + case DAFB_MODE_CTRL1 ... DAFB_MODE_CTRL1 + 3: + case DAFB_MODE_CTRL2 ... DAFB_MODE_CTRL2 + 3: + s->regs[addr >> 2] = val; + if (val) { + macfb_mode_write(s); + } + break; + case DAFB_MODE_SENSE: + macfb_sense_write(s, val); + break; + case DAFB_INTR_MASK: + s->irq_mask = val; + if (val & DAFB_INTR_VBL) { + next_vbl = macfb_next_vbl(); + timer_mod(s->vbl_timer, next_vbl); + } else { + timer_del(s->vbl_timer); + } + break; + case DAFB_INTR_CLEAR: + s->irq_state &= ~DAFB_INTR_VBL; + macfb_update_irq(s); + break; + case DAFB_RESET: + s->palette_current = 0; + s->irq_state &= ~DAFB_INTR_VBL; + macfb_update_irq(s); + break; + case DAFB_LUT: + s->color_palette[s->palette_current] = val; + s->palette_current = (s->palette_current + 1) % + ARRAY_SIZE(s->color_palette); + if (s->palette_current % 3) { + macfb_invalidate_display(s); + } + break; + } + + trace_macfb_ctrl_write(addr, val, size); +} + +static const MemoryRegionOps macfb_ctrl_ops = { + .read = macfb_ctrl_read, + .write = macfb_ctrl_write, + .endianness = DEVICE_BIG_ENDIAN, + .impl.min_access_size = 1, + .impl.max_access_size = 4, +}; + +static int macfb_post_load(void *opaque, int version_id) +{ + macfb_mode_write(opaque); + return 0; +} + +static const VMStateDescription vmstate_macfb = { + .name = "macfb", + .version_id = 1, + .minimum_version_id = 1, + .minimum_version_id_old = 1, + .post_load = macfb_post_load, + .fields = (VMStateField[]) { + VMSTATE_UINT8_ARRAY(color_palette, MacfbState, 256 * 3), + VMSTATE_UINT32(palette_current, MacfbState), + VMSTATE_UINT32_ARRAY(regs, MacfbState, MACFB_NUM_REGS), + VMSTATE_END_OF_LIST() + } +}; + +static const GraphicHwOps macfb_ops = { + .invalidate = macfb_invalidate_display, + .gfx_update = macfb_update_display, +}; + +static bool macfb_common_realize(DeviceState *dev, MacfbState *s, Error **errp) +{ + DisplaySurface *surface; + + s->mode = macfb_find_mode(s->type, s->width, s->height, s->depth); + if (!s->mode) { + gchar *list; + error_setg(errp, "unknown display mode: width %d, height %d, depth %d", + s->width, s->height, s->depth); + list = macfb_mode_list(); + error_append_hint(errp, "Available modes:\n%s", list); + g_free(list); + + return false; + } + + s->con = graphic_console_init(dev, 0, &macfb_ops, s); + surface = qemu_console_surface(s->con); + + if (surface_bits_per_pixel(surface) != 32) { + error_setg(errp, "unknown host depth %d", + surface_bits_per_pixel(surface)); + return false; + } + + memory_region_init_io(&s->mem_ctrl, OBJECT(dev), &macfb_ctrl_ops, s, + "macfb-ctrl", 0x1000); + + memory_region_init_ram(&s->mem_vram, OBJECT(dev), "macfb-vram", + MACFB_VRAM_SIZE, &error_abort); + s->vram = memory_region_get_ram_ptr(&s->mem_vram); + s->vram_bit_mask = MACFB_VRAM_SIZE - 1; + memory_region_set_coalescing(&s->mem_vram); + + s->vbl_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, macfb_vbl_timer, s); + macfb_update_mode(s); + return true; +} + +static void macfb_sysbus_realize(DeviceState 
*dev, Error **errp) +{ + MacfbSysBusState *s = MACFB(dev); + MacfbState *ms = &s->macfb; + + if (!macfb_common_realize(dev, ms, errp)) { + return; + } + + sysbus_init_mmio(SYS_BUS_DEVICE(s), &ms->mem_ctrl); + sysbus_init_mmio(SYS_BUS_DEVICE(s), &ms->mem_vram); + + qdev_init_gpio_out(dev, &ms->irq, 1); +} + +static void macfb_nubus_set_irq(void *opaque, int n, int level) +{ + MacfbNubusState *s = NUBUS_MACFB(opaque); + NubusDevice *nd = NUBUS_DEVICE(s); + + nubus_set_irq(nd, level); +} + +static void macfb_nubus_realize(DeviceState *dev, Error **errp) +{ + NubusDevice *nd = NUBUS_DEVICE(dev); + MacfbNubusState *s = NUBUS_MACFB(dev); + MacfbNubusDeviceClass *ndc = NUBUS_MACFB_GET_CLASS(dev); + MacfbState *ms = &s->macfb; + + ndc->parent_realize(dev, errp); + if (*errp) { + return; + } + + if (!macfb_common_realize(dev, ms, errp)) { + return; + } + + memory_region_add_subregion(&nd->slot_mem, DAFB_BASE, &ms->mem_ctrl); + memory_region_add_subregion(&nd->slot_mem, VIDEO_BASE, &ms->mem_vram); + + ms->irq = qemu_allocate_irq(macfb_nubus_set_irq, s, 0); +} + +static void macfb_nubus_unrealize(DeviceState *dev) +{ + MacfbNubusState *s = NUBUS_MACFB(dev); + MacfbNubusDeviceClass *ndc = NUBUS_MACFB_GET_CLASS(dev); + MacfbState *ms = &s->macfb; + + ndc->parent_unrealize(dev); + + qemu_free_irq(ms->irq); +} + +static void macfb_sysbus_reset(DeviceState *d) +{ + MacfbSysBusState *s = MACFB(d); + macfb_reset(&s->macfb); +} + +static void macfb_nubus_reset(DeviceState *d) +{ + MacfbNubusState *s = NUBUS_MACFB(d); + macfb_reset(&s->macfb); +} + +static Property macfb_sysbus_properties[] = { + DEFINE_PROP_UINT32("width", MacfbSysBusState, macfb.width, 640), + DEFINE_PROP_UINT32("height", MacfbSysBusState, macfb.height, 480), + DEFINE_PROP_UINT8("depth", MacfbSysBusState, macfb.depth, 8), + DEFINE_PROP_UINT8("display", MacfbSysBusState, macfb.type, + MACFB_DISPLAY_VGA), + DEFINE_PROP_END_OF_LIST(), +}; + +static Property macfb_nubus_properties[] = { + DEFINE_PROP_UINT32("width", MacfbNubusState, macfb.width, 640), + DEFINE_PROP_UINT32("height", MacfbNubusState, macfb.height, 480), + DEFINE_PROP_UINT8("depth", MacfbNubusState, macfb.depth, 8), + DEFINE_PROP_UINT8("display", MacfbNubusState, macfb.type, + MACFB_DISPLAY_VGA), + DEFINE_PROP_END_OF_LIST(), +}; + +static void macfb_sysbus_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = macfb_sysbus_realize; + dc->desc = "SysBus Macintosh framebuffer"; + dc->reset = macfb_sysbus_reset; + dc->vmsd = &vmstate_macfb; + device_class_set_props(dc, macfb_sysbus_properties); +} + +static void macfb_nubus_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + MacfbNubusDeviceClass *ndc = NUBUS_MACFB_CLASS(klass); + + device_class_set_parent_realize(dc, macfb_nubus_realize, + &ndc->parent_realize); + device_class_set_parent_unrealize(dc, macfb_nubus_unrealize, + &ndc->parent_unrealize); + dc->desc = "Nubus Macintosh framebuffer"; + dc->reset = macfb_nubus_reset; + dc->vmsd = &vmstate_macfb; + set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories); + device_class_set_props(dc, macfb_nubus_properties); +} + +static TypeInfo macfb_sysbus_info = { + .name = TYPE_MACFB, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(MacfbSysBusState), + .class_init = macfb_sysbus_class_init, +}; + +static TypeInfo macfb_nubus_info = { + .name = TYPE_NUBUS_MACFB, + .parent = TYPE_NUBUS_DEVICE, + .instance_size = sizeof(MacfbNubusState), + .class_init = macfb_nubus_class_init, + .class_size = 
sizeof(MacfbNubusDeviceClass), +}; + +static void macfb_register_types(void) +{ + type_register_static(&macfb_sysbus_info); + type_register_static(&macfb_nubus_info); +} + +type_init(macfb_register_types) diff --git a/hw/display/meson.build b/hw/display/meson.build new file mode 100644 index 000000000..861c43ff9 --- /dev/null +++ b/hw/display/meson.build @@ -0,0 +1,100 @@ +hw_display_modules = {} + +softmmu_ss.add(when: 'CONFIG_DDC', if_true: files('i2c-ddc.c')) +softmmu_ss.add(when: 'CONFIG_EDID', if_true: files('edid-generate.c', 'edid-region.c')) + +softmmu_ss.add(when: 'CONFIG_FW_CFG_DMA', if_true: files('ramfb.c')) +softmmu_ss.add(when: 'CONFIG_FW_CFG_DMA', if_true: files('ramfb-standalone.c')) + +softmmu_ss.add(when: 'CONFIG_VGA_CIRRUS', if_true: files('cirrus_vga.c')) +softmmu_ss.add(when: ['CONFIG_VGA_CIRRUS', 'CONFIG_VGA_ISA'], if_true: files('cirrus_vga_isa.c')) +softmmu_ss.add(when: 'CONFIG_G364FB', if_true: files('g364fb.c')) +softmmu_ss.add(when: 'CONFIG_JAZZ_LED', if_true: files('jazz_led.c')) +softmmu_ss.add(when: 'CONFIG_PL110', if_true: files('pl110.c')) +softmmu_ss.add(when: 'CONFIG_SII9022', if_true: files('sii9022.c')) +softmmu_ss.add(when: 'CONFIG_SSD0303', if_true: files('ssd0303.c')) +softmmu_ss.add(when: 'CONFIG_SSD0323', if_true: files('ssd0323.c')) +softmmu_ss.add(when: 'CONFIG_XEN', if_true: files('xenfb.c')) + +softmmu_ss.add(when: 'CONFIG_VGA_PCI', if_true: files('vga-pci.c')) +softmmu_ss.add(when: 'CONFIG_VGA_ISA', if_true: files('vga-isa.c')) +softmmu_ss.add(when: 'CONFIG_VGA_ISA_MM', if_true: files('vga-isa-mm.c')) +softmmu_ss.add(when: 'CONFIG_VMWARE_VGA', if_true: files('vmware_vga.c')) +softmmu_ss.add(when: 'CONFIG_BOCHS_DISPLAY', if_true: files('bochs-display.c')) + +softmmu_ss.add(when: 'CONFIG_BLIZZARD', if_true: files('blizzard.c')) +softmmu_ss.add(when: 'CONFIG_EXYNOS4', if_true: files('exynos4210_fimd.c')) +softmmu_ss.add(when: 'CONFIG_FRAMEBUFFER', if_true: files('framebuffer.c')) +softmmu_ss.add(when: 'CONFIG_ZAURUS', if_true: files('tc6393xb.c')) + +softmmu_ss.add(when: 'CONFIG_OMAP', if_true: files('omap_dss.c')) +softmmu_ss.add(when: 'CONFIG_PXA2XX', if_true: files('pxa2xx_lcd.c')) +softmmu_ss.add(when: 'CONFIG_RASPI', if_true: files('bcm2835_fb.c')) +softmmu_ss.add(when: 'CONFIG_SM501', if_true: files('sm501.c')) +softmmu_ss.add(when: 'CONFIG_TCX', if_true: files('tcx.c')) +softmmu_ss.add(when: 'CONFIG_CG3', if_true: files('cg3.c')) +softmmu_ss.add(when: 'CONFIG_MACFB', if_true: files('macfb.c')) +softmmu_ss.add(when: 'CONFIG_NEXTCUBE', if_true: files('next-fb.c')) + +specific_ss.add(when: 'CONFIG_VGA', if_true: files('vga.c')) + +if config_all_devices.has_key('CONFIG_QXL') + qxl_ss = ss.source_set() + qxl_ss.add(when: 'CONFIG_QXL', if_true: [files('qxl.c', 'qxl-logger.c', 'qxl-render.c'), + pixman, spice]) + hw_display_modules += {'qxl': qxl_ss} +endif + +softmmu_ss.add(when: 'CONFIG_DPCD', if_true: files('dpcd.c')) +softmmu_ss.add(when: 'CONFIG_XLNX_ZYNQMP_ARM', if_true: files('xlnx_dp.c')) + +softmmu_ss.add(when: 'CONFIG_ARTIST', if_true: files('artist.c')) + +softmmu_ss.add(when: [pixman, 'CONFIG_ATI_VGA'], if_true: files('ati.c', 'ati_2d.c', 'ati_dbg.c')) + +if config_all_devices.has_key('CONFIG_VIRTIO_GPU') + virtio_gpu_ss = ss.source_set() + virtio_gpu_ss.add(when: 'CONFIG_VIRTIO_GPU', + if_true: [files('virtio-gpu-base.c', 'virtio-gpu.c'), pixman]) + virtio_gpu_ss.add(when: 'CONFIG_LINUX', if_true: files('virtio-gpu-udmabuf.c'), + if_false: files('virtio-gpu-udmabuf-stubs.c')) + virtio_gpu_ss.add(when: 'CONFIG_VHOST_USER_GPU', if_true: 
files('vhost-user-gpu.c')) + hw_display_modules += {'virtio-gpu': virtio_gpu_ss} + + virtio_gpu_gl_ss = ss.source_set() + virtio_gpu_gl_ss.add(when: ['CONFIG_VIRTIO_GPU', virgl, opengl], + if_true: [files('virtio-gpu-gl.c', 'virtio-gpu-virgl.c'), pixman, virgl]) + hw_display_modules += {'virtio-gpu-gl': virtio_gpu_gl_ss} +endif + +if config_all_devices.has_key('CONFIG_VIRTIO_PCI') + virtio_gpu_pci_ss = ss.source_set() + virtio_gpu_pci_ss.add(when: ['CONFIG_VIRTIO_GPU', 'CONFIG_VIRTIO_PCI'], + if_true: [files('virtio-gpu-pci.c'), pixman]) + virtio_gpu_pci_ss.add(when: ['CONFIG_VHOST_USER_GPU', 'CONFIG_VIRTIO_PCI'], + if_true: files('vhost-user-gpu-pci.c')) + hw_display_modules += {'virtio-gpu-pci': virtio_gpu_pci_ss} + + virtio_gpu_pci_gl_ss = ss.source_set() + virtio_gpu_pci_gl_ss.add(when: ['CONFIG_VIRTIO_GPU', 'CONFIG_VIRTIO_PCI', virgl, opengl], + if_true: [files('virtio-gpu-pci-gl.c'), pixman]) + hw_display_modules += {'virtio-gpu-pci-gl': virtio_gpu_pci_gl_ss} +endif + +if config_all_devices.has_key('CONFIG_VIRTIO_VGA') + virtio_vga_ss = ss.source_set() + virtio_vga_ss.add(when: 'CONFIG_VIRTIO_VGA', + if_true: [files('virtio-vga.c'), pixman]) + virtio_vga_ss.add(when: 'CONFIG_VHOST_USER_VGA', + if_true: files('vhost-user-vga.c')) + hw_display_modules += {'virtio-vga': virtio_vga_ss} + + virtio_vga_gl_ss = ss.source_set() + virtio_vga_gl_ss.add(when: ['CONFIG_VIRTIO_VGA', virgl, opengl], + if_true: [files('virtio-vga-gl.c'), pixman]) + hw_display_modules += {'virtio-vga-gl': virtio_vga_gl_ss} +endif + +specific_ss.add(when: 'CONFIG_OMAP', if_true: files('omap_lcdc.c')) + +modules += { 'hw-display': hw_display_modules } diff --git a/hw/display/next-fb.c b/hw/display/next-fb.c new file mode 100644 index 000000000..dd6a1aa8a --- /dev/null +++ b/hw/display/next-fb.c @@ -0,0 +1,144 @@ +/* + * NeXT Cube/Station Framebuffer Emulation + * + * Copyright (c) 2011 Bryce Lanham + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "ui/console.h" +#include "hw/loader.h" +#include "framebuffer.h" +#include "ui/pixel_ops.h" +#include "hw/m68k/next-cube.h" +#include "qom/object.h" + +OBJECT_DECLARE_SIMPLE_TYPE(NeXTFbState, NEXTFB) + +struct NeXTFbState { + SysBusDevice parent_obj; + + MemoryRegion fb_mr; + MemoryRegionSection fbsection; + QemuConsole *con; + + uint32_t cols; + uint32_t rows; + int invalidate; +}; + +static void nextfb_draw_line(void *opaque, uint8_t *d, const uint8_t *s, + int width, int pitch) +{ + NeXTFbState *nfbstate = NEXTFB(opaque); + static const uint32_t pal[4] = { + 0xFFFFFFFF, 0xFFAAAAAA, 0xFF555555, 0xFF000000 + }; + uint32_t *buf = (uint32_t *)d; + int i = 0; + + for (i = 0; i < nfbstate->cols / 4; i++) { + int j = i * 4; + uint8_t src = s[i]; + buf[j + 3] = pal[src & 0x3]; + src >>= 2; + buf[j + 2] = pal[src & 0x3]; + src >>= 2; + buf[j + 1] = pal[src & 0x3]; + src >>= 2; + buf[j + 0] = pal[src & 0x3]; + } +} + +static void nextfb_update(void *opaque) +{ + NeXTFbState *s = NEXTFB(opaque); + int dest_width = 4; + int src_width; + int first = 0; + int last = 0; + DisplaySurface *surface = qemu_console_surface(s->con); + + src_width = s->cols / 4 + 8; + dest_width = s->cols * 4; + + if (s->invalidate) { + framebuffer_update_memory_section(&s->fbsection, &s->fb_mr, 0, + s->cols, src_width); + s->invalidate = 0; + } + + framebuffer_update_display(surface, &s->fbsection, s->cols, s->rows, + src_width, dest_width, 0, 1, nextfb_draw_line, + s, &first, &last); + + dpy_gfx_update(s->con, 0, 0, s->cols, s->rows); +} + +static void nextfb_invalidate(void *opaque) +{ + NeXTFbState *s = NEXTFB(opaque); + s->invalidate = 1; +} + +static const GraphicHwOps nextfb_ops = { + .invalidate = nextfb_invalidate, + .gfx_update = nextfb_update, +}; + +static void nextfb_realize(DeviceState *dev, Error **errp) +{ + NeXTFbState *s = NEXTFB(dev); + + memory_region_init_ram(&s->fb_mr, OBJECT(dev), "next-video", 0x1CB100, + &error_fatal); + sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->fb_mr); + + s->invalidate = 1; + s->cols = 1120; + s->rows = 832; + + s->con = graphic_console_init(dev, 0, &nextfb_ops, s); + qemu_console_resize(s->con, s->cols, s->rows); +} + +static void nextfb_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + + set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories); + dc->realize = nextfb_realize; + + /* Note: This device does not have any state that we have to reset or migrate */ +} + +static const TypeInfo nextfb_info = { + .name = TYPE_NEXTFB, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(NeXTFbState), + .class_init = nextfb_class_init, +}; + +static void nextfb_register_types(void) +{ + type_register_static(&nextfb_info); +} + +type_init(nextfb_register_types) diff --git a/hw/display/omap_dss.c b/hw/display/omap_dss.c new file mode 100644 index 000000000..21fde58a2 --- /dev/null +++ b/hw/display/omap_dss.c @@ -0,0 +1,1093 @@ +/* + * OMAP2 Display Subsystem. + * + * Copyright (C) 2008 Nokia Corporation + * Written by Andrzej Zaborowski + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 or + * (at your option) version 3 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see <http://www.gnu.org/licenses/>. + */ + +#include "qemu/osdep.h" +#include "hw/hw.h" +#include "hw/irq.h" +#include "ui/console.h" +#include "hw/arm/omap.h" + +struct omap_dss_s { + qemu_irq irq; + qemu_irq drq; + DisplayState *state; + MemoryRegion iomem_diss1, iomem_disc1, iomem_rfbi1, iomem_venc1, iomem_im3; + + int autoidle; + int control; + int enable; + + struct omap_dss_panel_s { + int enable; + int nx; + int ny; + + int x; + int y; + } dig, lcd; + + struct { + uint32_t idlemode; + uint32_t irqst; + uint32_t irqen; + uint32_t control; + uint32_t config; + uint32_t capable; + uint32_t timing[4]; + int line; + uint32_t bg[2]; + uint32_t trans[2]; + + struct omap_dss_plane_s { + int enable; + int bpp; + int posx; + int posy; + int nx; + int ny; + + hwaddr addr[3]; + + uint32_t attr; + uint32_t tresh; + int rowinc; + int colinc; + int wininc; + } l[3]; + + int invalidate; + uint16_t palette[256]; + } dispc; + + struct { + int idlemode; + uint32_t control; + int enable; + int pixels; + int busy; + int skiplines; + uint16_t rxbuf; + uint32_t config[2]; + uint32_t time[4]; + uint32_t data[6]; + uint16_t vsync; + uint16_t hsync; + struct rfbi_chip_s *chip[2]; + } rfbi; +}; + +static void omap_dispc_interrupt_update(struct omap_dss_s *s) +{ + qemu_set_irq(s->irq, s->dispc.irqst & s->dispc.irqen); +} + +static void omap_rfbi_reset(struct omap_dss_s *s) +{ + s->rfbi.idlemode = 0; + s->rfbi.control = 2; + s->rfbi.enable = 0; + s->rfbi.pixels = 0; + s->rfbi.skiplines = 0; + s->rfbi.busy = 0; + s->rfbi.config[0] = 0x00310000; + s->rfbi.config[1] = 0x00310000; + s->rfbi.time[0] = 0; + s->rfbi.time[1] = 0; + s->rfbi.time[2] = 0; + s->rfbi.time[3] = 0; + s->rfbi.data[0] = 0; + s->rfbi.data[1] = 0; + s->rfbi.data[2] = 0; + s->rfbi.data[3] = 0; + s->rfbi.data[4] = 0; + s->rfbi.data[5] = 0; + s->rfbi.vsync = 0; + s->rfbi.hsync = 0; +} + +void omap_dss_reset(struct omap_dss_s *s) +{ + s->autoidle = 0; + s->control = 0; + s->enable = 0; + + s->dig.enable = 0; + s->dig.nx = 1; + s->dig.ny = 1; + + s->lcd.enable = 0; + s->lcd.nx = 1; + s->lcd.ny = 1; + + s->dispc.idlemode = 0; + s->dispc.irqst = 0; + s->dispc.irqen = 0; + s->dispc.control = 0; + s->dispc.config = 0; + s->dispc.capable = 0x161; + s->dispc.timing[0] = 0; + s->dispc.timing[1] = 0; + s->dispc.timing[2] = 0; + s->dispc.timing[3] = 0; + s->dispc.line = 0; + s->dispc.bg[0] = 0; + s->dispc.bg[1] = 0; + s->dispc.trans[0] = 0; + s->dispc.trans[1] = 0; + + s->dispc.l[0].enable = 0; + s->dispc.l[0].bpp = 0; + s->dispc.l[0].addr[0] = 0; + s->dispc.l[0].addr[1] = 0; + s->dispc.l[0].addr[2] = 0; + s->dispc.l[0].posx = 0; + s->dispc.l[0].posy = 0; + s->dispc.l[0].nx = 1; + s->dispc.l[0].ny = 1; + s->dispc.l[0].attr = 0; + s->dispc.l[0].tresh = 0; + s->dispc.l[0].rowinc = 1; + s->dispc.l[0].colinc = 1; + s->dispc.l[0].wininc = 0; + + omap_rfbi_reset(s); + omap_dispc_interrupt_update(s); +} + +static uint64_t omap_diss_read(void *opaque, hwaddr addr, + unsigned size) +{ + struct omap_dss_s *s = (struct omap_dss_s *) opaque; + + if (size != 4) { + return omap_badwidth_read32(opaque, addr); + } + + switch (addr) { + case 0x00: /* DSS_REVISIONNUMBER */ + return 0x20; + + case 0x10: /* DSS_SYSCONFIG */ + return s->autoidle; + + case 0x14: /* DSS_SYSSTATUS */ + return 1; /* RESETDONE */ + + case 0x40: /* DSS_CONTROL */ + return s->control; + + case 0x50: /* DSS_PSA_LCD_REG_1 */ + case 0x54: /* DSS_PSA_LCD_REG_2 */ + case 
0x58: /* DSS_PSA_VIDEO_REG */ + /* TODO: fake some values when appropriate s->control bits are set */ + return 0; + + case 0x5c: /* DSS_STATUS */ + return 1 + (s->control & 1); + + default: + break; + } + OMAP_BAD_REG(addr); + return 0; +} + +static void omap_diss_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + struct omap_dss_s *s = (struct omap_dss_s *) opaque; + + if (size != 4) { + omap_badwidth_write32(opaque, addr, value); + return; + } + + switch (addr) { + case 0x00: /* DSS_REVISIONNUMBER */ + case 0x14: /* DSS_SYSSTATUS */ + case 0x50: /* DSS_PSA_LCD_REG_1 */ + case 0x54: /* DSS_PSA_LCD_REG_2 */ + case 0x58: /* DSS_PSA_VIDEO_REG */ + case 0x5c: /* DSS_STATUS */ + OMAP_RO_REG(addr); + break; + + case 0x10: /* DSS_SYSCONFIG */ + if (value & 2) /* SOFTRESET */ + omap_dss_reset(s); + s->autoidle = value & 1; + break; + + case 0x40: /* DSS_CONTROL */ + s->control = value & 0x3dd; + break; + + default: + OMAP_BAD_REG(addr); + } +} + +static const MemoryRegionOps omap_diss_ops = { + .read = omap_diss_read, + .write = omap_diss_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static uint64_t omap_disc_read(void *opaque, hwaddr addr, + unsigned size) +{ + struct omap_dss_s *s = (struct omap_dss_s *) opaque; + + if (size != 4) { + return omap_badwidth_read32(opaque, addr); + } + + switch (addr) { + case 0x000: /* DISPC_REVISION */ + return 0x20; + + case 0x010: /* DISPC_SYSCONFIG */ + return s->dispc.idlemode; + + case 0x014: /* DISPC_SYSSTATUS */ + return 1; /* RESETDONE */ + + case 0x018: /* DISPC_IRQSTATUS */ + return s->dispc.irqst; + + case 0x01c: /* DISPC_IRQENABLE */ + return s->dispc.irqen; + + case 0x040: /* DISPC_CONTROL */ + return s->dispc.control; + + case 0x044: /* DISPC_CONFIG */ + return s->dispc.config; + + case 0x048: /* DISPC_CAPABLE */ + return s->dispc.capable; + + case 0x04c: /* DISPC_DEFAULT_COLOR0 */ + return s->dispc.bg[0]; + case 0x050: /* DISPC_DEFAULT_COLOR1 */ + return s->dispc.bg[1]; + case 0x054: /* DISPC_TRANS_COLOR0 */ + return s->dispc.trans[0]; + case 0x058: /* DISPC_TRANS_COLOR1 */ + return s->dispc.trans[1]; + + case 0x05c: /* DISPC_LINE_STATUS */ + return 0x7ff; + case 0x060: /* DISPC_LINE_NUMBER */ + return s->dispc.line; + + case 0x064: /* DISPC_TIMING_H */ + return s->dispc.timing[0]; + case 0x068: /* DISPC_TIMING_V */ + return s->dispc.timing[1]; + case 0x06c: /* DISPC_POL_FREQ */ + return s->dispc.timing[2]; + case 0x070: /* DISPC_DIVISOR */ + return s->dispc.timing[3]; + + case 0x078: /* DISPC_SIZE_DIG */ + return ((s->dig.ny - 1) << 16) | (s->dig.nx - 1); + case 0x07c: /* DISPC_SIZE_LCD */ + return ((s->lcd.ny - 1) << 16) | (s->lcd.nx - 1); + + case 0x080: /* DISPC_GFX_BA0 */ + return s->dispc.l[0].addr[0]; + case 0x084: /* DISPC_GFX_BA1 */ + return s->dispc.l[0].addr[1]; + case 0x088: /* DISPC_GFX_POSITION */ + return (s->dispc.l[0].posy << 16) | s->dispc.l[0].posx; + case 0x08c: /* DISPC_GFX_SIZE */ + return ((s->dispc.l[0].ny - 1) << 16) | (s->dispc.l[0].nx - 1); + case 0x0a0: /* DISPC_GFX_ATTRIBUTES */ + return s->dispc.l[0].attr; + case 0x0a4: /* DISPC_GFX_FIFO_TRESHOLD */ + return s->dispc.l[0].tresh; + case 0x0a8: /* DISPC_GFX_FIFO_SIZE_STATUS */ + return 256; + case 0x0ac: /* DISPC_GFX_ROW_INC */ + return s->dispc.l[0].rowinc; + case 0x0b0: /* DISPC_GFX_PIXEL_INC */ + return s->dispc.l[0].colinc; + case 0x0b4: /* DISPC_GFX_WINDOW_SKIP */ + return s->dispc.l[0].wininc; + case 0x0b8: /* DISPC_GFX_TABLE_BA */ + return s->dispc.l[0].addr[2]; + + case 0x0bc: /* DISPC_VID1_BA0 */ + case 0x0c0: /* DISPC_VID1_BA1 */ + 
case 0x0c4: /* DISPC_VID1_POSITION */ + case 0x0c8: /* DISPC_VID1_SIZE */ + case 0x0cc: /* DISPC_VID1_ATTRIBUTES */ + case 0x0d0: /* DISPC_VID1_FIFO_TRESHOLD */ + case 0x0d4: /* DISPC_VID1_FIFO_SIZE_STATUS */ + case 0x0d8: /* DISPC_VID1_ROW_INC */ + case 0x0dc: /* DISPC_VID1_PIXEL_INC */ + case 0x0e0: /* DISPC_VID1_FIR */ + case 0x0e4: /* DISPC_VID1_PICTURE_SIZE */ + case 0x0e8: /* DISPC_VID1_ACCU0 */ + case 0x0ec: /* DISPC_VID1_ACCU1 */ + case 0x0f0 ... 0x140: /* DISPC_VID1_FIR_COEF, DISPC_VID1_CONV_COEF */ + case 0x14c: /* DISPC_VID2_BA0 */ + case 0x150: /* DISPC_VID2_BA1 */ + case 0x154: /* DISPC_VID2_POSITION */ + case 0x158: /* DISPC_VID2_SIZE */ + case 0x15c: /* DISPC_VID2_ATTRIBUTES */ + case 0x160: /* DISPC_VID2_FIFO_TRESHOLD */ + case 0x164: /* DISPC_VID2_FIFO_SIZE_STATUS */ + case 0x168: /* DISPC_VID2_ROW_INC */ + case 0x16c: /* DISPC_VID2_PIXEL_INC */ + case 0x170: /* DISPC_VID2_FIR */ + case 0x174: /* DISPC_VID2_PICTURE_SIZE */ + case 0x178: /* DISPC_VID2_ACCU0 */ + case 0x17c: /* DISPC_VID2_ACCU1 */ + case 0x180 ... 0x1d0: /* DISPC_VID2_FIR_COEF, DISPC_VID2_CONV_COEF */ + case 0x1d4: /* DISPC_DATA_CYCLE1 */ + case 0x1d8: /* DISPC_DATA_CYCLE2 */ + case 0x1dc: /* DISPC_DATA_CYCLE3 */ + return 0; + + default: + break; + } + OMAP_BAD_REG(addr); + return 0; +} + +static void omap_disc_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + struct omap_dss_s *s = (struct omap_dss_s *) opaque; + + if (size != 4) { + omap_badwidth_write32(opaque, addr, value); + return; + } + + switch (addr) { + case 0x010: /* DISPC_SYSCONFIG */ + if (value & 2) /* SOFTRESET */ + omap_dss_reset(s); + s->dispc.idlemode = value & 0x301b; + break; + + case 0x018: /* DISPC_IRQSTATUS */ + s->dispc.irqst &= ~value; + omap_dispc_interrupt_update(s); + break; + + case 0x01c: /* DISPC_IRQENABLE */ + s->dispc.irqen = value & 0xffff; + omap_dispc_interrupt_update(s); + break; + + case 0x040: /* DISPC_CONTROL */ + s->dispc.control = value & 0x07ff9fff; + s->dig.enable = (value >> 1) & 1; + s->lcd.enable = (value >> 0) & 1; + if (value & (1 << 12)) /* OVERLAY_OPTIMIZATION */ + if (!((s->dispc.l[1].attr | s->dispc.l[2].attr) & 1)) { + fprintf(stderr, "%s: Overlay Optimization when no overlay " + "region effectively exists leads to " + "unpredictable behaviour!\n", __func__); + } + if (value & (1 << 6)) { /* GODIGITAL */ + /* XXX: Shadowed fields are: + * s->dispc.config + * s->dispc.capable + * s->dispc.bg[0] + * s->dispc.bg[1] + * s->dispc.trans[0] + * s->dispc.trans[1] + * s->dispc.line + * s->dispc.timing[0] + * s->dispc.timing[1] + * s->dispc.timing[2] + * s->dispc.timing[3] + * s->lcd.nx + * s->lcd.ny + * s->dig.nx + * s->dig.ny + * s->dispc.l[0].addr[0] + * s->dispc.l[0].addr[1] + * s->dispc.l[0].addr[2] + * s->dispc.l[0].posx + * s->dispc.l[0].posy + * s->dispc.l[0].nx + * s->dispc.l[0].ny + * s->dispc.l[0].tresh + * s->dispc.l[0].rowinc + * s->dispc.l[0].colinc + * s->dispc.l[0].wininc + * All they need to be loaded here from their shadow registers. + */ + } + if (value & (1 << 5)) { /* GOLCD */ + /* XXX: Likewise for LCD here. 
*/ + } + s->dispc.invalidate = 1; + break; + + case 0x044: /* DISPC_CONFIG */ + s->dispc.config = value & 0x3fff; + /* XXX: + * bits 2:1 (LOADMODE) reset to 0 after set to 1 and palette loaded + * bits 2:1 (LOADMODE) reset to 2 after set to 3 and palette loaded + */ + s->dispc.invalidate = 1; + break; + + case 0x048: /* DISPC_CAPABLE */ + s->dispc.capable = value & 0x3ff; + break; + + case 0x04c: /* DISPC_DEFAULT_COLOR0 */ + s->dispc.bg[0] = value & 0xffffff; + s->dispc.invalidate = 1; + break; + case 0x050: /* DISPC_DEFAULT_COLOR1 */ + s->dispc.bg[1] = value & 0xffffff; + s->dispc.invalidate = 1; + break; + case 0x054: /* DISPC_TRANS_COLOR0 */ + s->dispc.trans[0] = value & 0xffffff; + s->dispc.invalidate = 1; + break; + case 0x058: /* DISPC_TRANS_COLOR1 */ + s->dispc.trans[1] = value & 0xffffff; + s->dispc.invalidate = 1; + break; + + case 0x060: /* DISPC_LINE_NUMBER */ + s->dispc.line = value & 0x7ff; + break; + + case 0x064: /* DISPC_TIMING_H */ + s->dispc.timing[0] = value & 0x0ff0ff3f; + break; + case 0x068: /* DISPC_TIMING_V */ + s->dispc.timing[1] = value & 0x0ff0ff3f; + break; + case 0x06c: /* DISPC_POL_FREQ */ + s->dispc.timing[2] = value & 0x0003ffff; + break; + case 0x070: /* DISPC_DIVISOR */ + s->dispc.timing[3] = value & 0x00ff00ff; + break; + + case 0x078: /* DISPC_SIZE_DIG */ + s->dig.nx = ((value >> 0) & 0x7ff) + 1; /* PPL */ + s->dig.ny = ((value >> 16) & 0x7ff) + 1; /* LPP */ + s->dispc.invalidate = 1; + break; + case 0x07c: /* DISPC_SIZE_LCD */ + s->lcd.nx = ((value >> 0) & 0x7ff) + 1; /* PPL */ + s->lcd.ny = ((value >> 16) & 0x7ff) + 1; /* LPP */ + s->dispc.invalidate = 1; + break; + case 0x080: /* DISPC_GFX_BA0 */ + s->dispc.l[0].addr[0] = (hwaddr) value; + s->dispc.invalidate = 1; + break; + case 0x084: /* DISPC_GFX_BA1 */ + s->dispc.l[0].addr[1] = (hwaddr) value; + s->dispc.invalidate = 1; + break; + case 0x088: /* DISPC_GFX_POSITION */ + s->dispc.l[0].posx = ((value >> 0) & 0x7ff); /* GFXPOSX */ + s->dispc.l[0].posy = ((value >> 16) & 0x7ff); /* GFXPOSY */ + s->dispc.invalidate = 1; + break; + case 0x08c: /* DISPC_GFX_SIZE */ + s->dispc.l[0].nx = ((value >> 0) & 0x7ff) + 1; /* GFXSIZEX */ + s->dispc.l[0].ny = ((value >> 16) & 0x7ff) + 1; /* GFXSIZEY */ + s->dispc.invalidate = 1; + break; + case 0x0a0: /* DISPC_GFX_ATTRIBUTES */ + s->dispc.l[0].attr = value & 0x7ff; + if (value & (3 << 9)) + fprintf(stderr, "%s: Big-endian pixel format not supported\n", + __func__); + s->dispc.l[0].enable = value & 1; + s->dispc.l[0].bpp = (value >> 1) & 0xf; + s->dispc.invalidate = 1; + break; + case 0x0a4: /* DISPC_GFX_FIFO_TRESHOLD */ + s->dispc.l[0].tresh = value & 0x01ff01ff; + break; + case 0x0ac: /* DISPC_GFX_ROW_INC */ + s->dispc.l[0].rowinc = value; + s->dispc.invalidate = 1; + break; + case 0x0b0: /* DISPC_GFX_PIXEL_INC */ + s->dispc.l[0].colinc = value; + s->dispc.invalidate = 1; + break; + case 0x0b4: /* DISPC_GFX_WINDOW_SKIP */ + s->dispc.l[0].wininc = value; + break; + case 0x0b8: /* DISPC_GFX_TABLE_BA */ + s->dispc.l[0].addr[2] = (hwaddr) value; + s->dispc.invalidate = 1; + break; + + case 0x0bc: /* DISPC_VID1_BA0 */ + case 0x0c0: /* DISPC_VID1_BA1 */ + case 0x0c4: /* DISPC_VID1_POSITION */ + case 0x0c8: /* DISPC_VID1_SIZE */ + case 0x0cc: /* DISPC_VID1_ATTRIBUTES */ + case 0x0d0: /* DISPC_VID1_FIFO_TRESHOLD */ + case 0x0d8: /* DISPC_VID1_ROW_INC */ + case 0x0dc: /* DISPC_VID1_PIXEL_INC */ + case 0x0e0: /* DISPC_VID1_FIR */ + case 0x0e4: /* DISPC_VID1_PICTURE_SIZE */ + case 0x0e8: /* DISPC_VID1_ACCU0 */ + case 0x0ec: /* DISPC_VID1_ACCU1 */ + case 0x0f0 ... 
0x140: /* DISPC_VID1_FIR_COEF, DISPC_VID1_CONV_COEF */ + case 0x14c: /* DISPC_VID2_BA0 */ + case 0x150: /* DISPC_VID2_BA1 */ + case 0x154: /* DISPC_VID2_POSITION */ + case 0x158: /* DISPC_VID2_SIZE */ + case 0x15c: /* DISPC_VID2_ATTRIBUTES */ + case 0x160: /* DISPC_VID2_FIFO_TRESHOLD */ + case 0x168: /* DISPC_VID2_ROW_INC */ + case 0x16c: /* DISPC_VID2_PIXEL_INC */ + case 0x170: /* DISPC_VID2_FIR */ + case 0x174: /* DISPC_VID2_PICTURE_SIZE */ + case 0x178: /* DISPC_VID2_ACCU0 */ + case 0x17c: /* DISPC_VID2_ACCU1 */ + case 0x180 ... 0x1d0: /* DISPC_VID2_FIR_COEF, DISPC_VID2_CONV_COEF */ + case 0x1d4: /* DISPC_DATA_CYCLE1 */ + case 0x1d8: /* DISPC_DATA_CYCLE2 */ + case 0x1dc: /* DISPC_DATA_CYCLE3 */ + break; + + default: + OMAP_BAD_REG(addr); + } +} + +static const MemoryRegionOps omap_disc_ops = { + .read = omap_disc_read, + .write = omap_disc_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void omap_rfbi_transfer_stop(struct omap_dss_s *s) +{ + if (!s->rfbi.busy) + return; + + /* TODO: in non-Bypass mode we probably need to just deassert the DRQ. */ + + s->rfbi.busy = 0; +} + +static void omap_rfbi_transfer_start(struct omap_dss_s *s) +{ + void *data; + hwaddr len; + hwaddr data_addr; + int pitch; + static void *bounce_buffer; + static hwaddr bounce_len; + + if (!s->rfbi.enable || s->rfbi.busy) + return; + + if (s->rfbi.control & (1 << 1)) { /* BYPASS */ + /* TODO: in non-Bypass mode we probably need to just assert the + * DRQ and wait for DMA to write the pixels. */ + qemu_log_mask(LOG_UNIMP, "%s: Bypass mode unimplemented\n", __func__); + return; + } + + if (!(s->dispc.control & (1 << 11))) /* RFBIMODE */ + return; + /* TODO: check that LCD output is enabled in DISPC. */ + + s->rfbi.busy = 1; + + len = s->rfbi.pixels * 2; + + data_addr = s->dispc.l[0].addr[0]; + data = cpu_physical_memory_map(data_addr, &len, false); + if (data && len != s->rfbi.pixels * 2) { + cpu_physical_memory_unmap(data, len, 0, 0); + data = NULL; + len = s->rfbi.pixels * 2; + } + if (!data) { + if (len > bounce_len) { + bounce_buffer = g_realloc(bounce_buffer, len); + } + data = bounce_buffer; + cpu_physical_memory_read(data_addr, data, len); + } + + /* TODO bpp */ + s->rfbi.pixels = 0; + + /* TODO: negative values */ + pitch = s->dispc.l[0].nx + (s->dispc.l[0].rowinc - 1) / 2; + + if ((s->rfbi.control & (1 << 2)) && s->rfbi.chip[0]) + s->rfbi.chip[0]->block(s->rfbi.chip[0]->opaque, 1, data, len, pitch); + if ((s->rfbi.control & (1 << 3)) && s->rfbi.chip[1]) + s->rfbi.chip[1]->block(s->rfbi.chip[1]->opaque, 1, data, len, pitch); + + if (data != bounce_buffer) { + cpu_physical_memory_unmap(data, len, 0, len); + } + + omap_rfbi_transfer_stop(s); + + /* TODO */ + s->dispc.irqst |= 1; /* FRAMEDONE */ + omap_dispc_interrupt_update(s); +} + +static uint64_t omap_rfbi_read(void *opaque, hwaddr addr, + unsigned size) +{ + struct omap_dss_s *s = (struct omap_dss_s *) opaque; + + if (size != 4) { + return omap_badwidth_read32(opaque, addr); + } + + switch (addr) { + case 0x00: /* RFBI_REVISION */ + return 0x10; + + case 0x10: /* RFBI_SYSCONFIG */ + return s->rfbi.idlemode; + + case 0x14: /* RFBI_SYSSTATUS */ + return 1 | (s->rfbi.busy << 8); /* RESETDONE */ + + case 0x40: /* RFBI_CONTROL */ + return s->rfbi.control; + + case 0x44: /* RFBI_PIXELCNT */ + return s->rfbi.pixels; + + case 0x48: /* RFBI_LINE_NUMBER */ + return s->rfbi.skiplines; + + case 0x58: /* RFBI_READ */ + case 0x5c: /* RFBI_STATUS */ + return s->rfbi.rxbuf; + + case 0x60: /* RFBI_CONFIG0 */ + return s->rfbi.config[0]; + case 0x64: /* 
RFBI_ONOFF_TIME0 */ + return s->rfbi.time[0]; + case 0x68: /* RFBI_CYCLE_TIME0 */ + return s->rfbi.time[1]; + case 0x6c: /* RFBI_DATA_CYCLE1_0 */ + return s->rfbi.data[0]; + case 0x70: /* RFBI_DATA_CYCLE2_0 */ + return s->rfbi.data[1]; + case 0x74: /* RFBI_DATA_CYCLE3_0 */ + return s->rfbi.data[2]; + + case 0x78: /* RFBI_CONFIG1 */ + return s->rfbi.config[1]; + case 0x7c: /* RFBI_ONOFF_TIME1 */ + return s->rfbi.time[2]; + case 0x80: /* RFBI_CYCLE_TIME1 */ + return s->rfbi.time[3]; + case 0x84: /* RFBI_DATA_CYCLE1_1 */ + return s->rfbi.data[3]; + case 0x88: /* RFBI_DATA_CYCLE2_1 */ + return s->rfbi.data[4]; + case 0x8c: /* RFBI_DATA_CYCLE3_1 */ + return s->rfbi.data[5]; + + case 0x90: /* RFBI_VSYNC_WIDTH */ + return s->rfbi.vsync; + case 0x94: /* RFBI_HSYNC_WIDTH */ + return s->rfbi.hsync; + } + OMAP_BAD_REG(addr); + return 0; +} + +static void omap_rfbi_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + struct omap_dss_s *s = (struct omap_dss_s *) opaque; + + if (size != 4) { + omap_badwidth_write32(opaque, addr, value); + return; + } + + switch (addr) { + case 0x10: /* RFBI_SYSCONFIG */ + if (value & 2) /* SOFTRESET */ + omap_rfbi_reset(s); + s->rfbi.idlemode = value & 0x19; + break; + + case 0x40: /* RFBI_CONTROL */ + s->rfbi.control = value & 0xf; + s->rfbi.enable = value & 1; + if (value & (1 << 4) && /* ITE */ + !(s->rfbi.config[0] & s->rfbi.config[1] & 0xc)) + omap_rfbi_transfer_start(s); + break; + + case 0x44: /* RFBI_PIXELCNT */ + s->rfbi.pixels = value; + break; + + case 0x48: /* RFBI_LINE_NUMBER */ + s->rfbi.skiplines = value & 0x7ff; + break; + + case 0x4c: /* RFBI_CMD */ + if ((s->rfbi.control & (1 << 2)) && s->rfbi.chip[0]) + s->rfbi.chip[0]->write(s->rfbi.chip[0]->opaque, 0, value & 0xffff); + if ((s->rfbi.control & (1 << 3)) && s->rfbi.chip[1]) + s->rfbi.chip[1]->write(s->rfbi.chip[1]->opaque, 0, value & 0xffff); + break; + case 0x50: /* RFBI_PARAM */ + if ((s->rfbi.control & (1 << 2)) && s->rfbi.chip[0]) + s->rfbi.chip[0]->write(s->rfbi.chip[0]->opaque, 1, value & 0xffff); + if ((s->rfbi.control & (1 << 3)) && s->rfbi.chip[1]) + s->rfbi.chip[1]->write(s->rfbi.chip[1]->opaque, 1, value & 0xffff); + break; + case 0x54: /* RFBI_DATA */ + /* TODO: take into account the format set up in s->rfbi.config[?] and + * s->rfbi.data[?], but special-case the most usual scenario so that + * speed doesn't suffer. 
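+         * In the common case handled below, the 32-bit value is simply
+         * split into two 16-bit writes to each enabled chip select,
+         * least significant half first.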
*/ + if ((s->rfbi.control & (1 << 2)) && s->rfbi.chip[0]) { + s->rfbi.chip[0]->write(s->rfbi.chip[0]->opaque, 1, value & 0xffff); + s->rfbi.chip[0]->write(s->rfbi.chip[0]->opaque, 1, value >> 16); + } + if ((s->rfbi.control & (1 << 3)) && s->rfbi.chip[1]) { + s->rfbi.chip[1]->write(s->rfbi.chip[1]->opaque, 1, value & 0xffff); + s->rfbi.chip[1]->write(s->rfbi.chip[1]->opaque, 1, value >> 16); + } + if (!-- s->rfbi.pixels) + omap_rfbi_transfer_stop(s); + break; + case 0x58: /* RFBI_READ */ + if ((s->rfbi.control & (1 << 2)) && s->rfbi.chip[0]) + s->rfbi.rxbuf = s->rfbi.chip[0]->read(s->rfbi.chip[0]->opaque, 1); + else if ((s->rfbi.control & (1 << 3)) && s->rfbi.chip[1]) + s->rfbi.rxbuf = s->rfbi.chip[1]->read(s->rfbi.chip[1]->opaque, 1); + if (!-- s->rfbi.pixels) + omap_rfbi_transfer_stop(s); + break; + + case 0x5c: /* RFBI_STATUS */ + if ((s->rfbi.control & (1 << 2)) && s->rfbi.chip[0]) + s->rfbi.rxbuf = s->rfbi.chip[0]->read(s->rfbi.chip[0]->opaque, 0); + else if ((s->rfbi.control & (1 << 3)) && s->rfbi.chip[1]) + s->rfbi.rxbuf = s->rfbi.chip[1]->read(s->rfbi.chip[1]->opaque, 0); + if (!-- s->rfbi.pixels) + omap_rfbi_transfer_stop(s); + break; + + case 0x60: /* RFBI_CONFIG0 */ + s->rfbi.config[0] = value & 0x003f1fff; + break; + + case 0x64: /* RFBI_ONOFF_TIME0 */ + s->rfbi.time[0] = value & 0x3fffffff; + break; + case 0x68: /* RFBI_CYCLE_TIME0 */ + s->rfbi.time[1] = value & 0x0fffffff; + break; + case 0x6c: /* RFBI_DATA_CYCLE1_0 */ + s->rfbi.data[0] = value & 0x0f1f0f1f; + break; + case 0x70: /* RFBI_DATA_CYCLE2_0 */ + s->rfbi.data[1] = value & 0x0f1f0f1f; + break; + case 0x74: /* RFBI_DATA_CYCLE3_0 */ + s->rfbi.data[2] = value & 0x0f1f0f1f; + break; + case 0x78: /* RFBI_CONFIG1 */ + s->rfbi.config[1] = value & 0x003f1fff; + break; + + case 0x7c: /* RFBI_ONOFF_TIME1 */ + s->rfbi.time[2] = value & 0x3fffffff; + break; + case 0x80: /* RFBI_CYCLE_TIME1 */ + s->rfbi.time[3] = value & 0x0fffffff; + break; + case 0x84: /* RFBI_DATA_CYCLE1_1 */ + s->rfbi.data[3] = value & 0x0f1f0f1f; + break; + case 0x88: /* RFBI_DATA_CYCLE2_1 */ + s->rfbi.data[4] = value & 0x0f1f0f1f; + break; + case 0x8c: /* RFBI_DATA_CYCLE3_1 */ + s->rfbi.data[5] = value & 0x0f1f0f1f; + break; + + case 0x90: /* RFBI_VSYNC_WIDTH */ + s->rfbi.vsync = value & 0xffff; + break; + case 0x94: /* RFBI_HSYNC_WIDTH */ + s->rfbi.hsync = value & 0xffff; + break; + + default: + OMAP_BAD_REG(addr); + } +} + +static const MemoryRegionOps omap_rfbi_ops = { + .read = omap_rfbi_read, + .write = omap_rfbi_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static uint64_t omap_venc_read(void *opaque, hwaddr addr, + unsigned size) +{ + if (size != 4) { + return omap_badwidth_read32(opaque, addr); + } + + switch (addr) { + case 0x00: /* REV_ID */ + case 0x04: /* STATUS */ + case 0x08: /* F_CONTROL */ + case 0x10: /* VIDOUT_CTRL */ + case 0x14: /* SYNC_CTRL */ + case 0x1c: /* LLEN */ + case 0x20: /* FLENS */ + case 0x24: /* HFLTR_CTRL */ + case 0x28: /* CC_CARR_WSS_CARR */ + case 0x2c: /* C_PHASE */ + case 0x30: /* GAIN_U */ + case 0x34: /* GAIN_V */ + case 0x38: /* GAIN_Y */ + case 0x3c: /* BLACK_LEVEL */ + case 0x40: /* BLANK_LEVEL */ + case 0x44: /* X_COLOR */ + case 0x48: /* M_CONTROL */ + case 0x4c: /* BSTAMP_WSS_DATA */ + case 0x50: /* S_CARR */ + case 0x54: /* LINE21 */ + case 0x58: /* LN_SEL */ + case 0x5c: /* L21__WC_CTL */ + case 0x60: /* HTRIGGER_VTRIGGER */ + case 0x64: /* SAVID__EAVID */ + case 0x68: /* FLEN__FAL */ + case 0x6c: /* LAL__PHASE_RESET */ + case 0x70: /* HS_INT_START_STOP_X */ + case 0x74: /* HS_EXT_START_STOP_X */ + case 
0x78: /* VS_INT_START_X */
+    case 0x7c: /* VS_INT_STOP_X__VS_INT_START_Y */
+    case 0x80: /* VS_INT_STOP_Y__VS_INT_START_X */
+    case 0x84: /* VS_EXT_STOP_X__VS_EXT_START_Y */
+    case 0x88: /* VS_EXT_STOP_Y */
+    case 0x90: /* AVID_START_STOP_X */
+    case 0x94: /* AVID_START_STOP_Y */
+    case 0xa0: /* FID_INT_START_X__FID_INT_START_Y */
+    case 0xa4: /* FID_INT_OFFSET_Y__FID_EXT_START_X */
+    case 0xa8: /* FID_EXT_START_Y__FID_EXT_OFFSET_Y */
+    case 0xb0: /* TVDETGP_INT_START_STOP_X */
+    case 0xb4: /* TVDETGP_INT_START_STOP_Y */
+    case 0xb8: /* GEN_CTRL */
+    case 0xc4: /* DAC_TST__DAC_A */
+    case 0xc8: /* DAC_B__DAC_C */
+        return 0;
+
+    default:
+        break;
+    }
+    OMAP_BAD_REG(addr);
+    return 0;
+}
+
+static void omap_venc_write(void *opaque, hwaddr addr,
+                            uint64_t value, unsigned size)
+{
+    if (size != 4) {
+        omap_badwidth_write32(opaque, addr, value);
+        return;
+    }
+
+    switch (addr) {
+    case 0x08: /* F_CONTROL */
+    case 0x10: /* VIDOUT_CTRL */
+    case 0x14: /* SYNC_CTRL */
+    case 0x1c: /* LLEN */
+    case 0x20: /* FLENS */
+    case 0x24: /* HFLTR_CTRL */
+    case 0x28: /* CC_CARR_WSS_CARR */
+    case 0x2c: /* C_PHASE */
+    case 0x30: /* GAIN_U */
+    case 0x34: /* GAIN_V */
+    case 0x38: /* GAIN_Y */
+    case 0x3c: /* BLACK_LEVEL */
+    case 0x40: /* BLANK_LEVEL */
+    case 0x44: /* X_COLOR */
+    case 0x48: /* M_CONTROL */
+    case 0x4c: /* BSTAMP_WSS_DATA */
+    case 0x50: /* S_CARR */
+    case 0x54: /* LINE21 */
+    case 0x58: /* LN_SEL */
+    case 0x5c: /* L21__WC_CTL */
+    case 0x60: /* HTRIGGER_VTRIGGER */
+    case 0x64: /* SAVID__EAVID */
+    case 0x68: /* FLEN__FAL */
+    case 0x6c: /* LAL__PHASE_RESET */
+    case 0x70: /* HS_INT_START_STOP_X */
+    case 0x74: /* HS_EXT_START_STOP_X */
+    case 0x78: /* VS_INT_START_X */
+    case 0x7c: /* VS_INT_STOP_X__VS_INT_START_Y */
+    case 0x80: /* VS_INT_STOP_Y__VS_INT_START_X */
+    case 0x84: /* VS_EXT_STOP_X__VS_EXT_START_Y */
+    case 0x88: /* VS_EXT_STOP_Y */
+    case 0x90: /* AVID_START_STOP_X */
+    case 0x94: /* AVID_START_STOP_Y */
+    case 0xa0: /* FID_INT_START_X__FID_INT_START_Y */
+    case 0xa4: /* FID_INT_OFFSET_Y__FID_EXT_START_X */
+    case 0xa8: /* FID_EXT_START_Y__FID_EXT_OFFSET_Y */
+    case 0xb0: /* TVDETGP_INT_START_STOP_X */
+    case 0xb4: /* TVDETGP_INT_START_STOP_Y */
+    case 0xb8: /* GEN_CTRL */
+    case 0xc4: /* DAC_TST__DAC_A */
+    case 0xc8: /* DAC_B__DAC_C */
+        break;
+
+    default:
+        OMAP_BAD_REG(addr);
+    }
+}
+
+static const MemoryRegionOps omap_venc_ops = {
+    .read = omap_venc_read,
+    .write = omap_venc_write,
+    .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+static uint64_t omap_im3_read(void *opaque, hwaddr addr,
+                              unsigned size)
+{
+    if (size != 4) {
+        return omap_badwidth_read32(opaque, addr);
+    }
+
+    switch (addr) {
+    case 0x0a8: /* SBIMERRLOGA */
+    case 0x0b0: /* SBIMERRLOG */
+    case 0x190: /* SBIMSTATE */
+    case 0x198: /* SBTMSTATE_L */
+    case 0x19c: /* SBTMSTATE_H */
+    case 0x1a8: /* SBIMCONFIG_L */
+    case 0x1ac: /* SBIMCONFIG_H */
+    case 0x1f8: /* SBID_L */
+    case 0x1fc: /* SBID_H */
+        return 0;
+
+    default:
+        break;
+    }
+    OMAP_BAD_REG(addr);
+    return 0;
+}
+
+static void omap_im3_write(void *opaque, hwaddr addr,
+                           uint64_t value, unsigned size)
+{
+    if (size != 4) {
+        omap_badwidth_write32(opaque, addr, value);
+        return;
+    }
+
+    switch (addr) {
+    case 0x0b0: /* SBIMERRLOG */
+    case 0x190: /* SBIMSTATE */
+    case 0x198: /* SBTMSTATE_L */
+    case 0x19c: /* SBTMSTATE_H */
+    case 0x1a8: /* SBIMCONFIG_L */
+    case 0x1ac: /* SBIMCONFIG_H */
+        break;
+
+    default:
+        OMAP_BAD_REG(addr);
+    }
+}
+
+static const MemoryRegionOps omap_im3_ops = {
+    .read = omap_im3_read,
+    .write = omap_im3_write,
+    .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+struct omap_dss_s *omap_dss_init(struct omap_target_agent_s *ta,
+                MemoryRegion *sysmem,
+                hwaddr l3_base,
+                qemu_irq irq, qemu_irq drq,
+                omap_clk fck1, omap_clk fck2, omap_clk ck54m,
+                omap_clk ick1, omap_clk ick2)
+{
+    struct omap_dss_s *s = g_new0(struct omap_dss_s, 1);
+
+    s->irq = irq;
+    s->drq = drq;
+    omap_dss_reset(s);
+
+    memory_region_init_io(&s->iomem_diss1, NULL, &omap_diss_ops, s, "omap.diss1",
+                          omap_l4_region_size(ta, 0));
+    memory_region_init_io(&s->iomem_disc1, NULL, &omap_disc_ops, s, "omap.disc1",
+                          omap_l4_region_size(ta, 1));
+    memory_region_init_io(&s->iomem_rfbi1, NULL, &omap_rfbi_ops, s, "omap.rfbi1",
+                          omap_l4_region_size(ta, 2));
+    memory_region_init_io(&s->iomem_venc1, NULL, &omap_venc_ops, s, "omap.venc1",
+                          omap_l4_region_size(ta, 3));
+    memory_region_init_io(&s->iomem_im3, NULL, &omap_im3_ops, s,
+                          "omap.im3", 0x1000);
+
+    omap_l4_attach(ta, 0, &s->iomem_diss1);
+    omap_l4_attach(ta, 1, &s->iomem_disc1);
+    omap_l4_attach(ta, 2, &s->iomem_rfbi1);
+    omap_l4_attach(ta, 3, &s->iomem_venc1);
+    memory_region_add_subregion(sysmem, l3_base, &s->iomem_im3);
+
+#if 0
+    s->state = graphic_console_init(omap_update_display,
+                    omap_invalidate_display, omap_screen_dump, s);
+#endif
+
+    return s;
+}
+
+void omap_rfbi_attach(struct omap_dss_s *s, int cs, struct rfbi_chip_s *chip)
+{
+    if (cs < 0 || cs > 1)
+        hw_error("%s: wrong CS %i\n", __func__, cs);
+    s->rfbi.chip[cs] = chip;
+}
diff --git a/hw/display/omap_lcdc.c b/hw/display/omap_lcdc.c
new file mode 100644
index 000000000..0ba42ef63
--- /dev/null
+++ b/hw/display/omap_lcdc.c
@@ -0,0 +1,508 @@
+/*
+ * OMAP LCD controller.
+ *
+ * Copyright (C) 2006-2007 Andrzej Zaborowski
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */ + +#include "qemu/osdep.h" +#include "hw/irq.h" +#include "ui/console.h" +#include "hw/arm/omap.h" +#include "framebuffer.h" +#include "ui/pixel_ops.h" + +struct omap_lcd_panel_s { + MemoryRegion *sysmem; + MemoryRegion iomem; + MemoryRegionSection fbsection; + qemu_irq irq; + QemuConsole *con; + + int plm; + int tft; + int mono; + int enable; + int width; + int height; + int interrupts; + uint32_t timing[3]; + uint32_t subpanel; + uint32_t ctrl; + + struct omap_dma_lcd_channel_s *dma; + uint16_t palette[256]; + int palette_done; + int frame_done; + int invalidate; + int sync_error; +}; + +static void omap_lcd_interrupts(struct omap_lcd_panel_s *s) +{ + if (s->frame_done && (s->interrupts & 1)) { + qemu_irq_raise(s->irq); + return; + } + + if (s->palette_done && (s->interrupts & 2)) { + qemu_irq_raise(s->irq); + return; + } + + if (s->sync_error) { + qemu_irq_raise(s->irq); + return; + } + + qemu_irq_lower(s->irq); +} + +/* + * 2-bit colour + */ +static void draw_line2_32(void *opaque, uint8_t *d, const uint8_t *s, + int width, int deststep) +{ + uint16_t *pal = opaque; + uint8_t v, r, g, b; + + do { + v = ldub_p((void *) s); + r = (pal[v & 3] >> 4) & 0xf0; + g = pal[v & 3] & 0xf0; + b = (pal[v & 3] << 4) & 0xf0; + ((uint32_t *) d)[0] = rgb_to_pixel32(r, g, b); + d += 4; + v >>= 2; + r = (pal[v & 3] >> 4) & 0xf0; + g = pal[v & 3] & 0xf0; + b = (pal[v & 3] << 4) & 0xf0; + ((uint32_t *) d)[0] = rgb_to_pixel32(r, g, b); + d += 4; + v >>= 2; + r = (pal[v & 3] >> 4) & 0xf0; + g = pal[v & 3] & 0xf0; + b = (pal[v & 3] << 4) & 0xf0; + ((uint32_t *) d)[0] = rgb_to_pixel32(r, g, b); + d += 4; + v >>= 2; + r = (pal[v & 3] >> 4) & 0xf0; + g = pal[v & 3] & 0xf0; + b = (pal[v & 3] << 4) & 0xf0; + ((uint32_t *) d)[0] = rgb_to_pixel32(r, g, b); + d += 4; + s++; + width -= 4; + } while (width > 0); +} + +/* + * 4-bit colour + */ +static void draw_line4_32(void *opaque, uint8_t *d, const uint8_t *s, + int width, int deststep) +{ + uint16_t *pal = opaque; + uint8_t v, r, g, b; + + do { + v = ldub_p((void *) s); + r = (pal[v & 0xf] >> 4) & 0xf0; + g = pal[v & 0xf] & 0xf0; + b = (pal[v & 0xf] << 4) & 0xf0; + ((uint32_t *) d)[0] = rgb_to_pixel32(r, g, b); + d += 4; + v >>= 4; + r = (pal[v & 0xf] >> 4) & 0xf0; + g = pal[v & 0xf] & 0xf0; + b = (pal[v & 0xf] << 4) & 0xf0; + ((uint32_t *) d)[0] = rgb_to_pixel32(r, g, b); + d += 4; + s++; + width -= 2; + } while (width > 0); +} + +/* + * 8-bit colour + */ +static void draw_line8_32(void *opaque, uint8_t *d, const uint8_t *s, + int width, int deststep) +{ + uint16_t *pal = opaque; + uint8_t v, r, g, b; + + do { + v = ldub_p((void *) s); + r = (pal[v] >> 4) & 0xf0; + g = pal[v] & 0xf0; + b = (pal[v] << 4) & 0xf0; + ((uint32_t *) d)[0] = rgb_to_pixel32(r, g, b); + s++; + d += 4; + } while (-- width != 0); +} + +/* + * 12-bit colour + */ +static void draw_line12_32(void *opaque, uint8_t *d, const uint8_t *s, + int width, int deststep) +{ + uint16_t v; + uint8_t r, g, b; + + do { + v = lduw_le_p((void *) s); + r = (v >> 4) & 0xf0; + g = v & 0xf0; + b = (v << 4) & 0xf0; + ((uint32_t *) d)[0] = rgb_to_pixel32(r, g, b); + s += 2; + d += 4; + } while (-- width != 0); +} + +/* + * 16-bit colour + */ +static void draw_line16_32(void *opaque, uint8_t *d, const uint8_t *s, + int width, int deststep) +{ + uint16_t v; + uint8_t r, g, b; + + do { + v = lduw_le_p((void *) s); + r = (v >> 8) & 0xf8; + g = (v >> 3) & 0xfc; + b = (v << 3) & 0xf8; + ((uint32_t *) d)[0] = rgb_to_pixel32(r, g, b); + s += 2; + d += 4; + } while (-- width != 0); +} + +static void 
omap_update_display(void *opaque) +{ + struct omap_lcd_panel_s *omap_lcd = (struct omap_lcd_panel_s *) opaque; + DisplaySurface *surface; + drawfn draw_line; + int size, height, first, last; + int width, linesize, step, bpp, frame_offset; + hwaddr frame_base; + + if (!omap_lcd || omap_lcd->plm == 1 || !omap_lcd->enable) { + return; + } + + surface = qemu_console_surface(omap_lcd->con); + if (!surface_bits_per_pixel(surface)) { + return; + } + + frame_offset = 0; + if (omap_lcd->plm != 2) { + cpu_physical_memory_read( + omap_lcd->dma->phys_framebuffer[omap_lcd->dma->current_frame], + omap_lcd->palette, 0x200); + switch (omap_lcd->palette[0] >> 12 & 7) { + case 3 ... 7: + frame_offset += 0x200; + break; + default: + frame_offset += 0x20; + } + } + + /* Colour depth */ + switch ((omap_lcd->palette[0] >> 12) & 7) { + case 1: + draw_line = draw_line2_32; + bpp = 2; + break; + + case 2: + draw_line = draw_line4_32; + bpp = 4; + break; + + case 3: + draw_line = draw_line8_32; + bpp = 8; + break; + + case 4 ... 7: + if (!omap_lcd->tft) + draw_line = draw_line12_32; + else + draw_line = draw_line16_32; + bpp = 16; + break; + + default: + /* Unsupported at the moment. */ + return; + } + + /* Resolution */ + width = omap_lcd->width; + if (width != surface_width(surface) || + omap_lcd->height != surface_height(surface)) { + qemu_console_resize(omap_lcd->con, + omap_lcd->width, omap_lcd->height); + surface = qemu_console_surface(omap_lcd->con); + omap_lcd->invalidate = 1; + } + + if (omap_lcd->dma->current_frame == 0) + size = omap_lcd->dma->src_f1_bottom - omap_lcd->dma->src_f1_top; + else + size = omap_lcd->dma->src_f2_bottom - omap_lcd->dma->src_f2_top; + + if (frame_offset + ((width * omap_lcd->height * bpp) >> 3) > size + 2) { + omap_lcd->sync_error = 1; + omap_lcd_interrupts(omap_lcd); + omap_lcd->enable = 0; + return; + } + + /* Content */ + frame_base = omap_lcd->dma->phys_framebuffer[ + omap_lcd->dma->current_frame] + frame_offset; + omap_lcd->dma->condition |= 1 << omap_lcd->dma->current_frame; + if (omap_lcd->dma->interrupts & 1) + qemu_irq_raise(omap_lcd->dma->irq); + if (omap_lcd->dma->dual) + omap_lcd->dma->current_frame ^= 1; + + if (!surface_bits_per_pixel(surface)) { + return; + } + + first = 0; + height = omap_lcd->height; + if (omap_lcd->subpanel & (1 << 31)) { + if (omap_lcd->subpanel & (1 << 29)) + first = (omap_lcd->subpanel >> 16) & 0x3ff; + else + height = (omap_lcd->subpanel >> 16) & 0x3ff; + /* TODO: fill the rest of the panel with DPD */ + } + + step = width * bpp >> 3; + linesize = surface_stride(surface); + if (omap_lcd->invalidate) { + framebuffer_update_memory_section(&omap_lcd->fbsection, + omap_lcd->sysmem, frame_base, + height, step); + } + + framebuffer_update_display(surface, &omap_lcd->fbsection, + width, height, + step, linesize, 0, + omap_lcd->invalidate, + draw_line, omap_lcd->palette, + &first, &last); + + if (first >= 0) { + dpy_gfx_update(omap_lcd->con, 0, first, width, last - first + 1); + } + omap_lcd->invalidate = 0; +} + +static void omap_invalidate_display(void *opaque) { + struct omap_lcd_panel_s *omap_lcd = opaque; + omap_lcd->invalidate = 1; +} + +static void omap_lcd_update(struct omap_lcd_panel_s *s) { + if (!s->enable) { + s->dma->current_frame = -1; + s->sync_error = 0; + if (s->plm != 1) + s->frame_done = 1; + omap_lcd_interrupts(s); + return; + } + + if (s->dma->current_frame == -1) { + s->frame_done = 0; + s->palette_done = 0; + s->dma->current_frame = 0; + } + + if (!s->dma->mpu->port[s->dma->src].addr_valid(s->dma->mpu, + s->dma->src_f1_top) 
||
+            !s->dma->mpu->port[
+                    s->dma->src].addr_valid(s->dma->mpu,
+                            s->dma->src_f1_bottom) ||
+            (s->dma->dual &&
+             (!s->dma->mpu->port[
+                      s->dma->src].addr_valid(s->dma->mpu,
+                              s->dma->src_f2_top) ||
+              !s->dma->mpu->port[
+                      s->dma->src].addr_valid(s->dma->mpu,
+                              s->dma->src_f2_bottom)))) {
+        s->dma->condition |= 1 << 2;
+        if (s->dma->interrupts & (1 << 1))
+            qemu_irq_raise(s->dma->irq);
+        s->enable = 0;
+        return;
+    }
+
+    s->dma->phys_framebuffer[0] = s->dma->src_f1_top;
+    s->dma->phys_framebuffer[1] = s->dma->src_f2_top;
+
+    if (s->plm != 2 && !s->palette_done) {
+        cpu_physical_memory_read(
+                s->dma->phys_framebuffer[s->dma->current_frame],
+                s->palette, 0x200);
+        s->palette_done = 1;
+        omap_lcd_interrupts(s);
+    }
+}
+
+static uint64_t omap_lcdc_read(void *opaque, hwaddr addr,
+                               unsigned size)
+{
+    struct omap_lcd_panel_s *s = (struct omap_lcd_panel_s *) opaque;
+
+    switch (addr) {
+    case 0x00: /* LCD_CONTROL */
+        return (s->tft << 23) | (s->plm << 20) |
+                (s->tft << 7) | (s->interrupts << 3) |
+                (s->mono << 1) | s->enable | s->ctrl | 0xfe000c34;
+
+    case 0x04: /* LCD_TIMING0 */
+        return (s->timing[0] << 10) | (s->width - 1) | 0x0000000f;
+
+    case 0x08: /* LCD_TIMING1 */
+        return (s->timing[1] << 10) | (s->height - 1);
+
+    case 0x0c: /* LCD_TIMING2 */
+        return s->timing[2] | 0xfc000000;
+
+    case 0x10: /* LCD_STATUS */
+        return (s->palette_done << 6) | (s->sync_error << 2) | s->frame_done;
+
+    case 0x14: /* LCD_SUBPANEL */
+        return s->subpanel;
+
+    default:
+        break;
+    }
+    OMAP_BAD_REG(addr);
+    return 0;
+}
+
+static void omap_lcdc_write(void *opaque, hwaddr addr,
+                            uint64_t value, unsigned size)
+{
+    struct omap_lcd_panel_s *s = (struct omap_lcd_panel_s *) opaque;
+
+    switch (addr) {
+    case 0x00: /* LCD_CONTROL */
+        s->plm = (value >> 20) & 3;
+        s->tft = (value >> 7) & 1;
+        s->interrupts = (value >> 3) & 3;
+        s->mono = (value >> 1) & 1;
+        s->ctrl = value & 0x01cff300;
+        if (s->enable != (value & 1)) {
+            s->enable = value & 1;
+            omap_lcd_update(s);
+        }
+        break;
+
+    case 0x04: /* LCD_TIMING0 */
+        s->timing[0] = value >> 10;
+        s->width = (value & 0x3ff) + 1;
+        break;
+
+    case 0x08: /* LCD_TIMING1 */
+        s->timing[1] = value >> 10;
+        s->height = (value & 0x3ff) + 1;
+        break;
+
+    case 0x0c: /* LCD_TIMING2 */
+        s->timing[2] = value;
+        break;
+
+    case 0x10: /* LCD_STATUS */
+        break;
+
+    case 0x14: /* LCD_SUBPANEL */
+        s->subpanel = value & 0xa1ffffff;
+        break;
+
+    default:
+        OMAP_BAD_REG(addr);
+    }
+}
+
+static const MemoryRegionOps omap_lcdc_ops = {
+    .read = omap_lcdc_read,
+    .write = omap_lcdc_write,
+    .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+void omap_lcdc_reset(struct omap_lcd_panel_s *s)
+{
+    s->dma->current_frame = -1;
+    s->plm = 0;
+    s->tft = 0;
+    s->mono = 0;
+    s->enable = 0;
+    s->width = 0;
+    s->height = 0;
+    s->interrupts = 0;
+    s->timing[0] = 0;
+    s->timing[1] = 0;
+    s->timing[2] = 0;
+    s->subpanel = 0;
+    s->palette_done = 0;
+    s->frame_done = 0;
+    s->sync_error = 0;
+    s->invalidate = 1;
+    s->ctrl = 0;
+}
+
+static const GraphicHwOps omap_ops = {
+    .invalidate = omap_invalidate_display,
+    .gfx_update = omap_update_display,
+};
+
+struct omap_lcd_panel_s *omap_lcdc_init(MemoryRegion *sysmem,
+                                        hwaddr base,
+                                        qemu_irq irq,
+                                        struct omap_dma_lcd_channel_s *dma,
+                                        omap_clk clk)
+{
+    struct omap_lcd_panel_s *s = g_new0(struct omap_lcd_panel_s, 1);
+
+    s->irq = irq;
+    s->dma = dma;
+    s->sysmem = sysmem;
+    omap_lcdc_reset(s);
+
+    memory_region_init_io(&s->iomem, NULL, &omap_lcdc_ops, s, "omap.lcdc", 0x100);
+    memory_region_add_subregion(sysmem, base, &s->iomem);
+
+    s->con =
graphic_console_init(NULL, 0, &omap_ops, s); + + return s; +} diff --git a/hw/display/pl110.c b/hw/display/pl110.c new file mode 100644 index 000000000..4bf15c1da --- /dev/null +++ b/hw/display/pl110.c @@ -0,0 +1,609 @@ +/* + * Arm PrimeCell PL110 Color LCD Controller + * + * Copyright (c) 2005-2009 CodeSourcery. + * Written by Paul Brook + * + * This code is licensed under the GNU LGPL + */ + +#include "qemu/osdep.h" +#include "hw/irq.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "ui/console.h" +#include "framebuffer.h" +#include "ui/pixel_ops.h" +#include "qemu/timer.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "qom/object.h" + +#define PL110_CR_EN 0x001 +#define PL110_CR_BGR 0x100 +#define PL110_CR_BEBO 0x200 +#define PL110_CR_BEPO 0x400 +#define PL110_CR_PWR 0x800 +#define PL110_IE_NB 0x004 +#define PL110_IE_VC 0x008 + +enum pl110_bppmode +{ + BPP_1, + BPP_2, + BPP_4, + BPP_8, + BPP_16, + BPP_32, + BPP_16_565, /* PL111 only */ + BPP_12 /* PL111 only */ +}; + + +/* The Versatile/PB uses a slightly modified PL110 controller. */ +enum pl110_version +{ + VERSION_PL110, + VERSION_PL110_VERSATILE, + VERSION_PL111 +}; + +#define TYPE_PL110 "pl110" +OBJECT_DECLARE_SIMPLE_TYPE(PL110State, PL110) + +struct PL110State { + SysBusDevice parent_obj; + + MemoryRegion iomem; + MemoryRegionSection fbsection; + QemuConsole *con; + QEMUTimer *vblank_timer; + + int version; + uint32_t timing[4]; + uint32_t cr; + uint32_t upbase; + uint32_t lpbase; + uint32_t int_status; + uint32_t int_mask; + int cols; + int rows; + enum pl110_bppmode bpp; + int invalidate; + uint32_t mux_ctrl; + uint32_t palette[256]; + uint32_t raw_palette[128]; + qemu_irq irq; +}; + +static int vmstate_pl110_post_load(void *opaque, int version_id); + +static const VMStateDescription vmstate_pl110 = { + .name = "pl110", + .version_id = 2, + .minimum_version_id = 1, + .post_load = vmstate_pl110_post_load, + .fields = (VMStateField[]) { + VMSTATE_INT32(version, PL110State), + VMSTATE_UINT32_ARRAY(timing, PL110State, 4), + VMSTATE_UINT32(cr, PL110State), + VMSTATE_UINT32(upbase, PL110State), + VMSTATE_UINT32(lpbase, PL110State), + VMSTATE_UINT32(int_status, PL110State), + VMSTATE_UINT32(int_mask, PL110State), + VMSTATE_INT32(cols, PL110State), + VMSTATE_INT32(rows, PL110State), + VMSTATE_UINT32(bpp, PL110State), + VMSTATE_INT32(invalidate, PL110State), + VMSTATE_UINT32_ARRAY(palette, PL110State, 256), + VMSTATE_UINT32_ARRAY(raw_palette, PL110State, 128), + VMSTATE_UINT32_V(mux_ctrl, PL110State, 2), + VMSTATE_END_OF_LIST() + } +}; + +static const unsigned char pl110_id[] = +{ 0x10, 0x11, 0x04, 0x00, 0x0d, 0xf0, 0x05, 0xb1 }; + +static const unsigned char pl111_id[] = { + 0x11, 0x11, 0x24, 0x00, 0x0d, 0xf0, 0x05, 0xb1 +}; + + +/* Indexed by pl110_version */ +static const unsigned char *idregs[] = { + pl110_id, + /* The ARM documentation (DDI0224C) says the CLCDC on the Versatile board + * has a different ID (0x93, 0x10, 0x04, 0x00, ...). However the hardware + * itself has the same ID values as a stock PL110, and guests (in + * particular Linux) rely on this. We emulate what the hardware does, + * rather than what the docs claim it ought to do. 
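+ * (These ID bytes are returned one per 32-bit register by pl110_read()
+ * for offsets 0xfe0-0xffc.)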
+ */ + pl110_id, + pl111_id +}; + +#define COPY_PIXEL(to, from) do { *(uint32_t *)to = from; to += 4; } while (0) + +#undef RGB +#define BORDER bgr +#define ORDER 0 +#include "pl110_template.h" +#define ORDER 1 +#include "pl110_template.h" +#define ORDER 2 +#include "pl110_template.h" +#undef BORDER +#define RGB +#define BORDER rgb +#define ORDER 0 +#include "pl110_template.h" +#define ORDER 1 +#include "pl110_template.h" +#define ORDER 2 +#include "pl110_template.h" +#undef BORDER + +#undef COPY_PIXEL + +static drawfn pl110_draw_fn_32[48] = { + pl110_draw_line1_lblp_bgr, + pl110_draw_line2_lblp_bgr, + pl110_draw_line4_lblp_bgr, + pl110_draw_line8_lblp_bgr, + pl110_draw_line16_555_lblp_bgr, + pl110_draw_line32_lblp_bgr, + pl110_draw_line16_lblp_bgr, + pl110_draw_line12_lblp_bgr, + + pl110_draw_line1_bbbp_bgr, + pl110_draw_line2_bbbp_bgr, + pl110_draw_line4_bbbp_bgr, + pl110_draw_line8_bbbp_bgr, + pl110_draw_line16_555_bbbp_bgr, + pl110_draw_line32_bbbp_bgr, + pl110_draw_line16_bbbp_bgr, + pl110_draw_line12_bbbp_bgr, + + pl110_draw_line1_lbbp_bgr, + pl110_draw_line2_lbbp_bgr, + pl110_draw_line4_lbbp_bgr, + pl110_draw_line8_lbbp_bgr, + pl110_draw_line16_555_lbbp_bgr, + pl110_draw_line32_lbbp_bgr, + pl110_draw_line16_lbbp_bgr, + pl110_draw_line12_lbbp_bgr, + + pl110_draw_line1_lblp_rgb, + pl110_draw_line2_lblp_rgb, + pl110_draw_line4_lblp_rgb, + pl110_draw_line8_lblp_rgb, + pl110_draw_line16_555_lblp_rgb, + pl110_draw_line32_lblp_rgb, + pl110_draw_line16_lblp_rgb, + pl110_draw_line12_lblp_rgb, + + pl110_draw_line1_bbbp_rgb, + pl110_draw_line2_bbbp_rgb, + pl110_draw_line4_bbbp_rgb, + pl110_draw_line8_bbbp_rgb, + pl110_draw_line16_555_bbbp_rgb, + pl110_draw_line32_bbbp_rgb, + pl110_draw_line16_bbbp_rgb, + pl110_draw_line12_bbbp_rgb, + + pl110_draw_line1_lbbp_rgb, + pl110_draw_line2_lbbp_rgb, + pl110_draw_line4_lbbp_rgb, + pl110_draw_line8_lbbp_rgb, + pl110_draw_line16_555_lbbp_rgb, + pl110_draw_line32_lbbp_rgb, + pl110_draw_line16_lbbp_rgb, + pl110_draw_line12_lbbp_rgb, +}; + +static int pl110_enabled(PL110State *s) +{ + return (s->cr & PL110_CR_EN) && (s->cr & PL110_CR_PWR); +} + +static void pl110_update_display(void *opaque) +{ + PL110State *s = (PL110State *)opaque; + SysBusDevice *sbd; + DisplaySurface *surface = qemu_console_surface(s->con); + drawfn fn; + int src_width; + int bpp_offset; + int first; + int last; + + if (!pl110_enabled(s)) { + return; + } + + sbd = SYS_BUS_DEVICE(s); + + if (s->cr & PL110_CR_BGR) + bpp_offset = 0; + else + bpp_offset = 24; + + if ((s->version != VERSION_PL111) && (s->bpp == BPP_16)) { + /* The PL110's native 16 bit mode is 5551; however + * most boards with a PL110 implement an external + * mux which allows bits to be reshuffled to give + * 565 format. The mux is typically controlled by + * an external system register. + * This is controlled by a GPIO input pin + * so boards can wire it up to their register. + * + * The PL111 straightforwardly implements both + * 5551 and 565 under control of the bpp field + * in the LCDControl register. 
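+ * The switch below decodes mux_ctrl accordingly: 1 keeps the native
+ * 5551 layout, 3 selects 565 BGR, and any other value is treated as
+ * 565 while still honouring the BGR bit.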
+ */ + switch (s->mux_ctrl) { + case 3: /* 565 BGR */ + bpp_offset = (BPP_16_565 - BPP_16); + break; + case 1: /* 5551 */ + break; + case 0: /* 888; also if we have loaded vmstate from an old version */ + case 2: /* 565 RGB */ + default: + /* treat as 565 but honour BGR bit */ + bpp_offset += (BPP_16_565 - BPP_16); + break; + } + } + + if (s->cr & PL110_CR_BEBO) { + fn = pl110_draw_fn_32[s->bpp + 8 + bpp_offset]; + } else if (s->cr & PL110_CR_BEPO) { + fn = pl110_draw_fn_32[s->bpp + 16 + bpp_offset]; + } else { + fn = pl110_draw_fn_32[s->bpp + bpp_offset]; + } + + src_width = s->cols; + switch (s->bpp) { + case BPP_1: + src_width >>= 3; + break; + case BPP_2: + src_width >>= 2; + break; + case BPP_4: + src_width >>= 1; + break; + case BPP_8: + break; + case BPP_16: + case BPP_16_565: + case BPP_12: + src_width <<= 1; + break; + case BPP_32: + src_width <<= 2; + break; + } + first = 0; + if (s->invalidate) { + framebuffer_update_memory_section(&s->fbsection, + sysbus_address_space(sbd), + s->upbase, + s->rows, src_width); + } + + framebuffer_update_display(surface, &s->fbsection, + s->cols, s->rows, + src_width, s->cols * 4, 0, + s->invalidate, + fn, s->palette, + &first, &last); + + if (first >= 0) { + dpy_gfx_update(s->con, 0, first, s->cols, last - first + 1); + } + s->invalidate = 0; +} + +static void pl110_invalidate_display(void * opaque) +{ + PL110State *s = (PL110State *)opaque; + s->invalidate = 1; + if (pl110_enabled(s)) { + qemu_console_resize(s->con, s->cols, s->rows); + } +} + +static void pl110_update_palette(PL110State *s, int n) +{ + DisplaySurface *surface = qemu_console_surface(s->con); + int i; + uint32_t raw; + unsigned int r, g, b; + + raw = s->raw_palette[n]; + n <<= 1; + for (i = 0; i < 2; i++) { + r = (raw & 0x1f) << 3; + raw >>= 5; + g = (raw & 0x1f) << 3; + raw >>= 5; + b = (raw & 0x1f) << 3; + /* The I bit is ignored. */ + raw >>= 6; + switch (surface_bits_per_pixel(surface)) { + case 8: + s->palette[n] = rgb_to_pixel8(r, g, b); + break; + case 15: + s->palette[n] = rgb_to_pixel15(r, g, b); + break; + case 16: + s->palette[n] = rgb_to_pixel16(r, g, b); + break; + case 24: + case 32: + s->palette[n] = rgb_to_pixel32(r, g, b); + break; + } + n++; + } +} + +static void pl110_resize(PL110State *s, int width, int height) +{ + if (width != s->cols || height != s->rows) { + if (pl110_enabled(s)) { + qemu_console_resize(s->con, width, height); + } + } + s->cols = width; + s->rows = height; +} + +/* Update interrupts. 
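+ * The PL110 drives a single combined interrupt line: it is asserted
+ * whenever any unmasked raw status bit is set.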
*/ +static void pl110_update(PL110State *s) +{ + /* Raise IRQ if enabled and any status bit is 1 */ + if (s->int_status & s->int_mask) { + qemu_irq_raise(s->irq); + } else { + qemu_irq_lower(s->irq); + } +} + +static void pl110_vblank_interrupt(void *opaque) +{ + PL110State *s = opaque; + + /* Fire the vertical compare and next base IRQs and re-arm */ + s->int_status |= (PL110_IE_NB | PL110_IE_VC); + timer_mod(s->vblank_timer, + qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + + NANOSECONDS_PER_SECOND / 60); + pl110_update(s); +} + +static uint64_t pl110_read(void *opaque, hwaddr offset, + unsigned size) +{ + PL110State *s = (PL110State *)opaque; + + if (offset >= 0xfe0 && offset < 0x1000) { + return idregs[s->version][(offset - 0xfe0) >> 2]; + } + if (offset >= 0x200 && offset < 0x400) { + return s->raw_palette[(offset - 0x200) >> 2]; + } + switch (offset >> 2) { + case 0: /* LCDTiming0 */ + return s->timing[0]; + case 1: /* LCDTiming1 */ + return s->timing[1]; + case 2: /* LCDTiming2 */ + return s->timing[2]; + case 3: /* LCDTiming3 */ + return s->timing[3]; + case 4: /* LCDUPBASE */ + return s->upbase; + case 5: /* LCDLPBASE */ + return s->lpbase; + case 6: /* LCDIMSC */ + if (s->version != VERSION_PL110) { + return s->cr; + } + return s->int_mask; + case 7: /* LCDControl */ + if (s->version != VERSION_PL110) { + return s->int_mask; + } + return s->cr; + case 8: /* LCDRIS */ + return s->int_status; + case 9: /* LCDMIS */ + return s->int_status & s->int_mask; + case 11: /* LCDUPCURR */ + /* TODO: Implement vertical refresh. */ + return s->upbase; + case 12: /* LCDLPCURR */ + return s->lpbase; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "pl110_read: Bad offset %x\n", (int)offset); + return 0; + } +} + +static void pl110_write(void *opaque, hwaddr offset, + uint64_t val, unsigned size) +{ + PL110State *s = (PL110State *)opaque; + int n; + + /* For simplicity invalidate the display whenever a control register + is written to. */ + s->invalidate = 1; + if (offset >= 0x200 && offset < 0x400) { + /* Palette. 
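+         * Each 32-bit palette register packs two 16-bit 5551 entries;
+         * pl110_update_palette() converts both halves to host pixels.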
*/ + n = (offset - 0x200) >> 2; + s->raw_palette[(offset - 0x200) >> 2] = val; + pl110_update_palette(s, n); + return; + } + switch (offset >> 2) { + case 0: /* LCDTiming0 */ + s->timing[0] = val; + n = ((val & 0xfc) + 4) * 4; + pl110_resize(s, n, s->rows); + break; + case 1: /* LCDTiming1 */ + s->timing[1] = val; + n = (val & 0x3ff) + 1; + pl110_resize(s, s->cols, n); + break; + case 2: /* LCDTiming2 */ + s->timing[2] = val; + break; + case 3: /* LCDTiming3 */ + s->timing[3] = val; + break; + case 4: /* LCDUPBASE */ + s->upbase = val; + break; + case 5: /* LCDLPBASE */ + s->lpbase = val; + break; + case 6: /* LCDIMSC */ + if (s->version != VERSION_PL110) { + goto control; + } + imsc: + s->int_mask = val; + pl110_update(s); + break; + case 7: /* LCDControl */ + if (s->version != VERSION_PL110) { + goto imsc; + } + control: + s->cr = val; + s->bpp = (val >> 1) & 7; + if (pl110_enabled(s)) { + qemu_console_resize(s->con, s->cols, s->rows); + timer_mod(s->vblank_timer, + qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + + NANOSECONDS_PER_SECOND / 60); + } else { + timer_del(s->vblank_timer); + } + break; + case 10: /* LCDICR */ + s->int_status &= ~val; + pl110_update(s); + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "pl110_write: Bad offset %x\n", (int)offset); + } +} + +static const MemoryRegionOps pl110_ops = { + .read = pl110_read, + .write = pl110_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void pl110_mux_ctrl_set(void *opaque, int line, int level) +{ + PL110State *s = (PL110State *)opaque; + s->mux_ctrl = level; +} + +static int vmstate_pl110_post_load(void *opaque, int version_id) +{ + PL110State *s = opaque; + /* Make sure we redraw, and at the right size */ + pl110_invalidate_display(s); + return 0; +} + +static const GraphicHwOps pl110_gfx_ops = { + .invalidate = pl110_invalidate_display, + .gfx_update = pl110_update_display, +}; + +static void pl110_realize(DeviceState *dev, Error **errp) +{ + PL110State *s = PL110(dev); + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + + memory_region_init_io(&s->iomem, OBJECT(s), &pl110_ops, s, "pl110", 0x1000); + sysbus_init_mmio(sbd, &s->iomem); + sysbus_init_irq(sbd, &s->irq); + s->vblank_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, + pl110_vblank_interrupt, s); + qdev_init_gpio_in(dev, pl110_mux_ctrl_set, 1); + s->con = graphic_console_init(dev, 0, &pl110_gfx_ops, s); +} + +static void pl110_init(Object *obj) +{ + PL110State *s = PL110(obj); + + s->version = VERSION_PL110; +} + +static void pl110_versatile_init(Object *obj) +{ + PL110State *s = PL110(obj); + + s->version = VERSION_PL110_VERSATILE; +} + +static void pl111_init(Object *obj) +{ + PL110State *s = PL110(obj); + + s->version = VERSION_PL111; +} + +static void pl110_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories); + dc->vmsd = &vmstate_pl110; + dc->realize = pl110_realize; +} + +static const TypeInfo pl110_info = { + .name = TYPE_PL110, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(PL110State), + .instance_init = pl110_init, + .class_init = pl110_class_init, +}; + +static const TypeInfo pl110_versatile_info = { + .name = "pl110_versatile", + .parent = TYPE_PL110, + .instance_init = pl110_versatile_init, +}; + +static const TypeInfo pl111_info = { + .name = "pl111", + .parent = TYPE_PL110, + .instance_init = pl111_init, +}; + +static void pl110_register_types(void) +{ + type_register_static(&pl110_info); + type_register_static(&pl110_versatile_info); + 
type_register_static(&pl111_info); +} + +type_init(pl110_register_types) diff --git a/hw/display/pl110_template.h b/hw/display/pl110_template.h new file mode 100644 index 000000000..877419aa8 --- /dev/null +++ b/hw/display/pl110_template.h @@ -0,0 +1,301 @@ +/* + * Arm PrimeCell PL110 Color LCD Controller + * + * Copyright (c) 2005 CodeSourcery, LLC. + * Written by Paul Brook + * + * This code is licensed under the GNU LGPL + * + * Framebuffer format conversion routines. + */ + +#ifndef ORDER +#error "pl110_template.h is only for inclusion by pl110.c" +#endif + +#if ORDER == 0 +#define NAME glue(lblp_, BORDER) +#ifdef HOST_WORDS_BIGENDIAN +#define SWAP_WORDS 1 +#endif +#elif ORDER == 1 +#define NAME glue(bbbp_, BORDER) +#ifndef HOST_WORDS_BIGENDIAN +#define SWAP_WORDS 1 +#endif +#else +#define SWAP_PIXELS 1 +#define NAME glue(lbbp_, BORDER) +#ifdef HOST_WORDS_BIGENDIAN +#define SWAP_WORDS 1 +#endif +#endif + +#define FN_2(x, y) FN(x, y) FN(x+1, y) +#define FN_4(x, y) FN_2(x, y) FN_2(x+2, y) +#define FN_8(y) FN_4(0, y) FN_4(4, y) + +static void glue(pl110_draw_line1_,NAME)(void *opaque, uint8_t *d, const uint8_t *src, int width, int deststep) +{ + uint32_t *palette = opaque; + uint32_t data; + while (width > 0) { + data = *(uint32_t *)src; +#ifdef SWAP_PIXELS +#define FN(x, y) COPY_PIXEL(d, palette[(data >> (y + 7 - (x))) & 1]); +#else +#define FN(x, y) COPY_PIXEL(d, palette[(data >> ((x) + y)) & 1]); +#endif +#ifdef SWAP_WORDS + FN_8(24) + FN_8(16) + FN_8(8) + FN_8(0) +#else + FN_8(0) + FN_8(8) + FN_8(16) + FN_8(24) +#endif +#undef FN + width -= 32; + src += 4; + } +} + +static void glue(pl110_draw_line2_,NAME)(void *opaque, uint8_t *d, const uint8_t *src, int width, int deststep) +{ + uint32_t *palette = opaque; + uint32_t data; + while (width > 0) { + data = *(uint32_t *)src; +#ifdef SWAP_PIXELS +#define FN(x, y) COPY_PIXEL(d, palette[(data >> (y + 6 - (x)*2)) & 3]); +#else +#define FN(x, y) COPY_PIXEL(d, palette[(data >> ((x)*2 + y)) & 3]); +#endif +#ifdef SWAP_WORDS + FN_4(0, 24) + FN_4(0, 16) + FN_4(0, 8) + FN_4(0, 0) +#else + FN_4(0, 0) + FN_4(0, 8) + FN_4(0, 16) + FN_4(0, 24) +#endif +#undef FN + width -= 16; + src += 4; + } +} + +static void glue(pl110_draw_line4_,NAME)(void *opaque, uint8_t *d, const uint8_t *src, int width, int deststep) +{ + uint32_t *palette = opaque; + uint32_t data; + while (width > 0) { + data = *(uint32_t *)src; +#ifdef SWAP_PIXELS +#define FN(x, y) COPY_PIXEL(d, palette[(data >> (y + 4 - (x)*4)) & 0xf]); +#else +#define FN(x, y) COPY_PIXEL(d, palette[(data >> ((x)*4 + y)) & 0xf]); +#endif +#ifdef SWAP_WORDS + FN_2(0, 24) + FN_2(0, 16) + FN_2(0, 8) + FN_2(0, 0) +#else + FN_2(0, 0) + FN_2(0, 8) + FN_2(0, 16) + FN_2(0, 24) +#endif +#undef FN + width -= 8; + src += 4; + } +} + +static void glue(pl110_draw_line8_,NAME)(void *opaque, uint8_t *d, const uint8_t *src, int width, int deststep) +{ + uint32_t *palette = opaque; + uint32_t data; + while (width > 0) { + data = *(uint32_t *)src; +#define FN(x) COPY_PIXEL(d, palette[(data >> (x)) & 0xff]); +#ifdef SWAP_WORDS + FN(24) + FN(16) + FN(8) + FN(0) +#else + FN(0) + FN(8) + FN(16) + FN(24) +#endif +#undef FN + width -= 4; + src += 4; + } +} + +static void glue(pl110_draw_line16_,NAME)(void *opaque, uint8_t *d, const uint8_t *src, int width, int deststep) +{ + uint32_t data; + unsigned int r, g, b; + while (width > 0) { + data = *(uint32_t *)src; +#ifdef SWAP_WORDS + data = bswap32(data); +#endif +#ifdef RGB +#define LSB r +#define MSB b +#else +#define LSB b +#define MSB r +#endif +#if 0 + LSB = data & 0x1f; + 
data >>= 5;
+        g = data & 0x3f;
+        data >>= 6;
+        MSB = data & 0x1f;
+        data >>= 5;
+#else
+        LSB = (data & 0x1f) << 3;
+        data >>= 5;
+        g = (data & 0x3f) << 2;
+        data >>= 6;
+        MSB = (data & 0x1f) << 3;
+        data >>= 5;
+#endif
+        COPY_PIXEL(d, rgb_to_pixel32(r, g, b));
+        LSB = (data & 0x1f) << 3;
+        data >>= 5;
+        g = (data & 0x3f) << 2;
+        data >>= 6;
+        MSB = (data & 0x1f) << 3;
+        data >>= 5;
+        COPY_PIXEL(d, rgb_to_pixel32(r, g, b));
+#undef MSB
+#undef LSB
+        width -= 2;
+        src += 4;
+    }
+}
+
+static void glue(pl110_draw_line32_,NAME)(void *opaque, uint8_t *d, const uint8_t *src, int width, int deststep)
+{
+    uint32_t data;
+    unsigned int r, g, b;
+    while (width > 0) {
+        data = *(uint32_t *)src;
+#ifdef RGB
+#define LSB r
+#define MSB b
+#else
+#define LSB b
+#define MSB r
+#endif
+#ifndef SWAP_WORDS
+        LSB = data & 0xff;
+        g = (data >> 8) & 0xff;
+        MSB = (data >> 16) & 0xff;
+#else
+        LSB = (data >> 24) & 0xff;
+        g = (data >> 16) & 0xff;
+        MSB = (data >> 8) & 0xff;
+#endif
+        COPY_PIXEL(d, rgb_to_pixel32(r, g, b));
+#undef MSB
+#undef LSB
+        width--;
+        src += 4;
+    }
+}
+
+static void glue(pl110_draw_line16_555_,NAME)(void *opaque, uint8_t *d, const uint8_t *src, int width, int deststep)
+{
+    /* RGB 555 plus an intensity bit (which we ignore) */
+    uint32_t data;
+    unsigned int r, g, b;
+    while (width > 0) {
+        data = *(uint32_t *)src;
+#ifdef SWAP_WORDS
+        data = bswap32(data);
+#endif
+#ifdef RGB
+#define LSB r
+#define MSB b
+#else
+#define LSB b
+#define MSB r
+#endif
+        LSB = (data & 0x1f) << 3;
+        data >>= 5;
+        g = (data & 0x1f) << 3;
+        data >>= 5;
+        MSB = (data & 0x1f) << 3;
+        data >>= 6; /* also skip the ignored intensity bit */
+        COPY_PIXEL(d, rgb_to_pixel32(r, g, b));
+        LSB = (data & 0x1f) << 3;
+        data >>= 5;
+        g = (data & 0x1f) << 3;
+        data >>= 5;
+        MSB = (data & 0x1f) << 3;
+        data >>= 6;
+        COPY_PIXEL(d, rgb_to_pixel32(r, g, b));
+#undef MSB
+#undef LSB
+        width -= 2;
+        src += 4;
+    }
+}
+
+static void glue(pl110_draw_line12_,NAME)(void *opaque, uint8_t *d, const uint8_t *src, int width, int deststep)
+{
+    /* RGB 444 with 4 bits of zeroes at the top of each halfword */
+    uint32_t data;
+    unsigned int r, g, b;
+    while (width > 0) {
+        data = *(uint32_t *)src;
+#ifdef SWAP_WORDS
+        data = bswap32(data);
+#endif
+#ifdef RGB
+#define LSB r
+#define MSB b
+#else
+#define LSB b
+#define MSB r
+#endif
+        LSB = (data & 0xf) << 4;
+        data >>= 4;
+        g = (data & 0xf) << 4;
+        data >>= 4;
+        MSB = (data & 0xf) << 4;
+        data >>= 8;
+        COPY_PIXEL(d, rgb_to_pixel32(r, g, b));
+        LSB = (data & 0xf) << 4;
+        data >>= 4;
+        g = (data & 0xf) << 4;
+        data >>= 4;
+        MSB = (data & 0xf) << 4;
+        data >>= 8;
+        COPY_PIXEL(d, rgb_to_pixel32(r, g, b));
+#undef MSB
+#undef LSB
+        width -= 2;
+        src += 4;
+    }
+}
+
+#undef SWAP_PIXELS
+#undef NAME
+#undef SWAP_WORDS
+#undef ORDER
diff --git a/hw/display/pxa2xx_lcd.c b/hw/display/pxa2xx_lcd.c
new file mode 100644
index 000000000..2887ce496
--- /dev/null
+++ b/hw/display/pxa2xx_lcd.c
@@ -0,0 +1,1451 @@
+/*
+ * Intel XScale PXA255/270 LCDC emulation.
+ *
+ * Copyright (c) 2006 Openedhand Ltd.
+ * Written by Andrzej Zaborowski
+ *
+ * This code is licensed under the GPLv2.
+ *
+ * Contributions after 2012-01-13 are licensed under the terms of the
+ * GNU GPL, version 2 or (at your option) any later version.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/log.h"
+#include "hw/irq.h"
+#include "migration/vmstate.h"
+#include "ui/console.h"
+#include "hw/arm/pxa.h"
+#include "ui/pixel_ops.h"
+#include "hw/boards.h"
+/* FIXME: For graphic_rotate. Should probably be done in common code.
*/ +#include "sysemu/sysemu.h" +#include "framebuffer.h" + +struct DMAChannel { + uint32_t branch; + uint8_t up; + uint8_t palette[1024]; + uint8_t pbuffer[1024]; + void (*redraw)(PXA2xxLCDState *s, hwaddr addr, + int *miny, int *maxy); + + uint32_t descriptor; + uint32_t source; + uint32_t id; + uint32_t command; +}; + +struct PXA2xxLCDState { + MemoryRegion *sysmem; + MemoryRegion iomem; + MemoryRegionSection fbsection; + qemu_irq irq; + int irqlevel; + + int invalidated; + QemuConsole *con; + int dest_width; + int xres, yres; + int pal_for; + int transp; + enum { + pxa_lcdc_2bpp = 1, + pxa_lcdc_4bpp = 2, + pxa_lcdc_8bpp = 3, + pxa_lcdc_16bpp = 4, + pxa_lcdc_18bpp = 5, + pxa_lcdc_18pbpp = 6, + pxa_lcdc_19bpp = 7, + pxa_lcdc_19pbpp = 8, + pxa_lcdc_24bpp = 9, + pxa_lcdc_25bpp = 10, + } bpp; + + uint32_t control[6]; + uint32_t status[2]; + uint32_t ovl1c[2]; + uint32_t ovl2c[2]; + uint32_t ccr; + uint32_t cmdcr; + uint32_t trgbr; + uint32_t tcr; + uint32_t liidr; + uint8_t bscntr; + + struct DMAChannel dma_ch[7]; + + qemu_irq vsync_cb; + int orientation; +}; + +typedef struct QEMU_PACKED { + uint32_t fdaddr; + uint32_t fsaddr; + uint32_t fidr; + uint32_t ldcmd; +} PXAFrameDescriptor; + +#define LCCR0 0x000 /* LCD Controller Control register 0 */ +#define LCCR1 0x004 /* LCD Controller Control register 1 */ +#define LCCR2 0x008 /* LCD Controller Control register 2 */ +#define LCCR3 0x00c /* LCD Controller Control register 3 */ +#define LCCR4 0x010 /* LCD Controller Control register 4 */ +#define LCCR5 0x014 /* LCD Controller Control register 5 */ + +#define FBR0 0x020 /* DMA Channel 0 Frame Branch register */ +#define FBR1 0x024 /* DMA Channel 1 Frame Branch register */ +#define FBR2 0x028 /* DMA Channel 2 Frame Branch register */ +#define FBR3 0x02c /* DMA Channel 3 Frame Branch register */ +#define FBR4 0x030 /* DMA Channel 4 Frame Branch register */ +#define FBR5 0x110 /* DMA Channel 5 Frame Branch register */ +#define FBR6 0x114 /* DMA Channel 6 Frame Branch register */ + +#define LCSR1 0x034 /* LCD Controller Status register 1 */ +#define LCSR0 0x038 /* LCD Controller Status register 0 */ +#define LIIDR 0x03c /* LCD Controller Interrupt ID register */ + +#define TRGBR 0x040 /* TMED RGB Seed register */ +#define TCR 0x044 /* TMED Control register */ + +#define OVL1C1 0x050 /* Overlay 1 Control register 1 */ +#define OVL1C2 0x060 /* Overlay 1 Control register 2 */ +#define OVL2C1 0x070 /* Overlay 2 Control register 1 */ +#define OVL2C2 0x080 /* Overlay 2 Control register 2 */ +#define CCR 0x090 /* Cursor Control register */ + +#define CMDCR 0x100 /* Command Control register */ +#define PRSR 0x104 /* Panel Read Status register */ + +#define PXA_LCDDMA_CHANS 7 +#define DMA_FDADR 0x00 /* Frame Descriptor Address register */ +#define DMA_FSADR 0x04 /* Frame Source Address register */ +#define DMA_FIDR 0x08 /* Frame ID register */ +#define DMA_LDCMD 0x0c /* Command register */ + +/* LCD Buffer Strength Control register */ +#define BSCNTR 0x04000054 + +/* Bitfield masks */ +#define LCCR0_ENB (1 << 0) +#define LCCR0_CMS (1 << 1) +#define LCCR0_SDS (1 << 2) +#define LCCR0_LDM (1 << 3) +#define LCCR0_SOFM0 (1 << 4) +#define LCCR0_IUM (1 << 5) +#define LCCR0_EOFM0 (1 << 6) +#define LCCR0_PAS (1 << 7) +#define LCCR0_DPD (1 << 9) +#define LCCR0_DIS (1 << 10) +#define LCCR0_QDM (1 << 11) +#define LCCR0_PDD (0xff << 12) +#define LCCR0_BSM0 (1 << 20) +#define LCCR0_OUM (1 << 21) +#define LCCR0_LCDT (1 << 22) +#define LCCR0_RDSTM (1 << 23) +#define LCCR0_CMDIM (1 << 24) +#define LCCR0_OUC (1 << 25) 
+#define LCCR0_LDDALT (1 << 26) +#define LCCR1_PPL(x) ((x) & 0x3ff) +#define LCCR2_LPP(x) ((x) & 0x3ff) +#define LCCR3_API (15 << 16) +#define LCCR3_BPP(x) ((((x) >> 24) & 7) | (((x) >> 26) & 8)) +#define LCCR3_PDFOR(x) (((x) >> 30) & 3) +#define LCCR4_K1(x) (((x) >> 0) & 7) +#define LCCR4_K2(x) (((x) >> 3) & 7) +#define LCCR4_K3(x) (((x) >> 6) & 7) +#define LCCR4_PALFOR(x) (((x) >> 15) & 3) +#define LCCR5_SOFM(ch) (1 << (ch - 1)) +#define LCCR5_EOFM(ch) (1 << (ch + 7)) +#define LCCR5_BSM(ch) (1 << (ch + 15)) +#define LCCR5_IUM(ch) (1 << (ch + 23)) +#define OVLC1_EN (1 << 31) +#define CCR_CEN (1 << 31) +#define FBR_BRA (1 << 0) +#define FBR_BINT (1 << 1) +#define FBR_SRCADDR (0xfffffff << 4) +#define LCSR0_LDD (1 << 0) +#define LCSR0_SOF0 (1 << 1) +#define LCSR0_BER (1 << 2) +#define LCSR0_ABC (1 << 3) +#define LCSR0_IU0 (1 << 4) +#define LCSR0_IU1 (1 << 5) +#define LCSR0_OU (1 << 6) +#define LCSR0_QD (1 << 7) +#define LCSR0_EOF0 (1 << 8) +#define LCSR0_BS0 (1 << 9) +#define LCSR0_SINT (1 << 10) +#define LCSR0_RDST (1 << 11) +#define LCSR0_CMDINT (1 << 12) +#define LCSR0_BERCH(x) (((x) & 7) << 28) +#define LCSR1_SOF(ch) (1 << (ch - 1)) +#define LCSR1_EOF(ch) (1 << (ch + 7)) +#define LCSR1_BS(ch) (1 << (ch + 15)) +#define LCSR1_IU(ch) (1 << (ch + 23)) +#define LDCMD_LENGTH(x) ((x) & 0x001ffffc) +#define LDCMD_EOFINT (1 << 21) +#define LDCMD_SOFINT (1 << 22) +#define LDCMD_PAL (1 << 26) + +/* Size of a pixel in the QEMU UI output surface, in bytes */ +#define DEST_PIXEL_WIDTH 4 + +/* Line drawing code to handle the various possible guest pixel formats */ + +# define SKIP_PIXEL(to) do { to += deststep; } while (0) +# define COPY_PIXEL(to, from) \ + do { \ + *(uint32_t *) to = from; \ + SKIP_PIXEL(to); \ + } while (0) + +#ifdef HOST_WORDS_BIGENDIAN +# define SWAP_WORDS 1 +#endif + +#define FN_2(x) FN(x + 1) FN(x) +#define FN_4(x) FN_2(x + 2) FN_2(x) + +static void pxa2xx_draw_line2(void *opaque, uint8_t *dest, const uint8_t *src, + int width, int deststep) +{ + uint32_t *palette = opaque; + uint32_t data; + while (width > 0) { + data = *(uint32_t *) src; +#define FN(x) COPY_PIXEL(dest, palette[(data >> ((x) * 2)) & 3]); +#ifdef SWAP_WORDS + FN_4(12) + FN_4(8) + FN_4(4) + FN_4(0) +#else + FN_4(0) + FN_4(4) + FN_4(8) + FN_4(12) +#endif +#undef FN + width -= 16; + src += 4; + } +} + +static void pxa2xx_draw_line4(void *opaque, uint8_t *dest, const uint8_t *src, + int width, int deststep) +{ + uint32_t *palette = opaque; + uint32_t data; + while (width > 0) { + data = *(uint32_t *) src; +#define FN(x) COPY_PIXEL(dest, palette[(data >> ((x) * 4)) & 0xf]); +#ifdef SWAP_WORDS + FN_2(6) + FN_2(4) + FN_2(2) + FN_2(0) +#else + FN_2(0) + FN_2(2) + FN_2(4) + FN_2(6) +#endif +#undef FN + width -= 8; + src += 4; + } +} + +static void pxa2xx_draw_line8(void *opaque, uint8_t *dest, const uint8_t *src, + int width, int deststep) +{ + uint32_t *palette = opaque; + uint32_t data; + while (width > 0) { + data = *(uint32_t *) src; +#define FN(x) COPY_PIXEL(dest, palette[(data >> (x)) & 0xff]); +#ifdef SWAP_WORDS + FN(24) + FN(16) + FN(8) + FN(0) +#else + FN(0) + FN(8) + FN(16) + FN(24) +#endif +#undef FN + width -= 4; + src += 4; + } +} + +static void pxa2xx_draw_line16(void *opaque, uint8_t *dest, const uint8_t *src, + int width, int deststep) +{ + uint32_t data; + unsigned int r, g, b; + while (width > 0) { + data = *(uint32_t *) src; +#ifdef SWAP_WORDS + data = bswap32(data); +#endif + b = (data & 0x1f) << 3; + data >>= 5; + g = (data & 0x3f) << 2; + data >>= 6; + r = (data & 0x1f) << 3; + data >>= 5; + 
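/* emit the first of the two 565 pixels packed in this 32-bit word */ +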
COPY_PIXEL(dest, rgb_to_pixel32(r, g, b)); + b = (data & 0x1f) << 3; + data >>= 5; + g = (data & 0x3f) << 2; + data >>= 6; + r = (data & 0x1f) << 3; + COPY_PIXEL(dest, rgb_to_pixel32(r, g, b)); + width -= 2; + src += 4; + } +} + +static void pxa2xx_draw_line16t(void *opaque, uint8_t *dest, const uint8_t *src, + int width, int deststep) +{ + uint32_t data; + unsigned int r, g, b; + while (width > 0) { + data = *(uint32_t *) src; +#ifdef SWAP_WORDS + data = bswap32(data); +#endif + b = (data & 0x1f) << 3; + data >>= 5; + g = (data & 0x1f) << 3; + data >>= 5; + r = (data & 0x1f) << 3; + data >>= 5; + if (data & 1) { + SKIP_PIXEL(dest); + } else { + COPY_PIXEL(dest, rgb_to_pixel32(r, g, b)); + } + data >>= 1; + b = (data & 0x1f) << 3; + data >>= 5; + g = (data & 0x1f) << 3; + data >>= 5; + r = (data & 0x1f) << 3; + data >>= 5; + if (data & 1) { + SKIP_PIXEL(dest); + } else { + COPY_PIXEL(dest, rgb_to_pixel32(r, g, b)); + } + width -= 2; + src += 4; + } +} + +static void pxa2xx_draw_line18(void *opaque, uint8_t *dest, const uint8_t *src, + int width, int deststep) +{ + uint32_t data; + unsigned int r, g, b; + while (width > 0) { + data = *(uint32_t *) src; +#ifdef SWAP_WORDS + data = bswap32(data); +#endif + b = (data & 0x3f) << 2; + data >>= 6; + g = (data & 0x3f) << 2; + data >>= 6; + r = (data & 0x3f) << 2; + COPY_PIXEL(dest, rgb_to_pixel32(r, g, b)); + width -= 1; + src += 4; + } +} + +/* The wicked packed format */ +static void pxa2xx_draw_line18p(void *opaque, uint8_t *dest, const uint8_t *src, + int width, int deststep) +{ + uint32_t data[3]; + unsigned int r, g, b; + while (width > 0) { + data[0] = *(uint32_t *) src; + src += 4; + data[1] = *(uint32_t *) src; + src += 4; + data[2] = *(uint32_t *) src; + src += 4; +#ifdef SWAP_WORDS + data[0] = bswap32(data[0]); + data[1] = bswap32(data[1]); + data[2] = bswap32(data[2]); +#endif + b = (data[0] & 0x3f) << 2; + data[0] >>= 6; + g = (data[0] & 0x3f) << 2; + data[0] >>= 6; + r = (data[0] & 0x3f) << 2; + data[0] >>= 12; + COPY_PIXEL(dest, rgb_to_pixel32(r, g, b)); + b = (data[0] & 0x3f) << 2; + data[0] >>= 6; + g = ((data[1] & 0xf) << 4) | (data[0] << 2); + data[1] >>= 4; + r = (data[1] & 0x3f) << 2; + data[1] >>= 12; + COPY_PIXEL(dest, rgb_to_pixel32(r, g, b)); + b = (data[1] & 0x3f) << 2; + data[1] >>= 6; + g = (data[1] & 0x3f) << 2; + data[1] >>= 6; + r = ((data[2] & 0x3) << 6) | (data[1] << 2); + data[2] >>= 8; + COPY_PIXEL(dest, rgb_to_pixel32(r, g, b)); + b = (data[2] & 0x3f) << 2; + data[2] >>= 6; + g = (data[2] & 0x3f) << 2; + data[2] >>= 6; + r = data[2] << 2; + COPY_PIXEL(dest, rgb_to_pixel32(r, g, b)); + width -= 4; + } +} + +static void pxa2xx_draw_line19(void *opaque, uint8_t *dest, const uint8_t *src, + int width, int deststep) +{ + uint32_t data; + unsigned int r, g, b; + while (width > 0) { + data = *(uint32_t *) src; +#ifdef SWAP_WORDS + data = bswap32(data); +#endif + b = (data & 0x3f) << 2; + data >>= 6; + g = (data & 0x3f) << 2; + data >>= 6; + r = (data & 0x3f) << 2; + data >>= 6; + if (data & 1) { + SKIP_PIXEL(dest); + } else { + COPY_PIXEL(dest, rgb_to_pixel32(r, g, b)); + } + width -= 1; + src += 4; + } +} + +/* The wicked packed format */ +static void pxa2xx_draw_line19p(void *opaque, uint8_t *dest, const uint8_t *src, + int width, int deststep) +{ + uint32_t data[3]; + unsigned int r, g, b; + while (width > 0) { + data[0] = *(uint32_t *) src; + src += 4; + data[1] = *(uint32_t *) src; + src += 4; + data[2] = *(uint32_t *) src; + src += 4; +# ifdef SWAP_WORDS + data[0] = bswap32(data[0]); + data[1] = bswap32(data[1]); 
+ data[2] = bswap32(data[2]); +# endif + b = (data[0] & 0x3f) << 2; + data[0] >>= 6; + g = (data[0] & 0x3f) << 2; + data[0] >>= 6; + r = (data[0] & 0x3f) << 2; + data[0] >>= 6; + if (data[0] & 1) { + SKIP_PIXEL(dest); + } else { + COPY_PIXEL(dest, rgb_to_pixel32(r, g, b)); + } + data[0] >>= 6; + b = (data[0] & 0x3f) << 2; + data[0] >>= 6; + g = ((data[1] & 0xf) << 4) | (data[0] << 2); + data[1] >>= 4; + r = (data[1] & 0x3f) << 2; + data[1] >>= 6; + if (data[1] & 1) { + SKIP_PIXEL(dest); + } else { + COPY_PIXEL(dest, rgb_to_pixel32(r, g, b)); + } + data[1] >>= 6; + b = (data[1] & 0x3f) << 2; + data[1] >>= 6; + g = (data[1] & 0x3f) << 2; + data[1] >>= 6; + r = ((data[2] & 0x3) << 6) | (data[1] << 2); + data[2] >>= 2; + if (data[2] & 1) { + SKIP_PIXEL(dest); + } else { + COPY_PIXEL(dest, rgb_to_pixel32(r, g, b)); + } + data[2] >>= 6; + b = (data[2] & 0x3f) << 2; + data[2] >>= 6; + g = (data[2] & 0x3f) << 2; + data[2] >>= 6; + r = data[2] << 2; + data[2] >>= 6; + if (data[2] & 1) { + SKIP_PIXEL(dest); + } else { + COPY_PIXEL(dest, rgb_to_pixel32(r, g, b)); + } + width -= 4; + } +} + +static void pxa2xx_draw_line24(void *opaque, uint8_t *dest, const uint8_t *src, + int width, int deststep) +{ + uint32_t data; + unsigned int r, g, b; + while (width > 0) { + data = *(uint32_t *) src; +#ifdef SWAP_WORDS + data = bswap32(data); +#endif + b = data & 0xff; + data >>= 8; + g = data & 0xff; + data >>= 8; + r = data & 0xff; + COPY_PIXEL(dest, rgb_to_pixel32(r, g, b)); + width -= 1; + src += 4; + } +} + +static void pxa2xx_draw_line24t(void *opaque, uint8_t *dest, const uint8_t *src, + int width, int deststep) +{ + uint32_t data; + unsigned int r, g, b; + while (width > 0) { + data = *(uint32_t *) src; +#ifdef SWAP_WORDS + data = bswap32(data); +#endif + b = (data & 0x7f) << 1; + data >>= 7; + g = data & 0xff; + data >>= 8; + r = data & 0xff; + data >>= 8; + if (data & 1) { + SKIP_PIXEL(dest); + } else { + COPY_PIXEL(dest, rgb_to_pixel32(r, g, b)); + } + width -= 1; + src += 4; + } +} + +static void pxa2xx_draw_line25(void *opaque, uint8_t *dest, const uint8_t *src, + int width, int deststep) +{ + uint32_t data; + unsigned int r, g, b; + while (width > 0) { + data = *(uint32_t *) src; +#ifdef SWAP_WORDS + data = bswap32(data); +#endif + b = data & 0xff; + data >>= 8; + g = data & 0xff; + data >>= 8; + r = data & 0xff; + data >>= 8; + if (data & 1) { + SKIP_PIXEL(dest); + } else { + COPY_PIXEL(dest, rgb_to_pixel32(r, g, b)); + } + width -= 1; + src += 4; + } +} + +/* Overlay planes disabled, no transparency */ +static drawfn pxa2xx_draw_fn_32[16] = { + [0 ... 0xf] = NULL, + [pxa_lcdc_2bpp] = pxa2xx_draw_line2, + [pxa_lcdc_4bpp] = pxa2xx_draw_line4, + [pxa_lcdc_8bpp] = pxa2xx_draw_line8, + [pxa_lcdc_16bpp] = pxa2xx_draw_line16, + [pxa_lcdc_18bpp] = pxa2xx_draw_line18, + [pxa_lcdc_18pbpp] = pxa2xx_draw_line18p, + [pxa_lcdc_24bpp] = pxa2xx_draw_line24, +}; + +/* Overlay planes enabled, transparency used */ +static drawfn pxa2xx_draw_fn_32t[16] = { + [0 ... 
0xf] = NULL, + [pxa_lcdc_4bpp] = pxa2xx_draw_line4, + [pxa_lcdc_8bpp] = pxa2xx_draw_line8, + [pxa_lcdc_16bpp] = pxa2xx_draw_line16t, + [pxa_lcdc_19bpp] = pxa2xx_draw_line19, + [pxa_lcdc_19pbpp] = pxa2xx_draw_line19p, + [pxa_lcdc_24bpp] = pxa2xx_draw_line24t, + [pxa_lcdc_25bpp] = pxa2xx_draw_line25, +}; + +#undef COPY_PIXEL +#undef SKIP_PIXEL + +#ifdef SWAP_WORDS +# undef SWAP_WORDS +#endif + +/* Route internal interrupt lines to the global IC */ +static void pxa2xx_lcdc_int_update(PXA2xxLCDState *s) +{ + int level = 0; + level |= (s->status[0] & LCSR0_LDD) && !(s->control[0] & LCCR0_LDM); + level |= (s->status[0] & LCSR0_SOF0) && !(s->control[0] & LCCR0_SOFM0); + level |= (s->status[0] & LCSR0_IU0) && !(s->control[0] & LCCR0_IUM); + level |= (s->status[0] & LCSR0_IU1) && !(s->control[5] & LCCR5_IUM(1)); + level |= (s->status[0] & LCSR0_OU) && !(s->control[0] & LCCR0_OUM); + level |= (s->status[0] & LCSR0_QD) && !(s->control[0] & LCCR0_QDM); + level |= (s->status[0] & LCSR0_EOF0) && !(s->control[0] & LCCR0_EOFM0); + level |= (s->status[0] & LCSR0_BS0) && !(s->control[0] & LCCR0_BSM0); + level |= (s->status[0] & LCSR0_RDST) && !(s->control[0] & LCCR0_RDSTM); + level |= (s->status[0] & LCSR0_CMDINT) && !(s->control[0] & LCCR0_CMDIM); + level |= (s->status[1] & ~s->control[5]); + + qemu_set_irq(s->irq, !!level); + s->irqlevel = level; +} + +/* Set Branch Status interrupt high and poke associated registers */ +static inline void pxa2xx_dma_bs_set(PXA2xxLCDState *s, int ch) +{ + int unmasked; + if (ch == 0) { + s->status[0] |= LCSR0_BS0; + unmasked = !(s->control[0] & LCCR0_BSM0); + } else { + s->status[1] |= LCSR1_BS(ch); + unmasked = !(s->control[5] & LCCR5_BSM(ch)); + } + + if (unmasked) { + if (s->irqlevel) + s->status[0] |= LCSR0_SINT; + else + s->liidr = s->dma_ch[ch].id; + } +} + +/* Set Start Of Frame Status interrupt high and poke associated registers */ +static inline void pxa2xx_dma_sof_set(PXA2xxLCDState *s, int ch) +{ + int unmasked; + if (!(s->dma_ch[ch].command & LDCMD_SOFINT)) + return; + + if (ch == 0) { + s->status[0] |= LCSR0_SOF0; + unmasked = !(s->control[0] & LCCR0_SOFM0); + } else { + s->status[1] |= LCSR1_SOF(ch); + unmasked = !(s->control[5] & LCCR5_SOFM(ch)); + } + + if (unmasked) { + if (s->irqlevel) + s->status[0] |= LCSR0_SINT; + else + s->liidr = s->dma_ch[ch].id; + } +} + +/* Set End Of Frame Status interrupt high and poke associated registers */ +static inline void pxa2xx_dma_eof_set(PXA2xxLCDState *s, int ch) +{ + int unmasked; + if (!(s->dma_ch[ch].command & LDCMD_EOFINT)) + return; + + if (ch == 0) { + s->status[0] |= LCSR0_EOF0; + unmasked = !(s->control[0] & LCCR0_EOFM0); + } else { + s->status[1] |= LCSR1_EOF(ch); + unmasked = !(s->control[5] & LCCR5_EOFM(ch)); + } + + if (unmasked) { + if (s->irqlevel) + s->status[0] |= LCSR0_SINT; + else + s->liidr = s->dma_ch[ch].id; + } +} + +/* Set Bus Error Status interrupt high and poke associated registers */ +static inline void pxa2xx_dma_ber_set(PXA2xxLCDState *s, int ch) +{ + s->status[0] |= LCSR0_BERCH(ch) | LCSR0_BER; + if (s->irqlevel) + s->status[0] |= LCSR0_SINT; + else + s->liidr = s->dma_ch[ch].id; +} + +/* Load new Frame Descriptors from DMA */ +static void pxa2xx_descriptor_load(PXA2xxLCDState *s) +{ + PXAFrameDescriptor desc; + hwaddr descptr; + int i; + + for (i = 0; i < PXA_LCDDMA_CHANS; i ++) { + s->dma_ch[i].source = 0; + + if (!s->dma_ch[i].up) + continue; + + if (s->dma_ch[i].branch & FBR_BRA) { + descptr = s->dma_ch[i].branch & FBR_SRCADDR; + if (s->dma_ch[i].branch & FBR_BINT) + 
pxa2xx_dma_bs_set(s, i); + s->dma_ch[i].branch &= ~FBR_BRA; + } else + descptr = s->dma_ch[i].descriptor; + + if (!((descptr >= PXA2XX_SDRAM_BASE && descptr + + sizeof(desc) <= PXA2XX_SDRAM_BASE + current_machine->ram_size) || + (descptr >= PXA2XX_INTERNAL_BASE && descptr + sizeof(desc) <= + PXA2XX_INTERNAL_BASE + PXA2XX_INTERNAL_SIZE))) { + continue; + } + + cpu_physical_memory_read(descptr, &desc, sizeof(desc)); + s->dma_ch[i].descriptor = le32_to_cpu(desc.fdaddr); + s->dma_ch[i].source = le32_to_cpu(desc.fsaddr); + s->dma_ch[i].id = le32_to_cpu(desc.fidr); + s->dma_ch[i].command = le32_to_cpu(desc.ldcmd); + } +} + +static uint64_t pxa2xx_lcdc_read(void *opaque, hwaddr offset, + unsigned size) +{ + PXA2xxLCDState *s = (PXA2xxLCDState *) opaque; + int ch; + + switch (offset) { + case LCCR0: + return s->control[0]; + case LCCR1: + return s->control[1]; + case LCCR2: + return s->control[2]; + case LCCR3: + return s->control[3]; + case LCCR4: + return s->control[4]; + case LCCR5: + return s->control[5]; + + case OVL1C1: + return s->ovl1c[0]; + case OVL1C2: + return s->ovl1c[1]; + case OVL2C1: + return s->ovl2c[0]; + case OVL2C2: + return s->ovl2c[1]; + + case CCR: + return s->ccr; + + case CMDCR: + return s->cmdcr; + + case TRGBR: + return s->trgbr; + case TCR: + return s->tcr; + + case 0x200 ... 0x1000: /* DMA per-channel registers */ + ch = (offset - 0x200) >> 4; + if (!(ch >= 0 && ch < PXA_LCDDMA_CHANS)) + goto fail; + + switch (offset & 0xf) { + case DMA_FDADR: + return s->dma_ch[ch].descriptor; + case DMA_FSADR: + return s->dma_ch[ch].source; + case DMA_FIDR: + return s->dma_ch[ch].id; + case DMA_LDCMD: + return s->dma_ch[ch].command; + default: + goto fail; + } + + case FBR0: + return s->dma_ch[0].branch; + case FBR1: + return s->dma_ch[1].branch; + case FBR2: + return s->dma_ch[2].branch; + case FBR3: + return s->dma_ch[3].branch; + case FBR4: + return s->dma_ch[4].branch; + case FBR5: + return s->dma_ch[5].branch; + case FBR6: + return s->dma_ch[6].branch; + + case BSCNTR: + return s->bscntr; + + case PRSR: + return 0; + + case LCSR0: + return s->status[0]; + case LCSR1: + return s->status[1]; + case LIIDR: + return s->liidr; + + default: + fail: + qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" HWADDR_PRIX "\n", + __func__, offset); + } + + return 0; +} + +static void pxa2xx_lcdc_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + PXA2xxLCDState *s = (PXA2xxLCDState *) opaque; + int ch; + + switch (offset) { + case LCCR0: + /* ACK Quick Disable done */ + if ((s->control[0] & LCCR0_ENB) && !(value & LCCR0_ENB)) + s->status[0] |= LCSR0_QD; + + if (!(s->control[0] & LCCR0_LCDT) && (value & LCCR0_LCDT)) { + qemu_log_mask(LOG_UNIMP, + "%s: internal frame buffer unsupported\n", __func__); + } + if ((s->control[3] & LCCR3_API) && + (value & LCCR0_ENB) && !(value & LCCR0_LCDT)) + s->status[0] |= LCSR0_ABC; + + s->control[0] = value & 0x07ffffff; + pxa2xx_lcdc_int_update(s); + + s->dma_ch[0].up = !!(value & LCCR0_ENB); + s->dma_ch[1].up = (s->ovl1c[0] & OVLC1_EN) || (value & LCCR0_SDS); + break; + + case LCCR1: + s->control[1] = value; + break; + + case LCCR2: + s->control[2] = value; + break; + + case LCCR3: + s->control[3] = value & 0xefffffff; + s->bpp = LCCR3_BPP(value); + break; + + case LCCR4: + s->control[4] = value & 0x83ff81ff; + break; + + case LCCR5: + s->control[5] = value & 0x3f3f3f3f; + break; + + case OVL1C1: + if (!(s->ovl1c[0] & OVLC1_EN) && (value & OVLC1_EN)) { + qemu_log_mask(LOG_UNIMP, "%s: Overlay 1 not supported\n", __func__); + } + s->ovl1c[0] 
= value & 0x80ffffff; + s->dma_ch[1].up = (value & OVLC1_EN) || (s->control[0] & LCCR0_SDS); + break; + + case OVL1C2: + s->ovl1c[1] = value & 0x000fffff; + break; + + case OVL2C1: + if (!(s->ovl2c[0] & OVLC1_EN) && (value & OVLC1_EN)) { + qemu_log_mask(LOG_UNIMP, "%s: Overlay 2 not supported\n", __func__); + } + s->ovl2c[0] = value & 0x80ffffff; + s->dma_ch[2].up = !!(value & OVLC1_EN); + s->dma_ch[3].up = !!(value & OVLC1_EN); + s->dma_ch[4].up = !!(value & OVLC1_EN); + break; + + case OVL2C2: + s->ovl2c[1] = value & 0x007fffff; + break; + + case CCR: + if (!(s->ccr & CCR_CEN) && (value & CCR_CEN)) { + qemu_log_mask(LOG_UNIMP, + "%s: Hardware cursor unimplemented\n", __func__); + } + s->ccr = value & 0x81ffffe7; + s->dma_ch[5].up = !!(value & CCR_CEN); + break; + + case CMDCR: + s->cmdcr = value & 0xff; + break; + + case TRGBR: + s->trgbr = value & 0x00ffffff; + break; + + case TCR: + s->tcr = value & 0x7fff; + break; + + case 0x200 ... 0x1000: /* DMA per-channel registers */ + ch = (offset - 0x200) >> 4; + if (!(ch >= 0 && ch < PXA_LCDDMA_CHANS)) + goto fail; + + switch (offset & 0xf) { + case DMA_FDADR: + s->dma_ch[ch].descriptor = value & 0xfffffff0; + break; + + default: + goto fail; + } + break; + + case FBR0: + s->dma_ch[0].branch = value & 0xfffffff3; + break; + case FBR1: + s->dma_ch[1].branch = value & 0xfffffff3; + break; + case FBR2: + s->dma_ch[2].branch = value & 0xfffffff3; + break; + case FBR3: + s->dma_ch[3].branch = value & 0xfffffff3; + break; + case FBR4: + s->dma_ch[4].branch = value & 0xfffffff3; + break; + case FBR5: + s->dma_ch[5].branch = value & 0xfffffff3; + break; + case FBR6: + s->dma_ch[6].branch = value & 0xfffffff3; + break; + + case BSCNTR: + s->bscntr = value & 0xf; + break; + + case PRSR: + break; + + case LCSR0: + s->status[0] &= ~(value & 0xfff); + if (value & LCSR0_BER) + s->status[0] &= ~LCSR0_BERCH(7); + break; + + case LCSR1: + s->status[1] &= ~(value & 0x3e3f3f); + break; + + default: + fail: + qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" HWADDR_PRIX "\n", + __func__, offset); + } +} + +static const MemoryRegionOps pxa2xx_lcdc_ops = { + .read = pxa2xx_lcdc_read, + .write = pxa2xx_lcdc_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +/* Load new palette for a given DMA channel, convert to internal format */ +static void pxa2xx_palette_parse(PXA2xxLCDState *s, int ch, int bpp) +{ + DisplaySurface *surface = qemu_console_surface(s->con); + int i, n, format, r, g, b, alpha; + uint32_t *dest; + uint8_t *src; + s->pal_for = LCCR4_PALFOR(s->control[4]); + format = s->pal_for; + + switch (bpp) { + case pxa_lcdc_2bpp: + n = 4; + break; + case pxa_lcdc_4bpp: + n = 16; + break; + case pxa_lcdc_8bpp: + n = 256; + break; + default: + return; + } + + src = (uint8_t *) s->dma_ch[ch].pbuffer; + dest = (uint32_t *) s->dma_ch[ch].palette; + alpha = r = g = b = 0; + + for (i = 0; i < n; i ++) { + switch (format) { + case 0: /* 16 bpp, no transparency */ + alpha = 0; + if (s->control[0] & LCCR0_CMS) { + r = g = b = *(uint16_t *) src & 0xff; + } + else { + r = (*(uint16_t *) src & 0xf800) >> 8; + g = (*(uint16_t *) src & 0x07e0) >> 3; + b = (*(uint16_t *) src & 0x001f) << 3; + } + src += 2; + break; + case 1: /* 16 bpp plus transparency */ + alpha = *(uint32_t *) src & (1 << 24); + if (s->control[0] & LCCR0_CMS) + r = g = b = *(uint32_t *) src & 0xff; + else { + r = (*(uint32_t *) src & 0xf80000) >> 16; + g = (*(uint32_t *) src & 0x00fc00) >> 8; + b = (*(uint32_t *) src & 0x0000f8); + } + src += 4; + break; + case 2: /* 18 bpp plus transparency */ + alpha = 
*(uint32_t *) src & (1 << 24); + if (s->control[0] & LCCR0_CMS) + r = g = b = *(uint32_t *) src & 0xff; + else { + r = (*(uint32_t *) src & 0xfc0000) >> 16; + g = (*(uint32_t *) src & 0x00fc00) >> 8; + b = (*(uint32_t *) src & 0x0000fc); + } + src += 4; + break; + case 3: /* 24 bpp plus transparency */ + alpha = *(uint32_t *) src & (1 << 24); + if (s->control[0] & LCCR0_CMS) + r = g = b = *(uint32_t *) src & 0xff; + else { + r = (*(uint32_t *) src & 0xff0000) >> 16; + g = (*(uint32_t *) src & 0x00ff00) >> 8; + b = (*(uint32_t *) src & 0x0000ff); + } + src += 4; + break; + } + switch (surface_bits_per_pixel(surface)) { + case 8: + *dest = rgb_to_pixel8(r, g, b) | alpha; + break; + case 15: + *dest = rgb_to_pixel15(r, g, b) | alpha; + break; + case 16: + *dest = rgb_to_pixel16(r, g, b) | alpha; + break; + case 24: + *dest = rgb_to_pixel24(r, g, b) | alpha; + break; + case 32: + *dest = rgb_to_pixel32(r, g, b) | alpha; + break; + } + dest ++; + } +} + +static inline drawfn pxa2xx_drawfn(PXA2xxLCDState *s) +{ + if (s->transp) { + return pxa2xx_draw_fn_32t[s->bpp]; + } else { + return pxa2xx_draw_fn_32[s->bpp]; + } +} + +static void pxa2xx_lcdc_dma0_redraw_rot0(PXA2xxLCDState *s, + hwaddr addr, int *miny, int *maxy) +{ + DisplaySurface *surface = qemu_console_surface(s->con); + int src_width, dest_width; + drawfn fn = pxa2xx_drawfn(s); + if (!fn) + return; + + src_width = (s->xres + 3) & ~3; /* Pad to a 4 pixels multiple */ + if (s->bpp == pxa_lcdc_19pbpp || s->bpp == pxa_lcdc_18pbpp) + src_width *= 3; + else if (s->bpp > pxa_lcdc_16bpp) + src_width *= 4; + else if (s->bpp > pxa_lcdc_8bpp) + src_width *= 2; + + dest_width = s->xres * DEST_PIXEL_WIDTH; + *miny = 0; + if (s->invalidated) { + framebuffer_update_memory_section(&s->fbsection, s->sysmem, + addr, s->yres, src_width); + } + framebuffer_update_display(surface, &s->fbsection, s->xres, s->yres, + src_width, dest_width, DEST_PIXEL_WIDTH, + s->invalidated, + fn, s->dma_ch[0].palette, miny, maxy); +} + +static void pxa2xx_lcdc_dma0_redraw_rot90(PXA2xxLCDState *s, + hwaddr addr, int *miny, int *maxy) +{ + DisplaySurface *surface = qemu_console_surface(s->con); + int src_width, dest_width; + drawfn fn = pxa2xx_drawfn(s); + if (!fn) + return; + + src_width = (s->xres + 3) & ~3; /* Pad to a 4 pixels multiple */ + if (s->bpp == pxa_lcdc_19pbpp || s->bpp == pxa_lcdc_18pbpp) + src_width *= 3; + else if (s->bpp > pxa_lcdc_16bpp) + src_width *= 4; + else if (s->bpp > pxa_lcdc_8bpp) + src_width *= 2; + + dest_width = s->yres * DEST_PIXEL_WIDTH; + *miny = 0; + if (s->invalidated) { + framebuffer_update_memory_section(&s->fbsection, s->sysmem, + addr, s->yres, src_width); + } + framebuffer_update_display(surface, &s->fbsection, s->xres, s->yres, + src_width, DEST_PIXEL_WIDTH, -dest_width, + s->invalidated, + fn, s->dma_ch[0].palette, + miny, maxy); +} + +static void pxa2xx_lcdc_dma0_redraw_rot180(PXA2xxLCDState *s, + hwaddr addr, int *miny, int *maxy) +{ + DisplaySurface *surface = qemu_console_surface(s->con); + int src_width, dest_width; + drawfn fn = pxa2xx_drawfn(s); + if (!fn) { + return; + } + + src_width = (s->xres + 3) & ~3; /* Pad to a 4 pixels multiple */ + if (s->bpp == pxa_lcdc_19pbpp || s->bpp == pxa_lcdc_18pbpp) { + src_width *= 3; + } else if (s->bpp > pxa_lcdc_16bpp) { + src_width *= 4; + } else if (s->bpp > pxa_lcdc_8bpp) { + src_width *= 2; + } + + dest_width = s->xres * DEST_PIXEL_WIDTH; + *miny = 0; + if (s->invalidated) { + framebuffer_update_memory_section(&s->fbsection, s->sysmem, + addr, s->yres, src_width); + } + 
framebuffer_update_display(surface, &s->fbsection, s->xres, s->yres, + src_width, -dest_width, -DEST_PIXEL_WIDTH, + s->invalidated, + fn, s->dma_ch[0].palette, miny, maxy); +} + +static void pxa2xx_lcdc_dma0_redraw_rot270(PXA2xxLCDState *s, + hwaddr addr, int *miny, int *maxy) +{ + DisplaySurface *surface = qemu_console_surface(s->con); + int src_width, dest_width; + drawfn fn = pxa2xx_drawfn(s); + if (!fn) { + return; + } + + src_width = (s->xres + 3) & ~3; /* Pad to a 4 pixels multiple */ + if (s->bpp == pxa_lcdc_19pbpp || s->bpp == pxa_lcdc_18pbpp) { + src_width *= 3; + } else if (s->bpp > pxa_lcdc_16bpp) { + src_width *= 4; + } else if (s->bpp > pxa_lcdc_8bpp) { + src_width *= 2; + } + + dest_width = s->yres * DEST_PIXEL_WIDTH; + *miny = 0; + if (s->invalidated) { + framebuffer_update_memory_section(&s->fbsection, s->sysmem, + addr, s->yres, src_width); + } + framebuffer_update_display(surface, &s->fbsection, s->xres, s->yres, + src_width, -DEST_PIXEL_WIDTH, dest_width, + s->invalidated, + fn, s->dma_ch[0].palette, + miny, maxy); +} + +static void pxa2xx_lcdc_resize(PXA2xxLCDState *s) +{ + int width, height; + if (!(s->control[0] & LCCR0_ENB)) + return; + + width = LCCR1_PPL(s->control[1]) + 1; + height = LCCR2_LPP(s->control[2]) + 1; + + if (width != s->xres || height != s->yres) { + if (s->orientation == 90 || s->orientation == 270) { + qemu_console_resize(s->con, height, width); + } else { + qemu_console_resize(s->con, width, height); + } + s->invalidated = 1; + s->xres = width; + s->yres = height; + } +} + +static void pxa2xx_update_display(void *opaque) +{ + PXA2xxLCDState *s = (PXA2xxLCDState *) opaque; + hwaddr fbptr; + int miny, maxy; + int ch; + if (!(s->control[0] & LCCR0_ENB)) + return; + + pxa2xx_descriptor_load(s); + + pxa2xx_lcdc_resize(s); + miny = s->yres; + maxy = 0; + s->transp = s->dma_ch[2].up || s->dma_ch[3].up; + /* Note: With overlay planes the order depends on LCCR0 bit 25. 
*/ + for (ch = 0; ch < PXA_LCDDMA_CHANS; ch ++) + if (s->dma_ch[ch].up) { + if (!s->dma_ch[ch].source) { + pxa2xx_dma_ber_set(s, ch); + continue; + } + fbptr = s->dma_ch[ch].source; + if (!((fbptr >= PXA2XX_SDRAM_BASE && + fbptr <= PXA2XX_SDRAM_BASE + current_machine->ram_size) || + (fbptr >= PXA2XX_INTERNAL_BASE && + fbptr <= PXA2XX_INTERNAL_BASE + PXA2XX_INTERNAL_SIZE))) { + pxa2xx_dma_ber_set(s, ch); + continue; + } + + if (s->dma_ch[ch].command & LDCMD_PAL) { + cpu_physical_memory_read(fbptr, s->dma_ch[ch].pbuffer, + MAX(LDCMD_LENGTH(s->dma_ch[ch].command), + sizeof(s->dma_ch[ch].pbuffer))); + pxa2xx_palette_parse(s, ch, s->bpp); + } else { + /* Do we need to reparse palette */ + if (LCCR4_PALFOR(s->control[4]) != s->pal_for) + pxa2xx_palette_parse(s, ch, s->bpp); + + /* ACK frame start */ + pxa2xx_dma_sof_set(s, ch); + + s->dma_ch[ch].redraw(s, fbptr, &miny, &maxy); + s->invalidated = 0; + + /* ACK frame completed */ + pxa2xx_dma_eof_set(s, ch); + } + } + + if (s->control[0] & LCCR0_DIS) { + /* ACK last frame completed */ + s->control[0] &= ~LCCR0_ENB; + s->status[0] |= LCSR0_LDD; + } + + if (miny >= 0) { + switch (s->orientation) { + case 0: + dpy_gfx_update(s->con, 0, miny, s->xres, maxy - miny + 1); + break; + case 90: + dpy_gfx_update(s->con, miny, 0, maxy - miny + 1, s->xres); + break; + case 180: + maxy = s->yres - maxy - 1; + miny = s->yres - miny - 1; + dpy_gfx_update(s->con, 0, maxy, s->xres, miny - maxy + 1); + break; + case 270: + maxy = s->yres - maxy - 1; + miny = s->yres - miny - 1; + dpy_gfx_update(s->con, maxy, 0, miny - maxy + 1, s->xres); + break; + } + } + pxa2xx_lcdc_int_update(s); + + qemu_irq_raise(s->vsync_cb); +} + +static void pxa2xx_invalidate_display(void *opaque) +{ + PXA2xxLCDState *s = (PXA2xxLCDState *) opaque; + s->invalidated = 1; +} + +static void pxa2xx_lcdc_orientation(void *opaque, int angle) +{ + PXA2xxLCDState *s = (PXA2xxLCDState *) opaque; + + switch (angle) { + case 0: + s->dma_ch[0].redraw = pxa2xx_lcdc_dma0_redraw_rot0; + break; + case 90: + s->dma_ch[0].redraw = pxa2xx_lcdc_dma0_redraw_rot90; + break; + case 180: + s->dma_ch[0].redraw = pxa2xx_lcdc_dma0_redraw_rot180; + break; + case 270: + s->dma_ch[0].redraw = pxa2xx_lcdc_dma0_redraw_rot270; + break; + } + + s->orientation = angle; + s->xres = s->yres = -1; + pxa2xx_lcdc_resize(s); +} + +static const VMStateDescription vmstate_dma_channel = { + .name = "dma_channel", + .version_id = 0, + .minimum_version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_UINT32(branch, struct DMAChannel), + VMSTATE_UINT8(up, struct DMAChannel), + VMSTATE_BUFFER(pbuffer, struct DMAChannel), + VMSTATE_UINT32(descriptor, struct DMAChannel), + VMSTATE_UINT32(source, struct DMAChannel), + VMSTATE_UINT32(id, struct DMAChannel), + VMSTATE_UINT32(command, struct DMAChannel), + VMSTATE_END_OF_LIST() + } +}; + +static int pxa2xx_lcdc_post_load(void *opaque, int version_id) +{ + PXA2xxLCDState *s = opaque; + + s->bpp = LCCR3_BPP(s->control[3]); + s->xres = s->yres = s->pal_for = -1; + + return 0; +} + +static const VMStateDescription vmstate_pxa2xx_lcdc = { + .name = "pxa2xx_lcdc", + .version_id = 0, + .minimum_version_id = 0, + .post_load = pxa2xx_lcdc_post_load, + .fields = (VMStateField[]) { + VMSTATE_INT32(irqlevel, PXA2xxLCDState), + VMSTATE_INT32(transp, PXA2xxLCDState), + VMSTATE_UINT32_ARRAY(control, PXA2xxLCDState, 6), + VMSTATE_UINT32_ARRAY(status, PXA2xxLCDState, 2), + VMSTATE_UINT32_ARRAY(ovl1c, PXA2xxLCDState, 2), + VMSTATE_UINT32_ARRAY(ovl2c, PXA2xxLCDState, 2), + VMSTATE_UINT32(ccr, PXA2xxLCDState), + 
VMSTATE_UINT32(cmdcr, PXA2xxLCDState), + VMSTATE_UINT32(trgbr, PXA2xxLCDState), + VMSTATE_UINT32(tcr, PXA2xxLCDState), + VMSTATE_UINT32(liidr, PXA2xxLCDState), + VMSTATE_UINT8(bscntr, PXA2xxLCDState), + VMSTATE_STRUCT_ARRAY(dma_ch, PXA2xxLCDState, 7, 0, + vmstate_dma_channel, struct DMAChannel), + VMSTATE_END_OF_LIST() + } +}; + +static const GraphicHwOps pxa2xx_ops = { + .invalidate = pxa2xx_invalidate_display, + .gfx_update = pxa2xx_update_display, +}; + +PXA2xxLCDState *pxa2xx_lcdc_init(MemoryRegion *sysmem, + hwaddr base, qemu_irq irq) +{ + PXA2xxLCDState *s; + + s = (PXA2xxLCDState *) g_malloc0(sizeof(PXA2xxLCDState)); + s->invalidated = 1; + s->irq = irq; + s->sysmem = sysmem; + + pxa2xx_lcdc_orientation(s, graphic_rotate); + + memory_region_init_io(&s->iomem, NULL, &pxa2xx_lcdc_ops, s, + "pxa2xx-lcd-controller", 0x00100000); + memory_region_add_subregion(sysmem, base, &s->iomem); + + s->con = graphic_console_init(NULL, 0, &pxa2xx_ops, s); + + vmstate_register(NULL, 0, &vmstate_pxa2xx_lcdc, s); + + return s; +} + +void pxa2xx_lcd_vsync_notifier(PXA2xxLCDState *s, qemu_irq handler) +{ + s->vsync_cb = handler; +} diff --git a/hw/display/qxl-logger.c b/hw/display/qxl-logger.c new file mode 100644 index 000000000..68bfa4756 --- /dev/null +++ b/hw/display/qxl-logger.c @@ -0,0 +1,274 @@ +/* + * qxl command logging -- for debug purposes + * + * Copyright (C) 2010 Red Hat, Inc. + * + * maintained by Gerd Hoffmann + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 or + * (at your option) version 3 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */ + +#include "qemu/osdep.h" +#include "qemu/timer.h" +#include "qxl.h" + +static const char *const qxl_type[] = { + [ QXL_CMD_NOP ] = "nop", + [ QXL_CMD_DRAW ] = "draw", + [ QXL_CMD_UPDATE ] = "update", + [ QXL_CMD_CURSOR ] = "cursor", + [ QXL_CMD_MESSAGE ] = "message", + [ QXL_CMD_SURFACE ] = "surface", +}; + +static const char *const qxl_draw_type[] = { + [ QXL_DRAW_NOP ] = "nop", + [ QXL_DRAW_FILL ] = "fill", + [ QXL_DRAW_OPAQUE ] = "opaque", + [ QXL_DRAW_COPY ] = "copy", + [ QXL_COPY_BITS ] = "copy-bits", + [ QXL_DRAW_BLEND ] = "blend", + [ QXL_DRAW_BLACKNESS ] = "blackness", + [ QXL_DRAW_WHITENESS ] = "whiteness", + [ QXL_DRAW_INVERS ] = "invers", + [ QXL_DRAW_ROP3 ] = "rop3", + [ QXL_DRAW_STROKE ] = "stroke", + [ QXL_DRAW_TEXT ] = "text", + [ QXL_DRAW_TRANSPARENT ] = "transparent", + [ QXL_DRAW_ALPHA_BLEND ] = "alpha-blend", +}; + +static const char *const qxl_draw_effect[] = { + [ QXL_EFFECT_BLEND ] = "blend", + [ QXL_EFFECT_OPAQUE ] = "opaque", + [ QXL_EFFECT_REVERT_ON_DUP ] = "revert-on-dup", + [ QXL_EFFECT_BLACKNESS_ON_DUP ] = "blackness-on-dup", + [ QXL_EFFECT_WHITENESS_ON_DUP ] = "whiteness-on-dup", + [ QXL_EFFECT_NOP_ON_DUP ] = "nop-on-dup", + [ QXL_EFFECT_NOP ] = "nop", + [ QXL_EFFECT_OPAQUE_BRUSH ] = "opaque-brush", +}; + +static const char *const qxl_surface_cmd[] = { + [ QXL_SURFACE_CMD_CREATE ] = "create", + [ QXL_SURFACE_CMD_DESTROY ] = "destroy", +}; + +static const char *const spice_surface_fmt[] = { + [ SPICE_SURFACE_FMT_INVALID ] = "invalid", + [ SPICE_SURFACE_FMT_1_A ] = "alpha/1", + [ SPICE_SURFACE_FMT_8_A ] = "alpha/8", + [ SPICE_SURFACE_FMT_16_555 ] = "555/16", + [ SPICE_SURFACE_FMT_16_565 ] = "565/16", + [ SPICE_SURFACE_FMT_32_xRGB ] = "xRGB/32", + [ SPICE_SURFACE_FMT_32_ARGB ] = "ARGB/32", +}; + +static const char *const qxl_cursor_cmd[] = { + [ QXL_CURSOR_SET ] = "set", + [ QXL_CURSOR_MOVE ] = "move", + [ QXL_CURSOR_HIDE ] = "hide", + [ QXL_CURSOR_TRAIL ] = "trail", +}; + +static const char *const spice_cursor_type[] = { + [ SPICE_CURSOR_TYPE_ALPHA ] = "alpha", + [ SPICE_CURSOR_TYPE_MONO ] = "mono", + [ SPICE_CURSOR_TYPE_COLOR4 ] = "color4", + [ SPICE_CURSOR_TYPE_COLOR8 ] = "color8", + [ SPICE_CURSOR_TYPE_COLOR16 ] = "color16", + [ SPICE_CURSOR_TYPE_COLOR24 ] = "color24", + [ SPICE_CURSOR_TYPE_COLOR32 ] = "color32", +}; + +static const char *qxl_v2n(const char *const n[], size_t l, int v) +{ + if (v >= l || !n[v]) { + return "???"; + } + return n[v]; +} +#define qxl_name(_list, _value) qxl_v2n(_list, ARRAY_SIZE(_list), _value) + +static int qxl_log_image(PCIQXLDevice *qxl, QXLPHYSICAL addr, int group_id) +{ + QXLImage *image; + QXLImageDescriptor *desc; + + image = qxl_phys2virt(qxl, addr, group_id); + if (!image) { + return 1; + } + desc = &image->descriptor; + fprintf(stderr, " (id %" PRIx64 " type %d flags %d width %d height %d", + desc->id, desc->type, desc->flags, desc->width, desc->height); + switch (desc->type) { + case SPICE_IMAGE_TYPE_BITMAP: + fprintf(stderr, ", fmt %d flags %d x %d y %d stride %d" + " palette %" PRIx64 " data %" PRIx64, + image->bitmap.format, image->bitmap.flags, + image->bitmap.x, image->bitmap.y, + image->bitmap.stride, + image->bitmap.palette, image->bitmap.data); + break; + } + fprintf(stderr, ")"); + return 0; +} + +static void qxl_log_rect(QXLRect *rect) +{ + fprintf(stderr, " %dx%d+%d+%d", + rect->right - rect->left, + rect->bottom - rect->top, + rect->left, rect->top); +} + +static int qxl_log_cmd_draw_copy(PCIQXLDevice *qxl, QXLCopy *copy, + int group_id) +{ + int ret; + + fprintf(stderr, " src %" PRIx64, +
copy->src_bitmap); + ret = qxl_log_image(qxl, copy->src_bitmap, group_id); + if (ret != 0) { + return ret; + } + fprintf(stderr, " area"); + qxl_log_rect(&copy->src_area); + fprintf(stderr, " rop %d", copy->rop_descriptor); + return 0; +} + +static int qxl_log_cmd_draw(PCIQXLDevice *qxl, QXLDrawable *draw, int group_id) +{ + fprintf(stderr, ": surface_id %d type %s effect %s", + draw->surface_id, + qxl_name(qxl_draw_type, draw->type), + qxl_name(qxl_draw_effect, draw->effect)); + switch (draw->type) { + case QXL_DRAW_COPY: + return qxl_log_cmd_draw_copy(qxl, &draw->u.copy, group_id); + } + return 0; +} + +static int qxl_log_cmd_draw_compat(PCIQXLDevice *qxl, QXLCompatDrawable *draw, + int group_id) +{ + fprintf(stderr, ": type %s effect %s", + qxl_name(qxl_draw_type, draw->type), + qxl_name(qxl_draw_effect, draw->effect)); + if (draw->bitmap_offset) { + fprintf(stderr, ": bitmap %d", + draw->bitmap_offset); + qxl_log_rect(&draw->bitmap_area); + } + switch (draw->type) { + case QXL_DRAW_COPY: + return qxl_log_cmd_draw_copy(qxl, &draw->u.copy, group_id); + } + return 0; +} + +static void qxl_log_cmd_surface(PCIQXLDevice *qxl, QXLSurfaceCmd *cmd) +{ + fprintf(stderr, ": %s id %d", + qxl_name(qxl_surface_cmd, cmd->type), + cmd->surface_id); + if (cmd->type == QXL_SURFACE_CMD_CREATE) { + fprintf(stderr, " size %dx%d stride %d format %s (count %u, max %u)", + cmd->u.surface_create.width, + cmd->u.surface_create.height, + cmd->u.surface_create.stride, + qxl_name(spice_surface_fmt, cmd->u.surface_create.format), + qxl->guest_surfaces.count, qxl->guest_surfaces.max); + } + if (cmd->type == QXL_SURFACE_CMD_DESTROY) { + fprintf(stderr, " (count %u)", qxl->guest_surfaces.count); + } +} + +int qxl_log_cmd_cursor(PCIQXLDevice *qxl, QXLCursorCmd *cmd, int group_id) +{ + QXLCursor *cursor; + + fprintf(stderr, ": %s", + qxl_name(qxl_cursor_cmd, cmd->type)); + switch (cmd->type) { + case QXL_CURSOR_SET: + fprintf(stderr, " +%d+%d visible %s, shape @ 0x%" PRIx64, + cmd->u.set.position.x, + cmd->u.set.position.y, + cmd->u.set.visible ? "yes" : "no", + cmd->u.set.shape); + cursor = qxl_phys2virt(qxl, cmd->u.set.shape, group_id); + if (!cursor) { + return 1; + } + fprintf(stderr, " type %s size %dx%d hot-spot +%d+%d" + " unique 0x%" PRIx64 " data-size %d", + qxl_name(spice_cursor_type, cursor->header.type), + cursor->header.width, cursor->header.height, + cursor->header.hot_spot_x, cursor->header.hot_spot_y, + cursor->header.unique, cursor->data_size); + break; + case QXL_CURSOR_MOVE: + fprintf(stderr, " +%d+%d", cmd->u.position.x, cmd->u.position.y); + break; + } + return 0; +} + +int qxl_log_command(PCIQXLDevice *qxl, const char *ring, QXLCommandExt *ext) +{ + bool compat = ext->flags & QXL_COMMAND_FLAG_COMPAT; + void *data; + int ret; + + if (!qxl->cmdlog) { + return 0; + } + fprintf(stderr, "%" PRId64 " qxl-%d/%s:", qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), + qxl->id, ring); + fprintf(stderr, " cmd @ 0x%" PRIx64 " %s%s", ext->cmd.data, + qxl_name(qxl_type, ext->cmd.type), + compat ?
"(compat)" : ""); + + data = qxl_phys2virt(qxl, ext->cmd.data, ext->group_id); + if (!data) { + return 1; + } + switch (ext->cmd.type) { + case QXL_CMD_DRAW: + if (!compat) { + ret = qxl_log_cmd_draw(qxl, data, ext->group_id); + } else { + ret = qxl_log_cmd_draw_compat(qxl, data, ext->group_id); + } + if (ret) { + return ret; + } + break; + case QXL_CMD_SURFACE: + qxl_log_cmd_surface(qxl, data); + break; + case QXL_CMD_CURSOR: + qxl_log_cmd_cursor(qxl, data, ext->group_id); + break; + } + fprintf(stderr, "\n"); + return 0; +} diff --git a/hw/display/qxl-render.c b/hw/display/qxl-render.c new file mode 100644 index 000000000..d28849b12 --- /dev/null +++ b/hw/display/qxl-render.c @@ -0,0 +1,337 @@ +/* + * qxl local rendering (aka display on sdl/vnc) + * + * Copyright (C) 2010 Red Hat, Inc. + * + * maintained by Gerd Hoffmann + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 or + * (at your option) version 3 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see <http://www.gnu.org/licenses/>. + */ + +#include "qemu/osdep.h" +#include "qxl.h" +#include "sysemu/runstate.h" +#include "trace.h" + +static void qxl_blit(PCIQXLDevice *qxl, QXLRect *rect) +{ + DisplaySurface *surface = qemu_console_surface(qxl->vga.con); + uint8_t *dst = surface_data(surface); + uint8_t *src; + int len, i; + + if (is_buffer_shared(surface)) { + return; + } + trace_qxl_render_blit(qxl->guest_primary.qxl_stride, + rect->left, rect->right, rect->top, rect->bottom); + src = qxl->guest_primary.data; + if (qxl->guest_primary.qxl_stride < 0) { + /* qxl surface is upside down, walk src scanlines + * in reverse order to flip it */ + src += (qxl->guest_primary.surface.height - rect->top - 1) * + qxl->guest_primary.abs_stride; + } else { + src += rect->top * qxl->guest_primary.abs_stride; + } + dst += rect->top * qxl->guest_primary.abs_stride; + src += rect->left * qxl->guest_primary.bytes_pp; + dst += rect->left * qxl->guest_primary.bytes_pp; + len = (rect->right - rect->left) * qxl->guest_primary.bytes_pp; + + for (i = rect->top; i < rect->bottom; i++) { + memcpy(dst, src, len); + dst += qxl->guest_primary.abs_stride; + src += qxl->guest_primary.qxl_stride; + } +} + +void qxl_render_resize(PCIQXLDevice *qxl) +{ + QXLSurfaceCreate *sc = &qxl->guest_primary.surface; + + qxl->guest_primary.qxl_stride = sc->stride; + qxl->guest_primary.abs_stride = abs(sc->stride); + qxl->guest_primary.resized++; + switch (sc->format) { + case SPICE_SURFACE_FMT_16_555: + qxl->guest_primary.bytes_pp = 2; + qxl->guest_primary.bits_pp = 15; + break; + case SPICE_SURFACE_FMT_16_565: + qxl->guest_primary.bytes_pp = 2; + qxl->guest_primary.bits_pp = 16; + break; + case SPICE_SURFACE_FMT_32_xRGB: + case SPICE_SURFACE_FMT_32_ARGB: + qxl->guest_primary.bytes_pp = 4; + qxl->guest_primary.bits_pp = 32; + break; + default: + fprintf(stderr, "%s: unhandled format: %x\n", __func__, + qxl->guest_primary.surface.format); + qxl->guest_primary.bytes_pp = 4; + qxl->guest_primary.bits_pp = 32; + break; + } +} + +static void qxl_set_rect_to_surface(PCIQXLDevice *qxl, QXLRect *area) +{ + area->left = 0; + area->right =
qxl->guest_primary.surface.width; + area->top = 0; + area->bottom = qxl->guest_primary.surface.height; +} + +static void qxl_render_update_area_unlocked(PCIQXLDevice *qxl) +{ + VGACommonState *vga = &qxl->vga; + DisplaySurface *surface; + int width = qxl->guest_head0_width ?: qxl->guest_primary.surface.width; + int height = qxl->guest_head0_height ?: qxl->guest_primary.surface.height; + int i; + + if (qxl->guest_primary.resized) { + qxl->guest_primary.resized = 0; + qxl->guest_primary.data = qxl_phys2virt(qxl, + qxl->guest_primary.surface.mem, + MEMSLOT_GROUP_GUEST); + if (!qxl->guest_primary.data) { + goto end; + } + qxl_set_rect_to_surface(qxl, &qxl->dirty[0]); + qxl->num_dirty_rects = 1; + trace_qxl_render_guest_primary_resized( + width, + height, + qxl->guest_primary.qxl_stride, + qxl->guest_primary.bytes_pp, + qxl->guest_primary.bits_pp); + if (qxl->guest_primary.qxl_stride > 0) { + pixman_format_code_t format = + qemu_default_pixman_format(qxl->guest_primary.bits_pp, true); + surface = qemu_create_displaysurface_from + (width, + height, + format, + qxl->guest_primary.abs_stride, + qxl->guest_primary.data); + } else { + surface = qemu_create_displaysurface + (width, + height); + } + dpy_gfx_replace_surface(vga->con, surface); + } + + if (!qxl->guest_primary.data) { + goto end; + } + for (i = 0; i < qxl->num_dirty_rects; i++) { + if (qemu_spice_rect_is_empty(qxl->dirty+i)) { + break; + } + if (qxl->dirty[i].left < 0 || + qxl->dirty[i].top < 0 || + qxl->dirty[i].left > qxl->dirty[i].right || + qxl->dirty[i].top > qxl->dirty[i].bottom || + qxl->dirty[i].right > width || + qxl->dirty[i].bottom > height) { + continue; + } + qxl_blit(qxl, qxl->dirty+i); + dpy_gfx_update(vga->con, + qxl->dirty[i].left, qxl->dirty[i].top, + qxl->dirty[i].right - qxl->dirty[i].left, + qxl->dirty[i].bottom - qxl->dirty[i].top); + } + qxl->num_dirty_rects = 0; + +end: + if (qxl->render_update_cookie_num == 0) { + graphic_hw_update_done(qxl->ssd.dcl.con); + } +} + +/* + * use ssd.lock to protect render_update_cookie_num. + * qxl_render_update is called by io thread or vcpu thread, and the completion + * callbacks are called by spice_server thread, deferring to bh called from the + * io thread. 
+ */ +void qxl_render_update(PCIQXLDevice *qxl) +{ + QXLCookie *cookie; + + qemu_mutex_lock(&qxl->ssd.lock); + + if (!runstate_is_running() || !qxl->guest_primary.commands || + qxl->mode == QXL_MODE_UNDEFINED) { + qxl_render_update_area_unlocked(qxl); + qemu_mutex_unlock(&qxl->ssd.lock); + graphic_hw_update_done(qxl->ssd.dcl.con); + return; + } + + qxl->guest_primary.commands = 0; + qxl->render_update_cookie_num++; + qemu_mutex_unlock(&qxl->ssd.lock); + cookie = qxl_cookie_new(QXL_COOKIE_TYPE_RENDER_UPDATE_AREA, + 0); + qxl_set_rect_to_surface(qxl, &cookie->u.render.area); + qxl_spice_update_area(qxl, 0, &cookie->u.render.area, NULL, + 0, 1 /* clear_dirty_region */, QXL_ASYNC, cookie); +} + +void qxl_render_update_area_bh(void *opaque) +{ + PCIQXLDevice *qxl = opaque; + + qemu_mutex_lock(&qxl->ssd.lock); + qxl_render_update_area_unlocked(qxl); + qemu_mutex_unlock(&qxl->ssd.lock); +} + +void qxl_render_update_area_done(PCIQXLDevice *qxl, QXLCookie *cookie) +{ + qemu_mutex_lock(&qxl->ssd.lock); + trace_qxl_render_update_area_done(cookie); + qemu_bh_schedule(qxl->update_area_bh); + qxl->render_update_cookie_num--; + qemu_mutex_unlock(&qxl->ssd.lock); + g_free(cookie); +} + +static void qxl_unpack_chunks(void *dest, size_t size, PCIQXLDevice *qxl, + QXLDataChunk *chunk, uint32_t group_id) +{ + uint32_t max_chunks = 32; + size_t offset = 0; + size_t bytes; + + for (;;) { + bytes = MIN(size - offset, chunk->data_size); + memcpy(dest + offset, chunk->data, bytes); + offset += bytes; + if (offset == size) { + return; + } + chunk = qxl_phys2virt(qxl, chunk->next_chunk, group_id); + if (!chunk) { + return; + } + max_chunks--; + if (max_chunks == 0) { + return; + } + } +} + +static QEMUCursor *qxl_cursor(PCIQXLDevice *qxl, QXLCursor *cursor, + uint32_t group_id) +{ + QEMUCursor *c; + uint8_t *and_mask, *xor_mask; + size_t size; + + c = cursor_alloc(cursor->header.width, cursor->header.height); + c->hot_x = cursor->header.hot_spot_x; + c->hot_y = cursor->header.hot_spot_y; + switch (cursor->header.type) { + case SPICE_CURSOR_TYPE_MONO: + /* Assume that the full cursor is available in a single chunk. 
*/ + size = 2 * cursor_get_mono_bpl(c) * c->height; + if (size != cursor->data_size) { + fprintf(stderr, "%s: bad monochrome cursor %ux%u with size %u\n", + __func__, c->width, c->height, cursor->data_size); + goto fail; + } + and_mask = cursor->chunk.data; + xor_mask = and_mask + cursor_get_mono_bpl(c) * c->height; + cursor_set_mono(c, 0xffffff, 0x000000, xor_mask, 1, and_mask); + if (qxl->debug > 2) { + cursor_print_ascii_art(c, "qxl/mono"); + } + break; + case SPICE_CURSOR_TYPE_ALPHA: + size = sizeof(uint32_t) * cursor->header.width * cursor->header.height; + qxl_unpack_chunks(c->data, size, qxl, &cursor->chunk, group_id); + if (qxl->debug > 2) { + cursor_print_ascii_art(c, "qxl/alpha"); + } + break; + default: + fprintf(stderr, "%s: not implemented: type %d\n", + __func__, cursor->header.type); + goto fail; + } + return c; + +fail: + cursor_put(c); + return NULL; +} + + +/* called from spice server thread context only */ +int qxl_render_cursor(PCIQXLDevice *qxl, QXLCommandExt *ext) +{ + QXLCursorCmd *cmd = qxl_phys2virt(qxl, ext->cmd.data, ext->group_id); + QXLCursor *cursor; + QEMUCursor *c; + + if (!cmd) { + return 1; + } + + if (!dpy_cursor_define_supported(qxl->vga.con)) { + return 0; + } + + if (qxl->debug > 1 && cmd->type != QXL_CURSOR_MOVE) { + fprintf(stderr, "%s", __func__); + qxl_log_cmd_cursor(qxl, cmd, ext->group_id); + fprintf(stderr, "\n"); + } + switch (cmd->type) { + case QXL_CURSOR_SET: + cursor = qxl_phys2virt(qxl, cmd->u.set.shape, ext->group_id); + if (!cursor) { + return 1; + } + c = qxl_cursor(qxl, cursor, ext->group_id); + if (c == NULL) { + c = cursor_builtin_left_ptr(); + } + qemu_mutex_lock(&qxl->ssd.lock); + if (qxl->ssd.cursor) { + cursor_put(qxl->ssd.cursor); + } + qxl->ssd.cursor = c; + qxl->ssd.mouse_x = cmd->u.set.position.x; + qxl->ssd.mouse_y = cmd->u.set.position.y; + qemu_mutex_unlock(&qxl->ssd.lock); + qemu_bh_schedule(qxl->ssd.cursor_bh); + break; + case QXL_CURSOR_MOVE: + qemu_mutex_lock(&qxl->ssd.lock); + qxl->ssd.mouse_x = cmd->u.position.x; + qxl->ssd.mouse_y = cmd->u.position.y; + qemu_mutex_unlock(&qxl->ssd.lock); + qemu_bh_schedule(qxl->ssd.cursor_bh); + break; + } + return 0; +} diff --git a/hw/display/qxl.c b/hw/display/qxl.c new file mode 100644 index 000000000..29c80b428 --- /dev/null +++ b/hw/display/qxl.c @@ -0,0 +1,2524 @@ +/* + * Copyright (C) 2010 Red Hat, Inc. + * + * written by Yaniv Kamay, Izik Eidus, Gerd Hoffmann + * maintained by Gerd Hoffmann + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 or + * (at your option) version 3 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include <zlib.h> + +#include "qapi/error.h" +#include "qemu/timer.h" +#include "qemu/queue.h" +#include "qemu/atomic.h" +#include "qemu/main-loop.h" +#include "qemu/module.h" +#include "hw/qdev-properties.h" +#include "sysemu/runstate.h" +#include "migration/vmstate.h" +#include "trace.h" + +#include "qxl.h" + +#undef SPICE_RING_CONS_ITEM +#define SPICE_RING_CONS_ITEM(qxl, r, ret) { \ + uint32_t cons = (r)->cons & SPICE_RING_INDEX_MASK(r); \ + if (cons >= ARRAY_SIZE((r)->items)) { \ + qxl_set_guest_bug(qxl, "SPICE_RING_CONS_ITEM indices mismatch " \ + "%u >= %zu", cons, ARRAY_SIZE((r)->items)); \ + ret = NULL; \ + } else { \ + ret = &(r)->items[cons].el; \ + } \ + } + +#undef ALIGN +#define ALIGN(a, b) (((a) + ((b) - 1)) & ~((b) - 1)) + +#define PIXEL_SIZE 0.2936875 //1280x1024 is 14.8" x 11.9" + +#define QXL_MODE(_x, _y, _b, _o) \ + { .x_res = _x, \ + .y_res = _y, \ + .bits = _b, \ + .stride = (_x) * (_b) / 8, \ + .x_mili = PIXEL_SIZE * (_x), \ + .y_mili = PIXEL_SIZE * (_y), \ + .orientation = _o, \ + } + +#define QXL_MODE_16_32(x_res, y_res, orientation) \ + QXL_MODE(x_res, y_res, 16, orientation), \ + QXL_MODE(x_res, y_res, 32, orientation) + +#define QXL_MODE_EX(x_res, y_res) \ + QXL_MODE_16_32(x_res, y_res, 0), \ + QXL_MODE_16_32(x_res, y_res, 1) + +static QXLMode qxl_modes[] = { + QXL_MODE_EX(640, 480), + QXL_MODE_EX(800, 480), + QXL_MODE_EX(800, 600), + QXL_MODE_EX(832, 624), + QXL_MODE_EX(960, 640), + QXL_MODE_EX(1024, 600), + QXL_MODE_EX(1024, 768), + QXL_MODE_EX(1152, 864), + QXL_MODE_EX(1152, 870), + QXL_MODE_EX(1280, 720), + QXL_MODE_EX(1280, 760), + QXL_MODE_EX(1280, 768), + QXL_MODE_EX(1280, 800), + QXL_MODE_EX(1280, 960), + QXL_MODE_EX(1280, 1024), + QXL_MODE_EX(1360, 768), + QXL_MODE_EX(1366, 768), + QXL_MODE_EX(1400, 1050), + QXL_MODE_EX(1440, 900), + QXL_MODE_EX(1600, 900), + QXL_MODE_EX(1600, 1200), + QXL_MODE_EX(1680, 1050), + QXL_MODE_EX(1920, 1080), + /* these modes need more than 8 MB video memory */ + QXL_MODE_EX(1920, 1200), + QXL_MODE_EX(1920, 1440), + QXL_MODE_EX(2000, 2000), + QXL_MODE_EX(2048, 1536), + QXL_MODE_EX(2048, 2048), + QXL_MODE_EX(2560, 1440), + QXL_MODE_EX(2560, 1600), + /* these modes need more than 16 MB video memory */ + QXL_MODE_EX(2560, 2048), + QXL_MODE_EX(2800, 2100), + QXL_MODE_EX(3200, 2400), + /* these modes need more than 32 MB video memory */ + QXL_MODE_EX(3840, 2160), /* 4k mainstream */ + QXL_MODE_EX(4096, 2160), /* 4k */ + /* these modes need more than 64 MB video memory */ + QXL_MODE_EX(7680, 4320), /* 8k mainstream */ + /* these modes need more than 128 MB video memory */ + QXL_MODE_EX(8192, 4320), /* 8k */ +}; + +static void qxl_send_events(PCIQXLDevice *d, uint32_t events); +static int qxl_destroy_primary(PCIQXLDevice *d, qxl_async_io async); +static void qxl_reset_memslots(PCIQXLDevice *d); +static void qxl_reset_surfaces(PCIQXLDevice *d); +static void qxl_ring_set_dirty(PCIQXLDevice *qxl); + +static void qxl_hw_update(void *opaque); + +void qxl_set_guest_bug(PCIQXLDevice *qxl, const char *msg, ...)
+{ + trace_qxl_set_guest_bug(qxl->id); + qxl_send_events(qxl, QXL_INTERRUPT_ERROR); + qxl->guest_bug = 1; + if (qxl->guestdebug) { + va_list ap; + va_start(ap, msg); + fprintf(stderr, "qxl-%d: guest bug: ", qxl->id); + vfprintf(stderr, msg, ap); + fprintf(stderr, "\n"); + va_end(ap); + } +} + +static void qxl_clear_guest_bug(PCIQXLDevice *qxl) +{ + qxl->guest_bug = 0; +} + +void qxl_spice_update_area(PCIQXLDevice *qxl, uint32_t surface_id, + struct QXLRect *area, struct QXLRect *dirty_rects, + uint32_t num_dirty_rects, + uint32_t clear_dirty_region, + qxl_async_io async, struct QXLCookie *cookie) +{ + trace_qxl_spice_update_area(qxl->id, surface_id, area->left, area->right, + area->top, area->bottom); + trace_qxl_spice_update_area_rest(qxl->id, num_dirty_rects, + clear_dirty_region); + if (async == QXL_SYNC) { + spice_qxl_update_area(&qxl->ssd.qxl, surface_id, area, + dirty_rects, num_dirty_rects, clear_dirty_region); + } else { + assert(cookie != NULL); + spice_qxl_update_area_async(&qxl->ssd.qxl, surface_id, area, + clear_dirty_region, (uintptr_t)cookie); + } +} + +static void qxl_spice_destroy_surface_wait_complete(PCIQXLDevice *qxl, + uint32_t id) +{ + trace_qxl_spice_destroy_surface_wait_complete(qxl->id, id); + qemu_mutex_lock(&qxl->track_lock); + qxl->guest_surfaces.cmds[id] = 0; + qxl->guest_surfaces.count--; + qemu_mutex_unlock(&qxl->track_lock); +} + +static void qxl_spice_destroy_surface_wait(PCIQXLDevice *qxl, uint32_t id, + qxl_async_io async) +{ + QXLCookie *cookie; + + trace_qxl_spice_destroy_surface_wait(qxl->id, id, async); + if (async) { + cookie = qxl_cookie_new(QXL_COOKIE_TYPE_IO, + QXL_IO_DESTROY_SURFACE_ASYNC); + cookie->u.surface_id = id; + spice_qxl_destroy_surface_async(&qxl->ssd.qxl, id, (uintptr_t)cookie); + } else { + spice_qxl_destroy_surface_wait(&qxl->ssd.qxl, id); + qxl_spice_destroy_surface_wait_complete(qxl, id); + } +} + +static void qxl_spice_flush_surfaces_async(PCIQXLDevice *qxl) +{ + trace_qxl_spice_flush_surfaces_async(qxl->id, qxl->guest_surfaces.count, + qxl->num_free_res); + spice_qxl_flush_surfaces_async(&qxl->ssd.qxl, + (uintptr_t)qxl_cookie_new(QXL_COOKIE_TYPE_IO, + QXL_IO_FLUSH_SURFACES_ASYNC)); +} + +void qxl_spice_loadvm_commands(PCIQXLDevice *qxl, struct QXLCommandExt *ext, + uint32_t count) +{ + trace_qxl_spice_loadvm_commands(qxl->id, ext, count); + spice_qxl_loadvm_commands(&qxl->ssd.qxl, ext, count); +} + +void qxl_spice_oom(PCIQXLDevice *qxl) +{ + trace_qxl_spice_oom(qxl->id); + spice_qxl_oom(&qxl->ssd.qxl); +} + +void qxl_spice_reset_memslots(PCIQXLDevice *qxl) +{ + trace_qxl_spice_reset_memslots(qxl->id); + spice_qxl_reset_memslots(&qxl->ssd.qxl); +} + +static void qxl_spice_destroy_surfaces_complete(PCIQXLDevice *qxl) +{ + trace_qxl_spice_destroy_surfaces_complete(qxl->id); + qemu_mutex_lock(&qxl->track_lock); + memset(qxl->guest_surfaces.cmds, 0, + sizeof(qxl->guest_surfaces.cmds[0]) * qxl->ssd.num_surfaces); + qxl->guest_surfaces.count = 0; + qemu_mutex_unlock(&qxl->track_lock); +} + +static void qxl_spice_destroy_surfaces(PCIQXLDevice *qxl, qxl_async_io async) +{ + trace_qxl_spice_destroy_surfaces(qxl->id, async); + if (async) { + spice_qxl_destroy_surfaces_async(&qxl->ssd.qxl, + (uintptr_t)qxl_cookie_new(QXL_COOKIE_TYPE_IO, + QXL_IO_DESTROY_ALL_SURFACES_ASYNC)); + } else { + spice_qxl_destroy_surfaces(&qxl->ssd.qxl); + qxl_spice_destroy_surfaces_complete(qxl); + } +} + +static void qxl_spice_monitors_config_async(PCIQXLDevice *qxl, int replay) +{ + QXLMonitorsConfig *cfg; + + trace_qxl_spice_monitors_config(qxl->id); + if 
(replay) { + /* + * don't use QXL_COOKIE_TYPE_IO: + * - we are not running yet (post_load), we will assert + * in send_events + * - this is not a guest io, but a reply, so async_io isn't set. + */ + spice_qxl_monitors_config_async(&qxl->ssd.qxl, + qxl->guest_monitors_config, + MEMSLOT_GROUP_GUEST, + (uintptr_t)qxl_cookie_new( + QXL_COOKIE_TYPE_POST_LOAD_MONITORS_CONFIG, + 0)); + } else { +/* >= release 0.12.6, < release 0.14.2 */ +#if SPICE_SERVER_VERSION >= 0x000c06 && SPICE_SERVER_VERSION < 0x000e02 + if (qxl->max_outputs) { + spice_qxl_set_max_monitors(&qxl->ssd.qxl, qxl->max_outputs); + } +#endif + qxl->guest_monitors_config = qxl->ram->monitors_config; + spice_qxl_monitors_config_async(&qxl->ssd.qxl, + qxl->ram->monitors_config, + MEMSLOT_GROUP_GUEST, + (uintptr_t)qxl_cookie_new(QXL_COOKIE_TYPE_IO, + QXL_IO_MONITORS_CONFIG_ASYNC)); + } + + cfg = qxl_phys2virt(qxl, qxl->guest_monitors_config, MEMSLOT_GROUP_GUEST); + if (cfg != NULL && cfg->count == 1) { + qxl->guest_primary.resized = 1; + qxl->guest_head0_width = cfg->heads[0].width; + qxl->guest_head0_height = cfg->heads[0].height; + } else { + qxl->guest_head0_width = 0; + qxl->guest_head0_height = 0; + } +} + +void qxl_spice_reset_image_cache(PCIQXLDevice *qxl) +{ + trace_qxl_spice_reset_image_cache(qxl->id); + spice_qxl_reset_image_cache(&qxl->ssd.qxl); +} + +void qxl_spice_reset_cursor(PCIQXLDevice *qxl) +{ + trace_qxl_spice_reset_cursor(qxl->id); + spice_qxl_reset_cursor(&qxl->ssd.qxl); + qemu_mutex_lock(&qxl->track_lock); + qxl->guest_cursor = 0; + qemu_mutex_unlock(&qxl->track_lock); + if (qxl->ssd.cursor) { + cursor_put(qxl->ssd.cursor); + } + qxl->ssd.cursor = cursor_builtin_hidden(); +} + +static uint32_t qxl_crc32(const uint8_t *p, unsigned len) +{ + /* + * zlib xors the seed with 0xffffffff, and xors the result + * again with 0xffffffff; Both are not done with linux's crc32, + * which we want to be compatible with, so undo that. 
+ */ + return crc32(0xffffffff, p, len) ^ 0xffffffff; +} + +static ram_addr_t qxl_rom_size(void) +{ +#define QXL_REQUIRED_SZ (sizeof(QXLRom) + sizeof(QXLModes) + sizeof(qxl_modes)) +#define QXL_ROM_SZ 8192 + + QEMU_BUILD_BUG_ON(QXL_REQUIRED_SZ > QXL_ROM_SZ); + return QEMU_ALIGN_UP(QXL_REQUIRED_SZ, qemu_real_host_page_size); +} + +static void init_qxl_rom(PCIQXLDevice *d) +{ + QXLRom *rom = memory_region_get_ram_ptr(&d->rom_bar); + QXLModes *modes = (QXLModes *)(rom + 1); + uint32_t ram_header_size; + uint32_t surface0_area_size; + uint32_t num_pages; + uint32_t fb; + int i, n; + + memset(rom, 0, d->rom_size); + + rom->magic = cpu_to_le32(QXL_ROM_MAGIC); + rom->id = cpu_to_le32(d->id); + rom->log_level = cpu_to_le32(d->guestdebug); + rom->modes_offset = cpu_to_le32(sizeof(QXLRom)); + + rom->slot_gen_bits = MEMSLOT_GENERATION_BITS; + rom->slot_id_bits = MEMSLOT_SLOT_BITS; + rom->slots_start = 1; + rom->slots_end = NUM_MEMSLOTS - 1; + rom->n_surfaces = cpu_to_le32(d->ssd.num_surfaces); + + for (i = 0, n = 0; i < ARRAY_SIZE(qxl_modes); i++) { + fb = qxl_modes[i].y_res * qxl_modes[i].stride; + if (fb > d->vgamem_size) { + continue; + } + modes->modes[n].id = cpu_to_le32(i); + modes->modes[n].x_res = cpu_to_le32(qxl_modes[i].x_res); + modes->modes[n].y_res = cpu_to_le32(qxl_modes[i].y_res); + modes->modes[n].bits = cpu_to_le32(qxl_modes[i].bits); + modes->modes[n].stride = cpu_to_le32(qxl_modes[i].stride); + modes->modes[n].x_mili = cpu_to_le32(qxl_modes[i].x_mili); + modes->modes[n].y_mili = cpu_to_le32(qxl_modes[i].y_mili); + modes->modes[n].orientation = cpu_to_le32(qxl_modes[i].orientation); + n++; + } + modes->n_modes = cpu_to_le32(n); + + ram_header_size = ALIGN(sizeof(QXLRam), 4096); + surface0_area_size = ALIGN(d->vgamem_size, 4096); + num_pages = d->vga.vram_size; + num_pages -= ram_header_size; + num_pages -= surface0_area_size; + num_pages = num_pages / QXL_PAGE_SIZE; + + assert(ram_header_size + surface0_area_size <= d->vga.vram_size); + + rom->draw_area_offset = cpu_to_le32(0); + rom->surface0_area_size = cpu_to_le32(surface0_area_size); + rom->pages_offset = cpu_to_le32(surface0_area_size); + rom->num_pages = cpu_to_le32(num_pages); + rom->ram_header_offset = cpu_to_le32(d->vga.vram_size - ram_header_size); + + if (d->xres && d->yres) { + /* needs linux kernel 4.12+ to work */ + rom->client_monitors_config.count = 1; + rom->client_monitors_config.heads[0].left = 0; + rom->client_monitors_config.heads[0].top = 0; + rom->client_monitors_config.heads[0].right = cpu_to_le32(d->xres); + rom->client_monitors_config.heads[0].bottom = cpu_to_le32(d->yres); + rom->client_monitors_config_crc = qxl_crc32( + (const uint8_t *)&rom->client_monitors_config, + sizeof(rom->client_monitors_config)); + } + + d->shadow_rom = *rom; + d->rom = rom; + d->modes = modes; +} + +static void init_qxl_ram(PCIQXLDevice *d) +{ + uint8_t *buf; + uint32_t prod; + QXLReleaseRing *ring; + + buf = d->vga.vram_ptr; + d->ram = (QXLRam *)(buf + le32_to_cpu(d->shadow_rom.ram_header_offset)); + d->ram->magic = cpu_to_le32(QXL_RAM_MAGIC); + d->ram->int_pending = cpu_to_le32(0); + d->ram->int_mask = cpu_to_le32(0); + d->ram->update_surface = 0; + d->ram->monitors_config = 0; + SPICE_RING_INIT(&d->ram->cmd_ring); + SPICE_RING_INIT(&d->ram->cursor_ring); + SPICE_RING_INIT(&d->ram->release_ring); + + ring = &d->ram->release_ring; + prod = ring->prod & SPICE_RING_INDEX_MASK(ring); + assert(prod < ARRAY_SIZE(ring->items)); + ring->items[prod].el = 0; + + qxl_ring_set_dirty(d); +} + +/* can be called from spice server thread 
context */ +static void qxl_set_dirty(MemoryRegion *mr, ram_addr_t addr, ram_addr_t end) +{ + memory_region_set_dirty(mr, addr, end - addr); +} + +static void qxl_rom_set_dirty(PCIQXLDevice *qxl) +{ + qxl_set_dirty(&qxl->rom_bar, 0, qxl->rom_size); +} + +/* called from spice server thread context only */ +static void qxl_ram_set_dirty(PCIQXLDevice *qxl, void *ptr) +{ + void *base = qxl->vga.vram_ptr; + intptr_t offset; + + offset = ptr - base; + assert(offset < qxl->vga.vram_size); + qxl_set_dirty(&qxl->vga.vram, offset, offset + 3); +} + +/* can be called from spice server thread context */ +static void qxl_ring_set_dirty(PCIQXLDevice *qxl) +{ + ram_addr_t addr = qxl->shadow_rom.ram_header_offset; + ram_addr_t end = qxl->vga.vram_size; + qxl_set_dirty(&qxl->vga.vram, addr, end); +} + +/* + * keep track of some command state, for savevm/loadvm. + * called from spice server thread context only + */ +static int qxl_track_command(PCIQXLDevice *qxl, struct QXLCommandExt *ext) +{ + switch (le32_to_cpu(ext->cmd.type)) { + case QXL_CMD_SURFACE: + { + QXLSurfaceCmd *cmd = qxl_phys2virt(qxl, ext->cmd.data, ext->group_id); + + if (!cmd) { + return 1; + } + uint32_t id = le32_to_cpu(cmd->surface_id); + + if (id >= qxl->ssd.num_surfaces) { + qxl_set_guest_bug(qxl, "QXL_CMD_SURFACE id %d >= %d", id, + qxl->ssd.num_surfaces); + return 1; + } + if (cmd->type == QXL_SURFACE_CMD_CREATE && + (cmd->u.surface_create.stride & 0x03) != 0) { + qxl_set_guest_bug(qxl, "QXL_CMD_SURFACE stride = %d %% 4 != 0\n", + cmd->u.surface_create.stride); + return 1; + } + WITH_QEMU_LOCK_GUARD(&qxl->track_lock) { + if (cmd->type == QXL_SURFACE_CMD_CREATE) { + qxl->guest_surfaces.cmds[id] = ext->cmd.data; + qxl->guest_surfaces.count++; + if (qxl->guest_surfaces.max < qxl->guest_surfaces.count) { + qxl->guest_surfaces.max = qxl->guest_surfaces.count; + } + } + if (cmd->type == QXL_SURFACE_CMD_DESTROY) { + qxl->guest_surfaces.cmds[id] = 0; + qxl->guest_surfaces.count--; + } + } + break; + } + case QXL_CMD_CURSOR: + { + QXLCursorCmd *cmd = qxl_phys2virt(qxl, ext->cmd.data, ext->group_id); + + if (!cmd) { + return 1; + } + if (cmd->type == QXL_CURSOR_SET) { + qemu_mutex_lock(&qxl->track_lock); + qxl->guest_cursor = ext->cmd.data; + qemu_mutex_unlock(&qxl->track_lock); + } + if (cmd->type == QXL_CURSOR_HIDE) { + qemu_mutex_lock(&qxl->track_lock); + qxl->guest_cursor = 0; + qemu_mutex_unlock(&qxl->track_lock); + } + break; + } + } + return 0; +} + +/* spice display interface callbacks */ + +static void interface_attach_worker(QXLInstance *sin, QXLWorker *qxl_worker) +{ + PCIQXLDevice *qxl = container_of(sin, PCIQXLDevice, ssd.qxl); + + trace_qxl_interface_attach_worker(qxl->id); +} + +static void interface_set_compression_level(QXLInstance *sin, int level) +{ + PCIQXLDevice *qxl = container_of(sin, PCIQXLDevice, ssd.qxl); + + trace_qxl_interface_set_compression_level(qxl->id, level); + qxl->shadow_rom.compression_level = cpu_to_le32(level); + qxl->rom->compression_level = cpu_to_le32(level); + qxl_rom_set_dirty(qxl); +} + +#if SPICE_NEEDS_SET_MM_TIME +static void interface_set_mm_time(QXLInstance *sin, uint32_t mm_time) +{ + PCIQXLDevice *qxl = container_of(sin, PCIQXLDevice, ssd.qxl); + + if (!qemu_spice_display_is_running(&qxl->ssd)) { + return; + } + + trace_qxl_interface_set_mm_time(qxl->id, mm_time); + qxl->shadow_rom.mm_clock = cpu_to_le32(mm_time); + qxl->rom->mm_clock = cpu_to_le32(mm_time); + qxl_rom_set_dirty(qxl); +} +#endif + +static void interface_get_init_info(QXLInstance *sin, QXLDevInitInfo *info) +{ + PCIQXLDevice 
*qxl = container_of(sin, PCIQXLDevice, ssd.qxl); + + trace_qxl_interface_get_init_info(qxl->id); + info->memslot_gen_bits = MEMSLOT_GENERATION_BITS; + info->memslot_id_bits = MEMSLOT_SLOT_BITS; + info->num_memslots = NUM_MEMSLOTS; + info->num_memslots_groups = NUM_MEMSLOTS_GROUPS; + info->internal_groupslot_id = 0; + info->qxl_ram_size = + le32_to_cpu(qxl->shadow_rom.num_pages) << QXL_PAGE_BITS; + info->n_surfaces = qxl->ssd.num_surfaces; +} + +static const char *qxl_mode_to_string(int mode) +{ + switch (mode) { + case QXL_MODE_COMPAT: + return "compat"; + case QXL_MODE_NATIVE: + return "native"; + case QXL_MODE_UNDEFINED: + return "undefined"; + case QXL_MODE_VGA: + return "vga"; + } + return "INVALID"; +} + +static const char *io_port_to_string(uint32_t io_port) +{ + if (io_port >= QXL_IO_RANGE_SIZE) { + return "out of range"; + } + static const char *io_port_to_string[QXL_IO_RANGE_SIZE + 1] = { + [QXL_IO_NOTIFY_CMD] = "QXL_IO_NOTIFY_CMD", + [QXL_IO_NOTIFY_CURSOR] = "QXL_IO_NOTIFY_CURSOR", + [QXL_IO_UPDATE_AREA] = "QXL_IO_UPDATE_AREA", + [QXL_IO_UPDATE_IRQ] = "QXL_IO_UPDATE_IRQ", + [QXL_IO_NOTIFY_OOM] = "QXL_IO_NOTIFY_OOM", + [QXL_IO_RESET] = "QXL_IO_RESET", + [QXL_IO_SET_MODE] = "QXL_IO_SET_MODE", + [QXL_IO_LOG] = "QXL_IO_LOG", + [QXL_IO_MEMSLOT_ADD] = "QXL_IO_MEMSLOT_ADD", + [QXL_IO_MEMSLOT_DEL] = "QXL_IO_MEMSLOT_DEL", + [QXL_IO_DETACH_PRIMARY] = "QXL_IO_DETACH_PRIMARY", + [QXL_IO_ATTACH_PRIMARY] = "QXL_IO_ATTACH_PRIMARY", + [QXL_IO_CREATE_PRIMARY] = "QXL_IO_CREATE_PRIMARY", + [QXL_IO_DESTROY_PRIMARY] = "QXL_IO_DESTROY_PRIMARY", + [QXL_IO_DESTROY_SURFACE_WAIT] = "QXL_IO_DESTROY_SURFACE_WAIT", + [QXL_IO_DESTROY_ALL_SURFACES] = "QXL_IO_DESTROY_ALL_SURFACES", + [QXL_IO_UPDATE_AREA_ASYNC] = "QXL_IO_UPDATE_AREA_ASYNC", + [QXL_IO_MEMSLOT_ADD_ASYNC] = "QXL_IO_MEMSLOT_ADD_ASYNC", + [QXL_IO_CREATE_PRIMARY_ASYNC] = "QXL_IO_CREATE_PRIMARY_ASYNC", + [QXL_IO_DESTROY_PRIMARY_ASYNC] = "QXL_IO_DESTROY_PRIMARY_ASYNC", + [QXL_IO_DESTROY_SURFACE_ASYNC] = "QXL_IO_DESTROY_SURFACE_ASYNC", + [QXL_IO_DESTROY_ALL_SURFACES_ASYNC] + = "QXL_IO_DESTROY_ALL_SURFACES_ASYNC", + [QXL_IO_FLUSH_SURFACES_ASYNC] = "QXL_IO_FLUSH_SURFACES_ASYNC", + [QXL_IO_FLUSH_RELEASE] = "QXL_IO_FLUSH_RELEASE", + [QXL_IO_MONITORS_CONFIG_ASYNC] = "QXL_IO_MONITORS_CONFIG_ASYNC", + }; + return io_port_to_string[io_port]; +} + +/* called from spice server thread context only */ +static int interface_get_command(QXLInstance *sin, struct QXLCommandExt *ext) +{ + PCIQXLDevice *qxl = container_of(sin, PCIQXLDevice, ssd.qxl); + SimpleSpiceUpdate *update; + QXLCommandRing *ring; + QXLCommand *cmd; + int notify, ret; + + trace_qxl_ring_command_check(qxl->id, qxl_mode_to_string(qxl->mode)); + + switch (qxl->mode) { + case QXL_MODE_VGA: + ret = false; + qemu_mutex_lock(&qxl->ssd.lock); + update = QTAILQ_FIRST(&qxl->ssd.updates); + if (update != NULL) { + QTAILQ_REMOVE(&qxl->ssd.updates, update, next); + *ext = update->ext; + ret = true; + } + qemu_mutex_unlock(&qxl->ssd.lock); + if (ret) { + trace_qxl_ring_command_get(qxl->id, qxl_mode_to_string(qxl->mode)); + qxl_log_command(qxl, "vga", ext); + } + return ret; + case QXL_MODE_COMPAT: + case QXL_MODE_NATIVE: + case QXL_MODE_UNDEFINED: + ring = &qxl->ram->cmd_ring; + if (qxl->guest_bug || SPICE_RING_IS_EMPTY(ring)) { + return false; + } + SPICE_RING_CONS_ITEM(qxl, ring, cmd); + if (!cmd) { + return false; + } + ext->cmd = *cmd; + ext->group_id = MEMSLOT_GROUP_GUEST; + ext->flags = qxl->cmdflags; + SPICE_RING_POP(ring, notify); + qxl_ring_set_dirty(qxl); + if (notify) { + qxl_send_events(qxl, 
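/* notify: the guest asked for an interrupt on this ring transition */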
QXL_INTERRUPT_DISPLAY); + } + qxl->guest_primary.commands++; + qxl_track_command(qxl, ext); + qxl_log_command(qxl, "cmd", ext); + trace_qxl_ring_command_get(qxl->id, qxl_mode_to_string(qxl->mode)); + return true; + default: + return false; + } +} + +/* called from spice server thread context only */ +static int interface_req_cmd_notification(QXLInstance *sin) +{ + PCIQXLDevice *qxl = container_of(sin, PCIQXLDevice, ssd.qxl); + int wait = 1; + + trace_qxl_ring_command_req_notification(qxl->id); + switch (qxl->mode) { + case QXL_MODE_COMPAT: + case QXL_MODE_NATIVE: + case QXL_MODE_UNDEFINED: + SPICE_RING_CONS_WAIT(&qxl->ram->cmd_ring, wait); + qxl_ring_set_dirty(qxl); + break; + default: + /* nothing */ + break; + } + return wait; +} + +/* called from spice server thread context only */ +static inline void qxl_push_free_res(PCIQXLDevice *d, int flush) +{ + QXLReleaseRing *ring = &d->ram->release_ring; + uint32_t prod; + int notify; + +#define QXL_FREE_BUNCH_SIZE 32 + + if (ring->prod - ring->cons + 1 == ring->num_items) { + /* ring full -- can't push */ + return; + } + if (!flush && d->oom_running) { + /* collect everything from oom handler before pushing */ + return; + } + if (!flush && d->num_free_res < QXL_FREE_BUNCH_SIZE) { + /* collect a bit more before pushing */ + return; + } + + SPICE_RING_PUSH(ring, notify); + trace_qxl_ring_res_push(d->id, qxl_mode_to_string(d->mode), + d->guest_surfaces.count, d->num_free_res, + d->last_release, notify ? "yes" : "no"); + trace_qxl_ring_res_push_rest(d->id, ring->prod - ring->cons, + ring->num_items, ring->prod, ring->cons); + if (notify) { + qxl_send_events(d, QXL_INTERRUPT_DISPLAY); + } + + ring = &d->ram->release_ring; + prod = ring->prod & SPICE_RING_INDEX_MASK(ring); + if (prod >= ARRAY_SIZE(ring->items)) { + qxl_set_guest_bug(d, "SPICE_RING_PROD_ITEM indices mismatch " + "%u >= %zu", prod, ARRAY_SIZE(ring->items)); + return; + } + ring->items[prod].el = 0; + d->num_free_res = 0; + d->last_release = NULL; + qxl_ring_set_dirty(d); +} + +/* called from spice server thread context only */ +static void interface_release_resource(QXLInstance *sin, + QXLReleaseInfoExt ext) +{ + PCIQXLDevice *qxl = container_of(sin, PCIQXLDevice, ssd.qxl); + QXLReleaseRing *ring; + uint32_t prod; + uint64_t id; + + if (!ext.info) { + return; + } + if (ext.group_id == MEMSLOT_GROUP_HOST) { + /* host group -> vga mode update request */ + QXLCommandExt *cmdext = (void *)(intptr_t)(ext.info->id); + SimpleSpiceUpdate *update; + g_assert(cmdext->cmd.type == QXL_CMD_DRAW); + update = container_of(cmdext, SimpleSpiceUpdate, ext); + qemu_spice_destroy_update(&qxl->ssd, update); + return; + } + + /* + * ext->info points into guest-visible memory + * pci bar 0, $command.release_info + */ + ring = &qxl->ram->release_ring; + prod = ring->prod & SPICE_RING_INDEX_MASK(ring); + if (prod >= ARRAY_SIZE(ring->items)) { + qxl_set_guest_bug(qxl, "SPICE_RING_PROD_ITEM indices mismatch " + "%u >= %zu", prod, ARRAY_SIZE(ring->items)); + return; + } + if (ring->items[prod].el == 0) { + /* stick head into the ring */ + id = ext.info->id; + ext.info->next = 0; + qxl_ram_set_dirty(qxl, &ext.info->next); + ring->items[prod].el = id; + qxl_ring_set_dirty(qxl); + } else { + /* append item to the list */ + qxl->last_release->next = ext.info->id; + qxl_ram_set_dirty(qxl, &qxl->last_release->next); + ext.info->next = 0; + qxl_ram_set_dirty(qxl, &ext.info->next); + } + qxl->last_release = ext.info; + qxl->num_free_res++; + trace_qxl_ring_res_put(qxl->id, qxl->num_free_res); + qxl_push_free_res(qxl, 
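/* flush=0: batch until QXL_FREE_BUNCH_SIZE resources piled up */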
0); +} + +/* called from spice server thread context only */ +static int interface_get_cursor_command(QXLInstance *sin, struct QXLCommandExt *ext) +{ + PCIQXLDevice *qxl = container_of(sin, PCIQXLDevice, ssd.qxl); + QXLCursorRing *ring; + QXLCommand *cmd; + int notify; + + trace_qxl_ring_cursor_check(qxl->id, qxl_mode_to_string(qxl->mode)); + + switch (qxl->mode) { + case QXL_MODE_COMPAT: + case QXL_MODE_NATIVE: + case QXL_MODE_UNDEFINED: + ring = &qxl->ram->cursor_ring; + if (SPICE_RING_IS_EMPTY(ring)) { + return false; + } + SPICE_RING_CONS_ITEM(qxl, ring, cmd); + if (!cmd) { + return false; + } + ext->cmd = *cmd; + ext->group_id = MEMSLOT_GROUP_GUEST; + ext->flags = qxl->cmdflags; + SPICE_RING_POP(ring, notify); + qxl_ring_set_dirty(qxl); + if (notify) { + qxl_send_events(qxl, QXL_INTERRUPT_CURSOR); + } + qxl->guest_primary.commands++; + qxl_track_command(qxl, ext); + qxl_log_command(qxl, "csr", ext); + if (qxl->have_vga) { + qxl_render_cursor(qxl, ext); + } + trace_qxl_ring_cursor_get(qxl->id, qxl_mode_to_string(qxl->mode)); + return true; + default: + return false; + } +} + +/* called from spice server thread context only */ +static int interface_req_cursor_notification(QXLInstance *sin) +{ + PCIQXLDevice *qxl = container_of(sin, PCIQXLDevice, ssd.qxl); + int wait = 1; + + trace_qxl_ring_cursor_req_notification(qxl->id); + switch (qxl->mode) { + case QXL_MODE_COMPAT: + case QXL_MODE_NATIVE: + case QXL_MODE_UNDEFINED: + SPICE_RING_CONS_WAIT(&qxl->ram->cursor_ring, wait); + qxl_ring_set_dirty(qxl); + break; + default: + /* nothing */ + break; + } + return wait; +} + +/* called from spice server thread context */ +static void interface_notify_update(QXLInstance *sin, uint32_t update_id) +{ + /* + * Called by spice-server as a result of a QXL_CMD_UPDATE which is not in + * use by xf86-video-qxl and is defined out in the qxl windows driver. + * Probably dates from some version prior to the start of the git history + * (2009), and is still guest triggerable.
+ */ + fprintf(stderr, "%s: deprecated\n", __func__); +} + +/* called from spice server thread context only */ +static int interface_flush_resources(QXLInstance *sin) +{ + PCIQXLDevice *qxl = container_of(sin, PCIQXLDevice, ssd.qxl); + int ret; + + ret = qxl->num_free_res; + if (ret) { + qxl_push_free_res(qxl, 1); + } + return ret; +} + +static void qxl_create_guest_primary_complete(PCIQXLDevice *d); + +/* called from spice server thread context only */ +static void interface_async_complete_io(PCIQXLDevice *qxl, QXLCookie *cookie) +{ + uint32_t current_async; + + qemu_mutex_lock(&qxl->async_lock); + current_async = qxl->current_async; + qxl->current_async = QXL_UNDEFINED_IO; + qemu_mutex_unlock(&qxl->async_lock); + + trace_qxl_interface_async_complete_io(qxl->id, current_async, cookie); + if (!cookie) { + fprintf(stderr, "qxl: %s: error, cookie is NULL\n", __func__); + return; + } + if (cookie && current_async != cookie->io) { + fprintf(stderr, + "qxl: %s: error: current_async = %d != %" + PRId64 " = cookie->io\n", __func__, current_async, cookie->io); + } + switch (current_async) { + case QXL_IO_MEMSLOT_ADD_ASYNC: + case QXL_IO_DESTROY_PRIMARY_ASYNC: + case QXL_IO_UPDATE_AREA_ASYNC: + case QXL_IO_FLUSH_SURFACES_ASYNC: + case QXL_IO_MONITORS_CONFIG_ASYNC: + break; + case QXL_IO_CREATE_PRIMARY_ASYNC: + qxl_create_guest_primary_complete(qxl); + break; + case QXL_IO_DESTROY_ALL_SURFACES_ASYNC: + qxl_spice_destroy_surfaces_complete(qxl); + break; + case QXL_IO_DESTROY_SURFACE_ASYNC: + qxl_spice_destroy_surface_wait_complete(qxl, cookie->u.surface_id); + break; + default: + fprintf(stderr, "qxl: %s: unexpected current_async %u\n", __func__, + current_async); + } + qxl_send_events(qxl, QXL_INTERRUPT_IO_CMD); +} + +/* called from spice server thread context only */ +static void interface_update_area_complete(QXLInstance *sin, + uint32_t surface_id, + QXLRect *dirty, uint32_t num_updated_rects) +{ + PCIQXLDevice *qxl = container_of(sin, PCIQXLDevice, ssd.qxl); + int i; + int qxl_i; + + QEMU_LOCK_GUARD(&qxl->ssd.lock); + if (surface_id != 0 || !num_updated_rects || + !qxl->render_update_cookie_num) { + return; + } + trace_qxl_interface_update_area_complete(qxl->id, surface_id, dirty->left, + dirty->right, dirty->top, dirty->bottom); + trace_qxl_interface_update_area_complete_rest(qxl->id, num_updated_rects); + if (qxl->num_dirty_rects + num_updated_rects > QXL_NUM_DIRTY_RECTS) { + /* + * overflow - treat this as a full update. Not expected to be common. 
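+ * (QXL_NUM_DIRTY_RECTS is 64; see qxl.h below.)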
+ */ + trace_qxl_interface_update_area_complete_overflow(qxl->id, + QXL_NUM_DIRTY_RECTS); + qxl->guest_primary.resized = 1; + } + if (qxl->guest_primary.resized) { + /* + * Don't bother copying or scheduling the bh since we will flip + * the whole area anyway on completion of the update_area async call + */ + return; + } + qxl_i = qxl->num_dirty_rects; + for (i = 0; i < num_updated_rects; i++) { + qxl->dirty[qxl_i++] = dirty[i]; + } + qxl->num_dirty_rects += num_updated_rects; + trace_qxl_interface_update_area_complete_schedule_bh(qxl->id, + qxl->num_dirty_rects); + qemu_bh_schedule(qxl->update_area_bh); +} + +/* called from spice server thread context only */ +static void interface_async_complete(QXLInstance *sin, uint64_t cookie_token) +{ + PCIQXLDevice *qxl = container_of(sin, PCIQXLDevice, ssd.qxl); + QXLCookie *cookie = (QXLCookie *)(uintptr_t)cookie_token; + + switch (cookie->type) { + case QXL_COOKIE_TYPE_IO: + interface_async_complete_io(qxl, cookie); + g_free(cookie); + break; + case QXL_COOKIE_TYPE_RENDER_UPDATE_AREA: + qxl_render_update_area_done(qxl, cookie); + break; + case QXL_COOKIE_TYPE_POST_LOAD_MONITORS_CONFIG: + break; + default: + fprintf(stderr, "qxl: %s: unexpected cookie type %d\n", + __func__, cookie->type); + g_free(cookie); + } +} + +/* called from spice server thread context only */ +static void interface_set_client_capabilities(QXLInstance *sin, + uint8_t client_present, + uint8_t caps[58]) +{ + PCIQXLDevice *qxl = container_of(sin, PCIQXLDevice, ssd.qxl); + + if (qxl->revision < 4) { + trace_qxl_set_client_capabilities_unsupported_by_revision(qxl->id, + qxl->revision); + return; + } + + if (runstate_check(RUN_STATE_INMIGRATE) || + runstate_check(RUN_STATE_POSTMIGRATE)) { + return; + } + + qxl->shadow_rom.client_present = client_present; + memcpy(qxl->shadow_rom.client_capabilities, caps, + sizeof(qxl->shadow_rom.client_capabilities)); + qxl->rom->client_present = client_present; + memcpy(qxl->rom->client_capabilities, caps, + sizeof(qxl->rom->client_capabilities)); + qxl_rom_set_dirty(qxl); + + qxl_send_events(qxl, QXL_INTERRUPT_CLIENT); +} + +static bool qxl_rom_monitors_config_changed(QXLRom *rom, + VDAgentMonitorsConfig *monitors_config, + unsigned int max_outputs) +{ + int i; + unsigned int monitors_count; + + monitors_count = MIN(monitors_config->num_of_monitors, max_outputs); + + if (rom->client_monitors_config.count != monitors_count) { + return true; + } + + for (i = 0 ; i < rom->client_monitors_config.count ; ++i) { + VDAgentMonConfig *monitor = &monitors_config->monitors[i]; + QXLURect *rect = &rom->client_monitors_config.heads[i]; + /* monitor->depth ignored */ + if ((rect->left != monitor->x) || + (rect->top != monitor->y) || + (rect->right != monitor->x + monitor->width) || + (rect->bottom != monitor->y + monitor->height)) { + return true; + } + } + + return false; +} + +/* called from main context only */ +static int interface_client_monitors_config(QXLInstance *sin, + VDAgentMonitorsConfig *monitors_config) +{ + PCIQXLDevice *qxl = container_of(sin, PCIQXLDevice, ssd.qxl); + QXLRom *rom = memory_region_get_ram_ptr(&qxl->rom_bar); + int i; + unsigned max_outputs = ARRAY_SIZE(rom->client_monitors_config.heads); + bool config_changed = false; + + if (qxl->revision < 4) { + trace_qxl_client_monitors_config_unsupported_by_device(qxl->id, + qxl->revision); + return 0; + } + /* + * Older windows drivers set int_mask to 0 when their ISR is called, + * then later set it to ~0. So it doesn't relate to the actual interrupts + * handled. 
However, they are old, so clearly they don't support this + * interrupt + */ + if (qxl->ram->int_mask == 0 || qxl->ram->int_mask == ~0 || + !(qxl->ram->int_mask & QXL_INTERRUPT_CLIENT_MONITORS_CONFIG)) { + trace_qxl_client_monitors_config_unsupported_by_guest(qxl->id, + qxl->ram->int_mask, + monitors_config); + return 0; + } + if (!monitors_config) { + return 1; + } + +#if SPICE_SERVER_VERSION >= 0x000c06 /* release 0.12.6 */ + /* limit number of outputs based on setting limit */ + if (qxl->max_outputs && qxl->max_outputs <= max_outputs) { + max_outputs = qxl->max_outputs; + } +#endif + + config_changed = qxl_rom_monitors_config_changed(rom, + monitors_config, + max_outputs); + + memset(&rom->client_monitors_config, 0, + sizeof(rom->client_monitors_config)); + rom->client_monitors_config.count = monitors_config->num_of_monitors; + /* monitors_config->flags ignored */ + if (rom->client_monitors_config.count >= max_outputs) { + trace_qxl_client_monitors_config_capped(qxl->id, + monitors_config->num_of_monitors, + max_outputs); + rom->client_monitors_config.count = max_outputs; + } + for (i = 0 ; i < rom->client_monitors_config.count ; ++i) { + VDAgentMonConfig *monitor = &monitors_config->monitors[i]; + QXLURect *rect = &rom->client_monitors_config.heads[i]; + /* monitor->depth ignored */ + rect->left = monitor->x; + rect->top = monitor->y; + rect->right = monitor->x + monitor->width; + rect->bottom = monitor->y + monitor->height; + } + rom->client_monitors_config_crc = qxl_crc32( + (const uint8_t *)&rom->client_monitors_config, + sizeof(rom->client_monitors_config)); + trace_qxl_client_monitors_config_crc(qxl->id, + sizeof(rom->client_monitors_config), + rom->client_monitors_config_crc); + + trace_qxl_interrupt_client_monitors_config(qxl->id, + rom->client_monitors_config.count, + rom->client_monitors_config.heads); + if (config_changed) { + qxl_send_events(qxl, QXL_INTERRUPT_CLIENT_MONITORS_CONFIG); + } + return 1; +} + +static const QXLInterface qxl_interface = { + .base.type = SPICE_INTERFACE_QXL, + .base.description = "qxl gpu", + .base.major_version = SPICE_INTERFACE_QXL_MAJOR, + .base.minor_version = SPICE_INTERFACE_QXL_MINOR, + + .attache_worker = interface_attach_worker, + .set_compression_level = interface_set_compression_level, +#if SPICE_NEEDS_SET_MM_TIME + .set_mm_time = interface_set_mm_time, +#endif + .get_init_info = interface_get_init_info, + + /* the callbacks below are called from spice server thread context */ + .get_command = interface_get_command, + .req_cmd_notification = interface_req_cmd_notification, + .release_resource = interface_release_resource, + .get_cursor_command = interface_get_cursor_command, + .req_cursor_notification = interface_req_cursor_notification, + .notify_update = interface_notify_update, + .flush_resources = interface_flush_resources, + .async_complete = interface_async_complete, + .update_area_complete = interface_update_area_complete, + .set_client_capabilities = interface_set_client_capabilities, + .client_monitors_config = interface_client_monitors_config, +}; + +static const GraphicHwOps qxl_ops = { + .gfx_update = qxl_hw_update, + .gfx_update_async = true, +}; + +static void qxl_enter_vga_mode(PCIQXLDevice *d) +{ + if (d->mode == QXL_MODE_VGA) { + return; + } + trace_qxl_enter_vga_mode(d->id); + spice_qxl_driver_unload(&d->ssd.qxl); + graphic_console_set_hwops(d->ssd.dcl.con, d->vga.hw_ops, &d->vga); + update_displaychangelistener(&d->ssd.dcl, GUI_REFRESH_INTERVAL_DEFAULT); + qemu_spice_create_host_primary(&d->ssd); + d->mode = 
QXL_MODE_VGA; + qemu_spice_display_switch(&d->ssd, d->ssd.ds); + vga_dirty_log_start(&d->vga); + graphic_hw_update(d->vga.con); +} + +static void qxl_exit_vga_mode(PCIQXLDevice *d) +{ + if (d->mode != QXL_MODE_VGA) { + return; + } + trace_qxl_exit_vga_mode(d->id); + graphic_console_set_hwops(d->ssd.dcl.con, &qxl_ops, d); + update_displaychangelistener(&d->ssd.dcl, GUI_REFRESH_INTERVAL_IDLE); + vga_dirty_log_stop(&d->vga); + qxl_destroy_primary(d, QXL_SYNC); +} + +static void qxl_update_irq(PCIQXLDevice *d) +{ + uint32_t pending = le32_to_cpu(d->ram->int_pending); + uint32_t mask = le32_to_cpu(d->ram->int_mask); + int level = !!(pending & mask); + pci_set_irq(&d->pci, level); + qxl_ring_set_dirty(d); +} + +static void qxl_check_state(PCIQXLDevice *d) +{ + QXLRam *ram = d->ram; + int spice_display_running = qemu_spice_display_is_running(&d->ssd); + + assert(!spice_display_running || SPICE_RING_IS_EMPTY(&ram->cmd_ring)); + assert(!spice_display_running || SPICE_RING_IS_EMPTY(&ram->cursor_ring)); +} + +static void qxl_reset_state(PCIQXLDevice *d) +{ + QXLRom *rom = d->rom; + + qxl_check_state(d); + d->shadow_rom.update_id = cpu_to_le32(0); + *rom = d->shadow_rom; + qxl_rom_set_dirty(d); + init_qxl_ram(d); + d->num_free_res = 0; + d->last_release = NULL; + memset(&d->ssd.dirty, 0, sizeof(d->ssd.dirty)); + qxl_update_irq(d); +} + +static void qxl_soft_reset(PCIQXLDevice *d) +{ + trace_qxl_soft_reset(d->id); + qxl_check_state(d); + qxl_clear_guest_bug(d); + qemu_mutex_lock(&d->async_lock); + d->current_async = QXL_UNDEFINED_IO; + qemu_mutex_unlock(&d->async_lock); + + if (d->have_vga) { + qxl_enter_vga_mode(d); + } else { + d->mode = QXL_MODE_UNDEFINED; + } +} + +static void qxl_hard_reset(PCIQXLDevice *d, int loadvm) +{ + bool startstop = qemu_spice_display_is_running(&d->ssd); + + trace_qxl_hard_reset(d->id, loadvm); + + if (startstop) { + qemu_spice_display_stop(); + } + + qxl_spice_reset_cursor(d); + qxl_spice_reset_image_cache(d); + qxl_reset_surfaces(d); + qxl_reset_memslots(d); + + /* pre loadvm reset must not touch QXLRam. 
This lives in + * device memory, is migrated together with RAM and thus + * already loaded at this point */ + if (!loadvm) { + qxl_reset_state(d); + } + qemu_spice_create_host_memslot(&d->ssd); + qxl_soft_reset(d); + + if (startstop) { + qemu_spice_display_start(); + } +} + +static void qxl_reset_handler(DeviceState *dev) +{ + PCIQXLDevice *d = PCI_QXL(PCI_DEVICE(dev)); + + qxl_hard_reset(d, 0); +} + +static void qxl_vga_ioport_write(void *opaque, uint32_t addr, uint32_t val) +{ + VGACommonState *vga = opaque; + PCIQXLDevice *qxl = container_of(vga, PCIQXLDevice, vga); + + trace_qxl_io_write_vga(qxl->id, qxl_mode_to_string(qxl->mode), addr, val); + if (qxl->mode != QXL_MODE_VGA && + qxl->revision <= QXL_REVISION_STABLE_V12) { + qxl_destroy_primary(qxl, QXL_SYNC); + qxl_soft_reset(qxl); + } + vga_ioport_write(opaque, addr, val); +} + +static const MemoryRegionPortio qxl_vga_portio_list[] = { + { 0x04, 2, 1, .read = vga_ioport_read, + .write = qxl_vga_ioport_write }, /* 3b4 */ + { 0x0a, 1, 1, .read = vga_ioport_read, + .write = qxl_vga_ioport_write }, /* 3ba */ + { 0x10, 16, 1, .read = vga_ioport_read, + .write = qxl_vga_ioport_write }, /* 3c0 */ + { 0x24, 2, 1, .read = vga_ioport_read, + .write = qxl_vga_ioport_write }, /* 3d4 */ + { 0x2a, 1, 1, .read = vga_ioport_read, + .write = qxl_vga_ioport_write }, /* 3da */ + PORTIO_END_OF_LIST(), +}; + +static int qxl_add_memslot(PCIQXLDevice *d, uint32_t slot_id, uint64_t delta, + qxl_async_io async) +{ + static const int regions[] = { + QXL_RAM_RANGE_INDEX, + QXL_VRAM_RANGE_INDEX, + QXL_VRAM64_RANGE_INDEX, + }; + uint64_t guest_start; + uint64_t guest_end; + int pci_region; + pcibus_t pci_start; + pcibus_t pci_end; + MemoryRegion *mr; + intptr_t virt_start; + QXLDevMemSlot memslot; + int i; + + guest_start = le64_to_cpu(d->guest_slots[slot_id].slot.mem_start); + guest_end = le64_to_cpu(d->guest_slots[slot_id].slot.mem_end); + + trace_qxl_memslot_add_guest(d->id, slot_id, guest_start, guest_end); + + if (slot_id >= NUM_MEMSLOTS) { + qxl_set_guest_bug(d, "%s: slot_id >= NUM_MEMSLOTS %d >= %d", __func__, + slot_id, NUM_MEMSLOTS); + return 1; + } + if (guest_start > guest_end) { + qxl_set_guest_bug(d, "%s: guest_start > guest_end 0x%" PRIx64 + " > 0x%" PRIx64, __func__, guest_start, guest_end); + return 1; + } + + for (i = 0; i < ARRAY_SIZE(regions); i++) { + pci_region = regions[i]; + pci_start = d->pci.io_regions[pci_region].addr; + pci_end = pci_start + d->pci.io_regions[pci_region].size; + /* mapped? */ + if (pci_start == -1) { + continue; + } + /* start address in range ? */ + if (guest_start < pci_start || guest_start > pci_end) { + continue; + } + /* end address in range ? 
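(the whole slot must fit inside a single BAR)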
*/ + if (guest_end > pci_end) { + continue; + } + /* passed */ + break; + } + if (i == ARRAY_SIZE(regions)) { + qxl_set_guest_bug(d, "%s: finished loop without match", __func__); + return 1; + } + + switch (pci_region) { + case QXL_RAM_RANGE_INDEX: + mr = &d->vga.vram; + break; + case QXL_VRAM_RANGE_INDEX: + case 4 /* vram 64bit */: + mr = &d->vram_bar; + break; + default: + /* should not happen */ + qxl_set_guest_bug(d, "%s: pci_region = %d", __func__, pci_region); + return 1; + } + + virt_start = (intptr_t)memory_region_get_ram_ptr(mr); + memslot.slot_id = slot_id; + memslot.slot_group_id = MEMSLOT_GROUP_GUEST; /* guest group */ + memslot.virt_start = virt_start + (guest_start - pci_start); + memslot.virt_end = virt_start + (guest_end - pci_start); + memslot.addr_delta = memslot.virt_start - delta; + memslot.generation = d->rom->slot_generation = 0; + qxl_rom_set_dirty(d); + + qemu_spice_add_memslot(&d->ssd, &memslot, async); + d->guest_slots[slot_id].mr = mr; + d->guest_slots[slot_id].offset = memslot.virt_start - virt_start; + d->guest_slots[slot_id].size = memslot.virt_end - memslot.virt_start; + d->guest_slots[slot_id].delta = delta; + d->guest_slots[slot_id].active = 1; + return 0; +} + +static void qxl_del_memslot(PCIQXLDevice *d, uint32_t slot_id) +{ + qemu_spice_del_memslot(&d->ssd, MEMSLOT_GROUP_HOST, slot_id); + d->guest_slots[slot_id].active = 0; +} + +static void qxl_reset_memslots(PCIQXLDevice *d) +{ + qxl_spice_reset_memslots(d); + memset(&d->guest_slots, 0, sizeof(d->guest_slots)); +} + +static void qxl_reset_surfaces(PCIQXLDevice *d) +{ + trace_qxl_reset_surfaces(d->id); + d->mode = QXL_MODE_UNDEFINED; + qxl_spice_destroy_surfaces(d, QXL_SYNC); +} + +/* can be also called from spice server thread context */ +static bool qxl_get_check_slot_offset(PCIQXLDevice *qxl, QXLPHYSICAL pqxl, + uint32_t *s, uint64_t *o) +{ + uint64_t phys = le64_to_cpu(pqxl); + uint32_t slot = (phys >> (64 - 8)) & 0xff; + uint64_t offset = phys & 0xffffffffffff; + + if (slot >= NUM_MEMSLOTS) { + qxl_set_guest_bug(qxl, "slot too large %d >= %d", slot, + NUM_MEMSLOTS); + return false; + } + if (!qxl->guest_slots[slot].active) { + qxl_set_guest_bug(qxl, "inactive slot %d\n", slot); + return false; + } + if (offset < qxl->guest_slots[slot].delta) { + qxl_set_guest_bug(qxl, + "slot %d offset %"PRIu64" < delta %"PRIu64"\n", + slot, offset, qxl->guest_slots[slot].delta); + return false; + } + offset -= qxl->guest_slots[slot].delta; + if (offset > qxl->guest_slots[slot].size) { + qxl_set_guest_bug(qxl, + "slot %d offset %"PRIu64" > size %"PRIu64"\n", + slot, offset, qxl->guest_slots[slot].size); + return false; + } + + *s = slot; + *o = offset; + return true; +} + +/* can be also called from spice server thread context */ +void *qxl_phys2virt(PCIQXLDevice *qxl, QXLPHYSICAL pqxl, int group_id) +{ + uint64_t offset; + uint32_t slot; + void *ptr; + + switch (group_id) { + case MEMSLOT_GROUP_HOST: + offset = le64_to_cpu(pqxl) & 0xffffffffffff; + return (void *)(intptr_t)offset; + case MEMSLOT_GROUP_GUEST: + if (!qxl_get_check_slot_offset(qxl, pqxl, &slot, &offset)) { + return NULL; + } + ptr = memory_region_get_ram_ptr(qxl->guest_slots[slot].mr); + ptr += qxl->guest_slots[slot].offset; + ptr += offset; + return ptr; + } + return NULL; +} + +static void qxl_create_guest_primary_complete(PCIQXLDevice *qxl) +{ + /* for local rendering */ + qxl_render_resize(qxl); +} + +static void qxl_create_guest_primary(PCIQXLDevice *qxl, int loadvm, + qxl_async_io async) +{ + QXLDevSurfaceCreate surface; + QXLSurfaceCreate *sc 
= &qxl->guest_primary.surface; + uint32_t requested_height = le32_to_cpu(sc->height); + int requested_stride = le32_to_cpu(sc->stride); + + if (requested_stride == INT32_MIN || + abs(requested_stride) * (uint64_t)requested_height + > qxl->vgamem_size) { + qxl_set_guest_bug(qxl, "%s: requested primary larger than framebuffer" + " stride %d x height %" PRIu32 " > %" PRIu32, + __func__, requested_stride, requested_height, + qxl->vgamem_size); + return; + } + + if (qxl->mode == QXL_MODE_NATIVE) { + qxl_set_guest_bug(qxl, "%s: nop since already in QXL_MODE_NATIVE", + __func__); + } + qxl_exit_vga_mode(qxl); + + surface.format = le32_to_cpu(sc->format); + surface.height = le32_to_cpu(sc->height); + surface.mem = le64_to_cpu(sc->mem); + surface.position = le32_to_cpu(sc->position); + surface.stride = le32_to_cpu(sc->stride); + surface.width = le32_to_cpu(sc->width); + surface.type = le32_to_cpu(sc->type); + surface.flags = le32_to_cpu(sc->flags); + trace_qxl_create_guest_primary(qxl->id, sc->width, sc->height, sc->mem, + sc->format, sc->position); + trace_qxl_create_guest_primary_rest(qxl->id, sc->stride, sc->type, + sc->flags); + + if ((surface.stride & 0x3) != 0) { + qxl_set_guest_bug(qxl, "primary surface stride = %d %% 4 != 0", + surface.stride); + return; + } + + surface.mouse_mode = true; + surface.group_id = MEMSLOT_GROUP_GUEST; + if (loadvm) { + surface.flags |= QXL_SURF_FLAG_KEEP_DATA; + } + + qxl->mode = QXL_MODE_NATIVE; + qxl->cmdflags = 0; + qemu_spice_create_primary_surface(&qxl->ssd, 0, &surface, async); + + if (async == QXL_SYNC) { + qxl_create_guest_primary_complete(qxl); + } +} + +/* return 1 if surface destroy was initiated (in QXL_ASYNC case) or + * done (in QXL_SYNC case), 0 otherwise. */ +static int qxl_destroy_primary(PCIQXLDevice *d, qxl_async_io async) +{ + if (d->mode == QXL_MODE_UNDEFINED) { + return 0; + } + trace_qxl_destroy_primary(d->id); + d->mode = QXL_MODE_UNDEFINED; + qemu_spice_destroy_primary_surface(&d->ssd, 0, async); + qxl_spice_reset_cursor(d); + return 1; +} + +static void qxl_set_mode(PCIQXLDevice *d, unsigned int modenr, int loadvm) +{ + pcibus_t start = d->pci.io_regions[QXL_RAM_RANGE_INDEX].addr; + pcibus_t end = d->pci.io_regions[QXL_RAM_RANGE_INDEX].size + start; + QXLMode *mode = d->modes->modes + modenr; + uint64_t devmem = d->pci.io_regions[QXL_RAM_RANGE_INDEX].addr; + QXLMemSlot slot = { + .mem_start = start, + .mem_end = end + }; + + if (modenr >= d->modes->n_modes) { + qxl_set_guest_bug(d, "mode number out of range"); + return; + } + + QXLSurfaceCreate surface = { + .width = mode->x_res, + .height = mode->y_res, + .stride = -mode->x_res * 4, + .format = SPICE_SURFACE_FMT_32_xRGB, + .flags = loadvm ?
QXL_SURF_FLAG_KEEP_DATA : 0, + .mouse_mode = true, + .mem = devmem + d->shadow_rom.draw_area_offset, + }; + + trace_qxl_set_mode(d->id, modenr, mode->x_res, mode->y_res, mode->bits, + devmem); + if (!loadvm) { + qxl_hard_reset(d, 0); + } + + d->guest_slots[0].slot = slot; + assert(qxl_add_memslot(d, 0, devmem, QXL_SYNC) == 0); + + d->guest_primary.surface = surface; + qxl_create_guest_primary(d, 0, QXL_SYNC); + + d->mode = QXL_MODE_COMPAT; + d->cmdflags = QXL_COMMAND_FLAG_COMPAT; + if (mode->bits == 16) { + d->cmdflags |= QXL_COMMAND_FLAG_COMPAT_16BPP; + } + d->shadow_rom.mode = cpu_to_le32(modenr); + d->rom->mode = cpu_to_le32(modenr); + qxl_rom_set_dirty(d); +} + +static void ioport_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + PCIQXLDevice *d = opaque; + uint32_t io_port = addr; + qxl_async_io async = QXL_SYNC; + uint32_t orig_io_port; + + if (d->guest_bug && io_port != QXL_IO_RESET) { + return; + } + + if (d->revision <= QXL_REVISION_STABLE_V10 && + io_port > QXL_IO_FLUSH_RELEASE) { + qxl_set_guest_bug(d, "unsupported io %d for revision %d\n", + io_port, d->revision); + return; + } + + switch (io_port) { + case QXL_IO_RESET: + case QXL_IO_SET_MODE: + case QXL_IO_MEMSLOT_ADD: + case QXL_IO_MEMSLOT_DEL: + case QXL_IO_CREATE_PRIMARY: + case QXL_IO_UPDATE_IRQ: + case QXL_IO_LOG: + case QXL_IO_MEMSLOT_ADD_ASYNC: + case QXL_IO_CREATE_PRIMARY_ASYNC: + break; + default: + if (d->mode != QXL_MODE_VGA) { + break; + } + trace_qxl_io_unexpected_vga_mode(d->id, + addr, val, io_port_to_string(io_port)); + /* be nice to buggy guest drivers */ + if (io_port >= QXL_IO_UPDATE_AREA_ASYNC && + io_port < QXL_IO_RANGE_SIZE) { + qxl_send_events(d, QXL_INTERRUPT_IO_CMD); + } + return; + } + + /* we change the io_port to avoid ifdeffery in the main switch */ + orig_io_port = io_port; + switch (io_port) { + case QXL_IO_UPDATE_AREA_ASYNC: + io_port = QXL_IO_UPDATE_AREA; + goto async_common; + case QXL_IO_MEMSLOT_ADD_ASYNC: + io_port = QXL_IO_MEMSLOT_ADD; + goto async_common; + case QXL_IO_CREATE_PRIMARY_ASYNC: + io_port = QXL_IO_CREATE_PRIMARY; + goto async_common; + case QXL_IO_DESTROY_PRIMARY_ASYNC: + io_port = QXL_IO_DESTROY_PRIMARY; + goto async_common; + case QXL_IO_DESTROY_SURFACE_ASYNC: + io_port = QXL_IO_DESTROY_SURFACE_WAIT; + goto async_common; + case QXL_IO_DESTROY_ALL_SURFACES_ASYNC: + io_port = QXL_IO_DESTROY_ALL_SURFACES; + goto async_common; + case QXL_IO_FLUSH_SURFACES_ASYNC: + case QXL_IO_MONITORS_CONFIG_ASYNC: +async_common: + async = QXL_ASYNC; + WITH_QEMU_LOCK_GUARD(&d->async_lock) { + if (d->current_async != QXL_UNDEFINED_IO) { + qxl_set_guest_bug(d, "%d async started before last (%d) complete", + io_port, d->current_async); + return; + } + d->current_async = orig_io_port; + } + break; + default: + break; + } + trace_qxl_io_write(d->id, qxl_mode_to_string(d->mode), + addr, io_port_to_string(addr), + val, size, async); + + switch (io_port) { + case QXL_IO_UPDATE_AREA: + { + QXLCookie *cookie = NULL; + QXLRect update = d->ram->update_area; + + if (d->ram->update_surface > d->ssd.num_surfaces) { + qxl_set_guest_bug(d, "QXL_IO_UPDATE_AREA: invalid surface id %d\n", + d->ram->update_surface); + break; + } + if (update.left >= update.right || update.top >= update.bottom || + update.left < 0 || update.top < 0) { + qxl_set_guest_bug(d, + "QXL_IO_UPDATE_AREA: invalid area (%ux%u)x(%ux%u)\n", + update.left, update.top, update.right, update.bottom); + if (update.left == update.right || update.top == update.bottom) { + /* old drivers may provide empty area, keep going */ + 
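/* clear the guest-bug state that was just set above */ +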
qxl_clear_guest_bug(d); + goto cancel_async; + } + break; + } + if (async == QXL_ASYNC) { + cookie = qxl_cookie_new(QXL_COOKIE_TYPE_IO, + QXL_IO_UPDATE_AREA_ASYNC); + cookie->u.area = update; + } + qxl_spice_update_area(d, d->ram->update_surface, + cookie ? &cookie->u.area : &update, + NULL, 0, 0, async, cookie); + break; + } + case QXL_IO_NOTIFY_CMD: + qemu_spice_wakeup(&d->ssd); + break; + case QXL_IO_NOTIFY_CURSOR: + qemu_spice_wakeup(&d->ssd); + break; + case QXL_IO_UPDATE_IRQ: + qxl_update_irq(d); + break; + case QXL_IO_NOTIFY_OOM: + if (!SPICE_RING_IS_EMPTY(&d->ram->release_ring)) { + break; + } + d->oom_running = 1; + qxl_spice_oom(d); + d->oom_running = 0; + break; + case QXL_IO_SET_MODE: + qxl_set_mode(d, val, 0); + break; + case QXL_IO_LOG: +#ifdef CONFIG_MODULES + /* + * FIXME + * trace_event_get_state_backends() does not work for modules, + * it leads to "undefined symbol: qemu_qxl_io_log_semaphore" + */ + if (true) { +#else + if (trace_event_get_state_backends(TRACE_QXL_IO_LOG) || d->guestdebug) { +#endif + /* We cannot trust the guest to NUL terminate d->ram->log_buf */ + char *log_buf = g_strndup((const char *)d->ram->log_buf, + sizeof(d->ram->log_buf)); + trace_qxl_io_log(d->id, log_buf); + if (d->guestdebug) { + fprintf(stderr, "qxl/guest-%d: %" PRId64 ": %s", d->id, + qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), log_buf); + } + g_free(log_buf); + } + break; + case QXL_IO_RESET: + qxl_hard_reset(d, 0); + break; + case QXL_IO_MEMSLOT_ADD: + if (val >= NUM_MEMSLOTS) { + qxl_set_guest_bug(d, "QXL_IO_MEMSLOT_ADD: val out of range"); + break; + } + if (d->guest_slots[val].active) { + qxl_set_guest_bug(d, + "QXL_IO_MEMSLOT_ADD: memory slot already active"); + break; + } + d->guest_slots[val].slot = d->ram->mem_slot; + qxl_add_memslot(d, val, 0, async); + break; + case QXL_IO_MEMSLOT_DEL: + if (val >= NUM_MEMSLOTS) { + qxl_set_guest_bug(d, "QXL_IO_MEMSLOT_DEL: val out of range"); + break; + } + qxl_del_memslot(d, val); + break; + case QXL_IO_CREATE_PRIMARY: + if (val != 0) { + qxl_set_guest_bug(d, "QXL_IO_CREATE_PRIMARY (async=%d): val != 0", + async); + goto cancel_async; + } + d->guest_primary.surface = d->ram->create_surface; + qxl_create_guest_primary(d, 0, async); + break; + case QXL_IO_DESTROY_PRIMARY: + if (val != 0) { + qxl_set_guest_bug(d, "QXL_IO_DESTROY_PRIMARY (async=%d): val != 0", + async); + goto cancel_async; + } + if (!qxl_destroy_primary(d, async)) { + trace_qxl_io_destroy_primary_ignored(d->id, + qxl_mode_to_string(d->mode)); + goto cancel_async; + } + break; + case QXL_IO_DESTROY_SURFACE_WAIT: + if (val >= d->ssd.num_surfaces) { + qxl_set_guest_bug(d, "QXL_IO_DESTROY_SURFACE (async=%d):" + "%" PRIu64 " >= NUM_SURFACES", async, val); + goto cancel_async; + } + qxl_spice_destroy_surface_wait(d, val, async); + break; + case QXL_IO_FLUSH_RELEASE: { + QXLReleaseRing *ring = &d->ram->release_ring; + if (ring->prod - ring->cons + 1 == ring->num_items) { + fprintf(stderr, + "ERROR: no flush, full release ring [p%d,%dc]\n", + ring->prod, ring->cons); + } + qxl_push_free_res(d, 1 /* flush */); + break; + } + case QXL_IO_FLUSH_SURFACES_ASYNC: + qxl_spice_flush_surfaces_async(d); + break; + case QXL_IO_DESTROY_ALL_SURFACES: + d->mode = QXL_MODE_UNDEFINED; + qxl_spice_destroy_surfaces(d, async); + break; + case QXL_IO_MONITORS_CONFIG_ASYNC: + qxl_spice_monitors_config_async(d, 0); + break; + default: + qxl_set_guest_bug(d, "%s: unexpected ioport=0x%x\n", __func__, io_port); + } + return; +cancel_async: + if (async) { + qxl_send_events(d, QXL_INTERRUPT_IO_CMD); + 
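/* the async was cancelled, drop the pending-io marker under the lock */ +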
qemu_mutex_lock(&d->async_lock); + d->current_async = QXL_UNDEFINED_IO; + qemu_mutex_unlock(&d->async_lock); + } +} + +static uint64_t ioport_read(void *opaque, hwaddr addr, + unsigned size) +{ + PCIQXLDevice *qxl = opaque; + + trace_qxl_io_read_unexpected(qxl->id); + return 0xff; +} + +static const MemoryRegionOps qxl_io_ops = { + .read = ioport_read, + .write = ioport_write, + .valid = { + .min_access_size = 1, + .max_access_size = 1, + }, +}; + +static void qxl_update_irq_bh(void *opaque) +{ + PCIQXLDevice *d = opaque; + qxl_update_irq(d); +} + +static void qxl_send_events(PCIQXLDevice *d, uint32_t events) +{ + uint32_t old_pending; + uint32_t le_events = cpu_to_le32(events); + + trace_qxl_send_events(d->id, events); + if (!qemu_spice_display_is_running(&d->ssd)) { + /* spice-server tracks guest running state and should not do this */ + fprintf(stderr, "%s: spice-server bug: guest stopped, ignoring\n", + __func__); + trace_qxl_send_events_vm_stopped(d->id, events); + return; + } + /* + * Older versions of Spice forgot to define the QXLRam struct + * with the '__aligned__(4)' attribute. clang 7 and newer will + * thus warn that qatomic_fetch_or(&d->ram->int_pending, ...) + * might be a misaligned atomic access, and will generate an + * out-of-line call for it, which results in a link error since + * we don't currently link against libatomic. + * + * In fact we set up d->ram in init_qxl_ram() so it always starts + * at a 4K boundary, so we know that &d->ram->int_pending is + * naturally aligned for a uint32_t. Newer Spice versions + * (with Spice commit beda5ec7a6848be20c0cac2a9a8ef2a41e8069c1) + * will fix the bug directly. To deal with older versions, + * we tell the compiler to assume the address really is aligned. + * Any compiler which cares about the misalignment will have + * __builtin_assume_aligned. 
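+ *
+ * A minimal sketch of the resulting access, using the
+ * ALIGNED_UINT32_PTR() helper defined right below:
+ *
+ *   uint32_t *p = ALIGNED_UINT32_PTR(&d->ram->int_pending);
+ *   qatomic_fetch_or(p, le_events);  // no out-of-line libatomic call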
+ */ +#ifdef HAS_ASSUME_ALIGNED +#define ALIGNED_UINT32_PTR(P) ((uint32_t *)__builtin_assume_aligned(P, 4)) +#else +#define ALIGNED_UINT32_PTR(P) ((uint32_t *)P) +#endif + + old_pending = qatomic_fetch_or(ALIGNED_UINT32_PTR(&d->ram->int_pending), + le_events); + if ((old_pending & le_events) == le_events) { + return; + } + qemu_bh_schedule(d->update_irq); +} + +/* graphics console */ + +static void qxl_hw_update(void *opaque) +{ + PCIQXLDevice *qxl = opaque; + + qxl_render_update(qxl); +} + +static void qxl_dirty_one_surface(PCIQXLDevice *qxl, QXLPHYSICAL pqxl, + uint32_t height, int32_t stride) +{ + uint64_t offset, size; + uint32_t slot; + bool rc; + + rc = qxl_get_check_slot_offset(qxl, pqxl, &slot, &offset); + assert(rc == true); + size = (uint64_t)height * abs(stride); + trace_qxl_surfaces_dirty(qxl->id, offset, size); + qxl_set_dirty(qxl->guest_slots[slot].mr, + qxl->guest_slots[slot].offset + offset, + qxl->guest_slots[slot].offset + offset + size); +} + +static void qxl_dirty_surfaces(PCIQXLDevice *qxl) +{ + int i; + + if (qxl->mode != QXL_MODE_NATIVE && qxl->mode != QXL_MODE_COMPAT) { + return; + } + + /* dirty the primary surface */ + qxl_dirty_one_surface(qxl, qxl->guest_primary.surface.mem, + qxl->guest_primary.surface.height, + qxl->guest_primary.surface.stride); + + /* dirty the off-screen surfaces */ + for (i = 0; i < qxl->ssd.num_surfaces; i++) { + QXLSurfaceCmd *cmd; + + if (qxl->guest_surfaces.cmds[i] == 0) { + continue; + } + + cmd = qxl_phys2virt(qxl, qxl->guest_surfaces.cmds[i], + MEMSLOT_GROUP_GUEST); + assert(cmd); + assert(cmd->type == QXL_SURFACE_CMD_CREATE); + qxl_dirty_one_surface(qxl, cmd->u.surface_create.data, + cmd->u.surface_create.height, + cmd->u.surface_create.stride); + } +} + +static void qxl_vm_change_state_handler(void *opaque, bool running, + RunState state) +{ + PCIQXLDevice *qxl = opaque; + + if (running) { + /* + * if qxl_send_events was called from spice server context before + * migration ended, qxl_update_irq for these events might not have been + * called + */ + qxl_update_irq(qxl); + } else { + /* make sure surfaces are saved before migration */ + qxl_dirty_surfaces(qxl); + } +} + +/* display change listener */ + +static void display_update(DisplayChangeListener *dcl, + int x, int y, int w, int h) +{ + PCIQXLDevice *qxl = container_of(dcl, PCIQXLDevice, ssd.dcl); + + if (qxl->mode == QXL_MODE_VGA) { + qemu_spice_display_update(&qxl->ssd, x, y, w, h); + } +} + +static void display_switch(DisplayChangeListener *dcl, + struct DisplaySurface *surface) +{ + PCIQXLDevice *qxl = container_of(dcl, PCIQXLDevice, ssd.dcl); + + qxl->ssd.ds = surface; + if (qxl->mode == QXL_MODE_VGA) { + qemu_spice_display_switch(&qxl->ssd, surface); + } +} + +static void display_refresh(DisplayChangeListener *dcl) +{ + PCIQXLDevice *qxl = container_of(dcl, PCIQXLDevice, ssd.dcl); + + if (qxl->mode == QXL_MODE_VGA) { + qemu_spice_display_refresh(&qxl->ssd); + } +} + +static DisplayChangeListenerOps display_listener_ops = { + .dpy_name = "spice/qxl", + .dpy_gfx_update = display_update, + .dpy_gfx_switch = display_switch, + .dpy_refresh = display_refresh, +}; + +static void qxl_init_ramsize(PCIQXLDevice *qxl) +{ + /* vga mode framebuffer / primary surface (bar 0, first part) */ + if (qxl->vgamem_size_mb < 8) { + qxl->vgamem_size_mb = 8; + } + /* XXX: we round vgamem_size_mb up to a nearest power of two and it must be + * less than vga_common_init()'s maximum on qxl->vga.vram_size (512 now). 
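+ * For example, vgamem_mb=20 passes the clamps here and is then rounded
+ * up to 32 MiB by the pow2ceil() call further down.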
+ */ + if (qxl->vgamem_size_mb > 256) { + qxl->vgamem_size_mb = 256; + } + qxl->vgamem_size = qxl->vgamem_size_mb * MiB; + + /* vga ram (bar 0, total) */ + if (qxl->ram_size_mb != -1) { + qxl->vga.vram_size = qxl->ram_size_mb * MiB; + } + if (qxl->vga.vram_size < qxl->vgamem_size * 2) { + qxl->vga.vram_size = qxl->vgamem_size * 2; + } + + /* vram32 (surfaces, 32bit, bar 1) */ + if (qxl->vram32_size_mb != -1) { + qxl->vram32_size = qxl->vram32_size_mb * MiB; + } + if (qxl->vram32_size < 4096) { + qxl->vram32_size = 4096; + } + + /* vram (surfaces, 64bit, bar 4+5) */ + if (qxl->vram_size_mb != -1) { + qxl->vram_size = (uint64_t)qxl->vram_size_mb * MiB; + } + if (qxl->vram_size < qxl->vram32_size) { + qxl->vram_size = qxl->vram32_size; + } + + if (qxl->revision == 1) { + qxl->vram32_size = 4096; + qxl->vram_size = 4096; + } + qxl->vgamem_size = pow2ceil(qxl->vgamem_size); + qxl->vga.vram_size = pow2ceil(qxl->vga.vram_size); + qxl->vram32_size = pow2ceil(qxl->vram32_size); + qxl->vram_size = pow2ceil(qxl->vram_size); +} + +static void qxl_realize_common(PCIQXLDevice *qxl, Error **errp) +{ + uint8_t* config = qxl->pci.config; + uint32_t pci_device_rev; + uint32_t io_size; + + qemu_spice_display_init_common(&qxl->ssd); + qxl->mode = QXL_MODE_UNDEFINED; + qxl->num_memslots = NUM_MEMSLOTS; + qemu_mutex_init(&qxl->track_lock); + qemu_mutex_init(&qxl->async_lock); + qxl->current_async = QXL_UNDEFINED_IO; + qxl->guest_bug = 0; + + switch (qxl->revision) { + case 1: /* spice 0.4 -- qxl-1 */ + pci_device_rev = QXL_REVISION_STABLE_V04; + io_size = 8; + break; + case 2: /* spice 0.6 -- qxl-2 */ + pci_device_rev = QXL_REVISION_STABLE_V06; + io_size = 16; + break; + case 3: /* qxl-3 */ + pci_device_rev = QXL_REVISION_STABLE_V10; + io_size = 32; /* PCI region size must be pow2 */ + break; + case 4: /* qxl-4 */ + pci_device_rev = QXL_REVISION_STABLE_V12; + io_size = pow2ceil(QXL_IO_RANGE_SIZE); + break; + case 5: /* qxl-5 */ + pci_device_rev = QXL_REVISION_STABLE_V12 + 1; + io_size = pow2ceil(QXL_IO_RANGE_SIZE); + break; + default: + error_setg(errp, "Invalid revision %d for qxl device (max %d)", + qxl->revision, QXL_DEFAULT_REVISION); + return; + } + + pci_set_byte(&config[PCI_REVISION_ID], pci_device_rev); + pci_set_byte(&config[PCI_INTERRUPT_PIN], 1); + + qxl->rom_size = qxl_rom_size(); + memory_region_init_rom(&qxl->rom_bar, OBJECT(qxl), "qxl.vrom", + qxl->rom_size, &error_fatal); + init_qxl_rom(qxl); + init_qxl_ram(qxl); + + qxl->guest_surfaces.cmds = g_new0(QXLPHYSICAL, qxl->ssd.num_surfaces); + memory_region_init_ram(&qxl->vram_bar, OBJECT(qxl), "qxl.vram", + qxl->vram_size, &error_fatal); + memory_region_init_alias(&qxl->vram32_bar, OBJECT(qxl), "qxl.vram32", + &qxl->vram_bar, 0, qxl->vram32_size); + + memory_region_init_io(&qxl->io_bar, OBJECT(qxl), &qxl_io_ops, qxl, + "qxl-ioports", io_size); + if (qxl->have_vga) { + vga_dirty_log_start(&qxl->vga); + } + memory_region_set_flush_coalesced(&qxl->io_bar); + + + pci_register_bar(&qxl->pci, QXL_IO_RANGE_INDEX, + PCI_BASE_ADDRESS_SPACE_IO, &qxl->io_bar); + + pci_register_bar(&qxl->pci, QXL_ROM_RANGE_INDEX, + PCI_BASE_ADDRESS_SPACE_MEMORY, &qxl->rom_bar); + + pci_register_bar(&qxl->pci, QXL_RAM_RANGE_INDEX, + PCI_BASE_ADDRESS_SPACE_MEMORY, &qxl->vga.vram); + + pci_register_bar(&qxl->pci, QXL_VRAM_RANGE_INDEX, + PCI_BASE_ADDRESS_SPACE_MEMORY, &qxl->vram32_bar); + + if (qxl->vram32_size < qxl->vram_size) { + /* + * Make the 64bit vram bar show up only in case it is + * configured to be larger than the 32bit vram bar. 
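+ * For instance, -device qxl-vga,vram_size_mb=16,vram64_size_mb=64 maps
+ * this bar, while leaving both sizes equal keeps it unmapped.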
+ */ + pci_register_bar(&qxl->pci, QXL_VRAM64_RANGE_INDEX, + PCI_BASE_ADDRESS_SPACE_MEMORY | + PCI_BASE_ADDRESS_MEM_TYPE_64 | + PCI_BASE_ADDRESS_MEM_PREFETCH, + &qxl->vram_bar); + } + + /* print pci bar details */ + dprint(qxl, 1, "ram/%s: %" PRId64 " MB [region 0]\n", + qxl->have_vga ? "pri" : "sec", qxl->vga.vram_size / MiB); + dprint(qxl, 1, "vram/32: %" PRIx64 " MB [region 1]\n", + qxl->vram32_size / MiB); + dprint(qxl, 1, "vram/64: %" PRIx64 " MB %s\n", + qxl->vram_size / MiB, + qxl->vram32_size < qxl->vram_size ? "[region 4]" : "[unmapped]"); + + qxl->ssd.qxl.base.sif = &qxl_interface.base; + if (qemu_spice_add_display_interface(&qxl->ssd.qxl, qxl->vga.con) != 0) { + error_setg(errp, "qxl interface %d.%d not supported by spice-server", + SPICE_INTERFACE_QXL_MAJOR, SPICE_INTERFACE_QXL_MINOR); + return; + } + +#if SPICE_SERVER_VERSION >= 0x000e02 /* release 0.14.2 */ + char device_address[256] = ""; + if (qemu_spice_fill_device_address(qxl->vga.con, device_address, 256)) { + spice_qxl_set_device_info(&qxl->ssd.qxl, + device_address, + 0, + qxl->max_outputs); + } +#endif + + qemu_add_vm_change_state_handler(qxl_vm_change_state_handler, qxl); + + qxl->update_irq = qemu_bh_new(qxl_update_irq_bh, qxl); + qxl_reset_state(qxl); + + qxl->update_area_bh = qemu_bh_new(qxl_render_update_area_bh, qxl); + qxl->ssd.cursor_bh = qemu_bh_new(qemu_spice_cursor_refresh_bh, &qxl->ssd); +} + +static void qxl_realize_primary(PCIDevice *dev, Error **errp) +{ + PCIQXLDevice *qxl = PCI_QXL(dev); + VGACommonState *vga = &qxl->vga; + Error *local_err = NULL; + + qxl_init_ramsize(qxl); + vga->vbe_size = qxl->vgamem_size; + vga->vram_size_mb = qxl->vga.vram_size / MiB; + vga_common_init(vga, OBJECT(dev)); + vga_init(vga, OBJECT(dev), + pci_address_space(dev), pci_address_space_io(dev), false); + portio_list_init(&qxl->vga_port_list, OBJECT(dev), qxl_vga_portio_list, + vga, "vga"); + portio_list_set_flush_coalesced(&qxl->vga_port_list); + portio_list_add(&qxl->vga_port_list, pci_address_space_io(dev), 0x3b0); + qxl->have_vga = true; + + vga->con = graphic_console_init(DEVICE(dev), 0, &qxl_ops, qxl); + qxl->id = qemu_console_get_index(vga->con); /* == channel_id */ + if (qxl->id != 0) { + error_setg(errp, "primary qxl-vga device must be console 0 " + "(first display device on the command line)"); + return; + } + + qxl_realize_common(qxl, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + + qxl->ssd.dcl.ops = &display_listener_ops; + qxl->ssd.dcl.con = vga->con; + register_displaychangelistener(&qxl->ssd.dcl); +} + +static void qxl_realize_secondary(PCIDevice *dev, Error **errp) +{ + PCIQXLDevice *qxl = PCI_QXL(dev); + + qxl_init_ramsize(qxl); + memory_region_init_ram(&qxl->vga.vram, OBJECT(dev), "qxl.vgavram", + qxl->vga.vram_size, &error_fatal); + qxl->vga.vram_ptr = memory_region_get_ram_ptr(&qxl->vga.vram); + qxl->vga.con = graphic_console_init(DEVICE(dev), 0, &qxl_ops, qxl); + qxl->ssd.dcl.con = qxl->vga.con; + qxl->id = qemu_console_get_index(qxl->vga.con); /* == channel_id */ + + qxl_realize_common(qxl, errp); +} + +static int qxl_pre_save(void *opaque) +{ + PCIQXLDevice* d = opaque; + uint8_t *ram_start = d->vga.vram_ptr; + + trace_qxl_pre_save(d->id); + if (d->last_release == NULL) { + d->last_release_offset = 0; + } else { + d->last_release_offset = (uint8_t *)d->last_release - ram_start; + } + if (d->last_release_offset >= d->vga.vram_size) { + return 1; + } + + return 0; +} + +static int qxl_pre_load(void *opaque) +{ + PCIQXLDevice* d = opaque; + + 
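/* loadvm=1: leave QXLRam alone, it is migrated together with the RAM */ +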
trace_qxl_pre_load(d->id); + qxl_hard_reset(d, 1); + qxl_exit_vga_mode(d); + return 0; +} + +static void qxl_create_memslots(PCIQXLDevice *d) +{ + int i; + + for (i = 0; i < NUM_MEMSLOTS; i++) { + if (!d->guest_slots[i].active) { + continue; + } + qxl_add_memslot(d, i, 0, QXL_SYNC); + } +} + +static int qxl_post_load(void *opaque, int version) +{ + PCIQXLDevice* d = opaque; + uint8_t *ram_start = d->vga.vram_ptr; + QXLCommandExt *cmds; + int in, out, newmode; + + assert(d->last_release_offset < d->vga.vram_size); + if (d->last_release_offset == 0) { + d->last_release = NULL; + } else { + d->last_release = (QXLReleaseInfo *)(ram_start + d->last_release_offset); + } + + d->modes = (QXLModes*)((uint8_t*)d->rom + d->rom->modes_offset); + + trace_qxl_post_load(d->id, qxl_mode_to_string(d->mode)); + newmode = d->mode; + d->mode = QXL_MODE_UNDEFINED; + + switch (newmode) { + case QXL_MODE_UNDEFINED: + qxl_create_memslots(d); + break; + case QXL_MODE_VGA: + qxl_create_memslots(d); + qxl_enter_vga_mode(d); + break; + case QXL_MODE_NATIVE: + qxl_create_memslots(d); + qxl_create_guest_primary(d, 1, QXL_SYNC); + + /* replay surface-create and cursor-set commands */ + cmds = g_new0(QXLCommandExt, d->ssd.num_surfaces + 1); + for (in = 0, out = 0; in < d->ssd.num_surfaces; in++) { + if (d->guest_surfaces.cmds[in] == 0) { + continue; + } + cmds[out].cmd.data = d->guest_surfaces.cmds[in]; + cmds[out].cmd.type = QXL_CMD_SURFACE; + cmds[out].group_id = MEMSLOT_GROUP_GUEST; + out++; + } + if (d->guest_cursor) { + cmds[out].cmd.data = d->guest_cursor; + cmds[out].cmd.type = QXL_CMD_CURSOR; + cmds[out].group_id = MEMSLOT_GROUP_GUEST; + out++; + } + qxl_spice_loadvm_commands(d, cmds, out); + g_free(cmds); + if (d->guest_monitors_config) { + qxl_spice_monitors_config_async(d, 1); + } + break; + case QXL_MODE_COMPAT: + /* note: no need to call qxl_create_memslots, qxl_set_mode + * creates the mem slot. 
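(slot 0, covering the whole RAM bar)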
*/ + qxl_set_mode(d, d->shadow_rom.mode, 1); + break; + } + return 0; +} + +#define QXL_SAVE_VERSION 21 + +static bool qxl_monitors_config_needed(void *opaque) +{ + PCIQXLDevice *qxl = opaque; + + return qxl->guest_monitors_config != 0; +} + + +static const VMStateDescription qxl_memslot = { + .name = "qxl-memslot", + .version_id = QXL_SAVE_VERSION, + .minimum_version_id = QXL_SAVE_VERSION, + .fields = (VMStateField[]) { + VMSTATE_UINT64(slot.mem_start, struct guest_slots), + VMSTATE_UINT64(slot.mem_end, struct guest_slots), + VMSTATE_UINT32(active, struct guest_slots), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription qxl_surface = { + .name = "qxl-surface", + .version_id = QXL_SAVE_VERSION, + .minimum_version_id = QXL_SAVE_VERSION, + .fields = (VMStateField[]) { + VMSTATE_UINT32(width, QXLSurfaceCreate), + VMSTATE_UINT32(height, QXLSurfaceCreate), + VMSTATE_INT32(stride, QXLSurfaceCreate), + VMSTATE_UINT32(format, QXLSurfaceCreate), + VMSTATE_UINT32(position, QXLSurfaceCreate), + VMSTATE_UINT32(mouse_mode, QXLSurfaceCreate), + VMSTATE_UINT32(flags, QXLSurfaceCreate), + VMSTATE_UINT32(type, QXLSurfaceCreate), + VMSTATE_UINT64(mem, QXLSurfaceCreate), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription qxl_vmstate_monitors_config = { + .name = "qxl/monitors-config", + .version_id = 1, + .minimum_version_id = 1, + .needed = qxl_monitors_config_needed, + .fields = (VMStateField[]) { + VMSTATE_UINT64(guest_monitors_config, PCIQXLDevice), + VMSTATE_END_OF_LIST() + }, +}; + +static const VMStateDescription qxl_vmstate = { + .name = "qxl", + .version_id = QXL_SAVE_VERSION, + .minimum_version_id = QXL_SAVE_VERSION, + .pre_save = qxl_pre_save, + .pre_load = qxl_pre_load, + .post_load = qxl_post_load, + .fields = (VMStateField[]) { + VMSTATE_PCI_DEVICE(pci, PCIQXLDevice), + VMSTATE_STRUCT(vga, PCIQXLDevice, 0, vmstate_vga_common, VGACommonState), + VMSTATE_UINT32(shadow_rom.mode, PCIQXLDevice), + VMSTATE_UINT32(num_free_res, PCIQXLDevice), + VMSTATE_UINT32(last_release_offset, PCIQXLDevice), + VMSTATE_UINT32(mode, PCIQXLDevice), + VMSTATE_UINT32(ssd.unique, PCIQXLDevice), + VMSTATE_INT32_EQUAL(num_memslots, PCIQXLDevice, NULL), + VMSTATE_STRUCT_ARRAY(guest_slots, PCIQXLDevice, NUM_MEMSLOTS, 0, + qxl_memslot, struct guest_slots), + VMSTATE_STRUCT(guest_primary.surface, PCIQXLDevice, 0, + qxl_surface, QXLSurfaceCreate), + VMSTATE_INT32_EQUAL(ssd.num_surfaces, PCIQXLDevice, NULL), + VMSTATE_VARRAY_INT32(guest_surfaces.cmds, PCIQXLDevice, + ssd.num_surfaces, 0, + vmstate_info_uint64, uint64_t), + VMSTATE_UINT64(guest_cursor, PCIQXLDevice), + VMSTATE_END_OF_LIST() + }, + .subsections = (const VMStateDescription*[]) { + &qxl_vmstate_monitors_config, + NULL + } +}; + +static Property qxl_properties[] = { + DEFINE_PROP_UINT32("ram_size", PCIQXLDevice, vga.vram_size, 64 * MiB), + DEFINE_PROP_UINT64("vram_size", PCIQXLDevice, vram32_size, 64 * MiB), + DEFINE_PROP_UINT32("revision", PCIQXLDevice, revision, + QXL_DEFAULT_REVISION), + DEFINE_PROP_UINT32("debug", PCIQXLDevice, debug, 0), + DEFINE_PROP_UINT32("guestdebug", PCIQXLDevice, guestdebug, 0), + DEFINE_PROP_UINT32("cmdlog", PCIQXLDevice, cmdlog, 0), + DEFINE_PROP_UINT32("ram_size_mb", PCIQXLDevice, ram_size_mb, -1), + DEFINE_PROP_UINT32("vram_size_mb", PCIQXLDevice, vram32_size_mb, -1), + DEFINE_PROP_UINT32("vram64_size_mb", PCIQXLDevice, vram_size_mb, -1), + DEFINE_PROP_UINT32("vgamem_mb", PCIQXLDevice, vgamem_size_mb, 16), + DEFINE_PROP_INT32("surfaces", PCIQXLDevice, ssd.num_surfaces, 1024), +#if SPICE_SERVER_VERSION >= 
0x000c06 /* release 0.12.6 */ + DEFINE_PROP_UINT16("max_outputs", PCIQXLDevice, max_outputs, 0), +#endif + DEFINE_PROP_UINT32("xres", PCIQXLDevice, xres, 0), + DEFINE_PROP_UINT32("yres", PCIQXLDevice, yres, 0), + DEFINE_PROP_BOOL("global-vmstate", PCIQXLDevice, vga.global_vmstate, false), + DEFINE_PROP_END_OF_LIST(), +}; + +static void qxl_pci_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + + k->vendor_id = REDHAT_PCI_VENDOR_ID; + k->device_id = QXL_DEVICE_ID_STABLE; + set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories); + dc->reset = qxl_reset_handler; + dc->vmsd = &qxl_vmstate; + device_class_set_props(dc, qxl_properties); +} + +static const TypeInfo qxl_pci_type_info = { + .name = TYPE_PCI_QXL, + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(PCIQXLDevice), + .abstract = true, + .class_init = qxl_pci_class_init, + .interfaces = (InterfaceInfo[]) { + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { }, + }, +}; + +static void qxl_primary_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + + k->realize = qxl_realize_primary; + k->romfile = "vgabios-qxl.bin"; + k->class_id = PCI_CLASS_DISPLAY_VGA; + dc->desc = "Spice QXL GPU (primary, vga compatible)"; + dc->hotpluggable = false; +} + +static const TypeInfo qxl_primary_info = { + .name = "qxl-vga", + .parent = TYPE_PCI_QXL, + .class_init = qxl_primary_class_init, +}; +module_obj("qxl-vga"); + +static void qxl_secondary_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + + k->realize = qxl_realize_secondary; + k->class_id = PCI_CLASS_DISPLAY_OTHER; + dc->desc = "Spice QXL GPU (secondary)"; +} + +static const TypeInfo qxl_secondary_info = { + .name = "qxl", + .parent = TYPE_PCI_QXL, + .class_init = qxl_secondary_class_init, +}; +module_obj("qxl"); + +static void qxl_register_types(void) +{ + type_register_static(&qxl_pci_type_info); + type_register_static(&qxl_primary_info); + type_register_static(&qxl_secondary_info); +} + +type_init(qxl_register_types) + +module_dep("ui-spice-core"); diff --git a/hw/display/qxl.h b/hw/display/qxl.h new file mode 100644 index 000000000..30d21f4d0 --- /dev/null +++ b/hw/display/qxl.h @@ -0,0 +1,177 @@ +#ifndef HW_QXL_H +#define HW_QXL_H + + +#include "hw/pci/pci.h" +#include "vga_int.h" +#include "qemu/thread.h" + +#include "ui/qemu-spice.h" +#include "ui/spice-display.h" +#include "qom/object.h" + +enum qxl_mode { + QXL_MODE_UNDEFINED, + QXL_MODE_VGA, + QXL_MODE_COMPAT, /* spice 0.4.x */ + QXL_MODE_NATIVE, +}; + +#ifndef QXL_VRAM64_RANGE_INDEX +#define QXL_VRAM64_RANGE_INDEX 4 +#endif + +#define QXL_UNDEFINED_IO UINT32_MAX + +#define QXL_NUM_DIRTY_RECTS 64 + +#define QXL_PAGE_BITS 12 +#define QXL_PAGE_SIZE (1 << QXL_PAGE_BITS); + +struct PCIQXLDevice { + PCIDevice pci; + PortioList vga_port_list; + SimpleSpiceDisplay ssd; + int id; + bool have_vga; + uint32_t debug; + uint32_t guestdebug; + uint32_t cmdlog; + + uint32_t guest_bug; + + enum qxl_mode mode; + uint32_t cmdflags; + uint32_t revision; + + int32_t num_memslots; + + uint32_t current_async; + QemuMutex async_lock; + + struct guest_slots { + QXLMemSlot slot; + MemoryRegion *mr; + uint64_t offset; + uint64_t size; + uint64_t delta; + uint32_t active; + } guest_slots[NUM_MEMSLOTS]; + + struct guest_primary { + QXLSurfaceCreate surface; + uint32_t commands; + uint32_t resized; + int32_t qxl_stride; + 
uint32_t abs_stride; + uint32_t bits_pp; + uint32_t bytes_pp; + uint8_t *data; + } guest_primary; + + struct surfaces { + QXLPHYSICAL *cmds; + uint32_t count; + uint32_t max; + } guest_surfaces; + QXLPHYSICAL guest_cursor; + + QXLPHYSICAL guest_monitors_config; + uint32_t guest_head0_width; + uint32_t guest_head0_height; + + QemuMutex track_lock; + + /* thread signaling */ + QEMUBH *update_irq; + + /* ram pci bar */ + QXLRam *ram; + VGACommonState vga; + uint32_t num_free_res; + QXLReleaseInfo *last_release; + uint32_t last_release_offset; + uint32_t oom_running; + uint32_t vgamem_size; + + /* rom pci bar */ + QXLRom shadow_rom; + QXLRom *rom; + QXLModes *modes; + uint32_t rom_size; + MemoryRegion rom_bar; +#if SPICE_SERVER_VERSION >= 0x000c06 /* release 0.12.6 */ + uint16_t max_outputs; +#endif + + /* vram pci bar */ + uint64_t vram_size; + MemoryRegion vram_bar; + uint64_t vram32_size; + MemoryRegion vram32_bar; + + /* io bar */ + MemoryRegion io_bar; + + /* user-friendly properties (in megabytes) */ + uint32_t ram_size_mb; + uint32_t vram_size_mb; + uint32_t vram32_size_mb; + uint32_t vgamem_size_mb; + uint32_t xres; + uint32_t yres; + + /* qxl_render_update state */ + int render_update_cookie_num; + int num_dirty_rects; + QXLRect dirty[QXL_NUM_DIRTY_RECTS]; + QEMUBH *update_area_bh; +}; + +#define TYPE_PCI_QXL "pci-qxl" +OBJECT_DECLARE_SIMPLE_TYPE(PCIQXLDevice, PCI_QXL) + +#define PANIC_ON(x) if ((x)) { \ + printf("%s: PANIC %s failed\n", __func__, #x); \ + abort(); \ +} + +#define dprint(_qxl, _level, _fmt, ...) \ + do { \ + if (_qxl->debug >= _level) { \ + fprintf(stderr, "qxl-%d: ", _qxl->id); \ + fprintf(stderr, _fmt, ## __VA_ARGS__); \ + } \ + } while (0) + +#define QXL_DEFAULT_REVISION (QXL_REVISION_STABLE_V12 + 1) + +/* qxl.c */ +void *qxl_phys2virt(PCIQXLDevice *qxl, QXLPHYSICAL phys, int group_id); +void qxl_set_guest_bug(PCIQXLDevice *qxl, const char *msg, ...) 
+ GCC_FMT_ATTR(2, 3); + +void qxl_spice_update_area(PCIQXLDevice *qxl, uint32_t surface_id, + struct QXLRect *area, struct QXLRect *dirty_rects, + uint32_t num_dirty_rects, + uint32_t clear_dirty_region, + qxl_async_io async, QXLCookie *cookie); +void qxl_spice_loadvm_commands(PCIQXLDevice *qxl, struct QXLCommandExt *ext, + uint32_t count); +void qxl_spice_oom(PCIQXLDevice *qxl); +void qxl_spice_reset_memslots(PCIQXLDevice *qxl); +void qxl_spice_reset_image_cache(PCIQXLDevice *qxl); +void qxl_spice_reset_cursor(PCIQXLDevice *qxl); + +/* qxl-logger.c */ +int qxl_log_cmd_cursor(PCIQXLDevice *qxl, QXLCursorCmd *cmd, int group_id); +int qxl_log_command(PCIQXLDevice *qxl, const char *ring, QXLCommandExt *ext); + +/* qxl-render.c */ +void qxl_render_resize(PCIQXLDevice *qxl); +void qxl_render_update(PCIQXLDevice *qxl); +int qxl_render_cursor(PCIQXLDevice *qxl, QXLCommandExt *ext); +void qxl_render_update_area_done(PCIQXLDevice *qxl, QXLCookie *cookie); +void qxl_render_update_area_bh(void *opaque); + +#endif diff --git a/hw/display/ramfb-standalone.c b/hw/display/ramfb-standalone.c new file mode 100644 index 000000000..8c0094397 --- /dev/null +++ b/hw/display/ramfb-standalone.c @@ -0,0 +1,65 @@ +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/module.h" +#include "hw/loader.h" +#include "hw/qdev-properties.h" +#include "hw/display/ramfb.h" +#include "ui/console.h" +#include "qom/object.h" + +typedef struct RAMFBStandaloneState RAMFBStandaloneState; +DECLARE_INSTANCE_CHECKER(RAMFBStandaloneState, RAMFB, + TYPE_RAMFB_DEVICE) + +struct RAMFBStandaloneState { + SysBusDevice parent_obj; + QemuConsole *con; + RAMFBState *state; +}; + +static void display_update_wrapper(void *dev) +{ + RAMFBStandaloneState *ramfb = RAMFB(dev); + + if (0 /* native driver active */) { + /* non-standalone device would run native display update here */; + } else { + ramfb_display_update(ramfb->con, ramfb->state); + } +} + +static const GraphicHwOps wrapper_ops = { + .gfx_update = display_update_wrapper, +}; + +static void ramfb_realizefn(DeviceState *dev, Error **errp) +{ + RAMFBStandaloneState *ramfb = RAMFB(dev); + + ramfb->con = graphic_console_init(dev, 0, &wrapper_ops, dev); + ramfb->state = ramfb_setup(errp); +} + +static void ramfb_class_initfn(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories); + dc->realize = ramfb_realizefn; + dc->desc = "ram framebuffer standalone device"; + dc->user_creatable = true; +} + +static const TypeInfo ramfb_info = { + .name = TYPE_RAMFB_DEVICE, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(RAMFBStandaloneState), + .class_init = ramfb_class_initfn, +}; + +static void ramfb_register_types(void) +{ + type_register_static(&ramfb_info); +} + +type_init(ramfb_register_types) diff --git a/hw/display/ramfb.c b/hw/display/ramfb.c new file mode 100644 index 000000000..79b9754a5 --- /dev/null +++ b/hw/display/ramfb.c @@ -0,0 +1,135 @@ +/* + * early boot framebuffer in guest ram + * configured using fw_cfg + * + * Copyright Red Hat, Inc. 2017 + * + * Author: + * Gerd Hoffmann + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
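+ *
+ * The guest writes a RAMFBCfg record (framebuffer address, DRM
+ * fourcc, width, height and stride, all big-endian) into the fw_cfg
+ * file "etc/ramfb"; the write callback maps the guest pages and
+ * wraps them into a DisplaySurface for the next display update.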
+ */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "hw/loader.h" +#include "hw/display/ramfb.h" +#include "hw/display/bochs-vbe.h" /* for limits */ +#include "ui/console.h" +#include "sysemu/reset.h" + +struct QEMU_PACKED RAMFBCfg { + uint64_t addr; + uint32_t fourcc; + uint32_t flags; + uint32_t width; + uint32_t height; + uint32_t stride; +}; + +struct RAMFBState { + DisplaySurface *ds; + uint32_t width, height; + struct RAMFBCfg cfg; +}; + +static void ramfb_unmap_display_surface(pixman_image_t *image, void *unused) +{ + void *data = pixman_image_get_data(image); + uint32_t size = pixman_image_get_stride(image) * + pixman_image_get_height(image); + cpu_physical_memory_unmap(data, size, 0, 0); +} + +static DisplaySurface *ramfb_create_display_surface(int width, int height, + pixman_format_code_t format, + hwaddr stride, hwaddr addr) +{ + DisplaySurface *surface; + hwaddr size, mapsize, linesize; + void *data; + + if (width < 16 || width > VBE_DISPI_MAX_XRES || + height < 16 || height > VBE_DISPI_MAX_YRES || + format == 0 /* unknown format */) + return NULL; + + linesize = width * PIXMAN_FORMAT_BPP(format) / 8; + if (stride == 0) { + stride = linesize; + } + + mapsize = size = stride * (height - 1) + linesize; + data = cpu_physical_memory_map(addr, &mapsize, false); + if (size != mapsize) { + cpu_physical_memory_unmap(data, mapsize, 0, 0); + return NULL; + } + + surface = qemu_create_displaysurface_from(width, height, + format, stride, data); + pixman_image_set_destroy_function(surface->image, + ramfb_unmap_display_surface, NULL); + + return surface; +} + +static void ramfb_fw_cfg_write(void *dev, off_t offset, size_t len) +{ + RAMFBState *s = dev; + DisplaySurface *surface; + uint32_t fourcc, format, width, height; + hwaddr stride, addr; + + width = be32_to_cpu(s->cfg.width); + height = be32_to_cpu(s->cfg.height); + stride = be32_to_cpu(s->cfg.stride); + fourcc = be32_to_cpu(s->cfg.fourcc); + addr = be64_to_cpu(s->cfg.addr); + format = qemu_drm_format_to_pixman(fourcc); + + surface = ramfb_create_display_surface(width, height, + format, stride, addr); + if (!surface) { + return; + } + + s->width = width; + s->height = height; + s->ds = surface; +} + +void ramfb_display_update(QemuConsole *con, RAMFBState *s) +{ + if (!s->width || !s->height) { + return; + } + + if (s->ds) { + dpy_gfx_replace_surface(con, s->ds); + s->ds = NULL; + } + + /* simple full screen update */ + dpy_gfx_update_full(con); +} + +RAMFBState *ramfb_setup(Error **errp) +{ + FWCfgState *fw_cfg = fw_cfg_find(); + RAMFBState *s; + + if (!fw_cfg || !fw_cfg->dma_enabled) { + error_setg(errp, "ramfb device requires fw_cfg with DMA"); + return NULL; + } + + s = g_new0(RAMFBState, 1); + + rom_add_vga("vgabios-ramfb.bin"); + fw_cfg_add_file_callback(fw_cfg, "etc/ramfb", + NULL, ramfb_fw_cfg_write, s, + &s->cfg, sizeof(s->cfg), false); + return s; +} diff --git a/hw/display/sii9022.c b/hw/display/sii9022.c new file mode 100644 index 000000000..b591a5878 --- /dev/null +++ b/hw/display/sii9022.c @@ -0,0 +1,193 @@ +/* + * Silicon Image SiI9022 + * + * This is a pretty hollow emulation: all we do is acknowledge that we + * exist (chip ID) and confirm that we get switched over into DDC mode + * so the emulated host can proceed to read out EDID data. All subsequent + * set-up of connectors etc will be acknowledged and ignored. + * + * Copyright (C) 2018 Linus Walleij + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
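+ *
+ * On realize the model creates an I2CDDC slave at address 0x50 on
+ * its parent bus; that slave is what actually serves the EDID block
+ * once the guest has negotiated DDC mode via SYS_CTRL_DATA.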
+ * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "qemu/module.h" +#include "hw/i2c/i2c.h" +#include "migration/vmstate.h" +#include "hw/display/i2c-ddc.h" +#include "trace.h" +#include "qom/object.h" + +#define SII9022_SYS_CTRL_DATA 0x1a +#define SII9022_SYS_CTRL_PWR_DWN 0x10 +#define SII9022_SYS_CTRL_AV_MUTE 0x08 +#define SII9022_SYS_CTRL_DDC_BUS_REQ 0x04 +#define SII9022_SYS_CTRL_DDC_BUS_GRTD 0x02 +#define SII9022_SYS_CTRL_OUTPUT_MODE 0x01 +#define SII9022_SYS_CTRL_OUTPUT_HDMI 1 +#define SII9022_SYS_CTRL_OUTPUT_DVI 0 +#define SII9022_REG_CHIPID 0x1b +#define SII9022_INT_ENABLE 0x3c +#define SII9022_INT_STATUS 0x3d +#define SII9022_INT_STATUS_HOTPLUG 0x01; +#define SII9022_INT_STATUS_PLUGGED 0x04; + +#define TYPE_SII9022 "sii9022" +OBJECT_DECLARE_SIMPLE_TYPE(sii9022_state, SII9022) + +struct sii9022_state { + I2CSlave parent_obj; + uint8_t ptr; + bool addr_byte; + bool ddc_req; + bool ddc_skip_finish; + bool ddc; +}; + +static const VMStateDescription vmstate_sii9022 = { + .name = "sii9022", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_I2C_SLAVE(parent_obj, sii9022_state), + VMSTATE_UINT8(ptr, sii9022_state), + VMSTATE_BOOL(addr_byte, sii9022_state), + VMSTATE_BOOL(ddc_req, sii9022_state), + VMSTATE_BOOL(ddc_skip_finish, sii9022_state), + VMSTATE_BOOL(ddc, sii9022_state), + VMSTATE_END_OF_LIST() + } +}; + +static int sii9022_event(I2CSlave *i2c, enum i2c_event event) +{ + sii9022_state *s = SII9022(i2c); + + switch (event) { + case I2C_START_SEND: + s->addr_byte = true; + break; + case I2C_START_RECV: + break; + case I2C_FINISH: + break; + case I2C_NACK: + break; + } + + return 0; +} + +static uint8_t sii9022_rx(I2CSlave *i2c) +{ + sii9022_state *s = SII9022(i2c); + uint8_t res = 0x00; + + switch (s->ptr) { + case SII9022_SYS_CTRL_DATA: + if (s->ddc_req) { + /* Acknowledge DDC bus request */ + res = SII9022_SYS_CTRL_DDC_BUS_GRTD | SII9022_SYS_CTRL_DDC_BUS_REQ; + } + break; + case SII9022_REG_CHIPID: + res = 0xb0; + break; + case SII9022_INT_STATUS: + /* Something is cold-plugged in, no interrupts */ + res = SII9022_INT_STATUS_PLUGGED; + break; + default: + break; + } + + trace_sii9022_read_reg(s->ptr, res); + s->ptr++; + + return res; +} + +static int sii9022_tx(I2CSlave *i2c, uint8_t data) +{ + sii9022_state *s = SII9022(i2c); + + if (s->addr_byte) { + s->ptr = data; + s->addr_byte = false; + return 0; + } + + switch (s->ptr) { + case SII9022_SYS_CTRL_DATA: + if (data & SII9022_SYS_CTRL_DDC_BUS_REQ) { + s->ddc_req = true; + if (data & SII9022_SYS_CTRL_DDC_BUS_GRTD) { + s->ddc = true; + /* Skip this finish since we just switched to DDC */ + s->ddc_skip_finish = true; + trace_sii9022_switch_mode("DDC"); + } + } else { + s->ddc_req = false; + s->ddc = false; + trace_sii9022_switch_mode("normal"); + } + break; + default: + break; + } + + trace_sii9022_write_reg(s->ptr, data); + s->ptr++; + + return 0; +} + +static void sii9022_reset(DeviceState *dev) +{ + sii9022_state *s = SII9022(dev); + + s->ptr = 0; + s->addr_byte = false; + s->ddc_req = false; + s->ddc_skip_finish = false; + s->ddc = false; +} + +static void sii9022_realize(DeviceState *dev, Error **errp) +{ + I2CBus *bus; + + bus = I2C_BUS(qdev_get_parent_bus(dev)); + i2c_slave_create_simple(bus, TYPE_I2CDDC, 0x50); +} + +static void sii9022_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + I2CSlaveClass *k = I2C_SLAVE_CLASS(klass); + + k->event = sii9022_event; + k->recv = sii9022_rx; + k->send = sii9022_tx; + 
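/* the I2C slave callbacks above implement the register interface;
+ * the qdev hooks below wire up reset, realize and migration */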
dc->reset = sii9022_reset; + dc->realize = sii9022_realize; + dc->vmsd = &vmstate_sii9022; +} + +static const TypeInfo sii9022_info = { + .name = TYPE_SII9022, + .parent = TYPE_I2C_SLAVE, + .instance_size = sizeof(sii9022_state), + .class_init = sii9022_class_init, +}; + +static void sii9022_register_types(void) +{ + type_register_static(&sii9022_info); +} + +type_init(sii9022_register_types) diff --git a/hw/display/sm501.c b/hw/display/sm501.c new file mode 100644 index 000000000..663c37e7f --- /dev/null +++ b/hw/display/sm501.c @@ -0,0 +1,2124 @@ +/* + * QEMU SM501 Device + * + * Copyright (c) 2008 Shin-ichiro KAWASAKI + * Copyright (c) 2016-2020 BALATON Zoltan + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "qapi/error.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "hw/char/serial.h" +#include "ui/console.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "hw/pci/pci.h" +#include "hw/qdev-properties.h" +#include "hw/i2c/i2c.h" +#include "hw/display/i2c-ddc.h" +#include "qemu/range.h" +#include "ui/pixel_ops.h" +#include "qemu/bswap.h" +#include "trace.h" +#include "qom/object.h" + +#define MMIO_BASE_OFFSET 0x3e00000 +#define MMIO_SIZE 0x200000 +#define DC_PALETTE_ENTRIES (0x400 * 3) + +/* SM501 register definitions taken from "linux/include/linux/sm501-regs.h" */ + +/* System Configuration area */ +/* System config base */ +#define SM501_SYS_CONFIG (0x000000) + +/* config 1 */ +#define SM501_SYSTEM_CONTROL (0x000000) + +#define SM501_SYSCTRL_PANEL_TRISTATE (1 << 0) +#define SM501_SYSCTRL_MEM_TRISTATE (1 << 1) +#define SM501_SYSCTRL_CRT_TRISTATE (1 << 2) + +#define SM501_SYSCTRL_PCI_SLAVE_BURST_MASK (3 << 4) +#define SM501_SYSCTRL_PCI_SLAVE_BURST_1 (0 << 4) +#define SM501_SYSCTRL_PCI_SLAVE_BURST_2 (1 << 4) +#define SM501_SYSCTRL_PCI_SLAVE_BURST_4 (2 << 4) +#define SM501_SYSCTRL_PCI_SLAVE_BURST_8 (3 << 4) + +#define SM501_SYSCTRL_PCI_CLOCK_RUN_EN (1 << 6) +#define SM501_SYSCTRL_PCI_RETRY_DISABLE (1 << 7) +#define SM501_SYSCTRL_PCI_SUBSYS_LOCK (1 << 11) +#define SM501_SYSCTRL_PCI_BURST_READ_EN (1 << 15) + +/* miscellaneous control */ + +#define SM501_MISC_CONTROL (0x000004) + +#define SM501_MISC_BUS_SH (0x0) +#define SM501_MISC_BUS_PCI (0x1) +#define SM501_MISC_BUS_XSCALE (0x2) +#define SM501_MISC_BUS_NEC (0x6) +#define SM501_MISC_BUS_MASK (0x7) + +#define SM501_MISC_VR_62MB (1 << 3) +#define SM501_MISC_CDR_RESET (1 << 7) +#define SM501_MISC_USB_LB (1 << 8) +#define 
SM501_MISC_USB_SLAVE (1 << 9) +#define SM501_MISC_BL_1 (1 << 10) +#define SM501_MISC_MC (1 << 11) +#define SM501_MISC_DAC_POWER (1 << 12) +#define SM501_MISC_IRQ_INVERT (1 << 16) +#define SM501_MISC_SH (1 << 17) + +#define SM501_MISC_HOLD_EMPTY (0 << 18) +#define SM501_MISC_HOLD_8 (1 << 18) +#define SM501_MISC_HOLD_16 (2 << 18) +#define SM501_MISC_HOLD_24 (3 << 18) +#define SM501_MISC_HOLD_32 (4 << 18) +#define SM501_MISC_HOLD_MASK (7 << 18) + +#define SM501_MISC_FREQ_12 (1 << 24) +#define SM501_MISC_PNL_24BIT (1 << 25) +#define SM501_MISC_8051_LE (1 << 26) + + + +#define SM501_GPIO31_0_CONTROL (0x000008) +#define SM501_GPIO63_32_CONTROL (0x00000C) +#define SM501_DRAM_CONTROL (0x000010) + +/* command list */ +#define SM501_ARBTRTN_CONTROL (0x000014) + +/* command list */ +#define SM501_COMMAND_LIST_STATUS (0x000024) + +/* interrupt debug */ +#define SM501_RAW_IRQ_STATUS (0x000028) +#define SM501_RAW_IRQ_CLEAR (0x000028) +#define SM501_IRQ_STATUS (0x00002C) +#define SM501_IRQ_MASK (0x000030) +#define SM501_DEBUG_CONTROL (0x000034) + +/* power management */ +#define SM501_POWERMODE_P2X_SRC (1 << 29) +#define SM501_POWERMODE_V2X_SRC (1 << 20) +#define SM501_POWERMODE_M_SRC (1 << 12) +#define SM501_POWERMODE_M1_SRC (1 << 4) + +#define SM501_CURRENT_GATE (0x000038) +#define SM501_CURRENT_CLOCK (0x00003C) +#define SM501_POWER_MODE_0_GATE (0x000040) +#define SM501_POWER_MODE_0_CLOCK (0x000044) +#define SM501_POWER_MODE_1_GATE (0x000048) +#define SM501_POWER_MODE_1_CLOCK (0x00004C) +#define SM501_SLEEP_MODE_GATE (0x000050) +#define SM501_POWER_MODE_CONTROL (0x000054) + +/* power gates for units within the 501 */ +#define SM501_GATE_HOST (0) +#define SM501_GATE_MEMORY (1) +#define SM501_GATE_DISPLAY (2) +#define SM501_GATE_2D_ENGINE (3) +#define SM501_GATE_CSC (4) +#define SM501_GATE_ZVPORT (5) +#define SM501_GATE_GPIO (6) +#define SM501_GATE_UART0 (7) +#define SM501_GATE_UART1 (8) +#define SM501_GATE_SSP (10) +#define SM501_GATE_USB_HOST (11) +#define SM501_GATE_USB_GADGET (12) +#define SM501_GATE_UCONTROLLER (17) +#define SM501_GATE_AC97 (18) + +/* panel clock */ +#define SM501_CLOCK_P2XCLK (24) +/* crt clock */ +#define SM501_CLOCK_V2XCLK (16) +/* main clock */ +#define SM501_CLOCK_MCLK (8) +/* SDRAM controller clock */ +#define SM501_CLOCK_M1XCLK (0) + +/* config 2 */ +#define SM501_PCI_MASTER_BASE (0x000058) +#define SM501_ENDIAN_CONTROL (0x00005C) +#define SM501_DEVICEID (0x000060) +/* 0x050100A0 */ + +#define SM501_DEVICEID_SM501 (0x05010000) +#define SM501_DEVICEID_IDMASK (0xffff0000) +#define SM501_DEVICEID_REVMASK (0x000000ff) + +#define SM501_PLLCLOCK_COUNT (0x000064) +#define SM501_MISC_TIMING (0x000068) +#define SM501_CURRENT_SDRAM_CLOCK (0x00006C) + +#define SM501_PROGRAMMABLE_PLL_CONTROL (0x000074) + +/* GPIO base */ +#define SM501_GPIO (0x010000) +#define SM501_GPIO_DATA_LOW (0x00) +#define SM501_GPIO_DATA_HIGH (0x04) +#define SM501_GPIO_DDR_LOW (0x08) +#define SM501_GPIO_DDR_HIGH (0x0C) +#define SM501_GPIO_IRQ_SETUP (0x10) +#define SM501_GPIO_IRQ_STATUS (0x14) +#define SM501_GPIO_IRQ_RESET (0x14) + +/* I2C controller base */ +#define SM501_I2C (0x010040) +#define SM501_I2C_BYTE_COUNT (0x00) +#define SM501_I2C_CONTROL (0x01) +#define SM501_I2C_STATUS (0x02) +#define SM501_I2C_RESET (0x02) +#define SM501_I2C_SLAVE_ADDRESS (0x03) +#define SM501_I2C_DATA (0x04) + +#define SM501_I2C_CONTROL_START (1 << 2) +#define SM501_I2C_CONTROL_ENABLE (1 << 0) + +#define SM501_I2C_STATUS_COMPLETE (1 << 3) +#define SM501_I2C_STATUS_ERROR (1 << 2) + +#define SM501_I2C_RESET_ERROR (1 << 2) + +/* SSP 
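(synchronous serial port)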
base */ +#define SM501_SSP (0x020000) + +/* Uart 0 base */ +#define SM501_UART0 (0x030000) + +/* Uart 1 base */ +#define SM501_UART1 (0x030020) + +/* USB host port base */ +#define SM501_USB_HOST (0x040000) + +/* USB slave/gadget base */ +#define SM501_USB_GADGET (0x060000) + +/* USB slave/gadget data port base */ +#define SM501_USB_GADGET_DATA (0x070000) + +/* Display controller/video engine base */ +#define SM501_DC (0x080000) + +/* common defines for the SM501 address registers */ +#define SM501_ADDR_FLIP (1 << 31) +#define SM501_ADDR_EXT (1 << 27) +#define SM501_ADDR_CS1 (1 << 26) +#define SM501_ADDR_MASK (0x3f << 26) + +#define SM501_FIFO_MASK (0x3 << 16) +#define SM501_FIFO_1 (0x0 << 16) +#define SM501_FIFO_3 (0x1 << 16) +#define SM501_FIFO_7 (0x2 << 16) +#define SM501_FIFO_11 (0x3 << 16) + +/* common registers for panel and the crt */ +#define SM501_OFF_DC_H_TOT (0x000) +#define SM501_OFF_DC_V_TOT (0x008) +#define SM501_OFF_DC_H_SYNC (0x004) +#define SM501_OFF_DC_V_SYNC (0x00C) + +#define SM501_DC_PANEL_CONTROL (0x000) + +#define SM501_DC_PANEL_CONTROL_FPEN (1 << 27) +#define SM501_DC_PANEL_CONTROL_BIAS (1 << 26) +#define SM501_DC_PANEL_CONTROL_DATA (1 << 25) +#define SM501_DC_PANEL_CONTROL_VDD (1 << 24) +#define SM501_DC_PANEL_CONTROL_DP (1 << 23) + +#define SM501_DC_PANEL_CONTROL_TFT_888 (0 << 21) +#define SM501_DC_PANEL_CONTROL_TFT_333 (1 << 21) +#define SM501_DC_PANEL_CONTROL_TFT_444 (2 << 21) + +#define SM501_DC_PANEL_CONTROL_DE (1 << 20) + +#define SM501_DC_PANEL_CONTROL_LCD_TFT (0 << 18) +#define SM501_DC_PANEL_CONTROL_LCD_STN8 (1 << 18) +#define SM501_DC_PANEL_CONTROL_LCD_STN12 (2 << 18) + +#define SM501_DC_PANEL_CONTROL_CP (1 << 14) +#define SM501_DC_PANEL_CONTROL_VSP (1 << 13) +#define SM501_DC_PANEL_CONTROL_HSP (1 << 12) +#define SM501_DC_PANEL_CONTROL_CK (1 << 9) +#define SM501_DC_PANEL_CONTROL_TE (1 << 8) +#define SM501_DC_PANEL_CONTROL_VPD (1 << 7) +#define SM501_DC_PANEL_CONTROL_VP (1 << 6) +#define SM501_DC_PANEL_CONTROL_HPD (1 << 5) +#define SM501_DC_PANEL_CONTROL_HP (1 << 4) +#define SM501_DC_PANEL_CONTROL_GAMMA (1 << 3) +#define SM501_DC_PANEL_CONTROL_EN (1 << 2) + +#define SM501_DC_PANEL_CONTROL_8BPP (0 << 0) +#define SM501_DC_PANEL_CONTROL_16BPP (1 << 0) +#define SM501_DC_PANEL_CONTROL_32BPP (2 << 0) + + +#define SM501_DC_PANEL_PANNING_CONTROL (0x004) +#define SM501_DC_PANEL_COLOR_KEY (0x008) +#define SM501_DC_PANEL_FB_ADDR (0x00C) +#define SM501_DC_PANEL_FB_OFFSET (0x010) +#define SM501_DC_PANEL_FB_WIDTH (0x014) +#define SM501_DC_PANEL_FB_HEIGHT (0x018) +#define SM501_DC_PANEL_TL_LOC (0x01C) +#define SM501_DC_PANEL_BR_LOC (0x020) +#define SM501_DC_PANEL_H_TOT (0x024) +#define SM501_DC_PANEL_H_SYNC (0x028) +#define SM501_DC_PANEL_V_TOT (0x02C) +#define SM501_DC_PANEL_V_SYNC (0x030) +#define SM501_DC_PANEL_CUR_LINE (0x034) + +#define SM501_DC_VIDEO_CONTROL (0x040) +#define SM501_DC_VIDEO_FB0_ADDR (0x044) +#define SM501_DC_VIDEO_FB_WIDTH (0x048) +#define SM501_DC_VIDEO_FB0_LAST_ADDR (0x04C) +#define SM501_DC_VIDEO_TL_LOC (0x050) +#define SM501_DC_VIDEO_BR_LOC (0x054) +#define SM501_DC_VIDEO_SCALE (0x058) +#define SM501_DC_VIDEO_INIT_SCALE (0x05C) +#define SM501_DC_VIDEO_YUV_CONSTANTS (0x060) +#define SM501_DC_VIDEO_FB1_ADDR (0x064) +#define SM501_DC_VIDEO_FB1_LAST_ADDR (0x068) + +#define SM501_DC_VIDEO_ALPHA_CONTROL (0x080) +#define SM501_DC_VIDEO_ALPHA_FB_ADDR (0x084) +#define SM501_DC_VIDEO_ALPHA_FB_OFFSET (0x088) +#define SM501_DC_VIDEO_ALPHA_FB_LAST_ADDR (0x08C) +#define SM501_DC_VIDEO_ALPHA_TL_LOC (0x090) +#define SM501_DC_VIDEO_ALPHA_BR_LOC (0x094) +#define 
SM501_DC_VIDEO_ALPHA_SCALE (0x098) +#define SM501_DC_VIDEO_ALPHA_INIT_SCALE (0x09C) +#define SM501_DC_VIDEO_ALPHA_CHROMA_KEY (0x0A0) +#define SM501_DC_VIDEO_ALPHA_COLOR_LOOKUP (0x0A4) + +#define SM501_DC_PANEL_HWC_BASE (0x0F0) +#define SM501_DC_PANEL_HWC_ADDR (0x0F0) +#define SM501_DC_PANEL_HWC_LOC (0x0F4) +#define SM501_DC_PANEL_HWC_COLOR_1_2 (0x0F8) +#define SM501_DC_PANEL_HWC_COLOR_3 (0x0FC) + +#define SM501_HWC_EN (1 << 31) + +#define SM501_OFF_HWC_ADDR (0x00) +#define SM501_OFF_HWC_LOC (0x04) +#define SM501_OFF_HWC_COLOR_1_2 (0x08) +#define SM501_OFF_HWC_COLOR_3 (0x0C) + +#define SM501_DC_ALPHA_CONTROL (0x100) +#define SM501_DC_ALPHA_FB_ADDR (0x104) +#define SM501_DC_ALPHA_FB_OFFSET (0x108) +#define SM501_DC_ALPHA_TL_LOC (0x10C) +#define SM501_DC_ALPHA_BR_LOC (0x110) +#define SM501_DC_ALPHA_CHROMA_KEY (0x114) +#define SM501_DC_ALPHA_COLOR_LOOKUP (0x118) + +#define SM501_DC_CRT_CONTROL (0x200) + +#define SM501_DC_CRT_CONTROL_TVP (1 << 15) +#define SM501_DC_CRT_CONTROL_CP (1 << 14) +#define SM501_DC_CRT_CONTROL_VSP (1 << 13) +#define SM501_DC_CRT_CONTROL_HSP (1 << 12) +#define SM501_DC_CRT_CONTROL_VS (1 << 11) +#define SM501_DC_CRT_CONTROL_BLANK (1 << 10) +#define SM501_DC_CRT_CONTROL_SEL (1 << 9) +#define SM501_DC_CRT_CONTROL_TE (1 << 8) +#define SM501_DC_CRT_CONTROL_PIXEL_MASK (0xF << 4) +#define SM501_DC_CRT_CONTROL_GAMMA (1 << 3) +#define SM501_DC_CRT_CONTROL_ENABLE (1 << 2) + +#define SM501_DC_CRT_CONTROL_8BPP (0 << 0) +#define SM501_DC_CRT_CONTROL_16BPP (1 << 0) +#define SM501_DC_CRT_CONTROL_32BPP (2 << 0) + +#define SM501_DC_CRT_FB_ADDR (0x204) +#define SM501_DC_CRT_FB_OFFSET (0x208) +#define SM501_DC_CRT_H_TOT (0x20C) +#define SM501_DC_CRT_H_SYNC (0x210) +#define SM501_DC_CRT_V_TOT (0x214) +#define SM501_DC_CRT_V_SYNC (0x218) +#define SM501_DC_CRT_SIGNATURE_ANALYZER (0x21C) +#define SM501_DC_CRT_CUR_LINE (0x220) +#define SM501_DC_CRT_MONITOR_DETECT (0x224) + +#define SM501_DC_CRT_HWC_BASE (0x230) +#define SM501_DC_CRT_HWC_ADDR (0x230) +#define SM501_DC_CRT_HWC_LOC (0x234) +#define SM501_DC_CRT_HWC_COLOR_1_2 (0x238) +#define SM501_DC_CRT_HWC_COLOR_3 (0x23C) + +#define SM501_DC_PANEL_PALETTE (0x400) + +#define SM501_DC_VIDEO_PALETTE (0x800) + +#define SM501_DC_CRT_PALETTE (0xC00) + +/* Zoom Video port base */ +#define SM501_ZVPORT (0x090000) + +/* AC97/I2S base */ +#define SM501_AC97 (0x0A0000) + +/* 8051 micro controller base */ +#define SM501_UCONTROLLER (0x0B0000) + +/* 8051 micro controller SRAM base */ +#define SM501_UCONTROLLER_SRAM (0x0C0000) + +/* DMA base */ +#define SM501_DMA (0x0D0000) + +/* 2d engine base */ +#define SM501_2D_ENGINE (0x100000) +#define SM501_2D_SOURCE (0x00) +#define SM501_2D_DESTINATION (0x04) +#define SM501_2D_DIMENSION (0x08) +#define SM501_2D_CONTROL (0x0C) +#define SM501_2D_PITCH (0x10) +#define SM501_2D_FOREGROUND (0x14) +#define SM501_2D_BACKGROUND (0x18) +#define SM501_2D_STRETCH (0x1C) +#define SM501_2D_COLOR_COMPARE (0x20) +#define SM501_2D_COLOR_COMPARE_MASK (0x24) +#define SM501_2D_MASK (0x28) +#define SM501_2D_CLIP_TL (0x2C) +#define SM501_2D_CLIP_BR (0x30) +#define SM501_2D_MONO_PATTERN_LOW (0x34) +#define SM501_2D_MONO_PATTERN_HIGH (0x38) +#define SM501_2D_WINDOW_WIDTH (0x3C) +#define SM501_2D_SOURCE_BASE (0x40) +#define SM501_2D_DESTINATION_BASE (0x44) +#define SM501_2D_ALPHA (0x48) +#define SM501_2D_WRAP (0x4C) +#define SM501_2D_STATUS (0x50) + +#define SM501_CSC_Y_SOURCE_BASE (0xC8) +#define SM501_CSC_CONSTANTS (0xCC) +#define SM501_CSC_Y_SOURCE_X (0xD0) +#define SM501_CSC_Y_SOURCE_Y (0xD4) +#define SM501_CSC_U_SOURCE_BASE (0xD8) 
+#define SM501_CSC_V_SOURCE_BASE (0xDC) +#define SM501_CSC_SOURCE_DIMENSION (0xE0) +#define SM501_CSC_SOURCE_PITCH (0xE4) +#define SM501_CSC_DESTINATION (0xE8) +#define SM501_CSC_DESTINATION_DIMENSION (0xEC) +#define SM501_CSC_DESTINATION_PITCH (0xF0) +#define SM501_CSC_SCALE_FACTOR (0xF4) +#define SM501_CSC_DESTINATION_BASE (0xF8) +#define SM501_CSC_CONTROL (0xFC) + +/* 2d engine data port base */ +#define SM501_2D_ENGINE_DATA (0x110000) + +/* end of register definitions */ + +#define SM501_HWC_WIDTH (64) +#define SM501_HWC_HEIGHT (64) + +/* SM501 local memory size taken from "linux/drivers/mfd/sm501.c" */ +static const uint32_t sm501_mem_local_size[] = { + [0] = 4 * MiB, + [1] = 8 * MiB, + [2] = 16 * MiB, + [3] = 32 * MiB, + [4] = 64 * MiB, + [5] = 2 * MiB, +}; +#define get_local_mem_size(s) sm501_mem_local_size[(s)->local_mem_size_index] + +typedef struct SM501State { + /* graphic console status */ + QemuConsole *con; + + /* status & internal resources */ + uint32_t local_mem_size_index; + uint8_t *local_mem; + MemoryRegion local_mem_region; + MemoryRegion mmio_region; + MemoryRegion system_config_region; + MemoryRegion i2c_region; + MemoryRegion disp_ctrl_region; + MemoryRegion twoD_engine_region; + uint32_t last_width; + uint32_t last_height; + bool do_full_update; /* perform a full update next time */ + I2CBus *i2c_bus; + + /* mmio registers */ + uint32_t system_control; + uint32_t misc_control; + uint32_t gpio_31_0_control; + uint32_t gpio_63_32_control; + uint32_t dram_control; + uint32_t arbitration_control; + uint32_t irq_mask; + uint32_t misc_timing; + uint32_t power_mode_control; + + uint8_t i2c_byte_count; + uint8_t i2c_status; + uint8_t i2c_addr; + uint8_t i2c_data[16]; + + uint32_t uart0_ier; + uint32_t uart0_lcr; + uint32_t uart0_mcr; + uint32_t uart0_scr; + + uint8_t dc_palette[DC_PALETTE_ENTRIES]; + + uint32_t dc_panel_control; + uint32_t dc_panel_panning_control; + uint32_t dc_panel_fb_addr; + uint32_t dc_panel_fb_offset; + uint32_t dc_panel_fb_width; + uint32_t dc_panel_fb_height; + uint32_t dc_panel_tl_location; + uint32_t dc_panel_br_location; + uint32_t dc_panel_h_total; + uint32_t dc_panel_h_sync; + uint32_t dc_panel_v_total; + uint32_t dc_panel_v_sync; + + uint32_t dc_panel_hwc_addr; + uint32_t dc_panel_hwc_location; + uint32_t dc_panel_hwc_color_1_2; + uint32_t dc_panel_hwc_color_3; + + uint32_t dc_video_control; + + uint32_t dc_crt_control; + uint32_t dc_crt_fb_addr; + uint32_t dc_crt_fb_offset; + uint32_t dc_crt_h_total; + uint32_t dc_crt_h_sync; + uint32_t dc_crt_v_total; + uint32_t dc_crt_v_sync; + + uint32_t dc_crt_hwc_addr; + uint32_t dc_crt_hwc_location; + uint32_t dc_crt_hwc_color_1_2; + uint32_t dc_crt_hwc_color_3; + + uint32_t twoD_source; + uint32_t twoD_destination; + uint32_t twoD_dimension; + uint32_t twoD_control; + uint32_t twoD_pitch; + uint32_t twoD_foreground; + uint32_t twoD_background; + uint32_t twoD_stretch; + uint32_t twoD_color_compare; + uint32_t twoD_color_compare_mask; + uint32_t twoD_mask; + uint32_t twoD_clip_tl; + uint32_t twoD_clip_br; + uint32_t twoD_mono_pattern_low; + uint32_t twoD_mono_pattern_high; + uint32_t twoD_window_width; + uint32_t twoD_source_base; + uint32_t twoD_destination_base; + uint32_t twoD_alpha; + uint32_t twoD_wrap; +} SM501State; + +static uint32_t get_local_mem_size_index(uint32_t size) +{ + uint32_t norm_size = 0; + int i, index = 0; + + for (i = 0; i < ARRAY_SIZE(sm501_mem_local_size); i++) { + uint32_t new_size = sm501_mem_local_size[i]; + if (new_size >= size) { + if (norm_size == 0 || norm_size > 
new_size) { + norm_size = new_size; + index = i; + } + } + } + + return index; +} + +static ram_addr_t get_fb_addr(SM501State *s, int crt) +{ + return (crt ? s->dc_crt_fb_addr : s->dc_panel_fb_addr) & 0x3FFFFF0; +} + +static inline int get_width(SM501State *s, int crt) +{ + int width = crt ? s->dc_crt_h_total : s->dc_panel_h_total; + return (width & 0x00000FFF) + 1; +} + +static inline int get_height(SM501State *s, int crt) +{ + int height = crt ? s->dc_crt_v_total : s->dc_panel_v_total; + return (height & 0x00000FFF) + 1; +} + +static inline int get_bpp(SM501State *s, int crt) +{ + int bpp = crt ? s->dc_crt_control : s->dc_panel_control; + return 1 << (bpp & 3); +} + +/** + * Check the availability of hardware cursor. + * @param crt 0 for PANEL, 1 for CRT. + */ +static inline int is_hwc_enabled(SM501State *state, int crt) +{ + uint32_t addr = crt ? state->dc_crt_hwc_addr : state->dc_panel_hwc_addr; + return addr & SM501_HWC_EN; +} + +/** + * Get the address which holds cursor pattern data. + * @param crt 0 for PANEL, 1 for CRT. + */ +static inline uint8_t *get_hwc_address(SM501State *state, int crt) +{ + uint32_t addr = crt ? state->dc_crt_hwc_addr : state->dc_panel_hwc_addr; + return state->local_mem + (addr & 0x03FFFFF0); +} + +/** + * Get the cursor position in y coordinate. + * @param crt 0 for PANEL, 1 for CRT. + */ +static inline uint32_t get_hwc_y(SM501State *state, int crt) +{ + uint32_t location = crt ? state->dc_crt_hwc_location + : state->dc_panel_hwc_location; + return (location & 0x07FF0000) >> 16; +} + +/** + * Get the cursor position in x coordinate. + * @param crt 0 for PANEL, 1 for CRT. + */ +static inline uint32_t get_hwc_x(SM501State *state, int crt) +{ + uint32_t location = crt ? state->dc_crt_hwc_location + : state->dc_panel_hwc_location; + return location & 0x000007FF; +} + +/** + * Get the hardware cursor palette. + * @param crt 0 for PANEL, 1 for CRT. + * @param palette pointer to a [3 * 3] array to store color values in + */ +static inline void get_hwc_palette(SM501State *state, int crt, uint8_t *palette) +{ + int i; + uint32_t color_reg; + uint16_t rgb565; + + for (i = 0; i < 3; i++) { + if (i + 1 == 3) { + color_reg = crt ? state->dc_crt_hwc_color_3 + : state->dc_panel_hwc_color_3; + } else { + color_reg = crt ? 
state->dc_crt_hwc_color_1_2 + : state->dc_panel_hwc_color_1_2; + } + + if (i + 1 == 2) { + rgb565 = (color_reg >> 16) & 0xFFFF; + } else { + rgb565 = color_reg & 0xFFFF; + } + palette[i * 3 + 0] = ((rgb565 >> 11) * 527 + 23) >> 6; /* r */ + palette[i * 3 + 1] = (((rgb565 >> 5) & 0x3f) * 259 + 33) >> 6; /* g */ + palette[i * 3 + 2] = ((rgb565 & 0x1f) * 527 + 23) >> 6; /* b */ + } +} + +static inline void hwc_invalidate(SM501State *s, int crt) +{ + int w = get_width(s, crt); + int h = get_height(s, crt); + int bpp = get_bpp(s, crt); + int start = get_hwc_y(s, crt); + int end = MIN(h, start + SM501_HWC_HEIGHT) + 1; + + start *= w * bpp; + end *= w * bpp; + + memory_region_set_dirty(&s->local_mem_region, + get_fb_addr(s, crt) + start, end - start); +} + +static void sm501_2d_operation(SM501State *s) +{ + int cmd = (s->twoD_control >> 16) & 0x1F; + int rtl = s->twoD_control & BIT(27); + int format = (s->twoD_stretch >> 20) & 3; + int bypp = 1 << format; /* bytes per pixel */ + int rop_mode = (s->twoD_control >> 15) & 1; /* 1 for rop2, else rop3 */ + /* 1 if rop2 source is the pattern, otherwise the source is the bitmap */ + int rop2_source_is_pattern = (s->twoD_control >> 14) & 1; + int rop = s->twoD_control & 0xFF; + unsigned int dst_x = (s->twoD_destination >> 16) & 0x01FFF; + unsigned int dst_y = s->twoD_destination & 0xFFFF; + unsigned int width = (s->twoD_dimension >> 16) & 0x1FFF; + unsigned int height = s->twoD_dimension & 0xFFFF; + uint32_t dst_base = s->twoD_destination_base & 0x03FFFFFF; + unsigned int dst_pitch = (s->twoD_pitch >> 16) & 0x1FFF; + int crt = (s->dc_crt_control & SM501_DC_CRT_CONTROL_SEL) ? 1 : 0; + int fb_len = get_width(s, crt) * get_height(s, crt) * get_bpp(s, crt); + bool overlap = false; + + if ((s->twoD_stretch >> 16) & 0xF) { + qemu_log_mask(LOG_UNIMP, "sm501: only XY addressing is supported.\n"); + return; + } + + if (s->twoD_source_base & BIT(27) || s->twoD_destination_base & BIT(27)) { + qemu_log_mask(LOG_UNIMP, "sm501: only local memory is supported.\n"); + return; + } + + if (!dst_pitch) { + qemu_log_mask(LOG_GUEST_ERROR, "sm501: Zero dest pitch.\n"); + return; + } + + if (!width || !height) { + qemu_log_mask(LOG_GUEST_ERROR, "sm501: Zero size 2D op.\n"); + return; + } + + if (rtl) { + dst_x -= width - 1; + dst_y -= height - 1; + } + + if (dst_base >= get_local_mem_size(s) || + dst_base + (dst_x + width + (dst_y + height) * dst_pitch) * bypp >= + get_local_mem_size(s)) { + qemu_log_mask(LOG_GUEST_ERROR, "sm501: 2D op dest is outside vram.\n"); + return; + } + + switch (cmd) { + case 0: /* BitBlt */ + { + static uint32_t tmp_buf[16384]; + unsigned int src_x = (s->twoD_source >> 16) & 0x01FFF; + unsigned int src_y = s->twoD_source & 0xFFFF; + uint32_t src_base = s->twoD_source_base & 0x03FFFFFF; + unsigned int src_pitch = s->twoD_pitch & 0x1FFF; + + if (!src_pitch) { + qemu_log_mask(LOG_GUEST_ERROR, "sm501: Zero src pitch.\n"); + return; + } + + if (rtl) { + src_x -= width - 1; + src_y -= height - 1; + } + + if (src_base >= get_local_mem_size(s) || + src_base + (src_x + width + (src_y + height) * src_pitch) * bypp >= + get_local_mem_size(s)) { + qemu_log_mask(LOG_GUEST_ERROR, + "sm501: 2D op src is outside vram.\n"); + return; + } + + if ((rop_mode && rop == 0x5) || (!rop_mode && rop == 0x55)) { + /* Invert dest, is there a way to do this with pixman? 
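+ * (rop2 0x5 and rop3 0x55 both mean "NOT destination"; inverted per
+ * pixel below because pixman has no destination-invert blit)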
*/ + unsigned int x, y, i; + uint8_t *d = s->local_mem + dst_base; + + for (y = 0; y < height; y++) { + i = (dst_x + (dst_y + y) * dst_pitch) * bypp; + for (x = 0; x < width; x++, i += bypp) { + stn_he_p(&d[i], bypp, ~ldn_he_p(&d[i], bypp)); + } + } + } else { + /* Do copy src for unimplemented ops, better than unpainted area */ + if ((rop_mode && (rop != 0xc || rop2_source_is_pattern)) || + (!rop_mode && rop != 0xcc)) { + qemu_log_mask(LOG_UNIMP, + "sm501: rop%d op %x%s not implemented\n", + (rop_mode ? 2 : 3), rop, + (rop2_source_is_pattern ? + " with pattern source" : "")); + } + /* Ignore no-op blits, some guests seem to do this */ + if (src_base == dst_base && src_pitch == dst_pitch && + src_x == dst_x && src_y == dst_y) { + break; + } + /* Some clients also do 1 pixel blits, avoid overhead for these */ + if (width == 1 && height == 1) { + unsigned int si = (src_x + src_y * src_pitch) * bypp; + unsigned int di = (dst_x + dst_y * dst_pitch) * bypp; + stn_he_p(&s->local_mem[dst_base + di], bypp, + ldn_he_p(&s->local_mem[src_base + si], bypp)); + break; + } + /* If reverse blit do simple check for overlaps */ + if (rtl && src_base == dst_base && src_pitch == dst_pitch) { + overlap = (src_x < dst_x + width && src_x + width > dst_x && + src_y < dst_y + height && src_y + height > dst_y); + } else if (rtl) { + unsigned int sb, se, db, de; + sb = src_base + (src_x + src_y * src_pitch) * bypp; + se = sb + (width + (height - 1) * src_pitch) * bypp; + db = dst_base + (dst_x + dst_y * dst_pitch) * bypp; + de = db + (width + (height - 1) * dst_pitch) * bypp; + overlap = (db < se && sb < de); + } + if (overlap) { + /* pixman can't do reverse blit: copy via temporary */ + int tmp_stride = DIV_ROUND_UP(width * bypp, sizeof(uint32_t)); + uint32_t *tmp = tmp_buf; + + if (tmp_stride * sizeof(uint32_t) * height > sizeof(tmp_buf)) { + tmp = g_malloc(tmp_stride * sizeof(uint32_t) * height); + } + pixman_blt((uint32_t *)&s->local_mem[src_base], tmp, + src_pitch * bypp / sizeof(uint32_t), + tmp_stride, 8 * bypp, 8 * bypp, + src_x, src_y, 0, 0, width, height); + pixman_blt(tmp, (uint32_t *)&s->local_mem[dst_base], + tmp_stride, + dst_pitch * bypp / sizeof(uint32_t), + 8 * bypp, 8 * bypp, + 0, 0, dst_x, dst_y, width, height); + if (tmp != tmp_buf) { + g_free(tmp); + } + } else { + pixman_blt((uint32_t *)&s->local_mem[src_base], + (uint32_t *)&s->local_mem[dst_base], + src_pitch * bypp / sizeof(uint32_t), + dst_pitch * bypp / sizeof(uint32_t), + 8 * bypp, 8 * bypp, + src_x, src_y, dst_x, dst_y, width, height); + } + } + break; + } + case 1: /* Rectangle Fill */ + { + uint32_t color = s->twoD_foreground; + + if (format == 2) { + color = cpu_to_le32(color); + } else if (format == 1) { + color = cpu_to_le16(color); + } + + if (width == 1 && height == 1) { + unsigned int i = (dst_x + dst_y * dst_pitch) * bypp; + stn_he_p(&s->local_mem[dst_base + i], bypp, color); + } else { + pixman_fill((uint32_t *)&s->local_mem[dst_base], + dst_pitch * bypp / sizeof(uint32_t), + 8 * bypp, dst_x, dst_y, width, height, color); + } + break; + } + default: + qemu_log_mask(LOG_UNIMP, "sm501: not implemented 2D operation: %d\n", + cmd); + return; + } + + if (dst_base >= get_fb_addr(s, crt) && + dst_base <= get_fb_addr(s, crt) + fb_len) { + int dst_len = MIN(fb_len, ((dst_y + height - 1) * dst_pitch + + dst_x + width) * bypp); + if (dst_len) { + memory_region_set_dirty(&s->local_mem_region, dst_base, dst_len); + } + } +} + +static uint64_t sm501_system_config_read(void *opaque, hwaddr addr, + unsigned size) +{ + SM501State *s = 
(SM501State *)opaque; + uint32_t ret = 0; + + switch (addr) { + case SM501_SYSTEM_CONTROL: + ret = s->system_control; + break; + case SM501_MISC_CONTROL: + ret = s->misc_control; + break; + case SM501_GPIO31_0_CONTROL: + ret = s->gpio_31_0_control; + break; + case SM501_GPIO63_32_CONTROL: + ret = s->gpio_63_32_control; + break; + case SM501_DEVICEID: + ret = 0x050100A0; + break; + case SM501_DRAM_CONTROL: + ret = (s->dram_control & 0x07F107C0) | s->local_mem_size_index << 13; + break; + case SM501_ARBTRTN_CONTROL: + ret = s->arbitration_control; + break; + case SM501_COMMAND_LIST_STATUS: + ret = 0x00180002; /* FIFOs are empty, everything idle */ + break; + case SM501_IRQ_MASK: + ret = s->irq_mask; + break; + case SM501_MISC_TIMING: + /* TODO : simulate gate control */ + ret = s->misc_timing; + break; + case SM501_CURRENT_GATE: + /* TODO : simulate gate control */ + ret = 0x00021807; + break; + case SM501_CURRENT_CLOCK: + ret = 0x2A1A0A09; + break; + case SM501_POWER_MODE_CONTROL: + ret = s->power_mode_control; + break; + case SM501_ENDIAN_CONTROL: + ret = 0; /* Only default little endian mode is supported */ + break; + + default: + qemu_log_mask(LOG_UNIMP, "sm501: not implemented system config" + "register read. addr=%" HWADDR_PRIx "\n", addr); + } + trace_sm501_system_config_read(addr, ret); + return ret; +} + +static void sm501_system_config_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + SM501State *s = (SM501State *)opaque; + + trace_sm501_system_config_write((uint32_t)addr, (uint32_t)value); + switch (addr) { + case SM501_SYSTEM_CONTROL: + s->system_control &= 0x10DB0000; + s->system_control |= value & 0xEF00B8F7; + break; + case SM501_MISC_CONTROL: + s->misc_control &= 0xEF; + s->misc_control |= value & 0xFF7FFF10; + break; + case SM501_GPIO31_0_CONTROL: + s->gpio_31_0_control = value; + break; + case SM501_GPIO63_32_CONTROL: + s->gpio_63_32_control = value & 0xFF80FFFF; + break; + case SM501_DRAM_CONTROL: + s->local_mem_size_index = (value >> 13) & 0x7; + /* TODO : check validity of size change */ + s->dram_control &= 0x80000000; + s->dram_control |= value & 0x7FFFFFC3; + break; + case SM501_ARBTRTN_CONTROL: + s->arbitration_control = value & 0x37777777; + break; + case SM501_IRQ_MASK: + s->irq_mask = value & 0xFFDF3F5F; + break; + case SM501_MISC_TIMING: + s->misc_timing = value & 0xF31F1FFF; + break; + case SM501_POWER_MODE_0_GATE: + case SM501_POWER_MODE_1_GATE: + case SM501_POWER_MODE_0_CLOCK: + case SM501_POWER_MODE_1_CLOCK: + /* TODO : simulate gate & clock control */ + break; + case SM501_POWER_MODE_CONTROL: + s->power_mode_control = value & 0x00000003; + break; + case SM501_ENDIAN_CONTROL: + if (value & 0x00000001) { + qemu_log_mask(LOG_UNIMP, "sm501: system config big endian mode not" + " implemented.\n"); + } + break; + + default: + qemu_log_mask(LOG_UNIMP, "sm501: not implemented system config" + "register write. 
addr=%" HWADDR_PRIx + ", val=%" PRIx64 "\n", addr, value); + } +} + +static const MemoryRegionOps sm501_system_config_ops = { + .read = sm501_system_config_read, + .write = sm501_system_config_write, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + }, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static uint64_t sm501_i2c_read(void *opaque, hwaddr addr, unsigned size) +{ + SM501State *s = (SM501State *)opaque; + uint8_t ret = 0; + + switch (addr) { + case SM501_I2C_BYTE_COUNT: + ret = s->i2c_byte_count; + break; + case SM501_I2C_STATUS: + ret = s->i2c_status; + break; + case SM501_I2C_SLAVE_ADDRESS: + ret = s->i2c_addr; + break; + case SM501_I2C_DATA ... SM501_I2C_DATA + 15: + ret = s->i2c_data[addr - SM501_I2C_DATA]; + break; + default: + qemu_log_mask(LOG_UNIMP, "sm501 i2c : not implemented register read." + " addr=0x%" HWADDR_PRIx "\n", addr); + } + trace_sm501_i2c_read((uint32_t)addr, ret); + return ret; +} + +static void sm501_i2c_write(void *opaque, hwaddr addr, uint64_t value, + unsigned size) +{ + SM501State *s = (SM501State *)opaque; + + trace_sm501_i2c_write((uint32_t)addr, (uint32_t)value); + switch (addr) { + case SM501_I2C_BYTE_COUNT: + s->i2c_byte_count = value & 0xf; + break; + case SM501_I2C_CONTROL: + if (value & SM501_I2C_CONTROL_ENABLE) { + if (value & SM501_I2C_CONTROL_START) { + bool is_recv = s->i2c_addr & 1; + int res = i2c_start_transfer(s->i2c_bus, + s->i2c_addr >> 1, + is_recv); + if (res) { + s->i2c_status |= SM501_I2C_STATUS_ERROR; + } else { + int i; + for (i = 0; i <= s->i2c_byte_count; i++) { + if (is_recv) { + s->i2c_data[i] = i2c_recv(s->i2c_bus); + } else if (i2c_send(s->i2c_bus, s->i2c_data[i]) < 0) { + s->i2c_status |= SM501_I2C_STATUS_ERROR; + return; + } + } + if (i) { + s->i2c_status = SM501_I2C_STATUS_COMPLETE; + } + } + } else { + i2c_end_transfer(s->i2c_bus); + s->i2c_status &= ~SM501_I2C_STATUS_ERROR; + } + } + break; + case SM501_I2C_RESET: + if ((value & SM501_I2C_RESET_ERROR) == 0) { + s->i2c_status &= ~SM501_I2C_STATUS_ERROR; + } + break; + case SM501_I2C_SLAVE_ADDRESS: + s->i2c_addr = value & 0xff; + break; + case SM501_I2C_DATA ... SM501_I2C_DATA + 15: + s->i2c_data[addr - SM501_I2C_DATA] = value & 0xff; + break; + default: + qemu_log_mask(LOG_UNIMP, "sm501 i2c : not implemented register write. 
" + "addr=0x%" HWADDR_PRIx " val=%" PRIx64 "\n", addr, value); + } +} + +static const MemoryRegionOps sm501_i2c_ops = { + .read = sm501_i2c_read, + .write = sm501_i2c_write, + .valid = { + .min_access_size = 1, + .max_access_size = 1, + }, + .impl = { + .min_access_size = 1, + .max_access_size = 1, + }, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static uint32_t sm501_palette_read(void *opaque, hwaddr addr) +{ + SM501State *s = (SM501State *)opaque; + + trace_sm501_palette_read((uint32_t)addr); + + /* TODO : consider BYTE/WORD access */ + /* TODO : consider endian */ + + assert(range_covers_byte(0, 0x400 * 3, addr)); + return *(uint32_t *)&s->dc_palette[addr]; +} + +static void sm501_palette_write(void *opaque, hwaddr addr, + uint32_t value) +{ + SM501State *s = (SM501State *)opaque; + + trace_sm501_palette_write((uint32_t)addr, value); + + /* TODO : consider BYTE/WORD access */ + /* TODO : consider endian */ + + assert(range_covers_byte(0, 0x400 * 3, addr)); + *(uint32_t *)&s->dc_palette[addr] = value; + s->do_full_update = true; +} + +static uint64_t sm501_disp_ctrl_read(void *opaque, hwaddr addr, + unsigned size) +{ + SM501State *s = (SM501State *)opaque; + uint32_t ret = 0; + + switch (addr) { + + case SM501_DC_PANEL_CONTROL: + ret = s->dc_panel_control; + break; + case SM501_DC_PANEL_PANNING_CONTROL: + ret = s->dc_panel_panning_control; + break; + case SM501_DC_PANEL_COLOR_KEY: + /* Not implemented yet */ + break; + case SM501_DC_PANEL_FB_ADDR: + ret = s->dc_panel_fb_addr; + break; + case SM501_DC_PANEL_FB_OFFSET: + ret = s->dc_panel_fb_offset; + break; + case SM501_DC_PANEL_FB_WIDTH: + ret = s->dc_panel_fb_width; + break; + case SM501_DC_PANEL_FB_HEIGHT: + ret = s->dc_panel_fb_height; + break; + case SM501_DC_PANEL_TL_LOC: + ret = s->dc_panel_tl_location; + break; + case SM501_DC_PANEL_BR_LOC: + ret = s->dc_panel_br_location; + break; + + case SM501_DC_PANEL_H_TOT: + ret = s->dc_panel_h_total; + break; + case SM501_DC_PANEL_H_SYNC: + ret = s->dc_panel_h_sync; + break; + case SM501_DC_PANEL_V_TOT: + ret = s->dc_panel_v_total; + break; + case SM501_DC_PANEL_V_SYNC: + ret = s->dc_panel_v_sync; + break; + + case SM501_DC_PANEL_HWC_ADDR: + ret = s->dc_panel_hwc_addr; + break; + case SM501_DC_PANEL_HWC_LOC: + ret = s->dc_panel_hwc_location; + break; + case SM501_DC_PANEL_HWC_COLOR_1_2: + ret = s->dc_panel_hwc_color_1_2; + break; + case SM501_DC_PANEL_HWC_COLOR_3: + ret = s->dc_panel_hwc_color_3; + break; + + case SM501_DC_VIDEO_CONTROL: + ret = s->dc_video_control; + break; + + case SM501_DC_CRT_CONTROL: + ret = s->dc_crt_control; + break; + case SM501_DC_CRT_FB_ADDR: + ret = s->dc_crt_fb_addr; + break; + case SM501_DC_CRT_FB_OFFSET: + ret = s->dc_crt_fb_offset; + break; + case SM501_DC_CRT_H_TOT: + ret = s->dc_crt_h_total; + break; + case SM501_DC_CRT_H_SYNC: + ret = s->dc_crt_h_sync; + break; + case SM501_DC_CRT_V_TOT: + ret = s->dc_crt_v_total; + break; + case SM501_DC_CRT_V_SYNC: + ret = s->dc_crt_v_sync; + break; + + case SM501_DC_CRT_HWC_ADDR: + ret = s->dc_crt_hwc_addr; + break; + case SM501_DC_CRT_HWC_LOC: + ret = s->dc_crt_hwc_location; + break; + case SM501_DC_CRT_HWC_COLOR_1_2: + ret = s->dc_crt_hwc_color_1_2; + break; + case SM501_DC_CRT_HWC_COLOR_3: + ret = s->dc_crt_hwc_color_3; + break; + + case SM501_DC_PANEL_PALETTE ... SM501_DC_PANEL_PALETTE + 0x400 * 3 - 4: + ret = sm501_palette_read(opaque, addr - SM501_DC_PANEL_PALETTE); + break; + + default: + qemu_log_mask(LOG_UNIMP, "sm501: not implemented disp ctrl register " + "read. 
addr=%" HWADDR_PRIx "\n", addr); + } + trace_sm501_disp_ctrl_read((uint32_t)addr, ret); + return ret; +} + +static void sm501_disp_ctrl_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + SM501State *s = (SM501State *)opaque; + + trace_sm501_disp_ctrl_write((uint32_t)addr, (uint32_t)value); + switch (addr) { + case SM501_DC_PANEL_CONTROL: + s->dc_panel_control = value & 0x0FFF73FF; + break; + case SM501_DC_PANEL_PANNING_CONTROL: + s->dc_panel_panning_control = value & 0xFF3FFF3F; + break; + case SM501_DC_PANEL_COLOR_KEY: + /* Not implemented yet */ + break; + case SM501_DC_PANEL_FB_ADDR: + s->dc_panel_fb_addr = value & 0x8FFFFFF0; + if (value & 0x8000000) { + qemu_log_mask(LOG_UNIMP, "Panel external memory not supported\n"); + } + s->do_full_update = true; + break; + case SM501_DC_PANEL_FB_OFFSET: + s->dc_panel_fb_offset = value & 0x3FF03FF0; + break; + case SM501_DC_PANEL_FB_WIDTH: + s->dc_panel_fb_width = value & 0x0FFF0FFF; + break; + case SM501_DC_PANEL_FB_HEIGHT: + s->dc_panel_fb_height = value & 0x0FFF0FFF; + break; + case SM501_DC_PANEL_TL_LOC: + s->dc_panel_tl_location = value & 0x07FF07FF; + break; + case SM501_DC_PANEL_BR_LOC: + s->dc_panel_br_location = value & 0x07FF07FF; + break; + + case SM501_DC_PANEL_H_TOT: + s->dc_panel_h_total = value & 0x0FFF0FFF; + break; + case SM501_DC_PANEL_H_SYNC: + s->dc_panel_h_sync = value & 0x00FF0FFF; + break; + case SM501_DC_PANEL_V_TOT: + s->dc_panel_v_total = value & 0x0FFF0FFF; + break; + case SM501_DC_PANEL_V_SYNC: + s->dc_panel_v_sync = value & 0x003F0FFF; + break; + + case SM501_DC_PANEL_HWC_ADDR: + value &= 0x8FFFFFF0; + if (value != s->dc_panel_hwc_addr) { + hwc_invalidate(s, 0); + s->dc_panel_hwc_addr = value; + } + break; + case SM501_DC_PANEL_HWC_LOC: + value &= 0x0FFF0FFF; + if (value != s->dc_panel_hwc_location) { + hwc_invalidate(s, 0); + s->dc_panel_hwc_location = value; + } + break; + case SM501_DC_PANEL_HWC_COLOR_1_2: + s->dc_panel_hwc_color_1_2 = value; + break; + case SM501_DC_PANEL_HWC_COLOR_3: + s->dc_panel_hwc_color_3 = value & 0x0000FFFF; + break; + + case SM501_DC_VIDEO_CONTROL: + s->dc_video_control = value & 0x00037FFF; + break; + + case SM501_DC_CRT_CONTROL: + s->dc_crt_control = value & 0x0003FFFF; + break; + case SM501_DC_CRT_FB_ADDR: + s->dc_crt_fb_addr = value & 0x8FFFFFF0; + if (value & 0x8000000) { + qemu_log_mask(LOG_UNIMP, "CRT external memory not supported\n"); + } + s->do_full_update = true; + break; + case SM501_DC_CRT_FB_OFFSET: + s->dc_crt_fb_offset = value & 0x3FF03FF0; + break; + case SM501_DC_CRT_H_TOT: + s->dc_crt_h_total = value & 0x0FFF0FFF; + break; + case SM501_DC_CRT_H_SYNC: + s->dc_crt_h_sync = value & 0x00FF0FFF; + break; + case SM501_DC_CRT_V_TOT: + s->dc_crt_v_total = value & 0x0FFF0FFF; + break; + case SM501_DC_CRT_V_SYNC: + s->dc_crt_v_sync = value & 0x003F0FFF; + break; + + case SM501_DC_CRT_HWC_ADDR: + value &= 0x8FFFFFF0; + if (value != s->dc_crt_hwc_addr) { + hwc_invalidate(s, 1); + s->dc_crt_hwc_addr = value; + } + break; + case SM501_DC_CRT_HWC_LOC: + value &= 0x0FFF0FFF; + if (value != s->dc_crt_hwc_location) { + hwc_invalidate(s, 1); + s->dc_crt_hwc_location = value; + } + break; + case SM501_DC_CRT_HWC_COLOR_1_2: + s->dc_crt_hwc_color_1_2 = value; + break; + case SM501_DC_CRT_HWC_COLOR_3: + s->dc_crt_hwc_color_3 = value & 0x0000FFFF; + break; + + case SM501_DC_PANEL_PALETTE ... 
SM501_DC_PANEL_PALETTE + 0x400 * 3 - 4: + sm501_palette_write(opaque, addr - SM501_DC_PANEL_PALETTE, value); + break; + + default: + qemu_log_mask(LOG_UNIMP, "sm501: not implemented disp ctrl register " + "write. addr=%" HWADDR_PRIx + ", val=%" PRIx64 "\n", addr, value); + } +} + +static const MemoryRegionOps sm501_disp_ctrl_ops = { + .read = sm501_disp_ctrl_read, + .write = sm501_disp_ctrl_write, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + }, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static uint64_t sm501_2d_engine_read(void *opaque, hwaddr addr, + unsigned size) +{ + SM501State *s = (SM501State *)opaque; + uint32_t ret = 0; + + switch (addr) { + case SM501_2D_SOURCE: + ret = s->twoD_source; + break; + case SM501_2D_DESTINATION: + ret = s->twoD_destination; + break; + case SM501_2D_DIMENSION: + ret = s->twoD_dimension; + break; + case SM501_2D_CONTROL: + ret = s->twoD_control; + break; + case SM501_2D_PITCH: + ret = s->twoD_pitch; + break; + case SM501_2D_FOREGROUND: + ret = s->twoD_foreground; + break; + case SM501_2D_BACKGROUND: + ret = s->twoD_background; + break; + case SM501_2D_STRETCH: + ret = s->twoD_stretch; + break; + case SM501_2D_COLOR_COMPARE: + ret = s->twoD_color_compare; + break; + case SM501_2D_COLOR_COMPARE_MASK: + ret = s->twoD_color_compare_mask; + break; + case SM501_2D_MASK: + ret = s->twoD_mask; + break; + case SM501_2D_CLIP_TL: + ret = s->twoD_clip_tl; + break; + case SM501_2D_CLIP_BR: + ret = s->twoD_clip_br; + break; + case SM501_2D_MONO_PATTERN_LOW: + ret = s->twoD_mono_pattern_low; + break; + case SM501_2D_MONO_PATTERN_HIGH: + ret = s->twoD_mono_pattern_high; + break; + case SM501_2D_WINDOW_WIDTH: + ret = s->twoD_window_width; + break; + case SM501_2D_SOURCE_BASE: + ret = s->twoD_source_base; + break; + case SM501_2D_DESTINATION_BASE: + ret = s->twoD_destination_base; + break; + case SM501_2D_ALPHA: + ret = s->twoD_alpha; + break; + case SM501_2D_WRAP: + ret = s->twoD_wrap; + break; + case SM501_2D_STATUS: + ret = 0; /* Should return interrupt status */ + break; + default: + qemu_log_mask(LOG_UNIMP, "sm501: not implemented disp ctrl register " + "read. addr=%" HWADDR_PRIx "\n", addr); + } + trace_sm501_2d_engine_read((uint32_t)addr, ret); + return ret; +} + +static void sm501_2d_engine_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + SM501State *s = (SM501State *)opaque; + + trace_sm501_2d_engine_write((uint32_t)addr, (uint32_t)value); + switch (addr) { + case SM501_2D_SOURCE: + s->twoD_source = value; + break; + case SM501_2D_DESTINATION: + s->twoD_destination = value; + break; + case SM501_2D_DIMENSION: + s->twoD_dimension = value; + break; + case SM501_2D_CONTROL: + s->twoD_control = value; + + /* do 2d operation if start flag is set. 
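+ * (bit 31; the emulated engine completes synchronously, so the
+ * start bit is cleared again as soon as the operation is done)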
*/ + if (value & 0x80000000) { + sm501_2d_operation(s); + s->twoD_control &= ~0x80000000; /* start flag down */ + } + + break; + case SM501_2D_PITCH: + s->twoD_pitch = value; + break; + case SM501_2D_FOREGROUND: + s->twoD_foreground = value; + break; + case SM501_2D_BACKGROUND: + s->twoD_background = value; + break; + case SM501_2D_STRETCH: + if (((value >> 20) & 3) == 3) { + value &= ~BIT(20); + } + s->twoD_stretch = value; + break; + case SM501_2D_COLOR_COMPARE: + s->twoD_color_compare = value; + break; + case SM501_2D_COLOR_COMPARE_MASK: + s->twoD_color_compare_mask = value; + break; + case SM501_2D_MASK: + s->twoD_mask = value; + break; + case SM501_2D_CLIP_TL: + s->twoD_clip_tl = value; + break; + case SM501_2D_CLIP_BR: + s->twoD_clip_br = value; + break; + case SM501_2D_MONO_PATTERN_LOW: + s->twoD_mono_pattern_low = value; + break; + case SM501_2D_MONO_PATTERN_HIGH: + s->twoD_mono_pattern_high = value; + break; + case SM501_2D_WINDOW_WIDTH: + s->twoD_window_width = value; + break; + case SM501_2D_SOURCE_BASE: + s->twoD_source_base = value; + break; + case SM501_2D_DESTINATION_BASE: + s->twoD_destination_base = value; + break; + case SM501_2D_ALPHA: + s->twoD_alpha = value; + break; + case SM501_2D_WRAP: + s->twoD_wrap = value; + break; + case SM501_2D_STATUS: + /* ignored, writing 0 should clear interrupt status */ + break; + default: + qemu_log_mask(LOG_UNIMP, "sm501: not implemented 2d engine register " + "write. addr=%" HWADDR_PRIx + ", val=%" PRIx64 "\n", addr, value); + } +} + +static const MemoryRegionOps sm501_2d_engine_ops = { + .read = sm501_2d_engine_read, + .write = sm501_2d_engine_write, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + }, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +/* draw line functions for all console modes */ + +typedef void draw_line_func(uint8_t *d, const uint8_t *s, + int width, const uint32_t *pal); + +typedef void draw_hwc_line_func(uint8_t *d, const uint8_t *s, + int width, const uint8_t *palette, + int c_x, int c_y); + +static void draw_line8_32(uint8_t *d, const uint8_t *s, int width, + const uint32_t *pal) +{ + uint8_t v, r, g, b; + do { + v = ldub_p(s); + r = (pal[v] >> 16) & 0xff; + g = (pal[v] >> 8) & 0xff; + b = (pal[v] >> 0) & 0xff; + *(uint32_t *)d = rgb_to_pixel32(r, g, b); + s++; + d += 4; + } while (--width != 0); +} + +static void draw_line16_32(uint8_t *d, const uint8_t *s, int width, + const uint32_t *pal) +{ + uint16_t rgb565; + uint8_t r, g, b; + + do { + rgb565 = lduw_le_p(s); + r = (rgb565 >> 8) & 0xf8; + g = (rgb565 >> 3) & 0xfc; + b = (rgb565 << 3) & 0xf8; + *(uint32_t *)d = rgb_to_pixel32(r, g, b); + s += 2; + d += 4; + } while (--width != 0); +} + +static void draw_line32_32(uint8_t *d, const uint8_t *s, int width, + const uint32_t *pal) +{ + uint8_t r, g, b; + + do { + r = s[2]; + g = s[1]; + b = s[0]; + *(uint32_t *)d = rgb_to_pixel32(r, g, b); + s += 4; + d += 4; + } while (--width != 0); +} + +/** + * Draw hardware cursor image on the given line. 
+ */ +static void draw_hwc_line_32(uint8_t *d, const uint8_t *s, int width, + const uint8_t *palette, int c_x, int c_y) +{ + int i; + uint8_t r, g, b, v, bitset = 0; + + /* get cursor position */ + assert(0 <= c_y && c_y < SM501_HWC_HEIGHT); + s += SM501_HWC_WIDTH * c_y / 4; /* 4 pixels per byte */ + d += c_x * 4; + + for (i = 0; i < SM501_HWC_WIDTH && c_x + i < width; i++) { + /* get pixel value */ + if (i % 4 == 0) { + bitset = ldub_p(s); + s++; + } + v = bitset & 3; + bitset >>= 2; + + /* write pixel */ + if (v) { + v--; + r = palette[v * 3 + 0]; + g = palette[v * 3 + 1]; + b = palette[v * 3 + 2]; + *(uint32_t *)d = rgb_to_pixel32(r, g, b); + } + d += 4; + } +} + +static void sm501_update_display(void *opaque) +{ + SM501State *s = (SM501State *)opaque; + DisplaySurface *surface = qemu_console_surface(s->con); + DirtyBitmapSnapshot *snap; + int y, c_x = 0, c_y = 0; + int crt = (s->dc_crt_control & SM501_DC_CRT_CONTROL_SEL) ? 1 : 0; + int width = get_width(s, crt); + int height = get_height(s, crt); + int src_bpp = get_bpp(s, crt); + int dst_bpp = surface_bytes_per_pixel(surface); + draw_line_func *draw_line = NULL; + draw_hwc_line_func *draw_hwc_line = NULL; + int full_update = 0; + int y_start = -1; + ram_addr_t offset; + uint32_t *palette; + uint8_t hwc_palette[3 * 3]; + uint8_t *hwc_src = NULL; + + assert(dst_bpp == 4); /* Output is always 32-bit RGB */ + + if (!((crt ? s->dc_crt_control : s->dc_panel_control) + & SM501_DC_CRT_CONTROL_ENABLE)) { + return; + } + + palette = (uint32_t *)(crt ? &s->dc_palette[SM501_DC_CRT_PALETTE - + SM501_DC_PANEL_PALETTE] + : &s->dc_palette[0]); + + /* choose draw_line function */ + switch (src_bpp) { + case 1: + draw_line = draw_line8_32; + break; + case 2: + draw_line = draw_line16_32; + break; + case 4: + draw_line = draw_line32_32; + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, "sm501: update display: " + "invalid control register value.\n"); + return; + } + + /* set up to draw hardware cursor */ + if (is_hwc_enabled(s, crt)) { + /* choose cursor draw line function */ + draw_hwc_line = draw_hwc_line_32; + hwc_src = get_hwc_address(s, crt); + c_x = get_hwc_x(s, crt); + c_y = get_hwc_y(s, crt); + get_hwc_palette(s, crt, hwc_palette); + } + + /* adjust console size */ + if (s->last_width != width || s->last_height != height) { + qemu_console_resize(s->con, width, height); + surface = qemu_console_surface(s->con); + s->last_width = width; + s->last_height = height; + full_update = 1; + } + + /* someone else requested a full update */ + if (s->do_full_update) { + s->do_full_update = false; + full_update = 1; + } + + /* draw each line according to conditions */ + offset = get_fb_addr(s, crt); + snap = memory_region_snapshot_and_clear_dirty(&s->local_mem_region, + offset, width * height * src_bpp, DIRTY_MEMORY_VGA); + for (y = 0; y < height; y++, offset += width * src_bpp) { + int update, update_hwc; + + /* check if hardware cursor is enabled and we're within its range */ + update_hwc = draw_hwc_line && c_y <= y && y < c_y + SM501_HWC_HEIGHT; + update = full_update || update_hwc; + /* check dirty flags for each line */ + update |= memory_region_snapshot_get_dirty(&s->local_mem_region, snap, + offset, width * src_bpp); + + /* draw line and change status */ + if (update) { + uint8_t *d = surface_data(surface); + d += y * width * dst_bpp; + + /* draw graphics layer */ + draw_line(d, s->local_mem + offset, width, palette); + + /* draw hardware cursor */ + if (update_hwc) { + draw_hwc_line(d, hwc_src, width, hwc_palette, c_x, y - c_y); + } + + if (y_start
< 0) { + y_start = y; + } + } else { + if (y_start >= 0) { + /* flush to display */ + dpy_gfx_update(s->con, 0, y_start, width, y - y_start); + y_start = -1; + } + } + } + g_free(snap); + + /* complete flush to display */ + if (y_start >= 0) { + dpy_gfx_update(s->con, 0, y_start, width, y - y_start); + } +} + +static const GraphicHwOps sm501_ops = { + .gfx_update = sm501_update_display, +}; + +static void sm501_reset(SM501State *s) +{ + s->system_control = 0x00100000; /* 2D engine FIFO empty */ + /* Bits 17 (SH), 7 (CDR), 6:5 (Test), 2:0 (Bus) are all supposed + * to be determined at reset by GPIO lines which set config bits. + * We hardwire them: + * SH = 0 : Hitachi Ready Polarity == Active Low + * CDR = 0 : do not reset clock divider + * TEST = 0 : Normal mode (not testing the silicon) + * BUS = 0 : Hitachi SH3/SH4 + */ + s->misc_control = SM501_MISC_DAC_POWER; + s->gpio_31_0_control = 0; + s->gpio_63_32_control = 0; + s->dram_control = 0; + s->arbitration_control = 0x05146732; + s->irq_mask = 0; + s->misc_timing = 0; + s->power_mode_control = 0; + s->i2c_byte_count = 0; + s->i2c_status = 0; + s->i2c_addr = 0; + memset(s->i2c_data, 0, 16); + s->dc_panel_control = 0x00010000; /* FIFO level 3 */ + s->dc_video_control = 0; + s->dc_crt_control = 0x00010000; + s->twoD_source = 0; + s->twoD_destination = 0; + s->twoD_dimension = 0; + s->twoD_control = 0; + s->twoD_pitch = 0; + s->twoD_foreground = 0; + s->twoD_background = 0; + s->twoD_stretch = 0; + s->twoD_color_compare = 0; + s->twoD_color_compare_mask = 0; + s->twoD_mask = 0; + s->twoD_clip_tl = 0; + s->twoD_clip_br = 0; + s->twoD_mono_pattern_low = 0; + s->twoD_mono_pattern_high = 0; + s->twoD_window_width = 0; + s->twoD_source_base = 0; + s->twoD_destination_base = 0; + s->twoD_alpha = 0; + s->twoD_wrap = 0; +} + +static void sm501_init(SM501State *s, DeviceState *dev, + uint32_t local_mem_bytes) +{ + s->local_mem_size_index = get_local_mem_size_index(local_mem_bytes); + + /* local memory */ + memory_region_init_ram(&s->local_mem_region, OBJECT(dev), "sm501.local", + get_local_mem_size(s), &error_fatal); + memory_region_set_log(&s->local_mem_region, true, DIRTY_MEMORY_VGA); + s->local_mem = memory_region_get_ram_ptr(&s->local_mem_region); + + /* i2c */ + s->i2c_bus = i2c_init_bus(dev, "sm501.i2c"); + /* ddc */ + I2CDDCState *ddc = I2CDDC(qdev_new(TYPE_I2CDDC)); + i2c_slave_set_address(I2C_SLAVE(ddc), 0x50); + qdev_realize_and_unref(DEVICE(ddc), BUS(s->i2c_bus), &error_abort); + + /* mmio */ + memory_region_init(&s->mmio_region, OBJECT(dev), "sm501.mmio", MMIO_SIZE); + memory_region_init_io(&s->system_config_region, OBJECT(dev), + &sm501_system_config_ops, s, + "sm501-system-config", 0x6c); + memory_region_add_subregion(&s->mmio_region, SM501_SYS_CONFIG, + &s->system_config_region); + memory_region_init_io(&s->i2c_region, OBJECT(dev), &sm501_i2c_ops, s, + "sm501-i2c", 0x14); + memory_region_add_subregion(&s->mmio_region, SM501_I2C, &s->i2c_region); + memory_region_init_io(&s->disp_ctrl_region, OBJECT(dev), + &sm501_disp_ctrl_ops, s, + "sm501-disp-ctrl", 0x1000); + memory_region_add_subregion(&s->mmio_region, SM501_DC, + &s->disp_ctrl_region); + memory_region_init_io(&s->twoD_engine_region, OBJECT(dev), + &sm501_2d_engine_ops, s, + "sm501-2d-engine", 0x54); + memory_region_add_subregion(&s->mmio_region, SM501_2D_ENGINE, + &s->twoD_engine_region); + + /* create qemu graphic console */ + s->con = graphic_console_init(dev, 0, &sm501_ops, s); +} + +static const VMStateDescription vmstate_sm501_state = { + .name = "sm501-state", + .version_id 
= 2, + .minimum_version_id = 2, + .fields = (VMStateField[]) { + VMSTATE_UINT32(local_mem_size_index, SM501State), + VMSTATE_UINT32(system_control, SM501State), + VMSTATE_UINT32(misc_control, SM501State), + VMSTATE_UINT32(gpio_31_0_control, SM501State), + VMSTATE_UINT32(gpio_63_32_control, SM501State), + VMSTATE_UINT32(dram_control, SM501State), + VMSTATE_UINT32(arbitration_control, SM501State), + VMSTATE_UINT32(irq_mask, SM501State), + VMSTATE_UINT32(misc_timing, SM501State), + VMSTATE_UINT32(power_mode_control, SM501State), + VMSTATE_UINT32(uart0_ier, SM501State), + VMSTATE_UINT32(uart0_lcr, SM501State), + VMSTATE_UINT32(uart0_mcr, SM501State), + VMSTATE_UINT32(uart0_scr, SM501State), + VMSTATE_UINT8_ARRAY(dc_palette, SM501State, DC_PALETTE_ENTRIES), + VMSTATE_UINT32(dc_panel_control, SM501State), + VMSTATE_UINT32(dc_panel_panning_control, SM501State), + VMSTATE_UINT32(dc_panel_fb_addr, SM501State), + VMSTATE_UINT32(dc_panel_fb_offset, SM501State), + VMSTATE_UINT32(dc_panel_fb_width, SM501State), + VMSTATE_UINT32(dc_panel_fb_height, SM501State), + VMSTATE_UINT32(dc_panel_tl_location, SM501State), + VMSTATE_UINT32(dc_panel_br_location, SM501State), + VMSTATE_UINT32(dc_panel_h_total, SM501State), + VMSTATE_UINT32(dc_panel_h_sync, SM501State), + VMSTATE_UINT32(dc_panel_v_total, SM501State), + VMSTATE_UINT32(dc_panel_v_sync, SM501State), + VMSTATE_UINT32(dc_panel_hwc_addr, SM501State), + VMSTATE_UINT32(dc_panel_hwc_location, SM501State), + VMSTATE_UINT32(dc_panel_hwc_color_1_2, SM501State), + VMSTATE_UINT32(dc_panel_hwc_color_3, SM501State), + VMSTATE_UINT32(dc_video_control, SM501State), + VMSTATE_UINT32(dc_crt_control, SM501State), + VMSTATE_UINT32(dc_crt_fb_addr, SM501State), + VMSTATE_UINT32(dc_crt_fb_offset, SM501State), + VMSTATE_UINT32(dc_crt_h_total, SM501State), + VMSTATE_UINT32(dc_crt_h_sync, SM501State), + VMSTATE_UINT32(dc_crt_v_total, SM501State), + VMSTATE_UINT32(dc_crt_v_sync, SM501State), + VMSTATE_UINT32(dc_crt_hwc_addr, SM501State), + VMSTATE_UINT32(dc_crt_hwc_location, SM501State), + VMSTATE_UINT32(dc_crt_hwc_color_1_2, SM501State), + VMSTATE_UINT32(dc_crt_hwc_color_3, SM501State), + VMSTATE_UINT32(twoD_source, SM501State), + VMSTATE_UINT32(twoD_destination, SM501State), + VMSTATE_UINT32(twoD_dimension, SM501State), + VMSTATE_UINT32(twoD_control, SM501State), + VMSTATE_UINT32(twoD_pitch, SM501State), + VMSTATE_UINT32(twoD_foreground, SM501State), + VMSTATE_UINT32(twoD_background, SM501State), + VMSTATE_UINT32(twoD_stretch, SM501State), + VMSTATE_UINT32(twoD_color_compare, SM501State), + VMSTATE_UINT32(twoD_color_compare_mask, SM501State), + VMSTATE_UINT32(twoD_mask, SM501State), + VMSTATE_UINT32(twoD_clip_tl, SM501State), + VMSTATE_UINT32(twoD_clip_br, SM501State), + VMSTATE_UINT32(twoD_mono_pattern_low, SM501State), + VMSTATE_UINT32(twoD_mono_pattern_high, SM501State), + VMSTATE_UINT32(twoD_window_width, SM501State), + VMSTATE_UINT32(twoD_source_base, SM501State), + VMSTATE_UINT32(twoD_destination_base, SM501State), + VMSTATE_UINT32(twoD_alpha, SM501State), + VMSTATE_UINT32(twoD_wrap, SM501State), + /* Added in version 2 */ + VMSTATE_UINT8(i2c_byte_count, SM501State), + VMSTATE_UINT8(i2c_status, SM501State), + VMSTATE_UINT8(i2c_addr, SM501State), + VMSTATE_UINT8_ARRAY(i2c_data, SM501State, 16), + VMSTATE_END_OF_LIST() + } +}; + +#define TYPE_SYSBUS_SM501 "sysbus-sm501" +OBJECT_DECLARE_SIMPLE_TYPE(SM501SysBusState, SYSBUS_SM501) + +struct SM501SysBusState { + /*< private >*/ + SysBusDevice parent_obj; + /*< public >*/ + SM501State state; + uint32_t vram_size; + uint32_t
base; + SerialMM serial; +}; + +static void sm501_realize_sysbus(DeviceState *dev, Error **errp) +{ + SM501SysBusState *s = SYSBUS_SM501(dev); + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + DeviceState *usb_dev; + MemoryRegion *mr; + + sm501_init(&s->state, dev, s->vram_size); + if (get_local_mem_size(&s->state) != s->vram_size) { + error_setg(errp, "Invalid VRAM size, nearest valid size is %" PRIu32, + get_local_mem_size(&s->state)); + return; + } + sysbus_init_mmio(sbd, &s->state.local_mem_region); + sysbus_init_mmio(sbd, &s->state.mmio_region); + + /* bridge to usb host emulation module */ + usb_dev = qdev_new("sysbus-ohci"); + qdev_prop_set_uint32(usb_dev, "num-ports", 2); + qdev_prop_set_uint64(usb_dev, "dma-offset", s->base); + sysbus_realize_and_unref(SYS_BUS_DEVICE(usb_dev), &error_fatal); + memory_region_add_subregion(&s->state.mmio_region, SM501_USB_HOST, + sysbus_mmio_get_region(SYS_BUS_DEVICE(usb_dev), 0)); + sysbus_pass_irq(sbd, SYS_BUS_DEVICE(usb_dev)); + + /* bridge to serial emulation module */ + sysbus_realize(SYS_BUS_DEVICE(&s->serial), &error_fatal); + mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->serial), 0); + memory_region_add_subregion(&s->state.mmio_region, SM501_UART0, mr); + /* TODO : chain irq to IRL */ +} + +static Property sm501_sysbus_properties[] = { + DEFINE_PROP_UINT32("vram-size", SM501SysBusState, vram_size, 0), + DEFINE_PROP_UINT32("base", SM501SysBusState, base, 0), + DEFINE_PROP_END_OF_LIST(), +}; + +static void sm501_reset_sysbus(DeviceState *dev) +{ + SM501SysBusState *s = SYSBUS_SM501(dev); + sm501_reset(&s->state); +} + +static const VMStateDescription vmstate_sm501_sysbus = { + .name = TYPE_SYSBUS_SM501, + .version_id = 2, + .minimum_version_id = 2, + .fields = (VMStateField[]) { + VMSTATE_STRUCT(state, SM501SysBusState, 1, + vmstate_sm501_state, SM501State), + VMSTATE_END_OF_LIST() + } +}; + +static void sm501_sysbus_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = sm501_realize_sysbus; + set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories); + dc->desc = "SM501 Multimedia Companion"; + device_class_set_props(dc, sm501_sysbus_properties); + dc->reset = sm501_reset_sysbus; + dc->vmsd = &vmstate_sm501_sysbus; +} + +static void sm501_sysbus_init(Object *o) +{ + SM501SysBusState *sm501 = SYSBUS_SM501(o); + SerialMM *smm = &sm501->serial; + + object_initialize_child(o, "serial", smm, TYPE_SERIAL_MM); + qdev_set_legacy_instance_id(DEVICE(smm), SM501_UART0, 2); + qdev_prop_set_uint8(DEVICE(smm), "regshift", 2); + qdev_prop_set_uint8(DEVICE(smm), "endianness", DEVICE_LITTLE_ENDIAN); + + object_property_add_alias(o, "chardev", + OBJECT(smm), "chardev"); +} + +static const TypeInfo sm501_sysbus_info = { + .name = TYPE_SYSBUS_SM501, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(SM501SysBusState), + .class_init = sm501_sysbus_class_init, + .instance_init = sm501_sysbus_init, +}; + +#define TYPE_PCI_SM501 "sm501" +OBJECT_DECLARE_SIMPLE_TYPE(SM501PCIState, PCI_SM501) + +struct SM501PCIState { + /*< private >*/ + PCIDevice parent_obj; + /*< public >*/ + SM501State state; + uint32_t vram_size; +}; + +static void sm501_realize_pci(PCIDevice *dev, Error **errp) +{ + SM501PCIState *s = PCI_SM501(dev); + + sm501_init(&s->state, DEVICE(dev), s->vram_size); + if (get_local_mem_size(&s->state) != s->vram_size) { + error_setg(errp, "Invalid VRAM size, nearest valid size is %" PRIu32, + get_local_mem_size(&s->state)); + return; + } + pci_register_bar(dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, + 
&s->state.local_mem_region); + pci_register_bar(dev, 1, PCI_BASE_ADDRESS_SPACE_MEMORY, + &s->state.mmio_region); +} + +static Property sm501_pci_properties[] = { + DEFINE_PROP_UINT32("vram-size", SM501PCIState, vram_size, 64 * MiB), + DEFINE_PROP_END_OF_LIST(), +}; + +static void sm501_reset_pci(DeviceState *dev) +{ + SM501PCIState *s = PCI_SM501(dev); + sm501_reset(&s->state); + /* Bits 2:0 of misc_control register is 001 for PCI */ + s->state.misc_control |= 1; +} + +static const VMStateDescription vmstate_sm501_pci = { + .name = TYPE_PCI_SM501, + .version_id = 2, + .minimum_version_id = 2, + .fields = (VMStateField[]) { + VMSTATE_PCI_DEVICE(parent_obj, SM501PCIState), + VMSTATE_STRUCT(state, SM501PCIState, 1, + vmstate_sm501_state, SM501State), + VMSTATE_END_OF_LIST() + } +}; + +static void sm501_pci_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + + k->realize = sm501_realize_pci; + k->vendor_id = PCI_VENDOR_ID_SILICON_MOTION; + k->device_id = PCI_DEVICE_ID_SM501; + k->class_id = PCI_CLASS_DISPLAY_OTHER; + set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories); + dc->desc = "SM501 Display Controller"; + device_class_set_props(dc, sm501_pci_properties); + dc->reset = sm501_reset_pci; + dc->hotpluggable = false; + dc->vmsd = &vmstate_sm501_pci; +} + +static const TypeInfo sm501_pci_info = { + .name = TYPE_PCI_SM501, + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(SM501PCIState), + .class_init = sm501_pci_class_init, + .interfaces = (InterfaceInfo[]) { + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { }, + }, +}; + +static void sm501_register_types(void) +{ + type_register_static(&sm501_sysbus_info); + type_register_static(&sm501_pci_info); +} + +type_init(sm501_register_types) diff --git a/hw/display/ssd0303.c b/hw/display/ssd0303.c new file mode 100644 index 000000000..aeae22da9 --- /dev/null +++ b/hw/display/ssd0303.c @@ -0,0 +1,336 @@ +/* + * SSD0303 OLED controller with OSRAM Pictiva 96x16 display. + * + * Copyright (c) 2006-2007 CodeSourcery. + * Written by Paul Brook + * + * This code is licensed under the GPL. + */ + +/* The controller can support a variety of different displays, but we only + implement one. Most of the commands relating to brightness and geometry + setup are ignored. */ + +#include "qemu/osdep.h" +#include "hw/i2c/i2c.h" +#include "migration/vmstate.h" +#include "qemu/module.h" +#include "ui/console.h" +#include "qom/object.h" + +//#define DEBUG_SSD0303 1 + +#ifdef DEBUG_SSD0303 +#define DPRINTF(fmt, ...) \ +do { printf("ssd0303: " fmt , ## __VA_ARGS__); } while (0) +#define BADF(fmt, ...) \ +do { fprintf(stderr, "ssd0303: error: " fmt , ## __VA_ARGS__); exit(1);} while (0) +#else +#define DPRINTF(fmt, ...) do {} while(0) +#define BADF(fmt, ...) \ +do { fprintf(stderr, "ssd0303: error: " fmt , ## __VA_ARGS__);} while (0) +#endif + +/* Scaling factor for pixels.
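+ * Each panel pixel is drawn as a MAGNIFY x MAGNIFY block on the host surface, so the 96x16 display is rendered as a 384x64 console.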
*/ +#define MAGNIFY 4 + +enum ssd0303_mode +{ + SSD0303_IDLE, + SSD0303_DATA, + SSD0303_CMD +}; + +enum ssd0303_cmd { + SSD0303_CMD_NONE, + SSD0303_CMD_SKIP1 +}; + +#define TYPE_SSD0303 "ssd0303" +OBJECT_DECLARE_SIMPLE_TYPE(ssd0303_state, SSD0303) + +struct ssd0303_state { + I2CSlave parent_obj; + + QemuConsole *con; + int row; + int col; + int start_line; + int mirror; + int flash; + int enabled; + int inverse; + int redraw; + enum ssd0303_mode mode; + enum ssd0303_cmd cmd_state; + uint8_t framebuffer[132*8]; +}; + +static uint8_t ssd0303_recv(I2CSlave *i2c) +{ + BADF("Reads not implemented\n"); + return 0xff; +} + +static int ssd0303_send(I2CSlave *i2c, uint8_t data) +{ + ssd0303_state *s = SSD0303(i2c); + enum ssd0303_cmd old_cmd_state; + + switch (s->mode) { + case SSD0303_IDLE: + DPRINTF("byte 0x%02x\n", data); + if (data == 0x80) + s->mode = SSD0303_CMD; + else if (data == 0x40) + s->mode = SSD0303_DATA; + else + BADF("Unexpected byte 0x%x\n", data); + break; + case SSD0303_DATA: + DPRINTF("data 0x%02x\n", data); + if (s->col < 132) { + s->framebuffer[s->col + s->row * 132] = data; + s->col++; + s->redraw = 1; + } + break; + case SSD0303_CMD: + old_cmd_state = s->cmd_state; + s->cmd_state = SSD0303_CMD_NONE; + switch (old_cmd_state) { + case SSD0303_CMD_NONE: + DPRINTF("cmd 0x%02x\n", data); + s->mode = SSD0303_IDLE; + switch (data) { + case 0x00 ... 0x0f: /* Set lower column address. */ + s->col = (s->col & 0xf0) | (data & 0xf); + break; + case 0x10 ... 0x20: /* Set higher column address. */ + s->col = (s->col & 0x0f) | ((data & 0xf) << 4); + break; + case 0x40 ... 0x7f: /* Set start line. */ + s->start_line = 0; + break; + case 0x81: /* Set contrast (Ignored). */ + s->cmd_state = SSD0303_CMD_SKIP1; + break; + case 0xa0: /* Mirror off. */ + s->mirror = 0; + break; + case 0xa1: /* Mirror on. */ + s->mirror = 1; + break; + case 0xa4: /* Entire display off. */ + s->flash = 0; + break; + case 0xa5: /* Entire display on. */ + s->flash = 1; + break; + case 0xa6: /* Inverse off. */ + s->inverse = 0; + break; + case 0xa7: /* Inverse on. */ + s->inverse = 1; + break; + case 0xa8: /* Set multiplex ratio (Ignored). */ + s->cmd_state = SSD0303_CMD_SKIP1; + break; + case 0xad: /* DC-DC power control. */ + s->cmd_state = SSD0303_CMD_SKIP1; + break; + case 0xae: /* Display off. */ + s->enabled = 0; + break; + case 0xaf: /* Display on. */ + s->enabled = 1; + break; + case 0xb0 ... 0xbf: /* Set Page address. */ + s->row = data & 7; + break; + case 0xc0 ... 0xc8: /* Set COM output direction (Ignored). */ + break; + case 0xd3: /* Set display offset (Ignored). */ + s->cmd_state = SSD0303_CMD_SKIP1; + break; + case 0xd5: /* Set display clock (Ignored). */ + s->cmd_state = SSD0303_CMD_SKIP1; + break; + case 0xd8: /* Set color and power mode (Ignored). */ + s->cmd_state = SSD0303_CMD_SKIP1; + break; + case 0xd9: /* Set pre-charge period (Ignored). */ + s->cmd_state = SSD0303_CMD_SKIP1; + break; + case 0xda: /* Set COM pin configuration (Ignored). */ + s->cmd_state = SSD0303_CMD_SKIP1; + break; + case 0xdb: /* Set VCOM deselect level (Ignored). */ + s->cmd_state = SSD0303_CMD_SKIP1; + break; + case 0xe3: /* no-op.
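+ * Accepted and ignored so that command streams padded with it do not hit the unknown-command error below.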
*/ + break; + default: + BADF("Unknown command: 0x%x\n", data); + } + break; + case SSD0303_CMD_SKIP1: + DPRINTF("skip 0x%02x\n", data); + break; + } + break; + } + return 0; +} + +static int ssd0303_event(I2CSlave *i2c, enum i2c_event event) +{ + ssd0303_state *s = SSD0303(i2c); + + switch (event) { + case I2C_FINISH: + s->mode = SSD0303_IDLE; + break; + case I2C_START_RECV: + case I2C_START_SEND: + case I2C_NACK: + /* Nothing to do. */ + break; + } + + return 0; +} + +static void ssd0303_update_display(void *opaque) +{ + ssd0303_state *s = (ssd0303_state *)opaque; + DisplaySurface *surface = qemu_console_surface(s->con); + uint8_t *dest; + uint8_t *src; + int x; + int y; + int line; + char *colors[2]; + char colortab[MAGNIFY * 8]; + int dest_width; + uint8_t mask; + + if (!s->redraw) + return; + + switch (surface_bits_per_pixel(surface)) { + case 0: + return; + case 15: + dest_width = 2; + break; + case 16: + dest_width = 2; + break; + case 24: + dest_width = 3; + break; + case 32: + dest_width = 4; + break; + default: + BADF("Bad color depth\n"); + return; + } + dest_width *= MAGNIFY; + memset(colortab, 0xff, dest_width); + memset(colortab + dest_width, 0, dest_width); + if (s->flash) { + colors[0] = colortab; + colors[1] = colortab; + } else if (s->inverse) { + colors[0] = colortab; + colors[1] = colortab + dest_width; + } else { + colors[0] = colortab + dest_width; + colors[1] = colortab; + } + dest = surface_data(surface); + for (y = 0; y < 16; y++) { + line = (y + s->start_line) & 63; + src = s->framebuffer + 132 * (line >> 3) + 36; + mask = 1 << (line & 7); + for (x = 0; x < 96; x++) { + memcpy(dest, colors[(*src & mask) != 0], dest_width); + dest += dest_width; + src++; + } + for (x = 1; x < MAGNIFY; x++) { + memcpy(dest, dest - dest_width * 96, dest_width * 96); + dest += dest_width * 96; + } + } + s->redraw = 0; + dpy_gfx_update(s->con, 0, 0, 96 * MAGNIFY, 16 * MAGNIFY); +} + +static void ssd0303_invalidate_display(void * opaque) +{ + ssd0303_state *s = (ssd0303_state *)opaque; + s->redraw = 1; +} + +static const VMStateDescription vmstate_ssd0303 = { + .name = "ssd0303_oled", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_INT32(row, ssd0303_state), + VMSTATE_INT32(col, ssd0303_state), + VMSTATE_INT32(start_line, ssd0303_state), + VMSTATE_INT32(mirror, ssd0303_state), + VMSTATE_INT32(flash, ssd0303_state), + VMSTATE_INT32(enabled, ssd0303_state), + VMSTATE_INT32(inverse, ssd0303_state), + VMSTATE_INT32(redraw, ssd0303_state), + VMSTATE_UINT32(mode, ssd0303_state), + VMSTATE_UINT32(cmd_state, ssd0303_state), + VMSTATE_BUFFER(framebuffer, ssd0303_state), + VMSTATE_I2C_SLAVE(parent_obj, ssd0303_state), + VMSTATE_END_OF_LIST() + } +}; + +static const GraphicHwOps ssd0303_ops = { + .invalidate = ssd0303_invalidate_display, + .gfx_update = ssd0303_update_display, +}; + +static void ssd0303_realize(DeviceState *dev, Error **errp) +{ + ssd0303_state *s = SSD0303(dev); + + s->con = graphic_console_init(dev, 0, &ssd0303_ops, s); + qemu_console_resize(s->con, 96 * MAGNIFY, 16 * MAGNIFY); +} + +static void ssd0303_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + I2CSlaveClass *k = I2C_SLAVE_CLASS(klass); + + dc->realize = ssd0303_realize; + k->event = ssd0303_event; + k->recv = ssd0303_recv; + k->send = ssd0303_send; + dc->vmsd = &vmstate_ssd0303; +} + +static const TypeInfo ssd0303_info = { + .name = TYPE_SSD0303, + .parent = TYPE_I2C_SLAVE, + .instance_size = sizeof(ssd0303_state), + .class_init = 
ssd0303_class_init, +}; + +static void ssd0303_register_types(void) +{ + type_register_static(&ssd0303_info); +} + +type_init(ssd0303_register_types) diff --git a/hw/display/ssd0323.c b/hw/display/ssd0323.c new file mode 100644 index 000000000..ab229d32b --- /dev/null +++ b/hw/display/ssd0323.c @@ -0,0 +1,388 @@ +/* + * SSD0323 OLED controller with OSRAM Pictiva 128x64 display. + * + * Copyright (c) 2006-2007 CodeSourcery. + * Written by Paul Brook + * + * This code is licensed under the GPL. + */ + +/* The controller can support a variety of different displays, but we only + implement one. Most of the commands relating to brightness and geometry + setup are ignored. */ + +#include "qemu/osdep.h" +#include "hw/ssi/ssi.h" +#include "migration/vmstate.h" +#include "qemu/module.h" +#include "ui/console.h" +#include "qom/object.h" + +//#define DEBUG_SSD0323 1 + +#ifdef DEBUG_SSD0323 +#define DPRINTF(fmt, ...) \ +do { printf("ssd0323: " fmt , ## __VA_ARGS__); } while (0) +#define BADF(fmt, ...) \ +do { \ + fprintf(stderr, "ssd0323: error: " fmt , ## __VA_ARGS__); abort(); \ +} while (0) +#else +#define DPRINTF(fmt, ...) do {} while(0) +#define BADF(fmt, ...) \ +do { fprintf(stderr, "ssd0323: error: " fmt , ## __VA_ARGS__);} while (0) +#endif + +/* Scaling factor for pixels. */ +#define MAGNIFY 4 + +#define REMAP_SWAP_COLUMN 0x01 +#define REMAP_SWAP_NYBBLE 0x02 +#define REMAP_VERTICAL 0x04 +#define REMAP_SWAP_COM 0x10 +#define REMAP_SPLIT_COM 0x40 + +enum ssd0323_mode +{ + SSD0323_CMD, + SSD0323_DATA +}; + +struct ssd0323_state { + SSIPeripheral ssidev; + QemuConsole *con; + + uint32_t cmd_len; + int32_t cmd; + int32_t cmd_data[8]; + int32_t row; + int32_t row_start; + int32_t row_end; + int32_t col; + int32_t col_start; + int32_t col_end; + int32_t redraw; + int32_t remap; + uint32_t mode; + uint8_t framebuffer[128 * 80 / 2]; +}; + +#define TYPE_SSD0323 "ssd0323" +OBJECT_DECLARE_SIMPLE_TYPE(ssd0323_state, SSD0323) + + +static uint32_t ssd0323_transfer(SSIPeripheral *dev, uint32_t data) +{ + ssd0323_state *s = SSD0323(dev); + + switch (s->mode) { + case SSD0323_DATA: + DPRINTF("data 0x%02x\n", data); + s->framebuffer[s->col + s->row * 64] = data; + if (s->remap & REMAP_VERTICAL) { + s->row++; + if (s->row > s->row_end) { + s->row = s->row_start; + s->col++; + } + if (s->col > s->col_end) { + s->col = s->col_start; + } + } else { + s->col++; + if (s->col > s->col_end) { + s->row++; + s->col = s->col_start; + } + if (s->row > s->row_end) { + s->row = s->row_start; + } + } + s->redraw = 1; + break; + case SSD0323_CMD: + DPRINTF("cmd 0x%02x\n", data); + if (s->cmd_len == 0) { + s->cmd = data; + } else { + s->cmd_data[s->cmd_len - 1] = data; + } + s->cmd_len++; + switch (s->cmd) { +#define DATA(x) if (s->cmd_len <= (x)) return 0 + case 0x15: /* Set column. */ + DATA(2); + s->col = s->col_start = s->cmd_data[0] % 64; + s->col_end = s->cmd_data[1] % 64; + break; + case 0x75: /* Set row. */ + DATA(2); + s->row = s->row_start = s->cmd_data[0] % 80; + s->row_end = s->cmd_data[1] % 80; + break; + case 0x81: /* Set contrast */ + DATA(1); + break; + case 0x84: case 0x85: case 0x86: /* Max current. */ + DATA(0); + break; + case 0xa0: /* Set remapping. */ + /* FIXME: Implement this. */ + DATA(1); + s->remap = s->cmd_data[0]; + break; + case 0xa1: /* Set display start line. */ + case 0xa2: /* Set display offset. */ + /* FIXME: Implement these. */ + DATA(1); + break; + case 0xa4: /* Normal mode. */ + case 0xa5: /* All on. */ + case 0xa6: /* All off. */ + case 0xa7: /* Inverse. */ + /* FIXME: Implement these.
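+ * 0xa4 resumes normal display from RAM, 0xa5/0xa6 force every pixel on/off and 0xa7 inverts; they take no argument bytes, and since only the framebuffer path is modelled they currently have no visible effect.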
*/ + DATA(0); + break; + case 0xa8: /* Set multiplex ratio. */ + case 0xad: /* Set DC-DC converter. */ + DATA(1); + /* Ignored. Don't care. */ + break; + case 0xae: /* Display off. */ + case 0xaf: /* Display on. */ + DATA(0); + /* TODO: Implement power control. */ + break; + case 0xb1: /* Set phase length. */ + case 0xb2: /* Set row period. */ + case 0xb3: /* Set clock rate. */ + case 0xbc: /* Set precharge. */ + case 0xbe: /* Set VCOMH. */ + case 0xbf: /* Set segment low. */ + DATA(1); + /* Ignored. Don't care. */ + break; + case 0xb8: /* Set grey scale table. */ + /* FIXME: Implement this. */ + DATA(8); + break; + case 0xe3: /* NOP. */ + DATA(0); + break; + case 0xff: /* Nasty hack because we don't handle chip selects + properly. */ + break; + default: + BADF("Unknown command: 0x%x\n", data); + } + s->cmd_len = 0; + return 0; + } + return 0; +} + +static void ssd0323_update_display(void *opaque) +{ + ssd0323_state *s = (ssd0323_state *)opaque; + DisplaySurface *surface = qemu_console_surface(s->con); + uint8_t *dest; + uint8_t *src; + int x; + int y; + int i; + int line; + char *colors[16]; + char colortab[MAGNIFY * 64]; + char *p; + int dest_width; + + if (!s->redraw) + return; + + switch (surface_bits_per_pixel(surface)) { + case 0: + return; + case 15: + dest_width = 2; + break; + case 16: + dest_width = 2; + break; + case 24: + dest_width = 3; + break; + case 32: + dest_width = 4; + break; + default: + BADF("Bad color depth\n"); + return; + } + p = colortab; + for (i = 0; i < 16; i++) { + int n; + colors[i] = p; + switch (surface_bits_per_pixel(surface)) { + case 15: + n = i * 2 + (i >> 3); + p[0] = n | (n << 5); + p[1] = (n << 2) | (n >> 3); + break; + case 16: + n = i * 2 + (i >> 3); + p[0] = n | (n << 6) | ((n << 1) & 0x20); + p[1] = (n << 3) | (n >> 2); + break; + case 24: + case 32: + n = (i << 4) | i; + p[0] = p[1] = p[2] = n; + break; + default: + BADF("Bad color depth\n"); + return; + } + p += dest_width; + } + /* TODO: Implement row/column remapping. */ + dest = surface_data(surface); + for (y = 0; y < 64; y++) { + line = y; + src = s->framebuffer + 64 * line; + for (x = 0; x < 64; x++) { + int val; + val = *src >> 4; + for (i = 0; i < MAGNIFY; i++) { + memcpy(dest, colors[val], dest_width); + dest += dest_width; + } + val = *src & 0xf; + for (i = 0; i < MAGNIFY; i++) { + memcpy(dest, colors[val], dest_width); + dest += dest_width; + } + src++; + } + for (i = 1; i < MAGNIFY; i++) { + memcpy(dest, dest - dest_width * MAGNIFY * 128, + dest_width * 128 * MAGNIFY); + dest += dest_width * 128 * MAGNIFY; + } + } + s->redraw = 0; + dpy_gfx_update(s->con, 0, 0, 128 * MAGNIFY, 64 * MAGNIFY); +} + +static void ssd0323_invalidate_display(void * opaque) +{ + ssd0323_state *s = (ssd0323_state *)opaque; + s->redraw = 1; +} + +/* Command/data input. */ +static void ssd0323_cd(void *opaque, int n, int level) +{ + ssd0323_state *s = (ssd0323_state *)opaque; + DPRINTF("%s mode\n", level ? "Data" : "Command"); + s->mode = level ? 
SSD0323_DATA : SSD0323_CMD; +} + +static int ssd0323_post_load(void *opaque, int version_id) +{ + ssd0323_state *s = (ssd0323_state *)opaque; + + if (s->cmd_len > ARRAY_SIZE(s->cmd_data)) { + return -EINVAL; + } + if (s->row < 0 || s->row >= 80) { + return -EINVAL; + } + if (s->row_start < 0 || s->row_start >= 80) { + return -EINVAL; + } + if (s->row_end < 0 || s->row_end >= 80) { + return -EINVAL; + } + if (s->col < 0 || s->col >= 64) { + return -EINVAL; + } + if (s->col_start < 0 || s->col_start >= 64) { + return -EINVAL; + } + if (s->col_end < 0 || s->col_end >= 64) { + return -EINVAL; + } + if (s->mode != SSD0323_CMD && s->mode != SSD0323_DATA) { + return -EINVAL; + } + + return 0; +} + +static const VMStateDescription vmstate_ssd0323 = { + .name = "ssd0323_oled", + .version_id = 2, + .minimum_version_id = 2, + .post_load = ssd0323_post_load, + .fields = (VMStateField []) { + VMSTATE_UINT32(cmd_len, ssd0323_state), + VMSTATE_INT32(cmd, ssd0323_state), + VMSTATE_INT32_ARRAY(cmd_data, ssd0323_state, 8), + VMSTATE_INT32(row, ssd0323_state), + VMSTATE_INT32(row_start, ssd0323_state), + VMSTATE_INT32(row_end, ssd0323_state), + VMSTATE_INT32(col, ssd0323_state), + VMSTATE_INT32(col_start, ssd0323_state), + VMSTATE_INT32(col_end, ssd0323_state), + VMSTATE_INT32(redraw, ssd0323_state), + VMSTATE_INT32(remap, ssd0323_state), + VMSTATE_UINT32(mode, ssd0323_state), + VMSTATE_BUFFER(framebuffer, ssd0323_state), + VMSTATE_SSI_PERIPHERAL(ssidev, ssd0323_state), + VMSTATE_END_OF_LIST() + } +}; + +static const GraphicHwOps ssd0323_ops = { + .invalidate = ssd0323_invalidate_display, + .gfx_update = ssd0323_update_display, +}; + +static void ssd0323_realize(SSIPeripheral *d, Error **errp) +{ + DeviceState *dev = DEVICE(d); + ssd0323_state *s = SSD0323(d); + + s->col_end = 63; + s->row_end = 79; + s->con = graphic_console_init(dev, 0, &ssd0323_ops, s); + qemu_console_resize(s->con, 128 * MAGNIFY, 64 * MAGNIFY); + + qdev_init_gpio_in(dev, ssd0323_cd, 1); +} + +static void ssd0323_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + SSIPeripheralClass *k = SSI_PERIPHERAL_CLASS(klass); + + k->realize = ssd0323_realize; + k->transfer = ssd0323_transfer; + k->cs_polarity = SSI_CS_HIGH; + dc->vmsd = &vmstate_ssd0323; + set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories); +} + +static const TypeInfo ssd0323_info = { + .name = TYPE_SSD0323, + .parent = TYPE_SSI_PERIPHERAL, + .instance_size = sizeof(ssd0323_state), + .class_init = ssd0323_class_init, +}; + +static void ssd03232_register_types(void) +{ + type_register_static(&ssd0323_info); +} + +type_init(ssd03232_register_types) diff --git a/hw/display/tc6393xb.c b/hw/display/tc6393xb.c new file mode 100644 index 000000000..1f28223c7 --- /dev/null +++ b/hw/display/tc6393xb.c @@ -0,0 +1,568 @@ +/* + * Toshiba TC6393XB I/O Controller. + * Found in Sharp Zaurus SL-6000 (tosa) or some + * Toshiba e-Series PDAs. + * + * Most features are currently unsupported!!! + * + * This code is licensed under the GNU GPL v2. + * + * Contributions after 2012-01-13 are licensed under the terms of the + * GNU GPL, version 2 or (at your option) any later version. 
+ */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/host-utils.h" +#include "hw/irq.h" +#include "hw/display/tc6393xb.h" +#include "exec/memory.h" +#include "hw/block/flash.h" +#include "ui/console.h" +#include "ui/pixel_ops.h" +#include "sysemu/blockdev.h" + +#define IRQ_TC6393_NAND 0 +#define IRQ_TC6393_MMC 1 +#define IRQ_TC6393_OHCI 2 +#define IRQ_TC6393_SERIAL 3 +#define IRQ_TC6393_FB 4 + +#define TC6393XB_NR_IRQS 8 + +#define TC6393XB_GPIOS 16 + +#define SCR_REVID 0x08 /* b Revision ID */ +#define SCR_ISR 0x50 /* b Interrupt Status */ +#define SCR_IMR 0x52 /* b Interrupt Mask */ +#define SCR_IRR 0x54 /* b Interrupt Routing */ +#define SCR_GPER 0x60 /* w GP Enable */ +#define SCR_GPI_SR(i) (0x64 + (i)) /* b3 GPI Status */ +#define SCR_GPI_IMR(i) (0x68 + (i)) /* b3 GPI INT Mask */ +#define SCR_GPI_EDER(i) (0x6c + (i)) /* b3 GPI Edge Detect Enable */ +#define SCR_GPI_LIR(i) (0x70 + (i)) /* b3 GPI Level Invert */ +#define SCR_GPO_DSR(i) (0x78 + (i)) /* b3 GPO Data Set */ +#define SCR_GPO_DOECR(i) (0x7c + (i)) /* b3 GPO Data OE Control */ +#define SCR_GP_IARCR(i) (0x80 + (i)) /* b3 GP Internal Active Register Control */ +#define SCR_GP_IARLCR(i) (0x84 + (i)) /* b3 GP INTERNAL Active Register Level Control */ +#define SCR_GPI_BCR(i) (0x88 + (i)) /* b3 GPI Buffer Control */ +#define SCR_GPA_IARCR 0x8c /* w GPa Internal Active Register Control */ +#define SCR_GPA_IARLCR 0x90 /* w GPa Internal Active Register Level Control */ +#define SCR_GPA_BCR 0x94 /* w GPa Buffer Control */ +#define SCR_CCR 0x98 /* w Clock Control */ +#define SCR_PLL2CR 0x9a /* w PLL2 Control */ +#define SCR_PLL1CR 0x9c /* l PLL1 Control */ +#define SCR_DIARCR 0xa0 /* b Device Internal Active Register Control */ +#define SCR_DBOCR 0xa1 /* b Device Buffer Off Control */ +#define SCR_FER 0xe0 /* b Function Enable */ +#define SCR_MCR 0xe4 /* w Mode Control */ +#define SCR_CONFIG 0xfc /* b Configuration Control */ +#define SCR_DEBUG 0xff /* b Debug */ + +#define NAND_CFG_COMMAND 0x04 /* w Command */ +#define NAND_CFG_BASE 0x10 /* l Control Base Address */ +#define NAND_CFG_INTP 0x3d /* b Interrupt Pin */ +#define NAND_CFG_INTE 0x48 /* b Int Enable */ +#define NAND_CFG_EC 0x4a /* b Event Control */ +#define NAND_CFG_ICC 0x4c /* b Internal Clock Control */ +#define NAND_CFG_ECCC 0x5b /* b ECC Control */ +#define NAND_CFG_NFTC 0x60 /* b NAND Flash Transaction Control */ +#define NAND_CFG_NFM 0x61 /* b NAND Flash Monitor */ +#define NAND_CFG_NFPSC 0x62 /* b NAND Flash Power Supply Control */ +#define NAND_CFG_NFDC 0x63 /* b NAND Flash Detect Control */ + +#define NAND_DATA 0x00 /* l Data */ +#define NAND_MODE 0x04 /* b Mode */ +#define NAND_STATUS 0x05 /* b Status */ +#define NAND_ISR 0x06 /* b Interrupt Status */ +#define NAND_IMR 0x07 /* b Interrupt Mask */ + +#define NAND_MODE_WP 0x80 +#define NAND_MODE_CE 0x10 +#define NAND_MODE_ALE 0x02 +#define NAND_MODE_CLE 0x01 +#define NAND_MODE_ECC_MASK 0x60 +#define NAND_MODE_ECC_EN 0x20 +#define NAND_MODE_ECC_READ 0x40 +#define NAND_MODE_ECC_RST 0x60 + +struct TC6393xbState { + MemoryRegion iomem; + qemu_irq irq; + qemu_irq *sub_irqs; + struct { + uint8_t ISR; + uint8_t IMR; + uint8_t IRR; + uint16_t GPER; + uint8_t GPI_SR[3]; + uint8_t GPI_IMR[3]; + uint8_t GPI_EDER[3]; + uint8_t GPI_LIR[3]; + uint8_t GP_IARCR[3]; + uint8_t GP_IARLCR[3]; + uint8_t GPI_BCR[3]; + uint16_t GPA_IARCR; + uint16_t GPA_IARLCR; + uint16_t CCR; + uint16_t PLL2CR; + uint32_t PLL1CR; + uint8_t DIARCR; + uint8_t DBOCR; + uint8_t FER; + uint16_t MCR; + uint8_t CONFIG; + uint8_t DEBUG; + } scr; 
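+ /* GPIO block state: gpio_level latches the output data (SCR_GPO_DSR) and gpio_dir the output enables (SCR_GPO_DOECR); tc6393xb_gpio_handler_update() masks one with the other, diffs the result against prev_level and toggles only the qemu_irq handlers whose bits changed. */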
+ uint32_t gpio_dir; + uint32_t gpio_level; + uint32_t prev_level; + qemu_irq handler[TC6393XB_GPIOS]; + qemu_irq *gpio_in; + + struct { + uint8_t mode; + uint8_t isr; + uint8_t imr; + } nand; + int nand_enable; + uint32_t nand_phys; + DeviceState *flash; + ECCState ecc; + + QemuConsole *con; + MemoryRegion vram; + uint16_t *vram_ptr; + uint32_t scr_width, scr_height; /* in pixels */ + qemu_irq l3v; + unsigned blank : 1, + blanked : 1; +}; + +static void tc6393xb_gpio_set(void *opaque, int line, int level) +{ +// TC6393xbState *s = opaque; + + if (line > TC6393XB_GPIOS) { + printf("%s: No GPIO pin %i\n", __func__, line); + return; + } + + // FIXME: how does the chip reflect the GPIO input level change? +} + +static void tc6393xb_gpio_handler_update(TC6393xbState *s) +{ + uint32_t level, diff; + int bit; + + level = s->gpio_level & s->gpio_dir; + level &= MAKE_64BIT_MASK(0, TC6393XB_GPIOS); + + for (diff = s->prev_level ^ level; diff; diff ^= 1 << bit) { + bit = ctz32(diff); + qemu_set_irq(s->handler[bit], (level >> bit) & 1); + } + + s->prev_level = level; +} + +qemu_irq tc6393xb_l3v_get(TC6393xbState *s) +{ + return s->l3v; +} + +static void tc6393xb_l3v(void *opaque, int line, int level) +{ + TC6393xbState *s = opaque; + s->blank = !level; + fprintf(stderr, "L3V: %d\n", level); +} + +static void tc6393xb_sub_irq(void *opaque, int line, int level) { + TC6393xbState *s = opaque; + uint8_t isr = s->scr.ISR; + if (level) + isr |= 1 << line; + else + isr &= ~(1 << line); + s->scr.ISR = isr; + qemu_set_irq(s->irq, isr & s->scr.IMR); +} + +#define SCR_REG_B(N) \ + case SCR_ ##N: return s->scr.N +#define SCR_REG_W(N) \ + case SCR_ ##N: return s->scr.N; \ + case SCR_ ##N + 1: return s->scr.N >> 8; +#define SCR_REG_L(N) \ + case SCR_ ##N: return s->scr.N; \ + case SCR_ ##N + 1: return s->scr.N >> 8; \ + case SCR_ ##N + 2: return s->scr.N >> 16; \ + case SCR_ ##N + 3: return s->scr.N >> 24; +#define SCR_REG_A(N) \ + case SCR_ ##N(0): return s->scr.N[0]; \ + case SCR_ ##N(1): return s->scr.N[1]; \ + case SCR_ ##N(2): return s->scr.N[2] + +static uint32_t tc6393xb_scr_readb(TC6393xbState *s, hwaddr addr) +{ + switch (addr) { + case SCR_REVID: + return 3; + case SCR_REVID+1: + return 0; + SCR_REG_B(ISR); + SCR_REG_B(IMR); + SCR_REG_B(IRR); + SCR_REG_W(GPER); + SCR_REG_A(GPI_SR); + SCR_REG_A(GPI_IMR); + SCR_REG_A(GPI_EDER); + SCR_REG_A(GPI_LIR); + case SCR_GPO_DSR(0): + case SCR_GPO_DSR(1): + case SCR_GPO_DSR(2): + return (s->gpio_level >> ((addr - SCR_GPO_DSR(0)) * 8)) & 0xff; + case SCR_GPO_DOECR(0): + case SCR_GPO_DOECR(1): + case SCR_GPO_DOECR(2): + return (s->gpio_dir >> ((addr - SCR_GPO_DOECR(0)) * 8)) & 0xff; + SCR_REG_A(GP_IARCR); + SCR_REG_A(GP_IARLCR); + SCR_REG_A(GPI_BCR); + SCR_REG_W(GPA_IARCR); + SCR_REG_W(GPA_IARLCR); + SCR_REG_W(CCR); + SCR_REG_W(PLL2CR); + SCR_REG_L(PLL1CR); + SCR_REG_B(DIARCR); + SCR_REG_B(DBOCR); + SCR_REG_B(FER); + SCR_REG_W(MCR); + SCR_REG_B(CONFIG); + SCR_REG_B(DEBUG); + } + fprintf(stderr, "tc6393xb_scr: unhandled read at %08x\n", (uint32_t) addr); + return 0; +} +#undef SCR_REG_B +#undef SCR_REG_W +#undef SCR_REG_L +#undef SCR_REG_A + +#define SCR_REG_B(N) \ + case SCR_ ##N: s->scr.N = value; return; +#define SCR_REG_W(N) \ + case SCR_ ##N: s->scr.N = (s->scr.N & ~0xff) | (value & 0xff); return; \ + case SCR_ ##N + 1: s->scr.N = (s->scr.N & 0xff) | (value << 8); return +#define SCR_REG_L(N) \ + case SCR_ ##N: s->scr.N = (s->scr.N & ~0xff) | (value & 0xff); return; \ + case SCR_ ##N + 1: s->scr.N = (s->scr.N & ~(0xff << 8)) | (value & (0xff << 8)); return; \ + 
case SCR_ ##N + 2: s->scr.N = (s->scr.N & ~(0xff << 16)) | (value & (0xff << 16)); return; \ + case SCR_ ##N + 3: s->scr.N = (s->scr.N & ~(0xff << 24)) | (value & (0xff << 24)); return; +#define SCR_REG_A(N) \ + case SCR_ ##N(0): s->scr.N[0] = value; return; \ + case SCR_ ##N(1): s->scr.N[1] = value; return; \ + case SCR_ ##N(2): s->scr.N[2] = value; return + +static void tc6393xb_scr_writeb(TC6393xbState *s, hwaddr addr, uint32_t value) +{ + switch (addr) { + SCR_REG_B(ISR); + SCR_REG_B(IMR); + SCR_REG_B(IRR); + SCR_REG_W(GPER); + SCR_REG_A(GPI_SR); + SCR_REG_A(GPI_IMR); + SCR_REG_A(GPI_EDER); + SCR_REG_A(GPI_LIR); + case SCR_GPO_DSR(0): + case SCR_GPO_DSR(1): + case SCR_GPO_DSR(2): + s->gpio_level = (s->gpio_level & ~(0xff << ((addr - SCR_GPO_DSR(0))*8))) | ((value & 0xff) << ((addr - SCR_GPO_DSR(0))*8)); + tc6393xb_gpio_handler_update(s); + return; + case SCR_GPO_DOECR(0): + case SCR_GPO_DOECR(1): + case SCR_GPO_DOECR(2): + s->gpio_dir = (s->gpio_dir & ~(0xff << ((addr - SCR_GPO_DOECR(0))*8))) | ((value & 0xff) << ((addr - SCR_GPO_DOECR(0))*8)); + tc6393xb_gpio_handler_update(s); + return; + SCR_REG_A(GP_IARCR); + SCR_REG_A(GP_IARLCR); + SCR_REG_A(GPI_BCR); + SCR_REG_W(GPA_IARCR); + SCR_REG_W(GPA_IARLCR); + SCR_REG_W(CCR); + SCR_REG_W(PLL2CR); + SCR_REG_L(PLL1CR); + SCR_REG_B(DIARCR); + SCR_REG_B(DBOCR); + SCR_REG_B(FER); + SCR_REG_W(MCR); + SCR_REG_B(CONFIG); + SCR_REG_B(DEBUG); + } + fprintf(stderr, "tc6393xb_scr: unhandled write at %08x: %02x\n", + (uint32_t) addr, value & 0xff); +} +#undef SCR_REG_B +#undef SCR_REG_W +#undef SCR_REG_L +#undef SCR_REG_A + +static void tc6393xb_nand_irq(TC6393xbState *s) { + qemu_set_irq(s->sub_irqs[IRQ_TC6393_NAND], + (s->nand.imr & 0x80) && (s->nand.imr & s->nand.isr)); +} + +static uint32_t tc6393xb_nand_cfg_readb(TC6393xbState *s, hwaddr addr) { + switch (addr) { + case NAND_CFG_COMMAND: + return s->nand_enable ? 
2 : 0; + case NAND_CFG_BASE: + case NAND_CFG_BASE + 1: + case NAND_CFG_BASE + 2: + case NAND_CFG_BASE + 3: + return s->nand_phys >> ((addr - NAND_CFG_BASE) * 8); + } + fprintf(stderr, "tc6393xb_nand_cfg: unhandled read at %08x\n", (uint32_t) addr); + return 0; +} +static void tc6393xb_nand_cfg_writeb(TC6393xbState *s, hwaddr addr, uint32_t value) { + switch (addr) { + case NAND_CFG_COMMAND: + s->nand_enable = (value & 0x2); + return; + case NAND_CFG_BASE: + case NAND_CFG_BASE + 1: + case NAND_CFG_BASE + 2: + case NAND_CFG_BASE + 3: + s->nand_phys &= ~(0xff << ((addr - NAND_CFG_BASE) * 8)); + s->nand_phys |= (value & 0xff) << ((addr - NAND_CFG_BASE) * 8); + return; + } + fprintf(stderr, "tc6393xb_nand_cfg: unhandled write at %08x: %02x\n", + (uint32_t) addr, value & 0xff); +} + +static uint32_t tc6393xb_nand_readb(TC6393xbState *s, hwaddr addr) { + switch (addr) { + case NAND_DATA + 0: + case NAND_DATA + 1: + case NAND_DATA + 2: + case NAND_DATA + 3: + return nand_getio(s->flash); + case NAND_MODE: + return s->nand.mode; + case NAND_STATUS: + return 0x14; + case NAND_ISR: + return s->nand.isr; + case NAND_IMR: + return s->nand.imr; + } + fprintf(stderr, "tc6393xb_nand: unhandled read at %08x\n", (uint32_t) addr); + return 0; +} +static void tc6393xb_nand_writeb(TC6393xbState *s, hwaddr addr, uint32_t value) { +// fprintf(stderr, "tc6393xb_nand: write at %08x: %02x\n", +// (uint32_t) addr, value & 0xff); + switch (addr) { + case NAND_DATA + 0: + case NAND_DATA + 1: + case NAND_DATA + 2: + case NAND_DATA + 3: + nand_setio(s->flash, value); + s->nand.isr |= 1; + tc6393xb_nand_irq(s); + return; + case NAND_MODE: + s->nand.mode = value; + nand_setpins(s->flash, + value & NAND_MODE_CLE, + value & NAND_MODE_ALE, + !(value & NAND_MODE_CE), + value & NAND_MODE_WP, + 0); // FIXME: gnd + switch (value & NAND_MODE_ECC_MASK) { + case NAND_MODE_ECC_RST: + ecc_reset(&s->ecc); + break; + case NAND_MODE_ECC_READ: + // FIXME + break; + case NAND_MODE_ECC_EN: + ecc_reset(&s->ecc); + } + return; + case NAND_ISR: + s->nand.isr = value; + tc6393xb_nand_irq(s); + return; + case NAND_IMR: + s->nand.imr = value; + tc6393xb_nand_irq(s); + return; + } + fprintf(stderr, "tc6393xb_nand: unhandled write at %08x: %02x\n", + (uint32_t) addr, value & 0xff); +} + +static void tc6393xb_draw_graphic(TC6393xbState *s, int full_update) +{ + DisplaySurface *surface = qemu_console_surface(s->con); + int i; + uint16_t *data_buffer; + uint8_t *data_display; + + data_buffer = s->vram_ptr; + data_display = surface_data(surface); + for (i = 0; i < s->scr_height; i++) { + int j; + for (j = 0; j < s->scr_width; j++, data_display += 4, data_buffer++) { + uint16_t color = *data_buffer; + uint32_t dest_color = rgb_to_pixel32( + ((color & 0xf800) * 0x108) >> 16, + ((color & 0x7e0) * 0x41) >> 9, + ((color & 0x1f) * 0x21) >> 2 + ); + *(uint32_t *)data_display = dest_color; + } + } + dpy_gfx_update_full(s->con); +} + +static void tc6393xb_draw_blank(TC6393xbState *s, int full_update) +{ + DisplaySurface *surface = qemu_console_surface(s->con); + int i, w; + uint8_t *d; + + if (!full_update) + return; + + w = s->scr_width * surface_bytes_per_pixel(surface); + d = surface_data(surface); + for(i = 0; i < s->scr_height; i++) { + memset(d, 0, w); + d += surface_stride(surface); + } + + dpy_gfx_update_full(s->con); +} + +static void tc6393xb_update_display(void *opaque) +{ + TC6393xbState *s = opaque; + DisplaySurface *surface = qemu_console_surface(s->con); + int full_update; + + if (s->scr_width == 0 || s->scr_height == 0) + return; + + full_update = 0;
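+ /* full_update is forced when the blanking state or panel geometry changes; tc6393xb_draw_blank() honours it, while tc6393xb_draw_graphic() repaints every line on each update anyway since no dirty tracking is done on the vram region. */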
+ if (s->blanked != s->blank) { + s->blanked = s->blank; + full_update = 1; + } + if (s->scr_width != surface_width(surface) || + s->scr_height != surface_height(surface)) { + qemu_console_resize(s->con, s->scr_width, s->scr_height); + full_update = 1; + } + if (s->blanked) + tc6393xb_draw_blank(s, full_update); + else + tc6393xb_draw_graphic(s, full_update); +} + + +static uint64_t tc6393xb_readb(void *opaque, hwaddr addr, + unsigned size) +{ + TC6393xbState *s = opaque; + + switch (addr >> 8) { + case 0: + return tc6393xb_scr_readb(s, addr & 0xff); + case 1: + return tc6393xb_nand_cfg_readb(s, addr & 0xff); + }; + + if ((addr &~0xff) == s->nand_phys && s->nand_enable) { +// return tc6393xb_nand_readb(s, addr & 0xff); + uint8_t d = tc6393xb_nand_readb(s, addr & 0xff); +// fprintf(stderr, "tc6393xb_nand: read at %08x: %02hhx\n", (uint32_t) addr, d); + return d; + } + +// fprintf(stderr, "tc6393xb: unhandled read at %08x\n", (uint32_t) addr); + return 0; +} + +static void tc6393xb_writeb(void *opaque, hwaddr addr, + uint64_t value, unsigned size) { + TC6393xbState *s = opaque; + + switch (addr >> 8) { + case 0: + tc6393xb_scr_writeb(s, addr & 0xff, value); + return; + case 1: + tc6393xb_nand_cfg_writeb(s, addr & 0xff, value); + return; + }; + + if ((addr &~0xff) == s->nand_phys && s->nand_enable) + tc6393xb_nand_writeb(s, addr & 0xff, value); + else + fprintf(stderr, "tc6393xb: unhandled write at %08x: %02x\n", + (uint32_t) addr, (int)value & 0xff); +} + +static const GraphicHwOps tc6393xb_gfx_ops = { + .gfx_update = tc6393xb_update_display, +}; + +TC6393xbState *tc6393xb_init(MemoryRegion *sysmem, uint32_t base, qemu_irq irq) +{ + TC6393xbState *s; + DriveInfo *nand; + static const MemoryRegionOps tc6393xb_ops = { + .read = tc6393xb_readb, + .write = tc6393xb_writeb, + .endianness = DEVICE_NATIVE_ENDIAN, + .impl = { + .min_access_size = 1, + .max_access_size = 1, + }, + }; + + s = (TC6393xbState *) g_malloc0(sizeof(TC6393xbState)); + s->irq = irq; + s->gpio_in = qemu_allocate_irqs(tc6393xb_gpio_set, s, TC6393XB_GPIOS); + + s->l3v = qemu_allocate_irq(tc6393xb_l3v, s, 0); + s->blanked = 1; + + s->sub_irqs = qemu_allocate_irqs(tc6393xb_sub_irq, s, TC6393XB_NR_IRQS); + + nand = drive_get(IF_MTD, 0, 0); + s->flash = nand_init(nand ? 
blk_by_legacy_dinfo(nand) : NULL, + NAND_MFR_TOSHIBA, 0x76); + + memory_region_init_io(&s->iomem, NULL, &tc6393xb_ops, s, "tc6393xb", 0x10000); + memory_region_add_subregion(sysmem, base, &s->iomem); + + memory_region_init_ram(&s->vram, NULL, "tc6393xb.vram", 0x100000, + &error_fatal); + s->vram_ptr = memory_region_get_ram_ptr(&s->vram); + memory_region_add_subregion(sysmem, base + 0x100000, &s->vram); + s->scr_width = 480; + s->scr_height = 640; + s->con = graphic_console_init(NULL, 0, &tc6393xb_gfx_ops, s); + + return s; +} diff --git a/hw/display/tcx.c b/hw/display/tcx.c new file mode 100644 index 000000000..d4d09d0df --- /dev/null +++ b/hw/display/tcx.c @@ -0,0 +1,914 @@ +/* + * QEMU TCX Frame buffer + * + * Copyright (c) 2003-2005 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include "qemu-common.h" +#include "qemu/datadir.h" +#include "qapi/error.h" +#include "ui/console.h" +#include "ui/pixel_ops.h" +#include "hw/loader.h" +#include "hw/qdev-properties.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "qemu/error-report.h" +#include "qemu/module.h" +#include "qom/object.h" + +#define TCX_ROM_FILE "QEMU,tcx.bin" +#define FCODE_MAX_ROM_SIZE 0x10000 + +#define MAXX 1024 +#define MAXY 768 +#define TCX_DAC_NREGS 16 +#define TCX_THC_NREGS 0x1000 +#define TCX_DHC_NREGS 0x4000 +#define TCX_TEC_NREGS 0x1000 +#define TCX_ALT_NREGS 0x8000 +#define TCX_STIP_NREGS 0x800000 +#define TCX_BLIT_NREGS 0x800000 +#define TCX_RSTIP_NREGS 0x800000 +#define TCX_RBLIT_NREGS 0x800000 + +#define TCX_THC_MISC 0x818 +#define TCX_THC_CURSXY 0x8fc +#define TCX_THC_CURSMASK 0x900 +#define TCX_THC_CURSBITS 0x980 + +#define TYPE_TCX "sun-tcx" +OBJECT_DECLARE_SIMPLE_TYPE(TCXState, TCX) + +struct TCXState { + SysBusDevice parent_obj; + + QemuConsole *con; + qemu_irq irq; + uint8_t *vram; + uint32_t *vram24, *cplane; + hwaddr prom_addr; + MemoryRegion rom; + MemoryRegion vram_mem; + MemoryRegion vram_8bit; + MemoryRegion vram_24bit; + MemoryRegion stip; + MemoryRegion blit; + MemoryRegion vram_cplane; + MemoryRegion rstip; + MemoryRegion rblit; + MemoryRegion tec; + MemoryRegion dac; + MemoryRegion thc; + MemoryRegion dhc; + MemoryRegion alt; + MemoryRegion thc24; + + ram_addr_t vram24_offset, cplane_offset; + uint32_t tmpblit; + uint32_t vram_size; + uint32_t palette[260]; + uint8_t r[260], g[260], b[260]; + uint16_t width, height, depth; + uint8_t dac_index, dac_state; + uint32_t thcmisc; + uint32_t cursmask[32]; + uint32_t cursbits[32]; + uint16_t cursx; + uint16_t cursy; +}; + +static void tcx_set_dirty(TCXState *s, ram_addr_t addr, int len) +{ + memory_region_set_dirty(&s->vram_mem, addr, len); + + if (s->depth == 24) { + memory_region_set_dirty(&s->vram_mem, s->vram24_offset + addr * 4, + len * 4); + memory_region_set_dirty(&s->vram_mem, s->cplane_offset + addr * 4, + len * 4); + } +} + +static int tcx_check_dirty(TCXState *s, DirtyBitmapSnapshot *snap, + ram_addr_t addr, int len) +{ + int ret; + + ret = memory_region_snapshot_get_dirty(&s->vram_mem, snap, addr, len); + + if (s->depth == 24) { + ret |= memory_region_snapshot_get_dirty(&s->vram_mem, snap, + s->vram24_offset + addr * 4, len * 4); + ret |= memory_region_snapshot_get_dirty(&s->vram_mem, snap, + s->cplane_offset + addr * 4, len * 4); + } + + return ret; +} + +static void update_palette_entries(TCXState *s, int start, int end) +{ + int i; + + for (i = start; i < end; i++) { + s->palette[i] = rgb_to_pixel32(s->r[i], s->g[i], s->b[i]); + } + tcx_set_dirty(s, 0, memory_region_size(&s->vram_mem)); +} + +static void tcx_draw_line32(TCXState *s1, uint8_t *d, + const uint8_t *s, int width) +{ + int x; + uint8_t val; + uint32_t *p = (uint32_t *)d; + + for (x = 0; x < width; x++) { + val = *s++; + *p++ = s1->palette[val]; + } +} + +static void tcx_draw_cursor32(TCXState *s1, uint8_t *d, + int y, int width) +{ + int x, len; + uint32_t mask, bits; + uint32_t *p = (uint32_t *)d; + + y = y - s1->cursy; + mask = s1->cursmask[y]; + bits = s1->cursbits[y]; + len = MIN(width - s1->cursx, 32); + p = &p[s1->cursx]; + for (x = 0; x < len; x++) { + if (mask & 0x80000000) { + if (bits & 0x80000000) { + *p = s1->palette[259]; + } else { + *p = s1->palette[258]; + } + } + p++; + mask <<= 1; + bits <<= 1; + } +} + +/* + * XXX Could be much more optimal: + * detect if line/page/whole screen is in 24 
bit mode + */ +static inline void tcx24_draw_line32(TCXState *s1, uint8_t *d, + const uint8_t *s, int width, + const uint32_t *cplane, + const uint32_t *s24) +{ + int x, r, g, b; + uint8_t val, *p8; + uint32_t *p = (uint32_t *)d; + uint32_t dval; + for(x = 0; x < width; x++, s++, s24++) { + if (be32_to_cpu(*cplane) & 0x03000000) { + /* 24-bit direct, BGR order */ + p8 = (uint8_t *)s24; + p8++; + b = *p8++; + g = *p8++; + r = *p8; + dval = rgb_to_pixel32(r, g, b); + } else { + /* 8-bit pseudocolor */ + val = *s; + dval = s1->palette[val]; + } + *p++ = dval; + cplane++; + } +} + +/* Fixed line length 1024 allows us to do nice tricks not possible on + VGA... */ + +static void tcx_update_display(void *opaque) +{ + TCXState *ts = opaque; + DisplaySurface *surface = qemu_console_surface(ts->con); + ram_addr_t page; + DirtyBitmapSnapshot *snap = NULL; + int y, y_start, dd, ds; + uint8_t *d, *s; + + assert(surface_bits_per_pixel(surface) == 32); + + page = 0; + y_start = -1; + d = surface_data(surface); + s = ts->vram; + dd = surface_stride(surface); + ds = 1024; + + snap = memory_region_snapshot_and_clear_dirty(&ts->vram_mem, 0x0, + memory_region_size(&ts->vram_mem), + DIRTY_MEMORY_VGA); + + for (y = 0; y < ts->height; y++, page += ds) { + if (tcx_check_dirty(ts, snap, page, ds)) { + if (y_start < 0) + y_start = y; + + tcx_draw_line32(ts, d, s, ts->width); + if (y >= ts->cursy && y < ts->cursy + 32 && ts->cursx < ts->width) { + tcx_draw_cursor32(ts, d, y, ts->width); + } + } else { + if (y_start >= 0) { + /* flush to display */ + dpy_gfx_update(ts->con, 0, y_start, + ts->width, y - y_start); + y_start = -1; + } + } + s += ds; + d += dd; + } + if (y_start >= 0) { + /* flush to display */ + dpy_gfx_update(ts->con, 0, y_start, + ts->width, y - y_start); + } + g_free(snap); +} + +static void tcx24_update_display(void *opaque) +{ + TCXState *ts = opaque; + DisplaySurface *surface = qemu_console_surface(ts->con); + ram_addr_t page; + DirtyBitmapSnapshot *snap = NULL; + int y, y_start, dd, ds; + uint8_t *d, *s; + uint32_t *cptr, *s24; + + assert(surface_bits_per_pixel(surface) == 32); + + page = 0; + y_start = -1; + d = surface_data(surface); + s = ts->vram; + s24 = ts->vram24; + cptr = ts->cplane; + dd = surface_stride(surface); + ds = 1024; + + snap = memory_region_snapshot_and_clear_dirty(&ts->vram_mem, 0x0, + memory_region_size(&ts->vram_mem), + DIRTY_MEMORY_VGA); + + for (y = 0; y < ts->height; y++, page += ds) { + if (tcx_check_dirty(ts, snap, page, ds)) { + if (y_start < 0) + y_start = y; + + tcx24_draw_line32(ts, d, s, ts->width, cptr, s24); + if (y >= ts->cursy && y < ts->cursy+32 && ts->cursx < ts->width) { + tcx_draw_cursor32(ts, d, y, ts->width); + } + } else { + if (y_start >= 0) { + /* flush to display */ + dpy_gfx_update(ts->con, 0, y_start, + ts->width, y - y_start); + y_start = -1; + } + } + d += dd; + s += ds; + cptr += ds; + s24 += ds; + } + if (y_start >= 0) { + /* flush to display */ + dpy_gfx_update(ts->con, 0, y_start, + ts->width, y - y_start); + } + g_free(snap); +} + +static void tcx_invalidate_display(void *opaque) +{ + TCXState *s = opaque; + + tcx_set_dirty(s, 0, memory_region_size(&s->vram_mem)); + qemu_console_resize(s->con, s->width, s->height); +} + +static void tcx24_invalidate_display(void *opaque) +{ + TCXState *s = opaque; + + tcx_set_dirty(s, 0, memory_region_size(&s->vram_mem)); + qemu_console_resize(s->con, s->width, s->height); +} + +static int vmstate_tcx_post_load(void *opaque, int version_id) +{ + TCXState *s = opaque; + + update_palette_entries(s, 0, 256); + 
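/* Only the raw r/g/b arrays are migrated, so the host-format palette[] cache must be rebuilt and the whole screen marked dirty on the destination. */ +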
tcx_set_dirty(s, 0, memory_region_size(&s->vram_mem)); + return 0; +} + +static const VMStateDescription vmstate_tcx = { + .name ="tcx", + .version_id = 4, + .minimum_version_id = 4, + .post_load = vmstate_tcx_post_load, + .fields = (VMStateField[]) { + VMSTATE_UINT16(height, TCXState), + VMSTATE_UINT16(width, TCXState), + VMSTATE_UINT16(depth, TCXState), + VMSTATE_BUFFER(r, TCXState), + VMSTATE_BUFFER(g, TCXState), + VMSTATE_BUFFER(b, TCXState), + VMSTATE_UINT8(dac_index, TCXState), + VMSTATE_UINT8(dac_state, TCXState), + VMSTATE_END_OF_LIST() + } +}; + +static void tcx_reset(DeviceState *d) +{ + TCXState *s = TCX(d); + + /* Initialize palette */ + memset(s->r, 0, 260); + memset(s->g, 0, 260); + memset(s->b, 0, 260); + s->r[255] = s->g[255] = s->b[255] = 255; + s->r[256] = s->g[256] = s->b[256] = 255; + s->r[258] = s->g[258] = s->b[258] = 255; + update_palette_entries(s, 0, 260); + memset(s->vram, 0, MAXX*MAXY); + memory_region_reset_dirty(&s->vram_mem, 0, MAXX * MAXY * (1 + 4 + 4), + DIRTY_MEMORY_VGA); + s->dac_index = 0; + s->dac_state = 0; + s->cursx = 0xf000; /* Put cursor off screen */ + s->cursy = 0xf000; +} + +static uint64_t tcx_dac_readl(void *opaque, hwaddr addr, + unsigned size) +{ + TCXState *s = opaque; + uint32_t val = 0; + + switch (s->dac_state) { + case 0: + val = s->r[s->dac_index] << 24; + s->dac_state++; + break; + case 1: + val = s->g[s->dac_index] << 24; + s->dac_state++; + break; + case 2: + val = s->b[s->dac_index] << 24; + s->dac_index = (s->dac_index + 1) & 0xff; /* Index autoincrement */ + /* fall through */ + default: + s->dac_state = 0; + break; + } + + return val; +} + +static void tcx_dac_writel(void *opaque, hwaddr addr, uint64_t val, + unsigned size) +{ + TCXState *s = opaque; + unsigned index; + + switch (addr) { + case 0: /* Address */ + s->dac_index = val >> 24; + s->dac_state = 0; + break; + case 4: /* Pixel colours */ + case 12: /* Overlay (cursor) colours */ + if (addr & 8) { + index = (s->dac_index & 3) + 256; + } else { + index = s->dac_index; + } + switch (s->dac_state) { + case 0: + s->r[index] = val >> 24; + update_palette_entries(s, index, index + 1); + s->dac_state++; + break; + case 1: + s->g[index] = val >> 24; + update_palette_entries(s, index, index + 1); + s->dac_state++; + break; + case 2: + s->b[index] = val >> 24; + update_palette_entries(s, index, index + 1); + s->dac_index = (s->dac_index + 1) & 0xff; /* Index autoincrement */ + /* fall through */ + default: + s->dac_state = 0; + break; + } + break; + default: /* Control registers */ + break; + } +} + +static const MemoryRegionOps tcx_dac_ops = { + .read = tcx_dac_readl, + .write = tcx_dac_writel, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +static uint64_t tcx_stip_readl(void *opaque, hwaddr addr, + unsigned size) +{ + return 0; +} + +static void tcx_stip_writel(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + TCXState *s = opaque; + int i; + uint32_t col; + + if (!(addr & 4)) { + s->tmpblit = val; + } else { + addr = (addr >> 3) & 0xfffff; + col = cpu_to_be32(s->tmpblit); + if (s->depth == 24) { + for (i = 0; i < 32; i++) { + if (val & 0x80000000) { + s->vram[addr + i] = s->tmpblit; + s->vram24[addr + i] = col; + } + val <<= 1; + } + } else { + for (i = 0; i < 32; i++) { + if (val & 0x80000000) { + s->vram[addr + i] = s->tmpblit; + } + val <<= 1; + } + } + tcx_set_dirty(s, addr, 32); + } +} + +static void tcx_rstip_writel(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + TCXState *s = 
opaque; + int i; + uint32_t col; + + if (!(addr & 4)) { + s->tmpblit = val; + } else { + addr = (addr >> 3) & 0xfffff; + col = cpu_to_be32(s->tmpblit); + if (s->depth == 24) { + for (i = 0; i < 32; i++) { + if (val & 0x80000000) { + s->vram[addr + i] = s->tmpblit; + s->vram24[addr + i] = col; + s->cplane[addr + i] = col; + } + val <<= 1; + } + } else { + for (i = 0; i < 32; i++) { + if (val & 0x80000000) { + s->vram[addr + i] = s->tmpblit; + } + val <<= 1; + } + } + tcx_set_dirty(s, addr, 32); + } +} + +static const MemoryRegionOps tcx_stip_ops = { + .read = tcx_stip_readl, + .write = tcx_stip_writel, + .endianness = DEVICE_NATIVE_ENDIAN, + .impl = { + .min_access_size = 4, + .max_access_size = 4, + }, + .valid = { + .min_access_size = 4, + .max_access_size = 8, + }, +}; + +static const MemoryRegionOps tcx_rstip_ops = { + .read = tcx_stip_readl, + .write = tcx_rstip_writel, + .endianness = DEVICE_NATIVE_ENDIAN, + .impl = { + .min_access_size = 4, + .max_access_size = 4, + }, + .valid = { + .min_access_size = 4, + .max_access_size = 8, + }, +}; + +static uint64_t tcx_blit_readl(void *opaque, hwaddr addr, + unsigned size) +{ + return 0; +} + +static void tcx_blit_writel(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + TCXState *s = opaque; + uint32_t adsr, len; + int i; + + if (!(addr & 4)) { + s->tmpblit = val; + } else { + addr = (addr >> 3) & 0xfffff; + adsr = val & 0xffffff; + len = ((val >> 24) & 0x1f) + 1; + if (adsr == 0xffffff) { + memset(&s->vram[addr], s->tmpblit, len); + if (s->depth == 24) { + val = s->tmpblit & 0xffffff; + val = cpu_to_be32(val); + for (i = 0; i < len; i++) { + s->vram24[addr + i] = val; + } + } + } else { + memcpy(&s->vram[addr], &s->vram[adsr], len); + if (s->depth == 24) { + memcpy(&s->vram24[addr], &s->vram24[adsr], len * 4); + } + } + tcx_set_dirty(s, addr, len); + } +} + +static void tcx_rblit_writel(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + TCXState *s = opaque; + uint32_t adsr, len; + int i; + + if (!(addr & 4)) { + s->tmpblit = val; + } else { + addr = (addr >> 3) & 0xfffff; + adsr = val & 0xffffff; + len = ((val >> 24) & 0x1f) + 1; + if (adsr == 0xffffff) { + memset(&s->vram[addr], s->tmpblit, len); + if (s->depth == 24) { + val = s->tmpblit & 0xffffff; + val = cpu_to_be32(val); + for (i = 0; i < len; i++) { + s->vram24[addr + i] = val; + s->cplane[addr + i] = val; + } + } + } else { + memcpy(&s->vram[addr], &s->vram[adsr], len); + if (s->depth == 24) { + memcpy(&s->vram24[addr], &s->vram24[adsr], len * 4); + memcpy(&s->cplane[addr], &s->cplane[adsr], len * 4); + } + } + tcx_set_dirty(s, addr, len); + } +} + +static const MemoryRegionOps tcx_blit_ops = { + .read = tcx_blit_readl, + .write = tcx_blit_writel, + .endianness = DEVICE_NATIVE_ENDIAN, + .impl = { + .min_access_size = 4, + .max_access_size = 4, + }, + .valid = { + .min_access_size = 4, + .max_access_size = 8, + }, +}; + +static const MemoryRegionOps tcx_rblit_ops = { + .read = tcx_blit_readl, + .write = tcx_rblit_writel, + .endianness = DEVICE_NATIVE_ENDIAN, + .impl = { + .min_access_size = 4, + .max_access_size = 4, + }, + .valid = { + .min_access_size = 4, + .max_access_size = 8, + }, +}; + +static void tcx_invalidate_cursor_position(TCXState *s) +{ + int ymin, ymax, start, end; + + /* invalidate only near the cursor */ + ymin = s->cursy; + if (ymin >= s->height) { + return; + } + ymax = MIN(s->height, ymin + 32); + start = ymin * 1024; + end = ymax * 1024; + + tcx_set_dirty(s, start, end - start); +} + +static uint64_t tcx_thc_readl(void *opaque, hwaddr 
addr, + unsigned size) +{ + TCXState *s = opaque; + uint64_t val; + + if (addr == TCX_THC_MISC) { + val = s->thcmisc | 0x02000000; + } else { + val = 0; + } + return val; +} + +static void tcx_thc_writel(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + TCXState *s = opaque; + + if (addr == TCX_THC_CURSXY) { + tcx_invalidate_cursor_position(s); + s->cursx = val >> 16; + s->cursy = val; + tcx_invalidate_cursor_position(s); + } else if (addr >= TCX_THC_CURSMASK && addr < TCX_THC_CURSMASK + 128) { + s->cursmask[(addr - TCX_THC_CURSMASK) >> 2] = val; + tcx_invalidate_cursor_position(s); + } else if (addr >= TCX_THC_CURSBITS && addr < TCX_THC_CURSBITS + 128) { + s->cursbits[(addr - TCX_THC_CURSBITS) >> 2] = val; + tcx_invalidate_cursor_position(s); + } else if (addr == TCX_THC_MISC) { + s->thcmisc = val; + } + +} + +static const MemoryRegionOps tcx_thc_ops = { + .read = tcx_thc_readl, + .write = tcx_thc_writel, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +static uint64_t tcx_dummy_readl(void *opaque, hwaddr addr, + unsigned size) +{ + return 0; +} + +static void tcx_dummy_writel(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + return; +} + +static const MemoryRegionOps tcx_dummy_ops = { + .read = tcx_dummy_readl, + .write = tcx_dummy_writel, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +static const GraphicHwOps tcx_ops = { + .invalidate = tcx_invalidate_display, + .gfx_update = tcx_update_display, +}; + +static const GraphicHwOps tcx24_ops = { + .invalidate = tcx24_invalidate_display, + .gfx_update = tcx24_update_display, +}; + +static void tcx_initfn(Object *obj) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + TCXState *s = TCX(obj); + + memory_region_init_rom_nomigrate(&s->rom, obj, "tcx.prom", + FCODE_MAX_ROM_SIZE, &error_fatal); + sysbus_init_mmio(sbd, &s->rom); + + /* 2/STIP : Stippler */ + memory_region_init_io(&s->stip, obj, &tcx_stip_ops, s, "tcx.stip", + TCX_STIP_NREGS); + sysbus_init_mmio(sbd, &s->stip); + + /* 3/BLIT : Blitter */ + memory_region_init_io(&s->blit, obj, &tcx_blit_ops, s, "tcx.blit", + TCX_BLIT_NREGS); + sysbus_init_mmio(sbd, &s->blit); + + /* 5/RSTIP : Raw Stippler */ + memory_region_init_io(&s->rstip, obj, &tcx_rstip_ops, s, "tcx.rstip", + TCX_RSTIP_NREGS); + sysbus_init_mmio(sbd, &s->rstip); + + /* 6/RBLIT : Raw Blitter */ + memory_region_init_io(&s->rblit, obj, &tcx_rblit_ops, s, "tcx.rblit", + TCX_RBLIT_NREGS); + sysbus_init_mmio(sbd, &s->rblit); + + /* 7/TEC : ??? */ + memory_region_init_io(&s->tec, obj, &tcx_dummy_ops, s, "tcx.tec", + TCX_TEC_NREGS); + sysbus_init_mmio(sbd, &s->tec); + + /* 8/CMAP : DAC */ + memory_region_init_io(&s->dac, obj, &tcx_dac_ops, s, "tcx.dac", + TCX_DAC_NREGS); + sysbus_init_mmio(sbd, &s->dac); + + /* 9/THC : Cursor */ + memory_region_init_io(&s->thc, obj, &tcx_thc_ops, s, "tcx.thc", + TCX_THC_NREGS); + sysbus_init_mmio(sbd, &s->thc); + + /* 11/DHC : ??? */ + memory_region_init_io(&s->dhc, obj, &tcx_dummy_ops, s, "tcx.dhc", + TCX_DHC_NREGS); + sysbus_init_mmio(sbd, &s->dhc); + + /* 12/ALT : ??? 
*/ + memory_region_init_io(&s->alt, obj, &tcx_dummy_ops, s, "tcx.alt", + TCX_ALT_NREGS); + sysbus_init_mmio(sbd, &s->alt); +} + +static void tcx_realizefn(DeviceState *dev, Error **errp) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + TCXState *s = TCX(dev); + ram_addr_t vram_offset = 0; + int size, ret; + uint8_t *vram_base; + char *fcode_filename; + + memory_region_init_ram_nomigrate(&s->vram_mem, OBJECT(s), "tcx.vram", + s->vram_size * (1 + 4 + 4), &error_fatal); + vmstate_register_ram_global(&s->vram_mem); + memory_region_set_log(&s->vram_mem, true, DIRTY_MEMORY_VGA); + vram_base = memory_region_get_ram_ptr(&s->vram_mem); + + /* 10/ROM : FCode ROM */ + vmstate_register_ram_global(&s->rom); + fcode_filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, TCX_ROM_FILE); + if (fcode_filename) { + ret = load_image_mr(fcode_filename, &s->rom); + g_free(fcode_filename); + if (ret < 0 || ret > FCODE_MAX_ROM_SIZE) { + warn_report("tcx: could not load prom '%s'", TCX_ROM_FILE); + } + } + + /* 0/DFB8 : 8-bit plane */ + s->vram = vram_base; + size = s->vram_size; + memory_region_init_alias(&s->vram_8bit, OBJECT(s), "tcx.vram.8bit", + &s->vram_mem, vram_offset, size); + sysbus_init_mmio(sbd, &s->vram_8bit); + vram_offset += size; + vram_base += size; + + /* 1/DFB24 : 24bit plane */ + size = s->vram_size * 4; + s->vram24 = (uint32_t *)vram_base; + s->vram24_offset = vram_offset; + memory_region_init_alias(&s->vram_24bit, OBJECT(s), "tcx.vram.24bit", + &s->vram_mem, vram_offset, size); + sysbus_init_mmio(sbd, &s->vram_24bit); + vram_offset += size; + vram_base += size; + + /* 4/RDFB32 : Raw Framebuffer */ + size = s->vram_size * 4; + s->cplane = (uint32_t *)vram_base; + s->cplane_offset = vram_offset; + memory_region_init_alias(&s->vram_cplane, OBJECT(s), "tcx.vram.cplane", + &s->vram_mem, vram_offset, size); + sysbus_init_mmio(sbd, &s->vram_cplane); + + /* 9/THC24bits : NetBSD writes here even with 8-bit display: dummy */ + if (s->depth == 8) { + memory_region_init_io(&s->thc24, OBJECT(s), &tcx_dummy_ops, s, + "tcx.thc24", TCX_THC_NREGS); + sysbus_init_mmio(sbd, &s->thc24); + } + + sysbus_init_irq(sbd, &s->irq); + + if (s->depth == 8) { + s->con = graphic_console_init(dev, 0, &tcx_ops, s); + } else { + s->con = graphic_console_init(dev, 0, &tcx24_ops, s); + } + s->thcmisc = 0; + + qemu_console_resize(s->con, s->width, s->height); +} + +static Property tcx_properties[] = { + DEFINE_PROP_UINT32("vram_size", TCXState, vram_size, -1), + DEFINE_PROP_UINT16("width", TCXState, width, -1), + DEFINE_PROP_UINT16("height", TCXState, height, -1), + DEFINE_PROP_UINT16("depth", TCXState, depth, -1), + DEFINE_PROP_END_OF_LIST(), +}; + +static void tcx_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = tcx_realizefn; + dc->reset = tcx_reset; + dc->vmsd = &vmstate_tcx; + device_class_set_props(dc, tcx_properties); +} + +static const TypeInfo tcx_info = { + .name = TYPE_TCX, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(TCXState), + .instance_init = tcx_initfn, + .class_init = tcx_class_init, +}; + +static void tcx_register_types(void) +{ + type_register_static(&tcx_info); +} + +type_init(tcx_register_types) diff --git a/hw/display/trace-events b/hw/display/trace-events new file mode 100644 index 000000000..3a7a2c957 --- /dev/null +++ b/hw/display/trace-events @@ -0,0 +1,176 @@ +# See docs/devel/tracing.rst for syntax documentation. 
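+# A quick orientation (illustrative): each line below declares one trace
+# event as <name>(<C argument list>) "<printf-style format>".  The build
+# generates a matching trace_<name>() helper for C code, so for example the
+# declaration of jazz_led_read() below is invoked as
+#   trace_jazz_led_read(addr, val);
+# and the format string only controls how enabled backends render the
+# arguments.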
+ +# jazz_led.c +jazz_led_read(uint64_t addr, uint8_t val) "read addr=0x%"PRIx64": 0x%x" +jazz_led_write(uint64_t addr, uint8_t new) "write addr=0x%"PRIx64": 0x%x" + +# xenfb.c +xenfb_mouse_event(void *opaque, int dx, int dy, int dz, int button_state, int abs_pointer_wanted) "%p x %d y %d z %d bs 0x%x abs %d" +xenfb_key_event(void *opaque, int scancode, int button_state) "%p scancode %d bs 0x%x" +xenfb_input_connected(void *xendev, int abs_pointer_wanted) "%p abs %d" + +# g364fb.c +g364fb_read(uint64_t addr, uint32_t val) "read addr=0x%"PRIx64": 0x%x" +g364fb_write(uint64_t addr, uint32_t new) "write addr=0x%"PRIx64": 0x%x" + +# vmware_vga.c +vmware_value_read(uint32_t index, uint32_t value) "index %d, value 0x%x" +vmware_value_write(uint32_t index, uint32_t value) "index %d, value 0x%x" +vmware_palette_read(uint32_t index, uint32_t value) "index %d, value 0x%x" +vmware_palette_write(uint32_t index, uint32_t value) "index %d, value 0x%x" +vmware_scratch_read(uint32_t index, uint32_t value) "index %d, value 0x%x" +vmware_scratch_write(uint32_t index, uint32_t value) "index %d, value 0x%x" +vmware_setmode(uint32_t w, uint32_t h, uint32_t bpp) "%dx%d @ %d bpp" + +# virtio-gpu-base.c +virtio_gpu_features(bool virgl) "virgl %d" + +# virtio-gpu-3d.c +# virtio-gpu.c +virtio_gpu_cmd_get_display_info(void) "" +virtio_gpu_cmd_get_edid(uint32_t scanout) "scanout %d" +virtio_gpu_cmd_set_scanout(uint32_t id, uint32_t res, uint32_t w, uint32_t h, uint32_t x, uint32_t y) "id %d, res 0x%x, w %d, h %d, x %d, y %d" +virtio_gpu_cmd_set_scanout_blob(uint32_t id, uint32_t res, uint32_t w, uint32_t h, uint32_t x, uint32_t y) "id %d, res 0x%x, w %d, h %d, x %d, y %d" +virtio_gpu_cmd_res_create_2d(uint32_t res, uint32_t fmt, uint32_t w, uint32_t h) "res 0x%x, fmt 0x%x, w %d, h %d" +virtio_gpu_cmd_res_create_3d(uint32_t res, uint32_t fmt, uint32_t w, uint32_t h, uint32_t d) "res 0x%x, fmt 0x%x, w %d, h %d, d %d" +virtio_gpu_cmd_res_create_blob(uint32_t res, uint64_t size) "res 0x%x, size %" PRId64 +virtio_gpu_cmd_res_unref(uint32_t res) "res 0x%x" +virtio_gpu_cmd_res_back_attach(uint32_t res) "res 0x%x" +virtio_gpu_cmd_res_back_detach(uint32_t res) "res 0x%x" +virtio_gpu_cmd_res_xfer_toh_2d(uint32_t res) "res 0x%x" +virtio_gpu_cmd_res_xfer_toh_3d(uint32_t res) "res 0x%x" +virtio_gpu_cmd_res_xfer_fromh_3d(uint32_t res) "res 0x%x" +virtio_gpu_cmd_res_flush(uint32_t res, uint32_t w, uint32_t h, uint32_t x, uint32_t y) "res 0x%x, w %d, h %d, x %d, y %d" +virtio_gpu_cmd_ctx_create(uint32_t ctx, const char *name) "ctx 0x%x, name %s" +virtio_gpu_cmd_ctx_destroy(uint32_t ctx) "ctx 0x%x" +virtio_gpu_cmd_ctx_res_attach(uint32_t ctx, uint32_t res) "ctx 0x%x, res 0x%x" +virtio_gpu_cmd_ctx_res_detach(uint32_t ctx, uint32_t res) "ctx 0x%x, res 0x%x" +virtio_gpu_cmd_ctx_submit(uint32_t ctx, uint32_t size) "ctx 0x%x, size %d" +virtio_gpu_update_cursor(uint32_t scanout, uint32_t x, uint32_t y, const char *type, uint32_t res) "scanout %d, x %d, y %d, %s, res 0x%x" +virtio_gpu_fence_ctrl(uint64_t fence, uint32_t type) "fence 0x%" PRIx64 ", type 0x%x" +virtio_gpu_fence_resp(uint64_t fence) "fence 0x%" PRIx64 + +# qxl.c +disable qxl_interface_set_mm_time(int qid, uint32_t mm_time) "%d %d" +disable qxl_io_write_vga(int qid, const char *mode, uint32_t addr, uint32_t val) "%d %s addr=%u val=%u" +qxl_create_guest_primary(int qid, uint32_t width, uint32_t height, uint64_t mem, uint32_t format, uint32_t position) "%d %ux%u mem=0x%" PRIx64 " %u,%u" +qxl_create_guest_primary_rest(int qid, int32_t stride, uint32_t type, uint32_t flags) "%d 
%d,%d,%d" +qxl_destroy_primary(int qid) "%d" +qxl_enter_vga_mode(int qid) "%d" +qxl_exit_vga_mode(int qid) "%d" +qxl_hard_reset(int qid, int64_t loadvm) "%d loadvm=%"PRId64 +qxl_interface_async_complete_io(int qid, uint32_t current_async, void *cookie) "%d current=%d cookie=%p" +qxl_interface_attach_worker(int qid) "%d" +qxl_interface_get_init_info(int qid) "%d" +qxl_interface_set_compression_level(int qid, int64_t level) "%d %"PRId64 +qxl_interface_update_area_complete(int qid, uint32_t surface_id, uint32_t dirty_left, uint32_t dirty_right, uint32_t dirty_top, uint32_t dirty_bottom) "%d surface=%d [%d,%d,%d,%d]" +qxl_interface_update_area_complete_rest(int qid, uint32_t num_updated_rects) "%d #=%d" +qxl_interface_update_area_complete_overflow(int qid, int max) "%d max=%d" +qxl_interface_update_area_complete_schedule_bh(int qid, uint32_t num_dirty) "%d #dirty=%d" +qxl_io_destroy_primary_ignored(int qid, const char *mode) "%d %s" +qxl_io_log(int qid, const char *log_buf) "%d %s" +qxl_io_read_unexpected(int qid) "%d" +qxl_io_unexpected_vga_mode(int qid, uint64_t addr, uint64_t val, const char *desc) "%d 0x%"PRIx64"=%"PRIu64" (%s)" +qxl_io_write(int qid, const char *mode, uint64_t addr, const char *aname, uint64_t val, unsigned size, int async) "%d %s addr=%"PRIu64 " (%s) val=%"PRIu64" size=%u async=%d" +qxl_memslot_add_guest(int qid, uint32_t slot_id, uint64_t guest_start, uint64_t guest_end) "%d %u: guest phys 0x%"PRIx64 " - 0x%" PRIx64 +qxl_post_load(int qid, const char *mode) "%d %s" +qxl_pre_load(int qid) "%d" +qxl_pre_save(int qid) "%d" +qxl_reset_surfaces(int qid) "%d" +qxl_ring_command_check(int qid, const char *mode) "%d %s" +qxl_ring_command_get(int qid, const char *mode) "%d %s" +qxl_ring_command_req_notification(int qid) "%d" +qxl_ring_cursor_check(int qid, const char *mode) "%d %s" +qxl_ring_cursor_get(int qid, const char *mode) "%d %s" +qxl_ring_cursor_req_notification(int qid) "%d" +qxl_ring_res_push(int qid, const char *mode, uint32_t surface_count, uint32_t free_res, void *last_release, const char *notify) "%d %s s#=%d res#=%d last=%p notify=%s" +qxl_ring_res_push_rest(int qid, uint32_t ring_has, uint32_t ring_size, uint32_t prod, uint32_t cons) "%d ring %d/%d [%d,%d]" +qxl_ring_res_put(int qid, uint32_t free_res) "%d #res=%d" +qxl_set_mode(int qid, int modenr, uint32_t x_res, uint32_t y_res, uint32_t bits, uint64_t devmem) "%d mode=%d [ x=%d y=%d @ bpp=%d devmem=0x%" PRIx64 " ]" +qxl_soft_reset(int qid) "%d" +qxl_spice_destroy_surfaces_complete(int qid) "%d" +qxl_spice_destroy_surfaces(int qid, int async) "%d async=%d" +qxl_spice_destroy_surface_wait_complete(int qid, uint32_t id) "%d sid=%d" +qxl_spice_destroy_surface_wait(int qid, uint32_t id, int async) "%d sid=%d async=%d" +qxl_spice_flush_surfaces_async(int qid, uint32_t surface_count, uint32_t num_free_res) "%d s#=%d, res#=%d" +qxl_spice_monitors_config(int qid) "%d" +qxl_spice_loadvm_commands(int qid, void *ext, uint32_t count) "%d ext=%p count=%d" +qxl_spice_oom(int qid) "%d" +qxl_spice_reset_cursor(int qid) "%d" +qxl_spice_reset_image_cache(int qid) "%d" +qxl_spice_reset_memslots(int qid) "%d" +qxl_spice_update_area(int qid, uint32_t surface_id, uint32_t left, uint32_t right, uint32_t top, uint32_t bottom) "%d sid=%d [%d,%d,%d,%d]" +qxl_spice_update_area_rest(int qid, uint32_t num_dirty_rects, uint32_t clear_dirty_region) "%d #d=%d clear=%d" +qxl_surfaces_dirty(int qid, uint64_t offset, uint64_t size) "%d offset=0x%"PRIx64" size=0x%"PRIx64 +qxl_send_events(int qid, uint32_t events) "%d %d" 
+qxl_send_events_vm_stopped(int qid, uint32_t events) "%d %d" +qxl_set_guest_bug(int qid) "%d" +qxl_interrupt_client_monitors_config(int qid, int num_heads, void *heads) "%d %d %p" +qxl_client_monitors_config_unsupported_by_guest(int qid, uint32_t int_mask, void *client_monitors_config) "%d 0x%X %p" +qxl_client_monitors_config_unsupported_by_device(int qid, int revision) "%d revision=%d" +qxl_client_monitors_config_capped(int qid, int requested, int limit) "%d %d %d" +qxl_client_monitors_config_crc(int qid, unsigned size, uint32_t crc32) "%d %u %u" +qxl_set_client_capabilities_unsupported_by_revision(int qid, int revision) "%d revision=%d" + +# qxl-render.c +qxl_render_blit(int32_t stride, int32_t left, int32_t right, int32_t top, int32_t bottom) "stride=%d [%d, %d, %d, %d]" +qxl_render_guest_primary_resized(int32_t width, int32_t height, int32_t stride, int32_t bytes_pp, int32_t bits_pp) "%dx%d, stride %d, bpp %d, depth %d" +qxl_render_update_area_done(void *cookie) "%p" + +# vga.c +vga_std_read_io(uint32_t addr, uint32_t val) "addr 0x%x, val 0x%x" +vga_std_write_io(uint32_t addr, uint32_t val) "addr 0x%x, val 0x%x" +vga_vbe_read(uint32_t index, uint32_t val) "index 0x%x, val 0x%x" +vga_vbe_write(uint32_t index, uint32_t val) "index 0x%x, val 0x%x" + +# cirrus_vga.c +vga_cirrus_read_io(uint32_t addr, uint32_t val) "addr 0x%x, val 0x%x" +vga_cirrus_write_io(uint32_t addr, uint32_t val) "addr 0x%x, val 0x%x" +vga_cirrus_write_blt(uint32_t offset, uint32_t val) "offset 0x%x, val 0x%x" +vga_cirrus_write_gr(uint8_t index, uint8_t val) "GR addr 0x%02x, val 0x%02x" +vga_cirrus_bitblt_start(uint8_t blt_rop, uint8_t blt_mode, uint8_t blt_modeext, int blt_width, int blt_height, int blt_dstpitch, int blt_srcpitch, uint32_t blt_dstaddr, uint32_t blt_srcaddr, uint8_t gr_val) "rop=0x%02x mode=0x%02x modeext=0x%02x w=%d h=%d dpitch=%d spitch=%d daddr=0x%08"PRIx32" saddr=0x%08"PRIx32" writemask=0x%02x" + +# sii9022.c +sii9022_read_reg(uint8_t addr, uint8_t val) "addr 0x%02x, val 0x%02x" +sii9022_write_reg(uint8_t addr, uint8_t val) "addr 0x%02x, val 0x%02x" +sii9022_switch_mode(const char *mode) "mode: %s" + +# ati.c +ati_mm_read(unsigned int size, uint64_t addr, const char *name, uint64_t val) "%u 0x%"PRIx64 " %s -> 0x%"PRIx64 +ati_mm_write(unsigned int size, uint64_t addr, const char *name, uint64_t val) "%u 0x%"PRIx64 " %s <- 0x%"PRIx64 + +# artist.c +artist_reg_read(unsigned int size, uint64_t addr, const char *name, uint64_t val) "%u 0x%"PRIx64 "%s -> 0x%"PRIx64 +artist_reg_write(unsigned int size, uint64_t addr, const char *name, uint64_t val) "%u 0x%"PRIx64 "%s <- 0x%"PRIx64 +artist_vram_read(unsigned int size, uint64_t addr, int posx, int posy, uint64_t val) "%u 0x%"PRIx64 " %ux%u-> 0x%"PRIx64 +artist_vram_write(unsigned int size, uint64_t addr, uint64_t val) "%u 0x%"PRIx64 " <- 0x%"PRIx64 +artist_fill_window(unsigned int start_x, unsigned int start_y, unsigned int width, unsigned int height, uint32_t op, uint32_t ctlpln) "start=%ux%u length=%ux%u op=0x%08x ctlpln=0x%08x" +artist_block_move(unsigned int start_x, unsigned int start_y, unsigned int dest_x, unsigned int dest_y, unsigned int width, unsigned int height) "source %ux%u -> dest %ux%u size %ux%u" +artist_draw_line(unsigned int start_x, unsigned int start_y, unsigned int end_x, unsigned int end_y) "%ux%u %ux%u" + +# cg3.c +cg3_read(uint32_t addr, uint32_t val, unsigned size) "read addr:0x%06"PRIx32" val:0x%08"PRIx32" size:%u" +cg3_write(uint32_t addr, uint32_t val, unsigned size) "write addr:0x%06"PRIx32" val:0x%08"PRIx32" size:%u" + +# 
dpcd.c +dpcd_read(uint32_t addr, uint8_t val) "read addr:0x%"PRIx32" val:0x%02x" +dpcd_write(uint32_t addr, uint8_t val) "write addr:0x%"PRIx32" val:0x%02x" + +# sm501.c +sm501_system_config_read(uint32_t addr, uint32_t val) "addr=0x%x, val=0x%x" +sm501_system_config_write(uint32_t addr, uint32_t val) "addr=0x%x, val=0x%x" +sm501_i2c_read(uint32_t addr, uint8_t val) "addr=0x%x, val=0x%x" +sm501_i2c_write(uint32_t addr, uint32_t val) "addr=0x%x, val=0x%x" +sm501_palette_read(uint32_t addr) "addr=0x%x" +sm501_palette_write(uint32_t addr, uint32_t val) "addr=0x%x, val=0x%x" +sm501_disp_ctrl_read(uint32_t addr, uint32_t val) "addr=0x%x, val=0x%x" +sm501_disp_ctrl_write(uint32_t addr, uint32_t val) "addr=0x%x, val=0x%x" +sm501_2d_engine_read(uint32_t addr, uint32_t val) "addr=0x%x, val=0x%x" +sm501_2d_engine_write(uint32_t addr, uint32_t val) "addr=0x%x, val=0x%x" + +# macfb.c +macfb_ctrl_read(uint64_t addr, uint64_t value, unsigned int size) "addr 0x%"PRIx64 " value 0x%"PRIx64 " size %u" +macfb_ctrl_write(uint64_t addr, uint64_t value, unsigned int size) "addr 0x%"PRIx64 " value 0x%"PRIx64 " size %u" +macfb_sense_read(uint32_t value) "video sense: 0x%"PRIx32 +macfb_sense_write(uint32_t value) "video sense: 0x%"PRIx32 +macfb_update_mode(uint32_t width, uint32_t height, uint8_t depth) "setting mode to width %"PRId32 " height %"PRId32 " size %d" diff --git a/hw/display/trace.h b/hw/display/trace.h new file mode 100644 index 000000000..4ed0e9165 --- /dev/null +++ b/hw/display/trace.h @@ -0,0 +1 @@ +#include "trace/trace-hw_display.h" diff --git a/hw/display/vga-access.h b/hw/display/vga-access.h new file mode 100644 index 000000000..c0fbd9958 --- /dev/null +++ b/hw/display/vga-access.h @@ -0,0 +1,49 @@ +/* + * QEMU VGA Emulator templates + * + * Copyright (c) 2003 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +static inline uint8_t vga_read_byte(VGACommonState *vga, uint32_t addr) +{ + return vga->vram_ptr[addr & vga->vbe_size_mask]; +} + +static inline uint16_t vga_read_word_le(VGACommonState *vga, uint32_t addr) +{ + uint32_t offset = addr & vga->vbe_size_mask & ~1; + uint16_t *ptr = (uint16_t *)(vga->vram_ptr + offset); + return lduw_le_p(ptr); +} + +static inline uint16_t vga_read_word_be(VGACommonState *vga, uint32_t addr) +{ + uint32_t offset = addr & vga->vbe_size_mask & ~1; + uint16_t *ptr = (uint16_t *)(vga->vram_ptr + offset); + return lduw_be_p(ptr); +} + +static inline uint32_t vga_read_dword_le(VGACommonState *vga, uint32_t addr) +{ + uint32_t offset = addr & vga->vbe_size_mask & ~3; + uint32_t *ptr = (uint32_t *)(vga->vram_ptr + offset); + return ldl_le_p(ptr); +} diff --git a/hw/display/vga-helpers.h b/hw/display/vga-helpers.h new file mode 100644 index 000000000..10e9cfd40 --- /dev/null +++ b/hw/display/vga-helpers.h @@ -0,0 +1,431 @@ +/* + * QEMU VGA Emulator templates + * + * Copyright (c) 2003 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +static inline void vga_draw_glyph_line(uint8_t *d, uint32_t font_data, + uint32_t xorcol, uint32_t bgcol) +{ + ((uint32_t *)d)[0] = (-((font_data >> 7)) & xorcol) ^ bgcol; + ((uint32_t *)d)[1] = (-((font_data >> 6) & 1) & xorcol) ^ bgcol; + ((uint32_t *)d)[2] = (-((font_data >> 5) & 1) & xorcol) ^ bgcol; + ((uint32_t *)d)[3] = (-((font_data >> 4) & 1) & xorcol) ^ bgcol; + ((uint32_t *)d)[4] = (-((font_data >> 3) & 1) & xorcol) ^ bgcol; + ((uint32_t *)d)[5] = (-((font_data >> 2) & 1) & xorcol) ^ bgcol; + ((uint32_t *)d)[6] = (-((font_data >> 1) & 1) & xorcol) ^ bgcol; + ((uint32_t *)d)[7] = (-((font_data >> 0) & 1) & xorcol) ^ bgcol; +} + +static void vga_draw_glyph8(uint8_t *d, int linesize, + const uint8_t *font_ptr, int h, + uint32_t fgcol, uint32_t bgcol) +{ + uint32_t font_data, xorcol; + + xorcol = bgcol ^ fgcol; + do { + font_data = font_ptr[0]; + vga_draw_glyph_line(d, font_data, xorcol, bgcol); + font_ptr += 4; + d += linesize; + } while (--h); +} + +static void vga_draw_glyph16(uint8_t *d, int linesize, + const uint8_t *font_ptr, int h, + uint32_t fgcol, uint32_t bgcol) +{ + uint32_t font_data, xorcol; + + xorcol = bgcol ^ fgcol; + do { + font_data = font_ptr[0]; + vga_draw_glyph_line(d, expand4to8[font_data >> 4], + xorcol, bgcol); + vga_draw_glyph_line(d + 32, expand4to8[font_data & 0x0f], + xorcol, bgcol); + font_ptr += 4; + d += linesize; + } while (--h); +} + +static void vga_draw_glyph9(uint8_t *d, int linesize, + const uint8_t *font_ptr, int h, + uint32_t fgcol, uint32_t bgcol, int dup9) +{ + uint32_t font_data, xorcol, v; + + xorcol = bgcol ^ fgcol; + do { + font_data = font_ptr[0]; + ((uint32_t *)d)[0] = (-((font_data >> 7)) & xorcol) ^ bgcol; + ((uint32_t *)d)[1] = (-((font_data >> 6) & 1) & xorcol) ^ bgcol; + ((uint32_t *)d)[2] = (-((font_data >> 5) & 1) & xorcol) ^ bgcol; + ((uint32_t *)d)[3] = (-((font_data >> 4) & 1) & xorcol) ^ bgcol; + ((uint32_t *)d)[4] = (-((font_data >> 3) & 1) & xorcol) ^ bgcol; + ((uint32_t *)d)[5] = (-((font_data >> 2) & 1) & xorcol) ^ bgcol; + ((uint32_t *)d)[6] = (-((font_data >> 1) & 1) & xorcol) ^ bgcol; + v = (-((font_data >> 0) & 1) & xorcol) ^ bgcol; + ((uint32_t *)d)[7] = v; + if (dup9) + ((uint32_t *)d)[8] = v; + else + ((uint32_t *)d)[8] = bgcol; + font_ptr += 4; + d += linesize; + } while (--h); +} + +/* + * 4 color mode + */ +static void vga_draw_line2(VGACommonState *vga, uint8_t *d, + uint32_t addr, int width) +{ + uint32_t plane_mask, *palette, data, v; + int x; + + palette = vga->last_palette; + plane_mask = mask16[vga->ar[VGA_ATC_PLANE_ENABLE] & 0xf]; + width >>= 3; + for(x = 0; x < width; x++) { + data = vga_read_dword_le(vga, addr); + data &= plane_mask; + v = expand2[GET_PLANE(data, 0)]; + v |= expand2[GET_PLANE(data, 2)] << 2; + ((uint32_t *)d)[0] = palette[v >> 12]; + ((uint32_t *)d)[1] = palette[(v >> 8) & 0xf]; + ((uint32_t *)d)[2] = palette[(v >> 4) & 0xf]; + ((uint32_t *)d)[3] = palette[(v >> 0) & 0xf]; + + v = expand2[GET_PLANE(data, 1)]; + v |= expand2[GET_PLANE(data, 3)] << 2; + ((uint32_t *)d)[4] = palette[v >> 12]; + ((uint32_t *)d)[5] = palette[(v >> 8) & 0xf]; + ((uint32_t *)d)[6] = palette[(v >> 4) & 0xf]; + ((uint32_t *)d)[7] = palette[(v >> 0) & 0xf]; + d += 32; + addr += 4; + } +} + +#define PUT_PIXEL2(d, n, v) \ +((uint32_t *)d)[2*(n)] = ((uint32_t *)d)[2*(n)+1] = (v) + +/* + * 4 color mode, dup2 horizontal + */ +static void vga_draw_line2d2(VGACommonState *vga, uint8_t *d, + uint32_t addr, int width) +{ + uint32_t plane_mask, *palette, data, v; + int x; + + palette = vga->last_palette; + 
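+ /*
+  * Note (illustrative): mask16[] widens the 4-bit plane-enable field into
+  * a per-plane byte mask (0xff for each enabled plane), so the
+  * "data &= plane_mask" below zeroes the bytes of disabled planes before
+  * the expand2[]/GET_PLANE() bit-spreading step.
+  */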
plane_mask = mask16[vga->ar[VGA_ATC_PLANE_ENABLE] & 0xf]; + width >>= 3; + for(x = 0; x < width; x++) { + data = vga_read_dword_le(vga, addr); + data &= plane_mask; + v = expand2[GET_PLANE(data, 0)]; + v |= expand2[GET_PLANE(data, 2)] << 2; + PUT_PIXEL2(d, 0, palette[v >> 12]); + PUT_PIXEL2(d, 1, palette[(v >> 8) & 0xf]); + PUT_PIXEL2(d, 2, palette[(v >> 4) & 0xf]); + PUT_PIXEL2(d, 3, palette[(v >> 0) & 0xf]); + + v = expand2[GET_PLANE(data, 1)]; + v |= expand2[GET_PLANE(data, 3)] << 2; + PUT_PIXEL2(d, 4, palette[v >> 12]); + PUT_PIXEL2(d, 5, palette[(v >> 8) & 0xf]); + PUT_PIXEL2(d, 6, palette[(v >> 4) & 0xf]); + PUT_PIXEL2(d, 7, palette[(v >> 0) & 0xf]); + d += 64; + addr += 4; + } +} + +/* + * 16 color mode + */ +static void vga_draw_line4(VGACommonState *vga, uint8_t *d, + uint32_t addr, int width) +{ + uint32_t plane_mask, data, v, *palette; + int x; + + palette = vga->last_palette; + plane_mask = mask16[vga->ar[VGA_ATC_PLANE_ENABLE] & 0xf]; + width >>= 3; + for(x = 0; x < width; x++) { + data = vga_read_dword_le(vga, addr); + data &= plane_mask; + v = expand4[GET_PLANE(data, 0)]; + v |= expand4[GET_PLANE(data, 1)] << 1; + v |= expand4[GET_PLANE(data, 2)] << 2; + v |= expand4[GET_PLANE(data, 3)] << 3; + ((uint32_t *)d)[0] = palette[v >> 28]; + ((uint32_t *)d)[1] = palette[(v >> 24) & 0xf]; + ((uint32_t *)d)[2] = palette[(v >> 20) & 0xf]; + ((uint32_t *)d)[3] = palette[(v >> 16) & 0xf]; + ((uint32_t *)d)[4] = palette[(v >> 12) & 0xf]; + ((uint32_t *)d)[5] = palette[(v >> 8) & 0xf]; + ((uint32_t *)d)[6] = palette[(v >> 4) & 0xf]; + ((uint32_t *)d)[7] = palette[(v >> 0) & 0xf]; + d += 32; + addr += 4; + } +} + +/* + * 16 color mode, dup2 horizontal + */ +static void vga_draw_line4d2(VGACommonState *vga, uint8_t *d, + uint32_t addr, int width) +{ + uint32_t plane_mask, data, v, *palette; + int x; + + palette = vga->last_palette; + plane_mask = mask16[vga->ar[VGA_ATC_PLANE_ENABLE] & 0xf]; + width >>= 3; + for(x = 0; x < width; x++) { + data = vga_read_dword_le(vga, addr); + data &= plane_mask; + v = expand4[GET_PLANE(data, 0)]; + v |= expand4[GET_PLANE(data, 1)] << 1; + v |= expand4[GET_PLANE(data, 2)] << 2; + v |= expand4[GET_PLANE(data, 3)] << 3; + PUT_PIXEL2(d, 0, palette[v >> 28]); + PUT_PIXEL2(d, 1, palette[(v >> 24) & 0xf]); + PUT_PIXEL2(d, 2, palette[(v >> 20) & 0xf]); + PUT_PIXEL2(d, 3, palette[(v >> 16) & 0xf]); + PUT_PIXEL2(d, 4, palette[(v >> 12) & 0xf]); + PUT_PIXEL2(d, 5, palette[(v >> 8) & 0xf]); + PUT_PIXEL2(d, 6, palette[(v >> 4) & 0xf]); + PUT_PIXEL2(d, 7, palette[(v >> 0) & 0xf]); + d += 64; + addr += 4; + } +} + +/* + * 256 color mode, double pixels + * + * XXX: add plane_mask support (never used in standard VGA modes) + */ +static void vga_draw_line8d2(VGACommonState *vga, uint8_t *d, + uint32_t addr, int width) +{ + uint32_t *palette; + int x; + + palette = vga->last_palette; + width >>= 3; + for(x = 0; x < width; x++) { + PUT_PIXEL2(d, 0, palette[vga_read_byte(vga, addr + 0)]); + PUT_PIXEL2(d, 1, palette[vga_read_byte(vga, addr + 1)]); + PUT_PIXEL2(d, 2, palette[vga_read_byte(vga, addr + 2)]); + PUT_PIXEL2(d, 3, palette[vga_read_byte(vga, addr + 3)]); + d += 32; + addr += 4; + } +} + +/* + * standard 256 color mode + * + * XXX: add plane_mask support (never used in standard VGA modes) + */ +static void vga_draw_line8(VGACommonState *vga, uint8_t *d, + uint32_t addr, int width) +{ + uint32_t *palette; + int x; + + palette = vga->last_palette; + width >>= 3; + for(x = 0; x < width; x++) { + ((uint32_t *)d)[0] = palette[vga_read_byte(vga, addr + 0)]; + ((uint32_t *)d)[1] 
= palette[vga_read_byte(vga, addr + 1)]; + ((uint32_t *)d)[2] = palette[vga_read_byte(vga, addr + 2)]; + ((uint32_t *)d)[3] = palette[vga_read_byte(vga, addr + 3)]; + ((uint32_t *)d)[4] = palette[vga_read_byte(vga, addr + 4)]; + ((uint32_t *)d)[5] = palette[vga_read_byte(vga, addr + 5)]; + ((uint32_t *)d)[6] = palette[vga_read_byte(vga, addr + 6)]; + ((uint32_t *)d)[7] = palette[vga_read_byte(vga, addr + 7)]; + d += 32; + addr += 8; + } +} + +/* + * 15 bit color + */ +static void vga_draw_line15_le(VGACommonState *vga, uint8_t *d, + uint32_t addr, int width) +{ + int w; + uint32_t v, r, g, b; + + w = width; + do { + v = vga_read_word_le(vga, addr); + r = (v >> 7) & 0xf8; + g = (v >> 2) & 0xf8; + b = (v << 3) & 0xf8; + ((uint32_t *)d)[0] = rgb_to_pixel32(r, g, b); + addr += 2; + d += 4; + } while (--w != 0); +} + +static void vga_draw_line15_be(VGACommonState *vga, uint8_t *d, + uint32_t addr, int width) +{ + int w; + uint32_t v, r, g, b; + + w = width; + do { + v = vga_read_word_be(vga, addr); + r = (v >> 7) & 0xf8; + g = (v >> 2) & 0xf8; + b = (v << 3) & 0xf8; + ((uint32_t *)d)[0] = rgb_to_pixel32(r, g, b); + addr += 2; + d += 4; + } while (--w != 0); +} + +/* + * 16 bit color + */ +static void vga_draw_line16_le(VGACommonState *vga, uint8_t *d, + uint32_t addr, int width) +{ + int w; + uint32_t v, r, g, b; + + w = width; + do { + v = vga_read_word_le(vga, addr); + r = (v >> 8) & 0xf8; + g = (v >> 3) & 0xfc; + b = (v << 3) & 0xf8; + ((uint32_t *)d)[0] = rgb_to_pixel32(r, g, b); + addr += 2; + d += 4; + } while (--w != 0); +} + +static void vga_draw_line16_be(VGACommonState *vga, uint8_t *d, + uint32_t addr, int width) +{ + int w; + uint32_t v, r, g, b; + + w = width; + do { + v = vga_read_word_be(vga, addr); + r = (v >> 8) & 0xf8; + g = (v >> 3) & 0xfc; + b = (v << 3) & 0xf8; + ((uint32_t *)d)[0] = rgb_to_pixel32(r, g, b); + addr += 2; + d += 4; + } while (--w != 0); +} + +/* + * 24 bit color + */ +static void vga_draw_line24_le(VGACommonState *vga, uint8_t *d, + uint32_t addr, int width) +{ + int w; + uint32_t r, g, b; + + w = width; + do { + b = vga_read_byte(vga, addr + 0); + g = vga_read_byte(vga, addr + 1); + r = vga_read_byte(vga, addr + 2); + ((uint32_t *)d)[0] = rgb_to_pixel32(r, g, b); + addr += 3; + d += 4; + } while (--w != 0); +} + +static void vga_draw_line24_be(VGACommonState *vga, uint8_t *d, + uint32_t addr, int width) +{ + int w; + uint32_t r, g, b; + + w = width; + do { + r = vga_read_byte(vga, addr + 0); + g = vga_read_byte(vga, addr + 1); + b = vga_read_byte(vga, addr + 2); + ((uint32_t *)d)[0] = rgb_to_pixel32(r, g, b); + addr += 3; + d += 4; + } while (--w != 0); +} + +/* + * 32 bit color + */ +static void vga_draw_line32_le(VGACommonState *vga, uint8_t *d, + uint32_t addr, int width) +{ + int w; + uint32_t r, g, b; + + w = width; + do { + b = vga_read_byte(vga, addr + 0); + g = vga_read_byte(vga, addr + 1); + r = vga_read_byte(vga, addr + 2); + ((uint32_t *)d)[0] = rgb_to_pixel32(r, g, b); + addr += 4; + d += 4; + } while (--w != 0); +} + +static void vga_draw_line32_be(VGACommonState *vga, uint8_t *d, + uint32_t addr, int width) +{ + int w; + uint32_t r, g, b; + + w = width; + do { + r = vga_read_byte(vga, addr + 1); + g = vga_read_byte(vga, addr + 2); + b = vga_read_byte(vga, addr + 3); + ((uint32_t *)d)[0] = rgb_to_pixel32(r, g, b); + addr += 4; + d += 4; + } while (--w != 0); +} diff --git a/hw/display/vga-isa-mm.c b/hw/display/vga-isa-mm.c new file mode 100644 index 000000000..7321b7a06 --- /dev/null +++ b/hw/display/vga-isa-mm.c @@ -0,0 +1,114 @@ +/* + * QEMU 
ISA MM VGA Emulator. + * + * Copyright (c) 2003 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "qemu/bitops.h" +#include "qemu/units.h" +#include "migration/vmstate.h" +#include "hw/display/vga.h" +#include "vga_int.h" +#include "ui/pixel_ops.h" + +#define VGA_RAM_SIZE (8 * MiB) + +typedef struct ISAVGAMMState { + VGACommonState vga; + int it_shift; +} ISAVGAMMState; + +/* Memory mapped interface */ +static uint64_t vga_mm_read(void *opaque, hwaddr addr, unsigned size) +{ + ISAVGAMMState *s = opaque; + + return vga_ioport_read(&s->vga, addr >> s->it_shift) & + MAKE_64BIT_MASK(0, size * 8); +} + +static void vga_mm_write(void *opaque, hwaddr addr, uint64_t value, + unsigned size) +{ + ISAVGAMMState *s = opaque; + + vga_ioport_write(&s->vga, addr >> s->it_shift, + value & MAKE_64BIT_MASK(0, size * 8)); +} + +static const MemoryRegionOps vga_mm_ctrl_ops = { + .read = vga_mm_read, + .write = vga_mm_write, + .valid.min_access_size = 1, + .valid.max_access_size = 4, + .impl.min_access_size = 1, + .impl.max_access_size = 4, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void vga_mm_init(ISAVGAMMState *s, hwaddr vram_base, + hwaddr ctrl_base, int it_shift, + MemoryRegion *address_space) +{ + MemoryRegion *s_ioport_ctrl, *vga_io_memory; + + s->it_shift = it_shift; + s_ioport_ctrl = g_malloc(sizeof(*s_ioport_ctrl)); + memory_region_init_io(s_ioport_ctrl, NULL, &vga_mm_ctrl_ops, s, + "vga-mm-ctrl", 0x100000); + memory_region_set_flush_coalesced(s_ioport_ctrl); + + vga_io_memory = g_malloc(sizeof(*vga_io_memory)); + /* XXX: endianness? 
*/ + memory_region_init_io(vga_io_memory, NULL, &vga_mem_ops, &s->vga, + "vga-mem", 0x20000); + + vmstate_register(NULL, 0, &vmstate_vga_common, s); + + memory_region_add_subregion(address_space, ctrl_base, s_ioport_ctrl); + s->vga.bank_offset = 0; + memory_region_add_subregion(address_space, + vram_base + 0x000a0000, vga_io_memory); + memory_region_set_coalescing(vga_io_memory); +} + +int isa_vga_mm_init(hwaddr vram_base, + hwaddr ctrl_base, int it_shift, + MemoryRegion *address_space) +{ + ISAVGAMMState *s; + + s = g_malloc0(sizeof(*s)); + + s->vga.vram_size_mb = VGA_RAM_SIZE / MiB; + s->vga.global_vmstate = true; + vga_common_init(&s->vga, NULL); + vga_mm_init(s, vram_base, ctrl_base, it_shift, address_space); + + s->vga.con = graphic_console_init(NULL, 0, s->vga.hw_ops, s); + + memory_region_add_subregion(address_space, + VBE_DISPI_LFB_PHYSICAL_ADDRESS, + &s->vga.vram); + + return 0; +} diff --git a/hw/display/vga-isa.c b/hw/display/vga-isa.c new file mode 100644 index 000000000..90851e730 --- /dev/null +++ b/hw/display/vga-isa.c @@ -0,0 +1,115 @@ +/* + * QEMU ISA VGA Emulator. + * + * see docs/specs/standard-vga.txt for virtual hardware specs. + * + * Copyright (c) 2003 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include "hw/isa/isa.h" +#include "vga_int.h" +#include "ui/pixel_ops.h" +#include "qemu/module.h" +#include "qemu/timer.h" +#include "hw/loader.h" +#include "hw/qdev-properties.h" +#include "qom/object.h" + +#define TYPE_ISA_VGA "isa-vga" +OBJECT_DECLARE_SIMPLE_TYPE(ISAVGAState, ISA_VGA) + +struct ISAVGAState { + ISADevice parent_obj; + + struct VGACommonState state; + PortioList portio_vga; + PortioList portio_vbe; +}; + +static void vga_isa_reset(DeviceState *dev) +{ + ISAVGAState *d = ISA_VGA(dev); + VGACommonState *s = &d->state; + + vga_common_reset(s); +} + +static void vga_isa_realizefn(DeviceState *dev, Error **errp) +{ + ISADevice *isadev = ISA_DEVICE(dev); + ISAVGAState *d = ISA_VGA(dev); + VGACommonState *s = &d->state; + MemoryRegion *vga_io_memory; + const MemoryRegionPortio *vga_ports, *vbe_ports; + + s->global_vmstate = true; + vga_common_init(s, OBJECT(dev)); + s->legacy_address_space = isa_address_space(isadev); + vga_io_memory = vga_init_io(s, OBJECT(dev), &vga_ports, &vbe_ports); + isa_register_portio_list(isadev, &d->portio_vga, + 0x3b0, vga_ports, s, "vga"); + if (vbe_ports) { + isa_register_portio_list(isadev, &d->portio_vbe, + 0x1ce, vbe_ports, s, "vbe"); + } + memory_region_add_subregion_overlap(isa_address_space(isadev), + 0x000a0000, + vga_io_memory, 1); + memory_region_set_coalescing(vga_io_memory); + s->con = graphic_console_init(dev, 0, s->hw_ops, s); + + memory_region_add_subregion(isa_address_space(isadev), + VBE_DISPI_LFB_PHYSICAL_ADDRESS, + &s->vram); + /* ROM BIOS */ + rom_add_vga(VGABIOS_FILENAME); +} + +static Property vga_isa_properties[] = { + DEFINE_PROP_UINT32("vgamem_mb", ISAVGAState, state.vram_size_mb, 8), + DEFINE_PROP_END_OF_LIST(), +}; + +static void vga_isa_class_initfn(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = vga_isa_realizefn; + dc->reset = vga_isa_reset; + dc->vmsd = &vmstate_vga_common; + device_class_set_props(dc, vga_isa_properties); + set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories); +} + +static const TypeInfo vga_isa_info = { + .name = TYPE_ISA_VGA, + .parent = TYPE_ISA_DEVICE, + .instance_size = sizeof(ISAVGAState), + .class_init = vga_isa_class_initfn, +}; + +static void vga_isa_register_types(void) +{ + type_register_static(&vga_isa_info); +} + +type_init(vga_isa_register_types) diff --git a/hw/display/vga-pci.c b/hw/display/vga-pci.c new file mode 100644 index 000000000..62fb5c38c --- /dev/null +++ b/hw/display/vga-pci.c @@ -0,0 +1,420 @@ +/* + * QEMU PCI VGA Emulator. + * + * see docs/specs/standard-vga.txt for virtual hardware specs. + * + * Copyright (c) 2003 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "hw/pci/pci.h" +#include "hw/qdev-properties.h" +#include "migration/vmstate.h" +#include "vga_int.h" +#include "ui/pixel_ops.h" +#include "qemu/module.h" +#include "qemu/timer.h" +#include "hw/loader.h" +#include "hw/display/edid.h" +#include "qom/object.h" + +enum vga_pci_flags { + PCI_VGA_FLAG_ENABLE_MMIO = 1, + PCI_VGA_FLAG_ENABLE_QEXT = 2, + PCI_VGA_FLAG_ENABLE_EDID = 3, +}; + +struct PCIVGAState { + PCIDevice dev; + VGACommonState vga; + uint32_t flags; + qemu_edid_info edid_info; + MemoryRegion mmio; + MemoryRegion mrs[4]; + uint8_t edid[384]; +}; + +#define TYPE_PCI_VGA "pci-vga" +OBJECT_DECLARE_SIMPLE_TYPE(PCIVGAState, PCI_VGA) + +static const VMStateDescription vmstate_vga_pci = { + .name = "vga", + .version_id = 2, + .minimum_version_id = 2, + .fields = (VMStateField[]) { + VMSTATE_PCI_DEVICE(dev, PCIVGAState), + VMSTATE_STRUCT(vga, PCIVGAState, 0, vmstate_vga_common, VGACommonState), + VMSTATE_END_OF_LIST() + } +}; + +static uint64_t pci_vga_ioport_read(void *ptr, hwaddr addr, + unsigned size) +{ + VGACommonState *s = ptr; + uint64_t ret = 0; + + switch (size) { + case 1: + ret = vga_ioport_read(s, addr + 0x3c0); + break; + case 2: + ret = vga_ioport_read(s, addr + 0x3c0); + ret |= vga_ioport_read(s, addr + 0x3c1) << 8; + break; + } + return ret; +} + +static void pci_vga_ioport_write(void *ptr, hwaddr addr, + uint64_t val, unsigned size) +{ + VGACommonState *s = ptr; + + switch (size) { + case 1: + vga_ioport_write(s, addr + 0x3c0, val); + break; + case 2: + /* + * Update bytes in little endian order. Allows to update + * indexed registers with a single word write because the + * index byte is updated first. + */ + vga_ioport_write(s, addr + 0x3c0, val & 0xff); + vga_ioport_write(s, addr + 0x3c1, (val >> 8) & 0xff); + break; + } +} + +static const MemoryRegionOps pci_vga_ioport_ops = { + .read = pci_vga_ioport_read, + .write = pci_vga_ioport_write, + .valid.min_access_size = 1, + .valid.max_access_size = 4, + .impl.min_access_size = 1, + .impl.max_access_size = 2, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static uint64_t pci_vga_bochs_read(void *ptr, hwaddr addr, + unsigned size) +{ + VGACommonState *s = ptr; + int index = addr >> 1; + + vbe_ioport_write_index(s, 0, index); + return vbe_ioport_read_data(s, 0); +} + +static void pci_vga_bochs_write(void *ptr, hwaddr addr, + uint64_t val, unsigned size) +{ + VGACommonState *s = ptr; + int index = addr >> 1; + + vbe_ioport_write_index(s, 0, index); + vbe_ioport_write_data(s, 0, val); +} + +static const MemoryRegionOps pci_vga_bochs_ops = { + .read = pci_vga_bochs_read, + .write = pci_vga_bochs_write, + .valid.min_access_size = 1, + .valid.max_access_size = 4, + .impl.min_access_size = 2, + .impl.max_access_size = 2, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static uint64_t pci_vga_qext_read(void *ptr, hwaddr addr, unsigned size) +{ + VGACommonState *s = ptr; + + switch (addr) { + case PCI_VGA_QEXT_REG_SIZE: + return PCI_VGA_QEXT_SIZE; + case PCI_VGA_QEXT_REG_BYTEORDER: + return s->big_endian_fb ? 
+ PCI_VGA_QEXT_BIG_ENDIAN : PCI_VGA_QEXT_LITTLE_ENDIAN; + default: + return 0; + } +} + +static void pci_vga_qext_write(void *ptr, hwaddr addr, + uint64_t val, unsigned size) +{ + VGACommonState *s = ptr; + + switch (addr) { + case PCI_VGA_QEXT_REG_BYTEORDER: + if (val == PCI_VGA_QEXT_BIG_ENDIAN) { + s->big_endian_fb = true; + } + if (val == PCI_VGA_QEXT_LITTLE_ENDIAN) { + s->big_endian_fb = false; + } + break; + } +} + +static bool vga_get_big_endian_fb(Object *obj, Error **errp) +{ + PCIVGAState *d = PCI_VGA(PCI_DEVICE(obj)); + + return d->vga.big_endian_fb; +} + +static void vga_set_big_endian_fb(Object *obj, bool value, Error **errp) +{ + PCIVGAState *d = PCI_VGA(PCI_DEVICE(obj)); + + d->vga.big_endian_fb = value; +} + +static const MemoryRegionOps pci_vga_qext_ops = { + .read = pci_vga_qext_read, + .write = pci_vga_qext_write, + .valid.min_access_size = 4, + .valid.max_access_size = 4, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +void pci_std_vga_mmio_region_init(VGACommonState *s, + Object *owner, + MemoryRegion *parent, + MemoryRegion *subs, + bool qext, bool edid) +{ + PCIVGAState *d = container_of(s, PCIVGAState, vga); + + memory_region_init_io(&subs[0], owner, &pci_vga_ioport_ops, s, + "vga ioports remapped", PCI_VGA_IOPORT_SIZE); + memory_region_add_subregion(parent, PCI_VGA_IOPORT_OFFSET, + &subs[0]); + + memory_region_init_io(&subs[1], owner, &pci_vga_bochs_ops, s, + "bochs dispi interface", PCI_VGA_BOCHS_SIZE); + memory_region_add_subregion(parent, PCI_VGA_BOCHS_OFFSET, + &subs[1]); + + if (qext) { + memory_region_init_io(&subs[2], owner, &pci_vga_qext_ops, s, + "qemu extended regs", PCI_VGA_QEXT_SIZE); + memory_region_add_subregion(parent, PCI_VGA_QEXT_OFFSET, + &subs[2]); + } + + if (edid) { + qemu_edid_generate(d->edid, sizeof(d->edid), &d->edid_info); + qemu_edid_region_io(&subs[3], owner, d->edid, sizeof(d->edid)); + memory_region_add_subregion(parent, 0, &subs[3]); + } +} + +static void pci_std_vga_realize(PCIDevice *dev, Error **errp) +{ + PCIVGAState *d = PCI_VGA(dev); + VGACommonState *s = &d->vga; + bool qext = false; + bool edid = false; + + /* vga + console init */ + vga_common_init(s, OBJECT(dev)); + vga_init(s, OBJECT(dev), pci_address_space(dev), pci_address_space_io(dev), + true); + + s->con = graphic_console_init(DEVICE(dev), 0, s->hw_ops, s); + + /* XXX: VGA_RAM_SIZE must be a power of two */ + pci_register_bar(&d->dev, 0, PCI_BASE_ADDRESS_MEM_PREFETCH, &s->vram); + + /* mmio bar for vga register access */ + if (d->flags & (1 << PCI_VGA_FLAG_ENABLE_MMIO)) { + memory_region_init_io(&d->mmio, OBJECT(dev), &unassigned_io_ops, NULL, + "vga.mmio", PCI_VGA_MMIO_SIZE); + + if (d->flags & (1 << PCI_VGA_FLAG_ENABLE_QEXT)) { + qext = true; + pci_set_byte(&d->dev.config[PCI_REVISION_ID], 2); + } + if (d->flags & (1 << PCI_VGA_FLAG_ENABLE_EDID)) { + edid = true; + } + pci_std_vga_mmio_region_init(s, OBJECT(dev), &d->mmio, d->mrs, + qext, edid); + + pci_register_bar(&d->dev, 2, PCI_BASE_ADDRESS_SPACE_MEMORY, &d->mmio); + } +} + +static void pci_secondary_vga_realize(PCIDevice *dev, Error **errp) +{ + PCIVGAState *d = PCI_VGA(dev); + VGACommonState *s = &d->vga; + bool qext = false; + bool edid = false; + + /* vga + console init */ + vga_common_init(s, OBJECT(dev)); + s->con = graphic_console_init(DEVICE(dev), 0, s->hw_ops, s); + + /* mmio bar */ + memory_region_init_io(&d->mmio, OBJECT(dev), &unassigned_io_ops, NULL, + "vga.mmio", PCI_VGA_MMIO_SIZE); + + if (d->flags & (1 << PCI_VGA_FLAG_ENABLE_QEXT)) { + qext = true; + pci_set_byte(&d->dev.config[PCI_REVISION_ID], 2); + 
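+ /*
+  * Note (illustrative): bumping the PCI revision to 2 is how guest
+  * drivers detect that the QEMU extended registers (the byte-order
+  * control region mapped at PCI_VGA_QEXT_OFFSET) are present; see
+  * docs/specs/standard-vga.txt.
+  */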
} + if (d->flags & (1 << PCI_VGA_FLAG_ENABLE_EDID)) { + edid = true; + } + pci_std_vga_mmio_region_init(s, OBJECT(dev), &d->mmio, d->mrs, qext, edid); + + pci_register_bar(&d->dev, 0, PCI_BASE_ADDRESS_MEM_PREFETCH, &s->vram); + pci_register_bar(&d->dev, 2, PCI_BASE_ADDRESS_SPACE_MEMORY, &d->mmio); +} + +static void pci_secondary_vga_exit(PCIDevice *dev) +{ + PCIVGAState *d = PCI_VGA(dev); + VGACommonState *s = &d->vga; + + graphic_console_close(s->con); + memory_region_del_subregion(&d->mmio, &d->mrs[0]); + memory_region_del_subregion(&d->mmio, &d->mrs[1]); + if (d->flags & (1 << PCI_VGA_FLAG_ENABLE_QEXT)) { + memory_region_del_subregion(&d->mmio, &d->mrs[2]); + } + if (d->flags & (1 << PCI_VGA_FLAG_ENABLE_EDID)) { + memory_region_del_subregion(&d->mmio, &d->mrs[3]); + } +} + +static void pci_secondary_vga_init(Object *obj) +{ + /* Expose framebuffer byteorder via QOM */ + object_property_add_bool(obj, "big-endian-framebuffer", + vga_get_big_endian_fb, vga_set_big_endian_fb); +} + +static void pci_secondary_vga_reset(DeviceState *dev) +{ + PCIVGAState *d = PCI_VGA(PCI_DEVICE(dev)); + vga_common_reset(&d->vga); +} + +static Property vga_pci_properties[] = { + DEFINE_PROP_UINT32("vgamem_mb", PCIVGAState, vga.vram_size_mb, 16), + DEFINE_PROP_BIT("mmio", PCIVGAState, flags, PCI_VGA_FLAG_ENABLE_MMIO, true), + DEFINE_PROP_BIT("qemu-extended-regs", + PCIVGAState, flags, PCI_VGA_FLAG_ENABLE_QEXT, true), + DEFINE_PROP_BIT("edid", + PCIVGAState, flags, PCI_VGA_FLAG_ENABLE_EDID, true), + DEFINE_EDID_PROPERTIES(PCIVGAState, edid_info), + DEFINE_PROP_BOOL("global-vmstate", PCIVGAState, vga.global_vmstate, false), + DEFINE_PROP_END_OF_LIST(), +}; + +static Property secondary_pci_properties[] = { + DEFINE_PROP_UINT32("vgamem_mb", PCIVGAState, vga.vram_size_mb, 16), + DEFINE_PROP_BIT("qemu-extended-regs", + PCIVGAState, flags, PCI_VGA_FLAG_ENABLE_QEXT, true), + DEFINE_PROP_BIT("edid", + PCIVGAState, flags, PCI_VGA_FLAG_ENABLE_EDID, true), + DEFINE_EDID_PROPERTIES(PCIVGAState, edid_info), + DEFINE_PROP_END_OF_LIST(), +}; + +static void vga_pci_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + + k->vendor_id = PCI_VENDOR_ID_QEMU; + k->device_id = PCI_DEVICE_ID_QEMU_VGA; + dc->vmsd = &vmstate_vga_pci; + set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories); +} + +static const TypeInfo vga_pci_type_info = { + .name = TYPE_PCI_VGA, + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(PCIVGAState), + .abstract = true, + .class_init = vga_pci_class_init, + .interfaces = (InterfaceInfo[]) { + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { }, + }, +}; + +static void vga_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + + k->realize = pci_std_vga_realize; + k->romfile = "vgabios-stdvga.bin"; + k->class_id = PCI_CLASS_DISPLAY_VGA; + device_class_set_props(dc, vga_pci_properties); + dc->hotpluggable = false; + + /* Expose framebuffer byteorder via QOM */ + object_class_property_add_bool(klass, "big-endian-framebuffer", + vga_get_big_endian_fb, vga_set_big_endian_fb); +} + +static void secondary_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + + k->realize = pci_secondary_vga_realize; + k->exit = pci_secondary_vga_exit; + k->class_id = PCI_CLASS_DISPLAY_OTHER; + device_class_set_props(dc, secondary_pci_properties); + dc->reset = pci_secondary_vga_reset; +} + +static 
const TypeInfo vga_info = { + .name = "VGA", + .parent = TYPE_PCI_VGA, + .class_init = vga_class_init, +}; + +static const TypeInfo secondary_info = { + .name = "secondary-vga", + .parent = TYPE_PCI_VGA, + .instance_init = pci_secondary_vga_init, + .class_init = secondary_class_init, +}; + +static void vga_register_types(void) +{ + type_register_static(&vga_pci_type_info); + type_register_static(&vga_info); + type_register_static(&secondary_info); +} + +type_init(vga_register_types) diff --git a/hw/display/vga.c b/hw/display/vga.c new file mode 100644 index 000000000..9d1f66af4 --- /dev/null +++ b/hw/display/vga.c @@ -0,0 +1,2305 @@ +/* + * QEMU VGA Emulator. + * + * Copyright (c) 2003 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "sysemu/reset.h" +#include "qapi/error.h" +#include "hw/display/vga.h" +#include "hw/pci/pci.h" +#include "vga_int.h" +#include "vga_regs.h" +#include "ui/pixel_ops.h" +#include "qemu/timer.h" +#include "hw/xen/xen.h" +#include "migration/vmstate.h" +#include "trace.h" + +//#define DEBUG_VGA_MEM +//#define DEBUG_VGA_REG + +bool have_vga = true; + +/* 16 state changes per vertical frame @60 Hz */ +#define VGA_TEXT_CURSOR_PERIOD_MS (1000 * 2 * 16 / 60) + +/* + * Video Graphics Array (VGA) + * + * Chipset docs for original IBM VGA: + * http://www.mcamafia.de/pdf/ibm_vgaxga_trm2.pdf + * + * FreeVGA site: + * http://www.osdever.net/FreeVGA/home.htm + * + * Standard VGA features and Bochs VBE extensions are implemented. 
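+ * The Bochs dispi (VBE) registers are programmed through the 0x1ce index + * and 0x1cf/0x1d0 data I/O ports (see vbe_portio_list below).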
+ */ + +/* force some bits to zero */ +const uint8_t sr_mask[8] = { + 0x03, + 0x3d, + 0x0f, + 0x3f, + 0x0e, + 0x00, + 0x00, + 0xff, +}; + +const uint8_t gr_mask[16] = { + 0x0f, /* 0x00 */ + 0x0f, /* 0x01 */ + 0x0f, /* 0x02 */ + 0x1f, /* 0x03 */ + 0x03, /* 0x04 */ + 0x7b, /* 0x05 */ + 0x0f, /* 0x06 */ + 0x0f, /* 0x07 */ + 0xff, /* 0x08 */ + 0x00, /* 0x09 */ + 0x00, /* 0x0a */ + 0x00, /* 0x0b */ + 0x00, /* 0x0c */ + 0x00, /* 0x0d */ + 0x00, /* 0x0e */ + 0x00, /* 0x0f */ +}; + +#define cbswap_32(__x) \ +((uint32_t)( \ + (((uint32_t)(__x) & (uint32_t)0x000000ffUL) << 24) | \ + (((uint32_t)(__x) & (uint32_t)0x0000ff00UL) << 8) | \ + (((uint32_t)(__x) & (uint32_t)0x00ff0000UL) >> 8) | \ + (((uint32_t)(__x) & (uint32_t)0xff000000UL) >> 24) )) + +#ifdef HOST_WORDS_BIGENDIAN +#define PAT(x) cbswap_32(x) +#else +#define PAT(x) (x) +#endif + +#ifdef HOST_WORDS_BIGENDIAN +#define BIG 1 +#else +#define BIG 0 +#endif + +#ifdef HOST_WORDS_BIGENDIAN +#define GET_PLANE(data, p) (((data) >> (24 - (p) * 8)) & 0xff) +#else +#define GET_PLANE(data, p) (((data) >> ((p) * 8)) & 0xff) +#endif + +static const uint32_t mask16[16] = { + PAT(0x00000000), + PAT(0x000000ff), + PAT(0x0000ff00), + PAT(0x0000ffff), + PAT(0x00ff0000), + PAT(0x00ff00ff), + PAT(0x00ffff00), + PAT(0x00ffffff), + PAT(0xff000000), + PAT(0xff0000ff), + PAT(0xff00ff00), + PAT(0xff00ffff), + PAT(0xffff0000), + PAT(0xffff00ff), + PAT(0xffffff00), + PAT(0xffffffff), +}; + +#undef PAT + +#ifdef HOST_WORDS_BIGENDIAN +#define PAT(x) (x) +#else +#define PAT(x) cbswap_32(x) +#endif + +static uint32_t expand4[256]; +static uint16_t expand2[256]; +static uint8_t expand4to8[16]; + +static void vbe_update_vgaregs(VGACommonState *s); + +static inline bool vbe_enabled(VGACommonState *s) +{ + return s->vbe_regs[VBE_DISPI_INDEX_ENABLE] & VBE_DISPI_ENABLED; +} + +static inline uint8_t sr(VGACommonState *s, int idx) +{ + return vbe_enabled(s) ? 
s->sr_vbe[idx] : s->sr[idx]; +} + +static void vga_update_memory_access(VGACommonState *s) +{ + hwaddr base, offset, size; + + if (s->legacy_address_space == NULL) { + return; + } + + if (s->has_chain4_alias) { + memory_region_del_subregion(s->legacy_address_space, &s->chain4_alias); + object_unparent(OBJECT(&s->chain4_alias)); + s->has_chain4_alias = false; + s->plane_updated = 0xf; + } + if ((sr(s, VGA_SEQ_PLANE_WRITE) & VGA_SR02_ALL_PLANES) == + VGA_SR02_ALL_PLANES && sr(s, VGA_SEQ_MEMORY_MODE) & VGA_SR04_CHN_4M) { + offset = 0; + switch ((s->gr[VGA_GFX_MISC] >> 2) & 3) { + case 0: + base = 0xa0000; + size = 0x20000; + break; + case 1: + base = 0xa0000; + size = 0x10000; + offset = s->bank_offset; + break; + case 2: + base = 0xb0000; + size = 0x8000; + break; + case 3: + default: + base = 0xb8000; + size = 0x8000; + break; + } + assert(offset + size <= s->vram_size); + memory_region_init_alias(&s->chain4_alias, memory_region_owner(&s->vram), + "vga.chain4", &s->vram, offset, size); + memory_region_add_subregion_overlap(s->legacy_address_space, base, + &s->chain4_alias, 2); + s->has_chain4_alias = true; + } +} + +static void vga_dumb_update_retrace_info(VGACommonState *s) +{ + (void) s; +} + +static void vga_precise_update_retrace_info(VGACommonState *s) +{ + int htotal_chars; + int hretr_start_char; + int hretr_skew_chars; + int hretr_end_char; + + int vtotal_lines; + int vretr_start_line; + int vretr_end_line; + + int dots; +#if 0 + int div2, sldiv2; +#endif + int clocking_mode; + int clock_sel; + const int clk_hz[] = {25175000, 28322000, 25175000, 25175000}; + int64_t chars_per_sec; + struct vga_precise_retrace *r = &s->retrace_info.precise; + + htotal_chars = s->cr[VGA_CRTC_H_TOTAL] + 5; + hretr_start_char = s->cr[VGA_CRTC_H_SYNC_START]; + hretr_skew_chars = (s->cr[VGA_CRTC_H_SYNC_END] >> 5) & 3; + hretr_end_char = s->cr[VGA_CRTC_H_SYNC_END] & 0x1f; + + vtotal_lines = (s->cr[VGA_CRTC_V_TOTAL] | + (((s->cr[VGA_CRTC_OVERFLOW] & 1) | + ((s->cr[VGA_CRTC_OVERFLOW] >> 4) & 2)) << 8)) + 2; + vretr_start_line = s->cr[VGA_CRTC_V_SYNC_START] | + ((((s->cr[VGA_CRTC_OVERFLOW] >> 2) & 1) | + ((s->cr[VGA_CRTC_OVERFLOW] >> 6) & 2)) << 8); + vretr_end_line = s->cr[VGA_CRTC_V_SYNC_END] & 0xf; + + clocking_mode = (sr(s, VGA_SEQ_CLOCK_MODE) >> 3) & 1; + clock_sel = (s->msr >> 2) & 3; + dots = (s->msr & 1) ? 
8 : 9; + + chars_per_sec = clk_hz[clock_sel] / dots; + + htotal_chars <<= clocking_mode; + + r->total_chars = vtotal_lines * htotal_chars; + if (r->freq) { + r->ticks_per_char = NANOSECONDS_PER_SECOND / (r->total_chars * r->freq); + } else { + r->ticks_per_char = NANOSECONDS_PER_SECOND / chars_per_sec; + } + + r->vstart = vretr_start_line; + r->vend = r->vstart + vretr_end_line + 1; + + r->hstart = hretr_start_char + hretr_skew_chars; + r->hend = r->hstart + hretr_end_char + 1; + r->htotal = htotal_chars; + +#if 0 + div2 = (s->cr[VGA_CRTC_MODE] >> 2) & 1; + sldiv2 = (s->cr[VGA_CRTC_MODE] >> 3) & 1; + printf ( + "hz=%f\n" + "htotal = %d\n" + "hretr_start = %d\n" + "hretr_skew = %d\n" + "hretr_end = %d\n" + "vtotal = %d\n" + "vretr_start = %d\n" + "vretr_end = %d\n" + "div2 = %d sldiv2 = %d\n" + "clocking_mode = %d\n" + "clock_sel = %d %d\n" + "dots = %d\n" + "ticks/char = %" PRId64 "\n" + "\n", + (double) NANOSECONDS_PER_SECOND / (r->ticks_per_char * r->total_chars), + htotal_chars, + hretr_start_char, + hretr_skew_chars, + hretr_end_char, + vtotal_lines, + vretr_start_line, + vretr_end_line, + div2, sldiv2, + clocking_mode, + clock_sel, + clk_hz[clock_sel], + dots, + r->ticks_per_char + ); +#endif +} + +static uint8_t vga_precise_retrace(VGACommonState *s) +{ + struct vga_precise_retrace *r = &s->retrace_info.precise; + uint8_t val = s->st01 & ~(ST01_V_RETRACE | ST01_DISP_ENABLE); + + if (r->total_chars) { + int cur_line, cur_line_char, cur_char; + int64_t cur_tick; + + cur_tick = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + + cur_char = (cur_tick / r->ticks_per_char) % r->total_chars; + cur_line = cur_char / r->htotal; + + if (cur_line >= r->vstart && cur_line <= r->vend) { + val |= ST01_V_RETRACE | ST01_DISP_ENABLE; + } else { + cur_line_char = cur_char % r->htotal; + if (cur_line_char >= r->hstart && cur_line_char <= r->hend) { + val |= ST01_DISP_ENABLE; + } + } + + return val; + } else { + return s->st01 ^ (ST01_V_RETRACE | ST01_DISP_ENABLE); + } +} + +static uint8_t vga_dumb_retrace(VGACommonState *s) +{ + return s->st01 ^ (ST01_V_RETRACE | ST01_DISP_ENABLE); +} + +int vga_ioport_invalid(VGACommonState *s, uint32_t addr) +{ + if (s->msr & VGA_MIS_COLOR) { + /* Color */ + return (addr >= 0x3b0 && addr <= 0x3bf); + } else { + /* Monochrome */ + return (addr >= 0x3d0 && addr <= 0x3df); + } +} + +uint32_t vga_ioport_read(void *opaque, uint32_t addr) +{ + VGACommonState *s = opaque; + int val, index; + + if (vga_ioport_invalid(s, addr)) { + val = 0xff; + } else { + switch(addr) { + case VGA_ATT_W: + if (s->ar_flip_flop == 0) { + val = s->ar_index; + } else { + val = 0; + } + break; + case VGA_ATT_R: + index = s->ar_index & 0x1f; + if (index < VGA_ATT_C) { + val = s->ar[index]; + } else { + val = 0; + } + break; + case VGA_MIS_W: + val = s->st00; + break; + case VGA_SEQ_I: + val = s->sr_index; + break; + case VGA_SEQ_D: + val = s->sr[s->sr_index]; +#ifdef DEBUG_VGA_REG + printf("vga: read SR%x = 0x%02x\n", s->sr_index, val); +#endif + break; + case VGA_PEL_IR: + val = s->dac_state; + break; + case VGA_PEL_IW: + val = s->dac_write_index; + break; + case VGA_PEL_D: + val = s->palette[s->dac_read_index * 3 + s->dac_sub_index]; + if (++s->dac_sub_index == 3) { + s->dac_sub_index = 0; + s->dac_read_index++; + } + break; + case VGA_FTC_R: + val = s->fcr; + break; + case VGA_MIS_R: + val = s->msr; + break; + case VGA_GFX_I: + val = s->gr_index; + break; + case VGA_GFX_D: + val = s->gr[s->gr_index]; +#ifdef DEBUG_VGA_REG + printf("vga: read GR%x = 0x%02x\n", s->gr_index, val); +#endif + break; + case 
VGA_CRT_IM: + case VGA_CRT_IC: + val = s->cr_index; + break; + case VGA_CRT_DM: + case VGA_CRT_DC: + val = s->cr[s->cr_index]; +#ifdef DEBUG_VGA_REG + printf("vga: read CR%x = 0x%02x\n", s->cr_index, val); +#endif + break; + case VGA_IS1_RM: + case VGA_IS1_RC: + /* just toggle to fool polling */ + val = s->st01 = s->retrace(s); + s->ar_flip_flop = 0; + break; + default: + val = 0x00; + break; + } + } + trace_vga_std_read_io(addr, val); + return val; +} + +void vga_ioport_write(void *opaque, uint32_t addr, uint32_t val) +{ + VGACommonState *s = opaque; + int index; + + /* check port range access depending on color/monochrome mode */ + if (vga_ioport_invalid(s, addr)) { + return; + } + trace_vga_std_write_io(addr, val); + + switch(addr) { + case VGA_ATT_W: + if (s->ar_flip_flop == 0) { + val &= 0x3f; + s->ar_index = val; + } else { + index = s->ar_index & 0x1f; + switch(index) { + case VGA_ATC_PALETTE0 ... VGA_ATC_PALETTEF: + s->ar[index] = val & 0x3f; + break; + case VGA_ATC_MODE: + s->ar[index] = val & ~0x10; + break; + case VGA_ATC_OVERSCAN: + s->ar[index] = val; + break; + case VGA_ATC_PLANE_ENABLE: + s->ar[index] = val & ~0xc0; + break; + case VGA_ATC_PEL: + s->ar[index] = val & ~0xf0; + break; + case VGA_ATC_COLOR_PAGE: + s->ar[index] = val & ~0xf0; + break; + default: + break; + } + } + s->ar_flip_flop ^= 1; + break; + case VGA_MIS_W: + s->msr = val & ~0x10; + s->update_retrace_info(s); + break; + case VGA_SEQ_I: + s->sr_index = val & 7; + break; + case VGA_SEQ_D: +#ifdef DEBUG_VGA_REG + printf("vga: write SR%x = 0x%02x\n", s->sr_index, val); +#endif + s->sr[s->sr_index] = val & sr_mask[s->sr_index]; + if (s->sr_index == VGA_SEQ_CLOCK_MODE) { + s->update_retrace_info(s); + } + vga_update_memory_access(s); + break; + case VGA_PEL_IR: + s->dac_read_index = val; + s->dac_sub_index = 0; + s->dac_state = 3; + break; + case VGA_PEL_IW: + s->dac_write_index = val; + s->dac_sub_index = 0; + s->dac_state = 0; + break; + case VGA_PEL_D: + s->dac_cache[s->dac_sub_index] = val; + if (++s->dac_sub_index == 3) { + memcpy(&s->palette[s->dac_write_index * 3], s->dac_cache, 3); + s->dac_sub_index = 0; + s->dac_write_index++; + } + break; + case VGA_GFX_I: + s->gr_index = val & 0x0f; + break; + case VGA_GFX_D: +#ifdef DEBUG_VGA_REG + printf("vga: write GR%x = 0x%02x\n", s->gr_index, val); +#endif + s->gr[s->gr_index] = val & gr_mask[s->gr_index]; + vbe_update_vgaregs(s); + vga_update_memory_access(s); + break; + case VGA_CRT_IM: + case VGA_CRT_IC: + s->cr_index = val; + break; + case VGA_CRT_DM: + case VGA_CRT_DC: +#ifdef DEBUG_VGA_REG + printf("vga: write CR%x = 0x%02x\n", s->cr_index, val); +#endif + /* handle CR0-7 protection */ + if ((s->cr[VGA_CRTC_V_SYNC_END] & VGA_CR11_LOCK_CR0_CR7) && + s->cr_index <= VGA_CRTC_OVERFLOW) { + /* can always write bit 4 of CR7 */ + if (s->cr_index == VGA_CRTC_OVERFLOW) { + s->cr[VGA_CRTC_OVERFLOW] = (s->cr[VGA_CRTC_OVERFLOW] & ~0x10) | + (val & 0x10); + vbe_update_vgaregs(s); + } + return; + } + s->cr[s->cr_index] = val; + vbe_update_vgaregs(s); + + switch(s->cr_index) { + case VGA_CRTC_H_TOTAL: + case VGA_CRTC_H_SYNC_START: + case VGA_CRTC_H_SYNC_END: + case VGA_CRTC_V_TOTAL: + case VGA_CRTC_OVERFLOW: + case VGA_CRTC_V_SYNC_END: + case VGA_CRTC_MODE: + s->update_retrace_info(s); + break; + } + break; + case VGA_IS1_RM: + case VGA_IS1_RC: + s->fcr = val & 0x10; + break; + } +} + +/* + * Sanity check vbe register writes. 
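+ * (called from vbe_ioport_write_data() after guest register updates)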
+ * + * As we don't have a way to signal errors to the guest in the bochs + * dispi interface we'll go adjust the registers to the closest valid + * value. + */ +static void vbe_fixup_regs(VGACommonState *s) +{ + uint16_t *r = s->vbe_regs; + uint32_t bits, linelength, maxy, offset; + + if (!vbe_enabled(s)) { + /* vbe is turned off -- nothing to do */ + return; + } + + /* check depth */ + switch (r[VBE_DISPI_INDEX_BPP]) { + case 4: + case 8: + case 16: + case 24: + case 32: + bits = r[VBE_DISPI_INDEX_BPP]; + break; + case 15: + bits = 16; + break; + default: + bits = r[VBE_DISPI_INDEX_BPP] = 8; + break; + } + + /* check width */ + r[VBE_DISPI_INDEX_XRES] &= ~7u; + if (r[VBE_DISPI_INDEX_XRES] == 0) { + r[VBE_DISPI_INDEX_XRES] = 8; + } + if (r[VBE_DISPI_INDEX_XRES] > VBE_DISPI_MAX_XRES) { + r[VBE_DISPI_INDEX_XRES] = VBE_DISPI_MAX_XRES; + } + r[VBE_DISPI_INDEX_VIRT_WIDTH] &= ~7u; + if (r[VBE_DISPI_INDEX_VIRT_WIDTH] > VBE_DISPI_MAX_XRES) { + r[VBE_DISPI_INDEX_VIRT_WIDTH] = VBE_DISPI_MAX_XRES; + } + if (r[VBE_DISPI_INDEX_VIRT_WIDTH] < r[VBE_DISPI_INDEX_XRES]) { + r[VBE_DISPI_INDEX_VIRT_WIDTH] = r[VBE_DISPI_INDEX_XRES]; + } + + /* check height */ + linelength = r[VBE_DISPI_INDEX_VIRT_WIDTH] * bits / 8; + maxy = s->vbe_size / linelength; + if (r[VBE_DISPI_INDEX_YRES] == 0) { + r[VBE_DISPI_INDEX_YRES] = 1; + } + if (r[VBE_DISPI_INDEX_YRES] > VBE_DISPI_MAX_YRES) { + r[VBE_DISPI_INDEX_YRES] = VBE_DISPI_MAX_YRES; + } + if (r[VBE_DISPI_INDEX_YRES] > maxy) { + r[VBE_DISPI_INDEX_YRES] = maxy; + } + + /* check offset */ + if (r[VBE_DISPI_INDEX_X_OFFSET] > VBE_DISPI_MAX_XRES) { + r[VBE_DISPI_INDEX_X_OFFSET] = VBE_DISPI_MAX_XRES; + } + if (r[VBE_DISPI_INDEX_Y_OFFSET] > VBE_DISPI_MAX_YRES) { + r[VBE_DISPI_INDEX_Y_OFFSET] = VBE_DISPI_MAX_YRES; + } + offset = r[VBE_DISPI_INDEX_X_OFFSET] * bits / 8; + offset += r[VBE_DISPI_INDEX_Y_OFFSET] * linelength; + if (offset + r[VBE_DISPI_INDEX_YRES] * linelength > s->vbe_size) { + r[VBE_DISPI_INDEX_Y_OFFSET] = 0; + offset = r[VBE_DISPI_INDEX_X_OFFSET] * bits / 8; + if (offset + r[VBE_DISPI_INDEX_YRES] * linelength > s->vbe_size) { + r[VBE_DISPI_INDEX_X_OFFSET] = 0; + offset = 0; + } + } + + /* update vga state */ + r[VBE_DISPI_INDEX_VIRT_HEIGHT] = maxy; + s->vbe_line_offset = linelength; + s->vbe_start_addr = offset / 4; +} + +/* we initialize the VGA graphic mode */ +static void vbe_update_vgaregs(VGACommonState *s) +{ + int h, shift_control; + + if (!vbe_enabled(s)) { + /* vbe is turned off -- nothing to do */ + return; + } + + /* graphic mode + memory map 1 */ + s->gr[VGA_GFX_MISC] = (s->gr[VGA_GFX_MISC] & ~0x0c) | 0x04 | + VGA_GR06_GRAPHICS_MODE; + s->cr[VGA_CRTC_MODE] |= 3; /* no CGA modes */ + s->cr[VGA_CRTC_OFFSET] = s->vbe_line_offset >> 3; + /* width */ + s->cr[VGA_CRTC_H_DISP] = + (s->vbe_regs[VBE_DISPI_INDEX_XRES] >> 3) - 1; + /* height (only meaningful if < 1024) */ + h = s->vbe_regs[VBE_DISPI_INDEX_YRES] - 1; + s->cr[VGA_CRTC_V_DISP_END] = h; + s->cr[VGA_CRTC_OVERFLOW] = (s->cr[VGA_CRTC_OVERFLOW] & ~0x42) | + ((h >> 7) & 0x02) | ((h >> 3) & 0x40); + /* line compare to 1023 */ + s->cr[VGA_CRTC_LINE_COMPARE] = 0xff; + s->cr[VGA_CRTC_OVERFLOW] |= 0x10; + s->cr[VGA_CRTC_MAX_SCAN] |= 0x40; + + if (s->vbe_regs[VBE_DISPI_INDEX_BPP] == 4) { + shift_control = 0; + s->sr_vbe[VGA_SEQ_CLOCK_MODE] &= ~8; /* no double line */ + } else { + shift_control = 2; + /* set chain 4 mode */ + s->sr_vbe[VGA_SEQ_MEMORY_MODE] |= VGA_SR04_CHN_4M; + /* activate all planes */ + s->sr_vbe[VGA_SEQ_PLANE_WRITE] |= VGA_SR02_ALL_PLANES; + } + s->gr[VGA_GFX_MODE] = (s->gr[VGA_GFX_MODE] & 
~0x60) | + (shift_control << 5); + s->cr[VGA_CRTC_MAX_SCAN] &= ~0x9f; /* no double scan */ +} + +static uint32_t vbe_ioport_read_index(void *opaque, uint32_t addr) +{ + VGACommonState *s = opaque; + return s->vbe_index; +} + +uint32_t vbe_ioport_read_data(void *opaque, uint32_t addr) +{ + VGACommonState *s = opaque; + uint32_t val; + + if (s->vbe_index < VBE_DISPI_INDEX_NB) { + if (s->vbe_regs[VBE_DISPI_INDEX_ENABLE] & VBE_DISPI_GETCAPS) { + switch(s->vbe_index) { + /* XXX: do not hardcode ? */ + case VBE_DISPI_INDEX_XRES: + val = VBE_DISPI_MAX_XRES; + break; + case VBE_DISPI_INDEX_YRES: + val = VBE_DISPI_MAX_YRES; + break; + case VBE_DISPI_INDEX_BPP: + val = VBE_DISPI_MAX_BPP; + break; + default: + val = s->vbe_regs[s->vbe_index]; + break; + } + } else { + val = s->vbe_regs[s->vbe_index]; + } + } else if (s->vbe_index == VBE_DISPI_INDEX_VIDEO_MEMORY_64K) { + val = s->vbe_size / (64 * KiB); + } else { + val = 0; + } + trace_vga_vbe_read(s->vbe_index, val); + return val; +} + +void vbe_ioport_write_index(void *opaque, uint32_t addr, uint32_t val) +{ + VGACommonState *s = opaque; + s->vbe_index = val; +} + +void vbe_ioport_write_data(void *opaque, uint32_t addr, uint32_t val) +{ + VGACommonState *s = opaque; + + if (s->vbe_index <= VBE_DISPI_INDEX_NB) { + trace_vga_vbe_write(s->vbe_index, val); + switch(s->vbe_index) { + case VBE_DISPI_INDEX_ID: + if (val == VBE_DISPI_ID0 || + val == VBE_DISPI_ID1 || + val == VBE_DISPI_ID2 || + val == VBE_DISPI_ID3 || + val == VBE_DISPI_ID4 || + val == VBE_DISPI_ID5) { + s->vbe_regs[s->vbe_index] = val; + } + break; + case VBE_DISPI_INDEX_XRES: + case VBE_DISPI_INDEX_YRES: + case VBE_DISPI_INDEX_BPP: + case VBE_DISPI_INDEX_VIRT_WIDTH: + case VBE_DISPI_INDEX_X_OFFSET: + case VBE_DISPI_INDEX_Y_OFFSET: + s->vbe_regs[s->vbe_index] = val; + vbe_fixup_regs(s); + vbe_update_vgaregs(s); + break; + case VBE_DISPI_INDEX_BANK: + val &= s->vbe_bank_mask; + s->vbe_regs[s->vbe_index] = val; + s->bank_offset = (val << 16); + vga_update_memory_access(s); + break; + case VBE_DISPI_INDEX_ENABLE: + if ((val & VBE_DISPI_ENABLED) && + !(s->vbe_regs[VBE_DISPI_INDEX_ENABLE] & VBE_DISPI_ENABLED)) { + + s->vbe_regs[VBE_DISPI_INDEX_VIRT_WIDTH] = 0; + s->vbe_regs[VBE_DISPI_INDEX_X_OFFSET] = 0; + s->vbe_regs[VBE_DISPI_INDEX_Y_OFFSET] = 0; + s->vbe_regs[VBE_DISPI_INDEX_ENABLE] |= VBE_DISPI_ENABLED; + vbe_fixup_regs(s); + vbe_update_vgaregs(s); + + /* clear the screen */ + if (!(val & VBE_DISPI_NOCLEARMEM)) { + memset(s->vram_ptr, 0, + s->vbe_regs[VBE_DISPI_INDEX_YRES] * s->vbe_line_offset); + } + } else { + s->bank_offset = 0; + } + s->dac_8bit = (val & VBE_DISPI_8BIT_DAC) > 0; + s->vbe_regs[s->vbe_index] = val; + vga_update_memory_access(s); + break; + default: + break; + } + } +} + +/* called for accesses between 0xa0000 and 0xc0000 */ +uint32_t vga_mem_readb(VGACommonState *s, hwaddr addr) +{ + int memory_map_mode, plane; + uint32_t ret; + + /* convert to VGA memory offset */ + memory_map_mode = (s->gr[VGA_GFX_MISC] >> 2) & 3; + addr &= 0x1ffff; + switch(memory_map_mode) { + case 0: + break; + case 1: + if (addr >= 0x10000) + return 0xff; + addr += s->bank_offset; + break; + case 2: + addr -= 0x10000; + if (addr >= 0x8000) + return 0xff; + break; + default: + case 3: + addr -= 0x18000; + if (addr >= 0x8000) + return 0xff; + break; + } + + if (sr(s, VGA_SEQ_MEMORY_MODE) & VGA_SR04_CHN_4M) { + /* chain 4 mode : simplest access */ + assert(addr < s->vram_size); + ret = s->vram_ptr[addr]; + } else if (s->gr[VGA_GFX_MODE] & 0x10) { + /* odd/even mode (aka text mode mapping) */ + plane = 
(s->gr[VGA_GFX_PLANE_READ] & 2) | (addr & 1); + addr = ((addr & ~1) << 1) | plane; + if (addr >= s->vram_size) { + return 0xff; + } + ret = s->vram_ptr[addr]; + } else { + /* standard VGA latched access */ + if (addr * sizeof(uint32_t) >= s->vram_size) { + return 0xff; + } + s->latch = ((uint32_t *)s->vram_ptr)[addr]; + + if (!(s->gr[VGA_GFX_MODE] & 0x08)) { + /* read mode 0 */ + plane = s->gr[VGA_GFX_PLANE_READ]; + ret = GET_PLANE(s->latch, plane); + } else { + /* read mode 1 */ + ret = (s->latch ^ mask16[s->gr[VGA_GFX_COMPARE_VALUE]]) & + mask16[s->gr[VGA_GFX_COMPARE_MASK]]; + ret |= ret >> 16; + ret |= ret >> 8; + ret = (~ret) & 0xff; + } + } + return ret; +} + +/* called for accesses between 0xa0000 and 0xc0000 */ +void vga_mem_writeb(VGACommonState *s, hwaddr addr, uint32_t val) +{ + int memory_map_mode, plane, write_mode, b, func_select, mask; + uint32_t write_mask, bit_mask, set_mask; + +#ifdef DEBUG_VGA_MEM + printf("vga: [0x" TARGET_FMT_plx "] = 0x%02x\n", addr, val); +#endif + /* convert to VGA memory offset */ + memory_map_mode = (s->gr[VGA_GFX_MISC] >> 2) & 3; + addr &= 0x1ffff; + switch(memory_map_mode) { + case 0: + break; + case 1: + if (addr >= 0x10000) + return; + addr += s->bank_offset; + break; + case 2: + addr -= 0x10000; + if (addr >= 0x8000) + return; + break; + default: + case 3: + addr -= 0x18000; + if (addr >= 0x8000) + return; + break; + } + + if (sr(s, VGA_SEQ_MEMORY_MODE) & VGA_SR04_CHN_4M) { + /* chain 4 mode : simplest access */ + plane = addr & 3; + mask = (1 << plane); + if (sr(s, VGA_SEQ_PLANE_WRITE) & mask) { + assert(addr < s->vram_size); + s->vram_ptr[addr] = val; +#ifdef DEBUG_VGA_MEM + printf("vga: chain4: [0x" TARGET_FMT_plx "]\n", addr); +#endif + s->plane_updated |= mask; /* only used to detect font change */ + memory_region_set_dirty(&s->vram, addr, 1); + } + } else if (s->gr[VGA_GFX_MODE] & 0x10) { + /* odd/even mode (aka text mode mapping) */ + plane = (s->gr[VGA_GFX_PLANE_READ] & 2) | (addr & 1); + mask = (1 << plane); + if (sr(s, VGA_SEQ_PLANE_WRITE) & mask) { + addr = ((addr & ~1) << 1) | plane; + if (addr >= s->vram_size) { + return; + } + s->vram_ptr[addr] = val; +#ifdef DEBUG_VGA_MEM + printf("vga: odd/even: [0x" TARGET_FMT_plx "]\n", addr); +#endif + s->plane_updated |= mask; /* only used to detect font change */ + memory_region_set_dirty(&s->vram, addr, 1); + } + } else { + /* standard VGA latched access */ + write_mode = s->gr[VGA_GFX_MODE] & 3; + switch(write_mode) { + default: + case 0: + /* rotate */ + b = s->gr[VGA_GFX_DATA_ROTATE] & 7; + val = ((val >> b) | (val << (8 - b))) & 0xff; + val |= val << 8; + val |= val << 16; + + /* apply set/reset mask */ + set_mask = mask16[s->gr[VGA_GFX_SR_ENABLE]]; + val = (val & ~set_mask) | + (mask16[s->gr[VGA_GFX_SR_VALUE]] & set_mask); + bit_mask = s->gr[VGA_GFX_BIT_MASK]; + break; + case 1: + val = s->latch; + goto do_write; + case 2: + val = mask16[val & 0x0f]; + bit_mask = s->gr[VGA_GFX_BIT_MASK]; + break; + case 3: + /* rotate */ + b = s->gr[VGA_GFX_DATA_ROTATE] & 7; + val = (val >> b) | (val << (8 - b)); + + bit_mask = s->gr[VGA_GFX_BIT_MASK] & val; + val = mask16[s->gr[VGA_GFX_SR_VALUE]]; + break; + } + + /* apply logical operation */ + func_select = s->gr[VGA_GFX_DATA_ROTATE] >> 3; + switch(func_select) { + case 0: + default: + /* nothing to do */ + break; + case 1: + /* and */ + val &= s->latch; + break; + case 2: + /* or */ + val |= s->latch; + break; + case 3: + /* xor */ + val ^= s->latch; + break; + } + + /* apply bit mask */ + bit_mask |= bit_mask << 8; + bit_mask |= bit_mask << 16; 
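+        /* bit_mask now covers all four planes: selected bits take the + * new value, the remaining bits keep the latched data */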
+ val = (val & bit_mask) | (s->latch & ~bit_mask); + + do_write: + /* mask data according to sr[2] */ + mask = sr(s, VGA_SEQ_PLANE_WRITE); + s->plane_updated |= mask; /* only used to detect font change */ + write_mask = mask16[mask]; + if (addr * sizeof(uint32_t) >= s->vram_size) { + return; + } + ((uint32_t *)s->vram_ptr)[addr] = + (((uint32_t *)s->vram_ptr)[addr] & ~write_mask) | + (val & write_mask); +#ifdef DEBUG_VGA_MEM + printf("vga: latch: [0x" TARGET_FMT_plx "] mask=0x%08x val=0x%08x\n", + addr * 4, write_mask, val); +#endif + memory_region_set_dirty(&s->vram, addr << 2, sizeof(uint32_t)); + } +} + +typedef void vga_draw_line_func(VGACommonState *s1, uint8_t *d, + uint32_t srcaddr, int width); + +#include "vga-access.h" +#include "vga-helpers.h" + +/* return true if the palette was modified */ +static int update_palette16(VGACommonState *s) +{ + int full_update, i; + uint32_t v, col, *palette; + + full_update = 0; + palette = s->last_palette; + for(i = 0; i < 16; i++) { + v = s->ar[i]; + if (s->ar[VGA_ATC_MODE] & 0x80) { + v = ((s->ar[VGA_ATC_COLOR_PAGE] & 0xf) << 4) | (v & 0xf); + } else { + v = ((s->ar[VGA_ATC_COLOR_PAGE] & 0xc) << 4) | (v & 0x3f); + } + v = v * 3; + col = rgb_to_pixel32(c6_to_8(s->palette[v]), + c6_to_8(s->palette[v + 1]), + c6_to_8(s->palette[v + 2])); + if (col != palette[i]) { + full_update = 1; + palette[i] = col; + } + } + return full_update; +} + +/* return true if the palette was modified */ +static int update_palette256(VGACommonState *s) +{ + int full_update, i; + uint32_t v, col, *palette; + + full_update = 0; + palette = s->last_palette; + v = 0; + for(i = 0; i < 256; i++) { + if (s->dac_8bit) { + col = rgb_to_pixel32(s->palette[v], + s->palette[v + 1], + s->palette[v + 2]); + } else { + col = rgb_to_pixel32(c6_to_8(s->palette[v]), + c6_to_8(s->palette[v + 1]), + c6_to_8(s->palette[v + 2])); + } + if (col != palette[i]) { + full_update = 1; + palette[i] = col; + } + v += 3; + } + return full_update; +} + +static void vga_get_offsets(VGACommonState *s, + uint32_t *pline_offset, + uint32_t *pstart_addr, + uint32_t *pline_compare) +{ + uint32_t start_addr, line_offset, line_compare; + + if (vbe_enabled(s)) { + line_offset = s->vbe_line_offset; + start_addr = s->vbe_start_addr; + line_compare = 65535; + } else { + /* compute line_offset in bytes */ + line_offset = s->cr[VGA_CRTC_OFFSET]; + line_offset <<= 3; + + /* starting address */ + start_addr = s->cr[VGA_CRTC_START_LO] | + (s->cr[VGA_CRTC_START_HI] << 8); + + /* line compare */ + line_compare = s->cr[VGA_CRTC_LINE_COMPARE] | + ((s->cr[VGA_CRTC_OVERFLOW] & 0x10) << 4) | + ((s->cr[VGA_CRTC_MAX_SCAN] & 0x40) << 3); + } + *pline_offset = line_offset; + *pstart_addr = start_addr; + *pline_compare = line_compare; +} + +/* update start_addr and line_offset. 
Return TRUE if modified */ +static int update_basic_params(VGACommonState *s) +{ + int full_update; + uint32_t start_addr, line_offset, line_compare; + + full_update = 0; + + s->get_offsets(s, &line_offset, &start_addr, &line_compare); + + if (line_offset != s->line_offset || + start_addr != s->start_addr || + line_compare != s->line_compare) { + s->line_offset = line_offset; + s->start_addr = start_addr; + s->line_compare = line_compare; + full_update = 1; + } + return full_update; +} + + +static const uint8_t cursor_glyph[32 * 4] = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, +}; + +static void vga_get_text_resolution(VGACommonState *s, int *pwidth, int *pheight, + int *pcwidth, int *pcheight) +{ + int width, cwidth, height, cheight; + + /* total width & height */ + cheight = (s->cr[VGA_CRTC_MAX_SCAN] & 0x1f) + 1; + cwidth = 8; + if (!(sr(s, VGA_SEQ_CLOCK_MODE) & VGA_SR01_CHAR_CLK_8DOTS)) { + cwidth = 9; + } + if (sr(s, VGA_SEQ_CLOCK_MODE) & 0x08) { + cwidth = 16; /* NOTE: no 18 pixel wide */ + } + width = (s->cr[VGA_CRTC_H_DISP] + 1); + if (s->cr[VGA_CRTC_V_TOTAL] == 100) { + /* ugly hack for CGA 160x100x16 - explain me the logic */ + height = 100; + } else { + height = s->cr[VGA_CRTC_V_DISP_END] | + ((s->cr[VGA_CRTC_OVERFLOW] & 0x02) << 7) | + ((s->cr[VGA_CRTC_OVERFLOW] & 0x40) << 3); + height = (height + 1) / cheight; + } + + *pwidth = width; + *pheight = height; + *pcwidth = cwidth; + *pcheight = cheight; +} + +/* + * Text mode update + * Missing: + * - double scan + * - double width + * - underline + * - flashing + */ +static void vga_draw_text(VGACommonState *s, int full_update) +{ + DisplaySurface *surface = qemu_console_surface(s->con); + int cx, cy, cheight, cw, ch, cattr, height, width, ch_attr; + int cx_min, cx_max, linesize, x_incr, line, line1; + uint32_t offset, fgcol, bgcol, v, cursor_offset; + uint8_t *d1, *d, *src, *dest, *cursor_ptr; + const uint8_t *font_ptr, *font_base[2]; + int dup9, line_offset; + uint32_t *palette; + uint32_t *ch_attr_ptr; + int64_t now = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL); + + /* compute font data address (in plane 2) */ + v = sr(s, VGA_SEQ_CHARACTER_MAP); + offset = (((v >> 4) & 1) | ((v << 1) & 6)) * 8192 * 4 + 2; + if (offset != s->font_offsets[0]) { + s->font_offsets[0] = offset; + full_update = 1; + } + font_base[0] = s->vram_ptr + offset; + + offset = (((v >> 5) & 1) | ((v >> 1) & 6)) * 8192 * 4 + 2; + font_base[1] = s->vram_ptr + offset; + if (offset != s->font_offsets[1]) { + s->font_offsets[1] = offset; + full_update = 1; + } + if (s->plane_updated & (1 << 2) || s->has_chain4_alias) { + /* if the plane 2 was modified since the last display, it + indicates the font may have been modified */ + s->plane_updated = 0; + full_update = 1; + } + full_update |= update_basic_params(s); + + line_offset = 
s->line_offset; + + vga_get_text_resolution(s, &width, &height, &cw, &cheight); + if ((height * width) <= 1) { + /* better than nothing: exit if transient size is too small */ + return; + } + if ((height * width) > CH_ATTR_SIZE) { + /* better than nothing: exit if transient size is too big */ + return; + } + + if (width != s->last_width || height != s->last_height || + cw != s->last_cw || cheight != s->last_ch || s->last_depth) { + s->last_scr_width = width * cw; + s->last_scr_height = height * cheight; + qemu_console_resize(s->con, s->last_scr_width, s->last_scr_height); + surface = qemu_console_surface(s->con); + dpy_text_resize(s->con, width, height); + s->last_depth = 0; + s->last_width = width; + s->last_height = height; + s->last_ch = cheight; + s->last_cw = cw; + full_update = 1; + } + full_update |= update_palette16(s); + palette = s->last_palette; + x_incr = cw * surface_bytes_per_pixel(surface); + + if (full_update) { + s->full_update_text = 1; + } + if (s->full_update_gfx) { + s->full_update_gfx = 0; + full_update |= 1; + } + + cursor_offset = ((s->cr[VGA_CRTC_CURSOR_HI] << 8) | + s->cr[VGA_CRTC_CURSOR_LO]) - s->start_addr; + if (cursor_offset != s->cursor_offset || + s->cr[VGA_CRTC_CURSOR_START] != s->cursor_start || + s->cr[VGA_CRTC_CURSOR_END] != s->cursor_end) { + /* if the cursor position changed, we update the old and new + chars */ + if (s->cursor_offset < CH_ATTR_SIZE) + s->last_ch_attr[s->cursor_offset] = -1; + if (cursor_offset < CH_ATTR_SIZE) + s->last_ch_attr[cursor_offset] = -1; + s->cursor_offset = cursor_offset; + s->cursor_start = s->cr[VGA_CRTC_CURSOR_START]; + s->cursor_end = s->cr[VGA_CRTC_CURSOR_END]; + } + cursor_ptr = s->vram_ptr + (s->start_addr + cursor_offset) * 4; + if (now >= s->cursor_blink_time) { + s->cursor_blink_time = now + VGA_TEXT_CURSOR_PERIOD_MS / 2; + s->cursor_visible_phase = !s->cursor_visible_phase; + } + + dest = surface_data(surface); + linesize = surface_stride(surface); + ch_attr_ptr = s->last_ch_attr; + line = 0; + offset = s->start_addr * 4; + for(cy = 0; cy < height; cy++) { + d1 = dest; + src = s->vram_ptr + offset; + cx_min = width; + cx_max = -1; + for(cx = 0; cx < width; cx++) { + if (src + sizeof(uint16_t) > s->vram_ptr + s->vram_size) { + break; + } + ch_attr = *(uint16_t *)src; + if (full_update || ch_attr != *ch_attr_ptr || src == cursor_ptr) { + if (cx < cx_min) + cx_min = cx; + if (cx > cx_max) + cx_max = cx; + *ch_attr_ptr = ch_attr; +#ifdef HOST_WORDS_BIGENDIAN + ch = ch_attr >> 8; + cattr = ch_attr & 0xff; +#else + ch = ch_attr & 0xff; + cattr = ch_attr >> 8; +#endif + font_ptr = font_base[(cattr >> 3) & 1]; + font_ptr += 32 * 4 * ch; + bgcol = palette[cattr >> 4]; + fgcol = palette[cattr & 0x0f]; + if (cw == 16) { + vga_draw_glyph16(d1, linesize, + font_ptr, cheight, fgcol, bgcol); + } else if (cw != 9) { + vga_draw_glyph8(d1, linesize, + font_ptr, cheight, fgcol, bgcol); + } else { + dup9 = 0; + if (ch >= 0xb0 && ch <= 0xdf && + (s->ar[VGA_ATC_MODE] & 0x04)) { + dup9 = 1; + } + vga_draw_glyph9(d1, linesize, + font_ptr, cheight, fgcol, bgcol, dup9); + } + if (src == cursor_ptr && + !(s->cr[VGA_CRTC_CURSOR_START] & 0x20) && + s->cursor_visible_phase) { + int line_start, line_last, h; + /* draw the cursor */ + line_start = s->cr[VGA_CRTC_CURSOR_START] & 0x1f; + line_last = s->cr[VGA_CRTC_CURSOR_END] & 0x1f; + /* XXX: check that */ + if (line_last > cheight - 1) + line_last = cheight - 1; + if (line_last >= line_start && line_start < cheight) { + h = line_last - line_start + 1; + d = d1 + linesize * line_start; + if (cw 
== 16) { + vga_draw_glyph16(d, linesize, + cursor_glyph, h, fgcol, bgcol); + } else if (cw != 9) { + vga_draw_glyph8(d, linesize, + cursor_glyph, h, fgcol, bgcol); + } else { + vga_draw_glyph9(d, linesize, + cursor_glyph, h, fgcol, bgcol, 1); + } + } + } + } + d1 += x_incr; + src += 4; + ch_attr_ptr++; + } + if (cx_max != -1) { + dpy_gfx_update(s->con, cx_min * cw, cy * cheight, + (cx_max - cx_min + 1) * cw, cheight); + } + dest += linesize * cheight; + line1 = line + cheight; + offset += line_offset; + if (line < s->line_compare && line1 >= s->line_compare) { + offset = 0; + } + line = line1; + } +} + +enum { + VGA_DRAW_LINE2, + VGA_DRAW_LINE2D2, + VGA_DRAW_LINE4, + VGA_DRAW_LINE4D2, + VGA_DRAW_LINE8D2, + VGA_DRAW_LINE8, + VGA_DRAW_LINE15_LE, + VGA_DRAW_LINE16_LE, + VGA_DRAW_LINE24_LE, + VGA_DRAW_LINE32_LE, + VGA_DRAW_LINE15_BE, + VGA_DRAW_LINE16_BE, + VGA_DRAW_LINE24_BE, + VGA_DRAW_LINE32_BE, + VGA_DRAW_LINE_NB, +}; + +static vga_draw_line_func * const vga_draw_line_table[VGA_DRAW_LINE_NB] = { + vga_draw_line2, + vga_draw_line2d2, + vga_draw_line4, + vga_draw_line4d2, + vga_draw_line8d2, + vga_draw_line8, + vga_draw_line15_le, + vga_draw_line16_le, + vga_draw_line24_le, + vga_draw_line32_le, + vga_draw_line15_be, + vga_draw_line16_be, + vga_draw_line24_be, + vga_draw_line32_be, +}; + +static int vga_get_bpp(VGACommonState *s) +{ + int ret; + + if (vbe_enabled(s)) { + ret = s->vbe_regs[VBE_DISPI_INDEX_BPP]; + } else { + ret = 0; + } + return ret; +} + +static void vga_get_resolution(VGACommonState *s, int *pwidth, int *pheight) +{ + int width, height; + + if (vbe_enabled(s)) { + width = s->vbe_regs[VBE_DISPI_INDEX_XRES]; + height = s->vbe_regs[VBE_DISPI_INDEX_YRES]; + } else { + width = (s->cr[VGA_CRTC_H_DISP] + 1) * 8; + height = s->cr[VGA_CRTC_V_DISP_END] | + ((s->cr[VGA_CRTC_OVERFLOW] & 0x02) << 7) | + ((s->cr[VGA_CRTC_OVERFLOW] & 0x40) << 3); + height = (height + 1); + } + *pwidth = width; + *pheight = height; +} + +void vga_invalidate_scanlines(VGACommonState *s, int y1, int y2) +{ + int y; + if (y1 >= VGA_MAX_HEIGHT) + return; + if (y2 >= VGA_MAX_HEIGHT) + y2 = VGA_MAX_HEIGHT; + for(y = y1; y < y2; y++) { + s->invalidated_y_table[y >> 5] |= 1 << (y & 0x1f); + } +} + +static bool vga_scanline_invalidated(VGACommonState *s, int y) +{ + if (y >= VGA_MAX_HEIGHT) { + return false; + } + return s->invalidated_y_table[y >> 5] & (1 << (y & 0x1f)); +} + +void vga_dirty_log_start(VGACommonState *s) +{ + memory_region_set_log(&s->vram, true, DIRTY_MEMORY_VGA); +} + +void vga_dirty_log_stop(VGACommonState *s) +{ + memory_region_set_log(&s->vram, false, DIRTY_MEMORY_VGA); +} + +/* + * graphic modes + */ +static void vga_draw_graphic(VGACommonState *s, int full_update) +{ + DisplaySurface *surface = qemu_console_surface(s->con); + int y1, y, update, linesize, y_start, double_scan, mask, depth; + int width, height, shift_control, bwidth, bits; + ram_addr_t page0, page1, region_start, region_end; + DirtyBitmapSnapshot *snap = NULL; + int disp_width, multi_scan, multi_run; + uint8_t *d; + uint32_t v, addr1, addr; + vga_draw_line_func *vga_draw_line = NULL; + bool share_surface, force_shadow = false; + pixman_format_code_t format; +#ifdef HOST_WORDS_BIGENDIAN + bool byteswap = !s->big_endian_fb; +#else + bool byteswap = s->big_endian_fb; +#endif + + full_update |= update_basic_params(s); + + s->get_resolution(s, &width, &height); + disp_width = width; + depth = s->get_bpp(s); + + region_start = (s->start_addr * 4); + region_end = region_start + (ram_addr_t)s->line_offset * height; + region_end += 
width * depth / 8; /* scanline length */ + region_end -= s->line_offset; + if (region_end > s->vbe_size || depth == 0 || depth == 15) { + /* + * We land here on: + * - wraps around (can happen with cirrus vbe modes) + * - depth == 0 (256 color palette video mode) + * - depth == 15 + * + * Take the safe and slow route: + * - create a dirty bitmap snapshot for all vga memory. + * - force shadowing (so all vga memory access goes + * through vga_read_*() helpers). + * + * Given this affects only vga features which are pretty much + * unused by modern guests there should be no performance + * impact. + */ + region_start = 0; + region_end = s->vbe_size; + force_shadow = true; + } + + shift_control = (s->gr[VGA_GFX_MODE] >> 5) & 3; + double_scan = (s->cr[VGA_CRTC_MAX_SCAN] >> 7); + if (shift_control != 1) { + multi_scan = (((s->cr[VGA_CRTC_MAX_SCAN] & 0x1f) + 1) << double_scan) + - 1; + } else { + /* in CGA modes, multi_scan is ignored */ + /* XXX: is it correct ? */ + multi_scan = double_scan; + } + multi_run = multi_scan; + if (shift_control != s->shift_control || + double_scan != s->double_scan) { + full_update = 1; + s->shift_control = shift_control; + s->double_scan = double_scan; + } + + if (shift_control == 0) { + if (sr(s, VGA_SEQ_CLOCK_MODE) & 8) { + disp_width <<= 1; + } + } else if (shift_control == 1) { + if (sr(s, VGA_SEQ_CLOCK_MODE) & 8) { + disp_width <<= 1; + } + } + + /* + * Check whether we can share the surface with the backend + * or whether we need a shadow surface. We share native + * endian surfaces for 15bpp and above and byteswapped + * surfaces for 24bpp and above. + */ + format = qemu_default_pixman_format(depth, !byteswap); + if (format) { + share_surface = dpy_gfx_check_format(s->con, format) + && !s->force_shadow && !force_shadow; + } else { + share_surface = false; + } + + if (s->line_offset != s->last_line_offset || + disp_width != s->last_width || + height != s->last_height || + s->last_depth != depth || + s->last_byteswap != byteswap || + share_surface != is_buffer_shared(surface)) { + /* display parameters changed -> need new display surface */ + s->last_scr_width = disp_width; + s->last_scr_height = height; + s->last_width = disp_width; + s->last_height = height; + s->last_line_offset = s->line_offset; + s->last_depth = depth; + s->last_byteswap = byteswap; + full_update = 1; + } + if (surface_data(surface) != s->vram_ptr + (s->start_addr * 4) + && is_buffer_shared(surface)) { + /* base address changed (page flip) -> shared display surfaces + * must be updated with the new base address */ + full_update = 1; + } + + if (full_update) { + if (share_surface) { + surface = qemu_create_displaysurface_from(disp_width, + height, format, s->line_offset, + s->vram_ptr + (s->start_addr * 4)); + dpy_gfx_replace_surface(s->con, surface); + } else { + qemu_console_resize(s->con, disp_width, height); + surface = qemu_console_surface(s->con); + } + } + + if (shift_control == 0) { + full_update |= update_palette16(s); + if (sr(s, VGA_SEQ_CLOCK_MODE) & 8) { + v = VGA_DRAW_LINE4D2; + } else { + v = VGA_DRAW_LINE4; + } + bits = 4; + } else if (shift_control == 1) { + full_update |= update_palette16(s); + if (sr(s, VGA_SEQ_CLOCK_MODE) & 8) { + v = VGA_DRAW_LINE2D2; + } else { + v = VGA_DRAW_LINE2; + } + bits = 4; + } else { + switch(s->get_bpp(s)) { + default: + case 0: + full_update |= update_palette256(s); + v = VGA_DRAW_LINE8D2; + bits = 4; + break; + case 8: + full_update |= update_palette256(s); + v = VGA_DRAW_LINE8; + bits = 8; + break; + case 15: + v = s->big_endian_fb ? 
VGA_DRAW_LINE15_BE : VGA_DRAW_LINE15_LE; + bits = 16; + break; + case 16: + v = s->big_endian_fb ? VGA_DRAW_LINE16_BE : VGA_DRAW_LINE16_LE; + bits = 16; + break; + case 24: + v = s->big_endian_fb ? VGA_DRAW_LINE24_BE : VGA_DRAW_LINE24_LE; + bits = 24; + break; + case 32: + v = s->big_endian_fb ? VGA_DRAW_LINE32_BE : VGA_DRAW_LINE32_LE; + bits = 32; + break; + } + } + vga_draw_line = vga_draw_line_table[v]; + + if (!is_buffer_shared(surface) && s->cursor_invalidate) { + s->cursor_invalidate(s); + } + +#if 0 + printf("w=%d h=%d v=%d line_offset=%d cr[0x09]=0x%02x cr[0x17]=0x%02x linecmp=%d sr[0x01]=0x%02x\n", + width, height, v, line_offset, s->cr[9], s->cr[VGA_CRTC_MODE], + s->line_compare, sr(s, VGA_SEQ_CLOCK_MODE)); +#endif + addr1 = (s->start_addr * 4); + bwidth = DIV_ROUND_UP(width * bits, 8); + y_start = -1; + d = surface_data(surface); + linesize = surface_stride(surface); + y1 = 0; + + if (!full_update) { + if (s->line_compare < height) { + /* split screen mode */ + region_start = 0; + } + snap = memory_region_snapshot_and_clear_dirty(&s->vram, region_start, + region_end - region_start, + DIRTY_MEMORY_VGA); + } + + for(y = 0; y < height; y++) { + addr = addr1; + if (!(s->cr[VGA_CRTC_MODE] & 1)) { + int shift; + /* CGA compatibility handling */ + shift = 14 + ((s->cr[VGA_CRTC_MODE] >> 6) & 1); + addr = (addr & ~(1 << shift)) | ((y1 & 1) << shift); + } + if (!(s->cr[VGA_CRTC_MODE] & 2)) { + addr = (addr & ~0x8000) | ((y1 & 2) << 14); + } + page0 = addr & s->vbe_size_mask; + page1 = (addr + bwidth - 1) & s->vbe_size_mask; + if (full_update) { + update = 1; + } else if (page1 < page0) { + /* scanline wraps from end of video memory to the start */ + assert(force_shadow); + update = memory_region_snapshot_get_dirty(&s->vram, snap, + page0, s->vbe_size - page0); + update |= memory_region_snapshot_get_dirty(&s->vram, snap, + 0, page1); + } else { + update = memory_region_snapshot_get_dirty(&s->vram, snap, + page0, page1 - page0); + } + /* explicit invalidation for the hardware cursor (cirrus only) */ + update |= vga_scanline_invalidated(s, y); + if (update) { + if (y_start < 0) + y_start = y; + if (!(is_buffer_shared(surface))) { + vga_draw_line(s, d, addr, width); + if (s->cursor_draw_line) + s->cursor_draw_line(s, d, y); + } + } else { + if (y_start >= 0) { + /* flush to display */ + dpy_gfx_update(s->con, 0, y_start, + disp_width, y - y_start); + y_start = -1; + } + } + if (!multi_run) { + mask = (s->cr[VGA_CRTC_MODE] & 3) ^ 3; + if ((y1 & mask) == mask) + addr1 += s->line_offset; + y1++; + multi_run = multi_scan; + } else { + multi_run--; + } + /* line compare acts on the displayed lines */ + if (y == s->line_compare) + addr1 = 0; + d += linesize; + } + if (y_start >= 0) { + /* flush to display */ + dpy_gfx_update(s->con, 0, y_start, + disp_width, y - y_start); + } + g_free(snap); + memset(s->invalidated_y_table, 0, sizeof(s->invalidated_y_table)); +} + +static void vga_draw_blank(VGACommonState *s, int full_update) +{ + DisplaySurface *surface = qemu_console_surface(s->con); + int i, w; + uint8_t *d; + + if (!full_update) + return; + if (s->last_scr_width <= 0 || s->last_scr_height <= 0) + return; + + w = s->last_scr_width * surface_bytes_per_pixel(surface); + d = surface_data(surface); + for(i = 0; i < s->last_scr_height; i++) { + memset(d, 0, w); + d += surface_stride(surface); + } + dpy_gfx_update_full(s->con); +} + +#define GMODE_TEXT 0 +#define GMODE_GRAPH 1 +#define GMODE_BLANK 2 + +static void vga_update_display(void *opaque) +{ + VGACommonState *s = opaque; + DisplaySurface 
*surface = qemu_console_surface(s->con); + int full_update, graphic_mode; + + qemu_flush_coalesced_mmio_buffer(); + + if (surface_bits_per_pixel(surface) == 0) { + /* nothing to do */ + } else { + full_update = 0; + if (!(s->ar_index & 0x20)) { + graphic_mode = GMODE_BLANK; + } else { + graphic_mode = s->gr[VGA_GFX_MISC] & VGA_GR06_GRAPHICS_MODE; + } + if (graphic_mode != s->graphic_mode) { + s->graphic_mode = graphic_mode; + s->cursor_blink_time = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL); + full_update = 1; + } + switch(graphic_mode) { + case GMODE_TEXT: + vga_draw_text(s, full_update); + break; + case GMODE_GRAPH: + vga_draw_graphic(s, full_update); + break; + case GMODE_BLANK: + default: + vga_draw_blank(s, full_update); + break; + } + } +} + +/* force a full display refresh */ +static void vga_invalidate_display(void *opaque) +{ + VGACommonState *s = opaque; + + s->last_width = -1; + s->last_height = -1; +} + +void vga_common_reset(VGACommonState *s) +{ + s->sr_index = 0; + memset(s->sr, '\0', sizeof(s->sr)); + memset(s->sr_vbe, '\0', sizeof(s->sr_vbe)); + s->gr_index = 0; + memset(s->gr, '\0', sizeof(s->gr)); + s->ar_index = 0; + memset(s->ar, '\0', sizeof(s->ar)); + s->ar_flip_flop = 0; + s->cr_index = 0; + memset(s->cr, '\0', sizeof(s->cr)); + s->msr = 0; + s->fcr = 0; + s->st00 = 0; + s->st01 = 0; + s->dac_state = 0; + s->dac_sub_index = 0; + s->dac_read_index = 0; + s->dac_write_index = 0; + memset(s->dac_cache, '\0', sizeof(s->dac_cache)); + s->dac_8bit = 0; + memset(s->palette, '\0', sizeof(s->palette)); + s->bank_offset = 0; + s->vbe_index = 0; + memset(s->vbe_regs, '\0', sizeof(s->vbe_regs)); + s->vbe_regs[VBE_DISPI_INDEX_ID] = VBE_DISPI_ID5; + s->vbe_start_addr = 0; + s->vbe_line_offset = 0; + s->vbe_bank_mask = (s->vram_size >> 16) - 1; + memset(s->font_offsets, '\0', sizeof(s->font_offsets)); + s->graphic_mode = -1; /* force full update */ + s->shift_control = 0; + s->double_scan = 0; + s->line_offset = 0; + s->line_compare = 0; + s->start_addr = 0; + s->plane_updated = 0; + s->last_cw = 0; + s->last_ch = 0; + s->last_width = 0; + s->last_height = 0; + s->last_scr_width = 0; + s->last_scr_height = 0; + s->cursor_start = 0; + s->cursor_end = 0; + s->cursor_offset = 0; + s->big_endian_fb = s->default_endian_fb; + memset(s->invalidated_y_table, '\0', sizeof(s->invalidated_y_table)); + memset(s->last_palette, '\0', sizeof(s->last_palette)); + memset(s->last_ch_attr, '\0', sizeof(s->last_ch_attr)); + switch (vga_retrace_method) { + case VGA_RETRACE_DUMB: + break; + case VGA_RETRACE_PRECISE: + memset(&s->retrace_info, 0, sizeof (s->retrace_info)); + break; + } + vga_update_memory_access(s); +} + +static void vga_reset(void *opaque) +{ + VGACommonState *s = opaque; + vga_common_reset(s); +} + +#define TEXTMODE_X(x) ((x) % width) +#define TEXTMODE_Y(x) ((x) / width) +#define VMEM2CHTYPE(v) ((v & 0xff0007ff) | \ + ((v & 0x00000800) << 10) | ((v & 0x00007000) >> 1)) +/* relay text rendering to the display driver + * instead of doing a full vga_update_display() */ +static void vga_update_text(void *opaque, console_ch_t *chardata) +{ + VGACommonState *s = opaque; + int graphic_mode, i, cursor_offset, cursor_visible; + int cw, cheight, width, height, size, c_min, c_max; + uint32_t *src; + console_ch_t *dst, val; + char msg_buffer[80]; + int full_update = 0; + + qemu_flush_coalesced_mmio_buffer(); + + if (!(s->ar_index & 0x20)) { + graphic_mode = GMODE_BLANK; + } else { + graphic_mode = s->gr[VGA_GFX_MISC] & VGA_GR06_GRAPHICS_MODE; + } + if (graphic_mode != s->graphic_mode) { + 
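        /* mode switch: force a full repaint of the text console */ +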
s->graphic_mode = graphic_mode; + full_update = 1; + } + if (s->last_width == -1) { + s->last_width = 0; + full_update = 1; + } + + switch (graphic_mode) { + case GMODE_TEXT: + /* TODO: update palette */ + full_update |= update_basic_params(s); + + /* total width & height */ + cheight = (s->cr[VGA_CRTC_MAX_SCAN] & 0x1f) + 1; + cw = 8; + if (!(sr(s, VGA_SEQ_CLOCK_MODE) & VGA_SR01_CHAR_CLK_8DOTS)) { + cw = 9; + } + if (sr(s, VGA_SEQ_CLOCK_MODE) & 0x08) { + cw = 16; /* NOTE: no 18 pixel wide */ + } + width = (s->cr[VGA_CRTC_H_DISP] + 1); + if (s->cr[VGA_CRTC_V_TOTAL] == 100) { + /* ugly hack for CGA 160x100x16 - explain me the logic */ + height = 100; + } else { + height = s->cr[VGA_CRTC_V_DISP_END] | + ((s->cr[VGA_CRTC_OVERFLOW] & 0x02) << 7) | + ((s->cr[VGA_CRTC_OVERFLOW] & 0x40) << 3); + height = (height + 1) / cheight; + } + + size = (height * width); + if (size > CH_ATTR_SIZE) { + if (!full_update) + return; + + snprintf(msg_buffer, sizeof(msg_buffer), "%i x %i Text mode", + width, height); + break; + } + + if (width != s->last_width || height != s->last_height || + cw != s->last_cw || cheight != s->last_ch) { + s->last_scr_width = width * cw; + s->last_scr_height = height * cheight; + qemu_console_resize(s->con, s->last_scr_width, s->last_scr_height); + dpy_text_resize(s->con, width, height); + s->last_depth = 0; + s->last_width = width; + s->last_height = height; + s->last_ch = cheight; + s->last_cw = cw; + full_update = 1; + } + + if (full_update) { + s->full_update_gfx = 1; + } + if (s->full_update_text) { + s->full_update_text = 0; + full_update |= 1; + } + + /* Update "hardware" cursor */ + cursor_offset = ((s->cr[VGA_CRTC_CURSOR_HI] << 8) | + s->cr[VGA_CRTC_CURSOR_LO]) - s->start_addr; + if (cursor_offset != s->cursor_offset || + s->cr[VGA_CRTC_CURSOR_START] != s->cursor_start || + s->cr[VGA_CRTC_CURSOR_END] != s->cursor_end || full_update) { + cursor_visible = !(s->cr[VGA_CRTC_CURSOR_START] & 0x20); + if (cursor_visible && cursor_offset < size && cursor_offset >= 0) + dpy_text_cursor(s->con, + TEXTMODE_X(cursor_offset), + TEXTMODE_Y(cursor_offset)); + else + dpy_text_cursor(s->con, -1, -1); + s->cursor_offset = cursor_offset; + s->cursor_start = s->cr[VGA_CRTC_CURSOR_START]; + s->cursor_end = s->cr[VGA_CRTC_CURSOR_END]; + } + + src = (uint32_t *) s->vram_ptr + s->start_addr; + dst = chardata; + + if (full_update) { + for (i = 0; i < size; src ++, dst ++, i ++) + console_write_ch(dst, VMEM2CHTYPE(le32_to_cpu(*src))); + + dpy_text_update(s->con, 0, 0, width, height); + } else { + c_max = 0; + + for (i = 0; i < size; src ++, dst ++, i ++) { + console_write_ch(&val, VMEM2CHTYPE(le32_to_cpu(*src))); + if (*dst != val) { + *dst = val; + c_max = i; + break; + } + } + c_min = i; + for (; i < size; src ++, dst ++, i ++) { + console_write_ch(&val, VMEM2CHTYPE(le32_to_cpu(*src))); + if (*dst != val) { + *dst = val; + c_max = i; + } + } + + if (c_min <= c_max) { + i = TEXTMODE_Y(c_min); + dpy_text_update(s->con, 0, i, width, TEXTMODE_Y(c_max) - i + 1); + } + } + + return; + case GMODE_GRAPH: + if (!full_update) + return; + + s->get_resolution(s, &width, &height); + snprintf(msg_buffer, sizeof(msg_buffer), "%i x %i Graphic mode", + width, height); + break; + case GMODE_BLANK: + default: + if (!full_update) + return; + + snprintf(msg_buffer, sizeof(msg_buffer), "VGA Blank mode"); + break; + } + + /* Display a message */ + s->last_width = 60; + s->last_height = height = 3; + dpy_text_cursor(s->con, -1, -1); + dpy_text_resize(s->con, s->last_width, height); + + for (dst = chardata, i = 0; i < 
s->last_width * height; i ++) + console_write_ch(dst ++, ' '); + + size = strlen(msg_buffer); + width = (s->last_width - size) / 2; + dst = chardata + s->last_width + width; + for (i = 0; i < size; i ++) + console_write_ch(dst ++, ATTR2CHTYPE(msg_buffer[i], QEMU_COLOR_BLUE, + QEMU_COLOR_BLACK, 1)); + + dpy_text_update(s->con, 0, 0, s->last_width, height); +} + +static uint64_t vga_mem_read(void *opaque, hwaddr addr, + unsigned size) +{ + VGACommonState *s = opaque; + + return vga_mem_readb(s, addr); +} + +static void vga_mem_write(void *opaque, hwaddr addr, + uint64_t data, unsigned size) +{ + VGACommonState *s = opaque; + + vga_mem_writeb(s, addr, data); +} + +const MemoryRegionOps vga_mem_ops = { + .read = vga_mem_read, + .write = vga_mem_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .impl = { + .min_access_size = 1, + .max_access_size = 1, + }, +}; + +static int vga_common_post_load(void *opaque, int version_id) +{ + VGACommonState *s = opaque; + + /* force refresh */ + s->graphic_mode = -1; + vbe_update_vgaregs(s); + vga_update_memory_access(s); + return 0; +} + +static bool vga_endian_state_needed(void *opaque) +{ + VGACommonState *s = opaque; + + /* + * Only send the endian state if it's different from the + * default one, thus ensuring backward compatibility for + * migration of the common case + */ + return s->default_endian_fb != s->big_endian_fb; +} + +static const VMStateDescription vmstate_vga_endian = { + .name = "vga.endian", + .version_id = 1, + .minimum_version_id = 1, + .needed = vga_endian_state_needed, + .fields = (VMStateField[]) { + VMSTATE_BOOL(big_endian_fb, VGACommonState), + VMSTATE_END_OF_LIST() + } +}; + +const VMStateDescription vmstate_vga_common = { + .name = "vga", + .version_id = 2, + .minimum_version_id = 2, + .post_load = vga_common_post_load, + .fields = (VMStateField[]) { + VMSTATE_UINT32(latch, VGACommonState), + VMSTATE_UINT8(sr_index, VGACommonState), + VMSTATE_PARTIAL_BUFFER(sr, VGACommonState, 8), + VMSTATE_UINT8(gr_index, VGACommonState), + VMSTATE_PARTIAL_BUFFER(gr, VGACommonState, 16), + VMSTATE_UINT8(ar_index, VGACommonState), + VMSTATE_BUFFER(ar, VGACommonState), + VMSTATE_INT32(ar_flip_flop, VGACommonState), + VMSTATE_UINT8(cr_index, VGACommonState), + VMSTATE_BUFFER(cr, VGACommonState), + VMSTATE_UINT8(msr, VGACommonState), + VMSTATE_UINT8(fcr, VGACommonState), + VMSTATE_UINT8(st00, VGACommonState), + VMSTATE_UINT8(st01, VGACommonState), + + VMSTATE_UINT8(dac_state, VGACommonState), + VMSTATE_UINT8(dac_sub_index, VGACommonState), + VMSTATE_UINT8(dac_read_index, VGACommonState), + VMSTATE_UINT8(dac_write_index, VGACommonState), + VMSTATE_BUFFER(dac_cache, VGACommonState), + VMSTATE_BUFFER(palette, VGACommonState), + + VMSTATE_INT32(bank_offset, VGACommonState), + VMSTATE_UINT8_EQUAL(is_vbe_vmstate, VGACommonState, NULL), + VMSTATE_UINT16(vbe_index, VGACommonState), + VMSTATE_UINT16_ARRAY(vbe_regs, VGACommonState, VBE_DISPI_INDEX_NB), + VMSTATE_UINT32(vbe_start_addr, VGACommonState), + VMSTATE_UINT32(vbe_line_offset, VGACommonState), + VMSTATE_UINT32(vbe_bank_mask, VGACommonState), + VMSTATE_END_OF_LIST() + }, + .subsections = (const VMStateDescription*[]) { + &vmstate_vga_endian, + NULL + } +}; + +static const GraphicHwOps vga_ops = { + .invalidate = vga_invalidate_display, + .gfx_update = vga_update_display, + .text_update = vga_update_text, +}; + +static inline uint32_t uint_clamp(uint32_t val, uint32_t vmin, uint32_t vmax) +{ + if (val < vmin) { + return vmin; + } + if (val > vmax) { + return vmax; + } + return val; +} + +void 
vga_common_init(VGACommonState *s, Object *obj) +{ + int i, j, v, b; + + for(i = 0;i < 256; i++) { + v = 0; + for(j = 0; j < 8; j++) { + v |= ((i >> j) & 1) << (j * 4); + } + expand4[i] = v; + + v = 0; + for(j = 0; j < 4; j++) { + v |= ((i >> (2 * j)) & 3) << (j * 4); + } + expand2[i] = v; + } + for(i = 0; i < 16; i++) { + v = 0; + for(j = 0; j < 4; j++) { + b = ((i >> j) & 1); + v |= b << (2 * j); + v |= b << (2 * j + 1); + } + expand4to8[i] = v; + } + + s->vram_size_mb = uint_clamp(s->vram_size_mb, 1, 512); + s->vram_size_mb = pow2ceil(s->vram_size_mb); + s->vram_size = s->vram_size_mb * MiB; + + if (!s->vbe_size) { + s->vbe_size = s->vram_size; + } + s->vbe_size_mask = s->vbe_size - 1; + + s->is_vbe_vmstate = 1; + memory_region_init_ram_nomigrate(&s->vram, obj, "vga.vram", s->vram_size, + &error_fatal); + vmstate_register_ram(&s->vram, s->global_vmstate ? NULL : DEVICE(obj)); + xen_register_framebuffer(&s->vram); + s->vram_ptr = memory_region_get_ram_ptr(&s->vram); + s->get_bpp = vga_get_bpp; + s->get_offsets = vga_get_offsets; + s->get_resolution = vga_get_resolution; + s->hw_ops = &vga_ops; + switch (vga_retrace_method) { + case VGA_RETRACE_DUMB: + s->retrace = vga_dumb_retrace; + s->update_retrace_info = vga_dumb_update_retrace_info; + break; + + case VGA_RETRACE_PRECISE: + s->retrace = vga_precise_retrace; + s->update_retrace_info = vga_precise_update_retrace_info; + break; + } + + /* + * Set default fb endian based on target, could probably be turned + * into a device attribute set by the machine/platform to remove + * all target endian dependencies from this file. + */ +#ifdef TARGET_WORDS_BIGENDIAN + s->default_endian_fb = true; +#else + s->default_endian_fb = false; +#endif + vga_dirty_log_start(s); +} + +static const MemoryRegionPortio vga_portio_list[] = { + { 0x04, 2, 1, .read = vga_ioport_read, .write = vga_ioport_write }, /* 3b4 */ + { 0x0a, 1, 1, .read = vga_ioport_read, .write = vga_ioport_write }, /* 3ba */ + { 0x10, 16, 1, .read = vga_ioport_read, .write = vga_ioport_write }, /* 3c0 */ + { 0x24, 2, 1, .read = vga_ioport_read, .write = vga_ioport_write }, /* 3d4 */ + { 0x2a, 1, 1, .read = vga_ioport_read, .write = vga_ioport_write }, /* 3da */ + PORTIO_END_OF_LIST(), +}; + +static const MemoryRegionPortio vbe_portio_list[] = { + { 0, 1, 2, .read = vbe_ioport_read_index, .write = vbe_ioport_write_index }, +# ifdef TARGET_I386 + { 1, 1, 2, .read = vbe_ioport_read_data, .write = vbe_ioport_write_data }, +# endif + { 2, 1, 2, .read = vbe_ioport_read_data, .write = vbe_ioport_write_data }, + PORTIO_END_OF_LIST(), +}; + +/* Used by both ISA and PCI */ +MemoryRegion *vga_init_io(VGACommonState *s, Object *obj, + const MemoryRegionPortio **vga_ports, + const MemoryRegionPortio **vbe_ports) +{ + MemoryRegion *vga_mem; + + *vga_ports = vga_portio_list; + *vbe_ports = vbe_portio_list; + + vga_mem = g_malloc(sizeof(*vga_mem)); + memory_region_init_io(vga_mem, obj, &vga_mem_ops, s, + "vga-lowmem", 0x20000); + memory_region_set_flush_coalesced(vga_mem); + + return vga_mem; +} + +void vga_init(VGACommonState *s, Object *obj, MemoryRegion *address_space, + MemoryRegion *address_space_io, bool init_vga_ports) +{ + MemoryRegion *vga_io_memory; + const MemoryRegionPortio *vga_ports, *vbe_ports; + + qemu_register_reset(vga_reset, s); + + s->bank_offset = 0; + + s->legacy_address_space = address_space; + + vga_io_memory = vga_init_io(s, obj, &vga_ports, &vbe_ports); + memory_region_add_subregion_overlap(address_space, + 0x000a0000, + vga_io_memory, + 1); + 
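    /* the 128 KiB legacy VGA window at 0xa0000 sits above the existing + * memory with priority 1 */ +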
memory_region_set_coalescing(vga_io_memory); + if (init_vga_ports) { + portio_list_init(&s->vga_port_list, obj, vga_ports, s, "vga"); + portio_list_set_flush_coalesced(&s->vga_port_list); + portio_list_add(&s->vga_port_list, address_space_io, 0x3b0); + } + if (vbe_ports) { + portio_list_init(&s->vbe_port_list, obj, vbe_ports, s, "vbe"); + portio_list_add(&s->vbe_port_list, address_space_io, 0x1ce); + } +} diff --git a/hw/display/vga_int.h b/hw/display/vga_int.h new file mode 100644 index 000000000..847e784ca --- /dev/null +++ b/hw/display/vga_int.h @@ -0,0 +1,198 @@ +/* + * QEMU internal VGA defines. + * + * Copyright (c) 2003-2004 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#ifndef HW_VGA_INT_H +#define HW_VGA_INT_H + +#include "exec/ioport.h" +#include "exec/memory.h" +#include "ui/console.h" + +#include "hw/display/bochs-vbe.h" + +#define ST01_V_RETRACE 0x08 +#define ST01_DISP_ENABLE 0x01 + +#define CH_ATTR_SIZE (160 * 100) +#define VGA_MAX_HEIGHT 2048 + +struct vga_precise_retrace { + int64_t ticks_per_char; + int64_t total_chars; + int htotal; + int hstart; + int hend; + int vstart; + int vend; + int freq; +}; + +union vga_retrace { + struct vga_precise_retrace precise; +}; + +struct VGACommonState; +typedef uint8_t (* vga_retrace_fn)(struct VGACommonState *s); +typedef void (* vga_update_retrace_info_fn)(struct VGACommonState *s); + +typedef struct VGACommonState { + MemoryRegion *legacy_address_space; + uint8_t *vram_ptr; + MemoryRegion vram; + uint32_t vram_size; + uint32_t vram_size_mb; /* property */ + uint32_t vbe_size; + uint32_t vbe_size_mask; + uint32_t latch; + bool has_chain4_alias; + MemoryRegion chain4_alias; + uint8_t sr_index; + uint8_t sr[256]; + uint8_t sr_vbe[256]; + uint8_t gr_index; + uint8_t gr[256]; + uint8_t ar_index; + uint8_t ar[21]; + int ar_flip_flop; + uint8_t cr_index; + uint8_t cr[256]; /* CRT registers */ + uint8_t msr; /* Misc Output Register */ + uint8_t fcr; /* Feature Control Register */ + uint8_t st00; /* status 0 */ + uint8_t st01; /* status 1 */ + uint8_t dac_state; + uint8_t dac_sub_index; + uint8_t dac_read_index; + uint8_t dac_write_index; + uint8_t dac_cache[3]; /* used when writing */ + int dac_8bit; + uint8_t palette[768]; + int32_t bank_offset; + int (*get_bpp)(struct VGACommonState *s); + void (*get_offsets)(struct VGACommonState *s, + uint32_t *pline_offset, + uint32_t *pstart_addr, + uint32_t *pline_compare); + void (*get_resolution)(struct VGACommonState *s, + int *pwidth, + int 
*pheight); + PortioList vga_port_list; + PortioList vbe_port_list; + /* bochs vbe state */ + uint16_t vbe_index; + uint16_t vbe_regs[VBE_DISPI_INDEX_NB]; + uint32_t vbe_start_addr; + uint32_t vbe_line_offset; + uint32_t vbe_bank_mask; + /* display refresh support */ + QemuConsole *con; + uint32_t font_offsets[2]; + int graphic_mode; + uint8_t shift_control; + uint8_t double_scan; + uint32_t line_offset; + uint32_t line_compare; + uint32_t start_addr; + uint32_t plane_updated; + uint32_t last_line_offset; + uint8_t last_cw, last_ch; + uint32_t last_width, last_height; /* in chars or pixels */ + uint32_t last_scr_width, last_scr_height; /* in pixels */ + uint32_t last_depth; /* in bits */ + bool last_byteswap; + bool force_shadow; + uint8_t cursor_start, cursor_end; + bool cursor_visible_phase; + int64_t cursor_blink_time; + uint32_t cursor_offset; + const GraphicHwOps *hw_ops; + bool full_update_text; + bool full_update_gfx; + bool big_endian_fb; + bool default_endian_fb; + bool global_vmstate; + /* hardware mouse cursor support */ + uint32_t invalidated_y_table[VGA_MAX_HEIGHT / 32]; + uint32_t hw_cursor_x; + uint32_t hw_cursor_y; + void (*cursor_invalidate)(struct VGACommonState *s); + void (*cursor_draw_line)(struct VGACommonState *s, uint8_t *d, int y); + /* tell for each page if it has been updated since the last time */ + uint32_t last_palette[256]; + uint32_t last_ch_attr[CH_ATTR_SIZE]; /* XXX: make it dynamic */ + /* retrace */ + vga_retrace_fn retrace; + vga_update_retrace_info_fn update_retrace_info; + union vga_retrace retrace_info; + uint8_t is_vbe_vmstate; +} VGACommonState; + +static inline int c6_to_8(int v) +{ + int b; + v &= 0x3f; + b = v & 1; + return (v << 2) | (b << 1) | b; +} + +void vga_common_init(VGACommonState *s, Object *obj); +void vga_init(VGACommonState *s, Object *obj, MemoryRegion *address_space, + MemoryRegion *address_space_io, bool init_vga_ports); +MemoryRegion *vga_init_io(VGACommonState *s, Object *obj, + const MemoryRegionPortio **vga_ports, + const MemoryRegionPortio **vbe_ports); +void vga_common_reset(VGACommonState *s); + +void vga_dirty_log_start(VGACommonState *s); +void vga_dirty_log_stop(VGACommonState *s); + +extern const VMStateDescription vmstate_vga_common; +uint32_t vga_ioport_read(void *opaque, uint32_t addr); +void vga_ioport_write(void *opaque, uint32_t addr, uint32_t val); +uint32_t vga_mem_readb(VGACommonState *s, hwaddr addr); +void vga_mem_writeb(VGACommonState *s, hwaddr addr, uint32_t val); +void vga_invalidate_scanlines(VGACommonState *s, int y1, int y2); + +int vga_ioport_invalid(VGACommonState *s, uint32_t addr); + +uint32_t vbe_ioport_read_data(void *opaque, uint32_t addr); +void vbe_ioport_write_index(void *opaque, uint32_t addr, uint32_t val); +void vbe_ioport_write_data(void *opaque, uint32_t addr, uint32_t val); + +extern const uint8_t sr_mask[8]; +extern const uint8_t gr_mask[16]; + +#define VGABIOS_FILENAME "vgabios.bin" +#define VGABIOS_CIRRUS_FILENAME "vgabios-cirrus.bin" + +extern const MemoryRegionOps vga_mem_ops; + +/* vga-pci.c */ +void pci_std_vga_mmio_region_init(VGACommonState *s, + Object *owner, + MemoryRegion *parent, + MemoryRegion *subs, + bool qext, bool edid); + +#endif diff --git a/hw/display/vga_regs.h b/hw/display/vga_regs.h new file mode 100644 index 000000000..30a98b873 --- /dev/null +++ b/hw/display/vga_regs.h @@ -0,0 +1,159 @@ +/* + * linux/include/video/vga.h -- standard VGA chipset interaction + * + * Copyright 1999 Jeff Garzik + * + * Copyright history from vga16fb.c: + * Copyright 1999 Ben Pfaff 
and Petr Vandrovec + * Based on VGA info at http://www.osdever.net/FreeVGA/home.htm + * Based on VESA framebuffer (c) 1998 Gerd Knorr + * + * This file is subject to the terms and conditions of the GNU General + * Public License. See the file COPYING in the main directory of this + * archive for more details. + * + */ + +#ifndef HW_VGA_REGS_H +#define HW_VGA_REGS_H + +/* Some of the code below is taken from SVGAlib. The original, + unmodified copyright notice for that code is below. */ +/* VGAlib version 1.2 - (c) 1993 Tommy Frandsen */ +/* */ +/* This library is free software; you can redistribute it and/or */ +/* modify it without any restrictions. This library is distributed */ +/* in the hope that it will be useful, but without any warranty. */ + +/* Multi-chipset support Copyright 1993 Harm Hanemaayer */ +/* partially copyrighted (C) 1993 by Hartmut Schirmer */ + +/* VGA data register ports */ +#define VGA_CRT_DC 0x3D5 /* CRT Controller Data Register - color emulation */ +#define VGA_CRT_DM 0x3B5 /* CRT Controller Data Register - mono emulation */ +#define VGA_ATT_R 0x3C1 /* Attribute Controller Data Read Register */ +#define VGA_ATT_W 0x3C0 /* Attribute Controller Data Write Register */ +#define VGA_GFX_D 0x3CF /* Graphics Controller Data Register */ +#define VGA_SEQ_D 0x3C5 /* Sequencer Data Register */ +#define VGA_MIS_R 0x3CC /* Misc Output Read Register */ +#define VGA_MIS_W 0x3C2 /* Misc Output Write Register */ +#define VGA_FTC_R 0x3CA /* Feature Control Read Register */ +#define VGA_IS1_RC 0x3DA /* Input Status Register 1 - color emulation */ +#define VGA_IS1_RM 0x3BA /* Input Status Register 1 - mono emulation */ +#define VGA_PEL_D 0x3C9 /* PEL Data Register */ +#define VGA_PEL_MSK 0x3C6 /* PEL mask register */ + +/* EGA-specific registers */ +#define EGA_GFX_E0 0x3CC /* Graphics enable processor 0 */ +#define EGA_GFX_E1 0x3CA /* Graphics enable processor 1 */ + +/* VGA index register ports */ +#define VGA_CRT_IC 0x3D4 /* CRT Controller Index - color emulation */ +#define VGA_CRT_IM 0x3B4 /* CRT Controller Index - mono emulation */ +#define VGA_ATT_IW 0x3C0 /* Attribute Controller Index & Data Write Register */ +#define VGA_GFX_I 0x3CE /* Graphics Controller Index */ +#define VGA_SEQ_I 0x3C4 /* Sequencer Index */ +#define VGA_PEL_IW 0x3C8 /* PEL Write Index */ +#define VGA_PEL_IR 0x3C7 /* PEL Read Index */ + +/* standard VGA indexes max counts */ +#define VGA_CRT_C 0x19 /* Number of CRT Controller Registers */ +#define VGA_ATT_C 0x15 /* Number of Attribute Controller Registers */ +#define VGA_GFX_C 0x09 /* Number of Graphics Controller Registers */ +#define VGA_SEQ_C 0x05 /* Number of Sequencer Registers */ +#define VGA_MIS_C 0x01 /* Number of Misc Output Register */ + +/* VGA misc register bit masks */ +#define VGA_MIS_COLOR 0x01 +#define VGA_MIS_ENB_MEM_ACCESS 0x02 +#define VGA_MIS_DCLK_28322_720 0x04 +#define VGA_MIS_ENB_PLL_LOAD (0x04 | 0x08) +#define VGA_MIS_SEL_HIGH_PAGE 0x20 + +/* VGA CRT controller register indices */ +#define VGA_CRTC_H_TOTAL 0 +#define VGA_CRTC_H_DISP 1 +#define VGA_CRTC_H_BLANK_START 2 +#define VGA_CRTC_H_BLANK_END 3 +#define VGA_CRTC_H_SYNC_START 4 +#define VGA_CRTC_H_SYNC_END 5 +#define VGA_CRTC_V_TOTAL 6 +#define VGA_CRTC_OVERFLOW 7 +#define VGA_CRTC_PRESET_ROW 8 +#define VGA_CRTC_MAX_SCAN 9 +#define VGA_CRTC_CURSOR_START 0x0A +#define VGA_CRTC_CURSOR_END 0x0B +#define VGA_CRTC_START_HI 0x0C +#define VGA_CRTC_START_LO 0x0D +#define VGA_CRTC_CURSOR_HI 0x0E +#define VGA_CRTC_CURSOR_LO 0x0F +#define VGA_CRTC_V_SYNC_START 0x10 +#define 
VGA_CRTC_V_SYNC_END 0x11 +#define VGA_CRTC_V_DISP_END 0x12 +#define VGA_CRTC_OFFSET 0x13 +#define VGA_CRTC_UNDERLINE 0x14 +#define VGA_CRTC_V_BLANK_START 0x15 +#define VGA_CRTC_V_BLANK_END 0x16 +#define VGA_CRTC_MODE 0x17 +#define VGA_CRTC_LINE_COMPARE 0x18 +#define VGA_CRTC_REGS VGA_CRT_C + +/* VGA CRT controller bit masks */ +#define VGA_CR11_LOCK_CR0_CR7 0x80 /* lock writes to CR0 - CR7 */ +#define VGA_CR17_H_V_SIGNALS_ENABLED 0x80 + +/* VGA attribute controller register indices */ +#define VGA_ATC_PALETTE0 0x00 +#define VGA_ATC_PALETTE1 0x01 +#define VGA_ATC_PALETTE2 0x02 +#define VGA_ATC_PALETTE3 0x03 +#define VGA_ATC_PALETTE4 0x04 +#define VGA_ATC_PALETTE5 0x05 +#define VGA_ATC_PALETTE6 0x06 +#define VGA_ATC_PALETTE7 0x07 +#define VGA_ATC_PALETTE8 0x08 +#define VGA_ATC_PALETTE9 0x09 +#define VGA_ATC_PALETTEA 0x0A +#define VGA_ATC_PALETTEB 0x0B +#define VGA_ATC_PALETTEC 0x0C +#define VGA_ATC_PALETTED 0x0D +#define VGA_ATC_PALETTEE 0x0E +#define VGA_ATC_PALETTEF 0x0F +#define VGA_ATC_MODE 0x10 +#define VGA_ATC_OVERSCAN 0x11 +#define VGA_ATC_PLANE_ENABLE 0x12 +#define VGA_ATC_PEL 0x13 +#define VGA_ATC_COLOR_PAGE 0x14 + +#define VGA_AR_ENABLE_DISPLAY 0x20 + +/* VGA sequencer register indices */ +#define VGA_SEQ_RESET 0x00 +#define VGA_SEQ_CLOCK_MODE 0x01 +#define VGA_SEQ_PLANE_WRITE 0x02 +#define VGA_SEQ_CHARACTER_MAP 0x03 +#define VGA_SEQ_MEMORY_MODE 0x04 + +/* VGA sequencer register bit masks */ +#define VGA_SR01_CHAR_CLK_8DOTS 0x01 /* bit 0: character clocks 8 dots wide are generated */ +#define VGA_SR01_SCREEN_OFF 0x20 /* bit 5: Screen is off */ +#define VGA_SR02_ALL_PLANES 0x0F /* bits 3-0: enable access to all planes */ +#define VGA_SR04_EXT_MEM 0x02 /* bit 1: allows complete mem access to 256K */ +#define VGA_SR04_SEQ_MODE 0x04 /* bit 2: directs system to use a sequential addressing mode */ +#define VGA_SR04_CHN_4M 0x08 /* bit 3: selects modulo 4 addressing for CPU access to display memory */ + +/* VGA graphics controller register indices */ +#define VGA_GFX_SR_VALUE 0x00 +#define VGA_GFX_SR_ENABLE 0x01 +#define VGA_GFX_COMPARE_VALUE 0x02 +#define VGA_GFX_DATA_ROTATE 0x03 +#define VGA_GFX_PLANE_READ 0x04 +#define VGA_GFX_MODE 0x05 +#define VGA_GFX_MISC 0x06 +#define VGA_GFX_COMPARE_MASK 0x07 +#define VGA_GFX_BIT_MASK 0x08 + +/* VGA graphics controller bit masks */ +#define VGA_GR06_GRAPHICS_MODE 0x01 + +#endif /* HW_VGA_REGS_H */ diff --git a/hw/display/vhost-user-gpu-pci.c b/hw/display/vhost-user-gpu-pci.c new file mode 100644 index 000000000..daefcf710 --- /dev/null +++ b/hw/display/vhost-user-gpu-pci.c @@ -0,0 +1,53 @@ +/* + * vhost-user GPU PCI device + * + * Copyright Red Hat, Inc. 2018 + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ * + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "hw/virtio/virtio-gpu-pci.h" +#include "qom/object.h" + +#define TYPE_VHOST_USER_GPU_PCI "vhost-user-gpu-pci" +typedef struct VhostUserGPUPCI VhostUserGPUPCI; +DECLARE_INSTANCE_CHECKER(VhostUserGPUPCI, VHOST_USER_GPU_PCI, + TYPE_VHOST_USER_GPU_PCI) + +struct VhostUserGPUPCI { + VirtIOGPUPCIBase parent_obj; + + VhostUserGPU vdev; +}; + +static void vhost_user_gpu_pci_initfn(Object *obj) +{ + VhostUserGPUPCI *dev = VHOST_USER_GPU_PCI(obj); + + virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev), + TYPE_VHOST_USER_GPU); + + VIRTIO_GPU_PCI_BASE(obj)->vgpu = VIRTIO_GPU_BASE(&dev->vdev); + + object_property_add_alias(obj, "chardev", + OBJECT(&dev->vdev), "chardev"); +} + +static const VirtioPCIDeviceTypeInfo vhost_user_gpu_pci_info = { + .generic_name = TYPE_VHOST_USER_GPU_PCI, + .parent = TYPE_VIRTIO_GPU_PCI_BASE, + .instance_size = sizeof(VhostUserGPUPCI), + .instance_init = vhost_user_gpu_pci_initfn, +}; +module_obj(TYPE_VHOST_USER_GPU_PCI); + +static void vhost_user_gpu_pci_register_types(void) +{ + virtio_pci_types_register(&vhost_user_gpu_pci_info); +} + +type_init(vhost_user_gpu_pci_register_types) diff --git a/hw/display/vhost-user-gpu.c b/hw/display/vhost-user-gpu.c new file mode 100644 index 000000000..49df56cd1 --- /dev/null +++ b/hw/display/vhost-user-gpu.c @@ -0,0 +1,608 @@ +/* + * vhost-user GPU Device + * + * Copyright Red Hat, Inc. 2018 + * + * Authors: + * Marc-André Lureau + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "hw/qdev-properties.h" +#include "hw/virtio/virtio-gpu.h" +#include "chardev/char-fe.h" +#include "qapi/error.h" +#include "migration/blocker.h" + +typedef enum VhostUserGpuRequest { + VHOST_USER_GPU_NONE = 0, + VHOST_USER_GPU_GET_PROTOCOL_FEATURES, + VHOST_USER_GPU_SET_PROTOCOL_FEATURES, + VHOST_USER_GPU_GET_DISPLAY_INFO, + VHOST_USER_GPU_CURSOR_POS, + VHOST_USER_GPU_CURSOR_POS_HIDE, + VHOST_USER_GPU_CURSOR_UPDATE, + VHOST_USER_GPU_SCANOUT, + VHOST_USER_GPU_UPDATE, + VHOST_USER_GPU_DMABUF_SCANOUT, + VHOST_USER_GPU_DMABUF_UPDATE, +} VhostUserGpuRequest; + +typedef struct VhostUserGpuDisplayInfoReply { + struct virtio_gpu_resp_display_info info; +} VhostUserGpuDisplayInfoReply; + +typedef struct VhostUserGpuCursorPos { + uint32_t scanout_id; + uint32_t x; + uint32_t y; +} QEMU_PACKED VhostUserGpuCursorPos; + +typedef struct VhostUserGpuCursorUpdate { + VhostUserGpuCursorPos pos; + uint32_t hot_x; + uint32_t hot_y; + uint32_t data[64 * 64]; +} QEMU_PACKED VhostUserGpuCursorUpdate; + +typedef struct VhostUserGpuScanout { + uint32_t scanout_id; + uint32_t width; + uint32_t height; +} QEMU_PACKED VhostUserGpuScanout; + +typedef struct VhostUserGpuUpdate { + uint32_t scanout_id; + uint32_t x; + uint32_t y; + uint32_t width; + uint32_t height; + uint8_t data[]; +} QEMU_PACKED VhostUserGpuUpdate; + +typedef struct VhostUserGpuDMABUFScanout { + uint32_t scanout_id; + uint32_t x; + uint32_t y; + uint32_t width; + uint32_t height; + uint32_t fd_width; + uint32_t fd_height; + uint32_t fd_stride; + uint32_t fd_flags; + int fd_drm_fourcc; +} QEMU_PACKED VhostUserGpuDMABUFScanout; + +typedef struct VhostUserGpuMsg { + uint32_t request; /* VhostUserGpuRequest */ + uint32_t flags; + uint32_t size; /* the following payload size */ + union { + VhostUserGpuCursorPos cursor_pos; + VhostUserGpuCursorUpdate cursor_update; + VhostUserGpuScanout scanout; + VhostUserGpuUpdate 
update; + VhostUserGpuDMABUFScanout dmabuf_scanout; + struct virtio_gpu_resp_display_info display_info; + uint64_t u64; + } payload; +} QEMU_PACKED VhostUserGpuMsg; + +static VhostUserGpuMsg m __attribute__ ((unused)); +#define VHOST_USER_GPU_HDR_SIZE \ + (sizeof(m.request) + sizeof(m.size) + sizeof(m.flags)) + +#define VHOST_USER_GPU_MSG_FLAG_REPLY 0x4 + +static void vhost_user_gpu_update_blocked(VhostUserGPU *g, bool blocked); + +static void +vhost_user_gpu_handle_cursor(VhostUserGPU *g, VhostUserGpuMsg *msg) +{ + VhostUserGpuCursorPos *pos = &msg->payload.cursor_pos; + struct virtio_gpu_scanout *s; + + if (pos->scanout_id >= g->parent_obj.conf.max_outputs) { + return; + } + s = &g->parent_obj.scanout[pos->scanout_id]; + + if (msg->request == VHOST_USER_GPU_CURSOR_UPDATE) { + VhostUserGpuCursorUpdate *up = &msg->payload.cursor_update; + if (!s->current_cursor) { + s->current_cursor = cursor_alloc(64, 64); + } + + s->current_cursor->hot_x = up->hot_x; + s->current_cursor->hot_y = up->hot_y; + + memcpy(s->current_cursor->data, up->data, + 64 * 64 * sizeof(uint32_t)); + + dpy_cursor_define(s->con, s->current_cursor); + } + + dpy_mouse_set(s->con, pos->x, pos->y, + msg->request != VHOST_USER_GPU_CURSOR_POS_HIDE); +} + +static void +vhost_user_gpu_send_msg(VhostUserGPU *g, const VhostUserGpuMsg *msg) +{ + qemu_chr_fe_write(&g->vhost_chr, (uint8_t *)msg, + VHOST_USER_GPU_HDR_SIZE + msg->size); +} + +static void +vhost_user_gpu_unblock(VhostUserGPU *g) +{ + VhostUserGpuMsg msg = { + .request = VHOST_USER_GPU_DMABUF_UPDATE, + .flags = VHOST_USER_GPU_MSG_FLAG_REPLY, + }; + + vhost_user_gpu_send_msg(g, &msg); +} + +static void +vhost_user_gpu_handle_display(VhostUserGPU *g, VhostUserGpuMsg *msg) +{ + QemuConsole *con = NULL; + struct virtio_gpu_scanout *s; + + switch (msg->request) { + case VHOST_USER_GPU_GET_PROTOCOL_FEATURES: { + VhostUserGpuMsg reply = { + .request = msg->request, + .flags = VHOST_USER_GPU_MSG_FLAG_REPLY, + .size = sizeof(uint64_t), + }; + + vhost_user_gpu_send_msg(g, &reply); + break; + } + case VHOST_USER_GPU_SET_PROTOCOL_FEATURES: { + break; + } + case VHOST_USER_GPU_GET_DISPLAY_INFO: { + struct virtio_gpu_resp_display_info display_info = { {} }; + VhostUserGpuMsg reply = { + .request = msg->request, + .flags = VHOST_USER_GPU_MSG_FLAG_REPLY, + .size = sizeof(struct virtio_gpu_resp_display_info), + }; + + display_info.hdr.type = VIRTIO_GPU_RESP_OK_DISPLAY_INFO; + virtio_gpu_base_fill_display_info(VIRTIO_GPU_BASE(g), &display_info); + memcpy(&reply.payload.display_info, &display_info, + sizeof(display_info)); + vhost_user_gpu_send_msg(g, &reply); + break; + } + case VHOST_USER_GPU_SCANOUT: { + VhostUserGpuScanout *m = &msg->payload.scanout; + + if (m->scanout_id >= g->parent_obj.conf.max_outputs) { + return; + } + + g->parent_obj.enable = 1; + s = &g->parent_obj.scanout[m->scanout_id]; + con = s->con; + + if (m->width == 0) { + dpy_gfx_replace_surface(con, NULL); + } else { + s->ds = qemu_create_displaysurface(m->width, m->height); + /* replace surface on next update */ + } + + break; + } + case VHOST_USER_GPU_DMABUF_SCANOUT: { + VhostUserGpuDMABUFScanout *m = &msg->payload.dmabuf_scanout; + int fd = qemu_chr_fe_get_msgfd(&g->vhost_chr); + QemuDmaBuf *dmabuf; + + if (m->scanout_id >= g->parent_obj.conf.max_outputs) { + error_report("invalid scanout: %d", m->scanout_id); + if (fd >= 0) { + close(fd); + } + break; + } + + g->parent_obj.enable = 1; + con = g->parent_obj.scanout[m->scanout_id].con; + dmabuf = &g->dmabuf[m->scanout_id]; + if (dmabuf->fd >= 0) { + close(dmabuf->fd); 
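+ /* invalidate the stale descriptor before the release below */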
+ dmabuf->fd = -1; + } + dpy_gl_release_dmabuf(con, dmabuf); + if (fd == -1) { + dpy_gl_scanout_disable(con); + break; + } + *dmabuf = (QemuDmaBuf) { + .fd = fd, + .width = m->fd_width, + .height = m->fd_height, + .stride = m->fd_stride, + .fourcc = m->fd_drm_fourcc, + .y0_top = m->fd_flags & VIRTIO_GPU_RESOURCE_FLAG_Y_0_TOP, + }; + dpy_gl_scanout_dmabuf(con, dmabuf); + break; + } + case VHOST_USER_GPU_DMABUF_UPDATE: { + VhostUserGpuUpdate *m = &msg->payload.update; + + if (m->scanout_id >= g->parent_obj.conf.max_outputs || + !g->parent_obj.scanout[m->scanout_id].con) { + error_report("invalid scanout update: %d", m->scanout_id); + vhost_user_gpu_unblock(g); + break; + } + + con = g->parent_obj.scanout[m->scanout_id].con; + if (!console_has_gl(con)) { + error_report("console doesn't support GL!"); + vhost_user_gpu_unblock(g); + break; + } + dpy_gl_update(con, m->x, m->y, m->width, m->height); + g->backend_blocked = true; + break; + } + case VHOST_USER_GPU_UPDATE: { + VhostUserGpuUpdate *m = &msg->payload.update; + + if (m->scanout_id >= g->parent_obj.conf.max_outputs) { + break; + } + s = &g->parent_obj.scanout[m->scanout_id]; + con = s->con; + pixman_image_t *image = + pixman_image_create_bits(PIXMAN_x8r8g8b8, + m->width, + m->height, + (uint32_t *)m->data, + m->width * 4); + + pixman_image_composite(PIXMAN_OP_SRC, + image, NULL, s->ds->image, + 0, 0, 0, 0, m->x, m->y, m->width, m->height); + + pixman_image_unref(image); + if (qemu_console_surface(con) != s->ds) { + dpy_gfx_replace_surface(con, s->ds); + } else { + dpy_gfx_update(con, m->x, m->y, m->width, m->height); + } + break; + } + default: + g_warning("unhandled message %d %d", msg->request, msg->size); + } + + if (con && qemu_console_is_gl_blocked(con)) { + vhost_user_gpu_update_blocked(g, true); + } +} + +static void +vhost_user_gpu_chr_read(void *opaque) +{ + VhostUserGPU *g = opaque; + VhostUserGpuMsg *msg = NULL; + VhostUserGpuRequest request; + uint32_t size, flags; + int r; + + r = qemu_chr_fe_read_all(&g->vhost_chr, + (uint8_t *)&request, sizeof(uint32_t)); + if (r != sizeof(uint32_t)) { + error_report("failed to read msg header: %d, %d", r, errno); + goto end; + } + + r = qemu_chr_fe_read_all(&g->vhost_chr, + (uint8_t *)&flags, sizeof(uint32_t)); + if (r != sizeof(uint32_t)) { + error_report("failed to read msg flags"); + goto end; + } + + r = qemu_chr_fe_read_all(&g->vhost_chr, + (uint8_t *)&size, sizeof(uint32_t)); + if (r != sizeof(uint32_t)) { + error_report("failed to read msg size"); + goto end; + } + + msg = g_malloc(VHOST_USER_GPU_HDR_SIZE + size); + + r = qemu_chr_fe_read_all(&g->vhost_chr, + (uint8_t *)&msg->payload, size); + if (r != size) { + error_report("failed to read msg payload %d != %d", r, size); + goto end; + } + + msg->request = request; + msg->flags = size; + msg->size = size; + + if (request == VHOST_USER_GPU_CURSOR_UPDATE || + request == VHOST_USER_GPU_CURSOR_POS || + request == VHOST_USER_GPU_CURSOR_POS_HIDE) { + vhost_user_gpu_handle_cursor(g, msg); + } else { + vhost_user_gpu_handle_display(g, msg); + } + +end: + g_free(msg); +} + +static void +vhost_user_gpu_update_blocked(VhostUserGPU *g, bool blocked) +{ + qemu_set_fd_handler(g->vhost_gpu_fd, + blocked ? 
NULL : vhost_user_gpu_chr_read, NULL, g); +} + +static void +vhost_user_gpu_gl_flushed(VirtIOGPUBase *b) +{ + VhostUserGPU *g = VHOST_USER_GPU(b); + + if (g->backend_blocked) { + vhost_user_gpu_unblock(VHOST_USER_GPU(g)); + g->backend_blocked = false; + } + + vhost_user_gpu_update_blocked(VHOST_USER_GPU(g), false); +} + +static bool +vhost_user_gpu_do_set_socket(VhostUserGPU *g, Error **errp) +{ + Chardev *chr; + int sv[2]; + + if (socketpair(PF_UNIX, SOCK_STREAM, 0, sv) == -1) { + error_setg_errno(errp, errno, "socketpair() failed"); + return false; + } + + chr = CHARDEV(object_new(TYPE_CHARDEV_SOCKET)); + if (!chr || qemu_chr_add_client(chr, sv[0]) == -1) { + error_setg(errp, "Failed to make socket chardev"); + goto err; + } + if (!qemu_chr_fe_init(&g->vhost_chr, chr, errp)) { + goto err; + } + if (vhost_user_gpu_set_socket(&g->vhost->dev, sv[1]) < 0) { + error_setg(errp, "Failed to set vhost-user-gpu socket"); + qemu_chr_fe_deinit(&g->vhost_chr, false); + goto err; + } + + g->vhost_gpu_fd = sv[0]; + vhost_user_gpu_update_blocked(g, false); + close(sv[1]); + return true; + +err: + close(sv[0]); + close(sv[1]); + if (chr) { + object_unref(OBJECT(chr)); + } + return false; +} + +static void +vhost_user_gpu_get_config(VirtIODevice *vdev, uint8_t *config_data) +{ + VhostUserGPU *g = VHOST_USER_GPU(vdev); + VirtIOGPUBase *b = VIRTIO_GPU_BASE(vdev); + struct virtio_gpu_config *vgconfig = + (struct virtio_gpu_config *)config_data; + Error *local_err = NULL; + int ret; + + memset(config_data, 0, sizeof(struct virtio_gpu_config)); + + ret = vhost_dev_get_config(&g->vhost->dev, + config_data, sizeof(struct virtio_gpu_config), + &local_err); + if (ret) { + error_report_err(local_err); + return; + } + + /* those fields are managed by qemu */ + vgconfig->num_scanouts = b->virtio_config.num_scanouts; + vgconfig->events_read = b->virtio_config.events_read; + vgconfig->events_clear = b->virtio_config.events_clear; +} + +static void +vhost_user_gpu_set_config(VirtIODevice *vdev, + const uint8_t *config_data) +{ + VhostUserGPU *g = VHOST_USER_GPU(vdev); + VirtIOGPUBase *b = VIRTIO_GPU_BASE(vdev); + const struct virtio_gpu_config *vgconfig = + (const struct virtio_gpu_config *)config_data; + int ret; + + if (vgconfig->events_clear) { + b->virtio_config.events_read &= ~vgconfig->events_clear; + } + + ret = vhost_dev_set_config(&g->vhost->dev, config_data, + 0, sizeof(struct virtio_gpu_config), + VHOST_SET_CONFIG_TYPE_MASTER); + if (ret) { + error_report("vhost-user-gpu: set device config space failed"); + return; + } +} + +static void +vhost_user_gpu_set_status(VirtIODevice *vdev, uint8_t val) +{ + VhostUserGPU *g = VHOST_USER_GPU(vdev); + Error *err = NULL; + + if (val & VIRTIO_CONFIG_S_DRIVER_OK && vdev->vm_running) { + if (!vhost_user_gpu_do_set_socket(g, &err)) { + error_report_err(err); + return; + } + vhost_user_backend_start(g->vhost); + } else { + /* unblock any wait and stop processing */ + if (g->vhost_gpu_fd != -1) { + vhost_user_gpu_update_blocked(g, true); + qemu_chr_fe_deinit(&g->vhost_chr, true); + g->vhost_gpu_fd = -1; + } + vhost_user_backend_stop(g->vhost); + } +} + +static bool +vhost_user_gpu_guest_notifier_pending(VirtIODevice *vdev, int idx) +{ + VhostUserGPU *g = VHOST_USER_GPU(vdev); + + return vhost_virtqueue_pending(&g->vhost->dev, idx); +} + +static void +vhost_user_gpu_guest_notifier_mask(VirtIODevice *vdev, int idx, bool mask) +{ + VhostUserGPU *g = VHOST_USER_GPU(vdev); + + vhost_virtqueue_mask(&g->vhost->dev, vdev, idx, mask); +} + +static void 
+vhost_user_gpu_instance_init(Object *obj) +{ + VhostUserGPU *g = VHOST_USER_GPU(obj); + + g->vhost = VHOST_USER_BACKEND(object_new(TYPE_VHOST_USER_BACKEND)); + object_property_add_alias(obj, "chardev", + OBJECT(g->vhost), "chardev"); +} + +static void +vhost_user_gpu_instance_finalize(Object *obj) +{ + VhostUserGPU *g = VHOST_USER_GPU(obj); + + object_unref(OBJECT(g->vhost)); +} + +static void +vhost_user_gpu_reset(VirtIODevice *vdev) +{ + VhostUserGPU *g = VHOST_USER_GPU(vdev); + + virtio_gpu_base_reset(VIRTIO_GPU_BASE(vdev)); + + vhost_user_backend_stop(g->vhost); +} + +static int +vhost_user_gpu_config_change(struct vhost_dev *dev) +{ + error_report("vhost-user-gpu: unhandled backend config change"); + return -1; +} + +static const VhostDevConfigOps config_ops = { + .vhost_dev_config_notifier = vhost_user_gpu_config_change, +}; + +static void +vhost_user_gpu_device_realize(DeviceState *qdev, Error **errp) +{ + VhostUserGPU *g = VHOST_USER_GPU(qdev); + VirtIODevice *vdev = VIRTIO_DEVICE(g); + + vhost_dev_set_config_notifier(&g->vhost->dev, &config_ops); + if (vhost_user_backend_dev_init(g->vhost, vdev, 2, errp) < 0) { + return; + } + + /* existing backend may send DMABUF, so let's add that requirement */ + g->parent_obj.conf.flags |= 1 << VIRTIO_GPU_FLAG_DMABUF_ENABLED; + if (virtio_has_feature(g->vhost->dev.features, VIRTIO_GPU_F_VIRGL)) { + g->parent_obj.conf.flags |= 1 << VIRTIO_GPU_FLAG_VIRGL_ENABLED; + } + if (virtio_has_feature(g->vhost->dev.features, VIRTIO_GPU_F_EDID)) { + g->parent_obj.conf.flags |= 1 << VIRTIO_GPU_FLAG_EDID_ENABLED; + } else { + error_report("EDID requested but the backend doesn't support it."); + g->parent_obj.conf.flags &= ~(1 << VIRTIO_GPU_FLAG_EDID_ENABLED); + } + + if (!virtio_gpu_base_device_realize(qdev, NULL, NULL, errp)) { + return; + } + + g->vhost_gpu_fd = -1; +} + +static Property vhost_user_gpu_properties[] = { + VIRTIO_GPU_BASE_PROPERTIES(VhostUserGPU, parent_obj.conf), + DEFINE_PROP_END_OF_LIST(), +}; + +static void +vhost_user_gpu_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass); + VirtIOGPUBaseClass *vgc = VIRTIO_GPU_BASE_CLASS(klass); + + vgc->gl_flushed = vhost_user_gpu_gl_flushed; + + vdc->realize = vhost_user_gpu_device_realize; + vdc->reset = vhost_user_gpu_reset; + vdc->set_status = vhost_user_gpu_set_status; + vdc->guest_notifier_mask = vhost_user_gpu_guest_notifier_mask; + vdc->guest_notifier_pending = vhost_user_gpu_guest_notifier_pending; + vdc->get_config = vhost_user_gpu_get_config; + vdc->set_config = vhost_user_gpu_set_config; + + device_class_set_props(dc, vhost_user_gpu_properties); +} + +static const TypeInfo vhost_user_gpu_info = { + .name = TYPE_VHOST_USER_GPU, + .parent = TYPE_VIRTIO_GPU_BASE, + .instance_size = sizeof(VhostUserGPU), + .instance_init = vhost_user_gpu_instance_init, + .instance_finalize = vhost_user_gpu_instance_finalize, + .class_init = vhost_user_gpu_class_init, +}; +module_obj(TYPE_VHOST_USER_GPU); + +static void vhost_user_gpu_register_types(void) +{ + type_register_static(&vhost_user_gpu_info); +} + +type_init(vhost_user_gpu_register_types) diff --git a/hw/display/vhost-user-vga.c b/hw/display/vhost-user-vga.c new file mode 100644 index 000000000..072c9c65b --- /dev/null +++ b/hw/display/vhost-user-vga.c @@ -0,0 +1,54 @@ +/* + * vhost-user VGA device + * + * Copyright Red Hat, Inc. 2018 + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. 
+ * See the COPYING file in the top-level directory. + * + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "virtio-vga.h" +#include "qom/object.h" + +#define TYPE_VHOST_USER_VGA "vhost-user-vga" + +typedef struct VhostUserVGA VhostUserVGA; +DECLARE_INSTANCE_CHECKER(VhostUserVGA, VHOST_USER_VGA, + TYPE_VHOST_USER_VGA) + +struct VhostUserVGA { + VirtIOVGABase parent_obj; + + VhostUserGPU vdev; +}; + +static void vhost_user_vga_inst_initfn(Object *obj) +{ + VhostUserVGA *dev = VHOST_USER_VGA(obj); + + virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev), + TYPE_VHOST_USER_GPU); + + VIRTIO_VGA_BASE(dev)->vgpu = VIRTIO_GPU_BASE(&dev->vdev); + + object_property_add_alias(obj, "chardev", + OBJECT(&dev->vdev), "chardev"); +} + +static const VirtioPCIDeviceTypeInfo vhost_user_vga_info = { + .generic_name = TYPE_VHOST_USER_VGA, + .parent = TYPE_VIRTIO_VGA_BASE, + .instance_size = sizeof(VhostUserVGA), + .instance_init = vhost_user_vga_inst_initfn, +}; +module_obj(TYPE_VHOST_USER_VGA); + +static void vhost_user_vga_register_types(void) +{ + virtio_pci_types_register(&vhost_user_vga_info); +} + +type_init(vhost_user_vga_register_types) diff --git a/hw/display/virtio-gpu-base.c b/hw/display/virtio-gpu-base.c new file mode 100644 index 000000000..c8da4806e --- /dev/null +++ b/hw/display/virtio-gpu-base.c @@ -0,0 +1,290 @@ +/* + * Virtio GPU Device + * + * Copyright Red Hat, Inc. 2013-2014 + * + * Authors: + * Dave Airlie + * Gerd Hoffmann + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" + +#include "hw/virtio/virtio-gpu.h" +#include "migration/blocker.h" +#include "qapi/error.h" +#include "qemu/error-report.h" +#include "trace.h" + +void +virtio_gpu_base_reset(VirtIOGPUBase *g) +{ + int i; + + g->enable = 0; + + for (i = 0; i < g->conf.max_outputs; i++) { + g->scanout[i].resource_id = 0; + g->scanout[i].width = 0; + g->scanout[i].height = 0; + g->scanout[i].x = 0; + g->scanout[i].y = 0; + g->scanout[i].ds = NULL; + } +} + +void +virtio_gpu_base_fill_display_info(VirtIOGPUBase *g, + struct virtio_gpu_resp_display_info *dpy_info) +{ + int i; + + for (i = 0; i < g->conf.max_outputs; i++) { + if (g->enabled_output_bitmask & (1 << i)) { + dpy_info->pmodes[i].enabled = 1; + dpy_info->pmodes[i].r.width = cpu_to_le32(g->req_state[i].width); + dpy_info->pmodes[i].r.height = cpu_to_le32(g->req_state[i].height); + } + } +} + +static void virtio_gpu_invalidate_display(void *opaque) +{ +} + +static void virtio_gpu_update_display(void *opaque) +{ +} + +static void virtio_gpu_text_update(void *opaque, console_ch_t *chardata) +{ +} + +static void virtio_gpu_notify_event(VirtIOGPUBase *g, uint32_t event_type) +{ + g->virtio_config.events_read |= event_type; + virtio_notify_config(&g->parent_obj); +} + +static int virtio_gpu_ui_info(void *opaque, uint32_t idx, QemuUIInfo *info) +{ + VirtIOGPUBase *g = opaque; + + if (idx >= g->conf.max_outputs) { + return -1; + } + + g->req_state[idx].x = info->xoff; + g->req_state[idx].y = info->yoff; + g->req_state[idx].width = info->width; + g->req_state[idx].height = info->height; + g->req_state[idx].width_mm = info->width_mm; + g->req_state[idx].height_mm = info->height_mm; + + if (info->width && info->height) { + g->enabled_output_bitmask |= (1 << idx); + } else { + g->enabled_output_bitmask &= ~(1 << idx); + } + + /* send event to guest */ + virtio_gpu_notify_event(g, VIRTIO_GPU_EVENT_DISPLAY); + return 0; +} + +static void 
+virtio_gpu_gl_flushed(void *opaque) +{ + VirtIOGPUBase *g = opaque; + VirtIOGPUBaseClass *vgc = VIRTIO_GPU_BASE_GET_CLASS(g); + + if (vgc->gl_flushed) { + vgc->gl_flushed(g); + } +} + +static void +virtio_gpu_gl_block(void *opaque, bool block) +{ + VirtIOGPUBase *g = opaque; + + if (block) { + g->renderer_blocked++; + } else { + g->renderer_blocked--; + } + assert(g->renderer_blocked >= 0); +} + +static int +virtio_gpu_get_flags(void *opaque) +{ + VirtIOGPUBase *g = opaque; + int flags = GRAPHIC_FLAGS_NONE; + + if (virtio_gpu_virgl_enabled(g->conf)) { + flags |= GRAPHIC_FLAGS_GL; + } + + if (virtio_gpu_dmabuf_enabled(g->conf)) { + flags |= GRAPHIC_FLAGS_DMABUF; + } + + return flags; +} + +static const GraphicHwOps virtio_gpu_ops = { + .get_flags = virtio_gpu_get_flags, + .invalidate = virtio_gpu_invalidate_display, + .gfx_update = virtio_gpu_update_display, + .text_update = virtio_gpu_text_update, + .ui_info = virtio_gpu_ui_info, + .gl_block = virtio_gpu_gl_block, + .gl_flushed = virtio_gpu_gl_flushed, +}; + +bool +virtio_gpu_base_device_realize(DeviceState *qdev, + VirtIOHandleOutput ctrl_cb, + VirtIOHandleOutput cursor_cb, + Error **errp) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(qdev); + VirtIOGPUBase *g = VIRTIO_GPU_BASE(qdev); + int i; + + if (g->conf.max_outputs > VIRTIO_GPU_MAX_SCANOUTS) { + error_setg(errp, "invalid max_outputs > %d", VIRTIO_GPU_MAX_SCANOUTS); + return false; + } + + if (virtio_gpu_virgl_enabled(g->conf)) { + error_setg(&g->migration_blocker, "virgl is not yet migratable"); + if (migrate_add_blocker(g->migration_blocker, errp) < 0) { + error_free(g->migration_blocker); + return false; + } + } + + g->virtio_config.num_scanouts = cpu_to_le32(g->conf.max_outputs); + virtio_init(VIRTIO_DEVICE(g), "virtio-gpu", VIRTIO_ID_GPU, + sizeof(struct virtio_gpu_config)); + + if (virtio_gpu_virgl_enabled(g->conf)) { + /* use larger control queue in 3d mode */ + virtio_add_queue(vdev, 256, ctrl_cb); + virtio_add_queue(vdev, 16, cursor_cb); + } else { + virtio_add_queue(vdev, 64, ctrl_cb); + virtio_add_queue(vdev, 16, cursor_cb); + } + + g->enabled_output_bitmask = 1; + + g->req_state[0].width = g->conf.xres; + g->req_state[0].height = g->conf.yres; + + g->hw_ops = &virtio_gpu_ops; + for (i = 0; i < g->conf.max_outputs; i++) { + g->scanout[i].con = + graphic_console_init(DEVICE(g), i, &virtio_gpu_ops, g); + } + + return true; +} + +static uint64_t +virtio_gpu_base_get_features(VirtIODevice *vdev, uint64_t features, + Error **errp) +{ + VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev); + + if (virtio_gpu_virgl_enabled(g->conf)) { + features |= (1 << VIRTIO_GPU_F_VIRGL); + } + if (virtio_gpu_edid_enabled(g->conf)) { + features |= (1 << VIRTIO_GPU_F_EDID); + } + if (virtio_gpu_blob_enabled(g->conf)) { + features |= (1 << VIRTIO_GPU_F_RESOURCE_BLOB); + } + + return features; +} + +static void +virtio_gpu_base_set_features(VirtIODevice *vdev, uint64_t features) +{ + static const uint32_t virgl = (1 << VIRTIO_GPU_F_VIRGL); + + trace_virtio_gpu_features(((features & virgl) == virgl)); +} + +static void +virtio_gpu_base_device_unrealize(DeviceState *qdev) +{ + VirtIOGPUBase *g = VIRTIO_GPU_BASE(qdev); + + if (g->migration_blocker) { + migrate_del_blocker(g->migration_blocker); + error_free(g->migration_blocker); + } +} + +static void +virtio_gpu_base_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass); + + vdc->unrealize = virtio_gpu_base_device_unrealize; + vdc->get_features = virtio_gpu_base_get_features; + 
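/* set_features only traces whether the guest negotiated virgl */ +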
vdc->set_features = virtio_gpu_base_set_features; + + set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories); + dc->hotpluggable = false; +} + +static const TypeInfo virtio_gpu_base_info = { + .name = TYPE_VIRTIO_GPU_BASE, + .parent = TYPE_VIRTIO_DEVICE, + .instance_size = sizeof(VirtIOGPUBase), + .class_size = sizeof(VirtIOGPUBaseClass), + .class_init = virtio_gpu_base_class_init, + .abstract = true +}; +module_obj(TYPE_VIRTIO_GPU_BASE); + +static void +virtio_register_types(void) +{ + type_register_static(&virtio_gpu_base_info); +} + +type_init(virtio_register_types) + +QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctrl_hdr) != 24); +QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_update_cursor) != 56); +QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_unref) != 32); +QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_create_2d) != 40); +QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_set_scanout) != 48); +QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_flush) != 48); +QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_transfer_to_host_2d) != 56); +QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_mem_entry) != 16); +QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_attach_backing) != 32); +QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_detach_backing) != 32); +QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_display_info) != 408); + +QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_transfer_host_3d) != 72); +QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_create_3d) != 72); +QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_create) != 96); +QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_destroy) != 24); +QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_resource) != 32); +QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_cmd_submit) != 32); +QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_get_capset_info) != 32); +QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_capset_info) != 40); +QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_get_capset) != 32); +QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_capset) != 24); diff --git a/hw/display/virtio-gpu-gl.c b/hw/display/virtio-gpu-gl.c new file mode 100644 index 000000000..6cc4313b1 --- /dev/null +++ b/hw/display/virtio-gpu-gl.c @@ -0,0 +1,171 @@ +/* + * Virtio GPU Device + * + * Copyright Red Hat, Inc. 2013-2014 + * + * Authors: + * Dave Airlie + * Gerd Hoffmann + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ */ + +#include "qemu/osdep.h" +#include "qemu/iov.h" +#include "qemu/module.h" +#include "qemu/error-report.h" +#include "qapi/error.h" +#include "sysemu/sysemu.h" +#include "hw/virtio/virtio.h" +#include "hw/virtio/virtio-gpu.h" +#include "hw/virtio/virtio-gpu-bswap.h" +#include "hw/virtio/virtio-gpu-pixman.h" +#include "hw/qdev-properties.h" + +#include <virglrenderer.h> + +static void virtio_gpu_gl_update_cursor_data(VirtIOGPU *g, + struct virtio_gpu_scanout *s, + uint32_t resource_id) +{ + uint32_t width, height; + uint32_t pixels, *data; + + data = virgl_renderer_get_cursor_data(resource_id, &width, &height); + if (!data) { + return; + } + + if (width != s->current_cursor->width || + height != s->current_cursor->height) { + free(data); + return; + } + + pixels = s->current_cursor->width * s->current_cursor->height; + memcpy(s->current_cursor->data, data, pixels * sizeof(uint32_t)); + free(data); +} + +static void virtio_gpu_gl_flushed(VirtIOGPUBase *b) +{ + VirtIOGPU *g = VIRTIO_GPU(b); + + virtio_gpu_process_cmdq(g); +} + +static void virtio_gpu_gl_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq) +{ + VirtIOGPU *g = VIRTIO_GPU(vdev); + VirtIOGPUGL *gl = VIRTIO_GPU_GL(vdev); + struct virtio_gpu_ctrl_command *cmd; + + if (!virtio_queue_ready(vq)) { + return; + } + + if (!gl->renderer_inited) { + virtio_gpu_virgl_init(g); + gl->renderer_inited = true; + } + if (gl->renderer_reset) { + gl->renderer_reset = false; + virtio_gpu_virgl_reset(g); + } + + cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command)); + while (cmd) { + cmd->vq = vq; + cmd->error = 0; + cmd->finished = false; + QTAILQ_INSERT_TAIL(&g->cmdq, cmd, next); + cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command)); + } + + virtio_gpu_process_cmdq(g); + virtio_gpu_virgl_fence_poll(g); +} + +static void virtio_gpu_gl_reset(VirtIODevice *vdev) +{ + VirtIOGPU *g = VIRTIO_GPU(vdev); + VirtIOGPUGL *gl = VIRTIO_GPU_GL(vdev); + + virtio_gpu_reset(vdev); + + /* + * GL functions must be called with the associated GL context in main + * thread, and when the renderer is unblocked. 
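+ * The reset is therefore deferred here and carried out on the next + * control-queue kick in virtio_gpu_gl_handle_ctrl().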
+ */ + if (gl->renderer_inited && !gl->renderer_reset) { + virtio_gpu_virgl_reset_scanout(g); + gl->renderer_reset = true; + } +} + +static void virtio_gpu_gl_device_realize(DeviceState *qdev, Error **errp) +{ + VirtIOGPU *g = VIRTIO_GPU(qdev); + +#if defined(HOST_WORDS_BIGENDIAN) + error_setg(errp, "virgl is not supported on bigendian platforms"); + return; +#endif + + if (!object_resolve_path_type("", TYPE_VIRTIO_GPU_GL, NULL)) { + error_setg(errp, "at most one %s device is permitted", TYPE_VIRTIO_GPU_GL); + return; + } + + if (!display_opengl) { + error_setg(errp, "opengl is not available"); + return; + } + + g->parent_obj.conf.flags |= (1 << VIRTIO_GPU_FLAG_VIRGL_ENABLED); + VIRTIO_GPU_BASE(g)->virtio_config.num_capsets = + virtio_gpu_virgl_get_num_capsets(g); + + virtio_gpu_device_realize(qdev, errp); +} + +static Property virtio_gpu_gl_properties[] = { + DEFINE_PROP_BIT("stats", VirtIOGPU, parent_obj.conf.flags, + VIRTIO_GPU_FLAG_STATS_ENABLED, false), + DEFINE_PROP_END_OF_LIST(), +}; + +static void virtio_gpu_gl_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass); + VirtIOGPUBaseClass *vbc = VIRTIO_GPU_BASE_CLASS(klass); + VirtIOGPUClass *vgc = VIRTIO_GPU_CLASS(klass); + + vbc->gl_flushed = virtio_gpu_gl_flushed; + vgc->handle_ctrl = virtio_gpu_gl_handle_ctrl; + vgc->process_cmd = virtio_gpu_virgl_process_cmd; + vgc->update_cursor_data = virtio_gpu_gl_update_cursor_data; + + vdc->realize = virtio_gpu_gl_device_realize; + vdc->reset = virtio_gpu_gl_reset; + device_class_set_props(dc, virtio_gpu_gl_properties); +} + +static const TypeInfo virtio_gpu_gl_info = { + .name = TYPE_VIRTIO_GPU_GL, + .parent = TYPE_VIRTIO_GPU, + .instance_size = sizeof(VirtIOGPUGL), + .class_init = virtio_gpu_gl_class_init, +}; +module_obj(TYPE_VIRTIO_GPU_GL); + +static void virtio_register_types(void) +{ + type_register_static(&virtio_gpu_gl_info); +} + +type_init(virtio_register_types) + +module_dep("hw-display-virtio-gpu"); diff --git a/hw/display/virtio-gpu-pci-gl.c b/hw/display/virtio-gpu-pci-gl.c new file mode 100644 index 000000000..99b14a071 --- /dev/null +++ b/hw/display/virtio-gpu-pci-gl.c @@ -0,0 +1,58 @@ +/* + * Virtio video device + * + * Copyright Red Hat + * + * Authors: + * Dave Airlie + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ * + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/module.h" +#include "hw/pci/pci.h" +#include "hw/qdev-properties.h" +#include "hw/virtio/virtio.h" +#include "hw/virtio/virtio-bus.h" +#include "hw/virtio/virtio-gpu-pci.h" +#include "qom/object.h" + +#define TYPE_VIRTIO_GPU_GL_PCI "virtio-gpu-gl-pci" +typedef struct VirtIOGPUGLPCI VirtIOGPUGLPCI; +DECLARE_INSTANCE_CHECKER(VirtIOGPUGLPCI, VIRTIO_GPU_GL_PCI, + TYPE_VIRTIO_GPU_GL_PCI) + +struct VirtIOGPUGLPCI { + VirtIOGPUPCIBase parent_obj; + VirtIOGPUGL vdev; +}; + +static void virtio_gpu_gl_initfn(Object *obj) +{ + VirtIOGPUGLPCI *dev = VIRTIO_GPU_GL_PCI(obj); + + virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev), + TYPE_VIRTIO_GPU_GL); + VIRTIO_GPU_PCI_BASE(obj)->vgpu = VIRTIO_GPU_BASE(&dev->vdev); +} + +static const VirtioPCIDeviceTypeInfo virtio_gpu_gl_pci_info = { + .generic_name = TYPE_VIRTIO_GPU_GL_PCI, + .parent = TYPE_VIRTIO_GPU_PCI_BASE, + .instance_size = sizeof(VirtIOGPUGLPCI), + .instance_init = virtio_gpu_gl_initfn, +}; +module_obj(TYPE_VIRTIO_GPU_GL_PCI); + +static void virtio_gpu_gl_pci_register_types(void) +{ + virtio_pci_types_register(&virtio_gpu_gl_pci_info); +} + +type_init(virtio_gpu_gl_pci_register_types) + +module_dep("hw-display-virtio-gpu-pci"); diff --git a/hw/display/virtio-gpu-pci.c b/hw/display/virtio-gpu-pci.c new file mode 100644 index 000000000..e36eee0c4 --- /dev/null +++ b/hw/display/virtio-gpu-pci.c @@ -0,0 +1,102 @@ +/* + * Virtio video device + * + * Copyright Red Hat + * + * Authors: + * Dave Airlie + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/module.h" +#include "hw/pci/pci.h" +#include "hw/qdev-properties.h" +#include "hw/virtio/virtio.h" +#include "hw/virtio/virtio-bus.h" +#include "hw/virtio/virtio-gpu-pci.h" +#include "qom/object.h" + +static Property virtio_gpu_pci_base_properties[] = { + DEFINE_VIRTIO_GPU_PCI_PROPERTIES(VirtIOPCIProxy), + DEFINE_PROP_END_OF_LIST(), +}; + +static void virtio_gpu_pci_base_realize(VirtIOPCIProxy *vpci_dev, Error **errp) +{ + VirtIOGPUPCIBase *vgpu = VIRTIO_GPU_PCI_BASE(vpci_dev); + VirtIOGPUBase *g = vgpu->vgpu; + DeviceState *vdev = DEVICE(g); + int i; + + virtio_pci_force_virtio_1(vpci_dev); + if (!qdev_realize(vdev, BUS(&vpci_dev->bus), errp)) { + return; + } + + for (i = 0; i < g->conf.max_outputs; i++) { + object_property_set_link(OBJECT(g->scanout[i].con), "device", + OBJECT(vpci_dev), &error_abort); + } +} + +static void virtio_gpu_pci_base_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass); + PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass); + + set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories); + device_class_set_props(dc, virtio_gpu_pci_base_properties); + dc->hotpluggable = false; + k->realize = virtio_gpu_pci_base_realize; + pcidev_k->class_id = PCI_CLASS_DISPLAY_OTHER; +} + +static const TypeInfo virtio_gpu_pci_base_info = { + .name = TYPE_VIRTIO_GPU_PCI_BASE, + .parent = TYPE_VIRTIO_PCI, + .instance_size = sizeof(VirtIOGPUPCIBase), + .class_init = virtio_gpu_pci_base_class_init, + .abstract = true +}; +module_obj(TYPE_VIRTIO_GPU_PCI_BASE); + +#define TYPE_VIRTIO_GPU_PCI "virtio-gpu-pci" +typedef struct VirtIOGPUPCI VirtIOGPUPCI; +DECLARE_INSTANCE_CHECKER(VirtIOGPUPCI, VIRTIO_GPU_PCI, + TYPE_VIRTIO_GPU_PCI) + +struct VirtIOGPUPCI { + VirtIOGPUPCIBase parent_obj; + 
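/* embedded virtio-gpu device, wired up in virtio_gpu_initfn() */ +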
VirtIOGPU vdev; +}; + +static void virtio_gpu_initfn(Object *obj) +{ + VirtIOGPUPCI *dev = VIRTIO_GPU_PCI(obj); + + virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev), + TYPE_VIRTIO_GPU); + VIRTIO_GPU_PCI_BASE(obj)->vgpu = VIRTIO_GPU_BASE(&dev->vdev); +} + +static const VirtioPCIDeviceTypeInfo virtio_gpu_pci_info = { + .generic_name = TYPE_VIRTIO_GPU_PCI, + .parent = TYPE_VIRTIO_GPU_PCI_BASE, + .instance_size = sizeof(VirtIOGPUPCI), + .instance_init = virtio_gpu_initfn, +}; +module_obj(TYPE_VIRTIO_GPU_PCI); + +static void virtio_gpu_pci_register_types(void) +{ + type_register_static(&virtio_gpu_pci_base_info); + virtio_pci_types_register(&virtio_gpu_pci_info); +} + +type_init(virtio_gpu_pci_register_types) diff --git a/hw/display/virtio-gpu-udmabuf-stubs.c b/hw/display/virtio-gpu-udmabuf-stubs.c new file mode 100644 index 000000000..f692e1351 --- /dev/null +++ b/hw/display/virtio-gpu-udmabuf-stubs.c @@ -0,0 +1,28 @@ +#include "qemu/osdep.h" +#include "hw/virtio/virtio-gpu.h" + +bool virtio_gpu_have_udmabuf(void) +{ + /* nothing (stub) */ + return false; +} + +void virtio_gpu_init_udmabuf(struct virtio_gpu_simple_resource *res) +{ + /* nothing (stub) */ +} + +void virtio_gpu_fini_udmabuf(struct virtio_gpu_simple_resource *res) +{ + /* nothing (stub) */ +} + +int virtio_gpu_update_dmabuf(VirtIOGPU *g, + uint32_t scanout_id, + struct virtio_gpu_simple_resource *res, + struct virtio_gpu_framebuffer *fb, + struct virtio_gpu_rect *r) +{ + /* nothing (stub) */ + return 0; +} diff --git a/hw/display/virtio-gpu-udmabuf.c b/hw/display/virtio-gpu-udmabuf.c new file mode 100644 index 000000000..1597921c5 --- /dev/null +++ b/hw/display/virtio-gpu-udmabuf.c @@ -0,0 +1,230 @@ +/* + * Virtio GPU Device + * + * Copyright Red Hat, Inc. 2013-2014 + * + * Authors: + * Dave Airlie + * Gerd Hoffmann + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "qemu-common.h" +#include "qemu/iov.h" +#include "ui/console.h" +#include "hw/virtio/virtio-gpu.h" +#include "hw/virtio/virtio-gpu-pixman.h" +#include "trace.h" +#include "exec/ramblock.h" +#include "sysemu/hostmem.h" +#include <sys/ioctl.h> +#include <fcntl.h> +#include <linux/memfd.h> +#include "qemu/memfd.h" +#include "standard-headers/linux/udmabuf.h" + +static void virtio_gpu_create_udmabuf(struct virtio_gpu_simple_resource *res) +{ + struct udmabuf_create_list *list; + RAMBlock *rb; + ram_addr_t offset; + int udmabuf, i; + + udmabuf = udmabuf_fd(); + if (udmabuf < 0) { + return; + } + + list = g_malloc0(sizeof(struct udmabuf_create_list) + + sizeof(struct udmabuf_create_item) * res->iov_cnt); + + for (i = 0; i < res->iov_cnt; i++) { + rcu_read_lock(); + rb = qemu_ram_block_from_host(res->iov[i].iov_base, false, &offset); + rcu_read_unlock(); + + if (!rb || rb->fd < 0) { + g_free(list); + return; + } + + list->list[i].memfd = rb->fd; + list->list[i].offset = offset; + list->list[i].size = res->iov[i].iov_len; + } + + list->count = res->iov_cnt; + list->flags = UDMABUF_FLAGS_CLOEXEC; + + res->dmabuf_fd = ioctl(udmabuf, UDMABUF_CREATE_LIST, list); + if (res->dmabuf_fd < 0) { + warn_report("%s: UDMABUF_CREATE_LIST: %s", __func__, + strerror(errno)); + } + g_free(list); +} + +static void virtio_gpu_remap_udmabuf(struct virtio_gpu_simple_resource *res) +{ + res->remapped = mmap(NULL, res->blob_size, PROT_READ, + MAP_SHARED, res->dmabuf_fd, 0); + if (res->remapped == MAP_FAILED) { + warn_report("%s: dmabuf mmap failed: %s", __func__, + strerror(errno)); + res->remapped = NULL; + } +} + +static void virtio_gpu_destroy_udmabuf(struct virtio_gpu_simple_resource *res) +{ + if (res->remapped) { + munmap(res->remapped, res->blob_size); + res->remapped = NULL; + } + if (res->dmabuf_fd >= 0) { + close(res->dmabuf_fd); + res->dmabuf_fd = -1; + } +} + +static int find_memory_backend_type(Object *obj, void *opaque) +{ + bool *memfd_backend = opaque; + int ret; + + if (object_dynamic_cast(obj, TYPE_MEMORY_BACKEND)) { + HostMemoryBackend *backend = MEMORY_BACKEND(obj); + RAMBlock *rb = backend->mr.ram_block; + + if (rb && rb->fd > 0) { + ret = fcntl(rb->fd, F_GET_SEALS); + if (ret > 0) { + *memfd_backend = true; + } + } + } + + return 0; +} + +bool virtio_gpu_have_udmabuf(void) +{ + Object *memdev_root; + int udmabuf; + bool memfd_backend = false; + + udmabuf = udmabuf_fd(); + if (udmabuf < 0) { + return false; + } + + memdev_root = object_resolve_path("/objects", NULL); + object_child_foreach(memdev_root, find_memory_backend_type, &memfd_backend); + + return memfd_backend; +} + +void virtio_gpu_init_udmabuf(struct virtio_gpu_simple_resource *res) +{ + void *pdata = NULL; + + res->dmabuf_fd = -1; + if (res->iov_cnt == 1) { + pdata = res->iov[0].iov_base; + } else { + virtio_gpu_create_udmabuf(res); + if (res->dmabuf_fd < 0) { + return; + } + virtio_gpu_remap_udmabuf(res); + if (!res->remapped) { + return; + } + pdata = res->remapped; + } + + res->blob = pdata; +} + +void virtio_gpu_fini_udmabuf(struct virtio_gpu_simple_resource *res) +{ + if (res->remapped) { + virtio_gpu_destroy_udmabuf(res); + } +} + +static void virtio_gpu_free_dmabuf(VirtIOGPU *g, VGPUDMABuf *dmabuf) +{ + struct virtio_gpu_scanout *scanout; + + scanout = &g->parent_obj.scanout[dmabuf->scanout_id]; + dpy_gl_release_dmabuf(scanout->con, &dmabuf->buf); + QTAILQ_REMOVE(&g->dmabuf.bufs, dmabuf, next); + g_free(dmabuf); +} + +static VGPUDMABuf +*virtio_gpu_create_dmabuf(VirtIOGPU *g, + uint32_t scanout_id, + 
struct virtio_gpu_simple_resource *res, + struct virtio_gpu_framebuffer *fb, + struct virtio_gpu_rect *r) +{ + VGPUDMABuf *dmabuf; + + if (res->dmabuf_fd < 0) { + return NULL; + } + + dmabuf = g_new0(VGPUDMABuf, 1); + dmabuf->buf.width = fb->width; + dmabuf->buf.height = fb->height; + dmabuf->buf.stride = fb->stride; + dmabuf->buf.x = r->x; + dmabuf->buf.y = r->y; + dmabuf->buf.scanout_width = r->width; + dmabuf->buf.scanout_height = r->height; + dmabuf->buf.fourcc = qemu_pixman_to_drm_format(fb->format); + dmabuf->buf.fd = res->dmabuf_fd; + dmabuf->buf.allow_fences = true; + dmabuf->buf.draw_submitted = false; + dmabuf->scanout_id = scanout_id; + QTAILQ_INSERT_HEAD(&g->dmabuf.bufs, dmabuf, next); + + return dmabuf; +} + +int virtio_gpu_update_dmabuf(VirtIOGPU *g, + uint32_t scanout_id, + struct virtio_gpu_simple_resource *res, + struct virtio_gpu_framebuffer *fb, + struct virtio_gpu_rect *r) +{ + struct virtio_gpu_scanout *scanout = &g->parent_obj.scanout[scanout_id]; + VGPUDMABuf *new_primary, *old_primary = NULL; + + new_primary = virtio_gpu_create_dmabuf(g, scanout_id, res, fb, r); + if (!new_primary) { + return -EINVAL; + } + + if (g->dmabuf.primary[scanout_id]) { + old_primary = g->dmabuf.primary[scanout_id]; + } + + g->dmabuf.primary[scanout_id] = new_primary; + qemu_console_resize(scanout->con, + new_primary->buf.scanout_width, + new_primary->buf.scanout_height); + dpy_gl_scanout_dmabuf(scanout->con, &new_primary->buf); + + if (old_primary) { + virtio_gpu_free_dmabuf(g, old_primary); + } + + return 0; +} diff --git a/hw/display/virtio-gpu-virgl.c b/hw/display/virtio-gpu-virgl.c new file mode 100644 index 000000000..18d054922 --- /dev/null +++ b/hw/display/virtio-gpu-virgl.c @@ -0,0 +1,634 @@ +/* + * Virtio GPU Device + * + * Copyright Red Hat, Inc. 2013-2014 + * + * Authors: + * Dave Airlie + * Gerd Hoffmann + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ */ + +#include "qemu/osdep.h" +#include "qemu/iov.h" +#include "trace.h" +#include "hw/virtio/virtio.h" +#include "hw/virtio/virtio-gpu.h" + +#include <virglrenderer.h> + +static struct virgl_renderer_callbacks virtio_gpu_3d_cbs; + +static void virgl_cmd_create_resource_2d(VirtIOGPU *g, + struct virtio_gpu_ctrl_command *cmd) +{ + struct virtio_gpu_resource_create_2d c2d; + struct virgl_renderer_resource_create_args args; + + VIRTIO_GPU_FILL_CMD(c2d); + trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format, + c2d.width, c2d.height); + + args.handle = c2d.resource_id; + args.target = 2; + args.format = c2d.format; + args.bind = (1 << 1); + args.width = c2d.width; + args.height = c2d.height; + args.depth = 1; + args.array_size = 1; + args.last_level = 0; + args.nr_samples = 0; + args.flags = VIRTIO_GPU_RESOURCE_FLAG_Y_0_TOP; + virgl_renderer_resource_create(&args, NULL, 0); +} + +static void virgl_cmd_create_resource_3d(VirtIOGPU *g, + struct virtio_gpu_ctrl_command *cmd) +{ + struct virtio_gpu_resource_create_3d c3d; + struct virgl_renderer_resource_create_args args; + + VIRTIO_GPU_FILL_CMD(c3d); + trace_virtio_gpu_cmd_res_create_3d(c3d.resource_id, c3d.format, + c3d.width, c3d.height, c3d.depth); + + args.handle = c3d.resource_id; + args.target = c3d.target; + args.format = c3d.format; + args.bind = c3d.bind; + args.width = c3d.width; + args.height = c3d.height; + args.depth = c3d.depth; + args.array_size = c3d.array_size; + args.last_level = c3d.last_level; + args.nr_samples = c3d.nr_samples; + args.flags = c3d.flags; + virgl_renderer_resource_create(&args, NULL, 0); +} + +static void virgl_cmd_resource_unref(VirtIOGPU *g, + struct virtio_gpu_ctrl_command *cmd) +{ + struct virtio_gpu_resource_unref unref; + struct iovec *res_iovs = NULL; + int num_iovs = 0; + + VIRTIO_GPU_FILL_CMD(unref); + trace_virtio_gpu_cmd_res_unref(unref.resource_id); + + virgl_renderer_resource_detach_iov(unref.resource_id, + &res_iovs, + &num_iovs); + if (res_iovs != NULL && num_iovs != 0) { + virtio_gpu_cleanup_mapping_iov(g, res_iovs, num_iovs); + } + virgl_renderer_resource_unref(unref.resource_id); +} + +static void virgl_cmd_context_create(VirtIOGPU *g, + struct virtio_gpu_ctrl_command *cmd) +{ + struct virtio_gpu_ctx_create cc; + + VIRTIO_GPU_FILL_CMD(cc); + trace_virtio_gpu_cmd_ctx_create(cc.hdr.ctx_id, + cc.debug_name); + + virgl_renderer_context_create(cc.hdr.ctx_id, cc.nlen, + cc.debug_name); +} + +static void virgl_cmd_context_destroy(VirtIOGPU *g, + struct virtio_gpu_ctrl_command *cmd) +{ + struct virtio_gpu_ctx_destroy cd; + + VIRTIO_GPU_FILL_CMD(cd); + trace_virtio_gpu_cmd_ctx_destroy(cd.hdr.ctx_id); + + virgl_renderer_context_destroy(cd.hdr.ctx_id); +} + +static void virtio_gpu_rect_update(VirtIOGPU *g, int idx, int x, int y, + int width, int height) +{ + if (!g->parent_obj.scanout[idx].con) { + return; + } + + dpy_gl_update(g->parent_obj.scanout[idx].con, x, y, width, height); +} + +static void virgl_cmd_resource_flush(VirtIOGPU *g, + struct virtio_gpu_ctrl_command *cmd) +{ + struct virtio_gpu_resource_flush rf; + int i; + + VIRTIO_GPU_FILL_CMD(rf); + trace_virtio_gpu_cmd_res_flush(rf.resource_id, + rf.r.width, rf.r.height, rf.r.x, rf.r.y); + + for (i = 0; i < g->parent_obj.conf.max_outputs; i++) { + if (g->parent_obj.scanout[i].resource_id != rf.resource_id) { + continue; + } + virtio_gpu_rect_update(g, i, rf.r.x, rf.r.y, rf.r.width, rf.r.height); + } +} + +static void virgl_cmd_set_scanout(VirtIOGPU *g, + struct virtio_gpu_ctrl_command *cmd) +{ + struct virtio_gpu_set_scanout ss; + struct 
virgl_renderer_resource_info info; + int ret; + + VIRTIO_GPU_FILL_CMD(ss); + trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id, + ss.r.width, ss.r.height, ss.r.x, ss.r.y); + + if (ss.scanout_id >= g->parent_obj.conf.max_outputs) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d", + __func__, ss.scanout_id); + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID; + return; + } + g->parent_obj.enable = 1; + + memset(&info, 0, sizeof(info)); + + if (ss.resource_id && ss.r.width && ss.r.height) { + ret = virgl_renderer_resource_get_info(ss.resource_id, &info); + if (ret == -1) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: illegal resource specified %d\n", + __func__, ss.resource_id); + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; + return; + } + qemu_console_resize(g->parent_obj.scanout[ss.scanout_id].con, + ss.r.width, ss.r.height); + virgl_renderer_force_ctx_0(); + dpy_gl_scanout_texture( + g->parent_obj.scanout[ss.scanout_id].con, info.tex_id, + info.flags & 1 /* FIXME: Y_0_TOP */, + info.width, info.height, + ss.r.x, ss.r.y, ss.r.width, ss.r.height); + } else { + dpy_gfx_replace_surface( + g->parent_obj.scanout[ss.scanout_id].con, NULL); + dpy_gl_scanout_disable(g->parent_obj.scanout[ss.scanout_id].con); + } + g->parent_obj.scanout[ss.scanout_id].resource_id = ss.resource_id; +} + +static void virgl_cmd_submit_3d(VirtIOGPU *g, + struct virtio_gpu_ctrl_command *cmd) +{ + struct virtio_gpu_cmd_submit cs; + void *buf; + size_t s; + + VIRTIO_GPU_FILL_CMD(cs); + trace_virtio_gpu_cmd_ctx_submit(cs.hdr.ctx_id, cs.size); + + buf = g_malloc(cs.size); + s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num, + sizeof(cs), buf, cs.size); + if (s != cs.size) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: size mismatch (%zd/%d)", + __func__, s, cs.size); + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER; + goto out; + } + + if (virtio_gpu_stats_enabled(g->parent_obj.conf)) { + g->stats.req_3d++; + g->stats.bytes_3d += cs.size; + } + + virgl_renderer_submit_cmd(buf, cs.hdr.ctx_id, cs.size / 4); + +out: + g_free(buf); +} + +static void virgl_cmd_transfer_to_host_2d(VirtIOGPU *g, + struct virtio_gpu_ctrl_command *cmd) +{ + struct virtio_gpu_transfer_to_host_2d t2d; + struct virtio_gpu_box box; + + VIRTIO_GPU_FILL_CMD(t2d); + trace_virtio_gpu_cmd_res_xfer_toh_2d(t2d.resource_id); + + box.x = t2d.r.x; + box.y = t2d.r.y; + box.z = 0; + box.w = t2d.r.width; + box.h = t2d.r.height; + box.d = 1; + + virgl_renderer_transfer_write_iov(t2d.resource_id, + 0, + 0, + 0, + 0, + (struct virgl_box *)&box, + t2d.offset, NULL, 0); +} + +static void virgl_cmd_transfer_to_host_3d(VirtIOGPU *g, + struct virtio_gpu_ctrl_command *cmd) +{ + struct virtio_gpu_transfer_host_3d t3d; + + VIRTIO_GPU_FILL_CMD(t3d); + trace_virtio_gpu_cmd_res_xfer_toh_3d(t3d.resource_id); + + virgl_renderer_transfer_write_iov(t3d.resource_id, + t3d.hdr.ctx_id, + t3d.level, + t3d.stride, + t3d.layer_stride, + (struct virgl_box *)&t3d.box, + t3d.offset, NULL, 0); +} + +static void +virgl_cmd_transfer_from_host_3d(VirtIOGPU *g, + struct virtio_gpu_ctrl_command *cmd) +{ + struct virtio_gpu_transfer_host_3d tf3d; + + VIRTIO_GPU_FILL_CMD(tf3d); + trace_virtio_gpu_cmd_res_xfer_fromh_3d(tf3d.resource_id); + + virgl_renderer_transfer_read_iov(tf3d.resource_id, + tf3d.hdr.ctx_id, + tf3d.level, + tf3d.stride, + tf3d.layer_stride, + (struct virgl_box *)&tf3d.box, + tf3d.offset, NULL, 0); +} + + +static void virgl_resource_attach_backing(VirtIOGPU *g, + struct virtio_gpu_ctrl_command *cmd) +{ + struct 
virtio_gpu_resource_attach_backing att_rb; + struct iovec *res_iovs; + uint32_t res_niov; + int ret; + + VIRTIO_GPU_FILL_CMD(att_rb); + trace_virtio_gpu_cmd_res_back_attach(att_rb.resource_id); + + ret = virtio_gpu_create_mapping_iov(g, att_rb.nr_entries, sizeof(att_rb), + cmd, NULL, &res_iovs, &res_niov); + if (ret != 0) { + cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC; + return; + } + + ret = virgl_renderer_resource_attach_iov(att_rb.resource_id, + res_iovs, res_niov); + + if (ret != 0) + virtio_gpu_cleanup_mapping_iov(g, res_iovs, res_niov); +} + +static void virgl_resource_detach_backing(VirtIOGPU *g, + struct virtio_gpu_ctrl_command *cmd) +{ + struct virtio_gpu_resource_detach_backing detach_rb; + struct iovec *res_iovs = NULL; + int num_iovs = 0; + + VIRTIO_GPU_FILL_CMD(detach_rb); + trace_virtio_gpu_cmd_res_back_detach(detach_rb.resource_id); + + virgl_renderer_resource_detach_iov(detach_rb.resource_id, + &res_iovs, + &num_iovs); + if (res_iovs == NULL || num_iovs == 0) { + return; + } + virtio_gpu_cleanup_mapping_iov(g, res_iovs, num_iovs); +} + + +static void virgl_cmd_ctx_attach_resource(VirtIOGPU *g, + struct virtio_gpu_ctrl_command *cmd) +{ + struct virtio_gpu_ctx_resource att_res; + + VIRTIO_GPU_FILL_CMD(att_res); + trace_virtio_gpu_cmd_ctx_res_attach(att_res.hdr.ctx_id, + att_res.resource_id); + + virgl_renderer_ctx_attach_resource(att_res.hdr.ctx_id, att_res.resource_id); +} + +static void virgl_cmd_ctx_detach_resource(VirtIOGPU *g, + struct virtio_gpu_ctrl_command *cmd) +{ + struct virtio_gpu_ctx_resource det_res; + + VIRTIO_GPU_FILL_CMD(det_res); + trace_virtio_gpu_cmd_ctx_res_detach(det_res.hdr.ctx_id, + det_res.resource_id); + + virgl_renderer_ctx_detach_resource(det_res.hdr.ctx_id, det_res.resource_id); +} + +static void virgl_cmd_get_capset_info(VirtIOGPU *g, + struct virtio_gpu_ctrl_command *cmd) +{ + struct virtio_gpu_get_capset_info info; + struct virtio_gpu_resp_capset_info resp; + + VIRTIO_GPU_FILL_CMD(info); + + memset(&resp, 0, sizeof(resp)); + if (info.capset_index == 0) { + resp.capset_id = VIRTIO_GPU_CAPSET_VIRGL; + virgl_renderer_get_cap_set(resp.capset_id, + &resp.capset_max_version, + &resp.capset_max_size); + } else if (info.capset_index == 1) { + resp.capset_id = VIRTIO_GPU_CAPSET_VIRGL2; + virgl_renderer_get_cap_set(resp.capset_id, + &resp.capset_max_version, + &resp.capset_max_size); + } else { + resp.capset_max_version = 0; + resp.capset_max_size = 0; + } + resp.hdr.type = VIRTIO_GPU_RESP_OK_CAPSET_INFO; + virtio_gpu_ctrl_response(g, cmd, &resp.hdr, sizeof(resp)); +} + +static void virgl_cmd_get_capset(VirtIOGPU *g, + struct virtio_gpu_ctrl_command *cmd) +{ + struct virtio_gpu_get_capset gc; + struct virtio_gpu_resp_capset *resp; + uint32_t max_ver, max_size; + VIRTIO_GPU_FILL_CMD(gc); + + virgl_renderer_get_cap_set(gc.capset_id, &max_ver, + &max_size); + if (!max_size) { + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER; + return; + } + + resp = g_malloc0(sizeof(*resp) + max_size); + resp->hdr.type = VIRTIO_GPU_RESP_OK_CAPSET; + virgl_renderer_fill_caps(gc.capset_id, + gc.capset_version, + (void *)resp->capset_data); + virtio_gpu_ctrl_response(g, cmd, &resp->hdr, sizeof(*resp) + max_size); + g_free(resp); +} + +void virtio_gpu_virgl_process_cmd(VirtIOGPU *g, + struct virtio_gpu_ctrl_command *cmd) +{ + VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr); + + virgl_renderer_force_ctx_0(); + switch (cmd->cmd_hdr.type) { + case VIRTIO_GPU_CMD_CTX_CREATE: + virgl_cmd_context_create(g, cmd); + break; + case VIRTIO_GPU_CMD_CTX_DESTROY: + virgl_cmd_context_destroy(g, cmd); + 
break; + case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D: + virgl_cmd_create_resource_2d(g, cmd); + break; + case VIRTIO_GPU_CMD_RESOURCE_CREATE_3D: + virgl_cmd_create_resource_3d(g, cmd); + break; + case VIRTIO_GPU_CMD_SUBMIT_3D: + virgl_cmd_submit_3d(g, cmd); + break; + case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D: + virgl_cmd_transfer_to_host_2d(g, cmd); + break; + case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D: + virgl_cmd_transfer_to_host_3d(g, cmd); + break; + case VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D: + virgl_cmd_transfer_from_host_3d(g, cmd); + break; + case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING: + virgl_resource_attach_backing(g, cmd); + break; + case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING: + virgl_resource_detach_backing(g, cmd); + break; + case VIRTIO_GPU_CMD_SET_SCANOUT: + virgl_cmd_set_scanout(g, cmd); + break; + case VIRTIO_GPU_CMD_RESOURCE_FLUSH: + virgl_cmd_resource_flush(g, cmd); + break; + case VIRTIO_GPU_CMD_RESOURCE_UNREF: + virgl_cmd_resource_unref(g, cmd); + break; + case VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE: + /* TODO add security */ + virgl_cmd_ctx_attach_resource(g, cmd); + break; + case VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE: + /* TODO add security */ + virgl_cmd_ctx_detach_resource(g, cmd); + break; + case VIRTIO_GPU_CMD_GET_CAPSET_INFO: + virgl_cmd_get_capset_info(g, cmd); + break; + case VIRTIO_GPU_CMD_GET_CAPSET: + virgl_cmd_get_capset(g, cmd); + break; + case VIRTIO_GPU_CMD_GET_DISPLAY_INFO: + virtio_gpu_get_display_info(g, cmd); + break; + case VIRTIO_GPU_CMD_GET_EDID: + virtio_gpu_get_edid(g, cmd); + break; + default: + cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC; + break; + } + + if (cmd->finished) { + return; + } + if (cmd->error) { + fprintf(stderr, "%s: ctrl 0x%x, error 0x%x\n", __func__, + cmd->cmd_hdr.type, cmd->error); + virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error); + return; + } + if (!(cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE)) { + virtio_gpu_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA); + return; + } + + trace_virtio_gpu_fence_ctrl(cmd->cmd_hdr.fence_id, cmd->cmd_hdr.type); + virgl_renderer_create_fence(cmd->cmd_hdr.fence_id, cmd->cmd_hdr.type); +} + +static void virgl_write_fence(void *opaque, uint32_t fence) +{ + VirtIOGPU *g = opaque; + struct virtio_gpu_ctrl_command *cmd, *tmp; + + QTAILQ_FOREACH_SAFE(cmd, &g->fenceq, next, tmp) { + /* + * the guest can end up emitting fences out of order + * so we should check all fenced cmds not just the first one. 
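+ * Hence we retire every command whose fence_id is <= the signalled fence.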
+ */ + if (cmd->cmd_hdr.fence_id > fence) { + continue; + } + trace_virtio_gpu_fence_resp(cmd->cmd_hdr.fence_id); + virtio_gpu_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA); + QTAILQ_REMOVE(&g->fenceq, cmd, next); + g_free(cmd); + g->inflight--; + if (virtio_gpu_stats_enabled(g->parent_obj.conf)) { + fprintf(stderr, "inflight: %3d (-)\r", g->inflight); + } + } +} + +static virgl_renderer_gl_context +virgl_create_context(void *opaque, int scanout_idx, + struct virgl_renderer_gl_ctx_param *params) +{ + VirtIOGPU *g = opaque; + QEMUGLContext ctx; + QEMUGLParams qparams; + + qparams.major_ver = params->major_ver; + qparams.minor_ver = params->minor_ver; + + ctx = dpy_gl_ctx_create(g->parent_obj.scanout[scanout_idx].con, &qparams); + return (virgl_renderer_gl_context)ctx; +} + +static void virgl_destroy_context(void *opaque, virgl_renderer_gl_context ctx) +{ + VirtIOGPU *g = opaque; + QEMUGLContext qctx = (QEMUGLContext)ctx; + + dpy_gl_ctx_destroy(g->parent_obj.scanout[0].con, qctx); +} + +static int virgl_make_context_current(void *opaque, int scanout_idx, + virgl_renderer_gl_context ctx) +{ + VirtIOGPU *g = opaque; + QEMUGLContext qctx = (QEMUGLContext)ctx; + + return dpy_gl_ctx_make_current(g->parent_obj.scanout[scanout_idx].con, + qctx); +} + +static struct virgl_renderer_callbacks virtio_gpu_3d_cbs = { + .version = 1, + .write_fence = virgl_write_fence, + .create_gl_context = virgl_create_context, + .destroy_gl_context = virgl_destroy_context, + .make_current = virgl_make_context_current, +}; + +static void virtio_gpu_print_stats(void *opaque) +{ + VirtIOGPU *g = opaque; + + if (g->stats.requests) { + fprintf(stderr, "stats: vq req %4d, %3d -- 3D %4d (%5d)\n", + g->stats.requests, + g->stats.max_inflight, + g->stats.req_3d, + g->stats.bytes_3d); + g->stats.requests = 0; + g->stats.max_inflight = 0; + g->stats.req_3d = 0; + g->stats.bytes_3d = 0; + } else { + fprintf(stderr, "stats: idle\r"); + } + timer_mod(g->print_stats, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 1000); +} + +static void virtio_gpu_fence_poll(void *opaque) +{ + VirtIOGPU *g = opaque; + + virgl_renderer_poll(); + virtio_gpu_process_cmdq(g); + if (!QTAILQ_EMPTY(&g->cmdq) || !QTAILQ_EMPTY(&g->fenceq)) { + timer_mod(g->fence_poll, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 10); + } +} + +void virtio_gpu_virgl_fence_poll(VirtIOGPU *g) +{ + virtio_gpu_fence_poll(g); +} + +void virtio_gpu_virgl_reset_scanout(VirtIOGPU *g) +{ + int i; + + for (i = 0; i < g->parent_obj.conf.max_outputs; i++) { + dpy_gfx_replace_surface(g->parent_obj.scanout[i].con, NULL); + dpy_gl_scanout_disable(g->parent_obj.scanout[i].con); + } +} + +void virtio_gpu_virgl_reset(VirtIOGPU *g) +{ + virgl_renderer_reset(); +} + +int virtio_gpu_virgl_init(VirtIOGPU *g) +{ + int ret; + + ret = virgl_renderer_init(g, 0, &virtio_gpu_3d_cbs); + if (ret != 0) { + return ret; + } + + g->fence_poll = timer_new_ms(QEMU_CLOCK_VIRTUAL, + virtio_gpu_fence_poll, g); + + if (virtio_gpu_stats_enabled(g->parent_obj.conf)) { + g->print_stats = timer_new_ms(QEMU_CLOCK_VIRTUAL, + virtio_gpu_print_stats, g); + timer_mod(g->print_stats, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 1000); + } + return 0; +} + +int virtio_gpu_virgl_get_num_capsets(VirtIOGPU *g) +{ + uint32_t capset2_max_ver, capset2_max_size; + virgl_renderer_get_cap_set(VIRTIO_GPU_CAPSET_VIRGL2, + &capset2_max_ver, + &capset2_max_size); + + return capset2_max_ver ? 
2 : 1; +} diff --git a/hw/display/virtio-gpu.c b/hw/display/virtio-gpu.c new file mode 100644 index 000000000..d78b9700c --- /dev/null +++ b/hw/display/virtio-gpu.c @@ -0,0 +1,1459 @@ +/* + * Virtio GPU Device + * + * Copyright Red Hat, Inc. 2013-2014 + * + * Authors: + * Dave Airlie + * Gerd Hoffmann + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "qemu/iov.h" +#include "ui/console.h" +#include "trace.h" +#include "sysemu/dma.h" +#include "sysemu/sysemu.h" +#include "hw/virtio/virtio.h" +#include "migration/qemu-file-types.h" +#include "hw/virtio/virtio-gpu.h" +#include "hw/virtio/virtio-gpu-bswap.h" +#include "hw/virtio/virtio-gpu-pixman.h" +#include "hw/virtio/virtio-bus.h" +#include "hw/display/edid.h" +#include "hw/qdev-properties.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "qapi/error.h" +#include "qemu/error-report.h" + +#define VIRTIO_GPU_VM_VERSION 1 + +static struct virtio_gpu_simple_resource* +virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id); +static struct virtio_gpu_simple_resource * +virtio_gpu_find_check_resource(VirtIOGPU *g, uint32_t resource_id, + bool require_backing, + const char *caller, uint32_t *error); + +static void virtio_gpu_cleanup_mapping(VirtIOGPU *g, + struct virtio_gpu_simple_resource *res); + +void virtio_gpu_update_cursor_data(VirtIOGPU *g, + struct virtio_gpu_scanout *s, + uint32_t resource_id) +{ + struct virtio_gpu_simple_resource *res; + uint32_t pixels; + void *data; + + res = virtio_gpu_find_check_resource(g, resource_id, false, + __func__, NULL); + if (!res) { + return; + } + + if (res->blob_size) { + if (res->blob_size < (s->current_cursor->width * + s->current_cursor->height * 4)) { + return; + } + data = res->blob; + } else { + if (pixman_image_get_width(res->image) != s->current_cursor->width || + pixman_image_get_height(res->image) != s->current_cursor->height) { + return; + } + data = pixman_image_get_data(res->image); + } + + pixels = s->current_cursor->width * s->current_cursor->height; + memcpy(s->current_cursor->data, data, + pixels * sizeof(uint32_t)); +} + +static void update_cursor(VirtIOGPU *g, struct virtio_gpu_update_cursor *cursor) +{ + struct virtio_gpu_scanout *s; + VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g); + bool move = cursor->hdr.type == VIRTIO_GPU_CMD_MOVE_CURSOR; + + if (cursor->pos.scanout_id >= g->parent_obj.conf.max_outputs) { + return; + } + s = &g->parent_obj.scanout[cursor->pos.scanout_id]; + + trace_virtio_gpu_update_cursor(cursor->pos.scanout_id, + cursor->pos.x, + cursor->pos.y, + move ? "move" : "update", + cursor->resource_id); + + if (!move) { + if (!s->current_cursor) { + s->current_cursor = cursor_alloc(64, 64); + } + + s->current_cursor->hot_x = cursor->hot_x; + s->current_cursor->hot_y = cursor->hot_y; + + if (cursor->resource_id > 0) { + vgc->update_cursor_data(g, s, cursor->resource_id); + } + dpy_cursor_define(s->con, s->current_cursor); + + s->cursor = *cursor; + } else { + s->cursor.pos.x = cursor->pos.x; + s->cursor.pos.y = cursor->pos.y; + } + dpy_mouse_set(s->con, cursor->pos.x, cursor->pos.y, + cursor->resource_id ? 
1 : 0); +} + +static struct virtio_gpu_simple_resource * +virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id) +{ + struct virtio_gpu_simple_resource *res; + + QTAILQ_FOREACH(res, &g->reslist, next) { + if (res->resource_id == resource_id) { + return res; + } + } + return NULL; +} + +static struct virtio_gpu_simple_resource * +virtio_gpu_find_check_resource(VirtIOGPU *g, uint32_t resource_id, + bool require_backing, + const char *caller, uint32_t *error) +{ + struct virtio_gpu_simple_resource *res; + + res = virtio_gpu_find_resource(g, resource_id); + if (!res) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid resource specified %d\n", + caller, resource_id); + if (error) { + *error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; + } + return NULL; + } + + if (require_backing) { + if (!res->iov || (!res->image && !res->blob)) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: no backing storage %d\n", + caller, resource_id); + if (error) { + *error = VIRTIO_GPU_RESP_ERR_UNSPEC; + } + return NULL; + } + } + + return res; +} + +void virtio_gpu_ctrl_response(VirtIOGPU *g, + struct virtio_gpu_ctrl_command *cmd, + struct virtio_gpu_ctrl_hdr *resp, + size_t resp_len) +{ + size_t s; + + if (cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE) { + resp->flags |= VIRTIO_GPU_FLAG_FENCE; + resp->fence_id = cmd->cmd_hdr.fence_id; + resp->ctx_id = cmd->cmd_hdr.ctx_id; + } + virtio_gpu_ctrl_hdr_bswap(resp); + s = iov_from_buf(cmd->elem.in_sg, cmd->elem.in_num, 0, resp, resp_len); + if (s != resp_len) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: response size incorrect %zu vs %zu\n", + __func__, s, resp_len); + } + virtqueue_push(cmd->vq, &cmd->elem, s); + virtio_notify(VIRTIO_DEVICE(g), cmd->vq); + cmd->finished = true; +} + +void virtio_gpu_ctrl_response_nodata(VirtIOGPU *g, + struct virtio_gpu_ctrl_command *cmd, + enum virtio_gpu_ctrl_type type) +{ + struct virtio_gpu_ctrl_hdr resp; + + memset(&resp, 0, sizeof(resp)); + resp.type = type; + virtio_gpu_ctrl_response(g, cmd, &resp, sizeof(resp)); +} + +void virtio_gpu_get_display_info(VirtIOGPU *g, + struct virtio_gpu_ctrl_command *cmd) +{ + struct virtio_gpu_resp_display_info display_info; + + trace_virtio_gpu_cmd_get_display_info(); + memset(&display_info, 0, sizeof(display_info)); + display_info.hdr.type = VIRTIO_GPU_RESP_OK_DISPLAY_INFO; + virtio_gpu_base_fill_display_info(VIRTIO_GPU_BASE(g), &display_info); + virtio_gpu_ctrl_response(g, cmd, &display_info.hdr, + sizeof(display_info)); +} + +static void +virtio_gpu_generate_edid(VirtIOGPU *g, int scanout, + struct virtio_gpu_resp_edid *edid) +{ + VirtIOGPUBase *b = VIRTIO_GPU_BASE(g); + qemu_edid_info info = { + .width_mm = b->req_state[scanout].width_mm, + .height_mm = b->req_state[scanout].height_mm, + .prefx = b->req_state[scanout].width, + .prefy = b->req_state[scanout].height, + }; + + edid->size = cpu_to_le32(sizeof(edid->edid)); + qemu_edid_generate(edid->edid, sizeof(edid->edid), &info); +} + +void virtio_gpu_get_edid(VirtIOGPU *g, + struct virtio_gpu_ctrl_command *cmd) +{ + struct virtio_gpu_resp_edid edid; + struct virtio_gpu_cmd_get_edid get_edid; + VirtIOGPUBase *b = VIRTIO_GPU_BASE(g); + + VIRTIO_GPU_FILL_CMD(get_edid); + virtio_gpu_bswap_32(&get_edid, sizeof(get_edid)); + + if (get_edid.scanout >= b->conf.max_outputs) { + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER; + return; + } + + trace_virtio_gpu_cmd_get_edid(get_edid.scanout); + memset(&edid, 0, sizeof(edid)); + edid.hdr.type = VIRTIO_GPU_RESP_OK_EDID; + virtio_gpu_generate_edid(g, get_edid.scanout, &edid); + virtio_gpu_ctrl_response(g, cmd, 
&edid.hdr, sizeof(edid)); +} + +static uint32_t calc_image_hostmem(pixman_format_code_t pformat, + uint32_t width, uint32_t height) +{ + /* Copied from pixman/pixman-bits-image.c, skipping the integer overflow check. + * pixman_image_create_bits will fail in case it overflows. + */ + + int bpp = PIXMAN_FORMAT_BPP(pformat); + int stride = ((width * bpp + 0x1f) >> 5) * sizeof(uint32_t); + return height * stride; +} + +static void virtio_gpu_resource_create_2d(VirtIOGPU *g, + struct virtio_gpu_ctrl_command *cmd) +{ + pixman_format_code_t pformat; + struct virtio_gpu_simple_resource *res; + struct virtio_gpu_resource_create_2d c2d; + + VIRTIO_GPU_FILL_CMD(c2d); + virtio_gpu_bswap_32(&c2d, sizeof(c2d)); + trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format, + c2d.width, c2d.height); + + if (c2d.resource_id == 0) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n", + __func__); + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; + return; + } + + res = virtio_gpu_find_resource(g, c2d.resource_id); + if (res) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n", + __func__, c2d.resource_id); + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; + return; + } + + res = g_new0(struct virtio_gpu_simple_resource, 1); + + res->width = c2d.width; + res->height = c2d.height; + res->format = c2d.format; + res->resource_id = c2d.resource_id; + + pformat = virtio_gpu_get_pixman_format(c2d.format); + if (!pformat) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: host couldn't handle guest format %d\n", + __func__, c2d.format); + g_free(res); + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER; + return; + } + + res->hostmem = calc_image_hostmem(pformat, c2d.width, c2d.height); + if (res->hostmem + g->hostmem < g->conf_max_hostmem) { + res->image = pixman_image_create_bits(pformat, + c2d.width, + c2d.height, + NULL, 0); + } + + if (!res->image) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: resource creation failed %d %d %d\n", + __func__, c2d.resource_id, c2d.width, c2d.height); + g_free(res); + cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY; + return; + } + + QTAILQ_INSERT_HEAD(&g->reslist, res, next); + g->hostmem += res->hostmem; +} + +static void virtio_gpu_resource_create_blob(VirtIOGPU *g, + struct virtio_gpu_ctrl_command *cmd) +{ + struct virtio_gpu_simple_resource *res; + struct virtio_gpu_resource_create_blob cblob; + int ret; + + VIRTIO_GPU_FILL_CMD(cblob); + virtio_gpu_create_blob_bswap(&cblob); + trace_virtio_gpu_cmd_res_create_blob(cblob.resource_id, cblob.size); + + if (cblob.resource_id == 0) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n", + __func__); + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; + return; + } + + if (cblob.blob_mem != VIRTIO_GPU_BLOB_MEM_GUEST && + cblob.blob_flags != VIRTIO_GPU_BLOB_FLAG_USE_SHAREABLE) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid memory type\n", + __func__); + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER; + return; + } + + if (virtio_gpu_find_resource(g, cblob.resource_id)) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n", + __func__, cblob.resource_id); + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; + return; + } + + res = g_new0(struct virtio_gpu_simple_resource, 1); + res->resource_id = cblob.resource_id; + res->blob_size = cblob.size; + + ret = virtio_gpu_create_mapping_iov(g, cblob.nr_entries, sizeof(cblob), + cmd, &res->addrs, &res->iov, + &res->iov_cnt); + if (ret != 0) { + cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC; + g_free(res); + return; + } 
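+ + /* + * Give the blob a single linear host mapping: virtio_gpu_init_udmabuf() + * stitches multi-entry iovs together with a host udmabuf where supported. + */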
+ + virtio_gpu_init_udmabuf(res); + QTAILQ_INSERT_HEAD(&g->reslist, res, next); +} + +static void virtio_gpu_disable_scanout(VirtIOGPU *g, int scanout_id) +{ + struct virtio_gpu_scanout *scanout = &g->parent_obj.scanout[scanout_id]; + struct virtio_gpu_simple_resource *res; + + if (scanout->resource_id == 0) { + return; + } + + res = virtio_gpu_find_resource(g, scanout->resource_id); + if (res) { + res->scanout_bitmask &= ~(1 << scanout_id); + } + + dpy_gfx_replace_surface(scanout->con, NULL); + scanout->resource_id = 0; + scanout->ds = NULL; + scanout->width = 0; + scanout->height = 0; +} + +static void virtio_gpu_resource_destroy(VirtIOGPU *g, + struct virtio_gpu_simple_resource *res) +{ + int i; + + if (res->scanout_bitmask) { + for (i = 0; i < g->parent_obj.conf.max_outputs; i++) { + if (res->scanout_bitmask & (1 << i)) { + virtio_gpu_disable_scanout(g, i); + } + } + } + + qemu_pixman_image_unref(res->image); + virtio_gpu_cleanup_mapping(g, res); + QTAILQ_REMOVE(&g->reslist, res, next); + g->hostmem -= res->hostmem; + g_free(res); +} + +static void virtio_gpu_resource_unref(VirtIOGPU *g, + struct virtio_gpu_ctrl_command *cmd) +{ + struct virtio_gpu_simple_resource *res; + struct virtio_gpu_resource_unref unref; + + VIRTIO_GPU_FILL_CMD(unref); + virtio_gpu_bswap_32(&unref, sizeof(unref)); + trace_virtio_gpu_cmd_res_unref(unref.resource_id); + + res = virtio_gpu_find_resource(g, unref.resource_id); + if (!res) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n", + __func__, unref.resource_id); + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; + return; + } + virtio_gpu_resource_destroy(g, res); +} + +static void virtio_gpu_transfer_to_host_2d(VirtIOGPU *g, + struct virtio_gpu_ctrl_command *cmd) +{ + struct virtio_gpu_simple_resource *res; + int h; + uint32_t src_offset, dst_offset, stride; + int bpp; + pixman_format_code_t format; + struct virtio_gpu_transfer_to_host_2d t2d; + + VIRTIO_GPU_FILL_CMD(t2d); + virtio_gpu_t2d_bswap(&t2d); + trace_virtio_gpu_cmd_res_xfer_toh_2d(t2d.resource_id); + + res = virtio_gpu_find_check_resource(g, t2d.resource_id, true, + __func__, &cmd->error); + if (!res || res->blob) { + return; + } + + if (t2d.r.x > res->width || + t2d.r.y > res->height || + t2d.r.width > res->width || + t2d.r.height > res->height || + t2d.r.x + t2d.r.width > res->width || + t2d.r.y + t2d.r.height > res->height) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: transfer bounds outside resource" + " bounds for resource %d: %d %d %d %d vs %d %d\n", + __func__, t2d.resource_id, t2d.r.x, t2d.r.y, + t2d.r.width, t2d.r.height, res->width, res->height); + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER; + return; + } + + format = pixman_image_get_format(res->image); + bpp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(format), 8); + stride = pixman_image_get_stride(res->image); + + if (t2d.offset || t2d.r.x || t2d.r.y || + t2d.r.width != pixman_image_get_width(res->image)) { + void *img_data = pixman_image_get_data(res->image); + for (h = 0; h < t2d.r.height; h++) { + src_offset = t2d.offset + stride * h; + dst_offset = (t2d.r.y + h) * stride + (t2d.r.x * bpp); + + iov_to_buf(res->iov, res->iov_cnt, src_offset, + (uint8_t *)img_data + + dst_offset, t2d.r.width * bpp); + } + } else { + iov_to_buf(res->iov, res->iov_cnt, 0, + pixman_image_get_data(res->image), + pixman_image_get_stride(res->image) + * pixman_image_get_height(res->image)); + } +} + +static void virtio_gpu_resource_flush(VirtIOGPU *g, + struct virtio_gpu_ctrl_command *cmd) +{ + struct virtio_gpu_simple_resource *res; + 
struct virtio_gpu_resource_flush rf; + struct virtio_gpu_scanout *scanout; + pixman_region16_t flush_region; + int i; + + VIRTIO_GPU_FILL_CMD(rf); + virtio_gpu_bswap_32(&rf, sizeof(rf)); + trace_virtio_gpu_cmd_res_flush(rf.resource_id, + rf.r.width, rf.r.height, rf.r.x, rf.r.y); + + res = virtio_gpu_find_check_resource(g, rf.resource_id, false, + __func__, &cmd->error); + if (!res) { + return; + } + + if (res->blob) { + for (i = 0; i < g->parent_obj.conf.max_outputs; i++) { + scanout = &g->parent_obj.scanout[i]; + if (scanout->resource_id == res->resource_id && + console_has_gl(scanout->con)) { + dpy_gl_update(scanout->con, 0, 0, scanout->width, + scanout->height); + } + } + return; + } + + if (!res->blob && + (rf.r.x > res->width || + rf.r.y > res->height || + rf.r.width > res->width || + rf.r.height > res->height || + rf.r.x + rf.r.width > res->width || + rf.r.y + rf.r.height > res->height)) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: flush bounds outside resource" + " bounds for resource %d: %d %d %d %d vs %d %d\n", + __func__, rf.resource_id, rf.r.x, rf.r.y, + rf.r.width, rf.r.height, res->width, res->height); + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER; + return; + } + + pixman_region_init_rect(&flush_region, + rf.r.x, rf.r.y, rf.r.width, rf.r.height); + for (i = 0; i < g->parent_obj.conf.max_outputs; i++) { + pixman_region16_t region, finalregion; + pixman_box16_t *extents; + + if (!(res->scanout_bitmask & (1 << i))) { + continue; + } + scanout = &g->parent_obj.scanout[i]; + + pixman_region_init(&finalregion); + pixman_region_init_rect(®ion, scanout->x, scanout->y, + scanout->width, scanout->height); + + pixman_region_intersect(&finalregion, &flush_region, ®ion); + pixman_region_translate(&finalregion, -scanout->x, -scanout->y); + extents = pixman_region_extents(&finalregion); + /* work out the area we need to update for each console */ + dpy_gfx_update(g->parent_obj.scanout[i].con, + extents->x1, extents->y1, + extents->x2 - extents->x1, + extents->y2 - extents->y1); + + pixman_region_fini(®ion); + pixman_region_fini(&finalregion); + } + pixman_region_fini(&flush_region); +} + +static void virtio_unref_resource(pixman_image_t *image, void *data) +{ + pixman_image_unref(data); +} + +static void virtio_gpu_update_scanout(VirtIOGPU *g, + uint32_t scanout_id, + struct virtio_gpu_simple_resource *res, + struct virtio_gpu_rect *r) +{ + struct virtio_gpu_simple_resource *ores; + struct virtio_gpu_scanout *scanout; + + scanout = &g->parent_obj.scanout[scanout_id]; + ores = virtio_gpu_find_resource(g, scanout->resource_id); + if (ores) { + ores->scanout_bitmask &= ~(1 << scanout_id); + } + + res->scanout_bitmask |= (1 << scanout_id); + scanout->resource_id = res->resource_id; + scanout->x = r->x; + scanout->y = r->y; + scanout->width = r->width; + scanout->height = r->height; +} + +static void virtio_gpu_do_set_scanout(VirtIOGPU *g, + uint32_t scanout_id, + struct virtio_gpu_framebuffer *fb, + struct virtio_gpu_simple_resource *res, + struct virtio_gpu_rect *r, + uint32_t *error) +{ + struct virtio_gpu_scanout *scanout; + uint8_t *data; + + scanout = &g->parent_obj.scanout[scanout_id]; + + if (r->x > fb->width || + r->y > fb->height || + r->width < 16 || + r->height < 16 || + r->width > fb->width || + r->height > fb->height || + r->x + r->width > fb->width || + r->y + r->height > fb->height) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout %d bounds for" + " resource %d, rect (%d,%d)+%d,%d, fb %d %d\n", + __func__, scanout_id, res->resource_id, + r->x, r->y, r->width, r->height, 
+ fb->width, fb->height); + *error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER; + return; + } + + g->parent_obj.enable = 1; + + if (res->blob) { + if (console_has_gl(scanout->con)) { + if (!virtio_gpu_update_dmabuf(g, scanout_id, res, fb, r)) { + virtio_gpu_update_scanout(g, scanout_id, res, r); + return; + } + } + + data = res->blob; + } else { + data = (uint8_t *)pixman_image_get_data(res->image); + } + + /* create a surface for this scanout */ + if ((res->blob && !console_has_gl(scanout->con)) || + !scanout->ds || + surface_data(scanout->ds) != data + fb->offset || + scanout->width != r->width || + scanout->height != r->height) { + pixman_image_t *rect; + void *ptr = data + fb->offset; + rect = pixman_image_create_bits(fb->format, r->width, r->height, + ptr, fb->stride); + + if (res->image) { + pixman_image_ref(res->image); + pixman_image_set_destroy_function(rect, virtio_unref_resource, + res->image); + } + + /* realloc the surface ptr */ + scanout->ds = qemu_create_displaysurface_pixman(rect); + if (!scanout->ds) { + *error = VIRTIO_GPU_RESP_ERR_UNSPEC; + return; + } + + pixman_image_unref(rect); + dpy_gfx_replace_surface(g->parent_obj.scanout[scanout_id].con, + scanout->ds); + } + + virtio_gpu_update_scanout(g, scanout_id, res, r); +} + +static void virtio_gpu_set_scanout(VirtIOGPU *g, + struct virtio_gpu_ctrl_command *cmd) +{ + struct virtio_gpu_simple_resource *res; + struct virtio_gpu_framebuffer fb = { 0 }; + struct virtio_gpu_set_scanout ss; + + VIRTIO_GPU_FILL_CMD(ss); + virtio_gpu_bswap_32(&ss, sizeof(ss)); + trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id, + ss.r.width, ss.r.height, ss.r.x, ss.r.y); + + if (ss.scanout_id >= g->parent_obj.conf.max_outputs) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d", + __func__, ss.scanout_id); + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID; + return; + } + + if (ss.resource_id == 0) { + virtio_gpu_disable_scanout(g, ss.scanout_id); + return; + } + + res = virtio_gpu_find_check_resource(g, ss.resource_id, true, + __func__, &cmd->error); + if (!res) { + return; + } + + fb.format = pixman_image_get_format(res->image); + fb.bytes_pp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(fb.format), 8); + fb.width = pixman_image_get_width(res->image); + fb.height = pixman_image_get_height(res->image); + fb.stride = pixman_image_get_stride(res->image); + fb.offset = ss.r.x * fb.bytes_pp + ss.r.y * fb.stride; + + virtio_gpu_do_set_scanout(g, ss.scanout_id, + &fb, res, &ss.r, &cmd->error); +} + +static void virtio_gpu_set_scanout_blob(VirtIOGPU *g, + struct virtio_gpu_ctrl_command *cmd) +{ + struct virtio_gpu_simple_resource *res; + struct virtio_gpu_framebuffer fb = { 0 }; + struct virtio_gpu_set_scanout_blob ss; + uint64_t fbend; + + VIRTIO_GPU_FILL_CMD(ss); + virtio_gpu_scanout_blob_bswap(&ss); + trace_virtio_gpu_cmd_set_scanout_blob(ss.scanout_id, ss.resource_id, + ss.r.width, ss.r.height, ss.r.x, + ss.r.y); + + if (ss.scanout_id >= g->parent_obj.conf.max_outputs) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d", + __func__, ss.scanout_id); + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID; + return; + } + + if (ss.resource_id == 0) { + virtio_gpu_disable_scanout(g, ss.scanout_id); + return; + } + + res = virtio_gpu_find_check_resource(g, ss.resource_id, true, + __func__, &cmd->error); + if (!res) { + return; + } + + fb.format = virtio_gpu_get_pixman_format(ss.format); + if (!fb.format) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: host couldn't handle guest format %d\n", + __func__, ss.format); + 
cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER; + return; + } + + fb.bytes_pp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(fb.format), 8); + fb.width = ss.width; + fb.height = ss.height; + fb.stride = ss.strides[0]; + fb.offset = ss.offsets[0] + ss.r.x * fb.bytes_pp + ss.r.y * fb.stride; + + fbend = fb.offset; + fbend += fb.stride * (ss.r.height - 1); + fbend += fb.bytes_pp * ss.r.width; + if (fbend > res->blob_size) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: fb end out of range\n", + __func__); + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER; + return; + } + + virtio_gpu_do_set_scanout(g, ss.scanout_id, + &fb, res, &ss.r, &cmd->error); +} + +int virtio_gpu_create_mapping_iov(VirtIOGPU *g, + uint32_t nr_entries, uint32_t offset, + struct virtio_gpu_ctrl_command *cmd, + uint64_t **addr, struct iovec **iov, + uint32_t *niov) +{ + struct virtio_gpu_mem_entry *ents; + size_t esize, s; + int e, v; + + if (nr_entries > 16384) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: nr_entries is too big (%d > 16384)\n", + __func__, nr_entries); + return -1; + } + + esize = sizeof(*ents) * nr_entries; + ents = g_malloc(esize); + s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num, + offset, ents, esize); + if (s != esize) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: command data size incorrect %zu vs %zu\n", + __func__, s, esize); + g_free(ents); + return -1; + } + + *iov = NULL; + if (addr) { + *addr = NULL; + } + for (e = 0, v = 0; e < nr_entries; e++) { + uint64_t a = le64_to_cpu(ents[e].addr); + uint32_t l = le32_to_cpu(ents[e].length); + hwaddr len; + void *map; + + do { + len = l; + map = dma_memory_map(VIRTIO_DEVICE(g)->dma_as, + a, &len, DMA_DIRECTION_TO_DEVICE); + if (!map) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map MMIO memory for" + " element %d\n", __func__, e); + virtio_gpu_cleanup_mapping_iov(g, *iov, v); + g_free(ents); + *iov = NULL; + if (addr) { + g_free(*addr); + *addr = NULL; + } + return -1; + } + + if (!(v % 16)) { + *iov = g_realloc(*iov, sizeof(struct iovec) * (v + 16)); + if (addr) { + *addr = g_realloc(*addr, sizeof(uint64_t) * (v + 16)); + } + } + (*iov)[v].iov_base = map; + (*iov)[v].iov_len = len; + if (addr) { + (*addr)[v] = a; + } + + a += len; + l -= len; + v += 1; + } while (l > 0); + } + *niov = v; + + g_free(ents); + return 0; +} + +void virtio_gpu_cleanup_mapping_iov(VirtIOGPU *g, + struct iovec *iov, uint32_t count) +{ + int i; + + for (i = 0; i < count; i++) { + dma_memory_unmap(VIRTIO_DEVICE(g)->dma_as, + iov[i].iov_base, iov[i].iov_len, + DMA_DIRECTION_TO_DEVICE, + iov[i].iov_len); + } + g_free(iov); +} + +static void virtio_gpu_cleanup_mapping(VirtIOGPU *g, + struct virtio_gpu_simple_resource *res) +{ + virtio_gpu_cleanup_mapping_iov(g, res->iov, res->iov_cnt); + res->iov = NULL; + res->iov_cnt = 0; + g_free(res->addrs); + res->addrs = NULL; + + if (res->blob) { + virtio_gpu_fini_udmabuf(res); + } +} + +static void +virtio_gpu_resource_attach_backing(VirtIOGPU *g, + struct virtio_gpu_ctrl_command *cmd) +{ + struct virtio_gpu_simple_resource *res; + struct virtio_gpu_resource_attach_backing ab; + int ret; + + VIRTIO_GPU_FILL_CMD(ab); + virtio_gpu_bswap_32(&ab, sizeof(ab)); + trace_virtio_gpu_cmd_res_back_attach(ab.resource_id); + + res = virtio_gpu_find_resource(g, ab.resource_id); + if (!res) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n", + __func__, ab.resource_id); + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; + return; + } + + if (res->iov) { + cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC; + return; + } + + ret = 
virtio_gpu_create_mapping_iov(g, ab.nr_entries, sizeof(ab), cmd, + &res->addrs, &res->iov, &res->iov_cnt); + if (ret != 0) { + cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC; + return; + } +} + +static void +virtio_gpu_resource_detach_backing(VirtIOGPU *g, + struct virtio_gpu_ctrl_command *cmd) +{ + struct virtio_gpu_simple_resource *res; + struct virtio_gpu_resource_detach_backing detach; + + VIRTIO_GPU_FILL_CMD(detach); + virtio_gpu_bswap_32(&detach, sizeof(detach)); + trace_virtio_gpu_cmd_res_back_detach(detach.resource_id); + + res = virtio_gpu_find_check_resource(g, detach.resource_id, true, + __func__, &cmd->error); + if (!res) { + return; + } + virtio_gpu_cleanup_mapping(g, res); +} + +void virtio_gpu_simple_process_cmd(VirtIOGPU *g, + struct virtio_gpu_ctrl_command *cmd) +{ + VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr); + virtio_gpu_ctrl_hdr_bswap(&cmd->cmd_hdr); + + switch (cmd->cmd_hdr.type) { + case VIRTIO_GPU_CMD_GET_DISPLAY_INFO: + virtio_gpu_get_display_info(g, cmd); + break; + case VIRTIO_GPU_CMD_GET_EDID: + virtio_gpu_get_edid(g, cmd); + break; + case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D: + virtio_gpu_resource_create_2d(g, cmd); + break; + case VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB: + if (!virtio_gpu_blob_enabled(g->parent_obj.conf)) { + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER; + break; + } + virtio_gpu_resource_create_blob(g, cmd); + break; + case VIRTIO_GPU_CMD_RESOURCE_UNREF: + virtio_gpu_resource_unref(g, cmd); + break; + case VIRTIO_GPU_CMD_RESOURCE_FLUSH: + virtio_gpu_resource_flush(g, cmd); + break; + case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D: + virtio_gpu_transfer_to_host_2d(g, cmd); + break; + case VIRTIO_GPU_CMD_SET_SCANOUT: + virtio_gpu_set_scanout(g, cmd); + break; + case VIRTIO_GPU_CMD_SET_SCANOUT_BLOB: + if (!virtio_gpu_blob_enabled(g->parent_obj.conf)) { + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER; + break; + } + virtio_gpu_set_scanout_blob(g, cmd); + break; + case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING: + virtio_gpu_resource_attach_backing(g, cmd); + break; + case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING: + virtio_gpu_resource_detach_backing(g, cmd); + break; + default: + cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC; + break; + } + if (!cmd->finished) { + if (!g->parent_obj.renderer_blocked) { + virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error ? 
cmd->error : + VIRTIO_GPU_RESP_OK_NODATA); + } + } +} + +static void virtio_gpu_handle_ctrl_cb(VirtIODevice *vdev, VirtQueue *vq) +{ + VirtIOGPU *g = VIRTIO_GPU(vdev); + qemu_bh_schedule(g->ctrl_bh); +} + +static void virtio_gpu_handle_cursor_cb(VirtIODevice *vdev, VirtQueue *vq) +{ + VirtIOGPU *g = VIRTIO_GPU(vdev); + qemu_bh_schedule(g->cursor_bh); +} + +void virtio_gpu_process_cmdq(VirtIOGPU *g) +{ + struct virtio_gpu_ctrl_command *cmd; + VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g); + + if (g->processing_cmdq) { + return; + } + g->processing_cmdq = true; + while (!QTAILQ_EMPTY(&g->cmdq)) { + cmd = QTAILQ_FIRST(&g->cmdq); + + if (g->parent_obj.renderer_blocked) { + break; + } + + /* process command */ + vgc->process_cmd(g, cmd); + + QTAILQ_REMOVE(&g->cmdq, cmd, next); + if (virtio_gpu_stats_enabled(g->parent_obj.conf)) { + g->stats.requests++; + } + + if (!cmd->finished) { + QTAILQ_INSERT_TAIL(&g->fenceq, cmd, next); + g->inflight++; + if (virtio_gpu_stats_enabled(g->parent_obj.conf)) { + if (g->stats.max_inflight < g->inflight) { + g->stats.max_inflight = g->inflight; + } + fprintf(stderr, "inflight: %3d (+)\r", g->inflight); + } + } else { + g_free(cmd); + } + } + g->processing_cmdq = false; +} + +static void virtio_gpu_process_fenceq(VirtIOGPU *g) +{ + struct virtio_gpu_ctrl_command *cmd, *tmp; + + QTAILQ_FOREACH_SAFE(cmd, &g->fenceq, next, tmp) { + trace_virtio_gpu_fence_resp(cmd->cmd_hdr.fence_id); + virtio_gpu_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA); + QTAILQ_REMOVE(&g->fenceq, cmd, next); + g_free(cmd); + g->inflight--; + if (virtio_gpu_stats_enabled(g->parent_obj.conf)) { + fprintf(stderr, "inflight: %3d (-)\r", g->inflight); + } + } +} + +static void virtio_gpu_handle_gl_flushed(VirtIOGPUBase *b) +{ + VirtIOGPU *g = container_of(b, VirtIOGPU, parent_obj); + + virtio_gpu_process_fenceq(g); + virtio_gpu_process_cmdq(g); +} + +static void virtio_gpu_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq) +{ + VirtIOGPU *g = VIRTIO_GPU(vdev); + struct virtio_gpu_ctrl_command *cmd; + + if (!virtio_queue_ready(vq)) { + return; + } + + cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command)); + while (cmd) { + cmd->vq = vq; + cmd->error = 0; + cmd->finished = false; + QTAILQ_INSERT_TAIL(&g->cmdq, cmd, next); + cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command)); + } + + virtio_gpu_process_cmdq(g); +} + +static void virtio_gpu_ctrl_bh(void *opaque) +{ + VirtIOGPU *g = opaque; + VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g); + + vgc->handle_ctrl(&g->parent_obj.parent_obj, g->ctrl_vq); +} + +static void virtio_gpu_handle_cursor(VirtIODevice *vdev, VirtQueue *vq) +{ + VirtIOGPU *g = VIRTIO_GPU(vdev); + VirtQueueElement *elem; + size_t s; + struct virtio_gpu_update_cursor cursor_info; + + if (!virtio_queue_ready(vq)) { + return; + } + for (;;) { + elem = virtqueue_pop(vq, sizeof(VirtQueueElement)); + if (!elem) { + break; + } + + s = iov_to_buf(elem->out_sg, elem->out_num, 0, + &cursor_info, sizeof(cursor_info)); + if (s != sizeof(cursor_info)) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: cursor size incorrect %zu vs %zu\n", + __func__, s, sizeof(cursor_info)); + } else { + virtio_gpu_bswap_32(&cursor_info, sizeof(cursor_info)); + update_cursor(g, &cursor_info); + } + virtqueue_push(vq, elem, 0); + virtio_notify(vdev, vq); + g_free(elem); + } +} + +static void virtio_gpu_cursor_bh(void *opaque) +{ + VirtIOGPU *g = opaque; + virtio_gpu_handle_cursor(&g->parent_obj.parent_obj, g->cursor_vq); +} + +static const VMStateDescription vmstate_virtio_gpu_scanout = { + .name 
= "virtio-gpu-one-scanout", + .version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(resource_id, struct virtio_gpu_scanout), + VMSTATE_UINT32(width, struct virtio_gpu_scanout), + VMSTATE_UINT32(height, struct virtio_gpu_scanout), + VMSTATE_INT32(x, struct virtio_gpu_scanout), + VMSTATE_INT32(y, struct virtio_gpu_scanout), + VMSTATE_UINT32(cursor.resource_id, struct virtio_gpu_scanout), + VMSTATE_UINT32(cursor.hot_x, struct virtio_gpu_scanout), + VMSTATE_UINT32(cursor.hot_y, struct virtio_gpu_scanout), + VMSTATE_UINT32(cursor.pos.x, struct virtio_gpu_scanout), + VMSTATE_UINT32(cursor.pos.y, struct virtio_gpu_scanout), + VMSTATE_END_OF_LIST() + }, +}; + +static const VMStateDescription vmstate_virtio_gpu_scanouts = { + .name = "virtio-gpu-scanouts", + .version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_INT32(parent_obj.enable, struct VirtIOGPU), + VMSTATE_UINT32_EQUAL(parent_obj.conf.max_outputs, + struct VirtIOGPU, NULL), + VMSTATE_STRUCT_VARRAY_UINT32(parent_obj.scanout, struct VirtIOGPU, + parent_obj.conf.max_outputs, 1, + vmstate_virtio_gpu_scanout, + struct virtio_gpu_scanout), + VMSTATE_END_OF_LIST() + }, +}; + +static int virtio_gpu_save(QEMUFile *f, void *opaque, size_t size, + const VMStateField *field, JSONWriter *vmdesc) +{ + VirtIOGPU *g = opaque; + struct virtio_gpu_simple_resource *res; + int i; + + /* in 2d mode we should never find unprocessed commands here */ + assert(QTAILQ_EMPTY(&g->cmdq)); + + QTAILQ_FOREACH(res, &g->reslist, next) { + qemu_put_be32(f, res->resource_id); + qemu_put_be32(f, res->width); + qemu_put_be32(f, res->height); + qemu_put_be32(f, res->format); + qemu_put_be32(f, res->iov_cnt); + for (i = 0; i < res->iov_cnt; i++) { + qemu_put_be64(f, res->addrs[i]); + qemu_put_be32(f, res->iov[i].iov_len); + } + qemu_put_buffer(f, (void *)pixman_image_get_data(res->image), + pixman_image_get_stride(res->image) * res->height); + } + qemu_put_be32(f, 0); /* end of list */ + + return vmstate_save_state(f, &vmstate_virtio_gpu_scanouts, g, NULL); +} + +static int virtio_gpu_load(QEMUFile *f, void *opaque, size_t size, + const VMStateField *field) +{ + VirtIOGPU *g = opaque; + struct virtio_gpu_simple_resource *res; + struct virtio_gpu_scanout *scanout; + uint32_t resource_id, pformat; + int i; + + g->hostmem = 0; + + resource_id = qemu_get_be32(f); + while (resource_id != 0) { + res = virtio_gpu_find_resource(g, resource_id); + if (res) { + return -EINVAL; + } + + res = g_new0(struct virtio_gpu_simple_resource, 1); + res->resource_id = resource_id; + res->width = qemu_get_be32(f); + res->height = qemu_get_be32(f); + res->format = qemu_get_be32(f); + res->iov_cnt = qemu_get_be32(f); + + /* allocate */ + pformat = virtio_gpu_get_pixman_format(res->format); + if (!pformat) { + g_free(res); + return -EINVAL; + } + res->image = pixman_image_create_bits(pformat, + res->width, res->height, + NULL, 0); + if (!res->image) { + g_free(res); + return -EINVAL; + } + + res->hostmem = calc_image_hostmem(pformat, res->width, res->height); + + res->addrs = g_new(uint64_t, res->iov_cnt); + res->iov = g_new(struct iovec, res->iov_cnt); + + /* read data */ + for (i = 0; i < res->iov_cnt; i++) { + res->addrs[i] = qemu_get_be64(f); + res->iov[i].iov_len = qemu_get_be32(f); + } + qemu_get_buffer(f, (void *)pixman_image_get_data(res->image), + pixman_image_get_stride(res->image) * res->height); + + /* restore mapping */ + for (i = 0; i < res->iov_cnt; i++) { + hwaddr len = res->iov[i].iov_len; + res->iov[i].iov_base = + dma_memory_map(VIRTIO_DEVICE(g)->dma_as, + res->addrs[i], 
&len, DMA_DIRECTION_TO_DEVICE); + + if (!res->iov[i].iov_base || len != res->iov[i].iov_len) { + /* Clean up the half-a-mapping we just created... */ + if (res->iov[i].iov_base) { + dma_memory_unmap(VIRTIO_DEVICE(g)->dma_as, + res->iov[i].iov_base, + len, + DMA_DIRECTION_TO_DEVICE, + 0); + } + /* ...and the mappings for previous loop iterations */ + res->iov_cnt = i; + virtio_gpu_cleanup_mapping(g, res); + pixman_image_unref(res->image); + g_free(res); + return -EINVAL; + } + } + + QTAILQ_INSERT_HEAD(&g->reslist, res, next); + g->hostmem += res->hostmem; + + resource_id = qemu_get_be32(f); + } + + /* load & apply scanout state */ + vmstate_load_state(f, &vmstate_virtio_gpu_scanouts, g, 1); + for (i = 0; i < g->parent_obj.conf.max_outputs; i++) { + scanout = &g->parent_obj.scanout[i]; + if (!scanout->resource_id) { + continue; + } + res = virtio_gpu_find_resource(g, scanout->resource_id); + if (!res) { + return -EINVAL; + } + scanout->ds = qemu_create_displaysurface_pixman(res->image); + if (!scanout->ds) { + return -EINVAL; + } + + dpy_gfx_replace_surface(scanout->con, scanout->ds); + dpy_gfx_update_full(scanout->con); + if (scanout->cursor.resource_id) { + update_cursor(g, &scanout->cursor); + } + res->scanout_bitmask |= (1 << i); + } + + return 0; +} + +void virtio_gpu_device_realize(DeviceState *qdev, Error **errp) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(qdev); + VirtIOGPU *g = VIRTIO_GPU(qdev); + + if (virtio_gpu_blob_enabled(g->parent_obj.conf)) { + if (!virtio_gpu_have_udmabuf()) { + error_setg(errp, "cannot enable blob resources without udmabuf"); + return; + } + + if (virtio_gpu_virgl_enabled(g->parent_obj.conf)) { + error_setg(errp, "blobs and virgl are not compatible (yet)"); + return; + } + } + + if (!virtio_gpu_base_device_realize(qdev, + virtio_gpu_handle_ctrl_cb, + virtio_gpu_handle_cursor_cb, + errp)) { + return; + } + + g->ctrl_vq = virtio_get_queue(vdev, 0); + g->cursor_vq = virtio_get_queue(vdev, 1); + g->ctrl_bh = qemu_bh_new(virtio_gpu_ctrl_bh, g); + g->cursor_bh = qemu_bh_new(virtio_gpu_cursor_bh, g); + QTAILQ_INIT(&g->reslist); + QTAILQ_INIT(&g->cmdq); + QTAILQ_INIT(&g->fenceq); +} + +void virtio_gpu_reset(VirtIODevice *vdev) +{ + VirtIOGPU *g = VIRTIO_GPU(vdev); + struct virtio_gpu_simple_resource *res, *tmp; + struct virtio_gpu_ctrl_command *cmd; + + QTAILQ_FOREACH_SAFE(res, &g->reslist, next, tmp) { + virtio_gpu_resource_destroy(g, res); + } + + while (!QTAILQ_EMPTY(&g->cmdq)) { + cmd = QTAILQ_FIRST(&g->cmdq); + QTAILQ_REMOVE(&g->cmdq, cmd, next); + g_free(cmd); + } + + while (!QTAILQ_EMPTY(&g->fenceq)) { + cmd = QTAILQ_FIRST(&g->fenceq); + QTAILQ_REMOVE(&g->fenceq, cmd, next); + g->inflight--; + g_free(cmd); + } + + virtio_gpu_base_reset(VIRTIO_GPU_BASE(vdev)); +} + +static void +virtio_gpu_get_config(VirtIODevice *vdev, uint8_t *config) +{ + VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev); + + memcpy(config, &g->virtio_config, sizeof(g->virtio_config)); +} + +static void +virtio_gpu_set_config(VirtIODevice *vdev, const uint8_t *config) +{ + VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev); + const struct virtio_gpu_config *vgconfig = + (const struct virtio_gpu_config *)config; + + if (vgconfig->events_clear) { + g->virtio_config.events_read &= ~vgconfig->events_clear; + } +} + +/* + * For historical reasons virtio_gpu does not adhere to the virtio migration + * scheme described in doc/virtio-migration.txt, in the sense that no + * save/load callbacks are provided to the core. Instead the device data + * is saved/loaded after the core data. 
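+ * (See virtio_gpu_save() and virtio_gpu_load() above.)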
+ * + * Because of this we need a special vmsd. + */ +static const VMStateDescription vmstate_virtio_gpu = { + .name = "virtio-gpu", + .minimum_version_id = VIRTIO_GPU_VM_VERSION, + .version_id = VIRTIO_GPU_VM_VERSION, + .fields = (VMStateField[]) { + VMSTATE_VIRTIO_DEVICE /* core */, + { + .name = "virtio-gpu", + .info = &(const VMStateInfo) { + .name = "virtio-gpu", + .get = virtio_gpu_load, + .put = virtio_gpu_save, + }, + .flags = VMS_SINGLE, + } /* device */, + VMSTATE_END_OF_LIST() + }, +}; + +static Property virtio_gpu_properties[] = { + VIRTIO_GPU_BASE_PROPERTIES(VirtIOGPU, parent_obj.conf), + DEFINE_PROP_SIZE("max_hostmem", VirtIOGPU, conf_max_hostmem, + 256 * MiB), + DEFINE_PROP_BIT("blob", VirtIOGPU, parent_obj.conf.flags, + VIRTIO_GPU_FLAG_BLOB_ENABLED, false), + DEFINE_PROP_END_OF_LIST(), +}; + +static void virtio_gpu_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass); + VirtIOGPUClass *vgc = VIRTIO_GPU_CLASS(klass); + VirtIOGPUBaseClass *vgbc = &vgc->parent; + + vgc->handle_ctrl = virtio_gpu_handle_ctrl; + vgc->process_cmd = virtio_gpu_simple_process_cmd; + vgc->update_cursor_data = virtio_gpu_update_cursor_data; + vgbc->gl_flushed = virtio_gpu_handle_gl_flushed; + + vdc->realize = virtio_gpu_device_realize; + vdc->reset = virtio_gpu_reset; + vdc->get_config = virtio_gpu_get_config; + vdc->set_config = virtio_gpu_set_config; + + dc->vmsd = &vmstate_virtio_gpu; + device_class_set_props(dc, virtio_gpu_properties); +} + +static const TypeInfo virtio_gpu_info = { + .name = TYPE_VIRTIO_GPU, + .parent = TYPE_VIRTIO_GPU_BASE, + .instance_size = sizeof(VirtIOGPU), + .class_size = sizeof(VirtIOGPUClass), + .class_init = virtio_gpu_class_init, +}; +module_obj(TYPE_VIRTIO_GPU); + +static void virtio_register_types(void) +{ + type_register_static(&virtio_gpu_info); +} + +type_init(virtio_register_types) diff --git a/hw/display/virtio-vga-gl.c b/hw/display/virtio-vga-gl.c new file mode 100644 index 000000000..f22549097 --- /dev/null +++ b/hw/display/virtio-vga-gl.c @@ -0,0 +1,50 @@ +#include "qemu/osdep.h" +#include "hw/pci/pci.h" +#include "hw/qdev-properties.h" +#include "hw/virtio/virtio-gpu.h" +#include "hw/display/vga.h" +#include "qapi/error.h" +#include "qemu/module.h" +#include "virtio-vga.h" +#include "qom/object.h" + +#define TYPE_VIRTIO_VGA_GL "virtio-vga-gl" + +typedef struct VirtIOVGAGL VirtIOVGAGL; +DECLARE_INSTANCE_CHECKER(VirtIOVGAGL, VIRTIO_VGA_GL, + TYPE_VIRTIO_VGA_GL) + +struct VirtIOVGAGL { + VirtIOVGABase parent_obj; + + VirtIOGPUGL vdev; +}; + +static void virtio_vga_gl_inst_initfn(Object *obj) +{ + VirtIOVGAGL *dev = VIRTIO_VGA_GL(obj); + + virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev), + TYPE_VIRTIO_GPU_GL); + VIRTIO_VGA_BASE(dev)->vgpu = VIRTIO_GPU_BASE(&dev->vdev); +} + + +static VirtioPCIDeviceTypeInfo virtio_vga_gl_info = { + .generic_name = TYPE_VIRTIO_VGA_GL, + .parent = TYPE_VIRTIO_VGA_BASE, + .instance_size = sizeof(VirtIOVGAGL), + .instance_init = virtio_vga_gl_inst_initfn, +}; +module_obj(TYPE_VIRTIO_VGA_GL); + +static void virtio_vga_register_types(void) +{ + if (have_vga) { + virtio_pci_types_register(&virtio_vga_gl_info); + } +} + +type_init(virtio_vga_register_types) + +module_dep("hw-display-virtio-vga"); diff --git a/hw/display/virtio-vga.c b/hw/display/virtio-vga.c new file mode 100644 index 000000000..9e57f61e9 --- /dev/null +++ b/hw/display/virtio-vga.c @@ -0,0 +1,280 @@ +#include "qemu/osdep.h" +#include "hw/pci/pci.h" +#include 
"hw/qdev-properties.h" +#include "hw/virtio/virtio-gpu.h" +#include "qapi/error.h" +#include "qemu/module.h" +#include "virtio-vga.h" +#include "qom/object.h" + +static void virtio_vga_base_invalidate_display(void *opaque) +{ + VirtIOVGABase *vvga = opaque; + VirtIOGPUBase *g = vvga->vgpu; + + if (g->enable) { + g->hw_ops->invalidate(g); + } else { + vvga->vga.hw_ops->invalidate(&vvga->vga); + } +} + +static void virtio_vga_base_update_display(void *opaque) +{ + VirtIOVGABase *vvga = opaque; + VirtIOGPUBase *g = vvga->vgpu; + + if (g->enable) { + g->hw_ops->gfx_update(g); + } else { + vvga->vga.hw_ops->gfx_update(&vvga->vga); + } +} + +static void virtio_vga_base_text_update(void *opaque, console_ch_t *chardata) +{ + VirtIOVGABase *vvga = opaque; + VirtIOGPUBase *g = vvga->vgpu; + + if (g->enable) { + if (g->hw_ops->text_update) { + g->hw_ops->text_update(g, chardata); + } + } else { + if (vvga->vga.hw_ops->text_update) { + vvga->vga.hw_ops->text_update(&vvga->vga, chardata); + } + } +} + +static int virtio_vga_base_ui_info(void *opaque, uint32_t idx, QemuUIInfo *info) +{ + VirtIOVGABase *vvga = opaque; + VirtIOGPUBase *g = vvga->vgpu; + + if (g->hw_ops->ui_info) { + return g->hw_ops->ui_info(g, idx, info); + } + return -1; +} + +static void virtio_vga_base_gl_block(void *opaque, bool block) +{ + VirtIOVGABase *vvga = opaque; + VirtIOGPUBase *g = vvga->vgpu; + + if (g->hw_ops->gl_block) { + g->hw_ops->gl_block(g, block); + } +} + +static void virtio_vga_base_gl_flushed(void *opaque) +{ + VirtIOVGABase *vvga = opaque; + VirtIOGPUBase *g = vvga->vgpu; + + if (g->hw_ops->gl_flushed) { + g->hw_ops->gl_flushed(g); + } +} + +static int virtio_vga_base_get_flags(void *opaque) +{ + VirtIOVGABase *vvga = opaque; + VirtIOGPUBase *g = vvga->vgpu; + + return g->hw_ops->get_flags(g); +} + +static const GraphicHwOps virtio_vga_base_ops = { + .get_flags = virtio_vga_base_get_flags, + .invalidate = virtio_vga_base_invalidate_display, + .gfx_update = virtio_vga_base_update_display, + .text_update = virtio_vga_base_text_update, + .ui_info = virtio_vga_base_ui_info, + .gl_block = virtio_vga_base_gl_block, + .gl_flushed = virtio_vga_base_gl_flushed, +}; + +static const VMStateDescription vmstate_virtio_vga_base = { + .name = "virtio-vga", + .version_id = 2, + .minimum_version_id = 2, + .fields = (VMStateField[]) { + /* no pci stuff here, saving the virtio device will handle that */ + VMSTATE_STRUCT(vga, VirtIOVGABase, 0, + vmstate_vga_common, VGACommonState), + VMSTATE_END_OF_LIST() + } +}; + +/* VGA device wrapper around PCI device around virtio GPU */ +static void virtio_vga_base_realize(VirtIOPCIProxy *vpci_dev, Error **errp) +{ + VirtIOVGABase *vvga = VIRTIO_VGA_BASE(vpci_dev); + VirtIOGPUBase *g = vvga->vgpu; + VGACommonState *vga = &vvga->vga; + uint32_t offset; + int i; + + /* init vga compat bits */ + vga->vram_size_mb = 8; + vga_common_init(vga, OBJECT(vpci_dev)); + vga_init(vga, OBJECT(vpci_dev), pci_address_space(&vpci_dev->pci_dev), + pci_address_space_io(&vpci_dev->pci_dev), true); + pci_register_bar(&vpci_dev->pci_dev, 0, + PCI_BASE_ADDRESS_MEM_PREFETCH, &vga->vram); + + /* + * Configure virtio bar and regions + * + * We use bar #2 for the mmio regions, to be compatible with stdvga. + * virtio regions are moved to the end of bar #2, to make room for + * the stdvga mmio registers at the start of bar #2. 
+ */ + vpci_dev->modern_mem_bar_idx = 2; + vpci_dev->msix_bar_idx = 4; + vpci_dev->modern_io_bar_idx = 5; + + if (!(vpci_dev->flags & VIRTIO_PCI_FLAG_PAGE_PER_VQ)) { + /* + * With page-per-vq=off there is no padding space we can use + * for the stdvga registers. Make the common and isr regions + * smaller in that case. + */ + vpci_dev->common.size /= 2; + vpci_dev->isr.size /= 2; + } + + offset = memory_region_size(&vpci_dev->modern_bar); + offset -= vpci_dev->notify.size; + vpci_dev->notify.offset = offset; + offset -= vpci_dev->device.size; + vpci_dev->device.offset = offset; + offset -= vpci_dev->isr.size; + vpci_dev->isr.offset = offset; + offset -= vpci_dev->common.size; + vpci_dev->common.offset = offset; + + /* init virtio bits */ + virtio_pci_force_virtio_1(vpci_dev); + if (!qdev_realize(DEVICE(g), BUS(&vpci_dev->bus), errp)) { + return; + } + + /* add stdvga mmio regions */ + pci_std_vga_mmio_region_init(vga, OBJECT(vvga), &vpci_dev->modern_bar, + vvga->vga_mrs, true, false); + + vga->con = g->scanout[0].con; + graphic_console_set_hwops(vga->con, &virtio_vga_base_ops, vvga); + + for (i = 0; i < g->conf.max_outputs; i++) { + object_property_set_link(OBJECT(g->scanout[i].con), "device", + OBJECT(vpci_dev), &error_abort); + } +} + +static void virtio_vga_base_reset(DeviceState *dev) +{ + VirtIOVGABaseClass *klass = VIRTIO_VGA_BASE_GET_CLASS(dev); + VirtIOVGABase *vvga = VIRTIO_VGA_BASE(dev); + + /* reset virtio-gpu */ + klass->parent_reset(dev); + + /* reset vga */ + vga_common_reset(&vvga->vga); + vga_dirty_log_start(&vvga->vga); +} + +static bool virtio_vga_get_big_endian_fb(Object *obj, Error **errp) +{ + VirtIOVGABase *d = VIRTIO_VGA_BASE(obj); + + return d->vga.big_endian_fb; +} + +static void virtio_vga_set_big_endian_fb(Object *obj, bool value, Error **errp) +{ + VirtIOVGABase *d = VIRTIO_VGA_BASE(obj); + + d->vga.big_endian_fb = value; +} + +static Property virtio_vga_base_properties[] = { + DEFINE_VIRTIO_GPU_PCI_PROPERTIES(VirtIOPCIProxy), + DEFINE_PROP_END_OF_LIST(), +}; + +static void virtio_vga_base_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass); + VirtIOVGABaseClass *v = VIRTIO_VGA_BASE_CLASS(klass); + PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass); + + set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories); + device_class_set_props(dc, virtio_vga_base_properties); + dc->vmsd = &vmstate_virtio_vga_base; + dc->hotpluggable = false; + device_class_set_parent_reset(dc, virtio_vga_base_reset, + &v->parent_reset); + + k->realize = virtio_vga_base_realize; + pcidev_k->romfile = "vgabios-virtio.bin"; + pcidev_k->class_id = PCI_CLASS_DISPLAY_VGA; + + /* Expose framebuffer byteorder via QOM */ + object_class_property_add_bool(klass, "big-endian-framebuffer", + virtio_vga_get_big_endian_fb, + virtio_vga_set_big_endian_fb); +} + +static TypeInfo virtio_vga_base_info = { + .name = TYPE_VIRTIO_VGA_BASE, + .parent = TYPE_VIRTIO_PCI, + .instance_size = sizeof(VirtIOVGABase), + .class_size = sizeof(VirtIOVGABaseClass), + .class_init = virtio_vga_base_class_init, + .abstract = true, +}; +module_obj(TYPE_VIRTIO_VGA_BASE); + +#define TYPE_VIRTIO_VGA "virtio-vga" + +typedef struct VirtIOVGA VirtIOVGA; +DECLARE_INSTANCE_CHECKER(VirtIOVGA, VIRTIO_VGA, + TYPE_VIRTIO_VGA) + +struct VirtIOVGA { + VirtIOVGABase parent_obj; + + VirtIOGPU vdev; +}; + +static void virtio_vga_inst_initfn(Object *obj) +{ + VirtIOVGA *dev = VIRTIO_VGA(obj); + + virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev), + TYPE_VIRTIO_GPU);
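+ /* the GPU was just initialised as our QOM child; publish it so the + * shared VirtIOVGABase code can reach it */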
+ VIRTIO_VGA_BASE(dev)->vgpu = VIRTIO_GPU_BASE(&dev->vdev); +} + + +static VirtioPCIDeviceTypeInfo virtio_vga_info = { + .generic_name = TYPE_VIRTIO_VGA, + .parent = TYPE_VIRTIO_VGA_BASE, + .instance_size = sizeof(VirtIOVGA), + .instance_init = virtio_vga_inst_initfn, +}; +module_obj(TYPE_VIRTIO_VGA); + +static void virtio_vga_register_types(void) +{ + type_register_static(&virtio_vga_base_info); + virtio_pci_types_register(&virtio_vga_info); +} + +type_init(virtio_vga_register_types) diff --git a/hw/display/virtio-vga.h b/hw/display/virtio-vga.h new file mode 100644 index 000000000..977ad5edc --- /dev/null +++ b/hw/display/virtio-vga.h @@ -0,0 +1,29 @@ +#ifndef VIRTIO_VGA_H +#define VIRTIO_VGA_H + +#include "hw/virtio/virtio-gpu-pci.h" +#include "vga_int.h" +#include "qom/object.h" + +/* + * virtio-vga-base: This extends VirtioPCIProxy. + */ +#define TYPE_VIRTIO_VGA_BASE "virtio-vga-base" +OBJECT_DECLARE_TYPE(VirtIOVGABase, VirtIOVGABaseClass, + VIRTIO_VGA_BASE) + +struct VirtIOVGABase { + VirtIOPCIProxy parent_obj; + + VirtIOGPUBase *vgpu; + VGACommonState vga; + MemoryRegion vga_mrs[3]; +}; + +struct VirtIOVGABaseClass { + VirtioPCIClass parent_class; + + DeviceReset parent_reset; +}; + +#endif /* VIRTIO_VGA_H */ diff --git a/hw/display/vmware_vga.c b/hw/display/vmware_vga.c new file mode 100644 index 000000000..e2969a6c8 --- /dev/null +++ b/hw/display/vmware_vga.c @@ -0,0 +1,1366 @@ +/* + * QEMU VMware-SVGA "chipset". + * + * Copyright (c) 2007 Andrzej Zaborowski + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include "qemu/module.h" +#include "qemu/units.h" +#include "qapi/error.h" +#include "qemu/log.h" +#include "hw/loader.h" +#include "trace.h" +#include "hw/pci/pci.h" +#include "hw/qdev-properties.h" +#include "migration/vmstate.h" +#include "qom/object.h" + +#undef VERBOSE +#define HW_RECT_ACCEL +#define HW_FILL_ACCEL +#define HW_MOUSE_ACCEL + +#include "vga_int.h" + +/* See http://vmware-svga.sf.net/ for some documentation on VMWare SVGA */ + +struct vmsvga_state_s { + VGACommonState vga; + + int invalidated; + int enable; + int config; + struct { + int id; + int x; + int y; + int on; + } cursor; + + int index; + int scratch_size; + uint32_t *scratch; + int new_width; + int new_height; + int new_depth; + uint32_t guest; + uint32_t svgaid; + int syncing; + + MemoryRegion fifo_ram; + uint8_t *fifo_ptr; + unsigned int fifo_size; + + uint32_t *fifo; + uint32_t fifo_min; + uint32_t fifo_max; + uint32_t fifo_next; + uint32_t fifo_stop; + +#define REDRAW_FIFO_LEN 512 + struct vmsvga_rect_s { + int x, y, w, h; + } redraw_fifo[REDRAW_FIFO_LEN]; + int redraw_fifo_first, redraw_fifo_last; +}; + +#define TYPE_VMWARE_SVGA "vmware-svga" + +DECLARE_INSTANCE_CHECKER(struct pci_vmsvga_state_s, VMWARE_SVGA, + TYPE_VMWARE_SVGA) + +struct pci_vmsvga_state_s { + /*< private >*/ + PCIDevice parent_obj; + /*< public >*/ + + struct vmsvga_state_s chip; + MemoryRegion io_bar; +}; + +#define SVGA_MAGIC 0x900000UL +#define SVGA_MAKE_ID(ver) (SVGA_MAGIC << 8 | (ver)) +#define SVGA_ID_0 SVGA_MAKE_ID(0) +#define SVGA_ID_1 SVGA_MAKE_ID(1) +#define SVGA_ID_2 SVGA_MAKE_ID(2) + +#define SVGA_LEGACY_BASE_PORT 0x4560 +#define SVGA_INDEX_PORT 0x0 +#define SVGA_VALUE_PORT 0x1 +#define SVGA_BIOS_PORT 0x2 + +#define SVGA_VERSION_2 + +#ifdef SVGA_VERSION_2 +# define SVGA_ID SVGA_ID_2 +# define SVGA_IO_BASE SVGA_LEGACY_BASE_PORT +# define SVGA_IO_MUL 1 +# define SVGA_FIFO_SIZE 0x10000 +# define SVGA_PCI_DEVICE_ID PCI_DEVICE_ID_VMWARE_SVGA2 +#else +# define SVGA_ID SVGA_ID_1 +# define SVGA_IO_BASE SVGA_LEGACY_BASE_PORT +# define SVGA_IO_MUL 4 +# define SVGA_FIFO_SIZE 0x10000 +# define SVGA_PCI_DEVICE_ID PCI_DEVICE_ID_VMWARE_SVGA +#endif + +enum { + /* ID 0, 1 and 2 registers */ + SVGA_REG_ID = 0, + SVGA_REG_ENABLE = 1, + SVGA_REG_WIDTH = 2, + SVGA_REG_HEIGHT = 3, + SVGA_REG_MAX_WIDTH = 4, + SVGA_REG_MAX_HEIGHT = 5, + SVGA_REG_DEPTH = 6, + SVGA_REG_BITS_PER_PIXEL = 7, /* Current bpp in the guest */ + SVGA_REG_PSEUDOCOLOR = 8, + SVGA_REG_RED_MASK = 9, + SVGA_REG_GREEN_MASK = 10, + SVGA_REG_BLUE_MASK = 11, + SVGA_REG_BYTES_PER_LINE = 12, + SVGA_REG_FB_START = 13, + SVGA_REG_FB_OFFSET = 14, + SVGA_REG_VRAM_SIZE = 15, + SVGA_REG_FB_SIZE = 16, + + /* ID 1 and 2 registers */ + SVGA_REG_CAPABILITIES = 17, + SVGA_REG_MEM_START = 18, /* Memory for command FIFO */ + SVGA_REG_MEM_SIZE = 19, + SVGA_REG_CONFIG_DONE = 20, /* Set when memory area configured */ + SVGA_REG_SYNC = 21, /* Write to force synchronization */ + SVGA_REG_BUSY = 22, /* Read to check if sync is done */ + SVGA_REG_GUEST_ID = 23, /* Set guest OS identifier */ + SVGA_REG_CURSOR_ID = 24, /* ID of cursor */ + SVGA_REG_CURSOR_X = 25, /* Set cursor X position */ + SVGA_REG_CURSOR_Y = 26, /* Set cursor Y position */ + SVGA_REG_CURSOR_ON = 27, /* Turn cursor on/off */ + SVGA_REG_HOST_BITS_PER_PIXEL = 28, /* Current bpp in the host */ + SVGA_REG_SCRATCH_SIZE = 29, /* Number of scratch registers */ + SVGA_REG_MEM_REGS = 30, /* Number of FIFO registers */ + SVGA_REG_NUM_DISPLAYS = 31, /* Number of guest displays */ + SVGA_REG_PITCHLOCK = 32, /* Fixed 
pitch for all modes */ + + SVGA_PALETTE_BASE = 1024, /* Base of SVGA color map */ + SVGA_PALETTE_END = SVGA_PALETTE_BASE + 767, + SVGA_SCRATCH_BASE = SVGA_PALETTE_BASE + 768, +}; + +#define SVGA_CAP_NONE 0 +#define SVGA_CAP_RECT_FILL (1 << 0) +#define SVGA_CAP_RECT_COPY (1 << 1) +#define SVGA_CAP_RECT_PAT_FILL (1 << 2) +#define SVGA_CAP_LEGACY_OFFSCREEN (1 << 3) +#define SVGA_CAP_RASTER_OP (1 << 4) +#define SVGA_CAP_CURSOR (1 << 5) +#define SVGA_CAP_CURSOR_BYPASS (1 << 6) +#define SVGA_CAP_CURSOR_BYPASS_2 (1 << 7) +#define SVGA_CAP_8BIT_EMULATION (1 << 8) +#define SVGA_CAP_ALPHA_CURSOR (1 << 9) +#define SVGA_CAP_GLYPH (1 << 10) +#define SVGA_CAP_GLYPH_CLIPPING (1 << 11) +#define SVGA_CAP_OFFSCREEN_1 (1 << 12) +#define SVGA_CAP_ALPHA_BLEND (1 << 13) +#define SVGA_CAP_3D (1 << 14) +#define SVGA_CAP_EXTENDED_FIFO (1 << 15) +#define SVGA_CAP_MULTIMON (1 << 16) +#define SVGA_CAP_PITCHLOCK (1 << 17) + +/* + * FIFO offsets (seen as an array of 32-bit words) + */ +enum { + /* + * The original defined FIFO offsets + */ + SVGA_FIFO_MIN = 0, + SVGA_FIFO_MAX, /* The distance from MIN to MAX must be at least 10K */ + SVGA_FIFO_NEXT, + SVGA_FIFO_STOP, + + /* + * Additional offsets added as of SVGA_CAP_EXTENDED_FIFO + */ + SVGA_FIFO_CAPABILITIES = 4, + SVGA_FIFO_FLAGS, + SVGA_FIFO_FENCE, + SVGA_FIFO_3D_HWVERSION, + SVGA_FIFO_PITCHLOCK, +}; + +#define SVGA_FIFO_CAP_NONE 0 +#define SVGA_FIFO_CAP_FENCE (1 << 0) +#define SVGA_FIFO_CAP_ACCELFRONT (1 << 1) +#define SVGA_FIFO_CAP_PITCHLOCK (1 << 2) + +#define SVGA_FIFO_FLAG_NONE 0 +#define SVGA_FIFO_FLAG_ACCELFRONT (1 << 0) + +/* These values can probably be changed arbitrarily. */ +#define SVGA_SCRATCH_SIZE 0x8000 +#define SVGA_MAX_WIDTH 2368 +#define SVGA_MAX_HEIGHT 1770 + +#ifdef VERBOSE +# define GUEST_OS_BASE 0x5001 +static const char *vmsvga_guest_id[] = { + [0x00] = "Dos", + [0x01] = "Windows 3.1", + [0x02] = "Windows 95", + [0x03] = "Windows 98", + [0x04] = "Windows ME", + [0x05] = "Windows NT", + [0x06] = "Windows 2000", + [0x07] = "Linux", + [0x08] = "OS/2", + [0x09] = "an unknown OS", + [0x0a] = "BSD", + [0x0b] = "Whistler", + [0x0c] = "an unknown OS", + [0x0d] = "an unknown OS", + [0x0e] = "an unknown OS", + [0x0f] = "an unknown OS", + [0x10] = "an unknown OS", + [0x11] = "an unknown OS", + [0x12] = "an unknown OS", + [0x13] = "an unknown OS", + [0x14] = "an unknown OS", + [0x15] = "Windows 2003", +}; +#endif + +enum { + SVGA_CMD_INVALID_CMD = 0, + SVGA_CMD_UPDATE = 1, + SVGA_CMD_RECT_FILL = 2, + SVGA_CMD_RECT_COPY = 3, + SVGA_CMD_DEFINE_BITMAP = 4, + SVGA_CMD_DEFINE_BITMAP_SCANLINE = 5, + SVGA_CMD_DEFINE_PIXMAP = 6, + SVGA_CMD_DEFINE_PIXMAP_SCANLINE = 7, + SVGA_CMD_RECT_BITMAP_FILL = 8, + SVGA_CMD_RECT_PIXMAP_FILL = 9, + SVGA_CMD_RECT_BITMAP_COPY = 10, + SVGA_CMD_RECT_PIXMAP_COPY = 11, + SVGA_CMD_FREE_OBJECT = 12, + SVGA_CMD_RECT_ROP_FILL = 13, + SVGA_CMD_RECT_ROP_COPY = 14, + SVGA_CMD_RECT_ROP_BITMAP_FILL = 15, + SVGA_CMD_RECT_ROP_PIXMAP_FILL = 16, + SVGA_CMD_RECT_ROP_BITMAP_COPY = 17, + SVGA_CMD_RECT_ROP_PIXMAP_COPY = 18, + SVGA_CMD_DEFINE_CURSOR = 19, + SVGA_CMD_DISPLAY_CURSOR = 20, + SVGA_CMD_MOVE_CURSOR = 21, + SVGA_CMD_DEFINE_ALPHA_CURSOR = 22, + SVGA_CMD_DRAW_GLYPH = 23, + SVGA_CMD_DRAW_GLYPH_CLIPPED = 24, + SVGA_CMD_UPDATE_VERBOSE = 25, + SVGA_CMD_SURFACE_FILL = 26, + SVGA_CMD_SURFACE_COPY = 27, + SVGA_CMD_SURFACE_ALPHA_BLEND = 28, + SVGA_CMD_FRONT_ROP_FILL = 29, + SVGA_CMD_FENCE = 30, +}; + +/* Legal values for the SVGA_REG_CURSOR_ON register in cursor bypass mode */ +enum { + SVGA_CURSOR_ON_HIDE = 0, + SVGA_CURSOR_ON_SHOW = 1, + 
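/* REMOVE_FROM_FB/RESTORE_TO_FB appear to control only how the cursor + image is blended into the framebuffer; this emulation leaves the + current on/off state untouched for them, see vmsvga_value_write() */ +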
SVGA_CURSOR_ON_REMOVE_FROM_FB = 2, + SVGA_CURSOR_ON_RESTORE_TO_FB = 3, +}; + +static inline bool vmsvga_verify_rect(DisplaySurface *surface, + const char *name, + int x, int y, int w, int h) +{ + if (x < 0) { + fprintf(stderr, "%s: x was < 0 (%d)\n", name, x); + return false; + } + if (x > SVGA_MAX_WIDTH) { + fprintf(stderr, "%s: x was > %d (%d)\n", name, SVGA_MAX_WIDTH, x); + return false; + } + if (w < 0) { + fprintf(stderr, "%s: w was < 0 (%d)\n", name, w); + return false; + } + if (w > SVGA_MAX_WIDTH) { + fprintf(stderr, "%s: w was > %d (%d)\n", name, SVGA_MAX_WIDTH, w); + return false; + } + if (x + w > surface_width(surface)) { + fprintf(stderr, "%s: width was > %d (x: %d, w: %d)\n", + name, surface_width(surface), x, w); + return false; + } + + if (y < 0) { + fprintf(stderr, "%s: y was < 0 (%d)\n", name, y); + return false; + } + if (y > SVGA_MAX_HEIGHT) { + fprintf(stderr, "%s: y was > %d (%d)\n", name, SVGA_MAX_HEIGHT, y); + return false; + } + if (h < 0) { + fprintf(stderr, "%s: h was < 0 (%d)\n", name, h); + return false; + } + if (h > SVGA_MAX_HEIGHT) { + fprintf(stderr, "%s: h was > %d (%d)\n", name, SVGA_MAX_HEIGHT, h); + return false; + } + if (y + h > surface_height(surface)) { + fprintf(stderr, "%s: update height > %d (y: %d, h: %d)\n", + name, surface_height(surface), y, h); + return false; + } + + return true; +} + +static inline void vmsvga_update_rect(struct vmsvga_state_s *s, + int x, int y, int w, int h) +{ + DisplaySurface *surface = qemu_console_surface(s->vga.con); + int line; + int bypl; + int width; + int start; + uint8_t *src; + uint8_t *dst; + + if (!vmsvga_verify_rect(surface, __func__, x, y, w, h)) { + /* go for a fullscreen update as fallback */ + x = 0; + y = 0; + w = surface_width(surface); + h = surface_height(surface); + } + + bypl = surface_stride(surface); + width = surface_bytes_per_pixel(surface) * w; + start = surface_bytes_per_pixel(surface) * x + bypl * y; + src = s->vga.vram_ptr + start; + dst = surface_data(surface) + start; + + for (line = h; line > 0; line--, src += bypl, dst += bypl) { + memcpy(dst, src, width); + } + dpy_gfx_update(s->vga.con, x, y, w, h); +} + +static inline void vmsvga_update_rect_delayed(struct vmsvga_state_s *s, + int x, int y, int w, int h) +{ + struct vmsvga_rect_s *rect = &s->redraw_fifo[s->redraw_fifo_last++]; + + s->redraw_fifo_last &= REDRAW_FIFO_LEN - 1; + rect->x = x; + rect->y = y; + rect->w = w; + rect->h = h; +} + +static inline void vmsvga_update_rect_flush(struct vmsvga_state_s *s) +{ + struct vmsvga_rect_s *rect; + + if (s->invalidated) { + s->redraw_fifo_first = s->redraw_fifo_last; + return; + } + /* Overlapping region updates can be optimised out here - if someone + * knows a smart algorithm to do that, please share. 
*/ + while (s->redraw_fifo_first != s->redraw_fifo_last) { + rect = &s->redraw_fifo[s->redraw_fifo_first++]; + s->redraw_fifo_first &= REDRAW_FIFO_LEN - 1; + vmsvga_update_rect(s, rect->x, rect->y, rect->w, rect->h); + } +} + +#ifdef HW_RECT_ACCEL +static inline int vmsvga_copy_rect(struct vmsvga_state_s *s, + int x0, int y0, int x1, int y1, int w, int h) +{ + DisplaySurface *surface = qemu_console_surface(s->vga.con); + uint8_t *vram = s->vga.vram_ptr; + int bypl = surface_stride(surface); + int bypp = surface_bytes_per_pixel(surface); + int width = bypp * w; + int line = h; + uint8_t *ptr[2]; + + if (!vmsvga_verify_rect(surface, "vmsvga_copy_rect/src", x0, y0, w, h)) { + return -1; + } + if (!vmsvga_verify_rect(surface, "vmsvga_copy_rect/dst", x1, y1, w, h)) { + return -1; + } + + if (y1 > y0) { + ptr[0] = vram + bypp * x0 + bypl * (y0 + h - 1); + ptr[1] = vram + bypp * x1 + bypl * (y1 + h - 1); + for (; line > 0; line --, ptr[0] -= bypl, ptr[1] -= bypl) { + memmove(ptr[1], ptr[0], width); + } + } else { + ptr[0] = vram + bypp * x0 + bypl * y0; + ptr[1] = vram + bypp * x1 + bypl * y1; + for (; line > 0; line --, ptr[0] += bypl, ptr[1] += bypl) { + memmove(ptr[1], ptr[0], width); + } + } + + vmsvga_update_rect_delayed(s, x1, y1, w, h); + return 0; +} +#endif + +#ifdef HW_FILL_ACCEL +static inline int vmsvga_fill_rect(struct vmsvga_state_s *s, + uint32_t c, int x, int y, int w, int h) +{ + DisplaySurface *surface = qemu_console_surface(s->vga.con); + int bypl = surface_stride(surface); + int width = surface_bytes_per_pixel(surface) * w; + int line = h; + int column; + uint8_t *fst; + uint8_t *dst; + uint8_t *src; + uint8_t col[4]; + + if (!vmsvga_verify_rect(surface, __func__, x, y, w, h)) { + return -1; + } + + col[0] = c; + col[1] = c >> 8; + col[2] = c >> 16; + col[3] = c >> 24; + + fst = s->vga.vram_ptr + surface_bytes_per_pixel(surface) * x + bypl * y; + + if (line--) { + dst = fst; + src = col; + for (column = width; column > 0; column--) { + *(dst++) = *(src++); + if (src - col == surface_bytes_per_pixel(surface)) { + src = col; + } + } + dst = fst; + for (; line > 0; line--) { + dst += bypl; + memcpy(dst, fst, width); + } + } + + vmsvga_update_rect_delayed(s, x, y, w, h); + return 0; +} +#endif + +struct vmsvga_cursor_definition_s { + uint32_t width; + uint32_t height; + int id; + uint32_t bpp; + int hot_x; + int hot_y; + uint32_t mask[1024]; + uint32_t image[4096]; +}; + +#define SVGA_BITMAP_SIZE(w, h) ((((w) + 31) >> 5) * (h)) +#define SVGA_PIXMAP_SIZE(w, h, bpp) (((((w) * (bpp)) + 31) >> 5) * (h)) + +#ifdef HW_MOUSE_ACCEL +static inline void vmsvga_cursor_define(struct vmsvga_state_s *s, + struct vmsvga_cursor_definition_s *c) +{ + QEMUCursor *qc; + int i, pixels; + + qc = cursor_alloc(c->width, c->height); + qc->hot_x = c->hot_x; + qc->hot_y = c->hot_y; + switch (c->bpp) { + case 1: + cursor_set_mono(qc, 0xffffff, 0x000000, (void *)c->image, + 1, (void *)c->mask); +#ifdef DEBUG + cursor_print_ascii_art(qc, "vmware/mono"); +#endif + break; + case 32: + /* fill alpha channel from mask, set color to zero */ + cursor_set_mono(qc, 0x000000, 0x000000, (void *)c->mask, + 1, (void *)c->mask); + /* add in rgb values */ + pixels = c->width * c->height; + for (i = 0; i < pixels; i++) { + qc->data[i] |= c->image[i] & 0xffffff; + } +#ifdef DEBUG + cursor_print_ascii_art(qc, "vmware/32bit"); +#endif + break; + default: + fprintf(stderr, "%s: unhandled bpp %d, using fallback cursor\n", + __func__, c->bpp); + cursor_put(qc); + qc = cursor_builtin_left_ptr(); + } + + 
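/* the console core takes its own reference on the cursor, so the + * cursor_put() below just drops ours */ +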
dpy_cursor_define(s->vga.con, qc); + cursor_put(qc); +} +#endif + +static inline int vmsvga_fifo_length(struct vmsvga_state_s *s) +{ + int num; + + if (!s->config || !s->enable) { + return 0; + } + + s->fifo_min = le32_to_cpu(s->fifo[SVGA_FIFO_MIN]); + s->fifo_max = le32_to_cpu(s->fifo[SVGA_FIFO_MAX]); + s->fifo_next = le32_to_cpu(s->fifo[SVGA_FIFO_NEXT]); + s->fifo_stop = le32_to_cpu(s->fifo[SVGA_FIFO_STOP]); + + /* Check range and alignment. */ + if ((s->fifo_min | s->fifo_max | s->fifo_next | s->fifo_stop) & 3) { + return 0; + } + if (s->fifo_min < sizeof(uint32_t) * 4) { + return 0; + } + if (s->fifo_max > SVGA_FIFO_SIZE || + s->fifo_min >= SVGA_FIFO_SIZE || + s->fifo_stop >= SVGA_FIFO_SIZE || + s->fifo_next >= SVGA_FIFO_SIZE) { + return 0; + } + if (s->fifo_max < s->fifo_min + 10 * KiB) { + return 0; + } + + num = s->fifo_next - s->fifo_stop; + if (num < 0) { + num += s->fifo_max - s->fifo_min; + } + return num >> 2; +} + +static inline uint32_t vmsvga_fifo_read_raw(struct vmsvga_state_s *s) +{ + uint32_t cmd = s->fifo[s->fifo_stop >> 2]; + + s->fifo_stop += 4; + if (s->fifo_stop >= s->fifo_max) { + s->fifo_stop = s->fifo_min; + } + s->fifo[SVGA_FIFO_STOP] = cpu_to_le32(s->fifo_stop); + return cmd; +} + +static inline uint32_t vmsvga_fifo_read(struct vmsvga_state_s *s) +{ + return le32_to_cpu(vmsvga_fifo_read_raw(s)); +} + +static void vmsvga_fifo_run(struct vmsvga_state_s *s) +{ + uint32_t cmd, colour; + int args, len, maxloop = 1024; + int x, y, dx, dy, width, height; + struct vmsvga_cursor_definition_s cursor; + uint32_t cmd_start; + + len = vmsvga_fifo_length(s); + while (len > 0 && --maxloop > 0) { + /* May need to go back to the start of the command if incomplete */ + cmd_start = s->fifo_stop; + + switch (cmd = vmsvga_fifo_read(s)) { + case SVGA_CMD_UPDATE: + case SVGA_CMD_UPDATE_VERBOSE: + len -= 5; + if (len < 0) { + goto rewind; + } + + x = vmsvga_fifo_read(s); + y = vmsvga_fifo_read(s); + width = vmsvga_fifo_read(s); + height = vmsvga_fifo_read(s); + vmsvga_update_rect_delayed(s, x, y, width, height); + break; + + case SVGA_CMD_RECT_FILL: + len -= 6; + if (len < 0) { + goto rewind; + } + + colour = vmsvga_fifo_read(s); + x = vmsvga_fifo_read(s); + y = vmsvga_fifo_read(s); + width = vmsvga_fifo_read(s); + height = vmsvga_fifo_read(s); +#ifdef HW_FILL_ACCEL + if (vmsvga_fill_rect(s, colour, x, y, width, height) == 0) { + break; + } +#endif + args = 0; + goto badcmd; + + case SVGA_CMD_RECT_COPY: + len -= 7; + if (len < 0) { + goto rewind; + } + + x = vmsvga_fifo_read(s); + y = vmsvga_fifo_read(s); + dx = vmsvga_fifo_read(s); + dy = vmsvga_fifo_read(s); + width = vmsvga_fifo_read(s); + height = vmsvga_fifo_read(s); +#ifdef HW_RECT_ACCEL + if (vmsvga_copy_rect(s, x, y, dx, dy, width, height) == 0) { + break; + } +#endif + args = 0; + goto badcmd; + + case SVGA_CMD_DEFINE_CURSOR: + len -= 8; + if (len < 0) { + goto rewind; + } + + cursor.id = vmsvga_fifo_read(s); + cursor.hot_x = vmsvga_fifo_read(s); + cursor.hot_y = vmsvga_fifo_read(s); + cursor.width = x = vmsvga_fifo_read(s); + cursor.height = y = vmsvga_fifo_read(s); + vmsvga_fifo_read(s); + cursor.bpp = vmsvga_fifo_read(s); + + args = SVGA_BITMAP_SIZE(x, y) + SVGA_PIXMAP_SIZE(x, y, cursor.bpp); + if (cursor.width > 256 + || cursor.height > 256 + || cursor.bpp > 32 + || SVGA_BITMAP_SIZE(x, y) > ARRAY_SIZE(cursor.mask) + || SVGA_PIXMAP_SIZE(x, y, cursor.bpp) + > ARRAY_SIZE(cursor.image)) { + goto badcmd; + } + + len -= args; + if (len < 0) { + goto rewind; + } + + for (args = 0; args < SVGA_BITMAP_SIZE(x, y); args++) { + 
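/* note the raw reads: mask and image words are pixel data and are + * copied as-is, without the byteswap applied to command words */ +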
cursor.mask[args] = vmsvga_fifo_read_raw(s); + } + for (args = 0; args < SVGA_PIXMAP_SIZE(x, y, cursor.bpp); args++) { + cursor.image[args] = vmsvga_fifo_read_raw(s); + } +#ifdef HW_MOUSE_ACCEL + vmsvga_cursor_define(s, &cursor); + break; +#else + args = 0; + goto badcmd; +#endif + + /* + * Other commands that we at least know the number of arguments + * for so we can avoid FIFO desync if driver uses them illegally. + */ + case SVGA_CMD_DEFINE_ALPHA_CURSOR: + len -= 6; + if (len < 0) { + goto rewind; + } + vmsvga_fifo_read(s); + vmsvga_fifo_read(s); + vmsvga_fifo_read(s); + x = vmsvga_fifo_read(s); + y = vmsvga_fifo_read(s); + args = x * y; + goto badcmd; + case SVGA_CMD_RECT_ROP_FILL: + args = 6; + goto badcmd; + case SVGA_CMD_RECT_ROP_COPY: + args = 7; + goto badcmd; + case SVGA_CMD_DRAW_GLYPH_CLIPPED: + len -= 4; + if (len < 0) { + goto rewind; + } + vmsvga_fifo_read(s); + vmsvga_fifo_read(s); + args = 7 + (vmsvga_fifo_read(s) >> 2); + goto badcmd; + case SVGA_CMD_SURFACE_ALPHA_BLEND: + args = 12; + goto badcmd; + + /* + * Other commands that are not listed as depending on any + * CAPABILITIES bits, but are not described in the README either. + */ + case SVGA_CMD_SURFACE_FILL: + case SVGA_CMD_SURFACE_COPY: + case SVGA_CMD_FRONT_ROP_FILL: + case SVGA_CMD_FENCE: + case SVGA_CMD_INVALID_CMD: + break; /* Nop */ + + default: + args = 0; + badcmd: + len -= args; + if (len < 0) { + goto rewind; + } + while (args--) { + vmsvga_fifo_read(s); + } + printf("%s: Unknown command 0x%02x in SVGA command FIFO\n", + __func__, cmd); + break; + + rewind: + s->fifo_stop = cmd_start; + s->fifo[SVGA_FIFO_STOP] = cpu_to_le32(s->fifo_stop); + break; + } + } + + s->syncing = 0; +} + +static uint32_t vmsvga_index_read(void *opaque, uint32_t address) +{ + struct vmsvga_state_s *s = opaque; + + return s->index; +} + +static void vmsvga_index_write(void *opaque, uint32_t address, uint32_t index) +{ + struct vmsvga_state_s *s = opaque; + + s->index = index; +} + +static uint32_t vmsvga_value_read(void *opaque, uint32_t address) +{ + uint32_t caps; + struct vmsvga_state_s *s = opaque; + DisplaySurface *surface = qemu_console_surface(s->vga.con); + PixelFormat pf; + uint32_t ret; + + switch (s->index) { + case SVGA_REG_ID: + ret = s->svgaid; + break; + + case SVGA_REG_ENABLE: + ret = s->enable; + break; + + case SVGA_REG_WIDTH: + ret = s->new_width ? s->new_width : surface_width(surface); + break; + + case SVGA_REG_HEIGHT: + ret = s->new_height ? s->new_height : surface_height(surface); + break; + + case SVGA_REG_MAX_WIDTH: + ret = SVGA_MAX_WIDTH; + break; + + case SVGA_REG_MAX_HEIGHT: + ret = SVGA_MAX_HEIGHT; + break; + + case SVGA_REG_DEPTH: + ret = (s->new_depth == 32) ? 
24 : s->new_depth; + break; + + case SVGA_REG_BITS_PER_PIXEL: + case SVGA_REG_HOST_BITS_PER_PIXEL: + ret = s->new_depth; + break; + + case SVGA_REG_PSEUDOCOLOR: + ret = 0x0; + break; + + case SVGA_REG_RED_MASK: + pf = qemu_default_pixelformat(s->new_depth); + ret = pf.rmask; + break; + + case SVGA_REG_GREEN_MASK: + pf = qemu_default_pixelformat(s->new_depth); + ret = pf.gmask; + break; + + case SVGA_REG_BLUE_MASK: + pf = qemu_default_pixelformat(s->new_depth); + ret = pf.bmask; + break; + + case SVGA_REG_BYTES_PER_LINE: + if (s->new_width) { + ret = (s->new_depth * s->new_width) / 8; + } else { + ret = surface_stride(surface); + } + break; + + case SVGA_REG_FB_START: { + struct pci_vmsvga_state_s *pci_vmsvga + = container_of(s, struct pci_vmsvga_state_s, chip); + ret = pci_get_bar_addr(PCI_DEVICE(pci_vmsvga), 1); + break; + } + + case SVGA_REG_FB_OFFSET: + ret = 0x0; + break; + + case SVGA_REG_VRAM_SIZE: + ret = s->vga.vram_size; /* No physical VRAM besides the framebuffer */ + break; + + case SVGA_REG_FB_SIZE: + ret = s->vga.vram_size; + break; + + case SVGA_REG_CAPABILITIES: + caps = SVGA_CAP_NONE; +#ifdef HW_RECT_ACCEL + caps |= SVGA_CAP_RECT_COPY; +#endif +#ifdef HW_FILL_ACCEL + caps |= SVGA_CAP_RECT_FILL; +#endif +#ifdef HW_MOUSE_ACCEL + if (dpy_cursor_define_supported(s->vga.con)) { + caps |= SVGA_CAP_CURSOR | SVGA_CAP_CURSOR_BYPASS_2 | + SVGA_CAP_CURSOR_BYPASS; + } +#endif + ret = caps; + break; + + case SVGA_REG_MEM_START: { + struct pci_vmsvga_state_s *pci_vmsvga + = container_of(s, struct pci_vmsvga_state_s, chip); + ret = pci_get_bar_addr(PCI_DEVICE(pci_vmsvga), 2); + break; + } + + case SVGA_REG_MEM_SIZE: + ret = s->fifo_size; + break; + + case SVGA_REG_CONFIG_DONE: + ret = s->config; + break; + + case SVGA_REG_SYNC: + case SVGA_REG_BUSY: + ret = s->syncing; + break; + + case SVGA_REG_GUEST_ID: + ret = s->guest; + break; + + case SVGA_REG_CURSOR_ID: + ret = s->cursor.id; + break; + + case SVGA_REG_CURSOR_X: + ret = s->cursor.x; + break; + + case SVGA_REG_CURSOR_Y: + ret = s->cursor.y; + break; + + case SVGA_REG_CURSOR_ON: + ret = s->cursor.on; + break; + + case SVGA_REG_SCRATCH_SIZE: + ret = s->scratch_size; + break; + + case SVGA_REG_MEM_REGS: + case SVGA_REG_NUM_DISPLAYS: + case SVGA_REG_PITCHLOCK: + case SVGA_PALETTE_BASE ... 
SVGA_PALETTE_END: + ret = 0; + break; + + default: + if (s->index >= SVGA_SCRATCH_BASE && + s->index < SVGA_SCRATCH_BASE + s->scratch_size) { + ret = s->scratch[s->index - SVGA_SCRATCH_BASE]; + break; + } + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad register %02x\n", __func__, s->index); + ret = 0; + break; + } + + if (s->index >= SVGA_SCRATCH_BASE) { + trace_vmware_scratch_read(s->index, ret); + } else if (s->index >= SVGA_PALETTE_BASE) { + trace_vmware_palette_read(s->index, ret); + } else { + trace_vmware_value_read(s->index, ret); + } + return ret; +} + +static void vmsvga_value_write(void *opaque, uint32_t address, uint32_t value) +{ + struct vmsvga_state_s *s = opaque; + + if (s->index >= SVGA_SCRATCH_BASE) { + trace_vmware_scratch_write(s->index, value); + } else if (s->index >= SVGA_PALETTE_BASE) { + trace_vmware_palette_write(s->index, value); + } else { + trace_vmware_value_write(s->index, value); + } + switch (s->index) { + case SVGA_REG_ID: + if (value == SVGA_ID_2 || value == SVGA_ID_1 || value == SVGA_ID_0) { + s->svgaid = value; + } + break; + + case SVGA_REG_ENABLE: + s->enable = !!value; + s->invalidated = 1; + s->vga.hw_ops->invalidate(&s->vga); + if (s->enable && s->config) { + vga_dirty_log_stop(&s->vga); + } else { + vga_dirty_log_start(&s->vga); + } + break; + + case SVGA_REG_WIDTH: + if (value <= SVGA_MAX_WIDTH) { + s->new_width = value; + s->invalidated = 1; + } else { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad width: %i\n", __func__, value); + } + break; + + case SVGA_REG_HEIGHT: + if (value <= SVGA_MAX_HEIGHT) { + s->new_height = value; + s->invalidated = 1; + } else { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad height: %i\n", __func__, value); + } + break; + + case SVGA_REG_BITS_PER_PIXEL: + if (value != 32) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad bits per pixel: %i bits\n", __func__, value); + s->config = 0; + s->invalidated = 1; + } + break; + + case SVGA_REG_CONFIG_DONE: + if (value) { + s->fifo = (uint32_t *) s->fifo_ptr; + vga_dirty_log_stop(&s->vga); + } + s->config = !!value; + break; + + case SVGA_REG_SYNC: + s->syncing = 1; + vmsvga_fifo_run(s); /* Or should we just wait for update_display? */ + break; + + case SVGA_REG_GUEST_ID: + s->guest = value; +#ifdef VERBOSE + if (value >= GUEST_OS_BASE && value < GUEST_OS_BASE + + ARRAY_SIZE(vmsvga_guest_id)) { + printf("%s: guest runs %s.\n", __func__, + vmsvga_guest_id[value - GUEST_OS_BASE]); + } +#endif + break; + + case SVGA_REG_CURSOR_ID: + s->cursor.id = value; + break; + + case SVGA_REG_CURSOR_X: + s->cursor.x = value; + break; + + case SVGA_REG_CURSOR_Y: + s->cursor.y = value; + break; + + case SVGA_REG_CURSOR_ON: + s->cursor.on |= (value == SVGA_CURSOR_ON_SHOW); + s->cursor.on &= (value != SVGA_CURSOR_ON_HIDE); +#ifdef HW_MOUSE_ACCEL + if (value <= SVGA_CURSOR_ON_SHOW) { + dpy_mouse_set(s->vga.con, s->cursor.x, s->cursor.y, s->cursor.on); + } +#endif + break; + + case SVGA_REG_DEPTH: + case SVGA_REG_MEM_REGS: + case SVGA_REG_NUM_DISPLAYS: + case SVGA_REG_PITCHLOCK: + case SVGA_PALETTE_BASE ... 
SVGA_PALETTE_END: + break; + + default: + if (s->index >= SVGA_SCRATCH_BASE && + s->index < SVGA_SCRATCH_BASE + s->scratch_size) { + s->scratch[s->index - SVGA_SCRATCH_BASE] = value; + break; + } + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad register %02x\n", __func__, s->index); + } +} + +static uint32_t vmsvga_bios_read(void *opaque, uint32_t address) +{ + printf("%s: what are we supposed to return?\n", __func__); + return 0xcafe; +} + +static void vmsvga_bios_write(void *opaque, uint32_t address, uint32_t data) +{ + printf("%s: what are we supposed to do with (%08x)?\n", __func__, data); +} + +static inline void vmsvga_check_size(struct vmsvga_state_s *s) +{ + DisplaySurface *surface = qemu_console_surface(s->vga.con); + + if (s->new_width != surface_width(surface) || + s->new_height != surface_height(surface) || + s->new_depth != surface_bits_per_pixel(surface)) { + int stride = (s->new_depth * s->new_width) / 8; + pixman_format_code_t format = + qemu_default_pixman_format(s->new_depth, true); + trace_vmware_setmode(s->new_width, s->new_height, s->new_depth); + surface = qemu_create_displaysurface_from(s->new_width, s->new_height, + format, stride, + s->vga.vram_ptr); + dpy_gfx_replace_surface(s->vga.con, surface); + s->invalidated = 1; + } +} + +static void vmsvga_update_display(void *opaque) +{ + struct vmsvga_state_s *s = opaque; + + if (!s->enable || !s->config) { + /* in standard vga mode */ + s->vga.hw_ops->gfx_update(&s->vga); + return; + } + + vmsvga_check_size(s); + + vmsvga_fifo_run(s); + vmsvga_update_rect_flush(s); + + if (s->invalidated) { + s->invalidated = 0; + dpy_gfx_update_full(s->vga.con); + } +} + +static void vmsvga_reset(DeviceState *dev) +{ + struct pci_vmsvga_state_s *pci = VMWARE_SVGA(dev); + struct vmsvga_state_s *s = &pci->chip; + + s->index = 0; + s->enable = 0; + s->config = 0; + s->svgaid = SVGA_ID; + s->cursor.on = 0; + s->redraw_fifo_first = 0; + s->redraw_fifo_last = 0; + s->syncing = 0; + + vga_dirty_log_start(&s->vga); +} + +static void vmsvga_invalidate_display(void *opaque) +{ + struct vmsvga_state_s *s = opaque; + if (!s->enable) { + s->vga.hw_ops->invalidate(&s->vga); + return; + } + + s->invalidated = 1; +} + +static void vmsvga_text_update(void *opaque, console_ch_t *chardata) +{ + struct vmsvga_state_s *s = opaque; + + if (s->vga.hw_ops->text_update) { + s->vga.hw_ops->text_update(&s->vga, chardata); + } +} + +static int vmsvga_post_load(void *opaque, int version_id) +{ + struct vmsvga_state_s *s = opaque; + + s->invalidated = 1; + if (s->config) { + s->fifo = (uint32_t *) s->fifo_ptr; + } + return 0; +} + +static const VMStateDescription vmstate_vmware_vga_internal = { + .name = "vmware_vga_internal", + .version_id = 0, + .minimum_version_id = 0, + .post_load = vmsvga_post_load, + .fields = (VMStateField[]) { + VMSTATE_INT32_EQUAL(new_depth, struct vmsvga_state_s, NULL), + VMSTATE_INT32(enable, struct vmsvga_state_s), + VMSTATE_INT32(config, struct vmsvga_state_s), + VMSTATE_INT32(cursor.id, struct vmsvga_state_s), + VMSTATE_INT32(cursor.x, struct vmsvga_state_s), + VMSTATE_INT32(cursor.y, struct vmsvga_state_s), + VMSTATE_INT32(cursor.on, struct vmsvga_state_s), + VMSTATE_INT32(index, struct vmsvga_state_s), + VMSTATE_VARRAY_INT32(scratch, struct vmsvga_state_s, + scratch_size, 0, vmstate_info_uint32, uint32_t), + VMSTATE_INT32(new_width, struct vmsvga_state_s), + VMSTATE_INT32(new_height, struct vmsvga_state_s), + VMSTATE_UINT32(guest, struct vmsvga_state_s), + VMSTATE_UINT32(svgaid, struct vmsvga_state_s), + VMSTATE_INT32(syncing, struct 
vmsvga_state_s), + VMSTATE_UNUSED(4), /* was fb_size */ + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_vmware_vga = { + .name = "vmware_vga", + .version_id = 0, + .minimum_version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_PCI_DEVICE(parent_obj, struct pci_vmsvga_state_s), + VMSTATE_STRUCT(chip, struct pci_vmsvga_state_s, 0, + vmstate_vmware_vga_internal, struct vmsvga_state_s), + VMSTATE_END_OF_LIST() + } +}; + +static const GraphicHwOps vmsvga_ops = { + .invalidate = vmsvga_invalidate_display, + .gfx_update = vmsvga_update_display, + .text_update = vmsvga_text_update, +}; + +static void vmsvga_init(DeviceState *dev, struct vmsvga_state_s *s, + MemoryRegion *address_space, MemoryRegion *io) +{ + s->scratch_size = SVGA_SCRATCH_SIZE; + s->scratch = g_malloc(s->scratch_size * 4); + + s->vga.con = graphic_console_init(dev, 0, &vmsvga_ops, s); + + s->fifo_size = SVGA_FIFO_SIZE; + memory_region_init_ram(&s->fifo_ram, NULL, "vmsvga.fifo", s->fifo_size, + &error_fatal); + s->fifo_ptr = memory_region_get_ram_ptr(&s->fifo_ram); + + vga_common_init(&s->vga, OBJECT(dev)); + vga_init(&s->vga, OBJECT(dev), address_space, io, true); + vmstate_register(NULL, 0, &vmstate_vga_common, &s->vga); + s->new_depth = 32; +} + +static uint64_t vmsvga_io_read(void *opaque, hwaddr addr, unsigned size) +{ + struct vmsvga_state_s *s = opaque; + + switch (addr) { + case SVGA_IO_MUL * SVGA_INDEX_PORT: return vmsvga_index_read(s, addr); + case SVGA_IO_MUL * SVGA_VALUE_PORT: return vmsvga_value_read(s, addr); + case SVGA_IO_MUL * SVGA_BIOS_PORT: return vmsvga_bios_read(s, addr); + default: return -1u; + } +} + +static void vmsvga_io_write(void *opaque, hwaddr addr, + uint64_t data, unsigned size) +{ + struct vmsvga_state_s *s = opaque; + + switch (addr) { + case SVGA_IO_MUL * SVGA_INDEX_PORT: + vmsvga_index_write(s, addr, data); + break; + case SVGA_IO_MUL * SVGA_VALUE_PORT: + vmsvga_value_write(s, addr, data); + break; + case SVGA_IO_MUL * SVGA_BIOS_PORT: + vmsvga_bios_write(s, addr, data); + break; + } +} + +static const MemoryRegionOps vmsvga_io_ops = { + .read = vmsvga_io_read, + .write = vmsvga_io_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + .unaligned = true, + }, + .impl = { + .unaligned = true, + }, +}; + +static void pci_vmsvga_realize(PCIDevice *dev, Error **errp) +{ + struct pci_vmsvga_state_s *s = VMWARE_SVGA(dev); + + dev->config[PCI_CACHE_LINE_SIZE] = 0x08; + dev->config[PCI_LATENCY_TIMER] = 0x40; + dev->config[PCI_INTERRUPT_LINE] = 0xff; /* End */ + + memory_region_init_io(&s->io_bar, OBJECT(dev), &vmsvga_io_ops, &s->chip, + "vmsvga-io", 0x10); + memory_region_set_flush_coalesced(&s->io_bar); + pci_register_bar(dev, 0, PCI_BASE_ADDRESS_SPACE_IO, &s->io_bar); + + vmsvga_init(DEVICE(dev), &s->chip, + pci_address_space(dev), pci_address_space_io(dev)); + + pci_register_bar(dev, 1, PCI_BASE_ADDRESS_MEM_PREFETCH, + &s->chip.vga.vram); + pci_register_bar(dev, 2, PCI_BASE_ADDRESS_MEM_PREFETCH, + &s->chip.fifo_ram); +} + +static Property vga_vmware_properties[] = { + DEFINE_PROP_UINT32("vgamem_mb", struct pci_vmsvga_state_s, + chip.vga.vram_size_mb, 16), + DEFINE_PROP_BOOL("global-vmstate", struct pci_vmsvga_state_s, + chip.vga.global_vmstate, false), + DEFINE_PROP_END_OF_LIST(), +}; + +static void vmsvga_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + + k->realize = pci_vmsvga_realize; + k->romfile = "vgabios-vmware.bin"; + 
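/* guest drivers match the device on these IDs, so they must stay + * exactly what a real SVGA II adapter reports */ +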
k->vendor_id = PCI_VENDOR_ID_VMWARE; + k->device_id = SVGA_PCI_DEVICE_ID; + k->class_id = PCI_CLASS_DISPLAY_VGA; + k->subsystem_vendor_id = PCI_VENDOR_ID_VMWARE; + k->subsystem_id = SVGA_PCI_DEVICE_ID; + dc->reset = vmsvga_reset; + dc->vmsd = &vmstate_vmware_vga; + device_class_set_props(dc, vga_vmware_properties); + dc->hotpluggable = false; + set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories); +} + +static const TypeInfo vmsvga_info = { + .name = TYPE_VMWARE_SVGA, + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(struct pci_vmsvga_state_s), + .class_init = vmsvga_class_init, + .interfaces = (InterfaceInfo[]) { + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { }, + }, +}; + +static void vmsvga_register_types(void) +{ + type_register_static(&vmsvga_info); +} + +type_init(vmsvga_register_types) diff --git a/hw/display/xenfb.c b/hw/display/xenfb.c new file mode 100644 index 000000000..838260b6a --- /dev/null +++ b/hw/display/xenfb.c @@ -0,0 +1,987 @@ +/* + * xen paravirt framebuffer backend + * + * Copyright IBM, Corp. 2005-2006 + * Copyright Red Hat, Inc. 2006-2008 + * + * Authors: + * Anthony Liguori <aliguori@us.ibm.com>, + * Markus Armbruster <armbru@redhat.com>, + * Daniel P. Berrange <berrange@redhat.com>, + * Pat Campbell <plc@novell.com>, + * Gerd Hoffmann <kraxel@redhat.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; under version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */ + +#include "qemu/osdep.h" +#include "qemu/units.h" + +#include "ui/input.h" +#include "ui/console.h" +#include "hw/xen/xen-legacy-backend.h" + +#include "hw/xen/interface/io/fbif.h" +#include "hw/xen/interface/io/kbdif.h" +#include "hw/xen/interface/io/protocols.h" + +#include "trace.h" + +#ifndef BTN_LEFT +#define BTN_LEFT 0x110 /* from */ +#endif + +/* -------------------------------------------------------------------- */ + +struct common { + struct XenLegacyDevice xendev; /* must be first */ + void *page; +}; + +struct XenInput { + struct common c; + int abs_pointer_wanted; /* Whether guest supports absolute pointer */ + int raw_pointer_wanted; /* Whether guest supports raw (unscaled) pointer */ + QemuInputHandlerState *qkbd; + QemuInputHandlerState *qmou; + int axis[INPUT_AXIS__MAX]; + int wheel; +}; + +#define UP_QUEUE 8 + +struct XenFB { + struct common c; + QemuConsole *con; + size_t fb_len; + int row_stride; + int depth; + int width; + int height; + int offset; + void *pixels; + int fbpages; + int feature_update; + int bug_trigger; + int do_resize; + + struct { + int x,y,w,h; + } up_rects[UP_QUEUE]; + int up_count; + int up_fullscreen; +}; +static const GraphicHwOps xenfb_ops; + +/* -------------------------------------------------------------------- */ + +static int common_bind(struct common *c) +{ + uint64_t val; + xen_pfn_t mfn; + + if (xenstore_read_fe_uint64(&c->xendev, "page-ref", &val) == -1) + return -1; + mfn = (xen_pfn_t)val; + assert(val == mfn); + + if (xenstore_read_fe_int(&c->xendev, "event-channel", &c->xendev.remote_port) == -1) + return -1; + + c->page = xenforeignmemory_map(xen_fmem, c->xendev.dom, + PROT_READ | PROT_WRITE, 1, &mfn, NULL); + if (c->page == NULL) + return -1; + + xen_be_bind_evtchn(&c->xendev); + xen_pv_printf(&c->xendev, 1, + "ring mfn %"PRI_xen_pfn", remote-port %d, local-port %d\n", + mfn, c->xendev.remote_port, c->xendev.local_port); + + return 0; +} + +static void common_unbind(struct common *c) +{ + xen_pv_unbind_evtchn(&c->xendev); + if (c->page) { + xenforeignmemory_unmap(xen_fmem, c->page, 1); + c->page = NULL; + } +} + +/* -------------------------------------------------------------------- */ +/* Send an event to the keyboard frontend driver */ +static int xenfb_kbd_event(struct XenInput *xenfb, + union xenkbd_in_event *event) +{ + struct xenkbd_page *page = xenfb->c.page; + uint32_t prod; + + if (xenfb->c.xendev.be_state != XenbusStateConnected) + return 0; + if (!page) + return 0; + + prod = page->in_prod; + if (prod - page->in_cons == XENKBD_IN_RING_LEN) { + errno = EAGAIN; + return -1; + } + + xen_mb(); /* ensure ring space available */ + XENKBD_IN_RING_REF(page, prod) = *event; + xen_wmb(); /* ensure ring contents visible */ + page->in_prod = prod + 1; + return xen_pv_send_notify(&xenfb->c.xendev); +} + +/* Send a keyboard (or mouse button) event */ +static int xenfb_send_key(struct XenInput *xenfb, bool down, int keycode) +{ + union xenkbd_in_event event; + + memset(&event, 0, XENKBD_IN_EVENT_SIZE); + event.type = XENKBD_TYPE_KEY; + event.key.pressed = down ? 
1 : 0; + event.key.keycode = keycode; + + return xenfb_kbd_event(xenfb, &event); +} + +/* Send a relative mouse movement event */ +static int xenfb_send_motion(struct XenInput *xenfb, + int rel_x, int rel_y, int rel_z) +{ + union xenkbd_in_event event; + + memset(&event, 0, XENKBD_IN_EVENT_SIZE); + event.type = XENKBD_TYPE_MOTION; + event.motion.rel_x = rel_x; + event.motion.rel_y = rel_y; + event.motion.rel_z = rel_z; + + return xenfb_kbd_event(xenfb, &event); +} + +/* Send an absolute mouse movement event */ +static int xenfb_send_position(struct XenInput *xenfb, + int abs_x, int abs_y, int z) +{ + union xenkbd_in_event event; + + memset(&event, 0, XENKBD_IN_EVENT_SIZE); + event.type = XENKBD_TYPE_POS; + event.pos.abs_x = abs_x; + event.pos.abs_y = abs_y; + event.pos.rel_z = z; + + return xenfb_kbd_event(xenfb, &event); +} + +/* + * Send a key event from the client to the guest OS + * QEMU gives us a QCode. + * We have to turn this into a Linux Input layer keycode. + * + * Wish we could just send scancodes straight to the guest which + * already has code for dealing with this... + */ +static void xenfb_key_event(DeviceState *dev, QemuConsole *src, + InputEvent *evt) +{ + struct XenInput *xenfb = (struct XenInput *)dev; + InputKeyEvent *key = evt->u.key.data; + int qcode = qemu_input_key_value_to_qcode(key->key); + int lnx; + + if (qcode < qemu_input_map_qcode_to_linux_len) { + lnx = qemu_input_map_qcode_to_linux[qcode]; + + if (lnx) { + trace_xenfb_key_event(xenfb, lnx, key->down); + xenfb_send_key(xenfb, key->down, lnx); + } + } +} + +/* + * Send a mouse event from the client to the guest OS + * + * The QEMU mouse can be in either relative, or absolute mode. + * Movement is sent separately from button state, which has to + * be encoded as virtual key events. We also don't actually get + * given any button up/down events, so have to track changes in + * the button state. 
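+ * Wheel motion likewise arrives as WHEEL_UP/DOWN button presses; we + * accumulate it in ->wheel and only flush it to the guest from the + * sync callback.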
+ */ +static void xenfb_mouse_event(DeviceState *dev, QemuConsole *src, + InputEvent *evt) +{ + struct XenInput *xenfb = (struct XenInput *)dev; + InputBtnEvent *btn; + InputMoveEvent *move; + QemuConsole *con; + DisplaySurface *surface; + int scale; + + switch (evt->type) { + case INPUT_EVENT_KIND_BTN: + btn = evt->u.btn.data; + switch (btn->button) { + case INPUT_BUTTON_LEFT: + xenfb_send_key(xenfb, btn->down, BTN_LEFT); + break; + case INPUT_BUTTON_RIGHT: + xenfb_send_key(xenfb, btn->down, BTN_LEFT + 1); + break; + case INPUT_BUTTON_MIDDLE: + xenfb_send_key(xenfb, btn->down, BTN_LEFT + 2); + break; + case INPUT_BUTTON_WHEEL_UP: + if (btn->down) { + xenfb->wheel--; + } + break; + case INPUT_BUTTON_WHEEL_DOWN: + if (btn->down) { + xenfb->wheel++; + } + break; + default: + break; + } + break; + + case INPUT_EVENT_KIND_ABS: + move = evt->u.abs.data; + if (xenfb->raw_pointer_wanted) { + xenfb->axis[move->axis] = move->value; + } else { + con = qemu_console_lookup_by_index(0); + if (!con) { + xen_pv_printf(&xenfb->c.xendev, 0, "No QEMU console available"); + return; + } + surface = qemu_console_surface(con); + switch (move->axis) { + case INPUT_AXIS_X: + scale = surface_width(surface) - 1; + break; + case INPUT_AXIS_Y: + scale = surface_height(surface) - 1; + break; + default: + scale = 0x8000; + break; + } + xenfb->axis[move->axis] = move->value * scale / 0x7fff; + } + break; + + case INPUT_EVENT_KIND_REL: + move = evt->u.rel.data; + xenfb->axis[move->axis] += move->value; + break; + + default: + break; + } +} + +static void xenfb_mouse_sync(DeviceState *dev) +{ + struct XenInput *xenfb = (struct XenInput *)dev; + + trace_xenfb_mouse_event(xenfb, xenfb->axis[INPUT_AXIS_X], + xenfb->axis[INPUT_AXIS_Y], + xenfb->wheel, 0, + xenfb->abs_pointer_wanted); + if (xenfb->abs_pointer_wanted) { + xenfb_send_position(xenfb, xenfb->axis[INPUT_AXIS_X], + xenfb->axis[INPUT_AXIS_Y], + xenfb->wheel); + } else { + xenfb_send_motion(xenfb, xenfb->axis[INPUT_AXIS_X], + xenfb->axis[INPUT_AXIS_Y], + xenfb->wheel); + xenfb->axis[INPUT_AXIS_X] = 0; + xenfb->axis[INPUT_AXIS_Y] = 0; + } + xenfb->wheel = 0; +} + +static QemuInputHandler xenfb_keyboard = { + .name = "Xen PV Keyboard", + .mask = INPUT_EVENT_MASK_KEY, + .event = xenfb_key_event, +}; + +static QemuInputHandler xenfb_abs_mouse = { + .name = "Xen PV Mouse", + .mask = INPUT_EVENT_MASK_BTN | INPUT_EVENT_MASK_ABS, + .event = xenfb_mouse_event, + .sync = xenfb_mouse_sync, +}; + +static QemuInputHandler xenfb_rel_mouse = { + .name = "Xen PV Mouse", + .mask = INPUT_EVENT_MASK_BTN | INPUT_EVENT_MASK_REL, + .event = xenfb_mouse_event, + .sync = xenfb_mouse_sync, +}; + +static int input_init(struct XenLegacyDevice *xendev) +{ + xenstore_write_be_int(xendev, "feature-abs-pointer", 1); + xenstore_write_be_int(xendev, "feature-raw-pointer", 1); + return 0; +} + +static int input_initialise(struct XenLegacyDevice *xendev) +{ + struct XenInput *in = container_of(xendev, struct XenInput, c.xendev); + int rc; + + rc = common_bind(&in->c); + if (rc != 0) + return rc; + + return 0; +} + +static void input_connected(struct XenLegacyDevice *xendev) +{ + struct XenInput *in = container_of(xendev, struct XenInput, c.xendev); + + if (xenstore_read_fe_int(xendev, "request-abs-pointer", + &in->abs_pointer_wanted) == -1) { + in->abs_pointer_wanted = 0; + } + if (xenstore_read_fe_int(xendev, "request-raw-pointer", + &in->raw_pointer_wanted) == -1) { + in->raw_pointer_wanted = 0; + } + if (in->raw_pointer_wanted && in->abs_pointer_wanted == 0) { + xen_pv_printf(xendev, 0, "raw pointer 
set without abs pointer"); + } + + if (in->qkbd) { + qemu_input_handler_unregister(in->qkbd); + } + if (in->qmou) { + qemu_input_handler_unregister(in->qmou); + } + trace_xenfb_input_connected(xendev, in->abs_pointer_wanted); + + in->qkbd = qemu_input_handler_register((DeviceState *)in, &xenfb_keyboard); + in->qmou = qemu_input_handler_register((DeviceState *)in, + in->abs_pointer_wanted ? &xenfb_abs_mouse : &xenfb_rel_mouse); + + if (in->raw_pointer_wanted) { + qemu_input_handler_activate(in->qkbd); + qemu_input_handler_activate(in->qmou); + } +} + +static void input_disconnect(struct XenLegacyDevice *xendev) +{ + struct XenInput *in = container_of(xendev, struct XenInput, c.xendev); + + if (in->qkbd) { + qemu_input_handler_unregister(in->qkbd); + in->qkbd = NULL; + } + if (in->qmou) { + qemu_input_handler_unregister(in->qmou); + in->qmou = NULL; + } + common_unbind(&in->c); +} + +static void input_event(struct XenLegacyDevice *xendev) +{ + struct XenInput *xenfb = container_of(xendev, struct XenInput, c.xendev); + struct xenkbd_page *page = xenfb->c.page; + + /* We don't understand any keyboard events, so just ignore them. */ + if (page->out_prod == page->out_cons) + return; + page->out_cons = page->out_prod; + xen_pv_send_notify(&xenfb->c.xendev); +} + +/* -------------------------------------------------------------------- */ + +static void xenfb_copy_mfns(int mode, int count, xen_pfn_t *dst, void *src) +{ + uint32_t *src32 = src; + uint64_t *src64 = src; + int i; + + for (i = 0; i < count; i++) + dst[i] = (mode == 32) ? src32[i] : src64[i]; +} + +static int xenfb_map_fb(struct XenFB *xenfb) +{ + struct xenfb_page *page = xenfb->c.page; + char *protocol = xenfb->c.xendev.protocol; + int n_fbdirs; + xen_pfn_t *pgmfns = NULL; + xen_pfn_t *fbmfns = NULL; + void *map, *pd; + int mode, ret = -1; + + /* default to native */ + pd = page->pd; + mode = sizeof(unsigned long) * 8; + + if (!protocol) { + /* + * Undefined protocol, some guesswork needed. + * + * Old frontends which don't set the protocol use + * one page directory only, thus pd[1] must be zero. + * pd[1] of the 32bit struct layout and the lower + * 32 bits of pd[0] of the 64bit struct layout have + * the same location, so we can check that ... 
+ */ + uint32_t *ptr32 = NULL; + uint32_t *ptr64 = NULL; +#if defined(__i386__) + ptr32 = (void*)page->pd; + ptr64 = ((void*)page->pd) + 4; +#elif defined(__x86_64__) + ptr32 = ((void*)page->pd) - 4; + ptr64 = (void*)page->pd; +#endif + if (ptr32) { + if (ptr32[1] == 0) { + mode = 32; + pd = ptr32; + } else { + mode = 64; + pd = ptr64; + } + } +#if defined(__x86_64__) + } else if (strcmp(protocol, XEN_IO_PROTO_ABI_X86_32) == 0) { + /* 64bit dom0, 32bit domU */ + mode = 32; + pd = ((void*)page->pd) - 4; +#elif defined(__i386__) + } else if (strcmp(protocol, XEN_IO_PROTO_ABI_X86_64) == 0) { + /* 32bit dom0, 64bit domU */ + mode = 64; + pd = ((void*)page->pd) + 4; +#endif + } + + if (xenfb->pixels) { + munmap(xenfb->pixels, xenfb->fbpages * XC_PAGE_SIZE); + xenfb->pixels = NULL; + } + + xenfb->fbpages = DIV_ROUND_UP(xenfb->fb_len, XC_PAGE_SIZE); + n_fbdirs = xenfb->fbpages * mode / 8; + n_fbdirs = DIV_ROUND_UP(n_fbdirs, XC_PAGE_SIZE); + + pgmfns = g_malloc0(sizeof(xen_pfn_t) * n_fbdirs); + fbmfns = g_malloc0(sizeof(xen_pfn_t) * xenfb->fbpages); + + xenfb_copy_mfns(mode, n_fbdirs, pgmfns, pd); + map = xenforeignmemory_map(xen_fmem, xenfb->c.xendev.dom, + PROT_READ, n_fbdirs, pgmfns, NULL); + if (map == NULL) + goto out; + xenfb_copy_mfns(mode, xenfb->fbpages, fbmfns, map); + xenforeignmemory_unmap(xen_fmem, map, n_fbdirs); + + xenfb->pixels = xenforeignmemory_map(xen_fmem, xenfb->c.xendev.dom, + PROT_READ, xenfb->fbpages, fbmfns, NULL); + if (xenfb->pixels == NULL) + goto out; + + ret = 0; /* all is fine */ + +out: + g_free(pgmfns); + g_free(fbmfns); + return ret; +} + +static int xenfb_configure_fb(struct XenFB *xenfb, size_t fb_len_lim, + int width, int height, int depth, + size_t fb_len, int offset, int row_stride) +{ + size_t mfn_sz = sizeof_field(struct xenfb_page, pd[0]); + size_t pd_len = sizeof_field(struct xenfb_page, pd) / mfn_sz; + size_t fb_pages = pd_len * XC_PAGE_SIZE / mfn_sz; + size_t fb_len_max = fb_pages * XC_PAGE_SIZE; + int max_width, max_height; + + if (fb_len_lim > fb_len_max) { + xen_pv_printf(&xenfb->c.xendev, 0, + "fb size limit %zu exceeds %zu, corrected\n", + fb_len_lim, fb_len_max); + fb_len_lim = fb_len_max; + } + if (fb_len_lim && fb_len > fb_len_lim) { + xen_pv_printf(&xenfb->c.xendev, 0, + "frontend fb size %zu limited to %zu\n", + fb_len, fb_len_lim); + fb_len = fb_len_lim; + } + if (depth != 8 && depth != 16 && depth != 24 && depth != 32) { + xen_pv_printf(&xenfb->c.xendev, 0, + "can't handle frontend fb depth %d\n", + depth); + return -1; + } + if (row_stride <= 0 || row_stride > fb_len) { + xen_pv_printf(&xenfb->c.xendev, 0, "invalid frontend stride %d\n", + row_stride); + return -1; + } + max_width = row_stride / (depth / 8); + if (width < 0 || width > max_width) { + xen_pv_printf(&xenfb->c.xendev, 0, + "invalid frontend width %d limited to %d\n", + width, max_width); + width = max_width; + } + if (offset < 0 || offset >= fb_len) { + xen_pv_printf(&xenfb->c.xendev, 0, + "invalid frontend offset %d (max %zu)\n", + offset, fb_len - 1); + return -1; + } + max_height = (fb_len - offset) / row_stride; + if (height < 0 || height > max_height) { + xen_pv_printf(&xenfb->c.xendev, 0, + "invalid frontend height %d limited to %d\n", + height, max_height); + height = max_height; + } + xenfb->fb_len = fb_len; + xenfb->row_stride = row_stride; + xenfb->depth = depth; + xenfb->width = width; + xenfb->height = height; + xenfb->offset = offset; + xenfb->up_fullscreen = 1; + xenfb->do_resize = 1; + xen_pv_printf(&xenfb->c.xendev, 1, + "framebuffer %dx%dx%d offset %d stride 
%d\n", + width, height, depth, offset, row_stride); + return 0; +} + +/* A convenient function for munging pixels between different depths */ +#define BLT(SRC_T,DST_T,RSB,GSB,BSB,RDB,GDB,BDB) \ + for (line = y ; line < (y+h) ; line++) { \ + SRC_T *src = (SRC_T *)(xenfb->pixels \ + + xenfb->offset \ + + (line * xenfb->row_stride) \ + + (x * xenfb->depth / 8)); \ + DST_T *dst = (DST_T *)(data \ + + (line * linesize) \ + + (x * bpp / 8)); \ + int col; \ + const int RSS = 32 - (RSB + GSB + BSB); \ + const int GSS = 32 - (GSB + BSB); \ + const int BSS = 32 - (BSB); \ + const uint32_t RSM = (~0U) << (32 - RSB); \ + const uint32_t GSM = (~0U) << (32 - GSB); \ + const uint32_t BSM = (~0U) << (32 - BSB); \ + const int RDS = 32 - (RDB + GDB + BDB); \ + const int GDS = 32 - (GDB + BDB); \ + const int BDS = 32 - (BDB); \ + const uint32_t RDM = (~0U) << (32 - RDB); \ + const uint32_t GDM = (~0U) << (32 - GDB); \ + const uint32_t BDM = (~0U) << (32 - BDB); \ + for (col = x ; col < (x+w) ; col++) { \ + uint32_t spix = *src; \ + *dst = (((spix << RSS) & RSM & RDM) >> RDS) | \ + (((spix << GSS) & GSM & GDM) >> GDS) | \ + (((spix << BSS) & BSM & BDM) >> BDS); \ + src = (SRC_T *) ((unsigned long) src + xenfb->depth / 8); \ + dst = (DST_T *) ((unsigned long) dst + bpp / 8); \ + } \ + } + + +/* + * This copies data from the guest framebuffer region, into QEMU's + * displaysurface. qemu uses 16 or 32 bpp. In case the pv framebuffer + * uses something else we must convert and copy, otherwise we can + * supply the buffer directly and no thing here. + */ +static void xenfb_guest_copy(struct XenFB *xenfb, int x, int y, int w, int h) +{ + DisplaySurface *surface = qemu_console_surface(xenfb->con); + int line, oops = 0; + int bpp = surface_bits_per_pixel(surface); + int linesize = surface_stride(surface); + uint8_t *data = surface_data(surface); + + if (!is_buffer_shared(surface)) { + switch (xenfb->depth) { + case 8: + if (bpp == 16) { + BLT(uint8_t, uint16_t, 3, 3, 2, 5, 6, 5); + } else if (bpp == 32) { + BLT(uint8_t, uint32_t, 3, 3, 2, 8, 8, 8); + } else { + oops = 1; + } + break; + case 24: + if (bpp == 16) { + BLT(uint32_t, uint16_t, 8, 8, 8, 5, 6, 5); + } else if (bpp == 32) { + BLT(uint32_t, uint32_t, 8, 8, 8, 8, 8, 8); + } else { + oops = 1; + } + break; + default: + oops = 1; + } + } + if (oops) /* should not happen */ + xen_pv_printf(&xenfb->c.xendev, 0, "%s: oops: convert %d -> %d bpp?\n", + __func__, xenfb->depth, bpp); + + dpy_gfx_update(xenfb->con, x, y, w, h); +} + +#ifdef XENFB_TYPE_REFRESH_PERIOD +static int xenfb_queue_full(struct XenFB *xenfb) +{ + struct xenfb_page *page = xenfb->c.page; + uint32_t cons, prod; + + if (!page) + return 1; + + prod = page->in_prod; + cons = page->in_cons; + return prod - cons == XENFB_IN_RING_LEN; +} + +static void xenfb_send_event(struct XenFB *xenfb, union xenfb_in_event *event) +{ + uint32_t prod; + struct xenfb_page *page = xenfb->c.page; + + prod = page->in_prod; + /* caller ensures !xenfb_queue_full() */ + xen_mb(); /* ensure ring space available */ + XENFB_IN_RING_REF(page, prod) = *event; + xen_wmb(); /* ensure ring contents visible */ + page->in_prod = prod + 1; + + xen_pv_send_notify(&xenfb->c.xendev); +} + +static void xenfb_send_refresh_period(struct XenFB *xenfb, int period) +{ + union xenfb_in_event event; + + memset(&event, 0, sizeof(event)); + event.type = XENFB_TYPE_REFRESH_PERIOD; + event.refresh_period.period = period; + xenfb_send_event(xenfb, &event); +} +#endif + +/* + * Periodic update of display. 
+ * Also transmit the refresh interval to the frontend. + * + * Never ever do any qemu display operations + * (resize, screen update) outside this function. + * Our screen might be inactive. When asked for + * an update we know it is active. + */ +static void xenfb_update(void *opaque) +{ + struct XenFB *xenfb = opaque; + DisplaySurface *surface; + int i; + + if (xenfb->c.xendev.be_state != XenbusStateConnected) + return; + + if (!xenfb->feature_update) { + /* we don't get update notifications, thus use the + * sledge hammer approach ... */ + xenfb->up_fullscreen = 1; + } + + /* resize if needed */ + if (xenfb->do_resize) { + pixman_format_code_t format; + + xenfb->do_resize = 0; + switch (xenfb->depth) { + case 16: + case 32: + /* console.c supported depth -> buffer can be used directly */ + format = qemu_default_pixman_format(xenfb->depth, true); + surface = qemu_create_displaysurface_from + (xenfb->width, xenfb->height, format, + xenfb->row_stride, xenfb->pixels + xenfb->offset); + break; + default: + /* we must convert stuff */ + surface = qemu_create_displaysurface(xenfb->width, xenfb->height); + break; + } + dpy_gfx_replace_surface(xenfb->con, surface); + xen_pv_printf(&xenfb->c.xendev, 1, + "update: resizing: %dx%d @ %d bpp%s\n", + xenfb->width, xenfb->height, xenfb->depth, + is_buffer_shared(surface) ? " (shared)" : ""); + xenfb->up_fullscreen = 1; + } + + /* run queued updates */ + if (xenfb->up_fullscreen) { + xen_pv_printf(&xenfb->c.xendev, 3, "update: fullscreen\n"); + xenfb_guest_copy(xenfb, 0, 0, xenfb->width, xenfb->height); + } else if (xenfb->up_count) { + xen_pv_printf(&xenfb->c.xendev, 3, "update: %d rects\n", + xenfb->up_count); + for (i = 0; i < xenfb->up_count; i++) + xenfb_guest_copy(xenfb, + xenfb->up_rects[i].x, + xenfb->up_rects[i].y, + xenfb->up_rects[i].w, + xenfb->up_rects[i].h); + } else { + xen_pv_printf(&xenfb->c.xendev, 3, "update: nothing\n"); + } + xenfb->up_count = 0; + xenfb->up_fullscreen = 0; +} + +static void xenfb_update_interval(void *opaque, uint64_t interval) +{ + struct XenFB *xenfb = opaque; + + if (xenfb->feature_update) { +#ifdef XENFB_TYPE_REFRESH_PERIOD + if (xenfb_queue_full(xenfb)) { + return; + } + xenfb_send_refresh_period(xenfb, interval); +#endif + } +} + +/* QEMU display state changed, so refresh the framebuffer copy */ +static void xenfb_invalidate(void *opaque) +{ + struct XenFB *xenfb = opaque; + xenfb->up_fullscreen = 1; +} + +static void xenfb_handle_events(struct XenFB *xenfb) +{ + uint32_t prod, cons, out_cons; + struct xenfb_page *page = xenfb->c.page; + + prod = page->out_prod; + out_cons = page->out_cons; + if (prod - out_cons > XENFB_OUT_RING_LEN) { + return; + } + xen_rmb(); /* ensure we see ring contents up to prod */ + for (cons = out_cons; cons != prod; cons++) { + union xenfb_out_event *event = &XENFB_OUT_RING_REF(page, cons); + uint8_t type = event->type; + int x, y, w, h; + + switch (type) { + case XENFB_TYPE_UPDATE: + if (xenfb->up_count == UP_QUEUE) + xenfb->up_fullscreen = 1; + if (xenfb->up_fullscreen) + break; + x = MAX(event->update.x, 0); + y = MAX(event->update.y, 0); + w = MIN(event->update.width, xenfb->width - x); + h = MIN(event->update.height, xenfb->height - y); + if (w < 0 || h < 0) { + xen_pv_printf(&xenfb->c.xendev, 1, "bogus update ignored\n"); + break; + } + if (x != event->update.x || + y != event->update.y || + w != event->update.width || + h != event->update.height) { + xen_pv_printf(&xenfb->c.xendev, 1, "bogus update clipped\n"); + } + if (w == xenfb->width && h > xenfb->height / 2) { + /* 
scroll detector: updated more than 50% of the lines, + * don't bother keeping track of the rectangles then */ + xenfb->up_fullscreen = 1; + } else { + xenfb->up_rects[xenfb->up_count].x = x; + xenfb->up_rects[xenfb->up_count].y = y; + xenfb->up_rects[xenfb->up_count].w = w; + xenfb->up_rects[xenfb->up_count].h = h; + xenfb->up_count++; + } + break; +#ifdef XENFB_TYPE_RESIZE + case XENFB_TYPE_RESIZE: + if (xenfb_configure_fb(xenfb, xenfb->fb_len, + event->resize.width, + event->resize.height, + event->resize.depth, + xenfb->fb_len, + event->resize.offset, + event->resize.stride) < 0) + break; + xenfb_invalidate(xenfb); + break; +#endif + } + } + xen_mb(); /* ensure we're done with ring contents */ + page->out_cons = cons; +} + +static int fb_init(struct XenLegacyDevice *xendev) +{ +#ifdef XENFB_TYPE_RESIZE + xenstore_write_be_int(xendev, "feature-resize", 1); +#endif + return 0; +} + +static int fb_initialise(struct XenLegacyDevice *xendev) +{ + struct XenFB *fb = container_of(xendev, struct XenFB, c.xendev); + struct xenfb_page *fb_page; + int videoram; + int rc; + + if (xenstore_read_fe_int(xendev, "videoram", &videoram) == -1) + videoram = 0; + + rc = common_bind(&fb->c); + if (rc != 0) + return rc; + + fb_page = fb->c.page; + rc = xenfb_configure_fb(fb, videoram * MiB, + fb_page->width, fb_page->height, fb_page->depth, + fb_page->mem_length, 0, fb_page->line_length); + if (rc != 0) + return rc; + + rc = xenfb_map_fb(fb); + if (rc != 0) + return rc; + + fb->con = graphic_console_init(NULL, 0, &xenfb_ops, fb); + + if (xenstore_read_fe_int(xendev, "feature-update", &fb->feature_update) == -1) + fb->feature_update = 0; + if (fb->feature_update) + xenstore_write_be_int(xendev, "request-update", 1); + + xen_pv_printf(xendev, 1, "feature-update=%d, videoram=%d\n", + fb->feature_update, videoram); + return 0; +} + +static void fb_disconnect(struct XenLegacyDevice *xendev) +{ + struct XenFB *fb = container_of(xendev, struct XenFB, c.xendev); + + /* + * FIXME: qemu can't un-init gfx display (yet?). + * Replacing the framebuffer with anonymous shared memory + * instead. This releases the guest pages and keeps qemu happy. + */ + xenforeignmemory_unmap(xen_fmem, fb->pixels, fb->fbpages); + fb->pixels = mmap(fb->pixels, fb->fbpages * XC_PAGE_SIZE, + PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANON, + -1, 0); + if (fb->pixels == MAP_FAILED) { + xen_pv_printf(xendev, 0, + "Couldn't replace the framebuffer with anonymous memory errno=%d\n", + errno); + } + common_unbind(&fb->c); + fb->feature_update = 0; + fb->bug_trigger = 0; +} + +static void fb_frontend_changed(struct XenLegacyDevice *xendev, + const char *node) +{ + struct XenFB *fb = container_of(xendev, struct XenFB, c.xendev); + + /* + * Set state to Connected *again* once the frontend switched + * to connected. We must trigger the watch a second time to + * workaround a frontend bug. 
+ */ + if (fb->bug_trigger == 0 && strcmp(node, "state") == 0 && + xendev->fe_state == XenbusStateConnected && + xendev->be_state == XenbusStateConnected) { + xen_pv_printf(xendev, 2, "re-trigger connected (frontend bug)\n"); + xen_be_set_state(xendev, XenbusStateConnected); + fb->bug_trigger = 1; /* only once */ + } +} + +static void fb_event(struct XenLegacyDevice *xendev) +{ + struct XenFB *xenfb = container_of(xendev, struct XenFB, c.xendev); + + xenfb_handle_events(xenfb); + xen_pv_send_notify(&xenfb->c.xendev); +} + +/* -------------------------------------------------------------------- */ + +struct XenDevOps xen_kbdmouse_ops = { + .size = sizeof(struct XenInput), + .init = input_init, + .initialise = input_initialise, + .connected = input_connected, + .disconnect = input_disconnect, + .event = input_event, +}; + +struct XenDevOps xen_framebuffer_ops = { + .size = sizeof(struct XenFB), + .init = fb_init, + .initialise = fb_initialise, + .disconnect = fb_disconnect, + .event = fb_event, + .frontend_changed = fb_frontend_changed, +}; + +static const GraphicHwOps xenfb_ops = { + .invalidate = xenfb_invalidate, + .gfx_update = xenfb_update, + .update_interval = xenfb_update_interval, +}; diff --git a/hw/display/xlnx_dp.c b/hw/display/xlnx_dp.c new file mode 100644 index 000000000..9bb781e31 --- /dev/null +++ b/hw/display/xlnx_dp.c @@ -0,0 +1,1383 @@ +/* + * Xilinx Display Port + * + * Copyright (C) 2015 : GreenSocs Ltd + * http://www.greensocs.com/ , email: info@greensocs.com + * + * Developed by : + * Frederic Konrad + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option)any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . + * + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/error-report.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "hw/display/xlnx_dp.h" +#include "hw/irq.h" +#include "migration/vmstate.h" + +#ifndef DEBUG_DP +#define DEBUG_DP 0 +#endif + +#define DPRINTF(fmt, ...) do { \ + if (DEBUG_DP) { \ + qemu_log("xlnx_dp: " fmt , ## __VA_ARGS__); \ + } \ +} while (0) + +/* + * Register offset for DP. 
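+ *
+ * (Each offset below is a byte offset shifted right by 2, so it can be
+ * used directly as an index into the 32-bit core_registers array.)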
+ */ +#define DP_LINK_BW_SET (0x0000 >> 2) +#define DP_LANE_COUNT_SET (0x0004 >> 2) +#define DP_ENHANCED_FRAME_EN (0x0008 >> 2) +#define DP_TRAINING_PATTERN_SET (0x000C >> 2) +#define DP_LINK_QUAL_PATTERN_SET (0x0010 >> 2) +#define DP_SCRAMBLING_DISABLE (0x0014 >> 2) +#define DP_DOWNSPREAD_CTRL (0x0018 >> 2) +#define DP_SOFTWARE_RESET (0x001C >> 2) +#define DP_TRANSMITTER_ENABLE (0x0080 >> 2) +#define DP_MAIN_STREAM_ENABLE (0x0084 >> 2) +#define DP_FORCE_SCRAMBLER_RESET (0x00C0 >> 2) +#define DP_VERSION_REGISTER (0x00F8 >> 2) +#define DP_CORE_ID (0x00FC >> 2) + +#define DP_AUX_COMMAND_REGISTER (0x0100 >> 2) +#define AUX_ADDR_ONLY_MASK (0x1000) +#define AUX_COMMAND_MASK (0x0F00) +#define AUX_COMMAND_SHIFT (8) +#define AUX_COMMAND_NBYTES (0x000F) + +#define DP_AUX_WRITE_FIFO (0x0104 >> 2) +#define DP_AUX_ADDRESS (0x0108 >> 2) +#define DP_AUX_CLOCK_DIVIDER (0x010C >> 2) +#define DP_TX_USER_FIFO_OVERFLOW (0x0110 >> 2) +#define DP_INTERRUPT_SIGNAL_STATE (0x0130 >> 2) +#define DP_AUX_REPLY_DATA (0x0134 >> 2) +#define DP_AUX_REPLY_CODE (0x0138 >> 2) +#define DP_AUX_REPLY_COUNT (0x013C >> 2) +#define DP_REPLY_DATA_COUNT (0x0148 >> 2) +#define DP_REPLY_STATUS (0x014C >> 2) +#define DP_HPD_DURATION (0x0150 >> 2) +#define DP_MAIN_STREAM_HTOTAL (0x0180 >> 2) +#define DP_MAIN_STREAM_VTOTAL (0x0184 >> 2) +#define DP_MAIN_STREAM_POLARITY (0x0188 >> 2) +#define DP_MAIN_STREAM_HSWIDTH (0x018C >> 2) +#define DP_MAIN_STREAM_VSWIDTH (0x0190 >> 2) +#define DP_MAIN_STREAM_HRES (0x0194 >> 2) +#define DP_MAIN_STREAM_VRES (0x0198 >> 2) +#define DP_MAIN_STREAM_HSTART (0x019C >> 2) +#define DP_MAIN_STREAM_VSTART (0x01A0 >> 2) +#define DP_MAIN_STREAM_MISC0 (0x01A4 >> 2) +#define DP_MAIN_STREAM_MISC1 (0x01A8 >> 2) +#define DP_MAIN_STREAM_M_VID (0x01AC >> 2) +#define DP_MSA_TRANSFER_UNIT_SIZE (0x01B0 >> 2) +#define DP_MAIN_STREAM_N_VID (0x01B4 >> 2) +#define DP_USER_DATA_COUNT_PER_LANE (0x01BC >> 2) +#define DP_MIN_BYTES_PER_TU (0x01C4 >> 2) +#define DP_FRAC_BYTES_PER_TU (0x01C8 >> 2) +#define DP_INIT_WAIT (0x01CC >> 2) +#define DP_PHY_RESET (0x0200 >> 2) +#define DP_PHY_VOLTAGE_DIFF_LANE_0 (0x0220 >> 2) +#define DP_PHY_VOLTAGE_DIFF_LANE_1 (0x0224 >> 2) +#define DP_TRANSMIT_PRBS7 (0x0230 >> 2) +#define DP_PHY_CLOCK_SELECT (0x0234 >> 2) +#define DP_TX_PHY_POWER_DOWN (0x0238 >> 2) +#define DP_PHY_PRECURSOR_LANE_0 (0x023C >> 2) +#define DP_PHY_PRECURSOR_LANE_1 (0x0240 >> 2) +#define DP_PHY_POSTCURSOR_LANE_0 (0x024C >> 2) +#define DP_PHY_POSTCURSOR_LANE_1 (0x0250 >> 2) +#define DP_PHY_STATUS (0x0280 >> 2) + +#define DP_TX_AUDIO_CONTROL (0x0300 >> 2) +#define DP_TX_AUD_CTRL (1) + +#define DP_TX_AUDIO_CHANNELS (0x0304 >> 2) +#define DP_TX_AUDIO_INFO_DATA(n) ((0x0308 + 4 * n) >> 2) +#define DP_TX_M_AUD (0x0328 >> 2) +#define DP_TX_N_AUD (0x032C >> 2) +#define DP_TX_AUDIO_EXT_DATA(n) ((0x0330 + 4 * n) >> 2) +#define DP_INT_STATUS (0x03A0 >> 2) +#define DP_INT_MASK (0x03A4 >> 2) +#define DP_INT_EN (0x03A8 >> 2) +#define DP_INT_DS (0x03AC >> 2) + +/* + * Registers offset for Audio Video Buffer configuration. 
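+ *
+ * (Note: this first block actually describes the Video Blender (V_BLEND)
+ * registers; the Audio Video Buffer manager registers are defined in a
+ * later block carrying the same heading.)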
+ */ +#define V_BLEND_OFFSET (0xA000) +#define V_BLEND_BG_CLR_0 (0x0000 >> 2) +#define V_BLEND_BG_CLR_1 (0x0004 >> 2) +#define V_BLEND_BG_CLR_2 (0x0008 >> 2) +#define V_BLEND_SET_GLOBAL_ALPHA_REG (0x000C >> 2) +#define V_BLEND_OUTPUT_VID_FORMAT (0x0014 >> 2) +#define V_BLEND_LAYER0_CONTROL (0x0018 >> 2) +#define V_BLEND_LAYER1_CONTROL (0x001C >> 2) + +#define V_BLEND_RGB2YCBCR_COEFF(n) ((0x0020 + 4 * n) >> 2) +#define V_BLEND_IN1CSC_COEFF(n) ((0x0044 + 4 * n) >> 2) + +#define V_BLEND_LUMA_IN1CSC_OFFSET (0x0068 >> 2) +#define V_BLEND_CR_IN1CSC_OFFSET (0x006C >> 2) +#define V_BLEND_CB_IN1CSC_OFFSET (0x0070 >> 2) +#define V_BLEND_LUMA_OUTCSC_OFFSET (0x0074 >> 2) +#define V_BLEND_CR_OUTCSC_OFFSET (0x0078 >> 2) +#define V_BLEND_CB_OUTCSC_OFFSET (0x007C >> 2) + +#define V_BLEND_IN2CSC_COEFF(n) ((0x0080 + 4 * n) >> 2) + +#define V_BLEND_LUMA_IN2CSC_OFFSET (0x00A4 >> 2) +#define V_BLEND_CR_IN2CSC_OFFSET (0x00A8 >> 2) +#define V_BLEND_CB_IN2CSC_OFFSET (0x00AC >> 2) +#define V_BLEND_CHROMA_KEY_ENABLE (0x01D0 >> 2) +#define V_BLEND_CHROMA_KEY_COMP1 (0x01D4 >> 2) +#define V_BLEND_CHROMA_KEY_COMP2 (0x01D8 >> 2) +#define V_BLEND_CHROMA_KEY_COMP3 (0x01DC >> 2) + +/* + * Registers offset for Audio Video Buffer configuration. + */ +#define AV_BUF_MANAGER_OFFSET (0xB000) +#define AV_BUF_FORMAT (0x0000 >> 2) +#define AV_BUF_NON_LIVE_LATENCY (0x0008 >> 2) +#define AV_CHBUF0 (0x0010 >> 2) +#define AV_CHBUF1 (0x0014 >> 2) +#define AV_CHBUF2 (0x0018 >> 2) +#define AV_CHBUF3 (0x001C >> 2) +#define AV_CHBUF4 (0x0020 >> 2) +#define AV_CHBUF5 (0x0024 >> 2) +#define AV_BUF_STC_CONTROL (0x002C >> 2) +#define AV_BUF_STC_INIT_VALUE0 (0x0030 >> 2) +#define AV_BUF_STC_INIT_VALUE1 (0x0034 >> 2) +#define AV_BUF_STC_ADJ (0x0038 >> 2) +#define AV_BUF_STC_VIDEO_VSYNC_TS_REG0 (0x003C >> 2) +#define AV_BUF_STC_VIDEO_VSYNC_TS_REG1 (0x0040 >> 2) +#define AV_BUF_STC_EXT_VSYNC_TS_REG0 (0x0044 >> 2) +#define AV_BUF_STC_EXT_VSYNC_TS_REG1 (0x0048 >> 2) +#define AV_BUF_STC_CUSTOM_EVENT_TS_REG0 (0x004C >> 2) +#define AV_BUF_STC_CUSTOM_EVENT_TS_REG1 (0x0050 >> 2) +#define AV_BUF_STC_CUSTOM_EVENT2_TS_REG0 (0x0054 >> 2) +#define AV_BUF_STC_CUSTOM_EVENT2_TS_REG1 (0x0058 >> 2) +#define AV_BUF_STC_SNAPSHOT0 (0x0060 >> 2) +#define AV_BUF_STC_SNAPSHOT1 (0x0064 >> 2) +#define AV_BUF_OUTPUT_AUDIO_VIDEO_SELECT (0x0070 >> 2) +#define AV_BUF_HCOUNT_VCOUNT_INT0 (0x0074 >> 2) +#define AV_BUF_HCOUNT_VCOUNT_INT1 (0x0078 >> 2) +#define AV_BUF_DITHER_CONFIG (0x007C >> 2) +#define AV_BUF_DITHER_CONFIG_MAX (0x008C >> 2) +#define AV_BUF_DITHER_CONFIG_MIN (0x0090 >> 2) +#define AV_BUF_PATTERN_GEN_SELECT (0x0100 >> 2) +#define AV_BUF_AUD_VID_CLK_SOURCE (0x0120 >> 2) +#define AV_BUF_SRST_REG (0x0124 >> 2) +#define AV_BUF_AUDIO_RDY_INTERVAL (0x0128 >> 2) +#define AV_BUF_AUDIO_CH_CONFIG (0x012C >> 2) + +#define AV_BUF_GRAPHICS_COMP_SCALE_FACTOR(n)((0x0200 + 4 * n) >> 2) + +#define AV_BUF_VIDEO_COMP_SCALE_FACTOR(n) ((0x020C + 4 * n) >> 2) + +#define AV_BUF_LIVE_VIDEO_COMP_SF(n) ((0x0218 + 4 * n) >> 2) + +#define AV_BUF_LIVE_VID_CONFIG (0x0224 >> 2) + +#define AV_BUF_LIVE_GFX_COMP_SF(n) ((0x0228 + 4 * n) >> 2) + +#define AV_BUF_LIVE_GFX_CONFIG (0x0234 >> 2) + +#define AUDIO_MIXER_REGISTER_OFFSET (0xC000) +#define AUDIO_MIXER_VOLUME_CONTROL (0x0000 >> 2) +#define AUDIO_MIXER_META_DATA (0x0004 >> 2) +#define AUD_CH_STATUS_REG(n) ((0x0008 + 4 * n) >> 2) +#define AUD_CH_A_DATA_REG(n) ((0x0020 + 4 * n) >> 2) +#define AUD_CH_B_DATA_REG(n) ((0x0038 + 4 * n) >> 2) + +#define DP_AUDIO_DMA_CHANNEL(n) (4 + n) +#define DP_GRAPHIC_DMA_CHANNEL (3) +#define DP_VIDEO_DMA_CHANNEL 
(0) + +enum DPGraphicFmt { + DP_GRAPHIC_RGBA8888 = 0 << 8, + DP_GRAPHIC_ABGR8888 = 1 << 8, + DP_GRAPHIC_RGB888 = 2 << 8, + DP_GRAPHIC_BGR888 = 3 << 8, + DP_GRAPHIC_RGBA5551 = 4 << 8, + DP_GRAPHIC_RGBA4444 = 5 << 8, + DP_GRAPHIC_RGB565 = 6 << 8, + DP_GRAPHIC_8BPP = 7 << 8, + DP_GRAPHIC_4BPP = 8 << 8, + DP_GRAPHIC_2BPP = 9 << 8, + DP_GRAPHIC_1BPP = 10 << 8, + DP_GRAPHIC_MASK = 0xF << 8 +}; + +enum DPVideoFmt { + DP_NL_VID_CB_Y0_CR_Y1 = 0, + DP_NL_VID_CR_Y0_CB_Y1 = 1, + DP_NL_VID_Y0_CR_Y1_CB = 2, + DP_NL_VID_Y0_CB_Y1_CR = 3, + DP_NL_VID_YV16 = 4, + DP_NL_VID_YV24 = 5, + DP_NL_VID_YV16CL = 6, + DP_NL_VID_MONO = 7, + DP_NL_VID_YV16CL2 = 8, + DP_NL_VID_YUV444 = 9, + DP_NL_VID_RGB888 = 10, + DP_NL_VID_RGBA8880 = 11, + DP_NL_VID_RGB888_10BPC = 12, + DP_NL_VID_YUV444_10BPC = 13, + DP_NL_VID_YV16CL2_10BPC = 14, + DP_NL_VID_YV16CL_10BPC = 15, + DP_NL_VID_YV16_10BPC = 16, + DP_NL_VID_YV24_10BPC = 17, + DP_NL_VID_Y_ONLY_10BPC = 18, + DP_NL_VID_YV16_420 = 19, + DP_NL_VID_YV16CL_420 = 20, + DP_NL_VID_YV16CL2_420 = 21, + DP_NL_VID_YV16_420_10BPC = 22, + DP_NL_VID_YV16CL_420_10BPC = 23, + DP_NL_VID_YV16CL2_420_10BPC = 24, + DP_NL_VID_FMT_MASK = 0x1F +}; + +typedef enum DPGraphicFmt DPGraphicFmt; +typedef enum DPVideoFmt DPVideoFmt; + +static const VMStateDescription vmstate_dp = { + .name = TYPE_XLNX_DP, + .version_id = 1, + .fields = (VMStateField[]){ + VMSTATE_UINT32_ARRAY(core_registers, XlnxDPState, + DP_CORE_REG_ARRAY_SIZE), + VMSTATE_UINT32_ARRAY(avbufm_registers, XlnxDPState, + DP_AVBUF_REG_ARRAY_SIZE), + VMSTATE_UINT32_ARRAY(vblend_registers, XlnxDPState, + DP_VBLEND_REG_ARRAY_SIZE), + VMSTATE_UINT32_ARRAY(audio_registers, XlnxDPState, + DP_AUDIO_REG_ARRAY_SIZE), + VMSTATE_END_OF_LIST() + } +}; + +static void xlnx_dp_update_irq(XlnxDPState *s); + +static uint64_t xlnx_dp_audio_read(void *opaque, hwaddr offset, unsigned size) +{ + XlnxDPState *s = XLNX_DP(opaque); + + offset = offset >> 2; + return s->audio_registers[offset]; +} + +static void xlnx_dp_audio_write(void *opaque, hwaddr offset, uint64_t value, + unsigned size) +{ + XlnxDPState *s = XLNX_DP(opaque); + + offset = offset >> 2; + + switch (offset) { + case AUDIO_MIXER_META_DATA: + s->audio_registers[offset] = value & 0x00000001; + break; + default: + s->audio_registers[offset] = value; + break; + } +} + +static const MemoryRegionOps audio_ops = { + .read = xlnx_dp_audio_read, + .write = xlnx_dp_audio_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static inline uint32_t xlnx_dp_audio_get_volume(XlnxDPState *s, + uint8_t channel) +{ + switch (channel) { + case 0: + return extract32(s->audio_registers[AUDIO_MIXER_VOLUME_CONTROL], 0, 16); + case 1: + return extract32(s->audio_registers[AUDIO_MIXER_VOLUME_CONTROL], 16, + 16); + default: + return 0; + } +} + +static inline void xlnx_dp_audio_activate(XlnxDPState *s) +{ + bool activated = ((s->core_registers[DP_TX_AUDIO_CONTROL] + & DP_TX_AUD_CTRL) != 0); + AUD_set_active_out(s->amixer_output_stream, activated); + xlnx_dpdma_set_host_data_location(s->dpdma, DP_AUDIO_DMA_CHANNEL(0), + &s->audio_buffer_0); + xlnx_dpdma_set_host_data_location(s->dpdma, DP_AUDIO_DMA_CHANNEL(1), + &s->audio_buffer_1); +} + +static inline void xlnx_dp_audio_mix_buffer(XlnxDPState *s) +{ + /* + * Audio packets are signed and have this shape: + * | 16 | 16 | 16 | 16 | 16 | 16 | 16 | 16 | + * | R3 | L3 | R2 | L2 | R1 | L1 | R0 | L0 | + * + * Output audio is 16bits saturated. 
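+ *
+ * (The per-channel volume read from AUDIO_MIXER_VOLUME_CONTROL is a 16-bit
+ * value; the division by 8192 in the mixing loop below scales it, which
+ * suggests 0x2000 corresponds to unity gain.)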
+ */
+    int i;
+
+    if ((s->audio_data_available[0]) && (xlnx_dp_audio_get_volume(s, 0))) {
+        for (i = 0; i < s->audio_data_available[0] / 2; i++) {
+            s->temp_buffer[i] = (int64_t)(s->audio_buffer_0[i])
+                              * xlnx_dp_audio_get_volume(s, 0) / 8192;
+        }
+        s->byte_left = s->audio_data_available[0];
+    } else {
+        memset(s->temp_buffer, 0, s->audio_data_available[1] / 2);
+    }
+
+    if ((s->audio_data_available[1]) && (xlnx_dp_audio_get_volume(s, 1))) {
+        if ((s->audio_data_available[0] == 0)
+            || (s->audio_data_available[1] == s->audio_data_available[0])) {
+            for (i = 0; i < s->audio_data_available[1] / 2; i++) {
+                s->temp_buffer[i] += (int64_t)(s->audio_buffer_1[i])
+                                   * xlnx_dp_audio_get_volume(s, 1) / 8192;
+            }
+            s->byte_left = s->audio_data_available[1];
+        }
+    }
+
+    for (i = 0; i < s->byte_left / 2; i++) {
+        s->out_buffer[i] = MAX(-32767, MIN(s->temp_buffer[i], 32767));
+    }
+
+    s->data_ptr = 0;
+}
+
+static void xlnx_dp_audio_callback(void *opaque, int avail)
+{
+    /*
+     * Fetch some data from the DPDMA and process it.
+     * Then wait for QEMU's audio subsystem to call this callback.
+     */
+    XlnxDPState *s = XLNX_DP(opaque);
+    size_t written = 0;
+
+    /* If there is already buffered data, don't fetch more. */
+    if (s->byte_left == 0) {
+        s->audio_data_available[0] = xlnx_dpdma_start_operation(s->dpdma, 4,
+                                                                true);
+        s->audio_data_available[1] = xlnx_dpdma_start_operation(s->dpdma, 5,
+                                                                true);
+        xlnx_dp_audio_mix_buffer(s);
+    }
+
+    /* Send the buffer through the audio subsystem. */
+    if (s->byte_left <= MAX_QEMU_BUFFER_SIZE) {
+        if (s->byte_left != 0) {
+            written = AUD_write(s->amixer_output_stream,
+                                &s->out_buffer[s->data_ptr], s->byte_left);
+        } else {
+            int len_to_copy;
+            /*
+             * There is nothing to play; we don't have any data! Fill the
+             * buffer with zeros and send it.
+             */
+            written = 0;
+            while (avail) {
+                len_to_copy = MIN(AUD_CHBUF_MAX_DEPTH, avail);
+                memset(s->out_buffer, 0, len_to_copy);
+                avail -= AUD_write(s->amixer_output_stream, s->out_buffer,
+                                   len_to_copy);
+            }
+        }
+    } else {
+        written = AUD_write(s->amixer_output_stream,
+                          &s->out_buffer[s->data_ptr], MAX_QEMU_BUFFER_SIZE);
+    }
+    s->byte_left -= written;
+    s->data_ptr += written;
+}
+
+/*
+ * AUX channel related functions.
+ */
+static void xlnx_dp_aux_clear_rx_fifo(XlnxDPState *s)
+{
+    fifo8_reset(&s->rx_fifo);
+}
+
+static void xlnx_dp_aux_push_rx_fifo(XlnxDPState *s, uint8_t *buf, size_t len)
+{
+    DPRINTF("Push %u data in rx_fifo\n", (unsigned)len);
+    fifo8_push_all(&s->rx_fifo, buf, len);
+}
+
+static uint8_t xlnx_dp_aux_pop_rx_fifo(XlnxDPState *s)
+{
+    uint8_t ret;
+
+    if (fifo8_is_empty(&s->rx_fifo)) {
+        qemu_log_mask(LOG_GUEST_ERROR,
+                      "%s: Reading empty RX_FIFO\n",
+                      __func__);
+        /*
+         * The datasheet is not clear about the reset value; it seems
+         * to be unspecified. We choose to return '0'.
+ */
+        ret = 0;
+    } else {
+        ret = fifo8_pop(&s->rx_fifo);
+        DPRINTF("pop 0x%" PRIX8 " from rx_fifo.\n", ret);
+    }
+    return ret;
+}
+
+static void xlnx_dp_aux_clear_tx_fifo(XlnxDPState *s)
+{
+    fifo8_reset(&s->tx_fifo);
+}
+
+static void xlnx_dp_aux_push_tx_fifo(XlnxDPState *s, uint8_t *buf, size_t len)
+{
+    DPRINTF("Push %u data in tx_fifo\n", (unsigned)len);
+    fifo8_push_all(&s->tx_fifo, buf, len);
+}
+
+static uint8_t xlnx_dp_aux_pop_tx_fifo(XlnxDPState *s)
+{
+    uint8_t ret;
+
+    if (fifo8_is_empty(&s->tx_fifo)) {
+        error_report("%s: TX_FIFO underflow", __func__);
+        abort();
+    }
+    ret = fifo8_pop(&s->tx_fifo);
+    DPRINTF("pop 0x%2.2X from tx_fifo.\n", ret);
+    return ret;
+}
+
+static uint32_t xlnx_dp_aux_get_address(XlnxDPState *s)
+{
+    return s->core_registers[DP_AUX_ADDRESS];
+}
+
+/*
+ * Get the command from the register.
+ */
+static void xlnx_dp_aux_set_command(XlnxDPState *s, uint32_t value)
+{
+    bool address_only = (value & AUX_ADDR_ONLY_MASK) != 0;
+    AUXCommand cmd = (value & AUX_COMMAND_MASK) >> AUX_COMMAND_SHIFT;
+    uint8_t nbytes = (value & AUX_COMMAND_NBYTES) + 1;
+    uint8_t buf[16];
+    int i;
+
+    /*
+     * When an address_only command is executed nothing happens to the FIFO,
+     * so just make nbytes = 0.
+     */
+    if (address_only) {
+        nbytes = 0;
+    }
+
+    switch (cmd) {
+    case READ_AUX:
+    case READ_I2C:
+    case READ_I2C_MOT:
+        s->core_registers[DP_AUX_REPLY_CODE] = aux_request(s->aux_bus, cmd,
+                                               xlnx_dp_aux_get_address(s),
+                                               nbytes, buf);
+        s->core_registers[DP_REPLY_DATA_COUNT] = nbytes;
+
+        if (s->core_registers[DP_AUX_REPLY_CODE] == AUX_I2C_ACK) {
+            xlnx_dp_aux_push_rx_fifo(s, buf, nbytes);
+        }
+        break;
+    case WRITE_AUX:
+    case WRITE_I2C:
+    case WRITE_I2C_MOT:
+        for (i = 0; i < nbytes; i++) {
+            buf[i] = xlnx_dp_aux_pop_tx_fifo(s);
+        }
+        s->core_registers[DP_AUX_REPLY_CODE] = aux_request(s->aux_bus, cmd,
+                                               xlnx_dp_aux_get_address(s),
+                                               nbytes, buf);
+        xlnx_dp_aux_clear_tx_fifo(s);
+        break;
+    case WRITE_I2C_STATUS:
+        qemu_log_mask(LOG_UNIMP, "xlnx_dp: Write i2c status not implemented\n");
+        break;
+    default:
+        error_report("%s: invalid command: %u", __func__, cmd);
+        abort();
+    }
+
+    s->core_registers[DP_INTERRUPT_SIGNAL_STATE] |= 0x04;
+}
+
+static void xlnx_dp_set_dpdma(const Object *obj, const char *name, Object *val,
+                              Error **errp)
+{
+    XlnxDPState *s = XLNX_DP(obj);
+    if (s->console) {
+        DisplaySurface *surface = qemu_console_surface(s->console);
+        XlnxDPDMAState *dma = XLNX_DPDMA(val);
+        xlnx_dpdma_set_host_data_location(dma, DP_GRAPHIC_DMA_CHANNEL,
+                                          surface_data(surface));
+    }
+}
+
+static inline uint8_t xlnx_dp_global_alpha_value(XlnxDPState *s)
+{
+    return (s->vblend_registers[V_BLEND_SET_GLOBAL_ALPHA_REG] & 0x1FE) >> 1;
+}
+
+static inline bool xlnx_dp_global_alpha_enabled(XlnxDPState *s)
+{
+    /*
+     * If the alpha is totally opaque (255) we consider alpha blending
+     * disabled, to reduce CPU consumption.
+     */
+    return ((xlnx_dp_global_alpha_value(s) != 0xFF) &&
+           ((s->vblend_registers[V_BLEND_SET_GLOBAL_ALPHA_REG] & 0x01) != 0));
+}
+
+static void xlnx_dp_recreate_surface(XlnxDPState *s)
+{
+    /*
+     * Two possibilities: if blending is enabled the console displays
+     * bout_plane; otherwise g_plane is displayed.
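+     * (g_plane is the graphics plane, v_plane the video plane, and
+     * bout_plane the blended output plane composited below.)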
+ */ + uint16_t width = s->core_registers[DP_MAIN_STREAM_HRES]; + uint16_t height = s->core_registers[DP_MAIN_STREAM_VRES]; + DisplaySurface *current_console_surface = qemu_console_surface(s->console); + + if ((width != 0) && (height != 0)) { + /* + * As dpy_gfx_replace_surface calls qemu_free_displaysurface on the + * surface we need to be careful and don't free the surface associated + * to the console or double free will happen. + */ + if (s->bout_plane.surface != current_console_surface) { + qemu_free_displaysurface(s->bout_plane.surface); + } + if (s->v_plane.surface != current_console_surface) { + qemu_free_displaysurface(s->v_plane.surface); + } + if (s->g_plane.surface != current_console_surface) { + qemu_free_displaysurface(s->g_plane.surface); + } + + s->g_plane.surface + = qemu_create_displaysurface_from(width, height, + s->g_plane.format, 0, NULL); + s->v_plane.surface + = qemu_create_displaysurface_from(width, height, + s->v_plane.format, 0, NULL); + if (xlnx_dp_global_alpha_enabled(s)) { + s->bout_plane.surface = + qemu_create_displaysurface_from(width, + height, + s->g_plane.format, + 0, NULL); + dpy_gfx_replace_surface(s->console, s->bout_plane.surface); + } else { + s->bout_plane.surface = NULL; + dpy_gfx_replace_surface(s->console, s->g_plane.surface); + } + + xlnx_dpdma_set_host_data_location(s->dpdma, DP_GRAPHIC_DMA_CHANNEL, + surface_data(s->g_plane.surface)); + xlnx_dpdma_set_host_data_location(s->dpdma, DP_VIDEO_DMA_CHANNEL, + surface_data(s->v_plane.surface)); + } +} + +/* + * Change the graphic format of the surface. + */ +static void xlnx_dp_change_graphic_fmt(XlnxDPState *s) +{ + switch (s->avbufm_registers[AV_BUF_FORMAT] & DP_GRAPHIC_MASK) { + case DP_GRAPHIC_RGBA8888: + s->g_plane.format = PIXMAN_r8g8b8a8; + break; + case DP_GRAPHIC_ABGR8888: + s->g_plane.format = PIXMAN_a8b8g8r8; + break; + case DP_GRAPHIC_RGB565: + s->g_plane.format = PIXMAN_r5g6b5; + break; + case DP_GRAPHIC_RGB888: + s->g_plane.format = PIXMAN_r8g8b8; + break; + case DP_GRAPHIC_BGR888: + s->g_plane.format = PIXMAN_b8g8r8; + break; + default: + error_report("%s: unsupported graphic format %u", __func__, + s->avbufm_registers[AV_BUF_FORMAT] & DP_GRAPHIC_MASK); + abort(); + } + + switch (s->avbufm_registers[AV_BUF_FORMAT] & DP_NL_VID_FMT_MASK) { + case 0: + s->v_plane.format = PIXMAN_x8b8g8r8; + break; + case DP_NL_VID_Y0_CB_Y1_CR: + s->v_plane.format = PIXMAN_yuy2; + break; + case DP_NL_VID_RGBA8880: + s->v_plane.format = PIXMAN_x8b8g8r8; + break; + default: + error_report("%s: unsupported video format %u", __func__, + s->avbufm_registers[AV_BUF_FORMAT] & DP_NL_VID_FMT_MASK); + abort(); + } + + xlnx_dp_recreate_surface(s); +} + +static void xlnx_dp_update_irq(XlnxDPState *s) +{ + uint32_t flags; + + flags = s->core_registers[DP_INT_STATUS] & ~s->core_registers[DP_INT_MASK]; + DPRINTF("update IRQ value = %" PRIx32 "\n", flags); + qemu_set_irq(s->irq, flags != 0); +} + +static uint64_t xlnx_dp_read(void *opaque, hwaddr offset, unsigned size) +{ + XlnxDPState *s = XLNX_DP(opaque); + uint64_t ret = 0; + + offset = offset >> 2; + + switch (offset) { + case DP_TX_USER_FIFO_OVERFLOW: + /* This register is cleared after a read */ + ret = s->core_registers[DP_TX_USER_FIFO_OVERFLOW]; + s->core_registers[DP_TX_USER_FIFO_OVERFLOW] = 0; + break; + case DP_AUX_REPLY_DATA: + ret = xlnx_dp_aux_pop_rx_fifo(s); + break; + case DP_INTERRUPT_SIGNAL_STATE: + /* + * XXX: Not sure it is the right thing to do actually. + * The register is not written by the device driver so it's stuck + * to 0x04. 
+ */ + ret = s->core_registers[DP_INTERRUPT_SIGNAL_STATE]; + s->core_registers[DP_INTERRUPT_SIGNAL_STATE] &= ~0x04; + break; + case DP_AUX_WRITE_FIFO: + case DP_TX_AUDIO_INFO_DATA(0): + case DP_TX_AUDIO_INFO_DATA(1): + case DP_TX_AUDIO_INFO_DATA(2): + case DP_TX_AUDIO_INFO_DATA(3): + case DP_TX_AUDIO_INFO_DATA(4): + case DP_TX_AUDIO_INFO_DATA(5): + case DP_TX_AUDIO_INFO_DATA(6): + case DP_TX_AUDIO_INFO_DATA(7): + case DP_TX_AUDIO_EXT_DATA(0): + case DP_TX_AUDIO_EXT_DATA(1): + case DP_TX_AUDIO_EXT_DATA(2): + case DP_TX_AUDIO_EXT_DATA(3): + case DP_TX_AUDIO_EXT_DATA(4): + case DP_TX_AUDIO_EXT_DATA(5): + case DP_TX_AUDIO_EXT_DATA(6): + case DP_TX_AUDIO_EXT_DATA(7): + case DP_TX_AUDIO_EXT_DATA(8): + /* write only registers */ + ret = 0; + break; + default: + assert(offset <= (0x3AC >> 2)); + if (offset == (0x3A8 >> 2) || offset == (0x3AC >> 2)) { + ret = s->core_registers[DP_INT_MASK]; + } else { + ret = s->core_registers[offset]; + } + break; + } + + DPRINTF("core read @%" PRIx64 " = 0x%8.8" PRIX64 "\n", offset << 2, ret); + return ret; +} + +static void xlnx_dp_write(void *opaque, hwaddr offset, uint64_t value, + unsigned size) +{ + XlnxDPState *s = XLNX_DP(opaque); + + DPRINTF("core write @%" PRIx64 " = 0x%8.8" PRIX64 "\n", offset, value); + + offset = offset >> 2; + + switch (offset) { + /* + * Only special write case are handled. + */ + case DP_LINK_BW_SET: + s->core_registers[offset] = value & 0x000000FF; + break; + case DP_LANE_COUNT_SET: + case DP_MAIN_STREAM_MISC0: + s->core_registers[offset] = value & 0x0000000F; + break; + case DP_TRAINING_PATTERN_SET: + case DP_LINK_QUAL_PATTERN_SET: + case DP_MAIN_STREAM_POLARITY: + case DP_PHY_VOLTAGE_DIFF_LANE_0: + case DP_PHY_VOLTAGE_DIFF_LANE_1: + s->core_registers[offset] = value & 0x00000003; + break; + case DP_ENHANCED_FRAME_EN: + case DP_SCRAMBLING_DISABLE: + case DP_DOWNSPREAD_CTRL: + case DP_MAIN_STREAM_ENABLE: + case DP_TRANSMIT_PRBS7: + s->core_registers[offset] = value & 0x00000001; + break; + case DP_PHY_CLOCK_SELECT: + s->core_registers[offset] = value & 0x00000007; + break; + case DP_SOFTWARE_RESET: + /* + * No need to update this bit as it's read '0'. + */ + /* + * TODO: reset IP. + */ + break; + case DP_TRANSMITTER_ENABLE: + s->core_registers[offset] = value & 0x01; + break; + case DP_FORCE_SCRAMBLER_RESET: + /* + * No need to update this bit as it's read '0'. + */ + /* + * TODO: force a scrambler reset?? 
+ */ + break; + case DP_AUX_COMMAND_REGISTER: + s->core_registers[offset] = value & 0x00001F0F; + xlnx_dp_aux_set_command(s, s->core_registers[offset]); + break; + case DP_MAIN_STREAM_HTOTAL: + case DP_MAIN_STREAM_VTOTAL: + case DP_MAIN_STREAM_HSTART: + case DP_MAIN_STREAM_VSTART: + s->core_registers[offset] = value & 0x0000FFFF; + break; + case DP_MAIN_STREAM_HRES: + case DP_MAIN_STREAM_VRES: + s->core_registers[offset] = value & 0x0000FFFF; + xlnx_dp_recreate_surface(s); + break; + case DP_MAIN_STREAM_HSWIDTH: + case DP_MAIN_STREAM_VSWIDTH: + s->core_registers[offset] = value & 0x00007FFF; + break; + case DP_MAIN_STREAM_MISC1: + s->core_registers[offset] = value & 0x00000086; + break; + case DP_MAIN_STREAM_M_VID: + case DP_MAIN_STREAM_N_VID: + s->core_registers[offset] = value & 0x00FFFFFF; + break; + case DP_MSA_TRANSFER_UNIT_SIZE: + case DP_MIN_BYTES_PER_TU: + case DP_INIT_WAIT: + s->core_registers[offset] = value & 0x00000007; + break; + case DP_USER_DATA_COUNT_PER_LANE: + s->core_registers[offset] = value & 0x0003FFFF; + break; + case DP_FRAC_BYTES_PER_TU: + s->core_registers[offset] = value & 0x000003FF; + break; + case DP_PHY_RESET: + s->core_registers[offset] = value & 0x00010003; + /* + * TODO: Reset something? + */ + break; + case DP_TX_PHY_POWER_DOWN: + s->core_registers[offset] = value & 0x0000000F; + /* + * TODO: Power down things? + */ + break; + case DP_AUX_WRITE_FIFO: { + uint8_t c = value; + xlnx_dp_aux_push_tx_fifo(s, &c, 1); + break; + } + case DP_AUX_CLOCK_DIVIDER: + break; + case DP_AUX_REPLY_COUNT: + /* + * Writing to this register clear the counter. + */ + s->core_registers[offset] = 0x00000000; + break; + case DP_AUX_ADDRESS: + s->core_registers[offset] = value & 0x000FFFFF; + break; + case DP_VERSION_REGISTER: + case DP_CORE_ID: + case DP_TX_USER_FIFO_OVERFLOW: + case DP_AUX_REPLY_DATA: + case DP_AUX_REPLY_CODE: + case DP_REPLY_DATA_COUNT: + case DP_REPLY_STATUS: + case DP_HPD_DURATION: + /* + * Write to read only location.. + */ + break; + case DP_TX_AUDIO_CONTROL: + s->core_registers[offset] = value & 0x00000001; + xlnx_dp_audio_activate(s); + break; + case DP_TX_AUDIO_CHANNELS: + s->core_registers[offset] = value & 0x00000007; + xlnx_dp_audio_activate(s); + break; + case DP_INT_STATUS: + s->core_registers[DP_INT_STATUS] &= ~value; + xlnx_dp_update_irq(s); + break; + case DP_INT_EN: + s->core_registers[DP_INT_MASK] &= ~value; + xlnx_dp_update_irq(s); + break; + case DP_INT_DS: + s->core_registers[DP_INT_MASK] |= ~value; + xlnx_dp_update_irq(s); + break; + default: + assert(offset <= (0x504C >> 2)); + s->core_registers[offset] = value; + break; + } +} + +static const MemoryRegionOps dp_ops = { + .read = xlnx_dp_read, + .write = xlnx_dp_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + }, + .impl = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +/* + * This is to handle Read/Write to the Video Blender. + */ +static void xlnx_dp_vblend_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + XlnxDPState *s = XLNX_DP(opaque); + bool alpha_was_enabled; + + DPRINTF("vblend: write @0x%" HWADDR_PRIX " = 0x%" PRIX32 "\n", offset, + (uint32_t)value); + offset = offset >> 2; + + switch (offset) { + case V_BLEND_BG_CLR_0: + case V_BLEND_BG_CLR_1: + case V_BLEND_BG_CLR_2: + s->vblend_registers[offset] = value & 0x00000FFF; + break; + case V_BLEND_SET_GLOBAL_ALPHA_REG: + /* + * A write to this register can enable or disable blending. Thus we need + * to recreate the surfaces. 
+ */ + alpha_was_enabled = xlnx_dp_global_alpha_enabled(s); + s->vblend_registers[offset] = value & 0x000001FF; + if (xlnx_dp_global_alpha_enabled(s) != alpha_was_enabled) { + xlnx_dp_recreate_surface(s); + } + break; + case V_BLEND_OUTPUT_VID_FORMAT: + s->vblend_registers[offset] = value & 0x00000017; + break; + case V_BLEND_LAYER0_CONTROL: + case V_BLEND_LAYER1_CONTROL: + s->vblend_registers[offset] = value & 0x00000103; + break; + case V_BLEND_RGB2YCBCR_COEFF(0): + case V_BLEND_RGB2YCBCR_COEFF(1): + case V_BLEND_RGB2YCBCR_COEFF(2): + case V_BLEND_RGB2YCBCR_COEFF(3): + case V_BLEND_RGB2YCBCR_COEFF(4): + case V_BLEND_RGB2YCBCR_COEFF(5): + case V_BLEND_RGB2YCBCR_COEFF(6): + case V_BLEND_RGB2YCBCR_COEFF(7): + case V_BLEND_RGB2YCBCR_COEFF(8): + case V_BLEND_IN1CSC_COEFF(0): + case V_BLEND_IN1CSC_COEFF(1): + case V_BLEND_IN1CSC_COEFF(2): + case V_BLEND_IN1CSC_COEFF(3): + case V_BLEND_IN1CSC_COEFF(4): + case V_BLEND_IN1CSC_COEFF(5): + case V_BLEND_IN1CSC_COEFF(6): + case V_BLEND_IN1CSC_COEFF(7): + case V_BLEND_IN1CSC_COEFF(8): + case V_BLEND_IN2CSC_COEFF(0): + case V_BLEND_IN2CSC_COEFF(1): + case V_BLEND_IN2CSC_COEFF(2): + case V_BLEND_IN2CSC_COEFF(3): + case V_BLEND_IN2CSC_COEFF(4): + case V_BLEND_IN2CSC_COEFF(5): + case V_BLEND_IN2CSC_COEFF(6): + case V_BLEND_IN2CSC_COEFF(7): + case V_BLEND_IN2CSC_COEFF(8): + s->vblend_registers[offset] = value & 0x0000FFFF; + break; + case V_BLEND_LUMA_IN1CSC_OFFSET: + case V_BLEND_CR_IN1CSC_OFFSET: + case V_BLEND_CB_IN1CSC_OFFSET: + case V_BLEND_LUMA_IN2CSC_OFFSET: + case V_BLEND_CR_IN2CSC_OFFSET: + case V_BLEND_CB_IN2CSC_OFFSET: + case V_BLEND_LUMA_OUTCSC_OFFSET: + case V_BLEND_CR_OUTCSC_OFFSET: + case V_BLEND_CB_OUTCSC_OFFSET: + s->vblend_registers[offset] = value & 0x3FFF7FFF; + break; + case V_BLEND_CHROMA_KEY_ENABLE: + s->vblend_registers[offset] = value & 0x00000003; + break; + case V_BLEND_CHROMA_KEY_COMP1: + case V_BLEND_CHROMA_KEY_COMP2: + case V_BLEND_CHROMA_KEY_COMP3: + s->vblend_registers[offset] = value & 0x0FFF0FFF; + break; + default: + s->vblend_registers[offset] = value; + break; + } +} + +static uint64_t xlnx_dp_vblend_read(void *opaque, hwaddr offset, + unsigned size) +{ + XlnxDPState *s = XLNX_DP(opaque); + + DPRINTF("vblend: read @0x%" HWADDR_PRIX " = 0x%" PRIX32 "\n", offset, + s->vblend_registers[offset >> 2]); + return s->vblend_registers[offset >> 2]; +} + +static const MemoryRegionOps vblend_ops = { + .read = xlnx_dp_vblend_read, + .write = xlnx_dp_vblend_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + }, + .impl = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +/* + * This is to handle Read/Write to the Audio Video buffer manager. 
+ */ +static void xlnx_dp_avbufm_write(void *opaque, hwaddr offset, uint64_t value, + unsigned size) +{ + XlnxDPState *s = XLNX_DP(opaque); + + DPRINTF("avbufm: write @0x%" HWADDR_PRIX " = 0x%" PRIX32 "\n", offset, + (uint32_t)value); + offset = offset >> 2; + + switch (offset) { + case AV_BUF_FORMAT: + s->avbufm_registers[offset] = value & 0x00000FFF; + xlnx_dp_change_graphic_fmt(s); + break; + case AV_CHBUF0: + case AV_CHBUF1: + case AV_CHBUF2: + case AV_CHBUF3: + case AV_CHBUF4: + case AV_CHBUF5: + s->avbufm_registers[offset] = value & 0x0000007F; + break; + case AV_BUF_OUTPUT_AUDIO_VIDEO_SELECT: + s->avbufm_registers[offset] = value & 0x0000007F; + break; + case AV_BUF_DITHER_CONFIG: + s->avbufm_registers[offset] = value & 0x000007FF; + break; + case AV_BUF_DITHER_CONFIG_MAX: + case AV_BUF_DITHER_CONFIG_MIN: + s->avbufm_registers[offset] = value & 0x00000FFF; + break; + case AV_BUF_PATTERN_GEN_SELECT: + s->avbufm_registers[offset] = value & 0xFFFFFF03; + break; + case AV_BUF_AUD_VID_CLK_SOURCE: + s->avbufm_registers[offset] = value & 0x00000007; + break; + case AV_BUF_SRST_REG: + s->avbufm_registers[offset] = value & 0x00000002; + break; + case AV_BUF_AUDIO_CH_CONFIG: + s->avbufm_registers[offset] = value & 0x00000003; + break; + case AV_BUF_GRAPHICS_COMP_SCALE_FACTOR(0): + case AV_BUF_GRAPHICS_COMP_SCALE_FACTOR(1): + case AV_BUF_GRAPHICS_COMP_SCALE_FACTOR(2): + case AV_BUF_VIDEO_COMP_SCALE_FACTOR(0): + case AV_BUF_VIDEO_COMP_SCALE_FACTOR(1): + case AV_BUF_VIDEO_COMP_SCALE_FACTOR(2): + s->avbufm_registers[offset] = value & 0x0000FFFF; + break; + case AV_BUF_LIVE_VIDEO_COMP_SF(0): + case AV_BUF_LIVE_VIDEO_COMP_SF(1): + case AV_BUF_LIVE_VIDEO_COMP_SF(2): + case AV_BUF_LIVE_VID_CONFIG: + case AV_BUF_LIVE_GFX_COMP_SF(0): + case AV_BUF_LIVE_GFX_COMP_SF(1): + case AV_BUF_LIVE_GFX_COMP_SF(2): + case AV_BUF_LIVE_GFX_CONFIG: + case AV_BUF_NON_LIVE_LATENCY: + case AV_BUF_STC_CONTROL: + case AV_BUF_STC_INIT_VALUE0: + case AV_BUF_STC_INIT_VALUE1: + case AV_BUF_STC_ADJ: + case AV_BUF_STC_VIDEO_VSYNC_TS_REG0: + case AV_BUF_STC_VIDEO_VSYNC_TS_REG1: + case AV_BUF_STC_EXT_VSYNC_TS_REG0: + case AV_BUF_STC_EXT_VSYNC_TS_REG1: + case AV_BUF_STC_CUSTOM_EVENT_TS_REG0: + case AV_BUF_STC_CUSTOM_EVENT_TS_REG1: + case AV_BUF_STC_CUSTOM_EVENT2_TS_REG0: + case AV_BUF_STC_CUSTOM_EVENT2_TS_REG1: + case AV_BUF_STC_SNAPSHOT0: + case AV_BUF_STC_SNAPSHOT1: + case AV_BUF_HCOUNT_VCOUNT_INT0: + case AV_BUF_HCOUNT_VCOUNT_INT1: + qemu_log_mask(LOG_UNIMP, "avbufm: unimplemented register 0x%04" + PRIx64 "\n", + offset << 2); + break; + default: + s->avbufm_registers[offset] = value; + break; + } +} + +static uint64_t xlnx_dp_avbufm_read(void *opaque, hwaddr offset, + unsigned size) +{ + XlnxDPState *s = XLNX_DP(opaque); + + offset = offset >> 2; + return s->avbufm_registers[offset]; +} + +static const MemoryRegionOps avbufm_ops = { + .read = xlnx_dp_avbufm_read, + .write = xlnx_dp_avbufm_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + }, + .impl = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +/* + * This is a global alpha blending using pixman. + * Both graphic and video planes are multiplied with the global alpha + * coefficient and added. 
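+ *
+ * (The multiplication is implemented by installing a 1x1 pixman
+ * convolution filter on each plane, whose single coefficient is the
+ * alpha factor; the filtered planes are then combined into bout_plane
+ * with an OP_SRC followed by an OP_ADD composite.)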
+ */ +static inline void xlnx_dp_blend_surface(XlnxDPState *s) +{ + pixman_fixed_t alpha1[] = { pixman_double_to_fixed(1), + pixman_double_to_fixed(1), + pixman_double_to_fixed(1.0) }; + pixman_fixed_t alpha2[] = { pixman_double_to_fixed(1), + pixman_double_to_fixed(1), + pixman_double_to_fixed(1.0) }; + + if ((surface_width(s->g_plane.surface) + != surface_width(s->v_plane.surface)) || + (surface_height(s->g_plane.surface) + != surface_height(s->v_plane.surface))) { + return; + } + + alpha1[2] = pixman_double_to_fixed((double)(xlnx_dp_global_alpha_value(s)) + / 256.0); + alpha2[2] = pixman_double_to_fixed((255.0 + - (double)xlnx_dp_global_alpha_value(s)) + / 256.0); + + pixman_image_set_filter(s->g_plane.surface->image, + PIXMAN_FILTER_CONVOLUTION, alpha1, 3); + pixman_image_composite(PIXMAN_OP_SRC, s->g_plane.surface->image, 0, + s->bout_plane.surface->image, 0, 0, 0, 0, 0, 0, + surface_width(s->g_plane.surface), + surface_height(s->g_plane.surface)); + pixman_image_set_filter(s->v_plane.surface->image, + PIXMAN_FILTER_CONVOLUTION, alpha2, 3); + pixman_image_composite(PIXMAN_OP_ADD, s->v_plane.surface->image, 0, + s->bout_plane.surface->image, 0, 0, 0, 0, 0, 0, + surface_width(s->g_plane.surface), + surface_height(s->g_plane.surface)); +} + +static void xlnx_dp_update_display(void *opaque) +{ + XlnxDPState *s = XLNX_DP(opaque); + + if ((s->core_registers[DP_TRANSMITTER_ENABLE] & 0x01) == 0) { + return; + } + + s->core_registers[DP_INT_STATUS] |= (1 << 13); + xlnx_dp_update_irq(s); + + xlnx_dpdma_trigger_vsync_irq(s->dpdma); + + /* + * Trigger the DMA channel. + */ + if (!xlnx_dpdma_start_operation(s->dpdma, 3, false)) { + /* + * An error occurred don't do anything with the data.. + * Trigger an underflow interrupt. + */ + s->core_registers[DP_INT_STATUS] |= (1 << 21); + xlnx_dp_update_irq(s); + return; + } + + if (xlnx_dp_global_alpha_enabled(s)) { + if (!xlnx_dpdma_start_operation(s->dpdma, 0, false)) { + s->core_registers[DP_INT_STATUS] |= (1 << 21); + xlnx_dp_update_irq(s); + return; + } + xlnx_dp_blend_surface(s); + } + + /* + * XXX: We might want to update only what changed. + */ + dpy_gfx_update_full(s->console); +} + +static const GraphicHwOps xlnx_dp_gfx_ops = { + .gfx_update = xlnx_dp_update_display, +}; + +static void xlnx_dp_init(Object *obj) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + XlnxDPState *s = XLNX_DP(obj); + + memory_region_init(&s->container, obj, TYPE_XLNX_DP, 0xC050); + + memory_region_init_io(&s->core_iomem, obj, &dp_ops, s, TYPE_XLNX_DP + ".core", 0x3AF); + memory_region_add_subregion(&s->container, 0x0000, &s->core_iomem); + + memory_region_init_io(&s->vblend_iomem, obj, &vblend_ops, s, TYPE_XLNX_DP + ".v_blend", 0x1DF); + memory_region_add_subregion(&s->container, 0xA000, &s->vblend_iomem); + + memory_region_init_io(&s->avbufm_iomem, obj, &avbufm_ops, s, TYPE_XLNX_DP + ".av_buffer_manager", 0x238); + memory_region_add_subregion(&s->container, 0xB000, &s->avbufm_iomem); + + memory_region_init_io(&s->audio_iomem, obj, &audio_ops, s, TYPE_XLNX_DP + ".audio", sizeof(s->audio_registers)); + memory_region_add_subregion(&s->container, 0xC000, &s->audio_iomem); + + sysbus_init_mmio(sbd, &s->container); + sysbus_init_irq(sbd, &s->irq); + + object_property_add_link(obj, "dpdma", TYPE_XLNX_DPDMA, + (Object **) &s->dpdma, + xlnx_dp_set_dpdma, + OBJ_PROP_LINK_STRONG); + + /* + * Initialize AUX Bus. + */ + s->aux_bus = aux_bus_init(DEVICE(obj), "aux"); + + /* + * Initialize DPCD and EDID.. 
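+ *
+ * (The DPCD device models the sink's DisplayPort Configuration Data,
+ * accessed over the AUX bus; the EDID is served by an "i2c-ddc" slave
+ * at address 0x50 on the AUX bus's I2C channel.)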
+ */ + s->dpcd = DPCD(qdev_new("dpcd")); + object_property_add_child(OBJECT(s), "dpcd", OBJECT(s->dpcd)); + + s->edid = I2CDDC(qdev_new("i2c-ddc")); + i2c_slave_set_address(I2C_SLAVE(s->edid), 0x50); + object_property_add_child(OBJECT(s), "edid", OBJECT(s->edid)); + + fifo8_create(&s->rx_fifo, 16); + fifo8_create(&s->tx_fifo, 16); +} + +static void xlnx_dp_finalize(Object *obj) +{ + XlnxDPState *s = XLNX_DP(obj); + + fifo8_destroy(&s->tx_fifo); + fifo8_destroy(&s->rx_fifo); +} + +static void xlnx_dp_realize(DeviceState *dev, Error **errp) +{ + XlnxDPState *s = XLNX_DP(dev); + DisplaySurface *surface; + struct audsettings as; + + aux_bus_realize(s->aux_bus); + + qdev_realize(DEVICE(s->dpcd), BUS(s->aux_bus), &error_fatal); + aux_map_slave(AUX_SLAVE(s->dpcd), 0x0000); + + qdev_realize_and_unref(DEVICE(s->edid), BUS(aux_get_i2c_bus(s->aux_bus)), + &error_fatal); + + s->console = graphic_console_init(dev, 0, &xlnx_dp_gfx_ops, s); + surface = qemu_console_surface(s->console); + xlnx_dpdma_set_host_data_location(s->dpdma, DP_GRAPHIC_DMA_CHANNEL, + surface_data(surface)); + + as.freq = 44100; + as.nchannels = 2; + as.fmt = AUDIO_FORMAT_S16; + as.endianness = 0; + + AUD_register_card("xlnx_dp.audio", &s->aud_card); + + s->amixer_output_stream = AUD_open_out(&s->aud_card, + s->amixer_output_stream, + "xlnx_dp.audio.out", + s, + xlnx_dp_audio_callback, + &as); + AUD_set_volume_out(s->amixer_output_stream, 0, 255, 255); + xlnx_dp_audio_activate(s); +} + +static void xlnx_dp_reset(DeviceState *dev) +{ + XlnxDPState *s = XLNX_DP(dev); + + memset(s->core_registers, 0, sizeof(s->core_registers)); + s->core_registers[DP_VERSION_REGISTER] = 0x04010000; + s->core_registers[DP_CORE_ID] = 0x01020000; + s->core_registers[DP_REPLY_STATUS] = 0x00000010; + s->core_registers[DP_MSA_TRANSFER_UNIT_SIZE] = 0x00000040; + s->core_registers[DP_INIT_WAIT] = 0x00000020; + s->core_registers[DP_PHY_RESET] = 0x00010003; + s->core_registers[DP_INT_MASK] = 0xFFFFF03F; + s->core_registers[DP_PHY_STATUS] = 0x00000043; + s->core_registers[DP_INTERRUPT_SIGNAL_STATE] = 0x00000001; + + s->vblend_registers[V_BLEND_RGB2YCBCR_COEFF(0)] = 0x00001000; + s->vblend_registers[V_BLEND_RGB2YCBCR_COEFF(4)] = 0x00001000; + s->vblend_registers[V_BLEND_RGB2YCBCR_COEFF(8)] = 0x00001000; + s->vblend_registers[V_BLEND_IN1CSC_COEFF(0)] = 0x00001000; + s->vblend_registers[V_BLEND_IN1CSC_COEFF(4)] = 0x00001000; + s->vblend_registers[V_BLEND_IN1CSC_COEFF(8)] = 0x00001000; + s->vblend_registers[V_BLEND_IN2CSC_COEFF(0)] = 0x00001000; + s->vblend_registers[V_BLEND_IN2CSC_COEFF(4)] = 0x00001000; + s->vblend_registers[V_BLEND_IN2CSC_COEFF(8)] = 0x00001000; + + s->avbufm_registers[AV_BUF_NON_LIVE_LATENCY] = 0x00000180; + s->avbufm_registers[AV_BUF_OUTPUT_AUDIO_VIDEO_SELECT] = 0x00000008; + s->avbufm_registers[AV_BUF_DITHER_CONFIG_MAX] = 0x00000FFF; + s->avbufm_registers[AV_BUF_GRAPHICS_COMP_SCALE_FACTOR(0)] = 0x00010101; + s->avbufm_registers[AV_BUF_GRAPHICS_COMP_SCALE_FACTOR(1)] = 0x00010101; + s->avbufm_registers[AV_BUF_GRAPHICS_COMP_SCALE_FACTOR(2)] = 0x00010101; + s->avbufm_registers[AV_BUF_VIDEO_COMP_SCALE_FACTOR(0)] = 0x00010101; + s->avbufm_registers[AV_BUF_VIDEO_COMP_SCALE_FACTOR(1)] = 0x00010101; + s->avbufm_registers[AV_BUF_VIDEO_COMP_SCALE_FACTOR(2)] = 0x00010101; + s->avbufm_registers[AV_BUF_LIVE_VIDEO_COMP_SF(0)] = 0x00010101; + s->avbufm_registers[AV_BUF_LIVE_VIDEO_COMP_SF(1)] = 0x00010101; + s->avbufm_registers[AV_BUF_LIVE_VIDEO_COMP_SF(2)] = 0x00010101; + s->avbufm_registers[AV_BUF_LIVE_GFX_COMP_SF(0)] = 0x00010101; + 
s->avbufm_registers[AV_BUF_LIVE_GFX_COMP_SF(1)] = 0x00010101; + s->avbufm_registers[AV_BUF_LIVE_GFX_COMP_SF(2)] = 0x00010101; + + memset(s->audio_registers, 0, sizeof(s->audio_registers)); + s->byte_left = 0; + + xlnx_dp_aux_clear_rx_fifo(s); + xlnx_dp_change_graphic_fmt(s); + xlnx_dp_update_irq(s); +} + +static void xlnx_dp_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + + dc->realize = xlnx_dp_realize; + dc->vmsd = &vmstate_dp; + dc->reset = xlnx_dp_reset; +} + +static const TypeInfo xlnx_dp_info = { + .name = TYPE_XLNX_DP, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(XlnxDPState), + .instance_init = xlnx_dp_init, + .instance_finalize = xlnx_dp_finalize, + .class_init = xlnx_dp_class_init, +}; + +static void xlnx_dp_register_types(void) +{ + type_register_static(&xlnx_dp_info); +} + +type_init(xlnx_dp_register_types) diff --git a/hw/dma/Kconfig b/hw/dma/Kconfig new file mode 100644 index 000000000..98fbb1bb0 --- /dev/null +++ b/hw/dma/Kconfig @@ -0,0 +1,32 @@ +config RC4030 + bool + +config PL080 + bool + +config PL330 + bool + +config I82374 + bool + select I8257 + +config I8257 + bool + +config ZYNQ_DEVCFG + bool + select REGISTER + +config XLNX_ZDMA + bool + +config STP2000 + bool + +config SIFIVE_PDMA + bool + +config XLNX_CSU_DMA + bool + select REGISTER diff --git a/hw/dma/bcm2835_dma.c b/hw/dma/bcm2835_dma.c new file mode 100644 index 000000000..eb0002a2b --- /dev/null +++ b/hw/dma/bcm2835_dma.c @@ -0,0 +1,410 @@ +/* + * Raspberry Pi emulation (c) 2012 Gregory Estrade + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "hw/dma/bcm2835_dma.h" +#include "hw/irq.h" +#include "migration/vmstate.h" +#include "qemu/log.h" +#include "qemu/module.h" + +/* DMA CS Control and Status bits */ +#define BCM2708_DMA_ACTIVE (1 << 0) +#define BCM2708_DMA_END (1 << 1) /* GE */ +#define BCM2708_DMA_INT (1 << 2) +#define BCM2708_DMA_ISPAUSED (1 << 4) /* Pause requested or not active */ +#define BCM2708_DMA_ISHELD (1 << 5) /* Is held by DREQ flow control */ +#define BCM2708_DMA_ERR (1 << 8) +#define BCM2708_DMA_ABORT (1 << 30) /* stop current CB, go to next, WO */ +#define BCM2708_DMA_RESET (1 << 31) /* WO, self clearing */ + +/* DMA control block "info" field bits */ +#define BCM2708_DMA_INT_EN (1 << 0) +#define BCM2708_DMA_TDMODE (1 << 1) +#define BCM2708_DMA_WAIT_RESP (1 << 3) +#define BCM2708_DMA_D_INC (1 << 4) +#define BCM2708_DMA_D_WIDTH (1 << 5) +#define BCM2708_DMA_D_DREQ (1 << 6) +#define BCM2708_DMA_D_IGNORE (1 << 7) +#define BCM2708_DMA_S_INC (1 << 8) +#define BCM2708_DMA_S_WIDTH (1 << 9) +#define BCM2708_DMA_S_DREQ (1 << 10) +#define BCM2708_DMA_S_IGNORE (1 << 11) + +/* Register offsets */ +#define BCM2708_DMA_CS 0x00 /* Control and Status */ +#define BCM2708_DMA_ADDR 0x04 /* Control block address */ +/* the current control block appears in the following registers - read only */ +#define BCM2708_DMA_INFO 0x08 +#define BCM2708_DMA_SOURCE_AD 0x0c +#define BCM2708_DMA_DEST_AD 0x10 +#define BCM2708_DMA_TXFR_LEN 0x14 +#define BCM2708_DMA_STRIDE 0x18 +#define BCM2708_DMA_NEXTCB 0x1C +#define BCM2708_DMA_DEBUG 0x20 + +#define BCM2708_DMA_INT_STATUS 0xfe0 /* Interrupt status of each channel */ +#define BCM2708_DMA_ENABLE 0xff0 /* Global enable bits for each channel */ + +#define BCM2708_DMA_CS_RW_MASK 0x30ff0001 /* All RW bits in DMA_CS */ + +static void bcm2835_dma_update(BCM2835DMAState *s, unsigned 
c) +{ + BCM2835DMAChan *ch = &s->chan[c]; + uint32_t data, xlen, xlen_td, ylen; + int16_t dst_stride, src_stride; + + if (!(s->enable & (1 << c))) { + return; + } + + while ((s->enable & (1 << c)) && (ch->conblk_ad != 0)) { + /* CB fetch */ + ch->ti = ldl_le_phys(&s->dma_as, ch->conblk_ad); + ch->source_ad = ldl_le_phys(&s->dma_as, ch->conblk_ad + 4); + ch->dest_ad = ldl_le_phys(&s->dma_as, ch->conblk_ad + 8); + ch->txfr_len = ldl_le_phys(&s->dma_as, ch->conblk_ad + 12); + ch->stride = ldl_le_phys(&s->dma_as, ch->conblk_ad + 16); + ch->nextconbk = ldl_le_phys(&s->dma_as, ch->conblk_ad + 20); + + ylen = 1; + if (ch->ti & BCM2708_DMA_TDMODE) { + /* 2D transfer mode */ + ylen += (ch->txfr_len >> 16) & 0x3fff; + xlen = ch->txfr_len & 0xffff; + dst_stride = ch->stride >> 16; + src_stride = ch->stride & 0xffff; + } else { + xlen = ch->txfr_len; + dst_stride = 0; + src_stride = 0; + } + xlen_td = xlen; + + while (ylen != 0) { + /* Normal transfer mode */ + while (xlen != 0) { + if (ch->ti & BCM2708_DMA_S_IGNORE) { + /* Ignore reads */ + data = 0; + } else { + data = ldl_le_phys(&s->dma_as, ch->source_ad); + } + if (ch->ti & BCM2708_DMA_S_INC) { + ch->source_ad += 4; + } + + if (ch->ti & BCM2708_DMA_D_IGNORE) { + /* Ignore writes */ + } else { + stl_le_phys(&s->dma_as, ch->dest_ad, data); + } + if (ch->ti & BCM2708_DMA_D_INC) { + ch->dest_ad += 4; + } + + /* update remaining transfer length */ + xlen -= 4; + if (ch->ti & BCM2708_DMA_TDMODE) { + ch->txfr_len = (ylen << 16) | xlen; + } else { + ch->txfr_len = xlen; + } + } + + if (--ylen != 0) { + ch->source_ad += src_stride; + ch->dest_ad += dst_stride; + xlen = xlen_td; + } + } + ch->cs |= BCM2708_DMA_END; + if (ch->ti & BCM2708_DMA_INT_EN) { + ch->cs |= BCM2708_DMA_INT; + s->int_status |= (1 << c); + qemu_set_irq(ch->irq, 1); + } + + /* Process next CB */ + ch->conblk_ad = ch->nextconbk; + } + + ch->cs &= ~BCM2708_DMA_ACTIVE; + ch->cs |= BCM2708_DMA_ISPAUSED; +} + +static void bcm2835_dma_chan_reset(BCM2835DMAChan *ch) +{ + ch->cs = 0; + ch->conblk_ad = 0; +} + +static uint64_t bcm2835_dma_read(BCM2835DMAState *s, hwaddr offset, + unsigned size, unsigned c) +{ + BCM2835DMAChan *ch; + uint32_t res = 0; + + assert(size == 4); + assert(c < BCM2835_DMA_NCHANS); + + ch = &s->chan[c]; + + switch (offset) { + case BCM2708_DMA_CS: + res = ch->cs; + break; + case BCM2708_DMA_ADDR: + res = ch->conblk_ad; + break; + case BCM2708_DMA_INFO: + res = ch->ti; + break; + case BCM2708_DMA_SOURCE_AD: + res = ch->source_ad; + break; + case BCM2708_DMA_DEST_AD: + res = ch->dest_ad; + break; + case BCM2708_DMA_TXFR_LEN: + res = ch->txfr_len; + break; + case BCM2708_DMA_STRIDE: + res = ch->stride; + break; + case BCM2708_DMA_NEXTCB: + res = ch->nextconbk; + break; + case BCM2708_DMA_DEBUG: + res = ch->debug; + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%"HWADDR_PRIx"\n", + __func__, offset); + break; + } + return res; +} + +static void bcm2835_dma_write(BCM2835DMAState *s, hwaddr offset, + uint64_t value, unsigned size, unsigned c) +{ + BCM2835DMAChan *ch; + uint32_t oldcs; + + assert(size == 4); + assert(c < BCM2835_DMA_NCHANS); + + ch = &s->chan[c]; + + switch (offset) { + case BCM2708_DMA_CS: + oldcs = ch->cs; + if (value & BCM2708_DMA_RESET) { + bcm2835_dma_chan_reset(ch); + } + if (value & BCM2708_DMA_ABORT) { + /* abort is a no-op, since we always run to completion */ + } + if (value & BCM2708_DMA_END) { + ch->cs &= ~BCM2708_DMA_END; + } + if (value & BCM2708_DMA_INT) { + ch->cs &= ~BCM2708_DMA_INT; + s->int_status &= ~(1 << c); + 
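+            /* Acknowledging the INT flag also lowers the channel's IRQ line. */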
qemu_set_irq(ch->irq, 0); + } + ch->cs &= ~BCM2708_DMA_CS_RW_MASK; + ch->cs |= (value & BCM2708_DMA_CS_RW_MASK); + if (!(oldcs & BCM2708_DMA_ACTIVE) && (ch->cs & BCM2708_DMA_ACTIVE)) { + bcm2835_dma_update(s, c); + } + break; + case BCM2708_DMA_ADDR: + ch->conblk_ad = value; + break; + case BCM2708_DMA_DEBUG: + ch->debug = value; + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%"HWADDR_PRIx"\n", + __func__, offset); + break; + } +} + +static uint64_t bcm2835_dma0_read(void *opaque, hwaddr offset, unsigned size) +{ + BCM2835DMAState *s = opaque; + + if (offset < 0xf00) { + return bcm2835_dma_read(s, (offset & 0xff), size, (offset >> 8) & 0xf); + } else { + switch (offset) { + case BCM2708_DMA_INT_STATUS: + return s->int_status; + case BCM2708_DMA_ENABLE: + return s->enable; + default: + qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%"HWADDR_PRIx"\n", + __func__, offset); + return 0; + } + } +} + +static uint64_t bcm2835_dma15_read(void *opaque, hwaddr offset, unsigned size) +{ + return bcm2835_dma_read(opaque, (offset & 0xff), size, 15); +} + +static void bcm2835_dma0_write(void *opaque, hwaddr offset, uint64_t value, + unsigned size) +{ + BCM2835DMAState *s = opaque; + + if (offset < 0xf00) { + bcm2835_dma_write(s, (offset & 0xff), value, size, (offset >> 8) & 0xf); + } else { + switch (offset) { + case BCM2708_DMA_INT_STATUS: + break; + case BCM2708_DMA_ENABLE: + s->enable = (value & 0xffff); + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%"HWADDR_PRIx"\n", + __func__, offset); + } + } + +} + +static void bcm2835_dma15_write(void *opaque, hwaddr offset, uint64_t value, + unsigned size) +{ + bcm2835_dma_write(opaque, (offset & 0xff), value, size, 15); +} + +static const MemoryRegionOps bcm2835_dma0_ops = { + .read = bcm2835_dma0_read, + .write = bcm2835_dma0_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid.min_access_size = 4, + .valid.max_access_size = 4, +}; + +static const MemoryRegionOps bcm2835_dma15_ops = { + .read = bcm2835_dma15_read, + .write = bcm2835_dma15_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid.min_access_size = 4, + .valid.max_access_size = 4, +}; + +static const VMStateDescription vmstate_bcm2835_dma_chan = { + .name = TYPE_BCM2835_DMA "-chan", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(cs, BCM2835DMAChan), + VMSTATE_UINT32(conblk_ad, BCM2835DMAChan), + VMSTATE_UINT32(ti, BCM2835DMAChan), + VMSTATE_UINT32(source_ad, BCM2835DMAChan), + VMSTATE_UINT32(dest_ad, BCM2835DMAChan), + VMSTATE_UINT32(txfr_len, BCM2835DMAChan), + VMSTATE_UINT32(stride, BCM2835DMAChan), + VMSTATE_UINT32(nextconbk, BCM2835DMAChan), + VMSTATE_UINT32(debug, BCM2835DMAChan), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_bcm2835_dma = { + .name = TYPE_BCM2835_DMA, + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_STRUCT_ARRAY(chan, BCM2835DMAState, BCM2835_DMA_NCHANS, 1, + vmstate_bcm2835_dma_chan, BCM2835DMAChan), + VMSTATE_UINT32(int_status, BCM2835DMAState), + VMSTATE_UINT32(enable, BCM2835DMAState), + VMSTATE_END_OF_LIST() + } +}; + +static void bcm2835_dma_init(Object *obj) +{ + BCM2835DMAState *s = BCM2835_DMA(obj); + int n; + + /* DMA channels 0-14 occupy a contiguous block of IO memory, along + * with the global enable and interrupt status bits. Channel 15 + * has the same register map, but is mapped at a discontiguous + * address in a separate IO block. 
+ */ + memory_region_init_io(&s->iomem0, OBJECT(s), &bcm2835_dma0_ops, s, + TYPE_BCM2835_DMA, 0x1000); + sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->iomem0); + + memory_region_init_io(&s->iomem15, OBJECT(s), &bcm2835_dma15_ops, s, + TYPE_BCM2835_DMA "-chan15", 0x100); + sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->iomem15); + + for (n = 0; n < 16; n++) { + sysbus_init_irq(SYS_BUS_DEVICE(s), &s->chan[n].irq); + } +} + +static void bcm2835_dma_reset(DeviceState *dev) +{ + BCM2835DMAState *s = BCM2835_DMA(dev); + int n; + + s->enable = 0xffff; + s->int_status = 0; + for (n = 0; n < BCM2835_DMA_NCHANS; n++) { + bcm2835_dma_chan_reset(&s->chan[n]); + } +} + +static void bcm2835_dma_realize(DeviceState *dev, Error **errp) +{ + BCM2835DMAState *s = BCM2835_DMA(dev); + Object *obj; + + obj = object_property_get_link(OBJECT(dev), "dma-mr", &error_abort); + s->dma_mr = MEMORY_REGION(obj); + address_space_init(&s->dma_as, s->dma_mr, TYPE_BCM2835_DMA "-memory"); + + bcm2835_dma_reset(dev); +} + +static void bcm2835_dma_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = bcm2835_dma_realize; + dc->reset = bcm2835_dma_reset; + dc->vmsd = &vmstate_bcm2835_dma; +} + +static TypeInfo bcm2835_dma_info = { + .name = TYPE_BCM2835_DMA, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(BCM2835DMAState), + .class_init = bcm2835_dma_class_init, + .instance_init = bcm2835_dma_init, +}; + +static void bcm2835_dma_register_types(void) +{ + type_register_static(&bcm2835_dma_info); +} + +type_init(bcm2835_dma_register_types) diff --git a/hw/dma/etraxfs_dma.c b/hw/dma/etraxfs_dma.c new file mode 100644 index 000000000..c4334e87b --- /dev/null +++ b/hw/dma/etraxfs_dma.c @@ -0,0 +1,780 @@ +/* + * QEMU ETRAX DMA Controller. + * + * Copyright (c) 2008 Edgar E. Iglesias, Axis Communications AB. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include "hw/hw.h" +#include "hw/irq.h" +#include "qemu/main-loop.h" +#include "sysemu/runstate.h" +#include "exec/address-spaces.h" + +#include "hw/cris/etraxfs_dma.h" + +#define D(x) + +#define RW_DATA (0x0 / 4) +#define RW_SAVED_DATA (0x58 / 4) +#define RW_SAVED_DATA_BUF (0x5c / 4) +#define RW_GROUP (0x60 / 4) +#define RW_GROUP_DOWN (0x7c / 4) +#define RW_CMD (0x80 / 4) +#define RW_CFG (0x84 / 4) +#define RW_STAT (0x88 / 4) +#define RW_INTR_MASK (0x8c / 4) +#define RW_ACK_INTR (0x90 / 4) +#define R_INTR (0x94 / 4) +#define R_MASKED_INTR (0x98 / 4) +#define RW_STREAM_CMD (0x9c / 4) + +#define DMA_REG_MAX (0x100 / 4) + +/* descriptors */ + +// ------------------------------------------------------------ dma_descr_group +typedef struct dma_descr_group { + uint32_t next; + unsigned eol : 1; + unsigned tol : 1; + unsigned bol : 1; + unsigned : 1; + unsigned intr : 1; + unsigned : 2; + unsigned en : 1; + unsigned : 7; + unsigned dis : 1; + unsigned md : 16; + struct dma_descr_group *up; + union { + struct dma_descr_context *context; + struct dma_descr_group *group; + } down; +} dma_descr_group; + +// ---------------------------------------------------------- dma_descr_context +typedef struct dma_descr_context { + uint32_t next; + unsigned eol : 1; + unsigned : 3; + unsigned intr : 1; + unsigned : 1; + unsigned store_mode : 1; + unsigned en : 1; + unsigned : 7; + unsigned dis : 1; + unsigned md0 : 16; + unsigned md1; + unsigned md2; + unsigned md3; + unsigned md4; + uint32_t saved_data; + uint32_t saved_data_buf; +} dma_descr_context; + +// ------------------------------------------------------------- dma_descr_data +typedef struct dma_descr_data { + uint32_t next; + uint32_t buf; + unsigned eol : 1; + unsigned : 2; + unsigned out_eop : 1; + unsigned intr : 1; + unsigned wait : 1; + unsigned : 2; + unsigned : 3; + unsigned in_eop : 1; + unsigned : 4; + unsigned md : 16; + uint32_t after; +} dma_descr_data; + +/* Constants */ +enum { + regk_dma_ack_pkt = 0x00000100, + regk_dma_anytime = 0x00000001, + regk_dma_array = 0x00000008, + regk_dma_burst = 0x00000020, + regk_dma_client = 0x00000002, + regk_dma_copy_next = 0x00000010, + regk_dma_copy_up = 0x00000020, + regk_dma_data_at_eol = 0x00000001, + regk_dma_dis_c = 0x00000010, + regk_dma_dis_g = 0x00000020, + regk_dma_idle = 0x00000001, + regk_dma_intern = 0x00000004, + regk_dma_load_c = 0x00000200, + regk_dma_load_c_n = 0x00000280, + regk_dma_load_c_next = 0x00000240, + regk_dma_load_d = 0x00000140, + regk_dma_load_g = 0x00000300, + regk_dma_load_g_down = 0x000003c0, + regk_dma_load_g_next = 0x00000340, + regk_dma_load_g_up = 0x00000380, + regk_dma_next_en = 0x00000010, + regk_dma_next_pkt = 0x00000010, + regk_dma_no = 0x00000000, + regk_dma_only_at_wait = 0x00000000, + regk_dma_restore = 0x00000020, + regk_dma_rst = 0x00000001, + regk_dma_running = 0x00000004, + regk_dma_rw_cfg_default = 0x00000000, + regk_dma_rw_cmd_default = 0x00000000, + regk_dma_rw_intr_mask_default = 0x00000000, + regk_dma_rw_stat_default = 0x00000101, + regk_dma_rw_stream_cmd_default = 0x00000000, + regk_dma_save_down = 0x00000020, + regk_dma_save_up = 0x00000020, + regk_dma_set_reg = 0x00000050, + regk_dma_set_w_size1 = 0x00000190, + regk_dma_set_w_size2 = 0x000001a0, + regk_dma_set_w_size4 = 0x000001c0, + regk_dma_stopped = 0x00000002, + regk_dma_store_c = 0x00000002, + regk_dma_store_descr = 0x00000000, + regk_dma_store_g = 0x00000004, + regk_dma_store_md = 0x00000001, + regk_dma_sw = 0x00000008, + regk_dma_update_down = 0x00000020, + 
regk_dma_yes = 0x00000001 +}; + +enum dma_ch_state +{ + RST = 1, + STOPPED = 2, + RUNNING = 4 +}; + +struct fs_dma_channel +{ + qemu_irq irq; + struct etraxfs_dma_client *client; + + /* Internal status. */ + int stream_cmd_src; + enum dma_ch_state state; + + unsigned int input : 1; + unsigned int eol : 1; + + struct dma_descr_group current_g; + struct dma_descr_context current_c; + struct dma_descr_data current_d; + + /* Control registers. */ + uint32_t regs[DMA_REG_MAX]; +}; + +struct fs_dma_ctrl +{ + MemoryRegion mmio; + int nr_channels; + struct fs_dma_channel *channels; + + QEMUBH *bh; +}; + +static void DMA_run(void *opaque); +static int channel_out_run(struct fs_dma_ctrl *ctrl, int c); + +static inline uint32_t channel_reg(struct fs_dma_ctrl *ctrl, int c, int reg) +{ + return ctrl->channels[c].regs[reg]; +} + +static inline int channel_stopped(struct fs_dma_ctrl *ctrl, int c) +{ + return channel_reg(ctrl, c, RW_CFG) & 2; +} + +static inline int channel_en(struct fs_dma_ctrl *ctrl, int c) +{ + return (channel_reg(ctrl, c, RW_CFG) & 1) + && ctrl->channels[c].client; +} + +static inline int fs_channel(hwaddr addr) +{ + /* Every channel has a 0x2000 ctrl register map. */ + return addr >> 13; +} + +#ifdef USE_THIS_DEAD_CODE +static void channel_load_g(struct fs_dma_ctrl *ctrl, int c) +{ + hwaddr addr = channel_reg(ctrl, c, RW_GROUP); + + /* Load and decode. FIXME: handle endianness. */ + cpu_physical_memory_read(addr, &ctrl->channels[c].current_g, + sizeof(ctrl->channels[c].current_g)); +} + +static void dump_c(int ch, struct dma_descr_context *c) +{ + printf("%s ch=%d\n", __func__, ch); + printf("next=%x\n", c->next); + printf("saved_data=%x\n", c->saved_data); + printf("saved_data_buf=%x\n", c->saved_data_buf); + printf("eol=%x\n", (uint32_t) c->eol); +} + +static void dump_d(int ch, struct dma_descr_data *d) +{ + printf("%s ch=%d\n", __func__, ch); + printf("next=%x\n", d->next); + printf("buf=%x\n", d->buf); + printf("after=%x\n", d->after); + printf("intr=%x\n", (uint32_t) d->intr); + printf("out_eop=%x\n", (uint32_t) d->out_eop); + printf("in_eop=%x\n", (uint32_t) d->in_eop); + printf("eol=%x\n", (uint32_t) d->eol); +} +#endif + +static void channel_load_c(struct fs_dma_ctrl *ctrl, int c) +{ + hwaddr addr = channel_reg(ctrl, c, RW_GROUP_DOWN); + + /* Load and decode. FIXME: handle endianness. */ + cpu_physical_memory_read(addr, &ctrl->channels[c].current_c, + sizeof(ctrl->channels[c].current_c)); + + D(dump_c(c, &ctrl->channels[c].current_c)); + /* I guess this should update the current pos. */ + ctrl->channels[c].regs[RW_SAVED_DATA] = + (uint32_t)(unsigned long)ctrl->channels[c].current_c.saved_data; + ctrl->channels[c].regs[RW_SAVED_DATA_BUF] = + (uint32_t)(unsigned long)ctrl->channels[c].current_c.saved_data_buf; +} + +static void channel_load_d(struct fs_dma_ctrl *ctrl, int c) +{ + hwaddr addr = channel_reg(ctrl, c, RW_SAVED_DATA); + + /* Load and decode. FIXME: handle endianness. */ + D(printf("%s ch=%d addr=" TARGET_FMT_plx "\n", __func__, c, addr)); + cpu_physical_memory_read(addr, &ctrl->channels[c].current_d, + sizeof(ctrl->channels[c].current_d)); + + D(dump_d(c, &ctrl->channels[c].current_d)); + ctrl->channels[c].regs[RW_DATA] = addr; +} + +static void channel_store_c(struct fs_dma_ctrl *ctrl, int c) +{ + hwaddr addr = channel_reg(ctrl, c, RW_GROUP_DOWN); + + /* Encode and store. FIXME: handle endianness. 
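The context struct is written back to guest memory as a raw host struct, so this assumes a little-endian host with a matching bit-field layout.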
*/ + D(printf("%s ch=%d addr=" TARGET_FMT_plx "\n", __func__, c, addr)); + D(dump_d(c, &ctrl->channels[c].current_d)); + cpu_physical_memory_write(addr, &ctrl->channels[c].current_c, + sizeof(ctrl->channels[c].current_c)); +} + +static void channel_store_d(struct fs_dma_ctrl *ctrl, int c) +{ + hwaddr addr = channel_reg(ctrl, c, RW_SAVED_DATA); + + /* Encode and store. FIXME: handle endianness. */ + D(printf("%s ch=%d addr=" TARGET_FMT_plx "\n", __func__, c, addr)); + cpu_physical_memory_write(addr, &ctrl->channels[c].current_d, + sizeof(ctrl->channels[c].current_d)); +} + +static inline void channel_stop(struct fs_dma_ctrl *ctrl, int c) +{ + /* FIXME: */ +} + +static inline void channel_start(struct fs_dma_ctrl *ctrl, int c) +{ + if (ctrl->channels[c].client) + { + ctrl->channels[c].eol = 0; + ctrl->channels[c].state = RUNNING; + if (!ctrl->channels[c].input) + channel_out_run(ctrl, c); + } else + printf("WARNING: starting DMA ch %d with no client\n", c); + + qemu_bh_schedule_idle(ctrl->bh); +} + +static void channel_continue(struct fs_dma_ctrl *ctrl, int c) +{ + if (!channel_en(ctrl, c) + || channel_stopped(ctrl, c) + || ctrl->channels[c].state != RUNNING + /* Only reload the current data descriptor if it has eol set. */ + || !ctrl->channels[c].current_d.eol) { + D(printf("continue failed ch=%d state=%d stopped=%d en=%d eol=%d\n", + c, ctrl->channels[c].state, + channel_stopped(ctrl, c), + channel_en(ctrl,c), + ctrl->channels[c].eol)); + D(dump_d(c, &ctrl->channels[c].current_d)); + return; + } + + /* Reload the current descriptor. */ + channel_load_d(ctrl, c); + + /* If the current descriptor cleared the eol flag and we had already + reached eol state, do the continue. */ + if (!ctrl->channels[c].current_d.eol && ctrl->channels[c].eol) { + D(printf("continue %d ok %x\n", c, + ctrl->channels[c].current_d.next)); + ctrl->channels[c].regs[RW_SAVED_DATA] = + (uint32_t)(unsigned long)ctrl->channels[c].current_d.next; + channel_load_d(ctrl, c); + ctrl->channels[c].regs[RW_SAVED_DATA_BUF] = + (uint32_t)(unsigned long)ctrl->channels[c].current_d.buf; + + channel_start(ctrl, c); + } + ctrl->channels[c].regs[RW_SAVED_DATA_BUF] = + (uint32_t)(unsigned long)ctrl->channels[c].current_d.buf; +} + +static void channel_stream_cmd(struct fs_dma_ctrl *ctrl, int c, uint32_t v) +{ + unsigned int cmd = v & ((1 << 10) - 1); + + D(printf("%s ch=%d cmd=%x\n", + __func__, c, cmd)); + if (cmd & regk_dma_load_d) { + channel_load_d(ctrl, c); + if (cmd & regk_dma_burst) + channel_start(ctrl, c); + } + + if (cmd & regk_dma_load_c) { + channel_load_c(ctrl, c); + } +} + +static void channel_update_irq(struct fs_dma_ctrl *ctrl, int c) +{ + D(printf("%s %d\n", __func__, c)); + ctrl->channels[c].regs[R_INTR] &= + ~(ctrl->channels[c].regs[RW_ACK_INTR]); + + ctrl->channels[c].regs[R_MASKED_INTR] = + ctrl->channels[c].regs[R_INTR] + & ctrl->channels[c].regs[RW_INTR_MASK]; + + D(printf("%s: chan=%d masked_intr=%x\n", __func__, + c, + ctrl->channels[c].regs[R_MASKED_INTR])); + + qemu_set_irq(ctrl->channels[c].irq, + !!ctrl->channels[c].regs[R_MASKED_INTR]); +} + +static int channel_out_run(struct fs_dma_ctrl *ctrl, int c) +{ + uint32_t len; + uint32_t saved_data_buf; + unsigned char buf[2 * 1024]; + + struct dma_context_metadata meta; + bool send_context = true; + + if (ctrl->channels[c].eol) + return 0; + + do { + bool out_eop; + D(printf("ch=%d buf=%x after=%x\n", + c, + (uint32_t)ctrl->channels[c].current_d.buf, + (uint32_t)ctrl->channels[c].current_d.after)); + + if (send_context) { + if 
(ctrl->channels[c].client->client.metadata_push) { + meta.metadata = ctrl->channels[c].current_d.md; + ctrl->channels[c].client->client.metadata_push( + ctrl->channels[c].client->client.opaque, + &meta); + } + send_context = false; + } + + channel_load_d(ctrl, c); + saved_data_buf = channel_reg(ctrl, c, RW_SAVED_DATA_BUF); + len = (uint32_t)(unsigned long) + ctrl->channels[c].current_d.after; + len -= saved_data_buf; + + if (len > sizeof buf) + len = sizeof buf; + cpu_physical_memory_read (saved_data_buf, buf, len); + + out_eop = ((saved_data_buf + len) == + ctrl->channels[c].current_d.after) && + ctrl->channels[c].current_d.out_eop; + + D(printf("channel %d pushes %x %u bytes eop=%u\n", c, + saved_data_buf, len, out_eop)); + + if (ctrl->channels[c].client->client.push) { + if (len > 0) { + ctrl->channels[c].client->client.push( + ctrl->channels[c].client->client.opaque, + buf, len, out_eop); + } + } else { + printf("WARNING: DMA ch%d data loss," + " no attached client.\n", c); + } + + saved_data_buf += len; + + if (saved_data_buf == (uint32_t)(unsigned long) + ctrl->channels[c].current_d.after) { + /* Done. Step to next. */ + if (ctrl->channels[c].current_d.out_eop) { + send_context = true; + } + if (ctrl->channels[c].current_d.intr) { + /* data intr. */ + D(printf("signal intr %d eol=%d\n", + len, ctrl->channels[c].current_d.eol)); + ctrl->channels[c].regs[R_INTR] |= (1 << 2); + channel_update_irq(ctrl, c); + } + channel_store_d(ctrl, c); + if (ctrl->channels[c].current_d.eol) { + D(printf("channel %d EOL\n", c)); + ctrl->channels[c].eol = 1; + + /* Mark the context as disabled. */ + ctrl->channels[c].current_c.dis = 1; + channel_store_c(ctrl, c); + + channel_stop(ctrl, c); + } else { + ctrl->channels[c].regs[RW_SAVED_DATA] = + (uint32_t)(unsigned long)ctrl-> + channels[c].current_d.next; + /* Load new descriptor. */ + channel_load_d(ctrl, c); + saved_data_buf = (uint32_t)(unsigned long) + ctrl->channels[c].current_d.buf; + } + + ctrl->channels[c].regs[RW_SAVED_DATA_BUF] = + saved_data_buf; + D(dump_d(c, &ctrl->channels[c].current_d)); + } + ctrl->channels[c].regs[RW_SAVED_DATA_BUF] = saved_data_buf; + } while (!ctrl->channels[c].eol); + return 1; +} + +static int channel_in_process(struct fs_dma_ctrl *ctrl, int c, + unsigned char *buf, int buflen, int eop) +{ + uint32_t len; + uint32_t saved_data_buf; + + if (ctrl->channels[c].eol == 1) + return 0; + + channel_load_d(ctrl, c); + saved_data_buf = channel_reg(ctrl, c, RW_SAVED_DATA_BUF); + len = (uint32_t)(unsigned long)ctrl->channels[c].current_d.after; + len -= saved_data_buf; + + if (len > buflen) + len = buflen; + + cpu_physical_memory_write (saved_data_buf, buf, len); + saved_data_buf += len; + + if (saved_data_buf == + (uint32_t)(unsigned long)ctrl->channels[c].current_d.after + || eop) { + uint32_t r_intr = ctrl->channels[c].regs[R_INTR]; + + D(printf("in dscr end len=%d\n", + ctrl->channels[c].current_d.after + - ctrl->channels[c].current_d.buf)); + ctrl->channels[c].current_d.after = saved_data_buf; + + /* Done. Step to next. */ + if (ctrl->channels[c].current_d.intr) { + /* TODO: signal eop to the client. */ + /* data intr. 
*/ + ctrl->channels[c].regs[R_INTR] |= 3; + } + if (eop) { + ctrl->channels[c].current_d.in_eop = 1; + ctrl->channels[c].regs[R_INTR] |= 8; + } + if (r_intr != ctrl->channels[c].regs[R_INTR]) + channel_update_irq(ctrl, c); + + channel_store_d(ctrl, c); + D(dump_d(c, &ctrl->channels[c].current_d)); + + if (ctrl->channels[c].current_d.eol) { + D(printf("channel %d EOL\n", c)); + ctrl->channels[c].eol = 1; + + /* Mark the context as disabled. */ + ctrl->channels[c].current_c.dis = 1; + channel_store_c(ctrl, c); + + channel_stop(ctrl, c); + } else { + ctrl->channels[c].regs[RW_SAVED_DATA] = + (uint32_t)(unsigned long)ctrl-> + channels[c].current_d.next; + /* Load new descriptor. */ + channel_load_d(ctrl, c); + saved_data_buf = (uint32_t)(unsigned long) + ctrl->channels[c].current_d.buf; + } + } + + ctrl->channels[c].regs[RW_SAVED_DATA_BUF] = saved_data_buf; + return len; +} + +static inline int channel_in_run(struct fs_dma_ctrl *ctrl, int c) +{ + if (ctrl->channels[c].client->client.pull) { + ctrl->channels[c].client->client.pull( + ctrl->channels[c].client->client.opaque); + return 1; + } else + return 0; +} + +static uint32_t dma_rinvalid (void *opaque, hwaddr addr) +{ + hw_error("Unsupported short raccess. reg=" TARGET_FMT_plx "\n", addr); + return 0; +} + +static uint64_t +dma_read(void *opaque, hwaddr addr, unsigned int size) +{ + struct fs_dma_ctrl *ctrl = opaque; + int c; + uint32_t r = 0; + + if (size != 4) { + dma_rinvalid(opaque, addr); + } + + /* Make addr relative to this channel and bounded to nr regs. */ + c = fs_channel(addr); + addr &= 0xff; + addr >>= 2; + switch (addr) + { + case RW_STAT: + r = ctrl->channels[c].state & 7; + r |= ctrl->channels[c].eol << 5; + r |= ctrl->channels[c].stream_cmd_src << 8; + break; + + default: + r = ctrl->channels[c].regs[addr]; + D(printf ("%s c=%d addr=" TARGET_FMT_plx "\n", + __func__, c, addr)); + break; + } + return r; +} + +static void +dma_winvalid (void *opaque, hwaddr addr, uint32_t value) +{ + hw_error("Unsupported short waccess. reg=" TARGET_FMT_plx "\n", addr); +} + +static void +dma_update_state(struct fs_dma_ctrl *ctrl, int c) +{ + if (ctrl->channels[c].regs[RW_CFG] & 2) + ctrl->channels[c].state = STOPPED; + if (!(ctrl->channels[c].regs[RW_CFG] & 1)) + ctrl->channels[c].state = RST; +} + +static void +dma_write(void *opaque, hwaddr addr, + uint64_t val64, unsigned int size) +{ + struct fs_dma_ctrl *ctrl = opaque; + uint32_t value = val64; + int c; + + if (size != 4) { + dma_winvalid(opaque, addr, value); + } + + /* Make addr relative to this channel and bounded to nr regs. */ + c = fs_channel(addr); + addr &= 0xff; + addr >>= 2; + switch (addr) + { + case RW_DATA: + ctrl->channels[c].regs[addr] = value; + break; + + case RW_CFG: + ctrl->channels[c].regs[addr] = value; + dma_update_state(ctrl, c); + break; + case RW_CMD: + /* continue. 
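A write with bit 0 set asks the channel to continue; channel_continue() reloads the current descriptor and restarts the channel if the guest has cleared its eol flag.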
*/ + if (value & ~1) + printf("Invalid store to ch=%d RW_CMD %x\n", + c, value); + ctrl->channels[c].regs[addr] = value; + channel_continue(ctrl, c); + break; + + case RW_SAVED_DATA: + case RW_SAVED_DATA_BUF: + case RW_GROUP: + case RW_GROUP_DOWN: + ctrl->channels[c].regs[addr] = value; + break; + + case RW_ACK_INTR: + case RW_INTR_MASK: + ctrl->channels[c].regs[addr] = value; + channel_update_irq(ctrl, c); + if (addr == RW_ACK_INTR) + ctrl->channels[c].regs[RW_ACK_INTR] = 0; + break; + + case RW_STREAM_CMD: + if (value & ~1023) + printf("Invalid store to ch=%d " + "RW_STREAMCMD %x\n", + c, value); + ctrl->channels[c].regs[addr] = value; + D(printf("stream_cmd ch=%d\n", c)); + channel_stream_cmd(ctrl, c, value); + break; + + default: + D(printf ("%s c=%d " TARGET_FMT_plx "\n", + __func__, c, addr)); + break; + } +} + +static const MemoryRegionOps dma_ops = { + .read = dma_read, + .write = dma_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + .min_access_size = 1, + .max_access_size = 4 + } +}; + +static int etraxfs_dmac_run(void *opaque) +{ + struct fs_dma_ctrl *ctrl = opaque; + int i; + int p = 0; + + for (i = 0; + i < ctrl->nr_channels; + i++) + { + if (ctrl->channels[i].state == RUNNING) + { + if (ctrl->channels[i].input) { + p += channel_in_run(ctrl, i); + } else { + p += channel_out_run(ctrl, i); + } + } + } + return p; +} + +int etraxfs_dmac_input(struct etraxfs_dma_client *client, + void *buf, int len, int eop) +{ + return channel_in_process(client->ctrl, client->channel, + buf, len, eop); +} + +/* Connect an IRQ line with a channel. */ +void etraxfs_dmac_connect(void *opaque, int c, qemu_irq *line, int input) +{ + struct fs_dma_ctrl *ctrl = opaque; + ctrl->channels[c].irq = *line; + ctrl->channels[c].input = input; +} + +void etraxfs_dmac_connect_client(void *opaque, int c, + struct etraxfs_dma_client *cl) +{ + struct fs_dma_ctrl *ctrl = opaque; + cl->ctrl = ctrl; + cl->channel = c; + ctrl->channels[c].client = cl; +} + + +static void DMA_run(void *opaque) +{ + struct fs_dma_ctrl *etraxfs_dmac = opaque; + int p = 1; + + if (runstate_is_running()) + p = etraxfs_dmac_run(etraxfs_dmac); + + if (p) + qemu_bh_schedule_idle(etraxfs_dmac->bh); +} + +void *etraxfs_dmac_init(hwaddr base, int nr_channels) +{ + struct fs_dma_ctrl *ctrl = NULL; + + ctrl = g_malloc0(sizeof *ctrl); + + ctrl->bh = qemu_bh_new(DMA_run, ctrl); + + ctrl->nr_channels = nr_channels; + ctrl->channels = g_malloc0(sizeof ctrl->channels[0] * nr_channels); + + memory_region_init_io(&ctrl->mmio, NULL, &dma_ops, ctrl, "etraxfs-dma", + nr_channels * 0x2000); + memory_region_add_subregion(get_system_memory(), base, &ctrl->mmio); + + return ctrl; +} diff --git a/hw/dma/i82374.c b/hw/dma/i82374.c new file mode 100644 index 000000000..34c3aaf7d --- /dev/null +++ b/hw/dma/i82374.c @@ -0,0 +1,168 @@ +/* + * QEMU Intel 82374 emulation (Enhanced DMA controller) + * + * Copyright (c) 2010 Hervé Poussineau + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/module.h" +#include "hw/isa/isa.h" +#include "hw/qdev-properties.h" +#include "migration/vmstate.h" +#include "hw/dma/i8257.h" +#include "qom/object.h" + +#define TYPE_I82374 "i82374" +OBJECT_DECLARE_SIMPLE_TYPE(I82374State, I82374) + +//#define DEBUG_I82374 + +#ifdef DEBUG_I82374 +#define DPRINTF(fmt, ...) \ +do { fprintf(stderr, "i82374: " fmt , ## __VA_ARGS__); } while (0) +#else +#define DPRINTF(fmt, ...) \ +do {} while (0) +#endif +#define BADF(fmt, ...) \ +do { fprintf(stderr, "i82374 ERROR: " fmt , ## __VA_ARGS__); } while (0) + +struct I82374State { + ISADevice parent_obj; + + uint32_t iobase; + uint8_t commands[8]; + PortioList port_list; +}; + +static const VMStateDescription vmstate_i82374 = { + .name = "i82374", + .version_id = 0, + .minimum_version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_UINT8_ARRAY(commands, I82374State, 8), + VMSTATE_END_OF_LIST() + }, +}; + +static uint32_t i82374_read_isr(void *opaque, uint32_t nport) +{ + uint32_t val = 0; + + BADF("%s: %08x\n", __func__, nport); + + DPRINTF("%s: %08x=%08x\n", __func__, nport, val); + return val; +} + +static void i82374_write_command(void *opaque, uint32_t nport, uint32_t data) +{ + DPRINTF("%s: %08x=%08x\n", __func__, nport, data); + + if (data != 0x42) { + /* Not Stop S/G command */ + BADF("%s: %08x=%08x\n", __func__, nport, data); + } +} + +static uint32_t i82374_read_status(void *opaque, uint32_t nport) +{ + uint32_t val = 0; + + BADF("%s: %08x\n", __func__, nport); + + DPRINTF("%s: %08x=%08x\n", __func__, nport, val); + return val; +} + +static void i82374_write_descriptor(void *opaque, uint32_t nport, uint32_t data) +{ + DPRINTF("%s: %08x=%08x\n", __func__, nport, data); + + BADF("%s: %08x=%08x\n", __func__, nport, data); +} + +static uint32_t i82374_read_descriptor(void *opaque, uint32_t nport) +{ + uint32_t val = 0; + + BADF("%s: %08x\n", __func__, nport); + + DPRINTF("%s: %08x=%08x\n", __func__, nport, val); + return val; +} + +static const MemoryRegionPortio i82374_portio_list[] = { + { 0x0A, 1, 1, .read = i82374_read_isr, }, + { 0x10, 8, 1, .write = i82374_write_command, }, + { 0x18, 8, 1, .read = i82374_read_status, }, + { 0x20, 0x20, 1, + .write = i82374_write_descriptor, .read = i82374_read_descriptor, }, + PORTIO_END_OF_LIST(), +}; + +static void i82374_realize(DeviceState *dev, Error **errp) +{ + I82374State *s = I82374(dev); + ISABus *isa_bus = isa_bus_from_device(ISA_DEVICE(dev)); + + if (isa_get_dma(isa_bus, 0)) { + error_setg(errp, "DMA already initialized on ISA bus"); + return; + } + i8257_dma_init(isa_bus, true); + + portio_list_init(&s->port_list, OBJECT(s), i82374_portio_list, s, + "i82374"); + portio_list_add(&s->port_list, isa_address_space_io(&s->parent_obj), + s->iobase); + + memset(s->commands, 0, sizeof(s->commands)); +} + +static Property i82374_properties[] = { + DEFINE_PROP_UINT32("iobase", I82374State, iobase, 0x400), + DEFINE_PROP_END_OF_LIST() +}; + +static void i82374_class_init(ObjectClass *klass, void *data) +{ + 
DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = i82374_realize; + dc->vmsd = &vmstate_i82374; + device_class_set_props(dc, i82374_properties); +} + +static const TypeInfo i82374_info = { + .name = TYPE_I82374, + .parent = TYPE_ISA_DEVICE, + .instance_size = sizeof(I82374State), + .class_init = i82374_class_init, +}; + +static void i82374_register_types(void) +{ + type_register_static(&i82374_info); +} + +type_init(i82374_register_types) diff --git a/hw/dma/i8257.c b/hw/dma/i8257.c new file mode 100644 index 000000000..de5f69691 --- /dev/null +++ b/hw/dma/i8257.c @@ -0,0 +1,657 @@ +/* + * QEMU DMA emulation + * + * Copyright (c) 2003-2004 Vassili Karpov (malc) + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "hw/isa/isa.h" +#include "hw/qdev-properties.h" +#include "migration/vmstate.h" +#include "hw/dma/i8257.h" +#include "qapi/error.h" +#include "qemu/main-loop.h" +#include "qemu/module.h" +#include "qemu/log.h" +#include "trace.h" + + +/* #define DEBUG_DMA */ + +#define dolog(...) fprintf (stderr, "dma: " __VA_ARGS__) +#ifdef DEBUG_DMA +#define linfo(...) fprintf (stderr, "dma: " __VA_ARGS__) +#define ldebug(...) fprintf (stderr, "dma: " __VA_ARGS__) +#else +#define linfo(...) +#define ldebug(...) 
+#endif + +#define ADDR 0 +#define COUNT 1 + +enum { + CMD_MEMORY_TO_MEMORY = 0x01, + CMD_FIXED_ADDRESS = 0x02, + CMD_BLOCK_CONTROLLER = 0x04, + CMD_COMPRESSED_TIME = 0x08, + CMD_CYCLIC_PRIORITY = 0x10, + CMD_EXTENDED_WRITE = 0x20, + CMD_LOW_DREQ = 0x40, + CMD_LOW_DACK = 0x80, + CMD_NOT_SUPPORTED = CMD_MEMORY_TO_MEMORY | CMD_FIXED_ADDRESS + | CMD_COMPRESSED_TIME | CMD_CYCLIC_PRIORITY | CMD_EXTENDED_WRITE + | CMD_LOW_DREQ | CMD_LOW_DACK + +}; + +static void i8257_dma_run(void *opaque); + +static const int channels[8] = {-1, 2, 3, 1, -1, -1, -1, 0}; + +static void i8257_write_page(void *opaque, uint32_t nport, uint32_t data) +{ + I8257State *d = opaque; + int ichan; + + ichan = channels[nport & 7]; + if (-1 == ichan) { + dolog ("invalid channel %#x %#x\n", nport, data); + return; + } + d->regs[ichan].page = data; +} + +static void i8257_write_pageh(void *opaque, uint32_t nport, uint32_t data) +{ + I8257State *d = opaque; + int ichan; + + ichan = channels[nport & 7]; + if (-1 == ichan) { + dolog ("invalid channel %#x %#x\n", nport, data); + return; + } + d->regs[ichan].pageh = data; +} + +static uint32_t i8257_read_page(void *opaque, uint32_t nport) +{ + I8257State *d = opaque; + int ichan; + + ichan = channels[nport & 7]; + if (-1 == ichan) { + dolog ("invalid channel read %#x\n", nport); + return 0; + } + return d->regs[ichan].page; +} + +static uint32_t i8257_read_pageh(void *opaque, uint32_t nport) +{ + I8257State *d = opaque; + int ichan; + + ichan = channels[nport & 7]; + if (-1 == ichan) { + dolog ("invalid channel read %#x\n", nport); + return 0; + } + return d->regs[ichan].pageh; +} + +static inline void i8257_init_chan(I8257State *d, int ichan) +{ + I8257Regs *r; + + r = d->regs + ichan; + r->now[ADDR] = r->base[ADDR] << d->dshift; + r->now[COUNT] = 0; +} + +static inline int i8257_getff(I8257State *d) +{ + int ff; + + ff = d->flip_flop; + d->flip_flop = !ff; + return ff; +} + +static uint64_t i8257_read_chan(void *opaque, hwaddr nport, unsigned size) +{ + I8257State *d = opaque; + int ichan, nreg, iport, ff, val, dir; + I8257Regs *r; + + iport = (nport >> d->dshift) & 0x0f; + ichan = iport >> 1; + nreg = iport & 1; + r = d->regs + ichan; + + dir = ((r->mode >> 5) & 1) ? 
-1 : 1; + ff = i8257_getff(d); + if (nreg) + val = (r->base[COUNT] << d->dshift) - r->now[COUNT]; + else + val = r->now[ADDR] + r->now[COUNT] * dir; + + ldebug ("read_chan %#x -> %d\n", iport, val); + return (val >> (d->dshift + (ff << 3))) & 0xff; +} + +static void i8257_write_chan(void *opaque, hwaddr nport, uint64_t data, + unsigned int size) +{ + I8257State *d = opaque; + int iport, ichan, nreg; + I8257Regs *r; + + iport = (nport >> d->dshift) & 0x0f; + ichan = iport >> 1; + nreg = iport & 1; + r = d->regs + ichan; + if (i8257_getff(d)) { + r->base[nreg] = (r->base[nreg] & 0xff) | ((data << 8) & 0xff00); + i8257_init_chan(d, ichan); + } else { + r->base[nreg] = (r->base[nreg] & 0xff00) | (data & 0xff); + } +} + +static void i8257_write_cont(void *opaque, hwaddr nport, uint64_t data, + unsigned int size) +{ + I8257State *d = opaque; + int iport, ichan = 0; + + iport = (nport >> d->dshift) & 0x0f; + switch (iport) { + case 0x00: /* command */ + if ((data != 0) && (data & CMD_NOT_SUPPORTED)) { + qemu_log_mask(LOG_UNIMP, "%s: cmd 0x%02"PRIx64" not supported\n", + __func__, data); + return; + } + d->command = data; + break; + + case 0x01: + ichan = data & 3; + if (data & 4) { + d->status |= 1 << (ichan + 4); + } + else { + d->status &= ~(1 << (ichan + 4)); + } + d->status &= ~(1 << ichan); + i8257_dma_run(d); + break; + + case 0x02: /* single mask */ + if (data & 4) + d->mask |= 1 << (data & 3); + else + d->mask &= ~(1 << (data & 3)); + i8257_dma_run(d); + break; + + case 0x03: /* mode */ + { + ichan = data & 3; +#ifdef DEBUG_DMA + { + int op, ai, dir, opmode; + op = (data >> 2) & 3; + ai = (data >> 4) & 1; + dir = (data >> 5) & 1; + opmode = (data >> 6) & 3; + + linfo ("ichan %d, op %d, ai %d, dir %d, opmode %d\n", + ichan, op, ai, dir, opmode); + } +#endif + d->regs[ichan].mode = data; + break; + } + + case 0x04: /* clear flip flop */ + d->flip_flop = 0; + break; + + case 0x05: /* reset */ + d->flip_flop = 0; + d->mask = ~0; + d->status = 0; + d->command = 0; + break; + + case 0x06: /* clear mask for all channels */ + d->mask = 0; + i8257_dma_run(d); + break; + + case 0x07: /* write mask for all channels */ + d->mask = data; + i8257_dma_run(d); + break; + + default: + dolog ("unknown iport %#x\n", iport); + break; + } + +#ifdef DEBUG_DMA + if (0xc != iport) { + linfo ("write_cont: nport %#06x, ichan % 2d, val %#06x\n", + nport, ichan, data); + } +#endif +} + +static uint64_t i8257_read_cont(void *opaque, hwaddr nport, unsigned size) +{ + I8257State *d = opaque; + int iport, val; + + iport = (nport >> d->dshift) & 0x0f; + switch (iport) { + case 0x00: /* status */ + val = d->status; + d->status &= 0xf0; + break; + case 0x01: /* mask */ + val = d->mask; + break; + default: + val = 0; + break; + } + + ldebug ("read_cont: nport %#06x, iport %#04x val %#x\n", nport, iport, val); + return val; +} + +static bool i8257_dma_has_autoinitialization(IsaDma *obj, int nchan) +{ + I8257State *d = I8257(obj); + return (d->regs[nchan & 3].mode >> 4) & 1; +} + +static void i8257_dma_hold_DREQ(IsaDma *obj, int nchan) +{ + I8257State *d = I8257(obj); + int ichan; + + ichan = nchan & 3; + d->status |= 1 << (ichan + 4); + i8257_dma_run(d); +} + +static void i8257_dma_release_DREQ(IsaDma *obj, int nchan) +{ + I8257State *d = I8257(obj); + int ichan; + + ichan = nchan & 3; + d->status &= ~(1 << (ichan + 4)); + i8257_dma_run(d); +} + +static void i8257_channel_run(I8257State *d, int ichan) +{ + int ncont = d->dshift; + int n; + I8257Regs *r = &d->regs[ichan]; +#ifdef DEBUG_DMA + int dir, opmode; + + dir = 
(r->mode >> 5) & 1; + opmode = (r->mode >> 6) & 3; + + if (dir) { + dolog ("DMA in address decrement mode\n"); + } + if (opmode != 1) { + dolog ("DMA not in single mode select %#x\n", opmode); + } +#endif + + n = r->transfer_handler (r->opaque, ichan + (ncont << 2), + r->now[COUNT], (r->base[COUNT] + 1) << ncont); + r->now[COUNT] = n; + ldebug ("dma_pos %d size %d\n", n, (r->base[COUNT] + 1) << ncont); + if (n == (r->base[COUNT] + 1) << ncont) { + ldebug("transfer done\n"); + d->status |= (1 << ichan); + } +} + +static void i8257_dma_run(void *opaque) +{ + I8257State *d = opaque; + int ichan; + int rearm = 0; + + if (d->running) { + rearm = 1; + goto out; + } else { + d->running = 1; + } + + for (ichan = 0; ichan < 4; ichan++) { + int mask; + + mask = 1 << ichan; + + if ((0 == (d->mask & mask)) && (0 != (d->status & (mask << 4)))) { + i8257_channel_run(d, ichan); + rearm = 1; + } + } + + d->running = 0; +out: + if (rearm) { + qemu_bh_schedule_idle(d->dma_bh); + d->dma_bh_scheduled = true; + } +} + +static void i8257_dma_register_channel(IsaDma *obj, int nchan, + IsaDmaTransferHandler transfer_handler, + void *opaque) +{ + I8257State *d = I8257(obj); + I8257Regs *r; + int ichan; + + ichan = nchan & 3; + + r = d->regs + ichan; + r->transfer_handler = transfer_handler; + r->opaque = opaque; +} + +static bool i8257_is_verify_transfer(I8257Regs *r) +{ + return (r->mode & 0x0c) == 0; +} + +static int i8257_dma_read_memory(IsaDma *obj, int nchan, void *buf, int pos, + int len) +{ + I8257State *d = I8257(obj); + I8257Regs *r = &d->regs[nchan & 3]; + hwaddr addr = ((r->pageh & 0x7f) << 24) | (r->page << 16) | r->now[ADDR]; + + if (i8257_is_verify_transfer(r)) { + return len; + } + + if (r->mode & 0x20) { + int i; + uint8_t *p = buf; + + cpu_physical_memory_read (addr - pos - len, buf, len); + /* What about 16bit transfers? */ + for (i = 0; i < len >> 1; i++) { + uint8_t b = p[len - i - 1]; + p[i] = b; + } + } + else + cpu_physical_memory_read (addr + pos, buf, len); + + return len; +} + +static int i8257_dma_write_memory(IsaDma *obj, int nchan, void *buf, int pos, + int len) +{ + I8257State *s = I8257(obj); + I8257Regs *r = &s->regs[nchan & 3]; + hwaddr addr = ((r->pageh & 0x7f) << 24) | (r->page << 16) | r->now[ADDR]; + + if (i8257_is_verify_transfer(r)) { + return len; + } + + if (r->mode & 0x20) { + int i; + uint8_t *p = buf; + + cpu_physical_memory_write (addr - pos - len, buf, len); + /* What about 16bit transfers? */ + for (i = 0; i < len; i++) { + uint8_t b = p[len - i - 1]; + p[i] = b; + } + } + else + cpu_physical_memory_write (addr + pos, buf, len); + + return len; +} + +/* request the emulator to transfer a new DMA memory block ASAP (even + * if the idle bottom half would not have exited the iothread yet). 
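+ * qemu_notify_event() kicks the main loop out of its poll so that the + * already-scheduled idle bottom half gets to run without extra delay.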
+ */ +static void i8257_dma_schedule(IsaDma *obj) +{ + I8257State *d = I8257(obj); + if (d->dma_bh_scheduled) { + qemu_notify_event(); + } +} + +static void i8257_reset(DeviceState *dev) +{ + I8257State *d = I8257(dev); + i8257_write_cont(d, (0x05 << d->dshift), 0, 1); +} + +static int i8257_phony_handler(void *opaque, int nchan, int dma_pos, + int dma_len) +{ + trace_i8257_unregistered_dma(nchan, dma_pos, dma_len); + return dma_pos; +} + + +static const MemoryRegionOps channel_io_ops = { + .read = i8257_read_chan, + .write = i8257_write_chan, + .endianness = DEVICE_NATIVE_ENDIAN, + .impl = { + .min_access_size = 1, + .max_access_size = 1, + }, +}; + +/* IOport from page_base */ +static const MemoryRegionPortio page_portio_list[] = { + { 0x01, 3, 1, .write = i8257_write_page, .read = i8257_read_page, }, + { 0x07, 1, 1, .write = i8257_write_page, .read = i8257_read_page, }, + PORTIO_END_OF_LIST(), +}; + +/* IOport from pageh_base */ +static const MemoryRegionPortio pageh_portio_list[] = { + { 0x01, 3, 1, .write = i8257_write_pageh, .read = i8257_read_pageh, }, + { 0x07, 3, 1, .write = i8257_write_pageh, .read = i8257_read_pageh, }, + PORTIO_END_OF_LIST(), +}; + +static const MemoryRegionOps cont_io_ops = { + .read = i8257_read_cont, + .write = i8257_write_cont, + .endianness = DEVICE_NATIVE_ENDIAN, + .impl = { + .min_access_size = 1, + .max_access_size = 1, + }, +}; + +static const VMStateDescription vmstate_i8257_regs = { + .name = "dma_regs", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_INT32_ARRAY(now, I8257Regs, 2), + VMSTATE_UINT16_ARRAY(base, I8257Regs, 2), + VMSTATE_UINT8(mode, I8257Regs), + VMSTATE_UINT8(page, I8257Regs), + VMSTATE_UINT8(pageh, I8257Regs), + VMSTATE_UINT8(dack, I8257Regs), + VMSTATE_UINT8(eop, I8257Regs), + VMSTATE_END_OF_LIST() + } +}; + +static int i8257_post_load(void *opaque, int version_id) +{ + I8257State *d = opaque; + i8257_dma_run(d); + + return 0; +} + +static const VMStateDescription vmstate_i8257 = { + .name = "dma", + .version_id = 1, + .minimum_version_id = 1, + .post_load = i8257_post_load, + .fields = (VMStateField[]) { + VMSTATE_UINT8(command, I8257State), + VMSTATE_UINT8(mask, I8257State), + VMSTATE_UINT8(flip_flop, I8257State), + VMSTATE_INT32(dshift, I8257State), + VMSTATE_STRUCT_ARRAY(regs, I8257State, 4, 1, vmstate_i8257_regs, + I8257Regs), + VMSTATE_END_OF_LIST() + } +}; + +static void i8257_realize(DeviceState *dev, Error **errp) +{ + ISADevice *isa = ISA_DEVICE(dev); + I8257State *d = I8257(dev); + int i; + + memory_region_init_io(&d->channel_io, OBJECT(dev), &channel_io_ops, d, + "dma-chan", 8 << d->dshift); + memory_region_add_subregion(isa_address_space_io(isa), + d->base, &d->channel_io); + + isa_register_portio_list(isa, &d->portio_page, + d->page_base, page_portio_list, d, + "dma-page"); + if (d->pageh_base >= 0) { + isa_register_portio_list(isa, &d->portio_pageh, + d->pageh_base, pageh_portio_list, d, + "dma-pageh"); + } + + memory_region_init_io(&d->cont_io, OBJECT(isa), &cont_io_ops, d, + "dma-cont", 8 << d->dshift); + memory_region_add_subregion(isa_address_space_io(isa), + d->base + (8 << d->dshift), &d->cont_io); + + for (i = 0; i < ARRAY_SIZE(d->regs); ++i) { + d->regs[i].transfer_handler = i8257_phony_handler; + } + + d->dma_bh = qemu_bh_new(i8257_dma_run, d); +} + +static Property i8257_properties[] = { + DEFINE_PROP_INT32("base", I8257State, base, 0x00), + DEFINE_PROP_INT32("page-base", I8257State, page_base, 0x80), + DEFINE_PROP_INT32("pageh-base", I8257State, pageh_base, 0x480), + 
DEFINE_PROP_INT32("dshift", I8257State, dshift, 0), + DEFINE_PROP_END_OF_LIST() +}; + +static void i8257_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + IsaDmaClass *idc = ISADMA_CLASS(klass); + + dc->realize = i8257_realize; + dc->reset = i8257_reset; + dc->vmsd = &vmstate_i8257; + device_class_set_props(dc, i8257_properties); + + idc->has_autoinitialization = i8257_dma_has_autoinitialization; + idc->read_memory = i8257_dma_read_memory; + idc->write_memory = i8257_dma_write_memory; + idc->hold_DREQ = i8257_dma_hold_DREQ; + idc->release_DREQ = i8257_dma_release_DREQ; + idc->schedule = i8257_dma_schedule; + idc->register_channel = i8257_dma_register_channel; + /* Reason: needs to be wired up by isa_bus_dma() to work */ + dc->user_creatable = false; +} + +static const TypeInfo i8257_info = { + .name = TYPE_I8257, + .parent = TYPE_ISA_DEVICE, + .instance_size = sizeof(I8257State), + .class_init = i8257_class_init, + .interfaces = (InterfaceInfo[]) { + { TYPE_ISADMA }, + { } + } +}; + +static void i8257_register_types(void) +{ + type_register_static(&i8257_info); +} + +type_init(i8257_register_types) + +void i8257_dma_init(ISABus *bus, bool high_page_enable) +{ + ISADevice *isa1, *isa2; + DeviceState *d; + + isa1 = isa_new(TYPE_I8257); + d = DEVICE(isa1); + qdev_prop_set_int32(d, "base", 0x00); + qdev_prop_set_int32(d, "page-base", 0x80); + qdev_prop_set_int32(d, "pageh-base", high_page_enable ? 0x480 : -1); + qdev_prop_set_int32(d, "dshift", 0); + isa_realize_and_unref(isa1, bus, &error_fatal); + + isa2 = isa_new(TYPE_I8257); + d = DEVICE(isa2); + qdev_prop_set_int32(d, "base", 0xc0); + qdev_prop_set_int32(d, "page-base", 0x88); + qdev_prop_set_int32(d, "pageh-base", high_page_enable ? 0x488 : -1); + qdev_prop_set_int32(d, "dshift", 1); + isa_realize_and_unref(isa2, bus, &error_fatal); + + isa_bus_dma(bus, ISADMA(isa1), ISADMA(isa2)); +} diff --git a/hw/dma/meson.build b/hw/dma/meson.build new file mode 100644 index 000000000..f3f0661bc --- /dev/null +++ b/hw/dma/meson.build @@ -0,0 +1,16 @@ +softmmu_ss.add(when: 'CONFIG_RC4030', if_true: files('rc4030.c')) +softmmu_ss.add(when: 'CONFIG_PL080', if_true: files('pl080.c')) +softmmu_ss.add(when: 'CONFIG_PL330', if_true: files('pl330.c')) +softmmu_ss.add(when: 'CONFIG_I82374', if_true: files('i82374.c')) +softmmu_ss.add(when: 'CONFIG_I8257', if_true: files('i8257.c')) +softmmu_ss.add(when: 'CONFIG_XILINX_AXI', if_true: files('xilinx_axidma.c')) +softmmu_ss.add(when: 'CONFIG_ZYNQ_DEVCFG', if_true: files('xlnx-zynq-devcfg.c')) +softmmu_ss.add(when: 'CONFIG_ETRAXFS', if_true: files('etraxfs_dma.c')) +softmmu_ss.add(when: 'CONFIG_STP2000', if_true: files('sparc32_dma.c')) +softmmu_ss.add(when: 'CONFIG_XLNX_ZYNQMP_ARM', if_true: files('xlnx_dpdma.c')) +softmmu_ss.add(when: 'CONFIG_XLNX_ZDMA', if_true: files('xlnx-zdma.c')) +softmmu_ss.add(when: 'CONFIG_OMAP', if_true: files('omap_dma.c', 'soc_dma.c')) +softmmu_ss.add(when: 'CONFIG_PXA2XX', if_true: files('pxa2xx_dma.c')) +softmmu_ss.add(when: 'CONFIG_RASPI', if_true: files('bcm2835_dma.c')) +softmmu_ss.add(when: 'CONFIG_SIFIVE_PDMA', if_true: files('sifive_pdma.c')) +softmmu_ss.add(when: 'CONFIG_XLNX_CSU_DMA', if_true: files('xlnx_csu_dma.c')) diff --git a/hw/dma/omap_dma.c b/hw/dma/omap_dma.c new file mode 100644 index 000000000..6677237d4 --- /dev/null +++ b/hw/dma/omap_dma.c @@ -0,0 +1,2124 @@ +/* + * TI OMAP DMA gigacell. 
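+ * The multi-channel system DMA engine used by both the OMAP1 and + * OMAP2 SoC models.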
+ * + * Copyright (C) 2006-2008 Andrzej Zaborowski + * Copyright (C) 2007-2008 Lauro Ramos Venancio + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see <http://www.gnu.org/licenses/>. + */ +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "qemu/timer.h" +#include "hw/arm/omap.h" +#include "hw/irq.h" +#include "hw/arm/soc_dma.h" + +struct omap_dma_channel_s { + /* transfer data */ + int burst[2]; + int pack[2]; + int endian[2]; + int endian_lock[2]; + int translate[2]; + enum omap_dma_port port[2]; + hwaddr addr[2]; + omap_dma_addressing_t mode[2]; + uint32_t elements; + uint16_t frames; + int32_t frame_index[2]; + int16_t element_index[2]; + int data_type; + + /* transfer type */ + int transparent_copy; + int constant_fill; + uint32_t color; + int prefetch; + + /* auto init and linked channel data */ + int end_prog; + int repeat; + int auto_init; + int link_enabled; + int link_next_ch; + + /* interruption data */ + int interrupts; + int status; + int cstatus; + + /* state data */ + int active; + int enable; + int sync; + int src_sync; + int pending_request; + int waiting_end_prog; + uint16_t cpc; + int set_update; + + /* sync type */ + int fs; + int bs; + + /* compatibility */ + int omap_3_1_compatible_disable; + + qemu_irq irq; + struct omap_dma_channel_s *sibling; + + struct omap_dma_reg_set_s { + hwaddr src, dest; + int frame; + int element; + int pck_element; + int frame_delta[2]; + int elem_delta[2]; + int frames; + int elements; + int pck_elements; + } active_set; + + struct soc_dma_ch_s *dma; + + /* unused parameters */ + int write_mode; + int priority; + int interleave_disabled; + int type; + int suspend; + int buf_disable; +}; + +struct omap_dma_s { + struct soc_dma_s *dma; + MemoryRegion iomem; + + struct omap_mpu_state_s *mpu; + omap_clk clk; + qemu_irq irq[4]; + void (*intr_update)(struct omap_dma_s *s); + enum omap_dma_model model; + int omap_3_1_mapping_disabled; + + uint32_t gcr; + uint32_t ocp; + uint32_t caps[5]; + uint32_t irqen[4]; + uint32_t irqstat[4]; + + int chans; + struct omap_dma_channel_s ch[32]; + struct omap_dma_lcd_channel_s lcd_ch; +}; + +/* Interrupts */ +#define TIMEOUT_INTR (1 << 0) +#define EVENT_DROP_INTR (1 << 1) +#define HALF_FRAME_INTR (1 << 2) +#define END_FRAME_INTR (1 << 3) +#define LAST_FRAME_INTR (1 << 4) +#define END_BLOCK_INTR (1 << 5) +#define SYNC (1 << 6) +#define END_PKT_INTR (1 << 7) +#define TRANS_ERR_INTR (1 << 8) +#define MISALIGN_INTR (1 << 11) + +static inline void omap_dma_interrupts_update(struct omap_dma_s *s) +{ + s->intr_update(s); +} + +static void omap_dma_channel_load(struct omap_dma_channel_s *ch) +{ + struct omap_dma_reg_set_s *a = &ch->active_set; + int i, normal; + int omap_3_1 = !ch->omap_3_1_compatible_disable; + + /* + * TODO: verify address ranges and alignment + * TODO: port endianness + */ + + a->src = ch->addr[0]; + a->dest = ch->addr[1]; + a->frames = ch->frames; + a->elements = ch->elements; + a->pck_elements = ch->frame_index[!ch->src_sync]; + a->frame = 0; + a->element 
= 0; + a->pck_element = 0; + + if (unlikely(!ch->elements || !ch->frames)) { + printf("%s: bad DMA request\n", __func__); + return; + } + + for (i = 0; i < 2; i ++) + switch (ch->mode[i]) { + case constant: + a->elem_delta[i] = 0; + a->frame_delta[i] = 0; + break; + case post_incremented: + a->elem_delta[i] = ch->data_type; + a->frame_delta[i] = 0; + break; + case single_index: + a->elem_delta[i] = ch->data_type + + ch->element_index[omap_3_1 ? 0 : i] - 1; + a->frame_delta[i] = 0; + break; + case double_index: + a->elem_delta[i] = ch->data_type + + ch->element_index[omap_3_1 ? 0 : i] - 1; + a->frame_delta[i] = ch->frame_index[omap_3_1 ? 0 : i] - + ch->element_index[omap_3_1 ? 0 : i]; + break; + default: + break; + } + + normal = !ch->transparent_copy && !ch->constant_fill && + /* FIFO is big-endian so either (ch->endian[n] == 1) OR + * (ch->endian_lock[n] == 1) means no endianness conversion. */ + (ch->endian[0] | ch->endian_lock[0]) == + (ch->endian[1] | ch->endian_lock[1]); + for (i = 0; i < 2; i ++) { + /* TODO: for a->frame_delta[i] > 0 still use the fast path, just + * limit min_elems in omap_dma_transfer_setup to the nearest frame + * end. */ + if (!a->elem_delta[i] && normal && + (a->frames == 1 || !a->frame_delta[i])) + ch->dma->type[i] = soc_dma_access_const; + else if (a->elem_delta[i] == ch->data_type && normal && + (a->frames == 1 || !a->frame_delta[i])) + ch->dma->type[i] = soc_dma_access_linear; + else + ch->dma->type[i] = soc_dma_access_other; + + ch->dma->vaddr[i] = ch->addr[i]; + } + soc_dma_ch_update(ch->dma); +} + +static void omap_dma_activate_channel(struct omap_dma_s *s, + struct omap_dma_channel_s *ch) +{ + if (!ch->active) { + if (ch->set_update) { + /* It's not clear when the active set is supposed to be + * loaded from registers. We're already loading it when the + * channel is enabled, and for some guests this is not enough + * but that may also be because of a race condition (no + * delays in qemu) in the guest code, which we're just + * working around here. */ + omap_dma_channel_load(ch); + ch->set_update = 0; + } + + ch->active = 1; + soc_dma_set_request(ch->dma, 1); + if (ch->sync) + ch->status |= SYNC; + } +} + +static void omap_dma_deactivate_channel(struct omap_dma_s *s, + struct omap_dma_channel_s *ch) +{ + /* Update cpc */ + ch->cpc = ch->active_set.dest & 0xffff; + + if (ch->pending_request && !ch->waiting_end_prog && ch->enable) { + /* Don't deactivate the channel */ + ch->pending_request = 0; + return; + } + + /* Don't deactivate the channel if it is synchronized and the DMA request is + active */ + if (ch->sync && ch->enable && (s->dma->drqbmp & (1ULL << ch->sync))) + return; + + if (ch->active) { + ch->active = 0; + ch->status &= ~SYNC; + soc_dma_set_request(ch->dma, 0); + } +} + +static void omap_dma_enable_channel(struct omap_dma_s *s, + struct omap_dma_channel_s *ch) +{ + if (!ch->enable) { + ch->enable = 1; + ch->waiting_end_prog = 0; + omap_dma_channel_load(ch); + /* TODO: theoretically if ch->sync && ch->prefetch && + * !s->dma->drqbmp[ch->sync], we should also activate and fetch + * from source and then stall until signalled. 
*/ + if ((!ch->sync) || (s->dma->drqbmp & (1ULL << ch->sync))) { + omap_dma_activate_channel(s, ch); + } + } +} + +static void omap_dma_disable_channel(struct omap_dma_s *s, + struct omap_dma_channel_s *ch) +{ + if (ch->enable) { + ch->enable = 0; + /* Discard any pending request */ + ch->pending_request = 0; + omap_dma_deactivate_channel(s, ch); + } +} + +static void omap_dma_channel_end_prog(struct omap_dma_s *s, + struct omap_dma_channel_s *ch) +{ + if (ch->waiting_end_prog) { + ch->waiting_end_prog = 0; + if (!ch->sync || ch->pending_request) { + ch->pending_request = 0; + omap_dma_activate_channel(s, ch); + } + } +} + +static void omap_dma_interrupts_3_1_update(struct omap_dma_s *s) +{ + struct omap_dma_channel_s *ch = s->ch; + + /* First three interrupts are shared between two channels each. */ + if (ch[0].status | ch[6].status) + qemu_irq_raise(ch[0].irq); + if (ch[1].status | ch[7].status) + qemu_irq_raise(ch[1].irq); + if (ch[2].status | ch[8].status) + qemu_irq_raise(ch[2].irq); + if (ch[3].status) + qemu_irq_raise(ch[3].irq); + if (ch[4].status) + qemu_irq_raise(ch[4].irq); + if (ch[5].status) + qemu_irq_raise(ch[5].irq); +} + +static void omap_dma_interrupts_3_2_update(struct omap_dma_s *s) +{ + struct omap_dma_channel_s *ch = s->ch; + int i; + + for (i = s->chans; i; ch ++, i --) + if (ch->status) + qemu_irq_raise(ch->irq); +} + +static void omap_dma_enable_3_1_mapping(struct omap_dma_s *s) +{ + s->omap_3_1_mapping_disabled = 0; + s->chans = 9; + s->intr_update = omap_dma_interrupts_3_1_update; +} + +static void omap_dma_disable_3_1_mapping(struct omap_dma_s *s) +{ + s->omap_3_1_mapping_disabled = 1; + s->chans = 16; + s->intr_update = omap_dma_interrupts_3_2_update; +} + +static void omap_dma_process_request(struct omap_dma_s *s, int request) +{ + int channel; + int drop_event = 0; + struct omap_dma_channel_s *ch = s->ch; + + for (channel = 0; channel < s->chans; channel ++, ch ++) { + if (ch->enable && ch->sync == request) { + if (!ch->active) + omap_dma_activate_channel(s, ch); + else if (!ch->pending_request) + ch->pending_request = 1; + else { + /* Request collision */ + /* Second request received while processing another request */ + ch->status |= EVENT_DROP_INTR; + drop_event = 1; + } + } + } + + if (drop_event) + omap_dma_interrupts_update(s); +} + +static void omap_dma_transfer_generic(struct soc_dma_ch_s *dma) +{ + uint8_t value[4]; + struct omap_dma_channel_s *ch = dma->opaque; + struct omap_dma_reg_set_s *a = &ch->active_set; + int bytes = dma->bytes; +#ifdef MULTI_REQ + uint16_t status = ch->status; +#endif + + do { + /* Transfer a single element */ + /* FIXME: check the endianness */ + if (!ch->constant_fill) + cpu_physical_memory_read(a->src, value, ch->data_type); + else + *(uint32_t *) value = ch->color; + + if (!ch->transparent_copy || *(uint32_t *) value != ch->color) + cpu_physical_memory_write(a->dest, value, ch->data_type); + + a->src += a->elem_delta[0]; + a->dest += a->elem_delta[1]; + a->element ++; + +#ifndef MULTI_REQ + if (a->element == a->elements) { + /* End of Frame */ + a->element = 0; + a->src += a->frame_delta[0]; + a->dest += a->frame_delta[1]; + a->frame ++; + + /* If the channel is async, update cpc */ + if (!ch->sync) + ch->cpc = a->dest & 0xffff; + } + } while ((bytes -= ch->data_type)); +#else + /* If the channel is element synchronized, deactivate it */ + if (ch->sync && !ch->fs && !ch->bs) + omap_dma_deactivate_channel(s, ch); + + /* If it is the last frame, set the LAST_FRAME interrupt */ + if (a->element == 1 && a->frame == a->frames - 
1) + if (ch->interrupts & LAST_FRAME_INTR) + ch->status |= LAST_FRAME_INTR; + + /* If half of the frame was reached, set the HALF_FRAME + interrupt */ + if (a->element == (a->elements >> 1)) + if (ch->interrupts & HALF_FRAME_INTR) + ch->status |= HALF_FRAME_INTR; + + if (ch->fs && ch->bs) { + a->pck_element ++; + /* Check if a full packet has been transferred. */ + if (a->pck_element == a->pck_elements) { + a->pck_element = 0; + + /* Set the END_PKT interrupt */ + if ((ch->interrupts & END_PKT_INTR) && !ch->src_sync) + ch->status |= END_PKT_INTR; + + /* If the channel is packet-synchronized, deactivate it */ + if (ch->sync) + omap_dma_deactivate_channel(s, ch); + } + } + + if (a->element == a->elements) { + /* End of Frame */ + a->element = 0; + a->src += a->frame_delta[0]; + a->dest += a->frame_delta[1]; + a->frame ++; + + /* If the channel is frame synchronized, deactivate it */ + if (ch->sync && ch->fs && !ch->bs) + omap_dma_deactivate_channel(s, ch); + + /* If the channel is async, update cpc */ + if (!ch->sync) + ch->cpc = a->dest & 0xffff; + + /* Set the END_FRAME interrupt */ + if (ch->interrupts & END_FRAME_INTR) + ch->status |= END_FRAME_INTR; + + if (a->frame == a->frames) { + /* End of Block */ + /* Disable the channel */ + + if (ch->omap_3_1_compatible_disable) { + omap_dma_disable_channel(s, ch); + if (ch->link_enabled) + omap_dma_enable_channel(s, + &s->ch[ch->link_next_ch]); + } else { + if (!ch->auto_init) + omap_dma_disable_channel(s, ch); + else if (ch->repeat || ch->end_prog) + omap_dma_channel_load(ch); + else { + ch->waiting_end_prog = 1; + omap_dma_deactivate_channel(s, ch); + } + } + + if (ch->interrupts & END_BLOCK_INTR) + ch->status |= END_BLOCK_INTR; + } + } + } while (status == ch->status && ch->active); + + omap_dma_interrupts_update(s); +#endif +} + +enum { + omap_dma_intr_element_sync, + omap_dma_intr_last_frame, + omap_dma_intr_half_frame, + omap_dma_intr_frame, + omap_dma_intr_frame_sync, + omap_dma_intr_packet, + omap_dma_intr_packet_sync, + omap_dma_intr_block, + __omap_dma_intr_last, +}; + +static void omap_dma_transfer_setup(struct soc_dma_ch_s *dma) +{ + struct omap_dma_port_if_s *src_p, *dest_p; + struct omap_dma_reg_set_s *a; + struct omap_dma_channel_s *ch = dma->opaque; + struct omap_dma_s *s = dma->dma->opaque; + int frames, min_elems, elements[__omap_dma_intr_last]; + + a = &ch->active_set; + + src_p = &s->mpu->port[ch->port[0]]; + dest_p = &s->mpu->port[ch->port[1]]; + if ((!ch->constant_fill && !src_p->addr_valid(s->mpu, a->src)) || + (!dest_p->addr_valid(s->mpu, a->dest))) { +#if 0 + /* Bus time-out */ + if (ch->interrupts & TIMEOUT_INTR) + ch->status |= TIMEOUT_INTR; + omap_dma_deactivate_channel(s, ch); + continue; +#endif + printf("%s: Bus time-out in DMA%i operation\n", + __func__, dma->num); + } + + min_elems = INT_MAX; + + /* Check all the conditions that terminate the transfer starting + * with those that can occur the soonest. */ +#define INTR_CHECK(cond, id, nelements) \ + if (cond) { \ + elements[id] = nelements; \ + if (elements[id] < min_elems) \ + min_elems = elements[id]; \ + } else \ + elements[id] = INT_MAX; + + /* Elements */ + INTR_CHECK( + ch->sync && !ch->fs && !ch->bs, + omap_dma_intr_element_sync, + 1) + + /* Frames */ + /* TODO: for transfers where entire frames can be read and written + * using memcpy() but a->frame_delta is non-zero, try to still do + * transfers using soc_dma but limit min_elems to a->elements - ... + * See also the TODO in omap_dma_channel_load. 
*/ + INTR_CHECK( + (ch->interrupts & LAST_FRAME_INTR) && + ((a->frame < a->frames - 1) || !a->element), + omap_dma_intr_last_frame, + (a->frames - a->frame - 2) * a->elements + + (a->elements - a->element + 1)) + INTR_CHECK( + ch->interrupts & HALF_FRAME_INTR, + omap_dma_intr_half_frame, + (a->elements >> 1) + + (a->element >= (a->elements >> 1) ? a->elements : 0) - + a->element) + INTR_CHECK( + ch->sync && ch->fs && (ch->interrupts & END_FRAME_INTR), + omap_dma_intr_frame, + a->elements - a->element) + INTR_CHECK( + ch->sync && ch->fs && !ch->bs, + omap_dma_intr_frame_sync, + a->elements - a->element) + + /* Packets */ + INTR_CHECK( + ch->fs && ch->bs && + (ch->interrupts & END_PKT_INTR) && !ch->src_sync, + omap_dma_intr_packet, + a->pck_elements - a->pck_element) + INTR_CHECK( + ch->fs && ch->bs && ch->sync, + omap_dma_intr_packet_sync, + a->pck_elements - a->pck_element) + + /* Blocks */ + INTR_CHECK( + 1, + omap_dma_intr_block, + (a->frames - a->frame - 1) * a->elements + + (a->elements - a->element)) + + dma->bytes = min_elems * ch->data_type; + + /* Set appropriate interrupts and/or deactivate channels */ + +#ifdef MULTI_REQ + /* TODO: should all of this only be done if dma->update, and otherwise + * inside omap_dma_transfer_generic below - check what's faster. */ + if (dma->update) { +#endif + + /* If the channel is element synchronized, deactivate it */ + if (min_elems == elements[omap_dma_intr_element_sync]) + omap_dma_deactivate_channel(s, ch); + + /* If it is the last frame, set the LAST_FRAME interrupt */ + if (min_elems == elements[omap_dma_intr_last_frame]) + ch->status |= LAST_FRAME_INTR; + + /* If exactly half of the frame was reached, set the HALF_FRAME + interrupt */ + if (min_elems == elements[omap_dma_intr_half_frame]) + ch->status |= HALF_FRAME_INTR; + + /* If a full packet has been transferred, set the END_PKT interrupt */ + if (min_elems == elements[omap_dma_intr_packet]) + ch->status |= END_PKT_INTR; + + /* If the channel is packet-synchronized, deactivate it */ + if (min_elems == elements[omap_dma_intr_packet_sync]) + omap_dma_deactivate_channel(s, ch); + + /* If the channel is frame synchronized, deactivate it */ + if (min_elems == elements[omap_dma_intr_frame_sync]) + omap_dma_deactivate_channel(s, ch); + + /* Set the END_FRAME interrupt */ + if (min_elems == elements[omap_dma_intr_frame]) + ch->status |= END_FRAME_INTR; + + if (min_elems == elements[omap_dma_intr_block]) { + /* End of Block */ + /* Disable the channel */ + + if (ch->omap_3_1_compatible_disable) { + omap_dma_disable_channel(s, ch); + if (ch->link_enabled) + omap_dma_enable_channel(s, &s->ch[ch->link_next_ch]); + } else { + if (!ch->auto_init) + omap_dma_disable_channel(s, ch); + else if (ch->repeat || ch->end_prog) + omap_dma_channel_load(ch); + else { + ch->waiting_end_prog = 1; + omap_dma_deactivate_channel(s, ch); + } + } + + if (ch->interrupts & END_BLOCK_INTR) + ch->status |= END_BLOCK_INTR; + } + + /* Update packet number */ + if (ch->fs && ch->bs) { + a->pck_element += min_elems; + a->pck_element %= a->pck_elements; + } + + /* TODO: check if we really need to update anything here or perhaps we + * can skip part of this. 
*/ +#ifndef MULTI_REQ + if (dma->update) { +#endif + a->element += min_elems; + + frames = a->element / a->elements; + a->element = a->element % a->elements; + a->frame += frames; + a->src += min_elems * a->elem_delta[0] + frames * a->frame_delta[0]; + a->dest += min_elems * a->elem_delta[1] + frames * a->frame_delta[1]; + + /* If the channel is async, update cpc */ + if (!ch->sync && frames) + ch->cpc = a->dest & 0xffff; + + /* TODO: if the destination port is IMIF or EMIFF, set the dirty + * bits on it. */ +#ifndef MULTI_REQ + } +#else + } +#endif + + omap_dma_interrupts_update(s); +} + +void omap_dma_reset(struct soc_dma_s *dma) +{ + int i; + struct omap_dma_s *s = dma->opaque; + + soc_dma_reset(s->dma); + if (s->model < omap_dma_4) + s->gcr = 0x0004; + else + s->gcr = 0x00010010; + s->ocp = 0x00000000; + memset(&s->irqstat, 0, sizeof(s->irqstat)); + memset(&s->irqen, 0, sizeof(s->irqen)); + s->lcd_ch.src = emiff; + s->lcd_ch.condition = 0; + s->lcd_ch.interrupts = 0; + s->lcd_ch.dual = 0; + if (s->model < omap_dma_4) + omap_dma_enable_3_1_mapping(s); + for (i = 0; i < s->chans; i ++) { + s->ch[i].suspend = 0; + s->ch[i].prefetch = 0; + s->ch[i].buf_disable = 0; + s->ch[i].src_sync = 0; + memset(&s->ch[i].burst, 0, sizeof(s->ch[i].burst)); + memset(&s->ch[i].port, 0, sizeof(s->ch[i].port)); + memset(&s->ch[i].mode, 0, sizeof(s->ch[i].mode)); + memset(&s->ch[i].frame_index, 0, sizeof(s->ch[i].frame_index)); + memset(&s->ch[i].element_index, 0, sizeof(s->ch[i].element_index)); + memset(&s->ch[i].endian, 0, sizeof(s->ch[i].endian)); + memset(&s->ch[i].endian_lock, 0, sizeof(s->ch[i].endian_lock)); + memset(&s->ch[i].translate, 0, sizeof(s->ch[i].translate)); + s->ch[i].write_mode = 0; + s->ch[i].data_type = 0; + s->ch[i].transparent_copy = 0; + s->ch[i].constant_fill = 0; + s->ch[i].color = 0x00000000; + s->ch[i].end_prog = 0; + s->ch[i].repeat = 0; + s->ch[i].auto_init = 0; + s->ch[i].link_enabled = 0; + if (s->model < omap_dma_4) + s->ch[i].interrupts = 0x0003; + else + s->ch[i].interrupts = 0x0000; + s->ch[i].status = 0; + s->ch[i].cstatus = 0; + s->ch[i].active = 0; + s->ch[i].enable = 0; + s->ch[i].sync = 0; + s->ch[i].pending_request = 0; + s->ch[i].waiting_end_prog = 0; + s->ch[i].cpc = 0x0000; + s->ch[i].fs = 0; + s->ch[i].bs = 0; + s->ch[i].omap_3_1_compatible_disable = 0; + memset(&s->ch[i].active_set, 0, sizeof(s->ch[i].active_set)); + s->ch[i].priority = 0; + s->ch[i].interleave_disabled = 0; + s->ch[i].type = 0; + } +} + +static int omap_dma_ch_reg_read(struct omap_dma_s *s, + struct omap_dma_channel_s *ch, int reg, uint16_t *value) +{ + switch (reg) { + case 0x00: /* SYS_DMA_CSDP_CH0 */ + *value = (ch->burst[1] << 14) | + (ch->pack[1] << 13) | + (ch->port[1] << 9) | + (ch->burst[0] << 7) | + (ch->pack[0] << 6) | + (ch->port[0] << 2) | + (ch->data_type >> 1); + break; + + case 0x02: /* SYS_DMA_CCR_CH0 */ + if (s->model <= omap_dma_3_1) + *value = 0 << 10; /* FIFO_FLUSH reads as 0 */ + else + *value = ch->omap_3_1_compatible_disable << 10; + *value |= (ch->mode[1] << 14) | + (ch->mode[0] << 12) | + (ch->end_prog << 11) | + (ch->repeat << 9) | + (ch->auto_init << 8) | + (ch->enable << 7) | + (ch->priority << 6) | + (ch->fs << 5) | ch->sync; + break; + + case 0x04: /* SYS_DMA_CICR_CH0 */ + *value = ch->interrupts; + break; + + case 0x06: /* SYS_DMA_CSR_CH0 */ + *value = ch->status; + ch->status &= SYNC; + if (!ch->omap_3_1_compatible_disable && ch->sibling) { + *value |= (ch->sibling->status & 0x3f) << 6; + ch->sibling->status &= SYNC; + } + qemu_irq_lower(ch->irq); + break; + + 
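/*
 * Editorial sketch, not part of the original patch: the SYS_DMA_CSR_CH0
 * read in the case above is a read-to-clear status register. The read
 * returns the latched event bits, clears everything except the sticky
 * SYNC bit as a side effect, and drops the interrupt line. A minimal
 * standalone model of the idiom, with illustrative names only:
 *
 *     static uint16_t status_reg_read(uint16_t *status, uint16_t sticky,
 *                                     qemu_irq irq)
 *     {
 *         uint16_t val = *status;   // value the guest observes
 *         *status &= sticky;        // the read clears the latched events
 *         qemu_irq_lower(irq);      // level-triggered IRQ follows suit
 *         return val;
 *     }
 */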
case 0x08: /* SYS_DMA_CSSA_L_CH0 */ + *value = ch->addr[0] & 0x0000ffff; + break; + + case 0x0a: /* SYS_DMA_CSSA_U_CH0 */ + *value = ch->addr[0] >> 16; + break; + + case 0x0c: /* SYS_DMA_CDSA_L_CH0 */ + *value = ch->addr[1] & 0x0000ffff; + break; + + case 0x0e: /* SYS_DMA_CDSA_U_CH0 */ + *value = ch->addr[1] >> 16; + break; + + case 0x10: /* SYS_DMA_CEN_CH0 */ + *value = ch->elements; + break; + + case 0x12: /* SYS_DMA_CFN_CH0 */ + *value = ch->frames; + break; + + case 0x14: /* SYS_DMA_CFI_CH0 */ + *value = ch->frame_index[0]; + break; + + case 0x16: /* SYS_DMA_CEI_CH0 */ + *value = ch->element_index[0]; + break; + + case 0x18: /* SYS_DMA_CPC_CH0 or DMA_CSAC */ + if (ch->omap_3_1_compatible_disable) + *value = ch->active_set.src & 0xffff; /* CSAC */ + else + *value = ch->cpc; + break; + + case 0x1a: /* DMA_CDAC */ + *value = ch->active_set.dest & 0xffff; /* CDAC */ + break; + + case 0x1c: /* DMA_CDEI */ + *value = ch->element_index[1]; + break; + + case 0x1e: /* DMA_CDFI */ + *value = ch->frame_index[1]; + break; + + case 0x20: /* DMA_COLOR_L */ + *value = ch->color & 0xffff; + break; + + case 0x22: /* DMA_COLOR_U */ + *value = ch->color >> 16; + break; + + case 0x24: /* DMA_CCR2 */ + *value = (ch->bs << 2) | + (ch->transparent_copy << 1) | + ch->constant_fill; + break; + + case 0x28: /* DMA_CLNK_CTRL */ + *value = (ch->link_enabled << 15) | + (ch->link_next_ch & 0xf); + break; + + case 0x2a: /* DMA_LCH_CTRL */ + *value = (ch->interleave_disabled << 15) | + ch->type; + break; + + default: + return 1; + } + return 0; +} + +static int omap_dma_ch_reg_write(struct omap_dma_s *s, + struct omap_dma_channel_s *ch, int reg, uint16_t value) +{ + switch (reg) { + case 0x00: /* SYS_DMA_CSDP_CH0 */ + ch->burst[1] = (value & 0xc000) >> 14; + ch->pack[1] = (value & 0x2000) >> 13; + ch->port[1] = (enum omap_dma_port) ((value & 0x1e00) >> 9); + ch->burst[0] = (value & 0x0180) >> 7; + ch->pack[0] = (value & 0x0040) >> 6; + ch->port[0] = (enum omap_dma_port) ((value & 0x003c) >> 2); + if (ch->port[0] >= __omap_dma_port_last) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid DMA port %i\n", + __func__, ch->port[0]); + } + if (ch->port[1] >= __omap_dma_port_last) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid DMA port %i\n", + __func__, ch->port[1]); + } + ch->data_type = 1 << (value & 3); + if ((value & 3) == 3) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: bad data_type for DMA channel\n", __func__); + ch->data_type >>= 1; + } + break; + + case 0x02: /* SYS_DMA_CCR_CH0 */ + ch->mode[1] = (omap_dma_addressing_t) ((value & 0xc000) >> 14); + ch->mode[0] = (omap_dma_addressing_t) ((value & 0x3000) >> 12); + ch->end_prog = (value & 0x0800) >> 11; + if (s->model >= omap_dma_3_2) + ch->omap_3_1_compatible_disable = (value >> 10) & 0x1; + ch->repeat = (value & 0x0200) >> 9; + ch->auto_init = (value & 0x0100) >> 8; + ch->priority = (value & 0x0040) >> 6; + ch->fs = (value & 0x0020) >> 5; + ch->sync = value & 0x001f; + + if (value & 0x0080) + omap_dma_enable_channel(s, ch); + else + omap_dma_disable_channel(s, ch); + + if (ch->end_prog) + omap_dma_channel_end_prog(s, ch); + + break; + + case 0x04: /* SYS_DMA_CICR_CH0 */ + ch->interrupts = value & 0x3f; + break; + + case 0x06: /* SYS_DMA_CSR_CH0 */ + OMAP_RO_REG((hwaddr) reg); + break; + + case 0x08: /* SYS_DMA_CSSA_L_CH0 */ + ch->addr[0] &= 0xffff0000; + ch->addr[0] |= value; + break; + + case 0x0a: /* SYS_DMA_CSSA_U_CH0 */ + ch->addr[0] &= 0x0000ffff; + ch->addr[0] |= (uint32_t) value << 16; + break; + + case 0x0c: /* SYS_DMA_CDSA_L_CH0 */ + ch->addr[1] &= 0xffff0000; 
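/*
 * Editorial note, not part of the original patch: CDSA here, like CSSA in
 * the cases above, is a 32-bit address programmed through two 16-bit
 * halves (cases 0x0c and 0x0e). Each half-write masks out only its own
 * 16 bits and ORs in the new value, so the halves compose regardless of
 * write order; e.g. with hypothetical writes lo = 0x2000, hi = 0x1000:
 *
 *     uint32_t addr = 0;
 *     addr = (addr & 0xffff0000) | 0x2000;            // ..._L_CH0 write
 *     addr = (addr & 0x0000ffff) | (0x1000u << 16);   // ..._U_CH0 write
 *     // addr == 0x10002000
 */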
+ ch->addr[1] |= value; + break; + + case 0x0e: /* SYS_DMA_CDSA_U_CH0 */ + ch->addr[1] &= 0x0000ffff; + ch->addr[1] |= (uint32_t) value << 16; + break; + + case 0x10: /* SYS_DMA_CEN_CH0 */ + ch->elements = value; + break; + + case 0x12: /* SYS_DMA_CFN_CH0 */ + ch->frames = value; + break; + + case 0x14: /* SYS_DMA_CFI_CH0 */ + ch->frame_index[0] = (int16_t) value; + break; + + case 0x16: /* SYS_DMA_CEI_CH0 */ + ch->element_index[0] = (int16_t) value; + break; + + case 0x18: /* SYS_DMA_CPC_CH0 or DMA_CSAC */ + OMAP_RO_REG((hwaddr) reg); + break; + + case 0x1c: /* DMA_CDEI */ + ch->element_index[1] = (int16_t) value; + break; + + case 0x1e: /* DMA_CDFI */ + ch->frame_index[1] = (int16_t) value; + break; + + case 0x20: /* DMA_COLOR_L */ + ch->color &= 0xffff0000; + ch->color |= value; + break; + + case 0x22: /* DMA_COLOR_U */ + ch->color &= 0xffff; + ch->color |= (uint32_t)value << 16; + break; + + case 0x24: /* DMA_CCR2 */ + ch->bs = (value >> 2) & 0x1; + ch->transparent_copy = (value >> 1) & 0x1; + ch->constant_fill = value & 0x1; + break; + + case 0x28: /* DMA_CLNK_CTRL */ + ch->link_enabled = (value >> 15) & 0x1; + if (value & (1 << 14)) { /* Stop_Lnk */ + ch->link_enabled = 0; + omap_dma_disable_channel(s, ch); + } + ch->link_next_ch = value & 0x1f; + break; + + case 0x2a: /* DMA_LCH_CTRL */ + ch->interleave_disabled = (value >> 15) & 0x1; + ch->type = value & 0xf; + break; + + default: + return 1; + } + return 0; +} + +static int omap_dma_3_2_lcd_write(struct omap_dma_lcd_channel_s *s, int offset, + uint16_t value) +{ + switch (offset) { + case 0xbc0: /* DMA_LCD_CSDP */ + s->brust_f2 = (value >> 14) & 0x3; + s->pack_f2 = (value >> 13) & 0x1; + s->data_type_f2 = (1 << ((value >> 11) & 0x3)); + s->brust_f1 = (value >> 7) & 0x3; + s->pack_f1 = (value >> 6) & 0x1; + s->data_type_f1 = (1 << ((value >> 0) & 0x3)); + break; + + case 0xbc2: /* DMA_LCD_CCR */ + s->mode_f2 = (value >> 14) & 0x3; + s->mode_f1 = (value >> 12) & 0x3; + s->end_prog = (value >> 11) & 0x1; + s->omap_3_1_compatible_disable = (value >> 10) & 0x1; + s->repeat = (value >> 9) & 0x1; + s->auto_init = (value >> 8) & 0x1; + s->running = (value >> 7) & 0x1; + s->priority = (value >> 6) & 0x1; + s->bs = (value >> 4) & 0x1; + break; + + case 0xbc4: /* DMA_LCD_CTRL */ + s->dst = (value >> 8) & 0x1; + s->src = ((value >> 6) & 0x3) << 1; + s->condition = 0; + /* Assume no bus errors and thus no BUS_ERROR irq bits. 
*/ + s->interrupts = (value >> 1) & 1; + s->dual = value & 1; + break; + + case 0xbc8: /* TOP_B1_L */ + s->src_f1_top &= 0xffff0000; + s->src_f1_top |= 0x0000ffff & value; + break; + + case 0xbca: /* TOP_B1_U */ + s->src_f1_top &= 0x0000ffff; + s->src_f1_top |= (uint32_t)value << 16; + break; + + case 0xbcc: /* BOT_B1_L */ + s->src_f1_bottom &= 0xffff0000; + s->src_f1_bottom |= 0x0000ffff & value; + break; + + case 0xbce: /* BOT_B1_U */ + s->src_f1_bottom &= 0x0000ffff; + s->src_f1_bottom |= (uint32_t) value << 16; + break; + + case 0xbd0: /* TOP_B2_L */ + s->src_f2_top &= 0xffff0000; + s->src_f2_top |= 0x0000ffff & value; + break; + + case 0xbd2: /* TOP_B2_U */ + s->src_f2_top &= 0x0000ffff; + s->src_f2_top |= (uint32_t) value << 16; + break; + + case 0xbd4: /* BOT_B2_L */ + s->src_f2_bottom &= 0xffff0000; + s->src_f2_bottom |= 0x0000ffff & value; + break; + + case 0xbd6: /* BOT_B2_U */ + s->src_f2_bottom &= 0x0000ffff; + s->src_f2_bottom |= (uint32_t) value << 16; + break; + + case 0xbd8: /* DMA_LCD_SRC_EI_B1 */ + s->element_index_f1 = value; + break; + + case 0xbda: /* DMA_LCD_SRC_FI_B1_L */ + s->frame_index_f1 &= 0xffff0000; + s->frame_index_f1 |= 0x0000ffff & value; + break; + + case 0xbf4: /* DMA_LCD_SRC_FI_B1_U */ + s->frame_index_f1 &= 0x0000ffff; + s->frame_index_f1 |= (uint32_t) value << 16; + break; + + case 0xbdc: /* DMA_LCD_SRC_EI_B2 */ + s->element_index_f2 = value; + break; + + case 0xbde: /* DMA_LCD_SRC_FI_B2_L */ + s->frame_index_f2 &= 0xffff0000; + s->frame_index_f2 |= 0x0000ffff & value; + break; + + case 0xbf6: /* DMA_LCD_SRC_FI_B2_U */ + s->frame_index_f2 &= 0x0000ffff; + s->frame_index_f2 |= (uint32_t) value << 16; + break; + + case 0xbe0: /* DMA_LCD_SRC_EN_B1 */ + s->elements_f1 = value; + break; + + case 0xbe4: /* DMA_LCD_SRC_FN_B1 */ + s->frames_f1 = value; + break; + + case 0xbe2: /* DMA_LCD_SRC_EN_B2 */ + s->elements_f2 = value; + break; + + case 0xbe6: /* DMA_LCD_SRC_FN_B2 */ + s->frames_f2 = value; + break; + + case 0xbea: /* DMA_LCD_LCH_CTRL */ + s->lch_type = value & 0xf; + break; + + default: + return 1; + } + return 0; +} + +static int omap_dma_3_2_lcd_read(struct omap_dma_lcd_channel_s *s, int offset, + uint16_t *ret) +{ + switch (offset) { + case 0xbc0: /* DMA_LCD_CSDP */ + *ret = (s->brust_f2 << 14) | + (s->pack_f2 << 13) | + ((s->data_type_f2 >> 1) << 11) | + (s->brust_f1 << 7) | + (s->pack_f1 << 6) | + ((s->data_type_f1 >> 1) << 0); + break; + + case 0xbc2: /* DMA_LCD_CCR */ + *ret = (s->mode_f2 << 14) | + (s->mode_f1 << 12) | + (s->end_prog << 11) | + (s->omap_3_1_compatible_disable << 10) | + (s->repeat << 9) | + (s->auto_init << 8) | + (s->running << 7) | + (s->priority << 6) | + (s->bs << 4); + break; + + case 0xbc4: /* DMA_LCD_CTRL */ + qemu_irq_lower(s->irq); + *ret = (s->dst << 8) | + ((s->src & 0x6) << 5) | + (s->condition << 3) | + (s->interrupts << 1) | + s->dual; + break; + + case 0xbc8: /* TOP_B1_L */ + *ret = s->src_f1_top & 0xffff; + break; + + case 0xbca: /* TOP_B1_U */ + *ret = s->src_f1_top >> 16; + break; + + case 0xbcc: /* BOT_B1_L */ + *ret = s->src_f1_bottom & 0xffff; + break; + + case 0xbce: /* BOT_B1_U */ + *ret = s->src_f1_bottom >> 16; + break; + + case 0xbd0: /* TOP_B2_L */ + *ret = s->src_f2_top & 0xffff; + break; + + case 0xbd2: /* TOP_B2_U */ + *ret = s->src_f2_top >> 16; + break; + + case 0xbd4: /* BOT_B2_L */ + *ret = s->src_f2_bottom & 0xffff; + break; + + case 0xbd6: /* BOT_B2_U */ + *ret = s->src_f2_bottom >> 16; + break; + + case 0xbd8: /* DMA_LCD_SRC_EI_B1 */ + *ret = s->element_index_f1; + break; + + case 0xbda: /* 
DMA_LCD_SRC_FI_B1_L */ + *ret = s->frame_index_f1 & 0xffff; + break; + + case 0xbf4: /* DMA_LCD_SRC_FI_B1_U */ + *ret = s->frame_index_f1 >> 16; + break; + + case 0xbdc: /* DMA_LCD_SRC_EI_B2 */ + *ret = s->element_index_f2; + break; + + case 0xbde: /* DMA_LCD_SRC_FI_B2_L */ + *ret = s->frame_index_f2 & 0xffff; + break; + + case 0xbf6: /* DMA_LCD_SRC_FI_B2_U */ + *ret = s->frame_index_f2 >> 16; + break; + + case 0xbe0: /* DMA_LCD_SRC_EN_B1 */ + *ret = s->elements_f1; + break; + + case 0xbe4: /* DMA_LCD_SRC_FN_B1 */ + *ret = s->frames_f1; + break; + + case 0xbe2: /* DMA_LCD_SRC_EN_B2 */ + *ret = s->elements_f2; + break; + + case 0xbe6: /* DMA_LCD_SRC_FN_B2 */ + *ret = s->frames_f2; + break; + + case 0xbea: /* DMA_LCD_LCH_CTRL */ + *ret = s->lch_type; + break; + + default: + return 1; + } + return 0; +} + +static int omap_dma_3_1_lcd_write(struct omap_dma_lcd_channel_s *s, int offset, + uint16_t value) +{ + switch (offset) { + case 0x300: /* SYS_DMA_LCD_CTRL */ + s->src = (value & 0x40) ? imif : emiff; + s->condition = 0; + /* Assume no bus errors and thus no BUS_ERROR irq bits. */ + s->interrupts = (value >> 1) & 1; + s->dual = value & 1; + break; + + case 0x302: /* SYS_DMA_LCD_TOP_F1_L */ + s->src_f1_top &= 0xffff0000; + s->src_f1_top |= 0x0000ffff & value; + break; + + case 0x304: /* SYS_DMA_LCD_TOP_F1_U */ + s->src_f1_top &= 0x0000ffff; + s->src_f1_top |= (uint32_t)value << 16; + break; + + case 0x306: /* SYS_DMA_LCD_BOT_F1_L */ + s->src_f1_bottom &= 0xffff0000; + s->src_f1_bottom |= 0x0000ffff & value; + break; + + case 0x308: /* SYS_DMA_LCD_BOT_F1_U */ + s->src_f1_bottom &= 0x0000ffff; + s->src_f1_bottom |= (uint32_t)value << 16; + break; + + case 0x30a: /* SYS_DMA_LCD_TOP_F2_L */ + s->src_f2_top &= 0xffff0000; + s->src_f2_top |= 0x0000ffff & value; + break; + + case 0x30c: /* SYS_DMA_LCD_TOP_F2_U */ + s->src_f2_top &= 0x0000ffff; + s->src_f2_top |= (uint32_t)value << 16; + break; + + case 0x30e: /* SYS_DMA_LCD_BOT_F2_L */ + s->src_f2_bottom &= 0xffff0000; + s->src_f2_bottom |= 0x0000ffff & value; + break; + + case 0x310: /* SYS_DMA_LCD_BOT_F2_U */ + s->src_f2_bottom &= 0x0000ffff; + s->src_f2_bottom |= (uint32_t)value << 16; + break; + + default: + return 1; + } + return 0; +} + +static int omap_dma_3_1_lcd_read(struct omap_dma_lcd_channel_s *s, int offset, + uint16_t *ret) +{ + int i; + + switch (offset) { + case 0x300: /* SYS_DMA_LCD_CTRL */ + i = s->condition; + s->condition = 0; + qemu_irq_lower(s->irq); + *ret = ((s->src == imif) << 6) | (i << 3) | + (s->interrupts << 1) | s->dual; + break; + + case 0x302: /* SYS_DMA_LCD_TOP_F1_L */ + *ret = s->src_f1_top & 0xffff; + break; + + case 0x304: /* SYS_DMA_LCD_TOP_F1_U */ + *ret = s->src_f1_top >> 16; + break; + + case 0x306: /* SYS_DMA_LCD_BOT_F1_L */ + *ret = s->src_f1_bottom & 0xffff; + break; + + case 0x308: /* SYS_DMA_LCD_BOT_F1_U */ + *ret = s->src_f1_bottom >> 16; + break; + + case 0x30a: /* SYS_DMA_LCD_TOP_F2_L */ + *ret = s->src_f2_top & 0xffff; + break; + + case 0x30c: /* SYS_DMA_LCD_TOP_F2_U */ + *ret = s->src_f2_top >> 16; + break; + + case 0x30e: /* SYS_DMA_LCD_BOT_F2_L */ + *ret = s->src_f2_bottom & 0xffff; + break; + + case 0x310: /* SYS_DMA_LCD_BOT_F2_U */ + *ret = s->src_f2_bottom >> 16; + break; + + default: + return 1; + } + return 0; +} + +static int omap_dma_sys_write(struct omap_dma_s *s, int offset, uint16_t value) +{ + switch (offset) { + case 0x400: /* SYS_DMA_GCR */ + s->gcr = value; + break; + + case 0x404: /* DMA_GSCR */ + if (value & 0x8) + omap_dma_disable_3_1_mapping(s); + else + 
omap_dma_enable_3_1_mapping(s); + break; + + case 0x408: /* DMA_GRST */ + if (value & 0x1) + omap_dma_reset(s->dma); + break; + + default: + return 1; + } + return 0; +} + +static int omap_dma_sys_read(struct omap_dma_s *s, int offset, + uint16_t *ret) +{ + switch (offset) { + case 0x400: /* SYS_DMA_GCR */ + *ret = s->gcr; + break; + + case 0x404: /* DMA_GSCR */ + *ret = s->omap_3_1_mapping_disabled << 3; + break; + + case 0x408: /* DMA_GRST */ + *ret = 0; + break; + + case 0x442: /* DMA_HW_ID */ + case 0x444: /* DMA_PCh2_ID */ + case 0x446: /* DMA_PCh0_ID */ + case 0x448: /* DMA_PCh1_ID */ + case 0x44a: /* DMA_PChG_ID */ + case 0x44c: /* DMA_PChD_ID */ + *ret = 1; + break; + + case 0x44e: /* DMA_CAPS_0_U */ + *ret = (s->caps[0] >> 16) & 0xffff; + break; + case 0x450: /* DMA_CAPS_0_L */ + *ret = (s->caps[0] >> 0) & 0xffff; + break; + + case 0x452: /* DMA_CAPS_1_U */ + *ret = (s->caps[1] >> 16) & 0xffff; + break; + case 0x454: /* DMA_CAPS_1_L */ + *ret = (s->caps[1] >> 0) & 0xffff; + break; + + case 0x456: /* DMA_CAPS_2 */ + *ret = s->caps[2]; + break; + + case 0x458: /* DMA_CAPS_3 */ + *ret = s->caps[3]; + break; + + case 0x45a: /* DMA_CAPS_4 */ + *ret = s->caps[4]; + break; + + case 0x460: /* DMA_PCh2_SR */ + case 0x480: /* DMA_PCh0_SR */ + case 0x482: /* DMA_PCh1_SR */ + case 0x4c0: /* DMA_PChD_SR_0 */ + qemu_log_mask(LOG_UNIMP, + "%s: Physical Channel Status Registers not implemented\n", + __func__); + *ret = 0xff; + break; + + default: + return 1; + } + return 0; +} + +static uint64_t omap_dma_read(void *opaque, hwaddr addr, + unsigned size) +{ + struct omap_dma_s *s = (struct omap_dma_s *) opaque; + int reg, ch; + uint16_t ret; + + if (size != 2) { + return omap_badwidth_read16(opaque, addr); + } + + switch (addr) { + case 0x300 ... 0x3fe: + if (s->model <= omap_dma_3_1 || !s->omap_3_1_mapping_disabled) { + if (omap_dma_3_1_lcd_read(&s->lcd_ch, addr, &ret)) + break; + return ret; + } + /* Fall through. */ + case 0x000 ... 0x2fe: + reg = addr & 0x3f; + ch = (addr >> 6) & 0x0f; + if (omap_dma_ch_reg_read(s, &s->ch[ch], reg, &ret)) + break; + return ret; + + case 0x404 ... 0x4fe: + if (s->model <= omap_dma_3_1) + break; + /* Fall through. */ + case 0x400: + if (omap_dma_sys_read(s, addr, &ret)) + break; + return ret; + + case 0xb00 ... 0xbfe: + if (s->model == omap_dma_3_2 && s->omap_3_1_mapping_disabled) { + if (omap_dma_3_2_lcd_read(&s->lcd_ch, addr, &ret)) + break; + return ret; + } + break; + } + + OMAP_BAD_REG(addr); + return 0; +} + +static void omap_dma_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + struct omap_dma_s *s = (struct omap_dma_s *) opaque; + int reg, ch; + + if (size != 2) { + omap_badwidth_write16(opaque, addr, value); + return; + } + + switch (addr) { + case 0x300 ... 0x3fe: + if (s->model <= omap_dma_3_1 || !s->omap_3_1_mapping_disabled) { + if (omap_dma_3_1_lcd_write(&s->lcd_ch, addr, value)) + break; + return; + } + /* Fall through. */ + case 0x000 ... 0x2fe: + reg = addr & 0x3f; + ch = (addr >> 6) & 0x0f; + if (omap_dma_ch_reg_write(s, &s->ch[ch], reg, value)) + break; + return; + + case 0x404 ... 0x4fe: + if (s->model <= omap_dma_3_1) + break; + /* fall through */ + case 0x400: + if (omap_dma_sys_write(s, addr, value)) + break; + return; + + case 0xb00 ... 
0xbfe: + if (s->model == omap_dma_3_2 && s->omap_3_1_mapping_disabled) { + if (omap_dma_3_2_lcd_write(&s->lcd_ch, addr, value)) + break; + return; + } + break; + } + + OMAP_BAD_REG(addr); +} + +static const MemoryRegionOps omap_dma_ops = { + .read = omap_dma_read, + .write = omap_dma_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void omap_dma_request(void *opaque, int drq, int req) +{ + struct omap_dma_s *s = (struct omap_dma_s *) opaque; + /* The request pins are level triggered in QEMU. */ + if (req) { + if (~s->dma->drqbmp & (1ULL << drq)) { + s->dma->drqbmp |= 1ULL << drq; + omap_dma_process_request(s, drq); + } + } else + s->dma->drqbmp &= ~(1ULL << drq); +} + +/* XXX: this won't be needed once soc_dma knows about clocks. */ +static void omap_dma_clk_update(void *opaque, int line, int on) +{ + struct omap_dma_s *s = (struct omap_dma_s *) opaque; + int i; + + s->dma->freq = omap_clk_getrate(s->clk); + + for (i = 0; i < s->chans; i ++) + if (s->ch[i].active) + soc_dma_set_request(s->ch[i].dma, on); +} + +static void omap_dma_setcaps(struct omap_dma_s *s) +{ + switch (s->model) { + default: + case omap_dma_3_1: + break; + case omap_dma_3_2: + case omap_dma_4: + /* XXX Only available for sDMA */ + s->caps[0] = + (1 << 19) | /* Constant Fill Capability */ + (1 << 18); /* Transparent BLT Capability */ + s->caps[1] = + (1 << 1); /* 1-bit palettized capability (DMA 3.2 only) */ + s->caps[2] = + (1 << 8) | /* SEPARATE_SRC_AND_DST_INDEX_CPBLTY */ + (1 << 7) | /* DST_DOUBLE_INDEX_ADRS_CPBLTY */ + (1 << 6) | /* DST_SINGLE_INDEX_ADRS_CPBLTY */ + (1 << 5) | /* DST_POST_INCRMNT_ADRS_CPBLTY */ + (1 << 4) | /* DST_CONST_ADRS_CPBLTY */ + (1 << 3) | /* SRC_DOUBLE_INDEX_ADRS_CPBLTY */ + (1 << 2) | /* SRC_SINGLE_INDEX_ADRS_CPBLTY */ + (1 << 1) | /* SRC_POST_INCRMNT_ADRS_CPBLTY */ + (1 << 0); /* SRC_CONST_ADRS_CPBLTY */ + s->caps[3] = + (1 << 6) | /* BLOCK_SYNCHR_CPBLTY (DMA 4 only) */ + (1 << 7) | /* PKT_SYNCHR_CPBLTY (DMA 4 only) */ + (1 << 5) | /* CHANNEL_CHAINING_CPBLTY */ + (1 << 4) | /* LCh_INTERLEAVE_CPBLTY */ + (1 << 3) | /* AUTOINIT_REPEAT_CPBLTY (DMA 3.2 only) */ + (1 << 2) | /* AUTOINIT_ENDPROG_CPBLTY (DMA 3.2 only) */ + (1 << 1) | /* FRAME_SYNCHR_CPBLTY */ + (1 << 0); /* ELMNT_SYNCHR_CPBLTY */ + s->caps[4] = + (1 << 7) | /* PKT_INTERRUPT_CPBLTY (DMA 4 only) */ + (1 << 6) | /* SYNC_STATUS_CPBLTY */ + (1 << 5) | /* BLOCK_INTERRUPT_CPBLTY */ + (1 << 4) | /* LAST_FRAME_INTERRUPT_CPBLTY */ + (1 << 3) | /* FRAME_INTERRUPT_CPBLTY */ + (1 << 2) | /* HALF_FRAME_INTERRUPT_CPBLTY */ + (1 << 1) | /* EVENT_DROP_INTERRUPT_CPBLTY */ + (1 << 0); /* TIMEOUT_INTERRUPT_CPBLTY (DMA 3.2 only) */ + break; + } +} + +struct soc_dma_s *omap_dma_init(hwaddr base, qemu_irq *irqs, + MemoryRegion *sysmem, + qemu_irq lcd_irq, struct omap_mpu_state_s *mpu, omap_clk clk, + enum omap_dma_model model) +{ + int num_irqs, memsize, i; + struct omap_dma_s *s = g_new0(struct omap_dma_s, 1); + + if (model <= omap_dma_3_1) { + num_irqs = 6; + memsize = 0x800; + } else { + num_irqs = 16; + memsize = 0xc00; + } + s->model = model; + s->mpu = mpu; + s->clk = clk; + s->lcd_ch.irq = lcd_irq; + s->lcd_ch.mpu = mpu; + + s->dma = soc_dma_init((model <= omap_dma_3_1) ? 
9 : 16); + s->dma->freq = omap_clk_getrate(clk); + s->dma->transfer_fn = omap_dma_transfer_generic; + s->dma->setup_fn = omap_dma_transfer_setup; + s->dma->drq = qemu_allocate_irqs(omap_dma_request, s, 32); + s->dma->opaque = s; + + while (num_irqs --) + s->ch[num_irqs].irq = irqs[num_irqs]; + for (i = 0; i < 3; i ++) { + s->ch[i].sibling = &s->ch[i + 6]; + s->ch[i + 6].sibling = &s->ch[i]; + } + for (i = (model <= omap_dma_3_1) ? 8 : 15; i >= 0; i --) { + s->ch[i].dma = &s->dma->ch[i]; + s->dma->ch[i].opaque = &s->ch[i]; + } + + omap_dma_setcaps(s); + omap_clk_adduser(s->clk, qemu_allocate_irq(omap_dma_clk_update, s, 0)); + omap_dma_reset(s->dma); + omap_dma_clk_update(s, 0, 1); + + memory_region_init_io(&s->iomem, NULL, &omap_dma_ops, s, "omap.dma", memsize); + memory_region_add_subregion(sysmem, base, &s->iomem); + + mpu->drq = s->dma->drq; + + return s->dma; +} + +static void omap_dma_interrupts_4_update(struct omap_dma_s *s) +{ + struct omap_dma_channel_s *ch = s->ch; + uint32_t bmp, bit; + + for (bmp = 0, bit = 1; bit; ch ++, bit <<= 1) + if (ch->status) { + bmp |= bit; + ch->cstatus |= ch->status; + ch->status = 0; + } + if ((s->irqstat[0] |= s->irqen[0] & bmp)) + qemu_irq_raise(s->irq[0]); + if ((s->irqstat[1] |= s->irqen[1] & bmp)) + qemu_irq_raise(s->irq[1]); + if ((s->irqstat[2] |= s->irqen[2] & bmp)) + qemu_irq_raise(s->irq[2]); + if ((s->irqstat[3] |= s->irqen[3] & bmp)) + qemu_irq_raise(s->irq[3]); +} + +static uint64_t omap_dma4_read(void *opaque, hwaddr addr, + unsigned size) +{ + struct omap_dma_s *s = (struct omap_dma_s *) opaque; + int irqn = 0, chnum; + struct omap_dma_channel_s *ch; + + if (size == 1) { + return omap_badwidth_read16(opaque, addr); + } + + switch (addr) { + case 0x00: /* DMA4_REVISION */ + return 0x40; + + case 0x14: /* DMA4_IRQSTATUS_L3 */ + irqn ++; + /* fall through */ + case 0x10: /* DMA4_IRQSTATUS_L2 */ + irqn ++; + /* fall through */ + case 0x0c: /* DMA4_IRQSTATUS_L1 */ + irqn ++; + /* fall through */ + case 0x08: /* DMA4_IRQSTATUS_L0 */ + return s->irqstat[irqn]; + + case 0x24: /* DMA4_IRQENABLE_L3 */ + irqn ++; + /* fall through */ + case 0x20: /* DMA4_IRQENABLE_L2 */ + irqn ++; + /* fall through */ + case 0x1c: /* DMA4_IRQENABLE_L1 */ + irqn ++; + /* fall through */ + case 0x18: /* DMA4_IRQENABLE_L0 */ + return s->irqen[irqn]; + + case 0x28: /* DMA4_SYSSTATUS */ + return 1; /* RESETDONE */ + + case 0x2c: /* DMA4_OCP_SYSCONFIG */ + return s->ocp; + + case 0x64: /* DMA4_CAPS_0 */ + return s->caps[0]; + case 0x6c: /* DMA4_CAPS_2 */ + return s->caps[2]; + case 0x70: /* DMA4_CAPS_3 */ + return s->caps[3]; + case 0x74: /* DMA4_CAPS_4 */ + return s->caps[4]; + + case 0x78: /* DMA4_GCR */ + return s->gcr; + + case 0x80 ... 
0xfff: + addr -= 0x80; + chnum = addr / 0x60; + ch = s->ch + chnum; + addr -= chnum * 0x60; + break; + + default: + OMAP_BAD_REG(addr); + return 0; + } + + /* Per-channel registers */ + switch (addr) { + case 0x00: /* DMA4_CCR */ + return (ch->buf_disable << 25) | + (ch->src_sync << 24) | + (ch->prefetch << 23) | + ((ch->sync & 0x60) << 14) | + (ch->bs << 18) | + (ch->transparent_copy << 17) | + (ch->constant_fill << 16) | + (ch->mode[1] << 14) | + (ch->mode[0] << 12) | + (0 << 10) | (0 << 9) | + (ch->suspend << 8) | + (ch->enable << 7) | + (ch->priority << 6) | + (ch->fs << 5) | (ch->sync & 0x1f); + + case 0x04: /* DMA4_CLNK_CTRL */ + return (ch->link_enabled << 15) | ch->link_next_ch; + + case 0x08: /* DMA4_CICR */ + return ch->interrupts; + + case 0x0c: /* DMA4_CSR */ + return ch->cstatus; + + case 0x10: /* DMA4_CSDP */ + return (ch->endian[0] << 21) | + (ch->endian_lock[0] << 20) | + (ch->endian[1] << 19) | + (ch->endian_lock[1] << 18) | + (ch->write_mode << 16) | + (ch->burst[1] << 14) | + (ch->pack[1] << 13) | + (ch->translate[1] << 9) | + (ch->burst[0] << 7) | + (ch->pack[0] << 6) | + (ch->translate[0] << 2) | + (ch->data_type >> 1); + + case 0x14: /* DMA4_CEN */ + return ch->elements; + + case 0x18: /* DMA4_CFN */ + return ch->frames; + + case 0x1c: /* DMA4_CSSA */ + return ch->addr[0]; + + case 0x20: /* DMA4_CDSA */ + return ch->addr[1]; + + case 0x24: /* DMA4_CSEI */ + return ch->element_index[0]; + + case 0x28: /* DMA4_CSFI */ + return ch->frame_index[0]; + + case 0x2c: /* DMA4_CDEI */ + return ch->element_index[1]; + + case 0x30: /* DMA4_CDFI */ + return ch->frame_index[1]; + + case 0x34: /* DMA4_CSAC */ + return ch->active_set.src & 0xffff; + + case 0x38: /* DMA4_CDAC */ + return ch->active_set.dest & 0xffff; + + case 0x3c: /* DMA4_CCEN */ + return ch->active_set.element; + + case 0x40: /* DMA4_CCFN */ + return ch->active_set.frame; + + case 0x44: /* DMA4_COLOR */ + /* XXX only in sDMA */ + return ch->color; + + default: + OMAP_BAD_REG(addr); + return 0; + } +} + +static void omap_dma4_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + struct omap_dma_s *s = (struct omap_dma_s *) opaque; + int chnum, irqn = 0; + struct omap_dma_channel_s *ch; + + if (size == 1) { + omap_badwidth_write16(opaque, addr, value); + return; + } + + switch (addr) { + case 0x14: /* DMA4_IRQSTATUS_L3 */ + irqn ++; + /* fall through */ + case 0x10: /* DMA4_IRQSTATUS_L2 */ + irqn ++; + /* fall through */ + case 0x0c: /* DMA4_IRQSTATUS_L1 */ + irqn ++; + /* fall through */ + case 0x08: /* DMA4_IRQSTATUS_L0 */ + s->irqstat[irqn] &= ~value; + if (!s->irqstat[irqn]) + qemu_irq_lower(s->irq[irqn]); + return; + + case 0x24: /* DMA4_IRQENABLE_L3 */ + irqn ++; + /* fall through */ + case 0x20: /* DMA4_IRQENABLE_L2 */ + irqn ++; + /* fall through */ + case 0x1c: /* DMA4_IRQENABLE_L1 */ + irqn ++; + /* fall through */ + case 0x18: /* DMA4_IRQENABLE_L0 */ + s->irqen[irqn] = value; + return; + + case 0x2c: /* DMA4_OCP_SYSCONFIG */ + if (value & 2) /* SOFTRESET */ + omap_dma_reset(s->dma); + s->ocp = value & 0x3321; + if (((s->ocp >> 12) & 3) == 3) { /* MIDLEMODE */ + qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid DMA power mode\n", + __func__); + } + return; + + case 0x78: /* DMA4_GCR */ + s->gcr = value & 0x00ff00ff; + if ((value & 0xff) == 0x00) { /* MAX_CHANNEL_FIFO_DEPTH */ + qemu_log_mask(LOG_GUEST_ERROR, "%s: wrong FIFO depth in GCR\n", + __func__); + } + return; + + case 0x80 ... 
0xfff: + addr -= 0x80; + chnum = addr / 0x60; + ch = s->ch + chnum; + addr -= chnum * 0x60; + break; + + case 0x00: /* DMA4_REVISION */ + case 0x28: /* DMA4_SYSSTATUS */ + case 0x64: /* DMA4_CAPS_0 */ + case 0x6c: /* DMA4_CAPS_2 */ + case 0x70: /* DMA4_CAPS_3 */ + case 0x74: /* DMA4_CAPS_4 */ + OMAP_RO_REG(addr); + return; + + default: + OMAP_BAD_REG(addr); + return; + } + + /* Per-channel registers */ + switch (addr) { + case 0x00: /* DMA4_CCR */ + ch->buf_disable = (value >> 25) & 1; + ch->src_sync = (value >> 24) & 1; /* XXX For CamDMA must be 1 */ + if (ch->buf_disable && !ch->src_sync) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Buffering disable is not allowed in " + "destination synchronised mode\n", __func__); + } + ch->prefetch = (value >> 23) & 1; + ch->bs = (value >> 18) & 1; + ch->transparent_copy = (value >> 17) & 1; + ch->constant_fill = (value >> 16) & 1; + ch->mode[1] = (omap_dma_addressing_t) ((value & 0xc000) >> 14); + ch->mode[0] = (omap_dma_addressing_t) ((value & 0x3000) >> 12); + ch->suspend = (value & 0x0100) >> 8; + ch->priority = (value & 0x0040) >> 6; + ch->fs = (value & 0x0020) >> 5; + if (ch->fs && ch->bs && ch->mode[0] && ch->mode[1]) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: For a packet transfer at least one port " + "must be constant-addressed\n", __func__); + } + ch->sync = (value & 0x001f) | ((value >> 14) & 0x0060); + /* XXX must be 0x01 for CamDMA */ + + if (value & 0x0080) + omap_dma_enable_channel(s, ch); + else + omap_dma_disable_channel(s, ch); + + break; + + case 0x04: /* DMA4_CLNK_CTRL */ + ch->link_enabled = (value >> 15) & 0x1; + ch->link_next_ch = value & 0x1f; + break; + + case 0x08: /* DMA4_CICR */ + ch->interrupts = value & 0x09be; + break; + + case 0x0c: /* DMA4_CSR */ + ch->cstatus &= ~value; + break; + + case 0x10: /* DMA4_CSDP */ + ch->endian[0] =(value >> 21) & 1; + ch->endian_lock[0] =(value >> 20) & 1; + ch->endian[1] =(value >> 19) & 1; + ch->endian_lock[1] =(value >> 18) & 1; + if (ch->endian[0] != ch->endian[1]) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: DMA endianness conversion enable attempt\n", + __func__); + } + ch->write_mode = (value >> 16) & 3; + ch->burst[1] = (value & 0xc000) >> 14; + ch->pack[1] = (value & 0x2000) >> 13; + ch->translate[1] = (value & 0x1e00) >> 9; + ch->burst[0] = (value & 0x0180) >> 7; + ch->pack[0] = (value & 0x0040) >> 6; + ch->translate[0] = (value & 0x003c) >> 2; + if (ch->translate[0] | ch->translate[1]) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: bad MReqAddressTranslate sideband signal\n", + __func__); + } + ch->data_type = 1 << (value & 3); + if ((value & 3) == 3) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: bad data_type for DMA channel\n", __func__); + ch->data_type >>= 1; + } + break; + + case 0x14: /* DMA4_CEN */ + ch->set_update = 1; + ch->elements = value & 0xffffff; + break; + + case 0x18: /* DMA4_CFN */ + ch->frames = value & 0xffff; + ch->set_update = 1; + break; + + case 0x1c: /* DMA4_CSSA */ + ch->addr[0] = (hwaddr) (uint32_t) value; + ch->set_update = 1; + break; + + case 0x20: /* DMA4_CDSA */ + ch->addr[1] = (hwaddr) (uint32_t) value; + ch->set_update = 1; + break; + + case 0x24: /* DMA4_CSEI */ + ch->element_index[0] = (int16_t) value; + ch->set_update = 1; + break; + + case 0x28: /* DMA4_CSFI */ + ch->frame_index[0] = (int32_t) value; + ch->set_update = 1; + break; + + case 0x2c: /* DMA4_CDEI */ + ch->element_index[1] = (int16_t) value; + ch->set_update = 1; + break; + + case 0x30: /* DMA4_CDFI */ + ch->frame_index[1] = (int32_t) value; + ch->set_update = 1; + break; + + case 0x44: /* 
DMA4_COLOR */ + /* XXX only in sDMA */ + ch->color = value; + break; + + case 0x34: /* DMA4_CSAC */ + case 0x38: /* DMA4_CDAC */ + case 0x3c: /* DMA4_CCEN */ + case 0x40: /* DMA4_CCFN */ + OMAP_RO_REG(addr); + break; + + default: + OMAP_BAD_REG(addr); + } +} + +static const MemoryRegionOps omap_dma4_ops = { + .read = omap_dma4_read, + .write = omap_dma4_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +struct soc_dma_s *omap_dma4_init(hwaddr base, qemu_irq *irqs, + MemoryRegion *sysmem, + struct omap_mpu_state_s *mpu, int fifo, + int chans, omap_clk iclk, omap_clk fclk) +{ + int i; + struct omap_dma_s *s = g_new0(struct omap_dma_s, 1); + + s->model = omap_dma_4; + s->chans = chans; + s->mpu = mpu; + s->clk = fclk; + + s->dma = soc_dma_init(s->chans); + s->dma->freq = omap_clk_getrate(fclk); + s->dma->transfer_fn = omap_dma_transfer_generic; + s->dma->setup_fn = omap_dma_transfer_setup; + s->dma->drq = qemu_allocate_irqs(omap_dma_request, s, 64); + s->dma->opaque = s; + for (i = 0; i < s->chans; i ++) { + s->ch[i].dma = &s->dma->ch[i]; + s->dma->ch[i].opaque = &s->ch[i]; + } + + memcpy(&s->irq, irqs, sizeof(s->irq)); + s->intr_update = omap_dma_interrupts_4_update; + + omap_dma_setcaps(s); + omap_clk_adduser(s->clk, qemu_allocate_irq(omap_dma_clk_update, s, 0)); + omap_dma_reset(s->dma); + omap_dma_clk_update(s, 0, !!s->dma->freq); + + memory_region_init_io(&s->iomem, NULL, &omap_dma4_ops, s, "omap.dma4", 0x1000); + memory_region_add_subregion(sysmem, base, &s->iomem); + + mpu->drq = s->dma->drq; + + return s->dma; +} + +struct omap_dma_lcd_channel_s *omap_dma_get_lcdch(struct soc_dma_s *dma) +{ + struct omap_dma_s *s = dma->opaque; + + return &s->lcd_ch; +} diff --git a/hw/dma/pl080.c b/hw/dma/pl080.c new file mode 100644 index 000000000..2627307cc --- /dev/null +++ b/hw/dma/pl080.c @@ -0,0 +1,449 @@ +/* + * Arm PrimeCell PL080/PL081 DMA controller + * + * Copyright (c) 2006 CodeSourcery. + * Written by Paul Brook + * + * This code is licensed under the GPL. 
+ */ + +#include "qemu/osdep.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "hw/dma/pl080.h" +#include "hw/hw.h" +#include "hw/irq.h" +#include "hw/qdev-properties.h" +#include "qapi/error.h" + +#define PL080_CONF_E 0x1 +#define PL080_CONF_M1 0x2 +#define PL080_CONF_M2 0x4 + +#define PL080_CCONF_H 0x40000 +#define PL080_CCONF_A 0x20000 +#define PL080_CCONF_L 0x10000 +#define PL080_CCONF_ITC 0x08000 +#define PL080_CCONF_IE 0x04000 +#define PL080_CCONF_E 0x00001 + +#define PL080_CCTRL_I 0x80000000 +#define PL080_CCTRL_DI 0x08000000 +#define PL080_CCTRL_SI 0x04000000 +#define PL080_CCTRL_D 0x02000000 +#define PL080_CCTRL_S 0x01000000 + +static const VMStateDescription vmstate_pl080_channel = { + .name = "pl080_channel", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(src, pl080_channel), + VMSTATE_UINT32(dest, pl080_channel), + VMSTATE_UINT32(lli, pl080_channel), + VMSTATE_UINT32(ctrl, pl080_channel), + VMSTATE_UINT32(conf, pl080_channel), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_pl080 = { + .name = "pl080", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT8(tc_int, PL080State), + VMSTATE_UINT8(tc_mask, PL080State), + VMSTATE_UINT8(err_int, PL080State), + VMSTATE_UINT8(err_mask, PL080State), + VMSTATE_UINT32(conf, PL080State), + VMSTATE_UINT32(sync, PL080State), + VMSTATE_UINT32(req_single, PL080State), + VMSTATE_UINT32(req_burst, PL080State), + VMSTATE_UINT8(tc_int, PL080State), + VMSTATE_UINT8(tc_int, PL080State), + VMSTATE_UINT8(tc_int, PL080State), + VMSTATE_STRUCT_ARRAY(chan, PL080State, PL080_MAX_CHANNELS, + 1, vmstate_pl080_channel, pl080_channel), + VMSTATE_INT32(running, PL080State), + VMSTATE_END_OF_LIST() + } +}; + +static const unsigned char pl080_id[] = +{ 0x80, 0x10, 0x04, 0x0a, 0x0d, 0xf0, 0x05, 0xb1 }; + +static const unsigned char pl081_id[] = +{ 0x81, 0x10, 0x04, 0x0a, 0x0d, 0xf0, 0x05, 0xb1 }; + +static void pl080_update(PL080State *s) +{ + bool tclevel = (s->tc_int & s->tc_mask); + bool errlevel = (s->err_int & s->err_mask); + + qemu_set_irq(s->interr, errlevel); + qemu_set_irq(s->inttc, tclevel); + qemu_set_irq(s->irq, errlevel || tclevel); +} + +static void pl080_run(PL080State *s) +{ + int c; + int flow; + pl080_channel *ch; + int swidth; + int dwidth; + int xsize; + int n; + int src_id; + int dest_id; + int size; + uint8_t buff[4]; + uint32_t req; + + s->tc_mask = 0; + for (c = 0; c < s->nchannels; c++) { + if (s->chan[c].conf & PL080_CCONF_ITC) + s->tc_mask |= 1 << c; + if (s->chan[c].conf & PL080_CCONF_IE) + s->err_mask |= 1 << c; + } + + if ((s->conf & PL080_CONF_E) == 0) + return; + + /* If we are already in the middle of a DMA operation then indicate that + there may be new DMA requests and return immediately. */ + if (s->running) { + s->running++; + return; + } + s->running = 1; + while (s->running) { + for (c = 0; c < s->nchannels; c++) { + ch = &s->chan[c]; +again: + /* Test if this channel has any pending DMA requests. 
*/ + if ((ch->conf & (PL080_CCONF_H | PL080_CCONF_E)) + != PL080_CCONF_E) + continue; + flow = (ch->conf >> 11) & 7; + if (flow >= 4) { + hw_error( + "pl080_run: Peripheral flow control not implemented\n"); + } + src_id = (ch->conf >> 1) & 0x1f; + dest_id = (ch->conf >> 6) & 0x1f; + size = ch->ctrl & 0xfff; + req = s->req_single | s->req_burst; + switch (flow) { + case 0: + break; + case 1: + if ((req & (1u << dest_id)) == 0) + size = 0; + break; + case 2: + if ((req & (1u << src_id)) == 0) + size = 0; + break; + case 3: + if ((req & (1u << src_id)) == 0 + || (req & (1u << dest_id)) == 0) + size = 0; + break; + } + if (!size) + continue; + + /* Transfer one element. */ + /* ??? Should transfer multiple elements for a burst request. */ + /* ??? Unclear what the proper behavior is when source and + destination widths are different. */ + swidth = 1 << ((ch->ctrl >> 18) & 7); + dwidth = 1 << ((ch->ctrl >> 21) & 7); + for (n = 0; n < dwidth; n+= swidth) { + address_space_read(&s->downstream_as, ch->src, + MEMTXATTRS_UNSPECIFIED, buff + n, swidth); + if (ch->ctrl & PL080_CCTRL_SI) + ch->src += swidth; + } + xsize = (dwidth < swidth) ? swidth : dwidth; + /* ??? This may pad the value incorrectly for dwidth < 32. */ + for (n = 0; n < xsize; n += dwidth) { + address_space_write(&s->downstream_as, ch->dest + n, + MEMTXATTRS_UNSPECIFIED, buff + n, dwidth); + if (ch->ctrl & PL080_CCTRL_DI) + ch->dest += swidth; + } + + size--; + ch->ctrl = (ch->ctrl & 0xfffff000) | size; + if (size == 0) { + /* Transfer complete. */ + if (ch->lli) { + ch->src = address_space_ldl_le(&s->downstream_as, + ch->lli, + MEMTXATTRS_UNSPECIFIED, + NULL); + ch->dest = address_space_ldl_le(&s->downstream_as, + ch->lli + 4, + MEMTXATTRS_UNSPECIFIED, + NULL); + ch->ctrl = address_space_ldl_le(&s->downstream_as, + ch->lli + 12, + MEMTXATTRS_UNSPECIFIED, + NULL); + ch->lli = address_space_ldl_le(&s->downstream_as, + ch->lli + 8, + MEMTXATTRS_UNSPECIFIED, + NULL); + } else { + ch->conf &= ~PL080_CCONF_E; + } + if (ch->ctrl & PL080_CCTRL_I) { + s->tc_int |= 1 << c; + } + } + goto again; + } + if (--s->running) + s->running = 1; + } +} + +static uint64_t pl080_read(void *opaque, hwaddr offset, + unsigned size) +{ + PL080State *s = (PL080State *)opaque; + uint32_t i; + uint32_t mask; + + if (offset >= 0xfe0 && offset < 0x1000) { + if (s->nchannels == 8) { + return pl080_id[(offset - 0xfe0) >> 2]; + } else { + return pl081_id[(offset - 0xfe0) >> 2]; + } + } + if (offset >= 0x100 && offset < 0x200) { + i = (offset & 0xe0) >> 5; + if (i >= s->nchannels) + goto bad_offset; + switch ((offset >> 2) & 7) { + case 0: /* SrcAddr */ + return s->chan[i].src; + case 1: /* DestAddr */ + return s->chan[i].dest; + case 2: /* LLI */ + return s->chan[i].lli; + case 3: /* Control */ + return s->chan[i].ctrl; + case 4: /* Configuration */ + return s->chan[i].conf; + default: + goto bad_offset; + } + } + switch (offset >> 2) { + case 0: /* IntStatus */ + return (s->tc_int & s->tc_mask) | (s->err_int & s->err_mask); + case 1: /* IntTCStatus */ + return (s->tc_int & s->tc_mask); + case 3: /* IntErrorStatus */ + return (s->err_int & s->err_mask); + case 5: /* RawIntTCStatus */ + return s->tc_int; + case 6: /* RawIntErrorStatus */ + return s->err_int; + case 7: /* EnbldChns */ + mask = 0; + for (i = 0; i < s->nchannels; i++) { + if (s->chan[i].conf & PL080_CCONF_E) + mask |= 1 << i; + } + return mask; + case 8: /* SoftBReq */ + case 9: /* SoftSReq */ + case 10: /* SoftLBReq */ + case 11: /* SoftLSReq */ + /* ??? Implement these. 
*/ + return 0; + case 12: /* Configuration */ + return s->conf; + case 13: /* Sync */ + return s->sync; + default: + bad_offset: + qemu_log_mask(LOG_GUEST_ERROR, + "pl080_read: Bad offset %x\n", (int)offset); + return 0; + } +} + +static void pl080_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + PL080State *s = (PL080State *)opaque; + int i; + + if (offset >= 0x100 && offset < 0x200) { + i = (offset & 0xe0) >> 5; + if (i >= s->nchannels) + goto bad_offset; + switch ((offset >> 2) & 7) { + case 0: /* SrcAddr */ + s->chan[i].src = value; + break; + case 1: /* DestAddr */ + s->chan[i].dest = value; + break; + case 2: /* LLI */ + s->chan[i].lli = value; + break; + case 3: /* Control */ + s->chan[i].ctrl = value; + break; + case 4: /* Configuration */ + s->chan[i].conf = value; + pl080_run(s); + break; + } + return; + } + switch (offset >> 2) { + case 2: /* IntTCClear */ + s->tc_int &= ~value; + break; + case 4: /* IntErrorClear */ + s->err_int &= ~value; + break; + case 8: /* SoftBReq */ + case 9: /* SoftSReq */ + case 10: /* SoftLBReq */ + case 11: /* SoftLSReq */ + /* ??? Implement these. */ + qemu_log_mask(LOG_UNIMP, "pl080_write: Soft DMA not implemented\n"); + break; + case 12: /* Configuration */ + s->conf = value; + if (s->conf & (PL080_CONF_M1 | PL080_CONF_M2)) { + qemu_log_mask(LOG_UNIMP, + "pl080_write: Big-endian DMA not implemented\n"); + } + pl080_run(s); + break; + case 13: /* Sync */ + s->sync = value; + break; + default: + bad_offset: + qemu_log_mask(LOG_GUEST_ERROR, + "pl080_write: Bad offset %x\n", (int)offset); + } + pl080_update(s); +} + +static const MemoryRegionOps pl080_ops = { + .read = pl080_read, + .write = pl080_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void pl080_reset(DeviceState *dev) +{ + PL080State *s = PL080(dev); + int i; + + s->tc_int = 0; + s->tc_mask = 0; + s->err_int = 0; + s->err_mask = 0; + s->conf = 0; + s->sync = 0; + s->req_single = 0; + s->req_burst = 0; + s->running = 0; + + for (i = 0; i < s->nchannels; i++) { + s->chan[i].src = 0; + s->chan[i].dest = 0; + s->chan[i].lli = 0; + s->chan[i].ctrl = 0; + s->chan[i].conf = 0; + } +} + +static void pl080_init(Object *obj) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + PL080State *s = PL080(obj); + + memory_region_init_io(&s->iomem, OBJECT(s), &pl080_ops, s, "pl080", 0x1000); + sysbus_init_mmio(sbd, &s->iomem); + sysbus_init_irq(sbd, &s->irq); + sysbus_init_irq(sbd, &s->interr); + sysbus_init_irq(sbd, &s->inttc); + s->nchannels = 8; +} + +static void pl080_realize(DeviceState *dev, Error **errp) +{ + PL080State *s = PL080(dev); + + if (!s->downstream) { + error_setg(errp, "PL080 'downstream' link not set"); + return; + } + + address_space_init(&s->downstream_as, s->downstream, "pl080-downstream"); +} + +static void pl081_init(Object *obj) +{ + PL080State *s = PL080(obj); + + s->nchannels = 2; +} + +static Property pl080_properties[] = { + DEFINE_PROP_LINK("downstream", PL080State, downstream, + TYPE_MEMORY_REGION, MemoryRegion *), + DEFINE_PROP_END_OF_LIST(), +}; + +static void pl080_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + + dc->vmsd = &vmstate_pl080; + dc->realize = pl080_realize; + device_class_set_props(dc, pl080_properties); + dc->reset = pl080_reset; +} + +static const TypeInfo pl080_info = { + .name = TYPE_PL080, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(PL080State), + .instance_init = pl080_init, + .class_init = pl080_class_init, +}; + +static const TypeInfo pl081_info = { + .name = TYPE_PL081, + 
.parent = TYPE_PL080, + .instance_init = pl081_init, +}; + +/* The PL080 and PL081 are the same except for the number of channels + they implement (8 and 2 respectively). */ +static void pl080_register_types(void) +{ + type_register_static(&pl080_info); + type_register_static(&pl081_info); +} + +type_init(pl080_register_types) diff --git a/hw/dma/pl330.c b/hw/dma/pl330.c new file mode 100644 index 000000000..0cb46191c --- /dev/null +++ b/hw/dma/pl330.c @@ -0,0 +1,1702 @@ +/* + * ARM PrimeCell PL330 DMA Controller + * + * Copyright (c) 2009 Samsung Electronics. + * Contributed by Kirill Batuzov + * Copyright (c) 2012 Peter A.G. Crosthwaite (peter.crosthwaite@petalogix.com) + * Copyright (c) 2012 PetaLogix Pty Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; version 2 or later. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see <http://www.gnu.org/licenses/>. + */ + +#include "qemu/osdep.h" +#include "qemu-common.h" +#include "hw/irq.h" +#include "hw/qdev-properties.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "qapi/error.h" +#include "qemu/timer.h" +#include "sysemu/dma.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "trace.h" +#include "qom/object.h" + +#ifndef PL330_ERR_DEBUG +#define PL330_ERR_DEBUG 0 +#endif + +#define PL330_PERIPH_NUM 32 +#define PL330_MAX_BURST_LEN 128 +#define PL330_INSN_MAXSIZE 6 + +#define PL330_FIFO_OK 0 +#define PL330_FIFO_STALL 1 +#define PL330_FIFO_ERR (-1) + +#define PL330_FAULT_UNDEF_INSTR (1 << 0) +#define PL330_FAULT_OPERAND_INVALID (1 << 1) +#define PL330_FAULT_DMAGO_ERR (1 << 4) +#define PL330_FAULT_EVENT_ERR (1 << 5) +#define PL330_FAULT_CH_PERIPH_ERR (1 << 6) +#define PL330_FAULT_CH_RDWR_ERR (1 << 7) +#define PL330_FAULT_ST_DATA_UNAVAILABLE (1 << 12) +#define PL330_FAULT_FIFOEMPTY_ERR (1 << 13) +#define PL330_FAULT_INSTR_FETCH_ERR (1 << 16) +#define PL330_FAULT_DATA_WRITE_ERR (1 << 17) +#define PL330_FAULT_DATA_READ_ERR (1 << 18) +#define PL330_FAULT_DBG_INSTR (1 << 30) +#define PL330_FAULT_LOCKUP_ERR (1 << 31) + +#define PL330_UNTAGGED 0xff + +#define PL330_SINGLE 0x0 +#define PL330_BURST 0x1 + +#define PL330_WATCHDOG_LIMIT 1024 + +/* IOMEM mapped registers */ +#define PL330_REG_DSR 0x000 +#define PL330_REG_DPC 0x004 +#define PL330_REG_INTEN 0x020 +#define PL330_REG_INT_EVENT_RIS 0x024 +#define PL330_REG_INTMIS 0x028 +#define PL330_REG_INTCLR 0x02C +#define PL330_REG_FSRD 0x030 +#define PL330_REG_FSRC 0x034 +#define PL330_REG_FTRD 0x038 +#define PL330_REG_FTR_BASE 0x040 +#define PL330_REG_CSR_BASE 0x100 +#define PL330_REG_CPC_BASE 0x104 +#define PL330_REG_CHANCTRL 0x400 +#define PL330_REG_DBGSTATUS 0xD00 +#define PL330_REG_DBGCMD 0xD04 +#define PL330_REG_DBGINST0 0xD08 +#define PL330_REG_DBGINST1 0xD0C +#define PL330_REG_CR0_BASE 0xE00 +#define PL330_REG_PERIPH_ID 0xFE0 + +#define PL330_IOMEM_SIZE 0x1000 + +#define CFG_BOOT_ADDR 2 +#define CFG_INS 3 +#define CFG_PNS 4 +#define CFG_CRD 5 + +static const uint32_t pl330_id[] = { + 0x30, 0x13, 0x24, 0x00, 0x0D, 0xF0, 0x05, 0xB1 +}; + +/* DMA channel states as they are described in PL330 Technical Reference Manual. + * Most of them will not be used in emulation. 
+ */ +typedef enum { + pl330_chan_stopped = 0, + pl330_chan_executing = 1, + pl330_chan_cache_miss = 2, + pl330_chan_updating_pc = 3, + pl330_chan_waiting_event = 4, + pl330_chan_at_barrier = 5, + pl330_chan_queue_busy = 6, + pl330_chan_waiting_periph = 7, + pl330_chan_killing = 8, + pl330_chan_completing = 9, + pl330_chan_fault_completing = 14, + pl330_chan_fault = 15, +} PL330ChanState; + +typedef struct PL330State PL330State; + +typedef struct PL330Chan { + uint32_t src; + uint32_t dst; + uint32_t pc; + uint32_t control; + uint32_t status; + uint32_t lc[2]; + uint32_t fault_type; + uint32_t watchdog_timer; + + bool ns; + uint8_t request_flag; + uint8_t wakeup; + uint8_t wfp_sbp; + + uint8_t state; + uint8_t stall; + + bool is_manager; + PL330State *parent; + uint8_t tag; +} PL330Chan; + +static const VMStateDescription vmstate_pl330_chan = { + .name = "pl330_chan", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(src, PL330Chan), + VMSTATE_UINT32(dst, PL330Chan), + VMSTATE_UINT32(pc, PL330Chan), + VMSTATE_UINT32(control, PL330Chan), + VMSTATE_UINT32(status, PL330Chan), + VMSTATE_UINT32_ARRAY(lc, PL330Chan, 2), + VMSTATE_UINT32(fault_type, PL330Chan), + VMSTATE_UINT32(watchdog_timer, PL330Chan), + VMSTATE_BOOL(ns, PL330Chan), + VMSTATE_UINT8(request_flag, PL330Chan), + VMSTATE_UINT8(wakeup, PL330Chan), + VMSTATE_UINT8(wfp_sbp, PL330Chan), + VMSTATE_UINT8(state, PL330Chan), + VMSTATE_UINT8(stall, PL330Chan), + VMSTATE_END_OF_LIST() + } +}; + +typedef struct PL330Fifo { + uint8_t *buf; + uint8_t *tag; + uint32_t head; + uint32_t num; + uint32_t buf_size; +} PL330Fifo; + +static const VMStateDescription vmstate_pl330_fifo = { + .name = "pl330_chan", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_VBUFFER_UINT32(buf, PL330Fifo, 1, NULL, buf_size), + VMSTATE_VBUFFER_UINT32(tag, PL330Fifo, 1, NULL, buf_size), + VMSTATE_UINT32(head, PL330Fifo), + VMSTATE_UINT32(num, PL330Fifo), + VMSTATE_UINT32(buf_size, PL330Fifo), + VMSTATE_END_OF_LIST() + } +}; + +typedef struct PL330QueueEntry { + uint32_t addr; + uint32_t len; + uint8_t n; + bool inc; + bool z; + uint8_t tag; + uint8_t seqn; +} PL330QueueEntry; + +static const VMStateDescription vmstate_pl330_queue_entry = { + .name = "pl330_queue_entry", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(addr, PL330QueueEntry), + VMSTATE_UINT32(len, PL330QueueEntry), + VMSTATE_UINT8(n, PL330QueueEntry), + VMSTATE_BOOL(inc, PL330QueueEntry), + VMSTATE_BOOL(z, PL330QueueEntry), + VMSTATE_UINT8(tag, PL330QueueEntry), + VMSTATE_UINT8(seqn, PL330QueueEntry), + VMSTATE_END_OF_LIST() + } +}; + +typedef struct PL330Queue { + PL330State *parent; + PL330QueueEntry *queue; + uint32_t queue_size; +} PL330Queue; + +static const VMStateDescription vmstate_pl330_queue = { + .name = "pl330_queue", + .version_id = 2, + .minimum_version_id = 2, + .fields = (VMStateField[]) { + VMSTATE_STRUCT_VARRAY_POINTER_UINT32(queue, PL330Queue, queue_size, + vmstate_pl330_queue_entry, + PL330QueueEntry), + VMSTATE_END_OF_LIST() + } +}; + +struct PL330State { + SysBusDevice parent_obj; + + MemoryRegion iomem; + qemu_irq irq_abort; + qemu_irq *irq; + + /* Config registers. cfg[5] = CfgDn. 
*/ + uint32_t cfg[6]; +#define EVENT_SEC_STATE 3 +#define PERIPH_SEC_STATE 4 + /* cfg 0 bits and pieces */ + uint32_t num_chnls; + uint8_t num_periph_req; + uint8_t num_events; + uint8_t mgr_ns_at_rst; + /* cfg 1 bits and pieces */ + uint8_t i_cache_len; + uint8_t num_i_cache_lines; + /* CRD bits and pieces */ + uint8_t data_width; + uint8_t wr_cap; + uint8_t wr_q_dep; + uint8_t rd_cap; + uint8_t rd_q_dep; + uint16_t data_buffer_dep; + + PL330Chan manager; + PL330Chan *chan; + PL330Fifo fifo; + PL330Queue read_queue; + PL330Queue write_queue; + uint8_t *lo_seqn; + uint8_t *hi_seqn; + QEMUTimer *timer; /* is used for restore dma. */ + + uint32_t inten; + uint32_t int_status; + uint32_t ev_status; + uint32_t dbg[2]; + uint8_t debug_status; + uint8_t num_faulting; + uint8_t periph_busy[PL330_PERIPH_NUM]; + + /* Memory region that DMA operation access */ + MemoryRegion *mem_mr; + AddressSpace *mem_as; +}; + +#define TYPE_PL330 "pl330" +OBJECT_DECLARE_SIMPLE_TYPE(PL330State, PL330) + +static const VMStateDescription vmstate_pl330 = { + .name = "pl330", + .version_id = 2, + .minimum_version_id = 2, + .fields = (VMStateField[]) { + VMSTATE_STRUCT(manager, PL330State, 0, vmstate_pl330_chan, PL330Chan), + VMSTATE_STRUCT_VARRAY_POINTER_UINT32(chan, PL330State, num_chnls, + vmstate_pl330_chan, PL330Chan), + VMSTATE_VBUFFER_UINT32(lo_seqn, PL330State, 1, NULL, num_chnls), + VMSTATE_VBUFFER_UINT32(hi_seqn, PL330State, 1, NULL, num_chnls), + VMSTATE_STRUCT(fifo, PL330State, 0, vmstate_pl330_fifo, PL330Fifo), + VMSTATE_STRUCT(read_queue, PL330State, 0, vmstate_pl330_queue, + PL330Queue), + VMSTATE_STRUCT(write_queue, PL330State, 0, vmstate_pl330_queue, + PL330Queue), + VMSTATE_TIMER_PTR(timer, PL330State), + VMSTATE_UINT32(inten, PL330State), + VMSTATE_UINT32(int_status, PL330State), + VMSTATE_UINT32(ev_status, PL330State), + VMSTATE_UINT32_ARRAY(dbg, PL330State, 2), + VMSTATE_UINT8(debug_status, PL330State), + VMSTATE_UINT8(num_faulting, PL330State), + VMSTATE_UINT8_ARRAY(periph_busy, PL330State, PL330_PERIPH_NUM), + VMSTATE_END_OF_LIST() + } +}; + +typedef struct PL330InsnDesc { + /* OPCODE of the instruction */ + uint8_t opcode; + /* Mask so we can select several sibling instructions, such as + DMALD, DMALDS and DMALDB */ + uint8_t opmask; + /* Size of instruction in bytes */ + uint8_t size; + /* Interpreter */ + void (*exec)(PL330Chan *, uint8_t opcode, uint8_t *args, int len); +} PL330InsnDesc; + +static void pl330_hexdump(uint8_t *buf, size_t size) +{ + unsigned int b, i, len; + char tmpbuf[80]; + + for (b = 0; b < size; b += 16) { + len = size - b; + if (len > 16) { + len = 16; + } + tmpbuf[0] = '\0'; + for (i = 0; i < len; i++) { + if ((i % 4) == 0) { + strcat(tmpbuf, " "); + } + sprintf(tmpbuf + strlen(tmpbuf), " %02x", buf[b + i]); + } + trace_pl330_hexdump(b, tmpbuf); + } +} + +/* MFIFO Implementation + * + * MFIFO is implemented as a cyclic buffer of BUF_SIZE size. Tagged bytes are + * stored in this buffer. Data is stored in BUF field, tags - in the + * corresponding array elements of TAG field. + */ + +/* Initialize queue. */ + +static void pl330_fifo_init(PL330Fifo *s, uint32_t size) +{ + s->buf = g_malloc0(size); + s->tag = g_malloc0(size); + s->buf_size = size; +} + +/* Cyclic increment */ + +static inline int pl330_fifo_inc(PL330Fifo *s, int x) +{ + return (x + 1) % s->buf_size; +} + +/* Number of empty bytes in MFIFO */ + +static inline int pl330_fifo_num_free(PL330Fifo *s) +{ + return s->buf_size - s->num; +} + +/* Push LEN bytes of data stored in BUF to MFIFO and tag it with TAG. 
+ * Zero is returned on success, PL330_FIFO_STALL if there is not enough free
+ * space in the MFIFO to store the requested amount of data. If the push is
+ * unsuccessful no data is stored to the MFIFO.
+ */
+
+static int pl330_fifo_push(PL330Fifo *s, uint8_t *buf, int len, uint8_t tag)
+{
+    int i;
+
+    if (s->buf_size - s->num < len) {
+        return PL330_FIFO_STALL;
+    }
+    for (i = 0; i < len; i++) {
+        int push_idx = (s->head + s->num + i) % s->buf_size;
+        s->buf[push_idx] = buf[i];
+        s->tag[push_idx] = tag;
+    }
+    s->num += len;
+    return PL330_FIFO_OK;
+}
+
+/* Get LEN bytes of data from the MFIFO and store them in BUF. The tag value
+ * of each byte is verified. Zero is returned on success, PL330_FIFO_ERR on
+ * tag mismatch and PL330_FIFO_STALL if there is not enough data in the
+ * MFIFO. If the get is unsuccessful no data is removed from the MFIFO.
+ */
+
+static int pl330_fifo_get(PL330Fifo *s, uint8_t *buf, int len, uint8_t tag)
+{
+    int i;
+
+    if (s->num < len) {
+        return PL330_FIFO_STALL;
+    }
+    for (i = 0; i < len; i++) {
+        int get_idx = (s->head + i) % s->buf_size;
+        if (s->tag[get_idx] == tag) {
+            buf[i] = s->buf[get_idx];
+        } else { /* Tag mismatch - abort; nothing has been consumed yet */
+            return PL330_FIFO_ERR;
+        }
+    }
+    s->head = (s->head + len) % s->buf_size;
+    s->num -= len;
+    return PL330_FIFO_OK;
+}
+
+/* Reset MFIFO. This completely erases all data in it. */
+
+static inline void pl330_fifo_reset(PL330Fifo *s)
+{
+    s->head = 0;
+    s->num = 0;
+}
+
+/* Return the tag of the first byte stored in the MFIFO. If the MFIFO is
+ * empty PL330_UNTAGGED is returned.
+ */
+
+static inline uint8_t pl330_fifo_tag(PL330Fifo *s)
+{
+    return (!s->num) ? PL330_UNTAGGED : s->tag[s->head];
+}
+
+/* Returns non-zero if tag TAG is present in the FIFO, zero otherwise */
+
+static int pl330_fifo_has_tag(PL330Fifo *s, uint8_t tag)
+{
+    int i, n;
+
+    i = s->head;
+    for (n = 0; n < s->num; n++) {
+        if (s->tag[i] == tag) {
+            return 1;
+        }
+        i = pl330_fifo_inc(s, i);
+    }
+    return 0;
+}
+
+/* Remove all entries tagged with TAG from the MFIFO */
+
+static void pl330_fifo_tagged_remove(PL330Fifo *s, uint8_t tag)
+{
+    int i, t, n;
+
+    t = i = s->head;
+    for (n = 0; n < s->num; n++) {
+        if (s->tag[i] != tag) {
+            s->buf[t] = s->buf[i];
+            s->tag[t] = s->tag[i];
+            t = pl330_fifo_inc(s, t);
+        } else {
+            s->num = s->num - 1;
+        }
+        i = pl330_fifo_inc(s, i);
+    }
+}
+
+/* Read-Write Queue implementation
+ *
+ * A Read-Write Queue stores up to QUEUE_SIZE instructions (loads or stores).
+ * Each instruction is described by source (for loads) or destination (for
+ * stores) address ADDR, width of data to be loaded/stored LEN, number of
+ * stores/loads to be performed N, INC bit, Z bit and TAG to identify the
+ * channel this instruction belongs to. The queue does not record whether an
+ * instruction is a load or a store: the PL330 has separate queues for loads
+ * and stores, so this is already known at the top level where it matters.
+ *
+ * The queue works as a FIFO for instructions with equal tags, but can issue
+ * instructions with different tags in arbitrary order. The SEQN field
+ * attached to each instruction makes this possible. For each TAG the queue
+ * contains instructions with consecutive SEQN values ranging from
+ * LO_SEQN[TAG] to HI_SEQN[TAG]-1 inclusive. SEQN is an 8-bit unsigned
+ * integer, so SEQN=255 is followed by SEQN=0.
+ *
+ * The Z bit indicates that zeroes should be stored. No MFIFO fetches are
+ * performed in this case.
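+ *
+ * Illustrative example (not from the TRM): with LO_SEQN[2] = 254 and
+ * HI_SEQN[2] = 1, tag 2 owns three queued instructions with SEQN values
+ * 254, 255 and 0. pl330_queue_find_insn(s, 2, true) only returns the
+ * SEQN = 254 entry until pl330_queue_remove_insn() advances LO_SEQN[2];
+ * the 255 -> 0 wrap is plain uint8_t arithmetic.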
+ */
+
+static void pl330_queue_reset(PL330Queue *s)
+{
+    int i;
+
+    for (i = 0; i < s->queue_size; i++) {
+        s->queue[i].tag = PL330_UNTAGGED;
+    }
+}
+
+/* Initialize queue */
+static void pl330_queue_init(PL330Queue *s, int size, PL330State *parent)
+{
+    s->parent = parent;
+    s->queue = g_new0(PL330QueueEntry, size);
+    s->queue_size = size;
+}
+
+/* Returns a pointer to an empty slot or NULL if the queue is full */
+static PL330QueueEntry *pl330_queue_find_empty(PL330Queue *s)
+{
+    int i;
+
+    for (i = 0; i < s->queue_size; i++) {
+        if (s->queue[i].tag == PL330_UNTAGGED) {
+            return &s->queue[i];
+        }
+    }
+    return NULL;
+}
+
+/* Put an instruction in the queue.
+ * Return value:
+ *  - zero - OK
+ *  - non-zero - queue is full
+ */
+
+static int pl330_queue_put_insn(PL330Queue *s, uint32_t addr,
+                                int len, int n, bool inc, bool z, uint8_t tag)
+{
+    PL330QueueEntry *entry = pl330_queue_find_empty(s);
+
+    if (!entry) {
+        return 1;
+    }
+    entry->tag = tag;
+    entry->addr = addr;
+    entry->len = len;
+    entry->n = n;
+    entry->z = z;
+    entry->inc = inc;
+    entry->seqn = s->parent->hi_seqn[tag];
+    s->parent->hi_seqn[tag]++;
+    return 0;
+}
+
+/* Returns a pointer to the queue slot containing an instruction which
+ * satisfies the following conditions:
+ *  - it has a valid tag value (not PL330_UNTAGGED)
+ *  - if enforce_seq is set it has to be issuable without violating the
+ *    queue logic (see above)
+ *  - if the TAG argument is not PL330_UNTAGGED the instruction's tag value
+ *    is equal to the argument TAG value.
+ * If no such instruction can be found, NULL is returned.
+ */
+
+static PL330QueueEntry *pl330_queue_find_insn(PL330Queue *s, uint8_t tag,
+                                              bool enforce_seq)
+{
+    int i;
+
+    for (i = 0; i < s->queue_size; i++) {
+        if (s->queue[i].tag != PL330_UNTAGGED) {
+            if ((!enforce_seq ||
+                 s->queue[i].seqn == s->parent->lo_seqn[s->queue[i].tag]) &&
+                (s->queue[i].tag == tag || tag == PL330_UNTAGGED ||
+                 s->queue[i].z)) {
+                return &s->queue[i];
+            }
+        }
+    }
+    return NULL;
+}
+
+/* Removes an instruction from the queue. */
+
+static inline void pl330_queue_remove_insn(PL330Queue *s, PL330QueueEntry *e)
+{
+    s->parent->lo_seqn[e->tag]++;
+    e->tag = PL330_UNTAGGED;
+}
+
+/* Removes all instructions tagged with TAG from the queue. */
+
+static inline void pl330_queue_remove_tagged(PL330Queue *s, uint8_t tag)
+{
+    int i;
+
+    for (i = 0; i < s->queue_size; i++) {
+        if (s->queue[i].tag == tag) {
+            s->queue[i].tag = PL330_UNTAGGED;
+        }
+    }
+}
+
+/* DMA instruction execution engine */
+
+/* Moves the DMA channel to the FAULT state and updates its status. */
+
+static inline void pl330_fault(PL330Chan *ch, uint32_t flags)
+{
+    trace_pl330_fault(ch, flags);
+    ch->fault_type |= flags;
+    if (ch->state == pl330_chan_fault) {
+        return;
+    }
+    ch->state = pl330_chan_fault;
+    ch->parent->num_faulting++;
+    if (ch->parent->num_faulting == 1) {
+        trace_pl330_fault_abort();
+        qemu_irq_raise(ch->parent->irq_abort);
+    }
+}
+
+/*
+ * For information about instructions see the PL330 Technical Reference
+ * Manual.
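+ * (The opcode and mask encodings in insn_desc[] below follow the TRM's
+ * instruction set listing.)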
+ * + * Arguments: + * CH - channel executing the instruction + * OPCODE - opcode + * ARGS - array of 8-bit arguments + * LEN - number of elements in ARGS array + */ + +static void pl330_dmaadxh(PL330Chan *ch, uint8_t *args, bool ra, bool neg) +{ + uint32_t im = (args[1] << 8) | args[0]; + if (neg) { + im |= 0xffffu << 16; + } + + if (ch->is_manager) { + pl330_fault(ch, PL330_FAULT_UNDEF_INSTR); + return; + } + if (ra) { + ch->dst += im; + } else { + ch->src += im; + } +} + +static void pl330_dmaaddh(PL330Chan *ch, uint8_t opcode, uint8_t *args, int len) +{ + pl330_dmaadxh(ch, args, extract32(opcode, 1, 1), false); +} + +static void pl330_dmaadnh(PL330Chan *ch, uint8_t opcode, uint8_t *args, int len) +{ + pl330_dmaadxh(ch, args, extract32(opcode, 1, 1), true); +} + +static void pl330_dmaend(PL330Chan *ch, uint8_t opcode, + uint8_t *args, int len) +{ + PL330State *s = ch->parent; + + if (ch->state == pl330_chan_executing && !ch->is_manager) { + /* Wait for all transfers to complete */ + if (pl330_fifo_has_tag(&s->fifo, ch->tag) || + pl330_queue_find_insn(&s->read_queue, ch->tag, false) != NULL || + pl330_queue_find_insn(&s->write_queue, ch->tag, false) != NULL) { + + ch->stall = 1; + return; + } + } + trace_pl330_dmaend(); + pl330_fifo_tagged_remove(&s->fifo, ch->tag); + pl330_queue_remove_tagged(&s->read_queue, ch->tag); + pl330_queue_remove_tagged(&s->write_queue, ch->tag); + ch->state = pl330_chan_stopped; +} + +static void pl330_dmaflushp(PL330Chan *ch, uint8_t opcode, + uint8_t *args, int len) +{ + uint8_t periph_id; + + if (args[0] & 7) { + pl330_fault(ch, PL330_FAULT_OPERAND_INVALID); + return; + } + periph_id = (args[0] >> 3) & 0x1f; + if (periph_id >= ch->parent->num_periph_req) { + pl330_fault(ch, PL330_FAULT_OPERAND_INVALID); + return; + } + if (ch->ns && !(ch->parent->cfg[CFG_PNS] & (1 << periph_id))) { + pl330_fault(ch, PL330_FAULT_CH_PERIPH_ERR); + return; + } + /* Do nothing */ +} + +static void pl330_dmago(PL330Chan *ch, uint8_t opcode, uint8_t *args, int len) +{ + uint8_t chan_id; + uint8_t ns; + uint32_t pc; + PL330Chan *s; + + trace_pl330_dmago(); + + if (!ch->is_manager) { + pl330_fault(ch, PL330_FAULT_UNDEF_INSTR); + return; + } + ns = !!(opcode & 2); + chan_id = args[0] & 7; + if ((args[0] >> 3)) { + pl330_fault(ch, PL330_FAULT_OPERAND_INVALID); + return; + } + if (chan_id >= ch->parent->num_chnls) { + pl330_fault(ch, PL330_FAULT_OPERAND_INVALID); + return; + } + pc = (((uint32_t)args[4]) << 24) | (((uint32_t)args[3]) << 16) | + (((uint32_t)args[2]) << 8) | (((uint32_t)args[1])); + if (ch->parent->chan[chan_id].state != pl330_chan_stopped) { + pl330_fault(ch, PL330_FAULT_OPERAND_INVALID); + return; + } + if (ch->ns && !ns) { + pl330_fault(ch, PL330_FAULT_DMAGO_ERR); + return; + } + s = &ch->parent->chan[chan_id]; + s->ns = ns; + s->pc = pc; + s->state = pl330_chan_executing; +} + +static void pl330_dmald(PL330Chan *ch, uint8_t opcode, uint8_t *args, int len) +{ + uint8_t bs = opcode & 3; + uint32_t size, num; + bool inc; + + if (bs == 2) { + pl330_fault(ch, PL330_FAULT_OPERAND_INVALID); + return; + } + if ((bs == 1 && ch->request_flag == PL330_BURST) || + (bs == 3 && ch->request_flag == PL330_SINGLE)) { + /* Perform NOP */ + return; + } + if (bs == 1 && ch->request_flag == PL330_SINGLE) { + num = 1; + } else { + num = ((ch->control >> 4) & 0xf) + 1; + } + size = (uint32_t)1 << ((ch->control >> 1) & 0x7); + inc = !!(ch->control & 1); + ch->stall = pl330_queue_put_insn(&ch->parent->read_queue, ch->src, + size, num, inc, 0, ch->tag); + if (!ch->stall) { + 
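+        /* Queued successfully: advance the source address past the burst.
+         * For an incrementing transfer this is size * num bytes, minus the
+         * bytes skipped to align the first beat (ch->src & (size - 1));
+         * a non-incrementing transfer leaves the address unchanged. */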
trace_pl330_dmald(ch->tag, ch->src, size, num, inc ? 'Y' : 'N'); + ch->src += inc ? size * num - (ch->src & (size - 1)) : 0; + } +} + +static void pl330_dmaldp(PL330Chan *ch, uint8_t opcode, uint8_t *args, int len) +{ + uint8_t periph_id; + + if (args[0] & 7) { + pl330_fault(ch, PL330_FAULT_OPERAND_INVALID); + return; + } + periph_id = (args[0] >> 3) & 0x1f; + if (periph_id >= ch->parent->num_periph_req) { + pl330_fault(ch, PL330_FAULT_OPERAND_INVALID); + return; + } + if (ch->ns && !(ch->parent->cfg[CFG_PNS] & (1 << periph_id))) { + pl330_fault(ch, PL330_FAULT_CH_PERIPH_ERR); + return; + } + pl330_dmald(ch, opcode, args, len); +} + +static void pl330_dmalp(PL330Chan *ch, uint8_t opcode, uint8_t *args, int len) +{ + uint8_t lc = (opcode & 2) >> 1; + + ch->lc[lc] = args[0]; +} + +static void pl330_dmakill(PL330Chan *ch, uint8_t opcode, uint8_t *args, int len) +{ + if (ch->state == pl330_chan_fault || + ch->state == pl330_chan_fault_completing) { + /* This is the only way for a channel to leave the faulting state */ + ch->fault_type = 0; + ch->parent->num_faulting--; + if (ch->parent->num_faulting == 0) { + trace_pl330_dmakill(); + qemu_irq_lower(ch->parent->irq_abort); + } + } + ch->state = pl330_chan_killing; + pl330_fifo_tagged_remove(&ch->parent->fifo, ch->tag); + pl330_queue_remove_tagged(&ch->parent->read_queue, ch->tag); + pl330_queue_remove_tagged(&ch->parent->write_queue, ch->tag); + ch->state = pl330_chan_stopped; +} + +static void pl330_dmalpend(PL330Chan *ch, uint8_t opcode, + uint8_t *args, int len) +{ + uint8_t nf = (opcode & 0x10) >> 4; + uint8_t bs = opcode & 3; + uint8_t lc = (opcode & 4) >> 2; + + trace_pl330_dmalpend(nf, bs, lc, ch->lc[lc], ch->request_flag); + + if (bs == 2) { + pl330_fault(ch, PL330_FAULT_OPERAND_INVALID); + return; + } + if ((bs == 1 && ch->request_flag == PL330_BURST) || + (bs == 3 && ch->request_flag == PL330_SINGLE)) { + /* Perform NOP */ + return; + } + if (!nf || ch->lc[lc]) { + if (nf) { + ch->lc[lc]--; + } + trace_pl330_dmalpiter(); + ch->pc -= args[0]; + ch->pc -= len + 1; + /* "ch->pc -= args[0] + len + 1" is incorrect when args[0] == 256 */ + } else { + trace_pl330_dmalpfallthrough(); + } +} + + +static void pl330_dmamov(PL330Chan *ch, uint8_t opcode, uint8_t *args, int len) +{ + uint8_t rd = args[0] & 7; + uint32_t im; + + if ((args[0] >> 3)) { + pl330_fault(ch, PL330_FAULT_OPERAND_INVALID); + return; + } + im = (((uint32_t)args[4]) << 24) | (((uint32_t)args[3]) << 16) | + (((uint32_t)args[2]) << 8) | (((uint32_t)args[1])); + switch (rd) { + case 0: + ch->src = im; + break; + case 1: + ch->control = im; + break; + case 2: + ch->dst = im; + break; + default: + pl330_fault(ch, PL330_FAULT_OPERAND_INVALID); + return; + } +} + +static void pl330_dmanop(PL330Chan *ch, uint8_t opcode, + uint8_t *args, int len) +{ + /* NOP is NOP. 
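+     * (DMANOP is a one-byte instruction - see insn_desc[] - whose only
+     * effect is to advance the PC.)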
*/ +} + +static void pl330_dmarmb(PL330Chan *ch, uint8_t opcode, uint8_t *args, int len) +{ + if (pl330_queue_find_insn(&ch->parent->read_queue, ch->tag, false)) { + ch->state = pl330_chan_at_barrier; + ch->stall = 1; + return; + } else { + ch->state = pl330_chan_executing; + } +} + +static void pl330_dmasev(PL330Chan *ch, uint8_t opcode, uint8_t *args, int len) +{ + uint8_t ev_id; + + if (args[0] & 7) { + pl330_fault(ch, PL330_FAULT_OPERAND_INVALID); + return; + } + ev_id = (args[0] >> 3) & 0x1f; + if (ev_id >= ch->parent->num_events) { + pl330_fault(ch, PL330_FAULT_OPERAND_INVALID); + return; + } + if (ch->ns && !(ch->parent->cfg[CFG_INS] & (1 << ev_id))) { + pl330_fault(ch, PL330_FAULT_EVENT_ERR); + return; + } + if (ch->parent->inten & (1 << ev_id)) { + ch->parent->int_status |= (1 << ev_id); + trace_pl330_dmasev_evirq(ev_id); + qemu_irq_raise(ch->parent->irq[ev_id]); + } + trace_pl330_dmasev_event(ev_id); + ch->parent->ev_status |= (1 << ev_id); +} + +static void pl330_dmast(PL330Chan *ch, uint8_t opcode, uint8_t *args, int len) +{ + uint8_t bs = opcode & 3; + uint32_t size, num; + bool inc; + + if (bs == 2) { + pl330_fault(ch, PL330_FAULT_OPERAND_INVALID); + return; + } + if ((bs == 1 && ch->request_flag == PL330_BURST) || + (bs == 3 && ch->request_flag == PL330_SINGLE)) { + /* Perform NOP */ + return; + } + num = ((ch->control >> 18) & 0xf) + 1; + size = (uint32_t)1 << ((ch->control >> 15) & 0x7); + inc = !!((ch->control >> 14) & 1); + ch->stall = pl330_queue_put_insn(&ch->parent->write_queue, ch->dst, + size, num, inc, 0, ch->tag); + if (!ch->stall) { + trace_pl330_dmast(ch->tag, ch->dst, size, num, inc ? 'Y' : 'N'); + ch->dst += inc ? size * num - (ch->dst & (size - 1)) : 0; + } +} + +static void pl330_dmastp(PL330Chan *ch, uint8_t opcode, + uint8_t *args, int len) +{ + uint8_t periph_id; + + if (args[0] & 7) { + pl330_fault(ch, PL330_FAULT_OPERAND_INVALID); + return; + } + periph_id = (args[0] >> 3) & 0x1f; + if (periph_id >= ch->parent->num_periph_req) { + pl330_fault(ch, PL330_FAULT_OPERAND_INVALID); + return; + } + if (ch->ns && !(ch->parent->cfg[CFG_PNS] & (1 << periph_id))) { + pl330_fault(ch, PL330_FAULT_CH_PERIPH_ERR); + return; + } + pl330_dmast(ch, opcode, args, len); +} + +static void pl330_dmastz(PL330Chan *ch, uint8_t opcode, + uint8_t *args, int len) +{ + uint32_t size, num; + bool inc; + + num = ((ch->control >> 18) & 0xf) + 1; + size = (uint32_t)1 << ((ch->control >> 15) & 0x7); + inc = !!((ch->control >> 14) & 1); + ch->stall = pl330_queue_put_insn(&ch->parent->write_queue, ch->dst, + size, num, inc, 1, ch->tag); + if (inc) { + ch->dst += size * num; + } +} + +static void pl330_dmawfe(PL330Chan *ch, uint8_t opcode, + uint8_t *args, int len) +{ + uint8_t ev_id; + int i; + + if (args[0] & 5) { + pl330_fault(ch, PL330_FAULT_OPERAND_INVALID); + return; + } + ev_id = (args[0] >> 3) & 0x1f; + if (ev_id >= ch->parent->num_events) { + pl330_fault(ch, PL330_FAULT_OPERAND_INVALID); + return; + } + if (ch->ns && !(ch->parent->cfg[CFG_INS] & (1 << ev_id))) { + pl330_fault(ch, PL330_FAULT_EVENT_ERR); + return; + } + ch->wakeup = ev_id; + ch->state = pl330_chan_waiting_event; + if (~ch->parent->inten & ch->parent->ev_status & 1 << ev_id) { + ch->state = pl330_chan_executing; + /* If anyone else is currently waiting on the same event, let them + * clear the ev_status so they pick up event as well + */ + for (i = 0; i < ch->parent->num_chnls; ++i) { + PL330Chan *peer = &ch->parent->chan[i]; + if (peer->state == pl330_chan_waiting_event && + peer->wakeup == ev_id) { + return; + } 
+ } + ch->parent->ev_status &= ~(1 << ev_id); + trace_pl330_dmawfe(ev_id); + } else { + ch->stall = 1; + } +} + +static void pl330_dmawfp(PL330Chan *ch, uint8_t opcode, + uint8_t *args, int len) +{ + uint8_t bs = opcode & 3; + uint8_t periph_id; + + if (args[0] & 7) { + pl330_fault(ch, PL330_FAULT_OPERAND_INVALID); + return; + } + periph_id = (args[0] >> 3) & 0x1f; + if (periph_id >= ch->parent->num_periph_req) { + pl330_fault(ch, PL330_FAULT_OPERAND_INVALID); + return; + } + if (ch->ns && !(ch->parent->cfg[CFG_PNS] & (1 << periph_id))) { + pl330_fault(ch, PL330_FAULT_CH_PERIPH_ERR); + return; + } + switch (bs) { + case 0: /* S */ + ch->request_flag = PL330_SINGLE; + ch->wfp_sbp = 0; + break; + case 1: /* P */ + ch->request_flag = PL330_BURST; + ch->wfp_sbp = 2; + break; + case 2: /* B */ + ch->request_flag = PL330_BURST; + ch->wfp_sbp = 1; + break; + default: + pl330_fault(ch, PL330_FAULT_OPERAND_INVALID); + return; + } + + if (ch->parent->periph_busy[periph_id]) { + ch->state = pl330_chan_waiting_periph; + ch->stall = 1; + } else if (ch->state == pl330_chan_waiting_periph) { + ch->state = pl330_chan_executing; + } +} + +static void pl330_dmawmb(PL330Chan *ch, uint8_t opcode, + uint8_t *args, int len) +{ + if (pl330_queue_find_insn(&ch->parent->write_queue, ch->tag, false)) { + ch->state = pl330_chan_at_barrier; + ch->stall = 1; + return; + } else { + ch->state = pl330_chan_executing; + } +} + +/* NULL terminated array of the instruction descriptions. */ +static const PL330InsnDesc insn_desc[] = { + { .opcode = 0x54, .opmask = 0xFD, .size = 3, .exec = pl330_dmaaddh, }, + { .opcode = 0x5c, .opmask = 0xFD, .size = 3, .exec = pl330_dmaadnh, }, + { .opcode = 0x00, .opmask = 0xFF, .size = 1, .exec = pl330_dmaend, }, + { .opcode = 0x35, .opmask = 0xFF, .size = 2, .exec = pl330_dmaflushp, }, + { .opcode = 0xA0, .opmask = 0xFD, .size = 6, .exec = pl330_dmago, }, + { .opcode = 0x04, .opmask = 0xFC, .size = 1, .exec = pl330_dmald, }, + { .opcode = 0x25, .opmask = 0xFD, .size = 2, .exec = pl330_dmaldp, }, + { .opcode = 0x20, .opmask = 0xFD, .size = 2, .exec = pl330_dmalp, }, + /* dmastp must be before dmalpend in this list, because their maps + * are overlapping + */ + { .opcode = 0x29, .opmask = 0xFD, .size = 2, .exec = pl330_dmastp, }, + { .opcode = 0x28, .opmask = 0xE8, .size = 2, .exec = pl330_dmalpend, }, + { .opcode = 0x01, .opmask = 0xFF, .size = 1, .exec = pl330_dmakill, }, + { .opcode = 0xBC, .opmask = 0xFF, .size = 6, .exec = pl330_dmamov, }, + { .opcode = 0x18, .opmask = 0xFF, .size = 1, .exec = pl330_dmanop, }, + { .opcode = 0x12, .opmask = 0xFF, .size = 1, .exec = pl330_dmarmb, }, + { .opcode = 0x34, .opmask = 0xFF, .size = 2, .exec = pl330_dmasev, }, + { .opcode = 0x08, .opmask = 0xFC, .size = 1, .exec = pl330_dmast, }, + { .opcode = 0x0C, .opmask = 0xFF, .size = 1, .exec = pl330_dmastz, }, + { .opcode = 0x36, .opmask = 0xFF, .size = 2, .exec = pl330_dmawfe, }, + { .opcode = 0x30, .opmask = 0xFC, .size = 2, .exec = pl330_dmawfp, }, + { .opcode = 0x13, .opmask = 0xFF, .size = 1, .exec = pl330_dmawmb, }, + { .opcode = 0x00, .opmask = 0x00, .size = 0, .exec = NULL, } +}; + +/* Instructions which can be issued via debug registers. 
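+ * The guest packs the instruction into DBGINST0/1: DBGINST0 bit 0 selects
+ * a channel thread (1) or the manager (0), bits [10:8] give the channel
+ * number, bits [23:16] the opcode and bits [31:24] argument byte 0;
+ * DBGINST1 carries argument bytes 1-4. See pl330_debug_exec() below.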
*/ +static const PL330InsnDesc debug_insn_desc[] = { + { .opcode = 0xA0, .opmask = 0xFD, .size = 6, .exec = pl330_dmago, }, + { .opcode = 0x01, .opmask = 0xFF, .size = 1, .exec = pl330_dmakill, }, + { .opcode = 0x34, .opmask = 0xFF, .size = 2, .exec = pl330_dmasev, }, + { .opcode = 0x00, .opmask = 0x00, .size = 0, .exec = NULL, } +}; + +static inline const PL330InsnDesc *pl330_fetch_insn(PL330Chan *ch) +{ + uint8_t opcode; + int i; + + dma_memory_read(ch->parent->mem_as, ch->pc, &opcode, 1); + for (i = 0; insn_desc[i].size; i++) { + if ((opcode & insn_desc[i].opmask) == insn_desc[i].opcode) { + return &insn_desc[i]; + } + } + return NULL; +} + +static inline void pl330_exec_insn(PL330Chan *ch, const PL330InsnDesc *insn) +{ + uint8_t buf[PL330_INSN_MAXSIZE]; + + assert(insn->size <= PL330_INSN_MAXSIZE); + dma_memory_read(ch->parent->mem_as, ch->pc, buf, insn->size); + insn->exec(ch, buf[0], &buf[1], insn->size - 1); +} + +static inline void pl330_update_pc(PL330Chan *ch, + const PL330InsnDesc *insn) +{ + ch->pc += insn->size; +} + +/* Try to execute current instruction in channel CH. Number of executed + instructions is returned (0 or 1). */ +static int pl330_chan_exec(PL330Chan *ch) +{ + const PL330InsnDesc *insn; + + if (ch->state != pl330_chan_executing && + ch->state != pl330_chan_waiting_periph && + ch->state != pl330_chan_at_barrier && + ch->state != pl330_chan_waiting_event) { + return 0; + } + ch->stall = 0; + insn = pl330_fetch_insn(ch); + if (!insn) { + trace_pl330_chan_exec_undef(); + pl330_fault(ch, PL330_FAULT_UNDEF_INSTR); + return 0; + } + pl330_exec_insn(ch, insn); + if (!ch->stall) { + pl330_update_pc(ch, insn); + ch->watchdog_timer = 0; + return 1; + /* WDT only active in exec state */ + } else if (ch->state == pl330_chan_executing) { + ch->watchdog_timer++; + if (ch->watchdog_timer >= PL330_WATCHDOG_LIMIT) { + pl330_fault(ch, PL330_FAULT_LOCKUP_ERR); + } + } + return 0; +} + +/* Try to execute 1 instruction in each channel, one instruction from read + queue and one instruction from write queue. Number of successfully executed + instructions is returned. */ +static int pl330_exec_cycle(PL330Chan *channel) +{ + PL330State *s = channel->parent; + PL330QueueEntry *q; + int i; + int num_exec = 0; + int fifo_res = 0; + uint8_t buf[PL330_MAX_BURST_LEN]; + + /* Execute one instruction in each channel */ + num_exec += pl330_chan_exec(channel); + + /* Execute one instruction from read queue */ + q = pl330_queue_find_insn(&s->read_queue, PL330_UNTAGGED, true); + if (q != NULL && q->len <= pl330_fifo_num_free(&s->fifo)) { + int len = q->len - (q->addr & (q->len - 1)); + + dma_memory_read(s->mem_as, q->addr, buf, len); + trace_pl330_exec_cycle(q->addr, len); + if (trace_event_get_state_backends(TRACE_PL330_HEXDUMP)) { + pl330_hexdump(buf, len); + } + fifo_res = pl330_fifo_push(&s->fifo, buf, len, q->tag); + if (fifo_res == PL330_FIFO_OK) { + if (q->inc) { + q->addr += len; + } + q->n--; + if (!q->n) { + pl330_queue_remove_insn(&s->read_queue, q); + } + num_exec++; + } + } + + /* Execute one instruction from write queue. 
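+     * The entry must carry the tag currently at the head of the MFIFO (or
+     * have the Z bit set, in which case zeroes are written and no MFIFO
+     * data is consumed), so data leaves the MFIFO strictly in order.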
*/ + q = pl330_queue_find_insn(&s->write_queue, pl330_fifo_tag(&s->fifo), true); + if (q != NULL) { + int len = q->len - (q->addr & (q->len - 1)); + + if (q->z) { + for (i = 0; i < len; i++) { + buf[i] = 0; + } + } else { + fifo_res = pl330_fifo_get(&s->fifo, buf, len, q->tag); + } + if (fifo_res == PL330_FIFO_OK || q->z) { + dma_memory_write(s->mem_as, q->addr, buf, len); + trace_pl330_exec_cycle(q->addr, len); + if (trace_event_get_state_backends(TRACE_PL330_HEXDUMP)) { + pl330_hexdump(buf, len); + } + if (q->inc) { + q->addr += len; + } + num_exec++; + } else if (fifo_res == PL330_FIFO_STALL) { + pl330_fault(&channel->parent->chan[q->tag], + PL330_FAULT_FIFOEMPTY_ERR); + } + q->n--; + if (!q->n) { + pl330_queue_remove_insn(&s->write_queue, q); + } + } + + return num_exec; +} + +static int pl330_exec_channel(PL330Chan *channel) +{ + int insr_exec = 0; + + /* TODO: Is it all right to execute everything or should we do per-cycle + simulation? */ + while (pl330_exec_cycle(channel)) { + insr_exec++; + } + + /* Detect deadlock */ + if (channel->state == pl330_chan_executing) { + pl330_fault(channel, PL330_FAULT_LOCKUP_ERR); + } + /* Situation when one of the queues has deadlocked but all channels + * have finished their programs should be impossible. + */ + + return insr_exec; +} + +static inline void pl330_exec(PL330State *s) +{ + int i, insr_exec; + trace_pl330_exec(); + do { + insr_exec = pl330_exec_channel(&s->manager); + + for (i = 0; i < s->num_chnls; i++) { + insr_exec += pl330_exec_channel(&s->chan[i]); + } + } while (insr_exec); +} + +static void pl330_exec_cycle_timer(void *opaque) +{ + PL330State *s = (PL330State *)opaque; + pl330_exec(s); +} + +/* Stop or restore dma operations */ + +static void pl330_dma_stop_irq(void *opaque, int irq, int level) +{ + PL330State *s = (PL330State *)opaque; + + if (s->periph_busy[irq] != level) { + s->periph_busy[irq] = level; + timer_mod(s->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL)); + } +} + +static void pl330_debug_exec(PL330State *s) +{ + uint8_t args[5]; + uint8_t opcode; + uint8_t chan_id; + int i; + PL330Chan *ch; + const PL330InsnDesc *insn; + + s->debug_status = 1; + chan_id = (s->dbg[0] >> 8) & 0x07; + opcode = (s->dbg[0] >> 16) & 0xff; + args[0] = (s->dbg[0] >> 24) & 0xff; + args[1] = (s->dbg[1] >> 0) & 0xff; + args[2] = (s->dbg[1] >> 8) & 0xff; + args[3] = (s->dbg[1] >> 16) & 0xff; + args[4] = (s->dbg[1] >> 24) & 0xff; + trace_pl330_debug_exec(chan_id); + if (s->dbg[0] & 1) { + ch = &s->chan[chan_id]; + } else { + ch = &s->manager; + } + insn = NULL; + for (i = 0; debug_insn_desc[i].size; i++) { + if ((opcode & debug_insn_desc[i].opmask) == debug_insn_desc[i].opcode) { + insn = &debug_insn_desc[i]; + } + } + if (!insn) { + pl330_fault(ch, PL330_FAULT_UNDEF_INSTR | PL330_FAULT_DBG_INSTR); + return ; + } + ch->stall = 0; + insn->exec(ch, opcode, args, insn->size - 1); + if (ch->fault_type) { + ch->fault_type |= PL330_FAULT_DBG_INSTR; + } + if (ch->stall) { + trace_pl330_debug_exec_stall(); + qemu_log_mask(LOG_UNIMP, "pl330: stall of debug instruction not " + "implemented\n"); + } + s->debug_status = 0; +} + +/* IOMEM mapped registers */ + +static void pl330_iomem_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + PL330State *s = (PL330State *) opaque; + int i; + + trace_pl330_iomem_write((unsigned)offset, (unsigned)value); + + switch (offset) { + case PL330_REG_INTEN: + s->inten = value; + break; + case PL330_REG_INTCLR: + for (i = 0; i < s->num_events; i++) { + if (s->int_status & s->inten & value & (1 << i)) { + 
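+                /* The guest acknowledges an enabled, pending event:
+                 * drop the corresponding IRQ line. */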
trace_pl330_iomem_write_clr(i); + qemu_irq_lower(s->irq[i]); + } + } + s->ev_status &= ~(value & s->inten); + s->int_status &= ~(value & s->inten); + break; + case PL330_REG_DBGCMD: + if ((value & 3) == 0) { + pl330_debug_exec(s); + pl330_exec(s); + } else { + qemu_log_mask(LOG_GUEST_ERROR, "pl330: write of illegal value %u " + "for offset " TARGET_FMT_plx "\n", (unsigned)value, + offset); + } + break; + case PL330_REG_DBGINST0: + s->dbg[0] = value; + break; + case PL330_REG_DBGINST1: + s->dbg[1] = value; + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, "pl330: bad write offset " TARGET_FMT_plx + "\n", offset); + break; + } +} + +static inline uint32_t pl330_iomem_read_imp(void *opaque, + hwaddr offset) +{ + PL330State *s = (PL330State *)opaque; + int chan_id; + int i; + uint32_t res; + + if (offset >= PL330_REG_PERIPH_ID && offset < PL330_REG_PERIPH_ID + 32) { + return pl330_id[(offset - PL330_REG_PERIPH_ID) >> 2]; + } + if (offset >= PL330_REG_CR0_BASE && offset < PL330_REG_CR0_BASE + 24) { + return s->cfg[(offset - PL330_REG_CR0_BASE) >> 2]; + } + if (offset >= PL330_REG_CHANCTRL && offset < PL330_REG_DBGSTATUS) { + offset -= PL330_REG_CHANCTRL; + chan_id = offset >> 5; + if (chan_id >= s->num_chnls) { + qemu_log_mask(LOG_GUEST_ERROR, "pl330: bad read offset " + TARGET_FMT_plx "\n", offset); + return 0; + } + switch (offset & 0x1f) { + case 0x00: + return s->chan[chan_id].src; + case 0x04: + return s->chan[chan_id].dst; + case 0x08: + return s->chan[chan_id].control; + case 0x0C: + return s->chan[chan_id].lc[0]; + case 0x10: + return s->chan[chan_id].lc[1]; + default: + qemu_log_mask(LOG_GUEST_ERROR, "pl330: bad read offset " + TARGET_FMT_plx "\n", offset); + return 0; + } + } + if (offset >= PL330_REG_CSR_BASE && offset < 0x400) { + offset -= PL330_REG_CSR_BASE; + chan_id = offset >> 3; + if (chan_id >= s->num_chnls) { + qemu_log_mask(LOG_GUEST_ERROR, "pl330: bad read offset " + TARGET_FMT_plx "\n", offset); + return 0; + } + switch ((offset >> 2) & 1) { + case 0x0: + res = (s->chan[chan_id].ns << 21) | + (s->chan[chan_id].wakeup << 4) | + (s->chan[chan_id].state) | + (s->chan[chan_id].wfp_sbp << 14); + return res; + case 0x1: + return s->chan[chan_id].pc; + default: + qemu_log_mask(LOG_GUEST_ERROR, "pl330: read error\n"); + return 0; + } + } + if (offset >= PL330_REG_FTR_BASE && offset < 0x100) { + offset -= PL330_REG_FTR_BASE; + chan_id = offset >> 2; + if (chan_id >= s->num_chnls) { + qemu_log_mask(LOG_GUEST_ERROR, "pl330: bad read offset " + TARGET_FMT_plx "\n", offset); + return 0; + } + return s->chan[chan_id].fault_type; + } + switch (offset) { + case PL330_REG_DSR: + return (s->manager.ns << 9) | (s->manager.wakeup << 4) | + (s->manager.state & 0xf); + case PL330_REG_DPC: + return s->manager.pc; + case PL330_REG_INTEN: + return s->inten; + case PL330_REG_INT_EVENT_RIS: + return s->ev_status; + case PL330_REG_INTMIS: + return s->int_status; + case PL330_REG_INTCLR: + /* Documentation says that we can't read this register + * but linux kernel does it + */ + return 0; + case PL330_REG_FSRD: + return s->manager.state ? 
1 : 0; + case PL330_REG_FSRC: + res = 0; + for (i = 0; i < s->num_chnls; i++) { + if (s->chan[i].state == pl330_chan_fault || + s->chan[i].state == pl330_chan_fault_completing) { + res |= 1 << i; + } + } + return res; + case PL330_REG_FTRD: + return s->manager.fault_type; + case PL330_REG_DBGSTATUS: + return s->debug_status; + default: + qemu_log_mask(LOG_GUEST_ERROR, "pl330: bad read offset " + TARGET_FMT_plx "\n", offset); + } + return 0; +} + +static uint64_t pl330_iomem_read(void *opaque, hwaddr offset, + unsigned size) +{ + uint32_t ret = pl330_iomem_read_imp(opaque, offset); + trace_pl330_iomem_read((uint32_t)offset, ret); + return ret; +} + +static const MemoryRegionOps pl330_ops = { + .read = pl330_iomem_read, + .write = pl330_iomem_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .impl = { + .min_access_size = 4, + .max_access_size = 4, + } +}; + +/* Controller logic and initialization */ + +static void pl330_chan_reset(PL330Chan *ch) +{ + ch->src = 0; + ch->dst = 0; + ch->pc = 0; + ch->state = pl330_chan_stopped; + ch->watchdog_timer = 0; + ch->stall = 0; + ch->control = 0; + ch->status = 0; + ch->fault_type = 0; +} + +static void pl330_reset(DeviceState *d) +{ + int i; + PL330State *s = PL330(d); + + s->inten = 0; + s->int_status = 0; + s->ev_status = 0; + s->debug_status = 0; + s->num_faulting = 0; + s->manager.ns = s->mgr_ns_at_rst; + pl330_fifo_reset(&s->fifo); + pl330_queue_reset(&s->read_queue); + pl330_queue_reset(&s->write_queue); + + for (i = 0; i < s->num_chnls; i++) { + pl330_chan_reset(&s->chan[i]); + } + for (i = 0; i < s->num_periph_req; i++) { + s->periph_busy[i] = 0; + } + + timer_del(s->timer); +} + +static void pl330_realize(DeviceState *dev, Error **errp) +{ + int i; + PL330State *s = PL330(dev); + + sysbus_init_irq(SYS_BUS_DEVICE(dev), &s->irq_abort); + memory_region_init_io(&s->iomem, OBJECT(s), &pl330_ops, s, + "dma", PL330_IOMEM_SIZE); + sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->iomem); + + if (!s->mem_mr) { + error_setg(errp, "'memory' link is not set"); + return; + } else if (s->mem_mr == get_system_memory()) { + /* Avoid creating new AS for system memory. */ + s->mem_as = &address_space_memory; + } else { + s->mem_as = g_new0(AddressSpace, 1); + address_space_init(s->mem_as, s->mem_mr, + memory_region_name(s->mem_mr)); + } + + s->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, pl330_exec_cycle_timer, s); + + s->cfg[0] = (s->mgr_ns_at_rst ? 0x4 : 0) | + (s->num_periph_req > 0 ? 
1 : 0) | + ((s->num_chnls - 1) & 0x7) << 4 | + ((s->num_periph_req - 1) & 0x1f) << 12 | + ((s->num_events - 1) & 0x1f) << 17; + + switch (s->i_cache_len) { + case (4): + s->cfg[1] |= 2; + break; + case (8): + s->cfg[1] |= 3; + break; + case (16): + s->cfg[1] |= 4; + break; + case (32): + s->cfg[1] |= 5; + break; + default: + error_setg(errp, "Bad value for i-cache_len property: %" PRIx8, + s->i_cache_len); + return; + } + s->cfg[1] |= ((s->num_i_cache_lines - 1) & 0xf) << 4; + + s->chan = g_new0(PL330Chan, s->num_chnls); + s->hi_seqn = g_new0(uint8_t, s->num_chnls); + s->lo_seqn = g_new0(uint8_t, s->num_chnls); + for (i = 0; i < s->num_chnls; i++) { + s->chan[i].parent = s; + s->chan[i].tag = (uint8_t)i; + } + s->manager.parent = s; + s->manager.tag = s->num_chnls; + s->manager.is_manager = true; + + s->irq = g_new0(qemu_irq, s->num_events); + for (i = 0; i < s->num_events; i++) { + sysbus_init_irq(SYS_BUS_DEVICE(dev), &s->irq[i]); + } + + qdev_init_gpio_in(dev, pl330_dma_stop_irq, PL330_PERIPH_NUM); + + switch (s->data_width) { + case (32): + s->cfg[CFG_CRD] |= 0x2; + break; + case (64): + s->cfg[CFG_CRD] |= 0x3; + break; + case (128): + s->cfg[CFG_CRD] |= 0x4; + break; + default: + error_setg(errp, "Bad value for data_width property: %" PRIx8, + s->data_width); + return; + } + + s->cfg[CFG_CRD] |= ((s->wr_cap - 1) & 0x7) << 4 | + ((s->wr_q_dep - 1) & 0xf) << 8 | + ((s->rd_cap - 1) & 0x7) << 12 | + ((s->rd_q_dep - 1) & 0xf) << 16 | + ((s->data_buffer_dep - 1) & 0x1ff) << 20; + + pl330_queue_init(&s->read_queue, s->rd_q_dep, s); + pl330_queue_init(&s->write_queue, s->wr_q_dep, s); + pl330_fifo_init(&s->fifo, s->data_width / 4 * s->data_buffer_dep); +} + +static Property pl330_properties[] = { + /* CR0 */ + DEFINE_PROP_UINT32("num_chnls", PL330State, num_chnls, 8), + DEFINE_PROP_UINT8("num_periph_req", PL330State, num_periph_req, 4), + DEFINE_PROP_UINT8("num_events", PL330State, num_events, 16), + DEFINE_PROP_UINT8("mgr_ns_at_rst", PL330State, mgr_ns_at_rst, 0), + /* CR1 */ + DEFINE_PROP_UINT8("i-cache_len", PL330State, i_cache_len, 4), + DEFINE_PROP_UINT8("num_i-cache_lines", PL330State, num_i_cache_lines, 8), + /* CR2-4 */ + DEFINE_PROP_UINT32("boot_addr", PL330State, cfg[CFG_BOOT_ADDR], 0), + DEFINE_PROP_UINT32("INS", PL330State, cfg[CFG_INS], 0), + DEFINE_PROP_UINT32("PNS", PL330State, cfg[CFG_PNS], 0), + /* CRD */ + DEFINE_PROP_UINT8("data_width", PL330State, data_width, 64), + DEFINE_PROP_UINT8("wr_cap", PL330State, wr_cap, 8), + DEFINE_PROP_UINT8("wr_q_dep", PL330State, wr_q_dep, 16), + DEFINE_PROP_UINT8("rd_cap", PL330State, rd_cap, 8), + DEFINE_PROP_UINT8("rd_q_dep", PL330State, rd_q_dep, 16), + DEFINE_PROP_UINT16("data_buffer_dep", PL330State, data_buffer_dep, 256), + + DEFINE_PROP_LINK("memory", PL330State, mem_mr, + TYPE_MEMORY_REGION, MemoryRegion *), + + DEFINE_PROP_END_OF_LIST(), +}; + +static void pl330_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = pl330_realize; + dc->reset = pl330_reset; + device_class_set_props(dc, pl330_properties); + dc->vmsd = &vmstate_pl330; +} + +static const TypeInfo pl330_type_info = { + .name = TYPE_PL330, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(PL330State), + .class_init = pl330_class_init, +}; + +static void pl330_register_types(void) +{ + type_register_static(&pl330_type_info); +} + +type_init(pl330_register_types) diff --git a/hw/dma/pxa2xx_dma.c b/hw/dma/pxa2xx_dma.c new file mode 100644 index 000000000..fa896f7ed --- /dev/null +++ b/hw/dma/pxa2xx_dma.c @@ -0,0 
+1,591 @@ +/* + * Intel XScale PXA255/270 DMA controller. + * + * Copyright (c) 2006 Openedhand Ltd. + * Copyright (c) 2006 Thorsten Zitterell + * Written by Andrzej Zaborowski + * + * This code is licensed under the GPL. + */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "hw/hw.h" +#include "hw/irq.h" +#include "hw/qdev-properties.h" +#include "hw/arm/pxa.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "qapi/error.h" +#include "qemu/module.h" +#include "qom/object.h" + +#define PXA255_DMA_NUM_CHANNELS 16 +#define PXA27X_DMA_NUM_CHANNELS 32 + +#define PXA2XX_DMA_NUM_REQUESTS 75 + +typedef struct { + uint32_t descr; + uint32_t src; + uint32_t dest; + uint32_t cmd; + uint32_t state; + int request; +} PXA2xxDMAChannel; + +#define TYPE_PXA2XX_DMA "pxa2xx-dma" +OBJECT_DECLARE_SIMPLE_TYPE(PXA2xxDMAState, PXA2XX_DMA) + +struct PXA2xxDMAState { + SysBusDevice parent_obj; + + MemoryRegion iomem; + qemu_irq irq; + + uint32_t stopintr; + uint32_t eorintr; + uint32_t rasintr; + uint32_t startintr; + uint32_t endintr; + + uint32_t align; + uint32_t pio; + + int channels; + PXA2xxDMAChannel *chan; + + uint8_t req[PXA2XX_DMA_NUM_REQUESTS]; + + /* Flag to avoid recursive DMA invocations. */ + int running; +}; + +#define DCSR0 0x0000 /* DMA Control / Status register for Channel 0 */ +#define DCSR31 0x007c /* DMA Control / Status register for Channel 31 */ +#define DALGN 0x00a0 /* DMA Alignment register */ +#define DPCSR 0x00a4 /* DMA Programmed I/O Control Status register */ +#define DRQSR0 0x00e0 /* DMA DREQ<0> Status register */ +#define DRQSR1 0x00e4 /* DMA DREQ<1> Status register */ +#define DRQSR2 0x00e8 /* DMA DREQ<2> Status register */ +#define DINT 0x00f0 /* DMA Interrupt register */ +#define DRCMR0 0x0100 /* Request to Channel Map register 0 */ +#define DRCMR63 0x01fc /* Request to Channel Map register 63 */ +#define D_CH0 0x0200 /* Channel 0 Descriptor start */ +#define DRCMR64 0x1100 /* Request to Channel Map register 64 */ +#define DRCMR74 0x1128 /* Request to Channel Map register 74 */ + +/* Per-channel register */ +#define DDADR 0x00 +#define DSADR 0x01 +#define DTADR 0x02 +#define DCMD 0x03 + +/* Bit-field masks */ +#define DRCMR_CHLNUM 0x1f +#define DRCMR_MAPVLD (1 << 7) +#define DDADR_STOP (1 << 0) +#define DDADR_BREN (1 << 1) +#define DCMD_LEN 0x1fff +#define DCMD_WIDTH(x) (1 << ((((x) >> 14) & 3) - 1)) +#define DCMD_SIZE(x) (4 << (((x) >> 16) & 3)) +#define DCMD_FLYBYT (1 << 19) +#define DCMD_FLYBYS (1 << 20) +#define DCMD_ENDIRQEN (1 << 21) +#define DCMD_STARTIRQEN (1 << 22) +#define DCMD_CMPEN (1 << 25) +#define DCMD_FLOWTRG (1 << 28) +#define DCMD_FLOWSRC (1 << 29) +#define DCMD_INCTRGADDR (1 << 30) +#define DCMD_INCSRCADDR (1 << 31) +#define DCSR_BUSERRINTR (1 << 0) +#define DCSR_STARTINTR (1 << 1) +#define DCSR_ENDINTR (1 << 2) +#define DCSR_STOPINTR (1 << 3) +#define DCSR_RASINTR (1 << 4) +#define DCSR_REQPEND (1 << 8) +#define DCSR_EORINT (1 << 9) +#define DCSR_CMPST (1 << 10) +#define DCSR_MASKRUN (1 << 22) +#define DCSR_RASIRQEN (1 << 23) +#define DCSR_CLRCMPST (1 << 24) +#define DCSR_SETCMPST (1 << 25) +#define DCSR_EORSTOPEN (1 << 26) +#define DCSR_EORJMPEN (1 << 27) +#define DCSR_EORIRQEN (1 << 28) +#define DCSR_STOPIRQEN (1 << 29) +#define DCSR_NODESCFETCH (1 << 30) +#define DCSR_RUN (1 << 31) + +static inline void pxa2xx_dma_update(PXA2xxDMAState *s, int ch) +{ + if (ch >= 0) { + if ((s->chan[ch].state & DCSR_STOPIRQEN) && + (s->chan[ch].state & DCSR_STOPINTR)) + s->stopintr |= 1 << ch; + else + s->stopintr &= ~(1 << ch); + + if 
((s->chan[ch].state & DCSR_EORIRQEN) && + (s->chan[ch].state & DCSR_EORINT)) + s->eorintr |= 1 << ch; + else + s->eorintr &= ~(1 << ch); + + if ((s->chan[ch].state & DCSR_RASIRQEN) && + (s->chan[ch].state & DCSR_RASINTR)) + s->rasintr |= 1 << ch; + else + s->rasintr &= ~(1 << ch); + + if (s->chan[ch].state & DCSR_STARTINTR) + s->startintr |= 1 << ch; + else + s->startintr &= ~(1 << ch); + + if (s->chan[ch].state & DCSR_ENDINTR) + s->endintr |= 1 << ch; + else + s->endintr &= ~(1 << ch); + } + + if (s->stopintr | s->eorintr | s->rasintr | s->startintr | s->endintr) + qemu_irq_raise(s->irq); + else + qemu_irq_lower(s->irq); +} + +static inline void pxa2xx_dma_descriptor_fetch( + PXA2xxDMAState *s, int ch) +{ + uint32_t desc[4]; + hwaddr daddr = s->chan[ch].descr & ~0xf; + if ((s->chan[ch].descr & DDADR_BREN) && (s->chan[ch].state & DCSR_CMPST)) + daddr += 32; + + cpu_physical_memory_read(daddr, desc, 16); + s->chan[ch].descr = desc[DDADR]; + s->chan[ch].src = desc[DSADR]; + s->chan[ch].dest = desc[DTADR]; + s->chan[ch].cmd = desc[DCMD]; + + if (s->chan[ch].cmd & DCMD_FLOWSRC) + s->chan[ch].src &= ~3; + if (s->chan[ch].cmd & DCMD_FLOWTRG) + s->chan[ch].dest &= ~3; + + if (s->chan[ch].cmd & (DCMD_CMPEN | DCMD_FLYBYS | DCMD_FLYBYT)) + printf("%s: unsupported mode in channel %i\n", __func__, ch); + + if (s->chan[ch].cmd & DCMD_STARTIRQEN) + s->chan[ch].state |= DCSR_STARTINTR; +} + +static void pxa2xx_dma_run(PXA2xxDMAState *s) +{ + int c, srcinc, destinc; + uint32_t n, size; + uint32_t width; + uint32_t length; + uint8_t buffer[32]; + PXA2xxDMAChannel *ch; + + if (s->running ++) + return; + + while (s->running) { + s->running = 1; + for (c = 0; c < s->channels; c ++) { + ch = &s->chan[c]; + + while ((ch->state & DCSR_RUN) && !(ch->state & DCSR_STOPINTR)) { + /* Test for pending requests */ + if ((ch->cmd & (DCMD_FLOWSRC | DCMD_FLOWTRG)) && !ch->request) + break; + + length = ch->cmd & DCMD_LEN; + size = DCMD_SIZE(ch->cmd); + width = DCMD_WIDTH(ch->cmd); + + srcinc = (ch->cmd & DCMD_INCSRCADDR) ? width : 0; + destinc = (ch->cmd & DCMD_INCTRGADDR) ? width : 0; + + while (length) { + size = MIN(length, size); + + for (n = 0; n < size; n += width) { + cpu_physical_memory_read(ch->src, buffer + n, width); + ch->src += srcinc; + } + + for (n = 0; n < size; n += width) { + cpu_physical_memory_write(ch->dest, buffer + n, width); + ch->dest += destinc; + } + + length -= size; + + if ((ch->cmd & (DCMD_FLOWSRC | DCMD_FLOWTRG)) && + !ch->request) { + ch->state |= DCSR_EORINT; + if (ch->state & DCSR_EORSTOPEN) + ch->state |= DCSR_STOPINTR; + if ((ch->state & DCSR_EORJMPEN) && + !(ch->state & DCSR_NODESCFETCH)) + pxa2xx_dma_descriptor_fetch(s, c); + break; + } + } + + ch->cmd = (ch->cmd & ~DCMD_LEN) | length; + + /* Is the transfer complete now? */ + if (!length) { + if (ch->cmd & DCMD_ENDIRQEN) + ch->state |= DCSR_ENDINTR; + + if ((ch->state & DCSR_NODESCFETCH) || + (ch->descr & DDADR_STOP) || + (ch->state & DCSR_EORSTOPEN)) { + ch->state |= DCSR_STOPINTR; + ch->state &= ~DCSR_RUN; + + break; + } + + ch->state |= DCSR_STOPINTR; + break; + } + } + } + + s->running --; + } +} + +static uint64_t pxa2xx_dma_read(void *opaque, hwaddr offset, + unsigned size) +{ + PXA2xxDMAState *s = (PXA2xxDMAState *) opaque; + unsigned int channel; + + if (size != 4) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad access width %u\n", + __func__, size); + return 5; + } + + switch (offset) { + case DRCMR64 ... DRCMR74: + offset -= DRCMR64 - DRCMR0 - (64 << 2); + /* Fall through */ + case DRCMR0 ... 
DRCMR63: + channel = (offset - DRCMR0) >> 2; + return s->req[channel]; + + case DRQSR0: + case DRQSR1: + case DRQSR2: + return 0; + + case DCSR0 ... DCSR31: + channel = offset >> 2; + if (s->chan[channel].request) + return s->chan[channel].state | DCSR_REQPEND; + return s->chan[channel].state; + + case DINT: + return s->stopintr | s->eorintr | s->rasintr | + s->startintr | s->endintr; + + case DALGN: + return s->align; + + case DPCSR: + return s->pio; + } + + if (offset >= D_CH0 && offset < D_CH0 + (s->channels << 4)) { + channel = (offset - D_CH0) >> 4; + switch ((offset & 0x0f) >> 2) { + case DDADR: + return s->chan[channel].descr; + case DSADR: + return s->chan[channel].src; + case DTADR: + return s->chan[channel].dest; + case DCMD: + return s->chan[channel].cmd; + } + } + qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" HWADDR_PRIX "\n", + __func__, offset); + return 7; +} + +static void pxa2xx_dma_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + PXA2xxDMAState *s = (PXA2xxDMAState *) opaque; + unsigned int channel; + + if (size != 4) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad access width %u\n", + __func__, size); + return; + } + + switch (offset) { + case DRCMR64 ... DRCMR74: + offset -= DRCMR64 - DRCMR0 - (64 << 2); + /* Fall through */ + case DRCMR0 ... DRCMR63: + channel = (offset - DRCMR0) >> 2; + + if (value & DRCMR_MAPVLD) + if ((value & DRCMR_CHLNUM) > s->channels) + hw_error("%s: Bad DMA channel %i\n", + __func__, (unsigned)value & DRCMR_CHLNUM); + + s->req[channel] = value; + break; + + case DRQSR0: + case DRQSR1: + case DRQSR2: + /* Nothing to do */ + break; + + case DCSR0 ... DCSR31: + channel = offset >> 2; + s->chan[channel].state &= 0x0000071f & ~(value & + (DCSR_EORINT | DCSR_ENDINTR | + DCSR_STARTINTR | DCSR_BUSERRINTR)); + s->chan[channel].state |= value & 0xfc800000; + + if (s->chan[channel].state & DCSR_STOPIRQEN) + s->chan[channel].state &= ~DCSR_STOPINTR; + + if (value & DCSR_NODESCFETCH) { + /* No-descriptor-fetch mode */ + if (value & DCSR_RUN) { + s->chan[channel].state &= ~DCSR_STOPINTR; + pxa2xx_dma_run(s); + } + } else { + /* Descriptor-fetch mode */ + if (value & DCSR_RUN) { + s->chan[channel].state &= ~DCSR_STOPINTR; + pxa2xx_dma_descriptor_fetch(s, channel); + pxa2xx_dma_run(s); + } + } + + /* Shouldn't matter as our DMA is synchronous. 
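+         * pxa2xx_dma_run() only returns once the channel has stopped,
+         * so clearing RUN/MASKRUN never interrupts an in-flight transfer.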
+         */
+        if (!(value & (DCSR_RUN | DCSR_MASKRUN)))
+            s->chan[channel].state |= DCSR_STOPINTR;
+
+        if (value & DCSR_CLRCMPST)
+            s->chan[channel].state &= ~DCSR_CMPST;
+        if (value & DCSR_SETCMPST)
+            s->chan[channel].state |= DCSR_CMPST;
+
+        pxa2xx_dma_update(s, channel);
+        break;
+
+    case DALGN:
+        s->align = value;
+        break;
+
+    case DPCSR:
+        s->pio = value & 0x80000001;
+        break;
+
+    default:
+        if (offset >= D_CH0 && offset < D_CH0 + (s->channels << 4)) {
+            channel = (offset - D_CH0) >> 4;
+            switch ((offset & 0x0f) >> 2) {
+            case DDADR:
+                s->chan[channel].descr = value;
+                break;
+            case DSADR:
+                s->chan[channel].src = value;
+                break;
+            case DTADR:
+                s->chan[channel].dest = value;
+                break;
+            case DCMD:
+                s->chan[channel].cmd = value;
+                break;
+            default:
+                goto fail;
+            }
+
+            break;
+        }
+    fail:
+        qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" HWADDR_PRIX "\n",
+                      __func__, offset);
+    }
+}
+
+static const MemoryRegionOps pxa2xx_dma_ops = {
+    .read = pxa2xx_dma_read,
+    .write = pxa2xx_dma_write,
+    .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+static void pxa2xx_dma_request(void *opaque, int req_num, int on)
+{
+    PXA2xxDMAState *s = opaque;
+    int ch;
+    if (req_num < 0 || req_num >= PXA2XX_DMA_NUM_REQUESTS)
+        hw_error("%s: Bad DMA request %i\n", __func__, req_num);
+
+    if (!(s->req[req_num] & DRCMR_MAPVLD))
+        return;
+    ch = s->req[req_num] & DRCMR_CHLNUM;
+
+    if (!s->chan[ch].request && on)
+        s->chan[ch].state |= DCSR_RASINTR;
+    else
+        s->chan[ch].state &= ~DCSR_RASINTR;
+    if (s->chan[ch].request && !on)
+        s->chan[ch].state |= DCSR_EORINT;
+
+    s->chan[ch].request = on;
+    if (on) {
+        pxa2xx_dma_run(s);
+        pxa2xx_dma_update(s, ch);
+    }
+}
+
+static void pxa2xx_dma_init(Object *obj)
+{
+    DeviceState *dev = DEVICE(obj);
+    PXA2xxDMAState *s = PXA2XX_DMA(obj);
+    SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
+
+    memset(s->req, 0, sizeof(uint8_t) * PXA2XX_DMA_NUM_REQUESTS);
+
+    qdev_init_gpio_in(dev, pxa2xx_dma_request, PXA2XX_DMA_NUM_REQUESTS);
+
+    memory_region_init_io(&s->iomem, obj, &pxa2xx_dma_ops, s,
+                          "pxa2xx.dma", 0x00010000);
+    sysbus_init_mmio(sbd, &s->iomem);
+    sysbus_init_irq(sbd, &s->irq);
+}
+
+static void pxa2xx_dma_realize(DeviceState *dev, Error **errp)
+{
+    PXA2xxDMAState *s = PXA2XX_DMA(dev);
+    int i;
+
+    if (s->channels <= 0) {
+        error_setg(errp, "channels value invalid");
+        return;
+    }
+
+    s->chan = g_new0(PXA2xxDMAChannel, s->channels);
+
+    for (i = 0; i < s->channels; i++)
+        s->chan[i].state = DCSR_STOPINTR;
+}
+
+DeviceState *pxa27x_dma_init(hwaddr base, qemu_irq irq)
+{
+    DeviceState *dev;
+
+    dev = qdev_new("pxa2xx-dma");
+    qdev_prop_set_int32(dev, "channels", PXA27X_DMA_NUM_CHANNELS);
+    sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
+
+    sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base);
+    sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, irq);
+
+    return dev;
+}
+
+DeviceState *pxa255_dma_init(hwaddr base, qemu_irq irq)
+{
+    DeviceState *dev;
+
+    dev = qdev_new("pxa2xx-dma");
+    qdev_prop_set_int32(dev, "channels", PXA255_DMA_NUM_CHANNELS);
+    sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
+
+    sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base);
+    sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, irq);
+
+    return dev;
+}
+
+static bool is_version_0(void *opaque, int version_id)
+{
+    return version_id == 0;
+}
+
+static const VMStateDescription vmstate_pxa2xx_dma_chan = {
+    .name = "pxa2xx_dma_chan",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT32(descr, PXA2xxDMAChannel),
+        VMSTATE_UINT32(src, PXA2xxDMAChannel),
+        VMSTATE_UINT32(dest,
PXA2xxDMAChannel), + VMSTATE_UINT32(cmd, PXA2xxDMAChannel), + VMSTATE_UINT32(state, PXA2xxDMAChannel), + VMSTATE_INT32(request, PXA2xxDMAChannel), + VMSTATE_END_OF_LIST(), + }, +}; + +static const VMStateDescription vmstate_pxa2xx_dma = { + .name = "pxa2xx_dma", + .version_id = 1, + .minimum_version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_UNUSED_TEST(is_version_0, 4), + VMSTATE_UINT32(stopintr, PXA2xxDMAState), + VMSTATE_UINT32(eorintr, PXA2xxDMAState), + VMSTATE_UINT32(rasintr, PXA2xxDMAState), + VMSTATE_UINT32(startintr, PXA2xxDMAState), + VMSTATE_UINT32(endintr, PXA2xxDMAState), + VMSTATE_UINT32(align, PXA2xxDMAState), + VMSTATE_UINT32(pio, PXA2xxDMAState), + VMSTATE_BUFFER(req, PXA2xxDMAState), + VMSTATE_STRUCT_VARRAY_POINTER_INT32(chan, PXA2xxDMAState, channels, + vmstate_pxa2xx_dma_chan, PXA2xxDMAChannel), + VMSTATE_END_OF_LIST(), + }, +}; + +static Property pxa2xx_dma_properties[] = { + DEFINE_PROP_INT32("channels", PXA2xxDMAState, channels, -1), + DEFINE_PROP_END_OF_LIST(), +}; + +static void pxa2xx_dma_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->desc = "PXA2xx DMA controller"; + dc->vmsd = &vmstate_pxa2xx_dma; + device_class_set_props(dc, pxa2xx_dma_properties); + dc->realize = pxa2xx_dma_realize; +} + +static const TypeInfo pxa2xx_dma_info = { + .name = TYPE_PXA2XX_DMA, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(PXA2xxDMAState), + .instance_init = pxa2xx_dma_init, + .class_init = pxa2xx_dma_class_init, +}; + +static void pxa2xx_dma_register_types(void) +{ + type_register_static(&pxa2xx_dma_info); +} + +type_init(pxa2xx_dma_register_types) diff --git a/hw/dma/rc4030.c b/hw/dma/rc4030.c new file mode 100644 index 000000000..e4d2f1725 --- /dev/null +++ b/hw/dma/rc4030.c @@ -0,0 +1,754 @@ +/* + * QEMU JAZZ RC4030 chipset + * + * Copyright (c) 2007-2013 Hervé Poussineau + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "hw/irq.h" +#include "hw/mips/mips.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "qapi/error.h" +#include "qemu/timer.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "exec/address-spaces.h" +#include "trace.h" +#include "qom/object.h" + +/********************************************************/ +/* rc4030 emulation */ + +typedef struct dma_pagetable_entry { + int32_t frame; + int32_t owner; +} QEMU_PACKED dma_pagetable_entry; + +#define DMA_PAGESIZE 4096 +#define DMA_REG_ENABLE 1 +#define DMA_REG_COUNT 2 +#define DMA_REG_ADDRESS 3 + +#define DMA_FLAG_ENABLE 0x0001 +#define DMA_FLAG_MEM_TO_DEV 0x0002 +#define DMA_FLAG_TC_INTR 0x0100 +#define DMA_FLAG_MEM_INTR 0x0200 +#define DMA_FLAG_ADDR_INTR 0x0400 + +#define TYPE_RC4030 "rc4030" +OBJECT_DECLARE_SIMPLE_TYPE(rc4030State, RC4030) + +#define TYPE_RC4030_IOMMU_MEMORY_REGION "rc4030-iommu-memory-region" + +struct rc4030State { + + SysBusDevice parent; + + uint32_t config; /* 0x0000: RC4030 config register */ + uint32_t revision; /* 0x0008: RC4030 Revision register */ + uint32_t invalid_address_register; /* 0x0010: Invalid Address register */ + + /* DMA */ + uint32_t dma_regs[8][4]; + uint32_t dma_tl_base; /* 0x0018: DMA transl. table base */ + uint32_t dma_tl_limit; /* 0x0020: DMA transl. table limit */ + + /* cache */ + uint32_t cache_maint; /* 0x0030: Cache Maintenance */ + uint32_t remote_failed_address; /* 0x0038: Remote Failed Address */ + uint32_t memory_failed_address; /* 0x0040: Memory Failed Address */ + uint32_t cache_ptag; /* 0x0048: I/O Cache Physical Tag */ + uint32_t cache_ltag; /* 0x0050: I/O Cache Logical Tag */ + uint32_t cache_bmask; /* 0x0058: I/O Cache Byte Mask */ + + uint32_t nmi_interrupt; /* 0x0200: interrupt source */ + uint32_t memory_refresh_rate; /* 0x0210: memory refresh rate */ + uint32_t nvram_protect; /* 0x0220: NV ram protect register */ + uint32_t rem_speed[16]; + uint32_t imr_jazz; /* Local bus int enable mask */ + uint32_t isr_jazz; /* Local bus int source */ + + /* timer */ + QEMUTimer *periodic_timer; + uint32_t itr; /* Interval timer reload */ + + qemu_irq timer_irq; + qemu_irq jazz_bus_irq; + + /* whole DMA memory region, root of DMA address space */ + IOMMUMemoryRegion dma_mr; + AddressSpace dma_as; + + MemoryRegion iomem_chipset; + MemoryRegion iomem_jazzio; +}; + +static void set_next_tick(rc4030State *s) +{ + uint32_t tm_hz; + qemu_irq_lower(s->timer_irq); + + tm_hz = 1000 / (s->itr + 1); + + timer_mod(s->periodic_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + + NANOSECONDS_PER_SECOND / tm_hz); +} + +/* called for accesses to rc4030 */ +static uint64_t rc4030_read(void *opaque, hwaddr addr, unsigned int size) +{ + rc4030State *s = opaque; + uint32_t val; + + addr &= 0x3fff; + switch (addr & ~0x3) { + /* Global config register */ + case 0x0000: + val = s->config; + break; + /* Revision register */ + case 0x0008: + val = s->revision; + break; + /* Invalid Address register */ + case 0x0010: + val = s->invalid_address_register; + break; + /* DMA transl. table base */ + case 0x0018: + val = s->dma_tl_base; + break; + /* DMA transl. 
table limit */ + case 0x0020: + val = s->dma_tl_limit; + break; + /* Remote Failed Address */ + case 0x0038: + val = s->remote_failed_address; + break; + /* Memory Failed Address */ + case 0x0040: + val = s->memory_failed_address; + break; + /* I/O Cache Byte Mask */ + case 0x0058: + val = s->cache_bmask; + /* HACK */ + if (s->cache_bmask == (uint32_t)-1) { + s->cache_bmask = 0; + } + break; + /* Remote Speed Registers */ + case 0x0070: + case 0x0078: + case 0x0080: + case 0x0088: + case 0x0090: + case 0x0098: + case 0x00a0: + case 0x00a8: + case 0x00b0: + case 0x00b8: + case 0x00c0: + case 0x00c8: + case 0x00d0: + case 0x00d8: + case 0x00e0: + case 0x00e8: + val = s->rem_speed[(addr - 0x0070) >> 3]; + break; + /* DMA channel base address */ + case 0x0100: + case 0x0108: + case 0x0110: + case 0x0118: + case 0x0120: + case 0x0128: + case 0x0130: + case 0x0138: + case 0x0140: + case 0x0148: + case 0x0150: + case 0x0158: + case 0x0160: + case 0x0168: + case 0x0170: + case 0x0178: + case 0x0180: + case 0x0188: + case 0x0190: + case 0x0198: + case 0x01a0: + case 0x01a8: + case 0x01b0: + case 0x01b8: + case 0x01c0: + case 0x01c8: + case 0x01d0: + case 0x01d8: + case 0x01e0: + case 0x01e8: + case 0x01f0: + case 0x01f8: + { + int entry = (addr - 0x0100) >> 5; + int idx = (addr & 0x1f) >> 3; + val = s->dma_regs[entry][idx]; + } + break; + /* Interrupt source */ + case 0x0200: + val = s->nmi_interrupt; + break; + /* Error type */ + case 0x0208: + val = 0; + break; + /* Memory refresh rate */ + case 0x0210: + val = s->memory_refresh_rate; + break; + /* NV ram protect register */ + case 0x0220: + val = s->nvram_protect; + break; + /* Interval timer count */ + case 0x0230: + val = 0; + qemu_irq_lower(s->timer_irq); + break; + /* EISA interrupt */ + case 0x0238: + val = 7; /* FIXME: should be read from EISA controller */ + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "rc4030: invalid read at 0x%x", (int)addr); + val = 0; + break; + } + + if ((addr & ~3) != 0x230) { + trace_rc4030_read(addr, val); + } + + return val; +} + +static void rc4030_write(void *opaque, hwaddr addr, uint64_t data, + unsigned int size) +{ + rc4030State *s = opaque; + uint32_t val = data; + addr &= 0x3fff; + + trace_rc4030_write(addr, val); + + switch (addr & ~0x3) { + /* Global config register */ + case 0x0000: + s->config = val; + break; + /* DMA transl. table base */ + case 0x0018: + s->dma_tl_base = val; + break; + /* DMA transl. table limit */ + case 0x0020: + s->dma_tl_limit = val; + break; + /* DMA transl. 
table invalidated */ + case 0x0028: + break; + /* Cache Maintenance */ + case 0x0030: + s->cache_maint = val; + break; + /* I/O Cache Physical Tag */ + case 0x0048: + s->cache_ptag = val; + break; + /* I/O Cache Logical Tag */ + case 0x0050: + s->cache_ltag = val; + break; + /* I/O Cache Byte Mask */ + case 0x0058: + s->cache_bmask |= val; /* HACK */ + break; + /* I/O Cache Buffer Window */ + case 0x0060: + /* HACK */ + if (s->cache_ltag == 0x80000001 && s->cache_bmask == 0xf0f0f0f) { + hwaddr dest = s->cache_ptag & ~0x1; + dest += (s->cache_maint & 0x3) << 3; + cpu_physical_memory_write(dest, &val, 4); + } + break; + /* Remote Speed Registers */ + case 0x0070: + case 0x0078: + case 0x0080: + case 0x0088: + case 0x0090: + case 0x0098: + case 0x00a0: + case 0x00a8: + case 0x00b0: + case 0x00b8: + case 0x00c0: + case 0x00c8: + case 0x00d0: + case 0x00d8: + case 0x00e0: + case 0x00e8: + s->rem_speed[(addr - 0x0070) >> 3] = val; + break; + /* DMA channel base address */ + case 0x0100: + case 0x0108: + case 0x0110: + case 0x0118: + case 0x0120: + case 0x0128: + case 0x0130: + case 0x0138: + case 0x0140: + case 0x0148: + case 0x0150: + case 0x0158: + case 0x0160: + case 0x0168: + case 0x0170: + case 0x0178: + case 0x0180: + case 0x0188: + case 0x0190: + case 0x0198: + case 0x01a0: + case 0x01a8: + case 0x01b0: + case 0x01b8: + case 0x01c0: + case 0x01c8: + case 0x01d0: + case 0x01d8: + case 0x01e0: + case 0x01e8: + case 0x01f0: + case 0x01f8: + { + int entry = (addr - 0x0100) >> 5; + int idx = (addr & 0x1f) >> 3; + s->dma_regs[entry][idx] = val; + } + break; + /* Memory refresh rate */ + case 0x0210: + s->memory_refresh_rate = val; + break; + /* Interval timer reload */ + case 0x0228: + s->itr = val & 0x01FF; + qemu_irq_lower(s->timer_irq); + set_next_tick(s); + break; + /* EISA interrupt */ + case 0x0238: + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "rc4030: invalid write of 0x%02x at 0x%x", + val, (int)addr); + break; + } +} + +static const MemoryRegionOps rc4030_ops = { + .read = rc4030_read, + .write = rc4030_write, + .impl.min_access_size = 4, + .impl.max_access_size = 4, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void update_jazz_irq(rc4030State *s) +{ + uint16_t pending; + + pending = s->isr_jazz & s->imr_jazz; + + if (pending != 0) { + qemu_irq_raise(s->jazz_bus_irq); + } else { + qemu_irq_lower(s->jazz_bus_irq); + } +} + +static void rc4030_irq_jazz_request(void *opaque, int irq, int level) +{ + rc4030State *s = opaque; + + if (level) { + s->isr_jazz |= 1 << irq; + } else { + s->isr_jazz &= ~(1 << irq); + } + + update_jazz_irq(s); +} + +static void rc4030_periodic_timer(void *opaque) +{ + rc4030State *s = opaque; + + set_next_tick(s); + qemu_irq_raise(s->timer_irq); +} + +static uint64_t jazzio_read(void *opaque, hwaddr addr, unsigned int size) +{ + rc4030State *s = opaque; + uint32_t val; + uint32_t irq; + addr &= 0xfff; + + switch (addr) { + /* Local bus int source */ + case 0x00: { + uint32_t pending = s->isr_jazz & s->imr_jazz; + val = 0; + irq = 0; + while (pending) { + if (pending & 1) { + val = (irq + 1) << 2; + break; + } + irq++; + pending >>= 1; + } + break; + } + /* Local bus int enable mask */ + case 0x02: + val = s->imr_jazz; + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "rc4030/jazzio: invalid read at 0x%x", (int)addr); + val = 0; + break; + } + + trace_jazzio_read(addr, val); + + return val; +} + +static void jazzio_write(void *opaque, hwaddr addr, uint64_t data, + unsigned int size) +{ + rc4030State *s = opaque; + uint32_t val = data; + addr 
&= 0xfff; + + trace_jazzio_write(addr, val); + + switch (addr) { + /* Local bus int enable mask */ + case 0x02: + s->imr_jazz = val; + update_jazz_irq(s); + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "rc4030/jazzio: invalid write of 0x%02x at 0x%x", + val, (int)addr); + break; + } +} + +static const MemoryRegionOps jazzio_ops = { + .read = jazzio_read, + .write = jazzio_write, + .impl.min_access_size = 2, + .impl.max_access_size = 2, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static IOMMUTLBEntry rc4030_dma_translate(IOMMUMemoryRegion *iommu, hwaddr addr, + IOMMUAccessFlags flag, int iommu_idx) +{ + rc4030State *s = container_of(iommu, rc4030State, dma_mr); + IOMMUTLBEntry ret = { + .target_as = &address_space_memory, + .iova = addr & ~(DMA_PAGESIZE - 1), + .translated_addr = 0, + .addr_mask = DMA_PAGESIZE - 1, + .perm = IOMMU_NONE, + }; + uint64_t i, entry_address; + dma_pagetable_entry entry; + + i = addr / DMA_PAGESIZE; + if (i < s->dma_tl_limit / sizeof(entry)) { + entry_address = (s->dma_tl_base & 0x7fffffff) + i * sizeof(entry); + if (address_space_read(ret.target_as, entry_address, + MEMTXATTRS_UNSPECIFIED, &entry, sizeof(entry)) + == MEMTX_OK) { + ret.translated_addr = entry.frame & ~(DMA_PAGESIZE - 1); + ret.perm = IOMMU_RW; + } + } + + return ret; +} + +static void rc4030_reset(DeviceState *dev) +{ + rc4030State *s = RC4030(dev); + int i; + + s->config = 0x410; /* some boards seem to accept 0x104 too */ + s->revision = 1; + s->invalid_address_register = 0; + + memset(s->dma_regs, 0, sizeof(s->dma_regs)); + + s->remote_failed_address = s->memory_failed_address = 0; + s->cache_maint = 0; + s->cache_ptag = s->cache_ltag = 0; + s->cache_bmask = 0; + + s->memory_refresh_rate = 0x18186; + s->nvram_protect = 7; + for (i = 0; i < 15; i++) { + s->rem_speed[i] = 7; + } + s->imr_jazz = 0x10; /* XXX: required by firmware, but why? */ + s->isr_jazz = 0; + + s->itr = 0; + + qemu_irq_lower(s->timer_irq); + qemu_irq_lower(s->jazz_bus_irq); +} + +static int rc4030_post_load(void *opaque, int version_id) +{ + rc4030State *s = opaque; + + set_next_tick(s); + update_jazz_irq(s); + + return 0; +} + +static const VMStateDescription vmstate_rc4030 = { + .name = "rc4030", + .version_id = 3, + .post_load = rc4030_post_load, + .fields = (VMStateField []) { + VMSTATE_UINT32(config, rc4030State), + VMSTATE_UINT32(invalid_address_register, rc4030State), + VMSTATE_UINT32_2DARRAY(dma_regs, rc4030State, 8, 4), + VMSTATE_UINT32(dma_tl_base, rc4030State), + VMSTATE_UINT32(dma_tl_limit, rc4030State), + VMSTATE_UINT32(cache_maint, rc4030State), + VMSTATE_UINT32(remote_failed_address, rc4030State), + VMSTATE_UINT32(memory_failed_address, rc4030State), + VMSTATE_UINT32(cache_ptag, rc4030State), + VMSTATE_UINT32(cache_ltag, rc4030State), + VMSTATE_UINT32(cache_bmask, rc4030State), + VMSTATE_UINT32(memory_refresh_rate, rc4030State), + VMSTATE_UINT32(nvram_protect, rc4030State), + VMSTATE_UINT32_ARRAY(rem_speed, rc4030State, 16), + VMSTATE_UINT32(imr_jazz, rc4030State), + VMSTATE_UINT32(isr_jazz, rc4030State), + VMSTATE_UINT32(itr, rc4030State), + VMSTATE_END_OF_LIST() + } +}; + +static void rc4030_do_dma(void *opaque, int n, uint8_t *buf, + int len, bool is_write) +{ + rc4030State *s = opaque; + hwaddr dma_addr; + int dev_to_mem; + + s->dma_regs[n][DMA_REG_ENABLE] &= + ~(DMA_FLAG_TC_INTR | DMA_FLAG_MEM_INTR | DMA_FLAG_ADDR_INTR); + + /* Check DMA channel consistency */ + dev_to_mem = (s->dma_regs[n][DMA_REG_ENABLE] & DMA_FLAG_MEM_TO_DEV) ? 
0 : 1; + if (!(s->dma_regs[n][DMA_REG_ENABLE] & DMA_FLAG_ENABLE) || + (is_write != dev_to_mem)) { + s->dma_regs[n][DMA_REG_ENABLE] |= DMA_FLAG_MEM_INTR; + s->nmi_interrupt |= 1 << n; + return; + } + + /* Get start address and len */ + if (len > s->dma_regs[n][DMA_REG_COUNT]) { + len = s->dma_regs[n][DMA_REG_COUNT]; + } + dma_addr = s->dma_regs[n][DMA_REG_ADDRESS]; + + /* Read/write data at right place */ + address_space_rw(&s->dma_as, dma_addr, MEMTXATTRS_UNSPECIFIED, + buf, len, is_write); + + s->dma_regs[n][DMA_REG_ENABLE] |= DMA_FLAG_TC_INTR; + s->dma_regs[n][DMA_REG_COUNT] -= len; +} + +struct rc4030DMAState { + void *opaque; + int n; +}; + +void rc4030_dma_read(void *dma, uint8_t *buf, int len) +{ + rc4030_dma s = dma; + rc4030_do_dma(s->opaque, s->n, buf, len, false); +} + +void rc4030_dma_write(void *dma, uint8_t *buf, int len) +{ + rc4030_dma s = dma; + rc4030_do_dma(s->opaque, s->n, buf, len, true); +} + +static rc4030_dma *rc4030_allocate_dmas(void *opaque, int n) +{ + rc4030_dma *s; + struct rc4030DMAState *p; + int i; + + s = (rc4030_dma *)g_new0(rc4030_dma, n); + p = (struct rc4030DMAState *)g_new0(struct rc4030DMAState, n); + for (i = 0; i < n; i++) { + p->opaque = opaque; + p->n = i; + s[i] = p; + p++; + } + return s; +} + +static void rc4030_initfn(Object *obj) +{ + DeviceState *dev = DEVICE(obj); + rc4030State *s = RC4030(obj); + SysBusDevice *sysbus = SYS_BUS_DEVICE(obj); + + qdev_init_gpio_in(dev, rc4030_irq_jazz_request, 16); + + sysbus_init_irq(sysbus, &s->timer_irq); + sysbus_init_irq(sysbus, &s->jazz_bus_irq); + + sysbus_init_mmio(sysbus, &s->iomem_chipset); + sysbus_init_mmio(sysbus, &s->iomem_jazzio); +} + +static void rc4030_realize(DeviceState *dev, Error **errp) +{ + rc4030State *s = RC4030(dev); + Object *o = OBJECT(dev); + + s->periodic_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, + rc4030_periodic_timer, s); + + memory_region_init_io(&s->iomem_chipset, o, &rc4030_ops, s, + "rc4030.chipset", 0x300); + memory_region_init_io(&s->iomem_jazzio, o, &jazzio_ops, s, + "rc4030.jazzio", 0x00001000); + + memory_region_init_iommu(&s->dma_mr, sizeof(s->dma_mr), + TYPE_RC4030_IOMMU_MEMORY_REGION, + o, "rc4030.dma", 4 * GiB); + address_space_init(&s->dma_as, MEMORY_REGION(&s->dma_mr), "rc4030-dma"); +} + +static void rc4030_unrealize(DeviceState *dev) +{ + rc4030State *s = RC4030(dev); + + timer_free(s->periodic_timer); + + address_space_destroy(&s->dma_as); + object_unparent(OBJECT(&s->dma_mr)); +} + +static void rc4030_class_init(ObjectClass *klass, void *class_data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = rc4030_realize; + dc->unrealize = rc4030_unrealize; + dc->reset = rc4030_reset; + dc->vmsd = &vmstate_rc4030; +} + +static const TypeInfo rc4030_info = { + .name = TYPE_RC4030, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(rc4030State), + .instance_init = rc4030_initfn, + .class_init = rc4030_class_init, +}; + +static void rc4030_iommu_memory_region_class_init(ObjectClass *klass, + void *data) +{ + IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass); + + imrc->translate = rc4030_dma_translate; +} + +static const TypeInfo rc4030_iommu_memory_region_info = { + .parent = TYPE_IOMMU_MEMORY_REGION, + .name = TYPE_RC4030_IOMMU_MEMORY_REGION, + .class_init = rc4030_iommu_memory_region_class_init, +}; + +static void rc4030_register_types(void) +{ + type_register_static(&rc4030_info); + type_register_static(&rc4030_iommu_memory_region_info); +} + +type_init(rc4030_register_types) + +DeviceState *rc4030_init(rc4030_dma **dmas, 
IOMMUMemoryRegion **dma_mr)
+{
+    DeviceState *dev;
+
+    dev = qdev_new(TYPE_RC4030);
+    sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
+
+    *dmas = rc4030_allocate_dmas(dev, 4);
+    *dma_mr = &RC4030(dev)->dma_mr;
+    return dev;
+}
diff --git a/hw/dma/sifive_pdma.c b/hw/dma/sifive_pdma.c
new file mode 100644
index 000000000..85fe34f5f
--- /dev/null
+++ b/hw/dma/sifive_pdma.c
@@ -0,0 +1,351 @@
+/*
+ * SiFive Platform DMA emulation
+ *
+ * Copyright (c) 2020 Wind River Systems, Inc.
+ *
+ * Author:
+ *   Bin Meng
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 or
+ * (at your option) version 3 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/bitops.h"
+#include "qemu/log.h"
+#include "qapi/error.h"
+#include "hw/irq.h"
+#include "hw/qdev-properties.h"
+#include "hw/sysbus.h"
+#include "migration/vmstate.h"
+#include "sysemu/dma.h"
+#include "hw/dma/sifive_pdma.h"
+
+#define DMA_CONTROL 0x000
+#define CONTROL_CLAIM BIT(0)
+#define CONTROL_RUN BIT(1)
+#define CONTROL_DONE_IE BIT(14)
+#define CONTROL_ERR_IE BIT(15)
+#define CONTROL_DONE BIT(30)
+#define CONTROL_ERR BIT(31)
+
+#define DMA_NEXT_CONFIG 0x004
+#define CONFIG_REPEAT BIT(2)
+#define CONFIG_ORDER BIT(3)
+#define CONFIG_WRSZ_SHIFT 24
+#define CONFIG_RDSZ_SHIFT 28
+#define CONFIG_SZ_MASK 0xf
+
+#define DMA_NEXT_BYTES 0x008
+#define DMA_NEXT_DST 0x010
+#define DMA_NEXT_SRC 0x018
+#define DMA_EXEC_CONFIG 0x104
+#define DMA_EXEC_BYTES 0x108
+#define DMA_EXEC_DST 0x110
+#define DMA_EXEC_SRC 0x118
+
+/*
+ * FU540/FU740 docs are incorrect about the NextConfig.wsize/rsize reset
+ * values. The reset values tested on Unleashed/Unmatched boards are 6
+ * instead of 0.
+ */
+#define CONFIG_WRSZ_DEFAULT 6
+#define CONFIG_RDSZ_DEFAULT 6
+
+enum dma_chan_state {
+    DMA_CHAN_STATE_IDLE,
+    DMA_CHAN_STATE_STARTED,
+    DMA_CHAN_STATE_ERROR,
+    DMA_CHAN_STATE_DONE
+};
+
+static void sifive_pdma_run(SiFivePDMAState *s, int ch)
+{
+    uint64_t bytes = s->chan[ch].next_bytes;
+    uint64_t dst = s->chan[ch].next_dst;
+    uint64_t src = s->chan[ch].next_src;
+    uint32_t config = s->chan[ch].next_config;
+    int wsize, rsize, size, remainder;
+    uint8_t buf[64];
+    int n;
+
+    /* do nothing if bytes to transfer is zero */
+    if (!bytes) {
+        goto done;
+    }
+
+    /*
+     * The manual does not describe how the hardware behaves when
+     * config.wsize and config.rsize are given different values.
+     * A common case is memory to memory DMA, and in this case they
+     * are normally the same. Abort if this expectation fails.
+     */
+    wsize = (config >> CONFIG_WRSZ_SHIFT) & CONFIG_SZ_MASK;
+    rsize = (config >> CONFIG_RDSZ_SHIFT) & CONFIG_SZ_MASK;
+    if (wsize != rsize) {
+        goto error;
+    }
+
+    /*
+     * Calculate the transaction size
+     *
+     * size field is base 2 logarithm of DMA transaction size,
+     * but there is an upper limit of 64 bytes per transaction.
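+     *
+     * For example, wsize = rsize = 3 gives 1 << 3 = 8-byte transactions,
+     * so a 100-byte request below is split into twelve 8-byte transactions
+     * plus a 4-byte trailing remainder; any size field above 6 is clamped
+     * to 64-byte transactions.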
+ */ + size = wsize; + if (size > 6) { + size = 6; + } + size = 1 << size; + remainder = bytes % size; + + /* indicate a DMA transfer is started */ + s->chan[ch].state = DMA_CHAN_STATE_STARTED; + s->chan[ch].control &= ~CONTROL_DONE; + s->chan[ch].control &= ~CONTROL_ERR; + + /* load the next_ registers into their exec_ counterparts */ + s->chan[ch].exec_config = config; + s->chan[ch].exec_bytes = bytes; + s->chan[ch].exec_dst = dst; + s->chan[ch].exec_src = src; + + for (n = 0; n < bytes / size; n++) { + cpu_physical_memory_read(s->chan[ch].exec_src, buf, size); + cpu_physical_memory_write(s->chan[ch].exec_dst, buf, size); + s->chan[ch].exec_src += size; + s->chan[ch].exec_dst += size; + s->chan[ch].exec_bytes -= size; + } + + if (remainder) { + cpu_physical_memory_read(s->chan[ch].exec_src, buf, remainder); + cpu_physical_memory_write(s->chan[ch].exec_dst, buf, remainder); + s->chan[ch].exec_src += remainder; + s->chan[ch].exec_dst += remainder; + s->chan[ch].exec_bytes -= remainder; + } + + /* reload exec_ registers if repeat is required */ + if (s->chan[ch].next_config & CONFIG_REPEAT) { + s->chan[ch].exec_bytes = bytes; + s->chan[ch].exec_dst = dst; + s->chan[ch].exec_src = src; + } + +done: + /* indicate a DMA transfer is done */ + s->chan[ch].state = DMA_CHAN_STATE_DONE; + s->chan[ch].control &= ~CONTROL_RUN; + s->chan[ch].control |= CONTROL_DONE; + return; + +error: + s->chan[ch].state = DMA_CHAN_STATE_ERROR; + s->chan[ch].control |= CONTROL_ERR; + return; +} + +static inline void sifive_pdma_update_irq(SiFivePDMAState *s, int ch) +{ + bool done_ie, err_ie; + + done_ie = !!(s->chan[ch].control & CONTROL_DONE_IE); + err_ie = !!(s->chan[ch].control & CONTROL_ERR_IE); + + if (done_ie && (s->chan[ch].control & CONTROL_DONE)) { + qemu_irq_raise(s->irq[ch * 2]); + } else { + qemu_irq_lower(s->irq[ch * 2]); + } + + if (err_ie && (s->chan[ch].control & CONTROL_ERR)) { + qemu_irq_raise(s->irq[ch * 2 + 1]); + } else { + qemu_irq_lower(s->irq[ch * 2 + 1]); + } + + s->chan[ch].state = DMA_CHAN_STATE_IDLE; +} + +static uint64_t sifive_pdma_read(void *opaque, hwaddr offset, unsigned size) +{ + SiFivePDMAState *s = opaque; + int ch = SIFIVE_PDMA_CHAN_NO(offset); + uint64_t val = 0; + + if (ch >= SIFIVE_PDMA_CHANS) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: Invalid channel no %d\n", + __func__, ch); + return 0; + } + + offset &= 0xfff; + switch (offset) { + case DMA_CONTROL: + val = s->chan[ch].control; + break; + case DMA_NEXT_CONFIG: + val = s->chan[ch].next_config; + break; + case DMA_NEXT_BYTES: + val = s->chan[ch].next_bytes; + break; + case DMA_NEXT_DST: + val = s->chan[ch].next_dst; + break; + case DMA_NEXT_SRC: + val = s->chan[ch].next_src; + break; + case DMA_EXEC_CONFIG: + val = s->chan[ch].exec_config; + break; + case DMA_EXEC_BYTES: + val = s->chan[ch].exec_bytes; + break; + case DMA_EXEC_DST: + val = s->chan[ch].exec_dst; + break; + case DMA_EXEC_SRC: + val = s->chan[ch].exec_src; + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" HWADDR_PRIX "\n", + __func__, offset); + break; + } + + return val; +} + +static void sifive_pdma_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + SiFivePDMAState *s = opaque; + int ch = SIFIVE_PDMA_CHAN_NO(offset); + bool claimed, run; + + if (ch >= SIFIVE_PDMA_CHANS) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: Invalid channel no %d\n", + __func__, ch); + return; + } + + offset &= 0xfff; + switch (offset) { + case DMA_CONTROL: + claimed = !!(s->chan[ch].control & CONTROL_CLAIM); + run = !!(s->chan[ch].control & 
CONTROL_RUN); + + if (!claimed && (value & CONTROL_CLAIM)) { + /* reset Next* registers */ + s->chan[ch].next_config = (CONFIG_RDSZ_DEFAULT << CONFIG_RDSZ_SHIFT) | + (CONFIG_WRSZ_DEFAULT << CONFIG_WRSZ_SHIFT); + s->chan[ch].next_bytes = 0; + s->chan[ch].next_dst = 0; + s->chan[ch].next_src = 0; + } + + /* claim bit can only be cleared when run is low */ + if (run && !(value & CONTROL_CLAIM)) { + value |= CONTROL_CLAIM; + } + + s->chan[ch].control = value; + + /* + * If channel was not claimed before run bit is set, + * or if the channel is disclaimed when run was low, + * DMA won't run. + */ + if (!claimed || (!run && !(value & CONTROL_CLAIM))) { + s->chan[ch].control &= ~CONTROL_RUN; + return; + } + + if (value & CONTROL_RUN) { + sifive_pdma_run(s, ch); + } + + sifive_pdma_update_irq(s, ch); + break; + case DMA_NEXT_CONFIG: + s->chan[ch].next_config = value; + break; + case DMA_NEXT_BYTES: + s->chan[ch].next_bytes = value; + break; + case DMA_NEXT_DST: + s->chan[ch].next_dst = value; + break; + case DMA_NEXT_SRC: + s->chan[ch].next_src = value; + break; + case DMA_EXEC_CONFIG: + case DMA_EXEC_BYTES: + case DMA_EXEC_DST: + case DMA_EXEC_SRC: + /* these are read-only registers */ + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" HWADDR_PRIX "\n", + __func__, offset); + break; + } +} + +static const MemoryRegionOps sifive_pdma_ops = { + .read = sifive_pdma_read, + .write = sifive_pdma_write, + .endianness = DEVICE_LITTLE_ENDIAN, + /* there are 32-bit and 64-bit wide registers */ + .impl = { + .min_access_size = 4, + .max_access_size = 8, + } +}; + +static void sifive_pdma_realize(DeviceState *dev, Error **errp) +{ + SiFivePDMAState *s = SIFIVE_PDMA(dev); + int i; + + memory_region_init_io(&s->iomem, OBJECT(dev), &sifive_pdma_ops, s, + TYPE_SIFIVE_PDMA, SIFIVE_PDMA_REG_SIZE); + sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->iomem); + + for (i = 0; i < SIFIVE_PDMA_IRQS; i++) { + sysbus_init_irq(SYS_BUS_DEVICE(dev), &s->irq[i]); + } +} + +static void sifive_pdma_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->desc = "SiFive Platform DMA controller"; + dc->realize = sifive_pdma_realize; +} + +static const TypeInfo sifive_pdma_info = { + .name = TYPE_SIFIVE_PDMA, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(SiFivePDMAState), + .class_init = sifive_pdma_class_init, +}; + +static void sifive_pdma_register_types(void) +{ + type_register_static(&sifive_pdma_info); +} + +type_init(sifive_pdma_register_types) diff --git a/hw/dma/soc_dma.c b/hw/dma/soc_dma.c new file mode 100644 index 000000000..3a430057f --- /dev/null +++ b/hw/dma/soc_dma.c @@ -0,0 +1,361 @@ +/* + * On-chip DMA controller framework. + * + * Copyright (C) 2008 Nokia Corporation + * Written by Andrzej Zaborowski + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 or + * (at your option) version 3 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . 
+ */
+#include "qemu/osdep.h"
+#include "qemu/error-report.h"
+#include "qemu/timer.h"
+#include "hw/arm/soc_dma.h"
+
+static void transfer_mem2mem(struct soc_dma_ch_s *ch)
+{
+    memcpy(ch->paddr[0], ch->paddr[1], ch->bytes);
+    ch->paddr[0] += ch->bytes;
+    ch->paddr[1] += ch->bytes;
+}
+
+static void transfer_mem2fifo(struct soc_dma_ch_s *ch)
+{
+    ch->io_fn[1](ch->io_opaque[1], ch->paddr[0], ch->bytes);
+    ch->paddr[0] += ch->bytes;
+}
+
+static void transfer_fifo2mem(struct soc_dma_ch_s *ch)
+{
+    ch->io_fn[0](ch->io_opaque[0], ch->paddr[1], ch->bytes);
+    ch->paddr[1] += ch->bytes;
+}
+
+/* This is further optimisable but isn't very important because often
+ * DMA peripherals forbid these kinds of transfers and even when they don't,
+ * operating systems may not need to use them. */
+static void *fifo_buf;
+static int fifo_size;
+static void transfer_fifo2fifo(struct soc_dma_ch_s *ch)
+{
+    if (ch->bytes > fifo_size)
+        fifo_buf = g_realloc(fifo_buf, fifo_size = ch->bytes);
+
+    /* Implement as transfer_fifo2linear + transfer_linear2fifo. */
+    ch->io_fn[0](ch->io_opaque[0], fifo_buf, ch->bytes);
+    ch->io_fn[1](ch->io_opaque[1], fifo_buf, ch->bytes);
+}
+
+struct dma_s {
+    struct soc_dma_s soc;
+    int chnum;
+    uint64_t ch_enable_mask;
+    int64_t channel_freq;
+    int enabled_count;
+
+    struct memmap_entry_s {
+        enum soc_dma_port_type type;
+        hwaddr addr;
+        union {
+            struct {
+                void *opaque;
+                soc_dma_io_t fn;
+                int out;
+            } fifo;
+            struct {
+                void *base;
+                size_t size;
+            } mem;
+        } u;
+    } *memmap;
+    int memmap_size;
+
+    struct soc_dma_ch_s ch[];
+};
+
+static void soc_dma_ch_schedule(struct soc_dma_ch_s *ch, int delay_bytes)
+{
+    int64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
+    struct dma_s *dma = (struct dma_s *) ch->dma;
+
+    timer_mod(ch->timer, now + delay_bytes / dma->channel_freq);
+}
+
+static void soc_dma_ch_run(void *opaque)
+{
+    struct soc_dma_ch_s *ch = (struct soc_dma_ch_s *) opaque;
+
+    ch->running = 1;
+    ch->dma->setup_fn(ch);
+    ch->transfer_fn(ch);
+    ch->running = 0;
+
+    if (ch->enable)
+        soc_dma_ch_schedule(ch, ch->bytes);
+    ch->bytes = 0;
+}
+
+static inline struct memmap_entry_s *soc_dma_lookup(struct dma_s *dma,
+                hwaddr addr)
+{
+    struct memmap_entry_s *lo;
+    int hi;
+
+    lo = dma->memmap;
+    hi = dma->memmap_size;
+
+    while (hi > 1) {
+        hi /= 2;
+        if (lo[hi].addr <= addr)
+            lo += hi;
+    }
+
+    return lo;
+}
+
+static inline enum soc_dma_port_type soc_dma_ch_update_type(
+                struct soc_dma_ch_s *ch, int port)
+{
+    struct dma_s *dma = (struct dma_s *) ch->dma;
+    struct memmap_entry_s *entry = soc_dma_lookup(dma, ch->vaddr[port]);
+
+    if (entry->type == soc_dma_port_fifo) {
+        while (entry < dma->memmap + dma->memmap_size &&
+                        entry->u.fifo.out != port)
+            entry ++;
+        if (entry->addr != ch->vaddr[port] || entry->u.fifo.out != port)
+            return soc_dma_port_other;
+
+        if (ch->type[port] != soc_dma_access_const)
+            return soc_dma_port_other;
+
+        ch->io_fn[port] = entry->u.fifo.fn;
+        ch->io_opaque[port] = entry->u.fifo.opaque;
+        return soc_dma_port_fifo;
+    } else if (entry->type == soc_dma_port_mem) {
+        if (entry->addr > ch->vaddr[port] ||
+                        entry->addr + entry->u.mem.size <= ch->vaddr[port])
+            return soc_dma_port_other;
+
+        /* TODO: support constant memory address for source port as used for
+         * drawing solid rectangles by PalmOS(R).
*/ + if (ch->type[port] != soc_dma_access_const) + return soc_dma_port_other; + + ch->paddr[port] = (uint8_t *) entry->u.mem.base + + (ch->vaddr[port] - entry->addr); + /* TODO: save bytes left to the end of the mapping somewhere so we + * can check we're not reading beyond it. */ + return soc_dma_port_mem; + } else + return soc_dma_port_other; +} + +void soc_dma_ch_update(struct soc_dma_ch_s *ch) +{ + enum soc_dma_port_type src, dst; + + src = soc_dma_ch_update_type(ch, 0); + if (src == soc_dma_port_other) { + ch->update = 0; + ch->transfer_fn = ch->dma->transfer_fn; + return; + } + dst = soc_dma_ch_update_type(ch, 1); + + /* TODO: use src and dst as array indices. */ + if (src == soc_dma_port_mem && dst == soc_dma_port_mem) + ch->transfer_fn = transfer_mem2mem; + else if (src == soc_dma_port_mem && dst == soc_dma_port_fifo) + ch->transfer_fn = transfer_mem2fifo; + else if (src == soc_dma_port_fifo && dst == soc_dma_port_mem) + ch->transfer_fn = transfer_fifo2mem; + else if (src == soc_dma_port_fifo && dst == soc_dma_port_fifo) + ch->transfer_fn = transfer_fifo2fifo; + else + ch->transfer_fn = ch->dma->transfer_fn; + + ch->update = (dst != soc_dma_port_other); +} + +static void soc_dma_ch_freq_update(struct dma_s *s) +{ + if (s->enabled_count) + /* We completely ignore channel priorities and stuff */ + s->channel_freq = s->soc.freq / s->enabled_count; + else { + /* TODO: Signal that we want to disable the functional clock and let + * the platform code decide what to do with it, i.e. check that + * auto-idle is enabled in the clock controller and if we are stopping + * the clock, do the same with any parent clocks that had only one + * user keeping them on and auto-idle enabled. */ + } +} + +void soc_dma_set_request(struct soc_dma_ch_s *ch, int level) +{ + struct dma_s *dma = (struct dma_s *) ch->dma; + + dma->enabled_count += level - ch->enable; + + if (level) + dma->ch_enable_mask |= 1 << ch->num; + else + dma->ch_enable_mask &= ~(1 << ch->num); + + if (level != ch->enable) { + soc_dma_ch_freq_update(dma); + ch->enable = level; + + if (!ch->enable) + timer_del(ch->timer); + else if (!ch->running) + soc_dma_ch_run(ch); + else + soc_dma_ch_schedule(ch, 1); + } +} + +void soc_dma_reset(struct soc_dma_s *soc) +{ + struct dma_s *s = (struct dma_s *) soc; + + s->soc.drqbmp = 0; + s->ch_enable_mask = 0; + s->enabled_count = 0; + soc_dma_ch_freq_update(s); +} + +/* TODO: take a functional-clock argument */ +struct soc_dma_s *soc_dma_init(int n) +{ + int i; + struct dma_s *s = g_malloc0(sizeof(*s) + n * sizeof(*s->ch)); + + s->chnum = n; + s->soc.ch = s->ch; + for (i = 0; i < n; i ++) { + s->ch[i].dma = &s->soc; + s->ch[i].num = i; + s->ch[i].timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, soc_dma_ch_run, &s->ch[i]); + } + + soc_dma_reset(&s->soc); + fifo_size = 0; + + return &s->soc; +} + +void soc_dma_port_add_fifo(struct soc_dma_s *soc, hwaddr virt_base, + soc_dma_io_t fn, void *opaque, int out) +{ + struct memmap_entry_s *entry; + struct dma_s *dma = (struct dma_s *) soc; + + dma->memmap = g_realloc(dma->memmap, sizeof(*entry) * + (dma->memmap_size + 1)); + entry = soc_dma_lookup(dma, virt_base); + + if (dma->memmap_size) { + if (entry->type == soc_dma_port_mem) { + if (entry->addr <= virt_base && + entry->addr + entry->u.mem.size > virt_base) { + error_report("%s: FIFO at %"PRIx64 + " collides with RAM region at %"PRIx64 + "-%"PRIx64, __func__, + virt_base, entry->addr, + (entry->addr + entry->u.mem.size)); + exit(-1); + } + + if (entry->addr <= virt_base) + entry ++; + } else + while (entry < 
dma->memmap + dma->memmap_size && + entry->addr <= virt_base) { + if (entry->addr == virt_base && entry->u.fifo.out == out) { + error_report("%s: FIFO at %"PRIx64 + " collides FIFO at %"PRIx64, + __func__, virt_base, entry->addr); + exit(-1); + } + + entry ++; + } + + memmove(entry + 1, entry, + (uint8_t *) (dma->memmap + dma->memmap_size ++) - + (uint8_t *) entry); + } else + dma->memmap_size ++; + + entry->addr = virt_base; + entry->type = soc_dma_port_fifo; + entry->u.fifo.fn = fn; + entry->u.fifo.opaque = opaque; + entry->u.fifo.out = out; +} + +void soc_dma_port_add_mem(struct soc_dma_s *soc, uint8_t *phys_base, + hwaddr virt_base, size_t size) +{ + struct memmap_entry_s *entry; + struct dma_s *dma = (struct dma_s *) soc; + + dma->memmap = g_realloc(dma->memmap, sizeof(*entry) * + (dma->memmap_size + 1)); + entry = soc_dma_lookup(dma, virt_base); + + if (dma->memmap_size) { + if (entry->type == soc_dma_port_mem) { + if ((entry->addr >= virt_base && entry->addr < virt_base + size) || + (entry->addr <= virt_base && + entry->addr + entry->u.mem.size > virt_base)) { + error_report("%s: RAM at %"PRIx64 "-%"PRIx64 + " collides with RAM region at %"PRIx64 + "-%"PRIx64, __func__, + virt_base, virt_base + size, + entry->addr, entry->addr + entry->u.mem.size); + exit(-1); + } + + if (entry->addr <= virt_base) + entry ++; + } else { + if (entry->addr >= virt_base && + entry->addr < virt_base + size) { + error_report("%s: RAM at %"PRIx64 "-%"PRIx64 + " collides with FIFO at %"PRIx64, + __func__, virt_base, virt_base + size, + entry->addr); + exit(-1); + } + + while (entry < dma->memmap + dma->memmap_size && + entry->addr <= virt_base) + entry ++; + } + + memmove(entry + 1, entry, + (uint8_t *) (dma->memmap + dma->memmap_size ++) - + (uint8_t *) entry); + } else + dma->memmap_size ++; + + entry->addr = virt_base; + entry->type = soc_dma_port_mem; + entry->u.mem.base = phys_base; + entry->u.mem.size = size; +} + +/* TODO: port removal for ports like PCMCIA memory */ diff --git a/hw/dma/sparc32_dma.c b/hw/dma/sparc32_dma.c new file mode 100644 index 000000000..03bc50087 --- /dev/null +++ b/hw/dma/sparc32_dma.c @@ -0,0 +1,449 @@ +/* + * QEMU Sparc32 DMA controller emulation + * + * Copyright (c) 2006 Fabrice Bellard + * + * Modifications: + * 2010-Feb-14 Artyom Tarasenko : reworked irq generation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include "hw/irq.h" +#include "hw/qdev-properties.h" +#include "hw/sparc/sparc32_dma.h" +#include "hw/sparc/sun4m_iommu.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "sysemu/dma.h" +#include "qapi/error.h" +#include "qemu/module.h" +#include "trace.h" + +/* + * This is the DMA controller part of chip STP2000 (Master I/O), also + * produced as NCR89C100. See + * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR89C100.txt + * and + * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/DMA2.txt + */ + +#define DMA_SIZE (4 * sizeof(uint32_t)) +/* We need the mask, because one instance of the device is not page + aligned (ledma, start address 0x0010) */ +#define DMA_MASK (DMA_SIZE - 1) +/* OBP says 0x20 bytes for ledma, the extras are aliased to espdma */ +#define DMA_ETH_SIZE (8 * sizeof(uint32_t)) +#define DMA_MAX_REG_OFFSET (2 * DMA_SIZE - 1) + +#define DMA_VER 0xa0000000 +#define DMA_INTR 1 +#define DMA_INTREN 0x10 +#define DMA_WRITE_MEM 0x100 +#define DMA_EN 0x200 +#define DMA_LOADED 0x04000000 +#define DMA_DRAIN_FIFO 0x40 +#define DMA_RESET 0x80 + +/* XXX SCSI and ethernet should have different read-only bit masks */ +#define DMA_CSR_RO_MASK 0xfe000007 + +enum { + GPIO_RESET = 0, + GPIO_DMA, +}; + +/* Note: on sparc, the lance 16 bit bus is swapped */ +void ledma_memory_read(void *opaque, hwaddr addr, + uint8_t *buf, int len, int do_bswap) +{ + DMADeviceState *s = opaque; + IOMMUState *is = (IOMMUState *)s->iommu; + int i; + + addr |= s->dmaregs[3]; + trace_ledma_memory_read(addr, len); + if (do_bswap) { + dma_memory_read(&is->iommu_as, addr, buf, len); + } else { + addr &= ~1; + len &= ~1; + dma_memory_read(&is->iommu_as, addr, buf, len); + for(i = 0; i < len; i += 2) { + bswap16s((uint16_t *)(buf + i)); + } + } +} + +void ledma_memory_write(void *opaque, hwaddr addr, + uint8_t *buf, int len, int do_bswap) +{ + DMADeviceState *s = opaque; + IOMMUState *is = (IOMMUState *)s->iommu; + int l, i; + uint16_t tmp_buf[32]; + + addr |= s->dmaregs[3]; + trace_ledma_memory_write(addr, len); + if (do_bswap) { + dma_memory_write(&is->iommu_as, addr, buf, len); + } else { + addr &= ~1; + len &= ~1; + while (len > 0) { + l = len; + if (l > sizeof(tmp_buf)) + l = sizeof(tmp_buf); + for(i = 0; i < l; i += 2) { + tmp_buf[i >> 1] = bswap16(*(uint16_t *)(buf + i)); + } + dma_memory_write(&is->iommu_as, addr, tmp_buf, l); + len -= l; + buf += l; + addr += l; + } + } +} + +static void dma_set_irq(void *opaque, int irq, int level) +{ + DMADeviceState *s = opaque; + if (level) { + s->dmaregs[0] |= DMA_INTR; + if (s->dmaregs[0] & DMA_INTREN) { + trace_sparc32_dma_set_irq_raise(); + qemu_irq_raise(s->irq); + } + } else { + if (s->dmaregs[0] & DMA_INTR) { + s->dmaregs[0] &= ~DMA_INTR; + if (s->dmaregs[0] & DMA_INTREN) { + trace_sparc32_dma_set_irq_lower(); + qemu_irq_lower(s->irq); + } + } + } +} + +void espdma_memory_read(void *opaque, uint8_t *buf, int len) +{ + DMADeviceState *s = opaque; + IOMMUState *is = (IOMMUState *)s->iommu; + + trace_espdma_memory_read(s->dmaregs[1], len); + dma_memory_read(&is->iommu_as, s->dmaregs[1], buf, len); + s->dmaregs[1] += len; +} + +void espdma_memory_write(void *opaque, uint8_t *buf, int len) +{ + DMADeviceState *s = opaque; + IOMMUState *is = (IOMMUState *)s->iommu; + + trace_espdma_memory_write(s->dmaregs[1], len); + dma_memory_write(&is->iommu_as, s->dmaregs[1], buf, len); + s->dmaregs[1] += len; +} + +static uint64_t dma_mem_read(void *opaque, hwaddr addr, + unsigned size) +{ + 
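+    /* Register reads have no side effects: DMA_MASK folds the 16-byte
+     * register window, so the non-page-aligned ledma instance noted
+     * above decodes the same four CSRs. */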
DMADeviceState *s = opaque; + uint32_t saddr; + + saddr = (addr & DMA_MASK) >> 2; + trace_sparc32_dma_mem_readl(addr, s->dmaregs[saddr]); + return s->dmaregs[saddr]; +} + +static void dma_mem_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + DMADeviceState *s = opaque; + uint32_t saddr; + + saddr = (addr & DMA_MASK) >> 2; + trace_sparc32_dma_mem_writel(addr, s->dmaregs[saddr], val); + switch (saddr) { + case 0: + if (val & DMA_INTREN) { + if (s->dmaregs[0] & DMA_INTR) { + trace_sparc32_dma_set_irq_raise(); + qemu_irq_raise(s->irq); + } + } else { + if (s->dmaregs[0] & (DMA_INTR | DMA_INTREN)) { + trace_sparc32_dma_set_irq_lower(); + qemu_irq_lower(s->irq); + } + } + if (val & DMA_RESET) { + qemu_irq_raise(s->gpio[GPIO_RESET]); + qemu_irq_lower(s->gpio[GPIO_RESET]); + } else if (val & DMA_DRAIN_FIFO) { + val &= ~DMA_DRAIN_FIFO; + } else if (val == 0) + val = DMA_DRAIN_FIFO; + + if (val & DMA_EN && !(s->dmaregs[0] & DMA_EN)) { + trace_sparc32_dma_enable_raise(); + qemu_irq_raise(s->gpio[GPIO_DMA]); + } else if (!(val & DMA_EN) && !!(s->dmaregs[0] & DMA_EN)) { + trace_sparc32_dma_enable_lower(); + qemu_irq_lower(s->gpio[GPIO_DMA]); + } + + val &= ~DMA_CSR_RO_MASK; + val |= DMA_VER; + s->dmaregs[0] = (s->dmaregs[0] & DMA_CSR_RO_MASK) | val; + break; + case 1: + s->dmaregs[0] |= DMA_LOADED; + /* fall through */ + default: + s->dmaregs[saddr] = val; + break; + } +} + +static const MemoryRegionOps dma_mem_ops = { + .read = dma_mem_read, + .write = dma_mem_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +static void sparc32_dma_device_reset(DeviceState *d) +{ + DMADeviceState *s = SPARC32_DMA_DEVICE(d); + + memset(s->dmaregs, 0, DMA_SIZE); + s->dmaregs[0] = DMA_VER; +} + +static const VMStateDescription vmstate_sparc32_dma_device = { + .name ="sparc32_dma", + .version_id = 2, + .minimum_version_id = 2, + .fields = (VMStateField[]) { + VMSTATE_UINT32_ARRAY(dmaregs, DMADeviceState, DMA_REGS), + VMSTATE_END_OF_LIST() + } +}; + +static void sparc32_dma_device_init(Object *obj) +{ + DeviceState *dev = DEVICE(obj); + DMADeviceState *s = SPARC32_DMA_DEVICE(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + + sysbus_init_irq(sbd, &s->irq); + + sysbus_init_mmio(sbd, &s->iomem); + + object_property_add_link(OBJECT(dev), "iommu", TYPE_SUN4M_IOMMU, + (Object **) &s->iommu, + qdev_prop_allow_set_link_before_realize, + 0); + + qdev_init_gpio_in(dev, dma_set_irq, 1); + qdev_init_gpio_out(dev, s->gpio, 2); +} + +static void sparc32_dma_device_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = sparc32_dma_device_reset; + dc->vmsd = &vmstate_sparc32_dma_device; +} + +static const TypeInfo sparc32_dma_device_info = { + .name = TYPE_SPARC32_DMA_DEVICE, + .parent = TYPE_SYS_BUS_DEVICE, + .abstract = true, + .instance_size = sizeof(DMADeviceState), + .instance_init = sparc32_dma_device_init, + .class_init = sparc32_dma_device_class_init, +}; + +static void sparc32_espdma_device_init(Object *obj) +{ + DMADeviceState *s = SPARC32_DMA_DEVICE(obj); + ESPDMADeviceState *es = SPARC32_ESPDMA_DEVICE(obj); + + memory_region_init_io(&s->iomem, OBJECT(s), &dma_mem_ops, s, + "espdma-mmio", DMA_SIZE); + + object_initialize_child(obj, "esp", &es->esp, TYPE_SYSBUS_ESP); +} + +static void sparc32_espdma_device_realize(DeviceState *dev, Error **errp) +{ + ESPDMADeviceState *es = SPARC32_ESPDMA_DEVICE(dev); + SysBusESPState *sysbus = SYSBUS_ESP(&es->esp); + ESPState *esp = &sysbus->esp; + + 
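+    /* Point the ESP SCSI core's DMA hooks at this controller, so its
+     * transfers go through espdma_memory_read/write above. */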
esp->dma_memory_read = espdma_memory_read; + esp->dma_memory_write = espdma_memory_write; + esp->dma_opaque = SPARC32_DMA_DEVICE(dev); + sysbus->it_shift = 2; + esp->dma_enabled = 1; + sysbus_realize(SYS_BUS_DEVICE(sysbus), &error_fatal); +} + +static void sparc32_espdma_device_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = sparc32_espdma_device_realize; +} + +static const TypeInfo sparc32_espdma_device_info = { + .name = TYPE_SPARC32_ESPDMA_DEVICE, + .parent = TYPE_SPARC32_DMA_DEVICE, + .instance_size = sizeof(ESPDMADeviceState), + .instance_init = sparc32_espdma_device_init, + .class_init = sparc32_espdma_device_class_init, +}; + +static void sparc32_ledma_device_init(Object *obj) +{ + DMADeviceState *s = SPARC32_DMA_DEVICE(obj); + LEDMADeviceState *ls = SPARC32_LEDMA_DEVICE(obj); + + memory_region_init_io(&s->iomem, OBJECT(s), &dma_mem_ops, s, + "ledma-mmio", DMA_SIZE); + + object_initialize_child(obj, "lance", &ls->lance, TYPE_LANCE); +} + +static void sparc32_ledma_device_realize(DeviceState *dev, Error **errp) +{ + LEDMADeviceState *s = SPARC32_LEDMA_DEVICE(dev); + SysBusPCNetState *lance = SYSBUS_PCNET(&s->lance); + + object_property_set_link(OBJECT(lance), "dma", OBJECT(dev), &error_abort); + sysbus_realize(SYS_BUS_DEVICE(lance), &error_fatal); +} + +static void sparc32_ledma_device_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = sparc32_ledma_device_realize; +} + +static const TypeInfo sparc32_ledma_device_info = { + .name = TYPE_SPARC32_LEDMA_DEVICE, + .parent = TYPE_SPARC32_DMA_DEVICE, + .instance_size = sizeof(LEDMADeviceState), + .instance_init = sparc32_ledma_device_init, + .class_init = sparc32_ledma_device_class_init, +}; + +static void sparc32_dma_realize(DeviceState *dev, Error **errp) +{ + SPARC32DMAState *s = SPARC32_DMA(dev); + DeviceState *espdma, *esp, *ledma, *lance; + SysBusDevice *sbd; + Object *iommu; + + iommu = object_resolve_path_type("", TYPE_SUN4M_IOMMU, NULL); + if (!iommu) { + error_setg(errp, "unable to locate sun4m IOMMU device"); + return; + } + + espdma = DEVICE(&s->espdma); + object_property_set_link(OBJECT(espdma), "iommu", iommu, &error_abort); + sysbus_realize(SYS_BUS_DEVICE(espdma), &error_fatal); + + esp = DEVICE(object_resolve_path_component(OBJECT(espdma), "esp")); + sbd = SYS_BUS_DEVICE(esp); + sysbus_connect_irq(sbd, 0, qdev_get_gpio_in(espdma, 0)); + qdev_connect_gpio_out(espdma, 0, qdev_get_gpio_in(esp, 0)); + qdev_connect_gpio_out(espdma, 1, qdev_get_gpio_in(esp, 1)); + + sbd = SYS_BUS_DEVICE(espdma); + memory_region_add_subregion(&s->dmamem, 0x0, + sysbus_mmio_get_region(sbd, 0)); + + ledma = DEVICE(&s->ledma); + object_property_set_link(OBJECT(ledma), "iommu", iommu, &error_abort); + sysbus_realize(SYS_BUS_DEVICE(ledma), &error_fatal); + + lance = DEVICE(object_resolve_path_component(OBJECT(ledma), "lance")); + sbd = SYS_BUS_DEVICE(lance); + sysbus_connect_irq(sbd, 0, qdev_get_gpio_in(ledma, 0)); + qdev_connect_gpio_out(ledma, 0, qdev_get_gpio_in(lance, 0)); + + sbd = SYS_BUS_DEVICE(ledma); + memory_region_add_subregion(&s->dmamem, 0x10, + sysbus_mmio_get_region(sbd, 0)); + + /* Add ledma alias to handle SunOS 5.7 - Solaris 9 invalid access bug */ + memory_region_init_alias(&s->ledma_alias, OBJECT(dev), "ledma-alias", + sysbus_mmio_get_region(sbd, 0), 0x4, 0x4); + memory_region_add_subregion(&s->dmamem, 0x20, &s->ledma_alias); +} + +static void sparc32_dma_init(Object *obj) +{ + SPARC32DMAState *s = SPARC32_DMA(obj); + 
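+    /* The 0x30-byte container below holds the espdma registers at 0x00,
+     * the ledma registers at 0x10 and the ledma alias at 0x20, as mapped
+     * in sparc32_dma_realize above. */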
SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + + memory_region_init(&s->dmamem, OBJECT(s), "dma", DMA_SIZE + DMA_ETH_SIZE); + sysbus_init_mmio(sbd, &s->dmamem); + + object_initialize_child(obj, "espdma", &s->espdma, + TYPE_SPARC32_ESPDMA_DEVICE); + object_initialize_child(obj, "ledma", &s->ledma, + TYPE_SPARC32_LEDMA_DEVICE); +} + +static void sparc32_dma_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = sparc32_dma_realize; +} + +static const TypeInfo sparc32_dma_info = { + .name = TYPE_SPARC32_DMA, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(SPARC32DMAState), + .instance_init = sparc32_dma_init, + .class_init = sparc32_dma_class_init, +}; + + +static void sparc32_dma_register_types(void) +{ + type_register_static(&sparc32_dma_device_info); + type_register_static(&sparc32_espdma_device_info); + type_register_static(&sparc32_ledma_device_info); + type_register_static(&sparc32_dma_info); +} + +type_init(sparc32_dma_register_types) diff --git a/hw/dma/trace-events b/hw/dma/trace-events new file mode 100644 index 000000000..3c47df54e --- /dev/null +++ b/hw/dma/trace-events @@ -0,0 +1,46 @@ +# See docs/devel/tracing.rst for syntax documentation. + +# rc4030.c +jazzio_read(uint64_t addr, uint32_t ret) "read reg[0x%"PRIx64"] = 0x%x" +jazzio_write(uint64_t addr, uint32_t val) "write reg[0x%"PRIx64"] = 0x%x" +rc4030_read(uint64_t addr, uint32_t ret) "read reg[0x%"PRIx64"] = 0x%x" +rc4030_write(uint64_t addr, uint32_t val) "write reg[0x%"PRIx64"] = 0x%x" + +# sparc32_dma.c +ledma_memory_read(uint64_t addr, int len) "DMA read addr 0x%"PRIx64 " len %d" +ledma_memory_write(uint64_t addr, int len) "DMA write addr 0x%"PRIx64 " len %d" +sparc32_dma_set_irq_raise(void) "Raise IRQ" +sparc32_dma_set_irq_lower(void) "Lower IRQ" +espdma_memory_read(uint32_t addr, int len) "DMA read addr 0x%08x len %d" +espdma_memory_write(uint32_t addr, int len) "DMA write addr 0x%08x len %d" +sparc32_dma_mem_readl(uint64_t addr, uint32_t ret) "read dmareg 0x%"PRIx64": 0x%08x" +sparc32_dma_mem_writel(uint64_t addr, uint32_t old, uint32_t val) "write dmareg 0x%"PRIx64": 0x%08x -> 0x%08x" +sparc32_dma_enable_raise(void) "Raise DMA enable" +sparc32_dma_enable_lower(void) "Lower DMA enable" + +# i8257.c +i8257_unregistered_dma(int nchan, int dma_pos, int dma_len) "unregistered DMA channel used nchan=%d dma_pos=%d dma_len=%d" + +# pl330.c +pl330_fault(void *ptr, uint32_t flags) "ch: %p, flags: 0x%"PRIx32 +pl330_fault_abort(void) "abort interrupt raised" +pl330_dmaend(void) "DMA ending" +pl330_dmago(void) "DMA run" +pl330_dmald(uint8_t chan, uint32_t addr, uint32_t size, uint32_t num, char ch) "channel:%"PRId8" address:0x%08"PRIx32" size:0x%"PRIx32" num:%"PRId32"%c" +pl330_dmakill(void) "abort interrupt lowered" +pl330_dmalpend(uint8_t nf, uint8_t bs, uint8_t lc, uint8_t ch, uint8_t flag) "nf=0x%02x bs=0x%02x lc=0x%02x ch=0x%02x flag=0x%02x" +pl330_dmalpiter(void) "loop reiteration" +pl330_dmalpfallthrough(void) "loop fallthrough" +pl330_dmasev_evirq(uint8_t ev_id) "event interrupt raised %"PRId8 +pl330_dmasev_event(uint8_t ev_id) "event raised %"PRId8 +pl330_dmast(uint8_t chan, uint32_t addr, uint32_t sz, uint32_t num, char ch) "channel:%"PRId8" address:0x%08"PRIx32" size:0x%"PRIx32" num:%"PRId32" %c" +pl330_dmawfe(uint8_t ev_id) "event lowered 0x%"PRIx8 +pl330_chan_exec_undef(void) "undefined instruction" +pl330_exec_cycle(uint32_t addr, uint32_t size) "PL330 read from memory @0x%08"PRIx32" (size = 0x%08"PRIx32")" +pl330_hexdump(uint32_t offset, char *str) " 
0x%04"PRIx32":%s" +pl330_exec(void) "pl330_exec" +pl330_debug_exec(uint8_t ch) "chan id: 0x%"PRIx8 +pl330_debug_exec_stall(void) "stall of debug instruction not implemented" +pl330_iomem_write(uint32_t offset, uint32_t value) "addr: 0x%08"PRIx32" data: 0x%08"PRIx32 +pl330_iomem_write_clr(int i) "event interrupt lowered %d" +pl330_iomem_read(uint32_t addr, uint32_t data) "addr: 0x%08"PRIx32" data: 0x%08"PRIx32 diff --git a/hw/dma/trace.h b/hw/dma/trace.h new file mode 100644 index 000000000..4bcb28b47 --- /dev/null +++ b/hw/dma/trace.h @@ -0,0 +1 @@ +#include "trace/trace-hw_dma.h" diff --git a/hw/dma/xilinx_axidma.c b/hw/dma/xilinx_axidma.c new file mode 100644 index 000000000..bc383f53c --- /dev/null +++ b/hw/dma/xilinx_axidma.c @@ -0,0 +1,662 @@ +/* + * QEMU model of Xilinx AXI-DMA block. + * + * Copyright (c) 2011 Edgar E. Iglesias. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include "hw/sysbus.h" +#include "qapi/error.h" +#include "qemu/timer.h" +#include "hw/hw.h" +#include "hw/irq.h" +#include "hw/ptimer.h" +#include "hw/qdev-properties.h" +#include "qemu/log.h" +#include "qemu/module.h" + +#include "sysemu/dma.h" +#include "hw/stream.h" +#include "qom/object.h" + +#define D(x) + +#define TYPE_XILINX_AXI_DMA "xlnx.axi-dma" +#define TYPE_XILINX_AXI_DMA_DATA_STREAM "xilinx-axi-dma-data-stream" +#define TYPE_XILINX_AXI_DMA_CONTROL_STREAM "xilinx-axi-dma-control-stream" + +OBJECT_DECLARE_SIMPLE_TYPE(XilinxAXIDMA, XILINX_AXI_DMA) + +typedef struct XilinxAXIDMAStreamSink XilinxAXIDMAStreamSink; +DECLARE_INSTANCE_CHECKER(XilinxAXIDMAStreamSink, XILINX_AXI_DMA_DATA_STREAM, + TYPE_XILINX_AXI_DMA_DATA_STREAM) + +DECLARE_INSTANCE_CHECKER(XilinxAXIDMAStreamSink, XILINX_AXI_DMA_CONTROL_STREAM, + TYPE_XILINX_AXI_DMA_CONTROL_STREAM) + +#define R_DMACR (0x00 / 4) +#define R_DMASR (0x04 / 4) +#define R_CURDESC (0x08 / 4) +#define R_TAILDESC (0x10 / 4) +#define R_MAX (0x30 / 4) + +#define CONTROL_PAYLOAD_WORDS 5 +#define CONTROL_PAYLOAD_SIZE (CONTROL_PAYLOAD_WORDS * (sizeof(uint32_t))) + + +enum { + DMACR_RUNSTOP = 1, + DMACR_TAILPTR_MODE = 2, + DMACR_RESET = 4 +}; + +enum { + DMASR_HALTED = 1, + DMASR_IDLE = 2, + DMASR_IOC_IRQ = 1 << 12, + DMASR_DLY_IRQ = 1 << 13, + + DMASR_IRQ_MASK = 7 << 12 +}; + +struct SDesc { + uint64_t nxtdesc; + uint64_t buffer_address; + uint64_t reserved; + uint32_t control; + uint32_t status; + uint8_t app[CONTROL_PAYLOAD_SIZE]; +}; + +enum { + SDESC_CTRL_EOF = (1 << 26), + SDESC_CTRL_SOF = (1 << 27), + + SDESC_CTRL_LEN_MASK = (1 << 23) - 1 +}; + +enum { + SDESC_STATUS_EOF = (1 << 26), + SDESC_STATUS_SOF_BIT = 27, + SDESC_STATUS_SOF = (1 << SDESC_STATUS_SOF_BIT), + SDESC_STATUS_COMPLETE = (1 << 31) +}; + +struct Stream { + struct XilinxAXIDMA *dma; + ptimer_state *ptimer; + qemu_irq irq; + + int nr; + + bool sof; + struct SDesc desc; + unsigned int complete_cnt; + uint32_t regs[R_MAX]; + uint8_t app[20]; + unsigned char txbuf[16 * 1024]; +}; + +struct XilinxAXIDMAStreamSink { + Object parent; + + struct XilinxAXIDMA *dma; +}; + +struct XilinxAXIDMA { + SysBusDevice busdev; + MemoryRegion iomem; + MemoryRegion *dma_mr; + AddressSpace as; + + uint32_t freqhz; + StreamSink *tx_data_dev; + StreamSink *tx_control_dev; + XilinxAXIDMAStreamSink rx_data_dev; + XilinxAXIDMAStreamSink rx_control_dev; + + struct Stream streams[2]; + + StreamCanPushNotifyFn notify; + void *notify_opaque; +}; + +/* + * Helper calls to extract info from descriptors and other trivial + * state from regs. + */ +static inline int stream_desc_sof(struct SDesc *d) +{ + return d->control & SDESC_CTRL_SOF; +} + +static inline int stream_desc_eof(struct SDesc *d) +{ + return d->control & SDESC_CTRL_EOF; +} + +static inline int stream_resetting(struct Stream *s) +{ + return !!(s->regs[R_DMACR] & DMACR_RESET); +} + +static inline int stream_running(struct Stream *s) +{ + return s->regs[R_DMACR] & DMACR_RUNSTOP; +} + +static inline int stream_idle(struct Stream *s) +{ + return !!(s->regs[R_DMASR] & DMASR_IDLE); +} + +static void stream_reset(struct Stream *s) +{ + s->regs[R_DMASR] = DMASR_HALTED; /* starts up halted. */ + s->regs[R_DMACR] = 1 << 16; /* Starts with one in compl threshold. */ + s->sof = true; +} + +/* Map an offset addr into a channel index. 
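+ * Each stream owns a 0x30-byte register window: offsets 0x00-0x2f map to
+ * stream 0 (memory to stream) and 0x30-0x5f to stream 1 (stream to
+ * memory).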
*/ +static inline int streamid_from_addr(hwaddr addr) +{ + int sid; + + sid = addr / (0x30); + sid &= 1; + return sid; +} + +static void stream_desc_load(struct Stream *s, hwaddr addr) +{ + struct SDesc *d = &s->desc; + + address_space_read(&s->dma->as, addr, MEMTXATTRS_UNSPECIFIED, d, sizeof *d); + + /* Convert from LE into host endianness. */ + d->buffer_address = le64_to_cpu(d->buffer_address); + d->nxtdesc = le64_to_cpu(d->nxtdesc); + d->control = le32_to_cpu(d->control); + d->status = le32_to_cpu(d->status); +} + +static void stream_desc_store(struct Stream *s, hwaddr addr) +{ + struct SDesc *d = &s->desc; + + /* Convert from host endianness into LE. */ + d->buffer_address = cpu_to_le64(d->buffer_address); + d->nxtdesc = cpu_to_le64(d->nxtdesc); + d->control = cpu_to_le32(d->control); + d->status = cpu_to_le32(d->status); + address_space_write(&s->dma->as, addr, MEMTXATTRS_UNSPECIFIED, + d, sizeof *d); +} + +static void stream_update_irq(struct Stream *s) +{ + unsigned int pending, mask, irq; + + pending = s->regs[R_DMASR] & DMASR_IRQ_MASK; + mask = s->regs[R_DMACR] & DMASR_IRQ_MASK; + + irq = pending & mask; + + qemu_set_irq(s->irq, !!irq); +} + +static void stream_reload_complete_cnt(struct Stream *s) +{ + unsigned int comp_th; + comp_th = (s->regs[R_DMACR] >> 16) & 0xff; + s->complete_cnt = comp_th; +} + +static void timer_hit(void *opaque) +{ + struct Stream *s = opaque; + + stream_reload_complete_cnt(s); + s->regs[R_DMASR] |= DMASR_DLY_IRQ; + stream_update_irq(s); +} + +static void stream_complete(struct Stream *s) +{ + unsigned int comp_delay; + + /* Start the delayed timer. */ + ptimer_transaction_begin(s->ptimer); + comp_delay = s->regs[R_DMACR] >> 24; + if (comp_delay) { + ptimer_stop(s->ptimer); + ptimer_set_count(s->ptimer, comp_delay); + ptimer_run(s->ptimer, 1); + } + + s->complete_cnt--; + if (s->complete_cnt == 0) { + /* Raise the IOC irq. */ + s->regs[R_DMASR] |= DMASR_IOC_IRQ; + stream_reload_complete_cnt(s); + } + ptimer_transaction_commit(s->ptimer); +} + +static void stream_process_mem2s(struct Stream *s, StreamSink *tx_data_dev, + StreamSink *tx_control_dev) +{ + uint32_t prev_d; + uint32_t txlen; + uint64_t addr; + bool eop; + + if (!stream_running(s) || stream_idle(s)) { + return; + } + + while (1) { + stream_desc_load(s, s->regs[R_CURDESC]); + + if (s->desc.status & SDESC_STATUS_COMPLETE) { + s->regs[R_DMASR] |= DMASR_HALTED; + break; + } + + if (stream_desc_sof(&s->desc)) { + stream_push(tx_control_dev, s->desc.app, sizeof(s->desc.app), true); + } + + txlen = s->desc.control & SDESC_CTRL_LEN_MASK; + + eop = stream_desc_eof(&s->desc); + addr = s->desc.buffer_address; + while (txlen) { + unsigned int len; + + len = txlen > sizeof s->txbuf ? sizeof s->txbuf : txlen; + address_space_read(&s->dma->as, addr, + MEMTXATTRS_UNSPECIFIED, + s->txbuf, len); + stream_push(tx_data_dev, s->txbuf, len, eop && len == txlen); + txlen -= len; + addr += len; + } + + if (eop) { + stream_complete(s); + } + + /* Update the descriptor. */ + s->desc.status = txlen | SDESC_STATUS_COMPLETE; + stream_desc_store(s, s->regs[R_CURDESC]); + + /* Advance. 
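+         * Follow the nxtdesc link; when the descriptor just processed is
+         * the one TAILDESC points at, the ring is exhausted and the
+         * stream goes idle.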
+         */
+        prev_d = s->regs[R_CURDESC];
+        s->regs[R_CURDESC] = s->desc.nxtdesc;
+        if (prev_d == s->regs[R_TAILDESC]) {
+            s->regs[R_DMASR] |= DMASR_IDLE;
+            break;
+        }
+    }
+}
+
+static size_t stream_process_s2mem(struct Stream *s, unsigned char *buf,
+                                   size_t len, bool eop)
+{
+    uint32_t prev_d;
+    unsigned int rxlen;
+    size_t pos = 0;
+
+    if (!stream_running(s) || stream_idle(s)) {
+        return 0;
+    }
+
+    while (len) {
+        stream_desc_load(s, s->regs[R_CURDESC]);
+
+        if (s->desc.status & SDESC_STATUS_COMPLETE) {
+            s->regs[R_DMASR] |= DMASR_HALTED;
+            break;
+        }
+
+        rxlen = s->desc.control & SDESC_CTRL_LEN_MASK;
+        if (rxlen > len) {
+            /* It fits. */
+            rxlen = len;
+        }
+
+        address_space_write(&s->dma->as, s->desc.buffer_address,
+                            MEMTXATTRS_UNSPECIFIED, buf + pos, rxlen);
+        len -= rxlen;
+        pos += rxlen;
+
+        /* Update the descriptor. */
+        if (eop) {
+            stream_complete(s);
+            memcpy(s->desc.app, s->app, sizeof(s->desc.app));
+            s->desc.status |= SDESC_STATUS_EOF;
+        }
+
+        s->desc.status |= s->sof << SDESC_STATUS_SOF_BIT;
+        s->desc.status |= SDESC_STATUS_COMPLETE;
+        stream_desc_store(s, s->regs[R_CURDESC]);
+        s->sof = eop;
+
+        /* Advance. */
+        prev_d = s->regs[R_CURDESC];
+        s->regs[R_CURDESC] = s->desc.nxtdesc;
+        if (prev_d == s->regs[R_TAILDESC]) {
+            s->regs[R_DMASR] |= DMASR_IDLE;
+            break;
+        }
+    }
+
+    return pos;
+}
+
+static void xilinx_axidma_reset(DeviceState *dev)
+{
+    int i;
+    XilinxAXIDMA *s = XILINX_AXI_DMA(dev);
+
+    for (i = 0; i < 2; i++) {
+        stream_reset(&s->streams[i]);
+    }
+}
+
+static size_t
+xilinx_axidma_control_stream_push(StreamSink *obj, unsigned char *buf,
+                                  size_t len, bool eop)
+{
+    XilinxAXIDMAStreamSink *cs = XILINX_AXI_DMA_CONTROL_STREAM(obj);
+    struct Stream *s = &cs->dma->streams[1];
+
+    if (len != CONTROL_PAYLOAD_SIZE) {
+        hw_error("AXI DMA requires %d byte control stream payload\n",
+                 (int)CONTROL_PAYLOAD_SIZE);
+    }
+
+    memcpy(s->app, buf, len);
+    return len;
+}
+
+static bool
+xilinx_axidma_data_stream_can_push(StreamSink *obj,
+                                   StreamCanPushNotifyFn notify,
+                                   void *notify_opaque)
+{
+    XilinxAXIDMAStreamSink *ds = XILINX_AXI_DMA_DATA_STREAM(obj);
+    struct Stream *s = &ds->dma->streams[1];
+
+    if (!stream_running(s) || stream_idle(s)) {
+        ds->dma->notify = notify;
+        ds->dma->notify_opaque = notify_opaque;
+        return false;
+    }
+
+    return true;
+}
+
+static size_t
+xilinx_axidma_data_stream_push(StreamSink *obj, unsigned char *buf, size_t len,
+                               bool eop)
+{
+    XilinxAXIDMAStreamSink *ds = XILINX_AXI_DMA_DATA_STREAM(obj);
+    struct Stream *s = &ds->dma->streams[1];
+    size_t ret;
+
+    ret = stream_process_s2mem(s, buf, len, eop);
+    stream_update_irq(s);
+    return ret;
+}
+
+static uint64_t axidma_read(void *opaque, hwaddr addr,
+                            unsigned size)
+{
+    XilinxAXIDMA *d = opaque;
+    struct Stream *s;
+    uint32_t r = 0;
+    int sid;
+
+    sid = streamid_from_addr(addr);
+    s = &d->streams[sid];
+
+    addr = addr % 0x30;
+    addr >>= 2;
+    switch (addr) {
+    case R_DMACR:
+        /* Simulate one cycle's reset delay.
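+         * The RESET bit self-clears: it is dropped from the register
+         * here before the value is returned to the guest.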
*/ + s->regs[addr] &= ~DMACR_RESET; + r = s->regs[addr]; + break; + case R_DMASR: + s->regs[addr] &= 0xffff; + s->regs[addr] |= (s->complete_cnt & 0xff) << 16; + s->regs[addr] |= (ptimer_get_count(s->ptimer) & 0xff) << 24; + r = s->regs[addr]; + break; + default: + r = s->regs[addr]; + D(qemu_log("%s ch=%d addr=" TARGET_FMT_plx " v=%x\n", + __func__, sid, addr * 4, r)); + break; + } + return r; + +} + +static void axidma_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + XilinxAXIDMA *d = opaque; + struct Stream *s; + int sid; + + sid = streamid_from_addr(addr); + s = &d->streams[sid]; + + addr = addr % 0x30; + addr >>= 2; + switch (addr) { + case R_DMACR: + /* Tailptr mode is always on. */ + value |= DMACR_TAILPTR_MODE; + /* Remember our previous reset state. */ + value |= (s->regs[addr] & DMACR_RESET); + s->regs[addr] = value; + + if (value & DMACR_RESET) { + stream_reset(s); + } + + if ((value & 1) && !stream_resetting(s)) { + /* Start processing. */ + s->regs[R_DMASR] &= ~(DMASR_HALTED | DMASR_IDLE); + } + stream_reload_complete_cnt(s); + break; + + case R_DMASR: + /* Mask away write to clear irq lines. */ + value &= ~(value & DMASR_IRQ_MASK); + s->regs[addr] = value; + break; + + case R_TAILDESC: + s->regs[addr] = value; + s->regs[R_DMASR] &= ~DMASR_IDLE; /* Not idle. */ + if (!sid) { + stream_process_mem2s(s, d->tx_data_dev, d->tx_control_dev); + } + break; + default: + D(qemu_log("%s: ch=%d addr=" TARGET_FMT_plx " v=%x\n", + __func__, sid, addr * 4, (unsigned)value)); + s->regs[addr] = value; + break; + } + if (sid == 1 && d->notify) { + StreamCanPushNotifyFn notifytmp = d->notify; + d->notify = NULL; + notifytmp(d->notify_opaque); + } + stream_update_irq(s); +} + +static const MemoryRegionOps axidma_ops = { + .read = axidma_read, + .write = axidma_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void xilinx_axidma_realize(DeviceState *dev, Error **errp) +{ + XilinxAXIDMA *s = XILINX_AXI_DMA(dev); + XilinxAXIDMAStreamSink *ds = XILINX_AXI_DMA_DATA_STREAM(&s->rx_data_dev); + XilinxAXIDMAStreamSink *cs = XILINX_AXI_DMA_CONTROL_STREAM( + &s->rx_control_dev); + int i; + + object_property_add_link(OBJECT(ds), "dma", TYPE_XILINX_AXI_DMA, + (Object **)&ds->dma, + object_property_allow_set_link, + OBJ_PROP_LINK_STRONG); + object_property_add_link(OBJECT(cs), "dma", TYPE_XILINX_AXI_DMA, + (Object **)&cs->dma, + object_property_allow_set_link, + OBJ_PROP_LINK_STRONG); + object_property_set_link(OBJECT(ds), "dma", OBJECT(s), &error_abort); + object_property_set_link(OBJECT(cs), "dma", OBJECT(s), &error_abort); + + for (i = 0; i < 2; i++) { + struct Stream *st = &s->streams[i]; + + st->dma = s; + st->nr = i; + st->ptimer = ptimer_init(timer_hit, st, PTIMER_POLICY_DEFAULT); + ptimer_transaction_begin(st->ptimer); + ptimer_set_freq(st->ptimer, s->freqhz); + ptimer_transaction_commit(st->ptimer); + } + + address_space_init(&s->as, + s->dma_mr ? 
s->dma_mr : get_system_memory(), "dma");
+}
+
+static void xilinx_axidma_init(Object *obj)
+{
+    XilinxAXIDMA *s = XILINX_AXI_DMA(obj);
+    SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
+
+    object_initialize_child(OBJECT(s), "axistream-connected-target",
+                            &s->rx_data_dev, TYPE_XILINX_AXI_DMA_DATA_STREAM);
+    object_initialize_child(OBJECT(s), "axistream-control-connected-target",
+                            &s->rx_control_dev,
+                            TYPE_XILINX_AXI_DMA_CONTROL_STREAM);
+    object_property_add_link(obj, "dma", TYPE_MEMORY_REGION,
+                             (Object **)&s->dma_mr,
+                             qdev_prop_allow_set_link_before_realize,
+                             OBJ_PROP_LINK_STRONG);
+
+    sysbus_init_irq(sbd, &s->streams[0].irq);
+    sysbus_init_irq(sbd, &s->streams[1].irq);
+
+    memory_region_init_io(&s->iomem, obj, &axidma_ops, s,
+                          "xlnx.axi-dma", R_MAX * 4 * 2);
+    sysbus_init_mmio(sbd, &s->iomem);
+}
+
+static Property axidma_properties[] = {
+    DEFINE_PROP_UINT32("freqhz", XilinxAXIDMA, freqhz, 50000000),
+    DEFINE_PROP_LINK("axistream-connected", XilinxAXIDMA,
+                     tx_data_dev, TYPE_STREAM_SINK, StreamSink *),
+    DEFINE_PROP_LINK("axistream-control-connected", XilinxAXIDMA,
+                     tx_control_dev, TYPE_STREAM_SINK, StreamSink *),
+    DEFINE_PROP_END_OF_LIST(),
+};
+
+static void axidma_class_init(ObjectClass *klass, void *data)
+{
+    DeviceClass *dc = DEVICE_CLASS(klass);
+
+    dc->realize = xilinx_axidma_realize;
+    dc->reset = xilinx_axidma_reset;
+    device_class_set_props(dc, axidma_properties);
+}
+
+static StreamSinkClass xilinx_axidma_data_stream_class = {
+    .push = xilinx_axidma_data_stream_push,
+    .can_push = xilinx_axidma_data_stream_can_push,
+};
+
+static StreamSinkClass xilinx_axidma_control_stream_class = {
+    .push = xilinx_axidma_control_stream_push,
+};
+
+static void xilinx_axidma_stream_class_init(ObjectClass *klass, void *data)
+{
+    StreamSinkClass *ssc = STREAM_SINK_CLASS(klass);
+
+    ssc->push = ((StreamSinkClass *)data)->push;
+    ssc->can_push = ((StreamSinkClass *)data)->can_push;
+}
+
+static const TypeInfo axidma_info = {
+    .name = TYPE_XILINX_AXI_DMA,
+    .parent = TYPE_SYS_BUS_DEVICE,
+    .instance_size = sizeof(XilinxAXIDMA),
+    .class_init = axidma_class_init,
+    .instance_init = xilinx_axidma_init,
+};
+
+static const TypeInfo xilinx_axidma_data_stream_info = {
+    .name = TYPE_XILINX_AXI_DMA_DATA_STREAM,
+    .parent = TYPE_OBJECT,
+    .instance_size = sizeof(XilinxAXIDMAStreamSink),
+    .class_init = xilinx_axidma_stream_class_init,
+    .class_data = &xilinx_axidma_data_stream_class,
+    .interfaces = (InterfaceInfo[]) {
+        { TYPE_STREAM_SINK },
+        { }
+    }
+};
+
+static const TypeInfo xilinx_axidma_control_stream_info = {
+    .name = TYPE_XILINX_AXI_DMA_CONTROL_STREAM,
+    .parent = TYPE_OBJECT,
+    .instance_size = sizeof(XilinxAXIDMAStreamSink),
+    .class_init = xilinx_axidma_stream_class_init,
+    .class_data = &xilinx_axidma_control_stream_class,
+    .interfaces = (InterfaceInfo[]) {
+        { TYPE_STREAM_SINK },
+        { }
+    }
+};
+
+static void xilinx_axidma_register_types(void)
+{
+    type_register_static(&axidma_info);
+    type_register_static(&xilinx_axidma_data_stream_info);
+    type_register_static(&xilinx_axidma_control_stream_info);
+}
+
+type_init(xilinx_axidma_register_types)
diff --git a/hw/dma/xlnx-zdma.c b/hw/dma/xlnx-zdma.c
new file mode 100644
index 000000000..a5a92b4ff
--- /dev/null
+++ b/hw/dma/xlnx-zdma.c
@@ -0,0 +1,847 @@
+/*
+ * QEMU model of the ZynqMP generic DMA
+ *
+ * Copyright (c) 2014 Xilinx Inc.
+ * Copyright (c) 2018 FEIMTECH AB
+ *
+ * Written by Edgar E.
Iglesias , + * Francisco Iglesias + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "hw/dma/xlnx-zdma.h" +#include "hw/irq.h" +#include "hw/qdev-properties.h" +#include "migration/vmstate.h" +#include "qemu/bitops.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "qapi/error.h" + +#ifndef XLNX_ZDMA_ERR_DEBUG +#define XLNX_ZDMA_ERR_DEBUG 0 +#endif + +REG32(ZDMA_ERR_CTRL, 0x0) + FIELD(ZDMA_ERR_CTRL, APB_ERR_RES, 0, 1) +REG32(ZDMA_CH_ISR, 0x100) + FIELD(ZDMA_CH_ISR, DMA_PAUSE, 11, 1) + FIELD(ZDMA_CH_ISR, DMA_DONE, 10, 1) + FIELD(ZDMA_CH_ISR, AXI_WR_DATA, 9, 1) + FIELD(ZDMA_CH_ISR, AXI_RD_DATA, 8, 1) + FIELD(ZDMA_CH_ISR, AXI_RD_DST_DSCR, 7, 1) + FIELD(ZDMA_CH_ISR, AXI_RD_SRC_DSCR, 6, 1) + FIELD(ZDMA_CH_ISR, IRQ_DST_ACCT_ERR, 5, 1) + FIELD(ZDMA_CH_ISR, IRQ_SRC_ACCT_ERR, 4, 1) + FIELD(ZDMA_CH_ISR, BYTE_CNT_OVRFL, 3, 1) + FIELD(ZDMA_CH_ISR, DST_DSCR_DONE, 2, 1) + FIELD(ZDMA_CH_ISR, SRC_DSCR_DONE, 1, 1) + FIELD(ZDMA_CH_ISR, INV_APB, 0, 1) +REG32(ZDMA_CH_IMR, 0x104) + FIELD(ZDMA_CH_IMR, DMA_PAUSE, 11, 1) + FIELD(ZDMA_CH_IMR, DMA_DONE, 10, 1) + FIELD(ZDMA_CH_IMR, AXI_WR_DATA, 9, 1) + FIELD(ZDMA_CH_IMR, AXI_RD_DATA, 8, 1) + FIELD(ZDMA_CH_IMR, AXI_RD_DST_DSCR, 7, 1) + FIELD(ZDMA_CH_IMR, AXI_RD_SRC_DSCR, 6, 1) + FIELD(ZDMA_CH_IMR, IRQ_DST_ACCT_ERR, 5, 1) + FIELD(ZDMA_CH_IMR, IRQ_SRC_ACCT_ERR, 4, 1) + FIELD(ZDMA_CH_IMR, BYTE_CNT_OVRFL, 3, 1) + FIELD(ZDMA_CH_IMR, DST_DSCR_DONE, 2, 1) + FIELD(ZDMA_CH_IMR, SRC_DSCR_DONE, 1, 1) + FIELD(ZDMA_CH_IMR, INV_APB, 0, 1) +REG32(ZDMA_CH_IEN, 0x108) + FIELD(ZDMA_CH_IEN, DMA_PAUSE, 11, 1) + FIELD(ZDMA_CH_IEN, DMA_DONE, 10, 1) + FIELD(ZDMA_CH_IEN, AXI_WR_DATA, 9, 1) + FIELD(ZDMA_CH_IEN, AXI_RD_DATA, 8, 1) + FIELD(ZDMA_CH_IEN, AXI_RD_DST_DSCR, 7, 1) + FIELD(ZDMA_CH_IEN, AXI_RD_SRC_DSCR, 6, 1) + FIELD(ZDMA_CH_IEN, IRQ_DST_ACCT_ERR, 5, 1) + FIELD(ZDMA_CH_IEN, IRQ_SRC_ACCT_ERR, 4, 1) + FIELD(ZDMA_CH_IEN, BYTE_CNT_OVRFL, 3, 1) + FIELD(ZDMA_CH_IEN, DST_DSCR_DONE, 2, 1) + FIELD(ZDMA_CH_IEN, SRC_DSCR_DONE, 1, 1) + FIELD(ZDMA_CH_IEN, INV_APB, 0, 1) +REG32(ZDMA_CH_IDS, 0x10c) + FIELD(ZDMA_CH_IDS, DMA_PAUSE, 11, 1) + FIELD(ZDMA_CH_IDS, DMA_DONE, 10, 1) + FIELD(ZDMA_CH_IDS, AXI_WR_DATA, 9, 1) + FIELD(ZDMA_CH_IDS, AXI_RD_DATA, 8, 1) + FIELD(ZDMA_CH_IDS, AXI_RD_DST_DSCR, 7, 1) + FIELD(ZDMA_CH_IDS, AXI_RD_SRC_DSCR, 6, 1) + FIELD(ZDMA_CH_IDS, IRQ_DST_ACCT_ERR, 5, 1) + FIELD(ZDMA_CH_IDS, IRQ_SRC_ACCT_ERR, 4, 1) + FIELD(ZDMA_CH_IDS, BYTE_CNT_OVRFL, 3, 1) + FIELD(ZDMA_CH_IDS, DST_DSCR_DONE, 2, 1) + 
FIELD(ZDMA_CH_IDS, SRC_DSCR_DONE, 1, 1) + FIELD(ZDMA_CH_IDS, INV_APB, 0, 1) +REG32(ZDMA_CH_CTRL0, 0x110) + FIELD(ZDMA_CH_CTRL0, OVR_FETCH, 7, 1) + FIELD(ZDMA_CH_CTRL0, POINT_TYPE, 6, 1) + FIELD(ZDMA_CH_CTRL0, MODE, 4, 2) + FIELD(ZDMA_CH_CTRL0, RATE_CTRL, 3, 1) + FIELD(ZDMA_CH_CTRL0, CONT_ADDR, 2, 1) + FIELD(ZDMA_CH_CTRL0, CONT, 1, 1) +REG32(ZDMA_CH_CTRL1, 0x114) + FIELD(ZDMA_CH_CTRL1, DST_ISSUE, 5, 5) + FIELD(ZDMA_CH_CTRL1, SRC_ISSUE, 0, 5) +REG32(ZDMA_CH_FCI, 0x118) + FIELD(ZDMA_CH_FCI, PROG_CELL_CNT, 2, 2) + FIELD(ZDMA_CH_FCI, SIDE, 1, 1) + FIELD(ZDMA_CH_FCI, EN, 0, 1) +REG32(ZDMA_CH_STATUS, 0x11c) + FIELD(ZDMA_CH_STATUS, STATE, 0, 2) +REG32(ZDMA_CH_DATA_ATTR, 0x120) + FIELD(ZDMA_CH_DATA_ATTR, ARBURST, 26, 2) + FIELD(ZDMA_CH_DATA_ATTR, ARCACHE, 22, 4) + FIELD(ZDMA_CH_DATA_ATTR, ARQOS, 18, 4) + FIELD(ZDMA_CH_DATA_ATTR, ARLEN, 14, 4) + FIELD(ZDMA_CH_DATA_ATTR, AWBURST, 12, 2) + FIELD(ZDMA_CH_DATA_ATTR, AWCACHE, 8, 4) + FIELD(ZDMA_CH_DATA_ATTR, AWQOS, 4, 4) + FIELD(ZDMA_CH_DATA_ATTR, AWLEN, 0, 4) +REG32(ZDMA_CH_DSCR_ATTR, 0x124) + FIELD(ZDMA_CH_DSCR_ATTR, AXCOHRNT, 8, 1) + FIELD(ZDMA_CH_DSCR_ATTR, AXCACHE, 4, 4) + FIELD(ZDMA_CH_DSCR_ATTR, AXQOS, 0, 4) +REG32(ZDMA_CH_SRC_DSCR_WORD0, 0x128) +REG32(ZDMA_CH_SRC_DSCR_WORD1, 0x12c) + FIELD(ZDMA_CH_SRC_DSCR_WORD1, MSB, 0, 17) +REG32(ZDMA_CH_SRC_DSCR_WORD2, 0x130) + FIELD(ZDMA_CH_SRC_DSCR_WORD2, SIZE, 0, 30) +REG32(ZDMA_CH_SRC_DSCR_WORD3, 0x134) + FIELD(ZDMA_CH_SRC_DSCR_WORD3, CMD, 3, 2) + FIELD(ZDMA_CH_SRC_DSCR_WORD3, INTR, 2, 1) + FIELD(ZDMA_CH_SRC_DSCR_WORD3, TYPE, 1, 1) + FIELD(ZDMA_CH_SRC_DSCR_WORD3, COHRNT, 0, 1) +REG32(ZDMA_CH_DST_DSCR_WORD0, 0x138) +REG32(ZDMA_CH_DST_DSCR_WORD1, 0x13c) + FIELD(ZDMA_CH_DST_DSCR_WORD1, MSB, 0, 17) +REG32(ZDMA_CH_DST_DSCR_WORD2, 0x140) + FIELD(ZDMA_CH_DST_DSCR_WORD2, SIZE, 0, 30) +REG32(ZDMA_CH_DST_DSCR_WORD3, 0x144) + FIELD(ZDMA_CH_DST_DSCR_WORD3, INTR, 2, 1) + FIELD(ZDMA_CH_DST_DSCR_WORD3, TYPE, 1, 1) + FIELD(ZDMA_CH_DST_DSCR_WORD3, COHRNT, 0, 1) +REG32(ZDMA_CH_WR_ONLY_WORD0, 0x148) +REG32(ZDMA_CH_WR_ONLY_WORD1, 0x14c) +REG32(ZDMA_CH_WR_ONLY_WORD2, 0x150) +REG32(ZDMA_CH_WR_ONLY_WORD3, 0x154) +REG32(ZDMA_CH_SRC_START_LSB, 0x158) +REG32(ZDMA_CH_SRC_START_MSB, 0x15c) + FIELD(ZDMA_CH_SRC_START_MSB, ADDR, 0, 17) +REG32(ZDMA_CH_DST_START_LSB, 0x160) +REG32(ZDMA_CH_DST_START_MSB, 0x164) + FIELD(ZDMA_CH_DST_START_MSB, ADDR, 0, 17) +REG32(ZDMA_CH_RATE_CTRL, 0x18c) + FIELD(ZDMA_CH_RATE_CTRL, CNT, 0, 12) +REG32(ZDMA_CH_SRC_CUR_PYLD_LSB, 0x168) +REG32(ZDMA_CH_SRC_CUR_PYLD_MSB, 0x16c) + FIELD(ZDMA_CH_SRC_CUR_PYLD_MSB, ADDR, 0, 17) +REG32(ZDMA_CH_DST_CUR_PYLD_LSB, 0x170) +REG32(ZDMA_CH_DST_CUR_PYLD_MSB, 0x174) + FIELD(ZDMA_CH_DST_CUR_PYLD_MSB, ADDR, 0, 17) +REG32(ZDMA_CH_SRC_CUR_DSCR_LSB, 0x178) +REG32(ZDMA_CH_SRC_CUR_DSCR_MSB, 0x17c) + FIELD(ZDMA_CH_SRC_CUR_DSCR_MSB, ADDR, 0, 17) +REG32(ZDMA_CH_DST_CUR_DSCR_LSB, 0x180) +REG32(ZDMA_CH_DST_CUR_DSCR_MSB, 0x184) + FIELD(ZDMA_CH_DST_CUR_DSCR_MSB, ADDR, 0, 17) +REG32(ZDMA_CH_TOTAL_BYTE, 0x188) +REG32(ZDMA_CH_RATE_CNTL, 0x18c) + FIELD(ZDMA_CH_RATE_CNTL, CNT, 0, 12) +REG32(ZDMA_CH_IRQ_SRC_ACCT, 0x190) + FIELD(ZDMA_CH_IRQ_SRC_ACCT, CNT, 0, 8) +REG32(ZDMA_CH_IRQ_DST_ACCT, 0x194) + FIELD(ZDMA_CH_IRQ_DST_ACCT, CNT, 0, 8) +REG32(ZDMA_CH_DBG0, 0x198) + FIELD(ZDMA_CH_DBG0, CMN_BUF_FREE, 0, 9) +REG32(ZDMA_CH_DBG1, 0x19c) + FIELD(ZDMA_CH_DBG1, CMN_BUF_OCC, 0, 9) +REG32(ZDMA_CH_CTRL2, 0x200) + FIELD(ZDMA_CH_CTRL2, EN, 0, 1) + +enum { + PT_REG = 0, + PT_MEM = 1, +}; + +enum { + CMD_HALT = 1, + CMD_STOP = 2, +}; + +enum { + RW_MODE_RW = 0, + RW_MODE_WO = 1, + RW_MODE_RO = 2, +}; + +enum { + 
DTYPE_LINEAR = 0, + DTYPE_LINKED = 1, +}; + +enum { + AXI_BURST_FIXED = 0, + AXI_BURST_INCR = 1, +}; + +static void zdma_ch_imr_update_irq(XlnxZDMA *s) +{ + bool pending; + + pending = s->regs[R_ZDMA_CH_ISR] & ~s->regs[R_ZDMA_CH_IMR]; + + qemu_set_irq(s->irq_zdma_ch_imr, pending); +} + +static void zdma_ch_isr_postw(RegisterInfo *reg, uint64_t val64) +{ + XlnxZDMA *s = XLNX_ZDMA(reg->opaque); + zdma_ch_imr_update_irq(s); +} + +static uint64_t zdma_ch_ien_prew(RegisterInfo *reg, uint64_t val64) +{ + XlnxZDMA *s = XLNX_ZDMA(reg->opaque); + uint32_t val = val64; + + s->regs[R_ZDMA_CH_IMR] &= ~val; + zdma_ch_imr_update_irq(s); + return 0; +} + +static uint64_t zdma_ch_ids_prew(RegisterInfo *reg, uint64_t val64) +{ + XlnxZDMA *s = XLNX_ZDMA(reg->opaque); + uint32_t val = val64; + + s->regs[R_ZDMA_CH_IMR] |= val; + zdma_ch_imr_update_irq(s); + return 0; +} + +static void zdma_set_state(XlnxZDMA *s, XlnxZDMAState state) +{ + s->state = state; + ARRAY_FIELD_DP32(s->regs, ZDMA_CH_STATUS, STATE, state); + + /* Signal error if we have an error condition. */ + if (s->error) { + ARRAY_FIELD_DP32(s->regs, ZDMA_CH_STATUS, STATE, 3); + } +} + +static void zdma_src_done(XlnxZDMA *s) +{ + unsigned int cnt; + cnt = ARRAY_FIELD_EX32(s->regs, ZDMA_CH_IRQ_SRC_ACCT, CNT); + cnt++; + ARRAY_FIELD_DP32(s->regs, ZDMA_CH_IRQ_SRC_ACCT, CNT, cnt); + ARRAY_FIELD_DP32(s->regs, ZDMA_CH_ISR, SRC_DSCR_DONE, true); + + /* Did we overflow? */ + if (cnt != ARRAY_FIELD_EX32(s->regs, ZDMA_CH_IRQ_SRC_ACCT, CNT)) { + ARRAY_FIELD_DP32(s->regs, ZDMA_CH_ISR, IRQ_SRC_ACCT_ERR, true); + } + zdma_ch_imr_update_irq(s); +} + +static void zdma_dst_done(XlnxZDMA *s) +{ + unsigned int cnt; + cnt = ARRAY_FIELD_EX32(s->regs, ZDMA_CH_IRQ_DST_ACCT, CNT); + cnt++; + ARRAY_FIELD_DP32(s->regs, ZDMA_CH_IRQ_DST_ACCT, CNT, cnt); + ARRAY_FIELD_DP32(s->regs, ZDMA_CH_ISR, DST_DSCR_DONE, true); + + /* Did we overflow? */ + if (cnt != ARRAY_FIELD_EX32(s->regs, ZDMA_CH_IRQ_DST_ACCT, CNT)) { + ARRAY_FIELD_DP32(s->regs, ZDMA_CH_ISR, IRQ_DST_ACCT_ERR, true); + } + zdma_ch_imr_update_irq(s); +} + +static uint64_t zdma_get_regaddr64(XlnxZDMA *s, unsigned int basereg) +{ + uint64_t addr; + + addr = s->regs[basereg + 1]; + addr <<= 32; + addr |= s->regs[basereg]; + + return addr; +} + +static void zdma_put_regaddr64(XlnxZDMA *s, unsigned int basereg, uint64_t addr) +{ + s->regs[basereg] = addr; + s->regs[basereg + 1] = addr >> 32; +} + +static void zdma_load_descriptor_reg(XlnxZDMA *s, unsigned int reg, + XlnxZDMADescr *descr) +{ + descr->addr = zdma_get_regaddr64(s, reg); + descr->size = s->regs[reg + 2]; + descr->attr = s->regs[reg + 3]; +} + +static bool zdma_load_descriptor(XlnxZDMA *s, uint64_t addr, + XlnxZDMADescr *descr) +{ + /* ZDMA descriptors must be aligned to their own size. 
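+ * The descriptor fetched just below is 16 bytes: a 64-bit address
+ * followed by 32-bit size and attribute words.  Since that is a power
+ * of two, the modulo test that follows is equivalent to the mask form
+ * (a sketch, not part of the patch):
+ *
+ *     (addr & (sizeof(XlnxZDMADescr) - 1)) != 0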
*/ + if (addr % sizeof(XlnxZDMADescr)) { + qemu_log_mask(LOG_GUEST_ERROR, + "zdma: unaligned descriptor at %" PRIx64, + addr); + memset(descr, 0x0, sizeof(XlnxZDMADescr)); + s->error = true; + return false; + } + + descr->addr = address_space_ldq_le(&s->dma_as, addr, s->attr, NULL); + descr->size = address_space_ldl_le(&s->dma_as, addr + 8, s->attr, NULL); + descr->attr = address_space_ldl_le(&s->dma_as, addr + 12, s->attr, NULL); + return true; +} + +static void zdma_load_src_descriptor(XlnxZDMA *s) +{ + uint64_t src_addr; + unsigned int ptype = ARRAY_FIELD_EX32(s->regs, ZDMA_CH_CTRL0, POINT_TYPE); + + if (ptype == PT_REG) { + zdma_load_descriptor_reg(s, R_ZDMA_CH_SRC_DSCR_WORD0, &s->dsc_src); + return; + } + + src_addr = zdma_get_regaddr64(s, R_ZDMA_CH_SRC_CUR_DSCR_LSB); + + if (!zdma_load_descriptor(s, src_addr, &s->dsc_src)) { + ARRAY_FIELD_DP32(s->regs, ZDMA_CH_ISR, AXI_RD_SRC_DSCR, true); + } +} + +static void zdma_update_descr_addr(XlnxZDMA *s, bool type, + unsigned int basereg) +{ + uint64_t addr, next; + + if (type == DTYPE_LINEAR) { + addr = zdma_get_regaddr64(s, basereg); + next = addr + sizeof(s->dsc_dst); + } else { + addr = zdma_get_regaddr64(s, basereg); + addr += sizeof(s->dsc_dst); + next = address_space_ldq_le(&s->dma_as, addr, s->attr, NULL); + } + + zdma_put_regaddr64(s, basereg, next); +} + +static void zdma_load_dst_descriptor(XlnxZDMA *s) +{ + uint64_t dst_addr; + unsigned int ptype = ARRAY_FIELD_EX32(s->regs, ZDMA_CH_CTRL0, POINT_TYPE); + bool dst_type; + + if (ptype == PT_REG) { + zdma_load_descriptor_reg(s, R_ZDMA_CH_DST_DSCR_WORD0, &s->dsc_dst); + return; + } + + dst_addr = zdma_get_regaddr64(s, R_ZDMA_CH_DST_CUR_DSCR_LSB); + + if (!zdma_load_descriptor(s, dst_addr, &s->dsc_dst)) { + ARRAY_FIELD_DP32(s->regs, ZDMA_CH_ISR, AXI_RD_DST_DSCR, true); + } + + /* Advance the descriptor pointer. */ + dst_type = FIELD_EX32(s->dsc_dst.words[3], ZDMA_CH_DST_DSCR_WORD3, TYPE); + zdma_update_descr_addr(s, dst_type, R_ZDMA_CH_DST_CUR_DSCR_LSB); +} + +static void zdma_write_dst(XlnxZDMA *s, uint8_t *buf, uint32_t len) +{ + uint32_t dst_size, dlen; + bool dst_intr; + unsigned int ptype = ARRAY_FIELD_EX32(s->regs, ZDMA_CH_CTRL0, POINT_TYPE); + unsigned int rw_mode = ARRAY_FIELD_EX32(s->regs, ZDMA_CH_CTRL0, MODE); + unsigned int burst_type = ARRAY_FIELD_EX32(s->regs, ZDMA_CH_DATA_ATTR, + AWBURST); + + /* FIXED burst types are only supported in simple dma mode. */ + if (ptype != PT_REG) { + burst_type = AXI_BURST_INCR; + } + + while (len) { + dst_size = FIELD_EX32(s->dsc_dst.words[2], ZDMA_CH_DST_DSCR_WORD2, + SIZE); + if (dst_size == 0 && ptype == PT_MEM) { + zdma_load_dst_descriptor(s); + dst_size = FIELD_EX32(s->dsc_dst.words[2], ZDMA_CH_DST_DSCR_WORD2, + SIZE); + } + + /* Match what hardware does by ignoring the dst_size and only using + * the src size for Simple register mode. */ + if (ptype == PT_REG && rw_mode != RW_MODE_WO) { + dst_size = len; + } + + dst_intr = FIELD_EX32(s->dsc_dst.words[3], ZDMA_CH_DST_DSCR_WORD3, + INTR); + + dlen = len > dst_size ? dst_size : len; + if (burst_type == AXI_BURST_FIXED) { + if (dlen > (s->cfg.bus_width / 8)) { + dlen = s->cfg.bus_width / 8; + } + } + + address_space_write(&s->dma_as, s->dsc_dst.addr, s->attr, buf, dlen); + if (burst_type == AXI_BURST_INCR) { + s->dsc_dst.addr += dlen; + } + dst_size -= dlen; + buf += dlen; + len -= dlen; + + if (dst_size == 0 && dst_intr) { + zdma_dst_done(s); + } + + /* Write back to buffered descriptor. 
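+ * FIELD_DP32() deposits the shrunken size back into the cached copy of
+ * DST_DSCR_WORD2, so a partially filled destination descriptor resumes
+ * where it left off on the next chunk.  The registerfields helper
+ * expands to roughly (sketch, mask/shift names abbreviated):
+ *
+ *     w2 = (w2 & ~SIZE_MASK) | ((dst_size << SIZE_SHIFT) & SIZE_MASK);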
*/ + s->dsc_dst.words[2] = FIELD_DP32(s->dsc_dst.words[2], + ZDMA_CH_DST_DSCR_WORD2, + SIZE, + dst_size); + } +} + +static void zdma_process_descr(XlnxZDMA *s) +{ + uint64_t src_addr; + uint32_t src_size, len; + unsigned int src_cmd; + bool src_intr, src_type; + unsigned int ptype = ARRAY_FIELD_EX32(s->regs, ZDMA_CH_CTRL0, POINT_TYPE); + unsigned int rw_mode = ARRAY_FIELD_EX32(s->regs, ZDMA_CH_CTRL0, MODE); + unsigned int burst_type = ARRAY_FIELD_EX32(s->regs, ZDMA_CH_DATA_ATTR, + ARBURST); + + src_addr = s->dsc_src.addr; + src_size = FIELD_EX32(s->dsc_src.words[2], ZDMA_CH_SRC_DSCR_WORD2, SIZE); + src_cmd = FIELD_EX32(s->dsc_src.words[3], ZDMA_CH_SRC_DSCR_WORD3, CMD); + src_type = FIELD_EX32(s->dsc_src.words[3], ZDMA_CH_SRC_DSCR_WORD3, TYPE); + src_intr = FIELD_EX32(s->dsc_src.words[3], ZDMA_CH_SRC_DSCR_WORD3, INTR); + + /* FIXED burst types and non-rw modes are only supported in + * simple dma mode. + */ + if (ptype != PT_REG) { + if (rw_mode != RW_MODE_RW) { + qemu_log_mask(LOG_GUEST_ERROR, + "zDMA: rw-mode=%d but not simple DMA mode.\n", + rw_mode); + } + if (burst_type != AXI_BURST_INCR) { + qemu_log_mask(LOG_GUEST_ERROR, + "zDMA: burst_type=%d but not simple DMA mode.\n", + burst_type); + } + burst_type = AXI_BURST_INCR; + rw_mode = RW_MODE_RW; + } + + if (rw_mode == RW_MODE_WO) { + /* In Simple DMA Write-Only, we need to push DST size bytes + * regardless of what SRC size is set to. */ + src_size = FIELD_EX32(s->dsc_dst.words[2], ZDMA_CH_DST_DSCR_WORD2, + SIZE); + memcpy(s->buf, &s->regs[R_ZDMA_CH_WR_ONLY_WORD0], s->cfg.bus_width / 8); + } + + while (src_size) { + len = src_size > ARRAY_SIZE(s->buf) ? ARRAY_SIZE(s->buf) : src_size; + if (burst_type == AXI_BURST_FIXED) { + if (len > (s->cfg.bus_width / 8)) { + len = s->cfg.bus_width / 8; + } + } + + if (rw_mode == RW_MODE_WO) { + if (len > s->cfg.bus_width / 8) { + len = s->cfg.bus_width / 8; + } + } else { + address_space_read(&s->dma_as, src_addr, s->attr, s->buf, len); + if (burst_type == AXI_BURST_INCR) { + src_addr += len; + } + } + + if (rw_mode != RW_MODE_RO) { + zdma_write_dst(s, s->buf, len); + } + + s->regs[R_ZDMA_CH_TOTAL_BYTE] += len; + src_size -= len; + } + + ARRAY_FIELD_DP32(s->regs, ZDMA_CH_ISR, DMA_DONE, true); + + if (src_intr) { + zdma_src_done(s); + } + + if (ptype == PT_REG || src_cmd == CMD_STOP) { + ARRAY_FIELD_DP32(s->regs, ZDMA_CH_CTRL2, EN, 0); + zdma_set_state(s, DISABLED); + } + + if (src_cmd == CMD_HALT) { + zdma_set_state(s, PAUSED); + ARRAY_FIELD_DP32(s->regs, ZDMA_CH_ISR, DMA_PAUSE, 1); + ARRAY_FIELD_DP32(s->regs, ZDMA_CH_ISR, DMA_DONE, false); + zdma_ch_imr_update_irq(s); + return; + } + + zdma_update_descr_addr(s, src_type, R_ZDMA_CH_SRC_CUR_DSCR_LSB); +} + +static void zdma_run(XlnxZDMA *s) +{ + while (s->state == ENABLED && !s->error) { + zdma_load_src_descriptor(s); + + if (s->error) { + zdma_set_state(s, DISABLED); + } else { + zdma_process_descr(s); + } + } + + zdma_ch_imr_update_irq(s); +} + +static void zdma_update_descr_addr_from_start(XlnxZDMA *s) +{ + uint64_t src_addr, dst_addr; + + src_addr = zdma_get_regaddr64(s, R_ZDMA_CH_SRC_START_LSB); + zdma_put_regaddr64(s, R_ZDMA_CH_SRC_CUR_DSCR_LSB, src_addr); + dst_addr = zdma_get_regaddr64(s, R_ZDMA_CH_DST_START_LSB); + zdma_put_regaddr64(s, R_ZDMA_CH_DST_CUR_DSCR_LSB, dst_addr); + zdma_load_dst_descriptor(s); +} + +static void zdma_ch_ctrlx_postw(RegisterInfo *reg, uint64_t val64) +{ + XlnxZDMA *s = XLNX_ZDMA(reg->opaque); + + if (ARRAY_FIELD_EX32(s->regs, ZDMA_CH_CTRL2, EN)) { + s->error = false; + + if (s->state == PAUSED && + 
ARRAY_FIELD_EX32(s->regs, ZDMA_CH_CTRL0, CONT)) { + if (ARRAY_FIELD_EX32(s->regs, ZDMA_CH_CTRL0, CONT_ADDR) == 1) { + zdma_update_descr_addr_from_start(s); + } else { + bool src_type = FIELD_EX32(s->dsc_src.words[3], + ZDMA_CH_SRC_DSCR_WORD3, TYPE); + zdma_update_descr_addr(s, src_type, + R_ZDMA_CH_SRC_CUR_DSCR_LSB); + } + ARRAY_FIELD_DP32(s->regs, ZDMA_CH_CTRL0, CONT, false); + zdma_set_state(s, ENABLED); + } else if (s->state == DISABLED) { + zdma_update_descr_addr_from_start(s); + zdma_set_state(s, ENABLED); + } + } else { + /* Leave Paused state? */ + if (s->state == PAUSED && + ARRAY_FIELD_EX32(s->regs, ZDMA_CH_CTRL0, CONT)) { + zdma_set_state(s, DISABLED); + } + } + + zdma_run(s); +} + +static RegisterAccessInfo zdma_regs_info[] = { + { .name = "ZDMA_ERR_CTRL", .addr = A_ZDMA_ERR_CTRL, + .rsvd = 0xfffffffe, + },{ .name = "ZDMA_CH_ISR", .addr = A_ZDMA_CH_ISR, + .rsvd = 0xfffff000, + .w1c = 0xfff, + .post_write = zdma_ch_isr_postw, + },{ .name = "ZDMA_CH_IMR", .addr = A_ZDMA_CH_IMR, + .reset = 0xfff, + .rsvd = 0xfffff000, + .ro = 0xfff, + },{ .name = "ZDMA_CH_IEN", .addr = A_ZDMA_CH_IEN, + .rsvd = 0xfffff000, + .pre_write = zdma_ch_ien_prew, + },{ .name = "ZDMA_CH_IDS", .addr = A_ZDMA_CH_IDS, + .rsvd = 0xfffff000, + .pre_write = zdma_ch_ids_prew, + },{ .name = "ZDMA_CH_CTRL0", .addr = A_ZDMA_CH_CTRL0, + .reset = 0x80, + .rsvd = 0xffffff01, + .post_write = zdma_ch_ctrlx_postw, + },{ .name = "ZDMA_CH_CTRL1", .addr = A_ZDMA_CH_CTRL1, + .reset = 0x3ff, + .rsvd = 0xfffffc00, + },{ .name = "ZDMA_CH_FCI", .addr = A_ZDMA_CH_FCI, + .rsvd = 0xffffffc0, + },{ .name = "ZDMA_CH_STATUS", .addr = A_ZDMA_CH_STATUS, + .rsvd = 0xfffffffc, + .ro = 0x3, + },{ .name = "ZDMA_CH_DATA_ATTR", .addr = A_ZDMA_CH_DATA_ATTR, + .reset = 0x483d20f, + .rsvd = 0xf0000000, + },{ .name = "ZDMA_CH_DSCR_ATTR", .addr = A_ZDMA_CH_DSCR_ATTR, + .rsvd = 0xfffffe00, + },{ .name = "ZDMA_CH_SRC_DSCR_WORD0", .addr = A_ZDMA_CH_SRC_DSCR_WORD0, + },{ .name = "ZDMA_CH_SRC_DSCR_WORD1", .addr = A_ZDMA_CH_SRC_DSCR_WORD1, + .rsvd = 0xfffe0000, + },{ .name = "ZDMA_CH_SRC_DSCR_WORD2", .addr = A_ZDMA_CH_SRC_DSCR_WORD2, + .rsvd = 0xc0000000, + },{ .name = "ZDMA_CH_SRC_DSCR_WORD3", .addr = A_ZDMA_CH_SRC_DSCR_WORD3, + .rsvd = 0xffffffe0, + },{ .name = "ZDMA_CH_DST_DSCR_WORD0", .addr = A_ZDMA_CH_DST_DSCR_WORD0, + },{ .name = "ZDMA_CH_DST_DSCR_WORD1", .addr = A_ZDMA_CH_DST_DSCR_WORD1, + .rsvd = 0xfffe0000, + },{ .name = "ZDMA_CH_DST_DSCR_WORD2", .addr = A_ZDMA_CH_DST_DSCR_WORD2, + .rsvd = 0xc0000000, + },{ .name = "ZDMA_CH_DST_DSCR_WORD3", .addr = A_ZDMA_CH_DST_DSCR_WORD3, + .rsvd = 0xfffffffa, + },{ .name = "ZDMA_CH_WR_ONLY_WORD0", .addr = A_ZDMA_CH_WR_ONLY_WORD0, + },{ .name = "ZDMA_CH_WR_ONLY_WORD1", .addr = A_ZDMA_CH_WR_ONLY_WORD1, + },{ .name = "ZDMA_CH_WR_ONLY_WORD2", .addr = A_ZDMA_CH_WR_ONLY_WORD2, + },{ .name = "ZDMA_CH_WR_ONLY_WORD3", .addr = A_ZDMA_CH_WR_ONLY_WORD3, + },{ .name = "ZDMA_CH_SRC_START_LSB", .addr = A_ZDMA_CH_SRC_START_LSB, + },{ .name = "ZDMA_CH_SRC_START_MSB", .addr = A_ZDMA_CH_SRC_START_MSB, + .rsvd = 0xfffe0000, + },{ .name = "ZDMA_CH_DST_START_LSB", .addr = A_ZDMA_CH_DST_START_LSB, + },{ .name = "ZDMA_CH_DST_START_MSB", .addr = A_ZDMA_CH_DST_START_MSB, + .rsvd = 0xfffe0000, + },{ .name = "ZDMA_CH_SRC_CUR_PYLD_LSB", .addr = A_ZDMA_CH_SRC_CUR_PYLD_LSB, + .ro = 0xffffffff, + },{ .name = "ZDMA_CH_SRC_CUR_PYLD_MSB", .addr = A_ZDMA_CH_SRC_CUR_PYLD_MSB, + .rsvd = 0xfffe0000, + .ro = 0x1ffff, + },{ .name = "ZDMA_CH_DST_CUR_PYLD_LSB", .addr = A_ZDMA_CH_DST_CUR_PYLD_LSB, + .ro = 0xffffffff, + },{ .name = 
"ZDMA_CH_DST_CUR_PYLD_MSB", .addr = A_ZDMA_CH_DST_CUR_PYLD_MSB, + .rsvd = 0xfffe0000, + .ro = 0x1ffff, + },{ .name = "ZDMA_CH_SRC_CUR_DSCR_LSB", .addr = A_ZDMA_CH_SRC_CUR_DSCR_LSB, + .ro = 0xffffffff, + },{ .name = "ZDMA_CH_SRC_CUR_DSCR_MSB", .addr = A_ZDMA_CH_SRC_CUR_DSCR_MSB, + .rsvd = 0xfffe0000, + .ro = 0x1ffff, + },{ .name = "ZDMA_CH_DST_CUR_DSCR_LSB", .addr = A_ZDMA_CH_DST_CUR_DSCR_LSB, + .ro = 0xffffffff, + },{ .name = "ZDMA_CH_DST_CUR_DSCR_MSB", .addr = A_ZDMA_CH_DST_CUR_DSCR_MSB, + .rsvd = 0xfffe0000, + .ro = 0x1ffff, + },{ .name = "ZDMA_CH_TOTAL_BYTE", .addr = A_ZDMA_CH_TOTAL_BYTE, + .w1c = 0xffffffff, + },{ .name = "ZDMA_CH_RATE_CNTL", .addr = A_ZDMA_CH_RATE_CNTL, + .rsvd = 0xfffff000, + },{ .name = "ZDMA_CH_IRQ_SRC_ACCT", .addr = A_ZDMA_CH_IRQ_SRC_ACCT, + .rsvd = 0xffffff00, + .ro = 0xff, + .cor = 0xff, + },{ .name = "ZDMA_CH_IRQ_DST_ACCT", .addr = A_ZDMA_CH_IRQ_DST_ACCT, + .rsvd = 0xffffff00, + .ro = 0xff, + .cor = 0xff, + },{ .name = "ZDMA_CH_DBG0", .addr = A_ZDMA_CH_DBG0, + .rsvd = 0xfffffe00, + .ro = 0x1ff, + + /* + * There's SW out there that will check the debug regs for free space. + * Claim that we always have 0x100 free. + */ + .reset = 0x100 + },{ .name = "ZDMA_CH_DBG1", .addr = A_ZDMA_CH_DBG1, + .rsvd = 0xfffffe00, + .ro = 0x1ff, + },{ .name = "ZDMA_CH_CTRL2", .addr = A_ZDMA_CH_CTRL2, + .rsvd = 0xfffffffe, + .post_write = zdma_ch_ctrlx_postw, + } +}; + +static void zdma_reset(DeviceState *dev) +{ + XlnxZDMA *s = XLNX_ZDMA(dev); + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(s->regs_info); ++i) { + register_reset(&s->regs_info[i]); + } + + zdma_ch_imr_update_irq(s); +} + +static uint64_t zdma_read(void *opaque, hwaddr addr, unsigned size) +{ + XlnxZDMA *s = XLNX_ZDMA(opaque); + RegisterInfo *r = &s->regs_info[addr / 4]; + + if (!r->data) { + char *path = object_get_canonical_path(OBJECT(s)); + qemu_log("%s: Decode error: read from %" HWADDR_PRIx "\n", + path, + addr); + g_free(path); + ARRAY_FIELD_DP32(s->regs, ZDMA_CH_ISR, INV_APB, true); + zdma_ch_imr_update_irq(s); + return 0; + } + return register_read(r, ~0, NULL, false); +} + +static void zdma_write(void *opaque, hwaddr addr, uint64_t value, + unsigned size) +{ + XlnxZDMA *s = XLNX_ZDMA(opaque); + RegisterInfo *r = &s->regs_info[addr / 4]; + + if (!r->data) { + char *path = object_get_canonical_path(OBJECT(s)); + qemu_log("%s: Decode error: write to %" HWADDR_PRIx "=%" PRIx64 "\n", + path, + addr, value); + g_free(path); + ARRAY_FIELD_DP32(s->regs, ZDMA_CH_ISR, INV_APB, true); + zdma_ch_imr_update_irq(s); + return; + } + register_write(r, value, ~0, NULL, false); +} + +static const MemoryRegionOps zdma_ops = { + .read = zdma_read, + .write = zdma_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +static void zdma_realize(DeviceState *dev, Error **errp) +{ + XlnxZDMA *s = XLNX_ZDMA(dev); + unsigned int i; + + if (!s->dma_mr) { + error_setg(errp, TYPE_XLNX_ZDMA " 'dma' link not set"); + return; + } + address_space_init(&s->dma_as, s->dma_mr, "zdma-dma"); + + for (i = 0; i < ARRAY_SIZE(zdma_regs_info); ++i) { + RegisterInfo *r = &s->regs_info[zdma_regs_info[i].addr / 4]; + + *r = (RegisterInfo) { + .data = (uint8_t *)&s->regs[ + zdma_regs_info[i].addr / 4], + .data_size = sizeof(uint32_t), + .access = &zdma_regs_info[i], + .opaque = s, + }; + } + + s->attr = MEMTXATTRS_UNSPECIFIED; +} + +static void zdma_init(Object *obj) +{ + XlnxZDMA *s = XLNX_ZDMA(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + + memory_region_init_io(&s->iomem, obj, 
&zdma_ops, s, + TYPE_XLNX_ZDMA, ZDMA_R_MAX * 4); + sysbus_init_mmio(sbd, &s->iomem); + sysbus_init_irq(sbd, &s->irq_zdma_ch_imr); + + object_property_add_link(obj, "dma", TYPE_MEMORY_REGION, + (Object **)&s->dma_mr, + qdev_prop_allow_set_link_before_realize, + OBJ_PROP_LINK_STRONG); +} + +static const VMStateDescription vmstate_zdma = { + .name = TYPE_XLNX_ZDMA, + .version_id = 1, + .minimum_version_id = 1, + .minimum_version_id_old = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32_ARRAY(regs, XlnxZDMA, ZDMA_R_MAX), + VMSTATE_UINT32(state, XlnxZDMA), + VMSTATE_UINT32_ARRAY(dsc_src.words, XlnxZDMA, 4), + VMSTATE_UINT32_ARRAY(dsc_dst.words, XlnxZDMA, 4), + VMSTATE_END_OF_LIST(), + } +}; + +static Property zdma_props[] = { + DEFINE_PROP_UINT32("bus-width", XlnxZDMA, cfg.bus_width, 64), + DEFINE_PROP_END_OF_LIST(), +}; + +static void zdma_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = zdma_reset; + dc->realize = zdma_realize; + device_class_set_props(dc, zdma_props); + dc->vmsd = &vmstate_zdma; +} + +static const TypeInfo zdma_info = { + .name = TYPE_XLNX_ZDMA, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(XlnxZDMA), + .class_init = zdma_class_init, + .instance_init = zdma_init, +}; + +static void zdma_register_types(void) +{ + type_register_static(&zdma_info); +} + +type_init(zdma_register_types) diff --git a/hw/dma/xlnx-zynq-devcfg.c b/hw/dma/xlnx-zynq-devcfg.c new file mode 100644 index 000000000..e33112b6f --- /dev/null +++ b/hw/dma/xlnx-zynq-devcfg.c @@ -0,0 +1,402 @@ +/* + * QEMU model of the Xilinx Zynq Devcfg Interface + * + * (C) 2011 PetaLogix Pty Ltd + * (C) 2014 Xilinx Inc. + * Written by Peter Crosthwaite + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "hw/dma/xlnx-zynq-devcfg.h" +#include "hw/irq.h" +#include "migration/vmstate.h" +#include "qemu/bitops.h" +#include "sysemu/dma.h" +#include "qemu/log.h" +#include "qemu/module.h" + +#define FREQ_HZ 900000000 + +#define BTT_MAX 0x400 + +#ifndef XLNX_ZYNQ_DEVCFG_ERR_DEBUG +#define XLNX_ZYNQ_DEVCFG_ERR_DEBUG 0 +#endif + +#define DB_PRINT(fmt, args...) 
do { \
+    if (XLNX_ZYNQ_DEVCFG_ERR_DEBUG) { \
+        qemu_log("%s: " fmt, __func__, ## args); \
+    } \
+} while (0)
+
+REG32(CTRL, 0x00)
+    FIELD(CTRL, FORCE_RST, 31, 1) /* Not supported, wr ignored */
+    FIELD(CTRL, PCAP_PR, 27, 1) /* Forced to 0 on bad unlock */
+    FIELD(CTRL, PCAP_MODE, 26, 1)
+    FIELD(CTRL, MULTIBOOT_EN, 24, 1)
+    FIELD(CTRL, USER_MODE, 15, 1)
+    FIELD(CTRL, PCFG_AES_FUSE, 12, 1)
+    FIELD(CTRL, PCFG_AES_EN, 9, 3)
+    FIELD(CTRL, SEU_EN, 8, 1)
+    FIELD(CTRL, SEC_EN, 7, 1)
+    FIELD(CTRL, SPNIDEN, 6, 1)
+    FIELD(CTRL, SPIDEN, 5, 1)
+    FIELD(CTRL, NIDEN, 4, 1)
+    FIELD(CTRL, DBGEN, 3, 1)
+    FIELD(CTRL, DAP_EN, 0, 3)
+
+REG32(LOCK, 0x04)
+#define AES_FUSE_LOCK 4
+#define AES_EN_LOCK 3
+#define SEU_LOCK 2
+#define SEC_LOCK 1
+#define DBG_LOCK 0
+
+/* mapping bits in R_LOCK to what they lock in R_CTRL */
+static const uint32_t lock_ctrl_map[] = {
+    [AES_FUSE_LOCK] = R_CTRL_PCFG_AES_FUSE_MASK,
+    [AES_EN_LOCK] = R_CTRL_PCFG_AES_EN_MASK,
+    [SEU_LOCK] = R_CTRL_SEU_EN_MASK,
+    [SEC_LOCK] = R_CTRL_SEC_EN_MASK,
+    [DBG_LOCK] = R_CTRL_SPNIDEN_MASK | R_CTRL_SPIDEN_MASK |
+                 R_CTRL_NIDEN_MASK | R_CTRL_DBGEN_MASK |
+                 R_CTRL_DAP_EN_MASK,
+};
+
+REG32(CFG, 0x08)
+    FIELD(CFG, RFIFO_TH, 10, 2)
+    FIELD(CFG, WFIFO_TH, 8, 2)
+    FIELD(CFG, RCLK_EDGE, 7, 1)
+    FIELD(CFG, WCLK_EDGE, 6, 1)
+    FIELD(CFG, DISABLE_SRC_INC, 5, 1)
+    FIELD(CFG, DISABLE_DST_INC, 4, 1)
+#define R_CFG_RESET 0x50B
+
+REG32(INT_STS, 0x0C)
+    FIELD(INT_STS, PSS_GTS_USR_B, 31, 1)
+    FIELD(INT_STS, PSS_FST_CFG_B, 30, 1)
+    FIELD(INT_STS, PSS_CFG_RESET_B, 27, 1)
+    FIELD(INT_STS, RX_FIFO_OV, 18, 1)
+    FIELD(INT_STS, WR_FIFO_LVL, 17, 1)
+    FIELD(INT_STS, RD_FIFO_LVL, 16, 1)
+    FIELD(INT_STS, DMA_CMD_ERR, 15, 1)
+    FIELD(INT_STS, DMA_Q_OV, 14, 1)
+    FIELD(INT_STS, DMA_DONE, 13, 1)
+    FIELD(INT_STS, DMA_P_DONE, 12, 1)
+    FIELD(INT_STS, P2D_LEN_ERR, 11, 1)
+    FIELD(INT_STS, PCFG_DONE, 2, 1)
+#define R_INT_STS_RSVD ((0x7 << 24) | (0x1 << 19) | (0xF << 7))
+
+REG32(INT_MASK, 0x10)
+
+REG32(STATUS, 0x14)
+    FIELD(STATUS, DMA_CMD_Q_F, 31, 1)
+    FIELD(STATUS, DMA_CMD_Q_E, 30, 1)
+    FIELD(STATUS, DMA_DONE_CNT, 28, 2)
+    FIELD(STATUS, RX_FIFO_LVL, 20, 5)
+    FIELD(STATUS, TX_FIFO_LVL, 12, 7)
+    FIELD(STATUS, PSS_GTS_USR_B, 11, 1)
+    FIELD(STATUS, PSS_FST_CFG_B, 10, 1)
+    FIELD(STATUS, PSS_CFG_RESET_B, 5, 1)
+
+REG32(DMA_SRC_ADDR, 0x18)
+REG32(DMA_DST_ADDR, 0x1C)
+REG32(DMA_SRC_LEN, 0x20)
+REG32(DMA_DST_LEN, 0x24)
+REG32(ROM_SHADOW, 0x28)
+REG32(SW_ID, 0x30)
+REG32(UNLOCK, 0x34)
+
+#define R_UNLOCK_MAGIC 0x757BDF0D
+
+REG32(MCTRL, 0x80)
+    FIELD(MCTRL, PS_VERSION, 28, 4)
+    FIELD(MCTRL, PCFG_POR_B, 8, 1)
+    FIELD(MCTRL, INT_PCAP_LPBK, 4, 1)
+    FIELD(MCTRL, QEMU, 3, 1)
+
+static void xlnx_zynq_devcfg_update_ixr(XlnxZynqDevcfg *s)
+{
+    qemu_set_irq(s->irq, ~s->regs[R_INT_MASK] & s->regs[R_INT_STS]);
+}
+
+static void xlnx_zynq_devcfg_reset(DeviceState *dev)
+{
+    XlnxZynqDevcfg *s = XLNX_ZYNQ_DEVCFG(dev);
+    int i;
+
+    for (i = 0; i < XLNX_ZYNQ_DEVCFG_R_MAX; ++i) {
+        register_reset(&s->regs_info[i]);
+    }
+}
+
+static void xlnx_zynq_devcfg_dma_go(XlnxZynqDevcfg *s)
+{
+    do {
+        uint8_t buf[BTT_MAX];
+        XlnxZynqDevcfgDMACmd *dmah = s->dma_cmd_fifo;
+        uint32_t btt = BTT_MAX;
+        bool loopback = s->regs[R_MCTRL] & R_MCTRL_INT_PCAP_LPBK_MASK;
+
+        btt = MIN(btt, dmah->src_len);
+        if (loopback) {
+            btt = MIN(btt, dmah->dest_len);
+        }
+        DB_PRINT("reading %x bytes from %x\n", btt, dmah->src_addr);
+        dma_memory_read(&address_space_memory, dmah->src_addr, buf, btt);
+        dmah->src_len -= btt;
+        dmah->src_addr += btt;
+        if (loopback && (dmah->src_len || dmah->dest_len)) {
+            DB_PRINT("writing %x bytes to %x\n",
btt, dmah->dest_addr); + dma_memory_write(&address_space_memory, dmah->dest_addr, buf, btt); + dmah->dest_len -= btt; + dmah->dest_addr += btt; + } + if (!dmah->src_len && !dmah->dest_len) { + DB_PRINT("dma operation finished\n"); + s->regs[R_INT_STS] |= R_INT_STS_DMA_DONE_MASK | + R_INT_STS_DMA_P_DONE_MASK; + s->dma_cmd_fifo_num--; + memmove(s->dma_cmd_fifo, &s->dma_cmd_fifo[1], + sizeof(s->dma_cmd_fifo) - sizeof(s->dma_cmd_fifo[0])); + } + xlnx_zynq_devcfg_update_ixr(s); + } while (s->dma_cmd_fifo_num); +} + +static void r_ixr_post_write(RegisterInfo *reg, uint64_t val) +{ + XlnxZynqDevcfg *s = XLNX_ZYNQ_DEVCFG(reg->opaque); + + xlnx_zynq_devcfg_update_ixr(s); +} + +static uint64_t r_ctrl_pre_write(RegisterInfo *reg, uint64_t val) +{ + XlnxZynqDevcfg *s = XLNX_ZYNQ_DEVCFG(reg->opaque); + int i; + + for (i = 0; i < ARRAY_SIZE(lock_ctrl_map); ++i) { + if (s->regs[R_LOCK] & 1 << i) { + val &= ~lock_ctrl_map[i]; + val |= lock_ctrl_map[i] & s->regs[R_CTRL]; + } + } + return val; +} + +static void r_ctrl_post_write(RegisterInfo *reg, uint64_t val) +{ + const char *device_prefix = object_get_typename(OBJECT(reg->opaque)); + uint32_t aes_en = FIELD_EX32(val, CTRL, PCFG_AES_EN); + + if (aes_en != 0 && aes_en != 7) { + qemu_log_mask(LOG_UNIMP, "%s: warning, aes-en bits inconsistent," + "unimplemented security reset should happen!\n", + device_prefix); + } +} + +static void r_unlock_post_write(RegisterInfo *reg, uint64_t val) +{ + XlnxZynqDevcfg *s = XLNX_ZYNQ_DEVCFG(reg->opaque); + const char *device_prefix = object_get_typename(OBJECT(s)); + + if (val == R_UNLOCK_MAGIC) { + DB_PRINT("successful unlock\n"); + s->regs[R_CTRL] |= R_CTRL_PCAP_PR_MASK; + s->regs[R_CTRL] |= R_CTRL_PCFG_AES_EN_MASK; + memory_region_set_enabled(&s->iomem, true); + } else { /* bad unlock attempt */ + qemu_log_mask(LOG_GUEST_ERROR, "%s: failed unlock\n", device_prefix); + s->regs[R_CTRL] &= ~R_CTRL_PCAP_PR_MASK; + s->regs[R_CTRL] &= ~R_CTRL_PCFG_AES_EN_MASK; + /* core becomes inaccessible */ + memory_region_set_enabled(&s->iomem, false); + } +} + +static uint64_t r_lock_pre_write(RegisterInfo *reg, uint64_t val) +{ + XlnxZynqDevcfg *s = XLNX_ZYNQ_DEVCFG(reg->opaque); + + /* once bits are locked they stay locked */ + return s->regs[R_LOCK] | val; +} + +static void r_dma_dst_len_post_write(RegisterInfo *reg, uint64_t val) +{ + XlnxZynqDevcfg *s = XLNX_ZYNQ_DEVCFG(reg->opaque); + + s->dma_cmd_fifo[s->dma_cmd_fifo_num] = (XlnxZynqDevcfgDMACmd) { + .src_addr = s->regs[R_DMA_SRC_ADDR] & ~0x3UL, + .dest_addr = s->regs[R_DMA_DST_ADDR] & ~0x3UL, + .src_len = s->regs[R_DMA_SRC_LEN] << 2, + .dest_len = s->regs[R_DMA_DST_LEN] << 2, + }; + s->dma_cmd_fifo_num++; + DB_PRINT("dma transfer started; %d total transfers pending\n", + s->dma_cmd_fifo_num); + xlnx_zynq_devcfg_dma_go(s); +} + +static const RegisterAccessInfo xlnx_zynq_devcfg_regs_info[] = { + { .name = "CTRL", .addr = A_CTRL, + .reset = R_CTRL_PCAP_PR_MASK | R_CTRL_PCAP_MODE_MASK | 0x3 << 13, + .rsvd = 0x1 << 28 | 0x3ff << 13 | 0x3 << 13, + .pre_write = r_ctrl_pre_write, + .post_write = r_ctrl_post_write, + }, + { .name = "LOCK", .addr = A_LOCK, + .rsvd = MAKE_64BIT_MASK(5, 64 - 5), + .pre_write = r_lock_pre_write, + }, + { .name = "CFG", .addr = A_CFG, + .reset = R_CFG_RESET, + .rsvd = 0xfffff00f, + }, + { .name = "INT_STS", .addr = A_INT_STS, + .w1c = ~R_INT_STS_RSVD, + .reset = R_INT_STS_PSS_GTS_USR_B_MASK | + R_INT_STS_PSS_CFG_RESET_B_MASK | + R_INT_STS_WR_FIFO_LVL_MASK, + .rsvd = R_INT_STS_RSVD, + .post_write = r_ixr_post_write, + }, + { .name = "INT_MASK", .addr = 
A_INT_MASK, + .reset = ~0, + .rsvd = R_INT_STS_RSVD, + .post_write = r_ixr_post_write, + }, + { .name = "STATUS", .addr = A_STATUS, + .reset = R_STATUS_DMA_CMD_Q_E_MASK | + R_STATUS_PSS_GTS_USR_B_MASK | + R_STATUS_PSS_CFG_RESET_B_MASK, + .ro = ~0, + }, + { .name = "DMA_SRC_ADDR", .addr = A_DMA_SRC_ADDR, }, + { .name = "DMA_DST_ADDR", .addr = A_DMA_DST_ADDR, }, + { .name = "DMA_SRC_LEN", .addr = A_DMA_SRC_LEN, + .ro = MAKE_64BIT_MASK(27, 64 - 27) }, + { .name = "DMA_DST_LEN", .addr = A_DMA_DST_LEN, + .ro = MAKE_64BIT_MASK(27, 64 - 27), + .post_write = r_dma_dst_len_post_write, + }, + { .name = "ROM_SHADOW", .addr = A_ROM_SHADOW, + .rsvd = ~0ull, + }, + { .name = "SW_ID", .addr = A_SW_ID, }, + { .name = "UNLOCK", .addr = A_UNLOCK, + .post_write = r_unlock_post_write, + }, + { .name = "MCTRL", .addr = R_MCTRL * 4, + /* Silicon 3.0 for version field, the mysterious reserved bit 23 + * and QEMU platform identifier. + */ + .reset = 0x2 << R_MCTRL_PS_VERSION_SHIFT | 1 << 23 | R_MCTRL_QEMU_MASK, + .ro = ~R_MCTRL_INT_PCAP_LPBK_MASK, + .rsvd = 0x00f00303, + }, +}; + +static const MemoryRegionOps xlnx_zynq_devcfg_reg_ops = { + .read = register_read_memory, + .write = register_write_memory, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + } +}; + +static const VMStateDescription vmstate_xlnx_zynq_devcfg_dma_cmd = { + .name = "xlnx_zynq_devcfg_dma_cmd", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(src_addr, XlnxZynqDevcfgDMACmd), + VMSTATE_UINT32(dest_addr, XlnxZynqDevcfgDMACmd), + VMSTATE_UINT32(src_len, XlnxZynqDevcfgDMACmd), + VMSTATE_UINT32(dest_len, XlnxZynqDevcfgDMACmd), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_xlnx_zynq_devcfg = { + .name = "xlnx_zynq_devcfg", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_STRUCT_ARRAY(dma_cmd_fifo, XlnxZynqDevcfg, + XLNX_ZYNQ_DEVCFG_DMA_CMD_FIFO_LEN, 0, + vmstate_xlnx_zynq_devcfg_dma_cmd, + XlnxZynqDevcfgDMACmd), + VMSTATE_UINT8(dma_cmd_fifo_num, XlnxZynqDevcfg), + VMSTATE_UINT32_ARRAY(regs, XlnxZynqDevcfg, XLNX_ZYNQ_DEVCFG_R_MAX), + VMSTATE_END_OF_LIST() + } +}; + +static void xlnx_zynq_devcfg_init(Object *obj) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + XlnxZynqDevcfg *s = XLNX_ZYNQ_DEVCFG(obj); + RegisterInfoArray *reg_array; + + sysbus_init_irq(sbd, &s->irq); + + memory_region_init(&s->iomem, obj, "devcfg", XLNX_ZYNQ_DEVCFG_R_MAX * 4); + reg_array = + register_init_block32(DEVICE(obj), xlnx_zynq_devcfg_regs_info, + ARRAY_SIZE(xlnx_zynq_devcfg_regs_info), + s->regs_info, s->regs, + &xlnx_zynq_devcfg_reg_ops, + XLNX_ZYNQ_DEVCFG_ERR_DEBUG, + XLNX_ZYNQ_DEVCFG_R_MAX); + memory_region_add_subregion(&s->iomem, + A_CTRL, + ®_array->mem); + + sysbus_init_mmio(sbd, &s->iomem); +} + +static void xlnx_zynq_devcfg_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = xlnx_zynq_devcfg_reset; + dc->vmsd = &vmstate_xlnx_zynq_devcfg; +} + +static const TypeInfo xlnx_zynq_devcfg_info = { + .name = TYPE_XLNX_ZYNQ_DEVCFG, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(XlnxZynqDevcfg), + .instance_init = xlnx_zynq_devcfg_init, + .class_init = xlnx_zynq_devcfg_class_init, +}; + +static void xlnx_zynq_devcfg_register_types(void) +{ + type_register_static(&xlnx_zynq_devcfg_info); +} + +type_init(xlnx_zynq_devcfg_register_types) diff --git a/hw/dma/xlnx_csu_dma.c b/hw/dma/xlnx_csu_dma.c new file mode 100644 index 000000000..896bb3574 --- 
/dev/null +++ b/hw/dma/xlnx_csu_dma.c @@ -0,0 +1,743 @@ +/* + * Xilinx Platform CSU Stream DMA emulation + * + * This implementation is based on + * https://github.com/Xilinx/qemu/blob/master/hw/dma/csu_stream_dma.c + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 or + * (at your option) version 3 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . + */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "qapi/error.h" +#include "hw/irq.h" +#include "hw/qdev-properties.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "sysemu/dma.h" +#include "hw/ptimer.h" +#include "hw/stream.h" +#include "hw/register.h" +#include "hw/dma/xlnx_csu_dma.h" + +/* + * Ref: UG1087 (v1.7) February 8, 2019 + * https://www.xilinx.com/html_docs/registers/ug1087/ug1087-zynq-ultrascale-registers.html + * CSUDMA Module section + */ +REG32(ADDR, 0x0) + FIELD(ADDR, ADDR, 2, 30) /* wo */ +REG32(SIZE, 0x4) + FIELD(SIZE, SIZE, 2, 27) /* wo */ + FIELD(SIZE, LAST_WORD, 0, 1) /* rw, only exists in SRC */ +REG32(STATUS, 0x8) + FIELD(STATUS, DONE_CNT, 13, 3) /* wtc */ + FIELD(STATUS, FIFO_LEVEL, 5, 8) /* ro */ + FIELD(STATUS, OUTSTANDING, 1, 4) /* ro */ + FIELD(STATUS, BUSY, 0, 1) /* ro */ +REG32(CTRL, 0xc) + FIELD(CTRL, FIFOTHRESH, 25, 7) /* rw, only exists in DST, reset 0x40 */ + FIELD(CTRL, APB_ERR_RESP, 24, 1) /* rw */ + FIELD(CTRL, ENDIANNESS, 23, 1) /* rw */ + FIELD(CTRL, AXI_BRST_TYPE, 22, 1) /* rw */ + FIELD(CTRL, TIMEOUT_VAL, 10, 12) /* rw, reset: 0xFFE */ + FIELD(CTRL, FIFO_THRESH, 2, 8) /* rw, reset: 0x80 */ + FIELD(CTRL, PAUSE_STRM, 1, 1) /* rw */ + FIELD(CTRL, PAUSE_MEM, 0, 1) /* rw */ +REG32(CRC, 0x10) +REG32(INT_STATUS, 0x14) + FIELD(INT_STATUS, FIFO_OVERFLOW, 7, 1) /* wtc */ + FIELD(INT_STATUS, INVALID_APB, 6, 1) /* wtc */ + FIELD(INT_STATUS, THRESH_HIT, 5, 1) /* wtc */ + FIELD(INT_STATUS, TIMEOUT_MEM, 4, 1) /* wtc */ + FIELD(INT_STATUS, TIMEOUT_STRM, 3, 1) /* wtc */ + FIELD(INT_STATUS, AXI_BRESP_ERR, 2, 1) /* wtc, SRC: AXI_RDERR */ + FIELD(INT_STATUS, DONE, 1, 1) /* wtc */ + FIELD(INT_STATUS, MEM_DONE, 0, 1) /* wtc */ +REG32(INT_ENABLE, 0x18) + FIELD(INT_ENABLE, FIFO_OVERFLOW, 7, 1) /* wtc */ + FIELD(INT_ENABLE, INVALID_APB, 6, 1) /* wtc */ + FIELD(INT_ENABLE, THRESH_HIT, 5, 1) /* wtc */ + FIELD(INT_ENABLE, TIMEOUT_MEM, 4, 1) /* wtc */ + FIELD(INT_ENABLE, TIMEOUT_STRM, 3, 1) /* wtc */ + FIELD(INT_ENABLE, AXI_BRESP_ERR, 2, 1) /* wtc, SRC: AXI_RDERR */ + FIELD(INT_ENABLE, DONE, 1, 1) /* wtc */ + FIELD(INT_ENABLE, MEM_DONE, 0, 1) /* wtc */ +REG32(INT_DISABLE, 0x1c) + FIELD(INT_DISABLE, FIFO_OVERFLOW, 7, 1) /* wtc */ + FIELD(INT_DISABLE, INVALID_APB, 6, 1) /* wtc */ + FIELD(INT_DISABLE, THRESH_HIT, 5, 1) /* wtc */ + FIELD(INT_DISABLE, TIMEOUT_MEM, 4, 1) /* wtc */ + FIELD(INT_DISABLE, TIMEOUT_STRM, 3, 1) /* wtc */ + FIELD(INT_DISABLE, AXI_BRESP_ERR, 2, 1) /* wtc, SRC: AXI_RDERR */ + FIELD(INT_DISABLE, DONE, 1, 1) /* wtc */ + FIELD(INT_DISABLE, MEM_DONE, 0, 1) /* wtc */ +REG32(INT_MASK, 0x20) + FIELD(INT_MASK, FIFO_OVERFLOW, 7, 1) /* ro, reset: 0x1 */ + FIELD(INT_MASK, INVALID_APB, 6, 1) /* ro, reset: 0x1 */ + 
FIELD(INT_MASK, THRESH_HIT, 5, 1) /* ro, reset: 0x1 */ + FIELD(INT_MASK, TIMEOUT_MEM, 4, 1) /* ro, reset: 0x1 */ + FIELD(INT_MASK, TIMEOUT_STRM, 3, 1) /* ro, reset: 0x1 */ + FIELD(INT_MASK, AXI_BRESP_ERR, 2, 1) /* ro, reset: 0x1, SRC: AXI_RDERR */ + FIELD(INT_MASK, DONE, 1, 1) /* ro, reset: 0x1 */ + FIELD(INT_MASK, MEM_DONE, 0, 1) /* ro, reset: 0x1 */ +REG32(CTRL2, 0x24) + FIELD(CTRL2, ARCACHE, 24, 3) /* rw */ + FIELD(CTRL2, ROUTE_BIT, 23, 1) /* rw */ + FIELD(CTRL2, TIMEOUT_EN, 22, 1) /* rw */ + FIELD(CTRL2, TIMEOUT_PRE, 4, 12) /* rw, reset: 0xFFF */ + FIELD(CTRL2, MAX_OUTS_CMDS, 0, 4) /* rw, reset: 0x8 */ +REG32(ADDR_MSB, 0x28) + FIELD(ADDR_MSB, ADDR_MSB, 0, 17) /* wo */ + +#define R_CTRL_TIMEOUT_VAL_RESET (0xFFE) +#define R_CTRL_FIFO_THRESH_RESET (0x80) +#define R_CTRL_FIFOTHRESH_RESET (0x40) + +#define R_CTRL2_TIMEOUT_PRE_RESET (0xFFF) +#define R_CTRL2_MAX_OUTS_CMDS_RESET (0x8) + +#define XLNX_CSU_DMA_ERR_DEBUG (0) +#define XLNX_CSU_DMA_INT_R_MASK (0xff) + +/* UG1807: Set the prescaler value for the timeout in clk (~2.5ns) cycles */ +#define XLNX_CSU_DMA_TIMER_FREQ (400 * 1000 * 1000) + +static bool xlnx_csu_dma_is_paused(XlnxCSUDMA *s) +{ + bool paused; + + paused = !!(s->regs[R_CTRL] & R_CTRL_PAUSE_STRM_MASK); + paused |= !!(s->regs[R_CTRL] & R_CTRL_PAUSE_MEM_MASK); + + return paused; +} + +static bool xlnx_csu_dma_get_eop(XlnxCSUDMA *s) +{ + return s->r_size_last_word; +} + +static bool xlnx_csu_dma_burst_is_fixed(XlnxCSUDMA *s) +{ + return !!(s->regs[R_CTRL] & R_CTRL_AXI_BRST_TYPE_MASK); +} + +static bool xlnx_csu_dma_timeout_enabled(XlnxCSUDMA *s) +{ + return !!(s->regs[R_CTRL2] & R_CTRL2_TIMEOUT_EN_MASK); +} + +static void xlnx_csu_dma_update_done_cnt(XlnxCSUDMA *s, int a) +{ + int cnt; + + /* Increase DONE_CNT */ + cnt = ARRAY_FIELD_EX32(s->regs, STATUS, DONE_CNT) + a; + ARRAY_FIELD_DP32(s->regs, STATUS, DONE_CNT, cnt); +} + +static void xlnx_csu_dma_data_process(XlnxCSUDMA *s, uint8_t *buf, uint32_t len) +{ + uint32_t bswap; + uint32_t i; + + bswap = s->regs[R_CTRL] & R_CTRL_ENDIANNESS_MASK; + if (s->is_dst && !bswap) { + /* Fast when ENDIANNESS cleared */ + return; + } + + for (i = 0; i < len; i += 4) { + uint8_t *b = &buf[i]; + union { + uint8_t u8[4]; + uint32_t u32; + } v = { + .u8 = { b[0], b[1], b[2], b[3] } + }; + + if (!s->is_dst) { + s->regs[R_CRC] += v.u32; + } + if (bswap) { + /* + * No point using bswap, we need to writeback + * into a potentially unaligned pointer. 
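+             * An equivalent spelling with the unaligned-safe accessors
+             * from "qemu/bswap.h" would be (a sketch, at the cost of a
+             * second load):
+             *
+             *     stl_le_p(b, bswap32(ldl_le_p(b)));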
+             */
+            b[0] = v.u8[3];
+            b[1] = v.u8[2];
+            b[2] = v.u8[1];
+            b[3] = v.u8[0];
+        }
+    }
+}
+
+static void xlnx_csu_dma_update_irq(XlnxCSUDMA *s)
+{
+    qemu_set_irq(s->irq, !!(s->regs[R_INT_STATUS] & ~s->regs[R_INT_MASK]));
+}
+
+/* len is in bytes */
+static uint32_t xlnx_csu_dma_read(XlnxCSUDMA *s, uint8_t *buf, uint32_t len)
+{
+    hwaddr addr = (hwaddr)s->regs[R_ADDR_MSB] << 32 | s->regs[R_ADDR];
+    MemTxResult result = MEMTX_OK;
+
+    if (xlnx_csu_dma_burst_is_fixed(s)) {
+        uint32_t i;
+
+        for (i = 0; i < len && (result == MEMTX_OK); i += s->width) {
+            uint32_t mlen = MIN(len - i, s->width);
+
+            result = address_space_rw(&s->dma_as, addr, s->attr,
+                                      buf + i, mlen, false);
+        }
+    } else {
+        result = address_space_rw(&s->dma_as, addr, s->attr, buf, len, false);
+    }
+
+    if (result == MEMTX_OK) {
+        xlnx_csu_dma_data_process(s, buf, len);
+    } else {
+        qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad address " TARGET_FMT_plx
+                      " for mem read", __func__, addr);
+        s->regs[R_INT_STATUS] |= R_INT_STATUS_AXI_BRESP_ERR_MASK;
+        xlnx_csu_dma_update_irq(s);
+    }
+    return len;
+}
+
+/* len is in bytes */
+static uint32_t xlnx_csu_dma_write(XlnxCSUDMA *s, uint8_t *buf, uint32_t len)
+{
+    hwaddr addr = (hwaddr)s->regs[R_ADDR_MSB] << 32 | s->regs[R_ADDR];
+    MemTxResult result = MEMTX_OK;
+
+    xlnx_csu_dma_data_process(s, buf, len);
+    if (xlnx_csu_dma_burst_is_fixed(s)) {
+        uint32_t i;
+
+        for (i = 0; i < len && (result == MEMTX_OK); i += s->width) {
+            uint32_t mlen = MIN(len - i, s->width);
+
+            result = address_space_rw(&s->dma_as, addr, s->attr,
+                                      buf, mlen, true);
+            buf += mlen;
+        }
+    } else {
+        result = address_space_rw(&s->dma_as, addr, s->attr, buf, len, true);
+    }
+
+    if (result != MEMTX_OK) {
+        qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad address " TARGET_FMT_plx
+                      " for mem write", __func__, addr);
+        s->regs[R_INT_STATUS] |= R_INT_STATUS_AXI_BRESP_ERR_MASK;
+        xlnx_csu_dma_update_irq(s);
+    }
+    return len;
+}
+
+static void xlnx_csu_dma_done(XlnxCSUDMA *s)
+{
+    s->regs[R_STATUS] &= ~R_STATUS_BUSY_MASK;
+    s->regs[R_INT_STATUS] |= R_INT_STATUS_DONE_MASK;
+
+    if (!s->is_dst) {
+        s->regs[R_INT_STATUS] |= R_INT_STATUS_MEM_DONE_MASK;
+    }
+
+    xlnx_csu_dma_update_done_cnt(s, 1);
+}
+
+static uint32_t xlnx_csu_dma_advance(XlnxCSUDMA *s, uint32_t len)
+{
+    uint32_t size = s->regs[R_SIZE];
+    hwaddr dst = (hwaddr)s->regs[R_ADDR_MSB] << 32 | s->regs[R_ADDR];
+
+    assert(len <= size);
+
+    size -= len;
+    s->regs[R_SIZE] = size;
+
+    if (!xlnx_csu_dma_burst_is_fixed(s)) {
+        dst += len;
+        s->regs[R_ADDR] = (uint32_t) dst;
+        s->regs[R_ADDR_MSB] = dst >> 32;
+    }
+
+    if (size == 0) {
+        xlnx_csu_dma_done(s);
+    }
+
+    return size;
+}
+
+static void xlnx_csu_dma_src_notify(void *opaque)
+{
+    XlnxCSUDMA *s = XLNX_CSU_DMA(opaque);
+    unsigned char buf[4 * 1024];
+    size_t rlen = 0;
+
+    ptimer_transaction_begin(s->src_timer);
+    /* Stop the backpressure timer */
+    ptimer_stop(s->src_timer);
+
+    while (s->regs[R_SIZE] && !xlnx_csu_dma_is_paused(s) &&
+           stream_can_push(s->tx_dev, xlnx_csu_dma_src_notify, s)) {
+        uint32_t plen = MIN(s->regs[R_SIZE], sizeof buf);
+        bool eop = false;
+
+        /* Did we fit it all?
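+         * I.e. does the remaining SIZE fit into this single push?  EOP
+         * may only accompany the beat that carries the last word of the
+         * transfer, and only if the guest latched SIZE.LAST_WORD when it
+         * armed the SRC channel.  The producer side of the stream
+         * contract reduces to (sketch, names illustrative):
+         *
+         *     sent = stream_push(sink, buf, plen, last_beat && last_word);
+         *     remaining -= sent;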
*/ + if (s->regs[R_SIZE] == plen && xlnx_csu_dma_get_eop(s)) { + eop = true; + } + + /* DMA transfer */ + xlnx_csu_dma_read(s, buf, plen); + rlen = stream_push(s->tx_dev, buf, plen, eop); + xlnx_csu_dma_advance(s, rlen); + } + + if (xlnx_csu_dma_timeout_enabled(s) && s->regs[R_SIZE] && + !stream_can_push(s->tx_dev, xlnx_csu_dma_src_notify, s)) { + uint32_t timeout = ARRAY_FIELD_EX32(s->regs, CTRL, TIMEOUT_VAL); + uint32_t div = ARRAY_FIELD_EX32(s->regs, CTRL2, TIMEOUT_PRE) + 1; + uint32_t freq = XLNX_CSU_DMA_TIMER_FREQ; + + freq /= div; + ptimer_set_freq(s->src_timer, freq); + ptimer_set_count(s->src_timer, timeout); + ptimer_run(s->src_timer, 1); + } + + ptimer_transaction_commit(s->src_timer); + xlnx_csu_dma_update_irq(s); +} + +static uint64_t addr_pre_write(RegisterInfo *reg, uint64_t val) +{ + /* Address is word aligned */ + return val & R_ADDR_ADDR_MASK; +} + +static uint64_t size_pre_write(RegisterInfo *reg, uint64_t val) +{ + XlnxCSUDMA *s = XLNX_CSU_DMA(reg->opaque); + + if (s->regs[R_SIZE] != 0) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Starting DMA while already running.\n", __func__); + } + + if (!s->is_dst) { + s->r_size_last_word = !!(val & R_SIZE_LAST_WORD_MASK); + } + + /* Size is word aligned */ + return val & R_SIZE_SIZE_MASK; +} + +static uint64_t size_post_read(RegisterInfo *reg, uint64_t val) +{ + XlnxCSUDMA *s = XLNX_CSU_DMA(reg->opaque); + + return val | s->r_size_last_word; +} + +static void size_post_write(RegisterInfo *reg, uint64_t val) +{ + XlnxCSUDMA *s = XLNX_CSU_DMA(reg->opaque); + + s->regs[R_STATUS] |= R_STATUS_BUSY_MASK; + + /* + * Note that if SIZE is programmed to 0, and the DMA is started, + * the interrupts DONE and MEM_DONE will be asserted. + */ + if (s->regs[R_SIZE] == 0) { + xlnx_csu_dma_done(s); + xlnx_csu_dma_update_irq(s); + return; + } + + /* Set SIZE is considered the last step in transfer configuration */ + if (!s->is_dst) { + xlnx_csu_dma_src_notify(s); + } else { + if (s->notify) { + s->notify(s->notify_opaque); + } + } +} + +static uint64_t status_pre_write(RegisterInfo *reg, uint64_t val) +{ + return val & (R_STATUS_DONE_CNT_MASK | R_STATUS_BUSY_MASK); +} + +static void ctrl_post_write(RegisterInfo *reg, uint64_t val) +{ + XlnxCSUDMA *s = XLNX_CSU_DMA(reg->opaque); + + if (!s->is_dst) { + if (!xlnx_csu_dma_is_paused(s)) { + xlnx_csu_dma_src_notify(s); + } + } else { + if (!xlnx_csu_dma_is_paused(s) && s->notify) { + s->notify(s->notify_opaque); + } + } +} + +static uint64_t int_status_pre_write(RegisterInfo *reg, uint64_t val) +{ + XlnxCSUDMA *s = XLNX_CSU_DMA(reg->opaque); + + /* DMA counter decrements when flag 'DONE' is cleared */ + if ((val & s->regs[R_INT_STATUS] & R_INT_STATUS_DONE_MASK)) { + xlnx_csu_dma_update_done_cnt(s, -1); + } + + return s->regs[R_INT_STATUS] & ~val; +} + +static void int_status_post_write(RegisterInfo *reg, uint64_t val) +{ + XlnxCSUDMA *s = XLNX_CSU_DMA(reg->opaque); + + xlnx_csu_dma_update_irq(s); +} + +static uint64_t int_enable_pre_write(RegisterInfo *reg, uint64_t val) +{ + XlnxCSUDMA *s = XLNX_CSU_DMA(reg->opaque); + uint32_t v32 = val; + + /* + * R_INT_ENABLE doesn't have its own state. + * It is used to indirectly modify R_INT_MASK. 
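+ * The ENABLE/DISABLE pair lets the guest unmask one interrupt source
+ * with a single store, without a read-modify-write of the mask.  An
+ * illustrative guest sequence (mmio_write32() is hypothetical):
+ *
+ *     mmio_write32(base + A_INT_ENABLE, R_INT_STATUS_DONE_MASK);
+ *     ... take the interrupt, service it ...
+ *     mmio_write32(base + A_INT_STATUS, R_INT_STATUS_DONE_MASK);  w1c ack
+ *
+ * Per-bit semantics of a write to this register: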
+ * + * 1: Enable this interrupt field (the mask bit will be cleared to 0) + * 0: No effect + */ + s->regs[R_INT_MASK] &= ~v32; + return 0; +} + +static void int_enable_post_write(RegisterInfo *reg, uint64_t val) +{ + XlnxCSUDMA *s = XLNX_CSU_DMA(reg->opaque); + + xlnx_csu_dma_update_irq(s); +} + +static uint64_t int_disable_pre_write(RegisterInfo *reg, uint64_t val) +{ + XlnxCSUDMA *s = XLNX_CSU_DMA(reg->opaque); + uint32_t v32 = val; + + /* + * R_INT_DISABLE doesn't have its own state. + * It is used to indirectly modify R_INT_MASK. + * + * 1: Disable this interrupt field (the mask bit will be set to 1) + * 0: No effect + */ + s->regs[R_INT_MASK] |= v32; + return 0; +} + +static void int_disable_post_write(RegisterInfo *reg, uint64_t val) +{ + XlnxCSUDMA *s = XLNX_CSU_DMA(reg->opaque); + + xlnx_csu_dma_update_irq(s); +} + +static uint64_t addr_msb_pre_write(RegisterInfo *reg, uint64_t val) +{ + return val & R_ADDR_MSB_ADDR_MSB_MASK; +} + +static const RegisterAccessInfo *xlnx_csu_dma_regs_info[] = { +#define DMACH_REGINFO(NAME, snd) \ + (const RegisterAccessInfo []) { \ + { \ + .name = #NAME "_ADDR", \ + .addr = A_ADDR, \ + .pre_write = addr_pre_write \ + }, { \ + .name = #NAME "_SIZE", \ + .addr = A_SIZE, \ + .pre_write = size_pre_write, \ + .post_write = size_post_write, \ + .post_read = size_post_read \ + }, { \ + .name = #NAME "_STATUS", \ + .addr = A_STATUS, \ + .pre_write = status_pre_write, \ + .w1c = R_STATUS_DONE_CNT_MASK, \ + .ro = (R_STATUS_BUSY_MASK \ + | R_STATUS_FIFO_LEVEL_MASK \ + | R_STATUS_OUTSTANDING_MASK) \ + }, { \ + .name = #NAME "_CTRL", \ + .addr = A_CTRL, \ + .post_write = ctrl_post_write, \ + .reset = ((R_CTRL_TIMEOUT_VAL_RESET << R_CTRL_TIMEOUT_VAL_SHIFT) \ + | (R_CTRL_FIFO_THRESH_RESET << R_CTRL_FIFO_THRESH_SHIFT)\ + | (snd ? 
0 : R_CTRL_FIFOTHRESH_RESET \ + << R_CTRL_FIFOTHRESH_SHIFT)) \ + }, { \ + .name = #NAME "_CRC", \ + .addr = A_CRC, \ + }, { \ + .name = #NAME "_INT_STATUS", \ + .addr = A_INT_STATUS, \ + .pre_write = int_status_pre_write, \ + .post_write = int_status_post_write \ + }, { \ + .name = #NAME "_INT_ENABLE", \ + .addr = A_INT_ENABLE, \ + .pre_write = int_enable_pre_write, \ + .post_write = int_enable_post_write \ + }, { \ + .name = #NAME "_INT_DISABLE", \ + .addr = A_INT_DISABLE, \ + .pre_write = int_disable_pre_write, \ + .post_write = int_disable_post_write \ + }, { \ + .name = #NAME "_INT_MASK", \ + .addr = A_INT_MASK, \ + .ro = ~0, \ + .reset = XLNX_CSU_DMA_INT_R_MASK \ + }, { \ + .name = #NAME "_CTRL2", \ + .addr = A_CTRL2, \ + .reset = ((R_CTRL2_TIMEOUT_PRE_RESET \ + << R_CTRL2_TIMEOUT_PRE_SHIFT) \ + | (R_CTRL2_MAX_OUTS_CMDS_RESET \ + << R_CTRL2_MAX_OUTS_CMDS_SHIFT)) \ + }, { \ + .name = #NAME "_ADDR_MSB", \ + .addr = A_ADDR_MSB, \ + .pre_write = addr_msb_pre_write \ + } \ + } + + DMACH_REGINFO(DMA_SRC, true), + DMACH_REGINFO(DMA_DST, false) +}; + +static const MemoryRegionOps xlnx_csu_dma_ops = { + .read = register_read_memory, + .write = register_write_memory, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + } +}; + +static void xlnx_csu_dma_src_timeout_hit(void *opaque) +{ + XlnxCSUDMA *s = XLNX_CSU_DMA(opaque); + + /* Ignore if the timeout is masked */ + if (!xlnx_csu_dma_timeout_enabled(s)) { + return; + } + + s->regs[R_INT_STATUS] |= R_INT_STATUS_TIMEOUT_STRM_MASK; + xlnx_csu_dma_update_irq(s); +} + +static size_t xlnx_csu_dma_stream_push(StreamSink *obj, uint8_t *buf, + size_t len, bool eop) +{ + XlnxCSUDMA *s = XLNX_CSU_DMA(obj); + uint32_t size = s->regs[R_SIZE]; + uint32_t mlen = MIN(size, len) & (~3); /* Size is word aligned */ + + /* Be called when it's DST */ + assert(s->is_dst); + + if (size == 0 || len <= 0) { + return 0; + } + + if (len && (xlnx_csu_dma_is_paused(s) || mlen == 0)) { + qemu_log_mask(LOG_GUEST_ERROR, + "csu-dma: DST channel dropping %zd b of data.\n", len); + s->regs[R_INT_STATUS] |= R_INT_STATUS_FIFO_OVERFLOW_MASK; + return len; + } + + if (xlnx_csu_dma_write(s, buf, mlen) != mlen) { + return 0; + } + + xlnx_csu_dma_advance(s, mlen); + xlnx_csu_dma_update_irq(s); + + return mlen; +} + +static bool xlnx_csu_dma_stream_can_push(StreamSink *obj, + StreamCanPushNotifyFn notify, + void *notify_opaque) +{ + XlnxCSUDMA *s = XLNX_CSU_DMA(obj); + + if (s->regs[R_SIZE] != 0) { + return true; + } else { + s->notify = notify; + s->notify_opaque = notify_opaque; + return false; + } +} + +static void xlnx_csu_dma_reset(DeviceState *dev) +{ + XlnxCSUDMA *s = XLNX_CSU_DMA(dev); + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(s->regs_info); ++i) { + register_reset(&s->regs_info[i]); + } +} + +static void xlnx_csu_dma_realize(DeviceState *dev, Error **errp) +{ + XlnxCSUDMA *s = XLNX_CSU_DMA(dev); + RegisterInfoArray *reg_array; + + if (!s->is_dst && !s->tx_dev) { + error_setg(errp, "zynqmp.csu-dma: Stream not connected"); + return; + } + + if (!s->dma_mr) { + error_setg(errp, TYPE_XLNX_CSU_DMA " 'dma' link not set"); + return; + } + address_space_init(&s->dma_as, s->dma_mr, "csu-dma"); + + reg_array = + register_init_block32(dev, xlnx_csu_dma_regs_info[!!s->is_dst], + XLNX_CSU_DMA_R_MAX, + s->regs_info, s->regs, + &xlnx_csu_dma_ops, + XLNX_CSU_DMA_ERR_DEBUG, + XLNX_CSU_DMA_R_MAX * 4); + memory_region_add_subregion(&s->iomem, + 0x0, + ®_array->mem); + + sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->iomem); + 
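+    /*
+     * For reference, a board wires the SRC flavour of this device up
+     * roughly as follows (a sketch, not part of the patch; the MMIO
+     * address and the some_sink/pl_irq names are illustrative):
+     *
+     *     DeviceState *dma = qdev_new(TYPE_XLNX_CSU_DMA);
+     *
+     *     object_property_set_bool(OBJECT(dma), "is-dst", false,
+     *                              &error_fatal);
+     *     object_property_set_link(OBJECT(dma), "dma",
+     *                              OBJECT(get_system_memory()),
+     *                              &error_fatal);
+     *     object_property_set_link(OBJECT(dma), "stream-connected-dma",
+     *                              OBJECT(some_sink), &error_fatal);
+     *     sysbus_realize_and_unref(SYS_BUS_DEVICE(dma), &error_fatal);
+     *     sysbus_mmio_map(SYS_BUS_DEVICE(dma), 0, 0xffc80000);
+     *     sysbus_connect_irq(SYS_BUS_DEVICE(dma), 0, pl_irq);
+     *
+     * Both the "dma" link and, for the SRC channel, the stream sink must
+     * be set before realize, or the checks above fail.
+     */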
sysbus_init_irq(SYS_BUS_DEVICE(dev), &s->irq); + + s->src_timer = ptimer_init(xlnx_csu_dma_src_timeout_hit, + s, PTIMER_POLICY_DEFAULT); + + s->attr = MEMTXATTRS_UNSPECIFIED; + + s->r_size_last_word = 0; +} + +static const VMStateDescription vmstate_xlnx_csu_dma = { + .name = TYPE_XLNX_CSU_DMA, + .version_id = 0, + .minimum_version_id = 0, + .minimum_version_id_old = 0, + .fields = (VMStateField[]) { + VMSTATE_PTIMER(src_timer, XlnxCSUDMA), + VMSTATE_UINT16(width, XlnxCSUDMA), + VMSTATE_BOOL(is_dst, XlnxCSUDMA), + VMSTATE_BOOL(r_size_last_word, XlnxCSUDMA), + VMSTATE_UINT32_ARRAY(regs, XlnxCSUDMA, XLNX_CSU_DMA_R_MAX), + VMSTATE_END_OF_LIST(), + } +}; + +static Property xlnx_csu_dma_properties[] = { + /* + * Ref PG021, Stream Data Width: + * Data width in bits of the AXI S2MM AXI4-Stream Data bus. + * This value must be equal or less than the Memory Map Data Width. + * Valid values are 8, 16, 32, 64, 128, 512 and 1024. + * "dma-width" is the byte value of the "Stream Data Width". + */ + DEFINE_PROP_UINT16("dma-width", XlnxCSUDMA, width, 4), + /* + * The CSU DMA is a two-channel, simple DMA, allowing separate control of + * the SRC (read) channel and DST (write) channel. "is-dst" is used to mark + * which channel the device is connected to. + */ + DEFINE_PROP_BOOL("is-dst", XlnxCSUDMA, is_dst, true), + DEFINE_PROP_END_OF_LIST(), +}; + +static void xlnx_csu_dma_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + StreamSinkClass *ssc = STREAM_SINK_CLASS(klass); + + dc->reset = xlnx_csu_dma_reset; + dc->realize = xlnx_csu_dma_realize; + dc->vmsd = &vmstate_xlnx_csu_dma; + device_class_set_props(dc, xlnx_csu_dma_properties); + + ssc->push = xlnx_csu_dma_stream_push; + ssc->can_push = xlnx_csu_dma_stream_can_push; +} + +static void xlnx_csu_dma_init(Object *obj) +{ + XlnxCSUDMA *s = XLNX_CSU_DMA(obj); + + memory_region_init(&s->iomem, obj, TYPE_XLNX_CSU_DMA, + XLNX_CSU_DMA_R_MAX * 4); + + object_property_add_link(obj, "stream-connected-dma", TYPE_STREAM_SINK, + (Object **)&s->tx_dev, + qdev_prop_allow_set_link_before_realize, + OBJ_PROP_LINK_STRONG); + object_property_add_link(obj, "dma", TYPE_MEMORY_REGION, + (Object **)&s->dma_mr, + qdev_prop_allow_set_link_before_realize, + OBJ_PROP_LINK_STRONG); +} + +static const TypeInfo xlnx_csu_dma_info = { + .name = TYPE_XLNX_CSU_DMA, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(XlnxCSUDMA), + .class_init = xlnx_csu_dma_class_init, + .instance_init = xlnx_csu_dma_init, + .interfaces = (InterfaceInfo[]) { + { TYPE_STREAM_SINK }, + { } + } +}; + +static void xlnx_csu_dma_register_types(void) +{ + type_register_static(&xlnx_csu_dma_info); +} + +type_init(xlnx_csu_dma_register_types) diff --git a/hw/dma/xlnx_dpdma.c b/hw/dma/xlnx_dpdma.c new file mode 100644 index 000000000..967548abd --- /dev/null +++ b/hw/dma/xlnx_dpdma.c @@ -0,0 +1,790 @@ +/* + * xlnx_dpdma.c + * + * Copyright (C) 2015 : GreenSocs Ltd + * http://www.greensocs.com/ , email: info@greensocs.com + * + * Developed by : + * Frederic Konrad + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . + * + */ + +#include "qemu/osdep.h" +#include "qemu-common.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "hw/dma/xlnx_dpdma.h" +#include "hw/irq.h" +#include "migration/vmstate.h" + +#ifndef DEBUG_DPDMA +#define DEBUG_DPDMA 0 +#endif + +#define DPRINTF(fmt, ...) do { \ + if (DEBUG_DPDMA) { \ + qemu_log("xlnx_dpdma: " fmt , ## __VA_ARGS__); \ + } \ +} while (0) + +/* + * Registers offset for DPDMA. + */ +#define DPDMA_ERR_CTRL (0x0000) +#define DPDMA_ISR (0x0004 >> 2) +#define DPDMA_IMR (0x0008 >> 2) +#define DPDMA_IEN (0x000C >> 2) +#define DPDMA_IDS (0x0010 >> 2) +#define DPDMA_EISR (0x0014 >> 2) +#define DPDMA_EIMR (0x0018 >> 2) +#define DPDMA_EIEN (0x001C >> 2) +#define DPDMA_EIDS (0x0020 >> 2) +#define DPDMA_CNTL (0x0100 >> 2) + +#define DPDMA_GBL (0x0104 >> 2) +#define DPDMA_GBL_TRG_CH(n) (1 << n) +#define DPDMA_GBL_RTRG_CH(n) (1 << 6 << n) + +#define DPDMA_ALC0_CNTL (0x0108 >> 2) +#define DPDMA_ALC0_STATUS (0x010C >> 2) +#define DPDMA_ALC0_MAX (0x0110 >> 2) +#define DPDMA_ALC0_MIN (0x0114 >> 2) +#define DPDMA_ALC0_ACC (0x0118 >> 2) +#define DPDMA_ALC0_ACC_TRAN (0x011C >> 2) +#define DPDMA_ALC1_CNTL (0x0120 >> 2) +#define DPDMA_ALC1_STATUS (0x0124 >> 2) +#define DPDMA_ALC1_MAX (0x0128 >> 2) +#define DPDMA_ALC1_MIN (0x012C >> 2) +#define DPDMA_ALC1_ACC (0x0130 >> 2) +#define DPDMA_ALC1_ACC_TRAN (0x0134 >> 2) + +#define DPDMA_DSCR_STRT_ADDRE_CH(n) ((0x0200 + n * 0x100) >> 2) +#define DPDMA_DSCR_STRT_ADDR_CH(n) ((0x0204 + n * 0x100) >> 2) +#define DPDMA_DSCR_NEXT_ADDRE_CH(n) ((0x0208 + n * 0x100) >> 2) +#define DPDMA_DSCR_NEXT_ADDR_CH(n) ((0x020C + n * 0x100) >> 2) +#define DPDMA_PYLD_CUR_ADDRE_CH(n) ((0x0210 + n * 0x100) >> 2) +#define DPDMA_PYLD_CUR_ADDR_CH(n) ((0x0214 + n * 0x100) >> 2) + +#define DPDMA_CNTL_CH(n) ((0x0218 + n * 0x100) >> 2) +#define DPDMA_CNTL_CH_EN (1) +#define DPDMA_CNTL_CH_PAUSED (1 << 1) + +#define DPDMA_STATUS_CH(n) ((0x021C + n * 0x100) >> 2) +#define DPDMA_STATUS_BURST_TYPE (1 << 4) +#define DPDMA_STATUS_MODE (1 << 5) +#define DPDMA_STATUS_EN_CRC (1 << 6) +#define DPDMA_STATUS_LAST_DSCR (1 << 7) +#define DPDMA_STATUS_LDSCR_FRAME (1 << 8) +#define DPDMA_STATUS_IGNR_DONE (1 << 9) +#define DPDMA_STATUS_DSCR_DONE (1 << 10) +#define DPDMA_STATUS_EN_DSCR_UP (1 << 11) +#define DPDMA_STATUS_EN_DSCR_INTR (1 << 12) +#define DPDMA_STATUS_PREAMBLE_OFF (13) + +#define DPDMA_VDO_CH(n) ((0x0220 + n * 0x100) >> 2) +#define DPDMA_PYLD_SZ_CH(n) ((0x0224 + n * 0x100) >> 2) +#define DPDMA_DSCR_ID_CH(n) ((0x0228 + n * 0x100) >> 2) + +/* + * Descriptor control field. + */ +#define CONTROL_PREAMBLE_VALUE 0xA5 + +#define DSCR_CTRL_PREAMBLE 0xFF +#define DSCR_CTRL_EN_DSCR_DONE_INTR (1 << 8) +#define DSCR_CTRL_EN_DSCR_UPDATE (1 << 9) +#define DSCR_CTRL_IGNORE_DONE (1 << 10) +#define DSCR_CTRL_AXI_BURST_TYPE (1 << 11) +#define DSCR_CTRL_AXCACHE (0x0F << 12) +#define DSCR_CTRL_AXPROT (0x2 << 16) +#define DSCR_CTRL_DESCRIPTOR_MODE (1 << 18) +#define DSCR_CTRL_LAST_DESCRIPTOR (1 << 19) +#define DSCR_CTRL_ENABLE_CRC (1 << 20) +#define DSCR_CTRL_LAST_DESCRIPTOR_OF_FRAME (1 << 21) + +/* + * Descriptor timestamp field. + */ +#define STATUS_DONE (1 << 31) + +#define DPDMA_FRAG_MAX_SZ (4096) + +enum DPDMABurstType { + DPDMA_INCR = 0, + DPDMA_FIXED = 1 +}; + +enum DPDMAMode { + DPDMA_CONTIGOUS = 0, + DPDMA_FRAGMENTED = 1 +}; + +struct DPDMADescriptor { + uint32_t control; + uint32_t descriptor_id; + /* transfer size in byte. 
*/ + uint32_t xfer_size; + uint32_t line_size_stride; + uint32_t timestamp_lsb; + uint32_t timestamp_msb; + /* contains extension for both descriptor and source. */ + uint32_t address_extension; + uint32_t next_descriptor; + uint32_t source_address; + uint32_t address_extension_23; + uint32_t address_extension_45; + uint32_t source_address2; + uint32_t source_address3; + uint32_t source_address4; + uint32_t source_address5; + uint32_t crc; +}; + +typedef enum DPDMABurstType DPDMABurstType; +typedef enum DPDMAMode DPDMAMode; +typedef struct DPDMADescriptor DPDMADescriptor; + +static bool xlnx_dpdma_desc_is_last(DPDMADescriptor *desc) +{ + return ((desc->control & DSCR_CTRL_LAST_DESCRIPTOR) != 0); +} + +static bool xlnx_dpdma_desc_is_last_of_frame(DPDMADescriptor *desc) +{ + return ((desc->control & DSCR_CTRL_LAST_DESCRIPTOR_OF_FRAME) != 0); +} + +static uint64_t xlnx_dpdma_desc_get_source_address(DPDMADescriptor *desc, + uint8_t frag) +{ + uint64_t addr = 0; + assert(frag < 5); + + switch (frag) { + case 0: + addr = desc->source_address + + (extract32(desc->address_extension, 16, 12) << 20); + break; + case 1: + addr = desc->source_address2 + + (extract32(desc->address_extension_23, 0, 12) << 8); + break; + case 2: + addr = desc->source_address3 + + (extract32(desc->address_extension_23, 16, 12) << 20); + break; + case 3: + addr = desc->source_address4 + + (extract32(desc->address_extension_45, 0, 12) << 8); + break; + case 4: + addr = desc->source_address5 + + (extract32(desc->address_extension_45, 16, 12) << 20); + break; + default: + addr = 0; + break; + } + + return addr; +} + +static uint32_t xlnx_dpdma_desc_get_transfer_size(DPDMADescriptor *desc) +{ + return desc->xfer_size; +} + +static uint32_t xlnx_dpdma_desc_get_line_size(DPDMADescriptor *desc) +{ + return extract32(desc->line_size_stride, 0, 18); +} + +static uint32_t xlnx_dpdma_desc_get_line_stride(DPDMADescriptor *desc) +{ + return extract32(desc->line_size_stride, 18, 14) * 16; +} + +static inline bool xlnx_dpdma_desc_crc_enabled(DPDMADescriptor *desc) +{ + return (desc->control & DSCR_CTRL_ENABLE_CRC) != 0; +} + +static inline bool xlnx_dpdma_desc_check_crc(DPDMADescriptor *desc) +{ + uint32_t *p = (uint32_t *)desc; + uint32_t crc = 0; + uint8_t i; + + /* + * CRC is calculated on the whole descriptor except the last 32bits word + * using 32bits addition. 
+ */ + for (i = 0; i < 15; i++) { + crc += p[i]; + } + + return crc == desc->crc; +} + +static inline bool xlnx_dpdma_desc_completion_interrupt(DPDMADescriptor *desc) +{ + return (desc->control & DSCR_CTRL_EN_DSCR_DONE_INTR) != 0; +} + +static inline bool xlnx_dpdma_desc_is_valid(DPDMADescriptor *desc) +{ + return (desc->control & DSCR_CTRL_PREAMBLE) == CONTROL_PREAMBLE_VALUE; +} + +static inline bool xlnx_dpdma_desc_is_contiguous(DPDMADescriptor *desc) +{ + return (desc->control & DSCR_CTRL_DESCRIPTOR_MODE) == 0; +} + +static inline bool xlnx_dpdma_desc_update_enabled(DPDMADescriptor *desc) +{ + return (desc->control & DSCR_CTRL_EN_DSCR_UPDATE) != 0; +} + +static inline void xlnx_dpdma_desc_set_done(DPDMADescriptor *desc) +{ + desc->timestamp_msb |= STATUS_DONE; +} + +static inline bool xlnx_dpdma_desc_is_already_done(DPDMADescriptor *desc) +{ + return (desc->timestamp_msb & STATUS_DONE) != 0; +} + +static inline bool xlnx_dpdma_desc_ignore_done_bit(DPDMADescriptor *desc) +{ + return (desc->control & DSCR_CTRL_IGNORE_DONE) != 0; +} + +static const VMStateDescription vmstate_xlnx_dpdma = { + .name = TYPE_XLNX_DPDMA, + .version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32_ARRAY(registers, XlnxDPDMAState, + XLNX_DPDMA_REG_ARRAY_SIZE), + VMSTATE_BOOL_ARRAY(operation_finished, XlnxDPDMAState, 6), + VMSTATE_END_OF_LIST() + } +}; + +static void xlnx_dpdma_update_irq(XlnxDPDMAState *s) +{ + bool flags; + + flags = ((s->registers[DPDMA_ISR] & (~s->registers[DPDMA_IMR])) + || (s->registers[DPDMA_EISR] & (~s->registers[DPDMA_EIMR]))); + qemu_set_irq(s->irq, flags); +} + +static uint64_t xlnx_dpdma_descriptor_start_address(XlnxDPDMAState *s, + uint8_t channel) +{ + return (s->registers[DPDMA_DSCR_STRT_ADDRE_CH(channel)] << 16) + + s->registers[DPDMA_DSCR_STRT_ADDR_CH(channel)]; +} + +static uint64_t xlnx_dpdma_descriptor_next_address(XlnxDPDMAState *s, + uint8_t channel) +{ + return ((uint64_t)s->registers[DPDMA_DSCR_NEXT_ADDRE_CH(channel)] << 32) + + s->registers[DPDMA_DSCR_NEXT_ADDR_CH(channel)]; +} + +static bool xlnx_dpdma_is_channel_enabled(XlnxDPDMAState *s, + uint8_t channel) +{ + return (s->registers[DPDMA_CNTL_CH(channel)] & DPDMA_CNTL_CH_EN) != 0; +} + +static bool xlnx_dpdma_is_channel_paused(XlnxDPDMAState *s, + uint8_t channel) +{ + return (s->registers[DPDMA_CNTL_CH(channel)] & DPDMA_CNTL_CH_PAUSED) != 0; +} + +static inline bool xlnx_dpdma_is_channel_retriggered(XlnxDPDMAState *s, + uint8_t channel) +{ + /* Clear the retriggered bit after reading it. 
*/ + bool channel_is_retriggered = s->registers[DPDMA_GBL] + & DPDMA_GBL_RTRG_CH(channel); + s->registers[DPDMA_GBL] &= ~DPDMA_GBL_RTRG_CH(channel); + return channel_is_retriggered; +} + +static inline bool xlnx_dpdma_is_channel_triggered(XlnxDPDMAState *s, + uint8_t channel) +{ + return s->registers[DPDMA_GBL] & DPDMA_GBL_TRG_CH(channel); +} + +static void xlnx_dpdma_update_desc_info(XlnxDPDMAState *s, uint8_t channel, + DPDMADescriptor *desc) +{ + s->registers[DPDMA_DSCR_NEXT_ADDRE_CH(channel)] = + extract32(desc->address_extension, 0, 16); + s->registers[DPDMA_DSCR_NEXT_ADDR_CH(channel)] = desc->next_descriptor; + s->registers[DPDMA_PYLD_CUR_ADDRE_CH(channel)] = + extract32(desc->address_extension, 16, 16); + s->registers[DPDMA_PYLD_CUR_ADDR_CH(channel)] = desc->source_address; + s->registers[DPDMA_VDO_CH(channel)] = + extract32(desc->line_size_stride, 18, 14) + + (extract32(desc->line_size_stride, 0, 18) + << 14); + s->registers[DPDMA_PYLD_SZ_CH(channel)] = desc->xfer_size; + s->registers[DPDMA_DSCR_ID_CH(channel)] = desc->descriptor_id; + + /* Compute the status register with the descriptor information. */ + s->registers[DPDMA_STATUS_CH(channel)] = + extract32(desc->control, 0, 8) << 13; + if ((desc->control & DSCR_CTRL_EN_DSCR_DONE_INTR) != 0) { + s->registers[DPDMA_STATUS_CH(channel)] |= DPDMA_STATUS_EN_DSCR_INTR; + } + if ((desc->control & DSCR_CTRL_EN_DSCR_UPDATE) != 0) { + s->registers[DPDMA_STATUS_CH(channel)] |= DPDMA_STATUS_EN_DSCR_UP; + } + if ((desc->timestamp_msb & STATUS_DONE) != 0) { + s->registers[DPDMA_STATUS_CH(channel)] |= DPDMA_STATUS_DSCR_DONE; + } + if ((desc->control & DSCR_CTRL_IGNORE_DONE) != 0) { + s->registers[DPDMA_STATUS_CH(channel)] |= DPDMA_STATUS_IGNR_DONE; + } + if ((desc->control & DSCR_CTRL_LAST_DESCRIPTOR_OF_FRAME) != 0) { + s->registers[DPDMA_STATUS_CH(channel)] |= DPDMA_STATUS_LDSCR_FRAME; + } + if ((desc->control & DSCR_CTRL_LAST_DESCRIPTOR) != 0) { + s->registers[DPDMA_STATUS_CH(channel)] |= DPDMA_STATUS_LAST_DSCR; + } + if ((desc->control & DSCR_CTRL_ENABLE_CRC) != 0) { + s->registers[DPDMA_STATUS_CH(channel)] |= DPDMA_STATUS_EN_CRC; + } + if ((desc->control & DSCR_CTRL_DESCRIPTOR_MODE) != 0) { + s->registers[DPDMA_STATUS_CH(channel)] |= DPDMA_STATUS_MODE; + } + if ((desc->control & DSCR_CTRL_AXI_BURST_TYPE) != 0) { + s->registers[DPDMA_STATUS_CH(channel)] |= DPDMA_STATUS_BURST_TYPE; + } +} + +static void xlnx_dpdma_dump_descriptor(DPDMADescriptor *desc) +{ + if (DEBUG_DPDMA) { + qemu_log("DUMP DESCRIPTOR:\n"); + qemu_hexdump(stdout, "", desc, sizeof(DPDMADescriptor)); + } +} + +static uint64_t xlnx_dpdma_read(void *opaque, hwaddr offset, + unsigned size) +{ + XlnxDPDMAState *s = XLNX_DPDMA(opaque); + + DPRINTF("read @%" HWADDR_PRIx "\n", offset); + offset = offset >> 2; + + switch (offset) { + /* + * Trying to read a write only register. 
+ */ + case DPDMA_GBL: + return 0; + default: + assert(offset <= (0xFFC >> 2)); + return s->registers[offset]; + } + return 0; +} + +static void xlnx_dpdma_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + XlnxDPDMAState *s = XLNX_DPDMA(opaque); + + DPRINTF("write @%" HWADDR_PRIx " = %" PRIx64 "\n", offset, value); + offset = offset >> 2; + + switch (offset) { + case DPDMA_ISR: + s->registers[DPDMA_ISR] &= ~value; + xlnx_dpdma_update_irq(s); + break; + case DPDMA_IEN: + s->registers[DPDMA_IMR] &= ~value; + break; + case DPDMA_IDS: + s->registers[DPDMA_IMR] |= value; + break; + case DPDMA_EISR: + s->registers[DPDMA_EISR] &= ~value; + xlnx_dpdma_update_irq(s); + break; + case DPDMA_EIEN: + s->registers[DPDMA_EIMR] &= ~value; + break; + case DPDMA_EIDS: + s->registers[DPDMA_EIMR] |= value; + break; + case DPDMA_IMR: + case DPDMA_EIMR: + case DPDMA_DSCR_NEXT_ADDRE_CH(0): + case DPDMA_DSCR_NEXT_ADDRE_CH(1): + case DPDMA_DSCR_NEXT_ADDRE_CH(2): + case DPDMA_DSCR_NEXT_ADDRE_CH(3): + case DPDMA_DSCR_NEXT_ADDRE_CH(4): + case DPDMA_DSCR_NEXT_ADDRE_CH(5): + case DPDMA_DSCR_NEXT_ADDR_CH(0): + case DPDMA_DSCR_NEXT_ADDR_CH(1): + case DPDMA_DSCR_NEXT_ADDR_CH(2): + case DPDMA_DSCR_NEXT_ADDR_CH(3): + case DPDMA_DSCR_NEXT_ADDR_CH(4): + case DPDMA_DSCR_NEXT_ADDR_CH(5): + case DPDMA_PYLD_CUR_ADDRE_CH(0): + case DPDMA_PYLD_CUR_ADDRE_CH(1): + case DPDMA_PYLD_CUR_ADDRE_CH(2): + case DPDMA_PYLD_CUR_ADDRE_CH(3): + case DPDMA_PYLD_CUR_ADDRE_CH(4): + case DPDMA_PYLD_CUR_ADDRE_CH(5): + case DPDMA_PYLD_CUR_ADDR_CH(0): + case DPDMA_PYLD_CUR_ADDR_CH(1): + case DPDMA_PYLD_CUR_ADDR_CH(2): + case DPDMA_PYLD_CUR_ADDR_CH(3): + case DPDMA_PYLD_CUR_ADDR_CH(4): + case DPDMA_PYLD_CUR_ADDR_CH(5): + case DPDMA_STATUS_CH(0): + case DPDMA_STATUS_CH(1): + case DPDMA_STATUS_CH(2): + case DPDMA_STATUS_CH(3): + case DPDMA_STATUS_CH(4): + case DPDMA_STATUS_CH(5): + case DPDMA_VDO_CH(0): + case DPDMA_VDO_CH(1): + case DPDMA_VDO_CH(2): + case DPDMA_VDO_CH(3): + case DPDMA_VDO_CH(4): + case DPDMA_VDO_CH(5): + case DPDMA_PYLD_SZ_CH(0): + case DPDMA_PYLD_SZ_CH(1): + case DPDMA_PYLD_SZ_CH(2): + case DPDMA_PYLD_SZ_CH(3): + case DPDMA_PYLD_SZ_CH(4): + case DPDMA_PYLD_SZ_CH(5): + case DPDMA_DSCR_ID_CH(0): + case DPDMA_DSCR_ID_CH(1): + case DPDMA_DSCR_ID_CH(2): + case DPDMA_DSCR_ID_CH(3): + case DPDMA_DSCR_ID_CH(4): + case DPDMA_DSCR_ID_CH(5): + /* + * Trying to write to a read only register.. + */ + break; + case DPDMA_GBL: + /* + * This is a write only register so it's read as zero in the read + * callback. + * We store the value anyway so we can know if the channel is + * enabled. 
+ */ + s->registers[offset] |= value & 0x00000FFF; + break; + case DPDMA_DSCR_STRT_ADDRE_CH(0): + case DPDMA_DSCR_STRT_ADDRE_CH(1): + case DPDMA_DSCR_STRT_ADDRE_CH(2): + case DPDMA_DSCR_STRT_ADDRE_CH(3): + case DPDMA_DSCR_STRT_ADDRE_CH(4): + case DPDMA_DSCR_STRT_ADDRE_CH(5): + value &= 0x0000FFFF; + s->registers[offset] = value; + break; + case DPDMA_CNTL_CH(0): + s->registers[DPDMA_GBL] &= ~DPDMA_GBL_TRG_CH(0); + value &= 0x3FFFFFFF; + s->registers[offset] = value; + break; + case DPDMA_CNTL_CH(1): + s->registers[DPDMA_GBL] &= ~DPDMA_GBL_TRG_CH(1); + value &= 0x3FFFFFFF; + s->registers[offset] = value; + break; + case DPDMA_CNTL_CH(2): + s->registers[DPDMA_GBL] &= ~DPDMA_GBL_TRG_CH(2); + value &= 0x3FFFFFFF; + s->registers[offset] = value; + break; + case DPDMA_CNTL_CH(3): + s->registers[DPDMA_GBL] &= ~DPDMA_GBL_TRG_CH(3); + value &= 0x3FFFFFFF; + s->registers[offset] = value; + break; + case DPDMA_CNTL_CH(4): + s->registers[DPDMA_GBL] &= ~DPDMA_GBL_TRG_CH(4); + value &= 0x3FFFFFFF; + s->registers[offset] = value; + break; + case DPDMA_CNTL_CH(5): + s->registers[DPDMA_GBL] &= ~DPDMA_GBL_TRG_CH(5); + value &= 0x3FFFFFFF; + s->registers[offset] = value; + break; + default: + assert(offset <= (0xFFC >> 2)); + s->registers[offset] = value; + break; + } +} + +static const MemoryRegionOps dma_ops = { + .read = xlnx_dpdma_read, + .write = xlnx_dpdma_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + }, + .impl = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +static void xlnx_dpdma_init(Object *obj) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + XlnxDPDMAState *s = XLNX_DPDMA(obj); + + memory_region_init_io(&s->iomem, obj, &dma_ops, s, + TYPE_XLNX_DPDMA, 0x1000); + sysbus_init_mmio(sbd, &s->iomem); + sysbus_init_irq(sbd, &s->irq); +} + +static void xlnx_dpdma_reset(DeviceState *dev) +{ + XlnxDPDMAState *s = XLNX_DPDMA(dev); + size_t i; + + memset(s->registers, 0, sizeof(s->registers)); + s->registers[DPDMA_IMR] = 0x07FFFFFF; + s->registers[DPDMA_EIMR] = 0xFFFFFFFF; + s->registers[DPDMA_ALC0_MIN] = 0x0000FFFF; + s->registers[DPDMA_ALC1_MIN] = 0x0000FFFF; + + for (i = 0; i < 6; i++) { + s->data[i] = NULL; + s->operation_finished[i] = true; + } +} + +static void xlnx_dpdma_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + + dc->vmsd = &vmstate_xlnx_dpdma; + dc->reset = xlnx_dpdma_reset; +} + +static const TypeInfo xlnx_dpdma_info = { + .name = TYPE_XLNX_DPDMA, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(XlnxDPDMAState), + .instance_init = xlnx_dpdma_init, + .class_init = xlnx_dpdma_class_init, +}; + +static void xlnx_dpdma_register_types(void) +{ + type_register_static(&xlnx_dpdma_info); +} + +size_t xlnx_dpdma_start_operation(XlnxDPDMAState *s, uint8_t channel, + bool one_desc) +{ + uint64_t desc_addr; + uint64_t source_addr[6]; + DPDMADescriptor desc; + bool done = false; + size_t ptr = 0; + + assert(channel <= 5); + + DPRINTF("start dpdma channel 0x%" PRIX8 "\n", channel); + + if (!xlnx_dpdma_is_channel_triggered(s, channel)) { + DPRINTF("Channel isn't triggered..\n"); + return 0; + } + + if (!xlnx_dpdma_is_channel_enabled(s, channel)) { + DPRINTF("Channel isn't enabled..\n"); + return 0; + } + + if (xlnx_dpdma_is_channel_paused(s, channel)) { + DPRINTF("Channel is paused..\n"); + return 0; + } + + do { + if ((s->operation_finished[channel]) + || xlnx_dpdma_is_channel_retriggered(s, channel)) { + desc_addr = xlnx_dpdma_descriptor_start_address(s, channel); + 
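The descriptor validation a few lines below relies on xlnx_dpdma_desc_check_crc(), defined earlier: the CRC word is not a polynomial CRC but simply the truncated 32-bit sum of the descriptor's first 15 words. A self-contained sketch of the same computation, assuming only the 16-word layout of DPDMADescriptor (struct and helper names here are illustrative):

    #include <stdint.h>
    #include <stdbool.h>

    /* A 16-word DPDMA descriptor: 15 payload words followed by the CRC. */
    struct dpdma_desc_words {
        uint32_t words[15];
        uint32_t crc;
    };

    /* Truncated 32-bit sum of the first 15 words, mirroring the loop in
     * xlnx_dpdma_desc_check_crc(). */
    static uint32_t dpdma_desc_sum(const struct dpdma_desc_words *d)
    {
        uint32_t sum = 0;

        for (int i = 0; i < 15; i++) {
            sum += d->words[i];
        }
        return sum;
    }

    static bool dpdma_desc_crc_ok(const struct dpdma_desc_words *d)
    {
        return dpdma_desc_sum(d) == d->crc;
    }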
s->operation_finished[channel] = false; + } else { + desc_addr = xlnx_dpdma_descriptor_next_address(s, channel); + } + + if (dma_memory_read(&address_space_memory, desc_addr, &desc, + sizeof(DPDMADescriptor))) { + s->registers[DPDMA_EISR] |= ((1 << 1) << channel); + xlnx_dpdma_update_irq(s); + s->operation_finished[channel] = true; + DPRINTF("Can't get the descriptor.\n"); + break; + } + + xlnx_dpdma_update_desc_info(s, channel, &desc); + +#ifdef DEBUG_DPDMA + xlnx_dpdma_dump_descriptor(&desc); +#endif + + DPRINTF("location of the descriptor: %" PRIx64 "\n", desc_addr); + if (!xlnx_dpdma_desc_is_valid(&desc)) { + s->registers[DPDMA_EISR] |= ((1 << 7) << channel); + xlnx_dpdma_update_irq(s); + s->operation_finished[channel] = true; + DPRINTF("Invalid descriptor..\n"); + break; + } + + if (xlnx_dpdma_desc_crc_enabled(&desc) + && !xlnx_dpdma_desc_check_crc(&desc)) { + s->registers[DPDMA_EISR] |= ((1 << 13) << channel); + xlnx_dpdma_update_irq(s); + s->operation_finished[channel] = true; + DPRINTF("Bad CRC for descriptor..\n"); + break; + } + + if (xlnx_dpdma_desc_is_already_done(&desc) + && !xlnx_dpdma_desc_ignore_done_bit(&desc)) { + /* We are trying to process an already processed descriptor. */ + s->registers[DPDMA_EISR] |= ((1 << 25) << channel); + xlnx_dpdma_update_irq(s); + s->operation_finished[channel] = true; + DPRINTF("Already processed descriptor..\n"); + break; + } + + done = xlnx_dpdma_desc_is_last(&desc) + || xlnx_dpdma_desc_is_last_of_frame(&desc); + + s->operation_finished[channel] = done; + if (s->data[channel]) { + int64_t transfer_len = xlnx_dpdma_desc_get_transfer_size(&desc); + uint32_t line_size = xlnx_dpdma_desc_get_line_size(&desc); + uint32_t line_stride = xlnx_dpdma_desc_get_line_stride(&desc); + if (xlnx_dpdma_desc_is_contiguous(&desc)) { + source_addr[0] = xlnx_dpdma_desc_get_source_address(&desc, 0); + while (transfer_len != 0) { + if (dma_memory_read(&address_space_memory, + source_addr[0], + &s->data[channel][ptr], + line_size)) { + s->registers[DPDMA_ISR] |= ((1 << 12) << channel); + xlnx_dpdma_update_irq(s); + DPRINTF("Can't get data.\n"); + break; + } + ptr += line_size; + transfer_len -= line_size; + source_addr[0] += line_stride; + } + } else { + DPRINTF("Source address:\n"); + int frag; + for (frag = 0; frag < 5; frag++) { + source_addr[frag] = + xlnx_dpdma_desc_get_source_address(&desc, frag); + DPRINTF("Fragment %u: %" PRIx64 "\n", frag + 1, + source_addr[frag]); + } + + frag = 0; + while ((transfer_len < 0) && (frag < 5)) { + size_t fragment_len = DPDMA_FRAG_MAX_SZ + - (source_addr[frag] % DPDMA_FRAG_MAX_SZ); + + if (dma_memory_read(&address_space_memory, + source_addr[frag], + &(s->data[channel][ptr]), + fragment_len)) { + s->registers[DPDMA_ISR] |= ((1 << 12) << channel); + xlnx_dpdma_update_irq(s); + DPRINTF("Can't get data.\n"); + break; + } + ptr += fragment_len; + transfer_len -= fragment_len; + frag += 1; + } + } + } + + if (xlnx_dpdma_desc_update_enabled(&desc)) { + /* The descriptor need to be updated when it's completed. 
*/ + DPRINTF("update the descriptor with the done flag set.\n"); + xlnx_dpdma_desc_set_done(&desc); + dma_memory_write(&address_space_memory, desc_addr, &desc, + sizeof(DPDMADescriptor)); + } + + if (xlnx_dpdma_desc_completion_interrupt(&desc)) { + DPRINTF("completion interrupt enabled!\n"); + s->registers[DPDMA_ISR] |= (1 << channel); + xlnx_dpdma_update_irq(s); + } + + } while (!done && !one_desc); + + return ptr; +} + +void xlnx_dpdma_set_host_data_location(XlnxDPDMAState *s, uint8_t channel, + void *p) +{ + if (!s) { + qemu_log_mask(LOG_UNIMP, "DPDMA client not attached to valid DPDMA" + " instance\n"); + return; + } + + assert(channel <= 5); + s->data[channel] = p; +} + +void xlnx_dpdma_trigger_vsync_irq(XlnxDPDMAState *s) +{ + s->registers[DPDMA_ISR] |= (1 << 27); + xlnx_dpdma_update_irq(s); +} + +type_init(xlnx_dpdma_register_types) diff --git a/hw/gpio/Kconfig b/hw/gpio/Kconfig new file mode 100644 index 000000000..f0e7405f6 --- /dev/null +++ b/hw/gpio/Kconfig @@ -0,0 +1,15 @@ +config MAX7310 + bool + depends on I2C + +config PL061 + bool + +config GPIO_KEY + bool + +config GPIO_PWR + bool + +config SIFIVE_GPIO + bool diff --git a/hw/gpio/aspeed_gpio.c b/hw/gpio/aspeed_gpio.c new file mode 100644 index 000000000..911d21c8c --- /dev/null +++ b/hw/gpio/aspeed_gpio.c @@ -0,0 +1,996 @@ +/* + * ASPEED GPIO Controller + * + * Copyright (C) 2017-2019 IBM Corp. + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "qemu/host-utils.h" +#include "qemu/log.h" +#include "hw/gpio/aspeed_gpio.h" +#include "hw/misc/aspeed_scu.h" +#include "qapi/error.h" +#include "qapi/visitor.h" +#include "hw/irq.h" +#include "migration/vmstate.h" + +#define GPIOS_PER_GROUP 8 + +/* GPIO Source Types */ +#define ASPEED_CMD_SRC_MASK 0x01010101 +#define ASPEED_SOURCE_ARM 0 +#define ASPEED_SOURCE_LPC 1 +#define ASPEED_SOURCE_COPROCESSOR 2 +#define ASPEED_SOURCE_RESERVED 3 + +/* GPIO Interrupt Triggers */ +/* + * For each set of gpios there are three sensitivity registers that control + * the interrupt trigger mode. 
+ * + * | 2 | 1 | 0 | trigger mode + * ----------------------------- + * | 0 | 0 | 0 | falling-edge + * | 0 | 0 | 1 | rising-edge + * | 0 | 1 | 0 | level-low + * | 0 | 1 | 1 | level-high + * | 1 | X | X | dual-edge + */ +#define ASPEED_FALLING_EDGE 0 +#define ASPEED_RISING_EDGE 1 +#define ASPEED_LEVEL_LOW 2 +#define ASPEED_LEVEL_HIGH 3 +#define ASPEED_DUAL_EDGE 4 + +/* GPIO Register Address Offsets */ +#define GPIO_ABCD_DATA_VALUE (0x000 >> 2) +#define GPIO_ABCD_DIRECTION (0x004 >> 2) +#define GPIO_ABCD_INT_ENABLE (0x008 >> 2) +#define GPIO_ABCD_INT_SENS_0 (0x00C >> 2) +#define GPIO_ABCD_INT_SENS_1 (0x010 >> 2) +#define GPIO_ABCD_INT_SENS_2 (0x014 >> 2) +#define GPIO_ABCD_INT_STATUS (0x018 >> 2) +#define GPIO_ABCD_RESET_TOLERANT (0x01C >> 2) +#define GPIO_EFGH_DATA_VALUE (0x020 >> 2) +#define GPIO_EFGH_DIRECTION (0x024 >> 2) +#define GPIO_EFGH_INT_ENABLE (0x028 >> 2) +#define GPIO_EFGH_INT_SENS_0 (0x02C >> 2) +#define GPIO_EFGH_INT_SENS_1 (0x030 >> 2) +#define GPIO_EFGH_INT_SENS_2 (0x034 >> 2) +#define GPIO_EFGH_INT_STATUS (0x038 >> 2) +#define GPIO_EFGH_RESET_TOLERANT (0x03C >> 2) +#define GPIO_ABCD_DEBOUNCE_1 (0x040 >> 2) +#define GPIO_ABCD_DEBOUNCE_2 (0x044 >> 2) +#define GPIO_EFGH_DEBOUNCE_1 (0x048 >> 2) +#define GPIO_EFGH_DEBOUNCE_2 (0x04C >> 2) +#define GPIO_DEBOUNCE_TIME_1 (0x050 >> 2) +#define GPIO_DEBOUNCE_TIME_2 (0x054 >> 2) +#define GPIO_DEBOUNCE_TIME_3 (0x058 >> 2) +#define GPIO_ABCD_COMMAND_SRC_0 (0x060 >> 2) +#define GPIO_ABCD_COMMAND_SRC_1 (0x064 >> 2) +#define GPIO_EFGH_COMMAND_SRC_0 (0x068 >> 2) +#define GPIO_EFGH_COMMAND_SRC_1 (0x06C >> 2) +#define GPIO_IJKL_DATA_VALUE (0x070 >> 2) +#define GPIO_IJKL_DIRECTION (0x074 >> 2) +#define GPIO_MNOP_DATA_VALUE (0x078 >> 2) +#define GPIO_MNOP_DIRECTION (0x07C >> 2) +#define GPIO_QRST_DATA_VALUE (0x080 >> 2) +#define GPIO_QRST_DIRECTION (0x084 >> 2) +#define GPIO_UVWX_DATA_VALUE (0x088 >> 2) +#define GPIO_UVWX_DIRECTION (0x08C >> 2) +#define GPIO_IJKL_COMMAND_SRC_0 (0x090 >> 2) +#define GPIO_IJKL_COMMAND_SRC_1 (0x094 >> 2) +#define GPIO_IJKL_INT_ENABLE (0x098 >> 2) +#define GPIO_IJKL_INT_SENS_0 (0x09C >> 2) +#define GPIO_IJKL_INT_SENS_1 (0x0A0 >> 2) +#define GPIO_IJKL_INT_SENS_2 (0x0A4 >> 2) +#define GPIO_IJKL_INT_STATUS (0x0A8 >> 2) +#define GPIO_IJKL_RESET_TOLERANT (0x0AC >> 2) +#define GPIO_IJKL_DEBOUNCE_1 (0x0B0 >> 2) +#define GPIO_IJKL_DEBOUNCE_2 (0x0B4 >> 2) +#define GPIO_IJKL_INPUT_MASK (0x0B8 >> 2) +#define GPIO_ABCD_DATA_READ (0x0C0 >> 2) +#define GPIO_EFGH_DATA_READ (0x0C4 >> 2) +#define GPIO_IJKL_DATA_READ (0x0C8 >> 2) +#define GPIO_MNOP_DATA_READ (0x0CC >> 2) +#define GPIO_QRST_DATA_READ (0x0D0 >> 2) +#define GPIO_UVWX_DATA_READ (0x0D4 >> 2) +#define GPIO_YZAAAB_DATA_READ (0x0D8 >> 2) +#define GPIO_AC_DATA_READ (0x0DC >> 2) +#define GPIO_MNOP_COMMAND_SRC_0 (0x0E0 >> 2) +#define GPIO_MNOP_COMMAND_SRC_1 (0x0E4 >> 2) +#define GPIO_MNOP_INT_ENABLE (0x0E8 >> 2) +#define GPIO_MNOP_INT_SENS_0 (0x0EC >> 2) +#define GPIO_MNOP_INT_SENS_1 (0x0F0 >> 2) +#define GPIO_MNOP_INT_SENS_2 (0x0F4 >> 2) +#define GPIO_MNOP_INT_STATUS (0x0F8 >> 2) +#define GPIO_MNOP_RESET_TOLERANT (0x0FC >> 2) +#define GPIO_MNOP_DEBOUNCE_1 (0x100 >> 2) +#define GPIO_MNOP_DEBOUNCE_2 (0x104 >> 2) +#define GPIO_MNOP_INPUT_MASK (0x108 >> 2) +#define GPIO_QRST_COMMAND_SRC_0 (0x110 >> 2) +#define GPIO_QRST_COMMAND_SRC_1 (0x114 >> 2) +#define GPIO_QRST_INT_ENABLE (0x118 >> 2) +#define GPIO_QRST_INT_SENS_0 (0x11C >> 2) +#define GPIO_QRST_INT_SENS_1 (0x120 >> 2) +#define GPIO_QRST_INT_SENS_2 (0x124 >> 2) +#define GPIO_QRST_INT_STATUS (0x128 >> 2) +#define 
GPIO_QRST_RESET_TOLERANT (0x12C >> 2) +#define GPIO_QRST_DEBOUNCE_1 (0x130 >> 2) +#define GPIO_QRST_DEBOUNCE_2 (0x134 >> 2) +#define GPIO_QRST_INPUT_MASK (0x138 >> 2) +#define GPIO_UVWX_COMMAND_SRC_0 (0x140 >> 2) +#define GPIO_UVWX_COMMAND_SRC_1 (0x144 >> 2) +#define GPIO_UVWX_INT_ENABLE (0x148 >> 2) +#define GPIO_UVWX_INT_SENS_0 (0x14C >> 2) +#define GPIO_UVWX_INT_SENS_1 (0x150 >> 2) +#define GPIO_UVWX_INT_SENS_2 (0x154 >> 2) +#define GPIO_UVWX_INT_STATUS (0x158 >> 2) +#define GPIO_UVWX_RESET_TOLERANT (0x15C >> 2) +#define GPIO_UVWX_DEBOUNCE_1 (0x160 >> 2) +#define GPIO_UVWX_DEBOUNCE_2 (0x164 >> 2) +#define GPIO_UVWX_INPUT_MASK (0x168 >> 2) +#define GPIO_YZAAAB_COMMAND_SRC_0 (0x170 >> 2) +#define GPIO_YZAAAB_COMMAND_SRC_1 (0x174 >> 2) +#define GPIO_YZAAAB_INT_ENABLE (0x178 >> 2) +#define GPIO_YZAAAB_INT_SENS_0 (0x17C >> 2) +#define GPIO_YZAAAB_INT_SENS_1 (0x180 >> 2) +#define GPIO_YZAAAB_INT_SENS_2 (0x184 >> 2) +#define GPIO_YZAAAB_INT_STATUS (0x188 >> 2) +#define GPIO_YZAAAB_RESET_TOLERANT (0x18C >> 2) +#define GPIO_YZAAAB_DEBOUNCE_1 (0x190 >> 2) +#define GPIO_YZAAAB_DEBOUNCE_2 (0x194 >> 2) +#define GPIO_YZAAAB_INPUT_MASK (0x198 >> 2) +#define GPIO_AC_COMMAND_SRC_0 (0x1A0 >> 2) +#define GPIO_AC_COMMAND_SRC_1 (0x1A4 >> 2) +#define GPIO_AC_INT_ENABLE (0x1A8 >> 2) +#define GPIO_AC_INT_SENS_0 (0x1AC >> 2) +#define GPIO_AC_INT_SENS_1 (0x1B0 >> 2) +#define GPIO_AC_INT_SENS_2 (0x1B4 >> 2) +#define GPIO_AC_INT_STATUS (0x1B8 >> 2) +#define GPIO_AC_RESET_TOLERANT (0x1BC >> 2) +#define GPIO_AC_DEBOUNCE_1 (0x1C0 >> 2) +#define GPIO_AC_DEBOUNCE_2 (0x1C4 >> 2) +#define GPIO_AC_INPUT_MASK (0x1C8 >> 2) +#define GPIO_ABCD_INPUT_MASK (0x1D0 >> 2) +#define GPIO_EFGH_INPUT_MASK (0x1D4 >> 2) +#define GPIO_YZAAAB_DATA_VALUE (0x1E0 >> 2) +#define GPIO_YZAAAB_DIRECTION (0x1E4 >> 2) +#define GPIO_AC_DATA_VALUE (0x1E8 >> 2) +#define GPIO_AC_DIRECTION (0x1EC >> 2) +#define GPIO_3_3V_MEM_SIZE 0x1F0 +#define GPIO_3_3V_REG_ARRAY_SIZE (GPIO_3_3V_MEM_SIZE >> 2) + +/* AST2600 only - 1.8V gpios */ +/* + * The AST2600 two copies of the GPIO controller: the same 3.3V gpios as the + * AST2400 (memory offsets 0x0-0x198) and a second controller with 1.8V gpios + * (memory offsets 0x800-0x9D4). 
+ */ +#define GPIO_1_8V_ABCD_DATA_VALUE (0x000 >> 2) +#define GPIO_1_8V_ABCD_DIRECTION (0x004 >> 2) +#define GPIO_1_8V_ABCD_INT_ENABLE (0x008 >> 2) +#define GPIO_1_8V_ABCD_INT_SENS_0 (0x00C >> 2) +#define GPIO_1_8V_ABCD_INT_SENS_1 (0x010 >> 2) +#define GPIO_1_8V_ABCD_INT_SENS_2 (0x014 >> 2) +#define GPIO_1_8V_ABCD_INT_STATUS (0x018 >> 2) +#define GPIO_1_8V_ABCD_RESET_TOLERANT (0x01C >> 2) +#define GPIO_1_8V_E_DATA_VALUE (0x020 >> 2) +#define GPIO_1_8V_E_DIRECTION (0x024 >> 2) +#define GPIO_1_8V_E_INT_ENABLE (0x028 >> 2) +#define GPIO_1_8V_E_INT_SENS_0 (0x02C >> 2) +#define GPIO_1_8V_E_INT_SENS_1 (0x030 >> 2) +#define GPIO_1_8V_E_INT_SENS_2 (0x034 >> 2) +#define GPIO_1_8V_E_INT_STATUS (0x038 >> 2) +#define GPIO_1_8V_E_RESET_TOLERANT (0x03C >> 2) +#define GPIO_1_8V_ABCD_DEBOUNCE_1 (0x040 >> 2) +#define GPIO_1_8V_ABCD_DEBOUNCE_2 (0x044 >> 2) +#define GPIO_1_8V_E_DEBOUNCE_1 (0x048 >> 2) +#define GPIO_1_8V_E_DEBOUNCE_2 (0x04C >> 2) +#define GPIO_1_8V_DEBOUNCE_TIME_1 (0x050 >> 2) +#define GPIO_1_8V_DEBOUNCE_TIME_2 (0x054 >> 2) +#define GPIO_1_8V_DEBOUNCE_TIME_3 (0x058 >> 2) +#define GPIO_1_8V_ABCD_COMMAND_SRC_0 (0x060 >> 2) +#define GPIO_1_8V_ABCD_COMMAND_SRC_1 (0x064 >> 2) +#define GPIO_1_8V_E_COMMAND_SRC_0 (0x068 >> 2) +#define GPIO_1_8V_E_COMMAND_SRC_1 (0x06C >> 2) +#define GPIO_1_8V_ABCD_DATA_READ (0x0C0 >> 2) +#define GPIO_1_8V_E_DATA_READ (0x0C4 >> 2) +#define GPIO_1_8V_ABCD_INPUT_MASK (0x1D0 >> 2) +#define GPIO_1_8V_E_INPUT_MASK (0x1D4 >> 2) +#define GPIO_1_8V_MEM_SIZE 0x1D8 +#define GPIO_1_8V_REG_ARRAY_SIZE (GPIO_1_8V_MEM_SIZE >> 2) + +static int aspeed_evaluate_irq(GPIOSets *regs, int gpio_prev_high, int gpio) +{ + uint32_t falling_edge = 0, rising_edge = 0; + uint32_t int_trigger = extract32(regs->int_sens_0, gpio, 1) + | extract32(regs->int_sens_1, gpio, 1) << 1 + | extract32(regs->int_sens_2, gpio, 1) << 2; + uint32_t gpio_curr_high = extract32(regs->data_value, gpio, 1); + uint32_t gpio_int_enabled = extract32(regs->int_enable, gpio, 1); + + if (!gpio_int_enabled) { + return 0; + } + + /* Detect edges */ + if (gpio_curr_high && !gpio_prev_high) { + rising_edge = 1; + } else if (!gpio_curr_high && gpio_prev_high) { + falling_edge = 1; + } + + if (((int_trigger == ASPEED_FALLING_EDGE) && falling_edge) || + ((int_trigger == ASPEED_RISING_EDGE) && rising_edge) || + ((int_trigger == ASPEED_LEVEL_LOW) && !gpio_curr_high) || + ((int_trigger == ASPEED_LEVEL_HIGH) && gpio_curr_high) || + ((int_trigger >= ASPEED_DUAL_EDGE) && (rising_edge || falling_edge))) + { + regs->int_status = deposit32(regs->int_status, gpio, 1, 1); + return 1; + } + return 0; +} + +#define nested_struct_index(ta, pa, m, tb, pb) \ + (pb - ((tb *)(((char *)pa) + offsetof(ta, m)))) + +static ptrdiff_t aspeed_gpio_set_idx(AspeedGPIOState *s, GPIOSets *regs) +{ + return nested_struct_index(AspeedGPIOState, s, sets, GPIOSets, regs); +} + +static void aspeed_gpio_update(AspeedGPIOState *s, GPIOSets *regs, + uint32_t value) +{ + uint32_t input_mask = regs->input_mask; + uint32_t direction = regs->direction; + uint32_t old = regs->data_value; + uint32_t new = value; + uint32_t diff; + int gpio; + + diff = old ^ new; + if (diff) { + for (gpio = 0; gpio < ASPEED_GPIOS_PER_SET; gpio++) { + uint32_t mask = 1 << gpio; + + /* If the gpio needs to be updated... */ + if (!(diff & mask)) { + continue; + } + + /* ...and we're output or not input-masked... */ + if (!(direction & mask) && (input_mask & mask)) { + continue; + } + + /* ...then update the state. 
*/ + if (mask & new) { + regs->data_value |= mask; + } else { + regs->data_value &= ~mask; + } + + /* If the gpio is set to output... */ + if (direction & mask) { + /* ...trigger the line-state IRQ */ + ptrdiff_t set = aspeed_gpio_set_idx(s, regs); + qemu_set_irq(s->gpios[set][gpio], !!(new & mask)); + } else { + /* ...otherwise if we meet the line's current IRQ policy... */ + if (aspeed_evaluate_irq(regs, old & mask, gpio)) { + /* ...trigger the VIC IRQ */ + s->pending++; + } + } + } + } + qemu_set_irq(s->irq, !!(s->pending)); +} + +static bool aspeed_gpio_get_pin_level(AspeedGPIOState *s, uint32_t set_idx, + uint32_t pin) +{ + uint32_t reg_val; + uint32_t pin_mask = 1 << pin; + + reg_val = s->sets[set_idx].data_value; + + return !!(reg_val & pin_mask); +} + +static void aspeed_gpio_set_pin_level(AspeedGPIOState *s, uint32_t set_idx, + uint32_t pin, bool level) +{ + uint32_t value = s->sets[set_idx].data_value; + uint32_t pin_mask = 1 << pin; + + if (level) { + value |= pin_mask; + } else { + value &= ~pin_mask; + } + + aspeed_gpio_update(s, &s->sets[set_idx], value); +} + +/* + * | src_1 | src_2 | source | + * |-----------------------------| + * | 0 | 0 | ARM | + * | 0 | 1 | LPC | + * | 1 | 0 | Coprocessor| + * | 1 | 1 | Reserved | + * + * Once the source of a set is programmed, corresponding bits in the + * data_value, direction, interrupt [enable, sens[0-2]], reset_tol and + * debounce registers can only be written by the source. + * + * Source is ARM by default + * only bits 24, 16, 8, and 0 can be set + * + * we don't currently have a model for the LPC or Coprocessor + */ +static uint32_t update_value_control_source(GPIOSets *regs, uint32_t old_value, + uint32_t value) +{ + int i; + int cmd_source; + + /* assume the source is always ARM for now */ + int source = ASPEED_SOURCE_ARM; + + uint32_t new_value = 0; + + /* for each group in set */ + for (i = 0; i < ASPEED_GPIOS_PER_SET; i += GPIOS_PER_GROUP) { + cmd_source = extract32(regs->cmd_source_0, i, 1) + | (extract32(regs->cmd_source_1, i, 1) << 1); + + if (source == cmd_source) { + new_value |= (0xff << i) & value; + } else { + new_value |= (0xff << i) & old_value; + } + } + return new_value; +} + +static const AspeedGPIOReg aspeed_3_3v_gpios[GPIO_3_3V_REG_ARRAY_SIZE] = { + /* Set ABCD */ + [GPIO_ABCD_DATA_VALUE] = { 0, gpio_reg_data_value }, + [GPIO_ABCD_DIRECTION] = { 0, gpio_reg_direction }, + [GPIO_ABCD_INT_ENABLE] = { 0, gpio_reg_int_enable }, + [GPIO_ABCD_INT_SENS_0] = { 0, gpio_reg_int_sens_0 }, + [GPIO_ABCD_INT_SENS_1] = { 0, gpio_reg_int_sens_1 }, + [GPIO_ABCD_INT_SENS_2] = { 0, gpio_reg_int_sens_2 }, + [GPIO_ABCD_INT_STATUS] = { 0, gpio_reg_int_status }, + [GPIO_ABCD_RESET_TOLERANT] = { 0, gpio_reg_reset_tolerant }, + [GPIO_ABCD_DEBOUNCE_1] = { 0, gpio_reg_debounce_1 }, + [GPIO_ABCD_DEBOUNCE_2] = { 0, gpio_reg_debounce_2 }, + [GPIO_ABCD_COMMAND_SRC_0] = { 0, gpio_reg_cmd_source_0 }, + [GPIO_ABCD_COMMAND_SRC_1] = { 0, gpio_reg_cmd_source_1 }, + [GPIO_ABCD_DATA_READ] = { 0, gpio_reg_data_read }, + [GPIO_ABCD_INPUT_MASK] = { 0, gpio_reg_input_mask }, + /* Set EFGH */ + [GPIO_EFGH_DATA_VALUE] = { 1, gpio_reg_data_value }, + [GPIO_EFGH_DIRECTION] = { 1, gpio_reg_direction }, + [GPIO_EFGH_INT_ENABLE] = { 1, gpio_reg_int_enable }, + [GPIO_EFGH_INT_SENS_0] = { 1, gpio_reg_int_sens_0 }, + [GPIO_EFGH_INT_SENS_1] = { 1, gpio_reg_int_sens_1 }, + [GPIO_EFGH_INT_SENS_2] = { 1, gpio_reg_int_sens_2 }, + [GPIO_EFGH_INT_STATUS] = { 1, gpio_reg_int_status }, + [GPIO_EFGH_RESET_TOLERANT] = { 1, gpio_reg_reset_tolerant }, +
[GPIO_EFGH_DEBOUNCE_1] = { 1, gpio_reg_debounce_1 }, + [GPIO_EFGH_DEBOUNCE_2] = { 1, gpio_reg_debounce_2 }, + [GPIO_EFGH_COMMAND_SRC_0] = { 1, gpio_reg_cmd_source_0 }, + [GPIO_EFGH_COMMAND_SRC_1] = { 1, gpio_reg_cmd_source_1 }, + [GPIO_EFGH_DATA_READ] = { 1, gpio_reg_data_read }, + [GPIO_EFGH_INPUT_MASK] = { 1, gpio_reg_input_mask }, + /* Set IJKL */ + [GPIO_IJKL_DATA_VALUE] = { 2, gpio_reg_data_value }, + [GPIO_IJKL_DIRECTION] = { 2, gpio_reg_direction }, + [GPIO_IJKL_INT_ENABLE] = { 2, gpio_reg_int_enable }, + [GPIO_IJKL_INT_SENS_0] = { 2, gpio_reg_int_sens_0 }, + [GPIO_IJKL_INT_SENS_1] = { 2, gpio_reg_int_sens_1 }, + [GPIO_IJKL_INT_SENS_2] = { 2, gpio_reg_int_sens_2 }, + [GPIO_IJKL_INT_STATUS] = { 2, gpio_reg_int_status }, + [GPIO_IJKL_RESET_TOLERANT] = { 2, gpio_reg_reset_tolerant }, + [GPIO_IJKL_DEBOUNCE_1] = { 2, gpio_reg_debounce_1 }, + [GPIO_IJKL_DEBOUNCE_2] = { 2, gpio_reg_debounce_2 }, + [GPIO_IJKL_COMMAND_SRC_0] = { 2, gpio_reg_cmd_source_0 }, + [GPIO_IJKL_COMMAND_SRC_1] = { 2, gpio_reg_cmd_source_1 }, + [GPIO_IJKL_DATA_READ] = { 2, gpio_reg_data_read }, + [GPIO_IJKL_INPUT_MASK] = { 2, gpio_reg_input_mask }, + /* Set MNOP */ + [GPIO_MNOP_DATA_VALUE] = { 3, gpio_reg_data_value }, + [GPIO_MNOP_DIRECTION] = { 3, gpio_reg_direction }, + [GPIO_MNOP_INT_ENABLE] = { 3, gpio_reg_int_enable }, + [GPIO_MNOP_INT_SENS_0] = { 3, gpio_reg_int_sens_0 }, + [GPIO_MNOP_INT_SENS_1] = { 3, gpio_reg_int_sens_1 }, + [GPIO_MNOP_INT_SENS_2] = { 3, gpio_reg_int_sens_2 }, + [GPIO_MNOP_INT_STATUS] = { 3, gpio_reg_int_status }, + [GPIO_MNOP_RESET_TOLERANT] = { 3, gpio_reg_reset_tolerant }, + [GPIO_MNOP_DEBOUNCE_1] = { 3, gpio_reg_debounce_1 }, + [GPIO_MNOP_DEBOUNCE_2] = { 3, gpio_reg_debounce_2 }, + [GPIO_MNOP_COMMAND_SRC_0] = { 3, gpio_reg_cmd_source_0 }, + [GPIO_MNOP_COMMAND_SRC_1] = { 3, gpio_reg_cmd_source_1 }, + [GPIO_MNOP_DATA_READ] = { 3, gpio_reg_data_read }, + [GPIO_MNOP_INPUT_MASK] = { 3, gpio_reg_input_mask }, + /* Set QRST */ + [GPIO_QRST_DATA_VALUE] = { 4, gpio_reg_data_value }, + [GPIO_QRST_DIRECTION] = { 4, gpio_reg_direction }, + [GPIO_QRST_INT_ENABLE] = { 4, gpio_reg_int_enable }, + [GPIO_QRST_INT_SENS_0] = { 4, gpio_reg_int_sens_0 }, + [GPIO_QRST_INT_SENS_1] = { 4, gpio_reg_int_sens_1 }, + [GPIO_QRST_INT_SENS_2] = { 4, gpio_reg_int_sens_2 }, + [GPIO_QRST_INT_STATUS] = { 4, gpio_reg_int_status }, + [GPIO_QRST_RESET_TOLERANT] = { 4, gpio_reg_reset_tolerant }, + [GPIO_QRST_DEBOUNCE_1] = { 4, gpio_reg_debounce_1 }, + [GPIO_QRST_DEBOUNCE_2] = { 4, gpio_reg_debounce_2 }, + [GPIO_QRST_COMMAND_SRC_0] = { 4, gpio_reg_cmd_source_0 }, + [GPIO_QRST_COMMAND_SRC_1] = { 4, gpio_reg_cmd_source_1 }, + [GPIO_QRST_DATA_READ] = { 4, gpio_reg_data_read }, + [GPIO_QRST_INPUT_MASK] = { 4, gpio_reg_input_mask }, + /* Set UVWX */ + [GPIO_UVWX_DATA_VALUE] = { 5, gpio_reg_data_value }, + [GPIO_UVWX_DIRECTION] = { 5, gpio_reg_direction }, + [GPIO_UVWX_INT_ENABLE] = { 5, gpio_reg_int_enable }, + [GPIO_UVWX_INT_SENS_0] = { 5, gpio_reg_int_sens_0 }, + [GPIO_UVWX_INT_SENS_1] = { 5, gpio_reg_int_sens_1 }, + [GPIO_UVWX_INT_SENS_2] = { 5, gpio_reg_int_sens_2 }, + [GPIO_UVWX_INT_STATUS] = { 5, gpio_reg_int_status }, + [GPIO_UVWX_RESET_TOLERANT] = { 5, gpio_reg_reset_tolerant }, + [GPIO_UVWX_DEBOUNCE_1] = { 5, gpio_reg_debounce_1 }, + [GPIO_UVWX_DEBOUNCE_2] = { 5, gpio_reg_debounce_2 }, + [GPIO_UVWX_COMMAND_SRC_0] = { 5, gpio_reg_cmd_source_0 }, + [GPIO_UVWX_COMMAND_SRC_1] = { 5, gpio_reg_cmd_source_1 }, + [GPIO_UVWX_DATA_READ] = { 5, gpio_reg_data_read }, + [GPIO_UVWX_INPUT_MASK] = { 5, gpio_reg_input_mask }, + /* Set 
YZAAAB */ + [GPIO_YZAAAB_DATA_VALUE] = { 6, gpio_reg_data_value }, + [GPIO_YZAAAB_DIRECTION] = { 6, gpio_reg_direction }, + [GPIO_YZAAAB_INT_ENABLE] = { 6, gpio_reg_int_enable }, + [GPIO_YZAAAB_INT_SENS_0] = { 6, gpio_reg_int_sens_0 }, + [GPIO_YZAAAB_INT_SENS_1] = { 6, gpio_reg_int_sens_1 }, + [GPIO_YZAAAB_INT_SENS_2] = { 6, gpio_reg_int_sens_2 }, + [GPIO_YZAAAB_INT_STATUS] = { 6, gpio_reg_int_status }, + [GPIO_YZAAAB_RESET_TOLERANT] = { 6, gpio_reg_reset_tolerant }, + [GPIO_YZAAAB_DEBOUNCE_1] = { 6, gpio_reg_debounce_1 }, + [GPIO_YZAAAB_DEBOUNCE_2] = { 6, gpio_reg_debounce_2 }, + [GPIO_YZAAAB_COMMAND_SRC_0] = { 6, gpio_reg_cmd_source_0 }, + [GPIO_YZAAAB_COMMAND_SRC_1] = { 6, gpio_reg_cmd_source_1 }, + [GPIO_YZAAAB_DATA_READ] = { 6, gpio_reg_data_read }, + [GPIO_YZAAAB_INPUT_MASK] = { 6, gpio_reg_input_mask }, + /* Set AC (ast2500 only) */ + [GPIO_AC_DATA_VALUE] = { 7, gpio_reg_data_value }, + [GPIO_AC_DIRECTION] = { 7, gpio_reg_direction }, + [GPIO_AC_INT_ENABLE] = { 7, gpio_reg_int_enable }, + [GPIO_AC_INT_SENS_0] = { 7, gpio_reg_int_sens_0 }, + [GPIO_AC_INT_SENS_1] = { 7, gpio_reg_int_sens_1 }, + [GPIO_AC_INT_SENS_2] = { 7, gpio_reg_int_sens_2 }, + [GPIO_AC_INT_STATUS] = { 7, gpio_reg_int_status }, + [GPIO_AC_RESET_TOLERANT] = { 7, gpio_reg_reset_tolerant }, + [GPIO_AC_DEBOUNCE_1] = { 7, gpio_reg_debounce_1 }, + [GPIO_AC_DEBOUNCE_2] = { 7, gpio_reg_debounce_2 }, + [GPIO_AC_COMMAND_SRC_0] = { 7, gpio_reg_cmd_source_0 }, + [GPIO_AC_COMMAND_SRC_1] = { 7, gpio_reg_cmd_source_1 }, + [GPIO_AC_DATA_READ] = { 7, gpio_reg_data_read }, + [GPIO_AC_INPUT_MASK] = { 7, gpio_reg_input_mask }, +}; + +static const AspeedGPIOReg aspeed_1_8v_gpios[GPIO_1_8V_REG_ARRAY_SIZE] = { + /* 1.8V Set ABCD */ + [GPIO_1_8V_ABCD_DATA_VALUE] = {0, gpio_reg_data_value}, + [GPIO_1_8V_ABCD_DIRECTION] = {0, gpio_reg_direction}, + [GPIO_1_8V_ABCD_INT_ENABLE] = {0, gpio_reg_int_enable}, + [GPIO_1_8V_ABCD_INT_SENS_0] = {0, gpio_reg_int_sens_0}, + [GPIO_1_8V_ABCD_INT_SENS_1] = {0, gpio_reg_int_sens_1}, + [GPIO_1_8V_ABCD_INT_SENS_2] = {0, gpio_reg_int_sens_2}, + [GPIO_1_8V_ABCD_INT_STATUS] = {0, gpio_reg_int_status}, + [GPIO_1_8V_ABCD_RESET_TOLERANT] = {0, gpio_reg_reset_tolerant}, + [GPIO_1_8V_ABCD_DEBOUNCE_1] = {0, gpio_reg_debounce_1}, + [GPIO_1_8V_ABCD_DEBOUNCE_2] = {0, gpio_reg_debounce_2}, + [GPIO_1_8V_ABCD_COMMAND_SRC_0] = {0, gpio_reg_cmd_source_0}, + [GPIO_1_8V_ABCD_COMMAND_SRC_1] = {0, gpio_reg_cmd_source_1}, + [GPIO_1_8V_ABCD_DATA_READ] = {0, gpio_reg_data_read}, + [GPIO_1_8V_ABCD_INPUT_MASK] = {0, gpio_reg_input_mask}, + /* 1.8V Set E */ + [GPIO_1_8V_E_DATA_VALUE] = {1, gpio_reg_data_value}, + [GPIO_1_8V_E_DIRECTION] = {1, gpio_reg_direction}, + [GPIO_1_8V_E_INT_ENABLE] = {1, gpio_reg_int_enable}, + [GPIO_1_8V_E_INT_SENS_0] = {1, gpio_reg_int_sens_0}, + [GPIO_1_8V_E_INT_SENS_1] = {1, gpio_reg_int_sens_1}, + [GPIO_1_8V_E_INT_SENS_2] = {1, gpio_reg_int_sens_2}, + [GPIO_1_8V_E_INT_STATUS] = {1, gpio_reg_int_status}, + [GPIO_1_8V_E_RESET_TOLERANT] = {1, gpio_reg_reset_tolerant}, + [GPIO_1_8V_E_DEBOUNCE_1] = {1, gpio_reg_debounce_1}, + [GPIO_1_8V_E_DEBOUNCE_2] = {1, gpio_reg_debounce_2}, + [GPIO_1_8V_E_COMMAND_SRC_0] = {1, gpio_reg_cmd_source_0}, + [GPIO_1_8V_E_COMMAND_SRC_1] = {1, gpio_reg_cmd_source_1}, + [GPIO_1_8V_E_DATA_READ] = {1, gpio_reg_data_read}, + [GPIO_1_8V_E_INPUT_MASK] = {1, gpio_reg_input_mask}, +}; + +static uint64_t aspeed_gpio_read(void *opaque, hwaddr offset, uint32_t size) +{ + AspeedGPIOState *s = ASPEED_GPIO(opaque); + AspeedGPIOClass *agc = ASPEED_GPIO_GET_CLASS(s); + uint64_t idx = -1; + const 
AspeedGPIOReg *reg; + GPIOSets *set; + + idx = offset >> 2; + if (idx >= GPIO_DEBOUNCE_TIME_1 && idx <= GPIO_DEBOUNCE_TIME_3) { + idx -= GPIO_DEBOUNCE_TIME_1; + return (uint64_t) s->debounce_regs[idx]; + } + + reg = &agc->reg_table[idx]; + if (reg->set_idx >= agc->nr_gpio_sets) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: no getter for offset 0x%" + HWADDR_PRIx"\n", __func__, offset); + return 0; + } + + set = &s->sets[reg->set_idx]; + switch (reg->type) { + case gpio_reg_data_value: + return set->data_value; + case gpio_reg_direction: + return set->direction; + case gpio_reg_int_enable: + return set->int_enable; + case gpio_reg_int_sens_0: + return set->int_sens_0; + case gpio_reg_int_sens_1: + return set->int_sens_1; + case gpio_reg_int_sens_2: + return set->int_sens_2; + case gpio_reg_int_status: + return set->int_status; + case gpio_reg_reset_tolerant: + return set->reset_tol; + case gpio_reg_debounce_1: + return set->debounce_1; + case gpio_reg_debounce_2: + return set->debounce_2; + case gpio_reg_cmd_source_0: + return set->cmd_source_0; + case gpio_reg_cmd_source_1: + return set->cmd_source_1; + case gpio_reg_data_read: + return set->data_read; + case gpio_reg_input_mask: + return set->input_mask; + default: + qemu_log_mask(LOG_GUEST_ERROR, "%s: no getter for offset 0x%" + HWADDR_PRIx"\n", __func__, offset); + return 0; + }; +} + +static void aspeed_gpio_write(void *opaque, hwaddr offset, uint64_t data, + uint32_t size) +{ + AspeedGPIOState *s = ASPEED_GPIO(opaque); + AspeedGPIOClass *agc = ASPEED_GPIO_GET_CLASS(s); + const GPIOSetProperties *props; + uint64_t idx = -1; + const AspeedGPIOReg *reg; + GPIOSets *set; + uint32_t cleared; + + idx = offset >> 2; + if (idx >= GPIO_DEBOUNCE_TIME_1 && idx <= GPIO_DEBOUNCE_TIME_3) { + idx -= GPIO_DEBOUNCE_TIME_1; + s->debounce_regs[idx] = (uint32_t) data; + return; + } + + reg = &agc->reg_table[idx]; + if (reg->set_idx >= agc->nr_gpio_sets) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: no setter for offset 0x%" + HWADDR_PRIx"\n", __func__, offset); + return; + } + + set = &s->sets[reg->set_idx]; + props = &agc->props[reg->set_idx]; + + switch (reg->type) { + case gpio_reg_data_value: + data &= props->output; + data = update_value_control_source(set, set->data_value, data); + set->data_read = data; + aspeed_gpio_update(s, set, data); + return; + case gpio_reg_direction: + /* + * where data is the value attempted to be written to the pin: + * pin type | input mask | output mask | expected value + * ------------------------------------------------------------ + * bidirectional | 1 | 1 | data + * input only | 1 | 0 | 0 + * output only | 0 | 1 | 1 + * no pin | 0 | 0 | 0 + * + * which is captured by: + * data = ( data | ~input) & output; + */ + data = (data | ~props->input) & props->output; + set->direction = update_value_control_source(set, set->direction, data); + break; + case gpio_reg_int_enable: + set->int_enable = update_value_control_source(set, set->int_enable, + data); + break; + case gpio_reg_int_sens_0: + set->int_sens_0 = update_value_control_source(set, set->int_sens_0, + data); + break; + case gpio_reg_int_sens_1: + set->int_sens_1 = update_value_control_source(set, set->int_sens_1, + data); + break; + case gpio_reg_int_sens_2: + set->int_sens_2 = update_value_control_source(set, set->int_sens_2, + data); + break; + case gpio_reg_int_status: + cleared = ctpop32(data & set->int_status); + if (s->pending && cleared) { + assert(s->pending >= cleared); + s->pending -= cleared; + } + set->int_status &= ~data; + break; + case gpio_reg_reset_tolerant: 
+ set->reset_tol = update_value_control_source(set, set->reset_tol, + data); + return; + case gpio_reg_debounce_1: + set->debounce_1 = update_value_control_source(set, set->debounce_1, + data); + return; + case gpio_reg_debounce_2: + set->debounce_2 = update_value_control_source(set, set->debounce_2, + data); + return; + case gpio_reg_cmd_source_0: + set->cmd_source_0 = data & ASPEED_CMD_SRC_MASK; + return; + case gpio_reg_cmd_source_1: + set->cmd_source_1 = data & ASPEED_CMD_SRC_MASK; + return; + case gpio_reg_data_read: + /* Read only register */ + return; + case gpio_reg_input_mask: + /* + * feeds into interrupt generation + * 0: read from data value reg will be updated + * 1: read from data value reg will not be updated + */ + set->input_mask = data & props->input; + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, "%s: no setter for offset 0x%" + HWADDR_PRIx"\n", __func__, offset); + return; + } + aspeed_gpio_update(s, set, set->data_value); + return; +} + +static int get_set_idx(AspeedGPIOState *s, const char *group, int *group_idx) +{ + AspeedGPIOClass *agc = ASPEED_GPIO_GET_CLASS(s); + int set_idx, g_idx; + + for (set_idx = 0; set_idx < agc->nr_gpio_sets; set_idx++) { + const GPIOSetProperties *set_props = &agc->props[set_idx]; + for (g_idx = 0; g_idx < ASPEED_GROUPS_PER_SET; g_idx++) { + if (!strncmp(group, set_props->group_label[g_idx], strlen(group))) { + *group_idx = g_idx; + return set_idx; + } + } + } + return -1; +} + +static void aspeed_gpio_get_pin(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + int pin = 0xfff; + bool level = true; + char group[4]; + AspeedGPIOState *s = ASPEED_GPIO(obj); + int set_idx, group_idx = 0; + + if (sscanf(name, "gpio%2[A-Z]%1d", group, &pin) != 2) { + /* 1.8V gpio */ + if (sscanf(name, "gpio%3[18A-E]%1d", group, &pin) != 2) { + error_setg(errp, "%s: error reading %s", __func__, name); + return; + } + } + set_idx = get_set_idx(s, group, &group_idx); + if (set_idx == -1) { + error_setg(errp, "%s: invalid group %s", __func__, group); + return; + } + pin = pin + group_idx * GPIOS_PER_GROUP; + level = aspeed_gpio_get_pin_level(s, set_idx, pin); + visit_type_bool(v, name, &level, errp); +} + +static void aspeed_gpio_set_pin(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + bool level; + int pin = 0xfff; + char group[4]; + AspeedGPIOState *s = ASPEED_GPIO(obj); + int set_idx, group_idx = 0; + + if (!visit_type_bool(v, name, &level, errp)) { + return; + } + if (sscanf(name, "gpio%2[A-Z]%1d", group, &pin) != 2) { + /* 1.8V gpio */ + if (sscanf(name, "gpio%3[18A-E]%1d", group, &pin) != 2) { + error_setg(errp, "%s: error reading %s", __func__, name); + return; + } + } + set_idx = get_set_idx(s, group, &group_idx); + if (set_idx == -1) { + error_setg(errp, "%s: invalid group %s", __func__, group); + return; + } + pin = pin + group_idx * GPIOS_PER_GROUP; + aspeed_gpio_set_pin_level(s, set_idx, pin, level); +} + +/****************** Setup functions ******************/ +static const GPIOSetProperties ast2400_set_props[ASPEED_GPIO_MAX_NR_SETS] = { + [0] = {0xffffffff, 0xffffffff, {"A", "B", "C", "D"} }, + [1] = {0xffffffff, 0xffffffff, {"E", "F", "G", "H"} }, + [2] = {0xffffffff, 0xffffffff, {"I", "J", "K", "L"} }, + [3] = {0xffffffff, 0xffffffff, {"M", "N", "O", "P"} }, + [4] = {0xffffffff, 0xffffffff, {"Q", "R", "S", "T"} }, + [5] = {0xffffffff, 0x0000ffff, {"U", "V", "W", "X"} }, + [6] = {0x0000000f, 0x0fffff0f, {"Y", "Z", "AA", "AB"} }, +}; + +static const GPIOSetProperties 
ast2500_set_props[ASPEED_GPIO_MAX_NR_SETS] = { + [0] = {0xffffffff, 0xffffffff, {"A", "B", "C", "D"} }, + [1] = {0xffffffff, 0xffffffff, {"E", "F", "G", "H"} }, + [2] = {0xffffffff, 0xffffffff, {"I", "J", "K", "L"} }, + [3] = {0xffffffff, 0xffffffff, {"M", "N", "O", "P"} }, + [4] = {0xffffffff, 0xffffffff, {"Q", "R", "S", "T"} }, + [5] = {0xffffffff, 0x0000ffff, {"U", "V", "W", "X"} }, + [6] = {0x0fffffff, 0x0fffffff, {"Y", "Z", "AA", "AB"} }, + [7] = {0x000000ff, 0x000000ff, {"AC"} }, +}; + +static GPIOSetProperties ast2600_3_3v_set_props[ASPEED_GPIO_MAX_NR_SETS] = { + [0] = {0xffffffff, 0xffffffff, {"A", "B", "C", "D"} }, + [1] = {0xffffffff, 0xffffffff, {"E", "F", "G", "H"} }, + [2] = {0xffffffff, 0xffffffff, {"I", "J", "K", "L"} }, + [3] = {0xffffffff, 0xffffffff, {"M", "N", "O", "P"} }, + [4] = {0xffffffff, 0x00ffffff, {"Q", "R", "S", "T"} }, + [5] = {0xffffffff, 0xffffff00, {"U", "V", "W", "X"} }, + [6] = {0x0000ffff, 0x0000ffff, {"Y", "Z"} }, +}; + +static GPIOSetProperties ast2600_1_8v_set_props[ASPEED_GPIO_MAX_NR_SETS] = { + [0] = {0xffffffff, 0xffffffff, {"18A", "18B", "18C", "18D"} }, + [1] = {0x0000000f, 0x0000000f, {"18E"} }, +}; + +static const MemoryRegionOps aspeed_gpio_ops = { + .read = aspeed_gpio_read, + .write = aspeed_gpio_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid.min_access_size = 4, + .valid.max_access_size = 4, +}; + +static void aspeed_gpio_reset(DeviceState *dev) +{ + AspeedGPIOState *s = ASPEED_GPIO(dev); + + /* TODO: respect the reset tolerance registers */ + memset(s->sets, 0, sizeof(s->sets)); +} + +static void aspeed_gpio_realize(DeviceState *dev, Error **errp) +{ + AspeedGPIOState *s = ASPEED_GPIO(dev); + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + AspeedGPIOClass *agc = ASPEED_GPIO_GET_CLASS(s); + + /* Interrupt parent line */ + sysbus_init_irq(sbd, &s->irq); + + /* Individual GPIOs */ + for (int i = 0; i < ASPEED_GPIO_MAX_NR_SETS; i++) { + const GPIOSetProperties *props = &agc->props[i]; + uint32_t skip = ~(props->input | props->output); + for (int j = 0; j < ASPEED_GPIOS_PER_SET; j++) { + if (skip >> j & 1) { + continue; + } + sysbus_init_irq(sbd, &s->gpios[i][j]); + } + } + + memory_region_init_io(&s->iomem, OBJECT(s), &aspeed_gpio_ops, s, + TYPE_ASPEED_GPIO, 0x800); + + sysbus_init_mmio(sbd, &s->iomem); +} + +static void aspeed_gpio_init(Object *obj) +{ + AspeedGPIOState *s = ASPEED_GPIO(obj); + AspeedGPIOClass *agc = ASPEED_GPIO_GET_CLASS(s); + + for (int i = 0; i < ASPEED_GPIO_MAX_NR_SETS; i++) { + const GPIOSetProperties *props = &agc->props[i]; + uint32_t skip = ~(props->input | props->output); + for (int j = 0; j < ASPEED_GPIOS_PER_SET; j++) { + if (skip >> j & 1) { + continue; + } + int group_idx = j / GPIOS_PER_GROUP; + int pin_idx = j % GPIOS_PER_GROUP; + const char *group = &props->group_label[group_idx][0]; + char *name = g_strdup_printf("gpio%s%d", group, pin_idx); + object_property_add(obj, name, "bool", aspeed_gpio_get_pin, + aspeed_gpio_set_pin, NULL, NULL); + g_free(name); + } + } +} + +static const VMStateDescription vmstate_gpio_regs = { + .name = TYPE_ASPEED_GPIO"/regs", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(data_value, GPIOSets), + VMSTATE_UINT32(data_read, GPIOSets), + VMSTATE_UINT32(direction, GPIOSets), + VMSTATE_UINT32(int_enable, GPIOSets), + VMSTATE_UINT32(int_sens_0, GPIOSets), + VMSTATE_UINT32(int_sens_1, GPIOSets), + VMSTATE_UINT32(int_sens_2, GPIOSets), + VMSTATE_UINT32(int_status, GPIOSets), + VMSTATE_UINT32(reset_tol, GPIOSets), + 
VMSTATE_UINT32(cmd_source_0, GPIOSets), + VMSTATE_UINT32(cmd_source_1, GPIOSets), + VMSTATE_UINT32(debounce_1, GPIOSets), + VMSTATE_UINT32(debounce_2, GPIOSets), + VMSTATE_UINT32(input_mask, GPIOSets), + VMSTATE_END_OF_LIST(), + } +}; + +static const VMStateDescription vmstate_aspeed_gpio = { + .name = TYPE_ASPEED_GPIO, + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_STRUCT_ARRAY(sets, AspeedGPIOState, ASPEED_GPIO_MAX_NR_SETS, + 1, vmstate_gpio_regs, GPIOSets), + VMSTATE_UINT32_ARRAY(debounce_regs, AspeedGPIOState, + ASPEED_GPIO_NR_DEBOUNCE_REGS), + VMSTATE_END_OF_LIST(), + } +}; + +static void aspeed_gpio_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = aspeed_gpio_realize; + dc->reset = aspeed_gpio_reset; + dc->desc = "Aspeed GPIO Controller"; + dc->vmsd = &vmstate_aspeed_gpio; +} + +static void aspeed_gpio_ast2400_class_init(ObjectClass *klass, void *data) +{ + AspeedGPIOClass *agc = ASPEED_GPIO_CLASS(klass); + + agc->props = ast2400_set_props; + agc->nr_gpio_pins = 216; + agc->nr_gpio_sets = 7; + agc->reg_table = aspeed_3_3v_gpios; +} + +static void aspeed_gpio_2500_class_init(ObjectClass *klass, void *data) +{ + AspeedGPIOClass *agc = ASPEED_GPIO_CLASS(klass); + + agc->props = ast2500_set_props; + agc->nr_gpio_pins = 228; + agc->nr_gpio_sets = 8; + agc->reg_table = aspeed_3_3v_gpios; +} + +static void aspeed_gpio_ast2600_3_3v_class_init(ObjectClass *klass, void *data) +{ + AspeedGPIOClass *agc = ASPEED_GPIO_CLASS(klass); + + agc->props = ast2600_3_3v_set_props; + agc->nr_gpio_pins = 208; + agc->nr_gpio_sets = 7; + agc->reg_table = aspeed_3_3v_gpios; +} + +static void aspeed_gpio_ast2600_1_8v_class_init(ObjectClass *klass, void *data) +{ + AspeedGPIOClass *agc = ASPEED_GPIO_CLASS(klass); + + agc->props = ast2600_1_8v_set_props; + agc->nr_gpio_pins = 36; + agc->nr_gpio_sets = 2; + agc->reg_table = aspeed_1_8v_gpios; +} + +static const TypeInfo aspeed_gpio_info = { + .name = TYPE_ASPEED_GPIO, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(AspeedGPIOState), + .class_size = sizeof(AspeedGPIOClass), + .class_init = aspeed_gpio_class_init, + .abstract = true, +}; + +static const TypeInfo aspeed_gpio_ast2400_info = { + .name = TYPE_ASPEED_GPIO "-ast2400", + .parent = TYPE_ASPEED_GPIO, + .class_init = aspeed_gpio_ast2400_class_init, + .instance_init = aspeed_gpio_init, +}; + +static const TypeInfo aspeed_gpio_ast2500_info = { + .name = TYPE_ASPEED_GPIO "-ast2500", + .parent = TYPE_ASPEED_GPIO, + .class_init = aspeed_gpio_2500_class_init, + .instance_init = aspeed_gpio_init, +}; + +static const TypeInfo aspeed_gpio_ast2600_3_3v_info = { + .name = TYPE_ASPEED_GPIO "-ast2600", + .parent = TYPE_ASPEED_GPIO, + .class_init = aspeed_gpio_ast2600_3_3v_class_init, + .instance_init = aspeed_gpio_init, +}; + +static const TypeInfo aspeed_gpio_ast2600_1_8v_info = { + .name = TYPE_ASPEED_GPIO "-ast2600-1_8v", + .parent = TYPE_ASPEED_GPIO, + .class_init = aspeed_gpio_ast2600_1_8v_class_init, + .instance_init = aspeed_gpio_init, +}; + +static void aspeed_gpio_register_types(void) +{ + type_register_static(&aspeed_gpio_info); + type_register_static(&aspeed_gpio_ast2400_info); + type_register_static(&aspeed_gpio_ast2500_info); + type_register_static(&aspeed_gpio_ast2600_3_3v_info); + type_register_static(&aspeed_gpio_ast2600_1_8v_info); +} + +type_init(aspeed_gpio_register_types); diff --git a/hw/gpio/bcm2835_gpio.c b/hw/gpio/bcm2835_gpio.c new file mode 100644 index 000000000..c995bba1d --- /dev/null +++ 
b/hw/gpio/bcm2835_gpio.c @@ -0,0 +1,344 @@ +/* + * Raspberry Pi (BCM2835) GPIO Controller + * + * Copyright (c) 2017 Antfield SAS + * + * Authors: + * Clement Deschamps + * Luc Michel + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "qemu/timer.h" +#include "qapi/error.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "hw/sd/sd.h" +#include "hw/gpio/bcm2835_gpio.h" +#include "hw/irq.h" + +#define GPFSEL0 0x00 +#define GPFSEL1 0x04 +#define GPFSEL2 0x08 +#define GPFSEL3 0x0C +#define GPFSEL4 0x10 +#define GPFSEL5 0x14 +#define GPSET0 0x1C +#define GPSET1 0x20 +#define GPCLR0 0x28 +#define GPCLR1 0x2C +#define GPLEV0 0x34 +#define GPLEV1 0x38 +#define GPEDS0 0x40 +#define GPEDS1 0x44 +#define GPREN0 0x4C +#define GPREN1 0x50 +#define GPFEN0 0x58 +#define GPFEN1 0x5C +#define GPHEN0 0x64 +#define GPHEN1 0x68 +#define GPLEN0 0x70 +#define GPLEN1 0x74 +#define GPAREN0 0x7C +#define GPAREN1 0x80 +#define GPAFEN0 0x88 +#define GPAFEN1 0x8C +#define GPPUD 0x94 +#define GPPUDCLK0 0x98 +#define GPPUDCLK1 0x9C + +static uint32_t gpfsel_get(BCM2835GpioState *s, uint8_t reg) +{ + int i; + uint32_t value = 0; + for (i = 0; i < 10; i++) { + uint32_t index = 10 * reg + i; + if (index < sizeof(s->fsel)) { + value |= (s->fsel[index] & 0x7) << (3 * i); + } + } + return value; +} + +static void gpfsel_set(BCM2835GpioState *s, uint8_t reg, uint32_t value) +{ + int i; + for (i = 0; i < 10; i++) { + uint32_t index = 10 * reg + i; + if (index < sizeof(s->fsel)) { + int fsel = (value >> (3 * i)) & 0x7; + s->fsel[index] = fsel; + } + } + + /* SD controller selection (48-53) */ + if (s->sd_fsel != 0 + && (s->fsel[48] == 0) /* SD_CLK_R */ + && (s->fsel[49] == 0) /* SD_CMD_R */ + && (s->fsel[50] == 0) /* SD_DATA0_R */ + && (s->fsel[51] == 0) /* SD_DATA1_R */ + && (s->fsel[52] == 0) /* SD_DATA2_R */ + && (s->fsel[53] == 0) /* SD_DATA3_R */ + ) { + /* SDHCI controller selected */ + sdbus_reparent_card(s->sdbus_sdhost, s->sdbus_sdhci); + s->sd_fsel = 0; + } else if (s->sd_fsel != 4 + && (s->fsel[48] == 4) /* SD_CLK_R */ + && (s->fsel[49] == 4) /* SD_CMD_R */ + && (s->fsel[50] == 4) /* SD_DATA0_R */ + && (s->fsel[51] == 4) /* SD_DATA1_R */ + && (s->fsel[52] == 4) /* SD_DATA2_R */ + && (s->fsel[53] == 4) /* SD_DATA3_R */ + ) { + /* SDHost controller selected */ + sdbus_reparent_card(s->sdbus_sdhci, s->sdbus_sdhost); + s->sd_fsel = 4; + } +} + +static int gpfsel_is_out(BCM2835GpioState *s, int index) +{ + if (index >= 0 && index < 54) { + return s->fsel[index] == 1; + } + return 0; +} + +static void gpset(BCM2835GpioState *s, + uint32_t val, uint8_t start, uint8_t count, uint32_t *lev) +{ + uint32_t changes = val & ~*lev; + uint32_t cur = 1; + + int i; + for (i = 0; i < count; i++) { + if ((changes & cur) && (gpfsel_is_out(s, start + i))) { + qemu_set_irq(s->out[start + i], 1); + } + cur <<= 1; + } + + *lev |= val; +} + +static void gpclr(BCM2835GpioState *s, + uint32_t val, uint8_t start, uint8_t count, uint32_t *lev) +{ + uint32_t changes = val & *lev; + uint32_t cur = 1; + + int i; + for (i = 0; i < count; i++) { + if ((changes & cur) && (gpfsel_is_out(s, start + i))) { + qemu_set_irq(s->out[start + i], 0); + } + cur <<= 1; + } + + *lev &= ~val; +} + +static uint64_t bcm2835_gpio_read(void *opaque, hwaddr offset, + unsigned size) +{ + BCM2835GpioState *s = (BCM2835GpioState *)opaque; + + switch (offset) { + case GPFSEL0: + case 
GPFSEL1: + case GPFSEL2: + case GPFSEL3: + case GPFSEL4: + case GPFSEL5: + return gpfsel_get(s, offset / 4); + case GPSET0: + case GPSET1: + /* Write Only */ + return 0; + case GPCLR0: + case GPCLR1: + /* Write Only */ + return 0; + case GPLEV0: + return s->lev0; + case GPLEV1: + return s->lev1; + case GPEDS0: + case GPEDS1: + case GPREN0: + case GPREN1: + case GPFEN0: + case GPFEN1: + case GPHEN0: + case GPHEN1: + case GPLEN0: + case GPLEN1: + case GPAREN0: + case GPAREN1: + case GPAFEN0: + case GPAFEN1: + case GPPUD: + case GPPUDCLK0: + case GPPUDCLK1: + /* Not implemented */ + return 0; + default: + qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset %"HWADDR_PRIx"\n", + __func__, offset); + break; + } + + return 0; +} + +static void bcm2835_gpio_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + BCM2835GpioState *s = (BCM2835GpioState *)opaque; + + switch (offset) { + case GPFSEL0: + case GPFSEL1: + case GPFSEL2: + case GPFSEL3: + case GPFSEL4: + case GPFSEL5: + gpfsel_set(s, offset / 4, value); + break; + case GPSET0: + gpset(s, value, 0, 32, &s->lev0); + break; + case GPSET1: + gpset(s, value, 32, 22, &s->lev1); + break; + case GPCLR0: + gpclr(s, value, 0, 32, &s->lev0); + break; + case GPCLR1: + gpclr(s, value, 32, 22, &s->lev1); + break; + case GPLEV0: + case GPLEV1: + /* Read Only */ + break; + case GPEDS0: + case GPEDS1: + case GPREN0: + case GPREN1: + case GPFEN0: + case GPFEN1: + case GPHEN0: + case GPHEN1: + case GPLEN0: + case GPLEN1: + case GPAREN0: + case GPAREN1: + case GPAFEN0: + case GPAFEN1: + case GPPUD: + case GPPUDCLK0: + case GPPUDCLK1: + /* Not implemented */ + break; + default: + goto err_out; + } + return; + +err_out: + qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset %"HWADDR_PRIx"\n", + __func__, offset); +} + +static void bcm2835_gpio_reset(DeviceState *dev) +{ + BCM2835GpioState *s = BCM2835_GPIO(dev); + + int i; + for (i = 0; i < 6; i++) { + gpfsel_set(s, i, 0); + } + + s->sd_fsel = 0; + + /* SDHCI is selected by default */ + sdbus_reparent_card(&s->sdbus, s->sdbus_sdhci); + + s->lev0 = 0; + s->lev1 = 0; +} + +static const MemoryRegionOps bcm2835_gpio_ops = { + .read = bcm2835_gpio_read, + .write = bcm2835_gpio_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static const VMStateDescription vmstate_bcm2835_gpio = { + .name = "bcm2835_gpio", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT8_ARRAY(fsel, BCM2835GpioState, 54), + VMSTATE_UINT32(lev0, BCM2835GpioState), + VMSTATE_UINT32(lev1, BCM2835GpioState), + VMSTATE_UINT8(sd_fsel, BCM2835GpioState), + VMSTATE_END_OF_LIST() + } +}; + +static void bcm2835_gpio_init(Object *obj) +{ + BCM2835GpioState *s = BCM2835_GPIO(obj); + DeviceState *dev = DEVICE(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + + qbus_init(&s->sdbus, sizeof(s->sdbus), TYPE_SD_BUS, DEVICE(s), "sd-bus"); + + memory_region_init_io(&s->iomem, obj, + &bcm2835_gpio_ops, s, "bcm2835_gpio", 0x1000); + sysbus_init_mmio(sbd, &s->iomem); + qdev_init_gpio_out(dev, s->out, 54); +} + +static void bcm2835_gpio_realize(DeviceState *dev, Error **errp) +{ + BCM2835GpioState *s = BCM2835_GPIO(dev); + Object *obj; + + obj = object_property_get_link(OBJECT(dev), "sdbus-sdhci", &error_abort); + s->sdbus_sdhci = SD_BUS(obj); + + obj = object_property_get_link(OBJECT(dev), "sdbus-sdhost", &error_abort); + s->sdbus_sdhost = SD_BUS(obj); +} + +static void bcm2835_gpio_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->vmsd = &vmstate_bcm2835_gpio; + 
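/* + * Reset (below) reparents the SD card onto the SDHCI bus through the + * "sdbus-sdhci" link resolved in realize; QEMU only resets a device + * after it has been realized, so the ordering is safe. + */ + 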
dc->realize = &bcm2835_gpio_realize; + dc->reset = &bcm2835_gpio_reset; +} + +static const TypeInfo bcm2835_gpio_info = { + .name = TYPE_BCM2835_GPIO, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(BCM2835GpioState), + .instance_init = bcm2835_gpio_init, + .class_init = bcm2835_gpio_class_init, +}; + +static void bcm2835_gpio_register_types(void) +{ + type_register_static(&bcm2835_gpio_info); +} + +type_init(bcm2835_gpio_register_types) diff --git a/hw/gpio/gpio_key.c b/hw/gpio/gpio_key.c new file mode 100644 index 000000000..74f613835 --- /dev/null +++ b/hw/gpio/gpio_key.c @@ -0,0 +1,109 @@ +/* + * GPIO key + * + * Copyright (c) 2016 Linaro Limited + * + * Author: Shannon Zhao + * + * Emulate a (human) keypress -- when the key is triggered by + * setting the incoming gpio line, the outbound irq line is + * raised for 100ms before being dropped again. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see <http://www.gnu.org/licenses/>. + */ + +#include "qemu/osdep.h" +#include "hw/irq.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "qemu/module.h" +#include "qemu/timer.h" +#include "qom/object.h" + +#define TYPE_GPIOKEY "gpio-key" +OBJECT_DECLARE_SIMPLE_TYPE(GPIOKEYState, GPIOKEY) +#define GPIO_KEY_LATENCY 100 /* 100ms */ + +struct GPIOKEYState { + SysBusDevice parent_obj; + + QEMUTimer *timer; + qemu_irq irq; +}; + +static const VMStateDescription vmstate_gpio_key = { + .name = "gpio-key", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_TIMER_PTR(timer, GPIOKEYState), + VMSTATE_END_OF_LIST() + } +}; + +static void gpio_key_reset(DeviceState *dev) +{ + GPIOKEYState *s = GPIOKEY(dev); + + timer_del(s->timer); +} + +static void gpio_key_timer_expired(void *opaque) +{ + GPIOKEYState *s = (GPIOKEYState *)opaque; + + qemu_set_irq(s->irq, 0); + timer_del(s->timer); +} + +static void gpio_key_set_irq(void *opaque, int irq, int level) +{ + GPIOKEYState *s = (GPIOKEYState *)opaque; + + qemu_set_irq(s->irq, 1); + timer_mod(s->timer, + qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + GPIO_KEY_LATENCY); +} + +static void gpio_key_realize(DeviceState *dev, Error **errp) +{ + GPIOKEYState *s = GPIOKEY(dev); + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + + sysbus_init_irq(sbd, &s->irq); + qdev_init_gpio_in(dev, gpio_key_set_irq, 1); + s->timer = timer_new_ms(QEMU_CLOCK_VIRTUAL, gpio_key_timer_expired, s); +} + +static void gpio_key_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = gpio_key_realize; + dc->vmsd = &vmstate_gpio_key; + dc->reset = &gpio_key_reset; +} + +static const TypeInfo gpio_key_info = { + .name = TYPE_GPIOKEY, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(GPIOKEYState), + .class_init = gpio_key_class_init, +}; + +static void gpio_key_register_types(void) +{ + type_register_static(&gpio_key_info); +} + +type_init(gpio_key_register_types) diff --git a/hw/gpio/gpio_pwr.c b/hw/gpio/gpio_pwr.c new file mode 100644 index 000000000..dbaf1c70c --- /dev/null +++ b/hw/gpio/gpio_pwr.c 
@@ -0,0 +1,70 @@ +/* + * GPIO qemu power controller + * + * Copyright (c) 2020 Linaro Limited + * + * Author: Maxim Uvarov + * + * Virtual GPIO driver which can be used on top of pl061 + * to reboot and shut down a QEMU virtual machine. One use + * case is as a GPIO driver for secure world applications + * (e.g. ARM Trusted Firmware). + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +/* + * QEMU interface: + * two named input GPIO lines: + * 'reset' : when asserted, trigger system reset + * 'shutdown' : when asserted, trigger system shutdown + */ + +#include "qemu/osdep.h" +#include "hw/sysbus.h" +#include "sysemu/runstate.h" + +#define TYPE_GPIOPWR "gpio-pwr" +OBJECT_DECLARE_SIMPLE_TYPE(GPIO_PWR_State, GPIOPWR) + +struct GPIO_PWR_State { + SysBusDevice parent_obj; +}; + +static void gpio_pwr_reset(void *opaque, int n, int level) +{ + if (level) { + qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); + } +} + +static void gpio_pwr_shutdown(void *opaque, int n, int level) +{ + if (level) { + qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN); + } +} + +static void gpio_pwr_init(Object *obj) +{ + DeviceState *dev = DEVICE(obj); + + qdev_init_gpio_in_named(dev, gpio_pwr_reset, "reset", 1); + qdev_init_gpio_in_named(dev, gpio_pwr_shutdown, "shutdown", 1); +} + +static const TypeInfo gpio_pwr_info = { + .name = TYPE_GPIOPWR, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(GPIO_PWR_State), + .instance_init = gpio_pwr_init, +}; + +static void gpio_pwr_register_types(void) +{ + type_register_static(&gpio_pwr_info); +} + +type_init(gpio_pwr_register_types) diff --git a/hw/gpio/imx_gpio.c b/hw/gpio/imx_gpio.c new file mode 100644 index 000000000..7a591804a --- /dev/null +++ b/hw/gpio/imx_gpio.c @@ -0,0 +1,355 @@ +/* + * i.MX processors GPIO emulation. + * + * Copyright (C) 2015 Jean-Christophe Dubois + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 or + * (at your option) version 3 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see <http://www.gnu.org/licenses/>. + */ + +#include "qemu/osdep.h" +#include "hw/gpio/imx_gpio.h" +#include "hw/irq.h" +#include "hw/qdev-properties.h" +#include "migration/vmstate.h" +#include "qemu/log.h" +#include "qemu/module.h" + +#ifndef DEBUG_IMX_GPIO +#define DEBUG_IMX_GPIO 0 +#endif + +typedef enum IMXGPIOLevel { + IMX_GPIO_LEVEL_LOW = 0, + IMX_GPIO_LEVEL_HIGH = 1, +} IMXGPIOLevel; + +#define DPRINTF(fmt, args...) 
\ + do { \ + if (DEBUG_IMX_GPIO) { \ + fprintf(stderr, "[%s]%s: " fmt , TYPE_IMX_GPIO, \ + __func__, ##args); \ + } \ + } while (0) + +static const char *imx_gpio_reg_name(uint32_t reg) +{ + switch (reg) { + case DR_ADDR: + return "DR"; + case GDIR_ADDR: + return "GDIR"; + case PSR_ADDR: + return "PSR"; + case ICR1_ADDR: + return "ICR1"; + case ICR2_ADDR: + return "ICR2"; + case IMR_ADDR: + return "IMR"; + case ISR_ADDR: + return "ISR"; + case EDGE_SEL_ADDR: + return "EDGE_SEL"; + default: + return "[?]"; + } +} + +static void imx_gpio_update_int(IMXGPIOState *s) +{ + if (s->has_upper_pin_irq) { + qemu_set_irq(s->irq[0], (s->isr & s->imr & 0x0000FFFF) ? 1 : 0); + qemu_set_irq(s->irq[1], (s->isr & s->imr & 0xFFFF0000) ? 1 : 0); + } else { + qemu_set_irq(s->irq[0], (s->isr & s->imr) ? 1 : 0); + } +} + +static void imx_gpio_set_int_line(IMXGPIOState *s, int line, IMXGPIOLevel level) +{ + /* if this signal isn't configured as an input signal, nothing to do */ + if (!extract32(s->gdir, line, 1)) { + return; + } + + /* When set, EDGE_SEL overrides the ICR config */ + if (extract32(s->edge_sel, line, 1)) { + /* we detect interrupt on rising and falling edge */ + if (extract32(s->psr, line, 1) != level) { + /* level changed */ + s->isr = deposit32(s->isr, line, 1, 1); + } + } else if (extract64(s->icr, 2*line + 1, 1)) { + /* interrupt is edge sensitive */ + if (extract32(s->psr, line, 1) != level) { + /* level changed */ + if (extract64(s->icr, 2*line, 1) != level) { + s->isr = deposit32(s->isr, line, 1, 1); + } + } + } else { + /* interrupt is level sensitive */ + if (extract64(s->icr, 2*line, 1) == level) { + s->isr = deposit32(s->isr, line, 1, 1); + } + } +} + +static void imx_gpio_set(void *opaque, int line, int level) +{ + IMXGPIOState *s = IMX_GPIO(opaque); + IMXGPIOLevel imx_level = level ? IMX_GPIO_LEVEL_HIGH : IMX_GPIO_LEVEL_LOW; + + imx_gpio_set_int_line(s, line, imx_level); + + /* this is an input signal, so set PSR */ + s->psr = deposit32(s->psr, line, 1, imx_level); + + imx_gpio_update_int(s); +} + +static void imx_gpio_set_all_int_lines(IMXGPIOState *s) +{ + int i; + + for (i = 0; i < IMX_GPIO_PIN_COUNT; i++) { + IMXGPIOLevel imx_level = extract32(s->psr, i, 1); + imx_gpio_set_int_line(s, i, imx_level); + } + + imx_gpio_update_int(s); +} + +static inline void imx_gpio_set_all_output_lines(IMXGPIOState *s) +{ + int i; + + for (i = 0; i < IMX_GPIO_PIN_COUNT; i++) { + /* + * if the line is set as output, then forward the line + * level to its user. 
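+ * A line is an output when its GDIR bit is set; the corresponding DR + * bit is then forwarded to the connected device as a qemu_irq level. 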
+ */ + if (extract32(s->gdir, i, 1) && s->output[i]) { + qemu_set_irq(s->output[i], extract32(s->dr, i, 1)); + } + } +} + +static uint64_t imx_gpio_read(void *opaque, hwaddr offset, unsigned size) +{ + IMXGPIOState *s = IMX_GPIO(opaque); + uint32_t reg_value = 0; + + switch (offset) { + case DR_ADDR: + /* + * depending on the "line" configuration, the bit values + * are coming either from DR or PSR + */ + reg_value = (s->dr & s->gdir) | (s->psr & ~s->gdir); + break; + + case GDIR_ADDR: + reg_value = s->gdir; + break; + + case PSR_ADDR: + reg_value = s->psr & ~s->gdir; + break; + + case ICR1_ADDR: + reg_value = extract64(s->icr, 0, 32); + break; + + case ICR2_ADDR: + reg_value = extract64(s->icr, 32, 32); + break; + + case IMR_ADDR: + reg_value = s->imr; + break; + + case ISR_ADDR: + reg_value = s->isr; + break; + + case EDGE_SEL_ADDR: + if (s->has_edge_sel) { + reg_value = s->edge_sel; + } else { + qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: EDGE_SEL register not " + "present on this version of GPIO device\n", + TYPE_IMX_GPIO, __func__); + } + break; + + default: + qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Bad register at offset 0x%" + HWADDR_PRIx "\n", TYPE_IMX_GPIO, __func__, offset); + break; + } + + DPRINTF("(%s) = 0x%" PRIx32 "\n", imx_gpio_reg_name(offset), reg_value); + + return reg_value; +} + +static void imx_gpio_write(void *opaque, hwaddr offset, uint64_t value, + unsigned size) +{ + IMXGPIOState *s = IMX_GPIO(opaque); + + DPRINTF("(%s, value = 0x%" PRIx32 ")\n", imx_gpio_reg_name(offset), + (uint32_t)value); + + switch (offset) { + case DR_ADDR: + s->dr = value; + imx_gpio_set_all_output_lines(s); + break; + + case GDIR_ADDR: + s->gdir = value; + imx_gpio_set_all_output_lines(s); + imx_gpio_set_all_int_lines(s); + break; + + case ICR1_ADDR: + s->icr = deposit64(s->icr, 0, 32, value); + imx_gpio_set_all_int_lines(s); + break; + + case ICR2_ADDR: + s->icr = deposit64(s->icr, 32, 32, value); + imx_gpio_set_all_int_lines(s); + break; + + case IMR_ADDR: + s->imr = value; + imx_gpio_update_int(s); + break; + + case ISR_ADDR: + s->isr &= ~value; + imx_gpio_set_all_int_lines(s); + break; + + case EDGE_SEL_ADDR: + if (s->has_edge_sel) { + s->edge_sel = value; + imx_gpio_set_all_int_lines(s); + } else { + qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: EDGE_SEL register not " + "present on this version of GPIO device\n", + TYPE_IMX_GPIO, __func__); + } + break; + + default: + qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Bad register at offset 0x%" + HWADDR_PRIx "\n", TYPE_IMX_GPIO, __func__, offset); + break; + } + + return; +} + +static const MemoryRegionOps imx_gpio_ops = { + .read = imx_gpio_read, + .write = imx_gpio_write, + .valid.min_access_size = 4, + .valid.max_access_size = 4, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static const VMStateDescription vmstate_imx_gpio = { + .name = TYPE_IMX_GPIO, + .version_id = 1, + .minimum_version_id = 1, + .minimum_version_id_old = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(dr, IMXGPIOState), + VMSTATE_UINT32(gdir, IMXGPIOState), + VMSTATE_UINT32(psr, IMXGPIOState), + VMSTATE_UINT64(icr, IMXGPIOState), + VMSTATE_UINT32(imr, IMXGPIOState), + VMSTATE_UINT32(isr, IMXGPIOState), + VMSTATE_BOOL(has_edge_sel, IMXGPIOState), + VMSTATE_UINT32(edge_sel, IMXGPIOState), + VMSTATE_END_OF_LIST() + } +}; + +static Property imx_gpio_properties[] = { + DEFINE_PROP_BOOL("has-edge-sel", IMXGPIOState, has_edge_sel, true), + DEFINE_PROP_BOOL("has-upper-pin-irq", IMXGPIOState, has_upper_pin_irq, + false), + DEFINE_PROP_END_OF_LIST(), +}; + +static void 
imx_gpio_reset(DeviceState *dev) +{ + IMXGPIOState *s = IMX_GPIO(dev); + + s->dr = 0; + s->gdir = 0; + s->psr = 0; + s->icr = 0; + s->imr = 0; + s->isr = 0; + s->edge_sel = 0; + + imx_gpio_set_all_output_lines(s); + imx_gpio_update_int(s); +} + +static void imx_gpio_realize(DeviceState *dev, Error **errp) +{ + IMXGPIOState *s = IMX_GPIO(dev); + + memory_region_init_io(&s->iomem, OBJECT(s), &imx_gpio_ops, s, + TYPE_IMX_GPIO, IMX_GPIO_MEM_SIZE); + + qdev_init_gpio_in(DEVICE(s), imx_gpio_set, IMX_GPIO_PIN_COUNT); + qdev_init_gpio_out(DEVICE(s), s->output, IMX_GPIO_PIN_COUNT); + + sysbus_init_irq(SYS_BUS_DEVICE(dev), &s->irq[0]); + sysbus_init_irq(SYS_BUS_DEVICE(dev), &s->irq[1]); + sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->iomem); +} + +static void imx_gpio_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = imx_gpio_realize; + dc->reset = imx_gpio_reset; + device_class_set_props(dc, imx_gpio_properties); + dc->vmsd = &vmstate_imx_gpio; + dc->desc = "i.MX GPIO controller"; +} + +static const TypeInfo imx_gpio_info = { + .name = TYPE_IMX_GPIO, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(IMXGPIOState), + .class_init = imx_gpio_class_init, +}; + +static void imx_gpio_register_types(void) +{ + type_register_static(&imx_gpio_info); +} + +type_init(imx_gpio_register_types) diff --git a/hw/gpio/max7310.c b/hw/gpio/max7310.c new file mode 100644 index 000000000..db6b5e3d7 --- /dev/null +++ b/hw/gpio/max7310.c @@ -0,0 +1,218 @@ +/* + * MAX7310 8-port GPIO expansion chip. + * + * Copyright (c) 2006 Openedhand Ltd. + * Written by Andrzej Zaborowski + * + * This file is licensed under GNU GPL. + */ + +#include "qemu/osdep.h" +#include "hw/i2c/i2c.h" +#include "hw/irq.h" +#include "migration/vmstate.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "qom/object.h" + +#define TYPE_MAX7310 "max7310" +OBJECT_DECLARE_SIMPLE_TYPE(MAX7310State, MAX7310) + +struct MAX7310State { + I2CSlave parent_obj; + + int i2c_command_byte; + int len; + + uint8_t level; + uint8_t direction; + uint8_t polarity; + uint8_t status; + uint8_t command; + qemu_irq handler[8]; + qemu_irq *gpio_in; +}; + +static void max7310_reset(DeviceState *dev) +{ + MAX7310State *s = MAX7310(dev); + + s->level &= s->direction; + s->direction = 0xff; + s->polarity = 0xf0; + s->status = 0x01; + s->command = 0x00; +} + +static uint8_t max7310_rx(I2CSlave *i2c) +{ + MAX7310State *s = MAX7310(i2c); + + switch (s->command) { + case 0x00: /* Input port */ + return s->level ^ s->polarity; + + case 0x01: /* Output port */ + return s->level & ~s->direction; + + case 0x02: /* Polarity inversion */ + return s->polarity; + + case 0x03: /* Configuration */ + return s->direction; + + case 0x04: /* Timeout */ + return s->status; + + case 0xff: /* Reserved */ + return 0xff; + + default: + qemu_log_mask(LOG_UNIMP, "%s: Unsupported register 0x%02" PRIx8 "\n", + __func__, s->command); + break; + } + return 0xff; +} + +static int max7310_tx(I2CSlave *i2c, uint8_t data) +{ + MAX7310State *s = MAX7310(i2c); + uint8_t diff; + int line; + + if (s->len++ > 1) { +#ifdef VERBOSE + printf("%s: message too long (%i bytes)\n", __func__, s->len); +#endif + return 1; + } + + if (s->i2c_command_byte) { + s->command = data; + s->i2c_command_byte = 0; + return 0; + } + + switch (s->command) { + case 0x01: /* Output port */ + for (diff = (data ^ s->level) & ~s->direction; diff; + diff &= ~(1 << line)) { + line = ctz32(diff); + if (s->handler[line]) + qemu_set_irq(s->handler[line], (data >> line) & 1); + } 
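+ /* + * Pins configured as inputs (direction bit set) keep their sampled + * level; output pins latch the newly written data below. + */ 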
+ s->level = (s->level & s->direction) | (data & ~s->direction); + break; + + case 0x02: /* Polarity inversion */ + s->polarity = data; + break; + + case 0x03: /* Configuration */ + s->level &= ~(s->direction ^ data); + s->direction = data; + break; + + case 0x04: /* Timeout */ + s->status = data; + break; + + case 0x00: /* Input port - ignore writes */ + break; + default: + qemu_log_mask(LOG_UNIMP, "%s: Unsupported register 0x%02" PRIx8 "\n", + __func__, s->command); + return 1; + } + + return 0; +} + +static int max7310_event(I2CSlave *i2c, enum i2c_event event) +{ + MAX7310State *s = MAX7310(i2c); + s->len = 0; + + switch (event) { + case I2C_START_SEND: + s->i2c_command_byte = 1; + break; + case I2C_FINISH: +#ifdef VERBOSE + if (s->len == 1) + printf("%s: message too short (%i bytes)\n", __func__, s->len); +#endif + break; + default: + break; + } + + return 0; +} + +static const VMStateDescription vmstate_max7310 = { + .name = "max7310", + .version_id = 0, + .minimum_version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_INT32(i2c_command_byte, MAX7310State), + VMSTATE_INT32(len, MAX7310State), + VMSTATE_UINT8(level, MAX7310State), + VMSTATE_UINT8(direction, MAX7310State), + VMSTATE_UINT8(polarity, MAX7310State), + VMSTATE_UINT8(status, MAX7310State), + VMSTATE_UINT8(command, MAX7310State), + VMSTATE_I2C_SLAVE(parent_obj, MAX7310State), + VMSTATE_END_OF_LIST() + } +}; + +static void max7310_gpio_set(void *opaque, int line, int level) +{ + MAX7310State *s = (MAX7310State *) opaque; + assert(line >= 0 && line < ARRAY_SIZE(s->handler)); + + if (level) + s->level |= s->direction & (1 << line); + else + s->level &= ~(s->direction & (1 << line)); +} + +/* MAX7310 is SMBus-compatible (can be used with only SMBus protocols), + * but also accepts sequences that are not SMBus so return an I2C device. 
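+ * It is therefore registered as a plain I2C slave (TYPE_I2C_SLAVE). 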
*/ +static void max7310_realize(DeviceState *dev, Error **errp) +{ + I2CSlave *i2c = I2C_SLAVE(dev); + MAX7310State *s = MAX7310(dev); + + qdev_init_gpio_in(&i2c->qdev, max7310_gpio_set, 8); + qdev_init_gpio_out(&i2c->qdev, s->handler, 8); +} + +static void max7310_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + I2CSlaveClass *k = I2C_SLAVE_CLASS(klass); + + dc->realize = max7310_realize; + k->event = max7310_event; + k->recv = max7310_rx; + k->send = max7310_tx; + dc->reset = max7310_reset; + dc->vmsd = &vmstate_max7310; +} + +static const TypeInfo max7310_info = { + .name = TYPE_MAX7310, + .parent = TYPE_I2C_SLAVE, + .instance_size = sizeof(MAX7310State), + .class_init = max7310_class_init, +}; + +static void max7310_register_types(void) +{ + type_register_static(&max7310_info); +} + +type_init(max7310_register_types) diff --git a/hw/gpio/meson.build b/hw/gpio/meson.build new file mode 100644 index 000000000..7bd6a5726 --- /dev/null +++ b/hw/gpio/meson.build @@ -0,0 +1,14 @@ +softmmu_ss.add(when: 'CONFIG_E500', if_true: files('mpc8xxx.c')) +softmmu_ss.add(when: 'CONFIG_GPIO_KEY', if_true: files('gpio_key.c')) +softmmu_ss.add(when: 'CONFIG_GPIO_PWR', if_true: files('gpio_pwr.c')) +softmmu_ss.add(when: 'CONFIG_MAX7310', if_true: files('max7310.c')) +softmmu_ss.add(when: 'CONFIG_PL061', if_true: files('pl061.c')) +softmmu_ss.add(when: 'CONFIG_ZAURUS', if_true: files('zaurus.c')) + +softmmu_ss.add(when: 'CONFIG_IMX', if_true: files('imx_gpio.c')) +softmmu_ss.add(when: 'CONFIG_NPCM7XX', if_true: files('npcm7xx_gpio.c')) +softmmu_ss.add(when: 'CONFIG_NRF51_SOC', if_true: files('nrf51_gpio.c')) +softmmu_ss.add(when: 'CONFIG_OMAP', if_true: files('omap_gpio.c')) +softmmu_ss.add(when: 'CONFIG_RASPI', if_true: files('bcm2835_gpio.c')) +softmmu_ss.add(when: 'CONFIG_ASPEED_SOC', if_true: files('aspeed_gpio.c')) +softmmu_ss.add(when: 'CONFIG_SIFIVE_GPIO', if_true: files('sifive_gpio.c')) diff --git a/hw/gpio/mpc8xxx.c b/hw/gpio/mpc8xxx.c new file mode 100644 index 000000000..cb42acb6d --- /dev/null +++ b/hw/gpio/mpc8xxx.c @@ -0,0 +1,224 @@ +/* + * GPIO controller for many Freescale SoCs + * + * Copyright (C) 2014 Freescale Semiconductor, Inc. All rights reserved. + * + * Author: Alexander Graf + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. 
+ */ + +#include "qemu/osdep.h" +#include "hw/irq.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "qemu/module.h" +#include "qom/object.h" + +#define TYPE_MPC8XXX_GPIO "mpc8xxx_gpio" +OBJECT_DECLARE_SIMPLE_TYPE(MPC8XXXGPIOState, MPC8XXX_GPIO) + +struct MPC8XXXGPIOState { + SysBusDevice parent_obj; + + MemoryRegion iomem; + qemu_irq irq; + qemu_irq out[32]; + + uint32_t dir; + uint32_t odr; + uint32_t dat; + uint32_t ier; + uint32_t imr; + uint32_t icr; +}; + +static const VMStateDescription vmstate_mpc8xxx_gpio = { + .name = "mpc8xxx_gpio", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(dir, MPC8XXXGPIOState), + VMSTATE_UINT32(odr, MPC8XXXGPIOState), + VMSTATE_UINT32(dat, MPC8XXXGPIOState), + VMSTATE_UINT32(ier, MPC8XXXGPIOState), + VMSTATE_UINT32(imr, MPC8XXXGPIOState), + VMSTATE_UINT32(icr, MPC8XXXGPIOState), + VMSTATE_END_OF_LIST() + } +}; + +static void mpc8xxx_gpio_update(MPC8XXXGPIOState *s) +{ + qemu_set_irq(s->irq, !!(s->ier & s->imr)); +} + +static uint64_t mpc8xxx_gpio_read(void *opaque, hwaddr offset, + unsigned size) +{ + MPC8XXXGPIOState *s = (MPC8XXXGPIOState *)opaque; + + if (size != 4) { + /* All registers are 32bit */ + return 0; + } + + switch (offset) { + case 0x0: /* Direction */ + return s->dir; + case 0x4: /* Open Drain */ + return s->odr; + case 0x8: /* Data */ + return s->dat; + case 0xC: /* Interrupt Event */ + return s->ier; + case 0x10: /* Interrupt Mask */ + return s->imr; + case 0x14: /* Interrupt Control */ + return s->icr; + default: + return 0; + } +} + +static void mpc8xxx_write_data(MPC8XXXGPIOState *s, uint32_t new_data) +{ + uint32_t old_data = s->dat; + uint32_t diff = old_data ^ new_data; + int i; + + for (i = 0; i < 32; i++) { + uint32_t mask = 0x80000000 >> i; + if (!(diff & mask)) { + continue; + } + + if (s->dir & mask) { + /* Output */ + qemu_set_irq(s->out[i], (new_data & mask) != 0); + } + } + + s->dat = new_data; +} + +static void mpc8xxx_gpio_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + MPC8XXXGPIOState *s = (MPC8XXXGPIOState *)opaque; + + if (size != 4) { + /* All registers are 32bit */ + return; + } + + switch (offset) { + case 0x0: /* Direction */ + s->dir = value; + break; + case 0x4: /* Open Drain */ + s->odr = value; + break; + case 0x8: /* Data */ + mpc8xxx_write_data(s, value); + break; + case 0xC: /* Interrupt Event */ + s->ier &= ~value; + break; + case 0x10: /* Interrupt Mask */ + s->imr = value; + break; + case 0x14: /* Interrupt Control */ + s->icr = value; + break; + } + + mpc8xxx_gpio_update(s); +} + +static void mpc8xxx_gpio_reset(DeviceState *dev) +{ + MPC8XXXGPIOState *s = MPC8XXX_GPIO(dev); + + s->dir = 0; + s->odr = 0; + s->dat = 0; + s->ier = 0; + s->imr = 0; + s->icr = 0; +} + +static void mpc8xxx_gpio_set_irq(void * opaque, int irq, int level) +{ + MPC8XXXGPIOState *s = (MPC8XXXGPIOState *)opaque; + uint32_t mask; + + mask = 0x80000000 >> irq; + if ((s->dir & mask) == 0) { + uint32_t old_value = s->dat & mask; + + s->dat &= ~mask; + if (level) + s->dat |= mask; + + if (!(s->icr & irq) || (old_value && !level)) { + s->ier |= mask; + } + + mpc8xxx_gpio_update(s); + } +} + +static const MemoryRegionOps mpc8xxx_gpio_ops = { + .read = mpc8xxx_gpio_read, + .write = mpc8xxx_gpio_write, + .endianness = DEVICE_BIG_ENDIAN, +}; + +static void mpc8xxx_gpio_initfn(Object *obj) +{ + DeviceState *dev = DEVICE(obj); + MPC8XXXGPIOState *s = MPC8XXX_GPIO(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + + memory_region_init_io(&s->iomem, obj, 
&mpc8xxx_gpio_ops, + s, "mpc8xxx_gpio", 0x1000); + sysbus_init_mmio(sbd, &s->iomem); + sysbus_init_irq(sbd, &s->irq); + qdev_init_gpio_in(dev, mpc8xxx_gpio_set_irq, 32); + qdev_init_gpio_out(dev, s->out, 32); +} + +static void mpc8xxx_gpio_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->vmsd = &vmstate_mpc8xxx_gpio; + dc->reset = mpc8xxx_gpio_reset; +} + +static const TypeInfo mpc8xxx_gpio_info = { + .name = TYPE_MPC8XXX_GPIO, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(MPC8XXXGPIOState), + .instance_init = mpc8xxx_gpio_initfn, + .class_init = mpc8xxx_gpio_class_init, +}; + +static void mpc8xxx_gpio_register_types(void) +{ + type_register_static(&mpc8xxx_gpio_info); +} + +type_init(mpc8xxx_gpio_register_types) diff --git a/hw/gpio/npcm7xx_gpio.c b/hw/gpio/npcm7xx_gpio.c new file mode 100644 index 000000000..3376901ab --- /dev/null +++ b/hw/gpio/npcm7xx_gpio.c @@ -0,0 +1,424 @@ +/* + * Nuvoton NPCM7xx General Purpose Input / Output (GPIO) + * + * Copyright 2020 Google LLC + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include "qemu/osdep.h" + +#include "hw/gpio/npcm7xx_gpio.h" +#include "hw/irq.h" +#include "hw/qdev-properties.h" +#include "migration/vmstate.h" +#include "qapi/error.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "qemu/units.h" +#include "trace.h" + +/* 32-bit register indices. 
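+ * Registers that are not word-consecutive carry explicit offsets in + * the enum below, e.g. DOS at byte offset 0x68 and TLOCK2 at 0x7c. 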
*/ +enum NPCM7xxGPIORegister { + NPCM7XX_GPIO_TLOCK1, + NPCM7XX_GPIO_DIN, + NPCM7XX_GPIO_POL, + NPCM7XX_GPIO_DOUT, + NPCM7XX_GPIO_OE, + NPCM7XX_GPIO_OTYP, + NPCM7XX_GPIO_MP, + NPCM7XX_GPIO_PU, + NPCM7XX_GPIO_PD, + NPCM7XX_GPIO_DBNC, + NPCM7XX_GPIO_EVTYP, + NPCM7XX_GPIO_EVBE, + NPCM7XX_GPIO_OBL0, + NPCM7XX_GPIO_OBL1, + NPCM7XX_GPIO_OBL2, + NPCM7XX_GPIO_OBL3, + NPCM7XX_GPIO_EVEN, + NPCM7XX_GPIO_EVENS, + NPCM7XX_GPIO_EVENC, + NPCM7XX_GPIO_EVST, + NPCM7XX_GPIO_SPLCK, + NPCM7XX_GPIO_MPLCK, + NPCM7XX_GPIO_IEM, + NPCM7XX_GPIO_OSRC, + NPCM7XX_GPIO_ODSC, + NPCM7XX_GPIO_DOS = 0x68 / sizeof(uint32_t), + NPCM7XX_GPIO_DOC, + NPCM7XX_GPIO_OES, + NPCM7XX_GPIO_OEC, + NPCM7XX_GPIO_TLOCK2 = 0x7c / sizeof(uint32_t), + NPCM7XX_GPIO_REGS_END, +}; + +#define NPCM7XX_GPIO_REGS_SIZE (4 * KiB) + +#define NPCM7XX_GPIO_LOCK_MAGIC1 (0xc0defa73) +#define NPCM7XX_GPIO_LOCK_MAGIC2 (0xc0de1248) + +static void npcm7xx_gpio_update_events(NPCM7xxGPIOState *s, uint32_t din_diff) +{ + uint32_t din_new = s->regs[NPCM7XX_GPIO_DIN]; + + /* Trigger on high level */ + s->regs[NPCM7XX_GPIO_EVST] |= din_new & ~s->regs[NPCM7XX_GPIO_EVTYP]; + /* Trigger on both edges */ + s->regs[NPCM7XX_GPIO_EVST] |= (din_diff & s->regs[NPCM7XX_GPIO_EVTYP] + & s->regs[NPCM7XX_GPIO_EVBE]); + /* Trigger on rising edge */ + s->regs[NPCM7XX_GPIO_EVST] |= (din_diff & din_new + & s->regs[NPCM7XX_GPIO_EVTYP]); + + trace_npcm7xx_gpio_update_events(DEVICE(s)->canonical_path, + s->regs[NPCM7XX_GPIO_EVST], + s->regs[NPCM7XX_GPIO_EVEN]); + qemu_set_irq(s->irq, !!(s->regs[NPCM7XX_GPIO_EVST] + & s->regs[NPCM7XX_GPIO_EVEN])); +} + +static void npcm7xx_gpio_update_pins(NPCM7xxGPIOState *s, uint32_t diff) +{ + uint32_t drive_en; + uint32_t drive_lvl; + uint32_t not_driven; + uint32_t undefined; + uint32_t pin_diff; + uint32_t din_old; + + /* Calculate level of each pin driven by GPIO controller. */ + drive_lvl = s->regs[NPCM7XX_GPIO_DOUT] ^ s->regs[NPCM7XX_GPIO_POL]; + /* If OTYP=1, only drive low (open drain) */ + drive_en = s->regs[NPCM7XX_GPIO_OE] & ~(s->regs[NPCM7XX_GPIO_OTYP] + & drive_lvl); + /* + * If a pin is driven to opposite levels by the GPIO controller and the + * external driver, the result is undefined. + */ + undefined = drive_en & s->ext_driven & (drive_lvl ^ s->ext_level); + if (undefined) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: pins have multiple drivers: 0x%" PRIx32 "\n", + DEVICE(s)->canonical_path, undefined); + } + + not_driven = ~(drive_en | s->ext_driven); + pin_diff = s->pin_level; + + /* Set pins to externally driven level. */ + s->pin_level = s->ext_level & s->ext_driven; + /* Set internally driven pins, ignoring any conflicts. */ + s->pin_level |= drive_lvl & drive_en; + /* Pull up undriven pins with internal pull-up enabled. */ + s->pin_level |= not_driven & s->regs[NPCM7XX_GPIO_PU]; + /* Pins not driven, pulled up or pulled down are undefined */ + undefined |= not_driven & ~(s->regs[NPCM7XX_GPIO_PU] + | s->regs[NPCM7XX_GPIO_PD]); + + /* If any pins changed state, update the outgoing GPIOs. */ + pin_diff ^= s->pin_level; + pin_diff |= undefined & diff; + if (pin_diff) { + int i; + + for (i = 0; i < NPCM7XX_GPIO_NR_PINS; i++) { + uint32_t mask = BIT(i); + if (pin_diff & mask) { + int level = (undefined & mask) ? -1 : !!(s->pin_level & mask); + trace_npcm7xx_gpio_set_output(DEVICE(s)->canonical_path, + i, level); + qemu_set_irq(s->output[i], level); + } + } + } + + /* Calculate new value of DIN after masking and polarity setting. 
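+ * Only pins whose IEM (input enable) bit is set sample the pin level; + * the POL register then inverts the sense of the sampled bits. 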
*/ + din_old = s->regs[NPCM7XX_GPIO_DIN]; + s->regs[NPCM7XX_GPIO_DIN] = ((s->pin_level & s->regs[NPCM7XX_GPIO_IEM]) + ^ s->regs[NPCM7XX_GPIO_POL]); + + /* See if any new events triggered because of all this. */ + npcm7xx_gpio_update_events(s, din_old ^ s->regs[NPCM7XX_GPIO_DIN]); +} + +static bool npcm7xx_gpio_is_locked(NPCM7xxGPIOState *s) +{ + return s->regs[NPCM7XX_GPIO_TLOCK1] == 1; +} + +static uint64_t npcm7xx_gpio_regs_read(void *opaque, hwaddr addr, + unsigned int size) +{ + hwaddr reg = addr / sizeof(uint32_t); + NPCM7xxGPIOState *s = opaque; + uint64_t value = 0; + + switch (reg) { + case NPCM7XX_GPIO_TLOCK1 ... NPCM7XX_GPIO_EVEN: + case NPCM7XX_GPIO_EVST ... NPCM7XX_GPIO_ODSC: + value = s->regs[reg]; + break; + + case NPCM7XX_GPIO_EVENS ... NPCM7XX_GPIO_EVENC: + case NPCM7XX_GPIO_DOS ... NPCM7XX_GPIO_TLOCK2: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: read from write-only register 0x%" HWADDR_PRIx "\n", + DEVICE(s)->canonical_path, addr); + break; + + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: read from invalid offset 0x%" HWADDR_PRIx "\n", + DEVICE(s)->canonical_path, addr); + break; + } + + trace_npcm7xx_gpio_read(DEVICE(s)->canonical_path, addr, value); + + return value; +} + +static void npcm7xx_gpio_regs_write(void *opaque, hwaddr addr, uint64_t v, + unsigned int size) +{ + hwaddr reg = addr / sizeof(uint32_t); + NPCM7xxGPIOState *s = opaque; + uint32_t value = v; + uint32_t diff; + + trace_npcm7xx_gpio_write(DEVICE(s)->canonical_path, addr, v); + + if (npcm7xx_gpio_is_locked(s)) { + switch (reg) { + case NPCM7XX_GPIO_TLOCK1: + if (s->regs[NPCM7XX_GPIO_TLOCK2] == NPCM7XX_GPIO_LOCK_MAGIC2 && + value == NPCM7XX_GPIO_LOCK_MAGIC1) { + s->regs[NPCM7XX_GPIO_TLOCK1] = 0; + s->regs[NPCM7XX_GPIO_TLOCK2] = 0; + } + break; + + case NPCM7XX_GPIO_TLOCK2: + s->regs[reg] = value; + break; + + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: write to locked register @ 0x%" HWADDR_PRIx "\n", + DEVICE(s)->canonical_path, addr); + break; + } + + return; + } + + diff = s->regs[reg] ^ value; + + switch (reg) { + case NPCM7XX_GPIO_TLOCK1: + case NPCM7XX_GPIO_TLOCK2: + s->regs[NPCM7XX_GPIO_TLOCK1] = 1; + s->regs[NPCM7XX_GPIO_TLOCK2] = 0; + break; + + case NPCM7XX_GPIO_DIN: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: write to read-only register @ 0x%" HWADDR_PRIx "\n", + DEVICE(s)->canonical_path, addr); + break; + + case NPCM7XX_GPIO_POL: + case NPCM7XX_GPIO_DOUT: + case NPCM7XX_GPIO_OE: + case NPCM7XX_GPIO_OTYP: + case NPCM7XX_GPIO_PU: + case NPCM7XX_GPIO_PD: + case NPCM7XX_GPIO_IEM: + s->regs[reg] = value; + npcm7xx_gpio_update_pins(s, diff); + break; + + case NPCM7XX_GPIO_DOS: + s->regs[NPCM7XX_GPIO_DOUT] |= value; + npcm7xx_gpio_update_pins(s, value); + break; + case NPCM7XX_GPIO_DOC: + s->regs[NPCM7XX_GPIO_DOUT] &= ~value; + npcm7xx_gpio_update_pins(s, value); + break; + case NPCM7XX_GPIO_OES: + s->regs[NPCM7XX_GPIO_OE] |= value; + npcm7xx_gpio_update_pins(s, value); + break; + case NPCM7XX_GPIO_OEC: + s->regs[NPCM7XX_GPIO_OE] &= ~value; + npcm7xx_gpio_update_pins(s, value); + break; + + case NPCM7XX_GPIO_EVTYP: + case NPCM7XX_GPIO_EVBE: + case NPCM7XX_GPIO_EVEN: + s->regs[reg] = value; + npcm7xx_gpio_update_events(s, 0); + break; + + case NPCM7XX_GPIO_EVENS: + s->regs[NPCM7XX_GPIO_EVEN] |= value; + npcm7xx_gpio_update_events(s, 0); + break; + case NPCM7XX_GPIO_EVENC: + s->regs[NPCM7XX_GPIO_EVEN] &= ~value; + npcm7xx_gpio_update_events(s, 0); + break; + + case NPCM7XX_GPIO_EVST: + s->regs[reg] &= ~value; + npcm7xx_gpio_update_events(s, 0); + break; + + case NPCM7XX_GPIO_MP: + case 
NPCM7XX_GPIO_DBNC: + case NPCM7XX_GPIO_OSRC: + case NPCM7XX_GPIO_ODSC: + /* Nothing to do; just store the value. */ + s->regs[reg] = value; + break; + + case NPCM7XX_GPIO_OBL0: + case NPCM7XX_GPIO_OBL1: + case NPCM7XX_GPIO_OBL2: + case NPCM7XX_GPIO_OBL3: + s->regs[reg] = value; + qemu_log_mask(LOG_UNIMP, "%s: Blinking is not implemented\n", + __func__); + break; + + case NPCM7XX_GPIO_SPLCK: + case NPCM7XX_GPIO_MPLCK: + qemu_log_mask(LOG_UNIMP, "%s: Per-pin lock is not implemented\n", + __func__); + break; + + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: write to invalid offset 0x%" HWADDR_PRIx "\n", + DEVICE(s)->canonical_path, addr); + break; + } +} + +static const MemoryRegionOps npcm7xx_gpio_regs_ops = { + .read = npcm7xx_gpio_regs_read, + .write = npcm7xx_gpio_regs_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + .unaligned = false, + }, +}; + +static void npcm7xx_gpio_set_input(void *opaque, int line, int level) +{ + NPCM7xxGPIOState *s = opaque; + + trace_npcm7xx_gpio_set_input(DEVICE(s)->canonical_path, line, level); + + g_assert(line >= 0 && line < NPCM7XX_GPIO_NR_PINS); + + s->ext_driven = deposit32(s->ext_driven, line, 1, level >= 0); + s->ext_level = deposit32(s->ext_level, line, 1, level > 0); + + npcm7xx_gpio_update_pins(s, BIT(line)); +} + +static void npcm7xx_gpio_enter_reset(Object *obj, ResetType type) +{ + NPCM7xxGPIOState *s = NPCM7XX_GPIO(obj); + + memset(s->regs, 0, sizeof(s->regs)); + + s->regs[NPCM7XX_GPIO_PU] = s->reset_pu; + s->regs[NPCM7XX_GPIO_PD] = s->reset_pd; + s->regs[NPCM7XX_GPIO_OSRC] = s->reset_osrc; + s->regs[NPCM7XX_GPIO_ODSC] = s->reset_odsc; +} + +static void npcm7xx_gpio_hold_reset(Object *obj) +{ + NPCM7xxGPIOState *s = NPCM7XX_GPIO(obj); + + npcm7xx_gpio_update_pins(s, -1); +} + +static void npcm7xx_gpio_init(Object *obj) +{ + NPCM7xxGPIOState *s = NPCM7XX_GPIO(obj); + DeviceState *dev = DEVICE(obj); + + memory_region_init_io(&s->mmio, obj, &npcm7xx_gpio_regs_ops, s, + "regs", NPCM7XX_GPIO_REGS_SIZE); + sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->mmio); + sysbus_init_irq(SYS_BUS_DEVICE(obj), &s->irq); + + qdev_init_gpio_in(dev, npcm7xx_gpio_set_input, NPCM7XX_GPIO_NR_PINS); + qdev_init_gpio_out(dev, s->output, NPCM7XX_GPIO_NR_PINS); +} + +static const VMStateDescription vmstate_npcm7xx_gpio = { + .name = "npcm7xx-gpio", + .version_id = 0, + .minimum_version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_UINT32(pin_level, NPCM7xxGPIOState), + VMSTATE_UINT32(ext_level, NPCM7xxGPIOState), + VMSTATE_UINT32(ext_driven, NPCM7xxGPIOState), + VMSTATE_UINT32_ARRAY(regs, NPCM7xxGPIOState, NPCM7XX_GPIO_NR_REGS), + VMSTATE_END_OF_LIST(), + }, +}; + +static Property npcm7xx_gpio_properties[] = { + /* Bit n set => pin n has pullup enabled by default. */ + DEFINE_PROP_UINT32("reset-pullup", NPCM7xxGPIOState, reset_pu, 0), + /* Bit n set => pin n has pulldown enabled by default. */ + DEFINE_PROP_UINT32("reset-pulldown", NPCM7xxGPIOState, reset_pd, 0), + /* Bit n set => pin n has high slew rate by default. */ + DEFINE_PROP_UINT32("reset-osrc", NPCM7xxGPIOState, reset_osrc, 0), + /* Bit n set => pin n has high drive strength by default. 
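+ * All four reset-* properties are latched into the PU, PD, OSRC and + * ODSC registers by npcm7xx_gpio_enter_reset(). 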
*/ + DEFINE_PROP_UINT32("reset-odsc", NPCM7xxGPIOState, reset_odsc, 0), + DEFINE_PROP_END_OF_LIST(), +}; + +static void npcm7xx_gpio_class_init(ObjectClass *klass, void *data) +{ + ResettableClass *reset = RESETTABLE_CLASS(klass); + DeviceClass *dc = DEVICE_CLASS(klass); + + QEMU_BUILD_BUG_ON(NPCM7XX_GPIO_REGS_END > NPCM7XX_GPIO_NR_REGS); + + dc->desc = "NPCM7xx GPIO Controller"; + dc->vmsd = &vmstate_npcm7xx_gpio; + reset->phases.enter = npcm7xx_gpio_enter_reset; + reset->phases.hold = npcm7xx_gpio_hold_reset; + device_class_set_props(dc, npcm7xx_gpio_properties); +} + +static const TypeInfo npcm7xx_gpio_types[] = { + { + .name = TYPE_NPCM7XX_GPIO, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(NPCM7xxGPIOState), + .class_init = npcm7xx_gpio_class_init, + .instance_init = npcm7xx_gpio_init, + }, +}; +DEFINE_TYPES(npcm7xx_gpio_types); diff --git a/hw/gpio/nrf51_gpio.c b/hw/gpio/nrf51_gpio.c new file mode 100644 index 000000000..b47fddf4e --- /dev/null +++ b/hw/gpio/nrf51_gpio.c @@ -0,0 +1,318 @@ +/* + * nRF51 System-on-Chip general purpose input/output register definition + * + * Reference Manual: http://infocenter.nordicsemi.com/pdf/nRF51_RM_v3.0.pdf + * Product Spec: http://infocenter.nordicsemi.com/pdf/nRF51822_PS_v3.1.pdf + * + * Copyright 2018 Steffen Görtz + * + * This code is licensed under the GPL version 2 or later. See + * the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "hw/gpio/nrf51_gpio.h" +#include "hw/irq.h" +#include "migration/vmstate.h" +#include "trace.h" + +/* + * Check if the output driver is connected to the direction switch + * given the current configuration and logic level. + * It is not differentiated between standard and "high"(-power) drive modes. + */ +static bool is_connected(uint32_t config, uint32_t level) +{ + bool state; + uint32_t drive_config = extract32(config, 8, 3); + + switch (drive_config) { + case 0 ... 3: + state = true; + break; + case 4 ... 5: + state = level != 0; + break; + case 6 ... 7: + state = level == 0; + break; + default: + g_assert_not_reached(); + break; + } + + return state; +} + +static int pull_value(uint32_t config) +{ + int pull = extract32(config, 2, 2); + if (pull == NRF51_GPIO_PULLDOWN) { + return 0; + } else if (pull == NRF51_GPIO_PULLUP) { + return 1; + } + return -1; +} + +static void update_output_irq(NRF51GPIOState *s, size_t i, + bool connected, bool level) +{ + int64_t irq_level = connected ? 
level : -1; + bool old_connected = extract32(s->old_out_connected, i, 1); + bool old_level = extract32(s->old_out, i, 1); + + if ((old_connected != connected) || (old_level != level)) { + qemu_set_irq(s->output[i], irq_level); + trace_nrf51_gpio_update_output_irq(i, irq_level); + } + + s->old_out = deposit32(s->old_out, i, 1, level); + s->old_out_connected = deposit32(s->old_out_connected, i, 1, connected); +} + +static void update_state(NRF51GPIOState *s) +{ + int pull; + size_t i; + bool connected_out, dir, connected_in, out, in, input; + + for (i = 0; i < NRF51_GPIO_PINS; i++) { + pull = pull_value(s->cnf[i]); + dir = extract32(s->cnf[i], 0, 1); + connected_in = extract32(s->in_mask, i, 1); + out = extract32(s->out, i, 1); + in = extract32(s->in, i, 1); + input = !extract32(s->cnf[i], 1, 1); + connected_out = is_connected(s->cnf[i], out) && dir; + + if (!input) { + if (pull >= 0) { + /* Input buffer disconnected from external drives */ + s->in = deposit32(s->in, i, 1, pull); + } + } else { + if (connected_out && connected_in && out != in) { + /* Pin both driven externally and internally */ + qemu_log_mask(LOG_GUEST_ERROR, + "GPIO pin %zu short circuited\n", i); + } + if (!connected_in) { + /* + * Floating input: the output stimulates IN if connected, + * otherwise pull-up/pull-down resistors put a value on both + * IN and OUT. + */ + if (pull >= 0 && !connected_out) { + connected_out = true; + out = pull; + } + if (connected_out) { + s->in = deposit32(s->in, i, 1, out); + } + } + } + update_output_irq(s, i, connected_out, out); + } +} + +/* + * Direction is exposed in both the DIR register and the DIR bit + * of each PINs CNF configuration register. Reflect bits for pins in DIR + * to individual pin configuration registers. + */ +static void reflect_dir_bit_in_cnf(NRF51GPIOState *s) +{ + size_t i; + + uint32_t value = s->dir; + + for (i = 0; i < NRF51_GPIO_PINS; i++) { + s->cnf[i] = (s->cnf[i] & ~(1UL)) | ((value >> i) & 0x01); + } +} + +static uint64_t nrf51_gpio_read(void *opaque, hwaddr offset, unsigned int size) +{ + NRF51GPIOState *s = NRF51_GPIO(opaque); + uint64_t r = 0; + size_t idx; + + switch (offset) { + case NRF51_GPIO_REG_OUT ... NRF51_GPIO_REG_OUTCLR: + r = s->out; + break; + + case NRF51_GPIO_REG_IN: + r = s->in; + break; + + case NRF51_GPIO_REG_DIR ... NRF51_GPIO_REG_DIRCLR: + r = s->dir; + break; + + case NRF51_GPIO_REG_CNF_START ... NRF51_GPIO_REG_CNF_END: + idx = (offset - NRF51_GPIO_REG_CNF_START) / 4; + r = s->cnf[idx]; + break; + + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: bad read offset 0x%" HWADDR_PRIx "\n", + __func__, offset); + } + + trace_nrf51_gpio_read(offset, r); + + return r; +} + +static void nrf51_gpio_write(void *opaque, hwaddr offset, + uint64_t value, unsigned int size) +{ + NRF51GPIOState *s = NRF51_GPIO(opaque); + size_t idx; + + trace_nrf51_gpio_write(offset, value); + + switch (offset) { + case NRF51_GPIO_REG_OUT: + s->out = value; + break; + + case NRF51_GPIO_REG_OUTSET: + s->out |= value; + break; + + case NRF51_GPIO_REG_OUTCLR: + s->out &= ~value; + break; + + case NRF51_GPIO_REG_DIR: + s->dir = value; + reflect_dir_bit_in_cnf(s); + break; + + case NRF51_GPIO_REG_DIRSET: + s->dir |= value; + reflect_dir_bit_in_cnf(s); + break; + + case NRF51_GPIO_REG_DIRCLR: + s->dir &= ~value; + reflect_dir_bit_in_cnf(s); + break; + + case NRF51_GPIO_REG_CNF_START ... 
NRF51_GPIO_REG_CNF_END: + idx = (offset - NRF51_GPIO_REG_CNF_START) / 4; + s->cnf[idx] = value; + /* + * direction is exposed in both the DIR register and the DIR bit + * of each PINs CNF configuration register. + */ + s->dir = (s->dir & ~(1UL << idx)) | ((value & 0x01) << idx); + break; + + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: bad write offset 0x%" HWADDR_PRIx "\n", + __func__, offset); + } + + update_state(s); +} + +static const MemoryRegionOps gpio_ops = { + .read = nrf51_gpio_read, + .write = nrf51_gpio_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .impl.min_access_size = 4, + .impl.max_access_size = 4, +}; + +static void nrf51_gpio_set(void *opaque, int line, int value) +{ + NRF51GPIOState *s = NRF51_GPIO(opaque); + + trace_nrf51_gpio_set(line, value); + + assert(line >= 0 && line < NRF51_GPIO_PINS); + + s->in_mask = deposit32(s->in_mask, line, 1, value >= 0); + if (value >= 0) { + s->in = deposit32(s->in, line, 1, value != 0); + } + + update_state(s); +} + +static void nrf51_gpio_reset(DeviceState *dev) +{ + NRF51GPIOState *s = NRF51_GPIO(dev); + size_t i; + + s->out = 0; + s->old_out = 0; + s->old_out_connected = 0; + s->in = 0; + s->in_mask = 0; + s->dir = 0; + + for (i = 0; i < NRF51_GPIO_PINS; i++) { + s->cnf[i] = 0x00000002; + } +} + +static const VMStateDescription vmstate_nrf51_gpio = { + .name = TYPE_NRF51_GPIO, + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(out, NRF51GPIOState), + VMSTATE_UINT32(in, NRF51GPIOState), + VMSTATE_UINT32(in_mask, NRF51GPIOState), + VMSTATE_UINT32(dir, NRF51GPIOState), + VMSTATE_UINT32_ARRAY(cnf, NRF51GPIOState, NRF51_GPIO_PINS), + VMSTATE_UINT32(old_out, NRF51GPIOState), + VMSTATE_UINT32(old_out_connected, NRF51GPIOState), + VMSTATE_END_OF_LIST() + } +}; + +static void nrf51_gpio_init(Object *obj) +{ + NRF51GPIOState *s = NRF51_GPIO(obj); + + memory_region_init_io(&s->mmio, obj, &gpio_ops, s, + TYPE_NRF51_GPIO, NRF51_GPIO_SIZE); + sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->mmio); + + qdev_init_gpio_in(DEVICE(s), nrf51_gpio_set, NRF51_GPIO_PINS); + qdev_init_gpio_out(DEVICE(s), s->output, NRF51_GPIO_PINS); +} + +static void nrf51_gpio_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->vmsd = &vmstate_nrf51_gpio; + dc->reset = nrf51_gpio_reset; + dc->desc = "nRF51 GPIO"; +} + +static const TypeInfo nrf51_gpio_info = { + .name = TYPE_NRF51_GPIO, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(NRF51GPIOState), + .instance_init = nrf51_gpio_init, + .class_init = nrf51_gpio_class_init +}; + +static void nrf51_gpio_register_types(void) +{ + type_register_static(&nrf51_gpio_info); +} + +type_init(nrf51_gpio_register_types) diff --git a/hw/gpio/omap_gpio.c b/hw/gpio/omap_gpio.c new file mode 100644 index 000000000..e25084b40 --- /dev/null +++ b/hw/gpio/omap_gpio.c @@ -0,0 +1,813 @@ +/* + * TI OMAP processors GPIO emulation. + * + * Copyright (C) 2006-2008 Andrzej Zaborowski + * Copyright (C) 2007-2009 Nokia Corporation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 or + * (at your option) version 3 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see <http://www.gnu.org/licenses/>. + */ + +#include "qemu/osdep.h" +#include "hw/irq.h" +#include "hw/qdev-properties.h" +#include "hw/arm/omap.h" +#include "hw/sysbus.h" +#include "qemu/error-report.h" +#include "qemu/module.h" +#include "qapi/error.h" + +struct omap_gpio_s { + qemu_irq irq; + qemu_irq handler[16]; + + uint16_t inputs; + uint16_t outputs; + uint16_t dir; + uint16_t edge; + uint16_t mask; + uint16_t ints; + uint16_t pins; +}; + +struct omap_gpif_s { + SysBusDevice parent_obj; + + MemoryRegion iomem; + int mpu_model; + void *clk; + struct omap_gpio_s omap1; +}; + +/* General-Purpose I/O of OMAP1 */ +static void omap_gpio_set(void *opaque, int line, int level) +{ + struct omap_gpio_s *s = &((struct omap_gpif_s *) opaque)->omap1; + uint16_t prev = s->inputs; + + if (level) + s->inputs |= 1 << line; + else + s->inputs &= ~(1 << line); + + if (((s->edge & s->inputs & ~prev) | (~s->edge & ~s->inputs & prev)) & + (1 << line) & s->dir & ~s->mask) { + s->ints |= 1 << line; + qemu_irq_raise(s->irq); + } +} + +static uint64_t omap_gpio_read(void *opaque, hwaddr addr, + unsigned size) +{ + struct omap_gpio_s *s = (struct omap_gpio_s *) opaque; + int offset = addr & OMAP_MPUI_REG_MASK; + + if (size != 2) { + return omap_badwidth_read16(opaque, addr); + } + + switch (offset) { + case 0x00: /* DATA_INPUT */ + return s->inputs & s->pins; + + case 0x04: /* DATA_OUTPUT */ + return s->outputs; + + case 0x08: /* DIRECTION_CONTROL */ + return s->dir; + + case 0x0c: /* INTERRUPT_CONTROL */ + return s->edge; + + case 0x10: /* INTERRUPT_MASK */ + return s->mask; + + case 0x14: /* INTERRUPT_STATUS */ + return s->ints; + + case 0x18: /* PIN_CONTROL (not in OMAP310) */ + OMAP_BAD_REG(addr); + return s->pins; + } + + OMAP_BAD_REG(addr); + return 0; +} + +static void omap_gpio_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + struct omap_gpio_s *s = (struct omap_gpio_s *) opaque; + int offset = addr & OMAP_MPUI_REG_MASK; + uint16_t diff; + int ln; + + if (size != 2) { + omap_badwidth_write16(opaque, addr, value); + return; + } + + switch (offset) { + case 0x00: /* DATA_INPUT */ + OMAP_RO_REG(addr); + return; + + case 0x04: /* DATA_OUTPUT */ + diff = (s->outputs ^ value) & ~s->dir; + s->outputs = value; + while ((ln = ctz32(diff)) != 32) { + if (s->handler[ln]) + qemu_set_irq(s->handler[ln], (value >> ln) & 1); + diff &= ~(1 << ln); + } + break; + + case 0x08: /* DIRECTION_CONTROL */ + diff = s->outputs & (s->dir ^ value); + s->dir = value; + + value = s->outputs & ~s->dir; + while ((ln = ctz32(diff)) != 32) { + if (s->handler[ln]) + qemu_set_irq(s->handler[ln], (value >> ln) & 1); + diff &= ~(1 << ln); + } + break; + + case 0x0c: /* INTERRUPT_CONTROL */ + s->edge = value; + break; + + case 0x10: /* INTERRUPT_MASK */ + s->mask = value; + break; + + case 0x14: /* INTERRUPT_STATUS */ + s->ints &= ~value; + if (!s->ints) + qemu_irq_lower(s->irq); + break; + + case 0x18: /* PIN_CONTROL (not in OMAP310 TRM) */ + OMAP_BAD_REG(addr); + s->pins = value; + break; + + default: + OMAP_BAD_REG(addr); + return; + } +} + +/* *Some* sources say the memory region is 32-bit. 
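+ * The handlers above only accept 16-bit accesses; any other width is + * routed through omap_badwidth_read16()/omap_badwidth_write16(). 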
*/ +static const MemoryRegionOps omap_gpio_ops = { + .read = omap_gpio_read, + .write = omap_gpio_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void omap_gpio_reset(struct omap_gpio_s *s) +{ + s->inputs = 0; + s->outputs = ~0; + s->dir = ~0; + s->edge = ~0; + s->mask = ~0; + s->ints = 0; + s->pins = ~0; +} + +struct omap2_gpio_s { + qemu_irq irq[2]; + qemu_irq wkup; + qemu_irq *handler; + MemoryRegion iomem; + + uint8_t revision; + uint8_t config[2]; + uint32_t inputs; + uint32_t outputs; + uint32_t dir; + uint32_t level[2]; + uint32_t edge[2]; + uint32_t mask[2]; + uint32_t wumask; + uint32_t ints[2]; + uint32_t debounce; + uint8_t delay; +}; + +struct omap2_gpif_s { + SysBusDevice parent_obj; + + MemoryRegion iomem; + int mpu_model; + void *iclk; + void *fclk[6]; + int modulecount; + struct omap2_gpio_s *modules; + qemu_irq *handler; + int autoidle; + int gpo; +}; + +/* General-Purpose Interface of OMAP2/3 */ +static inline void omap2_gpio_module_int_update(struct omap2_gpio_s *s, + int line) +{ + qemu_set_irq(s->irq[line], s->ints[line] & s->mask[line]); +} + +static void omap2_gpio_module_wake(struct omap2_gpio_s *s, int line) +{ + if (!(s->config[0] & (1 << 2))) /* ENAWAKEUP */ + return; + if (!(s->config[0] & (3 << 3))) /* Force Idle */ + return; + if (!(s->wumask & (1 << line))) + return; + + qemu_irq_raise(s->wkup); +} + +static inline void omap2_gpio_module_out_update(struct omap2_gpio_s *s, + uint32_t diff) +{ + int ln; + + s->outputs ^= diff; + diff &= ~s->dir; + while ((ln = ctz32(diff)) != 32) { + qemu_set_irq(s->handler[ln], (s->outputs >> ln) & 1); + diff &= ~(1 << ln); + } +} + +static void omap2_gpio_module_level_update(struct omap2_gpio_s *s, int line) +{ + s->ints[line] |= s->dir & + ((s->inputs & s->level[1]) | (~s->inputs & s->level[0])); + omap2_gpio_module_int_update(s, line); +} + +static inline void omap2_gpio_module_int(struct omap2_gpio_s *s, int line) +{ + s->ints[0] |= 1 << line; + omap2_gpio_module_int_update(s, 0); + s->ints[1] |= 1 << line; + omap2_gpio_module_int_update(s, 1); + omap2_gpio_module_wake(s, line); +} + +static void omap2_gpio_set(void *opaque, int line, int level) +{ + struct omap2_gpif_s *p = opaque; + struct omap2_gpio_s *s = &p->modules[line >> 5]; + + line &= 31; + if (level) { + if (s->dir & (1 << line) & ((~s->inputs & s->edge[0]) | s->level[1])) + omap2_gpio_module_int(s, line); + s->inputs |= 1 << line; + } else { + if (s->dir & (1 << line) & ((s->inputs & s->edge[1]) | s->level[0])) + omap2_gpio_module_int(s, line); + s->inputs &= ~(1 << line); + } +} + +static void omap2_gpio_module_reset(struct omap2_gpio_s *s) +{ + s->config[0] = 0; + s->config[1] = 2; + s->ints[0] = 0; + s->ints[1] = 0; + s->mask[0] = 0; + s->mask[1] = 0; + s->wumask = 0; + s->dir = ~0; + s->level[0] = 0; + s->level[1] = 0; + s->edge[0] = 0; + s->edge[1] = 0; + s->debounce = 0; + s->delay = 0; +} + +static uint32_t omap2_gpio_module_read(void *opaque, hwaddr addr) +{ + struct omap2_gpio_s *s = (struct omap2_gpio_s *) opaque; + + switch (addr) { + case 0x00: /* GPIO_REVISION */ + return s->revision; + + case 0x10: /* GPIO_SYSCONFIG */ + return s->config[0]; + + case 0x14: /* GPIO_SYSSTATUS */ + return 0x01; + + case 0x18: /* GPIO_IRQSTATUS1 */ + return s->ints[0]; + + case 0x1c: /* GPIO_IRQENABLE1 */ + case 0x60: /* GPIO_CLEARIRQENABLE1 */ + case 0x64: /* GPIO_SETIRQENABLE1 */ + return s->mask[0]; + + case 0x20: /* GPIO_WAKEUPENABLE */ + case 0x80: /* GPIO_CLEARWKUENA */ + case 0x84: /* GPIO_SETWKUENA */ + return s->wumask; + + case 0x28: /* 
GPIO_IRQSTATUS2 */ + return s->ints[1]; + + case 0x2c: /* GPIO_IRQENABLE2 */ + case 0x70: /* GPIO_CLEARIRQENABLE2 */ + case 0x74: /* GPIO_SETIREQNEABLE2 */ + return s->mask[1]; + + case 0x30: /* GPIO_CTRL */ + return s->config[1]; + + case 0x34: /* GPIO_OE */ + return s->dir; + + case 0x38: /* GPIO_DATAIN */ + return s->inputs; + + case 0x3c: /* GPIO_DATAOUT */ + case 0x90: /* GPIO_CLEARDATAOUT */ + case 0x94: /* GPIO_SETDATAOUT */ + return s->outputs; + + case 0x40: /* GPIO_LEVELDETECT0 */ + return s->level[0]; + + case 0x44: /* GPIO_LEVELDETECT1 */ + return s->level[1]; + + case 0x48: /* GPIO_RISINGDETECT */ + return s->edge[0]; + + case 0x4c: /* GPIO_FALLINGDETECT */ + return s->edge[1]; + + case 0x50: /* GPIO_DEBOUNCENABLE */ + return s->debounce; + + case 0x54: /* GPIO_DEBOUNCINGTIME */ + return s->delay; + } + + OMAP_BAD_REG(addr); + return 0; +} + +static void omap2_gpio_module_write(void *opaque, hwaddr addr, + uint32_t value) +{ + struct omap2_gpio_s *s = (struct omap2_gpio_s *) opaque; + uint32_t diff; + int ln; + + switch (addr) { + case 0x00: /* GPIO_REVISION */ + case 0x14: /* GPIO_SYSSTATUS */ + case 0x38: /* GPIO_DATAIN */ + OMAP_RO_REG(addr); + break; + + case 0x10: /* GPIO_SYSCONFIG */ + if (((value >> 3) & 3) == 3) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Illegal IDLEMODE value: 3\n", __func__); + } + if (value & 2) + omap2_gpio_module_reset(s); + s->config[0] = value & 0x1d; + break; + + case 0x18: /* GPIO_IRQSTATUS1 */ + if (s->ints[0] & value) { + s->ints[0] &= ~value; + omap2_gpio_module_level_update(s, 0); + } + break; + + case 0x1c: /* GPIO_IRQENABLE1 */ + s->mask[0] = value; + omap2_gpio_module_int_update(s, 0); + break; + + case 0x20: /* GPIO_WAKEUPENABLE */ + s->wumask = value; + break; + + case 0x28: /* GPIO_IRQSTATUS2 */ + if (s->ints[1] & value) { + s->ints[1] &= ~value; + omap2_gpio_module_level_update(s, 1); + } + break; + + case 0x2c: /* GPIO_IRQENABLE2 */ + s->mask[1] = value; + omap2_gpio_module_int_update(s, 1); + break; + + case 0x30: /* GPIO_CTRL */ + s->config[1] = value & 7; + break; + + case 0x34: /* GPIO_OE */ + diff = s->outputs & (s->dir ^ value); + s->dir = value; + + value = s->outputs & ~s->dir; + while ((ln = ctz32(diff)) != 32) { + diff &= ~(1 << ln); + qemu_set_irq(s->handler[ln], (value >> ln) & 1); + } + + omap2_gpio_module_level_update(s, 0); + omap2_gpio_module_level_update(s, 1); + break; + + case 0x3c: /* GPIO_DATAOUT */ + omap2_gpio_module_out_update(s, s->outputs ^ value); + break; + + case 0x40: /* GPIO_LEVELDETECT0 */ + s->level[0] = value; + omap2_gpio_module_level_update(s, 0); + omap2_gpio_module_level_update(s, 1); + break; + + case 0x44: /* GPIO_LEVELDETECT1 */ + s->level[1] = value; + omap2_gpio_module_level_update(s, 0); + omap2_gpio_module_level_update(s, 1); + break; + + case 0x48: /* GPIO_RISINGDETECT */ + s->edge[0] = value; + break; + + case 0x4c: /* GPIO_FALLINGDETECT */ + s->edge[1] = value; + break; + + case 0x50: /* GPIO_DEBOUNCENABLE */ + s->debounce = value; + break; + + case 0x54: /* GPIO_DEBOUNCINGTIME */ + s->delay = value; + break; + + case 0x60: /* GPIO_CLEARIRQENABLE1 */ + s->mask[0] &= ~value; + omap2_gpio_module_int_update(s, 0); + break; + + case 0x64: /* GPIO_SETIRQENABLE1 */ + s->mask[0] |= value; + omap2_gpio_module_int_update(s, 0); + break; + + case 0x70: /* GPIO_CLEARIRQENABLE2 */ + s->mask[1] &= ~value; + omap2_gpio_module_int_update(s, 1); + break; + + case 0x74: /* GPIO_SETIREQNEABLE2 */ + s->mask[1] |= value; + omap2_gpio_module_int_update(s, 1); + break; + + case 0x80: /* GPIO_CLEARWKUENA */ 
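+ /* WKUENA bits are cleared by writing 1s here; 0x84 below sets them. */ 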
+ s->wumask &= ~value; + break; + + case 0x84: /* GPIO_SETWKUENA */ + s->wumask |= value; + break; + + case 0x90: /* GPIO_CLEARDATAOUT */ + omap2_gpio_module_out_update(s, s->outputs & value); + break; + + case 0x94: /* GPIO_SETDATAOUT */ + omap2_gpio_module_out_update(s, ~s->outputs & value); + break; + + default: + OMAP_BAD_REG(addr); + return; + } +} + +static uint64_t omap2_gpio_module_readp(void *opaque, hwaddr addr, + unsigned size) +{ + return omap2_gpio_module_read(opaque, addr & ~3) >> ((addr & 3) << 3); +} + +static void omap2_gpio_module_writep(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + uint32_t cur = 0; + uint32_t mask = 0xffff; + + if (size == 4) { + omap2_gpio_module_write(opaque, addr, value); + return; + } + + switch (addr & ~3) { + case 0x00: /* GPIO_REVISION */ + case 0x14: /* GPIO_SYSSTATUS */ + case 0x38: /* GPIO_DATAIN */ + OMAP_RO_REG(addr); + break; + + case 0x10: /* GPIO_SYSCONFIG */ + case 0x1c: /* GPIO_IRQENABLE1 */ + case 0x20: /* GPIO_WAKEUPENABLE */ + case 0x2c: /* GPIO_IRQENABLE2 */ + case 0x30: /* GPIO_CTRL */ + case 0x34: /* GPIO_OE */ + case 0x3c: /* GPIO_DATAOUT */ + case 0x40: /* GPIO_LEVELDETECT0 */ + case 0x44: /* GPIO_LEVELDETECT1 */ + case 0x48: /* GPIO_RISINGDETECT */ + case 0x4c: /* GPIO_FALLINGDETECT */ + case 0x50: /* GPIO_DEBOUNCENABLE */ + case 0x54: /* GPIO_DEBOUNCINGTIME */ + cur = omap2_gpio_module_read(opaque, addr & ~3) & + ~(mask << ((addr & 3) << 3)); + + /* Fall through. */ + case 0x18: /* GPIO_IRQSTATUS1 */ + case 0x28: /* GPIO_IRQSTATUS2 */ + case 0x60: /* GPIO_CLEARIRQENABLE1 */ + case 0x64: /* GPIO_SETIRQENABLE1 */ + case 0x70: /* GPIO_CLEARIRQENABLE2 */ + case 0x74: /* GPIO_SETIREQNEABLE2 */ + case 0x80: /* GPIO_CLEARWKUENA */ + case 0x84: /* GPIO_SETWKUENA */ + case 0x90: /* GPIO_CLEARDATAOUT */ + case 0x94: /* GPIO_SETDATAOUT */ + value <<= (addr & 3) << 3; + omap2_gpio_module_write(opaque, addr, cur | value); + break; + + default: + OMAP_BAD_REG(addr); + return; + } +} + +static const MemoryRegionOps omap2_gpio_module_ops = { + .read = omap2_gpio_module_readp, + .write = omap2_gpio_module_writep, + .valid.min_access_size = 1, + .valid.max_access_size = 4, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void omap_gpif_reset(DeviceState *dev) +{ + struct omap_gpif_s *s = OMAP1_GPIO(dev); + + omap_gpio_reset(&s->omap1); +} + +static void omap2_gpif_reset(DeviceState *dev) +{ + struct omap2_gpif_s *s = OMAP2_GPIO(dev); + int i; + + for (i = 0; i < s->modulecount; i++) { + omap2_gpio_module_reset(&s->modules[i]); + } + s->autoidle = 0; + s->gpo = 0; +} + +static uint64_t omap2_gpif_top_read(void *opaque, hwaddr addr, + unsigned size) +{ + struct omap2_gpif_s *s = (struct omap2_gpif_s *) opaque; + + switch (addr) { + case 0x00: /* IPGENERICOCPSPL_REVISION */ + return 0x18; + + case 0x10: /* IPGENERICOCPSPL_SYSCONFIG */ + return s->autoidle; + + case 0x14: /* IPGENERICOCPSPL_SYSSTATUS */ + return 0x01; + + case 0x18: /* IPGENERICOCPSPL_IRQSTATUS */ + return 0x00; + + case 0x40: /* IPGENERICOCPSPL_GPO */ + return s->gpo; + + case 0x50: /* IPGENERICOCPSPL_GPI */ + return 0x00; + } + + OMAP_BAD_REG(addr); + return 0; +} + +static void omap2_gpif_top_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + struct omap2_gpif_s *s = (struct omap2_gpif_s *) opaque; + + switch (addr) { + case 0x00: /* IPGENERICOCPSPL_REVISION */ + case 0x14: /* IPGENERICOCPSPL_SYSSTATUS */ + case 0x18: /* IPGENERICOCPSPL_IRQSTATUS */ + case 0x50: /* IPGENERICOCPSPL_GPI */ + OMAP_RO_REG(addr); + break; + + case 0x10: /* 
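
/*
 * omap2_gpio_module_readp()/writep() above widen a 1- or 2-byte access
 * to the aligned 32-bit register and shift by eight bits per byte of
 * misalignment ((addr & 3) << 3). A self-contained check of that
 * arithmetic against a fake register value (illustration only):
 */
#include <assert.h>
#include <stdint.h>

static uint32_t fake_reg = 0xaabbccdd;      /* invented register contents */

static uint32_t readp(uint32_t addr)
{
    return fake_reg >> ((addr & 3) << 3);   /* same expression as readp */
}

int main(void)
{
    assert((readp(0x3c) & 0xff) == 0xdd);   /* byte lane 0 */
    assert((readp(0x3e) & 0xff) == 0xbb);   /* byte lane 2: shift by 16 */
    return 0;
}
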
IPGENERICOCPSPL_SYSCONFIG */ + if (value & (1 << 1)) /* SOFTRESET */ + omap2_gpif_reset(DEVICE(s)); + s->autoidle = value & 1; + break; + + case 0x40: /* IPGENERICOCPSPL_GPO */ + s->gpo = value & 1; + break; + + default: + OMAP_BAD_REG(addr); + return; + } +} + +static const MemoryRegionOps omap2_gpif_top_ops = { + .read = omap2_gpif_top_read, + .write = omap2_gpif_top_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void omap_gpio_init(Object *obj) +{ + DeviceState *dev = DEVICE(obj); + struct omap_gpif_s *s = OMAP1_GPIO(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + + qdev_init_gpio_in(dev, omap_gpio_set, 16); + qdev_init_gpio_out(dev, s->omap1.handler, 16); + sysbus_init_irq(sbd, &s->omap1.irq); + memory_region_init_io(&s->iomem, obj, &omap_gpio_ops, &s->omap1, + "omap.gpio", 0x1000); + sysbus_init_mmio(sbd, &s->iomem); +} + +static void omap_gpio_realize(DeviceState *dev, Error **errp) +{ + struct omap_gpif_s *s = OMAP1_GPIO(dev); + + if (!s->clk) { + error_setg(errp, "omap-gpio: clk not connected"); + } +} + +static void omap2_gpio_realize(DeviceState *dev, Error **errp) +{ + struct omap2_gpif_s *s = OMAP2_GPIO(dev); + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + int i; + + if (!s->iclk) { + error_setg(errp, "omap2-gpio: iclk not connected"); + return; + } + + s->modulecount = s->mpu_model < omap2430 ? 4 + : s->mpu_model < omap3430 ? 5 + : 6; + + if (s->mpu_model < omap3430) { + memory_region_init_io(&s->iomem, OBJECT(dev), &omap2_gpif_top_ops, s, + "omap2.gpio", 0x1000); + sysbus_init_mmio(sbd, &s->iomem); + } + + s->modules = g_new0(struct omap2_gpio_s, s->modulecount); + s->handler = g_new0(qemu_irq, s->modulecount * 32); + qdev_init_gpio_in(dev, omap2_gpio_set, s->modulecount * 32); + qdev_init_gpio_out(dev, s->handler, s->modulecount * 32); + + for (i = 0; i < s->modulecount; i++) { + struct omap2_gpio_s *m = &s->modules[i]; + + if (!s->fclk[i]) { + error_setg(errp, "omap2-gpio: fclk%d not connected", i); + return; + } + + m->revision = (s->mpu_model < omap3430) ? 
0x18 : 0x25; + m->handler = &s->handler[i * 32]; + sysbus_init_irq(sbd, &m->irq[0]); /* mpu irq */ + sysbus_init_irq(sbd, &m->irq[1]); /* dsp irq */ + sysbus_init_irq(sbd, &m->wkup); + memory_region_init_io(&m->iomem, OBJECT(dev), &omap2_gpio_module_ops, m, + "omap.gpio-module", 0x1000); + sysbus_init_mmio(sbd, &m->iomem); + } +} + +void omap_gpio_set_clk(omap_gpif *gpio, omap_clk clk) +{ + gpio->clk = clk; +} + +static Property omap_gpio_properties[] = { + DEFINE_PROP_INT32("mpu_model", struct omap_gpif_s, mpu_model, 0), + DEFINE_PROP_END_OF_LIST(), +}; + +static void omap_gpio_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = omap_gpio_realize; + dc->reset = omap_gpif_reset; + device_class_set_props(dc, omap_gpio_properties); + /* Reason: pointer property "clk" */ + dc->user_creatable = false; +} + +static const TypeInfo omap_gpio_info = { + .name = TYPE_OMAP1_GPIO, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(struct omap_gpif_s), + .instance_init = omap_gpio_init, + .class_init = omap_gpio_class_init, +}; + +void omap2_gpio_set_iclk(omap2_gpif *gpio, omap_clk clk) +{ + gpio->iclk = clk; +} + +void omap2_gpio_set_fclk(omap2_gpif *gpio, uint8_t i, omap_clk clk) +{ + assert(i <= 5); + gpio->fclk[i] = clk; +} + +static Property omap2_gpio_properties[] = { + DEFINE_PROP_INT32("mpu_model", struct omap2_gpif_s, mpu_model, 0), + DEFINE_PROP_END_OF_LIST(), +}; + +static void omap2_gpio_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = omap2_gpio_realize; + dc->reset = omap2_gpif_reset; + device_class_set_props(dc, omap2_gpio_properties); + /* Reason: pointer properties "iclk", "fclk0", ..., "fclk5" */ + dc->user_creatable = false; +} + +static const TypeInfo omap2_gpio_info = { + .name = TYPE_OMAP2_GPIO, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(struct omap2_gpif_s), + .class_init = omap2_gpio_class_init, +}; + +static void omap_gpio_register_types(void) +{ + type_register_static(&omap_gpio_info); + type_register_static(&omap2_gpio_info); +} + +type_init(omap_gpio_register_types) diff --git a/hw/gpio/pl061.c b/hw/gpio/pl061.c new file mode 100644 index 000000000..899be861c --- /dev/null +++ b/hw/gpio/pl061.c @@ -0,0 +1,603 @@ +/* + * Arm PrimeCell PL061 General Purpose IO with additional + * Luminary Micro Stellaris bits. + * + * Copyright (c) 2007 CodeSourcery. + * Written by Paul Brook + * + * This code is licensed under the GPL. + * + * QEMU interface: + * + sysbus MMIO region 0: the device registers + * + sysbus IRQ: the GPIOINTR interrupt line + * + unnamed GPIO inputs 0..7: inputs to connect to the emulated GPIO lines + * + unnamed GPIO outputs 0..7: the emulated GPIO lines, considered as + * outputs + * + QOM property "pullups": an integer defining whether non-floating lines + * configured as inputs should be pulled up to logical 1 (ie whether in + * real hardware they have a pullup resistor on the line out of the PL061). + * This should be an 8-bit value, where bit 0 is 1 if GPIO line 0 should + * be pulled high, bit 1 configures line 1, and so on. The default is 0xff, + * indicating that all GPIO lines are pulled up to logical 1. + * + QOM property "pulldowns": an integer defining whether non-floating lines + * configured as inputs should be pulled down to logical 0 (ie whether in + * real hardware they have a pulldown resistor on the line out of the PL061). 
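
/*
 * Because "iclk"/"fclk" are pointer properties, TYPE_OMAP2_GPIO is not
 * user creatable and a board has to wire the clocks before realize, or
 * the realize hook above reports an error. A sketch of the expected
 * wiring (the clock handles and the omap2420 model choice are
 * placeholders; the model enum is assumed from hw/arm/omap.h):
 */
static DeviceState *board_create_omap2_gpio(omap_clk iclk, omap_clk fclk[4])
{
    DeviceState *dev = qdev_new(TYPE_OMAP2_GPIO);
    int i;

    qdev_prop_set_int32(dev, "mpu_model", omap2420);    /* 4 GPIO modules */
    omap2_gpio_set_iclk(OMAP2_GPIO(dev), iclk);
    for (i = 0; i < 4; i++) {                /* realize checks every fclk */
        omap2_gpio_set_fclk(OMAP2_GPIO(dev), i, fclk[i]);
    }
    sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
    return dev;
}
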
+ * This should be an 8-bit value, where bit 0 is 1 if GPIO line 0 should + * be pulled low, bit 1 configures line 1, and so on. The default is 0x0. + * It is an error to set a bit in both "pullups" and "pulldowns". If a bit + * is 0 in both, then the line is considered to be floating, and it will + * not have qemu_set_irq() called on it when it is configured as an input. + */ + +#include "qemu/osdep.h" +#include "hw/irq.h" +#include "hw/sysbus.h" +#include "hw/qdev-properties.h" +#include "migration/vmstate.h" +#include "qapi/error.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "qom/object.h" +#include "trace.h" + +static const uint8_t pl061_id[12] = + { 0x00, 0x00, 0x00, 0x00, 0x61, 0x10, 0x04, 0x00, 0x0d, 0xf0, 0x05, 0xb1 }; +static const uint8_t pl061_id_luminary[12] = + { 0x00, 0x00, 0x00, 0x00, 0x61, 0x00, 0x18, 0x01, 0x0d, 0xf0, 0x05, 0xb1 }; + +#define TYPE_PL061 "pl061" +OBJECT_DECLARE_SIMPLE_TYPE(PL061State, PL061) + +#define N_GPIOS 8 + +struct PL061State { + SysBusDevice parent_obj; + + MemoryRegion iomem; + uint32_t locked; + uint32_t data; + uint32_t old_out_data; + uint32_t old_in_data; + uint32_t dir; + uint32_t isense; + uint32_t ibe; + uint32_t iev; + uint32_t im; + uint32_t istate; + uint32_t afsel; + uint32_t dr2r; + uint32_t dr4r; + uint32_t dr8r; + uint32_t odr; + uint32_t pur; + uint32_t pdr; + uint32_t slr; + uint32_t den; + uint32_t cr; + uint32_t amsel; + qemu_irq irq; + qemu_irq out[N_GPIOS]; + const unsigned char *id; + /* Properties, for non-Luminary PL061 */ + uint32_t pullups; + uint32_t pulldowns; +}; + +static const VMStateDescription vmstate_pl061 = { + .name = "pl061", + .version_id = 4, + .minimum_version_id = 4, + .fields = (VMStateField[]) { + VMSTATE_UINT32(locked, PL061State), + VMSTATE_UINT32(data, PL061State), + VMSTATE_UINT32(old_out_data, PL061State), + VMSTATE_UINT32(old_in_data, PL061State), + VMSTATE_UINT32(dir, PL061State), + VMSTATE_UINT32(isense, PL061State), + VMSTATE_UINT32(ibe, PL061State), + VMSTATE_UINT32(iev, PL061State), + VMSTATE_UINT32(im, PL061State), + VMSTATE_UINT32(istate, PL061State), + VMSTATE_UINT32(afsel, PL061State), + VMSTATE_UINT32(dr2r, PL061State), + VMSTATE_UINT32(dr4r, PL061State), + VMSTATE_UINT32(dr8r, PL061State), + VMSTATE_UINT32(odr, PL061State), + VMSTATE_UINT32(pur, PL061State), + VMSTATE_UINT32(pdr, PL061State), + VMSTATE_UINT32(slr, PL061State), + VMSTATE_UINT32(den, PL061State), + VMSTATE_UINT32(cr, PL061State), + VMSTATE_UINT32_V(amsel, PL061State, 2), + VMSTATE_END_OF_LIST() + } +}; + +static uint8_t pl061_floating(PL061State *s) +{ + /* + * Return mask of bits which correspond to pins configured as inputs + * and which are floating (neither pulled up to 1 nor down to 0). + */ + uint8_t floating; + + if (s->id == pl061_id_luminary) { + /* + * If both PUR and PDR bits are clear, there is neither a pullup + * nor a pulldown in place, and the output truly floats. + */ + floating = ~(s->pur | s->pdr); + } else { + floating = ~(s->pullups | s->pulldowns); + } + return floating & ~s->dir; +} + +static uint8_t pl061_pullups(PL061State *s) +{ + /* + * Return mask of bits which correspond to pins configured as inputs + * and which are pulled up to 1. + */ + uint8_t pullups; + + if (s->id == pl061_id_luminary) { + /* + * The Luminary variant of the PL061 has an extra registers which + * the guest can use to configure whether lines should be pullup + * or pulldown. 
+ */ + pullups = s->pur; + } else { + pullups = s->pullups; + } + return pullups & ~s->dir; +} + +static void pl061_update(PL061State *s) +{ + uint8_t changed; + uint8_t mask; + uint8_t out; + int i; + uint8_t pullups = pl061_pullups(s); + uint8_t floating = pl061_floating(s); + + trace_pl061_update(DEVICE(s)->canonical_path, s->dir, s->data, + pullups, floating); + + /* + * Pins configured as output are driven from the data register; + * otherwise if they're pulled up they're 1, and if they're floating + * then we give them the same value they had previously, so we don't + * report any change to the other end. + */ + out = (s->data & s->dir) | pullups | (s->old_out_data & floating); + changed = s->old_out_data ^ out; + if (changed) { + s->old_out_data = out; + for (i = 0; i < N_GPIOS; i++) { + mask = 1 << i; + if (changed & mask) { + int level = (out & mask) != 0; + trace_pl061_set_output(DEVICE(s)->canonical_path, i, level); + qemu_set_irq(s->out[i], level); + } + } + } + + /* Inputs */ + changed = (s->old_in_data ^ s->data) & ~s->dir; + if (changed) { + s->old_in_data = s->data; + for (i = 0; i < N_GPIOS; i++) { + mask = 1 << i; + if (changed & mask) { + trace_pl061_input_change(DEVICE(s)->canonical_path, i, + (s->data & mask) != 0); + + if (!(s->isense & mask)) { + /* Edge interrupt */ + if (s->ibe & mask) { + /* Any edge triggers the interrupt */ + s->istate |= mask; + } else { + /* Edge is selected by IEV */ + s->istate |= ~(s->data ^ s->iev) & mask; + } + } + } + } + } + + /* Level interrupt */ + s->istate |= ~(s->data ^ s->iev) & s->isense; + + trace_pl061_update_istate(DEVICE(s)->canonical_path, + s->istate, s->im, (s->istate & s->im) != 0); + + qemu_set_irq(s->irq, (s->istate & s->im) != 0); +} + +static uint64_t pl061_read(void *opaque, hwaddr offset, + unsigned size) +{ + PL061State *s = (PL061State *)opaque; + uint64_t r = 0; + + switch (offset) { + case 0x0 ... 
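
/*
 * The line-value resolution in pl061_update() above, in one worked
 * case (all masks invented for the example): outputs come from the
 * data register, pulled-up inputs read as 1, and floating inputs keep
 * their previous value so no spurious edge is reported.
 */
#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint8_t dir      = 0x0f;                  /* 0-3 outputs, 4-7 inputs */
    uint8_t data     = 0x05;                  /* drive lines 0 and 2 high */
    uint8_t pullups  = 0x30 & ~dir;           /* lines 4 and 5 pulled up */
    uint8_t floating = (uint8_t)~0x30 & ~dir; /* lines 6 and 7 float */
    uint8_t old_out  = 0xc0;                  /* what 6 and 7 showed before */
    uint8_t out      = (data & dir) | pullups | (old_out & floating);

    assert(out == 0xf5);    /* 0x05 | 0x30 | 0xc0 */
    return 0;
}
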
0x3ff: /* Data */ + r = s->data & (offset >> 2); + break; + case 0x400: /* Direction */ + r = s->dir; + break; + case 0x404: /* Interrupt sense */ + r = s->isense; + break; + case 0x408: /* Interrupt both edges */ + r = s->ibe; + break; + case 0x40c: /* Interrupt event */ + r = s->iev; + break; + case 0x410: /* Interrupt mask */ + r = s->im; + break; + case 0x414: /* Raw interrupt status */ + r = s->istate; + break; + case 0x418: /* Masked interrupt status */ + r = s->istate & s->im; + break; + case 0x420: /* Alternate function select */ + r = s->afsel; + break; + case 0x500: /* 2mA drive */ + if (s->id != pl061_id_luminary) { + goto bad_offset; + } + r = s->dr2r; + break; + case 0x504: /* 4mA drive */ + if (s->id != pl061_id_luminary) { + goto bad_offset; + } + r = s->dr4r; + break; + case 0x508: /* 8mA drive */ + if (s->id != pl061_id_luminary) { + goto bad_offset; + } + r = s->dr8r; + break; + case 0x50c: /* Open drain */ + if (s->id != pl061_id_luminary) { + goto bad_offset; + } + r = s->odr; + break; + case 0x510: /* Pull-up */ + if (s->id != pl061_id_luminary) { + goto bad_offset; + } + r = s->pur; + break; + case 0x514: /* Pull-down */ + if (s->id != pl061_id_luminary) { + goto bad_offset; + } + r = s->pdr; + break; + case 0x518: /* Slew rate control */ + if (s->id != pl061_id_luminary) { + goto bad_offset; + } + r = s->slr; + break; + case 0x51c: /* Digital enable */ + if (s->id != pl061_id_luminary) { + goto bad_offset; + } + r = s->den; + break; + case 0x520: /* Lock */ + if (s->id != pl061_id_luminary) { + goto bad_offset; + } + r = s->locked; + break; + case 0x524: /* Commit */ + if (s->id != pl061_id_luminary) { + goto bad_offset; + } + r = s->cr; + break; + case 0x528: /* Analog mode select */ + if (s->id != pl061_id_luminary) { + goto bad_offset; + } + r = s->amsel; + break; + case 0xfd0 ... 0xfff: /* ID registers */ + r = s->id[(offset - 0xfd0) >> 2]; + break; + default: + bad_offset: + qemu_log_mask(LOG_GUEST_ERROR, + "pl061_read: Bad offset %x\n", (int)offset); + break; + } + + trace_pl061_read(DEVICE(s)->canonical_path, offset, r); + return r; +} + +static void pl061_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + PL061State *s = (PL061State *)opaque; + uint8_t mask; + + trace_pl061_write(DEVICE(s)->canonical_path, offset, value); + + switch (offset) { + case 0 ... 
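
/*
 * In the PL061 the low address bits of a data access double as a line
 * mask: s->data & (offset >> 2) means address bits [9:2] select which
 * GPIO lines the load returns. Standalone check (values invented):
 */
#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint32_t data   = 0xa5;          /* current line state */
    uint32_t mask   = 0x21;          /* caller wants lines 0 and 5 */
    uint32_t offset = mask << 2;     /* mask is encoded in the address */

    assert((data & (offset >> 2)) == 0x21);   /* both lines read as 1 */
    return 0;
}
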
0x3ff: + mask = (offset >> 2) & s->dir; + s->data = (s->data & ~mask) | (value & mask); + pl061_update(s); + return; + case 0x400: /* Direction */ + s->dir = value & 0xff; + break; + case 0x404: /* Interrupt sense */ + s->isense = value & 0xff; + break; + case 0x408: /* Interrupt both edges */ + s->ibe = value & 0xff; + break; + case 0x40c: /* Interrupt event */ + s->iev = value & 0xff; + break; + case 0x410: /* Interrupt mask */ + s->im = value & 0xff; + break; + case 0x41c: /* Interrupt clear */ + s->istate &= ~value; + break; + case 0x420: /* Alternate function select */ + mask = s->cr; + s->afsel = (s->afsel & ~mask) | (value & mask); + break; + case 0x500: /* 2mA drive */ + if (s->id != pl061_id_luminary) { + goto bad_offset; + } + s->dr2r = value & 0xff; + break; + case 0x504: /* 4mA drive */ + if (s->id != pl061_id_luminary) { + goto bad_offset; + } + s->dr4r = value & 0xff; + break; + case 0x508: /* 8mA drive */ + if (s->id != pl061_id_luminary) { + goto bad_offset; + } + s->dr8r = value & 0xff; + break; + case 0x50c: /* Open drain */ + if (s->id != pl061_id_luminary) { + goto bad_offset; + } + s->odr = value & 0xff; + break; + case 0x510: /* Pull-up */ + if (s->id != pl061_id_luminary) { + goto bad_offset; + } + s->pur = value & 0xff; + break; + case 0x514: /* Pull-down */ + if (s->id != pl061_id_luminary) { + goto bad_offset; + } + s->pdr = value & 0xff; + break; + case 0x518: /* Slew rate control */ + if (s->id != pl061_id_luminary) { + goto bad_offset; + } + s->slr = value & 0xff; + break; + case 0x51c: /* Digital enable */ + if (s->id != pl061_id_luminary) { + goto bad_offset; + } + s->den = value & 0xff; + break; + case 0x520: /* Lock */ + if (s->id != pl061_id_luminary) { + goto bad_offset; + } + s->locked = (value != 0xacce551); + break; + case 0x524: /* Commit */ + if (s->id != pl061_id_luminary) { + goto bad_offset; + } + if (!s->locked) + s->cr = value & 0xff; + break; + case 0x528: + if (s->id != pl061_id_luminary) { + goto bad_offset; + } + s->amsel = value & 0xff; + break; + default: + bad_offset: + qemu_log_mask(LOG_GUEST_ERROR, + "pl061_write: Bad offset %x\n", (int)offset); + return; + } + pl061_update(s); + return; +} + +static void pl061_enter_reset(Object *obj, ResetType type) +{ + PL061State *s = PL061(obj); + + trace_pl061_reset(DEVICE(s)->canonical_path); + + /* reset values from PL061 TRM, Stellaris LM3S5P31 & LM3S8962 Data Sheet */ + + /* + * FIXME: For the LM3S6965, not all of the PL061 instances have the + * same reset values for GPIOPUR, GPIOAFSEL and GPIODEN, so in theory + * we should allow the board to configure these via properties. + * In practice, we don't wire anything up to the affected GPIO lines + * (PB7, PC0, PC1, PC2, PC3 -- they're used for JTAG), so we can + * get away with this inaccuracy. 
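
/*
 * The Luminary lock/commit protocol above: GPIOLOCK unlocks only on
 * the magic value 0xacce551, and GPIOCR writes are dropped while
 * locked. A standalone reduction of just those two cases (cr state
 * invented; only locked = 1 matches pl061_enter_reset):
 */
#include <assert.h>
#include <stdint.h>

static uint32_t locked = 1;
static uint32_t cr;

static void write_lock(uint32_t v)   { locked = (v != 0xacce551); }
static void write_commit(uint32_t v) { if (!locked) cr = v & 0xff; }

int main(void)
{
    write_commit(0x80);          /* ignored: still locked */
    assert(cr == 0);
    write_lock(0xacce551);       /* magic unlock value */
    write_commit(0x80);
    assert(cr == 0x80);
    return 0;
}
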
+ */ + s->data = 0; + s->old_in_data = 0; + s->dir = 0; + s->isense = 0; + s->ibe = 0; + s->iev = 0; + s->im = 0; + s->istate = 0; + s->afsel = 0; + s->dr2r = 0xff; + s->dr4r = 0; + s->dr8r = 0; + s->odr = 0; + s->pur = 0; + s->pdr = 0; + s->slr = 0; + s->den = 0; + s->locked = 1; + s->cr = 0xff; + s->amsel = 0; +} + +static void pl061_hold_reset(Object *obj) +{ + PL061State *s = PL061(obj); + int i, level; + uint8_t floating = pl061_floating(s); + uint8_t pullups = pl061_pullups(s); + + for (i = 0; i < N_GPIOS; i++) { + if (extract32(floating, i, 1)) { + continue; + } + level = extract32(pullups, i, 1); + trace_pl061_set_output(DEVICE(s)->canonical_path, i, level); + qemu_set_irq(s->out[i], level); + } + s->old_out_data = pullups; +} + +static void pl061_set_irq(void * opaque, int irq, int level) +{ + PL061State *s = (PL061State *)opaque; + uint8_t mask; + + mask = 1 << irq; + if ((s->dir & mask) == 0) { + s->data &= ~mask; + if (level) + s->data |= mask; + pl061_update(s); + } +} + +static const MemoryRegionOps pl061_ops = { + .read = pl061_read, + .write = pl061_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void pl061_luminary_init(Object *obj) +{ + PL061State *s = PL061(obj); + + s->id = pl061_id_luminary; +} + +static void pl061_init(Object *obj) +{ + PL061State *s = PL061(obj); + DeviceState *dev = DEVICE(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + + s->id = pl061_id; + + memory_region_init_io(&s->iomem, obj, &pl061_ops, s, "pl061", 0x1000); + sysbus_init_mmio(sbd, &s->iomem); + sysbus_init_irq(sbd, &s->irq); + qdev_init_gpio_in(dev, pl061_set_irq, N_GPIOS); + qdev_init_gpio_out(dev, s->out, N_GPIOS); +} + +static void pl061_realize(DeviceState *dev, Error **errp) +{ + PL061State *s = PL061(dev); + + if (s->pullups > 0xff) { + error_setg(errp, "pullups property must be between 0 and 0xff"); + return; + } + if (s->pulldowns > 0xff) { + error_setg(errp, "pulldowns property must be between 0 and 0xff"); + return; + } + if (s->pullups & s->pulldowns) { + error_setg(errp, "no bit may be set both in pullups and pulldowns"); + return; + } +} + +static Property pl061_props[] = { + DEFINE_PROP_UINT32("pullups", PL061State, pullups, 0xff), + DEFINE_PROP_UINT32("pulldowns", PL061State, pulldowns, 0x0), + DEFINE_PROP_END_OF_LIST() +}; + +static void pl061_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + ResettableClass *rc = RESETTABLE_CLASS(klass); + + dc->vmsd = &vmstate_pl061; + dc->realize = pl061_realize; + device_class_set_props(dc, pl061_props); + rc->phases.enter = pl061_enter_reset; + rc->phases.hold = pl061_hold_reset; +} + +static const TypeInfo pl061_info = { + .name = TYPE_PL061, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(PL061State), + .instance_init = pl061_init, + .class_init = pl061_class_init, +}; + +static const TypeInfo pl061_luminary_info = { + .name = "pl061_luminary", + .parent = TYPE_PL061, + .instance_init = pl061_luminary_init, +}; + +static void pl061_register_types(void) +{ + type_register_static(&pl061_info); + type_register_static(&pl061_luminary_info); +} + +type_init(pl061_register_types) diff --git a/hw/gpio/sifive_gpio.c b/hw/gpio/sifive_gpio.c new file mode 100644 index 000000000..78bf29e99 --- /dev/null +++ b/hw/gpio/sifive_gpio.c @@ -0,0 +1,397 @@ +/* + * SiFive System-on-Chip general purpose input/output register definition + * + * Copyright 2019 AdaCore + * + * Base on nrf51_gpio.c: + * + * Copyright 2018 Steffen Görtz + * + * This code is licensed under the GPL version 2 or later. 
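
/*
 * Given the realize() checks above, a board wanting lines 0-3 pulled
 * low and lines 4-7 pulled up would configure the properties like this
 * (sketch only; IRQ and GPIO wiring of the rest of the board omitted):
 */
static DeviceState *board_create_pl061(void)
{
    DeviceState *dev = qdev_new(TYPE_PL061);

    qdev_prop_set_uint32(dev, "pullups", 0xf0);
    qdev_prop_set_uint32(dev, "pulldowns", 0x0f);  /* must not overlap */
    sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
    return dev;
}
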
See + * the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "hw/irq.h" +#include "hw/qdev-properties.h" +#include "hw/gpio/sifive_gpio.h" +#include "migration/vmstate.h" +#include "trace.h" + +static void update_output_irq(SIFIVEGPIOState *s) +{ + uint32_t pending; + uint32_t pin; + + pending = s->high_ip & s->high_ie; + pending |= s->low_ip & s->low_ie; + pending |= s->rise_ip & s->rise_ie; + pending |= s->fall_ip & s->fall_ie; + + for (int i = 0; i < s->ngpio; i++) { + pin = 1 << i; + qemu_set_irq(s->irq[i], (pending & pin) != 0); + trace_sifive_gpio_update_output_irq(i, (pending & pin) != 0); + } +} + +static void update_state(SIFIVEGPIOState *s) +{ + size_t i; + bool prev_ival, in, in_mask, port, out_xor, pull, output_en, input_en, + rise_ip, fall_ip, low_ip, high_ip, oval, actual_value, ival; + + for (i = 0; i < s->ngpio; i++) { + + prev_ival = extract32(s->value, i, 1); + in = extract32(s->in, i, 1); + in_mask = extract32(s->in_mask, i, 1); + port = extract32(s->port, i, 1); + out_xor = extract32(s->out_xor, i, 1); + pull = extract32(s->pue, i, 1); + output_en = extract32(s->output_en, i, 1); + input_en = extract32(s->input_en, i, 1); + rise_ip = extract32(s->rise_ip, i, 1); + fall_ip = extract32(s->fall_ip, i, 1); + low_ip = extract32(s->low_ip, i, 1); + high_ip = extract32(s->high_ip, i, 1); + + /* Output value (IOF not supported) */ + oval = output_en && (port ^ out_xor); + + /* Pin both driven externally and internally */ + if (output_en && in_mask) { + qemu_log_mask(LOG_GUEST_ERROR, "GPIO pin %zu short circuited\n", i); + } + + if (in_mask) { + /* The pin is driven by external device */ + actual_value = in; + } else if (output_en) { + /* The pin is driven by internal circuit */ + actual_value = oval; + } else { + /* Floating? 
Apply pull-up resistor */ + actual_value = pull; + } + + if (output_en) { + qemu_set_irq(s->output[i], actual_value); + } + + /* Input value */ + ival = input_en && actual_value; + + /* Interrupts */ + high_ip = high_ip || ival; + s->high_ip = deposit32(s->high_ip, i, 1, high_ip); + + low_ip = low_ip || !ival; + s->low_ip = deposit32(s->low_ip, i, 1, low_ip); + + rise_ip = rise_ip || (ival && !prev_ival); + s->rise_ip = deposit32(s->rise_ip, i, 1, rise_ip); + + fall_ip = fall_ip || (!ival && prev_ival); + s->fall_ip = deposit32(s->fall_ip, i, 1, fall_ip); + + /* Update value */ + s->value = deposit32(s->value, i, 1, ival); + } + update_output_irq(s); +} + +static uint64_t sifive_gpio_read(void *opaque, hwaddr offset, unsigned int size) +{ + SIFIVEGPIOState *s = SIFIVE_GPIO(opaque); + uint64_t r = 0; + + switch (offset) { + case SIFIVE_GPIO_REG_VALUE: + r = s->value; + break; + + case SIFIVE_GPIO_REG_INPUT_EN: + r = s->input_en; + break; + + case SIFIVE_GPIO_REG_OUTPUT_EN: + r = s->output_en; + break; + + case SIFIVE_GPIO_REG_PORT: + r = s->port; + break; + + case SIFIVE_GPIO_REG_PUE: + r = s->pue; + break; + + case SIFIVE_GPIO_REG_DS: + r = s->ds; + break; + + case SIFIVE_GPIO_REG_RISE_IE: + r = s->rise_ie; + break; + + case SIFIVE_GPIO_REG_RISE_IP: + r = s->rise_ip; + break; + + case SIFIVE_GPIO_REG_FALL_IE: + r = s->fall_ie; + break; + + case SIFIVE_GPIO_REG_FALL_IP: + r = s->fall_ip; + break; + + case SIFIVE_GPIO_REG_HIGH_IE: + r = s->high_ie; + break; + + case SIFIVE_GPIO_REG_HIGH_IP: + r = s->high_ip; + break; + + case SIFIVE_GPIO_REG_LOW_IE: + r = s->low_ie; + break; + + case SIFIVE_GPIO_REG_LOW_IP: + r = s->low_ip; + break; + + case SIFIVE_GPIO_REG_IOF_EN: + r = s->iof_en; + break; + + case SIFIVE_GPIO_REG_IOF_SEL: + r = s->iof_sel; + break; + + case SIFIVE_GPIO_REG_OUT_XOR: + r = s->out_xor; + break; + + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: bad read offset 0x%" HWADDR_PRIx "\n", + __func__, offset); + } + + trace_sifive_gpio_read(offset, r); + + return r; +} + +static void sifive_gpio_write(void *opaque, hwaddr offset, + uint64_t value, unsigned int size) +{ + SIFIVEGPIOState *s = SIFIVE_GPIO(opaque); + + trace_sifive_gpio_write(offset, value); + + switch (offset) { + + case SIFIVE_GPIO_REG_INPUT_EN: + s->input_en = value; + break; + + case SIFIVE_GPIO_REG_OUTPUT_EN: + s->output_en = value; + break; + + case SIFIVE_GPIO_REG_PORT: + s->port = value; + break; + + case SIFIVE_GPIO_REG_PUE: + s->pue = value; + break; + + case SIFIVE_GPIO_REG_DS: + s->ds = value; + break; + + case SIFIVE_GPIO_REG_RISE_IE: + s->rise_ie = value; + break; + + case SIFIVE_GPIO_REG_RISE_IP: + /* Write 1 to clear */ + s->rise_ip &= ~value; + break; + + case SIFIVE_GPIO_REG_FALL_IE: + s->fall_ie = value; + break; + + case SIFIVE_GPIO_REG_FALL_IP: + /* Write 1 to clear */ + s->fall_ip &= ~value; + break; + + case SIFIVE_GPIO_REG_HIGH_IE: + s->high_ie = value; + break; + + case SIFIVE_GPIO_REG_HIGH_IP: + /* Write 1 to clear */ + s->high_ip &= ~value; + break; + + case SIFIVE_GPIO_REG_LOW_IE: + s->low_ie = value; + break; + + case SIFIVE_GPIO_REG_LOW_IP: + /* Write 1 to clear */ + s->low_ip &= ~value; + break; + + case SIFIVE_GPIO_REG_IOF_EN: + s->iof_en = value; + break; + + case SIFIVE_GPIO_REG_IOF_SEL: + s->iof_sel = value; + break; + + case SIFIVE_GPIO_REG_OUT_XOR: + s->out_xor = value; + break; + + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: bad write offset 0x%" HWADDR_PRIx "\n", + __func__, offset); + } + + update_state(s); +} + +static const MemoryRegionOps gpio_ops = { + .read = 
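
/*
 * The per-pin resolution in update_state() above is a fixed priority
 * chain: an external driver (in_mask) wins, then the output latch,
 * then the pull-up. Standalone reduction of that chain:
 */
#include <assert.h>
#include <stdbool.h>

static bool resolve(bool in, bool in_mask, bool oval, bool output_en,
                    bool pull)
{
    if (in_mask) {            /* pin driven by an external device */
        return in;
    } else if (output_en) {   /* pin driven by the internal latch */
        return oval;
    }
    return pull;              /* floating: pull-up value decides */
}

int main(void)
{
    assert(resolve(false, true, true, true, true) == false);   /* external */
    assert(resolve(true, false, true, true, false) == true);   /* latch */
    assert(resolve(false, false, false, false, true) == true); /* pull-up */
    return 0;
}
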
sifive_gpio_read, + .write = sifive_gpio_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .impl.min_access_size = 4, + .impl.max_access_size = 4, +}; + +static void sifive_gpio_set(void *opaque, int line, int value) +{ + SIFIVEGPIOState *s = SIFIVE_GPIO(opaque); + + trace_sifive_gpio_set(line, value); + + assert(line >= 0 && line < SIFIVE_GPIO_PINS); + + s->in_mask = deposit32(s->in_mask, line, 1, value >= 0); + if (value >= 0) { + s->in = deposit32(s->in, line, 1, value != 0); + } + + update_state(s); +} + +static void sifive_gpio_reset(DeviceState *dev) +{ + SIFIVEGPIOState *s = SIFIVE_GPIO(dev); + + s->value = 0; + s->input_en = 0; + s->output_en = 0; + s->port = 0; + s->pue = 0; + s->ds = 0; + s->rise_ie = 0; + s->rise_ip = 0; + s->fall_ie = 0; + s->fall_ip = 0; + s->high_ie = 0; + s->high_ip = 0; + s->low_ie = 0; + s->low_ip = 0; + s->iof_en = 0; + s->iof_sel = 0; + s->out_xor = 0; + s->in = 0; + s->in_mask = 0; +} + +static const VMStateDescription vmstate_sifive_gpio = { + .name = TYPE_SIFIVE_GPIO, + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(value, SIFIVEGPIOState), + VMSTATE_UINT32(input_en, SIFIVEGPIOState), + VMSTATE_UINT32(output_en, SIFIVEGPIOState), + VMSTATE_UINT32(port, SIFIVEGPIOState), + VMSTATE_UINT32(pue, SIFIVEGPIOState), + VMSTATE_UINT32(rise_ie, SIFIVEGPIOState), + VMSTATE_UINT32(rise_ip, SIFIVEGPIOState), + VMSTATE_UINT32(fall_ie, SIFIVEGPIOState), + VMSTATE_UINT32(fall_ip, SIFIVEGPIOState), + VMSTATE_UINT32(high_ie, SIFIVEGPIOState), + VMSTATE_UINT32(high_ip, SIFIVEGPIOState), + VMSTATE_UINT32(low_ie, SIFIVEGPIOState), + VMSTATE_UINT32(low_ip, SIFIVEGPIOState), + VMSTATE_UINT32(iof_en, SIFIVEGPIOState), + VMSTATE_UINT32(iof_sel, SIFIVEGPIOState), + VMSTATE_UINT32(out_xor, SIFIVEGPIOState), + VMSTATE_UINT32(in, SIFIVEGPIOState), + VMSTATE_UINT32(in_mask, SIFIVEGPIOState), + VMSTATE_END_OF_LIST() + } +}; + +static Property sifive_gpio_properties[] = { + DEFINE_PROP_UINT32("ngpio", SIFIVEGPIOState, ngpio, SIFIVE_GPIO_PINS), + DEFINE_PROP_END_OF_LIST(), +}; + +static void sifive_gpio_realize(DeviceState *dev, Error **errp) +{ + SIFIVEGPIOState *s = SIFIVE_GPIO(dev); + + memory_region_init_io(&s->mmio, OBJECT(dev), &gpio_ops, s, + TYPE_SIFIVE_GPIO, SIFIVE_GPIO_SIZE); + + sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->mmio); + + for (int i = 0; i < s->ngpio; i++) { + sysbus_init_irq(SYS_BUS_DEVICE(dev), &s->irq[i]); + } + + qdev_init_gpio_in(DEVICE(s), sifive_gpio_set, s->ngpio); + qdev_init_gpio_out(DEVICE(s), s->output, s->ngpio); +} + +static void sifive_gpio_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + device_class_set_props(dc, sifive_gpio_properties); + dc->vmsd = &vmstate_sifive_gpio; + dc->realize = sifive_gpio_realize; + dc->reset = sifive_gpio_reset; + dc->desc = "SiFive GPIO"; +} + +static const TypeInfo sifive_gpio_info = { + .name = TYPE_SIFIVE_GPIO, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(SIFIVEGPIOState), + .class_init = sifive_gpio_class_init +}; + +static void sifive_gpio_register_types(void) +{ + type_register_static(&sifive_gpio_info); +} + +type_init(sifive_gpio_register_types) diff --git a/hw/gpio/trace-events b/hw/gpio/trace-events new file mode 100644 index 000000000..1dab99c56 --- /dev/null +++ b/hw/gpio/trace-events @@ -0,0 +1,29 @@ +# See docs/devel/tracing.rst for syntax documentation. 
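
/*
 * sifive_gpio_set() above overloads the qemu_irq level: a negative
 * value releases the line (clears its in_mask bit) while 0 and 1
 * actively drive it. Sketch of how a peripheral model could use that
 * tri-state convention, assuming the board fetched the line handle
 * with qdev_get_gpio_in():
 */
static void drive_then_release(qemu_irq line)
{
    qemu_set_irq(line, 1);     /* drive high */
    qemu_set_irq(line, 0);     /* drive low */
    qemu_set_irq(line, -1);    /* stop driving; pue pull-up takes over */
}
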
+ +# npcm7xx_gpio.c +npcm7xx_gpio_read(const char *id, uint64_t offset, uint64_t value) " %s offset: 0x%04" PRIx64 " value 0x%08" PRIx64 +npcm7xx_gpio_write(const char *id, uint64_t offset, uint64_t value) "%s offset: 0x%04" PRIx64 " value 0x%08" PRIx64 +npcm7xx_gpio_set_input(const char *id, int32_t line, int32_t level) "%s line: %" PRIi32 " level: %" PRIi32 +npcm7xx_gpio_set_output(const char *id, int32_t line, int32_t level) "%s line: %" PRIi32 " level: %" PRIi32 +npcm7xx_gpio_update_events(const char *id, uint32_t evst, uint32_t even) "%s evst: 0x%08" PRIx32 " even: 0x%08" PRIx32 + +# nrf51_gpio.c +nrf51_gpio_read(uint64_t offset, uint64_t r) "offset 0x%" PRIx64 " value 0x%" PRIx64 +nrf51_gpio_write(uint64_t offset, uint64_t value) "offset 0x%" PRIx64 " value 0x%" PRIx64 +nrf51_gpio_set(int64_t line, int64_t value) "line %" PRIi64 " value %" PRIi64 +nrf51_gpio_update_output_irq(int64_t line, int64_t value) "line %" PRIi64 " value %" PRIi64 + +# pl061.c +pl061_update(const char *id, uint32_t dir, uint32_t data, uint32_t pullups, uint32_t floating) "%s GPIODIR 0x%x GPIODATA 0x%x pullups 0x%x floating 0x%x" +pl061_set_output(const char *id, int gpio, int level) "%s setting output %d to %d" +pl061_input_change(const char *id, int gpio, int level) "%s input %d changed to %d" +pl061_update_istate(const char *id, uint32_t istate, uint32_t im, int level) "%s GPIORIS 0x%x GPIOIE 0x%x interrupt level %d" +pl061_read(const char *id, uint64_t offset, uint64_t r) "%s offset 0x%" PRIx64 " value 0x%" PRIx64 +pl061_write(const char *id, uint64_t offset, uint64_t value) "%s offset 0x%" PRIx64 " value 0x%" PRIx64 +pl061_reset(const char *id) "%s reset" + +# sifive_gpio.c +sifive_gpio_read(uint64_t offset, uint64_t r) "offset 0x%" PRIx64 " value 0x%" PRIx64 +sifive_gpio_write(uint64_t offset, uint64_t value) "offset 0x%" PRIx64 " value 0x%" PRIx64 +sifive_gpio_set(int64_t line, int64_t value) "line %" PRIi64 " value %" PRIi64 +sifive_gpio_update_output_irq(int64_t line, int64_t value) "line %" PRIi64 " value %" PRIi64 diff --git a/hw/gpio/trace.h b/hw/gpio/trace.h new file mode 100644 index 000000000..8b139071b --- /dev/null +++ b/hw/gpio/trace.h @@ -0,0 +1 @@ +#include "trace/trace-hw_gpio.h" diff --git a/hw/gpio/zaurus.c b/hw/gpio/zaurus.c new file mode 100644 index 000000000..7cf52a504 --- /dev/null +++ b/hw/gpio/zaurus.c @@ -0,0 +1,305 @@ +/* + * Copyright (c) 2006-2008 Openedhand Ltd. + * Written by Andrzej Zaborowski + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 or + * (at your option) version 3 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . 
+ */ + +#include "qemu/osdep.h" +#include "hw/irq.h" +#include "hw/arm/sharpsl.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "qemu/module.h" +#include "qemu/log.h" +#include "qom/object.h" + +/* SCOOP devices */ + +#define TYPE_SCOOP "scoop" +OBJECT_DECLARE_SIMPLE_TYPE(ScoopInfo, SCOOP) + +struct ScoopInfo { + SysBusDevice parent_obj; + + qemu_irq handler[16]; + MemoryRegion iomem; + uint16_t status; + uint16_t power; + uint32_t gpio_level; + uint32_t gpio_dir; + uint32_t prev_level; + + uint16_t mcr; + uint16_t cdr; + uint16_t ccr; + uint16_t irr; + uint16_t imr; + uint16_t isr; +}; + +#define SCOOP_MCR 0x00 +#define SCOOP_CDR 0x04 +#define SCOOP_CSR 0x08 +#define SCOOP_CPR 0x0c +#define SCOOP_CCR 0x10 +#define SCOOP_IRR_IRM 0x14 +#define SCOOP_IMR 0x18 +#define SCOOP_ISR 0x1c +#define SCOOP_GPCR 0x20 +#define SCOOP_GPWR 0x24 +#define SCOOP_GPRR 0x28 + +static inline void scoop_gpio_handler_update(ScoopInfo *s) { + uint32_t level, diff; + int bit; + level = s->gpio_level & s->gpio_dir; + + for (diff = s->prev_level ^ level; diff; diff ^= 1 << bit) { + bit = ctz32(diff); + qemu_set_irq(s->handler[bit], (level >> bit) & 1); + } + + s->prev_level = level; +} + +static uint64_t scoop_read(void *opaque, hwaddr addr, + unsigned size) +{ + ScoopInfo *s = (ScoopInfo *) opaque; + + switch (addr & 0x3f) { + case SCOOP_MCR: + return s->mcr; + case SCOOP_CDR: + return s->cdr; + case SCOOP_CSR: + return s->status; + case SCOOP_CPR: + return s->power; + case SCOOP_CCR: + return s->ccr; + case SCOOP_IRR_IRM: + return s->irr; + case SCOOP_IMR: + return s->imr; + case SCOOP_ISR: + return s->isr; + case SCOOP_GPCR: + return s->gpio_dir; + case SCOOP_GPWR: + case SCOOP_GPRR: + return s->gpio_level; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "scoop_read: bad register offset 0x%02" HWADDR_PRIx "\n", + addr); + } + + return 0; +} + +static void scoop_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + ScoopInfo *s = (ScoopInfo *) opaque; + value &= 0xffff; + + switch (addr & 0x3f) { + case SCOOP_MCR: + s->mcr = value; + break; + case SCOOP_CDR: + s->cdr = value; + break; + case SCOOP_CPR: + s->power = value; + if (value & 0x80) + s->power |= 0x8040; + break; + case SCOOP_CCR: + s->ccr = value; + break; + case SCOOP_IRR_IRM: + s->irr = value; + break; + case SCOOP_IMR: + s->imr = value; + break; + case SCOOP_ISR: + s->isr = value; + break; + case SCOOP_GPCR: + s->gpio_dir = value; + scoop_gpio_handler_update(s); + break; + case SCOOP_GPWR: + case SCOOP_GPRR: /* GPRR is probably R/O in real HW */ + s->gpio_level = value & s->gpio_dir; + scoop_gpio_handler_update(s); + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "scoop_write: bad register offset 0x%02" HWADDR_PRIx "\n", + addr); + } +} + +static const MemoryRegionOps scoop_ops = { + .read = scoop_read, + .write = scoop_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void scoop_gpio_set(void *opaque, int line, int level) +{ + ScoopInfo *s = (ScoopInfo *) opaque; + + if (level) + s->gpio_level |= (1 << line); + else + s->gpio_level &= ~(1 << line); +} + +static void scoop_init(Object *obj) +{ + DeviceState *dev = DEVICE(obj); + ScoopInfo *s = SCOOP(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + + s->status = 0x02; + qdev_init_gpio_out(dev, s->handler, 16); + qdev_init_gpio_in(dev, scoop_gpio_set, 16); + memory_region_init_io(&s->iomem, obj, &scoop_ops, s, "scoop", 0x1000); + + sysbus_init_mmio(sbd, &s->iomem); +} + +static int scoop_post_load(void *opaque, int version_id) +{ + ScoopInfo *s = 
(ScoopInfo *) opaque; + int i; + uint32_t level; + + level = s->gpio_level & s->gpio_dir; + + for (i = 0; i < 16; i++) { + qemu_set_irq(s->handler[i], (level >> i) & 1); + } + + s->prev_level = level; + + return 0; +} + +static bool is_version_0 (void *opaque, int version_id) +{ + return version_id == 0; +} + +static bool vmstate_scoop_validate(void *opaque, int version_id) +{ + ScoopInfo *s = opaque; + + return !(s->prev_level & 0xffff0000) && + !(s->gpio_level & 0xffff0000) && + !(s->gpio_dir & 0xffff0000); +} + +static const VMStateDescription vmstate_scoop_regs = { + .name = "scoop", + .version_id = 1, + .minimum_version_id = 0, + .post_load = scoop_post_load, + .fields = (VMStateField[]) { + VMSTATE_UINT16(status, ScoopInfo), + VMSTATE_UINT16(power, ScoopInfo), + VMSTATE_UINT32(gpio_level, ScoopInfo), + VMSTATE_UINT32(gpio_dir, ScoopInfo), + VMSTATE_UINT32(prev_level, ScoopInfo), + VMSTATE_VALIDATE("irq levels are 16 bit", vmstate_scoop_validate), + VMSTATE_UINT16(mcr, ScoopInfo), + VMSTATE_UINT16(cdr, ScoopInfo), + VMSTATE_UINT16(ccr, ScoopInfo), + VMSTATE_UINT16(irr, ScoopInfo), + VMSTATE_UINT16(imr, ScoopInfo), + VMSTATE_UINT16(isr, ScoopInfo), + VMSTATE_UNUSED_TEST(is_version_0, 2), + VMSTATE_END_OF_LIST(), + }, +}; + +static void scoop_sysbus_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->desc = "Scoop2 Sharp custom ASIC"; + dc->vmsd = &vmstate_scoop_regs; +} + +static const TypeInfo scoop_sysbus_info = { + .name = TYPE_SCOOP, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(ScoopInfo), + .instance_init = scoop_init, + .class_init = scoop_sysbus_class_init, +}; + +static void scoop_register_types(void) +{ + type_register_static(&scoop_sysbus_info); +} + +type_init(scoop_register_types) + +/* Write the bootloader parameters memory area. */ + +#define MAGIC_CHG(a, b, c, d) ((d << 24) | (c << 16) | (b << 8) | a) + +static struct QEMU_PACKED sl_param_info { + uint32_t comadj_keyword; + int32_t comadj; + + uint32_t uuid_keyword; + char uuid[16]; + + uint32_t touch_keyword; + int32_t touch_xp; + int32_t touch_yp; + int32_t touch_xd; + int32_t touch_yd; + + uint32_t adadj_keyword; + int32_t adadj; + + uint32_t phad_keyword; + int32_t phadadj; +} zaurus_bootparam = { + .comadj_keyword = MAGIC_CHG('C', 'M', 'A', 'D'), + .comadj = 125, + .uuid_keyword = MAGIC_CHG('U', 'U', 'I', 'D'), + .uuid = { -1 }, + .touch_keyword = MAGIC_CHG('T', 'U', 'C', 'H'), + .touch_xp = -1, + .adadj_keyword = MAGIC_CHG('B', 'V', 'A', 'D'), + .adadj = -1, + .phad_keyword = MAGIC_CHG('P', 'H', 'A', 'D'), + .phadadj = 0x01, +}; + +void sl_bootparam_write(hwaddr ptr) +{ + cpu_physical_memory_write(ptr, &zaurus_bootparam, + sizeof(struct sl_param_info)); +} diff --git a/hw/hppa/Kconfig b/hw/hppa/Kconfig new file mode 100644 index 000000000..22948db02 --- /dev/null +++ b/hw/hppa/Kconfig @@ -0,0 +1,16 @@ +config DINO + bool + imply PCI_DEVICES + imply E1000_PCI + imply VIRTIO_VGA + select PCI + select SERIAL + select ISA_BUS + select I8259 + select IDE_CMD646 + select MC146818RTC + select LSI_SCSI_PCI + select LASI_82596 + select LASIPS2 + select PARALLEL + select ARTIST diff --git a/hw/hppa/dino.c b/hw/hppa/dino.c new file mode 100644 index 000000000..eab96dd84 --- /dev/null +++ b/hw/hppa/dino.c @@ -0,0 +1,608 @@ +/* + * HP-PARISC Dino PCI chipset emulation, as in B160L and similiar machines + * + * (C) 2017-2019 by Helge Deller + * + * This work is licensed under the GNU GPL license version 2 or later. 
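
/*
 * MAGIC_CHG() above packs a four-character keyword little-endian, so
 * the first character lands in the lowest byte and the bootloader can
 * scan the parameter area for ASCII tags. Standalone check of the
 * arithmetic:
 */
#include <assert.h>

#define MAGIC_CHG(a, b, c, d) ((d << 24) | (c << 16) | (b << 8) | a)

int main(void)
{
    /* 'C' = 0x43 in the low byte, 'D' = 0x44 in the high byte */
    assert(MAGIC_CHG('C', 'M', 'A', 'D') == 0x44414d43);
    return 0;
}
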
+ * + * Documentation available at: + * https://parisc.wiki.kernel.org/images-parisc/9/91/Dino_ers.pdf + * https://parisc.wiki.kernel.org/images-parisc/7/70/Dino_3_1_Errata.pdf + */ + +#include "qemu/osdep.h" +#include "qemu/module.h" +#include "qemu/units.h" +#include "qapi/error.h" +#include "hw/irq.h" +#include "hw/pci/pci.h" +#include "hw/pci/pci_bus.h" +#include "migration/vmstate.h" +#include "hppa_sys.h" +#include "trace.h" +#include "qom/object.h" + + +#define TYPE_DINO_PCI_HOST_BRIDGE "dino-pcihost" + +#define DINO_IAR0 0x004 +#define DINO_IODC 0x008 +#define DINO_IRR0 0x00C /* RO */ +#define DINO_IAR1 0x010 +#define DINO_IRR1 0x014 /* RO */ +#define DINO_IMR 0x018 +#define DINO_IPR 0x01C +#define DINO_TOC_ADDR 0x020 +#define DINO_ICR 0x024 +#define DINO_ILR 0x028 /* RO */ +#define DINO_IO_COMMAND 0x030 /* WO */ +#define DINO_IO_STATUS 0x034 /* RO */ +#define DINO_IO_CONTROL 0x038 +#define DINO_IO_GSC_ERR_RESP 0x040 /* RO */ +#define DINO_IO_ERR_INFO 0x044 /* RO */ +#define DINO_IO_PCI_ERR_RESP 0x048 /* RO */ +#define DINO_IO_FBB_EN 0x05c +#define DINO_IO_ADDR_EN 0x060 +#define DINO_PCI_CONFIG_ADDR 0x064 +#define DINO_PCI_CONFIG_DATA 0x068 +#define DINO_PCI_IO_DATA 0x06c +#define DINO_PCI_MEM_DATA 0x070 /* Dino 3.x only */ +#define DINO_GSC2X_CONFIG 0x7b4 /* RO */ +#define DINO_GMASK 0x800 +#define DINO_PAMR 0x804 +#define DINO_PAPR 0x808 +#define DINO_DAMODE 0x80c +#define DINO_PCICMD 0x810 +#define DINO_PCISTS 0x814 /* R/WC */ +#define DINO_MLTIM 0x81c +#define DINO_BRDG_FEAT 0x820 +#define DINO_PCIROR 0x824 +#define DINO_PCIWOR 0x828 +#define DINO_TLTIM 0x830 + +#define DINO_IRQS 11 /* bits 0-10 are architected */ +#define DINO_IRR_MASK 0x5ff /* only 10 bits are implemented */ +#define DINO_LOCAL_IRQS (DINO_IRQS + 1) +#define DINO_MASK_IRQ(x) (1 << (x)) + +#define PCIINTA 0x001 +#define PCIINTB 0x002 +#define PCIINTC 0x004 +#define PCIINTD 0x008 +#define PCIINTE 0x010 +#define PCIINTF 0x020 +#define GSCEXTINT 0x040 +/* #define xxx 0x080 - bit 7 is "default" */ +/* #define xxx 0x100 - bit 8 not used */ +/* #define xxx 0x200 - bit 9 not used */ +#define RS232INT 0x400 + +#define DINO_MEM_CHUNK_SIZE (8 * MiB) + +OBJECT_DECLARE_SIMPLE_TYPE(DinoState, DINO_PCI_HOST_BRIDGE) + +#define DINO800_REGS (1 + (DINO_TLTIM - DINO_GMASK) / 4) +static const uint32_t reg800_keep_bits[DINO800_REGS] = { + MAKE_64BIT_MASK(0, 1), /* GMASK */ + MAKE_64BIT_MASK(0, 7), /* PAMR */ + MAKE_64BIT_MASK(0, 7), /* PAPR */ + MAKE_64BIT_MASK(0, 8), /* DAMODE */ + MAKE_64BIT_MASK(0, 7), /* PCICMD */ + MAKE_64BIT_MASK(0, 9), /* PCISTS */ + MAKE_64BIT_MASK(0, 32), /* Undefined */ + MAKE_64BIT_MASK(0, 8), /* MLTIM */ + MAKE_64BIT_MASK(0, 30), /* BRDG_FEAT */ + MAKE_64BIT_MASK(0, 24), /* PCIROR */ + MAKE_64BIT_MASK(0, 22), /* PCIWOR */ + MAKE_64BIT_MASK(0, 32), /* Undocumented */ + MAKE_64BIT_MASK(0, 9), /* TLTIM */ +}; + +struct DinoState { + PCIHostState parent_obj; + + /* PCI_CONFIG_ADDR is parent_obj.config_reg, via pci_host_conf_be_ops, + so that we can map PCI_CONFIG_DATA to pci_host_data_be_ops. 
*/ + uint32_t config_reg_dino; /* keep original copy, including 2 lowest bits */ + + uint32_t iar0; + uint32_t iar1; + uint32_t imr; + uint32_t ipr; + uint32_t icr; + uint32_t ilr; + uint32_t io_fbb_en; + uint32_t io_addr_en; + uint32_t io_control; + uint32_t toc_addr; + + uint32_t reg800[DINO800_REGS]; + + MemoryRegion this_mem; + MemoryRegion pci_mem; + MemoryRegion pci_mem_alias[32]; + + AddressSpace bm_as; + MemoryRegion bm; + MemoryRegion bm_ram_alias; + MemoryRegion bm_pci_alias; + MemoryRegion bm_cpu_alias; +}; + +/* + * Dino can forward memory accesses from the CPU in the range between + * 0xf0800000 and 0xff000000 to the PCI bus. + */ +static void gsc_to_pci_forwarding(DinoState *s) +{ + uint32_t io_addr_en, tmp; + int enabled, i; + + tmp = extract32(s->io_control, 7, 2); + enabled = (tmp == 0x01); + io_addr_en = s->io_addr_en; + /* Mask out first (=firmware) and last (=Dino) areas. */ + io_addr_en &= ~(BIT(31) | BIT(0)); + + memory_region_transaction_begin(); + for (i = 1; i < 31; i++) { + MemoryRegion *mem = &s->pci_mem_alias[i]; + if (enabled && (io_addr_en & (1U << i))) { + if (!memory_region_is_mapped(mem)) { + uint32_t addr = 0xf0000000 + i * DINO_MEM_CHUNK_SIZE; + memory_region_add_subregion(get_system_memory(), addr, mem); + } + } else if (memory_region_is_mapped(mem)) { + memory_region_del_subregion(get_system_memory(), mem); + } + } + memory_region_transaction_commit(); +} + +static bool dino_chip_mem_valid(void *opaque, hwaddr addr, + unsigned size, bool is_write, + MemTxAttrs attrs) +{ + bool ret = false; + + switch (addr) { + case DINO_IAR0: + case DINO_IAR1: + case DINO_IRR0: + case DINO_IRR1: + case DINO_IMR: + case DINO_IPR: + case DINO_ICR: + case DINO_ILR: + case DINO_IO_CONTROL: + case DINO_IO_FBB_EN: + case DINO_IO_ADDR_EN: + case DINO_PCI_IO_DATA: + case DINO_TOC_ADDR: + case DINO_GMASK ... DINO_PCISTS: + case DINO_MLTIM ... DINO_PCIWOR: + case DINO_TLTIM: + ret = true; + break; + case DINO_PCI_IO_DATA + 2: + ret = (size <= 2); + break; + case DINO_PCI_IO_DATA + 1: + case DINO_PCI_IO_DATA + 3: + ret = (size == 1); + } + trace_dino_chip_mem_valid(addr, ret); + return ret; +} + +static MemTxResult dino_chip_read_with_attrs(void *opaque, hwaddr addr, + uint64_t *data, unsigned size, + MemTxAttrs attrs) +{ + DinoState *s = opaque; + MemTxResult ret = MEMTX_OK; + AddressSpace *io; + uint16_t ioaddr; + uint32_t val; + + switch (addr) { + case DINO_PCI_IO_DATA ... DINO_PCI_IO_DATA + 3: + /* Read from PCI IO space. */ + io = &address_space_io; + ioaddr = s->parent_obj.config_reg + (addr & 3); + switch (size) { + case 1: + val = address_space_ldub(io, ioaddr, attrs, &ret); + break; + case 2: + val = address_space_lduw_be(io, ioaddr, attrs, &ret); + break; + case 4: + val = address_space_ldl_be(io, ioaddr, attrs, &ret); + break; + default: + g_assert_not_reached(); + } + break; + + case DINO_IO_FBB_EN: + val = s->io_fbb_en; + break; + case DINO_IO_ADDR_EN: + val = s->io_addr_en; + break; + case DINO_IO_CONTROL: + val = s->io_control; + break; + + case DINO_IAR0: + val = s->iar0; + break; + case DINO_IAR1: + val = s->iar1; + break; + case DINO_IMR: + val = s->imr; + break; + case DINO_ICR: + val = s->icr; + break; + case DINO_IPR: + val = s->ipr; + /* Any read to IPR clears the register. */ + s->ipr = 0; + break; + case DINO_ILR: + val = s->ilr; + break; + case DINO_IRR0: + val = s->ilr & s->imr & ~s->icr; + break; + case DINO_IRR1: + val = s->ilr & s->imr & s->icr; + break; + case DINO_TOC_ADDR: + val = s->toc_addr; + break; + case DINO_GMASK ... 
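
/*
 * dino_chip_mem_valid() above admits sub-word accesses only on the PCI
 * IO data port, and only when they stay within one 32-bit register:
 * offset +2 may be 1 or 2 bytes wide, +1 and +3 single bytes only.
 * A standalone reduction of that acceptance rule:
 */
#include <assert.h>
#include <stdbool.h>

static bool io_data_access_ok(unsigned byte_off, unsigned size)
{
    switch (byte_off) {          /* offset within DINO_PCI_IO_DATA */
    case 0:  return size <= 4;
    case 2:  return size <= 2;
    case 1:
    case 3:  return size == 1;
    default: return false;
    }
}

int main(void)
{
    assert(io_data_access_ok(0, 4));
    assert(io_data_access_ok(2, 2));
    assert(!io_data_access_ok(1, 2));   /* would straddle byte lanes */
    return 0;
}
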
DINO_TLTIM: + val = s->reg800[(addr - DINO_GMASK) / 4]; + if (addr == DINO_PAMR) { + val &= ~0x01; /* LSB is hardwired to 0 */ + } + if (addr == DINO_MLTIM) { + val &= ~0x07; /* 3 LSB are hardwired to 0 */ + } + if (addr == DINO_BRDG_FEAT) { + val &= ~(0x10710E0ul | 8); /* bits 5-7, 24 & 15 reserved */ + } + break; + + default: + /* Controlled by dino_chip_mem_valid above. */ + g_assert_not_reached(); + } + + trace_dino_chip_read(addr, val); + *data = val; + return ret; +} + +static MemTxResult dino_chip_write_with_attrs(void *opaque, hwaddr addr, + uint64_t val, unsigned size, + MemTxAttrs attrs) +{ + DinoState *s = opaque; + AddressSpace *io; + MemTxResult ret; + uint16_t ioaddr; + int i; + + trace_dino_chip_write(addr, val); + + switch (addr) { + case DINO_IO_DATA ... DINO_PCI_IO_DATA + 3: + /* Write into PCI IO space. */ + io = &address_space_io; + ioaddr = s->parent_obj.config_reg + (addr & 3); + switch (size) { + case 1: + address_space_stb(io, ioaddr, val, attrs, &ret); + break; + case 2: + address_space_stw_be(io, ioaddr, val, attrs, &ret); + break; + case 4: + address_space_stl_be(io, ioaddr, val, attrs, &ret); + break; + default: + g_assert_not_reached(); + } + return ret; + + case DINO_IO_FBB_EN: + s->io_fbb_en = val & 0x03; + break; + case DINO_IO_ADDR_EN: + s->io_addr_en = val; + gsc_to_pci_forwarding(s); + break; + case DINO_IO_CONTROL: + s->io_control = val; + gsc_to_pci_forwarding(s); + break; + + case DINO_IAR0: + s->iar0 = val; + break; + case DINO_IAR1: + s->iar1 = val; + break; + case DINO_IMR: + s->imr = val; + break; + case DINO_ICR: + s->icr = val; + break; + case DINO_IPR: + /* Any write to IPR clears the register. */ + s->ipr = 0; + break; + case DINO_TOC_ADDR: + /* IO_COMMAND of CPU with client_id bits */ + s->toc_addr = 0xFFFA0030 | (val & 0x1e000); + break; + + case DINO_ILR: + case DINO_IRR0: + case DINO_IRR1: + /* These registers are read-only. */ + break; + + case DINO_GMASK ... DINO_TLTIM: + i = (addr - DINO_GMASK) / 4; + val &= reg800_keep_bits[i]; + s->reg800[i] = val; + break; + + default: + /* Controlled by dino_chip_mem_valid above. */ + g_assert_not_reached(); + } + return MEMTX_OK; +} + +static const MemoryRegionOps dino_chip_ops = { + .read_with_attrs = dino_chip_read_with_attrs, + .write_with_attrs = dino_chip_write_with_attrs, + .endianness = DEVICE_BIG_ENDIAN, + .valid = { + .min_access_size = 1, + .max_access_size = 4, + .accepts = dino_chip_mem_valid, + }, + .impl = { + .min_access_size = 1, + .max_access_size = 4, + }, +}; + +static const VMStateDescription vmstate_dino = { + .name = "Dino", + .version_id = 2, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(iar0, DinoState), + VMSTATE_UINT32(iar1, DinoState), + VMSTATE_UINT32(imr, DinoState), + VMSTATE_UINT32(ipr, DinoState), + VMSTATE_UINT32(icr, DinoState), + VMSTATE_UINT32(ilr, DinoState), + VMSTATE_UINT32(io_fbb_en, DinoState), + VMSTATE_UINT32(io_addr_en, DinoState), + VMSTATE_UINT32(io_control, DinoState), + VMSTATE_UINT32(toc_addr, DinoState), + VMSTATE_END_OF_LIST() + } +}; + +/* Unlike pci_config_data_le_ops, no check of high bit set in config_reg. 
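
/*
 * Writes to the 0x800 block above are filtered through
 * reg800_keep_bits[], so reserved bits can never be set, and the read
 * path additionally forces hardwired-zero bits (e.g. PAMR bit 0).
 * Worked case for DINO_PAMR, which keeps only its low 7 bits:
 */
#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint32_t keep = (1u << 7) - 1;      /* MAKE_64BIT_MASK(0, 7) for PAMR */
    uint32_t val  = 0xffffffffu & keep; /* guest writes all-ones */

    assert(val == 0x7f);                /* only the writable bits stick */
    return 0;
}
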
*/ + +static uint64_t dino_config_data_read(void *opaque, hwaddr addr, unsigned len) +{ + PCIHostState *s = opaque; + return pci_data_read(s->bus, s->config_reg | (addr & 3), len); +} + +static void dino_config_data_write(void *opaque, hwaddr addr, + uint64_t val, unsigned len) +{ + PCIHostState *s = opaque; + pci_data_write(s->bus, s->config_reg | (addr & 3), val, len); +} + +static const MemoryRegionOps dino_config_data_ops = { + .read = dino_config_data_read, + .write = dino_config_data_write, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static uint64_t dino_config_addr_read(void *opaque, hwaddr addr, unsigned len) +{ + DinoState *s = opaque; + return s->config_reg_dino; +} + +static void dino_config_addr_write(void *opaque, hwaddr addr, + uint64_t val, unsigned len) +{ + PCIHostState *s = opaque; + DinoState *ds = opaque; + ds->config_reg_dino = val; /* keep a copy of original value */ + s->config_reg = val & ~3U; +} + +static const MemoryRegionOps dino_config_addr_ops = { + .read = dino_config_addr_read, + .write = dino_config_addr_write, + .valid.min_access_size = 4, + .valid.max_access_size = 4, + .endianness = DEVICE_BIG_ENDIAN, +}; + +static AddressSpace *dino_pcihost_set_iommu(PCIBus *bus, void *opaque, + int devfn) +{ + DinoState *s = opaque; + + return &s->bm_as; +} + +/* + * Dino interrupts are connected as shown on Page 78, Table 23 + * (Little-endian bit numbers) + * 0 PCI INTA + * 1 PCI INTB + * 2 PCI INTC + * 3 PCI INTD + * 4 PCI INTE + * 5 PCI INTF + * 6 GSC External Interrupt + * 7 Bus Error for "less than fatal" mode + * 8 PS2 + * 9 Unused + * 10 RS232 + */ + +static void dino_set_irq(void *opaque, int irq, int level) +{ + DinoState *s = opaque; + uint32_t bit = 1u << irq; + uint32_t old_ilr = s->ilr; + + if (level) { + uint32_t ena = bit & ~old_ilr; + s->ipr |= ena; + s->ilr = old_ilr | bit; + if (ena & s->imr) { + uint32_t iar = (ena & s->icr ? s->iar1 : s->iar0); + stl_be_phys(&address_space_memory, iar & -32, iar & 31); + } + } else { + s->ilr = old_ilr & ~bit; + } +} + +static int dino_pci_map_irq(PCIDevice *d, int irq_num) +{ + int slot = PCI_SLOT(d->devfn); + + assert(irq_num >= 0 && irq_num <= 3); + + return slot & 0x03; +} + +static void dino_set_timer_irq(void *opaque, int irq, int level) +{ + /* ??? Not connected. */ +} + +static void dino_set_serial_irq(void *opaque, int irq, int level) +{ + dino_set_irq(opaque, 10, level); +} + +PCIBus *dino_init(MemoryRegion *addr_space, + qemu_irq *p_rtc_irq, qemu_irq *p_ser_irq) +{ + DeviceState *dev; + DinoState *s; + PCIBus *b; + int i; + + dev = qdev_new(TYPE_DINO_PCI_HOST_BRIDGE); + s = DINO_PCI_HOST_BRIDGE(dev); + s->iar0 = s->iar1 = CPU_HPA + 3; + s->toc_addr = 0xFFFA0030; /* IO_COMMAND of CPU */ + + /* Dino PCI access from main memory. */ + memory_region_init_io(&s->this_mem, OBJECT(s), &dino_chip_ops, + s, "dino", 4096); + memory_region_add_subregion(addr_space, DINO_HPA, &s->this_mem); + + /* Dino PCI config. */ + memory_region_init_io(&s->parent_obj.conf_mem, OBJECT(&s->parent_obj), + &dino_config_addr_ops, dev, "pci-conf-idx", 4); + memory_region_init_io(&s->parent_obj.data_mem, OBJECT(&s->parent_obj), + &dino_config_data_ops, dev, "pci-conf-data", 4); + memory_region_add_subregion(&s->this_mem, DINO_PCI_CONFIG_ADDR, + &s->parent_obj.conf_mem); + memory_region_add_subregion(&s->this_mem, DINO_CONFIG_DATA, + &s->parent_obj.data_mem); + + /* Dino PCI bus memory. 
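
/*
 * When an enabled interrupt becomes pending, dino_set_irq() above
 * turns IAR into a doorbell write: bits [31:5] give the target address
 * (the CPU's interrupt register on real hardware) and bits [4:0] the
 * bit to raise there. Standalone check of that split, using the IAR
 * reset value from dino_init() (CPU_HPA + 3):
 */
#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint32_t iar  = 0xfffb0003;     /* CPU_HPA + 3, as set in dino_init */
    uint32_t addr = iar & -32;      /* stl_be_phys target address */
    uint32_t data = iar & 31;       /* value written: interrupt bit 3 */

    assert(addr == 0xfffb0000);
    assert(data == 3);
    return 0;
}
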
*/ + memory_region_init(&s->pci_mem, OBJECT(s), "pci-memory", 4 * GiB); + + b = pci_register_root_bus(dev, "pci", dino_set_irq, dino_pci_map_irq, s, + &s->pci_mem, get_system_io(), + PCI_DEVFN(0, 0), 32, TYPE_PCI_BUS); + s->parent_obj.bus = b; + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + + /* Set up windows into PCI bus memory. */ + for (i = 1; i < 31; i++) { + uint32_t addr = 0xf0000000 + i * DINO_MEM_CHUNK_SIZE; + char *name = g_strdup_printf("PCI Outbound Window %d", i); + memory_region_init_alias(&s->pci_mem_alias[i], OBJECT(s), + name, &s->pci_mem, addr, + DINO_MEM_CHUNK_SIZE); + g_free(name); + } + + /* Set up PCI view of memory: Bus master address space. */ + memory_region_init(&s->bm, OBJECT(s), "bm-dino", 4 * GiB); + memory_region_init_alias(&s->bm_ram_alias, OBJECT(s), + "bm-system", addr_space, 0, + 0xf0000000 + DINO_MEM_CHUNK_SIZE); + memory_region_init_alias(&s->bm_pci_alias, OBJECT(s), + "bm-pci", &s->pci_mem, + 0xf0000000 + DINO_MEM_CHUNK_SIZE, + 30 * DINO_MEM_CHUNK_SIZE); + memory_region_init_alias(&s->bm_cpu_alias, OBJECT(s), + "bm-cpu", addr_space, 0xfff00000, + 0xfffff); + memory_region_add_subregion(&s->bm, 0, + &s->bm_ram_alias); + memory_region_add_subregion(&s->bm, + 0xf0000000 + DINO_MEM_CHUNK_SIZE, + &s->bm_pci_alias); + memory_region_add_subregion(&s->bm, 0xfff00000, + &s->bm_cpu_alias); + address_space_init(&s->bm_as, &s->bm, "pci-bm"); + pci_setup_iommu(b, dino_pcihost_set_iommu, s); + + *p_rtc_irq = qemu_allocate_irq(dino_set_timer_irq, s, 0); + *p_ser_irq = qemu_allocate_irq(dino_set_serial_irq, s, 0); + + return b; +} + +static void dino_pcihost_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->vmsd = &vmstate_dino; +} + +static const TypeInfo dino_pcihost_info = { + .name = TYPE_DINO_PCI_HOST_BRIDGE, + .parent = TYPE_PCI_HOST_BRIDGE, + .instance_size = sizeof(DinoState), + .class_init = dino_pcihost_class_init, +}; + +static void dino_register_types(void) +{ + type_register_static(&dino_pcihost_info); +} + +type_init(dino_register_types) diff --git a/hw/hppa/hppa_hardware.h b/hw/hppa/hppa_hardware.h new file mode 100644 index 000000000..bc258895c --- /dev/null +++ b/hw/hppa/hppa_hardware.h @@ -0,0 +1,51 @@ +/* HPPA cores and system support chips. */ + +#ifndef HW_HPPA_HPPA_HARDWARE_H +#define HW_HPPA_HPPA_HARDWARE_H + +#define FIRMWARE_START 0xf0000000 +#define FIRMWARE_END 0xf0800000 + +#define DEVICE_HPA_LEN 0x00100000 + +#define GSC_HPA 0xffc00000 +#define DINO_HPA 0xfff80000 +#define DINO_UART_HPA 0xfff83000 +#define DINO_UART_BASE 0xfff83800 +#define DINO_SCSI_HPA 0xfff8c000 +#define LASI_HPA 0xffd00000 +#define LASI_UART_HPA 0xffd05000 +#define LASI_SCSI_HPA 0xffd06000 +#define LASI_LAN_HPA 0xffd07000 +#define LASI_RTC_HPA 0xffd09000 +#define LASI_LPT_HPA 0xffd02000 +#define LASI_AUDIO_HPA 0xffd04000 +#define LASI_PS2KBD_HPA 0xffd08000 +#define LASI_PS2MOU_HPA 0xffd08100 +#define LASI_GFX_HPA 0xf8000000 +#define ARTIST_FB_ADDR 0xf9000000 +#define CPU_HPA 0xfffb0000 +#define MEMORY_HPA 0xfffbf000 + +#define PCI_HPA DINO_HPA /* PCI bus */ +#define IDE_HPA 0xf9000000 /* Boot disc controller */ + +/* offsets to DINO HPA: */ +#define DINO_PCI_ADDR 0x064 +#define DINO_CONFIG_DATA 0x068 +#define DINO_IO_DATA 0x06c + +#define PORT_PCI_CMD (PCI_HPA + DINO_PCI_ADDR) +#define PORT_PCI_DATA (PCI_HPA + DINO_CONFIG_DATA) + +#define FW_CFG_IO_BASE 0xfffa0000 + +#define PORT_SERIAL1 (DINO_UART_HPA + 0x800) +#define PORT_SERIAL2 (LASI_UART_HPA + 0x800) + +#define HPPA_MAX_CPUS 8 /* max. 
number of SMP CPUs */ +#define CPU_CLOCK_MHZ 250 /* emulate a 250 MHz CPU */ + +#define CPU_HPA_CR_REG 7 /* store CPU HPA in cr7 (SeaBIOS internal) */ + +#endif diff --git a/hw/hppa/hppa_sys.h b/hw/hppa/hppa_sys.h new file mode 100644 index 000000000..0b18271cc --- /dev/null +++ b/hw/hppa/hppa_sys.h @@ -0,0 +1,24 @@ +/* HPPA cores and system support chips. */ + +#ifndef HW_HPPA_SYS_H +#define HW_HPPA_SYS_H + +#include "hw/pci/pci.h" +#include "hw/pci/pci_host.h" +#include "hw/boards.h" +#include "hw/intc/i8259.h" + +#include "hppa_hardware.h" + +PCIBus *dino_init(MemoryRegion *, qemu_irq *, qemu_irq *); +DeviceState *lasi_init(MemoryRegion *); +#define enable_lasi_lan() 0 + +#define TYPE_DINO_PCI_HOST_BRIDGE "dino-pcihost" + +/* hppa_pci.c. */ +extern const MemoryRegionOps hppa_pci_ignore_ops; +extern const MemoryRegionOps hppa_pci_conf1_ops; +extern const MemoryRegionOps hppa_pci_iack_ops; + +#endif diff --git a/hw/hppa/lasi.c b/hw/hppa/lasi.c new file mode 100644 index 000000000..88c3791eb --- /dev/null +++ b/hw/hppa/lasi.c @@ -0,0 +1,367 @@ +/* + * HP-PARISC Lasi chipset emulation. + * + * (C) 2019 by Helge Deller + * + * This work is licensed under the GNU GPL license version 2 or later. + * + * Documentation available at: + * https://parisc.wiki.kernel.org/images-parisc/7/79/Lasi_ers.pdf + */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "qemu/log.h" +#include "qapi/error.h" +#include "trace.h" +#include "hw/irq.h" +#include "sysemu/sysemu.h" +#include "sysemu/runstate.h" +#include "hppa_sys.h" +#include "hw/net/lasi_82596.h" +#include "hw/char/parallel.h" +#include "hw/char/serial.h" +#include "hw/input/lasips2.h" +#include "migration/vmstate.h" +#include "qom/object.h" + +#define TYPE_LASI_CHIP "lasi-chip" + +#define LASI_IRR 0x00 /* RO */ +#define LASI_IMR 0x04 +#define LASI_IPR 0x08 +#define LASI_ICR 0x0c +#define LASI_IAR 0x10 + +#define LASI_PCR 0x0C000 /* LASI Power Control register */ +#define LASI_ERRLOG 0x0C004 /* LASI Error Logging register */ +#define LASI_VER 0x0C008 /* LASI Version Control register */ +#define LASI_IORESET 0x0C00C /* LASI I/O Reset register */ +#define LASI_AMR 0x0C010 /* LASI Arbitration Mask register */ +#define LASI_IO_CONF 0x7FFFE /* LASI primary configuration register */ +#define LASI_IO_CONF2 0x7FFFF /* LASI secondary configuration register */ + +#define LASI_BIT(x) (1ul << (x)) +#define LASI_IRQ_BITS (LASI_BIT(5) | LASI_BIT(7) | LASI_BIT(8) | LASI_BIT(9) \ + | LASI_BIT(13) | LASI_BIT(14) | LASI_BIT(16) | LASI_BIT(17) \ + | LASI_BIT(18) | LASI_BIT(19) | LASI_BIT(20) | LASI_BIT(21) \ + | LASI_BIT(26)) + +#define ICR_BUS_ERROR_BIT LASI_BIT(8) /* bit 8 in ICR */ +#define ICR_TOC_BIT LASI_BIT(1) /* bit 1 in ICR */ + +OBJECT_DECLARE_SIMPLE_TYPE(LasiState, LASI_CHIP) + +struct LasiState { + PCIHostState parent_obj; + + uint32_t irr; + uint32_t imr; + uint32_t ipr; + uint32_t icr; + uint32_t iar; + + uint32_t errlog; + uint32_t amr; + uint32_t rtc; + time_t rtc_ref; + + MemoryRegion this_mem; +}; + +static bool lasi_chip_mem_valid(void *opaque, hwaddr addr, + unsigned size, bool is_write, + MemTxAttrs attrs) +{ + bool ret = false; + + switch (addr) { + case LASI_IRR: + case LASI_IMR: + case LASI_IPR: + case LASI_ICR: + case LASI_IAR: + + case (LASI_LAN_HPA - LASI_HPA): + case (LASI_LPT_HPA - LASI_HPA): + case (LASI_UART_HPA - LASI_HPA): + case (LASI_RTC_HPA - LASI_HPA): + + case LASI_PCR ... 
LASI_AMR: + ret = true; + } + + trace_lasi_chip_mem_valid(addr, ret); + return ret; +} + +static MemTxResult lasi_chip_read_with_attrs(void *opaque, hwaddr addr, + uint64_t *data, unsigned size, + MemTxAttrs attrs) +{ + LasiState *s = opaque; + MemTxResult ret = MEMTX_OK; + uint32_t val; + + switch (addr) { + case LASI_IRR: + val = s->irr; + break; + case LASI_IMR: + val = s->imr; + break; + case LASI_IPR: + val = s->ipr; + /* Any read to IPR clears the register. */ + s->ipr = 0; + break; + case LASI_ICR: + val = s->icr & ICR_BUS_ERROR_BIT; /* bus_error */ + break; + case LASI_IAR: + val = s->iar; + break; + + case (LASI_LAN_HPA - LASI_HPA): + case (LASI_LPT_HPA - LASI_HPA): + case (LASI_UART_HPA - LASI_HPA): + val = 0; + break; + case (LASI_RTC_HPA - LASI_HPA): + val = time(NULL); + val += s->rtc_ref; + break; + + case LASI_PCR: + case LASI_VER: /* only version 0 existed. */ + case LASI_IORESET: + val = 0; + break; + case LASI_ERRLOG: + val = s->errlog; + break; + case LASI_AMR: + val = s->amr; + break; + + default: + /* Controlled by lasi_chip_mem_valid above. */ + g_assert_not_reached(); + } + + trace_lasi_chip_read(addr, val); + + *data = val; + return ret; +} + +static MemTxResult lasi_chip_write_with_attrs(void *opaque, hwaddr addr, + uint64_t val, unsigned size, + MemTxAttrs attrs) +{ + LasiState *s = opaque; + + trace_lasi_chip_write(addr, val); + + switch (addr) { + case LASI_IRR: + /* read-only. */ + break; + case LASI_IMR: + s->imr = val; + if (((val & LASI_IRQ_BITS) != val) && (val != 0xffffffff)) + qemu_log_mask(LOG_GUEST_ERROR, + "LASI: tried to set invalid %lx IMR value.\n", + (unsigned long) val); + break; + case LASI_IPR: + /* Any write to IPR clears the register. */ + s->ipr = 0; + break; + case LASI_ICR: + s->icr = val; + /* if (val & ICR_TOC_BIT) issue_toc(); */ + break; + case LASI_IAR: + s->iar = val; + break; + + case (LASI_LAN_HPA - LASI_HPA): + /* XXX: reset LAN card */ + break; + case (LASI_LPT_HPA - LASI_HPA): + /* XXX: reset parallel port */ + break; + case (LASI_UART_HPA - LASI_HPA): + /* XXX: reset serial port */ + break; + case (LASI_RTC_HPA - LASI_HPA): + s->rtc_ref = val - time(NULL); + break; + + case LASI_PCR: + if (val == 0x02) /* immediately power off */ + qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN); + break; + case LASI_ERRLOG: + s->errlog = val; + break; + case LASI_VER: + /* read-only. */ + break; + case LASI_IORESET: + break; /* XXX: TODO: Reset various devices. */ + case LASI_AMR: + s->amr = val; + break; + + default: + /* Controlled by lasi_chip_mem_valid above. 
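+ * Addresses rejected by .valid.accepts never reach this handler,
+ * so this path is genuinely unreachable.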
*/
+ g_assert_not_reached();
+ }
+ return MEMTX_OK;
+}
+
+static const MemoryRegionOps lasi_chip_ops = {
+ .read_with_attrs = lasi_chip_read_with_attrs,
+ .write_with_attrs = lasi_chip_write_with_attrs,
+ .endianness = DEVICE_BIG_ENDIAN,
+ .valid = {
+ .min_access_size = 1,
+ .max_access_size = 4,
+ .accepts = lasi_chip_mem_valid,
+ },
+ .impl = {
+ .min_access_size = 1,
+ .max_access_size = 4,
+ },
+};
+
+static const VMStateDescription vmstate_lasi = {
+ .name = "Lasi",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(irr, LasiState),
+ VMSTATE_UINT32(imr, LasiState),
+ VMSTATE_UINT32(ipr, LasiState),
+ VMSTATE_UINT32(icr, LasiState),
+ VMSTATE_UINT32(iar, LasiState),
+ VMSTATE_UINT32(errlog, LasiState),
+ VMSTATE_UINT32(amr, LasiState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+
+static void lasi_set_irq(void *opaque, int irq, int level)
+{
+ LasiState *s = opaque;
+ uint32_t bit = 1u << irq;
+
+ if (level) {
+ s->ipr |= bit;
+ if (bit & s->imr) {
+ uint32_t iar = s->iar;
+ s->irr |= bit;
+ if ((s->icr & ICR_BUS_ERROR_BIT) == 0) {
+ stl_be_phys(&address_space_memory, iar & -32, iar & 31);
+ }
+ }
+ }
+}
+
+static int lasi_get_irq(unsigned long hpa)
+{
+ switch (hpa) {
+ case LASI_HPA:
+ return 14;
+ case LASI_UART_HPA:
+ return 5;
+ case LASI_LPT_HPA:
+ return 7;
+ case LASI_LAN_HPA:
+ return 8;
+ case LASI_SCSI_HPA:
+ return 9;
+ case LASI_AUDIO_HPA:
+ return 13;
+ case LASI_PS2KBD_HPA:
+ case LASI_PS2MOU_HPA:
+ return 26;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+DeviceState *lasi_init(MemoryRegion *address_space)
+{
+ DeviceState *dev;
+ LasiState *s;
+
+ dev = qdev_new(TYPE_LASI_CHIP);
+ s = LASI_CHIP(dev);
+ s->iar = CPU_HPA + 3;
+
+ /* Lasi access from main memory. */
+ memory_region_init_io(&s->this_mem, OBJECT(s), &lasi_chip_ops,
+ s, "lasi", 0x100000);
+ memory_region_add_subregion(address_space, LASI_HPA, &s->this_mem);
+
+ sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
+
+ /* LAN */
+ if (enable_lasi_lan()) {
+ qemu_irq lan_irq = qemu_allocate_irq(lasi_set_irq, s,
+ lasi_get_irq(LASI_LAN_HPA));
+ lasi_82596_init(address_space, LASI_LAN_HPA, lan_irq);
+ }
+
+ /* Parallel port */
+ qemu_irq lpt_irq = qemu_allocate_irq(lasi_set_irq, s,
+ lasi_get_irq(LASI_LPT_HPA));
+ parallel_mm_init(address_space, LASI_LPT_HPA + 0x800, 0,
+ lpt_irq, parallel_hds[0]);
+
+ /* Real time clock (RTC), it's only one 32-bit counter @9000 */
+
+ s->rtc = time(NULL);
+ s->rtc_ref = 0;
+
+ if (serial_hd(1)) {
+ /* Serial port; serial_hd(0) is already taken by the Dino UART. */
+ qemu_irq serial_irq = qemu_allocate_irq(lasi_set_irq, s,
+ lasi_get_irq(LASI_UART_HPA));
+ serial_mm_init(address_space, LASI_UART_HPA + 0x800, 0,
+ serial_irq, 8000000 / 16,
+ serial_hd(1), DEVICE_NATIVE_ENDIAN);
+ }
+
+ /* PS/2 Keyboard/Mouse */
+ qemu_irq ps2kbd_irq = qemu_allocate_irq(lasi_set_irq, s,
+ lasi_get_irq(LASI_PS2KBD_HPA));
+ lasips2_init(address_space, LASI_PS2KBD_HPA, ps2kbd_irq);
+
+ return dev;
+}
+
+static void lasi_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->vmsd = &vmstate_lasi;
+}
+
+static const TypeInfo lasi_pcihost_info = {
+ .name = TYPE_LASI_CHIP,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(LasiState),
+ .class_init = lasi_class_init,
+};
+
+static void lasi_register_types(void)
+{
+ type_register_static(&lasi_pcihost_info);
+}
+
+type_init(lasi_register_types) diff --git a/hw/hppa/machine.c b/hw/hppa/machine.c new file mode 100644 index 000000000..2a46af5bc --- /dev/null +++ b/hw/hppa/machine.c @@ -0,0 +1,374 @@ +/*
+ * QEMU
HPPA hardware system emulator.
+ * Copyright 2018 Helge Deller
+ */
+
+#include "qemu/osdep.h"
+#include "qemu-common.h"
+#include "qemu/datadir.h"
+#include "cpu.h"
+#include "elf.h"
+#include "hw/loader.h"
+#include "qemu/error-report.h"
+#include "sysemu/reset.h"
+#include "sysemu/sysemu.h"
+#include "sysemu/runstate.h"
+#include "hw/rtc/mc146818rtc.h"
+#include "hw/timer/i8254.h"
+#include "hw/char/serial.h"
+#include "hw/net/lasi_82596.h"
+#include "hppa_sys.h"
+#include "qemu/units.h"
+#include "qapi/error.h"
+#include "net/net.h"
+#include "qemu/log.h"
+
+#define MAX_IDE_BUS 2
+
+#define MIN_SEABIOS_HPPA_VERSION 1 /* require at least this fw version */
+
+#define HPA_POWER_BUTTON (FIRMWARE_END - 0x10)
+
+static void hppa_powerdown_req(Notifier *n, void *opaque)
+{
+ hwaddr soft_power_reg = HPA_POWER_BUTTON;
+ uint32_t val;
+
+ val = ldl_be_phys(&address_space_memory, soft_power_reg);
+ if ((val >> 8) == 0) {
+ /* immediately shut down when under hardware control */
+ qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
+ return;
+ }
+
+ /* clear bit 31 to indicate that the power switch was pressed. */
+ val &= ~1;
+ stl_be_phys(&address_space_memory, soft_power_reg, val);
+}
+
+static Notifier hppa_system_powerdown_notifier = {
+ .notify = hppa_powerdown_req
+};
+
+
+static ISABus *hppa_isa_bus(void)
+{
+ ISABus *isa_bus;
+ qemu_irq *isa_irqs;
+ MemoryRegion *isa_region;
+
+ isa_region = g_new(MemoryRegion, 1);
+ memory_region_init_io(isa_region, NULL, &hppa_pci_ignore_ops,
+ NULL, "isa-io", 0x800);
+ memory_region_add_subregion(get_system_memory(), IDE_HPA,
+ isa_region);
+
+ isa_bus = isa_bus_new(NULL, get_system_memory(), isa_region,
+ &error_abort);
+ isa_irqs = i8259_init(isa_bus,
+ /* qemu_allocate_irq(dino_set_isa_irq, s, 0)); */
+ NULL);
+ isa_bus_irqs(isa_bus, isa_irqs);
+
+ return isa_bus;
+}
+
+static uint64_t cpu_hppa_to_phys(void *opaque, uint64_t addr)
+{
+ addr &= (0x10000000 - 1);
+ return addr;
+}
+
+static HPPACPU *cpu[HPPA_MAX_CPUS];
+static uint64_t firmware_entry;
+
+static void fw_cfg_boot_set(void *opaque, const char *boot_device,
+ Error **errp)
+{
+ fw_cfg_modify_i16(opaque, FW_CFG_BOOT_DEVICE, boot_device[0]);
+}
+
+static FWCfgState *create_fw_cfg(MachineState *ms)
+{
+ FWCfgState *fw_cfg;
+ uint64_t val;
+
+ fw_cfg = fw_cfg_init_mem(FW_CFG_IO_BASE, FW_CFG_IO_BASE + 4);
+ fw_cfg_add_i16(fw_cfg, FW_CFG_NB_CPUS, ms->smp.cpus);
+ fw_cfg_add_i16(fw_cfg, FW_CFG_MAX_CPUS, HPPA_MAX_CPUS);
+ fw_cfg_add_i64(fw_cfg, FW_CFG_RAM_SIZE, ms->ram_size);
+
+ val = cpu_to_le64(MIN_SEABIOS_HPPA_VERSION);
+ fw_cfg_add_file(fw_cfg, "/etc/firmware-min-version",
+ g_memdup(&val, sizeof(val)), sizeof(val));
+
+ val = cpu_to_le64(HPPA_TLB_ENTRIES);
+ fw_cfg_add_file(fw_cfg, "/etc/cpu/tlb_entries",
+ g_memdup(&val, sizeof(val)), sizeof(val));
+
+ val = cpu_to_le64(HPPA_BTLB_ENTRIES);
+ fw_cfg_add_file(fw_cfg, "/etc/cpu/btlb_entries",
+ g_memdup(&val, sizeof(val)), sizeof(val));
+
+ val = cpu_to_le64(HPA_POWER_BUTTON);
+ fw_cfg_add_file(fw_cfg, "/etc/power-button-addr",
+ g_memdup(&val, sizeof(val)), sizeof(val));
+
+ fw_cfg_add_i16(fw_cfg, FW_CFG_BOOT_DEVICE, ms->boot_order[0]);
+ qemu_register_boot_set(fw_cfg_boot_set, fw_cfg);
+
+ return fw_cfg;
+}
+
+static void machine_hppa_init(MachineState *machine)
+{
+ const char *kernel_filename = machine->kernel_filename;
+ const char *kernel_cmdline = machine->kernel_cmdline;
+ const char *initrd_filename = machine->initrd_filename;
+ DeviceState *dev;
+ PCIBus *pci_bus;
+ ISABus *isa_bus;
+ qemu_irq
rtc_irq, serial_irq; + char *firmware_filename; + uint64_t firmware_low, firmware_high; + long size; + uint64_t kernel_entry = 0, kernel_low, kernel_high; + MemoryRegion *addr_space = get_system_memory(); + MemoryRegion *rom_region; + MemoryRegion *cpu_region; + long i; + unsigned int smp_cpus = machine->smp.cpus; + SysBusDevice *s; + + /* Create CPUs. */ + for (i = 0; i < smp_cpus; i++) { + char *name = g_strdup_printf("cpu%ld-io-eir", i); + cpu[i] = HPPA_CPU(cpu_create(machine->cpu_type)); + + cpu_region = g_new(MemoryRegion, 1); + memory_region_init_io(cpu_region, OBJECT(cpu[i]), &hppa_io_eir_ops, + cpu[i], name, 4); + memory_region_add_subregion(addr_space, CPU_HPA + i * 0x1000, + cpu_region); + g_free(name); + } + + /* Main memory region. */ + if (machine->ram_size > 3 * GiB) { + error_report("RAM size is currently restricted to 3GB"); + exit(EXIT_FAILURE); + } + memory_region_add_subregion_overlap(addr_space, 0, machine->ram, -1); + + + /* Init Lasi chip */ + lasi_init(addr_space); + + /* Init Dino (PCI host bus chip). */ + pci_bus = dino_init(addr_space, &rtc_irq, &serial_irq); + assert(pci_bus); + + /* Create ISA bus. */ + isa_bus = hppa_isa_bus(); + assert(isa_bus); + + /* Realtime clock, used by firmware for PDC_TOD call. */ + mc146818_rtc_init(isa_bus, 2000, rtc_irq); + + /* Serial code setup. */ + if (serial_hd(0)) { + uint32_t addr = DINO_UART_HPA + 0x800; + serial_mm_init(addr_space, addr, 0, serial_irq, + 115200, serial_hd(0), DEVICE_BIG_ENDIAN); + } + + /* fw_cfg configuration interface */ + create_fw_cfg(machine); + + /* SCSI disk setup. */ + dev = DEVICE(pci_create_simple(pci_bus, -1, "lsi53c895a")); + lsi53c8xx_handle_legacy_cmdline(dev); + + /* Graphics setup. */ + if (machine->enable_graphics && vga_interface_type != VGA_NONE) { + dev = qdev_new("artist"); + s = SYS_BUS_DEVICE(dev); + sysbus_realize_and_unref(s, &error_fatal); + sysbus_mmio_map(s, 0, LASI_GFX_HPA); + sysbus_mmio_map(s, 1, ARTIST_FB_ADDR); + } + + /* Network setup. */ + for (i = 0; i < nb_nics; i++) { + if (!enable_lasi_lan()) { + pci_nic_init_nofail(&nd_table[i], pci_bus, "tulip", NULL); + } + } + + /* register power switch emulation */ + qemu_register_powerdown_notifier(&hppa_system_powerdown_notifier); + + /* Load firmware. Given that this is not "real" firmware, + but one explicitly written for the emulation, we might as + well load it directly from an ELF image. */ + firmware_filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, + machine->firmware ?: "hppa-firmware.img"); + if (firmware_filename == NULL) { + error_report("no firmware provided"); + exit(1); + } + + size = load_elf(firmware_filename, NULL, NULL, NULL, + &firmware_entry, &firmware_low, &firmware_high, NULL, + true, EM_PARISC, 0, 0); + + /* Unfortunately, load_elf sign-extends reading elf32. 
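+ * A 32-bit entry point such as 0xf0000004 therefore comes back as
+ * 0xfffffffff0000004; the target_ureg casts below truncate the values
+ * back to 32 bits.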
*/
+ firmware_entry = (target_ureg)firmware_entry;
+ firmware_low = (target_ureg)firmware_low;
+ firmware_high = (target_ureg)firmware_high;
+
+ if (size < 0) {
+ error_report("could not load firmware '%s'", firmware_filename);
+ exit(1);
+ }
+ qemu_log_mask(CPU_LOG_PAGE, "Firmware loaded at 0x%08" PRIx64
+ "-0x%08" PRIx64 ", entry at 0x%08" PRIx64 ".\n",
+ firmware_low, firmware_high, firmware_entry);
+ if (firmware_low < FIRMWARE_START || firmware_high >= FIRMWARE_END) {
+ error_report("Firmware overlaps with memory or IO space");
+ exit(1);
+ }
+ g_free(firmware_filename);
+
+ rom_region = g_new(MemoryRegion, 1);
+ memory_region_init_ram(rom_region, NULL, "firmware",
+ (FIRMWARE_END - FIRMWARE_START), &error_fatal);
+ memory_region_add_subregion(addr_space, FIRMWARE_START, rom_region);
+
+ /* Load kernel */
+ if (kernel_filename) {
+ size = load_elf(kernel_filename, NULL, &cpu_hppa_to_phys,
+ NULL, &kernel_entry, &kernel_low, &kernel_high, NULL,
+ true, EM_PARISC, 0, 0);
+
+ /* Unfortunately, load_elf sign-extends reading elf32. */
+ kernel_entry = (target_ureg) cpu_hppa_to_phys(NULL, kernel_entry);
+ kernel_low = (target_ureg)kernel_low;
+ kernel_high = (target_ureg)kernel_high;
+
+ if (size < 0) {
+ error_report("could not load kernel '%s'", kernel_filename);
+ exit(1);
+ }
+ qemu_log_mask(CPU_LOG_PAGE, "Kernel loaded at 0x%08" PRIx64
+ "-0x%08" PRIx64 ", entry at 0x%08" PRIx64
+ ", size %" PRIu64 " kB\n",
+ kernel_low, kernel_high, kernel_entry, size / KiB);
+
+ if (kernel_cmdline) {
+ cpu[0]->env.gr[24] = 0x4000;
+ pstrcpy_targphys("cmdline", cpu[0]->env.gr[24],
+ TARGET_PAGE_SIZE, kernel_cmdline);
+ }
+
+ if (initrd_filename) {
+ ram_addr_t initrd_base;
+ int64_t initrd_size;
+
+ initrd_size = get_image_size(initrd_filename);
+ if (initrd_size < 0) {
+ error_report("could not load initial ram disk '%s'",
+ initrd_filename);
+ exit(1);
+ }
+
+ /* Load the initrd image high in memory.
+ Mirror the algorithm used by palo:
+ (1) Due to sign-extension problems and PDC,
+ put the initrd no higher than 1G.
+ (2) Reserve 64k for stack. */
+ initrd_base = MIN(machine->ram_size, 1 * GiB);
+ initrd_base = initrd_base - 64 * KiB;
+ initrd_base = (initrd_base - initrd_size) & TARGET_PAGE_MASK;
+
+ if (initrd_base < kernel_high) {
+ error_report("kernel and initial ram disk too large!");
+ exit(1);
+ }
+
+ load_image_targphys(initrd_filename, initrd_base, initrd_size);
+ cpu[0]->env.gr[23] = initrd_base;
+ cpu[0]->env.gr[22] = initrd_base + initrd_size;
+ }
+ }
+
+ if (!kernel_entry) {
+ /* When booting via firmware, tell firmware if we want interactive
+ * mode (kernel_entry=1), and to boot from CD (gr[24]='d')
+ * or hard disc (gr[24]='c').
+ */
+ kernel_entry = boot_menu ? 1 : 0;
+ cpu[0]->env.gr[24] = machine->boot_order[0];
+ }
+
+ /* We jump to the firmware entry routine and pass the
+ * various parameters in registers. After firmware initialization,
+ * firmware will start the Linux kernel with ramdisk and cmdline.
+ */
+ cpu[0]->env.gr[26] = machine->ram_size;
+ cpu[0]->env.gr[25] = kernel_entry;
+
+ /* tell firmware how many SMP CPUs to present in inventory table */
+ cpu[0]->env.gr[21] = smp_cpus;
+
+ /* tell firmware fw_cfg port */
+ cpu[0]->env.gr[19] = FW_CFG_IO_BASE;
+}
+
+static void hppa_machine_reset(MachineState *ms)
+{
+ unsigned int smp_cpus = ms->smp.cpus;
+ int i;
+
+ qemu_devices_reset();
+
+ /* Start all CPUs at the firmware entry point.
+ * Monarch CPU will initialize firmware, secondary CPUs
+ * will enter a small idle loop and wait for rendezvous.
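+ *
+ * Registers handed to the firmware (set below and in
+ * machine_hppa_init): gr26 = RAM size, gr25 = kernel entry point
+ * (or 1 to request the interactive boot menu), gr24 = boot device
+ * (or the command line address when a kernel is loaded directly),
+ * gr23/gr22 = initrd start/end, gr21 = number of SMP CPUs,
+ * gr19 = fw_cfg port, gr5 = per-CPU HPA.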
*/ + for (i = 0; i < smp_cpus; i++) { + cpu_set_pc(CPU(cpu[i]), firmware_entry); + cpu[i]->env.gr[5] = CPU_HPA + i * 0x1000; + } + + /* already initialized by machine_hppa_init()? */ + if (cpu[0]->env.gr[26] == ms->ram_size) { + return; + } + + cpu[0]->env.gr[26] = ms->ram_size; + cpu[0]->env.gr[25] = 0; /* no firmware boot menu */ + cpu[0]->env.gr[24] = 'c'; + /* gr22/gr23 unused, no initrd while reboot. */ + cpu[0]->env.gr[21] = smp_cpus; + /* tell firmware fw_cfg port */ + cpu[0]->env.gr[19] = FW_CFG_IO_BASE; +} + + +static void machine_hppa_machine_init(MachineClass *mc) +{ + mc->desc = "HPPA generic machine"; + mc->default_cpu_type = TYPE_HPPA_CPU; + mc->init = machine_hppa_init; + mc->reset = hppa_machine_reset; + mc->block_default_type = IF_SCSI; + mc->max_cpus = HPPA_MAX_CPUS; + mc->default_cpus = 1; + mc->is_default = true; + mc->default_ram_size = 512 * MiB; + mc->default_boot_order = "cd"; + mc->default_ram_id = "ram"; +} + +DEFINE_MACHINE("hppa", machine_hppa_machine_init) diff --git a/hw/hppa/meson.build b/hw/hppa/meson.build new file mode 100644 index 000000000..1deae83ae --- /dev/null +++ b/hw/hppa/meson.build @@ -0,0 +1,4 @@ +hppa_ss = ss.source_set() +hppa_ss.add(when: 'CONFIG_DINO', if_true: files('pci.c', 'machine.c', 'dino.c', 'lasi.c')) + +hw_arch += {'hppa': hppa_ss} diff --git a/hw/hppa/pci.c b/hw/hppa/pci.c new file mode 100644 index 000000000..32609aba6 --- /dev/null +++ b/hw/hppa/pci.c @@ -0,0 +1,88 @@ +/* + * QEMU HP-PARISC PCI support functions. + * + */ + +#include "qemu/osdep.h" +#include "hppa_sys.h" +#include "qemu/log.h" +#include "trace.h" + + +/* Fallback for unassigned PCI I/O operations. Avoids MCHK. */ + +static uint64_t ignore_read(void *opaque, hwaddr addr, unsigned size) +{ + return 0; +} + +static void ignore_write(void *opaque, hwaddr addr, uint64_t v, unsigned size) +{ +} + +const MemoryRegionOps hppa_pci_ignore_ops = { + .read = ignore_read, + .write = ignore_write, + .endianness = DEVICE_BIG_ENDIAN, + .valid = { + .min_access_size = 1, + .max_access_size = 8, + }, + .impl = { + .min_access_size = 1, + .max_access_size = 8, + }, +}; + + +/* PCI config space reads/writes, to byte-word addressable memory. */ +static uint64_t bw_conf1_read(void *opaque, hwaddr addr, + unsigned size) +{ + PCIBus *b = opaque; + return pci_data_read(b, addr, size); +} + +static void bw_conf1_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + PCIBus *b = opaque; + pci_data_write(b, addr, val, size); +} + +const MemoryRegionOps hppa_pci_conf1_ops = { + .read = bw_conf1_read, + .write = bw_conf1_write, + .endianness = DEVICE_BIG_ENDIAN, + .impl = { + .min_access_size = 1, + .max_access_size = 4, + }, +}; + +/* PCI/EISA Interrupt Acknowledge Cycle. */ + +static uint64_t iack_read(void *opaque, hwaddr addr, unsigned size) +{ + return pic_read_irq(isa_pic); +} + +static void special_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + trace_hppa_pci_iack_write(); +} + +const MemoryRegionOps hppa_pci_iack_ops = { + .read = iack_read, + .write = special_write, + .endianness = DEVICE_BIG_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + }, + .impl = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; diff --git a/hw/hppa/trace-events b/hw/hppa/trace-events new file mode 100644 index 000000000..3f42be905 --- /dev/null +++ b/hw/hppa/trace-events @@ -0,0 +1,14 @@ +# See docs/devel/tracing.rst for syntax documentation. 
+ +# pci.c +hppa_pci_iack_write(void) "" + +# dino.c +dino_chip_mem_valid(uint64_t addr, uint32_t val) "access to addr 0x%"PRIx64" is %d" +dino_chip_read(uint64_t addr, uint32_t val) "addr 0x%"PRIx64" val 0x%08x" +dino_chip_write(uint64_t addr, uint32_t val) "addr 0x%"PRIx64" val 0x%08x" + +# lasi.c +lasi_chip_mem_valid(uint64_t addr, uint32_t val) "access to addr 0x%"PRIx64" is %d" +lasi_chip_read(uint64_t addr, uint32_t val) "addr 0x%"PRIx64" val 0x%08x" +lasi_chip_write(uint64_t addr, uint32_t val) "addr 0x%"PRIx64" val 0x%08x" diff --git a/hw/hppa/trace.h b/hw/hppa/trace.h new file mode 100644 index 000000000..4e8b52dc2 --- /dev/null +++ b/hw/hppa/trace.h @@ -0,0 +1 @@ +#include "trace/trace-hw_hppa.h" diff --git a/hw/hyperv/Kconfig b/hw/hyperv/Kconfig new file mode 100644 index 000000000..3fbfe41c9 --- /dev/null +++ b/hw/hyperv/Kconfig @@ -0,0 +1,13 @@ +config HYPERV + bool + depends on KVM + +config HYPERV_TESTDEV + bool + default y if TEST_DEVICES + depends on HYPERV + +config VMBUS + bool + default y + depends on HYPERV diff --git a/hw/hyperv/hyperv.c b/hw/hyperv/hyperv.c new file mode 100644 index 000000000..cb1074f23 --- /dev/null +++ b/hw/hyperv/hyperv.c @@ -0,0 +1,663 @@ +/* + * Hyper-V guest/hypervisor interaction + * + * Copyright (c) 2015-2018 Virtuozzo International GmbH. + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "qemu/main-loop.h" +#include "qemu/module.h" +#include "qapi/error.h" +#include "exec/address-spaces.h" +#include "sysemu/kvm.h" +#include "qemu/bitops.h" +#include "qemu/error-report.h" +#include "qemu/lockable.h" +#include "qemu/queue.h" +#include "qemu/rcu.h" +#include "qemu/rcu_queue.h" +#include "hw/hyperv/hyperv.h" +#include "qom/object.h" + +struct SynICState { + DeviceState parent_obj; + + CPUState *cs; + + bool enabled; + hwaddr msg_page_addr; + hwaddr event_page_addr; + MemoryRegion msg_page_mr; + MemoryRegion event_page_mr; + struct hyperv_message_page *msg_page; + struct hyperv_event_flags_page *event_page; +}; + +#define TYPE_SYNIC "hyperv-synic" +OBJECT_DECLARE_SIMPLE_TYPE(SynICState, SYNIC) + +static bool synic_enabled; + +bool hyperv_is_synic_enabled(void) +{ + return synic_enabled; +} + +static SynICState *get_synic(CPUState *cs) +{ + return SYNIC(object_resolve_path_component(OBJECT(cs), "synic")); +} + +static void synic_update(SynICState *synic, bool enable, + hwaddr msg_page_addr, hwaddr event_page_addr) +{ + + synic->enabled = enable; + if (synic->msg_page_addr != msg_page_addr) { + if (synic->msg_page_addr) { + memory_region_del_subregion(get_system_memory(), + &synic->msg_page_mr); + } + if (msg_page_addr) { + memory_region_add_subregion(get_system_memory(), msg_page_addr, + &synic->msg_page_mr); + } + synic->msg_page_addr = msg_page_addr; + } + if (synic->event_page_addr != event_page_addr) { + if (synic->event_page_addr) { + memory_region_del_subregion(get_system_memory(), + &synic->event_page_mr); + } + if (event_page_addr) { + memory_region_add_subregion(get_system_memory(), event_page_addr, + &synic->event_page_mr); + } + synic->event_page_addr = event_page_addr; + } +} + +void hyperv_synic_update(CPUState *cs, bool enable, + hwaddr msg_page_addr, hwaddr event_page_addr) +{ + SynICState *synic = get_synic(cs); + + if (!synic) { + return; + } + + synic_update(synic, enable, msg_page_addr, event_page_addr); +} + +static void synic_realize(DeviceState *dev, Error **errp) +{ + Object *obj = OBJECT(dev); + 
SynICState *synic = SYNIC(dev); + char *msgp_name, *eventp_name; + uint32_t vp_index; + + /* memory region names have to be globally unique */ + vp_index = hyperv_vp_index(synic->cs); + msgp_name = g_strdup_printf("synic-%u-msg-page", vp_index); + eventp_name = g_strdup_printf("synic-%u-event-page", vp_index); + + memory_region_init_ram(&synic->msg_page_mr, obj, msgp_name, + sizeof(*synic->msg_page), &error_abort); + memory_region_init_ram(&synic->event_page_mr, obj, eventp_name, + sizeof(*synic->event_page), &error_abort); + synic->msg_page = memory_region_get_ram_ptr(&synic->msg_page_mr); + synic->event_page = memory_region_get_ram_ptr(&synic->event_page_mr); + + g_free(msgp_name); + g_free(eventp_name); +} +static void synic_reset(DeviceState *dev) +{ + SynICState *synic = SYNIC(dev); + memset(synic->msg_page, 0, sizeof(*synic->msg_page)); + memset(synic->event_page, 0, sizeof(*synic->event_page)); + synic_update(synic, false, 0, 0); +} + +static void synic_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = synic_realize; + dc->reset = synic_reset; + dc->user_creatable = false; +} + +void hyperv_synic_add(CPUState *cs) +{ + Object *obj; + SynICState *synic; + + obj = object_new(TYPE_SYNIC); + synic = SYNIC(obj); + synic->cs = cs; + object_property_add_child(OBJECT(cs), "synic", obj); + object_unref(obj); + qdev_realize(DEVICE(obj), NULL, &error_abort); + synic_enabled = true; +} + +void hyperv_synic_reset(CPUState *cs) +{ + SynICState *synic = get_synic(cs); + + if (synic) { + device_legacy_reset(DEVICE(synic)); + } +} + +static const TypeInfo synic_type_info = { + .name = TYPE_SYNIC, + .parent = TYPE_DEVICE, + .instance_size = sizeof(SynICState), + .class_init = synic_class_init, +}; + +static void synic_register_types(void) +{ + type_register_static(&synic_type_info); +} + +type_init(synic_register_types) + +/* + * KVM has its own message producers (SynIC timers). To guarantee + * serialization with both KVM vcpu and the guest cpu, the messages are first + * staged in an intermediate area and then posted to the SynIC message page in + * the vcpu thread. + */ +typedef struct HvSintStagedMessage { + /* message content staged by hyperv_post_msg */ + struct hyperv_message msg; + /* callback + data (r/o) to complete the processing in a BH */ + HvSintMsgCb cb; + void *cb_data; + /* message posting status filled by cpu_post_msg */ + int status; + /* passing the buck: */ + enum { + /* initial state */ + HV_STAGED_MSG_FREE, + /* + * hyperv_post_msg (e.g. in main loop) grabs the staged area (FREE -> + * BUSY), copies msg, and schedules cpu_post_msg on the assigned cpu + */ + HV_STAGED_MSG_BUSY, + /* + * cpu_post_msg (vcpu thread) tries to copy staged msg to msg slot, + * notify the guest, records the status, marks the posting done (BUSY + * -> POSTED), and schedules sint_msg_bh BH + */ + HV_STAGED_MSG_POSTED, + /* + * sint_msg_bh (BH) verifies that the posting is done, runs the + * callback, and starts over (POSTED -> FREE) + */ + } state; +} HvSintStagedMessage; + +struct HvSintRoute { + uint32_t sint; + SynICState *synic; + int gsi; + EventNotifier sint_set_notifier; + EventNotifier sint_ack_notifier; + + HvSintStagedMessage *staged_msg; + + unsigned refcount; +}; + +static CPUState *hyperv_find_vcpu(uint32_t vp_index) +{ + CPUState *cs = qemu_get_cpu(vp_index); + assert(hyperv_vp_index(cs) == vp_index); + return cs; +} + +/* + * BH to complete the processing of a staged message. 
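+ * It runs in the main loop context, scheduled either from
+ * cpu_post_msg (the message slot was free) or from sint_ack_handler
+ * (the guest EOMed a busy slot), and moves the staged message
+ * POSTED -> FREE after running the originator's callback.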
+ */
+static void sint_msg_bh(void *opaque)
+{
+ HvSintRoute *sint_route = opaque;
+ HvSintStagedMessage *staged_msg = sint_route->staged_msg;
+
+ if (qatomic_read(&staged_msg->state) != HV_STAGED_MSG_POSTED) {
+ /* status not ready yet (spurious ack from guest?), ignore */
+ return;
+ }
+
+ staged_msg->cb(staged_msg->cb_data, staged_msg->status);
+ staged_msg->status = 0;
+
+ /* staged message processing finished, ready to start over */
+ qatomic_set(&staged_msg->state, HV_STAGED_MSG_FREE);
+ /* drop the reference taken in hyperv_post_msg */
+ hyperv_sint_route_unref(sint_route);
+}
+
+/*
+ * Worker to transfer the message from the staging area into the SynIC message
+ * page in vcpu context.
+ */
+static void cpu_post_msg(CPUState *cs, run_on_cpu_data data)
+{
+ HvSintRoute *sint_route = data.host_ptr;
+ HvSintStagedMessage *staged_msg = sint_route->staged_msg;
+ SynICState *synic = sint_route->synic;
+ struct hyperv_message *dst_msg;
+ bool wait_for_sint_ack = false;
+
+ assert(staged_msg->state == HV_STAGED_MSG_BUSY);
+
+ if (!synic->enabled || !synic->msg_page_addr) {
+ staged_msg->status = -ENXIO;
+ goto posted;
+ }
+
+ dst_msg = &synic->msg_page->slot[sint_route->sint];
+
+ if (dst_msg->header.message_type != HV_MESSAGE_NONE) {
+ dst_msg->header.message_flags |= HV_MESSAGE_FLAG_PENDING;
+ staged_msg->status = -EAGAIN;
+ wait_for_sint_ack = true;
+ } else {
+ memcpy(dst_msg, &staged_msg->msg, sizeof(*dst_msg));
+ staged_msg->status = hyperv_sint_route_set_sint(sint_route);
+ }
+
+ memory_region_set_dirty(&synic->msg_page_mr, 0, sizeof(*synic->msg_page));
+
+posted:
+ qatomic_set(&staged_msg->state, HV_STAGED_MSG_POSTED);
+ /*
+ * Notify the msg originator of the progress made; if the slot was busy we
+ * set msg_pending flag in it so it will be the guest who will do EOM and
+ * trigger the notification from KVM via sint_ack_notifier
+ */
+ if (!wait_for_sint_ack) {
+ aio_bh_schedule_oneshot(qemu_get_aio_context(), sint_msg_bh,
+ sint_route);
+ }
+}
+
+/*
+ * Post a Hyper-V message to the staging area, for delivery to guest in the
+ * vcpu thread.
+ */
+int hyperv_post_msg(HvSintRoute *sint_route, struct hyperv_message *src_msg)
+{
+ HvSintStagedMessage *staged_msg = sint_route->staged_msg;
+
+ assert(staged_msg);
+
+ /* grab the staging area */
+ if (qatomic_cmpxchg(&staged_msg->state, HV_STAGED_MSG_FREE,
+ HV_STAGED_MSG_BUSY) != HV_STAGED_MSG_FREE) {
+ return -EAGAIN;
+ }
+
+ memcpy(&staged_msg->msg, src_msg, sizeof(*src_msg));
+
+ /* hold a reference on sint_route until the callback is finished */
+ hyperv_sint_route_ref(sint_route);
+
+ /* schedule message posting attempt in vcpu thread */
+ async_run_on_cpu(sint_route->synic->cs, cpu_post_msg,
+ RUN_ON_CPU_HOST_PTR(sint_route));
+ return 0;
+}
+
+static void sint_ack_handler(EventNotifier *notifier)
+{
+ HvSintRoute *sint_route = container_of(notifier, HvSintRoute,
+ sint_ack_notifier);
+ event_notifier_test_and_clear(notifier);
+
+ /*
+ * the guest consumed the previous message so complete the current one with
+ * -EAGAIN and let the msg originator retry
+ */
+ aio_bh_schedule_oneshot(qemu_get_aio_context(), sint_msg_bh, sint_route);
+}
+
+/*
+ * Set given event flag for a given sint on a given vcpu, and signal the sint.
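+ *
+ * A minimal caller sketch (vp_index, sint and eventno are the
+ * caller's; all three helpers are defined in this file):
+ *
+ *     HvSintRoute *r = hyperv_sint_route_new(vp_index, sint, NULL, NULL);
+ *     if (r) {
+ *         int err = hyperv_set_event_flag(r, eventno);
+ *         ...
+ *         hyperv_sint_route_unref(r);
+ *     }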
+ */ +int hyperv_set_event_flag(HvSintRoute *sint_route, unsigned eventno) +{ + int ret; + SynICState *synic = sint_route->synic; + unsigned long *flags, set_mask; + unsigned set_idx; + + if (eventno > HV_EVENT_FLAGS_COUNT) { + return -EINVAL; + } + if (!synic->enabled || !synic->event_page_addr) { + return -ENXIO; + } + + set_idx = BIT_WORD(eventno); + set_mask = BIT_MASK(eventno); + flags = synic->event_page->slot[sint_route->sint].flags; + + if ((qatomic_fetch_or(&flags[set_idx], set_mask) & set_mask) != set_mask) { + memory_region_set_dirty(&synic->event_page_mr, 0, + sizeof(*synic->event_page)); + ret = hyperv_sint_route_set_sint(sint_route); + } else { + ret = 0; + } + return ret; +} + +HvSintRoute *hyperv_sint_route_new(uint32_t vp_index, uint32_t sint, + HvSintMsgCb cb, void *cb_data) +{ + HvSintRoute *sint_route; + EventNotifier *ack_notifier; + int r, gsi; + CPUState *cs; + SynICState *synic; + + cs = hyperv_find_vcpu(vp_index); + if (!cs) { + return NULL; + } + + synic = get_synic(cs); + if (!synic) { + return NULL; + } + + sint_route = g_new0(HvSintRoute, 1); + r = event_notifier_init(&sint_route->sint_set_notifier, false); + if (r) { + goto err; + } + + + ack_notifier = cb ? &sint_route->sint_ack_notifier : NULL; + if (ack_notifier) { + sint_route->staged_msg = g_new0(HvSintStagedMessage, 1); + sint_route->staged_msg->cb = cb; + sint_route->staged_msg->cb_data = cb_data; + + r = event_notifier_init(ack_notifier, false); + if (r) { + goto err_sint_set_notifier; + } + + event_notifier_set_handler(ack_notifier, sint_ack_handler); + } + + gsi = kvm_irqchip_add_hv_sint_route(kvm_state, vp_index, sint); + if (gsi < 0) { + goto err_gsi; + } + + r = kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, + &sint_route->sint_set_notifier, + ack_notifier, gsi); + if (r) { + goto err_irqfd; + } + sint_route->gsi = gsi; + sint_route->synic = synic; + sint_route->sint = sint; + sint_route->refcount = 1; + + return sint_route; + +err_irqfd: + kvm_irqchip_release_virq(kvm_state, gsi); +err_gsi: + if (ack_notifier) { + event_notifier_set_handler(ack_notifier, NULL); + event_notifier_cleanup(ack_notifier); + g_free(sint_route->staged_msg); + } +err_sint_set_notifier: + event_notifier_cleanup(&sint_route->sint_set_notifier); +err: + g_free(sint_route); + + return NULL; +} + +void hyperv_sint_route_ref(HvSintRoute *sint_route) +{ + sint_route->refcount++; +} + +void hyperv_sint_route_unref(HvSintRoute *sint_route) +{ + if (!sint_route) { + return; + } + + assert(sint_route->refcount > 0); + + if (--sint_route->refcount) { + return; + } + + kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, + &sint_route->sint_set_notifier, + sint_route->gsi); + kvm_irqchip_release_virq(kvm_state, sint_route->gsi); + if (sint_route->staged_msg) { + event_notifier_set_handler(&sint_route->sint_ack_notifier, NULL); + event_notifier_cleanup(&sint_route->sint_ack_notifier); + g_free(sint_route->staged_msg); + } + event_notifier_cleanup(&sint_route->sint_set_notifier); + g_free(sint_route); +} + +int hyperv_sint_route_set_sint(HvSintRoute *sint_route) +{ + return event_notifier_set(&sint_route->sint_set_notifier); +} + +typedef struct MsgHandler { + struct rcu_head rcu; + QLIST_ENTRY(MsgHandler) link; + uint32_t conn_id; + HvMsgHandler handler; + void *data; +} MsgHandler; + +typedef struct EventFlagHandler { + struct rcu_head rcu; + QLIST_ENTRY(EventFlagHandler) link; + uint32_t conn_id; + EventNotifier *notifier; +} EventFlagHandler; + +static QLIST_HEAD(, MsgHandler) msg_handlers; +static QLIST_HEAD(, EventFlagHandler) 
event_flag_handlers; +static QemuMutex handlers_mutex; + +static void __attribute__((constructor)) hv_init(void) +{ + QLIST_INIT(&msg_handlers); + QLIST_INIT(&event_flag_handlers); + qemu_mutex_init(&handlers_mutex); +} + +int hyperv_set_msg_handler(uint32_t conn_id, HvMsgHandler handler, void *data) +{ + int ret; + MsgHandler *mh; + + QEMU_LOCK_GUARD(&handlers_mutex); + QLIST_FOREACH(mh, &msg_handlers, link) { + if (mh->conn_id == conn_id) { + if (handler) { + ret = -EEXIST; + } else { + QLIST_REMOVE_RCU(mh, link); + g_free_rcu(mh, rcu); + ret = 0; + } + return ret; + } + } + + if (handler) { + mh = g_new(MsgHandler, 1); + mh->conn_id = conn_id; + mh->handler = handler; + mh->data = data; + QLIST_INSERT_HEAD_RCU(&msg_handlers, mh, link); + ret = 0; + } else { + ret = -ENOENT; + } + + return ret; +} + +uint16_t hyperv_hcall_post_message(uint64_t param, bool fast) +{ + uint16_t ret; + hwaddr len; + struct hyperv_post_message_input *msg; + MsgHandler *mh; + + if (fast) { + return HV_STATUS_INVALID_HYPERCALL_CODE; + } + if (param & (__alignof__(*msg) - 1)) { + return HV_STATUS_INVALID_ALIGNMENT; + } + + len = sizeof(*msg); + msg = cpu_physical_memory_map(param, &len, 0); + if (len < sizeof(*msg)) { + ret = HV_STATUS_INSUFFICIENT_MEMORY; + goto unmap; + } + if (msg->payload_size > sizeof(msg->payload)) { + ret = HV_STATUS_INVALID_HYPERCALL_INPUT; + goto unmap; + } + + ret = HV_STATUS_INVALID_CONNECTION_ID; + WITH_RCU_READ_LOCK_GUARD() { + QLIST_FOREACH_RCU(mh, &msg_handlers, link) { + if (mh->conn_id == (msg->connection_id & HV_CONNECTION_ID_MASK)) { + ret = mh->handler(msg, mh->data); + break; + } + } + } + +unmap: + cpu_physical_memory_unmap(msg, len, 0, 0); + return ret; +} + +static int set_event_flag_handler(uint32_t conn_id, EventNotifier *notifier) +{ + int ret; + EventFlagHandler *handler; + + QEMU_LOCK_GUARD(&handlers_mutex); + QLIST_FOREACH(handler, &event_flag_handlers, link) { + if (handler->conn_id == conn_id) { + if (notifier) { + ret = -EEXIST; + } else { + QLIST_REMOVE_RCU(handler, link); + g_free_rcu(handler, rcu); + ret = 0; + } + return ret; + } + } + + if (notifier) { + handler = g_new(EventFlagHandler, 1); + handler->conn_id = conn_id; + handler->notifier = notifier; + QLIST_INSERT_HEAD_RCU(&event_flag_handlers, handler, link); + ret = 0; + } else { + ret = -ENOENT; + } + + return ret; +} + +static bool process_event_flags_userspace; + +int hyperv_set_event_flag_handler(uint32_t conn_id, EventNotifier *notifier) +{ + if (!process_event_flags_userspace && + !kvm_check_extension(kvm_state, KVM_CAP_HYPERV_EVENTFD)) { + process_event_flags_userspace = true; + + warn_report("Hyper-V event signaling is not supported by this kernel; " + "using slower userspace hypercall processing"); + } + + if (!process_event_flags_userspace) { + struct kvm_hyperv_eventfd hvevfd = { + .conn_id = conn_id, + .fd = notifier ? event_notifier_get_fd(notifier) : -1, + .flags = notifier ? 0 : KVM_HYPERV_EVENTFD_DEASSIGN, + }; + + return kvm_vm_ioctl(kvm_state, KVM_HYPERV_EVENTFD, &hvevfd); + } + return set_event_flag_handler(conn_id, notifier); +} + +uint16_t hyperv_hcall_signal_event(uint64_t param, bool fast) +{ + EventFlagHandler *handler; + + if (unlikely(!fast)) { + hwaddr addr = param; + + if (addr & (__alignof__(addr) - 1)) { + return HV_STATUS_INVALID_ALIGNMENT; + } + + param = ldq_phys(&address_space_memory, addr); + } + + /* + * Per spec, bits 32-47 contain the extra "flag number". 
However, we + * have no use for it, and in all known usecases it is zero, so just + * report lookup failure if it isn't. + */ + if (param & 0xffff00000000ULL) { + return HV_STATUS_INVALID_PORT_ID; + } + /* remaining bits are reserved-zero */ + if (param & ~HV_CONNECTION_ID_MASK) { + return HV_STATUS_INVALID_HYPERCALL_INPUT; + } + + RCU_READ_LOCK_GUARD(); + QLIST_FOREACH_RCU(handler, &event_flag_handlers, link) { + if (handler->conn_id == param) { + event_notifier_set(handler->notifier); + return 0; + } + } + return HV_STATUS_INVALID_CONNECTION_ID; +} diff --git a/hw/hyperv/hyperv_testdev.c b/hw/hyperv/hyperv_testdev.c new file mode 100644 index 000000000..9a56ddf83 --- /dev/null +++ b/hw/hyperv/hyperv_testdev.c @@ -0,0 +1,326 @@ +/* + * QEMU KVM Hyper-V test device to support Hyper-V kvm-unit-tests + * + * Copyright (C) 2015 Andrey Smetanin + * + * Authors: + * Andrey Smetanin + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +#include "qemu/osdep.h" +#include "qemu/main-loop.h" +#include "qemu/module.h" +#include "qemu/queue.h" +#include "hw/isa/isa.h" +#include "hw/hyperv/hyperv.h" +#include "qom/object.h" + +typedef struct TestSintRoute { + QLIST_ENTRY(TestSintRoute) le; + uint8_t vp_index; + uint8_t sint; + HvSintRoute *sint_route; +} TestSintRoute; + +typedef struct TestMsgConn { + QLIST_ENTRY(TestMsgConn) le; + uint8_t conn_id; + HvSintRoute *sint_route; + struct hyperv_message msg; +} TestMsgConn; + +typedef struct TestEvtConn { + QLIST_ENTRY(TestEvtConn) le; + uint8_t conn_id; + HvSintRoute *sint_route; + EventNotifier notifier; +} TestEvtConn; + +struct HypervTestDev { + ISADevice parent_obj; + MemoryRegion sint_control; + QLIST_HEAD(, TestSintRoute) sint_routes; + QLIST_HEAD(, TestMsgConn) msg_conns; + QLIST_HEAD(, TestEvtConn) evt_conns; +}; + +#define TYPE_HYPERV_TEST_DEV "hyperv-testdev" +OBJECT_DECLARE_SIMPLE_TYPE(HypervTestDev, HYPERV_TEST_DEV) + +enum { + HV_TEST_DEV_SINT_ROUTE_CREATE = 1, + HV_TEST_DEV_SINT_ROUTE_DESTROY, + HV_TEST_DEV_SINT_ROUTE_SET_SINT, + HV_TEST_DEV_MSG_CONN_CREATE, + HV_TEST_DEV_MSG_CONN_DESTROY, + HV_TEST_DEV_EVT_CONN_CREATE, + HV_TEST_DEV_EVT_CONN_DESTROY, +}; + +static void sint_route_create(HypervTestDev *dev, + uint8_t vp_index, uint8_t sint) +{ + TestSintRoute *sint_route; + + sint_route = g_new0(TestSintRoute, 1); + assert(sint_route); + + sint_route->vp_index = vp_index; + sint_route->sint = sint; + + sint_route->sint_route = hyperv_sint_route_new(vp_index, sint, NULL, NULL); + assert(sint_route->sint_route); + + QLIST_INSERT_HEAD(&dev->sint_routes, sint_route, le); +} + +static TestSintRoute *sint_route_find(HypervTestDev *dev, + uint8_t vp_index, uint8_t sint) +{ + TestSintRoute *sint_route; + + QLIST_FOREACH(sint_route, &dev->sint_routes, le) { + if (sint_route->vp_index == vp_index && sint_route->sint == sint) { + return sint_route; + } + } + assert(false); + return NULL; +} + +static void sint_route_destroy(HypervTestDev *dev, + uint8_t vp_index, uint8_t sint) +{ + TestSintRoute *sint_route; + + sint_route = sint_route_find(dev, vp_index, sint); + QLIST_REMOVE(sint_route, le); + hyperv_sint_route_unref(sint_route->sint_route); + g_free(sint_route); +} + +static void sint_route_set_sint(HypervTestDev *dev, + uint8_t vp_index, uint8_t sint) +{ + TestSintRoute *sint_route; + + sint_route = sint_route_find(dev, vp_index, sint); + + hyperv_sint_route_set_sint(sint_route->sint_route); +} + +static void msg_retry(void *opaque) +{ + TestMsgConn *conn = 
opaque; + assert(!hyperv_post_msg(conn->sint_route, &conn->msg)); +} + +static void msg_cb(void *data, int status) +{ + TestMsgConn *conn = data; + + if (!status) { + return; + } + + assert(status == -EAGAIN); + + aio_bh_schedule_oneshot(qemu_get_aio_context(), msg_retry, conn); +} + +static uint16_t msg_handler(const struct hyperv_post_message_input *msg, + void *data) +{ + int ret; + TestMsgConn *conn = data; + + /* post the same message we've got */ + conn->msg.header.message_type = msg->message_type; + assert(msg->payload_size < sizeof(conn->msg.payload)); + conn->msg.header.payload_size = msg->payload_size; + memcpy(&conn->msg.payload, msg->payload, msg->payload_size); + + ret = hyperv_post_msg(conn->sint_route, &conn->msg); + + switch (ret) { + case 0: + return HV_STATUS_SUCCESS; + case -EAGAIN: + return HV_STATUS_INSUFFICIENT_BUFFERS; + default: + return HV_STATUS_INVALID_HYPERCALL_INPUT; + } +} + +static void msg_conn_create(HypervTestDev *dev, uint8_t vp_index, + uint8_t sint, uint8_t conn_id) +{ + TestMsgConn *conn; + + conn = g_new0(TestMsgConn, 1); + assert(conn); + + conn->conn_id = conn_id; + + conn->sint_route = hyperv_sint_route_new(vp_index, sint, msg_cb, conn); + assert(conn->sint_route); + + assert(!hyperv_set_msg_handler(conn->conn_id, msg_handler, conn)); + + QLIST_INSERT_HEAD(&dev->msg_conns, conn, le); +} + +static void msg_conn_destroy(HypervTestDev *dev, uint8_t conn_id) +{ + TestMsgConn *conn; + + QLIST_FOREACH(conn, &dev->msg_conns, le) { + if (conn->conn_id == conn_id) { + QLIST_REMOVE(conn, le); + hyperv_set_msg_handler(conn->conn_id, NULL, NULL); + hyperv_sint_route_unref(conn->sint_route); + g_free(conn); + return; + } + } + assert(false); +} + +static void evt_conn_handler(EventNotifier *notifier) +{ + TestEvtConn *conn = container_of(notifier, TestEvtConn, notifier); + + event_notifier_test_and_clear(notifier); + + /* signal the same event flag we've got */ + assert(!hyperv_set_event_flag(conn->sint_route, conn->conn_id)); +} + +static void evt_conn_create(HypervTestDev *dev, uint8_t vp_index, + uint8_t sint, uint8_t conn_id) +{ + TestEvtConn *conn; + + conn = g_new0(TestEvtConn, 1); + assert(conn); + + conn->conn_id = conn_id; + + conn->sint_route = hyperv_sint_route_new(vp_index, sint, NULL, NULL); + assert(conn->sint_route); + + assert(!event_notifier_init(&conn->notifier, false)); + + event_notifier_set_handler(&conn->notifier, evt_conn_handler); + + assert(!hyperv_set_event_flag_handler(conn_id, &conn->notifier)); + + QLIST_INSERT_HEAD(&dev->evt_conns, conn, le); +} + +static void evt_conn_destroy(HypervTestDev *dev, uint8_t conn_id) +{ + TestEvtConn *conn; + + QLIST_FOREACH(conn, &dev->evt_conns, le) { + if (conn->conn_id == conn_id) { + QLIST_REMOVE(conn, le); + hyperv_set_event_flag_handler(conn->conn_id, NULL); + event_notifier_set_handler(&conn->notifier, NULL); + event_notifier_cleanup(&conn->notifier); + hyperv_sint_route_unref(conn->sint_route); + g_free(conn); + return; + } + } + assert(false); +} + +static uint64_t hv_test_dev_read(void *opaque, hwaddr addr, unsigned size) +{ + return 0; +} + +static void hv_test_dev_write(void *opaque, hwaddr addr, uint64_t data, + uint32_t len) +{ + HypervTestDev *dev = HYPERV_TEST_DEV(opaque); + uint8_t sint = data & 0xFF; + uint8_t vp_index = (data >> 8ULL) & 0xFF; + uint8_t ctl = (data >> 16ULL) & 0xFF; + uint8_t conn_id = (data >> 24ULL) & 0xFF; + + switch (ctl) { + case HV_TEST_DEV_SINT_ROUTE_CREATE: + sint_route_create(dev, vp_index, sint); + break; + case HV_TEST_DEV_SINT_ROUTE_DESTROY: + 
sint_route_destroy(dev, vp_index, sint); + break; + case HV_TEST_DEV_SINT_ROUTE_SET_SINT: + sint_route_set_sint(dev, vp_index, sint); + break; + case HV_TEST_DEV_MSG_CONN_CREATE: + msg_conn_create(dev, vp_index, sint, conn_id); + break; + case HV_TEST_DEV_MSG_CONN_DESTROY: + msg_conn_destroy(dev, conn_id); + break; + case HV_TEST_DEV_EVT_CONN_CREATE: + evt_conn_create(dev, vp_index, sint, conn_id); + break; + case HV_TEST_DEV_EVT_CONN_DESTROY: + evt_conn_destroy(dev, conn_id); + break; + default: + break; + } +} + +static const MemoryRegionOps synic_test_sint_ops = { + .read = hv_test_dev_read, + .write = hv_test_dev_write, + .valid.min_access_size = 4, + .valid.max_access_size = 4, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static void hv_test_dev_realizefn(DeviceState *d, Error **errp) +{ + ISADevice *isa = ISA_DEVICE(d); + HypervTestDev *dev = HYPERV_TEST_DEV(d); + MemoryRegion *io = isa_address_space_io(isa); + + QLIST_INIT(&dev->sint_routes); + QLIST_INIT(&dev->msg_conns); + QLIST_INIT(&dev->evt_conns); + memory_region_init_io(&dev->sint_control, OBJECT(dev), + &synic_test_sint_ops, dev, + "hyperv-testdev-ctl", 4); + memory_region_add_subregion(io, 0x3000, &dev->sint_control); +} + +static void hv_test_dev_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + set_bit(DEVICE_CATEGORY_MISC, dc->categories); + dc->realize = hv_test_dev_realizefn; +} + +static const TypeInfo hv_test_dev_info = { + .name = TYPE_HYPERV_TEST_DEV, + .parent = TYPE_ISA_DEVICE, + .instance_size = sizeof(HypervTestDev), + .class_init = hv_test_dev_class_init, +}; + +static void hv_test_dev_register_types(void) +{ + type_register_static(&hv_test_dev_info); +} +type_init(hv_test_dev_register_types); diff --git a/hw/hyperv/meson.build b/hw/hyperv/meson.build new file mode 100644 index 000000000..1367e2994 --- /dev/null +++ b/hw/hyperv/meson.build @@ -0,0 +1,3 @@ +specific_ss.add(when: 'CONFIG_HYPERV', if_true: files('hyperv.c')) +specific_ss.add(when: 'CONFIG_HYPERV_TESTDEV', if_true: files('hyperv_testdev.c')) +specific_ss.add(when: 'CONFIG_VMBUS', if_true: files('vmbus.c')) diff --git a/hw/hyperv/trace-events b/hw/hyperv/trace-events new file mode 100644 index 000000000..b4c35ca8e --- /dev/null +++ b/hw/hyperv/trace-events @@ -0,0 +1,18 @@ +# vmbus.c +vmbus_recv_message(uint32_t type, uint32_t size) "type %d size %d" +vmbus_signal_event(void) "" +vmbus_channel_notify_guest(uint32_t chan_id) "channel #%d" +vmbus_post_msg(uint32_t type, uint32_t size) "type %d size %d" +vmbus_msg_cb(int status) "message status %d" +vmbus_process_incoming_message(uint32_t message_type) "type %d" +vmbus_initiate_contact(uint16_t major, uint16_t minor, uint32_t vcpu, uint64_t monitor_page1, uint64_t monitor_page2, uint64_t interrupt_page) "version %d.%d target vp %d mon pages 0x%"PRIx64",0x%"PRIx64" int page 0x%"PRIx64 +vmbus_send_offer(uint32_t chan_id, void *dev) "channel #%d dev %p" +vmbus_terminate_offers(void) "" +vmbus_gpadl_header(uint32_t gpadl_id, uint16_t num_gfns) "gpadl #%d gfns %d" +vmbus_gpadl_body(uint32_t gpadl_id) "gpadl #%d" +vmbus_gpadl_created(uint32_t gpadl_id) "gpadl #%d" +vmbus_gpadl_teardown(uint32_t gpadl_id) "gpadl #%d" +vmbus_gpadl_torndown(uint32_t gpadl_id) "gpadl #%d" +vmbus_open_channel(uint32_t chan_id, uint32_t gpadl_id, uint32_t target_vp) "channel #%d gpadl #%d target vp %d" +vmbus_channel_open(uint32_t chan_id, uint32_t status) "channel #%d status %d" +vmbus_close_channel(uint32_t chan_id) "channel #%d" diff --git a/hw/hyperv/trace.h b/hw/hyperv/trace.h new 
file mode 100644 index 000000000..7f2a88881 --- /dev/null +++ b/hw/hyperv/trace.h @@ -0,0 +1 @@ +#include "trace/trace-hw_hyperv.h" diff --git a/hw/hyperv/vmbus.c b/hw/hyperv/vmbus.c new file mode 100644 index 000000000..dbce3b35f --- /dev/null +++ b/hw/hyperv/vmbus.c @@ -0,0 +1,2785 @@ +/*
+ * QEMU Hyper-V VMBus
+ *
+ * Copyright (c) 2017-2018 Virtuozzo International GmbH.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/error-report.h"
+#include "qemu/main-loop.h"
+#include "qapi/error.h"
+#include "migration/vmstate.h"
+#include "hw/qdev-properties.h"
+#include "hw/qdev-properties-system.h"
+#include "hw/hyperv/hyperv.h"
+#include "hw/hyperv/vmbus.h"
+#include "hw/hyperv/vmbus-bridge.h"
+#include "hw/sysbus.h"
+#include "cpu.h"
+#include "trace.h"
+
+enum {
+ VMGPADL_INIT,
+ VMGPADL_ALIVE,
+ VMGPADL_TEARINGDOWN,
+ VMGPADL_TORNDOWN,
+};
+
+struct VMBusGpadl {
+ /* GPADL id */
+ uint32_t id;
+ /* associated channel id (rudimentary?) */
+ uint32_t child_relid;
+
+ /* number of pages in the GPADL as declared in GPADL_HEADER message */
+ uint32_t num_gfns;
+ /*
+ * Due to limited message size, GPADL may not fit fully in a single
+ * GPADL_HEADER message, and is further populated using GPADL_BODY
+ * messages. @seen_gfns is the number of pages seen so far; once it
+ * reaches @num_gfns, the GPADL is ready to use.
+ */
+ uint32_t seen_gfns;
+ /* array of GFNs (of size @num_gfns once allocated) */
+ uint64_t *gfns;
+
+ uint8_t state;
+
+ QTAILQ_ENTRY(VMBusGpadl) link;
+ VMBus *vmbus;
+ unsigned refcount;
+};
+
+/*
+ * Wrap sequential read from / write to GPADL.
+ */
+typedef struct GpadlIter {
+ VMBusGpadl *gpadl;
+ AddressSpace *as;
+ DMADirection dir;
+ /* offset into GPADL where the next i/o will be performed */
+ uint32_t off;
+ /*
+ * Cached mapping of the currently accessed page, up to page boundary.
+ * Updated lazily on i/o.
+ * Note: MemoryRegionCache can not be used here because pages in the GPADL
+ * are non-contiguous and may belong to different memory regions.
+ */
+ void *map;
+ /* offset after last i/o (i.e. not affected by seek) */
+ uint32_t last_off;
+ /*
+ * Indicator that the iterator is active and may have a cached mapping.
+ * Allows to enforce bracketing of all i/o (which may create cached
+ * mappings) and thus exclude mapping leaks.
+ */
+ bool active;
+} GpadlIter;
+
+/*
+ * Ring buffer. There are two of them, sitting in the same GPADL, for each
+ * channel.
+ * Each ring buffer consists of a set of pages, with the first page containing
+ * the ring buffer header, and the remaining pages being for data packets.
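+ *
+ * Roughly, for a channel whose GPADL carries both rings (a sketch
+ * from the host's point of view, with the send ring starting
+ * @ringbuf_send_offset pages in, see VMBusChannel below):
+ *
+ *     page 0:                       recv ring header
+ *     pages 1 .. send_offset - 1:   recv ring data
+ *     page send_offset:             send ring header
+ *     pages send_offset + 1 .. :    send ring data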
+ */ +typedef struct VMBusRingBufCommon { + AddressSpace *as; + /* GPA of the ring buffer header */ + dma_addr_t rb_addr; + /* start and length of the ring buffer data area within GPADL */ + uint32_t base; + uint32_t len; + + GpadlIter iter; +} VMBusRingBufCommon; + +typedef struct VMBusSendRingBuf { + VMBusRingBufCommon common; + /* current write index, to be committed at the end of send */ + uint32_t wr_idx; + /* write index at the start of send */ + uint32_t last_wr_idx; + /* space to be requested from the guest */ + uint32_t wanted; + /* space reserved for planned sends */ + uint32_t reserved; + /* last seen read index */ + uint32_t last_seen_rd_idx; +} VMBusSendRingBuf; + +typedef struct VMBusRecvRingBuf { + VMBusRingBufCommon common; + /* current read index, to be committed at the end of receive */ + uint32_t rd_idx; + /* read index at the start of receive */ + uint32_t last_rd_idx; + /* last seen write index */ + uint32_t last_seen_wr_idx; +} VMBusRecvRingBuf; + + +enum { + VMOFFER_INIT, + VMOFFER_SENDING, + VMOFFER_SENT, +}; + +enum { + VMCHAN_INIT, + VMCHAN_OPENING, + VMCHAN_OPEN, +}; + +struct VMBusChannel { + VMBusDevice *dev; + + /* channel id */ + uint32_t id; + /* + * subchannel index within the device; subchannel #0 is "primary" and + * always exists + */ + uint16_t subchan_idx; + uint32_t open_id; + /* VP_INDEX of the vCPU to notify with (synthetic) interrupts */ + uint32_t target_vp; + /* GPADL id to use for the ring buffers */ + uint32_t ringbuf_gpadl; + /* start (in pages) of the send ring buffer within @ringbuf_gpadl */ + uint32_t ringbuf_send_offset; + + uint8_t offer_state; + uint8_t state; + bool is_open; + + /* main device worker; copied from the device class */ + VMBusChannelNotifyCb notify_cb; + /* + * guest->host notifications, either sent directly or dispatched via + * interrupt page (older VMBus) + */ + EventNotifier notifier; + + VMBus *vmbus; + /* + * SINT route to signal with host->guest notifications; may be shared with + * the main VMBus SINT route + */ + HvSintRoute *notify_route; + VMBusGpadl *gpadl; + + VMBusSendRingBuf send_ringbuf; + VMBusRecvRingBuf recv_ringbuf; + + QTAILQ_ENTRY(VMBusChannel) link; +}; + +/* + * Hyper-V spec mandates that every message port has 16 buffers, which means + * that the guest can post up to this many messages without blocking. + * Therefore a queue for incoming messages has to be provided. + * For outgoing (i.e. host->guest) messages there's no queue; the VMBus just + * doesn't transition to a new state until the message is known to have been + * successfully delivered to the respective SynIC message slot. + */ +#define HV_MSG_QUEUE_LEN 16 + +/* Hyper-V devices never use channel #0. Must be something special. */ +#define VMBUS_FIRST_CHANID 1 +/* Each channel occupies one bit within a single event page sint slot. */ +#define VMBUS_CHANID_COUNT (HV_EVENT_FLAGS_COUNT - VMBUS_FIRST_CHANID) +/* Leave a few connection numbers for other purposes. */ +#define VMBUS_CHAN_CONNECTION_OFFSET 16 + +/* + * Since the success or failure of sending a message is reported + * asynchronously, the VMBus state machine has effectively two entry points: + * vmbus_run and vmbus_msg_cb (the latter is called when the host->guest + * message delivery status becomes known). Both are run as oneshot BHs on the + * main aio context, ensuring serialization. 
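+ *
+ * As a sketch:
+ *
+ *     guest hypercall -> rx_queue -> vmbus_run (BH):
+ *         act on the current state, post a host->guest message
+ *     delivery status known -> vmbus_msg_cb (BH):
+ *         advance or retry the state machine, reschedule vmbus_run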
+ */ +enum { + VMBUS_LISTEN, + VMBUS_HANDSHAKE, + VMBUS_OFFER, + VMBUS_CREATE_GPADL, + VMBUS_TEARDOWN_GPADL, + VMBUS_OPEN_CHANNEL, + VMBUS_UNLOAD, + VMBUS_STATE_MAX +}; + +struct VMBus { + BusState parent; + + uint8_t state; + /* protection against recursive aio_poll (see vmbus_run) */ + bool in_progress; + /* whether there's a message being delivered to the guest */ + bool msg_in_progress; + uint32_t version; + /* VP_INDEX of the vCPU to send messages and interrupts to */ + uint32_t target_vp; + HvSintRoute *sint_route; + /* + * interrupt page for older protocol versions; newer ones use SynIC event + * flags directly + */ + hwaddr int_page_gpa; + + DECLARE_BITMAP(chanid_bitmap, VMBUS_CHANID_COUNT); + + /* incoming message queue */ + struct hyperv_post_message_input rx_queue[HV_MSG_QUEUE_LEN]; + uint8_t rx_queue_head; + uint8_t rx_queue_size; + QemuMutex rx_queue_lock; + + QTAILQ_HEAD(, VMBusGpadl) gpadl_list; + QTAILQ_HEAD(, VMBusChannel) channel_list; + + /* + * guest->host notifications for older VMBus, to be dispatched via + * interrupt page + */ + EventNotifier notifier; +}; + +static bool gpadl_full(VMBusGpadl *gpadl) +{ + return gpadl->seen_gfns == gpadl->num_gfns; +} + +static VMBusGpadl *create_gpadl(VMBus *vmbus, uint32_t id, + uint32_t child_relid, uint32_t num_gfns) +{ + VMBusGpadl *gpadl = g_new0(VMBusGpadl, 1); + + gpadl->id = id; + gpadl->child_relid = child_relid; + gpadl->num_gfns = num_gfns; + gpadl->gfns = g_new(uint64_t, num_gfns); + QTAILQ_INSERT_HEAD(&vmbus->gpadl_list, gpadl, link); + gpadl->vmbus = vmbus; + gpadl->refcount = 1; + return gpadl; +} + +static void free_gpadl(VMBusGpadl *gpadl) +{ + QTAILQ_REMOVE(&gpadl->vmbus->gpadl_list, gpadl, link); + g_free(gpadl->gfns); + g_free(gpadl); +} + +static VMBusGpadl *find_gpadl(VMBus *vmbus, uint32_t gpadl_id) +{ + VMBusGpadl *gpadl; + QTAILQ_FOREACH(gpadl, &vmbus->gpadl_list, link) { + if (gpadl->id == gpadl_id) { + return gpadl; + } + } + return NULL; +} + +VMBusGpadl *vmbus_get_gpadl(VMBusChannel *chan, uint32_t gpadl_id) +{ + VMBusGpadl *gpadl = find_gpadl(chan->vmbus, gpadl_id); + if (!gpadl || !gpadl_full(gpadl)) { + return NULL; + } + gpadl->refcount++; + return gpadl; +} + +void vmbus_put_gpadl(VMBusGpadl *gpadl) +{ + if (!gpadl) { + return; + } + if (--gpadl->refcount) { + return; + } + free_gpadl(gpadl); +} + +uint32_t vmbus_gpadl_len(VMBusGpadl *gpadl) +{ + return gpadl->num_gfns * TARGET_PAGE_SIZE; +} + +static void gpadl_iter_init(GpadlIter *iter, VMBusGpadl *gpadl, + AddressSpace *as, DMADirection dir) +{ + iter->gpadl = gpadl; + iter->as = as; + iter->dir = dir; + iter->active = false; +} + +static inline void gpadl_iter_cache_unmap(GpadlIter *iter) +{ + uint32_t map_start_in_page = (uintptr_t)iter->map & ~TARGET_PAGE_MASK; + uint32_t io_end_in_page = ((iter->last_off - 1) & ~TARGET_PAGE_MASK) + 1; + + /* mapping is only done to do non-zero amount of i/o */ + assert(iter->last_off > 0); + assert(map_start_in_page < io_end_in_page); + + dma_memory_unmap(iter->as, iter->map, TARGET_PAGE_SIZE - map_start_in_page, + iter->dir, io_end_in_page - map_start_in_page); +} + +/* + * Copy exactly @len bytes between the GPADL pointed to by @iter and @buf. + * The direction of the copy is determined by @iter->dir. + * The caller must ensure the operation overflows neither @buf nor the GPADL + * (there's an assert for the latter). + * Reuse the currently mapped page in the GPADL if possible. 
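+ *
+ * Callers bracket a series of copies the way vmbus_iov_to_gpadl()
+ * below does:
+ *
+ *     gpadl_iter_start_io(&iter);
+ *     gpadl_iter_seek(&iter, off);
+ *     ret = gpadl_iter_io(&iter, buf, len);
+ *     gpadl_iter_end_io(&iter);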
+ */ +static ssize_t gpadl_iter_io(GpadlIter *iter, void *buf, uint32_t len) +{ + ssize_t ret = len; + + assert(iter->active); + + while (len) { + uint32_t off_in_page = iter->off & ~TARGET_PAGE_MASK; + uint32_t pgleft = TARGET_PAGE_SIZE - off_in_page; + uint32_t cplen = MIN(pgleft, len); + void *p; + + /* try to reuse the cached mapping */ + if (iter->map) { + uint32_t map_start_in_page = + (uintptr_t)iter->map & ~TARGET_PAGE_MASK; + uint32_t off_base = iter->off & ~TARGET_PAGE_MASK; + uint32_t mapped_base = (iter->last_off - 1) & ~TARGET_PAGE_MASK; + if (off_base != mapped_base || off_in_page < map_start_in_page) { + gpadl_iter_cache_unmap(iter); + iter->map = NULL; + } + } + + if (!iter->map) { + dma_addr_t maddr; + dma_addr_t mlen = pgleft; + uint32_t idx = iter->off >> TARGET_PAGE_BITS; + assert(idx < iter->gpadl->num_gfns); + + maddr = (iter->gpadl->gfns[idx] << TARGET_PAGE_BITS) | off_in_page; + + iter->map = dma_memory_map(iter->as, maddr, &mlen, iter->dir); + if (mlen != pgleft) { + dma_memory_unmap(iter->as, iter->map, mlen, iter->dir, 0); + iter->map = NULL; + return -EFAULT; + } + } + + p = (void *)(uintptr_t)(((uintptr_t)iter->map & TARGET_PAGE_MASK) | + off_in_page); + if (iter->dir == DMA_DIRECTION_FROM_DEVICE) { + memcpy(p, buf, cplen); + } else { + memcpy(buf, p, cplen); + } + + buf += cplen; + len -= cplen; + iter->off += cplen; + iter->last_off = iter->off; + } + + return ret; +} + +/* + * Position the iterator @iter at new offset @new_off. + * If this leaves the cached mapping unusable with the new offset, it is + * unmapped lazily on the next i/o (see gpadl_iter_io above). + */ +static inline void gpadl_iter_seek(GpadlIter *iter, uint32_t new_off) +{ + assert(iter->active); + iter->off = new_off; +} + +/* + * Start a series of i/o on the GPADL. + * After this, i/o and seek operations on @iter become legal. + */ +static inline void gpadl_iter_start_io(GpadlIter *iter) +{ + assert(!iter->active); + /* mapping is cached lazily on i/o */ + iter->map = NULL; + iter->active = true; +} + +/* + * End the earlier started series of i/o on the GPADL and release the cached + * mapping, if any.
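+ * + * Typical bracketing, as in vmbus_iov_to_gpadl() below: + * + * gpadl_iter_start_io(&iter); + * gpadl_iter_seek(&iter, off); + * ret = gpadl_iter_io(&iter, buf, len); + * gpadl_iter_end_io(&iter);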
+ */ +static inline void gpadl_iter_end_io(GpadlIter *iter) +{ + assert(iter->active); + + if (iter->map) { + gpadl_iter_cache_unmap(iter); + } + + iter->active = false; +} + +static void vmbus_resched(VMBus *vmbus); +static void vmbus_msg_cb(void *data, int status); + +ssize_t vmbus_iov_to_gpadl(VMBusChannel *chan, VMBusGpadl *gpadl, uint32_t off, + const struct iovec *iov, size_t iov_cnt) +{ + GpadlIter iter; + size_t i; + ssize_t ret = 0; + + gpadl_iter_init(&iter, gpadl, chan->dev->dma_as, + DMA_DIRECTION_FROM_DEVICE); + gpadl_iter_start_io(&iter); + gpadl_iter_seek(&iter, off); + for (i = 0; i < iov_cnt; i++) { + ret = gpadl_iter_io(&iter, iov[i].iov_base, iov[i].iov_len); + if (ret < 0) { + goto out; + } + } +out: + gpadl_iter_end_io(&iter); + return ret; +} + +int vmbus_map_sgl(VMBusChanReq *req, DMADirection dir, struct iovec *iov, + unsigned iov_cnt, size_t len, size_t off) +{ + int ret_cnt = 0, ret; + unsigned i; + QEMUSGList *sgl = &req->sgl; + ScatterGatherEntry *sg = sgl->sg; + + for (i = 0; i < sgl->nsg; i++) { + if (sg[i].len > off) { + break; + } + off -= sg[i].len; + } + for (; len && i < sgl->nsg; i++) { + dma_addr_t mlen = MIN(sg[i].len - off, len); + dma_addr_t addr = sg[i].base + off; + len -= mlen; + off = 0; + + for (; mlen; ret_cnt++) { + dma_addr_t l = mlen; + dma_addr_t a = addr; + + if (ret_cnt == iov_cnt) { + ret = -ENOBUFS; + goto err; + } + + iov[ret_cnt].iov_base = dma_memory_map(sgl->as, a, &l, dir); + if (!l) { + ret = -EFAULT; + goto err; + } + iov[ret_cnt].iov_len = l; + addr += l; + mlen -= l; + } + } + + return ret_cnt; +err: + vmbus_unmap_sgl(req, dir, iov, ret_cnt, 0); + return ret; +} + +void vmbus_unmap_sgl(VMBusChanReq *req, DMADirection dir, struct iovec *iov, + unsigned iov_cnt, size_t accessed) +{ + QEMUSGList *sgl = &req->sgl; + unsigned i; + + for (i = 0; i < iov_cnt; i++) { + size_t acsd = MIN(accessed, iov[i].iov_len); + dma_memory_unmap(sgl->as, iov[i].iov_base, iov[i].iov_len, dir, acsd); + accessed -= acsd; + } +} + +static const VMStateDescription vmstate_gpadl = { + .name = "vmbus/gpadl", + .version_id = 0, + .minimum_version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_UINT32(id, VMBusGpadl), + VMSTATE_UINT32(child_relid, VMBusGpadl), + VMSTATE_UINT32(num_gfns, VMBusGpadl), + VMSTATE_UINT32(seen_gfns, VMBusGpadl), + VMSTATE_VARRAY_UINT32_ALLOC(gfns, VMBusGpadl, num_gfns, 0, + vmstate_info_uint64, uint64_t), + VMSTATE_UINT8(state, VMBusGpadl), + VMSTATE_END_OF_LIST() + } +}; + +/* + * Wrap the index into a ring buffer of @len bytes. + * @idx is assumed not to exceed twice the size of the ringbuffer, so only + * single wraparound is considered. + */ +static inline uint32_t rb_idx_wrap(uint32_t idx, uint32_t len) +{ + if (idx >= len) { + idx -= len; + } + return idx; +} + +/* + * Circular difference between two indices into a ring buffer of @len bytes. + * @allow_catchup - whether @idx1 may catch up @idx2; e.g. read index may catch + * up write index but not vice versa. 
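+ * + * Worked example with assumed values: for len 0x1000, + * rb_idx_delta(0xf00, 0x100, 0x1000, true) == 0x200 (one wraparound); with + * allow_catchup false and idx1 == idx2 the result is len - 1, i.e. one byte + * of the ring always stays unused so that a full ring remains + * distinguishable from an empty one.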
+ */ +static inline uint32_t rb_idx_delta(uint32_t idx1, uint32_t idx2, uint32_t len, + bool allow_catchup) +{ + return rb_idx_wrap(idx2 + len - idx1 - !allow_catchup, len); +} + +static vmbus_ring_buffer *ringbuf_map_hdr(VMBusRingBufCommon *ringbuf) +{ + vmbus_ring_buffer *rb; + dma_addr_t mlen = sizeof(*rb); + + rb = dma_memory_map(ringbuf->as, ringbuf->rb_addr, &mlen, + DMA_DIRECTION_FROM_DEVICE); + if (mlen != sizeof(*rb)) { + dma_memory_unmap(ringbuf->as, rb, mlen, + DMA_DIRECTION_FROM_DEVICE, 0); + return NULL; + } + return rb; +} + +static void ringbuf_unmap_hdr(VMBusRingBufCommon *ringbuf, + vmbus_ring_buffer *rb, bool dirty) +{ + assert(rb); + + dma_memory_unmap(ringbuf->as, rb, sizeof(*rb), DMA_DIRECTION_FROM_DEVICE, + dirty ? sizeof(*rb) : 0); +} + +static void ringbuf_init_common(VMBusRingBufCommon *ringbuf, VMBusGpadl *gpadl, + AddressSpace *as, DMADirection dir, + uint32_t begin, uint32_t end) +{ + ringbuf->as = as; + ringbuf->rb_addr = gpadl->gfns[begin] << TARGET_PAGE_BITS; + ringbuf->base = (begin + 1) << TARGET_PAGE_BITS; + ringbuf->len = (end - begin - 1) << TARGET_PAGE_BITS; + gpadl_iter_init(&ringbuf->iter, gpadl, as, dir); +} + +static int ringbufs_init(VMBusChannel *chan) +{ + vmbus_ring_buffer *rb; + VMBusSendRingBuf *send_ringbuf = &chan->send_ringbuf; + VMBusRecvRingBuf *recv_ringbuf = &chan->recv_ringbuf; + + if (chan->ringbuf_send_offset <= 1 || + chan->gpadl->num_gfns <= chan->ringbuf_send_offset + 1) { + return -EINVAL; + } + + ringbuf_init_common(&recv_ringbuf->common, chan->gpadl, chan->dev->dma_as, + DMA_DIRECTION_TO_DEVICE, 0, chan->ringbuf_send_offset); + ringbuf_init_common(&send_ringbuf->common, chan->gpadl, chan->dev->dma_as, + DMA_DIRECTION_FROM_DEVICE, chan->ringbuf_send_offset, + chan->gpadl->num_gfns); + send_ringbuf->wanted = 0; + send_ringbuf->reserved = 0; + + rb = ringbuf_map_hdr(&recv_ringbuf->common); + if (!rb) { + return -EFAULT; + } + recv_ringbuf->rd_idx = recv_ringbuf->last_rd_idx = rb->read_index; + ringbuf_unmap_hdr(&recv_ringbuf->common, rb, false); + + rb = ringbuf_map_hdr(&send_ringbuf->common); + if (!rb) { + return -EFAULT; + } + send_ringbuf->wr_idx = send_ringbuf->last_wr_idx = rb->write_index; + send_ringbuf->last_seen_rd_idx = rb->read_index; + rb->feature_bits |= VMBUS_RING_BUFFER_FEAT_PENDING_SZ; + ringbuf_unmap_hdr(&send_ringbuf->common, rb, true); + + if (recv_ringbuf->rd_idx >= recv_ringbuf->common.len || + send_ringbuf->wr_idx >= send_ringbuf->common.len) { + return -EOVERFLOW; + } + + return 0; +} + +/* + * Perform io between the GPADL-backed ringbuffer @ringbuf and @buf, wrapping + * around if needed. + * @len is assumed not to exceed the size of the ringbuffer, so only single + * wraparound is considered. + */ +static ssize_t ringbuf_io(VMBusRingBufCommon *ringbuf, void *buf, uint32_t len) +{ + ssize_t ret1 = 0, ret2 = 0; + uint32_t remain = ringbuf->len + ringbuf->base - ringbuf->iter.off; + + if (len >= remain) { + ret1 = gpadl_iter_io(&ringbuf->iter, buf, remain); + if (ret1 < 0) { + return ret1; + } + gpadl_iter_seek(&ringbuf->iter, ringbuf->base); + buf += remain; + len -= remain; + } + ret2 = gpadl_iter_io(&ringbuf->iter, buf, len); + if (ret2 < 0) { + return ret2; + } + return ret1 + ret2; +} + +/* + * Position the circular iterator within @ringbuf to offset @new_off, wrapping + * around if needed. + * @new_off is assumed not to exceed twice the size of the ringbuffer, so only + * single wraparound is considered. 
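+ * + * For example (values assumed): with base 0x1000 and len 0x2000, seeking to + * new_off 0x2800 wraps to ring offset 0x800, i.e. GPADL offset 0x1800.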
+ */ +static inline void ringbuf_seek(VMBusRingBufCommon *ringbuf, uint32_t new_off) +{ + gpadl_iter_seek(&ringbuf->iter, + ringbuf->base + rb_idx_wrap(new_off, ringbuf->len)); +} + +static inline uint32_t ringbuf_tell(VMBusRingBufCommon *ringbuf) +{ + return ringbuf->iter.off - ringbuf->base; +} + +static inline void ringbuf_start_io(VMBusRingBufCommon *ringbuf) +{ + gpadl_iter_start_io(&ringbuf->iter); +} + +static inline void ringbuf_end_io(VMBusRingBufCommon *ringbuf) +{ + gpadl_iter_end_io(&ringbuf->iter); +} + +VMBusDevice *vmbus_channel_device(VMBusChannel *chan) +{ + return chan->dev; +} + +VMBusChannel *vmbus_device_channel(VMBusDevice *dev, uint32_t chan_idx) +{ + if (chan_idx >= dev->num_channels) { + return NULL; + } + return &dev->channels[chan_idx]; +} + +uint32_t vmbus_channel_idx(VMBusChannel *chan) +{ + return chan - chan->dev->channels; +} + +void vmbus_channel_notify_host(VMBusChannel *chan) +{ + event_notifier_set(&chan->notifier); +} + +bool vmbus_channel_is_open(VMBusChannel *chan) +{ + return chan->is_open; +} + +/* + * Notify the guest side about the data to work on in the channel ring buffer. + * The notification is done by signaling a dedicated per-channel SynIC event + * flag (more recent guests) or setting a bit in the interrupt page and firing + * the VMBus SINT (older guests). + */ +static int vmbus_channel_notify_guest(VMBusChannel *chan) +{ + int res = 0; + unsigned long *int_map, mask; + unsigned idx; + hwaddr addr = chan->vmbus->int_page_gpa; + hwaddr len = TARGET_PAGE_SIZE / 2, dirty = 0; + + trace_vmbus_channel_notify_guest(chan->id); + + if (!addr) { + return hyperv_set_event_flag(chan->notify_route, chan->id); + } + + int_map = cpu_physical_memory_map(addr, &len, 1); + if (len != TARGET_PAGE_SIZE / 2) { + res = -ENXIO; + goto unmap; + } + + idx = BIT_WORD(chan->id); + mask = BIT_MASK(chan->id); + if ((qatomic_fetch_or(&int_map[idx], mask) & mask) != mask) { + res = hyperv_sint_route_set_sint(chan->notify_route); + dirty = len; + } + +unmap: + cpu_physical_memory_unmap(int_map, len, 1, dirty); + return res; +} + +#define VMBUS_PKT_TRAILER sizeof(uint64_t) + +static uint32_t vmbus_pkt_hdr_set_offsets(vmbus_packet_hdr *hdr, + uint32_t desclen, uint32_t msglen) +{ + hdr->offset_qwords = sizeof(*hdr) / sizeof(uint64_t) + + DIV_ROUND_UP(desclen, sizeof(uint64_t)); + hdr->len_qwords = hdr->offset_qwords + + DIV_ROUND_UP(msglen, sizeof(uint64_t)); + return hdr->len_qwords * sizeof(uint64_t) + VMBUS_PKT_TRAILER; +} + +/* + * Simplified ring buffer operation with paired barriers annotations in the + * producer and consumer loops: + * + * producer * consumer + * ~~~~~~~~ * ~~~~~~~~ + * write pending_send_sz * read write_index + * smp_mb [A] * smp_mb [C] + * read read_index * read packet + * smp_mb [B] * read/write out-of-band data + * read/write out-of-band data * smp_mb [B] + * write packet * write read_index + * smp_mb [C] * smp_mb [A] + * write write_index * read pending_send_sz + * smp_wmb [D] * smp_rmb [D] + * write pending_send_sz * read write_index + * ... * ... 
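+ * + * Reading the table: each barrier pair orders an index/flag access against + * the payload access it brackets, on both sides; e.g. pair [C] guarantees + * that a consumer observing the new write_index also observes the packet + * bytes the producer stored before publishing it.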
+ */ + +static inline uint32_t ringbuf_send_avail(VMBusSendRingBuf *ringbuf) +{ + /* don't trust guest data */ + if (ringbuf->last_seen_rd_idx >= ringbuf->common.len) { + return 0; + } + return rb_idx_delta(ringbuf->wr_idx, ringbuf->last_seen_rd_idx, + ringbuf->common.len, false); +} + +static ssize_t ringbuf_send_update_idx(VMBusChannel *chan) +{ + VMBusSendRingBuf *ringbuf = &chan->send_ringbuf; + vmbus_ring_buffer *rb; + uint32_t written; + + written = rb_idx_delta(ringbuf->last_wr_idx, ringbuf->wr_idx, + ringbuf->common.len, true); + if (!written) { + return 0; + } + + rb = ringbuf_map_hdr(&ringbuf->common); + if (!rb) { + return -EFAULT; + } + + ringbuf->reserved -= written; + + /* prevent reorder with the data operation and packet write */ + smp_mb(); /* barrier pair [C] */ + rb->write_index = ringbuf->wr_idx; + + /* + * If the producer earlier indicated that it wants to be notified when the + * consumer frees a certain amount of space in the ring buffer, that amount + * is reduced by the size of the completed write. + */ + if (ringbuf->wanted) { + /* otherwise reservation would fail */ + assert(ringbuf->wanted < written); + ringbuf->wanted -= written; + /* prevent reorder with write_index write */ + smp_wmb(); /* barrier pair [D] */ + rb->pending_send_sz = ringbuf->wanted; + } + + /* prevent reorder with write_index or pending_send_sz write */ + smp_mb(); /* barrier pair [A] */ + ringbuf->last_seen_rd_idx = rb->read_index; + + /* + * The consumer may have missed the reduction of pending_send_sz and skip + * notification, so re-check the blocking condition, and, if it's no longer + * true, ensure processing another iteration by simulating the consumer's + * notification. + */ + if (ringbuf_send_avail(ringbuf) >= ringbuf->wanted) { + vmbus_channel_notify_host(chan); + } + + /* skip notification at the consumer's request */ + if (rb->interrupt_mask) { + goto out; + } + + /* + * The consumer hasn't caught up with the producer's previous state, so + * it's not blocked. + * (last_seen_rd_idx comes from the guest but it's safe to use w/o + * validation here as it only affects notification.) + */ + if (rb_idx_delta(ringbuf->last_seen_rd_idx, ringbuf->wr_idx, + ringbuf->common.len, true) > written) { + goto out; + } + + vmbus_channel_notify_guest(chan); +out: + ringbuf_unmap_hdr(&ringbuf->common, rb, true); + ringbuf->last_wr_idx = ringbuf->wr_idx; + return written; +} + +int vmbus_channel_reserve(VMBusChannel *chan, + uint32_t desclen, uint32_t msglen) +{ + VMBusSendRingBuf *ringbuf = &chan->send_ringbuf; + vmbus_ring_buffer *rb = NULL; + vmbus_packet_hdr hdr; + uint32_t needed = ringbuf->reserved + + vmbus_pkt_hdr_set_offsets(&hdr, desclen, msglen); + + /* avoid touching the guest memory if possible */ + if (likely(needed <= ringbuf_send_avail(ringbuf))) { + goto success; + } + + rb = ringbuf_map_hdr(&ringbuf->common); + if (!rb) { + return -EFAULT; + } + + /* fetch read index from guest memory and try again */ + ringbuf->last_seen_rd_idx = rb->read_index; + + if (likely(needed <= ringbuf_send_avail(ringbuf))) { + goto success; + } + + rb->pending_send_sz = needed; + + /* + * The consumer may have made progress and freed up some space before + * seeing the updated pending_send_sz, so re-read read_index (preventing + * reorder with the pending_send_sz write) and try again.
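+ * If there is still not enough room, pending_send_sz is left set (mirrored + * in ->wanted) so the guest consumer will notify the host once that much + * space has been freed, and the reservation fails with -ENOSPC below.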
+ */ + smp_mb(); /* barrier pair [A] */ + ringbuf->last_seen_rd_idx = rb->read_index; + + if (needed > ringbuf_send_avail(ringbuf)) { + goto out; + } + +success: + ringbuf->reserved = needed; + needed = 0; + + /* clear pending_send_sz if it was set */ + if (ringbuf->wanted) { + if (!rb) { + rb = ringbuf_map_hdr(&ringbuf->common); + if (!rb) { + /* failure to clear pending_send_sz is non-fatal */ + goto out; + } + } + + rb->pending_send_sz = 0; + } + + /* prevent reorder of the following data operation with read_index read */ + smp_mb(); /* barrier pair [B] */ + +out: + if (rb) { + ringbuf_unmap_hdr(&ringbuf->common, rb, ringbuf->wanted == needed); + } + ringbuf->wanted = needed; + return needed ? -ENOSPC : 0; +} + +ssize_t vmbus_channel_send(VMBusChannel *chan, uint16_t pkt_type, + void *desc, uint32_t desclen, + void *msg, uint32_t msglen, + bool need_comp, uint64_t transaction_id) +{ + ssize_t ret = 0; + vmbus_packet_hdr hdr; + uint32_t totlen; + VMBusSendRingBuf *ringbuf = &chan->send_ringbuf; + + if (!vmbus_channel_is_open(chan)) { + return -EINVAL; + } + + totlen = vmbus_pkt_hdr_set_offsets(&hdr, desclen, msglen); + hdr.type = pkt_type; + hdr.flags = need_comp ? VMBUS_PACKET_FLAG_REQUEST_COMPLETION : 0; + hdr.transaction_id = transaction_id; + + assert(totlen <= ringbuf->reserved); + + ringbuf_start_io(&ringbuf->common); + ringbuf_seek(&ringbuf->common, ringbuf->wr_idx); + ret = ringbuf_io(&ringbuf->common, &hdr, sizeof(hdr)); + if (ret < 0) { + goto out; + } + if (desclen) { + assert(desc); + ret = ringbuf_io(&ringbuf->common, desc, desclen); + if (ret < 0) { + goto out; + } + ringbuf_seek(&ringbuf->common, + ringbuf->wr_idx + hdr.offset_qwords * sizeof(uint64_t)); + } + ret = ringbuf_io(&ringbuf->common, msg, msglen); + if (ret < 0) { + goto out; + } + ringbuf_seek(&ringbuf->common, ringbuf->wr_idx + totlen); + ringbuf->wr_idx = ringbuf_tell(&ringbuf->common); + ret = 0; +out: + ringbuf_end_io(&ringbuf->common); + if (ret) { + return ret; + } + return ringbuf_send_update_idx(chan); +} + +ssize_t vmbus_channel_send_completion(VMBusChanReq *req, + void *msg, uint32_t msglen) +{ + assert(req->need_comp); + return vmbus_channel_send(req->chan, VMBUS_PACKET_COMP, NULL, 0, + msg, msglen, false, req->transaction_id); +} + +static int sgl_from_gpa_ranges(QEMUSGList *sgl, VMBusDevice *dev, + VMBusRingBufCommon *ringbuf, uint32_t len) +{ + int ret; + vmbus_pkt_gpa_direct hdr; + hwaddr curaddr = 0; + hwaddr curlen = 0; + int num; + + if (len < sizeof(hdr)) { + return -EIO; + } + ret = ringbuf_io(ringbuf, &hdr, sizeof(hdr)); + if (ret < 0) { + return ret; + } + len -= sizeof(hdr); + + num = (len - hdr.rangecount * sizeof(vmbus_gpa_range)) / sizeof(uint64_t); + if (num < 0) { + return -EIO; + } + qemu_sglist_init(sgl, DEVICE(dev), num, ringbuf->as); + + for (; hdr.rangecount; hdr.rangecount--) { + vmbus_gpa_range range; + + if (len < sizeof(range)) { + goto eio; + } + ret = ringbuf_io(ringbuf, &range, sizeof(range)); + if (ret < 0) { + goto err; + } + len -= sizeof(range); + + if (range.byte_offset & TARGET_PAGE_MASK) { + goto eio; + } + + for (; range.byte_count; range.byte_offset = 0) { + uint64_t paddr; + uint32_t plen = MIN(range.byte_count, + TARGET_PAGE_SIZE - range.byte_offset); + + if (len < sizeof(uint64_t)) { + goto eio; + } + ret = ringbuf_io(ringbuf, &paddr, sizeof(paddr)); + if (ret < 0) { + goto err; + } + len -= sizeof(uint64_t); + paddr <<= TARGET_PAGE_BITS; + paddr |= range.byte_offset; + range.byte_count -= plen; + + if (curaddr + curlen == paddr) { + /* consecutive fragments - 
join */ + curlen += plen; + } else { + if (curlen) { + qemu_sglist_add(sgl, curaddr, curlen); + } + + curaddr = paddr; + curlen = plen; + } + } + } + + if (curlen) { + qemu_sglist_add(sgl, curaddr, curlen); + } + + return 0; +eio: + ret = -EIO; +err: + qemu_sglist_destroy(sgl); + return ret; +} + +static VMBusChanReq *vmbus_alloc_req(VMBusChannel *chan, + uint32_t size, uint16_t pkt_type, + uint32_t msglen, uint64_t transaction_id, + bool need_comp) +{ + VMBusChanReq *req; + uint32_t msgoff = QEMU_ALIGN_UP(size, __alignof__(*req->msg)); + uint32_t totlen = msgoff + msglen; + + req = g_malloc0(totlen); + req->chan = chan; + req->pkt_type = pkt_type; + req->msg = (void *)req + msgoff; + req->msglen = msglen; + req->transaction_id = transaction_id; + req->need_comp = need_comp; + return req; +} + +int vmbus_channel_recv_start(VMBusChannel *chan) +{ + VMBusRecvRingBuf *ringbuf = &chan->recv_ringbuf; + vmbus_ring_buffer *rb; + + rb = ringbuf_map_hdr(&ringbuf->common); + if (!rb) { + return -EFAULT; + } + ringbuf->last_seen_wr_idx = rb->write_index; + ringbuf_unmap_hdr(&ringbuf->common, rb, false); + + if (ringbuf->last_seen_wr_idx >= ringbuf->common.len) { + return -EOVERFLOW; + } + + /* prevent reorder of the following data operation with write_index read */ + smp_mb(); /* barrier pair [C] */ + return 0; +} + +void *vmbus_channel_recv_peek(VMBusChannel *chan, uint32_t size) +{ + VMBusRecvRingBuf *ringbuf = &chan->recv_ringbuf; + vmbus_packet_hdr hdr = {}; + VMBusChanReq *req; + uint32_t avail; + uint32_t totlen, pktlen, msglen, msgoff, desclen; + + assert(size >= sizeof(*req)); + + /* safe as last_seen_wr_idx is validated in vmbus_channel_recv_start */ + avail = rb_idx_delta(ringbuf->rd_idx, ringbuf->last_seen_wr_idx, + ringbuf->common.len, true); + if (avail < sizeof(hdr)) { + return NULL; + } + + ringbuf_seek(&ringbuf->common, ringbuf->rd_idx); + if (ringbuf_io(&ringbuf->common, &hdr, sizeof(hdr)) < 0) { + return NULL; + } + + pktlen = hdr.len_qwords * sizeof(uint64_t); + totlen = pktlen + VMBUS_PKT_TRAILER; + if (totlen > avail) { + return NULL; + } + + msgoff = hdr.offset_qwords * sizeof(uint64_t); + if (msgoff > pktlen || msgoff < sizeof(hdr)) { + error_report("%s: malformed packet: %u %u", __func__, msgoff, pktlen); + return NULL; + } + + msglen = pktlen - msgoff; + + req = vmbus_alloc_req(chan, size, hdr.type, msglen, hdr.transaction_id, + hdr.flags & VMBUS_PACKET_FLAG_REQUEST_COMPLETION); + + switch (hdr.type) { + case VMBUS_PACKET_DATA_USING_GPA_DIRECT: + desclen = msgoff - sizeof(hdr); + if (sgl_from_gpa_ranges(&req->sgl, chan->dev, &ringbuf->common, + desclen) < 0) { + error_report("%s: failed to convert GPA ranges to SGL", __func__); + goto free_req; + } + break; + case VMBUS_PACKET_DATA_INBAND: + case VMBUS_PACKET_COMP: + break; + default: + error_report("%s: unexpected msg type: %x", __func__, hdr.type); + goto free_req; + } + + ringbuf_seek(&ringbuf->common, ringbuf->rd_idx + msgoff); + if (ringbuf_io(&ringbuf->common, req->msg, msglen) < 0) { + goto free_req; + } + ringbuf_seek(&ringbuf->common, ringbuf->rd_idx + totlen); + + return req; +free_req: + vmbus_free_req(req); + return NULL; +} + +void vmbus_channel_recv_pop(VMBusChannel *chan) +{ + VMBusRecvRingBuf *ringbuf = &chan->recv_ringbuf; + ringbuf->rd_idx = ringbuf_tell(&ringbuf->common); +} + +ssize_t vmbus_channel_recv_done(VMBusChannel *chan) +{ + VMBusRecvRingBuf *ringbuf = &chan->recv_ringbuf; + vmbus_ring_buffer *rb; + uint32_t read; + + read = rb_idx_delta(ringbuf->last_rd_idx, ringbuf->rd_idx, + ringbuf->common.len, 
true); + if (!read) { + return 0; + } + + rb = ringbuf_map_hdr(&ringbuf->common); + if (!rb) { + return -EFAULT; + } + + /* prevent reorder with the data operation and packet read */ + smp_mb(); /* barrier pair [B] */ + rb->read_index = ringbuf->rd_idx; + + /* prevent reorder of the following pending_send_sz read */ + smp_mb(); /* barrier pair [A] */ + + if (rb->interrupt_mask) { + goto out; + } + + if (rb->feature_bits & VMBUS_RING_BUFFER_FEAT_PENDING_SZ) { + uint32_t wr_idx, wr_avail; + uint32_t wanted = rb->pending_send_sz; + + if (!wanted) { + goto out; + } + + /* prevent reorder with pending_send_sz read */ + smp_rmb(); /* barrier pair [D] */ + wr_idx = rb->write_index; + + wr_avail = rb_idx_delta(wr_idx, ringbuf->rd_idx, ringbuf->common.len, + true); + + /* the producer wasn't blocked on the consumer state */ + if (wr_avail >= read + wanted) { + goto out; + } + /* there's not enough space for the producer to make progress */ + if (wr_avail < wanted) { + goto out; + } + } + + vmbus_channel_notify_guest(chan); +out: + ringbuf_unmap_hdr(&ringbuf->common, rb, true); + ringbuf->last_rd_idx = ringbuf->rd_idx; + return read; +} + +void vmbus_free_req(void *req) +{ + VMBusChanReq *r = req; + + if (!req) { + return; + } + + if (r->sgl.dev) { + qemu_sglist_destroy(&r->sgl); + } + g_free(req); +} + +static const VMStateDescription vmstate_sgent = { + .name = "vmbus/sgentry", + .version_id = 0, + .minimum_version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_UINT64(base, ScatterGatherEntry), + VMSTATE_UINT64(len, ScatterGatherEntry), + VMSTATE_END_OF_LIST() + } +}; + +typedef struct VMBusChanReqSave { + uint16_t chan_idx; + uint16_t pkt_type; + uint32_t msglen; + void *msg; + uint64_t transaction_id; + bool need_comp; + uint32_t num; + ScatterGatherEntry *sgl; +} VMBusChanReqSave; + +static const VMStateDescription vmstate_vmbus_chan_req = { + .name = "vmbus/vmbus_chan_req", + .version_id = 0, + .minimum_version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_UINT16(chan_idx, VMBusChanReqSave), + VMSTATE_UINT16(pkt_type, VMBusChanReqSave), + VMSTATE_UINT32(msglen, VMBusChanReqSave), + VMSTATE_VBUFFER_ALLOC_UINT32(msg, VMBusChanReqSave, 0, NULL, msglen), + VMSTATE_UINT64(transaction_id, VMBusChanReqSave), + VMSTATE_BOOL(need_comp, VMBusChanReqSave), + VMSTATE_UINT32(num, VMBusChanReqSave), + VMSTATE_STRUCT_VARRAY_POINTER_UINT32(sgl, VMBusChanReqSave, num, + vmstate_sgent, ScatterGatherEntry), + VMSTATE_END_OF_LIST() + } +}; + +void vmbus_save_req(QEMUFile *f, VMBusChanReq *req) +{ + VMBusChanReqSave req_save; + + req_save.chan_idx = req->chan->subchan_idx; + req_save.pkt_type = req->pkt_type; + req_save.msglen = req->msglen; + req_save.msg = req->msg; + req_save.transaction_id = req->transaction_id; + req_save.need_comp = req->need_comp; + req_save.num = req->sgl.nsg; + req_save.sgl = g_memdup(req->sgl.sg, + req_save.num * sizeof(ScatterGatherEntry)); + + vmstate_save_state(f, &vmstate_vmbus_chan_req, &req_save, NULL); + + g_free(req_save.sgl); +} + +void *vmbus_load_req(QEMUFile *f, VMBusDevice *dev, uint32_t size) +{ + VMBusChanReqSave req_save; + VMBusChanReq *req = NULL; + VMBusChannel *chan = NULL; + uint32_t i; + + vmstate_load_state(f, &vmstate_vmbus_chan_req, &req_save, 0); + + if (req_save.chan_idx >= dev->num_channels) { + error_report("%s: %u(chan_idx) > %u(num_channels)", __func__, + req_save.chan_idx, dev->num_channels); + goto out; + } + chan = &dev->channels[req_save.chan_idx]; + + if (vmbus_channel_reserve(chan, 0, req_save.msglen)) { + goto out; + } + + req = 
vmbus_alloc_req(chan, size, req_save.pkt_type, req_save.msglen, + req_save.transaction_id, req_save.need_comp); + if (req_save.msglen) { + memcpy(req->msg, req_save.msg, req_save.msglen); + } + + for (i = 0; i < req_save.num; i++) { + qemu_sglist_add(&req->sgl, req_save.sgl[i].base, req_save.sgl[i].len); + } + +out: + if (req_save.msglen) { + g_free(req_save.msg); + } + if (req_save.num) { + g_free(req_save.sgl); + } + return req; +} + +static void channel_event_cb(EventNotifier *e) +{ + VMBusChannel *chan = container_of(e, VMBusChannel, notifier); + if (event_notifier_test_and_clear(e)) { + /* + * All receives are supposed to happen within the device worker, so + * bracket it with ringbuf_start/end_io on the receive ringbuffer, and + * potentially reuse the cached mapping throughout the worker. + * Can't do this for sends as they may happen outside the device + * worker. + */ + VMBusRecvRingBuf *ringbuf = &chan->recv_ringbuf; + ringbuf_start_io(&ringbuf->common); + chan->notify_cb(chan); + ringbuf_end_io(&ringbuf->common); + + } +} + +static int alloc_chan_id(VMBus *vmbus) +{ + int ret; + + ret = find_next_zero_bit(vmbus->chanid_bitmap, VMBUS_CHANID_COUNT, 0); + if (ret == VMBUS_CHANID_COUNT) { + return -ENOMEM; + } + return ret + VMBUS_FIRST_CHANID; +} + +static int register_chan_id(VMBusChannel *chan) +{ + return test_and_set_bit(chan->id - VMBUS_FIRST_CHANID, + chan->vmbus->chanid_bitmap) ? -EEXIST : 0; +} + +static void unregister_chan_id(VMBusChannel *chan) +{ + clear_bit(chan->id - VMBUS_FIRST_CHANID, chan->vmbus->chanid_bitmap); +} + +static uint32_t chan_connection_id(VMBusChannel *chan) +{ + return VMBUS_CHAN_CONNECTION_OFFSET + chan->id; +} + +static void init_channel(VMBus *vmbus, VMBusDevice *dev, VMBusDeviceClass *vdc, + VMBusChannel *chan, uint16_t idx, Error **errp) +{ + int res; + + chan->dev = dev; + chan->notify_cb = vdc->chan_notify_cb; + chan->subchan_idx = idx; + chan->vmbus = vmbus; + + res = alloc_chan_id(vmbus); + if (res < 0) { + error_setg(errp, "no spare channel id"); + return; + } + chan->id = res; + register_chan_id(chan); + + /* + * The guest drivers depend on the device subchannels (idx #1+) to be + * offered after the primary channel (idx #0) of that device. To ensure + * that, record the channels on the channel list in the order they appear + * within the device. + */ + QTAILQ_INSERT_TAIL(&vmbus->channel_list, chan, link); +} + +static void deinit_channel(VMBusChannel *chan) +{ + assert(chan->state == VMCHAN_INIT); + QTAILQ_REMOVE(&chan->vmbus->channel_list, chan, link); + unregister_chan_id(chan); +} + +static void create_channels(VMBus *vmbus, VMBusDevice *dev, Error **errp) +{ + uint16_t i; + VMBusDeviceClass *vdc = VMBUS_DEVICE_GET_CLASS(dev); + Error *err = NULL; + + dev->num_channels = vdc->num_channels ? 
vdc->num_channels(dev) : 1; + if (dev->num_channels < 1) { + error_setg(errp, "invalid #channels: %u", dev->num_channels); + return; + } + + dev->channels = g_new0(VMBusChannel, dev->num_channels); + for (i = 0; i < dev->num_channels; i++) { + init_channel(vmbus, dev, vdc, &dev->channels[i], i, &err); + if (err) { + goto err_init; + } + } + + return; + +err_init: + while (i--) { + deinit_channel(&dev->channels[i]); + } + error_propagate(errp, err); +} + +static void free_channels(VMBusDevice *dev) +{ + uint16_t i; + for (i = 0; i < dev->num_channels; i++) { + deinit_channel(&dev->channels[i]); + } + g_free(dev->channels); +} + +static HvSintRoute *make_sint_route(VMBus *vmbus, uint32_t vp_index) +{ + VMBusChannel *chan; + + if (vp_index == vmbus->target_vp) { + hyperv_sint_route_ref(vmbus->sint_route); + return vmbus->sint_route; + } + + QTAILQ_FOREACH(chan, &vmbus->channel_list, link) { + if (chan->target_vp == vp_index && vmbus_channel_is_open(chan)) { + hyperv_sint_route_ref(chan->notify_route); + return chan->notify_route; + } + } + + return hyperv_sint_route_new(vp_index, VMBUS_SINT, NULL, NULL); +} + +static void open_channel(VMBusChannel *chan) +{ + VMBusDeviceClass *vdc = VMBUS_DEVICE_GET_CLASS(chan->dev); + + chan->gpadl = vmbus_get_gpadl(chan, chan->ringbuf_gpadl); + if (!chan->gpadl) { + return; + } + + if (ringbufs_init(chan)) { + goto put_gpadl; + } + + if (event_notifier_init(&chan->notifier, 0)) { + goto put_gpadl; + } + + event_notifier_set_handler(&chan->notifier, channel_event_cb); + + if (hyperv_set_event_flag_handler(chan_connection_id(chan), + &chan->notifier)) { + goto cleanup_notifier; + } + + chan->notify_route = make_sint_route(chan->vmbus, chan->target_vp); + if (!chan->notify_route) { + goto clear_event_flag_handler; + } + + if (vdc->open_channel && vdc->open_channel(chan)) { + goto unref_sint_route; + } + + chan->is_open = true; + return; + +unref_sint_route: + hyperv_sint_route_unref(chan->notify_route); +clear_event_flag_handler: + hyperv_set_event_flag_handler(chan_connection_id(chan), NULL); +cleanup_notifier: + event_notifier_set_handler(&chan->notifier, NULL); + event_notifier_cleanup(&chan->notifier); +put_gpadl: + vmbus_put_gpadl(chan->gpadl); +} + +static void close_channel(VMBusChannel *chan) +{ + VMBusDeviceClass *vdc = VMBUS_DEVICE_GET_CLASS(chan->dev); + + if (!chan->is_open) { + return; + } + + if (vdc->close_channel) { + vdc->close_channel(chan); + } + + hyperv_sint_route_unref(chan->notify_route); + hyperv_set_event_flag_handler(chan_connection_id(chan), NULL); + event_notifier_set_handler(&chan->notifier, NULL); + event_notifier_cleanup(&chan->notifier); + vmbus_put_gpadl(chan->gpadl); + chan->is_open = false; +} + +static int channel_post_load(void *opaque, int version_id) +{ + VMBusChannel *chan = opaque; + + return register_chan_id(chan); +} + +static const VMStateDescription vmstate_channel = { + .name = "vmbus/channel", + .version_id = 0, + .minimum_version_id = 0, + .post_load = channel_post_load, + .fields = (VMStateField[]) { + VMSTATE_UINT32(id, VMBusChannel), + VMSTATE_UINT16(subchan_idx, VMBusChannel), + VMSTATE_UINT32(open_id, VMBusChannel), + VMSTATE_UINT32(target_vp, VMBusChannel), + VMSTATE_UINT32(ringbuf_gpadl, VMBusChannel), + VMSTATE_UINT32(ringbuf_send_offset, VMBusChannel), + VMSTATE_UINT8(offer_state, VMBusChannel), + VMSTATE_UINT8(state, VMBusChannel), + VMSTATE_END_OF_LIST() + } +}; + +static VMBusChannel *find_channel(VMBus *vmbus, uint32_t id) +{ + VMBusChannel *chan; + QTAILQ_FOREACH(chan, &vmbus->channel_list, link) { 
+ if (chan->id == id) { + return chan; + } + } + return NULL; +} + +static int enqueue_incoming_message(VMBus *vmbus, + const struct hyperv_post_message_input *msg) +{ + int ret = 0; + uint8_t idx, prev_size; + + qemu_mutex_lock(&vmbus->rx_queue_lock); + + if (vmbus->rx_queue_size == HV_MSG_QUEUE_LEN) { + ret = -ENOBUFS; + goto out; + } + + prev_size = vmbus->rx_queue_size; + idx = (vmbus->rx_queue_head + vmbus->rx_queue_size) % HV_MSG_QUEUE_LEN; + memcpy(&vmbus->rx_queue[idx], msg, sizeof(*msg)); + vmbus->rx_queue_size++; + + /* only need to resched if the queue was empty before */ + if (!prev_size) { + vmbus_resched(vmbus); + } +out: + qemu_mutex_unlock(&vmbus->rx_queue_lock); + return ret; +} + +static uint16_t vmbus_recv_message(const struct hyperv_post_message_input *msg, + void *data) +{ + VMBus *vmbus = data; + struct vmbus_message_header *vmbus_msg; + + if (msg->message_type != HV_MESSAGE_VMBUS) { + return HV_STATUS_INVALID_HYPERCALL_INPUT; + } + + if (msg->payload_size < sizeof(struct vmbus_message_header)) { + return HV_STATUS_INVALID_HYPERCALL_INPUT; + } + + vmbus_msg = (struct vmbus_message_header *)msg->payload; + + trace_vmbus_recv_message(vmbus_msg->message_type, msg->payload_size); + + if (vmbus_msg->message_type == VMBUS_MSG_INVALID || + vmbus_msg->message_type >= VMBUS_MSG_COUNT) { + error_report("vmbus: unknown message type %#x", + vmbus_msg->message_type); + return HV_STATUS_INVALID_HYPERCALL_INPUT; + } + + if (enqueue_incoming_message(vmbus, msg)) { + return HV_STATUS_INSUFFICIENT_BUFFERS; + } + return HV_STATUS_SUCCESS; +} + +static bool vmbus_initialized(VMBus *vmbus) +{ + return vmbus->version > 0 && vmbus->version <= VMBUS_VERSION_CURRENT; +} + +static void vmbus_reset_all(VMBus *vmbus) +{ + qbus_reset_all(BUS(vmbus)); +} + +static void post_msg(VMBus *vmbus, void *msgdata, uint32_t msglen) +{ + int ret; + struct hyperv_message msg = { + .header.message_type = HV_MESSAGE_VMBUS, + }; + + assert(!vmbus->msg_in_progress); + assert(msglen <= sizeof(msg.payload)); + assert(msglen >= sizeof(struct vmbus_message_header)); + + vmbus->msg_in_progress = true; + + trace_vmbus_post_msg(((struct vmbus_message_header *)msgdata)->message_type, + msglen); + + memcpy(msg.payload, msgdata, msglen); + msg.header.payload_size = ROUND_UP(msglen, VMBUS_MESSAGE_SIZE_ALIGN); + + ret = hyperv_post_msg(vmbus->sint_route, &msg); + if (ret == 0 || ret == -EAGAIN) { + return; + } + + error_report("message delivery fatal failure: %d; aborting vmbus", ret); + vmbus_reset_all(vmbus); +} + +static int vmbus_init(VMBus *vmbus) +{ + if (vmbus->target_vp != (uint32_t)-1) { + vmbus->sint_route = hyperv_sint_route_new(vmbus->target_vp, VMBUS_SINT, + vmbus_msg_cb, vmbus); + if (!vmbus->sint_route) { + error_report("failed to set up SINT route"); + return -ENOMEM; + } + } + return 0; +} + +static void vmbus_deinit(VMBus *vmbus) +{ + VMBusGpadl *gpadl, *tmp_gpadl; + VMBusChannel *chan; + + QTAILQ_FOREACH_SAFE(gpadl, &vmbus->gpadl_list, link, tmp_gpadl) { + if (gpadl->state == VMGPADL_TORNDOWN) { + continue; + } + vmbus_put_gpadl(gpadl); + } + + QTAILQ_FOREACH(chan, &vmbus->channel_list, link) { + chan->offer_state = VMOFFER_INIT; + } + + hyperv_sint_route_unref(vmbus->sint_route); + vmbus->sint_route = NULL; + vmbus->int_page_gpa = 0; + vmbus->target_vp = (uint32_t)-1; + vmbus->version = 0; + vmbus->state = VMBUS_LISTEN; + vmbus->msg_in_progress = false; +} + +static void handle_initiate_contact(VMBus *vmbus, + vmbus_message_initiate_contact *msg, + uint32_t msglen) +{ + if (msglen < sizeof(*msg)) { + 
return; + } + + trace_vmbus_initiate_contact(msg->version_requested >> 16, + msg->version_requested & 0xffff, + msg->target_vcpu, msg->monitor_page1, + msg->monitor_page2, msg->interrupt_page); + + /* + * Reset vmbus on INITIATE_CONTACT regardless of its previous state. + * Useful, in particular, with vmbus-aware BIOS which can't shut vmbus down + * before handing over to OS loader. + */ + vmbus_reset_all(vmbus); + + vmbus->target_vp = msg->target_vcpu; + vmbus->version = msg->version_requested; + if (vmbus->version < VMBUS_VERSION_WIN8) { + /* linux passes interrupt page even when it doesn't need it */ + vmbus->int_page_gpa = msg->interrupt_page; + } + vmbus->state = VMBUS_HANDSHAKE; + + if (vmbus_init(vmbus)) { + error_report("failed to init vmbus; aborting"); + vmbus_deinit(vmbus); + return; + } +} + +static void send_handshake(VMBus *vmbus) +{ + struct vmbus_message_version_response msg = { + .header.message_type = VMBUS_MSG_VERSION_RESPONSE, + .version_supported = vmbus_initialized(vmbus), + }; + + post_msg(vmbus, &msg, sizeof(msg)); +} + +static void handle_request_offers(VMBus *vmbus, void *msgdata, uint32_t msglen) +{ + VMBusChannel *chan; + + if (!vmbus_initialized(vmbus)) { + return; + } + + QTAILQ_FOREACH(chan, &vmbus->channel_list, link) { + if (chan->offer_state == VMOFFER_INIT) { + chan->offer_state = VMOFFER_SENDING; + break; + } + } + + vmbus->state = VMBUS_OFFER; +} + +static void send_offer(VMBus *vmbus) +{ + VMBusChannel *chan; + struct vmbus_message_header alloffers_msg = { + .message_type = VMBUS_MSG_ALLOFFERS_DELIVERED, + }; + + QTAILQ_FOREACH(chan, &vmbus->channel_list, link) { + if (chan->offer_state == VMOFFER_SENDING) { + VMBusDeviceClass *vdc = VMBUS_DEVICE_GET_CLASS(chan->dev); + /* Hyper-V wants LE GUIDs */ + QemuUUID classid = qemu_uuid_bswap(vdc->classid); + QemuUUID instanceid = qemu_uuid_bswap(chan->dev->instanceid); + struct vmbus_message_offer_channel msg = { + .header.message_type = VMBUS_MSG_OFFERCHANNEL, + .child_relid = chan->id, + .connection_id = chan_connection_id(chan), + .channel_flags = vdc->channel_flags, + .mmio_size_mb = vdc->mmio_size_mb, + .sub_channel_index = vmbus_channel_idx(chan), + .interrupt_flags = VMBUS_OFFER_INTERRUPT_DEDICATED, + }; + + memcpy(msg.type_uuid, &classid, sizeof(classid)); + memcpy(msg.instance_uuid, &instanceid, sizeof(instanceid)); + + trace_vmbus_send_offer(chan->id, chan->dev); + + post_msg(vmbus, &msg, sizeof(msg)); + return; + } + } + + /* no more offers, send terminator message */ + trace_vmbus_terminate_offers(); + post_msg(vmbus, &alloffers_msg, sizeof(alloffers_msg)); +} + +static bool complete_offer(VMBus *vmbus) +{ + VMBusChannel *chan; + + QTAILQ_FOREACH(chan, &vmbus->channel_list, link) { + if (chan->offer_state == VMOFFER_SENDING) { + chan->offer_state = VMOFFER_SENT; + goto next_offer; + } + } + /* + * no transitioning channels found so this is completing the terminator + * message, and vmbus can move to the next state + */ + return true; + +next_offer: + /* try to mark another channel for offering */ + QTAILQ_FOREACH(chan, &vmbus->channel_list, link) { + if (chan->offer_state == VMOFFER_INIT) { + chan->offer_state = VMOFFER_SENDING; + break; + } + } + /* + * if an offer has been sent there are more offers or the terminator yet to + * send, so no state transition for vmbus + */ + return false; +} + + +static void handle_gpadl_header(VMBus *vmbus, vmbus_message_gpadl_header *msg, + uint32_t msglen) +{ + VMBusGpadl *gpadl; + uint32_t num_gfns, i; + + /* must include at least one gpa range */ + if (msglen 
< sizeof(*msg) + sizeof(msg->range[0]) || + !vmbus_initialized(vmbus)) { + return; + } + + num_gfns = (msg->range_buflen - msg->rangecount * sizeof(msg->range[0])) / + sizeof(msg->range[0].pfn_array[0]); + + trace_vmbus_gpadl_header(msg->gpadl_id, num_gfns); + + /* + * In theory the GPADL_HEADER message can define a GPADL with multiple GPA + * ranges each with arbitrary size and alignment. However in practice only + * single-range page-aligned GPADLs have been observed so just ignore + * anything else and simplify things greatly. + */ + if (msg->rangecount != 1 || msg->range[0].byte_offset || + (msg->range[0].byte_count != (num_gfns << TARGET_PAGE_BITS))) { + return; + } + + /* ignore requests to create already existing GPADLs */ + if (find_gpadl(vmbus, msg->gpadl_id)) { + return; + } + + gpadl = create_gpadl(vmbus, msg->gpadl_id, msg->child_relid, num_gfns); + + for (i = 0; i < num_gfns && + (void *)&msg->range[0].pfn_array[i + 1] <= (void *)msg + msglen; + i++) { + gpadl->gfns[gpadl->seen_gfns++] = msg->range[0].pfn_array[i]; + } + + if (gpadl_full(gpadl)) { + vmbus->state = VMBUS_CREATE_GPADL; + } +} + +static void handle_gpadl_body(VMBus *vmbus, vmbus_message_gpadl_body *msg, + uint32_t msglen) +{ + VMBusGpadl *gpadl; + uint32_t num_gfns_left, i; + + if (msglen < sizeof(*msg) || !vmbus_initialized(vmbus)) { + return; + } + + trace_vmbus_gpadl_body(msg->gpadl_id); + + gpadl = find_gpadl(vmbus, msg->gpadl_id); + if (!gpadl) { + return; + } + + num_gfns_left = gpadl->num_gfns - gpadl->seen_gfns; + assert(num_gfns_left); + + for (i = 0; i < num_gfns_left && + (void *)&msg->pfn_array[i + 1] <= (void *)msg + msglen; i++) { + gpadl->gfns[gpadl->seen_gfns++] = msg->pfn_array[i]; + } + + if (gpadl_full(gpadl)) { + vmbus->state = VMBUS_CREATE_GPADL; + } +} + +static void send_create_gpadl(VMBus *vmbus) +{ + VMBusGpadl *gpadl; + + QTAILQ_FOREACH(gpadl, &vmbus->gpadl_list, link) { + if (gpadl_full(gpadl) && gpadl->state == VMGPADL_INIT) { + struct vmbus_message_gpadl_created msg = { + .header.message_type = VMBUS_MSG_GPADL_CREATED, + .gpadl_id = gpadl->id, + .child_relid = gpadl->child_relid, + }; + + trace_vmbus_gpadl_created(gpadl->id); + post_msg(vmbus, &msg, sizeof(msg)); + return; + } + } + + assert(false); +} + +static bool complete_create_gpadl(VMBus *vmbus) +{ + VMBusGpadl *gpadl; + + QTAILQ_FOREACH(gpadl, &vmbus->gpadl_list, link) { + if (gpadl_full(gpadl) && gpadl->state == VMGPADL_INIT) { + gpadl->state = VMGPADL_ALIVE; + + return true; + } + } + + assert(false); + return false; +} + +static void handle_gpadl_teardown(VMBus *vmbus, + vmbus_message_gpadl_teardown *msg, + uint32_t msglen) +{ + VMBusGpadl *gpadl; + + if (msglen < sizeof(*msg) || !vmbus_initialized(vmbus)) { + return; + } + + trace_vmbus_gpadl_teardown(msg->gpadl_id); + + gpadl = find_gpadl(vmbus, msg->gpadl_id); + if (!gpadl || gpadl->state == VMGPADL_TORNDOWN) { + return; + } + + gpadl->state = VMGPADL_TEARINGDOWN; + vmbus->state = VMBUS_TEARDOWN_GPADL; +} + +static void send_teardown_gpadl(VMBus *vmbus) +{ + VMBusGpadl *gpadl; + + QTAILQ_FOREACH(gpadl, &vmbus->gpadl_list, link) { + if (gpadl->state == VMGPADL_TEARINGDOWN) { + struct vmbus_message_gpadl_torndown msg = { + .header.message_type = VMBUS_MSG_GPADL_TORNDOWN, + .gpadl_id = gpadl->id, + }; + + trace_vmbus_gpadl_torndown(gpadl->id); + post_msg(vmbus, &msg, sizeof(msg)); + return; + } + } + + assert(false); +} + +static bool complete_teardown_gpadl(VMBus *vmbus) +{ + VMBusGpadl *gpadl; + + QTAILQ_FOREACH(gpadl, &vmbus->gpadl_list, link) { + if (gpadl->state == 
VMGPADL_TEARINGDOWN) { + gpadl->state = VMGPADL_TORNDOWN; + vmbus_put_gpadl(gpadl); + return true; + } + } + + assert(false); + return false; +} + +static void handle_open_channel(VMBus *vmbus, vmbus_message_open_channel *msg, + uint32_t msglen) +{ + VMBusChannel *chan; + + if (msglen < sizeof(*msg) || !vmbus_initialized(vmbus)) { + return; + } + + trace_vmbus_open_channel(msg->child_relid, msg->ring_buffer_gpadl_id, + msg->target_vp); + chan = find_channel(vmbus, msg->child_relid); + if (!chan || chan->state != VMCHAN_INIT) { + return; + } + + chan->ringbuf_gpadl = msg->ring_buffer_gpadl_id; + chan->ringbuf_send_offset = msg->ring_buffer_offset; + chan->target_vp = msg->target_vp; + chan->open_id = msg->open_id; + + open_channel(chan); + + chan->state = VMCHAN_OPENING; + vmbus->state = VMBUS_OPEN_CHANNEL; +} + +static void send_open_channel(VMBus *vmbus) +{ + VMBusChannel *chan; + + QTAILQ_FOREACH(chan, &vmbus->channel_list, link) { + if (chan->state == VMCHAN_OPENING) { + struct vmbus_message_open_result msg = { + .header.message_type = VMBUS_MSG_OPENCHANNEL_RESULT, + .child_relid = chan->id, + .open_id = chan->open_id, + .status = !vmbus_channel_is_open(chan), + }; + + trace_vmbus_channel_open(chan->id, msg.status); + post_msg(vmbus, &msg, sizeof(msg)); + return; + } + } + + assert(false); +} + +static bool complete_open_channel(VMBus *vmbus) +{ + VMBusChannel *chan; + + QTAILQ_FOREACH(chan, &vmbus->channel_list, link) { + if (chan->state == VMCHAN_OPENING) { + if (vmbus_channel_is_open(chan)) { + chan->state = VMCHAN_OPEN; + /* + * simulate guest notification of ringbuffer space made + * available, for the channel protocols where the host + * initiates the communication + */ + vmbus_channel_notify_host(chan); + } else { + chan->state = VMCHAN_INIT; + } + return true; + } + } + + assert(false); + return false; +} + +static void vdev_reset_on_close(VMBusDevice *vdev) +{ + uint16_t i; + + for (i = 0; i < vdev->num_channels; i++) { + if (vmbus_channel_is_open(&vdev->channels[i])) { + return; + } + } + + /* all channels closed -- reset device */ + qdev_reset_all(DEVICE(vdev)); +} + +static void handle_close_channel(VMBus *vmbus, vmbus_message_close_channel *msg, + uint32_t msglen) +{ + VMBusChannel *chan; + + if (msglen < sizeof(*msg) || !vmbus_initialized(vmbus)) { + return; + } + + trace_vmbus_close_channel(msg->child_relid); + + chan = find_channel(vmbus, msg->child_relid); + if (!chan) { + return; + } + + close_channel(chan); + chan->state = VMCHAN_INIT; + + vdev_reset_on_close(chan->dev); +} + +static void handle_unload(VMBus *vmbus, void *msg, uint32_t msglen) +{ + vmbus->state = VMBUS_UNLOAD; +} + +static void send_unload(VMBus *vmbus) +{ + vmbus_message_header msg = { + .message_type = VMBUS_MSG_UNLOAD_RESPONSE, + }; + + qemu_mutex_lock(&vmbus->rx_queue_lock); + vmbus->rx_queue_size = 0; + qemu_mutex_unlock(&vmbus->rx_queue_lock); + + post_msg(vmbus, &msg, sizeof(msg)); + return; +} + +static bool complete_unload(VMBus *vmbus) +{ + vmbus_reset_all(vmbus); + return true; +} + +static void process_message(VMBus *vmbus) +{ + struct hyperv_post_message_input *hv_msg; + struct vmbus_message_header *msg; + void *msgdata; + uint32_t msglen; + + qemu_mutex_lock(&vmbus->rx_queue_lock); + + if (!vmbus->rx_queue_size) { + goto unlock; + } + + hv_msg = &vmbus->rx_queue[vmbus->rx_queue_head]; + msglen = hv_msg->payload_size; + if (msglen < sizeof(*msg)) { + goto out; + } + msgdata = hv_msg->payload; + msg = (struct vmbus_message_header *)msgdata; + + 
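/* trace the message, then dispatch on its type */ +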
trace_vmbus_process_incoming_message(msg->message_type); + + switch (msg->message_type) { + case VMBUS_MSG_INITIATE_CONTACT: + handle_initiate_contact(vmbus, msgdata, msglen); + break; + case VMBUS_MSG_REQUESTOFFERS: + handle_request_offers(vmbus, msgdata, msglen); + break; + case VMBUS_MSG_GPADL_HEADER: + handle_gpadl_header(vmbus, msgdata, msglen); + break; + case VMBUS_MSG_GPADL_BODY: + handle_gpadl_body(vmbus, msgdata, msglen); + break; + case VMBUS_MSG_GPADL_TEARDOWN: + handle_gpadl_teardown(vmbus, msgdata, msglen); + break; + case VMBUS_MSG_OPENCHANNEL: + handle_open_channel(vmbus, msgdata, msglen); + break; + case VMBUS_MSG_CLOSECHANNEL: + handle_close_channel(vmbus, msgdata, msglen); + break; + case VMBUS_MSG_UNLOAD: + handle_unload(vmbus, msgdata, msglen); + break; + default: + error_report("unknown message type %#x", msg->message_type); + break; + } + +out: + vmbus->rx_queue_size--; + vmbus->rx_queue_head++; + vmbus->rx_queue_head %= HV_MSG_QUEUE_LEN; + + vmbus_resched(vmbus); +unlock: + qemu_mutex_unlock(&vmbus->rx_queue_lock); +} + +static const struct { + void (*run)(VMBus *vmbus); + bool (*complete)(VMBus *vmbus); +} state_runner[] = { + [VMBUS_LISTEN] = {process_message, NULL}, + [VMBUS_HANDSHAKE] = {send_handshake, NULL}, + [VMBUS_OFFER] = {send_offer, complete_offer}, + [VMBUS_CREATE_GPADL] = {send_create_gpadl, complete_create_gpadl}, + [VMBUS_TEARDOWN_GPADL] = {send_teardown_gpadl, complete_teardown_gpadl}, + [VMBUS_OPEN_CHANNEL] = {send_open_channel, complete_open_channel}, + [VMBUS_UNLOAD] = {send_unload, complete_unload}, +}; + +static void vmbus_do_run(VMBus *vmbus) +{ + if (vmbus->msg_in_progress) { + return; + } + + assert(vmbus->state < VMBUS_STATE_MAX); + assert(state_runner[vmbus->state].run); + state_runner[vmbus->state].run(vmbus); +} + +static void vmbus_run(void *opaque) +{ + VMBus *vmbus = opaque; + + /* make sure no recursion happens (e.g. due to recursive aio_poll()) */ + if (vmbus->in_progress) { + return; + } + + vmbus->in_progress = true; + /* + * FIXME: if vmbus_resched() is called from within vmbus_do_run(), it + * should go *after* the code that can result in aio_poll; otherwise + * reschedules can be missed. No idea how to enforce that. 
+ */ + vmbus_do_run(vmbus); + vmbus->in_progress = false; +} + +static void vmbus_msg_cb(void *data, int status) +{ + VMBus *vmbus = data; + bool (*complete)(VMBus *vmbus); + + assert(vmbus->msg_in_progress); + + trace_vmbus_msg_cb(status); + + if (status == -EAGAIN) { + goto out; + } + if (status) { + error_report("message delivery fatal failure: %d; aborting vmbus", + status); + vmbus_reset_all(vmbus); + return; + } + + assert(vmbus->state < VMBUS_STATE_MAX); + complete = state_runner[vmbus->state].complete; + if (!complete || complete(vmbus)) { + vmbus->state = VMBUS_LISTEN; + } +out: + vmbus->msg_in_progress = false; + vmbus_resched(vmbus); +} + +static void vmbus_resched(VMBus *vmbus) +{ + aio_bh_schedule_oneshot(qemu_get_aio_context(), vmbus_run, vmbus); +} + +static void vmbus_signal_event(EventNotifier *e) +{ + VMBusChannel *chan; + VMBus *vmbus = container_of(e, VMBus, notifier); + unsigned long *int_map; + hwaddr addr, len; + bool is_dirty = false; + + if (!event_notifier_test_and_clear(e)) { + return; + } + + trace_vmbus_signal_event(); + + if (!vmbus->int_page_gpa) { + return; + } + + addr = vmbus->int_page_gpa + TARGET_PAGE_SIZE / 2; + len = TARGET_PAGE_SIZE / 2; + int_map = cpu_physical_memory_map(addr, &len, 1); + if (len != TARGET_PAGE_SIZE / 2) { + goto unmap; + } + + QTAILQ_FOREACH(chan, &vmbus->channel_list, link) { + if (bitmap_test_and_clear_atomic(int_map, chan->id, 1)) { + if (!vmbus_channel_is_open(chan)) { + continue; + } + vmbus_channel_notify_host(chan); + is_dirty = true; + } + } + +unmap: + cpu_physical_memory_unmap(int_map, len, 1, is_dirty); +} + +static void vmbus_dev_realize(DeviceState *dev, Error **errp) +{ + VMBusDevice *vdev = VMBUS_DEVICE(dev); + VMBusDeviceClass *vdc = VMBUS_DEVICE_GET_CLASS(vdev); + VMBus *vmbus = VMBUS(qdev_get_parent_bus(dev)); + BusChild *child; + Error *err = NULL; + char idstr[UUID_FMT_LEN + 1]; + + assert(!qemu_uuid_is_null(&vdev->instanceid)); + + if (!qemu_uuid_is_null(&vdc->instanceid)) { + /* Class wants to only have a single instance with a fixed UUID */ + if (!qemu_uuid_is_equal(&vdev->instanceid, &vdc->instanceid)) { + error_setg(&err, "instance id can't be changed"); + goto error_out; + } + } + + /* Check for instance id collision for this class id */ + QTAILQ_FOREACH(child, &BUS(vmbus)->children, sibling) { + VMBusDevice *child_dev = VMBUS_DEVICE(child->child); + + if (child_dev == vdev) { + continue; + } + + if (qemu_uuid_is_equal(&child_dev->instanceid, &vdev->instanceid)) { + qemu_uuid_unparse(&vdev->instanceid, idstr); + error_setg(&err, "duplicate vmbus device instance id %s", idstr); + goto error_out; + } + } + + vdev->dma_as = &address_space_memory; + + create_channels(vmbus, vdev, &err); + if (err) { + goto error_out; + } + + if (vdc->vmdev_realize) { + vdc->vmdev_realize(vdev, &err); + if (err) { + goto err_vdc_realize; + } + } + return; + +err_vdc_realize: + free_channels(vdev); +error_out: + error_propagate(errp, err); +} + +static void vmbus_dev_reset(DeviceState *dev) +{ + uint16_t i; + VMBusDevice *vdev = VMBUS_DEVICE(dev); + VMBusDeviceClass *vdc = VMBUS_DEVICE_GET_CLASS(vdev); + + if (vdev->channels) { + for (i = 0; i < vdev->num_channels; i++) { + VMBusChannel *chan = &vdev->channels[i]; + close_channel(chan); + chan->state = VMCHAN_INIT; + } + } + + if (vdc->vmdev_reset) { + vdc->vmdev_reset(vdev); + } +} + +static void vmbus_dev_unrealize(DeviceState *dev) +{ + VMBusDevice *vdev = VMBUS_DEVICE(dev); + VMBusDeviceClass *vdc = VMBUS_DEVICE_GET_CLASS(vdev); + + if (vdc->vmdev_unrealize) { + 
vdc->vmdev_unrealize(vdev); + } + free_channels(vdev); +} + +static Property vmbus_dev_props[] = { + DEFINE_PROP_UUID("instanceid", VMBusDevice, instanceid), + DEFINE_PROP_END_OF_LIST() +}; + + +static void vmbus_dev_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *kdev = DEVICE_CLASS(klass); + device_class_set_props(kdev, vmbus_dev_props); + kdev->bus_type = TYPE_VMBUS; + kdev->realize = vmbus_dev_realize; + kdev->unrealize = vmbus_dev_unrealize; + kdev->reset = vmbus_dev_reset; +} + +static void vmbus_dev_instance_init(Object *obj) +{ + VMBusDevice *vdev = VMBUS_DEVICE(obj); + VMBusDeviceClass *vdc = VMBUS_DEVICE_GET_CLASS(vdev); + + if (!qemu_uuid_is_null(&vdc->instanceid)) { + /* Class wants to only have a single instance with a fixed UUID */ + vdev->instanceid = vdc->instanceid; + } +} + +const VMStateDescription vmstate_vmbus_dev = { + .name = TYPE_VMBUS_DEVICE, + .version_id = 0, + .minimum_version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_UINT8_ARRAY(instanceid.data, VMBusDevice, 16), + VMSTATE_UINT16(num_channels, VMBusDevice), + VMSTATE_STRUCT_VARRAY_POINTER_UINT16(channels, VMBusDevice, + num_channels, vmstate_channel, + VMBusChannel), + VMSTATE_END_OF_LIST() + } +}; + +/* vmbus generic device base */ +static const TypeInfo vmbus_dev_type_info = { + .name = TYPE_VMBUS_DEVICE, + .parent = TYPE_DEVICE, + .abstract = true, + .instance_size = sizeof(VMBusDevice), + .class_size = sizeof(VMBusDeviceClass), + .class_init = vmbus_dev_class_init, + .instance_init = vmbus_dev_instance_init, +}; + +static void vmbus_realize(BusState *bus, Error **errp) +{ + int ret = 0; + Error *local_err = NULL; + VMBus *vmbus = VMBUS(bus); + + qemu_mutex_init(&vmbus->rx_queue_lock); + + QTAILQ_INIT(&vmbus->gpadl_list); + QTAILQ_INIT(&vmbus->channel_list); + + ret = hyperv_set_msg_handler(VMBUS_MESSAGE_CONNECTION_ID, + vmbus_recv_message, vmbus); + if (ret != 0) { + error_setg(&local_err, "hyperv set message handler failed: %d", ret); + goto error_out; + } + + ret = event_notifier_init(&vmbus->notifier, 0); + if (ret != 0) { + error_setg(&local_err, "event notifier failed to init with %d", ret); + goto remove_msg_handler; + } + + event_notifier_set_handler(&vmbus->notifier, vmbus_signal_event); + ret = hyperv_set_event_flag_handler(VMBUS_EVENT_CONNECTION_ID, + &vmbus->notifier); + if (ret != 0) { + error_setg(&local_err, "hyperv set event handler failed with %d", ret); + goto clear_event_notifier; + } + + return; + +clear_event_notifier: + event_notifier_cleanup(&vmbus->notifier); +remove_msg_handler: + hyperv_set_msg_handler(VMBUS_MESSAGE_CONNECTION_ID, NULL, NULL); +error_out: + qemu_mutex_destroy(&vmbus->rx_queue_lock); + error_propagate(errp, local_err); +} + +static void vmbus_unrealize(BusState *bus) +{ + VMBus *vmbus = VMBUS(bus); + + hyperv_set_msg_handler(VMBUS_MESSAGE_CONNECTION_ID, NULL, NULL); + hyperv_set_event_flag_handler(VMBUS_EVENT_CONNECTION_ID, NULL); + event_notifier_cleanup(&vmbus->notifier); + + qemu_mutex_destroy(&vmbus->rx_queue_lock); +} + +static void vmbus_reset(BusState *bus) +{ + vmbus_deinit(VMBUS(bus)); +} + +static char *vmbus_get_dev_path(DeviceState *dev) +{ + BusState *bus = qdev_get_parent_bus(dev); + return qdev_get_dev_path(bus->parent); +} + +static char *vmbus_get_fw_dev_path(DeviceState *dev) +{ + VMBusDevice *vdev = VMBUS_DEVICE(dev); + char uuid[UUID_FMT_LEN + 1]; + + qemu_uuid_unparse(&vdev->instanceid, uuid); + return g_strdup_printf("%s@%s", qdev_fw_name(dev), uuid); +} + +static void vmbus_class_init(ObjectClass *klass, void *data) +{ + 
BusClass *k = BUS_CLASS(klass); + + k->get_dev_path = vmbus_get_dev_path; + k->get_fw_dev_path = vmbus_get_fw_dev_path; + k->realize = vmbus_realize; + k->unrealize = vmbus_unrealize; + k->reset = vmbus_reset; +} + +static int vmbus_pre_load(void *opaque) +{ + VMBusChannel *chan; + VMBus *vmbus = VMBUS(opaque); + + /* + * channel IDs allocated by the source will come in the migration stream + * for each channel, so clean up the ones allocated at realize + */ + QTAILQ_FOREACH(chan, &vmbus->channel_list, link) { + unregister_chan_id(chan); + } + + return 0; +} +static int vmbus_post_load(void *opaque, int version_id) +{ + int ret; + VMBus *vmbus = VMBUS(opaque); + VMBusGpadl *gpadl; + VMBusChannel *chan; + + ret = vmbus_init(vmbus); + if (ret) { + return ret; + } + + QTAILQ_FOREACH(gpadl, &vmbus->gpadl_list, link) { + gpadl->vmbus = vmbus; + gpadl->refcount = 1; + } + + /* + * reopening channels depends on initialized vmbus so it's done here + * instead of channel_post_load() + */ + QTAILQ_FOREACH(chan, &vmbus->channel_list, link) { + + if (chan->state == VMCHAN_OPENING || chan->state == VMCHAN_OPEN) { + open_channel(chan); + } + + if (chan->state != VMCHAN_OPEN) { + continue; + } + + if (!vmbus_channel_is_open(chan)) { + /* reopen failed, abort loading */ + return -1; + } + + /* resume processing on the guest side if it missed the notification */ + hyperv_sint_route_set_sint(chan->notify_route); + /* ditto on the host side */ + vmbus_channel_notify_host(chan); + } + + vmbus_resched(vmbus); + return 0; +} + +static const VMStateDescription vmstate_post_message_input = { + .name = "vmbus/hyperv_post_message_input", + .version_id = 0, + .minimum_version_id = 0, + .fields = (VMStateField[]) { + /* + * skip connection_id and message_type as they are validated before + * queueing and ignored on dequeueing + */ + VMSTATE_UINT32(payload_size, struct hyperv_post_message_input), + VMSTATE_UINT8_ARRAY(payload, struct hyperv_post_message_input, + HV_MESSAGE_PAYLOAD_SIZE), + VMSTATE_END_OF_LIST() + } +}; + +static bool vmbus_rx_queue_needed(void *opaque) +{ + VMBus *vmbus = VMBUS(opaque); + return vmbus->rx_queue_size; +} + +static const VMStateDescription vmstate_rx_queue = { + .name = "vmbus/rx_queue", + .version_id = 0, + .minimum_version_id = 0, + .needed = vmbus_rx_queue_needed, + .fields = (VMStateField[]) { + VMSTATE_UINT8(rx_queue_head, VMBus), + VMSTATE_UINT8(rx_queue_size, VMBus), + VMSTATE_STRUCT_ARRAY(rx_queue, VMBus, + HV_MSG_QUEUE_LEN, 0, + vmstate_post_message_input, + struct hyperv_post_message_input), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_vmbus = { + .name = TYPE_VMBUS, + .version_id = 0, + .minimum_version_id = 0, + .pre_load = vmbus_pre_load, + .post_load = vmbus_post_load, + .fields = (VMStateField[]) { + VMSTATE_UINT8(state, VMBus), + VMSTATE_UINT32(version, VMBus), + VMSTATE_UINT32(target_vp, VMBus), + VMSTATE_UINT64(int_page_gpa, VMBus), + VMSTATE_QTAILQ_V(gpadl_list, VMBus, 0, + vmstate_gpadl, VMBusGpadl, link), + VMSTATE_END_OF_LIST() + }, + .subsections = (const VMStateDescription * []) { + &vmstate_rx_queue, + NULL + } +}; + +static const TypeInfo vmbus_type_info = { + .name = TYPE_VMBUS, + .parent = TYPE_BUS, + .instance_size = sizeof(VMBus), + .class_init = vmbus_class_init, +}; + +static void vmbus_bridge_realize(DeviceState *dev, Error **errp) +{ + VMBusBridge *bridge = VMBUS_BRIDGE(dev); + + /* + * here there's at least one vmbus bridge that is being realized, so + * vmbus_bridge_find can only return NULL if it's not unique + */ + if 
(!vmbus_bridge_find()) { + error_setg(errp, "there can be at most one %s in the system", + TYPE_VMBUS_BRIDGE); + return; + } + + if (!hyperv_is_synic_enabled()) { + error_report("VMBus requires usable Hyper-V SynIC and VP_INDEX"); + return; + } + + bridge->bus = VMBUS(qbus_new(TYPE_VMBUS, dev, "vmbus")); +} + +static char *vmbus_bridge_ofw_unit_address(const SysBusDevice *dev) +{ + /* there can be only one VMBus */ + return g_strdup("0"); +} + +static const VMStateDescription vmstate_vmbus_bridge = { + .name = TYPE_VMBUS_BRIDGE, + .version_id = 0, + .minimum_version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_STRUCT_POINTER(bus, VMBusBridge, vmstate_vmbus, VMBus), + VMSTATE_END_OF_LIST() + }, +}; + +static Property vmbus_bridge_props[] = { + DEFINE_PROP_UINT8("irq", VMBusBridge, irq, 7), + DEFINE_PROP_END_OF_LIST() +}; + +static void vmbus_bridge_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *k = DEVICE_CLASS(klass); + SysBusDeviceClass *sk = SYS_BUS_DEVICE_CLASS(klass); + + k->realize = vmbus_bridge_realize; + k->fw_name = "vmbus"; + sk->explicit_ofw_unit_address = vmbus_bridge_ofw_unit_address; + set_bit(DEVICE_CATEGORY_BRIDGE, k->categories); + k->vmsd = &vmstate_vmbus_bridge; + device_class_set_props(k, vmbus_bridge_props); + /* override SysBusDevice's default */ + k->user_creatable = true; +} + +static const TypeInfo vmbus_bridge_type_info = { + .name = TYPE_VMBUS_BRIDGE, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(VMBusBridge), + .class_init = vmbus_bridge_class_init, +}; + +static void vmbus_register_types(void) +{ + type_register_static(&vmbus_bridge_type_info); + type_register_static(&vmbus_dev_type_info); + type_register_static(&vmbus_type_info); +} + +type_init(vmbus_register_types) diff --git a/hw/i2c/Kconfig b/hw/i2c/Kconfig new file mode 100644 index 000000000..8217cb504 --- /dev/null +++ b/hw/i2c/Kconfig @@ -0,0 +1,38 @@ +config I2C + bool + +config SMBUS + bool + select I2C + +config SMBUS_EEPROM + bool + select SMBUS + +config VERSATILE_I2C + bool + select BITBANG_I2C + +config ACPI_SMBUS + bool + select SMBUS + +config BITBANG_I2C + bool + select I2C + +config IMX_I2C + bool + select I2C + +config MPC_I2C + bool + select I2C + +config PCA954X + bool + select I2C + +config PMBUS + bool + select SMBUS diff --git a/hw/i2c/aspeed_i2c.c b/hw/i2c/aspeed_i2c.c new file mode 100644 index 000000000..03a4f5a91 --- /dev/null +++ b/hw/i2c/aspeed_i2c.c @@ -0,0 +1,1038 @@ +/* + * ARM Aspeed I2C controller + * + * Copyright (C) 2016 IBM Corp. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . 
+ * + */ + +#include "qemu/osdep.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "qemu/error-report.h" +#include "qapi/error.h" +#include "hw/i2c/aspeed_i2c.h" +#include "hw/irq.h" +#include "hw/qdev-properties.h" +#include "trace.h" + +/* I2C Global Register */ + +#define I2C_CTRL_STATUS 0x00 /* Device Interrupt Status */ +#define I2C_CTRL_ASSIGN 0x08 /* Device Interrupt Target + Assignment */ +#define I2C_CTRL_GLOBAL 0x0C /* Global Control Register */ +#define I2C_CTRL_SRAM_EN BIT(0) + +/* I2C Device (Bus) Register */ + +#define I2CD_FUN_CTRL_REG 0x00 /* I2CD Function Control */ +#define I2CD_POOL_PAGE_SEL(x) (((x) >> 20) & 0x7) /* AST2400 */ +#define I2CD_M_SDA_LOCK_EN (0x1 << 16) +#define I2CD_MULTI_MASTER_DIS (0x1 << 15) +#define I2CD_M_SCL_DRIVE_EN (0x1 << 14) +#define I2CD_MSB_STS (0x1 << 9) +#define I2CD_SDA_DRIVE_1T_EN (0x1 << 8) +#define I2CD_M_SDA_DRIVE_1T_EN (0x1 << 7) +#define I2CD_M_HIGH_SPEED_EN (0x1 << 6) +#define I2CD_DEF_ADDR_EN (0x1 << 5) +#define I2CD_DEF_ALERT_EN (0x1 << 4) +#define I2CD_DEF_ARP_EN (0x1 << 3) +#define I2CD_DEF_GCALL_EN (0x1 << 2) +#define I2CD_SLAVE_EN (0x1 << 1) +#define I2CD_MASTER_EN (0x1) + +#define I2CD_AC_TIMING_REG1 0x04 /* Clock and AC Timing Control #1 */ +#define I2CD_AC_TIMING_REG2 0x08 /* Clock and AC Timing Control #2 */ +#define I2CD_INTR_CTRL_REG 0x0c /* I2CD Interrupt Control */ +#define I2CD_INTR_STS_REG 0x10 /* I2CD Interrupt Status */ + +#define I2CD_INTR_SLAVE_ADDR_MATCH (0x1 << 31) /* 0: addr1 1: addr2 */ +#define I2CD_INTR_SLAVE_ADDR_RX_PENDING (0x1 << 30) +/* bits[19-16] Reserved */ + +/* All bits below are cleared by writing 1 */ +#define I2CD_INTR_SLAVE_INACTIVE_TIMEOUT (0x1 << 15) +#define I2CD_INTR_SDA_DL_TIMEOUT (0x1 << 14) +#define I2CD_INTR_BUS_RECOVER_DONE (0x1 << 13) +#define I2CD_INTR_SMBUS_ALERT (0x1 << 12) /* Bus [0-3] only */ +#define I2CD_INTR_SMBUS_ARP_ADDR (0x1 << 11) /* Removed */ +#define I2CD_INTR_SMBUS_DEV_ALERT_ADDR (0x1 << 10) /* Removed */ +#define I2CD_INTR_SMBUS_DEF_ADDR (0x1 << 9) /* Removed */ +#define I2CD_INTR_GCALL_ADDR (0x1 << 8) /* Removed */ +#define I2CD_INTR_SLAVE_ADDR_RX_MATCH (0x1 << 7) /* use RX_DONE */ +#define I2CD_INTR_SCL_TIMEOUT (0x1 << 6) +#define I2CD_INTR_ABNORMAL (0x1 << 5) +#define I2CD_INTR_NORMAL_STOP (0x1 << 4) +#define I2CD_INTR_ARBIT_LOSS (0x1 << 3) +#define I2CD_INTR_RX_DONE (0x1 << 2) +#define I2CD_INTR_TX_NAK (0x1 << 1) +#define I2CD_INTR_TX_ACK (0x1 << 0) + +#define I2CD_CMD_REG 0x14 /* I2CD Command/Status */ +#define I2CD_SDA_OE (0x1 << 28) +#define I2CD_SDA_O (0x1 << 27) +#define I2CD_SCL_OE (0x1 << 26) +#define I2CD_SCL_O (0x1 << 25) +#define I2CD_TX_TIMING (0x1 << 24) +#define I2CD_TX_STATUS (0x1 << 23) + +#define I2CD_TX_STATE_SHIFT 19 /* Tx State Machine */ +#define I2CD_TX_STATE_MASK 0xf +#define I2CD_IDLE 0x0 +#define I2CD_MACTIVE 0x8 +#define I2CD_MSTART 0x9 +#define I2CD_MSTARTR 0xa +#define I2CD_MSTOP 0xb +#define I2CD_MTXD 0xc +#define I2CD_MRXACK 0xd +#define I2CD_MRXD 0xe +#define I2CD_MTXACK 0xf +#define I2CD_SWAIT 0x1 +#define I2CD_SRXD 0x4 +#define I2CD_STXACK 0x5 +#define I2CD_STXD 0x6 +#define I2CD_SRXACK 0x7 +#define I2CD_RECOVER 0x3 + +#define I2CD_SCL_LINE_STS (0x1 << 18) +#define I2CD_SDA_LINE_STS (0x1 << 17) +#define I2CD_BUS_BUSY_STS (0x1 << 16) +#define I2CD_SDA_OE_OUT_DIR (0x1 << 15) +#define I2CD_SDA_O_OUT_DIR (0x1 << 14) +#define I2CD_SCL_OE_OUT_DIR (0x1 << 13) +#define I2CD_SCL_O_OUT_DIR (0x1 << 12) +#define I2CD_BUS_RECOVER_CMD_EN (0x1 << 11) +#define I2CD_S_ALT_EN (0x1 <<
10) + +/* Command Bit */ +#define I2CD_RX_DMA_ENABLE (0x1 << 9) +#define I2CD_TX_DMA_ENABLE (0x1 << 8) +#define I2CD_RX_BUFF_ENABLE (0x1 << 7) +#define I2CD_TX_BUFF_ENABLE (0x1 << 6) +#define I2CD_M_STOP_CMD (0x1 << 5) +#define I2CD_M_S_RX_CMD_LAST (0x1 << 4) +#define I2CD_M_RX_CMD (0x1 << 3) +#define I2CD_S_TX_CMD (0x1 << 2) +#define I2CD_M_TX_CMD (0x1 << 1) +#define I2CD_M_START_CMD (0x1) + +#define I2CD_DEV_ADDR_REG 0x18 /* Slave Device Address */ +#define I2CD_POOL_CTRL_REG 0x1c /* Pool Buffer Control */ +#define I2CD_POOL_RX_COUNT(x) (((x) >> 24) & 0xff) +#define I2CD_POOL_RX_SIZE(x) ((((x) >> 16) & 0xff) + 1) +#define I2CD_POOL_TX_COUNT(x) ((((x) >> 8) & 0xff) + 1) +#define I2CD_POOL_OFFSET(x) (((x) & 0x3f) << 2) /* AST2400 */ +#define I2CD_BYTE_BUF_REG 0x20 /* Transmit/Receive Byte Buffer */ +#define I2CD_BYTE_BUF_TX_SHIFT 0 +#define I2CD_BYTE_BUF_TX_MASK 0xff +#define I2CD_BYTE_BUF_RX_SHIFT 8 +#define I2CD_BYTE_BUF_RX_MASK 0xff +#define I2CD_DMA_ADDR 0x24 /* DMA Buffer Address */ +#define I2CD_DMA_LEN 0x28 /* DMA Transfer Length < 4KB */ + +static inline bool aspeed_i2c_bus_is_master(AspeedI2CBus *bus) +{ + return bus->ctrl & I2CD_MASTER_EN; +} + +static inline bool aspeed_i2c_bus_is_enabled(AspeedI2CBus *bus) +{ + return bus->ctrl & (I2CD_MASTER_EN | I2CD_SLAVE_EN); +} + +static inline void aspeed_i2c_bus_raise_interrupt(AspeedI2CBus *bus) +{ + AspeedI2CClass *aic = ASPEED_I2C_GET_CLASS(bus->controller); + + trace_aspeed_i2c_bus_raise_interrupt(bus->intr_status, + bus->intr_status & I2CD_INTR_TX_NAK ? "nak|" : "", + bus->intr_status & I2CD_INTR_TX_ACK ? "ack|" : "", + bus->intr_status & I2CD_INTR_RX_DONE ? "done|" : "", + bus->intr_status & I2CD_INTR_NORMAL_STOP ? "normal|" : "", + bus->intr_status & I2CD_INTR_ABNORMAL ? "abnormal" : ""); + + bus->intr_status &= bus->intr_ctrl; + if (bus->intr_status) { + bus->controller->intr_status |= 1 << bus->id; + qemu_irq_raise(aic->bus_get_irq(bus)); + } +} + +static uint64_t aspeed_i2c_bus_read(void *opaque, hwaddr offset, + unsigned size) +{ + AspeedI2CBus *bus = opaque; + AspeedI2CClass *aic = ASPEED_I2C_GET_CLASS(bus->controller); + uint64_t value = -1; + + switch (offset) { + case I2CD_FUN_CTRL_REG: + value = bus->ctrl; + break; + case I2CD_AC_TIMING_REG1: + value = bus->timing[0]; + break; + case I2CD_AC_TIMING_REG2: + value = bus->timing[1]; + break; + case I2CD_INTR_CTRL_REG: + value = bus->intr_ctrl; + break; + case I2CD_INTR_STS_REG: + value = bus->intr_status; + break; + case I2CD_POOL_CTRL_REG: + value = bus->pool_ctrl; + break; + case I2CD_BYTE_BUF_REG: + value = bus->buf; + break; + case I2CD_CMD_REG: + value = bus->cmd | (i2c_bus_busy(bus->bus) << 16); + break; + case I2CD_DMA_ADDR: + if (!aic->has_dma) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: No DMA support\n", __func__); + break; + } + value = bus->dma_addr; + break; + case I2CD_DMA_LEN: + if (!aic->has_dma) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: No DMA support\n", __func__); + break; + } + value = bus->dma_len; + break; + + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad offset 0x%" HWADDR_PRIx "\n", __func__, offset); + value = -1; + break; + } + + trace_aspeed_i2c_bus_read(bus->id, offset, size, value); + return value; +} + +static void aspeed_i2c_set_state(AspeedI2CBus *bus, uint8_t state) +{ + bus->cmd &= ~(I2CD_TX_STATE_MASK << I2CD_TX_STATE_SHIFT); + bus->cmd |= (state & I2CD_TX_STATE_MASK) << I2CD_TX_STATE_SHIFT; +} + +static uint8_t aspeed_i2c_get_state(AspeedI2CBus *bus) +{ + return (bus->cmd >> I2CD_TX_STATE_SHIFT) & I2CD_TX_STATE_MASK; +} + +static int 
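Taken together, the registers above give the usual programming model: load the address byte into the byte buffer, then issue start/tx/stop bits through I2CD_CMD_REG. A sketch of a guest driver's single-byte master write as this model interprets it; mmio_write32() stands in for whatever register accessor the guest uses and is not a QEMU API:

    /* hypothetical guest-side sketch of a one-byte master write */
    static void demo_aspeed_i2c_write_byte(uint32_t base, uint8_t addr7, uint8_t byte)
    {
        /* address byte: 7-bit address in bits [7:1], R/W# = 0 in bit 0 */
        mmio_write32(base + I2CD_BYTE_BUF_REG, addr7 << 1);
        mmio_write32(base + I2CD_CMD_REG, I2CD_M_START_CMD | I2CD_M_TX_CMD);

        mmio_write32(base + I2CD_BYTE_BUF_REG, byte);
        mmio_write32(base + I2CD_CMD_REG, I2CD_M_TX_CMD | I2CD_M_STOP_CMD);
    }

In byte mode the model drops the redundant TX flag after the START (the address byte is itself the transmit), which is why the data byte needs its own TX command.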
aspeed_i2c_dma_read(AspeedI2CBus *bus, uint8_t *data) +{ + MemTxResult result; + AspeedI2CState *s = bus->controller; + + result = address_space_read(&s->dram_as, bus->dma_addr, + MEMTXATTRS_UNSPECIFIED, data, 1); + if (result != MEMTX_OK) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: DRAM read failed @%08x\n", + __func__, bus->dma_addr); + return -1; + } + + bus->dma_addr++; + bus->dma_len--; + return 0; +} + +static int aspeed_i2c_bus_send(AspeedI2CBus *bus, uint8_t pool_start) +{ + AspeedI2CClass *aic = ASPEED_I2C_GET_CLASS(bus->controller); + int ret = -1; + int i; + + if (bus->cmd & I2CD_TX_BUFF_ENABLE) { + for (i = pool_start; i < I2CD_POOL_TX_COUNT(bus->pool_ctrl); i++) { + uint8_t *pool_base = aic->bus_pool_base(bus); + + trace_aspeed_i2c_bus_send("BUF", i + 1, + I2CD_POOL_TX_COUNT(bus->pool_ctrl), + pool_base[i]); + ret = i2c_send(bus->bus, pool_base[i]); + if (ret) { + break; + } + } + bus->cmd &= ~I2CD_TX_BUFF_ENABLE; + } else if (bus->cmd & I2CD_TX_DMA_ENABLE) { + while (bus->dma_len) { + uint8_t data; + aspeed_i2c_dma_read(bus, &data); + trace_aspeed_i2c_bus_send("DMA", bus->dma_len, bus->dma_len, data); + ret = i2c_send(bus->bus, data); + if (ret) { + break; + } + } + bus->cmd &= ~I2CD_TX_DMA_ENABLE; + } else { + trace_aspeed_i2c_bus_send("BYTE", pool_start, 1, bus->buf); + ret = i2c_send(bus->bus, bus->buf); + } + + return ret; +} + +static void aspeed_i2c_bus_recv(AspeedI2CBus *bus) +{ + AspeedI2CState *s = bus->controller; + AspeedI2CClass *aic = ASPEED_I2C_GET_CLASS(s); + uint8_t data; + int i; + + if (bus->cmd & I2CD_RX_BUFF_ENABLE) { + uint8_t *pool_base = aic->bus_pool_base(bus); + + for (i = 0; i < I2CD_POOL_RX_SIZE(bus->pool_ctrl); i++) { + pool_base[i] = i2c_recv(bus->bus); + trace_aspeed_i2c_bus_recv("BUF", i + 1, + I2CD_POOL_RX_SIZE(bus->pool_ctrl), + pool_base[i]); + } + + /* Update RX count */ + bus->pool_ctrl &= ~(0xff << 24); + bus->pool_ctrl |= (i & 0xff) << 24; + bus->cmd &= ~I2CD_RX_BUFF_ENABLE; + } else if (bus->cmd & I2CD_RX_DMA_ENABLE) { + uint8_t data; + + while (bus->dma_len) { + MemTxResult result; + + data = i2c_recv(bus->bus); + trace_aspeed_i2c_bus_recv("DMA", bus->dma_len, bus->dma_len, data); + result = address_space_write(&s->dram_as, bus->dma_addr, + MEMTXATTRS_UNSPECIFIED, &data, 1); + if (result != MEMTX_OK) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: DRAM write failed @%08x\n", + __func__, bus->dma_addr); + return; + } + bus->dma_addr++; + bus->dma_len--; + } + bus->cmd &= ~I2CD_RX_DMA_ENABLE; + } else { + data = i2c_recv(bus->bus); + trace_aspeed_i2c_bus_recv("BYTE", 1, 1, bus->buf); + bus->buf = (data & I2CD_BYTE_BUF_RX_MASK) << I2CD_BYTE_BUF_RX_SHIFT; + } +} + +static void aspeed_i2c_handle_rx_cmd(AspeedI2CBus *bus) +{ + aspeed_i2c_set_state(bus, I2CD_MRXD); + aspeed_i2c_bus_recv(bus); + bus->intr_status |= I2CD_INTR_RX_DONE; + if (bus->cmd & I2CD_M_S_RX_CMD_LAST) { + i2c_nack(bus->bus); + } + bus->cmd &= ~(I2CD_M_RX_CMD | I2CD_M_S_RX_CMD_LAST); + aspeed_i2c_set_state(bus, I2CD_MACTIVE); +} + +static uint8_t aspeed_i2c_get_addr(AspeedI2CBus *bus) +{ + AspeedI2CClass *aic = ASPEED_I2C_GET_CLASS(bus->controller); + + if (bus->cmd & I2CD_TX_BUFF_ENABLE) { + uint8_t *pool_base = aic->bus_pool_base(bus); + + return pool_base[0]; + } else if (bus->cmd & I2CD_TX_DMA_ENABLE) { + uint8_t data; + + aspeed_i2c_dma_read(bus, &data); + return data; + } else { + return bus->buf; + } +} + +static bool aspeed_i2c_check_sram(AspeedI2CBus *bus) +{ + AspeedI2CState *s = bus->controller; + AspeedI2CClass *aic = ASPEED_I2C_GET_CLASS(s); + + if (!aic->check_sram) { + 
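On the byte-mode receive path in aspeed_i2c_bus_recv() above, the result lands in bits [15:8] of the byte buffer (the I2CD_BYTE_BUF_RX_* fields), so a guest retrieves it as in this sketch; mmio_read32() and the helper name are hypothetical:

    /* hypothetical guest-side sketch: fetch the last received byte */
    static uint8_t demo_aspeed_i2c_read_rx_byte(uint32_t base)
    {
        uint32_t buf = mmio_read32(base + I2CD_BYTE_BUF_REG);

        return (buf >> I2CD_BYTE_BUF_RX_SHIFT) & I2CD_BYTE_BUF_RX_MASK;
    }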
return true; + } + + /* + * AST2500: SRAM must be enabled before using the Buffer Pool or + * DMA mode. + */ + if (!(s->ctrl_global & I2C_CTRL_SRAM_EN) && + (bus->cmd & (I2CD_RX_DMA_ENABLE | I2CD_TX_DMA_ENABLE | + I2CD_RX_BUFF_ENABLE | I2CD_TX_BUFF_ENABLE))) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: SRAM is not enabled\n", __func__); + return false; + } + + return true; +} + +static void aspeed_i2c_bus_cmd_dump(AspeedI2CBus *bus) +{ + g_autofree char *cmd_flags = NULL; + uint32_t count; + + if (bus->cmd & (I2CD_RX_BUFF_ENABLE | I2CD_TX_BUFF_ENABLE)) { + count = I2CD_POOL_TX_COUNT(bus->pool_ctrl); + } else if (bus->cmd & (I2CD_RX_DMA_ENABLE | I2CD_TX_DMA_ENABLE)) { + count = bus->dma_len; + } else { /* BYTE mode */ + count = 1; + } + + cmd_flags = g_strdup_printf("%s%s%s%s%s%s%s%s%s", + bus->cmd & I2CD_M_START_CMD ? "start|" : "", + bus->cmd & I2CD_RX_DMA_ENABLE ? "rxdma|" : "", + bus->cmd & I2CD_TX_DMA_ENABLE ? "txdma|" : "", + bus->cmd & I2CD_RX_BUFF_ENABLE ? "rxbuf|" : "", + bus->cmd & I2CD_TX_BUFF_ENABLE ? "txbuf|" : "", + bus->cmd & I2CD_M_TX_CMD ? "tx|" : "", + bus->cmd & I2CD_M_RX_CMD ? "rx|" : "", + bus->cmd & I2CD_M_S_RX_CMD_LAST ? "last|" : "", + bus->cmd & I2CD_M_STOP_CMD ? "stop" : ""); + + trace_aspeed_i2c_bus_cmd(bus->cmd, cmd_flags, count, bus->intr_status); +} + +/* + * The state machine needs some refinement. It is only used to track + * invalid STOP commands for the moment. + */ +static void aspeed_i2c_bus_handle_cmd(AspeedI2CBus *bus, uint64_t value) +{ + uint8_t pool_start = 0; + + bus->cmd &= ~0xFFFF; + bus->cmd |= value & 0xFFFF; + + if (!aspeed_i2c_check_sram(bus)) { + return; + } + + if (trace_event_get_state_backends(TRACE_ASPEED_I2C_BUS_CMD)) { + aspeed_i2c_bus_cmd_dump(bus); + } + + if (bus->cmd & I2CD_M_START_CMD) { + uint8_t state = aspeed_i2c_get_state(bus) & I2CD_MACTIVE ? + I2CD_MSTARTR : I2CD_MSTART; + uint8_t addr; + + aspeed_i2c_set_state(bus, state); + + addr = aspeed_i2c_get_addr(bus); + + if (i2c_start_transfer(bus->bus, extract32(addr, 1, 7), + extract32(addr, 0, 1))) { + bus->intr_status |= I2CD_INTR_TX_NAK; + } else { + bus->intr_status |= I2CD_INTR_TX_ACK; + } + + bus->cmd &= ~I2CD_M_START_CMD; + + /* + * The START command is also a TX command, as the slave + * address is sent on the bus. Drop the TX flag if nothing + * else needs to be sent in this sequence. + */ + if (bus->cmd & I2CD_TX_BUFF_ENABLE) { + if (I2CD_POOL_TX_COUNT(bus->pool_ctrl) == 1) { + bus->cmd &= ~I2CD_M_TX_CMD; + } else { + /* + * Increase the start index in the TX pool buffer to + * skip the address byte.
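One detail worth calling out from the dump above: the pool-control fields use minus-one encodings, which is why I2CD_POOL_TX_COUNT() adds 1 back. Programming a 4-byte TX pool transfer therefore looks like this sketch:

    uint32_t pool_ctrl = 0;

    pool_ctrl |= (4 - 1) << 8;    /* TX count field, bits [15:8] */
    /* I2CD_POOL_TX_COUNT(pool_ctrl) == (((pool_ctrl) >> 8) & 0xff) + 1 == 4 */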
+ */ + pool_start++; + } + } else if (bus->cmd & I2CD_TX_DMA_ENABLE) { + if (bus->dma_len == 0) { + bus->cmd &= ~I2CD_M_TX_CMD; + } + } else { + bus->cmd &= ~I2CD_M_TX_CMD; + } + + /* No slave found */ + if (!i2c_bus_busy(bus->bus)) { + return; + } + aspeed_i2c_set_state(bus, I2CD_MACTIVE); + } + + if (bus->cmd & I2CD_M_TX_CMD) { + aspeed_i2c_set_state(bus, I2CD_MTXD); + if (aspeed_i2c_bus_send(bus, pool_start)) { + bus->intr_status |= (I2CD_INTR_TX_NAK); + i2c_end_transfer(bus->bus); + } else { + bus->intr_status |= I2CD_INTR_TX_ACK; + } + bus->cmd &= ~I2CD_M_TX_CMD; + aspeed_i2c_set_state(bus, I2CD_MACTIVE); + } + + if ((bus->cmd & (I2CD_M_RX_CMD | I2CD_M_S_RX_CMD_LAST)) && + !(bus->intr_status & I2CD_INTR_RX_DONE)) { + aspeed_i2c_handle_rx_cmd(bus); + } + + if (bus->cmd & I2CD_M_STOP_CMD) { + if (!(aspeed_i2c_get_state(bus) & I2CD_MACTIVE)) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: abnormal stop\n", __func__); + bus->intr_status |= I2CD_INTR_ABNORMAL; + } else { + aspeed_i2c_set_state(bus, I2CD_MSTOP); + i2c_end_transfer(bus->bus); + bus->intr_status |= I2CD_INTR_NORMAL_STOP; + } + bus->cmd &= ~I2CD_M_STOP_CMD; + aspeed_i2c_set_state(bus, I2CD_IDLE); + } +} + +static void aspeed_i2c_bus_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + AspeedI2CBus *bus = opaque; + AspeedI2CClass *aic = ASPEED_I2C_GET_CLASS(bus->controller); + bool handle_rx; + + trace_aspeed_i2c_bus_write(bus->id, offset, size, value); + + switch (offset) { + case I2CD_FUN_CTRL_REG: + if (value & I2CD_SLAVE_EN) { + qemu_log_mask(LOG_UNIMP, "%s: slave mode not implemented\n", + __func__); + break; + } + bus->ctrl = value & 0x0071C3FF; + break; + case I2CD_AC_TIMING_REG1: + bus->timing[0] = value & 0xFFFFF0F; + break; + case I2CD_AC_TIMING_REG2: + bus->timing[1] = value & 0x7; + break; + case I2CD_INTR_CTRL_REG: + bus->intr_ctrl = value & 0x7FFF; + break; + case I2CD_INTR_STS_REG: + handle_rx = (bus->intr_status & I2CD_INTR_RX_DONE) && + (value & I2CD_INTR_RX_DONE); + bus->intr_status &= ~(value & 0x7FFF); + if (!bus->intr_status) { + bus->controller->intr_status &= ~(1 << bus->id); + qemu_irq_lower(aic->bus_get_irq(bus)); + } + if (handle_rx && (bus->cmd & (I2CD_M_RX_CMD | I2CD_M_S_RX_CMD_LAST))) { + aspeed_i2c_handle_rx_cmd(bus); + aspeed_i2c_bus_raise_interrupt(bus); + } + break; + case I2CD_DEV_ADDR_REG: + qemu_log_mask(LOG_UNIMP, "%s: slave mode not implemented\n", + __func__); + break; + case I2CD_POOL_CTRL_REG: + bus->pool_ctrl &= ~0xffffff; + bus->pool_ctrl |= (value & 0xffffff); + break; + + case I2CD_BYTE_BUF_REG: + bus->buf = (value & I2CD_BYTE_BUF_TX_MASK) << I2CD_BYTE_BUF_TX_SHIFT; + break; + case I2CD_CMD_REG: + if (!aspeed_i2c_bus_is_enabled(bus)) { + break; + } + + if (!aspeed_i2c_bus_is_master(bus)) { + qemu_log_mask(LOG_UNIMP, "%s: slave mode not implemented\n", + __func__); + break; + } + + if (!aic->has_dma && + value & (I2CD_RX_DMA_ENABLE | I2CD_TX_DMA_ENABLE)) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: No DMA support\n", __func__); + break; + } + + aspeed_i2c_bus_handle_cmd(bus, value); + aspeed_i2c_bus_raise_interrupt(bus); + break; + case I2CD_DMA_ADDR: + if (!aic->has_dma) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: No DMA support\n", __func__); + break; + } + + bus->dma_addr = value & 0x3ffffffc; + break; + + case I2CD_DMA_LEN: + if (!aic->has_dma) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: No DMA support\n", __func__); + break; + } + + bus->dma_len = value & 0xfff; + if (!bus->dma_len) { + qemu_log_mask(LOG_UNIMP, "%s: invalid DMA length\n", __func__); + } + break; + + default: + 
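The I2CD_INTR_STS_REG branch in the write handler below is write-1-to-clear, and acknowledging RX_DONE while an RX command is still pending immediately runs the next receive. A guest interrupt handler is therefore expected to drain data before acking, roughly as in this sketch (mmio_read32()/mmio_write32() and consume() are hypothetical, not QEMU APIs):

    /* hypothetical guest ISR sketch for one bus */
    static void demo_aspeed_i2c_isr(uint32_t base)
    {
        uint32_t sts = mmio_read32(base + I2CD_INTR_STS_REG);

        if (sts & I2CD_INTR_RX_DONE) {
            uint32_t buf = mmio_read32(base + I2CD_BYTE_BUF_REG);

            consume((buf >> I2CD_BYTE_BUF_RX_SHIFT) & I2CD_BYTE_BUF_RX_MASK);
        }
        /* write-1-to-clear; the model may kick the next pending RX here */
        mmio_write32(base + I2CD_INTR_STS_REG, sts);
    }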
qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" HWADDR_PRIx "\n", + __func__, offset); + } +} + +static uint64_t aspeed_i2c_ctrl_read(void *opaque, hwaddr offset, + unsigned size) +{ + AspeedI2CState *s = opaque; + + switch (offset) { + case I2C_CTRL_STATUS: + return s->intr_status; + case I2C_CTRL_GLOBAL: + return s->ctrl_global; + default: + qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" HWADDR_PRIx "\n", + __func__, offset); + break; + } + + return -1; +} + +static void aspeed_i2c_ctrl_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + AspeedI2CState *s = opaque; + + switch (offset) { + case I2C_CTRL_GLOBAL: + s->ctrl_global = value; + break; + case I2C_CTRL_STATUS: + default: + qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" HWADDR_PRIx "\n", + __func__, offset); + break; + } +} + +static const MemoryRegionOps aspeed_i2c_bus_ops = { + .read = aspeed_i2c_bus_read, + .write = aspeed_i2c_bus_write, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static const MemoryRegionOps aspeed_i2c_ctrl_ops = { + .read = aspeed_i2c_ctrl_read, + .write = aspeed_i2c_ctrl_write, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static uint64_t aspeed_i2c_pool_read(void *opaque, hwaddr offset, + unsigned size) +{ + AspeedI2CState *s = opaque; + uint64_t ret = 0; + int i; + + for (i = 0; i < size; i++) { + ret |= (uint64_t) s->pool[offset + i] << (8 * i); + } + + return ret; +} + +static void aspeed_i2c_pool_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + AspeedI2CState *s = opaque; + int i; + + for (i = 0; i < size; i++) { + s->pool[offset + i] = (value >> (8 * i)) & 0xFF; + } +} + +static const MemoryRegionOps aspeed_i2c_pool_ops = { + .read = aspeed_i2c_pool_read, + .write = aspeed_i2c_pool_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 1, + .max_access_size = 4, + }, +}; + +static const VMStateDescription aspeed_i2c_bus_vmstate = { + .name = TYPE_ASPEED_I2C, + .version_id = 3, + .minimum_version_id = 3, + .fields = (VMStateField[]) { + VMSTATE_UINT8(id, AspeedI2CBus), + VMSTATE_UINT32(ctrl, AspeedI2CBus), + VMSTATE_UINT32_ARRAY(timing, AspeedI2CBus, 2), + VMSTATE_UINT32(intr_ctrl, AspeedI2CBus), + VMSTATE_UINT32(intr_status, AspeedI2CBus), + VMSTATE_UINT32(cmd, AspeedI2CBus), + VMSTATE_UINT32(buf, AspeedI2CBus), + VMSTATE_UINT32(pool_ctrl, AspeedI2CBus), + VMSTATE_UINT32(dma_addr, AspeedI2CBus), + VMSTATE_UINT32(dma_len, AspeedI2CBus), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription aspeed_i2c_vmstate = { + .name = TYPE_ASPEED_I2C, + .version_id = 2, + .minimum_version_id = 2, + .fields = (VMStateField[]) { + VMSTATE_UINT32(intr_status, AspeedI2CState), + VMSTATE_STRUCT_ARRAY(busses, AspeedI2CState, + ASPEED_I2C_NR_BUSSES, 1, aspeed_i2c_bus_vmstate, + AspeedI2CBus), + VMSTATE_UINT8_ARRAY(pool, AspeedI2CState, ASPEED_I2C_MAX_POOL_SIZE), + VMSTATE_END_OF_LIST() + } +}; + +static void aspeed_i2c_reset(DeviceState *dev) +{ + AspeedI2CState *s = ASPEED_I2C(dev); + + s->intr_status = 0; +} + +static void aspeed_i2c_instance_init(Object *obj) +{ + AspeedI2CState *s = ASPEED_I2C(obj); + AspeedI2CClass *aic = ASPEED_I2C_GET_CLASS(s); + int i; + + for (i = 0; i < aic->num_busses; i++) { + object_initialize_child(obj, "bus[*]", &s->busses[i], + TYPE_ASPEED_I2C_BUS); + } +} + +/* + * Address Definitions (AST2400 and AST2500) + * + * 0x000 ... 0x03F: Global Register + * 0x040 ... 0x07F: Device 1 + * 0x080 ... 0x0BF: Device 2 + * 0x0C0 ... 0x0FF: Device 3 + * 0x100 ... 0x13F: Device 4 + * 0x140 ... 
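The pool window above is plain byte-addressed SRAM assembled least-significant-byte first, so a multi-byte access behaves like a little-endian load. A quick check of the arithmetic:

    uint8_t pool[4] = { 0x78, 0x56, 0x34, 0x12 };
    uint32_t v = 0;

    for (int i = 0; i < 4; i++) {
        v |= (uint32_t)pool[i] << (8 * i);   /* same loop as aspeed_i2c_pool_read() */
    }
    /* v == 0x12345678, i.e. equivalent to ldl_le_p(pool) from qemu/bswap.h */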
0x17F: Device 5 + * 0x180 ... 0x1BF: Device 6 + * 0x1C0 ... 0x1FF: Device 7 + * 0x200 ... 0x2FF: Buffer Pool (unused in linux driver) + * 0x300 ... 0x33F: Device 8 + * 0x340 ... 0x37F: Device 9 + * 0x380 ... 0x3BF: Device 10 + * 0x3C0 ... 0x3FF: Device 11 + * 0x400 ... 0x43F: Device 12 + * 0x440 ... 0x47F: Device 13 + * 0x480 ... 0x4BF: Device 14 + * 0x800 ... 0xFFF: Buffer Pool (unused in linux driver) + */ +static void aspeed_i2c_realize(DeviceState *dev, Error **errp) +{ + int i; + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + AspeedI2CState *s = ASPEED_I2C(dev); + AspeedI2CClass *aic = ASPEED_I2C_GET_CLASS(s); + + sysbus_init_irq(sbd, &s->irq); + memory_region_init_io(&s->iomem, OBJECT(s), &aspeed_i2c_ctrl_ops, s, + "aspeed.i2c", 0x1000); + sysbus_init_mmio(sbd, &s->iomem); + + for (i = 0; i < aic->num_busses; i++) { + Object *bus = OBJECT(&s->busses[i]); + int offset = i < aic->gap ? 1 : 5; + + if (!object_property_set_link(bus, "controller", OBJECT(s), errp)) { + return; + } + + if (!object_property_set_uint(bus, "bus-id", i, errp)) { + return; + } + + if (!sysbus_realize(SYS_BUS_DEVICE(bus), errp)) { + return; + } + + memory_region_add_subregion(&s->iomem, aic->reg_size * (i + offset), + &s->busses[i].mr); + } + + memory_region_init_io(&s->pool_iomem, OBJECT(s), &aspeed_i2c_pool_ops, s, + "aspeed.i2c-pool", aic->pool_size); + memory_region_add_subregion(&s->iomem, aic->pool_base, &s->pool_iomem); + + if (aic->has_dma) { + if (!s->dram_mr) { + error_setg(errp, TYPE_ASPEED_I2C ": 'dram' link not set"); + return; + } + + address_space_init(&s->dram_as, s->dram_mr, + TYPE_ASPEED_I2C "-dma-dram"); + } +} + +static Property aspeed_i2c_properties[] = { + DEFINE_PROP_LINK("dram", AspeedI2CState, dram_mr, + TYPE_MEMORY_REGION, MemoryRegion *), + DEFINE_PROP_END_OF_LIST(), +}; + +static void aspeed_i2c_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->vmsd = &aspeed_i2c_vmstate; + dc->reset = aspeed_i2c_reset; + device_class_set_props(dc, aspeed_i2c_properties); + dc->realize = aspeed_i2c_realize; + dc->desc = "Aspeed I2C Controller"; +} + +static const TypeInfo aspeed_i2c_info = { + .name = TYPE_ASPEED_I2C, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_init = aspeed_i2c_instance_init, + .instance_size = sizeof(AspeedI2CState), + .class_init = aspeed_i2c_class_init, + .class_size = sizeof(AspeedI2CClass), + .abstract = true, +}; + +static void aspeed_i2c_bus_reset(DeviceState *dev) +{ + AspeedI2CBus *s = ASPEED_I2C_BUS(dev); + + s->intr_ctrl = 0; + s->intr_status = 0; + s->cmd = 0; + s->buf = 0; + s->dma_addr = 0; + s->dma_len = 0; + i2c_end_transfer(s->bus); +} + +static void aspeed_i2c_bus_realize(DeviceState *dev, Error **errp) +{ + AspeedI2CBus *s = ASPEED_I2C_BUS(dev); + AspeedI2CClass *aic; + g_autofree char *name = g_strdup_printf(TYPE_ASPEED_I2C_BUS ".%d", s->id); + + if (!s->controller) { + error_setg(errp, TYPE_ASPEED_I2C_BUS ": 'controller' link not set"); + return; + } + + aic = ASPEED_I2C_GET_CLASS(s->controller); + + sysbus_init_irq(SYS_BUS_DEVICE(dev), &s->irq); + + s->bus = i2c_init_bus(dev, name); + + memory_region_init_io(&s->mr, OBJECT(s), &aspeed_i2c_bus_ops, + s, name, aic->reg_size); + sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->mr); +} + +static Property aspeed_i2c_bus_properties[] = { + DEFINE_PROP_UINT8("bus-id", AspeedI2CBus, id, 0), + DEFINE_PROP_LINK("controller", AspeedI2CBus, controller, TYPE_ASPEED_I2C, + AspeedI2CState *), + DEFINE_PROP_END_OF_LIST(), +}; + +static void aspeed_i2c_bus_class_init(ObjectClass *klass, void 
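The `i + offset` arithmetic in realize() reproduces the hole in the register map listed above: buses below the gap start one reg_size slot after the global registers, while buses at or above the gap skip over the buffer-pool region. Worked out for the AST2400/AST2500 layout (reg_size 0x40, gap 7):

    /* i = 0  -> offset 1 -> 0x40 *  1 = 0x040  (Device 1)  */
    /* i = 6  -> offset 1 -> 0x40 *  7 = 0x1C0  (Device 7)  */
    /* i = 7  -> offset 5 -> 0x40 * 12 = 0x300  (Device 8)  */
    /* i = 13 -> offset 5 -> 0x40 * 18 = 0x480  (Device 14) */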
*data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->desc = "Aspeed I2C Bus"; + dc->realize = aspeed_i2c_bus_realize; + dc->reset = aspeed_i2c_bus_reset; + device_class_set_props(dc, aspeed_i2c_bus_properties); +} + +static const TypeInfo aspeed_i2c_bus_info = { + .name = TYPE_ASPEED_I2C_BUS, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(AspeedI2CBus), + .class_init = aspeed_i2c_bus_class_init, +}; + +static qemu_irq aspeed_2400_i2c_bus_get_irq(AspeedI2CBus *bus) +{ + return bus->controller->irq; +} + +static uint8_t *aspeed_2400_i2c_bus_pool_base(AspeedI2CBus *bus) +{ + uint8_t *pool_page = + &bus->controller->pool[I2CD_POOL_PAGE_SEL(bus->ctrl) * 0x100]; + + return &pool_page[I2CD_POOL_OFFSET(bus->pool_ctrl)]; +} + +static void aspeed_2400_i2c_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + AspeedI2CClass *aic = ASPEED_I2C_CLASS(klass); + + dc->desc = "ASPEED 2400 I2C Controller"; + + aic->num_busses = 14; + aic->reg_size = 0x40; + aic->gap = 7; + aic->bus_get_irq = aspeed_2400_i2c_bus_get_irq; + aic->pool_size = 0x800; + aic->pool_base = 0x800; + aic->bus_pool_base = aspeed_2400_i2c_bus_pool_base; +} + +static const TypeInfo aspeed_2400_i2c_info = { + .name = TYPE_ASPEED_2400_I2C, + .parent = TYPE_ASPEED_I2C, + .class_init = aspeed_2400_i2c_class_init, +}; + +static qemu_irq aspeed_2500_i2c_bus_get_irq(AspeedI2CBus *bus) +{ + return bus->controller->irq; +} + +static uint8_t *aspeed_2500_i2c_bus_pool_base(AspeedI2CBus *bus) +{ + return &bus->controller->pool[bus->id * 0x10]; +} + +static void aspeed_2500_i2c_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + AspeedI2CClass *aic = ASPEED_I2C_CLASS(klass); + + dc->desc = "ASPEED 2500 I2C Controller"; + + aic->num_busses = 14; + aic->reg_size = 0x40; + aic->gap = 7; + aic->bus_get_irq = aspeed_2500_i2c_bus_get_irq; + aic->pool_size = 0x100; + aic->pool_base = 0x200; + aic->bus_pool_base = aspeed_2500_i2c_bus_pool_base; + aic->check_sram = true; + aic->has_dma = true; +} + +static const TypeInfo aspeed_2500_i2c_info = { + .name = TYPE_ASPEED_2500_I2C, + .parent = TYPE_ASPEED_I2C, + .class_init = aspeed_2500_i2c_class_init, +}; + +static qemu_irq aspeed_2600_i2c_bus_get_irq(AspeedI2CBus *bus) +{ + return bus->irq; +} + +static uint8_t *aspeed_2600_i2c_bus_pool_base(AspeedI2CBus *bus) +{ + return &bus->controller->pool[bus->id * 0x20]; +} + +static void aspeed_2600_i2c_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + AspeedI2CClass *aic = ASPEED_I2C_CLASS(klass); + + dc->desc = "ASPEED 2600 I2C Controller"; + + aic->num_busses = 16; + aic->reg_size = 0x80; + aic->gap = -1; /* no gap */ + aic->bus_get_irq = aspeed_2600_i2c_bus_get_irq; + aic->pool_size = 0x200; + aic->pool_base = 0xC00; + aic->bus_pool_base = aspeed_2600_i2c_bus_pool_base; + aic->has_dma = true; +} + +static const TypeInfo aspeed_2600_i2c_info = { + .name = TYPE_ASPEED_2600_I2C, + .parent = TYPE_ASPEED_I2C, + .class_init = aspeed_2600_i2c_class_init, +}; + +static void aspeed_i2c_register_types(void) +{ + type_register_static(&aspeed_i2c_bus_info); + type_register_static(&aspeed_i2c_info); + type_register_static(&aspeed_2400_i2c_info); + type_register_static(&aspeed_2500_i2c_info); + type_register_static(&aspeed_2600_i2c_info); +} + +type_init(aspeed_i2c_register_types) + + +I2CBus *aspeed_i2c_get_bus(AspeedI2CState *s, int busnr) +{ + AspeedI2CClass *aic = ASPEED_I2C_GET_CLASS(s); + I2CBus *bus = NULL; + + if (busnr >= 0 && busnr < 
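With the three SoC flavours above, board code reaches an individual bus through the aspeed_i2c_get_bus() accessor whose body completes just below. A typical wiring sketch; soc->i2c, the "tmp105" device model and address 0x4d are examples, not taken from this patch:

    /* board-code sketch: put a temperature sensor on bus 2 */
    I2CBus *i2c2 = aspeed_i2c_get_bus(&soc->i2c, 2);

    i2c_slave_create_simple(i2c2, "tmp105", 0x4d);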
aic->num_busses) { + bus = s->busses[busnr].bus; + } + + return bus; +} diff --git a/hw/i2c/bitbang_i2c.c b/hw/i2c/bitbang_i2c.c new file mode 100644 index 000000000..e9a0612a0 --- /dev/null +++ b/hw/i2c/bitbang_i2c.c @@ -0,0 +1,226 @@ +/* + * Bit-Bang i2c emulation extracted from + * Marvell MV88W8618 / Freecom MusicPal emulation. + * + * Copyright (c) 2008 Jan Kiszka + * + * This code is licensed under the GNU GPL v2. + * + * Contributions after 2012-01-13 are licensed under the terms of the + * GNU GPL, version 2 or (at your option) any later version. + */ + +#include "qemu/osdep.h" +#include "hw/irq.h" +#include "hw/i2c/bitbang_i2c.h" +#include "hw/sysbus.h" +#include "qemu/module.h" +#include "qom/object.h" + +//#define DEBUG_BITBANG_I2C + +#ifdef DEBUG_BITBANG_I2C +#define DPRINTF(fmt, ...) \ +do { printf("bitbang_i2c: " fmt , ## __VA_ARGS__); } while (0) +#else +#define DPRINTF(fmt, ...) do {} while(0) +#endif + +static void bitbang_i2c_enter_stop(bitbang_i2c_interface *i2c) +{ + DPRINTF("STOP\n"); + if (i2c->current_addr >= 0) + i2c_end_transfer(i2c->bus); + i2c->current_addr = -1; + i2c->state = STOPPED; +} + +/* Set device data pin. */ +static int bitbang_i2c_ret(bitbang_i2c_interface *i2c, int level) +{ + i2c->device_out = level; + //DPRINTF("%d %d %d\n", i2c->last_clock, i2c->last_data, i2c->device_out); + return level & i2c->last_data; +} + +/* Leave device data pin unmodified. */ +static int bitbang_i2c_nop(bitbang_i2c_interface *i2c) +{ + return bitbang_i2c_ret(i2c, i2c->device_out); +} + +/* Returns data line level. */ +int bitbang_i2c_set(bitbang_i2c_interface *i2c, int line, int level) +{ + int data; + + if (level != 0 && level != 1) { + abort(); + } + + if (line == BITBANG_I2C_SDA) { + if (level == i2c->last_data) { + return bitbang_i2c_nop(i2c); + } + i2c->last_data = level; + if (i2c->last_clock == 0) { + return bitbang_i2c_nop(i2c); + } + if (level == 0) { + DPRINTF("START\n"); + /* START condition. */ + i2c->state = SENDING_BIT7; + i2c->current_addr = -1; + } else { + /* STOP condition. */ + bitbang_i2c_enter_stop(i2c); + } + return bitbang_i2c_ret(i2c, 1); + } + + data = i2c->last_data; + if (i2c->last_clock == level) { + return bitbang_i2c_nop(i2c); + } + i2c->last_clock = level; + if (level == 0) { + /* State is set/read at the start of the clock pulse. + release the data line at the end. */ + return bitbang_i2c_ret(i2c, 1); + } + switch (i2c->state) { + case STOPPED: + case SENT_NACK: + return bitbang_i2c_ret(i2c, 1); + + case SENDING_BIT7 ... SENDING_BIT0: + i2c->buffer = (i2c->buffer << 1) | data; + /* will end up in WAITING_FOR_ACK */ + i2c->state++; + return bitbang_i2c_ret(i2c, 1); + + case WAITING_FOR_ACK: + { + int ret; + + if (i2c->current_addr < 0) { + i2c->current_addr = i2c->buffer; + DPRINTF("Address 0x%02x\n", i2c->current_addr); + ret = i2c_start_transfer(i2c->bus, i2c->current_addr >> 1, + i2c->current_addr & 1); + } else { + DPRINTF("Sent 0x%02x\n", i2c->buffer); + ret = i2c_send(i2c->bus, i2c->buffer); + } + if (ret) { + /* NACK (either addressing a nonexistent device, or the + * device we were sending to decided to NACK us). + */ + DPRINTF("Got NACK\n"); + bitbang_i2c_enter_stop(i2c); + return bitbang_i2c_ret(i2c, 1); + } + if (i2c->current_addr & 1) { + i2c->state = RECEIVING_BIT7; + } else { + i2c->state = SENDING_BIT7; + } + return bitbang_i2c_ret(i2c, 0); + } + case RECEIVING_BIT7: + i2c->buffer = i2c_recv(i2c->bus); + DPRINTF("RX byte 0x%02x\n", i2c->buffer); + /* Fall through... */ + case RECEIVING_BIT6 ...
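In the SDA branch above, a falling SDA edge while SCL is high is decoded as START and a rising edge as STOP; data bits are then sampled on the rising SCL edge. A caller drives those transitions in this order, as a sketch; BITBANG_I2C_SCL is assumed to be defined next to BITBANG_I2C_SDA in the header:

    /* sketch: generate START, then clock out the first address bit */
    static void demo_bitbang_start(bitbang_i2c_interface *i2c, int first_bit)
    {
        bitbang_i2c_set(i2c, BITBANG_I2C_SCL, 1);
        bitbang_i2c_set(i2c, BITBANG_I2C_SDA, 1);
        bitbang_i2c_set(i2c, BITBANG_I2C_SDA, 0);   /* SDA falls, SCL high: START */
        bitbang_i2c_set(i2c, BITBANG_I2C_SCL, 0);
        bitbang_i2c_set(i2c, BITBANG_I2C_SDA, first_bit);
        bitbang_i2c_set(i2c, BITBANG_I2C_SCL, 1);   /* bit is sampled on this edge */
    }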
RECEIVING_BIT0: + data = i2c->buffer >> 7; + /* will end up in SENDING_ACK */ + i2c->state++; + i2c->buffer <<= 1; + return bitbang_i2c_ret(i2c, data); + + case SENDING_ACK: + i2c->state = RECEIVING_BIT7; + if (data != 0) { + DPRINTF("NACKED\n"); + i2c->state = SENT_NACK; + i2c_nack(i2c->bus); + } else { + DPRINTF("ACKED\n"); + } + return bitbang_i2c_ret(i2c, 1); + } + abort(); +} + +void bitbang_i2c_init(bitbang_i2c_interface *s, I2CBus *bus) +{ + s->bus = bus; + s->last_data = 1; + s->last_clock = 1; + s->device_out = 1; +} + +/* GPIO interface. */ + +#define TYPE_GPIO_I2C "gpio_i2c" +OBJECT_DECLARE_SIMPLE_TYPE(GPIOI2CState, GPIO_I2C) + +struct GPIOI2CState { + SysBusDevice parent_obj; + + MemoryRegion dummy_iomem; + bitbang_i2c_interface bitbang; + int last_level; + qemu_irq out; +}; + +static void bitbang_i2c_gpio_set(void *opaque, int irq, int level) +{ + GPIOI2CState *s = opaque; + + level = bitbang_i2c_set(&s->bitbang, irq, level); + if (level != s->last_level) { + s->last_level = level; + qemu_set_irq(s->out, level); + } +} + +static void gpio_i2c_init(Object *obj) +{ + DeviceState *dev = DEVICE(obj); + GPIOI2CState *s = GPIO_I2C(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + I2CBus *bus; + + memory_region_init(&s->dummy_iomem, obj, "gpio_i2c", 0); + sysbus_init_mmio(sbd, &s->dummy_iomem); + + bus = i2c_init_bus(dev, "i2c"); + bitbang_i2c_init(&s->bitbang, bus); + + qdev_init_gpio_in(dev, bitbang_i2c_gpio_set, 2); + qdev_init_gpio_out(dev, &s->out, 1); +} + +static void gpio_i2c_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); + dc->desc = "Virtual GPIO to I2C bridge"; +} + +static const TypeInfo gpio_i2c_info = { + .name = TYPE_GPIO_I2C, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(GPIOI2CState), + .instance_init = gpio_i2c_init, + .class_init = gpio_i2c_class_init, +}; + +static void bitbang_i2c_register_types(void) +{ + type_register_static(&gpio_i2c_info); +} + +type_init(bitbang_i2c_register_types) diff --git a/hw/i2c/core.c b/hw/i2c/core.c new file mode 100644 index 000000000..0e7d2763b --- /dev/null +++ b/hw/i2c/core.c @@ -0,0 +1,357 @@ +/* + * QEMU I2C bus interface. + * + * Copyright (c) 2007 CodeSourcery. + * Written by Paul Brook + * + * This code is licensed under the LGPL. + */ + +#include "qemu/osdep.h" +#include "hw/i2c/i2c.h" +#include "hw/qdev-properties.h" +#include "migration/vmstate.h" +#include "qapi/error.h" +#include "qemu/module.h" +#include "trace.h" + +#define I2C_BROADCAST 0x00 + +static Property i2c_props[] = { + DEFINE_PROP_UINT8("address", struct I2CSlave, address, 0), + DEFINE_PROP_END_OF_LIST(), +}; + +static const TypeInfo i2c_bus_info = { + .name = TYPE_I2C_BUS, + .parent = TYPE_BUS, + .instance_size = sizeof(I2CBus), +}; + +static int i2c_bus_pre_save(void *opaque) +{ + I2CBus *bus = opaque; + + bus->saved_address = -1; + if (!QLIST_EMPTY(&bus->current_devs)) { + if (!bus->broadcast) { + bus->saved_address = QLIST_FIRST(&bus->current_devs)->elt->address; + } else { + bus->saved_address = I2C_BROADCAST; + } + } + + return 0; +} + +static const VMStateDescription vmstate_i2c_bus = { + .name = "i2c_bus", + .version_id = 1, + .minimum_version_id = 1, + .pre_save = i2c_bus_pre_save, + .fields = (VMStateField[]) { + VMSTATE_UINT8(saved_address, I2CBus), + VMSTATE_END_OF_LIST() + } +}; + +/* Create a new I2C bus. 
*/ +I2CBus *i2c_init_bus(DeviceState *parent, const char *name) +{ + I2CBus *bus; + + bus = I2C_BUS(qbus_new(TYPE_I2C_BUS, parent, name)); + QLIST_INIT(&bus->current_devs); + vmstate_register(NULL, VMSTATE_INSTANCE_ID_ANY, &vmstate_i2c_bus, bus); + return bus; +} + +void i2c_slave_set_address(I2CSlave *dev, uint8_t address) +{ + dev->address = address; +} + +/* Return nonzero if bus is busy. */ +int i2c_bus_busy(I2CBus *bus) +{ + return !QLIST_EMPTY(&bus->current_devs); +} + +bool i2c_scan_bus(I2CBus *bus, uint8_t address, bool broadcast, + I2CNodeList *current_devs) +{ + BusChild *kid; + + QTAILQ_FOREACH(kid, &bus->qbus.children, sibling) { + DeviceState *qdev = kid->child; + I2CSlave *candidate = I2C_SLAVE(qdev); + I2CSlaveClass *sc = I2C_SLAVE_GET_CLASS(candidate); + + if (sc->match_and_add(candidate, address, broadcast, current_devs)) { + if (!broadcast) { + return true; + } + } + } + + /* + * If broadcast was true, and the list was full or empty, return true. If + * broadcast was false, return false. + */ + return broadcast; +} + +/* TODO: Make this handle multiple masters. */ +/* + * Start or continue an i2c transaction. When this is called for the + * first time or after an i2c_end_transfer(), if it returns an error + * the bus transaction is terminated (or really never started). If + * this is called after another i2c_start_transfer() without an + * intervening i2c_end_transfer(), and it returns an error, the + * transaction will not be terminated. The caller must do it. + * + * This corresponds with the way real hardware works. The SMBus + * protocol uses a start transfer to switch from write to read mode + * without releasing the bus. If that fails, the bus is still + * in a transaction. + * + * @event must be I2C_START_RECV or I2C_START_SEND. + */ +static int i2c_do_start_transfer(I2CBus *bus, uint8_t address, + enum i2c_event event) +{ + I2CSlaveClass *sc; + I2CNode *node; + bool bus_scanned = false; + + if (address == I2C_BROADCAST) { + /* + * This is a broadcast, the current_devs will be all the devices of the + * bus. + */ + bus->broadcast = true; + } + + /* + * If there are already devices in the list, that means we are in + * the middle of a transaction and we shouldn't rescan the bus. + * + * This happens with any SMBus transaction, even on a pure I2C + * device. The interface does a transaction start without + * terminating the previous transaction. + */ + if (QLIST_EMPTY(&bus->current_devs)) { + /* Disregard whether devices were found. */ + (void)i2c_scan_bus(bus, address, bus->broadcast, &bus->current_devs); + bus_scanned = true; + } + + if (QLIST_EMPTY(&bus->current_devs)) { + return 1; + } + + QLIST_FOREACH(node, &bus->current_devs, next) { + I2CSlave *s = node->elt; + int rv; + + sc = I2C_SLAVE_GET_CLASS(s); + /* If the bus is already busy, assume this is a repeated + start condition. */ + + if (sc->event) { + trace_i2c_event("start", s->address); + rv = sc->event(s, event); + if (rv && !bus->broadcast) { + if (bus_scanned) { + /* First call, terminate the transfer. */ + i2c_end_transfer(bus); + } + return rv; + } + } + } + return 0; +} + +int i2c_start_transfer(I2CBus *bus, uint8_t address, bool is_recv) +{ + return i2c_do_start_transfer(bus, address, is_recv + ? 
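From a controller model's point of view, the start/send/end calls defined here compose in the obvious way. A sketch of a two-byte register write to a slave at example address 0x50:

    /* controller-model sketch: write register 0x10 = 0xab on a slave at 0x50 */
    if (i2c_start_send(bus, 0x50) == 0) {
        i2c_send(bus, 0x10);       /* register index */
        i2c_send(bus, 0xab);       /* payload */
    }
    i2c_end_transfer(bus);         /* STOP: delivers I2C_FINISH to the slaves */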
I2C_START_RECV + : I2C_START_SEND); +} + +int i2c_start_recv(I2CBus *bus, uint8_t address) +{ + return i2c_do_start_transfer(bus, address, I2C_START_RECV); +} + +int i2c_start_send(I2CBus *bus, uint8_t address) +{ + return i2c_do_start_transfer(bus, address, I2C_START_SEND); +} + +void i2c_end_transfer(I2CBus *bus) +{ + I2CSlaveClass *sc; + I2CNode *node, *next; + + QLIST_FOREACH_SAFE(node, &bus->current_devs, next, next) { + I2CSlave *s = node->elt; + sc = I2C_SLAVE_GET_CLASS(s); + if (sc->event) { + trace_i2c_event("finish", s->address); + sc->event(s, I2C_FINISH); + } + QLIST_REMOVE(node, next); + g_free(node); + } + bus->broadcast = false; +} + +int i2c_send(I2CBus *bus, uint8_t data) +{ + I2CSlaveClass *sc; + I2CSlave *s; + I2CNode *node; + int ret = 0; + + QLIST_FOREACH(node, &bus->current_devs, next) { + s = node->elt; + sc = I2C_SLAVE_GET_CLASS(s); + if (sc->send) { + trace_i2c_send(s->address, data); + ret = ret || sc->send(s, data); + } else { + ret = -1; + } + } + + return ret ? -1 : 0; +} + +uint8_t i2c_recv(I2CBus *bus) +{ + uint8_t data = 0xff; + I2CSlaveClass *sc; + I2CSlave *s; + + if (!QLIST_EMPTY(&bus->current_devs) && !bus->broadcast) { + sc = I2C_SLAVE_GET_CLASS(QLIST_FIRST(&bus->current_devs)->elt); + if (sc->recv) { + s = QLIST_FIRST(&bus->current_devs)->elt; + data = sc->recv(s); + trace_i2c_recv(s->address, data); + } + } + + return data; +} + +void i2c_nack(I2CBus *bus) +{ + I2CSlaveClass *sc; + I2CNode *node; + + if (QLIST_EMPTY(&bus->current_devs)) { + return; + } + + QLIST_FOREACH(node, &bus->current_devs, next) { + sc = I2C_SLAVE_GET_CLASS(node->elt); + if (sc->event) { + trace_i2c_event("nack", node->elt->address); + sc->event(node->elt, I2C_NACK); + } + } +} + +static int i2c_slave_post_load(void *opaque, int version_id) +{ + I2CSlave *dev = opaque; + I2CBus *bus; + I2CNode *node; + + bus = I2C_BUS(qdev_get_parent_bus(DEVICE(dev))); + if ((bus->saved_address == dev->address) || + (bus->saved_address == I2C_BROADCAST)) { + node = g_malloc(sizeof(struct I2CNode)); + node->elt = dev; + QLIST_INSERT_HEAD(&bus->current_devs, node, next); + } + return 0; +} + +const VMStateDescription vmstate_i2c_slave = { + .name = "I2CSlave", + .version_id = 1, + .minimum_version_id = 1, + .post_load = i2c_slave_post_load, + .fields = (VMStateField[]) { + VMSTATE_UINT8(address, I2CSlave), + VMSTATE_END_OF_LIST() + } +}; + +I2CSlave *i2c_slave_new(const char *name, uint8_t addr) +{ + DeviceState *dev; + + dev = qdev_new(name); + qdev_prop_set_uint8(dev, "address", addr); + return I2C_SLAVE(dev); +} + +bool i2c_slave_realize_and_unref(I2CSlave *dev, I2CBus *bus, Error **errp) +{ + return qdev_realize_and_unref(&dev->qdev, &bus->qbus, errp); +} + +I2CSlave *i2c_slave_create_simple(I2CBus *bus, const char *name, uint8_t addr) +{ + I2CSlave *dev = i2c_slave_new(name, addr); + + i2c_slave_realize_and_unref(dev, bus, &error_abort); + + return dev; +} + +static bool i2c_slave_match(I2CSlave *candidate, uint8_t address, + bool broadcast, I2CNodeList *current_devs) +{ + if ((candidate->address == address) || (broadcast)) { + I2CNode *node = g_malloc(sizeof(struct I2CNode)); + node->elt = candidate; + QLIST_INSERT_HEAD(current_devs, node, next); + return true; + } + + /* Not found and not broadcast. 
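i2c_scan_bus() resolves devices through the match_and_add hook installed in i2c_slave_class_init() below, so a new slave only has to subclass TYPE_I2C_SLAVE and fill in the callbacks it needs. A minimal, hypothetical skeleton under those assumptions:

    /* hypothetical minimal slave: ACKs writes, returns a fixed byte on reads */
    static uint8_t demo_slave_recv(I2CSlave *s)
    {
        return 0x42;                       /* data returned to the master */
    }

    static int demo_slave_send(I2CSlave *s, uint8_t data)
    {
        return 0;                          /* 0 = ACK, nonzero = NAK */
    }

    static void demo_slave_class_init(ObjectClass *klass, void *data)
    {
        I2CSlaveClass *sc = I2C_SLAVE_CLASS(klass);

        sc->recv = demo_slave_recv;
        sc->send = demo_slave_send;
    }

    static const TypeInfo demo_slave_info = {
        .name          = "demo-i2c-slave",  /* hypothetical */
        .parent        = TYPE_I2C_SLAVE,
        .instance_size = sizeof(I2CSlave),
        .class_init    = demo_slave_class_init,
    };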
*/ + return false; +} + +static void i2c_slave_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *k = DEVICE_CLASS(klass); + I2CSlaveClass *sc = I2C_SLAVE_CLASS(klass); + set_bit(DEVICE_CATEGORY_MISC, k->categories); + k->bus_type = TYPE_I2C_BUS; + device_class_set_props(k, i2c_props); + sc->match_and_add = i2c_slave_match; +} + +static const TypeInfo i2c_slave_type_info = { + .name = TYPE_I2C_SLAVE, + .parent = TYPE_DEVICE, + .instance_size = sizeof(I2CSlave), + .abstract = true, + .class_size = sizeof(I2CSlaveClass), + .class_init = i2c_slave_class_init, +}; + +static void i2c_slave_register_types(void) +{ + type_register_static(&i2c_bus_info); + type_register_static(&i2c_slave_type_info); +} + +type_init(i2c_slave_register_types) diff --git a/hw/i2c/exynos4210_i2c.c b/hw/i2c/exynos4210_i2c.c new file mode 100644 index 000000000..b65a7d022 --- /dev/null +++ b/hw/i2c/exynos4210_i2c.c @@ -0,0 +1,333 @@ +/* + * Exynos4210 I2C Bus Serial Interface Emulation + * + * Copyright (C) 2012 Samsung Electronics Co Ltd. + * Maksim Kozlov, + * Igor Mitsyanko, + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . + * + */ + +#include "qemu/osdep.h" +#include "qemu/module.h" +#include "qemu/timer.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "hw/i2c/i2c.h" +#include "hw/irq.h" +#include "qom/object.h" + +#ifndef EXYNOS4_I2C_DEBUG +#define EXYNOS4_I2C_DEBUG 0 +#endif + +#define TYPE_EXYNOS4_I2C "exynos4210.i2c" +OBJECT_DECLARE_SIMPLE_TYPE(Exynos4210I2CState, EXYNOS4_I2C) + +/* Exynos4210 I2C memory map */ +#define EXYNOS4_I2C_MEM_SIZE 0x14 +#define I2CCON_ADDR 0x00 /* control register */ +#define I2CSTAT_ADDR 0x04 /* control/status register */ +#define I2CADD_ADDR 0x08 /* address register */ +#define I2CDS_ADDR 0x0c /* data shift register */ +#define I2CLC_ADDR 0x10 /* line control register */ + +#define I2CCON_ACK_GEN (1 << 7) +#define I2CCON_INTRS_EN (1 << 5) +#define I2CCON_INT_PEND (1 << 4) + +#define EXYNOS4_I2C_MODE(reg) (((reg) >> 6) & 3) +#define I2C_IN_MASTER_MODE(reg) (((reg) >> 6) & 2) +#define I2CMODE_MASTER_Rx 0x2 +#define I2CMODE_MASTER_Tx 0x3 +#define I2CSTAT_LAST_BIT (1 << 0) +#define I2CSTAT_OUTPUT_EN (1 << 4) +#define I2CSTAT_START_BUSY (1 << 5) + + +#if EXYNOS4_I2C_DEBUG +#define DPRINT(fmt, args...) \ + do { fprintf(stderr, "QEMU I2C: "fmt, ## args); } while (0) + +static const char *exynos4_i2c_get_regname(unsigned offset) +{ + switch (offset) { + case I2CCON_ADDR: + return "I2CCON"; + case I2CSTAT_ADDR: + return "I2CSTAT"; + case I2CADD_ADDR: + return "I2CADD"; + case I2CDS_ADDR: + return "I2CDS"; + case I2CLC_ADDR: + return "I2CLC"; + default: + return "[?]"; + } +} + +#else +#define DPRINT(fmt, args...) 
do { } while (0) +#endif + +struct Exynos4210I2CState { + SysBusDevice parent_obj; + + MemoryRegion iomem; + I2CBus *bus; + qemu_irq irq; + + uint8_t i2ccon; + uint8_t i2cstat; + uint8_t i2cadd; + uint8_t i2cds; + uint8_t i2clc; + bool scl_free; +}; + +static inline void exynos4210_i2c_raise_interrupt(Exynos4210I2CState *s) +{ + if (s->i2ccon & I2CCON_INTRS_EN) { + s->i2ccon |= I2CCON_INT_PEND; + qemu_irq_raise(s->irq); + } +} + +static void exynos4210_i2c_data_receive(void *opaque) +{ + Exynos4210I2CState *s = (Exynos4210I2CState *)opaque; + + s->i2cstat &= ~I2CSTAT_LAST_BIT; + s->scl_free = false; + s->i2cds = i2c_recv(s->bus); + exynos4210_i2c_raise_interrupt(s); +} + +static void exynos4210_i2c_data_send(void *opaque) +{ + Exynos4210I2CState *s = (Exynos4210I2CState *)opaque; + + s->i2cstat &= ~I2CSTAT_LAST_BIT; + s->scl_free = false; + if (i2c_send(s->bus, s->i2cds) < 0 && (s->i2ccon & I2CCON_ACK_GEN)) { + s->i2cstat |= I2CSTAT_LAST_BIT; + } + exynos4210_i2c_raise_interrupt(s); +} + +static uint64_t exynos4210_i2c_read(void *opaque, hwaddr offset, + unsigned size) +{ + Exynos4210I2CState *s = (Exynos4210I2CState *)opaque; + uint8_t value; + + switch (offset) { + case I2CCON_ADDR: + value = s->i2ccon; + break; + case I2CSTAT_ADDR: + value = s->i2cstat; + break; + case I2CADD_ADDR: + value = s->i2cadd; + break; + case I2CDS_ADDR: + value = s->i2cds; + s->scl_free = true; + if (EXYNOS4_I2C_MODE(s->i2cstat) == I2CMODE_MASTER_Rx && + (s->i2cstat & I2CSTAT_START_BUSY) && + !(s->i2ccon & I2CCON_INT_PEND)) { + exynos4210_i2c_data_receive(s); + } + break; + case I2CLC_ADDR: + value = s->i2clc; + break; + default: + value = 0; + DPRINT("ERROR: Bad read offset 0x%x\n", (unsigned int)offset); + break; + } + + DPRINT("read %s [0x%02x] -> 0x%02x\n", exynos4_i2c_get_regname(offset), + (unsigned int)offset, value); + return value; +} + +static void exynos4210_i2c_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + Exynos4210I2CState *s = (Exynos4210I2CState *)opaque; + uint8_t v = value & 0xff; + + DPRINT("write %s [0x%02x] <- 0x%02x\n", exynos4_i2c_get_regname(offset), + (unsigned int)offset, v); + + switch (offset) { + case I2CCON_ADDR: + s->i2ccon = (v & ~I2CCON_INT_PEND) | (s->i2ccon & I2CCON_INT_PEND); + if ((s->i2ccon & I2CCON_INT_PEND) && !(v & I2CCON_INT_PEND)) { + s->i2ccon &= ~I2CCON_INT_PEND; + qemu_irq_lower(s->irq); + if (!(s->i2ccon & I2CCON_INTRS_EN)) { + s->i2cstat &= ~I2CSTAT_START_BUSY; + } + + if (s->i2cstat & I2CSTAT_START_BUSY) { + if (s->scl_free) { + if (EXYNOS4_I2C_MODE(s->i2cstat) == I2CMODE_MASTER_Tx) { + exynos4210_i2c_data_send(s); + } else if (EXYNOS4_I2C_MODE(s->i2cstat) == + I2CMODE_MASTER_Rx) { + exynos4210_i2c_data_receive(s); + } + } else { + s->i2ccon |= I2CCON_INT_PEND; + qemu_irq_raise(s->irq); + } + } + } + break; + case I2CSTAT_ADDR: + s->i2cstat = + (s->i2cstat & I2CSTAT_START_BUSY) | (v & ~I2CSTAT_START_BUSY); + + if (!(s->i2cstat & I2CSTAT_OUTPUT_EN)) { + s->i2cstat &= ~I2CSTAT_START_BUSY; + s->scl_free = true; + qemu_irq_lower(s->irq); + break; + } + + /* Nothing to do if in i2c slave mode */ + if (!I2C_IN_MASTER_MODE(s->i2cstat)) { + break; + } + + if (v & I2CSTAT_START_BUSY) { + s->i2cstat &= ~I2CSTAT_LAST_BIT; + s->i2cstat |= I2CSTAT_START_BUSY; /* Line is busy */ + s->scl_free = false; + + /* Generate start bit and send slave address */ + if (i2c_start_transfer(s->bus, s->i2cds >> 1, s->i2cds & 0x1) && + (s->i2ccon & I2CCON_ACK_GEN)) { + s->i2cstat |= I2CSTAT_LAST_BIT; + } else if (EXYNOS4_I2C_MODE(s->i2cstat) == I2CMODE_MASTER_Rx) { 
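Mapping the I2CSTAT write handler here back to the guest programming model: a master transmit starts by loading the address byte into I2CDS and then setting OUTPUT_EN plus START_BUSY in I2CSTAT with the mode bits set to master-Tx. A sketch of that sequence; mmio_write8() is a hypothetical guest accessor, not a QEMU API:

    /* hypothetical guest sketch: begin a master-Tx transfer */
    mmio_write8(base + I2CDS_ADDR, (addr7 << 1) | 0);       /* address + W */
    mmio_write8(base + I2CCON_ADDR, I2CCON_ACK_GEN | I2CCON_INTRS_EN);
    mmio_write8(base + I2CSTAT_ADDR,
                (I2CMODE_MASTER_Tx << 6) | I2CSTAT_OUTPUT_EN | I2CSTAT_START_BUSY);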
+ exynos4210_i2c_data_receive(s); + } + exynos4210_i2c_raise_interrupt(s); + } else { + i2c_end_transfer(s->bus); + if (!(s->i2ccon & I2CCON_INT_PEND)) { + s->i2cstat &= ~I2CSTAT_START_BUSY; + } + s->scl_free = true; + } + break; + case I2CADD_ADDR: + if ((s->i2cstat & I2CSTAT_OUTPUT_EN) == 0) { + s->i2cadd = v; + } + break; + case I2CDS_ADDR: + if (s->i2cstat & I2CSTAT_OUTPUT_EN) { + s->i2cds = v; + s->scl_free = true; + if (EXYNOS4_I2C_MODE(s->i2cstat) == I2CMODE_MASTER_Tx && + (s->i2cstat & I2CSTAT_START_BUSY) && + !(s->i2ccon & I2CCON_INT_PEND)) { + exynos4210_i2c_data_send(s); + } + } + break; + case I2CLC_ADDR: + s->i2clc = v; + break; + default: + DPRINT("ERROR: Bad write offset 0x%x\n", (unsigned int)offset); + break; + } +} + +static const MemoryRegionOps exynos4210_i2c_ops = { + .read = exynos4210_i2c_read, + .write = exynos4210_i2c_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static const VMStateDescription exynos4210_i2c_vmstate = { + .name = "exynos4210.i2c", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT8(i2ccon, Exynos4210I2CState), + VMSTATE_UINT8(i2cstat, Exynos4210I2CState), + VMSTATE_UINT8(i2cds, Exynos4210I2CState), + VMSTATE_UINT8(i2cadd, Exynos4210I2CState), + VMSTATE_UINT8(i2clc, Exynos4210I2CState), + VMSTATE_BOOL(scl_free, Exynos4210I2CState), + VMSTATE_END_OF_LIST() + } +}; + +static void exynos4210_i2c_reset(DeviceState *d) +{ + Exynos4210I2CState *s = EXYNOS4_I2C(d); + + s->i2ccon = 0x00; + s->i2cstat = 0x00; + s->i2cds = 0xFF; + s->i2clc = 0x00; + s->i2cadd = 0xFF; + s->scl_free = true; +} + +static void exynos4210_i2c_init(Object *obj) +{ + DeviceState *dev = DEVICE(obj); + Exynos4210I2CState *s = EXYNOS4_I2C(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + + memory_region_init_io(&s->iomem, obj, &exynos4210_i2c_ops, s, + TYPE_EXYNOS4_I2C, EXYNOS4_I2C_MEM_SIZE); + sysbus_init_mmio(sbd, &s->iomem); + sysbus_init_irq(sbd, &s->irq); + s->bus = i2c_init_bus(dev, "i2c"); +} + +static void exynos4210_i2c_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->vmsd = &exynos4210_i2c_vmstate; + dc->reset = exynos4210_i2c_reset; +} + +static const TypeInfo exynos4210_i2c_type_info = { + .name = TYPE_EXYNOS4_I2C, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(Exynos4210I2CState), + .instance_init = exynos4210_i2c_init, + .class_init = exynos4210_i2c_class_init, +}; + +static void exynos4210_i2c_register_types(void) +{ + type_register_static(&exynos4210_i2c_type_info); +} + +type_init(exynos4210_i2c_register_types) diff --git a/hw/i2c/i2c_mux_pca954x.c b/hw/i2c/i2c_mux_pca954x.c new file mode 100644 index 000000000..847c59921 --- /dev/null +++ b/hw/i2c/i2c_mux_pca954x.c @@ -0,0 +1,290 @@ +/* + * I2C multiplexer for PCA954x series of I2C multiplexer/switch chips. + * + * Copyright 2021 Google LLC + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "hw/i2c/i2c.h" +#include "hw/i2c/i2c_mux_pca954x.h" +#include "hw/i2c/smbus_slave.h" +#include "hw/qdev-core.h" +#include "hw/sysbus.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "qemu/queue.h" +#include "qom/object.h" +#include "trace.h" + +#define PCA9548_CHANNEL_COUNT 8 +#define PCA9546_CHANNEL_COUNT 4 + +/* + * struct Pca954xChannel - The i2c mux device will have N of these states + * that own the i2c channel bus. + * @bus: The owned channel bus. + * @enabled: Is this channel active? + */ +typedef struct Pca954xChannel { + SysBusDevice parent; + + I2CBus *bus; + + bool enabled; +} Pca954xChannel; + +#define TYPE_PCA954X_CHANNEL "pca954x-channel" +#define PCA954X_CHANNEL(obj) \ + OBJECT_CHECK(Pca954xChannel, (obj), TYPE_PCA954X_CHANNEL) + +/* + * struct Pca954xState - The pca954x state object. + * @control: The value written to the mux control. + * @channel: The set of i2c channel buses that act as channels which own the + * i2c children. + */ +typedef struct Pca954xState { + SMBusDevice parent; + + uint8_t control; + + /* The channel i2c buses. */ + Pca954xChannel channel[PCA9548_CHANNEL_COUNT]; +} Pca954xState; + +/* + * struct Pca954xClass - The pca954x class object. + * @nchans: The number of i2c channels this device has. + */ +typedef struct Pca954xClass { + SMBusDeviceClass parent; + + uint8_t nchans; +} Pca954xClass; + +#define TYPE_PCA954X "pca954x" +OBJECT_DECLARE_TYPE(Pca954xState, Pca954xClass, PCA954X) + +/* + * For each channel, if it's enabled, recursively call match on those children. + */ +static bool pca954x_match(I2CSlave *candidate, uint8_t address, + bool broadcast, + I2CNodeList *current_devs) +{ + Pca954xState *mux = PCA954X(candidate); + Pca954xClass *mc = PCA954X_GET_CLASS(mux); + int i; + + /* They are talking to the mux itself (or all devices enabled). */ + if ((candidate->address == address) || broadcast) { + I2CNode *node = g_malloc(sizeof(struct I2CNode)); + node->elt = candidate; + QLIST_INSERT_HEAD(current_devs, node, next); + if (!broadcast) { + return true; + } + } + + for (i = 0; i < mc->nchans; i++) { + if (!mux->channel[i].enabled) { + continue; + } + + if (i2c_scan_bus(mux->channel[i].bus, address, broadcast, + current_devs)) { + if (!broadcast) { + return true; + } + } + } + + /* If we arrived here we didn't find a match, return broadcast. */ + return broadcast; +} + +static void pca954x_enable_channel(Pca954xState *s, uint8_t enable_mask) +{ + Pca954xClass *mc = PCA954X_GET_CLASS(s); + int i; + + /* + * For each channel, check if their bit is set in enable_mask and if yes, + * enable it, otherwise disable, hide it. + */ + for (i = 0; i < mc->nchans; i++) { + if (enable_mask & (1 << i)) { + s->channel[i].enabled = true; + } else { + s->channel[i].enabled = false; + } + } +} + +static void pca954x_write(Pca954xState *s, uint8_t data) +{ + s->control = data; + pca954x_enable_channel(s, data); + + trace_pca954x_write_bytes(data); +} + +static int pca954x_write_data(SMBusDevice *d, uint8_t *buf, uint8_t len) +{ + Pca954xState *s = PCA954X(d); + + if (len == 0) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: writing empty data\n", __func__); + return -1; + } + + /* + * len should be 1, because they write one byte to enable/disable channels. 
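Channel selection on this mux is a single control-byte write (handled below in pca954x_write_data()), and board code reaches a downstream bus through pca954x_i2c_get_bus(). A wiring sketch; the device name and addresses are examples only:

    /* guest side: writing 1 << 2 to the mux enables channel 2 only */

    /* board-code sketch: attach a device behind mux channel 2 */
    I2CBus *ch2 = pca954x_i2c_get_bus(mux, 2);      /* mux: the pca954x I2CSlave */

    i2c_slave_create_simple(ch2, "tmp105", 0x48);   /* example device behind ch2 */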
+ */ + if (len > 1) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: extra data after channel selection mask\n", + __func__); + return -1; + } + + pca954x_write(s, buf[0]); + return 0; +} + +static uint8_t pca954x_read_byte(SMBusDevice *d) +{ + Pca954xState *s = PCA954X(d); + uint8_t data = s->control; + trace_pca954x_read_data(data); + return data; +} + +static void pca954x_enter_reset(Object *obj, ResetType type) +{ + Pca954xState *s = PCA954X(obj); + /* Reset will disable all channels. */ + pca954x_write(s, 0); +} + +I2CBus *pca954x_i2c_get_bus(I2CSlave *mux, uint8_t channel) +{ + Pca954xClass *pc = PCA954X_GET_CLASS(mux); + Pca954xState *pca954x = PCA954X(mux); + + g_assert(channel < pc->nchans); + return I2C_BUS(qdev_get_child_bus(DEVICE(&pca954x->channel[channel]), + "i2c-bus")); +} + +static void pca954x_channel_init(Object *obj) +{ + Pca954xChannel *s = PCA954X_CHANNEL(obj); + s->bus = i2c_init_bus(DEVICE(s), "i2c-bus"); + + /* Start all channels as disabled. */ + s->enabled = false; +} + +static void pca954x_channel_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + dc->desc = "Pca954x Channel"; +} + +static void pca9546_class_init(ObjectClass *klass, void *data) +{ + Pca954xClass *s = PCA954X_CLASS(klass); + s->nchans = PCA9546_CHANNEL_COUNT; +} + +static void pca9548_class_init(ObjectClass *klass, void *data) +{ + Pca954xClass *s = PCA954X_CLASS(klass); + s->nchans = PCA9548_CHANNEL_COUNT; +} + +static void pca954x_realize(DeviceState *dev, Error **errp) +{ + Pca954xState *s = PCA954X(dev); + Pca954xClass *c = PCA954X_GET_CLASS(s); + int i; + + /* SMBus modules. Cannot fail. */ + for (i = 0; i < c->nchans; i++) { + sysbus_realize(SYS_BUS_DEVICE(&s->channel[i]), &error_abort); + } +} + +static void pca954x_init(Object *obj) +{ + Pca954xState *s = PCA954X(obj); + Pca954xClass *c = PCA954X_GET_CLASS(obj); + int i; + + /* Only initialize the children we expect. */ + for (i = 0; i < c->nchans; i++) { + object_initialize_child(obj, "channel[*]", &s->channel[i], + TYPE_PCA954X_CHANNEL); + } +} + +static void pca954x_class_init(ObjectClass *klass, void *data) +{ + I2CSlaveClass *sc = I2C_SLAVE_CLASS(klass); + ResettableClass *rc = RESETTABLE_CLASS(klass); + DeviceClass *dc = DEVICE_CLASS(klass); + SMBusDeviceClass *k = SMBUS_DEVICE_CLASS(klass); + + sc->match_and_add = pca954x_match; + + rc->phases.enter = pca954x_enter_reset; + + dc->desc = "Pca954x i2c-mux"; + dc->realize = pca954x_realize; + + k->write_data = pca954x_write_data; + k->receive_byte = pca954x_read_byte; +} + +static const TypeInfo pca954x_info[] = { + { + .name = TYPE_PCA954X, + .parent = TYPE_SMBUS_DEVICE, + .instance_size = sizeof(Pca954xState), + .instance_init = pca954x_init, + .class_size = sizeof(Pca954xClass), + .class_init = pca954x_class_init, + .abstract = true, + }, + { + .name = TYPE_PCA9546, + .parent = TYPE_PCA954X, + .class_init = pca9546_class_init, + }, + { + .name = TYPE_PCA9548, + .parent = TYPE_PCA954X, + .class_init = pca9548_class_init, + }, + { + .name = TYPE_PCA954X_CHANNEL, + .parent = TYPE_SYS_BUS_DEVICE, + .class_init = pca954x_channel_class_init, + .instance_size = sizeof(Pca954xChannel), + .instance_init = pca954x_channel_init, + } +}; + +DEFINE_TYPES(pca954x_info) diff --git a/hw/i2c/imx_i2c.c b/hw/i2c/imx_i2c.c new file mode 100644 index 000000000..9792583fe --- /dev/null +++ b/hw/i2c/imx_i2c.c @@ -0,0 +1,333 @@ +/* + * i.MX I2C Bus Serial Interface Emulation + * + * Copyright (C) 2013 Jean-Christophe Dubois. 
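+ *
+ * A typical master-transmit sequence, as this model interprets it
+ * (illustrative guest register writes, not a complete driver):
+ *
+ *   I2CR = I2CR_IEN | I2CR_MSTA | I2CR_MTX;  (enable, master, transmit)
+ *   I2DR = (addr << 1) | 0;   (first I2DR write sends the slave address)
+ *   I2DR = data;              (subsequent I2DR writes send data bytes)
+ *   I2CR &= ~I2CR_MSTA;       (clearing MSTA ends the transfer with STOP)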
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . + * + */ + +#include "qemu/osdep.h" +#include "hw/i2c/imx_i2c.h" +#include "hw/irq.h" +#include "migration/vmstate.h" +#include "hw/i2c/i2c.h" +#include "qemu/log.h" +#include "qemu/module.h" + +#ifndef DEBUG_IMX_I2C +#define DEBUG_IMX_I2C 0 +#endif + +#define DPRINTF(fmt, args...) \ + do { \ + if (DEBUG_IMX_I2C) { \ + fprintf(stderr, "[%s]%s: " fmt , TYPE_IMX_I2C, \ + __func__, ##args); \ + } \ + } while (0) + +static const char *imx_i2c_get_regname(unsigned offset) +{ + switch (offset) { + case IADR_ADDR: + return "IADR"; + case IFDR_ADDR: + return "IFDR"; + case I2CR_ADDR: + return "I2CR"; + case I2SR_ADDR: + return "I2SR"; + case I2DR_ADDR: + return "I2DR"; + default: + return "[?]"; + } +} + +static inline bool imx_i2c_is_enabled(IMXI2CState *s) +{ + return s->i2cr & I2CR_IEN; +} + +static inline bool imx_i2c_interrupt_is_enabled(IMXI2CState *s) +{ + return s->i2cr & I2CR_IIEN; +} + +static inline bool imx_i2c_is_master(IMXI2CState *s) +{ + return s->i2cr & I2CR_MSTA; +} + +static void imx_i2c_reset(DeviceState *dev) +{ + IMXI2CState *s = IMX_I2C(dev); + + if (s->address != ADDR_RESET) { + i2c_end_transfer(s->bus); + } + + s->address = ADDR_RESET; + s->iadr = IADR_RESET; + s->ifdr = IFDR_RESET; + s->i2cr = I2CR_RESET; + s->i2sr = I2SR_RESET; + s->i2dr_read = I2DR_RESET; + s->i2dr_write = I2DR_RESET; +} + +static inline void imx_i2c_raise_interrupt(IMXI2CState *s) +{ + /* + * raise an interrupt if the device is enabled and it is configured + * to generate some interrupts. 
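+ * Both I2CR[IEN] and I2CR[IIEN] must be set; the guest acknowledges the
+ * interrupt by writing I2SR with the IIF bit cleared, which lowers the
+ * IRQ line again.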
+ */ + if (imx_i2c_is_enabled(s) && imx_i2c_interrupt_is_enabled(s)) { + s->i2sr |= I2SR_IIF; + qemu_irq_raise(s->irq); + } +} + +static uint64_t imx_i2c_read(void *opaque, hwaddr offset, + unsigned size) +{ + uint16_t value; + IMXI2CState *s = IMX_I2C(opaque); + + switch (offset) { + case IADR_ADDR: + value = s->iadr; + break; + case IFDR_ADDR: + value = s->ifdr; + break; + case I2CR_ADDR: + value = s->i2cr; + break; + case I2SR_ADDR: + value = s->i2sr; + break; + case I2DR_ADDR: + value = s->i2dr_read; + + if (imx_i2c_is_master(s)) { + uint8_t ret = 0xff; + + if (s->address == ADDR_RESET) { + /* something is wrong as the address is not set */ + qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Trying to read " + "without specifying the slave address\n", + TYPE_IMX_I2C, __func__); + } else if (s->i2cr & I2CR_MTX) { + qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Trying to read " + "but MTX is set\n", TYPE_IMX_I2C, __func__); + } else { + /* get the next byte */ + ret = i2c_recv(s->bus); + imx_i2c_raise_interrupt(s); + } + + s->i2dr_read = ret; + } else { + qemu_log_mask(LOG_UNIMP, "[%s]%s: slave mode not implemented\n", + TYPE_IMX_I2C, __func__); + } + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Bad address at offset 0x%" + HWADDR_PRIx "\n", TYPE_IMX_I2C, __func__, offset); + value = 0; + break; + } + + DPRINTF("read %s [0x%" HWADDR_PRIx "] -> 0x%02x\n", + imx_i2c_get_regname(offset), offset, value); + + return (uint64_t)value; +} + +static void imx_i2c_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + IMXI2CState *s = IMX_I2C(opaque); + + DPRINTF("write %s [0x%" HWADDR_PRIx "] <- 0x%02x\n", + imx_i2c_get_regname(offset), offset, (int)value); + + value &= 0xff; + + switch (offset) { + case IADR_ADDR: + s->iadr = value & IADR_MASK; + /* i2c_slave_set_address(s->bus, (uint8_t)s->iadr); */ + break; + case IFDR_ADDR: + s->ifdr = value & IFDR_MASK; + break; + case I2CR_ADDR: + if (imx_i2c_is_enabled(s) && ((value & I2CR_IEN) == 0)) { + /* This is a soft reset. 
IADR is preserved during soft resets */ + uint16_t iadr = s->iadr; + imx_i2c_reset(DEVICE(s)); + s->iadr = iadr; + } else { /* normal write */ + s->i2cr = value & I2CR_MASK; + + if (imx_i2c_is_master(s)) { + /* set the bus to busy */ + s->i2sr |= I2SR_IBB; + } else { /* slave mode */ + /* bus is not busy anymore */ + s->i2sr &= ~I2SR_IBB; + + /* + * if we unset the master mode then it ends the ongoing + * transfer if any + */ + if (s->address != ADDR_RESET) { + i2c_end_transfer(s->bus); + s->address = ADDR_RESET; + } + } + + if (s->i2cr & I2CR_RSTA) { /* Restart */ + /* if this is a restart then it ends the ongoing transfer */ + if (s->address != ADDR_RESET) { + i2c_end_transfer(s->bus); + s->address = ADDR_RESET; + s->i2cr &= ~I2CR_RSTA; + } + } + } + break; + case I2SR_ADDR: + /* + * if the user writes 0 to IIF then lower the interrupt and + * reset the bit + */ + if ((s->i2sr & I2SR_IIF) && !(value & I2SR_IIF)) { + s->i2sr &= ~I2SR_IIF; + qemu_irq_lower(s->irq); + } + + /* + * if the user writes 0 to IAL, reset the bit + */ + if ((s->i2sr & I2SR_IAL) && !(value & I2SR_IAL)) { + s->i2sr &= ~I2SR_IAL; + } + + break; + case I2DR_ADDR: + /* if the device is not enabled, nothing to do */ + if (!imx_i2c_is_enabled(s)) { + break; + } + + s->i2dr_write = value & I2DR_MASK; + + if (imx_i2c_is_master(s)) { + /* If this is the first write cycle then it is the slave addr */ + if (s->address == ADDR_RESET) { + if (i2c_start_transfer(s->bus, extract32(s->i2dr_write, 1, 7), + extract32(s->i2dr_write, 0, 1))) { + /* if non zero is returned, the address is not valid */ + s->i2sr |= I2SR_RXAK; + } else { + s->address = s->i2dr_write; + s->i2sr &= ~I2SR_RXAK; + imx_i2c_raise_interrupt(s); + } + } else { /* This is a normal data write */ + if (i2c_send(s->bus, s->i2dr_write)) { + /* if the target return non zero then end the transfer */ + s->i2sr |= I2SR_RXAK; + s->address = ADDR_RESET; + i2c_end_transfer(s->bus); + } else { + s->i2sr &= ~I2SR_RXAK; + imx_i2c_raise_interrupt(s); + } + } + } else { + qemu_log_mask(LOG_UNIMP, "[%s]%s: slave mode not implemented\n", + TYPE_IMX_I2C, __func__); + } + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Bad address at offset 0x%" + HWADDR_PRIx "\n", TYPE_IMX_I2C, __func__, offset); + break; + } +} + +static const MemoryRegionOps imx_i2c_ops = { + .read = imx_i2c_read, + .write = imx_i2c_write, + .valid.min_access_size = 1, + .valid.max_access_size = 2, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static const VMStateDescription imx_i2c_vmstate = { + .name = TYPE_IMX_I2C, + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT16(address, IMXI2CState), + VMSTATE_UINT16(iadr, IMXI2CState), + VMSTATE_UINT16(ifdr, IMXI2CState), + VMSTATE_UINT16(i2cr, IMXI2CState), + VMSTATE_UINT16(i2sr, IMXI2CState), + VMSTATE_UINT16(i2dr_read, IMXI2CState), + VMSTATE_UINT16(i2dr_write, IMXI2CState), + VMSTATE_END_OF_LIST() + } +}; + +static void imx_i2c_realize(DeviceState *dev, Error **errp) +{ + IMXI2CState *s = IMX_I2C(dev); + + memory_region_init_io(&s->iomem, OBJECT(s), &imx_i2c_ops, s, TYPE_IMX_I2C, + IMX_I2C_MEM_SIZE); + sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->iomem); + sysbus_init_irq(SYS_BUS_DEVICE(dev), &s->irq); + s->bus = i2c_init_bus(dev, NULL); +} + +static void imx_i2c_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->vmsd = &imx_i2c_vmstate; + dc->reset = imx_i2c_reset; + dc->realize = imx_i2c_realize; + dc->desc = "i.MX I2C Controller"; +} + +static const TypeInfo 
imx_i2c_type_info = { + .name = TYPE_IMX_I2C, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(IMXI2CState), + .class_init = imx_i2c_class_init, +}; + +static void imx_i2c_register_types(void) +{ + type_register_static(&imx_i2c_type_info); +} + +type_init(imx_i2c_register_types) diff --git a/hw/i2c/meson.build b/hw/i2c/meson.build new file mode 100644 index 000000000..d3df27325 --- /dev/null +++ b/hw/i2c/meson.build @@ -0,0 +1,19 @@ +i2c_ss = ss.source_set() +i2c_ss.add(when: 'CONFIG_I2C', if_true: files('core.c')) +i2c_ss.add(when: 'CONFIG_SMBUS', if_true: files('smbus_slave.c', 'smbus_master.c')) +i2c_ss.add(when: 'CONFIG_ACPI_SMBUS', if_true: files('pm_smbus.c')) +i2c_ss.add(when: 'CONFIG_ACPI_X86_ICH', if_true: files('smbus_ich9.c')) +i2c_ss.add(when: 'CONFIG_ASPEED_SOC', if_true: files('aspeed_i2c.c')) +i2c_ss.add(when: 'CONFIG_BITBANG_I2C', if_true: files('bitbang_i2c.c')) +i2c_ss.add(when: 'CONFIG_EXYNOS4', if_true: files('exynos4210_i2c.c')) +i2c_ss.add(when: 'CONFIG_IMX_I2C', if_true: files('imx_i2c.c')) +i2c_ss.add(when: 'CONFIG_MPC_I2C', if_true: files('mpc_i2c.c')) +i2c_ss.add(when: 'CONFIG_NRF51_SOC', if_true: files('microbit_i2c.c')) +i2c_ss.add(when: 'CONFIG_NPCM7XX', if_true: files('npcm7xx_smbus.c')) +i2c_ss.add(when: 'CONFIG_SMBUS_EEPROM', if_true: files('smbus_eeprom.c')) +i2c_ss.add(when: 'CONFIG_VERSATILE_I2C', if_true: files('versatile_i2c.c')) +i2c_ss.add(when: 'CONFIG_OMAP', if_true: files('omap_i2c.c')) +i2c_ss.add(when: 'CONFIG_PPC4XX', if_true: files('ppc4xx_i2c.c')) +i2c_ss.add(when: 'CONFIG_PCA954X', if_true: files('i2c_mux_pca954x.c')) +i2c_ss.add(when: 'CONFIG_PMBUS', if_true: files('pmbus_device.c')) +softmmu_ss.add_all(when: 'CONFIG_I2C', if_true: i2c_ss) diff --git a/hw/i2c/microbit_i2c.c b/hw/i2c/microbit_i2c.c new file mode 100644 index 000000000..e92f9f84e --- /dev/null +++ b/hw/i2c/microbit_i2c.c @@ -0,0 +1,130 @@ +/* + * Microbit stub for Nordic Semiconductor nRF51 SoC Two-Wire Interface + * http://infocenter.nordicsemi.com/pdf/nRF51_RM_v3.0.1.pdf + * + * This is a microbit-specific stub for the TWI controller on the nRF51 SoC. + * We don't emulate I2C devices but the firmware probes the + * accelerometer/magnetometer on startup and panics if they are not found. + * Therefore we stub out the probing. + * + * In the future this file could evolve into a full nRF51 TWI controller + * device. + * + * Copyright 2018 Steffen Görtz + * Copyright 2019 Red Hat, Inc. + * + * This code is licensed under the GPL version 2 or later. See + * the COPYING file in the top-level directory. 
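+ *
+ * Reads of the STOPPED/RXDREADY/TXDSENT events always report completion,
+ * and RXD returns a short canned byte sequence: just enough for the
+ * firmware's accelerometer/magnetometer probe to succeed.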
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/log.h"
+#include "qemu/module.h"
+#include "hw/i2c/microbit_i2c.h"
+#include "migration/vmstate.h"
+
+static const uint32_t twi_read_sequence[] = {0x5A, 0x5A, 0x40};
+
+static uint64_t microbit_i2c_read(void *opaque, hwaddr addr, unsigned int size)
+{
+    MicrobitI2CState *s = opaque;
+    uint64_t data = 0x00;
+
+    switch (addr) {
+    case NRF51_TWI_EVENT_STOPPED:
+        data = 0x01;
+        break;
+    case NRF51_TWI_EVENT_RXDREADY:
+        data = 0x01;
+        break;
+    case NRF51_TWI_EVENT_TXDSENT:
+        data = 0x01;
+        break;
+    case NRF51_TWI_REG_RXD:
+        data = twi_read_sequence[s->read_idx];
+        /* clamp the index so repeated reads return the last canned byte */
+        if (s->read_idx < G_N_ELEMENTS(twi_read_sequence) - 1) {
+            s->read_idx++;
+        }
+        break;
+    default:
+        data = s->regs[addr / sizeof(s->regs[0])];
+        break;
+    }
+
+    qemu_log_mask(LOG_UNIMP, "%s: 0x%" HWADDR_PRIx " [%u] = %" PRIx32 "\n",
+                  __func__, addr, size, (uint32_t)data);
+
+    return data;
+}
+
+static void microbit_i2c_write(void *opaque, hwaddr addr, uint64_t data,
+                               unsigned int size)
+{
+    MicrobitI2CState *s = opaque;
+
+    qemu_log_mask(LOG_UNIMP, "%s: 0x%" HWADDR_PRIx " <- 0x%" PRIx64 " [%u]\n",
+                  __func__, addr, data, size);
+    s->regs[addr / sizeof(s->regs[0])] = data;
+}
+
+static const MemoryRegionOps microbit_i2c_ops = {
+    .read = microbit_i2c_read,
+    .write = microbit_i2c_write,
+    .endianness = DEVICE_LITTLE_ENDIAN,
+    .impl.min_access_size = 4,
+    .impl.max_access_size = 4,
+};
+
+static const VMStateDescription microbit_i2c_vmstate = {
+    .name = TYPE_MICROBIT_I2C,
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT32_ARRAY(regs, MicrobitI2CState, MICROBIT_I2C_NREGS),
+        VMSTATE_UINT32(read_idx, MicrobitI2CState),
+        VMSTATE_END_OF_LIST()
+    },
+};
+
+static void microbit_i2c_reset(DeviceState *dev)
+{
+    MicrobitI2CState *s = MICROBIT_I2C(dev);
+
+    memset(s->regs, 0, sizeof(s->regs));
+    s->read_idx = 0;
+}
+
+static void microbit_i2c_realize(DeviceState *dev, Error **errp)
+{
+    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
+    MicrobitI2CState *s = MICROBIT_I2C(dev);
+
+    memory_region_init_io(&s->iomem, OBJECT(s), &microbit_i2c_ops, s,
+                          "microbit.twi", NRF51_PERIPHERAL_SIZE);
+    sysbus_init_mmio(sbd, &s->iomem);
+}
+
+static void microbit_i2c_class_init(ObjectClass *klass, void *data)
+{
+    DeviceClass *dc = DEVICE_CLASS(klass);
+
+    dc->vmsd = &microbit_i2c_vmstate;
+    dc->reset = microbit_i2c_reset;
+    dc->realize = microbit_i2c_realize;
+    dc->desc = "Microbit I2C controller";
+}
+
+static const TypeInfo microbit_i2c_info = {
+    .name = TYPE_MICROBIT_I2C,
+    .parent = TYPE_SYS_BUS_DEVICE,
+    .instance_size = sizeof(MicrobitI2CState),
+    .class_init = microbit_i2c_class_init,
+};
+
+static void microbit_i2c_register_types(void)
+{
+    type_register_static(&microbit_i2c_info);
+}
+
+type_init(microbit_i2c_register_types)
diff --git a/hw/i2c/mpc_i2c.c b/hw/i2c/mpc_i2c.c
new file mode 100644
index 000000000..845392505
--- /dev/null
+++ b/hw/i2c/mpc_i2c.c
@@ -0,0 +1,359 @@
+/*
+ * Copyright (C) 2014 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * Author: Amit Tomar,
+ *
+ * Description:
+ * This file is derived from IMX I2C controller,
+ * by Jean-Christophe DUBOIS .
+ *
+ * Thanks to Scott Wood and Alexander Graf for their kind help on this.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2 or later,
+ * as published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see .
+ */ + +#include "qemu/osdep.h" +#include "hw/i2c/i2c.h" +#include "hw/irq.h" +#include "qemu/module.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "qom/object.h" + +/* #define DEBUG_I2C */ + +#ifdef DEBUG_I2C +#define DPRINTF(fmt, ...) \ + do { fprintf(stderr, "mpc_i2c[%s]: " fmt, __func__, ## __VA_ARGS__); \ + } while (0) +#else +#define DPRINTF(fmt, ...) do {} while (0) +#endif + +#define TYPE_MPC_I2C "mpc-i2c" +OBJECT_DECLARE_SIMPLE_TYPE(MPCI2CState, MPC_I2C) + +#define MPC_I2C_ADR 0x00 +#define MPC_I2C_FDR 0x04 +#define MPC_I2C_CR 0x08 +#define MPC_I2C_SR 0x0c +#define MPC_I2C_DR 0x10 +#define MPC_I2C_DFSRR 0x14 + +#define CCR_MEN (1 << 7) +#define CCR_MIEN (1 << 6) +#define CCR_MSTA (1 << 5) +#define CCR_MTX (1 << 4) +#define CCR_TXAK (1 << 3) +#define CCR_RSTA (1 << 2) +#define CCR_BCST (1 << 0) + +#define CSR_MCF (1 << 7) +#define CSR_MAAS (1 << 6) +#define CSR_MBB (1 << 5) +#define CSR_MAL (1 << 4) +#define CSR_SRW (1 << 2) +#define CSR_MIF (1 << 1) +#define CSR_RXAK (1 << 0) + +#define CADR_MASK 0xFE +#define CFDR_MASK 0x3F +#define CCR_MASK 0xFC +#define CSR_MASK 0xED +#define CDR_MASK 0xFF + +#define CYCLE_RESET 0xFF + +struct MPCI2CState { + SysBusDevice parent_obj; + + I2CBus *bus; + qemu_irq irq; + MemoryRegion iomem; + + uint8_t address; + uint8_t adr; + uint8_t fdr; + uint8_t cr; + uint8_t sr; + uint8_t dr; + uint8_t dfssr; +}; + +static bool mpc_i2c_is_enabled(MPCI2CState *s) +{ + return s->cr & CCR_MEN; +} + +static bool mpc_i2c_is_master(MPCI2CState *s) +{ + return s->cr & CCR_MSTA; +} + +static bool mpc_i2c_direction_is_tx(MPCI2CState *s) +{ + return s->cr & CCR_MTX; +} + +static bool mpc_i2c_irq_pending(MPCI2CState *s) +{ + return s->sr & CSR_MIF; +} + +static bool mpc_i2c_irq_is_enabled(MPCI2CState *s) +{ + return s->cr & CCR_MIEN; +} + +static void mpc_i2c_reset(DeviceState *dev) +{ + MPCI2CState *i2c = MPC_I2C(dev); + + i2c->address = 0xFF; + i2c->adr = 0x00; + i2c->fdr = 0x00; + i2c->cr = 0x00; + i2c->sr = 0x81; + i2c->dr = 0x00; +} + +static void mpc_i2c_irq(MPCI2CState *s) +{ + bool irq_active = false; + + if (mpc_i2c_is_enabled(s) && mpc_i2c_irq_is_enabled(s) + && mpc_i2c_irq_pending(s)) { + irq_active = true; + } + + if (irq_active) { + qemu_irq_raise(s->irq); + } else { + qemu_irq_lower(s->irq); + } +} + +static void mpc_i2c_soft_reset(MPCI2CState *s) +{ + /* This is a soft reset. 
ADR is preserved during soft resets */
+    uint8_t adr = s->adr;
+    mpc_i2c_reset(DEVICE(s));
+    s->adr = adr;
+}
+
+static void mpc_i2c_address_send(MPCI2CState *s)
+{
+    /* a non-zero return means the slave address was not acknowledged */
+    if (i2c_start_transfer(s->bus, s->dr >> 1, s->dr & (0x01))) {
+        s->sr |= CSR_RXAK;
+    } else {
+        s->address = s->dr;
+        s->sr &= ~CSR_RXAK;
+        s->sr |= CSR_MCF; /* Set after Byte Transfer is completed */
+        s->sr |= CSR_MIF; /* Set after Byte Transfer is completed */
+        mpc_i2c_irq(s);
+    }
+}
+
+static void mpc_i2c_data_send(MPCI2CState *s)
+{
+    if (i2c_send(s->bus, s->dr)) {
+        /* End of transfer */
+        s->sr |= CSR_RXAK;
+        i2c_end_transfer(s->bus);
+    } else {
+        s->sr &= ~CSR_RXAK;
+        s->sr |= CSR_MCF; /* Set after Byte Transfer is completed */
+        s->sr |= CSR_MIF; /* Set after Byte Transfer is completed */
+        mpc_i2c_irq(s);
+    }
+}
+
+static void mpc_i2c_data_receive(MPCI2CState *s)
+{
+    int ret;
+    /* get the next byte */
+    ret = i2c_recv(s->bus);
+    if (ret >= 0) {
+        s->sr |= CSR_MCF; /* Set after Byte Transfer is completed */
+        s->sr |= CSR_MIF; /* Set after Byte Transfer is completed */
+        mpc_i2c_irq(s);
+    } else {
+        DPRINTF("read failed for device\n");
+        ret = 0xff;
+    }
+    s->dr = ret;
+}
+
+static uint64_t mpc_i2c_read(void *opaque, hwaddr addr, unsigned size)
+{
+    MPCI2CState *s = opaque;
+    uint8_t value;
+
+    switch (addr) {
+    case MPC_I2C_ADR:
+        value = s->adr;
+        break;
+    case MPC_I2C_FDR:
+        value = s->fdr;
+        break;
+    case MPC_I2C_CR:
+        value = s->cr;
+        break;
+    case MPC_I2C_SR:
+        value = s->sr;
+        break;
+    case MPC_I2C_DR:
+        value = s->dr;
+        if (mpc_i2c_is_master(s)) { /* master mode */
+            if (mpc_i2c_direction_is_tx(s)) {
+                DPRINTF("MTX is set, not in receive mode\n");
+            } else {
+                mpc_i2c_data_receive(s);
+            }
+        }
+        break;
+    default:
+        value = 0;
+        DPRINTF("ERROR: Bad read addr 0x%x\n", (unsigned int)addr);
+        break;
+    }
+
+    DPRINTF("%s: addr " TARGET_FMT_plx " %02" PRIx32 "\n", __func__,
+            addr, value);
+    return (uint64_t)value;
+}
+
+static void mpc_i2c_write(void *opaque, hwaddr addr,
+                          uint64_t value, unsigned size)
+{
+    MPCI2CState *s = opaque;
+
+    DPRINTF("%s: addr " TARGET_FMT_plx " val %08" PRIx64 "\n", __func__,
+            addr, value);
+    switch (addr) {
+    case MPC_I2C_ADR:
+        s->adr = value & CADR_MASK;
+        break;
+    case MPC_I2C_FDR:
+        s->fdr = value & CFDR_MASK;
+        break;
+    case MPC_I2C_CR:
+        if (mpc_i2c_is_enabled(s) && ((value & CCR_MEN) == 0)) {
+            mpc_i2c_soft_reset(s);
+            break;
+        }
+        /* normal write */
+        s->cr = value & CCR_MASK;
+        if (mpc_i2c_is_master(s)) { /* master mode */
+            /* set the bus to busy after master is set as per RM */
+            s->sr |= CSR_MBB;
+        } else {
+            /* bus is not busy anymore */
+            s->sr &= ~CSR_MBB;
+            /* Reset the address for fresh write/read cycle */
+            if (s->address != CYCLE_RESET) {
+                i2c_end_transfer(s->bus);
+                s->address = CYCLE_RESET;
+            }
+        }
+        /* For restart, end the ongoing transfer */
+        if (s->cr & CCR_RSTA) {
+            if (s->address != CYCLE_RESET) {
+                s->address = CYCLE_RESET;
+                i2c_end_transfer(s->bus);
+                s->cr &= ~CCR_RSTA;
+            }
+        }
+        break;
+    case MPC_I2C_SR:
+        s->sr = value & CSR_MASK;
+        /* Lower the interrupt */
+        if (!(s->sr & CSR_MIF) || !(s->sr & CSR_MAL)) {
+            mpc_i2c_irq(s);
+        }
+        break;
+    case MPC_I2C_DR:
+        /* if the device is not enabled, nothing to do */
+        if (!mpc_i2c_is_enabled(s)) {
+            break;
+        }
+        s->dr = value & CDR_MASK;
+        if (mpc_i2c_is_master(s)) { /* master mode */
+            if (s->address == CYCLE_RESET) {
+                mpc_i2c_address_send(s);
+            } else {
+                mpc_i2c_data_send(s);
+            }
+        }
+        break;
+    case MPC_I2C_DFSRR:
+        s->dfssr = value;
+        break;
+    default:
+        DPRINTF("ERROR: Bad write addr 0x%x\n", (unsigned int)addr);
+        break;
+    }
+}
+
+static const MemoryRegionOps i2c_ops = {
+    .read = mpc_i2c_read,
+    .write = mpc_i2c_write,
+    .valid.max_access_size = 1,
+    .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+static const VMStateDescription mpc_i2c_vmstate = {
+    .name = TYPE_MPC_I2C,
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT8(address, MPCI2CState),
+        VMSTATE_UINT8(adr, MPCI2CState),
+        VMSTATE_UINT8(fdr, MPCI2CState),
+        VMSTATE_UINT8(cr, MPCI2CState),
+        VMSTATE_UINT8(sr, MPCI2CState),
+        VMSTATE_UINT8(dr, MPCI2CState),
+        VMSTATE_UINT8(dfssr, MPCI2CState),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+static void mpc_i2c_realize(DeviceState *dev, Error **errp)
+{
+    MPCI2CState *i2c = MPC_I2C(dev);
+    sysbus_init_irq(SYS_BUS_DEVICE(dev), &i2c->irq);
+    memory_region_init_io(&i2c->iomem, OBJECT(i2c), &i2c_ops, i2c,
+                          "mpc-i2c", 0x14);
+    sysbus_init_mmio(SYS_BUS_DEVICE(dev), &i2c->iomem);
+    i2c->bus = i2c_init_bus(dev, "i2c");
+}
+
+static void mpc_i2c_class_init(ObjectClass *klass, void *data)
+{
+    DeviceClass *dc = DEVICE_CLASS(klass);
+
+    dc->vmsd = &mpc_i2c_vmstate;
+    dc->reset = mpc_i2c_reset;
+    dc->realize = mpc_i2c_realize;
+    dc->desc = "MPC I2C Controller";
+}
+
+static const TypeInfo mpc_i2c_type_info = {
+    .name = TYPE_MPC_I2C,
+    .parent = TYPE_SYS_BUS_DEVICE,
+    .instance_size = sizeof(MPCI2CState),
+    .class_init = mpc_i2c_class_init,
+};
+
+static void mpc_i2c_register_types(void)
+{
+    type_register_static(&mpc_i2c_type_info);
+}
+
+type_init(mpc_i2c_register_types)
diff --git a/hw/i2c/npcm7xx_smbus.c b/hw/i2c/npcm7xx_smbus.c
new file mode 100644
index 000000000..e7e0ba66f
--- /dev/null
+++ b/hw/i2c/npcm7xx_smbus.c
@@ -0,0 +1,1098 @@
+/*
+ * Nuvoton NPCM7xx SMBus Module.
+ *
+ * Copyright 2020 Google LLC
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
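+ *
+ * The register file is banked: CTL3[BNK_SEL] selects whether offsets in
+ * the 0x10..0x1e range decode as bank 0 (extra slave addresses, timing
+ * and FIFO enable) or bank 1 (FIFO status and control). Offsets below
+ * 0x10, together with CST2, CST3 and VER, are common to both banks.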
+ */ + +#include "qemu/osdep.h" + +#include "hw/i2c/npcm7xx_smbus.h" +#include "migration/vmstate.h" +#include "qemu/bitops.h" +#include "qemu/guest-random.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "qemu/units.h" + +#include "trace.h" + +enum NPCM7xxSMBusCommonRegister { + NPCM7XX_SMB_SDA = 0x0, + NPCM7XX_SMB_ST = 0x2, + NPCM7XX_SMB_CST = 0x4, + NPCM7XX_SMB_CTL1 = 0x6, + NPCM7XX_SMB_ADDR1 = 0x8, + NPCM7XX_SMB_CTL2 = 0xa, + NPCM7XX_SMB_ADDR2 = 0xc, + NPCM7XX_SMB_CTL3 = 0xe, + NPCM7XX_SMB_CST2 = 0x18, + NPCM7XX_SMB_CST3 = 0x19, + NPCM7XX_SMB_VER = 0x1f, +}; + +enum NPCM7xxSMBusBank0Register { + NPCM7XX_SMB_ADDR3 = 0x10, + NPCM7XX_SMB_ADDR7 = 0x11, + NPCM7XX_SMB_ADDR4 = 0x12, + NPCM7XX_SMB_ADDR8 = 0x13, + NPCM7XX_SMB_ADDR5 = 0x14, + NPCM7XX_SMB_ADDR9 = 0x15, + NPCM7XX_SMB_ADDR6 = 0x16, + NPCM7XX_SMB_ADDR10 = 0x17, + NPCM7XX_SMB_CTL4 = 0x1a, + NPCM7XX_SMB_CTL5 = 0x1b, + NPCM7XX_SMB_SCLLT = 0x1c, + NPCM7XX_SMB_FIF_CTL = 0x1d, + NPCM7XX_SMB_SCLHT = 0x1e, +}; + +enum NPCM7xxSMBusBank1Register { + NPCM7XX_SMB_FIF_CTS = 0x10, + NPCM7XX_SMB_FAIR_PER = 0x11, + NPCM7XX_SMB_TXF_CTL = 0x12, + NPCM7XX_SMB_T_OUT = 0x14, + NPCM7XX_SMB_TXF_STS = 0x1a, + NPCM7XX_SMB_RXF_STS = 0x1c, + NPCM7XX_SMB_RXF_CTL = 0x1e, +}; + +/* ST fields */ +#define NPCM7XX_SMBST_STP BIT(7) +#define NPCM7XX_SMBST_SDAST BIT(6) +#define NPCM7XX_SMBST_BER BIT(5) +#define NPCM7XX_SMBST_NEGACK BIT(4) +#define NPCM7XX_SMBST_STASTR BIT(3) +#define NPCM7XX_SMBST_NMATCH BIT(2) +#define NPCM7XX_SMBST_MODE BIT(1) +#define NPCM7XX_SMBST_XMIT BIT(0) + +/* CST fields */ +#define NPCM7XX_SMBCST_ARPMATCH BIT(7) +#define NPCM7XX_SMBCST_MATCHAF BIT(6) +#define NPCM7XX_SMBCST_TGSCL BIT(5) +#define NPCM7XX_SMBCST_TSDA BIT(4) +#define NPCM7XX_SMBCST_GCMATCH BIT(3) +#define NPCM7XX_SMBCST_MATCH BIT(2) +#define NPCM7XX_SMBCST_BB BIT(1) +#define NPCM7XX_SMBCST_BUSY BIT(0) + +/* CST2 fields */ +#define NPCM7XX_SMBCST2_INTSTS BIT(7) +#define NPCM7XX_SMBCST2_MATCH7F BIT(6) +#define NPCM7XX_SMBCST2_MATCH6F BIT(5) +#define NPCM7XX_SMBCST2_MATCH5F BIT(4) +#define NPCM7XX_SMBCST2_MATCH4F BIT(3) +#define NPCM7XX_SMBCST2_MATCH3F BIT(2) +#define NPCM7XX_SMBCST2_MATCH2F BIT(1) +#define NPCM7XX_SMBCST2_MATCH1F BIT(0) + +/* CST3 fields */ +#define NPCM7XX_SMBCST3_EO_BUSY BIT(7) +#define NPCM7XX_SMBCST3_MATCH10F BIT(2) +#define NPCM7XX_SMBCST3_MATCH9F BIT(1) +#define NPCM7XX_SMBCST3_MATCH8F BIT(0) + +/* CTL1 fields */ +#define NPCM7XX_SMBCTL1_STASTRE BIT(7) +#define NPCM7XX_SMBCTL1_NMINTE BIT(6) +#define NPCM7XX_SMBCTL1_GCMEN BIT(5) +#define NPCM7XX_SMBCTL1_ACK BIT(4) +#define NPCM7XX_SMBCTL1_EOBINTE BIT(3) +#define NPCM7XX_SMBCTL1_INTEN BIT(2) +#define NPCM7XX_SMBCTL1_STOP BIT(1) +#define NPCM7XX_SMBCTL1_START BIT(0) + +/* CTL2 fields */ +#define NPCM7XX_SMBCTL2_SCLFRQ(rv) extract8((rv), 1, 6) +#define NPCM7XX_SMBCTL2_ENABLE BIT(0) + +/* CTL3 fields */ +#define NPCM7XX_SMBCTL3_SCL_LVL BIT(7) +#define NPCM7XX_SMBCTL3_SDA_LVL BIT(6) +#define NPCM7XX_SMBCTL3_BNK_SEL BIT(5) +#define NPCM7XX_SMBCTL3_400K_MODE BIT(4) +#define NPCM7XX_SMBCTL3_IDL_START BIT(3) +#define NPCM7XX_SMBCTL3_ARPMEN BIT(2) +#define NPCM7XX_SMBCTL3_SCLFRQ(rv) extract8((rv), 0, 2) + +/* ADDR fields */ +#define NPCM7XX_ADDR_EN BIT(7) +#define NPCM7XX_ADDR_A(rv) extract8((rv), 0, 6) + +/* FIFO Mode Register Fields */ +/* FIF_CTL fields */ +#define NPCM7XX_SMBFIF_CTL_FIFO_EN BIT(4) +#define NPCM7XX_SMBFIF_CTL_FAIR_RDY_IE BIT(2) +#define NPCM7XX_SMBFIF_CTL_FAIR_RDY BIT(1) +#define NPCM7XX_SMBFIF_CTL_FAIR_BUSY BIT(0) +/* FIF_CTS fields */ +#define NPCM7XX_SMBFIF_CTS_STR BIT(7) +#define 
NPCM7XX_SMBFIF_CTS_CLR_FIFO BIT(6) +#define NPCM7XX_SMBFIF_CTS_RFTE_IE BIT(3) +#define NPCM7XX_SMBFIF_CTS_RXF_TXE BIT(1) +/* TXF_CTL fields */ +#define NPCM7XX_SMBTXF_CTL_THR_TXIE BIT(6) +#define NPCM7XX_SMBTXF_CTL_TX_THR(rv) extract8((rv), 0, 5) +/* T_OUT fields */ +#define NPCM7XX_SMBT_OUT_ST BIT(7) +#define NPCM7XX_SMBT_OUT_IE BIT(6) +#define NPCM7XX_SMBT_OUT_CLKDIV(rv) extract8((rv), 0, 6) +/* TXF_STS fields */ +#define NPCM7XX_SMBTXF_STS_TX_THST BIT(6) +#define NPCM7XX_SMBTXF_STS_TX_BYTES(rv) extract8((rv), 0, 5) +/* RXF_STS fields */ +#define NPCM7XX_SMBRXF_STS_RX_THST BIT(6) +#define NPCM7XX_SMBRXF_STS_RX_BYTES(rv) extract8((rv), 0, 5) +/* RXF_CTL fields */ +#define NPCM7XX_SMBRXF_CTL_THR_RXIE BIT(6) +#define NPCM7XX_SMBRXF_CTL_LAST BIT(5) +#define NPCM7XX_SMBRXF_CTL_RX_THR(rv) extract8((rv), 0, 5) + +#define KEEP_OLD_BIT(o, n, b) (((n) & (~(b))) | ((o) & (b))) +#define WRITE_ONE_CLEAR(o, n, b) ((n) & (b) ? (o) & (~(b)) : (o)) + +#define NPCM7XX_SMBUS_ENABLED(s) ((s)->ctl2 & NPCM7XX_SMBCTL2_ENABLE) +#define NPCM7XX_SMBUS_FIFO_ENABLED(s) ((s)->fif_ctl & \ + NPCM7XX_SMBFIF_CTL_FIFO_EN) + +/* VERSION fields values, read-only. */ +#define NPCM7XX_SMBUS_VERSION_NUMBER 1 +#define NPCM7XX_SMBUS_VERSION_FIFO_SUPPORTED 1 + +/* Reset values */ +#define NPCM7XX_SMB_ST_INIT_VAL 0x00 +#define NPCM7XX_SMB_CST_INIT_VAL 0x10 +#define NPCM7XX_SMB_CST2_INIT_VAL 0x00 +#define NPCM7XX_SMB_CST3_INIT_VAL 0x00 +#define NPCM7XX_SMB_CTL1_INIT_VAL 0x00 +#define NPCM7XX_SMB_CTL2_INIT_VAL 0x00 +#define NPCM7XX_SMB_CTL3_INIT_VAL 0xc0 +#define NPCM7XX_SMB_CTL4_INIT_VAL 0x07 +#define NPCM7XX_SMB_CTL5_INIT_VAL 0x00 +#define NPCM7XX_SMB_ADDR_INIT_VAL 0x00 +#define NPCM7XX_SMB_SCLLT_INIT_VAL 0x00 +#define NPCM7XX_SMB_SCLHT_INIT_VAL 0x00 +#define NPCM7XX_SMB_FIF_CTL_INIT_VAL 0x00 +#define NPCM7XX_SMB_FIF_CTS_INIT_VAL 0x00 +#define NPCM7XX_SMB_FAIR_PER_INIT_VAL 0x00 +#define NPCM7XX_SMB_TXF_CTL_INIT_VAL 0x00 +#define NPCM7XX_SMB_T_OUT_INIT_VAL 0x3f +#define NPCM7XX_SMB_TXF_STS_INIT_VAL 0x00 +#define NPCM7XX_SMB_RXF_STS_INIT_VAL 0x00 +#define NPCM7XX_SMB_RXF_CTL_INIT_VAL 0x01 + +static uint8_t npcm7xx_smbus_get_version(void) +{ + return NPCM7XX_SMBUS_VERSION_FIFO_SUPPORTED << 7 | + NPCM7XX_SMBUS_VERSION_NUMBER; +} + +static void npcm7xx_smbus_update_irq(NPCM7xxSMBusState *s) +{ + int level; + + if (s->ctl1 & NPCM7XX_SMBCTL1_INTEN) { + level = !!((s->ctl1 & NPCM7XX_SMBCTL1_NMINTE && + s->st & NPCM7XX_SMBST_NMATCH) || + (s->st & NPCM7XX_SMBST_BER) || + (s->st & NPCM7XX_SMBST_NEGACK) || + (s->st & NPCM7XX_SMBST_SDAST) || + (s->ctl1 & NPCM7XX_SMBCTL1_STASTRE && + s->st & NPCM7XX_SMBST_SDAST) || + (s->ctl1 & NPCM7XX_SMBCTL1_EOBINTE && + s->cst3 & NPCM7XX_SMBCST3_EO_BUSY) || + (s->rxf_ctl & NPCM7XX_SMBRXF_CTL_THR_RXIE && + s->rxf_sts & NPCM7XX_SMBRXF_STS_RX_THST) || + (s->txf_ctl & NPCM7XX_SMBTXF_CTL_THR_TXIE && + s->txf_sts & NPCM7XX_SMBTXF_STS_TX_THST) || + (s->fif_cts & NPCM7XX_SMBFIF_CTS_RFTE_IE && + s->fif_cts & NPCM7XX_SMBFIF_CTS_RXF_TXE)); + + if (level) { + s->cst2 |= NPCM7XX_SMBCST2_INTSTS; + } else { + s->cst2 &= ~NPCM7XX_SMBCST2_INTSTS; + } + qemu_set_irq(s->irq, level); + } +} + +static void npcm7xx_smbus_nack(NPCM7xxSMBusState *s) +{ + s->st &= ~NPCM7XX_SMBST_SDAST; + s->st |= NPCM7XX_SMBST_NEGACK; + s->status = NPCM7XX_SMBUS_STATUS_NEGACK; +} + +static void npcm7xx_smbus_clear_buffer(NPCM7xxSMBusState *s) +{ + s->fif_cts &= ~NPCM7XX_SMBFIF_CTS_RXF_TXE; + s->txf_sts = 0; + s->rxf_sts = 0; +} + +static void npcm7xx_smbus_send_byte(NPCM7xxSMBusState *s, uint8_t value) +{ + int rv = i2c_send(s->bus, value); + + if 
(rv) { + npcm7xx_smbus_nack(s); + } else { + s->st |= NPCM7XX_SMBST_SDAST; + if (NPCM7XX_SMBUS_FIFO_ENABLED(s)) { + s->fif_cts |= NPCM7XX_SMBFIF_CTS_RXF_TXE; + if (NPCM7XX_SMBTXF_STS_TX_BYTES(s->txf_sts) == + NPCM7XX_SMBTXF_CTL_TX_THR(s->txf_ctl)) { + s->txf_sts = NPCM7XX_SMBTXF_STS_TX_THST; + } else { + s->txf_sts = 0; + } + } + } + trace_npcm7xx_smbus_send_byte((DEVICE(s)->canonical_path), value, !rv); + npcm7xx_smbus_update_irq(s); +} + +static void npcm7xx_smbus_recv_byte(NPCM7xxSMBusState *s) +{ + s->sda = i2c_recv(s->bus); + s->st |= NPCM7XX_SMBST_SDAST; + if (s->st & NPCM7XX_SMBCTL1_ACK) { + trace_npcm7xx_smbus_nack(DEVICE(s)->canonical_path); + i2c_nack(s->bus); + s->st &= NPCM7XX_SMBCTL1_ACK; + } + trace_npcm7xx_smbus_recv_byte((DEVICE(s)->canonical_path), s->sda); + npcm7xx_smbus_update_irq(s); +} + +static void npcm7xx_smbus_recv_fifo(NPCM7xxSMBusState *s) +{ + uint8_t expected_bytes = NPCM7XX_SMBRXF_CTL_RX_THR(s->rxf_ctl); + uint8_t received_bytes = NPCM7XX_SMBRXF_STS_RX_BYTES(s->rxf_sts); + uint8_t pos; + + if (received_bytes == expected_bytes) { + return; + } + + while (received_bytes < expected_bytes && + received_bytes < NPCM7XX_SMBUS_FIFO_SIZE) { + pos = (s->rx_cur + received_bytes) % NPCM7XX_SMBUS_FIFO_SIZE; + s->rx_fifo[pos] = i2c_recv(s->bus); + trace_npcm7xx_smbus_recv_byte((DEVICE(s)->canonical_path), + s->rx_fifo[pos]); + ++received_bytes; + } + + trace_npcm7xx_smbus_recv_fifo((DEVICE(s)->canonical_path), + received_bytes, expected_bytes); + s->rxf_sts = received_bytes; + if (unlikely(received_bytes < expected_bytes)) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: invalid rx_thr value: 0x%02x\n", + DEVICE(s)->canonical_path, expected_bytes); + return; + } + + s->rxf_sts |= NPCM7XX_SMBRXF_STS_RX_THST; + if (s->rxf_ctl & NPCM7XX_SMBRXF_CTL_LAST) { + trace_npcm7xx_smbus_nack(DEVICE(s)->canonical_path); + i2c_nack(s->bus); + s->rxf_ctl &= ~NPCM7XX_SMBRXF_CTL_LAST; + } + if (received_bytes == NPCM7XX_SMBUS_FIFO_SIZE) { + s->st |= NPCM7XX_SMBST_SDAST; + s->fif_cts |= NPCM7XX_SMBFIF_CTS_RXF_TXE; + } else if (!(s->rxf_ctl & NPCM7XX_SMBRXF_CTL_THR_RXIE)) { + s->st |= NPCM7XX_SMBST_SDAST; + } else { + s->st &= ~NPCM7XX_SMBST_SDAST; + } + npcm7xx_smbus_update_irq(s); +} + +static void npcm7xx_smbus_read_byte_fifo(NPCM7xxSMBusState *s) +{ + uint8_t received_bytes = NPCM7XX_SMBRXF_STS_RX_BYTES(s->rxf_sts); + + if (received_bytes == 0) { + npcm7xx_smbus_recv_fifo(s); + return; + } + + s->sda = s->rx_fifo[s->rx_cur]; + s->rx_cur = (s->rx_cur + 1u) % NPCM7XX_SMBUS_FIFO_SIZE; + --s->rxf_sts; + npcm7xx_smbus_update_irq(s); +} + +static void npcm7xx_smbus_start(NPCM7xxSMBusState *s) +{ + /* + * We can start the bus if one of these is true: + * 1. The bus is idle (so we can request it) + * 2. We are the occupier (it's a repeated start condition.) 
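+ * If neither holds, the start request fails: master mode is dropped and
+ * the bus error (BER) status bit is raised instead.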
+ */ + int available = !i2c_bus_busy(s->bus) || + s->status != NPCM7XX_SMBUS_STATUS_IDLE; + + if (available) { + s->st |= NPCM7XX_SMBST_MODE | NPCM7XX_SMBST_XMIT | NPCM7XX_SMBST_SDAST; + s->cst |= NPCM7XX_SMBCST_BUSY; + if (NPCM7XX_SMBUS_FIFO_ENABLED(s)) { + s->fif_cts |= NPCM7XX_SMBFIF_CTS_RXF_TXE; + } + } else { + s->st &= ~NPCM7XX_SMBST_MODE; + s->cst &= ~NPCM7XX_SMBCST_BUSY; + s->st |= NPCM7XX_SMBST_BER; + } + + trace_npcm7xx_smbus_start(DEVICE(s)->canonical_path, available); + s->cst |= NPCM7XX_SMBCST_BB; + s->status = NPCM7XX_SMBUS_STATUS_IDLE; + npcm7xx_smbus_update_irq(s); +} + +static void npcm7xx_smbus_send_address(NPCM7xxSMBusState *s, uint8_t value) +{ + int recv; + int rv; + + recv = value & BIT(0); + rv = i2c_start_transfer(s->bus, value >> 1, recv); + trace_npcm7xx_smbus_send_address(DEVICE(s)->canonical_path, + value >> 1, recv, !rv); + if (rv) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: requesting i2c bus for 0x%02x failed: %d\n", + DEVICE(s)->canonical_path, value, rv); + /* Failed to start transfer. NACK to reject.*/ + if (recv) { + s->st &= ~NPCM7XX_SMBST_XMIT; + } else { + s->st |= NPCM7XX_SMBST_XMIT; + } + npcm7xx_smbus_nack(s); + npcm7xx_smbus_update_irq(s); + return; + } + + s->st &= ~NPCM7XX_SMBST_NEGACK; + if (recv) { + s->status = NPCM7XX_SMBUS_STATUS_RECEIVING; + s->st &= ~NPCM7XX_SMBST_XMIT; + } else { + s->status = NPCM7XX_SMBUS_STATUS_SENDING; + s->st |= NPCM7XX_SMBST_XMIT; + } + + if (s->ctl1 & NPCM7XX_SMBCTL1_STASTRE) { + s->st |= NPCM7XX_SMBST_STASTR; + if (!recv) { + s->st |= NPCM7XX_SMBST_SDAST; + } + } else if (recv) { + s->st |= NPCM7XX_SMBST_SDAST; + if (NPCM7XX_SMBUS_FIFO_ENABLED(s)) { + npcm7xx_smbus_recv_fifo(s); + } else { + npcm7xx_smbus_recv_byte(s); + } + } else if (NPCM7XX_SMBUS_FIFO_ENABLED(s)) { + s->st |= NPCM7XX_SMBST_SDAST; + s->fif_cts |= NPCM7XX_SMBFIF_CTS_RXF_TXE; + } + npcm7xx_smbus_update_irq(s); +} + +static void npcm7xx_smbus_execute_stop(NPCM7xxSMBusState *s) +{ + i2c_end_transfer(s->bus); + s->st = 0; + s->cst = 0; + s->status = NPCM7XX_SMBUS_STATUS_IDLE; + s->cst3 |= NPCM7XX_SMBCST3_EO_BUSY; + trace_npcm7xx_smbus_stop(DEVICE(s)->canonical_path); + npcm7xx_smbus_update_irq(s); +} + + +static void npcm7xx_smbus_stop(NPCM7xxSMBusState *s) +{ + if (s->st & NPCM7XX_SMBST_MODE) { + switch (s->status) { + case NPCM7XX_SMBUS_STATUS_RECEIVING: + case NPCM7XX_SMBUS_STATUS_STOPPING_LAST_RECEIVE: + s->status = NPCM7XX_SMBUS_STATUS_STOPPING_LAST_RECEIVE; + break; + + case NPCM7XX_SMBUS_STATUS_NEGACK: + s->status = NPCM7XX_SMBUS_STATUS_STOPPING_NEGACK; + break; + + default: + npcm7xx_smbus_execute_stop(s); + break; + } + } +} + +static uint8_t npcm7xx_smbus_read_sda(NPCM7xxSMBusState *s) +{ + uint8_t value = s->sda; + + switch (s->status) { + case NPCM7XX_SMBUS_STATUS_STOPPING_LAST_RECEIVE: + if (NPCM7XX_SMBUS_FIFO_ENABLED(s)) { + if (NPCM7XX_SMBRXF_STS_RX_BYTES(s->rxf_sts) <= 1) { + npcm7xx_smbus_execute_stop(s); + } + if (NPCM7XX_SMBRXF_STS_RX_BYTES(s->rxf_sts) == 0) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: read to SDA with an empty rx-fifo buffer, " + "result undefined: %u\n", + DEVICE(s)->canonical_path, s->sda); + break; + } + npcm7xx_smbus_read_byte_fifo(s); + value = s->sda; + } else { + npcm7xx_smbus_execute_stop(s); + } + break; + + case NPCM7XX_SMBUS_STATUS_RECEIVING: + if (NPCM7XX_SMBUS_FIFO_ENABLED(s)) { + npcm7xx_smbus_read_byte_fifo(s); + value = s->sda; + } else { + npcm7xx_smbus_recv_byte(s); + } + break; + + default: + /* Do nothing */ + break; + } + + return value; +} + +static void npcm7xx_smbus_write_sda(NPCM7xxSMBusState 
*s, uint8_t value) +{ + s->sda = value; + if (s->st & NPCM7XX_SMBST_MODE) { + switch (s->status) { + case NPCM7XX_SMBUS_STATUS_IDLE: + npcm7xx_smbus_send_address(s, value); + break; + case NPCM7XX_SMBUS_STATUS_SENDING: + npcm7xx_smbus_send_byte(s, value); + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: write to SDA in invalid status %d: %u\n", + DEVICE(s)->canonical_path, s->status, value); + break; + } + } +} + +static void npcm7xx_smbus_write_st(NPCM7xxSMBusState *s, uint8_t value) +{ + s->st = WRITE_ONE_CLEAR(s->st, value, NPCM7XX_SMBST_STP); + s->st = WRITE_ONE_CLEAR(s->st, value, NPCM7XX_SMBST_BER); + s->st = WRITE_ONE_CLEAR(s->st, value, NPCM7XX_SMBST_STASTR); + s->st = WRITE_ONE_CLEAR(s->st, value, NPCM7XX_SMBST_NMATCH); + + if (value & NPCM7XX_SMBST_NEGACK) { + s->st &= ~NPCM7XX_SMBST_NEGACK; + if (s->status == NPCM7XX_SMBUS_STATUS_STOPPING_NEGACK) { + npcm7xx_smbus_execute_stop(s); + } + } + + if (value & NPCM7XX_SMBST_STASTR && + s->status == NPCM7XX_SMBUS_STATUS_RECEIVING) { + if (NPCM7XX_SMBUS_FIFO_ENABLED(s)) { + npcm7xx_smbus_recv_fifo(s); + } else { + npcm7xx_smbus_recv_byte(s); + } + } + + npcm7xx_smbus_update_irq(s); +} + +static void npcm7xx_smbus_write_cst(NPCM7xxSMBusState *s, uint8_t value) +{ + uint8_t new_value = s->cst; + + s->cst = WRITE_ONE_CLEAR(new_value, value, NPCM7XX_SMBCST_BB); + npcm7xx_smbus_update_irq(s); +} + +static void npcm7xx_smbus_write_cst3(NPCM7xxSMBusState *s, uint8_t value) +{ + s->cst3 = WRITE_ONE_CLEAR(s->cst3, value, NPCM7XX_SMBCST3_EO_BUSY); + npcm7xx_smbus_update_irq(s); +} + +static void npcm7xx_smbus_write_ctl1(NPCM7xxSMBusState *s, uint8_t value) +{ + s->ctl1 = KEEP_OLD_BIT(s->ctl1, value, + NPCM7XX_SMBCTL1_START | NPCM7XX_SMBCTL1_STOP | NPCM7XX_SMBCTL1_ACK); + + if (value & NPCM7XX_SMBCTL1_START) { + npcm7xx_smbus_start(s); + } + + if (value & NPCM7XX_SMBCTL1_STOP) { + npcm7xx_smbus_stop(s); + } + + npcm7xx_smbus_update_irq(s); +} + +static void npcm7xx_smbus_write_ctl2(NPCM7xxSMBusState *s, uint8_t value) +{ + s->ctl2 = value; + + if (!NPCM7XX_SMBUS_ENABLED(s)) { + /* Disable this SMBus module. */ + s->ctl1 = 0; + s->st = 0; + s->cst3 = s->cst3 & (~NPCM7XX_SMBCST3_EO_BUSY); + s->cst = 0; + npcm7xx_smbus_clear_buffer(s); + } +} + +static void npcm7xx_smbus_write_ctl3(NPCM7xxSMBusState *s, uint8_t value) +{ + uint8_t old_ctl3 = s->ctl3; + + /* Write to SDA and SCL bits are ignored. 
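+ * CTL3[SCL_LVL] and CTL3[SDA_LVL] reflect the physical line levels and
+ * are read-only, so their old values are preserved here.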
*/ + s->ctl3 = KEEP_OLD_BIT(old_ctl3, value, + NPCM7XX_SMBCTL3_SCL_LVL | NPCM7XX_SMBCTL3_SDA_LVL); +} + +static void npcm7xx_smbus_write_fif_ctl(NPCM7xxSMBusState *s, uint8_t value) +{ + uint8_t new_ctl = value; + + new_ctl = KEEP_OLD_BIT(s->fif_ctl, new_ctl, NPCM7XX_SMBFIF_CTL_FAIR_RDY); + new_ctl = WRITE_ONE_CLEAR(new_ctl, value, NPCM7XX_SMBFIF_CTL_FAIR_RDY); + new_ctl = KEEP_OLD_BIT(s->fif_ctl, new_ctl, NPCM7XX_SMBFIF_CTL_FAIR_BUSY); + s->fif_ctl = new_ctl; +} + +static void npcm7xx_smbus_write_fif_cts(NPCM7xxSMBusState *s, uint8_t value) +{ + s->fif_cts = WRITE_ONE_CLEAR(s->fif_cts, value, NPCM7XX_SMBFIF_CTS_STR); + s->fif_cts = WRITE_ONE_CLEAR(s->fif_cts, value, NPCM7XX_SMBFIF_CTS_RXF_TXE); + s->fif_cts = KEEP_OLD_BIT(value, s->fif_cts, NPCM7XX_SMBFIF_CTS_RFTE_IE); + + if (value & NPCM7XX_SMBFIF_CTS_CLR_FIFO) { + npcm7xx_smbus_clear_buffer(s); + } +} + +static void npcm7xx_smbus_write_txf_ctl(NPCM7xxSMBusState *s, uint8_t value) +{ + s->txf_ctl = value; +} + +static void npcm7xx_smbus_write_t_out(NPCM7xxSMBusState *s, uint8_t value) +{ + uint8_t new_t_out = value; + + if ((value & NPCM7XX_SMBT_OUT_ST) || (!(s->t_out & NPCM7XX_SMBT_OUT_ST))) { + new_t_out &= ~NPCM7XX_SMBT_OUT_ST; + } else { + new_t_out |= NPCM7XX_SMBT_OUT_ST; + } + + s->t_out = new_t_out; +} + +static void npcm7xx_smbus_write_txf_sts(NPCM7xxSMBusState *s, uint8_t value) +{ + s->txf_sts = WRITE_ONE_CLEAR(s->txf_sts, value, NPCM7XX_SMBTXF_STS_TX_THST); +} + +static void npcm7xx_smbus_write_rxf_sts(NPCM7xxSMBusState *s, uint8_t value) +{ + if (value & NPCM7XX_SMBRXF_STS_RX_THST) { + s->rxf_sts &= ~NPCM7XX_SMBRXF_STS_RX_THST; + if (s->status == NPCM7XX_SMBUS_STATUS_RECEIVING) { + npcm7xx_smbus_recv_fifo(s); + } + } +} + +static void npcm7xx_smbus_write_rxf_ctl(NPCM7xxSMBusState *s, uint8_t value) +{ + uint8_t new_ctl = value; + + if (!(value & NPCM7XX_SMBRXF_CTL_LAST)) { + new_ctl = KEEP_OLD_BIT(s->rxf_ctl, new_ctl, NPCM7XX_SMBRXF_CTL_LAST); + } + s->rxf_ctl = new_ctl; +} + +static uint64_t npcm7xx_smbus_read(void *opaque, hwaddr offset, unsigned size) +{ + NPCM7xxSMBusState *s = opaque; + uint64_t value = 0; + uint8_t bank = s->ctl3 & NPCM7XX_SMBCTL3_BNK_SEL; + + /* The order of the registers are their order in memory. */ + switch (offset) { + case NPCM7XX_SMB_SDA: + value = npcm7xx_smbus_read_sda(s); + break; + + case NPCM7XX_SMB_ST: + value = s->st; + break; + + case NPCM7XX_SMB_CST: + value = s->cst; + break; + + case NPCM7XX_SMB_CTL1: + value = s->ctl1; + break; + + case NPCM7XX_SMB_ADDR1: + value = s->addr[0]; + break; + + case NPCM7XX_SMB_CTL2: + value = s->ctl2; + break; + + case NPCM7XX_SMB_ADDR2: + value = s->addr[1]; + break; + + case NPCM7XX_SMB_CTL3: + value = s->ctl3; + break; + + case NPCM7XX_SMB_CST2: + value = s->cst2; + break; + + case NPCM7XX_SMB_CST3: + value = s->cst3; + break; + + case NPCM7XX_SMB_VER: + value = npcm7xx_smbus_get_version(); + break; + + /* This register is either invalid or banked at this point. 
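+ * CTL3[BNK_SEL] decides which of the two register banks the offset
+ * decodes into.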
*/ + default: + if (bank) { + /* Bank 1 */ + switch (offset) { + case NPCM7XX_SMB_FIF_CTS: + value = s->fif_cts; + break; + + case NPCM7XX_SMB_FAIR_PER: + value = s->fair_per; + break; + + case NPCM7XX_SMB_TXF_CTL: + value = s->txf_ctl; + break; + + case NPCM7XX_SMB_T_OUT: + value = s->t_out; + break; + + case NPCM7XX_SMB_TXF_STS: + value = s->txf_sts; + break; + + case NPCM7XX_SMB_RXF_STS: + value = s->rxf_sts; + break; + + case NPCM7XX_SMB_RXF_CTL: + value = s->rxf_ctl; + break; + + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: read from invalid offset 0x%" HWADDR_PRIx "\n", + DEVICE(s)->canonical_path, offset); + break; + } + } else { + /* Bank 0 */ + switch (offset) { + case NPCM7XX_SMB_ADDR3: + value = s->addr[2]; + break; + + case NPCM7XX_SMB_ADDR7: + value = s->addr[6]; + break; + + case NPCM7XX_SMB_ADDR4: + value = s->addr[3]; + break; + + case NPCM7XX_SMB_ADDR8: + value = s->addr[7]; + break; + + case NPCM7XX_SMB_ADDR5: + value = s->addr[4]; + break; + + case NPCM7XX_SMB_ADDR9: + value = s->addr[8]; + break; + + case NPCM7XX_SMB_ADDR6: + value = s->addr[5]; + break; + + case NPCM7XX_SMB_ADDR10: + value = s->addr[9]; + break; + + case NPCM7XX_SMB_CTL4: + value = s->ctl4; + break; + + case NPCM7XX_SMB_CTL5: + value = s->ctl5; + break; + + case NPCM7XX_SMB_SCLLT: + value = s->scllt; + break; + + case NPCM7XX_SMB_FIF_CTL: + value = s->fif_ctl; + break; + + case NPCM7XX_SMB_SCLHT: + value = s->sclht; + break; + + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: read from invalid offset 0x%" HWADDR_PRIx "\n", + DEVICE(s)->canonical_path, offset); + break; + } + } + break; + } + + trace_npcm7xx_smbus_read(DEVICE(s)->canonical_path, offset, value, size); + + return value; +} + +static void npcm7xx_smbus_write(void *opaque, hwaddr offset, uint64_t value, + unsigned size) +{ + NPCM7xxSMBusState *s = opaque; + uint8_t bank = s->ctl3 & NPCM7XX_SMBCTL3_BNK_SEL; + + trace_npcm7xx_smbus_write(DEVICE(s)->canonical_path, offset, value, size); + + /* The order of the registers are their order in memory. */ + switch (offset) { + case NPCM7XX_SMB_SDA: + npcm7xx_smbus_write_sda(s, value); + break; + + case NPCM7XX_SMB_ST: + npcm7xx_smbus_write_st(s, value); + break; + + case NPCM7XX_SMB_CST: + npcm7xx_smbus_write_cst(s, value); + break; + + case NPCM7XX_SMB_CTL1: + npcm7xx_smbus_write_ctl1(s, value); + break; + + case NPCM7XX_SMB_ADDR1: + s->addr[0] = value; + break; + + case NPCM7XX_SMB_CTL2: + npcm7xx_smbus_write_ctl2(s, value); + break; + + case NPCM7XX_SMB_ADDR2: + s->addr[1] = value; + break; + + case NPCM7XX_SMB_CTL3: + npcm7xx_smbus_write_ctl3(s, value); + break; + + case NPCM7XX_SMB_CST2: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: write to read-only reg: offset 0x%" HWADDR_PRIx "\n", + DEVICE(s)->canonical_path, offset); + break; + + case NPCM7XX_SMB_CST3: + npcm7xx_smbus_write_cst3(s, value); + break; + + case NPCM7XX_SMB_VER: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: write to read-only reg: offset 0x%" HWADDR_PRIx "\n", + DEVICE(s)->canonical_path, offset); + break; + + /* This register is either invalid or banked at this point. 
*/ + default: + if (bank) { + /* Bank 1 */ + switch (offset) { + case NPCM7XX_SMB_FIF_CTS: + npcm7xx_smbus_write_fif_cts(s, value); + break; + + case NPCM7XX_SMB_FAIR_PER: + s->fair_per = value; + break; + + case NPCM7XX_SMB_TXF_CTL: + npcm7xx_smbus_write_txf_ctl(s, value); + break; + + case NPCM7XX_SMB_T_OUT: + npcm7xx_smbus_write_t_out(s, value); + break; + + case NPCM7XX_SMB_TXF_STS: + npcm7xx_smbus_write_txf_sts(s, value); + break; + + case NPCM7XX_SMB_RXF_STS: + npcm7xx_smbus_write_rxf_sts(s, value); + break; + + case NPCM7XX_SMB_RXF_CTL: + npcm7xx_smbus_write_rxf_ctl(s, value); + break; + + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: write to invalid offset 0x%" HWADDR_PRIx "\n", + DEVICE(s)->canonical_path, offset); + break; + } + } else { + /* Bank 0 */ + switch (offset) { + case NPCM7XX_SMB_ADDR3: + s->addr[2] = value; + break; + + case NPCM7XX_SMB_ADDR7: + s->addr[6] = value; + break; + + case NPCM7XX_SMB_ADDR4: + s->addr[3] = value; + break; + + case NPCM7XX_SMB_ADDR8: + s->addr[7] = value; + break; + + case NPCM7XX_SMB_ADDR5: + s->addr[4] = value; + break; + + case NPCM7XX_SMB_ADDR9: + s->addr[8] = value; + break; + + case NPCM7XX_SMB_ADDR6: + s->addr[5] = value; + break; + + case NPCM7XX_SMB_ADDR10: + s->addr[9] = value; + break; + + case NPCM7XX_SMB_CTL4: + s->ctl4 = value; + break; + + case NPCM7XX_SMB_CTL5: + s->ctl5 = value; + break; + + case NPCM7XX_SMB_SCLLT: + s->scllt = value; + break; + + case NPCM7XX_SMB_FIF_CTL: + npcm7xx_smbus_write_fif_ctl(s, value); + break; + + case NPCM7XX_SMB_SCLHT: + s->sclht = value; + break; + + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: write to invalid offset 0x%" HWADDR_PRIx "\n", + DEVICE(s)->canonical_path, offset); + break; + } + } + break; + } +} + +static const MemoryRegionOps npcm7xx_smbus_ops = { + .read = npcm7xx_smbus_read, + .write = npcm7xx_smbus_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 1, + .max_access_size = 1, + .unaligned = false, + }, +}; + +static void npcm7xx_smbus_enter_reset(Object *obj, ResetType type) +{ + NPCM7xxSMBusState *s = NPCM7XX_SMBUS(obj); + + s->st = NPCM7XX_SMB_ST_INIT_VAL; + s->cst = NPCM7XX_SMB_CST_INIT_VAL; + s->cst2 = NPCM7XX_SMB_CST2_INIT_VAL; + s->cst3 = NPCM7XX_SMB_CST3_INIT_VAL; + s->ctl1 = NPCM7XX_SMB_CTL1_INIT_VAL; + s->ctl2 = NPCM7XX_SMB_CTL2_INIT_VAL; + s->ctl3 = NPCM7XX_SMB_CTL3_INIT_VAL; + s->ctl4 = NPCM7XX_SMB_CTL4_INIT_VAL; + s->ctl5 = NPCM7XX_SMB_CTL5_INIT_VAL; + + for (int i = 0; i < NPCM7XX_SMBUS_NR_ADDRS; ++i) { + s->addr[i] = NPCM7XX_SMB_ADDR_INIT_VAL; + } + s->scllt = NPCM7XX_SMB_SCLLT_INIT_VAL; + s->sclht = NPCM7XX_SMB_SCLHT_INIT_VAL; + + s->fif_ctl = NPCM7XX_SMB_FIF_CTL_INIT_VAL; + s->fif_cts = NPCM7XX_SMB_FIF_CTS_INIT_VAL; + s->fair_per = NPCM7XX_SMB_FAIR_PER_INIT_VAL; + s->txf_ctl = NPCM7XX_SMB_TXF_CTL_INIT_VAL; + s->t_out = NPCM7XX_SMB_T_OUT_INIT_VAL; + s->txf_sts = NPCM7XX_SMB_TXF_STS_INIT_VAL; + s->rxf_sts = NPCM7XX_SMB_RXF_STS_INIT_VAL; + s->rxf_ctl = NPCM7XX_SMB_RXF_CTL_INIT_VAL; + + npcm7xx_smbus_clear_buffer(s); + s->status = NPCM7XX_SMBUS_STATUS_IDLE; + s->rx_cur = 0; +} + +static void npcm7xx_smbus_hold_reset(Object *obj) +{ + NPCM7xxSMBusState *s = NPCM7XX_SMBUS(obj); + + qemu_irq_lower(s->irq); +} + +static void npcm7xx_smbus_init(Object *obj) +{ + NPCM7xxSMBusState *s = NPCM7XX_SMBUS(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + + sysbus_init_irq(sbd, &s->irq); + memory_region_init_io(&s->iomem, obj, &npcm7xx_smbus_ops, s, + "regs", 4 * KiB); + sysbus_init_mmio(sbd, &s->iomem); + + s->bus = i2c_init_bus(DEVICE(s), 
"i2c-bus"); +} + +static const VMStateDescription vmstate_npcm7xx_smbus = { + .name = "npcm7xx-smbus", + .version_id = 0, + .minimum_version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_UINT8(sda, NPCM7xxSMBusState), + VMSTATE_UINT8(st, NPCM7xxSMBusState), + VMSTATE_UINT8(cst, NPCM7xxSMBusState), + VMSTATE_UINT8(cst2, NPCM7xxSMBusState), + VMSTATE_UINT8(cst3, NPCM7xxSMBusState), + VMSTATE_UINT8(ctl1, NPCM7xxSMBusState), + VMSTATE_UINT8(ctl2, NPCM7xxSMBusState), + VMSTATE_UINT8(ctl3, NPCM7xxSMBusState), + VMSTATE_UINT8(ctl4, NPCM7xxSMBusState), + VMSTATE_UINT8(ctl5, NPCM7xxSMBusState), + VMSTATE_UINT8_ARRAY(addr, NPCM7xxSMBusState, NPCM7XX_SMBUS_NR_ADDRS), + VMSTATE_UINT8(scllt, NPCM7xxSMBusState), + VMSTATE_UINT8(sclht, NPCM7xxSMBusState), + VMSTATE_UINT8(fif_ctl, NPCM7xxSMBusState), + VMSTATE_UINT8(fif_cts, NPCM7xxSMBusState), + VMSTATE_UINT8(fair_per, NPCM7xxSMBusState), + VMSTATE_UINT8(txf_ctl, NPCM7xxSMBusState), + VMSTATE_UINT8(t_out, NPCM7xxSMBusState), + VMSTATE_UINT8(txf_sts, NPCM7xxSMBusState), + VMSTATE_UINT8(rxf_sts, NPCM7xxSMBusState), + VMSTATE_UINT8(rxf_ctl, NPCM7xxSMBusState), + VMSTATE_UINT8_ARRAY(rx_fifo, NPCM7xxSMBusState, + NPCM7XX_SMBUS_FIFO_SIZE), + VMSTATE_UINT8(rx_cur, NPCM7xxSMBusState), + VMSTATE_END_OF_LIST(), + }, +}; + +static void npcm7xx_smbus_class_init(ObjectClass *klass, void *data) +{ + ResettableClass *rc = RESETTABLE_CLASS(klass); + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->desc = "NPCM7xx System Management Bus"; + dc->vmsd = &vmstate_npcm7xx_smbus; + rc->phases.enter = npcm7xx_smbus_enter_reset; + rc->phases.hold = npcm7xx_smbus_hold_reset; +} + +static const TypeInfo npcm7xx_smbus_types[] = { + { + .name = TYPE_NPCM7XX_SMBUS, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(NPCM7xxSMBusState), + .class_init = npcm7xx_smbus_class_init, + .instance_init = npcm7xx_smbus_init, + }, +}; +DEFINE_TYPES(npcm7xx_smbus_types); diff --git a/hw/i2c/omap_i2c.c b/hw/i2c/omap_i2c.c new file mode 100644 index 000000000..e5d205dda --- /dev/null +++ b/hw/i2c/omap_i2c.c @@ -0,0 +1,549 @@ +/* + * TI OMAP on-chip I2C controller. Only "new I2C" mode supported. + * + * Copyright (C) 2007 Andrzej Zaborowski + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . 
+ */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "hw/i2c/i2c.h" +#include "hw/irq.h" +#include "hw/qdev-properties.h" +#include "hw/arm/omap.h" +#include "hw/sysbus.h" +#include "qemu/error-report.h" +#include "qapi/error.h" + +struct OMAPI2CState { + SysBusDevice parent_obj; + + MemoryRegion iomem; + qemu_irq irq; + qemu_irq drq[2]; + I2CBus *bus; + + uint8_t revision; + void *iclk; + void *fclk; + + uint8_t mask; + uint16_t stat; + uint16_t dma; + uint16_t count; + int count_cur; + uint32_t fifo; + int rxlen; + int txlen; + uint16_t control; + uint16_t addr[2]; + uint8_t divider; + uint8_t times[2]; + uint16_t test; +}; + +#define OMAP2_INTR_REV 0x34 +#define OMAP2_GC_REV 0x34 + +static void omap_i2c_interrupts_update(OMAPI2CState *s) +{ + qemu_set_irq(s->irq, s->stat & s->mask); + if ((s->dma >> 15) & 1) /* RDMA_EN */ + qemu_set_irq(s->drq[0], (s->stat >> 3) & 1); /* RRDY */ + if ((s->dma >> 7) & 1) /* XDMA_EN */ + qemu_set_irq(s->drq[1], (s->stat >> 4) & 1); /* XRDY */ +} + +static void omap_i2c_fifo_run(OMAPI2CState *s) +{ + int ack = 1; + + if (!i2c_bus_busy(s->bus)) + return; + + if ((s->control >> 2) & 1) { /* RM */ + if ((s->control >> 1) & 1) { /* STP */ + i2c_end_transfer(s->bus); + s->control &= ~(1 << 1); /* STP */ + s->count_cur = s->count; + s->txlen = 0; + } else if ((s->control >> 9) & 1) { /* TRX */ + while (ack && s->txlen) + ack = (i2c_send(s->bus, + (s->fifo >> ((-- s->txlen) << 3)) & + 0xff) >= 0); + s->stat |= 1 << 4; /* XRDY */ + } else { + while (s->rxlen < 4) + s->fifo |= i2c_recv(s->bus) << ((s->rxlen ++) << 3); + s->stat |= 1 << 3; /* RRDY */ + } + } else { + if ((s->control >> 9) & 1) { /* TRX */ + while (ack && s->count_cur && s->txlen) { + ack = (i2c_send(s->bus, + (s->fifo >> ((-- s->txlen) << 3)) & + 0xff) >= 0); + s->count_cur --; + } + if (ack && s->count_cur) + s->stat |= 1 << 4; /* XRDY */ + else + s->stat &= ~(1 << 4); /* XRDY */ + if (!s->count_cur) { + s->stat |= 1 << 2; /* ARDY */ + s->control &= ~(1 << 10); /* MST */ + } + } else { + while (s->count_cur && s->rxlen < 4) { + s->fifo |= i2c_recv(s->bus) << ((s->rxlen ++) << 3); + s->count_cur --; + } + if (s->rxlen) + s->stat |= 1 << 3; /* RRDY */ + else + s->stat &= ~(1 << 3); /* RRDY */ + } + if (!s->count_cur) { + if ((s->control >> 1) & 1) { /* STP */ + i2c_end_transfer(s->bus); + s->control &= ~(1 << 1); /* STP */ + s->count_cur = s->count; + s->txlen = 0; + } else { + s->stat |= 1 << 2; /* ARDY */ + s->control &= ~(1 << 10); /* MST */ + } + } + } + + s->stat |= (!ack) << 1; /* NACK */ + if (!ack) + s->control &= ~(1 << 1); /* STP */ +} + +static void omap_i2c_reset(DeviceState *dev) +{ + OMAPI2CState *s = OMAP_I2C(dev); + + s->mask = 0; + s->stat = 0; + s->dma = 0; + s->count = 0; + s->count_cur = 0; + s->fifo = 0; + s->rxlen = 0; + s->txlen = 0; + s->control = 0; + s->addr[0] = 0; + s->addr[1] = 0; + s->divider = 0; + s->times[0] = 0; + s->times[1] = 0; + s->test = 0; +} + +static uint32_t omap_i2c_read(void *opaque, hwaddr addr) +{ + OMAPI2CState *s = opaque; + int offset = addr & OMAP_MPUI_REG_MASK; + uint16_t ret; + + switch (offset) { + case 0x00: /* I2C_REV */ + return s->revision; /* REV */ + + case 0x04: /* I2C_IE */ + return s->mask; + + case 0x08: /* I2C_STAT */ + return s->stat | (i2c_bus_busy(s->bus) << 12); + + case 0x0c: /* I2C_IV */ + if (s->revision >= OMAP2_INTR_REV) + break; + ret = ctz32(s->stat & s->mask); + if (ret != 32) { + s->stat ^= 1 << ret; + ret++; + } else { + ret = 0; + } + omap_i2c_interrupts_update(s); + return ret; + + 
case 0x10: /* I2C_SYSS */ + return (s->control >> 15) & 1; /* I2C_EN */ + + case 0x14: /* I2C_BUF */ + return s->dma; + + case 0x18: /* I2C_CNT */ + return s->count_cur; /* DCOUNT */ + + case 0x1c: /* I2C_DATA */ + ret = 0; + if (s->control & (1 << 14)) { /* BE */ + ret |= ((s->fifo >> 0) & 0xff) << 8; + ret |= ((s->fifo >> 8) & 0xff) << 0; + } else { + ret |= ((s->fifo >> 8) & 0xff) << 8; + ret |= ((s->fifo >> 0) & 0xff) << 0; + } + if (s->rxlen == 1) { + s->stat |= 1 << 15; /* SBD */ + s->rxlen = 0; + } else if (s->rxlen > 1) { + if (s->rxlen > 2) + s->fifo >>= 16; + s->rxlen -= 2; + } else { + /* XXX: remote access (qualifier) error - what's that? */ + } + if (!s->rxlen) { + s->stat &= ~(1 << 3); /* RRDY */ + if (((s->control >> 10) & 1) && /* MST */ + ((~s->control >> 9) & 1)) { /* TRX */ + s->stat |= 1 << 2; /* ARDY */ + s->control &= ~(1 << 10); /* MST */ + } + } + s->stat &= ~(1 << 11); /* ROVR */ + omap_i2c_fifo_run(s); + omap_i2c_interrupts_update(s); + return ret; + + case 0x20: /* I2C_SYSC */ + return 0; + + case 0x24: /* I2C_CON */ + return s->control; + + case 0x28: /* I2C_OA */ + return s->addr[0]; + + case 0x2c: /* I2C_SA */ + return s->addr[1]; + + case 0x30: /* I2C_PSC */ + return s->divider; + + case 0x34: /* I2C_SCLL */ + return s->times[0]; + + case 0x38: /* I2C_SCLH */ + return s->times[1]; + + case 0x3c: /* I2C_SYSTEST */ + if (s->test & (1 << 15)) { /* ST_EN */ + s->test ^= 0xa; + return s->test; + } else + return s->test & ~0x300f; + } + + OMAP_BAD_REG(addr); + return 0; +} + +static void omap_i2c_write(void *opaque, hwaddr addr, + uint32_t value) +{ + OMAPI2CState *s = opaque; + int offset = addr & OMAP_MPUI_REG_MASK; + int nack; + + switch (offset) { + case 0x00: /* I2C_REV */ + case 0x0c: /* I2C_IV */ + case 0x10: /* I2C_SYSS */ + OMAP_RO_REG(addr); + return; + + case 0x04: /* I2C_IE */ + s->mask = value & (s->revision < OMAP2_GC_REV ? 0x1f : 0x3f); + break; + + case 0x08: /* I2C_STAT */ + if (s->revision < OMAP2_INTR_REV) { + OMAP_RO_REG(addr); + return; + } + + /* RRDY and XRDY are reset by hardware. (in all versions???) */ + s->stat &= ~(value & 0x27); + omap_i2c_interrupts_update(s); + break; + + case 0x14: /* I2C_BUF */ + s->dma = value & 0x8080; + if (value & (1 << 15)) /* RDMA_EN */ + s->mask &= ~(1 << 3); /* RRDY_IE */ + if (value & (1 << 7)) /* XDMA_EN */ + s->mask &= ~(1 << 4); /* XRDY_IE */ + break; + + case 0x18: /* I2C_CNT */ + s->count = value; /* DCOUNT */ + break; + + case 0x1c: /* I2C_DATA */ + if (s->txlen > 2) { + /* XXX: remote access (qualifier) error - what's that? 
*/ + break; + } + s->fifo <<= 16; + s->txlen += 2; + if (s->control & (1 << 14)) { /* BE */ + s->fifo |= ((value >> 8) & 0xff) << 8; + s->fifo |= ((value >> 0) & 0xff) << 0; + } else { + s->fifo |= ((value >> 0) & 0xff) << 8; + s->fifo |= ((value >> 8) & 0xff) << 0; + } + s->stat &= ~(1 << 10); /* XUDF */ + if (s->txlen > 2) + s->stat &= ~(1 << 4); /* XRDY */ + omap_i2c_fifo_run(s); + omap_i2c_interrupts_update(s); + break; + + case 0x20: /* I2C_SYSC */ + if (s->revision < OMAP2_INTR_REV) { + OMAP_BAD_REG(addr); + return; + } + + if (value & 2) { + omap_i2c_reset(DEVICE(s)); + } + break; + + case 0x24: /* I2C_CON */ + s->control = value & 0xcf87; + if (~value & (1 << 15)) { /* I2C_EN */ + if (s->revision < OMAP2_INTR_REV) { + omap_i2c_reset(DEVICE(s)); + } + break; + } + if ((value & (1 << 15)) && !(value & (1 << 10))) { /* MST */ + qemu_log_mask(LOG_UNIMP, "%s: I^2C slave mode not supported\n", + __func__); + break; + } + if ((value & (1 << 15)) && value & (1 << 8)) { /* XA */ + qemu_log_mask(LOG_UNIMP, + "%s: 10-bit addressing mode not supported\n", + __func__); + break; + } + if ((value & (1 << 15)) && value & (1 << 0)) { /* STT */ + nack = !!i2c_start_transfer(s->bus, s->addr[1], /* SA */ + (~value >> 9) & 1); /* TRX */ + s->stat |= nack << 1; /* NACK */ + s->control &= ~(1 << 0); /* STT */ + s->fifo = 0; + if (nack) + s->control &= ~(1 << 1); /* STP */ + else { + s->count_cur = s->count; + omap_i2c_fifo_run(s); + } + omap_i2c_interrupts_update(s); + } + break; + + case 0x28: /* I2C_OA */ + s->addr[0] = value & 0x3ff; + break; + + case 0x2c: /* I2C_SA */ + s->addr[1] = value & 0x3ff; + break; + + case 0x30: /* I2C_PSC */ + s->divider = value; + break; + + case 0x34: /* I2C_SCLL */ + s->times[0] = value; + break; + + case 0x38: /* I2C_SCLH */ + s->times[1] = value; + break; + + case 0x3c: /* I2C_SYSTEST */ + s->test = value & 0xf80f; + if (value & (1 << 11)) /* SBB */ + if (s->revision >= OMAP2_INTR_REV) { + s->stat |= 0x3f; + omap_i2c_interrupts_update(s); + } + if (value & (1 << 15)) { /* ST_EN */ + qemu_log_mask(LOG_UNIMP, + "%s: System Test not supported\n", __func__); + } + break; + + default: + OMAP_BAD_REG(addr); + return; + } +} + +static void omap_i2c_writeb(void *opaque, hwaddr addr, + uint32_t value) +{ + OMAPI2CState *s = opaque; + int offset = addr & OMAP_MPUI_REG_MASK; + + switch (offset) { + case 0x1c: /* I2C_DATA */ + if (s->txlen > 2) { + /* XXX: remote access (qualifier) error - what's that? */ + break; + } + s->fifo <<= 8; + s->txlen += 1; + s->fifo |= value & 0xff; + s->stat &= ~(1 << 10); /* XUDF */ + if (s->txlen > 2) + s->stat &= ~(1 << 4); /* XRDY */ + omap_i2c_fifo_run(s); + omap_i2c_interrupts_update(s); + break; + + default: + OMAP_BAD_REG(addr); + return; + } +} + +static uint64_t omap_i2c_readfn(void *opaque, hwaddr addr, + unsigned size) +{ + switch (size) { + case 2: + return omap_i2c_read(opaque, addr); + default: + return omap_badwidth_read16(opaque, addr); + } +} + +static void omap_i2c_writefn(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + switch (size) { + case 1: + /* Only the last fifo write can be 8 bit. 
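+ * (omap_i2c_writeb() queues a single byte, while the 16-bit path
+ * in omap_i2c_write() queues two, so an odd transfer length has
+ * to finish with one 8-bit access.)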
*/
+ omap_i2c_writeb(opaque, addr, value);
+ break;
+ case 2:
+ omap_i2c_write(opaque, addr, value);
+ break;
+ default:
+ omap_badwidth_write16(opaque, addr, value);
+ break;
+ }
+}
+
+static const MemoryRegionOps omap_i2c_ops = {
+ .read = omap_i2c_readfn,
+ .write = omap_i2c_writefn,
+ .valid.min_access_size = 1,
+ .valid.max_access_size = 4,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+static void omap_i2c_init(Object *obj)
+{
+ DeviceState *dev = DEVICE(obj);
+ OMAPI2CState *s = OMAP_I2C(obj);
+ SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
+
+ sysbus_init_irq(sbd, &s->irq);
+ sysbus_init_irq(sbd, &s->drq[0]);
+ sysbus_init_irq(sbd, &s->drq[1]);
+ sysbus_init_mmio(sbd, &s->iomem);
+ s->bus = i2c_init_bus(dev, NULL);
+}
+
+static void omap_i2c_realize(DeviceState *dev, Error **errp)
+{
+ OMAPI2CState *s = OMAP_I2C(dev);
+
+ memory_region_init_io(&s->iomem, OBJECT(dev), &omap_i2c_ops, s, "omap.i2c",
+ (s->revision < OMAP2_INTR_REV) ? 0x800 : 0x1000);
+
+ if (!s->fclk) {
+ error_setg(errp, "omap_i2c: fclk not connected");
+ return;
+ }
+ if (s->revision >= OMAP2_INTR_REV && !s->iclk) {
+ /* Note that OMAP1 doesn't have a separate interface clock */
+ error_setg(errp, "omap_i2c: iclk not connected");
+ return;
+ }
+}
+
+void omap_i2c_set_iclk(OMAPI2CState *i2c, omap_clk clk)
+{
+ i2c->iclk = clk;
+}
+
+void omap_i2c_set_fclk(OMAPI2CState *i2c, omap_clk clk)
+{
+ i2c->fclk = clk;
+}
+
+static Property omap_i2c_properties[] = {
+ DEFINE_PROP_UINT8("revision", OMAPI2CState, revision, 0),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void omap_i2c_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ device_class_set_props(dc, omap_i2c_properties);
+ dc->reset = omap_i2c_reset;
+ /* Reason: pointer properties "iclk", "fclk" */
+ dc->user_creatable = false;
+ dc->realize = omap_i2c_realize;
+}
+
+static const TypeInfo omap_i2c_info = {
+ .name = TYPE_OMAP_I2C,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(OMAPI2CState),
+ .instance_init = omap_i2c_init,
+ .class_init = omap_i2c_class_init,
+};
+
+static void omap_i2c_register_types(void)
+{
+ type_register_static(&omap_i2c_info);
+}
+
+I2CBus *omap_i2c_bus(DeviceState *omap_i2c)
+{
+ OMAPI2CState *s = OMAP_I2C(omap_i2c);
+ return s->bus;
+}
+
+type_init(omap_i2c_register_types)
diff --git a/hw/i2c/pm_smbus.c b/hw/i2c/pm_smbus.c
new file mode 100644
index 000000000..d7eae548c
--- /dev/null
+++ b/hw/i2c/pm_smbus.c
@@ -0,0 +1,497 @@
+/*
+ * PC SMBus implementation
+ * split out from acpi.c
+ *
+ * Copyright (c) 2006 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License version 2.1 as published by the Free Software Foundation.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/>.
+ */ + +#include "qemu/osdep.h" +#include "hw/boards.h" +#include "hw/i2c/pm_smbus.h" +#include "hw/i2c/smbus_master.h" +#include "migration/vmstate.h" + +#define SMBHSTSTS 0x00 +#define SMBHSTCNT 0x02 +#define SMBHSTCMD 0x03 +#define SMBHSTADD 0x04 +#define SMBHSTDAT0 0x05 +#define SMBHSTDAT1 0x06 +#define SMBBLKDAT 0x07 +#define SMBAUXCTL 0x0d + +#define STS_HOST_BUSY (1 << 0) +#define STS_INTR (1 << 1) +#define STS_DEV_ERR (1 << 2) +#define STS_BUS_ERR (1 << 3) +#define STS_FAILED (1 << 4) +#define STS_SMBALERT (1 << 5) +#define STS_INUSE_STS (1 << 6) +#define STS_BYTE_DONE (1 << 7) +/* Signs of successfully transaction end : +* ByteDoneStatus = 1 (STS_BYTE_DONE) and INTR = 1 (STS_INTR ) +*/ + +#define CTL_INTREN (1 << 0) +#define CTL_KILL (1 << 1) +#define CTL_LAST_BYTE (1 << 5) +#define CTL_START (1 << 6) +#define CTL_PEC_EN (1 << 7) +#define CTL_RETURN_MASK 0x1f + +#define PROT_QUICK 0 +#define PROT_BYTE 1 +#define PROT_BYTE_DATA 2 +#define PROT_WORD_DATA 3 +#define PROT_PROC_CALL 4 +#define PROT_BLOCK_DATA 5 +#define PROT_I2C_BLOCK_READ 6 + +#define AUX_PEC (1 << 0) +#define AUX_BLK (1 << 1) +#define AUX_MASK 0x3 + +/*#define DEBUG*/ + +#ifdef DEBUG +# define SMBUS_DPRINTF(format, ...) printf(format, ## __VA_ARGS__) +#else +# define SMBUS_DPRINTF(format, ...) do { } while (0) +#endif + + +static void smb_transaction(PMSMBus *s) +{ + uint8_t prot = (s->smb_ctl >> 2) & 0x07; + uint8_t read = s->smb_addr & 0x01; + uint8_t cmd = s->smb_cmd; + uint8_t addr = s->smb_addr >> 1; + I2CBus *bus = s->smbus; + int ret; + + SMBUS_DPRINTF("SMBus trans addr=0x%02x prot=0x%02x\n", addr, prot); + /* Transaction isn't exec if STS_DEV_ERR bit set */ + if ((s->smb_stat & STS_DEV_ERR) != 0) { + goto error; + } + + switch(prot) { + case PROT_QUICK: + ret = smbus_quick_command(bus, addr, read); + goto done; + case PROT_BYTE: + if (read) { + ret = smbus_receive_byte(bus, addr); + goto data8; + } else { + ret = smbus_send_byte(bus, addr, cmd); + goto done; + } + case PROT_BYTE_DATA: + if (read) { + ret = smbus_read_byte(bus, addr, cmd); + goto data8; + } else { + ret = smbus_write_byte(bus, addr, cmd, s->smb_data0); + goto done; + } + break; + case PROT_WORD_DATA: + if (read) { + ret = smbus_read_word(bus, addr, cmd); + goto data16; + } else { + ret = smbus_write_word(bus, addr, cmd, + (s->smb_data1 << 8) | s->smb_data0); + goto done; + } + break; + case PROT_I2C_BLOCK_READ: + /* According to the Linux i2c-i801 driver: + * NB: page 240 of ICH5 datasheet shows that the R/#W + * bit should be cleared here, even when reading. + * However if SPD Write Disable is set (Lynx Point and later), + * the read will fail if we don't set the R/#W bit. + * So at least Linux may or may not set the read bit here. + * So just ignore the read bit for this command. 
+ */ + if (i2c_start_send(bus, addr)) { + goto error; + } + ret = i2c_send(bus, s->smb_data1); + if (ret) { + goto error; + } + if (i2c_start_recv(bus, addr)) { + goto error; + } + s->in_i2c_block_read = true; + s->smb_blkdata = i2c_recv(s->smbus); + s->op_done = false; + s->smb_stat |= STS_HOST_BUSY | STS_BYTE_DONE; + goto out; + + case PROT_BLOCK_DATA: + if (read) { + ret = smbus_read_block(bus, addr, cmd, s->smb_data, + sizeof(s->smb_data), !s->i2c_enable, + !s->i2c_enable); + if (ret < 0) { + goto error; + } + s->smb_index = 0; + s->op_done = false; + if (s->smb_auxctl & AUX_BLK) { + s->smb_stat |= STS_INTR; + } else { + s->smb_blkdata = s->smb_data[0]; + s->smb_stat |= STS_HOST_BUSY | STS_BYTE_DONE; + } + s->smb_data0 = ret; + goto out; + } else { + if (s->smb_auxctl & AUX_BLK) { + if (s->smb_index != s->smb_data0) { + s->smb_index = 0; + goto error; + } + /* Data is already all written to the queue, just do + the operation. */ + s->smb_index = 0; + ret = smbus_write_block(bus, addr, cmd, s->smb_data, + s->smb_data0, !s->i2c_enable); + if (ret < 0) { + goto error; + } + s->op_done = true; + s->smb_stat |= STS_INTR; + s->smb_stat &= ~STS_HOST_BUSY; + } else { + s->op_done = false; + s->smb_stat |= STS_HOST_BUSY | STS_BYTE_DONE; + s->smb_data[0] = s->smb_blkdata; + s->smb_index = 0; + } + goto out; + } + break; + default: + goto error; + } + abort(); + +data16: + if (ret < 0) { + goto error; + } + s->smb_data1 = ret >> 8; +data8: + if (ret < 0) { + goto error; + } + s->smb_data0 = ret; +done: + if (ret < 0) { + goto error; + } + s->smb_stat |= STS_INTR; +out: + return; + +error: + s->smb_stat |= STS_DEV_ERR; + return; +} + +static void smb_transaction_start(PMSMBus *s) +{ + if (s->smb_ctl & CTL_INTREN) { + smb_transaction(s); + s->start_transaction_on_status_read = false; + } else { + /* Do not execute immediately the command; it will be + * executed when guest will read SMB_STAT register. This + * is to work around a bug in AMIBIOS (that is working + * around another bug in some specific hardware) where + * it waits for STS_HOST_BUSY to be set before waiting + * checking for status. If STS_HOST_BUSY doesn't get + * set, it gets stuck. */ + s->smb_stat |= STS_HOST_BUSY; + s->start_transaction_on_status_read = true; + } +} + +static bool +smb_irq_value(PMSMBus *s) +{ + return ((s->smb_stat & ~STS_HOST_BUSY) != 0) && (s->smb_ctl & CTL_INTREN); +} + +static bool +smb_byte_by_byte(PMSMBus *s) +{ + if (s->op_done) { + return false; + } + if (s->in_i2c_block_read) { + return true; + } + return !(s->smb_auxctl & AUX_BLK); +} + +static void smb_ioport_writeb(void *opaque, hwaddr addr, uint64_t val, + unsigned width) +{ + PMSMBus *s = opaque; + uint8_t clear_byte_done; + + SMBUS_DPRINTF("SMB writeb port=0x%04" HWADDR_PRIx + " val=0x%02" PRIx64 "\n", addr, val); + switch(addr) { + case SMBHSTSTS: + clear_byte_done = s->smb_stat & val & STS_BYTE_DONE; + s->smb_stat &= ~(val & ~STS_HOST_BUSY); + if (clear_byte_done && smb_byte_by_byte(s)) { + uint8_t read = s->smb_addr & 0x01; + + if (s->in_i2c_block_read) { + /* See comment below PROT_I2C_BLOCK_READ above. 
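+ * In the I2C block-read protocol the transfer is always a read on
+ * the wire, so the direction bit latched from SMBHSTADD is
+ * overridden at this point.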
*/ + read = 1; + } + + s->smb_index++; + if (s->smb_index >= PM_SMBUS_MAX_MSG_SIZE) { + s->smb_index = 0; + } + if (!read && s->smb_index == s->smb_data0) { + uint8_t prot = (s->smb_ctl >> 2) & 0x07; + uint8_t cmd = s->smb_cmd; + uint8_t addr = s->smb_addr >> 1; + int ret; + + if (prot == PROT_I2C_BLOCK_READ) { + s->smb_stat |= STS_DEV_ERR; + goto out; + } + + ret = smbus_write_block(s->smbus, addr, cmd, s->smb_data, + s->smb_data0, !s->i2c_enable); + if (ret < 0) { + s->smb_stat |= STS_DEV_ERR; + goto out; + } + s->op_done = true; + s->smb_stat |= STS_INTR; + s->smb_stat &= ~STS_HOST_BUSY; + } else if (!read) { + s->smb_data[s->smb_index] = s->smb_blkdata; + s->smb_stat |= STS_BYTE_DONE; + } else if (s->smb_ctl & CTL_LAST_BYTE) { + s->op_done = true; + if (s->in_i2c_block_read) { + s->in_i2c_block_read = false; + s->smb_blkdata = i2c_recv(s->smbus); + i2c_nack(s->smbus); + i2c_end_transfer(s->smbus); + } else { + s->smb_blkdata = s->smb_data[s->smb_index]; + } + s->smb_index = 0; + s->smb_stat |= STS_INTR; + s->smb_stat &= ~STS_HOST_BUSY; + } else { + if (s->in_i2c_block_read) { + s->smb_blkdata = i2c_recv(s->smbus); + } else { + s->smb_blkdata = s->smb_data[s->smb_index]; + } + s->smb_stat |= STS_BYTE_DONE; + } + } + break; + case SMBHSTCNT: + s->smb_ctl = val & ~CTL_START; /* CTL_START always reads 0 */ + if (val & CTL_START) { + if (!s->op_done) { + s->smb_index = 0; + s->op_done = true; + if (s->in_i2c_block_read) { + s->in_i2c_block_read = false; + i2c_end_transfer(s->smbus); + } + } + smb_transaction_start(s); + } + if (s->smb_ctl & CTL_KILL) { + s->op_done = true; + s->smb_index = 0; + s->smb_stat |= STS_FAILED; + s->smb_stat &= ~STS_HOST_BUSY; + } + break; + case SMBHSTCMD: + s->smb_cmd = val; + break; + case SMBHSTADD: + s->smb_addr = val; + break; + case SMBHSTDAT0: + s->smb_data0 = val; + break; + case SMBHSTDAT1: + s->smb_data1 = val; + break; + case SMBBLKDAT: + if (s->smb_index >= PM_SMBUS_MAX_MSG_SIZE) { + s->smb_index = 0; + } + if (s->smb_auxctl & AUX_BLK) { + s->smb_data[s->smb_index++] = val; + } else { + s->smb_blkdata = val; + } + break; + case SMBAUXCTL: + s->smb_auxctl = val & AUX_MASK; + break; + default: + break; + } + + out: + if (s->set_irq) { + s->set_irq(s, smb_irq_value(s)); + } +} + +static uint64_t smb_ioport_readb(void *opaque, hwaddr addr, unsigned width) +{ + PMSMBus *s = opaque; + uint32_t val; + + switch(addr) { + case SMBHSTSTS: + val = s->smb_stat; + if (s->start_transaction_on_status_read) { + /* execute command now */ + s->start_transaction_on_status_read = false; + s->smb_stat &= ~STS_HOST_BUSY; + smb_transaction(s); + } + break; + case SMBHSTCNT: + val = s->smb_ctl & CTL_RETURN_MASK; + break; + case SMBHSTCMD: + val = s->smb_cmd; + break; + case SMBHSTADD: + val = s->smb_addr; + break; + case SMBHSTDAT0: + val = s->smb_data0; + break; + case SMBHSTDAT1: + val = s->smb_data1; + break; + case SMBBLKDAT: + if (s->smb_auxctl & AUX_BLK && !s->in_i2c_block_read) { + if (s->smb_index >= PM_SMBUS_MAX_MSG_SIZE) { + s->smb_index = 0; + } + val = s->smb_data[s->smb_index++]; + if (!s->op_done && s->smb_index == s->smb_data0) { + s->op_done = true; + s->smb_index = 0; + s->smb_stat &= ~STS_HOST_BUSY; + } + } else { + val = s->smb_blkdata; + } + break; + case SMBAUXCTL: + val = s->smb_auxctl; + break; + default: + val = 0; + break; + } + SMBUS_DPRINTF("SMB readb port=0x%04" HWADDR_PRIx " val=0x%02x\n", + addr, val); + + if (s->set_irq) { + s->set_irq(s, smb_irq_value(s)); + } + + return val; +} + +static void pm_smbus_reset(PMSMBus *s) +{ + s->op_done = true; 
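+ /* drop any partially transferred block and clear the status bits */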
+ s->smb_index = 0;
+ s->smb_stat = 0;
+}
+
+static const MemoryRegionOps pm_smbus_ops = {
+ .read = smb_ioport_readb,
+ .write = smb_ioport_writeb,
+ .valid.min_access_size = 1,
+ .valid.max_access_size = 1,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+};
+
+bool pm_smbus_vmstate_needed(void)
+{
+ MachineClass *mc = MACHINE_GET_CLASS(qdev_get_machine());
+
+ return !mc->smbus_no_migration_support;
+}
+
+const VMStateDescription pmsmb_vmstate = {
+ .name = "pmsmb",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT8(smb_stat, PMSMBus),
+ VMSTATE_UINT8(smb_ctl, PMSMBus),
+ VMSTATE_UINT8(smb_cmd, PMSMBus),
+ VMSTATE_UINT8(smb_addr, PMSMBus),
+ VMSTATE_UINT8(smb_data0, PMSMBus),
+ VMSTATE_UINT8(smb_data1, PMSMBus),
+ VMSTATE_UINT32(smb_index, PMSMBus),
+ VMSTATE_UINT8_ARRAY(smb_data, PMSMBus, PM_SMBUS_MAX_MSG_SIZE),
+ VMSTATE_UINT8(smb_auxctl, PMSMBus),
+ VMSTATE_UINT8(smb_blkdata, PMSMBus),
+ VMSTATE_BOOL(i2c_enable, PMSMBus),
+ VMSTATE_BOOL(op_done, PMSMBus),
+ VMSTATE_BOOL(in_i2c_block_read, PMSMBus),
+ VMSTATE_BOOL(start_transaction_on_status_read, PMSMBus),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+void pm_smbus_init(DeviceState *parent, PMSMBus *smb, bool force_aux_blk)
+{
+ smb->op_done = true;
+ smb->reset = pm_smbus_reset;
+ smb->smbus = i2c_init_bus(parent, "i2c");
+ if (force_aux_blk) {
+ smb->smb_auxctl |= AUX_BLK;
+ }
+ memory_region_init_io(&smb->io, OBJECT(parent), &pm_smbus_ops, smb,
+ "pm-smbus", 64);
+}
diff --git a/hw/i2c/pmbus_device.c b/hw/i2c/pmbus_device.c
new file mode 100644
index 000000000..24f8f522d
--- /dev/null
+++ b/hw/i2c/pmbus_device.c
@@ -0,0 +1,1612 @@
+/*
+ * PMBus wrapper over SMBus
+ *
+ * Copyright 2021 Google LLC
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include <math.h>
+#include <string.h>
+#include "hw/i2c/pmbus_device.h"
+#include "migration/vmstate.h"
+#include "qemu/module.h"
+#include "qemu/log.h"
+
+uint16_t pmbus_data2direct_mode(PMBusCoefficients c, uint32_t value)
+{
+ /*
+ * R is usually negative to fit large readings into 16 bits,
+ * e.g. m = 1, b = 0, R = -2 map a raw value of 12345 to y = 123.
+ */
+ uint16_t y = (c.m * value + c.b) * pow(10, c.R);
+ return y;
+}
+
+uint32_t pmbus_direct_mode2data(PMBusCoefficients c, uint16_t value)
+{
+ /* X = (Y * 10^-R - b) / m */
+ uint32_t x = (value / pow(10, c.R) - c.b) / c.m;
+ return x;
+}
+
+void pmbus_send(PMBusDevice *pmdev, const uint8_t *data, uint16_t len)
+{
+ if (pmdev->out_buf_len + len > SMBUS_DATA_MAX_LEN) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "PMBus device tried to send too much data");
+ len = 0;
+ }
+
+ for (int i = len - 1; i >= 0; i--) {
+ pmdev->out_buf[i + pmdev->out_buf_len] = data[len - i - 1];
+ }
+ pmdev->out_buf_len += len;
+}
+
+/* Internal only, convert unsigned ints to the little endian bus */
+static void pmbus_send_uint(PMBusDevice *pmdev, uint64_t data, uint8_t size)
+{
+ uint8_t bytes[8];
+ g_assert(size <= 8);
+
+ for (int i = 0; i < size; i++) {
+ bytes[i] = data & 0xFF;
+ data = data >> 8;
+ }
+ pmbus_send(pmdev, bytes, size);
+}
+
+void pmbus_send8(PMBusDevice *pmdev, uint8_t data)
+{
+ pmbus_send_uint(pmdev, data, 1);
+}
+
+void pmbus_send16(PMBusDevice *pmdev, uint16_t data)
+{
+ pmbus_send_uint(pmdev, data, 2);
+}
+
+void pmbus_send32(PMBusDevice *pmdev, uint32_t data)
+{
+ pmbus_send_uint(pmdev, data, 4);
+}
+
+void pmbus_send64(PMBusDevice *pmdev, uint64_t data)
+{
+ pmbus_send_uint(pmdev, data, 8);
+}
+
+void pmbus_send_string(PMBusDevice *pmdev, const char *data)
+{
+ size_t len = strlen(data);
+ g_assert(len > 0);
+ g_assert(len + pmdev->out_buf_len < SMBUS_DATA_MAX_LEN);
+
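+ /*
+ * out_buf is drained from the tail by pmbus_out_buf_pop(), so the
+ * block is staged in reverse: the length byte below lands at the
+ * highest new index and is therefore the first byte read back by
+ * the master, as the SMBus block-read format requires.
+ */
+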
pmdev->out_buf[len + pmdev->out_buf_len] = len; + + for (int i = len - 1; i >= 0; i--) { + pmdev->out_buf[i + pmdev->out_buf_len] = data[len - 1 - i]; + } + pmdev->out_buf_len += len + 1; +} + + +static uint64_t pmbus_receive_uint(const uint8_t *buf, uint8_t len) +{ + uint64_t ret = 0; + + /* Exclude command code from return value */ + buf++; + len--; + + for (int i = len - 1; i >= 0; i--) { + ret = ret << 8 | buf[i]; + } + return ret; +} + +uint8_t pmbus_receive8(PMBusDevice *pmdev) +{ + if (pmdev->in_buf_len - 1 != 1) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: length mismatch. Expected 1 byte, got %d bytes\n", + __func__, pmdev->in_buf_len - 1); + } + return pmbus_receive_uint(pmdev->in_buf, pmdev->in_buf_len); +} + +uint16_t pmbus_receive16(PMBusDevice *pmdev) +{ + if (pmdev->in_buf_len - 1 != 2) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: length mismatch. Expected 2 bytes, got %d bytes\n", + __func__, pmdev->in_buf_len - 1); + } + return pmbus_receive_uint(pmdev->in_buf, pmdev->in_buf_len); +} + +uint32_t pmbus_receive32(PMBusDevice *pmdev) +{ + if (pmdev->in_buf_len - 1 != 4) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: length mismatch. Expected 4 bytes, got %d bytes\n", + __func__, pmdev->in_buf_len - 1); + } + return pmbus_receive_uint(pmdev->in_buf, pmdev->in_buf_len); +} + +uint64_t pmbus_receive64(PMBusDevice *pmdev) +{ + if (pmdev->in_buf_len - 1 != 8) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: length mismatch. Expected 8 bytes, got %d bytes\n", + __func__, pmdev->in_buf_len - 1); + } + return pmbus_receive_uint(pmdev->in_buf, pmdev->in_buf_len); +} + +static uint8_t pmbus_out_buf_pop(PMBusDevice *pmdev) +{ + if (pmdev->out_buf_len == 0) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: tried to read from empty buffer", + __func__); + return 0xFF; + } + uint8_t data = pmdev->out_buf[pmdev->out_buf_len - 1]; + pmdev->out_buf_len--; + return data; +} + +static void pmbus_quick_cmd(SMBusDevice *smd, uint8_t read) +{ + PMBusDevice *pmdev = PMBUS_DEVICE(smd); + PMBusDeviceClass *pmdc = PMBUS_DEVICE_GET_CLASS(pmdev); + + if (pmdc->quick_cmd) { + pmdc->quick_cmd(pmdev, read); + } +} + +static void pmbus_pages_alloc(PMBusDevice *pmdev) +{ + /* some PMBus devices don't use the PAGE command, so they get 1 page */ + PMBusDeviceClass *k = PMBUS_DEVICE_GET_CLASS(pmdev); + if (k->device_num_pages == 0) { + k->device_num_pages = 1; + } + pmdev->num_pages = k->device_num_pages; + pmdev->pages = g_new0(PMBusPage, k->device_num_pages); +} + +void pmbus_check_limits(PMBusDevice *pmdev) +{ + for (int i = 0; i < pmdev->num_pages; i++) { + if ((pmdev->pages[i].operation & PB_OP_ON) == 0) { + continue; /* don't check powered off devices */ + } + + if (pmdev->pages[i].read_vout > pmdev->pages[i].vout_ov_fault_limit) { + pmdev->pages[i].status_word |= PB_STATUS_VOUT; + pmdev->pages[i].status_vout |= PB_STATUS_VOUT_OV_FAULT; + } + + if (pmdev->pages[i].read_vout > pmdev->pages[i].vout_ov_warn_limit) { + pmdev->pages[i].status_word |= PB_STATUS_VOUT; + pmdev->pages[i].status_vout |= PB_STATUS_VOUT_OV_WARN; + } + + if (pmdev->pages[i].read_vout < pmdev->pages[i].vout_uv_warn_limit) { + pmdev->pages[i].status_word |= PB_STATUS_VOUT; + pmdev->pages[i].status_vout |= PB_STATUS_VOUT_UV_WARN; + } + + if (pmdev->pages[i].read_vout < pmdev->pages[i].vout_uv_fault_limit) { + pmdev->pages[i].status_word |= PB_STATUS_VOUT; + pmdev->pages[i].status_vout |= PB_STATUS_VOUT_UV_FAULT; + } + + if (pmdev->pages[i].read_vin > pmdev->pages[i].vin_ov_warn_limit) { + pmdev->pages[i].status_word |= PB_STATUS_INPUT; + 
pmdev->pages[i].status_input |= PB_STATUS_INPUT_VIN_OV_WARN; + } + + if (pmdev->pages[i].read_vin < pmdev->pages[i].vin_uv_warn_limit) { + pmdev->pages[i].status_word |= PB_STATUS_INPUT; + pmdev->pages[i].status_input |= PB_STATUS_INPUT_VIN_UV_WARN; + } + + if (pmdev->pages[i].read_iout > pmdev->pages[i].iout_oc_warn_limit) { + pmdev->pages[i].status_word |= PB_STATUS_IOUT_POUT; + pmdev->pages[i].status_iout |= PB_STATUS_IOUT_OC_WARN; + } + + if (pmdev->pages[i].read_iout > pmdev->pages[i].iout_oc_fault_limit) { + pmdev->pages[i].status_word |= PB_STATUS_IOUT_POUT; + pmdev->pages[i].status_iout |= PB_STATUS_IOUT_OC_FAULT; + } + + if (pmdev->pages[i].read_pin > pmdev->pages[i].pin_op_warn_limit) { + pmdev->pages[i].status_word |= PB_STATUS_INPUT; + pmdev->pages[i].status_input |= PB_STATUS_INPUT_PIN_OP_WARN; + } + + if (pmdev->pages[i].read_temperature_1 + > pmdev->pages[i].ot_fault_limit) { + pmdev->pages[i].status_word |= PB_STATUS_TEMPERATURE; + pmdev->pages[i].status_temperature |= PB_STATUS_OT_FAULT; + } + + if (pmdev->pages[i].read_temperature_1 + > pmdev->pages[i].ot_warn_limit) { + pmdev->pages[i].status_word |= PB_STATUS_TEMPERATURE; + pmdev->pages[i].status_temperature |= PB_STATUS_OT_WARN; + } + } +} + +static uint8_t pmbus_receive_byte(SMBusDevice *smd) +{ + PMBusDevice *pmdev = PMBUS_DEVICE(smd); + PMBusDeviceClass *pmdc = PMBUS_DEVICE_GET_CLASS(pmdev); + uint8_t ret = 0xFF; + uint8_t index = pmdev->page; + + if (pmdev->out_buf_len != 0) { + ret = pmbus_out_buf_pop(pmdev); + return ret; + } + + switch (pmdev->code) { + case PMBUS_PAGE: + pmbus_send8(pmdev, pmdev->page); + break; + + case PMBUS_OPERATION: /* R/W byte */ + pmbus_send8(pmdev, pmdev->pages[index].operation); + break; + + case PMBUS_ON_OFF_CONFIG: /* R/W byte */ + pmbus_send8(pmdev, pmdev->pages[index].on_off_config); + break; + + case PMBUS_PHASE: /* R/W byte */ + pmbus_send8(pmdev, pmdev->pages[index].phase); + break; + + case PMBUS_WRITE_PROTECT: /* R/W byte */ + pmbus_send8(pmdev, pmdev->pages[index].write_protect); + break; + + case PMBUS_CAPABILITY: + pmbus_send8(pmdev, pmdev->capability); + break; + + case PMBUS_VOUT_MODE: /* R/W byte */ + if (pmdev->pages[index].page_flags & PB_HAS_VOUT_MODE) { + pmbus_send8(pmdev, pmdev->pages[index].vout_mode); + } else { + goto passthough; + } + break; + + case PMBUS_VOUT_COMMAND: /* R/W word */ + if (pmdev->pages[index].page_flags & PB_HAS_VOUT) { + pmbus_send16(pmdev, pmdev->pages[index].vout_command); + } else { + goto passthough; + } + break; + + case PMBUS_VOUT_TRIM: /* R/W word */ + if (pmdev->pages[index].page_flags & PB_HAS_VOUT) { + pmbus_send16(pmdev, pmdev->pages[index].vout_trim); + } else { + goto passthough; + } + break; + + case PMBUS_VOUT_CAL_OFFSET: /* R/W word */ + if (pmdev->pages[index].page_flags & PB_HAS_VOUT) { + pmbus_send16(pmdev, pmdev->pages[index].vout_cal_offset); + } else { + goto passthough; + } + break; + + case PMBUS_VOUT_MAX: /* R/W word */ + if (pmdev->pages[index].page_flags & PB_HAS_VOUT) { + pmbus_send16(pmdev, pmdev->pages[index].vout_max); + } else { + goto passthough; + } + break; + + case PMBUS_VOUT_MARGIN_HIGH: /* R/W word */ + if (pmdev->pages[index].page_flags & PB_HAS_VOUT_MARGIN) { + pmbus_send16(pmdev, pmdev->pages[index].vout_margin_high); + } else { + goto passthough; + } + break; + + case PMBUS_VOUT_MARGIN_LOW: /* R/W word */ + if (pmdev->pages[index].page_flags & PB_HAS_VOUT_MARGIN) { + pmbus_send16(pmdev, pmdev->pages[index].vout_margin_low); + } else { + goto passthough; + } + break; + + case 
PMBUS_VOUT_TRANSITION_RATE: /* R/W word */ + if (pmdev->pages[index].page_flags & PB_HAS_VOUT) { + pmbus_send16(pmdev, pmdev->pages[index].vout_transition_rate); + } else { + goto passthough; + } + break; + + case PMBUS_VOUT_DROOP: /* R/W word */ + if (pmdev->pages[index].page_flags & PB_HAS_VOUT) { + pmbus_send16(pmdev, pmdev->pages[index].vout_droop); + } else { + goto passthough; + } + break; + + case PMBUS_VOUT_SCALE_LOOP: /* R/W word */ + if (pmdev->pages[index].page_flags & PB_HAS_VOUT) { + pmbus_send16(pmdev, pmdev->pages[index].vout_scale_loop); + } else { + goto passthough; + } + break; + + case PMBUS_VOUT_SCALE_MONITOR: /* R/W word */ + if (pmdev->pages[index].page_flags & PB_HAS_VOUT) { + pmbus_send16(pmdev, pmdev->pages[index].vout_scale_monitor); + } else { + goto passthough; + } + break; + + /* TODO: implement coefficients support */ + + case PMBUS_POUT_MAX: /* R/W word */ + if (pmdev->pages[index].page_flags & PB_HAS_POUT) { + pmbus_send16(pmdev, pmdev->pages[index].pout_max); + } else { + goto passthough; + } + break; + + case PMBUS_VIN_ON: /* R/W word */ + if (pmdev->pages[index].page_flags & PB_HAS_VIN) { + pmbus_send16(pmdev, pmdev->pages[index].vin_on); + } else { + goto passthough; + } + break; + + case PMBUS_VIN_OFF: /* R/W word */ + if (pmdev->pages[index].page_flags & PB_HAS_VIN) { + pmbus_send16(pmdev, pmdev->pages[index].vin_off); + } else { + goto passthough; + } + break; + + case PMBUS_IOUT_CAL_GAIN: /* R/W word */ + if (pmdev->pages[index].page_flags & PB_HAS_IOUT_GAIN) { + pmbus_send16(pmdev, pmdev->pages[index].iout_cal_gain); + } else { + goto passthough; + } + break; + + case PMBUS_VOUT_OV_FAULT_LIMIT: /* R/W word */ + if (pmdev->pages[index].page_flags & PB_HAS_VOUT) { + pmbus_send16(pmdev, pmdev->pages[index].vout_ov_fault_limit); + } else { + goto passthough; + } + break; + + case PMBUS_VOUT_OV_FAULT_RESPONSE: /* R/W byte */ + if (pmdev->pages[index].page_flags & PB_HAS_VOUT) { + pmbus_send8(pmdev, pmdev->pages[index].vout_ov_fault_response); + } else { + goto passthough; + } + break; + + case PMBUS_VOUT_OV_WARN_LIMIT: /* R/W word */ + if (pmdev->pages[index].page_flags & PB_HAS_VOUT) { + pmbus_send16(pmdev, pmdev->pages[index].vout_ov_warn_limit); + } else { + goto passthough; + } + break; + + case PMBUS_VOUT_UV_WARN_LIMIT: /* R/W word */ + if (pmdev->pages[index].page_flags & PB_HAS_VOUT) { + pmbus_send16(pmdev, pmdev->pages[index].vout_uv_warn_limit); + } else { + goto passthough; + } + break; + + case PMBUS_VOUT_UV_FAULT_LIMIT: /* R/W word */ + if (pmdev->pages[index].page_flags & PB_HAS_VOUT) { + pmbus_send16(pmdev, pmdev->pages[index].vout_uv_fault_limit); + } else { + goto passthough; + } + break; + + case PMBUS_VOUT_UV_FAULT_RESPONSE: /* R/W byte */ + if (pmdev->pages[index].page_flags & PB_HAS_VOUT) { + pmbus_send8(pmdev, pmdev->pages[index].vout_uv_fault_response); + } else { + goto passthough; + } + break; + + case PMBUS_IOUT_OC_FAULT_LIMIT: /* R/W word */ + if (pmdev->pages[index].page_flags & PB_HAS_IOUT) { + pmbus_send16(pmdev, pmdev->pages[index].iout_oc_fault_limit); + } else { + goto passthough; + } + break; + + case PMBUS_IOUT_OC_FAULT_RESPONSE: /* R/W byte */ + if (pmdev->pages[index].page_flags & PB_HAS_IOUT) { + pmbus_send8(pmdev, pmdev->pages[index].iout_oc_fault_response); + } else { + goto passthough; + } + break; + + case PMBUS_IOUT_OC_LV_FAULT_LIMIT: /* R/W word */ + if (pmdev->pages[index].page_flags & PB_HAS_IOUT) { + pmbus_send16(pmdev, pmdev->pages[index].iout_oc_lv_fault_limit); + } else { + goto passthough; + } + break; + 
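+ /*
+ * Every handler in this switch repeats the same pattern: the
+ * page_flags bitmap decides whether a command is served from the
+ * generic page state or passed through to the concrete device
+ * model. A device opts in when it configures its pages, e.g.
+ * (illustrative flag combination only, it depends on the part
+ * being modelled):
+ *
+ * pmbus_page_config(pmdev, 0, PB_HAS_VOUT | PB_HAS_IOUT);
+ */
+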
+ case PMBUS_IOUT_OC_LV_FAULT_RESPONSE: /* R/W byte */ + if (pmdev->pages[index].page_flags & PB_HAS_IOUT) { + pmbus_send8(pmdev, pmdev->pages[index].iout_oc_lv_fault_response); + } else { + goto passthough; + } + break; + + case PMBUS_IOUT_OC_WARN_LIMIT: /* R/W word */ + if (pmdev->pages[index].page_flags & PB_HAS_IOUT) { + pmbus_send16(pmdev, pmdev->pages[index].iout_oc_warn_limit); + } else { + goto passthough; + } + break; + + case PMBUS_IOUT_UC_FAULT_LIMIT: /* R/W word */ + if (pmdev->pages[index].page_flags & PB_HAS_IOUT) { + pmbus_send16(pmdev, pmdev->pages[index].iout_uc_fault_limit); + } else { + goto passthough; + } + break; + + case PMBUS_IOUT_UC_FAULT_RESPONSE: /* R/W byte */ + if (pmdev->pages[index].page_flags & PB_HAS_IOUT) { + pmbus_send8(pmdev, pmdev->pages[index].iout_uc_fault_response); + } else { + goto passthough; + } + break; + + case PMBUS_OT_FAULT_LIMIT: /* R/W word */ + if (pmdev->pages[index].page_flags & PB_HAS_TEMPERATURE) { + pmbus_send16(pmdev, pmdev->pages[index].ot_fault_limit); + } else { + goto passthough; + } + break; + + case PMBUS_OT_FAULT_RESPONSE: /* R/W byte */ + if (pmdev->pages[index].page_flags & PB_HAS_TEMPERATURE) { + pmbus_send8(pmdev, pmdev->pages[index].ot_fault_response); + } else { + goto passthough; + } + break; + + case PMBUS_OT_WARN_LIMIT: /* R/W word */ + if (pmdev->pages[index].page_flags & PB_HAS_TEMPERATURE) { + pmbus_send16(pmdev, pmdev->pages[index].ot_warn_limit); + } else { + goto passthough; + } + break; + + case PMBUS_UT_WARN_LIMIT: /* R/W word */ + if (pmdev->pages[index].page_flags & PB_HAS_TEMPERATURE) { + pmbus_send16(pmdev, pmdev->pages[index].ut_warn_limit); + } else { + goto passthough; + } + break; + + case PMBUS_UT_FAULT_LIMIT: /* R/W word */ + if (pmdev->pages[index].page_flags & PB_HAS_TEMPERATURE) { + pmbus_send16(pmdev, pmdev->pages[index].ut_fault_limit); + } else { + goto passthough; + } + break; + + case PMBUS_UT_FAULT_RESPONSE: /* R/W byte */ + if (pmdev->pages[index].page_flags & PB_HAS_TEMPERATURE) { + pmbus_send8(pmdev, pmdev->pages[index].ut_fault_response); + } else { + goto passthough; + } + break; + + case PMBUS_VIN_OV_FAULT_LIMIT: /* R/W word */ + if (pmdev->pages[index].page_flags & PB_HAS_VIN) { + pmbus_send16(pmdev, pmdev->pages[index].vin_ov_fault_limit); + } else { + goto passthough; + } + break; + + case PMBUS_VIN_OV_FAULT_RESPONSE: /* R/W byte */ + if (pmdev->pages[index].page_flags & PB_HAS_VIN) { + pmbus_send8(pmdev, pmdev->pages[index].vin_ov_fault_response); + } else { + goto passthough; + } + break; + + case PMBUS_VIN_OV_WARN_LIMIT: /* R/W word */ + if (pmdev->pages[index].page_flags & PB_HAS_VIN) { + pmbus_send16(pmdev, pmdev->pages[index].vin_ov_warn_limit); + } else { + goto passthough; + } + break; + + case PMBUS_VIN_UV_WARN_LIMIT: /* R/W word */ + if (pmdev->pages[index].page_flags & PB_HAS_VIN) { + pmbus_send16(pmdev, pmdev->pages[index].vin_uv_warn_limit); + } else { + goto passthough; + } + break; + + case PMBUS_VIN_UV_FAULT_LIMIT: /* R/W word */ + if (pmdev->pages[index].page_flags & PB_HAS_VIN) { + pmbus_send16(pmdev, pmdev->pages[index].vin_uv_fault_limit); + } else { + goto passthough; + } + break; + + case PMBUS_VIN_UV_FAULT_RESPONSE: /* R/W byte */ + if (pmdev->pages[index].page_flags & PB_HAS_VIN) { + pmbus_send8(pmdev, pmdev->pages[index].vin_uv_fault_response); + } else { + goto passthough; + } + break; + + case PMBUS_IIN_OC_FAULT_LIMIT: /* R/W word */ + if (pmdev->pages[index].page_flags & PB_HAS_IIN) { + pmbus_send16(pmdev, pmdev->pages[index].iin_oc_fault_limit); + } else 
{ + goto passthough; + } + break; + + case PMBUS_IIN_OC_FAULT_RESPONSE: /* R/W byte */ + if (pmdev->pages[index].page_flags & PB_HAS_IIN) { + pmbus_send8(pmdev, pmdev->pages[index].iin_oc_fault_response); + } else { + goto passthough; + } + break; + + case PMBUS_IIN_OC_WARN_LIMIT: /* R/W word */ + if (pmdev->pages[index].page_flags & PB_HAS_IIN) { + pmbus_send16(pmdev, pmdev->pages[index].iin_oc_warn_limit); + } else { + goto passthough; + } + break; + + case PMBUS_POUT_OP_FAULT_LIMIT: /* R/W word */ + if (pmdev->pages[index].page_flags & PB_HAS_POUT) { + pmbus_send16(pmdev, pmdev->pages[index].pout_op_fault_limit); + } else { + goto passthough; + } + break; + + case PMBUS_POUT_OP_FAULT_RESPONSE: /* R/W byte */ + if (pmdev->pages[index].page_flags & PB_HAS_POUT) { + pmbus_send8(pmdev, pmdev->pages[index].pout_op_fault_response); + } else { + goto passthough; + } + break; + + case PMBUS_POUT_OP_WARN_LIMIT: /* R/W word */ + if (pmdev->pages[index].page_flags & PB_HAS_POUT) { + pmbus_send16(pmdev, pmdev->pages[index].pout_op_warn_limit); + } else { + goto passthough; + } + break; + + case PMBUS_PIN_OP_WARN_LIMIT: /* R/W word */ + if (pmdev->pages[index].page_flags & PB_HAS_PIN) { + pmbus_send16(pmdev, pmdev->pages[index].pin_op_warn_limit); + } else { + goto passthough; + } + break; + + case PMBUS_STATUS_BYTE: /* R/W byte */ + pmbus_send8(pmdev, pmdev->pages[index].status_word & 0xFF); + break; + + case PMBUS_STATUS_WORD: /* R/W word */ + pmbus_send16(pmdev, pmdev->pages[index].status_word); + break; + + case PMBUS_STATUS_VOUT: /* R/W byte */ + if (pmdev->pages[index].page_flags & PB_HAS_VOUT) { + pmbus_send8(pmdev, pmdev->pages[index].status_vout); + } else { + goto passthough; + } + break; + + case PMBUS_STATUS_IOUT: /* R/W byte */ + if (pmdev->pages[index].page_flags & PB_HAS_IOUT) { + pmbus_send8(pmdev, pmdev->pages[index].status_iout); + } else { + goto passthough; + } + break; + + case PMBUS_STATUS_INPUT: /* R/W byte */ + if (pmdev->pages[index].page_flags & PB_HAS_VIN || + pmdev->pages[index].page_flags & PB_HAS_IIN || + pmdev->pages[index].page_flags & PB_HAS_PIN) { + pmbus_send8(pmdev, pmdev->pages[index].status_input); + } else { + goto passthough; + } + break; + + case PMBUS_STATUS_TEMPERATURE: /* R/W byte */ + if (pmdev->pages[index].page_flags & PB_HAS_TEMPERATURE) { + pmbus_send8(pmdev, pmdev->pages[index].status_temperature); + } else { + goto passthough; + } + break; + + case PMBUS_STATUS_CML: /* R/W byte */ + pmbus_send8(pmdev, pmdev->pages[index].status_cml); + break; + + case PMBUS_STATUS_OTHER: /* R/W byte */ + pmbus_send8(pmdev, pmdev->pages[index].status_other); + break; + + case PMBUS_READ_EIN: /* Read-Only block 5 bytes */ + if (pmdev->pages[index].page_flags & PB_HAS_EIN) { + pmbus_send(pmdev, pmdev->pages[index].read_ein, 5); + } else { + goto passthough; + } + break; + + case PMBUS_READ_EOUT: /* Read-Only block 5 bytes */ + if (pmdev->pages[index].page_flags & PB_HAS_EOUT) { + pmbus_send(pmdev, pmdev->pages[index].read_eout, 5); + } else { + goto passthough; + } + break; + + case PMBUS_READ_VIN: /* Read-Only word */ + if (pmdev->pages[index].page_flags & PB_HAS_VIN) { + pmbus_send16(pmdev, pmdev->pages[index].read_vin); + } else { + goto passthough; + } + break; + + case PMBUS_READ_IIN: /* Read-Only word */ + if (pmdev->pages[index].page_flags & PB_HAS_IIN) { + pmbus_send16(pmdev, pmdev->pages[index].read_iin); + } else { + goto passthough; + } + break; + + case PMBUS_READ_VOUT: /* Read-Only word */ + if (pmdev->pages[index].page_flags & PB_HAS_VOUT) { + 
pmbus_send16(pmdev, pmdev->pages[index].read_vout); + } else { + goto passthough; + } + break; + + case PMBUS_READ_IOUT: /* Read-Only word */ + if (pmdev->pages[index].page_flags & PB_HAS_IOUT) { + pmbus_send16(pmdev, pmdev->pages[index].read_iout); + } else { + goto passthough; + } + break; + + case PMBUS_READ_TEMPERATURE_1: /* Read-Only word */ + if (pmdev->pages[index].page_flags & PB_HAS_TEMPERATURE) { + pmbus_send16(pmdev, pmdev->pages[index].read_temperature_1); + } else { + goto passthough; + } + break; + + case PMBUS_READ_TEMPERATURE_2: /* Read-Only word */ + if (pmdev->pages[index].page_flags & PB_HAS_TEMP2) { + pmbus_send16(pmdev, pmdev->pages[index].read_temperature_2); + } else { + goto passthough; + } + break; + + case PMBUS_READ_TEMPERATURE_3: /* Read-Only word */ + if (pmdev->pages[index].page_flags & PB_HAS_TEMP3) { + pmbus_send16(pmdev, pmdev->pages[index].read_temperature_3); + } else { + goto passthough; + } + break; + + case PMBUS_READ_POUT: /* Read-Only word */ + if (pmdev->pages[index].page_flags & PB_HAS_POUT) { + pmbus_send16(pmdev, pmdev->pages[index].read_pout); + } else { + goto passthough; + } + break; + + case PMBUS_READ_PIN: /* Read-Only word */ + if (pmdev->pages[index].page_flags & PB_HAS_PIN) { + pmbus_send16(pmdev, pmdev->pages[index].read_pin); + } else { + goto passthough; + } + break; + + case PMBUS_REVISION: /* Read-Only byte */ + pmbus_send8(pmdev, pmdev->pages[index].revision); + break; + + case PMBUS_MFR_ID: /* R/W block */ + if (pmdev->pages[index].page_flags & PB_HAS_MFR_INFO) { + pmbus_send_string(pmdev, pmdev->pages[index].mfr_id); + } else { + goto passthough; + } + break; + + case PMBUS_MFR_MODEL: /* R/W block */ + if (pmdev->pages[index].page_flags & PB_HAS_MFR_INFO) { + pmbus_send_string(pmdev, pmdev->pages[index].mfr_model); + } else { + goto passthough; + } + break; + + case PMBUS_MFR_REVISION: /* R/W block */ + if (pmdev->pages[index].page_flags & PB_HAS_MFR_INFO) { + pmbus_send_string(pmdev, pmdev->pages[index].mfr_revision); + } else { + goto passthough; + } + break; + + case PMBUS_MFR_LOCATION: /* R/W block */ + if (pmdev->pages[index].page_flags & PB_HAS_MFR_INFO) { + pmbus_send_string(pmdev, pmdev->pages[index].mfr_location); + } else { + goto passthough; + } + break; + + case PMBUS_MFR_VIN_MIN: /* Read-Only word */ + if (pmdev->pages[index].page_flags & PB_HAS_VIN_RATING) { + pmbus_send16(pmdev, pmdev->pages[index].mfr_vin_min); + } else { + goto passthough; + } + break; + + case PMBUS_MFR_VIN_MAX: /* Read-Only word */ + if (pmdev->pages[index].page_flags & PB_HAS_VIN_RATING) { + pmbus_send16(pmdev, pmdev->pages[index].mfr_vin_max); + } else { + goto passthough; + } + break; + + case PMBUS_MFR_IIN_MAX: /* Read-Only word */ + if (pmdev->pages[index].page_flags & PB_HAS_IIN_RATING) { + pmbus_send16(pmdev, pmdev->pages[index].mfr_iin_max); + } else { + goto passthough; + } + break; + + case PMBUS_MFR_PIN_MAX: /* Read-Only word */ + if (pmdev->pages[index].page_flags & PB_HAS_PIN_RATING) { + pmbus_send16(pmdev, pmdev->pages[index].mfr_pin_max); + } else { + goto passthough; + } + break; + + case PMBUS_MFR_VOUT_MIN: /* Read-Only word */ + if (pmdev->pages[index].page_flags & PB_HAS_VOUT_RATING) { + pmbus_send16(pmdev, pmdev->pages[index].mfr_vout_min); + } else { + goto passthough; + } + break; + + case PMBUS_MFR_VOUT_MAX: /* Read-Only word */ + if (pmdev->pages[index].page_flags & PB_HAS_VOUT_RATING) { + pmbus_send16(pmdev, pmdev->pages[index].mfr_vout_max); + } else { + goto passthough; + } + break; + + case PMBUS_MFR_IOUT_MAX: /* 
Read-Only word */ + if (pmdev->pages[index].page_flags & PB_HAS_IOUT_RATING) { + pmbus_send16(pmdev, pmdev->pages[index].mfr_iout_max); + } else { + goto passthough; + } + break; + + case PMBUS_MFR_POUT_MAX: /* Read-Only word */ + if (pmdev->pages[index].page_flags & PB_HAS_POUT_RATING) { + pmbus_send16(pmdev, pmdev->pages[index].mfr_pout_max); + } else { + goto passthough; + } + break; + + case PMBUS_MFR_MAX_TEMP_1: /* R/W word */ + if (pmdev->pages[index].page_flags & PB_HAS_TEMP_RATING) { + pmbus_send16(pmdev, pmdev->pages[index].mfr_max_temp_1); + } else { + goto passthough; + } + break; + + case PMBUS_MFR_MAX_TEMP_2: /* R/W word */ + if (pmdev->pages[index].page_flags & PB_HAS_TEMP_RATING) { + pmbus_send16(pmdev, pmdev->pages[index].mfr_max_temp_2); + } else { + goto passthough; + } + break; + + case PMBUS_MFR_MAX_TEMP_3: /* R/W word */ + if (pmdev->pages[index].page_flags & PB_HAS_TEMP_RATING) { + pmbus_send16(pmdev, pmdev->pages[index].mfr_max_temp_3); + } else { + goto passthough; + } + break; + + case PMBUS_CLEAR_FAULTS: /* Send Byte */ + case PMBUS_PAGE_PLUS_WRITE: /* Block Write-only */ + case PMBUS_STORE_DEFAULT_ALL: /* Send Byte */ + case PMBUS_RESTORE_DEFAULT_ALL: /* Send Byte */ + case PMBUS_STORE_DEFAULT_CODE: /* Write-only Byte */ + case PMBUS_RESTORE_DEFAULT_CODE: /* Write-only Byte */ + case PMBUS_STORE_USER_ALL: /* Send Byte */ + case PMBUS_RESTORE_USER_ALL: /* Send Byte */ + case PMBUS_STORE_USER_CODE: /* Write-only Byte */ + case PMBUS_RESTORE_USER_CODE: /* Write-only Byte */ + case PMBUS_QUERY: /* Write-Only */ + qemu_log_mask(LOG_GUEST_ERROR, + "%s: reading from write only register 0x%02x\n", + __func__, pmdev->code); + break; + +passthough: + default: + /* Pass through read request if not handled */ + if (pmdc->receive_byte) { + ret = pmdc->receive_byte(pmdev); + } + break; + } + + if (pmdev->out_buf_len != 0) { + ret = pmbus_out_buf_pop(pmdev); + return ret; + } + + return ret; +} + +/* + * PMBus clear faults command applies to all status registers, existing faults + * should separately get re-asserted. 
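+ * (The bits are not latched here: pmbus_check_limits() runs at the
+ * end of every write transaction and raises them again while the
+ * underlying condition persists.)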
+ */ +static void pmbus_clear_faults(PMBusDevice *pmdev) +{ + for (uint8_t i = 0; i < pmdev->num_pages; i++) { + pmdev->pages[i].status_word = 0; + pmdev->pages[i].status_vout = 0; + pmdev->pages[i].status_iout = 0; + pmdev->pages[i].status_input = 0; + pmdev->pages[i].status_temperature = 0; + pmdev->pages[i].status_cml = 0; + pmdev->pages[i].status_other = 0; + pmdev->pages[i].status_mfr_specific = 0; + pmdev->pages[i].status_fans_1_2 = 0; + pmdev->pages[i].status_fans_3_4 = 0; + } + +} + +/* + * PMBus operation is used to turn On and Off PSUs + * Therefore, default value for the Operation should be PB_OP_ON or 0x80 + */ +static void pmbus_operation(PMBusDevice *pmdev) +{ + uint8_t index = pmdev->page; + if ((pmdev->pages[index].operation & PB_OP_ON) == 0) { + pmdev->pages[index].read_vout = 0; + pmdev->pages[index].read_iout = 0; + pmdev->pages[index].read_pout = 0; + return; + } + + if (pmdev->pages[index].operation & (PB_OP_ON | PB_OP_MARGIN_HIGH)) { + pmdev->pages[index].read_vout = pmdev->pages[index].vout_margin_high; + } + + if (pmdev->pages[index].operation & (PB_OP_ON | PB_OP_MARGIN_LOW)) { + pmdev->pages[index].read_vout = pmdev->pages[index].vout_margin_low; + } + pmbus_check_limits(pmdev); +} + +static int pmbus_write_data(SMBusDevice *smd, uint8_t *buf, uint8_t len) +{ + PMBusDevice *pmdev = PMBUS_DEVICE(smd); + PMBusDeviceClass *pmdc = PMBUS_DEVICE_GET_CLASS(pmdev); + int ret = 0; + uint8_t index; + + if (len == 0) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: writing empty data\n", __func__); + return -1; + } + + if (!pmdev->pages) { /* allocate memory for pages on first use */ + pmbus_pages_alloc(pmdev); + } + + pmdev->in_buf_len = len; + pmdev->in_buf = buf; + + pmdev->code = buf[0]; /* PMBus command code */ + if (len == 1) { /* Single length writes are command codes only */ + return 0; + } + + if (pmdev->code == PMBUS_PAGE) { + pmdev->page = pmbus_receive8(pmdev); + return 0; + } + /* loop through all the pages when 0xFF is received */ + if (pmdev->page == PB_ALL_PAGES) { + for (int i = 0; i < pmdev->num_pages; i++) { + pmdev->page = i; + pmbus_write_data(smd, buf, len); + } + pmdev->page = PB_ALL_PAGES; + return 0; + } + + index = pmdev->page; + + switch (pmdev->code) { + case PMBUS_OPERATION: /* R/W byte */ + pmdev->pages[index].operation = pmbus_receive8(pmdev); + pmbus_operation(pmdev); + break; + + case PMBUS_ON_OFF_CONFIG: /* R/W byte */ + pmdev->pages[index].on_off_config = pmbus_receive8(pmdev); + break; + + case PMBUS_CLEAR_FAULTS: /* Send Byte */ + pmbus_clear_faults(pmdev); + break; + + case PMBUS_PHASE: /* R/W byte */ + pmdev->pages[index].phase = pmbus_receive8(pmdev); + break; + + case PMBUS_PAGE_PLUS_WRITE: /* Block Write-only */ + case PMBUS_WRITE_PROTECT: /* R/W byte */ + pmdev->pages[index].write_protect = pmbus_receive8(pmdev); + break; + + case PMBUS_VOUT_MODE: /* R/W byte */ + if (pmdev->pages[index].page_flags & PB_HAS_VOUT_MODE) { + pmdev->pages[index].vout_mode = pmbus_receive8(pmdev); + } else { + goto passthrough; + } + break; + + case PMBUS_VOUT_COMMAND: /* R/W word */ + if (pmdev->pages[index].page_flags & PB_HAS_VOUT) { + pmdev->pages[index].vout_command = pmbus_receive16(pmdev); + } else { + goto passthrough; + } + break; + + case PMBUS_VOUT_TRIM: /* R/W word */ + if (pmdev->pages[index].page_flags & PB_HAS_VOUT) { + pmdev->pages[index].vout_trim = pmbus_receive16(pmdev); + } else { + goto passthrough; + } + break; + + case PMBUS_VOUT_CAL_OFFSET: /* R/W word */ + if (pmdev->pages[index].page_flags & PB_HAS_VOUT) { + 
pmdev->pages[index].vout_cal_offset = pmbus_receive16(pmdev); + } else { + goto passthrough; + } + break; + + case PMBUS_VOUT_MAX: /* R/W word */ + if (pmdev->pages[index].page_flags & PB_HAS_VOUT) { + pmdev->pages[index].vout_max = pmbus_receive16(pmdev); + } else { + goto passthrough; + } + break; + + case PMBUS_VOUT_MARGIN_HIGH: /* R/W word */ + if (pmdev->pages[index].page_flags & PB_HAS_VOUT_MARGIN) { + pmdev->pages[index].vout_margin_high = pmbus_receive16(pmdev); + } else { + goto passthrough; + } + break; + + case PMBUS_VOUT_MARGIN_LOW: /* R/W word */ + if (pmdev->pages[index].page_flags & PB_HAS_VOUT_MARGIN) { + pmdev->pages[index].vout_margin_low = pmbus_receive16(pmdev); + } else { + goto passthrough; + } + break; + + case PMBUS_VOUT_TRANSITION_RATE: /* R/W word */ + if (pmdev->pages[index].page_flags & PB_HAS_VOUT) { + pmdev->pages[index].vout_transition_rate = pmbus_receive16(pmdev); + } else { + goto passthrough; + } + break; + + case PMBUS_VOUT_DROOP: /* R/W word */ + if (pmdev->pages[index].page_flags & PB_HAS_VOUT) { + pmdev->pages[index].vout_droop = pmbus_receive16(pmdev); + } else { + goto passthrough; + } + break; + + case PMBUS_VOUT_SCALE_LOOP: /* R/W word */ + if (pmdev->pages[index].page_flags & PB_HAS_VOUT) { + pmdev->pages[index].vout_scale_loop = pmbus_receive16(pmdev); + } else { + goto passthrough; + } + break; + + case PMBUS_VOUT_SCALE_MONITOR: /* R/W word */ + if (pmdev->pages[index].page_flags & PB_HAS_VOUT) { + pmdev->pages[index].vout_scale_monitor = pmbus_receive16(pmdev); + } else { + goto passthrough; + } + break; + + case PMBUS_POUT_MAX: /* R/W word */ + if (pmdev->pages[index].page_flags & PB_HAS_VOUT) { + pmdev->pages[index].pout_max = pmbus_receive16(pmdev); + } else { + goto passthrough; + } + break; + + case PMBUS_VIN_ON: /* R/W word */ + if (pmdev->pages[index].page_flags & PB_HAS_VIN) { + pmdev->pages[index].vin_on = pmbus_receive16(pmdev); + } else { + goto passthrough; + } + break; + + case PMBUS_VIN_OFF: /* R/W word */ + if (pmdev->pages[index].page_flags & PB_HAS_VIN) { + pmdev->pages[index].vin_off = pmbus_receive16(pmdev); + } else { + goto passthrough; + } + break; + + case PMBUS_IOUT_CAL_GAIN: /* R/W word */ + if (pmdev->pages[index].page_flags & PB_HAS_IOUT_GAIN) { + pmdev->pages[index].iout_cal_gain = pmbus_receive16(pmdev); + } else { + goto passthrough; + } + break; + + case PMBUS_VOUT_OV_FAULT_LIMIT: /* R/W word */ + if (pmdev->pages[index].page_flags & PB_HAS_VOUT) { + pmdev->pages[index].vout_ov_fault_limit = pmbus_receive16(pmdev); + } else { + goto passthrough; + } + break; + + case PMBUS_VOUT_OV_FAULT_RESPONSE: /* R/W byte */ + if (pmdev->pages[index].page_flags & PB_HAS_VOUT) { + pmdev->pages[index].vout_ov_fault_response = pmbus_receive8(pmdev); + } else { + goto passthrough; + } + break; + + case PMBUS_VOUT_OV_WARN_LIMIT: /* R/W word */ + if (pmdev->pages[index].page_flags & PB_HAS_VOUT) { + pmdev->pages[index].vout_ov_warn_limit = pmbus_receive16(pmdev); + } else { + goto passthrough; + } + break; + + case PMBUS_VOUT_UV_WARN_LIMIT: /* R/W word */ + if (pmdev->pages[index].page_flags & PB_HAS_VOUT) { + pmdev->pages[index].vout_uv_warn_limit = pmbus_receive16(pmdev); + } else { + goto passthrough; + } + break; + + case PMBUS_VOUT_UV_FAULT_LIMIT: /* R/W word */ + if (pmdev->pages[index].page_flags & PB_HAS_VOUT) { + pmdev->pages[index].vout_uv_fault_limit = pmbus_receive16(pmdev); + } else { + goto passthrough; + } + break; + + case PMBUS_VOUT_UV_FAULT_RESPONSE: /* R/W byte */ + if (pmdev->pages[index].page_flags & 
PB_HAS_VOUT) { + pmdev->pages[index].vout_uv_fault_response = pmbus_receive8(pmdev); + } else { + goto passthrough; + } + break; + + case PMBUS_IOUT_OC_FAULT_LIMIT: /* R/W word */ + if (pmdev->pages[index].page_flags & PB_HAS_IOUT) { + pmdev->pages[index].iout_oc_fault_limit = pmbus_receive16(pmdev); + } else { + goto passthrough; + } + break; + + case PMBUS_IOUT_OC_FAULT_RESPONSE: /* R/W byte */ + if (pmdev->pages[index].page_flags & PB_HAS_IOUT) { + pmdev->pages[index].iout_oc_fault_response = pmbus_receive8(pmdev); + } else { + goto passthrough; + } + break; + + case PMBUS_IOUT_OC_LV_FAULT_LIMIT: /* R/W word */ + if (pmdev->pages[index].page_flags & PB_HAS_IOUT) { + pmdev->pages[index].iout_oc_lv_fault_limit = pmbus_receive16(pmdev); + } else { + goto passthrough; + } + break; + + case PMBUS_IOUT_OC_LV_FAULT_RESPONSE: /* R/W byte */ + if (pmdev->pages[index].page_flags & PB_HAS_IOUT) { + pmdev->pages[index].iout_oc_lv_fault_response + = pmbus_receive8(pmdev); + } else { + goto passthrough; + } + break; + + case PMBUS_IOUT_OC_WARN_LIMIT: /* R/W word */ + if (pmdev->pages[index].page_flags & PB_HAS_IOUT) { + pmdev->pages[index].iout_oc_warn_limit = pmbus_receive16(pmdev); + } else { + goto passthrough; + } + break; + + case PMBUS_IOUT_UC_FAULT_LIMIT: /* R/W word */ + if (pmdev->pages[index].page_flags & PB_HAS_IOUT) { + pmdev->pages[index].iout_uc_fault_limit = pmbus_receive16(pmdev); + } else { + goto passthrough; + } + break; + + case PMBUS_IOUT_UC_FAULT_RESPONSE: /* R/W byte */ + if (pmdev->pages[index].page_flags & PB_HAS_IOUT) { + pmdev->pages[index].iout_uc_fault_response = pmbus_receive8(pmdev); + } else { + goto passthrough; + } + break; + + case PMBUS_OT_FAULT_LIMIT: /* R/W word */ + if (pmdev->pages[index].page_flags & PB_HAS_TEMPERATURE) { + pmdev->pages[index].ot_fault_limit = pmbus_receive16(pmdev); + } else { + goto passthrough; + } + break; + + case PMBUS_OT_FAULT_RESPONSE: /* R/W byte */ + if (pmdev->pages[index].page_flags & PB_HAS_TEMPERATURE) { + pmdev->pages[index].ot_fault_response = pmbus_receive8(pmdev); + } else { + goto passthrough; + } + break; + + case PMBUS_OT_WARN_LIMIT: /* R/W word */ + if (pmdev->pages[index].page_flags & PB_HAS_TEMPERATURE) { + pmdev->pages[index].ot_warn_limit = pmbus_receive16(pmdev); + } else { + goto passthrough; + } + break; + + case PMBUS_UT_WARN_LIMIT: /* R/W word */ + if (pmdev->pages[index].page_flags & PB_HAS_TEMPERATURE) { + pmdev->pages[index].ut_warn_limit = pmbus_receive16(pmdev); + } else { + goto passthrough; + } + break; + + case PMBUS_UT_FAULT_LIMIT: /* R/W word */ + if (pmdev->pages[index].page_flags & PB_HAS_TEMPERATURE) { + pmdev->pages[index].ut_fault_limit = pmbus_receive16(pmdev); + } else { + goto passthrough; + } + break; + + case PMBUS_UT_FAULT_RESPONSE: /* R/W byte */ + if (pmdev->pages[index].page_flags & PB_HAS_TEMPERATURE) { + pmdev->pages[index].ut_fault_response = pmbus_receive8(pmdev); + } else { + goto passthrough; + } + break; + + case PMBUS_VIN_OV_FAULT_LIMIT: /* R/W word */ + if (pmdev->pages[index].page_flags & PB_HAS_VIN) { + pmdev->pages[index].vin_ov_fault_limit = pmbus_receive16(pmdev); + } else { + goto passthrough; + } + break; + + case PMBUS_VIN_OV_FAULT_RESPONSE: /* R/W byte */ + if (pmdev->pages[index].page_flags & PB_HAS_VIN) { + pmdev->pages[index].vin_ov_fault_response = pmbus_receive8(pmdev); + } else { + goto passthrough; + } + break; + + case PMBUS_VIN_OV_WARN_LIMIT: /* R/W word */ + if (pmdev->pages[index].page_flags & PB_HAS_VIN) { + pmdev->pages[index].vin_ov_warn_limit = 
pmbus_receive16(pmdev); + } else { + goto passthrough; + } + break; + + case PMBUS_VIN_UV_WARN_LIMIT: /* R/W word */ + if (pmdev->pages[index].page_flags & PB_HAS_VIN) { + pmdev->pages[index].vin_uv_warn_limit = pmbus_receive16(pmdev); + } else { + goto passthrough; + } + break; + + case PMBUS_VIN_UV_FAULT_LIMIT: /* R/W word */ + if (pmdev->pages[index].page_flags & PB_HAS_VIN) { + pmdev->pages[index].vin_uv_fault_limit = pmbus_receive16(pmdev); + } else { + goto passthrough; + } + break; + + case PMBUS_VIN_UV_FAULT_RESPONSE: /* R/W byte */ + if (pmdev->pages[index].page_flags & PB_HAS_VIN) { + pmdev->pages[index].vin_uv_fault_response = pmbus_receive8(pmdev); + } else { + goto passthrough; + } + break; + + case PMBUS_IIN_OC_FAULT_LIMIT: /* R/W word */ + if (pmdev->pages[index].page_flags & PB_HAS_IIN) { + pmdev->pages[index].iin_oc_fault_limit = pmbus_receive16(pmdev); + } else { + goto passthrough; + } + break; + + case PMBUS_IIN_OC_FAULT_RESPONSE: /* R/W byte */ + if (pmdev->pages[index].page_flags & PB_HAS_IIN) { + pmdev->pages[index].iin_oc_fault_response = pmbus_receive8(pmdev); + } else { + goto passthrough; + } + break; + + case PMBUS_IIN_OC_WARN_LIMIT: /* R/W word */ + if (pmdev->pages[index].page_flags & PB_HAS_IIN) { + pmdev->pages[index].iin_oc_warn_limit = pmbus_receive16(pmdev); + } else { + goto passthrough; + } + break; + + case PMBUS_POUT_OP_FAULT_LIMIT: /* R/W word */ + if (pmdev->pages[index].page_flags & PB_HAS_VOUT) { + pmdev->pages[index].pout_op_fault_limit = pmbus_receive16(pmdev); + } else { + goto passthrough; + } + break; + + case PMBUS_POUT_OP_FAULT_RESPONSE: /* R/W byte */ + if (pmdev->pages[index].page_flags & PB_HAS_VOUT) { + pmdev->pages[index].pout_op_fault_response = pmbus_receive8(pmdev); + } else { + goto passthrough; + } + break; + + case PMBUS_POUT_OP_WARN_LIMIT: /* R/W word */ + if (pmdev->pages[index].page_flags & PB_HAS_VOUT) { + pmdev->pages[index].pout_op_warn_limit = pmbus_receive16(pmdev); + } else { + goto passthrough; + } + break; + + case PMBUS_PIN_OP_WARN_LIMIT: /* R/W word */ + if (pmdev->pages[index].page_flags & PB_HAS_PIN) { + pmdev->pages[index].pin_op_warn_limit = pmbus_receive16(pmdev); + } else { + goto passthrough; + } + break; + + case PMBUS_STATUS_BYTE: /* R/W byte */ + pmdev->pages[index].status_word = pmbus_receive8(pmdev); + break; + + case PMBUS_STATUS_WORD: /* R/W word */ + pmdev->pages[index].status_word = pmbus_receive16(pmdev); + break; + + case PMBUS_STATUS_VOUT: /* R/W byte */ + if (pmdev->pages[index].page_flags & PB_HAS_VOUT) { + pmdev->pages[index].status_vout = pmbus_receive8(pmdev); + } else { + goto passthrough; + } + break; + + case PMBUS_STATUS_IOUT: /* R/W byte */ + if (pmdev->pages[index].page_flags & PB_HAS_IOUT) { + pmdev->pages[index].status_iout = pmbus_receive8(pmdev); + } else { + goto passthrough; + } + break; + + case PMBUS_STATUS_INPUT: /* R/W byte */ + pmdev->pages[index].status_input = pmbus_receive8(pmdev); + break; + + case PMBUS_STATUS_TEMPERATURE: /* R/W byte */ + if (pmdev->pages[index].page_flags & PB_HAS_TEMPERATURE) { + pmdev->pages[index].status_temperature = pmbus_receive8(pmdev); + } else { + goto passthrough; + } + break; + + case PMBUS_STATUS_CML: /* R/W byte */ + pmdev->pages[index].status_cml = pmbus_receive8(pmdev); + break; + + case PMBUS_STATUS_OTHER: /* R/W byte */ + pmdev->pages[index].status_other = pmbus_receive8(pmdev); + break; + + case PMBUS_PAGE_PLUS_READ: /* Block Read-only */ + case PMBUS_CAPABILITY: /* Read-Only byte */ + case PMBUS_COEFFICIENTS: /* Read-only block 5 
bytes */
+    case PMBUS_READ_EIN:            /* Read-Only block 5 bytes */
+    case PMBUS_READ_EOUT:           /* Read-Only block 5 bytes */
+    case PMBUS_READ_VIN:            /* Read-Only word */
+    case PMBUS_READ_IIN:            /* Read-Only word */
+    case PMBUS_READ_VCAP:           /* Read-Only word */
+    case PMBUS_READ_VOUT:           /* Read-Only word */
+    case PMBUS_READ_IOUT:           /* Read-Only word */
+    case PMBUS_READ_TEMPERATURE_1:  /* Read-Only word */
+    case PMBUS_READ_TEMPERATURE_2:  /* Read-Only word */
+    case PMBUS_READ_TEMPERATURE_3:  /* Read-Only word */
+    case PMBUS_READ_FAN_SPEED_1:    /* Read-Only word */
+    case PMBUS_READ_FAN_SPEED_2:    /* Read-Only word */
+    case PMBUS_READ_FAN_SPEED_3:    /* Read-Only word */
+    case PMBUS_READ_FAN_SPEED_4:    /* Read-Only word */
+    case PMBUS_READ_DUTY_CYCLE:     /* Read-Only word */
+    case PMBUS_READ_FREQUENCY:      /* Read-Only word */
+    case PMBUS_READ_POUT:           /* Read-Only word */
+    case PMBUS_READ_PIN:            /* Read-Only word */
+    case PMBUS_REVISION:            /* Read-Only byte */
+    case PMBUS_APP_PROFILE_SUPPORT: /* Read-Only block-read */
+    case PMBUS_MFR_VIN_MIN:         /* Read-Only word */
+    case PMBUS_MFR_VIN_MAX:         /* Read-Only word */
+    case PMBUS_MFR_IIN_MAX:         /* Read-Only word */
+    case PMBUS_MFR_PIN_MAX:         /* Read-Only word */
+    case PMBUS_MFR_VOUT_MIN:        /* Read-Only word */
+    case PMBUS_MFR_VOUT_MAX:        /* Read-Only word */
+    case PMBUS_MFR_IOUT_MAX:        /* Read-Only word */
+    case PMBUS_MFR_POUT_MAX:        /* Read-Only word */
+    case PMBUS_MFR_TAMBIENT_MAX:    /* Read-Only word */
+    case PMBUS_MFR_TAMBIENT_MIN:    /* Read-Only word */
+    case PMBUS_MFR_EFFICIENCY_LL:   /* Read-Only block 14 bytes */
+    case PMBUS_MFR_EFFICIENCY_HL:   /* Read-Only block 14 bytes */
+    case PMBUS_MFR_PIN_ACCURACY:    /* Read-Only byte */
+    case PMBUS_IC_DEVICE_ID:        /* Read-Only block-read */
+    case PMBUS_IC_DEVICE_REV:       /* Read-Only block-read */
+        qemu_log_mask(LOG_GUEST_ERROR,
+                      "%s: writing to read-only register 0x%02x\n",
+                      __func__, pmdev->code);
+        break;
+
+passthrough:
+    /* Unimplemented registers get passed to the device */
+    default:
+        if (pmdc->write_data) {
+            ret = pmdc->write_data(pmdev, buf, len);
+        }
+        break;
+    }
+    pmbus_check_limits(pmdev);
+    pmdev->in_buf_len = 0;
+    return ret;
+}
+
+int pmbus_page_config(PMBusDevice *pmdev, uint8_t index, uint64_t flags)
+{
+    if (!pmdev->pages) { /* allocate memory for pages on first use */
+        pmbus_pages_alloc(pmdev);
+    }
+
+    /* The 0xFF page is special for commands applying to all pages */
+    if (index == PB_ALL_PAGES) {
+        for (int i = 0; i < pmdev->num_pages; i++) {
+            pmdev->pages[i].page_flags = flags;
+        }
+        return 0;
+    }
+
+    if (index > pmdev->num_pages - 1) {
+        qemu_log_mask(LOG_GUEST_ERROR,
+                      "%s: index %u is out of range\n",
+                      __func__, index);
+        return -1;
+    }
+
+    pmdev->pages[index].page_flags = flags;
+
+    return 0;
+}
+
+/* TODO: include pmbus page info in vmstate */
+const VMStateDescription vmstate_pmbus_device = {
+    .name = TYPE_PMBUS_DEVICE,
+    .version_id = 0,
+    .minimum_version_id = 0,
+    .fields = (VMStateField[]) {
+        VMSTATE_SMBUS_DEVICE(smb, PMBusDevice),
+        VMSTATE_UINT8(num_pages, PMBusDevice),
+        VMSTATE_UINT8(code, PMBusDevice),
+        VMSTATE_UINT8(page, PMBusDevice),
+        VMSTATE_UINT8(capability, PMBusDevice),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+static void pmbus_device_finalize(Object *obj)
+{
+    PMBusDevice *pmdev = PMBUS_DEVICE(obj);
+    g_free(pmdev->pages);
+}
+
+static void pmbus_device_class_init(ObjectClass *klass, void *data)
+{
+    SMBusDeviceClass *k = SMBUS_DEVICE_CLASS(klass);
+
+    k->quick_cmd = pmbus_quick_cmd;
+    k->write_data = pmbus_write_data;
+    k->receive_byte = pmbus_receive_byte;
+}
+
+static const TypeInfo
pmbus_device_type_info = { + .name = TYPE_PMBUS_DEVICE, + .parent = TYPE_SMBUS_DEVICE, + .instance_size = sizeof(PMBusDevice), + .instance_finalize = pmbus_device_finalize, + .abstract = true, + .class_size = sizeof(PMBusDeviceClass), + .class_init = pmbus_device_class_init, +}; + +static void pmbus_device_register_types(void) +{ + type_register_static(&pmbus_device_type_info); +} + +type_init(pmbus_device_register_types) diff --git a/hw/i2c/ppc4xx_i2c.c b/hw/i2c/ppc4xx_i2c.c new file mode 100644 index 000000000..75d50f151 --- /dev/null +++ b/hw/i2c/ppc4xx_i2c.c @@ -0,0 +1,377 @@ +/* + * PPC4xx I2C controller emulation + * + * Documentation: PPC405GP User's Manual, Chapter 22. IIC Bus Interface + * + * Copyright (c) 2007 Jocelyn Mayer + * Copyright (c) 2012 François Revol + * Copyright (c) 2016-2018 BALATON Zoltan + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
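
Stepping back to the PMBus core completed above: a child device configures which rails each page models before realize, and any register the core does not handle falls through the "passthrough" label to the device's own write_data hook. A minimal sketch with invented names, assuming the hook signature used at that passthrough call site:

    /* Hypothetical PMBus child device: one page carrying output rails. */
    static void demo_sensor_init(Object *obj)
    {
        PMBusDevice *pmdev = PMBUS_DEVICE(obj);

        pmdev->num_pages = 1;
        /* index PB_ALL_PAGES (0xFF) would apply the flags to every page */
        pmbus_page_config(pmdev, 0, PB_HAS_VOUT | PB_HAS_IOUT);
    }

    /* Reached through the "passthrough" label for unhandled registers. */
    static int demo_write_data(PMBusDevice *pmdev, const uint8_t *buf,
                               uint8_t len)
    {
        return 0;
    }
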
+ */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "hw/i2c/ppc4xx_i2c.h" +#include "hw/irq.h" + +#define PPC4xx_I2C_MEM_SIZE 18 + +enum { + IIC_MDBUF = 0, + /* IIC_SDBUF = 2, */ + IIC_LMADR = 4, + IIC_HMADR, + IIC_CNTL, + IIC_MDCNTL, + IIC_STS, + IIC_EXTSTS, + IIC_LSADR, + IIC_HSADR, + IIC_CLKDIV, + IIC_INTRMSK, + IIC_XFRCNT, + IIC_XTCNTLSS, + IIC_DIRECTCNTL + /* IIC_INTR */ +}; + +#define IIC_CNTL_PT (1 << 0) +#define IIC_CNTL_READ (1 << 1) +#define IIC_CNTL_CHT (1 << 2) +#define IIC_CNTL_RPST (1 << 3) +#define IIC_CNTL_AMD (1 << 6) +#define IIC_CNTL_HMT (1 << 7) + +#define IIC_MDCNTL_EINT (1 << 2) +#define IIC_MDCNTL_ESM (1 << 3) +#define IIC_MDCNTL_FMDB (1 << 6) + +#define IIC_STS_PT (1 << 0) +#define IIC_STS_IRQA (1 << 1) +#define IIC_STS_ERR (1 << 2) +#define IIC_STS_MDBF (1 << 4) +#define IIC_STS_MDBS (1 << 5) + +#define IIC_EXTSTS_XFRA (1 << 0) +#define IIC_EXTSTS_BCS_FREE (4 << 4) +#define IIC_EXTSTS_BCS_BUSY (5 << 4) + +#define IIC_INTRMSK_EIMTC (1 << 0) +#define IIC_INTRMSK_EITA (1 << 1) +#define IIC_INTRMSK_EIIC (1 << 2) +#define IIC_INTRMSK_EIHE (1 << 3) + +#define IIC_XTCNTLSS_SRST (1 << 0) + +#define IIC_DIRECTCNTL_SDAC (1 << 3) +#define IIC_DIRECTCNTL_SCLC (1 << 2) +#define IIC_DIRECTCNTL_MSDA (1 << 1) +#define IIC_DIRECTCNTL_MSCL (1 << 0) + +static void ppc4xx_i2c_reset(DeviceState *s) +{ + PPC4xxI2CState *i2c = PPC4xx_I2C(s); + + i2c->mdidx = -1; + memset(i2c->mdata, 0, ARRAY_SIZE(i2c->mdata)); + /* [hl][ms]addr are not affected by reset */ + i2c->cntl = 0; + i2c->mdcntl = 0; + i2c->sts = 0; + i2c->extsts = IIC_EXTSTS_BCS_FREE; + i2c->clkdiv = 0; + i2c->intrmsk = 0; + i2c->xfrcnt = 0; + i2c->xtcntlss = 0; + i2c->directcntl = 0xf; /* all non-reserved bits set */ +} + +static uint64_t ppc4xx_i2c_readb(void *opaque, hwaddr addr, unsigned int size) +{ + PPC4xxI2CState *i2c = PPC4xx_I2C(opaque); + uint64_t ret; + int i; + + switch (addr) { + case IIC_MDBUF: + if (i2c->mdidx < 0) { + ret = 0xff; + break; + } + ret = i2c->mdata[0]; + if (i2c->mdidx == 3) { + i2c->sts &= ~IIC_STS_MDBF; + } else if (i2c->mdidx == 0) { + i2c->sts &= ~IIC_STS_MDBS; + } + for (i = 0; i < i2c->mdidx; i++) { + i2c->mdata[i] = i2c->mdata[i + 1]; + } + if (i2c->mdidx >= 0) { + i2c->mdidx--; + } + break; + case IIC_LMADR: + ret = i2c->lmadr; + break; + case IIC_HMADR: + ret = i2c->hmadr; + break; + case IIC_CNTL: + ret = i2c->cntl; + break; + case IIC_MDCNTL: + ret = i2c->mdcntl; + break; + case IIC_STS: + ret = i2c->sts; + break; + case IIC_EXTSTS: + ret = i2c_bus_busy(i2c->bus) ? 
+ IIC_EXTSTS_BCS_BUSY : IIC_EXTSTS_BCS_FREE; + break; + case IIC_LSADR: + ret = i2c->lsadr; + break; + case IIC_HSADR: + ret = i2c->hsadr; + break; + case IIC_CLKDIV: + ret = i2c->clkdiv; + break; + case IIC_INTRMSK: + ret = i2c->intrmsk; + break; + case IIC_XFRCNT: + ret = i2c->xfrcnt; + break; + case IIC_XTCNTLSS: + ret = i2c->xtcntlss; + break; + case IIC_DIRECTCNTL: + ret = i2c->directcntl; + break; + default: + if (addr < PPC4xx_I2C_MEM_SIZE) { + qemu_log_mask(LOG_UNIMP, "%s: Unimplemented register 0x%" + HWADDR_PRIx "\n", __func__, addr); + } else { + qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad address 0x%" + HWADDR_PRIx "\n", __func__, addr); + } + ret = 0; + break; + } + return ret; +} + +static void ppc4xx_i2c_writeb(void *opaque, hwaddr addr, uint64_t value, + unsigned int size) +{ + PPC4xxI2CState *i2c = opaque; + + switch (addr) { + case IIC_MDBUF: + if (i2c->mdidx >= 3) { + break; + } + i2c->mdata[++i2c->mdidx] = value; + if (i2c->mdidx == 3) { + i2c->sts |= IIC_STS_MDBF; + } else if (i2c->mdidx == 0) { + i2c->sts |= IIC_STS_MDBS; + } + break; + case IIC_LMADR: + i2c->lmadr = value; + break; + case IIC_HMADR: + i2c->hmadr = value; + break; + case IIC_CNTL: + i2c->cntl = value & ~IIC_CNTL_PT; + if (value & IIC_CNTL_AMD) { + qemu_log_mask(LOG_UNIMP, "%s: only 7 bit addresses supported\n", + __func__); + } + if (value & IIC_CNTL_HMT && i2c_bus_busy(i2c->bus)) { + i2c_end_transfer(i2c->bus); + if (i2c->mdcntl & IIC_MDCNTL_EINT && + i2c->intrmsk & IIC_INTRMSK_EIHE) { + i2c->sts |= IIC_STS_IRQA; + qemu_irq_raise(i2c->irq); + } + } else if (value & IIC_CNTL_PT) { + int recv = (value & IIC_CNTL_READ) >> 1; + int tct = value >> 4 & 3; + int i; + + if (recv && (i2c->lmadr >> 1) >= 0x50 && (i2c->lmadr >> 1) < 0x58) { + /* smbus emulation does not like multi byte reads w/o restart */ + value |= IIC_CNTL_RPST; + } + + for (i = 0; i <= tct; i++) { + if (!i2c_bus_busy(i2c->bus)) { + i2c->extsts = IIC_EXTSTS_BCS_FREE; + if (i2c_start_transfer(i2c->bus, i2c->lmadr >> 1, recv)) { + i2c->sts |= IIC_STS_ERR; + i2c->extsts |= IIC_EXTSTS_XFRA; + break; + } else { + i2c->sts &= ~IIC_STS_ERR; + } + } + if (!(i2c->sts & IIC_STS_ERR)) { + if (recv) { + i2c->mdata[i] = i2c_recv(i2c->bus); + } else if (i2c_send(i2c->bus, i2c->mdata[i]) < 0) { + i2c->sts |= IIC_STS_ERR; + i2c->extsts |= IIC_EXTSTS_XFRA; + break; + } + } + if (value & IIC_CNTL_RPST || !(value & IIC_CNTL_CHT)) { + i2c_end_transfer(i2c->bus); + } + } + i2c->xfrcnt = i; + i2c->mdidx = i - 1; + if (recv && i2c->mdidx >= 0) { + i2c->sts |= IIC_STS_MDBS; + } + if (recv && i2c->mdidx == 3) { + i2c->sts |= IIC_STS_MDBF; + } + if (i && i2c->mdcntl & IIC_MDCNTL_EINT && + i2c->intrmsk & IIC_INTRMSK_EIMTC) { + i2c->sts |= IIC_STS_IRQA; + qemu_irq_raise(i2c->irq); + } + } + break; + case IIC_MDCNTL: + i2c->mdcntl = value & 0x3d; + if (value & IIC_MDCNTL_ESM) { + qemu_log_mask(LOG_UNIMP, "%s: slave mode not implemented\n", + __func__); + } + if (value & IIC_MDCNTL_FMDB) { + i2c->mdidx = -1; + memset(i2c->mdata, 0, ARRAY_SIZE(i2c->mdata)); + i2c->sts &= ~(IIC_STS_MDBF | IIC_STS_MDBS); + } + break; + case IIC_STS: + i2c->sts &= ~(value & 0x0a); + if (value & IIC_STS_IRQA && i2c->mdcntl & IIC_MDCNTL_EINT) { + qemu_irq_lower(i2c->irq); + } + break; + case IIC_EXTSTS: + i2c->extsts &= ~(value & 0x8f); + break; + case IIC_LSADR: + i2c->lsadr = value; + break; + case IIC_HSADR: + i2c->hsadr = value; + break; + case IIC_CLKDIV: + i2c->clkdiv = value; + break; + case IIC_INTRMSK: + i2c->intrmsk = value; + break; + case IIC_XFRCNT: + i2c->xfrcnt = value & 0x77; + 
break; + case IIC_XTCNTLSS: + i2c->xtcntlss &= ~(value & 0xf0); + if (value & IIC_XTCNTLSS_SRST) { + /* Is it actually a full reset? U-Boot sets some regs before */ + ppc4xx_i2c_reset(DEVICE(i2c)); + break; + } + break; + case IIC_DIRECTCNTL: + i2c->directcntl = value & (IIC_DIRECTCNTL_SDAC & IIC_DIRECTCNTL_SCLC); + i2c->directcntl |= (value & IIC_DIRECTCNTL_SCLC ? 1 : 0); + bitbang_i2c_set(&i2c->bitbang, BITBANG_I2C_SCL, + i2c->directcntl & IIC_DIRECTCNTL_MSCL); + i2c->directcntl |= bitbang_i2c_set(&i2c->bitbang, BITBANG_I2C_SDA, + (value & IIC_DIRECTCNTL_SDAC) != 0) << 1; + break; + default: + if (addr < PPC4xx_I2C_MEM_SIZE) { + qemu_log_mask(LOG_UNIMP, "%s: Unimplemented register 0x%" + HWADDR_PRIx "\n", __func__, addr); + } else { + qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad address 0x%" + HWADDR_PRIx "\n", __func__, addr); + } + break; + } +} + +static const MemoryRegionOps ppc4xx_i2c_ops = { + .read = ppc4xx_i2c_readb, + .write = ppc4xx_i2c_writeb, + .valid.min_access_size = 1, + .valid.max_access_size = 4, + .impl.min_access_size = 1, + .impl.max_access_size = 1, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void ppc4xx_i2c_init(Object *o) +{ + PPC4xxI2CState *s = PPC4xx_I2C(o); + + memory_region_init_io(&s->iomem, OBJECT(s), &ppc4xx_i2c_ops, s, + TYPE_PPC4xx_I2C, PPC4xx_I2C_MEM_SIZE); + sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->iomem); + sysbus_init_irq(SYS_BUS_DEVICE(s), &s->irq); + s->bus = i2c_init_bus(DEVICE(s), "i2c"); + bitbang_i2c_init(&s->bitbang, s->bus); +} + +static void ppc4xx_i2c_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = ppc4xx_i2c_reset; +} + +static const TypeInfo ppc4xx_i2c_type_info = { + .name = TYPE_PPC4xx_I2C, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(PPC4xxI2CState), + .instance_init = ppc4xx_i2c_init, + .class_init = ppc4xx_i2c_class_init, +}; + +static void ppc4xx_i2c_register_types(void) +{ + type_register_static(&ppc4xx_i2c_type_info); +} + +type_init(ppc4xx_i2c_register_types) diff --git a/hw/i2c/smbus_eeprom.c b/hw/i2c/smbus_eeprom.c new file mode 100644 index 000000000..12c5741f3 --- /dev/null +++ b/hw/i2c/smbus_eeprom.c @@ -0,0 +1,300 @@ +/* + * QEMU SMBus EEPROM device + * + * Copyright (c) 2007 Arastra, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
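
A usage note on the PPC4xx IIC model that ends just above: the IIC_CNTL write path drives a whole transfer at once. A guest loads the 7-bit target address (shifted left by one) into IIC_LMADR, pushes up to four bytes through IIC_MDBUF, and sets IIC_CNTL_PT with the byte count minus one in bits 5:4. A sketch of the register sequence for a two-byte master write (illustration only; writeb() stands in for the guest's MMIO accessor, offsets come from the enum at the top of the file):

    writeb(base + IIC_LMADR, dev_addr << 1);          /* target address */
    writeb(base + IIC_MDBUF, byte0);                  /* fill 4-byte buffer */
    writeb(base + IIC_MDBUF, byte1);
    writeb(base + IIC_CNTL, (1 << 4) | IIC_CNTL_PT);  /* tct = 1: 2 bytes */

The model then performs i2c_start_transfer()/i2c_send() on the underlying bus and latches IIC_STS_ERR plus IIC_EXTSTS_XFRA if the target does not acknowledge.
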
+ */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "qapi/error.h" +#include "hw/boards.h" +#include "hw/i2c/i2c.h" +#include "hw/i2c/smbus_slave.h" +#include "hw/qdev-properties.h" +#include "migration/vmstate.h" +#include "hw/i2c/smbus_eeprom.h" +#include "qom/object.h" + +//#define DEBUG + +#define TYPE_SMBUS_EEPROM "smbus-eeprom" + +OBJECT_DECLARE_SIMPLE_TYPE(SMBusEEPROMDevice, SMBUS_EEPROM) + +#define SMBUS_EEPROM_SIZE 256 + +struct SMBusEEPROMDevice { + SMBusDevice smbusdev; + uint8_t data[SMBUS_EEPROM_SIZE]; + uint8_t *init_data; + uint8_t offset; + bool accessed; +}; + +static uint8_t eeprom_receive_byte(SMBusDevice *dev) +{ + SMBusEEPROMDevice *eeprom = SMBUS_EEPROM(dev); + uint8_t *data = eeprom->data; + uint8_t val = data[eeprom->offset++]; + + eeprom->accessed = true; +#ifdef DEBUG + printf("eeprom_receive_byte: addr=0x%02x val=0x%02x\n", + dev->i2c.address, val); +#endif + return val; +} + +static int eeprom_write_data(SMBusDevice *dev, uint8_t *buf, uint8_t len) +{ + SMBusEEPROMDevice *eeprom = SMBUS_EEPROM(dev); + uint8_t *data = eeprom->data; + + eeprom->accessed = true; +#ifdef DEBUG + printf("eeprom_write_byte: addr=0x%02x cmd=0x%02x val=0x%02x\n", + dev->i2c.address, buf[0], buf[1]); +#endif + /* len is guaranteed to be > 0 */ + eeprom->offset = buf[0]; + buf++; + len--; + + for (; len > 0; len--) { + data[eeprom->offset] = *buf++; + eeprom->offset = (eeprom->offset + 1) % SMBUS_EEPROM_SIZE; + } + + return 0; +} + +static bool smbus_eeprom_vmstate_needed(void *opaque) +{ + MachineClass *mc = MACHINE_GET_CLASS(qdev_get_machine()); + SMBusEEPROMDevice *eeprom = opaque; + + return (eeprom->accessed || smbus_vmstate_needed(&eeprom->smbusdev)) && + !mc->smbus_no_migration_support; +} + +static const VMStateDescription vmstate_smbus_eeprom = { + .name = "smbus-eeprom", + .version_id = 1, + .minimum_version_id = 1, + .needed = smbus_eeprom_vmstate_needed, + .fields = (VMStateField[]) { + VMSTATE_SMBUS_DEVICE(smbusdev, SMBusEEPROMDevice), + VMSTATE_UINT8_ARRAY(data, SMBusEEPROMDevice, SMBUS_EEPROM_SIZE), + VMSTATE_UINT8(offset, SMBusEEPROMDevice), + VMSTATE_BOOL(accessed, SMBusEEPROMDevice), + VMSTATE_END_OF_LIST() + } +}; + +/* + * Reset the EEPROM contents to the initial state on a reset. This + * isn't really how an EEPROM works, of course, but the general + * principle of QEMU is to restore function on reset to what it would + * be if QEMU was stopped and started. + * + * The proper thing to do would be to have a backing blockdev to hold + * the contents and restore that on startup, and not do this on reset. + * But until that time, act as if we had been stopped and restarted. 
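
Before the reset handler below, the offset semantics implemented by eeprom_write_data() and eeprom_receive_byte() above are worth making concrete: the first byte of a write selects the offset, the remaining bytes store data, and reads auto-increment modulo SMBUS_EEPROM_SIZE. From the master side (a sketch assuming the usual 0x50 address used by smbus_eeprom_init_one() below, and the helpers from hw/i2c/smbus_master.c added later in this patch):

    smbus_write_byte(bus, 0x50, 0x10, 0xAA);  /* buf = {0x10, 0xAA}: data[0x10] = 0xAA */
    int v = smbus_read_byte(bus, 0x50, 0x10); /* command byte re-selects offset 0x10 */
    assert(v == 0xAA);                        /* offset has advanced to 0x11 */
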
+ */ +static void smbus_eeprom_reset(DeviceState *dev) +{ + SMBusEEPROMDevice *eeprom = SMBUS_EEPROM(dev); + + memcpy(eeprom->data, eeprom->init_data, SMBUS_EEPROM_SIZE); + eeprom->offset = 0; +} + +static void smbus_eeprom_realize(DeviceState *dev, Error **errp) +{ + SMBusEEPROMDevice *eeprom = SMBUS_EEPROM(dev); + + smbus_eeprom_reset(dev); + if (eeprom->init_data == NULL) { + error_setg(errp, "init_data cannot be NULL"); + } +} + +static void smbus_eeprom_class_initfn(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + SMBusDeviceClass *sc = SMBUS_DEVICE_CLASS(klass); + + dc->realize = smbus_eeprom_realize; + dc->reset = smbus_eeprom_reset; + sc->receive_byte = eeprom_receive_byte; + sc->write_data = eeprom_write_data; + dc->vmsd = &vmstate_smbus_eeprom; + /* Reason: init_data */ + dc->user_creatable = false; +} + +static const TypeInfo smbus_eeprom_info = { + .name = TYPE_SMBUS_EEPROM, + .parent = TYPE_SMBUS_DEVICE, + .instance_size = sizeof(SMBusEEPROMDevice), + .class_init = smbus_eeprom_class_initfn, +}; + +static void smbus_eeprom_register_types(void) +{ + type_register_static(&smbus_eeprom_info); +} + +type_init(smbus_eeprom_register_types) + +void smbus_eeprom_init_one(I2CBus *smbus, uint8_t address, uint8_t *eeprom_buf) +{ + DeviceState *dev; + + dev = qdev_new(TYPE_SMBUS_EEPROM); + qdev_prop_set_uint8(dev, "address", address); + /* FIXME: use an array of byte or block backend property? */ + SMBUS_EEPROM(dev)->init_data = eeprom_buf; + qdev_realize_and_unref(dev, (BusState *)smbus, &error_fatal); +} + +void smbus_eeprom_init(I2CBus *smbus, int nb_eeprom, + const uint8_t *eeprom_spd, int eeprom_spd_size) +{ + int i; + /* XXX: make this persistent */ + + assert(nb_eeprom <= 8); + uint8_t *eeprom_buf = g_malloc0(8 * SMBUS_EEPROM_SIZE); + if (eeprom_spd_size > 0) { + memcpy(eeprom_buf, eeprom_spd, eeprom_spd_size); + } + + for (i = 0; i < nb_eeprom; i++) { + smbus_eeprom_init_one(smbus, 0x50 + i, + eeprom_buf + (i * SMBUS_EEPROM_SIZE)); + } +} + +/* Generate SDRAM SPD EEPROM data describing a module of type and size */ +uint8_t *spd_data_generate(enum sdram_type type, ram_addr_t ram_size) +{ + uint8_t *spd; + uint8_t nbanks; + uint16_t density; + uint32_t size; + int min_log2, max_log2, sz_log2; + int i; + + switch (type) { + case SDR: + min_log2 = 2; + max_log2 = 9; + break; + case DDR: + min_log2 = 5; + max_log2 = 12; + break; + case DDR2: + min_log2 = 7; + max_log2 = 14; + break; + default: + g_assert_not_reached(); + } + size = ram_size >> 20; /* work in terms of megabytes */ + sz_log2 = 31 - clz32(size); + size = 1U << sz_log2; + assert(ram_size == size * MiB); + assert(sz_log2 >= min_log2); + + nbanks = 1; + while (sz_log2 > max_log2 && nbanks < 8) { + sz_log2--; + nbanks *= 2; + } + + assert(size == (1ULL << sz_log2) * nbanks); + + /* split to 2 banks if possible to avoid a bug in MIPS Malta firmware */ + if (nbanks == 1 && sz_log2 > min_log2) { + sz_log2--; + nbanks++; + } + + density = 1ULL << (sz_log2 - 2); + switch (type) { + case DDR2: + density = (density & 0xe0) | (density >> 8 & 0x1f); + break; + case DDR: + density = (density & 0xf8) | (density >> 8 & 0x07); + break; + case SDR: + default: + density &= 0xff; + break; + } + + spd = g_malloc0(256); + spd[0] = 128; /* data bytes in EEPROM */ + spd[1] = 8; /* log2 size of EEPROM */ + spd[2] = type; + spd[3] = 13; /* row address bits */ + spd[4] = 10; /* column address bits */ + spd[5] = (type == DDR2 ? 
nbanks - 1 : nbanks); + spd[6] = 64; /* module data width */ + /* reserved / data width high */ + spd[8] = 4; /* interface voltage level */ + spd[9] = 0x25; /* highest CAS latency */ + spd[10] = 1; /* access time */ + /* DIMM configuration 0 = non-ECC */ + spd[12] = 0x82; /* refresh requirements */ + spd[13] = 8; /* primary SDRAM width */ + /* ECC SDRAM width */ + spd[15] = (type == DDR2 ? 0 : 1); /* reserved / delay for random col rd */ + spd[16] = 12; /* burst lengths supported */ + spd[17] = 4; /* banks per SDRAM device */ + spd[18] = 12; /* ~CAS latencies supported */ + spd[19] = (type == DDR2 ? 0 : 1); /* reserved / ~CS latencies supported */ + spd[20] = 2; /* DIMM type / ~WE latencies */ + spd[21] = (type < DDR2 ? 0x20 : 0); /* module features */ + /* memory chip features */ + spd[23] = 0x12; /* clock cycle time @ medium CAS latency */ + /* data access time */ + /* clock cycle time @ short CAS latency */ + /* data access time */ + spd[27] = 20; /* min. row precharge time */ + spd[28] = 15; /* min. row active row delay */ + spd[29] = 20; /* min. ~RAS to ~CAS delay */ + spd[30] = 45; /* min. active to precharge time */ + spd[31] = density; + spd[32] = 20; /* addr/cmd setup time */ + spd[33] = 8; /* addr/cmd hold time */ + spd[34] = 20; /* data input setup time */ + spd[35] = 8; /* data input hold time */ + + /* checksum */ + for (i = 0; i < 63; i++) { + spd[63] += spd[i]; + } + return spd; +} diff --git a/hw/i2c/smbus_ich9.c b/hw/i2c/smbus_ich9.c new file mode 100644 index 000000000..44dd5653b --- /dev/null +++ b/hw/i2c/smbus_ich9.c @@ -0,0 +1,155 @@ +/* + * ACPI implementation + * + * Copyright (c) 2006 Fabrice Bellard + * Copyright (c) 2009 Isaku Yamahata + * VA Linux Systems Japan K.K. + * Copyright (C) 2012 Jason Baron + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
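
A worked example of the spd_data_generate() sizing math above (a sketch, not from the patch): for a 256 MiB SDR module, size = 256 so sz_log2 = 8, which already fits max_log2 = 9; the Malta-friendly split then gives sz_log2 = 7 and nbanks = 2, and density = 1 << (7 - 2) = 0x20:

    uint8_t *spd = spd_data_generate(SDR, 256 * MiB);
    assert(spd[5] == 2);        /* banks on the module, after the split */
    assert(spd[31] == 0x20);    /* module bank density byte */
    g_free(spd);
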
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see + */ + +#include "qemu/osdep.h" +#include "qemu/range.h" +#include "hw/i2c/pm_smbus.h" +#include "hw/pci/pci.h" +#include "migration/vmstate.h" +#include "qemu/module.h" + +#include "hw/i386/ich9.h" +#include "qom/object.h" + +OBJECT_DECLARE_SIMPLE_TYPE(ICH9SMBState, ICH9_SMB_DEVICE) + +struct ICH9SMBState { + PCIDevice dev; + + bool irq_enabled; + + PMSMBus smb; +}; + +static bool ich9_vmstate_need_smbus(void *opaque, int version_id) +{ + return pm_smbus_vmstate_needed(); +} + +static const VMStateDescription vmstate_ich9_smbus = { + .name = "ich9_smb", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_PCI_DEVICE(dev, ICH9SMBState), + VMSTATE_BOOL_TEST(irq_enabled, ICH9SMBState, ich9_vmstate_need_smbus), + VMSTATE_STRUCT_TEST(smb, ICH9SMBState, ich9_vmstate_need_smbus, 1, + pmsmb_vmstate, PMSMBus), + VMSTATE_END_OF_LIST() + } +}; + +static void ich9_smbus_write_config(PCIDevice *d, uint32_t address, + uint32_t val, int len) +{ + ICH9SMBState *s = ICH9_SMB_DEVICE(d); + + pci_default_write_config(d, address, val, len); + if (range_covers_byte(address, len, ICH9_SMB_HOSTC)) { + uint8_t hostc = s->dev.config[ICH9_SMB_HOSTC]; + if (hostc & ICH9_SMB_HOSTC_HST_EN) { + memory_region_set_enabled(&s->smb.io, true); + } else { + memory_region_set_enabled(&s->smb.io, false); + } + s->smb.i2c_enable = (hostc & ICH9_SMB_HOSTC_I2C_EN) != 0; + if (hostc & ICH9_SMB_HOSTC_SSRESET) { + s->smb.reset(&s->smb); + s->dev.config[ICH9_SMB_HOSTC] &= ~ICH9_SMB_HOSTC_SSRESET; + } + } +} + +static void ich9_smbus_realize(PCIDevice *d, Error **errp) +{ + ICH9SMBState *s = ICH9_SMB_DEVICE(d); + + /* TODO? D31IP.SMIP in chipset configuration space */ + pci_config_set_interrupt_pin(d->config, 0x01); /* interrupt pin 1 */ + + pci_set_byte(d->config + ICH9_SMB_HOSTC, 0); + /* TODO bar0, bar1: 64bit BAR support*/ + + pm_smbus_init(&d->qdev, &s->smb, false); + pci_register_bar(d, ICH9_SMB_SMB_BASE_BAR, PCI_BASE_ADDRESS_SPACE_IO, + &s->smb.io); +} + +static void ich9_smb_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + + k->vendor_id = PCI_VENDOR_ID_INTEL; + k->device_id = PCI_DEVICE_ID_INTEL_ICH9_6; + k->revision = ICH9_A2_SMB_REVISION; + k->class_id = PCI_CLASS_SERIAL_SMBUS; + dc->vmsd = &vmstate_ich9_smbus; + dc->desc = "ICH9 SMBUS Bridge"; + k->realize = ich9_smbus_realize; + k->config_write = ich9_smbus_write_config; + /* + * Reason: part of ICH9 southbridge, needs to be wired up by + * pc_q35_init() + */ + dc->user_creatable = false; +} + +static void ich9_smb_set_irq(PMSMBus *pmsmb, bool enabled) +{ + ICH9SMBState *s = pmsmb->opaque; + + if (enabled == s->irq_enabled) { + return; + } + + s->irq_enabled = enabled; + pci_set_irq(&s->dev, enabled); +} + +I2CBus *ich9_smb_init(PCIBus *bus, int devfn, uint32_t smb_io_base) +{ + PCIDevice *d = + pci_create_simple_multifunction(bus, devfn, true, TYPE_ICH9_SMB_DEVICE); + ICH9SMBState *s = ICH9_SMB_DEVICE(d); + s->smb.set_irq = ich9_smb_set_irq; + s->smb.opaque = s; + return s->smb.smbus; +} + +static const TypeInfo ich9_smb_info = { + .name = TYPE_ICH9_SMB_DEVICE, + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(ICH9SMBState), + .class_init = ich9_smb_class_init, + .interfaces = (InterfaceInfo[]) { + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { }, + }, +}; + +static void ich9_smb_register(void) +{ + type_register_static(&ich9_smb_info); 
+} + +type_init(ich9_smb_register); diff --git a/hw/i2c/smbus_master.c b/hw/i2c/smbus_master.c new file mode 100644 index 000000000..6a53c34e7 --- /dev/null +++ b/hw/i2c/smbus_master.c @@ -0,0 +1,164 @@ +/* + * QEMU SMBus host (master) emulation. + * + * This code emulates SMBus transactions from the master point of view, + * it runs the individual I2C transaction to do the SMBus protocol + * over I2C. + * + * Copyright (c) 2007 CodeSourcery. + * Written by Paul Brook + * + * This code is licensed under the LGPL. + */ + +#include "qemu/osdep.h" +#include "hw/i2c/i2c.h" +#include "hw/i2c/smbus_master.h" + +/* Master device commands. */ +int smbus_quick_command(I2CBus *bus, uint8_t addr, int read) +{ + if (i2c_start_transfer(bus, addr, read)) { + return -1; + } + i2c_end_transfer(bus); + return 0; +} + +int smbus_receive_byte(I2CBus *bus, uint8_t addr) +{ + uint8_t data; + + if (i2c_start_recv(bus, addr)) { + return -1; + } + data = i2c_recv(bus); + i2c_nack(bus); + i2c_end_transfer(bus); + return data; +} + +int smbus_send_byte(I2CBus *bus, uint8_t addr, uint8_t data) +{ + if (i2c_start_send(bus, addr)) { + return -1; + } + i2c_send(bus, data); + i2c_end_transfer(bus); + return 0; +} + +int smbus_read_byte(I2CBus *bus, uint8_t addr, uint8_t command) +{ + uint8_t data; + if (i2c_start_send(bus, addr)) { + return -1; + } + i2c_send(bus, command); + if (i2c_start_recv(bus, addr)) { + i2c_end_transfer(bus); + return -1; + } + data = i2c_recv(bus); + i2c_nack(bus); + i2c_end_transfer(bus); + return data; +} + +int smbus_write_byte(I2CBus *bus, uint8_t addr, uint8_t command, uint8_t data) +{ + if (i2c_start_send(bus, addr)) { + return -1; + } + i2c_send(bus, command); + i2c_send(bus, data); + i2c_end_transfer(bus); + return 0; +} + +int smbus_read_word(I2CBus *bus, uint8_t addr, uint8_t command) +{ + uint16_t data; + if (i2c_start_send(bus, addr)) { + return -1; + } + i2c_send(bus, command); + if (i2c_start_recv(bus, addr)) { + i2c_end_transfer(bus); + return -1; + } + data = i2c_recv(bus); + data |= i2c_recv(bus) << 8; + i2c_nack(bus); + i2c_end_transfer(bus); + return data; +} + +int smbus_write_word(I2CBus *bus, uint8_t addr, uint8_t command, uint16_t data) +{ + if (i2c_start_send(bus, addr)) { + return -1; + } + i2c_send(bus, command); + i2c_send(bus, data & 0xff); + i2c_send(bus, data >> 8); + i2c_end_transfer(bus); + return 0; +} + +int smbus_read_block(I2CBus *bus, uint8_t addr, uint8_t command, uint8_t *data, + int len, bool recv_len, bool send_cmd) +{ + int rlen; + int i; + + if (send_cmd) { + if (i2c_start_send(bus, addr)) { + return -1; + } + i2c_send(bus, command); + } + if (i2c_start_recv(bus, addr)) { + if (send_cmd) { + i2c_end_transfer(bus); + } + return -1; + } + if (recv_len) { + rlen = i2c_recv(bus); + } else { + rlen = len; + } + if (rlen > len) { + rlen = 0; + } + for (i = 0; i < rlen; i++) { + data[i] = i2c_recv(bus); + } + i2c_nack(bus); + i2c_end_transfer(bus); + return rlen; +} + +int smbus_write_block(I2CBus *bus, uint8_t addr, uint8_t command, uint8_t *data, + int len, bool send_len) +{ + int i; + + if (len > 32) { + len = 32; + } + + if (i2c_start_send(bus, addr)) { + return -1; + } + i2c_send(bus, command); + if (send_len) { + i2c_send(bus, len); + } + for (i = 0; i < len; i++) { + i2c_send(bus, data[i]); + } + i2c_end_transfer(bus); + return 0; +} diff --git a/hw/i2c/smbus_slave.c b/hw/i2c/smbus_slave.c new file mode 100644 index 000000000..5d10e2766 --- /dev/null +++ b/hw/i2c/smbus_slave.c @@ -0,0 +1,237 @@ +/* + * QEMU SMBus device emulation. 
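
The master-side helpers above compose each SMBus transaction from raw I2C phases: start, command byte, an optional repeated start for reads, data, stop. A minimal sketch of driving them, assuming an I2CBus *bus and a device at the hypothetical address 0x48:

    if (smbus_quick_command(bus, 0x48, 0) == 0) {   /* device ACKed its address */
        smbus_write_word(bus, 0x48, 0x01, 0x1234);  /* low byte goes on the wire first */
        int w = smbus_read_word(bus, 0x48, 0x01);   /* -1 on NACK, else the 16-bit value */
    }
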
+ *
+ * This code is a helper for SMBus device emulation. It implements an
+ * I2C device interface, runs the SMBus protocol from the device
+ * point of view, and maps it to simple calls for the device to emulate.
+ *
+ * Copyright (c) 2007 CodeSourcery.
+ * Written by Paul Brook
+ *
+ * This code is licensed under the LGPL.
+ */
+
+/* TODO: Implement PEC. */
+
+#include "qemu/osdep.h"
+#include "hw/i2c/i2c.h"
+#include "hw/i2c/smbus_slave.h"
+#include "migration/vmstate.h"
+#include "qemu/module.h"
+
+//#define DEBUG_SMBUS 1
+
+#ifdef DEBUG_SMBUS
+#define DPRINTF(fmt, ...) \
+do { printf("smbus(%02x): " fmt , dev->i2c.address, ## __VA_ARGS__); } while (0)
+#define BADF(fmt, ...) \
+do { fprintf(stderr, "smbus: error: " fmt , ## __VA_ARGS__); exit(1); } while (0)
+#else
+#define DPRINTF(fmt, ...) do {} while (0)
+#define BADF(fmt, ...) \
+do { fprintf(stderr, "smbus: error: " fmt , ## __VA_ARGS__); } while (0)
+#endif
+
+enum {
+    SMBUS_IDLE,
+    SMBUS_WRITE_DATA,
+    SMBUS_READ_DATA,
+    SMBUS_DONE,
+    SMBUS_CONFUSED = -1
+};
+
+static void smbus_do_quick_cmd(SMBusDevice *dev, int recv)
+{
+    SMBusDeviceClass *sc = SMBUS_DEVICE_GET_CLASS(dev);
+
+    DPRINTF("Quick Command %d\n", recv);
+    if (sc->quick_cmd) {
+        sc->quick_cmd(dev, recv);
+    }
+}
+
+static void smbus_do_write(SMBusDevice *dev)
+{
+    SMBusDeviceClass *sc = SMBUS_DEVICE_GET_CLASS(dev);
+
+    DPRINTF("Command %d len %d\n", dev->data_buf[0], dev->data_len);
+    if (sc->write_data) {
+        sc->write_data(dev, dev->data_buf, dev->data_len);
+    }
+}
+
+static int smbus_i2c_event(I2CSlave *s, enum i2c_event event)
+{
+    SMBusDevice *dev = SMBUS_DEVICE(s);
+
+    switch (event) {
+    case I2C_START_SEND:
+        switch (dev->mode) {
+        case SMBUS_IDLE:
+            DPRINTF("Incoming data\n");
+            dev->mode = SMBUS_WRITE_DATA;
+            break;
+
+        default:
+            BADF("Unexpected send start condition in state %d\n", dev->mode);
+            dev->mode = SMBUS_CONFUSED;
+            break;
+        }
+        break;
+
+    case I2C_START_RECV:
+        switch (dev->mode) {
+        case SMBUS_IDLE:
+            DPRINTF("Read mode\n");
+            dev->mode = SMBUS_READ_DATA;
+            break;
+
+        case SMBUS_WRITE_DATA:
+            if (dev->data_len == 0) {
+                BADF("Read after write with no data\n");
+                dev->mode = SMBUS_CONFUSED;
+            } else {
+                smbus_do_write(dev);
+                DPRINTF("Read mode\n");
+                dev->mode = SMBUS_READ_DATA;
+            }
+            break;
+
+        default:
+            BADF("Unexpected recv start condition in state %d\n", dev->mode);
+            dev->mode = SMBUS_CONFUSED;
+            break;
+        }
+        break;
+
+    case I2C_FINISH:
+        if (dev->data_len == 0) {
+            if (dev->mode == SMBUS_WRITE_DATA || dev->mode == SMBUS_READ_DATA) {
+                smbus_do_quick_cmd(dev, dev->mode == SMBUS_READ_DATA);
+            }
+        } else {
+            switch (dev->mode) {
+            case SMBUS_WRITE_DATA:
+                smbus_do_write(dev);
+                break;
+
+            case SMBUS_READ_DATA:
+                BADF("Unexpected stop during receive\n");
+                break;
+
+            default:
+                /* Nothing to do. */
+                break;
+            }
+        }
+        dev->mode = SMBUS_IDLE;
+        dev->data_len = 0;
+        break;
+
+    case I2C_NACK:
+        switch (dev->mode) {
+        case SMBUS_DONE:
+            /* Nothing to do.
*/ + break; + + case SMBUS_READ_DATA: + dev->mode = SMBUS_DONE; + break; + + default: + BADF("Unexpected NACK in state %d\n", dev->mode); + dev->mode = SMBUS_CONFUSED; + break; + } + } + + return 0; +} + +static uint8_t smbus_i2c_recv(I2CSlave *s) +{ + SMBusDevice *dev = SMBUS_DEVICE(s); + SMBusDeviceClass *sc = SMBUS_DEVICE_GET_CLASS(dev); + uint8_t ret = 0xff; + + switch (dev->mode) { + case SMBUS_READ_DATA: + if (sc->receive_byte) { + ret = sc->receive_byte(dev); + } + DPRINTF("Read data %02x\n", ret); + break; + + default: + BADF("Unexpected read in state %d\n", dev->mode); + dev->mode = SMBUS_CONFUSED; + break; + } + + return ret; +} + +static int smbus_i2c_send(I2CSlave *s, uint8_t data) +{ + SMBusDevice *dev = SMBUS_DEVICE(s); + + switch (dev->mode) { + case SMBUS_WRITE_DATA: + DPRINTF("Write data %02x\n", data); + if (dev->data_len >= sizeof(dev->data_buf)) { + BADF("Too many bytes sent\n"); + } else { + dev->data_buf[dev->data_len++] = data; + } + break; + + default: + BADF("Unexpected write in state %d\n", dev->mode); + break; + } + + return 0; +} + +static void smbus_device_class_init(ObjectClass *klass, void *data) +{ + I2CSlaveClass *sc = I2C_SLAVE_CLASS(klass); + + sc->event = smbus_i2c_event; + sc->recv = smbus_i2c_recv; + sc->send = smbus_i2c_send; +} + +bool smbus_vmstate_needed(SMBusDevice *dev) +{ + return dev->mode != SMBUS_IDLE; +} + +const VMStateDescription vmstate_smbus_device = { + .name = TYPE_SMBUS_DEVICE, + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_I2C_SLAVE(i2c, SMBusDevice), + VMSTATE_INT32(mode, SMBusDevice), + VMSTATE_INT32(data_len, SMBusDevice), + VMSTATE_UINT8_ARRAY(data_buf, SMBusDevice, SMBUS_DATA_MAX_LEN), + VMSTATE_END_OF_LIST() + } +}; + +static const TypeInfo smbus_device_type_info = { + .name = TYPE_SMBUS_DEVICE, + .parent = TYPE_I2C_SLAVE, + .instance_size = sizeof(SMBusDevice), + .abstract = true, + .class_size = sizeof(SMBusDeviceClass), + .class_init = smbus_device_class_init, +}; + +static void smbus_device_register_types(void) +{ + type_register_static(&smbus_device_type_info); +} + +type_init(smbus_device_register_types) diff --git a/hw/i2c/trace-events b/hw/i2c/trace-events new file mode 100644 index 000000000..7d8907c1e --- /dev/null +++ b/hw/i2c/trace-events @@ -0,0 +1,33 @@ +# See docs/devel/tracing.rst for syntax documentation. 
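
A device built on the smbus_slave.c state machine above only supplies the class hooks; the core sequences the raw I2C events into quick commands, writes, and reads. A minimal sketch with invented names (the same pattern as smbus_eeprom.c earlier in this patch):

    static uint8_t demo_receive_byte(SMBusDevice *dev)
    {
        return 0x42;    /* data phase of Receive Byte / Read Byte */
    }

    static int demo_write_data(SMBusDevice *dev, uint8_t *buf, uint8_t len)
    {
        /* buf[0] is the command code for Write Byte/Word and Block Write */
        return 0;
    }

    static void demo_class_init(ObjectClass *klass, void *data)
    {
        SMBusDeviceClass *sc = SMBUS_DEVICE_CLASS(klass);

        sc->receive_byte = demo_receive_byte;
        sc->write_data = demo_write_data;
    }
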
+ +# core.c + +i2c_event(const char *event, uint8_t address) "%s(addr:0x%02x)" +i2c_send(uint8_t address, uint8_t data) "send(addr:0x%02x) data:0x%02x" +i2c_recv(uint8_t address, uint8_t data) "recv(addr:0x%02x) data:0x%02x" + +# aspeed_i2c.c + +aspeed_i2c_bus_cmd(uint32_t cmd, const char *cmd_flags, uint32_t count, uint32_t intr_status) "handling cmd=0x%x %s count=%d intr=0x%x" +aspeed_i2c_bus_raise_interrupt(uint32_t intr_status, const char *str1, const char *str2, const char *str3, const char *str4, const char *str5) "handled intr=0x%x %s%s%s%s%s" +aspeed_i2c_bus_read(uint32_t busid, uint64_t offset, unsigned size, uint64_t value) "bus[%d]: To 0x%" PRIx64 " of size %u: 0x%" PRIx64 +aspeed_i2c_bus_write(uint32_t busid, uint64_t offset, unsigned size, uint64_t value) "bus[%d]: To 0x%" PRIx64 " of size %u: 0x%" PRIx64 +aspeed_i2c_bus_send(const char *mode, int i, int count, uint8_t byte) "%s send %d/%d 0x%02x" +aspeed_i2c_bus_recv(const char *mode, int i, int count, uint8_t byte) "%s recv %d/%d 0x%02x" + +# npcm7xx_smbus.c + +npcm7xx_smbus_read(const char *id, uint64_t offset, uint64_t value, unsigned size) "%s offset: 0x%04" PRIx64 " value: 0x%02" PRIx64 " size: %u" +npcm7xx_smbus_write(const char *id, uint64_t offset, uint64_t value, unsigned size) "%s offset: 0x%04" PRIx64 " value: 0x%02" PRIx64 " size: %u" +npcm7xx_smbus_start(const char *id, int success) "%s starting, success: %d" +npcm7xx_smbus_send_address(const char *id, uint8_t addr, int recv, int success) "%s sending address: 0x%02x, recv: %d, success: %d" +npcm7xx_smbus_send_byte(const char *id, uint8_t value, int success) "%s send byte: 0x%02x, success: %d" +npcm7xx_smbus_recv_byte(const char *id, uint8_t value) "%s recv byte: 0x%02x" +npcm7xx_smbus_stop(const char *id) "%s stopping" +npcm7xx_smbus_nack(const char *id) "%s nacking" +npcm7xx_smbus_recv_fifo(const char *id, uint8_t received, uint8_t expected) "%s recv fifo: received %u, expected %u" + +# i2c-mux-pca954x.c + +pca954x_write_bytes(uint8_t value) "PCA954X write data: 0x%02x" +pca954x_read_data(uint8_t value) "PCA954X read data: 0x%02x" diff --git a/hw/i2c/trace.h b/hw/i2c/trace.h new file mode 100644 index 000000000..4843a8d54 --- /dev/null +++ b/hw/i2c/trace.h @@ -0,0 +1 @@ +#include "trace/trace-hw_i2c.h" diff --git a/hw/i2c/versatile_i2c.c b/hw/i2c/versatile_i2c.c new file mode 100644 index 000000000..3a04ba396 --- /dev/null +++ b/hw/i2c/versatile_i2c.c @@ -0,0 +1,112 @@ +/* + * ARM SBCon two-wire serial bus interface (I2C bitbang) + * a.k.a. ARM Versatile I2C controller + * + * Copyright (c) 2006-2007 CodeSourcery. + * Copyright (c) 2012 Oskar Andero + * + * This file is derived from hw/realview.c by Paul Brook + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . 
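
For reference on the trace-events file above: QEMU's tracetool turns each declared event into a trace_<name>() helper, reached through the per-directory trace.h wrapper shown above, so a call site looks like this (a sketch, per docs/devel/tracing.rst):

    #include "trace.h"

    /* e.g. from the I2C core when a byte is sent to a slave */
    trace_i2c_send(address, data);
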
+ * + */ + +#include "qemu/osdep.h" +#include "hw/i2c/arm_sbcon_i2c.h" +#include "hw/registerfields.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "qom/object.h" + +typedef ArmSbconI2CState VersatileI2CState; +DECLARE_INSTANCE_CHECKER(VersatileI2CState, VERSATILE_I2C, + TYPE_VERSATILE_I2C) + + + +REG32(CONTROL_GET, 0) +REG32(CONTROL_SET, 0) +REG32(CONTROL_CLR, 4) + +#define SCL BIT(0) +#define SDA BIT(1) + +static uint64_t versatile_i2c_read(void *opaque, hwaddr offset, + unsigned size) +{ + VersatileI2CState *s = (VersatileI2CState *)opaque; + + switch (offset) { + case A_CONTROL_SET: + return (s->out & 1) | (s->in << 1); + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad offset 0x%x\n", __func__, (int)offset); + return -1; + } +} + +static void versatile_i2c_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + VersatileI2CState *s = (VersatileI2CState *)opaque; + + switch (offset) { + case A_CONTROL_SET: + s->out |= value & 3; + break; + case A_CONTROL_CLR: + s->out &= ~value; + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad offset 0x%x\n", __func__, (int)offset); + } + bitbang_i2c_set(&s->bitbang, BITBANG_I2C_SCL, (s->out & SCL) != 0); + s->in = bitbang_i2c_set(&s->bitbang, BITBANG_I2C_SDA, (s->out & SDA) != 0); +} + +static const MemoryRegionOps versatile_i2c_ops = { + .read = versatile_i2c_read, + .write = versatile_i2c_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void versatile_i2c_init(Object *obj) +{ + DeviceState *dev = DEVICE(obj); + VersatileI2CState *s = VERSATILE_I2C(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + I2CBus *bus; + + bus = i2c_init_bus(dev, "i2c"); + bitbang_i2c_init(&s->bitbang, bus); + memory_region_init_io(&s->iomem, obj, &versatile_i2c_ops, s, + "arm_sbcon_i2c", 0x1000); + sysbus_init_mmio(sbd, &s->iomem); +} + +static const TypeInfo versatile_i2c_info = { + .name = TYPE_VERSATILE_I2C, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(VersatileI2CState), + .instance_init = versatile_i2c_init, +}; + +static void versatile_i2c_register_types(void) +{ + type_register_static(&versatile_i2c_info); +} + +type_init(versatile_i2c_register_types) diff --git a/hw/i386/Kconfig b/hw/i386/Kconfig new file mode 100644 index 000000000..d22ac4a4b --- /dev/null +++ b/hw/i386/Kconfig @@ -0,0 +1,139 @@ +config X86_FW_OVMF + bool + +config SEV + bool + select X86_FW_OVMF + depends on KVM + +config SGX + bool + depends on KVM + +config PC + bool + imply APPLESMC + imply HYPERV + imply ISA_IPMI_KCS + imply ISA_IPMI_BT + imply PCI_IPMI_KCS + imply PCI_IPMI_BT + imply IPMI_SSIF + imply ISA_DEBUG + imply PARALLEL + imply PCI_DEVICES + imply PVPANIC_ISA + imply QXL + imply SEV + imply SGX + imply SGA + imply TEST_DEVICES + imply TPM_CRB + imply TPM_TIS_ISA + imply VGA_PCI + imply VIRTIO_VGA + imply NVDIMM + select FDC_ISA + select I8259 + select I8254 + select PCKBD + select PCSPK + select I8257 + select MC146818RTC + # For ACPI builder: + select SERIAL_ISA + select ACPI_PCI + select ACPI_VMGENID + select VIRTIO_PMEM_SUPPORTED + select VIRTIO_MEM_SUPPORTED + +config PC_PCI + bool + select APIC + select IOAPIC + select APM + select PC + +config PC_ACPI + bool + select ACPI_X86 + select ACPI_CPU_HOTPLUG + select ACPI_MEMORY_HOTPLUG + select ACPI_VIOT + select SMBUS_EEPROM + select PFLASH_CFI01 + depends on ACPI_SMBUS + +config I440FX + bool + imply E1000_PCI + imply VMPORT + imply VMMOUSE + select PC_PCI + select PC_ACPI + select ACPI_SMBUS + select PCI_I440FX + select PIIX3 + select IDE_PIIX + select DIMM + 
select SMBIOS + select FW_CFG_DMA + +config ISAPC + bool + select ISA_BUS + select PC + select IDE_ISA + select VGA_ISA + # FIXME: it is in the same file as i440fx, and does not compile + # if separated + depends on I440FX + +config Q35 + bool + imply VTD + imply AMD_IOMMU + imply E1000E_PCI_EXPRESS + imply VMPORT + imply VMMOUSE + select PC_PCI + select PC_ACPI + select PCI_EXPRESS_Q35 + select LPC_ICH9 + select AHCI_ICH9 + select DIMM + select SMBIOS + select FW_CFG_DMA + +config MICROVM + bool + select SERIAL_ISA # for serial_hds_isa_init() + select ISA_BUS + select APIC + select IOAPIC + select I8259 + select MC146818RTC + select VIRTIO_MMIO + select ACPI_HW_REDUCED + select PCI_EXPRESS_GENERIC_BRIDGE + select USB_XHCI_SYSBUS + select I8254 + +config X86_IOMMU + bool + depends on PC + +config VTD + bool + select X86_IOMMU + +config AMD_IOMMU + bool + select X86_IOMMU + +config VMPORT + bool + +config VMMOUSE + bool + depends on VMPORT diff --git a/hw/i386/acpi-build.c b/hw/i386/acpi-build.c new file mode 100644 index 000000000..a99c6e4fe --- /dev/null +++ b/hw/i386/acpi-build.c @@ -0,0 +1,2873 @@ +/* Support for generating ACPI tables and passing them to Guests + * + * Copyright (C) 2008-2010 Kevin O'Connor + * Copyright (C) 2006 Fabrice Bellard + * Copyright (C) 2013 Red Hat Inc + * + * Author: Michael S. Tsirkin + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qapi/qmp/qnum.h" +#include "acpi-build.h" +#include "acpi-common.h" +#include "qemu/bitmap.h" +#include "qemu/error-report.h" +#include "hw/pci/pci.h" +#include "hw/core/cpu.h" +#include "target/i386/cpu.h" +#include "hw/misc/pvpanic.h" +#include "hw/timer/hpet.h" +#include "hw/acpi/acpi-defs.h" +#include "hw/acpi/acpi.h" +#include "hw/acpi/cpu.h" +#include "hw/nvram/fw_cfg.h" +#include "hw/acpi/bios-linker-loader.h" +#include "hw/isa/isa.h" +#include "hw/block/fdc.h" +#include "hw/acpi/memory_hotplug.h" +#include "sysemu/tpm.h" +#include "hw/acpi/tpm.h" +#include "hw/acpi/vmgenid.h" +#include "sysemu/tpm_backend.h" +#include "hw/rtc/mc146818rtc_regs.h" +#include "migration/vmstate.h" +#include "hw/mem/memory-device.h" +#include "hw/mem/nvdimm.h" +#include "sysemu/numa.h" +#include "sysemu/reset.h" +#include "hw/hyperv/vmbus-bridge.h" + +/* Supported chipsets: */ +#include "hw/southbridge/piix.h" +#include "hw/acpi/pcihp.h" +#include "hw/i386/fw_cfg.h" +#include "hw/i386/ich9.h" +#include "hw/pci/pci_bus.h" +#include "hw/pci-host/q35.h" +#include "hw/i386/x86-iommu.h" + +#include "hw/acpi/aml-build.h" +#include "hw/acpi/utils.h" +#include "hw/acpi/pci.h" + +#include "qom/qom-qobject.h" +#include "hw/i386/amd_iommu.h" +#include "hw/i386/intel_iommu.h" +#include "hw/virtio/virtio-iommu.h" + +#include "hw/acpi/ipmi.h" +#include "hw/acpi/hmat.h" +#include "hw/acpi/viot.h" + +/* These are used to size the ACPI tables for -M pc-i440fx-1.7 and + * -M pc-i440fx-2.0. 
Even if the actual amount of AML generated grows
+ * a little bit, there should be plenty of free space since the DSDT
+ * shrunk by ~1.5k between QEMU 2.0 and QEMU 2.1.
+ */
+#define ACPI_BUILD_LEGACY_CPU_AML_SIZE    97
+#define ACPI_BUILD_ALIGN_SIZE             0x1000
+
+#define ACPI_BUILD_TABLE_SIZE             0x20000
+
+/* #define DEBUG_ACPI_BUILD */
+#ifdef DEBUG_ACPI_BUILD
+#define ACPI_BUILD_DPRINTF(fmt, ...)        \
+    do {printf("ACPI_BUILD: " fmt, ## __VA_ARGS__); } while (0)
+#else
+#define ACPI_BUILD_DPRINTF(fmt, ...)
+#endif
+
+typedef struct AcpiPmInfo {
+    bool s3_disabled;
+    bool s4_disabled;
+    bool pcihp_bridge_en;
+    bool smi_on_cpuhp;
+    bool smi_on_cpu_unplug;
+    bool pcihp_root_en;
+    uint8_t s4_val;
+    AcpiFadtData fadt;
+    uint16_t cpu_hp_io_base;
+    uint16_t pcihp_io_base;
+    uint16_t pcihp_io_len;
+} AcpiPmInfo;
+
+typedef struct AcpiMiscInfo {
+    bool is_piix4;
+    bool has_hpet;
+#ifdef CONFIG_TPM
+    TPMVersion tpm_version;
+#endif
+    const unsigned char *dsdt_code;
+    unsigned dsdt_size;
+    uint16_t pvpanic_port;
+    uint16_t applesmc_io_base;
+} AcpiMiscInfo;
+
+typedef struct AcpiBuildPciBusHotplugState {
+    GArray *device_table;
+    GArray *notify_table;
+    struct AcpiBuildPciBusHotplugState *parent;
+    bool pcihp_bridge_en;
+} AcpiBuildPciBusHotplugState;
+
+typedef struct FwCfgTPMConfig {
+    uint32_t tpmppi_address;
+    uint8_t tpm_version;
+    uint8_t tpmppi_version;
+} QEMU_PACKED FwCfgTPMConfig;
+
+static bool acpi_get_mcfg(AcpiMcfgInfo *mcfg);
+
+const struct AcpiGenericAddress x86_nvdimm_acpi_dsmio = {
+    .space_id = AML_AS_SYSTEM_IO,
+    .address = NVDIMM_ACPI_IO_BASE,
+    .bit_width = NVDIMM_ACPI_IO_LEN << 3
+};
+
+static void init_common_fadt_data(MachineState *ms, Object *o,
+                                  AcpiFadtData *data)
+{
+    X86MachineState *x86ms = X86_MACHINE(ms);
+    /*
+     * "ICH9-LPC" or "PIIX4_PM" has a "smm-compat" property to keep the old
+     * behavior for compatibility regardless of smm_enabled, which doesn't
+     * conform to the ACPI spec.
+     */
+    bool smm_enabled = object_property_get_bool(o, "smm-compat", NULL) ?
+        true : x86_machine_is_smm_enabled(x86ms);
+    uint32_t io = object_property_get_uint(o, ACPI_PM_PROP_PM_IO_BASE, NULL);
+    AmlAddressSpace as = AML_AS_SYSTEM_IO;
+    AcpiFadtData fadt = {
+        .rev = 3,
+        .flags =
+            (1 << ACPI_FADT_F_WBINVD) |
+            (1 << ACPI_FADT_F_PROC_C1) |
+            (1 << ACPI_FADT_F_SLP_BUTTON) |
+            (1 << ACPI_FADT_F_RTC_S4) |
+            (1 << ACPI_FADT_F_USE_PLATFORM_CLOCK) |
+            /* APIC destination mode ("Flat Logical") has an upper limit of
+             * 8 CPUs; for more than 8 CPUs, "Clustered Logical" mode has to
+             * be used
+             */
+            ((ms->smp.max_cpus > 8) ?
+                (1 << ACPI_FADT_F_FORCE_APIC_CLUSTER_MODEL) : 0),
+        .int_model = 1 /* Multiple APIC */,
+        .rtc_century = RTC_CENTURY,
+        .plvl2_lat = 0xfff /* C2 state not supported */,
+        .plvl3_lat = 0xfff /* C3 state not supported */,
+        .smi_cmd = smm_enabled ? ACPI_PORT_SMI_CMD : 0,
+        .sci_int = object_property_get_uint(o, ACPI_PM_PROP_SCI_INT, NULL),
+        .acpi_enable_cmd =
+            smm_enabled ?
+            object_property_get_uint(o, ACPI_PM_PROP_ACPI_ENABLE_CMD, NULL) :
+            0,
+        .acpi_disable_cmd =
+            smm_enabled ?
+ object_property_get_uint(o, ACPI_PM_PROP_ACPI_DISABLE_CMD, NULL) : + 0, + .pm1a_evt = { .space_id = as, .bit_width = 4 * 8, .address = io }, + .pm1a_cnt = { .space_id = as, .bit_width = 2 * 8, + .address = io + 0x04 }, + .pm_tmr = { .space_id = as, .bit_width = 4 * 8, .address = io + 0x08 }, + .gpe0_blk = { .space_id = as, .bit_width = + object_property_get_uint(o, ACPI_PM_PROP_GPE0_BLK_LEN, NULL) * 8, + .address = object_property_get_uint(o, ACPI_PM_PROP_GPE0_BLK, NULL) + }, + }; + *data = fadt; +} + +static Object *object_resolve_type_unambiguous(const char *typename) +{ + bool ambig; + Object *o = object_resolve_path_type("", typename, &ambig); + + if (ambig || !o) { + return NULL; + } + return o; +} + +static void acpi_get_pm_info(MachineState *machine, AcpiPmInfo *pm) +{ + Object *piix = object_resolve_type_unambiguous(TYPE_PIIX4_PM); + Object *lpc = object_resolve_type_unambiguous(TYPE_ICH9_LPC_DEVICE); + Object *obj = piix ? piix : lpc; + QObject *o; + pm->cpu_hp_io_base = 0; + pm->pcihp_io_base = 0; + pm->pcihp_io_len = 0; + pm->smi_on_cpuhp = false; + pm->smi_on_cpu_unplug = false; + + assert(obj); + init_common_fadt_data(machine, obj, &pm->fadt); + if (piix) { + /* w2k requires FADT(rev1) or it won't boot, keep PC compatible */ + pm->fadt.rev = 1; + pm->cpu_hp_io_base = PIIX4_CPU_HOTPLUG_IO_BASE; + } + if (lpc) { + uint64_t smi_features = object_property_get_uint(lpc, + ICH9_LPC_SMI_NEGOTIATED_FEAT_PROP, NULL); + struct AcpiGenericAddress r = { .space_id = AML_AS_SYSTEM_IO, + .bit_width = 8, .address = ICH9_RST_CNT_IOPORT }; + pm->fadt.reset_reg = r; + pm->fadt.reset_val = 0xf; + pm->fadt.flags |= 1 << ACPI_FADT_F_RESET_REG_SUP; + pm->cpu_hp_io_base = ICH9_CPU_HOTPLUG_IO_BASE; + pm->smi_on_cpuhp = + !!(smi_features & BIT_ULL(ICH9_LPC_SMI_F_CPU_HOTPLUG_BIT)); + pm->smi_on_cpu_unplug = + !!(smi_features & BIT_ULL(ICH9_LPC_SMI_F_CPU_HOT_UNPLUG_BIT)); + } + pm->pcihp_io_base = + object_property_get_uint(obj, ACPI_PCIHP_IO_BASE_PROP, NULL); + pm->pcihp_io_len = + object_property_get_uint(obj, ACPI_PCIHP_IO_LEN_PROP, NULL); + + /* The above need not be conditional on machine type because the reset port + * happens to be the same on PIIX (pc) and ICH9 (q35). 
*/ + QEMU_BUILD_BUG_ON(ICH9_RST_CNT_IOPORT != PIIX_RCR_IOPORT); + + /* Fill in optional s3/s4 related properties */ + o = object_property_get_qobject(obj, ACPI_PM_PROP_S3_DISABLED, NULL); + if (o) { + pm->s3_disabled = qnum_get_uint(qobject_to(QNum, o)); + } else { + pm->s3_disabled = false; + } + qobject_unref(o); + o = object_property_get_qobject(obj, ACPI_PM_PROP_S4_DISABLED, NULL); + if (o) { + pm->s4_disabled = qnum_get_uint(qobject_to(QNum, o)); + } else { + pm->s4_disabled = false; + } + qobject_unref(o); + o = object_property_get_qobject(obj, ACPI_PM_PROP_S4_VAL, NULL); + if (o) { + pm->s4_val = qnum_get_uint(qobject_to(QNum, o)); + } else { + pm->s4_val = false; + } + qobject_unref(o); + + pm->pcihp_bridge_en = + object_property_get_bool(obj, ACPI_PM_PROP_ACPI_PCIHP_BRIDGE, + NULL); + pm->pcihp_root_en = + object_property_get_bool(obj, ACPI_PM_PROP_ACPI_PCI_ROOTHP, + NULL); +} + +static void acpi_get_misc_info(AcpiMiscInfo *info) +{ + Object *piix = object_resolve_type_unambiguous(TYPE_PIIX4_PM); + Object *lpc = object_resolve_type_unambiguous(TYPE_ICH9_LPC_DEVICE); + assert(!!piix != !!lpc); + + if (piix) { + info->is_piix4 = true; + } + if (lpc) { + info->is_piix4 = false; + } + + info->has_hpet = hpet_find(); +#ifdef CONFIG_TPM + info->tpm_version = tpm_get_version(tpm_find()); +#endif + info->pvpanic_port = pvpanic_port(); + info->applesmc_io_base = applesmc_port(); +} + +/* + * Because of the PXB hosts we cannot simply query TYPE_PCI_HOST_BRIDGE. + * On i386 arch we only have two pci hosts, so we can look only for them. + */ +Object *acpi_get_i386_pci_host(void) +{ + PCIHostState *host; + + host = PCI_HOST_BRIDGE(object_resolve_path("/machine/i440fx", NULL)); + if (!host) { + host = PCI_HOST_BRIDGE(object_resolve_path("/machine/q35", NULL)); + } + + return OBJECT(host); +} + +static void acpi_get_pci_holes(Range *hole, Range *hole64) +{ + Object *pci_host; + + pci_host = acpi_get_i386_pci_host(); + + if (!pci_host) { + return; + } + + range_set_bounds1(hole, + object_property_get_uint(pci_host, + PCI_HOST_PROP_PCI_HOLE_START, + NULL), + object_property_get_uint(pci_host, + PCI_HOST_PROP_PCI_HOLE_END, + NULL)); + range_set_bounds1(hole64, + object_property_get_uint(pci_host, + PCI_HOST_PROP_PCI_HOLE64_START, + NULL), + object_property_get_uint(pci_host, + PCI_HOST_PROP_PCI_HOLE64_END, + NULL)); +} + +static void acpi_align_size(GArray *blob, unsigned align) +{ + /* Align size to multiple of given size. This reduces the chance + * we need to change size in the future (breaking cross version migration). 
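
To make the alignment below concrete (worked numbers, not from the patch): a 0x1234-byte blob aligned to ACPI_BUILD_ALIGN_SIZE = 0x1000 is padded to ROUND_UP(0x1234, 0x1000) = 0x2000, so the table can later grow by up to 0xDCC bytes without the blob changing size across versions.
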
+ */ + g_array_set_size(blob, ROUND_UP(acpi_data_len(blob), align)); +} + +/* + * ACPI spec 1.0b, + * 5.2.6 Firmware ACPI Control Structure + */ +static void +build_facs(GArray *table_data) +{ + const char *sig = "FACS"; + const uint8_t reserved[40] = {}; + + g_array_append_vals(table_data, sig, 4); /* Signature */ + build_append_int_noprefix(table_data, 64, 4); /* Length */ + build_append_int_noprefix(table_data, 0, 4); /* Hardware Signature */ + build_append_int_noprefix(table_data, 0, 4); /* Firmware Waking Vector */ + build_append_int_noprefix(table_data, 0, 4); /* Global Lock */ + build_append_int_noprefix(table_data, 0, 4); /* Flags */ + g_array_append_vals(table_data, reserved, 40); /* Reserved */ +} + +static void build_append_pcihp_notify_entry(Aml *method, int slot) +{ + Aml *if_ctx; + int32_t devfn = PCI_DEVFN(slot, 0); + + if_ctx = aml_if(aml_and(aml_arg(0), aml_int(0x1U << slot), NULL)); + aml_append(if_ctx, aml_notify(aml_name("S%.02X", devfn), aml_arg(1))); + aml_append(method, if_ctx); +} + +static void build_append_pci_bus_devices(Aml *parent_scope, PCIBus *bus, + bool pcihp_bridge_en) +{ + Aml *dev, *notify_method = NULL, *method; + QObject *bsel; + PCIBus *sec; + int devfn; + + bsel = object_property_get_qobject(OBJECT(bus), ACPI_PCIHP_PROP_BSEL, NULL); + if (bsel) { + uint64_t bsel_val = qnum_get_uint(qobject_to(QNum, bsel)); + + aml_append(parent_scope, aml_name_decl("BSEL", aml_int(bsel_val))); + notify_method = aml_method("DVNT", 2, AML_NOTSERIALIZED); + } + + for (devfn = 0; devfn < ARRAY_SIZE(bus->devices); devfn++) { + DeviceClass *dc; + PCIDeviceClass *pc; + PCIDevice *pdev = bus->devices[devfn]; + int slot = PCI_SLOT(devfn); + int func = PCI_FUNC(devfn); + /* ACPI spec: 1.0b: Table 6-2 _ADR Object Bus Types, PCI type */ + int adr = slot << 16 | func; + bool hotplug_enabled_dev; + bool bridge_in_acpi; + bool cold_plugged_bridge; + + if (!pdev) { + /* + * add hotplug slots for non present devices. + * hotplug is supported only for non-multifunction device + * so generate device description only for function 0 + */ + if (bsel && !func) { + if (pci_bus_is_express(bus) && slot > 0) { + break; + } + dev = aml_device("S%.02X", devfn); + aml_append(dev, aml_name_decl("_SUN", aml_int(slot))); + aml_append(dev, aml_name_decl("_ADR", aml_int(adr))); + method = aml_method("_EJ0", 1, AML_NOTSERIALIZED); + aml_append(method, + aml_call2("PCEJ", aml_name("BSEL"), aml_name("_SUN")) + ); + aml_append(dev, method); + method = aml_method("_DSM", 4, AML_SERIALIZED); + aml_append(method, + aml_return(aml_call6("PDSM", aml_arg(0), aml_arg(1), + aml_arg(2), aml_arg(3), + aml_name("BSEL"), aml_name("_SUN"))) + ); + aml_append(dev, method); + aml_append(parent_scope, dev); + + build_append_pcihp_notify_entry(notify_method, slot); + } + continue; + } + + pc = PCI_DEVICE_GET_CLASS(pdev); + dc = DEVICE_GET_CLASS(pdev); + + /* + * Cold plugged bridges aren't themselves hot-pluggable. + * Hotplugged bridges *are* hot-pluggable. 
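
As a concrete instance of the _ADR encoding used above (slot << 16 | func), with numbers chosen purely for illustration:

    /* slot 3, function 1: devfn = 3 << 3 | 1 = 0x19, so the hotplug AML
     * device is named "S19" and gets _ADR = 0x00030001 */
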
+ */ + cold_plugged_bridge = pc->is_bridge && !DEVICE(pdev)->hotplugged; + bridge_in_acpi = cold_plugged_bridge && pcihp_bridge_en; + + hotplug_enabled_dev = bsel && dc->hotpluggable && !cold_plugged_bridge; + + if (pc->class_id == PCI_CLASS_BRIDGE_ISA) { + continue; + } + + /* + * allow describing coldplugged bridges in ACPI even if they are not + * on function 0, as they are not unpluggable, for all other devices + * generate description only for function 0 per slot + */ + if (func && !bridge_in_acpi) { + continue; + } + + /* start to compose PCI device descriptor */ + dev = aml_device("S%.02X", devfn); + aml_append(dev, aml_name_decl("_ADR", aml_int(adr))); + + if (bsel) { + /* + * Can't declare _SUN here for every device as it changes 'slot' + * enumeration order in linux kernel, so use another variable for it + */ + aml_append(dev, aml_name_decl("ASUN", aml_int(slot))); + method = aml_method("_DSM", 4, AML_SERIALIZED); + aml_append(method, aml_return( + aml_call6("PDSM", aml_arg(0), aml_arg(1), aml_arg(2), + aml_arg(3), aml_name("BSEL"), aml_name("ASUN")) + )); + aml_append(dev, method); + } + + if (pc->class_id == PCI_CLASS_DISPLAY_VGA) { + /* add VGA specific AML methods */ + int s3d; + + if (object_dynamic_cast(OBJECT(pdev), "qxl-vga")) { + s3d = 3; + } else { + s3d = 0; + } + + method = aml_method("_S1D", 0, AML_NOTSERIALIZED); + aml_append(method, aml_return(aml_int(0))); + aml_append(dev, method); + + method = aml_method("_S2D", 0, AML_NOTSERIALIZED); + aml_append(method, aml_return(aml_int(0))); + aml_append(dev, method); + + method = aml_method("_S3D", 0, AML_NOTSERIALIZED); + aml_append(method, aml_return(aml_int(s3d))); + aml_append(dev, method); + } else if (hotplug_enabled_dev) { + aml_append(dev, aml_name_decl("_SUN", aml_int(slot))); + /* add _EJ0 to make slot hotpluggable */ + method = aml_method("_EJ0", 1, AML_NOTSERIALIZED); + aml_append(method, + aml_call2("PCEJ", aml_name("BSEL"), aml_name("_SUN")) + ); + aml_append(dev, method); + + if (bsel) { + build_append_pcihp_notify_entry(notify_method, slot); + } + } else if (bridge_in_acpi) { + /* + * device is coldplugged bridge, + * add child device descriptions into its scope + */ + PCIBus *sec_bus = pci_bridge_get_sec_bus(PCI_BRIDGE(pdev)); + + build_append_pci_bus_devices(dev, sec_bus, pcihp_bridge_en); + } + /* device descriptor has been composed, add it into parent context */ + aml_append(parent_scope, dev); + } + + if (bsel) { + aml_append(parent_scope, notify_method); + } + + /* Append PCNT method to notify about events on local and child buses. + * Add this method for root bus only when hotplug is enabled since DSDT + * expects it. 
+ */ + if (bsel || pcihp_bridge_en) { + method = aml_method("PCNT", 0, AML_NOTSERIALIZED); + + /* If bus supports hotplug select it and notify about local events */ + if (bsel) { + uint64_t bsel_val = qnum_get_uint(qobject_to(QNum, bsel)); + + aml_append(method, aml_store(aml_int(bsel_val), aml_name("BNUM"))); + aml_append(method, aml_call2("DVNT", aml_name("PCIU"), + aml_int(1))); /* Device Check */ + aml_append(method, aml_call2("DVNT", aml_name("PCID"), + aml_int(3))); /* Eject Request */ + } + + /* Notify about child bus events in any case */ + if (pcihp_bridge_en) { + QLIST_FOREACH(sec, &bus->child, sibling) { + if (pci_bus_is_root(sec)) { + continue; + } + + aml_append(method, aml_name("^S%.02X.PCNT", + sec->parent_dev->devfn)); + } + } + + aml_append(parent_scope, method); + } + qobject_unref(bsel); +} + +Aml *aml_pci_device_dsm(void) +{ + Aml *method, *UUID, *ifctx, *ifctx1, *ifctx2, *ifctx3, *elsectx; + Aml *acpi_index = aml_local(0); + Aml *zero = aml_int(0); + Aml *bnum = aml_arg(4); + Aml *func = aml_arg(2); + Aml *rev = aml_arg(1); + Aml *sun = aml_arg(5); + + method = aml_method("PDSM", 6, AML_SERIALIZED); + + /* + * PCI Firmware Specification 3.1 + * 4.6. _DSM Definitions for PCI + */ + UUID = aml_touuid("E5C937D0-3553-4D7A-9117-EA4D19C3434D"); + ifctx = aml_if(aml_equal(aml_arg(0), UUID)); + { + aml_append(ifctx, aml_store(aml_call2("AIDX", bnum, sun), acpi_index)); + ifctx1 = aml_if(aml_equal(func, zero)); + { + uint8_t byte_list[1]; + + ifctx2 = aml_if(aml_equal(rev, aml_int(2))); + { + /* + * advertise function 7 if device has acpi-index + * acpi_index values: + * 0: not present (default value) + * FFFFFFFF: not supported (old QEMU without PIDX reg) + * other: device's acpi-index + */ + ifctx3 = aml_if(aml_lnot( + aml_or(aml_equal(acpi_index, zero), + aml_equal(acpi_index, aml_int(0xFFFFFFFF)), NULL) + )); + { + byte_list[0] = + 1 /* have supported functions */ | + 1 << 7 /* support for function 7 */ + ; + aml_append(ifctx3, aml_return(aml_buffer(1, byte_list))); + } + aml_append(ifctx2, ifctx3); + } + aml_append(ifctx1, ifctx2); + + byte_list[0] = 0; /* nothing supported */ + aml_append(ifctx1, aml_return(aml_buffer(1, byte_list))); + } + aml_append(ifctx, ifctx1); + elsectx = aml_else(); + /* + * PCI Firmware Specification 3.1 + * 4.6.7. _DSM for Naming a PCI or PCI Express Device Under + * Operating Systems + */ + ifctx1 = aml_if(aml_equal(func, aml_int(7))); + { + Aml *pkg = aml_package(2); + Aml *ret = aml_local(1); + + aml_append(pkg, zero); + /* + * optional, if not impl. 
should return null string
+ */
+ aml_append(pkg, aml_string("%s", ""));
+ aml_append(ifctx1, aml_store(pkg, ret));
+ /*
+ * update the acpi-index to its actual value
+ */
+ aml_append(ifctx1, aml_store(acpi_index, aml_index(ret, zero)));
+ aml_append(ifctx1, aml_return(ret));
+ }
+ aml_append(elsectx, ifctx1);
+ aml_append(ifctx, elsectx);
+ }
+ aml_append(method, ifctx);
+ return method;
+}
+
+/**
+ * build_prt_entry:
+ * @link_name: link name for PCI route entry
+ *
+ * Build an AML package containing a PCI route entry for @link_name
+ */
+static Aml *build_prt_entry(const char *link_name)
+{
+ Aml *a_zero = aml_int(0);
+ Aml *pkg = aml_package(4);
+ aml_append(pkg, a_zero);
+ aml_append(pkg, a_zero);
+ aml_append(pkg, aml_name("%s", link_name));
+ aml_append(pkg, a_zero);
+ return pkg;
+}
+
+/*
+ * initialize_route - Initialize the interrupt routing rule
+ * through a specific LINK:
+ * if (lnk_idx == idx)
+ * route using link 'link_name'
+ */
+static Aml *initialize_route(Aml *route, const char *link_name,
+ Aml *lnk_idx, int idx)
+{
+ Aml *if_ctx = aml_if(aml_equal(lnk_idx, aml_int(idx)));
+ Aml *pkg = build_prt_entry(link_name);
+
+ aml_append(if_ctx, aml_store(pkg, route));
+
+ return if_ctx;
+}
+
+/*
+ * build_prt - Define interrupt routing rules
+ *
+ * Returns an array of 128 routes, one for each device,
+ * based on device location.
+ * The main goal is to equally distribute the interrupts
+ * over the 4 existing ACPI links (works only for i440fx).
+ * The hash function is (slot + pin) & 3 -> "LNK[D|A|B|C]".
+ *
+ */
+static Aml *build_prt(bool is_pci0_prt)
+{
+ Aml *method, *while_ctx, *pin, *res;
+
+ method = aml_method("_PRT", 0, AML_NOTSERIALIZED);
+ res = aml_local(0);
+ pin = aml_local(1);
+ aml_append(method, aml_store(aml_package(128), res));
+ aml_append(method, aml_store(aml_int(0), pin));
+
+ /* while (pin < 128) */
+ while_ctx = aml_while(aml_lless(pin, aml_int(128)));
+ {
+ Aml *slot = aml_local(2);
+ Aml *lnk_idx = aml_local(3);
+ Aml *route = aml_local(4);
+
+ /* slot = pin >> 2 */
+ aml_append(while_ctx,
+ aml_store(aml_shiftright(pin, aml_int(2), NULL), slot));
+ /* lnk_idx = (slot + pin) & 3 */
+ aml_append(while_ctx,
+ aml_store(aml_and(aml_add(pin, slot, NULL), aml_int(3), NULL),
+ lnk_idx));
+
+ /* route[2] = "LNK[D|A|B|C]", selection based on lnk_idx = (slot + pin) & 3 */
+ aml_append(while_ctx, initialize_route(route, "LNKD", lnk_idx, 0));
+ if (is_pci0_prt) {
+ Aml *if_device_1, *if_pin_4, *else_pin_4;
+
+ /* device 1 is the power-management device, needs SCI */
+ if_device_1 = aml_if(aml_equal(lnk_idx, aml_int(1)));
+ {
+ if_pin_4 = aml_if(aml_equal(pin, aml_int(4)));
+ {
+ aml_append(if_pin_4,
+ aml_store(build_prt_entry("LNKS"), route));
+ }
+ aml_append(if_device_1, if_pin_4);
+ else_pin_4 = aml_else();
+ {
+ aml_append(else_pin_4,
+ aml_store(build_prt_entry("LNKA"), route));
+ }
+ aml_append(if_device_1, else_pin_4);
+ }
+ aml_append(while_ctx, if_device_1);
+ } else {
+ aml_append(while_ctx, initialize_route(route, "LNKA", lnk_idx, 1));
+ }
+ aml_append(while_ctx, initialize_route(route, "LNKB", lnk_idx, 2));
+ aml_append(while_ctx, initialize_route(route, "LNKC", lnk_idx, 3));
+
+ /* route[0] = 0x[slot]FFFF */
+ aml_append(while_ctx,
+ aml_store(aml_or(aml_shiftleft(slot, aml_int(16)), aml_int(0xFFFF),
+ NULL),
+ aml_index(route, aml_int(0))));
+ /* route[1] = pin & 3 */
+ aml_append(while_ctx,
+ aml_store(aml_and(pin, aml_int(3), NULL),
+ aml_index(route, aml_int(1))));
+ /* res[pin] = route */
+ aml_append(while_ctx, aml_store(route, aml_index(res, pin)));
+ /* pin++ */
+
aml_append(while_ctx, aml_increment(pin)); + } + aml_append(method, while_ctx); + /* return res*/ + aml_append(method, aml_return(res)); + + return method; +} + +static void build_hpet_aml(Aml *table) +{ + Aml *crs; + Aml *field; + Aml *method; + Aml *if_ctx; + Aml *scope = aml_scope("_SB"); + Aml *dev = aml_device("HPET"); + Aml *zero = aml_int(0); + Aml *id = aml_local(0); + Aml *period = aml_local(1); + + aml_append(dev, aml_name_decl("_HID", aml_eisaid("PNP0103"))); + aml_append(dev, aml_name_decl("_UID", zero)); + + aml_append(dev, + aml_operation_region("HPTM", AML_SYSTEM_MEMORY, aml_int(HPET_BASE), + HPET_LEN)); + field = aml_field("HPTM", AML_DWORD_ACC, AML_LOCK, AML_PRESERVE); + aml_append(field, aml_named_field("VEND", 32)); + aml_append(field, aml_named_field("PRD", 32)); + aml_append(dev, field); + + method = aml_method("_STA", 0, AML_NOTSERIALIZED); + aml_append(method, aml_store(aml_name("VEND"), id)); + aml_append(method, aml_store(aml_name("PRD"), period)); + aml_append(method, aml_shiftright(id, aml_int(16), id)); + if_ctx = aml_if(aml_lor(aml_equal(id, zero), + aml_equal(id, aml_int(0xffff)))); + { + aml_append(if_ctx, aml_return(zero)); + } + aml_append(method, if_ctx); + + if_ctx = aml_if(aml_lor(aml_equal(period, zero), + aml_lgreater(period, aml_int(100000000)))); + { + aml_append(if_ctx, aml_return(zero)); + } + aml_append(method, if_ctx); + + aml_append(method, aml_return(aml_int(0x0F))); + aml_append(dev, method); + + crs = aml_resource_template(); + aml_append(crs, aml_memory32_fixed(HPET_BASE, HPET_LEN, AML_READ_ONLY)); + aml_append(dev, aml_name_decl("_CRS", crs)); + + aml_append(scope, dev); + aml_append(table, scope); +} + +static Aml *build_vmbus_device_aml(VMBusBridge *vmbus_bridge) +{ + Aml *dev; + Aml *method; + Aml *crs; + + dev = aml_device("VMBS"); + aml_append(dev, aml_name_decl("STA", aml_int(0xF))); + aml_append(dev, aml_name_decl("_HID", aml_string("VMBus"))); + aml_append(dev, aml_name_decl("_UID", aml_int(0x0))); + aml_append(dev, aml_name_decl("_DDN", aml_string("VMBUS"))); + + method = aml_method("_DIS", 0, AML_NOTSERIALIZED); + aml_append(method, aml_store(aml_and(aml_name("STA"), aml_int(0xD), NULL), + aml_name("STA"))); + aml_append(dev, method); + + method = aml_method("_PS0", 0, AML_NOTSERIALIZED); + aml_append(method, aml_store(aml_or(aml_name("STA"), aml_int(0xF), NULL), + aml_name("STA"))); + aml_append(dev, method); + + method = aml_method("_STA", 0, AML_NOTSERIALIZED); + aml_append(method, aml_return(aml_name("STA"))); + aml_append(dev, method); + + aml_append(dev, aml_name_decl("_PS3", aml_int(0x0))); + + crs = aml_resource_template(); + aml_append(crs, aml_irq_no_flags(vmbus_bridge->irq)); + aml_append(dev, aml_name_decl("_CRS", crs)); + + return dev; +} + +static void build_isa_devices_aml(Aml *table) +{ + bool ambiguous; + Object *obj = object_resolve_path_type("", TYPE_ISA_BUS, &ambiguous); + Aml *scope; + + assert(obj && !ambiguous); + + scope = aml_scope("_SB.PCI0.ISA"); + build_acpi_ipmi_devices(scope, BUS(obj), "\\_SB.PCI0.ISA"); + isa_build_aml(ISA_BUS(obj), scope); + + aml_append(table, scope); +} + +static void build_dbg_aml(Aml *table) +{ + Aml *field; + Aml *method; + Aml *while_ctx; + Aml *scope = aml_scope("\\"); + Aml *buf = aml_local(0); + Aml *len = aml_local(1); + Aml *idx = aml_local(2); + + aml_append(scope, + aml_operation_region("DBG", AML_SYSTEM_IO, aml_int(0x0402), 0x01)); + field = aml_field("DBG", AML_BYTE_ACC, AML_NOLOCK, AML_PRESERVE); + aml_append(field, aml_named_field("DBGB", 8)); + aml_append(scope, 
field); + + method = aml_method("DBUG", 1, AML_NOTSERIALIZED); + + aml_append(method, aml_to_hexstring(aml_arg(0), buf)); + aml_append(method, aml_to_buffer(buf, buf)); + aml_append(method, aml_subtract(aml_sizeof(buf), aml_int(1), len)); + aml_append(method, aml_store(aml_int(0), idx)); + + while_ctx = aml_while(aml_lless(idx, len)); + aml_append(while_ctx, + aml_store(aml_derefof(aml_index(buf, idx)), aml_name("DBGB"))); + aml_append(while_ctx, aml_increment(idx)); + aml_append(method, while_ctx); + + aml_append(method, aml_store(aml_int(0x0A), aml_name("DBGB"))); + aml_append(scope, method); + + aml_append(table, scope); +} + +static Aml *build_link_dev(const char *name, uint8_t uid, Aml *reg) +{ + Aml *dev; + Aml *crs; + Aml *method; + uint32_t irqs[] = {5, 10, 11}; + + dev = aml_device("%s", name); + aml_append(dev, aml_name_decl("_HID", aml_eisaid("PNP0C0F"))); + aml_append(dev, aml_name_decl("_UID", aml_int(uid))); + + crs = aml_resource_template(); + aml_append(crs, aml_interrupt(AML_CONSUMER, AML_LEVEL, AML_ACTIVE_HIGH, + AML_SHARED, irqs, ARRAY_SIZE(irqs))); + aml_append(dev, aml_name_decl("_PRS", crs)); + + method = aml_method("_STA", 0, AML_NOTSERIALIZED); + aml_append(method, aml_return(aml_call1("IQST", reg))); + aml_append(dev, method); + + method = aml_method("_DIS", 0, AML_NOTSERIALIZED); + aml_append(method, aml_or(reg, aml_int(0x80), reg)); + aml_append(dev, method); + + method = aml_method("_CRS", 0, AML_NOTSERIALIZED); + aml_append(method, aml_return(aml_call1("IQCR", reg))); + aml_append(dev, method); + + method = aml_method("_SRS", 1, AML_NOTSERIALIZED); + aml_append(method, aml_create_dword_field(aml_arg(0), aml_int(5), "PRRI")); + aml_append(method, aml_store(aml_name("PRRI"), reg)); + aml_append(dev, method); + + return dev; + } + +static Aml *build_gsi_link_dev(const char *name, uint8_t uid, uint8_t gsi) +{ + Aml *dev; + Aml *crs; + Aml *method; + uint32_t irqs; + + dev = aml_device("%s", name); + aml_append(dev, aml_name_decl("_HID", aml_eisaid("PNP0C0F"))); + aml_append(dev, aml_name_decl("_UID", aml_int(uid))); + + crs = aml_resource_template(); + irqs = gsi; + aml_append(crs, aml_interrupt(AML_CONSUMER, AML_LEVEL, AML_ACTIVE_HIGH, + AML_SHARED, &irqs, 1)); + aml_append(dev, aml_name_decl("_PRS", crs)); + + aml_append(dev, aml_name_decl("_CRS", crs)); + + /* + * _DIS can be no-op because the interrupt cannot be disabled. 
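+ * The matching _SRS below is likewise empty: the GSI is hard-wired, so
+ * there is nothing to select. Note that the same resource template is
+ * used for both _PRS and _CRS above.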
+ */ + method = aml_method("_DIS", 0, AML_NOTSERIALIZED); + aml_append(dev, method); + + method = aml_method("_SRS", 1, AML_NOTSERIALIZED); + aml_append(dev, method); + + return dev; +} + +/* _CRS method - get current settings */ +static Aml *build_iqcr_method(bool is_piix4) +{ + Aml *if_ctx; + uint32_t irqs; + Aml *method = aml_method("IQCR", 1, AML_SERIALIZED); + Aml *crs = aml_resource_template(); + + irqs = 0; + aml_append(crs, aml_interrupt(AML_CONSUMER, AML_LEVEL, + AML_ACTIVE_HIGH, AML_SHARED, &irqs, 1)); + aml_append(method, aml_name_decl("PRR0", crs)); + + aml_append(method, + aml_create_dword_field(aml_name("PRR0"), aml_int(5), "PRRI")); + + if (is_piix4) { + if_ctx = aml_if(aml_lless(aml_arg(0), aml_int(0x80))); + aml_append(if_ctx, aml_store(aml_arg(0), aml_name("PRRI"))); + aml_append(method, if_ctx); + } else { + aml_append(method, + aml_store(aml_and(aml_arg(0), aml_int(0xF), NULL), + aml_name("PRRI"))); + } + + aml_append(method, aml_return(aml_name("PRR0"))); + return method; +} + +/* _STA method - get status */ +static Aml *build_irq_status_method(void) +{ + Aml *if_ctx; + Aml *method = aml_method("IQST", 1, AML_NOTSERIALIZED); + + if_ctx = aml_if(aml_and(aml_int(0x80), aml_arg(0), NULL)); + aml_append(if_ctx, aml_return(aml_int(0x09))); + aml_append(method, if_ctx); + aml_append(method, aml_return(aml_int(0x0B))); + return method; +} + +static void build_piix4_pci0_int(Aml *table) +{ + Aml *dev; + Aml *crs; + Aml *field; + Aml *method; + uint32_t irqs; + Aml *sb_scope = aml_scope("_SB"); + Aml *pci0_scope = aml_scope("PCI0"); + + aml_append(pci0_scope, build_prt(true)); + aml_append(sb_scope, pci0_scope); + + field = aml_field("PCI0.ISA.P40C", AML_BYTE_ACC, AML_NOLOCK, AML_PRESERVE); + aml_append(field, aml_named_field("PRQ0", 8)); + aml_append(field, aml_named_field("PRQ1", 8)); + aml_append(field, aml_named_field("PRQ2", 8)); + aml_append(field, aml_named_field("PRQ3", 8)); + aml_append(sb_scope, field); + + aml_append(sb_scope, build_irq_status_method()); + aml_append(sb_scope, build_iqcr_method(true)); + + aml_append(sb_scope, build_link_dev("LNKA", 0, aml_name("PRQ0"))); + aml_append(sb_scope, build_link_dev("LNKB", 1, aml_name("PRQ1"))); + aml_append(sb_scope, build_link_dev("LNKC", 2, aml_name("PRQ2"))); + aml_append(sb_scope, build_link_dev("LNKD", 3, aml_name("PRQ3"))); + + dev = aml_device("LNKS"); + { + aml_append(dev, aml_name_decl("_HID", aml_eisaid("PNP0C0F"))); + aml_append(dev, aml_name_decl("_UID", aml_int(4))); + + crs = aml_resource_template(); + irqs = 9; + aml_append(crs, aml_interrupt(AML_CONSUMER, AML_LEVEL, + AML_ACTIVE_HIGH, AML_SHARED, + &irqs, 1)); + aml_append(dev, aml_name_decl("_PRS", crs)); + + /* The SCI cannot be disabled and is always attached to GSI 9, + * so these are no-ops. We only need this link to override the + * polarity to active high and match the content of the MADT. + */ + method = aml_method("_STA", 0, AML_NOTSERIALIZED); + aml_append(method, aml_return(aml_int(0x0b))); + aml_append(dev, method); + + method = aml_method("_DIS", 0, AML_NOTSERIALIZED); + aml_append(dev, method); + + method = aml_method("_CRS", 0, AML_NOTSERIALIZED); + aml_append(method, aml_return(aml_name("_PRS"))); + aml_append(dev, method); + + method = aml_method("_SRS", 1, AML_NOTSERIALIZED); + aml_append(dev, method); + } + aml_append(sb_scope, dev); + + aml_append(table, sb_scope); +} + +static void append_q35_prt_entry(Aml *ctx, uint32_t nr, const char *name) +{ + int i; + int head; + Aml *pkg; + char base = name[3] < 'E' ? 
'A' : 'E'; + char *s = g_strdup(name); + Aml *a_nr = aml_int((nr << 16) | 0xffff); + + assert(strlen(s) == 4); + + head = name[3] - base; + for (i = 0; i < 4; i++) { + if (head + i > 3) { + head = i * -1; + } + s[3] = base + head + i; + pkg = aml_package(4); + aml_append(pkg, a_nr); + aml_append(pkg, aml_int(i)); + aml_append(pkg, aml_name("%s", s)); + aml_append(pkg, aml_int(0)); + aml_append(ctx, pkg); + } + g_free(s); +} + +static Aml *build_q35_routing_table(const char *str) +{ + int i; + Aml *pkg; + char *name = g_strdup_printf("%s ", str); + + pkg = aml_package(128); + for (i = 0; i < 0x18; i++) { + name[3] = 'E' + (i & 0x3); + append_q35_prt_entry(pkg, i, name); + } + + name[3] = 'E'; + append_q35_prt_entry(pkg, 0x18, name); + + /* INTA -> PIRQA for slot 25 - 31, see the default value of DIR */ + for (i = 0x0019; i < 0x1e; i++) { + name[3] = 'A'; + append_q35_prt_entry(pkg, i, name); + } + + /* PCIe->PCI bridge. use PIRQ[E-H] */ + name[3] = 'E'; + append_q35_prt_entry(pkg, 0x1e, name); + name[3] = 'A'; + append_q35_prt_entry(pkg, 0x1f, name); + + g_free(name); + return pkg; +} + +static void build_q35_pci0_int(Aml *table) +{ + Aml *field; + Aml *method; + Aml *sb_scope = aml_scope("_SB"); + Aml *pci0_scope = aml_scope("PCI0"); + + /* Zero => PIC mode, One => APIC Mode */ + aml_append(table, aml_name_decl("PICF", aml_int(0))); + method = aml_method("_PIC", 1, AML_NOTSERIALIZED); + { + aml_append(method, aml_store(aml_arg(0), aml_name("PICF"))); + } + aml_append(table, method); + + aml_append(pci0_scope, + aml_name_decl("PRTP", build_q35_routing_table("LNK"))); + aml_append(pci0_scope, + aml_name_decl("PRTA", build_q35_routing_table("GSI"))); + + method = aml_method("_PRT", 0, AML_NOTSERIALIZED); + { + Aml *if_ctx; + Aml *else_ctx; + + /* PCI IRQ routing table, example from ACPI 2.0a specification, + section 6.2.8.1 */ + /* Note: we provide the same info as the PCI routing + table of the Bochs BIOS */ + if_ctx = aml_if(aml_equal(aml_name("PICF"), aml_int(0))); + aml_append(if_ctx, aml_return(aml_name("PRTP"))); + aml_append(method, if_ctx); + else_ctx = aml_else(); + aml_append(else_ctx, aml_return(aml_name("PRTA"))); + aml_append(method, else_ctx); + } + aml_append(pci0_scope, method); + aml_append(sb_scope, pci0_scope); + + field = aml_field("PCI0.ISA.PIRQ", AML_BYTE_ACC, AML_NOLOCK, AML_PRESERVE); + aml_append(field, aml_named_field("PRQA", 8)); + aml_append(field, aml_named_field("PRQB", 8)); + aml_append(field, aml_named_field("PRQC", 8)); + aml_append(field, aml_named_field("PRQD", 8)); + aml_append(field, aml_reserved_field(0x20)); + aml_append(field, aml_named_field("PRQE", 8)); + aml_append(field, aml_named_field("PRQF", 8)); + aml_append(field, aml_named_field("PRQG", 8)); + aml_append(field, aml_named_field("PRQH", 8)); + aml_append(sb_scope, field); + + aml_append(sb_scope, build_irq_status_method()); + aml_append(sb_scope, build_iqcr_method(false)); + + aml_append(sb_scope, build_link_dev("LNKA", 0, aml_name("PRQA"))); + aml_append(sb_scope, build_link_dev("LNKB", 1, aml_name("PRQB"))); + aml_append(sb_scope, build_link_dev("LNKC", 2, aml_name("PRQC"))); + aml_append(sb_scope, build_link_dev("LNKD", 3, aml_name("PRQD"))); + aml_append(sb_scope, build_link_dev("LNKE", 4, aml_name("PRQE"))); + aml_append(sb_scope, build_link_dev("LNKF", 5, aml_name("PRQF"))); + aml_append(sb_scope, build_link_dev("LNKG", 6, aml_name("PRQG"))); + aml_append(sb_scope, build_link_dev("LNKH", 7, aml_name("PRQH"))); + + aml_append(sb_scope, build_gsi_link_dev("GSIA", 0x10, 0x10)); + 
aml_append(sb_scope, build_gsi_link_dev("GSIB", 0x11, 0x11)); + aml_append(sb_scope, build_gsi_link_dev("GSIC", 0x12, 0x12)); + aml_append(sb_scope, build_gsi_link_dev("GSID", 0x13, 0x13)); + aml_append(sb_scope, build_gsi_link_dev("GSIE", 0x14, 0x14)); + aml_append(sb_scope, build_gsi_link_dev("GSIF", 0x15, 0x15)); + aml_append(sb_scope, build_gsi_link_dev("GSIG", 0x16, 0x16)); + aml_append(sb_scope, build_gsi_link_dev("GSIH", 0x17, 0x17)); + + aml_append(table, sb_scope); +} + +static Aml *build_q35_dram_controller(const AcpiMcfgInfo *mcfg) +{ + Aml *dev; + Aml *resource_template; + + /* DRAM controller */ + dev = aml_device("DRAC"); + aml_append(dev, aml_name_decl("_HID", aml_string("PNP0C01"))); + + resource_template = aml_resource_template(); + if (mcfg->base + mcfg->size - 1 >= (1ULL << 32)) { + aml_append(resource_template, + aml_qword_memory(AML_POS_DECODE, + AML_MIN_FIXED, + AML_MAX_FIXED, + AML_NON_CACHEABLE, + AML_READ_WRITE, + 0x0000000000000000, + mcfg->base, + mcfg->base + mcfg->size - 1, + 0x0000000000000000, + mcfg->size)); + } else { + aml_append(resource_template, + aml_dword_memory(AML_POS_DECODE, + AML_MIN_FIXED, + AML_MAX_FIXED, + AML_NON_CACHEABLE, + AML_READ_WRITE, + 0x0000000000000000, + mcfg->base, + mcfg->base + mcfg->size - 1, + 0x0000000000000000, + mcfg->size)); + } + aml_append(dev, aml_name_decl("_CRS", resource_template)); + + return dev; +} + +static void build_q35_isa_bridge(Aml *table) +{ + Aml *dev; + Aml *scope; + + scope = aml_scope("_SB.PCI0"); + dev = aml_device("ISA"); + aml_append(dev, aml_name_decl("_ADR", aml_int(0x001F0000))); + + /* ICH9 PCI to ISA irq remapping */ + aml_append(dev, aml_operation_region("PIRQ", AML_PCI_CONFIG, + aml_int(0x60), 0x0C)); + + aml_append(scope, dev); + aml_append(table, scope); +} + +static void build_piix4_isa_bridge(Aml *table) +{ + Aml *dev; + Aml *scope; + + scope = aml_scope("_SB.PCI0"); + dev = aml_device("ISA"); + aml_append(dev, aml_name_decl("_ADR", aml_int(0x00010000))); + + /* PIIX PCI to ISA irq remapping */ + aml_append(dev, aml_operation_region("P40C", AML_PCI_CONFIG, + aml_int(0x60), 0x04)); + + aml_append(scope, dev); + aml_append(table, scope); +} + +static void build_x86_acpi_pci_hotplug(Aml *table, uint64_t pcihp_addr) +{ + Aml *scope; + Aml *field; + Aml *method; + + scope = aml_scope("_SB.PCI0"); + + aml_append(scope, + aml_operation_region("PCST", AML_SYSTEM_IO, aml_int(pcihp_addr), 0x08)); + field = aml_field("PCST", AML_DWORD_ACC, AML_NOLOCK, AML_WRITE_AS_ZEROS); + aml_append(field, aml_named_field("PCIU", 32)); + aml_append(field, aml_named_field("PCID", 32)); + aml_append(scope, field); + + aml_append(scope, + aml_operation_region("SEJ", AML_SYSTEM_IO, + aml_int(pcihp_addr + ACPI_PCIHP_SEJ_BASE), 0x04)); + field = aml_field("SEJ", AML_DWORD_ACC, AML_NOLOCK, AML_WRITE_AS_ZEROS); + aml_append(field, aml_named_field("B0EJ", 32)); + aml_append(scope, field); + + aml_append(scope, + aml_operation_region("BNMR", AML_SYSTEM_IO, + aml_int(pcihp_addr + ACPI_PCIHP_BNMR_BASE), 0x08)); + field = aml_field("BNMR", AML_DWORD_ACC, AML_NOLOCK, AML_WRITE_AS_ZEROS); + aml_append(field, aml_named_field("BNUM", 32)); + aml_append(field, aml_named_field("PIDX", 32)); + aml_append(scope, field); + + aml_append(scope, aml_mutex("BLCK", 0)); + + method = aml_method("PCEJ", 2, AML_NOTSERIALIZED); + aml_append(method, aml_acquire(aml_name("BLCK"), 0xFFFF)); + aml_append(method, aml_store(aml_arg(0), aml_name("BNUM"))); + aml_append(method, + aml_store(aml_shiftleft(aml_int(1), aml_arg(1)), aml_name("B0EJ"))); + 
aml_append(method, aml_release(aml_name("BLCK"))); + aml_append(method, aml_return(aml_int(0))); + aml_append(scope, method); + + method = aml_method("AIDX", 2, AML_NOTSERIALIZED); + aml_append(method, aml_acquire(aml_name("BLCK"), 0xFFFF)); + aml_append(method, aml_store(aml_arg(0), aml_name("BNUM"))); + aml_append(method, + aml_store(aml_shiftleft(aml_int(1), aml_arg(1)), aml_name("PIDX"))); + aml_append(method, aml_store(aml_name("PIDX"), aml_local(0))); + aml_append(method, aml_release(aml_name("BLCK"))); + aml_append(method, aml_return(aml_local(0))); + aml_append(scope, method); + + aml_append(scope, aml_pci_device_dsm()); + + aml_append(table, scope); +} + +static Aml *build_q35_osc_method(bool enable_native_pcie_hotplug) +{ + Aml *if_ctx; + Aml *if_ctx2; + Aml *else_ctx; + Aml *method; + Aml *a_cwd1 = aml_name("CDW1"); + Aml *a_ctrl = aml_local(0); + + method = aml_method("_OSC", 4, AML_NOTSERIALIZED); + aml_append(method, aml_create_dword_field(aml_arg(3), aml_int(0), "CDW1")); + + if_ctx = aml_if(aml_equal( + aml_arg(0), aml_touuid("33DB4D5B-1FF7-401C-9657-7441C03DD766"))); + aml_append(if_ctx, aml_create_dword_field(aml_arg(3), aml_int(4), "CDW2")); + aml_append(if_ctx, aml_create_dword_field(aml_arg(3), aml_int(8), "CDW3")); + + aml_append(if_ctx, aml_store(aml_name("CDW3"), a_ctrl)); + + /* + * Always allow native PME, AER (no dependencies) + * Allow SHPC (PCI bridges can have SHPC controller) + * Disable PCIe Native Hot-plug if ACPI PCI Hot-plug is enabled. + */ + aml_append(if_ctx, aml_and(a_ctrl, + aml_int(0x1E | (enable_native_pcie_hotplug ? 0x1 : 0x0)), a_ctrl)); + + if_ctx2 = aml_if(aml_lnot(aml_equal(aml_arg(1), aml_int(1)))); + /* Unknown revision */ + aml_append(if_ctx2, aml_or(a_cwd1, aml_int(0x08), a_cwd1)); + aml_append(if_ctx, if_ctx2); + + if_ctx2 = aml_if(aml_lnot(aml_equal(aml_name("CDW3"), a_ctrl))); + /* Capabilities bits were masked */ + aml_append(if_ctx2, aml_or(a_cwd1, aml_int(0x10), a_cwd1)); + aml_append(if_ctx, if_ctx2); + + /* Update DWORD3 in the buffer */ + aml_append(if_ctx, aml_store(a_ctrl, aml_name("CDW3"))); + aml_append(method, if_ctx); + + else_ctx = aml_else(); + /* Unrecognized UUID */ + aml_append(else_ctx, aml_or(a_cwd1, aml_int(4), a_cwd1)); + aml_append(method, else_ctx); + + aml_append(method, aml_return(aml_arg(3))); + return method; +} + +static void build_smb0(Aml *table, I2CBus *smbus, int devnr, int func) +{ + Aml *scope = aml_scope("_SB.PCI0"); + Aml *dev = aml_device("SMB0"); + + aml_append(dev, aml_name_decl("_ADR", aml_int(devnr << 16 | func))); + build_acpi_ipmi_devices(dev, BUS(smbus), "\\_SB.PCI0.SMB0"); + aml_append(scope, dev); + aml_append(table, scope); +} + +static void +build_dsdt(GArray *table_data, BIOSLinker *linker, + AcpiPmInfo *pm, AcpiMiscInfo *misc, + Range *pci_hole, Range *pci_hole64, MachineState *machine) +{ + CrsRangeEntry *entry; + Aml *dsdt, *sb_scope, *scope, *dev, *method, *field, *pkg, *crs; + CrsRangeSet crs_range_set; + PCMachineState *pcms = PC_MACHINE(machine); + PCMachineClass *pcmc = PC_MACHINE_GET_CLASS(machine); + X86MachineState *x86ms = X86_MACHINE(machine); + AcpiMcfgInfo mcfg; + bool mcfg_valid = !!acpi_get_mcfg(&mcfg); + uint32_t nr_mem = machine->ram_slots; + int root_bus_limit = 0xFF; + PCIBus *bus = NULL; +#ifdef CONFIG_TPM + TPMIf *tpm = tpm_find(); +#endif + int i; + VMBusBridge *vmbus_bridge = vmbus_bridge_find(); + AcpiTable table = { .sig = "DSDT", .rev = 1, .oem_id = x86ms->oem_id, + .oem_table_id = x86ms->oem_table_id }; + + acpi_table_begin(&table, table_data); + dsdt = 
init_aml_allocator(); + + build_dbg_aml(dsdt); + if (misc->is_piix4) { + sb_scope = aml_scope("_SB"); + dev = aml_device("PCI0"); + aml_append(dev, aml_name_decl("_HID", aml_eisaid("PNP0A03"))); + aml_append(dev, aml_name_decl("_ADR", aml_int(0))); + aml_append(dev, aml_name_decl("_UID", aml_int(pcmc->pci_root_uid))); + aml_append(sb_scope, dev); + aml_append(dsdt, sb_scope); + + if (misc->has_hpet) { + build_hpet_aml(dsdt); + } + build_piix4_isa_bridge(dsdt); + build_isa_devices_aml(dsdt); + if (pm->pcihp_bridge_en || pm->pcihp_root_en) { + build_x86_acpi_pci_hotplug(dsdt, pm->pcihp_io_base); + } + build_piix4_pci0_int(dsdt); + } else { + sb_scope = aml_scope("_SB"); + dev = aml_device("PCI0"); + aml_append(dev, aml_name_decl("_HID", aml_eisaid("PNP0A08"))); + aml_append(dev, aml_name_decl("_CID", aml_eisaid("PNP0A03"))); + aml_append(dev, aml_name_decl("_ADR", aml_int(0))); + aml_append(dev, aml_name_decl("_UID", aml_int(pcmc->pci_root_uid))); + aml_append(dev, build_q35_osc_method(!pm->pcihp_bridge_en)); + aml_append(sb_scope, dev); + if (mcfg_valid) { + aml_append(sb_scope, build_q35_dram_controller(&mcfg)); + } + + if (pm->smi_on_cpuhp) { + /* reserve SMI block resources, IO ports 0xB2, 0xB3 */ + dev = aml_device("PCI0.SMI0"); + aml_append(dev, aml_name_decl("_HID", aml_eisaid("PNP0A06"))); + aml_append(dev, aml_name_decl("_UID", aml_string("SMI resources"))); + crs = aml_resource_template(); + aml_append(crs, + aml_io( + AML_DECODE16, + ACPI_PORT_SMI_CMD, + ACPI_PORT_SMI_CMD, + 1, + 2) + ); + aml_append(dev, aml_name_decl("_CRS", crs)); + aml_append(dev, aml_operation_region("SMIR", AML_SYSTEM_IO, + aml_int(ACPI_PORT_SMI_CMD), 2)); + field = aml_field("SMIR", AML_BYTE_ACC, AML_NOLOCK, + AML_WRITE_AS_ZEROS); + aml_append(field, aml_named_field("SMIC", 8)); + aml_append(field, aml_reserved_field(8)); + aml_append(dev, field); + aml_append(sb_scope, dev); + } + + aml_append(dsdt, sb_scope); + + if (misc->has_hpet) { + build_hpet_aml(dsdt); + } + build_q35_isa_bridge(dsdt); + build_isa_devices_aml(dsdt); + if (pm->pcihp_bridge_en) { + build_x86_acpi_pci_hotplug(dsdt, pm->pcihp_io_base); + } + build_q35_pci0_int(dsdt); + if (pcms->smbus && !pcmc->do_not_add_smb_acpi) { + build_smb0(dsdt, pcms->smbus, ICH9_SMB_DEV, ICH9_SMB_FUNC); + } + } + + if (vmbus_bridge) { + sb_scope = aml_scope("_SB"); + aml_append(sb_scope, build_vmbus_device_aml(vmbus_bridge)); + aml_append(dsdt, sb_scope); + } + + if (pcmc->legacy_cpu_hotplug) { + build_legacy_cpu_hotplug_aml(dsdt, machine, pm->cpu_hp_io_base); + } else { + CPUHotplugFeatures opts = { + .acpi_1_compatible = true, .has_legacy_cphp = true, + .smi_path = pm->smi_on_cpuhp ? 
"\\_SB.PCI0.SMI0.SMIC" : NULL, + .fw_unplugs_cpu = pm->smi_on_cpu_unplug, + }; + build_cpus_aml(dsdt, machine, opts, pm->cpu_hp_io_base, + "\\_SB.PCI0", "\\_GPE._E02"); + } + + if (pcms->memhp_io_base && nr_mem) { + build_memory_hotplug_aml(dsdt, nr_mem, "\\_SB.PCI0", + "\\_GPE._E03", AML_SYSTEM_IO, + pcms->memhp_io_base); + } + + scope = aml_scope("_GPE"); + { + aml_append(scope, aml_name_decl("_HID", aml_string("ACPI0006"))); + + if (pm->pcihp_bridge_en || pm->pcihp_root_en) { + method = aml_method("_E01", 0, AML_NOTSERIALIZED); + aml_append(method, + aml_acquire(aml_name("\\_SB.PCI0.BLCK"), 0xFFFF)); + aml_append(method, aml_call0("\\_SB.PCI0.PCNT")); + aml_append(method, aml_release(aml_name("\\_SB.PCI0.BLCK"))); + aml_append(scope, method); + } + + if (machine->nvdimms_state->is_enabled) { + method = aml_method("_E04", 0, AML_NOTSERIALIZED); + aml_append(method, aml_notify(aml_name("\\_SB.NVDR"), + aml_int(0x80))); + aml_append(scope, method); + } + } + aml_append(dsdt, scope); + + crs_range_set_init(&crs_range_set); + bus = PC_MACHINE(machine)->bus; + if (bus) { + QLIST_FOREACH(bus, &bus->child, sibling) { + uint8_t bus_num = pci_bus_num(bus); + uint8_t numa_node = pci_bus_numa_node(bus); + + /* look only for expander root buses */ + if (!pci_bus_is_root(bus)) { + continue; + } + + if (bus_num < root_bus_limit) { + root_bus_limit = bus_num - 1; + } + + scope = aml_scope("\\_SB"); + dev = aml_device("PC%.02X", bus_num); + aml_append(dev, aml_name_decl("_UID", aml_int(bus_num))); + aml_append(dev, aml_name_decl("_BBN", aml_int(bus_num))); + if (pci_bus_is_express(bus)) { + aml_append(dev, aml_name_decl("_HID", aml_eisaid("PNP0A08"))); + aml_append(dev, aml_name_decl("_CID", aml_eisaid("PNP0A03"))); + + /* Expander bridges do not have ACPI PCI Hot-plug enabled */ + aml_append(dev, build_q35_osc_method(true)); + } else { + aml_append(dev, aml_name_decl("_HID", aml_eisaid("PNP0A03"))); + } + + if (numa_node != NUMA_NODE_UNASSIGNED) { + aml_append(dev, aml_name_decl("_PXM", aml_int(numa_node))); + } + + aml_append(dev, build_prt(false)); + crs = build_crs(PCI_HOST_BRIDGE(BUS(bus)->parent), &crs_range_set, + 0, 0, 0, 0); + aml_append(dev, aml_name_decl("_CRS", crs)); + aml_append(scope, dev); + aml_append(dsdt, scope); + } + } + + /* + * At this point crs_range_set has all the ranges used by pci + * busses *other* than PCI0. These ranges will be excluded from + * the PCI0._CRS. Add mmconfig to the set so it will be excluded + * too. 
+ */ + if (mcfg_valid) { + crs_range_insert(crs_range_set.mem_ranges, + mcfg.base, mcfg.base + mcfg.size - 1); + } + + scope = aml_scope("\\_SB.PCI0"); + /* build PCI0._CRS */ + crs = aml_resource_template(); + aml_append(crs, + aml_word_bus_number(AML_MIN_FIXED, AML_MAX_FIXED, AML_POS_DECODE, + 0x0000, 0x0, root_bus_limit, + 0x0000, root_bus_limit + 1)); + aml_append(crs, aml_io(AML_DECODE16, 0x0CF8, 0x0CF8, 0x01, 0x08)); + + aml_append(crs, + aml_word_io(AML_MIN_FIXED, AML_MAX_FIXED, + AML_POS_DECODE, AML_ENTIRE_RANGE, + 0x0000, 0x0000, 0x0CF7, 0x0000, 0x0CF8)); + + crs_replace_with_free_ranges(crs_range_set.io_ranges, 0x0D00, 0xFFFF); + for (i = 0; i < crs_range_set.io_ranges->len; i++) { + entry = g_ptr_array_index(crs_range_set.io_ranges, i); + aml_append(crs, + aml_word_io(AML_MIN_FIXED, AML_MAX_FIXED, + AML_POS_DECODE, AML_ENTIRE_RANGE, + 0x0000, entry->base, entry->limit, + 0x0000, entry->limit - entry->base + 1)); + } + + aml_append(crs, + aml_dword_memory(AML_POS_DECODE, AML_MIN_FIXED, AML_MAX_FIXED, + AML_CACHEABLE, AML_READ_WRITE, + 0, 0x000A0000, 0x000BFFFF, 0, 0x00020000)); + + crs_replace_with_free_ranges(crs_range_set.mem_ranges, + range_lob(pci_hole), + range_upb(pci_hole)); + for (i = 0; i < crs_range_set.mem_ranges->len; i++) { + entry = g_ptr_array_index(crs_range_set.mem_ranges, i); + aml_append(crs, + aml_dword_memory(AML_POS_DECODE, AML_MIN_FIXED, AML_MAX_FIXED, + AML_NON_CACHEABLE, AML_READ_WRITE, + 0, entry->base, entry->limit, + 0, entry->limit - entry->base + 1)); + } + + if (!range_is_empty(pci_hole64)) { + crs_replace_with_free_ranges(crs_range_set.mem_64bit_ranges, + range_lob(pci_hole64), + range_upb(pci_hole64)); + for (i = 0; i < crs_range_set.mem_64bit_ranges->len; i++) { + entry = g_ptr_array_index(crs_range_set.mem_64bit_ranges, i); + aml_append(crs, + aml_qword_memory(AML_POS_DECODE, AML_MIN_FIXED, + AML_MAX_FIXED, + AML_CACHEABLE, AML_READ_WRITE, + 0, entry->base, entry->limit, + 0, entry->limit - entry->base + 1)); + } + } + +#ifdef CONFIG_TPM + if (TPM_IS_TIS_ISA(tpm_find())) { + aml_append(crs, aml_memory32_fixed(TPM_TIS_ADDR_BASE, + TPM_TIS_ADDR_SIZE, AML_READ_WRITE)); + } +#endif + aml_append(scope, aml_name_decl("_CRS", crs)); + + /* reserve GPE0 block resources */ + dev = aml_device("GPE0"); + aml_append(dev, aml_name_decl("_HID", aml_string("PNP0A06"))); + aml_append(dev, aml_name_decl("_UID", aml_string("GPE0 resources"))); + /* device present, functioning, decoding, not shown in UI */ + aml_append(dev, aml_name_decl("_STA", aml_int(0xB))); + crs = aml_resource_template(); + aml_append(crs, + aml_io( + AML_DECODE16, + pm->fadt.gpe0_blk.address, + pm->fadt.gpe0_blk.address, + 1, + pm->fadt.gpe0_blk.bit_width / 8) + ); + aml_append(dev, aml_name_decl("_CRS", crs)); + aml_append(scope, dev); + + crs_range_set_free(&crs_range_set); + + /* reserve PCIHP resources */ + if (pm->pcihp_io_len && (pm->pcihp_bridge_en || pm->pcihp_root_en)) { + dev = aml_device("PHPR"); + aml_append(dev, aml_name_decl("_HID", aml_string("PNP0A06"))); + aml_append(dev, + aml_name_decl("_UID", aml_string("PCI Hotplug resources"))); + /* device present, functioning, decoding, not shown in UI */ + aml_append(dev, aml_name_decl("_STA", aml_int(0xB))); + crs = aml_resource_template(); + aml_append(crs, + aml_io(AML_DECODE16, pm->pcihp_io_base, pm->pcihp_io_base, 1, + pm->pcihp_io_len) + ); + aml_append(dev, aml_name_decl("_CRS", crs)); + aml_append(scope, dev); + } + aml_append(dsdt, scope); + + /* create S3_ / S4_ / S5_ packages if necessary */ + scope = aml_scope("\\"); + if 
(!pm->s3_disabled) { + pkg = aml_package(4); + aml_append(pkg, aml_int(1)); /* PM1a_CNT.SLP_TYP */ + aml_append(pkg, aml_int(1)); /* PM1b_CNT.SLP_TYP, FIXME: not impl. */ + aml_append(pkg, aml_int(0)); /* reserved */ + aml_append(pkg, aml_int(0)); /* reserved */ + aml_append(scope, aml_name_decl("_S3", pkg)); + } + + if (!pm->s4_disabled) { + pkg = aml_package(4); + aml_append(pkg, aml_int(pm->s4_val)); /* PM1a_CNT.SLP_TYP */ + /* PM1b_CNT.SLP_TYP, FIXME: not impl. */ + aml_append(pkg, aml_int(pm->s4_val)); + aml_append(pkg, aml_int(0)); /* reserved */ + aml_append(pkg, aml_int(0)); /* reserved */ + aml_append(scope, aml_name_decl("_S4", pkg)); + } + + pkg = aml_package(4); + aml_append(pkg, aml_int(0)); /* PM1a_CNT.SLP_TYP */ + aml_append(pkg, aml_int(0)); /* PM1b_CNT.SLP_TYP not impl. */ + aml_append(pkg, aml_int(0)); /* reserved */ + aml_append(pkg, aml_int(0)); /* reserved */ + aml_append(scope, aml_name_decl("_S5", pkg)); + aml_append(dsdt, scope); + + /* create fw_cfg node, unconditionally */ + { + scope = aml_scope("\\_SB.PCI0"); + fw_cfg_add_acpi_dsdt(scope, x86ms->fw_cfg); + aml_append(dsdt, scope); + } + + if (misc->applesmc_io_base) { + scope = aml_scope("\\_SB.PCI0.ISA"); + dev = aml_device("SMC"); + + aml_append(dev, aml_name_decl("_HID", aml_eisaid("APP0001"))); + /* device present, functioning, decoding, not shown in UI */ + aml_append(dev, aml_name_decl("_STA", aml_int(0xB))); + + crs = aml_resource_template(); + aml_append(crs, + aml_io(AML_DECODE16, misc->applesmc_io_base, misc->applesmc_io_base, + 0x01, APPLESMC_MAX_DATA_LENGTH) + ); + aml_append(crs, aml_irq_no_flags(6)); + aml_append(dev, aml_name_decl("_CRS", crs)); + + aml_append(scope, dev); + aml_append(dsdt, scope); + } + + if (misc->pvpanic_port) { + scope = aml_scope("\\_SB.PCI0.ISA"); + + dev = aml_device("PEVT"); + aml_append(dev, aml_name_decl("_HID", aml_string("QEMU0001"))); + + crs = aml_resource_template(); + aml_append(crs, + aml_io(AML_DECODE16, misc->pvpanic_port, misc->pvpanic_port, 1, 1) + ); + aml_append(dev, aml_name_decl("_CRS", crs)); + + aml_append(dev, aml_operation_region("PEOR", AML_SYSTEM_IO, + aml_int(misc->pvpanic_port), 1)); + field = aml_field("PEOR", AML_BYTE_ACC, AML_NOLOCK, AML_PRESERVE); + aml_append(field, aml_named_field("PEPT", 8)); + aml_append(dev, field); + + /* device present, functioning, decoding, shown in UI */ + aml_append(dev, aml_name_decl("_STA", aml_int(0xF))); + + method = aml_method("RDPT", 0, AML_NOTSERIALIZED); + aml_append(method, aml_store(aml_name("PEPT"), aml_local(0))); + aml_append(method, aml_return(aml_local(0))); + aml_append(dev, method); + + method = aml_method("WRPT", 1, AML_NOTSERIALIZED); + aml_append(method, aml_store(aml_arg(0), aml_name("PEPT"))); + aml_append(dev, method); + + aml_append(scope, dev); + aml_append(dsdt, scope); + } + + sb_scope = aml_scope("\\_SB"); + { + Object *pci_host; + PCIBus *bus = NULL; + + pci_host = acpi_get_i386_pci_host(); + + if (pci_host) { + bus = PCI_HOST_BRIDGE(pci_host)->bus; + } + + if (bus) { + Aml *scope = aml_scope("PCI0"); + /* Scan all PCI buses. Generate tables to support hotplug. 
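+ * (One Device (Sxx) node is emitted per slot, plus the BSEL/DVNT/PCNT
+ * plumbing on buses that support hotplug; see
+ * build_append_pci_bus_devices() above.)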
*/
+ build_append_pci_bus_devices(scope, bus, pm->pcihp_bridge_en);
+
+#ifdef CONFIG_TPM
+ if (TPM_IS_TIS_ISA(tpm)) {
+ if (misc->tpm_version == TPM_VERSION_2_0) {
+ dev = aml_device("TPM");
+ aml_append(dev, aml_name_decl("_HID",
+ aml_string("MSFT0101")));
+ } else {
+ dev = aml_device("ISA.TPM");
+ aml_append(dev, aml_name_decl("_HID",
+ aml_eisaid("PNP0C31")));
+ }
+
+ aml_append(dev, aml_name_decl("_STA", aml_int(0xF)));
+ crs = aml_resource_template();
+ aml_append(crs, aml_memory32_fixed(TPM_TIS_ADDR_BASE,
+ TPM_TIS_ADDR_SIZE, AML_READ_WRITE));
+ /*
+ * FIXME: TPM_TIS_IRQ=5 conflicts with PNP0C0F irqs.
+ * Rewrite to take the IRQ from the TPM device model and
+ * fix the default IRQ value there to use some unused IRQ.
+ */
+ /* aml_append(crs, aml_irq_no_flags(TPM_TIS_IRQ)); */
+ aml_append(dev, aml_name_decl("_CRS", crs));
+
+ tpm_build_ppi_acpi(tpm, dev);
+
+ aml_append(scope, dev);
+ }
+#endif
+
+ aml_append(sb_scope, scope);
+ }
+ }
+
+#ifdef CONFIG_TPM
+ if (TPM_IS_CRB(tpm)) {
+ dev = aml_device("TPM");
+ aml_append(dev, aml_name_decl("_HID", aml_string("MSFT0101")));
+ crs = aml_resource_template();
+ aml_append(crs, aml_memory32_fixed(TPM_CRB_ADDR_BASE,
+ TPM_CRB_ADDR_SIZE, AML_READ_WRITE));
+ aml_append(dev, aml_name_decl("_CRS", crs));
+
+ aml_append(dev, aml_name_decl("_STA", aml_int(0xf)));
+
+ tpm_build_ppi_acpi(tpm, dev);
+
+ aml_append(sb_scope, dev);
+ }
+#endif
+
+ if (pcms->sgx_epc.size != 0) {
+ uint64_t epc_base = pcms->sgx_epc.base;
+ uint64_t epc_size = pcms->sgx_epc.size;
+
+ dev = aml_device("EPC");
+ aml_append(dev, aml_name_decl("_HID", aml_eisaid("INT0E0C")));
+ aml_append(dev, aml_name_decl("_STR",
+ aml_unicode("Enclave Page Cache 1.0")));
+ crs = aml_resource_template();
+ aml_append(crs,
+ aml_qword_memory(AML_POS_DECODE, AML_MIN_FIXED,
+ AML_MAX_FIXED, AML_NON_CACHEABLE,
+ AML_READ_WRITE, 0, epc_base,
+ epc_base + epc_size - 1, 0, epc_size));
+ aml_append(dev, aml_name_decl("_CRS", crs));
+
+ method = aml_method("_STA", 0, AML_NOTSERIALIZED);
+ aml_append(method, aml_return(aml_int(0x0f)));
+ aml_append(dev, method);
+
+ aml_append(sb_scope, dev);
+ }
+ aml_append(dsdt, sb_scope);
+
+ /* copy AML table into ACPI tables blob and patch header there */
+ g_array_append_vals(table_data, dsdt->buf->data, dsdt->buf->len);
+ acpi_table_end(linker, &table);
+ free_aml_allocator();
+}
+
+/*
+ * IA-PC HPET (High Precision Event Timers) Specification (Revision: 1.0a)
+ * 3.2.4 The ACPI 2.0 HPET Description Table (HPET)
+ */
+static void
+build_hpet(GArray *table_data, BIOSLinker *linker, const char *oem_id,
+ const char *oem_table_id)
+{
+ AcpiTable table = { .sig = "HPET", .rev = 1,
+ .oem_id = oem_id, .oem_table_id = oem_table_id };
+
+ acpi_table_begin(&table, table_data);
+ /*
+ * Note: the timer_block_id value must be kept in sync with the value
+ * advertised by the emulated HPET.
+ */
+ /* Event Timer Block ID */
+ build_append_int_noprefix(table_data, 0x8086a201, 4);
+ /* BASE_ADDRESS */
+ build_append_gas(table_data, AML_AS_SYSTEM_MEMORY, 0, 0, 0, HPET_BASE);
+ /* HPET Number */
+ build_append_int_noprefix(table_data, 0, 1);
+ /* Main Counter Minimum Clock_tick in Periodic Mode */
+ build_append_int_noprefix(table_data, 0, 2);
+ /* Page Protection And OEM Attribute */
+ build_append_int_noprefix(table_data, 0, 1);
+ acpi_table_end(linker, &table);
+}
+
+#ifdef CONFIG_TPM
+/*
+ * TCPA Description Table
+ *
+ * Following Level 00, Rev 00.37 of specs:
+ * http://www.trustedcomputinggroup.org/resources/tcg_acpi_specification
+ * 7.1.2 ACPI Table Layout
+ */
+static void
+build_tpm_tcpa(GArray
*table_data, BIOSLinker *linker, GArray *tcpalog, + const char *oem_id, const char *oem_table_id) +{ + unsigned log_addr_offset; + AcpiTable table = { .sig = "TCPA", .rev = 2, + .oem_id = oem_id, .oem_table_id = oem_table_id }; + + acpi_table_begin(&table, table_data); + /* Platform Class */ + build_append_int_noprefix(table_data, TPM_TCPA_ACPI_CLASS_CLIENT, 2); + /* Log Area Minimum Length (LAML) */ + build_append_int_noprefix(table_data, TPM_LOG_AREA_MINIMUM_SIZE, 4); + /* Log Area Start Address (LASA) */ + log_addr_offset = table_data->len; + build_append_int_noprefix(table_data, 0, 8); + + /* allocate/reserve space for TPM log area */ + acpi_data_push(tcpalog, TPM_LOG_AREA_MINIMUM_SIZE); + bios_linker_loader_alloc(linker, ACPI_BUILD_TPMLOG_FILE, tcpalog, 1, + false /* high memory */); + /* log area start address to be filled by Guest linker */ + bios_linker_loader_add_pointer(linker, ACPI_BUILD_TABLE_FILE, + log_addr_offset, 8, ACPI_BUILD_TPMLOG_FILE, 0); + + acpi_table_end(linker, &table); +} +#endif + +#define HOLE_640K_START (640 * KiB) +#define HOLE_640K_END (1 * MiB) + +/* + * ACPI spec, Revision 3.0 + * 5.2.15 System Resource Affinity Table (SRAT) + */ +static void +build_srat(GArray *table_data, BIOSLinker *linker, MachineState *machine) +{ + int i; + int numa_mem_start, slots; + uint64_t mem_len, mem_base, next_base; + MachineClass *mc = MACHINE_GET_CLASS(machine); + X86MachineState *x86ms = X86_MACHINE(machine); + const CPUArchIdList *apic_ids = mc->possible_cpu_arch_ids(machine); + PCMachineState *pcms = PC_MACHINE(machine); + int nb_numa_nodes = machine->numa_state->num_nodes; + NodeInfo *numa_info = machine->numa_state->nodes; + ram_addr_t hotpluggable_address_space_size = + object_property_get_int(OBJECT(pcms), PC_MACHINE_DEVMEM_REGION_SIZE, + NULL); + AcpiTable table = { .sig = "SRAT", .rev = 1, .oem_id = x86ms->oem_id, + .oem_table_id = x86ms->oem_table_id }; + + acpi_table_begin(&table, table_data); + build_append_int_noprefix(table_data, 1, 4); /* Reserved */ + build_append_int_noprefix(table_data, 0, 8); /* Reserved */ + + for (i = 0; i < apic_ids->len; i++) { + int node_id = apic_ids->cpus[i].props.node_id; + uint32_t apic_id = apic_ids->cpus[i].arch_id; + + if (apic_id < 255) { + /* 5.2.15.1 Processor Local APIC/SAPIC Affinity Structure */ + build_append_int_noprefix(table_data, 0, 1); /* Type */ + build_append_int_noprefix(table_data, 16, 1); /* Length */ + /* Proximity Domain [7:0] */ + build_append_int_noprefix(table_data, node_id, 1); + build_append_int_noprefix(table_data, apic_id, 1); /* APIC ID */ + /* Flags, Table 5-36 */ + build_append_int_noprefix(table_data, 1, 4); + build_append_int_noprefix(table_data, 0, 1); /* Local SAPIC EID */ + /* Proximity Domain [31:8] */ + build_append_int_noprefix(table_data, 0, 3); + build_append_int_noprefix(table_data, 0, 4); /* Reserved */ + } else { + /* + * ACPI spec, Revision 4.0 + * 5.2.16.3 Processor Local x2APIC Affinity Structure + */ + build_append_int_noprefix(table_data, 2, 1); /* Type */ + build_append_int_noprefix(table_data, 24, 1); /* Length */ + build_append_int_noprefix(table_data, 0, 2); /* Reserved */ + /* Proximity Domain */ + build_append_int_noprefix(table_data, node_id, 4); + build_append_int_noprefix(table_data, apic_id, 4); /* X2APIC ID */ + /* Flags, Table 5-39 */ + build_append_int_noprefix(table_data, 1 /* Enabled */, 4); + build_append_int_noprefix(table_data, 0, 4); /* Clock Domain */ + build_append_int_noprefix(table_data, 0, 4); /* Reserved */ + } + } + + /* the memory map is a bit tricky, it 
contains at least one hole
+ * from 640K-1M and possibly another one from 3.5G-4G.
+ */
+ next_base = 0;
+ numa_mem_start = table_data->len;
+
+ for (i = 1; i < nb_numa_nodes + 1; ++i) {
+ mem_base = next_base;
+ mem_len = numa_info[i - 1].node_mem;
+ next_base = mem_base + mem_len;
+
+ /* Cut out the 640K hole */
+ if (mem_base <= HOLE_640K_START &&
+ next_base > HOLE_640K_START) {
+ mem_len -= next_base - HOLE_640K_START;
+ if (mem_len > 0) {
+ build_srat_memory(table_data, mem_base, mem_len, i - 1,
+ MEM_AFFINITY_ENABLED);
+ }
+
+ /* Check for the rare case: 640K < RAM < 1M */
+ if (next_base <= HOLE_640K_END) {
+ next_base = HOLE_640K_END;
+ continue;
+ }
+ mem_base = HOLE_640K_END;
+ mem_len = next_base - HOLE_640K_END;
+ }
+
+ /* Cut out the ACPI_PCI hole */
+ if (mem_base <= x86ms->below_4g_mem_size &&
+ next_base > x86ms->below_4g_mem_size) {
+ mem_len -= next_base - x86ms->below_4g_mem_size;
+ if (mem_len > 0) {
+ build_srat_memory(table_data, mem_base, mem_len, i - 1,
+ MEM_AFFINITY_ENABLED);
+ }
+ mem_base = 1ULL << 32;
+ mem_len = next_base - x86ms->below_4g_mem_size;
+ next_base = mem_base + mem_len;
+ }
+
+ if (mem_len > 0) {
+ build_srat_memory(table_data, mem_base, mem_len, i - 1,
+ MEM_AFFINITY_ENABLED);
+ }
+ }
+
+ if (machine->nvdimms_state->is_enabled) {
+ nvdimm_build_srat(table_data);
+ }
+
+ /*
+ * TODO: this part is not in the ACPI spec, and current Linux kernels
+ * boot fine without these entries. However, removing them once caused
+ * problems with some ancient guest OS (exactly which one is no longer
+ * known), so keep them around for now.
+ */
+ slots = (table_data->len - numa_mem_start) / 40 /* mem affinity len */;
+ for (; slots < nb_numa_nodes + 2; slots++) {
+ build_srat_memory(table_data, 0, 0, 0, MEM_AFFINITY_NOFLAGS);
+ }
+
+ /*
+ * This entry is required for Windows to enable memory hotplug in the
+ * guest OS and for Linux to enable SWIOTLB when booted with less than
+ * 4G of RAM. Windows works better if the entry sets proximity to the
+ * highest NUMA node in the machine. Memory devices may override the
+ * proximity set by this entry by providing a _PXM method if necessary.
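+ *
+ * Illustrative example (numbers assumed): with two NUMA nodes and a
+ * 16 GiB device-memory region starting at 0x140000000, this emits one
+ * Memory Affinity structure covering that whole range, with proximity
+ * domain 1 and flags HOTPLUGGABLE | ENABLED.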
+ */
+ if (hotpluggable_address_space_size) {
+ build_srat_memory(table_data, machine->device_memory->base,
+ hotpluggable_address_space_size, nb_numa_nodes - 1,
+ MEM_AFFINITY_HOTPLUGGABLE | MEM_AFFINITY_ENABLED);
+ }
+
+ acpi_table_end(linker, &table);
+}
+
+/*
+ * Insert DMAR scope for PCI bridges and endpoint devices
+ */
+static void
+insert_scope(PCIBus *bus, PCIDevice *dev, void *opaque)
+{
+ const size_t device_scope_size = 6 /* device scope structure */ +
+ 2 /* 1 path entry */;
+ GArray *scope_blob = opaque;
+
+ if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_BRIDGE)) {
+ /* DMAR Scope Type: 0x02 for PCI Bridge */
+ build_append_int_noprefix(scope_blob, 0x02, 1);
+ } else {
+ /* DMAR Scope Type: 0x01 for PCI Endpoint Device */
+ build_append_int_noprefix(scope_blob, 0x01, 1);
+ }
+
+ /* length */
+ build_append_int_noprefix(scope_blob, device_scope_size, 1);
+ /* reserved */
+ build_append_int_noprefix(scope_blob, 0, 2);
+ /* enumeration_id */
+ build_append_int_noprefix(scope_blob, 0, 1);
+ /* bus */
+ build_append_int_noprefix(scope_blob, pci_bus_num(bus), 1);
+ /* device */
+ build_append_int_noprefix(scope_blob, PCI_SLOT(dev->devfn), 1);
+ /* function */
+ build_append_int_noprefix(scope_blob, PCI_FUNC(dev->devfn), 1);
+}
+
+/* For a given PCI host bridge, walk and insert DMAR scope */
+static int
+dmar_host_bridges(Object *obj, void *opaque)
+{
+ GArray *scope_blob = opaque;
+
+ if (object_dynamic_cast(obj, TYPE_PCI_HOST_BRIDGE)) {
+ PCIBus *bus = PCI_HOST_BRIDGE(obj)->bus;
+
+ if (bus && !pci_bus_bypass_iommu(bus)) {
+ pci_for_each_device_under_bus(bus, insert_scope, scope_blob);
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Intel® Virtualization Technology for Directed I/O
+ * Architecture Specification, Revision 3.3
+ * 8.1 DMA Remapping Reporting Structure
+ */
+static void
+build_dmar_q35(GArray *table_data, BIOSLinker *linker, const char *oem_id,
+ const char *oem_table_id)
+{
+ uint8_t dmar_flags = 0;
+ uint8_t rsvd10[10] = {};
+ /* Root complex IOAPIC uses one path only */
+ const size_t ioapic_scope_size = 6 /* device scope structure */ +
+ 2 /* 1 path entry */;
+ X86IOMMUState *iommu = x86_iommu_get_default();
+ IntelIOMMUState *intel_iommu = INTEL_IOMMU_DEVICE(iommu);
+ GArray *scope_blob = g_array_new(false, true, 1);
+
+ AcpiTable table = { .sig = "DMAR", .rev = 1, .oem_id = oem_id,
+ .oem_table_id = oem_table_id };
+
+ /*
+ * A PCI bus walk, for each PCI host bridge: insert a scope entry for
+ * every PCI bridge and endpoint device attached to a bus with the
+ * IOMMU enabled.
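+ *
+ * Each entry appended by insert_scope() is 8 bytes: type (0x01
+ * endpoint, 0x02 bridge), length, reserved, enumeration ID, start bus
+ * number and one {device, function} path pair; e.g. a bridge at
+ * 00:1e.0 is encoded as 02 08 00 00 00 00 1e 00 (bus number assumed).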
+ */
+ object_child_foreach_recursive(object_get_root(),
+ dmar_host_bridges, scope_blob);
+
+ assert(iommu);
+ if (x86_iommu_ir_supported(iommu)) {
+ dmar_flags |= 0x1; /* Flags: 0x1: INT_REMAP */
+ }
+
+ acpi_table_begin(&table, table_data);
+ /* Host Address Width */
+ build_append_int_noprefix(table_data, intel_iommu->aw_bits - 1, 1);
+ build_append_int_noprefix(table_data, dmar_flags, 1); /* Flags */
+ g_array_append_vals(table_data, rsvd10, sizeof(rsvd10)); /* Reserved */
+
+ /* 8.3 DMAR Remapping Hardware Unit Definition structure */
+ build_append_int_noprefix(table_data, 0, 2); /* Type */
+ /* Length */
+ build_append_int_noprefix(table_data,
+ 16 + ioapic_scope_size + scope_blob->len, 2);
+ /* Flags */
+ build_append_int_noprefix(table_data, 0 /* Don't include all PCI devices */,
+ 1);
+ build_append_int_noprefix(table_data, 0, 1); /* Reserved */
+ build_append_int_noprefix(table_data, 0, 2); /* Segment Number */
+ /* Register Base Address */
+ build_append_int_noprefix(table_data, Q35_HOST_BRIDGE_IOMMU_ADDR, 8);
+
+ /* Scope definition for the root-complex IOAPIC. See VT-d spec
+ * 8.3.1 (version Oct. 2014 or later). */
+ build_append_int_noprefix(table_data, 0x03 /* IOAPIC */, 1); /* Type */
+ build_append_int_noprefix(table_data, ioapic_scope_size, 1); /* Length */
+ build_append_int_noprefix(table_data, 0, 2); /* Reserved */
+ /* Enumeration ID */
+ build_append_int_noprefix(table_data, ACPI_BUILD_IOAPIC_ID, 1);
+ /* Start Bus Number */
+ build_append_int_noprefix(table_data, Q35_PSEUDO_BUS_PLATFORM, 1);
+ /* Path, {Device, Function} pair */
+ build_append_int_noprefix(table_data, PCI_SLOT(Q35_PSEUDO_DEVFN_IOAPIC), 1);
+ build_append_int_noprefix(table_data, PCI_FUNC(Q35_PSEUDO_DEVFN_IOAPIC), 1);
+
+ /* Add scope found above */
+ g_array_append_vals(table_data, scope_blob->data, scope_blob->len);
+ g_array_free(scope_blob, true);
+
+ if (iommu->dt_supported) {
+ /* 8.5 Root Port ATS Capability Reporting Structure */
+ build_append_int_noprefix(table_data, 2, 2); /* Type */
+ build_append_int_noprefix(table_data, 8, 2); /* Length */
+ build_append_int_noprefix(table_data, 1 /* ALL_PORTS */, 1); /* Flags */
+ build_append_int_noprefix(table_data, 0, 1); /* Reserved */
+ build_append_int_noprefix(table_data, 0, 2); /* Segment Number */
+ }
+
+ acpi_table_end(linker, &table);
+}
+
+/*
+ * Windows ACPI Emulated Devices Table
+ * (Version 1.0 - April 6, 2009)
+ * Spec: http://download.microsoft.com/download/7/E/7/7E7662CF-CBEA-470B-A97E-CE7CE0D98DC2/WAET.docx
+ *
+ * Helpful to speed up Windows guests and ignored by others.
+ */
+static void
+build_waet(GArray *table_data, BIOSLinker *linker, const char *oem_id,
+ const char *oem_table_id)
+{
+ AcpiTable table = { .sig = "WAET", .rev = 1, .oem_id = oem_id,
+ .oem_table_id = oem_table_id };
+
+ acpi_table_begin(&table, table_data);
+ /*
+ * Set "ACPI PM timer good" flag.
+ *
+ * Tells Windows guests that our ACPI PM timer is reliable in the
+ * sense that the guest can read it only once to obtain a reliable
+ * value, which avoids costly VM exits caused by the guest re-reading
+ * it unnecessarily.
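+ *
+ * This is bit 1 of the 4-byte Emulated Device Flags field; bit 0
+ * ("RTC good") is deliberately left clear here.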
+ */ + build_append_int_noprefix(table_data, 1 << 1 /* ACPI PM timer good */, 4); + acpi_table_end(linker, &table); +} + +/* + * IVRS table as specified in AMD IOMMU Specification v2.62, Section 5.2 + * accessible here http://support.amd.com/TechDocs/48882_IOMMU.pdf + */ +#define IOAPIC_SB_DEVID (uint64_t)PCI_BUILD_BDF(0, PCI_DEVFN(0x14, 0)) + +/* + * Insert IVHD entry for device and recurse, insert alias, or insert range as + * necessary for the PCI topology. + */ +static void +insert_ivhd(PCIBus *bus, PCIDevice *dev, void *opaque) +{ + GArray *table_data = opaque; + uint32_t entry; + + /* "Select" IVHD entry, type 0x2 */ + entry = PCI_BUILD_BDF(pci_bus_num(bus), dev->devfn) << 8 | 0x2; + build_append_int_noprefix(table_data, entry, 4); + + if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_BRIDGE)) { + PCIBus *sec_bus = pci_bridge_get_sec_bus(PCI_BRIDGE(dev)); + uint8_t sec = pci_bus_num(sec_bus); + uint8_t sub = dev->config[PCI_SUBORDINATE_BUS]; + + if (pci_bus_is_express(sec_bus)) { + /* + * Walk the bus if there are subordinates, otherwise use a range + * to cover an entire leaf bus. We could potentially also use a + * range for traversed buses, but we'd need to take care not to + * create both Select and Range entries covering the same device. + * This is easier and potentially more compact. + * + * An example bare metal system seems to use Select entries for + * root ports without a slot (ie. built-ins) and Range entries + * when there is a slot. The same system also only hard-codes + * the alias range for an onboard PCIe-to-PCI bridge, apparently + * making no effort to support nested bridges. We attempt to + * be more thorough here. + */ + if (sec == sub) { /* leaf bus */ + /* "Start of Range" IVHD entry, type 0x3 */ + entry = PCI_BUILD_BDF(sec, PCI_DEVFN(0, 0)) << 8 | 0x3; + build_append_int_noprefix(table_data, entry, 4); + /* "End of Range" IVHD entry, type 0x4 */ + entry = PCI_BUILD_BDF(sub, PCI_DEVFN(31, 7)) << 8 | 0x4; + build_append_int_noprefix(table_data, entry, 4); + } else { + pci_for_each_device(sec_bus, sec, insert_ivhd, table_data); + } + } else { + /* + * If the secondary bus is conventional, then we need to create an + * Alias range for everything downstream. The range covers the + * first devfn on the secondary bus to the last devfn on the + * subordinate bus. The alias target depends on legacy versus + * express bridges, just as in pci_device_iommu_address_space(). + * DeviceIDa vs DeviceIDb as per the AMD IOMMU spec. 
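+ *
+ * Worked example (bus numbers assumed): a conventional PCI bridge at
+ * 00:1e.0 with secondary/subordinate bus 1 emits
+ *   Alias Start of Range (0x43): DeviceIDa = 01:00.0, DeviceIDb = 00:1e.0
+ *   End of Range (0x04): 01:1f.7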
+ */
+ uint16_t dev_id_a, dev_id_b;
+
+ dev_id_a = PCI_BUILD_BDF(sec, PCI_DEVFN(0, 0));
+
+ if (pci_is_express(dev) &&
+ pcie_cap_get_type(dev) == PCI_EXP_TYPE_PCI_BRIDGE) {
+ dev_id_b = dev_id_a;
+ } else {
+ dev_id_b = PCI_BUILD_BDF(pci_bus_num(bus), dev->devfn);
+ }
+
+ /* "Alias Start of Range" IVHD entry, type 0x43, 8 bytes */
+ build_append_int_noprefix(table_data, dev_id_a << 8 | 0x43, 4);
+ build_append_int_noprefix(table_data, dev_id_b << 8 | 0x0, 4);
+
+ /* "End of Range" IVHD entry, type 0x4 */
+ entry = PCI_BUILD_BDF(sub, PCI_DEVFN(31, 7)) << 8 | 0x4;
+ build_append_int_noprefix(table_data, entry, 4);
+ }
+ }
+}
+
+/* For all PCI host bridges, walk and insert IVHD entries */
+static int
+ivrs_host_bridges(Object *obj, void *opaque)
+{
+ GArray *ivhd_blob = opaque;
+
+ if (object_dynamic_cast(obj, TYPE_PCI_HOST_BRIDGE)) {
+ PCIBus *bus = PCI_HOST_BRIDGE(obj)->bus;
+
+ if (bus && !pci_bus_bypass_iommu(bus)) {
+ pci_for_each_device_under_bus(bus, insert_ivhd, ivhd_blob);
+ }
+ }
+
+ return 0;
+}
+
+static void
+build_amd_iommu(GArray *table_data, BIOSLinker *linker, const char *oem_id,
+ const char *oem_table_id)
+{
+ int ivhd_table_len = 24;
+ AMDVIState *s = AMD_IOMMU_DEVICE(x86_iommu_get_default());
+ GArray *ivhd_blob = g_array_new(false, true, 1);
+ AcpiTable table = { .sig = "IVRS", .rev = 1, .oem_id = oem_id,
+ .oem_table_id = oem_table_id };
+
+ acpi_table_begin(&table, table_data);
+ /* IVinfo - IO virtualization information common to all
+ * IOMMU units in a system
+ */
+ build_append_int_noprefix(table_data, 40UL << 8 /* PASize */, 4);
+ /* reserved */
+ build_append_int_noprefix(table_data, 0, 8);
+
+ /* IVHD definition - type 10h */
+ build_append_int_noprefix(table_data, 0x10, 1);
+ /* virtualization flags */
+ build_append_int_noprefix(table_data,
+ (1UL << 0) | /* HtTunEn */
+ (1UL << 4) | /* iotblSup */
+ (1UL << 6) | /* PrefSup */
+ (1UL << 7), /* PPRSup */
+ 1);
+
+ /*
+ * A PCI bus walk, for each PCI host bridge, is necessary to create a
+ * complete set of IVHD entries. Do this into a separate blob so that we
+ * can calculate the total IVRS table length here and then append the new
+ * blob further below. Fall back to an entry covering all devices, which
+ * is sufficient when no aliases are present.
+ */
+ object_child_foreach_recursive(object_get_root(),
+ ivrs_host_bridges, ivhd_blob);
+
+ if (!ivhd_blob->len) {
+ /*
+ * Add a single type 1 device entry that covers all devices.
+ * These are 4-byte device entries; refer to the spec,
+ * Table 95: IVHD Device Entry Type Codes (4-byte).
+ */
+ build_append_int_noprefix(ivhd_blob, 0x0000001, 4);
+ }
+
+ ivhd_table_len += ivhd_blob->len;
+
+ /*
+ * When interrupt remapping is supported, we add a special IVHD device
+ * for type IO-APIC.
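+ *
+ * The 8-byte entry built below packs the 0x48 "special device" type
+ * code into the low byte, the IOAPIC's source devid (00:14.0) into
+ * bits 55:40, and variety 0x1 (IOAPIC) into bits 63:56.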
+ */ + if (x86_iommu_ir_supported(x86_iommu_get_default())) { + ivhd_table_len += 8; + } + + /* IVHD length */ + build_append_int_noprefix(table_data, ivhd_table_len, 2); + /* DeviceID */ + build_append_int_noprefix(table_data, s->devid, 2); + /* Capability offset */ + build_append_int_noprefix(table_data, s->capab_offset, 2); + /* IOMMU base address */ + build_append_int_noprefix(table_data, s->mmio.addr, 8); + /* PCI Segment Group */ + build_append_int_noprefix(table_data, 0, 2); + /* IOMMU info */ + build_append_int_noprefix(table_data, 0, 2); + /* IOMMU Feature Reporting */ + build_append_int_noprefix(table_data, + (48UL << 30) | /* HATS */ + (48UL << 28) | /* GATS */ + (1UL << 2) | /* GTSup */ + (1UL << 6), /* GASup */ + 4); + + /* IVHD entries as found above */ + g_array_append_vals(table_data, ivhd_blob->data, ivhd_blob->len); + g_array_free(ivhd_blob, TRUE); + + /* + * Add a special IVHD device type. + * Refer to spec - Table 95: IVHD device entry type codes + * + * Linux IOMMU driver checks for the special IVHD device (type IO-APIC). + * See Linux kernel commit 'c2ff5cf5294bcbd7fa50f7d860e90a66db7e5059' + */ + if (x86_iommu_ir_supported(x86_iommu_get_default())) { + build_append_int_noprefix(table_data, + (0x1ull << 56) | /* type IOAPIC */ + (IOAPIC_SB_DEVID << 40) | /* IOAPIC devid */ + 0x48, /* special device */ + 8); + } + acpi_table_end(linker, &table); +} + +typedef +struct AcpiBuildState { + /* Copy of table in RAM (for patching). */ + MemoryRegion *table_mr; + /* Is table patched? */ + uint8_t patched; + void *rsdp; + MemoryRegion *rsdp_mr; + MemoryRegion *linker_mr; +} AcpiBuildState; + +static bool acpi_get_mcfg(AcpiMcfgInfo *mcfg) +{ + Object *pci_host; + QObject *o; + + pci_host = acpi_get_i386_pci_host(); + if (!pci_host) { + return false; + } + + o = object_property_get_qobject(pci_host, PCIE_HOST_MCFG_BASE, NULL); + if (!o) { + return false; + } + mcfg->base = qnum_get_uint(qobject_to(QNum, o)); + qobject_unref(o); + if (mcfg->base == PCIE_BASE_ADDR_UNMAPPED) { + return false; + } + + o = object_property_get_qobject(pci_host, PCIE_HOST_MCFG_SIZE, NULL); + assert(o); + mcfg->size = qnum_get_uint(qobject_to(QNum, o)); + qobject_unref(o); + return true; +} + +static +void acpi_build(AcpiBuildTables *tables, MachineState *machine) +{ + PCMachineState *pcms = PC_MACHINE(machine); + PCMachineClass *pcmc = PC_MACHINE_GET_CLASS(pcms); + X86MachineState *x86ms = X86_MACHINE(machine); + DeviceState *iommu = pcms->iommu; + GArray *table_offsets; + unsigned facs, dsdt, rsdt, fadt; + AcpiPmInfo pm; + AcpiMiscInfo misc; + AcpiMcfgInfo mcfg; + Range pci_hole = {}, pci_hole64 = {}; + uint8_t *u; + size_t aml_len = 0; + GArray *tables_blob = tables->table_data; + AcpiSlicOem slic_oem = { .id = NULL, .table_id = NULL }; + Object *vmgenid_dev; + char *oem_id; + char *oem_table_id; + + acpi_get_pm_info(machine, &pm); + acpi_get_misc_info(&misc); + acpi_get_pci_holes(&pci_hole, &pci_hole64); + acpi_get_slic_oem(&slic_oem); + + if (slic_oem.id) { + oem_id = slic_oem.id; + } else { + oem_id = x86ms->oem_id; + } + + if (slic_oem.table_id) { + oem_table_id = slic_oem.table_id; + } else { + oem_table_id = x86ms->oem_table_id; + } + + table_offsets = g_array_new(false, true /* clear */, + sizeof(uint32_t)); + ACPI_BUILD_DPRINTF("init ACPI tables\n"); + + bios_linker_loader_alloc(tables->linker, + ACPI_BUILD_TABLE_FILE, tables_blob, + 64 /* Ensure FACS is aligned */, + false /* high memory */); + + /* + * FACS is pointed to by FADT. 
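+ * (through the FADT's FIRMWARE_CTRL/X_FIRMWARE_CTRL fields, which
+ * build_fadt() fills in from pm.fadt.facs_tbl_offset set below).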
+ * We place it first since it's the only table that has alignment + * requirements. + */ + facs = tables_blob->len; + build_facs(tables_blob); + + /* DSDT is pointed to by FADT */ + dsdt = tables_blob->len; + build_dsdt(tables_blob, tables->linker, &pm, &misc, + &pci_hole, &pci_hole64, machine); + + /* Count the size of the DSDT and SSDT, we will need it for legacy + * sizing of ACPI tables. + */ + aml_len += tables_blob->len - dsdt; + + /* ACPI tables pointed to by RSDT */ + fadt = tables_blob->len; + acpi_add_table(table_offsets, tables_blob); + pm.fadt.facs_tbl_offset = &facs; + pm.fadt.dsdt_tbl_offset = &dsdt; + pm.fadt.xdsdt_tbl_offset = &dsdt; + build_fadt(tables_blob, tables->linker, &pm.fadt, oem_id, oem_table_id); + aml_len += tables_blob->len - fadt; + + acpi_add_table(table_offsets, tables_blob); + acpi_build_madt(tables_blob, tables->linker, x86ms, + ACPI_DEVICE_IF(x86ms->acpi_dev), x86ms->oem_id, + x86ms->oem_table_id); + + vmgenid_dev = find_vmgenid_dev(); + if (vmgenid_dev) { + acpi_add_table(table_offsets, tables_blob); + vmgenid_build_acpi(VMGENID(vmgenid_dev), tables_blob, + tables->vmgenid, tables->linker, x86ms->oem_id); + } + + if (misc.has_hpet) { + acpi_add_table(table_offsets, tables_blob); + build_hpet(tables_blob, tables->linker, x86ms->oem_id, + x86ms->oem_table_id); + } +#ifdef CONFIG_TPM + if (misc.tpm_version != TPM_VERSION_UNSPEC) { + if (misc.tpm_version == TPM_VERSION_1_2) { + acpi_add_table(table_offsets, tables_blob); + build_tpm_tcpa(tables_blob, tables->linker, tables->tcpalog, + x86ms->oem_id, x86ms->oem_table_id); + } else { /* TPM_VERSION_2_0 */ + acpi_add_table(table_offsets, tables_blob); + build_tpm2(tables_blob, tables->linker, tables->tcpalog, + x86ms->oem_id, x86ms->oem_table_id); + } + } +#endif + if (machine->numa_state->num_nodes) { + acpi_add_table(table_offsets, tables_blob); + build_srat(tables_blob, tables->linker, machine); + if (machine->numa_state->have_numa_distance) { + acpi_add_table(table_offsets, tables_blob); + build_slit(tables_blob, tables->linker, machine, x86ms->oem_id, + x86ms->oem_table_id); + } + if (machine->numa_state->hmat_enabled) { + acpi_add_table(table_offsets, tables_blob); + build_hmat(tables_blob, tables->linker, machine->numa_state, + x86ms->oem_id, x86ms->oem_table_id); + } + } + if (acpi_get_mcfg(&mcfg)) { + acpi_add_table(table_offsets, tables_blob); + build_mcfg(tables_blob, tables->linker, &mcfg, x86ms->oem_id, + x86ms->oem_table_id); + } + if (object_dynamic_cast(OBJECT(iommu), TYPE_AMD_IOMMU_DEVICE)) { + acpi_add_table(table_offsets, tables_blob); + build_amd_iommu(tables_blob, tables->linker, x86ms->oem_id, + x86ms->oem_table_id); + } else if (object_dynamic_cast(OBJECT(iommu), TYPE_INTEL_IOMMU_DEVICE)) { + acpi_add_table(table_offsets, tables_blob); + build_dmar_q35(tables_blob, tables->linker, x86ms->oem_id, + x86ms->oem_table_id); + } else if (object_dynamic_cast(OBJECT(iommu), TYPE_VIRTIO_IOMMU_PCI)) { + PCIDevice *pdev = PCI_DEVICE(iommu); + + acpi_add_table(table_offsets, tables_blob); + build_viot(machine, tables_blob, tables->linker, pci_get_bdf(pdev), + x86ms->oem_id, x86ms->oem_table_id); + } + if (machine->nvdimms_state->is_enabled) { + nvdimm_build_acpi(table_offsets, tables_blob, tables->linker, + machine->nvdimms_state, machine->ram_slots, + x86ms->oem_id, x86ms->oem_table_id); + } + + acpi_add_table(table_offsets, tables_blob); + build_waet(tables_blob, tables->linker, x86ms->oem_id, x86ms->oem_table_id); + + /* Add tables supplied by user (if any) */ + for (u = acpi_table_first(); u; u 
= acpi_table_next(u)) { + unsigned len = acpi_table_len(u); + + acpi_add_table(table_offsets, tables_blob); + g_array_append_vals(tables_blob, u, len); + } + + /* RSDT is pointed to by RSDP */ + rsdt = tables_blob->len; + build_rsdt(tables_blob, tables->linker, table_offsets, + oem_id, oem_table_id); + + /* RSDP is in FSEG memory, so allocate it separately */ + { + AcpiRsdpData rsdp_data = { + .revision = 0, + .oem_id = x86ms->oem_id, + .xsdt_tbl_offset = NULL, + .rsdt_tbl_offset = &rsdt, + }; + build_rsdp(tables->rsdp, tables->linker, &rsdp_data); + if (!pcmc->rsdp_in_ram) { + /* We used to allocate some extra space for RSDP revision 2 but + * only used the RSDP revision 0 space. The extra bytes were + * zeroed out and not used. + * Here we continue wasting those extra 16 bytes to make sure we + * don't break migration for machine types 2.2 and older due to + * RSDP blob size mismatch. + */ + build_append_int_noprefix(tables->rsdp, 0, 16); + } + } + + /* We'll expose it all to Guest so we want to reduce + * chance of size changes. + * + * We used to align the tables to 4k, but of course this would + * too simple to be enough. 4k turned out to be too small an + * alignment very soon, and in fact it is almost impossible to + * keep the table size stable for all (max_cpus, max_memory_slots) + * combinations. So the table size is always 64k for pc-i440fx-2.1 + * and we give an error if the table grows beyond that limit. + * + * We still have the problem of migrating from "-M pc-i440fx-2.0". For + * that, we exploit the fact that QEMU 2.1 generates _smaller_ tables + * than 2.0 and we can always pad the smaller tables with zeros. We can + * then use the exact size of the 2.0 tables. + * + * All this is for PIIX4, since QEMU 2.0 didn't support Q35 migration. + */ + if (pcmc->legacy_acpi_table_size) { + /* Subtracting aml_len gives the size of fixed tables. Then add the + * size of the PIIX4 DSDT/SSDT in QEMU 2.0. + */ + int legacy_aml_len = + pcmc->legacy_acpi_table_size + + ACPI_BUILD_LEGACY_CPU_AML_SIZE * x86ms->apic_id_limit; + int legacy_table_size = + ROUND_UP(tables_blob->len - aml_len + legacy_aml_len, + ACPI_BUILD_ALIGN_SIZE); + if (tables_blob->len > legacy_table_size) { + /* Should happen only with PCI bridges and -M pc-i440fx-2.0. */ + warn_report("ACPI table size %u exceeds %d bytes," + " migration may not work", + tables_blob->len, legacy_table_size); + error_printf("Try removing CPUs, NUMA nodes, memory slots" + " or PCI bridges."); + } + g_array_set_size(tables_blob, legacy_table_size); + } else { + /* Make sure we have a buffer in case we need to resize the tables. */ + if (tables_blob->len > ACPI_BUILD_TABLE_SIZE / 2) { + /* As of QEMU 2.1, this fires with 160 VCPUs and 255 memory slots. */ + warn_report("ACPI table size %u exceeds %d bytes," + " migration may not work", + tables_blob->len, ACPI_BUILD_TABLE_SIZE / 2); + error_printf("Try removing CPUs, NUMA nodes, memory slots" + " or PCI bridges."); + } + acpi_align_size(tables_blob, ACPI_BUILD_TABLE_SIZE); + } + + acpi_align_size(tables->linker->cmd_blob, ACPI_BUILD_ALIGN_SIZE); + + /* Cleanup memory that's no longer used. */ + g_array_free(table_offsets, true); +} + +static void acpi_ram_update(MemoryRegion *mr, GArray *data) +{ + uint32_t size = acpi_data_len(data); + + /* Make sure RAM size is correct - in case it got changed e.g. 
by migration */ + memory_region_ram_resize(mr, size, &error_abort); + + memcpy(memory_region_get_ram_ptr(mr), data->data, size); + memory_region_set_dirty(mr, 0, size); +} + +static void acpi_build_update(void *build_opaque) +{ + AcpiBuildState *build_state = build_opaque; + AcpiBuildTables tables; + + /* No state to update or already patched? Nothing to do. */ + if (!build_state || build_state->patched) { + return; + } + build_state->patched = 1; + + acpi_build_tables_init(&tables); + + acpi_build(&tables, MACHINE(qdev_get_machine())); + + acpi_ram_update(build_state->table_mr, tables.table_data); + + if (build_state->rsdp) { + memcpy(build_state->rsdp, tables.rsdp->data, acpi_data_len(tables.rsdp)); + } else { + acpi_ram_update(build_state->rsdp_mr, tables.rsdp); + } + + acpi_ram_update(build_state->linker_mr, tables.linker->cmd_blob); + acpi_build_tables_cleanup(&tables, true); +} + +static void acpi_build_reset(void *build_opaque) +{ + AcpiBuildState *build_state = build_opaque; + build_state->patched = 0; +} + +static const VMStateDescription vmstate_acpi_build = { + .name = "acpi_build", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT8(patched, AcpiBuildState), + VMSTATE_END_OF_LIST() + }, +}; + +void acpi_setup(void) +{ + PCMachineState *pcms = PC_MACHINE(qdev_get_machine()); + PCMachineClass *pcmc = PC_MACHINE_GET_CLASS(pcms); + X86MachineState *x86ms = X86_MACHINE(pcms); + AcpiBuildTables tables; + AcpiBuildState *build_state; + Object *vmgenid_dev; +#ifdef CONFIG_TPM + TPMIf *tpm; + static FwCfgTPMConfig tpm_config; +#endif + + if (!x86ms->fw_cfg) { + ACPI_BUILD_DPRINTF("No fw cfg. Bailing out.\n"); + return; + } + + if (!pcms->acpi_build_enabled) { + ACPI_BUILD_DPRINTF("ACPI build disabled. Bailing out.\n"); + return; + } + + if (!x86_machine_is_acpi_enabled(X86_MACHINE(pcms))) { + ACPI_BUILD_DPRINTF("ACPI disabled. Bailing out.\n"); + return; + } + + build_state = g_malloc0(sizeof *build_state); + + acpi_build_tables_init(&tables); + acpi_build(&tables, MACHINE(pcms)); + + /* Now expose it all to Guest */ + build_state->table_mr = acpi_add_rom_blob(acpi_build_update, + build_state, tables.table_data, + ACPI_BUILD_TABLE_FILE); + assert(build_state->table_mr != NULL); + + build_state->linker_mr = + acpi_add_rom_blob(acpi_build_update, build_state, + tables.linker->cmd_blob, ACPI_BUILD_LOADER_FILE); + +#ifdef CONFIG_TPM + fw_cfg_add_file(x86ms->fw_cfg, ACPI_BUILD_TPMLOG_FILE, + tables.tcpalog->data, acpi_data_len(tables.tcpalog)); + + tpm = tpm_find(); + if (tpm && object_property_get_bool(OBJECT(tpm), "ppi", &error_abort)) { + tpm_config = (FwCfgTPMConfig) { + .tpmppi_address = cpu_to_le32(TPM_PPI_ADDR_BASE), + .tpm_version = tpm_get_version(tpm), + .tpmppi_version = TPM_PPI_VERSION_1_30 + }; + fw_cfg_add_file(x86ms->fw_cfg, "etc/tpm/config", + &tpm_config, sizeof tpm_config); + } +#endif + + vmgenid_dev = find_vmgenid_dev(); + if (vmgenid_dev) { + vmgenid_add_fw_cfg(VMGENID(vmgenid_dev), x86ms->fw_cfg, + tables.vmgenid); + } + + if (!pcmc->rsdp_in_ram) { + /* + * Keep for compatibility with old machine types. + * Though RSDP is small, its contents isn't immutable, so + * we'll update it along with the rest of tables on guest access. 
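+ * A sketch of the update path, using only the code above: the guest's
+ * first read triggers the fw_cfg callback registered below, which runs
+ * acpi_build_update(); that rebuilds the tables at most once per reset
+ * (guarded by build_state->patched) and memcpy()s the fresh RSDP over
+ * build_state->rsdp.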
+ */ + uint32_t rsdp_size = acpi_data_len(tables.rsdp); + + build_state->rsdp = g_memdup(tables.rsdp->data, rsdp_size); + fw_cfg_add_file_callback(x86ms->fw_cfg, ACPI_BUILD_RSDP_FILE, + acpi_build_update, NULL, build_state, + build_state->rsdp, rsdp_size, true); + build_state->rsdp_mr = NULL; + } else { + build_state->rsdp = NULL; + build_state->rsdp_mr = acpi_add_rom_blob(acpi_build_update, + build_state, tables.rsdp, + ACPI_BUILD_RSDP_FILE); + } + + qemu_register_reset(acpi_build_reset, build_state); + acpi_build_reset(build_state); + vmstate_register(NULL, 0, &vmstate_acpi_build, build_state); + + /* Cleanup tables but don't free the memory: we track it + * in build_state. + */ + acpi_build_tables_cleanup(&tables, false); +} diff --git a/hw/i386/acpi-build.h b/hw/i386/acpi-build.h new file mode 100644 index 000000000..0dce155c8 --- /dev/null +++ b/hw/i386/acpi-build.h @@ -0,0 +1,15 @@ + +#ifndef HW_I386_ACPI_BUILD_H +#define HW_I386_ACPI_BUILD_H +#include "hw/acpi/acpi-defs.h" + +extern const struct AcpiGenericAddress x86_nvdimm_acpi_dsmio; + +/* PCI Hot-plug registers bases. See docs/spec/acpi_pci_hotplug.txt */ +#define ACPI_PCIHP_SEJ_BASE 0x8 +#define ACPI_PCIHP_BNMR_BASE 0x10 + +void acpi_setup(void); +Object *acpi_get_i386_pci_host(void); + +#endif diff --git a/hw/i386/acpi-common.c b/hw/i386/acpi-common.c new file mode 100644 index 000000000..4aaafbdd7 --- /dev/null +++ b/hw/i386/acpi-common.c @@ -0,0 +1,165 @@ +/* Support for generating ACPI tables and passing them to Guests + * + * Copyright (C) 2008-2010 Kevin O'Connor + * Copyright (C) 2006 Fabrice Bellard + * Copyright (C) 2013 Red Hat Inc + * + * Author: Michael S. Tsirkin + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" + +#include "exec/memory.h" +#include "hw/acpi/acpi.h" +#include "hw/acpi/aml-build.h" +#include "hw/acpi/utils.h" +#include "hw/i386/pc.h" +#include "target/i386/cpu.h" + +#include "acpi-build.h" +#include "acpi-common.h" + +void pc_madt_cpu_entry(AcpiDeviceIf *adev, int uid, + const CPUArchIdList *apic_ids, GArray *entry, + bool force_enabled) +{ + uint32_t apic_id = apic_ids->cpus[uid].arch_id; + /* Flags – Local APIC Flags */ + uint32_t flags = apic_ids->cpus[uid].cpu != NULL || force_enabled ? + 1 /* Enabled */ : 0; + + /* ACPI spec says that LAPIC entry for non present + * CPU may be omitted from MADT or it must be marked + * as disabled. However omitting non present CPU from + * MADT breaks hotplug on linux. So possible CPUs + * should be put in MADT but kept disabled. 
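+ * For example, a possible CPU that has not been hot-plugged yet
+ * (apic_ids->cpus[uid].cpu == NULL) still gets an entry below, just
+ * with flags == 0 (Enabled bit clear), unless force_enabled is set.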
+ */ + if (apic_id < 255) { + /* Rev 1.0b, Table 5-13 Processor Local APIC Structure */ + build_append_int_noprefix(entry, 0, 1); /* Type */ + build_append_int_noprefix(entry, 8, 1); /* Length */ + build_append_int_noprefix(entry, uid, 1); /* ACPI Processor ID */ + build_append_int_noprefix(entry, apic_id, 1); /* APIC ID */ + build_append_int_noprefix(entry, flags, 4); /* Flags */ + } else { + /* Rev 4.0, 5.2.12.12 Processor Local x2APIC Structure */ + build_append_int_noprefix(entry, 9, 1); /* Type */ + build_append_int_noprefix(entry, 16, 1); /* Length */ + build_append_int_noprefix(entry, 0, 2); /* Reserved */ + build_append_int_noprefix(entry, apic_id, 4); /* X2APIC ID */ + build_append_int_noprefix(entry, flags, 4); /* Flags */ + build_append_int_noprefix(entry, uid, 4); /* ACPI Processor UID */ + } +} + +static void build_ioapic(GArray *entry, uint8_t id, uint32_t addr, uint32_t irq) +{ + /* Rev 1.0b, 5.2.8.2 IO APIC */ + build_append_int_noprefix(entry, 1, 1); /* Type */ + build_append_int_noprefix(entry, 12, 1); /* Length */ + build_append_int_noprefix(entry, id, 1); /* IO APIC ID */ + build_append_int_noprefix(entry, 0, 1); /* Reserved */ + build_append_int_noprefix(entry, addr, 4); /* IO APIC Address */ + build_append_int_noprefix(entry, irq, 4); /* System Vector Base */ +} + +static void +build_xrupt_override(GArray *entry, uint8_t src, uint32_t gsi, uint16_t flags) +{ + /* Rev 1.0b, 5.2.8.3.1 Interrupt Source Overrides */ + build_append_int_noprefix(entry, 2, 1); /* Type */ + build_append_int_noprefix(entry, 10, 1); /* Length */ + build_append_int_noprefix(entry, 0, 1); /* Bus */ + build_append_int_noprefix(entry, src, 1); /* Source */ + /* Global System Interrupt Vector */ + build_append_int_noprefix(entry, gsi, 4); + build_append_int_noprefix(entry, flags, 2); /* Flags */ +} + +/* + * ACPI spec, Revision 1.0b + * 5.2.8 Multiple APIC Description Table + */ +void acpi_build_madt(GArray *table_data, BIOSLinker *linker, + X86MachineState *x86ms, AcpiDeviceIf *adev, + const char *oem_id, const char *oem_table_id) +{ + int i; + bool x2apic_mode = false; + MachineClass *mc = MACHINE_GET_CLASS(x86ms); + const CPUArchIdList *apic_ids = mc->possible_cpu_arch_ids(MACHINE(x86ms)); + AcpiDeviceIfClass *adevc = ACPI_DEVICE_IF_GET_CLASS(adev); + AcpiTable table = { .sig = "APIC", .rev = 1, .oem_id = oem_id, + .oem_table_id = oem_table_id }; + + acpi_table_begin(&table, table_data); + /* Local APIC Address */ + build_append_int_noprefix(table_data, APIC_DEFAULT_ADDRESS, 4); + build_append_int_noprefix(table_data, 1 /* PCAT_COMPAT */, 4); /* Flags */ + + for (i = 0; i < apic_ids->len; i++) { + adevc->madt_cpu(adev, i, apic_ids, table_data, false); + if (apic_ids->cpus[i].arch_id > 254) { + x2apic_mode = true; + } + } + + build_ioapic(table_data, ACPI_BUILD_IOAPIC_ID, IO_APIC_DEFAULT_ADDRESS, 0); + if (x86ms->ioapic2) { + build_ioapic(table_data, ACPI_BUILD_IOAPIC_ID + 1, + IO_APIC_SECONDARY_ADDRESS, IO_APIC_SECONDARY_IRQBASE); + } + + if (x86ms->apic_xrupt_override) { + build_xrupt_override(table_data, 0, 2, + 0 /* Flags: Conforms to the specifications of the bus */); + } + + for (i = 1; i < 16; i++) { + if (!(x86ms->pci_irq_mask & (1 << i))) { + /* No need for a INT source override structure. 
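IRQs whose pci_irq_mask bit is clear keep the ISA default of edge-triggered, active-high, so only PCI-routed IRQs need the level-triggered override built below.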
*/ + continue; + } + build_xrupt_override(table_data, i, i, + 0xd /* Flags: Active high, Level Triggered */); + } + + if (x2apic_mode) { + /* Rev 4.0, 5.2.12.13 Local x2APIC NMI Structure*/ + build_append_int_noprefix(table_data, 0xA, 1); /* Type */ + build_append_int_noprefix(table_data, 12, 1); /* Length */ + build_append_int_noprefix(table_data, 0, 2); /* Flags */ + /* ACPI Processor UID */ + build_append_int_noprefix(table_data, 0xFFFFFFFF /* all processors */, + 4); + /* Local x2APIC LINT# */ + build_append_int_noprefix(table_data, 1 /* ACPI_LINT1 */, 1); + build_append_int_noprefix(table_data, 0, 3); /* Reserved */ + } else { + /* Rev 1.0b, 5.2.8.3.3 Local APIC NMI */ + build_append_int_noprefix(table_data, 4, 1); /* Type */ + build_append_int_noprefix(table_data, 6, 1); /* Length */ + /* ACPI Processor ID */ + build_append_int_noprefix(table_data, 0xFF /* all processors */, 1); + build_append_int_noprefix(table_data, 0, 2); /* Flags */ + /* Local APIC INTI# */ + build_append_int_noprefix(table_data, 1 /* ACPI_LINT1 */, 1); + } + + acpi_table_end(linker, &table); +} + diff --git a/hw/i386/acpi-common.h b/hw/i386/acpi-common.h new file mode 100644 index 000000000..a68825acf --- /dev/null +++ b/hw/i386/acpi-common.h @@ -0,0 +1,15 @@ +#ifndef HW_I386_ACPI_COMMON_H +#define HW_I386_ACPI_COMMON_H + +#include "hw/acpi/acpi_dev_interface.h" +#include "hw/acpi/bios-linker-loader.h" +#include "hw/i386/x86.h" + +/* Default IOAPIC ID */ +#define ACPI_BUILD_IOAPIC_ID 0x0 + +void acpi_build_madt(GArray *table_data, BIOSLinker *linker, + X86MachineState *x86ms, AcpiDeviceIf *adev, + const char *oem_id, const char *oem_table_id); + +#endif diff --git a/hw/i386/acpi-microvm.c b/hw/i386/acpi-microvm.c new file mode 100644 index 000000000..196d31849 --- /dev/null +++ b/hw/i386/acpi-microvm.c @@ -0,0 +1,258 @@ +/* Support for generating ACPI tables and passing them to Guests + * + * Copyright (C) 2008-2010 Kevin O'Connor + * Copyright (C) 2006 Fabrice Bellard + * Copyright (C) 2013 Red Hat Inc + * + * Author: Michael S. Tsirkin + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . 
+ */ + +#include "qemu/osdep.h" +#include "qemu/cutils.h" +#include "qapi/error.h" + +#include "exec/memory.h" +#include "hw/acpi/acpi.h" +#include "hw/acpi/aml-build.h" +#include "hw/acpi/bios-linker-loader.h" +#include "hw/acpi/generic_event_device.h" +#include "hw/acpi/utils.h" +#include "hw/i386/fw_cfg.h" +#include "hw/i386/microvm.h" +#include "hw/pci/pci.h" +#include "hw/pci/pcie_host.h" +#include "hw/usb/xhci.h" +#include "hw/virtio/virtio-mmio.h" + +#include "acpi-common.h" +#include "acpi-microvm.h" + +static void acpi_dsdt_add_virtio(Aml *scope, + MicrovmMachineState *mms) +{ + gchar *separator; + long int index; + BusState *bus; + BusChild *kid; + + bus = sysbus_get_default(); + QTAILQ_FOREACH(kid, &bus->children, sibling) { + DeviceState *dev = kid->child; + Object *obj = object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_MMIO); + + if (obj) { + VirtIOMMIOProxy *mmio = VIRTIO_MMIO(obj); + VirtioBusState *mmio_virtio_bus = &mmio->bus; + BusState *mmio_bus = &mmio_virtio_bus->parent_obj; + + if (QTAILQ_EMPTY(&mmio_bus->children)) { + continue; + } + separator = g_strrstr(mmio_bus->name, "."); + if (!separator) { + continue; + } + if (qemu_strtol(separator + 1, NULL, 10, &index) != 0) { + continue; + } + + uint32_t irq = mms->virtio_irq_base + index; + hwaddr base = VIRTIO_MMIO_BASE + index * 512; + hwaddr size = 512; + + Aml *dev = aml_device("VR%02u", (unsigned)index); + aml_append(dev, aml_name_decl("_HID", aml_string("LNRO0005"))); + aml_append(dev, aml_name_decl("_UID", aml_int(index))); + aml_append(dev, aml_name_decl("_CCA", aml_int(1))); + + Aml *crs = aml_resource_template(); + aml_append(crs, aml_memory32_fixed(base, size, AML_READ_WRITE)); + aml_append(crs, + aml_interrupt(AML_CONSUMER, AML_LEVEL, AML_ACTIVE_HIGH, + AML_EXCLUSIVE, &irq, 1)); + aml_append(dev, aml_name_decl("_CRS", crs)); + aml_append(scope, dev); + } + } +} + +static void acpi_dsdt_add_xhci(Aml *scope, MicrovmMachineState *mms) +{ + if (machine_usb(MACHINE(mms))) { + xhci_sysbus_build_aml(scope, MICROVM_XHCI_BASE, MICROVM_XHCI_IRQ); + } +} + +static void acpi_dsdt_add_pci(Aml *scope, MicrovmMachineState *mms) +{ + if (mms->pcie != ON_OFF_AUTO_ON) { + return; + } + + acpi_dsdt_add_gpex(scope, &mms->gpex); +} + +static void +build_dsdt_microvm(GArray *table_data, BIOSLinker *linker, + MicrovmMachineState *mms) +{ + X86MachineState *x86ms = X86_MACHINE(mms); + Aml *dsdt, *sb_scope, *scope, *pkg; + bool ambiguous; + Object *isabus; + AcpiTable table = { .sig = "DSDT", .rev = 2, .oem_id = x86ms->oem_id, + .oem_table_id = x86ms->oem_table_id }; + + isabus = object_resolve_path_type("", TYPE_ISA_BUS, &ambiguous); + assert(isabus); + assert(!ambiguous); + + acpi_table_begin(&table, table_data); + dsdt = init_aml_allocator(); + + sb_scope = aml_scope("_SB"); + fw_cfg_add_acpi_dsdt(sb_scope, x86ms->fw_cfg); + isa_build_aml(ISA_BUS(isabus), sb_scope); + build_ged_aml(sb_scope, GED_DEVICE, x86ms->acpi_dev, + GED_MMIO_IRQ, AML_SYSTEM_MEMORY, GED_MMIO_BASE); + acpi_dsdt_add_power_button(sb_scope); + acpi_dsdt_add_virtio(sb_scope, mms); + acpi_dsdt_add_xhci(sb_scope, mms); + acpi_dsdt_add_pci(sb_scope, mms); + aml_append(dsdt, sb_scope); + + /* ACPI 5.0: Table 7-209 System State Package */ + scope = aml_scope("\\"); + pkg = aml_package(4); + aml_append(pkg, aml_int(ACPI_GED_SLP_TYP_S5)); + aml_append(pkg, aml_int(0)); /* ignored */ + aml_append(pkg, aml_int(0)); /* reserved */ + aml_append(pkg, aml_int(0)); /* reserved */ + aml_append(scope, aml_name_decl("_S5", pkg)); + aml_append(dsdt, scope); + + /* copy AML bytecode 
into ACPI tables blob */ + g_array_append_vals(table_data, dsdt->buf->data, dsdt->buf->len); + + acpi_table_end(linker, &table); + free_aml_allocator(); +} + +static void acpi_build_microvm(AcpiBuildTables *tables, + MicrovmMachineState *mms) +{ + MachineState *machine = MACHINE(mms); + X86MachineState *x86ms = X86_MACHINE(mms); + GArray *table_offsets; + GArray *tables_blob = tables->table_data; + unsigned dsdt, xsdt; + AcpiFadtData pmfadt = { + /* ACPI 5.0: 4.1 Hardware-Reduced ACPI */ + .rev = 5, + .flags = ((1 << ACPI_FADT_F_HW_REDUCED_ACPI) | + (1 << ACPI_FADT_F_RESET_REG_SUP)), + + /* ACPI 5.0: 4.8.3.7 Sleep Control and Status Registers */ + .sleep_ctl = { + .space_id = AML_AS_SYSTEM_MEMORY, + .bit_width = 8, + .address = GED_MMIO_BASE_REGS + ACPI_GED_REG_SLEEP_CTL, + }, + .sleep_sts = { + .space_id = AML_AS_SYSTEM_MEMORY, + .bit_width = 8, + .address = GED_MMIO_BASE_REGS + ACPI_GED_REG_SLEEP_STS, + }, + + /* ACPI 5.0: 4.8.3.6 Reset Register */ + .reset_reg = { + .space_id = AML_AS_SYSTEM_MEMORY, + .bit_width = 8, + .address = GED_MMIO_BASE_REGS + ACPI_GED_REG_RESET, + }, + .reset_val = ACPI_GED_RESET_VALUE, + }; + + table_offsets = g_array_new(false, true /* clear */, + sizeof(uint32_t)); + bios_linker_loader_alloc(tables->linker, + ACPI_BUILD_TABLE_FILE, tables_blob, + 64 /* Ensure FACS is aligned */, + false /* high memory */); + + dsdt = tables_blob->len; + build_dsdt_microvm(tables_blob, tables->linker, mms); + + pmfadt.dsdt_tbl_offset = &dsdt; + pmfadt.xdsdt_tbl_offset = &dsdt; + acpi_add_table(table_offsets, tables_blob); + build_fadt(tables_blob, tables->linker, &pmfadt, x86ms->oem_id, + x86ms->oem_table_id); + + acpi_add_table(table_offsets, tables_blob); + acpi_build_madt(tables_blob, tables->linker, X86_MACHINE(machine), + ACPI_DEVICE_IF(x86ms->acpi_dev), x86ms->oem_id, + x86ms->oem_table_id); + + xsdt = tables_blob->len; + build_xsdt(tables_blob, tables->linker, table_offsets, x86ms->oem_id, + x86ms->oem_table_id); + + /* RSDP is in FSEG memory, so allocate it separately */ + { + AcpiRsdpData rsdp_data = { + /* ACPI 2.0: 5.2.4.3 RSDP Structure */ + .revision = 2, /* xsdt needs v2 */ + .oem_id = x86ms->oem_id, + .xsdt_tbl_offset = &xsdt, + .rsdt_tbl_offset = NULL, + }; + build_rsdp(tables->rsdp, tables->linker, &rsdp_data); + } + + /* Cleanup memory that's no longer used. 
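Only the table_offsets array is freed; the table blobs stay alive in *tables so that acpi_setup_microvm() can expose them as ROM blobs.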
*/ + g_array_free(table_offsets, true); +} + +static void acpi_build_no_update(void *build_opaque) +{ + /* nothing, microvm tables don't change at runtime */ +} + +void acpi_setup_microvm(MicrovmMachineState *mms) +{ + X86MachineState *x86ms = X86_MACHINE(mms); + AcpiBuildTables tables; + + assert(x86ms->fw_cfg); + + if (!x86_machine_is_acpi_enabled(x86ms)) { + return; + } + + acpi_build_tables_init(&tables); + acpi_build_microvm(&tables, mms); + + /* Now expose it all to Guest */ + acpi_add_rom_blob(acpi_build_no_update, NULL, tables.table_data, + ACPI_BUILD_TABLE_FILE); + acpi_add_rom_blob(acpi_build_no_update, NULL, tables.linker->cmd_blob, + ACPI_BUILD_LOADER_FILE); + acpi_add_rom_blob(acpi_build_no_update, NULL, tables.rsdp, + ACPI_BUILD_RSDP_FILE); + + acpi_build_tables_cleanup(&tables, false); +} diff --git a/hw/i386/acpi-microvm.h b/hw/i386/acpi-microvm.h new file mode 100644 index 000000000..dfe853690 --- /dev/null +++ b/hw/i386/acpi-microvm.h @@ -0,0 +1,8 @@ +#ifndef HW_I386_ACPI_MICROVM_H +#define HW_I386_ACPI_MICROVM_H + +#include "hw/i386/microvm.h" + +void acpi_setup_microvm(MicrovmMachineState *mms); + +#endif diff --git a/hw/i386/amd_iommu.c b/hw/i386/amd_iommu.c new file mode 100644 index 000000000..91fe34ae5 --- /dev/null +++ b/hw/i386/amd_iommu.c @@ -0,0 +1,1662 @@ +/* + * QEMU emulation of AMD IOMMU (AMD-Vi) + * + * Copyright (C) 2011 Eduard - Gabriel Munteanu + * Copyright (C) 2015, 2016 David Kiarie Kahurani + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . 
+ *
+ * Cache implementation inspired by hw/i386/intel_iommu.c
+ */
+
+#include "qemu/osdep.h"
+#include "hw/i386/pc.h"
+#include "hw/pci/msi.h"
+#include "hw/pci/pci_bus.h"
+#include "migration/vmstate.h"
+#include "amd_iommu.h"
+#include "qapi/error.h"
+#include "qemu/error-report.h"
+#include "hw/i386/apic_internal.h"
+#include "trace.h"
+#include "hw/i386/apic-msidef.h"
+
+/* used AMD-Vi MMIO registers */
+const char *amdvi_mmio_low[] = {
+ "AMDVI_MMIO_DEVTAB_BASE",
+ "AMDVI_MMIO_CMDBUF_BASE",
+ "AMDVI_MMIO_EVTLOG_BASE",
+ "AMDVI_MMIO_CONTROL",
+ "AMDVI_MMIO_EXCL_BASE",
+ "AMDVI_MMIO_EXCL_LIMIT",
+ "AMDVI_MMIO_EXT_FEATURES",
+ "AMDVI_MMIO_PPR_BASE",
+ "UNHANDLED"
+};
+const char *amdvi_mmio_high[] = {
+ "AMDVI_MMIO_COMMAND_HEAD",
+ "AMDVI_MMIO_COMMAND_TAIL",
+ "AMDVI_MMIO_EVTLOG_HEAD",
+ "AMDVI_MMIO_EVTLOG_TAIL",
+ "AMDVI_MMIO_STATUS",
+ "AMDVI_MMIO_PPR_HEAD",
+ "AMDVI_MMIO_PPR_TAIL",
+ "UNHANDLED"
+};
+
+struct AMDVIAddressSpace {
+ uint8_t bus_num; /* bus number */
+ uint8_t devfn; /* device function */
+ AMDVIState *iommu_state; /* AMDVI - one per machine */
+ MemoryRegion root; /* AMDVI Root memory map region */
+ IOMMUMemoryRegion iommu; /* Device's address translation region */
+ MemoryRegion iommu_ir; /* Device's interrupt remapping region */
+ AddressSpace as; /* device's corresponding address space */
+};
+
+/* AMDVI cache entry */
+typedef struct AMDVIIOTLBEntry {
+ uint16_t domid; /* assigned domain id */
+ uint16_t devid; /* device owning entry */
+ uint64_t perms; /* access permissions */
+ uint64_t translated_addr; /* translated address */
+ uint64_t page_mask; /* physical page size */
+} AMDVIIOTLBEntry;
+
+/* configure MMIO registers at startup/reset */
+static void amdvi_set_quad(AMDVIState *s, hwaddr addr, uint64_t val,
+ uint64_t romask, uint64_t w1cmask)
+{
+ stq_le_p(&s->mmior[addr], val);
+ stq_le_p(&s->romask[addr], romask);
+ stq_le_p(&s->w1cmask[addr], w1cmask);
+}
+
+static uint16_t amdvi_readw(AMDVIState *s, hwaddr addr)
+{
+ return lduw_le_p(&s->mmior[addr]);
+}
+
+static uint32_t amdvi_readl(AMDVIState *s, hwaddr addr)
+{
+ return ldl_le_p(&s->mmior[addr]);
+}
+
+static uint64_t amdvi_readq(AMDVIState *s, hwaddr addr)
+{
+ return ldq_le_p(&s->mmior[addr]);
+}
+
+/* internal write */
+static void amdvi_writeq_raw(AMDVIState *s, hwaddr addr, uint64_t val)
+{
+ stq_le_p(&s->mmior[addr], val);
+}
+
+/* external write: preserve read-only bits, clear write-1-to-clear bits */
+static void amdvi_writew(AMDVIState *s, hwaddr addr, uint16_t val)
+{
+ uint16_t romask = lduw_le_p(&s->romask[addr]);
+ uint16_t w1cmask = lduw_le_p(&s->w1cmask[addr]);
+ uint16_t oldval = lduw_le_p(&s->mmior[addr]);
+ stw_le_p(&s->mmior[addr],
+ ((oldval & romask) | (val & ~romask)) & ~(val & w1cmask));
+}
+
+static void amdvi_writel(AMDVIState *s, hwaddr addr, uint32_t val)
+{
+ uint32_t romask = ldl_le_p(&s->romask[addr]);
+ uint32_t w1cmask = ldl_le_p(&s->w1cmask[addr]);
+ uint32_t oldval = ldl_le_p(&s->mmior[addr]);
+ stl_le_p(&s->mmior[addr],
+ ((oldval & romask) | (val & ~romask)) & ~(val & w1cmask));
+}
+
+static void amdvi_writeq(AMDVIState *s, hwaddr addr, uint64_t val)
+{
+ uint64_t romask = ldq_le_p(&s->romask[addr]);
+ uint64_t w1cmask = ldq_le_p(&s->w1cmask[addr]);
+ uint64_t oldval = ldq_le_p(&s->mmior[addr]);
+ stq_le_p(&s->mmior[addr],
+ ((oldval & romask) | (val & ~romask)) & ~(val & w1cmask));
+}
+
+/* test whether any of the given bits are set in a 64-bit register */
+static bool amdvi_test_mask(AMDVIState *s, hwaddr addr, uint64_t val)
+{
+ return amdvi_readq(s, addr) & val;
+}
+
+/* OR a 64-bit register with a 64-bit value storing result in the
register */ +static void amdvi_assign_orq(AMDVIState *s, hwaddr addr, uint64_t val) +{ + amdvi_writeq_raw(s, addr, amdvi_readq(s, addr) | val); +} + +/* AND a 64-bit register with a 64-bit value storing result in the register */ +static void amdvi_assign_andq(AMDVIState *s, hwaddr addr, uint64_t val) +{ + amdvi_writeq_raw(s, addr, amdvi_readq(s, addr) & val); +} + +static void amdvi_generate_msi_interrupt(AMDVIState *s) +{ + MSIMessage msg = {}; + MemTxAttrs attrs = { + .requester_id = pci_requester_id(&s->pci.dev) + }; + + if (msi_enabled(&s->pci.dev)) { + msg = msi_get_message(&s->pci.dev, 0); + address_space_stl_le(&address_space_memory, msg.address, msg.data, + attrs, NULL); + } +} + +static void amdvi_log_event(AMDVIState *s, uint64_t *evt) +{ + /* event logging not enabled */ + if (!s->evtlog_enabled || amdvi_test_mask(s, AMDVI_MMIO_STATUS, + AMDVI_MMIO_STATUS_EVT_OVF)) { + return; + } + + /* event log buffer full */ + if (s->evtlog_tail >= s->evtlog_len) { + amdvi_assign_orq(s, AMDVI_MMIO_STATUS, AMDVI_MMIO_STATUS_EVT_OVF); + /* generate interrupt */ + amdvi_generate_msi_interrupt(s); + return; + } + + if (dma_memory_write(&address_space_memory, s->evtlog + s->evtlog_tail, + evt, AMDVI_EVENT_LEN)) { + trace_amdvi_evntlog_fail(s->evtlog, s->evtlog_tail); + } + + s->evtlog_tail += AMDVI_EVENT_LEN; + amdvi_assign_orq(s, AMDVI_MMIO_STATUS, AMDVI_MMIO_STATUS_COMP_INT); + amdvi_generate_msi_interrupt(s); +} + +static void amdvi_setevent_bits(uint64_t *buffer, uint64_t value, int start, + int length) +{ + int index = start / 64, bitpos = start % 64; + uint64_t mask = MAKE_64BIT_MASK(start, length); + buffer[index] &= ~mask; + buffer[index] |= (value << bitpos) & mask; +} +/* + * AMDVi event structure + * 0:15 -> DeviceID + * 55:63 -> event type + miscellaneous info + * 63:127 -> related address + */ +static void amdvi_encode_event(uint64_t *evt, uint16_t devid, uint64_t addr, + uint16_t info) +{ + amdvi_setevent_bits(evt, devid, 0, 16); + amdvi_setevent_bits(evt, info, 55, 8); + amdvi_setevent_bits(evt, addr, 63, 64); +} +/* log an error encountered during a page walk + * + * @addr: virtual address in translation request + */ +static void amdvi_page_fault(AMDVIState *s, uint16_t devid, + hwaddr addr, uint16_t info) +{ + uint64_t evt[4]; + + info |= AMDVI_EVENT_IOPF_I | AMDVI_EVENT_IOPF; + amdvi_encode_event(evt, devid, addr, info); + amdvi_log_event(s, evt); + pci_word_test_and_set_mask(s->pci.dev.config + PCI_STATUS, + PCI_STATUS_SIG_TARGET_ABORT); +} +/* + * log a master abort accessing device table + * @devtab : address of device table entry + * @info : error flags + */ +static void amdvi_log_devtab_error(AMDVIState *s, uint16_t devid, + hwaddr devtab, uint16_t info) +{ + uint64_t evt[4]; + + info |= AMDVI_EVENT_DEV_TAB_HW_ERROR; + + amdvi_encode_event(evt, devid, devtab, info); + amdvi_log_event(s, evt); + pci_word_test_and_set_mask(s->pci.dev.config + PCI_STATUS, + PCI_STATUS_SIG_TARGET_ABORT); +} +/* log an event trying to access command buffer + * @addr : address that couldn't be accessed + */ +static void amdvi_log_command_error(AMDVIState *s, hwaddr addr) +{ + uint64_t evt[4], info = AMDVI_EVENT_COMMAND_HW_ERROR; + + amdvi_encode_event(evt, 0, addr, info); + amdvi_log_event(s, evt); + pci_word_test_and_set_mask(s->pci.dev.config + PCI_STATUS, + PCI_STATUS_SIG_TARGET_ABORT); +} +/* log an illegal comand event + * @addr : address of illegal command + */ +static void amdvi_log_illegalcom_error(AMDVIState *s, uint16_t info, + hwaddr addr) +{ + uint64_t evt[4]; + + info |= 
AMDVI_EVENT_ILLEGAL_COMMAND_ERROR; + amdvi_encode_event(evt, 0, addr, info); + amdvi_log_event(s, evt); +} +/* log an error accessing device table + * + * @devid : device owning the table entry + * @devtab : address of device table entry + * @info : error flags + */ +static void amdvi_log_illegaldevtab_error(AMDVIState *s, uint16_t devid, + hwaddr addr, uint16_t info) +{ + uint64_t evt[4]; + + info |= AMDVI_EVENT_ILLEGAL_DEVTAB_ENTRY; + amdvi_encode_event(evt, devid, addr, info); + amdvi_log_event(s, evt); +} +/* log an error accessing a PTE entry + * @addr : address that couldn't be accessed + */ +static void amdvi_log_pagetab_error(AMDVIState *s, uint16_t devid, + hwaddr addr, uint16_t info) +{ + uint64_t evt[4]; + + info |= AMDVI_EVENT_PAGE_TAB_HW_ERROR; + amdvi_encode_event(evt, devid, addr, info); + amdvi_log_event(s, evt); + pci_word_test_and_set_mask(s->pci.dev.config + PCI_STATUS, + PCI_STATUS_SIG_TARGET_ABORT); +} + +static gboolean amdvi_uint64_equal(gconstpointer v1, gconstpointer v2) +{ + return *((const uint64_t *)v1) == *((const uint64_t *)v2); +} + +static guint amdvi_uint64_hash(gconstpointer v) +{ + return (guint)*(const uint64_t *)v; +} + +static AMDVIIOTLBEntry *amdvi_iotlb_lookup(AMDVIState *s, hwaddr addr, + uint64_t devid) +{ + uint64_t key = (addr >> AMDVI_PAGE_SHIFT_4K) | + ((uint64_t)(devid) << AMDVI_DEVID_SHIFT); + return g_hash_table_lookup(s->iotlb, &key); +} + +static void amdvi_iotlb_reset(AMDVIState *s) +{ + assert(s->iotlb); + trace_amdvi_iotlb_reset(); + g_hash_table_remove_all(s->iotlb); +} + +static gboolean amdvi_iotlb_remove_by_devid(gpointer key, gpointer value, + gpointer user_data) +{ + AMDVIIOTLBEntry *entry = (AMDVIIOTLBEntry *)value; + uint16_t devid = *(uint16_t *)user_data; + return entry->devid == devid; +} + +static void amdvi_iotlb_remove_page(AMDVIState *s, hwaddr addr, + uint64_t devid) +{ + uint64_t key = (addr >> AMDVI_PAGE_SHIFT_4K) | + ((uint64_t)(devid) << AMDVI_DEVID_SHIFT); + g_hash_table_remove(s->iotlb, &key); +} + +static void amdvi_update_iotlb(AMDVIState *s, uint16_t devid, + uint64_t gpa, IOMMUTLBEntry to_cache, + uint16_t domid) +{ + AMDVIIOTLBEntry *entry = g_new(AMDVIIOTLBEntry, 1); + uint64_t *key = g_new(uint64_t, 1); + uint64_t gfn = gpa >> AMDVI_PAGE_SHIFT_4K; + + /* don't cache erroneous translations */ + if (to_cache.perm != IOMMU_NONE) { + trace_amdvi_cache_update(domid, PCI_BUS_NUM(devid), PCI_SLOT(devid), + PCI_FUNC(devid), gpa, to_cache.translated_addr); + + if (g_hash_table_size(s->iotlb) >= AMDVI_IOTLB_MAX_SIZE) { + amdvi_iotlb_reset(s); + } + + entry->domid = domid; + entry->perms = to_cache.perm; + entry->translated_addr = to_cache.translated_addr; + entry->page_mask = to_cache.addr_mask; + *key = gfn | ((uint64_t)(devid) << AMDVI_DEVID_SHIFT); + g_hash_table_replace(s->iotlb, key, entry); + } +} + +static void amdvi_completion_wait(AMDVIState *s, uint64_t *cmd) +{ + /* pad the last 3 bits */ + hwaddr addr = cpu_to_le64(extract64(cmd[0], 3, 49)) << 3; + uint64_t data = cpu_to_le64(cmd[1]); + + if (extract64(cmd[0], 52, 8)) { + amdvi_log_illegalcom_error(s, extract64(cmd[0], 60, 4), + s->cmdbuf + s->cmdbuf_head); + } + if (extract64(cmd[0], 0, 1)) { + if (dma_memory_write(&address_space_memory, addr, &data, + AMDVI_COMPLETION_DATA_SIZE)) { + trace_amdvi_completion_wait_fail(addr); + } + } + /* set completion interrupt */ + if (extract64(cmd[0], 1, 1)) { + amdvi_assign_orq(s, AMDVI_MMIO_STATUS, AMDVI_MMIO_STATUS_COMP_INT); + /* generate interrupt */ + amdvi_generate_msi_interrupt(s); + } + 
trace_amdvi_completion_wait(addr, data); +} + +/* log error without aborting since linux seems to be using reserved bits */ +static void amdvi_inval_devtab_entry(AMDVIState *s, uint64_t *cmd) +{ + uint16_t devid = cpu_to_le16((uint16_t)extract64(cmd[0], 0, 16)); + + /* This command should invalidate internal caches of which there isn't */ + if (extract64(cmd[0], 16, 44) || cmd[1]) { + amdvi_log_illegalcom_error(s, extract64(cmd[0], 60, 4), + s->cmdbuf + s->cmdbuf_head); + } + trace_amdvi_devtab_inval(PCI_BUS_NUM(devid), PCI_SLOT(devid), + PCI_FUNC(devid)); +} + +static void amdvi_complete_ppr(AMDVIState *s, uint64_t *cmd) +{ + if (extract64(cmd[0], 16, 16) || extract64(cmd[0], 52, 8) || + extract64(cmd[1], 0, 2) || extract64(cmd[1], 3, 29) + || extract64(cmd[1], 48, 16)) { + amdvi_log_illegalcom_error(s, extract64(cmd[0], 60, 4), + s->cmdbuf + s->cmdbuf_head); + } + trace_amdvi_ppr_exec(); +} + +static void amdvi_inval_all(AMDVIState *s, uint64_t *cmd) +{ + if (extract64(cmd[0], 0, 60) || cmd[1]) { + amdvi_log_illegalcom_error(s, extract64(cmd[0], 60, 4), + s->cmdbuf + s->cmdbuf_head); + } + + amdvi_iotlb_reset(s); + trace_amdvi_all_inval(); +} + +static gboolean amdvi_iotlb_remove_by_domid(gpointer key, gpointer value, + gpointer user_data) +{ + AMDVIIOTLBEntry *entry = (AMDVIIOTLBEntry *)value; + uint16_t domid = *(uint16_t *)user_data; + return entry->domid == domid; +} + +/* we don't have devid - we can't remove pages by address */ +static void amdvi_inval_pages(AMDVIState *s, uint64_t *cmd) +{ + uint16_t domid = cpu_to_le16((uint16_t)extract64(cmd[0], 32, 16)); + + if (extract64(cmd[0], 20, 12) || extract64(cmd[0], 48, 12) || + extract64(cmd[1], 3, 9)) { + amdvi_log_illegalcom_error(s, extract64(cmd[0], 60, 4), + s->cmdbuf + s->cmdbuf_head); + } + + g_hash_table_foreach_remove(s->iotlb, amdvi_iotlb_remove_by_domid, + &domid); + trace_amdvi_pages_inval(domid); +} + +static void amdvi_prefetch_pages(AMDVIState *s, uint64_t *cmd) +{ + if (extract64(cmd[0], 16, 8) || extract64(cmd[0], 52, 8) || + extract64(cmd[1], 1, 1) || extract64(cmd[1], 3, 1) || + extract64(cmd[1], 5, 7)) { + amdvi_log_illegalcom_error(s, extract64(cmd[0], 60, 4), + s->cmdbuf + s->cmdbuf_head); + } + + trace_amdvi_prefetch_pages(); +} + +static void amdvi_inval_inttable(AMDVIState *s, uint64_t *cmd) +{ + if (extract64(cmd[0], 16, 44) || cmd[1]) { + amdvi_log_illegalcom_error(s, extract64(cmd[0], 60, 4), + s->cmdbuf + s->cmdbuf_head); + return; + } + + trace_amdvi_intr_inval(); +} + +/* FIXME: Try to work with the specified size instead of all the pages + * when the S bit is on + */ +static void iommu_inval_iotlb(AMDVIState *s, uint64_t *cmd) +{ + + uint16_t devid = extract64(cmd[0], 0, 16); + if (extract64(cmd[1], 1, 1) || extract64(cmd[1], 3, 1) || + extract64(cmd[1], 6, 6)) { + amdvi_log_illegalcom_error(s, extract64(cmd[0], 60, 4), + s->cmdbuf + s->cmdbuf_head); + return; + } + + if (extract64(cmd[1], 0, 1)) { + g_hash_table_foreach_remove(s->iotlb, amdvi_iotlb_remove_by_devid, + &devid); + } else { + amdvi_iotlb_remove_page(s, cpu_to_le64(extract64(cmd[1], 12, 52)) << 12, + cpu_to_le16(extract64(cmd[1], 0, 16))); + } + trace_amdvi_iotlb_inval(); +} + +/* not honouring reserved bits is regarded as an illegal command */ +static void amdvi_cmdbuf_exec(AMDVIState *s) +{ + uint64_t cmd[2]; + + if (dma_memory_read(&address_space_memory, s->cmdbuf + s->cmdbuf_head, + cmd, AMDVI_COMMAND_SIZE)) { + trace_amdvi_command_read_fail(s->cmdbuf, s->cmdbuf_head); + amdvi_log_command_error(s, s->cmdbuf + s->cmdbuf_head); + return; 
+ } + + switch (extract64(cmd[0], 60, 4)) { + case AMDVI_CMD_COMPLETION_WAIT: + amdvi_completion_wait(s, cmd); + break; + case AMDVI_CMD_INVAL_DEVTAB_ENTRY: + amdvi_inval_devtab_entry(s, cmd); + break; + case AMDVI_CMD_INVAL_AMDVI_PAGES: + amdvi_inval_pages(s, cmd); + break; + case AMDVI_CMD_INVAL_IOTLB_PAGES: + iommu_inval_iotlb(s, cmd); + break; + case AMDVI_CMD_INVAL_INTR_TABLE: + amdvi_inval_inttable(s, cmd); + break; + case AMDVI_CMD_PREFETCH_AMDVI_PAGES: + amdvi_prefetch_pages(s, cmd); + break; + case AMDVI_CMD_COMPLETE_PPR_REQUEST: + amdvi_complete_ppr(s, cmd); + break; + case AMDVI_CMD_INVAL_AMDVI_ALL: + amdvi_inval_all(s, cmd); + break; + default: + trace_amdvi_unhandled_command(extract64(cmd[1], 60, 4)); + /* log illegal command */ + amdvi_log_illegalcom_error(s, extract64(cmd[1], 60, 4), + s->cmdbuf + s->cmdbuf_head); + } +} + +static void amdvi_cmdbuf_run(AMDVIState *s) +{ + if (!s->cmdbuf_enabled) { + trace_amdvi_command_error(amdvi_readq(s, AMDVI_MMIO_CONTROL)); + return; + } + + /* check if there is work to do. */ + while (s->cmdbuf_head != s->cmdbuf_tail) { + trace_amdvi_command_exec(s->cmdbuf_head, s->cmdbuf_tail, s->cmdbuf); + amdvi_cmdbuf_exec(s); + s->cmdbuf_head += AMDVI_COMMAND_SIZE; + amdvi_writeq_raw(s, AMDVI_MMIO_COMMAND_HEAD, s->cmdbuf_head); + + /* wrap head pointer */ + if (s->cmdbuf_head >= s->cmdbuf_len * AMDVI_COMMAND_SIZE) { + s->cmdbuf_head = 0; + } + } +} + +static void amdvi_mmio_trace(hwaddr addr, unsigned size) +{ + uint8_t index = (addr & ~0x2000) / 8; + + if ((addr & 0x2000)) { + /* high table */ + index = index >= AMDVI_MMIO_REGS_HIGH ? AMDVI_MMIO_REGS_HIGH : index; + trace_amdvi_mmio_read(amdvi_mmio_high[index], addr, size, addr & ~0x07); + } else { + index = index >= AMDVI_MMIO_REGS_LOW ? AMDVI_MMIO_REGS_LOW : index; + trace_amdvi_mmio_read(amdvi_mmio_low[index], addr, size, addr & ~0x07); + } +} + +static uint64_t amdvi_mmio_read(void *opaque, hwaddr addr, unsigned size) +{ + AMDVIState *s = opaque; + + uint64_t val = -1; + if (addr + size > AMDVI_MMIO_SIZE) { + trace_amdvi_mmio_read_invalid(AMDVI_MMIO_SIZE, addr, size); + return (uint64_t)-1; + } + + if (size == 2) { + val = amdvi_readw(s, addr); + } else if (size == 4) { + val = amdvi_readl(s, addr); + } else if (size == 8) { + val = amdvi_readq(s, addr); + } + amdvi_mmio_trace(addr, size); + + return val; +} + +static void amdvi_handle_control_write(AMDVIState *s) +{ + unsigned long control = amdvi_readq(s, AMDVI_MMIO_CONTROL); + s->enabled = !!(control & AMDVI_MMIO_CONTROL_AMDVIEN); + + s->ats_enabled = !!(control & AMDVI_MMIO_CONTROL_HTTUNEN); + s->evtlog_enabled = s->enabled && !!(control & + AMDVI_MMIO_CONTROL_EVENTLOGEN); + + s->evtlog_intr = !!(control & AMDVI_MMIO_CONTROL_EVENTINTEN); + s->completion_wait_intr = !!(control & AMDVI_MMIO_CONTROL_COMWAITINTEN); + s->cmdbuf_enabled = s->enabled && !!(control & + AMDVI_MMIO_CONTROL_CMDBUFLEN); + s->ga_enabled = !!(control & AMDVI_MMIO_CONTROL_GAEN); + + /* update the flags depending on the control register */ + if (s->cmdbuf_enabled) { + amdvi_assign_orq(s, AMDVI_MMIO_STATUS, AMDVI_MMIO_STATUS_CMDBUF_RUN); + } else { + amdvi_assign_andq(s, AMDVI_MMIO_STATUS, ~AMDVI_MMIO_STATUS_CMDBUF_RUN); + } + if (s->evtlog_enabled) { + amdvi_assign_orq(s, AMDVI_MMIO_STATUS, AMDVI_MMIO_STATUS_EVT_RUN); + } else { + amdvi_assign_andq(s, AMDVI_MMIO_STATUS, ~AMDVI_MMIO_STATUS_EVT_RUN); + } + + trace_amdvi_control_status(control); + amdvi_cmdbuf_run(s); +} + +static inline void amdvi_handle_devtab_write(AMDVIState *s) + +{ + uint64_t val = amdvi_readq(s, 
AMDVI_MMIO_DEVICE_TABLE); + s->devtab = (val & AMDVI_MMIO_DEVTAB_BASE_MASK); + + /* set device table length */ + s->devtab_len = ((val & AMDVI_MMIO_DEVTAB_SIZE_MASK) + 1 * + (AMDVI_MMIO_DEVTAB_SIZE_UNIT / + AMDVI_MMIO_DEVTAB_ENTRY_SIZE)); +} + +static inline void amdvi_handle_cmdhead_write(AMDVIState *s) +{ + s->cmdbuf_head = amdvi_readq(s, AMDVI_MMIO_COMMAND_HEAD) + & AMDVI_MMIO_CMDBUF_HEAD_MASK; + amdvi_cmdbuf_run(s); +} + +static inline void amdvi_handle_cmdbase_write(AMDVIState *s) +{ + s->cmdbuf = amdvi_readq(s, AMDVI_MMIO_COMMAND_BASE) + & AMDVI_MMIO_CMDBUF_BASE_MASK; + s->cmdbuf_len = 1UL << (amdvi_readq(s, AMDVI_MMIO_CMDBUF_SIZE_BYTE) + & AMDVI_MMIO_CMDBUF_SIZE_MASK); + s->cmdbuf_head = s->cmdbuf_tail = 0; +} + +static inline void amdvi_handle_cmdtail_write(AMDVIState *s) +{ + s->cmdbuf_tail = amdvi_readq(s, AMDVI_MMIO_COMMAND_TAIL) + & AMDVI_MMIO_CMDBUF_TAIL_MASK; + amdvi_cmdbuf_run(s); +} + +static inline void amdvi_handle_excllim_write(AMDVIState *s) +{ + uint64_t val = amdvi_readq(s, AMDVI_MMIO_EXCL_LIMIT); + s->excl_limit = (val & AMDVI_MMIO_EXCL_LIMIT_MASK) | + AMDVI_MMIO_EXCL_LIMIT_LOW; +} + +static inline void amdvi_handle_evtbase_write(AMDVIState *s) +{ + uint64_t val = amdvi_readq(s, AMDVI_MMIO_EVENT_BASE); + s->evtlog = val & AMDVI_MMIO_EVTLOG_BASE_MASK; + s->evtlog_len = 1UL << (amdvi_readq(s, AMDVI_MMIO_EVTLOG_SIZE_BYTE) + & AMDVI_MMIO_EVTLOG_SIZE_MASK); +} + +static inline void amdvi_handle_evttail_write(AMDVIState *s) +{ + uint64_t val = amdvi_readq(s, AMDVI_MMIO_EVENT_TAIL); + s->evtlog_tail = val & AMDVI_MMIO_EVTLOG_TAIL_MASK; +} + +static inline void amdvi_handle_evthead_write(AMDVIState *s) +{ + uint64_t val = amdvi_readq(s, AMDVI_MMIO_EVENT_HEAD); + s->evtlog_head = val & AMDVI_MMIO_EVTLOG_HEAD_MASK; +} + +static inline void amdvi_handle_pprbase_write(AMDVIState *s) +{ + uint64_t val = amdvi_readq(s, AMDVI_MMIO_PPR_BASE); + s->ppr_log = val & AMDVI_MMIO_PPRLOG_BASE_MASK; + s->pprlog_len = 1UL << (amdvi_readq(s, AMDVI_MMIO_PPRLOG_SIZE_BYTE) + & AMDVI_MMIO_PPRLOG_SIZE_MASK); +} + +static inline void amdvi_handle_pprhead_write(AMDVIState *s) +{ + uint64_t val = amdvi_readq(s, AMDVI_MMIO_PPR_HEAD); + s->pprlog_head = val & AMDVI_MMIO_PPRLOG_HEAD_MASK; +} + +static inline void amdvi_handle_pprtail_write(AMDVIState *s) +{ + uint64_t val = amdvi_readq(s, AMDVI_MMIO_PPR_TAIL); + s->pprlog_tail = val & AMDVI_MMIO_PPRLOG_TAIL_MASK; +} + +/* FIXME: something might go wrong if System Software writes in chunks + * of one byte but linux writes in chunks of 4 bytes so currently it + * works correctly with linux but will definitely be busted if software + * reads/writes 8 bytes + */ +static void amdvi_mmio_reg_write(AMDVIState *s, unsigned size, uint64_t val, + hwaddr addr) +{ + if (size == 2) { + amdvi_writew(s, addr, val); + } else if (size == 4) { + amdvi_writel(s, addr, val); + } else if (size == 8) { + amdvi_writeq(s, addr, val); + } +} + +static void amdvi_mmio_write(void *opaque, hwaddr addr, uint64_t val, + unsigned size) +{ + AMDVIState *s = opaque; + unsigned long offset = addr & 0x07; + + if (addr + size > AMDVI_MMIO_SIZE) { + trace_amdvi_mmio_write("error: addr outside region: max ", + (uint64_t)AMDVI_MMIO_SIZE, size, val, offset); + return; + } + + amdvi_mmio_trace(addr, size); + switch (addr & ~0x07) { + case AMDVI_MMIO_CONTROL: + amdvi_mmio_reg_write(s, size, val, addr); + amdvi_handle_control_write(s); + break; + case AMDVI_MMIO_DEVICE_TABLE: + amdvi_mmio_reg_write(s, size, val, addr); + /* set device table address + * This also suffers from inability to tell 
whether software
+ * is done writing
+ */
+ if (offset || (size == 8)) {
+ amdvi_handle_devtab_write(s);
+ }
+ break;
+ case AMDVI_MMIO_COMMAND_HEAD:
+ amdvi_mmio_reg_write(s, size, val, addr);
+ amdvi_handle_cmdhead_write(s);
+ break;
+ case AMDVI_MMIO_COMMAND_BASE:
+ amdvi_mmio_reg_write(s, size, val, addr);
+ /* FIXME - make sure System Software has finished writing, in case
+ * it writes in chunks of less than 8 bytes, in a robust way. For
+ * now, this hack works for the Linux driver.
+ */
+ if (offset || (size == 8)) {
+ amdvi_handle_cmdbase_write(s);
+ }
+ break;
+ case AMDVI_MMIO_COMMAND_TAIL:
+ amdvi_mmio_reg_write(s, size, val, addr);
+ amdvi_handle_cmdtail_write(s);
+ break;
+ case AMDVI_MMIO_EVENT_BASE:
+ amdvi_mmio_reg_write(s, size, val, addr);
+ amdvi_handle_evtbase_write(s);
+ break;
+ case AMDVI_MMIO_EVENT_HEAD:
+ amdvi_mmio_reg_write(s, size, val, addr);
+ amdvi_handle_evthead_write(s);
+ break;
+ case AMDVI_MMIO_EVENT_TAIL:
+ amdvi_mmio_reg_write(s, size, val, addr);
+ amdvi_handle_evttail_write(s);
+ break;
+ case AMDVI_MMIO_EXCL_LIMIT:
+ amdvi_mmio_reg_write(s, size, val, addr);
+ amdvi_handle_excllim_write(s);
+ break;
+ /* PPR log base - unused for now */
+ case AMDVI_MMIO_PPR_BASE:
+ amdvi_mmio_reg_write(s, size, val, addr);
+ amdvi_handle_pprbase_write(s);
+ break;
+ /* PPR log head - also unused for now */
+ case AMDVI_MMIO_PPR_HEAD:
+ amdvi_mmio_reg_write(s, size, val, addr);
+ amdvi_handle_pprhead_write(s);
+ break;
+ /* PPR log tail - unused for now */
+ case AMDVI_MMIO_PPR_TAIL:
+ amdvi_mmio_reg_write(s, size, val, addr);
+ amdvi_handle_pprtail_write(s);
+ break;
+ }
+}
+
+static inline uint64_t amdvi_get_perms(uint64_t entry)
+{
+ return (entry & (AMDVI_DEV_PERM_READ | AMDVI_DEV_PERM_WRITE)) >>
+ AMDVI_DEV_PERM_SHIFT;
+}
+
+/* validate that reserved bits are honoured */
+static bool amdvi_validate_dte(AMDVIState *s, uint16_t devid,
+ uint64_t *dte)
+{
+ if ((dte[0] & AMDVI_DTE_LOWER_QUAD_RESERVED)
+ || (dte[1] & AMDVI_DTE_MIDDLE_QUAD_RESERVED)
+ || (dte[2] & AMDVI_DTE_UPPER_QUAD_RESERVED) || dte[3]) {
+ amdvi_log_illegaldevtab_error(s, devid,
+ s->devtab +
+ devid * AMDVI_DEVTAB_ENTRY_SIZE, 0);
+ return false;
+ }
+
+ return true;
+}
+
+/* get a device table entry given the devid */
+static bool amdvi_get_dte(AMDVIState *s, int devid, uint64_t *entry)
+{
+ uint32_t offset = devid * AMDVI_DEVTAB_ENTRY_SIZE;
+
+ if (dma_memory_read(&address_space_memory, s->devtab + offset, entry,
+ AMDVI_DEVTAB_ENTRY_SIZE)) {
+ trace_amdvi_dte_get_fail(s->devtab, offset);
+ /* log error accessing dte */
+ amdvi_log_devtab_error(s, devid, s->devtab + offset, 0);
+ return false;
+ }
+
+ *entry = le64_to_cpu(*entry);
+ if (!amdvi_validate_dte(s, devid, entry)) {
+ trace_amdvi_invalid_dte(entry[0]);
+ return false;
+ }
+
+ return true;
+}
+
+/* get pte translation mode */
+static inline uint8_t get_pte_translation_mode(uint64_t pte)
+{
+ return (pte >> AMDVI_DEV_MODE_RSHIFT) & AMDVI_DEV_MODE_MASK;
+}
+
+static inline uint64_t pte_override_page_mask(uint64_t pte)
+{
+ uint8_t page_mask = 13;
+ uint64_t addr = (pte & AMDVI_DEV_PT_ROOT_MASK) >> 12;
+ /* find the first zero bit */
+ while (addr & 1) {
+ page_mask++;
+ addr = addr >> 1;
+ }
+
+ return ~((1ULL << page_mask) - 1);
+}
+
+static inline uint64_t pte_get_page_mask(uint64_t oldlevel)
+{
+ return ~((1UL << ((oldlevel * 9) + 3)) - 1);
+}
+
+static inline uint64_t amdvi_get_pte_entry(AMDVIState *s, uint64_t pte_addr,
+ uint16_t devid)
+{
+ uint64_t pte;
+
+ if (dma_memory_read(&address_space_memory, pte_addr, &pte, sizeof(pte))) {
+
trace_amdvi_get_pte_hwerror(pte_addr); + amdvi_log_pagetab_error(s, devid, pte_addr, 0); + pte = 0; + return pte; + } + + pte = le64_to_cpu(pte); + return pte; +} + +static void amdvi_page_walk(AMDVIAddressSpace *as, uint64_t *dte, + IOMMUTLBEntry *ret, unsigned perms, + hwaddr addr) +{ + unsigned level, present, pte_perms, oldlevel; + uint64_t pte = dte[0], pte_addr, page_mask; + + /* make sure the DTE has TV = 1 */ + if (pte & AMDVI_DEV_TRANSLATION_VALID) { + level = get_pte_translation_mode(pte); + if (level >= 7) { + trace_amdvi_mode_invalid(level, addr); + return; + } + if (level == 0) { + goto no_remap; + } + + /* we are at the leaf page table or page table encodes a huge page */ + while (level > 0) { + pte_perms = amdvi_get_perms(pte); + present = pte & 1; + if (!present || perms != (perms & pte_perms)) { + amdvi_page_fault(as->iommu_state, as->devfn, addr, perms); + trace_amdvi_page_fault(addr); + return; + } + + /* go to the next lower level */ + pte_addr = pte & AMDVI_DEV_PT_ROOT_MASK; + /* add offset and load pte */ + pte_addr += ((addr >> (3 + 9 * level)) & 0x1FF) << 3; + pte = amdvi_get_pte_entry(as->iommu_state, pte_addr, as->devfn); + if (!pte) { + return; + } + oldlevel = level; + level = get_pte_translation_mode(pte); + if (level == 0x7) { + break; + } + } + + if (level == 0x7) { + page_mask = pte_override_page_mask(pte); + } else { + page_mask = pte_get_page_mask(oldlevel); + } + + /* get access permissions from pte */ + ret->iova = addr & page_mask; + ret->translated_addr = (pte & AMDVI_DEV_PT_ROOT_MASK) & page_mask; + ret->addr_mask = ~page_mask; + ret->perm = amdvi_get_perms(pte); + return; + } +no_remap: + ret->iova = addr & AMDVI_PAGE_MASK_4K; + ret->translated_addr = addr & AMDVI_PAGE_MASK_4K; + ret->addr_mask = ~AMDVI_PAGE_MASK_4K; + ret->perm = amdvi_get_perms(pte); +} + +static void amdvi_do_translate(AMDVIAddressSpace *as, hwaddr addr, + bool is_write, IOMMUTLBEntry *ret) +{ + AMDVIState *s = as->iommu_state; + uint16_t devid = PCI_BUILD_BDF(as->bus_num, as->devfn); + AMDVIIOTLBEntry *iotlb_entry = amdvi_iotlb_lookup(s, addr, devid); + uint64_t entry[4]; + + if (iotlb_entry) { + trace_amdvi_iotlb_hit(PCI_BUS_NUM(devid), PCI_SLOT(devid), + PCI_FUNC(devid), addr, iotlb_entry->translated_addr); + ret->iova = addr & ~iotlb_entry->page_mask; + ret->translated_addr = iotlb_entry->translated_addr; + ret->addr_mask = iotlb_entry->page_mask; + ret->perm = iotlb_entry->perms; + return; + } + + if (!amdvi_get_dte(s, devid, entry)) { + return; + } + + /* devices with V = 0 are not translated */ + if (!(entry[0] & AMDVI_DEV_VALID)) { + goto out; + } + + amdvi_page_walk(as, entry, ret, + is_write ? 
AMDVI_PERM_WRITE : AMDVI_PERM_READ, addr); + + amdvi_update_iotlb(s, devid, addr, *ret, + entry[1] & AMDVI_DEV_DOMID_ID_MASK); + return; + +out: + ret->iova = addr & AMDVI_PAGE_MASK_4K; + ret->translated_addr = addr & AMDVI_PAGE_MASK_4K; + ret->addr_mask = ~AMDVI_PAGE_MASK_4K; + ret->perm = IOMMU_RW; +} + +static inline bool amdvi_is_interrupt_addr(hwaddr addr) +{ + return addr >= AMDVI_INT_ADDR_FIRST && addr <= AMDVI_INT_ADDR_LAST; +} + +static IOMMUTLBEntry amdvi_translate(IOMMUMemoryRegion *iommu, hwaddr addr, + IOMMUAccessFlags flag, int iommu_idx) +{ + AMDVIAddressSpace *as = container_of(iommu, AMDVIAddressSpace, iommu); + AMDVIState *s = as->iommu_state; + IOMMUTLBEntry ret = { + .target_as = &address_space_memory, + .iova = addr, + .translated_addr = 0, + .addr_mask = ~(hwaddr)0, + .perm = IOMMU_NONE + }; + + if (!s->enabled) { + /* AMDVI disabled - corresponds to iommu=off, not a + * failure to provide any parameter + */ + ret.iova = addr & AMDVI_PAGE_MASK_4K; + ret.translated_addr = addr & AMDVI_PAGE_MASK_4K; + ret.addr_mask = ~AMDVI_PAGE_MASK_4K; + ret.perm = IOMMU_RW; + return ret; + } else if (amdvi_is_interrupt_addr(addr)) { + ret.iova = addr & AMDVI_PAGE_MASK_4K; + ret.translated_addr = addr & AMDVI_PAGE_MASK_4K; + ret.addr_mask = ~AMDVI_PAGE_MASK_4K; + ret.perm = IOMMU_WO; + return ret; + } + + amdvi_do_translate(as, addr, flag & IOMMU_WO, &ret); + trace_amdvi_translation_result(as->bus_num, PCI_SLOT(as->devfn), + PCI_FUNC(as->devfn), addr, ret.translated_addr); + return ret; +} + +static int amdvi_get_irte(AMDVIState *s, MSIMessage *origin, uint64_t *dte, + union irte *irte, uint16_t devid) +{ + uint64_t irte_root, offset; + + irte_root = dte[2] & AMDVI_IR_PHYS_ADDR_MASK; + offset = (origin->data & AMDVI_IRTE_OFFSET) << 2; + + trace_amdvi_ir_irte(irte_root, offset); + + if (dma_memory_read(&address_space_memory, irte_root + offset, + irte, sizeof(*irte))) { + trace_amdvi_ir_err("failed to get irte"); + return -AMDVI_IR_GET_IRTE; + } + + trace_amdvi_ir_irte_val(irte->val); + + return 0; +} + +static int amdvi_int_remap_legacy(AMDVIState *iommu, + MSIMessage *origin, + MSIMessage *translated, + uint64_t *dte, + X86IOMMUIrq *irq, + uint16_t sid) +{ + int ret; + union irte irte; + + /* get interrupt remapping table */ + ret = amdvi_get_irte(iommu, origin, dte, &irte, sid); + if (ret < 0) { + return ret; + } + + if (!irte.fields.valid) { + trace_amdvi_ir_target_abort("RemapEn is disabled"); + return -AMDVI_IR_TARGET_ABORT; + } + + if (irte.fields.guest_mode) { + error_report_once("guest mode is not zero"); + return -AMDVI_IR_ERR; + } + + if (irte.fields.int_type > AMDVI_IOAPIC_INT_TYPE_ARBITRATED) { + error_report_once("reserved int_type"); + return -AMDVI_IR_ERR; + } + + irq->delivery_mode = irte.fields.int_type; + irq->vector = irte.fields.vector; + irq->dest_mode = irte.fields.dm; + irq->redir_hint = irte.fields.rq_eoi; + irq->dest = irte.fields.destination; + + return 0; +} + +static int amdvi_get_irte_ga(AMDVIState *s, MSIMessage *origin, uint64_t *dte, + struct irte_ga *irte, uint16_t devid) +{ + uint64_t irte_root, offset; + + irte_root = dte[2] & AMDVI_IR_PHYS_ADDR_MASK; + offset = (origin->data & AMDVI_IRTE_OFFSET) << 4; + trace_amdvi_ir_irte(irte_root, offset); + + if (dma_memory_read(&address_space_memory, irte_root + offset, + irte, sizeof(*irte))) { + trace_amdvi_ir_err("failed to get irte_ga"); + return -AMDVI_IR_GET_IRTE; + } + + trace_amdvi_ir_irte_ga_val(irte->hi.val, irte->lo.val); + return 0; +} + +static int amdvi_int_remap_ga(AMDVIState *iommu, + MSIMessage 
*origin, + MSIMessage *translated, + uint64_t *dte, + X86IOMMUIrq *irq, + uint16_t sid) +{ + int ret; + struct irte_ga irte; + + /* get interrupt remapping table */ + ret = amdvi_get_irte_ga(iommu, origin, dte, &irte, sid); + if (ret < 0) { + return ret; + } + + if (!irte.lo.fields_remap.valid) { + trace_amdvi_ir_target_abort("RemapEn is disabled"); + return -AMDVI_IR_TARGET_ABORT; + } + + if (irte.lo.fields_remap.guest_mode) { + error_report_once("guest mode is not zero"); + return -AMDVI_IR_ERR; + } + + if (irte.lo.fields_remap.int_type > AMDVI_IOAPIC_INT_TYPE_ARBITRATED) { + error_report_once("reserved int_type is set"); + return -AMDVI_IR_ERR; + } + + irq->delivery_mode = irte.lo.fields_remap.int_type; + irq->vector = irte.hi.fields.vector; + irq->dest_mode = irte.lo.fields_remap.dm; + irq->redir_hint = irte.lo.fields_remap.rq_eoi; + irq->dest = irte.lo.fields_remap.destination; + + return 0; +} + +static int __amdvi_int_remap_msi(AMDVIState *iommu, + MSIMessage *origin, + MSIMessage *translated, + uint64_t *dte, + X86IOMMUIrq *irq, + uint16_t sid) +{ + int ret; + uint8_t int_ctl; + + int_ctl = (dte[2] >> AMDVI_IR_INTCTL_SHIFT) & 3; + trace_amdvi_ir_intctl(int_ctl); + + switch (int_ctl) { + case AMDVI_IR_INTCTL_PASS: + memcpy(translated, origin, sizeof(*origin)); + return 0; + case AMDVI_IR_INTCTL_REMAP: + break; + case AMDVI_IR_INTCTL_ABORT: + trace_amdvi_ir_target_abort("int_ctl abort"); + return -AMDVI_IR_TARGET_ABORT; + default: + trace_amdvi_ir_err("int_ctl reserved"); + return -AMDVI_IR_ERR; + } + + if (iommu->ga_enabled) { + ret = amdvi_int_remap_ga(iommu, origin, translated, dte, irq, sid); + } else { + ret = amdvi_int_remap_legacy(iommu, origin, translated, dte, irq, sid); + } + + return ret; +} + +/* Interrupt remapping for MSI/MSI-X entry */ +static int amdvi_int_remap_msi(AMDVIState *iommu, + MSIMessage *origin, + MSIMessage *translated, + uint16_t sid) +{ + int ret = 0; + uint64_t pass = 0; + uint64_t dte[4] = { 0 }; + X86IOMMUIrq irq = { 0 }; + uint8_t dest_mode, delivery_mode; + + assert(origin && translated); + + /* + * When the IOMMU is enabled, an interrupt remap request will come either + * from the IO-APIC or a PCI device. If the interrupt is from a PCI device + * then it will have a valid requester id, but if the interrupt is from + * the IO-APIC then the requester id will be invalid. + */ + if (sid == X86_IOMMU_SID_INVALID) { + sid = AMDVI_IOAPIC_SB_DEVID; + } + + trace_amdvi_ir_remap_msi_req(origin->address, origin->data, sid); + + /* check if device table entry is set before we go further. */ + if (!iommu || !iommu->devtab_len) { + memcpy(translated, origin, sizeof(*origin)); + goto out; + } + + if (!amdvi_get_dte(iommu, sid, dte)) { + return -AMDVI_IR_ERR; + } + + /* Check if IR is enabled in DTE */ + if (!(dte[2] & AMDVI_IR_REMAP_ENABLE)) { + memcpy(translated, origin, sizeof(*origin)); + goto out; + } + + /* validate that we are configured with intremap=on */ + if (!x86_iommu_ir_supported(X86_IOMMU_DEVICE(iommu))) { + trace_amdvi_err("Interrupt remapping is enabled in the guest but " + "not in the host. 
Use intremap=on to enable interrupt " + "remapping in amd-iommu."); + return -AMDVI_IR_ERR; + } + + if (origin->address & AMDVI_MSI_ADDR_HI_MASK) { + trace_amdvi_err("MSI address high 32 bits non-zero when " + "Interrupt Remapping enabled."); + return -AMDVI_IR_ERR; + } + + if ((origin->address & AMDVI_MSI_ADDR_LO_MASK) != APIC_DEFAULT_ADDRESS) { + trace_amdvi_err("MSI is not from IOAPIC."); + return -AMDVI_IR_ERR; + } + + /* + * The MSI data register bits [10:8] are used to get the upstream interrupt type. + * + * See MSI/MSI-X format: + * https://pdfs.semanticscholar.org/presentation/9420/c279e942eca568157711ef5c92b800c40a79.pdf + * (page 5) + */ + delivery_mode = (origin->data >> MSI_DATA_DELIVERY_MODE_SHIFT) & 7; + + switch (delivery_mode) { + case AMDVI_IOAPIC_INT_TYPE_FIXED: + case AMDVI_IOAPIC_INT_TYPE_ARBITRATED: + trace_amdvi_ir_delivery_mode("fixed/arbitrated"); + ret = __amdvi_int_remap_msi(iommu, origin, translated, dte, &irq, sid); + if (ret < 0) { + goto remap_fail; + } else { + /* Translate IRQ to MSI messages */ + x86_iommu_irq_to_msi_message(&irq, translated); + goto out; + } + break; + case AMDVI_IOAPIC_INT_TYPE_SMI: + error_report("SMI is not supported!"); + ret = -AMDVI_IR_ERR; + break; + case AMDVI_IOAPIC_INT_TYPE_NMI: + pass = dte[3] & AMDVI_DEV_NMI_PASS_MASK; + trace_amdvi_ir_delivery_mode("nmi"); + break; + case AMDVI_IOAPIC_INT_TYPE_INIT: + pass = dte[3] & AMDVI_DEV_INT_PASS_MASK; + trace_amdvi_ir_delivery_mode("init"); + break; + case AMDVI_IOAPIC_INT_TYPE_EINT: + pass = dte[3] & AMDVI_DEV_EINT_PASS_MASK; + trace_amdvi_ir_delivery_mode("eint"); + break; + default: + trace_amdvi_ir_delivery_mode("unsupported delivery_mode"); + ret = -AMDVI_IR_ERR; + break; + } + + if (ret < 0) { + goto remap_fail; + } + + /* + * The MSI address register bit[2] is used to get the destination + * mode. The dest_mode 1 is valid for fixed and arbitrated interrupts + * only. 
 + */ + dest_mode = (origin->address >> MSI_ADDR_DEST_MODE_SHIFT) & 1; + if (dest_mode) { + trace_amdvi_ir_err("invalid dest_mode"); + ret = -AMDVI_IR_ERR; + goto remap_fail; + } + + if (pass) { + memcpy(translated, origin, sizeof(*origin)); + } else { + trace_amdvi_ir_err("passthrough is not enabled"); + ret = -AMDVI_IR_ERR; + goto remap_fail; + } + +out: + trace_amdvi_ir_remap_msi(origin->address, origin->data, + translated->address, translated->data); + return 0; + +remap_fail: + return ret; +} + +static int amdvi_int_remap(X86IOMMUState *iommu, + MSIMessage *origin, + MSIMessage *translated, + uint16_t sid) +{ + return amdvi_int_remap_msi(AMD_IOMMU_DEVICE(iommu), origin, + translated, sid); +} + +static MemTxResult amdvi_mem_ir_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size, + MemTxAttrs attrs) +{ + int ret; + MSIMessage from = { 0, 0 }, to = { 0, 0 }; + uint16_t sid = AMDVI_IOAPIC_SB_DEVID; + + from.address = (uint64_t) addr + AMDVI_INT_ADDR_FIRST; + from.data = (uint32_t) value; + + trace_amdvi_mem_ir_write_req(addr, value, size); + + if (!attrs.unspecified) { + /* We have explicit Source ID */ + sid = attrs.requester_id; + } + + ret = amdvi_int_remap_msi(opaque, &from, &to, sid); + if (ret < 0) { + /* TODO: log the event using IOMMU log event interface */ + error_report_once("failed to remap interrupt from devid 0x%x", sid); + return MEMTX_ERROR; + } + + apic_get_class()->send_msi(&to); + + trace_amdvi_mem_ir_write(to.address, to.data); + return MEMTX_OK; +} + +static MemTxResult amdvi_mem_ir_read(void *opaque, hwaddr addr, + uint64_t *data, unsigned size, + MemTxAttrs attrs) +{ + return MEMTX_OK; +} + +static const MemoryRegionOps amdvi_ir_ops = { + .read_with_attrs = amdvi_mem_ir_read, + .write_with_attrs = amdvi_mem_ir_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .impl = { + .min_access_size = 4, + .max_access_size = 4, + }, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + } +}; + +static AddressSpace *amdvi_host_dma_iommu(PCIBus *bus, void *opaque, int devfn) +{ + char name[128]; + AMDVIState *s = opaque; + AMDVIAddressSpace **iommu_as, *amdvi_dev_as; + int bus_num = pci_bus_num(bus); + + iommu_as = s->address_spaces[bus_num]; + + /* allocate memory during the first run */ + if (!iommu_as) { + iommu_as = g_malloc0(sizeof(AMDVIAddressSpace *) * PCI_DEVFN_MAX); + s->address_spaces[bus_num] = iommu_as; + } + + /* set up AMD-Vi region */ + if (!iommu_as[devfn]) { + snprintf(name, sizeof(name), "amd_iommu_devfn_%d", devfn); + + iommu_as[devfn] = g_malloc0(sizeof(AMDVIAddressSpace)); + iommu_as[devfn]->bus_num = (uint8_t)bus_num; + iommu_as[devfn]->devfn = (uint8_t)devfn; + iommu_as[devfn]->iommu_state = s; + + amdvi_dev_as = iommu_as[devfn]; + + /* + * Memory region relationships look like this (address ranges show + * only the lower 32 bits for brevity): + * + * |-----------------+-------------------+----------| + * | Name | Address range | Priority | + * |-----------------+-------------------+----------+ + * | amdvi_root | 00000000-ffffffff | 0 | + * | amdvi_iommu | 00000000-ffffffff | 1 | + * | amdvi_iommu_ir | fee00000-feefffff | 64 | + * |-----------------+-------------------+----------| + */ + memory_region_init_iommu(&amdvi_dev_as->iommu, + sizeof(amdvi_dev_as->iommu), + TYPE_AMD_IOMMU_MEMORY_REGION, + OBJECT(s), + "amd_iommu", UINT64_MAX); + memory_region_init(&amdvi_dev_as->root, OBJECT(s), + "amdvi_root", UINT64_MAX); + address_space_init(&amdvi_dev_as->as, &amdvi_dev_as->root, name); 
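+ /* the interrupt window sits above the IOMMU region in priority + * (see the table above), so guest MSI writes to fee00000-feefffff + * are claimed by amdvi_ir_ops and remapped instead of being + * treated as ordinary DMA + */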
memory_region_init_io(&amdvi_dev_as->iommu_ir, OBJECT(s), + &amdvi_ir_ops, s, "amd_iommu_ir", + AMDVI_INT_ADDR_SIZE); + memory_region_add_subregion_overlap(&amdvi_dev_as->root, + AMDVI_INT_ADDR_FIRST, + &amdvi_dev_as->iommu_ir, + 64); + memory_region_add_subregion_overlap(&amdvi_dev_as->root, 0, + MEMORY_REGION(&amdvi_dev_as->iommu), + 1); + } + return &iommu_as[devfn]->as; +} + +static const MemoryRegionOps mmio_mem_ops = { + .read = amdvi_mmio_read, + .write = amdvi_mmio_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .impl = { + .min_access_size = 1, + .max_access_size = 8, + .unaligned = false, + }, + .valid = { + .min_access_size = 1, + .max_access_size = 8, + } +}; + +static int amdvi_iommu_notify_flag_changed(IOMMUMemoryRegion *iommu, + IOMMUNotifierFlag old, + IOMMUNotifierFlag new, + Error **errp) +{ + AMDVIAddressSpace *as = container_of(iommu, AMDVIAddressSpace, iommu); + + if (new & IOMMU_NOTIFIER_MAP) { + error_setg(errp, + "device %02x.%02x.%x requires iommu notifier which is not " + "currently supported", as->bus_num, PCI_SLOT(as->devfn), + PCI_FUNC(as->devfn)); + return -EINVAL; + } + return 0; +} + +static void amdvi_init(AMDVIState *s) +{ + amdvi_iotlb_reset(s); + + s->devtab_len = 0; + s->cmdbuf_len = 0; + s->cmdbuf_head = 0; + s->cmdbuf_tail = 0; + s->evtlog_head = 0; + s->evtlog_tail = 0; + s->excl_enabled = false; + s->excl_allow = false; + s->mmio_enabled = false; + s->enabled = false; + s->ats_enabled = false; + s->cmdbuf_enabled = false; + + /* reset MMIO */ + memset(s->mmior, 0, AMDVI_MMIO_SIZE); + amdvi_set_quad(s, AMDVI_MMIO_EXT_FEATURES, AMDVI_EXT_FEATURES, + 0xffffffffffffffef, 0); + amdvi_set_quad(s, AMDVI_MMIO_STATUS, 0, 0x98, 0x67); + + /* reset device ident */ + pci_config_set_vendor_id(s->pci.dev.config, PCI_VENDOR_ID_AMD); + pci_config_set_prog_interface(s->pci.dev.config, 00); + pci_config_set_device_id(s->pci.dev.config, s->devid); + pci_config_set_class(s->pci.dev.config, 0x0806); + + /* reset AMDVI specific capabilities, all r/o */ + pci_set_long(s->pci.dev.config + s->capab_offset, AMDVI_CAPAB_FEATURES); + pci_set_long(s->pci.dev.config + s->capab_offset + AMDVI_CAPAB_BAR_LOW, + s->mmio.addr & ~(0xffff0000)); + pci_set_long(s->pci.dev.config + s->capab_offset + AMDVI_CAPAB_BAR_HIGH, + (s->mmio.addr & ~(0xffff)) >> 16); + pci_set_long(s->pci.dev.config + s->capab_offset + AMDVI_CAPAB_RANGE, + 0xff000000); + pci_set_long(s->pci.dev.config + s->capab_offset + AMDVI_CAPAB_MISC, 0); + pci_set_long(s->pci.dev.config + s->capab_offset + AMDVI_CAPAB_MISC, + AMDVI_MAX_PH_ADDR | AMDVI_MAX_GVA_ADDR | AMDVI_MAX_VA_ADDR); +} + +static void amdvi_sysbus_reset(DeviceState *dev) +{ + AMDVIState *s = AMD_IOMMU_DEVICE(dev); + + msi_reset(&s->pci.dev); + amdvi_init(s); +} + +static void amdvi_sysbus_realize(DeviceState *dev, Error **errp) +{ + int ret = 0; + AMDVIState *s = AMD_IOMMU_DEVICE(dev); + MachineState *ms = MACHINE(qdev_get_machine()); + PCMachineState *pcms = PC_MACHINE(ms); + X86MachineState *x86ms = X86_MACHINE(ms); + PCIBus *bus = pcms->bus; + + s->iotlb = g_hash_table_new_full(amdvi_uint64_hash, + amdvi_uint64_equal, g_free, g_free); + + /* This device should take care of IOMMU PCI properties */ + if (!qdev_realize(DEVICE(&s->pci), &bus->qbus, errp)) { + return; + } + ret = pci_add_capability(&s->pci.dev, AMDVI_CAPAB_ID_SEC, 0, + AMDVI_CAPAB_SIZE, errp); + if (ret < 0) { + return; + } + s->capab_offset = ret; + + ret = pci_add_capability(&s->pci.dev, PCI_CAP_ID_MSI, 0, + AMDVI_CAPAB_REG_SIZE, errp); + if (ret < 0) { + return; + } + ret = 
pci_add_capability(&s->pci.dev, PCI_CAP_ID_HT, 0, + AMDVI_CAPAB_REG_SIZE, errp); + if (ret < 0) { + return; + } + + /* Pseudo address space under root PCI bus. */ + x86ms->ioapic_as = amdvi_host_dma_iommu(bus, s, AMDVI_IOAPIC_SB_DEVID); + + /* set up MMIO */ + memory_region_init_io(&s->mmio, OBJECT(s), &mmio_mem_ops, s, "amdvi-mmio", + AMDVI_MMIO_SIZE); + + sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->mmio); + sysbus_mmio_map(SYS_BUS_DEVICE(s), 0, AMDVI_BASE_ADDR); + pci_setup_iommu(bus, amdvi_host_dma_iommu, s); + s->devid = object_property_get_int(OBJECT(&s->pci), "addr", &error_abort); + msi_init(&s->pci.dev, 0, 1, true, false, errp); + amdvi_init(s); +} + +static const VMStateDescription vmstate_amdvi_sysbus = { + .name = "amd-iommu", + .unmigratable = 1 +}; + +static void amdvi_sysbus_instance_init(Object *klass) +{ + AMDVIState *s = AMD_IOMMU_DEVICE(klass); + + object_initialize(&s->pci, sizeof(s->pci), TYPE_AMD_IOMMU_PCI); +} + +static void amdvi_sysbus_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + X86IOMMUClass *dc_class = X86_IOMMU_DEVICE_CLASS(klass); + + dc->reset = amdvi_sysbus_reset; + dc->vmsd = &vmstate_amdvi_sysbus; + dc->hotpluggable = false; + dc_class->realize = amdvi_sysbus_realize; + dc_class->int_remap = amdvi_int_remap; + /* Supported by the pc-q35-* machine types */ + dc->user_creatable = true; + set_bit(DEVICE_CATEGORY_MISC, dc->categories); + dc->desc = "AMD IOMMU (AMD-Vi) DMA Remapping device"; +} + +static const TypeInfo amdvi_sysbus = { + .name = TYPE_AMD_IOMMU_DEVICE, + .parent = TYPE_X86_IOMMU_DEVICE, + .instance_size = sizeof(AMDVIState), + .instance_init = amdvi_sysbus_instance_init, + .class_init = amdvi_sysbus_class_init +}; + +static void amdvi_pci_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + set_bit(DEVICE_CATEGORY_MISC, dc->categories); + dc->desc = "AMD IOMMU (AMD-Vi) DMA Remapping device"; +} + +static const TypeInfo amdvi_pci = { + .name = TYPE_AMD_IOMMU_PCI, + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(AMDVIPCIState), + .class_init = amdvi_pci_class_init, + .interfaces = (InterfaceInfo[]) { + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { }, + }, +}; + +static void amdvi_iommu_memory_region_class_init(ObjectClass *klass, void *data) +{ + IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass); + + imrc->translate = amdvi_translate; + imrc->notify_flag_changed = amdvi_iommu_notify_flag_changed; +} + +static const TypeInfo amdvi_iommu_memory_region_info = { + .parent = TYPE_IOMMU_MEMORY_REGION, + .name = TYPE_AMD_IOMMU_MEMORY_REGION, + .class_init = amdvi_iommu_memory_region_class_init, +}; + +static void amdvi_register_types(void) +{ + type_register_static(&amdvi_pci); + type_register_static(&amdvi_sysbus); + type_register_static(&amdvi_iommu_memory_region_info); +} + +type_init(amdvi_register_types); diff --git a/hw/i386/amd_iommu.h b/hw/i386/amd_iommu.h new file mode 100644 index 000000000..79d38a3e4 --- /dev/null +++ b/hw/i386/amd_iommu.h @@ -0,0 +1,372 @@ +/* + * QEMU emulation of an AMD IOMMU (AMD-Vi) + * + * Copyright (C) 2011 Eduard - Gabriel Munteanu + * Copyright (C) 2015, 2016 David Kiarie Kahurani + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
 + + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + + * You should have received a copy of the GNU General Public License along + * with this program; if not, see <http://www.gnu.org/licenses/>. + */ + +#ifndef AMD_IOMMU_H +#define AMD_IOMMU_H + +#include "hw/pci/pci.h" +#include "hw/i386/x86-iommu.h" +#include "qom/object.h" + +/* Capability registers */ +#define AMDVI_CAPAB_BAR_LOW 0x04 +#define AMDVI_CAPAB_BAR_HIGH 0x08 +#define AMDVI_CAPAB_RANGE 0x0C +#define AMDVI_CAPAB_MISC 0x10 + +#define AMDVI_CAPAB_SIZE 0x18 +#define AMDVI_CAPAB_REG_SIZE 0x04 + +/* Capability header data */ +#define AMDVI_CAPAB_ID_SEC 0xf +#define AMDVI_CAPAB_FLAT_EXT (1 << 28) +#define AMDVI_CAPAB_EFR_SUP (1 << 27) +#define AMDVI_CAPAB_FLAG_NPCACHE (1 << 26) +#define AMDVI_CAPAB_FLAG_HTTUNNEL (1 << 25) +#define AMDVI_CAPAB_FLAG_IOTLBSUP (1 << 24) +#define AMDVI_CAPAB_INIT_TYPE (3 << 16) + +/* No. of used MMIO registers */ +#define AMDVI_MMIO_REGS_HIGH 7 +#define AMDVI_MMIO_REGS_LOW 8 + +/* MMIO registers */ +#define AMDVI_MMIO_DEVICE_TABLE 0x0000 +#define AMDVI_MMIO_COMMAND_BASE 0x0008 +#define AMDVI_MMIO_EVENT_BASE 0x0010 +#define AMDVI_MMIO_CONTROL 0x0018 +#define AMDVI_MMIO_EXCL_BASE 0x0020 +#define AMDVI_MMIO_EXCL_LIMIT 0x0028 +#define AMDVI_MMIO_EXT_FEATURES 0x0030 +#define AMDVI_MMIO_COMMAND_HEAD 0x2000 +#define AMDVI_MMIO_COMMAND_TAIL 0x2008 +#define AMDVI_MMIO_EVENT_HEAD 0x2010 +#define AMDVI_MMIO_EVENT_TAIL 0x2018 +#define AMDVI_MMIO_STATUS 0x2020 +#define AMDVI_MMIO_PPR_BASE 0x0038 +#define AMDVI_MMIO_PPR_HEAD 0x2030 +#define AMDVI_MMIO_PPR_TAIL 0x2038 + +#define AMDVI_MMIO_SIZE 0x4000 + +#define AMDVI_MMIO_DEVTAB_SIZE_MASK ((1ULL << 12) - 1) +#define AMDVI_MMIO_DEVTAB_BASE_MASK (((1ULL << 52) - 1) & ~ \ + AMDVI_MMIO_DEVTAB_SIZE_MASK) +#define AMDVI_MMIO_DEVTAB_ENTRY_SIZE 32 +#define AMDVI_MMIO_DEVTAB_SIZE_UNIT 4096 + +/* some of these are similar, but kept separate for readability */ +#define AMDVI_MMIO_CMDBUF_SIZE_BYTE (AMDVI_MMIO_COMMAND_BASE + 7) +#define AMDVI_MMIO_CMDBUF_SIZE_MASK 0x0f +#define AMDVI_MMIO_CMDBUF_BASE_MASK AMDVI_MMIO_DEVTAB_BASE_MASK +#define AMDVI_MMIO_CMDBUF_HEAD_MASK (((1ULL << 19) - 1) & ~0x0f) +#define AMDVI_MMIO_CMDBUF_TAIL_MASK AMDVI_MMIO_EVTLOG_HEAD_MASK + +#define AMDVI_MMIO_EVTLOG_SIZE_BYTE (AMDVI_MMIO_EVENT_BASE + 7) +#define AMDVI_MMIO_EVTLOG_SIZE_MASK AMDVI_MMIO_CMDBUF_SIZE_MASK +#define AMDVI_MMIO_EVTLOG_BASE_MASK AMDVI_MMIO_CMDBUF_BASE_MASK +#define AMDVI_MMIO_EVTLOG_HEAD_MASK (((1ULL << 19) - 1) & ~0x0f) +#define AMDVI_MMIO_EVTLOG_TAIL_MASK AMDVI_MMIO_EVTLOG_HEAD_MASK + +#define AMDVI_MMIO_PPRLOG_SIZE_BYTE (AMDVI_MMIO_EVENT_BASE + 7) +#define AMDVI_MMIO_PPRLOG_HEAD_MASK AMDVI_MMIO_EVTLOG_HEAD_MASK +#define AMDVI_MMIO_PPRLOG_TAIL_MASK AMDVI_MMIO_EVTLOG_HEAD_MASK +#define AMDVI_MMIO_PPRLOG_BASE_MASK AMDVI_MMIO_EVTLOG_BASE_MASK +#define AMDVI_MMIO_PPRLOG_SIZE_MASK AMDVI_MMIO_EVTLOG_SIZE_MASK + +#define AMDVI_MMIO_EXCL_ENABLED_MASK (1ULL << 0) +#define AMDVI_MMIO_EXCL_ALLOW_MASK (1ULL << 1) +#define AMDVI_MMIO_EXCL_LIMIT_MASK AMDVI_MMIO_DEVTAB_BASE_MASK +#define AMDVI_MMIO_EXCL_LIMIT_LOW 0xfff + +/* mmio control register flags */ +#define AMDVI_MMIO_CONTROL_AMDVIEN (1ULL << 0) +#define AMDVI_MMIO_CONTROL_HTTUNEN (1ULL << 1) +#define AMDVI_MMIO_CONTROL_EVENTLOGEN (1ULL << 2) +#define AMDVI_MMIO_CONTROL_EVENTINTEN (1ULL << 3) +#define AMDVI_MMIO_CONTROL_COMWAITINTEN (1ULL << 4) +#define AMDVI_MMIO_CONTROL_CMDBUFLEN 
(1ULL << 12) +#define AMDVI_MMIO_CONTROL_GAEN (1ULL << 17) + +/* MMIO status register bits */ +#define AMDVI_MMIO_STATUS_CMDBUF_RUN (1 << 4) +#define AMDVI_MMIO_STATUS_EVT_RUN (1 << 3) +#define AMDVI_MMIO_STATUS_COMP_INT (1 << 2) +#define AMDVI_MMIO_STATUS_EVT_OVF (1 << 0) + +#define AMDVI_CMDBUF_ID_BYTE 0x07 +#define AMDVI_CMDBUF_ID_RSHIFT 4 + +#define AMDVI_CMD_COMPLETION_WAIT 0x01 +#define AMDVI_CMD_INVAL_DEVTAB_ENTRY 0x02 +#define AMDVI_CMD_INVAL_AMDVI_PAGES 0x03 +#define AMDVI_CMD_INVAL_IOTLB_PAGES 0x04 +#define AMDVI_CMD_INVAL_INTR_TABLE 0x05 +#define AMDVI_CMD_PREFETCH_AMDVI_PAGES 0x06 +#define AMDVI_CMD_COMPLETE_PPR_REQUEST 0x07 +#define AMDVI_CMD_INVAL_AMDVI_ALL 0x08 + +#define AMDVI_DEVTAB_ENTRY_SIZE 32 + +/* Device table entry bits 0:63 */ +#define AMDVI_DEV_VALID (1ULL << 0) +#define AMDVI_DEV_TRANSLATION_VALID (1ULL << 1) +#define AMDVI_DEV_MODE_MASK 0x7 +#define AMDVI_DEV_MODE_RSHIFT 9 +#define AMDVI_DEV_PT_ROOT_MASK 0xffffffffff000 +#define AMDVI_DEV_PT_ROOT_RSHIFT 12 +#define AMDVI_DEV_PERM_SHIFT 61 +#define AMDVI_DEV_PERM_READ (1ULL << 61) +#define AMDVI_DEV_PERM_WRITE (1ULL << 62) + +/* Device table entry bits 64:127 */ +#define AMDVI_DEV_DOMID_ID_MASK ((1ULL << 16) - 1) + +/* Event codes and flags, as stored in the info field */ +#define AMDVI_EVENT_ILLEGAL_DEVTAB_ENTRY (0x1U << 12) +#define AMDVI_EVENT_IOPF (0x2U << 12) +#define AMDVI_EVENT_IOPF_I (1U << 3) +#define AMDVI_EVENT_DEV_TAB_HW_ERROR (0x3U << 12) +#define AMDVI_EVENT_PAGE_TAB_HW_ERROR (0x4U << 12) +#define AMDVI_EVENT_ILLEGAL_COMMAND_ERROR (0x5U << 12) +#define AMDVI_EVENT_COMMAND_HW_ERROR (0x6U << 12) + +#define AMDVI_EVENT_LEN 16 +#define AMDVI_PERM_READ (1 << 0) +#define AMDVI_PERM_WRITE (1 << 1) + +#define AMDVI_FEATURE_PREFETCH (1ULL << 0) /* page prefetch */ +#define AMDVI_FEATURE_PPR (1ULL << 1) /* PPR Support */ +#define AMDVI_FEATURE_GT (1ULL << 4) /* Guest Translation */ +#define AMDVI_FEATURE_IA (1ULL << 6) /* inval all support */ +#define AMDVI_FEATURE_GA (1ULL << 7) /* guest VAPIC support */ +#define AMDVI_FEATURE_HE (1ULL << 8) /* hardware error regs */ +#define AMDVI_FEATURE_PC (1ULL << 9) /* Perf counters */ + +/* reserved DTE bits */ +#define AMDVI_DTE_LOWER_QUAD_RESERVED 0x80300000000000fc +#define AMDVI_DTE_MIDDLE_QUAD_RESERVED 0x0000000000000100 +#define AMDVI_DTE_UPPER_QUAD_RESERVED 0x08f0000000000000 + +/* AMDVI paging mode */ +#define AMDVI_GATS_MODE (2ULL << 12) +#define AMDVI_HATS_MODE (2ULL << 10) + +/* IOTLB */ +#define AMDVI_IOTLB_MAX_SIZE 1024 +#define AMDVI_DEVID_SHIFT 36 + +/* extended feature support */ +#define AMDVI_EXT_FEATURES (AMDVI_FEATURE_PREFETCH | AMDVI_FEATURE_PPR | \ + AMDVI_FEATURE_IA | AMDVI_FEATURE_GT | AMDVI_FEATURE_HE | \ + AMDVI_GATS_MODE | AMDVI_HATS_MODE | AMDVI_FEATURE_GA) + +/* capabilities header */ +#define AMDVI_CAPAB_FEATURES (AMDVI_CAPAB_FLAT_EXT | \ + AMDVI_CAPAB_FLAG_NPCACHE | AMDVI_CAPAB_FLAG_IOTLBSUP \ + | AMDVI_CAPAB_ID_SEC | AMDVI_CAPAB_INIT_TYPE | \ + AMDVI_CAPAB_FLAG_HTTUNNEL | AMDVI_CAPAB_EFR_SUP) + +/* AMDVI default address */ +#define AMDVI_BASE_ADDR 0xfed80000 + +/* page management constants */ +#define AMDVI_PAGE_SHIFT 12 +#define AMDVI_PAGE_SIZE (1ULL << AMDVI_PAGE_SHIFT) + +#define AMDVI_PAGE_SHIFT_4K 12 +#define AMDVI_PAGE_MASK_4K (~((1ULL << AMDVI_PAGE_SHIFT_4K) - 1)) + +#define AMDVI_MAX_VA_ADDR (48UL << 5) +#define AMDVI_MAX_PH_ADDR (40UL << 8) +#define AMDVI_MAX_GVA_ADDR (48UL << 15) + +/* Completion Wait data size */ +#define AMDVI_COMPLETION_DATA_SIZE 8 + +#define AMDVI_COMMAND_SIZE 16 + +#define AMDVI_INT_ADDR_FIRST 0xfee00000 +#define AMDVI_INT_ADDR_LAST 0xfeefffff +#define AMDVI_INT_ADDR_SIZE (AMDVI_INT_ADDR_LAST - AMDVI_INT_ADDR_FIRST + 1) +#define AMDVI_MSI_ADDR_HI_MASK (0xffffffff00000000ULL) +#define AMDVI_MSI_ADDR_LO_MASK (0x00000000ffffffffULL) + +/* SB IOAPIC is always on this device in AMD systems */ +#define AMDVI_IOAPIC_SB_DEVID PCI_BUILD_BDF(0, PCI_DEVFN(0x14, 0)) + +/* Interrupt remapping errors */ +#define AMDVI_IR_ERR 0x1 +#define AMDVI_IR_GET_IRTE 0x2 +#define AMDVI_IR_TARGET_ABORT 0x3 + +/* Interrupt remapping */ +#define AMDVI_IR_REMAP_ENABLE 1ULL +#define AMDVI_IR_INTCTL_SHIFT 60 +#define AMDVI_IR_INTCTL_ABORT 0 +#define AMDVI_IR_INTCTL_PASS 1 +#define AMDVI_IR_INTCTL_REMAP 2 + +#define AMDVI_IR_PHYS_ADDR_MASK (((1ULL << 45) - 1) << 6) + +/* MSI data 10:0 bits (section 2.2.5.1 Fig 14) */ +#define AMDVI_IRTE_OFFSET 0x7ff + +/* Delivery mode of MSI data (same as the IOAPIC delivery mode encoding) */ +#define AMDVI_IOAPIC_INT_TYPE_FIXED 0x0 +#define AMDVI_IOAPIC_INT_TYPE_ARBITRATED 0x1 +#define AMDVI_IOAPIC_INT_TYPE_SMI 0x2 +#define AMDVI_IOAPIC_INT_TYPE_NMI 0x4 +#define AMDVI_IOAPIC_INT_TYPE_INIT 0x5 +#define AMDVI_IOAPIC_INT_TYPE_EINT 0x7 + +/* Pass through interrupt */ +#define AMDVI_DEV_INT_PASS_MASK (1ULL << 56) +#define AMDVI_DEV_EINT_PASS_MASK (1ULL << 57) +#define AMDVI_DEV_NMI_PASS_MASK (1ULL << 58) +#define AMDVI_DEV_LINT0_PASS_MASK (1ULL << 62) +#define AMDVI_DEV_LINT1_PASS_MASK (1ULL << 63) + +/* Interrupt remapping table fields (Guest VAPIC not enabled) */ +union irte { + uint32_t val; + struct { + uint32_t valid:1, + no_fault:1, + int_type:3, + rq_eoi:1, + dm:1, + guest_mode:1, + destination:8, + vector:8, + rsvd:8; + } fields; +}; + +/* Interrupt remapping table fields (Guest VAPIC is enabled) */ +union irte_ga_lo { + uint64_t val; + + /* For int remapping */ + struct { + uint64_t valid:1, + no_fault:1, + /* ------ */ + int_type:3, + rq_eoi:1, + dm:1, + /* ------ */ + guest_mode:1, + destination:8, + rsvd_1:48; + } fields_remap; +}; + +union irte_ga_hi { + uint64_t val; + struct { + uint64_t vector:8, + rsvd_2:56; + } fields; +}; + +struct irte_ga { + union irte_ga_lo lo; + union irte_ga_hi hi; +}; + +#define TYPE_AMD_IOMMU_DEVICE "amd-iommu" +OBJECT_DECLARE_SIMPLE_TYPE(AMDVIState, AMD_IOMMU_DEVICE) + +#define TYPE_AMD_IOMMU_PCI "AMDVI-PCI" + +#define TYPE_AMD_IOMMU_MEMORY_REGION "amd-iommu-iommu-memory-region" + +typedef struct AMDVIAddressSpace AMDVIAddressSpace; + +/* functions to steal PCI config space */ +typedef struct AMDVIPCIState { + PCIDevice dev; /* The PCI device itself */ +} AMDVIPCIState; + +struct AMDVIState { + X86IOMMUState iommu; /* IOMMU bus device */ + AMDVIPCIState pci; /* IOMMU PCI device */ + + uint32_t version; + uint32_t capab_offset; /* capability offset pointer */ + + uint64_t mmio_addr; + + uint32_t devid; /* auto-assigned devid */ + + bool enabled; /* IOMMU enabled */ + bool ats_enabled; /* address translation enabled */ + bool cmdbuf_enabled; /* command buffer enabled */ + bool evtlog_enabled; /* event log enabled */ + bool excl_enabled; + + hwaddr devtab; /* base address device table */ + size_t devtab_len; /* device table length */ + + hwaddr cmdbuf; /* command buffer base address */ + uint64_t cmdbuf_len; /* command buffer length */ + uint32_t cmdbuf_head; /* current IOMMU read position */ + uint32_t cmdbuf_tail; /* next Software write position */ + bool completion_wait_intr; + + hwaddr evtlog; /* base address event log */ + bool evtlog_intr; 
uint32_t evtlog_len; /* event log length */ + uint32_t evtlog_head; /* current IOMMU write position */ + uint32_t evtlog_tail; /* current Software read position */ + + /* unused for now */ + hwaddr excl_base; /* base DVA - IOMMU exclusion range */ + hwaddr excl_limit; /* limit of IOMMU exclusion range */ + bool excl_allow; /* translate accesses to the exclusion range */ + bool excl_enable; /* exclusion range enabled */ + + hwaddr ppr_log; /* base address ppr log */ + uint32_t pprlog_len; /* ppr log len */ + uint32_t pprlog_head; /* ppr log head */ + uint32_t pprlog_tail; /* ppr log tail */ + + MemoryRegion mmio; /* MMIO region */ + uint8_t mmior[AMDVI_MMIO_SIZE]; /* read/write MMIO */ + uint8_t w1cmask[AMDVI_MMIO_SIZE]; /* read/write 1 clear mask */ + uint8_t romask[AMDVI_MMIO_SIZE]; /* MMIO read/only mask */ + bool mmio_enabled; + + /* for each served device */ + AMDVIAddressSpace **address_spaces[PCI_BUS_MAX]; + + /* IOTLB */ + GHashTable *iotlb; + + /* Interrupt remapping */ + bool ga_enabled; +}; + +#endif diff --git a/hw/i386/e820_memory_layout.c b/hw/i386/e820_memory_layout.c new file mode 100644 index 000000000..bcf9eaf83 --- /dev/null +++ b/hw/i386/e820_memory_layout.c @@ -0,0 +1,59 @@ +/* + * QEMU BIOS e820 routines + * + * Copyright (c) 2003-2004 Fabrice Bellard + * + * SPDX-License-Identifier: MIT + */ + +#include "qemu/osdep.h" +#include "qemu/bswap.h" +#include "e820_memory_layout.h" + +static size_t e820_entries; +struct e820_table e820_reserve; +struct e820_entry *e820_table; + +int e820_add_entry(uint64_t address, uint64_t length, uint32_t type) +{ + int index = le32_to_cpu(e820_reserve.count); + struct e820_entry *entry; + + if (type != E820_RAM) { + /* old FW_CFG_E820_TABLE entry -- reservations only */ + if (index >= E820_NR_ENTRIES) { + return -EBUSY; + } + entry = &e820_reserve.entry[index++]; + + entry->address = cpu_to_le64(address); + entry->length = cpu_to_le64(length); + entry->type = cpu_to_le32(type); + + e820_reserve.count = cpu_to_le32(index); + } + + /* new "etc/e820" file -- include ram too */ + e820_table = g_renew(struct e820_entry, e820_table, e820_entries + 1); + e820_table[e820_entries].address = cpu_to_le64(address); + e820_table[e820_entries].length = cpu_to_le64(length); + e820_table[e820_entries].type = cpu_to_le32(type); + e820_entries++; + + return e820_entries; +} + +int e820_get_num_entries(void) +{ + return e820_entries; +} + +bool e820_get_entry(int idx, uint32_t type, uint64_t *address, uint64_t *length) +{ + if (idx < e820_entries && e820_table[idx].type == cpu_to_le32(type)) { + *address = le64_to_cpu(e820_table[idx].address); + *length = le64_to_cpu(e820_table[idx].length); + return true; + } + return false; +} diff --git a/hw/i386/e820_memory_layout.h b/hw/i386/e820_memory_layout.h new file mode 100644 index 000000000..2a0ceb8b9 --- /dev/null +++ b/hw/i386/e820_memory_layout.h @@ -0,0 +1,42 @@ +/* + * QEMU BIOS e820 routines + * + * Copyright (c) 2003-2004 Fabrice Bellard + * + * SPDX-License-Identifier: MIT + */ + +#ifndef HW_I386_E820_H +#define HW_I386_E820_H + +/* e820 types */ +#define E820_RAM 1 +#define E820_RESERVED 2 +#define E820_ACPI 3 +#define E820_NVS 4 +#define E820_UNUSABLE 5 + +#define E820_NR_ENTRIES 16 + +struct e820_entry { + uint64_t address; + uint64_t length; + uint32_t type; +} QEMU_PACKED __attribute((__aligned__(4))); + +struct e820_table { + uint32_t count; + struct e820_entry entry[E820_NR_ENTRIES]; +} QEMU_PACKED __attribute((__aligned__(4))); + +extern struct e820_table e820_reserve; +extern struct e820_entry 
*e820_table; + +int e820_add_entry(uint64_t address, uint64_t length, uint32_t type); +int e820_get_num_entries(void); +bool e820_get_entry(int index, uint32_t type, + uint64_t *address, uint64_t *length); + + + +#endif diff --git a/hw/i386/fw_cfg.c b/hw/i386/fw_cfg.c new file mode 100644 index 000000000..a283785a8 --- /dev/null +++ b/hw/i386/fw_cfg.c @@ -0,0 +1,221 @@ +/* + * QEMU fw_cfg helpers (X86 specific) + * + * Copyright (c) 2019 Red Hat, Inc. + * + * Author: + * Philippe Mathieu-Daudé + * + * SPDX-License-Identifier: GPL-2.0-or-later + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "sysemu/numa.h" +#include "hw/acpi/acpi.h" +#include "hw/acpi/aml-build.h" +#include "hw/firmware/smbios.h" +#include "hw/i386/fw_cfg.h" +#include "hw/timer/hpet.h" +#include "hw/nvram/fw_cfg.h" +#include "e820_memory_layout.h" +#include "kvm/kvm_i386.h" +#include "qapi/error.h" +#include CONFIG_DEVICES + +struct hpet_fw_config hpet_cfg = {.count = UINT8_MAX}; + +const char *fw_cfg_arch_key_name(uint16_t key) +{ + static const struct { + uint16_t key; + const char *name; + } fw_cfg_arch_wellknown_keys[] = { + {FW_CFG_ACPI_TABLES, "acpi_tables"}, + {FW_CFG_SMBIOS_ENTRIES, "smbios_entries"}, + {FW_CFG_IRQ0_OVERRIDE, "irq0_override"}, + {FW_CFG_E820_TABLE, "e820_table"}, + {FW_CFG_HPET, "hpet"}, + }; + + for (size_t i = 0; i < ARRAY_SIZE(fw_cfg_arch_wellknown_keys); i++) { + if (fw_cfg_arch_wellknown_keys[i].key == key) { + return fw_cfg_arch_wellknown_keys[i].name; + } + } + return NULL; +} + +void fw_cfg_build_smbios(MachineState *ms, FWCfgState *fw_cfg) +{ +#ifdef CONFIG_SMBIOS + uint8_t *smbios_tables, *smbios_anchor; + size_t smbios_tables_len, smbios_anchor_len; + struct smbios_phys_mem_area *mem_array; + unsigned i, array_count; + X86CPU *cpu = X86_CPU(ms->possible_cpus->cpus[0].cpu); + + /* tell smbios about cpuid version and features */ + smbios_set_cpuid(cpu->env.cpuid_version, cpu->env.features[FEAT_1_EDX]); + + smbios_tables = smbios_get_table_legacy(ms, &smbios_tables_len); + if (smbios_tables) { + fw_cfg_add_bytes(fw_cfg, FW_CFG_SMBIOS_ENTRIES, + smbios_tables, smbios_tables_len); + } + + /* build the array of physical mem area from e820 table */ + mem_array = g_malloc0(sizeof(*mem_array) * e820_get_num_entries()); + for (i = 0, array_count = 0; i < e820_get_num_entries(); i++) { + uint64_t addr, len; + + if (e820_get_entry(i, E820_RAM, &addr, &len)) { + mem_array[array_count].address = addr; + mem_array[array_count].length = len; + array_count++; + } + } + smbios_get_tables(ms, mem_array, array_count, + &smbios_tables, &smbios_tables_len, + &smbios_anchor, &smbios_anchor_len, + &error_fatal); + g_free(mem_array); + + if (smbios_anchor) { + fw_cfg_add_file(fw_cfg, "etc/smbios/smbios-tables", + smbios_tables, smbios_tables_len); + fw_cfg_add_file(fw_cfg, "etc/smbios/smbios-anchor", + smbios_anchor, smbios_anchor_len); + } +#endif +} + +FWCfgState *fw_cfg_arch_create(MachineState *ms, + uint16_t boot_cpus, + uint16_t apic_id_limit) +{ + FWCfgState *fw_cfg; + uint64_t *numa_fw_cfg; + int i; + MachineClass *mc = MACHINE_GET_CLASS(ms); + const CPUArchIdList *cpus = mc->possible_cpu_arch_ids(ms); + int nb_numa_nodes = ms->numa_state->num_nodes; + + fw_cfg = fw_cfg_init_io_dma(FW_CFG_IO_BASE, FW_CFG_IO_BASE + 4, + &address_space_memory); + fw_cfg_add_i16(fw_cfg, FW_CFG_NB_CPUS, boot_cpus); + + /* FW_CFG_MAX_CPUS is a bit confusing/problematic on x86: + * + * For machine 
types prior to 1.8, SeaBIOS needs FW_CFG_MAX_CPUS for + * building the MPTable, ACPI MADT, ACPI CPU hotplug and ACPI SRAT tables, + * as those tables are based on the xAPIC ID, and the QEMU<->SeaBIOS interface + * for CPU hotplug also uses the APIC ID and not the "CPU index". + * This means that FW_CFG_MAX_CPUS is not the "maximum number of CPUs", + * but the "limit to the APIC ID values SeaBIOS may see". + * + * So for compatibility reasons with old BIOSes we are stuck with + * "etc/max-cpus" actually being apic_id_limit. + */ + fw_cfg_add_i16(fw_cfg, FW_CFG_MAX_CPUS, apic_id_limit); + fw_cfg_add_i64(fw_cfg, FW_CFG_RAM_SIZE, ms->ram_size); +#ifdef CONFIG_ACPI + fw_cfg_add_bytes(fw_cfg, FW_CFG_ACPI_TABLES, + acpi_tables, acpi_tables_len); +#endif + fw_cfg_add_i32(fw_cfg, FW_CFG_IRQ0_OVERRIDE, 1); + + fw_cfg_add_bytes(fw_cfg, FW_CFG_E820_TABLE, + &e820_reserve, sizeof(e820_reserve)); + fw_cfg_add_file(fw_cfg, "etc/e820", e820_table, + sizeof(struct e820_entry) * e820_get_num_entries()); + + fw_cfg_add_bytes(fw_cfg, FW_CFG_HPET, &hpet_cfg, sizeof(hpet_cfg)); + /* allocate memory for the NUMA channel: one (64bit) word for the number + * of nodes, one word for each VCPU->node and one word for each node to + * hold the amount of memory. + */ + numa_fw_cfg = g_new0(uint64_t, 1 + apic_id_limit + nb_numa_nodes); + numa_fw_cfg[0] = cpu_to_le64(nb_numa_nodes); + for (i = 0; i < cpus->len; i++) { + unsigned int apic_id = cpus->cpus[i].arch_id; + assert(apic_id < apic_id_limit); + numa_fw_cfg[apic_id + 1] = cpu_to_le64(cpus->cpus[i].props.node_id); + } + for (i = 0; i < nb_numa_nodes; i++) { + numa_fw_cfg[apic_id_limit + 1 + i] = + cpu_to_le64(ms->numa_state->nodes[i].node_mem); + } + fw_cfg_add_bytes(fw_cfg, FW_CFG_NUMA, numa_fw_cfg, + (1 + apic_id_limit + nb_numa_nodes) * + sizeof(*numa_fw_cfg)); + + return fw_cfg; +} + +void fw_cfg_build_feature_control(MachineState *ms, FWCfgState *fw_cfg) +{ + X86CPU *cpu = X86_CPU(ms->possible_cpus->cpus[0].cpu); + CPUX86State *env = &cpu->env; + uint32_t unused, ebx, ecx, edx; + uint64_t feature_control_bits = 0; + uint64_t *val; + + cpu_x86_cpuid(env, 1, 0, &unused, &unused, &ecx, &edx); + if (ecx & CPUID_EXT_VMX) { + feature_control_bits |= FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX; + } + + if ((edx & (CPUID_EXT2_MCE | CPUID_EXT2_MCA)) == + (CPUID_EXT2_MCE | CPUID_EXT2_MCA) && + (env->mcg_cap & MCG_LMCE_P)) { + feature_control_bits |= FEATURE_CONTROL_LMCE; + } + + if (env->cpuid_level >= 7) { + cpu_x86_cpuid(env, 0x7, 0, &unused, &ebx, &ecx, &unused); + if (ebx & CPUID_7_0_EBX_SGX) { + feature_control_bits |= FEATURE_CONTROL_SGX; + } + if (ecx & CPUID_7_0_ECX_SGX_LC) { + feature_control_bits |= FEATURE_CONTROL_SGX_LC; + } + } + + if (!feature_control_bits) { + return; + } + + val = g_malloc(sizeof(*val)); + *val = cpu_to_le64(feature_control_bits | FEATURE_CONTROL_LOCKED); + fw_cfg_add_file(fw_cfg, "etc/msr_feature_control", val, sizeof(*val)); +} + +void fw_cfg_add_acpi_dsdt(Aml *scope, FWCfgState *fw_cfg) +{ + /* + * when using port i/o, the 8-bit data register *always* overlaps + * with half of the 16-bit control register. Hence, the total size + * of the i/o region used is FW_CFG_CTL_SIZE; when using DMA, the + * DMA control register is located at FW_CFG_DMA_IO_BASE + 4 + */ + Object *obj = OBJECT(fw_cfg); + uint8_t io_size = object_property_get_bool(obj, "dma_enabled", NULL) ? 
+ ROUND_UP(FW_CFG_CTL_SIZE, 4) + sizeof(dma_addr_t) : + FW_CFG_CTL_SIZE; + Aml *dev = aml_device("FWCF"); + Aml *crs = aml_resource_template(); + + aml_append(dev, aml_name_decl("_HID", aml_string("QEMU0002"))); + + /* device present, functioning, decoding, not shown in UI */ + aml_append(dev, aml_name_decl("_STA", aml_int(0xB))); + + aml_append(crs, + aml_io(AML_DECODE16, FW_CFG_IO_BASE, FW_CFG_IO_BASE, 0x01, io_size)); + + aml_append(dev, aml_name_decl("_CRS", crs)); + aml_append(scope, dev); +} diff --git a/hw/i386/fw_cfg.h b/hw/i386/fw_cfg.h new file mode 100644 index 000000000..275f15c1c --- /dev/null +++ b/hw/i386/fw_cfg.h @@ -0,0 +1,30 @@ +/* + * QEMU fw_cfg helpers (X86 specific) + * + * Copyright (c) 2003-2004 Fabrice Bellard + * + * SPDX-License-Identifier: MIT + */ + +#ifndef HW_I386_FW_CFG_H +#define HW_I386_FW_CFG_H + +#include "hw/boards.h" +#include "hw/nvram/fw_cfg.h" + +#define FW_CFG_IO_BASE 0x510 + +#define FW_CFG_ACPI_TABLES (FW_CFG_ARCH_LOCAL + 0) +#define FW_CFG_SMBIOS_ENTRIES (FW_CFG_ARCH_LOCAL + 1) +#define FW_CFG_IRQ0_OVERRIDE (FW_CFG_ARCH_LOCAL + 2) +#define FW_CFG_E820_TABLE (FW_CFG_ARCH_LOCAL + 3) +#define FW_CFG_HPET (FW_CFG_ARCH_LOCAL + 4) + +FWCfgState *fw_cfg_arch_create(MachineState *ms, + uint16_t boot_cpus, + uint16_t apic_id_limit); +void fw_cfg_build_smbios(MachineState *ms, FWCfgState *fw_cfg); +void fw_cfg_build_feature_control(MachineState *ms, FWCfgState *fw_cfg); +void fw_cfg_add_acpi_dsdt(Aml *scope, FWCfgState *fw_cfg); + +#endif diff --git a/hw/i386/generic_event_device_x86.c b/hw/i386/generic_event_device_x86.c new file mode 100644 index 000000000..e26fb02a2 --- /dev/null +++ b/hw/i386/generic_event_device_x86.c @@ -0,0 +1,36 @@ +/* + * x86 variant of the generic event device for hw reduced acpi + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + */ + +#include "qemu/osdep.h" +#include "hw/acpi/generic_event_device.h" +#include "hw/i386/pc.h" + +static void acpi_ged_x86_class_init(ObjectClass *class, void *data) +{ + AcpiDeviceIfClass *adevc = ACPI_DEVICE_IF_CLASS(class); + + adevc->madt_cpu = pc_madt_cpu_entry; +} + +static const TypeInfo acpi_ged_x86_info = { + .name = TYPE_ACPI_GED_X86, + .parent = TYPE_ACPI_GED, + .class_init = acpi_ged_x86_class_init, + .interfaces = (InterfaceInfo[]) { + { TYPE_HOTPLUG_HANDLER }, + { TYPE_ACPI_DEVICE_IF }, + { } + } +}; + +static void acpi_ged_x86_register_types(void) +{ + type_register_static(&acpi_ged_x86_info); +} + +type_init(acpi_ged_x86_register_types) diff --git a/hw/i386/intel_iommu.c b/hw/i386/intel_iommu.c new file mode 100644 index 000000000..f584449d8 --- /dev/null +++ b/hw/i386/intel_iommu.c @@ -0,0 +1,3900 @@ +/* + * QEMU emulation of an Intel IOMMU (VT-d) + * (DMA Remapping device) + * + * Copyright (C) 2013 Knut Omang, Oracle + * Copyright (C) 2014 Le Tan, + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
 + + * You should have received a copy of the GNU General Public License along + * with this program; if not, see <http://www.gnu.org/licenses/>. + */ + +#include "qemu/osdep.h" +#include "qemu/error-report.h" +#include "qemu/main-loop.h" +#include "qapi/error.h" +#include "hw/sysbus.h" +#include "intel_iommu_internal.h" +#include "hw/pci/pci.h" +#include "hw/pci/pci_bus.h" +#include "hw/qdev-properties.h" +#include "hw/i386/pc.h" +#include "hw/i386/apic-msidef.h" +#include "hw/i386/x86-iommu.h" +#include "hw/pci-host/q35.h" +#include "sysemu/kvm.h" +#include "sysemu/dma.h" +#include "sysemu/sysemu.h" +#include "hw/i386/apic_internal.h" +#include "kvm/kvm_i386.h" +#include "migration/vmstate.h" +#include "trace.h" + +/* context entry operations */ +#define VTD_CE_GET_RID2PASID(ce) \ + ((ce)->val[1] & VTD_SM_CONTEXT_ENTRY_RID2PASID_MASK) +#define VTD_CE_GET_PASID_DIR_TABLE(ce) \ + ((ce)->val[0] & VTD_PASID_DIR_BASE_ADDR_MASK) + +/* pe operations */ +#define VTD_PE_GET_TYPE(pe) ((pe)->val[0] & VTD_SM_PASID_ENTRY_PGTT) +#define VTD_PE_GET_LEVEL(pe) (2 + (((pe)->val[0] >> 2) & VTD_SM_PASID_ENTRY_AW)) +#define VTD_PE_GET_FPD_ERR(ret_fr, is_fpd_set, s, source_id, addr, is_write) {\ + if (ret_fr) { \ + ret_fr = -ret_fr; \ + if (is_fpd_set && vtd_is_qualified_fault(ret_fr)) { \ + trace_vtd_fault_disabled(); \ + } else { \ + vtd_report_dmar_fault(s, source_id, addr, ret_fr, is_write); \ + } \ + goto error; \ + } \ +} + +static void vtd_address_space_refresh_all(IntelIOMMUState *s); +static void vtd_address_space_unmap(VTDAddressSpace *as, IOMMUNotifier *n); + +static void vtd_panic_require_caching_mode(void) +{ + error_report("We need to set caching-mode=on for intel-iommu to enable " + "device assignment with IOMMU protection."); + exit(1); +} + +static void vtd_define_quad(IntelIOMMUState *s, hwaddr addr, uint64_t val, + uint64_t wmask, uint64_t w1cmask) +{ + stq_le_p(&s->csr[addr], val); + stq_le_p(&s->wmask[addr], wmask); + stq_le_p(&s->w1cmask[addr], w1cmask); +} + +static void vtd_define_quad_wo(IntelIOMMUState *s, hwaddr addr, uint64_t mask) +{ + stq_le_p(&s->womask[addr], mask); +} + +static void vtd_define_long(IntelIOMMUState *s, hwaddr addr, uint32_t val, + uint32_t wmask, uint32_t w1cmask) +{ + stl_le_p(&s->csr[addr], val); + stl_le_p(&s->wmask[addr], wmask); + stl_le_p(&s->w1cmask[addr], w1cmask); +} + +static void vtd_define_long_wo(IntelIOMMUState *s, hwaddr addr, uint32_t mask) +{ + stl_le_p(&s->womask[addr], mask); +} + +/* "External" get/set operations */ +static void vtd_set_quad(IntelIOMMUState *s, hwaddr addr, uint64_t val) +{ + uint64_t oldval = ldq_le_p(&s->csr[addr]); + uint64_t wmask = ldq_le_p(&s->wmask[addr]); + uint64_t w1cmask = ldq_le_p(&s->w1cmask[addr]); + stq_le_p(&s->csr[addr], + ((oldval & ~wmask) | (val & wmask)) & ~(w1cmask & val)); +} + +static void vtd_set_long(IntelIOMMUState *s, hwaddr addr, uint32_t val) +{ + uint32_t oldval = ldl_le_p(&s->csr[addr]); + uint32_t wmask = ldl_le_p(&s->wmask[addr]); + uint32_t w1cmask = ldl_le_p(&s->w1cmask[addr]); + stl_le_p(&s->csr[addr], + ((oldval & ~wmask) | (val & wmask)) & ~(w1cmask & val)); +} + +static uint64_t vtd_get_quad(IntelIOMMUState *s, hwaddr addr) +{ + uint64_t val = ldq_le_p(&s->csr[addr]); + uint64_t womask = ldq_le_p(&s->womask[addr]); + return val & ~womask; +} + +static uint32_t vtd_get_long(IntelIOMMUState *s, hwaddr addr) +{ + uint32_t val = ldl_le_p(&s->csr[addr]); + uint32_t womask = ldl_le_p(&s->womask[addr]); + return val & ~womask; +} + +/* "Internal" get/set operations */ +static uint64_t vtd_get_quad_raw(IntelIOMMUState *s, 
hwaddr addr) +{ + return ldq_le_p(&s->csr[addr]); +} + +static uint32_t vtd_get_long_raw(IntelIOMMUState *s, hwaddr addr) +{ + return ldl_le_p(&s->csr[addr]); +} + +static void vtd_set_quad_raw(IntelIOMMUState *s, hwaddr addr, uint64_t val) +{ + stq_le_p(&s->csr[addr], val); +} + +static uint32_t vtd_set_clear_mask_long(IntelIOMMUState *s, hwaddr addr, + uint32_t clear, uint32_t mask) +{ + uint32_t new_val = (ldl_le_p(&s->csr[addr]) & ~clear) | mask; + stl_le_p(&s->csr[addr], new_val); + return new_val; +} + +static uint64_t vtd_set_clear_mask_quad(IntelIOMMUState *s, hwaddr addr, + uint64_t clear, uint64_t mask) +{ + uint64_t new_val = (ldq_le_p(&s->csr[addr]) & ~clear) | mask; + stq_le_p(&s->csr[addr], new_val); + return new_val; +} + +static inline void vtd_iommu_lock(IntelIOMMUState *s) +{ + qemu_mutex_lock(&s->iommu_lock); +} + +static inline void vtd_iommu_unlock(IntelIOMMUState *s) +{ + qemu_mutex_unlock(&s->iommu_lock); +} + +static void vtd_update_scalable_state(IntelIOMMUState *s) +{ + uint64_t val = vtd_get_quad_raw(s, DMAR_RTADDR_REG); + + if (s->scalable_mode) { + s->root_scalable = val & VTD_RTADDR_SMT; + } +} + +/* Whether the address space needs to notify new mappings */ +static inline gboolean vtd_as_has_map_notifier(VTDAddressSpace *as) +{ + return as->notifier_flags & IOMMU_NOTIFIER_MAP; +} + +/* GHashTable functions */ +static gboolean vtd_uint64_equal(gconstpointer v1, gconstpointer v2) +{ + return *((const uint64_t *)v1) == *((const uint64_t *)v2); +} + +static guint vtd_uint64_hash(gconstpointer v) +{ + return (guint)*(const uint64_t *)v; +} + +static gboolean vtd_hash_remove_by_domain(gpointer key, gpointer value, + gpointer user_data) +{ + VTDIOTLBEntry *entry = (VTDIOTLBEntry *)value; + uint16_t domain_id = *(uint16_t *)user_data; + return entry->domain_id == domain_id; +} + +/* The shift of an addr for a certain level of paging structure */ +static inline uint32_t vtd_slpt_level_shift(uint32_t level) +{ + assert(level != 0); + return VTD_PAGE_SHIFT_4K + (level - 1) * VTD_SL_LEVEL_BITS; +} + +static inline uint64_t vtd_slpt_level_page_mask(uint32_t level) +{ + return ~((1ULL << vtd_slpt_level_shift(level)) - 1); +} + +static gboolean vtd_hash_remove_by_page(gpointer key, gpointer value, + gpointer user_data) +{ + VTDIOTLBEntry *entry = (VTDIOTLBEntry *)value; + VTDIOTLBPageInvInfo *info = (VTDIOTLBPageInvInfo *)user_data; + uint64_t gfn = (info->addr >> VTD_PAGE_SHIFT_4K) & info->mask; + uint64_t gfn_tlb = (info->addr & entry->mask) >> VTD_PAGE_SHIFT_4K; + return (entry->domain_id == info->domain_id) && + (((entry->gfn & info->mask) == gfn) || + (entry->gfn == gfn_tlb)); +} + +/* Reset all the gen of VTDAddressSpace to zero and set the gen of + * IntelIOMMUState to 1. Must be called with IOMMU lock held. + */ +static void vtd_reset_context_cache_locked(IntelIOMMUState *s) +{ + VTDAddressSpace *vtd_as; + VTDBus *vtd_bus; + GHashTableIter bus_it; + uint32_t devfn_it; + + trace_vtd_context_cache_reset(); + + g_hash_table_iter_init(&bus_it, s->vtd_as_by_busptr); + + while (g_hash_table_iter_next (&bus_it, NULL, (void**)&vtd_bus)) { + for (devfn_it = 0; devfn_it < PCI_DEVFN_MAX; ++devfn_it) { + vtd_as = vtd_bus->dev_as[devfn_it]; + if (!vtd_as) { + continue; + } + vtd_as->context_cache_entry.context_cache_gen = 0; + } + } + s->context_cache_gen = 1; +} + +/* Must be called with IOMMU lock held. 
*/ +static void vtd_reset_iotlb_locked(IntelIOMMUState *s) +{ + assert(s->iotlb); + g_hash_table_remove_all(s->iotlb); +} + +static void vtd_reset_iotlb(IntelIOMMUState *s) +{ + vtd_iommu_lock(s); + vtd_reset_iotlb_locked(s); + vtd_iommu_unlock(s); +} + +static void vtd_reset_caches(IntelIOMMUState *s) +{ + vtd_iommu_lock(s); + vtd_reset_iotlb_locked(s); + vtd_reset_context_cache_locked(s); + vtd_iommu_unlock(s); +} + +static uint64_t vtd_get_iotlb_key(uint64_t gfn, uint16_t source_id, + uint32_t level) +{ + return gfn | ((uint64_t)(source_id) << VTD_IOTLB_SID_SHIFT) | + ((uint64_t)(level) << VTD_IOTLB_LVL_SHIFT); +} + +static uint64_t vtd_get_iotlb_gfn(hwaddr addr, uint32_t level) +{ + return (addr & vtd_slpt_level_page_mask(level)) >> VTD_PAGE_SHIFT_4K; +} + +/* Must be called with IOMMU lock held */ +static VTDIOTLBEntry *vtd_lookup_iotlb(IntelIOMMUState *s, uint16_t source_id, + hwaddr addr) +{ + VTDIOTLBEntry *entry; + uint64_t key; + int level; + + for (level = VTD_SL_PT_LEVEL; level < VTD_SL_PML4_LEVEL; level++) { + key = vtd_get_iotlb_key(vtd_get_iotlb_gfn(addr, level), + source_id, level); + entry = g_hash_table_lookup(s->iotlb, &key); + if (entry) { + goto out; + } + } + +out: + return entry; +} + +/* Must be called with the IOMMU lock held */ +static void vtd_update_iotlb(IntelIOMMUState *s, uint16_t source_id, + uint16_t domain_id, hwaddr addr, uint64_t slpte, + uint8_t access_flags, uint32_t level) +{ + VTDIOTLBEntry *entry = g_malloc(sizeof(*entry)); + uint64_t *key = g_malloc(sizeof(*key)); + uint64_t gfn = vtd_get_iotlb_gfn(addr, level); + + trace_vtd_iotlb_page_update(source_id, addr, slpte, domain_id); + if (g_hash_table_size(s->iotlb) >= VTD_IOTLB_MAX_SIZE) { + trace_vtd_iotlb_reset("iotlb exceeds size limit"); + vtd_reset_iotlb_locked(s); + } + + entry->gfn = gfn; + entry->domain_id = domain_id; + entry->slpte = slpte; + entry->access_flags = access_flags; + entry->mask = vtd_slpt_level_page_mask(level); + *key = vtd_get_iotlb_key(gfn, source_id, level); + g_hash_table_replace(s->iotlb, key, entry); +} + +/* Given the reg addr of both the message data and address, generate an + * interrupt via MSI. + */ +static void vtd_generate_interrupt(IntelIOMMUState *s, hwaddr mesg_addr_reg, + hwaddr mesg_data_reg) +{ + MSIMessage msi; + + assert(mesg_data_reg < DMAR_REG_SIZE); + assert(mesg_addr_reg < DMAR_REG_SIZE); + + msi.address = vtd_get_long_raw(s, mesg_addr_reg); + msi.data = vtd_get_long_raw(s, mesg_data_reg); + + trace_vtd_irq_generate(msi.address, msi.data); + + apic_get_class()->send_msi(&msi); +} + +/* Generate a fault event to software via MSI if conditions are met. + * Notice that the value of FSTS_REG being passed to it should be the one + * before any update. + */ +static void vtd_generate_fault_event(IntelIOMMUState *s, uint32_t pre_fsts) +{ + if (pre_fsts & VTD_FSTS_PPF || pre_fsts & VTD_FSTS_PFO || + pre_fsts & VTD_FSTS_IQE) { + error_report_once("There are previous interrupt conditions " + "to be serviced by software, fault event " + "is not generated"); + return; + } + vtd_set_clear_mask_long(s, DMAR_FECTL_REG, 0, VTD_FECTL_IP); + if (vtd_get_long_raw(s, DMAR_FECTL_REG) & VTD_FECTL_IM) { + error_report_once("Interrupt Mask set, irq is not generated"); + } else { + vtd_generate_interrupt(s, DMAR_FEADDR_REG, DMAR_FEDATA_REG); + vtd_set_clear_mask_long(s, DMAR_FECTL_REG, VTD_FECTL_IP, 0); + } +} + +/* Check if the Fault (F) field of the Fault Recording Register referenced by + * @index is Set. 
 + */ +static bool vtd_is_frcd_set(IntelIOMMUState *s, uint16_t index) +{ + /* Each reg is 128-bit */ + hwaddr addr = DMAR_FRCD_REG_OFFSET + (((uint64_t)index) << 4); + addr += 8; /* Access the high 64-bit half */ + + assert(index < DMAR_FRCD_REG_NR); + + return vtd_get_quad_raw(s, addr) & VTD_FRCD_F; +} + +/* Update the PPF field of Fault Status Register. + * Should be called whenever the F field of any fault recording + * register is changed. + */ +static void vtd_update_fsts_ppf(IntelIOMMUState *s) +{ + uint32_t i; + uint32_t ppf_mask = 0; + + for (i = 0; i < DMAR_FRCD_REG_NR; i++) { + if (vtd_is_frcd_set(s, i)) { + ppf_mask = VTD_FSTS_PPF; + break; + } + } + vtd_set_clear_mask_long(s, DMAR_FSTS_REG, VTD_FSTS_PPF, ppf_mask); + trace_vtd_fsts_ppf(!!ppf_mask); +} + +static void vtd_set_frcd_and_update_ppf(IntelIOMMUState *s, uint16_t index) +{ + /* Each reg is 128-bit */ + hwaddr addr = DMAR_FRCD_REG_OFFSET + (((uint64_t)index) << 4); + addr += 8; /* Access the high 64-bit half */ + + assert(index < DMAR_FRCD_REG_NR); + + vtd_set_clear_mask_quad(s, addr, 0, VTD_FRCD_F); + vtd_update_fsts_ppf(s); +} + +/* Must not update F field now, should be done later */ +static void vtd_record_frcd(IntelIOMMUState *s, uint16_t index, + uint16_t source_id, hwaddr addr, + VTDFaultReason fault, bool is_write) +{ + uint64_t hi = 0, lo; + hwaddr frcd_reg_addr = DMAR_FRCD_REG_OFFSET + (((uint64_t)index) << 4); + + assert(index < DMAR_FRCD_REG_NR); + + lo = VTD_FRCD_FI(addr); + hi = VTD_FRCD_SID(source_id) | VTD_FRCD_FR(fault); + if (!is_write) { + hi |= VTD_FRCD_T; + } + vtd_set_quad_raw(s, frcd_reg_addr, lo); + vtd_set_quad_raw(s, frcd_reg_addr + 8, hi); + + trace_vtd_frr_new(index, hi, lo); +} + +/* Try to collapse multiple pending faults from the same requester */ +static bool vtd_try_collapse_fault(IntelIOMMUState *s, uint16_t source_id) +{ + uint32_t i; + uint64_t frcd_reg; + hwaddr addr = DMAR_FRCD_REG_OFFSET + 8; /* The high 64-bit half */ + + for (i = 0; i < DMAR_FRCD_REG_NR; i++) { + frcd_reg = vtd_get_quad_raw(s, addr); + if ((frcd_reg & VTD_FRCD_F) && + ((frcd_reg & VTD_FRCD_SID_MASK) == source_id)) { + return true; + } + addr += 16; /* 128-bit for each */ + } + return false; +} + +/* Log and report a DMAR (address translation) fault to software */ +static void vtd_report_dmar_fault(IntelIOMMUState *s, uint16_t source_id, + hwaddr addr, VTDFaultReason fault, + bool is_write) +{ + uint32_t fsts_reg = vtd_get_long_raw(s, DMAR_FSTS_REG); + + assert(fault < VTD_FR_MAX); + + if (fault == VTD_FR_RESERVED_ERR) { + /* This is not a normal fault reason case. Drop it. 
*/ + return; + } + + trace_vtd_dmar_fault(source_id, fault, addr, is_write); + + if (fsts_reg & VTD_FSTS_PFO) { + error_report_once("New fault is not recorded due to " + "Primary Fault Overflow"); + return; + } + + if (vtd_try_collapse_fault(s, source_id)) { + error_report_once("New fault is not recorded due to " + "compression of faults"); + return; + } + + if (vtd_is_frcd_set(s, s->next_frcd_reg)) { + error_report_once("Next Fault Recording Reg is used, " + "new fault is not recorded, set PFO field"); + vtd_set_clear_mask_long(s, DMAR_FSTS_REG, 0, VTD_FSTS_PFO); + return; + } + + vtd_record_frcd(s, s->next_frcd_reg, source_id, addr, fault, is_write); + + if (fsts_reg & VTD_FSTS_PPF) { + error_report_once("There are pending faults already, " + "fault event is not generated"); + vtd_set_frcd_and_update_ppf(s, s->next_frcd_reg); + s->next_frcd_reg++; + if (s->next_frcd_reg == DMAR_FRCD_REG_NR) { + s->next_frcd_reg = 0; + } + } else { + vtd_set_clear_mask_long(s, DMAR_FSTS_REG, VTD_FSTS_FRI_MASK, + VTD_FSTS_FRI(s->next_frcd_reg)); + vtd_set_frcd_and_update_ppf(s, s->next_frcd_reg); /* Will set PPF */ + s->next_frcd_reg++; + if (s->next_frcd_reg == DMAR_FRCD_REG_NR) { + s->next_frcd_reg = 0; + } + /* This case actually causes the PPF to be Set. + * So generate fault event (interrupt). + */ + vtd_generate_fault_event(s, fsts_reg); + } +} + +/* Handle Invalidation Queue Errors, i.e. the error conditions of the + * queued invalidation interface. + */ +static void vtd_handle_inv_queue_error(IntelIOMMUState *s) +{ + uint32_t fsts_reg = vtd_get_long_raw(s, DMAR_FSTS_REG); + + vtd_set_clear_mask_long(s, DMAR_FSTS_REG, 0, VTD_FSTS_IQE); + vtd_generate_fault_event(s, fsts_reg); +} + +/* Set the IWC field and try to generate an invalidation completion interrupt */ +static void vtd_generate_completion_event(IntelIOMMUState *s) +{ + if (vtd_get_long_raw(s, DMAR_ICS_REG) & VTD_ICS_IWC) { + trace_vtd_inv_desc_wait_irq("One pending, skip current"); + return; + } + vtd_set_clear_mask_long(s, DMAR_ICS_REG, 0, VTD_ICS_IWC); + vtd_set_clear_mask_long(s, DMAR_IECTL_REG, 0, VTD_IECTL_IP); + if (vtd_get_long_raw(s, DMAR_IECTL_REG) & VTD_IECTL_IM) { + trace_vtd_inv_desc_wait_irq("IM in IECTL_REG is set, " + "new event not generated"); + return; + } else { + /* Generate the interrupt event */ + trace_vtd_inv_desc_wait_irq("Generating complete event"); + vtd_generate_interrupt(s, DMAR_IEADDR_REG, DMAR_IEDATA_REG); + vtd_set_clear_mask_long(s, DMAR_IECTL_REG, VTD_IECTL_IP, 0); + } +} + +static inline bool vtd_root_entry_present(IntelIOMMUState *s, + VTDRootEntry *re, + uint8_t devfn) +{ + if (s->root_scalable && devfn > UINT8_MAX / 2) { + return re->hi & VTD_ROOT_ENTRY_P; + } + + return re->lo & VTD_ROOT_ENTRY_P; +} + +static int vtd_get_root_entry(IntelIOMMUState *s, uint8_t index, + VTDRootEntry *re) +{ + dma_addr_t addr; + + addr = s->root + index * sizeof(*re); + if (dma_memory_read(&address_space_memory, addr, re, sizeof(*re))) { + re->lo = 0; + return -VTD_FR_ROOT_TABLE_INV; + } + re->lo = le64_to_cpu(re->lo); + re->hi = le64_to_cpu(re->hi); + return 0; +} + +static inline bool vtd_ce_present(VTDContextEntry *context) +{ + return context->lo & VTD_CONTEXT_ENTRY_P; +} + +static int vtd_get_context_entry_from_root(IntelIOMMUState *s, + VTDRootEntry *re, + uint8_t index, + VTDContextEntry *ce) +{ + dma_addr_t addr, ce_size; + + /* we have checked that root entry is present */ + ce_size = s->root_scalable ? 
VTD_CTX_ENTRY_SCALABLE_SIZE : + VTD_CTX_ENTRY_LEGACY_SIZE; + + if (s->root_scalable && index > UINT8_MAX / 2) { + index = index & (~VTD_DEVFN_CHECK_MASK); + addr = re->hi & VTD_ROOT_ENTRY_CTP; + } else { + addr = re->lo & VTD_ROOT_ENTRY_CTP; + } + + addr = addr + index * ce_size; + if (dma_memory_read(&address_space_memory, addr, ce, ce_size)) { + return -VTD_FR_CONTEXT_TABLE_INV; + } + + ce->lo = le64_to_cpu(ce->lo); + ce->hi = le64_to_cpu(ce->hi); + if (ce_size == VTD_CTX_ENTRY_SCALABLE_SIZE) { + ce->val[2] = le64_to_cpu(ce->val[2]); + ce->val[3] = le64_to_cpu(ce->val[3]); + } + return 0; +} + +static inline dma_addr_t vtd_ce_get_slpt_base(VTDContextEntry *ce) +{ + return ce->lo & VTD_CONTEXT_ENTRY_SLPTPTR; +} + +static inline uint64_t vtd_get_slpte_addr(uint64_t slpte, uint8_t aw) +{ + return slpte & VTD_SL_PT_BASE_ADDR_MASK(aw); +} + +/* Whether the pte indicates the address of the page frame */ +static inline bool vtd_is_last_slpte(uint64_t slpte, uint32_t level) +{ + return level == VTD_SL_PT_LEVEL || (slpte & VTD_SL_PT_PAGE_SIZE_MASK); +} + +/* Get the content of a spte located in @base_addr[@index] */ +static uint64_t vtd_get_slpte(dma_addr_t base_addr, uint32_t index) +{ + uint64_t slpte; + + assert(index < VTD_SL_PT_ENTRY_NR); + + if (dma_memory_read(&address_space_memory, + base_addr + index * sizeof(slpte), &slpte, + sizeof(slpte))) { + slpte = (uint64_t)-1; + return slpte; + } + slpte = le64_to_cpu(slpte); + return slpte; +} + +/* Given an iova and the level of paging structure, return the offset + * of current level. + */ +static inline uint32_t vtd_iova_level_offset(uint64_t iova, uint32_t level) +{ + return (iova >> vtd_slpt_level_shift(level)) & + ((1ULL << VTD_SL_LEVEL_BITS) - 1); +} + +/* Check Capability Register to see if the @level of page-table is supported */ +static inline bool vtd_is_level_supported(IntelIOMMUState *s, uint32_t level) +{ + return VTD_CAP_SAGAW_MASK & s->cap & + (1ULL << (level - 2 + VTD_CAP_SAGAW_SHIFT)); +} + +/* Return true if check passed, otherwise false */ +static inline bool vtd_pe_type_check(X86IOMMUState *x86_iommu, + VTDPASIDEntry *pe) +{ + switch (VTD_PE_GET_TYPE(pe)) { + case VTD_SM_PASID_ENTRY_FLT: + case VTD_SM_PASID_ENTRY_SLT: + case VTD_SM_PASID_ENTRY_NESTED: + break; + case VTD_SM_PASID_ENTRY_PT: + if (!x86_iommu->pt_supported) { + return false; + } + break; + default: + /* Unknown type */ + return false; + } + return true; +} + +static inline bool vtd_pdire_present(VTDPASIDDirEntry *pdire) +{ + return pdire->val & 1; +} + +/** + * Caller of this function should check present bit if wants + * to use pdir entry for further usage except for fpd bit check. 
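+ *
+ * A typical caller therefore looks like the snippet below (this is
+ * exactly the pattern used by vtd_get_pe_from_pasid_table() further
+ * down):
+ *
+ *     ret = vtd_get_pdire_from_pdir_table(pasid_dir_base, pasid, &pdire);
+ *     if (ret) {
+ *         return ret;
+ *     }
+ *     if (!vtd_pdire_present(&pdire)) {
+ *         return -VTD_FR_PASID_TABLE_INV;
+ *     }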
+ */ +static int vtd_get_pdire_from_pdir_table(dma_addr_t pasid_dir_base, + uint32_t pasid, + VTDPASIDDirEntry *pdire) +{ + uint32_t index; + dma_addr_t addr, entry_size; + + index = VTD_PASID_DIR_INDEX(pasid); + entry_size = VTD_PASID_DIR_ENTRY_SIZE; + addr = pasid_dir_base + index * entry_size; + if (dma_memory_read(&address_space_memory, addr, pdire, entry_size)) { + return -VTD_FR_PASID_TABLE_INV; + } + + return 0; +} + +static inline bool vtd_pe_present(VTDPASIDEntry *pe) +{ + return pe->val[0] & VTD_PASID_ENTRY_P; +} + +static int vtd_get_pe_in_pasid_leaf_table(IntelIOMMUState *s, + uint32_t pasid, + dma_addr_t addr, + VTDPASIDEntry *pe) +{ + uint32_t index; + dma_addr_t entry_size; + X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(s); + + index = VTD_PASID_TABLE_INDEX(pasid); + entry_size = VTD_PASID_ENTRY_SIZE; + addr = addr + index * entry_size; + if (dma_memory_read(&address_space_memory, addr, pe, entry_size)) { + return -VTD_FR_PASID_TABLE_INV; + } + + /* Do translation type check */ + if (!vtd_pe_type_check(x86_iommu, pe)) { + return -VTD_FR_PASID_TABLE_INV; + } + + if (!vtd_is_level_supported(s, VTD_PE_GET_LEVEL(pe))) { + return -VTD_FR_PASID_TABLE_INV; + } + + return 0; +} + +/** + * Caller of this function should check present bit if wants + * to use pasid entry for further usage except for fpd bit check. + */ +static int vtd_get_pe_from_pdire(IntelIOMMUState *s, + uint32_t pasid, + VTDPASIDDirEntry *pdire, + VTDPASIDEntry *pe) +{ + dma_addr_t addr = pdire->val & VTD_PASID_TABLE_BASE_ADDR_MASK; + + return vtd_get_pe_in_pasid_leaf_table(s, pasid, addr, pe); +} + +/** + * This function gets a pasid entry from a specified pasid + * table (includes dir and leaf table) with a specified pasid. + * Sanity check should be done to ensure return a present + * pasid entry to caller. + */ +static int vtd_get_pe_from_pasid_table(IntelIOMMUState *s, + dma_addr_t pasid_dir_base, + uint32_t pasid, + VTDPASIDEntry *pe) +{ + int ret; + VTDPASIDDirEntry pdire; + + ret = vtd_get_pdire_from_pdir_table(pasid_dir_base, + pasid, &pdire); + if (ret) { + return ret; + } + + if (!vtd_pdire_present(&pdire)) { + return -VTD_FR_PASID_TABLE_INV; + } + + ret = vtd_get_pe_from_pdire(s, pasid, &pdire, pe); + if (ret) { + return ret; + } + + if (!vtd_pe_present(pe)) { + return -VTD_FR_PASID_TABLE_INV; + } + + return 0; +} + +static int vtd_ce_get_rid2pasid_entry(IntelIOMMUState *s, + VTDContextEntry *ce, + VTDPASIDEntry *pe) +{ + uint32_t pasid; + dma_addr_t pasid_dir_base; + int ret = 0; + + pasid = VTD_CE_GET_RID2PASID(ce); + pasid_dir_base = VTD_CE_GET_PASID_DIR_TABLE(ce); + ret = vtd_get_pe_from_pasid_table(s, pasid_dir_base, pasid, pe); + + return ret; +} + +static int vtd_ce_get_pasid_fpd(IntelIOMMUState *s, + VTDContextEntry *ce, + bool *pe_fpd_set) +{ + int ret; + uint32_t pasid; + dma_addr_t pasid_dir_base; + VTDPASIDDirEntry pdire; + VTDPASIDEntry pe; + + pasid = VTD_CE_GET_RID2PASID(ce); + pasid_dir_base = VTD_CE_GET_PASID_DIR_TABLE(ce); + + /* + * No present bit check since fpd is meaningful even + * if the present bit is clear. + */ + ret = vtd_get_pdire_from_pdir_table(pasid_dir_base, pasid, &pdire); + if (ret) { + return ret; + } + + if (pdire.val & VTD_PASID_DIR_FPD) { + *pe_fpd_set = true; + return 0; + } + + if (!vtd_pdire_present(&pdire)) { + return -VTD_FR_PASID_TABLE_INV; + } + + /* + * No present bit check since fpd is meaningful even + * if the present bit is clear. 
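+ *
+ * (When the fpd bit comes back set, callers suppress the reporting of
+ * qualified faults instead of logging them; see the is_fpd_set
+ * handling in vtd_do_iommu_translate() below.)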
+ */ + ret = vtd_get_pe_from_pdire(s, pasid, &pdire, &pe); + if (ret) { + return ret; + } + + if (pe.val[0] & VTD_PASID_ENTRY_FPD) { + *pe_fpd_set = true; + } + + return 0; +} + +/* Get the page-table level that hardware should use for the second-level + * page-table walk from the Address Width field of context-entry. + */ +static inline uint32_t vtd_ce_get_level(VTDContextEntry *ce) +{ + return 2 + (ce->hi & VTD_CONTEXT_ENTRY_AW); +} + +static uint32_t vtd_get_iova_level(IntelIOMMUState *s, + VTDContextEntry *ce) +{ + VTDPASIDEntry pe; + + if (s->root_scalable) { + vtd_ce_get_rid2pasid_entry(s, ce, &pe); + return VTD_PE_GET_LEVEL(&pe); + } + + return vtd_ce_get_level(ce); +} + +static inline uint32_t vtd_ce_get_agaw(VTDContextEntry *ce) +{ + return 30 + (ce->hi & VTD_CONTEXT_ENTRY_AW) * 9; +} + +static uint32_t vtd_get_iova_agaw(IntelIOMMUState *s, + VTDContextEntry *ce) +{ + VTDPASIDEntry pe; + + if (s->root_scalable) { + vtd_ce_get_rid2pasid_entry(s, ce, &pe); + return 30 + ((pe.val[0] >> 2) & VTD_SM_PASID_ENTRY_AW) * 9; + } + + return vtd_ce_get_agaw(ce); +} + +static inline uint32_t vtd_ce_get_type(VTDContextEntry *ce) +{ + return ce->lo & VTD_CONTEXT_ENTRY_TT; +} + +/* Only for Legacy Mode. Return true if check passed, otherwise false */ +static inline bool vtd_ce_type_check(X86IOMMUState *x86_iommu, + VTDContextEntry *ce) +{ + switch (vtd_ce_get_type(ce)) { + case VTD_CONTEXT_TT_MULTI_LEVEL: + /* Always supported */ + break; + case VTD_CONTEXT_TT_DEV_IOTLB: + if (!x86_iommu->dt_supported) { + error_report_once("%s: DT specified but not supported", __func__); + return false; + } + break; + case VTD_CONTEXT_TT_PASS_THROUGH: + if (!x86_iommu->pt_supported) { + error_report_once("%s: PT specified but not supported", __func__); + return false; + } + break; + default: + /* Unknown type */ + error_report_once("%s: unknown ce type: %"PRIu32, __func__, + vtd_ce_get_type(ce)); + return false; + } + return true; +} + +static inline uint64_t vtd_iova_limit(IntelIOMMUState *s, + VTDContextEntry *ce, uint8_t aw) +{ + uint32_t ce_agaw = vtd_get_iova_agaw(s, ce); + return 1ULL << MIN(ce_agaw, aw); +} + +/* Return true if IOVA passes range check, otherwise false. */ +static inline bool vtd_iova_range_check(IntelIOMMUState *s, + uint64_t iova, VTDContextEntry *ce, + uint8_t aw) +{ + /* + * Check if @iova is above 2^X-1, where X is the minimum of MGAW + * in CAP_REG and AW in context-entry. 
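+ *
+ * E.g. with a 3-level page table (39-bit AGAW) and aw == 48,
+ * vtd_iova_limit() is 1ULL << 39, so an iova with any of bits
+ * 63:39 set fails the check below.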
+ */ + return !(iova & ~(vtd_iova_limit(s, ce, aw) - 1)); +} + +static dma_addr_t vtd_get_iova_pgtbl_base(IntelIOMMUState *s, + VTDContextEntry *ce) +{ + VTDPASIDEntry pe; + + if (s->root_scalable) { + vtd_ce_get_rid2pasid_entry(s, ce, &pe); + return pe.val[0] & VTD_SM_PASID_ENTRY_SLPTPTR; + } + + return vtd_ce_get_slpt_base(ce); +} + +/* + * Rsvd field masks for spte: + * vtd_spte_rsvd 4k pages + * vtd_spte_rsvd_large large pages + */ +static uint64_t vtd_spte_rsvd[5]; +static uint64_t vtd_spte_rsvd_large[5]; + +static bool vtd_slpte_nonzero_rsvd(uint64_t slpte, uint32_t level) +{ + uint64_t rsvd_mask = vtd_spte_rsvd[level]; + + if ((level == VTD_SL_PD_LEVEL || level == VTD_SL_PDP_LEVEL) && + (slpte & VTD_SL_PT_PAGE_SIZE_MASK)) { + /* large page */ + rsvd_mask = vtd_spte_rsvd_large[level]; + } + + return slpte & rsvd_mask; +} + +/* Find the VTD address space associated with a given bus number */ +static VTDBus *vtd_find_as_from_bus_num(IntelIOMMUState *s, uint8_t bus_num) +{ + VTDBus *vtd_bus = s->vtd_as_by_bus_num[bus_num]; + GHashTableIter iter; + + if (vtd_bus) { + return vtd_bus; + } + + /* + * Iterate over the registered buses to find the one which + * currently holds this bus number and update the bus_num + * lookup table. + */ + g_hash_table_iter_init(&iter, s->vtd_as_by_busptr); + while (g_hash_table_iter_next(&iter, NULL, (void **)&vtd_bus)) { + if (pci_bus_num(vtd_bus->bus) == bus_num) { + s->vtd_as_by_bus_num[bus_num] = vtd_bus; + return vtd_bus; + } + } + + return NULL; +} + +/* Given the @iova, get relevant @slptep. @slpte_level will be the last level + * of the translation, can be used for deciding the size of large page. + */ +static int vtd_iova_to_slpte(IntelIOMMUState *s, VTDContextEntry *ce, + uint64_t iova, bool is_write, + uint64_t *slptep, uint32_t *slpte_level, + bool *reads, bool *writes, uint8_t aw_bits) +{ + dma_addr_t addr = vtd_get_iova_pgtbl_base(s, ce); + uint32_t level = vtd_get_iova_level(s, ce); + uint32_t offset; + uint64_t slpte; + uint64_t access_right_check; + + if (!vtd_iova_range_check(s, iova, ce, aw_bits)) { + error_report_once("%s: detected IOVA overflow (iova=0x%" PRIx64 ")", + __func__, iova); + return -VTD_FR_ADDR_BEYOND_MGAW; + } + + /* FIXME: what is the Atomics request here? */ + access_right_check = is_write ? VTD_SL_W : VTD_SL_R; + + while (true) { + offset = vtd_iova_level_offset(iova, level); + slpte = vtd_get_slpte(addr, offset); + + if (slpte == (uint64_t)-1) { + error_report_once("%s: detected read error on DMAR slpte " + "(iova=0x%" PRIx64 ")", __func__, iova); + if (level == vtd_get_iova_level(s, ce)) { + /* Invalid programming of context-entry */ + return -VTD_FR_CONTEXT_ENTRY_INV; + } else { + return -VTD_FR_PAGING_ENTRY_INV; + } + } + *reads = (*reads) && (slpte & VTD_SL_R); + *writes = (*writes) && (slpte & VTD_SL_W); + if (!(slpte & access_right_check)) { + error_report_once("%s: detected slpte permission error " + "(iova=0x%" PRIx64 ", level=0x%" PRIx32 ", " + "slpte=0x%" PRIx64 ", write=%d)", __func__, + iova, level, slpte, is_write); + return is_write ? 
-VTD_FR_WRITE : -VTD_FR_READ;
+        }
+        if (vtd_slpte_nonzero_rsvd(slpte, level)) {
+            error_report_once("%s: detected slpte reserved bits non-zero "
+                              "(iova=0x%" PRIx64 ", level=0x%" PRIx32
+                              ", slpte=0x%" PRIx64 ")", __func__, iova,
+                              level, slpte);
+            return -VTD_FR_PAGING_ENTRY_RSVD;
+        }
+
+        if (vtd_is_last_slpte(slpte, level)) {
+            *slptep = slpte;
+            *slpte_level = level;
+            return 0;
+        }
+        addr = vtd_get_slpte_addr(slpte, aw_bits);
+        level--;
+    }
+}
+
+typedef int (*vtd_page_walk_hook)(IOMMUTLBEvent *event, void *private);
+
+/**
+ * Constant information used during page walking
+ *
+ * @hook_fn: hook func to be called on each detected page
+ * @private: private data to be passed into hook func
+ * @notify_unmap: whether we should notify invalid entries
+ * @as: VT-d address space of the device
+ * @aw: maximum address width
+ * @domain: domain ID of the page walk
+ */
+typedef struct {
+    VTDAddressSpace *as;
+    vtd_page_walk_hook hook_fn;
+    void *private;
+    bool notify_unmap;
+    uint8_t aw;
+    uint16_t domain_id;
+} vtd_page_walk_info;
+
+static int vtd_page_walk_one(IOMMUTLBEvent *event, vtd_page_walk_info *info)
+{
+    VTDAddressSpace *as = info->as;
+    vtd_page_walk_hook hook_fn = info->hook_fn;
+    void *private = info->private;
+    IOMMUTLBEntry *entry = &event->entry;
+    DMAMap target = {
+        .iova = entry->iova,
+        .size = entry->addr_mask,
+        .translated_addr = entry->translated_addr,
+        .perm = entry->perm,
+    };
+    const DMAMap *mapped = iova_tree_find(as->iova_tree, &target);
+
+    if (event->type == IOMMU_NOTIFIER_UNMAP && !info->notify_unmap) {
+        trace_vtd_page_walk_one_skip_unmap(entry->iova, entry->addr_mask);
+        return 0;
+    }
+
+    assert(hook_fn);
+
+    /* Update local IOVA mapped ranges */
+    if (event->type == IOMMU_NOTIFIER_MAP) {
+        if (mapped) {
+            /* If it's exactly the same translation, skip */
+            if (!memcmp(mapped, &target, sizeof(target))) {
+                trace_vtd_page_walk_one_skip_map(entry->iova, entry->addr_mask,
+                                                 entry->translated_addr);
+                return 0;
+            } else {
+                /*
+                 * Translation changed. Normally this should not
+                 * happen, but it can happen with buggy guest OSes.
+                 * Note that there will be a small window where we
+                 * have no mapping at all, but that's the best effort
+                 * we can do. The ideal way to emulate this would be
+                 * to atomically modify the PTE to follow what has
+                 * changed, but we can't. One example is that the
+                 * vfio driver only has VFIO_IOMMU_[UN]MAP_DMA but no
+                 * interface to modify a mapping (meanwhile it seems
+                 * meaningless to even provide one). Anyway, let's
+                 * mark this as a TODO in case one day we'll have
+                 * a better solution.
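+                 *
+                 * What the code below implements instead is, in
+                 * effect:
+                 *
+                 *     notify UNMAP(old)  +  iova_tree_remove(old)
+                 *     notify MAP(new)    +  iova_tree_insert(new)
+                 *
+                 * so the shadow tree and the notifier's view stay
+                 * consistent.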
+ */ + IOMMUAccessFlags cache_perm = entry->perm; + int ret; + + /* Emulate an UNMAP */ + event->type = IOMMU_NOTIFIER_UNMAP; + entry->perm = IOMMU_NONE; + trace_vtd_page_walk_one(info->domain_id, + entry->iova, + entry->translated_addr, + entry->addr_mask, + entry->perm); + ret = hook_fn(event, private); + if (ret) { + return ret; + } + /* Drop any existing mapping */ + iova_tree_remove(as->iova_tree, &target); + /* Recover the correct type */ + event->type = IOMMU_NOTIFIER_MAP; + entry->perm = cache_perm; + } + } + iova_tree_insert(as->iova_tree, &target); + } else { + if (!mapped) { + /* Skip since we didn't map this range at all */ + trace_vtd_page_walk_one_skip_unmap(entry->iova, entry->addr_mask); + return 0; + } + iova_tree_remove(as->iova_tree, &target); + } + + trace_vtd_page_walk_one(info->domain_id, entry->iova, + entry->translated_addr, entry->addr_mask, + entry->perm); + return hook_fn(event, private); +} + +/** + * vtd_page_walk_level - walk over specific level for IOVA range + * + * @addr: base GPA addr to start the walk + * @start: IOVA range start address + * @end: IOVA range end address (start <= addr < end) + * @read: whether parent level has read permission + * @write: whether parent level has write permission + * @info: constant information for the page walk + */ +static int vtd_page_walk_level(dma_addr_t addr, uint64_t start, + uint64_t end, uint32_t level, bool read, + bool write, vtd_page_walk_info *info) +{ + bool read_cur, write_cur, entry_valid; + uint32_t offset; + uint64_t slpte; + uint64_t subpage_size, subpage_mask; + IOMMUTLBEvent event; + uint64_t iova = start; + uint64_t iova_next; + int ret = 0; + + trace_vtd_page_walk_level(addr, level, start, end); + + subpage_size = 1ULL << vtd_slpt_level_shift(level); + subpage_mask = vtd_slpt_level_page_mask(level); + + while (iova < end) { + iova_next = (iova & subpage_mask) + subpage_size; + + offset = vtd_iova_level_offset(iova, level); + slpte = vtd_get_slpte(addr, offset); + + if (slpte == (uint64_t)-1) { + trace_vtd_page_walk_skip_read(iova, iova_next); + goto next; + } + + if (vtd_slpte_nonzero_rsvd(slpte, level)) { + trace_vtd_page_walk_skip_reserve(iova, iova_next); + goto next; + } + + /* Permissions are stacked with parents' */ + read_cur = read && (slpte & VTD_SL_R); + write_cur = write && (slpte & VTD_SL_W); + + /* + * As long as we have either read/write permission, this is a + * valid entry. The rule works for both page entries and page + * table entries. + */ + entry_valid = read_cur | write_cur; + + if (!vtd_is_last_slpte(slpte, level) && entry_valid) { + /* + * This is a valid PDE (or even bigger than PDE). We need + * to walk one further level. + */ + ret = vtd_page_walk_level(vtd_get_slpte_addr(slpte, info->aw), + iova, MIN(iova_next, end), level - 1, + read_cur, write_cur, info); + } else { + /* + * This means we are either: + * + * (1) the real page entry (either 4K page, or huge page) + * (2) the whole range is invalid + * + * In either case, we send an IOTLB notification down. + */ + event.entry.target_as = &address_space_memory; + event.entry.iova = iova & subpage_mask; + event.entry.perm = IOMMU_ACCESS_FLAG(read_cur, write_cur); + event.entry.addr_mask = ~subpage_mask; + /* NOTE: this is only meaningful if entry_valid == true */ + event.entry.translated_addr = vtd_get_slpte_addr(slpte, info->aw); + event.type = event.entry.perm ? 
IOMMU_NOTIFIER_MAP : + IOMMU_NOTIFIER_UNMAP; + ret = vtd_page_walk_one(&event, info); + } + + if (ret < 0) { + return ret; + } + +next: + iova = iova_next; + } + + return 0; +} + +/** + * vtd_page_walk - walk specific IOVA range, and call the hook + * + * @s: intel iommu state + * @ce: context entry to walk upon + * @start: IOVA address to start the walk + * @end: IOVA range end address (start <= addr < end) + * @info: page walking information struct + */ +static int vtd_page_walk(IntelIOMMUState *s, VTDContextEntry *ce, + uint64_t start, uint64_t end, + vtd_page_walk_info *info) +{ + dma_addr_t addr = vtd_get_iova_pgtbl_base(s, ce); + uint32_t level = vtd_get_iova_level(s, ce); + + if (!vtd_iova_range_check(s, start, ce, info->aw)) { + return -VTD_FR_ADDR_BEYOND_MGAW; + } + + if (!vtd_iova_range_check(s, end, ce, info->aw)) { + /* Fix end so that it reaches the maximum */ + end = vtd_iova_limit(s, ce, info->aw); + } + + return vtd_page_walk_level(addr, start, end, level, true, true, info); +} + +static int vtd_root_entry_rsvd_bits_check(IntelIOMMUState *s, + VTDRootEntry *re) +{ + /* Legacy Mode reserved bits check */ + if (!s->root_scalable && + (re->hi || (re->lo & VTD_ROOT_ENTRY_RSVD(s->aw_bits)))) + goto rsvd_err; + + /* Scalable Mode reserved bits check */ + if (s->root_scalable && + ((re->lo & VTD_ROOT_ENTRY_RSVD(s->aw_bits)) || + (re->hi & VTD_ROOT_ENTRY_RSVD(s->aw_bits)))) + goto rsvd_err; + + return 0; + +rsvd_err: + error_report_once("%s: invalid root entry: hi=0x%"PRIx64 + ", lo=0x%"PRIx64, + __func__, re->hi, re->lo); + return -VTD_FR_ROOT_ENTRY_RSVD; +} + +static inline int vtd_context_entry_rsvd_bits_check(IntelIOMMUState *s, + VTDContextEntry *ce) +{ + if (!s->root_scalable && + (ce->hi & VTD_CONTEXT_ENTRY_RSVD_HI || + ce->lo & VTD_CONTEXT_ENTRY_RSVD_LO(s->aw_bits))) { + error_report_once("%s: invalid context entry: hi=%"PRIx64 + ", lo=%"PRIx64" (reserved nonzero)", + __func__, ce->hi, ce->lo); + return -VTD_FR_CONTEXT_ENTRY_RSVD; + } + + if (s->root_scalable && + (ce->val[0] & VTD_SM_CONTEXT_ENTRY_RSVD_VAL0(s->aw_bits) || + ce->val[1] & VTD_SM_CONTEXT_ENTRY_RSVD_VAL1 || + ce->val[2] || + ce->val[3])) { + error_report_once("%s: invalid context entry: val[3]=%"PRIx64 + ", val[2]=%"PRIx64 + ", val[1]=%"PRIx64 + ", val[0]=%"PRIx64" (reserved nonzero)", + __func__, ce->val[3], ce->val[2], + ce->val[1], ce->val[0]); + return -VTD_FR_CONTEXT_ENTRY_RSVD; + } + + return 0; +} + +static int vtd_ce_rid2pasid_check(IntelIOMMUState *s, + VTDContextEntry *ce) +{ + VTDPASIDEntry pe; + + /* + * Make sure in Scalable Mode, a present context entry + * has valid rid2pasid setting, which includes valid + * rid2pasid field and corresponding pasid entry setting + */ + return vtd_ce_get_rid2pasid_entry(s, ce, &pe); +} + +/* Map a device to its corresponding domain (context-entry) */ +static int vtd_dev_to_context_entry(IntelIOMMUState *s, uint8_t bus_num, + uint8_t devfn, VTDContextEntry *ce) +{ + VTDRootEntry re; + int ret_fr; + X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(s); + + ret_fr = vtd_get_root_entry(s, bus_num, &re); + if (ret_fr) { + return ret_fr; + } + + if (!vtd_root_entry_present(s, &re, devfn)) { + /* Not error - it's okay we don't have root entry. 
*/ + trace_vtd_re_not_present(bus_num); + return -VTD_FR_ROOT_ENTRY_P; + } + + ret_fr = vtd_root_entry_rsvd_bits_check(s, &re); + if (ret_fr) { + return ret_fr; + } + + ret_fr = vtd_get_context_entry_from_root(s, &re, devfn, ce); + if (ret_fr) { + return ret_fr; + } + + if (!vtd_ce_present(ce)) { + /* Not error - it's okay we don't have context entry. */ + trace_vtd_ce_not_present(bus_num, devfn); + return -VTD_FR_CONTEXT_ENTRY_P; + } + + ret_fr = vtd_context_entry_rsvd_bits_check(s, ce); + if (ret_fr) { + return ret_fr; + } + + /* Check if the programming of context-entry is valid */ + if (!s->root_scalable && + !vtd_is_level_supported(s, vtd_ce_get_level(ce))) { + error_report_once("%s: invalid context entry: hi=%"PRIx64 + ", lo=%"PRIx64" (level %d not supported)", + __func__, ce->hi, ce->lo, + vtd_ce_get_level(ce)); + return -VTD_FR_CONTEXT_ENTRY_INV; + } + + if (!s->root_scalable) { + /* Do translation type check */ + if (!vtd_ce_type_check(x86_iommu, ce)) { + /* Errors dumped in vtd_ce_type_check() */ + return -VTD_FR_CONTEXT_ENTRY_INV; + } + } else { + /* + * Check if the programming of context-entry.rid2pasid + * and corresponding pasid setting is valid, and thus + * avoids to check pasid entry fetching result in future + * helper function calling. + */ + ret_fr = vtd_ce_rid2pasid_check(s, ce); + if (ret_fr) { + return ret_fr; + } + } + + return 0; +} + +static int vtd_sync_shadow_page_hook(IOMMUTLBEvent *event, + void *private) +{ + memory_region_notify_iommu(private, 0, *event); + return 0; +} + +static uint16_t vtd_get_domain_id(IntelIOMMUState *s, + VTDContextEntry *ce) +{ + VTDPASIDEntry pe; + + if (s->root_scalable) { + vtd_ce_get_rid2pasid_entry(s, ce, &pe); + return VTD_SM_PASID_ENTRY_DID(pe.val[1]); + } + + return VTD_CONTEXT_ENTRY_DID(ce->hi); +} + +static int vtd_sync_shadow_page_table_range(VTDAddressSpace *vtd_as, + VTDContextEntry *ce, + hwaddr addr, hwaddr size) +{ + IntelIOMMUState *s = vtd_as->iommu_state; + vtd_page_walk_info info = { + .hook_fn = vtd_sync_shadow_page_hook, + .private = (void *)&vtd_as->iommu, + .notify_unmap = true, + .aw = s->aw_bits, + .as = vtd_as, + .domain_id = vtd_get_domain_id(s, ce), + }; + + return vtd_page_walk(s, ce, addr, addr + size, &info); +} + +static int vtd_sync_shadow_page_table(VTDAddressSpace *vtd_as) +{ + int ret; + VTDContextEntry ce; + IOMMUNotifier *n; + + if (!(vtd_as->iommu.iommu_notify_flags & IOMMU_NOTIFIER_IOTLB_EVENTS)) { + return 0; + } + + ret = vtd_dev_to_context_entry(vtd_as->iommu_state, + pci_bus_num(vtd_as->bus), + vtd_as->devfn, &ce); + if (ret) { + if (ret == -VTD_FR_CONTEXT_ENTRY_P) { + /* + * It's a valid scenario to have a context entry that is + * not present. For example, when a device is removed + * from an existing domain then the context entry will be + * zeroed by the guest before it was put into another + * domain. When this happens, instead of synchronizing + * the shadow pages we should invalidate all existing + * mappings and notify the backends. + */ + IOMMU_NOTIFIER_FOREACH(n, &vtd_as->iommu) { + vtd_address_space_unmap(vtd_as, n); + } + ret = 0; + } + return ret; + } + + return vtd_sync_shadow_page_table_range(vtd_as, &ce, 0, UINT64_MAX); +} + +/* + * Check if specific device is configured to bypass address + * translation for DMA requests. In Scalable Mode, bypass + * 1st-level translation or 2nd-level translation, it depends + * on PGTT setting. 
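+ *
+ * Concretely, the checks below reduce to:
+ *
+ *     legacy:   vtd_ce_get_type(&ce) == VTD_CONTEXT_TT_PASS_THROUGH
+ *     scalable: VTD_PE_GET_TYPE(&pe) == VTD_SM_PASID_ENTRY_PT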
+ */ +static bool vtd_dev_pt_enabled(VTDAddressSpace *as) +{ + IntelIOMMUState *s; + VTDContextEntry ce; + VTDPASIDEntry pe; + int ret; + + assert(as); + + s = as->iommu_state; + ret = vtd_dev_to_context_entry(s, pci_bus_num(as->bus), + as->devfn, &ce); + if (ret) { + /* + * Possibly failed to parse the context entry for some reason + * (e.g., during init, or any guest configuration errors on + * context entries). We should assume PT not enabled for + * safety. + */ + return false; + } + + if (s->root_scalable) { + ret = vtd_ce_get_rid2pasid_entry(s, &ce, &pe); + if (ret) { + error_report_once("%s: vtd_ce_get_rid2pasid_entry error: %"PRId32, + __func__, ret); + return false; + } + return (VTD_PE_GET_TYPE(&pe) == VTD_SM_PASID_ENTRY_PT); + } + + return (vtd_ce_get_type(&ce) == VTD_CONTEXT_TT_PASS_THROUGH); +} + +/* Return whether the device is using IOMMU translation. */ +static bool vtd_switch_address_space(VTDAddressSpace *as) +{ + bool use_iommu; + /* Whether we need to take the BQL on our own */ + bool take_bql = !qemu_mutex_iothread_locked(); + + assert(as); + + use_iommu = as->iommu_state->dmar_enabled && !vtd_dev_pt_enabled(as); + + trace_vtd_switch_address_space(pci_bus_num(as->bus), + VTD_PCI_SLOT(as->devfn), + VTD_PCI_FUNC(as->devfn), + use_iommu); + + /* + * It's possible that we reach here without BQL, e.g., when called + * from vtd_pt_enable_fast_path(). However the memory APIs need + * it. We'd better make sure we have had it already, or, take it. + */ + if (take_bql) { + qemu_mutex_lock_iothread(); + } + + /* Turn off first then on the other */ + if (use_iommu) { + memory_region_set_enabled(&as->nodmar, false); + memory_region_set_enabled(MEMORY_REGION(&as->iommu), true); + } else { + memory_region_set_enabled(MEMORY_REGION(&as->iommu), false); + memory_region_set_enabled(&as->nodmar, true); + } + + if (take_bql) { + qemu_mutex_unlock_iothread(); + } + + return use_iommu; +} + +static void vtd_switch_address_space_all(IntelIOMMUState *s) +{ + GHashTableIter iter; + VTDBus *vtd_bus; + int i; + + g_hash_table_iter_init(&iter, s->vtd_as_by_busptr); + while (g_hash_table_iter_next(&iter, NULL, (void **)&vtd_bus)) { + for (i = 0; i < PCI_DEVFN_MAX; i++) { + if (!vtd_bus->dev_as[i]) { + continue; + } + vtd_switch_address_space(vtd_bus->dev_as[i]); + } + } +} + +static inline uint16_t vtd_make_source_id(uint8_t bus_num, uint8_t devfn) +{ + return ((bus_num & 0xffUL) << 8) | (devfn & 0xffUL); +} + +static const bool vtd_qualified_faults[] = { + [VTD_FR_RESERVED] = false, + [VTD_FR_ROOT_ENTRY_P] = false, + [VTD_FR_CONTEXT_ENTRY_P] = true, + [VTD_FR_CONTEXT_ENTRY_INV] = true, + [VTD_FR_ADDR_BEYOND_MGAW] = true, + [VTD_FR_WRITE] = true, + [VTD_FR_READ] = true, + [VTD_FR_PAGING_ENTRY_INV] = true, + [VTD_FR_ROOT_TABLE_INV] = false, + [VTD_FR_CONTEXT_TABLE_INV] = false, + [VTD_FR_ROOT_ENTRY_RSVD] = false, + [VTD_FR_PAGING_ENTRY_RSVD] = true, + [VTD_FR_CONTEXT_ENTRY_TT] = true, + [VTD_FR_PASID_TABLE_INV] = false, + [VTD_FR_RESERVED_ERR] = false, + [VTD_FR_MAX] = false, +}; + +/* To see if a fault condition is "qualified", which is reported to software + * only if the FPD field in the context-entry used to process the faulting + * request is 0. 
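+ *
+ * Callers combine the two conditions roughly as:
+ *
+ *     if (is_fpd_set && vtd_is_qualified_fault(fault)) {
+ *         trace_vtd_fault_disabled();
+ *     } else {
+ *         vtd_report_dmar_fault(s, source_id, addr, fault, is_write);
+ *     }
+ *
+ * which is what the VTD_PE_GET_FPD_ERR() macro used below expands to.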
+ */
+static inline bool vtd_is_qualified_fault(VTDFaultReason fault)
+{
+    return vtd_qualified_faults[fault];
+}
+
+static inline bool vtd_is_interrupt_addr(hwaddr addr)
+{
+    return VTD_INTERRUPT_ADDR_FIRST <= addr && addr <= VTD_INTERRUPT_ADDR_LAST;
+}
+
+static void vtd_pt_enable_fast_path(IntelIOMMUState *s, uint16_t source_id)
+{
+    VTDBus *vtd_bus;
+    VTDAddressSpace *vtd_as;
+    bool success = false;
+
+    vtd_bus = vtd_find_as_from_bus_num(s, VTD_SID_TO_BUS(source_id));
+    if (!vtd_bus) {
+        goto out;
+    }
+
+    vtd_as = vtd_bus->dev_as[VTD_SID_TO_DEVFN(source_id)];
+    if (!vtd_as) {
+        goto out;
+    }
+
+    if (vtd_switch_address_space(vtd_as) == false) {
+        /* We switched off IOMMU region successfully. */
+        success = true;
+    }
+
+out:
+    trace_vtd_pt_enable_fast_path(source_id, success);
+}
+
+/* Map dev to context-entry then do a paging-structures walk to do an iommu
+ * translation.
+ *
+ * Called from RCU critical section.
+ *
+ * @bus_num: The bus number
+ * @devfn: The devfn, which is the combination of device and function number
+ * @is_write: The access is a write operation
+ * @entry: IOMMUTLBEntry that contains the addr to be translated and result
+ *
+ * Returns true if translation is successful, otherwise false.
+ */
+static bool vtd_do_iommu_translate(VTDAddressSpace *vtd_as, PCIBus *bus,
+                                   uint8_t devfn, hwaddr addr, bool is_write,
+                                   IOMMUTLBEntry *entry)
+{
+    IntelIOMMUState *s = vtd_as->iommu_state;
+    VTDContextEntry ce;
+    uint8_t bus_num = pci_bus_num(bus);
+    VTDContextCacheEntry *cc_entry;
+    uint64_t slpte, page_mask;
+    uint32_t level;
+    uint16_t source_id = vtd_make_source_id(bus_num, devfn);
+    int ret_fr;
+    bool is_fpd_set = false;
+    bool reads = true;
+    bool writes = true;
+    uint8_t access_flags;
+    VTDIOTLBEntry *iotlb_entry;
+
+    /*
+     * We have standalone memory region for interrupt addresses, we
+     * should never receive translation requests in this region.
+     */
+    assert(!vtd_is_interrupt_addr(addr));
+
+    vtd_iommu_lock(s);
+
+    cc_entry = &vtd_as->context_cache_entry;
+
+    /* Try to fetch slpte from the IOTLB */
+    iotlb_entry = vtd_lookup_iotlb(s, source_id, addr);
+    if (iotlb_entry) {
+        trace_vtd_iotlb_page_hit(source_id, addr, iotlb_entry->slpte,
+                                 iotlb_entry->domain_id);
+        slpte = iotlb_entry->slpte;
+        access_flags = iotlb_entry->access_flags;
+        page_mask = iotlb_entry->mask;
+        goto out;
+    }
+
+    /* Try to fetch context-entry from cache first */
+    if (cc_entry->context_cache_gen == s->context_cache_gen) {
+        trace_vtd_iotlb_cc_hit(bus_num, devfn, cc_entry->context_entry.hi,
+                               cc_entry->context_entry.lo,
+                               cc_entry->context_cache_gen);
+        ce = cc_entry->context_entry;
+        is_fpd_set = ce.lo & VTD_CONTEXT_ENTRY_FPD;
+        if (!is_fpd_set && s->root_scalable) {
+            ret_fr = vtd_ce_get_pasid_fpd(s, &ce, &is_fpd_set);
+            VTD_PE_GET_FPD_ERR(ret_fr, is_fpd_set, s, source_id, addr, is_write);
+        }
+    } else {
+        ret_fr = vtd_dev_to_context_entry(s, bus_num, devfn, &ce);
+        is_fpd_set = ce.lo & VTD_CONTEXT_ENTRY_FPD;
+        if (!ret_fr && !is_fpd_set && s->root_scalable) {
+            ret_fr = vtd_ce_get_pasid_fpd(s, &ce, &is_fpd_set);
+        }
+        VTD_PE_GET_FPD_ERR(ret_fr, is_fpd_set, s, source_id, addr, is_write);
+        /* Update context-cache */
+        trace_vtd_iotlb_cc_update(bus_num, devfn, ce.hi, ce.lo,
+                                  cc_entry->context_cache_gen,
+                                  s->context_cache_gen);
+        cc_entry->context_entry = ce;
+        cc_entry->context_cache_gen = s->context_cache_gen;
+    }
+
+    /*
+     * We don't need to translate for pass-through context entries.
+     * Also, let's ignore IOTLB caching as well for PT devices.
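+     *
+     * The identity mapping handed back below is just the aligned 4K
+     * page: iova = translated_addr = addr & VTD_PAGE_MASK_4K, with
+     * addr_mask = ~VTD_PAGE_MASK_4K and full RW permission.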
+     */
+    if (vtd_ce_get_type(&ce) == VTD_CONTEXT_TT_PASS_THROUGH) {
+        entry->iova = addr & VTD_PAGE_MASK_4K;
+        entry->translated_addr = entry->iova;
+        entry->addr_mask = ~VTD_PAGE_MASK_4K;
+        entry->perm = IOMMU_RW;
+        trace_vtd_translate_pt(source_id, entry->iova);
+
+        /*
+         * When this happens, it means firstly caching-mode is not
+         * enabled, and this is the first passthrough translation for
+         * the device. Let's enable the fast path for passthrough.
+         *
+         * When passthrough is disabled again for the device, we can
+         * capture it via the context entry invalidation, then the
+         * IOMMU region can be swapped back.
+         */
+        vtd_pt_enable_fast_path(s, source_id);
+        vtd_iommu_unlock(s);
+        return true;
+    }
+
+    ret_fr = vtd_iova_to_slpte(s, &ce, addr, is_write, &slpte, &level,
+                               &reads, &writes, s->aw_bits);
+    VTD_PE_GET_FPD_ERR(ret_fr, is_fpd_set, s, source_id, addr, is_write);
+
+    page_mask = vtd_slpt_level_page_mask(level);
+    access_flags = IOMMU_ACCESS_FLAG(reads, writes);
+    vtd_update_iotlb(s, source_id, vtd_get_domain_id(s, &ce), addr, slpte,
+                     access_flags, level);
+out:
+    vtd_iommu_unlock(s);
+    entry->iova = addr & page_mask;
+    entry->translated_addr = vtd_get_slpte_addr(slpte, s->aw_bits) & page_mask;
+    entry->addr_mask = ~page_mask;
+    entry->perm = access_flags;
+    return true;
+
+error:
+    vtd_iommu_unlock(s);
+    entry->iova = 0;
+    entry->translated_addr = 0;
+    entry->addr_mask = 0;
+    entry->perm = IOMMU_NONE;
+    return false;
+}
+
+static void vtd_root_table_setup(IntelIOMMUState *s)
+{
+    s->root = vtd_get_quad_raw(s, DMAR_RTADDR_REG);
+    s->root &= VTD_RTADDR_ADDR_MASK(s->aw_bits);
+
+    vtd_update_scalable_state(s);
+
+    trace_vtd_reg_dmar_root(s->root, s->root_scalable);
+}
+
+static void vtd_iec_notify_all(IntelIOMMUState *s, bool global,
+                               uint32_t index, uint32_t mask)
+{
+    x86_iommu_iec_notify_all(X86_IOMMU_DEVICE(s), global, index, mask);
+}
+
+static void vtd_interrupt_remap_table_setup(IntelIOMMUState *s)
+{
+    uint64_t value = 0;
+    value = vtd_get_quad_raw(s, DMAR_IRTA_REG);
+    s->intr_size = 1UL << ((value & VTD_IRTA_SIZE_MASK) + 1);
+    s->intr_root = value & VTD_IRTA_ADDR_MASK(s->aw_bits);
+    s->intr_eime = value & VTD_IRTA_EIME;
+
+    /* Notify global invalidation */
+    vtd_iec_notify_all(s, true, 0, 0);
+
+    trace_vtd_reg_ir_root(s->intr_root, s->intr_size);
+}
+
+static void vtd_iommu_replay_all(IntelIOMMUState *s)
+{
+    VTDAddressSpace *vtd_as;
+
+    QLIST_FOREACH(vtd_as, &s->vtd_as_with_notifiers, next) {
+        vtd_sync_shadow_page_table(vtd_as);
+    }
+}
+
+static void vtd_context_global_invalidate(IntelIOMMUState *s)
+{
+    trace_vtd_inv_desc_cc_global();
+    /* Protects context cache */
+    vtd_iommu_lock(s);
+    s->context_cache_gen++;
+    if (s->context_cache_gen == VTD_CONTEXT_CACHE_GEN_MAX) {
+        vtd_reset_context_cache_locked(s);
+    }
+    vtd_iommu_unlock(s);
+    vtd_address_space_refresh_all(s);
+    /*
+     * From VT-d spec 6.5.2.1, a global context entry invalidation
+     * should be followed by an IOTLB global invalidation, so we
+     * should be safe even without this. However, let's replay the
+     * region as well to be safer, and come back here when we need
+     * finer tuning for the VT-d emulation code.
+     */
+    vtd_iommu_replay_all(s);
+}
+
+/* Do a context-cache device-selective invalidation.
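+ * The FM field selects how many low bits of the SID are ignored
+ * when matching devices, e.g. func_mask == 2 masks SID bits 2:1,
+ * so a devfn of 0x00 also matches 0x02, 0x04 and 0x06.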
+ * @func_mask: FM field after shifting + */ +static void vtd_context_device_invalidate(IntelIOMMUState *s, + uint16_t source_id, + uint16_t func_mask) +{ + uint16_t mask; + VTDBus *vtd_bus; + VTDAddressSpace *vtd_as; + uint8_t bus_n, devfn; + uint16_t devfn_it; + + trace_vtd_inv_desc_cc_devices(source_id, func_mask); + + switch (func_mask & 3) { + case 0: + mask = 0; /* No bits in the SID field masked */ + break; + case 1: + mask = 4; /* Mask bit 2 in the SID field */ + break; + case 2: + mask = 6; /* Mask bit 2:1 in the SID field */ + break; + case 3: + mask = 7; /* Mask bit 2:0 in the SID field */ + break; + default: + g_assert_not_reached(); + } + mask = ~mask; + + bus_n = VTD_SID_TO_BUS(source_id); + vtd_bus = vtd_find_as_from_bus_num(s, bus_n); + if (vtd_bus) { + devfn = VTD_SID_TO_DEVFN(source_id); + for (devfn_it = 0; devfn_it < PCI_DEVFN_MAX; ++devfn_it) { + vtd_as = vtd_bus->dev_as[devfn_it]; + if (vtd_as && ((devfn_it & mask) == (devfn & mask))) { + trace_vtd_inv_desc_cc_device(bus_n, VTD_PCI_SLOT(devfn_it), + VTD_PCI_FUNC(devfn_it)); + vtd_iommu_lock(s); + vtd_as->context_cache_entry.context_cache_gen = 0; + vtd_iommu_unlock(s); + /* + * Do switch address space when needed, in case if the + * device passthrough bit is switched. + */ + vtd_switch_address_space(vtd_as); + /* + * So a device is moving out of (or moving into) a + * domain, resync the shadow page table. + * This won't bring bad even if we have no such + * notifier registered - the IOMMU notification + * framework will skip MAP notifications if that + * happened. + */ + vtd_sync_shadow_page_table(vtd_as); + } + } + } +} + +/* Context-cache invalidation + * Returns the Context Actual Invalidation Granularity. + * @val: the content of the CCMD_REG + */ +static uint64_t vtd_context_cache_invalidate(IntelIOMMUState *s, uint64_t val) +{ + uint64_t caig; + uint64_t type = val & VTD_CCMD_CIRG_MASK; + + switch (type) { + case VTD_CCMD_DOMAIN_INVL: + /* Fall through */ + case VTD_CCMD_GLOBAL_INVL: + caig = VTD_CCMD_GLOBAL_INVL_A; + vtd_context_global_invalidate(s); + break; + + case VTD_CCMD_DEVICE_INVL: + caig = VTD_CCMD_DEVICE_INVL_A; + vtd_context_device_invalidate(s, VTD_CCMD_SID(val), VTD_CCMD_FM(val)); + break; + + default: + error_report_once("%s: invalid context: 0x%" PRIx64, + __func__, val); + caig = 0; + } + return caig; +} + +static void vtd_iotlb_global_invalidate(IntelIOMMUState *s) +{ + trace_vtd_inv_desc_iotlb_global(); + vtd_reset_iotlb(s); + vtd_iommu_replay_all(s); +} + +static void vtd_iotlb_domain_invalidate(IntelIOMMUState *s, uint16_t domain_id) +{ + VTDContextEntry ce; + VTDAddressSpace *vtd_as; + + trace_vtd_inv_desc_iotlb_domain(domain_id); + + vtd_iommu_lock(s); + g_hash_table_foreach_remove(s->iotlb, vtd_hash_remove_by_domain, + &domain_id); + vtd_iommu_unlock(s); + + QLIST_FOREACH(vtd_as, &s->vtd_as_with_notifiers, next) { + if (!vtd_dev_to_context_entry(s, pci_bus_num(vtd_as->bus), + vtd_as->devfn, &ce) && + domain_id == vtd_get_domain_id(s, &ce)) { + vtd_sync_shadow_page_table(vtd_as); + } + } +} + +static void vtd_iotlb_page_invalidate_notify(IntelIOMMUState *s, + uint16_t domain_id, hwaddr addr, + uint8_t am) +{ + VTDAddressSpace *vtd_as; + VTDContextEntry ce; + int ret; + hwaddr size = (1 << am) * VTD_PAGE_SIZE; + + QLIST_FOREACH(vtd_as, &(s->vtd_as_with_notifiers), next) { + ret = vtd_dev_to_context_entry(s, pci_bus_num(vtd_as->bus), + vtd_as->devfn, &ce); + if (!ret && domain_id == vtd_get_domain_id(s, &ce)) { + if (vtd_as_has_map_notifier(vtd_as)) { + /* + * As long as we have MAP 
notifications registered in + * any of our IOMMU notifiers, we need to sync the + * shadow page table. + */ + vtd_sync_shadow_page_table_range(vtd_as, &ce, addr, size); + } else { + /* + * For UNMAP-only notifiers, we don't need to walk the + * page tables. We just deliver the PSI down to + * invalidate caches. + */ + IOMMUTLBEvent event = { + .type = IOMMU_NOTIFIER_UNMAP, + .entry = { + .target_as = &address_space_memory, + .iova = addr, + .translated_addr = 0, + .addr_mask = size - 1, + .perm = IOMMU_NONE, + }, + }; + memory_region_notify_iommu(&vtd_as->iommu, 0, event); + } + } + } +} + +static void vtd_iotlb_page_invalidate(IntelIOMMUState *s, uint16_t domain_id, + hwaddr addr, uint8_t am) +{ + VTDIOTLBPageInvInfo info; + + trace_vtd_inv_desc_iotlb_pages(domain_id, addr, am); + + assert(am <= VTD_MAMV); + info.domain_id = domain_id; + info.addr = addr; + info.mask = ~((1 << am) - 1); + vtd_iommu_lock(s); + g_hash_table_foreach_remove(s->iotlb, vtd_hash_remove_by_page, &info); + vtd_iommu_unlock(s); + vtd_iotlb_page_invalidate_notify(s, domain_id, addr, am); +} + +/* Flush IOTLB + * Returns the IOTLB Actual Invalidation Granularity. + * @val: the content of the IOTLB_REG + */ +static uint64_t vtd_iotlb_flush(IntelIOMMUState *s, uint64_t val) +{ + uint64_t iaig; + uint64_t type = val & VTD_TLB_FLUSH_GRANU_MASK; + uint16_t domain_id; + hwaddr addr; + uint8_t am; + + switch (type) { + case VTD_TLB_GLOBAL_FLUSH: + iaig = VTD_TLB_GLOBAL_FLUSH_A; + vtd_iotlb_global_invalidate(s); + break; + + case VTD_TLB_DSI_FLUSH: + domain_id = VTD_TLB_DID(val); + iaig = VTD_TLB_DSI_FLUSH_A; + vtd_iotlb_domain_invalidate(s, domain_id); + break; + + case VTD_TLB_PSI_FLUSH: + domain_id = VTD_TLB_DID(val); + addr = vtd_get_quad_raw(s, DMAR_IVA_REG); + am = VTD_IVA_AM(addr); + addr = VTD_IVA_ADDR(addr); + if (am > VTD_MAMV) { + error_report_once("%s: address mask overflow: 0x%" PRIx64, + __func__, vtd_get_quad_raw(s, DMAR_IVA_REG)); + iaig = 0; + break; + } + iaig = VTD_TLB_PSI_FLUSH_A; + vtd_iotlb_page_invalidate(s, domain_id, addr, am); + break; + + default: + error_report_once("%s: invalid granularity: 0x%" PRIx64, + __func__, val); + iaig = 0; + } + return iaig; +} + +static void vtd_fetch_inv_desc(IntelIOMMUState *s); + +static inline bool vtd_queued_inv_disable_check(IntelIOMMUState *s) +{ + return s->qi_enabled && (s->iq_tail == s->iq_head) && + (s->iq_last_desc_type == VTD_INV_DESC_WAIT); +} + +static void vtd_handle_gcmd_qie(IntelIOMMUState *s, bool en) +{ + uint64_t iqa_val = vtd_get_quad_raw(s, DMAR_IQA_REG); + + trace_vtd_inv_qi_enable(en); + + if (en) { + s->iq = iqa_val & VTD_IQA_IQA_MASK(s->aw_bits); + /* 2^(x+8) entries */ + s->iq_size = 1UL << ((iqa_val & VTD_IQA_QS) + 8 - (s->iq_dw ? 1 : 0)); + s->qi_enabled = true; + trace_vtd_inv_qi_setup(s->iq, s->iq_size); + /* Ok - report back to driver */ + vtd_set_clear_mask_long(s, DMAR_GSTS_REG, 0, VTD_GSTS_QIES); + + if (s->iq_tail != 0) { + /* + * This is a spec violation but Windows guests are known to set up + * Queued Invalidation this way so we allow the write and process + * Invalidation Descriptors right away. 
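+ * (That is, the guest programs IQT_REG before setting GCMD.QIE;
+ * the vtd_fetch_inv_desc() call below only runs when no queue
+ * error is already pending in FSTS.IQE.)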
+ */ + trace_vtd_warn_invalid_qi_tail(s->iq_tail); + if (!(vtd_get_long_raw(s, DMAR_FSTS_REG) & VTD_FSTS_IQE)) { + vtd_fetch_inv_desc(s); + } + } + } else { + if (vtd_queued_inv_disable_check(s)) { + /* disable Queued Invalidation */ + vtd_set_quad_raw(s, DMAR_IQH_REG, 0); + s->iq_head = 0; + s->qi_enabled = false; + /* Ok - report back to driver */ + vtd_set_clear_mask_long(s, DMAR_GSTS_REG, VTD_GSTS_QIES, 0); + } else { + error_report_once("%s: detected improper state when disable QI " + "(head=0x%x, tail=0x%x, last_type=%d)", + __func__, + s->iq_head, s->iq_tail, s->iq_last_desc_type); + } + } +} + +/* Set Root Table Pointer */ +static void vtd_handle_gcmd_srtp(IntelIOMMUState *s) +{ + vtd_root_table_setup(s); + /* Ok - report back to driver */ + vtd_set_clear_mask_long(s, DMAR_GSTS_REG, 0, VTD_GSTS_RTPS); + vtd_reset_caches(s); + vtd_address_space_refresh_all(s); +} + +/* Set Interrupt Remap Table Pointer */ +static void vtd_handle_gcmd_sirtp(IntelIOMMUState *s) +{ + vtd_interrupt_remap_table_setup(s); + /* Ok - report back to driver */ + vtd_set_clear_mask_long(s, DMAR_GSTS_REG, 0, VTD_GSTS_IRTPS); +} + +/* Handle Translation Enable/Disable */ +static void vtd_handle_gcmd_te(IntelIOMMUState *s, bool en) +{ + if (s->dmar_enabled == en) { + return; + } + + trace_vtd_dmar_enable(en); + + if (en) { + s->dmar_enabled = true; + /* Ok - report back to driver */ + vtd_set_clear_mask_long(s, DMAR_GSTS_REG, 0, VTD_GSTS_TES); + } else { + s->dmar_enabled = false; + + /* Clear the index of Fault Recording Register */ + s->next_frcd_reg = 0; + /* Ok - report back to driver */ + vtd_set_clear_mask_long(s, DMAR_GSTS_REG, VTD_GSTS_TES, 0); + } + + vtd_reset_caches(s); + vtd_address_space_refresh_all(s); +} + +/* Handle Interrupt Remap Enable/Disable */ +static void vtd_handle_gcmd_ire(IntelIOMMUState *s, bool en) +{ + trace_vtd_ir_enable(en); + + if (en) { + s->intr_enabled = true; + /* Ok - report back to driver */ + vtd_set_clear_mask_long(s, DMAR_GSTS_REG, 0, VTD_GSTS_IRES); + } else { + s->intr_enabled = false; + /* Ok - report back to driver */ + vtd_set_clear_mask_long(s, DMAR_GSTS_REG, VTD_GSTS_IRES, 0); + } +} + +/* Handle write to Global Command Register */ +static void vtd_handle_gcmd_write(IntelIOMMUState *s) +{ + uint32_t status = vtd_get_long_raw(s, DMAR_GSTS_REG); + uint32_t val = vtd_get_long_raw(s, DMAR_GCMD_REG); + uint32_t changed = status ^ val; + + trace_vtd_reg_write_gcmd(status, val); + if (changed & VTD_GCMD_TE) { + /* Translation enable/disable */ + vtd_handle_gcmd_te(s, val & VTD_GCMD_TE); + } + if (val & VTD_GCMD_SRTP) { + /* Set/update the root-table pointer */ + vtd_handle_gcmd_srtp(s); + } + if (changed & VTD_GCMD_QIE) { + /* Queued Invalidation Enable */ + vtd_handle_gcmd_qie(s, val & VTD_GCMD_QIE); + } + if (val & VTD_GCMD_SIRTP) { + /* Set/update the interrupt remapping root-table pointer */ + vtd_handle_gcmd_sirtp(s); + } + if (changed & VTD_GCMD_IRE) { + /* Interrupt remap enable/disable */ + vtd_handle_gcmd_ire(s, val & VTD_GCMD_IRE); + } +} + +/* Handle write to Context Command Register */ +static void vtd_handle_ccmd_write(IntelIOMMUState *s) +{ + uint64_t ret; + uint64_t val = vtd_get_quad_raw(s, DMAR_CCMD_REG); + + /* Context-cache invalidation request */ + if (val & VTD_CCMD_ICC) { + if (s->qi_enabled) { + error_report_once("Queued Invalidation enabled, " + "should not use register-based invalidation"); + return; + } + ret = vtd_context_cache_invalidate(s, val); + /* Invalidation completed. 
Change something to show */ + vtd_set_clear_mask_quad(s, DMAR_CCMD_REG, VTD_CCMD_ICC, 0ULL); + ret = vtd_set_clear_mask_quad(s, DMAR_CCMD_REG, VTD_CCMD_CAIG_MASK, + ret); + } +} + +/* Handle write to IOTLB Invalidation Register */ +static void vtd_handle_iotlb_write(IntelIOMMUState *s) +{ + uint64_t ret; + uint64_t val = vtd_get_quad_raw(s, DMAR_IOTLB_REG); + + /* IOTLB invalidation request */ + if (val & VTD_TLB_IVT) { + if (s->qi_enabled) { + error_report_once("Queued Invalidation enabled, " + "should not use register-based invalidation"); + return; + } + ret = vtd_iotlb_flush(s, val); + /* Invalidation completed. Change something to show */ + vtd_set_clear_mask_quad(s, DMAR_IOTLB_REG, VTD_TLB_IVT, 0ULL); + ret = vtd_set_clear_mask_quad(s, DMAR_IOTLB_REG, + VTD_TLB_FLUSH_GRANU_MASK_A, ret); + } +} + +/* Fetch an Invalidation Descriptor from the Invalidation Queue */ +static bool vtd_get_inv_desc(IntelIOMMUState *s, + VTDInvDesc *inv_desc) +{ + dma_addr_t base_addr = s->iq; + uint32_t offset = s->iq_head; + uint32_t dw = s->iq_dw ? 32 : 16; + dma_addr_t addr = base_addr + offset * dw; + + if (dma_memory_read(&address_space_memory, addr, inv_desc, dw)) { + error_report_once("Read INV DESC failed."); + return false; + } + inv_desc->lo = le64_to_cpu(inv_desc->lo); + inv_desc->hi = le64_to_cpu(inv_desc->hi); + if (dw == 32) { + inv_desc->val[2] = le64_to_cpu(inv_desc->val[2]); + inv_desc->val[3] = le64_to_cpu(inv_desc->val[3]); + } + return true; +} + +static bool vtd_process_wait_desc(IntelIOMMUState *s, VTDInvDesc *inv_desc) +{ + if ((inv_desc->hi & VTD_INV_DESC_WAIT_RSVD_HI) || + (inv_desc->lo & VTD_INV_DESC_WAIT_RSVD_LO)) { + error_report_once("%s: invalid wait desc: hi=%"PRIx64", lo=%"PRIx64 + " (reserved nonzero)", __func__, inv_desc->hi, + inv_desc->lo); + return false; + } + if (inv_desc->lo & VTD_INV_DESC_WAIT_SW) { + /* Status Write */ + uint32_t status_data = (uint32_t)(inv_desc->lo >> + VTD_INV_DESC_WAIT_DATA_SHIFT); + + assert(!(inv_desc->lo & VTD_INV_DESC_WAIT_IF)); + + /* FIXME: need to be masked with HAW? 
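+         * A possible masking, using a hypothetical VTD_HAW_MASK()
+         * helper derived from s->aw_bits, would be:
+         *
+         *     status_addr = inv_desc->hi & VTD_HAW_MASK(s->aw_bits);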
*/
+        dma_addr_t status_addr = inv_desc->hi;
+        trace_vtd_inv_desc_wait_sw(status_addr, status_data);
+        status_data = cpu_to_le32(status_data);
+        if (dma_memory_write(&address_space_memory, status_addr, &status_data,
+                             sizeof(status_data))) {
+            trace_vtd_inv_desc_wait_write_fail(inv_desc->hi, inv_desc->lo);
+            return false;
+        }
+    } else if (inv_desc->lo & VTD_INV_DESC_WAIT_IF) {
+        /* Interrupt flag */
+        vtd_generate_completion_event(s);
+    } else {
+        error_report_once("%s: invalid wait desc: hi=%"PRIx64", lo=%"PRIx64
+                          " (unknown type)", __func__, inv_desc->hi,
+                          inv_desc->lo);
+        return false;
+    }
+    return true;
+}
+
+static bool vtd_process_context_cache_desc(IntelIOMMUState *s,
+                                           VTDInvDesc *inv_desc)
+{
+    uint16_t sid, fmask;
+
+    if ((inv_desc->lo & VTD_INV_DESC_CC_RSVD) || inv_desc->hi) {
+        error_report_once("%s: invalid cc inv desc: hi=%"PRIx64", lo=%"PRIx64
+                          " (reserved nonzero)", __func__, inv_desc->hi,
+                          inv_desc->lo);
+        return false;
+    }
+    switch (inv_desc->lo & VTD_INV_DESC_CC_G) {
+    case VTD_INV_DESC_CC_DOMAIN:
+        trace_vtd_inv_desc_cc_domain(
+            (uint16_t)VTD_INV_DESC_CC_DID(inv_desc->lo));
+        /* Fall through */
+    case VTD_INV_DESC_CC_GLOBAL:
+        vtd_context_global_invalidate(s);
+        break;
+
+    case VTD_INV_DESC_CC_DEVICE:
+        sid = VTD_INV_DESC_CC_SID(inv_desc->lo);
+        fmask = VTD_INV_DESC_CC_FM(inv_desc->lo);
+        vtd_context_device_invalidate(s, sid, fmask);
+        break;
+
+    default:
+        error_report_once("%s: invalid cc inv desc: hi=%"PRIx64", lo=%"PRIx64
+                          " (invalid type)", __func__, inv_desc->hi,
+                          inv_desc->lo);
+        return false;
+    }
+    return true;
+}
+
+static bool vtd_process_iotlb_desc(IntelIOMMUState *s, VTDInvDesc *inv_desc)
+{
+    uint16_t domain_id;
+    uint8_t am;
+    hwaddr addr;
+
+    if ((inv_desc->lo & VTD_INV_DESC_IOTLB_RSVD_LO) ||
+        (inv_desc->hi & VTD_INV_DESC_IOTLB_RSVD_HI)) {
+        error_report_once("%s: invalid iotlb inv desc: hi=0x%"PRIx64
+                          ", lo=0x%"PRIx64" (reserved bits nonzero)",
+                          __func__, inv_desc->hi, inv_desc->lo);
+        return false;
+    }
+
+    switch (inv_desc->lo & VTD_INV_DESC_IOTLB_G) {
+    case VTD_INV_DESC_IOTLB_GLOBAL:
+        vtd_iotlb_global_invalidate(s);
+        break;
+
+    case VTD_INV_DESC_IOTLB_DOMAIN:
+        domain_id = VTD_INV_DESC_IOTLB_DID(inv_desc->lo);
+        vtd_iotlb_domain_invalidate(s, domain_id);
+        break;
+
+    case VTD_INV_DESC_IOTLB_PAGE:
+        domain_id = VTD_INV_DESC_IOTLB_DID(inv_desc->lo);
+        addr = VTD_INV_DESC_IOTLB_ADDR(inv_desc->hi);
+        am = VTD_INV_DESC_IOTLB_AM(inv_desc->hi);
+        if (am > VTD_MAMV) {
+            error_report_once("%s: invalid iotlb inv desc: hi=0x%"PRIx64
+                              ", lo=0x%"PRIx64" (am=%u > VTD_MAMV=%u)",
+                              __func__, inv_desc->hi, inv_desc->lo,
+                              am, (unsigned)VTD_MAMV);
+            return false;
+        }
+        vtd_iotlb_page_invalidate(s, domain_id, addr, am);
+        break;
+
+    default:
+        error_report_once("%s: invalid iotlb inv desc: hi=0x%"PRIx64
+                          ", lo=0x%"PRIx64" (type mismatch: 0x%"PRIx64")",
+                          __func__, inv_desc->hi, inv_desc->lo,
+                          inv_desc->lo & VTD_INV_DESC_IOTLB_G);
+        return false;
+    }
+    return true;
+}
+
+static bool vtd_process_inv_iec_desc(IntelIOMMUState *s,
+                                     VTDInvDesc *inv_desc)
+{
+    trace_vtd_inv_desc_iec(inv_desc->iec.granularity,
+                           inv_desc->iec.index,
+                           inv_desc->iec.index_mask);
+
+    vtd_iec_notify_all(s, !inv_desc->iec.granularity,
+                       inv_desc->iec.index,
+                       inv_desc->iec.index_mask);
+    return true;
+}
+
+static bool vtd_process_device_iotlb_desc(IntelIOMMUState *s,
+                                          VTDInvDesc *inv_desc)
+{
+    VTDAddressSpace *vtd_dev_as;
+    IOMMUTLBEvent event;
+    struct VTDBus *vtd_bus;
+    hwaddr addr;
+    uint64_t sz;
+    uint16_t sid;
+    uint8_t devfn;
+    bool size;
+    uint8_t bus_num;
+
+    addr =
VTD_INV_DESC_DEVICE_IOTLB_ADDR(inv_desc->hi);
+    sid = VTD_INV_DESC_DEVICE_IOTLB_SID(inv_desc->lo);
+    devfn = sid & 0xff;
+    bus_num = sid >> 8;
+    size = VTD_INV_DESC_DEVICE_IOTLB_SIZE(inv_desc->hi);
+
+    if ((inv_desc->lo & VTD_INV_DESC_DEVICE_IOTLB_RSVD_LO) ||
+        (inv_desc->hi & VTD_INV_DESC_DEVICE_IOTLB_RSVD_HI)) {
+        error_report_once("%s: invalid dev-iotlb inv desc: hi=%"PRIx64
+                          ", lo=%"PRIx64" (reserved nonzero)", __func__,
+                          inv_desc->hi, inv_desc->lo);
+        return false;
+    }
+
+    vtd_bus = vtd_find_as_from_bus_num(s, bus_num);
+    if (!vtd_bus) {
+        goto done;
+    }
+
+    vtd_dev_as = vtd_bus->dev_as[devfn];
+    if (!vtd_dev_as) {
+        goto done;
+    }
+
+    /* According to ATS spec table 2.4:
+     * S = 0, bits 15:12 = xxxx     range size: 4K
+     * S = 1, bits 15:12 = xxx0     range size: 8K
+     * S = 1, bits 15:12 = xx01     range size: 16K
+     * S = 1, bits 15:12 = x011     range size: 32K
+     * S = 1, bits 15:12 = 0111     range size: 64K
+     * ...
+     */
+    if (size) {
+        sz = (VTD_PAGE_SIZE * 2) << cto64(addr >> VTD_PAGE_SHIFT);
+        addr &= ~(sz - 1);
+    } else {
+        sz = VTD_PAGE_SIZE;
+    }
+
+    event.type = IOMMU_NOTIFIER_DEVIOTLB_UNMAP;
+    event.entry.target_as = &vtd_dev_as->as;
+    event.entry.addr_mask = sz - 1;
+    event.entry.iova = addr;
+    event.entry.perm = IOMMU_NONE;
+    event.entry.translated_addr = 0;
+    memory_region_notify_iommu(&vtd_dev_as->iommu, 0, event);
+
+done:
+    return true;
+}
+
+static bool vtd_process_inv_desc(IntelIOMMUState *s)
+{
+    VTDInvDesc inv_desc;
+    uint8_t desc_type;
+
+    trace_vtd_inv_qi_head(s->iq_head);
+    if (!vtd_get_inv_desc(s, &inv_desc)) {
+        s->iq_last_desc_type = VTD_INV_DESC_NONE;
+        return false;
+    }
+
+    desc_type = inv_desc.lo & VTD_INV_DESC_TYPE;
+    /* FIXME: should update at first or at last? */
+    s->iq_last_desc_type = desc_type;
+
+    switch (desc_type) {
+    case VTD_INV_DESC_CC:
+        trace_vtd_inv_desc("context-cache", inv_desc.hi, inv_desc.lo);
+        if (!vtd_process_context_cache_desc(s, &inv_desc)) {
+            return false;
+        }
+        break;
+
+    case VTD_INV_DESC_IOTLB:
+        trace_vtd_inv_desc("iotlb", inv_desc.hi, inv_desc.lo);
+        if (!vtd_process_iotlb_desc(s, &inv_desc)) {
+            return false;
+        }
+        break;
+
+    /*
+     * TODO: the handling of the two cases below will be implemented in
+     * a future series. To make a guest (whose iommu driver integrates
+     * the scalable mode support patch set) work, returning true is
+     * enough for now.
+     */
+    case VTD_INV_DESC_PC:
+        break;
+
+    case VTD_INV_DESC_PIOTLB:
+        break;
+
+    case VTD_INV_DESC_WAIT:
+        trace_vtd_inv_desc("wait", inv_desc.hi, inv_desc.lo);
+        if (!vtd_process_wait_desc(s, &inv_desc)) {
+            return false;
+        }
+        break;
+
+    case VTD_INV_DESC_IEC:
+        trace_vtd_inv_desc("iec", inv_desc.hi, inv_desc.lo);
+        if (!vtd_process_inv_iec_desc(s, &inv_desc)) {
+            return false;
+        }
+        break;
+
+    case VTD_INV_DESC_DEVICE:
+        trace_vtd_inv_desc("device", inv_desc.hi, inv_desc.lo);
+        if (!vtd_process_device_iotlb_desc(s, &inv_desc)) {
+            return false;
+        }
+        break;
+
+    default:
+        error_report_once("%s: invalid inv desc: hi=%"PRIx64", lo=%"PRIx64
+                          " (unknown type)", __func__, inv_desc.hi,
+                          inv_desc.lo);
+        return false;
+    }
+    s->iq_head++;
+    if (s->iq_head == s->iq_size) {
+        s->iq_head = 0;
+    }
+    return true;
+}
+
+/* Try to fetch and process more Invalidation Descriptors */
+static void vtd_fetch_inv_desc(IntelIOMMUState *s)
+{
+    int qi_shift;
+
+    /* Refer to 10.4.23 of VT-d spec 3.0 */
+    qi_shift = s->iq_dw ?
VTD_IQH_QH_SHIFT_5 : VTD_IQH_QH_SHIFT_4; + + trace_vtd_inv_qi_fetch(); + + if (s->iq_tail >= s->iq_size) { + /* Detects an invalid Tail pointer */ + error_report_once("%s: detected invalid QI tail " + "(tail=0x%x, size=0x%x)", + __func__, s->iq_tail, s->iq_size); + vtd_handle_inv_queue_error(s); + return; + } + while (s->iq_head != s->iq_tail) { + if (!vtd_process_inv_desc(s)) { + /* Invalidation Queue Errors */ + vtd_handle_inv_queue_error(s); + break; + } + /* Must update the IQH_REG in time */ + vtd_set_quad_raw(s, DMAR_IQH_REG, + (((uint64_t)(s->iq_head)) << qi_shift) & + VTD_IQH_QH_MASK); + } +} + +/* Handle write to Invalidation Queue Tail Register */ +static void vtd_handle_iqt_write(IntelIOMMUState *s) +{ + uint64_t val = vtd_get_quad_raw(s, DMAR_IQT_REG); + + if (s->iq_dw && (val & VTD_IQT_QT_256_RSV_BIT)) { + error_report_once("%s: RSV bit is set: val=0x%"PRIx64, + __func__, val); + return; + } + s->iq_tail = VTD_IQT_QT(s->iq_dw, val); + trace_vtd_inv_qi_tail(s->iq_tail); + + if (s->qi_enabled && !(vtd_get_long_raw(s, DMAR_FSTS_REG) & VTD_FSTS_IQE)) { + /* Process Invalidation Queue here */ + vtd_fetch_inv_desc(s); + } +} + +static void vtd_handle_fsts_write(IntelIOMMUState *s) +{ + uint32_t fsts_reg = vtd_get_long_raw(s, DMAR_FSTS_REG); + uint32_t fectl_reg = vtd_get_long_raw(s, DMAR_FECTL_REG); + uint32_t status_fields = VTD_FSTS_PFO | VTD_FSTS_PPF | VTD_FSTS_IQE; + + if ((fectl_reg & VTD_FECTL_IP) && !(fsts_reg & status_fields)) { + vtd_set_clear_mask_long(s, DMAR_FECTL_REG, VTD_FECTL_IP, 0); + trace_vtd_fsts_clear_ip(); + } + /* FIXME: when IQE is Clear, should we try to fetch some Invalidation + * Descriptors if there are any when Queued Invalidation is enabled? + */ +} + +static void vtd_handle_fectl_write(IntelIOMMUState *s) +{ + uint32_t fectl_reg; + /* FIXME: when software clears the IM field, check the IP field. But do we + * need to compare the old value and the new value to conclude that + * software clears the IM field? Or just check if the IM field is zero? + */ + fectl_reg = vtd_get_long_raw(s, DMAR_FECTL_REG); + + trace_vtd_reg_write_fectl(fectl_reg); + + if ((fectl_reg & VTD_FECTL_IP) && !(fectl_reg & VTD_FECTL_IM)) { + vtd_generate_interrupt(s, DMAR_FEADDR_REG, DMAR_FEDATA_REG); + vtd_set_clear_mask_long(s, DMAR_FECTL_REG, VTD_FECTL_IP, 0); + } +} + +static void vtd_handle_ics_write(IntelIOMMUState *s) +{ + uint32_t ics_reg = vtd_get_long_raw(s, DMAR_ICS_REG); + uint32_t iectl_reg = vtd_get_long_raw(s, DMAR_IECTL_REG); + + if ((iectl_reg & VTD_IECTL_IP) && !(ics_reg & VTD_ICS_IWC)) { + trace_vtd_reg_ics_clear_ip(); + vtd_set_clear_mask_long(s, DMAR_IECTL_REG, VTD_IECTL_IP, 0); + } +} + +static void vtd_handle_iectl_write(IntelIOMMUState *s) +{ + uint32_t iectl_reg; + /* FIXME: when software clears the IM field, check the IP field. But do we + * need to compare the old value and the new value to conclude that + * software clears the IM field? Or just check if the IM field is zero? 
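+ *
+ * The code below takes the simpler reading: it fires whenever IP is
+ * set and IM is clear in the value just written, without comparing
+ * old and new values.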
+ */ + iectl_reg = vtd_get_long_raw(s, DMAR_IECTL_REG); + + trace_vtd_reg_write_iectl(iectl_reg); + + if ((iectl_reg & VTD_IECTL_IP) && !(iectl_reg & VTD_IECTL_IM)) { + vtd_generate_interrupt(s, DMAR_IEADDR_REG, DMAR_IEDATA_REG); + vtd_set_clear_mask_long(s, DMAR_IECTL_REG, VTD_IECTL_IP, 0); + } +} + +static uint64_t vtd_mem_read(void *opaque, hwaddr addr, unsigned size) +{ + IntelIOMMUState *s = opaque; + uint64_t val; + + trace_vtd_reg_read(addr, size); + + if (addr + size > DMAR_REG_SIZE) { + error_report_once("%s: MMIO over range: addr=0x%" PRIx64 + " size=0x%x", __func__, addr, size); + return (uint64_t)-1; + } + + switch (addr) { + /* Root Table Address Register, 64-bit */ + case DMAR_RTADDR_REG: + val = vtd_get_quad_raw(s, DMAR_RTADDR_REG); + if (size == 4) { + val = val & ((1ULL << 32) - 1); + } + break; + + case DMAR_RTADDR_REG_HI: + assert(size == 4); + val = vtd_get_quad_raw(s, DMAR_RTADDR_REG) >> 32; + break; + + /* Invalidation Queue Address Register, 64-bit */ + case DMAR_IQA_REG: + val = s->iq | (vtd_get_quad(s, DMAR_IQA_REG) & VTD_IQA_QS); + if (size == 4) { + val = val & ((1ULL << 32) - 1); + } + break; + + case DMAR_IQA_REG_HI: + assert(size == 4); + val = s->iq >> 32; + break; + + default: + if (size == 4) { + val = vtd_get_long(s, addr); + } else { + val = vtd_get_quad(s, addr); + } + } + + return val; +} + +static void vtd_mem_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + IntelIOMMUState *s = opaque; + + trace_vtd_reg_write(addr, size, val); + + if (addr + size > DMAR_REG_SIZE) { + error_report_once("%s: MMIO over range: addr=0x%" PRIx64 + " size=0x%x", __func__, addr, size); + return; + } + + switch (addr) { + /* Global Command Register, 32-bit */ + case DMAR_GCMD_REG: + vtd_set_long(s, addr, val); + vtd_handle_gcmd_write(s); + break; + + /* Context Command Register, 64-bit */ + case DMAR_CCMD_REG: + if (size == 4) { + vtd_set_long(s, addr, val); + } else { + vtd_set_quad(s, addr, val); + vtd_handle_ccmd_write(s); + } + break; + + case DMAR_CCMD_REG_HI: + assert(size == 4); + vtd_set_long(s, addr, val); + vtd_handle_ccmd_write(s); + break; + + /* IOTLB Invalidation Register, 64-bit */ + case DMAR_IOTLB_REG: + if (size == 4) { + vtd_set_long(s, addr, val); + } else { + vtd_set_quad(s, addr, val); + vtd_handle_iotlb_write(s); + } + break; + + case DMAR_IOTLB_REG_HI: + assert(size == 4); + vtd_set_long(s, addr, val); + vtd_handle_iotlb_write(s); + break; + + /* Invalidate Address Register, 64-bit */ + case DMAR_IVA_REG: + if (size == 4) { + vtd_set_long(s, addr, val); + } else { + vtd_set_quad(s, addr, val); + } + break; + + case DMAR_IVA_REG_HI: + assert(size == 4); + vtd_set_long(s, addr, val); + break; + + /* Fault Status Register, 32-bit */ + case DMAR_FSTS_REG: + assert(size == 4); + vtd_set_long(s, addr, val); + vtd_handle_fsts_write(s); + break; + + /* Fault Event Control Register, 32-bit */ + case DMAR_FECTL_REG: + assert(size == 4); + vtd_set_long(s, addr, val); + vtd_handle_fectl_write(s); + break; + + /* Fault Event Data Register, 32-bit */ + case DMAR_FEDATA_REG: + assert(size == 4); + vtd_set_long(s, addr, val); + break; + + /* Fault Event Address Register, 32-bit */ + case DMAR_FEADDR_REG: + if (size == 4) { + vtd_set_long(s, addr, val); + } else { + /* + * While the register is 32-bit only, some guests (Xen...) write to + * it with 64-bit. 
+ */ + vtd_set_quad(s, addr, val); + } + break; + + /* Fault Event Upper Address Register, 32-bit */ + case DMAR_FEUADDR_REG: + assert(size == 4); + vtd_set_long(s, addr, val); + break; + + /* Protected Memory Enable Register, 32-bit */ + case DMAR_PMEN_REG: + assert(size == 4); + vtd_set_long(s, addr, val); + break; + + /* Root Table Address Register, 64-bit */ + case DMAR_RTADDR_REG: + if (size == 4) { + vtd_set_long(s, addr, val); + } else { + vtd_set_quad(s, addr, val); + } + break; + + case DMAR_RTADDR_REG_HI: + assert(size == 4); + vtd_set_long(s, addr, val); + break; + + /* Invalidation Queue Tail Register, 64-bit */ + case DMAR_IQT_REG: + if (size == 4) { + vtd_set_long(s, addr, val); + } else { + vtd_set_quad(s, addr, val); + } + vtd_handle_iqt_write(s); + break; + + case DMAR_IQT_REG_HI: + assert(size == 4); + vtd_set_long(s, addr, val); + /* 19:63 of IQT_REG is RsvdZ, do nothing here */ + break; + + /* Invalidation Queue Address Register, 64-bit */ + case DMAR_IQA_REG: + if (size == 4) { + vtd_set_long(s, addr, val); + } else { + vtd_set_quad(s, addr, val); + } + if (s->ecap & VTD_ECAP_SMTS && + val & VTD_IQA_DW_MASK) { + s->iq_dw = true; + } else { + s->iq_dw = false; + } + break; + + case DMAR_IQA_REG_HI: + assert(size == 4); + vtd_set_long(s, addr, val); + break; + + /* Invalidation Completion Status Register, 32-bit */ + case DMAR_ICS_REG: + assert(size == 4); + vtd_set_long(s, addr, val); + vtd_handle_ics_write(s); + break; + + /* Invalidation Event Control Register, 32-bit */ + case DMAR_IECTL_REG: + assert(size == 4); + vtd_set_long(s, addr, val); + vtd_handle_iectl_write(s); + break; + + /* Invalidation Event Data Register, 32-bit */ + case DMAR_IEDATA_REG: + assert(size == 4); + vtd_set_long(s, addr, val); + break; + + /* Invalidation Event Address Register, 32-bit */ + case DMAR_IEADDR_REG: + assert(size == 4); + vtd_set_long(s, addr, val); + break; + + /* Invalidation Event Upper Address Register, 32-bit */ + case DMAR_IEUADDR_REG: + assert(size == 4); + vtd_set_long(s, addr, val); + break; + + /* Fault Recording Registers, 128-bit */ + case DMAR_FRCD_REG_0_0: + if (size == 4) { + vtd_set_long(s, addr, val); + } else { + vtd_set_quad(s, addr, val); + } + break; + + case DMAR_FRCD_REG_0_1: + assert(size == 4); + vtd_set_long(s, addr, val); + break; + + case DMAR_FRCD_REG_0_2: + if (size == 4) { + vtd_set_long(s, addr, val); + } else { + vtd_set_quad(s, addr, val); + /* May clear bit 127 (Fault), update PPF */ + vtd_update_fsts_ppf(s); + } + break; + + case DMAR_FRCD_REG_0_3: + assert(size == 4); + vtd_set_long(s, addr, val); + /* May clear bit 127 (Fault), update PPF */ + vtd_update_fsts_ppf(s); + break; + + case DMAR_IRTA_REG: + if (size == 4) { + vtd_set_long(s, addr, val); + } else { + vtd_set_quad(s, addr, val); + } + break; + + case DMAR_IRTA_REG_HI: + assert(size == 4); + vtd_set_long(s, addr, val); + break; + + default: + if (size == 4) { + vtd_set_long(s, addr, val); + } else { + vtd_set_quad(s, addr, val); + } + } +} + +static IOMMUTLBEntry vtd_iommu_translate(IOMMUMemoryRegion *iommu, hwaddr addr, + IOMMUAccessFlags flag, int iommu_idx) +{ + VTDAddressSpace *vtd_as = container_of(iommu, VTDAddressSpace, iommu); + IntelIOMMUState *s = vtd_as->iommu_state; + IOMMUTLBEntry iotlb = { + /* We'll fill in the rest later. 
*/ + .target_as = &address_space_memory, + }; + bool success; + + if (likely(s->dmar_enabled)) { + success = vtd_do_iommu_translate(vtd_as, vtd_as->bus, vtd_as->devfn, + addr, flag & IOMMU_WO, &iotlb); + } else { + /* DMAR disabled, passthrough, use 4k-page */ + iotlb.iova = addr & VTD_PAGE_MASK_4K; + iotlb.translated_addr = addr & VTD_PAGE_MASK_4K; + iotlb.addr_mask = ~VTD_PAGE_MASK_4K; + iotlb.perm = IOMMU_RW; + success = true; + } + + if (likely(success)) { + trace_vtd_dmar_translate(pci_bus_num(vtd_as->bus), + VTD_PCI_SLOT(vtd_as->devfn), + VTD_PCI_FUNC(vtd_as->devfn), + iotlb.iova, iotlb.translated_addr, + iotlb.addr_mask); + } else { + error_report_once("%s: detected translation failure " + "(dev=%02x:%02x:%02x, iova=0x%" PRIx64 ")", + __func__, pci_bus_num(vtd_as->bus), + VTD_PCI_SLOT(vtd_as->devfn), + VTD_PCI_FUNC(vtd_as->devfn), + addr); + } + + return iotlb; +} + +static int vtd_iommu_notify_flag_changed(IOMMUMemoryRegion *iommu, + IOMMUNotifierFlag old, + IOMMUNotifierFlag new, + Error **errp) +{ + VTDAddressSpace *vtd_as = container_of(iommu, VTDAddressSpace, iommu); + IntelIOMMUState *s = vtd_as->iommu_state; + + /* Update per-address-space notifier flags */ + vtd_as->notifier_flags = new; + + if (old == IOMMU_NOTIFIER_NONE) { + QLIST_INSERT_HEAD(&s->vtd_as_with_notifiers, vtd_as, next); + } else if (new == IOMMU_NOTIFIER_NONE) { + QLIST_REMOVE(vtd_as, next); + } + return 0; +} + +static int vtd_post_load(void *opaque, int version_id) +{ + IntelIOMMUState *iommu = opaque; + + /* + * Memory regions are dynamically turned on/off depending on + * context entry configurations from the guest. After migration, + * we need to make sure the memory regions are still correct. + */ + vtd_switch_address_space_all(iommu); + + /* + * We don't need to migrate the root_scalable because we can + * simply do the calculation after the loading is complete. We + * could do similar things with root, dmar_enabled, etc.; however, + * since we already migrate them, we'd better keep them for + * migration compatibility.
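The DMAR-disabled branch of vtd_iommu_translate() above builds an identity mapping clipped to one 4 KiB page. A minimal standalone sketch of that arithmetic, with the mask reproduced locally rather than taken from the header introduced later in this patch:

#include <assert.h>
#include <stdint.h>

#define PAGE_MASK_4K (~((1ULL << 12) - 1))   /* mirrors VTD_PAGE_MASK_4K */

int main(void)
{
    uint64_t addr = 0x12345678;
    uint64_t iova = addr & PAGE_MASK_4K;     /* 0x12345000: page base */
    uint64_t mask = ~PAGE_MASK_4K;           /* 0xfff: one 4 KiB page */

    assert(iova == 0x12345000ULL && mask == 0xfffULL);
    /* translated_addr == iova (identity), perm == IOMMU_RW */
    return 0;
}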
+ */ + vtd_update_scalable_state(iommu); + + return 0; +} + +static const VMStateDescription vtd_vmstate = { + .name = "iommu-intel", + .version_id = 1, + .minimum_version_id = 1, + .priority = MIG_PRI_IOMMU, + .post_load = vtd_post_load, + .fields = (VMStateField[]) { + VMSTATE_UINT64(root, IntelIOMMUState), + VMSTATE_UINT64(intr_root, IntelIOMMUState), + VMSTATE_UINT64(iq, IntelIOMMUState), + VMSTATE_UINT32(intr_size, IntelIOMMUState), + VMSTATE_UINT16(iq_head, IntelIOMMUState), + VMSTATE_UINT16(iq_tail, IntelIOMMUState), + VMSTATE_UINT16(iq_size, IntelIOMMUState), + VMSTATE_UINT16(next_frcd_reg, IntelIOMMUState), + VMSTATE_UINT8_ARRAY(csr, IntelIOMMUState, DMAR_REG_SIZE), + VMSTATE_UINT8(iq_last_desc_type, IntelIOMMUState), + VMSTATE_UNUSED(1), /* bool root_extended is obsolete by VT-d */ + VMSTATE_BOOL(dmar_enabled, IntelIOMMUState), + VMSTATE_BOOL(qi_enabled, IntelIOMMUState), + VMSTATE_BOOL(intr_enabled, IntelIOMMUState), + VMSTATE_BOOL(intr_eime, IntelIOMMUState), + VMSTATE_END_OF_LIST() + } +}; + +static const MemoryRegionOps vtd_mem_ops = { + .read = vtd_mem_read, + .write = vtd_mem_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .impl = { + .min_access_size = 4, + .max_access_size = 8, + }, + .valid = { + .min_access_size = 4, + .max_access_size = 8, + }, +}; + +static Property vtd_properties[] = { + DEFINE_PROP_UINT32("version", IntelIOMMUState, version, 0), + DEFINE_PROP_ON_OFF_AUTO("eim", IntelIOMMUState, intr_eim, + ON_OFF_AUTO_AUTO), + DEFINE_PROP_BOOL("x-buggy-eim", IntelIOMMUState, buggy_eim, false), + DEFINE_PROP_UINT8("aw-bits", IntelIOMMUState, aw_bits, + VTD_HOST_ADDRESS_WIDTH), + DEFINE_PROP_BOOL("caching-mode", IntelIOMMUState, caching_mode, FALSE), + DEFINE_PROP_BOOL("x-scalable-mode", IntelIOMMUState, scalable_mode, FALSE), + DEFINE_PROP_BOOL("dma-drain", IntelIOMMUState, dma_drain, true), + DEFINE_PROP_END_OF_LIST(), +}; + +/* Read IRTE entry with specific index */ +static int vtd_irte_get(IntelIOMMUState *iommu, uint16_t index, + VTD_IR_TableEntry *entry, uint16_t sid) +{ + static const uint16_t vtd_svt_mask[VTD_SQ_MAX] = \ + {0xffff, 0xfffb, 0xfff9, 0xfff8}; + dma_addr_t addr = 0x00; + uint16_t mask, source_id; + uint8_t bus, bus_max, bus_min; + + if (index >= iommu->intr_size) { + error_report_once("%s: index too large: ind=0x%x", + __func__, index); + return -VTD_FR_IR_INDEX_OVER; + } + + addr = iommu->intr_root + index * sizeof(*entry); + if (dma_memory_read(&address_space_memory, addr, entry, + sizeof(*entry))) { + error_report_once("%s: read failed: ind=0x%x addr=0x%" PRIx64, + __func__, index, addr); + return -VTD_FR_IR_ROOT_INVAL; + } + + trace_vtd_ir_irte_get(index, le64_to_cpu(entry->data[1]), + le64_to_cpu(entry->data[0])); + + if (!entry->irte.present) { + error_report_once("%s: detected non-present IRTE " + "(index=%u, high=0x%" PRIx64 ", low=0x%" PRIx64 ")", + __func__, index, le64_to_cpu(entry->data[1]), + le64_to_cpu(entry->data[0])); + return -VTD_FR_IR_ENTRY_P; + } + + if (entry->irte.__reserved_0 || entry->irte.__reserved_1 || + entry->irte.__reserved_2) { + error_report_once("%s: detected non-zero reserved IRTE " + "(index=%u, high=0x%" PRIx64 ", low=0x%" PRIx64 ")", + __func__, index, le64_to_cpu(entry->data[1]), + le64_to_cpu(entry->data[0])); + return -VTD_FR_IR_IRTE_RSVD; + } + + if (sid != X86_IOMMU_SID_INVALID) { + /* Validate IRTE SID */ + source_id = le32_to_cpu(entry->irte.source_id); + switch (entry->irte.sid_vtype) { + case VTD_SVT_NONE: + break; + + case VTD_SVT_ALL: + mask = vtd_svt_mask[entry->irte.sid_q]; + if ((source_id & mask) 
!= (sid & mask)) { + error_report_once("%s: invalid IRTE SID " + "(index=%u, sid=%u, source_id=%u)", + __func__, index, sid, source_id); + return -VTD_FR_IR_SID_ERR; + } + break; + + case VTD_SVT_BUS: + bus_max = source_id >> 8; + bus_min = source_id & 0xff; + bus = sid >> 8; + if (bus > bus_max || bus < bus_min) { + error_report_once("%s: invalid SVT_BUS " + "(index=%u, bus=%u, min=%u, max=%u)", + __func__, index, bus, bus_min, bus_max); + return -VTD_FR_IR_SID_ERR; + } + break; + + default: + error_report_once("%s: detected invalid IRTE SVT " + "(index=%u, type=%d)", __func__, + index, entry->irte.sid_vtype); + /* Take this as verification failure. */ + return -VTD_FR_IR_SID_ERR; + } + } + + return 0; +} + +/* Fetch IRQ information of specific IR index */ +static int vtd_remap_irq_get(IntelIOMMUState *iommu, uint16_t index, + X86IOMMUIrq *irq, uint16_t sid) +{ + VTD_IR_TableEntry irte = {}; + int ret = 0; + + ret = vtd_irte_get(iommu, index, &irte, sid); + if (ret) { + return ret; + } + + irq->trigger_mode = irte.irte.trigger_mode; + irq->vector = irte.irte.vector; + irq->delivery_mode = irte.irte.delivery_mode; + irq->dest = le32_to_cpu(irte.irte.dest_id); + if (!iommu->intr_eime) { +#define VTD_IR_APIC_DEST_MASK (0xff00ULL) +#define VTD_IR_APIC_DEST_SHIFT (8) + irq->dest = (irq->dest & VTD_IR_APIC_DEST_MASK) >> + VTD_IR_APIC_DEST_SHIFT; + } + irq->dest_mode = irte.irte.dest_mode; + irq->redir_hint = irte.irte.redir_hint; + + trace_vtd_ir_remap(index, irq->trigger_mode, irq->vector, + irq->delivery_mode, irq->dest, irq->dest_mode); + + return 0; +} + +/* Interrupt remapping for MSI/MSI-X entry */ +static int vtd_interrupt_remap_msi(IntelIOMMUState *iommu, + MSIMessage *origin, + MSIMessage *translated, + uint16_t sid) +{ + int ret = 0; + VTD_IR_MSIAddress addr; + uint16_t index; + X86IOMMUIrq irq = {}; + + assert(origin && translated); + + trace_vtd_ir_remap_msi_req(origin->address, origin->data); + + if (!iommu || !iommu->intr_enabled) { + memcpy(translated, origin, sizeof(*origin)); + goto out; + } + + if (origin->address & VTD_MSI_ADDR_HI_MASK) { + error_report_once("%s: MSI address high 32 bits non-zero detected: " + "address=0x%" PRIx64, __func__, origin->address); + return -VTD_FR_IR_REQ_RSVD; + } + + addr.data = origin->address & VTD_MSI_ADDR_LO_MASK; + if (addr.addr.__head != 0xfee) { + error_report_once("%s: MSI address low 32 bit invalid: 0x%" PRIx32, + __func__, addr.data); + return -VTD_FR_IR_REQ_RSVD; + } + + /* This is compatible mode. 
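For the SVT_ALL verification in vtd_irte_get() above: the SQ field selects how many low (function-number) bits of the requester source-id are ignored when comparing against the IRTE source-id. A standalone sketch; the mask table is copied from vtd_svt_mask, and the example source-ids are made up.

#include <assert.h>
#include <stdint.h>

/* Indexed by the IRTE SQ field, as in vtd_svt_mask above. */
static const uint16_t svt_mask[4] = { 0xffff, 0xfffb, 0xfff9, 0xfff8 };

static int sid_matches(uint16_t sid, uint16_t irte_sid, unsigned sq)
{
    return (sid & svt_mask[sq]) == (irte_sid & svt_mask[sq]);
}

int main(void)
{
    /* SID is bus << 8 | devfn. With SQ == 3 the function bits are
     * ignored, so 00:02.0 through 00:02.7 all match. */
    assert(sid_matches(0x0017, 0x0010, 3));   /* 00:02.7 vs 00:02.0 */
    assert(!sid_matches(0x0018, 0x0010, 3));  /* 00:03.0 does not */
    return 0;
}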
*/ + if (addr.addr.int_mode != VTD_IR_INT_FORMAT_REMAP) { + memcpy(translated, origin, sizeof(*origin)); + goto out; + } + + index = addr.addr.index_h << 15 | le16_to_cpu(addr.addr.index_l); + +#define VTD_IR_MSI_DATA_SUBHANDLE (0x0000ffff) +#define VTD_IR_MSI_DATA_RESERVED (0xffff0000) + + if (addr.addr.sub_valid) { + /* See VT-d spec 5.1.2.2 and 5.1.3 on subhandle */ + index += origin->data & VTD_IR_MSI_DATA_SUBHANDLE; + } + + ret = vtd_remap_irq_get(iommu, index, &irq, sid); + if (ret) { + return ret; + } + + if (addr.addr.sub_valid) { + trace_vtd_ir_remap_type("MSI"); + if (origin->data & VTD_IR_MSI_DATA_RESERVED) { + error_report_once("%s: invalid IR MSI " + "(sid=%u, address=0x%" PRIx64 + ", data=0x%" PRIx32 ")", + __func__, sid, origin->address, origin->data); + return -VTD_FR_IR_REQ_RSVD; + } + } else { + uint8_t vector = origin->data & 0xff; + uint8_t trigger_mode = (origin->data >> MSI_DATA_TRIGGER_SHIFT) & 0x1; + + trace_vtd_ir_remap_type("IOAPIC"); + /* IOAPIC entry vector should be aligned with IRTE vector + * (see vt-d spec 5.1.5.1). */ + if (vector != irq.vector) { + trace_vtd_warn_ir_vector(sid, index, vector, irq.vector); + } + + /* The Trigger Mode field must match the Trigger Mode in the IRTE. + * (see vt-d spec 5.1.5.1). */ + if (trigger_mode != irq.trigger_mode) { + trace_vtd_warn_ir_trigger(sid, index, trigger_mode, + irq.trigger_mode); + } + } + + /* + * We'd better keep the last two bits, assuming that the guest OS + * might modify them. Keeping them does not hurt after all. + */ + irq.msi_addr_last_bits = addr.addr.__not_care; + + /* Translate X86IOMMUIrq to MSI message */ + x86_iommu_irq_to_msi_message(&irq, translated); + +out: + trace_vtd_ir_remap_msi(origin->address, origin->data, + translated->address, translated->data); + return 0; +} + +static int vtd_int_remap(X86IOMMUState *iommu, MSIMessage *src, + MSIMessage *dst, uint16_t sid) +{ + return vtd_interrupt_remap_msi(INTEL_IOMMU_DEVICE(iommu), + src, dst, sid); +} + +static MemTxResult vtd_mem_ir_read(void *opaque, hwaddr addr, + uint64_t *data, unsigned size, + MemTxAttrs attrs) +{ + return MEMTX_OK; +} + +static MemTxResult vtd_mem_ir_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size, + MemTxAttrs attrs) +{ + int ret = 0; + MSIMessage from = {}, to = {}; + uint16_t sid = X86_IOMMU_SID_INVALID; + + from.address = (uint64_t) addr + VTD_INTERRUPT_ADDR_FIRST; + from.data = (uint32_t) value; + + if (!attrs.unspecified) { + /* We have explicit Source ID */ + sid = attrs.requester_id; + } + + ret = vtd_interrupt_remap_msi(opaque, &from, &to, sid); + if (ret) { + /* TODO: report error */ + /* Drop this interrupt */ + return MEMTX_ERROR; + } + + apic_get_class()->send_msi(&to); + + return MEMTX_OK; +} + +static const MemoryRegionOps vtd_mem_ir_ops = { + .read_with_attrs = vtd_mem_ir_read, + .write_with_attrs = vtd_mem_ir_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .impl = { + .min_access_size = 4, + .max_access_size = 4, + }, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + }, +};
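vtd_interrupt_remap_msi() above recovers the IRTE handle from a remappable-format MSI address: bits 19:5 carry the low 15 index bits, bit 2 carries bit 15, and when SHV is set the 16-bit subhandle from the data register is added. A rough standalone sketch of that extraction (bit positions follow the VTD_IR_MSIAddress bitfield layout; the helper name is ours):

#include <assert.h>
#include <stdint.h>

static uint32_t ir_index(uint32_t msi_addr_lo, uint32_t msi_data, int shv)
{
    uint32_t index_l = (msi_addr_lo >> 5) & 0x7fff;  /* handle[14:0], bits 19:5 */
    uint32_t index_h = (msi_addr_lo >> 2) & 0x1;     /* handle[15], bit 2 */
    uint32_t index = (index_h << 15) | index_l;

    if (shv) {
        index += msi_data & 0xffff;  /* subhandle, VT-d spec 5.1.2.2/5.1.3 */
    }
    return index;
}

int main(void)
{
    /* 0xfee00000 with handle 42 encoded in bits 19:5, no subhandle. */
    assert(ir_index(0xfee00000u | (42u << 5), 0, 0) == 42);
    return 0;
}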
+ +VTDAddressSpace *vtd_find_add_as(IntelIOMMUState *s, PCIBus *bus, int devfn) +{ + uintptr_t key = (uintptr_t)bus; + VTDBus *vtd_bus = g_hash_table_lookup(s->vtd_as_by_busptr, &key); + VTDAddressSpace *vtd_dev_as; + char name[128]; + + if (!vtd_bus) { + uintptr_t *new_key = g_malloc(sizeof(*new_key)); + *new_key = (uintptr_t)bus; + /* No corresponding free() */ + vtd_bus = g_malloc0(sizeof(VTDBus) + sizeof(VTDAddressSpace *) * \ + PCI_DEVFN_MAX); + vtd_bus->bus = bus; + g_hash_table_insert(s->vtd_as_by_busptr, new_key, vtd_bus); + } + + vtd_dev_as = vtd_bus->dev_as[devfn]; + + if (!vtd_dev_as) { + snprintf(name, sizeof(name), "vtd-%02x.%x", PCI_SLOT(devfn), + PCI_FUNC(devfn)); + vtd_bus->dev_as[devfn] = vtd_dev_as = g_malloc0(sizeof(VTDAddressSpace)); + + vtd_dev_as->bus = bus; + vtd_dev_as->devfn = (uint8_t)devfn; + vtd_dev_as->iommu_state = s; + vtd_dev_as->context_cache_entry.context_cache_gen = 0; + vtd_dev_as->iova_tree = iova_tree_new(); + + memory_region_init(&vtd_dev_as->root, OBJECT(s), name, UINT64_MAX); + address_space_init(&vtd_dev_as->as, &vtd_dev_as->root, "vtd-root"); + + /* + * Build the DMAR-disabled container with aliases to the + * shared MRs. Note that aliasing to a shared memory region + * helps the memory API detect the same FlatViews, so devices + * can share one FlatView when DMAR is disabled (either by not + * providing "intel_iommu=on" or with "iommu=pt"). This greatly + * reduces the total number of FlatViews in the system, hence + * the VM runs faster. + */ + memory_region_init_alias(&vtd_dev_as->nodmar, OBJECT(s), + "vtd-nodmar", &s->mr_nodmar, 0, + memory_region_size(&s->mr_nodmar)); + + /* + * Build the per-device DMAR-enabled container. + * + * TODO: currently we have a per-device IOMMU memory region only + * because we have per-device IOMMU notifiers. If one day we can + * abstract the IOMMU notifiers out of the memory regions then we + * can also share the same memory region here just like what we + * have done above with the nodmar region. + */ + strcat(name, "-dmar"); + memory_region_init_iommu(&vtd_dev_as->iommu, sizeof(vtd_dev_as->iommu), + TYPE_INTEL_IOMMU_MEMORY_REGION, OBJECT(s), + name, UINT64_MAX); + memory_region_init_alias(&vtd_dev_as->iommu_ir, OBJECT(s), "vtd-ir", + &s->mr_ir, 0, memory_region_size(&s->mr_ir)); + memory_region_add_subregion_overlap(MEMORY_REGION(&vtd_dev_as->iommu), + VTD_INTERRUPT_ADDR_FIRST, + &vtd_dev_as->iommu_ir, 1); + + /* + * Hook both containers under the root container; we switch + * between DMAR & noDMAR by enabling/disabling the corresponding + * sub-containers. + */ + memory_region_add_subregion_overlap(&vtd_dev_as->root, 0, + MEMORY_REGION(&vtd_dev_as->iommu), + 0); + memory_region_add_subregion_overlap(&vtd_dev_as->root, 0, + &vtd_dev_as->nodmar, 0); + + vtd_switch_address_space(vtd_dev_as); + } + return vtd_dev_as; +}
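The unmap helper that follows walks a notifier's range in power-of-two-sized, naturally aligned chunks via dma_aligned_pow2_mask(). Below is a simplified standalone reimplementation of that chunking rule, for illustration only; the real helper also caps the chunk at the address-width limit.

#include <assert.h>
#include <stdint.h>

/* Largest power-of-two chunk that is naturally aligned at 'start' and
 * no larger than 'remain' bytes. */
static uint64_t pow2_chunk(uint64_t start, uint64_t remain)
{
    uint64_t align = start ? (start & -start) : (1ULL << 63);
    uint64_t size = 1;

    while (size < align && (size << 1) <= remain) {
        size <<= 1;
    }
    return size;
}

int main(void)
{
    /* [0x3000, 0xffff] is covered by 4 KiB + 16 KiB + 32 KiB chunks. */
    assert(pow2_chunk(0x3000, 0xd000) == 0x1000);
    assert(pow2_chunk(0x4000, 0xc000) == 0x4000);
    assert(pow2_chunk(0x8000, 0x8000) == 0x8000);
    return 0;
}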
+ +/* Unmap the whole range in the notifier's scope. */ +static void vtd_address_space_unmap(VTDAddressSpace *as, IOMMUNotifier *n) +{ + hwaddr size, remain; + hwaddr start = n->start; + hwaddr end = n->end; + IntelIOMMUState *s = as->iommu_state; + DMAMap map; + + /* + * Note: all the code in this function assumes that IOVA bits are + * no more than VTD_MGAW bits (which is restricted by the VT-d + * spec); otherwise we would need to consider 64-bit overflow. + */ + + if (end > VTD_ADDRESS_SIZE(s->aw_bits) - 1) { + /* + * No need to unmap regions bigger than the whole VT-d + * supported address space + */ + end = VTD_ADDRESS_SIZE(s->aw_bits) - 1; + } + + assert(start <= end); + size = remain = end - start + 1; + + while (remain >= VTD_PAGE_SIZE) { + IOMMUTLBEvent event; + uint64_t mask = dma_aligned_pow2_mask(start, end, s->aw_bits); + uint64_t size = mask + 1; + + assert(size); + + event.type = IOMMU_NOTIFIER_UNMAP; + event.entry.iova = start; + event.entry.addr_mask = mask; + event.entry.target_as = &address_space_memory; + event.entry.perm = IOMMU_NONE; + /* This field is meaningless for unmap */ + event.entry.translated_addr = 0; + + memory_region_notify_iommu_one(n, &event); + + start += size; + remain -= size; + } + + assert(!remain); + + trace_vtd_as_unmap_whole(pci_bus_num(as->bus), + VTD_PCI_SLOT(as->devfn), + VTD_PCI_FUNC(as->devfn), + n->start, size); + + map.iova = n->start; + map.size = size; + iova_tree_remove(as->iova_tree, &map); +} + +static void vtd_address_space_unmap_all(IntelIOMMUState *s) +{ + VTDAddressSpace *vtd_as; + IOMMUNotifier *n; + + QLIST_FOREACH(vtd_as, &s->vtd_as_with_notifiers, next) { + IOMMU_NOTIFIER_FOREACH(n, &vtd_as->iommu) { + vtd_address_space_unmap(vtd_as, n); + } + } +} + +static void vtd_address_space_refresh_all(IntelIOMMUState *s) +{ + vtd_address_space_unmap_all(s); + vtd_switch_address_space_all(s); +} + +static int vtd_replay_hook(IOMMUTLBEvent *event, void *private) +{ + memory_region_notify_iommu_one(private, event); + return 0; +} + +static void vtd_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n) +{ + VTDAddressSpace *vtd_as = container_of(iommu_mr, VTDAddressSpace, iommu); + IntelIOMMUState *s = vtd_as->iommu_state; + uint8_t bus_n = pci_bus_num(vtd_as->bus); + VTDContextEntry ce; + + /* + * The replay can be triggered by either an invalidation or a newly + * created entry. No matter what, we release existing mappings + * (it means flushing caches for UNMAP-only registers). + */ + vtd_address_space_unmap(vtd_as, n); + + if (vtd_dev_to_context_entry(s, bus_n, vtd_as->devfn, &ce) == 0) { + trace_vtd_replay_ce_valid(s->root_scalable ? "scalable mode" : + "legacy mode", + bus_n, PCI_SLOT(vtd_as->devfn), + PCI_FUNC(vtd_as->devfn), + vtd_get_domain_id(s, &ce), + ce.hi, ce.lo); + if (vtd_as_has_map_notifier(vtd_as)) { + /* This is required only for MAP typed notifiers */ + vtd_page_walk_info info = { + .hook_fn = vtd_replay_hook, + .private = (void *)n, + .notify_unmap = false, + .aw = s->aw_bits, + .as = vtd_as, + .domain_id = vtd_get_domain_id(s, &ce), + }; + + vtd_page_walk(s, &ce, 0, ~0ULL, &info); + } + } else { + trace_vtd_replay_ce_invalid(bus_n, PCI_SLOT(vtd_as->devfn), + PCI_FUNC(vtd_as->devfn)); + } + + return; +} + +/* Do the initialization. It will also be called on reset, so pay + * attention when adding new initialization stuff.
+ */ +static void vtd_init(IntelIOMMUState *s) +{ + X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(s); + + memset(s->csr, 0, DMAR_REG_SIZE); + memset(s->wmask, 0, DMAR_REG_SIZE); + memset(s->w1cmask, 0, DMAR_REG_SIZE); + memset(s->womask, 0, DMAR_REG_SIZE); + + s->root = 0; + s->root_scalable = false; + s->dmar_enabled = false; + s->intr_enabled = false; + s->iq_head = 0; + s->iq_tail = 0; + s->iq = 0; + s->iq_size = 0; + s->qi_enabled = false; + s->iq_last_desc_type = VTD_INV_DESC_NONE; + s->iq_dw = false; + s->next_frcd_reg = 0; + s->cap = VTD_CAP_FRO | VTD_CAP_NFR | VTD_CAP_ND | + VTD_CAP_MAMV | VTD_CAP_PSI | VTD_CAP_SLLPS | + VTD_CAP_SAGAW_39bit | VTD_CAP_MGAW(s->aw_bits); + if (s->dma_drain) { + s->cap |= VTD_CAP_DRAIN; + } + if (s->aw_bits == VTD_HOST_AW_48BIT) { + s->cap |= VTD_CAP_SAGAW_48bit; + } + s->ecap = VTD_ECAP_QI | VTD_ECAP_IRO; + + /* + * Rsvd field masks for spte + */ + vtd_spte_rsvd[0] = ~0ULL; + vtd_spte_rsvd[1] = VTD_SPTE_PAGE_L1_RSVD_MASK(s->aw_bits, + x86_iommu->dt_supported); + vtd_spte_rsvd[2] = VTD_SPTE_PAGE_L2_RSVD_MASK(s->aw_bits); + vtd_spte_rsvd[3] = VTD_SPTE_PAGE_L3_RSVD_MASK(s->aw_bits); + vtd_spte_rsvd[4] = VTD_SPTE_PAGE_L4_RSVD_MASK(s->aw_bits); + + vtd_spte_rsvd_large[2] = VTD_SPTE_LPAGE_L2_RSVD_MASK(s->aw_bits, + x86_iommu->dt_supported); + vtd_spte_rsvd_large[3] = VTD_SPTE_LPAGE_L3_RSVD_MASK(s->aw_bits, + x86_iommu->dt_supported); + + if (s->scalable_mode) { + vtd_spte_rsvd[1] &= ~VTD_SPTE_SNP; + vtd_spte_rsvd_large[2] &= ~VTD_SPTE_SNP; + vtd_spte_rsvd_large[3] &= ~VTD_SPTE_SNP; + } + + if (x86_iommu_ir_supported(x86_iommu)) { + s->ecap |= VTD_ECAP_IR | VTD_ECAP_MHMV; + if (s->intr_eim == ON_OFF_AUTO_ON) { + s->ecap |= VTD_ECAP_EIM; + } + assert(s->intr_eim != ON_OFF_AUTO_AUTO); + } + + if (x86_iommu->dt_supported) { + s->ecap |= VTD_ECAP_DT; + } + + if (x86_iommu->pt_supported) { + s->ecap |= VTD_ECAP_PT; + } + + if (s->caching_mode) { + s->cap |= VTD_CAP_CM; + } + + /* TODO: read cap/ecap from host to decide which cap to be exposed. */ + if (s->scalable_mode) { + s->ecap |= VTD_ECAP_SMTS | VTD_ECAP_SRS | VTD_ECAP_SLTS; + } + + vtd_reset_caches(s); + + /* Define registers with default values and bit semantics */ + vtd_define_long(s, DMAR_VER_REG, 0x10UL, 0, 0); + vtd_define_quad(s, DMAR_CAP_REG, s->cap, 0, 0); + vtd_define_quad(s, DMAR_ECAP_REG, s->ecap, 0, 0); + vtd_define_long(s, DMAR_GCMD_REG, 0, 0xff800000UL, 0); + vtd_define_long_wo(s, DMAR_GCMD_REG, 0xff800000UL); + vtd_define_long(s, DMAR_GSTS_REG, 0, 0, 0); + vtd_define_quad(s, DMAR_RTADDR_REG, 0, 0xfffffffffffffc00ULL, 0); + vtd_define_quad(s, DMAR_CCMD_REG, 0, 0xe0000003ffffffffULL, 0); + vtd_define_quad_wo(s, DMAR_CCMD_REG, 0x3ffff0000ULL); + + /* Advanced Fault Logging not supported */ + vtd_define_long(s, DMAR_FSTS_REG, 0, 0, 0x11UL); + vtd_define_long(s, DMAR_FECTL_REG, 0x80000000UL, 0x80000000UL, 0); + vtd_define_long(s, DMAR_FEDATA_REG, 0, 0x0000ffffUL, 0); + vtd_define_long(s, DMAR_FEADDR_REG, 0, 0xfffffffcUL, 0); + + /* Treated as RsvdZ when EIM in ECAP_REG is not supported + * vtd_define_long(s, DMAR_FEUADDR_REG, 0, 0xffffffffUL, 0); + */ + vtd_define_long(s, DMAR_FEUADDR_REG, 0, 0, 0); + + /* Treated as RO for implementations that PLMR and PHMR fields reported + * as Clear in the CAP_REG. 
+ * vtd_define_long(s, DMAR_PMEN_REG, 0, 0x80000000UL, 0); + */ + vtd_define_long(s, DMAR_PMEN_REG, 0, 0, 0); + + vtd_define_quad(s, DMAR_IQH_REG, 0, 0, 0); + vtd_define_quad(s, DMAR_IQT_REG, 0, 0x7fff0ULL, 0); + vtd_define_quad(s, DMAR_IQA_REG, 0, 0xfffffffffffff807ULL, 0); + vtd_define_long(s, DMAR_ICS_REG, 0, 0, 0x1UL); + vtd_define_long(s, DMAR_IECTL_REG, 0x80000000UL, 0x80000000UL, 0); + vtd_define_long(s, DMAR_IEDATA_REG, 0, 0xffffffffUL, 0); + vtd_define_long(s, DMAR_IEADDR_REG, 0, 0xfffffffcUL, 0); + /* Treated as RsvdZ when EIM in ECAP_REG is not supported */ + vtd_define_long(s, DMAR_IEUADDR_REG, 0, 0, 0); + + /* IOTLB registers */ + vtd_define_quad(s, DMAR_IOTLB_REG, 0, 0xb003ffff00000000ULL, 0); + vtd_define_quad(s, DMAR_IVA_REG, 0, 0xfffffffffffff07fULL, 0); + vtd_define_quad_wo(s, DMAR_IVA_REG, 0xfffffffffffff07fULL); + + /* Fault Recording Registers, 128-bit */ + vtd_define_quad(s, DMAR_FRCD_REG_0_0, 0, 0, 0); + vtd_define_quad(s, DMAR_FRCD_REG_0_2, 0, 0, 0x8000000000000000ULL); + + /* + * Interrupt remapping registers. + */ + vtd_define_quad(s, DMAR_IRTA_REG, 0, 0xfffffffffffff80fULL, 0); +} + +/* Should not reset address_spaces on reset because devices will still use + * the address space they got at first (won't ask the bus again). + */ +static void vtd_reset(DeviceState *dev) +{ + IntelIOMMUState *s = INTEL_IOMMU_DEVICE(dev); + + vtd_init(s); + vtd_address_space_refresh_all(s); +} + +static AddressSpace *vtd_host_dma_iommu(PCIBus *bus, void *opaque, int devfn) +{ + IntelIOMMUState *s = opaque; + VTDAddressSpace *vtd_as; + + assert(0 <= devfn && devfn < PCI_DEVFN_MAX); + + vtd_as = vtd_find_add_as(s, bus, devfn); + return &vtd_as->as; +} + +static bool vtd_decide_config(IntelIOMMUState *s, Error **errp) +{ + X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(s); + + if (s->intr_eim == ON_OFF_AUTO_ON && !x86_iommu_ir_supported(x86_iommu)) { + error_setg(errp, "eim=on cannot be selected without intremap=on"); + return false; + } + + if (s->intr_eim == ON_OFF_AUTO_AUTO) { + s->intr_eim = (kvm_irqchip_in_kernel() || s->buggy_eim) + && x86_iommu_ir_supported(x86_iommu) ? + ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF; + } + if (s->intr_eim == ON_OFF_AUTO_ON && !s->buggy_eim) { + if (!kvm_irqchip_in_kernel()) { + error_setg(errp, "eim=on requires accel=kvm,kernel-irqchip=split"); + return false; + } + if (!kvm_enable_x2apic()) { + error_setg(errp, "eim=on requires support on the KVM side " + "(X2APIC_API, first shipped in v4.7)"); + return false; + } + } + + /* Currently the only supported address widths are 39 and 48 bits */ + if ((s->aw_bits != VTD_HOST_AW_39BIT) && + (s->aw_bits != VTD_HOST_AW_48BIT)) { + error_setg(errp, "Supported values for aw-bits are: %d, %d", + VTD_HOST_AW_39BIT, VTD_HOST_AW_48BIT); + return false; + } + + if (s->scalable_mode && !s->dma_drain) { + error_setg(errp, "Need to set dma_drain for scalable mode"); + return false; + } + + return true; +} + +static int vtd_machine_done_notify_one(Object *child, void *unused) +{ + IntelIOMMUState *iommu = INTEL_IOMMU_DEVICE(x86_iommu_get_default()); + + /* + * We hard-code this here because vfio-pci is the only special + * case. Let's be more elegant in the future when we can, but so + * far there seems to be no better way.
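Tying the aw-bits validation in vtd_decide_config() above to the capability encoding in vtd_init(): MGAW is stored as (aw - 1) in CAP_REG bits 21:16, and the SAGAW bits advertise the matching second-level table depth. A rough sketch reusing the same constants as VTD_CAP_MGAW()/VTD_CAP_SAGAW_*; the helper name is ours.

#include <assert.h>
#include <stdint.h>

static uint64_t cap_for_aw_bits(unsigned aw)
{
    uint64_t cap = ((uint64_t)((aw - 1) & 0x3f)) << 16;  /* VTD_CAP_MGAW(aw) */

    cap |= 0x2ULL << 8;            /* VTD_CAP_SAGAW_39bit: 3-level tables */
    if (aw == 48) {
        cap |= 0x4ULL << 8;        /* VTD_CAP_SAGAW_48bit: 4-level tables */
    }
    return cap;
}

int main(void)
{
    assert(((cap_for_aw_bits(39) >> 16) & 0x3f) == 38);  /* MGAW field */
    assert(cap_for_aw_bits(48) & (0x4ULL << 8));         /* 4-level advertised */
    return 0;
}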
+ */ + if (object_dynamic_cast(child, "vfio-pci") && !iommu->caching_mode) { + vtd_panic_require_caching_mode(); + } + + return 0; +} + +static void vtd_machine_done_hook(Notifier *notifier, void *unused) +{ + object_child_foreach_recursive(object_get_root(), + vtd_machine_done_notify_one, NULL); +} + +static Notifier vtd_machine_done_notify = { + .notify = vtd_machine_done_hook, +}; + +static void vtd_realize(DeviceState *dev, Error **errp) +{ + MachineState *ms = MACHINE(qdev_get_machine()); + PCMachineState *pcms = PC_MACHINE(ms); + X86MachineState *x86ms = X86_MACHINE(ms); + PCIBus *bus = pcms->bus; + IntelIOMMUState *s = INTEL_IOMMU_DEVICE(dev); + + if (!vtd_decide_config(s, errp)) { + return; + } + + QLIST_INIT(&s->vtd_as_with_notifiers); + qemu_mutex_init(&s->iommu_lock); + memset(s->vtd_as_by_bus_num, 0, sizeof(s->vtd_as_by_bus_num)); + memory_region_init_io(&s->csrmem, OBJECT(s), &vtd_mem_ops, s, + "intel_iommu", DMAR_REG_SIZE); + + /* Create the shared memory regions by all devices */ + memory_region_init(&s->mr_nodmar, OBJECT(s), "vtd-nodmar", + UINT64_MAX); + memory_region_init_io(&s->mr_ir, OBJECT(s), &vtd_mem_ir_ops, + s, "vtd-ir", VTD_INTERRUPT_ADDR_SIZE); + memory_region_init_alias(&s->mr_sys_alias, OBJECT(s), + "vtd-sys-alias", get_system_memory(), 0, + memory_region_size(get_system_memory())); + memory_region_add_subregion_overlap(&s->mr_nodmar, 0, + &s->mr_sys_alias, 0); + memory_region_add_subregion_overlap(&s->mr_nodmar, + VTD_INTERRUPT_ADDR_FIRST, + &s->mr_ir, 1); + + sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->csrmem); + /* No corresponding destroy */ + s->iotlb = g_hash_table_new_full(vtd_uint64_hash, vtd_uint64_equal, + g_free, g_free); + s->vtd_as_by_busptr = g_hash_table_new_full(vtd_uint64_hash, vtd_uint64_equal, + g_free, g_free); + vtd_init(s); + sysbus_mmio_map(SYS_BUS_DEVICE(s), 0, Q35_HOST_BRIDGE_IOMMU_ADDR); + pci_setup_iommu(bus, vtd_host_dma_iommu, dev); + /* Pseudo address space under root PCI bus. 
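The region wiring in vtd_realize() above overlaps the 1 MiB vtd-ir window at priority 1 on top of the priority-0 system-memory alias, so only accesses inside the interrupt address range reach the IR handler. A toy predicate for which addresses the higher-priority subregion claims; the constants match VTD_INTERRUPT_ADDR_FIRST/LAST.

#include <assert.h>
#include <stdint.h>

#define IR_FIRST 0xfee00000ULL   /* VTD_INTERRUPT_ADDR_FIRST */
#define IR_LAST  0xfeefffffULL   /* VTD_INTERRUPT_ADDR_LAST */

static int in_ir_window(uint64_t addr)
{
    return addr >= IR_FIRST && addr <= IR_LAST;
}

int main(void)
{
    assert(in_ir_window(0xfee00010));    /* claimed by vtd-ir */
    assert(!in_ir_window(0xfec00000));   /* IOAPIC MMIO falls through */
    return 0;
}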
*/ + x86ms->ioapic_as = vtd_host_dma_iommu(bus, s, Q35_PSEUDO_DEVFN_IOAPIC); + qemu_add_machine_init_done_notifier(&vtd_machine_done_notify); +} + +static void vtd_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + X86IOMMUClass *x86_class = X86_IOMMU_DEVICE_CLASS(klass); + + dc->reset = vtd_reset; + dc->vmsd = &vtd_vmstate; + device_class_set_props(dc, vtd_properties); + dc->hotpluggable = false; + x86_class->realize = vtd_realize; + x86_class->int_remap = vtd_int_remap; + /* Supported by the pc-q35-* machine types */ + dc->user_creatable = true; + set_bit(DEVICE_CATEGORY_MISC, dc->categories); + dc->desc = "Intel IOMMU (VT-d) DMA Remapping device"; +} + +static const TypeInfo vtd_info = { + .name = TYPE_INTEL_IOMMU_DEVICE, + .parent = TYPE_X86_IOMMU_DEVICE, + .instance_size = sizeof(IntelIOMMUState), + .class_init = vtd_class_init, +}; + +static void vtd_iommu_memory_region_class_init(ObjectClass *klass, + void *data) +{ + IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass); + + imrc->translate = vtd_iommu_translate; + imrc->notify_flag_changed = vtd_iommu_notify_flag_changed; + imrc->replay = vtd_iommu_replay; +} + +static const TypeInfo vtd_iommu_memory_region_info = { + .parent = TYPE_IOMMU_MEMORY_REGION, + .name = TYPE_INTEL_IOMMU_MEMORY_REGION, + .class_init = vtd_iommu_memory_region_class_init, +}; + +static void vtd_register_types(void) +{ + type_register_static(&vtd_info); + type_register_static(&vtd_iommu_memory_region_info); +} + +type_init(vtd_register_types) diff --git a/hw/i386/intel_iommu_internal.h b/hw/i386/intel_iommu_internal.h new file mode 100644 index 000000000..a6c788049 --- /dev/null +++ b/hw/i386/intel_iommu_internal.h @@ -0,0 +1,518 @@ +/* + * QEMU emulation of an Intel IOMMU (VT-d) + * (DMA Remapping device) + * + * Copyright (C) 2013 Knut Omang, Oracle + * Copyright (C) 2014 Le Tan, + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . 
+ * + * Lots of defines copied from kernel/include/linux/intel-iommu.h: + * Copyright (C) 2006-2008 Intel Corporation + * Author: Ashok Raj + * Author: Anil S Keshavamurthy + * + */ + +#ifndef HW_I386_INTEL_IOMMU_INTERNAL_H +#define HW_I386_INTEL_IOMMU_INTERNAL_H +#include "hw/i386/intel_iommu.h" + +/* + * Intel IOMMU register specification + */ +#define DMAR_VER_REG 0x0 /* Arch version supported by this IOMMU */ +#define DMAR_CAP_REG 0x8 /* Hardware supported capabilities */ +#define DMAR_CAP_REG_HI 0xc /* High 32-bit of DMAR_CAP_REG */ +#define DMAR_ECAP_REG 0x10 /* Extended capabilities supported */ +#define DMAR_ECAP_REG_HI 0X14 +#define DMAR_GCMD_REG 0x18 /* Global command */ +#define DMAR_GSTS_REG 0x1c /* Global status */ +#define DMAR_RTADDR_REG 0x20 /* Root entry table */ +#define DMAR_RTADDR_REG_HI 0X24 +#define DMAR_CCMD_REG 0x28 /* Context command */ +#define DMAR_CCMD_REG_HI 0x2c +#define DMAR_FSTS_REG 0x34 /* Fault status */ +#define DMAR_FECTL_REG 0x38 /* Fault control */ +#define DMAR_FEDATA_REG 0x3c /* Fault event interrupt data */ +#define DMAR_FEADDR_REG 0x40 /* Fault event interrupt addr */ +#define DMAR_FEUADDR_REG 0x44 /* Upper address */ +#define DMAR_AFLOG_REG 0x58 /* Advanced fault control */ +#define DMAR_AFLOG_REG_HI 0X5c +#define DMAR_PMEN_REG 0x64 /* Enable protected memory region */ +#define DMAR_PLMBASE_REG 0x68 /* PMRR low addr */ +#define DMAR_PLMLIMIT_REG 0x6c /* PMRR low limit */ +#define DMAR_PHMBASE_REG 0x70 /* PMRR high base addr */ +#define DMAR_PHMBASE_REG_HI 0X74 +#define DMAR_PHMLIMIT_REG 0x78 /* PMRR high limit */ +#define DMAR_PHMLIMIT_REG_HI 0x7c +#define DMAR_IQH_REG 0x80 /* Invalidation queue head */ +#define DMAR_IQH_REG_HI 0X84 +#define DMAR_IQT_REG 0x88 /* Invalidation queue tail */ +#define DMAR_IQT_REG_HI 0X8c +#define DMAR_IQA_REG 0x90 /* Invalidation queue addr */ +#define DMAR_IQA_REG_HI 0x94 +#define DMAR_ICS_REG 0x9c /* Invalidation complete status */ +#define DMAR_IRTA_REG 0xb8 /* Interrupt remapping table addr */ +#define DMAR_IRTA_REG_HI 0xbc +#define DMAR_IECTL_REG 0xa0 /* Invalidation event control */ +#define DMAR_IEDATA_REG 0xa4 /* Invalidation event data */ +#define DMAR_IEADDR_REG 0xa8 /* Invalidation event address */ +#define DMAR_IEUADDR_REG 0xac /* Invalidation event address */ +#define DMAR_PQH_REG 0xc0 /* Page request queue head */ +#define DMAR_PQH_REG_HI 0xc4 +#define DMAR_PQT_REG 0xc8 /* Page request queue tail*/ +#define DMAR_PQT_REG_HI 0xcc +#define DMAR_PQA_REG 0xd0 /* Page request queue address */ +#define DMAR_PQA_REG_HI 0xd4 +#define DMAR_PRS_REG 0xdc /* Page request status */ +#define DMAR_PECTL_REG 0xe0 /* Page request event control */ +#define DMAR_PEDATA_REG 0xe4 /* Page request event data */ +#define DMAR_PEADDR_REG 0xe8 /* Page request event address */ +#define DMAR_PEUADDR_REG 0xec /* Page event upper address */ +#define DMAR_MTRRCAP_REG 0x100 /* MTRR capability */ +#define DMAR_MTRRCAP_REG_HI 0x104 +#define DMAR_MTRRDEF_REG 0x108 /* MTRR default type */ +#define DMAR_MTRRDEF_REG_HI 0x10c + +/* IOTLB registers */ +#define DMAR_IOTLB_REG_OFFSET 0xf0 /* Offset to the IOTLB registers */ +#define DMAR_IVA_REG DMAR_IOTLB_REG_OFFSET /* Invalidate address */ +#define DMAR_IVA_REG_HI (DMAR_IVA_REG + 4) +/* IOTLB invalidate register */ +#define DMAR_IOTLB_REG (DMAR_IOTLB_REG_OFFSET + 0x8) +#define DMAR_IOTLB_REG_HI (DMAR_IOTLB_REG + 4) + +/* FRCD */ +#define DMAR_FRCD_REG_OFFSET 0x220 /* Offset to the fault recording regs */ +/* NOTICE: If you change the DMAR_FRCD_REG_NR, please remember to change the + * 
DMAR_REG_SIZE in include/hw/i386/intel_iommu.h. + * #define DMAR_REG_SIZE (DMAR_FRCD_REG_OFFSET + 16 * DMAR_FRCD_REG_NR) + */ +#define DMAR_FRCD_REG_NR 1ULL /* Num of fault recording regs */ + +#define DMAR_FRCD_REG_0_0 0x220 /* The 0th fault recording regs */ +#define DMAR_FRCD_REG_0_1 0x224 +#define DMAR_FRCD_REG_0_2 0x228 +#define DMAR_FRCD_REG_0_3 0x22c + +/* Interrupt Address Range */ +#define VTD_INTERRUPT_ADDR_FIRST 0xfee00000ULL +#define VTD_INTERRUPT_ADDR_LAST 0xfeefffffULL +#define VTD_INTERRUPT_ADDR_SIZE (VTD_INTERRUPT_ADDR_LAST - \ + VTD_INTERRUPT_ADDR_FIRST + 1) + +/* The shift of source_id in the key of IOTLB hash table */ +#define VTD_IOTLB_SID_SHIFT 36 +#define VTD_IOTLB_LVL_SHIFT 52 +#define VTD_IOTLB_MAX_SIZE 1024 /* Max size of the hash table */ + +/* IOTLB_REG */ +#define VTD_TLB_GLOBAL_FLUSH (1ULL << 60) /* Global invalidation */ +#define VTD_TLB_DSI_FLUSH (2ULL << 60) /* Domain-selective */ +#define VTD_TLB_PSI_FLUSH (3ULL << 60) /* Page-selective */ +#define VTD_TLB_FLUSH_GRANU_MASK (3ULL << 60) +#define VTD_TLB_GLOBAL_FLUSH_A (1ULL << 57) +#define VTD_TLB_DSI_FLUSH_A (2ULL << 57) +#define VTD_TLB_PSI_FLUSH_A (3ULL << 57) +#define VTD_TLB_FLUSH_GRANU_MASK_A (3ULL << 57) +#define VTD_TLB_IVT (1ULL << 63) +#define VTD_TLB_DID(val) (((val) >> 32) & VTD_DOMAIN_ID_MASK) + +/* IVA_REG */ +#define VTD_IVA_ADDR(val) ((val) & ~0xfffULL) +#define VTD_IVA_AM(val) ((val) & 0x3fULL) + +/* GCMD_REG */ +#define VTD_GCMD_TE (1UL << 31) +#define VTD_GCMD_SRTP (1UL << 30) +#define VTD_GCMD_SFL (1UL << 29) +#define VTD_GCMD_EAFL (1UL << 28) +#define VTD_GCMD_WBF (1UL << 27) +#define VTD_GCMD_QIE (1UL << 26) +#define VTD_GCMD_IRE (1UL << 25) +#define VTD_GCMD_SIRTP (1UL << 24) +#define VTD_GCMD_CFI (1UL << 23) + +/* GSTS_REG */ +#define VTD_GSTS_TES (1UL << 31) +#define VTD_GSTS_RTPS (1UL << 30) +#define VTD_GSTS_FLS (1UL << 29) +#define VTD_GSTS_AFLS (1UL << 28) +#define VTD_GSTS_WBFS (1UL << 27) +#define VTD_GSTS_QIES (1UL << 26) +#define VTD_GSTS_IRES (1UL << 25) +#define VTD_GSTS_IRTPS (1UL << 24) +#define VTD_GSTS_CFIS (1UL << 23) + +/* CCMD_REG */ +#define VTD_CCMD_ICC (1ULL << 63) +#define VTD_CCMD_GLOBAL_INVL (1ULL << 61) +#define VTD_CCMD_DOMAIN_INVL (2ULL << 61) +#define VTD_CCMD_DEVICE_INVL (3ULL << 61) +#define VTD_CCMD_CIRG_MASK (3ULL << 61) +#define VTD_CCMD_GLOBAL_INVL_A (1ULL << 59) +#define VTD_CCMD_DOMAIN_INVL_A (2ULL << 59) +#define VTD_CCMD_DEVICE_INVL_A (3ULL << 59) +#define VTD_CCMD_CAIG_MASK (3ULL << 59) +#define VTD_CCMD_DID(val) ((val) & VTD_DOMAIN_ID_MASK) +#define VTD_CCMD_SID(val) (((val) >> 16) & 0xffffULL) +#define VTD_CCMD_FM(val) (((val) >> 32) & 3ULL) + +/* RTADDR_REG */ +#define VTD_RTADDR_SMT (1ULL << 10) +#define VTD_RTADDR_ADDR_MASK(aw) (VTD_HAW_MASK(aw) ^ 0xfffULL) + +/* IRTA_REG */ +#define VTD_IRTA_ADDR_MASK(aw) (VTD_HAW_MASK(aw) ^ 0xfffULL) +#define VTD_IRTA_EIME (1ULL << 11) +#define VTD_IRTA_SIZE_MASK (0xfULL) + +/* ECAP_REG */ +/* (offset >> 4) << 8 */ +#define VTD_ECAP_IRO (DMAR_IOTLB_REG_OFFSET << 4) +#define VTD_ECAP_QI (1ULL << 1) +#define VTD_ECAP_DT (1ULL << 2) +/* Interrupt Remapping support */ +#define VTD_ECAP_IR (1ULL << 3) +#define VTD_ECAP_EIM (1ULL << 4) +#define VTD_ECAP_PT (1ULL << 6) +#define VTD_ECAP_MHMV (15ULL << 20) +#define VTD_ECAP_SRS (1ULL << 31) +#define VTD_ECAP_SMTS (1ULL << 43) +#define VTD_ECAP_SLTS (1ULL << 46) + +/* CAP_REG */ +/* (offset >> 4) << 24 */ +#define VTD_CAP_FRO (DMAR_FRCD_REG_OFFSET << 20) +#define VTD_CAP_NFR ((DMAR_FRCD_REG_NR - 1) << 40) +#define VTD_DOMAIN_ID_SHIFT 16 /* 16-bit domain id for 64K 
domains */ +#define VTD_DOMAIN_ID_MASK ((1UL << VTD_DOMAIN_ID_SHIFT) - 1) +#define VTD_CAP_ND (((VTD_DOMAIN_ID_SHIFT - 4) / 2) & 7ULL) +#define VTD_ADDRESS_SIZE(aw) (1ULL << (aw)) +#define VTD_CAP_MGAW(aw) ((((aw) - 1) & 0x3fULL) << 16) +#define VTD_MAMV 18ULL +#define VTD_CAP_MAMV (VTD_MAMV << 48) +#define VTD_CAP_PSI (1ULL << 39) +#define VTD_CAP_SLLPS ((1ULL << 34) | (1ULL << 35)) +#define VTD_CAP_DRAIN_WRITE (1ULL << 54) +#define VTD_CAP_DRAIN_READ (1ULL << 55) +#define VTD_CAP_DRAIN (VTD_CAP_DRAIN_READ | VTD_CAP_DRAIN_WRITE) +#define VTD_CAP_CM (1ULL << 7) + +/* Supported Adjusted Guest Address Widths */ +#define VTD_CAP_SAGAW_SHIFT 8 +#define VTD_CAP_SAGAW_MASK (0x1fULL << VTD_CAP_SAGAW_SHIFT) + /* 39-bit AGAW, 3-level page-table */ +#define VTD_CAP_SAGAW_39bit (0x2ULL << VTD_CAP_SAGAW_SHIFT) + /* 48-bit AGAW, 4-level page-table */ +#define VTD_CAP_SAGAW_48bit (0x4ULL << VTD_CAP_SAGAW_SHIFT) + +/* IQT_REG */ +#define VTD_IQT_QT(dw_bit, val) (dw_bit ? (((val) >> 5) & 0x3fffULL) : \ + (((val) >> 4) & 0x7fffULL)) +#define VTD_IQT_QT_256_RSV_BIT 0x10 + +/* IQA_REG */ +#define VTD_IQA_IQA_MASK(aw) (VTD_HAW_MASK(aw) ^ 0xfffULL) +#define VTD_IQA_QS 0x7ULL +#define VTD_IQA_DW_MASK 0x800 + +/* IQH_REG */ +#define VTD_IQH_QH_SHIFT_4 4 +#define VTD_IQH_QH_SHIFT_5 5 +#define VTD_IQH_QH_MASK 0x7fff0ULL + +/* ICS_REG */ +#define VTD_ICS_IWC 1UL + +/* IECTL_REG */ +#define VTD_IECTL_IM (1UL << 31) +#define VTD_IECTL_IP (1UL << 30) + +/* FSTS_REG */ +#define VTD_FSTS_FRI_MASK 0xff00UL +#define VTD_FSTS_FRI(val) ((((uint32_t)(val)) << 8) & VTD_FSTS_FRI_MASK) +#define VTD_FSTS_IQE (1UL << 4) +#define VTD_FSTS_PPF (1UL << 1) +#define VTD_FSTS_PFO 1UL + +/* FECTL_REG */ +#define VTD_FECTL_IM (1UL << 31) +#define VTD_FECTL_IP (1UL << 30) + +/* Fault Recording Register */ +/* For the high 64-bit of 128-bit */ +#define VTD_FRCD_F (1ULL << 63) +#define VTD_FRCD_T (1ULL << 62) +#define VTD_FRCD_FR(val) (((val) & 0xffULL) << 32) +#define VTD_FRCD_SID_MASK 0xffffULL +#define VTD_FRCD_SID(val) ((val) & VTD_FRCD_SID_MASK) +/* For the low 64-bit of 128-bit */ +#define VTD_FRCD_FI(val) ((val) & ~0xfffULL) + +/* DMA Remapping Fault Conditions */ +typedef enum VTDFaultReason { + VTD_FR_RESERVED = 0, /* Reserved for Advanced Fault logging */ + VTD_FR_ROOT_ENTRY_P = 1, /* The Present(P) field of root-entry is 0 */ + VTD_FR_CONTEXT_ENTRY_P, /* The Present(P) field of context-entry is 0 */ + VTD_FR_CONTEXT_ENTRY_INV, /* Invalid programming of a context-entry */ + VTD_FR_ADDR_BEYOND_MGAW, /* Input-address above (2^x-1) */ + VTD_FR_WRITE, /* No write permission */ + VTD_FR_READ, /* No read permission */ + /* Fail to access a second-level paging entry (not SL_PML4E) */ + VTD_FR_PAGING_ENTRY_INV, + VTD_FR_ROOT_TABLE_INV, /* Fail to access a root-entry */ + VTD_FR_CONTEXT_TABLE_INV, /* Fail to access a context-entry */ + /* Non-zero reserved field in a present root-entry */ + VTD_FR_ROOT_ENTRY_RSVD, + /* Non-zero reserved field in a present context-entry */ + VTD_FR_CONTEXT_ENTRY_RSVD, + /* Non-zero reserved field in a second-level paging entry with at least + * one of the Read(R), Write(W) or Execute(E) fields set. + */ + VTD_FR_PAGING_ENTRY_RSVD, + /* Translation request or translated request explicitly blocked due to the + * programming of the Translation Type (T) field in the present + * context-entry.
+ */ + VTD_FR_CONTEXT_ENTRY_TT, + + /* Interrupt remapping transition faults */ + VTD_FR_IR_REQ_RSVD = 0x20, /* One or more IR request reserved + * fields set */ + VTD_FR_IR_INDEX_OVER = 0x21, /* Index value greater than max */ + VTD_FR_IR_ENTRY_P = 0x22, /* Present (P) not set in IRTE */ + VTD_FR_IR_ROOT_INVAL = 0x23, /* IR Root table invalid */ + VTD_FR_IR_IRTE_RSVD = 0x24, /* IRTE Rsvd field non-zero with + * Present flag set */ + VTD_FR_IR_REQ_COMPAT = 0x25, /* Encountered compatible IR + * request while disabled */ + VTD_FR_IR_SID_ERR = 0x26, /* Invalid Source-ID */ + + VTD_FR_PASID_TABLE_INV = 0x58, /*Invalid PASID table entry */ + + /* This is not a normal fault reason. We use this to indicate some faults + * that are not referenced by the VT-d specification. + * Fault event with such reason should not be recorded. + */ + VTD_FR_RESERVED_ERR, + VTD_FR_MAX, /* Guard */ +} VTDFaultReason; + +#define VTD_CONTEXT_CACHE_GEN_MAX 0xffffffffUL + +/* Interrupt Entry Cache Invalidation Descriptor: VT-d 6.5.2.7. */ +struct VTDInvDescIEC { + uint32_t type:4; /* Should always be 0x4 */ + uint32_t granularity:1; /* If set, it's global IR invalidation */ + uint32_t resved_1:22; + uint32_t index_mask:5; /* 2^N for continuous int invalidation */ + uint32_t index:16; /* Start index to invalidate */ + uint32_t reserved_2:16; +}; +typedef struct VTDInvDescIEC VTDInvDescIEC; + +/* Queued Invalidation Descriptor */ +union VTDInvDesc { + struct { + uint64_t lo; + uint64_t hi; + }; + struct { + uint64_t val[4]; + }; + union { + VTDInvDescIEC iec; + }; +}; +typedef union VTDInvDesc VTDInvDesc; + +/* Masks for struct VTDInvDesc */ +#define VTD_INV_DESC_TYPE 0xf +#define VTD_INV_DESC_CC 0x1 /* Context-cache Invalidate Desc */ +#define VTD_INV_DESC_IOTLB 0x2 +#define VTD_INV_DESC_DEVICE 0x3 +#define VTD_INV_DESC_IEC 0x4 /* Interrupt Entry Cache + Invalidate Descriptor */ +#define VTD_INV_DESC_WAIT 0x5 /* Invalidation Wait Descriptor */ +#define VTD_INV_DESC_PIOTLB 0x6 /* PASID-IOTLB Invalidate Desc */ +#define VTD_INV_DESC_PC 0x7 /* PASID-cache Invalidate Desc */ +#define VTD_INV_DESC_NONE 0 /* Not an Invalidate Descriptor */ + +/* Masks for Invalidation Wait Descriptor*/ +#define VTD_INV_DESC_WAIT_SW (1ULL << 5) +#define VTD_INV_DESC_WAIT_IF (1ULL << 4) +#define VTD_INV_DESC_WAIT_FN (1ULL << 6) +#define VTD_INV_DESC_WAIT_DATA_SHIFT 32 +#define VTD_INV_DESC_WAIT_RSVD_LO 0Xffffff80ULL +#define VTD_INV_DESC_WAIT_RSVD_HI 3ULL + +/* Masks for Context-cache Invalidation Descriptor */ +#define VTD_INV_DESC_CC_G (3ULL << 4) +#define VTD_INV_DESC_CC_GLOBAL (1ULL << 4) +#define VTD_INV_DESC_CC_DOMAIN (2ULL << 4) +#define VTD_INV_DESC_CC_DEVICE (3ULL << 4) +#define VTD_INV_DESC_CC_DID(val) (((val) >> 16) & VTD_DOMAIN_ID_MASK) +#define VTD_INV_DESC_CC_SID(val) (((val) >> 32) & 0xffffUL) +#define VTD_INV_DESC_CC_FM(val) (((val) >> 48) & 3UL) +#define VTD_INV_DESC_CC_RSVD 0xfffc00000000ffc0ULL + +/* Masks for IOTLB Invalidate Descriptor */ +#define VTD_INV_DESC_IOTLB_G (3ULL << 4) +#define VTD_INV_DESC_IOTLB_GLOBAL (1ULL << 4) +#define VTD_INV_DESC_IOTLB_DOMAIN (2ULL << 4) +#define VTD_INV_DESC_IOTLB_PAGE (3ULL << 4) +#define VTD_INV_DESC_IOTLB_DID(val) (((val) >> 16) & VTD_DOMAIN_ID_MASK) +#define VTD_INV_DESC_IOTLB_ADDR(val) ((val) & ~0xfffULL) +#define VTD_INV_DESC_IOTLB_AM(val) ((val) & 0x3fULL) +#define VTD_INV_DESC_IOTLB_RSVD_LO 0xffffffff0000ff00ULL +#define VTD_INV_DESC_IOTLB_RSVD_HI 0xf80ULL + +/* Mask for Device IOTLB Invalidate Descriptor */ +#define VTD_INV_DESC_DEVICE_IOTLB_ADDR(val) ((val) & 
0xfffffffffffff000ULL) +#define VTD_INV_DESC_DEVICE_IOTLB_SIZE(val) ((val) & 0x1) +#define VTD_INV_DESC_DEVICE_IOTLB_SID(val) (((val) >> 32) & 0xFFFFULL) +#define VTD_INV_DESC_DEVICE_IOTLB_RSVD_HI 0xffeULL +#define VTD_INV_DESC_DEVICE_IOTLB_RSVD_LO 0xffff0000ffe0fff8 + +/* Rsvd field masks for spte */ +#define VTD_SPTE_SNP 0x800ULL + +#define VTD_SPTE_PAGE_L1_RSVD_MASK(aw, dt_supported) \ + dt_supported ? \ + (0x800ULL | ~(VTD_HAW_MASK(aw) | VTD_SL_IGN_COM | VTD_SL_TM)) : \ + (0x800ULL | ~(VTD_HAW_MASK(aw) | VTD_SL_IGN_COM)) +#define VTD_SPTE_PAGE_L2_RSVD_MASK(aw) \ + (0x800ULL | ~(VTD_HAW_MASK(aw) | VTD_SL_IGN_COM)) +#define VTD_SPTE_PAGE_L3_RSVD_MASK(aw) \ + (0x800ULL | ~(VTD_HAW_MASK(aw) | VTD_SL_IGN_COM)) +#define VTD_SPTE_PAGE_L4_RSVD_MASK(aw) \ + (0x880ULL | ~(VTD_HAW_MASK(aw) | VTD_SL_IGN_COM)) + +#define VTD_SPTE_LPAGE_L2_RSVD_MASK(aw, dt_supported) \ + dt_supported ? \ + (0x1ff800ULL | ~(VTD_HAW_MASK(aw) | VTD_SL_IGN_COM | VTD_SL_TM)) : \ + (0x1ff800ULL | ~(VTD_HAW_MASK(aw) | VTD_SL_IGN_COM)) +#define VTD_SPTE_LPAGE_L3_RSVD_MASK(aw, dt_supported) \ + dt_supported ? \ + (0x3ffff800ULL | ~(VTD_HAW_MASK(aw) | VTD_SL_IGN_COM | VTD_SL_TM)) : \ + (0x3ffff800ULL | ~(VTD_HAW_MASK(aw) | VTD_SL_IGN_COM)) + +/* Information about page-selective IOTLB invalidate */ +struct VTDIOTLBPageInvInfo { + uint16_t domain_id; + uint64_t addr; + uint8_t mask; +}; +typedef struct VTDIOTLBPageInvInfo VTDIOTLBPageInvInfo; + +/* Pagesize of VTD paging structures, including root and context tables */ +#define VTD_PAGE_SHIFT 12 +#define VTD_PAGE_SIZE (1ULL << VTD_PAGE_SHIFT) + +#define VTD_PAGE_SHIFT_4K 12 +#define VTD_PAGE_MASK_4K (~((1ULL << VTD_PAGE_SHIFT_4K) - 1)) +#define VTD_PAGE_SHIFT_2M 21 +#define VTD_PAGE_MASK_2M (~((1ULL << VTD_PAGE_SHIFT_2M) - 1)) +#define VTD_PAGE_SHIFT_1G 30 +#define VTD_PAGE_MASK_1G (~((1ULL << VTD_PAGE_SHIFT_1G) - 1)) + +struct VTDRootEntry { + uint64_t lo; + uint64_t hi; +}; +typedef struct VTDRootEntry VTDRootEntry; + +/* Masks for struct VTDRootEntry */ +#define VTD_ROOT_ENTRY_P 1ULL +#define VTD_ROOT_ENTRY_CTP (~0xfffULL) + +#define VTD_ROOT_ENTRY_NR (VTD_PAGE_SIZE / sizeof(VTDRootEntry)) +#define VTD_ROOT_ENTRY_RSVD(aw) (0xffeULL | ~VTD_HAW_MASK(aw)) + +#define VTD_DEVFN_CHECK_MASK 0x80 + +/* Masks for struct VTDContextEntry */ +/* lo */ +#define VTD_CONTEXT_ENTRY_P (1ULL << 0) +#define VTD_CONTEXT_ENTRY_FPD (1ULL << 1) /* Fault Processing Disable */ +#define VTD_CONTEXT_ENTRY_TT (3ULL << 2) /* Translation Type */ +#define VTD_CONTEXT_TT_MULTI_LEVEL 0 +#define VTD_CONTEXT_TT_DEV_IOTLB (1ULL << 2) +#define VTD_CONTEXT_TT_PASS_THROUGH (2ULL << 2) +/* Second Level Page Translation Pointer*/ +#define VTD_CONTEXT_ENTRY_SLPTPTR (~0xfffULL) +#define VTD_CONTEXT_ENTRY_RSVD_LO(aw) (0xff0ULL | ~VTD_HAW_MASK(aw)) +/* hi */ +#define VTD_CONTEXT_ENTRY_AW 7ULL /* Adjusted guest-address-width */ +#define VTD_CONTEXT_ENTRY_DID(val) (((val) >> 8) & VTD_DOMAIN_ID_MASK) +#define VTD_CONTEXT_ENTRY_RSVD_HI 0xffffffffff000080ULL + +#define VTD_CONTEXT_ENTRY_NR (VTD_PAGE_SIZE / sizeof(VTDContextEntry)) + +#define VTD_CTX_ENTRY_LEGACY_SIZE 16 +#define VTD_CTX_ENTRY_SCALABLE_SIZE 32 + +#define VTD_SM_CONTEXT_ENTRY_RID2PASID_MASK 0xfffff +#define VTD_SM_CONTEXT_ENTRY_RSVD_VAL0(aw) (0x1e0ULL | ~VTD_HAW_MASK(aw)) +#define VTD_SM_CONTEXT_ENTRY_RSVD_VAL1 0xffffffffffe00000ULL + +/* PASID Table Related Definitions */ +#define VTD_PASID_DIR_BASE_ADDR_MASK (~0xfffULL) +#define VTD_PASID_TABLE_BASE_ADDR_MASK (~0xfffULL) +#define VTD_PASID_DIR_ENTRY_SIZE 8 +#define VTD_PASID_ENTRY_SIZE 64 +#define 
VTD_PASID_DIR_BITS_MASK (0x3fffULL) +#define VTD_PASID_DIR_INDEX(pasid) (((pasid) >> 6) & VTD_PASID_DIR_BITS_MASK) +#define VTD_PASID_DIR_FPD (1ULL << 1) /* Fault Processing Disable */ +#define VTD_PASID_TABLE_BITS_MASK (0x3fULL) +#define VTD_PASID_TABLE_INDEX(pasid) ((pasid) & VTD_PASID_TABLE_BITS_MASK) +#define VTD_PASID_ENTRY_FPD (1ULL << 1) /* Fault Processing Disable */ + +/* PASID Granular Translation Type Mask */ +#define VTD_PASID_ENTRY_P 1ULL +#define VTD_SM_PASID_ENTRY_PGTT (7ULL << 6) +#define VTD_SM_PASID_ENTRY_FLT (1ULL << 6) +#define VTD_SM_PASID_ENTRY_SLT (2ULL << 6) +#define VTD_SM_PASID_ENTRY_NESTED (3ULL << 6) +#define VTD_SM_PASID_ENTRY_PT (4ULL << 6) + +#define VTD_SM_PASID_ENTRY_AW 7ULL /* Adjusted guest-address-width */ +#define VTD_SM_PASID_ENTRY_DID(val) ((val) & VTD_DOMAIN_ID_MASK) + +/* Second Level Page Translation Pointer*/ +#define VTD_SM_PASID_ENTRY_SLPTPTR (~0xfffULL) + +/* Paging Structure common */ +#define VTD_SL_PT_PAGE_SIZE_MASK (1ULL << 7) +/* Bits to decide the offset for each level */ +#define VTD_SL_LEVEL_BITS 9 + +/* Second Level Paging Structure */ +#define VTD_SL_PML4_LEVEL 4 +#define VTD_SL_PDP_LEVEL 3 +#define VTD_SL_PD_LEVEL 2 +#define VTD_SL_PT_LEVEL 1 +#define VTD_SL_PT_ENTRY_NR 512 + +/* Masks for Second Level Paging Entry */ +#define VTD_SL_RW_MASK 3ULL +#define VTD_SL_R 1ULL +#define VTD_SL_W (1ULL << 1) +#define VTD_SL_PT_BASE_ADDR_MASK(aw) (~(VTD_PAGE_SIZE - 1) & VTD_HAW_MASK(aw)) +#define VTD_SL_IGN_COM 0xbff0000000000000ULL +#define VTD_SL_TM (1ULL << 62) + +#endif diff --git a/hw/i386/kvm/apic.c b/hw/i386/kvm/apic.c new file mode 100644 index 000000000..1e89ca089 --- /dev/null +++ b/hw/i386/kvm/apic.c @@ -0,0 +1,271 @@ +/* + * KVM in-kernel APIC support + * + * Copyright (c) 2011 Siemens AG + * + * Authors: + * Jan Kiszka + * + * This work is licensed under the terms of the GNU GPL version 2. + * See the COPYING file in the top-level directory. 
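Given the second-level paging constants above (a 12-bit page offset plus VTD_SL_LEVEL_BITS == 9 index bits per level), the table index at each level comes straight out of the IOVA. A small standalone sketch; the helper name is ours.

#include <assert.h>
#include <stdint.h>

static unsigned sl_index(uint64_t iova, int level)
{
    return (iova >> (12 + 9 * (level - 1))) & 0x1ff;  /* 9 == VTD_SL_LEVEL_BITS */
}

int main(void)
{
    uint64_t iova = 0x7f8000001000ULL;

    assert(sl_index(iova, 4) == 0xff);  /* PML4 index, bits 47:39 */
    assert(sl_index(iova, 1) == 0x01);  /* PT index, bits 20:12 */
    return 0;
}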
+ */ + +#include "qemu/osdep.h" +#include "qemu/module.h" +#include "hw/i386/apic_internal.h" +#include "hw/pci/msi.h" +#include "sysemu/hw_accel.h" +#include "sysemu/kvm.h" +#include "kvm/kvm_i386.h" + +static inline void kvm_apic_set_reg(struct kvm_lapic_state *kapic, + int reg_id, uint32_t val) +{ + *((uint32_t *)(kapic->regs + (reg_id << 4))) = val; +} + +static inline uint32_t kvm_apic_get_reg(struct kvm_lapic_state *kapic, + int reg_id) +{ + return *((uint32_t *)(kapic->regs + (reg_id << 4))); +} + +static void kvm_put_apic_state(APICCommonState *s, struct kvm_lapic_state *kapic) +{ + int i; + + memset(kapic, 0, sizeof(*kapic)); + if (kvm_has_x2apic_api() && s->apicbase & MSR_IA32_APICBASE_EXTD) { + kvm_apic_set_reg(kapic, 0x2, s->initial_apic_id); + } else { + kvm_apic_set_reg(kapic, 0x2, s->id << 24); + } + kvm_apic_set_reg(kapic, 0x8, s->tpr); + kvm_apic_set_reg(kapic, 0xd, s->log_dest << 24); + kvm_apic_set_reg(kapic, 0xe, s->dest_mode << 28 | 0x0fffffff); + kvm_apic_set_reg(kapic, 0xf, s->spurious_vec); + for (i = 0; i < 8; i++) { + kvm_apic_set_reg(kapic, 0x10 + i, s->isr[i]); + kvm_apic_set_reg(kapic, 0x18 + i, s->tmr[i]); + kvm_apic_set_reg(kapic, 0x20 + i, s->irr[i]); + } + kvm_apic_set_reg(kapic, 0x28, s->esr); + kvm_apic_set_reg(kapic, 0x30, s->icr[0]); + kvm_apic_set_reg(kapic, 0x31, s->icr[1]); + for (i = 0; i < APIC_LVT_NB; i++) { + kvm_apic_set_reg(kapic, 0x32 + i, s->lvt[i]); + } + kvm_apic_set_reg(kapic, 0x38, s->initial_count); + kvm_apic_set_reg(kapic, 0x3e, s->divide_conf); +} + +void kvm_get_apic_state(DeviceState *dev, struct kvm_lapic_state *kapic) +{ + APICCommonState *s = APIC_COMMON(dev); + int i, v; + + if (kvm_has_x2apic_api() && s->apicbase & MSR_IA32_APICBASE_EXTD) { + assert(kvm_apic_get_reg(kapic, 0x2) == s->initial_apic_id); + } else { + s->id = kvm_apic_get_reg(kapic, 0x2) >> 24; + } + s->tpr = kvm_apic_get_reg(kapic, 0x8); + s->arb_id = kvm_apic_get_reg(kapic, 0x9); + s->log_dest = kvm_apic_get_reg(kapic, 0xd) >> 24; + s->dest_mode = kvm_apic_get_reg(kapic, 0xe) >> 28; + s->spurious_vec = kvm_apic_get_reg(kapic, 0xf); + for (i = 0; i < 8; i++) { + s->isr[i] = kvm_apic_get_reg(kapic, 0x10 + i); + s->tmr[i] = kvm_apic_get_reg(kapic, 0x18 + i); + s->irr[i] = kvm_apic_get_reg(kapic, 0x20 + i); + } + s->esr = kvm_apic_get_reg(kapic, 0x28); + s->icr[0] = kvm_apic_get_reg(kapic, 0x30); + s->icr[1] = kvm_apic_get_reg(kapic, 0x31); + for (i = 0; i < APIC_LVT_NB; i++) { + s->lvt[i] = kvm_apic_get_reg(kapic, 0x32 + i); + } + s->initial_count = kvm_apic_get_reg(kapic, 0x38); + s->divide_conf = kvm_apic_get_reg(kapic, 0x3e); + + v = (s->divide_conf & 3) | ((s->divide_conf >> 1) & 4); + s->count_shift = (v + 1) & 7; + + s->initial_count_load_time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + apic_next_timer(s, s->initial_count_load_time); +} + +static void kvm_apic_set_base(APICCommonState *s, uint64_t val) +{ + s->apicbase = val; +} + +static void kvm_apic_set_tpr(APICCommonState *s, uint8_t val) +{ + s->tpr = (val & 0x0f) << 4; +} + +static uint8_t kvm_apic_get_tpr(APICCommonState *s) +{ + return s->tpr >> 4; +} + +static void kvm_apic_enable_tpr_reporting(APICCommonState *s, bool enable) +{ + struct kvm_tpr_access_ctl ctl = { + .enabled = enable + }; + + kvm_vcpu_ioctl(CPU(s->cpu), KVM_TPR_ACCESS_REPORTING, &ctl); +} + +static void kvm_apic_vapic_base_update(APICCommonState *s) +{ + struct kvm_vapic_addr vapid_addr = { + .vapic_addr = s->vapic_paddr, + }; + int ret; + + ret = kvm_vcpu_ioctl(CPU(s->cpu), KVM_SET_VAPIC_ADDR, &vapid_addr); + if (ret < 0) { + 
fprintf(stderr, "KVM: setting VAPIC address failed (%s)\n", + strerror(-ret)); + abort(); + } +} + +static void kvm_apic_put(CPUState *cs, run_on_cpu_data data) +{ + APICCommonState *s = data.host_ptr; + struct kvm_lapic_state kapic; + int ret; + + kvm_put_apicbase(s->cpu, s->apicbase); + kvm_put_apic_state(s, &kapic); + + ret = kvm_vcpu_ioctl(CPU(s->cpu), KVM_SET_LAPIC, &kapic); + if (ret < 0) { + fprintf(stderr, "KVM_SET_LAPIC failed: %s\n", strerror(-ret)); + abort(); + } +} + +static void kvm_apic_post_load(APICCommonState *s) +{ + run_on_cpu(CPU(s->cpu), kvm_apic_put, RUN_ON_CPU_HOST_PTR(s)); +} + +static void do_inject_external_nmi(CPUState *cpu, run_on_cpu_data data) +{ + APICCommonState *s = data.host_ptr; + uint32_t lvt; + int ret; + + cpu_synchronize_state(cpu); + + lvt = s->lvt[APIC_LVT_LINT1]; + if (!(lvt & APIC_LVT_MASKED) && ((lvt >> 8) & 7) == APIC_DM_NMI) { + ret = kvm_vcpu_ioctl(cpu, KVM_NMI); + if (ret < 0) { + fprintf(stderr, "KVM: injection failed, NMI lost (%s)\n", + strerror(-ret)); + } + } +} + +static void kvm_apic_external_nmi(APICCommonState *s) +{ + run_on_cpu(CPU(s->cpu), do_inject_external_nmi, RUN_ON_CPU_HOST_PTR(s)); +} + +static void kvm_send_msi(MSIMessage *msg) +{ + int ret; + + /* + * The message has already passed through interrupt remapping if enabled, + * but the legacy extended destination ID in low bits still needs to be + * handled. + */ + msg->address = kvm_swizzle_msi_ext_dest_id(msg->address); + + ret = kvm_irqchip_send_msi(kvm_state, *msg); + if (ret < 0) { + fprintf(stderr, "KVM: injection failed, MSI lost (%s)\n", + strerror(-ret)); + } +} + +static uint64_t kvm_apic_mem_read(void *opaque, hwaddr addr, + unsigned size) +{ + return ~(uint64_t)0; +} + +static void kvm_apic_mem_write(void *opaque, hwaddr addr, + uint64_t data, unsigned size) +{ + MSIMessage msg = { .address = addr, .data = data }; + + kvm_send_msi(&msg); +} + +static const MemoryRegionOps kvm_apic_io_ops = { + .read = kvm_apic_mem_read, + .write = kvm_apic_mem_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void kvm_apic_reset(APICCommonState *s) +{ + /* Not used by KVM, which uses the CPU mp_state instead. 
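The kvm_apic_set_reg()/kvm_apic_get_reg() helpers earlier in this file rely on kvm_lapic_state's regs buffer mirroring the APIC register page, one 32-bit register every 16 bytes (hence reg_id << 4). A standalone illustration, assuming a little-endian host as on x86; memcpy() is used here to sidestep the aligned-cast idiom.

#include <assert.h>
#include <stdint.h>
#include <string.h>

static uint32_t reg_get(const char *regs, int reg_id)
{
    uint32_t val;

    memcpy(&val, regs + (reg_id << 4), sizeof(val));  /* 16-byte stride */
    return val;
}

int main(void)
{
    char regs[1024] = { 0 };

    regs[0x80] = 0x42;                   /* TPR is register 0x8 -> offset 0x80 */
    assert(reg_get(regs, 0x8) == 0x42);  /* little-endian host assumed */
    return 0;
}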
*/ + s->wait_for_sipi = 0; + + run_on_cpu(CPU(s->cpu), kvm_apic_put, RUN_ON_CPU_HOST_PTR(s)); +} + +static void kvm_apic_realize(DeviceState *dev, Error **errp) +{ + APICCommonState *s = APIC_COMMON(dev); + + memory_region_init_io(&s->io_memory, OBJECT(s), &kvm_apic_io_ops, s, + "kvm-apic-msi", APIC_SPACE_SIZE); + + assert(kvm_has_gsi_routing()); + msi_nonbroken = true; +} + +static void kvm_apic_unrealize(DeviceState *dev) +{ +} + +static void kvm_apic_class_init(ObjectClass *klass, void *data) +{ + APICCommonClass *k = APIC_COMMON_CLASS(klass); + + k->realize = kvm_apic_realize; + k->unrealize = kvm_apic_unrealize; + k->reset = kvm_apic_reset; + k->set_base = kvm_apic_set_base; + k->set_tpr = kvm_apic_set_tpr; + k->get_tpr = kvm_apic_get_tpr; + k->post_load = kvm_apic_post_load; + k->enable_tpr_reporting = kvm_apic_enable_tpr_reporting; + k->vapic_base_update = kvm_apic_vapic_base_update; + k->external_nmi = kvm_apic_external_nmi; + k->send_msi = kvm_send_msi; +} + +static const TypeInfo kvm_apic_info = { + .name = "kvm-apic", + .parent = TYPE_APIC_COMMON, + .instance_size = sizeof(APICCommonState), + .class_init = kvm_apic_class_init, +}; + +static void kvm_apic_register_types(void) +{ + type_register_static(&kvm_apic_info); +} + +type_init(kvm_apic_register_types) diff --git a/hw/i386/kvm/clock.c b/hw/i386/kvm/clock.c new file mode 100644 index 000000000..df70b4a03 --- /dev/null +++ b/hw/i386/kvm/clock.c @@ -0,0 +1,350 @@ +/* + * QEMU KVM support, paravirtual clock device + * + * Copyright (C) 2011 Siemens AG + * + * Authors: + * Jan Kiszka + * + * This work is licensed under the terms of the GNU GPL version 2. + * See the COPYING file in the top-level directory. + * + * Contributions after 2012-01-13 are licensed under the terms of the + * GNU GPL, version 2 or (at your option) any later version. 
+ */ + +#include "qemu/osdep.h" +#include "qemu/host-utils.h" +#include "qemu/module.h" +#include "sysemu/kvm.h" +#include "sysemu/runstate.h" +#include "sysemu/hw_accel.h" +#include "kvm/kvm_i386.h" +#include "migration/vmstate.h" +#include "hw/sysbus.h" +#include "hw/kvm/clock.h" +#include "hw/qdev-properties.h" +#include "qapi/error.h" + +#include +#include "standard-headers/asm-x86/kvm_para.h" +#include "qom/object.h" + +#define TYPE_KVM_CLOCK "kvmclock" +OBJECT_DECLARE_SIMPLE_TYPE(KVMClockState, KVM_CLOCK) + +struct KVMClockState { + /*< private >*/ + SysBusDevice busdev; + /*< public >*/ + + uint64_t clock; + bool clock_valid; + + /* whether the 'clock' value was obtained in the 'paused' state */ + bool runstate_paused; + + /* whether machine type supports reliable KVM_GET_CLOCK */ + bool mach_use_reliable_get_clock; + + /* whether the 'clock' value was obtained in a host with + * reliable KVM_GET_CLOCK */ + bool clock_is_reliable; +}; + +struct pvclock_vcpu_time_info { + uint32_t version; + uint32_t pad0; + uint64_t tsc_timestamp; + uint64_t system_time; + uint32_t tsc_to_system_mul; + int8_t tsc_shift; + uint8_t flags; + uint8_t pad[2]; +} __attribute__((__packed__)); /* 32 bytes */ + +static uint64_t kvmclock_current_nsec(KVMClockState *s) +{ + CPUState *cpu = first_cpu; + CPUX86State *env = cpu->env_ptr; + hwaddr kvmclock_struct_pa; + uint64_t migration_tsc = env->tsc; + struct pvclock_vcpu_time_info time; + uint64_t delta; + uint64_t nsec_lo; + uint64_t nsec_hi; + uint64_t nsec; + + cpu_synchronize_state(cpu); + + if (!(env->system_time_msr & 1ULL)) { + /* KVM clock not active */ + return 0; + } + + kvmclock_struct_pa = env->system_time_msr & ~1ULL; + cpu_physical_memory_read(kvmclock_struct_pa, &time, sizeof(time)); + + assert(time.tsc_timestamp <= migration_tsc); + delta = migration_tsc - time.tsc_timestamp; + if (time.tsc_shift < 0) { + delta >>= -time.tsc_shift; + } else { + delta <<= time.tsc_shift; + } + + mulu64(&nsec_lo, &nsec_hi, delta, time.tsc_to_system_mul); + nsec = (nsec_lo >> 32) | (nsec_hi << 32); + return nsec + time.system_time; +} + +static void kvm_update_clock(KVMClockState *s) +{ + struct kvm_clock_data data; + int ret; + + ret = kvm_vm_ioctl(kvm_state, KVM_GET_CLOCK, &data); + if (ret < 0) { + fprintf(stderr, "KVM_GET_CLOCK failed: %s\n", strerror(-ret)); + abort(); + } + s->clock = data.clock; + + /* If kvm_has_adjust_clock_stable() is false, KVM_GET_CLOCK returns + * essentially CLOCK_MONOTONIC plus a guest-specific adjustment. This + * can drift from the TSC-based value that is computed by the guest, + * so we need to go through kvmclock_current_nsec(). If + * kvm_has_adjust_clock_stable() is true, and the flags contain + * KVM_CLOCK_TSC_STABLE, then KVM_GET_CLOCK returns a TSC-based value + * and kvmclock_current_nsec() is not necessary. + * + * Here, however, we need not check KVM_CLOCK_TSC_STABLE. This is because: + * + * - if the host has disabled the kvmclock master clock, the guest already + * has protection against time going backwards. This "safety net" is only + * absent when kvmclock is stable; + * + * - therefore, we can replace a check like + * + * if last KVM_GET_CLOCK was not reliable then + * read from memory + * + * with + * + * if last KVM_GET_CLOCK was not reliable && masterclock is enabled + * read from memory + * + * However: + * + * - if kvm_has_adjust_clock_stable() returns false, the left side is + * always true (KVM_GET_CLOCK is never reliable), and the right side is + * unknown (because we don't have data.flags). 
We must assume it's true + * and read from memory. + * + * - if kvm_has_adjust_clock_stable() returns true, the result of the && + * is always false (masterclock is enabled iff KVM_GET_CLOCK is reliable) + * + * So we can just use this instead: + * + * if !kvm_has_adjust_clock_stable() then + * read from memory + */ + s->clock_is_reliable = kvm_has_adjust_clock_stable(); +} + +static void do_kvmclock_ctrl(CPUState *cpu, run_on_cpu_data data) +{ + int ret = kvm_vcpu_ioctl(cpu, KVM_KVMCLOCK_CTRL, 0); + + if (ret && ret != -EINVAL) { + fprintf(stderr, "%s: %s\n", __func__, strerror(-ret)); + } +} + +static void kvmclock_vm_state_change(void *opaque, bool running, + RunState state) +{ + KVMClockState *s = opaque; + CPUState *cpu; + int cap_clock_ctrl = kvm_check_extension(kvm_state, KVM_CAP_KVMCLOCK_CTRL); + int ret; + + if (running) { + struct kvm_clock_data data = {}; + + /* + * If the host where s->clock was read did not support reliable + * KVM_GET_CLOCK, read kvmclock value from memory. + */ + if (!s->clock_is_reliable) { + uint64_t pvclock_via_mem = kvmclock_current_nsec(s); + /* We can't rely on the saved clock value, just discard it */ + if (pvclock_via_mem) { + s->clock = pvclock_via_mem; + } + } + + s->clock_valid = false; + + data.clock = s->clock; + ret = kvm_vm_ioctl(kvm_state, KVM_SET_CLOCK, &data); + if (ret < 0) { + fprintf(stderr, "KVM_SET_CLOCK failed: %s\n", strerror(-ret)); + abort(); + } + + if (!cap_clock_ctrl) { + return; + } + CPU_FOREACH(cpu) { + run_on_cpu(cpu, do_kvmclock_ctrl, RUN_ON_CPU_NULL); + } + } else { + + if (s->clock_valid) { + return; + } + + s->runstate_paused = runstate_check(RUN_STATE_PAUSED); + + kvm_synchronize_all_tsc(); + + kvm_update_clock(s); + /* + * If the VM is stopped, declare the clock state valid to + * avoid re-reading it on next vmsave (which would return + * a different value). Will be reset when the VM is continued. + */ + s->clock_valid = true; + } +} + +static void kvmclock_realize(DeviceState *dev, Error **errp) +{ + KVMClockState *s = KVM_CLOCK(dev); + + if (!kvm_enabled()) { + error_setg(errp, "kvmclock device requires KVM"); + return; + } + + kvm_update_clock(s); + + qemu_add_vm_change_state_handler(kvmclock_vm_state_change, s); +} + +static bool kvmclock_clock_is_reliable_needed(void *opaque) +{ + KVMClockState *s = opaque; + + return s->mach_use_reliable_get_clock; +} + +static const VMStateDescription kvmclock_reliable_get_clock = { + .name = "kvmclock/clock_is_reliable", + .version_id = 1, + .minimum_version_id = 1, + .needed = kvmclock_clock_is_reliable_needed, + .fields = (VMStateField[]) { + VMSTATE_BOOL(clock_is_reliable, KVMClockState), + VMSTATE_END_OF_LIST() + } +}; + +/* + * When migrating, assume the source has an unreliable + * KVM_GET_CLOCK unless told otherwise. + */ +static int kvmclock_pre_load(void *opaque) +{ + KVMClockState *s = opaque; + + s->clock_is_reliable = false; + + return 0; +} + +/* + * When migrating a running guest, read the clock just + * before migration, so that the guest clock counts + * during the events between: + * + * * vm_stop() + * * + * * pre_save() + * + * This reduces kvmclock difference on migration from 5s + * to 0.1s (when max_downtime == 5s), because sending the + * final pages of memory (which happens between vm_stop() + * and pre_save()) takes max_downtime. 
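+ * + * If the guest was already paused when 'clock' was captured, that + * value is still current, so pre_save skips the re-read (this is + * what runstate_paused below records).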
+ */ +static int kvmclock_pre_save(void *opaque) +{ + KVMClockState *s = opaque; + + if (!s->runstate_paused) { + kvm_update_clock(s); + } + + return 0; +} + +static const VMStateDescription kvmclock_vmsd = { + .name = "kvmclock", + .version_id = 1, + .minimum_version_id = 1, + .pre_load = kvmclock_pre_load, + .pre_save = kvmclock_pre_save, + .fields = (VMStateField[]) { + VMSTATE_UINT64(clock, KVMClockState), + VMSTATE_END_OF_LIST() + }, + .subsections = (const VMStateDescription * []) { + &kvmclock_reliable_get_clock, + NULL + } +}; + +static Property kvmclock_properties[] = { + DEFINE_PROP_BOOL("x-mach-use-reliable-get-clock", KVMClockState, + mach_use_reliable_get_clock, true), + DEFINE_PROP_END_OF_LIST(), +}; + +static void kvmclock_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = kvmclock_realize; + dc->vmsd = &kvmclock_vmsd; + device_class_set_props(dc, kvmclock_properties); +} + +static const TypeInfo kvmclock_info = { + .name = TYPE_KVM_CLOCK, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(KVMClockState), + .class_init = kvmclock_class_init, +}; + +/* Note: Must be called after VCPU initialization. */ +void kvmclock_create(bool create_always) +{ + X86CPU *cpu = X86_CPU(first_cpu); + + if (!kvm_enabled() || !kvm_has_adjust_clock()) + return; + + if (create_always || + cpu->env.features[FEAT_KVM] & ((1ULL << KVM_FEATURE_CLOCKSOURCE) | + (1ULL << KVM_FEATURE_CLOCKSOURCE2))) { + sysbus_create_simple(TYPE_KVM_CLOCK, -1, NULL); + } +} + +static void kvmclock_register_types(void) +{ + type_register_static(&kvmclock_info); +} + +type_init(kvmclock_register_types) diff --git a/hw/i386/kvm/i8254.c b/hw/i386/kvm/i8254.c new file mode 100644 index 000000000..191a26fa5 --- /dev/null +++ b/hw/i386/kvm/i8254.c @@ -0,0 +1,337 @@ +/* + * KVM in-kernel PIT (i8254) support + * + * Copyright (c) 2003-2004 Fabrice Bellard + * Copyright (c) 2012 Jan Kiszka, Siemens AG + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include +#include "qapi/qapi-types-machine.h" +#include "qapi/error.h" +#include "qemu/module.h" +#include "qemu/timer.h" +#include "sysemu/runstate.h" +#include "hw/timer/i8254.h" +#include "hw/timer/i8254_internal.h" +#include "hw/qdev-properties-system.h" +#include "sysemu/kvm.h" +#include "qom/object.h" + +#define KVM_PIT_REINJECT_BIT 0 + +#define CALIBRATION_ROUNDS 3 + +typedef struct KVMPITClass KVMPITClass; +typedef struct KVMPITState KVMPITState; +DECLARE_OBJ_CHECKERS(KVMPITState, KVMPITClass, + KVM_PIT, TYPE_KVM_I8254) + +struct KVMPITState { + PITCommonState parent_obj; + + LostTickPolicy lost_tick_policy; + bool vm_stopped; + int64_t kernel_clock_offset; +}; + +struct KVMPITClass { + PITCommonClass parent_class; + + DeviceRealize parent_realize; +}; + +static void kvm_pit_update_clock_offset(KVMPITState *s) +{ + int64_t offset, clock_offset; + struct timespec ts; + int i; + + /* + * Measure the delta between CLOCK_MONOTONIC, the base used for + * kvm_pit_channel_state::count_load_time, and QEMU_CLOCK_VIRTUAL. Take the + * minimum of several samples to filter out scheduling noise. + */ + clock_offset = INT64_MAX; + for (i = 0; i < CALIBRATION_ROUNDS; i++) { + offset = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + clock_gettime(CLOCK_MONOTONIC, &ts); + offset -= ts.tv_nsec; + offset -= (int64_t)ts.tv_sec * 1000000000; + if (uabs64(offset) < uabs64(clock_offset)) { + clock_offset = offset; + } + } + s->kernel_clock_offset = clock_offset; +} + +static void kvm_pit_get(PITCommonState *pit) +{ + KVMPITState *s = KVM_PIT(pit); + struct kvm_pit_state2 kpit; + struct kvm_pit_channel_state *kchan; + struct PITChannelState *sc; + int i, ret; + + /* No need to re-read the state if VM is stopped. */ + if (s->vm_stopped) { + return; + } + + if (kvm_has_pit_state2()) { + ret = kvm_vm_ioctl(kvm_state, KVM_GET_PIT2, &kpit); + if (ret < 0) { + fprintf(stderr, "KVM_GET_PIT2 failed: %s\n", strerror(-ret)); + abort(); + } + pit->channels[0].irq_disabled = kpit.flags & KVM_PIT_FLAGS_HPET_LEGACY; + } else { + /* + * kvm_pit_state2 is superset of kvm_pit_state struct, + * so we can use it for KVM_GET_PIT as well. + */ + ret = kvm_vm_ioctl(kvm_state, KVM_GET_PIT, &kpit); + if (ret < 0) { + fprintf(stderr, "KVM_GET_PIT failed: %s\n", strerror(-ret)); + abort(); + } + } + for (i = 0; i < 3; i++) { + kchan = &kpit.channels[i]; + sc = &pit->channels[i]; + sc->count = kchan->count; + sc->latched_count = kchan->latched_count; + sc->count_latched = kchan->count_latched; + sc->status_latched = kchan->status_latched; + sc->status = kchan->status; + sc->read_state = kchan->read_state; + sc->write_state = kchan->write_state; + sc->write_latch = kchan->write_latch; + sc->rw_mode = kchan->rw_mode; + sc->mode = kchan->mode; + sc->bcd = kchan->bcd; + sc->gate = kchan->gate; + sc->count_load_time = kchan->count_load_time + s->kernel_clock_offset; + } + + sc = &pit->channels[0]; + sc->next_transition_time = + pit_get_next_transition_time(sc, sc->count_load_time); +} + +static void kvm_pit_put(PITCommonState *pit) +{ + KVMPITState *s = KVM_PIT(pit); + struct kvm_pit_state2 kpit = {}; + struct kvm_pit_channel_state *kchan; + struct PITChannelState *sc; + int i, ret; + + /* The offset keeps changing as long as the VM is stopped. */ + if (s->vm_stopped) { + kvm_pit_update_clock_offset(s); + } + + kpit.flags = pit->channels[0].irq_disabled ? 
KVM_PIT_FLAGS_HPET_LEGACY : 0; + for (i = 0; i < 3; i++) { + kchan = &kpit.channels[i]; + sc = &pit->channels[i]; + kchan->count = sc->count; + kchan->latched_count = sc->latched_count; + kchan->count_latched = sc->count_latched; + kchan->status_latched = sc->status_latched; + kchan->status = sc->status; + kchan->read_state = sc->read_state; + kchan->write_state = sc->write_state; + kchan->write_latch = sc->write_latch; + kchan->rw_mode = sc->rw_mode; + kchan->mode = sc->mode; + kchan->bcd = sc->bcd; + kchan->gate = sc->gate; + kchan->count_load_time = sc->count_load_time - s->kernel_clock_offset; + } + + ret = kvm_vm_ioctl(kvm_state, + kvm_has_pit_state2() ? KVM_SET_PIT2 : KVM_SET_PIT, + &kpit); + if (ret < 0) { + fprintf(stderr, "%s failed: %s\n", + kvm_has_pit_state2() ? "KVM_SET_PIT2" : "KVM_SET_PIT", + strerror(-ret)); + abort(); + } +} + +static void kvm_pit_set_gate(PITCommonState *s, PITChannelState *sc, int val) +{ + kvm_pit_get(s); + + switch (sc->mode) { + default: + case 0: + case 4: + /* XXX: just disable/enable counting */ + break; + case 1: + case 2: + case 3: + case 5: + if (sc->gate < val) { + /* restart counting on rising edge */ + sc->count_load_time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + } + break; + } + sc->gate = val; + + kvm_pit_put(s); +} + +static void kvm_pit_get_channel_info(PITCommonState *s, PITChannelState *sc, + PITChannelInfo *info) +{ + kvm_pit_get(s); + + pit_get_channel_info_common(s, sc, info); +} + +static void kvm_pit_reset(DeviceState *dev) +{ + PITCommonState *s = PIT_COMMON(dev); + + pit_reset_common(s); + + kvm_pit_put(s); +} + +static void kvm_pit_irq_control(void *opaque, int n, int enable) +{ + PITCommonState *pit = opaque; + PITChannelState *s = &pit->channels[0]; + + kvm_pit_get(pit); + + s->irq_disabled = !enable; + + kvm_pit_put(pit); +} + +static void kvm_pit_vm_state_change(void *opaque, bool running, + RunState state) +{ + KVMPITState *s = opaque; + + if (running) { + kvm_pit_update_clock_offset(s); + kvm_pit_put(PIT_COMMON(s)); + s->vm_stopped = false; + } else { + kvm_pit_update_clock_offset(s); + kvm_pit_get(PIT_COMMON(s)); + s->vm_stopped = true; + } +} + +static void kvm_pit_realizefn(DeviceState *dev, Error **errp) +{ + PITCommonState *pit = PIT_COMMON(dev); + KVMPITClass *kpc = KVM_PIT_GET_CLASS(dev); + KVMPITState *s = KVM_PIT(pit); + struct kvm_pit_config config = { + .flags = 0, + }; + int ret; + + if (kvm_check_extension(kvm_state, KVM_CAP_PIT2)) { + ret = kvm_vm_ioctl(kvm_state, KVM_CREATE_PIT2, &config); + } else { + ret = kvm_vm_ioctl(kvm_state, KVM_CREATE_PIT); + } + if (ret < 0) { + error_setg(errp, "Create kernel PIT failed: %s", + strerror(-ret)); + return; + } + switch (s->lost_tick_policy) { + case LOST_TICK_POLICY_DELAY: + break; /* enabled by default */ + case LOST_TICK_POLICY_DISCARD: + if (kvm_check_extension(kvm_state, KVM_CAP_REINJECT_CONTROL)) { + struct kvm_reinject_control control = { .pit_reinject = 0 }; + + ret = kvm_vm_ioctl(kvm_state, KVM_REINJECT_CONTROL, &control); + if (ret < 0) { + error_setg(errp, + "Can't disable in-kernel PIT reinjection: %s", + strerror(-ret)); + return; + } + } + break; + default: + error_setg(errp, "Lost tick policy not supported."); + return; + } + + memory_region_init_io(&pit->ioports, OBJECT(dev), NULL, NULL, "kvm-pit", 4); + + qdev_init_gpio_in(dev, kvm_pit_irq_control, 1); + + qemu_add_vm_change_state_handler(kvm_pit_vm_state_change, s); + + kpc->parent_realize(dev, errp); +} + +static Property kvm_pit_properties[] = { + DEFINE_PROP_UINT32("iobase",
PITCommonState, iobase, -1), + DEFINE_PROP_LOSTTICKPOLICY("lost_tick_policy", KVMPITState, + lost_tick_policy, LOST_TICK_POLICY_DELAY), + DEFINE_PROP_END_OF_LIST(), +}; + +static void kvm_pit_class_init(ObjectClass *klass, void *data) +{ + KVMPITClass *kpc = KVM_PIT_CLASS(klass); + PITCommonClass *k = PIT_COMMON_CLASS(klass); + DeviceClass *dc = DEVICE_CLASS(klass); + + device_class_set_parent_realize(dc, kvm_pit_realizefn, + &kpc->parent_realize); + k->set_channel_gate = kvm_pit_set_gate; + k->get_channel_info = kvm_pit_get_channel_info; + dc->reset = kvm_pit_reset; + device_class_set_props(dc, kvm_pit_properties); +} + +static const TypeInfo kvm_pit_info = { + .name = TYPE_KVM_I8254, + .parent = TYPE_PIT_COMMON, + .instance_size = sizeof(KVMPITState), + .class_init = kvm_pit_class_init, + .class_size = sizeof(KVMPITClass), +}; + +static void kvm_pit_register(void) +{ + type_register_static(&kvm_pit_info); +} + +type_init(kvm_pit_register) diff --git a/hw/i386/kvm/i8259.c b/hw/i386/kvm/i8259.c new file mode 100644 index 000000000..d61bae4dc --- /dev/null +++ b/hw/i386/kvm/i8259.c @@ -0,0 +1,167 @@ +/* + * KVM in-kernel PIC (i8259) support + * + * Copyright (c) 2011 Siemens AG + * + * Authors: + * Jan Kiszka + * + * This work is licensed under the terms of the GNU GPL version 2. + * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "hw/isa/i8259_internal.h" +#include "hw/intc/i8259.h" +#include "qemu/module.h" +#include "hw/i386/apic_internal.h" +#include "hw/irq.h" +#include "sysemu/kvm.h" +#include "qom/object.h" + +#define TYPE_KVM_I8259 "kvm-i8259" +typedef struct KVMPICClass KVMPICClass; +DECLARE_CLASS_CHECKERS(KVMPICClass, KVM_PIC, + TYPE_KVM_I8259) + +/** + * KVMPICClass: + * @parent_realize: The parent's realizefn. + */ +struct KVMPICClass { + PICCommonClass parent_class; + + DeviceRealize parent_realize; +}; + +static void kvm_pic_get(PICCommonState *s) +{ + struct kvm_irqchip chip; + struct kvm_pic_state *kpic; + int ret; + + chip.chip_id = s->master ? KVM_IRQCHIP_PIC_MASTER : KVM_IRQCHIP_PIC_SLAVE; + ret = kvm_vm_ioctl(kvm_state, KVM_GET_IRQCHIP, &chip); + if (ret < 0) { + fprintf(stderr, "KVM_GET_IRQCHIP failed: %s\n", strerror(-ret)); + abort(); + } + + kpic = &chip.chip.pic; + + s->last_irr = kpic->last_irr; + s->irr = kpic->irr; + s->imr = kpic->imr; + s->isr = kpic->isr; + s->priority_add = kpic->priority_add; + s->irq_base = kpic->irq_base; + s->read_reg_select = kpic->read_reg_select; + s->poll = kpic->poll; + s->special_mask = kpic->special_mask; + s->init_state = kpic->init_state; + s->auto_eoi = kpic->auto_eoi; + s->rotate_on_auto_eoi = kpic->rotate_on_auto_eoi; + s->special_fully_nested_mode = kpic->special_fully_nested_mode; + s->init4 = kpic->init4; + s->elcr = kpic->elcr; + s->elcr_mask = kpic->elcr_mask; +} + +static void kvm_pic_put(PICCommonState *s) +{ + struct kvm_irqchip chip; + struct kvm_pic_state *kpic; + int ret; + + chip.chip_id = s->master ? 
KVM_IRQCHIP_PIC_MASTER : KVM_IRQCHIP_PIC_SLAVE; + + kpic = &chip.chip.pic; + + kpic->last_irr = s->last_irr; + kpic->irr = s->irr; + kpic->imr = s->imr; + kpic->isr = s->isr; + kpic->priority_add = s->priority_add; + kpic->irq_base = s->irq_base; + kpic->read_reg_select = s->read_reg_select; + kpic->poll = s->poll; + kpic->special_mask = s->special_mask; + kpic->init_state = s->init_state; + kpic->auto_eoi = s->auto_eoi; + kpic->rotate_on_auto_eoi = s->rotate_on_auto_eoi; + kpic->special_fully_nested_mode = s->special_fully_nested_mode; + kpic->init4 = s->init4; + kpic->elcr = s->elcr; + kpic->elcr_mask = s->elcr_mask; + + ret = kvm_vm_ioctl(kvm_state, KVM_SET_IRQCHIP, &chip); + if (ret < 0) { + fprintf(stderr, "KVM_SET_IRQCHIP failed: %s\n", strerror(-ret)); + abort(); + } +} + +static void kvm_pic_reset(DeviceState *dev) +{ + PICCommonState *s = PIC_COMMON(dev); + + s->elcr = 0; + pic_reset_common(s); + + kvm_pic_put(s); +} + +static void kvm_pic_set_irq(void *opaque, int irq, int level) +{ + int delivered; + + pic_stat_update_irq(irq, level); + delivered = kvm_set_irq(kvm_state, irq, level); + apic_report_irq_delivered(delivered); +} + +static void kvm_pic_realize(DeviceState *dev, Error **errp) +{ + PICCommonState *s = PIC_COMMON(dev); + KVMPICClass *kpc = KVM_PIC_GET_CLASS(dev); + + memory_region_init_io(&s->base_io, OBJECT(dev), NULL, NULL, "kvm-pic", 2); + memory_region_init_io(&s->elcr_io, OBJECT(dev), NULL, NULL, "kvm-elcr", 1); + + kpc->parent_realize(dev, errp); +} + +qemu_irq *kvm_i8259_init(ISABus *bus) +{ + i8259_init_chip(TYPE_KVM_I8259, bus, true); + i8259_init_chip(TYPE_KVM_I8259, bus, false); + + return qemu_allocate_irqs(kvm_pic_set_irq, NULL, ISA_NUM_IRQS); +} + +static void kvm_i8259_class_init(ObjectClass *klass, void *data) +{ + KVMPICClass *kpc = KVM_PIC_CLASS(klass); + PICCommonClass *k = PIC_COMMON_CLASS(klass); + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = kvm_pic_reset; + device_class_set_parent_realize(dc, kvm_pic_realize, &kpc->parent_realize); + k->pre_save = kvm_pic_get; + k->post_load = kvm_pic_put; +} + +static const TypeInfo kvm_i8259_info = { + .name = TYPE_KVM_I8259, + .parent = TYPE_PIC_COMMON, + .instance_size = sizeof(PICCommonState), + .class_init = kvm_i8259_class_init, + .class_size = sizeof(KVMPICClass), +}; + +static void kvm_pic_register_types(void) +{ + type_register_static(&kvm_i8259_info); +} + +type_init(kvm_pic_register_types) diff --git a/hw/i386/kvm/ioapic.c b/hw/i386/kvm/ioapic.c new file mode 100644 index 000000000..ee7c8ef68 --- /dev/null +++ b/hw/i386/kvm/ioapic.c @@ -0,0 +1,165 @@ +/* + * KVM in-kernel IOAPIC support + * + * Copyright (c) 2011 Siemens AG + * + * Authors: + * Jan Kiszka + * + * This work is licensed under the terms of the GNU GPL version 2. + * See the COPYING file in the top-level directory.
+ */ + +#include "qemu/osdep.h" +#include "monitor/monitor.h" +#include "hw/i386/x86.h" +#include "hw/qdev-properties.h" +#include "hw/i386/ioapic_internal.h" +#include "hw/i386/apic_internal.h" +#include "sysemu/kvm.h" + +/* PC Utility function */ +void kvm_pc_setup_irq_routing(bool pci_enabled) +{ + KVMState *s = kvm_state; + int i; + + assert(kvm_has_gsi_routing()); + for (i = 0; i < 8; ++i) { + if (i == 2) { + continue; + } + kvm_irqchip_add_irq_route(s, i, KVM_IRQCHIP_PIC_MASTER, i); + } + for (i = 8; i < 16; ++i) { + kvm_irqchip_add_irq_route(s, i, KVM_IRQCHIP_PIC_SLAVE, i - 8); + } + if (pci_enabled) { + for (i = 0; i < 24; ++i) { + if (i == 0) { + kvm_irqchip_add_irq_route(s, i, KVM_IRQCHIP_IOAPIC, 2); + } else if (i != 2) { + kvm_irqchip_add_irq_route(s, i, KVM_IRQCHIP_IOAPIC, i); + } + } + } + kvm_irqchip_commit_routes(s); +} + +typedef struct KVMIOAPICState KVMIOAPICState; + +struct KVMIOAPICState { + IOAPICCommonState ioapic; + uint32_t kvm_gsi_base; +}; + +static void kvm_ioapic_get(IOAPICCommonState *s) +{ + struct kvm_irqchip chip; + struct kvm_ioapic_state *kioapic; + int ret, i; + + chip.chip_id = KVM_IRQCHIP_IOAPIC; + ret = kvm_vm_ioctl(kvm_state, KVM_GET_IRQCHIP, &chip); + if (ret < 0) { + fprintf(stderr, "KVM_GET_IRQCHIP failed: %s\n", strerror(-ret)); + abort(); + } + + kioapic = &chip.chip.ioapic; + + s->id = kioapic->id; + s->ioregsel = kioapic->ioregsel; + s->irr = kioapic->irr; + for (i = 0; i < IOAPIC_NUM_PINS; i++) { + s->ioredtbl[i] = kioapic->redirtbl[i].bits; + } +} + +static void kvm_ioapic_put(IOAPICCommonState *s) +{ + struct kvm_irqchip chip; + struct kvm_ioapic_state *kioapic; + int ret, i; + + chip.chip_id = KVM_IRQCHIP_IOAPIC; + kioapic = &chip.chip.ioapic; + + kioapic->id = s->id; + kioapic->ioregsel = s->ioregsel; + kioapic->base_address = s->busdev.mmio[0].addr; + kioapic->irr = s->irr; + for (i = 0; i < IOAPIC_NUM_PINS; i++) { + kioapic->redirtbl[i].bits = s->ioredtbl[i]; + } + + ret = kvm_vm_ioctl(kvm_state, KVM_SET_IRQCHIP, &chip); + if (ret < 0) { + fprintf(stderr, "KVM_SET_IRQCHIP failed: %s\n", strerror(-ret)); + abort(); + } +} + +static void kvm_ioapic_reset(DeviceState *dev) +{ + IOAPICCommonState *s = IOAPIC_COMMON(dev); + + ioapic_reset_common(dev); + kvm_ioapic_put(s); +} + +static void kvm_ioapic_set_irq(void *opaque, int irq, int level) +{ + KVMIOAPICState *s = opaque; + IOAPICCommonState *common = IOAPIC_COMMON(s); + int delivered; + + ioapic_stat_update_irq(common, irq, level); + delivered = kvm_set_irq(kvm_state, s->kvm_gsi_base + irq, level); + apic_report_irq_delivered(delivered); +} + +static void kvm_ioapic_realize(DeviceState *dev, Error **errp) +{ + IOAPICCommonState *s = IOAPIC_COMMON(dev); + + memory_region_init_io(&s->io_memory, OBJECT(dev), NULL, NULL, "kvm-ioapic", 0x1000); + /* + * KVM ioapic only supports 0x11 now. This will only be used when + * we want to dump ioapic version. 
+ */ + s->version = 0x11; + + qdev_init_gpio_in(dev, kvm_ioapic_set_irq, IOAPIC_NUM_PINS); +} + +static Property kvm_ioapic_properties[] = { + DEFINE_PROP_UINT32("gsi_base", KVMIOAPICState, kvm_gsi_base, 0), + DEFINE_PROP_END_OF_LIST() +}; + +static void kvm_ioapic_class_init(ObjectClass *klass, void *data) +{ + IOAPICCommonClass *k = IOAPIC_COMMON_CLASS(klass); + DeviceClass *dc = DEVICE_CLASS(klass); + + k->realize = kvm_ioapic_realize; + k->pre_save = kvm_ioapic_get; + k->post_load = kvm_ioapic_put; + dc->reset = kvm_ioapic_reset; + device_class_set_props(dc, kvm_ioapic_properties); +} + +static const TypeInfo kvm_ioapic_info = { + .name = TYPE_KVM_IOAPIC, + .parent = TYPE_IOAPIC_COMMON, + .instance_size = sizeof(KVMIOAPICState), + .class_init = kvm_ioapic_class_init, +}; + +static void kvm_ioapic_register_types(void) +{ + type_register_static(&kvm_ioapic_info); +} + +type_init(kvm_ioapic_register_types) diff --git a/hw/i386/kvm/meson.build b/hw/i386/kvm/meson.build new file mode 100644 index 000000000..95467f1de --- /dev/null +++ b/hw/i386/kvm/meson.build @@ -0,0 +1,8 @@ +i386_kvm_ss = ss.source_set() +i386_kvm_ss.add(files('clock.c')) +i386_kvm_ss.add(when: 'CONFIG_APIC', if_true: files('apic.c')) +i386_kvm_ss.add(when: 'CONFIG_I8254', if_true: files('i8254.c')) +i386_kvm_ss.add(when: 'CONFIG_I8259', if_true: files('i8259.c')) +i386_kvm_ss.add(when: 'CONFIG_IOAPIC', if_true: files('ioapic.c')) + +i386_ss.add_all(when: 'CONFIG_KVM', if_true: i386_kvm_ss) diff --git a/hw/i386/kvmvapic.c b/hw/i386/kvmvapic.c new file mode 100644 index 000000000..43f8a8f67 --- /dev/null +++ b/hw/i386/kvmvapic.c @@ -0,0 +1,871 @@ +/* + * TPR optimization for 32-bit Windows guests (XP and Server 2003) + * + * Copyright (C) 2007-2008 Qumranet Technologies + * Copyright (C) 2012 Jan Kiszka, Siemens AG + * + * This work is licensed under the terms of the GNU GPL version 2, or + * (at your option) any later version. See the COPYING file in the + * top-level directory. 
+ */ + +#include "qemu/osdep.h" +#include "qemu/module.h" +#include "sysemu/sysemu.h" +#include "sysemu/cpus.h" +#include "sysemu/hw_accel.h" +#include "sysemu/kvm.h" +#include "sysemu/runstate.h" +#include "hw/i386/apic_internal.h" +#include "hw/sysbus.h" +#include "hw/boards.h" +#include "migration/vmstate.h" +#include "qom/object.h" + +#define VAPIC_IO_PORT 0x7e + +#define VAPIC_CPU_SHIFT 7 + +#define ROM_BLOCK_SIZE 512 +#define ROM_BLOCK_MASK (~(ROM_BLOCK_SIZE - 1)) + +typedef enum VAPICMode { + VAPIC_INACTIVE = 0, + VAPIC_ACTIVE = 1, + VAPIC_STANDBY = 2, +} VAPICMode; + +typedef struct VAPICHandlers { + uint32_t set_tpr; + uint32_t set_tpr_eax; + uint32_t get_tpr[8]; + uint32_t get_tpr_stack; +} QEMU_PACKED VAPICHandlers; + +typedef struct GuestROMState { + char signature[8]; + uint32_t vaddr; + uint32_t fixup_start; + uint32_t fixup_end; + uint32_t vapic_vaddr; + uint32_t vapic_size; + uint32_t vcpu_shift; + uint32_t real_tpr_addr; + VAPICHandlers up; + VAPICHandlers mp; +} QEMU_PACKED GuestROMState; + +struct VAPICROMState { + SysBusDevice busdev; + MemoryRegion io; + MemoryRegion rom; + uint32_t state; + uint32_t rom_state_paddr; + uint32_t rom_state_vaddr; + uint32_t vapic_paddr; + uint32_t real_tpr_addr; + GuestROMState rom_state; + size_t rom_size; + bool rom_mapped_writable; + VMChangeStateEntry *vmsentry; +}; + +#define TYPE_VAPIC "kvmvapic" +OBJECT_DECLARE_SIMPLE_TYPE(VAPICROMState, VAPIC) + +#define TPR_INSTR_ABS_MODRM 0x1 +#define TPR_INSTR_MATCH_MODRM_REG 0x2 + +typedef struct TPRInstruction { + uint8_t opcode; + uint8_t modrm_reg; + unsigned int flags; + TPRAccess access; + size_t length; + off_t addr_offset; +} TPRInstruction; + +/* must be sorted by length, shortest first */ +static const TPRInstruction tpr_instr[] = { + { /* mov abs to eax */ + .opcode = 0xa1, + .access = TPR_ACCESS_READ, + .length = 5, + .addr_offset = 1, + }, + { /* mov eax to abs */ + .opcode = 0xa3, + .access = TPR_ACCESS_WRITE, + .length = 5, + .addr_offset = 1, + }, + { /* mov r32 to r/m32 */ + .opcode = 0x89, + .flags = TPR_INSTR_ABS_MODRM, + .access = TPR_ACCESS_WRITE, + .length = 6, + .addr_offset = 2, + }, + { /* mov r/m32 to r32 */ + .opcode = 0x8b, + .flags = TPR_INSTR_ABS_MODRM, + .access = TPR_ACCESS_READ, + .length = 6, + .addr_offset = 2, + }, + { /* push r/m32 */ + .opcode = 0xff, + .modrm_reg = 6, + .flags = TPR_INSTR_ABS_MODRM | TPR_INSTR_MATCH_MODRM_REG, + .access = TPR_ACCESS_READ, + .length = 6, + .addr_offset = 2, + }, + { /* mov imm32, r/m32 (c7/0) */ + .opcode = 0xc7, + .modrm_reg = 0, + .flags = TPR_INSTR_ABS_MODRM | TPR_INSTR_MATCH_MODRM_REG, + .access = TPR_ACCESS_WRITE, + .length = 10, + .addr_offset = 2, + }, +}; + +static void read_guest_rom_state(VAPICROMState *s) +{ + cpu_physical_memory_read(s->rom_state_paddr, &s->rom_state, + sizeof(GuestROMState)); +} + +static void write_guest_rom_state(VAPICROMState *s) +{ + cpu_physical_memory_write(s->rom_state_paddr, &s->rom_state, + sizeof(GuestROMState)); +} + +static void update_guest_rom_state(VAPICROMState *s) +{ + read_guest_rom_state(s); + + s->rom_state.real_tpr_addr = cpu_to_le32(s->real_tpr_addr); + s->rom_state.vcpu_shift = cpu_to_le32(VAPIC_CPU_SHIFT); + + write_guest_rom_state(s); +} + +static int find_real_tpr_addr(VAPICROMState *s, CPUX86State *env) +{ + CPUState *cs = env_cpu(env); + hwaddr paddr; + target_ulong addr; + + if (s->state == VAPIC_ACTIVE) { + return 0; + } + /* + * If there is no prior TPR access instruction we could analyze (which is + * the case after resume from hibernation), we need to scan the 
possible + * virtual address space for the APIC mapping. + */ + for (addr = 0xfffff000; addr >= 0x80000000; addr -= TARGET_PAGE_SIZE) { + paddr = cpu_get_phys_page_debug(cs, addr); + if (paddr != APIC_DEFAULT_ADDRESS) { + continue; + } + s->real_tpr_addr = addr + 0x80; + update_guest_rom_state(s); + return 0; + } + return -1; +} + +static uint8_t modrm_reg(uint8_t modrm) +{ + return (modrm >> 3) & 7; +} + +static bool is_abs_modrm(uint8_t modrm) +{ + return (modrm & 0xc7) == 0x05; +} + +static bool opcode_matches(uint8_t *opcode, const TPRInstruction *instr) +{ + return opcode[0] == instr->opcode && + (!(instr->flags & TPR_INSTR_ABS_MODRM) || is_abs_modrm(opcode[1])) && + (!(instr->flags & TPR_INSTR_MATCH_MODRM_REG) || + modrm_reg(opcode[1]) == instr->modrm_reg); +} + +static int evaluate_tpr_instruction(VAPICROMState *s, X86CPU *cpu, + target_ulong *pip, TPRAccess access) +{ + CPUState *cs = CPU(cpu); + const TPRInstruction *instr; + target_ulong ip = *pip; + uint8_t opcode[2]; + uint32_t real_tpr_addr; + int i; + + if ((ip & 0xf0000000ULL) != 0x80000000ULL && + (ip & 0xf0000000ULL) != 0xe0000000ULL) { + return -1; + } + + /* + * Early Windows 2003 SMP initialization contains a + * + * mov imm32, r/m32 + * + * instruction that is patched by TPR optimization. The problem is that + * RSP, used by the patched instruction, is zero, so the guest gets a + * double fault and dies. + */ + if (cpu->env.regs[R_ESP] == 0) { + return -1; + } + + if (kvm_enabled() && !kvm_irqchip_in_kernel()) { + /* + * KVM without kernel-based TPR access reporting will pass an IP that + * points after the accessing instruction. So we need to look backward + * to find the reason. + */ + for (i = 0; i < ARRAY_SIZE(tpr_instr); i++) { + instr = &tpr_instr[i]; + if (instr->access != access) { + continue; + } + if (cpu_memory_rw_debug(cs, ip - instr->length, opcode, + sizeof(opcode), 0) < 0) { + return -1; + } + if (opcode_matches(opcode, instr)) { + ip -= instr->length; + goto instruction_ok; + } + } + return -1; + } else { + if (cpu_memory_rw_debug(cs, ip, opcode, sizeof(opcode), 0) < 0) { + return -1; + } + for (i = 0; i < ARRAY_SIZE(tpr_instr); i++) { + instr = &tpr_instr[i]; + if (opcode_matches(opcode, instr)) { + goto instruction_ok; + } + } + return -1; + } + +instruction_ok: + /* + * Grab the virtual TPR address from the instruction + * and update the cached values. + */ + if (cpu_memory_rw_debug(cs, ip + instr->addr_offset, + (void *)&real_tpr_addr, + sizeof(real_tpr_addr), 0) < 0) { + return -1; + } + real_tpr_addr = le32_to_cpu(real_tpr_addr); + if ((real_tpr_addr & 0xfff) != 0x80) { + return -1; + } + s->real_tpr_addr = real_tpr_addr; + update_guest_rom_state(s); + + *pip = ip; + return 0; +} + +static int update_rom_mapping(VAPICROMState *s, CPUX86State *env, target_ulong ip) +{ + CPUState *cs = env_cpu(env); + hwaddr paddr; + uint32_t rom_state_vaddr; + uint32_t pos, patch, offset; + + /* nothing to do if already activated */ + if (s->state == VAPIC_ACTIVE) { + return 0; + } + + /* bail out if ROM init code was not executed (missing ROM?) 
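+ * (the ROM announces itself with a 16-bit write to port 0x7e, which + * moves the state from VAPIC_INACTIVE to VAPIC_STANDBY)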
*/ + if (s->state == VAPIC_INACTIVE) { + return -1; + } + + /* find out virtual address of the ROM */ + rom_state_vaddr = s->rom_state_paddr + (ip & 0xf0000000); + paddr = cpu_get_phys_page_debug(cs, rom_state_vaddr); + if (paddr == -1) { + return -1; + } + paddr += rom_state_vaddr & ~TARGET_PAGE_MASK; + if (paddr != s->rom_state_paddr) { + return -1; + } + read_guest_rom_state(s); + if (memcmp(s->rom_state.signature, "kvm aPiC", 8) != 0) { + return -1; + } + s->rom_state_vaddr = rom_state_vaddr; + + /* fixup addresses in ROM if needed */ + if (rom_state_vaddr == le32_to_cpu(s->rom_state.vaddr)) { + return 0; + } + for (pos = le32_to_cpu(s->rom_state.fixup_start); + pos < le32_to_cpu(s->rom_state.fixup_end); + pos += 4) { + cpu_physical_memory_read(paddr + pos - s->rom_state.vaddr, + &offset, sizeof(offset)); + offset = le32_to_cpu(offset); + cpu_physical_memory_read(paddr + offset, &patch, sizeof(patch)); + patch = le32_to_cpu(patch); + patch += rom_state_vaddr - le32_to_cpu(s->rom_state.vaddr); + patch = cpu_to_le32(patch); + cpu_physical_memory_write(paddr + offset, &patch, sizeof(patch)); + } + read_guest_rom_state(s); + s->vapic_paddr = paddr + le32_to_cpu(s->rom_state.vapic_vaddr) - + le32_to_cpu(s->rom_state.vaddr); + + return 0; +} + +/* + * Tries to read the unique processor number from the Kernel Processor Control + * Region (KPCR) of 32-bit Windows XP and Server 2003. Returns -1 if the KPCR + * cannot be accessed or is considered invalid. This also ensures that we are + * not patching the wrong guest. + */ +static int get_kpcr_number(X86CPU *cpu) +{ + CPUX86State *env = &cpu->env; + struct kpcr { + uint8_t fill1[0x1c]; + uint32_t self; + uint8_t fill2[0x31]; + uint8_t number; + } QEMU_PACKED kpcr; + + if (cpu_memory_rw_debug(CPU(cpu), env->segs[R_FS].base, + (void *)&kpcr, sizeof(kpcr), 0) < 0 || + kpcr.self != env->segs[R_FS].base) { + return -1; + } + return kpcr.number; +} + +static int vapic_enable(VAPICROMState *s, X86CPU *cpu) +{ + int cpu_number = get_kpcr_number(cpu); + hwaddr vapic_paddr; + static const uint8_t enabled = 1; + + if (cpu_number < 0) { + return -1; + } + vapic_paddr = s->vapic_paddr + + (((hwaddr)cpu_number) << VAPIC_CPU_SHIFT); + cpu_physical_memory_write(vapic_paddr + offsetof(VAPICState, enabled), + &enabled, sizeof(enabled)); + apic_enable_vapic(cpu->apic_state, vapic_paddr); + + s->state = VAPIC_ACTIVE; + + return 0; +} + +static void patch_byte(X86CPU *cpu, target_ulong addr, uint8_t byte) +{ + cpu_memory_rw_debug(CPU(cpu), addr, &byte, 1, 1); +} + +static void patch_call(X86CPU *cpu, target_ulong ip, uint32_t target) +{ + uint32_t offset; + + offset = cpu_to_le32(target - ip - 5); + patch_byte(cpu, ip, 0xe8); /* call near */ + cpu_memory_rw_debug(CPU(cpu), ip + 1, (void *)&offset, sizeof(offset), 1); +} + +typedef struct PatchInfo { + VAPICHandlers *handler; + target_ulong ip; +} PatchInfo; + +static void do_patch_instruction(CPUState *cs, run_on_cpu_data data) +{ + X86CPU *x86_cpu = X86_CPU(cs); + PatchInfo *info = (PatchInfo *) data.host_ptr; + VAPICHandlers *handlers = info->handler; + target_ulong ip = info->ip; + uint8_t opcode[2]; + uint32_t imm32 = 0; + + cpu_memory_rw_debug(cs, ip, opcode, sizeof(opcode), 0); + + switch (opcode[0]) { + case 0x89: /* mov r32 to r/m32 */ + patch_byte(x86_cpu, ip, 0x50 + modrm_reg(opcode[1])); /* push reg */ + patch_call(x86_cpu, ip + 1, handlers->set_tpr); + break; + case 0x8b: /* mov r/m32 to r32 */ + patch_byte(x86_cpu, ip, 0x90); + patch_call(x86_cpu, ip + 1, handlers->get_tpr[modrm_reg(opcode[1])]); + 
break; + case 0xa1: /* mov abs to eax */ + patch_call(x86_cpu, ip, handlers->get_tpr[0]); + break; + case 0xa3: /* mov eax to abs */ + patch_call(x86_cpu, ip, handlers->set_tpr_eax); + break; + case 0xc7: /* mov imm32, r/m32 (c7/0) */ + patch_byte(x86_cpu, ip, 0x68); /* push imm32 */ + cpu_memory_rw_debug(cs, ip + 6, (void *)&imm32, sizeof(imm32), 0); + cpu_memory_rw_debug(cs, ip + 1, (void *)&imm32, sizeof(imm32), 1); + patch_call(x86_cpu, ip + 5, handlers->set_tpr); + break; + case 0xff: /* push r/m32 */ + patch_byte(x86_cpu, ip, 0x50); /* push eax */ + patch_call(x86_cpu, ip + 1, handlers->get_tpr_stack); + break; + default: + abort(); + } + + g_free(info); +} + +static void patch_instruction(VAPICROMState *s, X86CPU *cpu, target_ulong ip) +{ + MachineState *ms = MACHINE(qdev_get_machine()); + CPUState *cs = CPU(cpu); + VAPICHandlers *handlers; + PatchInfo *info; + + if (ms->smp.cpus == 1) { + handlers = &s->rom_state.up; + } else { + handlers = &s->rom_state.mp; + } + + info = g_new(PatchInfo, 1); + info->handler = handlers; + info->ip = ip; + + async_safe_run_on_cpu(cs, do_patch_instruction, RUN_ON_CPU_HOST_PTR(info)); +} + +void vapic_report_tpr_access(DeviceState *dev, CPUState *cs, target_ulong ip, + TPRAccess access) +{ + VAPICROMState *s = VAPIC(dev); + X86CPU *cpu = X86_CPU(cs); + CPUX86State *env = &cpu->env; + + cpu_synchronize_state(cs); + + if (evaluate_tpr_instruction(s, cpu, &ip, access) < 0) { + if (s->state == VAPIC_ACTIVE) { + vapic_enable(s, cpu); + } + return; + } + if (update_rom_mapping(s, env, ip) < 0) { + return; + } + if (vapic_enable(s, cpu) < 0) { + return; + } + patch_instruction(s, cpu, ip); +} + +typedef struct VAPICEnableTPRReporting { + DeviceState *apic; + bool enable; +} VAPICEnableTPRReporting; + +static void vapic_do_enable_tpr_reporting(CPUState *cpu, run_on_cpu_data data) +{ + VAPICEnableTPRReporting *info = data.host_ptr; + apic_enable_tpr_access_reporting(info->apic, info->enable); +} + +static void vapic_enable_tpr_reporting(bool enable) +{ + VAPICEnableTPRReporting info = { + .enable = enable, + }; + CPUState *cs; + X86CPU *cpu; + + CPU_FOREACH(cs) { + cpu = X86_CPU(cs); + info.apic = cpu->apic_state; + run_on_cpu(cs, vapic_do_enable_tpr_reporting, RUN_ON_CPU_HOST_PTR(&info)); + } +} + +static void vapic_reset(DeviceState *dev) +{ + VAPICROMState *s = VAPIC(dev); + + s->state = VAPIC_INACTIVE; + s->rom_state_paddr = 0; + vapic_enable_tpr_reporting(false); +} + +/* + * Set the IRQ polling hypercalls to the supported variant: + * - vmcall if using KVM in-kernel irqchip + * - 32-bit VAPIC port write otherwise + */ +static int patch_hypercalls(VAPICROMState *s) +{ + hwaddr rom_paddr = s->rom_state_paddr & ROM_BLOCK_MASK; + static const uint8_t vmcall_pattern[] = { /* vmcall */ + 0xb8, 0x1, 0, 0, 0, 0xf, 0x1, 0xc1 + }; + static const uint8_t outl_pattern[] = { /* nop; outl %eax,0x7e */ + 0xb8, 0x1, 0, 0, 0, 0x90, 0xe7, 0x7e + }; + uint8_t alternates[2]; + const uint8_t *pattern; + const uint8_t *patch; + off_t pos; + uint8_t *rom; + + rom = g_malloc(s->rom_size); + cpu_physical_memory_read(rom_paddr, rom, s->rom_size); + + for (pos = 0; pos < s->rom_size - sizeof(vmcall_pattern); pos++) { + if (kvm_irqchip_in_kernel()) { + pattern = outl_pattern; + alternates[0] = outl_pattern[7]; + alternates[1] = outl_pattern[7]; + patch = &vmcall_pattern[5]; + } else { + pattern = vmcall_pattern; + alternates[0] = vmcall_pattern[7]; + alternates[1] = 0xd9; /* AMD's VMMCALL */ + patch = &outl_pattern[5]; + } + if (memcmp(rom + pos, pattern, 7) == 0 && + (rom[pos + 
7] == alternates[0] || rom[pos + 7] == alternates[1])) { + cpu_physical_memory_write(rom_paddr + pos + 5, patch, 3); + /* + * Don't flush the tb here. Under ordinary conditions, the patched + * calls are miles away from the current IP. Under malicious + * conditions, the guest could trick us into crashing. + */ + } + } + + g_free(rom); + return 0; +} + +/* + * For TCG mode, and until KVM honors read-only memory regions, we need to + * enable write access to the option ROM so that variables can be updated by + * the guest. + */ +static int vapic_map_rom_writable(VAPICROMState *s) +{ + hwaddr rom_paddr = s->rom_state_paddr & ROM_BLOCK_MASK; + MemoryRegionSection section; + MemoryRegion *as; + size_t rom_size; + uint8_t *ram; + + as = sysbus_address_space(&s->busdev); + + if (s->rom_mapped_writable) { + memory_region_del_subregion(as, &s->rom); + object_unparent(OBJECT(&s->rom)); + } + + /* grab RAM memory region (region @rom_paddr may still be pc.rom) */ + section = memory_region_find(as, 0, 1); + + /* read ROM size from RAM region */ + if (rom_paddr + 2 >= memory_region_size(section.mr)) { + return -1; + } + ram = memory_region_get_ram_ptr(section.mr); + rom_size = ram[rom_paddr + 2] * ROM_BLOCK_SIZE; + if (rom_size == 0) { + return -1; + } + s->rom_size = rom_size; + + /* We need to round to avoid creating subpages + * from which we cannot run code. */ + rom_size += rom_paddr & ~TARGET_PAGE_MASK; + rom_paddr &= TARGET_PAGE_MASK; + rom_size = TARGET_PAGE_ALIGN(rom_size); + + memory_region_init_alias(&s->rom, OBJECT(s), "kvmvapic-rom", section.mr, + rom_paddr, rom_size); + memory_region_add_subregion_overlap(as, rom_paddr, &s->rom, 1000); + s->rom_mapped_writable = true; + memory_region_unref(section.mr); + + return 0; +} + +static int vapic_prepare(VAPICROMState *s) +{ + if (vapic_map_rom_writable(s) < 0) { + return -1; + } + + if (patch_hypercalls(s) < 0) { + return -1; + } + + vapic_enable_tpr_reporting(true); + + return 0; +} + +static void vapic_write(void *opaque, hwaddr addr, uint64_t data, + unsigned int size) +{ + VAPICROMState *s = opaque; + X86CPU *cpu; + CPUX86State *env; + hwaddr rom_paddr; + + if (!current_cpu) { + return; + } + + cpu_synchronize_state(current_cpu); + cpu = X86_CPU(current_cpu); + env = &cpu->env; + + /* + * The VAPIC supports three PIO-based hypercalls, all via port 0x7E. + * o 16-bit write access: + * Reports the option ROM initialization to the hypervisor. Written + * value is the offset of the state structure in the ROM. + * o 8-bit write access: + * Reactivates the VAPIC after a guest hibernation, i.e. after the + * option ROM content has been re-initialized by a guest power cycle. + * o 32-bit write access: + * Poll for pending IRQs, considering the current VAPIC state. + */ + switch (size) { + case 2: + if (s->state == VAPIC_INACTIVE) { + rom_paddr = (env->segs[R_CS].base + env->eip) & ROM_BLOCK_MASK; + s->rom_state_paddr = rom_paddr + data; + + s->state = VAPIC_STANDBY; + } + if (vapic_prepare(s) < 0) { + s->state = VAPIC_INACTIVE; + s->rom_state_paddr = 0; + break; + } + break; + case 1: + if (kvm_enabled()) { + /* + * Disable triggering instruction in ROM by writing a NOP. + * + * We cannot do this in TCG mode as the reported IP is not + * accurate.
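+ * + * With KVM the PIO exit reports a precise IP, so env->eip - 2 points + * at the two-byte OUT instruction that triggered this write.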
+ */ + pause_all_vcpus(); + patch_byte(cpu, env->eip - 2, 0x66); + patch_byte(cpu, env->eip - 1, 0x90); + resume_all_vcpus(); + } + + if (s->state == VAPIC_ACTIVE) { + break; + } + if (update_rom_mapping(s, env, env->eip) < 0) { + break; + } + if (find_real_tpr_addr(s, env) < 0) { + break; + } + vapic_enable(s, cpu); + break; + default: + case 4: + if (!kvm_irqchip_in_kernel()) { + apic_poll_irq(cpu->apic_state); + } + break; + } +} + +static uint64_t vapic_read(void *opaque, hwaddr addr, unsigned size) +{ + return 0xffffffff; +} + +static const MemoryRegionOps vapic_ops = { + .write = vapic_write, + .read = vapic_read, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void vapic_realize(DeviceState *dev, Error **errp) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + VAPICROMState *s = VAPIC(dev); + + memory_region_init_io(&s->io, OBJECT(s), &vapic_ops, s, "kvmvapic", 2); + sysbus_add_io(sbd, VAPIC_IO_PORT, &s->io); + sysbus_init_ioports(sbd, VAPIC_IO_PORT, 2); + + option_rom[nb_option_roms].name = "kvmvapic.bin"; + option_rom[nb_option_roms].bootindex = -1; + nb_option_roms++; +} + +static void do_vapic_enable(CPUState *cs, run_on_cpu_data data) +{ + VAPICROMState *s = data.host_ptr; + X86CPU *cpu = X86_CPU(cs); + + static const uint8_t enabled = 1; + cpu_physical_memory_write(s->vapic_paddr + offsetof(VAPICState, enabled), + &enabled, sizeof(enabled)); + apic_enable_vapic(cpu->apic_state, s->vapic_paddr); + s->state = VAPIC_ACTIVE; +} + +static void kvmvapic_vm_state_change(void *opaque, bool running, + RunState state) +{ + MachineState *ms = MACHINE(qdev_get_machine()); + VAPICROMState *s = opaque; + uint8_t *zero; + + if (!running) { + return; + } + + if (s->state == VAPIC_ACTIVE) { + if (ms->smp.cpus == 1) { + run_on_cpu(first_cpu, do_vapic_enable, RUN_ON_CPU_HOST_PTR(s)); + } else { + zero = g_malloc0(s->rom_state.vapic_size); + cpu_physical_memory_write(s->vapic_paddr, zero, + s->rom_state.vapic_size); + g_free(zero); + } + } + + qemu_del_vm_change_state_handler(s->vmsentry); + s->vmsentry = NULL; +} + +static int vapic_post_load(void *opaque, int version_id) +{ + VAPICROMState *s = opaque; + + /* + * The old implementation of qemu-kvm did not provide the state + * VAPIC_STANDBY. Reconstruct it. 
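+ * A nonzero rom_state_paddr can only have come from the ROM's init + * handshake, which is exactly the condition VAPIC_STANDBY records.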
+ */ + if (s->state == VAPIC_INACTIVE && s->rom_state_paddr != 0) { + s->state = VAPIC_STANDBY; + } + + if (s->state != VAPIC_INACTIVE) { + if (vapic_prepare(s) < 0) { + return -1; + } + } + + if (!s->vmsentry) { + s->vmsentry = + qemu_add_vm_change_state_handler(kvmvapic_vm_state_change, s); + } + return 0; +} + +static const VMStateDescription vmstate_handlers = { + .name = "kvmvapic-handlers", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(set_tpr, VAPICHandlers), + VMSTATE_UINT32(set_tpr_eax, VAPICHandlers), + VMSTATE_UINT32_ARRAY(get_tpr, VAPICHandlers, 8), + VMSTATE_UINT32(get_tpr_stack, VAPICHandlers), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_guest_rom = { + .name = "kvmvapic-guest-rom", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UNUSED(8), /* signature */ + VMSTATE_UINT32(vaddr, GuestROMState), + VMSTATE_UINT32(fixup_start, GuestROMState), + VMSTATE_UINT32(fixup_end, GuestROMState), + VMSTATE_UINT32(vapic_vaddr, GuestROMState), + VMSTATE_UINT32(vapic_size, GuestROMState), + VMSTATE_UINT32(vcpu_shift, GuestROMState), + VMSTATE_UINT32(real_tpr_addr, GuestROMState), + VMSTATE_STRUCT(up, GuestROMState, 0, vmstate_handlers, VAPICHandlers), + VMSTATE_STRUCT(mp, GuestROMState, 0, vmstate_handlers, VAPICHandlers), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_vapic = { + .name = "kvm-tpr-opt", /* compatible with qemu-kvm VAPIC */ + .version_id = 1, + .minimum_version_id = 1, + .post_load = vapic_post_load, + .fields = (VMStateField[]) { + VMSTATE_STRUCT(rom_state, VAPICROMState, 0, vmstate_guest_rom, + GuestROMState), + VMSTATE_UINT32(state, VAPICROMState), + VMSTATE_UINT32(real_tpr_addr, VAPICROMState), + VMSTATE_UINT32(rom_state_vaddr, VAPICROMState), + VMSTATE_UINT32(vapic_paddr, VAPICROMState), + VMSTATE_UINT32(rom_state_paddr, VAPICROMState), + VMSTATE_END_OF_LIST() + } +}; + +static void vapic_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = vapic_reset; + dc->vmsd = &vmstate_vapic; + dc->realize = vapic_realize; +} + +static const TypeInfo vapic_type = { + .name = TYPE_VAPIC, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(VAPICROMState), + .class_init = vapic_class_init, +}; + +static void vapic_register(void) +{ + type_register_static(&vapic_type); +} + +type_init(vapic_register); diff --git a/hw/i386/meson.build b/hw/i386/meson.build new file mode 100644 index 000000000..213e2e82b --- /dev/null +++ b/hw/i386/meson.build @@ -0,0 +1,37 @@ +i386_ss = ss.source_set() +i386_ss.add(files( + 'fw_cfg.c', + 'kvmvapic.c', + 'e820_memory_layout.c', + 'multiboot.c', + 'x86.c', +)) + +i386_ss.add(when: 'CONFIG_X86_IOMMU', if_true: files('x86-iommu.c'), + if_false: files('x86-iommu-stub.c')) +i386_ss.add(when: 'CONFIG_AMD_IOMMU', if_true: files('amd_iommu.c')) +i386_ss.add(when: 'CONFIG_I440FX', if_true: files('pc_piix.c')) +i386_ss.add(when: 'CONFIG_MICROVM', if_true: files('microvm.c', 'acpi-microvm.c', 'microvm-dt.c')) +i386_ss.add(when: 'CONFIG_Q35', if_true: files('pc_q35.c')) +i386_ss.add(when: 'CONFIG_VMMOUSE', if_true: files('vmmouse.c')) +i386_ss.add(when: 'CONFIG_VMPORT', if_true: files('vmport.c')) +i386_ss.add(when: 'CONFIG_VTD', if_true: files('intel_iommu.c')) +i386_ss.add(when: 'CONFIG_SGX', if_true: files('sgx-epc.c','sgx.c'), + if_false: files('sgx-stub.c')) + +i386_ss.add(when: 'CONFIG_ACPI', if_true: files('acpi-common.c')) +i386_ss.add(when: 
'CONFIG_ACPI_HW_REDUCED', if_true: files('generic_event_device_x86.c')) +i386_ss.add(when: 'CONFIG_PC', if_true: files( + 'pc.c', + 'pc_sysfw.c', + 'acpi-build.c', + 'port92.c')) +i386_ss.add(when: 'CONFIG_X86_FW_OVMF', if_true: files('pc_sysfw_ovmf.c'), + if_false: files('pc_sysfw_ovmf-stubs.c')) + +subdir('kvm') +subdir('xen') + +i386_ss.add_all(xenpv_ss) + +hw_arch += {'i386': i386_ss} diff --git a/hw/i386/microvm-dt.c b/hw/i386/microvm-dt.c new file mode 100644 index 000000000..9c3c4995b --- /dev/null +++ b/hw/i386/microvm-dt.c @@ -0,0 +1,348 @@ +/* + * microvm device tree support + * + * This generates a device tree for microvm and exports it via fw_cfg + * as "etc/fdt" to the firmware (edk2 specifically). + * + * The use case is to allow edk2 to find the pcie ecam and the virtio + * devices, without adding an ACPI parser, reusing the fdt parser + * which is needed anyway for the arm platform. + * + * Note 1: The device tree is incomplete. CPUs and memory are missing, + * for example; those can be detected using other fw_cfg files. + * Also pci ecam irq routing is not there; edk2 doesn't use + * interrupts. + * + * Note 2: This is for firmware only. OSes should use the more + * complete ACPI tables for hardware discovery. + * + * ---------------------------------------------------------------------- + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>.
+ */ +#include "qemu/osdep.h" +#include "qemu/cutils.h" +#include "sysemu/device_tree.h" +#include "hw/char/serial.h" +#include "hw/i386/fw_cfg.h" +#include "hw/rtc/mc146818rtc.h" +#include "hw/sysbus.h" +#include "hw/virtio/virtio-mmio.h" +#include "hw/usb/xhci.h" + +#include "microvm-dt.h" + +static bool debug; + +static void dt_add_microvm_irq(MicrovmMachineState *mms, + const char *nodename, uint32_t irq) +{ + int index = 0; + + if (irq >= IO_APIC_SECONDARY_IRQBASE) { + irq -= IO_APIC_SECONDARY_IRQBASE; + index++; + } + + qemu_fdt_setprop_cell(mms->fdt, nodename, "interrupt-parent", + mms->ioapic_phandle[index]); + qemu_fdt_setprop_cells(mms->fdt, nodename, "interrupts", irq, 0); +} + +static void dt_add_virtio(MicrovmMachineState *mms, VirtIOMMIOProxy *mmio) +{ + SysBusDevice *dev = SYS_BUS_DEVICE(mmio); + VirtioBusState *mmio_virtio_bus = &mmio->bus; + BusState *mmio_bus = &mmio_virtio_bus->parent_obj; + char *nodename; + + if (QTAILQ_EMPTY(&mmio_bus->children)) { + return; + } + + hwaddr base = dev->mmio[0].addr; + hwaddr size = 512; + unsigned index = (base - VIRTIO_MMIO_BASE) / size; + uint32_t irq = mms->virtio_irq_base + index; + + nodename = g_strdup_printf("/virtio_mmio@%" PRIx64, base); + qemu_fdt_add_subnode(mms->fdt, nodename); + qemu_fdt_setprop_string(mms->fdt, nodename, "compatible", "virtio,mmio"); + qemu_fdt_setprop_sized_cells(mms->fdt, nodename, "reg", 2, base, 2, size); + qemu_fdt_setprop(mms->fdt, nodename, "dma-coherent", NULL, 0); + dt_add_microvm_irq(mms, nodename, irq); + g_free(nodename); +} + +static void dt_add_xhci(MicrovmMachineState *mms) +{ + const char compat[] = "generic-xhci"; + uint32_t irq = MICROVM_XHCI_IRQ; + hwaddr base = MICROVM_XHCI_BASE; + hwaddr size = XHCI_LEN_REGS; + char *nodename; + + nodename = g_strdup_printf("/usb@%" PRIx64, base); + qemu_fdt_add_subnode(mms->fdt, nodename); + qemu_fdt_setprop(mms->fdt, nodename, "compatible", compat, sizeof(compat)); + qemu_fdt_setprop_sized_cells(mms->fdt, nodename, "reg", 2, base, 2, size); + qemu_fdt_setprop(mms->fdt, nodename, "dma-coherent", NULL, 0); + dt_add_microvm_irq(mms, nodename, irq); + g_free(nodename); +} + +static void dt_add_pcie(MicrovmMachineState *mms) +{ + hwaddr base = PCIE_MMIO_BASE; + int nr_pcie_buses; + char *nodename; + + nodename = g_strdup_printf("/pcie@%" PRIx64, base); + qemu_fdt_add_subnode(mms->fdt, nodename); + qemu_fdt_setprop_string(mms->fdt, nodename, + "compatible", "pci-host-ecam-generic"); + qemu_fdt_setprop_string(mms->fdt, nodename, "device_type", "pci"); + qemu_fdt_setprop_cell(mms->fdt, nodename, "#address-cells", 3); + qemu_fdt_setprop_cell(mms->fdt, nodename, "#size-cells", 2); + qemu_fdt_setprop_cell(mms->fdt, nodename, "linux,pci-domain", 0); + qemu_fdt_setprop(mms->fdt, nodename, "dma-coherent", NULL, 0); + + qemu_fdt_setprop_sized_cells(mms->fdt, nodename, "reg", + 2, PCIE_ECAM_BASE, 2, PCIE_ECAM_SIZE); + if (mms->gpex.mmio64.size) { + qemu_fdt_setprop_sized_cells(mms->fdt, nodename, "ranges", + + 1, FDT_PCI_RANGE_MMIO, + 2, mms->gpex.mmio32.base, + 2, mms->gpex.mmio32.base, + 2, mms->gpex.mmio32.size, + + 1, FDT_PCI_RANGE_MMIO_64BIT, + 2, mms->gpex.mmio64.base, + 2, mms->gpex.mmio64.base, + 2, mms->gpex.mmio64.size); + } else { + qemu_fdt_setprop_sized_cells(mms->fdt, nodename, "ranges", + + 1, FDT_PCI_RANGE_MMIO, + 2, mms->gpex.mmio32.base, + 2, mms->gpex.mmio32.base, + 2, mms->gpex.mmio32.size); + } + + nr_pcie_buses = PCIE_ECAM_SIZE / PCIE_MMCFG_SIZE_MIN; + qemu_fdt_setprop_cells(mms->fdt, nodename, "bus-range", 0, + nr_pcie_buses - 1); + + 
g_free(nodename); +} + +static void dt_add_ioapic(MicrovmMachineState *mms, SysBusDevice *dev) +{ + hwaddr base = dev->mmio[0].addr; + char *nodename; + uint32_t ph; + int index; + + switch (base) { + case IO_APIC_DEFAULT_ADDRESS: + index = 0; + break; + case IO_APIC_SECONDARY_ADDRESS: + index = 1; + break; + default: + fprintf(stderr, "unknown ioapic @ %" PRIx64 "\n", base); + return; + } + + nodename = g_strdup_printf("/ioapic%d@%" PRIx64, index + 1, base); + qemu_fdt_add_subnode(mms->fdt, nodename); + qemu_fdt_setprop_string(mms->fdt, nodename, + "compatible", "intel,ce4100-ioapic"); + qemu_fdt_setprop(mms->fdt, nodename, "interrupt-controller", NULL, 0); + qemu_fdt_setprop_cell(mms->fdt, nodename, "#interrupt-cells", 0x2); + qemu_fdt_setprop_cell(mms->fdt, nodename, "#address-cells", 0x2); + qemu_fdt_setprop_sized_cells(mms->fdt, nodename, "reg", + 2, base, 2, 0x1000); + + ph = qemu_fdt_alloc_phandle(mms->fdt); + qemu_fdt_setprop_cell(mms->fdt, nodename, "phandle", ph); + qemu_fdt_setprop_cell(mms->fdt, nodename, "linux,phandle", ph); + mms->ioapic_phandle[index] = ph; + + g_free(nodename); +} + +static void dt_add_isa_serial(MicrovmMachineState *mms, ISADevice *dev) +{ + const char compat[] = "ns16550"; + uint32_t irq = object_property_get_int(OBJECT(dev), "irq", NULL); + hwaddr base = object_property_get_int(OBJECT(dev), "iobase", NULL); + hwaddr size = 8; + char *nodename; + + nodename = g_strdup_printf("/serial@%" PRIx64, base); + qemu_fdt_add_subnode(mms->fdt, nodename); + qemu_fdt_setprop(mms->fdt, nodename, "compatible", compat, sizeof(compat)); + qemu_fdt_setprop_sized_cells(mms->fdt, nodename, "reg", 2, base, 2, size); + dt_add_microvm_irq(mms, nodename, irq); + + if (base == 0x3f8 /* com1 */) { + qemu_fdt_setprop_string(mms->fdt, "/chosen", "stdout-path", nodename); + } + + g_free(nodename); +} + +static void dt_add_isa_rtc(MicrovmMachineState *mms, ISADevice *dev) +{ + const char compat[] = "motorola,mc146818"; + uint32_t irq = RTC_ISA_IRQ; + hwaddr base = RTC_ISA_BASE; + hwaddr size = 8; + char *nodename; + + nodename = g_strdup_printf("/rtc@%" PRIx64, base); + qemu_fdt_add_subnode(mms->fdt, nodename); + qemu_fdt_setprop(mms->fdt, nodename, "compatible", compat, sizeof(compat)); + qemu_fdt_setprop_sized_cells(mms->fdt, nodename, "reg", 2, base, 2, size); + dt_add_microvm_irq(mms, nodename, irq); + g_free(nodename); +} + +static void dt_setup_isa_bus(MicrovmMachineState *mms, DeviceState *bridge) +{ + BusState *bus = qdev_get_child_bus(bridge, "isa.0"); + BusChild *kid; + Object *obj; + + QTAILQ_FOREACH(kid, &bus->children, sibling) { + DeviceState *dev = kid->child; + + /* serial */ + obj = object_dynamic_cast(OBJECT(dev), TYPE_ISA_SERIAL); + if (obj) { + dt_add_isa_serial(mms, ISA_DEVICE(obj)); + continue; + } + + /* rtc */ + obj = object_dynamic_cast(OBJECT(dev), TYPE_MC146818_RTC); + if (obj) { + dt_add_isa_rtc(mms, ISA_DEVICE(obj)); + continue; + } + + if (debug) { + fprintf(stderr, "%s: unhandled: %s\n", __func__, + object_get_typename(OBJECT(dev))); + } + } +} + +static void dt_setup_sys_bus(MicrovmMachineState *mms) +{ + BusState *bus; + BusChild *kid; + Object *obj; + + /* sysbus devices */ + bus = sysbus_get_default(); + QTAILQ_FOREACH(kid, &bus->children, sibling) { + DeviceState *dev = kid->child; + + /* ioapic */ + obj = object_dynamic_cast(OBJECT(dev), TYPE_IOAPIC); + if (obj) { + dt_add_ioapic(mms, SYS_BUS_DEVICE(obj)); + continue; + } + } + + QTAILQ_FOREACH(kid, &bus->children, sibling) { + DeviceState *dev = kid->child; + + /* virtio */ + obj = 
object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_MMIO); + if (obj) { + dt_add_virtio(mms, VIRTIO_MMIO(obj)); + continue; + } + + /* xhci */ + obj = object_dynamic_cast(OBJECT(dev), TYPE_XHCI_SYSBUS); + if (obj) { + dt_add_xhci(mms); + continue; + } + + /* pcie */ + obj = object_dynamic_cast(OBJECT(dev), TYPE_GPEX_HOST); + if (obj) { + dt_add_pcie(mms); + continue; + } + + /* isa */ + obj = object_dynamic_cast(OBJECT(dev), "isabus-bridge"); + if (obj) { + dt_setup_isa_bus(mms, DEVICE(obj)); + continue; + } + + if (debug) { + obj = object_dynamic_cast(OBJECT(dev), TYPE_IOAPIC); + if (obj) { + /* ioapic already added in first pass */ + continue; + } + fprintf(stderr, "%s: unhandled: %s\n", __func__, + object_get_typename(OBJECT(dev))); + } + } +} + +void dt_setup_microvm(MicrovmMachineState *mms) +{ + X86MachineState *x86ms = X86_MACHINE(mms); + int size = 0; + + mms->fdt = create_device_tree(&size); + + /* root node */ + qemu_fdt_setprop_string(mms->fdt, "/", "compatible", "linux,microvm"); + qemu_fdt_setprop_cell(mms->fdt, "/", "#address-cells", 0x2); + qemu_fdt_setprop_cell(mms->fdt, "/", "#size-cells", 0x2); + + qemu_fdt_add_subnode(mms->fdt, "/chosen"); + dt_setup_sys_bus(mms); + + /* add to fw_cfg */ + if (debug) { + fprintf(stderr, "%s: add etc/fdt to fw_cfg\n", __func__); + } + fw_cfg_add_file(x86ms->fw_cfg, "etc/fdt", mms->fdt, size); + + if (debug) { + fprintf(stderr, "%s: writing microvm.fdt\n", __func__); + if (!g_file_set_contents("microvm.fdt", mms->fdt, size, NULL)) { + fprintf(stderr, "%s: writing microvm.fdt failed\n", __func__); + return; + } + int ret = system("dtc -I dtb -O dts microvm.fdt"); + if (ret != 0) { + fprintf(stderr, "%s: oops, dtc not installed?\n", __func__); + } + } +} diff --git a/hw/i386/microvm-dt.h b/hw/i386/microvm-dt.h new file mode 100644 index 000000000..77c79cbdd --- /dev/null +++ b/hw/i386/microvm-dt.h @@ -0,0 +1,8 @@ +#ifndef HW_I386_MICROVM_DT_H +#define HW_I386_MICROVM_DT_H + +#include "hw/i386/microvm.h" + +void dt_setup_microvm(MicrovmMachineState *mms); + +#endif diff --git a/hw/i386/microvm.c b/hw/i386/microvm.c new file mode 100644 index 000000000..4b3b1dd26 --- /dev/null +++ b/hw/i386/microvm.c @@ -0,0 +1,779 @@ +/* + * Copyright (c) 2018 Intel Corporation + * Copyright (c) 2019 Red Hat, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>.
+ */ + +#include "qemu/osdep.h" +#include "qemu/error-report.h" +#include "qemu/cutils.h" +#include "qemu/units.h" +#include "qapi/error.h" +#include "qapi/visitor.h" +#include "qapi/qapi-visit-common.h" +#include "sysemu/sysemu.h" +#include "sysemu/cpus.h" +#include "sysemu/numa.h" +#include "sysemu/reset.h" +#include "sysemu/runstate.h" +#include "acpi-microvm.h" +#include "microvm-dt.h" + +#include "hw/loader.h" +#include "hw/irq.h" +#include "hw/kvm/clock.h" +#include "hw/i386/microvm.h" +#include "hw/i386/x86.h" +#include "target/i386/cpu.h" +#include "hw/intc/i8259.h" +#include "hw/timer/i8254.h" +#include "hw/rtc/mc146818rtc.h" +#include "hw/char/serial.h" +#include "hw/display/ramfb.h" +#include "hw/i386/topology.h" +#include "hw/i386/e820_memory_layout.h" +#include "hw/i386/fw_cfg.h" +#include "hw/virtio/virtio-mmio.h" +#include "hw/acpi/acpi.h" +#include "hw/acpi/generic_event_device.h" +#include "hw/pci-host/gpex.h" +#include "hw/usb/xhci.h" + +#include "elf.h" +#include "kvm/kvm_i386.h" +#include "hw/xen/start_info.h" + +#define MICROVM_QBOOT_FILENAME "qboot.rom" +#define MICROVM_BIOS_FILENAME "bios-microvm.bin" + +static void microvm_set_rtc(MicrovmMachineState *mms, ISADevice *s) +{ + X86MachineState *x86ms = X86_MACHINE(mms); + int val; + + val = MIN(x86ms->below_4g_mem_size / KiB, 640); + rtc_set_memory(s, 0x15, val); + rtc_set_memory(s, 0x16, val >> 8); + /* extended memory (next 64MiB) */ + if (x86ms->below_4g_mem_size > 1 * MiB) { + val = (x86ms->below_4g_mem_size - 1 * MiB) / KiB; + } else { + val = 0; + } + if (val > 65535) { + val = 65535; + } + rtc_set_memory(s, 0x17, val); + rtc_set_memory(s, 0x18, val >> 8); + rtc_set_memory(s, 0x30, val); + rtc_set_memory(s, 0x31, val >> 8); + /* memory between 16MiB and 4GiB */ + if (x86ms->below_4g_mem_size > 16 * MiB) { + val = (x86ms->below_4g_mem_size - 16 * MiB) / (64 * KiB); + } else { + val = 0; + } + if (val > 65535) { + val = 65535; + } + rtc_set_memory(s, 0x34, val); + rtc_set_memory(s, 0x35, val >> 8); + /* memory above 4GiB */ + val = x86ms->above_4g_mem_size / 65536; + rtc_set_memory(s, 0x5b, val); + rtc_set_memory(s, 0x5c, val >> 8); + rtc_set_memory(s, 0x5d, val >> 16); +} + +static void create_gpex(MicrovmMachineState *mms) +{ + X86MachineState *x86ms = X86_MACHINE(mms); + MemoryRegion *mmio32_alias; + MemoryRegion *mmio64_alias; + MemoryRegion *mmio_reg; + MemoryRegion *ecam_alias; + MemoryRegion *ecam_reg; + DeviceState *dev; + int i; + + dev = qdev_new(TYPE_GPEX_HOST); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + + /* Map only the first size_ecam bytes of ECAM space */ + ecam_alias = g_new0(MemoryRegion, 1); + ecam_reg = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 0); + memory_region_init_alias(ecam_alias, OBJECT(dev), "pcie-ecam", + ecam_reg, 0, mms->gpex.ecam.size); + memory_region_add_subregion(get_system_memory(), + mms->gpex.ecam.base, ecam_alias); + + /* Map the MMIO window into system address space so as to expose + * the section of PCI MMIO space which starts at the same base address + * (ie 1:1 mapping for that part of PCI MMIO space visible through + * the window). 
+ */ + mmio_reg = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 1); + if (mms->gpex.mmio32.size) { + mmio32_alias = g_new0(MemoryRegion, 1); + memory_region_init_alias(mmio32_alias, OBJECT(dev), "pcie-mmio32", mmio_reg, + mms->gpex.mmio32.base, mms->gpex.mmio32.size); + memory_region_add_subregion(get_system_memory(), + mms->gpex.mmio32.base, mmio32_alias); + } + if (mms->gpex.mmio64.size) { + mmio64_alias = g_new0(MemoryRegion, 1); + memory_region_init_alias(mmio64_alias, OBJECT(dev), "pcie-mmio64", mmio_reg, + mms->gpex.mmio64.base, mms->gpex.mmio64.size); + memory_region_add_subregion(get_system_memory(), + mms->gpex.mmio64.base, mmio64_alias); + } + + for (i = 0; i < GPEX_NUM_IRQS; i++) { + sysbus_connect_irq(SYS_BUS_DEVICE(dev), i, + x86ms->gsi[mms->gpex.irq + i]); + } +} + +static int microvm_ioapics(MicrovmMachineState *mms) +{ + if (!x86_machine_is_acpi_enabled(X86_MACHINE(mms))) { + return 1; + } + if (mms->ioapic2 == ON_OFF_AUTO_OFF) { + return 1; + } + return 2; +} + +static void microvm_devices_init(MicrovmMachineState *mms) +{ + const char *default_firmware; + X86MachineState *x86ms = X86_MACHINE(mms); + ISABus *isa_bus; + ISADevice *rtc_state; + GSIState *gsi_state; + int ioapics; + int i; + + /* Core components */ + ioapics = microvm_ioapics(mms); + gsi_state = g_malloc0(sizeof(*gsi_state)); + x86ms->gsi = qemu_allocate_irqs(gsi_handler, gsi_state, + IOAPIC_NUM_PINS * ioapics); + + isa_bus = isa_bus_new(NULL, get_system_memory(), get_system_io(), + &error_abort); + isa_bus_irqs(isa_bus, x86ms->gsi); + + ioapic_init_gsi(gsi_state, "machine"); + if (ioapics > 1) { + x86ms->ioapic2 = ioapic_init_secondary(gsi_state); + } + + kvmclock_create(true); + + mms->virtio_irq_base = 5; + mms->virtio_num_transports = 8; + if (x86ms->ioapic2) { + mms->pcie_irq_base = 16; /* 16 -> 19 */ + /* use second ioapic (24 -> 47) for virtio-mmio irq lines */ + mms->virtio_irq_base = IO_APIC_SECONDARY_IRQBASE; + mms->virtio_num_transports = IOAPIC_NUM_PINS; + } else if (x86_machine_is_acpi_enabled(x86ms)) { + mms->pcie_irq_base = 12; /* 12 -> 15 */ + mms->virtio_irq_base = 16; /* 16 -> 23 */ + } + + for (i = 0; i < mms->virtio_num_transports; i++) { + sysbus_create_simple("virtio-mmio", + VIRTIO_MMIO_BASE + i * 512, + x86ms->gsi[mms->virtio_irq_base + i]); + } + + /* Optional and legacy devices */ + if (x86_machine_is_acpi_enabled(x86ms)) { + DeviceState *dev = qdev_new(TYPE_ACPI_GED_X86); + qdev_prop_set_uint32(dev, "ged-event", ACPI_GED_PWR_DOWN_EVT); + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, GED_MMIO_BASE); + /* sysbus_mmio_map(SYS_BUS_DEVICE(dev), 1, GED_MMIO_BASE_MEMHP); */ + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 2, GED_MMIO_BASE_REGS); + sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, + x86ms->gsi[GED_MMIO_IRQ]); + sysbus_realize(SYS_BUS_DEVICE(dev), &error_fatal); + x86ms->acpi_dev = HOTPLUG_HANDLER(dev); + } + + if (x86_machine_is_acpi_enabled(x86ms) && machine_usb(MACHINE(mms))) { + DeviceState *dev = qdev_new(TYPE_XHCI_SYSBUS); + qdev_prop_set_uint32(dev, "intrs", 1); + qdev_prop_set_uint32(dev, "slots", XHCI_MAXSLOTS); + qdev_prop_set_uint32(dev, "p2", 8); + qdev_prop_set_uint32(dev, "p3", 8); + sysbus_realize(SYS_BUS_DEVICE(dev), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, MICROVM_XHCI_BASE); + sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, + x86ms->gsi[MICROVM_XHCI_IRQ]); + } + + if (x86_machine_is_acpi_enabled(x86ms) && mms->pcie == ON_OFF_AUTO_ON) { + /* use topmost 25% of the address space available */ + hwaddr phys_size = (hwaddr)1 << X86_CPU(first_cpu)->phys_bits; + if (phys_size > 
0x1000000ll) { + mms->gpex.mmio64.size = phys_size / 4; + mms->gpex.mmio64.base = phys_size - mms->gpex.mmio64.size; + } + mms->gpex.mmio32.base = PCIE_MMIO_BASE; + mms->gpex.mmio32.size = PCIE_MMIO_SIZE; + mms->gpex.ecam.base = PCIE_ECAM_BASE; + mms->gpex.ecam.size = PCIE_ECAM_SIZE; + mms->gpex.irq = mms->pcie_irq_base; + create_gpex(mms); + x86ms->pci_irq_mask = ((1 << (mms->pcie_irq_base + 0)) | + (1 << (mms->pcie_irq_base + 1)) | + (1 << (mms->pcie_irq_base + 2)) | + (1 << (mms->pcie_irq_base + 3))); + } else { + x86ms->pci_irq_mask = 0; + } + + if (mms->pic == ON_OFF_AUTO_ON || mms->pic == ON_OFF_AUTO_AUTO) { + qemu_irq *i8259; + + i8259 = i8259_init(isa_bus, x86_allocate_cpu_irq()); + for (i = 0; i < ISA_NUM_IRQS; i++) { + gsi_state->i8259_irq[i] = i8259[i]; + } + g_free(i8259); + } + + if (mms->pit == ON_OFF_AUTO_ON || mms->pit == ON_OFF_AUTO_AUTO) { + if (kvm_pit_in_kernel()) { + kvm_pit_init(isa_bus, 0x40); + } else { + i8254_pit_init(isa_bus, 0x40, 0, NULL); + } + } + + if (mms->rtc == ON_OFF_AUTO_ON || + (mms->rtc == ON_OFF_AUTO_AUTO && !kvm_enabled())) { + rtc_state = mc146818_rtc_init(isa_bus, 2000, NULL); + microvm_set_rtc(mms, rtc_state); + } + + if (mms->isa_serial) { + serial_hds_isa_init(isa_bus, 0, 1); + } + + default_firmware = x86_machine_is_acpi_enabled(x86ms) + ? MICROVM_BIOS_FILENAME + : MICROVM_QBOOT_FILENAME; + x86_bios_rom_init(MACHINE(mms), default_firmware, get_system_memory(), true); +} + +static void microvm_memory_init(MicrovmMachineState *mms) +{ + MachineState *machine = MACHINE(mms); + X86MachineState *x86ms = X86_MACHINE(mms); + MemoryRegion *ram_below_4g, *ram_above_4g; + MemoryRegion *system_memory = get_system_memory(); + FWCfgState *fw_cfg; + ram_addr_t lowmem = 0xc0000000; /* 3G */ + int i; + + if (machine->ram_size > lowmem) { + x86ms->above_4g_mem_size = machine->ram_size - lowmem; + x86ms->below_4g_mem_size = lowmem; + } else { + x86ms->above_4g_mem_size = 0; + x86ms->below_4g_mem_size = machine->ram_size; + } + + ram_below_4g = g_malloc(sizeof(*ram_below_4g)); + memory_region_init_alias(ram_below_4g, NULL, "ram-below-4g", machine->ram, + 0, x86ms->below_4g_mem_size); + memory_region_add_subregion(system_memory, 0, ram_below_4g); + + e820_add_entry(0, x86ms->below_4g_mem_size, E820_RAM); + + if (x86ms->above_4g_mem_size > 0) { + ram_above_4g = g_malloc(sizeof(*ram_above_4g)); + memory_region_init_alias(ram_above_4g, NULL, "ram-above-4g", + machine->ram, + x86ms->below_4g_mem_size, + x86ms->above_4g_mem_size); + memory_region_add_subregion(system_memory, 0x100000000ULL, + ram_above_4g); + e820_add_entry(0x100000000ULL, x86ms->above_4g_mem_size, E820_RAM); + } + + fw_cfg = fw_cfg_init_io_dma(FW_CFG_IO_BASE, FW_CFG_IO_BASE + 4, + &address_space_memory); + + fw_cfg_add_i16(fw_cfg, FW_CFG_NB_CPUS, machine->smp.cpus); + fw_cfg_add_i16(fw_cfg, FW_CFG_MAX_CPUS, machine->smp.max_cpus); + fw_cfg_add_i64(fw_cfg, FW_CFG_RAM_SIZE, (uint64_t)machine->ram_size); + fw_cfg_add_i32(fw_cfg, FW_CFG_IRQ0_OVERRIDE, 1); + fw_cfg_add_bytes(fw_cfg, FW_CFG_E820_TABLE, + &e820_reserve, sizeof(e820_reserve)); + fw_cfg_add_file(fw_cfg, "etc/e820", e820_table, + sizeof(struct e820_entry) * e820_get_num_entries()); + + rom_set_fw(fw_cfg); + + if (machine->kernel_filename != NULL) { + x86_load_linux(x86ms, fw_cfg, 0, true); + } + + if (mms->option_roms) { + for (i = 0; i < nb_option_roms; i++) { + rom_add_option(option_rom[i].name, option_rom[i].bootindex); + } + } + + x86ms->fw_cfg = fw_cfg; + x86ms->ioapic_as = &address_space_memory; +} + +static gchar 
*microvm_get_mmio_cmdline(gchar *name, uint32_t virtio_irq_base) +{ + gchar *cmdline; + gchar *separator; + long int index; + int ret; + + separator = g_strrstr(name, "."); + if (!separator) { + return NULL; + } + + if (qemu_strtol(separator + 1, NULL, 10, &index) != 0) { + return NULL; + } + + cmdline = g_malloc0(VIRTIO_CMDLINE_MAXLEN); + ret = g_snprintf(cmdline, VIRTIO_CMDLINE_MAXLEN, + " virtio_mmio.device=512@0x%lx:%ld", + VIRTIO_MMIO_BASE + index * 512, + virtio_irq_base + index); + if (ret < 0 || ret >= VIRTIO_CMDLINE_MAXLEN) { + g_free(cmdline); + return NULL; + } + + return cmdline; +} + +static void microvm_fix_kernel_cmdline(MachineState *machine) +{ + X86MachineState *x86ms = X86_MACHINE(machine); + MicrovmMachineState *mms = MICROVM_MACHINE(machine); + BusState *bus; + BusChild *kid; + char *cmdline; + + /* + * Find MMIO transports with attached devices, and add them to the kernel + * command line. + * + * Yes, this is a hack, but one that heavily improves the UX without + * introducing any significant issues. + */ + cmdline = g_strdup(machine->kernel_cmdline); + bus = sysbus_get_default(); + QTAILQ_FOREACH(kid, &bus->children, sibling) { + DeviceState *dev = kid->child; + ObjectClass *class = object_get_class(OBJECT(dev)); + + if (class == object_class_by_name(TYPE_VIRTIO_MMIO)) { + VirtIOMMIOProxy *mmio = VIRTIO_MMIO(OBJECT(dev)); + VirtioBusState *mmio_virtio_bus = &mmio->bus; + BusState *mmio_bus = &mmio_virtio_bus->parent_obj; + + if (!QTAILQ_EMPTY(&mmio_bus->children)) { + gchar *mmio_cmdline = microvm_get_mmio_cmdline + (mmio_bus->name, mms->virtio_irq_base); + if (mmio_cmdline) { + char *newcmd = g_strjoin(NULL, cmdline, mmio_cmdline, NULL); + g_free(mmio_cmdline); + g_free(cmdline); + cmdline = newcmd; + } + } + } + } + + fw_cfg_modify_i32(x86ms->fw_cfg, FW_CFG_CMDLINE_SIZE, strlen(cmdline) + 1); + fw_cfg_modify_string(x86ms->fw_cfg, FW_CFG_CMDLINE_DATA, cmdline); + + g_free(cmdline); +} + +static void microvm_device_pre_plug_cb(HotplugHandler *hotplug_dev, + DeviceState *dev, Error **errp) +{ + X86CPU *cpu = X86_CPU(dev); + + cpu->host_phys_bits = true; /* need reliable phys-bits */ + x86_cpu_pre_plug(hotplug_dev, dev, errp); +} + +static void microvm_device_plug_cb(HotplugHandler *hotplug_dev, + DeviceState *dev, Error **errp) +{ + x86_cpu_plug(hotplug_dev, dev, errp); +} + +static void microvm_device_unplug_request_cb(HotplugHandler *hotplug_dev, + DeviceState *dev, Error **errp) +{ + error_setg(errp, "unplug not supported by microvm"); +} + +static void microvm_device_unplug_cb(HotplugHandler *hotplug_dev, + DeviceState *dev, Error **errp) +{ + error_setg(errp, "unplug not supported by microvm"); +} + +static HotplugHandler *microvm_get_hotplug_handler(MachineState *machine, + DeviceState *dev) +{ + if (object_dynamic_cast(OBJECT(dev), TYPE_CPU)) { + return HOTPLUG_HANDLER(machine); + } + return NULL; +} + +static void microvm_machine_state_init(MachineState *machine) +{ + MicrovmMachineState *mms = MICROVM_MACHINE(machine); + X86MachineState *x86ms = X86_MACHINE(machine); + + microvm_memory_init(mms); + + x86_cpus_init(x86ms, CPU_VERSION_LATEST); + + microvm_devices_init(mms); +} + +static void microvm_machine_reset(MachineState *machine) +{ + MicrovmMachineState *mms = MICROVM_MACHINE(machine); + CPUState *cs; + X86CPU *cpu; + + if (!x86_machine_is_acpi_enabled(X86_MACHINE(machine)) && + machine->kernel_filename != NULL && + mms->auto_kernel_cmdline && !mms->kernel_cmdline_fixed) { + microvm_fix_kernel_cmdline(machine); + mms->kernel_cmdline_fixed = true; + } + 
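+ /* + * qemu_devices_reset() only walks the qbus tree; the per-CPU APICs + * live outside of it, so they are reset explicitly below. + */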
+ qemu_devices_reset(); + + CPU_FOREACH(cs) { + cpu = X86_CPU(cs); + + if (cpu->apic_state) { + device_legacy_reset(cpu->apic_state); + } + } +} + +static void microvm_machine_get_pic(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + MicrovmMachineState *mms = MICROVM_MACHINE(obj); + OnOffAuto pic = mms->pic; + + visit_type_OnOffAuto(v, name, &pic, errp); +} + +static void microvm_machine_set_pic(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + MicrovmMachineState *mms = MICROVM_MACHINE(obj); + + visit_type_OnOffAuto(v, name, &mms->pic, errp); +} + +static void microvm_machine_get_pit(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + MicrovmMachineState *mms = MICROVM_MACHINE(obj); + OnOffAuto pit = mms->pit; + + visit_type_OnOffAuto(v, name, &pit, errp); +} + +static void microvm_machine_set_pit(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + MicrovmMachineState *mms = MICROVM_MACHINE(obj); + + visit_type_OnOffAuto(v, name, &mms->pit, errp); +} + +static void microvm_machine_get_rtc(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + MicrovmMachineState *mms = MICROVM_MACHINE(obj); + OnOffAuto rtc = mms->rtc; + + visit_type_OnOffAuto(v, name, &rtc, errp); +} + +static void microvm_machine_set_rtc(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + MicrovmMachineState *mms = MICROVM_MACHINE(obj); + + visit_type_OnOffAuto(v, name, &mms->rtc, errp); +} + +static void microvm_machine_get_pcie(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + MicrovmMachineState *mms = MICROVM_MACHINE(obj); + OnOffAuto pcie = mms->pcie; + + visit_type_OnOffAuto(v, name, &pcie, errp); +} + +static void microvm_machine_set_pcie(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + MicrovmMachineState *mms = MICROVM_MACHINE(obj); + + visit_type_OnOffAuto(v, name, &mms->pcie, errp); +} + +static void microvm_machine_get_ioapic2(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + MicrovmMachineState *mms = MICROVM_MACHINE(obj); + OnOffAuto ioapic2 = mms->ioapic2; + + visit_type_OnOffAuto(v, name, &ioapic2, errp); +} + +static void microvm_machine_set_ioapic2(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + MicrovmMachineState *mms = MICROVM_MACHINE(obj); + + visit_type_OnOffAuto(v, name, &mms->ioapic2, errp); +} + +static bool microvm_machine_get_isa_serial(Object *obj, Error **errp) +{ + MicrovmMachineState *mms = MICROVM_MACHINE(obj); + + return mms->isa_serial; +} + +static void microvm_machine_set_isa_serial(Object *obj, bool value, + Error **errp) +{ + MicrovmMachineState *mms = MICROVM_MACHINE(obj); + + mms->isa_serial = value; +} + +static bool microvm_machine_get_option_roms(Object *obj, Error **errp) +{ + MicrovmMachineState *mms = MICROVM_MACHINE(obj); + + return mms->option_roms; +} + +static void microvm_machine_set_option_roms(Object *obj, bool value, + Error **errp) +{ + MicrovmMachineState *mms = MICROVM_MACHINE(obj); + + mms->option_roms = value; +} + +static bool microvm_machine_get_auto_kernel_cmdline(Object *obj, Error **errp) +{ + MicrovmMachineState *mms = MICROVM_MACHINE(obj); + + return mms->auto_kernel_cmdline; +} + +static void microvm_machine_set_auto_kernel_cmdline(Object *obj, bool value, + Error **errp) +{ + MicrovmMachineState *mms = MICROVM_MACHINE(obj); + + mms->auto_kernel_cmdline = value; +} + +static void 
microvm_machine_done(Notifier *notifier, void *data) +{ + MicrovmMachineState *mms = container_of(notifier, MicrovmMachineState, + machine_done); + + acpi_setup_microvm(mms); + dt_setup_microvm(mms); +} + +static void microvm_powerdown_req(Notifier *notifier, void *data) +{ + MicrovmMachineState *mms = container_of(notifier, MicrovmMachineState, + powerdown_req); + X86MachineState *x86ms = X86_MACHINE(mms); + + if (x86ms->acpi_dev) { + Object *obj = OBJECT(x86ms->acpi_dev); + AcpiDeviceIfClass *adevc = ACPI_DEVICE_IF_GET_CLASS(obj); + adevc->send_event(ACPI_DEVICE_IF(x86ms->acpi_dev), + ACPI_POWER_DOWN_STATUS); + } +} + +static void microvm_machine_initfn(Object *obj) +{ + MicrovmMachineState *mms = MICROVM_MACHINE(obj); + + /* Configuration */ + mms->pic = ON_OFF_AUTO_AUTO; + mms->pit = ON_OFF_AUTO_AUTO; + mms->rtc = ON_OFF_AUTO_AUTO; + mms->pcie = ON_OFF_AUTO_AUTO; + mms->ioapic2 = ON_OFF_AUTO_AUTO; + mms->isa_serial = true; + mms->option_roms = true; + mms->auto_kernel_cmdline = true; + + /* State */ + mms->kernel_cmdline_fixed = false; + + mms->machine_done.notify = microvm_machine_done; + qemu_add_machine_init_done_notifier(&mms->machine_done); + mms->powerdown_req.notify = microvm_powerdown_req; + qemu_register_powerdown_notifier(&mms->powerdown_req); +} + +static void microvm_class_init(ObjectClass *oc, void *data) +{ + X86MachineClass *x86mc = X86_MACHINE_CLASS(oc); + MachineClass *mc = MACHINE_CLASS(oc); + HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(oc); + + mc->init = microvm_machine_state_init; + + mc->family = "microvm_i386"; + mc->desc = "microvm (i386)"; + mc->units_per_default_bus = 1; + mc->no_floppy = 1; + mc->max_cpus = 288; + mc->has_hotpluggable_cpus = false; + mc->auto_enable_numa_with_memhp = false; + mc->auto_enable_numa_with_memdev = false; + mc->default_cpu_type = TARGET_DEFAULT_CPU_TYPE; + mc->nvdimm_supported = false; + mc->default_ram_id = "microvm.ram"; + + /* Avoid relying too much on kernel components */ + mc->default_kernel_irqchip_split = true; + + /* Machine class handlers */ + mc->reset = microvm_machine_reset; + + /* hotplug (for cpu coldplug) */ + mc->get_hotplug_handler = microvm_get_hotplug_handler; + hc->pre_plug = microvm_device_pre_plug_cb; + hc->plug = microvm_device_plug_cb; + hc->unplug_request = microvm_device_unplug_request_cb; + hc->unplug = microvm_device_unplug_cb; + + x86mc->fwcfg_dma_enabled = true; + + object_class_property_add(oc, MICROVM_MACHINE_PIC, "OnOffAuto", + microvm_machine_get_pic, + microvm_machine_set_pic, + NULL, NULL); + object_class_property_set_description(oc, MICROVM_MACHINE_PIC, + "Enable i8259 PIC"); + + object_class_property_add(oc, MICROVM_MACHINE_PIT, "OnOffAuto", + microvm_machine_get_pit, + microvm_machine_set_pit, + NULL, NULL); + object_class_property_set_description(oc, MICROVM_MACHINE_PIT, + "Enable i8254 PIT"); + + object_class_property_add(oc, MICROVM_MACHINE_RTC, "OnOffAuto", + microvm_machine_get_rtc, + microvm_machine_set_rtc, + NULL, NULL); + object_class_property_set_description(oc, MICROVM_MACHINE_RTC, + "Enable MC146818 RTC"); + + object_class_property_add(oc, MICROVM_MACHINE_PCIE, "OnOffAuto", + microvm_machine_get_pcie, + microvm_machine_set_pcie, + NULL, NULL); + object_class_property_set_description(oc, MICROVM_MACHINE_PCIE, + "Enable PCIe"); + + object_class_property_add(oc, MICROVM_MACHINE_IOAPIC2, "OnOffAuto", + microvm_machine_get_ioapic2, + microvm_machine_set_ioapic2, + NULL, NULL); + object_class_property_set_description(oc, MICROVM_MACHINE_IOAPIC2, + "Enable second IO-APIC"); + + 
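+ /* + * The remaining machine options are plain booleans (default: on), + * unlike the OnOffAuto options above, whose AUTO state is resolved + * at init time depending on things like KVM or ACPI availability. + */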
object_class_property_add_bool(oc, MICROVM_MACHINE_ISA_SERIAL, + microvm_machine_get_isa_serial, + microvm_machine_set_isa_serial); + object_class_property_set_description(oc, MICROVM_MACHINE_ISA_SERIAL, + "Set off to disable the instantiation of an ISA serial port"); + + object_class_property_add_bool(oc, MICROVM_MACHINE_OPTION_ROMS, + microvm_machine_get_option_roms, + microvm_machine_set_option_roms); + object_class_property_set_description(oc, MICROVM_MACHINE_OPTION_ROMS, + "Set off to disable loading option ROMs"); + + object_class_property_add_bool(oc, MICROVM_MACHINE_AUTO_KERNEL_CMDLINE, + microvm_machine_get_auto_kernel_cmdline, + microvm_machine_set_auto_kernel_cmdline); + object_class_property_set_description(oc, + MICROVM_MACHINE_AUTO_KERNEL_CMDLINE, + "Set off to disable adding virtio-mmio devices to the kernel cmdline"); + + machine_class_allow_dynamic_sysbus_dev(mc, TYPE_RAMFB_DEVICE); +} + +static const TypeInfo microvm_machine_info = { + .name = TYPE_MICROVM_MACHINE, + .parent = TYPE_X86_MACHINE, + .instance_size = sizeof(MicrovmMachineState), + .instance_init = microvm_machine_initfn, + .class_size = sizeof(MicrovmMachineClass), + .class_init = microvm_class_init, + .interfaces = (InterfaceInfo[]) { + { TYPE_HOTPLUG_HANDLER }, + { } + }, +}; + +static void microvm_machine_init(void) +{ + type_register_static(&microvm_machine_info); +} +type_init(microvm_machine_init); diff --git a/hw/i386/multiboot.c b/hw/i386/multiboot.c new file mode 100644 index 000000000..0a10089f1 --- /dev/null +++ b/hw/i386/multiboot.c @@ -0,0 +1,415 @@ +/* + * QEMU PC System Emulator + * + * Copyright (c) 2003-2004 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "qemu/option.h" +#include "cpu.h" +#include "hw/nvram/fw_cfg.h" +#include "multiboot.h" +#include "hw/loader.h" +#include "elf.h" +#include "sysemu/sysemu.h" +#include "qemu/error-report.h" + +/* Show multiboot debug output */ +//#define DEBUG_MULTIBOOT + +#ifdef DEBUG_MULTIBOOT +#define mb_debug(a...) error_report(a) +#else +#define mb_debug(a...)
+#endif + +#define MULTIBOOT_STRUCT_ADDR 0x9000 + +#if MULTIBOOT_STRUCT_ADDR > 0xf0000 +#error multiboot struct needs to fit in 16 bit real mode +#endif + +enum { + /* Multiboot info */ + MBI_FLAGS = 0, + MBI_MEM_LOWER = 4, + MBI_MEM_UPPER = 8, + MBI_BOOT_DEVICE = 12, + MBI_CMDLINE = 16, + MBI_MODS_COUNT = 20, + MBI_MODS_ADDR = 24, + MBI_MMAP_ADDR = 48, + MBI_BOOTLOADER = 64, + + MBI_SIZE = 88, + + /* Multiboot modules */ + MB_MOD_START = 0, + MB_MOD_END = 4, + MB_MOD_CMDLINE = 8, + + MB_MOD_SIZE = 16, + + /* Region offsets */ + ADDR_E820_MAP = MULTIBOOT_STRUCT_ADDR + 0, + ADDR_MBI = ADDR_E820_MAP + 0x500, + + /* Multiboot flags */ + MULTIBOOT_FLAGS_MEMORY = 1 << 0, + MULTIBOOT_FLAGS_BOOT_DEVICE = 1 << 1, + MULTIBOOT_FLAGS_CMDLINE = 1 << 2, + MULTIBOOT_FLAGS_MODULES = 1 << 3, + MULTIBOOT_FLAGS_MMAP = 1 << 6, + MULTIBOOT_FLAGS_BOOTLOADER = 1 << 9, +}; + +typedef struct { + /* buffer holding kernel, cmdlines and mb_infos */ + void *mb_buf; + /* address in target */ + hwaddr mb_buf_phys; + /* size of mb_buf in bytes */ + unsigned mb_buf_size; + /* offset of mb-info's in bytes */ + hwaddr offset_mbinfo; + /* offset in buffer for cmdlines in bytes */ + hwaddr offset_cmdlines; + /* offset in buffer for bootloader name in bytes */ + hwaddr offset_bootloader; + /* offset of modules in bytes */ + hwaddr offset_mods; + /* available slots for mb modules infos */ + int mb_mods_avail; + /* currently used slots of mb modules */ + int mb_mods_count; +} MultibootState; + +const char *bootloader_name = "qemu"; + +static uint32_t mb_add_cmdline(MultibootState *s, const char *cmdline) +{ + hwaddr p = s->offset_cmdlines; + char *b = (char *)s->mb_buf + p; + + memcpy(b, cmdline, strlen(cmdline) + 1); + s->offset_cmdlines += strlen(b) + 1; + return s->mb_buf_phys + p; +} + +static uint32_t mb_add_bootloader(MultibootState *s, const char *bootloader) +{ + hwaddr p = s->offset_bootloader; + char *b = (char *)s->mb_buf + p; + + memcpy(b, bootloader, strlen(bootloader) + 1); + s->offset_bootloader += strlen(b) + 1; + return s->mb_buf_phys + p; +} + +static void mb_add_mod(MultibootState *s, + hwaddr start, hwaddr end, + hwaddr cmdline_phys) +{ + char *p; + assert(s->mb_mods_count < s->mb_mods_avail); + + p = (char *)s->mb_buf + s->offset_mbinfo + MB_MOD_SIZE * s->mb_mods_count; + + stl_p(p + MB_MOD_START, start); + stl_p(p + MB_MOD_END, end); + stl_p(p + MB_MOD_CMDLINE, cmdline_phys); + + mb_debug("mod%02d: "TARGET_FMT_plx" - "TARGET_FMT_plx, + s->mb_mods_count, start, end); + + s->mb_mods_count++; +} + +int load_multiboot(X86MachineState *x86ms, + FWCfgState *fw_cfg, + FILE *f, + const char *kernel_filename, + const char *initrd_filename, + const char *kernel_cmdline, + int kernel_file_size, + uint8_t *header) +{ + bool multiboot_dma_enabled = X86_MACHINE_GET_CLASS(x86ms)->fwcfg_dma_enabled; + int i, is_multiboot = 0; + uint32_t flags = 0; + uint32_t mh_entry_addr; + uint32_t mh_load_addr; + uint32_t mb_kernel_size; + MultibootState mbs; + uint8_t bootinfo[MBI_SIZE]; + uint8_t *mb_bootinfo_data; + uint32_t cmdline_len; + GList *mods = NULL; + + /* Ok, let's see if it is a multiboot image. + The header is 12x32bit long, so the latest entry may be 8192 - 48. 
*/ + for (i = 0; i < (8192 - 48); i += 4) { + if (ldl_p(header+i) == 0x1BADB002) { + uint32_t checksum = ldl_p(header+i+8); + flags = ldl_p(header+i+4); + checksum += flags; + checksum += (uint32_t)0x1BADB002; + if (!checksum) { + is_multiboot = 1; + break; + } + } + } + + if (!is_multiboot) + return 0; /* no multiboot */ + + mb_debug("I believe we found a multiboot image!"); + memset(bootinfo, 0, sizeof(bootinfo)); + memset(&mbs, 0, sizeof(mbs)); + + if (flags & 0x00000004) { /* MULTIBOOT_HEADER_HAS_VBE */ + error_report("multiboot knows VBE. we don't"); + } + if (!(flags & 0x00010000)) { /* MULTIBOOT_HEADER_HAS_ADDR */ + uint64_t elf_entry; + uint64_t elf_low, elf_high; + int kernel_size; + fclose(f); + + if (((struct elf64_hdr*)header)->e_machine == EM_X86_64) { + error_report("Cannot load x86-64 image, give a 32bit one."); + exit(1); + } + + kernel_size = load_elf(kernel_filename, NULL, NULL, NULL, &elf_entry, + &elf_low, &elf_high, NULL, 0, I386_ELF_MACHINE, + 0, 0); + if (kernel_size < 0) { + error_report("Error while loading elf kernel"); + exit(1); + } + mh_load_addr = elf_low; + mb_kernel_size = elf_high - elf_low; + mh_entry_addr = elf_entry; + + mbs.mb_buf = g_malloc(mb_kernel_size); + if (rom_copy(mbs.mb_buf, mh_load_addr, mb_kernel_size) != mb_kernel_size) { + error_report("Error while fetching elf kernel from rom"); + exit(1); + } + + mb_debug("loading multiboot-elf kernel " + "(%#x bytes) with entry %#zx", + mb_kernel_size, (size_t)mh_entry_addr); + } else { + /* Valid if mh_flags sets MULTIBOOT_HEADER_HAS_ADDR. */ + uint32_t mh_header_addr = ldl_p(header+i+12); + uint32_t mh_load_end_addr = ldl_p(header+i+20); + uint32_t mh_bss_end_addr = ldl_p(header+i+24); + + mh_load_addr = ldl_p(header+i+16); + if (mh_header_addr < mh_load_addr) { + error_report("invalid load_addr address"); + exit(1); + } + if (mh_header_addr - mh_load_addr > i) { + error_report("invalid header_addr address"); + exit(1); + } + + uint32_t mb_kernel_text_offset = i - (mh_header_addr - mh_load_addr); + uint32_t mb_load_size = 0; + mh_entry_addr = ldl_p(header+i+28); + + if (mh_load_end_addr) { + if (mh_load_end_addr < mh_load_addr) { + error_report("invalid load_end_addr address"); + exit(1); + } + mb_load_size = mh_load_end_addr - mh_load_addr; + } else { + if (kernel_file_size < mb_kernel_text_offset) { + error_report("invalid kernel_file_size"); + exit(1); + } + mb_load_size = kernel_file_size - mb_kernel_text_offset; + } + if (mb_load_size > UINT32_MAX - mh_load_addr) { + error_report("kernel does not fit in address space"); + exit(1); + } + if (mh_bss_end_addr) { + if (mh_bss_end_addr < (mh_load_addr + mb_load_size)) { + error_report("invalid bss_end_addr address"); + exit(1); + } + mb_kernel_size = mh_bss_end_addr - mh_load_addr; + } else { + mb_kernel_size = mb_load_size; + } + + mb_debug("multiboot: header_addr = %#x", mh_header_addr); + mb_debug("multiboot: load_addr = %#x", mh_load_addr); + mb_debug("multiboot: load_end_addr = %#x", mh_load_end_addr); + mb_debug("multiboot: bss_end_addr = %#x", mh_bss_end_addr); + mb_debug("loading multiboot kernel (%#x bytes) at %#x", + mb_load_size, mh_load_addr); + + mbs.mb_buf = g_malloc(mb_kernel_size); + fseek(f, mb_kernel_text_offset, SEEK_SET); + if (fread(mbs.mb_buf, 1, mb_load_size, f) != mb_load_size) { + error_report("fread() failed"); + exit(1); + } + memset(mbs.mb_buf + mb_load_size, 0, mb_kernel_size - mb_load_size); + fclose(f); + } + + mbs.mb_buf_phys = mh_load_addr; + + mbs.mb_buf_size = TARGET_PAGE_ALIGN(mb_kernel_size); + mbs.offset_mbinfo = 
mbs.mb_buf_size; + + /* Calculate space for cmdlines, bootloader name, and mb_mods */ + cmdline_len = strlen(kernel_filename) + 1; + cmdline_len += strlen(kernel_cmdline) + 1; + if (initrd_filename) { + const char *r = initrd_filename; + cmdline_len += strlen(initrd_filename) + 1; + while (*r) { + char *value; + r = get_opt_value(r, &value); + mbs.mb_mods_avail++; + mods = g_list_append(mods, value); + if (*r) { + r++; + } + } + } + + mbs.mb_buf_size += cmdline_len; + mbs.mb_buf_size += MB_MOD_SIZE * mbs.mb_mods_avail; + mbs.mb_buf_size += strlen(bootloader_name) + 1; + + mbs.mb_buf_size = TARGET_PAGE_ALIGN(mbs.mb_buf_size); + + /* enlarge mb_buf to hold cmdlines, bootloader, mb-info structs */ + mbs.mb_buf = g_realloc(mbs.mb_buf, mbs.mb_buf_size); + mbs.offset_cmdlines = mbs.offset_mbinfo + mbs.mb_mods_avail * MB_MOD_SIZE; + mbs.offset_bootloader = mbs.offset_cmdlines + cmdline_len; + + if (mods) { + GList *tmpl = mods; + mbs.offset_mods = mbs.mb_buf_size; + + while (tmpl) { + char *next_space; + int mb_mod_length; + uint32_t offs = mbs.mb_buf_size; + char *one_file = tmpl->data; + + /* if a space comes after the module filename, treat everything + after that as parameters */ + hwaddr c = mb_add_cmdline(&mbs, one_file); + next_space = strchr(one_file, ' '); + if (next_space) { + *next_space = '\0'; + } + mb_debug("multiboot loading module: %s", one_file); + mb_mod_length = get_image_size(one_file); + if (mb_mod_length < 0) { + error_report("Failed to open file '%s'", one_file); + exit(1); + } + + mbs.mb_buf_size = TARGET_PAGE_ALIGN(mb_mod_length + mbs.mb_buf_size); + mbs.mb_buf = g_realloc(mbs.mb_buf, mbs.mb_buf_size); + + if (load_image_size(one_file, (unsigned char *)mbs.mb_buf + offs, + mbs.mb_buf_size - offs) < 0) { + error_report("Error loading file '%s'", one_file); + exit(1); + } + mb_add_mod(&mbs, mbs.mb_buf_phys + offs, + mbs.mb_buf_phys + offs + mb_mod_length, c); + + mb_debug("mod_start: %p\nmod_end: %p\n cmdline: "TARGET_FMT_plx, + (char *)mbs.mb_buf + offs, + (char *)mbs.mb_buf + offs + mb_mod_length, c); + g_free(one_file); + tmpl = tmpl->next; + } + g_list_free(mods); + } + + /* Commandline support */ + char kcmdline[strlen(kernel_filename) + strlen(kernel_cmdline) + 2]; + snprintf(kcmdline, sizeof(kcmdline), "%s %s", + kernel_filename, kernel_cmdline); + stl_p(bootinfo + MBI_CMDLINE, mb_add_cmdline(&mbs, kcmdline)); + + stl_p(bootinfo + MBI_BOOTLOADER, mb_add_bootloader(&mbs, bootloader_name)); + + stl_p(bootinfo + MBI_MODS_ADDR, mbs.mb_buf_phys + mbs.offset_mbinfo); + stl_p(bootinfo + MBI_MODS_COUNT, mbs.mb_mods_count); /* mods_count */ + + /* the kernel is where we want it to be now */ + stl_p(bootinfo + MBI_FLAGS, MULTIBOOT_FLAGS_MEMORY + | MULTIBOOT_FLAGS_BOOT_DEVICE + | MULTIBOOT_FLAGS_CMDLINE + | MULTIBOOT_FLAGS_MODULES + | MULTIBOOT_FLAGS_MMAP + | MULTIBOOT_FLAGS_BOOTLOADER); + stl_p(bootinfo + MBI_BOOT_DEVICE, 0x8000ffff); /* XXX: use the -boot switch? 
*/ + stl_p(bootinfo + MBI_MMAP_ADDR, ADDR_E820_MAP); + + mb_debug("multiboot: entry_addr = %#x", mh_entry_addr); + mb_debug(" mb_buf_phys = "TARGET_FMT_plx, mbs.mb_buf_phys); + mb_debug(" mod_start = "TARGET_FMT_plx, + mbs.mb_buf_phys + mbs.offset_mods); + mb_debug(" mb_mods_count = %d", mbs.mb_mods_count); + + /* save bootinfo off the stack */ + mb_bootinfo_data = g_memdup(bootinfo, sizeof(bootinfo)); + + /* Pass variables to option rom */ + fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_ENTRY, mh_entry_addr); + fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_ADDR, mh_load_addr); + fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_SIZE, mbs.mb_buf_size); + fw_cfg_add_bytes(fw_cfg, FW_CFG_KERNEL_DATA, + mbs.mb_buf, mbs.mb_buf_size); + + fw_cfg_add_i32(fw_cfg, FW_CFG_INITRD_ADDR, ADDR_MBI); + fw_cfg_add_i32(fw_cfg, FW_CFG_INITRD_SIZE, sizeof(bootinfo)); + fw_cfg_add_bytes(fw_cfg, FW_CFG_INITRD_DATA, mb_bootinfo_data, + sizeof(bootinfo)); + + if (multiboot_dma_enabled) { + option_rom[nb_option_roms].name = "multiboot_dma.bin"; + } else { + option_rom[nb_option_roms].name = "multiboot.bin"; + } + option_rom[nb_option_roms].bootindex = 0; + nb_option_roms++; + + return 1; /* yes, we are multiboot */ +} diff --git a/hw/i386/multiboot.h b/hw/i386/multiboot.h new file mode 100644 index 000000000..2b9182a8e --- /dev/null +++ b/hw/i386/multiboot.h @@ -0,0 +1,16 @@ +#ifndef QEMU_MULTIBOOT_H +#define QEMU_MULTIBOOT_H + +#include "hw/nvram/fw_cfg.h" +#include "hw/i386/x86.h" + +int load_multiboot(X86MachineState *x86ms, + FWCfgState *fw_cfg, + FILE *f, + const char *kernel_filename, + const char *initrd_filename, + const char *kernel_cmdline, + int kernel_file_size, + uint8_t *header); + +#endif diff --git a/hw/i386/pc.c b/hw/i386/pc.c new file mode 100644 index 000000000..a2ef40ecb --- /dev/null +++ b/hw/i386/pc.c @@ -0,0 +1,1777 @@ +/* + * QEMU PC System Emulator + * + * Copyright (c) 2003-2004 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "hw/i386/x86.h" +#include "hw/i386/pc.h" +#include "hw/char/serial.h" +#include "hw/char/parallel.h" +#include "hw/i386/apic.h" +#include "hw/i386/topology.h" +#include "hw/i386/fw_cfg.h" +#include "hw/i386/vmport.h" +#include "sysemu/cpus.h" +#include "hw/block/fdc.h" +#include "hw/ide.h" +#include "hw/pci/pci.h" +#include "hw/pci/pci_bus.h" +#include "hw/nvram/fw_cfg.h" +#include "hw/timer/hpet.h" +#include "hw/firmware/smbios.h" +#include "hw/loader.h" +#include "elf.h" +#include "migration/vmstate.h" +#include "multiboot.h" +#include "hw/rtc/mc146818rtc.h" +#include "hw/intc/i8259.h" +#include "hw/dma/i8257.h" +#include "hw/timer/i8254.h" +#include "hw/input/i8042.h" +#include "hw/irq.h" +#include "hw/audio/pcspk.h" +#include "hw/pci/msi.h" +#include "hw/sysbus.h" +#include "sysemu/sysemu.h" +#include "sysemu/tcg.h" +#include "sysemu/numa.h" +#include "sysemu/kvm.h" +#include "sysemu/xen.h" +#include "sysemu/reset.h" +#include "sysemu/runstate.h" +#include "kvm/kvm_i386.h" +#include "hw/xen/xen.h" +#include "hw/xen/start_info.h" +#include "ui/qemu-spice.h" +#include "exec/memory.h" +#include "qemu/bitmap.h" +#include "qemu/config-file.h" +#include "qemu/error-report.h" +#include "qemu/option.h" +#include "qemu/cutils.h" +#include "hw/acpi/acpi.h" +#include "hw/acpi/cpu_hotplug.h" +#include "acpi-build.h" +#include "hw/mem/pc-dimm.h" +#include "hw/mem/nvdimm.h" +#include "qapi/error.h" +#include "qapi/qapi-visit-common.h" +#include "qapi/visitor.h" +#include "hw/core/cpu.h" +#include "hw/usb.h" +#include "hw/i386/intel_iommu.h" +#include "hw/net/ne2000-isa.h" +#include "standard-headers/asm-x86/bootparam.h" +#include "hw/virtio/virtio-iommu.h" +#include "hw/virtio/virtio-pmem-pci.h" +#include "hw/virtio/virtio-mem-pci.h" +#include "hw/mem/memory-device.h" +#include "sysemu/replay.h" +#include "qapi/qmp/qerror.h" +#include "e820_memory_layout.h" +#include "fw_cfg.h" +#include "trace.h" +#include CONFIG_DEVICES + +GlobalProperty pc_compat_6_1[] = { + { TYPE_X86_CPU, "hv-version-id-build", "0x1bbc" }, + { TYPE_X86_CPU, "hv-version-id-major", "0x0006" }, + { TYPE_X86_CPU, "hv-version-id-minor", "0x0001" }, + { "ICH9-LPC", "x-keep-pci-slot-hpc", "false" }, +}; +const size_t pc_compat_6_1_len = G_N_ELEMENTS(pc_compat_6_1); + +GlobalProperty pc_compat_6_0[] = { + { "qemu64" "-" TYPE_X86_CPU, "family", "6" }, + { "qemu64" "-" TYPE_X86_CPU, "model", "6" }, + { "qemu64" "-" TYPE_X86_CPU, "stepping", "3" }, + { TYPE_X86_CPU, "x-vendor-cpuid-only", "off" }, + { "ICH9-LPC", ACPI_PM_PROP_ACPI_PCIHP_BRIDGE, "off" }, + { "ICH9-LPC", "x-keep-pci-slot-hpc", "true" }, +}; +const size_t pc_compat_6_0_len = G_N_ELEMENTS(pc_compat_6_0); + +GlobalProperty pc_compat_5_2[] = { + { "ICH9-LPC", "x-smi-cpu-hotunplug", "off" }, +}; +const size_t pc_compat_5_2_len = G_N_ELEMENTS(pc_compat_5_2); + +GlobalProperty pc_compat_5_1[] = { + { "ICH9-LPC", "x-smi-cpu-hotplug", "off" }, + { TYPE_X86_CPU, "kvm-msi-ext-dest-id", "off" }, +}; +const size_t pc_compat_5_1_len = G_N_ELEMENTS(pc_compat_5_1); + +GlobalProperty pc_compat_5_0[] = { +}; +const size_t pc_compat_5_0_len = G_N_ELEMENTS(pc_compat_5_0); + +GlobalProperty pc_compat_4_2[] = { + { "mch", "smbase-smram", "off" }, +}; +const size_t pc_compat_4_2_len = G_N_ELEMENTS(pc_compat_4_2); + +GlobalProperty pc_compat_4_1[] = {}; +const size_t pc_compat_4_1_len = G_N_ELEMENTS(pc_compat_4_1); + +GlobalProperty pc_compat_4_0[] = {}; +const size_t pc_compat_4_0_len = G_N_ELEMENTS(pc_compat_4_0); + 
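+/* + * Older compat arrays follow. A versioned machine type applies its own + * pc_compat_X_Y[] together with the arrays of every newer version, so + * guest-visible defaults stay frozen at the X.Y release (e.g. a pc-*-5.1 + * machine picks up pc_compat_5_1 plus pc_compat_5_2, 6_0 and 6_1). + */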
+GlobalProperty pc_compat_3_1[] = { + { "intel-iommu", "dma-drain", "off" }, + { "Opteron_G3" "-" TYPE_X86_CPU, "rdtscp", "off" }, + { "Opteron_G4" "-" TYPE_X86_CPU, "rdtscp", "off" }, + { "Opteron_G4" "-" TYPE_X86_CPU, "npt", "off" }, + { "Opteron_G4" "-" TYPE_X86_CPU, "nrip-save", "off" }, + { "Opteron_G5" "-" TYPE_X86_CPU, "rdtscp", "off" }, + { "Opteron_G5" "-" TYPE_X86_CPU, "npt", "off" }, + { "Opteron_G5" "-" TYPE_X86_CPU, "nrip-save", "off" }, + { "EPYC" "-" TYPE_X86_CPU, "npt", "off" }, + { "EPYC" "-" TYPE_X86_CPU, "nrip-save", "off" }, + { "EPYC-IBPB" "-" TYPE_X86_CPU, "npt", "off" }, + { "EPYC-IBPB" "-" TYPE_X86_CPU, "nrip-save", "off" }, + { "Skylake-Client" "-" TYPE_X86_CPU, "mpx", "on" }, + { "Skylake-Client-IBRS" "-" TYPE_X86_CPU, "mpx", "on" }, + { "Skylake-Server" "-" TYPE_X86_CPU, "mpx", "on" }, + { "Skylake-Server-IBRS" "-" TYPE_X86_CPU, "mpx", "on" }, + { "Cascadelake-Server" "-" TYPE_X86_CPU, "mpx", "on" }, + { "Icelake-Client" "-" TYPE_X86_CPU, "mpx", "on" }, + { "Icelake-Server" "-" TYPE_X86_CPU, "mpx", "on" }, + { "Cascadelake-Server" "-" TYPE_X86_CPU, "stepping", "5" }, + { TYPE_X86_CPU, "x-intel-pt-auto-level", "off" }, +}; +const size_t pc_compat_3_1_len = G_N_ELEMENTS(pc_compat_3_1); + +GlobalProperty pc_compat_3_0[] = { + { TYPE_X86_CPU, "x-hv-synic-kvm-only", "on" }, + { "Skylake-Server" "-" TYPE_X86_CPU, "pku", "off" }, + { "Skylake-Server-IBRS" "-" TYPE_X86_CPU, "pku", "off" }, +}; +const size_t pc_compat_3_0_len = G_N_ELEMENTS(pc_compat_3_0); + +GlobalProperty pc_compat_2_12[] = { + { TYPE_X86_CPU, "legacy-cache", "on" }, + { TYPE_X86_CPU, "topoext", "off" }, + { "EPYC-" TYPE_X86_CPU, "xlevel", "0x8000000a" }, + { "EPYC-IBPB-" TYPE_X86_CPU, "xlevel", "0x8000000a" }, +}; +const size_t pc_compat_2_12_len = G_N_ELEMENTS(pc_compat_2_12); + +GlobalProperty pc_compat_2_11[] = { + { TYPE_X86_CPU, "x-migrate-smi-count", "off" }, + { "Skylake-Server" "-" TYPE_X86_CPU, "clflushopt", "off" }, +}; +const size_t pc_compat_2_11_len = G_N_ELEMENTS(pc_compat_2_11); + +GlobalProperty pc_compat_2_10[] = { + { TYPE_X86_CPU, "x-hv-max-vps", "0x40" }, + { "i440FX-pcihost", "x-pci-hole64-fix", "off" }, + { "q35-pcihost", "x-pci-hole64-fix", "off" }, +}; +const size_t pc_compat_2_10_len = G_N_ELEMENTS(pc_compat_2_10); + +GlobalProperty pc_compat_2_9[] = { + { "mch", "extended-tseg-mbytes", "0" }, +}; +const size_t pc_compat_2_9_len = G_N_ELEMENTS(pc_compat_2_9); + +GlobalProperty pc_compat_2_8[] = { + { TYPE_X86_CPU, "tcg-cpuid", "off" }, + { "kvmclock", "x-mach-use-reliable-get-clock", "off" }, + { "ICH9-LPC", "x-smi-broadcast", "off" }, + { TYPE_X86_CPU, "vmware-cpuid-freq", "off" }, + { "Haswell-" TYPE_X86_CPU, "stepping", "1" }, +}; +const size_t pc_compat_2_8_len = G_N_ELEMENTS(pc_compat_2_8); + +GlobalProperty pc_compat_2_7[] = { + { TYPE_X86_CPU, "l3-cache", "off" }, + { TYPE_X86_CPU, "full-cpuid-auto-level", "off" }, + { "Opteron_G3" "-" TYPE_X86_CPU, "family", "15" }, + { "Opteron_G3" "-" TYPE_X86_CPU, "model", "6" }, + { "Opteron_G3" "-" TYPE_X86_CPU, "stepping", "1" }, + { "isa-pcspk", "migrate", "off" }, +}; +const size_t pc_compat_2_7_len = G_N_ELEMENTS(pc_compat_2_7); + +GlobalProperty pc_compat_2_6[] = { + { TYPE_X86_CPU, "cpuid-0xb", "off" }, + { "vmxnet3", "romfile", "" }, + { TYPE_X86_CPU, "fill-mtrr-mask", "off" }, + { "apic-common", "legacy-instance-id", "on", } +}; +const size_t pc_compat_2_6_len = G_N_ELEMENTS(pc_compat_2_6); + +GlobalProperty pc_compat_2_5[] = {}; +const size_t pc_compat_2_5_len = G_N_ELEMENTS(pc_compat_2_5); + +GlobalProperty 
pc_compat_2_4[] = { + PC_CPU_MODEL_IDS("2.4.0") + { "Haswell-" TYPE_X86_CPU, "abm", "off" }, + { "Haswell-noTSX-" TYPE_X86_CPU, "abm", "off" }, + { "Broadwell-" TYPE_X86_CPU, "abm", "off" }, + { "Broadwell-noTSX-" TYPE_X86_CPU, "abm", "off" }, + { "host" "-" TYPE_X86_CPU, "host-cache-info", "on" }, + { TYPE_X86_CPU, "check", "off" }, + { "qemu64" "-" TYPE_X86_CPU, "sse4a", "on" }, + { "qemu64" "-" TYPE_X86_CPU, "abm", "on" }, + { "qemu64" "-" TYPE_X86_CPU, "popcnt", "on" }, + { "qemu32" "-" TYPE_X86_CPU, "popcnt", "on" }, + { "Opteron_G2" "-" TYPE_X86_CPU, "rdtscp", "on" }, + { "Opteron_G3" "-" TYPE_X86_CPU, "rdtscp", "on" }, + { "Opteron_G4" "-" TYPE_X86_CPU, "rdtscp", "on" }, + { "Opteron_G5" "-" TYPE_X86_CPU, "rdtscp", "on", } +}; +const size_t pc_compat_2_4_len = G_N_ELEMENTS(pc_compat_2_4); + +GlobalProperty pc_compat_2_3[] = { + PC_CPU_MODEL_IDS("2.3.0") + { TYPE_X86_CPU, "arat", "off" }, + { "qemu64" "-" TYPE_X86_CPU, "min-level", "4" }, + { "kvm64" "-" TYPE_X86_CPU, "min-level", "5" }, + { "pentium3" "-" TYPE_X86_CPU, "min-level", "2" }, + { "n270" "-" TYPE_X86_CPU, "min-level", "5" }, + { "Conroe" "-" TYPE_X86_CPU, "min-level", "4" }, + { "Penryn" "-" TYPE_X86_CPU, "min-level", "4" }, + { "Nehalem" "-" TYPE_X86_CPU, "min-level", "4" }, + { "n270" "-" TYPE_X86_CPU, "min-xlevel", "0x8000000a" }, + { "Penryn" "-" TYPE_X86_CPU, "min-xlevel", "0x8000000a" }, + { "Conroe" "-" TYPE_X86_CPU, "min-xlevel", "0x8000000a" }, + { "Nehalem" "-" TYPE_X86_CPU, "min-xlevel", "0x8000000a" }, + { "Westmere" "-" TYPE_X86_CPU, "min-xlevel", "0x8000000a" }, + { "SandyBridge" "-" TYPE_X86_CPU, "min-xlevel", "0x8000000a" }, + { "IvyBridge" "-" TYPE_X86_CPU, "min-xlevel", "0x8000000a" }, + { "Haswell" "-" TYPE_X86_CPU, "min-xlevel", "0x8000000a" }, + { "Haswell-noTSX" "-" TYPE_X86_CPU, "min-xlevel", "0x8000000a" }, + { "Broadwell" "-" TYPE_X86_CPU, "min-xlevel", "0x8000000a" }, + { "Broadwell-noTSX" "-" TYPE_X86_CPU, "min-xlevel", "0x8000000a" }, + { TYPE_X86_CPU, "kvm-no-smi-migration", "on" }, +}; +const size_t pc_compat_2_3_len = G_N_ELEMENTS(pc_compat_2_3); + +GlobalProperty pc_compat_2_2[] = { + PC_CPU_MODEL_IDS("2.2.0") + { "kvm64" "-" TYPE_X86_CPU, "vme", "off" }, + { "kvm32" "-" TYPE_X86_CPU, "vme", "off" }, + { "Conroe" "-" TYPE_X86_CPU, "vme", "off" }, + { "Penryn" "-" TYPE_X86_CPU, "vme", "off" }, + { "Nehalem" "-" TYPE_X86_CPU, "vme", "off" }, + { "Westmere" "-" TYPE_X86_CPU, "vme", "off" }, + { "SandyBridge" "-" TYPE_X86_CPU, "vme", "off" }, + { "Haswell" "-" TYPE_X86_CPU, "vme", "off" }, + { "Broadwell" "-" TYPE_X86_CPU, "vme", "off" }, + { "Opteron_G1" "-" TYPE_X86_CPU, "vme", "off" }, + { "Opteron_G2" "-" TYPE_X86_CPU, "vme", "off" }, + { "Opteron_G3" "-" TYPE_X86_CPU, "vme", "off" }, + { "Opteron_G4" "-" TYPE_X86_CPU, "vme", "off" }, + { "Opteron_G5" "-" TYPE_X86_CPU, "vme", "off" }, + { "Haswell" "-" TYPE_X86_CPU, "f16c", "off" }, + { "Haswell" "-" TYPE_X86_CPU, "rdrand", "off" }, + { "Broadwell" "-" TYPE_X86_CPU, "f16c", "off" }, + { "Broadwell" "-" TYPE_X86_CPU, "rdrand", "off" }, +}; +const size_t pc_compat_2_2_len = G_N_ELEMENTS(pc_compat_2_2); + +GlobalProperty pc_compat_2_1[] = { + PC_CPU_MODEL_IDS("2.1.0") + { "coreduo" "-" TYPE_X86_CPU, "vmx", "on" }, + { "core2duo" "-" TYPE_X86_CPU, "vmx", "on" }, +}; +const size_t pc_compat_2_1_len = G_N_ELEMENTS(pc_compat_2_1); + +GlobalProperty pc_compat_2_0[] = { + PC_CPU_MODEL_IDS("2.0.0") + { "virtio-scsi-pci", "any_layout", "off" }, + { "PIIX4_PM", "memory-hotplug-support", "off" }, + { "apic", "version", "0x11" }, + { "nec-usb-xhci", 
"superspeed-ports-first", "off" }, + { "nec-usb-xhci", "force-pcie-endcap", "on" }, + { "pci-serial", "prog_if", "0" }, + { "pci-serial-2x", "prog_if", "0" }, + { "pci-serial-4x", "prog_if", "0" }, + { "virtio-net-pci", "guest_announce", "off" }, + { "ICH9-LPC", "memory-hotplug-support", "off" }, + { "xio3130-downstream", COMPAT_PROP_PCP, "off" }, + { "ioh3420", COMPAT_PROP_PCP, "off" }, +}; +const size_t pc_compat_2_0_len = G_N_ELEMENTS(pc_compat_2_0); + +GlobalProperty pc_compat_1_7[] = { + PC_CPU_MODEL_IDS("1.7.0") + { TYPE_USB_DEVICE, "msos-desc", "no" }, + { "PIIX4_PM", ACPI_PM_PROP_ACPI_PCIHP_BRIDGE, "off" }, + { "hpet", HPET_INTCAP, "4" }, +}; +const size_t pc_compat_1_7_len = G_N_ELEMENTS(pc_compat_1_7); + +GlobalProperty pc_compat_1_6[] = { + PC_CPU_MODEL_IDS("1.6.0") + { "e1000", "mitigation", "off" }, + { "qemu64-" TYPE_X86_CPU, "model", "2" }, + { "qemu32-" TYPE_X86_CPU, "model", "3" }, + { "i440FX-pcihost", "short_root_bus", "1" }, + { "q35-pcihost", "short_root_bus", "1" }, +}; +const size_t pc_compat_1_6_len = G_N_ELEMENTS(pc_compat_1_6); + +GlobalProperty pc_compat_1_5[] = { + PC_CPU_MODEL_IDS("1.5.0") + { "Conroe-" TYPE_X86_CPU, "model", "2" }, + { "Conroe-" TYPE_X86_CPU, "min-level", "2" }, + { "Penryn-" TYPE_X86_CPU, "model", "2" }, + { "Penryn-" TYPE_X86_CPU, "min-level", "2" }, + { "Nehalem-" TYPE_X86_CPU, "model", "2" }, + { "Nehalem-" TYPE_X86_CPU, "min-level", "2" }, + { "virtio-net-pci", "any_layout", "off" }, + { TYPE_X86_CPU, "pmu", "on" }, + { "i440FX-pcihost", "short_root_bus", "0" }, + { "q35-pcihost", "short_root_bus", "0" }, +}; +const size_t pc_compat_1_5_len = G_N_ELEMENTS(pc_compat_1_5); + +GlobalProperty pc_compat_1_4[] = { + PC_CPU_MODEL_IDS("1.4.0") + { "scsi-hd", "discard_granularity", "0" }, + { "scsi-cd", "discard_granularity", "0" }, + { "ide-hd", "discard_granularity", "0" }, + { "ide-cd", "discard_granularity", "0" }, + { "virtio-blk-pci", "discard_granularity", "0" }, + /* DEV_NVECTORS_UNSPECIFIED as a uint32_t string: */ + { "virtio-serial-pci", "vectors", "0xFFFFFFFF" }, + { "virtio-net-pci", "ctrl_guest_offloads", "off" }, + { "e1000", "romfile", "pxe-e1000.rom" }, + { "ne2k_pci", "romfile", "pxe-ne2k_pci.rom" }, + { "pcnet", "romfile", "pxe-pcnet.rom" }, + { "rtl8139", "romfile", "pxe-rtl8139.rom" }, + { "virtio-net-pci", "romfile", "pxe-virtio.rom" }, + { "486-" TYPE_X86_CPU, "model", "0" }, + { "n270" "-" TYPE_X86_CPU, "movbe", "off" }, + { "Westmere" "-" TYPE_X86_CPU, "pclmulqdq", "off" }, +}; +const size_t pc_compat_1_4_len = G_N_ELEMENTS(pc_compat_1_4); + +GSIState *pc_gsi_create(qemu_irq **irqs, bool pci_enabled) +{ + GSIState *s; + + s = g_new0(GSIState, 1); + if (kvm_ioapic_in_kernel()) { + kvm_pc_setup_irq_routing(pci_enabled); + } + *irqs = qemu_allocate_irqs(gsi_handler, s, GSI_NUM_PINS); + + return s; +} + +static void ioport80_write(void *opaque, hwaddr addr, uint64_t data, + unsigned size) +{ +} + +static uint64_t ioport80_read(void *opaque, hwaddr addr, unsigned size) +{ + return 0xffffffffffffffffULL; +} + +/* MSDOS compatibility mode FPU exception support */ +static void ioportF0_write(void *opaque, hwaddr addr, uint64_t data, + unsigned size) +{ + if (tcg_enabled()) { + cpu_set_ignne(); + } +} + +static uint64_t ioportF0_read(void *opaque, hwaddr addr, unsigned size) +{ + return 0xffffffffffffffffULL; +} + +/* PC cmos mappings */ + +#define REG_EQUIPMENT_BYTE 0x14 + +static void cmos_init_hd(ISADevice *s, int type_ofs, int info_ofs, + int16_t cylinders, int8_t heads, int8_t sectors) +{ + rtc_set_memory(s, type_ofs, 47); + 
rtc_set_memory(s, info_ofs, cylinders); + rtc_set_memory(s, info_ofs + 1, cylinders >> 8); + rtc_set_memory(s, info_ofs + 2, heads); + rtc_set_memory(s, info_ofs + 3, 0xff); + rtc_set_memory(s, info_ofs + 4, 0xff); + rtc_set_memory(s, info_ofs + 5, 0xc0 | ((heads > 8) << 3)); + rtc_set_memory(s, info_ofs + 6, cylinders); + rtc_set_memory(s, info_ofs + 7, cylinders >> 8); + rtc_set_memory(s, info_ofs + 8, sectors); +} + +/* convert boot_device letter to something recognizable by the bios */ +static int boot_device2nibble(char boot_device) +{ + switch(boot_device) { + case 'a': + case 'b': + return 0x01; /* floppy boot */ + case 'c': + return 0x02; /* hard drive boot */ + case 'd': + return 0x03; /* CD-ROM boot */ + case 'n': + return 0x04; /* Network boot */ + } + return 0; +} + +static void set_boot_dev(ISADevice *s, const char *boot_device, Error **errp) +{ +#define PC_MAX_BOOT_DEVICES 3 + int nbds, bds[3] = { 0, }; + int i; + + nbds = strlen(boot_device); + if (nbds > PC_MAX_BOOT_DEVICES) { + error_setg(errp, "Too many boot devices for PC"); + return; + } + for (i = 0; i < nbds; i++) { + bds[i] = boot_device2nibble(boot_device[i]); + if (bds[i] == 0) { + error_setg(errp, "Invalid boot device for PC: '%c'", + boot_device[i]); + return; + } + } + rtc_set_memory(s, 0x3d, (bds[1] << 4) | bds[0]); + rtc_set_memory(s, 0x38, (bds[2] << 4) | (fd_bootchk ? 0x0 : 0x1)); +} + +static void pc_boot_set(void *opaque, const char *boot_device, Error **errp) +{ + set_boot_dev(opaque, boot_device, errp); +} + +static void pc_cmos_init_floppy(ISADevice *rtc_state, ISADevice *floppy) +{ + int val, nb, i; + FloppyDriveType fd_type[2] = { FLOPPY_DRIVE_TYPE_NONE, + FLOPPY_DRIVE_TYPE_NONE }; + + /* floppy type */ + if (floppy) { + for (i = 0; i < 2; i++) { + fd_type[i] = isa_fdc_get_drive_type(floppy, i); + } + } + val = (cmos_get_fd_drive_type(fd_type[0]) << 4) | + cmos_get_fd_drive_type(fd_type[1]); + rtc_set_memory(rtc_state, 0x10, val); + + val = rtc_get_memory(rtc_state, REG_EQUIPMENT_BYTE); + nb = 0; + if (fd_type[0] != FLOPPY_DRIVE_TYPE_NONE) { + nb++; + } + if (fd_type[1] != FLOPPY_DRIVE_TYPE_NONE) { + nb++; + } + switch (nb) { + case 0: + break; + case 1: + val |= 0x01; /* 1 drive, ready for boot */ + break; + case 2: + val |= 0x41; /* 2 drives, ready for boot */ + break; + } + rtc_set_memory(rtc_state, REG_EQUIPMENT_BYTE, val); +} + +typedef struct pc_cmos_init_late_arg { + ISADevice *rtc_state; + BusState *idebus[2]; +} pc_cmos_init_late_arg; + +typedef struct check_fdc_state { + ISADevice *floppy; + bool multiple; +} CheckFdcState; + +static int check_fdc(Object *obj, void *opaque) +{ + CheckFdcState *state = opaque; + Object *fdc; + uint32_t iobase; + Error *local_err = NULL; + + fdc = object_dynamic_cast(obj, TYPE_ISA_FDC); + if (!fdc) { + return 0; + } + + iobase = object_property_get_uint(obj, "iobase", &local_err); + if (local_err || iobase != 0x3f0) { + error_free(local_err); + return 0; + } + + if (state->floppy) { + state->multiple = true; + } else { + state->floppy = ISA_DEVICE(obj); + } + return 0; +} + +static const char * const fdc_container_path[] = { + "/unattached", "/peripheral", "/peripheral-anon" +}; + +/* + * Locate the FDC at IO address 0x3f0, in order to configure the CMOS registers + * and ACPI objects. 
+ */
+ISADevice *pc_find_fdc0(void)
+{
+    int i;
+    Object *container;
+    CheckFdcState state = { 0 };
+
+    for (i = 0; i < ARRAY_SIZE(fdc_container_path); i++) {
+        container = container_get(qdev_get_machine(), fdc_container_path[i]);
+        object_child_foreach(container, check_fdc, &state);
+    }
+
+    if (state.multiple) {
+        warn_report("multiple floppy disk controllers with "
+                    "iobase=0x3f0 have been found");
+        error_printf("the one being picked for CMOS setup might not reflect "
+                     "your intent");
+    }
+
+    return state.floppy;
+}
+
+static void pc_cmos_init_late(void *opaque)
+{
+    pc_cmos_init_late_arg *arg = opaque;
+    ISADevice *s = arg->rtc_state;
+    int16_t cylinders;
+    int8_t heads, sectors;
+    int val;
+    int i, trans;
+
+    val = 0;
+    if (arg->idebus[0] && ide_get_geometry(arg->idebus[0], 0,
+                                           &cylinders, &heads, &sectors) >= 0) {
+        cmos_init_hd(s, 0x19, 0x1b, cylinders, heads, sectors);
+        val |= 0xf0;
+    }
+    if (arg->idebus[0] && ide_get_geometry(arg->idebus[0], 1,
+                                           &cylinders, &heads, &sectors) >= 0) {
+        cmos_init_hd(s, 0x1a, 0x24, cylinders, heads, sectors);
+        val |= 0x0f;
+    }
+    rtc_set_memory(s, 0x12, val);
+
+    val = 0;
+    for (i = 0; i < 4; i++) {
+        /* NOTE: ide_get_geometry() returns the physical
+           geometry. It is always such that: 1 <= sects <= 63, 1
+           <= heads <= 16, 1 <= cylinders <= 16383. The BIOS
+           geometry can be different if a translation is done. */
+        if (arg->idebus[i / 2] &&
+            ide_get_geometry(arg->idebus[i / 2], i % 2,
+                             &cylinders, &heads, &sectors) >= 0) {
+            trans = ide_get_bios_chs_trans(arg->idebus[i / 2], i % 2) - 1;
+            assert((trans & ~3) == 0);
+            val |= trans << (i * 2);
+        }
+    }
+    rtc_set_memory(s, 0x39, val);
+
+    pc_cmos_init_floppy(s, pc_find_fdc0());
+
+    qemu_unregister_reset(pc_cmos_init_late, opaque);
+}
+
+void pc_cmos_init(PCMachineState *pcms,
+                  BusState *idebus0, BusState *idebus1,
+                  ISADevice *s)
+{
+    int val;
+    static pc_cmos_init_late_arg arg;
+    X86MachineState *x86ms = X86_MACHINE(pcms);
+
+    /* various important CMOS locations needed by PC/Bochs bios */
+
+    /* memory size */
+    /* base memory (first MiB) */
+    val = MIN(x86ms->below_4g_mem_size / KiB, 640);
+    rtc_set_memory(s, 0x15, val);
+    rtc_set_memory(s, 0x16, val >> 8);
+    /* extended memory (next 64MiB) */
+    if (x86ms->below_4g_mem_size > 1 * MiB) {
+        val = (x86ms->below_4g_mem_size - 1 * MiB) / KiB;
+    } else {
+        val = 0;
+    }
+    if (val > 65535) {
+        val = 65535;
+    }
+    rtc_set_memory(s, 0x17, val);
+    rtc_set_memory(s, 0x18, val >> 8);
+    rtc_set_memory(s, 0x30, val);
+    rtc_set_memory(s, 0x31, val >> 8);
+    /* memory between 16MiB and 4GiB */
+    if (x86ms->below_4g_mem_size > 16 * MiB) {
+        val = (x86ms->below_4g_mem_size - 16 * MiB) / (64 * KiB);
+    } else {
+        val = 0;
+    }
+    if (val > 65535) {
+        val = 65535;
+    }
+    rtc_set_memory(s, 0x34, val);
+    rtc_set_memory(s, 0x35, val >> 8);
+    /* memory above 4GiB */
+    val = x86ms->above_4g_mem_size / 65536;
+    rtc_set_memory(s, 0x5b, val);
+    rtc_set_memory(s, 0x5c, val >> 8);
+    rtc_set_memory(s, 0x5d, val >> 16);
+
+    object_property_add_link(OBJECT(pcms), "rtc_state",
+                             TYPE_ISA_DEVICE,
+                             (Object **)&x86ms->rtc,
+                             object_property_allow_set_link,
+                             OBJ_PROP_LINK_STRONG);
+    object_property_set_link(OBJECT(pcms), "rtc_state", OBJECT(s),
+                             &error_abort);
+
+    set_boot_dev(s, MACHINE(pcms)->boot_order, &error_fatal);
+
+    val = 0;
+    val |= 0x02; /* FPU is there */
+    val |= 0x04; /* PS/2 mouse installed */
+    rtc_set_memory(s, REG_EQUIPMENT_BYTE, val);
+
+    /* hard drives and FDC */
+    arg.rtc_state = s;
+    arg.idebus[0] = idebus0;
+    arg.idebus[1] = idebus1;
+    qemu_register_reset(pc_cmos_init_late, &arg);
+}
+
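+/*
+ * Editor's illustrative sketch, not part of the original patch: the CMOS
+ * encodings used by set_boot_dev() and pc_cmos_init() above pack values as
+ * nibbles and little-endian byte pairs. The standalone snippet below
+ * reproduces the same packing; the offsets (0x3d/0x38 for the boot order,
+ * 0x17/0x18 for extended memory) are the ones documented above, while
+ * cmos_packing_example() is a hypothetical helper, not a QEMU API.
+ */
+#include <assert.h>
+#include <stdint.h>
+
+static void cmos_packing_example(uint8_t *cmos)
+{
+    /* boot order "cad": hard drive (0x2), floppy (0x1), then CD-ROM (0x3) */
+    int bds[3] = { 0x02, 0x01, 0x03 };
+
+    cmos[0x3d] = (bds[1] << 4) | bds[0]; /* low nibble = first boot device */
+    cmos[0x38] = (bds[2] << 4) | 0x0;    /* bit 0 clear = floppy boot check */
+
+    /* 48 MiB below 4G: extended memory above 1 MiB, counted in KiB */
+    uint32_t ext_kib = (48 - 1) * 1024;
+    if (ext_kib > 65535) {
+        ext_kib = 65535;                 /* 16-bit register caps at ~64 MiB */
+    }
+    cmos[0x17] = ext_kib & 0xff;         /* low byte: 48128 KiB = 0xbc00 */
+    cmos[0x18] = ext_kib >> 8;           /* high byte */
+
+    assert(cmos[0x3d] == 0x12 && cmos[0x17] == 0x00 && cmos[0x18] == 0xbc);
+}
+
+int main(void)
+{
+    uint8_t cmos[0x80] = { 0 };
+
+    cmos_packing_example(cmos);
+    return 0;
+}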
+static void handle_a20_line_change(void *opaque, int irq, int level) +{ + X86CPU *cpu = opaque; + + /* XXX: send to all CPUs ? */ + /* XXX: add logic to handle multiple A20 line sources */ + x86_cpu_set_a20(cpu, level); +} + +#define NE2000_NB_MAX 6 + +static const int ne2000_io[NE2000_NB_MAX] = { 0x300, 0x320, 0x340, 0x360, + 0x280, 0x380 }; +static const int ne2000_irq[NE2000_NB_MAX] = { 9, 10, 11, 3, 4, 5 }; + +void pc_init_ne2k_isa(ISABus *bus, NICInfo *nd) +{ + static int nb_ne2k = 0; + + if (nb_ne2k == NE2000_NB_MAX) + return; + isa_ne2000_init(bus, ne2000_io[nb_ne2k], + ne2000_irq[nb_ne2k], nd); + nb_ne2k++; +} + +void pc_acpi_smi_interrupt(void *opaque, int irq, int level) +{ + X86CPU *cpu = opaque; + + if (level) { + cpu_interrupt(CPU(cpu), CPU_INTERRUPT_SMI); + } +} + +static +void pc_machine_done(Notifier *notifier, void *data) +{ + PCMachineState *pcms = container_of(notifier, + PCMachineState, machine_done); + X86MachineState *x86ms = X86_MACHINE(pcms); + + /* set the number of CPUs */ + x86_rtc_set_cpus_count(x86ms->rtc, x86ms->boot_cpus); + + fw_cfg_add_extra_pci_roots(pcms->bus, x86ms->fw_cfg); + + acpi_setup(); + if (x86ms->fw_cfg) { + fw_cfg_build_smbios(MACHINE(pcms), x86ms->fw_cfg); + fw_cfg_build_feature_control(MACHINE(pcms), x86ms->fw_cfg); + /* update FW_CFG_NB_CPUS to account for -device added CPUs */ + fw_cfg_modify_i16(x86ms->fw_cfg, FW_CFG_NB_CPUS, x86ms->boot_cpus); + } + + + if (x86ms->apic_id_limit > 255 && !xen_enabled() && + !kvm_irqchip_in_kernel()) { + error_report("current -smp configuration requires kernel " + "irqchip support."); + exit(EXIT_FAILURE); + } +} + +void pc_guest_info_init(PCMachineState *pcms) +{ + X86MachineState *x86ms = X86_MACHINE(pcms); + + x86ms->apic_xrupt_override = true; + pcms->machine_done.notify = pc_machine_done; + qemu_add_machine_init_done_notifier(&pcms->machine_done); +} + +/* setup pci memory address space mapping into system address space */ +void pc_pci_as_mapping_init(Object *owner, MemoryRegion *system_memory, + MemoryRegion *pci_address_space) +{ + /* Set to lower priority than RAM */ + memory_region_add_subregion_overlap(system_memory, 0x0, + pci_address_space, -1); +} + +void xen_load_linux(PCMachineState *pcms) +{ + int i; + FWCfgState *fw_cfg; + PCMachineClass *pcmc = PC_MACHINE_GET_CLASS(pcms); + X86MachineState *x86ms = X86_MACHINE(pcms); + + assert(MACHINE(pcms)->kernel_filename != NULL); + + fw_cfg = fw_cfg_init_io(FW_CFG_IO_BASE); + fw_cfg_add_i16(fw_cfg, FW_CFG_NB_CPUS, x86ms->boot_cpus); + rom_set_fw(fw_cfg); + + x86_load_linux(x86ms, fw_cfg, pcmc->acpi_data_size, + pcmc->pvh_enabled); + for (i = 0; i < nb_option_roms; i++) { + assert(!strcmp(option_rom[i].name, "linuxboot.bin") || + !strcmp(option_rom[i].name, "linuxboot_dma.bin") || + !strcmp(option_rom[i].name, "pvh.bin") || + !strcmp(option_rom[i].name, "multiboot.bin") || + !strcmp(option_rom[i].name, "multiboot_dma.bin")); + rom_add_option(option_rom[i].name, option_rom[i].bootindex); + } + x86ms->fw_cfg = fw_cfg; +} + +#define PC_ROM_MIN_VGA 0xc0000 +#define PC_ROM_MIN_OPTION 0xc8000 +#define PC_ROM_MAX 0xe0000 +#define PC_ROM_ALIGN 0x800 +#define PC_ROM_SIZE (PC_ROM_MAX - PC_ROM_MIN_VGA) + +void pc_memory_init(PCMachineState *pcms, + MemoryRegion *system_memory, + MemoryRegion *rom_memory, + MemoryRegion **ram_memory) +{ + int linux_boot, i; + MemoryRegion *option_rom_mr; + MemoryRegion *ram_below_4g, *ram_above_4g; + FWCfgState *fw_cfg; + MachineState *machine = MACHINE(pcms); + MachineClass *mc = MACHINE_GET_CLASS(machine); + PCMachineClass 
*pcmc = PC_MACHINE_GET_CLASS(pcms);
+    X86MachineState *x86ms = X86_MACHINE(pcms);
+
+    assert(machine->ram_size == x86ms->below_4g_mem_size +
+                                x86ms->above_4g_mem_size);
+
+    linux_boot = (machine->kernel_filename != NULL);
+
+    /*
+     * Split single memory region and use aliases to address portions of it,
+     * done for backwards compatibility with older qemus.
+     */
+    *ram_memory = machine->ram;
+    ram_below_4g = g_malloc(sizeof(*ram_below_4g));
+    memory_region_init_alias(ram_below_4g, NULL, "ram-below-4g", machine->ram,
+                             0, x86ms->below_4g_mem_size);
+    memory_region_add_subregion(system_memory, 0, ram_below_4g);
+    e820_add_entry(0, x86ms->below_4g_mem_size, E820_RAM);
+    if (x86ms->above_4g_mem_size > 0) {
+        ram_above_4g = g_malloc(sizeof(*ram_above_4g));
+        memory_region_init_alias(ram_above_4g, NULL, "ram-above-4g",
+                                 machine->ram,
+                                 x86ms->below_4g_mem_size,
+                                 x86ms->above_4g_mem_size);
+        memory_region_add_subregion(system_memory, 0x100000000ULL,
+                                    ram_above_4g);
+        e820_add_entry(0x100000000ULL, x86ms->above_4g_mem_size, E820_RAM);
+    }
+
+    if (pcms->sgx_epc.size != 0) {
+        e820_add_entry(pcms->sgx_epc.base, pcms->sgx_epc.size, E820_RESERVED);
+    }
+
+    if (!pcmc->has_reserved_memory &&
+        (machine->ram_slots ||
+         (machine->maxram_size > machine->ram_size))) {
+
+        error_report("\"-memory 'slots|maxmem'\" is not supported by: %s",
+                     mc->name);
+        exit(EXIT_FAILURE);
+    }
+
+    /* always allocate the device memory information */
+    machine->device_memory = g_malloc0(sizeof(*machine->device_memory));
+
+    /* initialize device memory address space */
+    if (pcmc->has_reserved_memory &&
+        (machine->ram_size < machine->maxram_size)) {
+        ram_addr_t device_mem_size = machine->maxram_size - machine->ram_size;
+
+        if (machine->ram_slots > ACPI_MAX_RAM_SLOTS) {
+            error_report("unsupported amount of memory slots: %"PRIu64,
+                         machine->ram_slots);
+            exit(EXIT_FAILURE);
+        }
+
+        if (QEMU_ALIGN_UP(machine->maxram_size,
+                          TARGET_PAGE_SIZE) != machine->maxram_size) {
+            error_report("maximum memory size must be aligned to a multiple "
+                         "of %d bytes", TARGET_PAGE_SIZE);
+            exit(EXIT_FAILURE);
+        }
+
+        if (pcms->sgx_epc.size != 0) {
+            machine->device_memory->base = sgx_epc_above_4g_end(&pcms->sgx_epc);
+        } else {
+            machine->device_memory->base =
+                0x100000000ULL + x86ms->above_4g_mem_size;
+        }
+
+        machine->device_memory->base =
+            ROUND_UP(machine->device_memory->base, 1 * GiB);
+
+        if (pcmc->enforce_aligned_dimm) {
+            /* size device region assuming 1G page max alignment per slot */
+            device_mem_size += (1 * GiB) * machine->ram_slots;
+        }
+
+        if ((machine->device_memory->base + device_mem_size) <
+            device_mem_size) {
+            error_report("unsupported amount of maximum memory: " RAM_ADDR_FMT,
+                         machine->maxram_size);
+            exit(EXIT_FAILURE);
+        }
+
+        memory_region_init(&machine->device_memory->mr, OBJECT(pcms),
+                           "device-memory", device_mem_size);
+        memory_region_add_subregion(system_memory, machine->device_memory->base,
+                                    &machine->device_memory->mr);
+    }
+
+    /* Initialize PC system firmware */
+    pc_system_firmware_init(pcms, rom_memory);
+
+    option_rom_mr = g_malloc(sizeof(*option_rom_mr));
+    memory_region_init_ram(option_rom_mr, NULL, "pc.rom", PC_ROM_SIZE,
+                           &error_fatal);
+    if (pcmc->pci_enabled) {
+        memory_region_set_readonly(option_rom_mr, true);
+    }
+    memory_region_add_subregion_overlap(rom_memory,
+                                        PC_ROM_MIN_VGA,
+                                        option_rom_mr,
+                                        1);
+
+    fw_cfg = fw_cfg_arch_create(machine,
+                                x86ms->boot_cpus, x86ms->apic_id_limit);
+
+    rom_set_fw(fw_cfg);
+
+    if (pcmc->has_reserved_memory && machine->device_memory->base) {
+        uint64_t *val =
g_malloc(sizeof(*val)); + PCMachineClass *pcmc = PC_MACHINE_GET_CLASS(pcms); + uint64_t res_mem_end = machine->device_memory->base; + + if (!pcmc->broken_reserved_end) { + res_mem_end += memory_region_size(&machine->device_memory->mr); + } + *val = cpu_to_le64(ROUND_UP(res_mem_end, 1 * GiB)); + fw_cfg_add_file(fw_cfg, "etc/reserved-memory-end", val, sizeof(*val)); + } + + if (linux_boot) { + x86_load_linux(x86ms, fw_cfg, pcmc->acpi_data_size, + pcmc->pvh_enabled); + } + + for (i = 0; i < nb_option_roms; i++) { + rom_add_option(option_rom[i].name, option_rom[i].bootindex); + } + x86ms->fw_cfg = fw_cfg; + + /* Init default IOAPIC address space */ + x86ms->ioapic_as = &address_space_memory; + + /* Init ACPI memory hotplug IO base address */ + pcms->memhp_io_base = ACPI_MEMORY_HOTPLUG_BASE; +} + +/* + * The 64bit pci hole starts after "above 4G RAM" and + * potentially the space reserved for memory hotplug. + */ +uint64_t pc_pci_hole64_start(void) +{ + PCMachineState *pcms = PC_MACHINE(qdev_get_machine()); + PCMachineClass *pcmc = PC_MACHINE_GET_CLASS(pcms); + MachineState *ms = MACHINE(pcms); + X86MachineState *x86ms = X86_MACHINE(pcms); + uint64_t hole64_start = 0; + + if (pcmc->has_reserved_memory && ms->device_memory->base) { + hole64_start = ms->device_memory->base; + if (!pcmc->broken_reserved_end) { + hole64_start += memory_region_size(&ms->device_memory->mr); + } + } else if (pcms->sgx_epc.size != 0) { + hole64_start = sgx_epc_above_4g_end(&pcms->sgx_epc); + } else { + hole64_start = 0x100000000ULL + x86ms->above_4g_mem_size; + } + + return ROUND_UP(hole64_start, 1 * GiB); +} + +DeviceState *pc_vga_init(ISABus *isa_bus, PCIBus *pci_bus) +{ + DeviceState *dev = NULL; + + rom_set_order_override(FW_CFG_ORDER_OVERRIDE_VGA); + if (pci_bus) { + PCIDevice *pcidev = pci_vga_init(pci_bus); + dev = pcidev ? &pcidev->qdev : NULL; + } else if (isa_bus) { + ISADevice *isadev = isa_vga_init(isa_bus); + dev = isadev ? 
DEVICE(isadev) : NULL; + } + rom_reset_order_override(); + return dev; +} + +static const MemoryRegionOps ioport80_io_ops = { + .write = ioport80_write, + .read = ioport80_read, + .endianness = DEVICE_NATIVE_ENDIAN, + .impl = { + .min_access_size = 1, + .max_access_size = 1, + }, +}; + +static const MemoryRegionOps ioportF0_io_ops = { + .write = ioportF0_write, + .read = ioportF0_read, + .endianness = DEVICE_NATIVE_ENDIAN, + .impl = { + .min_access_size = 1, + .max_access_size = 1, + }, +}; + +static void pc_superio_init(ISABus *isa_bus, bool create_fdctrl, bool no_vmport) +{ + int i; + DriveInfo *fd[MAX_FD]; + qemu_irq *a20_line; + ISADevice *fdc, *i8042, *port92, *vmmouse; + + serial_hds_isa_init(isa_bus, 0, MAX_ISA_SERIAL_PORTS); + parallel_hds_isa_init(isa_bus, MAX_PARALLEL_PORTS); + + for (i = 0; i < MAX_FD; i++) { + fd[i] = drive_get(IF_FLOPPY, 0, i); + create_fdctrl |= !!fd[i]; + } + if (create_fdctrl) { + fdc = isa_new(TYPE_ISA_FDC); + if (fdc) { + isa_realize_and_unref(fdc, isa_bus, &error_fatal); + isa_fdc_init_drives(fdc, fd); + } + } + + i8042 = isa_create_simple(isa_bus, "i8042"); + if (!no_vmport) { + isa_create_simple(isa_bus, TYPE_VMPORT); + vmmouse = isa_try_new("vmmouse"); + } else { + vmmouse = NULL; + } + if (vmmouse) { + object_property_set_link(OBJECT(vmmouse), "i8042", OBJECT(i8042), + &error_abort); + isa_realize_and_unref(vmmouse, isa_bus, &error_fatal); + } + port92 = isa_create_simple(isa_bus, TYPE_PORT92); + + a20_line = qemu_allocate_irqs(handle_a20_line_change, first_cpu, 2); + i8042_setup_a20_line(i8042, a20_line[0]); + qdev_connect_gpio_out_named(DEVICE(port92), + PORT92_A20_LINE, 0, a20_line[1]); + g_free(a20_line); +} + +void pc_basic_device_init(struct PCMachineState *pcms, + ISABus *isa_bus, qemu_irq *gsi, + ISADevice **rtc_state, + bool create_fdctrl, + uint32_t hpet_irqs) +{ + int i; + DeviceState *hpet = NULL; + int pit_isa_irq = 0; + qemu_irq pit_alt_irq = NULL; + qemu_irq rtc_irq = NULL; + ISADevice *pit = NULL; + MemoryRegion *ioport80_io = g_new(MemoryRegion, 1); + MemoryRegion *ioportF0_io = g_new(MemoryRegion, 1); + + memory_region_init_io(ioport80_io, NULL, &ioport80_io_ops, NULL, "ioport80", 1); + memory_region_add_subregion(isa_bus->address_space_io, 0x80, ioport80_io); + + memory_region_init_io(ioportF0_io, NULL, &ioportF0_io_ops, NULL, "ioportF0", 1); + memory_region_add_subregion(isa_bus->address_space_io, 0xf0, ioportF0_io); + + /* + * Check if an HPET shall be created. + * + * Without KVM_CAP_PIT_STATE2, we cannot switch off the in-kernel PIT + * when the HPET wants to take over. Thus we have to disable the latter. + */ + if (pcms->hpet_enabled && (!kvm_irqchip_in_kernel() || + kvm_has_pit_state2())) { + hpet = qdev_try_new(TYPE_HPET); + if (!hpet) { + error_report("couldn't create HPET device"); + exit(1); + } + /* + * For pc-piix-*, hpet's intcap is always IRQ2. For pc-q35-1.7 and + * earlier, use IRQ2 for compat. Otherwise, use IRQ16~23, IRQ8 and + * IRQ2. 
+ */ + uint8_t compat = object_property_get_uint(OBJECT(hpet), + HPET_INTCAP, NULL); + if (!compat) { + qdev_prop_set_uint32(hpet, HPET_INTCAP, hpet_irqs); + } + sysbus_realize_and_unref(SYS_BUS_DEVICE(hpet), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(hpet), 0, HPET_BASE); + + for (i = 0; i < GSI_NUM_PINS; i++) { + sysbus_connect_irq(SYS_BUS_DEVICE(hpet), i, gsi[i]); + } + pit_isa_irq = -1; + pit_alt_irq = qdev_get_gpio_in(hpet, HPET_LEGACY_PIT_INT); + rtc_irq = qdev_get_gpio_in(hpet, HPET_LEGACY_RTC_INT); + } + *rtc_state = mc146818_rtc_init(isa_bus, 2000, rtc_irq); + + qemu_register_boot_set(pc_boot_set, *rtc_state); + + if (!xen_enabled() && pcms->pit_enabled) { + if (kvm_pit_in_kernel()) { + pit = kvm_pit_init(isa_bus, 0x40); + } else { + pit = i8254_pit_init(isa_bus, 0x40, pit_isa_irq, pit_alt_irq); + } + if (hpet) { + /* connect PIT to output control line of the HPET */ + qdev_connect_gpio_out(hpet, 0, qdev_get_gpio_in(DEVICE(pit), 0)); + } + pcspk_init(pcms->pcspk, isa_bus, pit); + } + + i8257_dma_init(isa_bus, 0); + + /* Super I/O */ + pc_superio_init(isa_bus, create_fdctrl, pcms->vmport != ON_OFF_AUTO_ON); +} + +void pc_nic_init(PCMachineClass *pcmc, ISABus *isa_bus, PCIBus *pci_bus) +{ + int i; + + rom_set_order_override(FW_CFG_ORDER_OVERRIDE_NIC); + for (i = 0; i < nb_nics; i++) { + NICInfo *nd = &nd_table[i]; + const char *model = nd->model ? nd->model : pcmc->default_nic_model; + + if (g_str_equal(model, "ne2k_isa")) { + pc_init_ne2k_isa(isa_bus, nd); + } else { + pci_nic_init_nofail(nd, pci_bus, model, NULL); + } + } + rom_reset_order_override(); +} + +void pc_i8259_create(ISABus *isa_bus, qemu_irq *i8259_irqs) +{ + qemu_irq *i8259; + + if (kvm_pic_in_kernel()) { + i8259 = kvm_i8259_init(isa_bus); + } else if (xen_enabled()) { + i8259 = xen_interrupt_controller_init(); + } else { + i8259 = i8259_init(isa_bus, x86_allocate_cpu_irq()); + } + + for (size_t i = 0; i < ISA_NUM_IRQS; i++) { + i8259_irqs[i] = i8259[i]; + } + + g_free(i8259); +} + +static void pc_memory_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev, + Error **errp) +{ + const PCMachineState *pcms = PC_MACHINE(hotplug_dev); + const X86MachineState *x86ms = X86_MACHINE(hotplug_dev); + const PCMachineClass *pcmc = PC_MACHINE_GET_CLASS(pcms); + const MachineState *ms = MACHINE(hotplug_dev); + const bool is_nvdimm = object_dynamic_cast(OBJECT(dev), TYPE_NVDIMM); + const uint64_t legacy_align = TARGET_PAGE_SIZE; + Error *local_err = NULL; + + /* + * When -no-acpi is used with Q35 machine type, no ACPI is built, + * but pcms->acpi_dev is still created. Check !acpi_enabled in + * addition to cover this case. + */ + if (!x86ms->acpi_dev || !x86_machine_is_acpi_enabled(x86ms)) { + error_setg(errp, + "memory hotplug is not enabled: missing acpi device or acpi disabled"); + return; + } + + if (is_nvdimm && !ms->nvdimms_state->is_enabled) { + error_setg(errp, "nvdimm is not enabled: missing 'nvdimm' in '-M'"); + return; + } + + hotplug_handler_pre_plug(x86ms->acpi_dev, dev, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + + pc_dimm_pre_plug(PC_DIMM(dev), MACHINE(hotplug_dev), + pcmc->enforce_aligned_dimm ? 
NULL : &legacy_align, errp);
+}
+
+static void pc_memory_plug(HotplugHandler *hotplug_dev,
+                           DeviceState *dev, Error **errp)
+{
+    PCMachineState *pcms = PC_MACHINE(hotplug_dev);
+    X86MachineState *x86ms = X86_MACHINE(hotplug_dev);
+    MachineState *ms = MACHINE(hotplug_dev);
+    bool is_nvdimm = object_dynamic_cast(OBJECT(dev), TYPE_NVDIMM);
+
+    pc_dimm_plug(PC_DIMM(dev), MACHINE(pcms));
+
+    if (is_nvdimm) {
+        nvdimm_plug(ms->nvdimms_state);
+    }
+
+    hotplug_handler_plug(x86ms->acpi_dev, dev, &error_abort);
+}
+
+static void pc_memory_unplug_request(HotplugHandler *hotplug_dev,
+                                     DeviceState *dev, Error **errp)
+{
+    X86MachineState *x86ms = X86_MACHINE(hotplug_dev);
+
+    /*
+     * When -no-acpi is used with Q35 machine type, no ACPI is built,
+     * but pcms->acpi_dev is still created. Check !acpi_enabled in
+     * addition to cover this case.
+     */
+    if (!x86ms->acpi_dev || !x86_machine_is_acpi_enabled(x86ms)) {
+        error_setg(errp,
+                   "memory hotplug is not enabled: missing acpi device or acpi disabled");
+        return;
+    }
+
+    if (object_dynamic_cast(OBJECT(dev), TYPE_NVDIMM)) {
+        error_setg(errp, "nvdimm device hot unplug is not supported yet.");
+        return;
+    }
+
+    hotplug_handler_unplug_request(x86ms->acpi_dev, dev,
+                                   errp);
+}
+
+static void pc_memory_unplug(HotplugHandler *hotplug_dev,
+                             DeviceState *dev, Error **errp)
+{
+    PCMachineState *pcms = PC_MACHINE(hotplug_dev);
+    X86MachineState *x86ms = X86_MACHINE(hotplug_dev);
+    Error *local_err = NULL;
+
+    hotplug_handler_unplug(x86ms->acpi_dev, dev, &local_err);
+    if (local_err) {
+        goto out;
+    }
+
+    pc_dimm_unplug(PC_DIMM(dev), MACHINE(pcms));
+    qdev_unrealize(dev);
+ out:
+    error_propagate(errp, local_err);
+}
+
+static void pc_virtio_md_pci_pre_plug(HotplugHandler *hotplug_dev,
+                                      DeviceState *dev, Error **errp)
+{
+    HotplugHandler *hotplug_dev2 = qdev_get_bus_hotplug_handler(dev);
+    Error *local_err = NULL;
+
+    if (!hotplug_dev2 && dev->hotplugged) {
+        /*
+         * Without a bus hotplug handler, we cannot control the plug/unplug
+         * order. We should never reach this point when hotplugging on x86,
+         * however, better add a safety net.
+         */
+        error_setg(errp, "hotplug of virtio based memory devices not supported"
+                   " on this bus.");
+        return;
+    }
+    /*
+     * First, see if we can plug this memory device at all. If that
+     * succeeds, branch off to the actual hotplug handler.
+     */
+    memory_device_pre_plug(MEMORY_DEVICE(dev), MACHINE(hotplug_dev), NULL,
+                           &local_err);
+    if (!local_err && hotplug_dev2) {
+        hotplug_handler_pre_plug(hotplug_dev2, dev, &local_err);
+    }
+    error_propagate(errp, local_err);
+}
+
+static void pc_virtio_md_pci_plug(HotplugHandler *hotplug_dev,
+                                  DeviceState *dev, Error **errp)
+{
+    HotplugHandler *hotplug_dev2 = qdev_get_bus_hotplug_handler(dev);
+    Error *local_err = NULL;
+
+    /*
+     * Plug the memory device first and then branch off to the actual
+     * hotplug handler. If that one fails, we can easily undo the memory
+     * device bits.
+     */
+    memory_device_plug(MEMORY_DEVICE(dev), MACHINE(hotplug_dev));
+    if (hotplug_dev2) {
+        hotplug_handler_plug(hotplug_dev2, dev, &local_err);
+        if (local_err) {
+            memory_device_unplug(MEMORY_DEVICE(dev), MACHINE(hotplug_dev));
+        }
+    }
+    error_propagate(errp, local_err);
+}
+
+static void pc_virtio_md_pci_unplug_request(HotplugHandler *hotplug_dev,
+                                            DeviceState *dev, Error **errp)
+{
+    /* We don't support hot unplug of virtio based memory devices */
+    error_setg(errp, "virtio based memory devices cannot be unplugged.");
+}
+
+static void pc_virtio_md_pci_unplug(HotplugHandler *hotplug_dev,
+                                    DeviceState *dev, Error **errp)
+{
+    /* We don't support hot unplug of virtio based memory devices */
+}
+
+static void pc_machine_device_pre_plug_cb(HotplugHandler *hotplug_dev,
+                                          DeviceState *dev, Error **errp)
+{
+    if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
+        pc_memory_pre_plug(hotplug_dev, dev, errp);
+    } else if (object_dynamic_cast(OBJECT(dev), TYPE_CPU)) {
+        x86_cpu_pre_plug(hotplug_dev, dev, errp);
+    } else if (object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_PMEM_PCI) ||
+               object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_MEM_PCI)) {
+        pc_virtio_md_pci_pre_plug(hotplug_dev, dev, errp);
+    } else if (object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_IOMMU_PCI)) {
+        /* Declare the APIC range as the reserved MSI region */
+        char *resv_prop_str = g_strdup_printf("0xfee00000:0xfeefffff:%d",
+                                              VIRTIO_IOMMU_RESV_MEM_T_MSI);
+
+        object_property_set_uint(OBJECT(dev), "len-reserved-regions", 1, errp);
+        object_property_set_str(OBJECT(dev), "reserved-regions[0]",
+                                resv_prop_str, errp);
+        g_free(resv_prop_str);
+    }
+
+    if (object_dynamic_cast(OBJECT(dev), TYPE_X86_IOMMU_DEVICE) ||
+        object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_IOMMU_PCI)) {
+        PCMachineState *pcms = PC_MACHINE(hotplug_dev);
+
+        if (pcms->iommu) {
+            error_setg(errp, "QEMU does not support multiple vIOMMUs "
+                       "for x86 yet.");
+            return;
+        }
+        pcms->iommu = dev;
+    }
+}
+
+static void pc_machine_device_plug_cb(HotplugHandler *hotplug_dev,
+                                      DeviceState *dev, Error **errp)
+{
+    if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
+        pc_memory_plug(hotplug_dev, dev, errp);
+    } else if (object_dynamic_cast(OBJECT(dev), TYPE_CPU)) {
+        x86_cpu_plug(hotplug_dev, dev, errp);
+    } else if (object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_PMEM_PCI) ||
+               object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_MEM_PCI)) {
+        pc_virtio_md_pci_plug(hotplug_dev, dev, errp);
+    }
+}
+
+static void pc_machine_device_unplug_request_cb(HotplugHandler *hotplug_dev,
+                                                DeviceState *dev, Error **errp)
+{
+    if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
+        pc_memory_unplug_request(hotplug_dev, dev, errp);
+    } else if (object_dynamic_cast(OBJECT(dev), TYPE_CPU)) {
+        x86_cpu_unplug_request_cb(hotplug_dev, dev, errp);
+    } else if (object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_PMEM_PCI) ||
+               object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_MEM_PCI)) {
+        pc_virtio_md_pci_unplug_request(hotplug_dev, dev, errp);
+    } else {
+        error_setg(errp, "acpi: device unplug request for unsupported device"
+                   " type: %s", object_get_typename(OBJECT(dev)));
+    }
+}
+
+static void pc_machine_device_unplug_cb(HotplugHandler *hotplug_dev,
+                                        DeviceState *dev, Error **errp)
+{
+    if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
+        pc_memory_unplug(hotplug_dev, dev, errp);
+    } else if (object_dynamic_cast(OBJECT(dev), TYPE_CPU)) {
+        x86_cpu_unplug_cb(hotplug_dev, dev, errp);
+    } else if (object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_PMEM_PCI) ||
+               object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_MEM_PCI)) {
+        pc_virtio_md_pci_unplug(hotplug_dev, dev, errp);
+    } else {
+        error_setg(errp, "acpi: device unplug for unsupported device"
+                   " type: %s", object_get_typename(OBJECT(dev)));
+    }
+}
+
+static HotplugHandler *pc_get_hotplug_handler(MachineState *machine,
+                                              DeviceState *dev)
+{
+    if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM) ||
+        object_dynamic_cast(OBJECT(dev), TYPE_CPU) ||
+        object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_PMEM_PCI) ||
+        object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_MEM_PCI) ||
+        object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_IOMMU_PCI) ||
+        object_dynamic_cast(OBJECT(dev), TYPE_X86_IOMMU_DEVICE)) {
+        return HOTPLUG_HANDLER(machine);
+    }
+
+    return NULL;
+}
+
+static void
+pc_machine_get_device_memory_region_size(Object *obj, Visitor *v,
+                                         const char *name, void *opaque,
+                                         Error **errp)
+{
+    MachineState *ms = MACHINE(obj);
+    int64_t value = 0;
+
+    if (ms->device_memory) {
+        value = memory_region_size(&ms->device_memory->mr);
+    }
+
+    visit_type_int(v, name, &value, errp);
+}
+
+static void pc_machine_get_vmport(Object *obj, Visitor *v, const char *name,
+                                  void *opaque, Error **errp)
+{
+    PCMachineState *pcms = PC_MACHINE(obj);
+    OnOffAuto vmport = pcms->vmport;
+
+    visit_type_OnOffAuto(v, name, &vmport, errp);
+}
+
+static void pc_machine_set_vmport(Object *obj, Visitor *v, const char *name,
+                                  void *opaque, Error **errp)
+{
+    PCMachineState *pcms = PC_MACHINE(obj);
+
+    visit_type_OnOffAuto(v, name, &pcms->vmport, errp);
+}
+
+static bool pc_machine_get_smbus(Object *obj, Error **errp)
+{
+    PCMachineState *pcms = PC_MACHINE(obj);
+
+    return pcms->smbus_enabled;
+}
+
+static void pc_machine_set_smbus(Object *obj, bool value, Error **errp)
+{
+    PCMachineState *pcms = PC_MACHINE(obj);
+
+    pcms->smbus_enabled = value;
+}
+
+static bool pc_machine_get_sata(Object *obj, Error **errp)
+{
+    PCMachineState *pcms = PC_MACHINE(obj);
+
+    return pcms->sata_enabled;
+}
+
+static void pc_machine_set_sata(Object *obj, bool value, Error **errp)
+{
+    PCMachineState *pcms = PC_MACHINE(obj);
+
+    pcms->sata_enabled = value;
+}
+
+static bool pc_machine_get_pit(Object *obj, Error **errp)
+{
+    PCMachineState *pcms = PC_MACHINE(obj);
+
+    return pcms->pit_enabled;
+}
+
+static void pc_machine_set_pit(Object *obj, bool value, Error **errp)
+{
+    PCMachineState *pcms = PC_MACHINE(obj);
+
+    pcms->pit_enabled = value;
+}
+
+static bool pc_machine_get_hpet(Object *obj, Error **errp)
+{
+    PCMachineState *pcms = PC_MACHINE(obj);
+
+    return pcms->hpet_enabled;
+}
+
+static void pc_machine_set_hpet(Object *obj, bool value, Error **errp)
+{
+    PCMachineState *pcms = PC_MACHINE(obj);
+
+    pcms->hpet_enabled = value;
+}
+
+static bool pc_machine_get_default_bus_bypass_iommu(Object *obj, Error **errp)
+{
+    PCMachineState *pcms = PC_MACHINE(obj);
+
+    return pcms->default_bus_bypass_iommu;
+}
+
+static void pc_machine_set_default_bus_bypass_iommu(Object *obj, bool value,
+                                                    Error **errp)
+{
+    PCMachineState *pcms = PC_MACHINE(obj);
+
+    pcms->default_bus_bypass_iommu = value;
+}
+
+static void pc_machine_get_max_ram_below_4g(Object *obj, Visitor *v,
+                                            const char *name, void *opaque,
+                                            Error **errp)
+{
+    PCMachineState *pcms = PC_MACHINE(obj);
+    uint64_t value = pcms->max_ram_below_4g;
+
+    visit_type_size(v, name, &value, errp);
+}
+
+static void pc_machine_set_max_ram_below_4g(Object *obj, Visitor *v,
+                                            const char *name, void *opaque,
+                                            Error **errp)
+{
+    PCMachineState *pcms = PC_MACHINE(obj);
+    uint64_t value;
+
+    if (!visit_type_size(v, name, &value, errp)) {
+        return;
+    }
+    if (value > 4 * GiB) {
+        error_setg(errp,
+ "Machine option 'max-ram-below-4g=%"PRIu64 + "' expects size less than or equal to 4G", value); + return; + } + + if (value < 1 * MiB) { + warn_report("Only %" PRIu64 " bytes of RAM below the 4GiB boundary," + "BIOS may not work with less than 1MiB", value); + } + + pcms->max_ram_below_4g = value; +} + +static void pc_machine_get_max_fw_size(Object *obj, Visitor *v, + const char *name, void *opaque, + Error **errp) +{ + PCMachineState *pcms = PC_MACHINE(obj); + uint64_t value = pcms->max_fw_size; + + visit_type_size(v, name, &value, errp); +} + +static void pc_machine_set_max_fw_size(Object *obj, Visitor *v, + const char *name, void *opaque, + Error **errp) +{ + PCMachineState *pcms = PC_MACHINE(obj); + Error *error = NULL; + uint64_t value; + + visit_type_size(v, name, &value, &error); + if (error) { + error_propagate(errp, error); + return; + } + + /* + * We don't have a theoretically justifiable exact lower bound on the base + * address of any flash mapping. In practice, the IO-APIC MMIO range is + * [0xFEE00000..0xFEE01000] -- see IO_APIC_DEFAULT_ADDRESS --, leaving free + * only 18MB-4KB below 4G. For now, restrict the cumulative mapping to 8MB in + * size. + */ + if (value > 16 * MiB) { + error_setg(errp, + "User specified max allowed firmware size %" PRIu64 " is " + "greater than 16MiB. If combined firwmare size exceeds " + "16MiB the system may not boot, or experience intermittent" + "stability issues.", + value); + return; + } + + pcms->max_fw_size = value; +} + + +static void pc_machine_initfn(Object *obj) +{ + PCMachineState *pcms = PC_MACHINE(obj); + +#ifdef CONFIG_VMPORT + pcms->vmport = ON_OFF_AUTO_AUTO; +#else + pcms->vmport = ON_OFF_AUTO_OFF; +#endif /* CONFIG_VMPORT */ + pcms->max_ram_below_4g = 0; /* use default */ + /* acpi build is enabled by default if machine supports it */ + pcms->acpi_build_enabled = PC_MACHINE_GET_CLASS(pcms)->has_acpi_build; + pcms->smbus_enabled = true; + pcms->sata_enabled = true; + pcms->pit_enabled = true; + pcms->max_fw_size = 8 * MiB; +#ifdef CONFIG_HPET + pcms->hpet_enabled = true; +#endif + pcms->default_bus_bypass_iommu = false; + + pc_system_flash_create(pcms); + pcms->pcspk = isa_new(TYPE_PC_SPEAKER); + object_property_add_alias(OBJECT(pcms), "pcspk-audiodev", + OBJECT(pcms->pcspk), "audiodev"); +} + +static void pc_machine_reset(MachineState *machine) +{ + CPUState *cs; + X86CPU *cpu; + + qemu_devices_reset(); + + /* Reset APIC after devices have been reset to cancel + * any changes that qemu_devices_reset() might have done. 
+ */ + CPU_FOREACH(cs) { + cpu = X86_CPU(cs); + + if (cpu->apic_state) { + device_legacy_reset(cpu->apic_state); + } + } +} + +static void pc_machine_wakeup(MachineState *machine) +{ + cpu_synchronize_all_states(); + pc_machine_reset(machine); + cpu_synchronize_all_post_reset(); +} + +static bool pc_hotplug_allowed(MachineState *ms, DeviceState *dev, Error **errp) +{ + X86IOMMUState *iommu = x86_iommu_get_default(); + IntelIOMMUState *intel_iommu; + + if (iommu && + object_dynamic_cast((Object *)iommu, TYPE_INTEL_IOMMU_DEVICE) && + object_dynamic_cast((Object *)dev, "vfio-pci")) { + intel_iommu = INTEL_IOMMU_DEVICE(iommu); + if (!intel_iommu->caching_mode) { + error_setg(errp, "Device assignment is not allowed without " + "enabling caching-mode=on for Intel IOMMU."); + return false; + } + } + + return true; +} + +static void pc_machine_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + PCMachineClass *pcmc = PC_MACHINE_CLASS(oc); + HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(oc); + + pcmc->pci_enabled = true; + pcmc->has_acpi_build = true; + pcmc->rsdp_in_ram = true; + pcmc->smbios_defaults = true; + pcmc->smbios_uuid_encoded = true; + pcmc->gigabyte_align = true; + pcmc->has_reserved_memory = true; + pcmc->kvmclock_enabled = true; + pcmc->enforce_aligned_dimm = true; + /* BIOS ACPI tables: 128K. Other BIOS datastructures: less than 4K reported + * to be used at the moment, 32K should be enough for a while. */ + pcmc->acpi_data_size = 0x20000 + 0x8000; + pcmc->pvh_enabled = true; + pcmc->kvmclock_create_always = true; + assert(!mc->get_hotplug_handler); + mc->get_hotplug_handler = pc_get_hotplug_handler; + mc->hotplug_allowed = pc_hotplug_allowed; + mc->cpu_index_to_instance_props = x86_cpu_index_to_props; + mc->get_default_cpu_node_id = x86_get_default_cpu_node_id; + mc->possible_cpu_arch_ids = x86_possible_cpu_arch_ids; + mc->auto_enable_numa_with_memhp = true; + mc->auto_enable_numa_with_memdev = true; + mc->has_hotpluggable_cpus = true; + mc->default_boot_order = "cad"; + mc->block_default_type = IF_IDE; + mc->max_cpus = 255; + mc->reset = pc_machine_reset; + mc->wakeup = pc_machine_wakeup; + hc->pre_plug = pc_machine_device_pre_plug_cb; + hc->plug = pc_machine_device_plug_cb; + hc->unplug_request = pc_machine_device_unplug_request_cb; + hc->unplug = pc_machine_device_unplug_cb; + mc->default_cpu_type = TARGET_DEFAULT_CPU_TYPE; + mc->nvdimm_supported = true; + mc->smp_props.dies_supported = true; + mc->default_ram_id = "pc.ram"; + + object_class_property_add(oc, PC_MACHINE_MAX_RAM_BELOW_4G, "size", + pc_machine_get_max_ram_below_4g, pc_machine_set_max_ram_below_4g, + NULL, NULL); + object_class_property_set_description(oc, PC_MACHINE_MAX_RAM_BELOW_4G, + "Maximum ram below the 4G boundary (32bit boundary)"); + + object_class_property_add(oc, PC_MACHINE_DEVMEM_REGION_SIZE, "int", + pc_machine_get_device_memory_region_size, NULL, + NULL, NULL); + + object_class_property_add(oc, PC_MACHINE_VMPORT, "OnOffAuto", + pc_machine_get_vmport, pc_machine_set_vmport, + NULL, NULL); + object_class_property_set_description(oc, PC_MACHINE_VMPORT, + "Enable vmport (pc & q35)"); + + object_class_property_add_bool(oc, PC_MACHINE_SMBUS, + pc_machine_get_smbus, pc_machine_set_smbus); + + object_class_property_add_bool(oc, PC_MACHINE_SATA, + pc_machine_get_sata, pc_machine_set_sata); + + object_class_property_add_bool(oc, PC_MACHINE_PIT, + pc_machine_get_pit, pc_machine_set_pit); + + object_class_property_add_bool(oc, "hpet", + pc_machine_get_hpet, pc_machine_set_hpet); 
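+    /*
+     * Editor's usage note, illustrative rather than part of the patch: each
+     * class property registered here becomes a -machine option and a QOM
+     * property on the machine object, so (assuming the usual /machine path)
+     * the "hpet" switch just added can be exercised with:
+     *
+     *     qemu-system-x86_64 -machine pc,hpet=off ...
+     *     (qemu) qom-get /machine hpet
+     */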
+ + object_class_property_add_bool(oc, "default-bus-bypass-iommu", + pc_machine_get_default_bus_bypass_iommu, + pc_machine_set_default_bus_bypass_iommu); + + object_class_property_add(oc, PC_MACHINE_MAX_FW_SIZE, "size", + pc_machine_get_max_fw_size, pc_machine_set_max_fw_size, + NULL, NULL); + object_class_property_set_description(oc, PC_MACHINE_MAX_FW_SIZE, + "Maximum combined firmware size"); +} + +static const TypeInfo pc_machine_info = { + .name = TYPE_PC_MACHINE, + .parent = TYPE_X86_MACHINE, + .abstract = true, + .instance_size = sizeof(PCMachineState), + .instance_init = pc_machine_initfn, + .class_size = sizeof(PCMachineClass), + .class_init = pc_machine_class_init, + .interfaces = (InterfaceInfo[]) { + { TYPE_HOTPLUG_HANDLER }, + { } + }, +}; + +static void pc_machine_register_types(void) +{ + type_register_static(&pc_machine_info); +} + +type_init(pc_machine_register_types) diff --git a/hw/i386/pc_piix.c b/hw/i386/pc_piix.c new file mode 100644 index 000000000..223dd3e05 --- /dev/null +++ b/hw/i386/pc_piix.c @@ -0,0 +1,951 @@ +/* + * QEMU PC System Emulator + * + * Copyright (c) 2003-2004 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */
+
+#include "qemu/osdep.h"
+#include CONFIG_DEVICES
+
+#include "qemu/units.h"
+#include "hw/loader.h"
+#include "hw/i386/x86.h"
+#include "hw/i386/pc.h"
+#include "hw/i386/apic.h"
+#include "hw/pci-host/i440fx.h"
+#include "hw/southbridge/piix.h"
+#include "hw/display/ramfb.h"
+#include "hw/firmware/smbios.h"
+#include "hw/pci/pci.h"
+#include "hw/pci/pci_ids.h"
+#include "hw/usb.h"
+#include "net/net.h"
+#include "hw/ide/pci.h"
+#include "hw/irq.h"
+#include "sysemu/kvm.h"
+#include "hw/kvm/clock.h"
+#include "hw/sysbus.h"
+#include "hw/i2c/smbus_eeprom.h"
+#include "hw/xen/xen-x86.h"
+#include "exec/memory.h"
+#include "hw/acpi/acpi.h"
+#include "qapi/error.h"
+#include "qemu/error-report.h"
+#include "sysemu/xen.h"
+#ifdef CONFIG_XEN
+#include <xen/hvm/hvm_info_table.h>
+#include "hw/xen/xen_pt.h"
+#endif
+#include "migration/global_state.h"
+#include "migration/misc.h"
+#include "sysemu/numa.h"
+#include "hw/hyperv/vmbus-bridge.h"
+#include "hw/mem/nvdimm.h"
+#include "hw/i386/acpi-build.h"
+#include "kvm/kvm-cpu.h"
+
+#define MAX_IDE_BUS 2
+
+#ifdef CONFIG_IDE_ISA
+static const int ide_iobase[MAX_IDE_BUS] = { 0x1f0, 0x170 };
+static const int ide_iobase2[MAX_IDE_BUS] = { 0x3f6, 0x376 };
+static const int ide_irq[MAX_IDE_BUS] = { 14, 15 };
+#endif
+
+/* PC hardware initialisation */
+static void pc_init1(MachineState *machine,
+                     const char *host_type, const char *pci_type)
+{
+    PCMachineState *pcms = PC_MACHINE(machine);
+    PCMachineClass *pcmc = PC_MACHINE_GET_CLASS(pcms);
+    X86MachineState *x86ms = X86_MACHINE(machine);
+    MemoryRegion *system_memory = get_system_memory();
+    MemoryRegion *system_io = get_system_io();
+    PCIBus *pci_bus;
+    ISABus *isa_bus;
+    PCII440FXState *i440fx_state;
+    int piix3_devfn = -1;
+    qemu_irq smi_irq;
+    GSIState *gsi_state;
+    BusState *idebus[MAX_IDE_BUS];
+    ISADevice *rtc_state;
+    MemoryRegion *ram_memory;
+    MemoryRegion *pci_memory;
+    MemoryRegion *rom_memory;
+    ram_addr_t lowmem;
+
+    /*
+     * Calculate ram split, for memory below and above 4G. It's a bit
+     * complicated for backward compatibility reasons ...
+     *
+     *  - Traditional split is 3.5G (lowmem = 0xe0000000). This is the
+     *    default value for max_ram_below_4g now.
+     *
+     *  - Then, to gigabyte align the memory, we move the split to 3G
+     *    (lowmem = 0xc0000000). But only in case we have to split in
+     *    the first place, i.e. ram_size is larger than (traditional)
+     *    lowmem. And for new machine types (gigabyte_align = true)
+     *    only, for live migration compatibility reasons.
+     *
+     *  - Next the max-ram-below-4g option was added, which made it possible
+     *    to reduce lowmem to a smaller value, to allow a larger PCI I/O
+     *    window below 4G. qemu doesn't enforce gigabyte alignment here,
+     *    but prints a warning.
+     *
+     *  - Finally max-ram-below-4g got updated to also allow raising lowmem,
+     *    so legacy non-PAE guests can get as much memory as possible in
+     *    the 32bit address space below 4G.
+     *
+     *  - Note that Xen has its own ram setup code in xen_ram_init(),
+     *    called via xen_hvm_init_pc().
+ * + * Examples: + * qemu -M pc-1.7 -m 4G (old default) -> 3584M low, 512M high + * qemu -M pc -m 4G (new default) -> 3072M low, 1024M high + * qemu -M pc,max-ram-below-4g=2G -m 4G -> 2048M low, 2048M high + * qemu -M pc,max-ram-below-4g=4G -m 3968M -> 3968M low (=4G-128M) + */ + if (xen_enabled()) { + xen_hvm_init_pc(pcms, &ram_memory); + } else { + if (!pcms->max_ram_below_4g) { + pcms->max_ram_below_4g = 0xe0000000; /* default: 3.5G */ + } + lowmem = pcms->max_ram_below_4g; + if (machine->ram_size >= pcms->max_ram_below_4g) { + if (pcmc->gigabyte_align) { + if (lowmem > 0xc0000000) { + lowmem = 0xc0000000; + } + if (lowmem & (1 * GiB - 1)) { + warn_report("Large machine and max_ram_below_4g " + "(%" PRIu64 ") not a multiple of 1G; " + "possible bad performance.", + pcms->max_ram_below_4g); + } + } + } + + if (machine->ram_size >= lowmem) { + x86ms->above_4g_mem_size = machine->ram_size - lowmem; + x86ms->below_4g_mem_size = lowmem; + } else { + x86ms->above_4g_mem_size = 0; + x86ms->below_4g_mem_size = machine->ram_size; + } + } + + pc_machine_init_sgx_epc(pcms); + x86_cpus_init(x86ms, pcmc->default_cpu_version); + + if (pcmc->kvmclock_enabled) { + kvmclock_create(pcmc->kvmclock_create_always); + } + + if (pcmc->pci_enabled) { + pci_memory = g_new(MemoryRegion, 1); + memory_region_init(pci_memory, NULL, "pci", UINT64_MAX); + rom_memory = pci_memory; + } else { + pci_memory = NULL; + rom_memory = system_memory; + } + + pc_guest_info_init(pcms); + + if (pcmc->smbios_defaults) { + MachineClass *mc = MACHINE_GET_CLASS(machine); + /* These values are guest ABI, do not change */ + smbios_set_defaults("QEMU", "Standard PC (i440FX + PIIX, 1996)", + mc->name, pcmc->smbios_legacy_mode, + pcmc->smbios_uuid_encoded, + SMBIOS_ENTRY_POINT_21); + } + + /* allocate ram and load rom/bios */ + if (!xen_enabled()) { + pc_memory_init(pcms, system_memory, + rom_memory, &ram_memory); + } else { + pc_system_flash_cleanup_unused(pcms); + if (machine->kernel_filename != NULL) { + /* For xen HVM direct kernel boot, load linux here */ + xen_load_linux(pcms); + } + } + + gsi_state = pc_gsi_create(&x86ms->gsi, pcmc->pci_enabled); + + if (pcmc->pci_enabled) { + PIIX3State *piix3; + + pci_bus = i440fx_init(host_type, + pci_type, + &i440fx_state, + system_memory, system_io, machine->ram_size, + x86ms->below_4g_mem_size, + x86ms->above_4g_mem_size, + pci_memory, ram_memory); + pcms->bus = pci_bus; + + piix3 = piix3_create(pci_bus, &isa_bus); + piix3->pic = x86ms->gsi; + piix3_devfn = piix3->dev.devfn; + } else { + pci_bus = NULL; + i440fx_state = NULL; + isa_bus = isa_bus_new(NULL, get_system_memory(), system_io, + &error_abort); + pcms->hpet_enabled = false; + } + isa_bus_irqs(isa_bus, x86ms->gsi); + + pc_i8259_create(isa_bus, gsi_state->i8259_irq); + + if (pcmc->pci_enabled) { + ioapic_init_gsi(gsi_state, "i440fx"); + } + + if (tcg_enabled()) { + x86_register_ferr_irq(x86ms->gsi[13]); + } + + pc_vga_init(isa_bus, pcmc->pci_enabled ? pci_bus : NULL); + + assert(pcms->vmport != ON_OFF_AUTO__MAX); + if (pcms->vmport == ON_OFF_AUTO_AUTO) { + pcms->vmport = xen_enabled() ? ON_OFF_AUTO_OFF : ON_OFF_AUTO_ON; + } + + /* init basic PC hardware */ + pc_basic_device_init(pcms, isa_bus, x86ms->gsi, &rtc_state, true, + 0x4); + + pc_nic_init(pcmc, isa_bus, pci_bus); + + if (pcmc->pci_enabled) { + PCIDevice *dev; + + dev = pci_create_simple(pci_bus, piix3_devfn + 1, + xen_enabled() ? 
"piix3-ide-xen" : "piix3-ide"); + pci_ide_create_devs(dev); + idebus[0] = qdev_get_child_bus(&dev->qdev, "ide.0"); + idebus[1] = qdev_get_child_bus(&dev->qdev, "ide.1"); + pc_cmos_init(pcms, idebus[0], idebus[1], rtc_state); + } +#ifdef CONFIG_IDE_ISA + else { + DriveInfo *hd[MAX_IDE_BUS * MAX_IDE_DEVS]; + int i; + + ide_drive_get(hd, ARRAY_SIZE(hd)); + for (i = 0; i < MAX_IDE_BUS; i++) { + ISADevice *dev; + char busname[] = "ide.0"; + dev = isa_ide_init(isa_bus, ide_iobase[i], ide_iobase2[i], + ide_irq[i], + hd[MAX_IDE_DEVS * i], hd[MAX_IDE_DEVS * i + 1]); + /* + * The ide bus name is ide.0 for the first bus and ide.1 for the + * second one. + */ + busname[4] = '0' + i; + idebus[i] = qdev_get_child_bus(DEVICE(dev), busname); + } + pc_cmos_init(pcms, idebus[0], idebus[1], rtc_state); + } +#endif + + if (pcmc->pci_enabled && machine_usb(machine)) { + pci_create_simple(pci_bus, piix3_devfn + 2, "piix3-usb-uhci"); + } + + if (pcmc->pci_enabled && x86_machine_is_acpi_enabled(X86_MACHINE(pcms))) { + DeviceState *piix4_pm; + + smi_irq = qemu_allocate_irq(pc_acpi_smi_interrupt, first_cpu, 0); + /* TODO: Populate SPD eeprom data. */ + pcms->smbus = piix4_pm_init(pci_bus, piix3_devfn + 3, 0xb100, + x86ms->gsi[9], smi_irq, + x86_machine_is_smm_enabled(x86ms), + &piix4_pm); + smbus_eeprom_init(pcms->smbus, 8, NULL, 0); + + object_property_add_link(OBJECT(machine), PC_MACHINE_ACPI_DEVICE_PROP, + TYPE_HOTPLUG_HANDLER, + (Object **)&x86ms->acpi_dev, + object_property_allow_set_link, + OBJ_PROP_LINK_STRONG); + object_property_set_link(OBJECT(machine), PC_MACHINE_ACPI_DEVICE_PROP, + OBJECT(piix4_pm), &error_abort); + } + + if (machine->nvdimms_state->is_enabled) { + nvdimm_init_acpi_state(machine->nvdimms_state, system_io, + x86_nvdimm_acpi_dsmio, + x86ms->fw_cfg, OBJECT(pcms)); + } +} + +/* Looking for a pc_compat_2_4() function? It doesn't exist. + * pc_compat_*() functions that run on machine-init time and + * change global QEMU state are deprecated. Please don't create + * one, and implement any pc-*-2.4 (and newer) compat code in + * hw_compat_*, pc_compat_*, or * pc_*_machine_options(). + */ + +static void pc_compat_2_3_fn(MachineState *machine) +{ + X86MachineState *x86ms = X86_MACHINE(machine); + if (kvm_enabled()) { + x86ms->smm = ON_OFF_AUTO_OFF; + } +} + +static void pc_compat_2_2_fn(MachineState *machine) +{ + pc_compat_2_3_fn(machine); +} + +static void pc_compat_2_1_fn(MachineState *machine) +{ + pc_compat_2_2_fn(machine); + x86_cpu_change_kvm_default("svm", NULL); +} + +static void pc_compat_2_0_fn(MachineState *machine) +{ + pc_compat_2_1_fn(machine); +} + +static void pc_compat_1_7_fn(MachineState *machine) +{ + pc_compat_2_0_fn(machine); + x86_cpu_change_kvm_default("x2apic", NULL); +} + +static void pc_compat_1_6_fn(MachineState *machine) +{ + pc_compat_1_7_fn(machine); +} + +static void pc_compat_1_5_fn(MachineState *machine) +{ + pc_compat_1_6_fn(machine); +} + +static void pc_compat_1_4_fn(MachineState *machine) +{ + pc_compat_1_5_fn(machine); +} + +static void pc_init_isa(MachineState *machine) +{ + pc_init1(machine, TYPE_I440FX_PCI_HOST_BRIDGE, TYPE_I440FX_PCI_DEVICE); +} + +#ifdef CONFIG_XEN +static void pc_xen_hvm_init_pci(MachineState *machine) +{ + const char *pci_type = xen_igd_gfx_pt_enabled() ? 
+ TYPE_IGD_PASSTHROUGH_I440FX_PCI_DEVICE : TYPE_I440FX_PCI_DEVICE; + + pc_init1(machine, + TYPE_I440FX_PCI_HOST_BRIDGE, + pci_type); +} + +static void pc_xen_hvm_init(MachineState *machine) +{ + PCMachineState *pcms = PC_MACHINE(machine); + + if (!xen_enabled()) { + error_report("xenfv machine requires the xen accelerator"); + exit(1); + } + + pc_xen_hvm_init_pci(machine); + pci_create_simple(pcms->bus, -1, "xen-platform"); +} +#endif + +#define DEFINE_I440FX_MACHINE(suffix, name, compatfn, optionfn) \ + static void pc_init_##suffix(MachineState *machine) \ + { \ + void (*compat)(MachineState *m) = (compatfn); \ + if (compat) { \ + compat(machine); \ + } \ + pc_init1(machine, TYPE_I440FX_PCI_HOST_BRIDGE, \ + TYPE_I440FX_PCI_DEVICE); \ + } \ + DEFINE_PC_MACHINE(suffix, name, pc_init_##suffix, optionfn) + +static void pc_i440fx_machine_options(MachineClass *m) +{ + PCMachineClass *pcmc = PC_MACHINE_CLASS(m); + pcmc->default_nic_model = "e1000"; + pcmc->pci_root_uid = 0; + + m->family = "pc_piix"; + m->desc = "Standard PC (i440FX + PIIX, 1996)"; + m->default_machine_opts = "firmware=bios-256k.bin"; + m->default_display = "std"; + machine_class_allow_dynamic_sysbus_dev(m, TYPE_RAMFB_DEVICE); + machine_class_allow_dynamic_sysbus_dev(m, TYPE_VMBUS_BRIDGE); +} + +static void pc_i440fx_6_2_machine_options(MachineClass *m) +{ + PCMachineClass *pcmc = PC_MACHINE_CLASS(m); + pc_i440fx_machine_options(m); + m->alias = "pc"; + m->is_default = true; + pcmc->default_cpu_version = 1; +} + +DEFINE_I440FX_MACHINE(v6_2, "pc-i440fx-6.2", NULL, + pc_i440fx_6_2_machine_options); + +static void pc_i440fx_6_1_machine_options(MachineClass *m) +{ + pc_i440fx_6_2_machine_options(m); + m->alias = NULL; + m->is_default = false; + compat_props_add(m->compat_props, hw_compat_6_1, hw_compat_6_1_len); + compat_props_add(m->compat_props, pc_compat_6_1, pc_compat_6_1_len); + m->smp_props.prefer_sockets = true; +} + +DEFINE_I440FX_MACHINE(v6_1, "pc-i440fx-6.1", NULL, + pc_i440fx_6_1_machine_options); + +static void pc_i440fx_6_0_machine_options(MachineClass *m) +{ + pc_i440fx_6_1_machine_options(m); + m->alias = NULL; + m->is_default = false; + compat_props_add(m->compat_props, hw_compat_6_0, hw_compat_6_0_len); + compat_props_add(m->compat_props, pc_compat_6_0, pc_compat_6_0_len); +} + +DEFINE_I440FX_MACHINE(v6_0, "pc-i440fx-6.0", NULL, + pc_i440fx_6_0_machine_options); + +static void pc_i440fx_5_2_machine_options(MachineClass *m) +{ + pc_i440fx_6_0_machine_options(m); + m->alias = NULL; + m->is_default = false; + compat_props_add(m->compat_props, hw_compat_5_2, hw_compat_5_2_len); + compat_props_add(m->compat_props, pc_compat_5_2, pc_compat_5_2_len); +} + +DEFINE_I440FX_MACHINE(v5_2, "pc-i440fx-5.2", NULL, + pc_i440fx_5_2_machine_options); + +static void pc_i440fx_5_1_machine_options(MachineClass *m) +{ + PCMachineClass *pcmc = PC_MACHINE_CLASS(m); + + pc_i440fx_5_2_machine_options(m); + m->alias = NULL; + m->is_default = false; + compat_props_add(m->compat_props, hw_compat_5_1, hw_compat_5_1_len); + compat_props_add(m->compat_props, pc_compat_5_1, pc_compat_5_1_len); + pcmc->kvmclock_create_always = false; + pcmc->pci_root_uid = 1; +} + +DEFINE_I440FX_MACHINE(v5_1, "pc-i440fx-5.1", NULL, + pc_i440fx_5_1_machine_options); + +static void pc_i440fx_5_0_machine_options(MachineClass *m) +{ + pc_i440fx_5_1_machine_options(m); + m->alias = NULL; + m->is_default = false; + m->numa_mem_supported = true; + compat_props_add(m->compat_props, hw_compat_5_0, hw_compat_5_0_len); + compat_props_add(m->compat_props, pc_compat_5_0, 
pc_compat_5_0_len); + m->auto_enable_numa_with_memdev = false; +} + +DEFINE_I440FX_MACHINE(v5_0, "pc-i440fx-5.0", NULL, + pc_i440fx_5_0_machine_options); + +static void pc_i440fx_4_2_machine_options(MachineClass *m) +{ + pc_i440fx_5_0_machine_options(m); + m->alias = NULL; + m->is_default = false; + compat_props_add(m->compat_props, hw_compat_4_2, hw_compat_4_2_len); + compat_props_add(m->compat_props, pc_compat_4_2, pc_compat_4_2_len); +} + +DEFINE_I440FX_MACHINE(v4_2, "pc-i440fx-4.2", NULL, + pc_i440fx_4_2_machine_options); + +static void pc_i440fx_4_1_machine_options(MachineClass *m) +{ + pc_i440fx_4_2_machine_options(m); + m->alias = NULL; + m->is_default = false; + compat_props_add(m->compat_props, hw_compat_4_1, hw_compat_4_1_len); + compat_props_add(m->compat_props, pc_compat_4_1, pc_compat_4_1_len); +} + +DEFINE_I440FX_MACHINE(v4_1, "pc-i440fx-4.1", NULL, + pc_i440fx_4_1_machine_options); + +static void pc_i440fx_4_0_machine_options(MachineClass *m) +{ + PCMachineClass *pcmc = PC_MACHINE_CLASS(m); + pc_i440fx_4_1_machine_options(m); + m->alias = NULL; + m->is_default = false; + pcmc->default_cpu_version = CPU_VERSION_LEGACY; + compat_props_add(m->compat_props, hw_compat_4_0, hw_compat_4_0_len); + compat_props_add(m->compat_props, pc_compat_4_0, pc_compat_4_0_len); +} + +DEFINE_I440FX_MACHINE(v4_0, "pc-i440fx-4.0", NULL, + pc_i440fx_4_0_machine_options); + +static void pc_i440fx_3_1_machine_options(MachineClass *m) +{ + PCMachineClass *pcmc = PC_MACHINE_CLASS(m); + + pc_i440fx_4_0_machine_options(m); + m->is_default = false; + pcmc->do_not_add_smb_acpi = true; + m->smbus_no_migration_support = true; + m->alias = NULL; + pcmc->pvh_enabled = false; + compat_props_add(m->compat_props, hw_compat_3_1, hw_compat_3_1_len); + compat_props_add(m->compat_props, pc_compat_3_1, pc_compat_3_1_len); +} + +DEFINE_I440FX_MACHINE(v3_1, "pc-i440fx-3.1", NULL, + pc_i440fx_3_1_machine_options); + +static void pc_i440fx_3_0_machine_options(MachineClass *m) +{ + pc_i440fx_3_1_machine_options(m); + compat_props_add(m->compat_props, hw_compat_3_0, hw_compat_3_0_len); + compat_props_add(m->compat_props, pc_compat_3_0, pc_compat_3_0_len); +} + +DEFINE_I440FX_MACHINE(v3_0, "pc-i440fx-3.0", NULL, + pc_i440fx_3_0_machine_options); + +static void pc_i440fx_2_12_machine_options(MachineClass *m) +{ + pc_i440fx_3_0_machine_options(m); + compat_props_add(m->compat_props, hw_compat_2_12, hw_compat_2_12_len); + compat_props_add(m->compat_props, pc_compat_2_12, pc_compat_2_12_len); +} + +DEFINE_I440FX_MACHINE(v2_12, "pc-i440fx-2.12", NULL, + pc_i440fx_2_12_machine_options); + +static void pc_i440fx_2_11_machine_options(MachineClass *m) +{ + pc_i440fx_2_12_machine_options(m); + compat_props_add(m->compat_props, hw_compat_2_11, hw_compat_2_11_len); + compat_props_add(m->compat_props, pc_compat_2_11, pc_compat_2_11_len); +} + +DEFINE_I440FX_MACHINE(v2_11, "pc-i440fx-2.11", NULL, + pc_i440fx_2_11_machine_options); + +static void pc_i440fx_2_10_machine_options(MachineClass *m) +{ + pc_i440fx_2_11_machine_options(m); + compat_props_add(m->compat_props, hw_compat_2_10, hw_compat_2_10_len); + compat_props_add(m->compat_props, pc_compat_2_10, pc_compat_2_10_len); + m->auto_enable_numa_with_memhp = false; +} + +DEFINE_I440FX_MACHINE(v2_10, "pc-i440fx-2.10", NULL, + pc_i440fx_2_10_machine_options); + +static void pc_i440fx_2_9_machine_options(MachineClass *m) +{ + pc_i440fx_2_10_machine_options(m); + compat_props_add(m->compat_props, hw_compat_2_9, hw_compat_2_9_len); + compat_props_add(m->compat_props, pc_compat_2_9, 
pc_compat_2_9_len); +} + +DEFINE_I440FX_MACHINE(v2_9, "pc-i440fx-2.9", NULL, + pc_i440fx_2_9_machine_options); + +static void pc_i440fx_2_8_machine_options(MachineClass *m) +{ + pc_i440fx_2_9_machine_options(m); + compat_props_add(m->compat_props, hw_compat_2_8, hw_compat_2_8_len); + compat_props_add(m->compat_props, pc_compat_2_8, pc_compat_2_8_len); +} + +DEFINE_I440FX_MACHINE(v2_8, "pc-i440fx-2.8", NULL, + pc_i440fx_2_8_machine_options); + +static void pc_i440fx_2_7_machine_options(MachineClass *m) +{ + pc_i440fx_2_8_machine_options(m); + compat_props_add(m->compat_props, hw_compat_2_7, hw_compat_2_7_len); + compat_props_add(m->compat_props, pc_compat_2_7, pc_compat_2_7_len); +} + +DEFINE_I440FX_MACHINE(v2_7, "pc-i440fx-2.7", NULL, + pc_i440fx_2_7_machine_options); + +static void pc_i440fx_2_6_machine_options(MachineClass *m) +{ + X86MachineClass *x86mc = X86_MACHINE_CLASS(m); + PCMachineClass *pcmc = PC_MACHINE_CLASS(m); + + pc_i440fx_2_7_machine_options(m); + pcmc->legacy_cpu_hotplug = true; + x86mc->fwcfg_dma_enabled = false; + compat_props_add(m->compat_props, hw_compat_2_6, hw_compat_2_6_len); + compat_props_add(m->compat_props, pc_compat_2_6, pc_compat_2_6_len); +} + +DEFINE_I440FX_MACHINE(v2_6, "pc-i440fx-2.6", NULL, + pc_i440fx_2_6_machine_options); + +static void pc_i440fx_2_5_machine_options(MachineClass *m) +{ + X86MachineClass *x86mc = X86_MACHINE_CLASS(m); + + pc_i440fx_2_6_machine_options(m); + x86mc->save_tsc_khz = false; + m->legacy_fw_cfg_order = 1; + compat_props_add(m->compat_props, hw_compat_2_5, hw_compat_2_5_len); + compat_props_add(m->compat_props, pc_compat_2_5, pc_compat_2_5_len); +} + +DEFINE_I440FX_MACHINE(v2_5, "pc-i440fx-2.5", NULL, + pc_i440fx_2_5_machine_options); + +static void pc_i440fx_2_4_machine_options(MachineClass *m) +{ + PCMachineClass *pcmc = PC_MACHINE_CLASS(m); + + pc_i440fx_2_5_machine_options(m); + m->hw_version = "2.4.0"; + pcmc->broken_reserved_end = true; + compat_props_add(m->compat_props, hw_compat_2_4, hw_compat_2_4_len); + compat_props_add(m->compat_props, pc_compat_2_4, pc_compat_2_4_len); +} + +DEFINE_I440FX_MACHINE(v2_4, "pc-i440fx-2.4", NULL, + pc_i440fx_2_4_machine_options) + +static void pc_i440fx_2_3_machine_options(MachineClass *m) +{ + pc_i440fx_2_4_machine_options(m); + m->hw_version = "2.3.0"; + compat_props_add(m->compat_props, hw_compat_2_3, hw_compat_2_3_len); + compat_props_add(m->compat_props, pc_compat_2_3, pc_compat_2_3_len); +} + +DEFINE_I440FX_MACHINE(v2_3, "pc-i440fx-2.3", pc_compat_2_3_fn, + pc_i440fx_2_3_machine_options); + +static void pc_i440fx_2_2_machine_options(MachineClass *m) +{ + PCMachineClass *pcmc = PC_MACHINE_CLASS(m); + + pc_i440fx_2_3_machine_options(m); + m->hw_version = "2.2.0"; + m->default_machine_opts = "firmware=bios-256k.bin,suppress-vmdesc=on"; + compat_props_add(m->compat_props, hw_compat_2_2, hw_compat_2_2_len); + compat_props_add(m->compat_props, pc_compat_2_2, pc_compat_2_2_len); + pcmc->rsdp_in_ram = false; +} + +DEFINE_I440FX_MACHINE(v2_2, "pc-i440fx-2.2", pc_compat_2_2_fn, + pc_i440fx_2_2_machine_options); + +static void pc_i440fx_2_1_machine_options(MachineClass *m) +{ + PCMachineClass *pcmc = PC_MACHINE_CLASS(m); + + pc_i440fx_2_2_machine_options(m); + m->hw_version = "2.1.0"; + m->default_display = NULL; + compat_props_add(m->compat_props, hw_compat_2_1, hw_compat_2_1_len); + compat_props_add(m->compat_props, pc_compat_2_1, pc_compat_2_1_len); + pcmc->smbios_uuid_encoded = false; + pcmc->enforce_aligned_dimm = false; +} + +DEFINE_I440FX_MACHINE(v2_1, "pc-i440fx-2.1", 
pc_compat_2_1_fn, + pc_i440fx_2_1_machine_options); + +static void pc_i440fx_2_0_machine_options(MachineClass *m) +{ + PCMachineClass *pcmc = PC_MACHINE_CLASS(m); + + pc_i440fx_2_1_machine_options(m); + m->hw_version = "2.0.0"; + compat_props_add(m->compat_props, pc_compat_2_0, pc_compat_2_0_len); + pcmc->smbios_legacy_mode = true; + pcmc->has_reserved_memory = false; + /* This value depends on the actual DSDT and SSDT compiled into + * the source QEMU; unfortunately it depends on the binary and + * not on the machine type, so we cannot make pc-i440fx-1.7 work on + * both QEMU 1.7 and QEMU 2.0. + * + * Large variations cause migration to fail for more than one + * consecutive value of the "-smp" maxcpus option. + * + * For small variations of the kind caused by different iasl versions, + * the 4k rounding usually leaves slack. However, there could still be + * one or two values that break. For QEMU 1.7 and QEMU 2.0 the + * slack is only ~10 bytes before one "-smp maxcpus" value breaks! + * + * 6652 is valid for QEMU 2.0; the right value for pc-i440fx-1.7 on + * QEMU 1.7 is 6414. For RHEL/CentOS 7.0 it is 6418. + */ + pcmc->legacy_acpi_table_size = 6652; + pcmc->acpi_data_size = 0x10000; +} + +DEFINE_I440FX_MACHINE(v2_0, "pc-i440fx-2.0", pc_compat_2_0_fn, + pc_i440fx_2_0_machine_options); + +static void pc_i440fx_1_7_machine_options(MachineClass *m) +{ + PCMachineClass *pcmc = PC_MACHINE_CLASS(m); + + pc_i440fx_2_0_machine_options(m); + m->hw_version = "1.7.0"; + m->default_machine_opts = NULL; + m->option_rom_has_mr = true; + compat_props_add(m->compat_props, pc_compat_1_7, pc_compat_1_7_len); + pcmc->smbios_defaults = false; + pcmc->gigabyte_align = false; + pcmc->legacy_acpi_table_size = 6414; +} + +DEFINE_I440FX_MACHINE(v1_7, "pc-i440fx-1.7", pc_compat_1_7_fn, + pc_i440fx_1_7_machine_options); + +static void pc_i440fx_1_6_machine_options(MachineClass *m) +{ + PCMachineClass *pcmc = PC_MACHINE_CLASS(m); + + pc_i440fx_1_7_machine_options(m); + m->hw_version = "1.6.0"; + m->rom_file_has_mr = false; + compat_props_add(m->compat_props, pc_compat_1_6, pc_compat_1_6_len); + pcmc->has_acpi_build = false; +} + +DEFINE_I440FX_MACHINE(v1_6, "pc-i440fx-1.6", pc_compat_1_6_fn, + pc_i440fx_1_6_machine_options); + +static void pc_i440fx_1_5_machine_options(MachineClass *m) +{ + pc_i440fx_1_6_machine_options(m); + m->hw_version = "1.5.0"; + compat_props_add(m->compat_props, pc_compat_1_5, pc_compat_1_5_len); +} + +DEFINE_I440FX_MACHINE(v1_5, "pc-i440fx-1.5", pc_compat_1_5_fn, + pc_i440fx_1_5_machine_options); + +static void pc_i440fx_1_4_machine_options(MachineClass *m) +{ + pc_i440fx_1_5_machine_options(m); + m->hw_version = "1.4.0"; + compat_props_add(m->compat_props, pc_compat_1_4, pc_compat_1_4_len); +} + +DEFINE_I440FX_MACHINE(v1_4, "pc-i440fx-1.4", pc_compat_1_4_fn, + pc_i440fx_1_4_machine_options); + +typedef struct { + uint16_t gpu_device_id; + uint16_t pch_device_id; + uint8_t pch_revision_id; +} IGDDeviceIDInfo; + +/* In the real world, different GPUs should have different PCHs, but in + * practice the different PCH DIDs likely map to different PCH SKUs. We do + * the same thing for the GPU. For PCH, the different SKUs are going to be + * all the same silicon design and implementation, just with different + * features turned on and off with fuses. The SW interfaces should be + * consistent across all SKUs in a given family (e.g. LPT), but the same + * features may not all be supported.
+ * + * Most of these different PCH features probably don't matter to the + * Gfx driver, but obviously any difference in display port connections + * will, so it should be fine with any PCH in the case of passthrough. + * + * So we currently use one PCH version, 0x8c4e, to cover all HSW (Haswell) + * scenarios, and 0x9cc3 for BDW (Broadwell). + */ +static const IGDDeviceIDInfo igd_combo_id_infos[] = { + /* HSW Classic */ + {0x0402, 0x8c4e, 0x04}, /* HSWGT1D, HSWD_w7 */ + {0x0406, 0x8c4e, 0x04}, /* HSWGT1M, HSWM_w7 */ + {0x0412, 0x8c4e, 0x04}, /* HSWGT2D, HSWD_w7 */ + {0x0416, 0x8c4e, 0x04}, /* HSWGT2M, HSWM_w7 */ + {0x041E, 0x8c4e, 0x04}, /* HSWGT15D, HSWD_w7 */ + /* HSW ULT */ + {0x0A06, 0x8c4e, 0x04}, /* HSWGT1UT, HSWM_w7 */ + {0x0A16, 0x8c4e, 0x04}, /* HSWGT2UT, HSWM_w7 */ + {0x0A26, 0x8c4e, 0x06}, /* HSWGT3UT, HSWM_w7 */ + {0x0A2E, 0x8c4e, 0x04}, /* HSWGT3UT28W, HSWM_w7 */ + {0x0A1E, 0x8c4e, 0x04}, /* HSWGT2UX, HSWM_w7 */ + {0x0A0E, 0x8c4e, 0x04}, /* HSWGT1ULX, HSWM_w7 */ + /* HSW CRW */ + {0x0D26, 0x8c4e, 0x04}, /* HSWGT3CW, HSWM_w7 */ + {0x0D22, 0x8c4e, 0x04}, /* HSWGT3CWDT, HSWD_w7 */ + /* HSW Server */ + {0x041A, 0x8c4e, 0x04}, /* HSWSVGT2, HSWD_w7 */ + /* HSW SRVR */ + {0x040A, 0x8c4e, 0x04}, /* HSWSVGT1, HSWD_w7 */ + /* BDW */ + {0x1606, 0x9cc3, 0x03}, /* BDWULTGT1, BDWM_w7 */ + {0x1616, 0x9cc3, 0x03}, /* BDWULTGT2, BDWM_w7 */ + {0x1626, 0x9cc3, 0x03}, /* BDWULTGT3, BDWM_w7 */ + {0x160E, 0x9cc3, 0x03}, /* BDWULXGT1, BDWM_w7 */ + {0x161E, 0x9cc3, 0x03}, /* BDWULXGT2, BDWM_w7 */ + {0x1602, 0x9cc3, 0x03}, /* BDWHALOGT1, BDWM_w7 */ + {0x1612, 0x9cc3, 0x03}, /* BDWHALOGT2, BDWM_w7 */ + {0x1622, 0x9cc3, 0x03}, /* BDWHALOGT3, BDWM_w7 */ + {0x162B, 0x9cc3, 0x03}, /* BDWHALO28W, BDWM_w7 */ + {0x162A, 0x9cc3, 0x03}, /* BDWGT3WRKS, BDWM_w7 */ + {0x162D, 0x9cc3, 0x03}, /* BDWGT3SRVR, BDWM_w7 */ +}; + +static void isa_bridge_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + + dc->desc = "ISA bridge faked to support IGD PT"; + set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); + k->vendor_id = PCI_VENDOR_ID_INTEL; + k->class_id = PCI_CLASS_BRIDGE_ISA; +} + +static TypeInfo isa_bridge_info = { + .name = "igd-passthrough-isa-bridge", + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(PCIDevice), + .class_init = isa_bridge_class_init, + .interfaces = (InterfaceInfo[]) { + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { }, + }, +}; + +static void pt_graphics_register_types(void) +{ + type_register_static(&isa_bridge_info); +} +type_init(pt_graphics_register_types) + +void igd_passthrough_isa_bridge_create(PCIBus *bus, uint16_t gpu_dev_id) +{ + struct PCIDevice *bridge_dev; + int i, num; + uint16_t pch_dev_id = 0xffff; + uint8_t pch_rev_id = 0; + + num = ARRAY_SIZE(igd_combo_id_infos); + for (i = 0; i < num; i++) { + if (gpu_dev_id == igd_combo_id_infos[i].gpu_device_id) { + pch_dev_id = igd_combo_id_infos[i].pch_device_id; + pch_rev_id = igd_combo_id_infos[i].pch_revision_id; + } + } + + if (pch_dev_id == 0xffff) { + return; + } + + /* Currently IGD drivers always need to access PCH by 1f.0. */ + bridge_dev = pci_create_simple(bus, PCI_DEVFN(0x1f, 0), + "igd-passthrough-isa-bridge"); + + /* + * Note that vendor id is always PCI_VENDOR_ID_INTEL.
+ */ + if (!bridge_dev) { + fprintf(stderr, "set igd-passthrough-isa-bridge failed!\n"); + return; + } + pci_config_set_device_id(bridge_dev->config, pch_dev_id); + pci_config_set_revision(bridge_dev->config, pch_rev_id); +} + +static void isapc_machine_options(MachineClass *m) +{ + PCMachineClass *pcmc = PC_MACHINE_CLASS(m); + m->desc = "ISA-only PC"; + m->max_cpus = 1; + m->option_rom_has_mr = true; + m->rom_file_has_mr = false; + pcmc->pci_enabled = false; + pcmc->has_acpi_build = false; + pcmc->smbios_defaults = false; + pcmc->gigabyte_align = false; + pcmc->smbios_legacy_mode = true; + pcmc->has_reserved_memory = false; + pcmc->default_nic_model = "ne2k_isa"; + m->default_cpu_type = X86_CPU_TYPE_NAME("486"); +} + +DEFINE_PC_MACHINE(isapc, "isapc", pc_init_isa, + isapc_machine_options); + + +#ifdef CONFIG_XEN +static void xenfv_4_2_machine_options(MachineClass *m) +{ + pc_i440fx_4_2_machine_options(m); + m->desc = "Xen Fully-virtualized PC"; + m->max_cpus = HVM_MAX_VCPUS; + m->default_machine_opts = "accel=xen,suppress-vmdesc=on"; +} + +DEFINE_PC_MACHINE(xenfv_4_2, "xenfv-4.2", pc_xen_hvm_init, + xenfv_4_2_machine_options); + +static void xenfv_3_1_machine_options(MachineClass *m) +{ + pc_i440fx_3_1_machine_options(m); + m->desc = "Xen Fully-virtualized PC"; + m->alias = "xenfv"; + m->max_cpus = HVM_MAX_VCPUS; + m->default_machine_opts = "accel=xen,suppress-vmdesc=on"; +} + +DEFINE_PC_MACHINE(xenfv, "xenfv-3.1", pc_xen_hvm_init, + xenfv_3_1_machine_options); +#endif diff --git a/hw/i386/pc_q35.c b/hw/i386/pc_q35.c new file mode 100644 index 000000000..e1e100316 --- /dev/null +++ b/hw/i386/pc_q35.c @@ -0,0 +1,620 @@ +/* + * Q35 chipset based pc system emulator + * + * Copyright (c) 2003-2004 Fabrice Bellard + * Copyright (c) 2009, 2010 + * Isaku Yamahata + * VA Linux Systems Japan K.K. + * Copyright (C) 2012 Jason Baron + * + * This is based on pc.c, but heavily modified. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "hw/loader.h" +#include "hw/i2c/smbus_eeprom.h" +#include "hw/rtc/mc146818rtc.h" +#include "sysemu/kvm.h" +#include "hw/kvm/clock.h" +#include "hw/pci-host/q35.h" +#include "hw/pci/pcie_port.h" +#include "hw/qdev-properties.h" +#include "hw/i386/x86.h" +#include "hw/i386/pc.h" +#include "hw/i386/ich9.h" +#include "hw/i386/amd_iommu.h" +#include "hw/i386/intel_iommu.h" +#include "hw/display/ramfb.h" +#include "hw/firmware/smbios.h" +#include "hw/ide/pci.h" +#include "hw/ide/ahci.h" +#include "hw/usb.h" +#include "qapi/error.h" +#include "qemu/error-report.h" +#include "sysemu/numa.h" +#include "hw/hyperv/vmbus-bridge.h" +#include "hw/mem/nvdimm.h" +#include "hw/i386/acpi-build.h" + +/* ICH9 AHCI has 6 ports */ +#define MAX_SATA_PORTS 6 + +struct ehci_companions { + const char *name; + int func; + int port; +}; + +static const struct ehci_companions ich9_1d[] = { + { .name = "ich9-usb-uhci1", .func = 0, .port = 0 }, + { .name = "ich9-usb-uhci2", .func = 1, .port = 2 }, + { .name = "ich9-usb-uhci3", .func = 2, .port = 4 }, +}; + +static const struct ehci_companions ich9_1a[] = { + { .name = "ich9-usb-uhci4", .func = 0, .port = 0 }, + { .name = "ich9-usb-uhci5", .func = 1, .port = 2 }, + { .name = "ich9-usb-uhci6", .func = 2, .port = 4 }, +}; + +static int ehci_create_ich9_with_companions(PCIBus *bus, int slot) +{ + const struct ehci_companions *comp; + PCIDevice *ehci, *uhci; + BusState *usbbus; + const char *name; + int i; + + switch (slot) { + case 0x1d: + name = "ich9-usb-ehci1"; + comp = ich9_1d; + break; + case 0x1a: + name = "ich9-usb-ehci2"; + comp = ich9_1a; + break; + default: + return -1; + } + + ehci = pci_new_multifunction(PCI_DEVFN(slot, 7), true, name); + pci_realize_and_unref(ehci, bus, &error_fatal); + usbbus = QLIST_FIRST(&ehci->qdev.child_bus); + + for (i = 0; i < 3; i++) { + uhci = pci_new_multifunction(PCI_DEVFN(slot, comp[i].func), true, + comp[i].name); + qdev_prop_set_string(&uhci->qdev, "masterbus", usbbus->name); + qdev_prop_set_uint32(&uhci->qdev, "firstport", comp[i].port); + pci_realize_and_unref(uhci, bus, &error_fatal); + } + return 0; +} + +/* PC hardware initialisation */ +static void pc_q35_init(MachineState *machine) +{ + PCMachineState *pcms = PC_MACHINE(machine); + PCMachineClass *pcmc = PC_MACHINE_GET_CLASS(pcms); + X86MachineState *x86ms = X86_MACHINE(machine); + Q35PCIHost *q35_host; + PCIHostState *phb; + PCIBus *host_bus; + PCIDevice *lpc; + DeviceState *lpc_dev; + BusState *idebus[MAX_SATA_PORTS]; + ISADevice *rtc_state; + MemoryRegion *system_io = get_system_io(); + MemoryRegion *pci_memory; + MemoryRegion *rom_memory; + MemoryRegion *ram_memory; + GSIState *gsi_state; + ISABus *isa_bus; + int i; + ICH9LPCState *ich9_lpc; + PCIDevice *ahci; + ram_addr_t lowmem; + DriveInfo *hd[MAX_SATA_PORTS]; + MachineClass *mc = MACHINE_GET_CLASS(machine); + bool acpi_pcihp; + bool keep_pci_slot_hpc; + + /* Check whether RAM fits below 4G (leaving 1/2 GByte for IO memory + * and 256 Mbytes for PCI Express Enhanced Configuration Access Mapping + * also known as MMCFG). + * If it doesn't, we need to split it in chunks below and above 4G. + * In any case, try to make sure that guest addresses aligned at + * 1G boundaries get mapped to host addresses aligned at 1G boundaries. + */ + if (machine->ram_size >= 0xb0000000) { + lowmem = 0x80000000; + } else { + lowmem = 0xb0000000; + } + + /* Handle the machine opt max-ram-below-4g. It is basically doing + * min(qemu limit, user limit). 
+ */ + if (!pcms->max_ram_below_4g) { + pcms->max_ram_below_4g = 4 * GiB; + } + if (lowmem > pcms->max_ram_below_4g) { + lowmem = pcms->max_ram_below_4g; + if (machine->ram_size - lowmem > lowmem && + lowmem & (1 * GiB - 1)) { + warn_report("There is possibly poor performance as the ram size " + "(0x%" PRIx64 ") is more than twice the size of" + " max-ram-below-4g (%"PRIu64") and" + " max-ram-below-4g is not a multiple of 1G.", + (uint64_t)machine->ram_size, pcms->max_ram_below_4g); + } + } + + if (machine->ram_size >= lowmem) { + x86ms->above_4g_mem_size = machine->ram_size - lowmem; + x86ms->below_4g_mem_size = lowmem; + } else { + x86ms->above_4g_mem_size = 0; + x86ms->below_4g_mem_size = machine->ram_size; + } + + pc_machine_init_sgx_epc(pcms); + x86_cpus_init(x86ms, pcmc->default_cpu_version); + + kvmclock_create(pcmc->kvmclock_create_always); + + /* pci enabled */ + if (pcmc->pci_enabled) { + pci_memory = g_new(MemoryRegion, 1); + memory_region_init(pci_memory, NULL, "pci", UINT64_MAX); + rom_memory = pci_memory; + } else { + pci_memory = NULL; + rom_memory = get_system_memory(); + } + + pc_guest_info_init(pcms); + + if (pcmc->smbios_defaults) { + /* These values are guest ABI, do not change */ + smbios_set_defaults("QEMU", "Standard PC (Q35 + ICH9, 2009)", + mc->name, pcmc->smbios_legacy_mode, + pcmc->smbios_uuid_encoded, + SMBIOS_ENTRY_POINT_21); + } + + /* allocate ram and load rom/bios */ + pc_memory_init(pcms, get_system_memory(), rom_memory, &ram_memory); + + /* create pci host bus */ + q35_host = Q35_HOST_DEVICE(qdev_new(TYPE_Q35_HOST_DEVICE)); + + object_property_add_child(qdev_get_machine(), "q35", OBJECT(q35_host)); + object_property_set_link(OBJECT(q35_host), MCH_HOST_PROP_RAM_MEM, + OBJECT(ram_memory), NULL); + object_property_set_link(OBJECT(q35_host), MCH_HOST_PROP_PCI_MEM, + OBJECT(pci_memory), NULL); + object_property_set_link(OBJECT(q35_host), MCH_HOST_PROP_SYSTEM_MEM, + OBJECT(get_system_memory()), NULL); + object_property_set_link(OBJECT(q35_host), MCH_HOST_PROP_IO_MEM, + OBJECT(system_io), NULL); + object_property_set_int(OBJECT(q35_host), PCI_HOST_BELOW_4G_MEM_SIZE, + x86ms->below_4g_mem_size, NULL); + object_property_set_int(OBJECT(q35_host), PCI_HOST_ABOVE_4G_MEM_SIZE, + x86ms->above_4g_mem_size, NULL); + /* pci */ + sysbus_realize_and_unref(SYS_BUS_DEVICE(q35_host), &error_fatal); + phb = PCI_HOST_BRIDGE(q35_host); + host_bus = phb->bus; + /* create ISA bus */ + lpc = pci_create_simple_multifunction(host_bus, PCI_DEVFN(ICH9_LPC_DEV, + ICH9_LPC_FUNC), true, + TYPE_ICH9_LPC_DEVICE); + + object_property_add_link(OBJECT(machine), PC_MACHINE_ACPI_DEVICE_PROP, + TYPE_HOTPLUG_HANDLER, + (Object **)&x86ms->acpi_dev, + object_property_allow_set_link, + OBJ_PROP_LINK_STRONG); + object_property_set_link(OBJECT(machine), PC_MACHINE_ACPI_DEVICE_PROP, + OBJECT(lpc), &error_abort); + + acpi_pcihp = object_property_get_bool(OBJECT(lpc), + ACPI_PM_PROP_ACPI_PCIHP_BRIDGE, + NULL); + + keep_pci_slot_hpc = object_property_get_bool(OBJECT(lpc), + "x-keep-pci-slot-hpc", + NULL); + + if (!keep_pci_slot_hpc && acpi_pcihp) { + object_register_sugar_prop(TYPE_PCIE_SLOT, "x-native-hotplug", + "false", true); + } + + /* irq lines */ + gsi_state = pc_gsi_create(&x86ms->gsi, pcmc->pci_enabled); + + ich9_lpc = ICH9_LPC_DEVICE(lpc); + lpc_dev = DEVICE(lpc); + for (i = 0; i < GSI_NUM_PINS; i++) { + qdev_connect_gpio_out_named(lpc_dev, ICH9_GPIO_GSI, i, x86ms->gsi[i]); + } + pci_bus_irqs(host_bus, ich9_lpc_set_irq, ich9_lpc_map_irq, ich9_lpc, + ICH9_LPC_NB_PIRQS); + 
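/* + * pci_bus_irqs() above registers ich9_lpc_set_irq/ich9_lpc_map_irq as + * the bus's INTx callbacks: a device's INTx pin is mapped onto one of + * the ICH9_LPC_NB_PIRQS PIRQ pins, and the LPC routes each PIRQ to one + * of the GSIs wired up with qdev_connect_gpio_out_named() just above. + */ +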
pci_bus_set_route_irq_fn(host_bus, ich9_route_intx_pin_to_irq); + isa_bus = ich9_lpc->isa_bus; + + pc_i8259_create(isa_bus, gsi_state->i8259_irq); + + if (pcmc->pci_enabled) { + ioapic_init_gsi(gsi_state, "q35"); + } + + if (tcg_enabled()) { + x86_register_ferr_irq(x86ms->gsi[13]); + } + + assert(pcms->vmport != ON_OFF_AUTO__MAX); + if (pcms->vmport == ON_OFF_AUTO_AUTO) { + pcms->vmport = ON_OFF_AUTO_ON; + } + + /* init basic PC hardware */ + pc_basic_device_init(pcms, isa_bus, x86ms->gsi, &rtc_state, !mc->no_floppy, + 0xff0104); + + /* connect pm stuff to lpc */ + ich9_lpc_pm_init(lpc, x86_machine_is_smm_enabled(x86ms)); + + if (pcms->sata_enabled) { + /* AHCI and SATA devices; for q35, one AHCI controller is built in */ + ahci = pci_create_simple_multifunction(host_bus, + PCI_DEVFN(ICH9_SATA1_DEV, + ICH9_SATA1_FUNC), + true, "ich9-ahci"); + idebus[0] = qdev_get_child_bus(&ahci->qdev, "ide.0"); + idebus[1] = qdev_get_child_bus(&ahci->qdev, "ide.1"); + g_assert(MAX_SATA_PORTS == ahci_get_num_ports(ahci)); + ide_drive_get(hd, ahci_get_num_ports(ahci)); + ahci_ide_create_devs(ahci, hd); + } else { + idebus[0] = idebus[1] = NULL; + } + + if (machine_usb(machine)) { + /* Should we create 6 UHCI according to ich9 spec? */ + ehci_create_ich9_with_companions(host_bus, 0x1d); + } + + if (pcms->smbus_enabled) { + /* TODO: Populate SPD eeprom data. */ + pcms->smbus = ich9_smb_init(host_bus, + PCI_DEVFN(ICH9_SMB_DEV, ICH9_SMB_FUNC), + 0xb100); + smbus_eeprom_init(pcms->smbus, 8, NULL, 0); + } + + pc_cmos_init(pcms, idebus[0], idebus[1], rtc_state); + + /* the remaining devices, whose PCI devfns are assigned automatically */ + pc_vga_init(isa_bus, host_bus); + pc_nic_init(pcmc, isa_bus, host_bus); + + if (machine->nvdimms_state->is_enabled) { + nvdimm_init_acpi_state(machine->nvdimms_state, system_io, + x86_nvdimm_acpi_dsmio, + x86ms->fw_cfg, OBJECT(pcms)); + } +} + +#define DEFINE_Q35_MACHINE(suffix, name, compatfn, optionfn) \ + static void pc_init_##suffix(MachineState *machine) \ + { \ + void (*compat)(MachineState *m) = (compatfn); \ + if (compat) { \ + compat(machine); \ + } \ + pc_q35_init(machine); \ + } \ + DEFINE_PC_MACHINE(suffix, name, pc_init_##suffix, optionfn) + + +static void pc_q35_machine_options(MachineClass *m) +{ + PCMachineClass *pcmc = PC_MACHINE_CLASS(m); + pcmc->default_nic_model = "e1000e"; + pcmc->pci_root_uid = 0; + + m->family = "pc_q35"; + m->desc = "Standard PC (Q35 + ICH9, 2009)"; + m->units_per_default_bus = 1; + m->default_machine_opts = "firmware=bios-256k.bin"; + m->default_display = "std"; + m->default_kernel_irqchip_split = false; + m->no_floppy = 1; + machine_class_allow_dynamic_sysbus_dev(m, TYPE_AMD_IOMMU_DEVICE); + machine_class_allow_dynamic_sysbus_dev(m, TYPE_INTEL_IOMMU_DEVICE); + machine_class_allow_dynamic_sysbus_dev(m, TYPE_RAMFB_DEVICE); + machine_class_allow_dynamic_sysbus_dev(m, TYPE_VMBUS_BRIDGE); + m->max_cpus = 288; +} + +static void pc_q35_6_2_machine_options(MachineClass *m) +{ + PCMachineClass *pcmc = PC_MACHINE_CLASS(m); + pc_q35_machine_options(m); + m->alias = "q35"; + pcmc->default_cpu_version = 1; +} + +DEFINE_Q35_MACHINE(v6_2, "pc-q35-6.2", NULL, + pc_q35_6_2_machine_options); + +static void pc_q35_6_1_machine_options(MachineClass *m) +{ + pc_q35_6_2_machine_options(m); + m->alias = NULL; + compat_props_add(m->compat_props, hw_compat_6_1, hw_compat_6_1_len); + compat_props_add(m->compat_props, pc_compat_6_1, pc_compat_6_1_len); + m->smp_props.prefer_sockets = true; +} + +DEFINE_Q35_MACHINE(v6_1, "pc-q35-6.1", NULL, + pc_q35_6_1_machine_options); + 
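+/* + * Every versioned machine-options function in this file follows the + * same pattern: it first calls the next newer version's options function + * and then layers its own compat properties on top, so an older machine + * type accumulates all hw_compat_* and pc_compat_* arrays newer than its + * release. Schematically, with <ver> and <next> as placeholders rather + * than real symbols: + * + *     static void pc_q35_<ver>_machine_options(MachineClass *m) + *     { + *         pc_q35_<next>_machine_options(m);   <- inherit newer defaults + *         m->alias = NULL;                    <- only the newest is "q35" + *         compat_props_add(m->compat_props, hw_compat_<ver>, + *                          hw_compat_<ver>_len); + *         compat_props_add(m->compat_props, pc_compat_<ver>, + *                          pc_compat_<ver>_len); + *     } + */ + 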
+static void pc_q35_6_0_machine_options(MachineClass *m) +{ + pc_q35_6_1_machine_options(m); + m->alias = NULL; + compat_props_add(m->compat_props, hw_compat_6_0, hw_compat_6_0_len); + compat_props_add(m->compat_props, pc_compat_6_0, pc_compat_6_0_len); +} + +DEFINE_Q35_MACHINE(v6_0, "pc-q35-6.0", NULL, + pc_q35_6_0_machine_options); + +static void pc_q35_5_2_machine_options(MachineClass *m) +{ + pc_q35_6_0_machine_options(m); + m->alias = NULL; + compat_props_add(m->compat_props, hw_compat_5_2, hw_compat_5_2_len); + compat_props_add(m->compat_props, pc_compat_5_2, pc_compat_5_2_len); +} + +DEFINE_Q35_MACHINE(v5_2, "pc-q35-5.2", NULL, + pc_q35_5_2_machine_options); + +static void pc_q35_5_1_machine_options(MachineClass *m) +{ + PCMachineClass *pcmc = PC_MACHINE_CLASS(m); + + pc_q35_5_2_machine_options(m); + m->alias = NULL; + compat_props_add(m->compat_props, hw_compat_5_1, hw_compat_5_1_len); + compat_props_add(m->compat_props, pc_compat_5_1, pc_compat_5_1_len); + pcmc->kvmclock_create_always = false; + pcmc->pci_root_uid = 1; +} + +DEFINE_Q35_MACHINE(v5_1, "pc-q35-5.1", NULL, + pc_q35_5_1_machine_options); + +static void pc_q35_5_0_machine_options(MachineClass *m) +{ + pc_q35_5_1_machine_options(m); + m->alias = NULL; + m->numa_mem_supported = true; + compat_props_add(m->compat_props, hw_compat_5_0, hw_compat_5_0_len); + compat_props_add(m->compat_props, pc_compat_5_0, pc_compat_5_0_len); + m->auto_enable_numa_with_memdev = false; +} + +DEFINE_Q35_MACHINE(v5_0, "pc-q35-5.0", NULL, + pc_q35_5_0_machine_options); + +static void pc_q35_4_2_machine_options(MachineClass *m) +{ + pc_q35_5_0_machine_options(m); + m->alias = NULL; + compat_props_add(m->compat_props, hw_compat_4_2, hw_compat_4_2_len); + compat_props_add(m->compat_props, pc_compat_4_2, pc_compat_4_2_len); +} + +DEFINE_Q35_MACHINE(v4_2, "pc-q35-4.2", NULL, + pc_q35_4_2_machine_options); + +static void pc_q35_4_1_machine_options(MachineClass *m) +{ + pc_q35_4_2_machine_options(m); + m->alias = NULL; + compat_props_add(m->compat_props, hw_compat_4_1, hw_compat_4_1_len); + compat_props_add(m->compat_props, pc_compat_4_1, pc_compat_4_1_len); +} + +DEFINE_Q35_MACHINE(v4_1, "pc-q35-4.1", NULL, + pc_q35_4_1_machine_options); + +static void pc_q35_4_0_1_machine_options(MachineClass *m) +{ + PCMachineClass *pcmc = PC_MACHINE_CLASS(m); + pc_q35_4_1_machine_options(m); + m->alias = NULL; + pcmc->default_cpu_version = CPU_VERSION_LEGACY; + /* + * This is the default machine for the 4.0-stable branch. It is basically + * a 4.0 that doesn't use split irqchip by default. It MUST hence apply the + * 4.0 compat props. 
+ */ + compat_props_add(m->compat_props, hw_compat_4_0, hw_compat_4_0_len); + compat_props_add(m->compat_props, pc_compat_4_0, pc_compat_4_0_len); +} + +DEFINE_Q35_MACHINE(v4_0_1, "pc-q35-4.0.1", NULL, + pc_q35_4_0_1_machine_options); + +static void pc_q35_4_0_machine_options(MachineClass *m) +{ + pc_q35_4_0_1_machine_options(m); + m->default_kernel_irqchip_split = true; + m->alias = NULL; + /* Compat props are applied by the 4.0.1 machine */ +} + +DEFINE_Q35_MACHINE(v4_0, "pc-q35-4.0", NULL, + pc_q35_4_0_machine_options); + +static void pc_q35_3_1_machine_options(MachineClass *m) +{ + PCMachineClass *pcmc = PC_MACHINE_CLASS(m); + + pc_q35_4_0_machine_options(m); + m->default_kernel_irqchip_split = false; + pcmc->do_not_add_smb_acpi = true; + m->smbus_no_migration_support = true; + m->alias = NULL; + pcmc->pvh_enabled = false; + compat_props_add(m->compat_props, hw_compat_3_1, hw_compat_3_1_len); + compat_props_add(m->compat_props, pc_compat_3_1, pc_compat_3_1_len); +} + +DEFINE_Q35_MACHINE(v3_1, "pc-q35-3.1", NULL, + pc_q35_3_1_machine_options); + +static void pc_q35_3_0_machine_options(MachineClass *m) +{ + pc_q35_3_1_machine_options(m); + compat_props_add(m->compat_props, hw_compat_3_0, hw_compat_3_0_len); + compat_props_add(m->compat_props, pc_compat_3_0, pc_compat_3_0_len); +} + +DEFINE_Q35_MACHINE(v3_0, "pc-q35-3.0", NULL, + pc_q35_3_0_machine_options); + +static void pc_q35_2_12_machine_options(MachineClass *m) +{ + pc_q35_3_0_machine_options(m); + compat_props_add(m->compat_props, hw_compat_2_12, hw_compat_2_12_len); + compat_props_add(m->compat_props, pc_compat_2_12, pc_compat_2_12_len); +} + +DEFINE_Q35_MACHINE(v2_12, "pc-q35-2.12", NULL, + pc_q35_2_12_machine_options); + +static void pc_q35_2_11_machine_options(MachineClass *m) +{ + PCMachineClass *pcmc = PC_MACHINE_CLASS(m); + + pc_q35_2_12_machine_options(m); + pcmc->default_nic_model = "e1000"; + compat_props_add(m->compat_props, hw_compat_2_11, hw_compat_2_11_len); + compat_props_add(m->compat_props, pc_compat_2_11, pc_compat_2_11_len); +} + +DEFINE_Q35_MACHINE(v2_11, "pc-q35-2.11", NULL, + pc_q35_2_11_machine_options); + +static void pc_q35_2_10_machine_options(MachineClass *m) +{ + pc_q35_2_11_machine_options(m); + compat_props_add(m->compat_props, hw_compat_2_10, hw_compat_2_10_len); + compat_props_add(m->compat_props, pc_compat_2_10, pc_compat_2_10_len); + m->auto_enable_numa_with_memhp = false; +} + +DEFINE_Q35_MACHINE(v2_10, "pc-q35-2.10", NULL, + pc_q35_2_10_machine_options); + +static void pc_q35_2_9_machine_options(MachineClass *m) +{ + pc_q35_2_10_machine_options(m); + compat_props_add(m->compat_props, hw_compat_2_9, hw_compat_2_9_len); + compat_props_add(m->compat_props, pc_compat_2_9, pc_compat_2_9_len); +} + +DEFINE_Q35_MACHINE(v2_9, "pc-q35-2.9", NULL, + pc_q35_2_9_machine_options); + +static void pc_q35_2_8_machine_options(MachineClass *m) +{ + pc_q35_2_9_machine_options(m); + compat_props_add(m->compat_props, hw_compat_2_8, hw_compat_2_8_len); + compat_props_add(m->compat_props, pc_compat_2_8, pc_compat_2_8_len); +} + +DEFINE_Q35_MACHINE(v2_8, "pc-q35-2.8", NULL, + pc_q35_2_8_machine_options); + +static void pc_q35_2_7_machine_options(MachineClass *m) +{ + pc_q35_2_8_machine_options(m); + m->max_cpus = 255; + compat_props_add(m->compat_props, hw_compat_2_7, hw_compat_2_7_len); + compat_props_add(m->compat_props, pc_compat_2_7, pc_compat_2_7_len); +} + +DEFINE_Q35_MACHINE(v2_7, "pc-q35-2.7", NULL, + pc_q35_2_7_machine_options); + +static void pc_q35_2_6_machine_options(MachineClass *m) +{ + X86MachineClass 
*x86mc = X86_MACHINE_CLASS(m); + PCMachineClass *pcmc = PC_MACHINE_CLASS(m); + + pc_q35_2_7_machine_options(m); + pcmc->legacy_cpu_hotplug = true; + x86mc->fwcfg_dma_enabled = false; + compat_props_add(m->compat_props, hw_compat_2_6, hw_compat_2_6_len); + compat_props_add(m->compat_props, pc_compat_2_6, pc_compat_2_6_len); +} + +DEFINE_Q35_MACHINE(v2_6, "pc-q35-2.6", NULL, + pc_q35_2_6_machine_options); + +static void pc_q35_2_5_machine_options(MachineClass *m) +{ + X86MachineClass *x86mc = X86_MACHINE_CLASS(m); + + pc_q35_2_6_machine_options(m); + x86mc->save_tsc_khz = false; + m->legacy_fw_cfg_order = 1; + compat_props_add(m->compat_props, hw_compat_2_5, hw_compat_2_5_len); + compat_props_add(m->compat_props, pc_compat_2_5, pc_compat_2_5_len); +} + +DEFINE_Q35_MACHINE(v2_5, "pc-q35-2.5", NULL, + pc_q35_2_5_machine_options); + +static void pc_q35_2_4_machine_options(MachineClass *m) +{ + PCMachineClass *pcmc = PC_MACHINE_CLASS(m); + + pc_q35_2_5_machine_options(m); + m->hw_version = "2.4.0"; + pcmc->broken_reserved_end = true; + compat_props_add(m->compat_props, hw_compat_2_4, hw_compat_2_4_len); + compat_props_add(m->compat_props, pc_compat_2_4, pc_compat_2_4_len); +} + +DEFINE_Q35_MACHINE(v2_4, "pc-q35-2.4", NULL, + pc_q35_2_4_machine_options); diff --git a/hw/i386/pc_sysfw.c b/hw/i386/pc_sysfw.c new file mode 100644 index 000000000..c8b17af95 --- /dev/null +++ b/hw/i386/pc_sysfw.c @@ -0,0 +1,262 @@ +/* + * QEMU PC System Firmware + * + * Copyright (c) 2003-2004 Fabrice Bellard + * Copyright (c) 2011-2012 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include "qemu-common.h" +#include "qapi/error.h" +#include "sysemu/block-backend.h" +#include "qemu/error-report.h" +#include "qemu/option.h" +#include "qemu/units.h" +#include "hw/sysbus.h" +#include "hw/i386/x86.h" +#include "hw/i386/pc.h" +#include "hw/loader.h" +#include "hw/qdev-properties.h" +#include "hw/block/flash.h" +#include "sysemu/kvm.h" +#include "sev.h" + +#define FLASH_SECTOR_SIZE 4096 + +static void pc_isa_bios_init(MemoryRegion *rom_memory, + MemoryRegion *flash_mem, + int ram_size) +{ + int isa_bios_size; + MemoryRegion *isa_bios; + uint64_t flash_size; + void *flash_ptr, *isa_bios_ptr; + + flash_size = memory_region_size(flash_mem); + + /* map the last 128KB of the BIOS in ISA space */ + isa_bios_size = MIN(flash_size, 128 * KiB); + isa_bios = g_malloc(sizeof(*isa_bios)); + memory_region_init_ram(isa_bios, NULL, "isa-bios", isa_bios_size, + &error_fatal); + memory_region_add_subregion_overlap(rom_memory, + 0x100000 - isa_bios_size, + isa_bios, + 1); + + /* copy ISA rom image from top of flash memory */ + flash_ptr = memory_region_get_ram_ptr(flash_mem); + isa_bios_ptr = memory_region_get_ram_ptr(isa_bios); + memcpy(isa_bios_ptr, + ((uint8_t*)flash_ptr) + (flash_size - isa_bios_size), + isa_bios_size); + + memory_region_set_readonly(isa_bios, true); +} + +static PFlashCFI01 *pc_pflash_create(PCMachineState *pcms, + const char *name, + const char *alias_prop_name) +{ + DeviceState *dev = qdev_new(TYPE_PFLASH_CFI01); + + qdev_prop_set_uint64(dev, "sector-length", FLASH_SECTOR_SIZE); + qdev_prop_set_uint8(dev, "width", 1); + qdev_prop_set_string(dev, "name", name); + object_property_add_child(OBJECT(pcms), name, OBJECT(dev)); + object_property_add_alias(OBJECT(pcms), alias_prop_name, + OBJECT(dev), "drive"); + /* + * The returned reference is tied to the child property and + * will be removed with object_unparent. + */ + object_unref(OBJECT(dev)); + return PFLASH_CFI01(dev); +} + +void pc_system_flash_create(PCMachineState *pcms) +{ + PCMachineClass *pcmc = PC_MACHINE_GET_CLASS(pcms); + + if (pcmc->pci_enabled) { + pcms->flash[0] = pc_pflash_create(pcms, "system.flash0", + "pflash0"); + pcms->flash[1] = pc_pflash_create(pcms, "system.flash1", + "pflash1"); + } +} + +void pc_system_flash_cleanup_unused(PCMachineState *pcms) +{ + char *prop_name; + int i; + Object *dev_obj; + + assert(PC_MACHINE_GET_CLASS(pcms)->pci_enabled); + + for (i = 0; i < ARRAY_SIZE(pcms->flash); i++) { + dev_obj = OBJECT(pcms->flash[i]); + if (!object_property_get_bool(dev_obj, "realized", &error_abort)) { + prop_name = g_strdup_printf("pflash%d", i); + object_property_del(OBJECT(pcms), prop_name); + g_free(prop_name); + object_unparent(dev_obj); + pcms->flash[i] = NULL; + } + } +} + +/* + * Map the pcms->flash[] from 4GiB downward, and realize. + * Map them in descending order, i.e. pcms->flash[0] at the top, + * without gaps. + * Stop at the first pcms->flash[] entry lacking a block backend. + * Set each flash's size from its block backend. Fatal error if the + * size isn't a non-zero multiple of 4KiB, or the total size exceeds + * pcms->max_fw_size. + * + * If pcms->flash[0] has a block backend, its memory is passed to + * pc_isa_bios_init(). Merging several flash devices for isa-bios is + * not supported.
+ */ +static void pc_system_flash_map(PCMachineState *pcms, + MemoryRegion *rom_memory) +{ + hwaddr total_size = 0; + int i; + BlockBackend *blk; + int64_t size; + PFlashCFI01 *system_flash; + MemoryRegion *flash_mem; + void *flash_ptr; + int flash_size; + int ret; + + assert(PC_MACHINE_GET_CLASS(pcms)->pci_enabled); + + for (i = 0; i < ARRAY_SIZE(pcms->flash); i++) { + system_flash = pcms->flash[i]; + blk = pflash_cfi01_get_blk(system_flash); + if (!blk) { + break; + } + size = blk_getlength(blk); + if (size < 0) { + error_report("can't get size of block device %s: %s", + blk_name(blk), strerror(-size)); + exit(1); + } + if (size == 0 || !QEMU_IS_ALIGNED(size, FLASH_SECTOR_SIZE)) { + error_report("system firmware block device %s has invalid size " + "%" PRId64, + blk_name(blk), size); + info_report("its size must be a non-zero multiple of 0x%x", + FLASH_SECTOR_SIZE); + exit(1); + } + if ((hwaddr)size != size + || total_size > HWADDR_MAX - size + || total_size + size > pcms->max_fw_size) { + error_report("combined size of system firmware exceeds " + "%" PRIu64 " bytes", + pcms->max_fw_size); + exit(1); + } + + total_size += size; + qdev_prop_set_uint32(DEVICE(system_flash), "num-blocks", + size / FLASH_SECTOR_SIZE); + sysbus_realize_and_unref(SYS_BUS_DEVICE(system_flash), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(system_flash), 0, + 0x100000000ULL - total_size); + + if (i == 0) { + flash_mem = pflash_cfi01_get_memory(system_flash); + pc_isa_bios_init(rom_memory, flash_mem, size); + + /* Encrypt the pflash boot ROM */ + if (sev_enabled()) { + flash_ptr = memory_region_get_ram_ptr(flash_mem); + flash_size = memory_region_size(flash_mem); + /* + * OVMF places a GUIDed structures in the flash, so + * search for them + */ + pc_system_parse_ovmf_flash(flash_ptr, flash_size); + + ret = sev_es_save_reset_vector(flash_ptr, flash_size); + if (ret) { + error_report("failed to locate and/or save reset vector"); + exit(1); + } + + sev_encrypt_flash(flash_ptr, flash_size, &error_fatal); + } + } + } +} + +void pc_system_firmware_init(PCMachineState *pcms, + MemoryRegion *rom_memory) +{ + PCMachineClass *pcmc = PC_MACHINE_GET_CLASS(pcms); + int i; + BlockBackend *pflash_blk[ARRAY_SIZE(pcms->flash)]; + + if (!pcmc->pci_enabled) { + x86_bios_rom_init(MACHINE(pcms), "bios.bin", rom_memory, true); + return; + } + + /* Map legacy -drive if=pflash to machine properties */ + for (i = 0; i < ARRAY_SIZE(pcms->flash); i++) { + pflash_cfi01_legacy_drive(pcms->flash[i], + drive_get(IF_PFLASH, 0, i)); + pflash_blk[i] = pflash_cfi01_get_blk(pcms->flash[i]); + } + + /* Reject gaps */ + for (i = 1; i < ARRAY_SIZE(pcms->flash); i++) { + if (pflash_blk[i] && !pflash_blk[i - 1]) { + error_report("pflash%d requires pflash%d", i, i - 1); + exit(1); + } + } + + if (!pflash_blk[0]) { + /* Machine property pflash0 not set, use ROM mode */ + x86_bios_rom_init(MACHINE(pcms), "bios.bin", rom_memory, false); + } else { + if (kvm_enabled() && !kvm_readonly_mem_enabled()) { + /* + * Older KVM cannot execute from device memory. So, flash + * memory cannot be used unless the readonly memory kvm + * capability is present. 
+ */ + error_report("pflash with kvm requires KVM readonly memory support"); + exit(1); + } + + pc_system_flash_map(pcms, rom_memory); + } + + pc_system_flash_cleanup_unused(pcms); +} diff --git a/hw/i386/pc_sysfw_ovmf-stubs.c b/hw/i386/pc_sysfw_ovmf-stubs.c new file mode 100644 index 000000000..aabe78b27 --- /dev/null +++ b/hw/i386/pc_sysfw_ovmf-stubs.c @@ -0,0 +1,26 @@ +/* + * QEMU PC System Firmware (OVMF stubs) + * + * Copyright (c) 2021 Red Hat, Inc. + * + * Author: + * Philippe Mathieu-Daudé + * + * SPDX-License-Identifier: GPL-2.0-or-later + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "hw/i386/pc.h" + +bool pc_system_ovmf_table_find(const char *entry, uint8_t **data, int *data_len) +{ + g_assert_not_reached(); +} + +void pc_system_parse_ovmf_flash(uint8_t *flash_ptr, size_t flash_size) +{ + g_assert_not_reached(); +} diff --git a/hw/i386/pc_sysfw_ovmf.c b/hw/i386/pc_sysfw_ovmf.c new file mode 100644 index 000000000..f4dd92c58 --- /dev/null +++ b/hw/i386/pc_sysfw_ovmf.c @@ -0,0 +1,151 @@ +/* + * QEMU PC System Firmware (OVMF specific) + * + * Copyright (c) 2003-2004 Fabrice Bellard + * Copyright (c) 2011-2012 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "hw/i386/pc.h" +#include "cpu.h" + +#define OVMF_TABLE_FOOTER_GUID "96b582de-1fb2-45f7-baea-a366c55a082d" + +static bool ovmf_flash_parsed; +static uint8_t *ovmf_table; +static int ovmf_table_len; + +void pc_system_parse_ovmf_flash(uint8_t *flash_ptr, size_t flash_size) +{ + uint8_t *ptr; + QemuUUID guid; + int tot_len; + + /* should only be called once */ + if (ovmf_flash_parsed) { + return; + } + + ovmf_flash_parsed = true; + + if (flash_size < TARGET_PAGE_SIZE) { + return; + } + + /* + * if this is OVMF there will be a table footer + * guid 48 bytes before the end of the flash file. If it's + * not found, silently abort the flash parsing. 
+ */ + qemu_uuid_parse(OVMF_TABLE_FOOTER_GUID, &guid); + guid = qemu_uuid_bswap(guid); /* guids are LE */ + ptr = flash_ptr + flash_size - 48; + if (!qemu_uuid_is_equal((QemuUUID *)ptr, &guid)) { + return; + } + + /* if found, just before is two byte table length */ + ptr -= sizeof(uint16_t); + tot_len = le16_to_cpu(*(uint16_t *)ptr) - sizeof(guid) - sizeof(uint16_t); + + if (tot_len <= 0) { + return; + } + + ovmf_table = g_malloc(tot_len); + ovmf_table_len = tot_len; + + /* + * ptr is the foot of the table, so copy it all to the newly + * allocated ovmf_table and then set the ovmf_table pointer + * to the table foot + */ + memcpy(ovmf_table, ptr - tot_len, tot_len); + ovmf_table += tot_len; +} + +/** + * pc_system_ovmf_table_find - Find the data associated with an entry in OVMF's + * reset vector GUIDed table. + * + * @entry: GUID string of the entry to lookup + * @data: Filled with a pointer to the entry's value (if not NULL) + * @data_len: Filled with the length of the entry's value (if not NULL). Pass + * NULL here if the length of data is known. + * + * Return: true if the entry was found in the OVMF table; false otherwise. + */ +bool pc_system_ovmf_table_find(const char *entry, uint8_t **data, + int *data_len) +{ + uint8_t *ptr = ovmf_table; + int tot_len = ovmf_table_len; + QemuUUID entry_guid; + + assert(ovmf_flash_parsed); + + if (qemu_uuid_parse(entry, &entry_guid) < 0) { + return false; + } + + if (!ptr) { + return false; + } + + entry_guid = qemu_uuid_bswap(entry_guid); /* guids are LE */ + while (tot_len >= sizeof(QemuUUID) + sizeof(uint16_t)) { + int len; + QemuUUID *guid; + + /* + * The data structure is + * arbitrary length data + * 2 byte length of entire entry + * 16 byte guid + */ + guid = (QemuUUID *)(ptr - sizeof(QemuUUID)); + len = le16_to_cpu(*(uint16_t *)(ptr - sizeof(QemuUUID) - + sizeof(uint16_t))); + + /* + * just in case the table is corrupt, wouldn't want to spin in + * the zero case + */ + if (len < sizeof(QemuUUID) + sizeof(uint16_t)) { + return false; + } else if (len > tot_len) { + return false; + } + + ptr -= len; + tot_len -= len; + if (qemu_uuid_is_equal(guid, &entry_guid)) { + if (data) { + *data = ptr; + } + if (data_len) { + *data_len = len - sizeof(QemuUUID) - sizeof(uint16_t); + } + return true; + } + } + return false; +} diff --git a/hw/i386/port92.c b/hw/i386/port92.c new file mode 100644 index 000000000..e1379a4f9 --- /dev/null +++ b/hw/i386/port92.c @@ -0,0 +1,127 @@ +/* + * QEMU I/O port 0x92 (System Control Port A, to handle Fast Gate A20) + * + * Copyright (c) 2003-2004 Fabrice Bellard + * + * SPDX-License-Identifier: MIT + */ + +#include "qemu/osdep.h" +#include "sysemu/runstate.h" +#include "migration/vmstate.h" +#include "hw/irq.h" +#include "hw/i386/pc.h" +#include "trace.h" +#include "qom/object.h" + +OBJECT_DECLARE_SIMPLE_TYPE(Port92State, PORT92) + +struct Port92State { + ISADevice parent_obj; + + MemoryRegion io; + uint8_t outport; + qemu_irq a20_out; +}; + +static void port92_write(void *opaque, hwaddr addr, uint64_t val, + unsigned size) +{ + Port92State *s = opaque; + int oldval = s->outport; + + trace_port92_write(val); + s->outport = val; + qemu_set_irq(s->a20_out, (val >> 1) & 1); + if ((val & 1) && !(oldval & 1)) { + qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); + } +} + +static uint64_t port92_read(void *opaque, hwaddr addr, + unsigned size) +{ + Port92State *s = opaque; + uint32_t ret; + + ret = s->outport; + trace_port92_read(ret); + + return ret; +} + +static const VMStateDescription vmstate_port92_isa = { + .name 
= "port92", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT8(outport, Port92State), + VMSTATE_END_OF_LIST() + } +}; + +static void port92_reset(DeviceState *d) +{ + Port92State *s = PORT92(d); + + s->outport &= ~1; +} + +static const MemoryRegionOps port92_ops = { + .read = port92_read, + .write = port92_write, + .impl = { + .min_access_size = 1, + .max_access_size = 1, + }, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static void port92_initfn(Object *obj) +{ + Port92State *s = PORT92(obj); + + memory_region_init_io(&s->io, OBJECT(s), &port92_ops, s, "port92", 1); + + s->outport = 0; + + qdev_init_gpio_out_named(DEVICE(obj), &s->a20_out, PORT92_A20_LINE, 1); +} + +static void port92_realizefn(DeviceState *dev, Error **errp) +{ + ISADevice *isadev = ISA_DEVICE(dev); + Port92State *s = PORT92(dev); + + isa_register_ioport(isadev, &s->io, 0x92); +} + +static void port92_class_initfn(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = port92_realizefn; + dc->reset = port92_reset; + dc->vmsd = &vmstate_port92_isa; + /* + * Reason: unlike ordinary ISA devices, this one needs additional + * wiring: its A20 output line needs to be wired up with + * qdev_connect_gpio_out_named(). + */ + dc->user_creatable = false; +} + +static const TypeInfo port92_info = { + .name = TYPE_PORT92, + .parent = TYPE_ISA_DEVICE, + .instance_size = sizeof(Port92State), + .instance_init = port92_initfn, + .class_init = port92_class_initfn, +}; + +static void port92_register_types(void) +{ + type_register_static(&port92_info); +} + +type_init(port92_register_types) diff --git a/hw/i386/sgx-epc.c b/hw/i386/sgx-epc.c new file mode 100644 index 000000000..e508827e7 --- /dev/null +++ b/hw/i386/sgx-epc.c @@ -0,0 +1,185 @@ +/* + * SGX EPC device + * + * Copyright (C) 2019 Intel Corporation + * + * Authors: + * Sean Christopherson + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ +#include "qemu/osdep.h" +#include "hw/i386/pc.h" +#include "hw/i386/sgx-epc.h" +#include "hw/mem/memory-device.h" +#include "hw/qdev-properties.h" +#include "qapi/error.h" +#include "qapi/visitor.h" +#include "target/i386/cpu.h" +#include "exec/address-spaces.h" + +static Property sgx_epc_properties[] = { + DEFINE_PROP_UINT64(SGX_EPC_ADDR_PROP, SGXEPCDevice, addr, 0), + DEFINE_PROP_LINK(SGX_EPC_MEMDEV_PROP, SGXEPCDevice, hostmem, + TYPE_MEMORY_BACKEND_EPC, HostMemoryBackendEpc *), + DEFINE_PROP_END_OF_LIST(), +}; + +static void sgx_epc_get_size(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + Error *local_err = NULL; + uint64_t value; + + value = memory_device_get_region_size(MEMORY_DEVICE(obj), &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + + visit_type_uint64(v, name, &value, errp); +} + +static void sgx_epc_init(Object *obj) +{ + object_property_add(obj, SGX_EPC_SIZE_PROP, "uint64", sgx_epc_get_size, + NULL, NULL, NULL); +} + +static void sgx_epc_realize(DeviceState *dev, Error **errp) +{ + PCMachineState *pcms = PC_MACHINE(qdev_get_machine()); + X86MachineState *x86ms = X86_MACHINE(pcms); + MemoryDeviceState *md = MEMORY_DEVICE(dev); + SGXEPCState *sgx_epc = &pcms->sgx_epc; + SGXEPCDevice *epc = SGX_EPC(dev); + HostMemoryBackend *hostmem; + const char *path; + + if (x86ms->boot_cpus != 0) { + error_setg(errp, "'" TYPE_SGX_EPC "' can't be created after vCPUs," + "e.g. 
via -device"); + return; + } + + if (!epc->hostmem) { + error_setg(errp, "'" SGX_EPC_MEMDEV_PROP "' property is not set"); + return; + } + hostmem = MEMORY_BACKEND(epc->hostmem); + if (host_memory_backend_is_mapped(hostmem)) { + path = object_get_canonical_path_component(OBJECT(hostmem)); + error_setg(errp, "can't use already busy memdev: %s", path); + return; + } + + epc->addr = sgx_epc->base + sgx_epc->size; + + memory_region_add_subregion(&sgx_epc->mr, epc->addr - sgx_epc->base, + host_memory_backend_get_memory(hostmem)); + + host_memory_backend_set_mapped(hostmem, true); + + sgx_epc->sections = g_renew(SGXEPCDevice *, sgx_epc->sections, + sgx_epc->nr_sections + 1); + sgx_epc->sections[sgx_epc->nr_sections++] = epc; + + sgx_epc->size += memory_device_get_region_size(md, errp); +} + +static void sgx_epc_unrealize(DeviceState *dev) +{ + SGXEPCDevice *epc = SGX_EPC(dev); + HostMemoryBackend *hostmem = MEMORY_BACKEND(epc->hostmem); + + host_memory_backend_set_mapped(hostmem, false); +} + +static uint64_t sgx_epc_md_get_addr(const MemoryDeviceState *md) +{ + const SGXEPCDevice *epc = SGX_EPC(md); + + return epc->addr; +} + +static void sgx_epc_md_set_addr(MemoryDeviceState *md, uint64_t addr, + Error **errp) +{ + object_property_set_uint(OBJECT(md), SGX_EPC_ADDR_PROP, addr, errp); +} + +static uint64_t sgx_epc_md_get_plugged_size(const MemoryDeviceState *md, + Error **errp) +{ + return 0; +} + +static MemoryRegion *sgx_epc_md_get_memory_region(MemoryDeviceState *md, + Error **errp) +{ + SGXEPCDevice *epc = SGX_EPC(md); + HostMemoryBackend *hostmem; + + if (!epc->hostmem) { + error_setg(errp, "'" SGX_EPC_MEMDEV_PROP "' property must be set"); + return NULL; + } + + hostmem = MEMORY_BACKEND(epc->hostmem); + return host_memory_backend_get_memory(hostmem); +} + +static void sgx_epc_md_fill_device_info(const MemoryDeviceState *md, + MemoryDeviceInfo *info) +{ + SgxEPCDeviceInfo *se = g_new0(SgxEPCDeviceInfo, 1); + SGXEPCDevice *epc = SGX_EPC(md); + + se->memaddr = epc->addr; + se->size = object_property_get_uint(OBJECT(epc), SGX_EPC_SIZE_PROP, + NULL); + se->memdev = object_get_canonical_path(OBJECT(epc->hostmem)); + + info->u.sgx_epc.data = se; + info->type = MEMORY_DEVICE_INFO_KIND_SGX_EPC; +} + +static void sgx_epc_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + MemoryDeviceClass *mdc = MEMORY_DEVICE_CLASS(oc); + + dc->hotpluggable = false; + dc->realize = sgx_epc_realize; + dc->unrealize = sgx_epc_unrealize; + dc->desc = "SGX EPC section"; + dc->user_creatable = false; + device_class_set_props(dc, sgx_epc_properties); + + mdc->get_addr = sgx_epc_md_get_addr; + mdc->set_addr = sgx_epc_md_set_addr; + mdc->get_plugged_size = sgx_epc_md_get_plugged_size; + mdc->get_memory_region = sgx_epc_md_get_memory_region; + mdc->fill_device_info = sgx_epc_md_fill_device_info; +} + +static TypeInfo sgx_epc_info = { + .name = TYPE_SGX_EPC, + .parent = TYPE_DEVICE, + .instance_size = sizeof(SGXEPCDevice), + .instance_init = sgx_epc_init, + .class_init = sgx_epc_class_init, + .class_size = sizeof(DeviceClass), + .interfaces = (InterfaceInfo[]) { + { TYPE_MEMORY_DEVICE }, + { } + }, +}; + +static void sgx_epc_register_types(void) +{ + type_register_static(&sgx_epc_info); +} + +type_init(sgx_epc_register_types) diff --git a/hw/i386/sgx-stub.c b/hw/i386/sgx-stub.c new file mode 100644 index 000000000..c9b379e66 --- /dev/null +++ b/hw/i386/sgx-stub.c @@ -0,0 +1,34 @@ +#include "qemu/osdep.h" +#include "monitor/monitor.h" +#include "monitor/hmp-target.h" +#include "hw/i386/pc.h" 
+#include "hw/i386/sgx-epc.h" +#include "qapi/error.h" +#include "qapi/qapi-commands-misc-target.h" + +SGXInfo *qmp_query_sgx(Error **errp) +{ + error_setg(errp, "SGX support is not compiled in"); + return NULL; +} + +SGXInfo *qmp_query_sgx_capabilities(Error **errp) +{ + error_setg(errp, "SGX support is not compiled in"); + return NULL; +} + +void hmp_info_sgx(Monitor *mon, const QDict *qdict) +{ + monitor_printf(mon, "SGX is not available in this QEMU\n"); +} + +void pc_machine_init_sgx_epc(PCMachineState *pcms) +{ + memset(&pcms->sgx_epc, 0, sizeof(SGXEPCState)); +} + +bool sgx_epc_get_section(int section_nr, uint64_t *addr, uint64_t *size) +{ + g_assert_not_reached(); +} diff --git a/hw/i386/sgx.c b/hw/i386/sgx.c new file mode 100644 index 000000000..8fef3dd8f --- /dev/null +++ b/hw/i386/sgx.c @@ -0,0 +1,243 @@ +/* + * SGX common code + * + * Copyright (C) 2021 Intel Corporation + * + * Authors: + * Yang Zhong + * Sean Christopherson + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ +#include "qemu/osdep.h" +#include "hw/i386/pc.h" +#include "hw/i386/sgx-epc.h" +#include "hw/mem/memory-device.h" +#include "monitor/qdev.h" +#include "monitor/monitor.h" +#include "monitor/hmp-target.h" +#include "qapi/error.h" +#include "qapi/qapi-commands-misc-target.h" +#include "exec/address-spaces.h" +#include "sysemu/hw_accel.h" +#include "sysemu/reset.h" +#include + +#define SGX_MAX_EPC_SECTIONS 8 +#define SGX_CPUID_EPC_INVALID 0x0 + +/* A valid EPC section. */ +#define SGX_CPUID_EPC_SECTION 0x1 +#define SGX_CPUID_EPC_MASK 0xF + +#define SGX_MAGIC 0xA4 +#define SGX_IOC_VEPC_REMOVE_ALL _IO(SGX_MAGIC, 0x04) + +#define RETRY_NUM 2 + +static uint64_t sgx_calc_section_metric(uint64_t low, uint64_t high) +{ + return (low & MAKE_64BIT_MASK(12, 20)) + + ((high & MAKE_64BIT_MASK(0, 20)) << 32); +} + +static uint64_t sgx_calc_host_epc_section_size(void) +{ + uint32_t i, type; + uint32_t eax, ebx, ecx, edx; + uint64_t size = 0; + + for (i = 0; i < SGX_MAX_EPC_SECTIONS; i++) { + host_cpuid(0x12, i + 2, &eax, &ebx, &ecx, &edx); + + type = eax & SGX_CPUID_EPC_MASK; + if (type == SGX_CPUID_EPC_INVALID) { + break; + } + + if (type != SGX_CPUID_EPC_SECTION) { + break; + } + + size += sgx_calc_section_metric(ecx, edx); + } + + return size; +} + +static void sgx_epc_reset(void *opaque) +{ + PCMachineState *pcms = PC_MACHINE(qdev_get_machine()); + HostMemoryBackend *hostmem; + SGXEPCDevice *epc; + int failures; + int fd, i, j, r; + static bool warned = false; + + /* + * The second pass is needed to remove SECS pages that could not + * be removed during the first. 
+ */ + for (i = 0; i < RETRY_NUM; i++) { + failures = 0; + for (j = 0; j < pcms->sgx_epc.nr_sections; j++) { + epc = pcms->sgx_epc.sections[j]; + hostmem = MEMORY_BACKEND(epc->hostmem); + fd = memory_region_get_fd(host_memory_backend_get_memory(hostmem)); + + r = ioctl(fd, SGX_IOC_VEPC_REMOVE_ALL); + if (r == -ENOTTY && !warned) { + warned = true; + warn_report("kernel does not support SGX_IOC_VEPC_REMOVE_ALL"); + warn_report("SGX might operate incorrectly in the guest after reset"); + break; + } else if (r > 0) { + /* SECS pages remain */ + failures++; + if (i == 1) { + error_report("cannot reset vEPC section %d", j); + } + } + } + if (!failures) { + break; + } + } +} + +SGXInfo *qmp_query_sgx_capabilities(Error **errp) +{ + SGXInfo *info = NULL; + uint32_t eax, ebx, ecx, edx; + + int fd = qemu_open_old("/dev/sgx_vepc", O_RDWR); + if (fd < 0) { + error_setg(errp, "SGX is not enabled in KVM"); + return NULL; + } + + info = g_new0(SGXInfo, 1); + host_cpuid(0x7, 0, &eax, &ebx, &ecx, &edx); + + info->sgx = ebx & (1U << 2) ? true : false; + info->flc = ecx & (1U << 30) ? true : false; + + host_cpuid(0x12, 0, &eax, &ebx, &ecx, &edx); + info->sgx1 = eax & (1U << 0) ? true : false; + info->sgx2 = eax & (1U << 1) ? true : false; + + info->section_size = sgx_calc_host_epc_section_size(); + + close(fd); + + return info; +} + +SGXInfo *qmp_query_sgx(Error **errp) +{ + SGXInfo *info = NULL; + X86MachineState *x86ms; + PCMachineState *pcms = + (PCMachineState *)object_dynamic_cast(qdev_get_machine(), + TYPE_PC_MACHINE); + if (!pcms) { + error_setg(errp, "SGX is only supported on PC machines"); + return NULL; + } + + x86ms = X86_MACHINE(pcms); + if (!x86ms->sgx_epc_list) { + error_setg(errp, "No EPC regions defined, SGX not available"); + return NULL; + } + + SGXEPCState *sgx_epc = &pcms->sgx_epc; + info = g_new0(SGXInfo, 1); + + info->sgx = true; + info->sgx1 = true; + info->sgx2 = true; + info->flc = true; + info->section_size = sgx_epc->size; + + return info; +} + +void hmp_info_sgx(Monitor *mon, const QDict *qdict) +{ + Error *err = NULL; + g_autoptr(SGXInfo) info = qmp_query_sgx(&err); + + if (err) { + error_report_err(err); + return; + } + monitor_printf(mon, "SGX support: %s\n", + info->sgx ? "enabled" : "disabled"); + monitor_printf(mon, "SGX1 support: %s\n", + info->sgx1 ? "enabled" : "disabled"); + monitor_printf(mon, "SGX2 support: %s\n", + info->sgx2 ? "enabled" : "disabled"); + monitor_printf(mon, "FLC support: %s\n", + info->flc ? 
"enabled" : "disabled"); + monitor_printf(mon, "size: %" PRIu64 "\n", + info->section_size); +} + +bool sgx_epc_get_section(int section_nr, uint64_t *addr, uint64_t *size) +{ + PCMachineState *pcms = PC_MACHINE(qdev_get_machine()); + SGXEPCDevice *epc; + + if (pcms->sgx_epc.size == 0 || pcms->sgx_epc.nr_sections <= section_nr) { + return true; + } + + epc = pcms->sgx_epc.sections[section_nr]; + + *addr = epc->addr; + *size = memory_device_get_region_size(MEMORY_DEVICE(epc), &error_fatal); + + return false; +} + +void pc_machine_init_sgx_epc(PCMachineState *pcms) +{ + SGXEPCState *sgx_epc = &pcms->sgx_epc; + X86MachineState *x86ms = X86_MACHINE(pcms); + SgxEPCList *list = NULL; + Object *obj; + + memset(sgx_epc, 0, sizeof(SGXEPCState)); + if (!x86ms->sgx_epc_list) { + return; + } + + sgx_epc->base = 0x100000000ULL + x86ms->above_4g_mem_size; + + memory_region_init(&sgx_epc->mr, OBJECT(pcms), "sgx-epc", UINT64_MAX); + memory_region_add_subregion(get_system_memory(), sgx_epc->base, + &sgx_epc->mr); + + for (list = x86ms->sgx_epc_list; list; list = list->next) { + obj = object_new("sgx-epc"); + + /* set the memdev link with memory backend */ + object_property_parse(obj, SGX_EPC_MEMDEV_PROP, list->value->memdev, + &error_fatal); + object_property_set_bool(obj, "realized", true, &error_fatal); + object_unref(obj); + } + + if ((sgx_epc->base + sgx_epc->size) < sgx_epc->base) { + error_report("Size of all 'sgx-epc' =0x%"PRIu64" causes EPC to wrap", + sgx_epc->size); + exit(EXIT_FAILURE); + } + + memory_region_set_size(&sgx_epc->mr, sgx_epc->size); + + /* register the reset callback for sgx epc */ + qemu_register_reset(sgx_epc_reset, NULL); +} diff --git a/hw/i386/trace-events b/hw/i386/trace-events new file mode 100644 index 000000000..5bf7e52bf --- /dev/null +++ b/hw/i386/trace-events @@ -0,0 +1,121 @@ +# See docs/devel/tracing.rst for syntax documentation. 
+ +# x86-iommu.c +x86_iommu_iec_notify(bool global, uint32_t index, uint32_t mask) "Notify IEC invalidation: global=%d index=%" PRIu32 " mask=%" PRIu32 + +# intel_iommu.c +vtd_inv_desc(const char *type, uint64_t hi, uint64_t lo) "invalidate desc type %s high 0x%"PRIx64" low 0x%"PRIx64 +vtd_inv_desc_cc_domain(uint16_t domain) "context invalidate domain 0x%"PRIx16 +vtd_inv_desc_cc_global(void) "context invalidate globally" +vtd_inv_desc_cc_device(uint8_t bus, uint8_t dev, uint8_t fn) "context invalidate device %02"PRIx8":%02"PRIx8".%02"PRIx8 +vtd_inv_desc_cc_devices(uint16_t sid, uint16_t fmask) "context invalidate devices sid 0x%"PRIx16" fmask 0x%"PRIx16 +vtd_inv_desc_iotlb_global(void) "iotlb invalidate global" +vtd_inv_desc_iotlb_domain(uint16_t domain) "iotlb invalidate whole domain 0x%"PRIx16 +vtd_inv_desc_iotlb_pages(uint16_t domain, uint64_t addr, uint8_t mask) "iotlb invalidate domain 0x%"PRIx16" addr 0x%"PRIx64" mask 0x%"PRIx8 +vtd_inv_desc_wait_sw(uint64_t addr, uint32_t data) "wait invalidate status write addr 0x%"PRIx64" data 0x%"PRIx32 +vtd_inv_desc_wait_irq(const char *msg) "%s" +vtd_inv_desc_wait_write_fail(uint64_t hi, uint64_t lo) "write fail for wait desc hi 0x%"PRIx64" lo 0x%"PRIx64 +vtd_inv_desc_iec(uint32_t granularity, uint32_t index, uint32_t mask) "granularity 0x%"PRIx32" index 0x%"PRIx32" mask 0x%"PRIx32 +vtd_inv_qi_enable(bool enable) "enabled %d" +vtd_inv_qi_setup(uint64_t addr, int size) "addr 0x%"PRIx64" size %d" +vtd_inv_qi_head(uint16_t head) "read head %d" +vtd_inv_qi_tail(uint16_t head) "write tail %d" +vtd_inv_qi_fetch(void) "" +vtd_context_cache_reset(void) "" +vtd_re_not_present(uint8_t bus) "Root entry bus %"PRIu8" not present" +vtd_ce_not_present(uint8_t bus, uint8_t devfn) "Context entry bus %"PRIu8" devfn %"PRIu8" not present" +vtd_iotlb_page_hit(uint16_t sid, uint64_t addr, uint64_t slpte, uint16_t domain) "IOTLB page hit sid 0x%"PRIx16" iova 0x%"PRIx64" slpte 0x%"PRIx64" domain 0x%"PRIx16 +vtd_iotlb_page_update(uint16_t sid, uint64_t addr, uint64_t slpte, uint16_t domain) "IOTLB page update sid 0x%"PRIx16" iova 0x%"PRIx64" slpte 0x%"PRIx64" domain 0x%"PRIx16 +vtd_iotlb_cc_hit(uint8_t bus, uint8_t devfn, uint64_t high, uint64_t low, uint32_t gen) "IOTLB context hit bus 0x%"PRIx8" devfn 0x%"PRIx8" high 0x%"PRIx64" low 0x%"PRIx64" gen %"PRIu32 +vtd_iotlb_cc_update(uint8_t bus, uint8_t devfn, uint64_t high, uint64_t low, uint32_t gen1, uint32_t gen2) "IOTLB context update bus 0x%"PRIx8" devfn 0x%"PRIx8" high 0x%"PRIx64" low 0x%"PRIx64" gen %"PRIu32" -> gen %"PRIu32 +vtd_iotlb_reset(const char *reason) "IOTLB reset (reason: %s)" +vtd_fault_disabled(void) "Fault processing disabled for context entry" +vtd_replay_ce_valid(const char *mode, uint8_t bus, uint8_t dev, uint8_t fn, uint16_t domain, uint64_t hi, uint64_t lo) "%s: replay valid context device %02"PRIx8":%02"PRIx8".%02"PRIx8" domain 0x%"PRIx16" hi 0x%"PRIx64" lo 0x%"PRIx64 +vtd_replay_ce_invalid(uint8_t bus, uint8_t dev, uint8_t fn) "replay invalid context device %02"PRIx8":%02"PRIx8".%02"PRIx8 +vtd_page_walk_level(uint64_t addr, uint32_t level, uint64_t start, uint64_t end) "walk (base=0x%"PRIx64", level=%"PRIu32") iova range 0x%"PRIx64" - 0x%"PRIx64 +vtd_page_walk_one(uint16_t domain, uint64_t iova, uint64_t gpa, uint64_t mask, int perm) "domain 0x%"PRIu16" iova 0x%"PRIx64" -> gpa 0x%"PRIx64" mask 0x%"PRIx64" perm %d" +vtd_page_walk_one_skip_map(uint64_t iova, uint64_t mask, uint64_t translated) "iova 0x%"PRIx64" mask 0x%"PRIx64" translated 0x%"PRIx64 +vtd_page_walk_one_skip_unmap(uint64_t iova, 
uint64_t mask) "iova 0x%"PRIx64" mask 0x%"PRIx64 +vtd_page_walk_skip_read(uint64_t iova, uint64_t next) "Page walk skip iova 0x%"PRIx64" - 0x%"PRIx64" due to unable to read" +vtd_page_walk_skip_reserve(uint64_t iova, uint64_t next) "Page walk skip iova 0x%"PRIx64" - 0x%"PRIx64" due to rsrv set" +vtd_switch_address_space(uint8_t bus, uint8_t slot, uint8_t fn, bool on) "Device %02x:%02x.%x switching address space (iommu enabled=%d)" +vtd_as_unmap_whole(uint8_t bus, uint8_t slot, uint8_t fn, uint64_t iova, uint64_t size) "Device %02x:%02x.%x start 0x%"PRIx64" size 0x%"PRIx64 +vtd_translate_pt(uint16_t sid, uint64_t addr) "source id 0x%"PRIu16", iova 0x%"PRIx64 +vtd_pt_enable_fast_path(uint16_t sid, bool success) "sid 0x%"PRIu16" %d" +vtd_irq_generate(uint64_t addr, uint64_t data) "addr 0x%"PRIx64" data 0x%"PRIx64 +vtd_reg_read(uint64_t addr, uint64_t size) "addr 0x%"PRIx64" size 0x%"PRIx64 +vtd_reg_write(uint64_t addr, uint64_t size, uint64_t val) "addr 0x%"PRIx64" size 0x%"PRIx64" value 0x%"PRIx64 +vtd_reg_dmar_root(uint64_t addr, bool scalable) "addr 0x%"PRIx64" scalable %d" +vtd_reg_ir_root(uint64_t addr, uint32_t size) "addr 0x%"PRIx64" size 0x%"PRIx32 +vtd_reg_write_gcmd(uint32_t status, uint32_t val) "status 0x%"PRIx32" value 0x%"PRIx32 +vtd_reg_write_fectl(uint32_t value) "value 0x%"PRIx32 +vtd_reg_write_iectl(uint32_t value) "value 0x%"PRIx32 +vtd_reg_ics_clear_ip(void) "" +vtd_dmar_translate(uint8_t bus, uint8_t slot, uint8_t func, uint64_t iova, uint64_t gpa, uint64_t mask) "dev %02x:%02x.%02x iova 0x%"PRIx64" -> gpa 0x%"PRIx64" mask 0x%"PRIx64 +vtd_dmar_enable(bool en) "enable %d" +vtd_dmar_fault(uint16_t sid, int fault, uint64_t addr, bool is_write) "sid 0x%"PRIx16" fault %d addr 0x%"PRIx64" write %d" +vtd_ir_enable(bool en) "enable %d" +vtd_ir_irte_get(int index, uint64_t lo, uint64_t hi) "index %d low 0x%"PRIx64" high 0x%"PRIx64 +vtd_ir_remap(int index, int tri, int vec, int deliver, uint32_t dest, int dest_mode) "index %d trigger %d vector %d deliver %d dest 0x%"PRIx32" mode %d" +vtd_ir_remap_type(const char *type) "%s" +vtd_ir_remap_msi(uint64_t addr, uint64_t data, uint64_t addr2, uint64_t data2) "(addr 0x%"PRIx64", data 0x%"PRIx64") -> (addr 0x%"PRIx64", data 0x%"PRIx64")" +vtd_ir_remap_msi_req(uint64_t addr, uint64_t data) "addr 0x%"PRIx64" data 0x%"PRIx64 +vtd_fsts_ppf(bool set) "FSTS PPF bit set to %d" +vtd_fsts_clear_ip(void) "" +vtd_frr_new(int index, uint64_t hi, uint64_t lo) "index %d high 0x%"PRIx64" low 0x%"PRIx64 +vtd_warn_invalid_qi_tail(uint16_t tail) "tail 0x%"PRIx16 +vtd_warn_ir_vector(uint16_t sid, int index, int vec, int target) "sid 0x%"PRIx16" index %d vec %d (should be: %d)" +vtd_warn_ir_trigger(uint16_t sid, int index, int trig, int target) "sid 0x%"PRIx16" index %d trigger %d (should be: %d)" + +# amd_iommu.c +amdvi_evntlog_fail(uint64_t addr, uint32_t head) "error: fail to write at addr 0x%"PRIx64" + offset 0x%"PRIx32 +amdvi_cache_update(uint16_t domid, uint8_t bus, uint8_t slot, uint8_t func, uint64_t gpa, uint64_t txaddr) " update iotlb domid 0x%"PRIx16" devid: %02x:%02x.%x gpa 0x%"PRIx64" hpa 0x%"PRIx64 +amdvi_completion_wait_fail(uint64_t addr) "error: fail to write at address 0x%"PRIx64 +amdvi_mmio_write(const char *reg, uint64_t addr, unsigned size, uint64_t val, uint64_t offset) "%s write addr 0x%"PRIx64", size %u, val 0x%"PRIx64", offset 0x%"PRIx64 +amdvi_mmio_read(const char *reg, uint64_t addr, unsigned size, uint64_t offset) "%s read addr 0x%"PRIx64", size %u offset 0x%"PRIx64 +amdvi_mmio_read_invalid(int max, uint64_t addr, unsigned size) 
"error: addr outside region (max 0x%x): read addr 0x%" PRIx64 ", size %u" +amdvi_command_error(uint64_t status) "error: Executing commands with command buffer disabled 0x%"PRIx64 +amdvi_command_read_fail(uint64_t addr, uint32_t head) "error: fail to access memory at 0x%"PRIx64" + 0x%"PRIx32 +amdvi_command_exec(uint32_t head, uint32_t tail, uint64_t buf) "command buffer head at 0x%"PRIx32" command buffer tail at 0x%"PRIx32" command buffer base at 0x%"PRIx64 +amdvi_unhandled_command(uint8_t type) "unhandled command 0x%"PRIx8 +amdvi_intr_inval(void) "Interrupt table invalidated" +amdvi_iotlb_inval(void) "IOTLB pages invalidated" +amdvi_prefetch_pages(void) "Pre-fetch of AMD-Vi pages requested" +amdvi_pages_inval(uint16_t domid) "AMD-Vi pages for domain 0x%"PRIx16 " invalidated" +amdvi_all_inval(void) "Invalidation of all AMD-Vi cache requested " +amdvi_ppr_exec(void) "Execution of PPR queue requested " +amdvi_devtab_inval(uint8_t bus, uint8_t slot, uint8_t func) "device table entry for devid: %02x:%02x.%x invalidated" +amdvi_completion_wait(uint64_t addr, uint64_t data) "completion wait requested with store address 0x%"PRIx64" and store data 0x%"PRIx64 +amdvi_control_status(uint64_t val) "MMIO_STATUS state 0x%"PRIx64 +amdvi_iotlb_reset(void) "IOTLB exceed size limit - reset " +amdvi_dte_get_fail(uint64_t addr, uint32_t offset) "error: failed to access Device Entry devtab 0x%"PRIx64" offset 0x%"PRIx32 +amdvi_invalid_dte(uint64_t addr) "PTE entry at 0x%"PRIx64" is invalid " +amdvi_get_pte_hwerror(uint64_t addr) "hardware error eccessing PTE at addr 0x%"PRIx64 +amdvi_mode_invalid(uint8_t level, uint64_t addr)"error: translation level 0x%"PRIx8" translating addr 0x%"PRIx64 +amdvi_page_fault(uint64_t addr) "error: page fault accessing guest physical address 0x%"PRIx64 +amdvi_iotlb_hit(uint8_t bus, uint8_t slot, uint8_t func, uint64_t addr, uint64_t txaddr) "hit iotlb devid %02x:%02x.%x gpa 0x%"PRIx64" hpa 0x%"PRIx64 +amdvi_translation_result(uint8_t bus, uint8_t slot, uint8_t func, uint64_t addr, uint64_t txaddr) "devid: %02x:%02x.%x gpa 0x%"PRIx64" hpa 0x%"PRIx64 +amdvi_mem_ir_write_req(uint64_t addr, uint64_t val, uint32_t size) "addr 0x%"PRIx64" data 0x%"PRIx64" size 0x%"PRIx32 +amdvi_mem_ir_write(uint64_t addr, uint64_t val) "addr 0x%"PRIx64" data 0x%"PRIx64 +amdvi_ir_remap_msi_req(uint64_t addr, uint64_t data, uint8_t devid) "addr 0x%"PRIx64" data 0x%"PRIx64" devid 0x%"PRIx8 +amdvi_ir_remap_msi(uint64_t addr, uint64_t data, uint64_t addr2, uint64_t data2) "(addr 0x%"PRIx64", data 0x%"PRIx64") -> (addr 0x%"PRIx64", data 0x%"PRIx64")" +amdvi_err(const char *str) "%s" +amdvi_ir_irte(uint64_t addr, uint64_t data) "addr 0x%"PRIx64" offset 0x%"PRIx64 +amdvi_ir_irte_val(uint32_t data) "data 0x%"PRIx32 +amdvi_ir_err(const char *str) "%s" +amdvi_ir_intctl(uint8_t val) "int_ctl 0x%"PRIx8 +amdvi_ir_target_abort(const char *str) "%s" +amdvi_ir_delivery_mode(const char *str) "%s" +amdvi_ir_irte_ga_val(uint64_t hi, uint64_t lo) "hi 0x%"PRIx64" lo 0x%"PRIx64 + +# vmport.c +vmport_register(unsigned char command, void *func, void *opaque) "command: 0x%02x func: %p opaque: %p" +vmport_command(unsigned char command) "command: 0x%02x" + +# x86.c +x86_gsi_interrupt(int irqn, int level) "GSI interrupt #%d level:%d" +x86_pic_interrupt(int irqn, int level) "PIC interrupt #%d level:%d" + +# port92.c +port92_read(uint8_t val) "port92: read 0x%02x" +port92_write(uint8_t val) "port92: write 0x%02x" diff --git a/hw/i386/trace.h b/hw/i386/trace.h new file mode 100644 index 000000000..37a9f67e5 --- /dev/null +++ 
b/hw/i386/trace.h @@ -0,0 +1 @@ +#include "trace/trace-hw_i386.h" diff --git a/hw/i386/vmmouse.c b/hw/i386/vmmouse.c new file mode 100644 index 000000000..3d6636828 --- /dev/null +++ b/hw/i386/vmmouse.c @@ -0,0 +1,327 @@ +/* + * QEMU VMMouse emulation + * + * Copyright (C) 2007 Anthony Liguori + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "ui/console.h" +#include "hw/i386/vmport.h" +#include "hw/input/i8042.h" +#include "hw/qdev-properties.h" +#include "migration/vmstate.h" +#include "cpu.h" +#include "qom/object.h" + +/* debug only vmmouse */ +//#define DEBUG_VMMOUSE + +#define VMMOUSE_READ_ID 0x45414552 +#define VMMOUSE_DISABLE 0x000000f5 +#define VMMOUSE_REQUEST_RELATIVE 0x4c455252 +#define VMMOUSE_REQUEST_ABSOLUTE 0x53424152 + +#define VMMOUSE_QUEUE_SIZE 1024 + +#define VMMOUSE_VERSION 0x3442554a + +#ifdef DEBUG_VMMOUSE +#define DPRINTF(fmt, ...) printf(fmt, ## __VA_ARGS__) +#else +#define DPRINTF(fmt, ...) 
do { } while (0) +#endif + +#define TYPE_VMMOUSE "vmmouse" +OBJECT_DECLARE_SIMPLE_TYPE(VMMouseState, VMMOUSE) + +struct VMMouseState { + ISADevice parent_obj; + + uint32_t queue[VMMOUSE_QUEUE_SIZE]; + int32_t queue_size; + uint16_t nb_queue; + uint16_t status; + uint8_t absolute; + QEMUPutMouseEntry *entry; + ISAKBDState *i8042; +}; + +static void vmmouse_get_data(uint32_t *data) +{ + X86CPU *cpu = X86_CPU(current_cpu); + CPUX86State *env = &cpu->env; + + data[0] = env->regs[R_EAX]; data[1] = env->regs[R_EBX]; + data[2] = env->regs[R_ECX]; data[3] = env->regs[R_EDX]; + data[4] = env->regs[R_ESI]; data[5] = env->regs[R_EDI]; +} + +static void vmmouse_set_data(const uint32_t *data) +{ + X86CPU *cpu = X86_CPU(current_cpu); + CPUX86State *env = &cpu->env; + + env->regs[R_EAX] = data[0]; env->regs[R_EBX] = data[1]; + env->regs[R_ECX] = data[2]; env->regs[R_EDX] = data[3]; + env->regs[R_ESI] = data[4]; env->regs[R_EDI] = data[5]; +} + +static uint32_t vmmouse_get_status(VMMouseState *s) +{ + DPRINTF("vmmouse_get_status()\n"); + return (s->status << 16) | s->nb_queue; +} + +static void vmmouse_mouse_event(void *opaque, int x, int y, int dz, int buttons_state) +{ + VMMouseState *s = opaque; + int buttons = 0; + + if (s->nb_queue > (VMMOUSE_QUEUE_SIZE - 4)) + return; + + DPRINTF("vmmouse_mouse_event(%d, %d, %d, %d)\n", + x, y, dz, buttons_state); + + if ((buttons_state & MOUSE_EVENT_LBUTTON)) + buttons |= 0x20; + if ((buttons_state & MOUSE_EVENT_RBUTTON)) + buttons |= 0x10; + if ((buttons_state & MOUSE_EVENT_MBUTTON)) + buttons |= 0x08; + + if (s->absolute) { + x <<= 1; + y <<= 1; + } + + s->queue[s->nb_queue++] = buttons; + s->queue[s->nb_queue++] = x; + s->queue[s->nb_queue++] = y; + s->queue[s->nb_queue++] = dz; + + /* need to still generate PS2 events to notify driver to + read from queue */ + i8042_isa_mouse_fake_event(s->i8042); +} + +static void vmmouse_remove_handler(VMMouseState *s) +{ + if (s->entry) { + qemu_remove_mouse_event_handler(s->entry); + s->entry = NULL; + } +} + +static void vmmouse_update_handler(VMMouseState *s, int absolute) +{ + if (s->status != 0) { + return; + } + if (s->absolute != absolute) { + s->absolute = absolute; + vmmouse_remove_handler(s); + } + if (s->entry == NULL) { + s->entry = qemu_add_mouse_event_handler(vmmouse_mouse_event, + s, s->absolute, + "vmmouse"); + qemu_activate_mouse_event_handler(s->entry); + } +} + +static void vmmouse_read_id(VMMouseState *s) +{ + DPRINTF("vmmouse_read_id()\n"); + + if (s->nb_queue == VMMOUSE_QUEUE_SIZE) + return; + + s->queue[s->nb_queue++] = VMMOUSE_VERSION; + s->status = 0; + vmmouse_update_handler(s, s->absolute); +} + +static void vmmouse_request_relative(VMMouseState *s) +{ + DPRINTF("vmmouse_request_relative()\n"); + vmmouse_update_handler(s, 0); +} + +static void vmmouse_request_absolute(VMMouseState *s) +{ + DPRINTF("vmmouse_request_absolute()\n"); + vmmouse_update_handler(s, 1); +} + +static void vmmouse_disable(VMMouseState *s) +{ + DPRINTF("vmmouse_disable()\n"); + s->status = 0xffff; + vmmouse_remove_handler(s); +} + +static void vmmouse_data(VMMouseState *s, uint32_t *data, uint32_t size) +{ + int i; + + DPRINTF("vmmouse_data(%d)\n", size); + + if (size == 0 || size > 6 || size > s->nb_queue) { + printf("vmmouse: driver requested too much data %d\n", size); + s->status = 0xffff; + vmmouse_remove_handler(s); + return; + } + + for (i = 0; i < size; i++) + data[i] = s->queue[i]; + + s->nb_queue -= size; + if (s->nb_queue) + memmove(s->queue, &s->queue[size], sizeof(s->queue[0]) * s->nb_queue); +} + +static uint32_t 
vmmouse_ioport_read(void *opaque, uint32_t addr) +{ + VMMouseState *s = opaque; + uint32_t data[6]; + uint16_t command; + + vmmouse_get_data(data); + + command = data[2] & 0xFFFF; + + switch (command) { + case VMPORT_CMD_VMMOUSE_STATUS: + data[0] = vmmouse_get_status(s); + break; + case VMPORT_CMD_VMMOUSE_COMMAND: + switch (data[1]) { + case VMMOUSE_DISABLE: + vmmouse_disable(s); + break; + case VMMOUSE_READ_ID: + vmmouse_read_id(s); + break; + case VMMOUSE_REQUEST_RELATIVE: + vmmouse_request_relative(s); + break; + case VMMOUSE_REQUEST_ABSOLUTE: + vmmouse_request_absolute(s); + break; + default: + printf("vmmouse: unknown command %x\n", data[1]); + break; + } + break; + case VMPORT_CMD_VMMOUSE_DATA: + vmmouse_data(s, data, data[1]); + break; + default: + printf("vmmouse: unknown command %x\n", command); + break; + } + + vmmouse_set_data(data); + return data[0]; +} + +static int vmmouse_post_load(void *opaque, int version_id) +{ + VMMouseState *s = opaque; + + vmmouse_remove_handler(s); + vmmouse_update_handler(s, s->absolute); + return 0; +} + +static const VMStateDescription vmstate_vmmouse = { + .name = "vmmouse", + .version_id = 0, + .minimum_version_id = 0, + .post_load = vmmouse_post_load, + .fields = (VMStateField[]) { + VMSTATE_INT32_EQUAL(queue_size, VMMouseState, NULL), + VMSTATE_UINT32_ARRAY(queue, VMMouseState, VMMOUSE_QUEUE_SIZE), + VMSTATE_UINT16(nb_queue, VMMouseState), + VMSTATE_UINT16(status, VMMouseState), + VMSTATE_UINT8(absolute, VMMouseState), + VMSTATE_END_OF_LIST() + } +}; + +static void vmmouse_reset(DeviceState *d) +{ + VMMouseState *s = VMMOUSE(d); + + s->queue_size = VMMOUSE_QUEUE_SIZE; + s->nb_queue = 0; + + vmmouse_disable(s); +} + +static void vmmouse_realizefn(DeviceState *dev, Error **errp) +{ + VMMouseState *s = VMMOUSE(dev); + + DPRINTF("vmmouse_init\n"); + + if (!object_resolve_path_type("", TYPE_VMPORT, NULL)) { + error_setg(errp, "vmmouse needs a machine with vmport"); + return; + } + + vmport_register(VMPORT_CMD_VMMOUSE_STATUS, vmmouse_ioport_read, s); + vmport_register(VMPORT_CMD_VMMOUSE_COMMAND, vmmouse_ioport_read, s); + vmport_register(VMPORT_CMD_VMMOUSE_DATA, vmmouse_ioport_read, s); +} + +static Property vmmouse_properties[] = { + DEFINE_PROP_LINK("i8042", VMMouseState, i8042, TYPE_I8042, ISAKBDState *), + DEFINE_PROP_END_OF_LIST(), +}; + +static void vmmouse_class_initfn(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = vmmouse_realizefn; + dc->reset = vmmouse_reset; + dc->vmsd = &vmstate_vmmouse; + device_class_set_props(dc, vmmouse_properties); + set_bit(DEVICE_CATEGORY_INPUT, dc->categories); +} + +static const TypeInfo vmmouse_info = { + .name = TYPE_VMMOUSE, + .parent = TYPE_ISA_DEVICE, + .instance_size = sizeof(VMMouseState), + .class_init = vmmouse_class_initfn, +}; + +static void vmmouse_register_types(void) +{ + type_register_static(&vmmouse_info); +} + +type_init(vmmouse_register_types) diff --git a/hw/i386/vmport.c b/hw/i386/vmport.c new file mode 100644 index 000000000..7cc75dbc6 --- /dev/null +++ b/hw/i386/vmport.c @@ -0,0 +1,313 @@ +/* + * QEMU VMPort emulation + * + * Copyright (C) 2007 Hervé Poussineau + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the 
Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +/* + * Guest code that interacts with this virtual device can be found + * in VMware open-vm-tools open-source project: + * https://github.com/vmware/open-vm-tools + */ + +#include "qemu/osdep.h" +#include "hw/isa/isa.h" +#include "hw/i386/vmport.h" +#include "hw/qdev-properties.h" +#include "hw/boards.h" +#include "sysemu/sysemu.h" +#include "sysemu/hw_accel.h" +#include "sysemu/qtest.h" +#include "qemu/log.h" +#include "trace.h" +#include "qom/object.h" + +#define VMPORT_MAGIC 0x564D5868 + +/* Compatibility flags for migration */ +#define VMPORT_COMPAT_READ_SET_EAX_BIT 0 +#define VMPORT_COMPAT_SIGNAL_UNSUPPORTED_CMD_BIT 1 +#define VMPORT_COMPAT_REPORT_VMX_TYPE_BIT 2 +#define VMPORT_COMPAT_CMDS_V2_BIT 3 +#define VMPORT_COMPAT_READ_SET_EAX \ + (1 << VMPORT_COMPAT_READ_SET_EAX_BIT) +#define VMPORT_COMPAT_SIGNAL_UNSUPPORTED_CMD \ + (1 << VMPORT_COMPAT_SIGNAL_UNSUPPORTED_CMD_BIT) +#define VMPORT_COMPAT_REPORT_VMX_TYPE \ + (1 << VMPORT_COMPAT_REPORT_VMX_TYPE_BIT) +#define VMPORT_COMPAT_CMDS_V2 \ + (1 << VMPORT_COMPAT_CMDS_V2_BIT) + +/* vCPU features reported by CMD_GET_VCPU_INFO */ +#define VCPU_INFO_SLC64_BIT 0 +#define VCPU_INFO_SYNC_VTSCS_BIT 1 +#define VCPU_INFO_HV_REPLAY_OK_BIT 2 +#define VCPU_INFO_LEGACY_X2APIC_BIT 3 +#define VCPU_INFO_RESERVED_BIT 31 + +OBJECT_DECLARE_SIMPLE_TYPE(VMPortState, VMPORT) + +struct VMPortState { + ISADevice parent_obj; + + MemoryRegion io; + VMPortReadFunc *func[VMPORT_ENTRIES]; + void *opaque[VMPORT_ENTRIES]; + + uint32_t vmware_vmx_version; + uint8_t vmware_vmx_type; + + uint32_t compat_flags; +}; + +static VMPortState *port_state; + +void vmport_register(VMPortCommand command, VMPortReadFunc *func, void *opaque) +{ + assert(command < VMPORT_ENTRIES); + assert(port_state); + + trace_vmport_register(command, func, opaque); + port_state->func[command] = func; + port_state->opaque[command] = opaque; +} + +static uint64_t vmport_ioport_read(void *opaque, hwaddr addr, + unsigned size) +{ + VMPortState *s = opaque; + CPUState *cs = current_cpu; + X86CPU *cpu = X86_CPU(cs); + CPUX86State *env; + unsigned char command; + uint32_t eax; + + if (qtest_enabled()) { + return -1; + } + env = &cpu->env; + cpu_synchronize_state(cs); + + eax = env->regs[R_EAX]; + if (eax != VMPORT_MAGIC) { + goto err; + } + + command = env->regs[R_ECX]; + trace_vmport_command(command); + if (command >= VMPORT_ENTRIES || !s->func[command]) { + qemu_log_mask(LOG_UNIMP, "vmport: unknown command %x\n", command); + goto err; + } + + eax = s->func[command](s->opaque[command], addr); + goto out; + +err: + if (s->compat_flags & VMPORT_COMPAT_SIGNAL_UNSUPPORTED_CMD) { + eax = UINT32_MAX; + } + +out: + /* + * The call above to cpu_synchronize_state() gets vCPU registers values + * to QEMU but also cause QEMU to write QEMU vCPU registers values to + * vCPU implementation (e.g. 
Accelerator such as KVM) just before + * resuming guest. + * + * Therefore, in order to make IOPort return value propagate to + * guest EAX, we need to explicitly update QEMU EAX register value. + */ + if (s->compat_flags & VMPORT_COMPAT_READ_SET_EAX) { + cpu->env.regs[R_EAX] = eax; + } + + return eax; +} + +static void vmport_ioport_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + X86CPU *cpu = X86_CPU(current_cpu); + + if (qtest_enabled()) { + return; + } + cpu->env.regs[R_EAX] = vmport_ioport_read(opaque, addr, 4); +} + +static uint32_t vmport_cmd_get_version(void *opaque, uint32_t addr) +{ + X86CPU *cpu = X86_CPU(current_cpu); + + if (qtest_enabled()) { + return -1; + } + cpu->env.regs[R_EBX] = VMPORT_MAGIC; + if (port_state->compat_flags & VMPORT_COMPAT_REPORT_VMX_TYPE) { + cpu->env.regs[R_ECX] = port_state->vmware_vmx_type; + } + return port_state->vmware_vmx_version; +} + +static uint32_t vmport_cmd_get_bios_uuid(void *opaque, uint32_t addr) +{ + X86CPU *cpu = X86_CPU(current_cpu); + uint32_t *uuid_parts = (uint32_t *)(qemu_uuid.data); + + cpu->env.regs[R_EAX] = le32_to_cpu(uuid_parts[0]); + cpu->env.regs[R_EBX] = le32_to_cpu(uuid_parts[1]); + cpu->env.regs[R_ECX] = le32_to_cpu(uuid_parts[2]); + cpu->env.regs[R_EDX] = le32_to_cpu(uuid_parts[3]); + return cpu->env.regs[R_EAX]; +} + +static uint32_t vmport_cmd_ram_size(void *opaque, uint32_t addr) +{ + X86CPU *cpu = X86_CPU(current_cpu); + + if (qtest_enabled()) { + return -1; + } + cpu->env.regs[R_EBX] = 0x1177; + return current_machine->ram_size; +} + +static uint32_t vmport_cmd_get_hz(void *opaque, uint32_t addr) +{ + X86CPU *cpu = X86_CPU(current_cpu); + + if (cpu->env.tsc_khz && cpu->env.apic_bus_freq) { + uint64_t tsc_freq = (uint64_t)cpu->env.tsc_khz * 1000; + + cpu->env.regs[R_ECX] = cpu->env.apic_bus_freq; + cpu->env.regs[R_EBX] = (uint32_t)(tsc_freq >> 32); + cpu->env.regs[R_EAX] = (uint32_t)tsc_freq; + } else { + /* Signal cmd as not supported */ + cpu->env.regs[R_EBX] = UINT32_MAX; + } + + return cpu->env.regs[R_EAX]; +} + +static uint32_t vmport_cmd_get_vcpu_info(void *opaque, uint32_t addr) +{ + X86CPU *cpu = X86_CPU(current_cpu); + uint32_t ret = 0; + + if (cpu->env.features[FEAT_1_ECX] & CPUID_EXT_X2APIC) { + ret |= 1 << VCPU_INFO_LEGACY_X2APIC_BIT; + } + + return ret; +} + +static const MemoryRegionOps vmport_ops = { + .read = vmport_ioport_read, + .write = vmport_ioport_write, + .impl = { + .min_access_size = 4, + .max_access_size = 4, + }, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static void vmport_realizefn(DeviceState *dev, Error **errp) +{ + ISADevice *isadev = ISA_DEVICE(dev); + VMPortState *s = VMPORT(dev); + + memory_region_init_io(&s->io, OBJECT(s), &vmport_ops, s, "vmport", 1); + isa_register_ioport(isadev, &s->io, 0x5658); + + port_state = s; + + /* Register some generic port commands */ + vmport_register(VMPORT_CMD_GETVERSION, vmport_cmd_get_version, NULL); + vmport_register(VMPORT_CMD_GETRAMSIZE, vmport_cmd_ram_size, NULL); + if (s->compat_flags & VMPORT_COMPAT_CMDS_V2) { + vmport_register(VMPORT_CMD_GETBIOSUUID, vmport_cmd_get_bios_uuid, NULL); + vmport_register(VMPORT_CMD_GETHZ, vmport_cmd_get_hz, NULL); + vmport_register(VMPORT_CMD_GET_VCPU_INFO, vmport_cmd_get_vcpu_info, + NULL); + } +} + +static Property vmport_properties[] = { + /* Used to enforce compatibility for migration */ + DEFINE_PROP_BIT("x-read-set-eax", VMPortState, compat_flags, + VMPORT_COMPAT_READ_SET_EAX_BIT, true), + DEFINE_PROP_BIT("x-signal-unsupported-cmd", VMPortState, compat_flags, + 
VMPORT_COMPAT_SIGNAL_UNSUPPORTED_CMD_BIT, true), + DEFINE_PROP_BIT("x-report-vmx-type", VMPortState, compat_flags, + VMPORT_COMPAT_REPORT_VMX_TYPE_BIT, true), + DEFINE_PROP_BIT("x-cmds-v2", VMPortState, compat_flags, + VMPORT_COMPAT_CMDS_V2_BIT, true), + + /* Default value taken from open-vm-tools code VERSION_MAGIC definition */ + DEFINE_PROP_UINT32("vmware-vmx-version", VMPortState, + vmware_vmx_version, 6), + /* + * Value determines which VMware product type host report itself to guest. + * + * Most guests are fine with exposing host as VMware ESX server. + * Some legacy/proprietary guests hard-code a given type. + * + * For a complete list of values, refer to enum VMXType at open-vm-tools + * project (Defined at lib/include/vm_vmx_type.h). + * + * Reasonable options: + * 0 - Unset + * 1 - VMware Express (deprecated) + * 2 - VMware ESX Server + * 3 - VMware Server (Deprecated) + * 4 - VMware Workstation + * 5 - ACE 1.x (Deprecated) + */ + DEFINE_PROP_UINT8("vmware-vmx-type", VMPortState, vmware_vmx_type, 2), + + DEFINE_PROP_END_OF_LIST(), +}; + +static void vmport_class_initfn(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = vmport_realizefn; + /* Reason: realize sets global port_state */ + dc->user_creatable = false; + device_class_set_props(dc, vmport_properties); +} + +static const TypeInfo vmport_info = { + .name = TYPE_VMPORT, + .parent = TYPE_ISA_DEVICE, + .instance_size = sizeof(VMPortState), + .class_init = vmport_class_initfn, +}; + +static void vmport_register_types(void) +{ + type_register_static(&vmport_info); +} + +type_init(vmport_register_types) diff --git a/hw/i386/x86-iommu-stub.c b/hw/i386/x86-iommu-stub.c new file mode 100644 index 000000000..781b5ff92 --- /dev/null +++ b/hw/i386/x86-iommu-stub.c @@ -0,0 +1,38 @@ +/* + * Stubs for X86 IOMMU emulation + * + * Copyright (C) 2019 Red Hat, Inc. + * + * Author: Paolo Bonzini + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . + */ + +#include "qemu/osdep.h" +#include "hw/i386/x86-iommu.h" + +void x86_iommu_iec_register_notifier(X86IOMMUState *iommu, + iec_notify_fn fn, void *data) +{ +} + +X86IOMMUState *x86_iommu_get_default(void) +{ + return NULL; +} + +bool x86_iommu_ir_supported(X86IOMMUState *s) +{ + return false; +} diff --git a/hw/i386/x86-iommu.c b/hw/i386/x86-iommu.c new file mode 100644 index 000000000..01d11325a --- /dev/null +++ b/hw/i386/x86-iommu.c @@ -0,0 +1,162 @@ +/* + * QEMU emulation of common X86 IOMMU + * + * Copyright (C) 2016 Peter Xu, Red Hat + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . + */ + +#include "qemu/osdep.h" +#include "hw/sysbus.h" +#include "hw/i386/x86-iommu.h" +#include "hw/qdev-properties.h" +#include "hw/i386/pc.h" +#include "qapi/error.h" +#include "qemu/error-report.h" +#include "trace.h" +#include "sysemu/kvm.h" + +void x86_iommu_iec_register_notifier(X86IOMMUState *iommu, + iec_notify_fn fn, void *data) +{ + IEC_Notifier *notifier = g_new0(IEC_Notifier, 1); + + notifier->iec_notify = fn; + notifier->private = data; + + QLIST_INSERT_HEAD(&iommu->iec_notifiers, notifier, list); +} + +void x86_iommu_iec_notify_all(X86IOMMUState *iommu, bool global, + uint32_t index, uint32_t mask) +{ + IEC_Notifier *notifier; + + trace_x86_iommu_iec_notify(global, index, mask); + + QLIST_FOREACH(notifier, &iommu->iec_notifiers, list) { + if (notifier->iec_notify) { + notifier->iec_notify(notifier->private, global, + index, mask); + } + } +} + +/* Generate one MSI message from VTDIrq info */ +void x86_iommu_irq_to_msi_message(X86IOMMUIrq *irq, MSIMessage *msg_out) +{ + X86IOMMU_MSIMessage msg = {}; + + /* Generate address bits */ + msg.dest_mode = irq->dest_mode; + msg.redir_hint = irq->redir_hint; + msg.dest = irq->dest; + msg.__addr_hi = irq->dest & 0xffffff00; + msg.__addr_head = cpu_to_le32(0xfee); + /* Keep this from original MSI address bits */ + msg.__not_used = irq->msi_addr_last_bits; + + /* Generate data bits */ + msg.vector = irq->vector; + msg.delivery_mode = irq->delivery_mode; + msg.level = 1; + msg.trigger_mode = irq->trigger_mode; + + msg_out->address = msg.msi_addr; + msg_out->data = msg.msi_data; +} + +X86IOMMUState *x86_iommu_get_default(void) +{ + MachineState *ms = MACHINE(qdev_get_machine()); + PCMachineState *pcms = + PC_MACHINE(object_dynamic_cast(OBJECT(ms), TYPE_PC_MACHINE)); + + if (pcms && + object_dynamic_cast(OBJECT(pcms->iommu), TYPE_X86_IOMMU_DEVICE)) { + return X86_IOMMU_DEVICE(pcms->iommu); + } + return NULL; +} + +static void x86_iommu_realize(DeviceState *dev, Error **errp) +{ + X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(dev); + X86IOMMUClass *x86_class = X86_IOMMU_DEVICE_GET_CLASS(dev); + MachineState *ms = MACHINE(qdev_get_machine()); + MachineClass *mc = MACHINE_GET_CLASS(ms); + PCMachineState *pcms = + PC_MACHINE(object_dynamic_cast(OBJECT(ms), TYPE_PC_MACHINE)); + QLIST_INIT(&x86_iommu->iec_notifiers); + bool irq_all_kernel = kvm_irqchip_in_kernel() && !kvm_irqchip_is_split(); + + if (!pcms || !pcms->bus) { + error_setg(errp, "Machine-type '%s' not supported by IOMMU", + mc->name); + return; + } + + /* If the user didn't specify IR, choose a default value for it */ + if (x86_iommu->intr_supported == ON_OFF_AUTO_AUTO) { + x86_iommu->intr_supported = irq_all_kernel ? 
+ ON_OFF_AUTO_OFF : ON_OFF_AUTO_ON; + } + + /* Both Intel and AMD IOMMU IR only support "kernel-irqchip={off|split}" */ + if (x86_iommu_ir_supported(x86_iommu) && irq_all_kernel) { + error_setg(errp, "Interrupt Remapping cannot work with " + "kernel-irqchip=on, please use 'split|off'."); + return; + } + + if (x86_class->realize) { + x86_class->realize(dev, errp); + } +} + +static Property x86_iommu_properties[] = { + DEFINE_PROP_ON_OFF_AUTO("intremap", X86IOMMUState, + intr_supported, ON_OFF_AUTO_AUTO), + DEFINE_PROP_BOOL("device-iotlb", X86IOMMUState, dt_supported, false), + DEFINE_PROP_BOOL("pt", X86IOMMUState, pt_supported, true), + DEFINE_PROP_END_OF_LIST(), +}; + +static void x86_iommu_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + dc->realize = x86_iommu_realize; + device_class_set_props(dc, x86_iommu_properties); +} + +bool x86_iommu_ir_supported(X86IOMMUState *s) +{ + return s->intr_supported == ON_OFF_AUTO_ON; +} + +static const TypeInfo x86_iommu_info = { + .name = TYPE_X86_IOMMU_DEVICE, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(X86IOMMUState), + .class_init = x86_iommu_class_init, + .class_size = sizeof(X86IOMMUClass), + .abstract = true, +}; + +static void x86_iommu_register_types(void) +{ + type_register_static(&x86_iommu_info); +} + +type_init(x86_iommu_register_types) diff --git a/hw/i386/x86.c b/hw/i386/x86.c new file mode 100644 index 000000000..b84840a1b --- /dev/null +++ b/hw/i386/x86.c @@ -0,0 +1,1399 @@ +/* + * Copyright (c) 2003-2004 Fabrice Bellard + * Copyright (c) 2019 Red Hat, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ +#include "qemu/osdep.h" +#include "qemu/error-report.h" +#include "qemu/option.h" +#include "qemu/cutils.h" +#include "qemu/units.h" +#include "qemu-common.h" +#include "qemu/datadir.h" +#include "qapi/error.h" +#include "qapi/qmp/qerror.h" +#include "qapi/qapi-visit-common.h" +#include "qapi/clone-visitor.h" +#include "qapi/qapi-visit-machine.h" +#include "qapi/visitor.h" +#include "sysemu/qtest.h" +#include "sysemu/whpx.h" +#include "sysemu/numa.h" +#include "sysemu/replay.h" +#include "sysemu/sysemu.h" +#include "sysemu/cpu-timers.h" +#include "trace.h" + +#include "hw/i386/x86.h" +#include "target/i386/cpu.h" +#include "hw/i386/topology.h" +#include "hw/i386/fw_cfg.h" +#include "hw/intc/i8259.h" +#include "hw/rtc/mc146818rtc.h" +#include "target/i386/sev.h" + +#include "hw/acpi/cpu_hotplug.h" +#include "hw/irq.h" +#include "hw/nmi.h" +#include "hw/loader.h" +#include "multiboot.h" +#include "elf.h" +#include "standard-headers/asm-x86/bootparam.h" +#include CONFIG_DEVICES +#include "kvm/kvm_i386.h" + +/* Physical Address of PVH entry point read from kernel ELF NOTE */ +static size_t pvh_start_addr; + +inline void init_topo_info(X86CPUTopoInfo *topo_info, + const X86MachineState *x86ms) +{ + MachineState *ms = MACHINE(x86ms); + + topo_info->dies_per_pkg = ms->smp.dies; + topo_info->cores_per_die = ms->smp.cores; + topo_info->threads_per_core = ms->smp.threads; +} + +/* + * Calculates initial APIC ID for a specific CPU index + * + * Currently we need to be able to calculate the APIC ID from the CPU index + * alone (without requiring a CPU object), as the QEMU<->Seabios interfaces have + * no concept of "CPU index", and the NUMA tables on fw_cfg need the APIC ID of + * all CPUs up to max_cpus. + */ +uint32_t x86_cpu_apic_id_from_index(X86MachineState *x86ms, + unsigned int cpu_index) +{ + X86MachineClass *x86mc = X86_MACHINE_GET_CLASS(x86ms); + X86CPUTopoInfo topo_info; + uint32_t correct_id; + static bool warned; + + init_topo_info(&topo_info, x86ms); + + correct_id = x86_apicid_from_cpu_idx(&topo_info, cpu_index); + if (x86mc->compat_apic_id_mode) { + if (cpu_index != correct_id && !warned && !qtest_enabled()) { + error_report("APIC IDs set in compatibility mode, " + "CPU topology won't match the configuration"); + warned = true; + } + return cpu_index; + } else { + return correct_id; + } +} + + +void x86_cpu_new(X86MachineState *x86ms, int64_t apic_id, Error **errp) +{ + Object *cpu = object_new(MACHINE(x86ms)->cpu_type); + + if (!object_property_set_uint(cpu, "apic-id", apic_id, errp)) { + goto out; + } + qdev_realize(DEVICE(cpu), NULL, errp); + +out: + object_unref(cpu); +} + +void x86_cpus_init(X86MachineState *x86ms, int default_cpu_version) +{ + int i; + const CPUArchIdList *possible_cpus; + MachineState *ms = MACHINE(x86ms); + MachineClass *mc = MACHINE_GET_CLASS(x86ms); + + x86_cpu_set_default_version(default_cpu_version); + + /* + * Calculates the limit to CPU APIC ID values + * + * Limit for the APIC ID value, so that all + * CPU APIC IDs are < x86ms->apic_id_limit. + * + * This is used for FW_CFG_MAX_CPUS. See comments on fw_cfg_arch_create(). 
+ */ + x86ms->apic_id_limit = x86_cpu_apic_id_from_index(x86ms, + ms->smp.max_cpus - 1) + 1; + possible_cpus = mc->possible_cpu_arch_ids(ms); + for (i = 0; i < ms->smp.cpus; i++) { + x86_cpu_new(x86ms, possible_cpus->cpus[i].arch_id, &error_fatal); + } +} + +void x86_rtc_set_cpus_count(ISADevice *rtc, uint16_t cpus_count) +{ + if (cpus_count > 0xff) { + /* + * If the number of CPUs can't be represented in 8 bits, the + * BIOS must use "FW_CFG_NB_CPUS". Set RTC field to 0 just + * to make old BIOSes fail more predictably. + */ + rtc_set_memory(rtc, 0x5f, 0); + } else { + rtc_set_memory(rtc, 0x5f, cpus_count - 1); + } +} + +static int x86_apic_cmp(const void *a, const void *b) +{ + CPUArchId *apic_a = (CPUArchId *)a; + CPUArchId *apic_b = (CPUArchId *)b; + + return apic_a->arch_id - apic_b->arch_id; +} + +/* + * returns pointer to CPUArchId descriptor that matches CPU's apic_id + * in ms->possible_cpus->cpus, if ms->possible_cpus->cpus has no + * entry corresponding to CPU's apic_id returns NULL. + */ +CPUArchId *x86_find_cpu_slot(MachineState *ms, uint32_t id, int *idx) +{ + CPUArchId apic_id, *found_cpu; + + apic_id.arch_id = id; + found_cpu = bsearch(&apic_id, ms->possible_cpus->cpus, + ms->possible_cpus->len, sizeof(*ms->possible_cpus->cpus), + x86_apic_cmp); + if (found_cpu && idx) { + *idx = found_cpu - ms->possible_cpus->cpus; + } + return found_cpu; +} + +void x86_cpu_plug(HotplugHandler *hotplug_dev, + DeviceState *dev, Error **errp) +{ + CPUArchId *found_cpu; + Error *local_err = NULL; + X86CPU *cpu = X86_CPU(dev); + X86MachineState *x86ms = X86_MACHINE(hotplug_dev); + + if (x86ms->acpi_dev) { + hotplug_handler_plug(x86ms->acpi_dev, dev, &local_err); + if (local_err) { + goto out; + } + } + + /* increment the number of CPUs */ + x86ms->boot_cpus++; + if (x86ms->rtc) { + x86_rtc_set_cpus_count(x86ms->rtc, x86ms->boot_cpus); + } + if (x86ms->fw_cfg) { + fw_cfg_modify_i16(x86ms->fw_cfg, FW_CFG_NB_CPUS, x86ms->boot_cpus); + } + + found_cpu = x86_find_cpu_slot(MACHINE(x86ms), cpu->apic_id, NULL); + found_cpu->cpu = OBJECT(dev); +out: + error_propagate(errp, local_err); +} + +void x86_cpu_unplug_request_cb(HotplugHandler *hotplug_dev, + DeviceState *dev, Error **errp) +{ + int idx = -1; + X86CPU *cpu = X86_CPU(dev); + X86MachineState *x86ms = X86_MACHINE(hotplug_dev); + + if (!x86ms->acpi_dev) { + error_setg(errp, "CPU hot unplug not supported without ACPI"); + return; + } + + x86_find_cpu_slot(MACHINE(x86ms), cpu->apic_id, &idx); + assert(idx != -1); + if (idx == 0) { + error_setg(errp, "Boot CPU is unpluggable"); + return; + } + + hotplug_handler_unplug_request(x86ms->acpi_dev, dev, + errp); +} + +void x86_cpu_unplug_cb(HotplugHandler *hotplug_dev, + DeviceState *dev, Error **errp) +{ + CPUArchId *found_cpu; + Error *local_err = NULL; + X86CPU *cpu = X86_CPU(dev); + X86MachineState *x86ms = X86_MACHINE(hotplug_dev); + + hotplug_handler_unplug(x86ms->acpi_dev, dev, &local_err); + if (local_err) { + goto out; + } + + found_cpu = x86_find_cpu_slot(MACHINE(x86ms), cpu->apic_id, NULL); + found_cpu->cpu = NULL; + qdev_unrealize(dev); + + /* decrement the number of CPUs */ + x86ms->boot_cpus--; + /* Update the number of CPUs in CMOS */ + x86_rtc_set_cpus_count(x86ms->rtc, x86ms->boot_cpus); + fw_cfg_modify_i16(x86ms->fw_cfg, FW_CFG_NB_CPUS, x86ms->boot_cpus); + out: + error_propagate(errp, local_err); +} + +void x86_cpu_pre_plug(HotplugHandler *hotplug_dev, + DeviceState *dev, Error **errp) +{ + int idx; + CPUState *cs; + CPUArchId *cpu_slot; + X86CPUTopoIDs topo_ids; + X86CPU *cpu = 
X86_CPU(dev); + CPUX86State *env = &cpu->env; + MachineState *ms = MACHINE(hotplug_dev); + X86MachineState *x86ms = X86_MACHINE(hotplug_dev); + unsigned int smp_cores = ms->smp.cores; + unsigned int smp_threads = ms->smp.threads; + X86CPUTopoInfo topo_info; + + if (!object_dynamic_cast(OBJECT(cpu), ms->cpu_type)) { + error_setg(errp, "Invalid CPU type, expected cpu type: '%s'", + ms->cpu_type); + return; + } + + if (x86ms->acpi_dev) { + Error *local_err = NULL; + + hotplug_handler_pre_plug(HOTPLUG_HANDLER(x86ms->acpi_dev), dev, + &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + } + + init_topo_info(&topo_info, x86ms); + + env->nr_dies = ms->smp.dies; + + /* + * If APIC ID is not set, + * set it based on socket/die/core/thread properties. + */ + if (cpu->apic_id == UNASSIGNED_APIC_ID) { + int max_socket = (ms->smp.max_cpus - 1) / + smp_threads / smp_cores / ms->smp.dies; + + /* + * die-id was optional in QEMU 4.0 and older, so keep it optional + * if there's only one die per socket. + */ + if (cpu->die_id < 0 && ms->smp.dies == 1) { + cpu->die_id = 0; + } + + if (cpu->socket_id < 0) { + error_setg(errp, "CPU socket-id is not set"); + return; + } else if (cpu->socket_id > max_socket) { + error_setg(errp, "Invalid CPU socket-id: %u must be in range 0:%u", + cpu->socket_id, max_socket); + return; + } + if (cpu->die_id < 0) { + error_setg(errp, "CPU die-id is not set"); + return; + } else if (cpu->die_id > ms->smp.dies - 1) { + error_setg(errp, "Invalid CPU die-id: %u must be in range 0:%u", + cpu->die_id, ms->smp.dies - 1); + return; + } + if (cpu->core_id < 0) { + error_setg(errp, "CPU core-id is not set"); + return; + } else if (cpu->core_id > (smp_cores - 1)) { + error_setg(errp, "Invalid CPU core-id: %u must be in range 0:%u", + cpu->core_id, smp_cores - 1); + return; + } + if (cpu->thread_id < 0) { + error_setg(errp, "CPU thread-id is not set"); + return; + } else if (cpu->thread_id > (smp_threads - 1)) { + error_setg(errp, "Invalid CPU thread-id: %u must be in range 0:%u", + cpu->thread_id, smp_threads - 1); + return; + } + + topo_ids.pkg_id = cpu->socket_id; + topo_ids.die_id = cpu->die_id; + topo_ids.core_id = cpu->core_id; + topo_ids.smt_id = cpu->thread_id; + cpu->apic_id = x86_apicid_from_topo_ids(&topo_info, &topo_ids); + } + + cpu_slot = x86_find_cpu_slot(MACHINE(x86ms), cpu->apic_id, &idx); + if (!cpu_slot) { + MachineState *ms = MACHINE(x86ms); + + x86_topo_ids_from_apicid(cpu->apic_id, &topo_info, &topo_ids); + error_setg(errp, + "Invalid CPU [socket: %u, die: %u, core: %u, thread: %u] with" + " APIC ID %" PRIu32 ", valid index range 0:%d", + topo_ids.pkg_id, topo_ids.die_id, topo_ids.core_id, topo_ids.smt_id, + cpu->apic_id, ms->possible_cpus->len - 1); + return; + } + + if (cpu_slot->cpu) { + error_setg(errp, "CPU[%d] with APIC ID %" PRIu32 " exists", + idx, cpu->apic_id); + return; + } + + /* if 'address' properties socket-id/core-id/thread-id are not set, set them + * so that machine_query_hotpluggable_cpus would show correct values + */ + /* TODO: move socket_id/core_id/thread_id checks into x86_cpu_realizefn() + * once -smp refactoring is complete and there will be CPU private + * CPUState::nr_cores and CPUState::nr_threads fields instead of globals */ + x86_topo_ids_from_apicid(cpu->apic_id, &topo_info, &topo_ids); + if (cpu->socket_id != -1 && cpu->socket_id != topo_ids.pkg_id) { + error_setg(errp, "property socket-id: %u doesn't match set apic-id:" + " 0x%x (socket-id: %u)", cpu->socket_id, cpu->apic_id, + topo_ids.pkg_id); + return; 
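+ /*
+ * Worked decode example (hypothetical topology): with cores=2 and
+ * threads=2, an explicit apic-id=5 (0b101) splits into smt_id 1,
+ * core_id 0, pkg_id 1, so pairing it with socket-id=0 trips the
+ * mismatch error above.
+ */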
+ } + cpu->socket_id = topo_ids.pkg_id; + + if (cpu->die_id != -1 && cpu->die_id != topo_ids.die_id) { + error_setg(errp, "property die-id: %u doesn't match set apic-id:" + " 0x%x (die-id: %u)", cpu->die_id, cpu->apic_id, topo_ids.die_id); + return; + } + cpu->die_id = topo_ids.die_id; + + if (cpu->core_id != -1 && cpu->core_id != topo_ids.core_id) { + error_setg(errp, "property core-id: %u doesn't match set apic-id:" + " 0x%x (core-id: %u)", cpu->core_id, cpu->apic_id, + topo_ids.core_id); + return; + } + cpu->core_id = topo_ids.core_id; + + if (cpu->thread_id != -1 && cpu->thread_id != topo_ids.smt_id) { + error_setg(errp, "property thread-id: %u doesn't match set apic-id:" + " 0x%x (thread-id: %u)", cpu->thread_id, cpu->apic_id, + topo_ids.smt_id); + return; + } + cpu->thread_id = topo_ids.smt_id; + + if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VPINDEX) && + !kvm_hv_vpindex_settable()) { + error_setg(errp, "kernel doesn't allow setting HyperV VP_INDEX"); + return; + } + + cs = CPU(cpu); + cs->cpu_index = idx; + + numa_cpu_pre_plug(cpu_slot, dev, errp); +} + +CpuInstanceProperties +x86_cpu_index_to_props(MachineState *ms, unsigned cpu_index) +{ + MachineClass *mc = MACHINE_GET_CLASS(ms); + const CPUArchIdList *possible_cpus = mc->possible_cpu_arch_ids(ms); + + assert(cpu_index < possible_cpus->len); + return possible_cpus->cpus[cpu_index].props; +} + +int64_t x86_get_default_cpu_node_id(const MachineState *ms, int idx) +{ + X86CPUTopoIDs topo_ids; + X86MachineState *x86ms = X86_MACHINE(ms); + X86CPUTopoInfo topo_info; + + init_topo_info(&topo_info, x86ms); + + assert(idx < ms->possible_cpus->len); + x86_topo_ids_from_apicid(ms->possible_cpus->cpus[idx].arch_id, + &topo_info, &topo_ids); + return topo_ids.pkg_id % ms->numa_state->num_nodes; +} + +const CPUArchIdList *x86_possible_cpu_arch_ids(MachineState *ms) +{ + X86MachineState *x86ms = X86_MACHINE(ms); + unsigned int max_cpus = ms->smp.max_cpus; + X86CPUTopoInfo topo_info; + int i; + + if (ms->possible_cpus) { + /* + * make sure that max_cpus hasn't changed since the first use, i.e. 
+ * -smp hasn't been parsed after it + */ + assert(ms->possible_cpus->len == max_cpus); + return ms->possible_cpus; + } + + ms->possible_cpus = g_malloc0(sizeof(CPUArchIdList) + + sizeof(CPUArchId) * max_cpus); + ms->possible_cpus->len = max_cpus; + + init_topo_info(&topo_info, x86ms); + + for (i = 0; i < ms->possible_cpus->len; i++) { + X86CPUTopoIDs topo_ids; + + ms->possible_cpus->cpus[i].type = ms->cpu_type; + ms->possible_cpus->cpus[i].vcpus_count = 1; + ms->possible_cpus->cpus[i].arch_id = + x86_cpu_apic_id_from_index(x86ms, i); + x86_topo_ids_from_apicid(ms->possible_cpus->cpus[i].arch_id, + &topo_info, &topo_ids); + ms->possible_cpus->cpus[i].props.has_socket_id = true; + ms->possible_cpus->cpus[i].props.socket_id = topo_ids.pkg_id; + if (ms->smp.dies > 1) { + ms->possible_cpus->cpus[i].props.has_die_id = true; + ms->possible_cpus->cpus[i].props.die_id = topo_ids.die_id; + } + ms->possible_cpus->cpus[i].props.has_core_id = true; + ms->possible_cpus->cpus[i].props.core_id = topo_ids.core_id; + ms->possible_cpus->cpus[i].props.has_thread_id = true; + ms->possible_cpus->cpus[i].props.thread_id = topo_ids.smt_id; + } + return ms->possible_cpus; +} + +static void x86_nmi(NMIState *n, int cpu_index, Error **errp) +{ + /* cpu index isn't used */ + CPUState *cs; + + CPU_FOREACH(cs) { + X86CPU *cpu = X86_CPU(cs); + + if (!cpu->apic_state) { + cpu_interrupt(cs, CPU_INTERRUPT_NMI); + } else { + apic_deliver_nmi(cpu->apic_state); + } + } +} + +static long get_file_size(FILE *f) +{ + long where, size; + + /* XXX: on Unix systems, using fstat() probably makes more sense */ + + where = ftell(f); + fseek(f, 0, SEEK_END); + size = ftell(f); + fseek(f, where, SEEK_SET); + + return size; +} + +/* TSC handling */ +uint64_t cpu_get_tsc(CPUX86State *env) +{ + return cpus_get_elapsed_ticks(); +} + +/* IRQ handling */ +static void pic_irq_request(void *opaque, int irq, int level) +{ + CPUState *cs = first_cpu; + X86CPU *cpu = X86_CPU(cs); + + trace_x86_pic_interrupt(irq, level); + if (cpu->apic_state && !kvm_irqchip_in_kernel() && + !whpx_apic_in_platform()) { + CPU_FOREACH(cs) { + cpu = X86_CPU(cs); + if (apic_accept_pic_intr(cpu->apic_state)) { + apic_deliver_pic_intr(cpu->apic_state, level); + } + } + } else { + if (level) { + cpu_interrupt(cs, CPU_INTERRUPT_HARD); + } else { + cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD); + } + } +} + +qemu_irq x86_allocate_cpu_irq(void) +{ + return qemu_allocate_irq(pic_irq_request, NULL, 0); +} + +int cpu_get_pic_interrupt(CPUX86State *env) +{ + X86CPU *cpu = env_archcpu(env); + int intno; + + if (!kvm_irqchip_in_kernel() && !whpx_apic_in_platform()) { + intno = apic_get_interrupt(cpu->apic_state); + if (intno >= 0) { + return intno; + } + /* read the irq from the PIC */ + if (!apic_accept_pic_intr(cpu->apic_state)) { + return -1; + } + } + + intno = pic_read_irq(isa_pic); + return intno; +} + +DeviceState *cpu_get_current_apic(void) +{ + if (current_cpu) { + X86CPU *cpu = X86_CPU(current_cpu); + return cpu->apic_state; + } else { + return NULL; + } +} + +void gsi_handler(void *opaque, int n, int level) +{ + GSIState *s = opaque; + + trace_x86_gsi_interrupt(n, level); + switch (n) { + case 0 ... ISA_NUM_IRQS - 1: + if (s->i8259_irq[n]) { + /* Under KVM, Kernel will forward to both PIC and IOAPIC */ + qemu_set_irq(s->i8259_irq[n], level); + } + /* fall through */ + case ISA_NUM_IRQS ... IOAPIC_NUM_PINS - 1: + qemu_set_irq(s->ioapic_irq[n], level); + break; + case IO_APIC_SECONDARY_IRQBASE + ... 
IO_APIC_SECONDARY_IRQBASE + IOAPIC_NUM_PINS - 1: + qemu_set_irq(s->ioapic2_irq[n - IO_APIC_SECONDARY_IRQBASE], level); + break; + } +} + +void ioapic_init_gsi(GSIState *gsi_state, const char *parent_name) +{ + DeviceState *dev; + SysBusDevice *d; + unsigned int i; + + assert(parent_name); + if (kvm_ioapic_in_kernel()) { + dev = qdev_new(TYPE_KVM_IOAPIC); + } else { + dev = qdev_new(TYPE_IOAPIC); + } + object_property_add_child(object_resolve_path(parent_name, NULL), + "ioapic", OBJECT(dev)); + d = SYS_BUS_DEVICE(dev); + sysbus_realize_and_unref(d, &error_fatal); + sysbus_mmio_map(d, 0, IO_APIC_DEFAULT_ADDRESS); + + for (i = 0; i < IOAPIC_NUM_PINS; i++) { + gsi_state->ioapic_irq[i] = qdev_get_gpio_in(dev, i); + } +} + +DeviceState *ioapic_init_secondary(GSIState *gsi_state) +{ + DeviceState *dev; + SysBusDevice *d; + unsigned int i; + + dev = qdev_new(TYPE_IOAPIC); + d = SYS_BUS_DEVICE(dev); + sysbus_realize_and_unref(d, &error_fatal); + sysbus_mmio_map(d, 0, IO_APIC_SECONDARY_ADDRESS); + + for (i = 0; i < IOAPIC_NUM_PINS; i++) { + gsi_state->ioapic2_irq[i] = qdev_get_gpio_in(dev, i); + } + return dev; +} + +struct setup_data { + uint64_t next; + uint32_t type; + uint32_t len; + uint8_t data[]; +} __attribute__((packed)); + + +/* + * The entry point into the kernel for PVH boot is different from + * the native entry point. The PVH entry is defined by the x86/HVM + * direct boot ABI and is available in an ELFNOTE in the kernel binary. + * + * This function is passed to load_elf() when it is called from + * load_elfboot() which then additionally checks for an ELF Note of + * type XEN_ELFNOTE_PHYS32_ENTRY and passes it to this function to + * parse the PVH entry address from the ELF Note. + * + * Due to trickery in elf_opts.h, load_elf() is actually available as + * load_elf32() or load_elf64() and this routine needs to be able + * to deal with being called as 32 or 64 bit. + * + * The address of the PVH entry point is saved to the 'pvh_start_addr' + * global variable. (although the entry point is 32-bit, the kernel + * binary can be either 32-bit or 64-bit). + */ +static uint64_t read_pvh_start_addr(void *arg1, void *arg2, bool is64) +{ + size_t *elf_note_data_addr; + + /* Check if ELF Note header passed in is valid */ + if (arg1 == NULL) { + return 0; + } + + if (is64) { + struct elf64_note *nhdr64 = (struct elf64_note *)arg1; + uint64_t nhdr_size64 = sizeof(struct elf64_note); + uint64_t phdr_align = *(uint64_t *)arg2; + uint64_t nhdr_namesz = nhdr64->n_namesz; + + elf_note_data_addr = + ((void *)nhdr64) + nhdr_size64 + + QEMU_ALIGN_UP(nhdr_namesz, phdr_align); + + pvh_start_addr = *elf_note_data_addr; + } else { + struct elf32_note *nhdr32 = (struct elf32_note *)arg1; + uint32_t nhdr_size32 = sizeof(struct elf32_note); + uint32_t phdr_align = *(uint32_t *)arg2; + uint32_t nhdr_namesz = nhdr32->n_namesz; + + elf_note_data_addr = + ((void *)nhdr32) + nhdr_size32 + + QEMU_ALIGN_UP(nhdr_namesz, phdr_align); + + pvh_start_addr = *(uint32_t *)elf_note_data_addr; + } + + return pvh_start_addr; +} + +static bool load_elfboot(const char *kernel_filename, + int kernel_file_size, + uint8_t *header, + size_t pvh_xen_start_addr, + FWCfgState *fw_cfg) +{ + uint32_t flags = 0; + uint32_t mh_load_addr = 0; + uint32_t elf_kernel_size = 0; + uint64_t elf_entry; + uint64_t elf_low, elf_high; + int kernel_size; + + if (ldl_p(header) != 0x464c457f) { + return false; /* no elfboot */ + } + + bool elf_is64 = header[EI_CLASS] == ELFCLASS64; + flags = elf_is64 ? 
+ ((Elf64_Ehdr *)header)->e_flags : ((Elf32_Ehdr *)header)->e_flags; + + if (flags & 0x00010004) { /* LOAD_ELF_HEADER_HAS_ADDR */ + error_report("elfboot unsupported flags = %x", flags); + exit(1); + } + + uint64_t elf_note_type = XEN_ELFNOTE_PHYS32_ENTRY; + kernel_size = load_elf(kernel_filename, read_pvh_start_addr, + NULL, &elf_note_type, &elf_entry, + &elf_low, &elf_high, NULL, 0, I386_ELF_MACHINE, + 0, 0); + + if (kernel_size < 0) { + error_report("Error while loading elf kernel"); + exit(1); + } + mh_load_addr = elf_low; + elf_kernel_size = elf_high - elf_low; + + if (pvh_start_addr == 0) { + error_report("Error loading uncompressed kernel without PVH ELF Note"); + exit(1); + } + fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_ENTRY, pvh_start_addr); + fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_ADDR, mh_load_addr); + fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_SIZE, elf_kernel_size); + + return true; +} + +void x86_load_linux(X86MachineState *x86ms, + FWCfgState *fw_cfg, + int acpi_data_size, + bool pvh_enabled) +{ + bool linuxboot_dma_enabled = X86_MACHINE_GET_CLASS(x86ms)->fwcfg_dma_enabled; + uint16_t protocol; + int setup_size, kernel_size, cmdline_size; + int dtb_size, setup_data_offset; + uint32_t initrd_max; + uint8_t header[8192], *setup, *kernel; + hwaddr real_addr, prot_addr, cmdline_addr, initrd_addr = 0; + FILE *f; + char *vmode; + MachineState *machine = MACHINE(x86ms); + struct setup_data *setup_data; + const char *kernel_filename = machine->kernel_filename; + const char *initrd_filename = machine->initrd_filename; + const char *dtb_filename = machine->dtb; + const char *kernel_cmdline = machine->kernel_cmdline; + SevKernelLoaderContext sev_load_ctx = {}; + + /* Align to 16 bytes as a paranoia measure */ + cmdline_size = (strlen(kernel_cmdline) + 16) & ~15; + + /* load the kernel header */ + f = fopen(kernel_filename, "rb"); + if (!f) { + fprintf(stderr, "qemu: could not open kernel file '%s': %s\n", + kernel_filename, strerror(errno)); + exit(1); + } + + kernel_size = get_file_size(f); + if (!kernel_size || + fread(header, 1, MIN(ARRAY_SIZE(header), kernel_size), f) != + MIN(ARRAY_SIZE(header), kernel_size)) { + fprintf(stderr, "qemu: could not load kernel '%s': %s\n", + kernel_filename, strerror(errno)); + exit(1); + } + + /* kernel protocol version */ + if (ldl_p(header + 0x202) == 0x53726448) { + protocol = lduw_p(header + 0x206); + } else { + /* + * This could be a multiboot kernel. If it is, let's stop treating it + * like a Linux kernel. + * Note: some multiboot images could be in the ELF format (the same as + * PVH), so we try multiboot first since we check the multiboot magic + * header before loading it. + */ + if (load_multiboot(x86ms, fw_cfg, f, kernel_filename, initrd_filename, + kernel_cmdline, kernel_size, header)) { + return; + } + /* + * Check if the file is an uncompressed kernel file (ELF) and load it, + * saving the PVH entry point used by the x86/HVM direct boot ABI. + * If load_elfboot() is successful, populate the fw_cfg info. 
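+ *
+ * For reference, the two magic numbers driving this detection are
+ * little-endian encodings of ASCII tags:
+ *
+ *   ldl_p(header + 0x202) == 0x53726448   ("HdrS": Linux boot protocol)
+ *   ldl_p(header)         == 0x464c457f   ("\x7fELF": ELF image, PVH candidate)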
+ */ + if (pvh_enabled && + load_elfboot(kernel_filename, kernel_size, + header, pvh_start_addr, fw_cfg)) { + fclose(f); + + fw_cfg_add_i32(fw_cfg, FW_CFG_CMDLINE_SIZE, + strlen(kernel_cmdline) + 1); + fw_cfg_add_string(fw_cfg, FW_CFG_CMDLINE_DATA, kernel_cmdline); + + fw_cfg_add_i32(fw_cfg, FW_CFG_SETUP_SIZE, sizeof(header)); + fw_cfg_add_bytes(fw_cfg, FW_CFG_SETUP_DATA, + header, sizeof(header)); + + /* load initrd */ + if (initrd_filename) { + GMappedFile *mapped_file; + gsize initrd_size; + gchar *initrd_data; + GError *gerr = NULL; + + mapped_file = g_mapped_file_new(initrd_filename, false, &gerr); + if (!mapped_file) { + fprintf(stderr, "qemu: error reading initrd %s: %s\n", + initrd_filename, gerr->message); + exit(1); + } + x86ms->initrd_mapped_file = mapped_file; + + initrd_data = g_mapped_file_get_contents(mapped_file); + initrd_size = g_mapped_file_get_length(mapped_file); + initrd_max = x86ms->below_4g_mem_size - acpi_data_size - 1; + if (initrd_size >= initrd_max) { + fprintf(stderr, "qemu: initrd is too large, cannot support. " + "(max: %"PRIu32", need %"PRId64")\n", + initrd_max, (uint64_t)initrd_size); + exit(1); + } + + initrd_addr = (initrd_max - initrd_size) & ~4095; + + fw_cfg_add_i32(fw_cfg, FW_CFG_INITRD_ADDR, initrd_addr); + fw_cfg_add_i32(fw_cfg, FW_CFG_INITRD_SIZE, initrd_size); + fw_cfg_add_bytes(fw_cfg, FW_CFG_INITRD_DATA, initrd_data, + initrd_size); + } + + option_rom[nb_option_roms].bootindex = 0; + option_rom[nb_option_roms].name = "pvh.bin"; + nb_option_roms++; + + return; + } + protocol = 0; + } + + if (protocol < 0x200 || !(header[0x211] & 0x01)) { + /* Low kernel */ + real_addr = 0x90000; + cmdline_addr = 0x9a000 - cmdline_size; + prot_addr = 0x10000; + } else if (protocol < 0x202) { + /* High but ancient kernel */ + real_addr = 0x90000; + cmdline_addr = 0x9a000 - cmdline_size; + prot_addr = 0x100000; + } else { + /* High and recent kernel */ + real_addr = 0x10000; + cmdline_addr = 0x20000; + prot_addr = 0x100000; + } + + /* highest address for loading the initrd */ + if (protocol >= 0x20c && + lduw_p(header + 0x236) & XLF_CAN_BE_LOADED_ABOVE_4G) { + /* + * Linux has supported initrd up to 4 GB for a very long time (2007, + * long before XLF_CAN_BE_LOADED_ABOVE_4G which was added in 2013), + * though it only sets initrd_max to 2 GB to "work around bootloader + * bugs". Luckily, QEMU firmware (which acts like a bootloader) has + * supported this. + * + * It's believed that if XLF_CAN_BE_LOADED_ABOVE_4G is set, initrd can + * be loaded at any address. + * + * In addition, initrd_max is uint32_t simply because QEMU doesn't + * support the 64-bit boot protocol (specifically the ext_ramdisk_image + * field). + * + * Therefore, simply limit initrd_max to UINT32_MAX here as well. 
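+ *
+ * Concretely, the guard above reads the 16-bit xloadflags field at
+ * header offset 0x236; XLF_CAN_BE_LOADED_ABOVE_4G is bit 1 of that
+ * field, so the test is effectively
+ *   lduw_p(header + 0x236) & (1 << 1)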
+         */
+        initrd_max = UINT32_MAX;
+    } else if (protocol >= 0x203) {
+        initrd_max = ldl_p(header + 0x22c);
+    } else {
+        initrd_max = 0x37ffffff;
+    }
+
+    if (initrd_max >= x86ms->below_4g_mem_size - acpi_data_size) {
+        initrd_max = x86ms->below_4g_mem_size - acpi_data_size - 1;
+    }
+
+    fw_cfg_add_i32(fw_cfg, FW_CFG_CMDLINE_ADDR, cmdline_addr);
+    fw_cfg_add_i32(fw_cfg, FW_CFG_CMDLINE_SIZE, strlen(kernel_cmdline) + 1);
+    fw_cfg_add_string(fw_cfg, FW_CFG_CMDLINE_DATA, kernel_cmdline);
+    sev_load_ctx.cmdline_data = (char *)kernel_cmdline;
+    sev_load_ctx.cmdline_size = strlen(kernel_cmdline) + 1;
+
+    if (protocol >= 0x202) {
+        stl_p(header + 0x228, cmdline_addr);
+    } else {
+        stw_p(header + 0x20, 0xA33F);
+        stw_p(header + 0x22, cmdline_addr - real_addr);
+    }
+
+    /* handle vga= parameter */
+    vmode = strstr(kernel_cmdline, "vga=");
+    if (vmode) {
+        unsigned int video_mode;
+        const char *end;
+        int ret;
+        /* skip "vga=" */
+        vmode += 4;
+        if (!strncmp(vmode, "normal", 6)) {
+            video_mode = 0xffff;
+        } else if (!strncmp(vmode, "ext", 3)) {
+            video_mode = 0xfffe;
+        } else if (!strncmp(vmode, "ask", 3)) {
+            video_mode = 0xfffd;
+        } else {
+            ret = qemu_strtoui(vmode, &end, 0, &video_mode);
+            if (ret != 0 || (*end && *end != ' ')) {
+                fprintf(stderr, "qemu: invalid 'vga=' kernel parameter.\n");
+                exit(1);
+            }
+        }
+        stw_p(header + 0x1fa, video_mode);
+    }
+
+    /* loader type */
+    /*
+     * High nybble = B reserved for QEMU; low nybble is revision number.
+     * If this code is substantially changed, you may want to consider
+     * incrementing the revision.
+     */
+    if (protocol >= 0x200) {
+        header[0x210] = 0xB0;
+    }
+    /* heap */
+    if (protocol >= 0x201) {
+        header[0x211] |= 0x80; /* CAN_USE_HEAP */
+        stw_p(header + 0x224, cmdline_addr - real_addr - 0x200);
+    }
+
+    /* load initrd */
+    if (initrd_filename) {
+        GMappedFile *mapped_file;
+        gsize initrd_size;
+        gchar *initrd_data;
+        GError *gerr = NULL;
+
+        if (protocol < 0x200) {
+            fprintf(stderr, "qemu: linux kernel too old to load a ram disk\n");
+            exit(1);
+        }
+
+        mapped_file = g_mapped_file_new(initrd_filename, false, &gerr);
+        if (!mapped_file) {
+            fprintf(stderr, "qemu: error reading initrd %s: %s\n",
+                    initrd_filename, gerr->message);
+            exit(1);
+        }
+        x86ms->initrd_mapped_file = mapped_file;
+
+        initrd_data = g_mapped_file_get_contents(mapped_file);
+        initrd_size = g_mapped_file_get_length(mapped_file);
+        if (initrd_size >= initrd_max) {
+            fprintf(stderr, "qemu: initrd is too large, cannot support. "
+ "(max: %"PRIu32", need %"PRId64")\n", + initrd_max, (uint64_t)initrd_size); + exit(1); + } + + initrd_addr = (initrd_max - initrd_size) & ~4095; + + fw_cfg_add_i32(fw_cfg, FW_CFG_INITRD_ADDR, initrd_addr); + fw_cfg_add_i32(fw_cfg, FW_CFG_INITRD_SIZE, initrd_size); + fw_cfg_add_bytes(fw_cfg, FW_CFG_INITRD_DATA, initrd_data, initrd_size); + sev_load_ctx.initrd_data = initrd_data; + sev_load_ctx.initrd_size = initrd_size; + + stl_p(header + 0x218, initrd_addr); + stl_p(header + 0x21c, initrd_size); + } + + /* load kernel and setup */ + setup_size = header[0x1f1]; + if (setup_size == 0) { + setup_size = 4; + } + setup_size = (setup_size + 1) * 512; + if (setup_size > kernel_size) { + fprintf(stderr, "qemu: invalid kernel header\n"); + exit(1); + } + kernel_size -= setup_size; + + setup = g_malloc(setup_size); + kernel = g_malloc(kernel_size); + fseek(f, 0, SEEK_SET); + if (fread(setup, 1, setup_size, f) != setup_size) { + fprintf(stderr, "fread() failed\n"); + exit(1); + } + if (fread(kernel, 1, kernel_size, f) != kernel_size) { + fprintf(stderr, "fread() failed\n"); + exit(1); + } + fclose(f); + + /* append dtb to kernel */ + if (dtb_filename) { + if (protocol < 0x209) { + fprintf(stderr, "qemu: Linux kernel too old to load a dtb\n"); + exit(1); + } + + dtb_size = get_image_size(dtb_filename); + if (dtb_size <= 0) { + fprintf(stderr, "qemu: error reading dtb %s: %s\n", + dtb_filename, strerror(errno)); + exit(1); + } + + setup_data_offset = QEMU_ALIGN_UP(kernel_size, 16); + kernel_size = setup_data_offset + sizeof(struct setup_data) + dtb_size; + kernel = g_realloc(kernel, kernel_size); + + stq_p(header + 0x250, prot_addr + setup_data_offset); + + setup_data = (struct setup_data *)(kernel + setup_data_offset); + setup_data->next = 0; + setup_data->type = cpu_to_le32(SETUP_DTB); + setup_data->len = cpu_to_le32(dtb_size); + + load_image_size(dtb_filename, setup_data->data, dtb_size); + } + + /* + * If we're starting an encrypted VM, it will be OVMF based, which uses the + * efi stub for booting and doesn't require any values to be placed in the + * kernel header. We therefore don't update the header so the hash of the + * kernel on the other side of the fw_cfg interface matches the hash of the + * file the user passed in. 
+ */ + if (!sev_enabled()) { + memcpy(setup, header, MIN(sizeof(header), setup_size)); + } + + fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_ADDR, prot_addr); + fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_SIZE, kernel_size); + fw_cfg_add_bytes(fw_cfg, FW_CFG_KERNEL_DATA, kernel, kernel_size); + sev_load_ctx.kernel_data = (char *)kernel; + sev_load_ctx.kernel_size = kernel_size; + + fw_cfg_add_i32(fw_cfg, FW_CFG_SETUP_ADDR, real_addr); + fw_cfg_add_i32(fw_cfg, FW_CFG_SETUP_SIZE, setup_size); + fw_cfg_add_bytes(fw_cfg, FW_CFG_SETUP_DATA, setup, setup_size); + sev_load_ctx.setup_data = (char *)setup; + sev_load_ctx.setup_size = setup_size; + + if (sev_enabled()) { + sev_add_kernel_loader_hashes(&sev_load_ctx, &error_fatal); + } + + option_rom[nb_option_roms].bootindex = 0; + option_rom[nb_option_roms].name = "linuxboot.bin"; + if (linuxboot_dma_enabled && fw_cfg_dma_enabled(fw_cfg)) { + option_rom[nb_option_roms].name = "linuxboot_dma.bin"; + } + nb_option_roms++; +} + +void x86_bios_rom_init(MachineState *ms, const char *default_firmware, + MemoryRegion *rom_memory, bool isapc_ram_fw) +{ + const char *bios_name; + char *filename; + MemoryRegion *bios, *isa_bios; + int bios_size, isa_bios_size; + int ret; + + /* BIOS load */ + bios_name = ms->firmware ?: default_firmware; + filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name); + if (filename) { + bios_size = get_image_size(filename); + } else { + bios_size = -1; + } + if (bios_size <= 0 || + (bios_size % 65536) != 0) { + goto bios_error; + } + bios = g_malloc(sizeof(*bios)); + memory_region_init_ram(bios, NULL, "pc.bios", bios_size, &error_fatal); + if (!isapc_ram_fw) { + memory_region_set_readonly(bios, true); + } + ret = rom_add_file_fixed(bios_name, (uint32_t)(-bios_size), -1); + if (ret != 0) { + bios_error: + fprintf(stderr, "qemu: could not load PC BIOS '%s'\n", bios_name); + exit(1); + } + g_free(filename); + + /* map the last 128KB of the BIOS in ISA space */ + isa_bios_size = MIN(bios_size, 128 * KiB); + isa_bios = g_malloc(sizeof(*isa_bios)); + memory_region_init_alias(isa_bios, NULL, "isa-bios", bios, + bios_size - isa_bios_size, isa_bios_size); + memory_region_add_subregion_overlap(rom_memory, + 0x100000 - isa_bios_size, + isa_bios, + 1); + if (!isapc_ram_fw) { + memory_region_set_readonly(isa_bios, true); + } + + /* map all the bios at the top of memory */ + memory_region_add_subregion(rom_memory, + (uint32_t)(-bios_size), + bios); +} + +bool x86_machine_is_smm_enabled(const X86MachineState *x86ms) +{ + bool smm_available = false; + + if (x86ms->smm == ON_OFF_AUTO_OFF) { + return false; + } + + if (tcg_enabled() || qtest_enabled()) { + smm_available = true; + } else if (kvm_enabled()) { + smm_available = kvm_has_smm(); + } + + if (smm_available) { + return true; + } + + if (x86ms->smm == ON_OFF_AUTO_ON) { + error_report("System Management Mode not supported by this hypervisor."); + exit(1); + } + return false; +} + +static void x86_machine_get_smm(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + X86MachineState *x86ms = X86_MACHINE(obj); + OnOffAuto smm = x86ms->smm; + + visit_type_OnOffAuto(v, name, &smm, errp); +} + +static void x86_machine_set_smm(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + X86MachineState *x86ms = X86_MACHINE(obj); + + visit_type_OnOffAuto(v, name, &x86ms->smm, errp); +} + +bool x86_machine_is_acpi_enabled(const X86MachineState *x86ms) +{ + if (x86ms->acpi == ON_OFF_AUTO_OFF) { + return false; + } + return true; +} + +static void x86_machine_get_acpi(Object 
*obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + X86MachineState *x86ms = X86_MACHINE(obj); + OnOffAuto acpi = x86ms->acpi; + + visit_type_OnOffAuto(v, name, &acpi, errp); +} + +static void x86_machine_set_acpi(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + X86MachineState *x86ms = X86_MACHINE(obj); + + visit_type_OnOffAuto(v, name, &x86ms->acpi, errp); +} + +static char *x86_machine_get_oem_id(Object *obj, Error **errp) +{ + X86MachineState *x86ms = X86_MACHINE(obj); + + return g_strdup(x86ms->oem_id); +} + +static void x86_machine_set_oem_id(Object *obj, const char *value, Error **errp) +{ + X86MachineState *x86ms = X86_MACHINE(obj); + size_t len = strlen(value); + + if (len > 6) { + error_setg(errp, + "User specified "X86_MACHINE_OEM_ID" value is bigger than " + "6 bytes in size"); + return; + } + + strncpy(x86ms->oem_id, value, 6); +} + +static char *x86_machine_get_oem_table_id(Object *obj, Error **errp) +{ + X86MachineState *x86ms = X86_MACHINE(obj); + + return g_strdup(x86ms->oem_table_id); +} + +static void x86_machine_set_oem_table_id(Object *obj, const char *value, + Error **errp) +{ + X86MachineState *x86ms = X86_MACHINE(obj); + size_t len = strlen(value); + + if (len > 8) { + error_setg(errp, + "User specified "X86_MACHINE_OEM_TABLE_ID + " value is bigger than " + "8 bytes in size"); + return; + } + strncpy(x86ms->oem_table_id, value, 8); +} + +static void x86_machine_get_bus_lock_ratelimit(Object *obj, Visitor *v, + const char *name, void *opaque, Error **errp) +{ + X86MachineState *x86ms = X86_MACHINE(obj); + uint64_t bus_lock_ratelimit = x86ms->bus_lock_ratelimit; + + visit_type_uint64(v, name, &bus_lock_ratelimit, errp); +} + +static void x86_machine_set_bus_lock_ratelimit(Object *obj, Visitor *v, + const char *name, void *opaque, Error **errp) +{ + X86MachineState *x86ms = X86_MACHINE(obj); + + visit_type_uint64(v, name, &x86ms->bus_lock_ratelimit, errp); +} + +static void machine_get_sgx_epc(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + X86MachineState *x86ms = X86_MACHINE(obj); + SgxEPCList *list = x86ms->sgx_epc_list; + + visit_type_SgxEPCList(v, name, &list, errp); +} + +static void machine_set_sgx_epc(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + X86MachineState *x86ms = X86_MACHINE(obj); + SgxEPCList *list; + + list = x86ms->sgx_epc_list; + visit_type_SgxEPCList(v, name, &x86ms->sgx_epc_list, errp); + + qapi_free_SgxEPCList(list); +} + +static void x86_machine_initfn(Object *obj) +{ + X86MachineState *x86ms = X86_MACHINE(obj); + + x86ms->smm = ON_OFF_AUTO_AUTO; + x86ms->acpi = ON_OFF_AUTO_AUTO; + x86ms->pci_irq_mask = ACPI_BUILD_PCI_IRQS; + x86ms->oem_id = g_strndup(ACPI_BUILD_APPNAME6, 6); + x86ms->oem_table_id = g_strndup(ACPI_BUILD_APPNAME8, 8); + x86ms->bus_lock_ratelimit = 0; +} + +static void x86_machine_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + X86MachineClass *x86mc = X86_MACHINE_CLASS(oc); + NMIClass *nc = NMI_CLASS(oc); + + mc->cpu_index_to_instance_props = x86_cpu_index_to_props; + mc->get_default_cpu_node_id = x86_get_default_cpu_node_id; + mc->possible_cpu_arch_ids = x86_possible_cpu_arch_ids; + x86mc->compat_apic_id_mode = false; + x86mc->save_tsc_khz = true; + x86mc->fwcfg_dma_enabled = true; + nc->nmi_monitor_handler = x86_nmi; + + object_class_property_add(oc, X86_MACHINE_SMM, "OnOffAuto", + x86_machine_get_smm, x86_machine_set_smm, + NULL, NULL); + object_class_property_set_description(oc, 
X86_MACHINE_SMM,
+                                          "Enable SMM");
+
+    object_class_property_add(oc, X86_MACHINE_ACPI, "OnOffAuto",
+                              x86_machine_get_acpi, x86_machine_set_acpi,
+                              NULL, NULL);
+    object_class_property_set_description(oc, X86_MACHINE_ACPI,
+                                          "Enable ACPI");
+
+    object_class_property_add_str(oc, X86_MACHINE_OEM_ID,
+                                  x86_machine_get_oem_id,
+                                  x86_machine_set_oem_id);
+    object_class_property_set_description(oc, X86_MACHINE_OEM_ID,
+                                          "Override the default value of field OEMID "
+                                          "in ACPI table header. "
+                                          "The string may be up to 6 bytes in size");
+
+    object_class_property_add_str(oc, X86_MACHINE_OEM_TABLE_ID,
+                                  x86_machine_get_oem_table_id,
+                                  x86_machine_set_oem_table_id);
+    object_class_property_set_description(oc, X86_MACHINE_OEM_TABLE_ID,
+                                          "Override the default value of field OEM Table ID "
+                                          "in ACPI table header. "
+                                          "The string may be up to 8 bytes in size");
+
+    object_class_property_add(oc, X86_MACHINE_BUS_LOCK_RATELIMIT, "uint64_t",
+                              x86_machine_get_bus_lock_ratelimit,
+                              x86_machine_set_bus_lock_ratelimit, NULL, NULL);
+    object_class_property_set_description(oc, X86_MACHINE_BUS_LOCK_RATELIMIT,
+            "Set the ratelimit for the bus locks acquired in VMs");
+
+    object_class_property_add(oc, "sgx-epc", "SgxEPC",
+                              machine_get_sgx_epc, machine_set_sgx_epc,
+                              NULL, NULL);
+    object_class_property_set_description(oc, "sgx-epc",
+                                          "SGX EPC device");
+}
+
+static const TypeInfo x86_machine_info = {
+    .name = TYPE_X86_MACHINE,
+    .parent = TYPE_MACHINE,
+    .abstract = true,
+    .instance_size = sizeof(X86MachineState),
+    .instance_init = x86_machine_initfn,
+    .class_size = sizeof(X86MachineClass),
+    .class_init = x86_machine_class_init,
+    .interfaces = (InterfaceInfo[]) {
+        { TYPE_NMI },
+        { }
+    },
+};
+
+static void x86_machine_register_types(void)
+{
+    type_register_static(&x86_machine_info);
+}
+
+type_init(x86_machine_register_types)
diff --git a/hw/i386/xen/meson.build b/hw/i386/xen/meson.build
new file mode 100644
index 000000000..be8413030
--- /dev/null
+++ b/hw/i386/xen/meson.build
@@ -0,0 +1,7 @@
+i386_ss.add(when: 'CONFIG_XEN', if_true: files(
+  'xen-hvm.c',
+  'xen-mapcache.c',
+  'xen_apic.c',
+  'xen_platform.c',
+  'xen_pvdevice.c',
+))
diff --git a/hw/i386/xen/trace-events b/hw/i386/xen/trace-events
new file mode 100644
index 000000000..5d6be6109
--- /dev/null
+++ b/hw/i386/xen/trace-events
@@ -0,0 +1,28 @@
+# See docs/devel/tracing.rst for syntax documentation.
+ +# xen_platform.c +xen_platform_log(char *s) "xen platform: %s" + +# xen_pvdevice.c +xen_pv_mmio_read(uint64_t addr) "WARNING: read from Xen PV Device MMIO space (address 0x%"PRIx64")" +xen_pv_mmio_write(uint64_t addr) "WARNING: write to Xen PV Device MMIO space (address 0x%"PRIx64")" + +# xen-hvm.c +xen_ram_alloc(unsigned long ram_addr, unsigned long size) "requested: 0x%lx, size 0x%lx" +xen_client_set_memory(uint64_t start_addr, unsigned long size, bool log_dirty) "0x%"PRIx64" size 0x%lx, log_dirty %i" +handle_ioreq(void *req, uint32_t type, uint32_t dir, uint32_t df, uint32_t data_is_ptr, uint64_t addr, uint64_t data, uint32_t count, uint32_t size) "I/O=%p type=%d dir=%d df=%d ptr=%d port=0x%"PRIx64" data=0x%"PRIx64" count=%d size=%d" +handle_ioreq_read(void *req, uint32_t type, uint32_t df, uint32_t data_is_ptr, uint64_t addr, uint64_t data, uint32_t count, uint32_t size) "I/O=%p read type=%d df=%d ptr=%d port=0x%"PRIx64" data=0x%"PRIx64" count=%d size=%d" +handle_ioreq_write(void *req, uint32_t type, uint32_t df, uint32_t data_is_ptr, uint64_t addr, uint64_t data, uint32_t count, uint32_t size) "I/O=%p write type=%d df=%d ptr=%d port=0x%"PRIx64" data=0x%"PRIx64" count=%d size=%d" +cpu_ioreq_pio(void *req, uint32_t dir, uint32_t df, uint32_t data_is_ptr, uint64_t addr, uint64_t data, uint32_t count, uint32_t size) "I/O=%p pio dir=%d df=%d ptr=%d port=0x%"PRIx64" data=0x%"PRIx64" count=%d size=%d" +cpu_ioreq_pio_read_reg(void *req, uint64_t data, uint64_t addr, uint32_t size) "I/O=%p pio read reg data=0x%"PRIx64" port=0x%"PRIx64" size=%d" +cpu_ioreq_pio_write_reg(void *req, uint64_t data, uint64_t addr, uint32_t size) "I/O=%p pio write reg data=0x%"PRIx64" port=0x%"PRIx64" size=%d" +cpu_ioreq_move(void *req, uint32_t dir, uint32_t df, uint32_t data_is_ptr, uint64_t addr, uint64_t data, uint32_t count, uint32_t size) "I/O=%p copy dir=%d df=%d ptr=%d port=0x%"PRIx64" data=0x%"PRIx64" count=%d size=%d" +xen_map_resource_ioreq(uint32_t id, void *addr) "id: %u addr: %p" +cpu_ioreq_config_read(void *req, uint32_t sbdf, uint32_t reg, uint32_t size, uint32_t data) "I/O=%p sbdf=0x%x reg=%u size=%u data=0x%x" +cpu_ioreq_config_write(void *req, uint32_t sbdf, uint32_t reg, uint32_t size, uint32_t data) "I/O=%p sbdf=0x%x reg=%u size=%u data=0x%x" + +# xen-mapcache.c +xen_map_cache(uint64_t phys_addr) "want 0x%"PRIx64 +xen_remap_bucket(uint64_t index) "index 0x%"PRIx64 +xen_map_cache_return(void* ptr) "%p" + diff --git a/hw/i386/xen/trace.h b/hw/i386/xen/trace.h new file mode 100644 index 000000000..a02bf755d --- /dev/null +++ b/hw/i386/xen/trace.h @@ -0,0 +1 @@ +#include "trace/trace-hw_i386_xen.h" diff --git a/hw/i386/xen/xen-hvm.c b/hw/i386/xen/xen-hvm.c new file mode 100644 index 000000000..482be9541 --- /dev/null +++ b/hw/i386/xen/xen-hvm.c @@ -0,0 +1,1620 @@ +/* + * Copyright (C) 2010 Citrix Ltd. + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + * + * Contributions after 2012-01-13 are licensed under the terms of the + * GNU GPL, version 2 or (at your option) any later version. 
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/units.h"
+
+#include "cpu.h"
+#include "hw/pci/pci.h"
+#include "hw/pci/pci_host.h"
+#include "hw/i386/pc.h"
+#include "hw/southbridge/piix.h"
+#include "hw/irq.h"
+#include "hw/hw.h"
+#include "hw/i386/apic-msidef.h"
+#include "hw/xen/xen_common.h"
+#include "hw/xen/xen-legacy-backend.h"
+#include "hw/xen/xen-bus.h"
+#include "hw/xen/xen-x86.h"
+#include "qapi/error.h"
+#include "qapi/qapi-commands-migration.h"
+#include "qemu/error-report.h"
+#include "qemu/main-loop.h"
+#include "qemu/range.h"
+#include "sysemu/runstate.h"
+#include "sysemu/sysemu.h"
+#include "sysemu/xen.h"
+#include "sysemu/xen-mapcache.h"
+#include "trace.h"
+
+#include <xen/hvm/ioreq.h>
+#include <xen/hvm/e820.h>
+
+//#define DEBUG_XEN_HVM
+
+#ifdef DEBUG_XEN_HVM
+#define DPRINTF(fmt, ...) \
+    do { fprintf(stderr, "xen: " fmt, ## __VA_ARGS__); } while (0)
+#else
+#define DPRINTF(fmt, ...) \
+    do { } while (0)
+#endif
+
+static MemoryRegion ram_memory, ram_640k, ram_lo, ram_hi;
+static MemoryRegion *framebuffer;
+static bool xen_in_migration;
+
+/* Compatibility with older version */
+
+/* This allows QEMU to build on a system that has Xen 4.5 or earlier
+ * installed. This is here (not in hw/xen/xen_common.h) because xen/hvm/ioreq.h
+ * needs to be included before this block and hw/xen/xen_common.h needs to
+ * be included before xen/hvm/ioreq.h
+ */
+#ifndef IOREQ_TYPE_VMWARE_PORT
+#define IOREQ_TYPE_VMWARE_PORT 3
+struct vmware_regs {
+    uint32_t esi;
+    uint32_t edi;
+    uint32_t ebx;
+    uint32_t ecx;
+    uint32_t edx;
+};
+typedef struct vmware_regs vmware_regs_t;
+
+struct shared_vmport_iopage {
+    struct vmware_regs vcpu_vmport_regs[1];
+};
+typedef struct shared_vmport_iopage shared_vmport_iopage_t;
+#endif
+
+static inline uint32_t xen_vcpu_eport(shared_iopage_t *shared_page, int i)
+{
+    return shared_page->vcpu_ioreq[i].vp_eport;
+}
+static inline ioreq_t *xen_vcpu_ioreq(shared_iopage_t *shared_page, int vcpu)
+{
+    return &shared_page->vcpu_ioreq[vcpu];
+}
+
+#define BUFFER_IO_MAX_DELAY 100
+
+typedef struct XenPhysmap {
+    hwaddr start_addr;
+    ram_addr_t size;
+    const char *name;
+    hwaddr phys_offset;
+
+    QLIST_ENTRY(XenPhysmap) list;
+} XenPhysmap;
+
+static QLIST_HEAD(, XenPhysmap) xen_physmap;
+
+typedef struct XenPciDevice {
+    PCIDevice *pci_dev;
+    uint32_t sbdf;
+    QLIST_ENTRY(XenPciDevice) entry;
+} XenPciDevice;
+
+typedef struct XenIOState {
+    ioservid_t ioservid;
+    shared_iopage_t *shared_page;
+    shared_vmport_iopage_t *shared_vmport_page;
+    buffered_iopage_t *buffered_io_page;
+    xenforeignmemory_resource_handle *fres;
+    QEMUTimer *buffered_io_timer;
+    CPUState **cpu_by_vcpu_id;
+    /* the evtchn port for polling the notification */
+    evtchn_port_t *ioreq_local_port;
+    /* evtchn remote and local ports for buffered io */
+    evtchn_port_t bufioreq_remote_port;
+    evtchn_port_t bufioreq_local_port;
+    /* the evtchn fd for polling */
+    xenevtchn_handle *xce_handle;
+    /* which vcpu we are serving */
+    int send_vcpu;
+
+    struct xs_handle *xenstore;
+    MemoryListener memory_listener;
+    MemoryListener io_listener;
+    QLIST_HEAD(, XenPciDevice) dev_list;
+    DeviceListener device_listener;
+    hwaddr free_phys_offset;
+    const XenPhysmap *log_for_dirtybit;
+    /* Buffer used by xen_sync_dirty_bitmap */
+    unsigned long *dirty_bitmap;
+
+    Notifier exit;
+    Notifier suspend;
+    Notifier wakeup;
+} XenIOState;
+
+/* Xen specific function for piix pci */
+
+int xen_pci_slot_get_pirq(PCIDevice *pci_dev, int irq_num)
+{
+    return irq_num + (PCI_SLOT(pci_dev->devfn) << 2);
+}
+
+void xen_piix3_set_irq(void *opaque, int irq_num, int level)
+{
+    xen_set_pci_intx_level(xen_domid, 0, 0, irq_num >> 2,
+                           irq_num & 3, level);
+}
+
+void xen_piix_pci_write_config_client(uint32_t address, uint32_t val, int len)
+{
+    int i;
+
+    /* Scan for updates to PCI link routes (0x60-0x63). */
+    for (i = 0; i < len; i++) {
+        uint8_t v = (val >> (8 * i)) & 0xff;
+        if (v & 0x80) {
+            v = 0;
+        }
+        v &= 0xf;
+        if (((address + i) >= PIIX_PIRQCA) && ((address + i) <= PIIX_PIRQCD)) {
+            xen_set_pci_link_route(xen_domid, address + i - PIIX_PIRQCA, v);
+        }
+    }
+}
+
+int xen_is_pirq_msi(uint32_t msi_data)
+{
+    /* If vector is 0, the msi is remapped into a pirq, passed as
+     * dest_id.
+     */
+    return ((msi_data & MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT) == 0;
+}
+
+void xen_hvm_inject_msi(uint64_t addr, uint32_t data)
+{
+    xen_inject_msi(xen_domid, addr, data);
+}
+
+static void xen_suspend_notifier(Notifier *notifier, void *data)
+{
+    xc_set_hvm_param(xen_xc, xen_domid, HVM_PARAM_ACPI_S_STATE, 3);
+}
+
+/* Xen Interrupt Controller */
+
+static void xen_set_irq(void *opaque, int irq, int level)
+{
+    xen_set_isa_irq_level(xen_domid, irq, level);
+}
+
+qemu_irq *xen_interrupt_controller_init(void)
+{
+    return qemu_allocate_irqs(xen_set_irq, NULL, 16);
+}
+
+/* Memory Ops */
+
+static void xen_ram_init(PCMachineState *pcms,
+                         ram_addr_t ram_size, MemoryRegion **ram_memory_p)
+{
+    X86MachineState *x86ms = X86_MACHINE(pcms);
+    MemoryRegion *sysmem = get_system_memory();
+    ram_addr_t block_len;
+    uint64_t user_lowmem =
+        object_property_get_uint(qdev_get_machine(),
+                                 PC_MACHINE_MAX_RAM_BELOW_4G,
+                                 &error_abort);
+
+    /* Handle the machine option max-ram-below-4g. It is basically doing
+     * min(xen limit, user limit).
+     */
+    if (!user_lowmem) {
+        user_lowmem = HVM_BELOW_4G_RAM_END; /* default */
+    }
+    if (HVM_BELOW_4G_RAM_END <= user_lowmem) {
+        user_lowmem = HVM_BELOW_4G_RAM_END;
+    }
+
+    if (ram_size >= user_lowmem) {
+        x86ms->above_4g_mem_size = ram_size - user_lowmem;
+        x86ms->below_4g_mem_size = user_lowmem;
+    } else {
+        x86ms->above_4g_mem_size = 0;
+        x86ms->below_4g_mem_size = ram_size;
+    }
+    if (!x86ms->above_4g_mem_size) {
+        block_len = ram_size;
+    } else {
+        /*
+         * Xen does not allocate the memory continuously; it keeps a
+         * hole of the size computed above or passed in.
+         */
+        block_len = (4 * GiB) + x86ms->above_4g_mem_size;
+    }
+    memory_region_init_ram(&ram_memory, NULL, "xen.ram", block_len,
+                           &error_fatal);
+    *ram_memory_p = &ram_memory;
+
+    memory_region_init_alias(&ram_640k, NULL, "xen.ram.640k",
+                             &ram_memory, 0, 0xa0000);
+    memory_region_add_subregion(sysmem, 0, &ram_640k);
+    /* Skip the VGA IO memory space; it will be registered later by the
+     * emulated VGA device.
+     *
+     * The area between 0xc0000 and 0x100000 will be used by SeaBIOS to load
+     * the Option ROMs, so it is registered here as RAM.
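+     * The VGA window at 0xa0000-0xc0000 is therefore deliberately left
+     * unmapped at this point.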
+ */ + memory_region_init_alias(&ram_lo, NULL, "xen.ram.lo", + &ram_memory, 0xc0000, + x86ms->below_4g_mem_size - 0xc0000); + memory_region_add_subregion(sysmem, 0xc0000, &ram_lo); + if (x86ms->above_4g_mem_size > 0) { + memory_region_init_alias(&ram_hi, NULL, "xen.ram.hi", + &ram_memory, 0x100000000ULL, + x86ms->above_4g_mem_size); + memory_region_add_subregion(sysmem, 0x100000000ULL, &ram_hi); + } +} + +void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size, MemoryRegion *mr, + Error **errp) +{ + unsigned long nr_pfn; + xen_pfn_t *pfn_list; + int i; + + if (runstate_check(RUN_STATE_INMIGRATE)) { + /* RAM already populated in Xen */ + fprintf(stderr, "%s: do not alloc "RAM_ADDR_FMT + " bytes of ram at "RAM_ADDR_FMT" when runstate is INMIGRATE\n", + __func__, size, ram_addr); + return; + } + + if (mr == &ram_memory) { + return; + } + + trace_xen_ram_alloc(ram_addr, size); + + nr_pfn = size >> TARGET_PAGE_BITS; + pfn_list = g_malloc(sizeof (*pfn_list) * nr_pfn); + + for (i = 0; i < nr_pfn; i++) { + pfn_list[i] = (ram_addr >> TARGET_PAGE_BITS) + i; + } + + if (xc_domain_populate_physmap_exact(xen_xc, xen_domid, nr_pfn, 0, 0, pfn_list)) { + error_setg(errp, "xen: failed to populate ram at " RAM_ADDR_FMT, + ram_addr); + } + + g_free(pfn_list); +} + +static XenPhysmap *get_physmapping(hwaddr start_addr, ram_addr_t size) +{ + XenPhysmap *physmap = NULL; + + start_addr &= TARGET_PAGE_MASK; + + QLIST_FOREACH(physmap, &xen_physmap, list) { + if (range_covers_byte(physmap->start_addr, physmap->size, start_addr)) { + return physmap; + } + } + return NULL; +} + +static hwaddr xen_phys_offset_to_gaddr(hwaddr phys_offset, ram_addr_t size) +{ + hwaddr addr = phys_offset & TARGET_PAGE_MASK; + XenPhysmap *physmap = NULL; + + QLIST_FOREACH(physmap, &xen_physmap, list) { + if (range_covers_byte(physmap->phys_offset, physmap->size, addr)) { + return physmap->start_addr + (phys_offset - physmap->phys_offset); + } + } + + return phys_offset; +} + +#ifdef XEN_COMPAT_PHYSMAP +static int xen_save_physmap(XenIOState *state, XenPhysmap *physmap) +{ + char path[80], value[17]; + + snprintf(path, sizeof(path), + "/local/domain/0/device-model/%d/physmap/%"PRIx64"/start_addr", + xen_domid, (uint64_t)physmap->phys_offset); + snprintf(value, sizeof(value), "%"PRIx64, (uint64_t)physmap->start_addr); + if (!xs_write(state->xenstore, 0, path, value, strlen(value))) { + return -1; + } + snprintf(path, sizeof(path), + "/local/domain/0/device-model/%d/physmap/%"PRIx64"/size", + xen_domid, (uint64_t)physmap->phys_offset); + snprintf(value, sizeof(value), "%"PRIx64, (uint64_t)physmap->size); + if (!xs_write(state->xenstore, 0, path, value, strlen(value))) { + return -1; + } + if (physmap->name) { + snprintf(path, sizeof(path), + "/local/domain/0/device-model/%d/physmap/%"PRIx64"/name", + xen_domid, (uint64_t)physmap->phys_offset); + if (!xs_write(state->xenstore, 0, path, + physmap->name, strlen(physmap->name))) { + return -1; + } + } + return 0; +} +#else +static int xen_save_physmap(XenIOState *state, XenPhysmap *physmap) +{ + return 0; +} +#endif + +static int xen_add_to_physmap(XenIOState *state, + hwaddr start_addr, + ram_addr_t size, + MemoryRegion *mr, + hwaddr offset_within_region) +{ + unsigned long nr_pages; + int rc = 0; + XenPhysmap *physmap = NULL; + hwaddr pfn, start_gpfn; + hwaddr phys_offset = memory_region_get_ram_addr(mr); + const char *mr_name; + + if (get_physmapping(start_addr, size)) { + return 0; + } + if (size <= 0) { + return -1; + } + + /* Xen can only handle a single dirty log region for now and we 
want
+     * the linear framebuffer to be that region.
+     * Avoid tracking any regions that are not videoram and avoid tracking
+     * the legacy vga region. */
+    if (mr == framebuffer && start_addr > 0xbffff) {
+        goto go_physmap;
+    }
+    return -1;
+
+go_physmap:
+    DPRINTF("mapping vram to %"HWADDR_PRIx" - %"HWADDR_PRIx"\n",
+            start_addr, start_addr + size);
+
+    mr_name = memory_region_name(mr);
+
+    physmap = g_malloc(sizeof(XenPhysmap));
+
+    physmap->start_addr = start_addr;
+    physmap->size = size;
+    physmap->name = mr_name;
+    physmap->phys_offset = phys_offset;
+
+    QLIST_INSERT_HEAD(&xen_physmap, physmap, list);
+
+    if (runstate_check(RUN_STATE_INMIGRATE)) {
+        /* Now that we have a physmap entry we can replace the dummy mapping
+         * with a real one backed by guest foreign memory. */
+        uint8_t *p = xen_replace_cache_entry(phys_offset, start_addr, size);
+        assert(p && p == memory_region_get_ram_ptr(mr));
+
+        return 0;
+    }
+
+    pfn = phys_offset >> TARGET_PAGE_BITS;
+    start_gpfn = start_addr >> TARGET_PAGE_BITS;
+    nr_pages = size >> TARGET_PAGE_BITS;
+    rc = xendevicemodel_relocate_memory(xen_dmod, xen_domid, nr_pages, pfn,
+                                        start_gpfn);
+    if (rc) {
+        int saved_errno = errno;
+
+        error_report("relocate_memory %lu pages from GFN %"HWADDR_PRIx
+                     " to GFN %"HWADDR_PRIx" failed: %s",
+                     nr_pages, pfn, start_gpfn, strerror(saved_errno));
+        errno = saved_errno;
+        return -1;
+    }
+
+    rc = xendevicemodel_pin_memory_cacheattr(xen_dmod, xen_domid,
+                                   start_addr >> TARGET_PAGE_BITS,
+                                   (start_addr + size - 1) >> TARGET_PAGE_BITS,
+                                   XEN_DOMCTL_MEM_CACHEATTR_WB);
+    if (rc) {
+        error_report("pin_memory_cacheattr failed: %s", strerror(errno));
+    }
+    return xen_save_physmap(state, physmap);
+}
+
+static int xen_remove_from_physmap(XenIOState *state,
+                                   hwaddr start_addr,
+                                   ram_addr_t size)
+{
+    int rc = 0;
+    XenPhysmap *physmap = NULL;
+    hwaddr phys_offset = 0;
+
+    physmap = get_physmapping(start_addr, size);
+    if (physmap == NULL) {
+        return -1;
+    }
+
+    phys_offset = physmap->phys_offset;
+    size = physmap->size;
+
+    DPRINTF("unmapping vram to %"HWADDR_PRIx" - %"HWADDR_PRIx", at "
+            "%"HWADDR_PRIx"\n", start_addr, start_addr + size, phys_offset);
+
+    size >>= TARGET_PAGE_BITS;
+    start_addr >>= TARGET_PAGE_BITS;
+    phys_offset >>= TARGET_PAGE_BITS;
+    rc = xendevicemodel_relocate_memory(xen_dmod, xen_domid, size, start_addr,
+                                        phys_offset);
+    if (rc) {
+        int saved_errno = errno;
+
+        error_report("relocate_memory "RAM_ADDR_FMT" pages"
+                     " from GFN %"HWADDR_PRIx
+                     " to GFN %"HWADDR_PRIx" failed: %s",
+                     size, start_addr, phys_offset, strerror(saved_errno));
+        errno = saved_errno;
+        return -1;
+    }
+
+    QLIST_REMOVE(physmap, list);
+    if (state->log_for_dirtybit == physmap) {
+        state->log_for_dirtybit = NULL;
+        g_free(state->dirty_bitmap);
+        state->dirty_bitmap = NULL;
+    }
+    g_free(physmap);
+
+    return 0;
+}
+
+static void xen_set_memory(struct MemoryListener *listener,
+                           MemoryRegionSection *section,
+                           bool add)
+{
+    XenIOState *state = container_of(listener, XenIOState, memory_listener);
+    hwaddr start_addr = section->offset_within_address_space;
+    ram_addr_t size = int128_get64(section->size);
+    bool log_dirty = memory_region_is_logging(section->mr, DIRTY_MEMORY_VGA);
+    hvmmem_type_t mem_type;
+
+    if (section->mr == &ram_memory) {
+        return;
+    } else {
+        if (add) {
+            xen_map_memory_section(xen_domid, state->ioservid,
+                                   section);
+        } else {
+            xen_unmap_memory_section(xen_domid, state->ioservid,
+                                     section);
+        }
+    }
+
+    if (!memory_region_is_ram(section->mr)) {
+        return;
+    }
+
+    if (log_dirty != add) {
+        return;
+    }
+
trace_xen_client_set_memory(start_addr, size, log_dirty); + + start_addr &= TARGET_PAGE_MASK; + size = TARGET_PAGE_ALIGN(size); + + if (add) { + if (!memory_region_is_rom(section->mr)) { + xen_add_to_physmap(state, start_addr, size, + section->mr, section->offset_within_region); + } else { + mem_type = HVMMEM_ram_ro; + if (xen_set_mem_type(xen_domid, mem_type, + start_addr >> TARGET_PAGE_BITS, + size >> TARGET_PAGE_BITS)) { + DPRINTF("xen_set_mem_type error, addr: "TARGET_FMT_plx"\n", + start_addr); + } + } + } else { + if (xen_remove_from_physmap(state, start_addr, size) < 0) { + DPRINTF("physmapping does not exist at "TARGET_FMT_plx"\n", start_addr); + } + } +} + +static void xen_region_add(MemoryListener *listener, + MemoryRegionSection *section) +{ + memory_region_ref(section->mr); + xen_set_memory(listener, section, true); +} + +static void xen_region_del(MemoryListener *listener, + MemoryRegionSection *section) +{ + xen_set_memory(listener, section, false); + memory_region_unref(section->mr); +} + +static void xen_io_add(MemoryListener *listener, + MemoryRegionSection *section) +{ + XenIOState *state = container_of(listener, XenIOState, io_listener); + MemoryRegion *mr = section->mr; + + if (mr->ops == &unassigned_io_ops) { + return; + } + + memory_region_ref(mr); + + xen_map_io_section(xen_domid, state->ioservid, section); +} + +static void xen_io_del(MemoryListener *listener, + MemoryRegionSection *section) +{ + XenIOState *state = container_of(listener, XenIOState, io_listener); + MemoryRegion *mr = section->mr; + + if (mr->ops == &unassigned_io_ops) { + return; + } + + xen_unmap_io_section(xen_domid, state->ioservid, section); + + memory_region_unref(mr); +} + +static void xen_device_realize(DeviceListener *listener, + DeviceState *dev) +{ + XenIOState *state = container_of(listener, XenIOState, device_listener); + + if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) { + PCIDevice *pci_dev = PCI_DEVICE(dev); + XenPciDevice *xendev = g_new(XenPciDevice, 1); + + xendev->pci_dev = pci_dev; + xendev->sbdf = PCI_BUILD_BDF(pci_dev_bus_num(pci_dev), + pci_dev->devfn); + QLIST_INSERT_HEAD(&state->dev_list, xendev, entry); + + xen_map_pcidev(xen_domid, state->ioservid, pci_dev); + } +} + +static void xen_device_unrealize(DeviceListener *listener, + DeviceState *dev) +{ + XenIOState *state = container_of(listener, XenIOState, device_listener); + + if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) { + PCIDevice *pci_dev = PCI_DEVICE(dev); + XenPciDevice *xendev, *next; + + xen_unmap_pcidev(xen_domid, state->ioservid, pci_dev); + + QLIST_FOREACH_SAFE(xendev, &state->dev_list, entry, next) { + if (xendev->pci_dev == pci_dev) { + QLIST_REMOVE(xendev, entry); + g_free(xendev); + break; + } + } + } +} + +static void xen_sync_dirty_bitmap(XenIOState *state, + hwaddr start_addr, + ram_addr_t size) +{ + hwaddr npages = size >> TARGET_PAGE_BITS; + const int width = sizeof(unsigned long) * 8; + size_t bitmap_size = DIV_ROUND_UP(npages, width); + int rc, i, j; + const XenPhysmap *physmap = NULL; + + physmap = get_physmapping(start_addr, size); + if (physmap == NULL) { + /* not handled */ + return; + } + + if (state->log_for_dirtybit == NULL) { + state->log_for_dirtybit = physmap; + state->dirty_bitmap = g_new(unsigned long, bitmap_size); + } else if (state->log_for_dirtybit != physmap) { + /* Only one range for dirty bitmap can be tracked. 
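+         * Requests for any other physmap range are silently ignored here
+         * rather than reported as errors.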
*/
+        return;
+    }
+
+    rc = xen_track_dirty_vram(xen_domid, start_addr >> TARGET_PAGE_BITS,
+                              npages, state->dirty_bitmap);
+    if (rc < 0) {
+#ifndef ENODATA
+#define ENODATA ENOENT
+#endif
+        if (errno == ENODATA) {
+            memory_region_set_dirty(framebuffer, 0, size);
+            DPRINTF("xen: track_dirty_vram failed (0x" TARGET_FMT_plx
+                    ", 0x" TARGET_FMT_plx "): %s\n",
+                    start_addr, start_addr + size, strerror(errno));
+        }
+        return;
+    }
+
+    for (i = 0; i < bitmap_size; i++) {
+        unsigned long map = state->dirty_bitmap[i];
+        while (map != 0) {
+            j = ctzl(map);
+            map &= ~(1ul << j);
+            memory_region_set_dirty(framebuffer,
+                                    (i * width + j) * TARGET_PAGE_SIZE,
+                                    TARGET_PAGE_SIZE);
+        }
+    }
+}
+
+static void xen_log_start(MemoryListener *listener,
+                          MemoryRegionSection *section,
+                          int old, int new)
+{
+    XenIOState *state = container_of(listener, XenIOState, memory_listener);
+
+    if (new & ~old & (1 << DIRTY_MEMORY_VGA)) {
+        xen_sync_dirty_bitmap(state, section->offset_within_address_space,
+                              int128_get64(section->size));
+    }
+}
+
+static void xen_log_stop(MemoryListener *listener, MemoryRegionSection *section,
+                         int old, int new)
+{
+    XenIOState *state = container_of(listener, XenIOState, memory_listener);
+
+    if (old & ~new & (1 << DIRTY_MEMORY_VGA)) {
+        state->log_for_dirtybit = NULL;
+        g_free(state->dirty_bitmap);
+        state->dirty_bitmap = NULL;
+        /* Disable dirty bit tracking */
+        xen_track_dirty_vram(xen_domid, 0, 0, NULL);
+    }
+}
+
+static void xen_log_sync(MemoryListener *listener, MemoryRegionSection *section)
+{
+    XenIOState *state = container_of(listener, XenIOState, memory_listener);
+
+    xen_sync_dirty_bitmap(state, section->offset_within_address_space,
+                          int128_get64(section->size));
+}
+
+static void xen_log_global_start(MemoryListener *listener)
+{
+    if (xen_enabled()) {
+        xen_in_migration = true;
+    }
+}
+
+static void xen_log_global_stop(MemoryListener *listener)
+{
+    xen_in_migration = false;
+}
+
+static MemoryListener xen_memory_listener = {
+    .name = "xen-memory",
+    .region_add = xen_region_add,
+    .region_del = xen_region_del,
+    .log_start = xen_log_start,
+    .log_stop = xen_log_stop,
+    .log_sync = xen_log_sync,
+    .log_global_start = xen_log_global_start,
+    .log_global_stop = xen_log_global_stop,
+    .priority = 10,
+};
+
+static MemoryListener xen_io_listener = {
+    .name = "xen-io",
+    .region_add = xen_io_add,
+    .region_del = xen_io_del,
+    .priority = 10,
+};
+
+static DeviceListener xen_device_listener = {
+    .realize = xen_device_realize,
+    .unrealize = xen_device_unrealize,
+};
+
+/* get the ioreq packets from shared memory */
+static ioreq_t *cpu_get_ioreq_from_shared_memory(XenIOState *state, int vcpu)
+{
+    ioreq_t *req = xen_vcpu_ioreq(state->shared_page, vcpu);
+
+    if (req->state != STATE_IOREQ_READY) {
+        DPRINTF("I/O request not ready: "
+                "%x, ptr: %x, port: %"PRIx64", "
+                "data: %"PRIx64", count: %u, size: %u\n",
+                req->state, req->data_is_ptr, req->addr,
+                req->data, req->count, req->size);
+        return NULL;
+    }
+
+    xen_rmb(); /* see IOREQ_READY /then/ read contents of ioreq */
+
+    req->state = STATE_IOREQ_INPROCESS;
+    return req;
+}
+
+/*
+ * Use poll to get the port notification.
+ * Returns the pending ioreq for the vcpu that signalled us, or NULL if
+ * there is nothing to handle.
+ */
+static ioreq_t *cpu_get_ioreq(XenIOState *state)
+{
+    MachineState *ms = MACHINE(qdev_get_machine());
+    unsigned int max_cpus = ms->smp.max_cpus;
+    int i;
+    evtchn_port_t port;
+
+    port = xenevtchn_pending(state->xce_handle);
+    if (port == state->bufioreq_local_port) {
+        timer_mod(state->buffered_io_timer,
+                BUFFER_IO_MAX_DELAY +
qemu_clock_get_ms(QEMU_CLOCK_REALTIME)); + return NULL; + } + + if (port != -1) { + for (i = 0; i < max_cpus; i++) { + if (state->ioreq_local_port[i] == port) { + break; + } + } + + if (i == max_cpus) { + hw_error("Fatal error while trying to get io event!\n"); + } + + /* unmask the wanted port again */ + xenevtchn_unmask(state->xce_handle, port); + + /* get the io packet from shared memory */ + state->send_vcpu = i; + return cpu_get_ioreq_from_shared_memory(state, i); + } + + /* read error or read nothing */ + return NULL; +} + +static uint32_t do_inp(uint32_t addr, unsigned long size) +{ + switch (size) { + case 1: + return cpu_inb(addr); + case 2: + return cpu_inw(addr); + case 4: + return cpu_inl(addr); + default: + hw_error("inp: bad size: %04x %lx", addr, size); + } +} + +static void do_outp(uint32_t addr, + unsigned long size, uint32_t val) +{ + switch (size) { + case 1: + return cpu_outb(addr, val); + case 2: + return cpu_outw(addr, val); + case 4: + return cpu_outl(addr, val); + default: + hw_error("outp: bad size: %04x %lx", addr, size); + } +} + +/* + * Helper functions which read/write an object from/to physical guest + * memory, as part of the implementation of an ioreq. + * + * Equivalent to + * cpu_physical_memory_rw(addr + (req->df ? -1 : +1) * req->size * i, + * val, req->size, 0/1) + * except without the integer overflow problems. + */ +static void rw_phys_req_item(hwaddr addr, + ioreq_t *req, uint32_t i, void *val, int rw) +{ + /* Do everything unsigned so overflow just results in a truncated result + * and accesses to undesired parts of guest memory, which is up + * to the guest */ + hwaddr offset = (hwaddr)req->size * i; + if (req->df) { + addr -= offset; + } else { + addr += offset; + } + cpu_physical_memory_rw(addr, val, req->size, rw); +} + +static inline void read_phys_req_item(hwaddr addr, + ioreq_t *req, uint32_t i, void *val) +{ + rw_phys_req_item(addr, req, i, val, 0); +} +static inline void write_phys_req_item(hwaddr addr, + ioreq_t *req, uint32_t i, void *val) +{ + rw_phys_req_item(addr, req, i, val, 1); +} + + +static void cpu_ioreq_pio(ioreq_t *req) +{ + uint32_t i; + + trace_cpu_ioreq_pio(req, req->dir, req->df, req->data_is_ptr, req->addr, + req->data, req->count, req->size); + + if (req->size > sizeof(uint32_t)) { + hw_error("PIO: bad size (%u)", req->size); + } + + if (req->dir == IOREQ_READ) { + if (!req->data_is_ptr) { + req->data = do_inp(req->addr, req->size); + trace_cpu_ioreq_pio_read_reg(req, req->data, req->addr, + req->size); + } else { + uint32_t tmp; + + for (i = 0; i < req->count; i++) { + tmp = do_inp(req->addr, req->size); + write_phys_req_item(req->data, req, i, &tmp); + } + } + } else if (req->dir == IOREQ_WRITE) { + if (!req->data_is_ptr) { + trace_cpu_ioreq_pio_write_reg(req, req->data, req->addr, + req->size); + do_outp(req->addr, req->size, req->data); + } else { + for (i = 0; i < req->count; i++) { + uint32_t tmp = 0; + + read_phys_req_item(req->data, req, i, &tmp); + do_outp(req->addr, req->size, tmp); + } + } + } +} + +static void cpu_ioreq_move(ioreq_t *req) +{ + uint32_t i; + + trace_cpu_ioreq_move(req, req->dir, req->df, req->data_is_ptr, req->addr, + req->data, req->count, req->size); + + if (req->size > sizeof(req->data)) { + hw_error("MMIO: bad size (%u)", req->size); + } + + if (!req->data_is_ptr) { + if (req->dir == IOREQ_READ) { + for (i = 0; i < req->count; i++) { + read_phys_req_item(req->addr, req, i, &req->data); + } + } else if (req->dir == IOREQ_WRITE) { + for (i = 0; i < req->count; i++) { + 
write_phys_req_item(req->addr, req, i, &req->data); + } + } + } else { + uint64_t tmp; + + if (req->dir == IOREQ_READ) { + for (i = 0; i < req->count; i++) { + read_phys_req_item(req->addr, req, i, &tmp); + write_phys_req_item(req->data, req, i, &tmp); + } + } else if (req->dir == IOREQ_WRITE) { + for (i = 0; i < req->count; i++) { + read_phys_req_item(req->data, req, i, &tmp); + write_phys_req_item(req->addr, req, i, &tmp); + } + } + } +} + +static void cpu_ioreq_config(XenIOState *state, ioreq_t *req) +{ + uint32_t sbdf = req->addr >> 32; + uint32_t reg = req->addr; + XenPciDevice *xendev; + + if (req->size != sizeof(uint8_t) && req->size != sizeof(uint16_t) && + req->size != sizeof(uint32_t)) { + hw_error("PCI config access: bad size (%u)", req->size); + } + + if (req->count != 1) { + hw_error("PCI config access: bad count (%u)", req->count); + } + + QLIST_FOREACH(xendev, &state->dev_list, entry) { + if (xendev->sbdf != sbdf) { + continue; + } + + if (!req->data_is_ptr) { + if (req->dir == IOREQ_READ) { + req->data = pci_host_config_read_common( + xendev->pci_dev, reg, PCI_CONFIG_SPACE_SIZE, + req->size); + trace_cpu_ioreq_config_read(req, xendev->sbdf, reg, + req->size, req->data); + } else if (req->dir == IOREQ_WRITE) { + trace_cpu_ioreq_config_write(req, xendev->sbdf, reg, + req->size, req->data); + pci_host_config_write_common( + xendev->pci_dev, reg, PCI_CONFIG_SPACE_SIZE, + req->data, req->size); + } + } else { + uint32_t tmp; + + if (req->dir == IOREQ_READ) { + tmp = pci_host_config_read_common( + xendev->pci_dev, reg, PCI_CONFIG_SPACE_SIZE, + req->size); + trace_cpu_ioreq_config_read(req, xendev->sbdf, reg, + req->size, tmp); + write_phys_req_item(req->data, req, 0, &tmp); + } else if (req->dir == IOREQ_WRITE) { + read_phys_req_item(req->data, req, 0, &tmp); + trace_cpu_ioreq_config_write(req, xendev->sbdf, reg, + req->size, tmp); + pci_host_config_write_common( + xendev->pci_dev, reg, PCI_CONFIG_SPACE_SIZE, + tmp, req->size); + } + } + } +} + +static void regs_to_cpu(vmware_regs_t *vmport_regs, ioreq_t *req) +{ + X86CPU *cpu; + CPUX86State *env; + + cpu = X86_CPU(current_cpu); + env = &cpu->env; + env->regs[R_EAX] = req->data; + env->regs[R_EBX] = vmport_regs->ebx; + env->regs[R_ECX] = vmport_regs->ecx; + env->regs[R_EDX] = vmport_regs->edx; + env->regs[R_ESI] = vmport_regs->esi; + env->regs[R_EDI] = vmport_regs->edi; +} + +static void regs_from_cpu(vmware_regs_t *vmport_regs) +{ + X86CPU *cpu = X86_CPU(current_cpu); + CPUX86State *env = &cpu->env; + + vmport_regs->ebx = env->regs[R_EBX]; + vmport_regs->ecx = env->regs[R_ECX]; + vmport_regs->edx = env->regs[R_EDX]; + vmport_regs->esi = env->regs[R_ESI]; + vmport_regs->edi = env->regs[R_EDI]; +} + +static void handle_vmport_ioreq(XenIOState *state, ioreq_t *req) +{ + vmware_regs_t *vmport_regs; + + assert(state->shared_vmport_page); + vmport_regs = + &state->shared_vmport_page->vcpu_vmport_regs[state->send_vcpu]; + QEMU_BUILD_BUG_ON(sizeof(*req) < sizeof(*vmport_regs)); + + current_cpu = state->cpu_by_vcpu_id[state->send_vcpu]; + regs_to_cpu(vmport_regs, req); + cpu_ioreq_pio(req); + regs_from_cpu(vmport_regs); + current_cpu = NULL; +} + +static void handle_ioreq(XenIOState *state, ioreq_t *req) +{ + trace_handle_ioreq(req, req->type, req->dir, req->df, req->data_is_ptr, + req->addr, req->data, req->count, req->size); + + if (!req->data_is_ptr && (req->dir == IOREQ_WRITE) && + (req->size < sizeof (target_ulong))) { + req->data &= ((target_ulong) 1 << (8 * req->size)) - 1; + } + + if (req->dir == IOREQ_WRITE) + 
trace_handle_ioreq_write(req, req->type, req->df, req->data_is_ptr,
+                                 req->addr, req->data, req->count, req->size);
+
+    switch (req->type) {
+    case IOREQ_TYPE_PIO:
+        cpu_ioreq_pio(req);
+        break;
+    case IOREQ_TYPE_COPY:
+        cpu_ioreq_move(req);
+        break;
+    case IOREQ_TYPE_VMWARE_PORT:
+        handle_vmport_ioreq(state, req);
+        break;
+    case IOREQ_TYPE_TIMEOFFSET:
+        break;
+    case IOREQ_TYPE_INVALIDATE:
+        xen_invalidate_map_cache();
+        break;
+    case IOREQ_TYPE_PCI_CONFIG:
+        cpu_ioreq_config(state, req);
+        break;
+    default:
+        hw_error("Invalid ioreq type 0x%x\n", req->type);
+    }
+    if (req->dir == IOREQ_READ) {
+        trace_handle_ioreq_read(req, req->type, req->df, req->data_is_ptr,
+                                req->addr, req->data, req->count, req->size);
+    }
+}
+
+static int handle_buffered_iopage(XenIOState *state)
+{
+    buffered_iopage_t *buf_page = state->buffered_io_page;
+    buf_ioreq_t *buf_req = NULL;
+    ioreq_t req;
+    int qw;
+
+    if (!buf_page) {
+        return 0;
+    }
+
+    memset(&req, 0x00, sizeof(req));
+    req.state = STATE_IOREQ_READY;
+    req.count = 1;
+    req.dir = IOREQ_WRITE;
+
+    for (;;) {
+        uint32_t rdptr = buf_page->read_pointer, wrptr;
+
+        xen_rmb();
+        wrptr = buf_page->write_pointer;
+        xen_rmb();
+        if (rdptr != buf_page->read_pointer) {
+            continue;
+        }
+        if (rdptr == wrptr) {
+            break;
+        }
+        buf_req = &buf_page->buf_ioreq[rdptr % IOREQ_BUFFER_SLOT_NUM];
+        req.size = 1U << buf_req->size;
+        req.addr = buf_req->addr;
+        req.data = buf_req->data;
+        req.type = buf_req->type;
+        xen_rmb();
+        qw = (req.size == 8);
+        if (qw) {
+            if (rdptr + 1 == wrptr) {
+                hw_error("Incomplete quad word buffered ioreq");
+            }
+            buf_req = &buf_page->buf_ioreq[(rdptr + 1) %
+                                           IOREQ_BUFFER_SLOT_NUM];
+            req.data |= ((uint64_t)buf_req->data) << 32;
+            xen_rmb();
+        }
+
+        handle_ioreq(state, &req);
+
+        /* Only req.data may get updated by handle_ioreq(), albeit even that
+         * should not happen as such data would never make it to the guest (we
+         * can only usefully see writes here after all).
+         */
+        assert(req.state == STATE_IOREQ_READY);
+        assert(req.count == 1);
+        assert(req.dir == IOREQ_WRITE);
+        assert(!req.data_is_ptr);
+
+        qatomic_add(&buf_page->read_pointer, qw + 1);
+    }
+
+    return req.count;
+}
+
+static void handle_buffered_io(void *opaque)
+{
+    XenIOState *state = opaque;
+
+    if (handle_buffered_iopage(state)) {
+        timer_mod(state->buffered_io_timer,
+                  BUFFER_IO_MAX_DELAY + qemu_clock_get_ms(QEMU_CLOCK_REALTIME));
+    } else {
+        timer_del(state->buffered_io_timer);
+        xenevtchn_unmask(state->xce_handle, state->bufioreq_local_port);
+    }
+}
+
+static void cpu_handle_ioreq(void *opaque)
+{
+    XenIOState *state = opaque;
+    ioreq_t *req = cpu_get_ioreq(state);
+
+    handle_buffered_iopage(state);
+    if (req) {
+        ioreq_t copy = *req;
+
+        xen_rmb();
+        handle_ioreq(state, &copy);
+        req->data = copy.data;
+
+        if (req->state != STATE_IOREQ_INPROCESS) {
+            fprintf(stderr, "Badness in I/O request ... not in service?!: "
+                    "%x, ptr: %x, port: %"PRIx64", "
+                    "data: %"PRIx64", count: %u, size: %u, type: %u\n",
+                    req->state, req->data_is_ptr, req->addr,
+                    req->data, req->count, req->size, req->type);
+            destroy_hvm_domain(false);
+            return;
+        }
+
+        xen_wmb(); /* Update ioreq contents /then/ update state. */
+
+        /*
+         * We do this before we send the response so that the tools
+         * have the opportunity to pick up on the reset before the
+         * guest resumes and does a hlt with interrupts disabled which
+         * causes Xen to powerdown the domain.
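+         * (A pending shutdown request destroys the domain outright; a
+         * pending reset request runs qemu_system_reset() first and then
+         * asks Xen to reboot the domain.)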
+ */ + if (runstate_is_running()) { + ShutdownCause request; + + if (qemu_shutdown_requested_get()) { + destroy_hvm_domain(false); + } + request = qemu_reset_requested_get(); + if (request) { + qemu_system_reset(request); + destroy_hvm_domain(true); + } + } + + req->state = STATE_IORESP_READY; + xenevtchn_notify(state->xce_handle, + state->ioreq_local_port[state->send_vcpu]); + } +} + +static void xen_main_loop_prepare(XenIOState *state) +{ + int evtchn_fd = -1; + + if (state->xce_handle != NULL) { + evtchn_fd = xenevtchn_fd(state->xce_handle); + } + + state->buffered_io_timer = timer_new_ms(QEMU_CLOCK_REALTIME, handle_buffered_io, + state); + + if (evtchn_fd != -1) { + CPUState *cpu_state; + + DPRINTF("%s: Init cpu_by_vcpu_id\n", __func__); + CPU_FOREACH(cpu_state) { + DPRINTF("%s: cpu_by_vcpu_id[%d]=%p\n", + __func__, cpu_state->cpu_index, cpu_state); + state->cpu_by_vcpu_id[cpu_state->cpu_index] = cpu_state; + } + qemu_set_fd_handler(evtchn_fd, cpu_handle_ioreq, NULL, state); + } +} + + +static void xen_hvm_change_state_handler(void *opaque, bool running, + RunState rstate) +{ + XenIOState *state = opaque; + + if (running) { + xen_main_loop_prepare(state); + } + + xen_set_ioreq_server_state(xen_domid, + state->ioservid, + (rstate == RUN_STATE_RUNNING)); +} + +static void xen_exit_notifier(Notifier *n, void *data) +{ + XenIOState *state = container_of(n, XenIOState, exit); + + xen_destroy_ioreq_server(xen_domid, state->ioservid); + if (state->fres != NULL) { + xenforeignmemory_unmap_resource(xen_fmem, state->fres); + } + + xenevtchn_close(state->xce_handle); + xs_daemon_close(state->xenstore); +} + +#ifdef XEN_COMPAT_PHYSMAP +static void xen_read_physmap(XenIOState *state) +{ + XenPhysmap *physmap = NULL; + unsigned int len, num, i; + char path[80], *value = NULL; + char **entries = NULL; + + snprintf(path, sizeof(path), + "/local/domain/0/device-model/%d/physmap", xen_domid); + entries = xs_directory(state->xenstore, 0, path, &num); + if (entries == NULL) + return; + + for (i = 0; i < num; i++) { + physmap = g_malloc(sizeof (XenPhysmap)); + physmap->phys_offset = strtoull(entries[i], NULL, 16); + snprintf(path, sizeof(path), + "/local/domain/0/device-model/%d/physmap/%s/start_addr", + xen_domid, entries[i]); + value = xs_read(state->xenstore, 0, path, &len); + if (value == NULL) { + g_free(physmap); + continue; + } + physmap->start_addr = strtoull(value, NULL, 16); + free(value); + + snprintf(path, sizeof(path), + "/local/domain/0/device-model/%d/physmap/%s/size", + xen_domid, entries[i]); + value = xs_read(state->xenstore, 0, path, &len); + if (value == NULL) { + g_free(physmap); + continue; + } + physmap->size = strtoull(value, NULL, 16); + free(value); + + snprintf(path, sizeof(path), + "/local/domain/0/device-model/%d/physmap/%s/name", + xen_domid, entries[i]); + physmap->name = xs_read(state->xenstore, 0, path, &len); + + QLIST_INSERT_HEAD(&xen_physmap, physmap, list); + } + free(entries); +} +#else +static void xen_read_physmap(XenIOState *state) +{ +} +#endif + +static void xen_wakeup_notifier(Notifier *notifier, void *data) +{ + xc_set_hvm_param(xen_xc, xen_domid, HVM_PARAM_ACPI_S_STATE, 0); +} + +static int xen_map_ioreq_server(XenIOState *state) +{ + void *addr = NULL; + xen_pfn_t ioreq_pfn; + xen_pfn_t bufioreq_pfn; + evtchn_port_t bufioreq_evtchn; + int rc; + + /* + * Attempt to map using the resource API and fall back to normal + * foreign mapping if this is not supported. 
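+     * A single resource mapping covers two frames: the buffered ioreq
+     * page (frame 0) followed by the synchronous ioreq page (frame 1),
+     * which is what the build-time assertions below rely on.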
+ */ + QEMU_BUILD_BUG_ON(XENMEM_resource_ioreq_server_frame_bufioreq != 0); + QEMU_BUILD_BUG_ON(XENMEM_resource_ioreq_server_frame_ioreq(0) != 1); + state->fres = xenforeignmemory_map_resource(xen_fmem, xen_domid, + XENMEM_resource_ioreq_server, + state->ioservid, 0, 2, + &addr, + PROT_READ | PROT_WRITE, 0); + if (state->fres != NULL) { + trace_xen_map_resource_ioreq(state->ioservid, addr); + state->buffered_io_page = addr; + state->shared_page = addr + TARGET_PAGE_SIZE; + } else if (errno != EOPNOTSUPP) { + error_report("failed to map ioreq server resources: error %d handle=%p", + errno, xen_xc); + return -1; + } + + rc = xen_get_ioreq_server_info(xen_domid, state->ioservid, + (state->shared_page == NULL) ? + &ioreq_pfn : NULL, + (state->buffered_io_page == NULL) ? + &bufioreq_pfn : NULL, + &bufioreq_evtchn); + if (rc < 0) { + error_report("failed to get ioreq server info: error %d handle=%p", + errno, xen_xc); + return rc; + } + + if (state->shared_page == NULL) { + DPRINTF("shared page at pfn %lx\n", ioreq_pfn); + + state->shared_page = xenforeignmemory_map(xen_fmem, xen_domid, + PROT_READ | PROT_WRITE, + 1, &ioreq_pfn, NULL); + if (state->shared_page == NULL) { + error_report("map shared IO page returned error %d handle=%p", + errno, xen_xc); + } + } + + if (state->buffered_io_page == NULL) { + DPRINTF("buffered io page at pfn %lx\n", bufioreq_pfn); + + state->buffered_io_page = xenforeignmemory_map(xen_fmem, xen_domid, + PROT_READ | PROT_WRITE, + 1, &bufioreq_pfn, + NULL); + if (state->buffered_io_page == NULL) { + error_report("map buffered IO page returned error %d", errno); + return -1; + } + } + + if (state->shared_page == NULL || state->buffered_io_page == NULL) { + return -1; + } + + DPRINTF("buffered io evtchn is %x\n", bufioreq_evtchn); + + state->bufioreq_remote_port = bufioreq_evtchn; + + return 0; +} + +void xen_hvm_init_pc(PCMachineState *pcms, MemoryRegion **ram_memory) +{ + MachineState *ms = MACHINE(pcms); + unsigned int max_cpus = ms->smp.max_cpus; + int i, rc; + xen_pfn_t ioreq_pfn; + XenIOState *state; + + state = g_malloc0(sizeof (XenIOState)); + + state->xce_handle = xenevtchn_open(NULL, 0); + if (state->xce_handle == NULL) { + perror("xen: event channel open"); + goto err; + } + + state->xenstore = xs_daemon_open(); + if (state->xenstore == NULL) { + perror("xen: xenstore open"); + goto err; + } + + xen_create_ioreq_server(xen_domid, &state->ioservid); + + state->exit.notify = xen_exit_notifier; + qemu_add_exit_notifier(&state->exit); + + state->suspend.notify = xen_suspend_notifier; + qemu_register_suspend_notifier(&state->suspend); + + state->wakeup.notify = xen_wakeup_notifier; + qemu_register_wakeup_notifier(&state->wakeup); + + /* + * Register wake-up support in QMP query-current-machine API + */ + qemu_register_wakeup_support(); + + rc = xen_map_ioreq_server(state); + if (rc < 0) { + goto err; + } + + rc = xen_get_vmport_regs_pfn(xen_xc, xen_domid, &ioreq_pfn); + if (!rc) { + DPRINTF("shared vmport page at pfn %lx\n", ioreq_pfn); + state->shared_vmport_page = + xenforeignmemory_map(xen_fmem, xen_domid, PROT_READ|PROT_WRITE, + 1, &ioreq_pfn, NULL); + if (state->shared_vmport_page == NULL) { + error_report("map shared vmport IO page returned error %d handle=%p", + errno, xen_xc); + goto err; + } + } else if (rc != -ENOSYS) { + error_report("get vmport regs pfn returned error %d, rc=%d", + errno, rc); + goto err; + } + + /* Note: cpus is empty at this point in init */ + state->cpu_by_vcpu_id = g_malloc0(max_cpus * sizeof(CPUState *)); + + rc = 
xen_set_ioreq_server_state(xen_domid, state->ioservid, true); + if (rc < 0) { + error_report("failed to enable ioreq server info: error %d handle=%p", + errno, xen_xc); + goto err; + } + + state->ioreq_local_port = g_malloc0(max_cpus * sizeof (evtchn_port_t)); + + /* FIXME: how about if we overflow the page here? */ + for (i = 0; i < max_cpus; i++) { + rc = xenevtchn_bind_interdomain(state->xce_handle, xen_domid, + xen_vcpu_eport(state->shared_page, i)); + if (rc == -1) { + error_report("shared evtchn %d bind error %d", i, errno); + goto err; + } + state->ioreq_local_port[i] = rc; + } + + rc = xenevtchn_bind_interdomain(state->xce_handle, xen_domid, + state->bufioreq_remote_port); + if (rc == -1) { + error_report("buffered evtchn bind error %d", errno); + goto err; + } + state->bufioreq_local_port = rc; + + /* Init RAM management */ +#ifdef XEN_COMPAT_PHYSMAP + xen_map_cache_init(xen_phys_offset_to_gaddr, state); +#else + xen_map_cache_init(NULL, state); +#endif + xen_ram_init(pcms, ms->ram_size, ram_memory); + + qemu_add_vm_change_state_handler(xen_hvm_change_state_handler, state); + + state->memory_listener = xen_memory_listener; + memory_listener_register(&state->memory_listener, &address_space_memory); + state->log_for_dirtybit = NULL; + + state->io_listener = xen_io_listener; + memory_listener_register(&state->io_listener, &address_space_io); + + state->device_listener = xen_device_listener; + QLIST_INIT(&state->dev_list); + device_listener_register(&state->device_listener); + + xen_bus_init(); + + /* Initialize backend core & drivers */ + if (xen_be_init() != 0) { + error_report("xen backend core setup failed"); + goto err; + } + xen_be_register_common(); + + QLIST_INIT(&xen_physmap); + xen_read_physmap(state); + + /* Disable ACPI build because Xen handles it */ + pcms->acpi_build_enabled = false; + + return; + +err: + error_report("xen hardware virtual machine initialisation failed"); + exit(1); +} + +void destroy_hvm_domain(bool reboot) +{ + xc_interface *xc_handle; + int sts; + int rc; + + unsigned int reason = reboot ? SHUTDOWN_reboot : SHUTDOWN_poweroff; + + if (xen_dmod) { + rc = xendevicemodel_shutdown(xen_dmod, xen_domid, reason); + if (!rc) { + return; + } + if (errno != ENOTTY /* old Xen */) { + perror("xendevicemodel_shutdown failed"); + } + /* well, try the old thing then */ + } + + xc_handle = xc_interface_open(0, 0, 0); + if (xc_handle == NULL) { + fprintf(stderr, "Cannot acquire xenctrl handle\n"); + } else { + sts = xc_domain_shutdown(xc_handle, xen_domid, reason); + if (sts != 0) { + fprintf(stderr, "xc_domain_shutdown failed to issue %s, " + "sts %d, %s\n", reboot ? "reboot" : "poweroff", + sts, strerror(errno)); + } else { + fprintf(stderr, "Issued domain %d %s\n", xen_domid, + reboot ? "reboot" : "poweroff"); + } + xc_interface_close(xc_handle); + } +} + +void xen_register_framebuffer(MemoryRegion *mr) +{ + framebuffer = mr; +} + +void xen_shutdown_fatal_error(const char *fmt, ...) 
+{
+    va_list ap;
+
+    va_start(ap, fmt);
+    vfprintf(stderr, fmt, ap);
+    va_end(ap);
+    fprintf(stderr, "Will destroy the domain.\n");
+    /* destroy the domain */
+    qemu_system_shutdown_request(SHUTDOWN_CAUSE_HOST_ERROR);
+}
+
+void xen_hvm_modified_memory(ram_addr_t start, ram_addr_t length)
+{
+    if (unlikely(xen_in_migration)) {
+        int rc;
+        ram_addr_t start_pfn, nb_pages;
+
+        start = xen_phys_offset_to_gaddr(start, length);
+
+        if (length == 0) {
+            length = TARGET_PAGE_SIZE;
+        }
+        start_pfn = start >> TARGET_PAGE_BITS;
+        nb_pages = ((start + length + TARGET_PAGE_SIZE - 1) >> TARGET_PAGE_BITS)
+            - start_pfn;
+        rc = xen_modified_memory(xen_domid, start_pfn, nb_pages);
+        if (rc) {
+            fprintf(stderr,
+                    "%s failed for "RAM_ADDR_FMT" ("RAM_ADDR_FMT"): %i, %s\n",
+                    __func__, start, nb_pages, errno, strerror(errno));
+        }
+    }
+}
+
+void qmp_xen_set_global_dirty_log(bool enable, Error **errp)
+{
+    if (enable) {
+        memory_global_dirty_log_start(GLOBAL_DIRTY_MIGRATION);
+    } else {
+        memory_global_dirty_log_stop(GLOBAL_DIRTY_MIGRATION);
+    }
+}
diff --git a/hw/i386/xen/xen-mapcache.c b/hw/i386/xen/xen-mapcache.c
new file mode 100644
index 000000000..bd47c3d67
--- /dev/null
+++ b/hw/i386/xen/xen-mapcache.c
@@ -0,0 +1,593 @@
+/*
+ * Copyright (C) 2011 Citrix Ltd.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ *
+ * Contributions after 2012-01-13 are licensed under the terms of the
+ * GNU GPL, version 2 or (at your option) any later version.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/units.h"
+#include "qemu/error-report.h"
+
+#include <sys/resource.h>
+
+#include "hw/xen/xen-legacy-backend.h"
+#include "qemu/bitmap.h"
+
+#include "sysemu/runstate.h"
+#include "sysemu/xen-mapcache.h"
+#include "trace.h"
+
+
+//#define MAPCACHE_DEBUG
+
+#ifdef MAPCACHE_DEBUG
+# define DPRINTF(fmt, ...) do { \
+    fprintf(stderr, "xen_mapcache: " fmt, ## __VA_ARGS__); \
+} while (0)
+#else
+# define DPRINTF(fmt, ...) do { } while (0)
+#endif
+
+#if HOST_LONG_BITS == 32
+# define MCACHE_BUCKET_SHIFT 16
+# define MCACHE_MAX_SIZE (1UL<<31) /* 2GB Cap */
+#else
+# define MCACHE_BUCKET_SHIFT 20
+# define MCACHE_MAX_SIZE (1UL<<35) /* 32GB Cap */
+#endif
+#define MCACHE_BUCKET_SIZE (1UL << MCACHE_BUCKET_SHIFT)
+
+/* This is the size of the virtual address space reserved for QEMU that will
+ * not be used by MapCache.
+ * From empirical tests I observed that QEMU uses 75MB more than the
+ * max_mcache_size.
+ */
+#define NON_MCACHE_MEMORY_SIZE (80 * MiB)
+
+typedef struct MapCacheEntry {
+    hwaddr paddr_index;
+    uint8_t *vaddr_base;
+    unsigned long *valid_mapping;
+    uint8_t lock;
+#define XEN_MAPCACHE_ENTRY_DUMMY (1 << 0)
+    uint8_t flags;
+    hwaddr size;
+    struct MapCacheEntry *next;
+} MapCacheEntry;
+
+typedef struct MapCacheRev {
+    uint8_t *vaddr_req;
+    hwaddr paddr_index;
+    hwaddr size;
+    QTAILQ_ENTRY(MapCacheRev) next;
+    bool dma;
+} MapCacheRev;
+
+typedef struct MapCache {
+    MapCacheEntry *entry;
+    unsigned long nr_buckets;
+    QTAILQ_HEAD(, MapCacheRev) locked_entries;
+
+    /* For most cases (>99.9%), the page address is the same.
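+     * Keeping a pointer to the last hit below lets those lookups skip
+     * the bucket walk entirely; it is only a hint and is cleared
+     * whenever the underlying mapping changes.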
*/ + MapCacheEntry *last_entry; + unsigned long max_mcache_size; + unsigned int mcache_bucket_shift; + + phys_offset_to_gaddr_t phys_offset_to_gaddr; + QemuMutex lock; + void *opaque; +} MapCache; + +static MapCache *mapcache; + +static inline void mapcache_lock(void) +{ + qemu_mutex_lock(&mapcache->lock); +} + +static inline void mapcache_unlock(void) +{ + qemu_mutex_unlock(&mapcache->lock); +} + +static inline int test_bits(int nr, int size, const unsigned long *addr) +{ + unsigned long res = find_next_zero_bit(addr, size + nr, nr); + if (res >= nr + size) + return 1; + else + return 0; +} + +void xen_map_cache_init(phys_offset_to_gaddr_t f, void *opaque) +{ + unsigned long size; + struct rlimit rlimit_as; + + mapcache = g_malloc0(sizeof (MapCache)); + + mapcache->phys_offset_to_gaddr = f; + mapcache->opaque = opaque; + qemu_mutex_init(&mapcache->lock); + + QTAILQ_INIT(&mapcache->locked_entries); + + if (geteuid() == 0) { + rlimit_as.rlim_cur = RLIM_INFINITY; + rlimit_as.rlim_max = RLIM_INFINITY; + mapcache->max_mcache_size = MCACHE_MAX_SIZE; + } else { + getrlimit(RLIMIT_AS, &rlimit_as); + rlimit_as.rlim_cur = rlimit_as.rlim_max; + + if (rlimit_as.rlim_max != RLIM_INFINITY) { + warn_report("QEMU's maximum size of virtual" + " memory is not infinity"); + } + if (rlimit_as.rlim_max < MCACHE_MAX_SIZE + NON_MCACHE_MEMORY_SIZE) { + mapcache->max_mcache_size = rlimit_as.rlim_max - + NON_MCACHE_MEMORY_SIZE; + } else { + mapcache->max_mcache_size = MCACHE_MAX_SIZE; + } + } + + setrlimit(RLIMIT_AS, &rlimit_as); + + mapcache->nr_buckets = + (((mapcache->max_mcache_size >> XC_PAGE_SHIFT) + + (1UL << (MCACHE_BUCKET_SHIFT - XC_PAGE_SHIFT)) - 1) >> + (MCACHE_BUCKET_SHIFT - XC_PAGE_SHIFT)); + + size = mapcache->nr_buckets * sizeof (MapCacheEntry); + size = (size + XC_PAGE_SIZE - 1) & ~(XC_PAGE_SIZE - 1); + DPRINTF("%s, nr_buckets = %lx size %lu\n", __func__, + mapcache->nr_buckets, size); + mapcache->entry = g_malloc0(size); +} + +static void xen_remap_bucket(MapCacheEntry *entry, + void *vaddr, + hwaddr size, + hwaddr address_index, + bool dummy) +{ + uint8_t *vaddr_base; + xen_pfn_t *pfns; + int *err; + unsigned int i; + hwaddr nb_pfn = size >> XC_PAGE_SHIFT; + + trace_xen_remap_bucket(address_index); + + pfns = g_malloc0(nb_pfn * sizeof (xen_pfn_t)); + err = g_malloc0(nb_pfn * sizeof (int)); + + if (entry->vaddr_base != NULL) { + if (!(entry->flags & XEN_MAPCACHE_ENTRY_DUMMY)) { + ram_block_notify_remove(entry->vaddr_base, entry->size, + entry->size); + } + + /* + * If an entry is being replaced by another mapping and we're using + * MAP_FIXED flag for it - there is possibility of a race for vaddr + * address with another thread doing an mmap call itself + * (see man 2 mmap). To avoid that we skip explicit unmapping here + * and allow the kernel to destroy the previous mappings by replacing + * them in mmap call later. + * + * Non-identical replacements are not allowed therefore. + */ + assert(!vaddr || (entry->vaddr_base == vaddr && entry->size == size)); + + if (!vaddr && munmap(entry->vaddr_base, entry->size) != 0) { + perror("unmap fails"); + exit(-1); + } + } + g_free(entry->valid_mapping); + entry->valid_mapping = NULL; + + for (i = 0; i < nb_pfn; i++) { + pfns[i] = (address_index << (MCACHE_BUCKET_SHIFT-XC_PAGE_SHIFT)) + i; + } + + /* + * If the caller has requested the mapping at a specific address use + * MAP_FIXED to make sure it's honored. + */ + if (!dummy) { + vaddr_base = xenforeignmemory_map2(xen_fmem, xen_domid, vaddr, + PROT_READ | PROT_WRITE, + vaddr ? 
MAP_FIXED : 0, + nb_pfn, pfns, err); + if (vaddr_base == NULL) { + perror("xenforeignmemory_map2"); + exit(-1); + } + } else { + /* + * We create dummy mappings where we are unable to create a foreign + * mapping immediately due to certain circumstances (i.e. on resume now) + */ + vaddr_base = mmap(vaddr, size, PROT_READ | PROT_WRITE, + MAP_ANON | MAP_SHARED | (vaddr ? MAP_FIXED : 0), + -1, 0); + if (vaddr_base == MAP_FAILED) { + perror("mmap"); + exit(-1); + } + } + + if (!(entry->flags & XEN_MAPCACHE_ENTRY_DUMMY)) { + ram_block_notify_add(vaddr_base, size, size); + } + + entry->vaddr_base = vaddr_base; + entry->paddr_index = address_index; + entry->size = size; + entry->valid_mapping = (unsigned long *) g_malloc0(sizeof(unsigned long) * + BITS_TO_LONGS(size >> XC_PAGE_SHIFT)); + + if (dummy) { + entry->flags |= XEN_MAPCACHE_ENTRY_DUMMY; + } else { + entry->flags &= ~(XEN_MAPCACHE_ENTRY_DUMMY); + } + + bitmap_zero(entry->valid_mapping, nb_pfn); + for (i = 0; i < nb_pfn; i++) { + if (!err[i]) { + bitmap_set(entry->valid_mapping, i, 1); + } + } + + g_free(pfns); + g_free(err); +} + +static uint8_t *xen_map_cache_unlocked(hwaddr phys_addr, hwaddr size, + uint8_t lock, bool dma) +{ + MapCacheEntry *entry, *pentry = NULL, + *free_entry = NULL, *free_pentry = NULL; + hwaddr address_index; + hwaddr address_offset; + hwaddr cache_size = size; + hwaddr test_bit_size; + bool translated G_GNUC_UNUSED = false; + bool dummy = false; + +tryagain: + address_index = phys_addr >> MCACHE_BUCKET_SHIFT; + address_offset = phys_addr & (MCACHE_BUCKET_SIZE - 1); + + trace_xen_map_cache(phys_addr); + + /* test_bit_size is always a multiple of XC_PAGE_SIZE */ + if (size) { + test_bit_size = size + (phys_addr & (XC_PAGE_SIZE - 1)); + + if (test_bit_size % XC_PAGE_SIZE) { + test_bit_size += XC_PAGE_SIZE - (test_bit_size % XC_PAGE_SIZE); + } + } else { + test_bit_size = XC_PAGE_SIZE; + } + + if (mapcache->last_entry != NULL && + mapcache->last_entry->paddr_index == address_index && + !lock && !size && + test_bits(address_offset >> XC_PAGE_SHIFT, + test_bit_size >> XC_PAGE_SHIFT, + mapcache->last_entry->valid_mapping)) { + trace_xen_map_cache_return(mapcache->last_entry->vaddr_base + address_offset); + return mapcache->last_entry->vaddr_base + address_offset; + } + + /* size is always a multiple of MCACHE_BUCKET_SIZE */ + if (size) { + cache_size = size + address_offset; + if (cache_size % MCACHE_BUCKET_SIZE) { + cache_size += MCACHE_BUCKET_SIZE - (cache_size % MCACHE_BUCKET_SIZE); + } + } else { + cache_size = MCACHE_BUCKET_SIZE; + } + + entry = &mapcache->entry[address_index % mapcache->nr_buckets]; + + while (entry && (lock || entry->lock) && entry->vaddr_base && + (entry->paddr_index != address_index || entry->size != cache_size || + !test_bits(address_offset >> XC_PAGE_SHIFT, + test_bit_size >> XC_PAGE_SHIFT, + entry->valid_mapping))) { + if (!free_entry && !entry->lock) { + free_entry = entry; + free_pentry = pentry; + } + pentry = entry; + entry = entry->next; + } + if (!entry && free_entry) { + entry = free_entry; + pentry = free_pentry; + } + if (!entry) { + entry = g_malloc0(sizeof (MapCacheEntry)); + pentry->next = entry; + xen_remap_bucket(entry, NULL, cache_size, address_index, dummy); + } else if (!entry->lock) { + if (!entry->vaddr_base || entry->paddr_index != address_index || + entry->size != cache_size || + !test_bits(address_offset >> XC_PAGE_SHIFT, + test_bit_size >> XC_PAGE_SHIFT, + entry->valid_mapping)) { + xen_remap_bucket(entry, NULL, cache_size, address_index, dummy); + } + } + + 
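+    /*
+     * The bucket has been walked and (re)mapped above; if the requested
+     * range still is not fully valid, the lookup falls back: retry with
+     * a physmap-translated address, retry with a dummy mapping while
+     * migrating in, or finally fail and return NULL.
+     */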
if(!test_bits(address_offset >> XC_PAGE_SHIFT, + test_bit_size >> XC_PAGE_SHIFT, + entry->valid_mapping)) { + mapcache->last_entry = NULL; +#ifdef XEN_COMPAT_PHYSMAP + if (!translated && mapcache->phys_offset_to_gaddr) { + phys_addr = mapcache->phys_offset_to_gaddr(phys_addr, size); + translated = true; + goto tryagain; + } +#endif + if (!dummy && runstate_check(RUN_STATE_INMIGRATE)) { + dummy = true; + goto tryagain; + } + trace_xen_map_cache_return(NULL); + return NULL; + } + + mapcache->last_entry = entry; + if (lock) { + MapCacheRev *reventry = g_malloc0(sizeof(MapCacheRev)); + entry->lock++; + reventry->dma = dma; + reventry->vaddr_req = mapcache->last_entry->vaddr_base + address_offset; + reventry->paddr_index = mapcache->last_entry->paddr_index; + reventry->size = entry->size; + QTAILQ_INSERT_HEAD(&mapcache->locked_entries, reventry, next); + } + + trace_xen_map_cache_return(mapcache->last_entry->vaddr_base + address_offset); + return mapcache->last_entry->vaddr_base + address_offset; +} + +uint8_t *xen_map_cache(hwaddr phys_addr, hwaddr size, + uint8_t lock, bool dma) +{ + uint8_t *p; + + mapcache_lock(); + p = xen_map_cache_unlocked(phys_addr, size, lock, dma); + mapcache_unlock(); + return p; +} + +ram_addr_t xen_ram_addr_from_mapcache(void *ptr) +{ + MapCacheEntry *entry = NULL; + MapCacheRev *reventry; + hwaddr paddr_index; + hwaddr size; + ram_addr_t raddr; + int found = 0; + + mapcache_lock(); + QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) { + if (reventry->vaddr_req == ptr) { + paddr_index = reventry->paddr_index; + size = reventry->size; + found = 1; + break; + } + } + if (!found) { + fprintf(stderr, "%s, could not find %p\n", __func__, ptr); + QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) { + DPRINTF(" "TARGET_FMT_plx" -> %p is present\n", reventry->paddr_index, + reventry->vaddr_req); + } + abort(); + return 0; + } + + entry = &mapcache->entry[paddr_index % mapcache->nr_buckets]; + while (entry && (entry->paddr_index != paddr_index || entry->size != size)) { + entry = entry->next; + } + if (!entry) { + DPRINTF("Trying to find address %p that is not in the mapcache!\n", ptr); + raddr = 0; + } else { + raddr = (reventry->paddr_index << MCACHE_BUCKET_SHIFT) + + ((unsigned long) ptr - (unsigned long) entry->vaddr_base); + } + mapcache_unlock(); + return raddr; +} + +static void xen_invalidate_map_cache_entry_unlocked(uint8_t *buffer) +{ + MapCacheEntry *entry = NULL, *pentry = NULL; + MapCacheRev *reventry; + hwaddr paddr_index; + hwaddr size; + int found = 0; + + QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) { + if (reventry->vaddr_req == buffer) { + paddr_index = reventry->paddr_index; + size = reventry->size; + found = 1; + break; + } + } + if (!found) { + DPRINTF("%s, could not find %p\n", __func__, buffer); + QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) { + DPRINTF(" "TARGET_FMT_plx" -> %p is present\n", reventry->paddr_index, reventry->vaddr_req); + } + return; + } + QTAILQ_REMOVE(&mapcache->locked_entries, reventry, next); + g_free(reventry); + + if (mapcache->last_entry != NULL && + mapcache->last_entry->paddr_index == paddr_index) { + mapcache->last_entry = NULL; + } + + entry = &mapcache->entry[paddr_index % mapcache->nr_buckets]; + while (entry && (entry->paddr_index != paddr_index || entry->size != size)) { + pentry = entry; + entry = entry->next; + } + if (!entry) { + DPRINTF("Trying to unmap address %p that is not in the mapcache!\n", buffer); + return; + } + entry->lock--; + if (entry->lock > 0 || pentry == 
NULL) { + return; + } + + pentry->next = entry->next; + ram_block_notify_remove(entry->vaddr_base, entry->size, entry->size); + if (munmap(entry->vaddr_base, entry->size) != 0) { + perror("unmap fails"); + exit(-1); + } + g_free(entry->valid_mapping); + g_free(entry); +} + +void xen_invalidate_map_cache_entry(uint8_t *buffer) +{ + mapcache_lock(); + xen_invalidate_map_cache_entry_unlocked(buffer); + mapcache_unlock(); +} + +void xen_invalidate_map_cache(void) +{ + unsigned long i; + MapCacheRev *reventry; + + /* Flush pending AIO before destroying the mapcache */ + bdrv_drain_all(); + + mapcache_lock(); + + QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) { + if (!reventry->dma) { + continue; + } + fprintf(stderr, "Locked DMA mapping while invalidating mapcache!" + " "TARGET_FMT_plx" -> %p is present\n", + reventry->paddr_index, reventry->vaddr_req); + } + + for (i = 0; i < mapcache->nr_buckets; i++) { + MapCacheEntry *entry = &mapcache->entry[i]; + + if (entry->vaddr_base == NULL) { + continue; + } + if (entry->lock > 0) { + continue; + } + + if (munmap(entry->vaddr_base, entry->size) != 0) { + perror("unmap fails"); + exit(-1); + } + + entry->paddr_index = 0; + entry->vaddr_base = NULL; + entry->size = 0; + g_free(entry->valid_mapping); + entry->valid_mapping = NULL; + } + + mapcache->last_entry = NULL; + + mapcache_unlock(); +} + +static uint8_t *xen_replace_cache_entry_unlocked(hwaddr old_phys_addr, + hwaddr new_phys_addr, + hwaddr size) +{ + MapCacheEntry *entry; + hwaddr address_index, address_offset; + hwaddr test_bit_size, cache_size = size; + + address_index = old_phys_addr >> MCACHE_BUCKET_SHIFT; + address_offset = old_phys_addr & (MCACHE_BUCKET_SIZE - 1); + + assert(size); + /* test_bit_size is always a multiple of XC_PAGE_SIZE */ + test_bit_size = size + (old_phys_addr & (XC_PAGE_SIZE - 1)); + if (test_bit_size % XC_PAGE_SIZE) { + test_bit_size += XC_PAGE_SIZE - (test_bit_size % XC_PAGE_SIZE); + } + cache_size = size + address_offset; + if (cache_size % MCACHE_BUCKET_SIZE) { + cache_size += MCACHE_BUCKET_SIZE - (cache_size % MCACHE_BUCKET_SIZE); + } + + entry = &mapcache->entry[address_index % mapcache->nr_buckets]; + while (entry && !(entry->paddr_index == address_index && + entry->size == cache_size)) { + entry = entry->next; + } + if (!entry) { + DPRINTF("Trying to update an entry for "TARGET_FMT_plx \ + "that is not in the mapcache!\n", old_phys_addr); + return NULL; + } + + address_index = new_phys_addr >> MCACHE_BUCKET_SHIFT; + address_offset = new_phys_addr & (MCACHE_BUCKET_SIZE - 1); + + fprintf(stderr, "Replacing a dummy mapcache entry for "TARGET_FMT_plx \ + " with "TARGET_FMT_plx"\n", old_phys_addr, new_phys_addr); + + xen_remap_bucket(entry, entry->vaddr_base, + cache_size, address_index, false); + if (!test_bits(address_offset >> XC_PAGE_SHIFT, + test_bit_size >> XC_PAGE_SHIFT, + entry->valid_mapping)) { + DPRINTF("Unable to update a mapcache entry for "TARGET_FMT_plx"!\n", + old_phys_addr); + return NULL; + } + + return entry->vaddr_base + address_offset; +} + +uint8_t *xen_replace_cache_entry(hwaddr old_phys_addr, + hwaddr new_phys_addr, + hwaddr size) +{ + uint8_t *p; + + mapcache_lock(); + p = xen_replace_cache_entry_unlocked(old_phys_addr, new_phys_addr, size); + mapcache_unlock(); + return p; +} diff --git a/hw/i386/xen/xen_apic.c b/hw/i386/xen/xen_apic.c new file mode 100644 index 000000000..7c7a60b16 --- /dev/null +++ b/hw/i386/xen/xen_apic.c @@ -0,0 +1,103 @@ +/* + * Xen basic APIC support + * + * Copyright (c) 2012 Citrix + * + * Authors: + * Wei 
Liu + * + * This work is licensed under the terms of the GNU GPL version 2 or + * later. See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "hw/i386/apic_internal.h" +#include "hw/pci/msi.h" +#include "hw/xen/xen.h" +#include "qemu/module.h" + +static uint64_t xen_apic_mem_read(void *opaque, hwaddr addr, + unsigned size) +{ + return ~(uint64_t)0; +} + +static void xen_apic_mem_write(void *opaque, hwaddr addr, + uint64_t data, unsigned size) +{ + if (size != sizeof(uint32_t)) { + fprintf(stderr, "Xen: APIC write data size = %d, invalid\n", size); + return; + } + + xen_hvm_inject_msi(addr, data); +} + +static const MemoryRegionOps xen_apic_io_ops = { + .read = xen_apic_mem_read, + .write = xen_apic_mem_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void xen_apic_realize(DeviceState *dev, Error **errp) +{ + APICCommonState *s = APIC_COMMON(dev); + + s->vapic_control = 0; + memory_region_init_io(&s->io_memory, OBJECT(s), &xen_apic_io_ops, s, + "xen-apic-msi", APIC_SPACE_SIZE); + msi_nonbroken = true; +} + +static void xen_apic_set_base(APICCommonState *s, uint64_t val) +{ +} + +static void xen_apic_set_tpr(APICCommonState *s, uint8_t val) +{ +} + +static uint8_t xen_apic_get_tpr(APICCommonState *s) +{ + return 0; +} + +static void xen_apic_vapic_base_update(APICCommonState *s) +{ +} + +static void xen_apic_external_nmi(APICCommonState *s) +{ +} + +static void xen_send_msi(MSIMessage *msi) +{ + xen_hvm_inject_msi(msi->address, msi->data); +} + +static void xen_apic_class_init(ObjectClass *klass, void *data) +{ + APICCommonClass *k = APIC_COMMON_CLASS(klass); + + k->realize = xen_apic_realize; + k->set_base = xen_apic_set_base; + k->set_tpr = xen_apic_set_tpr; + k->get_tpr = xen_apic_get_tpr; + k->vapic_base_update = xen_apic_vapic_base_update; + k->external_nmi = xen_apic_external_nmi; + k->send_msi = xen_send_msi; +} + +static const TypeInfo xen_apic_info = { + .name = "xen-apic", + .parent = TYPE_APIC_COMMON, + .instance_size = sizeof(APICCommonState), + .class_init = xen_apic_class_init, +}; + +static void xen_apic_register_types(void) +{ + type_register_static(&xen_apic_info); +} + +type_init(xen_apic_register_types) diff --git a/hw/i386/xen/xen_platform.c b/hw/i386/xen/xen_platform.c new file mode 100644 index 000000000..72028449b --- /dev/null +++ b/hw/i386/xen/xen_platform.c @@ -0,0 +1,519 @@ +/* + * XEN platform pci device, formerly known as the event channel device + * + * Copyright (c) 2003-2004 Intel Corp. + * Copyright (c) 2006 XenSource + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "hw/ide.h" +#include "hw/pci/pci.h" +#include "hw/xen/xen_common.h" +#include "migration/vmstate.h" +#include "hw/xen/xen-legacy-backend.h" +#include "trace.h" +#include "sysemu/xen.h" +#include "sysemu/block-backend.h" +#include "qemu/error-report.h" +#include "qemu/module.h" +#include "qom/object.h" + +//#define DEBUG_PLATFORM + +#ifdef DEBUG_PLATFORM +#define DPRINTF(fmt, ...) do { \ + fprintf(stderr, "xen_platform: " fmt, ## __VA_ARGS__); \ +} while (0) +#else +#define DPRINTF(fmt, ...) do { } while (0) +#endif + +#define PFFLAG_ROM_LOCK 1 /* Sets whether ROM memory area is RW or RO */ + +struct PCIXenPlatformState { + /*< private >*/ + PCIDevice parent_obj; + /*< public >*/ + + MemoryRegion fixed_io; + MemoryRegion bar; + MemoryRegion mmio_bar; + uint8_t flags; /* used only for version_id == 2 */ + uint16_t driver_product_version; + + /* Log from guest drivers */ + char log_buffer[4096]; + int log_buffer_off; +}; + +#define TYPE_XEN_PLATFORM "xen-platform" +OBJECT_DECLARE_SIMPLE_TYPE(PCIXenPlatformState, XEN_PLATFORM) + +#define XEN_PLATFORM_IOPORT 0x10 + +/* Send bytes to syslog */ +static void log_writeb(PCIXenPlatformState *s, char val) +{ + if (val == '\n' || s->log_buffer_off == sizeof(s->log_buffer) - 1) { + /* Flush buffer */ + s->log_buffer[s->log_buffer_off] = 0; + trace_xen_platform_log(s->log_buffer); + s->log_buffer_off = 0; + } else { + s->log_buffer[s->log_buffer_off++] = val; + } +} + +/* + * Unplug device flags. + * + * The logic got a little confused at some point in the past but this is + * what they do now. + * + * bit 0: Unplug all IDE and SCSI disks. + * bit 1: Unplug all NICs. + * bit 2: Unplug IDE disks except primary master. This is overridden if + * bit 0 is also present in the mask. + * bit 3: Unplug all NVMe disks. + * + */ +#define _UNPLUG_IDE_SCSI_DISKS 0 +#define UNPLUG_IDE_SCSI_DISKS (1u << _UNPLUG_IDE_SCSI_DISKS) + +#define _UNPLUG_ALL_NICS 1 +#define UNPLUG_ALL_NICS (1u << _UNPLUG_ALL_NICS) + +#define _UNPLUG_AUX_IDE_DISKS 2 +#define UNPLUG_AUX_IDE_DISKS (1u << _UNPLUG_AUX_IDE_DISKS) + +#define _UNPLUG_NVME_DISKS 3 +#define UNPLUG_NVME_DISKS (1u << _UNPLUG_NVME_DISKS) + +static void unplug_nic(PCIBus *b, PCIDevice *d, void *o) +{ + /* We have to ignore passthrough devices */ + if (pci_get_word(d->config + PCI_CLASS_DEVICE) == + PCI_CLASS_NETWORK_ETHERNET + && strcmp(d->name, "xen-pci-passthrough") != 0) { + object_unparent(OBJECT(d)); + } +} + +/* Remove the peer of the NIC device. Normally, this would be a tap device. 
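+ * Deleting the peer tears down the host backend; the emulated NIC
+ * itself is unparented separately in pci_unplug_nics().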
*/ +static void del_nic_peer(NICState *nic, void *opaque) +{ + NetClientState *nc; + + nc = qemu_get_queue(nic); + if (nc->peer) + qemu_del_net_client(nc->peer); +} + +static void pci_unplug_nics(PCIBus *bus) +{ + qemu_foreach_nic(del_nic_peer, NULL); + pci_for_each_device(bus, 0, unplug_nic, NULL); +} + +static void unplug_disks(PCIBus *b, PCIDevice *d, void *opaque) +{ + uint32_t flags = *(uint32_t *)opaque; + bool aux = (flags & UNPLUG_AUX_IDE_DISKS) && + !(flags & UNPLUG_IDE_SCSI_DISKS); + + /* We have to ignore passthrough devices */ + if (!strcmp(d->name, "xen-pci-passthrough")) { + return; + } + + switch (pci_get_word(d->config + PCI_CLASS_DEVICE)) { + case PCI_CLASS_STORAGE_IDE: + pci_piix3_xen_ide_unplug(DEVICE(d), aux); + break; + + case PCI_CLASS_STORAGE_SCSI: + if (!aux) { + object_unparent(OBJECT(d)); + } + break; + + case PCI_CLASS_STORAGE_EXPRESS: + if (flags & UNPLUG_NVME_DISKS) { + object_unparent(OBJECT(d)); + } + + default: + break; + } +} + +static void pci_unplug_disks(PCIBus *bus, uint32_t flags) +{ + pci_for_each_device(bus, 0, unplug_disks, &flags); +} + +static void platform_fixed_ioport_writew(void *opaque, uint32_t addr, uint32_t val) +{ + PCIXenPlatformState *s = opaque; + + switch (addr) { + case 0: { + PCIDevice *pci_dev = PCI_DEVICE(s); + /* Unplug devices. See comment above flag definitions */ + if (val & (UNPLUG_IDE_SCSI_DISKS | UNPLUG_AUX_IDE_DISKS | + UNPLUG_NVME_DISKS)) { + DPRINTF("unplug disks\n"); + pci_unplug_disks(pci_get_bus(pci_dev), val); + } + if (val & UNPLUG_ALL_NICS) { + DPRINTF("unplug nics\n"); + pci_unplug_nics(pci_get_bus(pci_dev)); + } + break; + } + case 2: + switch (val) { + case 1: + DPRINTF("Citrix Windows PV drivers loaded in guest\n"); + break; + case 0: + DPRINTF("Guest claimed to be running PV product 0?\n"); + break; + default: + DPRINTF("Unknown PV product %d loaded in guest\n", val); + break; + } + s->driver_product_version = val; + break; + } +} + +static void platform_fixed_ioport_writel(void *opaque, uint32_t addr, + uint32_t val) +{ + switch (addr) { + case 0: + /* PV driver version */ + break; + } +} + +static void platform_fixed_ioport_writeb(void *opaque, uint32_t addr, uint32_t val) +{ + PCIXenPlatformState *s = opaque; + + switch (addr) { + case 0: /* Platform flags */ { + hvmmem_type_t mem_type = (val & PFFLAG_ROM_LOCK) ? + HVMMEM_ram_ro : HVMMEM_ram_rw; + if (xen_set_mem_type(xen_domid, mem_type, 0xc0, 0x40)) { + DPRINTF("unable to change ro/rw state of ROM memory area!\n"); + } else { + s->flags = val & PFFLAG_ROM_LOCK; + DPRINTF("changed ro/rw state of ROM memory area. now is %s state.\n", + (mem_type == HVMMEM_ram_ro ? "ro":"rw")); + } + break; + } + case 2: + log_writeb(s, val); + break; + } +} + +static uint32_t platform_fixed_ioport_readw(void *opaque, uint32_t addr) +{ + switch (addr) { + case 0: + /* Magic value so that you can identify the interface. 
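+         * Xen PV drivers probe this port and only continue with the
+         * unplug handshake when they read back 0x49d2.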
+         */
+        return 0x49d2;
+    default:
+        return 0xffff;
+    }
+}
+
+static uint32_t platform_fixed_ioport_readb(void *opaque, uint32_t addr)
+{
+    PCIXenPlatformState *s = opaque;
+
+    switch (addr) {
+    case 0:
+        /* Platform flags */
+        return s->flags;
+    case 2:
+        /* Version number */
+        return 1;
+    default:
+        return 0xff;
+    }
+}
+
+static void platform_fixed_ioport_reset(void *opaque)
+{
+    PCIXenPlatformState *s = opaque;
+
+    platform_fixed_ioport_writeb(s, 0, 0);
+}
+
+static uint64_t platform_fixed_ioport_read(void *opaque,
+                                           hwaddr addr,
+                                           unsigned size)
+{
+    switch (size) {
+    case 1:
+        return platform_fixed_ioport_readb(opaque, addr);
+    case 2:
+        return platform_fixed_ioport_readw(opaque, addr);
+    default:
+        return -1;
+    }
+}
+
+static void platform_fixed_ioport_write(void *opaque, hwaddr addr,
+                                        uint64_t val, unsigned size)
+{
+    switch (size) {
+    case 1:
+        platform_fixed_ioport_writeb(opaque, addr, val);
+        break;
+    case 2:
+        platform_fixed_ioport_writew(opaque, addr, val);
+        break;
+    case 4:
+        platform_fixed_ioport_writel(opaque, addr, val);
+        break;
+    }
+}
+
+
+static const MemoryRegionOps platform_fixed_io_ops = {
+    .read = platform_fixed_ioport_read,
+    .write = platform_fixed_ioport_write,
+    .valid = {
+        .unaligned = true,
+    },
+    .impl = {
+        .min_access_size = 1,
+        .max_access_size = 4,
+        .unaligned = true,
+    },
+    .endianness = DEVICE_LITTLE_ENDIAN,
+};
+
+static void platform_fixed_ioport_init(PCIXenPlatformState *s)
+{
+    memory_region_init_io(&s->fixed_io, OBJECT(s), &platform_fixed_io_ops, s,
+                          "xen-fixed", 16);
+    memory_region_add_subregion(get_system_io(), XEN_PLATFORM_IOPORT,
+                                &s->fixed_io);
+}
+
+/* Xen Platform PCI Device */
+
+static uint64_t xen_platform_ioport_readb(void *opaque, hwaddr addr,
+                                          unsigned int size)
+{
+    if (addr == 0) {
+        return platform_fixed_ioport_readb(opaque, 0);
+    } else {
+        return ~0u;
+    }
+}
+
+static void xen_platform_ioport_writeb(void *opaque, hwaddr addr,
+                                       uint64_t val, unsigned int size)
+{
+    PCIXenPlatformState *s = opaque;
+    PCIDevice *pci_dev = PCI_DEVICE(s);
+
+    switch (addr) {
+    case 0: /* Platform flags */
+        platform_fixed_ioport_writeb(opaque, 0, (uint32_t)val);
+        break;
+    case 4:
+        if (val == 1) {
+            /*
+             * SUSE unplug for Xenlinux
+             * xen-kmp used this since xen-3.0.4, instead of the official
+             * protocol from xen-3.3+. It did an unconditional
+             * "outl(1, (ioaddr + 4));"
+             * Pre VMDP 1.7 used 4 and 8 depending on how VMDP was configured.
+             * If VMDP was to control both disk and LAN it would use 4.
+             * If it controlled just disk or just LAN, it would use 8 below.
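+             * Either way, the effect of writing 1 here is the same as the
+             * official protocol's "unplug all IDE/SCSI disks and all NICs".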
+ */ + pci_unplug_disks(pci_get_bus(pci_dev), UNPLUG_IDE_SCSI_DISKS); + pci_unplug_nics(pci_get_bus(pci_dev)); + } + break; + case 8: + switch (val) { + case 1: + pci_unplug_disks(pci_get_bus(pci_dev), UNPLUG_IDE_SCSI_DISKS); + break; + case 2: + pci_unplug_nics(pci_get_bus(pci_dev)); + break; + default: + log_writeb(s, (uint32_t)val); + break; + } + break; + default: + break; + } +} + +static const MemoryRegionOps xen_pci_io_ops = { + .read = xen_platform_ioport_readb, + .write = xen_platform_ioport_writeb, + .impl.min_access_size = 1, + .impl.max_access_size = 1, +}; + +static void platform_ioport_bar_setup(PCIXenPlatformState *d) +{ + memory_region_init_io(&d->bar, OBJECT(d), &xen_pci_io_ops, d, + "xen-pci", 0x100); +} + +static uint64_t platform_mmio_read(void *opaque, hwaddr addr, + unsigned size) +{ + DPRINTF("Warning: attempted read from physical address " + "0x" TARGET_FMT_plx " in xen platform mmio space\n", addr); + + return 0; +} + +static void platform_mmio_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + DPRINTF("Warning: attempted write of 0x%"PRIx64" to physical " + "address 0x" TARGET_FMT_plx " in xen platform mmio space\n", + val, addr); +} + +static const MemoryRegionOps platform_mmio_handler = { + .read = &platform_mmio_read, + .write = &platform_mmio_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void platform_mmio_setup(PCIXenPlatformState *d) +{ + memory_region_init_io(&d->mmio_bar, OBJECT(d), &platform_mmio_handler, d, + "xen-mmio", 0x1000000); +} + +static int xen_platform_post_load(void *opaque, int version_id) +{ + PCIXenPlatformState *s = opaque; + + platform_fixed_ioport_writeb(s, 0, s->flags); + + return 0; +} + +static const VMStateDescription vmstate_xen_platform = { + .name = "platform", + .version_id = 4, + .minimum_version_id = 4, + .post_load = xen_platform_post_load, + .fields = (VMStateField[]) { + VMSTATE_PCI_DEVICE(parent_obj, PCIXenPlatformState), + VMSTATE_UINT8(flags, PCIXenPlatformState), + VMSTATE_END_OF_LIST() + } +}; + +static void xen_platform_realize(PCIDevice *dev, Error **errp) +{ + PCIXenPlatformState *d = XEN_PLATFORM(dev); + uint8_t *pci_conf; + + /* Device will crash on reset if xen is not initialized */ + if (!xen_enabled()) { + error_setg(errp, "xen-platform device requires the Xen accelerator"); + return; + } + + pci_conf = dev->config; + + pci_set_word(pci_conf + PCI_COMMAND, PCI_COMMAND_IO | PCI_COMMAND_MEMORY); + + pci_config_set_prog_interface(pci_conf, 0); + + pci_conf[PCI_INTERRUPT_PIN] = 1; + + platform_ioport_bar_setup(d); + pci_register_bar(dev, 0, PCI_BASE_ADDRESS_SPACE_IO, &d->bar); + + /* reserve 16MB mmio address for share memory*/ + platform_mmio_setup(d); + pci_register_bar(dev, 1, PCI_BASE_ADDRESS_MEM_PREFETCH, + &d->mmio_bar); + + platform_fixed_ioport_init(d); +} + +static void platform_reset(DeviceState *dev) +{ + PCIXenPlatformState *s = XEN_PLATFORM(dev); + + platform_fixed_ioport_reset(s); +} + +static void xen_platform_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + + k->realize = xen_platform_realize; + k->vendor_id = PCI_VENDOR_ID_XEN; + k->device_id = PCI_DEVICE_ID_XEN_PLATFORM; + k->class_id = PCI_CLASS_OTHERS << 8 | 0x80; + k->subsystem_vendor_id = PCI_VENDOR_ID_XEN; + k->subsystem_id = PCI_DEVICE_ID_XEN_PLATFORM; + k->revision = 1; + set_bit(DEVICE_CATEGORY_MISC, dc->categories); + dc->desc = "XEN platform pci device"; + dc->reset = platform_reset; + dc->vmsd = &vmstate_xen_platform; +} 
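+/*
+ * For reference, the guest-side view of the protocol implemented above
+ * (a sketch, not part of the device model; inw/outw stand in for the
+ * guest's port I/O helpers):
+ *
+ *     if (inw(0x10) == 0x49d2) {              // fixed-port magic present
+ *         outw(0x12, 1);                      // report PV product version
+ *         outw(0x10, UNPLUG_IDE_SCSI_DISKS | UNPLUG_ALL_NICS);
+ *     }
+ *
+ * After the final write the emulated IDE disks and NICs disappear and
+ * the PV drivers take over.
+ */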
+ +static const TypeInfo xen_platform_info = { + .name = TYPE_XEN_PLATFORM, + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(PCIXenPlatformState), + .class_init = xen_platform_class_init, + .interfaces = (InterfaceInfo[]) { + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { }, + }, +}; + +static void xen_platform_register_types(void) +{ + type_register_static(&xen_platform_info); +} + +type_init(xen_platform_register_types) diff --git a/hw/i386/xen/xen_pvdevice.c b/hw/i386/xen/xen_pvdevice.c new file mode 100644 index 000000000..1ea95fa60 --- /dev/null +++ b/hw/i386/xen/xen_pvdevice.c @@ -0,0 +1,154 @@ +/* Copyright (c) Citrix Systems Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, + * with or without modification, are permitted provided + * that the following conditions are met: + * + * * Redistributions of source code must retain the above + * copyright notice, this list of conditions and the + * following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the + * following disclaimer in the documentation and/or other + * materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND + * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, + * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/module.h" +#include "hw/pci/pci.h" +#include "hw/qdev-properties.h" +#include "migration/vmstate.h" +#include "trace.h" +#include "qom/object.h" + +#define TYPE_XEN_PV_DEVICE "xen-pvdevice" + +OBJECT_DECLARE_SIMPLE_TYPE(XenPVDevice, XEN_PV_DEVICE) + +struct XenPVDevice { + /*< private >*/ + PCIDevice parent_obj; + /*< public >*/ + uint16_t vendor_id; + uint16_t device_id; + uint8_t revision; + uint32_t size; + MemoryRegion mmio; +}; + +static uint64_t xen_pv_mmio_read(void *opaque, hwaddr addr, + unsigned size) +{ + trace_xen_pv_mmio_read(addr); + + return ~(uint64_t)0; +} + +static void xen_pv_mmio_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + trace_xen_pv_mmio_write(addr); +} + +static const MemoryRegionOps xen_pv_mmio_ops = { + .read = &xen_pv_mmio_read, + .write = &xen_pv_mmio_write, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static const VMStateDescription vmstate_xen_pvdevice = { + .name = "xen-pvdevice", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_PCI_DEVICE(parent_obj, XenPVDevice), + VMSTATE_END_OF_LIST() + } +}; + +static void xen_pv_realize(PCIDevice *pci_dev, Error **errp) +{ + XenPVDevice *d = XEN_PV_DEVICE(pci_dev); + uint8_t *pci_conf; + + /* device-id property must always be supplied */ + if (d->device_id == 0xffff) { + error_setg(errp, "Device ID invalid, it must always be supplied"); + return; + } + + pci_conf = pci_dev->config; + + pci_set_word(pci_conf + PCI_VENDOR_ID, d->vendor_id); + pci_set_word(pci_conf + PCI_SUBSYSTEM_VENDOR_ID, d->vendor_id); + pci_set_word(pci_conf + PCI_DEVICE_ID, d->device_id); + pci_set_word(pci_conf + PCI_SUBSYSTEM_ID, d->device_id); + pci_set_byte(pci_conf + PCI_REVISION_ID, d->revision); + + pci_set_word(pci_conf + PCI_COMMAND, PCI_COMMAND_MEMORY); + + pci_config_set_prog_interface(pci_conf, 0); + + pci_conf[PCI_INTERRUPT_PIN] = 1; + + memory_region_init_io(&d->mmio, NULL, &xen_pv_mmio_ops, d, + "mmio", d->size); + + pci_register_bar(pci_dev, 1, PCI_BASE_ADDRESS_MEM_PREFETCH, + &d->mmio); +} + +static Property xen_pv_props[] = { + DEFINE_PROP_UINT16("vendor-id", XenPVDevice, vendor_id, PCI_VENDOR_ID_XEN), + DEFINE_PROP_UINT16("device-id", XenPVDevice, device_id, 0xffff), + DEFINE_PROP_UINT8("revision", XenPVDevice, revision, 0x01), + DEFINE_PROP_UINT32("size", XenPVDevice, size, 0x400000), + DEFINE_PROP_END_OF_LIST() +}; + +static void xen_pv_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + + k->realize = xen_pv_realize; + k->class_id = PCI_CLASS_SYSTEM_OTHER; + dc->desc = "Xen PV Device"; + device_class_set_props(dc, xen_pv_props); + dc->vmsd = &vmstate_xen_pvdevice; +} + +static const TypeInfo xen_pv_type_info = { + .name = TYPE_XEN_PV_DEVICE, + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(XenPVDevice), + .class_init = xen_pv_class_init, + .interfaces = (InterfaceInfo[]) { + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { }, + }, +}; + +static void xen_pv_register_types(void) +{ + type_register_static(&xen_pv_type_info); +} + +type_init(xen_pv_register_types) diff --git a/hw/ide/Kconfig b/hw/ide/Kconfig new file mode 100644 index 000000000..dd85fa361 --- /dev/null +++ b/hw/ide/Kconfig @@ -0,0 +1,59 @@ +config IDE_CORE + bool + +config IDE_QDEV + bool + select IDE_CORE + +config IDE_PCI + bool + depends on PCI + select IDE_QDEV + +config IDE_ISA + bool + depends on ISA_BUS + select IDE_QDEV + +config IDE_PIIX + 
bool
+    select IDE_PCI
+    select IDE_QDEV
+
+config IDE_CMD646
+    bool
+    select IDE_PCI
+    select IDE_QDEV
+
+config IDE_MACIO
+    bool
+    select IDE_QDEV
+
+config IDE_MMIO
+    bool
+    select IDE_QDEV
+
+config IDE_VIA
+    bool
+    select IDE_PCI
+    select IDE_QDEV
+
+config MICRODRIVE
+    bool
+    select IDE_QDEV
+    depends on PCMCIA
+
+config AHCI
+    bool
+    select IDE_QDEV
+
+config AHCI_ICH9
+    bool
+    default y if PCI_DEVICES
+    depends on PCI
+    select AHCI
+
+config IDE_SII3112
+    bool
+    select IDE_PCI
+    select IDE_QDEV
diff --git a/hw/ide/ahci-allwinner.c b/hw/ide/ahci-allwinner.c
new file mode 100644
index 000000000..227e747ba
--- /dev/null
+++ b/hw/ide/ahci-allwinner.c
@@ -0,0 +1,127 @@
+/*
+ * QEMU Allwinner AHCI Emulation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/error-report.h"
+#include "qemu/module.h"
+#include "sysemu/dma.h"
+#include "hw/ide/internal.h"
+#include "migration/vmstate.h"
+#include "ahci_internal.h"
+
+#include "trace.h"
+
+#define ALLWINNER_AHCI_BISTAFR    ((0xa0 - ALLWINNER_AHCI_MMIO_OFF) / 4)
+#define ALLWINNER_AHCI_BISTCR     ((0xa4 - ALLWINNER_AHCI_MMIO_OFF) / 4)
+#define ALLWINNER_AHCI_BISTFCTR   ((0xa8 - ALLWINNER_AHCI_MMIO_OFF) / 4)
+#define ALLWINNER_AHCI_BISTSR     ((0xac - ALLWINNER_AHCI_MMIO_OFF) / 4)
+#define ALLWINNER_AHCI_BISTDECR   ((0xb0 - ALLWINNER_AHCI_MMIO_OFF) / 4)
+#define ALLWINNER_AHCI_DIAGNR0    ((0xb4 - ALLWINNER_AHCI_MMIO_OFF) / 4)
+#define ALLWINNER_AHCI_DIAGNR1    ((0xb8 - ALLWINNER_AHCI_MMIO_OFF) / 4)
+#define ALLWINNER_AHCI_OOBR       ((0xbc - ALLWINNER_AHCI_MMIO_OFF) / 4)
+#define ALLWINNER_AHCI_PHYCS0R    ((0xc0 - ALLWINNER_AHCI_MMIO_OFF) / 4)
+#define ALLWINNER_AHCI_PHYCS1R    ((0xc4 - ALLWINNER_AHCI_MMIO_OFF) / 4)
+#define ALLWINNER_AHCI_PHYCS2R    ((0xc8 - ALLWINNER_AHCI_MMIO_OFF) / 4)
+#define ALLWINNER_AHCI_TIMER1MS   ((0xe0 - ALLWINNER_AHCI_MMIO_OFF) / 4)
+#define ALLWINNER_AHCI_GPARAM1R   ((0xe8 - ALLWINNER_AHCI_MMIO_OFF) / 4)
+#define ALLWINNER_AHCI_GPARAM2R   ((0xec - ALLWINNER_AHCI_MMIO_OFF) / 4)
+#define ALLWINNER_AHCI_PPARAMR    ((0xf0 - ALLWINNER_AHCI_MMIO_OFF) / 4)
+#define ALLWINNER_AHCI_TESTR      ((0xf4 - ALLWINNER_AHCI_MMIO_OFF) / 4)
+#define ALLWINNER_AHCI_VERSIONR   ((0xf8 - ALLWINNER_AHCI_MMIO_OFF) / 4)
+#define ALLWINNER_AHCI_IDR        ((0xfc - ALLWINNER_AHCI_MMIO_OFF) / 4)
+#define ALLWINNER_AHCI_RWCR       ((0xfc - ALLWINNER_AHCI_MMIO_OFF) / 4)
+
+static uint64_t allwinner_ahci_mem_read(void *opaque, hwaddr addr,
+                                        unsigned size)
+{
+    AllwinnerAHCIState *a = opaque;
+    AHCIState *s = &(SYSBUS_AHCI(a)->ahci);
+    uint64_t val = a->regs[addr / 4];
+
+    switch (addr / 4) {
+    case ALLWINNER_AHCI_PHYCS0R:
+        val |= 0x2 << 28;
+        break;
+    case ALLWINNER_AHCI_PHYCS2R:
+        val &= ~(0x1 << 24);
+        break;
+    }
+    trace_allwinner_ahci_mem_read(s, a, addr, val, size);
+    return val;
+}
+
+static void allwinner_ahci_mem_write(void *opaque, hwaddr addr,
+                                     uint64_t val, unsigned size)
+{
+    AllwinnerAHCIState *a = opaque;
+    AHCIState *s = &(SYSBUS_AHCI(a)->ahci);
+
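+    /*
+     * Vendor registers are write-through: the value is latched in
+     * regs[] and handed back by allwinner_ahci_mem_read() above, which
+     * only patches the PHY status bits on the way out.
+     */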
+    trace_allwinner_ahci_mem_write(s, a, addr, val, size);
+    a->regs[addr / 4] = val;
+}
+
+static const MemoryRegionOps allwinner_ahci_mem_ops = {
+    .read = allwinner_ahci_mem_read,
+    .write = allwinner_ahci_mem_write,
+    .valid.min_access_size = 4,
+    .valid.max_access_size = 4,
+    .endianness = DEVICE_LITTLE_ENDIAN,
+};
+
+static void allwinner_ahci_init(Object *obj)
+{
+    SysbusAHCIState *s = SYSBUS_AHCI(obj);
+    AllwinnerAHCIState *a = ALLWINNER_AHCI(obj);
+
+    memory_region_init_io(&a->mmio, obj, &allwinner_ahci_mem_ops, a,
+                          "allwinner-ahci", ALLWINNER_AHCI_MMIO_SIZE);
+    memory_region_add_subregion(&s->ahci.mem, ALLWINNER_AHCI_MMIO_OFF,
+                                &a->mmio);
+}
+
+static const VMStateDescription vmstate_allwinner_ahci = {
+    .name = "allwinner-ahci",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT32_ARRAY(regs, AllwinnerAHCIState,
+                             ALLWINNER_AHCI_MMIO_SIZE / 4),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+static void allwinner_ahci_class_init(ObjectClass *klass, void *data)
+{
+    DeviceClass *dc = DEVICE_CLASS(klass);
+
+    dc->vmsd = &vmstate_allwinner_ahci;
+}
+
+static const TypeInfo allwinner_ahci_info = {
+    .name          = TYPE_ALLWINNER_AHCI,
+    .parent        = TYPE_SYSBUS_AHCI,
+    .instance_size = sizeof(AllwinnerAHCIState),
+    .instance_init = allwinner_ahci_init,
+    .class_init    = allwinner_ahci_class_init,
+};
+
+static void sysbus_ahci_register_types(void)
+{
+    type_register_static(&allwinner_ahci_info);
+}
+
+type_init(sysbus_ahci_register_types)
diff --git a/hw/ide/ahci.c b/hw/ide/ahci.c
new file mode 100644
index 000000000..a94c6e26f
--- /dev/null
+++ b/hw/ide/ahci.c
@@ -0,0 +1,1843 @@
+/*
+ * QEMU AHCI Emulation
+ *
+ * Copyright (c) 2010 qiaochong@loongson.cn
+ * Copyright (c) 2010 Roland Elek
+ * Copyright (c) 2010 Sebastian Herbszt
+ * Copyright (c) 2010 Alexander Graf
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ * + */ + +#include "qemu/osdep.h" +#include "hw/pci/msi.h" +#include "hw/pci/pci.h" +#include "hw/qdev-properties.h" +#include "migration/vmstate.h" + +#include "qemu/error-report.h" +#include "qemu/log.h" +#include "qemu/main-loop.h" +#include "qemu/module.h" +#include "sysemu/block-backend.h" +#include "sysemu/dma.h" +#include "hw/ide/internal.h" +#include "hw/ide/pci.h" +#include "ahci_internal.h" + +#include "trace.h" + +static void check_cmd(AHCIState *s, int port); +static int handle_cmd(AHCIState *s, int port, uint8_t slot); +static void ahci_reset_port(AHCIState *s, int port); +static bool ahci_write_fis_d2h(AHCIDevice *ad); +static void ahci_init_d2h(AHCIDevice *ad); +static int ahci_dma_prepare_buf(const IDEDMA *dma, int32_t limit); +static bool ahci_map_clb_address(AHCIDevice *ad); +static bool ahci_map_fis_address(AHCIDevice *ad); +static void ahci_unmap_clb_address(AHCIDevice *ad); +static void ahci_unmap_fis_address(AHCIDevice *ad); + +static const char *AHCIHostReg_lookup[AHCI_HOST_REG__COUNT] = { + [AHCI_HOST_REG_CAP] = "CAP", + [AHCI_HOST_REG_CTL] = "GHC", + [AHCI_HOST_REG_IRQ_STAT] = "IS", + [AHCI_HOST_REG_PORTS_IMPL] = "PI", + [AHCI_HOST_REG_VERSION] = "VS", + [AHCI_HOST_REG_CCC_CTL] = "CCC_CTL", + [AHCI_HOST_REG_CCC_PORTS] = "CCC_PORTS", + [AHCI_HOST_REG_EM_LOC] = "EM_LOC", + [AHCI_HOST_REG_EM_CTL] = "EM_CTL", + [AHCI_HOST_REG_CAP2] = "CAP2", + [AHCI_HOST_REG_BOHC] = "BOHC", +}; + +static const char *AHCIPortReg_lookup[AHCI_PORT_REG__COUNT] = { + [AHCI_PORT_REG_LST_ADDR] = "PxCLB", + [AHCI_PORT_REG_LST_ADDR_HI] = "PxCLBU", + [AHCI_PORT_REG_FIS_ADDR] = "PxFB", + [AHCI_PORT_REG_FIS_ADDR_HI] = "PxFBU", + [AHCI_PORT_REG_IRQ_STAT] = "PxIS", + [AHCI_PORT_REG_IRQ_MASK] = "PXIE", + [AHCI_PORT_REG_CMD] = "PxCMD", + [7] = "Reserved", + [AHCI_PORT_REG_TFDATA] = "PxTFD", + [AHCI_PORT_REG_SIG] = "PxSIG", + [AHCI_PORT_REG_SCR_STAT] = "PxSSTS", + [AHCI_PORT_REG_SCR_CTL] = "PxSCTL", + [AHCI_PORT_REG_SCR_ERR] = "PxSERR", + [AHCI_PORT_REG_SCR_ACT] = "PxSACT", + [AHCI_PORT_REG_CMD_ISSUE] = "PxCI", + [AHCI_PORT_REG_SCR_NOTIF] = "PxSNTF", + [AHCI_PORT_REG_FIS_CTL] = "PxFBS", + [AHCI_PORT_REG_DEV_SLEEP] = "PxDEVSLP", + [18 ... 27] = "Reserved", + [AHCI_PORT_REG_VENDOR_1 ... + AHCI_PORT_REG_VENDOR_4] = "PxVS", +}; + +static const char *AHCIPortIRQ_lookup[AHCI_PORT_IRQ__COUNT] = { + [AHCI_PORT_IRQ_BIT_DHRS] = "DHRS", + [AHCI_PORT_IRQ_BIT_PSS] = "PSS", + [AHCI_PORT_IRQ_BIT_DSS] = "DSS", + [AHCI_PORT_IRQ_BIT_SDBS] = "SDBS", + [AHCI_PORT_IRQ_BIT_UFS] = "UFS", + [AHCI_PORT_IRQ_BIT_DPS] = "DPS", + [AHCI_PORT_IRQ_BIT_PCS] = "PCS", + [AHCI_PORT_IRQ_BIT_DMPS] = "DMPS", + [8 ... 
21] = "RESERVED", + [AHCI_PORT_IRQ_BIT_PRCS] = "PRCS", + [AHCI_PORT_IRQ_BIT_IPMS] = "IPMS", + [AHCI_PORT_IRQ_BIT_OFS] = "OFS", + [25] = "RESERVED", + [AHCI_PORT_IRQ_BIT_INFS] = "INFS", + [AHCI_PORT_IRQ_BIT_IFS] = "IFS", + [AHCI_PORT_IRQ_BIT_HBDS] = "HBDS", + [AHCI_PORT_IRQ_BIT_HBFS] = "HBFS", + [AHCI_PORT_IRQ_BIT_TFES] = "TFES", + [AHCI_PORT_IRQ_BIT_CPDS] = "CPDS" +}; + +static uint32_t ahci_port_read(AHCIState *s, int port, int offset) +{ + uint32_t val; + AHCIPortRegs *pr = &s->dev[port].port_regs; + enum AHCIPortReg regnum = offset / sizeof(uint32_t); + assert(regnum < (AHCI_PORT_ADDR_OFFSET_LEN / sizeof(uint32_t))); + + switch (regnum) { + case AHCI_PORT_REG_LST_ADDR: + val = pr->lst_addr; + break; + case AHCI_PORT_REG_LST_ADDR_HI: + val = pr->lst_addr_hi; + break; + case AHCI_PORT_REG_FIS_ADDR: + val = pr->fis_addr; + break; + case AHCI_PORT_REG_FIS_ADDR_HI: + val = pr->fis_addr_hi; + break; + case AHCI_PORT_REG_IRQ_STAT: + val = pr->irq_stat; + break; + case AHCI_PORT_REG_IRQ_MASK: + val = pr->irq_mask; + break; + case AHCI_PORT_REG_CMD: + val = pr->cmd; + break; + case AHCI_PORT_REG_TFDATA: + val = pr->tfdata; + break; + case AHCI_PORT_REG_SIG: + val = pr->sig; + break; + case AHCI_PORT_REG_SCR_STAT: + if (s->dev[port].port.ifs[0].blk) { + val = SATA_SCR_SSTATUS_DET_DEV_PRESENT_PHY_UP | + SATA_SCR_SSTATUS_SPD_GEN1 | SATA_SCR_SSTATUS_IPM_ACTIVE; + } else { + val = SATA_SCR_SSTATUS_DET_NODEV; + } + break; + case AHCI_PORT_REG_SCR_CTL: + val = pr->scr_ctl; + break; + case AHCI_PORT_REG_SCR_ERR: + val = pr->scr_err; + break; + case AHCI_PORT_REG_SCR_ACT: + val = pr->scr_act; + break; + case AHCI_PORT_REG_CMD_ISSUE: + val = pr->cmd_issue; + break; + default: + trace_ahci_port_read_default(s, port, AHCIPortReg_lookup[regnum], + offset); + val = 0; + } + + trace_ahci_port_read(s, port, AHCIPortReg_lookup[regnum], offset, val); + return val; +} + +static void ahci_irq_raise(AHCIState *s) +{ + DeviceState *dev_state = s->container; + PCIDevice *pci_dev = (PCIDevice *) object_dynamic_cast(OBJECT(dev_state), + TYPE_PCI_DEVICE); + + trace_ahci_irq_raise(s); + + if (pci_dev && msi_enabled(pci_dev)) { + msi_notify(pci_dev, 0); + } else { + qemu_irq_raise(s->irq); + } +} + +static void ahci_irq_lower(AHCIState *s) +{ + DeviceState *dev_state = s->container; + PCIDevice *pci_dev = (PCIDevice *) object_dynamic_cast(OBJECT(dev_state), + TYPE_PCI_DEVICE); + + trace_ahci_irq_lower(s); + + if (!pci_dev || !msi_enabled(pci_dev)) { + qemu_irq_lower(s->irq); + } +} + +static void ahci_check_irq(AHCIState *s) +{ + int i; + uint32_t old_irq = s->control_regs.irqstatus; + + s->control_regs.irqstatus = 0; + for (i = 0; i < s->ports; i++) { + AHCIPortRegs *pr = &s->dev[i].port_regs; + if (pr->irq_stat & pr->irq_mask) { + s->control_regs.irqstatus |= (1 << i); + } + } + trace_ahci_check_irq(s, old_irq, s->control_regs.irqstatus); + if (s->control_regs.irqstatus && + (s->control_regs.ghc & HOST_CTL_IRQ_EN)) { + ahci_irq_raise(s); + } else { + ahci_irq_lower(s); + } +} + +static void ahci_trigger_irq(AHCIState *s, AHCIDevice *d, + enum AHCIPortIRQ irqbit) +{ + g_assert((unsigned)irqbit < 32); + uint32_t irq = 1U << irqbit; + uint32_t irqstat = d->port_regs.irq_stat | irq; + + trace_ahci_trigger_irq(s, d->port_no, + AHCIPortIRQ_lookup[irqbit], irq, + d->port_regs.irq_stat, irqstat, + irqstat & d->port_regs.irq_mask); + + d->port_regs.irq_stat = irqstat; + ahci_check_irq(s); +} + +static void map_page(AddressSpace *as, uint8_t **ptr, uint64_t addr, + uint32_t wanted) +{ + hwaddr len = wanted; + + if (*ptr) { + 
dma_memory_unmap(as, *ptr, len, DMA_DIRECTION_FROM_DEVICE, len); + } + + *ptr = dma_memory_map(as, addr, &len, DMA_DIRECTION_FROM_DEVICE); + if (len < wanted && *ptr) { + dma_memory_unmap(as, *ptr, len, DMA_DIRECTION_FROM_DEVICE, len); + *ptr = NULL; + } +} + +/** + * Check the cmd register to see if we should start or stop + * the DMA or FIS RX engines. + * + * @ad: Device to dis/engage. + * + * @return 0 on success, -1 on error. + */ +static int ahci_cond_start_engines(AHCIDevice *ad) +{ + AHCIPortRegs *pr = &ad->port_regs; + bool cmd_start = pr->cmd & PORT_CMD_START; + bool cmd_on = pr->cmd & PORT_CMD_LIST_ON; + bool fis_start = pr->cmd & PORT_CMD_FIS_RX; + bool fis_on = pr->cmd & PORT_CMD_FIS_ON; + + if (cmd_start && !cmd_on) { + if (!ahci_map_clb_address(ad)) { + pr->cmd &= ~PORT_CMD_START; + error_report("AHCI: Failed to start DMA engine: " + "bad command list buffer address"); + return -1; + } + } else if (!cmd_start && cmd_on) { + ahci_unmap_clb_address(ad); + } + + if (fis_start && !fis_on) { + if (!ahci_map_fis_address(ad)) { + pr->cmd &= ~PORT_CMD_FIS_RX; + error_report("AHCI: Failed to start FIS receive engine: " + "bad FIS receive buffer address"); + return -1; + } + } else if (!fis_start && fis_on) { + ahci_unmap_fis_address(ad); + } + + return 0; +} + +static void ahci_port_write(AHCIState *s, int port, int offset, uint32_t val) +{ + AHCIPortRegs *pr = &s->dev[port].port_regs; + enum AHCIPortReg regnum = offset / sizeof(uint32_t); + assert(regnum < (AHCI_PORT_ADDR_OFFSET_LEN / sizeof(uint32_t))); + trace_ahci_port_write(s, port, AHCIPortReg_lookup[regnum], offset, val); + + switch (regnum) { + case AHCI_PORT_REG_LST_ADDR: + pr->lst_addr = val; + break; + case AHCI_PORT_REG_LST_ADDR_HI: + pr->lst_addr_hi = val; + break; + case AHCI_PORT_REG_FIS_ADDR: + pr->fis_addr = val; + break; + case AHCI_PORT_REG_FIS_ADDR_HI: + pr->fis_addr_hi = val; + break; + case AHCI_PORT_REG_IRQ_STAT: + pr->irq_stat &= ~val; + ahci_check_irq(s); + break; + case AHCI_PORT_REG_IRQ_MASK: + pr->irq_mask = val & 0xfdc000ff; + ahci_check_irq(s); + break; + case AHCI_PORT_REG_CMD: + /* Block any Read-only fields from being set; + * including LIST_ON and FIS_ON. + * The spec requires to set ICC bits to zero after the ICC change + * is done. We don't support ICC state changes, therefore always + * force the ICC bits to zero. + */ + pr->cmd = (pr->cmd & PORT_CMD_RO_MASK) | + (val & ~(PORT_CMD_RO_MASK | PORT_CMD_ICC_MASK)); + + /* Check FIS RX and CLB engines */ + ahci_cond_start_engines(&s->dev[port]); + + /* XXX usually the FIS would be pending on the bus here and + issuing deferred until the OS enables FIS receival. + Instead, we only submit it once - which works in most + cases, but is a hack. 
*/ + if ((pr->cmd & PORT_CMD_FIS_ON) && + !s->dev[port].init_d2h_sent) { + ahci_init_d2h(&s->dev[port]); + } + + check_cmd(s, port); + break; + case AHCI_PORT_REG_TFDATA: + case AHCI_PORT_REG_SIG: + case AHCI_PORT_REG_SCR_STAT: + /* Read Only */ + break; + case AHCI_PORT_REG_SCR_CTL: + if (((pr->scr_ctl & AHCI_SCR_SCTL_DET) == 1) && + ((val & AHCI_SCR_SCTL_DET) == 0)) { + ahci_reset_port(s, port); + } + pr->scr_ctl = val; + break; + case AHCI_PORT_REG_SCR_ERR: + pr->scr_err &= ~val; + break; + case AHCI_PORT_REG_SCR_ACT: + /* RW1 */ + pr->scr_act |= val; + break; + case AHCI_PORT_REG_CMD_ISSUE: + pr->cmd_issue |= val; + check_cmd(s, port); + break; + default: + trace_ahci_port_write_unimpl(s, port, AHCIPortReg_lookup[regnum], + offset, val); + qemu_log_mask(LOG_UNIMP, "Attempted write to unimplemented register: " + "AHCI port %d register %s, offset 0x%x: 0x%"PRIx32, + port, AHCIPortReg_lookup[regnum], offset, val); + break; + } +} + +static uint64_t ahci_mem_read_32(void *opaque, hwaddr addr) +{ + AHCIState *s = opaque; + uint32_t val = 0; + + if (addr < AHCI_GENERIC_HOST_CONTROL_REGS_MAX_ADDR) { + enum AHCIHostReg regnum = addr / 4; + assert(regnum < AHCI_HOST_REG__COUNT); + + switch (regnum) { + case AHCI_HOST_REG_CAP: + val = s->control_regs.cap; + break; + case AHCI_HOST_REG_CTL: + val = s->control_regs.ghc; + break; + case AHCI_HOST_REG_IRQ_STAT: + val = s->control_regs.irqstatus; + break; + case AHCI_HOST_REG_PORTS_IMPL: + val = s->control_regs.impl; + break; + case AHCI_HOST_REG_VERSION: + val = s->control_regs.version; + break; + default: + trace_ahci_mem_read_32_host_default(s, AHCIHostReg_lookup[regnum], + addr); + } + trace_ahci_mem_read_32_host(s, AHCIHostReg_lookup[regnum], addr, val); + } else if ((addr >= AHCI_PORT_REGS_START_ADDR) && + (addr < (AHCI_PORT_REGS_START_ADDR + + (s->ports * AHCI_PORT_ADDR_OFFSET_LEN)))) { + val = ahci_port_read(s, (addr - AHCI_PORT_REGS_START_ADDR) >> 7, + addr & AHCI_PORT_ADDR_OFFSET_MASK); + } else { + trace_ahci_mem_read_32_default(s, addr, val); + } + + trace_ahci_mem_read_32(s, addr, val); + return val; +} + + +/** + * AHCI 1.3 section 3 ("HBA Memory Registers") + * Support unaligned 8/16/32 bit reads, and 64 bit aligned reads. + * Caller is responsible for masking unwanted higher order bytes. + */ +static uint64_t ahci_mem_read(void *opaque, hwaddr addr, unsigned size) +{ + hwaddr aligned = addr & ~0x3; + int ofst = addr - aligned; + uint64_t lo = ahci_mem_read_32(opaque, aligned); + uint64_t hi; + uint64_t val; + + /* if < 8 byte read does not cross 4 byte boundary */ + if (ofst + size <= 4) { + val = lo >> (ofst * 8); + } else { + g_assert(size > 1); + + /* If the 64bit read is unaligned, we will produce undefined + * results. AHCI does not support unaligned 64bit reads. 
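+         * Example: a 2-byte read at offset 3 spans two words and is
+         * assembled as ((hi << 32) | lo) >> 24; the caller then masks
+         * off the unwanted high bytes.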
*/ + hi = ahci_mem_read_32(opaque, aligned + 4); + val = (hi << 32 | lo) >> (ofst * 8); + } + + trace_ahci_mem_read(opaque, size, addr, val); + return val; +} + + +static void ahci_mem_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + AHCIState *s = opaque; + + trace_ahci_mem_write(s, size, addr, val); + + /* Only aligned reads are allowed on AHCI */ + if (addr & 3) { + qemu_log_mask(LOG_GUEST_ERROR, + "ahci: Mis-aligned write to addr 0x%03" HWADDR_PRIX "\n", + addr); + return; + } + + if (addr < AHCI_GENERIC_HOST_CONTROL_REGS_MAX_ADDR) { + enum AHCIHostReg regnum = addr / 4; + assert(regnum < AHCI_HOST_REG__COUNT); + + switch (regnum) { + case AHCI_HOST_REG_CAP: /* R/WO, RO */ + /* FIXME handle R/WO */ + break; + case AHCI_HOST_REG_CTL: /* R/W */ + if (val & HOST_CTL_RESET) { + ahci_reset(s); + } else { + s->control_regs.ghc = (val & 0x3) | HOST_CTL_AHCI_EN; + ahci_check_irq(s); + } + break; + case AHCI_HOST_REG_IRQ_STAT: /* R/WC, RO */ + s->control_regs.irqstatus &= ~val; + ahci_check_irq(s); + break; + case AHCI_HOST_REG_PORTS_IMPL: /* R/WO, RO */ + /* FIXME handle R/WO */ + break; + case AHCI_HOST_REG_VERSION: /* RO */ + /* FIXME report write? */ + break; + default: + qemu_log_mask(LOG_UNIMP, + "Attempted write to unimplemented register: " + "AHCI host register %s, " + "offset 0x%"PRIx64": 0x%"PRIx64, + AHCIHostReg_lookup[regnum], addr, val); + trace_ahci_mem_write_host_unimpl(s, size, + AHCIHostReg_lookup[regnum], addr); + } + trace_ahci_mem_write_host(s, size, AHCIHostReg_lookup[regnum], + addr, val); + } else if ((addr >= AHCI_PORT_REGS_START_ADDR) && + (addr < (AHCI_PORT_REGS_START_ADDR + + (s->ports * AHCI_PORT_ADDR_OFFSET_LEN)))) { + ahci_port_write(s, (addr - AHCI_PORT_REGS_START_ADDR) >> 7, + addr & AHCI_PORT_ADDR_OFFSET_MASK, val); + } else { + qemu_log_mask(LOG_UNIMP, "Attempted write to unimplemented register: " + "AHCI global register at offset 0x%"PRIx64": 0x%"PRIx64, + addr, val); + trace_ahci_mem_write_unimpl(s, size, addr, val); + } +} + +static const MemoryRegionOps ahci_mem_ops = { + .read = ahci_mem_read, + .write = ahci_mem_write, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static uint64_t ahci_idp_read(void *opaque, hwaddr addr, + unsigned size) +{ + AHCIState *s = opaque; + + if (addr == s->idp_offset) { + /* index register */ + return s->idp_index; + } else if (addr == s->idp_offset + 4) { + /* data register - do memory read at location selected by index */ + return ahci_mem_read(opaque, s->idp_index, size); + } else { + return 0; + } +} + +static void ahci_idp_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + AHCIState *s = opaque; + + if (addr == s->idp_offset) { + /* index register - mask off reserved bits */ + s->idp_index = (uint32_t)val & ((AHCI_MEM_BAR_SIZE - 1) & ~3); + } else if (addr == s->idp_offset + 4) { + /* data register - do memory write at location selected by index */ + ahci_mem_write(opaque, s->idp_index, val, size); + } +} + +static const MemoryRegionOps ahci_idp_ops = { + .read = ahci_idp_read, + .write = ahci_idp_write, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + + +static void ahci_reg_init(AHCIState *s) +{ + int i; + + s->control_regs.cap = (s->ports - 1) | + (AHCI_NUM_COMMAND_SLOTS << 8) | + (AHCI_SUPPORTED_SPEED_GEN1 << AHCI_SUPPORTED_SPEED) | + HOST_CAP_NCQ | HOST_CAP_AHCI | HOST_CAP_64; + + s->control_regs.impl = (1 << s->ports) - 1; + + s->control_regs.version = AHCI_VERSION_1_0; + + for (i = 0; i < s->ports; i++) { + s->dev[i].port_state = STATE_RUN; + } +} + +static void check_cmd(AHCIState *s, 
int port) +{ + AHCIPortRegs *pr = &s->dev[port].port_regs; + uint8_t slot; + + if ((pr->cmd & PORT_CMD_START) && pr->cmd_issue) { + for (slot = 0; (slot < 32) && pr->cmd_issue; slot++) { + if ((pr->cmd_issue & (1U << slot)) && + !handle_cmd(s, port, slot)) { + pr->cmd_issue &= ~(1U << slot); + } + } + } +} + +static void ahci_check_cmd_bh(void *opaque) +{ + AHCIDevice *ad = opaque; + + qemu_bh_delete(ad->check_bh); + ad->check_bh = NULL; + + check_cmd(ad->hba, ad->port_no); +} + +static void ahci_init_d2h(AHCIDevice *ad) +{ + IDEState *ide_state = &ad->port.ifs[0]; + AHCIPortRegs *pr = &ad->port_regs; + + if (ad->init_d2h_sent) { + return; + } + + if (ahci_write_fis_d2h(ad)) { + ad->init_d2h_sent = true; + /* We're emulating receiving the first Reg H2D FIS from the device; + * Update the SIG register, but otherwise proceed as normal. */ + pr->sig = ((uint32_t)ide_state->hcyl << 24) | + (ide_state->lcyl << 16) | + (ide_state->sector << 8) | + (ide_state->nsector & 0xFF); + } +} + +static void ahci_set_signature(AHCIDevice *ad, uint32_t sig) +{ + IDEState *s = &ad->port.ifs[0]; + s->hcyl = sig >> 24 & 0xFF; + s->lcyl = sig >> 16 & 0xFF; + s->sector = sig >> 8 & 0xFF; + s->nsector = sig & 0xFF; + + trace_ahci_set_signature(ad->hba, ad->port_no, s->nsector, s->sector, + s->lcyl, s->hcyl, sig); +} + +static void ahci_reset_port(AHCIState *s, int port) +{ + AHCIDevice *d = &s->dev[port]; + AHCIPortRegs *pr = &d->port_regs; + IDEState *ide_state = &d->port.ifs[0]; + int i; + + trace_ahci_reset_port(s, port); + + ide_bus_reset(&d->port); + ide_state->ncq_queues = AHCI_MAX_CMDS; + + pr->scr_stat = 0; + pr->scr_err = 0; + pr->scr_act = 0; + pr->tfdata = 0x7F; + pr->sig = 0xFFFFFFFF; + d->busy_slot = -1; + d->init_d2h_sent = false; + + ide_state = &s->dev[port].port.ifs[0]; + if (!ide_state->blk) { + return; + } + + /* reset ncq queue */ + for (i = 0; i < AHCI_MAX_CMDS; i++) { + NCQTransferState *ncq_tfs = &s->dev[port].ncq_tfs[i]; + ncq_tfs->halt = false; + if (!ncq_tfs->used) { + continue; + } + + if (ncq_tfs->aiocb) { + blk_aio_cancel(ncq_tfs->aiocb); + ncq_tfs->aiocb = NULL; + } + + /* Maybe we just finished the request thanks to blk_aio_cancel() */ + if (!ncq_tfs->used) { + continue; + } + + qemu_sglist_destroy(&ncq_tfs->sglist); + ncq_tfs->used = 0; + } + + s->dev[port].port_state = STATE_RUN; + if (ide_state->drive_kind == IDE_CD) { + ahci_set_signature(d, SATA_SIGNATURE_CDROM); + ide_state->status = SEEK_STAT | WRERR_STAT | READY_STAT; + } else { + ahci_set_signature(d, SATA_SIGNATURE_DISK); + ide_state->status = SEEK_STAT | WRERR_STAT; + } + + ide_state->error = 1; + ahci_init_d2h(d); +} + +/* Buffer pretty output based on a raw FIS structure.
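+ * The returned string is heap-allocated; the caller is expected to + * g_free() it once logged.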
*/ +static char *ahci_pretty_buffer_fis(const uint8_t *fis, int cmd_len) +{ + int i; + GString *s = g_string_new("FIS:"); + + for (i = 0; i < cmd_len; i++) { + if ((i & 0xf) == 0) { + g_string_append_printf(s, "\n0x%02x: ", i); + } + g_string_append_printf(s, "%02x ", fis[i]); + } + g_string_append_c(s, '\n'); + + return g_string_free(s, FALSE); +} + +static bool ahci_map_fis_address(AHCIDevice *ad) +{ + AHCIPortRegs *pr = &ad->port_regs; + map_page(ad->hba->as, &ad->res_fis, + ((uint64_t)pr->fis_addr_hi << 32) | pr->fis_addr, 256); + if (ad->res_fis != NULL) { + pr->cmd |= PORT_CMD_FIS_ON; + return true; + } + + pr->cmd &= ~PORT_CMD_FIS_ON; + return false; +} + +static void ahci_unmap_fis_address(AHCIDevice *ad) +{ + if (ad->res_fis == NULL) { + trace_ahci_unmap_fis_address_null(ad->hba, ad->port_no); + return; + } + ad->port_regs.cmd &= ~PORT_CMD_FIS_ON; + dma_memory_unmap(ad->hba->as, ad->res_fis, 256, + DMA_DIRECTION_FROM_DEVICE, 256); + ad->res_fis = NULL; +} + +static bool ahci_map_clb_address(AHCIDevice *ad) +{ + AHCIPortRegs *pr = &ad->port_regs; + ad->cur_cmd = NULL; + map_page(ad->hba->as, &ad->lst, + ((uint64_t)pr->lst_addr_hi << 32) | pr->lst_addr, 1024); + if (ad->lst != NULL) { + pr->cmd |= PORT_CMD_LIST_ON; + return true; + } + + pr->cmd &= ~PORT_CMD_LIST_ON; + return false; +} + +static void ahci_unmap_clb_address(AHCIDevice *ad) +{ + if (ad->lst == NULL) { + trace_ahci_unmap_clb_address_null(ad->hba, ad->port_no); + return; + } + ad->port_regs.cmd &= ~PORT_CMD_LIST_ON; + dma_memory_unmap(ad->hba->as, ad->lst, 1024, + DMA_DIRECTION_FROM_DEVICE, 1024); + ad->lst = NULL; +} + +static void ahci_write_fis_sdb(AHCIState *s, NCQTransferState *ncq_tfs) +{ + AHCIDevice *ad = ncq_tfs->drive; + AHCIPortRegs *pr = &ad->port_regs; + IDEState *ide_state; + SDBFIS *sdb_fis; + + if (!ad->res_fis || + !(pr->cmd & PORT_CMD_FIS_RX)) { + return; + } + + sdb_fis = (SDBFIS *)&ad->res_fis[RES_FIS_SDBFIS]; + ide_state = &ad->port.ifs[0]; + + sdb_fis->type = SATA_FIS_TYPE_SDB; + /* Interrupt pending & Notification bit */ + sdb_fis->flags = 0x40; /* Interrupt bit, always 1 for NCQ */ + sdb_fis->status = ide_state->status & 0x77; + sdb_fis->error = ide_state->error; + /* update SAct field in SDB_FIS */ + sdb_fis->payload = cpu_to_le32(ad->finished); + + /* Update shadow registers (except BSY 0x80 and DRQ 0x08) */ + pr->tfdata = (ad->port.ifs[0].error << 8) | + (ad->port.ifs[0].status & 0x77) | + (pr->tfdata & 0x88); + pr->scr_act &= ~ad->finished; + ad->finished = 0; + + /* Trigger IRQ if interrupt bit is set (which currently, it always is) */ + if (sdb_fis->flags & 0x40) { + ahci_trigger_irq(s, ad, AHCI_PORT_IRQ_BIT_SDBS); + } +} + +static void ahci_write_fis_pio(AHCIDevice *ad, uint16_t len, bool pio_fis_i) +{ + AHCIPortRegs *pr = &ad->port_regs; + uint8_t *pio_fis; + IDEState *s = &ad->port.ifs[0]; + + if (!ad->res_fis || !(pr->cmd & PORT_CMD_FIS_RX)) { + return; + } + + pio_fis = &ad->res_fis[RES_FIS_PSFIS]; + + pio_fis[0] = SATA_FIS_TYPE_PIO_SETUP; + pio_fis[1] = (pio_fis_i ? 
(1 << 6) : 0); + pio_fis[2] = s->status; + pio_fis[3] = s->error; + + pio_fis[4] = s->sector; + pio_fis[5] = s->lcyl; + pio_fis[6] = s->hcyl; + pio_fis[7] = s->select; + pio_fis[8] = s->hob_sector; + pio_fis[9] = s->hob_lcyl; + pio_fis[10] = s->hob_hcyl; + pio_fis[11] = 0; + pio_fis[12] = s->nsector & 0xFF; + pio_fis[13] = (s->nsector >> 8) & 0xFF; + pio_fis[14] = 0; + pio_fis[15] = s->status; + pio_fis[16] = len & 255; + pio_fis[17] = len >> 8; + pio_fis[18] = 0; + pio_fis[19] = 0; + + /* Update shadow registers: */ + pr->tfdata = (ad->port.ifs[0].error << 8) | + ad->port.ifs[0].status; + + if (pio_fis[2] & ERR_STAT) { + ahci_trigger_irq(ad->hba, ad, AHCI_PORT_IRQ_BIT_TFES); + } +} + +static bool ahci_write_fis_d2h(AHCIDevice *ad) +{ + AHCIPortRegs *pr = &ad->port_regs; + uint8_t *d2h_fis; + int i; + IDEState *s = &ad->port.ifs[0]; + + if (!ad->res_fis || !(pr->cmd & PORT_CMD_FIS_RX)) { + return false; + } + + d2h_fis = &ad->res_fis[RES_FIS_RFIS]; + + d2h_fis[0] = SATA_FIS_TYPE_REGISTER_D2H; + d2h_fis[1] = (1 << 6); /* interrupt bit */ + d2h_fis[2] = s->status; + d2h_fis[3] = s->error; + + d2h_fis[4] = s->sector; + d2h_fis[5] = s->lcyl; + d2h_fis[6] = s->hcyl; + d2h_fis[7] = s->select; + d2h_fis[8] = s->hob_sector; + d2h_fis[9] = s->hob_lcyl; + d2h_fis[10] = s->hob_hcyl; + d2h_fis[11] = 0; + d2h_fis[12] = s->nsector & 0xFF; + d2h_fis[13] = (s->nsector >> 8) & 0xFF; + for (i = 14; i < 20; i++) { + d2h_fis[i] = 0; + } + + /* Update shadow registers: */ + pr->tfdata = (ad->port.ifs[0].error << 8) | + ad->port.ifs[0].status; + + if (d2h_fis[2] & ERR_STAT) { + ahci_trigger_irq(ad->hba, ad, AHCI_PORT_IRQ_BIT_TFES); + } + + ahci_trigger_irq(ad->hba, ad, AHCI_PORT_IRQ_BIT_DHRS); + return true; +} + +static int prdt_tbl_entry_size(const AHCI_SG *tbl) +{ + /* flags_size is zero-based */ + return (le32_to_cpu(tbl->flags_size) & AHCI_PRDT_SIZE_MASK) + 1; +} + +/** + * Fetch entries in a guest-provided PRDT and convert it into a QEMU SGlist. + * @ad: The AHCIDevice for whom we are building the SGList. + * @sglist: The SGList target to add PRD entries to. + * @cmd: The AHCI Command Header that describes where the PRDT is. + * @limit: The remaining size of the S/ATA transaction, in bytes. + * @offset: The number of bytes already transferred. + * + * The AHCI PRDT can describe up to 256GiB. S/ATA only supports transactions of + * up to 32MiB as of ATA8-ACS3 rev 1b, assuming a 512 byte sector size. We stop + * building the sglist from the PRDT as soon as we hit @limit bytes, + * which is <= INT32_MAX/2GiB.
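+ * + * As a worked example of the zero-based byte-count encoding: a PRDT entry + * whose flags_size field reads 0x1ff describes a 0x200 byte (512 byte) + * region, which is why prdt_tbl_entry_size() above adds one.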
+ */ +static int ahci_populate_sglist(AHCIDevice *ad, QEMUSGList *sglist, + AHCICmdHdr *cmd, int64_t limit, uint64_t offset) +{ + uint16_t opts = le16_to_cpu(cmd->opts); + uint16_t prdtl = le16_to_cpu(cmd->prdtl); + uint64_t cfis_addr = le64_to_cpu(cmd->tbl_addr); + uint64_t prdt_addr = cfis_addr + 0x80; + dma_addr_t prdt_len = (prdtl * sizeof(AHCI_SG)); + dma_addr_t real_prdt_len = prdt_len; + uint8_t *prdt; + int i; + int r = 0; + uint64_t sum = 0; + int off_idx = -1; + int64_t off_pos = -1; + int tbl_entry_size; + IDEBus *bus = &ad->port; + BusState *qbus = BUS(bus); + + trace_ahci_populate_sglist(ad->hba, ad->port_no); + + if (!prdtl) { + trace_ahci_populate_sglist_no_prdtl(ad->hba, ad->port_no, opts); + return -1; + } + + /* map PRDT */ + if (!(prdt = dma_memory_map(ad->hba->as, prdt_addr, &prdt_len, + DMA_DIRECTION_TO_DEVICE))){ + trace_ahci_populate_sglist_no_map(ad->hba, ad->port_no); + return -1; + } + + if (prdt_len < real_prdt_len) { + trace_ahci_populate_sglist_short_map(ad->hba, ad->port_no); + r = -1; + goto out; + } + + /* Get entries in the PRDT, init a qemu sglist accordingly */ + if (prdtl > 0) { + AHCI_SG *tbl = (AHCI_SG *)prdt; + sum = 0; + for (i = 0; i < prdtl; i++) { + tbl_entry_size = prdt_tbl_entry_size(&tbl[i]); + if (offset < (sum + tbl_entry_size)) { + off_idx = i; + off_pos = offset - sum; + break; + } + sum += tbl_entry_size; + } + if ((off_idx == -1) || (off_pos < 0) || (off_pos > tbl_entry_size)) { + trace_ahci_populate_sglist_bad_offset(ad->hba, ad->port_no, + off_idx, off_pos); + r = -1; + goto out; + } + + qemu_sglist_init(sglist, qbus->parent, (prdtl - off_idx), + ad->hba->as); + qemu_sglist_add(sglist, le64_to_cpu(tbl[off_idx].addr) + off_pos, + MIN(prdt_tbl_entry_size(&tbl[off_idx]) - off_pos, + limit)); + + for (i = off_idx + 1; i < prdtl && sglist->size < limit; i++) { + qemu_sglist_add(sglist, le64_to_cpu(tbl[i].addr), + MIN(prdt_tbl_entry_size(&tbl[i]), + limit - sglist->size)); + } + } + +out: + dma_memory_unmap(ad->hba->as, prdt, prdt_len, + DMA_DIRECTION_TO_DEVICE, prdt_len); + return r; +} + +static void ncq_err(NCQTransferState *ncq_tfs) +{ + IDEState *ide_state = &ncq_tfs->drive->port.ifs[0]; + + ide_state->error = ABRT_ERR; + ide_state->status = READY_STAT | ERR_STAT; + ncq_tfs->drive->port_regs.scr_err |= (1 << ncq_tfs->tag); + qemu_sglist_destroy(&ncq_tfs->sglist); + ncq_tfs->used = 0; +} + +static void ncq_finish(NCQTransferState *ncq_tfs) +{ + /* If we didn't error out, set our finished bit. Errored commands + * do not get a bit set for the SDB FIS ACT register, nor do they + * clear the outstanding bit in scr_act (PxSACT). 
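+ * (ncq_err() latches the failed tag into PxSERR instead.)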
*/ + if (!(ncq_tfs->drive->port_regs.scr_err & (1 << ncq_tfs->tag))) { + ncq_tfs->drive->finished |= (1 << ncq_tfs->tag); + } + + ahci_write_fis_sdb(ncq_tfs->drive->hba, ncq_tfs); + + trace_ncq_finish(ncq_tfs->drive->hba, ncq_tfs->drive->port_no, + ncq_tfs->tag); + + block_acct_done(blk_get_stats(ncq_tfs->drive->port.ifs[0].blk), + &ncq_tfs->acct); + qemu_sglist_destroy(&ncq_tfs->sglist); + ncq_tfs->used = 0; +} + +static void ncq_cb(void *opaque, int ret) +{ + NCQTransferState *ncq_tfs = (NCQTransferState *)opaque; + IDEState *ide_state = &ncq_tfs->drive->port.ifs[0]; + + ncq_tfs->aiocb = NULL; + + if (ret < 0) { + bool is_read = ncq_tfs->cmd == READ_FPDMA_QUEUED; + BlockErrorAction action = blk_get_error_action(ide_state->blk, + is_read, -ret); + if (action == BLOCK_ERROR_ACTION_STOP) { + ncq_tfs->halt = true; + ide_state->bus->error_status = IDE_RETRY_HBA; + } else if (action == BLOCK_ERROR_ACTION_REPORT) { + ncq_err(ncq_tfs); + } + blk_error_action(ide_state->blk, action, is_read, -ret); + } else { + ide_state->status = READY_STAT | SEEK_STAT; + } + + if (!ncq_tfs->halt) { + ncq_finish(ncq_tfs); + } +} + +static int is_ncq(uint8_t ata_cmd) +{ + /* Based on SATA 3.2 section 13.6.3.2 */ + switch (ata_cmd) { + case READ_FPDMA_QUEUED: + case WRITE_FPDMA_QUEUED: + case NCQ_NON_DATA: + case RECEIVE_FPDMA_QUEUED: + case SEND_FPDMA_QUEUED: + return 1; + default: + return 0; + } +} + +static void execute_ncq_command(NCQTransferState *ncq_tfs) +{ + AHCIDevice *ad = ncq_tfs->drive; + IDEState *ide_state = &ad->port.ifs[0]; + int port = ad->port_no; + + g_assert(is_ncq(ncq_tfs->cmd)); + ncq_tfs->halt = false; + + switch (ncq_tfs->cmd) { + case READ_FPDMA_QUEUED: + trace_execute_ncq_command_read(ad->hba, port, ncq_tfs->tag, + ncq_tfs->sector_count, ncq_tfs->lba); + dma_acct_start(ide_state->blk, &ncq_tfs->acct, + &ncq_tfs->sglist, BLOCK_ACCT_READ); + ncq_tfs->aiocb = dma_blk_read(ide_state->blk, &ncq_tfs->sglist, + ncq_tfs->lba << BDRV_SECTOR_BITS, + BDRV_SECTOR_SIZE, + ncq_cb, ncq_tfs); + break; + case WRITE_FPDMA_QUEUED: + trace_execute_ncq_command_write(ad->hba, port, ncq_tfs->tag, + ncq_tfs->sector_count, ncq_tfs->lba); + dma_acct_start(ide_state->blk, &ncq_tfs->acct, + &ncq_tfs->sglist, BLOCK_ACCT_WRITE); + ncq_tfs->aiocb = dma_blk_write(ide_state->blk, &ncq_tfs->sglist, + ncq_tfs->lba << BDRV_SECTOR_BITS, + BDRV_SECTOR_SIZE, + ncq_cb, ncq_tfs); + break; + default: + trace_execute_ncq_command_unsup(ad->hba, port, + ncq_tfs->tag, ncq_tfs->cmd); + ncq_err(ncq_tfs); + } +} + + +static void process_ncq_command(AHCIState *s, int port, const uint8_t *cmd_fis, + uint8_t slot) +{ + AHCIDevice *ad = &s->dev[port]; + const NCQFrame *ncq_fis = (NCQFrame *)cmd_fis; + uint8_t tag = ncq_fis->tag >> 3; + NCQTransferState *ncq_tfs = &ad->ncq_tfs[tag]; + size_t size; + + g_assert(is_ncq(ncq_fis->command)); + if (ncq_tfs->used) { + /* error - already in use */ + qemu_log_mask(LOG_GUEST_ERROR, "%s: tag %d already used\n", + __func__, tag); + return; + } + + ncq_tfs->used = 1; + ncq_tfs->drive = ad; + ncq_tfs->slot = slot; + ncq_tfs->cmdh = &((AHCICmdHdr *)ad->lst)[slot]; + ncq_tfs->cmd = ncq_fis->command; + ncq_tfs->lba = ((uint64_t)ncq_fis->lba5 << 40) | + ((uint64_t)ncq_fis->lba4 << 32) | + ((uint64_t)ncq_fis->lba3 << 24) | + ((uint64_t)ncq_fis->lba2 << 16) | + ((uint64_t)ncq_fis->lba1 << 8) | + (uint64_t)ncq_fis->lba0; + ncq_tfs->tag = tag; + + /* Sanity-check the NCQ packet */ + if (tag != slot) { + trace_process_ncq_command_mismatch(s, port, tag, slot); + } + + if (ncq_fis->aux0 || ncq_fis->aux1 ||
ncq_fis->aux2 || ncq_fis->aux3) { + trace_process_ncq_command_aux(s, port, tag); + } + if (ncq_fis->prio || ncq_fis->icc) { + trace_process_ncq_command_prioicc(s, port, tag); + } + if (ncq_fis->fua & NCQ_FIS_FUA_MASK) { + trace_process_ncq_command_fua(s, port, tag); + } + if (ncq_fis->tag & NCQ_FIS_RARC_MASK) { + trace_process_ncq_command_rarc(s, port, tag); + } + + ncq_tfs->sector_count = ((ncq_fis->sector_count_high << 8) | + ncq_fis->sector_count_low); + if (!ncq_tfs->sector_count) { + ncq_tfs->sector_count = 0x10000; + } + size = ncq_tfs->sector_count * BDRV_SECTOR_SIZE; + ahci_populate_sglist(ad, &ncq_tfs->sglist, ncq_tfs->cmdh, size, 0); + + if (ncq_tfs->sglist.size < size) { + error_report("ahci: PRDT length for NCQ command (0x%zx) " + "is smaller than the requested size (0x%zx)", + ncq_tfs->sglist.size, size); + ncq_err(ncq_tfs); + ahci_trigger_irq(ad->hba, ad, AHCI_PORT_IRQ_BIT_OFS); + return; + } else if (ncq_tfs->sglist.size != size) { + trace_process_ncq_command_large(s, port, tag, + ncq_tfs->sglist.size, size); + } + + trace_process_ncq_command(s, port, tag, + ncq_fis->command, + ncq_tfs->lba, + ncq_tfs->lba + ncq_tfs->sector_count - 1); + execute_ncq_command(ncq_tfs); +} + +static AHCICmdHdr *get_cmd_header(AHCIState *s, uint8_t port, uint8_t slot) +{ + if (port >= s->ports || slot >= AHCI_MAX_CMDS) { + return NULL; + } + + return s->dev[port].lst ? &((AHCICmdHdr *)s->dev[port].lst)[slot] : NULL; +} + +static void handle_reg_h2d_fis(AHCIState *s, int port, + uint8_t slot, const uint8_t *cmd_fis) +{ + IDEState *ide_state = &s->dev[port].port.ifs[0]; + AHCICmdHdr *cmd = get_cmd_header(s, port, slot); + uint16_t opts = le16_to_cpu(cmd->opts); + + if (cmd_fis[1] & 0x0F) { + trace_handle_reg_h2d_fis_pmp(s, port, cmd_fis[1], + cmd_fis[2], cmd_fis[3]); + return; + } + + if (cmd_fis[1] & 0x70) { + trace_handle_reg_h2d_fis_res(s, port, cmd_fis[1], + cmd_fis[2], cmd_fis[3]); + return; + } + + if (!(cmd_fis[1] & SATA_FIS_REG_H2D_UPDATE_COMMAND_REGISTER)) { + switch (s->dev[port].port_state) { + case STATE_RUN: + if (cmd_fis[15] & ATA_SRST) { + s->dev[port].port_state = STATE_RESET; + } + break; + case STATE_RESET: + if (!(cmd_fis[15] & ATA_SRST)) { + ahci_reset_port(s, port); + } + break; + } + return; + } + + /* Check for NCQ command */ + if (is_ncq(cmd_fis[2])) { + process_ncq_command(s, port, cmd_fis, slot); + return; + } + + /* Decompose the FIS: + * AHCI does not interpret FIS packets, it only forwards them. + * SATA 1.0 describes how to decode LBA28 and CHS FIS packets. + * Later specifications, e.g, SATA 3.2, describe LBA48 FIS packets. + * + * ATA4 describes sector number for LBA28/CHS commands. + * ATA6 describes sector number for LBA48 commands. + * ATA8 deprecates CHS fully, describing only LBA28/48. + * + * We dutifully convert the FIS into IDE registers, and allow the + * core layer to interpret them as needed. */ + ide_state->feature = cmd_fis[3]; + ide_state->sector = cmd_fis[4]; /* LBA 7:0 */ + ide_state->lcyl = cmd_fis[5]; /* LBA 15:8 */ + ide_state->hcyl = cmd_fis[6]; /* LBA 23:16 */ + ide_state->select = cmd_fis[7]; /* LBA 27:24 (LBA28) */ + ide_state->hob_sector = cmd_fis[8]; /* LBA 31:24 */ + ide_state->hob_lcyl = cmd_fis[9]; /* LBA 39:32 */ + ide_state->hob_hcyl = cmd_fis[10]; /* LBA 47:40 */ + ide_state->hob_feature = cmd_fis[11]; + ide_state->nsector = (int64_t)((cmd_fis[13] << 8) | cmd_fis[12]); + /* 14, 16, 17, 18, 19: Reserved (SATA 1.0) */ + /* 15: Only valid when UPDATE_COMMAND not set. 
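+ * (cmd_fis[15] is the Control register, consumed by the ATA_SRST + * handling earlier in this function.)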
*/ + + /* Copy the ACMD field (ATAPI packet, if any) from the AHCI command + * table to ide_state->io_buffer */ + if (opts & AHCI_CMD_ATAPI) { + memcpy(ide_state->io_buffer, &cmd_fis[AHCI_COMMAND_TABLE_ACMD], 0x10); + if (trace_event_get_state_backends(TRACE_HANDLE_REG_H2D_FIS_DUMP)) { + char *pretty_fis = ahci_pretty_buffer_fis(ide_state->io_buffer, 0x10); + trace_handle_reg_h2d_fis_dump(s, port, pretty_fis); + g_free(pretty_fis); + } + } + + ide_state->error = 0; + s->dev[port].done_first_drq = false; + /* Reset transferred byte counter */ + cmd->status = 0; + + /* We're ready to process the command in FIS byte 2. */ + ide_exec_cmd(&s->dev[port].port, cmd_fis[2]); +} + +static int handle_cmd(AHCIState *s, int port, uint8_t slot) +{ + IDEState *ide_state; + uint64_t tbl_addr; + AHCICmdHdr *cmd; + uint8_t *cmd_fis; + dma_addr_t cmd_len; + + if (s->dev[port].port.ifs[0].status & (BUSY_STAT|DRQ_STAT)) { + /* Engine currently busy, try again later */ + trace_handle_cmd_busy(s, port); + return -1; + } + + if (!s->dev[port].lst) { + trace_handle_cmd_nolist(s, port); + return -1; + } + cmd = get_cmd_header(s, port, slot); + /* remember current slot handle for later */ + s->dev[port].cur_cmd = cmd; + + /* The device we are working for */ + ide_state = &s->dev[port].port.ifs[0]; + if (!ide_state->blk) { + trace_handle_cmd_badport(s, port); + return -1; + } + + tbl_addr = le64_to_cpu(cmd->tbl_addr); + cmd_len = 0x80; + cmd_fis = dma_memory_map(s->as, tbl_addr, &cmd_len, + DMA_DIRECTION_TO_DEVICE); + if (!cmd_fis) { + trace_handle_cmd_badfis(s, port); + return -1; + } else if (cmd_len != 0x80) { + ahci_trigger_irq(s, &s->dev[port], AHCI_PORT_IRQ_BIT_HBFS); + trace_handle_cmd_badmap(s, port, cmd_len); + goto out; + } + if (trace_event_get_state_backends(TRACE_HANDLE_CMD_FIS_DUMP)) { + char *pretty_fis = ahci_pretty_buffer_fis(cmd_fis, 0x80); + trace_handle_cmd_fis_dump(s, port, pretty_fis); + g_free(pretty_fis); + } + switch (cmd_fis[0]) { + case SATA_FIS_TYPE_REGISTER_H2D: + handle_reg_h2d_fis(s, port, slot, cmd_fis); + break; + default: + trace_handle_cmd_unhandled_fis(s, port, + cmd_fis[0], cmd_fis[1], cmd_fis[2]); + break; + } + +out: + dma_memory_unmap(s->as, cmd_fis, cmd_len, DMA_DIRECTION_TO_DEVICE, + cmd_len); + + if (s->dev[port].port.ifs[0].status & (BUSY_STAT|DRQ_STAT)) { + /* async command, complete later */ + s->dev[port].busy_slot = slot; + return -1; + } + + /* done handling the command */ + return 0; +} + +/* Transfer PIO data between RAM and device */ +static void ahci_pio_transfer(const IDEDMA *dma) +{ + AHCIDevice *ad = DO_UPCAST(AHCIDevice, dma, dma); + IDEState *s = &ad->port.ifs[0]; + uint32_t size = (uint32_t)(s->data_end - s->data_ptr); + /* write == ram -> device */ + uint16_t opts = le16_to_cpu(ad->cur_cmd->opts); + int is_write = opts & AHCI_CMD_WRITE; + int is_atapi = opts & AHCI_CMD_ATAPI; + int has_sglist = 0; + bool pio_fis_i; + + /* The PIO Setup FIS is received prior to transfer, but the interrupt + * is only triggered after data is received. + * + * The device only sets the 'I' bit in the PIO Setup FIS for device->host + * requests (see "DPIOI1" in the SATA spec), or for host->device DRQs after + * the first (see "DPIOO1"). The latter is consistent with the spec's + * description of the PACKET protocol, where the command part of ATAPI requests + * ("DPKT0") has the 'I' bit clear, while the data part of PIO ATAPI requests + * ("DPKT4a" and "DPKT7") has the 'I' bit set for both directions for all DRQs. 
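+ * + * In short: the 'I' bit is clear only for the first host->device DRQ of a + * command (including the ATAPI command packet), which is what the + * done_first_drq test below implements.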
+ */ + pio_fis_i = ad->done_first_drq || (!is_atapi && !is_write); + ahci_write_fis_pio(ad, size, pio_fis_i); + + if (is_atapi && !ad->done_first_drq) { + /* already prepopulated iobuffer */ + goto out; + } + + if (ahci_dma_prepare_buf(dma, size)) { + has_sglist = 1; + } + + trace_ahci_pio_transfer(ad->hba, ad->port_no, is_write ? "writ" : "read", + size, is_atapi ? "atapi" : "ata", + has_sglist ? "" : "o"); + + if (has_sglist && size) { + if (is_write) { + dma_buf_write(s->data_ptr, size, &s->sg); + } else { + dma_buf_read(s->data_ptr, size, &s->sg); + } + } + + /* Update number of transferred bytes, destroy sglist */ + dma_buf_commit(s, size); + +out: + /* declare that we processed everything */ + s->data_ptr = s->data_end; + + ad->done_first_drq = true; + if (pio_fis_i) { + ahci_trigger_irq(ad->hba, ad, AHCI_PORT_IRQ_BIT_PSS); + } +} + +static void ahci_start_dma(const IDEDMA *dma, IDEState *s, + BlockCompletionFunc *dma_cb) +{ + AHCIDevice *ad = DO_UPCAST(AHCIDevice, dma, dma); + trace_ahci_start_dma(ad->hba, ad->port_no); + s->io_buffer_offset = 0; + dma_cb(s, 0); +} + +static void ahci_restart_dma(const IDEDMA *dma) +{ + /* Nothing to do, ahci_start_dma already resets s->io_buffer_offset. */ +} + +/** + * IDE/PIO restarts are handled by the core layer, but NCQ commands + * need an extra kick from the AHCI HBA. + */ +static void ahci_restart(const IDEDMA *dma) +{ + AHCIDevice *ad = DO_UPCAST(AHCIDevice, dma, dma); + int i; + + for (i = 0; i < AHCI_MAX_CMDS; i++) { + NCQTransferState *ncq_tfs = &ad->ncq_tfs[i]; + if (ncq_tfs->halt) { + execute_ncq_command(ncq_tfs); + } + } +} + +/** + * Called in DMA and PIO R/W chains to read the PRDT. + * Not shared with NCQ pathways. + */ +static int32_t ahci_dma_prepare_buf(const IDEDMA *dma, int32_t limit) +{ + AHCIDevice *ad = DO_UPCAST(AHCIDevice, dma, dma); + IDEState *s = &ad->port.ifs[0]; + + if (ahci_populate_sglist(ad, &s->sg, ad->cur_cmd, + limit, s->io_buffer_offset) == -1) { + trace_ahci_dma_prepare_buf_fail(ad->hba, ad->port_no); + return -1; + } + s->io_buffer_size = s->sg.size; + + trace_ahci_dma_prepare_buf(ad->hba, ad->port_no, limit, s->io_buffer_size); + return s->io_buffer_size; +} + +/** + * Updates the command header with a bytes-read value. + * Called via dma_buf_commit, for both DMA and PIO paths. + * sglist destruction is handled within dma_buf_commit. 
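+ * The count accumulates: each call adds tx_bytes to the running total + * kept in the command header's status field.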
+ */ +static void ahci_commit_buf(const IDEDMA *dma, uint32_t tx_bytes) +{ + AHCIDevice *ad = DO_UPCAST(AHCIDevice, dma, dma); + + tx_bytes += le32_to_cpu(ad->cur_cmd->status); + ad->cur_cmd->status = cpu_to_le32(tx_bytes); +} + +static int ahci_dma_rw_buf(const IDEDMA *dma, bool is_write) +{ + AHCIDevice *ad = DO_UPCAST(AHCIDevice, dma, dma); + IDEState *s = &ad->port.ifs[0]; + uint8_t *p = s->io_buffer + s->io_buffer_index; + int l = s->io_buffer_size - s->io_buffer_index; + + if (ahci_populate_sglist(ad, &s->sg, ad->cur_cmd, l, s->io_buffer_offset)) { + return 0; + } + + if (is_write) { + dma_buf_read(p, l, &s->sg); + } else { + dma_buf_write(p, l, &s->sg); + } + + /* free sglist, update byte count */ + dma_buf_commit(s, l); + s->io_buffer_index += l; + + trace_ahci_dma_rw_buf(ad->hba, ad->port_no, l); + return 1; +} + +static void ahci_cmd_done(const IDEDMA *dma) +{ + AHCIDevice *ad = DO_UPCAST(AHCIDevice, dma, dma); + + trace_ahci_cmd_done(ad->hba, ad->port_no); + + /* no longer busy */ + if (ad->busy_slot != -1) { + ad->port_regs.cmd_issue &= ~(1 << ad->busy_slot); + ad->busy_slot = -1; + } + + /* update d2h status */ + ahci_write_fis_d2h(ad); + + if (ad->port_regs.cmd_issue && !ad->check_bh) { + ad->check_bh = qemu_bh_new(ahci_check_cmd_bh, ad); + qemu_bh_schedule(ad->check_bh); + } +} + +static void ahci_irq_set(void *opaque, int n, int level) +{ + qemu_log_mask(LOG_UNIMP, "ahci: IRQ#%d level:%d\n", n, level); +} + +static const IDEDMAOps ahci_dma_ops = { + .start_dma = ahci_start_dma, + .restart = ahci_restart, + .restart_dma = ahci_restart_dma, + .pio_transfer = ahci_pio_transfer, + .prepare_buf = ahci_dma_prepare_buf, + .commit_buf = ahci_commit_buf, + .rw_buf = ahci_dma_rw_buf, + .cmd_done = ahci_cmd_done, +}; + +void ahci_init(AHCIState *s, DeviceState *qdev) +{ + s->container = qdev; + /* XXX BAR size should be 1k, but that breaks, so bump it to 4k for now */ + memory_region_init_io(&s->mem, OBJECT(qdev), &ahci_mem_ops, s, + "ahci", AHCI_MEM_BAR_SIZE); + memory_region_init_io(&s->idp, OBJECT(qdev), &ahci_idp_ops, s, + "ahci-idp", 32); +} + +void ahci_realize(AHCIState *s, DeviceState *qdev, AddressSpace *as, int ports) +{ + qemu_irq *irqs; + int i; + + s->as = as; + s->ports = ports; + s->dev = g_new0(AHCIDevice, ports); + ahci_reg_init(s); + irqs = qemu_allocate_irqs(ahci_irq_set, s, s->ports); + for (i = 0; i < s->ports; i++) { + AHCIDevice *ad = &s->dev[i]; + + ide_bus_init(&ad->port, sizeof(ad->port), qdev, i, 1); + ide_init2(&ad->port, irqs[i]); + + ad->hba = s; + ad->port_no = i; + ad->port.dma = &ad->dma; + ad->port.dma->ops = &ahci_dma_ops; + ide_register_restart_cb(&ad->port); + } + g_free(irqs); +} + +void ahci_uninit(AHCIState *s) +{ + int i, j; + + for (i = 0; i < s->ports; i++) { + AHCIDevice *ad = &s->dev[i]; + + for (j = 0; j < 2; j++) { + IDEState *s = &ad->port.ifs[j]; + + ide_exit(s); + } + object_unparent(OBJECT(&ad->port)); + } + + g_free(s->dev); +} + +void ahci_reset(AHCIState *s) +{ + AHCIPortRegs *pr; + int i; + + trace_ahci_reset(s); + + s->control_regs.irqstatus = 0; + /* AHCI Enable (AE) + * The implementation of this bit is dependent upon the value of the + * CAP.SAM bit. If CAP.SAM is '0', then GHC.AE shall be read-write and + * shall have a reset value of '0'. If CAP.SAM is '1', then AE shall be + * read-only and shall have a reset value of '1'. + * + * We set HOST_CAP_AHCI so we must enable AHCI at reset. 
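+ * (Our CAP.SAM is effectively '1': HOST_CAP_AHCI advertises an AHCI-only + * controller, so GHC.AE must read as '1'.)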
+ */ + s->control_regs.ghc = HOST_CTL_AHCI_EN; + + for (i = 0; i < s->ports; i++) { + pr = &s->dev[i].port_regs; + pr->irq_stat = 0; + pr->irq_mask = 0; + pr->scr_ctl = 0; + pr->cmd = PORT_CMD_SPIN_UP | PORT_CMD_POWER_ON; + ahci_reset_port(s, i); + } +} + +static const VMStateDescription vmstate_ncq_tfs = { + .name = "ncq state", + .version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(sector_count, NCQTransferState), + VMSTATE_UINT64(lba, NCQTransferState), + VMSTATE_UINT8(tag, NCQTransferState), + VMSTATE_UINT8(cmd, NCQTransferState), + VMSTATE_UINT8(slot, NCQTransferState), + VMSTATE_BOOL(used, NCQTransferState), + VMSTATE_BOOL(halt, NCQTransferState), + VMSTATE_END_OF_LIST() + }, +}; + +static const VMStateDescription vmstate_ahci_device = { + .name = "ahci port", + .version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_IDE_BUS(port, AHCIDevice), + VMSTATE_IDE_DRIVE(port.ifs[0], AHCIDevice), + VMSTATE_UINT32(port_state, AHCIDevice), + VMSTATE_UINT32(finished, AHCIDevice), + VMSTATE_UINT32(port_regs.lst_addr, AHCIDevice), + VMSTATE_UINT32(port_regs.lst_addr_hi, AHCIDevice), + VMSTATE_UINT32(port_regs.fis_addr, AHCIDevice), + VMSTATE_UINT32(port_regs.fis_addr_hi, AHCIDevice), + VMSTATE_UINT32(port_regs.irq_stat, AHCIDevice), + VMSTATE_UINT32(port_regs.irq_mask, AHCIDevice), + VMSTATE_UINT32(port_regs.cmd, AHCIDevice), + VMSTATE_UINT32(port_regs.tfdata, AHCIDevice), + VMSTATE_UINT32(port_regs.sig, AHCIDevice), + VMSTATE_UINT32(port_regs.scr_stat, AHCIDevice), + VMSTATE_UINT32(port_regs.scr_ctl, AHCIDevice), + VMSTATE_UINT32(port_regs.scr_err, AHCIDevice), + VMSTATE_UINT32(port_regs.scr_act, AHCIDevice), + VMSTATE_UINT32(port_regs.cmd_issue, AHCIDevice), + VMSTATE_BOOL(done_first_drq, AHCIDevice), + VMSTATE_INT32(busy_slot, AHCIDevice), + VMSTATE_BOOL(init_d2h_sent, AHCIDevice), + VMSTATE_STRUCT_ARRAY(ncq_tfs, AHCIDevice, AHCI_MAX_CMDS, + 1, vmstate_ncq_tfs, NCQTransferState), + VMSTATE_END_OF_LIST() + }, +}; + +static int ahci_state_post_load(void *opaque, int version_id) +{ + int i, j; + struct AHCIDevice *ad; + NCQTransferState *ncq_tfs; + AHCIPortRegs *pr; + AHCIState *s = opaque; + + for (i = 0; i < s->ports; i++) { + ad = &s->dev[i]; + pr = &ad->port_regs; + + if (!(pr->cmd & PORT_CMD_START) && (pr->cmd & PORT_CMD_LIST_ON)) { + error_report("AHCI: DMA engine should be off, but status bit " + "indicates it is still running."); + return -1; + } + if (!(pr->cmd & PORT_CMD_FIS_RX) && (pr->cmd & PORT_CMD_FIS_ON)) { + error_report("AHCI: FIS RX engine should be off, but status bit " + "indicates it is still running."); + return -1; + } + + /* After a migrate, the DMA/FIS engines are "off" and + * need to be conditionally restarted */ + pr->cmd &= ~(PORT_CMD_LIST_ON | PORT_CMD_FIS_ON); + if (ahci_cond_start_engines(ad) != 0) { + return -1; + } + + for (j = 0; j < AHCI_MAX_CMDS; j++) { + ncq_tfs = &ad->ncq_tfs[j]; + ncq_tfs->drive = ad; + + if (ncq_tfs->used != ncq_tfs->halt) { + return -1; + } + if (!ncq_tfs->halt) { + continue; + } + if (!is_ncq(ncq_tfs->cmd)) { + return -1; + } + if (ncq_tfs->slot != ncq_tfs->tag) { + return -1; + } + /* If ncq_tfs->halt is justly set, the engine should be engaged, + * and the command list buffer should be mapped. 
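+ * A NULL command header below therefore indicates an inconsistent + * snapshot, and the load is rejected.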
*/ + ncq_tfs->cmdh = get_cmd_header(s, i, ncq_tfs->slot); + if (!ncq_tfs->cmdh) { + return -1; + } + ahci_populate_sglist(ncq_tfs->drive, &ncq_tfs->sglist, + ncq_tfs->cmdh, + ncq_tfs->sector_count * BDRV_SECTOR_SIZE, + 0); + if (ncq_tfs->sector_count != ncq_tfs->sglist.size >> 9) { + return -1; + } + } + + + /* + * If an error is present, ad->busy_slot will be valid and not -1. + * In this case, an operation is waiting to resume and will re-check + * for additional AHCI commands to execute upon completion. + * + * In the case where no error was present, busy_slot will be -1, + * and we should check to see if there are additional commands waiting. + */ + if (ad->busy_slot == -1) { + check_cmd(s, i); + } else { + /* We are in the middle of a command, and may need to access + * the command header in guest memory again. */ + if (ad->busy_slot < 0 || ad->busy_slot >= AHCI_MAX_CMDS) { + return -1; + } + ad->cur_cmd = get_cmd_header(s, i, ad->busy_slot); + } + } + + return 0; +} + +const VMStateDescription vmstate_ahci = { + .name = "ahci", + .version_id = 1, + .post_load = ahci_state_post_load, + .fields = (VMStateField[]) { + VMSTATE_STRUCT_VARRAY_POINTER_INT32(dev, AHCIState, ports, + vmstate_ahci_device, AHCIDevice), + VMSTATE_UINT32(control_regs.cap, AHCIState), + VMSTATE_UINT32(control_regs.ghc, AHCIState), + VMSTATE_UINT32(control_regs.irqstatus, AHCIState), + VMSTATE_UINT32(control_regs.impl, AHCIState), + VMSTATE_UINT32(control_regs.version, AHCIState), + VMSTATE_UINT32(idp_index, AHCIState), + VMSTATE_INT32_EQUAL(ports, AHCIState, NULL), + VMSTATE_END_OF_LIST() + }, +}; + +static const VMStateDescription vmstate_sysbus_ahci = { + .name = "sysbus-ahci", + .fields = (VMStateField[]) { + VMSTATE_AHCI(ahci, SysbusAHCIState), + VMSTATE_END_OF_LIST() + }, +}; + +static void sysbus_ahci_reset(DeviceState *dev) +{ + SysbusAHCIState *s = SYSBUS_AHCI(dev); + + ahci_reset(&s->ahci); +} + +static void sysbus_ahci_init(Object *obj) +{ + SysbusAHCIState *s = SYSBUS_AHCI(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + + ahci_init(&s->ahci, DEVICE(obj)); + + sysbus_init_mmio(sbd, &s->ahci.mem); + sysbus_init_irq(sbd, &s->ahci.irq); +} + +static void sysbus_ahci_realize(DeviceState *dev, Error **errp) +{ + SysbusAHCIState *s = SYSBUS_AHCI(dev); + + ahci_realize(&s->ahci, dev, &address_space_memory, s->num_ports); +} + +static Property sysbus_ahci_properties[] = { + DEFINE_PROP_UINT32("num-ports", SysbusAHCIState, num_ports, 1), + DEFINE_PROP_END_OF_LIST(), +}; + +static void sysbus_ahci_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = sysbus_ahci_realize; + dc->vmsd = &vmstate_sysbus_ahci; + device_class_set_props(dc, sysbus_ahci_properties); + dc->reset = sysbus_ahci_reset; + set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); +} + +static const TypeInfo sysbus_ahci_info = { + .name = TYPE_SYSBUS_AHCI, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(SysbusAHCIState), + .instance_init = sysbus_ahci_init, + .class_init = sysbus_ahci_class_init, +}; + +static void sysbus_ahci_register_types(void) +{ + type_register_static(&sysbus_ahci_info); +} + +type_init(sysbus_ahci_register_types) + +int32_t ahci_get_num_ports(PCIDevice *dev) +{ + AHCIPCIState *d = ICH9_AHCI(dev); + AHCIState *ahci = &d->ahci; + + return ahci->ports; +} + +void ahci_ide_create_devs(PCIDevice *dev, DriveInfo **hd) +{ + AHCIPCIState *d = ICH9_AHCI(dev); + AHCIState *ahci = &d->ahci; + int i; + + for (i = 0; i < ahci->ports; i++) { + if (hd[i] == NULL) { + continue; + } 
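+ /* attach the backing drive to unit 0 of this port's IDE bus */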
+ ide_create_drive(&ahci->dev[i].port, 0, hd[i]); + } + +} diff --git a/hw/ide/ahci_internal.h b/hw/ide/ahci_internal.h new file mode 100644 index 000000000..109de9e2d --- /dev/null +++ b/hw/ide/ahci_internal.h @@ -0,0 +1,393 @@ +/* + * QEMU AHCI Emulation + * + * Copyright (c) 2010 qiaochong@loongson.cn + * Copyright (c) 2010 Roland Elek + * Copyright (c) 2010 Sebastian Herbszt + * Copyright (c) 2010 Alexander Graf + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + * + */ + +#ifndef HW_IDE_AHCI_INTERNAL_H +#define HW_IDE_AHCI_INTERNAL_H + +#include "hw/ide/ahci.h" +#include "hw/ide/internal.h" +#include "hw/pci/pci.h" + +#define AHCI_MEM_BAR_SIZE 0x1000 +#define AHCI_MAX_PORTS 32 +#define AHCI_MAX_SG 168 /* hardware max is 64K */ +#define AHCI_DMA_BOUNDARY 0xffffffff +#define AHCI_USE_CLUSTERING 0 +#define AHCI_MAX_CMDS 32 +#define AHCI_CMD_SZ 32 +#define AHCI_CMD_SLOT_SZ (AHCI_MAX_CMDS * AHCI_CMD_SZ) +#define AHCI_RX_FIS_SZ 256 +#define AHCI_CMD_TBL_CDB 0x40 +#define AHCI_CMD_TBL_HDR_SZ 0x80 +#define AHCI_CMD_TBL_SZ (AHCI_CMD_TBL_HDR_SZ + (AHCI_MAX_SG * 16)) +#define AHCI_CMD_TBL_AR_SZ (AHCI_CMD_TBL_SZ * AHCI_MAX_CMDS) +#define AHCI_PORT_PRIV_DMA_SZ (AHCI_CMD_SLOT_SZ + AHCI_CMD_TBL_AR_SZ + \ + AHCI_RX_FIS_SZ) + +#define AHCI_IRQ_ON_SG (1U << 31) +#define AHCI_CMD_ATAPI (1 << 5) +#define AHCI_CMD_WRITE (1 << 6) +#define AHCI_CMD_PREFETCH (1 << 7) +#define AHCI_CMD_RESET (1 << 8) +#define AHCI_CMD_CLR_BUSY (1 << 10) + +#define RX_FIS_D2H_REG 0x40 /* offset of D2H Register FIS data */ +#define RX_FIS_SDB 0x58 /* offset of SDB FIS data */ +#define RX_FIS_UNK 0x60 /* offset of Unknown FIS data */ + +/* global controller registers */ +enum AHCIHostReg { + AHCI_HOST_REG_CAP = 0, /* CAP: host capabilities */ + AHCI_HOST_REG_CTL = 1, /* GHC: global host control */ + AHCI_HOST_REG_IRQ_STAT = 2, /* IS: interrupt status */ + AHCI_HOST_REG_PORTS_IMPL = 3, /* PI: bitmap of implemented ports */ + AHCI_HOST_REG_VERSION = 4, /* VS: AHCI spec.
version compliance */ + AHCI_HOST_REG_CCC_CTL = 5, /* CCC_CTL: CCC Control */ + AHCI_HOST_REG_CCC_PORTS = 6, /* CCC_PORTS: CCC Ports */ + AHCI_HOST_REG_EM_LOC = 7, /* EM_LOC: Enclosure Mgmt Location */ + AHCI_HOST_REG_EM_CTL = 8, /* EM_CTL: Enclosure Mgmt Control */ + AHCI_HOST_REG_CAP2 = 9, /* CAP2: host capabilities, extended */ + AHCI_HOST_REG_BOHC = 10, /* BOHC: firmware/os handoff ctrl & status */ + AHCI_HOST_REG__COUNT = 11 +}; + +/* HOST_CTL bits */ +#define HOST_CTL_RESET (1 << 0) /* reset controller; self-clear */ +#define HOST_CTL_IRQ_EN (1 << 1) /* global IRQ enable */ +#define HOST_CTL_AHCI_EN (1U << 31) /* AHCI enabled */ + +/* HOST_CAP bits */ +#define HOST_CAP_SSC (1 << 14) /* Slumber capable */ +#define HOST_CAP_AHCI (1 << 18) /* AHCI only */ +#define HOST_CAP_CLO (1 << 24) /* Command List Override support */ +#define HOST_CAP_SSS (1 << 27) /* Staggered Spin-up */ +#define HOST_CAP_NCQ (1 << 30) /* Native Command Queueing */ +#define HOST_CAP_64 (1U << 31) /* PCI DAC (64-bit DMA) support */ + +/* registers for each SATA port */ +enum AHCIPortReg { + AHCI_PORT_REG_LST_ADDR = 0, /* PxCLB: command list DMA addr */ + AHCI_PORT_REG_LST_ADDR_HI = 1, /* PxCLBU: command list DMA addr hi */ + AHCI_PORT_REG_FIS_ADDR = 2, /* PxFB: FIS rx buf addr */ + AHCI_PORT_REG_FIS_ADDR_HI = 3, /* PxFBU: FIS rx buf addr hi */ + AHCI_PORT_REG_IRQ_STAT = 4, /* PxIS: interrupt status */ + AHCI_PORT_REG_IRQ_MASK = 5, /* PxIE: interrupt enable/disable mask */ + AHCI_PORT_REG_CMD = 6, /* PxCMD: port command */ + /* RESERVED */ + AHCI_PORT_REG_TFDATA = 8, /* PxTFD: taskfile data */ + AHCI_PORT_REG_SIG = 9, /* PxSIG: device TF signature */ + AHCI_PORT_REG_SCR_STAT = 10, /* PxSSTS: SATA phy register: SStatus */ + AHCI_PORT_REG_SCR_CTL = 11, /* PxSCTL: SATA phy register: SControl */ + AHCI_PORT_REG_SCR_ERR = 12, /* PxSERR: SATA phy register: SError */ + AHCI_PORT_REG_SCR_ACT = 13, /* PxSACT: SATA phy register: SActive */ + AHCI_PORT_REG_CMD_ISSUE = 14, /* PxCI: command issue */ + AHCI_PORT_REG_SCR_NOTIF = 15, /* PxSNTF: SATA phy register: SNotification */ + AHCI_PORT_REG_FIS_CTL = 16, /* PxFBS: Port multiplier switching ctl */ + AHCI_PORT_REG_DEV_SLEEP = 17, /* PxDEVSLP: device sleep control */ + /* RESERVED */ + AHCI_PORT_REG_VENDOR_1 = 28, /* PxVS: Vendor Specific */ + AHCI_PORT_REG_VENDOR_2 = 29, + AHCI_PORT_REG_VENDOR_3 = 30, + AHCI_PORT_REG_VENDOR_4 = 31, + AHCI_PORT_REG__COUNT = 32 +}; + +/* Port interrupt bit descriptors */ +enum AHCIPortIRQ { + AHCI_PORT_IRQ_BIT_DHRS = 0, + AHCI_PORT_IRQ_BIT_PSS = 1, + AHCI_PORT_IRQ_BIT_DSS = 2, + AHCI_PORT_IRQ_BIT_SDBS = 3, + AHCI_PORT_IRQ_BIT_UFS = 4, + AHCI_PORT_IRQ_BIT_DPS = 5, + AHCI_PORT_IRQ_BIT_PCS = 6, + AHCI_PORT_IRQ_BIT_DMPS = 7, + /* RESERVED */ + AHCI_PORT_IRQ_BIT_PRCS = 22, + AHCI_PORT_IRQ_BIT_IPMS = 23, + AHCI_PORT_IRQ_BIT_OFS = 24, + /* RESERVED */ + AHCI_PORT_IRQ_BIT_INFS = 26, + AHCI_PORT_IRQ_BIT_IFS = 27, + AHCI_PORT_IRQ_BIT_HBDS = 28, + AHCI_PORT_IRQ_BIT_HBFS = 29, + AHCI_PORT_IRQ_BIT_TFES = 30, + AHCI_PORT_IRQ_BIT_CPDS = 31, + AHCI_PORT_IRQ__COUNT = 32 +}; + + +/* PORT_IRQ_{STAT,MASK} bits */ +#define PORT_IRQ_COLD_PRES (1U << 31) /* cold presence detect */ +#define PORT_IRQ_TF_ERR (1 << 30) /* task file error */ +#define PORT_IRQ_HBUS_ERR (1 << 29) /* host bus fatal error */ +#define PORT_IRQ_HBUS_DATA_ERR (1 << 28) /* host bus data error */ +#define PORT_IRQ_IF_ERR (1 << 27) /* interface fatal error */ +#define PORT_IRQ_IF_NONFATAL (1 << 26) /* interface non-fatal error */ + /* reserved */ +#define PORT_IRQ_OVERFLOW (1 << 24) /* xfer exhausted
available S/G */ +#define PORT_IRQ_BAD_PMP (1 << 23) /* incorrect port multiplier */ +#define PORT_IRQ_PHYRDY (1 << 22) /* PhyRdy changed */ + /* reserved */ +#define PORT_IRQ_DEV_ILCK (1 << 7) /* device interlock */ +#define PORT_IRQ_CONNECT (1 << 6) /* port connect change status */ +#define PORT_IRQ_SG_DONE (1 << 5) /* descriptor processed */ +#define PORT_IRQ_UNK_FIS (1 << 4) /* unknown FIS rx'd */ +#define PORT_IRQ_SDB_FIS (1 << 3) /* Set Device Bits FIS rx'd */ +#define PORT_IRQ_DMAS_FIS (1 << 2) /* DMA Setup FIS rx'd */ +#define PORT_IRQ_PIOS_FIS (1 << 1) /* PIO Setup FIS rx'd */ +#define PORT_IRQ_D2H_REG_FIS (1 << 0) /* D2H Register FIS rx'd */ + +#define PORT_IRQ_FREEZE (PORT_IRQ_HBUS_ERR | PORT_IRQ_IF_ERR | \ + PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY | \ + PORT_IRQ_UNK_FIS) +#define PORT_IRQ_ERROR (PORT_IRQ_FREEZE | PORT_IRQ_TF_ERR | \ + PORT_IRQ_HBUS_DATA_ERR) +#define DEF_PORT_IRQ (PORT_IRQ_ERROR | PORT_IRQ_SG_DONE | \ + PORT_IRQ_SDB_FIS | PORT_IRQ_DMAS_FIS | \ + PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS) + +/* PORT_CMD bits */ +#define PORT_CMD_ATAPI (1 << 24) /* Device is ATAPI */ +#define PORT_CMD_LIST_ON (1 << 15) /* cmd list DMA engine running */ +#define PORT_CMD_FIS_ON (1 << 14) /* FIS DMA engine running */ +#define PORT_CMD_FIS_RX (1 << 4) /* Enable FIS receive DMA engine */ +#define PORT_CMD_CLO (1 << 3) /* Command list override */ +#define PORT_CMD_POWER_ON (1 << 2) /* Power up device */ +#define PORT_CMD_SPIN_UP (1 << 1) /* Spin up device */ +#define PORT_CMD_START (1 << 0) /* Enable port DMA engine */ + +#define PORT_CMD_ICC_MASK (0xfU << 28) /* i/f ICC state mask */ +#define PORT_CMD_ICC_ACTIVE (0x1 << 28) /* Put i/f in active state */ +#define PORT_CMD_ICC_PARTIAL (0x2 << 28) /* Put i/f in partial state */ +#define PORT_CMD_ICC_SLUMBER (0x6 << 28) /* Put i/f in slumber state */ + +#define PORT_CMD_RO_MASK 0x007dffe0 /* Which CMD bits are read only? 
*/ + +/* ap->flags bits */ +#define AHCI_FLAG_NO_NCQ (1 << 24) +#define AHCI_FLAG_IGN_IRQ_IF_ERR (1 << 25) /* ignore IRQ_IF_ERR */ +#define AHCI_FLAG_HONOR_PI (1 << 26) /* honor PORTS_IMPL */ +#define AHCI_FLAG_IGN_SERR_INTERNAL (1 << 27) /* ignore SERR_INTERNAL */ +#define AHCI_FLAG_32BIT_ONLY (1 << 28) /* force 32bit */ + +#define ATA_SRST (1 << 2) /* software reset */ + +#define STATE_RUN 0 +#define STATE_RESET 1 + +#define SATA_SCR_SSTATUS_DET_NODEV 0x0 +#define SATA_SCR_SSTATUS_DET_DEV_PRESENT_PHY_UP 0x3 + +#define SATA_SCR_SSTATUS_SPD_NODEV 0x00 +#define SATA_SCR_SSTATUS_SPD_GEN1 0x10 + +#define SATA_SCR_SSTATUS_IPM_NODEV 0x000 +#define SATA_SCR_SSTATUS_IPM_ACTIVE 0X100 + +#define AHCI_SCR_SCTL_DET 0xf + +#define SATA_FIS_TYPE_REGISTER_H2D 0x27 +#define SATA_FIS_REG_H2D_UPDATE_COMMAND_REGISTER 0x80 +#define SATA_FIS_TYPE_REGISTER_D2H 0x34 +#define SATA_FIS_TYPE_PIO_SETUP 0x5f +#define SATA_FIS_TYPE_SDB 0xA1 + +#define AHCI_CMD_HDR_CMD_FIS_LEN 0x1f +#define AHCI_CMD_HDR_PRDT_LEN 16 + +#define SATA_SIGNATURE_CDROM 0xeb140101 +#define SATA_SIGNATURE_DISK 0x00000101 + +#define AHCI_GENERIC_HOST_CONTROL_REGS_MAX_ADDR 0x2c + +#define AHCI_PORT_REGS_START_ADDR 0x100 +#define AHCI_PORT_ADDR_OFFSET_MASK 0x7f +#define AHCI_PORT_ADDR_OFFSET_LEN 0x80 + +#define AHCI_NUM_COMMAND_SLOTS 31 +#define AHCI_SUPPORTED_SPEED 20 +#define AHCI_SUPPORTED_SPEED_GEN1 1 +#define AHCI_VERSION_1_0 0x10000 + +#define AHCI_PROGMODE_MAJOR_REV_1 1 + +#define AHCI_COMMAND_TABLE_ACMD 0x40 + +#define AHCI_PRDT_SIZE_MASK 0x3fffff + +#define IDE_FEATURE_DMA 1 + +#define READ_FPDMA_QUEUED 0x60 +#define WRITE_FPDMA_QUEUED 0x61 +#define NCQ_NON_DATA 0x63 +#define RECEIVE_FPDMA_QUEUED 0x65 +#define SEND_FPDMA_QUEUED 0x64 + +#define NCQ_FIS_FUA_MASK 0x80 +#define NCQ_FIS_RARC_MASK 0x01 + +#define RES_FIS_DSFIS 0x00 +#define RES_FIS_PSFIS 0x20 +#define RES_FIS_RFIS 0x40 +#define RES_FIS_SDBFIS 0x58 +#define RES_FIS_UFIS 0x60 + +#define SATA_CAP_SIZE 0x8 +#define SATA_CAP_REV 0x2 +#define SATA_CAP_BAR 0x4 + +typedef struct AHCIPortRegs { + uint32_t lst_addr; + uint32_t lst_addr_hi; + uint32_t fis_addr; + uint32_t fis_addr_hi; + uint32_t irq_stat; + uint32_t irq_mask; + uint32_t cmd; + uint32_t unused0; + uint32_t tfdata; + uint32_t sig; + uint32_t scr_stat; + uint32_t scr_ctl; + uint32_t scr_err; + uint32_t scr_act; + uint32_t cmd_issue; + uint32_t reserved; +} AHCIPortRegs; + +typedef struct AHCICmdHdr { + uint16_t opts; + uint16_t prdtl; + uint32_t status; + uint64_t tbl_addr; + uint32_t reserved[4]; +} QEMU_PACKED AHCICmdHdr; + +typedef struct AHCI_SG { + uint64_t addr; + uint32_t reserved; + uint32_t flags_size; +} QEMU_PACKED AHCI_SG; + +typedef struct NCQTransferState { + AHCIDevice *drive; + BlockAIOCB *aiocb; + AHCICmdHdr *cmdh; + QEMUSGList sglist; + BlockAcctCookie acct; + uint32_t sector_count; + uint64_t lba; + uint8_t tag; + uint8_t cmd; + uint8_t slot; + bool used; + bool halt; +} NCQTransferState; + +struct AHCIDevice { + IDEDMA dma; + IDEBus port; + int port_no; + uint32_t port_state; + uint32_t finished; + AHCIPortRegs port_regs; + struct AHCIState *hba; + QEMUBH *check_bh; + uint8_t *lst; + uint8_t *res_fis; + bool done_first_drq; + int32_t busy_slot; + bool init_d2h_sent; + AHCICmdHdr *cur_cmd; + NCQTransferState ncq_tfs[AHCI_MAX_CMDS]; +}; + +struct AHCIPCIState { + /*< private >*/ + PCIDevice parent_obj; + /*< public >*/ + + AHCIState ahci; +}; + +extern const VMStateDescription vmstate_ahci; + +#define VMSTATE_AHCI(_field, _state) { \ + .name = (stringify(_field)), \ + .size = sizeof(AHCIState), \ + .vmsd 
= &vmstate_ahci, \ + .flags = VMS_STRUCT, \ + .offset = vmstate_offset_value(_state, _field, AHCIState), \ +} + +/** + * NCQFrame is the same as a Register H2D FIS (described in SATA 3.2), + * but some fields have been re-mapped and re-purposed, as seen in + * SATA 3.2 section 13.6.4.1 ("READ FPDMA QUEUED") + * + * cmd_fis[3], feature 7:0, becomes sector count 7:0. + * cmd_fis[7], device 7:0, uses bit 7 as the Force Unit Access bit. + * cmd_fis[11], feature 15:8, becomes sector count 15:8. + * cmd_fis[12], count 7:0, becomes the NCQ TAG (7:3) and RARC bit (0) + * cmd_fis[13], count 15:8, becomes the priority value (7:6) + * bytes 16-19 become an le32 "auxiliary" field. + */ +typedef struct NCQFrame { + uint8_t fis_type; + uint8_t c; + uint8_t command; + uint8_t sector_count_low; /* (feature 7:0) */ + uint8_t lba0; + uint8_t lba1; + uint8_t lba2; + uint8_t fua; /* (device 7:0) */ + uint8_t lba3; + uint8_t lba4; + uint8_t lba5; + uint8_t sector_count_high; /* (feature 15:8) */ + uint8_t tag; /* (count 0:7) */ + uint8_t prio; /* (count 15:8) */ + uint8_t icc; + uint8_t control; + uint8_t aux0; + uint8_t aux1; + uint8_t aux2; + uint8_t aux3; +} QEMU_PACKED NCQFrame; + +typedef struct SDBFIS { + uint8_t type; + uint8_t flags; + uint8_t status; + uint8_t error; + uint32_t payload; +} QEMU_PACKED SDBFIS; + +void ahci_realize(AHCIState *s, DeviceState *qdev, AddressSpace *as, int ports); +void ahci_init(AHCIState *s, DeviceState *qdev); +void ahci_uninit(AHCIState *s); + +void ahci_reset(AHCIState *s); + +#endif /* HW_IDE_AHCI_INTERNAL_H */ diff --git a/hw/ide/atapi.c b/hw/ide/atapi.c new file mode 100644 index 000000000..b626199e3 --- /dev/null +++ b/hw/ide/atapi.c @@ -0,0 +1,1377 @@ +/* + * QEMU ATAPI Emulation + * + * Copyright (c) 2003 Fabrice Bellard + * Copyright (c) 2006 Openedhand Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include "hw/ide/internal.h" +#include "hw/scsi/scsi.h" +#include "sysemu/block-backend.h" +#include "trace.h" + +#define ATAPI_SECTOR_BITS (2 + BDRV_SECTOR_BITS) +#define ATAPI_SECTOR_SIZE (1 << ATAPI_SECTOR_BITS) + +static void ide_atapi_cmd_read_dma_cb(void *opaque, int ret); + +static void padstr8(uint8_t *buf, int buf_size, const char *src) +{ + int i; + for(i = 0; i < buf_size; i++) { + if (*src) + buf[i] = *src++; + else + buf[i] = ' '; + } +} + +static void lba_to_msf(uint8_t *buf, int lba) +{ + lba += 150; + buf[0] = (lba / 75) / 60; + buf[1] = (lba / 75) % 60; + buf[2] = lba % 75; +} + +static inline int media_present(IDEState *s) +{ + return !s->tray_open && s->nb_sectors > 0; +} + +/* XXX: DVDs that could fit on a CD will be reported as a CD */ +static inline int media_is_dvd(IDEState *s) +{ + return (media_present(s) && s->nb_sectors > CD_MAX_SECTORS); +} + +static inline int media_is_cd(IDEState *s) +{ + return (media_present(s) && s->nb_sectors <= CD_MAX_SECTORS); +} + +static void cd_data_to_raw(uint8_t *buf, int lba) +{ + /* sync bytes */ + buf[0] = 0x00; + memset(buf + 1, 0xff, 10); + buf[11] = 0x00; + buf += 12; + /* MSF */ + lba_to_msf(buf, lba); + buf[3] = 0x01; /* mode 1 data */ + buf += 4; + /* data */ + buf += 2048; + /* XXX: ECC not computed */ + memset(buf, 0, 288); +} + +static int +cd_read_sector_sync(IDEState *s) +{ + int ret; + block_acct_start(blk_get_stats(s->blk), &s->acct, + ATAPI_SECTOR_SIZE, BLOCK_ACCT_READ); + + trace_cd_read_sector_sync(s->lba); + + switch (s->cd_sector_size) { + case 2048: + ret = blk_pread(s->blk, (int64_t)s->lba << ATAPI_SECTOR_BITS, + s->io_buffer, ATAPI_SECTOR_SIZE); + break; + case 2352: + ret = blk_pread(s->blk, (int64_t)s->lba << ATAPI_SECTOR_BITS, + s->io_buffer + 16, ATAPI_SECTOR_SIZE); + if (ret >= 0) { + cd_data_to_raw(s->io_buffer, s->lba); + } + break; + default: + block_acct_invalid(blk_get_stats(s->blk), BLOCK_ACCT_READ); + return -EIO; + } + + if (ret < 0) { + block_acct_failed(blk_get_stats(s->blk), &s->acct); + } else { + block_acct_done(blk_get_stats(s->blk), &s->acct); + s->lba++; + s->io_buffer_index = 0; + } + + return ret; +} + +static void cd_read_sector_cb(void *opaque, int ret) +{ + IDEState *s = opaque; + + trace_cd_read_sector_cb(s->lba, ret); + + if (ret < 0) { + block_acct_failed(blk_get_stats(s->blk), &s->acct); + ide_atapi_io_error(s, ret); + return; + } + + block_acct_done(blk_get_stats(s->blk), &s->acct); + + if (s->cd_sector_size == 2352) { + cd_data_to_raw(s->io_buffer, s->lba); + } + + s->lba++; + s->io_buffer_index = 0; + s->status &= ~BUSY_STAT; + + ide_atapi_cmd_reply_end(s); +} + +static int cd_read_sector(IDEState *s) +{ + void *buf; + + if (s->cd_sector_size != 2048 && s->cd_sector_size != 2352) { + block_acct_invalid(blk_get_stats(s->blk), BLOCK_ACCT_READ); + return -EINVAL; + } + + buf = (s->cd_sector_size == 2352) ? 
s->io_buffer + 16 : s->io_buffer; + qemu_iovec_init_buf(&s->qiov, buf, ATAPI_SECTOR_SIZE); + + trace_cd_read_sector(s->lba); + + block_acct_start(blk_get_stats(s->blk), &s->acct, + ATAPI_SECTOR_SIZE, BLOCK_ACCT_READ); + + ide_buffered_readv(s, (int64_t)s->lba << 2, &s->qiov, 4, + cd_read_sector_cb, s); + + s->status |= BUSY_STAT; + return 0; +} + +void ide_atapi_cmd_ok(IDEState *s) +{ + s->error = 0; + s->status = READY_STAT | SEEK_STAT; + s->nsector = (s->nsector & ~7) | ATAPI_INT_REASON_IO | ATAPI_INT_REASON_CD; + ide_transfer_stop(s); + ide_set_irq(s->bus); +} + +void ide_atapi_cmd_error(IDEState *s, int sense_key, int asc) +{ + trace_ide_atapi_cmd_error(s, sense_key, asc); + s->error = sense_key << 4; + s->status = READY_STAT | ERR_STAT; + s->nsector = (s->nsector & ~7) | ATAPI_INT_REASON_IO | ATAPI_INT_REASON_CD; + s->sense_key = sense_key; + s->asc = asc; + ide_transfer_stop(s); + ide_set_irq(s->bus); +} + +void ide_atapi_io_error(IDEState *s, int ret) +{ + /* XXX: handle more errors */ + if (ret == -ENOMEDIUM) { + ide_atapi_cmd_error(s, NOT_READY, + ASC_MEDIUM_NOT_PRESENT); + } else { + ide_atapi_cmd_error(s, ILLEGAL_REQUEST, + ASC_LOGICAL_BLOCK_OOR); + } +} + +static uint16_t atapi_byte_count_limit(IDEState *s) +{ + uint16_t bcl; + + bcl = s->lcyl | (s->hcyl << 8); + if (bcl == 0xffff) { + return 0xfffe; + } + return bcl; +} + +/* The whole ATAPI transfer logic is handled in this function */ +void ide_atapi_cmd_reply_end(IDEState *s) +{ + int byte_count_limit, size, ret; + while (s->packet_transfer_size > 0) { + trace_ide_atapi_cmd_reply_end(s, s->packet_transfer_size, + s->elementary_transfer_size, + s->io_buffer_index); + + /* see if a new sector must be read */ + if (s->lba != -1 && s->io_buffer_index >= s->cd_sector_size) { + if (!s->elementary_transfer_size) { + ret = cd_read_sector(s); + if (ret < 0) { + ide_atapi_io_error(s, ret); + } + return; + } else { + /* rebuffering within an elementary transfer is + * only possible with a sync request because we + * end up with a race condition otherwise */ + ret = cd_read_sector_sync(s); + if (ret < 0) { + ide_atapi_io_error(s, ret); + return; + } + } + } + if (s->elementary_transfer_size > 0) { + /* there are some data left to transmit in this elementary + transfer */ + size = s->cd_sector_size - s->io_buffer_index; + if (size > s->elementary_transfer_size) + size = s->elementary_transfer_size; + } else { + /* a new transfer is needed */ + s->nsector = (s->nsector & ~7) | ATAPI_INT_REASON_IO; + ide_set_irq(s->bus); + byte_count_limit = atapi_byte_count_limit(s); + trace_ide_atapi_cmd_reply_end_bcl(s, byte_count_limit); + size = s->packet_transfer_size; + if (size > byte_count_limit) { + /* byte count limit must be even in this case */ + if (byte_count_limit & 1) + byte_count_limit--; + size = byte_count_limit; + } + s->lcyl = size; + s->hcyl = size >> 8; + s->elementary_transfer_size = size; + /* we cannot transmit more than one sector at a time */ + if (s->lba != -1) { + if (size > (s->cd_sector_size - s->io_buffer_index)) + size = (s->cd_sector_size - s->io_buffer_index); + } + trace_ide_atapi_cmd_reply_end_new(s, s->status); + } + s->packet_transfer_size -= size; + s->elementary_transfer_size -= size; + s->io_buffer_index += size; + assert(size <= s->io_buffer_total_len); + assert(s->io_buffer_index <= s->io_buffer_total_len); + + /* Some adapters process PIO data right away. In that case, we need + * to avoid mutual recursion between ide_transfer_start + * and ide_atapi_cmd_reply_end.
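+ * (The surrounding while loop takes the place of the recursive + * callback in that case.)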
+ */ + if (!ide_transfer_start_norecurse(s, + s->io_buffer + s->io_buffer_index - size, + size, ide_atapi_cmd_reply_end)) { + return; + } + } + + /* end of transfer */ + trace_ide_atapi_cmd_reply_end_eot(s, s->status); + ide_atapi_cmd_ok(s); + ide_set_irq(s->bus); +} + +/* send a reply of 'size' bytes in s->io_buffer to an ATAPI command */ +static void ide_atapi_cmd_reply(IDEState *s, int size, int max_size) +{ + if (size > max_size) + size = max_size; + s->lba = -1; /* no sector read */ + s->packet_transfer_size = size; + s->io_buffer_size = size; /* dma: send the reply data as one chunk */ + s->elementary_transfer_size = 0; + + if (s->atapi_dma) { + block_acct_start(blk_get_stats(s->blk), &s->acct, size, + BLOCK_ACCT_READ); + s->status = READY_STAT | SEEK_STAT | DRQ_STAT; + ide_start_dma(s, ide_atapi_cmd_read_dma_cb); + } else { + s->status = READY_STAT | SEEK_STAT; + s->io_buffer_index = 0; + ide_atapi_cmd_reply_end(s); + } +} + +/* start a CD-ROM read command */ +static void ide_atapi_cmd_read_pio(IDEState *s, int lba, int nb_sectors, + int sector_size) +{ + assert(0 <= lba && lba < (s->nb_sectors >> 2)); + + s->lba = lba; + s->packet_transfer_size = nb_sectors * sector_size; + s->elementary_transfer_size = 0; + s->io_buffer_index = sector_size; + s->cd_sector_size = sector_size; + + ide_atapi_cmd_reply_end(s); +} + +static void ide_atapi_cmd_check_status(IDEState *s) +{ + trace_ide_atapi_cmd_check_status(s); + s->error = MC_ERR | (UNIT_ATTENTION << 4); + s->status = ERR_STAT; + s->nsector = 0; + ide_set_irq(s->bus); +} +/* ATAPI DMA support */ + +static void ide_atapi_cmd_read_dma_cb(void *opaque, int ret) +{ + IDEState *s = opaque; + int data_offset, n; + + if (ret < 0) { + if (ide_handle_rw_error(s, -ret, ide_dma_cmd_to_retry(s->dma_cmd))) { + if (s->bus->error_status) { + s->bus->dma->aiocb = NULL; + return; + } + goto eot; + } + } + + if (s->io_buffer_size > 0) { + /* + * For a cdrom read sector command (s->lba != -1), + * adjust the lba for the next s->io_buffer_size chunk + * and dma the current chunk. + * For a command != read (s->lba == -1), just transfer + * the reply data.
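+ * (Raw 2352 byte sectors are rebuilt one at a time below via + * cd_data_to_raw().)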
+ */ + if (s->lba != -1) { + if (s->cd_sector_size == 2352) { + n = 1; + cd_data_to_raw(s->io_buffer, s->lba); + } else { + n = s->io_buffer_size >> 11; + } + s->lba += n; + } + s->packet_transfer_size -= s->io_buffer_size; + if (s->bus->dma->ops->rw_buf(s->bus->dma, 1) == 0) + goto eot; + } + + if (s->packet_transfer_size <= 0) { + s->status = READY_STAT | SEEK_STAT; + s->nsector = (s->nsector & ~7) | ATAPI_INT_REASON_IO | ATAPI_INT_REASON_CD; + ide_set_irq(s->bus); + goto eot; + } + + s->io_buffer_index = 0; + if (s->cd_sector_size == 2352) { + n = 1; + s->io_buffer_size = s->cd_sector_size; + data_offset = 16; + } else { + n = s->packet_transfer_size >> 11; + if (n > (IDE_DMA_BUF_SECTORS / 4)) + n = (IDE_DMA_BUF_SECTORS / 4); + s->io_buffer_size = n * 2048; + data_offset = 0; + } + trace_ide_atapi_cmd_read_dma_cb_aio(s, s->lba, n); + qemu_iovec_init_buf(&s->bus->dma->qiov, s->io_buffer + data_offset, + n * ATAPI_SECTOR_SIZE); + + s->bus->dma->aiocb = ide_buffered_readv(s, (int64_t)s->lba << 2, + &s->bus->dma->qiov, n * 4, + ide_atapi_cmd_read_dma_cb, s); + return; + +eot: + if (ret < 0) { + block_acct_failed(blk_get_stats(s->blk), &s->acct); + } else { + block_acct_done(blk_get_stats(s->blk), &s->acct); + } + ide_set_inactive(s, false); +} + +/* start a CD-CDROM read command with DMA */ +/* XXX: test if DMA is available */ +static void ide_atapi_cmd_read_dma(IDEState *s, int lba, int nb_sectors, + int sector_size) +{ + assert(0 <= lba && lba < (s->nb_sectors >> 2)); + + s->lba = lba; + s->packet_transfer_size = nb_sectors * sector_size; + s->io_buffer_size = 0; + s->cd_sector_size = sector_size; + + block_acct_start(blk_get_stats(s->blk), &s->acct, s->packet_transfer_size, + BLOCK_ACCT_READ); + + /* XXX: check if BUSY_STAT should be set */ + s->status = READY_STAT | SEEK_STAT | DRQ_STAT | BUSY_STAT; + ide_start_dma(s, ide_atapi_cmd_read_dma_cb); +} + +static void ide_atapi_cmd_read(IDEState *s, int lba, int nb_sectors, + int sector_size) +{ + trace_ide_atapi_cmd_read(s, s->atapi_dma ? "dma" : "pio", + lba, nb_sectors); + if (s->atapi_dma) { + ide_atapi_cmd_read_dma(s, lba, nb_sectors, sector_size); + } else { + ide_atapi_cmd_read_pio(s, lba, nb_sectors, sector_size); + } +} + +void ide_atapi_dma_restart(IDEState *s) +{ + /* + * At this point we can just re-evaluate the packet command and start over. + * The presence of ->dma_cb callback in the pre_save ensures that the packet + * command has been completely sent and we can safely restart command. 
+ */ + s->unit = s->bus->retry_unit; + s->bus->dma->ops->restart_dma(s->bus->dma); + ide_atapi_cmd(s); +} + +static inline uint8_t ide_atapi_set_profile(uint8_t *buf, uint8_t *index, + uint16_t profile) +{ + uint8_t *buf_profile = buf + 12; /* start of profiles */ + + buf_profile += ((*index) * 4); /* start of indexed profile */ + stw_be_p(buf_profile, profile); + buf_profile[2] = ((buf_profile[0] == buf[6]) && (buf_profile[1] == buf[7])); + + /* each profile adds 4 bytes to the response */ + (*index)++; + buf[11] += 4; /* Additional Length */ + + return 4; +} + +static int ide_dvd_read_structure(IDEState *s, int format, + const uint8_t *packet, uint8_t *buf) +{ + switch (format) { + case 0x0: /* Physical format information */ + { + int layer = packet[6]; + uint64_t total_sectors; + + if (layer != 0) + return -ASC_INV_FIELD_IN_CMD_PACKET; + + total_sectors = s->nb_sectors >> 2; + if (total_sectors == 0) { + return -ASC_MEDIUM_NOT_PRESENT; + } + + buf[4] = 1; /* DVD-ROM, part version 1 */ + buf[5] = 0xf; /* 120mm disc, minimum rate unspecified */ + buf[6] = 1; /* one layer, read-only (per MMC-2 spec) */ + buf[7] = 0; /* default densities */ + + /* FIXME: 0x30000 per spec? */ + stl_be_p(buf + 8, 0); /* start sector */ + stl_be_p(buf + 12, total_sectors - 1); /* end sector */ + stl_be_p(buf + 16, total_sectors - 1); /* l0 end sector */ + + /* Size of buffer, not including 2 byte size field */ + stw_be_p(buf, 2048 + 2); + + /* 2k data + 4 byte header */ + return (2048 + 4); + } + + case 0x01: /* DVD copyright information */ + buf[4] = 0; /* no copyright data */ + buf[5] = 0; /* no region restrictions */ + + /* Size of buffer, not including 2 byte size field */ + stw_be_p(buf, 4 + 2); + + /* 4 byte header + 4 byte data */ + return (4 + 4); + + case 0x03: /* BCA information - invalid field for no BCA info */ + return -ASC_INV_FIELD_IN_CMD_PACKET; + + case 0x04: /* DVD disc manufacturing information */ + /* Size of buffer, not including 2 byte size field */ + stw_be_p(buf, 2048 + 2); + + /* 2k data + 4 byte header */ + return (2048 + 4); + + case 0xff: + /* + * This lists all the command capabilities above. Add new ones + * in order and update the length and buffer return values. 
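+ *
+ * Editor's aside: each entry below is a fixed 4-byte descriptor -
+ * a format code, an attribute byte (0x40 = readable, not writable)
+ * and a big-endian 16-bit structure length. Four entries give a
+ * 16-byte payload; the leading length field is written as 16 + 2
+ * because it counts every byte after itself (the two reserved header
+ * bytes plus the descriptors), and the function returns 16 + 4 for
+ * the payload plus the full 4-byte header.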
+ */ + + buf[4] = 0x00; /* Physical format */ + buf[5] = 0x40; /* Not writable, is readable */ + stw_be_p(buf + 6, 2048 + 4); + + buf[8] = 0x01; /* Copyright info */ + buf[9] = 0x40; /* Not writable, is readable */ + stw_be_p(buf + 10, 4 + 4); + + buf[12] = 0x03; /* BCA info */ + buf[13] = 0x40; /* Not writable, is readable */ + stw_be_p(buf + 14, 188 + 4); + + buf[16] = 0x04; /* Manufacturing info */ + buf[17] = 0x40; /* Not writable, is readable */ + stw_be_p(buf + 18, 2048 + 4); + + /* Size of buffer, not including 2 byte size field */ + stw_be_p(buf, 16 + 2); + + /* data written + 4 byte header */ + return (16 + 4); + + default: /* TODO: formats beyond DVD-ROM requires */ + return -ASC_INV_FIELD_IN_CMD_PACKET; + } +} + +static unsigned int event_status_media(IDEState *s, + uint8_t *buf) +{ + uint8_t event_code, media_status; + + media_status = 0; + if (s->tray_open) { + media_status = MS_TRAY_OPEN; + } else if (blk_is_inserted(s->blk)) { + media_status = MS_MEDIA_PRESENT; + } + + /* Event notification descriptor */ + event_code = MEC_NO_CHANGE; + if (media_status != MS_TRAY_OPEN) { + if (s->events.new_media) { + event_code = MEC_NEW_MEDIA; + s->events.new_media = false; + } else if (s->events.eject_request) { + event_code = MEC_EJECT_REQUESTED; + s->events.eject_request = false; + } + } + + buf[4] = event_code; + buf[5] = media_status; + + /* These fields are reserved, just clear them. */ + buf[6] = 0; + buf[7] = 0; + + return 8; /* We wrote to 4 extra bytes from the header */ +} + +/* + * Before transferring data or otherwise signalling acceptance of a command + * marked CONDDATA, we must check the validity of the byte_count_limit. + */ +static bool validate_bcl(IDEState *s) +{ + /* TODO: Check IDENTIFY data word 125 for defacult BCL (currently 0) */ + if (s->atapi_dma || atapi_byte_count_limit(s)) { + return true; + } + + /* TODO: Move abort back into core.c and introduce proper error flow between + * ATAPI layer and IDE core layer */ + ide_abort_command(s); + return false; +} + +static void cmd_get_event_status_notification(IDEState *s, + uint8_t *buf) +{ + const uint8_t *packet = buf; + + struct { + uint8_t opcode; + uint8_t polled; /* lsb bit is polled; others are reserved */ + uint8_t reserved2[2]; + uint8_t class; + uint8_t reserved3[2]; + uint16_t len; + uint8_t control; + } QEMU_PACKED *gesn_cdb; + + struct { + uint16_t len; + uint8_t notification_class; + uint8_t supported_events; + } QEMU_PACKED *gesn_event_header; + unsigned int max_len, used_len; + + gesn_cdb = (void *)packet; + gesn_event_header = (void *)buf; + + max_len = be16_to_cpu(gesn_cdb->len); + + /* It is fine by the MMC spec to not support async mode operations */ + if (!(gesn_cdb->polled & 0x01)) { /* asynchronous mode */ + /* Only polling is supported, asynchronous mode is not. */ + ide_atapi_cmd_error(s, ILLEGAL_REQUEST, + ASC_INV_FIELD_IN_CMD_PACKET); + return; + } + + /* polling mode operation */ + + /* + * These are the supported events. + * + * We currently only support requests of the 'media' type. + * Notification class requests and supported event classes are bitmasks, + * but they are build from the same values as the "notification class" + * field. + */ + gesn_event_header->supported_events = 1 << GESN_MEDIA; + + /* + * We use |= below to set the class field; other bits in this byte + * are reserved now but this is useful to do if we have to use the + * reserved fields later. + */ + gesn_event_header->notification_class = 0; + + /* + * Responses to requests are to be based on request priority. 
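+ *
+ * (Editor's aside - a worked example of the length bookkeeping done
+ * below: a media-class request that returns one event descriptor gets
+ * used_len = 8 from event_status_media(), i.e. the 4-byte header plus
+ * a 4-byte descriptor, so the big-endian len field is stored as
+ * used_len - sizeof(*gesn_event_header) = 8 - 4 = 4.)
+ *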
The + * notification_class_request_type enum above specifies the + * priority: upper elements are higher prio than lower ones. + */ + if (gesn_cdb->class & (1 << GESN_MEDIA)) { + gesn_event_header->notification_class |= GESN_MEDIA; + used_len = event_status_media(s, buf); + } else { + gesn_event_header->notification_class = 0x80; /* No event available */ + used_len = sizeof(*gesn_event_header); + } + gesn_event_header->len = cpu_to_be16(used_len + - sizeof(*gesn_event_header)); + ide_atapi_cmd_reply(s, used_len, max_len); +} + +static void cmd_request_sense(IDEState *s, uint8_t *buf) +{ + int max_len = buf[4]; + + memset(buf, 0, 18); + buf[0] = 0x70 | (1 << 7); + buf[2] = s->sense_key; + buf[7] = 10; + buf[12] = s->asc; + + if (s->sense_key == UNIT_ATTENTION) { + s->sense_key = NO_SENSE; + } + + ide_atapi_cmd_reply(s, 18, max_len); +} + +static void cmd_inquiry(IDEState *s, uint8_t *buf) +{ + uint8_t page_code = buf[2]; + int max_len = buf[4]; + + unsigned idx = 0; + unsigned size_idx; + unsigned preamble_len; + + /* If the EVPD (Enable Vital Product Data) bit is set in byte 1, + * we are being asked for a specific page of info indicated by byte 2. */ + if (buf[1] & 0x01) { + preamble_len = 4; + size_idx = 3; + + buf[idx++] = 0x05; /* CD-ROM */ + buf[idx++] = page_code; /* Page Code */ + buf[idx++] = 0x00; /* reserved */ + idx++; /* length (set later) */ + + switch (page_code) { + case 0x00: + /* Supported Pages: List of supported VPD responses. */ + buf[idx++] = 0x00; /* 0x00: Supported Pages, and: */ + buf[idx++] = 0x83; /* 0x83: Device Identification. */ + break; + + case 0x83: + /* Device Identification. Each entry is optional, but the entries + * included here are modeled after libata's VPD responses. + * If the response is given, at least one entry must be present. */ + + /* Entry 1: Serial */ + if (idx + 24 > max_len) { + /* Not enough room for even the first entry: */ + /* 4 byte header + 20 byte string */ + ide_atapi_cmd_error(s, ILLEGAL_REQUEST, + ASC_DATA_PHASE_ERROR); + return; + } + buf[idx++] = 0x02; /* Ascii */ + buf[idx++] = 0x00; /* Vendor Specific */ + buf[idx++] = 0x00; + buf[idx++] = 20; /* Remaining length */ + padstr8(buf + idx, 20, s->drive_serial_str); + idx += 20; + + /* Entry 2: Drive Model and Serial */ + if (idx + 72 > max_len) { + /* 4 (header) + 8 (vendor) + 60 (model & serial) */ + goto out; + } + buf[idx++] = 0x02; /* Ascii */ + buf[idx++] = 0x01; /* T10 Vendor */ + buf[idx++] = 0x00; + buf[idx++] = 68; + padstr8(buf + idx, 8, "ATA"); /* Generic T10 vendor */ + idx += 8; + padstr8(buf + idx, 40, s->drive_model_str); + idx += 40; + padstr8(buf + idx, 20, s->drive_serial_str); + idx += 20; + + /* Entry 3: WWN */ + if (s->wwn && (idx + 12 <= max_len)) { + /* 4 byte header + 8 byte wwn */ + buf[idx++] = 0x01; /* Binary */ + buf[idx++] = 0x03; /* NAA */ + buf[idx++] = 0x00; + buf[idx++] = 0x08; + stq_be_p(&buf[idx], s->wwn); + idx += 8; + } + break; + + default: + /* SPC-3, revision 23 sec. 6.4 */ + ide_atapi_cmd_error(s, ILLEGAL_REQUEST, + ASC_INV_FIELD_IN_CMD_PACKET); + return; + } + } else { + preamble_len = 5; + size_idx = 4; + + buf[0] = 0x05; /* CD-ROM */ + buf[1] = 0x80; /* removable */ + buf[2] = 0x00; /* ISO */ + buf[3] = 0x21; /* ATAPI-2 (XXX: put ATAPI-4 ?) */ + /* buf[size_idx] set below. 
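+ *
+ * Editor's note - worked example of that epilogue: the standard
+ * (non-EVPD) reply built here is 36 bytes with preamble_len = 5, so
+ * "out:" stores 36 - 5 = 31 in buf[4], INQUIRY's "additional length"
+ * field (the bytes remaining after byte 4). The VPD branches use
+ * size_idx = 3 and preamble_len = 4 for the same reason.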
*/ + buf[5] = 0; /* reserved */ + buf[6] = 0; /* reserved */ + buf[7] = 0; /* reserved */ + padstr8(buf + 8, 8, "QEMU"); + padstr8(buf + 16, 16, "QEMU DVD-ROM"); + padstr8(buf + 32, 4, s->version); + idx = 36; + } + + out: + buf[size_idx] = idx - preamble_len; + ide_atapi_cmd_reply(s, idx, max_len); +} + +static void cmd_get_configuration(IDEState *s, uint8_t *buf) +{ + uint32_t len; + uint8_t index = 0; + int max_len; + + /* only feature 0 is supported */ + if (buf[2] != 0 || buf[3] != 0) { + ide_atapi_cmd_error(s, ILLEGAL_REQUEST, + ASC_INV_FIELD_IN_CMD_PACKET); + return; + } + + /* XXX: could result in alignment problems in some architectures */ + max_len = lduw_be_p(buf + 7); + + /* + * XXX: avoid overflow for io_buffer if max_len is bigger than + * the size of that buffer (dimensioned to max number of + * sectors to transfer at once) + * + * Only a problem if the feature/profiles grow. + */ + if (max_len > BDRV_SECTOR_SIZE) { + /* XXX: assume 1 sector */ + max_len = BDRV_SECTOR_SIZE; + } + + memset(buf, 0, max_len); + /* + * the number of sectors from the media tells us which profile + * to use as current. 0 means there is no media + */ + if (media_is_dvd(s)) { + stw_be_p(buf + 6, MMC_PROFILE_DVD_ROM); + } else if (media_is_cd(s)) { + stw_be_p(buf + 6, MMC_PROFILE_CD_ROM); + } + + buf[10] = 0x02 | 0x01; /* persistent and current */ + len = 12; /* headers: 8 + 4 */ + len += ide_atapi_set_profile(buf, &index, MMC_PROFILE_DVD_ROM); + len += ide_atapi_set_profile(buf, &index, MMC_PROFILE_CD_ROM); + stl_be_p(buf, len - 4); /* data length */ + + ide_atapi_cmd_reply(s, len, max_len); +} + +static void cmd_mode_sense(IDEState *s, uint8_t *buf) +{ + int action, code; + int max_len; + + max_len = lduw_be_p(buf + 7); + action = buf[2] >> 6; + code = buf[2] & 0x3f; + + switch(action) { + case 0: /* current values */ + switch(code) { + case MODE_PAGE_R_W_ERROR: /* error recovery */ + stw_be_p(&buf[0], 16 - 2); + buf[2] = 0x70; + buf[3] = 0; + buf[4] = 0; + buf[5] = 0; + buf[6] = 0; + buf[7] = 0; + + buf[8] = MODE_PAGE_R_W_ERROR; + buf[9] = 16 - 10; + buf[10] = 0x00; + buf[11] = 0x05; + buf[12] = 0x00; + buf[13] = 0x00; + buf[14] = 0x00; + buf[15] = 0x00; + ide_atapi_cmd_reply(s, 16, max_len); + break; + case MODE_PAGE_AUDIO_CTL: + stw_be_p(&buf[0], 24 - 2); + buf[2] = 0x70; + buf[3] = 0; + buf[4] = 0; + buf[5] = 0; + buf[6] = 0; + buf[7] = 0; + + buf[8] = MODE_PAGE_AUDIO_CTL; + buf[9] = 24 - 10; + /* Fill with CDROM audio volume */ + buf[17] = 0; + buf[19] = 0; + buf[21] = 0; + buf[23] = 0; + + ide_atapi_cmd_reply(s, 24, max_len); + break; + case MODE_PAGE_CAPABILITIES: + stw_be_p(&buf[0], 30 - 2); + buf[2] = 0x70; + buf[3] = 0; + buf[4] = 0; + buf[5] = 0; + buf[6] = 0; + buf[7] = 0; + + buf[8] = MODE_PAGE_CAPABILITIES; + buf[9] = 30 - 10; + buf[10] = 0x3b; /* read CDR/CDRW/DVDROM/DVDR/DVDRAM */ + buf[11] = 0x00; + + /* Claim PLAY_AUDIO capability (0x01) since some Linux + code checks for this to automount media. 
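+ *
+ * Editor's aside on the length arithmetic repeated for each page in
+ * this function: the reply here is 30 bytes total, an 8-byte MODE
+ * SENSE(10) parameter header plus a 22-byte page. The 16-bit mode
+ * data length at buf[0] excludes itself, hence 30 - 2; the page
+ * length at buf[9] excludes the 8-byte header and the 2-byte page
+ * code/length pair, hence 30 - 10.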
*/ + buf[12] = 0x71; + buf[13] = 3 << 5; + buf[14] = (1 << 0) | (1 << 3) | (1 << 5); + if (s->tray_locked) { + buf[14] |= 1 << 1; + } + buf[15] = 0x00; /* No volume & mute control, no changer */ + stw_be_p(&buf[16], 704); /* 4x read speed */ + buf[18] = 0; /* Two volume levels */ + buf[19] = 2; + stw_be_p(&buf[20], 512); /* 512k buffer */ + stw_be_p(&buf[22], 704); /* 4x read speed current */ + buf[24] = 0; + buf[25] = 0; + buf[26] = 0; + buf[27] = 0; + buf[28] = 0; + buf[29] = 0; + ide_atapi_cmd_reply(s, 30, max_len); + break; + default: + goto error_cmd; + } + break; + case 1: /* changeable values */ + goto error_cmd; + case 2: /* default values */ + goto error_cmd; + default: + case 3: /* saved values */ + ide_atapi_cmd_error(s, ILLEGAL_REQUEST, + ASC_SAVING_PARAMETERS_NOT_SUPPORTED); + break; + } + return; + +error_cmd: + ide_atapi_cmd_error(s, ILLEGAL_REQUEST, ASC_INV_FIELD_IN_CMD_PACKET); +} + +static void cmd_test_unit_ready(IDEState *s, uint8_t *buf) +{ + /* Not Ready Conditions are already handled in ide_atapi_cmd(), so if we + * come here, we know that it's ready. */ + ide_atapi_cmd_ok(s); +} + +static void cmd_prevent_allow_medium_removal(IDEState *s, uint8_t* buf) +{ + s->tray_locked = buf[4] & 1; + blk_lock_medium(s->blk, buf[4] & 1); + ide_atapi_cmd_ok(s); +} + +static void cmd_read(IDEState *s, uint8_t* buf) +{ + unsigned int nb_sectors, lba; + + /* Total logical sectors of ATAPI_SECTOR_SIZE(=2048) bytes */ + uint64_t total_sectors = s->nb_sectors >> 2; + + if (buf[0] == GPCMD_READ_10) { + nb_sectors = lduw_be_p(buf + 7); + } else { + nb_sectors = ldl_be_p(buf + 6); + } + if (nb_sectors == 0) { + ide_atapi_cmd_ok(s); + return; + } + + lba = ldl_be_p(buf + 2); + if (lba >= total_sectors || lba + nb_sectors - 1 >= total_sectors) { + ide_atapi_cmd_error(s, ILLEGAL_REQUEST, ASC_LOGICAL_BLOCK_OOR); + return; + } + + ide_atapi_cmd_read(s, lba, nb_sectors, 2048); +} + +static void cmd_read_cd(IDEState *s, uint8_t* buf) +{ + unsigned int nb_sectors, lba, transfer_request; + + /* Total logical sectors of ATAPI_SECTOR_SIZE(=2048) bytes */ + uint64_t total_sectors = s->nb_sectors >> 2; + + nb_sectors = (buf[6] << 16) | (buf[7] << 8) | buf[8]; + if (nb_sectors == 0) { + ide_atapi_cmd_ok(s); + return; + } + + lba = ldl_be_p(buf + 2); + if (lba >= total_sectors || lba + nb_sectors - 1 >= total_sectors) { + ide_atapi_cmd_error(s, ILLEGAL_REQUEST, ASC_LOGICAL_BLOCK_OOR); + return; + } + + transfer_request = buf[9] & 0xf8; + if (transfer_request == 0x00) { + /* nothing */ + ide_atapi_cmd_ok(s); + return; + } + + /* Check validity of BCL before transferring data */ + if (!validate_bcl(s)) { + return; + } + + switch (transfer_request) { + case 0x10: + /* normal read */ + ide_atapi_cmd_read(s, lba, nb_sectors, 2048); + break; + case 0xf8: + /* read all data */ + ide_atapi_cmd_read(s, lba, nb_sectors, 2352); + break; + default: + ide_atapi_cmd_error(s, ILLEGAL_REQUEST, + ASC_INV_FIELD_IN_CMD_PACKET); + break; + } +} + +static void cmd_seek(IDEState *s, uint8_t* buf) +{ + unsigned int lba; + uint64_t total_sectors = s->nb_sectors >> 2; + + lba = ldl_be_p(buf + 2); + if (lba >= total_sectors) { + ide_atapi_cmd_error(s, ILLEGAL_REQUEST, ASC_LOGICAL_BLOCK_OOR); + return; + } + + ide_atapi_cmd_ok(s); +} + +static void cmd_start_stop_unit(IDEState *s, uint8_t* buf) +{ + int sense; + bool start = buf[4] & 1; + bool loej = buf[4] & 2; /* load on start, eject on !start */ + int pwrcnd = buf[4] & 0xf0; + + if (pwrcnd) { + /* eject/load only happens for power condition == 0 */ + ide_atapi_cmd_ok(s); + 
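+        /*
+         * Editor's aside: byte 4 of START STOP UNIT decodes as
+         * bit 0 = start, bit 1 = loej (load on start, eject on stop)
+         * and bits 4-7 = power condition. A non-zero power condition
+         * means the start/loej bits are to be ignored, so the command
+         * is simply acknowledged here without touching the tray.
+         */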
return; + } + + if (loej) { + if (!start && !s->tray_open && s->tray_locked) { + sense = blk_is_inserted(s->blk) + ? NOT_READY : ILLEGAL_REQUEST; + ide_atapi_cmd_error(s, sense, ASC_MEDIA_REMOVAL_PREVENTED); + return; + } + + if (s->tray_open != !start) { + blk_eject(s->blk, !start); + s->tray_open = !start; + } + } + + ide_atapi_cmd_ok(s); +} + +static void cmd_mechanism_status(IDEState *s, uint8_t* buf) +{ + int max_len = lduw_be_p(buf + 8); + + stw_be_p(buf, 0); + /* no current LBA */ + buf[2] = 0; + buf[3] = 0; + buf[4] = 0; + buf[5] = 1; + stw_be_p(buf + 6, 0); + ide_atapi_cmd_reply(s, 8, max_len); +} + +static void cmd_read_toc_pma_atip(IDEState *s, uint8_t* buf) +{ + int format, msf, start_track, len; + int max_len; + uint64_t total_sectors = s->nb_sectors >> 2; + + max_len = lduw_be_p(buf + 7); + format = buf[9] >> 6; + msf = (buf[1] >> 1) & 1; + start_track = buf[6]; + + switch(format) { + case 0: + len = cdrom_read_toc(total_sectors, buf, msf, start_track); + if (len < 0) + goto error_cmd; + ide_atapi_cmd_reply(s, len, max_len); + break; + case 1: + /* multi session : only a single session defined */ + memset(buf, 0, 12); + buf[1] = 0x0a; + buf[2] = 0x01; + buf[3] = 0x01; + ide_atapi_cmd_reply(s, 12, max_len); + break; + case 2: + len = cdrom_read_toc_raw(total_sectors, buf, msf, start_track); + if (len < 0) + goto error_cmd; + ide_atapi_cmd_reply(s, len, max_len); + break; + default: + error_cmd: + ide_atapi_cmd_error(s, ILLEGAL_REQUEST, + ASC_INV_FIELD_IN_CMD_PACKET); + } +} + +static void cmd_read_cdvd_capacity(IDEState *s, uint8_t* buf) +{ + uint64_t total_sectors = s->nb_sectors >> 2; + + /* NOTE: it is really the number of sectors minus 1 */ + stl_be_p(buf, total_sectors - 1); + stl_be_p(buf + 4, 2048); + ide_atapi_cmd_reply(s, 8, 8); +} + +static void cmd_read_disc_information(IDEState *s, uint8_t* buf) +{ + uint8_t type = buf[1] & 7; + uint32_t max_len = lduw_be_p(buf + 7); + + /* Types 1/2 are only defined for Blu-Ray. */ + if (type != 0) { + ide_atapi_cmd_error(s, ILLEGAL_REQUEST, + ASC_INV_FIELD_IN_CMD_PACKET); + return; + } + + memset(buf, 0, 34); + buf[1] = 32; + buf[2] = 0xe; /* last session complete, disc finalized */ + buf[3] = 1; /* first track on disc */ + buf[4] = 1; /* # of sessions */ + buf[5] = 1; /* first track of last session */ + buf[6] = 1; /* last track of last session */ + buf[7] = 0x20; /* unrestricted use */ + buf[8] = 0x00; /* CD-ROM or DVD-ROM */ + /* 9-10-11: most significant byte corresponding bytes 4-5-6 */ + /* 12-23: not meaningful for CD-ROM or DVD-ROM */ + /* 24-31: disc bar code */ + /* 32: disc application code */ + /* 33: number of OPC tables */ + + ide_atapi_cmd_reply(s, 34, max_len); +} + +static void cmd_read_dvd_structure(IDEState *s, uint8_t* buf) +{ + int max_len; + int media = buf[1]; + int format = buf[7]; + int ret; + + max_len = lduw_be_p(buf + 8); + + if (format < 0xff) { + if (media_is_cd(s)) { + ide_atapi_cmd_error(s, ILLEGAL_REQUEST, + ASC_INCOMPATIBLE_FORMAT); + return; + } else if (!media_present(s)) { + ide_atapi_cmd_error(s, ILLEGAL_REQUEST, + ASC_INV_FIELD_IN_CMD_PACKET); + return; + } + } + + memset(buf, 0, max_len > IDE_DMA_BUF_SECTORS * BDRV_SECTOR_SIZE + 4 ? + IDE_DMA_BUF_SECTORS * BDRV_SECTOR_SIZE + 4 : max_len); + + switch (format) { + case 0x00 ... 
0x7f: + case 0xff: + if (media == 0) { + ret = ide_dvd_read_structure(s, format, buf, buf); + + if (ret < 0) { + ide_atapi_cmd_error(s, ILLEGAL_REQUEST, -ret); + } else { + ide_atapi_cmd_reply(s, ret, max_len); + } + + break; + } + /* TODO: BD support, fall through for now */ + + /* Generic disk structures */ + case 0x80: /* TODO: AACS volume identifier */ + case 0x81: /* TODO: AACS media serial number */ + case 0x82: /* TODO: AACS media identifier */ + case 0x83: /* TODO: AACS media key block */ + case 0x90: /* TODO: List of recognized format layers */ + case 0xc0: /* TODO: Write protection status */ + default: + ide_atapi_cmd_error(s, ILLEGAL_REQUEST, + ASC_INV_FIELD_IN_CMD_PACKET); + break; + } +} + +static void cmd_set_speed(IDEState *s, uint8_t* buf) +{ + ide_atapi_cmd_ok(s); +} + +enum { + /* + * Only commands flagged as ALLOW_UA are allowed to run under a + * unit attention condition. (See MMC-5, section 4.1.6.1) + */ + ALLOW_UA = 0x01, + + /* + * Commands flagged with CHECK_READY can only execute if a medium is present. + * Otherwise they report the Not Ready Condition. (See MMC-5, section + * 4.1.8) + */ + CHECK_READY = 0x02, + + /* + * Commands flagged with NONDATA do not in any circumstances return + * any data via ide_atapi_cmd_reply. These commands are exempt from + * the normal byte_count_limit constraints. + * See ATA8-ACS3 "7.21.5 Byte Count Limit" + */ + NONDATA = 0x04, + + /* + * CONDDATA implies a command that transfers data only conditionally based + * on the presence of suboptions. It should be exempt from the BCL check at + * command validation time, but it needs to be checked at the command + * handler level instead. + */ + CONDDATA = 0x08, +}; + +static const struct AtapiCmd { + void (*handler)(IDEState *s, uint8_t *buf); + int flags; +} atapi_cmd_table[0x100] = { + [ 0x00 ] = { cmd_test_unit_ready, CHECK_READY | NONDATA }, + [ 0x03 ] = { cmd_request_sense, ALLOW_UA }, + [ 0x12 ] = { cmd_inquiry, ALLOW_UA }, + [ 0x1b ] = { cmd_start_stop_unit, NONDATA }, /* [1] */ + [ 0x1e ] = { cmd_prevent_allow_medium_removal, NONDATA }, + [ 0x25 ] = { cmd_read_cdvd_capacity, CHECK_READY }, + [ 0x28 ] = { cmd_read, /* (10) */ CHECK_READY }, + [ 0x2b ] = { cmd_seek, CHECK_READY | NONDATA }, + [ 0x43 ] = { cmd_read_toc_pma_atip, CHECK_READY }, + [ 0x46 ] = { cmd_get_configuration, ALLOW_UA }, + [ 0x4a ] = { cmd_get_event_status_notification, ALLOW_UA }, + [ 0x51 ] = { cmd_read_disc_information, CHECK_READY }, + [ 0x5a ] = { cmd_mode_sense, /* (10) */ 0 }, + [ 0xa8 ] = { cmd_read, /* (12) */ CHECK_READY }, + [ 0xad ] = { cmd_read_dvd_structure, CHECK_READY }, + [ 0xbb ] = { cmd_set_speed, NONDATA }, + [ 0xbd ] = { cmd_mechanism_status, 0 }, + [ 0xbe ] = { cmd_read_cd, CHECK_READY | CONDDATA }, + /* [1] handler detects and reports not ready condition itself */ +}; + +void ide_atapi_cmd(IDEState *s) +{ + uint8_t *buf = s->io_buffer; + const struct AtapiCmd *cmd = &atapi_cmd_table[s->io_buffer[0]]; + + trace_ide_atapi_cmd(s, s->io_buffer[0]); + + if (trace_event_get_state_backends(TRACE_IDE_ATAPI_CMD_PACKET)) { + /* Each pretty-printed byte needs two bytes and a space; */ + char *ppacket = g_malloc(ATAPI_PACKET_SIZE * 3 + 1); + int i; + for (i = 0; i < ATAPI_PACKET_SIZE; i++) { + sprintf(ppacket + (i * 3), "%02x ", buf[i]); + } + trace_ide_atapi_cmd_packet(s, s->lcyl | (s->hcyl << 8), ppacket); + g_free(ppacket); + } + + /* + * If there's a UNIT_ATTENTION condition pending, only command flagged with + * ALLOW_UA are allowed to complete. 
with other commands getting a CHECK + * condition response unless a higher priority status, defined by the drive + * here, is pending. + */ + if (s->sense_key == UNIT_ATTENTION && !(cmd->flags & ALLOW_UA)) { + ide_atapi_cmd_check_status(s); + return; + } + /* + * When a CD gets changed, we have to report an ejected state and + * then a loaded state to guests so that they detect tray + * open/close and media change events. Guests that do not use + * GET_EVENT_STATUS_NOTIFICATION to detect such tray open/close + * states rely on this behavior. + */ + if (!(cmd->flags & ALLOW_UA) && + !s->tray_open && blk_is_inserted(s->blk) && s->cdrom_changed) { + + if (s->cdrom_changed == 1) { + ide_atapi_cmd_error(s, NOT_READY, ASC_MEDIUM_NOT_PRESENT); + s->cdrom_changed = 2; + } else { + ide_atapi_cmd_error(s, UNIT_ATTENTION, ASC_MEDIUM_MAY_HAVE_CHANGED); + s->cdrom_changed = 0; + } + + return; + } + + /* Report a Not Ready condition if appropriate for the command */ + if ((cmd->flags & CHECK_READY) && + (!media_present(s) || !blk_is_inserted(s->blk))) + { + ide_atapi_cmd_error(s, NOT_READY, ASC_MEDIUM_NOT_PRESENT); + return; + } + + /* Commands that don't transfer DATA permit the byte_count_limit to be 0. + * If this is a data-transferring PIO command and BCL is 0, + * we abort at the /ATA/ level, not the ATAPI level. + * See ATA8 ACS3 section 7.17.6.49 and 7.21.5 */ + if (cmd->handler && !(cmd->flags & (NONDATA | CONDDATA))) { + if (!validate_bcl(s)) { + return; + } + } + + /* Execute the command */ + if (cmd->handler) { + cmd->handler(s, buf); + return; + } + + ide_atapi_cmd_error(s, ILLEGAL_REQUEST, ASC_ILLEGAL_OPCODE); +} diff --git a/hw/ide/cmd646.c b/hw/ide/cmd646.c new file mode 100644 index 000000000..94c576262 --- /dev/null +++ b/hw/ide/cmd646.c @@ -0,0 +1,351 @@ +/* + * QEMU IDE Emulation: PCI cmd646 support. + * + * Copyright (c) 2003 Fabrice Bellard + * Copyright (c) 2006 Openedhand Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include "hw/pci/pci.h" +#include "hw/qdev-properties.h" +#include "migration/vmstate.h" +#include "qemu/module.h" +#include "hw/isa/isa.h" +#include "sysemu/dma.h" +#include "sysemu/reset.h" + +#include "hw/ide/pci.h" +#include "trace.h" + +/* CMD646 specific */ +#define CFR 0x50 +#define CFR_INTR_CH0 0x04 +#define CNTRL 0x51 +#define CNTRL_EN_CH0 0x04 +#define CNTRL_EN_CH1 0x08 +#define ARTTIM23 0x57 +#define ARTTIM23_INTR_CH1 0x10 +#define MRDMODE 0x71 +#define MRDMODE_INTR_CH0 0x04 +#define MRDMODE_INTR_CH1 0x08 +#define MRDMODE_BLK_CH0 0x10 +#define MRDMODE_BLK_CH1 0x20 +#define UDIDETCR0 0x73 +#define UDIDETCR1 0x7B + +static void cmd646_update_irq(PCIDevice *pd); + +static void cmd646_update_dma_interrupts(PCIDevice *pd) +{ + /* Sync DMA interrupt status from UDMA interrupt status */ + if (pd->config[MRDMODE] & MRDMODE_INTR_CH0) { + pd->config[CFR] |= CFR_INTR_CH0; + } else { + pd->config[CFR] &= ~CFR_INTR_CH0; + } + + if (pd->config[MRDMODE] & MRDMODE_INTR_CH1) { + pd->config[ARTTIM23] |= ARTTIM23_INTR_CH1; + } else { + pd->config[ARTTIM23] &= ~ARTTIM23_INTR_CH1; + } +} + +static void cmd646_update_udma_interrupts(PCIDevice *pd) +{ + /* Sync UDMA interrupt status from DMA interrupt status */ + if (pd->config[CFR] & CFR_INTR_CH0) { + pd->config[MRDMODE] |= MRDMODE_INTR_CH0; + } else { + pd->config[MRDMODE] &= ~MRDMODE_INTR_CH0; + } + + if (pd->config[ARTTIM23] & ARTTIM23_INTR_CH1) { + pd->config[MRDMODE] |= MRDMODE_INTR_CH1; + } else { + pd->config[MRDMODE] &= ~MRDMODE_INTR_CH1; + } +} + +static uint64_t bmdma_read(void *opaque, hwaddr addr, + unsigned size) +{ + BMDMAState *bm = opaque; + PCIDevice *pci_dev = PCI_DEVICE(bm->pci_dev); + uint32_t val; + + if (size != 1) { + return ((uint64_t)1 << (size * 8)) - 1; + } + + switch(addr & 3) { + case 0: + val = bm->cmd; + break; + case 1: + val = pci_dev->config[MRDMODE]; + break; + case 2: + val = bm->status; + break; + case 3: + if (bm == &bm->pci_dev->bmdma[0]) { + val = pci_dev->config[UDIDETCR0]; + } else { + val = pci_dev->config[UDIDETCR1]; + } + break; + default: + val = 0xff; + break; + } + + trace_bmdma_read_cmd646(addr, val); + return val; +} + +static void bmdma_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + BMDMAState *bm = opaque; + PCIDevice *pci_dev = PCI_DEVICE(bm->pci_dev); + + if (size != 1) { + return; + } + + trace_bmdma_write_cmd646(addr, val); + switch(addr & 3) { + case 0: + bmdma_cmd_writeb(bm, val); + break; + case 1: + pci_dev->config[MRDMODE] = + (pci_dev->config[MRDMODE] & ~0x30) | (val & 0x30); + cmd646_update_dma_interrupts(pci_dev); + cmd646_update_irq(pci_dev); + break; + case 2: + bm->status = (val & 0x60) | (bm->status & 1) | (bm->status & ~val & 0x06); + break; + case 3: + if (bm == &bm->pci_dev->bmdma[0]) { + pci_dev->config[UDIDETCR0] = val; + } else { + pci_dev->config[UDIDETCR1] = val; + } + break; + } +} + +static const MemoryRegionOps cmd646_bmdma_ops = { + .read = bmdma_read, + .write = bmdma_write, +}; + +static void bmdma_setup_bar(PCIIDEState *d) +{ + BMDMAState *bm; + int i; + + memory_region_init(&d->bmdma_bar, OBJECT(d), "cmd646-bmdma", 16); + for(i = 0;i < 2; i++) { + bm = &d->bmdma[i]; + memory_region_init_io(&bm->extra_io, OBJECT(d), &cmd646_bmdma_ops, bm, + "cmd646-bmdma-bus", 4); + memory_region_add_subregion(&d->bmdma_bar, i * 8, &bm->extra_io); + memory_region_init_io(&bm->addr_ioport, OBJECT(d), + &bmdma_addr_ioport_ops, bm, + "cmd646-bmdma-ioport", 4); + memory_region_add_subregion(&d->bmdma_bar, i * 8 + 4, &bm->addr_ioport); 
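+        /*
+         * Editor's aside: the resulting 16-byte BMDMA BAR layout is
+         *
+         *   0x0-0x3  channel 0 cmd / MRDMODE / status / UDIDETCR0
+         *   0x4-0x7  channel 0 PRD table address
+         *   0x8-0xb  channel 1 cmd / MRDMODE / status / UDIDETCR1
+         *   0xc-0xf  channel 1 PRD table address
+         *
+         * i.e. the usual 8 bytes per channel, with the CMD646-specific
+         * registers overlaid on bytes that a plain PCI BMDMA leaves
+         * reserved (see bmdma_read/bmdma_write above).
+         */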
+ } +} + +static void cmd646_update_irq(PCIDevice *pd) +{ + int pci_level; + + pci_level = ((pd->config[MRDMODE] & MRDMODE_INTR_CH0) && + !(pd->config[MRDMODE] & MRDMODE_BLK_CH0)) || + ((pd->config[MRDMODE] & MRDMODE_INTR_CH1) && + !(pd->config[MRDMODE] & MRDMODE_BLK_CH1)); + pci_set_irq(pd, pci_level); +} + +/* the PCI irq level is the logical OR of the two channels */ +static void cmd646_set_irq(void *opaque, int channel, int level) +{ + PCIIDEState *d = opaque; + PCIDevice *pd = PCI_DEVICE(d); + int irq_mask; + + irq_mask = MRDMODE_INTR_CH0 << channel; + if (level) { + pd->config[MRDMODE] |= irq_mask; + } else { + pd->config[MRDMODE] &= ~irq_mask; + } + cmd646_update_dma_interrupts(pd); + cmd646_update_irq(pd); +} + +static void cmd646_reset(DeviceState *dev) +{ + PCIIDEState *d = PCI_IDE(dev); + unsigned int i; + + for (i = 0; i < 2; i++) { + ide_bus_reset(&d->bus[i]); + } +} + +static uint32_t cmd646_pci_config_read(PCIDevice *d, + uint32_t address, int len) +{ + return pci_default_read_config(d, address, len); +} + +static void cmd646_pci_config_write(PCIDevice *d, uint32_t addr, uint32_t val, + int l) +{ + uint32_t i; + + pci_default_write_config(d, addr, val, l); + + for (i = addr; i < addr + l; i++) { + switch (i) { + case CFR: + case ARTTIM23: + cmd646_update_udma_interrupts(d); + break; + case MRDMODE: + cmd646_update_dma_interrupts(d); + break; + } + } + + cmd646_update_irq(d); +} + +/* CMD646 PCI IDE controller */ +static void pci_cmd646_ide_realize(PCIDevice *dev, Error **errp) +{ + PCIIDEState *d = PCI_IDE(dev); + DeviceState *ds = DEVICE(dev); + uint8_t *pci_conf = dev->config; + int i; + + pci_conf[PCI_CLASS_PROG] = 0x8f; + + pci_conf[CNTRL] = CNTRL_EN_CH0; // enable IDE0 + if (d->secondary) { + /* XXX: if not enabled, really disable the seconday IDE controller */ + pci_conf[CNTRL] |= CNTRL_EN_CH1; /* enable IDE1 */ + } + + /* Set write-to-clear interrupt bits */ + dev->wmask[CFR] = 0x0; + dev->w1cmask[CFR] = CFR_INTR_CH0; + dev->wmask[ARTTIM23] = 0x0; + dev->w1cmask[ARTTIM23] = ARTTIM23_INTR_CH1; + dev->wmask[MRDMODE] = 0x0; + dev->w1cmask[MRDMODE] = MRDMODE_INTR_CH0 | MRDMODE_INTR_CH1; + + memory_region_init_io(&d->data_bar[0], OBJECT(d), &pci_ide_data_le_ops, + &d->bus[0], "cmd646-data0", 8); + pci_register_bar(dev, 0, PCI_BASE_ADDRESS_SPACE_IO, &d->data_bar[0]); + + memory_region_init_io(&d->cmd_bar[0], OBJECT(d), &pci_ide_cmd_le_ops, + &d->bus[0], "cmd646-cmd0", 4); + pci_register_bar(dev, 1, PCI_BASE_ADDRESS_SPACE_IO, &d->cmd_bar[0]); + + memory_region_init_io(&d->data_bar[1], OBJECT(d), &pci_ide_data_le_ops, + &d->bus[1], "cmd646-data1", 8); + pci_register_bar(dev, 2, PCI_BASE_ADDRESS_SPACE_IO, &d->data_bar[1]); + + memory_region_init_io(&d->cmd_bar[1], OBJECT(d), &pci_ide_cmd_le_ops, + &d->bus[1], "cmd646-cmd1", 4); + pci_register_bar(dev, 3, PCI_BASE_ADDRESS_SPACE_IO, &d->cmd_bar[1]); + + bmdma_setup_bar(d); + pci_register_bar(dev, 4, PCI_BASE_ADDRESS_SPACE_IO, &d->bmdma_bar); + + /* TODO: RST# value should be 0 */ + pci_conf[PCI_INTERRUPT_PIN] = 0x01; // interrupt on pin 1 + + qdev_init_gpio_in(ds, cmd646_set_irq, 2); + for (i = 0; i < 2; i++) { + ide_bus_init(&d->bus[i], sizeof(d->bus[i]), ds, i, 2); + ide_init2(&d->bus[i], qdev_get_gpio_in(ds, i)); + + bmdma_init(&d->bus[i], &d->bmdma[i], d); + d->bmdma[i].bus = &d->bus[i]; + ide_register_restart_cb(&d->bus[i]); + } +} + +static void pci_cmd646_ide_exitfn(PCIDevice *dev) +{ + PCIIDEState *d = PCI_IDE(dev); + unsigned i; + + for (i = 0; i < 2; ++i) { + memory_region_del_subregion(&d->bmdma_bar, 
&d->bmdma[i].extra_io); + memory_region_del_subregion(&d->bmdma_bar, &d->bmdma[i].addr_ioport); + } +} + +static Property cmd646_ide_properties[] = { + DEFINE_PROP_UINT32("secondary", PCIIDEState, secondary, 0), + DEFINE_PROP_END_OF_LIST(), +}; + +static void cmd646_ide_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + + dc->reset = cmd646_reset; + dc->vmsd = &vmstate_ide_pci; + k->realize = pci_cmd646_ide_realize; + k->exit = pci_cmd646_ide_exitfn; + k->vendor_id = PCI_VENDOR_ID_CMD; + k->device_id = PCI_DEVICE_ID_CMD_646; + k->revision = 0x07; + k->class_id = PCI_CLASS_STORAGE_IDE; + k->config_read = cmd646_pci_config_read; + k->config_write = cmd646_pci_config_write; + device_class_set_props(dc, cmd646_ide_properties); + set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); +} + +static const TypeInfo cmd646_ide_info = { + .name = "cmd646-ide", + .parent = TYPE_PCI_IDE, + .class_init = cmd646_ide_class_init, +}; + +static void cmd646_ide_register_types(void) +{ + type_register_static(&cmd646_ide_info); +} + +type_init(cmd646_ide_register_types) diff --git a/hw/ide/core.c b/hw/ide/core.c new file mode 100644 index 000000000..e28f8aad6 --- /dev/null +++ b/hw/ide/core.c @@ -0,0 +1,2946 @@ +/* + * QEMU IDE disk and CD/DVD-ROM Emulator + * + * Copyright (c) 2003 Fabrice Bellard + * Copyright (c) 2006 Openedhand Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include "hw/isa/isa.h" +#include "migration/vmstate.h" +#include "qemu/error-report.h" +#include "qemu/main-loop.h" +#include "qemu/timer.h" +#include "sysemu/sysemu.h" +#include "sysemu/blockdev.h" +#include "sysemu/dma.h" +#include "hw/block/block.h" +#include "sysemu/block-backend.h" +#include "qapi/error.h" +#include "qemu/cutils.h" +#include "sysemu/replay.h" +#include "sysemu/runstate.h" +#include "hw/ide/internal.h" +#include "trace.h" + +/* These values were based on a Seagate ST3500418AS but have been modified + to make more sense in QEMU */ +static const int smart_attributes[][12] = { + /* id, flags, hflags, val, wrst, raw (6 bytes), threshold */ + /* raw read error rate*/ + { 0x01, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06}, + /* spin up */ + { 0x03, 0x03, 0x00, 0x64, 0x64, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + /* start stop count */ + { 0x04, 0x02, 0x00, 0x64, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x14}, + /* remapped sectors */ + { 0x05, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x24}, + /* power on hours */ + { 0x09, 0x03, 0x00, 0x64, 0x64, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + /* power cycle count */ + { 0x0c, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + /* airflow-temperature-celsius */ + { 190, 0x03, 0x00, 0x45, 0x45, 0x1f, 0x00, 0x1f, 0x1f, 0x00, 0x00, 0x32}, +}; + +const char *IDE_DMA_CMD_lookup[IDE_DMA__COUNT] = { + [IDE_DMA_READ] = "DMA READ", + [IDE_DMA_WRITE] = "DMA WRITE", + [IDE_DMA_TRIM] = "DMA TRIM", + [IDE_DMA_ATAPI] = "DMA ATAPI" +}; + +static const char *IDE_DMA_CMD_str(enum ide_dma_cmd enval) +{ + if ((unsigned)enval < IDE_DMA__COUNT) { + return IDE_DMA_CMD_lookup[enval]; + } + return "DMA UNKNOWN CMD"; +} + +static void ide_dummy_transfer_stop(IDEState *s); + +static void padstr(char *str, const char *src, int len) +{ + int i, v; + for(i = 0; i < len; i++) { + if (*src) + v = *src++; + else + v = ' '; + str[i^1] = v; + } +} + +static void put_le16(uint16_t *p, unsigned int v) +{ + *p = cpu_to_le16(v); +} + +static void ide_identify_size(IDEState *s) +{ + uint16_t *p = (uint16_t *)s->identify_data; + int64_t nb_sectors_lba28 = s->nb_sectors; + if (nb_sectors_lba28 >= 1 << 28) { + nb_sectors_lba28 = (1 << 28) - 1; + } + put_le16(p + 60, nb_sectors_lba28); + put_le16(p + 61, nb_sectors_lba28 >> 16); + put_le16(p + 100, s->nb_sectors); + put_le16(p + 101, s->nb_sectors >> 16); + put_le16(p + 102, s->nb_sectors >> 32); + put_le16(p + 103, s->nb_sectors >> 48); +} + +static void ide_identify(IDEState *s) +{ + uint16_t *p; + unsigned int oldsize; + IDEDevice *dev = s->unit ? s->bus->slave : s->bus->master; + + p = (uint16_t *)s->identify_data; + if (s->identify_set) { + goto fill_buffer; + } + memset(p, 0, sizeof(s->identify_data)); + + put_le16(p + 0, 0x0040); + put_le16(p + 1, s->cylinders); + put_le16(p + 3, s->heads); + put_le16(p + 4, 512 * s->sectors); /* XXX: retired, remove ? */ + put_le16(p + 5, 512); /* XXX: retired, remove ? */ + put_le16(p + 6, s->sectors); + padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */ + put_le16(p + 20, 3); /* XXX: retired, remove ? 
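+ *
+ * Editor's aside: note how padstr() above stores every string through
+ * str[i ^ 1], implementing the ATA convention that each 16-bit
+ * IDENTIFY word carries the earlier character in its upper byte. As a
+ * worked example, "QEMU" lands in the byte buffer as 'E','Q','U','M'
+ * and reads back correctly once the guest swaps each word's bytes.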
*/ + put_le16(p + 21, 512); /* cache size in sectors */ + put_le16(p + 22, 4); /* ecc bytes */ + padstr((char *)(p + 23), s->version, 8); /* firmware version */ + padstr((char *)(p + 27), s->drive_model_str, 40); /* model */ +#if MAX_MULT_SECTORS > 1 + put_le16(p + 47, 0x8000 | MAX_MULT_SECTORS); +#endif + put_le16(p + 48, 1); /* dword I/O */ + put_le16(p + 49, (1 << 11) | (1 << 9) | (1 << 8)); /* DMA and LBA supported */ + put_le16(p + 51, 0x200); /* PIO transfer cycle */ + put_le16(p + 52, 0x200); /* DMA transfer cycle */ + put_le16(p + 53, 1 | (1 << 1) | (1 << 2)); /* words 54-58,64-70,88 are valid */ + put_le16(p + 54, s->cylinders); + put_le16(p + 55, s->heads); + put_le16(p + 56, s->sectors); + oldsize = s->cylinders * s->heads * s->sectors; + put_le16(p + 57, oldsize); + put_le16(p + 58, oldsize >> 16); + if (s->mult_sectors) + put_le16(p + 59, 0x100 | s->mult_sectors); + /* *(p + 60) := nb_sectors -- see ide_identify_size */ + /* *(p + 61) := nb_sectors >> 16 -- see ide_identify_size */ + put_le16(p + 62, 0x07); /* single word dma0-2 supported */ + put_le16(p + 63, 0x07); /* mdma0-2 supported */ + put_le16(p + 64, 0x03); /* pio3-4 supported */ + put_le16(p + 65, 120); + put_le16(p + 66, 120); + put_le16(p + 67, 120); + put_le16(p + 68, 120); + if (dev && dev->conf.discard_granularity) { + put_le16(p + 69, (1 << 14)); /* determinate TRIM behavior */ + } + + if (s->ncq_queues) { + put_le16(p + 75, s->ncq_queues - 1); + /* NCQ supported */ + put_le16(p + 76, (1 << 8)); + } + + put_le16(p + 80, 0xf0); /* ata3 -> ata6 supported */ + put_le16(p + 81, 0x16); /* conforms to ata5 */ + /* 14=NOP supported, 5=WCACHE supported, 0=SMART supported */ + put_le16(p + 82, (1 << 14) | (1 << 5) | 1); + /* 13=flush_cache_ext,12=flush_cache,10=lba48 */ + put_le16(p + 83, (1 << 14) | (1 << 13) | (1 <<12) | (1 << 10)); + /* 14=set to 1, 8=has WWN, 1=SMART self test, 0=SMART error logging */ + if (s->wwn) { + put_le16(p + 84, (1 << 14) | (1 << 8) | 0); + } else { + put_le16(p + 84, (1 << 14) | 0); + } + /* 14 = NOP supported, 5=WCACHE enabled, 0=SMART feature set enabled */ + if (blk_enable_write_cache(s->blk)) { + put_le16(p + 85, (1 << 14) | (1 << 5) | 1); + } else { + put_le16(p + 85, (1 << 14) | 1); + } + /* 13=flush_cache_ext,12=flush_cache,10=lba48 */ + put_le16(p + 86, (1 << 13) | (1 <<12) | (1 << 10)); + /* 14=set to 1, 8=has WWN, 1=SMART self test, 0=SMART error logging */ + if (s->wwn) { + put_le16(p + 87, (1 << 14) | (1 << 8) | 0); + } else { + put_le16(p + 87, (1 << 14) | 0); + } + put_le16(p + 88, 0x3f | (1 << 13)); /* udma5 set and supported */ + put_le16(p + 93, 1 | (1 << 14) | 0x2000); + /* *(p + 100) := nb_sectors -- see ide_identify_size */ + /* *(p + 101) := nb_sectors >> 16 -- see ide_identify_size */ + /* *(p + 102) := nb_sectors >> 32 -- see ide_identify_size */ + /* *(p + 103) := nb_sectors >> 48 -- see ide_identify_size */ + + if (dev && dev->conf.physical_block_size) + put_le16(p + 106, 0x6000 | get_physical_block_exp(&dev->conf)); + if (s->wwn) { + /* LE 16-bit words 111-108 contain 64-bit World Wide Name */ + put_le16(p + 108, s->wwn >> 48); + put_le16(p + 109, s->wwn >> 32); + put_le16(p + 110, s->wwn >> 16); + put_le16(p + 111, s->wwn); + } + if (dev && dev->conf.discard_granularity) { + put_le16(p + 169, 1); /* TRIM support */ + } + if (dev) { + put_le16(p + 217, dev->rotation_rate); /* Nominal media rotation rate */ + } + + ide_identify_size(s); + s->identify_set = 1; + +fill_buffer: + memcpy(s->io_buffer, p, sizeof(s->identify_data)); +} + +static void 
ide_atapi_identify(IDEState *s) +{ + uint16_t *p; + + p = (uint16_t *)s->identify_data; + if (s->identify_set) { + goto fill_buffer; + } + memset(p, 0, sizeof(s->identify_data)); + + /* Removable CDROM, 50us response, 12 byte packets */ + put_le16(p + 0, (2 << 14) | (5 << 8) | (1 << 7) | (2 << 5) | (0 << 0)); + padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */ + put_le16(p + 20, 3); /* buffer type */ + put_le16(p + 21, 512); /* cache size in sectors */ + put_le16(p + 22, 4); /* ecc bytes */ + padstr((char *)(p + 23), s->version, 8); /* firmware version */ + padstr((char *)(p + 27), s->drive_model_str, 40); /* model */ + put_le16(p + 48, 1); /* dword I/O (XXX: should not be set on CDROM) */ +#ifdef USE_DMA_CDROM + put_le16(p + 49, 1 << 9 | 1 << 8); /* DMA and LBA supported */ + put_le16(p + 53, 7); /* words 64-70, 54-58, 88 valid */ + put_le16(p + 62, 7); /* single word dma0-2 supported */ + put_le16(p + 63, 7); /* mdma0-2 supported */ +#else + put_le16(p + 49, 1 << 9); /* LBA supported, no DMA */ + put_le16(p + 53, 3); /* words 64-70, 54-58 valid */ + put_le16(p + 63, 0x103); /* DMA modes XXX: may be incorrect */ +#endif + put_le16(p + 64, 3); /* pio3-4 supported */ + put_le16(p + 65, 0xb4); /* minimum DMA multiword tx cycle time */ + put_le16(p + 66, 0xb4); /* recommended DMA multiword tx cycle time */ + put_le16(p + 67, 0x12c); /* minimum PIO cycle time without flow control */ + put_le16(p + 68, 0xb4); /* minimum PIO cycle time with IORDY flow control */ + + put_le16(p + 71, 30); /* in ns */ + put_le16(p + 72, 30); /* in ns */ + + if (s->ncq_queues) { + put_le16(p + 75, s->ncq_queues - 1); + /* NCQ supported */ + put_le16(p + 76, (1 << 8)); + } + + put_le16(p + 80, 0x1e); /* support up to ATA/ATAPI-4 */ + if (s->wwn) { + put_le16(p + 84, (1 << 8)); /* supports WWN for words 108-111 */ + put_le16(p + 87, (1 << 8)); /* WWN enabled */ + } + +#ifdef USE_DMA_CDROM + put_le16(p + 88, 0x3f | (1 << 13)); /* udma5 set and supported */ +#endif + + if (s->wwn) { + /* LE 16-bit words 111-108 contain 64-bit World Wide Name */ + put_le16(p + 108, s->wwn >> 48); + put_le16(p + 109, s->wwn >> 32); + put_le16(p + 110, s->wwn >> 16); + put_le16(p + 111, s->wwn); + } + + s->identify_set = 1; + +fill_buffer: + memcpy(s->io_buffer, p, sizeof(s->identify_data)); +} + +static void ide_cfata_identify_size(IDEState *s) +{ + uint16_t *p = (uint16_t *)s->identify_data; + put_le16(p + 7, s->nb_sectors >> 16); /* Sectors per card */ + put_le16(p + 8, s->nb_sectors); /* Sectors per card */ + put_le16(p + 60, s->nb_sectors); /* Total LBA sectors */ + put_le16(p + 61, s->nb_sectors >> 16); /* Total LBA sectors */ +} + +static void ide_cfata_identify(IDEState *s) +{ + uint16_t *p; + uint32_t cur_sec; + + p = (uint16_t *)s->identify_data; + if (s->identify_set) { + goto fill_buffer; + } + memset(p, 0, sizeof(s->identify_data)); + + cur_sec = s->cylinders * s->heads * s->sectors; + + put_le16(p + 0, 0x848a); /* CF Storage Card signature */ + put_le16(p + 1, s->cylinders); /* Default cylinders */ + put_le16(p + 3, s->heads); /* Default heads */ + put_le16(p + 6, s->sectors); /* Default sectors per track */ + /* *(p + 7) := nb_sectors >> 16 -- see ide_cfata_identify_size */ + /* *(p + 8) := nb_sectors -- see ide_cfata_identify_size */ + padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */ + put_le16(p + 22, 0x0004); /* ECC bytes */ + padstr((char *) (p + 23), s->version, 8); /* Firmware Revision */ + padstr((char *) (p + 27), s->drive_model_str, 40);/* Model number */ +#if 
MAX_MULT_SECTORS > 1 + put_le16(p + 47, 0x8000 | MAX_MULT_SECTORS); +#else + put_le16(p + 47, 0x0000); +#endif + put_le16(p + 49, 0x0f00); /* Capabilities */ + put_le16(p + 51, 0x0002); /* PIO cycle timing mode */ + put_le16(p + 52, 0x0001); /* DMA cycle timing mode */ + put_le16(p + 53, 0x0003); /* Translation params valid */ + put_le16(p + 54, s->cylinders); /* Current cylinders */ + put_le16(p + 55, s->heads); /* Current heads */ + put_le16(p + 56, s->sectors); /* Current sectors */ + put_le16(p + 57, cur_sec); /* Current capacity */ + put_le16(p + 58, cur_sec >> 16); /* Current capacity */ + if (s->mult_sectors) /* Multiple sector setting */ + put_le16(p + 59, 0x100 | s->mult_sectors); + /* *(p + 60) := nb_sectors -- see ide_cfata_identify_size */ + /* *(p + 61) := nb_sectors >> 16 -- see ide_cfata_identify_size */ + put_le16(p + 63, 0x0203); /* Multiword DMA capability */ + put_le16(p + 64, 0x0001); /* Flow Control PIO support */ + put_le16(p + 65, 0x0096); /* Min. Multiword DMA cycle */ + put_le16(p + 66, 0x0096); /* Rec. Multiword DMA cycle */ + put_le16(p + 68, 0x00b4); /* Min. PIO cycle time */ + put_le16(p + 82, 0x400c); /* Command Set supported */ + put_le16(p + 83, 0x7068); /* Command Set supported */ + put_le16(p + 84, 0x4000); /* Features supported */ + put_le16(p + 85, 0x000c); /* Command Set enabled */ + put_le16(p + 86, 0x7044); /* Command Set enabled */ + put_le16(p + 87, 0x4000); /* Features enabled */ + put_le16(p + 91, 0x4060); /* Current APM level */ + put_le16(p + 129, 0x0002); /* Current features option */ + put_le16(p + 130, 0x0005); /* Reassigned sectors */ + put_le16(p + 131, 0x0001); /* Initial power mode */ + put_le16(p + 132, 0x0000); /* User signature */ + put_le16(p + 160, 0x8100); /* Power requirement */ + put_le16(p + 161, 0x8001); /* CF command set */ + + ide_cfata_identify_size(s); + s->identify_set = 1; + +fill_buffer: + memcpy(s->io_buffer, p, sizeof(s->identify_data)); +} + +static void ide_set_signature(IDEState *s) +{ + s->select &= ~(ATA_DEV_HS); /* clear head */ + /* put signature */ + s->nsector = 1; + s->sector = 1; + if (s->drive_kind == IDE_CD) { + s->lcyl = 0x14; + s->hcyl = 0xeb; + } else if (s->blk) { + s->lcyl = 0; + s->hcyl = 0; + } else { + s->lcyl = 0xff; + s->hcyl = 0xff; + } +} + +static bool ide_sect_range_ok(IDEState *s, + uint64_t sector, uint64_t nb_sectors) +{ + uint64_t total_sectors; + + blk_get_geometry(s->blk, &total_sectors); + if (sector > total_sectors || nb_sectors > total_sectors - sector) { + return false; + } + return true; +} + +typedef struct TrimAIOCB { + BlockAIOCB common; + IDEState *s; + QEMUBH *bh; + int ret; + QEMUIOVector *qiov; + BlockAIOCB *aiocb; + int i, j; +} TrimAIOCB; + +static void trim_aio_cancel(BlockAIOCB *acb) +{ + TrimAIOCB *iocb = container_of(acb, TrimAIOCB, common); + + /* Exit the loop so ide_issue_trim_cb will not continue */ + iocb->j = iocb->qiov->niov - 1; + iocb->i = (iocb->qiov->iov[iocb->j].iov_len / 8) - 1; + + iocb->ret = -ECANCELED; + + if (iocb->aiocb) { + blk_aio_cancel_async(iocb->aiocb); + iocb->aiocb = NULL; + } +} + +static const AIOCBInfo trim_aiocb_info = { + .aiocb_size = sizeof(TrimAIOCB), + .cancel_async = trim_aio_cancel, +}; + +static void ide_trim_bh_cb(void *opaque) +{ + TrimAIOCB *iocb = opaque; + + iocb->common.cb(iocb->common.opaque, iocb->ret); + + qemu_bh_delete(iocb->bh); + iocb->bh = NULL; + qemu_aio_unref(iocb); +} + +static void ide_issue_trim_cb(void *opaque, int ret) +{ + TrimAIOCB *iocb = opaque; + IDEState *s = iocb->s; + + if (iocb->i >= 0) { + if (ret >= 
0) { + block_acct_done(blk_get_stats(s->blk), &s->acct); + } else { + block_acct_failed(blk_get_stats(s->blk), &s->acct); + } + } + + if (ret >= 0) { + while (iocb->j < iocb->qiov->niov) { + int j = iocb->j; + while (++iocb->i < iocb->qiov->iov[j].iov_len / 8) { + int i = iocb->i; + uint64_t *buffer = iocb->qiov->iov[j].iov_base; + + /* 6-byte LBA + 2-byte range per entry */ + uint64_t entry = le64_to_cpu(buffer[i]); + uint64_t sector = entry & 0x0000ffffffffffffULL; + uint16_t count = entry >> 48; + + if (count == 0) { + continue; + } + + if (!ide_sect_range_ok(s, sector, count)) { + block_acct_invalid(blk_get_stats(s->blk), BLOCK_ACCT_UNMAP); + iocb->ret = -EINVAL; + goto done; + } + + block_acct_start(blk_get_stats(s->blk), &s->acct, + count << BDRV_SECTOR_BITS, BLOCK_ACCT_UNMAP); + + /* Got an entry! Submit and exit. */ + iocb->aiocb = blk_aio_pdiscard(s->blk, + sector << BDRV_SECTOR_BITS, + count << BDRV_SECTOR_BITS, + ide_issue_trim_cb, opaque); + return; + } + + iocb->j++; + iocb->i = -1; + } + } else { + iocb->ret = ret; + } + +done: + iocb->aiocb = NULL; + if (iocb->bh) { + replay_bh_schedule_event(iocb->bh); + } +} + +BlockAIOCB *ide_issue_trim( + int64_t offset, QEMUIOVector *qiov, + BlockCompletionFunc *cb, void *cb_opaque, void *opaque) +{ + IDEState *s = opaque; + TrimAIOCB *iocb; + + iocb = blk_aio_get(&trim_aiocb_info, s->blk, cb, cb_opaque); + iocb->s = s; + iocb->bh = qemu_bh_new(ide_trim_bh_cb, iocb); + iocb->ret = 0; + iocb->qiov = qiov; + iocb->i = -1; + iocb->j = 0; + ide_issue_trim_cb(iocb, 0); + return &iocb->common; +} + +void ide_abort_command(IDEState *s) +{ + ide_transfer_stop(s); + s->status = READY_STAT | ERR_STAT; + s->error = ABRT_ERR; +} + +static void ide_set_retry(IDEState *s) +{ + s->bus->retry_unit = s->unit; + s->bus->retry_sector_num = ide_get_sector(s); + s->bus->retry_nsector = s->nsector; +} + +static void ide_clear_retry(IDEState *s) +{ + s->bus->retry_unit = -1; + s->bus->retry_sector_num = 0; + s->bus->retry_nsector = 0; +} + +/* prepare data transfer and tell what to do after */ +bool ide_transfer_start_norecurse(IDEState *s, uint8_t *buf, int size, + EndTransferFunc *end_transfer_func) +{ + s->data_ptr = buf; + s->data_end = buf + size; + ide_set_retry(s); + if (!(s->status & ERR_STAT)) { + s->status |= DRQ_STAT; + } + if (!s->bus->dma->ops->pio_transfer) { + s->end_transfer_func = end_transfer_func; + return false; + } + s->bus->dma->ops->pio_transfer(s->bus->dma); + return true; +} + +void ide_transfer_start(IDEState *s, uint8_t *buf, int size, + EndTransferFunc *end_transfer_func) +{ + if (ide_transfer_start_norecurse(s, buf, size, end_transfer_func)) { + end_transfer_func(s); + } +} + +static void ide_cmd_done(IDEState *s) +{ + if (s->bus->dma->ops->cmd_done) { + s->bus->dma->ops->cmd_done(s->bus->dma); + } +} + +static void ide_transfer_halt(IDEState *s) +{ + s->end_transfer_func = ide_transfer_stop; + s->data_ptr = s->io_buffer; + s->data_end = s->io_buffer; + s->status &= ~DRQ_STAT; +} + +void ide_transfer_stop(IDEState *s) +{ + ide_transfer_halt(s); + ide_cmd_done(s); +} + +int64_t ide_get_sector(IDEState *s) +{ + int64_t sector_num; + if (s->select & (ATA_DEV_LBA)) { + if (s->lba48) { + sector_num = ((int64_t)s->hob_hcyl << 40) | + ((int64_t) s->hob_lcyl << 32) | + ((int64_t) s->hob_sector << 24) | + ((int64_t) s->hcyl << 16) | + ((int64_t) s->lcyl << 8) | s->sector; + } else { + /* LBA28 */ + sector_num = ((s->select & (ATA_DEV_LBA_MSB)) << 24) | + (s->hcyl << 16) | (s->lcyl << 8) | s->sector; + } + } else { + /* CHS */ + sector_num 
= ((s->hcyl << 8) | s->lcyl) * s->heads * s->sectors + + (s->select & (ATA_DEV_HS)) * s->sectors + (s->sector - 1); + } + + return sector_num; +} + +void ide_set_sector(IDEState *s, int64_t sector_num) +{ + unsigned int cyl, r; + if (s->select & (ATA_DEV_LBA)) { + if (s->lba48) { + s->sector = sector_num; + s->lcyl = sector_num >> 8; + s->hcyl = sector_num >> 16; + s->hob_sector = sector_num >> 24; + s->hob_lcyl = sector_num >> 32; + s->hob_hcyl = sector_num >> 40; + } else { + /* LBA28 */ + s->select = (s->select & ~(ATA_DEV_LBA_MSB)) | + ((sector_num >> 24) & (ATA_DEV_LBA_MSB)); + s->hcyl = (sector_num >> 16); + s->lcyl = (sector_num >> 8); + s->sector = (sector_num); + } + } else { + /* CHS */ + cyl = sector_num / (s->heads * s->sectors); + r = sector_num % (s->heads * s->sectors); + s->hcyl = cyl >> 8; + s->lcyl = cyl; + s->select = (s->select & ~(ATA_DEV_HS)) | + ((r / s->sectors) & (ATA_DEV_HS)); + s->sector = (r % s->sectors) + 1; + } +} + +static void ide_rw_error(IDEState *s) { + ide_abort_command(s); + ide_set_irq(s->bus); +} + +static void ide_buffered_readv_cb(void *opaque, int ret) +{ + IDEBufferedRequest *req = opaque; + if (!req->orphaned) { + if (!ret) { + assert(req->qiov.size == req->original_qiov->size); + qemu_iovec_from_buf(req->original_qiov, 0, + req->qiov.local_iov.iov_base, + req->original_qiov->size); + } + req->original_cb(req->original_opaque, ret); + } + QLIST_REMOVE(req, list); + qemu_vfree(qemu_iovec_buf(&req->qiov)); + g_free(req); +} + +#define MAX_BUFFERED_REQS 16 + +BlockAIOCB *ide_buffered_readv(IDEState *s, int64_t sector_num, + QEMUIOVector *iov, int nb_sectors, + BlockCompletionFunc *cb, void *opaque) +{ + BlockAIOCB *aioreq; + IDEBufferedRequest *req; + int c = 0; + + QLIST_FOREACH(req, &s->buffered_requests, list) { + c++; + } + if (c > MAX_BUFFERED_REQS) { + return blk_abort_aio_request(s->blk, cb, opaque, -EIO); + } + + req = g_new0(IDEBufferedRequest, 1); + req->original_qiov = iov; + req->original_cb = cb; + req->original_opaque = opaque; + qemu_iovec_init_buf(&req->qiov, blk_blockalign(s->blk, iov->size), + iov->size); + + aioreq = blk_aio_preadv(s->blk, sector_num << BDRV_SECTOR_BITS, + &req->qiov, 0, ide_buffered_readv_cb, req); + + QLIST_INSERT_HEAD(&s->buffered_requests, req, list); + return aioreq; +} + +/** + * Cancel all pending DMA requests. + * Any buffered DMA requests are instantly canceled, + * but any pending unbuffered DMA requests must be waited on. + */ +void ide_cancel_dma_sync(IDEState *s) +{ + IDEBufferedRequest *req; + + /* First invoke the callbacks of all buffered requests + * and flag those requests as orphaned. Ideally there + * are no unbuffered (Scatter Gather DMA Requests or + * write requests) pending and we can avoid to drain. */ + QLIST_FOREACH(req, &s->buffered_requests, list) { + if (!req->orphaned) { + trace_ide_cancel_dma_sync_buffered(req->original_cb, req); + req->original_cb(req->original_opaque, -ECANCELED); + } + req->orphaned = true; + } + + /* + * We can't cancel Scatter Gather DMA in the middle of the + * operation or a partial (not full) DMA transfer would reach + * the storage so we wait for completion instead (we behave + * like if the DMA was completed by the time the guest trying + * to cancel dma with bmdma_cmd_writeb with BM_CMD_START not + * set). + * + * In the future we'll be able to safely cancel the I/O if the + * whole DMA operation will be submitted to disk with a single + * aio operation with preadv/pwritev. 
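+ *
+ * Editor's aside: the reason buffered requests *can* be cancelled
+ * instantly is the bounce buffer set up by ide_buffered_readv()
+ * above - the backend reads into the request's private qiov, and
+ * only ide_buffered_readv_cb() copies the result into the
+ * guest-visible original_qiov. Once orphaned is set, a late
+ * completion just frees the bounce buffer, so cancelled data can
+ * never reach guest memory.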
+ */ + if (s->bus->dma->aiocb) { + trace_ide_cancel_dma_sync_remaining(); + blk_drain(s->blk); + assert(s->bus->dma->aiocb == NULL); + } +} + +static void ide_sector_read(IDEState *s); + +static void ide_sector_read_cb(void *opaque, int ret) +{ + IDEState *s = opaque; + int n; + + s->pio_aiocb = NULL; + s->status &= ~BUSY_STAT; + + if (ret != 0) { + if (ide_handle_rw_error(s, -ret, IDE_RETRY_PIO | + IDE_RETRY_READ)) { + return; + } + } + + block_acct_done(blk_get_stats(s->blk), &s->acct); + + n = s->nsector; + if (n > s->req_nb_sectors) { + n = s->req_nb_sectors; + } + + ide_set_sector(s, ide_get_sector(s) + n); + s->nsector -= n; + /* Allow the guest to read the io_buffer */ + ide_transfer_start(s, s->io_buffer, n * BDRV_SECTOR_SIZE, ide_sector_read); + ide_set_irq(s->bus); +} + +static void ide_sector_read(IDEState *s) +{ + int64_t sector_num; + int n; + + s->status = READY_STAT | SEEK_STAT; + s->error = 0; /* not needed by IDE spec, but needed by Windows */ + sector_num = ide_get_sector(s); + n = s->nsector; + + if (n == 0) { + ide_transfer_stop(s); + return; + } + + s->status |= BUSY_STAT; + + if (n > s->req_nb_sectors) { + n = s->req_nb_sectors; + } + + trace_ide_sector_read(sector_num, n); + + if (!ide_sect_range_ok(s, sector_num, n)) { + ide_rw_error(s); + block_acct_invalid(blk_get_stats(s->blk), BLOCK_ACCT_READ); + return; + } + + qemu_iovec_init_buf(&s->qiov, s->io_buffer, n * BDRV_SECTOR_SIZE); + + block_acct_start(blk_get_stats(s->blk), &s->acct, + n * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ); + s->pio_aiocb = ide_buffered_readv(s, sector_num, &s->qiov, n, + ide_sector_read_cb, s); +} + +void dma_buf_commit(IDEState *s, uint32_t tx_bytes) +{ + if (s->bus->dma->ops->commit_buf) { + s->bus->dma->ops->commit_buf(s->bus->dma, tx_bytes); + } + s->io_buffer_offset += tx_bytes; + qemu_sglist_destroy(&s->sg); +} + +void ide_set_inactive(IDEState *s, bool more) +{ + s->bus->dma->aiocb = NULL; + ide_clear_retry(s); + if (s->bus->dma->ops->set_inactive) { + s->bus->dma->ops->set_inactive(s->bus->dma, more); + } + ide_cmd_done(s); +} + +void ide_dma_error(IDEState *s) +{ + dma_buf_commit(s, 0); + ide_abort_command(s); + ide_set_inactive(s, false); + ide_set_irq(s->bus); +} + +int ide_handle_rw_error(IDEState *s, int error, int op) +{ + bool is_read = (op & IDE_RETRY_READ) != 0; + BlockErrorAction action = blk_get_error_action(s->blk, is_read, error); + + if (action == BLOCK_ERROR_ACTION_STOP) { + assert(s->bus->retry_unit == s->unit); + s->bus->error_status = op; + } else if (action == BLOCK_ERROR_ACTION_REPORT) { + block_acct_failed(blk_get_stats(s->blk), &s->acct); + if (IS_IDE_RETRY_DMA(op)) { + ide_dma_error(s); + } else if (IS_IDE_RETRY_ATAPI(op)) { + ide_atapi_io_error(s, -error); + } else { + ide_rw_error(s); + } + } + blk_error_action(s->blk, action, is_read, error); + return action != BLOCK_ERROR_ACTION_IGNORE; +} + +static void ide_dma_cb(void *opaque, int ret) +{ + IDEState *s = opaque; + int n; + int64_t sector_num; + uint64_t offset; + bool stay_active = false; + int32_t prep_size = 0; + + if (ret == -EINVAL) { + ide_dma_error(s); + return; + } + + if (ret < 0) { + if (ide_handle_rw_error(s, -ret, ide_dma_cmd_to_retry(s->dma_cmd))) { + s->bus->dma->aiocb = NULL; + dma_buf_commit(s, 0); + return; + } + } + + if (s->io_buffer_size > s->nsector * 512) { + /* + * The PRDs were longer than needed for this request. + * The Active bit must remain set after the request completes. 
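+ * (stay_active is later passed to ide_set_inactive() as its 'more' + * argument at the end of this callback, telling the DMA provider + * to keep its Active flag set.)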
+ */ + n = s->nsector; + stay_active = true; + } else { + n = s->io_buffer_size >> 9; + } + + sector_num = ide_get_sector(s); + if (n > 0) { + assert(n * 512 == s->sg.size); + dma_buf_commit(s, s->sg.size); + sector_num += n; + ide_set_sector(s, sector_num); + s->nsector -= n; + } + + /* end of transfer ? */ + if (s->nsector == 0) { + s->status = READY_STAT | SEEK_STAT; + ide_set_irq(s->bus); + goto eot; + } + + /* launch next transfer */ + n = s->nsector; + s->io_buffer_index = 0; + s->io_buffer_size = n * 512; + prep_size = s->bus->dma->ops->prepare_buf(s->bus->dma, s->io_buffer_size); + /* prepare_buf() must succeed and respect the limit */ + assert(prep_size >= 0 && prep_size <= n * 512); + + /* + * Now prep_size stores the number of bytes in the sglist, and + * s->io_buffer_size stores the number of bytes described by the PRDs. + */ + + if (prep_size < n * 512) { + /* + * The PRDs are too short for this request. Error condition! + * Reset the Active bit and don't raise the interrupt. + */ + s->status = READY_STAT | SEEK_STAT; + dma_buf_commit(s, 0); + goto eot; + } + + trace_ide_dma_cb(s, sector_num, n, IDE_DMA_CMD_str(s->dma_cmd)); + + if ((s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) && + !ide_sect_range_ok(s, sector_num, n)) { + ide_dma_error(s); + block_acct_invalid(blk_get_stats(s->blk), s->acct.type); + return; + } + + offset = sector_num << BDRV_SECTOR_BITS; + switch (s->dma_cmd) { + case IDE_DMA_READ: + s->bus->dma->aiocb = dma_blk_read(s->blk, &s->sg, offset, + BDRV_SECTOR_SIZE, ide_dma_cb, s); + break; + case IDE_DMA_WRITE: + s->bus->dma->aiocb = dma_blk_write(s->blk, &s->sg, offset, + BDRV_SECTOR_SIZE, ide_dma_cb, s); + break; + case IDE_DMA_TRIM: + s->bus->dma->aiocb = dma_blk_io(blk_get_aio_context(s->blk), + &s->sg, offset, BDRV_SECTOR_SIZE, + ide_issue_trim, s, ide_dma_cb, s, + DMA_DIRECTION_TO_DEVICE); + break; + default: + abort(); + } + return; + +eot: + if (s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) { + block_acct_done(blk_get_stats(s->blk), &s->acct); + } + ide_set_inactive(s, stay_active); +} + +static void ide_sector_start_dma(IDEState *s, enum ide_dma_cmd dma_cmd) +{ + s->status = READY_STAT | SEEK_STAT | DRQ_STAT; + s->io_buffer_size = 0; + s->dma_cmd = dma_cmd; + + switch (dma_cmd) { + case IDE_DMA_READ: + block_acct_start(blk_get_stats(s->blk), &s->acct, + s->nsector * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ); + break; + case IDE_DMA_WRITE: + block_acct_start(blk_get_stats(s->blk), &s->acct, + s->nsector * BDRV_SECTOR_SIZE, BLOCK_ACCT_WRITE); + break; + default: + break; + } + + ide_start_dma(s, ide_dma_cb); +} + +void ide_start_dma(IDEState *s, BlockCompletionFunc *cb) +{ + s->io_buffer_index = 0; + ide_set_retry(s); + if (s->bus->dma->ops->start_dma) { + s->bus->dma->ops->start_dma(s->bus->dma, s, cb); + } +} + +static void ide_sector_write(IDEState *s); + +static void ide_sector_write_timer_cb(void *opaque) +{ + IDEState *s = opaque; + ide_set_irq(s->bus); +} + +static void ide_sector_write_cb(void *opaque, int ret) +{ + IDEState *s = opaque; + int n; + + s->pio_aiocb = NULL; + s->status &= ~BUSY_STAT; + + if (ret != 0) { + if (ide_handle_rw_error(s, -ret, IDE_RETRY_PIO)) { + return; + } + } + + block_acct_done(blk_get_stats(s->blk), &s->acct); + + n = s->nsector; + if (n > s->req_nb_sectors) { + n = s->req_nb_sectors; + } + s->nsector -= n; + + ide_set_sector(s, ide_get_sector(s) + n); + if (s->nsector == 0) { + /* no more sectors to write */ + ide_transfer_stop(s); + } else { + int n1 = s->nsector; + if (n1 > s->req_nb_sectors) { 
+ n1 = s->req_nb_sectors; + } + ide_transfer_start(s, s->io_buffer, n1 * BDRV_SECTOR_SIZE, + ide_sector_write); + } + + if (win2k_install_hack && ((++s->irq_count % 16) == 0)) { + /* It seems there is a bug in the Windows 2000 installer HDD + IDE driver which fills the disk with empty logs when the + IDE write IRQ comes too early. This hack tries to correct + that at the expense of slower write performance. Use this + option _only_ to install Windows 2000. You must disable it + for normal use. */ + timer_mod(s->sector_write_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + + (NANOSECONDS_PER_SECOND / 1000)); + } else { + ide_set_irq(s->bus); + } +} + +static void ide_sector_write(IDEState *s) +{ + int64_t sector_num; + int n; + + s->status = READY_STAT | SEEK_STAT | BUSY_STAT; + sector_num = ide_get_sector(s); + + n = s->nsector; + if (n > s->req_nb_sectors) { + n = s->req_nb_sectors; + } + + trace_ide_sector_write(sector_num, n); + + if (!ide_sect_range_ok(s, sector_num, n)) { + ide_rw_error(s); + block_acct_invalid(blk_get_stats(s->blk), BLOCK_ACCT_WRITE); + return; + } + + qemu_iovec_init_buf(&s->qiov, s->io_buffer, n * BDRV_SECTOR_SIZE); + + block_acct_start(blk_get_stats(s->blk), &s->acct, + n * BDRV_SECTOR_SIZE, BLOCK_ACCT_WRITE); + s->pio_aiocb = blk_aio_pwritev(s->blk, sector_num << BDRV_SECTOR_BITS, + &s->qiov, 0, ide_sector_write_cb, s); +} + +static void ide_flush_cb(void *opaque, int ret) +{ + IDEState *s = opaque; + + s->pio_aiocb = NULL; + + if (ret < 0) { + /* XXX: What sector number to set here? */ + if (ide_handle_rw_error(s, -ret, IDE_RETRY_FLUSH)) { + return; + } + } + + if (s->blk) { + block_acct_done(blk_get_stats(s->blk), &s->acct); + } + s->status = READY_STAT | SEEK_STAT; + ide_cmd_done(s); + ide_set_irq(s->bus); +} + +static void ide_flush_cache(IDEState *s) +{ + if (s->blk == NULL) { + ide_flush_cb(s, 0); + return; + } + + s->status |= BUSY_STAT; + ide_set_retry(s); + block_acct_start(blk_get_stats(s->blk), &s->acct, 0, BLOCK_ACCT_FLUSH); + s->pio_aiocb = blk_aio_flush(s->blk, ide_flush_cb, s); +} + +static void ide_cfata_metadata_inquiry(IDEState *s) +{ + uint16_t *p; + uint32_t spd; + + p = (uint16_t *) s->io_buffer; + memset(p, 0, 0x200); + spd = ((s->mdata_size - 1) >> 9) + 1; + + put_le16(p + 0, 0x0001); /* Data format revision */ + put_le16(p + 1, 0x0000); /* Media property: silicon */ + put_le16(p + 2, s->media_changed); /* Media status */ + put_le16(p + 3, s->mdata_size & 0xffff); /* Capacity in bytes (low) */ + put_le16(p + 4, s->mdata_size >> 16); /* Capacity in bytes (high) */ + put_le16(p + 5, spd & 0xffff); /* Sectors per device (low) */ + put_le16(p + 6, spd >> 16); /* Sectors per device (high) */ +} + +static void ide_cfata_metadata_read(IDEState *s) +{ + uint16_t *p; + + if (((s->hcyl << 16) | s->lcyl) << 9 > s->mdata_size + 2) { + s->status = ERR_STAT; + s->error = ABRT_ERR; + return; + } + + p = (uint16_t *) s->io_buffer; + memset(p, 0, 0x200); + + put_le16(p + 0, s->media_changed); /* Media status */ + memcpy(p + 1, s->mdata_storage + (((s->hcyl << 16) | s->lcyl) << 9), + MIN(MIN(s->mdata_size - (((s->hcyl << 16) | s->lcyl) << 9), + s->nsector << 9), 0x200 - 2)); +} + +static void ide_cfata_metadata_write(IDEState *s) +{ + if (((s->hcyl << 16) | s->lcyl) << 9 > s->mdata_size + 2) { + s->status = ERR_STAT; + s->error = ABRT_ERR; + return; + } + + s->media_changed = 0; + + memcpy(s->mdata_storage + (((s->hcyl << 16) | s->lcyl) << 9), + s->io_buffer + 2, + MIN(MIN(s->mdata_size - (((s->hcyl << 16) | s->lcyl) << 9), + s->nsector << 9), 0x200 - 2)); +}
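+ +/* + * Worked example for the CF-ATA metadata helpers above (illustrative + * note only): the 512-byte metadata block is selected by the cylinder + * registers, giving a byte offset of + * + * ((hcyl << 16) | lcyl) << 9 + * + * so hcyl = 0x00, lcyl = 0x02 addresses byte 0x400 of s->mdata_storage, + * and each transfer copies at most 0x200 - 2 payload bytes so that the + * leading media-status word still fits in one 512-byte sector. + */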
+ +/* called when the inserted state of the media has changed */ +static void ide_cd_change_cb(void *opaque, bool load, Error **errp) +{ + IDEState *s = opaque; + uint64_t nb_sectors; + + s->tray_open = !load; + blk_get_geometry(s->blk, &nb_sectors); + s->nb_sectors = nb_sectors; + + /* + * First indicate to the guest that a CD has been removed. That's + * done on the next command the guest sends us. + * + * Then we set UNIT_ATTENTION, by which the guest will + * detect a new CD in the drive. See ide_atapi_cmd() for details. + */ + s->cdrom_changed = 1; + s->events.new_media = true; + s->events.eject_request = false; + ide_set_irq(s->bus); +} + +static void ide_cd_eject_request_cb(void *opaque, bool force) +{ + IDEState *s = opaque; + + s->events.eject_request = true; + if (force) { + s->tray_locked = false; + } + ide_set_irq(s->bus); +} + +static void ide_cmd_lba48_transform(IDEState *s, int lba48) +{ + s->lba48 = lba48; + + /* handle the 'magic' 0 nsector count conversion here. to avoid + * fiddling with the rest of the read logic, we just store the + * full sector count in ->nsector and ignore ->hob_nsector from now + */ + if (!s->lba48) { + if (!s->nsector) + s->nsector = 256; + } else { + if (!s->nsector && !s->hob_nsector) + s->nsector = 65536; + else { + int lo = s->nsector; + int hi = s->hob_nsector; + + s->nsector = (hi << 8) | lo; + } + } +} + +static void ide_clear_hob(IDEBus *bus) +{ + /* any write clears HOB high bit of device control register */ + bus->cmd &= ~(IDE_CTRL_HOB); +} + +/* IOport [W]rite [R]egisters */ +enum ATA_IOPORT_WR { + ATA_IOPORT_WR_DATA = 0, + ATA_IOPORT_WR_FEATURES = 1, + ATA_IOPORT_WR_SECTOR_COUNT = 2, + ATA_IOPORT_WR_SECTOR_NUMBER = 3, + ATA_IOPORT_WR_CYLINDER_LOW = 4, + ATA_IOPORT_WR_CYLINDER_HIGH = 5, + ATA_IOPORT_WR_DEVICE_HEAD = 6, + ATA_IOPORT_WR_COMMAND = 7, + ATA_IOPORT_WR_NUM_REGISTERS, +}; + +const char *ATA_IOPORT_WR_lookup[ATA_IOPORT_WR_NUM_REGISTERS] = { + [ATA_IOPORT_WR_DATA] = "Data", + [ATA_IOPORT_WR_FEATURES] = "Features", + [ATA_IOPORT_WR_SECTOR_COUNT] = "Sector Count", + [ATA_IOPORT_WR_SECTOR_NUMBER] = "Sector Number", + [ATA_IOPORT_WR_CYLINDER_LOW] = "Cylinder Low", + [ATA_IOPORT_WR_CYLINDER_HIGH] = "Cylinder High", + [ATA_IOPORT_WR_DEVICE_HEAD] = "Device/Head", + [ATA_IOPORT_WR_COMMAND] = "Command" +}; + +void ide_ioport_write(void *opaque, uint32_t addr, uint32_t val) +{ + IDEBus *bus = opaque; + IDEState *s = idebus_active_if(bus); + int reg_num = addr & 7; + + trace_ide_ioport_write(addr, ATA_IOPORT_WR_lookup[reg_num], val, bus, s); + + /* ignore writes to command block while busy with previous command */ + if (reg_num != 7 && (s->status & (BUSY_STAT|DRQ_STAT))) { + return; + } + + /* NOTE: Device0 and Device1 both receive incoming register writes. + * (They're on the same bus! They have to!) 
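+ * Each register write below therefore updates the shadow copy in both + * bus->ifs[0] and bus->ifs[1]; only the DEV bit written to the + * Device/Head register selects which drive a later command runs on.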
*/ + + switch (reg_num) { + case 0: + break; + case ATA_IOPORT_WR_FEATURES: + ide_clear_hob(bus); + bus->ifs[0].hob_feature = bus->ifs[0].feature; + bus->ifs[1].hob_feature = bus->ifs[1].feature; + bus->ifs[0].feature = val; + bus->ifs[1].feature = val; + break; + case ATA_IOPORT_WR_SECTOR_COUNT: + ide_clear_hob(bus); + bus->ifs[0].hob_nsector = bus->ifs[0].nsector; + bus->ifs[1].hob_nsector = bus->ifs[1].nsector; + bus->ifs[0].nsector = val; + bus->ifs[1].nsector = val; + break; + case ATA_IOPORT_WR_SECTOR_NUMBER: + ide_clear_hob(bus); + bus->ifs[0].hob_sector = bus->ifs[0].sector; + bus->ifs[1].hob_sector = bus->ifs[1].sector; + bus->ifs[0].sector = val; + bus->ifs[1].sector = val; + break; + case ATA_IOPORT_WR_CYLINDER_LOW: + ide_clear_hob(bus); + bus->ifs[0].hob_lcyl = bus->ifs[0].lcyl; + bus->ifs[1].hob_lcyl = bus->ifs[1].lcyl; + bus->ifs[0].lcyl = val; + bus->ifs[1].lcyl = val; + break; + case ATA_IOPORT_WR_CYLINDER_HIGH: + ide_clear_hob(bus); + bus->ifs[0].hob_hcyl = bus->ifs[0].hcyl; + bus->ifs[1].hob_hcyl = bus->ifs[1].hcyl; + bus->ifs[0].hcyl = val; + bus->ifs[1].hcyl = val; + break; + case ATA_IOPORT_WR_DEVICE_HEAD: + ide_clear_hob(bus); + bus->ifs[0].select = val | (ATA_DEV_ALWAYS_ON); + bus->ifs[1].select = val | (ATA_DEV_ALWAYS_ON); + /* select drive */ + bus->unit = (val & (ATA_DEV_SELECT)) ? 1 : 0; + break; + default: + case ATA_IOPORT_WR_COMMAND: + ide_clear_hob(bus); + qemu_irq_lower(bus->irq); + ide_exec_cmd(bus, val); + break; + } +} + +static void ide_reset(IDEState *s) +{ + trace_ide_reset(s); + + if (s->pio_aiocb) { + blk_aio_cancel(s->pio_aiocb); + s->pio_aiocb = NULL; + } + + if (s->drive_kind == IDE_CFATA) + s->mult_sectors = 0; + else + s->mult_sectors = MAX_MULT_SECTORS; + /* ide regs */ + s->feature = 0; + s->error = 0; + s->nsector = 0; + s->sector = 0; + s->lcyl = 0; + s->hcyl = 0; + + /* lba48 */ + s->hob_feature = 0; + s->hob_sector = 0; + s->hob_nsector = 0; + s->hob_lcyl = 0; + s->hob_hcyl = 0; + + s->select = (ATA_DEV_ALWAYS_ON); + s->status = READY_STAT | SEEK_STAT; + + s->lba48 = 0; + + /* ATAPI specific */ + s->sense_key = 0; + s->asc = 0; + s->cdrom_changed = 0; + s->packet_transfer_size = 0; + s->elementary_transfer_size = 0; + s->io_buffer_index = 0; + s->cd_sector_size = 0; + s->atapi_dma = 0; + s->tray_locked = 0; + s->tray_open = 0; + /* ATA DMA state */ + s->io_buffer_size = 0; + s->req_nb_sectors = 0; + + ide_set_signature(s); + /* init the transfer handler so that 0xffff is returned on data + accesses */ + s->end_transfer_func = ide_dummy_transfer_stop; + ide_dummy_transfer_stop(s); + s->media_changed = 0; +} + +static bool cmd_nop(IDEState *s, uint8_t cmd) +{ + return true; +} + +static bool cmd_device_reset(IDEState *s, uint8_t cmd) +{ + /* Halt PIO (in the DRQ phase), then DMA */ + ide_transfer_halt(s); + ide_cancel_dma_sync(s); + + /* Reset any PIO commands, reset signature, etc */ + ide_reset(s); + + /* RESET: ATA8-ACS3 7.10.4 "Normal Outputs"; + * ATA8-ACS3 Table 184 "Device Signatures for Normal Output" */ + s->status = 0x00; + + /* Do not overwrite status register */ + return false; +} + +static bool cmd_data_set_management(IDEState *s, uint8_t cmd) +{ + switch (s->feature) { + case DSM_TRIM: + if (s->blk) { + ide_sector_start_dma(s, IDE_DMA_TRIM); + return false; + } + break; + } + + ide_abort_command(s); + return true; +} + +static bool cmd_identify(IDEState *s, uint8_t cmd) +{ + if (s->blk && s->drive_kind != IDE_CD) { + if (s->drive_kind != IDE_CFATA) { + ide_identify(s); + } else { + ide_cfata_identify(s); + } + s->status = 
READY_STAT | SEEK_STAT; + ide_transfer_start(s, s->io_buffer, 512, ide_transfer_stop); + ide_set_irq(s->bus); + return false; + } else { + if (s->drive_kind == IDE_CD) { + ide_set_signature(s); + } + ide_abort_command(s); + } + + return true; +} + +static bool cmd_verify(IDEState *s, uint8_t cmd) +{ + bool lba48 = (cmd == WIN_VERIFY_EXT); + + /* do sector number check ? */ + ide_cmd_lba48_transform(s, lba48); + + return true; +} + +static bool cmd_set_multiple_mode(IDEState *s, uint8_t cmd) +{ + if (s->drive_kind == IDE_CFATA && s->nsector == 0) { + /* Disable Read and Write Multiple */ + s->mult_sectors = 0; + } else if ((s->nsector & 0xff) != 0 && + ((s->nsector & 0xff) > MAX_MULT_SECTORS || + (s->nsector & (s->nsector - 1)) != 0)) { + ide_abort_command(s); + } else { + s->mult_sectors = s->nsector & 0xff; + } + + return true; +} + +static bool cmd_read_multiple(IDEState *s, uint8_t cmd) +{ + bool lba48 = (cmd == WIN_MULTREAD_EXT); + + if (!s->blk || !s->mult_sectors) { + ide_abort_command(s); + return true; + } + + ide_cmd_lba48_transform(s, lba48); + s->req_nb_sectors = s->mult_sectors; + ide_sector_read(s); + return false; +} + +static bool cmd_write_multiple(IDEState *s, uint8_t cmd) +{ + bool lba48 = (cmd == WIN_MULTWRITE_EXT); + int n; + + if (!s->blk || !s->mult_sectors) { + ide_abort_command(s); + return true; + } + + ide_cmd_lba48_transform(s, lba48); + + s->req_nb_sectors = s->mult_sectors; + n = MIN(s->nsector, s->req_nb_sectors); + + s->status = SEEK_STAT | READY_STAT; + ide_transfer_start(s, s->io_buffer, 512 * n, ide_sector_write); + + s->media_changed = 1; + + return false; +} + +static bool cmd_read_pio(IDEState *s, uint8_t cmd) +{ + bool lba48 = (cmd == WIN_READ_EXT); + + if (s->drive_kind == IDE_CD) { + ide_set_signature(s); /* odd, but ATA4 8.27.5.2 requires it */ + ide_abort_command(s); + return true; + } + + if (!s->blk) { + ide_abort_command(s); + return true; + } + + ide_cmd_lba48_transform(s, lba48); + s->req_nb_sectors = 1; + ide_sector_read(s); + + return false; +} + +static bool cmd_write_pio(IDEState *s, uint8_t cmd) +{ + bool lba48 = (cmd == WIN_WRITE_EXT); + + if (!s->blk) { + ide_abort_command(s); + return true; + } + + ide_cmd_lba48_transform(s, lba48); + + s->req_nb_sectors = 1; + s->status = SEEK_STAT | READY_STAT; + ide_transfer_start(s, s->io_buffer, 512, ide_sector_write); + + s->media_changed = 1; + + return false; +} + +static bool cmd_read_dma(IDEState *s, uint8_t cmd) +{ + bool lba48 = (cmd == WIN_READDMA_EXT); + + if (!s->blk) { + ide_abort_command(s); + return true; + } + + ide_cmd_lba48_transform(s, lba48); + ide_sector_start_dma(s, IDE_DMA_READ); + + return false; +} + +static bool cmd_write_dma(IDEState *s, uint8_t cmd) +{ + bool lba48 = (cmd == WIN_WRITEDMA_EXT); + + if (!s->blk) { + ide_abort_command(s); + return true; + } + + ide_cmd_lba48_transform(s, lba48); + ide_sector_start_dma(s, IDE_DMA_WRITE); + + s->media_changed = 1; + + return false; +} + +static bool cmd_flush_cache(IDEState *s, uint8_t cmd) +{ + ide_flush_cache(s); + return false; +} + +static bool cmd_seek(IDEState *s, uint8_t cmd) +{ + /* XXX: Check that seek is within bounds */ + return true; +} + +static bool cmd_read_native_max(IDEState *s, uint8_t cmd) +{ + bool lba48 = (cmd == WIN_READ_NATIVE_MAX_EXT); + + /* Refuse if no sectors are addressable (e.g. 
medium not inserted) */ + if (s->nb_sectors == 0) { + ide_abort_command(s); + return true; + } + + ide_cmd_lba48_transform(s, lba48); + ide_set_sector(s, s->nb_sectors - 1); + + return true; +} + +static bool cmd_check_power_mode(IDEState *s, uint8_t cmd) +{ + s->nsector = 0xff; /* device active or idle */ + return true; +} + +static bool cmd_set_features(IDEState *s, uint8_t cmd) +{ + uint16_t *identify_data; + + if (!s->blk) { + ide_abort_command(s); + return true; + } + + /* XXX: valid for CDROM ? */ + switch (s->feature) { + case 0x02: /* write cache enable */ + blk_set_enable_write_cache(s->blk, true); + identify_data = (uint16_t *)s->identify_data; + put_le16(identify_data + 85, (1 << 14) | (1 << 5) | 1); + return true; + case 0x82: /* write cache disable */ + blk_set_enable_write_cache(s->blk, false); + identify_data = (uint16_t *)s->identify_data; + put_le16(identify_data + 85, (1 << 14) | 1); + ide_flush_cache(s); + return false; + case 0xcc: /* reverting to power-on defaults enable */ + case 0x66: /* reverting to power-on defaults disable */ + case 0xaa: /* read look-ahead enable */ + case 0x55: /* read look-ahead disable */ + case 0x05: /* set advanced power management mode */ + case 0x85: /* disable advanced power management mode */ + case 0x69: /* NOP */ + case 0x67: /* NOP */ + case 0x96: /* NOP */ + case 0x9a: /* NOP */ + case 0x42: /* enable Automatic Acoustic Mode */ + case 0xc2: /* disable Automatic Acoustic Mode */ + return true; + case 0x03: /* set transfer mode */ + { + uint8_t val = s->nsector & 0x07; + identify_data = (uint16_t *)s->identify_data; + + switch (s->nsector >> 3) { + case 0x00: /* pio default */ + case 0x01: /* pio mode */ + put_le16(identify_data + 62, 0x07); + put_le16(identify_data + 63, 0x07); + put_le16(identify_data + 88, 0x3f); + break; + case 0x02: /* single word dma mode */ + put_le16(identify_data + 62, 0x07 | (1 << (val + 8))); + put_le16(identify_data + 63, 0x07); + put_le16(identify_data + 88, 0x3f); + break; + case 0x04: /* mdma mode */ + put_le16(identify_data + 62, 0x07); + put_le16(identify_data + 63, 0x07 | (1 << (val + 8))); + put_le16(identify_data + 88, 0x3f); + break; + case 0x08: /* udma mode */ + put_le16(identify_data + 62, 0x07); + put_le16(identify_data + 63, 0x07); + put_le16(identify_data + 88, 0x3f | (1 << (val + 8))); + break; + default: + goto abort_cmd; + } + return true; + } + } + +abort_cmd: + ide_abort_command(s); + return true; +} + + +/*** ATAPI commands ***/ + +static bool cmd_identify_packet(IDEState *s, uint8_t cmd) +{ + ide_atapi_identify(s); + s->status = READY_STAT | SEEK_STAT; + ide_transfer_start(s, s->io_buffer, 512, ide_transfer_stop); + ide_set_irq(s->bus); + return false; +} + +static bool cmd_exec_dev_diagnostic(IDEState *s, uint8_t cmd) +{ + ide_set_signature(s); + + if (s->drive_kind == IDE_CD) { + s->status = 0; /* ATAPI spec (v6) section 9.10 defines packet + * devices to return a clear status register + * with READY_STAT *not* set. */ + s->error = 0x01; + } else { + s->status = READY_STAT | SEEK_STAT; + /* The bits of the error register are not as usual for this command! + * They are part of the regular output (this is why ERR_STAT isn't set) + * Device 0 passed, Device 1 passed or not present. 
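+ * (In other words, guests must read error == 0x01 as a successful + * diagnostic result; ERR_STAT stays clear in the status register.)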
*/ + s->error = 0x01; + ide_set_irq(s->bus); + } + + return false; +} + +static bool cmd_packet(IDEState *s, uint8_t cmd) +{ + /* overlapping commands not supported */ + if (s->feature & 0x02) { + ide_abort_command(s); + return true; + } + + s->status = READY_STAT | SEEK_STAT; + s->atapi_dma = s->feature & 1; + if (s->atapi_dma) { + s->dma_cmd = IDE_DMA_ATAPI; + } + s->nsector = 1; + ide_transfer_start(s, s->io_buffer, ATAPI_PACKET_SIZE, + ide_atapi_cmd); + return false; +} + + +/*** CF-ATA commands ***/ + +static bool cmd_cfa_req_ext_error_code(IDEState *s, uint8_t cmd) +{ + s->error = 0x09; /* miscellaneous error */ + s->status = READY_STAT | SEEK_STAT; + ide_set_irq(s->bus); + + return false; +} + +static bool cmd_cfa_erase_sectors(IDEState *s, uint8_t cmd) +{ + /* WIN_SECURITY_FREEZE_LOCK has the same ID as CFA_WEAR_LEVEL and is + * required for Windows 8 to work with AHCI */ + + if (cmd == CFA_WEAR_LEVEL) { + s->nsector = 0; + } + + if (cmd == CFA_ERASE_SECTORS) { + s->media_changed = 1; + } + + return true; +} + +static bool cmd_cfa_translate_sector(IDEState *s, uint8_t cmd) +{ + s->status = READY_STAT | SEEK_STAT; + + memset(s->io_buffer, 0, 0x200); + s->io_buffer[0x00] = s->hcyl; /* Cyl MSB */ + s->io_buffer[0x01] = s->lcyl; /* Cyl LSB */ + s->io_buffer[0x02] = s->select; /* Head */ + s->io_buffer[0x03] = s->sector; /* Sector */ + s->io_buffer[0x04] = ide_get_sector(s) >> 16; /* LBA MSB */ + s->io_buffer[0x05] = ide_get_sector(s) >> 8; /* LBA */ + s->io_buffer[0x06] = ide_get_sector(s) >> 0; /* LBA LSB */ + s->io_buffer[0x13] = 0x00; /* Erase flag */ + s->io_buffer[0x18] = 0x00; /* Hot count */ + s->io_buffer[0x19] = 0x00; /* Hot count */ + s->io_buffer[0x1a] = 0x01; /* Hot count */ + + ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop); + ide_set_irq(s->bus); + + return false; +} + +static bool cmd_cfa_access_metadata_storage(IDEState *s, uint8_t cmd) +{ + switch (s->feature) { + case 0x02: /* Inquiry Metadata Storage */ + ide_cfata_metadata_inquiry(s); + break; + case 0x03: /* Read Metadata Storage */ + ide_cfata_metadata_read(s); + break; + case 0x04: /* Write Metadata Storage */ + ide_cfata_metadata_write(s); + break; + default: + ide_abort_command(s); + return true; + } + + ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop); + s->status = 0x00; /* NOTE: READY is _not_ set */ + ide_set_irq(s->bus); + + return false; +} + +static bool cmd_ibm_sense_condition(IDEState *s, uint8_t cmd) +{ + switch (s->feature) { + case 0x01: /* sense temperature in device */ + s->nsector = 0x50; /* +20 C */ + break; + default: + ide_abort_command(s); + return true; + } + + return true; +} + + +/*** SMART commands ***/ + +static bool cmd_smart(IDEState *s, uint8_t cmd) +{ + int n; + + if (s->hcyl != 0xc2 || s->lcyl != 0x4f) { + goto abort_cmd; + } + + if (!s->smart_enabled && s->feature != SMART_ENABLE) { + goto abort_cmd; + } + + switch (s->feature) { + case SMART_DISABLE: + s->smart_enabled = 0; + return true; + + case SMART_ENABLE: + s->smart_enabled = 1; + return true; + + case SMART_ATTR_AUTOSAVE: + switch (s->sector) { + case 0x00: + s->smart_autosave = 0; + break; + case 0xf1: + s->smart_autosave = 1; + break; + default: + goto abort_cmd; + } + return true; + + case SMART_STATUS: + if (!s->smart_errors) { + s->hcyl = 0xc2; + s->lcyl = 0x4f; + } else { + s->hcyl = 0x2c; + s->lcyl = 0xf4; + } + return true; + + case SMART_READ_THRESH: + memset(s->io_buffer, 0, 0x200); + s->io_buffer[0] = 0x01; /* smart struct version */ + + for (n = 0; n < ARRAY_SIZE(smart_attributes); 
n++) { + s->io_buffer[2 + 0 + (n * 12)] = smart_attributes[n][0]; + s->io_buffer[2 + 1 + (n * 12)] = smart_attributes[n][11]; + } + + /* checksum */ + for (n = 0; n < 511; n++) { + s->io_buffer[511] += s->io_buffer[n]; + } + s->io_buffer[511] = 0x100 - s->io_buffer[511]; + + s->status = READY_STAT | SEEK_STAT; + ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop); + ide_set_irq(s->bus); + return false; + + case SMART_READ_DATA: + memset(s->io_buffer, 0, 0x200); + s->io_buffer[0] = 0x01; /* smart struct version */ + + for (n = 0; n < ARRAY_SIZE(smart_attributes); n++) { + int i; + for (i = 0; i < 11; i++) { + s->io_buffer[2 + i + (n * 12)] = smart_attributes[n][i]; + } + } + + s->io_buffer[362] = 0x02 | (s->smart_autosave ? 0x80 : 0x00); + if (s->smart_selftest_count == 0) { + s->io_buffer[363] = 0; + } else { + s->io_buffer[363] = + s->smart_selftest_data[3 + + (s->smart_selftest_count - 1) * + 24]; + } + s->io_buffer[364] = 0x20; + s->io_buffer[365] = 0x01; + /* offline data collection capacity: execute + self-test*/ + s->io_buffer[367] = (1 << 4 | 1 << 3 | 1); + s->io_buffer[368] = 0x03; /* smart capability (1) */ + s->io_buffer[369] = 0x00; /* smart capability (2) */ + s->io_buffer[370] = 0x01; /* error logging supported */ + s->io_buffer[372] = 0x02; /* minutes for poll short test */ + s->io_buffer[373] = 0x36; /* minutes for poll ext test */ + s->io_buffer[374] = 0x01; /* minutes for poll conveyance */ + + for (n = 0; n < 511; n++) { + s->io_buffer[511] += s->io_buffer[n]; + } + s->io_buffer[511] = 0x100 - s->io_buffer[511]; + + s->status = READY_STAT | SEEK_STAT; + ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop); + ide_set_irq(s->bus); + return false; + + case SMART_READ_LOG: + switch (s->sector) { + case 0x01: /* summary smart error log */ + memset(s->io_buffer, 0, 0x200); + s->io_buffer[0] = 0x01; + s->io_buffer[1] = 0x00; /* no error entries */ + s->io_buffer[452] = s->smart_errors & 0xff; + s->io_buffer[453] = (s->smart_errors & 0xff00) >> 8; + + for (n = 0; n < 511; n++) { + s->io_buffer[511] += s->io_buffer[n]; + } + s->io_buffer[511] = 0x100 - s->io_buffer[511]; + break; + case 0x06: /* smart self test log */ + memset(s->io_buffer, 0, 0x200); + s->io_buffer[0] = 0x01; + if (s->smart_selftest_count == 0) { + s->io_buffer[508] = 0; + } else { + s->io_buffer[508] = s->smart_selftest_count; + for (n = 2; n < 506; n++) { + s->io_buffer[n] = s->smart_selftest_data[n]; + } + } + + for (n = 0; n < 511; n++) { + s->io_buffer[511] += s->io_buffer[n]; + } + s->io_buffer[511] = 0x100 - s->io_buffer[511]; + break; + default: + goto abort_cmd; + } + s->status = READY_STAT | SEEK_STAT; + ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop); + ide_set_irq(s->bus); + return false; + + case SMART_EXECUTE_OFFLINE: + switch (s->sector) { + case 0: /* off-line routine */ + case 1: /* short self test */ + case 2: /* extended self test */ + s->smart_selftest_count++; + if (s->smart_selftest_count > 21) { + s->smart_selftest_count = 1; + } + n = 2 + (s->smart_selftest_count - 1) * 24; + s->smart_selftest_data[n] = s->sector; + s->smart_selftest_data[n + 1] = 0x00; /* OK and finished */ + s->smart_selftest_data[n + 2] = 0x34; /* hour count lsb */ + s->smart_selftest_data[n + 3] = 0x12; /* hour count msb */ + break; + default: + goto abort_cmd; + } + return true; + } + +abort_cmd: + ide_abort_command(s); + return true; +} + +#define HD_OK (1u << IDE_HD) +#define CD_OK (1u << IDE_CD) +#define CFA_OK (1u << IDE_CFATA) +#define HD_CFA_OK (HD_OK | CFA_OK) +#define ALL_OK 
(HD_OK | CD_OK | CFA_OK) + +/* Set the Disk Seek Completed status bit during completion */ +#define SET_DSC (1u << 8) + +/* See ACS-2 T13/2015-D Table B.2 Command codes */ +static const struct { + /* Returns true if the completion code should be run */ + bool (*handler)(IDEState *s, uint8_t cmd); + int flags; +} ide_cmd_table[0x100] = { + /* NOP not implemented, mandatory for CD */ + [CFA_REQ_EXT_ERROR_CODE] = { cmd_cfa_req_ext_error_code, CFA_OK }, + [WIN_DSM] = { cmd_data_set_management, HD_CFA_OK }, + [WIN_DEVICE_RESET] = { cmd_device_reset, CD_OK }, + [WIN_RECAL] = { cmd_nop, HD_CFA_OK | SET_DSC}, + [WIN_READ] = { cmd_read_pio, ALL_OK }, + [WIN_READ_ONCE] = { cmd_read_pio, HD_CFA_OK }, + [WIN_READ_EXT] = { cmd_read_pio, HD_CFA_OK }, + [WIN_READDMA_EXT] = { cmd_read_dma, HD_CFA_OK }, + [WIN_READ_NATIVE_MAX_EXT] = { cmd_read_native_max, HD_CFA_OK | SET_DSC }, + [WIN_MULTREAD_EXT] = { cmd_read_multiple, HD_CFA_OK }, + [WIN_WRITE] = { cmd_write_pio, HD_CFA_OK }, + [WIN_WRITE_ONCE] = { cmd_write_pio, HD_CFA_OK }, + [WIN_WRITE_EXT] = { cmd_write_pio, HD_CFA_OK }, + [WIN_WRITEDMA_EXT] = { cmd_write_dma, HD_CFA_OK }, + [CFA_WRITE_SECT_WO_ERASE] = { cmd_write_pio, CFA_OK }, + [WIN_MULTWRITE_EXT] = { cmd_write_multiple, HD_CFA_OK }, + [WIN_WRITE_VERIFY] = { cmd_write_pio, HD_CFA_OK }, + [WIN_VERIFY] = { cmd_verify, HD_CFA_OK | SET_DSC }, + [WIN_VERIFY_ONCE] = { cmd_verify, HD_CFA_OK | SET_DSC }, + [WIN_VERIFY_EXT] = { cmd_verify, HD_CFA_OK | SET_DSC }, + [WIN_SEEK] = { cmd_seek, HD_CFA_OK | SET_DSC }, + [CFA_TRANSLATE_SECTOR] = { cmd_cfa_translate_sector, CFA_OK }, + [WIN_DIAGNOSE] = { cmd_exec_dev_diagnostic, ALL_OK }, + [WIN_SPECIFY] = { cmd_nop, HD_CFA_OK | SET_DSC }, + [WIN_STANDBYNOW2] = { cmd_nop, HD_CFA_OK }, + [WIN_IDLEIMMEDIATE2] = { cmd_nop, HD_CFA_OK }, + [WIN_STANDBY2] = { cmd_nop, HD_CFA_OK }, + [WIN_SETIDLE2] = { cmd_nop, HD_CFA_OK }, + [WIN_CHECKPOWERMODE2] = { cmd_check_power_mode, HD_CFA_OK | SET_DSC }, + [WIN_SLEEPNOW2] = { cmd_nop, HD_CFA_OK }, + [WIN_PACKETCMD] = { cmd_packet, CD_OK }, + [WIN_PIDENTIFY] = { cmd_identify_packet, CD_OK }, + [WIN_SMART] = { cmd_smart, HD_CFA_OK | SET_DSC }, + [CFA_ACCESS_METADATA_STORAGE] = { cmd_cfa_access_metadata_storage, CFA_OK }, + [CFA_ERASE_SECTORS] = { cmd_cfa_erase_sectors, CFA_OK | SET_DSC }, + [WIN_MULTREAD] = { cmd_read_multiple, HD_CFA_OK }, + [WIN_MULTWRITE] = { cmd_write_multiple, HD_CFA_OK }, + [WIN_SETMULT] = { cmd_set_multiple_mode, HD_CFA_OK | SET_DSC }, + [WIN_READDMA] = { cmd_read_dma, HD_CFA_OK }, + [WIN_READDMA_ONCE] = { cmd_read_dma, HD_CFA_OK }, + [WIN_WRITEDMA] = { cmd_write_dma, HD_CFA_OK }, + [WIN_WRITEDMA_ONCE] = { cmd_write_dma, HD_CFA_OK }, + [CFA_WRITE_MULTI_WO_ERASE] = { cmd_write_multiple, CFA_OK }, + [WIN_STANDBYNOW1] = { cmd_nop, HD_CFA_OK }, + [WIN_IDLEIMMEDIATE] = { cmd_nop, HD_CFA_OK }, + [WIN_STANDBY] = { cmd_nop, HD_CFA_OK }, + [WIN_SETIDLE1] = { cmd_nop, HD_CFA_OK }, + [WIN_CHECKPOWERMODE1] = { cmd_check_power_mode, HD_CFA_OK | SET_DSC }, + [WIN_SLEEPNOW1] = { cmd_nop, HD_CFA_OK }, + [WIN_FLUSH_CACHE] = { cmd_flush_cache, ALL_OK }, + [WIN_FLUSH_CACHE_EXT] = { cmd_flush_cache, HD_CFA_OK }, + [WIN_IDENTIFY] = { cmd_identify, ALL_OK }, + [WIN_SETFEATURES] = { cmd_set_features, ALL_OK | SET_DSC }, + [IBM_SENSE_CONDITION] = { cmd_ibm_sense_condition, CFA_OK | SET_DSC }, + [CFA_WEAR_LEVEL] = { cmd_cfa_erase_sectors, HD_CFA_OK | SET_DSC }, + [WIN_READ_NATIVE_MAX] = { cmd_read_native_max, HD_CFA_OK | SET_DSC }, +}; + +static bool ide_cmd_permitted(IDEState *s, uint32_t cmd) +{ + return cmd < 
ARRAY_SIZE(ide_cmd_table) + && (ide_cmd_table[cmd].flags & (1u << s->drive_kind)); +} + +void ide_exec_cmd(IDEBus *bus, uint32_t val) +{ + IDEState *s; + bool complete; + + s = idebus_active_if(bus); + trace_ide_exec_cmd(bus, s, val); + + /* ignore commands to non existent slave */ + if (s != bus->ifs && !s->blk) { + return; + } + + /* Only RESET is allowed while BSY and/or DRQ are set, + * and only to ATAPI devices. */ + if (s->status & (BUSY_STAT|DRQ_STAT)) { + if (val != WIN_DEVICE_RESET || s->drive_kind != IDE_CD) { + return; + } + } + + if (!ide_cmd_permitted(s, val)) { + ide_abort_command(s); + ide_set_irq(s->bus); + return; + } + + s->status = READY_STAT | BUSY_STAT; + s->error = 0; + s->io_buffer_offset = 0; + + complete = ide_cmd_table[val].handler(s, val); + if (complete) { + s->status &= ~BUSY_STAT; + assert(!!s->error == !!(s->status & ERR_STAT)); + + if ((ide_cmd_table[val].flags & SET_DSC) && !s->error) { + s->status |= SEEK_STAT; + } + + ide_cmd_done(s); + ide_set_irq(s->bus); + } +} + +/* IOport [R]ead [R]egisters */ +enum ATA_IOPORT_RR { + ATA_IOPORT_RR_DATA = 0, + ATA_IOPORT_RR_ERROR = 1, + ATA_IOPORT_RR_SECTOR_COUNT = 2, + ATA_IOPORT_RR_SECTOR_NUMBER = 3, + ATA_IOPORT_RR_CYLINDER_LOW = 4, + ATA_IOPORT_RR_CYLINDER_HIGH = 5, + ATA_IOPORT_RR_DEVICE_HEAD = 6, + ATA_IOPORT_RR_STATUS = 7, + ATA_IOPORT_RR_NUM_REGISTERS, +}; + +const char *ATA_IOPORT_RR_lookup[ATA_IOPORT_RR_NUM_REGISTERS] = { + [ATA_IOPORT_RR_DATA] = "Data", + [ATA_IOPORT_RR_ERROR] = "Error", + [ATA_IOPORT_RR_SECTOR_COUNT] = "Sector Count", + [ATA_IOPORT_RR_SECTOR_NUMBER] = "Sector Number", + [ATA_IOPORT_RR_CYLINDER_LOW] = "Cylinder Low", + [ATA_IOPORT_RR_CYLINDER_HIGH] = "Cylinder High", + [ATA_IOPORT_RR_DEVICE_HEAD] = "Device/Head", + [ATA_IOPORT_RR_STATUS] = "Status" +}; + +uint32_t ide_ioport_read(void *opaque, uint32_t addr) +{ + IDEBus *bus = opaque; + IDEState *s = idebus_active_if(bus); + uint32_t reg_num; + int ret, hob; + + reg_num = addr & 7; + hob = bus->cmd & (IDE_CTRL_HOB); + switch (reg_num) { + case ATA_IOPORT_RR_DATA: + ret = 0xff; + break; + case ATA_IOPORT_RR_ERROR: + if ((!bus->ifs[0].blk && !bus->ifs[1].blk) || + (s != bus->ifs && !s->blk)) { + ret = 0; + } else if (!hob) { + ret = s->error; + } else { + ret = s->hob_feature; + } + break; + case ATA_IOPORT_RR_SECTOR_COUNT: + if (!bus->ifs[0].blk && !bus->ifs[1].blk) { + ret = 0; + } else if (!hob) { + ret = s->nsector & 0xff; + } else { + ret = s->hob_nsector; + } + break; + case ATA_IOPORT_RR_SECTOR_NUMBER: + if (!bus->ifs[0].blk && !bus->ifs[1].blk) { + ret = 0; + } else if (!hob) { + ret = s->sector; + } else { + ret = s->hob_sector; + } + break; + case ATA_IOPORT_RR_CYLINDER_LOW: + if (!bus->ifs[0].blk && !bus->ifs[1].blk) { + ret = 0; + } else if (!hob) { + ret = s->lcyl; + } else { + ret = s->hob_lcyl; + } + break; + case ATA_IOPORT_RR_CYLINDER_HIGH: + if (!bus->ifs[0].blk && !bus->ifs[1].blk) { + ret = 0; + } else if (!hob) { + ret = s->hcyl; + } else { + ret = s->hob_hcyl; + } + break; + case ATA_IOPORT_RR_DEVICE_HEAD: + if (!bus->ifs[0].blk && !bus->ifs[1].blk) { + ret = 0; + } else { + ret = s->select; + } + break; + default: + case ATA_IOPORT_RR_STATUS: + if ((!bus->ifs[0].blk && !bus->ifs[1].blk) || + (s != bus->ifs && !s->blk)) { + ret = 0; + } else { + ret = s->status; + } + qemu_irq_lower(bus->irq); + break; + } + + trace_ide_ioport_read(addr, ATA_IOPORT_RR_lookup[reg_num], ret, bus, s); + return ret; +} + +uint32_t ide_status_read(void *opaque, uint32_t addr) +{ + IDEBus *bus = opaque; + IDEState *s = idebus_active_if(bus); + 
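/* Alternate Status: returns the same value as the Status register, + * but reading it must not clear a pending IRQ (contrast the + * ATA_IOPORT_RR_STATUS case above, which lowers bus->irq). */ + 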
int ret; + + if ((!bus->ifs[0].blk && !bus->ifs[1].blk) || + (s != bus->ifs && !s->blk)) { + ret = 0; + } else { + ret = s->status; + } + + trace_ide_status_read(addr, ret, bus, s); + return ret; +} + +static void ide_perform_srst(IDEState *s) +{ + s->status |= BUSY_STAT; + + /* Halt PIO (Via register state); PIO BH remains scheduled. */ + ide_transfer_halt(s); + + /* Cancel DMA -- may drain block device and invoke callbacks */ + ide_cancel_dma_sync(s); + + /* Cancel PIO callback, reset registers/signature, etc */ + ide_reset(s); + + /* perform diagnostic */ + cmd_exec_dev_diagnostic(s, WIN_DIAGNOSE); +} + +static void ide_bus_perform_srst(void *opaque) +{ + IDEBus *bus = opaque; + IDEState *s; + int i; + + for (i = 0; i < 2; i++) { + s = &bus->ifs[i]; + ide_perform_srst(s); + } + + bus->cmd &= ~IDE_CTRL_RESET; +} + +void ide_ctrl_write(void *opaque, uint32_t addr, uint32_t val) +{ + IDEBus *bus = opaque; + IDEState *s; + int i; + + trace_ide_ctrl_write(addr, val, bus); + + /* Device0 and Device1 each have their own control register, + * but QEMU models it as just one register in the controller. */ + if (!(bus->cmd & IDE_CTRL_RESET) && (val & IDE_CTRL_RESET)) { + for (i = 0; i < 2; i++) { + s = &bus->ifs[i]; + s->status |= BUSY_STAT; + } + replay_bh_schedule_oneshot_event(qemu_get_aio_context(), + ide_bus_perform_srst, bus); + } + + bus->cmd = val; +} + +/* + * Returns true if the running PIO transfer is a PIO out (i.e. data is + * transferred from the device to the guest), false if it's a PIO in + */ +static bool ide_is_pio_out(IDEState *s) +{ + if (s->end_transfer_func == ide_sector_write || + s->end_transfer_func == ide_atapi_cmd) { + return false; + } else if (s->end_transfer_func == ide_sector_read || + s->end_transfer_func == ide_transfer_stop || + s->end_transfer_func == ide_atapi_cmd_reply_end || + s->end_transfer_func == ide_dummy_transfer_stop) { + return true; + } + + abort(); +} + +void ide_data_writew(void *opaque, uint32_t addr, uint32_t val) +{ + IDEBus *bus = opaque; + IDEState *s = idebus_active_if(bus); + uint8_t *p; + + trace_ide_data_writew(addr, val, bus, s); + + /* PIO data access allowed only when DRQ bit is set. The result of a write + * during PIO out is indeterminate, just ignore it. */ + if (!(s->status & DRQ_STAT) || ide_is_pio_out(s)) { + return; + } + + p = s->data_ptr; + if (p + 2 > s->data_end) { + return; + } + + *(uint16_t *)p = le16_to_cpu(val); + p += 2; + s->data_ptr = p; + if (p >= s->data_end) { + s->status &= ~DRQ_STAT; + s->end_transfer_func(s); + } +} + +uint32_t ide_data_readw(void *opaque, uint32_t addr) +{ + IDEBus *bus = opaque; + IDEState *s = idebus_active_if(bus); + uint8_t *p; + int ret; + + /* PIO data access allowed only when DRQ bit is set. The result of a read + * during PIO in is indeterminate, return 0 and don't move forward. */ + if (!(s->status & DRQ_STAT) || !ide_is_pio_out(s)) { + return 0; + } + + p = s->data_ptr; + if (p + 2 > s->data_end) { + return 0; + } + + ret = cpu_to_le16(*(uint16_t *)p); + p += 2; + s->data_ptr = p; + if (p >= s->data_end) { + s->status &= ~DRQ_STAT; + s->end_transfer_func(s); + } + + trace_ide_data_readw(addr, ret, bus, s); + return ret; +} + +void ide_data_writel(void *opaque, uint32_t addr, uint32_t val) +{ + IDEBus *bus = opaque; + IDEState *s = idebus_active_if(bus); + uint8_t *p; + + trace_ide_data_writel(addr, val, bus, s); + + /* PIO data access allowed only when DRQ bit is set. The result of a write + * during PIO out is indeterminate, just ignore it. 
*/ + if (!(s->status & DRQ_STAT) || ide_is_pio_out(s)) { + return; + } + + p = s->data_ptr; + if (p + 4 > s->data_end) { + return; + } + + *(uint32_t *)p = le32_to_cpu(val); + p += 4; + s->data_ptr = p; + if (p >= s->data_end) { + s->status &= ~DRQ_STAT; + s->end_transfer_func(s); + } +} + +uint32_t ide_data_readl(void *opaque, uint32_t addr) +{ + IDEBus *bus = opaque; + IDEState *s = idebus_active_if(bus); + uint8_t *p; + int ret; + + /* PIO data access allowed only when DRQ bit is set. The result of a read + * during PIO in is indeterminate, return 0 and don't move forward. */ + if (!(s->status & DRQ_STAT) || !ide_is_pio_out(s)) { + ret = 0; + goto out; + } + + p = s->data_ptr; + if (p + 4 > s->data_end) { + return 0; + } + + ret = cpu_to_le32(*(uint32_t *)p); + p += 4; + s->data_ptr = p; + if (p >= s->data_end) { + s->status &= ~DRQ_STAT; + s->end_transfer_func(s); + } + +out: + trace_ide_data_readl(addr, ret, bus, s); + return ret; +} + +static void ide_dummy_transfer_stop(IDEState *s) +{ + s->data_ptr = s->io_buffer; + s->data_end = s->io_buffer; + s->io_buffer[0] = 0xff; + s->io_buffer[1] = 0xff; + s->io_buffer[2] = 0xff; + s->io_buffer[3] = 0xff; +} + +void ide_bus_reset(IDEBus *bus) +{ + bus->unit = 0; + bus->cmd = 0; + ide_reset(&bus->ifs[0]); + ide_reset(&bus->ifs[1]); + ide_clear_hob(bus); + + /* pending async DMA */ + if (bus->dma->aiocb) { + trace_ide_bus_reset_aio(); + blk_aio_cancel(bus->dma->aiocb); + bus->dma->aiocb = NULL; + } + + /* reset dma provider too */ + if (bus->dma->ops->reset) { + bus->dma->ops->reset(bus->dma); + } +} + +static bool ide_cd_is_tray_open(void *opaque) +{ + return ((IDEState *)opaque)->tray_open; +} + +static bool ide_cd_is_medium_locked(void *opaque) +{ + return ((IDEState *)opaque)->tray_locked; +} + +static void ide_resize_cb(void *opaque) +{ + IDEState *s = opaque; + uint64_t nb_sectors; + + if (!s->identify_set) { + return; + } + + blk_get_geometry(s->blk, &nb_sectors); + s->nb_sectors = nb_sectors; + + /* Update the identify data buffer. */ + if (s->drive_kind == IDE_CFATA) { + ide_cfata_identify_size(s); + } else { + /* IDE_CD uses a different set of callbacks entirely. */ + assert(s->drive_kind != IDE_CD); + ide_identify_size(s); + } +} + +static const BlockDevOps ide_cd_block_ops = { + .change_media_cb = ide_cd_change_cb, + .eject_request_cb = ide_cd_eject_request_cb, + .is_tray_open = ide_cd_is_tray_open, + .is_medium_locked = ide_cd_is_medium_locked, +}; + +static const BlockDevOps ide_hd_block_ops = { + .resize_cb = ide_resize_cb, +}; + +int ide_init_drive(IDEState *s, BlockBackend *blk, IDEDriveKind kind, + const char *version, const char *serial, const char *model, + uint64_t wwn, + uint32_t cylinders, uint32_t heads, uint32_t secs, + int chs_trans, Error **errp) +{ + uint64_t nb_sectors; + + s->blk = blk; + s->drive_kind = kind; + + blk_get_geometry(blk, &nb_sectors); + s->cylinders = cylinders; + s->heads = heads; + s->sectors = secs; + s->chs_trans = chs_trans; + s->nb_sectors = nb_sectors; + s->wwn = wwn; + /* The SMART values should be preserved across power cycles + but they aren't. 
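+ (They are simply reinitialised below each time the drive is set up, + and vmstate_ide_drive further down does not migrate them either.) 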
*/ + s->smart_enabled = 1; + s->smart_autosave = 1; + s->smart_errors = 0; + s->smart_selftest_count = 0; + if (kind == IDE_CD) { + blk_set_dev_ops(blk, &ide_cd_block_ops, s); + blk_set_guest_block_size(blk, 2048); + } else { + if (!blk_is_inserted(s->blk)) { + error_setg(errp, "Device needs media, but drive is empty"); + return -1; + } + if (!blk_is_writable(blk)) { + error_setg(errp, "Can't use a read-only drive"); + return -1; + } + blk_set_dev_ops(blk, &ide_hd_block_ops, s); + } + if (serial) { + pstrcpy(s->drive_serial_str, sizeof(s->drive_serial_str), serial); + } else { + snprintf(s->drive_serial_str, sizeof(s->drive_serial_str), + "QM%05d", s->drive_serial); + } + if (model) { + pstrcpy(s->drive_model_str, sizeof(s->drive_model_str), model); + } else { + switch (kind) { + case IDE_CD: + strcpy(s->drive_model_str, "QEMU DVD-ROM"); + break; + case IDE_CFATA: + strcpy(s->drive_model_str, "QEMU MICRODRIVE"); + break; + default: + strcpy(s->drive_model_str, "QEMU HARDDISK"); + break; + } + } + + if (version) { + pstrcpy(s->version, sizeof(s->version), version); + } else { + pstrcpy(s->version, sizeof(s->version), qemu_hw_version()); + } + + ide_reset(s); + blk_iostatus_enable(blk); + return 0; +} + +static void ide_init1(IDEBus *bus, int unit) +{ + static int drive_serial = 1; + IDEState *s = &bus->ifs[unit]; + + s->bus = bus; + s->unit = unit; + s->drive_serial = drive_serial++; + /* we need at least 2k alignment for accessing CDROMs using O_DIRECT */ + s->io_buffer_total_len = IDE_DMA_BUF_SECTORS*512 + 4; + s->io_buffer = qemu_memalign(2048, s->io_buffer_total_len); + memset(s->io_buffer, 0, s->io_buffer_total_len); + + s->smart_selftest_data = blk_blockalign(s->blk, 512); + memset(s->smart_selftest_data, 0, 512); + + s->sector_write_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, + ide_sector_write_timer_cb, s); +} + +static int ide_nop_int(const IDEDMA *dma, bool is_write) +{ + return 0; +} + +static void ide_nop(const IDEDMA *dma) +{ +} + +static int32_t ide_nop_int32(const IDEDMA *dma, int32_t l) +{ + return 0; +} + +static const IDEDMAOps ide_dma_nop_ops = { + .prepare_buf = ide_nop_int32, + .restart_dma = ide_nop, + .rw_buf = ide_nop_int, +}; + +static void ide_restart_dma(IDEState *s, enum ide_dma_cmd dma_cmd) +{ + s->unit = s->bus->retry_unit; + ide_set_sector(s, s->bus->retry_sector_num); + s->nsector = s->bus->retry_nsector; + s->bus->dma->ops->restart_dma(s->bus->dma); + s->io_buffer_size = 0; + s->dma_cmd = dma_cmd; + ide_start_dma(s, ide_dma_cb); +} + +static void ide_restart_bh(void *opaque) +{ + IDEBus *bus = opaque; + IDEState *s; + bool is_read; + int error_status; + + qemu_bh_delete(bus->bh); + bus->bh = NULL; + + error_status = bus->error_status; + if (bus->error_status == 0) { + return; + } + + s = idebus_active_if(bus); + is_read = (bus->error_status & IDE_RETRY_READ) != 0; + + /* The error status must be cleared before resubmitting the request: The + * request may fail again, and this case can only be distinguished if the + * called function can set a new error status. */ + bus->error_status = 0; + + /* The HBA has generically asked to be kicked on retry */ + if (error_status & IDE_RETRY_HBA) { + if (s->bus->dma->ops->restart) { + s->bus->dma->ops->restart(s->bus->dma); + } + } else if (IS_IDE_RETRY_DMA(error_status)) { + if (error_status & IDE_RETRY_TRIM) { + ide_restart_dma(s, IDE_DMA_TRIM); + } else { + ide_restart_dma(s, is_read ? 
IDE_DMA_READ : IDE_DMA_WRITE); + } + } else if (IS_IDE_RETRY_PIO(error_status)) { + if (is_read) { + ide_sector_read(s); + } else { + ide_sector_write(s); + } + } else if (error_status & IDE_RETRY_FLUSH) { + ide_flush_cache(s); + } else if (IS_IDE_RETRY_ATAPI(error_status)) { + assert(s->end_transfer_func == ide_atapi_cmd); + ide_atapi_dma_restart(s); + } else { + abort(); + } +} + +static void ide_restart_cb(void *opaque, bool running, RunState state) +{ + IDEBus *bus = opaque; + + if (!running) + return; + + if (!bus->bh) { + bus->bh = qemu_bh_new(ide_restart_bh, bus); + qemu_bh_schedule(bus->bh); + } +} + +void ide_register_restart_cb(IDEBus *bus) +{ + if (bus->dma->ops->restart_dma) { + bus->vmstate = qemu_add_vm_change_state_handler(ide_restart_cb, bus); + } +} + +static IDEDMA ide_dma_nop = { + .ops = &ide_dma_nop_ops, + .aiocb = NULL, +}; + +void ide_init2(IDEBus *bus, qemu_irq irq) +{ + int i; + + for(i = 0; i < 2; i++) { + ide_init1(bus, i); + ide_reset(&bus->ifs[i]); + } + bus->irq = irq; + bus->dma = &ide_dma_nop; +} + +void ide_exit(IDEState *s) +{ + timer_free(s->sector_write_timer); + qemu_vfree(s->smart_selftest_data); + qemu_vfree(s->io_buffer); +} + +static bool is_identify_set(void *opaque, int version_id) +{ + IDEState *s = opaque; + + return s->identify_set != 0; +} + +static EndTransferFunc* transfer_end_table[] = { + ide_sector_read, + ide_sector_write, + ide_transfer_stop, + ide_atapi_cmd_reply_end, + ide_atapi_cmd, + ide_dummy_transfer_stop, +}; + +static int transfer_end_table_idx(EndTransferFunc *fn) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(transfer_end_table); i++) + if (transfer_end_table[i] == fn) + return i; + + return -1; +} + +static int ide_drive_post_load(void *opaque, int version_id) +{ + IDEState *s = opaque; + + if (s->blk && s->identify_set) { + blk_set_enable_write_cache(s->blk, !!(s->identify_data[85] & (1 << 5))); + } + return 0; +} + +static int ide_drive_pio_post_load(void *opaque, int version_id) +{ + IDEState *s = opaque; + + if (s->end_transfer_fn_idx >= ARRAY_SIZE(transfer_end_table)) { + return -EINVAL; + } + s->end_transfer_func = transfer_end_table[s->end_transfer_fn_idx]; + s->data_ptr = s->io_buffer + s->cur_io_buffer_offset; + s->data_end = s->data_ptr + s->cur_io_buffer_len; + s->atapi_dma = s->feature & 1; /* as per cmd_packet */ + + return 0; +} + +static int ide_drive_pio_pre_save(void *opaque) +{ + IDEState *s = opaque; + int idx; + + s->cur_io_buffer_offset = s->data_ptr - s->io_buffer; + s->cur_io_buffer_len = s->data_end - s->data_ptr; + + idx = transfer_end_table_idx(s->end_transfer_func); + if (idx == -1) { + fprintf(stderr, "%s: invalid end_transfer_func for DRQ_STAT\n", + __func__); + s->end_transfer_fn_idx = 2; + } else { + s->end_transfer_fn_idx = idx; + } + + return 0; +} + +static bool ide_drive_pio_state_needed(void *opaque) +{ + IDEState *s = opaque; + + return ((s->status & DRQ_STAT) != 0) + || (s->bus->error_status & IDE_RETRY_PIO); +} + +static bool ide_tray_state_needed(void *opaque) +{ + IDEState *s = opaque; + + return s->tray_open || s->tray_locked; +} + +static bool ide_atapi_gesn_needed(void *opaque) +{ + IDEState *s = opaque; + + return s->events.new_media || s->events.eject_request; +} + +static bool ide_error_needed(void *opaque) +{ + IDEBus *bus = opaque; + + return (bus->error_status != 0); +} + +/* Fields for GET_EVENT_STATUS_NOTIFICATION ATAPI command */ +static const VMStateDescription vmstate_ide_atapi_gesn_state = { + .name ="ide_drive/atapi/gesn_state", + .version_id = 1, + .minimum_version_id = 1, 
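+ /* only migrated while a new-media or eject-request event is + * pending, cf. ide_atapi_gesn_needed() above */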
+ .needed = ide_atapi_gesn_needed, + .fields = (VMStateField[]) { + VMSTATE_BOOL(events.new_media, IDEState), + VMSTATE_BOOL(events.eject_request, IDEState), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_ide_tray_state = { + .name = "ide_drive/tray_state", + .version_id = 1, + .minimum_version_id = 1, + .needed = ide_tray_state_needed, + .fields = (VMStateField[]) { + VMSTATE_BOOL(tray_open, IDEState), + VMSTATE_BOOL(tray_locked, IDEState), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_ide_drive_pio_state = { + .name = "ide_drive/pio_state", + .version_id = 1, + .minimum_version_id = 1, + .pre_save = ide_drive_pio_pre_save, + .post_load = ide_drive_pio_post_load, + .needed = ide_drive_pio_state_needed, + .fields = (VMStateField[]) { + VMSTATE_INT32(req_nb_sectors, IDEState), + VMSTATE_VARRAY_INT32(io_buffer, IDEState, io_buffer_total_len, 1, + vmstate_info_uint8, uint8_t), + VMSTATE_INT32(cur_io_buffer_offset, IDEState), + VMSTATE_INT32(cur_io_buffer_len, IDEState), + VMSTATE_UINT8(end_transfer_fn_idx, IDEState), + VMSTATE_INT32(elementary_transfer_size, IDEState), + VMSTATE_INT32(packet_transfer_size, IDEState), + VMSTATE_END_OF_LIST() + } +}; + +const VMStateDescription vmstate_ide_drive = { + .name = "ide_drive", + .version_id = 3, + .minimum_version_id = 0, + .post_load = ide_drive_post_load, + .fields = (VMStateField[]) { + VMSTATE_INT32(mult_sectors, IDEState), + VMSTATE_INT32(identify_set, IDEState), + VMSTATE_BUFFER_TEST(identify_data, IDEState, is_identify_set), + VMSTATE_UINT8(feature, IDEState), + VMSTATE_UINT8(error, IDEState), + VMSTATE_UINT32(nsector, IDEState), + VMSTATE_UINT8(sector, IDEState), + VMSTATE_UINT8(lcyl, IDEState), + VMSTATE_UINT8(hcyl, IDEState), + VMSTATE_UINT8(hob_feature, IDEState), + VMSTATE_UINT8(hob_sector, IDEState), + VMSTATE_UINT8(hob_nsector, IDEState), + VMSTATE_UINT8(hob_lcyl, IDEState), + VMSTATE_UINT8(hob_hcyl, IDEState), + VMSTATE_UINT8(select, IDEState), + VMSTATE_UINT8(status, IDEState), + VMSTATE_UINT8(lba48, IDEState), + VMSTATE_UINT8(sense_key, IDEState), + VMSTATE_UINT8(asc, IDEState), + VMSTATE_UINT8_V(cdrom_changed, IDEState, 3), + VMSTATE_END_OF_LIST() + }, + .subsections = (const VMStateDescription*[]) { + &vmstate_ide_drive_pio_state, + &vmstate_ide_tray_state, + &vmstate_ide_atapi_gesn_state, + NULL + } +}; + +static const VMStateDescription vmstate_ide_error_status = { + .name ="ide_bus/error", + .version_id = 2, + .minimum_version_id = 1, + .needed = ide_error_needed, + .fields = (VMStateField[]) { + VMSTATE_INT32(error_status, IDEBus), + VMSTATE_INT64_V(retry_sector_num, IDEBus, 2), + VMSTATE_UINT32_V(retry_nsector, IDEBus, 2), + VMSTATE_UINT8_V(retry_unit, IDEBus, 2), + VMSTATE_END_OF_LIST() + } +}; + +const VMStateDescription vmstate_ide_bus = { + .name = "ide_bus", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT8(cmd, IDEBus), + VMSTATE_UINT8(unit, IDEBus), + VMSTATE_END_OF_LIST() + }, + .subsections = (const VMStateDescription*[]) { + &vmstate_ide_error_status, + NULL + } +}; + +void ide_drive_get(DriveInfo **hd, int n) +{ + int i; + + for (i = 0; i < n; i++) { + hd[i] = drive_get_by_index(IF_IDE, i); + } +} diff --git a/hw/ide/ich.c b/hw/ide/ich.c new file mode 100644 index 000000000..1007a51fc --- /dev/null +++ b/hw/ide/ich.c @@ -0,0 +1,197 @@ +/* + * QEMU ICH Emulation + * + * Copyright (c) 2010 Sebastian Herbszt + * Copyright (c) 2010 Alexander Graf + * + * This library is free software; you can redistribute it and/or + * 
modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + * + * + * lspci dump of an ICH-9 real device + * + * 00:1f.2 SATA controller [0106]: Intel Corporation 82801IR/IO/IH (ICH9R/DO/DH) 6 port SATA AHCI Controller [8086:2922] (rev 02) (prog-if 01 [AHCI 1.0]) + * Subsystem: Intel Corporation 82801IR/IO/IH (ICH9R/DO/DH) 6 port SATA AHCI Controller [8086:2922] + * Control: I/O+ Mem+ BusMaster+ SpecCycle- MemWINV- VGASnoop- ParErr- Stepping- SERR- FastB2B- DisINTx+ + * Status: Cap+ 66MHz+ UDF- FastB2B+ ParErr- DEVSEL=medium >TAbort- SERR- + * Capabilities: [b0] Vendor Specific Information + * Kernel driver in use: ahci + * Kernel modules: ahci + * 00: 86 80 22 29 07 04 b0 02 02 01 06 01 00 00 00 00 + * 10: 01 d0 00 00 01 cc 00 00 81 c8 00 00 01 c8 00 00 + * 20: 81 c4 00 00 00 90 bf fe 00 00 00 00 86 80 22 29 + * 30: 00 00 00 00 80 00 00 00 00 00 00 00 0f 02 00 00 + * 40: 00 80 00 80 00 00 00 00 00 00 00 00 00 00 00 00 + * 50: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 + * 60: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 + * 70: 01 a8 03 40 08 00 00 00 00 00 00 00 00 00 00 00 + * 80: 05 70 09 00 0c f0 e0 fe d9 41 00 00 00 00 00 00 + * 90: 40 00 0f 82 93 01 00 00 00 00 00 00 00 00 00 00 + * a0: ac 00 00 00 0a 00 12 00 12 b0 10 00 48 00 00 00 + * b0: 09 00 06 20 00 00 00 00 00 00 00 00 00 00 00 00 + * c0: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 + * d0: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 + * e0: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 + * f0: 00 00 00 00 00 00 00 00 86 0f 02 00 00 00 00 00 + * + */ + +#include "qemu/osdep.h" +#include "hw/pci/msi.h" +#include "hw/pci/pci.h" +#include "migration/vmstate.h" +#include "qemu/module.h" +#include "hw/isa/isa.h" +#include "sysemu/dma.h" +#include "hw/ide/pci.h" +#include "ahci_internal.h" + +#define ICH9_MSI_CAP_OFFSET 0x80 +#define ICH9_SATA_CAP_OFFSET 0xA8 + +#define ICH9_IDP_BAR 4 +#define ICH9_MEM_BAR 5 + +#define ICH9_IDP_INDEX 0x10 +#define ICH9_IDP_INDEX_LOG2 0x04 + +static const VMStateDescription vmstate_ich9_ahci = { + .name = "ich9_ahci", + .version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_PCI_DEVICE(parent_obj, AHCIPCIState), + VMSTATE_AHCI(ahci, AHCIPCIState), + VMSTATE_END_OF_LIST() + }, +}; + +static void pci_ich9_reset(DeviceState *dev) +{ + AHCIPCIState *d = ICH9_AHCI(dev); + + ahci_reset(&d->ahci); +} + +static void pci_ich9_ahci_init(Object *obj) +{ + struct AHCIPCIState *d = ICH9_AHCI(obj); + + ahci_init(&d->ahci, DEVICE(obj)); +} + +static void pci_ich9_ahci_realize(PCIDevice *dev, Error **errp) +{ + struct AHCIPCIState *d; + int sata_cap_offset; + uint8_t *sata_cap; + int ret; + + d = ICH9_AHCI(dev); + + ahci_realize(&d->ahci, DEVICE(dev), pci_get_address_space(dev), 6); + + pci_config_set_prog_interface(dev->config, AHCI_PROGMODE_MAJOR_REV_1); + + dev->config[PCI_CACHE_LINE_SIZE] = 0x08; /* Cache line size */ + dev->config[PCI_LATENCY_TIMER] = 0x00; /* Latency timer */ + pci_config_set_interrupt_pin(dev->config, 1); + + /* XXX Software should program this register */ + 
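/* Pre-setting the mode select field to AHCI keeps the controller + * usable even with guests that never program it. */ + 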
dev->config[0x90] = 1 << 6; /* Address Map Register - AHCI mode */ + + d->ahci.irq = pci_allocate_irq(dev); + + pci_register_bar(dev, ICH9_IDP_BAR, PCI_BASE_ADDRESS_SPACE_IO, + &d->ahci.idp); + pci_register_bar(dev, ICH9_MEM_BAR, PCI_BASE_ADDRESS_SPACE_MEMORY, + &d->ahci.mem); + + sata_cap_offset = pci_add_capability(dev, PCI_CAP_ID_SATA, + ICH9_SATA_CAP_OFFSET, SATA_CAP_SIZE, + errp); + if (sata_cap_offset < 0) { + return; + } + + sata_cap = dev->config + sata_cap_offset; + pci_set_word(sata_cap + SATA_CAP_REV, 0x10); + pci_set_long(sata_cap + SATA_CAP_BAR, + (ICH9_IDP_BAR + 0x4) | (ICH9_IDP_INDEX_LOG2 << 4)); + d->ahci.idp_offset = ICH9_IDP_INDEX; + + /* Although the AHCI 1.3 specification states that the first capability + * should be PMCAP, the Intel ICH9 data sheet specifies that the ICH9 + * AHCI device puts the MSI capability first, pointing to 0x80. */ + ret = msi_init(dev, ICH9_MSI_CAP_OFFSET, 1, true, false, NULL); + /* Any error other than -ENOTSUP(board's MSI support is broken) + * is a programming error. Fall back to INTx silently on -ENOTSUP */ + assert(!ret || ret == -ENOTSUP); +} + +static void pci_ich9_uninit(PCIDevice *dev) +{ + struct AHCIPCIState *d; + d = ICH9_AHCI(dev); + + msi_uninit(dev); + ahci_uninit(&d->ahci); + qemu_free_irq(d->ahci.irq); +} + +static void ich_ahci_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + + k->realize = pci_ich9_ahci_realize; + k->exit = pci_ich9_uninit; + k->vendor_id = PCI_VENDOR_ID_INTEL; + k->device_id = PCI_DEVICE_ID_INTEL_82801IR; + k->revision = 0x02; + k->class_id = PCI_CLASS_STORAGE_SATA; + dc->vmsd = &vmstate_ich9_ahci; + dc->reset = pci_ich9_reset; + set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); +} + +static const TypeInfo ich_ahci_info = { + .name = TYPE_ICH9_AHCI, + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(AHCIPCIState), + .instance_init = pci_ich9_ahci_init, + .class_init = ich_ahci_class_init, + .interfaces = (InterfaceInfo[]) { + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { }, + }, +}; + +static void ich_ahci_register_types(void) +{ + type_register_static(&ich_ahci_info); +} + +type_init(ich_ahci_register_types) diff --git a/hw/ide/ioport.c b/hw/ide/ioport.c new file mode 100644 index 000000000..e6caa537f --- /dev/null +++ b/hw/ide/ioport.c @@ -0,0 +1,68 @@ +/* + * QEMU IDE disk and CD/DVD-ROM Emulator + * + * Copyright (c) 2003 Fabrice Bellard + * Copyright (c) 2006 Openedhand Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "hw/isa/isa.h" +#include "qemu/error-report.h" +#include "qemu/timer.h" +#include "sysemu/blockdev.h" +#include "sysemu/dma.h" +#include "hw/block/block.h" +#include "sysemu/block-backend.h" +#include "qapi/error.h" +#include "qemu/cutils.h" +#include "sysemu/replay.h" + +#include "hw/ide/internal.h" +#include "trace.h" + +static const MemoryRegionPortio ide_portio_list[] = { + { 0, 8, 1, .read = ide_ioport_read, .write = ide_ioport_write }, + { 0, 1, 2, .read = ide_data_readw, .write = ide_data_writew }, + { 0, 1, 4, .read = ide_data_readl, .write = ide_data_writel }, + PORTIO_END_OF_LIST(), +}; + +static const MemoryRegionPortio ide_portio2_list[] = { + { 0, 1, 1, .read = ide_status_read, .write = ide_ctrl_write }, + PORTIO_END_OF_LIST(), +}; + +int ide_init_ioport(IDEBus *bus, ISADevice *dev, int iobase, int iobase2) +{ + int ret; + + /* ??? Assume only ISA and PCI configurations, and that the PCI-ISA + bridge has been setup properly to always register with ISA. */ + ret = isa_register_portio_list(dev, &bus->portio_list, + iobase, ide_portio_list, bus, "ide"); + + if (ret == 0 && iobase2) { + ret = isa_register_portio_list(dev, &bus->portio2_list, + iobase2, ide_portio2_list, bus, "ide"); + } + + return ret; +} diff --git a/hw/ide/isa.c b/hw/ide/isa.c new file mode 100644 index 000000000..24bbde24c --- /dev/null +++ b/hw/ide/isa.c @@ -0,0 +1,138 @@ +/* + * QEMU IDE Emulation: ISA Bus support. + * + * Copyright (c) 2003 Fabrice Bellard + * Copyright (c) 2006 Openedhand Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include "hw/isa/isa.h" +#include "hw/qdev-properties.h" +#include "migration/vmstate.h" +#include "qapi/error.h" +#include "qemu/module.h" +#include "sysemu/dma.h" + +#include "hw/ide/internal.h" +#include "qom/object.h" + +/***********************************************************/ +/* ISA IDE definitions */ + +#define TYPE_ISA_IDE "isa-ide" +OBJECT_DECLARE_SIMPLE_TYPE(ISAIDEState, ISA_IDE) + +struct ISAIDEState { + ISADevice parent_obj; + + IDEBus bus; + uint32_t iobase; + uint32_t iobase2; + uint32_t isairq; + qemu_irq irq; +}; + +static void isa_ide_reset(DeviceState *d) +{ + ISAIDEState *s = ISA_IDE(d); + + ide_bus_reset(&s->bus); +} + +static const VMStateDescription vmstate_ide_isa = { + .name = "isa-ide", + .version_id = 3, + .minimum_version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_IDE_BUS(bus, ISAIDEState), + VMSTATE_IDE_DRIVES(bus.ifs, ISAIDEState), + VMSTATE_END_OF_LIST() + } +}; + +static void isa_ide_realizefn(DeviceState *dev, Error **errp) +{ + ISADevice *isadev = ISA_DEVICE(dev); + ISAIDEState *s = ISA_IDE(dev); + + ide_bus_init(&s->bus, sizeof(s->bus), dev, 0, 2); + ide_init_ioport(&s->bus, isadev, s->iobase, s->iobase2); + isa_init_irq(isadev, &s->irq, s->isairq); + ide_init2(&s->bus, s->irq); + vmstate_register(VMSTATE_IF(dev), 0, &vmstate_ide_isa, s); + ide_register_restart_cb(&s->bus); +} + +ISADevice *isa_ide_init(ISABus *bus, int iobase, int iobase2, int isairq, + DriveInfo *hd0, DriveInfo *hd1) +{ + DeviceState *dev; + ISADevice *isadev; + ISAIDEState *s; + + isadev = isa_new(TYPE_ISA_IDE); + dev = DEVICE(isadev); + qdev_prop_set_uint32(dev, "iobase", iobase); + qdev_prop_set_uint32(dev, "iobase2", iobase2); + qdev_prop_set_uint32(dev, "irq", isairq); + isa_realize_and_unref(isadev, bus, &error_fatal); + + s = ISA_IDE(dev); + if (hd0) { + ide_create_drive(&s->bus, 0, hd0); + } + if (hd1) { + ide_create_drive(&s->bus, 1, hd1); + } + return isadev; +} + +static Property isa_ide_properties[] = { + DEFINE_PROP_UINT32("iobase", ISAIDEState, iobase, 0x1f0), + DEFINE_PROP_UINT32("iobase2", ISAIDEState, iobase2, 0x3f6), + DEFINE_PROP_UINT32("irq", ISAIDEState, isairq, 14), + DEFINE_PROP_END_OF_LIST(), +}; + +static void isa_ide_class_initfn(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = isa_ide_realizefn; + dc->fw_name = "ide"; + dc->reset = isa_ide_reset; + device_class_set_props(dc, isa_ide_properties); + set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); +} + +static const TypeInfo isa_ide_info = { + .name = TYPE_ISA_IDE, + .parent = TYPE_ISA_DEVICE, + .instance_size = sizeof(ISAIDEState), + .class_init = isa_ide_class_initfn, +}; + +static void isa_ide_register_types(void) +{ + type_register_static(&isa_ide_info); +} + +type_init(isa_ide_register_types) diff --git a/hw/ide/macio.c b/hw/ide/macio.c new file mode 100644 index 000000000..b03d401ce --- /dev/null +++ b/hw/ide/macio.c @@ -0,0 +1,513 @@ +/* + * QEMU IDE Emulation: MacIO support. + * + * Copyright (c) 2003 Fabrice Bellard + * Copyright (c) 2006 Openedhand Ltd. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "hw/ppc/mac.h" +#include "hw/ppc/mac_dbdma.h" +#include "hw/qdev-properties.h" +#include "migration/vmstate.h" +#include "qemu/module.h" +#include "hw/misc/macio/macio.h" +#include "sysemu/block-backend.h" +#include "sysemu/dma.h" + +#include "hw/ide/internal.h" + +/* debug MACIO */ +// #define DEBUG_MACIO + +#ifdef DEBUG_MACIO +static const int debug_macio = 1; +#else +static const int debug_macio = 0; +#endif + +#define MACIO_DPRINTF(fmt, ...) do { \ + if (debug_macio) { \ + printf(fmt , ## __VA_ARGS__); \ + } \ + } while (0) + + +/***********************************************************/ +/* MacIO based PowerPC IDE */ + +#define MACIO_PAGE_SIZE 4096 + +static void pmac_ide_atapi_transfer_cb(void *opaque, int ret) +{ + DBDMA_io *io = opaque; + MACIOIDEState *m = io->opaque; + IDEState *s = idebus_active_if(&m->bus); + int64_t offset; + + MACIO_DPRINTF("pmac_ide_atapi_transfer_cb\n"); + + if (ret < 0) { + MACIO_DPRINTF("DMA error: %d\n", ret); + qemu_sglist_destroy(&s->sg); + ide_atapi_io_error(s, ret); + goto done; + } + + if (!m->dma_active) { + MACIO_DPRINTF("waiting for data (%#x - %#x - %x)\n", + s->nsector, io->len, s->status); + /* data not ready yet, wait for the channel to get restarted */ + io->processing = false; + return; + } + + if (s->io_buffer_size <= 0) { + MACIO_DPRINTF("End of IDE transfer\n"); + qemu_sglist_destroy(&s->sg); + ide_atapi_cmd_ok(s); + m->dma_active = false; + goto done; + } + + if (io->len == 0) { + MACIO_DPRINTF("End of DMA transfer\n"); + goto done; + } + + if (s->lba == -1) { + /* Non-block ATAPI transfer - just copy to RAM */ + s->io_buffer_size = MIN(s->io_buffer_size, io->len); + dma_memory_write(&address_space_memory, io->addr, s->io_buffer, + s->io_buffer_size); + io->len = 0; + ide_atapi_cmd_ok(s); + m->dma_active = false; + goto done; + } + + /* Calculate current offset */ + offset = ((int64_t)s->lba << 11) + s->io_buffer_index; + + qemu_sglist_init(&s->sg, DEVICE(m), io->len / MACIO_PAGE_SIZE + 1, + &address_space_memory); + qemu_sglist_add(&s->sg, io->addr, io->len); + s->io_buffer_size -= io->len; + s->io_buffer_index += io->len; + io->len = 0; + + s->bus->dma->aiocb = dma_blk_read(s->blk, &s->sg, offset, 0x1, + pmac_ide_atapi_transfer_cb, io); + return; + +done: + dma_memory_unmap(&address_space_memory, io->dma_mem, io->dma_len, + io->dir, io->dma_len); + + if (ret < 0) { + block_acct_failed(blk_get_stats(s->blk), &s->acct); + 
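+        /* the error itself was already signalled via ide_atapi_io_error()
+         * before the goto; only the block accounting is finished here */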
+    } else {
+        block_acct_done(blk_get_stats(s->blk), &s->acct);
+    }
+
+    ide_set_inactive(s, false);
+    io->dma_end(opaque);
+}
+
+static void pmac_ide_transfer_cb(void *opaque, int ret)
+{
+    DBDMA_io *io = opaque;
+    MACIOIDEState *m = io->opaque;
+    IDEState *s = idebus_active_if(&m->bus);
+    int64_t offset;
+
+    MACIO_DPRINTF("pmac_ide_transfer_cb\n");
+
+    if (ret < 0) {
+        MACIO_DPRINTF("DMA error: %d\n", ret);
+        qemu_sglist_destroy(&s->sg);
+        ide_dma_error(s);
+        goto done;
+    }
+
+    if (!m->dma_active) {
+        MACIO_DPRINTF("waiting for data (%#x - %#x - %x)\n",
+                      s->nsector, io->len, s->status);
+        /* data not ready yet, wait for the channel to get restarted */
+        io->processing = false;
+        return;
+    }
+
+    if (s->io_buffer_size <= 0) {
+        MACIO_DPRINTF("End of IDE transfer\n");
+        qemu_sglist_destroy(&s->sg);
+        s->status = READY_STAT | SEEK_STAT;
+        ide_set_irq(s->bus);
+        m->dma_active = false;
+        goto done;
+    }
+
+    if (io->len == 0) {
+        MACIO_DPRINTF("End of DMA transfer\n");
+        goto done;
+    }
+
+    /* Calculate number of sectors */
+    offset = (ide_get_sector(s) << 9) + s->io_buffer_index;
+
+    qemu_sglist_init(&s->sg, DEVICE(m), io->len / MACIO_PAGE_SIZE + 1,
+                     &address_space_memory);
+    qemu_sglist_add(&s->sg, io->addr, io->len);
+    s->io_buffer_size -= io->len;
+    s->io_buffer_index += io->len;
+    io->len = 0;
+
+    switch (s->dma_cmd) {
+    case IDE_DMA_READ:
+        s->bus->dma->aiocb = dma_blk_read(s->blk, &s->sg, offset, 0x1,
+                                          pmac_ide_transfer_cb, io);
+        break;
+    case IDE_DMA_WRITE:
+        s->bus->dma->aiocb = dma_blk_write(s->blk, &s->sg, offset, 0x1,
+                                           pmac_ide_transfer_cb, io);
+        break;
+    case IDE_DMA_TRIM:
+        s->bus->dma->aiocb = dma_blk_io(blk_get_aio_context(s->blk), &s->sg,
+                                        offset, 0x1, ide_issue_trim, s,
+                                        pmac_ide_transfer_cb, io,
+                                        DMA_DIRECTION_TO_DEVICE);
+        break;
+    default:
+        abort();
+    }
+
+    return;
+
+done:
+    dma_memory_unmap(&address_space_memory, io->dma_mem, io->dma_len,
+                     io->dir, io->dma_len);
+
+    if (s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) {
+        if (ret < 0) {
+            block_acct_failed(blk_get_stats(s->blk), &s->acct);
+        } else {
+            block_acct_done(blk_get_stats(s->blk), &s->acct);
+        }
+    }
+
+    ide_set_inactive(s, false);
+    io->dma_end(opaque);
+}
+
+static void pmac_ide_transfer(DBDMA_io *io)
+{
+    MACIOIDEState *m = io->opaque;
+    IDEState *s = idebus_active_if(&m->bus);
+
+    MACIO_DPRINTF("\n");
+
+    if (s->drive_kind == IDE_CD) {
+        block_acct_start(blk_get_stats(s->blk), &s->acct, io->len,
+                         BLOCK_ACCT_READ);
+
+        pmac_ide_atapi_transfer_cb(io, 0);
+        return;
+    }
+
+    switch (s->dma_cmd) {
+    case IDE_DMA_READ:
+        block_acct_start(blk_get_stats(s->blk), &s->acct, io->len,
+                         BLOCK_ACCT_READ);
+        break;
+    case IDE_DMA_WRITE:
+        block_acct_start(blk_get_stats(s->blk), &s->acct, io->len,
+                         BLOCK_ACCT_WRITE);
+        break;
+    default:
+        break;
+    }
+
+    pmac_ide_transfer_cb(io, 0);
+}
+
+static void pmac_ide_flush(DBDMA_io *io)
+{
+    MACIOIDEState *m = io->opaque;
+    IDEState *s = idebus_active_if(&m->bus);
+
+    if (s->bus->dma->aiocb) {
+        blk_drain(s->blk);
+    }
+}
+
+/* PowerMac IDE memory IO */
+static uint64_t pmac_ide_read(void *opaque, hwaddr addr, unsigned size)
+{
+    MACIOIDEState *d = opaque;
+    uint64_t retval = 0xffffffff;
+    int reg = addr >> 4;
+
+    switch (reg) {
+    case 0x0:
+        if (size == 2) {
+            retval = ide_data_readw(&d->bus, 0);
+        } else if (size == 4) {
+            retval = ide_data_readl(&d->bus, 0);
+        }
+        break;
+    case 0x1 ...
0x7: + if (size == 1) { + retval = ide_ioport_read(&d->bus, reg); + } + break; + case 0x8: + case 0x16: + if (size == 1) { + retval = ide_status_read(&d->bus, 0); + } + break; + case 0x20: + if (size == 4) { + retval = d->timing_reg; + } + break; + case 0x30: + /* This is an interrupt state register that only exists + * in the KeyLargo and later variants. Bit 0x8000_0000 + * latches the DMA interrupt and has to be written to + * clear. Bit 0x4000_0000 is an image of the disk + * interrupt. MacOS X relies on this and will hang if + * we don't provide at least the disk interrupt + */ + if (size == 4) { + retval = d->irq_reg; + } + break; + } + + return retval; +} + + +static void pmac_ide_write(void *opaque, hwaddr addr, uint64_t val, + unsigned size) +{ + MACIOIDEState *d = opaque; + int reg = addr >> 4; + + switch (reg) { + case 0x0: + if (size == 2) { + ide_data_writew(&d->bus, 0, val); + } else if (size == 4) { + ide_data_writel(&d->bus, 0, val); + } + break; + case 0x1 ... 0x7: + if (size == 1) { + ide_ioport_write(&d->bus, reg, val); + } + break; + case 0x8: + case 0x16: + if (size == 1) { + ide_ctrl_write(&d->bus, 0, val); + } + break; + case 0x20: + if (size == 4) { + d->timing_reg = val; + } + break; + case 0x30: + if (size == 4) { + if (val & 0x80000000u) { + d->irq_reg &= 0x7fffffff; + } + } + break; + } +} + +static const MemoryRegionOps pmac_ide_ops = { + .read = pmac_ide_read, + .write = pmac_ide_write, + .valid.min_access_size = 1, + .valid.max_access_size = 4, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static const VMStateDescription vmstate_pmac = { + .name = "ide", + .version_id = 5, + .minimum_version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_IDE_BUS(bus, MACIOIDEState), + VMSTATE_IDE_DRIVES(bus.ifs, MACIOIDEState), + VMSTATE_BOOL(dma_active, MACIOIDEState), + VMSTATE_UINT32(timing_reg, MACIOIDEState), + VMSTATE_UINT32(irq_reg, MACIOIDEState), + VMSTATE_END_OF_LIST() + } +}; + +static void macio_ide_reset(DeviceState *dev) +{ + MACIOIDEState *d = MACIO_IDE(dev); + + ide_bus_reset(&d->bus); +} + +static int ide_nop_int(const IDEDMA *dma, bool is_write) +{ + return 0; +} + +static int32_t ide_nop_int32(const IDEDMA *dma, int32_t l) +{ + return 0; +} + +static void ide_dbdma_start(const IDEDMA *dma, IDEState *s, + BlockCompletionFunc *cb) +{ + MACIOIDEState *m = container_of(dma, MACIOIDEState, dma); + + s->io_buffer_index = 0; + if (s->drive_kind == IDE_CD) { + s->io_buffer_size = s->packet_transfer_size; + } else { + s->io_buffer_size = s->nsector * BDRV_SECTOR_SIZE; + } + + MACIO_DPRINTF("\n\n------------ IDE transfer\n"); + MACIO_DPRINTF("buffer_size: %x buffer_index: %x\n", + s->io_buffer_size, s->io_buffer_index); + MACIO_DPRINTF("lba: %x size: %x\n", s->lba, s->io_buffer_size); + MACIO_DPRINTF("-------------------------\n"); + + m->dma_active = true; + DBDMA_kick(m->dbdma); +} + +static const IDEDMAOps dbdma_ops = { + .start_dma = ide_dbdma_start, + .prepare_buf = ide_nop_int32, + .rw_buf = ide_nop_int, +}; + +static void macio_ide_realizefn(DeviceState *dev, Error **errp) +{ + MACIOIDEState *s = MACIO_IDE(dev); + + ide_init2(&s->bus, s->ide_irq); + + /* Register DMA callbacks */ + s->dma.ops = &dbdma_ops; + s->bus.dma = &s->dma; +} + +static void pmac_ide_irq(void *opaque, int n, int level) +{ + MACIOIDEState *s = opaque; + uint32_t mask = 0x80000000u >> n; + + /* We need to reflect the IRQ state in the irq register */ + if (level) { + s->irq_reg |= mask; + } else { + s->irq_reg &= ~mask; + } + + if (n) { + qemu_set_irq(s->real_ide_irq, level); + } else { + 
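+        /* n == 0 is the DBDMA (DMA) interrupt line, n == 1 the IDE one;
+         * see the qemu_allocate_irq() calls in macio_ide_initfn() below */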
qemu_set_irq(s->real_dma_irq, level); + } +} + +static void macio_ide_initfn(Object *obj) +{ + SysBusDevice *d = SYS_BUS_DEVICE(obj); + MACIOIDEState *s = MACIO_IDE(obj); + + ide_bus_init(&s->bus, sizeof(s->bus), DEVICE(obj), 0, 2); + memory_region_init_io(&s->mem, obj, &pmac_ide_ops, s, "pmac-ide", 0x1000); + sysbus_init_mmio(d, &s->mem); + sysbus_init_irq(d, &s->real_ide_irq); + sysbus_init_irq(d, &s->real_dma_irq); + s->dma_irq = qemu_allocate_irq(pmac_ide_irq, s, 0); + s->ide_irq = qemu_allocate_irq(pmac_ide_irq, s, 1); + + object_property_add_link(obj, "dbdma", TYPE_MAC_DBDMA, + (Object **) &s->dbdma, + qdev_prop_allow_set_link_before_realize, 0); +} + +static Property macio_ide_properties[] = { + DEFINE_PROP_UINT32("channel", MACIOIDEState, channel, 0), + DEFINE_PROP_UINT32("addr", MACIOIDEState, addr, -1), + DEFINE_PROP_END_OF_LIST(), +}; + +static void macio_ide_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + + dc->realize = macio_ide_realizefn; + dc->reset = macio_ide_reset; + device_class_set_props(dc, macio_ide_properties); + dc->vmsd = &vmstate_pmac; + set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); +} + +static const TypeInfo macio_ide_type_info = { + .name = TYPE_MACIO_IDE, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(MACIOIDEState), + .instance_init = macio_ide_initfn, + .class_init = macio_ide_class_init, +}; + +static void macio_ide_register_types(void) +{ + type_register_static(&macio_ide_type_info); +} + +/* hd_table must contain 2 block drivers */ +void macio_ide_init_drives(MACIOIDEState *s, DriveInfo **hd_table) +{ + int i; + + for (i = 0; i < 2; i++) { + if (hd_table[i]) { + ide_create_drive(&s->bus, i, hd_table[i]); + } + } +} + +void macio_ide_register_dma(MACIOIDEState *s) +{ + DBDMA_register_channel(s->dbdma, s->channel, s->dma_irq, + pmac_ide_transfer, pmac_ide_flush, s); +} + +type_init(macio_ide_register_types) diff --git a/hw/ide/meson.build b/hw/ide/meson.build new file mode 100644 index 000000000..ddcb3b28d --- /dev/null +++ b/hw/ide/meson.build @@ -0,0 +1,14 @@ +softmmu_ss.add(when: 'CONFIG_AHCI', if_true: files('ahci.c')) +softmmu_ss.add(when: 'CONFIG_AHCI_ICH9', if_true: files('ich.c')) +softmmu_ss.add(when: 'CONFIG_ALLWINNER_A10', if_true: files('ahci-allwinner.c')) +softmmu_ss.add(when: 'CONFIG_IDE_CMD646', if_true: files('cmd646.c')) +softmmu_ss.add(when: 'CONFIG_IDE_CORE', if_true: files('core.c', 'atapi.c')) +softmmu_ss.add(when: 'CONFIG_IDE_ISA', if_true: files('isa.c', 'ioport.c')) +softmmu_ss.add(when: 'CONFIG_IDE_MACIO', if_true: files('macio.c')) +softmmu_ss.add(when: 'CONFIG_IDE_MMIO', if_true: files('mmio.c')) +softmmu_ss.add(when: 'CONFIG_IDE_PCI', if_true: files('pci.c')) +softmmu_ss.add(when: 'CONFIG_IDE_PIIX', if_true: files('piix.c', 'ioport.c')) +softmmu_ss.add(when: 'CONFIG_IDE_QDEV', if_true: files('qdev.c')) +softmmu_ss.add(when: 'CONFIG_IDE_SII3112', if_true: files('sii3112.c')) +softmmu_ss.add(when: 'CONFIG_IDE_VIA', if_true: files('via.c')) +softmmu_ss.add(when: 'CONFIG_MICRODRIVE', if_true: files('microdrive.c')) diff --git a/hw/ide/microdrive.c b/hw/ide/microdrive.c new file mode 100644 index 000000000..6df9b4cbb --- /dev/null +++ b/hw/ide/microdrive.c @@ -0,0 +1,643 @@ +/* + * QEMU IDE Emulation: microdrive (CF / PCMCIA) + * + * Copyright (c) 2003 Fabrice Bellard + * Copyright (c) 2006 Openedhand Ltd. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "hw/pcmcia.h" +#include "migration/vmstate.h" +#include "qapi/error.h" +#include "qemu/module.h" +#include "sysemu/dma.h" + +#include "hw/ide/internal.h" +#include "qom/object.h" + +#define TYPE_MICRODRIVE "microdrive" +OBJECT_DECLARE_SIMPLE_TYPE(MicroDriveState, MICRODRIVE) + +/***********************************************************/ +/* CF-ATA Microdrive */ + +#define METADATA_SIZE 0x20 + +/* DSCM-1XXXX Microdrive hard disk with CF+ II / PCMCIA interface. */ + +struct MicroDriveState { + /*< private >*/ + PCMCIACardState parent_obj; + /*< public >*/ + + IDEBus bus; + uint32_t attr_base; + uint32_t io_base; + + /* Card state */ + uint8_t opt; + uint8_t stat; + uint8_t pins; + + uint8_t ctrl; + uint16_t io; + uint8_t cycle; +}; + +/* Register bitfields */ +enum md_opt { + OPT_MODE_MMAP = 0, + OPT_MODE_IOMAP16 = 1, + OPT_MODE_IOMAP1 = 2, + OPT_MODE_IOMAP2 = 3, + OPT_MODE = 0x3f, + OPT_LEVIREQ = 0x40, + OPT_SRESET = 0x80, +}; +enum md_cstat { + STAT_INT = 0x02, + STAT_PWRDWN = 0x04, + STAT_XE = 0x10, + STAT_IOIS8 = 0x20, + STAT_SIGCHG = 0x40, + STAT_CHANGED = 0x80, +}; +enum md_pins { + PINS_MRDY = 0x02, + PINS_CRDY = 0x20, +}; +enum md_ctrl { + CTRL_IEN = 0x02, + CTRL_SRST = 0x04, +}; + +static inline void md_interrupt_update(MicroDriveState *s) +{ + PCMCIACardState *card = PCMCIA_CARD(s); + + if (card->slot == NULL) { + return; + } + + qemu_set_irq(card->slot->irq, + !(s->stat & STAT_INT) && /* Inverted */ + !(s->ctrl & (CTRL_IEN | CTRL_SRST)) && + !(s->opt & OPT_SRESET)); +} + +static void md_set_irq(void *opaque, int irq, int level) +{ + MicroDriveState *s = opaque; + + if (level) { + s->stat |= STAT_INT; + } else { + s->stat &= ~STAT_INT; + } + + md_interrupt_update(s); +} + +static void md_reset(DeviceState *dev) +{ + MicroDriveState *s = MICRODRIVE(dev); + + s->opt = OPT_MODE_MMAP; + s->stat = 0; + s->pins = 0; + s->cycle = 0; + s->ctrl = 0; + ide_bus_reset(&s->bus); +} + +static uint8_t md_attr_read(PCMCIACardState *card, uint32_t at) +{ + MicroDriveState *s = MICRODRIVE(card); + PCMCIACardClass *pcc = PCMCIA_CARD_GET_CLASS(card); + + if (at < s->attr_base) { + if (at < pcc->cis_len) { + return pcc->cis[at]; + } else { + return 0x00; + } + } + + at -= s->attr_base; + + switch (at) { + case 0x00: /* Configuration Option Register */ + return s->opt; + case 0x02: /* Card Configuration Status Register */ + if (s->ctrl & CTRL_IEN) { + return s->stat & ~STAT_INT; + } else 
{ + return s->stat; + } + case 0x04: /* Pin Replacement Register */ + return (s->pins & PINS_CRDY) | 0x0c; + case 0x06: /* Socket and Copy Register */ + return 0x00; +#ifdef VERBOSE + default: + printf("%s: Bad attribute space register %02x\n", __func__, at); +#endif + } + + return 0; +} + +static void md_attr_write(PCMCIACardState *card, uint32_t at, uint8_t value) +{ + MicroDriveState *s = MICRODRIVE(card); + + at -= s->attr_base; + + switch (at) { + case 0x00: /* Configuration Option Register */ + s->opt = value & 0xcf; + if (value & OPT_SRESET) { + device_legacy_reset(DEVICE(s)); + } + md_interrupt_update(s); + break; + case 0x02: /* Card Configuration Status Register */ + if ((s->stat ^ value) & STAT_PWRDWN) { + s->pins |= PINS_CRDY; + } + s->stat &= 0x82; + s->stat |= value & 0x74; + md_interrupt_update(s); + /* Word 170 in Identify Device must be equal to STAT_XE */ + break; + case 0x04: /* Pin Replacement Register */ + s->pins &= PINS_CRDY; + s->pins |= value & PINS_MRDY; + break; + case 0x06: /* Socket and Copy Register */ + break; + default: + printf("%s: Bad attribute space register %02x\n", __func__, at); + } +} + +static uint16_t md_common_read(PCMCIACardState *card, uint32_t at) +{ + MicroDriveState *s = MICRODRIVE(card); + IDEState *ifs; + uint16_t ret; + at -= s->io_base; + + switch (s->opt & OPT_MODE) { + case OPT_MODE_MMAP: + if ((at & ~0x3ff) == 0x400) { + at = 0; + } + break; + case OPT_MODE_IOMAP16: + at &= 0xf; + break; + case OPT_MODE_IOMAP1: + if ((at & ~0xf) == 0x3f0) { + at -= 0x3e8; + } else if ((at & ~0xf) == 0x1f0) { + at -= 0x1f0; + } + break; + case OPT_MODE_IOMAP2: + if ((at & ~0xf) == 0x370) { + at -= 0x368; + } else if ((at & ~0xf) == 0x170) { + at -= 0x170; + } + } + + switch (at) { + case 0x0: /* Even RD Data */ + case 0x8: + return ide_data_readw(&s->bus, 0); + + /* TODO: 8-bit accesses */ + if (s->cycle) { + ret = s->io >> 8; + } else { + s->io = ide_data_readw(&s->bus, 0); + ret = s->io & 0xff; + } + s->cycle = !s->cycle; + return ret; + case 0x9: /* Odd RD Data */ + return s->io >> 8; + case 0xd: /* Error */ + return ide_ioport_read(&s->bus, 0x1); + case 0xe: /* Alternate Status */ + ifs = idebus_active_if(&s->bus); + if (ifs->blk) { + return ifs->status; + } else { + return 0; + } + case 0xf: /* Device Address */ + ifs = idebus_active_if(&s->bus); + return 0xc2 | ((~ifs->select << 2) & 0x3c); + default: + return ide_ioport_read(&s->bus, at); + } + + return 0; +} + +static void md_common_write(PCMCIACardState *card, uint32_t at, uint16_t value) +{ + MicroDriveState *s = MICRODRIVE(card); + at -= s->io_base; + + switch (s->opt & OPT_MODE) { + case OPT_MODE_MMAP: + if ((at & ~0x3ff) == 0x400) { + at = 0; + } + break; + case OPT_MODE_IOMAP16: + at &= 0xf; + break; + case OPT_MODE_IOMAP1: + if ((at & ~0xf) == 0x3f0) { + at -= 0x3e8; + } else if ((at & ~0xf) == 0x1f0) { + at -= 0x1f0; + } + break; + case OPT_MODE_IOMAP2: + if ((at & ~0xf) == 0x370) { + at -= 0x368; + } else if ((at & ~0xf) == 0x170) { + at -= 0x170; + } + } + + switch (at) { + case 0x0: /* Even WR Data */ + case 0x8: + ide_data_writew(&s->bus, 0, value); + break; + + /* TODO: 8-bit accesses */ + if (s->cycle) { + ide_data_writew(&s->bus, 0, s->io | (value << 8)); + } else { + s->io = value & 0xff; + } + s->cycle = !s->cycle; + break; + case 0x9: + s->io = value & 0xff; + s->cycle = !s->cycle; + break; + case 0xd: /* Features */ + ide_ioport_write(&s->bus, 0x1, value); + break; + case 0xe: /* Device Control */ + s->ctrl = value; + if (value & CTRL_SRST) { + device_legacy_reset(DEVICE(s)); 
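+            /* CTRL_SRST here acts like OPT_SRESET in md_attr_write():
+             * a full device soft reset */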
+ } + md_interrupt_update(s); + break; + default: + if (s->stat & STAT_PWRDWN) { + s->pins |= PINS_CRDY; + s->stat &= ~STAT_PWRDWN; + } + ide_ioport_write(&s->bus, at, value); + } +} + +static const VMStateDescription vmstate_microdrive = { + .name = "microdrive", + .version_id = 3, + .minimum_version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_UINT8(opt, MicroDriveState), + VMSTATE_UINT8(stat, MicroDriveState), + VMSTATE_UINT8(pins, MicroDriveState), + VMSTATE_UINT8(ctrl, MicroDriveState), + VMSTATE_UINT16(io, MicroDriveState), + VMSTATE_UINT8(cycle, MicroDriveState), + VMSTATE_IDE_BUS(bus, MicroDriveState), + VMSTATE_IDE_DRIVES(bus.ifs, MicroDriveState), + VMSTATE_END_OF_LIST() + } +}; + +static const uint8_t dscm1xxxx_cis[0x14a] = { + [0x000] = CISTPL_DEVICE, /* 5V Device Information */ + [0x002] = 0x03, /* Tuple length = 4 bytes */ + [0x004] = 0xdb, /* ID: DTYPE_FUNCSPEC, non WP, DSPEED_150NS */ + [0x006] = 0x01, /* Size = 2K bytes */ + [0x008] = CISTPL_ENDMARK, + + [0x00a] = CISTPL_DEVICE_OC, /* Additional Device Information */ + [0x00c] = 0x04, /* Tuple length = 4 byest */ + [0x00e] = 0x03, /* Conditions: Ext = 0, Vcc 3.3V, MWAIT = 1 */ + [0x010] = 0xdb, /* ID: DTYPE_FUNCSPEC, non WP, DSPEED_150NS */ + [0x012] = 0x01, /* Size = 2K bytes */ + [0x014] = CISTPL_ENDMARK, + + [0x016] = CISTPL_JEDEC_C, /* JEDEC ID */ + [0x018] = 0x02, /* Tuple length = 2 bytes */ + [0x01a] = 0xdf, /* PC Card ATA with no Vpp required */ + [0x01c] = 0x01, + + [0x01e] = CISTPL_MANFID, /* Manufacture ID */ + [0x020] = 0x04, /* Tuple length = 4 bytes */ + [0x022] = 0xa4, /* TPLMID_MANF = 00a4 (IBM) */ + [0x024] = 0x00, + [0x026] = 0x00, /* PLMID_CARD = 0000 */ + [0x028] = 0x00, + + [0x02a] = CISTPL_VERS_1, /* Level 1 Version */ + [0x02c] = 0x12, /* Tuple length = 23 bytes */ + [0x02e] = 0x04, /* Major Version = JEIDA 4.2 / PCMCIA 2.1 */ + [0x030] = 0x01, /* Minor Version = 1 */ + [0x032] = 'I', + [0x034] = 'B', + [0x036] = 'M', + [0x038] = 0x00, + [0x03a] = 'm', + [0x03c] = 'i', + [0x03e] = 'c', + [0x040] = 'r', + [0x042] = 'o', + [0x044] = 'd', + [0x046] = 'r', + [0x048] = 'i', + [0x04a] = 'v', + [0x04c] = 'e', + [0x04e] = 0x00, + [0x050] = CISTPL_ENDMARK, + + [0x052] = CISTPL_FUNCID, /* Function ID */ + [0x054] = 0x02, /* Tuple length = 2 bytes */ + [0x056] = 0x04, /* TPLFID_FUNCTION = Fixed Disk */ + [0x058] = 0x01, /* TPLFID_SYSINIT: POST = 1, ROM = 0 */ + + [0x05a] = CISTPL_FUNCE, /* Function Extension */ + [0x05c] = 0x02, /* Tuple length = 2 bytes */ + [0x05e] = 0x01, /* TPLFE_TYPE = Disk Device Interface */ + [0x060] = 0x01, /* TPLFE_DATA = PC Card ATA Interface */ + + [0x062] = CISTPL_FUNCE, /* Function Extension */ + [0x064] = 0x03, /* Tuple length = 3 bytes */ + [0x066] = 0x02, /* TPLFE_TYPE = Basic PC Card ATA Interface */ + [0x068] = 0x08, /* TPLFE_DATA: Rotating, Unique, Single */ + [0x06a] = 0x0f, /* TPLFE_DATA: Sleep, Standby, Idle, Auto */ + + [0x06c] = CISTPL_CONFIG, /* Configuration */ + [0x06e] = 0x05, /* Tuple length = 5 bytes */ + [0x070] = 0x01, /* TPCC_RASZ = 2 bytes, TPCC_RMSZ = 1 byte */ + [0x072] = 0x07, /* TPCC_LAST = 7 */ + [0x074] = 0x00, /* TPCC_RADR = 0200 */ + [0x076] = 0x02, + [0x078] = 0x0f, /* TPCC_RMSK = 200, 202, 204, 206 */ + + [0x07a] = CISTPL_CFTABLE_ENTRY, /* 16-bit PC Card Configuration */ + [0x07c] = 0x0b, /* Tuple length = 11 bytes */ + [0x07e] = 0xc0, /* TPCE_INDX = Memory Mode, Default, Iface */ + [0x080] = 0xc0, /* TPCE_IF = Memory, no BVDs, no WP, READY */ + [0x082] = 0xa1, /* TPCE_FS = Vcc only, no I/O, Memory, Misc */ + [0x084] = 0x27, /* NomV = 1, MinV = 
1, MaxV = 1, Peakl = 1 */ + [0x086] = 0x55, /* NomV: 5.0 V */ + [0x088] = 0x4d, /* MinV: 4.5 V */ + [0x08a] = 0x5d, /* MaxV: 5.5 V */ + [0x08c] = 0x4e, /* Peakl: 450 mA */ + [0x08e] = 0x08, /* TPCE_MS = 1 window, 1 byte, Host address */ + [0x090] = 0x00, /* Window descriptor: Window length = 0 */ + [0x092] = 0x20, /* TPCE_MI: support power down mode, RW */ + + [0x094] = CISTPL_CFTABLE_ENTRY, /* 16-bit PC Card Configuration */ + [0x096] = 0x06, /* Tuple length = 6 bytes */ + [0x098] = 0x00, /* TPCE_INDX = Memory Mode, no Default */ + [0x09a] = 0x01, /* TPCE_FS = Vcc only, no I/O, no Memory */ + [0x09c] = 0x21, /* NomV = 1, MinV = 0, MaxV = 0, Peakl = 1 */ + [0x09e] = 0xb5, /* NomV: 3.3 V */ + [0x0a0] = 0x1e, + [0x0a2] = 0x3e, /* Peakl: 350 mA */ + + [0x0a4] = CISTPL_CFTABLE_ENTRY, /* 16-bit PC Card Configuration */ + [0x0a6] = 0x0d, /* Tuple length = 13 bytes */ + [0x0a8] = 0xc1, /* TPCE_INDX = I/O and Memory Mode, Default */ + [0x0aa] = 0x41, /* TPCE_IF = I/O and Memory, no BVD, no WP */ + [0x0ac] = 0x99, /* TPCE_FS = Vcc only, I/O, Interrupt, Misc */ + [0x0ae] = 0x27, /* NomV = 1, MinV = 1, MaxV = 1, Peakl = 1 */ + [0x0b0] = 0x55, /* NomV: 5.0 V */ + [0x0b2] = 0x4d, /* MinV: 4.5 V */ + [0x0b4] = 0x5d, /* MaxV: 5.5 V */ + [0x0b6] = 0x4e, /* Peakl: 450 mA */ + [0x0b8] = 0x64, /* TPCE_IO = 16-byte boundary, 16/8 accesses */ + [0x0ba] = 0xf0, /* TPCE_IR = MASK, Level, Pulse, Share */ + [0x0bc] = 0xff, /* IRQ0..IRQ7 supported */ + [0x0be] = 0xff, /* IRQ8..IRQ15 supported */ + [0x0c0] = 0x20, /* TPCE_MI = support power down mode */ + + [0x0c2] = CISTPL_CFTABLE_ENTRY, /* 16-bit PC Card Configuration */ + [0x0c4] = 0x06, /* Tuple length = 6 bytes */ + [0x0c6] = 0x01, /* TPCE_INDX = I/O and Memory Mode */ + [0x0c8] = 0x01, /* TPCE_FS = Vcc only, no I/O, no Memory */ + [0x0ca] = 0x21, /* NomV = 1, MinV = 0, MaxV = 0, Peakl = 1 */ + [0x0cc] = 0xb5, /* NomV: 3.3 V */ + [0x0ce] = 0x1e, + [0x0d0] = 0x3e, /* Peakl: 350 mA */ + + [0x0d2] = CISTPL_CFTABLE_ENTRY, /* 16-bit PC Card Configuration */ + [0x0d4] = 0x12, /* Tuple length = 18 bytes */ + [0x0d6] = 0xc2, /* TPCE_INDX = I/O Primary Mode */ + [0x0d8] = 0x41, /* TPCE_IF = I/O and Memory, no BVD, no WP */ + [0x0da] = 0x99, /* TPCE_FS = Vcc only, I/O, Interrupt, Misc */ + [0x0dc] = 0x27, /* NomV = 1, MinV = 1, MaxV = 1, Peakl = 1 */ + [0x0de] = 0x55, /* NomV: 5.0 V */ + [0x0e0] = 0x4d, /* MinV: 4.5 V */ + [0x0e2] = 0x5d, /* MaxV: 5.5 V */ + [0x0e4] = 0x4e, /* Peakl: 450 mA */ + [0x0e6] = 0xea, /* TPCE_IO = 1K boundary, 16/8 access, Range */ + [0x0e8] = 0x61, /* Range: 2 fields, 2 bytes addr, 1 byte len */ + [0x0ea] = 0xf0, /* Field 1 address = 0x01f0 */ + [0x0ec] = 0x01, + [0x0ee] = 0x07, /* Address block length = 8 */ + [0x0f0] = 0xf6, /* Field 2 address = 0x03f6 */ + [0x0f2] = 0x03, + [0x0f4] = 0x01, /* Address block length = 2 */ + [0x0f6] = 0xee, /* TPCE_IR = IRQ E, Level, Pulse, Share */ + [0x0f8] = 0x20, /* TPCE_MI = support power down mode */ + + [0x0fa] = CISTPL_CFTABLE_ENTRY, /* 16-bit PC Card Configuration */ + [0x0fc] = 0x06, /* Tuple length = 6 bytes */ + [0x0fe] = 0x02, /* TPCE_INDX = I/O Primary Mode, no Default */ + [0x100] = 0x01, /* TPCE_FS = Vcc only, no I/O, no Memory */ + [0x102] = 0x21, /* NomV = 1, MinV = 0, MaxV = 0, Peakl = 1 */ + [0x104] = 0xb5, /* NomV: 3.3 V */ + [0x106] = 0x1e, + [0x108] = 0x3e, /* Peakl: 350 mA */ + + [0x10a] = CISTPL_CFTABLE_ENTRY, /* 16-bit PC Card Configuration */ + [0x10c] = 0x12, /* Tuple length = 18 bytes */ + [0x10e] = 0xc3, /* TPCE_INDX = I/O Secondary Mode, Default */ + [0x110] = 0x41, /* TPCE_IF = 
I/O and Memory, no BVD, no WP */ + [0x112] = 0x99, /* TPCE_FS = Vcc only, I/O, Interrupt, Misc */ + [0x114] = 0x27, /* NomV = 1, MinV = 1, MaxV = 1, Peakl = 1 */ + [0x116] = 0x55, /* NomV: 5.0 V */ + [0x118] = 0x4d, /* MinV: 4.5 V */ + [0x11a] = 0x5d, /* MaxV: 5.5 V */ + [0x11c] = 0x4e, /* Peakl: 450 mA */ + [0x11e] = 0xea, /* TPCE_IO = 1K boundary, 16/8 access, Range */ + [0x120] = 0x61, /* Range: 2 fields, 2 byte addr, 1 byte len */ + [0x122] = 0x70, /* Field 1 address = 0x0170 */ + [0x124] = 0x01, + [0x126] = 0x07, /* Address block length = 8 */ + [0x128] = 0x76, /* Field 2 address = 0x0376 */ + [0x12a] = 0x03, + [0x12c] = 0x01, /* Address block length = 2 */ + [0x12e] = 0xee, /* TPCE_IR = IRQ E, Level, Pulse, Share */ + [0x130] = 0x20, /* TPCE_MI = support power down mode */ + + [0x132] = CISTPL_CFTABLE_ENTRY, /* 16-bit PC Card Configuration */ + [0x134] = 0x06, /* Tuple length = 6 bytes */ + [0x136] = 0x03, /* TPCE_INDX = I/O Secondary Mode */ + [0x138] = 0x01, /* TPCE_FS = Vcc only, no I/O, no Memory */ + [0x13a] = 0x21, /* NomV = 1, MinV = 0, MaxV = 0, Peakl = 1 */ + [0x13c] = 0xb5, /* NomV: 3.3 V */ + [0x13e] = 0x1e, + [0x140] = 0x3e, /* Peakl: 350 mA */ + + [0x142] = CISTPL_NO_LINK, /* No Link */ + [0x144] = 0x00, /* Tuple length = 0 bytes */ + + [0x146] = CISTPL_END, /* Tuple End */ +}; + +#define TYPE_DSCM1XXXX "dscm1xxxx" + +static int dscm1xxxx_attach(PCMCIACardState *card) +{ + MicroDriveState *md = MICRODRIVE(card); + PCMCIACardClass *pcc = PCMCIA_CARD_GET_CLASS(card); + + md->attr_base = pcc->cis[0x74] | (pcc->cis[0x76] << 8); + md->io_base = 0x0; + + device_legacy_reset(DEVICE(md)); + md_interrupt_update(md); + + return 0; +} + +static int dscm1xxxx_detach(PCMCIACardState *card) +{ + MicroDriveState *md = MICRODRIVE(card); + + device_legacy_reset(DEVICE(md)); + return 0; +} + +PCMCIACardState *dscm1xxxx_init(DriveInfo *dinfo) +{ + MicroDriveState *md; + + md = MICRODRIVE(object_new(TYPE_DSCM1XXXX)); + qdev_realize(DEVICE(md), NULL, &error_fatal); + + if (dinfo != NULL) { + ide_create_drive(&md->bus, 0, dinfo); + } + md->bus.ifs[0].drive_kind = IDE_CFATA; + md->bus.ifs[0].mdata_size = METADATA_SIZE; + md->bus.ifs[0].mdata_storage = g_malloc0(METADATA_SIZE); + + return PCMCIA_CARD(md); +} + +static void dscm1xxxx_class_init(ObjectClass *oc, void *data) +{ + PCMCIACardClass *pcc = PCMCIA_CARD_CLASS(oc); + DeviceClass *dc = DEVICE_CLASS(oc); + + pcc->cis = dscm1xxxx_cis; + pcc->cis_len = sizeof(dscm1xxxx_cis); + + pcc->attach = dscm1xxxx_attach; + pcc->detach = dscm1xxxx_detach; + /* Reason: Needs to be wired-up in code, see dscm1xxxx_init() */ + dc->user_creatable = false; +} + +static const TypeInfo dscm1xxxx_type_info = { + .name = TYPE_DSCM1XXXX, + .parent = TYPE_MICRODRIVE, + .class_init = dscm1xxxx_class_init, +}; + +static void microdrive_realize(DeviceState *dev, Error **errp) +{ + MicroDriveState *md = MICRODRIVE(dev); + + ide_init2(&md->bus, qemu_allocate_irq(md_set_irq, md, 0)); +} + +static void microdrive_init(Object *obj) +{ + MicroDriveState *md = MICRODRIVE(obj); + + ide_bus_init(&md->bus, sizeof(md->bus), DEVICE(obj), 0, 1); +} + +static void microdrive_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + PCMCIACardClass *pcc = PCMCIA_CARD_CLASS(oc); + + pcc->attr_read = md_attr_read; + pcc->attr_write = md_attr_write; + pcc->common_read = md_common_read; + pcc->common_write = md_common_write; + pcc->io_read = md_common_read; + pcc->io_write = md_common_write; + + dc->realize = microdrive_realize; + dc->reset = md_reset; + 
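+    /* TYPE_MICRODRIVE itself is abstract; only the dscm1xxxx subtype
+     * below is ever instantiated */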
+    dc->vmsd = &vmstate_microdrive;
+}
+
+static const TypeInfo microdrive_type_info = {
+    .name = TYPE_MICRODRIVE,
+    .parent = TYPE_PCMCIA_CARD,
+    .instance_size = sizeof(MicroDriveState),
+    .instance_init = microdrive_init,
+    .abstract = true,
+    .class_init = microdrive_class_init,
+};
+
+static void microdrive_register_types(void)
+{
+    type_register_static(&microdrive_type_info);
+    type_register_static(&dscm1xxxx_type_info);
+}
+
+type_init(microdrive_register_types)
diff --git a/hw/ide/mmio.c b/hw/ide/mmio.c
new file mode 100644
index 000000000..fb2ebd484
--- /dev/null
+++ b/hw/ide/mmio.c
@@ -0,0 +1,189 @@
+/*
+ * QEMU IDE Emulation: mmio support (for embedded).
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ * Copyright (c) 2006 Openedhand Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/sysbus.h"
+#include "migration/vmstate.h"
+#include "qemu/module.h"
+#include "sysemu/dma.h"
+
+#include "hw/ide/internal.h"
+#include "hw/qdev-properties.h"
+#include "qom/object.h"
+
+/***********************************************************/
+/* MMIO based ide port
+ * This emulates an IDE device connected directly to the CPU bus, without
+ * a dedicated IDE controller, as is often seen on embedded boards.
+ */ + +#define TYPE_MMIO_IDE "mmio-ide" +typedef struct MMIOIDEState MMIOState; +DECLARE_INSTANCE_CHECKER(MMIOState, MMIO_IDE, + TYPE_MMIO_IDE) + +struct MMIOIDEState { + /*< private >*/ + SysBusDevice parent_obj; + /*< public >*/ + + IDEBus bus; + + uint32_t shift; + qemu_irq irq; + MemoryRegion iomem1, iomem2; +}; + +static void mmio_ide_reset(DeviceState *dev) +{ + MMIOState *s = MMIO_IDE(dev); + + ide_bus_reset(&s->bus); +} + +static uint64_t mmio_ide_read(void *opaque, hwaddr addr, + unsigned size) +{ + MMIOState *s = opaque; + addr >>= s->shift; + if (addr & 7) + return ide_ioport_read(&s->bus, addr); + else + return ide_data_readw(&s->bus, 0); +} + +static void mmio_ide_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + MMIOState *s = opaque; + addr >>= s->shift; + if (addr & 7) + ide_ioport_write(&s->bus, addr, val); + else + ide_data_writew(&s->bus, 0, val); +} + +static const MemoryRegionOps mmio_ide_ops = { + .read = mmio_ide_read, + .write = mmio_ide_write, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static uint64_t mmio_ide_status_read(void *opaque, hwaddr addr, + unsigned size) +{ + MMIOState *s= opaque; + return ide_status_read(&s->bus, 0); +} + +static void mmio_ide_ctrl_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + MMIOState *s = opaque; + ide_ctrl_write(&s->bus, 0, val); +} + +static const MemoryRegionOps mmio_ide_cs_ops = { + .read = mmio_ide_status_read, + .write = mmio_ide_ctrl_write, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static const VMStateDescription vmstate_ide_mmio = { + .name = "mmio-ide", + .version_id = 3, + .minimum_version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_IDE_BUS(bus, MMIOState), + VMSTATE_IDE_DRIVES(bus.ifs, MMIOState), + VMSTATE_END_OF_LIST() + } +}; + +static void mmio_ide_realizefn(DeviceState *dev, Error **errp) +{ + SysBusDevice *d = SYS_BUS_DEVICE(dev); + MMIOState *s = MMIO_IDE(dev); + + ide_init2(&s->bus, s->irq); + + memory_region_init_io(&s->iomem1, OBJECT(s), &mmio_ide_ops, s, + "ide-mmio.1", 16 << s->shift); + memory_region_init_io(&s->iomem2, OBJECT(s), &mmio_ide_cs_ops, s, + "ide-mmio.2", 2 << s->shift); + sysbus_init_mmio(d, &s->iomem1); + sysbus_init_mmio(d, &s->iomem2); +} + +static void mmio_ide_initfn(Object *obj) +{ + SysBusDevice *d = SYS_BUS_DEVICE(obj); + MMIOState *s = MMIO_IDE(obj); + + ide_bus_init(&s->bus, sizeof(s->bus), DEVICE(obj), 0, 2); + sysbus_init_irq(d, &s->irq); +} + +static Property mmio_ide_properties[] = { + DEFINE_PROP_UINT32("shift", MMIOState, shift, 0), + DEFINE_PROP_END_OF_LIST() +}; + +static void mmio_ide_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + + dc->realize = mmio_ide_realizefn; + dc->reset = mmio_ide_reset; + device_class_set_props(dc, mmio_ide_properties); + dc->vmsd = &vmstate_ide_mmio; +} + +static const TypeInfo mmio_ide_type_info = { + .name = TYPE_MMIO_IDE, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(MMIOState), + .instance_init = mmio_ide_initfn, + .class_init = mmio_ide_class_init, +}; + +static void mmio_ide_register_types(void) +{ + type_register_static(&mmio_ide_type_info); +} + +void mmio_ide_init_drives(DeviceState *dev, DriveInfo *hd0, DriveInfo *hd1) +{ + MMIOState *s = MMIO_IDE(dev); + + if (hd0 != NULL) { + ide_create_drive(&s->bus, 0, hd0); + } + if (hd1 != NULL) { + ide_create_drive(&s->bus, 1, hd1); + } +} + +type_init(mmio_ide_register_types) diff --git a/hw/ide/pci.c b/hw/ide/pci.c new file mode 100644 index 000000000..84ba73354 --- /dev/null +++ b/hw/ide/pci.c @@ 
-0,0 +1,534 @@ +/* + * QEMU IDE Emulation: PCI Bus support. + * + * Copyright (c) 2003 Fabrice Bellard + * Copyright (c) 2006 Openedhand Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "hw/pci/pci.h" +#include "migration/vmstate.h" +#include "sysemu/dma.h" +#include "qemu/error-report.h" +#include "qemu/module.h" +#include "hw/ide/pci.h" +#include "trace.h" + +#define BMDMA_PAGE_SIZE 4096 + +#define BM_MIGRATION_COMPAT_STATUS_BITS \ + (IDE_RETRY_DMA | IDE_RETRY_PIO | \ + IDE_RETRY_READ | IDE_RETRY_FLUSH) + +static uint64_t pci_ide_status_read(void *opaque, hwaddr addr, unsigned size) +{ + IDEBus *bus = opaque; + + if (addr != 2 || size != 1) { + return ((uint64_t)1 << (size * 8)) - 1; + } + return ide_status_read(bus, addr + 2); +} + +static void pci_ide_ctrl_write(void *opaque, hwaddr addr, + uint64_t data, unsigned size) +{ + IDEBus *bus = opaque; + + if (addr != 2 || size != 1) { + return; + } + ide_ctrl_write(bus, addr + 2, data); +} + +const MemoryRegionOps pci_ide_cmd_le_ops = { + .read = pci_ide_status_read, + .write = pci_ide_ctrl_write, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static uint64_t pci_ide_data_read(void *opaque, hwaddr addr, unsigned size) +{ + IDEBus *bus = opaque; + + if (size == 1) { + return ide_ioport_read(bus, addr); + } else if (addr == 0) { + if (size == 2) { + return ide_data_readw(bus, addr); + } else { + return ide_data_readl(bus, addr); + } + } + return ((uint64_t)1 << (size * 8)) - 1; +} + +static void pci_ide_data_write(void *opaque, hwaddr addr, + uint64_t data, unsigned size) +{ + IDEBus *bus = opaque; + + if (size == 1) { + ide_ioport_write(bus, addr, data); + } else if (addr == 0) { + if (size == 2) { + ide_data_writew(bus, addr, data); + } else { + ide_data_writel(bus, addr, data); + } + } +} + +const MemoryRegionOps pci_ide_data_le_ops = { + .read = pci_ide_data_read, + .write = pci_ide_data_write, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static void bmdma_start_dma(const IDEDMA *dma, IDEState *s, + BlockCompletionFunc *dma_cb) +{ + BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma); + + bm->dma_cb = dma_cb; + bm->cur_prd_last = 0; + bm->cur_prd_addr = 0; + bm->cur_prd_len = 0; + + if (bm->status & BM_STATUS_DMAING) { + bm->dma_cb(bmdma_active_if(bm), 0); + } +} + +/** + * Prepare an sglist based on available PRDs. + * @limit: How many bytes to prepare total. + * + * Returns the number of bytes prepared, -1 on error. 
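+ * Each PRD entry is 8 bytes, little-endian: a 32-bit physical address,
+ * then a 32-bit word whose low 16 bits are the byte count (0 encodes
+ * 64 KiB) and whose top bit marks the last entry of the table.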
+ * IDEState.io_buffer_size will contain the number of bytes described + * by the PRDs, whether or not we added them to the sglist. + */ +static int32_t bmdma_prepare_buf(const IDEDMA *dma, int32_t limit) +{ + BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma); + IDEState *s = bmdma_active_if(bm); + PCIDevice *pci_dev = PCI_DEVICE(bm->pci_dev); + struct { + uint32_t addr; + uint32_t size; + } prd; + int l, len; + + pci_dma_sglist_init(&s->sg, pci_dev, + s->nsector / (BMDMA_PAGE_SIZE / BDRV_SECTOR_SIZE) + 1); + s->io_buffer_size = 0; + for(;;) { + if (bm->cur_prd_len == 0) { + /* end of table (with a fail safe of one page) */ + if (bm->cur_prd_last || + (bm->cur_addr - bm->addr) >= BMDMA_PAGE_SIZE) { + return s->sg.size; + } + pci_dma_read(pci_dev, bm->cur_addr, &prd, 8); + bm->cur_addr += 8; + prd.addr = le32_to_cpu(prd.addr); + prd.size = le32_to_cpu(prd.size); + len = prd.size & 0xfffe; + if (len == 0) + len = 0x10000; + bm->cur_prd_len = len; + bm->cur_prd_addr = prd.addr; + bm->cur_prd_last = (prd.size & 0x80000000); + } + l = bm->cur_prd_len; + if (l > 0) { + uint64_t sg_len; + + /* Don't add extra bytes to the SGList; consume any remaining + * PRDs from the guest, but ignore them. */ + sg_len = MIN(limit - s->sg.size, bm->cur_prd_len); + if (sg_len) { + qemu_sglist_add(&s->sg, bm->cur_prd_addr, sg_len); + } + + bm->cur_prd_addr += l; + bm->cur_prd_len -= l; + s->io_buffer_size += l; + } + } + + qemu_sglist_destroy(&s->sg); + s->io_buffer_size = 0; + return -1; +} + +/* return 0 if buffer completed */ +static int bmdma_rw_buf(const IDEDMA *dma, bool is_write) +{ + BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma); + IDEState *s = bmdma_active_if(bm); + PCIDevice *pci_dev = PCI_DEVICE(bm->pci_dev); + struct { + uint32_t addr; + uint32_t size; + } prd; + int l, len; + + for(;;) { + l = s->io_buffer_size - s->io_buffer_index; + if (l <= 0) + break; + if (bm->cur_prd_len == 0) { + /* end of table (with a fail safe of one page) */ + if (bm->cur_prd_last || + (bm->cur_addr - bm->addr) >= BMDMA_PAGE_SIZE) + return 0; + pci_dma_read(pci_dev, bm->cur_addr, &prd, 8); + bm->cur_addr += 8; + prd.addr = le32_to_cpu(prd.addr); + prd.size = le32_to_cpu(prd.size); + len = prd.size & 0xfffe; + if (len == 0) + len = 0x10000; + bm->cur_prd_len = len; + bm->cur_prd_addr = prd.addr; + bm->cur_prd_last = (prd.size & 0x80000000); + } + if (l > bm->cur_prd_len) + l = bm->cur_prd_len; + if (l > 0) { + if (is_write) { + pci_dma_write(pci_dev, bm->cur_prd_addr, + s->io_buffer + s->io_buffer_index, l); + } else { + pci_dma_read(pci_dev, bm->cur_prd_addr, + s->io_buffer + s->io_buffer_index, l); + } + bm->cur_prd_addr += l; + bm->cur_prd_len -= l; + s->io_buffer_index += l; + } + } + return 1; +} + +static void bmdma_set_inactive(const IDEDMA *dma, bool more) +{ + BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma); + + bm->dma_cb = NULL; + if (more) { + bm->status |= BM_STATUS_DMAING; + } else { + bm->status &= ~BM_STATUS_DMAING; + } +} + +static void bmdma_restart_dma(const IDEDMA *dma) +{ + BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma); + + bm->cur_addr = bm->addr; +} + +static void bmdma_cancel(BMDMAState *bm) +{ + if (bm->status & BM_STATUS_DMAING) { + /* cancel DMA request */ + bmdma_set_inactive(&bm->dma, false); + } +} + +static void bmdma_reset(const IDEDMA *dma) +{ + BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma); + + trace_bmdma_reset(); + bmdma_cancel(bm); + bm->cmd = 0; + bm->status = 0; + bm->addr = 0; + bm->cur_addr = 0; + bm->cur_prd_last = 0; + bm->cur_prd_addr = 0; + bm->cur_prd_len = 0; +} + 
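+/*
+ * A minimal sketch (not part of this patch) of the PRD table a guest
+ * driver builds in RAM, which bmdma_prepare_buf() and bmdma_rw_buf()
+ * above walk.  Field names here are illustrative only:
+ *
+ *     struct prd_entry {
+ *         uint32_t addr;      little-endian physical buffer address
+ *         uint16_t count;     byte count, 0 encodes 64 KiB
+ *         uint16_t flags;     bit 15 set on the last entry
+ *     };
+ *
+ * A guest describing one 4 KiB page would write the single entry
+ * { cpu_to_le32(buf_phys), cpu_to_le16(4096), cpu_to_le16(0x8000) },
+ * point the controller at it through the BMDMA address port
+ * (bmdma_addr_write() below) and set BM_CMD_START via bmdma_cmd_writeb().
+ */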
+static void bmdma_irq(void *opaque, int n, int level) +{ + BMDMAState *bm = opaque; + + if (!level) { + /* pass through lower */ + qemu_set_irq(bm->irq, level); + return; + } + + bm->status |= BM_STATUS_INT; + + /* trigger the real irq */ + qemu_set_irq(bm->irq, level); +} + +void bmdma_cmd_writeb(BMDMAState *bm, uint32_t val) +{ + trace_bmdma_cmd_writeb(val); + + /* Ignore writes to SSBM if it keeps the old value */ + if ((val & BM_CMD_START) != (bm->cmd & BM_CMD_START)) { + if (!(val & BM_CMD_START)) { + ide_cancel_dma_sync(idebus_active_if(bm->bus)); + bm->status &= ~BM_STATUS_DMAING; + } else { + bm->cur_addr = bm->addr; + if (!(bm->status & BM_STATUS_DMAING)) { + bm->status |= BM_STATUS_DMAING; + /* start dma transfer if possible */ + if (bm->dma_cb) + bm->dma_cb(bmdma_active_if(bm), 0); + } + } + } + + bm->cmd = val & 0x09; +} + +static uint64_t bmdma_addr_read(void *opaque, hwaddr addr, + unsigned width) +{ + BMDMAState *bm = opaque; + uint32_t mask = (1ULL << (width * 8)) - 1; + uint64_t data; + + data = (bm->addr >> (addr * 8)) & mask; + trace_bmdma_addr_read(data); + return data; +} + +static void bmdma_addr_write(void *opaque, hwaddr addr, + uint64_t data, unsigned width) +{ + BMDMAState *bm = opaque; + int shift = addr * 8; + uint32_t mask = (1ULL << (width * 8)) - 1; + + trace_bmdma_addr_write(data); + bm->addr &= ~(mask << shift); + bm->addr |= ((data & mask) << shift) & ~3; +} + +MemoryRegionOps bmdma_addr_ioport_ops = { + .read = bmdma_addr_read, + .write = bmdma_addr_write, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static bool ide_bmdma_current_needed(void *opaque) +{ + BMDMAState *bm = opaque; + + return (bm->cur_prd_len != 0); +} + +static bool ide_bmdma_status_needed(void *opaque) +{ + BMDMAState *bm = opaque; + + /* Older versions abused some bits in the status register for internal + * error state. If any of these bits are set, we must add a subsection to + * transfer the real status register */ + uint8_t abused_bits = BM_MIGRATION_COMPAT_STATUS_BITS; + + return ((bm->status & abused_bits) != 0); +} + +static int ide_bmdma_pre_save(void *opaque) +{ + BMDMAState *bm = opaque; + uint8_t abused_bits = BM_MIGRATION_COMPAT_STATUS_BITS; + + if (!(bm->status & BM_STATUS_DMAING) && bm->dma_cb) { + bm->bus->error_status = + ide_dma_cmd_to_retry(bmdma_active_if(bm)->dma_cmd); + } + bm->migration_retry_unit = bm->bus->retry_unit; + bm->migration_retry_sector_num = bm->bus->retry_sector_num; + bm->migration_retry_nsector = bm->bus->retry_nsector; + bm->migration_compat_status = + (bm->status & ~abused_bits) | (bm->bus->error_status & abused_bits); + + return 0; +} + +/* This function accesses bm->bus->error_status which is loaded only after + * BMDMA itself. This is why the function is called from ide_pci_post_load + * instead of being registered with VMState where it would run too early. 
*/ +static int ide_bmdma_post_load(void *opaque, int version_id) +{ + BMDMAState *bm = opaque; + uint8_t abused_bits = BM_MIGRATION_COMPAT_STATUS_BITS; + + if (bm->status == 0) { + bm->status = bm->migration_compat_status & ~abused_bits; + bm->bus->error_status |= bm->migration_compat_status & abused_bits; + } + if (bm->bus->error_status) { + bm->bus->retry_sector_num = bm->migration_retry_sector_num; + bm->bus->retry_nsector = bm->migration_retry_nsector; + bm->bus->retry_unit = bm->migration_retry_unit; + } + + return 0; +} + +static const VMStateDescription vmstate_bmdma_current = { + .name = "ide bmdma_current", + .version_id = 1, + .minimum_version_id = 1, + .needed = ide_bmdma_current_needed, + .fields = (VMStateField[]) { + VMSTATE_UINT32(cur_addr, BMDMAState), + VMSTATE_UINT32(cur_prd_last, BMDMAState), + VMSTATE_UINT32(cur_prd_addr, BMDMAState), + VMSTATE_UINT32(cur_prd_len, BMDMAState), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_bmdma_status = { + .name ="ide bmdma/status", + .version_id = 1, + .minimum_version_id = 1, + .needed = ide_bmdma_status_needed, + .fields = (VMStateField[]) { + VMSTATE_UINT8(status, BMDMAState), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_bmdma = { + .name = "ide bmdma", + .version_id = 3, + .minimum_version_id = 0, + .pre_save = ide_bmdma_pre_save, + .fields = (VMStateField[]) { + VMSTATE_UINT8(cmd, BMDMAState), + VMSTATE_UINT8(migration_compat_status, BMDMAState), + VMSTATE_UINT32(addr, BMDMAState), + VMSTATE_INT64(migration_retry_sector_num, BMDMAState), + VMSTATE_UINT32(migration_retry_nsector, BMDMAState), + VMSTATE_UINT8(migration_retry_unit, BMDMAState), + VMSTATE_END_OF_LIST() + }, + .subsections = (const VMStateDescription*[]) { + &vmstate_bmdma_current, + &vmstate_bmdma_status, + NULL + } +}; + +static int ide_pci_post_load(void *opaque, int version_id) +{ + PCIIDEState *d = opaque; + int i; + + for(i = 0; i < 2; i++) { + /* current versions always store 0/1, but older version + stored bigger values. 
we only need the last bit */
+ d->bmdma[i].migration_retry_unit &= 1;
+ ide_bmdma_post_load(&d->bmdma[i], -1);
+ }
+
+ return 0;
+}
+
+const VMStateDescription vmstate_ide_pci = {
+ .name = "ide",
+ .version_id = 3,
+ .minimum_version_id = 0,
+ .post_load = ide_pci_post_load,
+ .fields = (VMStateField[]) {
+ VMSTATE_PCI_DEVICE(parent_obj, PCIIDEState),
+ VMSTATE_STRUCT_ARRAY(bmdma, PCIIDEState, 2, 0,
+ vmstate_bmdma, BMDMAState),
+ VMSTATE_IDE_BUS_ARRAY(bus, PCIIDEState, 2),
+ VMSTATE_IDE_DRIVES(bus[0].ifs, PCIIDEState),
+ VMSTATE_IDE_DRIVES(bus[1].ifs, PCIIDEState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+/* hd_table must contain 4 block drivers (master/slave on both buses) */
+void pci_ide_create_devs(PCIDevice *dev)
+{
+ PCIIDEState *d = PCI_IDE(dev);
+ DriveInfo *hd_table[2 * MAX_IDE_DEVS];
+ static const int bus[4] = { 0, 0, 1, 1 };
+ static const int unit[4] = { 0, 1, 0, 1 };
+ int i;
+
+ ide_drive_get(hd_table, ARRAY_SIZE(hd_table));
+ for (i = 0; i < 4; i++) {
+ if (hd_table[i]) {
+ ide_create_drive(d->bus + bus[i], unit[i], hd_table[i]);
+ }
+ }
+}
+
+static const struct IDEDMAOps bmdma_ops = {
+ .start_dma = bmdma_start_dma,
+ .prepare_buf = bmdma_prepare_buf,
+ .rw_buf = bmdma_rw_buf,
+ .restart_dma = bmdma_restart_dma,
+ .set_inactive = bmdma_set_inactive,
+ .reset = bmdma_reset,
+};
+
+void bmdma_init(IDEBus *bus, BMDMAState *bm, PCIIDEState *d)
+{
+ if (bus->dma == &bm->dma) {
+ return;
+ }
+
+ bm->dma.ops = &bmdma_ops;
+ bus->dma = &bm->dma;
+ bm->irq = bus->irq;
+ bus->irq = qemu_allocate_irq(bmdma_irq, bm, 0);
+ bm->pci_dev = d;
+}
+
+static const TypeInfo pci_ide_type_info = {
+ .name = TYPE_PCI_IDE,
+ .parent = TYPE_PCI_DEVICE,
+ .instance_size = sizeof(PCIIDEState),
+ .abstract = true,
+ .interfaces = (InterfaceInfo[]) {
+ { INTERFACE_CONVENTIONAL_PCI_DEVICE },
+ { },
+ },
+};
+
+static void pci_ide_register_types(void)
+{
+ type_register_static(&pci_ide_type_info);
+}
+
+type_init(pci_ide_register_types)
diff --git a/hw/ide/piix.c b/hw/ide/piix.c
new file mode 100644
index 000000000..ce89fd0aa
--- /dev/null
+++ b/hw/ide/piix.c
@@ -0,0 +1,279 @@
+/*
+ * QEMU IDE Emulation: PCI PIIX3/4 support.
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ * Copyright (c) 2006 Openedhand Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/pci/pci.h"
+#include "migration/vmstate.h"
+#include "qapi/error.h"
+#include "qemu/module.h"
+#include "sysemu/block-backend.h"
+#include "sysemu/blockdev.h"
+#include "sysemu/dma.h"
+
+#include "hw/ide/pci.h"
+#include "trace.h"
+
+static uint64_t bmdma_read(void *opaque, hwaddr addr, unsigned size)
+{
+ BMDMAState *bm = opaque;
+ uint32_t val;
+
+ if (size != 1) {
+ return ((uint64_t)1 << (size * 8)) - 1;
+ }
+
+ switch (addr & 3) {
+ case 0:
+ val = bm->cmd;
+ break;
+ case 2:
+ val = bm->status;
+ break;
+ default:
+ val = 0xff;
+ break;
+ }
+
+ trace_bmdma_read(addr, val);
+ return val;
+}
+
+static void bmdma_write(void *opaque, hwaddr addr,
+ uint64_t val, unsigned size)
+{
+ BMDMAState *bm = opaque;
+
+ if (size != 1) {
+ return;
+ }
+
+ trace_bmdma_write(addr, val);
+
+ switch (addr & 3) {
+ case 0:
+ bmdma_cmd_writeb(bm, val);
+ break;
+ case 2:
+ bm->status = (val & 0x60) | (bm->status & 1) | (bm->status & ~val & 0x06);
+ break;
+ }
+}
+
+static const MemoryRegionOps piix_bmdma_ops = {
+ .read = bmdma_read,
+ .write = bmdma_write,
+};
+
+static void bmdma_setup_bar(PCIIDEState *d)
+{
+ int i;
+
+ memory_region_init(&d->bmdma_bar, OBJECT(d), "piix-bmdma-container", 16);
+ for (i = 0; i < 2; i++) {
+ BMDMAState *bm = &d->bmdma[i];
+
+ memory_region_init_io(&bm->extra_io, OBJECT(d), &piix_bmdma_ops, bm,
+ "piix-bmdma", 4);
+ memory_region_add_subregion(&d->bmdma_bar, i * 8, &bm->extra_io);
+ memory_region_init_io(&bm->addr_ioport, OBJECT(d),
+ &bmdma_addr_ioport_ops, bm, "bmdma", 4);
+ memory_region_add_subregion(&d->bmdma_bar, i * 8 + 4, &bm->addr_ioport);
+ }
+}
+
+static void piix_ide_reset(DeviceState *dev)
+{
+ PCIIDEState *d = PCI_IDE(dev);
+ PCIDevice *pd = PCI_DEVICE(d);
+ uint8_t *pci_conf = pd->config;
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ ide_bus_reset(&d->bus[i]);
+ }
+
+ /* TODO: this is the default. do not override. */
+ pci_conf[PCI_COMMAND] = 0x00;
+ /* TODO: this is the default. do not override. */
+ pci_conf[PCI_COMMAND + 1] = 0x00;
+ /* TODO: use pci_set_word */
+ pci_conf[PCI_STATUS] = PCI_STATUS_FAST_BACK;
+ pci_conf[PCI_STATUS + 1] = PCI_STATUS_DEVSEL_MEDIUM >> 8;
+ pci_conf[0x20] = 0x01; /* BMIBA: 20-23h */
+}
+
+static int pci_piix_init_ports(PCIIDEState *d)
+{
+ static const struct {
+ int iobase;
+ int iobase2;
+ int isairq;
+ } port_info[] = {
+ {0x1f0, 0x3f6, 14},
+ {0x170, 0x376, 15},
+ };
+ int i, ret;
+
+ for (i = 0; i < 2; i++) {
+ ide_bus_init(&d->bus[i], sizeof(d->bus[i]), DEVICE(d), i, 2);
+ ret = ide_init_ioport(&d->bus[i], NULL, port_info[i].iobase,
+ port_info[i].iobase2);
+ if (ret) {
+ return ret;
+ }
+ ide_init2(&d->bus[i], isa_get_irq(NULL, port_info[i].isairq));
+
+ bmdma_init(&d->bus[i], &d->bmdma[i], d);
+ d->bmdma[i].bus = &d->bus[i];
+ ide_register_restart_cb(&d->bus[i]);
+ }
+
+ return 0;
+}
+
+static void pci_piix_ide_realize(PCIDevice *dev, Error **errp)
+{
+ PCIIDEState *d = PCI_IDE(dev);
+ uint8_t *pci_conf = dev->config;
+ int rc;
+
+ pci_conf[PCI_CLASS_PROG] = 0x80; /* legacy ATA mode */
+
+ bmdma_setup_bar(d);
+ pci_register_bar(dev, 4, PCI_BASE_ADDRESS_SPACE_IO, &d->bmdma_bar);
+
+ vmstate_register(VMSTATE_IF(dev), 0, &vmstate_ide_pci, d);
+
+ rc = pci_piix_init_ports(d);
+ if (rc) {
+ error_setg_errno(errp, -rc, "Failed to realize %s",
+ object_get_typename(OBJECT(dev)));
+ }
+}
+
+int pci_piix3_xen_ide_unplug(DeviceState *dev, bool aux)
+{
+ PCIIDEState *pci_ide;
+ int i;
+ IDEDevice *idedev;
+ IDEBus *idebus;
+ BlockBackend *blk;
+
+ pci_ide = PCI_IDE(dev);
+
+ for (i = aux ?
1 : 0; i < 4; i++) {
+ idebus = &pci_ide->bus[i / 2];
+ blk = idebus->ifs[i % 2].blk;
+
+ if (blk && idebus->ifs[i % 2].drive_kind != IDE_CD) {
+ if (!(i % 2)) {
+ idedev = idebus->master;
+ } else {
+ idedev = idebus->slave;
+ }
+
+ blk_drain(blk);
+ blk_flush(blk);
+
+ blk_detach_dev(blk, DEVICE(idedev));
+ idebus->ifs[i % 2].blk = NULL;
+ idedev->conf.blk = NULL;
+ monitor_remove_blk(blk);
+ blk_unref(blk);
+ }
+ }
+ qdev_reset_all(dev);
+ return 0;
+}
+
+static void pci_piix_ide_exitfn(PCIDevice *dev)
+{
+ PCIIDEState *d = PCI_IDE(dev);
+ unsigned i;
+
+ for (i = 0; i < 2; ++i) {
+ memory_region_del_subregion(&d->bmdma_bar, &d->bmdma[i].extra_io);
+ memory_region_del_subregion(&d->bmdma_bar, &d->bmdma[i].addr_ioport);
+ }
+}
+
+/* NOTE: for the PIIX3, the IRQs and IOports are hardcoded */
+static void piix3_ide_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
+
+ dc->reset = piix_ide_reset;
+ k->realize = pci_piix_ide_realize;
+ k->exit = pci_piix_ide_exitfn;
+ k->vendor_id = PCI_VENDOR_ID_INTEL;
+ k->device_id = PCI_DEVICE_ID_INTEL_82371SB_1;
+ k->class_id = PCI_CLASS_STORAGE_IDE;
+ set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
+ dc->hotpluggable = false;
+}
+
+static const TypeInfo piix3_ide_info = {
+ .name = "piix3-ide",
+ .parent = TYPE_PCI_IDE,
+ .class_init = piix3_ide_class_init,
+};
+
+static const TypeInfo piix3_ide_xen_info = {
+ .name = "piix3-ide-xen",
+ .parent = TYPE_PCI_IDE,
+ .class_init = piix3_ide_class_init,
+};
+
+/* NOTE: for the PIIX4, the IRQs and IOports are hardcoded */
+static void piix4_ide_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
+
+ dc->reset = piix_ide_reset;
+ k->realize = pci_piix_ide_realize;
+ k->exit = pci_piix_ide_exitfn;
+ k->vendor_id = PCI_VENDOR_ID_INTEL;
+ k->device_id = PCI_DEVICE_ID_INTEL_82371AB;
+ k->class_id = PCI_CLASS_STORAGE_IDE;
+ set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
+ dc->hotpluggable = false;
+}
+
+static const TypeInfo piix4_ide_info = {
+ .name = "piix4-ide",
+ .parent = TYPE_PCI_IDE,
+ .class_init = piix4_ide_class_init,
+};
+
+static void piix_ide_register_types(void)
+{
+ type_register_static(&piix3_ide_info);
+ type_register_static(&piix3_ide_xen_info);
+ type_register_static(&piix4_ide_info);
+}
+
+type_init(piix_ide_register_types)
diff --git a/hw/ide/qdev.c b/hw/ide/qdev.c
new file mode 100644
index 000000000..618045b85
--- /dev/null
+++ b/hw/ide/qdev.c
@@ -0,0 +1,371 @@
+/*
+ * ide bus support for qdev.
+ *
+ * Copyright (c) 2009 Gerd Hoffmann
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */ + +#include "qemu/osdep.h" +#include "sysemu/dma.h" +#include "qapi/error.h" +#include "qapi/qapi-types-block.h" +#include "qemu/error-report.h" +#include "qemu/main-loop.h" +#include "qemu/module.h" +#include "hw/ide/internal.h" +#include "hw/qdev-properties.h" +#include "hw/qdev-properties-system.h" +#include "sysemu/block-backend.h" +#include "sysemu/blockdev.h" +#include "hw/block/block.h" +#include "sysemu/sysemu.h" +#include "sysemu/runstate.h" +#include "qapi/visitor.h" + +/* --------------------------------- */ + +static char *idebus_get_fw_dev_path(DeviceState *dev); +static void idebus_unrealize(BusState *qdev); + +static Property ide_props[] = { + DEFINE_PROP_UINT32("unit", IDEDevice, unit, -1), + DEFINE_PROP_END_OF_LIST(), +}; + +static void ide_bus_class_init(ObjectClass *klass, void *data) +{ + BusClass *k = BUS_CLASS(klass); + + k->get_fw_dev_path = idebus_get_fw_dev_path; + k->unrealize = idebus_unrealize; +} + +static void idebus_unrealize(BusState *bus) +{ + IDEBus *ibus = IDE_BUS(bus); + + if (ibus->vmstate) { + qemu_del_vm_change_state_handler(ibus->vmstate); + } +} + +static const TypeInfo ide_bus_info = { + .name = TYPE_IDE_BUS, + .parent = TYPE_BUS, + .instance_size = sizeof(IDEBus), + .class_init = ide_bus_class_init, +}; + +void ide_bus_init(IDEBus *idebus, size_t idebus_size, DeviceState *dev, + int bus_id, int max_units) +{ + qbus_init(idebus, idebus_size, TYPE_IDE_BUS, dev, NULL); + idebus->bus_id = bus_id; + idebus->max_units = max_units; +} + +static char *idebus_get_fw_dev_path(DeviceState *dev) +{ + char path[30]; + + snprintf(path, sizeof(path), "%s@%x", qdev_fw_name(dev), + ((IDEBus*)dev->parent_bus)->bus_id); + + return g_strdup(path); +} + +static void ide_qdev_realize(DeviceState *qdev, Error **errp) +{ + IDEDevice *dev = IDE_DEVICE(qdev); + IDEDeviceClass *dc = IDE_DEVICE_GET_CLASS(dev); + IDEBus *bus = DO_UPCAST(IDEBus, qbus, qdev->parent_bus); + + if (dev->unit == -1) { + dev->unit = bus->master ? 1 : 0; + } + + if (dev->unit >= bus->max_units) { + error_setg(errp, "Can't create IDE unit %d, bus supports only %d units", + dev->unit, bus->max_units); + return; + } + + switch (dev->unit) { + case 0: + if (bus->master) { + error_setg(errp, "IDE unit %d is in use", dev->unit); + return; + } + bus->master = dev; + break; + case 1: + if (bus->slave) { + error_setg(errp, "IDE unit %d is in use", dev->unit); + return; + } + bus->slave = dev; + break; + default: + error_setg(errp, "Invalid IDE unit %d", dev->unit); + return; + } + dc->realize(dev, errp); +} + +IDEDevice *ide_create_drive(IDEBus *bus, int unit, DriveInfo *drive) +{ + DeviceState *dev; + + dev = qdev_new(drive->media_cd ? 
"ide-cd" : "ide-hd"); + qdev_prop_set_uint32(dev, "unit", unit); + qdev_prop_set_drive_err(dev, "drive", blk_by_legacy_dinfo(drive), + &error_fatal); + qdev_realize_and_unref(dev, &bus->qbus, &error_fatal); + return DO_UPCAST(IDEDevice, qdev, dev); +} + +int ide_get_geometry(BusState *bus, int unit, + int16_t *cyls, int8_t *heads, int8_t *secs) +{ + IDEState *s = &DO_UPCAST(IDEBus, qbus, bus)->ifs[unit]; + + if (s->drive_kind != IDE_HD || !s->blk) { + return -1; + } + + *cyls = s->cylinders; + *heads = s->heads; + *secs = s->sectors; + return 0; +} + +int ide_get_bios_chs_trans(BusState *bus, int unit) +{ + return DO_UPCAST(IDEBus, qbus, bus)->ifs[unit].chs_trans; +} + +/* --------------------------------- */ + +typedef struct IDEDrive { + IDEDevice dev; +} IDEDrive; + +static void ide_dev_initfn(IDEDevice *dev, IDEDriveKind kind, Error **errp) +{ + IDEBus *bus = DO_UPCAST(IDEBus, qbus, dev->qdev.parent_bus); + IDEState *s = bus->ifs + dev->unit; + int ret; + + if (!dev->conf.blk) { + if (kind != IDE_CD) { + error_setg(errp, "No drive specified"); + return; + } else { + /* Anonymous BlockBackend for an empty drive */ + dev->conf.blk = blk_new(qemu_get_aio_context(), 0, BLK_PERM_ALL); + ret = blk_attach_dev(dev->conf.blk, &dev->qdev); + assert(ret == 0); + } + } + + if (dev->conf.discard_granularity == -1) { + dev->conf.discard_granularity = 512; + } else if (dev->conf.discard_granularity && + dev->conf.discard_granularity != 512) { + error_setg(errp, "discard_granularity must be 512 for ide"); + return; + } + + if (!blkconf_blocksizes(&dev->conf, errp)) { + return; + } + + if (dev->conf.logical_block_size != 512) { + error_setg(errp, "logical_block_size must be 512 for IDE"); + return; + } + + if (kind != IDE_CD) { + if (!blkconf_geometry(&dev->conf, &dev->chs_trans, 65535, 16, 255, + errp)) { + return; + } + } + if (!blkconf_apply_backend_options(&dev->conf, kind == IDE_CD, + kind != IDE_CD, errp)) { + return; + } + + if (ide_init_drive(s, dev->conf.blk, kind, + dev->version, dev->serial, dev->model, dev->wwn, + dev->conf.cyls, dev->conf.heads, dev->conf.secs, + dev->chs_trans, errp) < 0) { + return; + } + + if (!dev->version) { + dev->version = g_strdup(s->version); + } + if (!dev->serial) { + dev->serial = g_strdup(s->drive_serial_str); + } + + add_boot_device_path(dev->conf.bootindex, &dev->qdev, + dev->unit ? "/disk@1" : "/disk@0"); + + add_boot_device_lchs(&dev->qdev, dev->unit ? "/disk@1" : "/disk@0", + dev->conf.lcyls, + dev->conf.lheads, + dev->conf.lsecs); +} + +static void ide_dev_get_bootindex(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + IDEDevice *d = IDE_DEVICE(obj); + + visit_type_int32(v, name, &d->conf.bootindex, errp); +} + +static void ide_dev_set_bootindex(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + IDEDevice *d = IDE_DEVICE(obj); + int32_t boot_index; + Error *local_err = NULL; + + if (!visit_type_int32(v, name, &boot_index, errp)) { + return; + } + /* check whether bootindex is present in fw_boot_order list */ + check_boot_index(boot_index, &local_err); + if (local_err) { + goto out; + } + /* change bootindex to a new one */ + d->conf.bootindex = boot_index; + + if (d->unit != -1) { + add_boot_device_path(d->conf.bootindex, &d->qdev, + d->unit ? 
"/disk@1" : "/disk@0"); + } +out: + error_propagate(errp, local_err); +} + +static void ide_dev_instance_init(Object *obj) +{ + object_property_add(obj, "bootindex", "int32", + ide_dev_get_bootindex, + ide_dev_set_bootindex, NULL, NULL); + object_property_set_int(obj, "bootindex", -1, NULL); +} + +static void ide_hd_realize(IDEDevice *dev, Error **errp) +{ + ide_dev_initfn(dev, IDE_HD, errp); +} + +static void ide_cd_realize(IDEDevice *dev, Error **errp) +{ + ide_dev_initfn(dev, IDE_CD, errp); +} + +#define DEFINE_IDE_DEV_PROPERTIES() \ + DEFINE_BLOCK_PROPERTIES(IDEDrive, dev.conf), \ + DEFINE_BLOCK_ERROR_PROPERTIES(IDEDrive, dev.conf), \ + DEFINE_PROP_STRING("ver", IDEDrive, dev.version), \ + DEFINE_PROP_UINT64("wwn", IDEDrive, dev.wwn, 0), \ + DEFINE_PROP_STRING("serial", IDEDrive, dev.serial),\ + DEFINE_PROP_STRING("model", IDEDrive, dev.model) + +static Property ide_hd_properties[] = { + DEFINE_IDE_DEV_PROPERTIES(), + DEFINE_BLOCK_CHS_PROPERTIES(IDEDrive, dev.conf), + DEFINE_PROP_BIOS_CHS_TRANS("bios-chs-trans", + IDEDrive, dev.chs_trans, BIOS_ATA_TRANSLATION_AUTO), + DEFINE_PROP_UINT16("rotation_rate", IDEDrive, dev.rotation_rate, 0), + DEFINE_PROP_END_OF_LIST(), +}; + +static void ide_hd_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + IDEDeviceClass *k = IDE_DEVICE_CLASS(klass); + + k->realize = ide_hd_realize; + dc->fw_name = "drive"; + dc->desc = "virtual IDE disk"; + device_class_set_props(dc, ide_hd_properties); +} + +static const TypeInfo ide_hd_info = { + .name = "ide-hd", + .parent = TYPE_IDE_DEVICE, + .instance_size = sizeof(IDEDrive), + .class_init = ide_hd_class_init, +}; + +static Property ide_cd_properties[] = { + DEFINE_IDE_DEV_PROPERTIES(), + DEFINE_PROP_END_OF_LIST(), +}; + +static void ide_cd_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + IDEDeviceClass *k = IDE_DEVICE_CLASS(klass); + + k->realize = ide_cd_realize; + dc->fw_name = "drive"; + dc->desc = "virtual IDE CD-ROM"; + device_class_set_props(dc, ide_cd_properties); +} + +static const TypeInfo ide_cd_info = { + .name = "ide-cd", + .parent = TYPE_IDE_DEVICE, + .instance_size = sizeof(IDEDrive), + .class_init = ide_cd_class_init, +}; + +static void ide_device_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *k = DEVICE_CLASS(klass); + k->realize = ide_qdev_realize; + set_bit(DEVICE_CATEGORY_STORAGE, k->categories); + k->bus_type = TYPE_IDE_BUS; + device_class_set_props(k, ide_props); +} + +static const TypeInfo ide_device_type_info = { + .name = TYPE_IDE_DEVICE, + .parent = TYPE_DEVICE, + .instance_size = sizeof(IDEDevice), + .abstract = true, + .class_size = sizeof(IDEDeviceClass), + .class_init = ide_device_class_init, + .instance_init = ide_dev_instance_init, +}; + +static void ide_register_types(void) +{ + type_register_static(&ide_bus_info); + type_register_static(&ide_hd_info); + type_register_static(&ide_cd_info); + type_register_static(&ide_device_type_info); +} + +type_init(ide_register_types) diff --git a/hw/ide/sii3112.c b/hw/ide/sii3112.c new file mode 100644 index 000000000..46204f10d --- /dev/null +++ b/hw/ide/sii3112.c @@ -0,0 +1,322 @@ +/* + * QEMU SiI3112A PCI to Serial ATA Controller Emulation + * + * Copyright (C) 2017 BALATON Zoltan + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ * + */ + +/* For documentation on this and similar cards see: + * http://wiki.osdev.org/User:Quok/Silicon_Image_Datasheets + */ + +#include "qemu/osdep.h" +#include "hw/ide/pci.h" +#include "qemu/module.h" +#include "trace.h" +#include "qom/object.h" + +#define TYPE_SII3112_PCI "sii3112" +OBJECT_DECLARE_SIMPLE_TYPE(SiI3112PCIState, SII3112_PCI) + +typedef struct SiI3112Regs { + uint32_t confstat; + uint32_t scontrol; + uint16_t sien; + uint8_t swdata; +} SiI3112Regs; + +struct SiI3112PCIState { + PCIIDEState i; + MemoryRegion mmio; + SiI3112Regs regs[2]; +}; + +/* The sii3112_reg_read and sii3112_reg_write functions implement the + * Internal Register Space - BAR5 (section 6.7 of the data sheet). + */ + +static uint64_t sii3112_reg_read(void *opaque, hwaddr addr, + unsigned int size) +{ + SiI3112PCIState *d = opaque; + uint64_t val; + + switch (addr) { + case 0x00: + val = d->i.bmdma[0].cmd; + break; + case 0x01: + val = d->regs[0].swdata; + break; + case 0x02: + val = d->i.bmdma[0].status; + break; + case 0x03: + val = 0; + break; + case 0x04 ... 0x07: + val = bmdma_addr_ioport_ops.read(&d->i.bmdma[0], addr - 4, size); + break; + case 0x08: + val = d->i.bmdma[1].cmd; + break; + case 0x09: + val = d->regs[1].swdata; + break; + case 0x0a: + val = d->i.bmdma[1].status; + break; + case 0x0b: + val = 0; + break; + case 0x0c ... 0x0f: + val = bmdma_addr_ioport_ops.read(&d->i.bmdma[1], addr - 12, size); + break; + case 0x10: + val = d->i.bmdma[0].cmd; + val |= (d->regs[0].confstat & (1UL << 11) ? (1 << 4) : 0); /*SATAINT0*/ + val |= (d->regs[1].confstat & (1UL << 11) ? (1 << 6) : 0); /*SATAINT1*/ + val |= (d->i.bmdma[1].status & BM_STATUS_INT ? (1 << 14) : 0); + val |= (uint32_t)d->i.bmdma[0].status << 16; + val |= (uint32_t)d->i.bmdma[1].status << 24; + break; + case 0x18: + val = d->i.bmdma[1].cmd; + val |= (d->regs[1].confstat & (1UL << 11) ? (1 << 4) : 0); + val |= (uint32_t)d->i.bmdma[1].status << 16; + break; + case 0x80 ... 0x87: + val = pci_ide_data_le_ops.read(&d->i.bus[0], addr - 0x80, size); + break; + case 0x8a: + val = pci_ide_cmd_le_ops.read(&d->i.bus[0], 2, size); + break; + case 0xa0: + val = d->regs[0].confstat; + break; + case 0xc0 ... 0xc7: + val = pci_ide_data_le_ops.read(&d->i.bus[1], addr - 0xc0, size); + break; + case 0xca: + val = pci_ide_cmd_le_ops.read(&d->i.bus[1], 2, size); + break; + case 0xe0: + val = d->regs[1].confstat; + break; + case 0x100: + val = d->regs[0].scontrol; + break; + case 0x104: + val = (d->i.bus[0].ifs[0].blk) ? 0x113 : 0; + break; + case 0x148: + val = (uint32_t)d->regs[0].sien << 16; + break; + case 0x180: + val = d->regs[1].scontrol; + break; + case 0x184: + val = (d->i.bus[1].ifs[0].blk) ? 0x113 : 0; + break; + case 0x1c8: + val = (uint32_t)d->regs[1].sien << 16; + break; + default: + val = 0; + break; + } + trace_sii3112_read(size, addr, val); + return val; +} + +static void sii3112_reg_write(void *opaque, hwaddr addr, + uint64_t val, unsigned int size) +{ + SiI3112PCIState *d = opaque; + + trace_sii3112_write(size, addr, val); + switch (addr) { + case 0x00: + case 0x10: + bmdma_cmd_writeb(&d->i.bmdma[0], val); + break; + case 0x01: + case 0x11: + d->regs[0].swdata = val & 0x3f; + break; + case 0x02: + case 0x12: + d->i.bmdma[0].status = (val & 0x60) | (d->i.bmdma[0].status & 1) | + (d->i.bmdma[0].status & ~val & 6); + break; + case 0x04 ... 
0x07: + bmdma_addr_ioport_ops.write(&d->i.bmdma[0], addr - 4, val, size); + break; + case 0x08: + case 0x18: + bmdma_cmd_writeb(&d->i.bmdma[1], val); + break; + case 0x09: + case 0x19: + d->regs[1].swdata = val & 0x3f; + break; + case 0x0a: + case 0x1a: + d->i.bmdma[1].status = (val & 0x60) | (d->i.bmdma[1].status & 1) | + (d->i.bmdma[1].status & ~val & 6); + break; + case 0x0c ... 0x0f: + bmdma_addr_ioport_ops.write(&d->i.bmdma[1], addr - 12, val, size); + break; + case 0x80 ... 0x87: + pci_ide_data_le_ops.write(&d->i.bus[0], addr - 0x80, val, size); + break; + case 0x8a: + pci_ide_cmd_le_ops.write(&d->i.bus[0], 2, val, size); + break; + case 0xc0 ... 0xc7: + pci_ide_data_le_ops.write(&d->i.bus[1], addr - 0xc0, val, size); + break; + case 0xca: + pci_ide_cmd_le_ops.write(&d->i.bus[1], 2, val, size); + break; + case 0x100: + d->regs[0].scontrol = val & 0xfff; + if (val & 1) { + ide_bus_reset(&d->i.bus[0]); + } + break; + case 0x148: + d->regs[0].sien = (val >> 16) & 0x3eed; + break; + case 0x180: + d->regs[1].scontrol = val & 0xfff; + if (val & 1) { + ide_bus_reset(&d->i.bus[1]); + } + break; + case 0x1c8: + d->regs[1].sien = (val >> 16) & 0x3eed; + break; + default: + break; + } +} + +static const MemoryRegionOps sii3112_reg_ops = { + .read = sii3112_reg_read, + .write = sii3112_reg_write, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +/* the PCI irq level is the logical OR of the two channels */ +static void sii3112_update_irq(SiI3112PCIState *s) +{ + int i, set = 0; + + for (i = 0; i < 2; i++) { + set |= s->regs[i].confstat & (1UL << 11); + } + pci_set_irq(PCI_DEVICE(s), (set ? 1 : 0)); +} + +static void sii3112_set_irq(void *opaque, int channel, int level) +{ + SiI3112PCIState *s = opaque; + + trace_sii3112_set_irq(channel, level); + if (level) { + s->regs[channel].confstat |= (1UL << 11); + } else { + s->regs[channel].confstat &= ~(1UL << 11); + } + + sii3112_update_irq(s); +} + +static void sii3112_reset(DeviceState *dev) +{ + SiI3112PCIState *s = SII3112_PCI(dev); + int i; + + for (i = 0; i < 2; i++) { + s->regs[i].confstat = 0x6515 << 16; + ide_bus_reset(&s->i.bus[i]); + } +} + +static void sii3112_pci_realize(PCIDevice *dev, Error **errp) +{ + SiI3112PCIState *d = SII3112_PCI(dev); + PCIIDEState *s = PCI_IDE(dev); + DeviceState *ds = DEVICE(dev); + MemoryRegion *mr; + int i; + + pci_config_set_interrupt_pin(dev->config, 1); + pci_set_byte(dev->config + PCI_CACHE_LINE_SIZE, 8); + + /* BAR5 is in PCI memory space */ + memory_region_init_io(&d->mmio, OBJECT(d), &sii3112_reg_ops, d, + "sii3112.bar5", 0x200); + pci_register_bar(dev, 5, PCI_BASE_ADDRESS_SPACE_MEMORY, &d->mmio); + + /* BAR0-BAR4 are PCI I/O space aliases into BAR5 */ + mr = g_new(MemoryRegion, 1); + memory_region_init_alias(mr, OBJECT(d), "sii3112.bar0", &d->mmio, 0x80, 8); + pci_register_bar(dev, 0, PCI_BASE_ADDRESS_SPACE_IO, mr); + mr = g_new(MemoryRegion, 1); + memory_region_init_alias(mr, OBJECT(d), "sii3112.bar1", &d->mmio, 0x88, 4); + pci_register_bar(dev, 1, PCI_BASE_ADDRESS_SPACE_IO, mr); + mr = g_new(MemoryRegion, 1); + memory_region_init_alias(mr, OBJECT(d), "sii3112.bar2", &d->mmio, 0xc0, 8); + pci_register_bar(dev, 2, PCI_BASE_ADDRESS_SPACE_IO, mr); + mr = g_new(MemoryRegion, 1); + memory_region_init_alias(mr, OBJECT(d), "sii3112.bar3", &d->mmio, 0xc8, 4); + pci_register_bar(dev, 3, PCI_BASE_ADDRESS_SPACE_IO, mr); + mr = g_new(MemoryRegion, 1); + memory_region_init_alias(mr, OBJECT(d), "sii3112.bar4", &d->mmio, 0, 16); + pci_register_bar(dev, 4, PCI_BASE_ADDRESS_SPACE_IO, mr); + + qdev_init_gpio_in(ds, 
sii3112_set_irq, 2); + for (i = 0; i < 2; i++) { + ide_bus_init(&s->bus[i], sizeof(s->bus[i]), ds, i, 1); + ide_init2(&s->bus[i], qdev_get_gpio_in(ds, i)); + + bmdma_init(&s->bus[i], &s->bmdma[i], s); + s->bmdma[i].bus = &s->bus[i]; + ide_register_restart_cb(&s->bus[i]); + } +} + +static void sii3112_pci_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *pd = PCI_DEVICE_CLASS(klass); + + pd->vendor_id = 0x1095; + pd->device_id = 0x3112; + pd->class_id = PCI_CLASS_STORAGE_RAID; + pd->revision = 1; + pd->realize = sii3112_pci_realize; + dc->reset = sii3112_reset; + dc->desc = "SiI3112A SATA controller"; + set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); +} + +static const TypeInfo sii3112_pci_info = { + .name = TYPE_SII3112_PCI, + .parent = TYPE_PCI_IDE, + .instance_size = sizeof(SiI3112PCIState), + .class_init = sii3112_pci_class_init, +}; + +static void sii3112_register_types(void) +{ + type_register_static(&sii3112_pci_info); +} + +type_init(sii3112_register_types) diff --git a/hw/ide/trace-events b/hw/ide/trace-events new file mode 100644 index 000000000..15d7921f1 --- /dev/null +++ b/hw/ide/trace-events @@ -0,0 +1,124 @@ +# See docs/devel/tracing.rst for syntax documentation. + +# core.c +# portio +ide_ioport_read(uint32_t addr, const char *reg, uint32_t val, void *bus, void *s) "IDE PIO rd @ 0x%"PRIx32" (%s); val 0x%02"PRIx32"; bus %p IDEState %p" +ide_ioport_write(uint32_t addr, const char *reg, uint32_t val, void *bus, void *s) "IDE PIO wr @ 0x%"PRIx32" (%s); val 0x%02"PRIx32"; bus %p IDEState %p" +ide_status_read(uint32_t addr, uint32_t val, void *bus, void *s) "IDE PIO rd @ 0x%"PRIx32" (Alt Status); val 0x%02"PRIx32"; bus %p; IDEState %p" +ide_ctrl_write(uint32_t addr, uint32_t val, void *bus) "IDE PIO wr @ 0x%"PRIx32" (Device Control); val 0x%02"PRIx32"; bus %p" +# Warning: verbose +ide_data_readw(uint32_t addr, uint32_t val, void *bus, void *s) "IDE PIO rd @ 0x%"PRIx32" (Data: Word); val 0x%04"PRIx32"; bus %p; IDEState %p" +ide_data_writew(uint32_t addr, uint32_t val, void *bus, void *s) "IDE PIO wr @ 0x%"PRIx32" (Data: Word); val 0x%04"PRIx32"; bus %p; IDEState %p" +ide_data_readl(uint32_t addr, uint32_t val, void *bus, void *s) "IDE PIO rd @ 0x%"PRIx32" (Data: Long); val 0x%08"PRIx32"; bus %p; IDEState %p" +ide_data_writel(uint32_t addr, uint32_t val, void *bus, void *s) "IDE PIO wr @ 0x%"PRIx32" (Data: Long); val 0x%08"PRIx32"; bus %p; IDEState %p" +# misc +ide_exec_cmd(void *bus, void *state, uint32_t cmd) "IDE exec cmd: bus %p; state %p; cmd 0x%02x" +ide_cancel_dma_sync_buffered(void *fn, void *req) "invoking cb %p of buffered request %p with -ECANCELED" +ide_cancel_dma_sync_remaining(void) "draining all remaining requests" +ide_sector_read(int64_t sector_num, int nsectors) "sector=%"PRId64" nsectors=%d" +ide_sector_write(int64_t sector_num, int nsectors) "sector=%"PRId64" nsectors=%d" +ide_reset(void *s) "IDEstate %p" +ide_bus_reset_aio(void) "aio_cancel" +ide_dma_cb(void *s, int64_t sector_num, int n, const char *dma) "IDEState %p; sector_num=%"PRId64" n=%d cmd=%s" + +# BMDMA HBAs: + +# cmd646.c +bmdma_read_cmd646(uint64_t addr, uint32_t val) "bmdma: readb 0x%"PRIx64" : 0x%02x" +bmdma_write_cmd646(uint64_t addr, uint64_t val) "bmdma: writeb 0x%"PRIx64" : 0x%02"PRIx64 + +# pci.c +bmdma_reset(void) "" +bmdma_cmd_writeb(uint32_t val) "val: 0x%08x" +bmdma_addr_read(uint64_t data) "data: 0x%016"PRIx64 +bmdma_addr_write(uint64_t data) "data: 0x%016"PRIx64 + +# piix.c +bmdma_read(uint64_t addr, uint8_t val) "bmdma: readb 
0x%"PRIx64" : 0x%02x" +bmdma_write(uint64_t addr, uint64_t val) "bmdma: writeb 0x%"PRIx64" : 0x%02"PRIx64 + +# sii3112.c +sii3112_read(int size, uint64_t addr, uint64_t val) "bmdma: read (size %d) 0x%"PRIx64" : 0x%02"PRIx64 +sii3112_write(int size, uint64_t addr, uint64_t val) "bmdma: write (size %d) 0x%"PRIx64" : 0x%02"PRIx64 +sii3112_set_irq(int channel, int level) "channel %d level %d" + +# via.c +bmdma_read_via(uint64_t addr, uint32_t val) "bmdma: readb 0x%"PRIx64" : 0x%02x" +bmdma_write_via(uint64_t addr, uint64_t val) "bmdma: writeb 0x%"PRIx64" : 0x%02"PRIx64 + +# atapi.c +cd_read_sector_sync(int lba) "lba=%d" +cd_read_sector_cb(int lba, int ret) "lba=%d ret=%d" +cd_read_sector(int lba) "lba=%d" +ide_atapi_cmd_error(void *s, int sense_key, int asc) "IDEState: %p; sense=0x%x asc=0x%x" +ide_atapi_cmd_reply_end(void *s, int tx_size, int elem_tx_size, int32_t index) "IDEState %p; reply: tx_size=%d elem_tx_size=%d index=%"PRId32 +ide_atapi_cmd_reply_end_eot(void *s, int status) "IDEState: %p; end of transfer, status=0x%x" +ide_atapi_cmd_reply_end_bcl(void *s, int bcl) "IDEState: %p; byte_count_limit=%d" +ide_atapi_cmd_reply_end_new(void *s, int status) "IDEState: %p; new transfer started, status=0x%x" +ide_atapi_cmd_check_status(void *s) "IDEState: %p" +ide_atapi_cmd_read(void *s, const char *method, int lba, int nb_sectors) "IDEState: %p; read %s: LBA=%d nb_sectors=%d" +ide_atapi_cmd(void *s, uint8_t cmd) "IDEState: %p; cmd: 0x%02x" +ide_atapi_cmd_read_dma_cb_aio(void *s, int lba, int n) "IDEState: %p; aio read: lba=%d n=%d" +# Warning: Verbose +ide_atapi_cmd_packet(void *s, uint16_t limit, const char *packet) "IDEState: %p; limit=0x%x packet: %s" + +# ahci.c +ahci_port_read(void *s, int port, const char *reg, int offset, uint32_t ret) "ahci(%p)[%d]: port read [reg:%s] @ 0x%x: 0x%08x" +ahci_port_read_default(void *s, int port, const char *reg, int offset) "ahci(%p)[%d]: unimplemented port read [reg:%s] @ 0x%x" +ahci_irq_raise(void *s) "ahci(%p): raise irq" +ahci_irq_lower(void *s) "ahci(%p): lower irq" +ahci_check_irq(void *s, uint32_t old, uint32_t new) "ahci(%p): check irq 0x%08x --> 0x%08x" +ahci_trigger_irq(void *s, int port, const char *name, uint32_t val, uint32_t old, uint32_t new, uint32_t effective) "ahci(%p)[%d]: trigger irq +%s (0x%08x); irqstat: 0x%08x --> 0x%08x; effective: 0x%08x" +ahci_port_write(void *s, int port, const char *reg, int offset, uint32_t val) "ahci(%p)[%d]: port write [reg:%s] @ 0x%x: 0x%08x" +ahci_port_write_unimpl(void *s, int port, const char *reg, int offset, uint32_t val) "ahci(%p)[%d]: unimplemented port write [reg:%s] @ 0x%x: 0x%08x" +ahci_mem_read_32(void *s, uint64_t addr, uint32_t val) "ahci(%p): mem read @ 0x%"PRIx64": 0x%08x" +ahci_mem_read_32_default(void *s, uint64_t addr, uint32_t val) "ahci(%p): mem read @ 0x%"PRIx64": 0x%08x" +ahci_mem_read_32_host(void *s, const char *reg, uint64_t addr, uint32_t val) "ahci(%p): mem read [reg:%s] @ 0x%"PRIx64": 0x%08x" +ahci_mem_read_32_host_default(void *s, const char *reg, uint64_t addr) "ahci(%p): unimplemented mem read [reg:%s] @ 0x%"PRIx64 +ahci_mem_read(void *s, unsigned size, uint64_t addr, uint64_t val) "ahci(%p): read%u @ 0x%"PRIx64": 0x%016"PRIx64 +ahci_mem_write(void *s, unsigned size, uint64_t addr, uint64_t val) "ahci(%p): write%u @ 0x%"PRIx64": 0x%016"PRIx64 +ahci_mem_write_host_unimpl(void *s, unsigned size, const char *reg, uint64_t addr) "ahci(%p) unimplemented write%u [reg:%s] @ 0x%"PRIx64 +ahci_mem_write_host(void *s, unsigned size, const char *reg, uint64_t addr, uint64_t val) "ahci(%p) 
write%u [reg:%s] @ 0x%"PRIx64": 0x%016"PRIx64 +ahci_mem_write_unimpl(void *s, unsigned size, uint64_t addr, uint64_t val) "ahci(%p): write%u to unknown register 0x%"PRIx64": 0x%016"PRIx64 +ahci_set_signature(void *s, int port, uint8_t nsector, uint8_t sector, uint8_t lcyl, uint8_t hcyl, uint32_t sig) "ahci(%p)[%d]: set signature sector:0x%02x nsector:0x%02x lcyl:0x%02x hcyl:0x%02x (cumulatively: 0x%08x)" +ahci_reset_port(void *s, int port) "ahci(%p)[%d]: reset port" +ahci_unmap_fis_address_null(void *s, int port) "ahci(%p)[%d]: Attempt to unmap NULL FIS address" +ahci_unmap_clb_address_null(void *s, int port) "ahci(%p)[%d]: Attempt to unmap NULL CLB address" +ahci_populate_sglist(void *s, int port) "ahci(%p)[%d]" +ahci_populate_sglist_no_prdtl(void *s, int port, uint16_t opts) "ahci(%p)[%d]: no sg list given by guest: 0x%04x" +ahci_populate_sglist_no_map(void *s, int port) "ahci(%p)[%d]: DMA mapping failed" +ahci_populate_sglist_short_map(void *s, int port) "ahci(%p)[%d]: mapped less than expected" +ahci_populate_sglist_bad_offset(void *s, int port, int off_idx, int64_t off_pos) "ahci(%p)[%d]: Incorrect offset! off_idx: %d, off_pos: %"PRId64 +ncq_finish(void *s, int port, uint8_t tag) "ahci(%p)[%d][tag:%d]: NCQ transfer finished" +execute_ncq_command_read(void *s, int port, uint8_t tag, int count, int64_t lba) "ahci(%p)[%d][tag:%d]: NCQ reading %d sectors from LBA %"PRId64 +execute_ncq_command_unsup(void *s, int port, uint8_t tag, uint8_t cmd) "ahci(%p)[%d][tag:%d]: error: unsupported NCQ command (0x%02x) received" +process_ncq_command_mismatch(void *s, int port, uint8_t tag, uint8_t slot) "ahci(%p)[%d][tag:%d]: Warning: NCQ slot (%d) did not match the given tag" +process_ncq_command_aux(void *s, int port, uint8_t tag) "ahci(%p)[%d][tag:%d]: Warn: Attempt to use NCQ auxiliary fields" +process_ncq_command_prioicc(void *s, int port, uint8_t tag) "ahci(%p)[%d][tag:%d]: Warn: Unsupported attempt to use PRIO/ICC fields" +process_ncq_command_fua(void *s, int port, uint8_t tag) "ahci(%p)[%d][tag:%d]: Warn: Unsupported attempt to use Force Unit Access" +process_ncq_command_rarc(void *s, int port, uint8_t tag) "ahci(%p)[%d][tag:%d]: Warn: Unsupported attempt to use Rebuild Assist" +process_ncq_command_large(void *s, int port, uint8_t tag, size_t prdtl, size_t size) "ahci(%p)[%d][tag:%d]: Warn: PRDTL (0x%zx) does not match requested size (0x%zx)" +process_ncq_command(void *s, int port, uint8_t tag, uint8_t cmd, uint64_t lba, uint64_t end) "ahci(%p)[%d][tag:%d]: NCQ op 0x%02x on sectors [%"PRId64",%"PRId64"]" +handle_reg_h2d_fis_pmp(void *s, int port, char b0, char b1, char b2) "ahci(%p)[%d]: Port Multiplier not supported, FIS: 0x%02x-%02x-%02x" +handle_reg_h2d_fis_res(void *s, int port, char b0, char b1, char b2) "ahci(%p)[%d]: Reserved flags set in H2D Register FIS, FIS: 0x%02x-%02x-%02x" +handle_cmd_busy(void *s, int port) "ahci(%p)[%d]: engine busy" +handle_cmd_nolist(void *s, int port) "ahci(%p)[%d]: handle_cmd called without s->dev[port].lst" +handle_cmd_badport(void *s, int port) "ahci(%p)[%d]: guest accessed unused port" +handle_cmd_badfis(void *s, int port) "ahci(%p)[%d]: guest provided an invalid cmd FIS" +handle_cmd_badmap(void *s, int port, uint64_t len) "ahci(%p)[%d]: dma_memory_map failed, 0x%02"PRIx64" != 0x80" +handle_cmd_unhandled_fis(void *s, int port, uint8_t b0, uint8_t b1, uint8_t b2) "ahci(%p)[%d]: unhandled FIS type. 
cmd_fis: 0x%02x-%02x-%02x" +ahci_pio_transfer(void *s, int port, const char *rw, uint32_t size, const char *tgt, const char *sgl) "ahci(%p)[%d]: %sing %d bytes on %s w/%s sglist" +ahci_start_dma(void *s, int port) "ahci(%p)[%d]: start dma" +ahci_dma_prepare_buf(void *s, int port, int32_t io_buffer_size, int32_t limit) "ahci(%p)[%d]: prepare buf limit=%"PRId32" prepared=%"PRId32 +ahci_dma_prepare_buf_fail(void *s, int port) "ahci(%p)[%d]: sglist population failed" +ahci_dma_rw_buf(void *s, int port, int l) "ahci(%p)[%d] len=0x%x" +ahci_cmd_done(void *s, int port) "ahci(%p)[%d]: cmd done" +ahci_reset(void *s) "ahci(%p): HBA reset" + +# Warning: Verbose +handle_reg_h2d_fis_dump(void *s, int port, const char *fis) "ahci(%p)[%d]: %s" +handle_cmd_fis_dump(void *s, int port, const char *fis) "ahci(%p)[%d]: %s" + +# ahci-allwinner.c +allwinner_ahci_mem_read(void *s, void *a, uint64_t addr, uint64_t val, unsigned size) "ahci(%p): read a=%p addr=0x%"PRIx64" val=0x%"PRIx64", size=%d" +allwinner_ahci_mem_write(void *s, void *a, uint64_t addr, uint64_t val, unsigned size) "ahci(%p): write a=%p addr=0x%"PRIx64" val=0x%"PRIx64", size=%d" diff --git a/hw/ide/trace.h b/hw/ide/trace.h new file mode 100644 index 000000000..e060e0aef --- /dev/null +++ b/hw/ide/trace.h @@ -0,0 +1 @@ +#include "trace/trace-hw_ide.h" diff --git a/hw/ide/via.c b/hw/ide/via.c new file mode 100644 index 000000000..82def819c --- /dev/null +++ b/hw/ide/via.c @@ -0,0 +1,243 @@ +/* + * QEMU IDE Emulation: PCI VIA82C686B support. + * + * Copyright (c) 2003 Fabrice Bellard + * Copyright (c) 2006 Openedhand Ltd. + * Copyright (c) 2010 Huacai Chen + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */
+
+#include "qemu/osdep.h"
+#include "hw/pci/pci.h"
+#include "migration/vmstate.h"
+#include "qemu/module.h"
+#include "sysemu/dma.h"
+#include "hw/isa/vt82c686.h"
+#include "hw/ide/pci.h"
+#include "trace.h"
+
+static uint64_t bmdma_read(void *opaque, hwaddr addr,
+ unsigned size)
+{
+ BMDMAState *bm = opaque;
+ uint32_t val;
+
+ if (size != 1) {
+ return ((uint64_t)1 << (size * 8)) - 1;
+ }
+
+ switch (addr & 3) {
+ case 0:
+ val = bm->cmd;
+ break;
+ case 2:
+ val = bm->status;
+ break;
+ default:
+ val = 0xff;
+ break;
+ }
+
+ trace_bmdma_read_via(addr, val);
+ return val;
+}
+
+static void bmdma_write(void *opaque, hwaddr addr,
+ uint64_t val, unsigned size)
+{
+ BMDMAState *bm = opaque;
+
+ if (size != 1) {
+ return;
+ }
+
+ trace_bmdma_write_via(addr, val);
+ switch (addr & 3) {
+ case 0:
+ bmdma_cmd_writeb(bm, val);
+ break;
+ case 2:
+ bm->status = (val & 0x60) | (bm->status & 1) | (bm->status & ~val & 0x06);
+ break;
+ default:;
+ }
+}
+
+static const MemoryRegionOps via_bmdma_ops = {
+ .read = bmdma_read,
+ .write = bmdma_write,
+};
+
+static void bmdma_setup_bar(PCIIDEState *d)
+{
+ int i;
+
+ memory_region_init(&d->bmdma_bar, OBJECT(d), "via-bmdma-container", 16);
+ for (i = 0; i < 2; i++) {
+ BMDMAState *bm = &d->bmdma[i];
+
+ memory_region_init_io(&bm->extra_io, OBJECT(d), &via_bmdma_ops, bm,
+ "via-bmdma", 4);
+ memory_region_add_subregion(&d->bmdma_bar, i * 8, &bm->extra_io);
+ memory_region_init_io(&bm->addr_ioport, OBJECT(d),
+ &bmdma_addr_ioport_ops, bm, "bmdma", 4);
+ memory_region_add_subregion(&d->bmdma_bar, i * 8 + 4, &bm->addr_ioport);
+ }
+}
+
+static void via_ide_set_irq(void *opaque, int n, int level)
+{
+ PCIDevice *d = PCI_DEVICE(opaque);
+
+ if (level) {
+ d->config[0x70 + n * 8] |= 0x80;
+ } else {
+ d->config[0x70 + n * 8] &= ~0x80;
+ }
+
+ via_isa_set_irq(pci_get_function_0(d), 14 + n, level);
+}
+
+static void via_ide_reset(DeviceState *dev)
+{
+ PCIIDEState *d = PCI_IDE(dev);
+ PCIDevice *pd = PCI_DEVICE(dev);
+ uint8_t *pci_conf = pd->config;
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ ide_bus_reset(&d->bus[i]);
+ }
+
+ pci_set_word(pci_conf + PCI_COMMAND, PCI_COMMAND_IO | PCI_COMMAND_WAIT);
+ pci_set_word(pci_conf + PCI_STATUS, PCI_STATUS_FAST_BACK |
+ PCI_STATUS_DEVSEL_MEDIUM);
+
+ pci_set_long(pci_conf + PCI_BASE_ADDRESS_0, 0x000001f0);
+ pci_set_long(pci_conf + PCI_BASE_ADDRESS_1, 0x000003f4);
+ pci_set_long(pci_conf + PCI_BASE_ADDRESS_2, 0x00000170);
+ pci_set_long(pci_conf + PCI_BASE_ADDRESS_3, 0x00000374);
+ pci_set_long(pci_conf + PCI_BASE_ADDRESS_4, 0x0000cc01); /* BMIBA: 20-23h */
+ pci_set_long(pci_conf + PCI_INTERRUPT_LINE, 0x0000010e);
+
+ /* IDE chip enable, IDE configuration 1/2, IDE FIFO Configuration */
+ pci_set_long(pci_conf + 0x40, 0x0a090600);
+ /* IDE misc configuration 1/2/3 */
+ pci_set_long(pci_conf + 0x44, 0x00c00068);
+ /* IDE Timing control */
+ pci_set_long(pci_conf + 0x48, 0xa8a8a8a8);
+ /* IDE Address Setup Time */
+ pci_set_long(pci_conf + 0x4c, 0x000000ff);
+ /* UltraDMA Extended Timing Control */
+ pci_set_long(pci_conf + 0x50, 0x07070707);
+ /* UltraDMA FIFO Control */
+ pci_set_long(pci_conf + 0x54, 0x00000004);
+ /* IDE primary sector size */
+ pci_set_long(pci_conf + 0x60, 0x00000200);
+ /* IDE secondary sector size */
+ pci_set_long(pci_conf + 0x68, 0x00000200);
+ /* PCI PM Block */
+ pci_set_long(pci_conf + 0xc0, 0x00020001);
+}
+
+static void via_ide_realize(PCIDevice *dev, Error **errp)
+{
+ PCIIDEState *d = PCI_IDE(dev);
+ DeviceState *ds = DEVICE(dev);
+ uint8_t *pci_conf = dev->config;
+ int i;
+
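+ /*
+ * Config space setup below: prog-if 0x8a advertises a bus-master
+ * capable controller with both channels in legacy (compatibility)
+ * mode; the wmask value 5 leaves only the per-channel native-mode
+ * select bits of PCI_CLASS_PROG guest-writable.
+ */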
pci_config_set_prog_interface(pci_conf, 0x8a); /* legacy mode */ + pci_set_long(pci_conf + PCI_CAPABILITY_LIST, 0x000000c0); + dev->wmask[PCI_INTERRUPT_LINE] = 0; + dev->wmask[PCI_CLASS_PROG] = 5; + + memory_region_init_io(&d->data_bar[0], OBJECT(d), &pci_ide_data_le_ops, + &d->bus[0], "via-ide0-data", 8); + pci_register_bar(dev, 0, PCI_BASE_ADDRESS_SPACE_IO, &d->data_bar[0]); + + memory_region_init_io(&d->cmd_bar[0], OBJECT(d), &pci_ide_cmd_le_ops, + &d->bus[0], "via-ide0-cmd", 4); + pci_register_bar(dev, 1, PCI_BASE_ADDRESS_SPACE_IO, &d->cmd_bar[0]); + + memory_region_init_io(&d->data_bar[1], OBJECT(d), &pci_ide_data_le_ops, + &d->bus[1], "via-ide1-data", 8); + pci_register_bar(dev, 2, PCI_BASE_ADDRESS_SPACE_IO, &d->data_bar[1]); + + memory_region_init_io(&d->cmd_bar[1], OBJECT(d), &pci_ide_cmd_le_ops, + &d->bus[1], "via-ide1-cmd", 4); + pci_register_bar(dev, 3, PCI_BASE_ADDRESS_SPACE_IO, &d->cmd_bar[1]); + + bmdma_setup_bar(d); + pci_register_bar(dev, 4, PCI_BASE_ADDRESS_SPACE_IO, &d->bmdma_bar); + + qdev_init_gpio_in(ds, via_ide_set_irq, 2); + for (i = 0; i < 2; i++) { + ide_bus_init(&d->bus[i], sizeof(d->bus[i]), ds, i, 2); + ide_init2(&d->bus[i], qdev_get_gpio_in(ds, i)); + + bmdma_init(&d->bus[i], &d->bmdma[i], d); + d->bmdma[i].bus = &d->bus[i]; + ide_register_restart_cb(&d->bus[i]); + } +} + +static void via_ide_exitfn(PCIDevice *dev) +{ + PCIIDEState *d = PCI_IDE(dev); + unsigned i; + + for (i = 0; i < 2; ++i) { + memory_region_del_subregion(&d->bmdma_bar, &d->bmdma[i].extra_io); + memory_region_del_subregion(&d->bmdma_bar, &d->bmdma[i].addr_ioport); + } +} + +static void via_ide_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + + dc->reset = via_ide_reset; + dc->vmsd = &vmstate_ide_pci; + /* Reason: only works as function of VIA southbridge */ + dc->user_creatable = false; + + k->realize = via_ide_realize; + k->exit = via_ide_exitfn; + k->vendor_id = PCI_VENDOR_ID_VIA; + k->device_id = PCI_DEVICE_ID_VIA_IDE; + k->revision = 0x06; + k->class_id = PCI_CLASS_STORAGE_IDE; + set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); +} + +static const TypeInfo via_ide_info = { + .name = "via-ide", + .parent = TYPE_PCI_IDE, + .class_init = via_ide_class_init, +}; + +static void via_ide_register_types(void) +{ + type_register_static(&via_ide_info); +} + +type_init(via_ide_register_types) diff --git a/hw/input/Kconfig b/hw/input/Kconfig new file mode 100644 index 000000000..55865bb38 --- /dev/null +++ b/hw/input/Kconfig @@ -0,0 +1,48 @@ +config ADB + bool + +config ADS7846 + bool + +config LM832X + bool + depends on I2C + +config PCKBD + bool + select PS2 + depends on ISA_BUS + +config PL050 + bool + select PS2 + +config PS2 + bool + +config STELLARIS_INPUT + bool + +config TSC2005 + bool + +config VIRTIO_INPUT + bool + default y + depends on VIRTIO + +config VIRTIO_INPUT_HOST + bool + default y + depends on VIRTIO_INPUT && LINUX + +config VHOST_USER_INPUT + bool + default y + depends on VIRTIO_INPUT && VHOST_USER + +config TSC210X + bool + +config LASIPS2 + select PS2 diff --git a/hw/input/adb-internal.h b/hw/input/adb-internal.h new file mode 100644 index 000000000..8d92165c4 --- /dev/null +++ b/hw/input/adb-internal.h @@ -0,0 +1,53 @@ +/* + * QEMU ADB support + * + * Copyright (c) 2004 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, 
including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#ifndef HW_INPUT_ADB_INTERNAL_H +#define HW_INPUT_ADB_INTERNAL_H + +/* ADB commands */ + +#define ADB_BUSRESET 0x00 +#define ADB_FLUSH 0x01 +#define ADB_WRITEREG 0x08 +#define ADB_READREG 0x0c + +/* ADB device commands */ + +#define ADB_CMD_SELF_TEST 0xff +#define ADB_CMD_CHANGE_ID 0xfe +#define ADB_CMD_CHANGE_ID_AND_ACT 0xfd +#define ADB_CMD_CHANGE_ID_AND_ENABLE 0x00 + +/* ADB default device IDs (upper 4 bits of ADB command byte) */ + +#define ADB_DEVID_DONGLE 1 +#define ADB_DEVID_KEYBOARD 2 +#define ADB_DEVID_MOUSE 3 +#define ADB_DEVID_TABLET 4 +#define ADB_DEVID_MODEM 5 +#define ADB_DEVID_MISC 7 + +extern const VMStateDescription vmstate_adb_device; + +#endif diff --git a/hw/input/adb-kbd.c b/hw/input/adb-kbd.c new file mode 100644 index 000000000..a9088c910 --- /dev/null +++ b/hw/input/adb-kbd.c @@ -0,0 +1,408 @@ +/* + * QEMU ADB keyboard support + * + * Copyright (c) 2004 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include "hw/input/adb.h" +#include "migration/vmstate.h" +#include "qemu/module.h" +#include "ui/input.h" +#include "hw/input/adb-keys.h" +#include "adb-internal.h" +#include "trace.h" +#include "qom/object.h" + +OBJECT_DECLARE_TYPE(KBDState, ADBKeyboardClass, ADB_KEYBOARD) + +struct KBDState { + /*< private >*/ + ADBDevice parent_obj; + /*< public >*/ + + uint8_t data[128]; + int rptr, wptr, count; +}; + + +struct ADBKeyboardClass { + /*< private >*/ + ADBDeviceClass parent_class; + /*< public >*/ + + DeviceRealize parent_realize; +}; + +/* The adb keyboard doesn't have every key imaginable */ +#define NO_KEY 0xff + +int qcode_to_adb_keycode[] = { + /* Make sure future additions are automatically set to NO_KEY */ + [0 ... 0xff] = NO_KEY, + + [Q_KEY_CODE_SHIFT] = ADB_KEY_LEFT_SHIFT, + [Q_KEY_CODE_SHIFT_R] = ADB_KEY_RIGHT_SHIFT, + [Q_KEY_CODE_ALT] = ADB_KEY_LEFT_OPTION, + [Q_KEY_CODE_ALT_R] = ADB_KEY_RIGHT_OPTION, + [Q_KEY_CODE_CTRL] = ADB_KEY_LEFT_CONTROL, + [Q_KEY_CODE_CTRL_R] = ADB_KEY_RIGHT_CONTROL, + [Q_KEY_CODE_META_L] = ADB_KEY_COMMAND, + [Q_KEY_CODE_META_R] = ADB_KEY_COMMAND, + [Q_KEY_CODE_SPC] = ADB_KEY_SPACEBAR, + + [Q_KEY_CODE_ESC] = ADB_KEY_ESC, + [Q_KEY_CODE_1] = ADB_KEY_1, + [Q_KEY_CODE_2] = ADB_KEY_2, + [Q_KEY_CODE_3] = ADB_KEY_3, + [Q_KEY_CODE_4] = ADB_KEY_4, + [Q_KEY_CODE_5] = ADB_KEY_5, + [Q_KEY_CODE_6] = ADB_KEY_6, + [Q_KEY_CODE_7] = ADB_KEY_7, + [Q_KEY_CODE_8] = ADB_KEY_8, + [Q_KEY_CODE_9] = ADB_KEY_9, + [Q_KEY_CODE_0] = ADB_KEY_0, + [Q_KEY_CODE_MINUS] = ADB_KEY_MINUS, + [Q_KEY_CODE_EQUAL] = ADB_KEY_EQUAL, + [Q_KEY_CODE_BACKSPACE] = ADB_KEY_DELETE, + [Q_KEY_CODE_TAB] = ADB_KEY_TAB, + [Q_KEY_CODE_Q] = ADB_KEY_Q, + [Q_KEY_CODE_W] = ADB_KEY_W, + [Q_KEY_CODE_E] = ADB_KEY_E, + [Q_KEY_CODE_R] = ADB_KEY_R, + [Q_KEY_CODE_T] = ADB_KEY_T, + [Q_KEY_CODE_Y] = ADB_KEY_Y, + [Q_KEY_CODE_U] = ADB_KEY_U, + [Q_KEY_CODE_I] = ADB_KEY_I, + [Q_KEY_CODE_O] = ADB_KEY_O, + [Q_KEY_CODE_P] = ADB_KEY_P, + [Q_KEY_CODE_BRACKET_LEFT] = ADB_KEY_LEFT_BRACKET, + [Q_KEY_CODE_BRACKET_RIGHT] = ADB_KEY_RIGHT_BRACKET, + [Q_KEY_CODE_RET] = ADB_KEY_RETURN, + [Q_KEY_CODE_A] = ADB_KEY_A, + [Q_KEY_CODE_S] = ADB_KEY_S, + [Q_KEY_CODE_D] = ADB_KEY_D, + [Q_KEY_CODE_F] = ADB_KEY_F, + [Q_KEY_CODE_G] = ADB_KEY_G, + [Q_KEY_CODE_H] = ADB_KEY_H, + [Q_KEY_CODE_J] = ADB_KEY_J, + [Q_KEY_CODE_K] = ADB_KEY_K, + [Q_KEY_CODE_L] = ADB_KEY_L, + [Q_KEY_CODE_SEMICOLON] = ADB_KEY_SEMICOLON, + [Q_KEY_CODE_APOSTROPHE] = ADB_KEY_APOSTROPHE, + [Q_KEY_CODE_GRAVE_ACCENT] = ADB_KEY_GRAVE_ACCENT, + [Q_KEY_CODE_BACKSLASH] = ADB_KEY_BACKSLASH, + [Q_KEY_CODE_Z] = ADB_KEY_Z, + [Q_KEY_CODE_X] = ADB_KEY_X, + [Q_KEY_CODE_C] = ADB_KEY_C, + [Q_KEY_CODE_V] = ADB_KEY_V, + [Q_KEY_CODE_B] = ADB_KEY_B, + [Q_KEY_CODE_N] = ADB_KEY_N, + [Q_KEY_CODE_M] = ADB_KEY_M, + [Q_KEY_CODE_COMMA] = ADB_KEY_COMMA, + [Q_KEY_CODE_DOT] = ADB_KEY_PERIOD, + [Q_KEY_CODE_SLASH] = ADB_KEY_FORWARD_SLASH, + [Q_KEY_CODE_ASTERISK] = ADB_KEY_KP_MULTIPLY, + [Q_KEY_CODE_CAPS_LOCK] = ADB_KEY_CAPS_LOCK, + + [Q_KEY_CODE_F1] = ADB_KEY_F1, + [Q_KEY_CODE_F2] = ADB_KEY_F2, + [Q_KEY_CODE_F3] = ADB_KEY_F3, + [Q_KEY_CODE_F4] = ADB_KEY_F4, + [Q_KEY_CODE_F5] = ADB_KEY_F5, + [Q_KEY_CODE_F6] = ADB_KEY_F6, + [Q_KEY_CODE_F7] = ADB_KEY_F7, + [Q_KEY_CODE_F8] = ADB_KEY_F8, + [Q_KEY_CODE_F9] = ADB_KEY_F9, + [Q_KEY_CODE_F10] = ADB_KEY_F10, + [Q_KEY_CODE_F11] = ADB_KEY_F11, + [Q_KEY_CODE_F12] = ADB_KEY_F12, + [Q_KEY_CODE_PRINT] = ADB_KEY_F13, + [Q_KEY_CODE_SYSRQ] = ADB_KEY_F13, + [Q_KEY_CODE_SCROLL_LOCK] = ADB_KEY_F14, + [Q_KEY_CODE_PAUSE] = ADB_KEY_F15, + + 
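+ /* numeric keypad; Num Lock is mapped to the ADB keypad Clear key */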
[Q_KEY_CODE_NUM_LOCK] = ADB_KEY_KP_CLEAR, + [Q_KEY_CODE_KP_EQUALS] = ADB_KEY_KP_EQUAL, + [Q_KEY_CODE_KP_DIVIDE] = ADB_KEY_KP_DIVIDE, + [Q_KEY_CODE_KP_MULTIPLY] = ADB_KEY_KP_MULTIPLY, + [Q_KEY_CODE_KP_SUBTRACT] = ADB_KEY_KP_SUBTRACT, + [Q_KEY_CODE_KP_ADD] = ADB_KEY_KP_PLUS, + [Q_KEY_CODE_KP_ENTER] = ADB_KEY_KP_ENTER, + [Q_KEY_CODE_KP_DECIMAL] = ADB_KEY_KP_PERIOD, + [Q_KEY_CODE_KP_0] = ADB_KEY_KP_0, + [Q_KEY_CODE_KP_1] = ADB_KEY_KP_1, + [Q_KEY_CODE_KP_2] = ADB_KEY_KP_2, + [Q_KEY_CODE_KP_3] = ADB_KEY_KP_3, + [Q_KEY_CODE_KP_4] = ADB_KEY_KP_4, + [Q_KEY_CODE_KP_5] = ADB_KEY_KP_5, + [Q_KEY_CODE_KP_6] = ADB_KEY_KP_6, + [Q_KEY_CODE_KP_7] = ADB_KEY_KP_7, + [Q_KEY_CODE_KP_8] = ADB_KEY_KP_8, + [Q_KEY_CODE_KP_9] = ADB_KEY_KP_9, + + [Q_KEY_CODE_UP] = ADB_KEY_UP, + [Q_KEY_CODE_DOWN] = ADB_KEY_DOWN, + [Q_KEY_CODE_LEFT] = ADB_KEY_LEFT, + [Q_KEY_CODE_RIGHT] = ADB_KEY_RIGHT, + + [Q_KEY_CODE_HELP] = ADB_KEY_HELP, + [Q_KEY_CODE_INSERT] = ADB_KEY_HELP, + [Q_KEY_CODE_DELETE] = ADB_KEY_FORWARD_DELETE, + [Q_KEY_CODE_HOME] = ADB_KEY_HOME, + [Q_KEY_CODE_END] = ADB_KEY_END, + [Q_KEY_CODE_PGUP] = ADB_KEY_PAGE_UP, + [Q_KEY_CODE_PGDN] = ADB_KEY_PAGE_DOWN, + + [Q_KEY_CODE_POWER] = ADB_KEY_POWER +}; + +static void adb_kbd_put_keycode(void *opaque, int keycode) +{ + KBDState *s = opaque; + + if (s->count < sizeof(s->data)) { + s->data[s->wptr] = keycode; + if (++s->wptr == sizeof(s->data)) { + s->wptr = 0; + } + s->count++; + } +} + +static int adb_kbd_poll(ADBDevice *d, uint8_t *obuf) +{ + KBDState *s = ADB_KEYBOARD(d); + int keycode; + + if (s->count == 0) { + return 0; + } + keycode = s->data[s->rptr]; + s->rptr++; + if (s->rptr == sizeof(s->data)) { + s->rptr = 0; + } + s->count--; + /* + * The power key is the only two byte value key, so it is a special case. + * Since 0x7f is not a used keycode for ADB we overload it to indicate the + * power button when we're storing keycodes in our internal buffer, and + * expand it out to two bytes when we send to the guest. + */ + if (keycode == 0x7f) { + obuf[0] = 0x7f; + obuf[1] = 0x7f; + } else { + obuf[0] = keycode; + /* NOTE: the power key key-up is the two byte sequence 0xff 0xff; + * otherwise we could in theory send a second keycode in the second + * byte, but choose not to bother. 
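+ * (0xff is the power keycode 0x7f with the release bit 0x80 set,
+ * which is also why the key-up sequence reads 0xff 0xff.)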
+ */ + obuf[1] = 0xff; + } + + return 2; +} + +static int adb_kbd_request(ADBDevice *d, uint8_t *obuf, + const uint8_t *buf, int len) +{ + KBDState *s = ADB_KEYBOARD(d); + int cmd, reg, olen; + + if ((buf[0] & 0x0f) == ADB_FLUSH) { + /* flush keyboard fifo */ + s->wptr = s->rptr = s->count = 0; + return 0; + } + + cmd = buf[0] & 0xc; + reg = buf[0] & 0x3; + olen = 0; + switch (cmd) { + case ADB_WRITEREG: + trace_adb_device_kbd_writereg(reg, buf[1]); + switch (reg) { + case 2: + /* LED status */ + break; + case 3: + switch (buf[2]) { + case ADB_CMD_SELF_TEST: + break; + case ADB_CMD_CHANGE_ID: + case ADB_CMD_CHANGE_ID_AND_ACT: + case ADB_CMD_CHANGE_ID_AND_ENABLE: + d->devaddr = buf[1] & 0xf; + trace_adb_device_kbd_request_change_addr(d->devaddr); + break; + default: + d->devaddr = buf[1] & 0xf; + /* + * we support handlers: + * 1: Apple Standard Keyboard + * 2: Apple Extended Keyboard (LShift = RShift) + * 3: Apple Extended Keyboard (LShift != RShift) + */ + if (buf[2] == 1 || buf[2] == 2 || buf[2] == 3) { + d->handler = buf[2]; + } + + trace_adb_device_kbd_request_change_addr_and_handler( + d->devaddr, d->handler); + break; + } + } + break; + case ADB_READREG: + switch (reg) { + case 0: + olen = adb_kbd_poll(d, obuf); + break; + case 1: + break; + case 2: + obuf[0] = 0x00; /* XXX: check this */ + obuf[1] = 0x07; /* led status */ + olen = 2; + break; + case 3: + obuf[0] = d->devaddr; + obuf[1] = d->handler; + olen = 2; + break; + } + trace_adb_device_kbd_readreg(reg, obuf[0], obuf[1]); + break; + } + return olen; +} + +static bool adb_kbd_has_data(ADBDevice *d) +{ + KBDState *s = ADB_KEYBOARD(d); + + return s->count > 0; +} + +/* This is where keyboard events enter this file */ +static void adb_keyboard_event(DeviceState *dev, QemuConsole *src, + InputEvent *evt) +{ + KBDState *s = (KBDState *)dev; + int qcode, keycode; + + qcode = qemu_input_key_value_to_qcode(evt->u.key.data->key); + if (qcode >= ARRAY_SIZE(qcode_to_adb_keycode)) { + return; + } + /* FIXME: take handler into account when translating qcode */ + keycode = qcode_to_adb_keycode[qcode]; + if (keycode == NO_KEY) { /* We don't want to send this to the guest */ + trace_adb_device_kbd_no_key(); + return; + } + if (evt->u.key.data->down == false) { /* if key release event */ + keycode = keycode | 0x80; /* create keyboard break code */ + } + + adb_kbd_put_keycode(s, keycode); +} + +static const VMStateDescription vmstate_adb_kbd = { + .name = "adb_kbd", + .version_id = 2, + .minimum_version_id = 2, + .fields = (VMStateField[]) { + VMSTATE_STRUCT(parent_obj, KBDState, 0, vmstate_adb_device, ADBDevice), + VMSTATE_BUFFER(data, KBDState), + VMSTATE_INT32(rptr, KBDState), + VMSTATE_INT32(wptr, KBDState), + VMSTATE_INT32(count, KBDState), + VMSTATE_END_OF_LIST() + } +}; + +static void adb_kbd_reset(DeviceState *dev) +{ + ADBDevice *d = ADB_DEVICE(dev); + KBDState *s = ADB_KEYBOARD(dev); + + d->handler = 1; + d->devaddr = ADB_DEVID_KEYBOARD; + memset(s->data, 0, sizeof(s->data)); + s->rptr = 0; + s->wptr = 0; + s->count = 0; +} + +static QemuInputHandler adb_keyboard_handler = { + .name = "QEMU ADB Keyboard", + .mask = INPUT_EVENT_MASK_KEY, + .event = adb_keyboard_event, +}; + +static void adb_kbd_realizefn(DeviceState *dev, Error **errp) +{ + ADBKeyboardClass *akc = ADB_KEYBOARD_GET_CLASS(dev); + akc->parent_realize(dev, errp); + qemu_input_handler_register(dev, &adb_keyboard_handler); +} + +static void adb_kbd_initfn(Object *obj) +{ + ADBDevice *d = ADB_DEVICE(obj); + + d->devaddr = ADB_DEVID_KEYBOARD; +} + +static void 
adb_kbd_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + ADBDeviceClass *adc = ADB_DEVICE_CLASS(oc); + ADBKeyboardClass *akc = ADB_KEYBOARD_CLASS(oc); + + device_class_set_parent_realize(dc, adb_kbd_realizefn, + &akc->parent_realize); + set_bit(DEVICE_CATEGORY_INPUT, dc->categories); + + adc->devreq = adb_kbd_request; + adc->devhasdata = adb_kbd_has_data; + dc->reset = adb_kbd_reset; + dc->vmsd = &vmstate_adb_kbd; +} + +static const TypeInfo adb_kbd_type_info = { + .name = TYPE_ADB_KEYBOARD, + .parent = TYPE_ADB_DEVICE, + .instance_size = sizeof(KBDState), + .instance_init = adb_kbd_initfn, + .class_init = adb_kbd_class_init, + .class_size = sizeof(ADBKeyboardClass), +}; + +static void adb_kbd_register_types(void) +{ + type_register_static(&adb_kbd_type_info); +} + +type_init(adb_kbd_register_types) diff --git a/hw/input/adb-mouse.c b/hw/input/adb-mouse.c new file mode 100644 index 000000000..e6b341f02 --- /dev/null +++ b/hw/input/adb-mouse.c @@ -0,0 +1,279 @@ +/* + * QEMU ADB mouse support + * + * Copyright (c) 2004 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include "ui/console.h" +#include "hw/input/adb.h" +#include "migration/vmstate.h" +#include "qemu/module.h" +#include "adb-internal.h" +#include "trace.h" +#include "qom/object.h" + +OBJECT_DECLARE_TYPE(MouseState, ADBMouseClass, ADB_MOUSE) + +struct MouseState { + /*< public >*/ + ADBDevice parent_obj; + /*< private >*/ + + int buttons_state, last_buttons_state; + int dx, dy, dz; +}; + + +struct ADBMouseClass { + /*< public >*/ + ADBDeviceClass parent_class; + /*< private >*/ + + DeviceRealize parent_realize; +}; + +static void adb_mouse_event(void *opaque, + int dx1, int dy1, int dz1, int buttons_state) +{ + MouseState *s = opaque; + + s->dx += dx1; + s->dy += dy1; + s->dz += dz1; + s->buttons_state = buttons_state; +} + + +static int adb_mouse_poll(ADBDevice *d, uint8_t *obuf) +{ + MouseState *s = ADB_MOUSE(d); + int dx, dy; + + if (s->last_buttons_state == s->buttons_state && + s->dx == 0 && s->dy == 0) { + return 0; + } + + dx = s->dx; + if (dx < -63) { + dx = -63; + } else if (dx > 63) { + dx = 63; + } + + dy = s->dy; + if (dy < -63) { + dy = -63; + } else if (dy > 63) { + dy = 63; + } + + s->dx -= dx; + s->dy -= dy; + s->last_buttons_state = s->buttons_state; + + dx &= 0x7f; + dy &= 0x7f; + + if (!(s->buttons_state & MOUSE_EVENT_LBUTTON)) { + dy |= 0x80; + } + if (!(s->buttons_state & MOUSE_EVENT_RBUTTON)) { + dx |= 0x80; + } + + obuf[0] = dy; + obuf[1] = dx; + return 2; +} + +static int adb_mouse_request(ADBDevice *d, uint8_t *obuf, + const uint8_t *buf, int len) +{ + MouseState *s = ADB_MOUSE(d); + int cmd, reg, olen; + + if ((buf[0] & 0x0f) == ADB_FLUSH) { + /* flush mouse fifo */ + s->buttons_state = s->last_buttons_state; + s->dx = 0; + s->dy = 0; + s->dz = 0; + trace_adb_device_mouse_flush(); + return 0; + } + + cmd = buf[0] & 0xc; + reg = buf[0] & 0x3; + olen = 0; + switch (cmd) { + case ADB_WRITEREG: + trace_adb_device_mouse_writereg(reg, buf[1]); + switch (reg) { + case 2: + break; + case 3: + /* + * MacOS 9 has a bug in its ADB driver whereby after configuring + * the ADB bus devices it sends another write of invalid length + * to reg 3. Make sure we ignore it to prevent an address clash + * with the previous device. 
+ */ + if (len != 3) { + return 0; + } + + switch (buf[2]) { + case ADB_CMD_SELF_TEST: + break; + case ADB_CMD_CHANGE_ID: + case ADB_CMD_CHANGE_ID_AND_ACT: + case ADB_CMD_CHANGE_ID_AND_ENABLE: + d->devaddr = buf[1] & 0xf; + trace_adb_device_mouse_request_change_addr(d->devaddr); + break; + default: + d->devaddr = buf[1] & 0xf; + /* + * we support handlers: + * 0x01: Classic Apple Mouse Protocol / 100 cpi operations + * 0x02: Classic Apple Mouse Protocol / 200 cpi operations + * we don't support handlers (at least): + * 0x03: Mouse systems A3 trackball + * 0x04: Extended Apple Mouse Protocol + * 0x2f: Microspeed mouse + * 0x42: Macally + * 0x5f: Microspeed mouse + * 0x66: Microspeed mouse + */ + if (buf[2] == 1 || buf[2] == 2) { + d->handler = buf[2]; + } + + trace_adb_device_mouse_request_change_addr_and_handler( + d->devaddr, d->handler); + break; + } + } + break; + case ADB_READREG: + switch (reg) { + case 0: + olen = adb_mouse_poll(d, obuf); + break; + case 1: + break; + case 3: + obuf[0] = d->devaddr; + obuf[1] = d->handler; + olen = 2; + break; + } + trace_adb_device_mouse_readreg(reg, obuf[0], obuf[1]); + break; + } + return olen; +} + +static bool adb_mouse_has_data(ADBDevice *d) +{ + MouseState *s = ADB_MOUSE(d); + + return !(s->last_buttons_state == s->buttons_state && + s->dx == 0 && s->dy == 0); +} + +static void adb_mouse_reset(DeviceState *dev) +{ + ADBDevice *d = ADB_DEVICE(dev); + MouseState *s = ADB_MOUSE(dev); + + d->handler = 2; + d->devaddr = ADB_DEVID_MOUSE; + s->last_buttons_state = s->buttons_state = 0; + s->dx = s->dy = s->dz = 0; +} + +static const VMStateDescription vmstate_adb_mouse = { + .name = "adb_mouse", + .version_id = 2, + .minimum_version_id = 2, + .fields = (VMStateField[]) { + VMSTATE_STRUCT(parent_obj, MouseState, 0, vmstate_adb_device, + ADBDevice), + VMSTATE_INT32(buttons_state, MouseState), + VMSTATE_INT32(last_buttons_state, MouseState), + VMSTATE_INT32(dx, MouseState), + VMSTATE_INT32(dy, MouseState), + VMSTATE_INT32(dz, MouseState), + VMSTATE_END_OF_LIST() + } +}; + +static void adb_mouse_realizefn(DeviceState *dev, Error **errp) +{ + MouseState *s = ADB_MOUSE(dev); + ADBMouseClass *amc = ADB_MOUSE_GET_CLASS(dev); + + amc->parent_realize(dev, errp); + + qemu_add_mouse_event_handler(adb_mouse_event, s, 0, "QEMU ADB Mouse"); +} + +static void adb_mouse_initfn(Object *obj) +{ + ADBDevice *d = ADB_DEVICE(obj); + + d->devaddr = ADB_DEVID_MOUSE; +} + +static void adb_mouse_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + ADBDeviceClass *adc = ADB_DEVICE_CLASS(oc); + ADBMouseClass *amc = ADB_MOUSE_CLASS(oc); + + device_class_set_parent_realize(dc, adb_mouse_realizefn, + &amc->parent_realize); + set_bit(DEVICE_CATEGORY_INPUT, dc->categories); + + adc->devreq = adb_mouse_request; + adc->devhasdata = adb_mouse_has_data; + dc->reset = adb_mouse_reset; + dc->vmsd = &vmstate_adb_mouse; +} + +static const TypeInfo adb_mouse_type_info = { + .name = TYPE_ADB_MOUSE, + .parent = TYPE_ADB_DEVICE, + .instance_size = sizeof(MouseState), + .instance_init = adb_mouse_initfn, + .class_init = adb_mouse_class_init, + .class_size = sizeof(ADBMouseClass), +}; + +static void adb_mouse_register_types(void) +{ + type_register_static(&adb_mouse_type_info); +} + +type_init(adb_mouse_register_types) diff --git a/hw/input/adb.c b/hw/input/adb.c new file mode 100644 index 000000000..84331b9fc --- /dev/null +++ b/hw/input/adb.c @@ -0,0 +1,324 @@ +/* + * QEMU ADB support + * + * Copyright (c) 2004 Fabrice Bellard + * + * Permission is hereby granted, 
free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "hw/input/adb.h" +#include "hw/qdev-properties.h" +#include "migration/vmstate.h" +#include "qemu/module.h" +#include "qemu/timer.h" +#include "adb-internal.h" +#include "trace.h" + +/* error codes */ +#define ADB_RET_NOTPRESENT (-2) + +static const char *adb_commands[] = { + "RESET", "FLUSH", "(Reserved 0x2)", "(Reserved 0x3)", + "Reserved (0x4)", "(Reserved 0x5)", "(Reserved 0x6)", "(Reserved 0x7)", + "LISTEN r0", "LISTEN r1", "LISTEN r2", "LISTEN r3", + "TALK r0", "TALK r1", "TALK r2", "TALK r3", +}; + +static void adb_device_reset(ADBDevice *d) +{ + qdev_reset_all(DEVICE(d)); +} + +static int do_adb_request(ADBBusState *s, uint8_t *obuf, const uint8_t *buf, + int len) +{ + ADBDevice *d; + ADBDeviceClass *adc; + int devaddr, cmd, olen, i; + + cmd = buf[0] & 0xf; + if (cmd == ADB_BUSRESET) { + for (i = 0; i < s->nb_devices; i++) { + d = s->devices[i]; + adb_device_reset(d); + } + s->status = 0; + return 0; + } + + s->pending = 0; + for (i = 0; i < s->nb_devices; i++) { + d = s->devices[i]; + adc = ADB_DEVICE_GET_CLASS(d); + + if (adc->devhasdata(d)) { + s->pending |= (1 << d->devaddr); + } + } + + s->status = 0; + devaddr = buf[0] >> 4; + for (i = 0; i < s->nb_devices; i++) { + d = s->devices[i]; + adc = ADB_DEVICE_GET_CLASS(d); + + if (d->devaddr == devaddr) { + olen = adc->devreq(d, obuf, buf, len); + if (!olen) { + s->status |= ADB_STATUS_BUSTIMEOUT; + } + return olen; + } + } + + s->status |= ADB_STATUS_BUSTIMEOUT; + return ADB_RET_NOTPRESENT; +} + +int adb_request(ADBBusState *s, uint8_t *obuf, const uint8_t *buf, int len) +{ + int ret; + + trace_adb_bus_request(buf[0] >> 4, adb_commands[buf[0] & 0xf], len); + + assert(s->autopoll_blocked); + + ret = do_adb_request(s, obuf, buf, len); + + trace_adb_bus_request_done(buf[0] >> 4, adb_commands[buf[0] & 0xf], ret); + return ret; +} + +int adb_poll(ADBBusState *s, uint8_t *obuf, uint16_t poll_mask) +{ + ADBDevice *d; + int olen, i; + uint8_t buf[1]; + + olen = 0; + for (i = 0; i < s->nb_devices; i++) { + if (s->poll_index >= s->nb_devices) { + s->poll_index = 0; + } + d = s->devices[s->poll_index]; + if ((1 << d->devaddr) & poll_mask) { + buf[0] = ADB_READREG | (d->devaddr << 4); + olen = do_adb_request(s, obuf + 1, buf, 1); + /* if there is data, we poll again the same device */ + if (olen > 0) { + s->status |= ADB_STATUS_POLLREPLY; + obuf[0] = buf[0]; + olen++; + return olen; + } + } + s->poll_index++; + } + return olen; +} + +void 
adb_set_autopoll_enabled(ADBBusState *s, bool enabled) +{ + if (s->autopoll_enabled != enabled) { + s->autopoll_enabled = enabled; + if (s->autopoll_enabled) { + timer_mod(s->autopoll_timer, + qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + + s->autopoll_rate_ms); + } else { + timer_del(s->autopoll_timer); + } + } +} + +void adb_set_autopoll_rate_ms(ADBBusState *s, int rate_ms) +{ + s->autopoll_rate_ms = rate_ms; + + if (s->autopoll_enabled) { + timer_mod(s->autopoll_timer, + qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + + s->autopoll_rate_ms); + } +} + +void adb_set_autopoll_mask(ADBBusState *s, uint16_t mask) +{ + if (s->autopoll_mask != mask) { + s->autopoll_mask = mask; + if (s->autopoll_enabled && s->autopoll_mask) { + timer_mod(s->autopoll_timer, + qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + + s->autopoll_rate_ms); + } else { + timer_del(s->autopoll_timer); + } + } +} + +void adb_autopoll_block(ADBBusState *s) +{ + s->autopoll_blocked = true; + trace_adb_bus_autopoll_block(s->autopoll_blocked); + + if (s->autopoll_enabled) { + timer_del(s->autopoll_timer); + } +} + +void adb_autopoll_unblock(ADBBusState *s) +{ + s->autopoll_blocked = false; + trace_adb_bus_autopoll_block(s->autopoll_blocked); + + if (s->autopoll_enabled) { + timer_mod(s->autopoll_timer, + qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + + s->autopoll_rate_ms); + } +} + +static void adb_autopoll(void *opaque) +{ + ADBBusState *s = opaque; + + if (!s->autopoll_blocked) { + trace_adb_bus_autopoll_cb(s->autopoll_mask); + s->autopoll_cb(s->autopoll_cb_opaque); + trace_adb_bus_autopoll_cb_done(s->autopoll_mask); + } + + timer_mod(s->autopoll_timer, + qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + + s->autopoll_rate_ms); +} + +void adb_register_autopoll_callback(ADBBusState *s, void (*cb)(void *opaque), + void *opaque) +{ + s->autopoll_cb = cb; + s->autopoll_cb_opaque = opaque; +} + +static const VMStateDescription vmstate_adb_bus = { + .name = "adb_bus", + .version_id = 0, + .minimum_version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_TIMER_PTR(autopoll_timer, ADBBusState), + VMSTATE_BOOL(autopoll_enabled, ADBBusState), + VMSTATE_UINT8(autopoll_rate_ms, ADBBusState), + VMSTATE_UINT16(autopoll_mask, ADBBusState), + VMSTATE_BOOL(autopoll_blocked, ADBBusState), + VMSTATE_END_OF_LIST() + } +}; + +static void adb_bus_reset(BusState *qbus) +{ + ADBBusState *adb_bus = ADB_BUS(qbus); + + adb_bus->autopoll_enabled = false; + adb_bus->autopoll_mask = 0xffff; + adb_bus->autopoll_rate_ms = 20; +} + +static void adb_bus_realize(BusState *qbus, Error **errp) +{ + ADBBusState *adb_bus = ADB_BUS(qbus); + + adb_bus->autopoll_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL, adb_autopoll, + adb_bus); + + vmstate_register(NULL, -1, &vmstate_adb_bus, adb_bus); +} + +static void adb_bus_unrealize(BusState *qbus) +{ + ADBBusState *adb_bus = ADB_BUS(qbus); + + timer_del(adb_bus->autopoll_timer); + + vmstate_unregister(NULL, &vmstate_adb_bus, adb_bus); +} + +static void adb_bus_class_init(ObjectClass *klass, void *data) +{ + BusClass *k = BUS_CLASS(klass); + + k->realize = adb_bus_realize; + k->unrealize = adb_bus_unrealize; + k->reset = adb_bus_reset; +} + +static const TypeInfo adb_bus_type_info = { + .name = TYPE_ADB_BUS, + .parent = TYPE_BUS, + .instance_size = sizeof(ADBBusState), + .class_init = adb_bus_class_init, +}; + +const VMStateDescription vmstate_adb_device = { + .name = "adb_device", + .version_id = 0, + .minimum_version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_INT32(devaddr, ADBDevice), + VMSTATE_INT32(handler, ADBDevice), + VMSTATE_END_OF_LIST() + } +}; + 
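+/*
+ * Illustrative sketch only, not part of this patch: how a host
+ * controller might issue a TALK register-3 request through
+ * adb_request().  The command byte packs the device address into the
+ * high nibble and the command/register pair into the low nibble,
+ * matching what do_adb_request() decodes above.  adb_request()
+ * asserts that autopolling is blocked, so the call is bracketed with
+ * adb_autopoll_block()/adb_autopoll_unblock().  The function name is
+ * hypothetical.
+ */
+#if 0
+static int example_talk_r3(ADBBusState *bus, int devaddr, uint8_t *obuf)
+{
+    uint8_t buf[1];
+    int olen;
+
+    buf[0] = (devaddr << 4) | ADB_READREG | 3;  /* TALK r3 */
+    adb_autopoll_block(bus);
+    olen = adb_request(bus, obuf, buf, sizeof(buf));
+    adb_autopoll_unblock(bus);
+
+    /* On success olen == 2: obuf[0] is the address, obuf[1] the handler. */
+    return olen;
+}
+#endif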
+static void adb_device_realizefn(DeviceState *dev, Error **errp) +{ + ADBDevice *d = ADB_DEVICE(dev); + ADBBusState *bus = ADB_BUS(qdev_get_parent_bus(dev)); + + if (bus->nb_devices >= MAX_ADB_DEVICES) { + return; + } + + bus->devices[bus->nb_devices++] = d; +} + +static void adb_device_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + + dc->realize = adb_device_realizefn; + dc->bus_type = TYPE_ADB_BUS; +} + +static const TypeInfo adb_device_type_info = { + .name = TYPE_ADB_DEVICE, + .parent = TYPE_DEVICE, + .class_size = sizeof(ADBDeviceClass), + .instance_size = sizeof(ADBDevice), + .abstract = true, + .class_init = adb_device_class_init, +}; + +static void adb_register_types(void) +{ + type_register_static(&adb_bus_type_info); + type_register_static(&adb_device_type_info); +} + +type_init(adb_register_types) diff --git a/hw/input/ads7846.c b/hw/input/ads7846.c new file mode 100644 index 000000000..1d4e04a2d --- /dev/null +++ b/hw/input/ads7846.c @@ -0,0 +1,186 @@ +/* + * TI ADS7846 / TSC2046 chip emulation. + * + * Copyright (c) 2006 Openedhand Ltd. + * Written by Andrzej Zaborowski + * + * This code is licensed under the GNU GPL v2. + * + * Contributions after 2012-01-13 are licensed under the terms of the + * GNU GPL, version 2 or (at your option) any later version. + */ + +#include "qemu/osdep.h" +#include "hw/irq.h" +#include "hw/ssi/ssi.h" +#include "migration/vmstate.h" +#include "qemu/module.h" +#include "ui/console.h" +#include "qom/object.h" + +struct ADS7846State { + SSIPeripheral ssidev; + qemu_irq interrupt; + + int input[8]; + int pressure; + int noise; + + int cycle; + int output; +}; + +#define TYPE_ADS7846 "ads7846" +OBJECT_DECLARE_SIMPLE_TYPE(ADS7846State, ADS7846) + +/* Control-byte bitfields */ +#define CB_PD0 (1 << 0) +#define CB_PD1 (1 << 1) +#define CB_SER (1 << 2) +#define CB_MODE (1 << 3) +#define CB_A0 (1 << 4) +#define CB_A1 (1 << 5) +#define CB_A2 (1 << 6) +#define CB_START (1 << 7) + +#define X_AXIS_DMAX 3470 +#define X_AXIS_MIN 290 +#define Y_AXIS_DMAX 3450 +#define Y_AXIS_MIN 200 + +#define ADS_VBAT 2000 +#define ADS_VAUX 2000 +#define ADS_TEMP0 2000 +#define ADS_TEMP1 3000 +#define ADS_XPOS(x, y) (X_AXIS_MIN + ((X_AXIS_DMAX * (x)) >> 15)) +#define ADS_YPOS(x, y) (Y_AXIS_MIN + ((Y_AXIS_DMAX * (y)) >> 15)) +#define ADS_Z1POS(x, y) 600 +#define ADS_Z2POS(x, y) (600 + 6000 / ADS_XPOS(x, y)) + +static void ads7846_int_update(ADS7846State *s) +{ + if (s->interrupt) + qemu_set_irq(s->interrupt, s->pressure == 0); +} + +static uint32_t ads7846_transfer(SSIPeripheral *dev, uint32_t value) +{ + ADS7846State *s = ADS7846(dev); + + switch (s->cycle ++) { + case 0: + if (!(value & CB_START)) { + s->cycle = 0; + break; + } + + s->output = s->input[(value >> 4) & 7]; + + /* Imitate the ADC noise, some drivers expect this. 
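+         * A small counter below is stepped by 3 modulo 8 and XORed
+         * with a per-channel constant before being added to the
+         * conversion result, so consecutive conversions differ.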
*/ + s->noise = (s->noise + 3) & 7; + switch ((value >> 4) & 7) { + case 1: s->output += s->noise ^ 2; break; + case 3: s->output += s->noise ^ 0; break; + case 4: s->output += s->noise ^ 7; break; + case 5: s->output += s->noise ^ 5; break; + } + + if (value & CB_MODE) + s->output >>= 4; /* 8 bits instead of 12 */ + + break; + case 1: + s->cycle = 0; + break; + } + return s->output; +} + +static void ads7846_ts_event(void *opaque, + int x, int y, int z, int buttons_state) +{ + ADS7846State *s = opaque; + + if (buttons_state) { + x = 0x7fff - x; + s->input[1] = ADS_XPOS(x, y); + s->input[3] = ADS_Z1POS(x, y); + s->input[4] = ADS_Z2POS(x, y); + s->input[5] = ADS_YPOS(x, y); + } + + if (s->pressure == !buttons_state) { + s->pressure = !!buttons_state; + + ads7846_int_update(s); + } +} + +static int ads7856_post_load(void *opaque, int version_id) +{ + ADS7846State *s = opaque; + + s->pressure = 0; + ads7846_int_update(s); + return 0; +} + +static const VMStateDescription vmstate_ads7846 = { + .name = "ads7846", + .version_id = 1, + .minimum_version_id = 1, + .post_load = ads7856_post_load, + .fields = (VMStateField[]) { + VMSTATE_SSI_PERIPHERAL(ssidev, ADS7846State), + VMSTATE_INT32_ARRAY(input, ADS7846State, 8), + VMSTATE_INT32(noise, ADS7846State), + VMSTATE_INT32(cycle, ADS7846State), + VMSTATE_INT32(output, ADS7846State), + VMSTATE_END_OF_LIST() + } +}; + +static void ads7846_realize(SSIPeripheral *d, Error **errp) +{ + DeviceState *dev = DEVICE(d); + ADS7846State *s = ADS7846(d); + + qdev_init_gpio_out(dev, &s->interrupt, 1); + + s->input[0] = ADS_TEMP0; /* TEMP0 */ + s->input[2] = ADS_VBAT; /* VBAT */ + s->input[6] = ADS_VAUX; /* VAUX */ + s->input[7] = ADS_TEMP1; /* TEMP1 */ + + /* We want absolute coordinates */ + qemu_add_mouse_event_handler(ads7846_ts_event, s, 1, + "QEMU ADS7846-driven Touchscreen"); + + ads7846_int_update(s); + + vmstate_register(NULL, VMSTATE_INSTANCE_ID_ANY, &vmstate_ads7846, s); +} + +static void ads7846_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + SSIPeripheralClass *k = SSI_PERIPHERAL_CLASS(klass); + + k->realize = ads7846_realize; + k->transfer = ads7846_transfer; + set_bit(DEVICE_CATEGORY_INPUT, dc->categories); +} + +static const TypeInfo ads7846_info = { + .name = TYPE_ADS7846, + .parent = TYPE_SSI_PERIPHERAL, + .instance_size = sizeof(ADS7846State), + .class_init = ads7846_class_init, +}; + +static void ads7846_register_types(void) +{ + type_register_static(&ads7846_info); +} + +type_init(ads7846_register_types) diff --git a/hw/input/hid.c b/hw/input/hid.c new file mode 100644 index 000000000..8aab0521f --- /dev/null +++ b/hw/input/hid.c @@ -0,0 +1,624 @@ +/* + * QEMU HID devices + * + * Copyright (c) 2005 Fabrice Bellard + * Copyright (c) 2007 OpenMoko, Inc. (andrew@openedhand.com) + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "ui/console.h" +#include "qemu/timer.h" +#include "hw/input/hid.h" +#include "migration/vmstate.h" +#include "trace.h" + +#define HID_USAGE_ERROR_ROLLOVER 0x01 +#define HID_USAGE_POSTFAIL 0x02 +#define HID_USAGE_ERROR_UNDEFINED 0x03 + +/* Indices are QEMU keycodes, values are from HID Usage Table. Indices + * above 0x80 are for keys that come after 0xe0 or 0xe1+0x1d or 0xe1+0x9d. */ +static const uint8_t hid_usage_keys[0x100] = { + 0x00, 0x29, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, + 0x24, 0x25, 0x26, 0x27, 0x2d, 0x2e, 0x2a, 0x2b, + 0x14, 0x1a, 0x08, 0x15, 0x17, 0x1c, 0x18, 0x0c, + 0x12, 0x13, 0x2f, 0x30, 0x28, 0xe0, 0x04, 0x16, + 0x07, 0x09, 0x0a, 0x0b, 0x0d, 0x0e, 0x0f, 0x33, + 0x34, 0x35, 0xe1, 0x31, 0x1d, 0x1b, 0x06, 0x19, + 0x05, 0x11, 0x10, 0x36, 0x37, 0x38, 0xe5, 0x55, + 0xe2, 0x2c, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, + 0x3f, 0x40, 0x41, 0x42, 0x43, 0x53, 0x47, 0x5f, + 0x60, 0x61, 0x56, 0x5c, 0x5d, 0x5e, 0x57, 0x59, + 0x5a, 0x5b, 0x62, 0x63, 0x46, 0x00, 0x64, 0x44, + 0x45, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, + 0xe8, 0xe9, 0x71, 0x72, 0x73, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x85, 0x00, 0x00, 0x00, 0x00, + 0x88, 0x00, 0x00, 0x87, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x8a, 0x00, 0x8b, 0x00, 0x89, 0xe7, 0x65, + + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x58, 0xe4, 0x00, 0x00, + 0x7f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x81, 0x00, + 0x80, 0x00, 0x00, 0x00, 0x00, 0x54, 0x00, 0x46, + 0xe6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x48, 0x48, 0x4a, + 0x52, 0x4b, 0x00, 0x50, 0x00, 0x4f, 0x00, 0x4d, + 0x51, 0x4e, 0x49, 0x4c, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0xe3, 0xe7, 0x65, 0x66, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +}; + +bool hid_has_events(HIDState *hs) +{ + return hs->n > 0 || hs->idle_pending; +} + +static void hid_idle_timer(void *opaque) +{ + HIDState *hs = opaque; + + hs->idle_pending = true; + hs->event(hs); +} + +static void hid_del_idle_timer(HIDState *hs) +{ + if (hs->idle_timer) { + timer_free(hs->idle_timer); + hs->idle_timer = NULL; + } +} + +void hid_set_next_idle(HIDState *hs) +{ + if (hs->idle) { + uint64_t expire_time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + + NANOSECONDS_PER_SECOND * hs->idle * 4 / 1000; + if (!hs->idle_timer) { + hs->idle_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, hid_idle_timer, hs); + } + timer_mod_ns(hs->idle_timer, expire_time); + } else { + hid_del_idle_timer(hs); + } +} + +static void hid_pointer_event(DeviceState *dev, QemuConsole *src, + InputEvent *evt) +{ + static const int bmap[INPUT_BUTTON__MAX] = { + [INPUT_BUTTON_LEFT] = 0x01, + [INPUT_BUTTON_RIGHT] = 0x02, + [INPUT_BUTTON_MIDDLE] = 0x04, + }; + HIDState *hs = (HIDState *)dev; + 
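+    /*
+     * Pointer events are staged in a ring buffer: hs->head indexes
+     * the oldest entry not yet consumed by the guest, hs->n counts
+     * the queued entries and QUEUE_MASK wraps the index.  The event
+     * is accumulated in the first free slot; hid_pointer_sync()
+     * decides whether it becomes a new guest-visible entry or is
+     * merged into the previous one.
+     */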
+    HIDPointerEvent *e;
+    InputMoveEvent *move;
+    InputBtnEvent *btn;
+
+    assert(hs->n < QUEUE_LENGTH);
+    e = &hs->ptr.queue[(hs->head + hs->n) & QUEUE_MASK];
+
+    switch (evt->type) {
+    case INPUT_EVENT_KIND_REL:
+        move = evt->u.rel.data;
+        if (move->axis == INPUT_AXIS_X) {
+            e->xdx += move->value;
+        } else if (move->axis == INPUT_AXIS_Y) {
+            e->ydy += move->value;
+        }
+        break;
+
+    case INPUT_EVENT_KIND_ABS:
+        move = evt->u.abs.data;
+        if (move->axis == INPUT_AXIS_X) {
+            e->xdx = move->value;
+        } else if (move->axis == INPUT_AXIS_Y) {
+            e->ydy = move->value;
+        }
+        break;
+
+    case INPUT_EVENT_KIND_BTN:
+        btn = evt->u.btn.data;
+        if (btn->down) {
+            e->buttons_state |= bmap[btn->button];
+            if (btn->button == INPUT_BUTTON_WHEEL_UP) {
+                e->dz--;
+            } else if (btn->button == INPUT_BUTTON_WHEEL_DOWN) {
+                e->dz++;
+            }
+        } else {
+            e->buttons_state &= ~bmap[btn->button];
+        }
+        break;
+
+    default:
+        /* keep gcc happy */
+        break;
+    }
+
+}
+
+static void hid_pointer_sync(DeviceState *dev)
+{
+    HIDState *hs = (HIDState *)dev;
+    HIDPointerEvent *prev, *curr, *next;
+    bool event_compression = false;
+
+    if (hs->n == QUEUE_LENGTH-1) {
+        /*
+         * Queue full. We are losing information, but we at least
+         * keep track of the most recent button state.
+         */
+        return;
+    }
+
+    prev = &hs->ptr.queue[(hs->head + hs->n - 1) & QUEUE_MASK];
+    curr = &hs->ptr.queue[(hs->head + hs->n) & QUEUE_MASK];
+    next = &hs->ptr.queue[(hs->head + hs->n + 1) & QUEUE_MASK];
+
+    if (hs->n > 0) {
+        /*
+         * No button state change between previous and current event
+         * (and previous wasn't seen by the guest yet), so there is
+         * motion information only and we can combine the two events
+         * into one.
+         */
+        if (curr->buttons_state == prev->buttons_state) {
+            event_compression = true;
+        }
+    }
+
+    if (event_compression) {
+        /* add current motion to previous, clear current */
+        if (hs->kind == HID_MOUSE) {
+            prev->xdx += curr->xdx;
+            curr->xdx = 0;
+            prev->ydy += curr->ydy;
+            curr->ydy = 0;
+        } else {
+            prev->xdx = curr->xdx;
+            prev->ydy = curr->ydy;
+        }
+        prev->dz += curr->dz;
+        curr->dz = 0;
+    } else {
+        /* prepare next (clear rel, copy abs + btns) */
+        if (hs->kind == HID_MOUSE) {
+            next->xdx = 0;
+            next->ydy = 0;
+        } else {
+            next->xdx = curr->xdx;
+            next->ydy = curr->ydy;
+        }
+        next->dz = 0;
+        next->buttons_state = curr->buttons_state;
+        /* make the current event visible to the guest, notify guest */
+        hs->n++;
+        hs->event(hs);
+    }
+}
+
+static void hid_keyboard_event(DeviceState *dev, QemuConsole *src,
+                               InputEvent *evt)
+{
+    HIDState *hs = (HIDState *)dev;
+    int scancodes[3], i, count;
+    int slot;
+    InputKeyEvent *key = evt->u.key.data;
+
+    count = qemu_input_key_value_to_scancode(key->key,
+                                             key->down,
+                                             scancodes);
+    if (hs->n + count > QUEUE_LENGTH) {
+        trace_hid_kbd_queue_full();
+        return;
+    }
+    for (i = 0; i < count; i++) {
+        slot = (hs->head + hs->n) & QUEUE_MASK; hs->n++;
+        hs->kbd.keycodes[slot] = scancodes[i];
+    }
+    hs->event(hs);
+}
+
+static void hid_keyboard_process_keycode(HIDState *hs)
+{
+    uint8_t hid_code, index, key;
+    int i, keycode, slot;
+
+    if (hs->n == 0) {
+        return;
+    }
+    slot = hs->head & QUEUE_MASK; QUEUE_INCR(hs->head); hs->n--;
+    keycode = hs->kbd.keycodes[slot];
+
+    if (!hs->n) {
+        trace_hid_kbd_queue_empty();
+    }
+
+    key = keycode & 0x7f;
+    index = key | ((hs->kbd.modifiers & (1 << 8)) >> 1);
+    hid_code = hid_usage_keys[index];
+    hs->kbd.modifiers &= ~(1 << 8);
+
+    switch (hid_code) {
+    case 0x00:
+        return;
+
+    case 0xe0:
+        assert(key == 0x1d);
+        if (hs->kbd.modifiers & (1 << 9)) {
+            /* The hid_codes for the 0xe1/0x1d scancode sequence
are 0xe9/0xe0. + * Here we're processing the second hid_code. By dropping bit 9 + * and setting bit 8, the scancode after 0x1d will access the + * second half of the table. + */ + hs->kbd.modifiers ^= (1 << 8) | (1 << 9); + return; + } + /* fall through to process Ctrl_L */ + case 0xe1 ... 0xe7: + /* Ctrl_L/Ctrl_R, Shift_L/Shift_R, Alt_L/Alt_R, Win_L/Win_R. + * Handle releases here, or fall through to process presses. + */ + if (keycode & (1 << 7)) { + hs->kbd.modifiers &= ~(1 << (hid_code & 0x0f)); + return; + } + /* fall through */ + case 0xe8 ... 0xe9: + /* USB modifiers are just 1 byte long. Bits 8 and 9 of + * hs->kbd.modifiers implement a state machine that detects the + * 0xe0 and 0xe1/0x1d sequences. These bits do not follow the + * usual rules where bit 7 marks released keys; they are cleared + * elsewhere in the function as the state machine dictates. + */ + hs->kbd.modifiers |= 1 << (hid_code & 0x0f); + return; + + case 0xea ... 0xef: + abort(); + + default: + break; + } + + if (keycode & (1 << 7)) { + for (i = hs->kbd.keys - 1; i >= 0; i--) { + if (hs->kbd.key[i] == hid_code) { + hs->kbd.key[i] = hs->kbd.key[-- hs->kbd.keys]; + hs->kbd.key[hs->kbd.keys] = 0x00; + break; + } + } + if (i < 0) { + return; + } + } else { + for (i = hs->kbd.keys - 1; i >= 0; i--) { + if (hs->kbd.key[i] == hid_code) { + break; + } + } + if (i < 0) { + if (hs->kbd.keys < sizeof(hs->kbd.key)) { + hs->kbd.key[hs->kbd.keys++] = hid_code; + } + } else { + return; + } + } +} + +static inline int int_clamp(int val, int vmin, int vmax) +{ + if (val < vmin) { + return vmin; + } else if (val > vmax) { + return vmax; + } else { + return val; + } +} + +void hid_pointer_activate(HIDState *hs) +{ + if (!hs->ptr.mouse_grabbed) { + qemu_input_handler_activate(hs->s); + hs->ptr.mouse_grabbed = 1; + } +} + +int hid_pointer_poll(HIDState *hs, uint8_t *buf, int len) +{ + int dx, dy, dz, l; + int index; + HIDPointerEvent *e; + + hs->idle_pending = false; + + hid_pointer_activate(hs); + + /* When the buffer is empty, return the last event. Relative + movements will all be zero. */ + index = (hs->n ? 
hs->head : hs->head - 1); + e = &hs->ptr.queue[index & QUEUE_MASK]; + + if (hs->kind == HID_MOUSE) { + dx = int_clamp(e->xdx, -127, 127); + dy = int_clamp(e->ydy, -127, 127); + e->xdx -= dx; + e->ydy -= dy; + } else { + dx = e->xdx; + dy = e->ydy; + } + dz = int_clamp(e->dz, -127, 127); + e->dz -= dz; + + if (hs->n && + !e->dz && + (hs->kind == HID_TABLET || (!e->xdx && !e->ydy))) { + /* that deals with this event */ + QUEUE_INCR(hs->head); + hs->n--; + } + + /* Appears we have to invert the wheel direction */ + dz = 0 - dz; + l = 0; + switch (hs->kind) { + case HID_MOUSE: + if (len > l) { + buf[l++] = e->buttons_state; + } + if (len > l) { + buf[l++] = dx; + } + if (len > l) { + buf[l++] = dy; + } + if (len > l) { + buf[l++] = dz; + } + break; + + case HID_TABLET: + if (len > l) { + buf[l++] = e->buttons_state; + } + if (len > l) { + buf[l++] = dx & 0xff; + } + if (len > l) { + buf[l++] = dx >> 8; + } + if (len > l) { + buf[l++] = dy & 0xff; + } + if (len > l) { + buf[l++] = dy >> 8; + } + if (len > l) { + buf[l++] = dz; + } + break; + + default: + abort(); + } + + return l; +} + +int hid_keyboard_poll(HIDState *hs, uint8_t *buf, int len) +{ + hs->idle_pending = false; + + if (len < 2) { + return 0; + } + + hid_keyboard_process_keycode(hs); + + buf[0] = hs->kbd.modifiers & 0xff; + buf[1] = 0; + if (hs->kbd.keys > 6) { + memset(buf + 2, HID_USAGE_ERROR_ROLLOVER, MIN(8, len) - 2); + } else { + memcpy(buf + 2, hs->kbd.key, MIN(8, len) - 2); + } + + return MIN(8, len); +} + +int hid_keyboard_write(HIDState *hs, uint8_t *buf, int len) +{ + if (len > 0) { + int ledstate = 0; + /* 0x01: Num Lock LED + * 0x02: Caps Lock LED + * 0x04: Scroll Lock LED + * 0x08: Compose LED + * 0x10: Kana LED */ + hs->kbd.leds = buf[0]; + if (hs->kbd.leds & 0x04) { + ledstate |= QEMU_SCROLL_LOCK_LED; + } + if (hs->kbd.leds & 0x01) { + ledstate |= QEMU_NUM_LOCK_LED; + } + if (hs->kbd.leds & 0x02) { + ledstate |= QEMU_CAPS_LOCK_LED; + } + kbd_put_ledstate(ledstate); + } + return 0; +} + +void hid_reset(HIDState *hs) +{ + switch (hs->kind) { + case HID_KEYBOARD: + memset(hs->kbd.keycodes, 0, sizeof(hs->kbd.keycodes)); + memset(hs->kbd.key, 0, sizeof(hs->kbd.key)); + hs->kbd.keys = 0; + hs->kbd.modifiers = 0; + break; + case HID_MOUSE: + case HID_TABLET: + memset(hs->ptr.queue, 0, sizeof(hs->ptr.queue)); + break; + } + hs->head = 0; + hs->n = 0; + hs->protocol = 1; + hs->idle = 0; + hs->idle_pending = false; + hid_del_idle_timer(hs); +} + +void hid_free(HIDState *hs) +{ + qemu_input_handler_unregister(hs->s); + hid_del_idle_timer(hs); +} + +static QemuInputHandler hid_keyboard_handler = { + .name = "QEMU HID Keyboard", + .mask = INPUT_EVENT_MASK_KEY, + .event = hid_keyboard_event, +}; + +static QemuInputHandler hid_mouse_handler = { + .name = "QEMU HID Mouse", + .mask = INPUT_EVENT_MASK_BTN | INPUT_EVENT_MASK_REL, + .event = hid_pointer_event, + .sync = hid_pointer_sync, +}; + +static QemuInputHandler hid_tablet_handler = { + .name = "QEMU HID Tablet", + .mask = INPUT_EVENT_MASK_BTN | INPUT_EVENT_MASK_ABS, + .event = hid_pointer_event, + .sync = hid_pointer_sync, +}; + +void hid_init(HIDState *hs, int kind, HIDEventFunc event) +{ + hs->kind = kind; + hs->event = event; + + if (hs->kind == HID_KEYBOARD) { + hs->s = qemu_input_handler_register((DeviceState *)hs, + &hid_keyboard_handler); + qemu_input_handler_activate(hs->s); + } else if (hs->kind == HID_MOUSE) { + hs->s = qemu_input_handler_register((DeviceState *)hs, + &hid_mouse_handler); + } else if (hs->kind == HID_TABLET) { + hs->s = 
qemu_input_handler_register((DeviceState *)hs, + &hid_tablet_handler); + } +} + +static int hid_post_load(void *opaque, int version_id) +{ + HIDState *s = opaque; + + hid_set_next_idle(s); + + if (s->n == QUEUE_LENGTH && (s->kind == HID_TABLET || + s->kind == HID_MOUSE)) { + /* + * Handle ptr device migration from old qemu with full queue. + * + * Throw away everything but the last event, so we propagate + * at least the current button state to the guest. Also keep + * current position for the tablet, signal "no motion" for the + * mouse. + */ + HIDPointerEvent evt; + evt = s->ptr.queue[(s->head+s->n) & QUEUE_MASK]; + if (s->kind == HID_MOUSE) { + evt.xdx = 0; + evt.ydy = 0; + } + s->ptr.queue[0] = evt; + s->head = 0; + s->n = 1; + } + return 0; +} + +static const VMStateDescription vmstate_hid_ptr_queue = { + .name = "HIDPointerEventQueue", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_INT32(xdx, HIDPointerEvent), + VMSTATE_INT32(ydy, HIDPointerEvent), + VMSTATE_INT32(dz, HIDPointerEvent), + VMSTATE_INT32(buttons_state, HIDPointerEvent), + VMSTATE_END_OF_LIST() + } +}; + +const VMStateDescription vmstate_hid_ptr_device = { + .name = "HIDPointerDevice", + .version_id = 1, + .minimum_version_id = 1, + .post_load = hid_post_load, + .fields = (VMStateField[]) { + VMSTATE_STRUCT_ARRAY(ptr.queue, HIDState, QUEUE_LENGTH, 0, + vmstate_hid_ptr_queue, HIDPointerEvent), + VMSTATE_UINT32(head, HIDState), + VMSTATE_UINT32(n, HIDState), + VMSTATE_INT32(protocol, HIDState), + VMSTATE_UINT8(idle, HIDState), + VMSTATE_END_OF_LIST(), + } +}; + +const VMStateDescription vmstate_hid_keyboard_device = { + .name = "HIDKeyboardDevice", + .version_id = 1, + .minimum_version_id = 1, + .post_load = hid_post_load, + .fields = (VMStateField[]) { + VMSTATE_UINT32_ARRAY(kbd.keycodes, HIDState, QUEUE_LENGTH), + VMSTATE_UINT32(head, HIDState), + VMSTATE_UINT32(n, HIDState), + VMSTATE_UINT16(kbd.modifiers, HIDState), + VMSTATE_UINT8(kbd.leds, HIDState), + VMSTATE_UINT8_ARRAY(kbd.key, HIDState, 16), + VMSTATE_INT32(kbd.keys, HIDState), + VMSTATE_INT32(protocol, HIDState), + VMSTATE_UINT8(idle, HIDState), + VMSTATE_END_OF_LIST(), + } +}; diff --git a/hw/input/lasips2.c b/hw/input/lasips2.c new file mode 100644 index 000000000..68d741d34 --- /dev/null +++ b/hw/input/lasips2.c @@ -0,0 +1,288 @@ +/* + * QEMU HP Lasi PS/2 interface emulation + * + * Copyright (c) 2019 Sven Schnelle + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "hw/qdev-properties.h" +#include "hw/input/ps2.h" +#include "hw/input/lasips2.h" +#include "exec/hwaddr.h" +#include "trace.h" +#include "exec/address-spaces.h" +#include "migration/vmstate.h" +#include "hw/irq.h" + + +struct LASIPS2State; +typedef struct LASIPS2Port { + struct LASIPS2State *parent; + MemoryRegion reg; + void *dev; + uint8_t id; + uint8_t control; + uint8_t buf; + bool loopback_rbne; + bool irq; +} LASIPS2Port; + +typedef struct LASIPS2State { + LASIPS2Port kbd; + LASIPS2Port mouse; + qemu_irq irq; +} LASIPS2State; + +static const VMStateDescription vmstate_lasips2 = { + .name = "lasips2", + .version_id = 0, + .minimum_version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_UINT8(kbd.control, LASIPS2State), + VMSTATE_UINT8(kbd.id, LASIPS2State), + VMSTATE_BOOL(kbd.irq, LASIPS2State), + VMSTATE_UINT8(mouse.control, LASIPS2State), + VMSTATE_UINT8(mouse.id, LASIPS2State), + VMSTATE_BOOL(mouse.irq, LASIPS2State), + VMSTATE_END_OF_LIST() + } +}; + +typedef enum { + REG_PS2_ID = 0, + REG_PS2_RCVDATA = 4, + REG_PS2_CONTROL = 8, + REG_PS2_STATUS = 12, +} lasips2_read_reg_t; + +typedef enum { + REG_PS2_RESET = 0, + REG_PS2_XMTDATA = 4, +} lasips2_write_reg_t; + +typedef enum { + LASIPS2_CONTROL_ENABLE = 0x01, + LASIPS2_CONTROL_LOOPBACK = 0x02, + LASIPS2_CONTROL_DIAG = 0x20, + LASIPS2_CONTROL_DATDIR = 0x40, + LASIPS2_CONTROL_CLKDIR = 0x80, +} lasips2_control_reg_t; + +typedef enum { + LASIPS2_STATUS_RBNE = 0x01, + LASIPS2_STATUS_TBNE = 0x02, + LASIPS2_STATUS_TERR = 0x04, + LASIPS2_STATUS_PERR = 0x08, + LASIPS2_STATUS_CMPINTR = 0x10, + LASIPS2_STATUS_DATSHD = 0x40, + LASIPS2_STATUS_CLKSHD = 0x80, +} lasips2_status_reg_t; + +static const char *lasips2_read_reg_name(uint64_t addr) +{ + switch (addr & 0xc) { + case REG_PS2_ID: + return " PS2_ID"; + + case REG_PS2_RCVDATA: + return " PS2_RCVDATA"; + + case REG_PS2_CONTROL: + return " PS2_CONTROL"; + + case REG_PS2_STATUS: + return " PS2_STATUS"; + + default: + return ""; + } +} + +static const char *lasips2_write_reg_name(uint64_t addr) +{ + switch (addr & 0x0c) { + case REG_PS2_RESET: + return " PS2_RESET"; + + case REG_PS2_XMTDATA: + return " PS2_XMTDATA"; + + case REG_PS2_CONTROL: + return " PS2_CONTROL"; + + default: + return ""; + } +} + +static void lasips2_update_irq(LASIPS2State *s) +{ + trace_lasips2_intr(s->kbd.irq | s->mouse.irq); + qemu_set_irq(s->irq, s->kbd.irq | s->mouse.irq); +} + +static void lasips2_reg_write(void *opaque, hwaddr addr, uint64_t val, + unsigned size) +{ + LASIPS2Port *port = opaque; + + trace_lasips2_reg_write(size, port->id, addr, + lasips2_write_reg_name(addr), val); + + switch (addr & 0xc) { + case REG_PS2_CONTROL: + port->control = val; + break; + + case REG_PS2_XMTDATA: + if (port->control & LASIPS2_CONTROL_LOOPBACK) { + port->buf = val; + port->irq = true; + port->loopback_rbne = true; + lasips2_update_irq(port->parent); + break; + } + + if (port->id) { + ps2_write_mouse(port->dev, val); + } else { + ps2_write_keyboard(port->dev, val); + } + break; + + case REG_PS2_RESET: + break; + + default: + qemu_log_mask(LOG_UNIMP, "%s: unknown register 0x%02" HWADDR_PRIx "\n", + __func__, addr); + break; + } +} + +static uint64_t lasips2_reg_read(void *opaque, hwaddr addr, unsigned size) +{ + LASIPS2Port *port = opaque; + uint64_t ret = 0; + + switch (addr & 0xc) { + case REG_PS2_ID: + ret = port->id; + break; + + case REG_PS2_RCVDATA: + if (port->control & LASIPS2_CONTROL_LOOPBACK) { + port->irq = false; + port->loopback_rbne = false; + 
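+            /*
+             * In loopback mode a read of RCVDATA returns the byte
+             * most recently written to XMTDATA and clears the
+             * receiver-not-empty state, dropping the interrupt.
+             */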
+            lasips2_update_irq(port->parent);
+            ret = port->buf;
+            break;
+        }
+
+        ret = ps2_read_data(port->dev);
+        break;
+
+    case REG_PS2_CONTROL:
+        ret = port->control;
+        break;
+
+    case REG_PS2_STATUS:
+
+        ret = LASIPS2_STATUS_DATSHD | LASIPS2_STATUS_CLKSHD;
+
+        if (port->control & LASIPS2_CONTROL_DIAG) {
+            if (!(port->control & LASIPS2_CONTROL_DATDIR)) {
+                ret &= ~LASIPS2_STATUS_DATSHD;
+            }
+
+            if (!(port->control & LASIPS2_CONTROL_CLKDIR)) {
+                ret &= ~LASIPS2_STATUS_CLKSHD;
+            }
+        }
+
+        if (port->control & LASIPS2_CONTROL_LOOPBACK) {
+            if (port->loopback_rbne) {
+                ret |= LASIPS2_STATUS_RBNE;
+            }
+        } else {
+            if (!ps2_queue_empty(port->dev)) {
+                ret |= LASIPS2_STATUS_RBNE;
+            }
+        }
+
+        if (port->parent->kbd.irq || port->parent->mouse.irq) {
+            ret |= LASIPS2_STATUS_CMPINTR;
+        }
+        break;
+
+    default:
+        qemu_log_mask(LOG_UNIMP, "%s: unknown register 0x%02" HWADDR_PRIx "\n",
+                      __func__, addr);
+        break;
+    }
+    trace_lasips2_reg_read(size, port->id, addr,
+                           lasips2_read_reg_name(addr), ret);
+
+    return ret;
+}
+
+static const MemoryRegionOps lasips2_reg_ops = {
+    .read = lasips2_reg_read,
+    .write = lasips2_reg_write,
+    .impl = {
+        .min_access_size = 1,
+        .max_access_size = 4,
+    },
+    .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+static void ps2dev_update_irq(void *opaque, int level)
+{
+    LASIPS2Port *port = opaque;
+    port->irq = level;
+    lasips2_update_irq(port->parent);
+}
+
+void lasips2_init(MemoryRegion *address_space,
+                  hwaddr base, qemu_irq irq)
+{
+    LASIPS2State *s;
+
+    s = g_malloc0(sizeof(LASIPS2State));
+
+    s->irq = irq;
+    s->mouse.id = 1;
+    s->kbd.parent = s;
+    s->mouse.parent = s;
+
+    vmstate_register(NULL, base, &vmstate_lasips2, s);
+
+    s->kbd.dev = ps2_kbd_init(ps2dev_update_irq, &s->kbd);
+    s->mouse.dev = ps2_mouse_init(ps2dev_update_irq, &s->mouse);
+
+    memory_region_init_io(&s->kbd.reg, NULL, &lasips2_reg_ops, &s->kbd,
+                          "lasips2-kbd", 0x100);
+    memory_region_add_subregion(address_space, base, &s->kbd.reg);
+
+    memory_region_init_io(&s->mouse.reg, NULL, &lasips2_reg_ops, &s->mouse,
+                          "lasips2-mouse", 0x100);
+    memory_region_add_subregion(address_space, base + 0x100, &s->mouse.reg);
+} diff --git a/hw/input/lm832x.c b/hw/input/lm832x.c new file mode 100644 index 000000000..19a646d9b --- /dev/null +++ b/hw/input/lm832x.c @@ -0,0 +1,528 @@
+/*
+ * National Semiconductor LM8322/8323 GPIO keyboard & PWM chips.
+ *
+ * Copyright (C) 2008 Nokia Corporation
+ * Written by Andrzej Zaborowski
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 or
+ * (at your option) version 3 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */ + +#include "qemu/osdep.h" +#include "hw/input/lm832x.h" +#include "hw/i2c/i2c.h" +#include "hw/irq.h" +#include "migration/vmstate.h" +#include "qemu/module.h" +#include "qemu/timer.h" +#include "ui/console.h" +#include "qom/object.h" + +OBJECT_DECLARE_SIMPLE_TYPE(LM823KbdState, LM8323) + +struct LM823KbdState { + I2CSlave parent_obj; + + uint8_t i2c_dir; + uint8_t i2c_cycle; + uint8_t reg; + + qemu_irq nirq; + uint16_t model; + + struct { + qemu_irq out[2]; + int in[2][2]; + } mux; + + uint8_t config; + uint8_t status; + uint8_t acttime; + uint8_t error; + uint8_t clock; + + struct { + uint16_t pull; + uint16_t mask; + uint16_t dir; + uint16_t level; + qemu_irq out[16]; + } gpio; + + struct { + uint8_t dbnctime; + uint8_t size; + uint8_t start; + uint8_t len; + uint8_t fifo[16]; + } kbd; + + struct { + uint16_t file[256]; + uint8_t faddr; + uint8_t addr[3]; + QEMUTimer *tm[3]; + } pwm; +}; + +#define INT_KEYPAD (1 << 0) +#define INT_ERROR (1 << 3) +#define INT_NOINIT (1 << 4) +#define INT_PWMEND(n) (1 << (5 + n)) + +#define ERR_BADPAR (1 << 0) +#define ERR_CMDUNK (1 << 1) +#define ERR_KEYOVR (1 << 2) +#define ERR_FIFOOVR (1 << 6) + +static void lm_kbd_irq_update(LM823KbdState *s) +{ + qemu_set_irq(s->nirq, !s->status); +} + +static void lm_kbd_gpio_update(LM823KbdState *s) +{ +} + +static void lm_kbd_reset(DeviceState *dev) +{ + LM823KbdState *s = LM8323(dev); + + s->config = 0x80; + s->status = INT_NOINIT; + s->acttime = 125; + s->kbd.dbnctime = 3; + s->kbd.size = 0x33; + s->clock = 0x08; + + lm_kbd_irq_update(s); + lm_kbd_gpio_update(s); +} + +static void lm_kbd_error(LM823KbdState *s, int err) +{ + s->error |= err; + s->status |= INT_ERROR; + lm_kbd_irq_update(s); +} + +static void lm_kbd_pwm_tick(LM823KbdState *s, int line) +{ +} + +static void lm_kbd_pwm_start(LM823KbdState *s, int line) +{ + lm_kbd_pwm_tick(s, line); +} + +static void lm_kbd_pwm0_tick(void *opaque) +{ + lm_kbd_pwm_tick(opaque, 0); +} +static void lm_kbd_pwm1_tick(void *opaque) +{ + lm_kbd_pwm_tick(opaque, 1); +} +static void lm_kbd_pwm2_tick(void *opaque) +{ + lm_kbd_pwm_tick(opaque, 2); +} + +enum { + LM832x_CMD_READ_ID = 0x80, /* Read chip ID. */ + LM832x_CMD_WRITE_CFG = 0x81, /* Set configuration item. */ + LM832x_CMD_READ_INT = 0x82, /* Get interrupt status. */ + LM832x_CMD_RESET = 0x83, /* Reset, same as external one */ + LM823x_CMD_WRITE_PULL_DOWN = 0x84, /* Select GPIO pull-up/down. */ + LM832x_CMD_WRITE_PORT_SEL = 0x85, /* Select GPIO in/out. */ + LM832x_CMD_WRITE_PORT_STATE = 0x86, /* Set GPIO pull-up/down. */ + LM832x_CMD_READ_PORT_SEL = 0x87, /* Get GPIO in/out. */ + LM832x_CMD_READ_PORT_STATE = 0x88, /* Get GPIO pull-up/down. */ + LM832x_CMD_READ_FIFO = 0x89, /* Read byte from FIFO. */ + LM832x_CMD_RPT_READ_FIFO = 0x8a, /* Read FIFO (no increment). */ + LM832x_CMD_SET_ACTIVE = 0x8b, /* Set active time. */ + LM832x_CMD_READ_ERROR = 0x8c, /* Get error status. */ + LM832x_CMD_READ_ROTATOR = 0x8e, /* Read rotator status. */ + LM832x_CMD_SET_DEBOUNCE = 0x8f, /* Set debouncing time. */ + LM832x_CMD_SET_KEY_SIZE = 0x90, /* Set keypad size. */ + LM832x_CMD_READ_KEY_SIZE = 0x91, /* Get keypad size. */ + LM832x_CMD_READ_CFG = 0x92, /* Get configuration item. */ + LM832x_CMD_WRITE_CLOCK = 0x93, /* Set clock config. */ + LM832x_CMD_READ_CLOCK = 0x94, /* Get clock config. */ + LM832x_CMD_PWM_WRITE = 0x95, /* Write PWM script. */ + LM832x_CMD_PWM_START = 0x96, /* Start PWM engine. */ + LM832x_CMD_PWM_STOP = 0x97, /* Stop PWM engine. */ + LM832x_GENERAL_ERROR = 0xff, /* There was one error. 
+                                      Previously represented by -1;
+                                      this is not a command. */
+};
+
+#define LM832x_MAX_KPX 8
+#define LM832x_MAX_KPY 12
+
+static uint8_t lm_kbd_read(LM823KbdState *s, int reg, int byte)
+{
+    int ret;
+
+    switch (reg) {
+    case LM832x_CMD_READ_ID:
+        ret = 0x0400;
+        break;
+
+    case LM832x_CMD_READ_INT:
+        ret = s->status;
+        if (!(s->status & INT_NOINIT)) {
+            s->status = 0;
+            lm_kbd_irq_update(s);
+        }
+        break;
+
+    case LM832x_CMD_READ_PORT_SEL:
+        ret = s->gpio.dir;
+        break;
+    case LM832x_CMD_READ_PORT_STATE:
+        ret = s->gpio.mask;
+        break;
+
+    case LM832x_CMD_READ_FIFO:
+        if (s->kbd.len <= 1)
+            return 0x00;
+
+        /* Example response from the two commands after an INT_KEYPAD
+         * interrupt caused by the key 0x3c being pressed:
+         * RPT_READ_FIFO: 55 bc 00 4e ff 0a 50 08 00 29 d9 08 01 c9 01
+         * READ_FIFO:     bc 00 00 4e ff 0a 50 08 00 29 d9 08 01 c9 01
+         * RPT_READ_FIFO: bc 00 00 4e ff 0a 50 08 00 29 d9 08 01 c9 01
+         *
+         * 55 is the code of the key release event serviced in the previous
+         * interrupt handling.
+         *
+         * TODO: find out whether the FIFO is advanced a single character
+         * before reading every byte or the whole size of the FIFO at the
+         * last LM832x_CMD_READ_FIFO. This affects LM832x_CMD_RPT_READ_FIFO
+         * output in cases where there is more than one event in the FIFO.
+         * Assume 0xbc and 0x3c events are in the FIFO:
+         * RPT_READ_FIFO: 55 bc 3c 00 4e ff 0a 50 08 00 29 d9 08 01 c9
+         * READ_FIFO:     bc 3c 00 00 4e ff 0a 50 08 00 29 d9 08 01 c9
+         * Does RPT_READ_FIFO now return 0xbc and 0x3c or only 0x3c?
+         */
+        s->kbd.start++;
+        s->kbd.start &= sizeof(s->kbd.fifo) - 1;
+        s->kbd.len--;
+
+        return s->kbd.fifo[s->kbd.start];
+    case LM832x_CMD_RPT_READ_FIFO:
+        if (byte >= s->kbd.len)
+            return 0x00;
+
+        return s->kbd.fifo[(s->kbd.start + byte) & (sizeof(s->kbd.fifo) - 1)];
+
+    case LM832x_CMD_READ_ERROR:
+        return s->error;
+
+    case LM832x_CMD_READ_ROTATOR:
+        return 0;
+
+    case LM832x_CMD_READ_KEY_SIZE:
+        return s->kbd.size;
+
+    case LM832x_CMD_READ_CFG:
+        return s->config & 0xf;
+
+    case LM832x_CMD_READ_CLOCK:
+        return (s->clock & 0xfc) | 2;
+
+    default:
+        lm_kbd_error(s, ERR_CMDUNK);
+        fprintf(stderr, "%s: unknown command %02x\n", __func__, reg);
+        return 0x00;
+    }
+
+    return ret >> (byte << 3);
+}
+
+static void lm_kbd_write(LM823KbdState *s, int reg, int byte, uint8_t value)
+{
+    switch (reg) {
+    case LM832x_CMD_WRITE_CFG:
+        s->config = value;
+        /* This must be done whenever s->mux.in is updated (never). */
+        if ((s->config >> 1) & 1) /* MUX1EN */
+            qemu_set_irq(s->mux.out[0], s->mux.in[0][(s->config >> 0) & 1]);
+        if ((s->config >> 3) & 1) /* MUX2EN */
+            qemu_set_irq(s->mux.out[0], s->mux.in[0][(s->config >> 2) & 1]);
+        /* TODO: check that this is issued only following the chip reset
+         * and not in the middle of operation and that it is followed by
+         * the GPIO ports re-establishing through WRITE_PORT_SEL and
+         * WRITE_PORT_STATE (using a timer perhaps) and otherwise output
+         * warnings.
*/ + s->status = 0; + lm_kbd_irq_update(s); + s->kbd.len = 0; + s->kbd.start = 0; + s->reg = LM832x_GENERAL_ERROR; + break; + + case LM832x_CMD_RESET: + if (value == 0xaa) + lm_kbd_reset(DEVICE(s)); + else + lm_kbd_error(s, ERR_BADPAR); + s->reg = LM832x_GENERAL_ERROR; + break; + + case LM823x_CMD_WRITE_PULL_DOWN: + if (!byte) + s->gpio.pull = value; + else { + s->gpio.pull |= value << 8; + lm_kbd_gpio_update(s); + s->reg = LM832x_GENERAL_ERROR; + } + break; + case LM832x_CMD_WRITE_PORT_SEL: + if (!byte) + s->gpio.dir = value; + else { + s->gpio.dir |= value << 8; + lm_kbd_gpio_update(s); + s->reg = LM832x_GENERAL_ERROR; + } + break; + case LM832x_CMD_WRITE_PORT_STATE: + if (!byte) + s->gpio.mask = value; + else { + s->gpio.mask |= value << 8; + lm_kbd_gpio_update(s); + s->reg = LM832x_GENERAL_ERROR; + } + break; + + case LM832x_CMD_SET_ACTIVE: + s->acttime = value; + s->reg = LM832x_GENERAL_ERROR; + break; + + case LM832x_CMD_SET_DEBOUNCE: + s->kbd.dbnctime = value; + s->reg = LM832x_GENERAL_ERROR; + if (!value) + lm_kbd_error(s, ERR_BADPAR); + break; + + case LM832x_CMD_SET_KEY_SIZE: + s->kbd.size = value; + s->reg = LM832x_GENERAL_ERROR; + if ( + (value & 0xf) < 3 || (value & 0xf) > LM832x_MAX_KPY || + (value >> 4) < 3 || (value >> 4) > LM832x_MAX_KPX) + lm_kbd_error(s, ERR_BADPAR); + break; + + case LM832x_CMD_WRITE_CLOCK: + s->clock = value; + s->reg = LM832x_GENERAL_ERROR; + if ((value & 3) && (value & 3) != 3) { + lm_kbd_error(s, ERR_BADPAR); + fprintf(stderr, "%s: invalid clock setting in RCPWM\n", + __func__); + } + /* TODO: Validate that the command is only issued once */ + break; + + case LM832x_CMD_PWM_WRITE: + if (byte == 0) { + if (!(value & 3) || (value >> 2) > 59) { + lm_kbd_error(s, ERR_BADPAR); + s->reg = LM832x_GENERAL_ERROR; + break; + } + + s->pwm.faddr = value; + s->pwm.file[s->pwm.faddr] = 0; + } else if (byte == 1) { + s->pwm.file[s->pwm.faddr] |= value << 8; + } else if (byte == 2) { + s->pwm.file[s->pwm.faddr] |= value << 0; + s->reg = LM832x_GENERAL_ERROR; + } + break; + case LM832x_CMD_PWM_START: + s->reg = LM832x_GENERAL_ERROR; + if (!(value & 3) || (value >> 2) > 59) { + lm_kbd_error(s, ERR_BADPAR); + break; + } + + s->pwm.addr[(value & 3) - 1] = value >> 2; + lm_kbd_pwm_start(s, (value & 3) - 1); + break; + case LM832x_CMD_PWM_STOP: + s->reg = LM832x_GENERAL_ERROR; + if (!(value & 3)) { + lm_kbd_error(s, ERR_BADPAR); + break; + } + + timer_del(s->pwm.tm[(value & 3) - 1]); + break; + + case LM832x_GENERAL_ERROR: + lm_kbd_error(s, ERR_BADPAR); + break; + default: + lm_kbd_error(s, ERR_CMDUNK); + fprintf(stderr, "%s: unknown command %02x\n", __func__, reg); + break; + } +} + +static int lm_i2c_event(I2CSlave *i2c, enum i2c_event event) +{ + LM823KbdState *s = LM8323(i2c); + + switch (event) { + case I2C_START_RECV: + case I2C_START_SEND: + s->i2c_cycle = 0; + s->i2c_dir = (event == I2C_START_SEND); + break; + + default: + break; + } + + return 0; +} + +static uint8_t lm_i2c_rx(I2CSlave *i2c) +{ + LM823KbdState *s = LM8323(i2c); + + return lm_kbd_read(s, s->reg, s->i2c_cycle ++); +} + +static int lm_i2c_tx(I2CSlave *i2c, uint8_t data) +{ + LM823KbdState *s = LM8323(i2c); + + if (!s->i2c_cycle) + s->reg = data; + else + lm_kbd_write(s, s->reg, s->i2c_cycle - 1, data); + s->i2c_cycle ++; + + return 0; +} + +static int lm_kbd_post_load(void *opaque, int version_id) +{ + LM823KbdState *s = opaque; + + lm_kbd_irq_update(s); + lm_kbd_gpio_update(s); + + return 0; +} + +static const VMStateDescription vmstate_lm_kbd = { + .name = "LM8323", + .version_id = 0, + 
.minimum_version_id = 0, + .post_load = lm_kbd_post_load, + .fields = (VMStateField[]) { + VMSTATE_I2C_SLAVE(parent_obj, LM823KbdState), + VMSTATE_UINT8(i2c_dir, LM823KbdState), + VMSTATE_UINT8(i2c_cycle, LM823KbdState), + VMSTATE_UINT8(reg, LM823KbdState), + VMSTATE_UINT8(config, LM823KbdState), + VMSTATE_UINT8(status, LM823KbdState), + VMSTATE_UINT8(acttime, LM823KbdState), + VMSTATE_UINT8(error, LM823KbdState), + VMSTATE_UINT8(clock, LM823KbdState), + VMSTATE_UINT16(gpio.pull, LM823KbdState), + VMSTATE_UINT16(gpio.mask, LM823KbdState), + VMSTATE_UINT16(gpio.dir, LM823KbdState), + VMSTATE_UINT16(gpio.level, LM823KbdState), + VMSTATE_UINT8(kbd.dbnctime, LM823KbdState), + VMSTATE_UINT8(kbd.size, LM823KbdState), + VMSTATE_UINT8(kbd.start, LM823KbdState), + VMSTATE_UINT8(kbd.len, LM823KbdState), + VMSTATE_BUFFER(kbd.fifo, LM823KbdState), + VMSTATE_UINT16_ARRAY(pwm.file, LM823KbdState, 256), + VMSTATE_UINT8(pwm.faddr, LM823KbdState), + VMSTATE_BUFFER(pwm.addr, LM823KbdState), + VMSTATE_TIMER_PTR_ARRAY(pwm.tm, LM823KbdState, 3), + VMSTATE_END_OF_LIST() + } +}; + + +static void lm8323_realize(DeviceState *dev, Error **errp) +{ + LM823KbdState *s = LM8323(dev); + + s->model = 0x8323; + s->pwm.tm[0] = timer_new_ns(QEMU_CLOCK_VIRTUAL, lm_kbd_pwm0_tick, s); + s->pwm.tm[1] = timer_new_ns(QEMU_CLOCK_VIRTUAL, lm_kbd_pwm1_tick, s); + s->pwm.tm[2] = timer_new_ns(QEMU_CLOCK_VIRTUAL, lm_kbd_pwm2_tick, s); + qdev_init_gpio_out(dev, &s->nirq, 1); +} + +void lm832x_key_event(DeviceState *dev, int key, int state) +{ + LM823KbdState *s = LM8323(dev); + + if ((s->status & INT_ERROR) && (s->error & ERR_FIFOOVR)) + return; + + if (s->kbd.len >= sizeof(s->kbd.fifo)) { + lm_kbd_error(s, ERR_FIFOOVR); + return; + } + + s->kbd.fifo[(s->kbd.start + s->kbd.len ++) & (sizeof(s->kbd.fifo) - 1)] = + key | (state << 7); + + /* We never set ERR_KEYOVR because we support multiple keys fine. 
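+ * Each press or release occupies a single byte in the FIFO, so
+ * concurrent keys simply queue up until ERR_FIFOOVR is raised.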
*/ + s->status |= INT_KEYPAD; + lm_kbd_irq_update(s); +} + +static void lm8323_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + I2CSlaveClass *k = I2C_SLAVE_CLASS(klass); + + dc->reset = lm_kbd_reset; + dc->realize = lm8323_realize; + k->event = lm_i2c_event; + k->recv = lm_i2c_rx; + k->send = lm_i2c_tx; + dc->vmsd = &vmstate_lm_kbd; +} + +static const TypeInfo lm8323_info = { + .name = TYPE_LM8323, + .parent = TYPE_I2C_SLAVE, + .instance_size = sizeof(LM823KbdState), + .class_init = lm8323_class_init, +}; + +static void lm832x_register_types(void) +{ + type_register_static(&lm8323_info); +} + +type_init(lm832x_register_types) diff --git a/hw/input/meson.build b/hw/input/meson.build new file mode 100644 index 000000000..8deb011d4 --- /dev/null +++ b/hw/input/meson.build @@ -0,0 +1,18 @@ +softmmu_ss.add(files('hid.c')) +softmmu_ss.add(when: 'CONFIG_ADB', if_true: files('adb.c', 'adb-mouse.c', 'adb-kbd.c')) +softmmu_ss.add(when: 'CONFIG_ADS7846', if_true: files('ads7846.c')) +softmmu_ss.add(when: 'CONFIG_LM832X', if_true: files('lm832x.c')) +softmmu_ss.add(when: 'CONFIG_PCKBD', if_true: files('pckbd.c')) +softmmu_ss.add(when: 'CONFIG_PL050', if_true: files('pl050.c')) +softmmu_ss.add(when: 'CONFIG_PS2', if_true: files('ps2.c')) +softmmu_ss.add(when: 'CONFIG_STELLARIS_INPUT', if_true: files('stellaris_input.c')) +softmmu_ss.add(when: 'CONFIG_TSC2005', if_true: files('tsc2005.c')) + +softmmu_ss.add(when: 'CONFIG_VIRTIO_INPUT', if_true: files('virtio-input.c')) +softmmu_ss.add(when: 'CONFIG_VIRTIO_INPUT', if_true: files('virtio-input-hid.c')) +softmmu_ss.add(when: 'CONFIG_VIRTIO_INPUT_HOST', if_true: files('virtio-input-host.c')) +softmmu_ss.add(when: 'CONFIG_VHOST_USER_INPUT', if_true: files('vhost-user-input.c')) + +softmmu_ss.add(when: 'CONFIG_PXA2XX', if_true: files('pxa2xx_keypad.c')) +softmmu_ss.add(when: 'CONFIG_TSC210X', if_true: files('tsc210x.c')) +softmmu_ss.add(when: 'CONFIG_LASIPS2', if_true: files('lasips2.c')) diff --git a/hw/input/pckbd.c b/hw/input/pckbd.c new file mode 100644 index 000000000..baba62f35 --- /dev/null +++ b/hw/input/pckbd.c @@ -0,0 +1,814 @@ +/* + * QEMU PC keyboard emulation + * + * Copyright (c) 2003 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include "qemu/error-report.h" +#include "qemu/log.h" +#include "qemu/timer.h" +#include "hw/isa/isa.h" +#include "migration/vmstate.h" +#include "hw/acpi/aml-build.h" +#include "hw/input/ps2.h" +#include "hw/irq.h" +#include "hw/input/i8042.h" +#include "hw/qdev-properties.h" +#include "sysemu/reset.h" +#include "sysemu/runstate.h" + +#include "trace.h" + +/* Keyboard Controller Commands */ +#define KBD_CCMD_READ_MODE 0x20 /* Read mode bits */ +#define KBD_CCMD_WRITE_MODE 0x60 /* Write mode bits */ +#define KBD_CCMD_GET_VERSION 0xA1 /* Get controller version */ +#define KBD_CCMD_MOUSE_DISABLE 0xA7 /* Disable mouse interface */ +#define KBD_CCMD_MOUSE_ENABLE 0xA8 /* Enable mouse interface */ +#define KBD_CCMD_TEST_MOUSE 0xA9 /* Mouse interface test */ +#define KBD_CCMD_SELF_TEST 0xAA /* Controller self test */ +#define KBD_CCMD_KBD_TEST 0xAB /* Keyboard interface test */ +#define KBD_CCMD_KBD_DISABLE 0xAD /* Keyboard interface disable */ +#define KBD_CCMD_KBD_ENABLE 0xAE /* Keyboard interface enable */ +#define KBD_CCMD_READ_INPORT 0xC0 /* read input port */ +#define KBD_CCMD_READ_OUTPORT 0xD0 /* read output port */ +#define KBD_CCMD_WRITE_OUTPORT 0xD1 /* write output port */ +#define KBD_CCMD_WRITE_OBUF 0xD2 +#define KBD_CCMD_WRITE_AUX_OBUF 0xD3 /* Write to output buffer as if + initiated by the auxiliary device */ +#define KBD_CCMD_WRITE_MOUSE 0xD4 /* Write the following byte to the mouse */ +#define KBD_CCMD_DISABLE_A20 0xDD /* HP vectra only ? */ +#define KBD_CCMD_ENABLE_A20 0xDF /* HP vectra only ? */ +#define KBD_CCMD_PULSE_BITS_3_0 0xF0 /* Pulse bits 3-0 of the output port P2. */ +#define KBD_CCMD_RESET 0xFE /* Pulse bit 0 of the output port P2 = CPU reset. */ +#define KBD_CCMD_NO_OP 0xFF /* Pulse no bits of the output port P2. */ + +/* Status Register Bits */ +#define KBD_STAT_OBF 0x01 /* Keyboard output buffer full */ +#define KBD_STAT_IBF 0x02 /* Keyboard input buffer full */ +#define KBD_STAT_SELFTEST 0x04 /* Self test successful */ +#define KBD_STAT_CMD 0x08 /* Last write was a command write (0=data) */ +#define KBD_STAT_UNLOCKED 0x10 /* Zero if keyboard locked */ +#define KBD_STAT_MOUSE_OBF 0x20 /* Mouse output buffer full */ +#define KBD_STAT_GTO 0x40 /* General receive/xmit timeout */ +#define KBD_STAT_PERR 0x80 /* Parity error */ + +/* Controller Mode Register Bits */ +#define KBD_MODE_KBD_INT 0x01 /* Keyboard data generate IRQ1 */ +#define KBD_MODE_MOUSE_INT 0x02 /* Mouse data generate IRQ12 */ +#define KBD_MODE_SYS 0x04 /* The system flag (?) */ +#define KBD_MODE_NO_KEYLOCK 0x08 /* The keylock doesn't affect the keyboard if set */ +#define KBD_MODE_DISABLE_KBD 0x10 /* Disable keyboard interface */ +#define KBD_MODE_DISABLE_MOUSE 0x20 /* Disable mouse interface */ +#define KBD_MODE_KCC 0x40 /* Scan code conversion to PC format */ +#define KBD_MODE_RFU 0x80 + +/* Output Port Bits */ +#define KBD_OUT_RESET 0x01 /* 1=normal mode, 0=reset */ +#define KBD_OUT_A20 0x02 /* x86 only */ +#define KBD_OUT_OBF 0x10 /* Keyboard output buffer full */ +#define KBD_OUT_MOUSE_OBF 0x20 /* Mouse output buffer full */ + +/* OSes typically write 0xdd/0xdf to turn the A20 line off and on. + * We make the default value of the outport include these four bits, + * so that the subsection is rarely necessary. 
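+ * (KBD_OUT_ONES is 0xcc, i.e. bits 2, 3, 6 and 7 of the output port.)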
+ */ +#define KBD_OUT_ONES 0xcc + +#define KBD_PENDING_KBD_COMPAT 0x01 +#define KBD_PENDING_AUX_COMPAT 0x02 +#define KBD_PENDING_CTRL_KBD 0x04 +#define KBD_PENDING_CTRL_AUX 0x08 +#define KBD_PENDING_KBD KBD_MODE_DISABLE_KBD /* 0x10 */ +#define KBD_PENDING_AUX KBD_MODE_DISABLE_MOUSE /* 0x20 */ + +#define KBD_MIGR_TIMER_PENDING 0x1 + +#define KBD_OBSRC_KBD 0x01 +#define KBD_OBSRC_MOUSE 0x02 +#define KBD_OBSRC_CTRL 0x04 + +typedef struct KBDState { + uint8_t write_cmd; /* if non zero, write data to port 60 is expected */ + uint8_t status; + uint8_t mode; + uint8_t outport; + uint32_t migration_flags; + uint32_t obsrc; + bool outport_present; + bool extended_state; + bool extended_state_loaded; + /* Bitmask of devices with data available. */ + uint8_t pending; + uint8_t obdata; + uint8_t cbdata; + uint8_t pending_tmp; + void *kbd; + void *mouse; + QEMUTimer *throttle_timer; + + qemu_irq irq_kbd; + qemu_irq irq_mouse; + qemu_irq a20_out; + hwaddr mask; +} KBDState; + +/* XXX: not generating the irqs if KBD_MODE_DISABLE_KBD is set may be + incorrect, but it avoids having to simulate exact delays */ +static void kbd_update_irq_lines(KBDState *s) +{ + int irq_kbd_level, irq_mouse_level; + + irq_kbd_level = 0; + irq_mouse_level = 0; + + if (s->status & KBD_STAT_OBF) { + if (s->status & KBD_STAT_MOUSE_OBF) { + if (s->mode & KBD_MODE_MOUSE_INT) { + irq_mouse_level = 1; + } + } else { + if ((s->mode & KBD_MODE_KBD_INT) && + !(s->mode & KBD_MODE_DISABLE_KBD)) { + irq_kbd_level = 1; + } + } + } + qemu_set_irq(s->irq_kbd, irq_kbd_level); + qemu_set_irq(s->irq_mouse, irq_mouse_level); +} + +static void kbd_deassert_irq(KBDState *s) +{ + s->status &= ~(KBD_STAT_OBF | KBD_STAT_MOUSE_OBF); + s->outport &= ~(KBD_OUT_OBF | KBD_OUT_MOUSE_OBF); + kbd_update_irq_lines(s); +} + +static uint8_t kbd_pending(KBDState *s) +{ + if (s->extended_state) { + return s->pending & (~s->mode | ~(KBD_PENDING_KBD | KBD_PENDING_AUX)); + } else { + return s->pending; + } +} + +/* update irq and KBD_STAT_[MOUSE_]OBF */ +static void kbd_update_irq(KBDState *s) +{ + uint8_t pending = kbd_pending(s); + + s->status &= ~(KBD_STAT_OBF | KBD_STAT_MOUSE_OBF); + s->outport &= ~(KBD_OUT_OBF | KBD_OUT_MOUSE_OBF); + if (pending) { + s->status |= KBD_STAT_OBF; + s->outport |= KBD_OUT_OBF; + if (pending & KBD_PENDING_CTRL_KBD) { + s->obsrc = KBD_OBSRC_CTRL; + } else if (pending & KBD_PENDING_CTRL_AUX) { + s->status |= KBD_STAT_MOUSE_OBF; + s->outport |= KBD_OUT_MOUSE_OBF; + s->obsrc = KBD_OBSRC_CTRL; + } else if (pending & KBD_PENDING_KBD) { + s->obsrc = KBD_OBSRC_KBD; + } else { + s->status |= KBD_STAT_MOUSE_OBF; + s->outport |= KBD_OUT_MOUSE_OBF; + s->obsrc = KBD_OBSRC_MOUSE; + } + } + kbd_update_irq_lines(s); +} + +static void kbd_safe_update_irq(KBDState *s) +{ + /* + * with KBD_STAT_OBF set, a call to kbd_read_data() will eventually call + * kbd_update_irq() + */ + if (s->status & KBD_STAT_OBF) { + return; + } + /* the throttle timer is pending and will call kbd_update_irq() */ + if (s->throttle_timer && timer_pending(s->throttle_timer)) { + return; + } + if (kbd_pending(s)) { + kbd_update_irq(s); + } +} + +static void kbd_update_kbd_irq(void *opaque, int level) +{ + KBDState *s = opaque; + + if (level) { + s->pending |= KBD_PENDING_KBD; + } else { + s->pending &= ~KBD_PENDING_KBD; + } + kbd_safe_update_irq(s); +} + +static void kbd_update_aux_irq(void *opaque, int level) +{ + KBDState *s = opaque; + + if (level) { + s->pending |= KBD_PENDING_AUX; + } else { + s->pending &= ~KBD_PENDING_AUX; + } + kbd_safe_update_irq(s); +} + +static void 
kbd_throttle_timeout(void *opaque) +{ + KBDState *s = opaque; + + if (kbd_pending(s)) { + kbd_update_irq(s); + } +} + +static uint64_t kbd_read_status(void *opaque, hwaddr addr, + unsigned size) +{ + KBDState *s = opaque; + int val; + val = s->status; + trace_pckbd_kbd_read_status(val); + return val; +} + +static void kbd_queue(KBDState *s, int b, int aux) +{ + if (s->extended_state) { + s->cbdata = b; + s->pending &= ~KBD_PENDING_CTRL_KBD & ~KBD_PENDING_CTRL_AUX; + s->pending |= aux ? KBD_PENDING_CTRL_AUX : KBD_PENDING_CTRL_KBD; + kbd_safe_update_irq(s); + } else { + ps2_queue(aux ? s->mouse : s->kbd, b); + } +} + +static uint8_t kbd_dequeue(KBDState *s) +{ + uint8_t b = s->cbdata; + + s->pending &= ~KBD_PENDING_CTRL_KBD & ~KBD_PENDING_CTRL_AUX; + if (kbd_pending(s)) { + kbd_update_irq(s); + } + return b; +} + +static void outport_write(KBDState *s, uint32_t val) +{ + trace_pckbd_outport_write(val); + s->outport = val; + qemu_set_irq(s->a20_out, (val >> 1) & 1); + if (!(val & 1)) { + qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); + } +} + +static void kbd_write_command(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + KBDState *s = opaque; + + trace_pckbd_kbd_write_command(val); + + /* Bits 3-0 of the output port P2 of the keyboard controller may be pulsed + * low for approximately 6 micro seconds. Bits 3-0 of the KBD_CCMD_PULSE + * command specify the output port bits to be pulsed. + * 0: Bit should be pulsed. 1: Bit should not be modified. + * The only useful version of this command is pulsing bit 0, + * which does a CPU reset. + */ + if((val & KBD_CCMD_PULSE_BITS_3_0) == KBD_CCMD_PULSE_BITS_3_0) { + if(!(val & 1)) + val = KBD_CCMD_RESET; + else + val = KBD_CCMD_NO_OP; + } + + switch(val) { + case KBD_CCMD_READ_MODE: + kbd_queue(s, s->mode, 0); + break; + case KBD_CCMD_WRITE_MODE: + case KBD_CCMD_WRITE_OBUF: + case KBD_CCMD_WRITE_AUX_OBUF: + case KBD_CCMD_WRITE_MOUSE: + case KBD_CCMD_WRITE_OUTPORT: + s->write_cmd = val; + break; + case KBD_CCMD_MOUSE_DISABLE: + s->mode |= KBD_MODE_DISABLE_MOUSE; + break; + case KBD_CCMD_MOUSE_ENABLE: + s->mode &= ~KBD_MODE_DISABLE_MOUSE; + kbd_safe_update_irq(s); + break; + case KBD_CCMD_TEST_MOUSE: + kbd_queue(s, 0x00, 0); + break; + case KBD_CCMD_SELF_TEST: + s->status |= KBD_STAT_SELFTEST; + kbd_queue(s, 0x55, 0); + break; + case KBD_CCMD_KBD_TEST: + kbd_queue(s, 0x00, 0); + break; + case KBD_CCMD_KBD_DISABLE: + s->mode |= KBD_MODE_DISABLE_KBD; + break; + case KBD_CCMD_KBD_ENABLE: + s->mode &= ~KBD_MODE_DISABLE_KBD; + kbd_safe_update_irq(s); + break; + case KBD_CCMD_READ_INPORT: + kbd_queue(s, 0x80, 0); + break; + case KBD_CCMD_READ_OUTPORT: + kbd_queue(s, s->outport, 0); + break; + case KBD_CCMD_ENABLE_A20: + qemu_irq_raise(s->a20_out); + s->outport |= KBD_OUT_A20; + break; + case KBD_CCMD_DISABLE_A20: + qemu_irq_lower(s->a20_out); + s->outport &= ~KBD_OUT_A20; + break; + case KBD_CCMD_RESET: + qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); + break; + case KBD_CCMD_NO_OP: + /* ignore that */ + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "unsupported keyboard cmd=0x%02" PRIx64 "\n", val); + break; + } +} + +static uint64_t kbd_read_data(void *opaque, hwaddr addr, + unsigned size) +{ + KBDState *s = opaque; + + if (s->status & KBD_STAT_OBF) { + kbd_deassert_irq(s); + if (s->obsrc & KBD_OBSRC_KBD) { + if (s->throttle_timer) { + timer_mod(s->throttle_timer, + qemu_clock_get_us(QEMU_CLOCK_VIRTUAL) + 1000); + } + s->obdata = ps2_read_data(s->kbd); + } else if (s->obsrc & KBD_OBSRC_MOUSE) { + s->obdata = 
ps2_read_data(s->mouse); + } else if (s->obsrc & KBD_OBSRC_CTRL) { + s->obdata = kbd_dequeue(s); + } + } + + trace_pckbd_kbd_read_data(s->obdata); + return s->obdata; +} + +static void kbd_write_data(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + KBDState *s = opaque; + + trace_pckbd_kbd_write_data(val); + + switch(s->write_cmd) { + case 0: + ps2_write_keyboard(s->kbd, val); + /* sending data to the keyboard reenables PS/2 communication */ + s->mode &= ~KBD_MODE_DISABLE_KBD; + kbd_safe_update_irq(s); + break; + case KBD_CCMD_WRITE_MODE: + s->mode = val; + ps2_keyboard_set_translation(s->kbd, (s->mode & KBD_MODE_KCC) != 0); + /* + * a write to the mode byte interrupt enable flags directly updates + * the irq lines + */ + kbd_update_irq_lines(s); + /* + * a write to the mode byte disable interface flags may raise + * an irq if there is pending data in the PS/2 queues. + */ + kbd_safe_update_irq(s); + break; + case KBD_CCMD_WRITE_OBUF: + kbd_queue(s, val, 0); + break; + case KBD_CCMD_WRITE_AUX_OBUF: + kbd_queue(s, val, 1); + break; + case KBD_CCMD_WRITE_OUTPORT: + outport_write(s, val); + break; + case KBD_CCMD_WRITE_MOUSE: + ps2_write_mouse(s->mouse, val); + /* sending data to the mouse reenables PS/2 communication */ + s->mode &= ~KBD_MODE_DISABLE_MOUSE; + kbd_safe_update_irq(s); + break; + default: + break; + } + s->write_cmd = 0; +} + +static void kbd_reset(void *opaque) +{ + KBDState *s = opaque; + + s->mode = KBD_MODE_KBD_INT | KBD_MODE_MOUSE_INT; + s->status = KBD_STAT_CMD | KBD_STAT_UNLOCKED; + s->outport = KBD_OUT_RESET | KBD_OUT_A20 | KBD_OUT_ONES; + s->pending = 0; + kbd_deassert_irq(s); + if (s->throttle_timer) { + timer_del(s->throttle_timer); + } +} + +static uint8_t kbd_outport_default(KBDState *s) +{ + return KBD_OUT_RESET | KBD_OUT_A20 | KBD_OUT_ONES + | (s->status & KBD_STAT_OBF ? KBD_OUT_OBF : 0) + | (s->status & KBD_STAT_MOUSE_OBF ? 
KBD_OUT_MOUSE_OBF : 0); +} + +static int kbd_outport_post_load(void *opaque, int version_id) +{ + KBDState *s = opaque; + s->outport_present = true; + return 0; +} + +static bool kbd_outport_needed(void *opaque) +{ + KBDState *s = opaque; + return s->outport != kbd_outport_default(s); +} + +static const VMStateDescription vmstate_kbd_outport = { + .name = "pckbd_outport", + .version_id = 1, + .minimum_version_id = 1, + .post_load = kbd_outport_post_load, + .needed = kbd_outport_needed, + .fields = (VMStateField[]) { + VMSTATE_UINT8(outport, KBDState), + VMSTATE_END_OF_LIST() + } +}; + +static int kbd_extended_state_pre_save(void *opaque) +{ + KBDState *s = opaque; + + s->migration_flags = 0; + if (s->throttle_timer && timer_pending(s->throttle_timer)) { + s->migration_flags |= KBD_MIGR_TIMER_PENDING; + } + + return 0; +} + +static int kbd_extended_state_post_load(void *opaque, int version_id) +{ + KBDState *s = opaque; + + if (s->migration_flags & KBD_MIGR_TIMER_PENDING) { + kbd_throttle_timeout(s); + } + s->extended_state_loaded = true; + + return 0; +} + +static bool kbd_extended_state_needed(void *opaque) +{ + KBDState *s = opaque; + + return s->extended_state; +} + +static const VMStateDescription vmstate_kbd_extended_state = { + .name = "pckbd/extended_state", + .post_load = kbd_extended_state_post_load, + .pre_save = kbd_extended_state_pre_save, + .needed = kbd_extended_state_needed, + .fields = (VMStateField[]) { + VMSTATE_UINT32(migration_flags, KBDState), + VMSTATE_UINT32(obsrc, KBDState), + VMSTATE_UINT8(obdata, KBDState), + VMSTATE_UINT8(cbdata, KBDState), + VMSTATE_END_OF_LIST() + } +}; + +static int kbd_pre_save(void *opaque) +{ + KBDState *s = opaque; + + if (s->extended_state) { + s->pending_tmp = s->pending; + } else { + s->pending_tmp = 0; + if (s->pending & KBD_PENDING_KBD) { + s->pending_tmp |= KBD_PENDING_KBD_COMPAT; + } + if (s->pending & KBD_PENDING_AUX) { + s->pending_tmp |= KBD_PENDING_AUX_COMPAT; + } + } + return 0; +} + +static int kbd_pre_load(void *opaque) +{ + KBDState *s = opaque; + + s->outport_present = false; + s->extended_state_loaded = false; + return 0; +} + +static int kbd_post_load(void *opaque, int version_id) +{ + KBDState *s = opaque; + if (!s->outport_present) { + s->outport = kbd_outport_default(s); + } + s->pending = s->pending_tmp; + if (!s->extended_state_loaded) { + s->obsrc = s->status & KBD_STAT_OBF ? + (s->status & KBD_STAT_MOUSE_OBF ? 
KBD_OBSRC_MOUSE : KBD_OBSRC_KBD) : + 0; + if (s->pending & KBD_PENDING_KBD_COMPAT) { + s->pending |= KBD_PENDING_KBD; + } + if (s->pending & KBD_PENDING_AUX_COMPAT) { + s->pending |= KBD_PENDING_AUX; + } + } + /* clear all unused flags */ + s->pending &= KBD_PENDING_CTRL_KBD | KBD_PENDING_CTRL_AUX | + KBD_PENDING_KBD | KBD_PENDING_AUX; + return 0; +} + +static const VMStateDescription vmstate_kbd = { + .name = "pckbd", + .version_id = 3, + .minimum_version_id = 3, + .pre_load = kbd_pre_load, + .post_load = kbd_post_load, + .pre_save = kbd_pre_save, + .fields = (VMStateField[]) { + VMSTATE_UINT8(write_cmd, KBDState), + VMSTATE_UINT8(status, KBDState), + VMSTATE_UINT8(mode, KBDState), + VMSTATE_UINT8(pending_tmp, KBDState), + VMSTATE_END_OF_LIST() + }, + .subsections = (const VMStateDescription*[]) { + &vmstate_kbd_outport, + &vmstate_kbd_extended_state, + NULL + } +}; + +/* Memory mapped interface */ +static uint64_t kbd_mm_readfn(void *opaque, hwaddr addr, unsigned size) +{ + KBDState *s = opaque; + + if (addr & s->mask) + return kbd_read_status(s, 0, 1) & 0xff; + else + return kbd_read_data(s, 0, 1) & 0xff; +} + +static void kbd_mm_writefn(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + KBDState *s = opaque; + + if (addr & s->mask) + kbd_write_command(s, 0, value & 0xff, 1); + else + kbd_write_data(s, 0, value & 0xff, 1); +} + + +static const MemoryRegionOps i8042_mmio_ops = { + .read = kbd_mm_readfn, + .write = kbd_mm_writefn, + .valid.min_access_size = 1, + .valid.max_access_size = 4, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +void i8042_mm_init(qemu_irq kbd_irq, qemu_irq mouse_irq, + MemoryRegion *region, ram_addr_t size, + hwaddr mask) +{ + KBDState *s = g_malloc0(sizeof(KBDState)); + + s->irq_kbd = kbd_irq; + s->irq_mouse = mouse_irq; + s->mask = mask; + + s->extended_state = true; + + vmstate_register(NULL, 0, &vmstate_kbd, s); + + memory_region_init_io(region, NULL, &i8042_mmio_ops, s, "i8042", size); + + s->kbd = ps2_kbd_init(kbd_update_kbd_irq, s); + s->mouse = ps2_mouse_init(kbd_update_aux_irq, s); + qemu_register_reset(kbd_reset, s); +} + +struct ISAKBDState { + ISADevice parent_obj; + + KBDState kbd; + bool kbd_throttle; + MemoryRegion io[2]; +}; + +void i8042_isa_mouse_fake_event(ISAKBDState *isa) +{ + KBDState *s = &isa->kbd; + + ps2_mouse_fake_event(s->mouse); +} + +void i8042_setup_a20_line(ISADevice *dev, qemu_irq a20_out) +{ + qdev_connect_gpio_out_named(DEVICE(dev), I8042_A20_LINE, 0, a20_out); +} + +static const VMStateDescription vmstate_kbd_isa = { + .name = "pckbd", + .version_id = 3, + .minimum_version_id = 3, + .fields = (VMStateField[]) { + VMSTATE_STRUCT(kbd, ISAKBDState, 0, vmstate_kbd, KBDState), + VMSTATE_END_OF_LIST() + } +}; + +static const MemoryRegionOps i8042_data_ops = { + .read = kbd_read_data, + .write = kbd_write_data, + .impl = { + .min_access_size = 1, + .max_access_size = 1, + }, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static const MemoryRegionOps i8042_cmd_ops = { + .read = kbd_read_status, + .write = kbd_write_command, + .impl = { + .min_access_size = 1, + .max_access_size = 1, + }, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static void i8042_initfn(Object *obj) +{ + ISAKBDState *isa_s = I8042(obj); + KBDState *s = &isa_s->kbd; + + memory_region_init_io(isa_s->io + 0, obj, &i8042_data_ops, s, + "i8042-data", 1); + memory_region_init_io(isa_s->io + 1, obj, &i8042_cmd_ops, s, + "i8042-cmd", 1); + + qdev_init_gpio_out_named(DEVICE(obj), &s->a20_out, I8042_A20_LINE, 1); +} + +static void i8042_realizefn(DeviceState *dev, 
Error **errp) +{ + ISADevice *isadev = ISA_DEVICE(dev); + ISAKBDState *isa_s = I8042(dev); + KBDState *s = &isa_s->kbd; + + isa_init_irq(isadev, &s->irq_kbd, 1); + isa_init_irq(isadev, &s->irq_mouse, 12); + + isa_register_ioport(isadev, isa_s->io + 0, 0x60); + isa_register_ioport(isadev, isa_s->io + 1, 0x64); + + s->kbd = ps2_kbd_init(kbd_update_kbd_irq, s); + s->mouse = ps2_mouse_init(kbd_update_aux_irq, s); + if (isa_s->kbd_throttle && !isa_s->kbd.extended_state) { + warn_report(TYPE_I8042 ": can't enable kbd-throttle without" + " extended-state, disabling kbd-throttle"); + } else if (isa_s->kbd_throttle) { + s->throttle_timer = timer_new_us(QEMU_CLOCK_VIRTUAL, + kbd_throttle_timeout, s); + } + qemu_register_reset(kbd_reset, s); +} + +static void i8042_build_aml(ISADevice *isadev, Aml *scope) +{ + Aml *kbd; + Aml *mou; + Aml *crs; + + crs = aml_resource_template(); + aml_append(crs, aml_io(AML_DECODE16, 0x0060, 0x0060, 0x01, 0x01)); + aml_append(crs, aml_io(AML_DECODE16, 0x0064, 0x0064, 0x01, 0x01)); + aml_append(crs, aml_irq_no_flags(1)); + + kbd = aml_device("KBD"); + aml_append(kbd, aml_name_decl("_HID", aml_eisaid("PNP0303"))); + aml_append(kbd, aml_name_decl("_STA", aml_int(0xf))); + aml_append(kbd, aml_name_decl("_CRS", crs)); + + crs = aml_resource_template(); + aml_append(crs, aml_irq_no_flags(12)); + + mou = aml_device("MOU"); + aml_append(mou, aml_name_decl("_HID", aml_eisaid("PNP0F13"))); + aml_append(mou, aml_name_decl("_STA", aml_int(0xf))); + aml_append(mou, aml_name_decl("_CRS", crs)); + + aml_append(scope, kbd); + aml_append(scope, mou); +} + +static Property i8042_properties[] = { + DEFINE_PROP_BOOL("extended-state", ISAKBDState, kbd.extended_state, true), + DEFINE_PROP_BOOL("kbd-throttle", ISAKBDState, kbd_throttle, false), + DEFINE_PROP_END_OF_LIST(), +}; + +static void i8042_class_initfn(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + ISADeviceClass *isa = ISA_DEVICE_CLASS(klass); + + device_class_set_props(dc, i8042_properties); + dc->realize = i8042_realizefn; + dc->vmsd = &vmstate_kbd_isa; + isa->build_aml = i8042_build_aml; + set_bit(DEVICE_CATEGORY_INPUT, dc->categories); +} + +static const TypeInfo i8042_info = { + .name = TYPE_I8042, + .parent = TYPE_ISA_DEVICE, + .instance_size = sizeof(ISAKBDState), + .instance_init = i8042_initfn, + .class_init = i8042_class_initfn, +}; + +static void i8042_register_types(void) +{ + type_register_static(&i8042_info); +} + +type_init(i8042_register_types) diff --git a/hw/input/pl050.c b/hw/input/pl050.c new file mode 100644 index 000000000..d279b6c14 --- /dev/null +++ b/hw/input/pl050.c @@ -0,0 +1,210 @@ +/* + * Arm PrimeCell PL050 Keyboard / Mouse Interface + * + * Copyright (c) 2006-2007 CodeSourcery. + * Written by Paul Brook + * + * This code is licensed under the GPL. 
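+ *
+ * The PL050 implements the PS/2 wire protocol only; the keyboard and
+ * mouse behaviour behind it is provided by the common ps2.c emulation.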
+ */ + +#include "qemu/osdep.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "hw/input/ps2.h" +#include "hw/irq.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "qom/object.h" + +#define TYPE_PL050 "pl050" +OBJECT_DECLARE_SIMPLE_TYPE(PL050State, PL050) + +struct PL050State { + SysBusDevice parent_obj; + + MemoryRegion iomem; + void *dev; + uint32_t cr; + uint32_t clk; + uint32_t last; + int pending; + qemu_irq irq; + bool is_mouse; +}; + +static const VMStateDescription vmstate_pl050 = { + .name = "pl050", + .version_id = 2, + .minimum_version_id = 2, + .fields = (VMStateField[]) { + VMSTATE_UINT32(cr, PL050State), + VMSTATE_UINT32(clk, PL050State), + VMSTATE_UINT32(last, PL050State), + VMSTATE_INT32(pending, PL050State), + VMSTATE_END_OF_LIST() + } +}; + +#define PL050_TXEMPTY (1 << 6) +#define PL050_TXBUSY (1 << 5) +#define PL050_RXFULL (1 << 4) +#define PL050_RXBUSY (1 << 3) +#define PL050_RXPARITY (1 << 2) +#define PL050_KMIC (1 << 1) +#define PL050_KMID (1 << 0) + +static const unsigned char pl050_id[] = +{ 0x50, 0x10, 0x04, 0x00, 0x0d, 0xf0, 0x05, 0xb1 }; + +static void pl050_update(void *opaque, int level) +{ + PL050State *s = (PL050State *)opaque; + int raise; + + s->pending = level; + raise = (s->pending && (s->cr & 0x10) != 0) + || (s->cr & 0x08) != 0; + qemu_set_irq(s->irq, raise); +} + +static uint64_t pl050_read(void *opaque, hwaddr offset, + unsigned size) +{ + PL050State *s = (PL050State *)opaque; + if (offset >= 0xfe0 && offset < 0x1000) + return pl050_id[(offset - 0xfe0) >> 2]; + + switch (offset >> 2) { + case 0: /* KMICR */ + return s->cr; + case 1: /* KMISTAT */ + { + uint8_t val; + uint32_t stat; + + val = s->last; + val = val ^ (val >> 4); + val = val ^ (val >> 2); + val = (val ^ (val >> 1)) & 1; + + stat = PL050_TXEMPTY; + if (val) + stat |= PL050_RXPARITY; + if (s->pending) + stat |= PL050_RXFULL; + + return stat; + } + case 2: /* KMIDATA */ + if (s->pending) + s->last = ps2_read_data(s->dev); + return s->last; + case 3: /* KMICLKDIV */ + return s->clk; + case 4: /* KMIIR */ + return s->pending | 2; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "pl050_read: Bad offset %x\n", (int)offset); + return 0; + } +} + +static void pl050_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + PL050State *s = (PL050State *)opaque; + switch (offset >> 2) { + case 0: /* KMICR */ + s->cr = value; + pl050_update(s, s->pending); + /* ??? Need to implement the enable/disable bit. */ + break; + case 2: /* KMIDATA */ + /* ??? This should toggle the TX interrupt line. */ + /* ??? This means kbd/mouse can block each other. 
*/ + if (s->is_mouse) { + ps2_write_mouse(s->dev, value); + } else { + ps2_write_keyboard(s->dev, value); + } + break; + case 3: /* KMICLKDIV */ + s->clk = value; + return; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "pl050_write: Bad offset %x\n", (int)offset); + } +} +static const MemoryRegionOps pl050_ops = { + .read = pl050_read, + .write = pl050_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void pl050_realize(DeviceState *dev, Error **errp) +{ + PL050State *s = PL050(dev); + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + + memory_region_init_io(&s->iomem, OBJECT(s), &pl050_ops, s, "pl050", 0x1000); + sysbus_init_mmio(sbd, &s->iomem); + sysbus_init_irq(sbd, &s->irq); + if (s->is_mouse) { + s->dev = ps2_mouse_init(pl050_update, s); + } else { + s->dev = ps2_kbd_init(pl050_update, s); + } +} + +static void pl050_keyboard_init(Object *obj) +{ + PL050State *s = PL050(obj); + + s->is_mouse = false; +} + +static void pl050_mouse_init(Object *obj) +{ + PL050State *s = PL050(obj); + + s->is_mouse = true; +} + +static const TypeInfo pl050_kbd_info = { + .name = "pl050_keyboard", + .parent = TYPE_PL050, + .instance_init = pl050_keyboard_init, +}; + +static const TypeInfo pl050_mouse_info = { + .name = "pl050_mouse", + .parent = TYPE_PL050, + .instance_init = pl050_mouse_init, +}; + +static void pl050_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + + dc->realize = pl050_realize; + dc->vmsd = &vmstate_pl050; +} + +static const TypeInfo pl050_type_info = { + .name = TYPE_PL050, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(PL050State), + .abstract = true, + .class_init = pl050_class_init, +}; + +static void pl050_register_types(void) +{ + type_register_static(&pl050_type_info); + type_register_static(&pl050_kbd_info); + type_register_static(&pl050_mouse_info); +} + +type_init(pl050_register_types) diff --git a/hw/input/ps2.c b/hw/input/ps2.c new file mode 100644 index 000000000..9376a8f4c --- /dev/null +++ b/hw/input/ps2.c @@ -0,0 +1,1220 @@ +/* + * QEMU PS/2 keyboard/mouse emulation + * + * Copyright (c) 2003 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "hw/input/ps2.h" +#include "migration/vmstate.h" +#include "ui/console.h" +#include "ui/input.h" +#include "sysemu/reset.h" +#include "sysemu/runstate.h" + +#include "trace.h" + +/* Keyboard Commands */ +#define KBD_CMD_SET_LEDS 0xED /* Set keyboard leds */ +#define KBD_CMD_ECHO 0xEE +#define KBD_CMD_SCANCODE 0xF0 /* Get/set scancode set */ +#define KBD_CMD_GET_ID 0xF2 /* get keyboard ID */ +#define KBD_CMD_SET_RATE 0xF3 /* Set typematic rate */ +#define KBD_CMD_ENABLE 0xF4 /* Enable scanning */ +#define KBD_CMD_RESET_DISABLE 0xF5 /* reset and disable scanning */ +#define KBD_CMD_RESET_ENABLE 0xF6 /* reset and enable scanning */ +#define KBD_CMD_RESET 0xFF /* Reset */ +#define KBD_CMD_SET_MAKE_BREAK 0xFC /* Set Make and Break mode */ +#define KBD_CMD_SET_TYPEMATIC 0xFA /* Set Typematic Make and Break mode */ + +/* Keyboard Replies */ +#define KBD_REPLY_POR 0xAA /* Power on reset */ +#define KBD_REPLY_ID 0xAB /* Keyboard ID */ +#define KBD_REPLY_ACK 0xFA /* Command ACK */ +#define KBD_REPLY_RESEND 0xFE /* Command NACK, send the cmd again */ + +/* Mouse Commands */ +#define AUX_SET_SCALE11 0xE6 /* Set 1:1 scaling */ +#define AUX_SET_SCALE21 0xE7 /* Set 2:1 scaling */ +#define AUX_SET_RES 0xE8 /* Set resolution */ +#define AUX_GET_SCALE 0xE9 /* Get scaling factor */ +#define AUX_SET_STREAM 0xEA /* Set stream mode */ +#define AUX_POLL 0xEB /* Poll */ +#define AUX_RESET_WRAP 0xEC /* Reset wrap mode */ +#define AUX_SET_WRAP 0xEE /* Set wrap mode */ +#define AUX_SET_REMOTE 0xF0 /* Set remote mode */ +#define AUX_GET_TYPE 0xF2 /* Get type */ +#define AUX_SET_SAMPLE 0xF3 /* Set sample rate */ +#define AUX_ENABLE_DEV 0xF4 /* Enable aux device */ +#define AUX_DISABLE_DEV 0xF5 /* Disable aux device */ +#define AUX_SET_DEFAULT 0xF6 +#define AUX_RESET 0xFF /* Reset aux device */ +#define AUX_ACK 0xFA /* Command byte ACK. */ + +#define MOUSE_STATUS_REMOTE 0x40 +#define MOUSE_STATUS_ENABLED 0x20 +#define MOUSE_STATUS_SCALE21 0x10 + +/* + * PS/2 buffer size. Keep 256 bytes for compatibility with + * older QEMU versions. 
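+ * Only PS2_QUEUE_SIZE bytes hold ordinary scancode data; command
+ * replies are queued in front of the read pointer, and migration caps
+ * them at PS2_QUEUE_HEADROOM.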
+ */ +#define PS2_BUFFER_SIZE 256 +#define PS2_QUEUE_SIZE 16 /* Queue size required by PS/2 protocol */ +#define PS2_QUEUE_HEADROOM 8 /* Queue size for keyboard command replies */ + +/* Bits for 'modifiers' field in PS2KbdState */ +#define MOD_CTRL_L (1 << 0) +#define MOD_SHIFT_L (1 << 1) +#define MOD_ALT_L (1 << 2) +#define MOD_CTRL_R (1 << 3) +#define MOD_SHIFT_R (1 << 4) +#define MOD_ALT_R (1 << 5) + +typedef struct { + uint8_t data[PS2_BUFFER_SIZE]; + int rptr, wptr, cwptr, count; +} PS2Queue; + +struct PS2State { + PS2Queue queue; + int32_t write_cmd; + void (*update_irq)(void *, int); + void *update_arg; +}; + +typedef struct { + PS2State common; + int scan_enabled; + int translate; + int scancode_set; /* 1=XT, 2=AT, 3=PS/2 */ + int ledstate; + bool need_high_bit; + unsigned int modifiers; /* bitmask of MOD_* constants above */ +} PS2KbdState; + +typedef struct { + PS2State common; + uint8_t mouse_status; + uint8_t mouse_resolution; + uint8_t mouse_sample_rate; + uint8_t mouse_wrap; + uint8_t mouse_type; /* 0 = PS2, 3 = IMPS/2, 4 = IMEX */ + uint8_t mouse_detect_state; + int mouse_dx; /* current values, needed for 'poll' mode */ + int mouse_dy; + int mouse_dz; + uint8_t mouse_buttons; +} PS2MouseState; + +static uint8_t translate_table[256] = { + 0xff, 0x43, 0x41, 0x3f, 0x3d, 0x3b, 0x3c, 0x58, + 0x64, 0x44, 0x42, 0x40, 0x3e, 0x0f, 0x29, 0x59, + 0x65, 0x38, 0x2a, 0x70, 0x1d, 0x10, 0x02, 0x5a, + 0x66, 0x71, 0x2c, 0x1f, 0x1e, 0x11, 0x03, 0x5b, + 0x67, 0x2e, 0x2d, 0x20, 0x12, 0x05, 0x04, 0x5c, + 0x68, 0x39, 0x2f, 0x21, 0x14, 0x13, 0x06, 0x5d, + 0x69, 0x31, 0x30, 0x23, 0x22, 0x15, 0x07, 0x5e, + 0x6a, 0x72, 0x32, 0x24, 0x16, 0x08, 0x09, 0x5f, + 0x6b, 0x33, 0x25, 0x17, 0x18, 0x0b, 0x0a, 0x60, + 0x6c, 0x34, 0x35, 0x26, 0x27, 0x19, 0x0c, 0x61, + 0x6d, 0x73, 0x28, 0x74, 0x1a, 0x0d, 0x62, 0x6e, + 0x3a, 0x36, 0x1c, 0x1b, 0x75, 0x2b, 0x63, 0x76, + 0x55, 0x56, 0x77, 0x78, 0x79, 0x7a, 0x0e, 0x7b, + 0x7c, 0x4f, 0x7d, 0x4b, 0x47, 0x7e, 0x7f, 0x6f, + 0x52, 0x53, 0x50, 0x4c, 0x4d, 0x48, 0x01, 0x45, + 0x57, 0x4e, 0x51, 0x4a, 0x37, 0x49, 0x46, 0x54, + 0x80, 0x81, 0x82, 0x41, 0x54, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, + 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, + 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, + 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, + 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, + 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, + 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, + 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, + 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, + 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, + 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, + 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, + 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, +}; + +static unsigned int ps2_modifier_bit(QKeyCode key) +{ + switch (key) { + case Q_KEY_CODE_CTRL: + return MOD_CTRL_L; + case Q_KEY_CODE_CTRL_R: + return MOD_CTRL_R; + case Q_KEY_CODE_SHIFT: + return MOD_SHIFT_L; + case Q_KEY_CODE_SHIFT_R: + return MOD_SHIFT_R; + case Q_KEY_CODE_ALT: + return MOD_ALT_L; + case Q_KEY_CODE_ALT_R: + return MOD_ALT_R; + default: + return 0; + } +} + +static void ps2_reset_queue(PS2State *s) +{ + PS2Queue *q = &s->queue; + + q->rptr = 0; + q->wptr = 0; + q->cwptr = -1; + q->count = 0; +} + +int ps2_queue_empty(PS2State *s) +{ + return s->queue.count == 0; +} + +void ps2_queue_noirq(PS2State *s, int b) +{ + PS2Queue *q = &s->queue; + + if 
(q->count >= PS2_QUEUE_SIZE) { + return; + } + + q->data[q->wptr] = b; + if (++q->wptr == PS2_BUFFER_SIZE) { + q->wptr = 0; + } + q->count++; +} + +void ps2_raise_irq(PS2State *s) +{ + s->update_irq(s->update_arg, 1); +} + +void ps2_queue(PS2State *s, int b) +{ + if (PS2_QUEUE_SIZE - s->queue.count < 1) { + return; + } + + ps2_queue_noirq(s, b); + ps2_raise_irq(s); +} + +void ps2_queue_2(PS2State *s, int b1, int b2) +{ + if (PS2_QUEUE_SIZE - s->queue.count < 2) { + return; + } + + ps2_queue_noirq(s, b1); + ps2_queue_noirq(s, b2); + ps2_raise_irq(s); +} + +void ps2_queue_3(PS2State *s, int b1, int b2, int b3) +{ + if (PS2_QUEUE_SIZE - s->queue.count < 3) { + return; + } + + ps2_queue_noirq(s, b1); + ps2_queue_noirq(s, b2); + ps2_queue_noirq(s, b3); + ps2_raise_irq(s); +} + +void ps2_queue_4(PS2State *s, int b1, int b2, int b3, int b4) +{ + if (PS2_QUEUE_SIZE - s->queue.count < 4) { + return; + } + + ps2_queue_noirq(s, b1); + ps2_queue_noirq(s, b2); + ps2_queue_noirq(s, b3); + ps2_queue_noirq(s, b4); + ps2_raise_irq(s); +} + +static void ps2_cqueue_data(PS2Queue *q, int b) +{ + q->data[q->cwptr] = b; + if (++q->cwptr >= PS2_BUFFER_SIZE) { + q->cwptr = 0; + } + q->count++; +} + +static void ps2_cqueue_1(PS2State *s, int b1) +{ + PS2Queue *q = &s->queue; + + q->rptr = (q->rptr - 1) & (PS2_BUFFER_SIZE - 1); + q->cwptr = q->rptr; + ps2_cqueue_data(q, b1); + ps2_raise_irq(s); +} + +static void ps2_cqueue_2(PS2State *s, int b1, int b2) +{ + PS2Queue *q = &s->queue; + + q->rptr = (q->rptr - 2) & (PS2_BUFFER_SIZE - 1); + q->cwptr = q->rptr; + ps2_cqueue_data(q, b1); + ps2_cqueue_data(q, b2); + ps2_raise_irq(s); +} + +static void ps2_cqueue_3(PS2State *s, int b1, int b2, int b3) +{ + PS2Queue *q = &s->queue; + + q->rptr = (q->rptr - 3) & (PS2_BUFFER_SIZE - 1); + q->cwptr = q->rptr; + ps2_cqueue_data(q, b1); + ps2_cqueue_data(q, b2); + ps2_cqueue_data(q, b3); + ps2_raise_irq(s); +} + +static void ps2_cqueue_reset(PS2State *s) +{ + PS2Queue *q = &s->queue; + int ccount; + + if (q->cwptr == -1) { + return; + } + + ccount = (q->cwptr - q->rptr) & (PS2_BUFFER_SIZE - 1); + q->count -= ccount; + q->rptr = q->cwptr; + q->cwptr = -1; +} + +/* keycode is the untranslated scancode in the current scancode set. 
*/ +static void ps2_put_keycode(void *opaque, int keycode) +{ + PS2KbdState *s = opaque; + + trace_ps2_put_keycode(opaque, keycode); + qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER, NULL); + + if (s->translate) { + if (keycode == 0xf0) { + s->need_high_bit = true; + } else if (s->need_high_bit) { + ps2_queue(&s->common, translate_table[keycode] | 0x80); + s->need_high_bit = false; + } else { + ps2_queue(&s->common, translate_table[keycode]); + } + } else { + ps2_queue(&s->common, keycode); + } +} + +static void ps2_keyboard_event(DeviceState *dev, QemuConsole *src, + InputEvent *evt) +{ + PS2KbdState *s = (PS2KbdState *)dev; + InputKeyEvent *key = evt->u.key.data; + int qcode; + uint16_t keycode = 0; + int mod; + + /* do not process events while disabled to prevent stream corruption */ + if (!s->scan_enabled) { + return; + } + + qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER, NULL); + assert(evt->type == INPUT_EVENT_KIND_KEY); + qcode = qemu_input_key_value_to_qcode(key->key); + + mod = ps2_modifier_bit(qcode); + trace_ps2_keyboard_event(s, qcode, key->down, mod, + s->modifiers, s->scancode_set, s->translate); + if (key->down) { + s->modifiers |= mod; + } else { + s->modifiers &= ~mod; + } + + if (s->scancode_set == 1) { + if (qcode == Q_KEY_CODE_PAUSE) { + if (s->modifiers & (MOD_CTRL_L | MOD_CTRL_R)) { + if (key->down) { + ps2_put_keycode(s, 0xe0); + ps2_put_keycode(s, 0x46); + ps2_put_keycode(s, 0xe0); + ps2_put_keycode(s, 0xc6); + } + } else { + if (key->down) { + ps2_put_keycode(s, 0xe1); + ps2_put_keycode(s, 0x1d); + ps2_put_keycode(s, 0x45); + ps2_put_keycode(s, 0xe1); + ps2_put_keycode(s, 0x9d); + ps2_put_keycode(s, 0xc5); + } + } + } else if (qcode == Q_KEY_CODE_PRINT) { + if (s->modifiers & MOD_ALT_L) { + if (key->down) { + ps2_put_keycode(s, 0xb8); + ps2_put_keycode(s, 0x38); + ps2_put_keycode(s, 0x54); + } else { + ps2_put_keycode(s, 0xd4); + ps2_put_keycode(s, 0xb8); + ps2_put_keycode(s, 0x38); + } + } else if (s->modifiers & MOD_ALT_R) { + if (key->down) { + ps2_put_keycode(s, 0xe0); + ps2_put_keycode(s, 0xb8); + ps2_put_keycode(s, 0xe0); + ps2_put_keycode(s, 0x38); + ps2_put_keycode(s, 0x54); + } else { + ps2_put_keycode(s, 0xd4); + ps2_put_keycode(s, 0xe0); + ps2_put_keycode(s, 0xb8); + ps2_put_keycode(s, 0xe0); + ps2_put_keycode(s, 0x38); + } + } else if (s->modifiers & (MOD_SHIFT_L | MOD_CTRL_L | + MOD_SHIFT_R | MOD_CTRL_R)) { + if (key->down) { + ps2_put_keycode(s, 0xe0); + ps2_put_keycode(s, 0x37); + } else { + ps2_put_keycode(s, 0xe0); + ps2_put_keycode(s, 0xb7); + } + } else { + if (key->down) { + ps2_put_keycode(s, 0xe0); + ps2_put_keycode(s, 0x2a); + ps2_put_keycode(s, 0xe0); + ps2_put_keycode(s, 0x37); + } else { + ps2_put_keycode(s, 0xe0); + ps2_put_keycode(s, 0xb7); + ps2_put_keycode(s, 0xe0); + ps2_put_keycode(s, 0xaa); + } + } + } else { + if (qcode < qemu_input_map_qcode_to_atset1_len) + keycode = qemu_input_map_qcode_to_atset1[qcode]; + if (keycode) { + if (keycode & 0xff00) { + ps2_put_keycode(s, keycode >> 8); + } + if (!key->down) { + keycode |= 0x80; + } + ps2_put_keycode(s, keycode & 0xff); + } else { + qemu_log_mask(LOG_UNIMP, + "ps2: ignoring key with qcode %d\n", qcode); + } + } + } else if (s->scancode_set == 2) { + if (qcode == Q_KEY_CODE_PAUSE) { + if (s->modifiers & (MOD_CTRL_L | MOD_CTRL_R)) { + if (key->down) { + ps2_put_keycode(s, 0xe0); + ps2_put_keycode(s, 0x7e); + ps2_put_keycode(s, 0xe0); + ps2_put_keycode(s, 0xf0); + ps2_put_keycode(s, 0x7e); + } + } else { + if (key->down) { + ps2_put_keycode(s, 0xe1); + ps2_put_keycode(s, 
0x14); + ps2_put_keycode(s, 0x77); + ps2_put_keycode(s, 0xe1); + ps2_put_keycode(s, 0xf0); + ps2_put_keycode(s, 0x14); + ps2_put_keycode(s, 0xf0); + ps2_put_keycode(s, 0x77); + } + } + } else if (qcode == Q_KEY_CODE_PRINT) { + if (s->modifiers & MOD_ALT_L) { + if (key->down) { + ps2_put_keycode(s, 0xf0); + ps2_put_keycode(s, 0x11); + ps2_put_keycode(s, 0x11); + ps2_put_keycode(s, 0x84); + } else { + ps2_put_keycode(s, 0xf0); + ps2_put_keycode(s, 0x84); + ps2_put_keycode(s, 0xf0); + ps2_put_keycode(s, 0x11); + ps2_put_keycode(s, 0x11); + } + } else if (s->modifiers & MOD_ALT_R) { + if (key->down) { + ps2_put_keycode(s, 0xe0); + ps2_put_keycode(s, 0xf0); + ps2_put_keycode(s, 0x11); + ps2_put_keycode(s, 0xe0); + ps2_put_keycode(s, 0x11); + ps2_put_keycode(s, 0x84); + } else { + ps2_put_keycode(s, 0xf0); + ps2_put_keycode(s, 0x84); + ps2_put_keycode(s, 0xe0); + ps2_put_keycode(s, 0xf0); + ps2_put_keycode(s, 0x11); + ps2_put_keycode(s, 0xe0); + ps2_put_keycode(s, 0x11); + } + } else if (s->modifiers & (MOD_SHIFT_L | MOD_CTRL_L | + MOD_SHIFT_R | MOD_CTRL_R)) { + if (key->down) { + ps2_put_keycode(s, 0xe0); + ps2_put_keycode(s, 0x7c); + } else { + ps2_put_keycode(s, 0xe0); + ps2_put_keycode(s, 0xf0); + ps2_put_keycode(s, 0x7c); + } + } else { + if (key->down) { + ps2_put_keycode(s, 0xe0); + ps2_put_keycode(s, 0x12); + ps2_put_keycode(s, 0xe0); + ps2_put_keycode(s, 0x7c); + } else { + ps2_put_keycode(s, 0xe0); + ps2_put_keycode(s, 0xf0); + ps2_put_keycode(s, 0x7c); + ps2_put_keycode(s, 0xe0); + ps2_put_keycode(s, 0xf0); + ps2_put_keycode(s, 0x12); + } + } + } else { + if (qcode < qemu_input_map_qcode_to_atset2_len) + keycode = qemu_input_map_qcode_to_atset2[qcode]; + if (keycode) { + if (keycode & 0xff00) { + ps2_put_keycode(s, keycode >> 8); + } + if (!key->down) { + ps2_put_keycode(s, 0xf0); + } + ps2_put_keycode(s, keycode & 0xff); + } else { + qemu_log_mask(LOG_UNIMP, + "ps2: ignoring key with qcode %d\n", qcode); + } + } + } else if (s->scancode_set == 3) { + if (qcode < qemu_input_map_qcode_to_atset3_len) + keycode = qemu_input_map_qcode_to_atset3[qcode]; + if (keycode) { + /* FIXME: break code should be configured on a key by key basis */ + if (!key->down) { + ps2_put_keycode(s, 0xf0); + } + ps2_put_keycode(s, keycode); + } else { + qemu_log_mask(LOG_UNIMP, + "ps2: ignoring key with qcode %d\n", qcode); + } + } +} + +uint32_t ps2_read_data(PS2State *s) +{ + PS2Queue *q; + int val, index; + + trace_ps2_read_data(s); + q = &s->queue; + if (q->count == 0) { + /* NOTE: if no data left, we return the last keyboard one + (needed for EMM386) */ + /* XXX: need a timer to do things correctly */ + index = q->rptr - 1; + if (index < 0) { + index = PS2_BUFFER_SIZE - 1; + } + val = q->data[index]; + } else { + val = q->data[q->rptr]; + if (++q->rptr == PS2_BUFFER_SIZE) { + q->rptr = 0; + } + q->count--; + if (q->rptr == q->cwptr) { + /* command reply queue is empty */ + q->cwptr = -1; + } + /* reading deasserts IRQ */ + s->update_irq(s->update_arg, 0); + /* reassert IRQs if data left */ + if (q->count) { + s->update_irq(s->update_arg, 1); + } + } + return val; +} + +static void ps2_set_ledstate(PS2KbdState *s, int ledstate) +{ + trace_ps2_set_ledstate(s, ledstate); + s->ledstate = ledstate; + kbd_put_ledstate(ledstate); +} + +static void ps2_reset_keyboard(PS2KbdState *s) +{ + trace_ps2_reset_keyboard(s); + s->scan_enabled = 1; + s->scancode_set = 2; + ps2_reset_queue(&s->common); + ps2_set_ledstate(s, 0); +} + +void ps2_write_keyboard(void *opaque, int val) +{ + PS2KbdState *s = (PS2KbdState *)opaque; 
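+    /* a newly written byte cancels any command reply still queued */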
+ + trace_ps2_write_keyboard(opaque, val); + ps2_cqueue_reset(&s->common); + switch(s->common.write_cmd) { + default: + case -1: + switch(val) { + case 0x00: + ps2_cqueue_1(&s->common, KBD_REPLY_ACK); + break; + case 0x05: + ps2_cqueue_1(&s->common, KBD_REPLY_RESEND); + break; + case KBD_CMD_GET_ID: + /* We emulate a MF2 AT keyboard here */ + ps2_cqueue_3(&s->common, KBD_REPLY_ACK, KBD_REPLY_ID, + s->translate ? 0x41 : 0x83); + break; + case KBD_CMD_ECHO: + ps2_cqueue_1(&s->common, KBD_CMD_ECHO); + break; + case KBD_CMD_ENABLE: + s->scan_enabled = 1; + ps2_cqueue_1(&s->common, KBD_REPLY_ACK); + break; + case KBD_CMD_SCANCODE: + case KBD_CMD_SET_LEDS: + case KBD_CMD_SET_RATE: + case KBD_CMD_SET_MAKE_BREAK: + s->common.write_cmd = val; + ps2_cqueue_1(&s->common, KBD_REPLY_ACK); + break; + case KBD_CMD_RESET_DISABLE: + ps2_reset_keyboard(s); + s->scan_enabled = 0; + ps2_cqueue_1(&s->common, KBD_REPLY_ACK); + break; + case KBD_CMD_RESET_ENABLE: + ps2_reset_keyboard(s); + s->scan_enabled = 1; + ps2_cqueue_1(&s->common, KBD_REPLY_ACK); + break; + case KBD_CMD_RESET: + ps2_reset_keyboard(s); + ps2_cqueue_2(&s->common, + KBD_REPLY_ACK, + KBD_REPLY_POR); + break; + case KBD_CMD_SET_TYPEMATIC: + ps2_cqueue_1(&s->common, KBD_REPLY_ACK); + break; + default: + ps2_cqueue_1(&s->common, KBD_REPLY_RESEND); + break; + } + break; + case KBD_CMD_SET_MAKE_BREAK: + ps2_cqueue_1(&s->common, KBD_REPLY_ACK); + s->common.write_cmd = -1; + break; + case KBD_CMD_SCANCODE: + if (val == 0) { + ps2_cqueue_2(&s->common, KBD_REPLY_ACK, s->translate ? + translate_table[s->scancode_set] : s->scancode_set); + } else if (val >= 1 && val <= 3) { + s->scancode_set = val; + ps2_cqueue_1(&s->common, KBD_REPLY_ACK); + } else { + ps2_cqueue_1(&s->common, KBD_REPLY_RESEND); + } + s->common.write_cmd = -1; + break; + case KBD_CMD_SET_LEDS: + ps2_set_ledstate(s, val); + ps2_cqueue_1(&s->common, KBD_REPLY_ACK); + s->common.write_cmd = -1; + break; + case KBD_CMD_SET_RATE: + ps2_cqueue_1(&s->common, KBD_REPLY_ACK); + s->common.write_cmd = -1; + break; + } +} + +/* Set the scancode translation mode. + 0 = raw scancodes. + 1 = translated scancodes (used by qemu internally). */ + +void ps2_keyboard_set_translation(void *opaque, int mode) +{ + PS2KbdState *s = (PS2KbdState *)opaque; + trace_ps2_keyboard_set_translation(opaque, mode); + s->translate = mode; +} + +static int ps2_mouse_send_packet(PS2MouseState *s) +{ + /* IMPS/2 and IMEX send 4 bytes, PS2 sends 3 bytes */ + const int needed = s->mouse_type ? 4 : 3; + unsigned int b; + int dx1, dy1, dz1; + + if (PS2_QUEUE_SIZE - s->common.queue.count < needed) { + return 0; + } + + dx1 = s->mouse_dx; + dy1 = s->mouse_dy; + dz1 = s->mouse_dz; + /* XXX: increase range to 8 bits ? 
*/ + if (dx1 > 127) + dx1 = 127; + else if (dx1 < -127) + dx1 = -127; + if (dy1 > 127) + dy1 = 127; + else if (dy1 < -127) + dy1 = -127; + b = 0x08 | ((dx1 < 0) << 4) | ((dy1 < 0) << 5) | (s->mouse_buttons & 0x07); + ps2_queue_noirq(&s->common, b); + ps2_queue_noirq(&s->common, dx1 & 0xff); + ps2_queue_noirq(&s->common, dy1 & 0xff); + /* extra byte for IMPS/2 or IMEX */ + switch(s->mouse_type) { + default: + break; + case 3: + if (dz1 > 127) + dz1 = 127; + else if (dz1 < -127) + dz1 = -127; + ps2_queue_noirq(&s->common, dz1 & 0xff); + break; + case 4: + if (dz1 > 7) + dz1 = 7; + else if (dz1 < -7) + dz1 = -7; + b = (dz1 & 0x0f) | ((s->mouse_buttons & 0x18) << 1); + ps2_queue_noirq(&s->common, b); + break; + } + + ps2_raise_irq(&s->common); + + trace_ps2_mouse_send_packet(s, dx1, dy1, dz1, b); + /* update deltas */ + s->mouse_dx -= dx1; + s->mouse_dy -= dy1; + s->mouse_dz -= dz1; + + return 1; +} + +static void ps2_mouse_event(DeviceState *dev, QemuConsole *src, + InputEvent *evt) +{ + static const int bmap[INPUT_BUTTON__MAX] = { + [INPUT_BUTTON_LEFT] = PS2_MOUSE_BUTTON_LEFT, + [INPUT_BUTTON_MIDDLE] = PS2_MOUSE_BUTTON_MIDDLE, + [INPUT_BUTTON_RIGHT] = PS2_MOUSE_BUTTON_RIGHT, + [INPUT_BUTTON_SIDE] = PS2_MOUSE_BUTTON_SIDE, + [INPUT_BUTTON_EXTRA] = PS2_MOUSE_BUTTON_EXTRA, + }; + PS2MouseState *s = (PS2MouseState *)dev; + InputMoveEvent *move; + InputBtnEvent *btn; + + /* check if deltas are recorded when disabled */ + if (!(s->mouse_status & MOUSE_STATUS_ENABLED)) + return; + + switch (evt->type) { + case INPUT_EVENT_KIND_REL: + move = evt->u.rel.data; + if (move->axis == INPUT_AXIS_X) { + s->mouse_dx += move->value; + } else if (move->axis == INPUT_AXIS_Y) { + s->mouse_dy -= move->value; + } + break; + + case INPUT_EVENT_KIND_BTN: + btn = evt->u.btn.data; + if (btn->down) { + s->mouse_buttons |= bmap[btn->button]; + if (btn->button == INPUT_BUTTON_WHEEL_UP) { + s->mouse_dz--; + } else if (btn->button == INPUT_BUTTON_WHEEL_DOWN) { + s->mouse_dz++; + } + } else { + s->mouse_buttons &= ~bmap[btn->button]; + } + break; + + default: + /* keep gcc happy */ + break; + } +} + +static void ps2_mouse_sync(DeviceState *dev) +{ + PS2MouseState *s = (PS2MouseState *)dev; + + /* do not sync while disabled to prevent stream corruption */ + if (!(s->mouse_status & MOUSE_STATUS_ENABLED)) { + return; + } + + if (s->mouse_buttons) { + qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER, NULL); + } + if (!(s->mouse_status & MOUSE_STATUS_REMOTE)) { + /* if not remote, send event. 
Multiple events are sent if + too big deltas */ + while (ps2_mouse_send_packet(s)) { + if (s->mouse_dx == 0 && s->mouse_dy == 0 && s->mouse_dz == 0) + break; + } + } +} + +void ps2_mouse_fake_event(void *opaque) +{ + PS2MouseState *s = opaque; + trace_ps2_mouse_fake_event(opaque); + s->mouse_dx++; + ps2_mouse_sync(opaque); +} + +void ps2_write_mouse(void *opaque, int val) +{ + PS2MouseState *s = (PS2MouseState *)opaque; + + trace_ps2_write_mouse(opaque, val); + switch(s->common.write_cmd) { + default: + case -1: + /* mouse command */ + if (s->mouse_wrap) { + if (val == AUX_RESET_WRAP) { + s->mouse_wrap = 0; + ps2_queue(&s->common, AUX_ACK); + return; + } else if (val != AUX_RESET) { + ps2_queue(&s->common, val); + return; + } + } + switch(val) { + case AUX_SET_SCALE11: + s->mouse_status &= ~MOUSE_STATUS_SCALE21; + ps2_queue(&s->common, AUX_ACK); + break; + case AUX_SET_SCALE21: + s->mouse_status |= MOUSE_STATUS_SCALE21; + ps2_queue(&s->common, AUX_ACK); + break; + case AUX_SET_STREAM: + s->mouse_status &= ~MOUSE_STATUS_REMOTE; + ps2_queue(&s->common, AUX_ACK); + break; + case AUX_SET_WRAP: + s->mouse_wrap = 1; + ps2_queue(&s->common, AUX_ACK); + break; + case AUX_SET_REMOTE: + s->mouse_status |= MOUSE_STATUS_REMOTE; + ps2_queue(&s->common, AUX_ACK); + break; + case AUX_GET_TYPE: + ps2_queue_2(&s->common, + AUX_ACK, + s->mouse_type); + break; + case AUX_SET_RES: + case AUX_SET_SAMPLE: + s->common.write_cmd = val; + ps2_queue(&s->common, AUX_ACK); + break; + case AUX_GET_SCALE: + ps2_queue_4(&s->common, + AUX_ACK, + s->mouse_status, + s->mouse_resolution, + s->mouse_sample_rate); + break; + case AUX_POLL: + ps2_queue(&s->common, AUX_ACK); + ps2_mouse_send_packet(s); + break; + case AUX_ENABLE_DEV: + s->mouse_status |= MOUSE_STATUS_ENABLED; + ps2_queue(&s->common, AUX_ACK); + break; + case AUX_DISABLE_DEV: + s->mouse_status &= ~MOUSE_STATUS_ENABLED; + ps2_queue(&s->common, AUX_ACK); + break; + case AUX_SET_DEFAULT: + s->mouse_sample_rate = 100; + s->mouse_resolution = 2; + s->mouse_status = 0; + ps2_queue(&s->common, AUX_ACK); + break; + case AUX_RESET: + s->mouse_sample_rate = 100; + s->mouse_resolution = 2; + s->mouse_status = 0; + s->mouse_type = 0; + ps2_reset_queue(&s->common); + ps2_queue_3(&s->common, + AUX_ACK, + 0xaa, + s->mouse_type); + break; + default: + break; + } + break; + case AUX_SET_SAMPLE: + s->mouse_sample_rate = val; + /* detect IMPS/2 or IMEX */ + switch(s->mouse_detect_state) { + default: + case 0: + if (val == 200) + s->mouse_detect_state = 1; + break; + case 1: + if (val == 100) + s->mouse_detect_state = 2; + else if (val == 200) + s->mouse_detect_state = 3; + else + s->mouse_detect_state = 0; + break; + case 2: + if (val == 80) + s->mouse_type = 3; /* IMPS/2 */ + s->mouse_detect_state = 0; + break; + case 3: + if (val == 80) + s->mouse_type = 4; /* IMEX */ + s->mouse_detect_state = 0; + break; + } + ps2_queue(&s->common, AUX_ACK); + s->common.write_cmd = -1; + break; + case AUX_SET_RES: + s->mouse_resolution = val; + ps2_queue(&s->common, AUX_ACK); + s->common.write_cmd = -1; + break; + } +} + +static void ps2_common_reset(PS2State *s) +{ + s->write_cmd = -1; + ps2_reset_queue(s); + s->update_irq(s->update_arg, 0); +} + +static void ps2_common_post_load(PS2State *s) +{ + PS2Queue *q = &s->queue; + int ccount = 0; + + /* limit the number of queued command replies to PS2_QUEUE_HEADROOM */ + if (q->cwptr != -1) { + ccount = (q->cwptr - q->rptr) & (PS2_BUFFER_SIZE - 1); + if (ccount > PS2_QUEUE_HEADROOM) { + ccount = PS2_QUEUE_HEADROOM; + } + } + + /* limit the scancode 
queue size to PS2_QUEUE_SIZE */ + if (q->count < ccount) { + q->count = ccount; + } else if (q->count > ccount + PS2_QUEUE_SIZE) { + q->count = ccount + PS2_QUEUE_SIZE; + } + + /* sanitize rptr and recalculate wptr and cwptr */ + q->rptr = q->rptr & (PS2_BUFFER_SIZE - 1); + q->wptr = (q->rptr + q->count) & (PS2_BUFFER_SIZE - 1); + q->cwptr = ccount ? (q->rptr + ccount) & (PS2_BUFFER_SIZE - 1) : -1; +} + +static void ps2_kbd_reset(void *opaque) +{ + PS2KbdState *s = (PS2KbdState *) opaque; + + trace_ps2_kbd_reset(opaque); + ps2_common_reset(&s->common); + s->scan_enabled = 1; + s->translate = 0; + s->scancode_set = 2; + s->modifiers = 0; +} + +static void ps2_mouse_reset(void *opaque) +{ + PS2MouseState *s = (PS2MouseState *) opaque; + + trace_ps2_mouse_reset(opaque); + ps2_common_reset(&s->common); + s->mouse_status = 0; + s->mouse_resolution = 0; + s->mouse_sample_rate = 0; + s->mouse_wrap = 0; + s->mouse_type = 0; + s->mouse_detect_state = 0; + s->mouse_dx = 0; + s->mouse_dy = 0; + s->mouse_dz = 0; + s->mouse_buttons = 0; +} + +static const VMStateDescription vmstate_ps2_common = { + .name = "PS2 Common State", + .version_id = 3, + .minimum_version_id = 2, + .fields = (VMStateField[]) { + VMSTATE_INT32(write_cmd, PS2State), + VMSTATE_INT32(queue.rptr, PS2State), + VMSTATE_INT32(queue.wptr, PS2State), + VMSTATE_INT32(queue.count, PS2State), + VMSTATE_BUFFER(queue.data, PS2State), + VMSTATE_END_OF_LIST() + } +}; + +static bool ps2_keyboard_ledstate_needed(void *opaque) +{ + PS2KbdState *s = opaque; + + return s->ledstate != 0; /* 0 is default state */ +} + +static int ps2_kbd_ledstate_post_load(void *opaque, int version_id) +{ + PS2KbdState *s = opaque; + + kbd_put_ledstate(s->ledstate); + return 0; +} + +static const VMStateDescription vmstate_ps2_keyboard_ledstate = { + .name = "ps2kbd/ledstate", + .version_id = 3, + .minimum_version_id = 2, + .post_load = ps2_kbd_ledstate_post_load, + .needed = ps2_keyboard_ledstate_needed, + .fields = (VMStateField[]) { + VMSTATE_INT32(ledstate, PS2KbdState), + VMSTATE_END_OF_LIST() + } +}; + +static bool ps2_keyboard_need_high_bit_needed(void *opaque) +{ + PS2KbdState *s = opaque; + return s->need_high_bit != 0; /* 0 is the usual state */ +} + +static const VMStateDescription vmstate_ps2_keyboard_need_high_bit = { + .name = "ps2kbd/need_high_bit", + .version_id = 1, + .minimum_version_id = 1, + .needed = ps2_keyboard_need_high_bit_needed, + .fields = (VMStateField[]) { + VMSTATE_BOOL(need_high_bit, PS2KbdState), + VMSTATE_END_OF_LIST() + } +}; + +static bool ps2_keyboard_cqueue_needed(void *opaque) +{ + PS2KbdState *s = opaque; + + return s->common.queue.cwptr != -1; /* the queue is mostly empty */ +} + +static const VMStateDescription vmstate_ps2_keyboard_cqueue = { + .name = "ps2kbd/command_reply_queue", + .needed = ps2_keyboard_cqueue_needed, + .fields = (VMStateField[]) { + VMSTATE_INT32(common.queue.cwptr, PS2KbdState), + VMSTATE_END_OF_LIST() + } +}; + +static int ps2_kbd_post_load(void* opaque, int version_id) +{ + PS2KbdState *s = (PS2KbdState*)opaque; + PS2State *ps2 = &s->common; + + if (version_id == 2) + s->scancode_set=2; + + ps2_common_post_load(ps2); + + return 0; +} + +static const VMStateDescription vmstate_ps2_keyboard = { + .name = "ps2kbd", + .version_id = 3, + .minimum_version_id = 2, + .post_load = ps2_kbd_post_load, + .fields = (VMStateField[]) { + VMSTATE_STRUCT(common, PS2KbdState, 0, vmstate_ps2_common, PS2State), + VMSTATE_INT32(scan_enabled, PS2KbdState), + VMSTATE_INT32(translate, PS2KbdState), + 
VMSTATE_INT32_V(scancode_set, PS2KbdState,3), + VMSTATE_END_OF_LIST() + }, + .subsections = (const VMStateDescription*[]) { + &vmstate_ps2_keyboard_ledstate, + &vmstate_ps2_keyboard_need_high_bit, + &vmstate_ps2_keyboard_cqueue, + NULL + } +}; + +static int ps2_mouse_post_load(void *opaque, int version_id) +{ + PS2MouseState *s = (PS2MouseState *)opaque; + PS2State *ps2 = &s->common; + + ps2_common_post_load(ps2); + + return 0; +} + +static const VMStateDescription vmstate_ps2_mouse = { + .name = "ps2mouse", + .version_id = 2, + .minimum_version_id = 2, + .post_load = ps2_mouse_post_load, + .fields = (VMStateField[]) { + VMSTATE_STRUCT(common, PS2MouseState, 0, vmstate_ps2_common, PS2State), + VMSTATE_UINT8(mouse_status, PS2MouseState), + VMSTATE_UINT8(mouse_resolution, PS2MouseState), + VMSTATE_UINT8(mouse_sample_rate, PS2MouseState), + VMSTATE_UINT8(mouse_wrap, PS2MouseState), + VMSTATE_UINT8(mouse_type, PS2MouseState), + VMSTATE_UINT8(mouse_detect_state, PS2MouseState), + VMSTATE_INT32(mouse_dx, PS2MouseState), + VMSTATE_INT32(mouse_dy, PS2MouseState), + VMSTATE_INT32(mouse_dz, PS2MouseState), + VMSTATE_UINT8(mouse_buttons, PS2MouseState), + VMSTATE_END_OF_LIST() + } +}; + +static QemuInputHandler ps2_keyboard_handler = { + .name = "QEMU PS/2 Keyboard", + .mask = INPUT_EVENT_MASK_KEY, + .event = ps2_keyboard_event, +}; + +void *ps2_kbd_init(void (*update_irq)(void *, int), void *update_arg) +{ + PS2KbdState *s = (PS2KbdState *)g_malloc0(sizeof(PS2KbdState)); + + trace_ps2_kbd_init(s); + s->common.update_irq = update_irq; + s->common.update_arg = update_arg; + s->scancode_set = 2; + vmstate_register(NULL, 0, &vmstate_ps2_keyboard, s); + qemu_input_handler_register((DeviceState *)s, + &ps2_keyboard_handler); + qemu_register_reset(ps2_kbd_reset, s); + return s; +} + +static QemuInputHandler ps2_mouse_handler = { + .name = "QEMU PS/2 Mouse", + .mask = INPUT_EVENT_MASK_BTN | INPUT_EVENT_MASK_REL, + .event = ps2_mouse_event, + .sync = ps2_mouse_sync, +}; + +void *ps2_mouse_init(void (*update_irq)(void *, int), void *update_arg) +{ + PS2MouseState *s = (PS2MouseState *)g_malloc0(sizeof(PS2MouseState)); + + trace_ps2_mouse_init(s); + s->common.update_irq = update_irq; + s->common.update_arg = update_arg; + vmstate_register(NULL, 0, &vmstate_ps2_mouse, s); + qemu_input_handler_register((DeviceState *)s, + &ps2_mouse_handler); + qemu_register_reset(ps2_mouse_reset, s); + return s; +} diff --git a/hw/input/pxa2xx_keypad.c b/hw/input/pxa2xx_keypad.c new file mode 100644 index 000000000..7f2f739fb --- /dev/null +++ b/hw/input/pxa2xx_keypad.c @@ -0,0 +1,331 @@ +/* + * Intel PXA27X Keypad Controller emulation. + * + * Copyright (c) 2007 MontaVista Software, Inc + * Written by Armin Kuster + * or + * + * This code is licensed under the GPLv2. + * + * Contributions after 2012-01-13 are licensed under the terms of the + * GNU GPL, version 2 or (at your option) any later version. 
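+ *
+ * Note: only the matrix keypad path is connected to QEMU's input layer;
+ * the direct-key and rotary-encoder registers are modelled as state only.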
+ */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "hw/irq.h" +#include "migration/vmstate.h" +#include "hw/arm/pxa.h" +#include "ui/console.h" + +/* + * Keypad + */ +#define KPC 0x00 /* Keypad Interface Control register */ +#define KPDK 0x08 /* Keypad Interface Direct Key register */ +#define KPREC 0x10 /* Keypad Interface Rotary Encoder register */ +#define KPMK 0x18 /* Keypad Interface Matrix Key register */ +#define KPAS 0x20 /* Keypad Interface Automatic Scan register */ +#define KPASMKP0 0x28 /* Keypad Interface Automatic Scan Multiple + Key Presser register 0 */ +#define KPASMKP1 0x30 /* Keypad Interface Automatic Scan Multiple + Key Presser register 1 */ +#define KPASMKP2 0x38 /* Keypad Interface Automatic Scan Multiple + Key Presser register 2 */ +#define KPASMKP3 0x40 /* Keypad Interface Automatic Scan Multiple + Key Presser register 3 */ +#define KPKDI 0x48 /* Keypad Interface Key Debounce Interval + register */ + +/* Keypad defines */ +#define KPC_AS (0x1 << 30) /* Automatic Scan bit */ +#define KPC_ASACT (0x1 << 29) /* Automatic Scan on Activity */ +#define KPC_MI (0x1 << 22) /* Matrix interrupt bit */ +#define KPC_IMKP (0x1 << 21) /* Ignore Multiple Key Press */ +#define KPC_MS7 (0x1 << 20) /* Matrix scan line 7 */ +#define KPC_MS6 (0x1 << 19) /* Matrix scan line 6 */ +#define KPC_MS5 (0x1 << 18) /* Matrix scan line 5 */ +#define KPC_MS4 (0x1 << 17) /* Matrix scan line 4 */ +#define KPC_MS3 (0x1 << 16) /* Matrix scan line 3 */ +#define KPC_MS2 (0x1 << 15) /* Matrix scan line 2 */ +#define KPC_MS1 (0x1 << 14) /* Matrix scan line 1 */ +#define KPC_MS0 (0x1 << 13) /* Matrix scan line 0 */ +#define KPC_ME (0x1 << 12) /* Matrix Keypad Enable */ +#define KPC_MIE (0x1 << 11) /* Matrix Interrupt Enable */ +#define KPC_DK_DEB_SEL (0x1 << 9) /* Direct Keypad Debounce Select */ +#define KPC_DI (0x1 << 5) /* Direct key interrupt bit */ +#define KPC_RE_ZERO_DEB (0x1 << 4) /* Rotary Encoder Zero Debounce */ +#define KPC_REE1 (0x1 << 3) /* Rotary Encoder1 Enable */ +#define KPC_REE0 (0x1 << 2) /* Rotary Encoder0 Enable */ +#define KPC_DE (0x1 << 1) /* Direct Keypad Enable */ +#define KPC_DIE (0x1 << 0) /* Direct Keypad interrupt Enable */ + +#define KPDK_DKP (0x1 << 31) +#define KPDK_DK7 (0x1 << 7) +#define KPDK_DK6 (0x1 << 6) +#define KPDK_DK5 (0x1 << 5) +#define KPDK_DK4 (0x1 << 4) +#define KPDK_DK3 (0x1 << 3) +#define KPDK_DK2 (0x1 << 2) +#define KPDK_DK1 (0x1 << 1) +#define KPDK_DK0 (0x1 << 0) + +#define KPREC_OF1 (0x1 << 31) +#define KPREC_UF1 (0x1 << 30) +#define KPREC_OF0 (0x1 << 15) +#define KPREC_UF0 (0x1 << 14) + +#define KPMK_MKP (0x1 << 31) +#define KPAS_SO (0x1 << 31) +#define KPASMKPx_SO (0x1 << 31) + + +#define KPASMKPx_MKC(row, col) (1 << (row + 16 * (col % 2))) + +#define PXAKBD_MAXROW 8 +#define PXAKBD_MAXCOL 8 + +struct PXA2xxKeyPadState { + MemoryRegion iomem; + qemu_irq irq; + const struct keymap *map; + int pressed_cnt; + int alt_code; + + uint32_t kpc; + uint32_t kpdk; + uint32_t kprec; + uint32_t kpmk; + uint32_t kpas; + uint32_t kpasmkp[4]; + uint32_t kpkdi; +}; + +static void pxa27x_keypad_find_pressed_key(PXA2xxKeyPadState *kp, int *row, int *col) +{ + int i; + for (i = 0; i < 4; i++) + { + *col = i * 2; + for (*row = 0; *row < 8; (*row)++) { + if (kp->kpasmkp[i] & (1 << *row)) + return; + } + *col = i * 2 + 1; + for (*row = 0; *row < 8; (*row)++) { + if (kp->kpasmkp[i] & (1 << (*row + 16))) + return; + } + } +} + +static void pxa27x_keyboard_event (PXA2xxKeyPadState *kp, int keycode) +{ + int row, col, rel, assert_irq = 0; + uint32_t val; + + if 
(keycode == 0xe0) { + kp->alt_code = 1; + return; + } + + if(!(kp->kpc & KPC_ME)) /* skip if not enabled */ + return; + + rel = (keycode & 0x80) ? 1 : 0; /* key release from qemu */ + keycode &= ~0x80; /* strip qemu key release bit */ + if (kp->alt_code) { + keycode |= 0x80; + kp->alt_code = 0; + } + + row = kp->map[keycode].row; + col = kp->map[keycode].column; + if (row == -1 || col == -1) { + return; + } + + val = KPASMKPx_MKC(row, col); + if (rel) { + if (kp->kpasmkp[col / 2] & val) { + kp->kpasmkp[col / 2] &= ~val; + kp->pressed_cnt--; + assert_irq = 1; + } + } else { + if (!(kp->kpasmkp[col / 2] & val)) { + kp->kpasmkp[col / 2] |= val; + kp->pressed_cnt++; + assert_irq = 1; + } + } + kp->kpas = ((kp->pressed_cnt & 0x1f) << 26) | (0xf << 4) | 0xf; + if (kp->pressed_cnt == 1) { + kp->kpas &= ~((0xf << 4) | 0xf); + if (rel) { + pxa27x_keypad_find_pressed_key(kp, &row, &col); + } + kp->kpas |= ((row & 0xf) << 4) | (col & 0xf); + } + + if (!(kp->kpc & (KPC_AS | KPC_ASACT))) + assert_irq = 0; + + if (assert_irq && (kp->kpc & KPC_MIE)) { + kp->kpc |= KPC_MI; + qemu_irq_raise(kp->irq); + } +} + +static uint64_t pxa2xx_keypad_read(void *opaque, hwaddr offset, + unsigned size) +{ + PXA2xxKeyPadState *s = (PXA2xxKeyPadState *) opaque; + uint32_t tmp; + + switch (offset) { + case KPC: + tmp = s->kpc; + if(tmp & KPC_MI) + s->kpc &= ~(KPC_MI); + if(tmp & KPC_DI) + s->kpc &= ~(KPC_DI); + qemu_irq_lower(s->irq); + return tmp; + case KPDK: + return s->kpdk; + case KPREC: + tmp = s->kprec; + if(tmp & KPREC_OF1) + s->kprec &= ~(KPREC_OF1); + if(tmp & KPREC_UF1) + s->kprec &= ~(KPREC_UF1); + if(tmp & KPREC_OF0) + s->kprec &= ~(KPREC_OF0); + if(tmp & KPREC_UF0) + s->kprec &= ~(KPREC_UF0); + return tmp; + case KPMK: + tmp = s->kpmk; + if(tmp & KPMK_MKP) + s->kpmk &= ~(KPMK_MKP); + return tmp; + case KPAS: + return s->kpas; + case KPASMKP0: + return s->kpasmkp[0]; + case KPASMKP1: + return s->kpasmkp[1]; + case KPASMKP2: + return s->kpasmkp[2]; + case KPASMKP3: + return s->kpasmkp[3]; + case KPKDI: + return s->kpkdi; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad read offset 0x%"HWADDR_PRIx"\n", + __func__, offset); + } + + return 0; +} + +static void pxa2xx_keypad_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + PXA2xxKeyPadState *s = (PXA2xxKeyPadState *) opaque; + + switch (offset) { + case KPC: + s->kpc = value; + if (s->kpc & KPC_AS) { + s->kpc &= ~(KPC_AS); + } + break; + case KPDK: + s->kpdk = value; + break; + case KPREC: + s->kprec = value; + break; + case KPMK: + s->kpmk = value; + break; + case KPAS: + s->kpas = value; + break; + case KPASMKP0: + s->kpasmkp[0] = value; + break; + case KPASMKP1: + s->kpasmkp[1] = value; + break; + case KPASMKP2: + s->kpasmkp[2] = value; + break; + case KPASMKP3: + s->kpasmkp[3] = value; + break; + case KPKDI: + s->kpkdi = value; + break; + + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad write offset 0x%"HWADDR_PRIx"\n", + __func__, offset); + } +} + +static const MemoryRegionOps pxa2xx_keypad_ops = { + .read = pxa2xx_keypad_read, + .write = pxa2xx_keypad_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static const VMStateDescription vmstate_pxa2xx_keypad = { + .name = "pxa2xx_keypad", + .version_id = 0, + .minimum_version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_UINT32(kpc, PXA2xxKeyPadState), + VMSTATE_UINT32(kpdk, PXA2xxKeyPadState), + VMSTATE_UINT32(kprec, PXA2xxKeyPadState), + VMSTATE_UINT32(kpmk, PXA2xxKeyPadState), + VMSTATE_UINT32(kpas, PXA2xxKeyPadState), + VMSTATE_UINT32_ARRAY(kpasmkp, PXA2xxKeyPadState, 
4),
+        VMSTATE_UINT32(kpkdi, PXA2xxKeyPadState),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+PXA2xxKeyPadState *pxa27x_keypad_init(MemoryRegion *sysmem,
+                                      hwaddr base,
+                                      qemu_irq irq)
+{
+    PXA2xxKeyPadState *s;
+
+    s = (PXA2xxKeyPadState *) g_malloc0(sizeof(PXA2xxKeyPadState));
+    s->irq = irq;
+
+    memory_region_init_io(&s->iomem, NULL, &pxa2xx_keypad_ops, s,
+                          "pxa2xx-keypad", 0x00100000);
+    memory_region_add_subregion(sysmem, base, &s->iomem);
+
+    vmstate_register(NULL, 0, &vmstate_pxa2xx_keypad, s);
+
+    return s;
+}
+
+void pxa27x_register_keypad(PXA2xxKeyPadState *kp,
+                            const struct keymap *map, int size)
+{
+    if (!map || size < 0x80) {
+        fprintf(stderr, "%s - No PXA keypad map defined\n", __func__);
+        exit(-1);
+    }
+
+    kp->map = map;
+    qemu_add_kbd_event_handler((QEMUPutKBDEvent *) pxa27x_keyboard_event, kp);
+}
diff --git a/hw/input/stellaris_input.c b/hw/input/stellaris_input.c
new file mode 100644
index 000000000..e6ee5e11f
--- /dev/null
+++ b/hw/input/stellaris_input.c
@@ -0,0 +1,93 @@
+/*
+ * Gamepad style buttons connected to IRQ/GPIO lines
+ *
+ * Copyright (c) 2007 CodeSourcery.
+ * Written by Paul Brook
+ *
+ * This code is licensed under the GPL.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/input/gamepad.h"
+#include "hw/irq.h"
+#include "migration/vmstate.h"
+#include "ui/console.h"
+
+typedef struct {
+    qemu_irq irq;
+    int keycode;
+    uint8_t pressed;
+} gamepad_button;
+
+typedef struct {
+    gamepad_button *buttons;
+    int num_buttons;
+    int extension;
+} gamepad_state;
+
+static void stellaris_gamepad_put_key(void *opaque, int keycode)
+{
+    gamepad_state *s = (gamepad_state *)opaque;
+    int i;
+    int down;
+
+    if (keycode == 0xe0 && !s->extension) {
+        s->extension = 0x80;
+        return;
+    }
+
+    down = (keycode & 0x80) == 0;
+    keycode = (keycode & 0x7f) | s->extension;
+
+    for (i = 0; i < s->num_buttons; i++) {
+        if (s->buttons[i].keycode == keycode
+                && s->buttons[i].pressed != down) {
+            s->buttons[i].pressed = down;
+            qemu_set_irq(s->buttons[i].irq, down);
+        }
+    }
+
+    s->extension = 0;
+}
+
+static const VMStateDescription vmstate_stellaris_button = {
+    .name = "stellaris_button",
+    .version_id = 0,
+    .minimum_version_id = 0,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT8(pressed, gamepad_button),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+static const VMStateDescription vmstate_stellaris_gamepad = {
+    .name = "stellaris_gamepad",
+    .version_id = 2,
+    .minimum_version_id = 2,
+    .fields = (VMStateField[]) {
+        VMSTATE_INT32(extension, gamepad_state),
+        VMSTATE_STRUCT_VARRAY_POINTER_INT32(buttons, gamepad_state,
+                                            num_buttons,
+                                            vmstate_stellaris_button,
+                                            gamepad_button),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+/* Wire up 'n' gamepad buttons to the given IRQ lines and PC keycodes. */
+void stellaris_gamepad_init(int n, qemu_irq *irq, const int *keycode)
+{
+    gamepad_state *s;
+    int i;
+
+    s = g_new0(gamepad_state, 1);
+    s->buttons = g_new0(gamepad_button, n);
+    for (i = 0; i < n; i++) {
+        s->buttons[i].irq = irq[i];
+        s->buttons[i].keycode = keycode[i];
+    }
+    s->num_buttons = n;
+    qemu_add_kbd_event_handler(stellaris_gamepad_put_key, s);
+    vmstate_register(NULL, VMSTATE_INSTANCE_ID_ANY,
+                     &vmstate_stellaris_gamepad, s);
+}
diff --git a/hw/input/trace-events b/hw/input/trace-events
new file mode 100644
index 000000000..e0bfe7f3e
--- /dev/null
+++ b/hw/input/trace-events
@@ -0,0 +1,60 @@
+# See docs/devel/tracing.rst for syntax documentation.
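+#
+# Each line below declares one trace point: its name, typed arguments,
+# and a printf-style format string used when the event fires; the build
+# generates a matching trace_<name>() helper (pulled in via trace.h).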
+ +# adb-kbd.c +adb_device_kbd_no_key(void) "Ignoring NO_KEY" +adb_device_kbd_writereg(int reg, uint8_t val) "reg %d val 0x%2.2x" +adb_device_kbd_readreg(int reg, uint8_t val0, uint8_t val1) "reg %d obuf[0] 0x%2.2x obuf[1] 0x%2.2x" +adb_device_kbd_request_change_addr(int devaddr) "change addr to 0x%x" +adb_device_kbd_request_change_addr_and_handler(int devaddr, int handler) "change addr and handler to 0x%x, 0x%x" + +# adb-mouse.c +adb_device_mouse_flush(void) "flush" +adb_device_mouse_writereg(int reg, uint8_t val) "reg %d val 0x%2.2x" +adb_device_mouse_readreg(int reg, uint8_t val0, uint8_t val1) "reg %d obuf[0] 0x%2.2x obuf[1] 0x%2.2x" +adb_device_mouse_request_change_addr(int devaddr) "change addr to 0x%x" +adb_device_mouse_request_change_addr_and_handler(int devaddr, int handler) "change addr and handler to 0x%x, 0x%x" + +# adb.c +adb_bus_request(uint8_t addr, const char *cmd, int size) "device 0x%x %s cmdsize=%d" +adb_bus_request_done(uint8_t addr, const char *cmd, int size) "device 0x%x %s replysize=%d" +adb_bus_autopoll_block(bool blocked) "blocked: %d" +adb_bus_autopoll_cb(uint16_t mask) "executing autopoll_cb with autopoll mask 0x%x" +adb_bus_autopoll_cb_done(uint16_t mask) "done executing autopoll_cb with autopoll mask 0x%x" + +# pckbd.c +pckbd_kbd_read_data(uint32_t val) "0x%02x" +pckbd_kbd_read_status(int status) "0x%02x" +pckbd_outport_write(uint32_t val) "0x%02x" +pckbd_kbd_write_command(uint64_t val) "0x%02"PRIx64 +pckbd_kbd_write_data(uint64_t val) "0x%02"PRIx64 + +# ps2.c +ps2_put_keycode(void *opaque, int keycode) "%p keycode 0x%02x" +ps2_keyboard_event(void *opaque, int qcode, int down, unsigned int modifier, unsigned int modifiers, int set, int xlate) "%p qcode %d down %d modifier 0x%x modifiers 0x%x set %d xlate %d" +ps2_read_data(void *opaque) "%p" +ps2_set_ledstate(void *s, int ledstate) "%p ledstate %d" +ps2_reset_keyboard(void *s) "%p" +ps2_write_keyboard(void *opaque, int val) "%p val %d" +ps2_keyboard_set_translation(void *opaque, int mode) "%p mode %d" +ps2_mouse_send_packet(void *s, int dx1, int dy1, int dz1, int b) "%p x %d y %d z %d bs 0x%x" +ps2_mouse_fake_event(void *opaque) "%p" +ps2_write_mouse(void *opaque, int val) "%p val %d" +ps2_kbd_reset(void *opaque) "%p" +ps2_mouse_reset(void *opaque) "%p" +ps2_kbd_init(void *s) "%p" +ps2_mouse_init(void *s) "%p" + +# hid.c +hid_kbd_queue_full(void) "queue full" +hid_kbd_queue_empty(void) "queue empty" + +# tsc2005.c +tsc2005_sense(const char *state) "touchscreen sense %s" + +# virtio-input.c +virtio_input_queue_full(void) "queue full" + +# lasips2.c +lasips2_reg_read(unsigned int size, int id, uint64_t addr, const char *name, uint64_t val) "%u %d addr 0x%"PRIx64 "%s -> 0x%"PRIx64 +lasips2_reg_write(unsigned int size, int id, uint64_t addr, const char *name, uint64_t val) "%u %d addr 0x%"PRIx64 "%s <- 0x%"PRIx64 +lasips2_intr(unsigned int val) "%d" diff --git a/hw/input/trace.h b/hw/input/trace.h new file mode 100644 index 000000000..d1cc5d924 --- /dev/null +++ b/hw/input/trace.h @@ -0,0 +1 @@ +#include "trace/trace-hw_input.h" diff --git a/hw/input/tsc2005.c b/hw/input/tsc2005.c new file mode 100644 index 000000000..55d61cc84 --- /dev/null +++ b/hw/input/tsc2005.c @@ -0,0 +1,559 @@ +/* + * TI TSC2005 emulator. 
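+ *
+ * (Overview: the chip is a touchscreen/ADC controller on a byte-wide
+ * SPI link; tsc2005_txrx_word() below implements the wire protocol and
+ * tsc2005_read()/tsc2005_write() the register map.)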
+ * + * Copyright (c) 2006 Andrzej Zaborowski + * Copyright (C) 2008 Nokia Corporation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 or + * (at your option) version 3 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . + */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "qemu/timer.h" +#include "sysemu/reset.h" +#include "ui/console.h" +#include "hw/input/tsc2xxx.h" +#include "hw/irq.h" +#include "migration/vmstate.h" +#include "trace.h" + +#define TSC_CUT_RESOLUTION(value, p) ((value) >> (16 - (p ? 12 : 10))) + +typedef struct { + qemu_irq pint; /* Combination of the nPENIRQ and DAV signals */ + QEMUTimer *timer; + uint16_t model; + + int32_t x, y; + bool pressure; + + uint8_t reg, state; + bool irq, command; + uint16_t data, dav; + + bool busy; + bool enabled; + bool host_mode; + int8_t function; + int8_t nextfunction; + bool precision; + bool nextprecision; + uint16_t filter; + uint8_t pin_func; + uint16_t timing[2]; + uint8_t noise; + bool reset; + bool pdst; + bool pnd0; + uint16_t temp_thr[2]; + uint16_t aux_thr[2]; + + int32_t tr[8]; +} TSC2005State; + +enum { + TSC_MODE_XYZ_SCAN = 0x0, + TSC_MODE_XY_SCAN, + TSC_MODE_X, + TSC_MODE_Y, + TSC_MODE_Z, + TSC_MODE_AUX, + TSC_MODE_TEMP1, + TSC_MODE_TEMP2, + TSC_MODE_AUX_SCAN, + TSC_MODE_X_TEST, + TSC_MODE_Y_TEST, + TSC_MODE_TS_TEST, + TSC_MODE_RESERVED, + TSC_MODE_XX_DRV, + TSC_MODE_YY_DRV, + TSC_MODE_YX_DRV, +}; + +static const uint16_t mode_regs[16] = { + 0xf000, /* X, Y, Z scan */ + 0xc000, /* X, Y scan */ + 0x8000, /* X */ + 0x4000, /* Y */ + 0x3000, /* Z */ + 0x0800, /* AUX */ + 0x0400, /* TEMP1 */ + 0x0200, /* TEMP2 */ + 0x0800, /* AUX scan */ + 0x0040, /* X test */ + 0x0020, /* Y test */ + 0x0080, /* Short-circuit test */ + 0x0000, /* Reserved */ + 0x0000, /* X+, X- drivers */ + 0x0000, /* Y+, Y- drivers */ + 0x0000, /* Y+, X- drivers */ +}; + +#define X_TRANSFORM(s) \ + ((s->y * s->tr[0] - s->x * s->tr[1]) / s->tr[2] + s->tr[3]) +#define Y_TRANSFORM(s) \ + ((s->y * s->tr[4] - s->x * s->tr[5]) / s->tr[6] + s->tr[7]) +#define Z1_TRANSFORM(s) \ + ((400 - ((s)->x >> 7) + ((s)->pressure << 10)) << 4) +#define Z2_TRANSFORM(s) \ + ((4000 + ((s)->y >> 7) - ((s)->pressure << 10)) << 4) + +#define AUX_VAL (700 << 4) /* +/- 3 at 12-bit */ +#define TEMP1_VAL (1264 << 4) /* +/- 5 at 12-bit */ +#define TEMP2_VAL (1531 << 4) /* +/- 5 at 12-bit */ + +static uint16_t tsc2005_read(TSC2005State *s, int reg) +{ + uint16_t ret; + + switch (reg) { + case 0x0: /* X */ + s->dav &= ~mode_regs[TSC_MODE_X]; + return TSC_CUT_RESOLUTION(X_TRANSFORM(s), s->precision) + + (s->noise & 3); + case 0x1: /* Y */ + s->dav &= ~mode_regs[TSC_MODE_Y]; + s->noise ++; + return TSC_CUT_RESOLUTION(Y_TRANSFORM(s), s->precision) ^ + (s->noise & 3); + case 0x2: /* Z1 */ + s->dav &= 0xdfff; + return TSC_CUT_RESOLUTION(Z1_TRANSFORM(s), s->precision) - + (s->noise & 3); + case 0x3: /* Z2 */ + s->dav &= 0xefff; + return TSC_CUT_RESOLUTION(Z2_TRANSFORM(s), s->precision) | + (s->noise & 3); + + case 0x4: /* AUX */ + s->dav &= ~mode_regs[TSC_MODE_AUX]; + return TSC_CUT_RESOLUTION(AUX_VAL, 
s->precision);
+
+    case 0x5: /* TEMP1 */
+        s->dav &= ~mode_regs[TSC_MODE_TEMP1];
+        return TSC_CUT_RESOLUTION(TEMP1_VAL, s->precision) -
+                (s->noise & 5);
+    case 0x6: /* TEMP2 */
+        s->dav &= 0xdfff;
+        s->dav &= ~mode_regs[TSC_MODE_TEMP2];
+        return TSC_CUT_RESOLUTION(TEMP2_VAL, s->precision) ^
+                (s->noise & 3);
+
+    case 0x7: /* Status */
+        ret = s->dav | (s->reset << 7) | (s->pdst << 2) | 0x0;
+        s->dav &= ~(mode_regs[TSC_MODE_X_TEST] | mode_regs[TSC_MODE_Y_TEST] |
+                        mode_regs[TSC_MODE_TS_TEST]);
+        s->reset = true;
+        return ret;
+
+    case 0x8: /* AUX high threshold */
+        return s->aux_thr[1];
+    case 0x9: /* AUX low threshold */
+        return s->aux_thr[0];
+
+    case 0xa: /* TEMP high threshold */
+        return s->temp_thr[1];
+    case 0xb: /* TEMP low threshold */
+        return s->temp_thr[0];
+
+    case 0xc: /* CFR0 */
+        return (s->pressure << 15) | ((!s->busy) << 14) |
+                (s->nextprecision << 13) | s->timing[0];
+    case 0xd: /* CFR1 */
+        return s->timing[1];
+    case 0xe: /* CFR2 */
+        return (s->pin_func << 14) | s->filter;
+
+    case 0xf: /* Function select status */
+        return s->function >= 0 ? 1 << s->function : 0;
+    }
+
+    /* Never gets here */
+    return 0xffff;
+}
+
+static void tsc2005_write(TSC2005State *s, int reg, uint16_t data)
+{
+    switch (reg) {
+    case 0x8: /* AUX high threshold */
+        s->aux_thr[1] = data;
+        break;
+    case 0x9: /* AUX low threshold */
+        s->aux_thr[0] = data;
+        break;
+
+    case 0xa: /* TEMP high threshold */
+        s->temp_thr[1] = data;
+        break;
+    case 0xb: /* TEMP low threshold */
+        s->temp_thr[0] = data;
+        break;
+
+    case 0xc: /* CFR0 */
+        s->host_mode = (data >> 15) != 0;
+        if (s->enabled != !(data & 0x4000)) {
+            s->enabled = !(data & 0x4000);
+            trace_tsc2005_sense(s->enabled ? "enabled" : "disabled");
+            if (s->busy && !s->enabled)
+                timer_del(s->timer);
+            s->busy = s->busy && s->enabled;
+        }
+        s->nextprecision = (data >> 13) & 1;
+        s->timing[0] = data & 0x1fff;
+        if ((s->timing[0] >> 11) == 3) {
+            qemu_log_mask(LOG_GUEST_ERROR,
+                          "tsc2005_write: illegal conversion clock setting\n");
+        }
+        break;
+    case 0xd: /* CFR1 */
+        s->timing[1] = data & 0xf07;
+        break;
+    case 0xe: /* CFR2 */
+        s->pin_func = (data >> 14) & 3;
+        s->filter = data & 0x3fff;
+        break;
+
+    default:
+        qemu_log_mask(LOG_GUEST_ERROR,
+                      "%s: write into read-only register 0x%x\n",
+                      __func__, reg);
+    }
+}
+
+/* This handles most of the chip's logic.
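+ * It recomputes the nPENIRQ/DAV output pin from pin_func, the current
+ * pen-pressure state and any pending DAV bits, and (re)arms the
+ * conversion timer whenever the selected scan function may start.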
*/ +static void tsc2005_pin_update(TSC2005State *s) +{ + int64_t expires; + bool pin_state; + + switch (s->pin_func) { + case 0: + pin_state = !s->pressure && !!s->dav; + break; + case 1: + case 3: + default: + pin_state = !s->dav; + break; + case 2: + pin_state = !s->pressure; + } + + if (pin_state != s->irq) { + s->irq = pin_state; + qemu_set_irq(s->pint, s->irq); + } + + switch (s->nextfunction) { + case TSC_MODE_XYZ_SCAN: + case TSC_MODE_XY_SCAN: + if (!s->host_mode && s->dav) + s->enabled = false; + if (!s->pressure) + return; + /* Fall through */ + case TSC_MODE_AUX_SCAN: + break; + + case TSC_MODE_X: + case TSC_MODE_Y: + case TSC_MODE_Z: + if (!s->pressure) + return; + /* Fall through */ + case TSC_MODE_AUX: + case TSC_MODE_TEMP1: + case TSC_MODE_TEMP2: + case TSC_MODE_X_TEST: + case TSC_MODE_Y_TEST: + case TSC_MODE_TS_TEST: + if (s->dav) + s->enabled = false; + break; + + case TSC_MODE_RESERVED: + case TSC_MODE_XX_DRV: + case TSC_MODE_YY_DRV: + case TSC_MODE_YX_DRV: + default: + return; + } + + if (!s->enabled || s->busy) + return; + + s->busy = true; + s->precision = s->nextprecision; + s->function = s->nextfunction; + s->pdst = !s->pnd0; /* Synchronised on internal clock */ + expires = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + + (NANOSECONDS_PER_SECOND >> 7); + timer_mod(s->timer, expires); +} + +static void tsc2005_reset(TSC2005State *s) +{ + s->state = 0; + s->pin_func = 0; + s->enabled = false; + s->busy = false; + s->nextprecision = false; + s->nextfunction = 0; + s->timing[0] = 0; + s->timing[1] = 0; + s->irq = false; + s->dav = 0; + s->reset = false; + s->pdst = true; + s->pnd0 = false; + s->function = -1; + s->temp_thr[0] = 0x000; + s->temp_thr[1] = 0xfff; + s->aux_thr[0] = 0x000; + s->aux_thr[1] = 0xfff; + + tsc2005_pin_update(s); +} + +static uint8_t tsc2005_txrx_word(void *opaque, uint8_t value) +{ + TSC2005State *s = opaque; + uint32_t ret = 0; + + switch (s->state ++) { + case 0: + if (value & 0x80) { + /* Command */ + if (value & (1 << 1)) + tsc2005_reset(s); + else { + s->nextfunction = (value >> 3) & 0xf; + s->nextprecision = (value >> 2) & 1; + if (s->enabled != !(value & 1)) { + s->enabled = !(value & 1); + trace_tsc2005_sense(s->enabled ? "enabled" : "disabled"); + if (s->busy && !s->enabled) + timer_del(s->timer); + s->busy = s->busy && s->enabled; + } + tsc2005_pin_update(s); + } + + s->state = 0; + } else if (value) { + /* Data transfer */ + s->reg = (value >> 3) & 0xf; + s->pnd0 = (value >> 1) & 1; + s->command = value & 1; + + if (s->command) { + /* Read */ + s->data = tsc2005_read(s, s->reg); + tsc2005_pin_update(s); + } else + s->data = 0; + } else + s->state = 0; + break; + + case 1: + if (s->command) + ret = (s->data >> 8) & 0xff; + else + s->data |= value << 8; + break; + + case 2: + if (s->command) + ret = s->data & 0xff; + else { + s->data |= value; + tsc2005_write(s, s->reg, s->data); + tsc2005_pin_update(s); + } + + s->state = 0; + break; + } + + return ret; +} + +uint32_t tsc2005_txrx(void *opaque, uint32_t value, int len) +{ + uint32_t ret = 0; + + len &= ~7; + while (len > 0) { + len -= 8; + ret |= tsc2005_txrx_word(opaque, (value >> len) & 0xff) << len; + } + + return ret; +} + +static void tsc2005_timer_tick(void *opaque) +{ + TSC2005State *s = opaque; + + /* Timer ticked -- a set of conversions has been finished. 
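+     * Latch the DAV bits of the completed function; the guest observes
+     * them through the status register (0x7) and, depending on
+     * pin_func, on the shared nPENIRQ/DAV line.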
*/ + + if (!s->busy) + return; + + s->busy = false; + s->dav |= mode_regs[s->function]; + s->function = -1; + tsc2005_pin_update(s); +} + +static void tsc2005_touchscreen_event(void *opaque, + int x, int y, int z, int buttons_state) +{ + TSC2005State *s = opaque; + int p = s->pressure; + + if (buttons_state) { + s->x = x; + s->y = y; + } + s->pressure = !!buttons_state; + + /* + * Note: We would get better responsiveness in the guest by + * signaling TS events immediately, but for now we simulate + * the first conversion delay for sake of correctness. + */ + if (p != s->pressure) + tsc2005_pin_update(s); +} + +static int tsc2005_post_load(void *opaque, int version_id) +{ + TSC2005State *s = (TSC2005State *) opaque; + + s->busy = timer_pending(s->timer); + tsc2005_pin_update(s); + + return 0; +} + +static const VMStateDescription vmstate_tsc2005 = { + .name = "tsc2005", + .version_id = 2, + .minimum_version_id = 2, + .post_load = tsc2005_post_load, + .fields = (VMStateField []) { + VMSTATE_BOOL(pressure, TSC2005State), + VMSTATE_BOOL(irq, TSC2005State), + VMSTATE_BOOL(command, TSC2005State), + VMSTATE_BOOL(enabled, TSC2005State), + VMSTATE_BOOL(host_mode, TSC2005State), + VMSTATE_BOOL(reset, TSC2005State), + VMSTATE_BOOL(pdst, TSC2005State), + VMSTATE_BOOL(pnd0, TSC2005State), + VMSTATE_BOOL(precision, TSC2005State), + VMSTATE_BOOL(nextprecision, TSC2005State), + VMSTATE_UINT8(reg, TSC2005State), + VMSTATE_UINT8(state, TSC2005State), + VMSTATE_UINT16(data, TSC2005State), + VMSTATE_UINT16(dav, TSC2005State), + VMSTATE_UINT16(filter, TSC2005State), + VMSTATE_INT8(nextfunction, TSC2005State), + VMSTATE_INT8(function, TSC2005State), + VMSTATE_INT32(x, TSC2005State), + VMSTATE_INT32(y, TSC2005State), + VMSTATE_TIMER_PTR(timer, TSC2005State), + VMSTATE_UINT8(pin_func, TSC2005State), + VMSTATE_UINT16_ARRAY(timing, TSC2005State, 2), + VMSTATE_UINT8(noise, TSC2005State), + VMSTATE_UINT16_ARRAY(temp_thr, TSC2005State, 2), + VMSTATE_UINT16_ARRAY(aux_thr, TSC2005State, 2), + VMSTATE_INT32_ARRAY(tr, TSC2005State, 8), + VMSTATE_END_OF_LIST() + } +}; + +void *tsc2005_init(qemu_irq pintdav) +{ + TSC2005State *s; + + s = (TSC2005State *) + g_malloc0(sizeof(TSC2005State)); + s->x = 400; + s->y = 240; + s->pressure = false; + s->precision = s->nextprecision = false; + s->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, tsc2005_timer_tick, s); + s->pint = pintdav; + s->model = 0x2005; + + s->tr[0] = 0; + s->tr[1] = 1; + s->tr[2] = 1; + s->tr[3] = 0; + s->tr[4] = 1; + s->tr[5] = 0; + s->tr[6] = 1; + s->tr[7] = 0; + + tsc2005_reset(s); + + qemu_add_mouse_event_handler(tsc2005_touchscreen_event, s, 1, + "QEMU TSC2005-driven Touchscreen"); + + qemu_register_reset((void *) tsc2005_reset, s); + vmstate_register(NULL, 0, &vmstate_tsc2005, s); + + return s; +} + +/* + * Use tslib generated calibration data to generate ADC input values + * from the touchscreen. Assuming 12-bit precision was used during + * tslib calibration. + */ +void tsc2005_set_transform(void *opaque, MouseTransformInfo *info) +{ + TSC2005State *s = (TSC2005State *) opaque; + + /* This version assumes touchscreen X & Y axis are parallel or + * perpendicular to LCD's X & Y axis in some way. 
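+     * Under that assumption the affine tslib calibration collapses to
+     * one dominant coefficient plus an offset per axis; the tr[] values
+     * computed below feed X_TRANSFORM()/Y_TRANSFORM() at scan time.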
*/ + if (abs(info->a[0]) > abs(info->a[1])) { + s->tr[0] = 0; + s->tr[1] = -info->a[6] * info->x; + s->tr[2] = info->a[0]; + s->tr[3] = -info->a[2] / info->a[0]; + s->tr[4] = info->a[6] * info->y; + s->tr[5] = 0; + s->tr[6] = info->a[4]; + s->tr[7] = -info->a[5] / info->a[4]; + } else { + s->tr[0] = info->a[6] * info->y; + s->tr[1] = 0; + s->tr[2] = info->a[1]; + s->tr[3] = -info->a[2] / info->a[1]; + s->tr[4] = 0; + s->tr[5] = -info->a[6] * info->x; + s->tr[6] = info->a[3]; + s->tr[7] = -info->a[5] / info->a[3]; + } + + s->tr[0] >>= 11; + s->tr[1] >>= 11; + s->tr[3] <<= 4; + s->tr[4] >>= 11; + s->tr[5] >>= 11; + s->tr[7] <<= 4; +} diff --git a/hw/input/tsc210x.c b/hw/input/tsc210x.c new file mode 100644 index 000000000..182d3725f --- /dev/null +++ b/hw/input/tsc210x.c @@ -0,0 +1,1253 @@ +/* + * TI TSC2102 (touchscreen/sensors/audio controller) emulator. + * TI TSC2301 (touchscreen/sensors/keypad). + * + * Copyright (c) 2006 Andrzej Zaborowski + * Copyright (C) 2008 Nokia Corporation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 or + * (at your option) version 3 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . + */ + +#include "qemu/osdep.h" +#include "hw/hw.h" +#include "audio/audio.h" +#include "qemu/timer.h" +#include "sysemu/reset.h" +#include "ui/console.h" +#include "hw/arm/omap.h" /* For I2SCodec */ +#include "hw/input/tsc2xxx.h" +#include "hw/irq.h" +#include "migration/vmstate.h" + +#define TSC_DATA_REGISTERS_PAGE 0x0 +#define TSC_CONTROL_REGISTERS_PAGE 0x1 +#define TSC_AUDIO_REGISTERS_PAGE 0x2 + +#define TSC_VERBOSE + +#define TSC_CUT_RESOLUTION(value, p) ((value) >> (16 - resolution[p])) + +typedef struct { + qemu_irq pint; + qemu_irq kbint; + qemu_irq davint; + QEMUTimer *timer; + QEMUSoundCard card; + uWireSlave chip; + I2SCodec codec; + uint8_t in_fifo[16384]; + uint8_t out_fifo[16384]; + uint16_t model; + + int32_t x, y; + bool pressure; + + uint8_t page, offset; + uint16_t dav; + + bool state; + bool irq; + bool command; + bool busy; + bool enabled; + bool host_mode; + uint8_t function, nextfunction; + uint8_t precision, nextprecision; + uint8_t filter; + uint8_t pin_func; + uint8_t ref; + uint8_t timing; + uint8_t noise; + + uint16_t audio_ctrl1; + uint16_t audio_ctrl2; + uint16_t audio_ctrl3; + uint16_t pll[3]; + uint16_t volume; + int64_t volume_change; + bool softstep; + uint16_t dac_power; + int64_t powerdown; + uint16_t filter_data[0x14]; + + const char *name; + SWVoiceIn *adc_voice[1]; + SWVoiceOut *dac_voice[1]; + int i2s_rx_rate; + int i2s_tx_rate; + + int tr[8]; + + struct { + uint16_t down; + uint16_t mask; + int scan; + int debounce; + int mode; + int intr; + } kb; + int64_t now; /* Time at migration */ +} TSC210xState; + +static const int resolution[4] = { 12, 8, 10, 12 }; + +#define TSC_MODE_NO_SCAN 0x0 +#define TSC_MODE_XY_SCAN 0x1 +#define TSC_MODE_XYZ_SCAN 0x2 +#define TSC_MODE_X 0x3 +#define TSC_MODE_Y 0x4 +#define TSC_MODE_Z 0x5 +#define TSC_MODE_BAT1 0x6 +#define TSC_MODE_BAT2 0x7 +#define TSC_MODE_AUX 0x8 +#define TSC_MODE_AUX_SCAN 0x9 +#define TSC_MODE_TEMP1 0xa 
+#define TSC_MODE_PORT_SCAN 0xb +#define TSC_MODE_TEMP2 0xc +#define TSC_MODE_XX_DRV 0xd +#define TSC_MODE_YY_DRV 0xe +#define TSC_MODE_YX_DRV 0xf + +static const uint16_t mode_regs[16] = { + 0x0000, /* No scan */ + 0x0600, /* X, Y scan */ + 0x0780, /* X, Y, Z scan */ + 0x0400, /* X */ + 0x0200, /* Y */ + 0x0180, /* Z */ + 0x0040, /* BAT1 */ + 0x0030, /* BAT2 */ + 0x0010, /* AUX */ + 0x0010, /* AUX scan */ + 0x0004, /* TEMP1 */ + 0x0070, /* Port scan */ + 0x0002, /* TEMP2 */ + 0x0000, /* X+, X- drivers */ + 0x0000, /* Y+, Y- drivers */ + 0x0000, /* Y+, X- drivers */ +}; + +#define X_TRANSFORM(s) \ + ((s->y * s->tr[0] - s->x * s->tr[1]) / s->tr[2] + s->tr[3]) +#define Y_TRANSFORM(s) \ + ((s->y * s->tr[4] - s->x * s->tr[5]) / s->tr[6] + s->tr[7]) +#define Z1_TRANSFORM(s) \ + ((400 - ((s)->x >> 7) + ((s)->pressure << 10)) << 4) +#define Z2_TRANSFORM(s) \ + ((4000 + ((s)->y >> 7) - ((s)->pressure << 10)) << 4) + +#define BAT1_VAL 0x8660 +#define BAT2_VAL 0x0000 +#define AUX1_VAL 0x35c0 +#define AUX2_VAL 0xffff +#define TEMP1_VAL 0x8c70 +#define TEMP2_VAL 0xa5b0 + +#define TSC_POWEROFF_DELAY 50 +#define TSC_SOFTSTEP_DELAY 50 + +static void tsc210x_reset(TSC210xState *s) +{ + s->state = false; + s->pin_func = 2; + s->enabled = false; + s->busy = false; + s->nextfunction = 0; + s->ref = 0; + s->timing = 0; + s->irq = false; + s->dav = 0; + + s->audio_ctrl1 = 0x0000; + s->audio_ctrl2 = 0x4410; + s->audio_ctrl3 = 0x0000; + s->pll[0] = 0x1004; + s->pll[1] = 0x0000; + s->pll[2] = 0x1fff; + s->volume = 0xffff; + s->dac_power = 0x8540; + s->softstep = true; + s->volume_change = 0; + s->powerdown = 0; + s->filter_data[0x00] = 0x6be3; + s->filter_data[0x01] = 0x9666; + s->filter_data[0x02] = 0x675d; + s->filter_data[0x03] = 0x6be3; + s->filter_data[0x04] = 0x9666; + s->filter_data[0x05] = 0x675d; + s->filter_data[0x06] = 0x7d83; + s->filter_data[0x07] = 0x84ee; + s->filter_data[0x08] = 0x7d83; + s->filter_data[0x09] = 0x84ee; + s->filter_data[0x0a] = 0x6be3; + s->filter_data[0x0b] = 0x9666; + s->filter_data[0x0c] = 0x675d; + s->filter_data[0x0d] = 0x6be3; + s->filter_data[0x0e] = 0x9666; + s->filter_data[0x0f] = 0x675d; + s->filter_data[0x10] = 0x7d83; + s->filter_data[0x11] = 0x84ee; + s->filter_data[0x12] = 0x7d83; + s->filter_data[0x13] = 0x84ee; + + s->i2s_tx_rate = 0; + s->i2s_rx_rate = 0; + + s->kb.scan = 1; + s->kb.debounce = 0; + s->kb.mask = 0x0000; + s->kb.mode = 3; + s->kb.intr = 0; + + qemu_set_irq(s->pint, !s->irq); + qemu_set_irq(s->davint, !s->dav); + qemu_irq_raise(s->kbint); +} + +typedef struct { + int rate; + int dsor; + int fsref; +} TSC210xRateInfo; + +/* { rate, dsor, fsref } */ +static const TSC210xRateInfo tsc2102_rates[] = { + /* Fsref / 6.0 */ + { 7350, 63, 1 }, + { 8000, 63, 0 }, + /* Fsref / 6.0 */ + { 7350, 54, 1 }, + { 8000, 54, 0 }, + /* Fsref / 5.0 */ + { 8820, 45, 1 }, + { 9600, 45, 0 }, + /* Fsref / 4.0 */ + { 11025, 36, 1 }, + { 12000, 36, 0 }, + /* Fsref / 3.0 */ + { 14700, 27, 1 }, + { 16000, 27, 0 }, + /* Fsref / 2.0 */ + { 22050, 18, 1 }, + { 24000, 18, 0 }, + /* Fsref / 1.5 */ + { 29400, 9, 1 }, + { 32000, 9, 0 }, + /* Fsref */ + { 44100, 0, 1 }, + { 48000, 0, 0 }, + + { 0, 0, 0 }, +}; + +static inline void tsc210x_out_flush(TSC210xState *s, int len) +{ + uint8_t *data = s->codec.out.fifo + s->codec.out.start; + uint8_t *end = data + len; + + while (data < end) + data += AUD_write(s->dac_voice[0], data, end - data) ?: (end - data); + + s->codec.out.len -= len; + if (s->codec.out.len) + memmove(s->codec.out.fifo, end, s->codec.out.len); + s->codec.out.start = 0; +} 
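+
+/*
+ * Note on tsc210x_out_flush() above: AUD_write() may consume less than
+ * requested; the GNU `?:` fallback then skips the remainder, so the
+ * output FIFO cannot wedge when the audio backend stalls.
+ */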
+ +static void tsc210x_audio_out_cb(TSC210xState *s, int free_b) +{ + if (s->codec.out.len >= free_b) { + tsc210x_out_flush(s, free_b); + return; + } + + s->codec.out.size = MIN(free_b, 16384); + qemu_irq_raise(s->codec.tx_start); +} + +static void tsc2102_audio_rate_update(TSC210xState *s) +{ + const TSC210xRateInfo *rate; + + s->codec.tx_rate = 0; + s->codec.rx_rate = 0; + if (s->dac_power & (1 << 15)) /* PWDNC */ + return; + + for (rate = tsc2102_rates; rate->rate; rate ++) + if (rate->dsor == (s->audio_ctrl1 & 0x3f) && /* DACFS */ + rate->fsref == ((s->audio_ctrl3 >> 13) & 1))/* REFFS */ + break; + if (!rate->rate) { + printf("%s: unknown sampling rate configured\n", __func__); + return; + } + + s->codec.tx_rate = rate->rate; +} + +static void tsc2102_audio_output_update(TSC210xState *s) +{ + int enable; + struct audsettings fmt; + + if (s->dac_voice[0]) { + tsc210x_out_flush(s, s->codec.out.len); + s->codec.out.size = 0; + AUD_set_active_out(s->dac_voice[0], 0); + AUD_close_out(&s->card, s->dac_voice[0]); + s->dac_voice[0] = NULL; + } + s->codec.cts = 0; + + enable = + (~s->dac_power & (1 << 15)) && /* PWDNC */ + (~s->dac_power & (1 << 10)); /* DAPWDN */ + if (!enable || !s->codec.tx_rate) + return; + + /* Force our own sampling rate even in slave DAC mode */ + fmt.endianness = 0; + fmt.nchannels = 2; + fmt.freq = s->codec.tx_rate; + fmt.fmt = AUDIO_FORMAT_S16; + + s->dac_voice[0] = AUD_open_out(&s->card, s->dac_voice[0], + "tsc2102.sink", s, (void *) tsc210x_audio_out_cb, &fmt); + if (s->dac_voice[0]) { + s->codec.cts = 1; + AUD_set_active_out(s->dac_voice[0], 1); + } +} + +static uint16_t tsc2102_data_register_read(TSC210xState *s, int reg) +{ + switch (reg) { + case 0x00: /* X */ + s->dav &= 0xfbff; + return TSC_CUT_RESOLUTION(X_TRANSFORM(s), s->precision) + + (s->noise & 3); + + case 0x01: /* Y */ + s->noise ++; + s->dav &= 0xfdff; + return TSC_CUT_RESOLUTION(Y_TRANSFORM(s), s->precision) ^ + (s->noise & 3); + + case 0x02: /* Z1 */ + s->dav &= 0xfeff; + return TSC_CUT_RESOLUTION(Z1_TRANSFORM(s), s->precision) - + (s->noise & 3); + + case 0x03: /* Z2 */ + s->dav &= 0xff7f; + return TSC_CUT_RESOLUTION(Z2_TRANSFORM(s), s->precision) | + (s->noise & 3); + + case 0x04: /* KPData */ + if ((s->model & 0xff00) == 0x2300) { + if (s->kb.intr && (s->kb.mode & 2)) { + s->kb.intr = 0; + qemu_irq_raise(s->kbint); + } + return s->kb.down; + } + + return 0xffff; + + case 0x05: /* BAT1 */ + s->dav &= 0xffbf; + return TSC_CUT_RESOLUTION(BAT1_VAL, s->precision) + + (s->noise & 6); + + case 0x06: /* BAT2 */ + s->dav &= 0xffdf; + return TSC_CUT_RESOLUTION(BAT2_VAL, s->precision); + + case 0x07: /* AUX1 */ + s->dav &= 0xffef; + return TSC_CUT_RESOLUTION(AUX1_VAL, s->precision); + + case 0x08: /* AUX2 */ + s->dav &= 0xfff7; + return 0xffff; + + case 0x09: /* TEMP1 */ + s->dav &= 0xfffb; + return TSC_CUT_RESOLUTION(TEMP1_VAL, s->precision) - + (s->noise & 5); + + case 0x0a: /* TEMP2 */ + s->dav &= 0xfffd; + return TSC_CUT_RESOLUTION(TEMP2_VAL, s->precision) ^ + (s->noise & 3); + + case 0x0b: /* DAC */ + s->dav &= 0xfffe; + return 0xffff; + + default: +#ifdef TSC_VERBOSE + fprintf(stderr, "tsc2102_data_register_read: " + "no such register: 0x%02x\n", reg); +#endif + return 0xffff; + } +} + +static uint16_t tsc2102_control_register_read( + TSC210xState *s, int reg) +{ + switch (reg) { + case 0x00: /* TSC ADC */ + return (s->pressure << 15) | ((!s->busy) << 14) | + (s->nextfunction << 10) | (s->nextprecision << 8) | s->filter; + + case 0x01: /* Status / Keypad Control */ + if ((s->model & 0xff00) == 0x2100) 
+ return (s->pin_func << 14) | ((!s->enabled) << 13) | + (s->host_mode << 12) | ((!!s->dav) << 11) | s->dav; + else + return (s->kb.intr << 15) | ((s->kb.scan || !s->kb.down) << 14) | + (s->kb.debounce << 11); + + case 0x02: /* DAC Control */ + if ((s->model & 0xff00) == 0x2300) + return s->dac_power & 0x8000; + else + goto bad_reg; + + case 0x03: /* Reference */ + return s->ref; + + case 0x04: /* Reset */ + return 0xffff; + + case 0x05: /* Configuration */ + return s->timing; + + case 0x06: /* Secondary configuration */ + if ((s->model & 0xff00) == 0x2100) + goto bad_reg; + return ((!s->dav) << 15) | ((s->kb.mode & 1) << 14) | s->pll[2]; + + case 0x10: /* Keypad Mask */ + if ((s->model & 0xff00) == 0x2100) + goto bad_reg; + return s->kb.mask; + + default: + bad_reg: +#ifdef TSC_VERBOSE + fprintf(stderr, "tsc2102_control_register_read: " + "no such register: 0x%02x\n", reg); +#endif + return 0xffff; + } +} + +static uint16_t tsc2102_audio_register_read(TSC210xState *s, int reg) +{ + int l_ch, r_ch; + uint16_t val; + + switch (reg) { + case 0x00: /* Audio Control 1 */ + return s->audio_ctrl1; + + case 0x01: + return 0xff00; + + case 0x02: /* DAC Volume Control */ + return s->volume; + + case 0x03: + return 0x8b00; + + case 0x04: /* Audio Control 2 */ + l_ch = 1; + r_ch = 1; + if (s->softstep && !(s->dac_power & (1 << 10))) { + l_ch = (qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) > + s->volume_change + TSC_SOFTSTEP_DELAY); + r_ch = (qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) > + s->volume_change + TSC_SOFTSTEP_DELAY); + } + + return s->audio_ctrl2 | (l_ch << 3) | (r_ch << 2); + + case 0x05: /* Stereo DAC Power Control */ + return 0x2aa0 | s->dac_power | + (((s->dac_power & (1 << 10)) && + (qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) > + s->powerdown + TSC_POWEROFF_DELAY)) << 6); + + case 0x06: /* Audio Control 3 */ + val = s->audio_ctrl3 | 0x0001; + s->audio_ctrl3 &= 0xff3f; + return val; + + case 0x07: /* LCH_BASS_BOOST_N0 */ + case 0x08: /* LCH_BASS_BOOST_N1 */ + case 0x09: /* LCH_BASS_BOOST_N2 */ + case 0x0a: /* LCH_BASS_BOOST_N3 */ + case 0x0b: /* LCH_BASS_BOOST_N4 */ + case 0x0c: /* LCH_BASS_BOOST_N5 */ + case 0x0d: /* LCH_BASS_BOOST_D1 */ + case 0x0e: /* LCH_BASS_BOOST_D2 */ + case 0x0f: /* LCH_BASS_BOOST_D4 */ + case 0x10: /* LCH_BASS_BOOST_D5 */ + case 0x11: /* RCH_BASS_BOOST_N0 */ + case 0x12: /* RCH_BASS_BOOST_N1 */ + case 0x13: /* RCH_BASS_BOOST_N2 */ + case 0x14: /* RCH_BASS_BOOST_N3 */ + case 0x15: /* RCH_BASS_BOOST_N4 */ + case 0x16: /* RCH_BASS_BOOST_N5 */ + case 0x17: /* RCH_BASS_BOOST_D1 */ + case 0x18: /* RCH_BASS_BOOST_D2 */ + case 0x19: /* RCH_BASS_BOOST_D4 */ + case 0x1a: /* RCH_BASS_BOOST_D5 */ + return s->filter_data[reg - 0x07]; + + case 0x1b: /* PLL Programmability 1 */ + return s->pll[0]; + + case 0x1c: /* PLL Programmability 2 */ + return s->pll[1]; + + case 0x1d: /* Audio Control 4 */ + return (!s->softstep) << 14; + + default: +#ifdef TSC_VERBOSE + fprintf(stderr, "tsc2102_audio_register_read: " + "no such register: 0x%02x\n", reg); +#endif + return 0xffff; + } +} + +static void tsc2102_data_register_write( + TSC210xState *s, int reg, uint16_t value) +{ + switch (reg) { + case 0x00: /* X */ + case 0x01: /* Y */ + case 0x02: /* Z1 */ + case 0x03: /* Z2 */ + case 0x05: /* BAT1 */ + case 0x06: /* BAT2 */ + case 0x07: /* AUX1 */ + case 0x08: /* AUX2 */ + case 0x09: /* TEMP1 */ + case 0x0a: /* TEMP2 */ + return; + + default: + qemu_log_mask(LOG_GUEST_ERROR, "tsc2102_data_register_write: " + "no such register: 0x%02x\n", reg); + } +} + +static void tsc2102_control_register_write( + 
TSC210xState *s, int reg, uint16_t value) +{ + switch (reg) { + case 0x00: /* TSC ADC */ + s->host_mode = value >> 15; + s->enabled = !(value & 0x4000); + if (s->busy && !s->enabled) + timer_del(s->timer); + s->busy = s->busy && s->enabled; + s->nextfunction = (value >> 10) & 0xf; + s->nextprecision = (value >> 8) & 3; + s->filter = value & 0xff; + return; + + case 0x01: /* Status / Keypad Control */ + if ((s->model & 0xff00) == 0x2100) + s->pin_func = value >> 14; + else { + s->kb.scan = (value >> 14) & 1; + s->kb.debounce = (value >> 11) & 7; + if (s->kb.intr && s->kb.scan) { + s->kb.intr = 0; + qemu_irq_raise(s->kbint); + } + } + return; + + case 0x02: /* DAC Control */ + if ((s->model & 0xff00) == 0x2300) { + s->dac_power &= 0x7fff; + s->dac_power |= 0x8000 & value; + } else + goto bad_reg; + break; + + case 0x03: /* Reference */ + s->ref = value & 0x1f; + return; + + case 0x04: /* Reset */ + if (value == 0xbb00) { + if (s->busy) + timer_del(s->timer); + tsc210x_reset(s); +#ifdef TSC_VERBOSE + } else { + fprintf(stderr, "tsc2102_control_register_write: " + "wrong value written into RESET\n"); +#endif + } + return; + + case 0x05: /* Configuration */ + s->timing = value & 0x3f; +#ifdef TSC_VERBOSE + if (value & ~0x3f) + fprintf(stderr, "tsc2102_control_register_write: " + "wrong value written into CONFIG\n"); +#endif + return; + + case 0x06: /* Secondary configuration */ + if ((s->model & 0xff00) == 0x2100) + goto bad_reg; + s->kb.mode = value >> 14; + s->pll[2] = value & 0x3ffff; + return; + + case 0x10: /* Keypad Mask */ + if ((s->model & 0xff00) == 0x2100) + goto bad_reg; + s->kb.mask = value; + return; + + default: + bad_reg: + qemu_log_mask(LOG_GUEST_ERROR, "tsc2102_control_register_write: " + "no such register: 0x%02x\n", reg); + } +} + +static void tsc2102_audio_register_write( + TSC210xState *s, int reg, uint16_t value) +{ + switch (reg) { + case 0x00: /* Audio Control 1 */ + s->audio_ctrl1 = value & 0x0f3f; +#ifdef TSC_VERBOSE + if ((value & ~0x0f3f) || ((value & 7) != ((value >> 3) & 7))) + fprintf(stderr, "tsc2102_audio_register_write: " + "wrong value written into Audio 1\n"); +#endif + tsc2102_audio_rate_update(s); + tsc2102_audio_output_update(s); + return; + + case 0x01: +#ifdef TSC_VERBOSE + if (value != 0xff00) + fprintf(stderr, "tsc2102_audio_register_write: " + "wrong value written into reg 0x01\n"); +#endif + return; + + case 0x02: /* DAC Volume Control */ + s->volume = value; + s->volume_change = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + return; + + case 0x03: +#ifdef TSC_VERBOSE + if (value != 0x8b00) + fprintf(stderr, "tsc2102_audio_register_write: " + "wrong value written into reg 0x03\n"); +#endif + return; + + case 0x04: /* Audio Control 2 */ + s->audio_ctrl2 = value & 0xf7f2; +#ifdef TSC_VERBOSE + if (value & ~0xf7fd) + fprintf(stderr, "tsc2102_audio_register_write: " + "wrong value written into Audio 2\n"); +#endif + return; + + case 0x05: /* Stereo DAC Power Control */ + if ((value & ~s->dac_power) & (1 << 10)) + s->powerdown = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + + s->dac_power = value & 0x9543; +#ifdef TSC_VERBOSE + if ((value & ~0x9543) != 0x2aa0) + fprintf(stderr, "tsc2102_audio_register_write: " + "wrong value written into Power\n"); +#endif + tsc2102_audio_rate_update(s); + tsc2102_audio_output_update(s); + return; + + case 0x06: /* Audio Control 3 */ + s->audio_ctrl3 &= 0x00c0; + s->audio_ctrl3 |= value & 0xf800; +#ifdef TSC_VERBOSE + if (value & ~0xf8c7) + fprintf(stderr, "tsc2102_audio_register_write: " + "wrong value written into Audio 3\n"); 
+#endif + tsc2102_audio_output_update(s); + return; + + case 0x07: /* LCH_BASS_BOOST_N0 */ + case 0x08: /* LCH_BASS_BOOST_N1 */ + case 0x09: /* LCH_BASS_BOOST_N2 */ + case 0x0a: /* LCH_BASS_BOOST_N3 */ + case 0x0b: /* LCH_BASS_BOOST_N4 */ + case 0x0c: /* LCH_BASS_BOOST_N5 */ + case 0x0d: /* LCH_BASS_BOOST_D1 */ + case 0x0e: /* LCH_BASS_BOOST_D2 */ + case 0x0f: /* LCH_BASS_BOOST_D4 */ + case 0x10: /* LCH_BASS_BOOST_D5 */ + case 0x11: /* RCH_BASS_BOOST_N0 */ + case 0x12: /* RCH_BASS_BOOST_N1 */ + case 0x13: /* RCH_BASS_BOOST_N2 */ + case 0x14: /* RCH_BASS_BOOST_N3 */ + case 0x15: /* RCH_BASS_BOOST_N4 */ + case 0x16: /* RCH_BASS_BOOST_N5 */ + case 0x17: /* RCH_BASS_BOOST_D1 */ + case 0x18: /* RCH_BASS_BOOST_D2 */ + case 0x19: /* RCH_BASS_BOOST_D4 */ + case 0x1a: /* RCH_BASS_BOOST_D5 */ + s->filter_data[reg - 0x07] = value; + return; + + case 0x1b: /* PLL Programmability 1 */ + s->pll[0] = value & 0xfffc; +#ifdef TSC_VERBOSE + if (value & ~0xfffc) + fprintf(stderr, "tsc2102_audio_register_write: " + "wrong value written into PLL 1\n"); +#endif + return; + + case 0x1c: /* PLL Programmability 2 */ + s->pll[1] = value & 0xfffc; +#ifdef TSC_VERBOSE + if (value & ~0xfffc) + fprintf(stderr, "tsc2102_audio_register_write: " + "wrong value written into PLL 2\n"); +#endif + return; + + case 0x1d: /* Audio Control 4 */ + s->softstep = !(value & 0x4000); +#ifdef TSC_VERBOSE + if (value & ~0x4000) + fprintf(stderr, "tsc2102_audio_register_write: " + "wrong value written into Audio 4\n"); +#endif + return; + + default: + qemu_log_mask(LOG_GUEST_ERROR, "tsc2102_audio_register_write: " + "no such register: 0x%02x\n", reg); + } +} + +/* This handles most of the chip logic. */ +static void tsc210x_pin_update(TSC210xState *s) +{ + int64_t expires; + bool pin_state; + + switch (s->pin_func) { + case 0: + pin_state = s->pressure; + break; + case 1: + pin_state = !!s->dav; + break; + case 2: + default: + pin_state = s->pressure && !s->dav; + } + + if (!s->enabled) + pin_state = false; + + if (pin_state != s->irq) { + s->irq = pin_state; + qemu_set_irq(s->pint, !s->irq); + } + + switch (s->nextfunction) { + case TSC_MODE_XY_SCAN: + case TSC_MODE_XYZ_SCAN: + if (!s->pressure) + return; + break; + + case TSC_MODE_X: + case TSC_MODE_Y: + case TSC_MODE_Z: + if (!s->pressure) + return; + /* Fall through */ + case TSC_MODE_BAT1: + case TSC_MODE_BAT2: + case TSC_MODE_AUX: + case TSC_MODE_TEMP1: + case TSC_MODE_TEMP2: + if (s->dav) + s->enabled = false; + break; + + case TSC_MODE_AUX_SCAN: + case TSC_MODE_PORT_SCAN: + break; + + case TSC_MODE_NO_SCAN: + case TSC_MODE_XX_DRV: + case TSC_MODE_YY_DRV: + case TSC_MODE_YX_DRV: + default: + return; + } + + if (!s->enabled || s->busy || s->dav) + return; + + s->busy = true; + s->precision = s->nextprecision; + s->function = s->nextfunction; + expires = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + + (NANOSECONDS_PER_SECOND >> 10); + timer_mod(s->timer, expires); +} + +static uint16_t tsc210x_read(TSC210xState *s) +{ + uint16_t ret = 0x0000; + + if (!s->command) + fprintf(stderr, "tsc210x_read: SPI underrun!\n"); + + switch (s->page) { + case TSC_DATA_REGISTERS_PAGE: + ret = tsc2102_data_register_read(s, s->offset); + if (!s->dav) + qemu_irq_raise(s->davint); + break; + case TSC_CONTROL_REGISTERS_PAGE: + ret = tsc2102_control_register_read(s, s->offset); + break; + case TSC_AUDIO_REGISTERS_PAGE: + ret = tsc2102_audio_register_read(s, s->offset); + break; + default: + hw_error("tsc210x_read: wrong memory page\n"); + } + + tsc210x_pin_update(s); + + /* Allow sequential reads. 
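+     * The register offset auto-increments after every access, so the
+     * host can read consecutive registers in one burst.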
*/
+    s->offset ++;
+    s->state = false;
+    return ret;
+}
+
+static void tsc210x_write(TSC210xState *s, uint16_t value)
+{
+    /*
+     * Two-state machine: the first 16-bit word of a transaction carries
+     * the command (read/write flag, page and register offset), the
+     * second carries the data for a write.
+     */
+    if (!s->state) {
+        s->command = (value >> 15) != 0;
+        s->page = (value >> 11) & 0x0f;
+        s->offset = (value >> 5) & 0x3f;
+        s->state = true;
+    } else {
+        if (s->command)
+            fprintf(stderr, "tsc210x_write: SPI overrun!\n");
+        else
+            switch (s->page) {
+            case TSC_DATA_REGISTERS_PAGE:
+                tsc2102_data_register_write(s, s->offset, value);
+                break;
+            case TSC_CONTROL_REGISTERS_PAGE:
+                tsc2102_control_register_write(s, s->offset, value);
+                break;
+            case TSC_AUDIO_REGISTERS_PAGE:
+                tsc2102_audio_register_write(s, s->offset, value);
+                break;
+            default:
+                hw_error("tsc210x_write: wrong memory page\n");
+            }
+
+        tsc210x_pin_update(s);
+        s->state = false;
+    }
+}
+
+uint32_t tsc210x_txrx(void *opaque, uint32_t value, int len)
+{
+    TSC210xState *s = opaque;
+    uint32_t ret = 0;
+
+    if (len != 16)
+        hw_error("%s: FIXME: bad SPI word width %i\n", __func__, len);
+
+    /* TODO: sequential reads etc - how do we make sure the host doesn't
+     * unintentionally read out a conversion result from a register while
+     * transmitting the command word of the next command? */
+    if (!value || (s->state && s->command))
+        ret = tsc210x_read(s);
+    if (value || (s->state && !s->command))
+        tsc210x_write(s, value);
+
+    return ret;
+}
+
+static void tsc210x_timer_tick(void *opaque)
+{
+    TSC210xState *s = opaque;
+
+    /* Timer ticked -- a set of conversions has been finished. */
+
+    if (!s->busy)
+        return;
+
+    s->busy = false;
+    s->dav |= mode_regs[s->function];
+    tsc210x_pin_update(s);
+    qemu_irq_lower(s->davint);
+}
+
+static void tsc210x_touchscreen_event(void *opaque,
+                int x, int y, int z, int buttons_state)
+{
+    TSC210xState *s = opaque;
+    int p = s->pressure;
+
+    if (buttons_state) {
+        s->x = x;
+        s->y = y;
+    }
+    s->pressure = !!buttons_state;
+
+    /*
+     * Note: We would get better responsiveness in the guest by
+     * signaling TS events immediately, but for now we simulate
+     * the first conversion delay for sake of correctness.
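+     * (The delay is modelled by the QEMU_CLOCK_VIRTUAL timer armed in
+     * tsc210x_pin_update(); DAV asserts only once tsc210x_timer_tick()
+     * fires.)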
+ */ + if (p != s->pressure) + tsc210x_pin_update(s); +} + +static void tsc210x_i2s_swallow(TSC210xState *s) +{ + if (s->dac_voice[0]) + tsc210x_out_flush(s, s->codec.out.len); + else + s->codec.out.len = 0; +} + +static void tsc210x_i2s_set_rate(TSC210xState *s, int in, int out) +{ + s->i2s_tx_rate = out; + s->i2s_rx_rate = in; +} + +static int tsc210x_pre_save(void *opaque) +{ + TSC210xState *s = (TSC210xState *) opaque; + s->now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + + return 0; +} + +static int tsc210x_post_load(void *opaque, int version_id) +{ + TSC210xState *s = (TSC210xState *) opaque; + int64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + + if (s->function >= ARRAY_SIZE(mode_regs)) { + return -EINVAL; + } + if (s->nextfunction >= ARRAY_SIZE(mode_regs)) { + return -EINVAL; + } + if (s->precision >= ARRAY_SIZE(resolution)) { + return -EINVAL; + } + if (s->nextprecision >= ARRAY_SIZE(resolution)) { + return -EINVAL; + } + + s->volume_change -= s->now; + s->volume_change += now; + s->powerdown -= s->now; + s->powerdown += now; + + s->busy = timer_pending(s->timer); + qemu_set_irq(s->pint, !s->irq); + qemu_set_irq(s->davint, !s->dav); + + return 0; +} + +static VMStateField vmstatefields_tsc210x[] = { + VMSTATE_BOOL(enabled, TSC210xState), + VMSTATE_BOOL(host_mode, TSC210xState), + VMSTATE_BOOL(irq, TSC210xState), + VMSTATE_BOOL(command, TSC210xState), + VMSTATE_BOOL(pressure, TSC210xState), + VMSTATE_BOOL(softstep, TSC210xState), + VMSTATE_BOOL(state, TSC210xState), + VMSTATE_UINT16(dav, TSC210xState), + VMSTATE_INT32(x, TSC210xState), + VMSTATE_INT32(y, TSC210xState), + VMSTATE_UINT8(offset, TSC210xState), + VMSTATE_UINT8(page, TSC210xState), + VMSTATE_UINT8(filter, TSC210xState), + VMSTATE_UINT8(pin_func, TSC210xState), + VMSTATE_UINT8(ref, TSC210xState), + VMSTATE_UINT8(timing, TSC210xState), + VMSTATE_UINT8(noise, TSC210xState), + VMSTATE_UINT8(function, TSC210xState), + VMSTATE_UINT8(nextfunction, TSC210xState), + VMSTATE_UINT8(precision, TSC210xState), + VMSTATE_UINT8(nextprecision, TSC210xState), + VMSTATE_UINT16(audio_ctrl1, TSC210xState), + VMSTATE_UINT16(audio_ctrl2, TSC210xState), + VMSTATE_UINT16(audio_ctrl3, TSC210xState), + VMSTATE_UINT16_ARRAY(pll, TSC210xState, 3), + VMSTATE_UINT16(volume, TSC210xState), + VMSTATE_UINT16(dac_power, TSC210xState), + VMSTATE_INT64(volume_change, TSC210xState), + VMSTATE_INT64(powerdown, TSC210xState), + VMSTATE_INT64(now, TSC210xState), + VMSTATE_UINT16_ARRAY(filter_data, TSC210xState, 0x14), + VMSTATE_TIMER_PTR(timer, TSC210xState), + VMSTATE_END_OF_LIST() +}; + +static const VMStateDescription vmstate_tsc2102 = { + .name = "tsc2102", + .version_id = 1, + .minimum_version_id = 1, + .pre_save = tsc210x_pre_save, + .post_load = tsc210x_post_load, + .fields = vmstatefields_tsc210x, +}; + +static const VMStateDescription vmstate_tsc2301 = { + .name = "tsc2301", + .version_id = 1, + .minimum_version_id = 1, + .pre_save = tsc210x_pre_save, + .post_load = tsc210x_post_load, + .fields = vmstatefields_tsc210x, +}; + +uWireSlave *tsc2102_init(qemu_irq pint) +{ + TSC210xState *s; + + s = g_new0(TSC210xState, 1); + s->x = 160; + s->y = 160; + s->pressure = 0; + s->precision = s->nextprecision = 0; + s->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, tsc210x_timer_tick, s); + s->pint = pint; + s->model = 0x2102; + s->name = "tsc2102"; + + s->tr[0] = 0; + s->tr[1] = 1; + s->tr[2] = 1; + s->tr[3] = 0; + s->tr[4] = 1; + s->tr[5] = 0; + s->tr[6] = 1; + s->tr[7] = 0; + + s->chip.opaque = s; + s->chip.send = (void *) tsc210x_write; + s->chip.receive = (void 
*) tsc210x_read; + + s->codec.opaque = s; + s->codec.tx_swallow = (void *) tsc210x_i2s_swallow; + s->codec.set_rate = (void *) tsc210x_i2s_set_rate; + s->codec.in.fifo = s->in_fifo; + s->codec.out.fifo = s->out_fifo; + + tsc210x_reset(s); + + qemu_add_mouse_event_handler(tsc210x_touchscreen_event, s, 1, + "QEMU TSC2102-driven Touchscreen"); + + AUD_register_card(s->name, &s->card); + + qemu_register_reset((void *) tsc210x_reset, s); + vmstate_register(NULL, 0, &vmstate_tsc2102, s); + + return &s->chip; +} + +uWireSlave *tsc2301_init(qemu_irq penirq, qemu_irq kbirq, qemu_irq dav) +{ + TSC210xState *s; + + s = g_new0(TSC210xState, 1); + s->x = 400; + s->y = 240; + s->pressure = 0; + s->precision = s->nextprecision = 0; + s->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, tsc210x_timer_tick, s); + s->pint = penirq; + s->kbint = kbirq; + s->davint = dav; + s->model = 0x2301; + s->name = "tsc2301"; + + s->tr[0] = 0; + s->tr[1] = 1; + s->tr[2] = 1; + s->tr[3] = 0; + s->tr[4] = 1; + s->tr[5] = 0; + s->tr[6] = 1; + s->tr[7] = 0; + + s->chip.opaque = s; + s->chip.send = (void *) tsc210x_write; + s->chip.receive = (void *) tsc210x_read; + + s->codec.opaque = s; + s->codec.tx_swallow = (void *) tsc210x_i2s_swallow; + s->codec.set_rate = (void *) tsc210x_i2s_set_rate; + s->codec.in.fifo = s->in_fifo; + s->codec.out.fifo = s->out_fifo; + + tsc210x_reset(s); + + qemu_add_mouse_event_handler(tsc210x_touchscreen_event, s, 1, + "QEMU TSC2301-driven Touchscreen"); + + AUD_register_card(s->name, &s->card); + + qemu_register_reset((void *) tsc210x_reset, s); + vmstate_register(NULL, 0, &vmstate_tsc2301, s); + + return &s->chip; +} + +I2SCodec *tsc210x_codec(uWireSlave *chip) +{ + TSC210xState *s = (TSC210xState *) chip->opaque; + + return &s->codec; +} + +/* + * Use tslib generated calibration data to generate ADC input values + * from the touchscreen. Assuming 12-bit precision was used during + * tslib calibration. + */ +void tsc210x_set_transform(uWireSlave *chip, + MouseTransformInfo *info) +{ + TSC210xState *s = (TSC210xState *) chip->opaque; +#if 0 + int64_t ltr[8]; + + ltr[0] = (int64_t) info->a[1] * info->y; + ltr[1] = (int64_t) info->a[4] * info->x; + ltr[2] = (int64_t) info->a[1] * info->a[3] - + (int64_t) info->a[4] * info->a[0]; + ltr[3] = (int64_t) info->a[2] * info->a[4] - + (int64_t) info->a[5] * info->a[1]; + ltr[4] = (int64_t) info->a[0] * info->y; + ltr[5] = (int64_t) info->a[3] * info->x; + ltr[6] = (int64_t) info->a[4] * info->a[0] - + (int64_t) info->a[1] * info->a[3]; + ltr[7] = (int64_t) info->a[2] * info->a[3] - + (int64_t) info->a[5] * info->a[0]; + + /* Avoid integer overflow */ + s->tr[0] = ltr[0] >> 11; + s->tr[1] = ltr[1] >> 11; + s->tr[2] = muldiv64(ltr[2], 1, info->a[6]); + s->tr[3] = muldiv64(ltr[3], 1 << 4, ltr[2]); + s->tr[4] = ltr[4] >> 11; + s->tr[5] = ltr[5] >> 11; + s->tr[6] = muldiv64(ltr[6], 1, info->a[6]); + s->tr[7] = muldiv64(ltr[7], 1 << 4, ltr[6]); +#else + + /* This version assumes touchscreen X & Y axis are parallel or + * perpendicular to LCD's X & Y axis in some way. 
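+     * As in the tsc2005 model, this lets the full affine calibration
+     * (kept under #if 0 above) degenerate to one dominant coefficient
+     * plus an offset per axis.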
*/ + if (abs(info->a[0]) > abs(info->a[1])) { + s->tr[0] = 0; + s->tr[1] = -info->a[6] * info->x; + s->tr[2] = info->a[0]; + s->tr[3] = -info->a[2] / info->a[0]; + s->tr[4] = info->a[6] * info->y; + s->tr[5] = 0; + s->tr[6] = info->a[4]; + s->tr[7] = -info->a[5] / info->a[4]; + } else { + s->tr[0] = info->a[6] * info->y; + s->tr[1] = 0; + s->tr[2] = info->a[1]; + s->tr[3] = -info->a[2] / info->a[1]; + s->tr[4] = 0; + s->tr[5] = -info->a[6] * info->x; + s->tr[6] = info->a[3]; + s->tr[7] = -info->a[5] / info->a[3]; + } + + s->tr[0] >>= 11; + s->tr[1] >>= 11; + s->tr[3] <<= 4; + s->tr[4] >>= 11; + s->tr[5] >>= 11; + s->tr[7] <<= 4; +#endif +} + +void tsc210x_key_event(uWireSlave *chip, int key, int down) +{ + TSC210xState *s = (TSC210xState *) chip->opaque; + + if (down) + s->kb.down |= 1 << key; + else + s->kb.down &= ~(1 << key); + + if (down && (s->kb.down & ~s->kb.mask) && !s->kb.intr) { + s->kb.intr = 1; + qemu_irq_lower(s->kbint); + } else if (s->kb.intr && !(s->kb.down & ~s->kb.mask) && + !(s->kb.mode & 1)) { + s->kb.intr = 0; + qemu_irq_raise(s->kbint); + } +} diff --git a/hw/input/vhost-user-input.c b/hw/input/vhost-user-input.c new file mode 100644 index 000000000..273e96a7b --- /dev/null +++ b/hw/input/vhost-user-input.c @@ -0,0 +1,130 @@ +/* + * This work is licensed under the terms of the GNU GPL, version 2 or + * (at your option) any later version. See the COPYING file in the + * top-level directory. + */ + +#include "qemu/osdep.h" +#include "qemu/error-report.h" +#include "qapi/error.h" +#include "qemu-common.h" + +#include "hw/virtio/virtio-input.h" + +static int vhost_input_config_change(struct vhost_dev *dev) +{ + error_report("vhost-user-input: unhandled backend config change"); + return -1; +} + +static const VhostDevConfigOps config_ops = { + .vhost_dev_config_notifier = vhost_input_config_change, +}; + +static void vhost_input_realize(DeviceState *dev, Error **errp) +{ + VHostUserInput *vhi = VHOST_USER_INPUT(dev); + VirtIOInput *vinput = VIRTIO_INPUT(dev); + VirtIODevice *vdev = VIRTIO_DEVICE(dev); + + vhost_dev_set_config_notifier(&vhi->vhost->dev, &config_ops); + vinput->cfg_size = sizeof_field(virtio_input_config, u); + if (vhost_user_backend_dev_init(vhi->vhost, vdev, 2, errp) == -1) { + return; + } +} + +static void vhost_input_change_active(VirtIOInput *vinput) +{ + VHostUserInput *vhi = VHOST_USER_INPUT(vinput); + + if (vinput->active) { + vhost_user_backend_start(vhi->vhost); + } else { + vhost_user_backend_stop(vhi->vhost); + } +} + +static void vhost_input_get_config(VirtIODevice *vdev, uint8_t *config_data) +{ + VirtIOInput *vinput = VIRTIO_INPUT(vdev); + VHostUserInput *vhi = VHOST_USER_INPUT(vdev); + Error *local_err = NULL; + int ret; + + memset(config_data, 0, vinput->cfg_size); + + ret = vhost_dev_get_config(&vhi->vhost->dev, config_data, vinput->cfg_size, + &local_err); + if (ret) { + error_report_err(local_err); + return; + } +} + +static void vhost_input_set_config(VirtIODevice *vdev, + const uint8_t *config_data) +{ + VHostUserInput *vhi = VHOST_USER_INPUT(vdev); + int ret; + + ret = vhost_dev_set_config(&vhi->vhost->dev, config_data, + 0, sizeof(virtio_input_config), + VHOST_SET_CONFIG_TYPE_MASTER); + if (ret) { + error_report("vhost-user-input: set device config space failed"); + return; + } + + virtio_notify_config(vdev); +} + +static const VMStateDescription vmstate_vhost_input = { + .name = "vhost-user-input", + .unmigratable = 1, +}; + +static void vhost_input_class_init(ObjectClass *klass, void *data) +{ + VirtIOInputClass *vic = 
VIRTIO_INPUT_CLASS(klass); + VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass); + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->vmsd = &vmstate_vhost_input; + vdc->get_config = vhost_input_get_config; + vdc->set_config = vhost_input_set_config; + vic->realize = vhost_input_realize; + vic->change_active = vhost_input_change_active; +} + +static void vhost_input_init(Object *obj) +{ + VHostUserInput *vhi = VHOST_USER_INPUT(obj); + + vhi->vhost = VHOST_USER_BACKEND(object_new(TYPE_VHOST_USER_BACKEND)); + object_property_add_alias(obj, "chardev", + OBJECT(vhi->vhost), "chardev"); +} + +static void vhost_input_finalize(Object *obj) +{ + VHostUserInput *vhi = VHOST_USER_INPUT(obj); + + object_unref(OBJECT(vhi->vhost)); +} + +static const TypeInfo vhost_input_info = { + .name = TYPE_VHOST_USER_INPUT, + .parent = TYPE_VIRTIO_INPUT, + .instance_size = sizeof(VHostUserInput), + .instance_init = vhost_input_init, + .instance_finalize = vhost_input_finalize, + .class_init = vhost_input_class_init, +}; + +static void vhost_input_register_types(void) +{ + type_register_static(&vhost_input_info); +} + +type_init(vhost_input_register_types) diff --git a/hw/input/virtio-input-hid.c b/hw/input/virtio-input-hid.c new file mode 100644 index 000000000..a7a244a95 --- /dev/null +++ b/hw/input/virtio-input-hid.c @@ -0,0 +1,522 @@ +/* + * This work is licensed under the terms of the GNU GPL, version 2 or + * (at your option) any later version. See the COPYING file in the + * top-level directory. + */ + +#include "qemu/osdep.h" +#include "qemu/iov.h" +#include "qemu/module.h" + +#include "hw/virtio/virtio.h" +#include "hw/qdev-properties.h" +#include "hw/virtio/virtio-input.h" + +#include "ui/console.h" + +#include "standard-headers/linux/input.h" + +#define VIRTIO_ID_NAME_KEYBOARD "QEMU Virtio Keyboard" +#define VIRTIO_ID_NAME_MOUSE "QEMU Virtio Mouse" +#define VIRTIO_ID_NAME_TABLET "QEMU Virtio Tablet" + +/* ----------------------------------------------------------------- */ + +static const unsigned short keymap_button[INPUT_BUTTON__MAX] = { + [INPUT_BUTTON_LEFT] = BTN_LEFT, + [INPUT_BUTTON_RIGHT] = BTN_RIGHT, + [INPUT_BUTTON_MIDDLE] = BTN_MIDDLE, + [INPUT_BUTTON_WHEEL_UP] = BTN_GEAR_UP, + [INPUT_BUTTON_WHEEL_DOWN] = BTN_GEAR_DOWN, + [INPUT_BUTTON_SIDE] = BTN_SIDE, + [INPUT_BUTTON_EXTRA] = BTN_EXTRA, +}; + +static const unsigned short axismap_rel[INPUT_AXIS__MAX] = { + [INPUT_AXIS_X] = REL_X, + [INPUT_AXIS_Y] = REL_Y, +}; + +static const unsigned short axismap_abs[INPUT_AXIS__MAX] = { + [INPUT_AXIS_X] = ABS_X, + [INPUT_AXIS_Y] = ABS_Y, +}; + +/* ----------------------------------------------------------------- */ + +static void virtio_input_key_config(VirtIOInput *vinput, + const unsigned short *keymap, + size_t mapsize) +{ + virtio_input_config keys; + int i, bit, byte, bmax = 0; + + memset(&keys, 0, sizeof(keys)); + for (i = 0; i < mapsize; i++) { + bit = keymap[i]; + if (!bit) { + continue; + } + byte = bit / 8; + bit = bit % 8; + keys.u.bitmap[byte] |= (1 << bit); + if (bmax < byte+1) { + bmax = byte+1; + } + } + keys.select = VIRTIO_INPUT_CFG_EV_BITS; + keys.subsel = EV_KEY; + keys.size = bmax; + virtio_input_add_config(vinput, &keys); +} + +static void virtio_input_handle_event(DeviceState *dev, QemuConsole *src, + InputEvent *evt) +{ + VirtIOInputHID *vhid = VIRTIO_INPUT_HID(dev); + VirtIOInput *vinput = VIRTIO_INPUT(dev); + virtio_input_event event; + int qcode; + InputKeyEvent *key; + InputMoveEvent *move; + InputBtnEvent *btn; + + switch (evt->type) { + case INPUT_EVENT_KIND_KEY: + key = 
evt->u.key.data; + qcode = qemu_input_key_value_to_qcode(key->key); + if (qcode < qemu_input_map_qcode_to_linux_len && + qemu_input_map_qcode_to_linux[qcode]) { + event.type = cpu_to_le16(EV_KEY); + event.code = cpu_to_le16(qemu_input_map_qcode_to_linux[qcode]); + event.value = cpu_to_le32(key->down ? 1 : 0); + virtio_input_send(vinput, &event); + } else { + if (key->down) { + fprintf(stderr, "%s: unmapped key: %d [%s]\n", __func__, + qcode, QKeyCode_str(qcode)); + } + } + break; + case INPUT_EVENT_KIND_BTN: + btn = evt->u.btn.data; + if (vhid->wheel_axis && + (btn->button == INPUT_BUTTON_WHEEL_UP || + btn->button == INPUT_BUTTON_WHEEL_DOWN) && + btn->down) { + event.type = cpu_to_le16(EV_REL); + event.code = cpu_to_le16(REL_WHEEL); + event.value = cpu_to_le32(btn->button == INPUT_BUTTON_WHEEL_UP + ? 1 : -1); + virtio_input_send(vinput, &event); + } else if (keymap_button[btn->button]) { + event.type = cpu_to_le16(EV_KEY); + event.code = cpu_to_le16(keymap_button[btn->button]); + event.value = cpu_to_le32(btn->down ? 1 : 0); + virtio_input_send(vinput, &event); + } else { + if (btn->down) { + fprintf(stderr, "%s: unmapped button: %d [%s]\n", __func__, + btn->button, + InputButton_str(btn->button)); + } + } + break; + case INPUT_EVENT_KIND_REL: + move = evt->u.rel.data; + event.type = cpu_to_le16(EV_REL); + event.code = cpu_to_le16(axismap_rel[move->axis]); + event.value = cpu_to_le32(move->value); + virtio_input_send(vinput, &event); + break; + case INPUT_EVENT_KIND_ABS: + move = evt->u.abs.data; + event.type = cpu_to_le16(EV_ABS); + event.code = cpu_to_le16(axismap_abs[move->axis]); + event.value = cpu_to_le32(move->value); + virtio_input_send(vinput, &event); + break; + default: + /* keep gcc happy */ + break; + } +} + +static void virtio_input_handle_sync(DeviceState *dev) +{ + VirtIOInput *vinput = VIRTIO_INPUT(dev); + virtio_input_event event = { + .type = cpu_to_le16(EV_SYN), + .code = cpu_to_le16(SYN_REPORT), + .value = 0, + }; + + virtio_input_send(vinput, &event); +} + +static void virtio_input_hid_realize(DeviceState *dev, Error **errp) +{ + VirtIOInputHID *vhid = VIRTIO_INPUT_HID(dev); + + vhid->hs = qemu_input_handler_register(dev, vhid->handler); + if (vhid->display && vhid->hs) { + qemu_input_handler_bind(vhid->hs, vhid->display, vhid->head, NULL); + } +} + +static void virtio_input_hid_unrealize(DeviceState *dev) +{ + VirtIOInputHID *vhid = VIRTIO_INPUT_HID(dev); + qemu_input_handler_unregister(vhid->hs); +} + +static void virtio_input_hid_change_active(VirtIOInput *vinput) +{ + VirtIOInputHID *vhid = VIRTIO_INPUT_HID(vinput); + + if (vinput->active) { + qemu_input_handler_activate(vhid->hs); + } else { + qemu_input_handler_deactivate(vhid->hs); + } +} + +static void virtio_input_hid_handle_status(VirtIOInput *vinput, + virtio_input_event *event) +{ + VirtIOInputHID *vhid = VIRTIO_INPUT_HID(vinput); + int ledbit = 0; + + switch (le16_to_cpu(event->type)) { + case EV_LED: + if (event->code == LED_NUML) { + ledbit = QEMU_NUM_LOCK_LED; + } else if (event->code == LED_CAPSL) { + ledbit = QEMU_CAPS_LOCK_LED; + } else if (event->code == LED_SCROLLL) { + ledbit = QEMU_SCROLL_LOCK_LED; + } + if (event->value) { + vhid->ledstate |= ledbit; + } else { + vhid->ledstate &= ~ledbit; + } + kbd_put_ledstate(vhid->ledstate); + break; + default: + fprintf(stderr, "%s: unknown type %d\n", __func__, + le16_to_cpu(event->type)); + break; + } +} + +static Property virtio_input_hid_properties[] = { + DEFINE_PROP_STRING("display", VirtIOInputHID, display), + DEFINE_PROP_UINT32("head", 
VirtIOInputHID, head, 0), + DEFINE_PROP_END_OF_LIST(), +}; + +static void virtio_input_hid_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + VirtIOInputClass *vic = VIRTIO_INPUT_CLASS(klass); + + device_class_set_props(dc, virtio_input_hid_properties); + vic->realize = virtio_input_hid_realize; + vic->unrealize = virtio_input_hid_unrealize; + vic->change_active = virtio_input_hid_change_active; + vic->handle_status = virtio_input_hid_handle_status; +} + +static const TypeInfo virtio_input_hid_info = { + .name = TYPE_VIRTIO_INPUT_HID, + .parent = TYPE_VIRTIO_INPUT, + .instance_size = sizeof(VirtIOInputHID), + .class_init = virtio_input_hid_class_init, + .abstract = true, +}; + +/* ----------------------------------------------------------------- */ + +static QemuInputHandler virtio_keyboard_handler = { + .name = VIRTIO_ID_NAME_KEYBOARD, + .mask = INPUT_EVENT_MASK_KEY, + .event = virtio_input_handle_event, + .sync = virtio_input_handle_sync, +}; + +static struct virtio_input_config virtio_keyboard_config[] = { + { + .select = VIRTIO_INPUT_CFG_ID_NAME, + .size = sizeof(VIRTIO_ID_NAME_KEYBOARD), + .u.string = VIRTIO_ID_NAME_KEYBOARD, + },{ + .select = VIRTIO_INPUT_CFG_ID_DEVIDS, + .size = sizeof(struct virtio_input_devids), + .u.ids = { + .bustype = const_le16(BUS_VIRTUAL), + .vendor = const_le16(0x0627), /* same we use for usb hid devices */ + .product = const_le16(0x0001), + .version = const_le16(0x0001), + }, + },{ + .select = VIRTIO_INPUT_CFG_EV_BITS, + .subsel = EV_REP, + .size = 1, + },{ + .select = VIRTIO_INPUT_CFG_EV_BITS, + .subsel = EV_LED, + .size = 1, + .u.bitmap = { + (1 << LED_NUML) | (1 << LED_CAPSL) | (1 << LED_SCROLLL), + }, + }, + { /* end of list */ }, +}; + +static void virtio_keyboard_init(Object *obj) +{ + VirtIOInputHID *vhid = VIRTIO_INPUT_HID(obj); + VirtIOInput *vinput = VIRTIO_INPUT(obj); + + vhid->handler = &virtio_keyboard_handler; + virtio_input_init_config(vinput, virtio_keyboard_config); + virtio_input_key_config(vinput, qemu_input_map_qcode_to_linux, + qemu_input_map_qcode_to_linux_len); +} + +static const TypeInfo virtio_keyboard_info = { + .name = TYPE_VIRTIO_KEYBOARD, + .parent = TYPE_VIRTIO_INPUT_HID, + .instance_size = sizeof(VirtIOInputHID), + .instance_init = virtio_keyboard_init, +}; + +/* ----------------------------------------------------------------- */ + +static QemuInputHandler virtio_mouse_handler = { + .name = VIRTIO_ID_NAME_MOUSE, + .mask = INPUT_EVENT_MASK_BTN | INPUT_EVENT_MASK_REL, + .event = virtio_input_handle_event, + .sync = virtio_input_handle_sync, +}; + +static struct virtio_input_config virtio_mouse_config_v1[] = { + { + .select = VIRTIO_INPUT_CFG_ID_NAME, + .size = sizeof(VIRTIO_ID_NAME_MOUSE), + .u.string = VIRTIO_ID_NAME_MOUSE, + },{ + .select = VIRTIO_INPUT_CFG_ID_DEVIDS, + .size = sizeof(struct virtio_input_devids), + .u.ids = { + .bustype = const_le16(BUS_VIRTUAL), + .vendor = const_le16(0x0627), /* same we use for usb hid devices */ + .product = const_le16(0x0002), + .version = const_le16(0x0001), + }, + },{ + .select = VIRTIO_INPUT_CFG_EV_BITS, + .subsel = EV_REL, + .size = 1, + .u.bitmap = { + (1 << REL_X) | (1 << REL_Y), + }, + }, + { /* end of list */ }, +}; + +static struct virtio_input_config virtio_mouse_config_v2[] = { + { + .select = VIRTIO_INPUT_CFG_ID_NAME, + .size = sizeof(VIRTIO_ID_NAME_MOUSE), + .u.string = VIRTIO_ID_NAME_MOUSE, + },{ + .select = VIRTIO_INPUT_CFG_ID_DEVIDS, + .size = sizeof(struct virtio_input_devids), + .u.ids = { + .bustype = const_le16(BUS_VIRTUAL), + 
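+            /*
+             * Same ID scheme as the other HID tables in this file:
+             * vendor 0x0627 is shared with QEMU's USB HID devices, the
+             * product code identifies the device class (0x0002 = mouse),
+             * and the version is bumped to 0x0002 because this variant
+             * reports REL_WHEEL events instead of wheel button presses.
+             */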
.vendor = const_le16(0x0627), /* same we use for usb hid devices */ + .product = const_le16(0x0002), + .version = const_le16(0x0002), + }, + },{ + .select = VIRTIO_INPUT_CFG_EV_BITS, + .subsel = EV_REL, + .size = 2, + .u.bitmap = { + (1 << REL_X) | (1 << REL_Y), + (1 << (REL_WHEEL - 8)) + }, + }, + { /* end of list */ }, +}; + +static Property virtio_mouse_properties[] = { + DEFINE_PROP_BOOL("wheel-axis", VirtIOInputHID, wheel_axis, true), + DEFINE_PROP_END_OF_LIST(), +}; + +static void virtio_mouse_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + device_class_set_props(dc, virtio_mouse_properties); +} + +static void virtio_mouse_init(Object *obj) +{ + VirtIOInputHID *vhid = VIRTIO_INPUT_HID(obj); + VirtIOInput *vinput = VIRTIO_INPUT(obj); + + vhid->handler = &virtio_mouse_handler; + virtio_input_init_config(vinput, vhid->wheel_axis + ? virtio_mouse_config_v2 + : virtio_mouse_config_v1); + virtio_input_key_config(vinput, keymap_button, + ARRAY_SIZE(keymap_button)); +} + +static const TypeInfo virtio_mouse_info = { + .name = TYPE_VIRTIO_MOUSE, + .parent = TYPE_VIRTIO_INPUT_HID, + .instance_size = sizeof(VirtIOInputHID), + .instance_init = virtio_mouse_init, + .class_init = virtio_mouse_class_init, +}; + +/* ----------------------------------------------------------------- */ + +static QemuInputHandler virtio_tablet_handler = { + .name = VIRTIO_ID_NAME_TABLET, + .mask = INPUT_EVENT_MASK_BTN | INPUT_EVENT_MASK_ABS, + .event = virtio_input_handle_event, + .sync = virtio_input_handle_sync, +}; + +static struct virtio_input_config virtio_tablet_config_v1[] = { + { + .select = VIRTIO_INPUT_CFG_ID_NAME, + .size = sizeof(VIRTIO_ID_NAME_TABLET), + .u.string = VIRTIO_ID_NAME_TABLET, + },{ + .select = VIRTIO_INPUT_CFG_ID_DEVIDS, + .size = sizeof(struct virtio_input_devids), + .u.ids = { + .bustype = const_le16(BUS_VIRTUAL), + .vendor = const_le16(0x0627), /* same we use for usb hid devices */ + .product = const_le16(0x0003), + .version = const_le16(0x0001), + }, + },{ + .select = VIRTIO_INPUT_CFG_EV_BITS, + .subsel = EV_ABS, + .size = 1, + .u.bitmap = { + (1 << ABS_X) | (1 << ABS_Y), + }, + },{ + .select = VIRTIO_INPUT_CFG_ABS_INFO, + .subsel = ABS_X, + .size = sizeof(virtio_input_absinfo), + .u.abs.min = const_le32(INPUT_EVENT_ABS_MIN), + .u.abs.max = const_le32(INPUT_EVENT_ABS_MAX), + },{ + .select = VIRTIO_INPUT_CFG_ABS_INFO, + .subsel = ABS_Y, + .size = sizeof(virtio_input_absinfo), + .u.abs.min = const_le32(INPUT_EVENT_ABS_MIN), + .u.abs.max = const_le32(INPUT_EVENT_ABS_MAX), + }, + { /* end of list */ }, +}; + +static struct virtio_input_config virtio_tablet_config_v2[] = { + { + .select = VIRTIO_INPUT_CFG_ID_NAME, + .size = sizeof(VIRTIO_ID_NAME_TABLET), + .u.string = VIRTIO_ID_NAME_TABLET, + },{ + .select = VIRTIO_INPUT_CFG_ID_DEVIDS, + .size = sizeof(struct virtio_input_devids), + .u.ids = { + .bustype = const_le16(BUS_VIRTUAL), + .vendor = const_le16(0x0627), /* same we use for usb hid devices */ + .product = const_le16(0x0003), + .version = const_le16(0x0002), + }, + },{ + .select = VIRTIO_INPUT_CFG_EV_BITS, + .subsel = EV_ABS, + .size = 1, + .u.bitmap = { + (1 << ABS_X) | (1 << ABS_Y), + }, + },{ + .select = VIRTIO_INPUT_CFG_EV_BITS, + .subsel = EV_REL, + .size = 2, + .u.bitmap = { + 0, + (1 << (REL_WHEEL - 8)) + }, + },{ + .select = VIRTIO_INPUT_CFG_ABS_INFO, + .subsel = ABS_X, + .size = sizeof(virtio_input_absinfo), + .u.abs.min = const_le32(INPUT_EVENT_ABS_MIN), + .u.abs.max = const_le32(INPUT_EVENT_ABS_MAX), + },{ + .select = 
VIRTIO_INPUT_CFG_ABS_INFO, + .subsel = ABS_Y, + .size = sizeof(virtio_input_absinfo), + .u.abs.min = const_le32(INPUT_EVENT_ABS_MIN), + .u.abs.max = const_le32(INPUT_EVENT_ABS_MAX), + }, + { /* end of list */ }, +}; + +static Property virtio_tablet_properties[] = { + DEFINE_PROP_BOOL("wheel-axis", VirtIOInputHID, wheel_axis, true), + DEFINE_PROP_END_OF_LIST(), +}; + +static void virtio_tablet_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + device_class_set_props(dc, virtio_tablet_properties); +} + +static void virtio_tablet_init(Object *obj) +{ + VirtIOInputHID *vhid = VIRTIO_INPUT_HID(obj); + VirtIOInput *vinput = VIRTIO_INPUT(obj); + + vhid->handler = &virtio_tablet_handler; + virtio_input_init_config(vinput, vhid->wheel_axis + ? virtio_tablet_config_v2 + : virtio_tablet_config_v1); + virtio_input_key_config(vinput, keymap_button, + ARRAY_SIZE(keymap_button)); +} + +static const TypeInfo virtio_tablet_info = { + .name = TYPE_VIRTIO_TABLET, + .parent = TYPE_VIRTIO_INPUT_HID, + .instance_size = sizeof(VirtIOInputHID), + .instance_init = virtio_tablet_init, + .class_init = virtio_tablet_class_init, +}; + +/* ----------------------------------------------------------------- */ + +static void virtio_register_types(void) +{ + type_register_static(&virtio_input_hid_info); + type_register_static(&virtio_keyboard_info); + type_register_static(&virtio_mouse_info); + type_register_static(&virtio_tablet_info); +} + +type_init(virtio_register_types) diff --git a/hw/input/virtio-input-host.c b/hw/input/virtio-input-host.c new file mode 100644 index 000000000..137efba57 --- /dev/null +++ b/hw/input/virtio-input-host.c @@ -0,0 +1,260 @@ +/* + * This work is licensed under the terms of the GNU GPL, version 2 or + * (at your option) any later version. See the COPYING file in the + * top-level directory. 
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "qemu/module.h"
+#include "qemu/sockets.h"
+
+#include "hw/virtio/virtio.h"
+#include "hw/qdev-properties.h"
+#include "hw/virtio/virtio-input.h"
+
+#include <sys/ioctl.h>
+#include "standard-headers/linux/input.h"
+
+/* ----------------------------------------------------------------- */
+
+static struct virtio_input_config virtio_input_host_config[] = {
+    { /* empty list */ },
+};
+
+static void virtio_input_host_event(void *opaque)
+{
+    VirtIOInputHost *vih = opaque;
+    VirtIOInput *vinput = VIRTIO_INPUT(vih);
+    struct virtio_input_event virtio;
+    struct input_event evdev;
+    int rc;
+
+    for (;;) {
+        rc = read(vih->fd, &evdev, sizeof(evdev));
+        if (rc != sizeof(evdev)) {
+            break;
+        }
+
+        virtio.type = cpu_to_le16(evdev.type);
+        virtio.code = cpu_to_le16(evdev.code);
+        virtio.value = cpu_to_le32(evdev.value);
+        virtio_input_send(vinput, &virtio);
+    }
+}
+
+static void virtio_input_bits_config(VirtIOInputHost *vih,
+                                     int type, int count)
+{
+    virtio_input_config bits;
+    int rc, i, size = 0;
+
+    memset(&bits, 0, sizeof(bits));
+    rc = ioctl(vih->fd, EVIOCGBIT(type, count/8), bits.u.bitmap);
+    if (rc < 0) {
+        return;
+    }
+
+    for (i = 0; i < count/8; i++) {
+        if (bits.u.bitmap[i]) {
+            size = i+1;
+        }
+    }
+    if (size == 0) {
+        return;
+    }
+
+    bits.select = VIRTIO_INPUT_CFG_EV_BITS;
+    bits.subsel = type;
+    bits.size = size;
+    virtio_input_add_config(VIRTIO_INPUT(vih), &bits);
+}
+
+static void virtio_input_abs_config(VirtIOInputHost *vih, int axis)
+{
+    virtio_input_config config;
+    struct input_absinfo absinfo;
+    int rc;
+
+    rc = ioctl(vih->fd, EVIOCGABS(axis), &absinfo);
+    if (rc < 0) {
+        return;
+    }
+
+    memset(&config, 0, sizeof(config));
+    config.select = VIRTIO_INPUT_CFG_ABS_INFO;
+    config.subsel = axis;
+    config.size = sizeof(virtio_input_absinfo);
+
+    config.u.abs.min = cpu_to_le32(absinfo.minimum);
+    config.u.abs.max = cpu_to_le32(absinfo.maximum);
+    config.u.abs.fuzz = cpu_to_le32(absinfo.fuzz);
+    config.u.abs.flat = cpu_to_le32(absinfo.flat);
+    config.u.abs.res = cpu_to_le32(absinfo.resolution);
+
+    virtio_input_add_config(VIRTIO_INPUT(vih), &config);
+}
+
+static void virtio_input_host_realize(DeviceState *dev, Error **errp)
+{
+    VirtIOInputHost *vih = VIRTIO_INPUT_HOST(dev);
+    VirtIOInput *vinput = VIRTIO_INPUT(dev);
+    virtio_input_config id, *abs;
+    struct input_id ids;
+    int rc, ver, i, axis;
+    uint8_t byte;
+
+    if (!vih->evdev) {
+        error_setg(errp, "evdev property is required");
+        return;
+    }
+
+    vih->fd = open(vih->evdev, O_RDWR);
+    if (vih->fd < 0) {
+        error_setg_file_open(errp, errno, vih->evdev);
+        return;
+    }
+    qemu_set_nonblock(vih->fd);
+
+    rc = ioctl(vih->fd, EVIOCGVERSION, &ver);
+    if (rc < 0) {
+        error_setg(errp, "%s: is not an evdev device", vih->evdev);
+        goto err_close;
+    }
+
+    rc = ioctl(vih->fd, EVIOCGRAB, 1);
+    if (rc < 0) {
+        error_setg_errno(errp, errno, "%s: failed to get exclusive access",
+                         vih->evdev);
+        goto err_close;
+    }
+
+    memset(&id, 0, sizeof(id));
+    ioctl(vih->fd, EVIOCGNAME(sizeof(id.u.string)-1), id.u.string);
+    id.select = VIRTIO_INPUT_CFG_ID_NAME;
+    id.size = strlen(id.u.string);
+    virtio_input_add_config(vinput, &id);
+
+    if (ioctl(vih->fd, EVIOCGID, &ids) == 0) {
+        memset(&id, 0, sizeof(id));
+        id.select = VIRTIO_INPUT_CFG_ID_DEVIDS;
+        id.size = sizeof(struct virtio_input_devids);
+        id.u.ids.bustype = cpu_to_le16(ids.bustype);
+        id.u.ids.vendor = cpu_to_le16(ids.vendor);
+        id.u.ids.product = cpu_to_le16(ids.product);
+        id.u.ids.version = cpu_to_le16(ids.version);
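+        /*
+         * The host's bus/vendor/product/version IDs are forwarded to
+         * the guest verbatim.  After this, each EV_* bitmap is exported,
+         * and for every set bit in the EV_ABS bitmap the per-axis
+         * EVIOCGABS data (min/max/fuzz/flat/resolution) is forwarded
+         * as well.
+         */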
virtio_input_add_config(vinput, &id); + } + + virtio_input_bits_config(vih, EV_KEY, KEY_CNT); + virtio_input_bits_config(vih, EV_REL, REL_CNT); + virtio_input_bits_config(vih, EV_ABS, ABS_CNT); + virtio_input_bits_config(vih, EV_MSC, MSC_CNT); + virtio_input_bits_config(vih, EV_SW, SW_CNT); + virtio_input_bits_config(vih, EV_LED, LED_CNT); + + abs = virtio_input_find_config(VIRTIO_INPUT(vih), + VIRTIO_INPUT_CFG_EV_BITS, EV_ABS); + if (abs) { + for (i = 0; i < abs->size; i++) { + byte = abs->u.bitmap[i]; + axis = 8 * i; + while (byte) { + if (byte & 1) { + virtio_input_abs_config(vih, axis); + } + axis++; + byte >>= 1; + } + } + } + + qemu_set_fd_handler(vih->fd, virtio_input_host_event, NULL, vih); + return; + +err_close: + close(vih->fd); + vih->fd = -1; + return; +} + +static void virtio_input_host_unrealize(DeviceState *dev) +{ + VirtIOInputHost *vih = VIRTIO_INPUT_HOST(dev); + + if (vih->fd > 0) { + qemu_set_fd_handler(vih->fd, NULL, NULL, NULL); + close(vih->fd); + } +} + +static void virtio_input_host_handle_status(VirtIOInput *vinput, + virtio_input_event *event) +{ + VirtIOInputHost *vih = VIRTIO_INPUT_HOST(vinput); + struct input_event evdev; + struct timeval tval; + int rc; + + if (gettimeofday(&tval, NULL)) { + perror("virtio_input_host_handle_status: gettimeofday"); + return; + } + + evdev.input_event_sec = tval.tv_sec; + evdev.input_event_usec = tval.tv_usec; + evdev.type = le16_to_cpu(event->type); + evdev.code = le16_to_cpu(event->code); + evdev.value = le32_to_cpu(event->value); + + rc = write(vih->fd, &evdev, sizeof(evdev)); + if (rc == -1) { + perror("virtio_input_host_handle_status: write"); + } +} + +static const VMStateDescription vmstate_virtio_input_host = { + .name = "virtio-input-host", + .unmigratable = 1, +}; + +static Property virtio_input_host_properties[] = { + DEFINE_PROP_STRING("evdev", VirtIOInputHost, evdev), + DEFINE_PROP_END_OF_LIST(), +}; + +static void virtio_input_host_class_init(ObjectClass *klass, void *data) +{ + VirtIOInputClass *vic = VIRTIO_INPUT_CLASS(klass); + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->vmsd = &vmstate_virtio_input_host; + device_class_set_props(dc, virtio_input_host_properties); + vic->realize = virtio_input_host_realize; + vic->unrealize = virtio_input_host_unrealize; + vic->handle_status = virtio_input_host_handle_status; +} + +static void virtio_input_host_init(Object *obj) +{ + VirtIOInput *vinput = VIRTIO_INPUT(obj); + + virtio_input_init_config(vinput, virtio_input_host_config); +} + +static const TypeInfo virtio_input_host_info = { + .name = TYPE_VIRTIO_INPUT_HOST, + .parent = TYPE_VIRTIO_INPUT, + .instance_size = sizeof(VirtIOInputHost), + .instance_init = virtio_input_host_init, + .class_init = virtio_input_host_class_init, +}; + +/* ----------------------------------------------------------------- */ + +static void virtio_register_types(void) +{ + type_register_static(&virtio_input_host_info); +} + +type_init(virtio_register_types) diff --git a/hw/input/virtio-input.c b/hw/input/virtio-input.c new file mode 100644 index 000000000..54bcb46c7 --- /dev/null +++ b/hw/input/virtio-input.c @@ -0,0 +1,343 @@ +/* + * This work is licensed under the terms of the GNU GPL, version 2 or + * (at your option) any later version. See the COPYING file in the + * top-level directory. 
+ */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/iov.h" +#include "qemu/module.h" +#include "trace.h" + +#include "hw/virtio/virtio.h" +#include "hw/qdev-properties.h" +#include "hw/virtio/virtio-input.h" + +#include "standard-headers/linux/input.h" + +#define VIRTIO_INPUT_VM_VERSION 1 + +/* ----------------------------------------------------------------- */ + +void virtio_input_send(VirtIOInput *vinput, virtio_input_event *event) +{ + VirtQueueElement *elem; + int i, len; + + if (!vinput->active) { + return; + } + + /* queue up events ... */ + if (vinput->qindex == vinput->qsize) { + vinput->qsize++; + vinput->queue = g_realloc(vinput->queue, vinput->qsize * + sizeof(vinput->queue[0])); + } + vinput->queue[vinput->qindex++].event = *event; + + /* ... until we see a report sync ... */ + if (event->type != cpu_to_le16(EV_SYN) || + event->code != cpu_to_le16(SYN_REPORT)) { + return; + } + + /* ... then check available space ... */ + for (i = 0; i < vinput->qindex; i++) { + elem = virtqueue_pop(vinput->evt, sizeof(VirtQueueElement)); + if (!elem) { + while (--i >= 0) { + virtqueue_unpop(vinput->evt, vinput->queue[i].elem, 0); + } + vinput->qindex = 0; + trace_virtio_input_queue_full(); + return; + } + vinput->queue[i].elem = elem; + } + + /* ... and finally pass them to the guest */ + for (i = 0; i < vinput->qindex; i++) { + elem = vinput->queue[i].elem; + len = iov_from_buf(elem->in_sg, elem->in_num, + 0, &vinput->queue[i].event, sizeof(virtio_input_event)); + virtqueue_push(vinput->evt, elem, len); + g_free(elem); + } + virtio_notify(VIRTIO_DEVICE(vinput), vinput->evt); + vinput->qindex = 0; +} + +static void virtio_input_handle_evt(VirtIODevice *vdev, VirtQueue *vq) +{ + /* nothing */ +} + +static void virtio_input_handle_sts(VirtIODevice *vdev, VirtQueue *vq) +{ + VirtIOInputClass *vic = VIRTIO_INPUT_GET_CLASS(vdev); + VirtIOInput *vinput = VIRTIO_INPUT(vdev); + virtio_input_event event; + VirtQueueElement *elem; + int len; + + for (;;) { + elem = virtqueue_pop(vinput->sts, sizeof(VirtQueueElement)); + if (!elem) { + break; + } + + memset(&event, 0, sizeof(event)); + len = iov_to_buf(elem->out_sg, elem->out_num, + 0, &event, sizeof(event)); + if (vic->handle_status) { + vic->handle_status(vinput, &event); + } + virtqueue_push(vinput->sts, elem, len); + g_free(elem); + } + virtio_notify(vdev, vinput->sts); +} + +virtio_input_config *virtio_input_find_config(VirtIOInput *vinput, + uint8_t select, + uint8_t subsel) +{ + VirtIOInputConfig *cfg; + + QTAILQ_FOREACH(cfg, &vinput->cfg_list, node) { + if (select == cfg->config.select && + subsel == cfg->config.subsel) { + return &cfg->config; + } + } + return NULL; +} + +void virtio_input_add_config(VirtIOInput *vinput, + virtio_input_config *config) +{ + VirtIOInputConfig *cfg; + + if (virtio_input_find_config(vinput, config->select, config->subsel)) { + /* should not happen */ + fprintf(stderr, "%s: duplicate config: %d/%d\n", + __func__, config->select, config->subsel); + abort(); + } + + cfg = g_new0(VirtIOInputConfig, 1); + cfg->config = *config; + QTAILQ_INSERT_TAIL(&vinput->cfg_list, cfg, node); +} + +void virtio_input_init_config(VirtIOInput *vinput, + virtio_input_config *config) +{ + int i = 0; + + QTAILQ_INIT(&vinput->cfg_list); + while (config[i].select) { + virtio_input_add_config(vinput, config + i); + i++; + } +} + +void virtio_input_idstr_config(VirtIOInput *vinput, + uint8_t select, const char *string) +{ + virtio_input_config id; + + if (!string) { + return; + } + memset(&id, 0, sizeof(id)); + id.select = 
select; + id.size = snprintf(id.u.string, sizeof(id.u.string), "%s", string); + virtio_input_add_config(vinput, &id); +} + +static void virtio_input_get_config(VirtIODevice *vdev, uint8_t *config_data) +{ + VirtIOInput *vinput = VIRTIO_INPUT(vdev); + virtio_input_config *config; + + config = virtio_input_find_config(vinput, vinput->cfg_select, + vinput->cfg_subsel); + if (config) { + memcpy(config_data, config, vinput->cfg_size); + } else { + memset(config_data, 0, vinput->cfg_size); + } +} + +static void virtio_input_set_config(VirtIODevice *vdev, + const uint8_t *config_data) +{ + VirtIOInput *vinput = VIRTIO_INPUT(vdev); + virtio_input_config *config = (virtio_input_config *)config_data; + + vinput->cfg_select = config->select; + vinput->cfg_subsel = config->subsel; + virtio_notify_config(vdev); +} + +static uint64_t virtio_input_get_features(VirtIODevice *vdev, uint64_t f, + Error **errp) +{ + return f; +} + +static void virtio_input_set_status(VirtIODevice *vdev, uint8_t val) +{ + VirtIOInputClass *vic = VIRTIO_INPUT_GET_CLASS(vdev); + VirtIOInput *vinput = VIRTIO_INPUT(vdev); + + if (val & VIRTIO_CONFIG_S_DRIVER_OK) { + if (!vinput->active) { + vinput->active = true; + if (vic->change_active) { + vic->change_active(vinput); + } + } + } +} + +static void virtio_input_reset(VirtIODevice *vdev) +{ + VirtIOInputClass *vic = VIRTIO_INPUT_GET_CLASS(vdev); + VirtIOInput *vinput = VIRTIO_INPUT(vdev); + + if (vinput->active) { + vinput->active = false; + if (vic->change_active) { + vic->change_active(vinput); + } + } +} + +static int virtio_input_post_load(void *opaque, int version_id) +{ + VirtIOInput *vinput = opaque; + VirtIOInputClass *vic = VIRTIO_INPUT_GET_CLASS(vinput); + VirtIODevice *vdev = VIRTIO_DEVICE(vinput); + + vinput->active = vdev->status & VIRTIO_CONFIG_S_DRIVER_OK; + if (vic->change_active) { + vic->change_active(vinput); + } + return 0; +} + +static void virtio_input_device_realize(DeviceState *dev, Error **errp) +{ + VirtIOInputClass *vic = VIRTIO_INPUT_GET_CLASS(dev); + VirtIODevice *vdev = VIRTIO_DEVICE(dev); + VirtIOInput *vinput = VIRTIO_INPUT(dev); + VirtIOInputConfig *cfg; + Error *local_err = NULL; + + if (vic->realize) { + vic->realize(dev, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + } + + virtio_input_idstr_config(vinput, VIRTIO_INPUT_CFG_ID_SERIAL, + vinput->serial); + + QTAILQ_FOREACH(cfg, &vinput->cfg_list, node) { + if (vinput->cfg_size < cfg->config.size) { + vinput->cfg_size = cfg->config.size; + } + } + vinput->cfg_size += 8; + assert(vinput->cfg_size <= sizeof(virtio_input_config)); + + virtio_init(vdev, "virtio-input", VIRTIO_ID_INPUT, + vinput->cfg_size); + vinput->evt = virtio_add_queue(vdev, 64, virtio_input_handle_evt); + vinput->sts = virtio_add_queue(vdev, 64, virtio_input_handle_sts); +} + +static void virtio_input_finalize(Object *obj) +{ + VirtIOInput *vinput = VIRTIO_INPUT(obj); + VirtIOInputConfig *cfg, *next; + + QTAILQ_FOREACH_SAFE(cfg, &vinput->cfg_list, node, next) { + QTAILQ_REMOVE(&vinput->cfg_list, cfg, node); + g_free(cfg); + } + + g_free(vinput->queue); +} + +static void virtio_input_device_unrealize(DeviceState *dev) +{ + VirtIOInputClass *vic = VIRTIO_INPUT_GET_CLASS(dev); + VirtIODevice *vdev = VIRTIO_DEVICE(dev); + VirtIOInput *vinput = VIRTIO_INPUT(dev); + + if (vic->unrealize) { + vic->unrealize(dev); + } + virtio_delete_queue(vinput->evt); + virtio_delete_queue(vinput->sts); + virtio_cleanup(vdev); +} + +static const VMStateDescription vmstate_virtio_input = { + .name = "virtio-input", + 
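+    /*
+     * Only the generic virtio device state is migrated here; queued
+     * events are transient, and ->active is reconstructed in post_load
+     * from the migrated device status (see virtio_input_post_load).
+     */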
.minimum_version_id = VIRTIO_INPUT_VM_VERSION, + .version_id = VIRTIO_INPUT_VM_VERSION, + .fields = (VMStateField[]) { + VMSTATE_VIRTIO_DEVICE, + VMSTATE_END_OF_LIST() + }, + .post_load = virtio_input_post_load, +}; + +static Property virtio_input_properties[] = { + DEFINE_PROP_STRING("serial", VirtIOInput, serial), + DEFINE_PROP_END_OF_LIST(), +}; + +static void virtio_input_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass); + + device_class_set_props(dc, virtio_input_properties); + dc->vmsd = &vmstate_virtio_input; + set_bit(DEVICE_CATEGORY_INPUT, dc->categories); + vdc->realize = virtio_input_device_realize; + vdc->unrealize = virtio_input_device_unrealize; + vdc->get_config = virtio_input_get_config; + vdc->set_config = virtio_input_set_config; + vdc->get_features = virtio_input_get_features; + vdc->set_status = virtio_input_set_status; + vdc->reset = virtio_input_reset; +} + +static const TypeInfo virtio_input_info = { + .name = TYPE_VIRTIO_INPUT, + .parent = TYPE_VIRTIO_DEVICE, + .instance_size = sizeof(VirtIOInput), + .class_size = sizeof(VirtIOInputClass), + .class_init = virtio_input_class_init, + .abstract = true, + .instance_finalize = virtio_input_finalize, +}; + +/* ----------------------------------------------------------------- */ + +static void virtio_register_types(void) +{ + type_register_static(&virtio_input_info); +} + +type_init(virtio_register_types) diff --git a/hw/intc/Kconfig b/hw/intc/Kconfig new file mode 100644 index 000000000..78aed93c4 --- /dev/null +++ b/hw/intc/Kconfig @@ -0,0 +1,75 @@ +config HEATHROW_PIC + bool + +config I8259 + bool + select ISA_BUS + +config PL190 + bool + +config IOAPIC + bool + select I8259 + +config ARM_GIC + bool + select MSI_NONBROKEN + +config OPENPIC + bool + select MSI_NONBROKEN + +config APIC + bool + select MSI_NONBROKEN + select I8259 + +config ARM_GIC_KVM + bool + default y + depends on ARM_GIC && KVM + +config XICS + bool + +config XIVE + bool + +config ALLWINNER_A10_PIC + bool + +config S390_FLIC + bool + +config S390_FLIC_KVM + bool + default y + depends on S390_FLIC && KVM + +config OMPIC + bool + +config PPC_UIC + bool + +config SH_INTC + bool + +config RX_ICU + bool + +config LOONGSON_LIOINTC + bool + +config RISCV_ACLINT + bool + +config SIFIVE_PLIC + bool + +config GOLDFISH_PIC + bool + +config M68K_IRQC + bool diff --git a/hw/intc/allwinner-a10-pic.c b/hw/intc/allwinner-a10-pic.c new file mode 100644 index 000000000..8cca12480 --- /dev/null +++ b/hw/intc/allwinner-a10-pic.c @@ -0,0 +1,215 @@ +/* + * Allwinner A10 interrupt controller device emulation + * + * Copyright (C) 2013 Li Guang + * Written by Li Guang + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ */ + +#include "qemu/osdep.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "hw/intc/allwinner-a10-pic.h" +#include "hw/irq.h" +#include "qemu/log.h" +#include "qemu/module.h" + +static void aw_a10_pic_update(AwA10PICState *s) +{ + uint8_t i; + int irq = 0, fiq = 0, zeroes; + + s->vector = 0; + + for (i = 0; i < AW_A10_PIC_REG_NUM; i++) { + irq |= s->irq_pending[i] & ~s->mask[i]; + fiq |= s->select[i] & s->irq_pending[i] & ~s->mask[i]; + + if (!s->vector) { + zeroes = ctz32(s->irq_pending[i] & ~s->mask[i]); + if (zeroes != 32) { + s->vector = (i * 32 + zeroes) * 4; + } + } + } + + qemu_set_irq(s->parent_irq, !!irq); + qemu_set_irq(s->parent_fiq, !!fiq); +} + +static void aw_a10_pic_set_irq(void *opaque, int irq, int level) +{ + AwA10PICState *s = opaque; + + if (level) { + set_bit(irq % 32, (void *)&s->irq_pending[irq / 32]); + } else { + clear_bit(irq % 32, (void *)&s->irq_pending[irq / 32]); + } + aw_a10_pic_update(s); +} + +static uint64_t aw_a10_pic_read(void *opaque, hwaddr offset, unsigned size) +{ + AwA10PICState *s = opaque; + uint8_t index = (offset & 0xc) / 4; + + switch (offset) { + case AW_A10_PIC_VECTOR: + return s->vector; + case AW_A10_PIC_BASE_ADDR: + return s->base_addr; + case AW_A10_PIC_PROTECT: + return s->protect; + case AW_A10_PIC_NMI: + return s->nmi; + case AW_A10_PIC_IRQ_PENDING ... AW_A10_PIC_IRQ_PENDING + 8: + return s->irq_pending[index]; + case AW_A10_PIC_FIQ_PENDING ... AW_A10_PIC_FIQ_PENDING + 8: + return s->fiq_pending[index]; + case AW_A10_PIC_SELECT ... AW_A10_PIC_SELECT + 8: + return s->select[index]; + case AW_A10_PIC_ENABLE ... AW_A10_PIC_ENABLE + 8: + return s->enable[index]; + case AW_A10_PIC_MASK ... AW_A10_PIC_MASK + 8: + return s->mask[index]; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad offset 0x%x\n", __func__, (int)offset); + break; + } + + return 0; +} + +static void aw_a10_pic_write(void *opaque, hwaddr offset, uint64_t value, + unsigned size) +{ + AwA10PICState *s = opaque; + uint8_t index = (offset & 0xc) / 4; + + switch (offset) { + case AW_A10_PIC_BASE_ADDR: + s->base_addr = value & ~0x3; + break; + case AW_A10_PIC_PROTECT: + s->protect = value; + break; + case AW_A10_PIC_NMI: + s->nmi = value; + break; + case AW_A10_PIC_IRQ_PENDING ... AW_A10_PIC_IRQ_PENDING + 8: + /* + * The register is read-only; nevertheless, Linux (including + * the version originally shipped by Allwinner) pretends to + * write to the register. Just ignore it. + */ + break; + case AW_A10_PIC_FIQ_PENDING ... AW_A10_PIC_FIQ_PENDING + 8: + s->fiq_pending[index] &= ~value; + break; + case AW_A10_PIC_SELECT ... AW_A10_PIC_SELECT + 8: + s->select[index] = value; + break; + case AW_A10_PIC_ENABLE ... AW_A10_PIC_ENABLE + 8: + s->enable[index] = value; + break; + case AW_A10_PIC_MASK ... 
AW_A10_PIC_MASK + 8: + s->mask[index] = value; + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad offset 0x%x\n", __func__, (int)offset); + break; + } + + aw_a10_pic_update(s); +} + +static const MemoryRegionOps aw_a10_pic_ops = { + .read = aw_a10_pic_read, + .write = aw_a10_pic_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static const VMStateDescription vmstate_aw_a10_pic = { + .name = "a10.pic", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(vector, AwA10PICState), + VMSTATE_UINT32(base_addr, AwA10PICState), + VMSTATE_UINT32(protect, AwA10PICState), + VMSTATE_UINT32(nmi, AwA10PICState), + VMSTATE_UINT32_ARRAY(irq_pending, AwA10PICState, AW_A10_PIC_REG_NUM), + VMSTATE_UINT32_ARRAY(fiq_pending, AwA10PICState, AW_A10_PIC_REG_NUM), + VMSTATE_UINT32_ARRAY(enable, AwA10PICState, AW_A10_PIC_REG_NUM), + VMSTATE_UINT32_ARRAY(select, AwA10PICState, AW_A10_PIC_REG_NUM), + VMSTATE_UINT32_ARRAY(mask, AwA10PICState, AW_A10_PIC_REG_NUM), + VMSTATE_END_OF_LIST() + } +}; + +static void aw_a10_pic_init(Object *obj) +{ + AwA10PICState *s = AW_A10_PIC(obj); + SysBusDevice *dev = SYS_BUS_DEVICE(obj); + + qdev_init_gpio_in(DEVICE(dev), aw_a10_pic_set_irq, AW_A10_PIC_INT_NR); + sysbus_init_irq(dev, &s->parent_irq); + sysbus_init_irq(dev, &s->parent_fiq); + memory_region_init_io(&s->iomem, OBJECT(s), &aw_a10_pic_ops, s, + TYPE_AW_A10_PIC, 0x400); + sysbus_init_mmio(dev, &s->iomem); +} + +static void aw_a10_pic_reset(DeviceState *d) +{ + AwA10PICState *s = AW_A10_PIC(d); + uint8_t i; + + s->base_addr = 0; + s->protect = 0; + s->nmi = 0; + s->vector = 0; + for (i = 0; i < AW_A10_PIC_REG_NUM; i++) { + s->irq_pending[i] = 0; + s->fiq_pending[i] = 0; + s->select[i] = 0; + s->enable[i] = 0; + s->mask[i] = 0; + } +} + +static void aw_a10_pic_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = aw_a10_pic_reset; + dc->desc = "allwinner a10 pic"; + dc->vmsd = &vmstate_aw_a10_pic; + } + +static const TypeInfo aw_a10_pic_info = { + .name = TYPE_AW_A10_PIC, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(AwA10PICState), + .instance_init = aw_a10_pic_init, + .class_init = aw_a10_pic_class_init, +}; + +static void aw_a10_register_types(void) +{ + type_register_static(&aw_a10_pic_info); +} + +type_init(aw_a10_register_types); diff --git a/hw/intc/apic.c b/hw/intc/apic.c new file mode 100644 index 000000000..3df11c34d --- /dev/null +++ b/hw/intc/apic.c @@ -0,0 +1,928 @@ +/* + * APIC support + * + * Copyright (c) 2004-2005 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see + */ +#include "qemu/osdep.h" +#include "qemu/thread.h" +#include "hw/i386/apic_internal.h" +#include "hw/i386/apic.h" +#include "hw/i386/ioapic.h" +#include "hw/intc/i8259.h" +#include "hw/pci/msi.h" +#include "qemu/host-utils.h" +#include "sysemu/kvm.h" +#include "trace.h" +#include "hw/i386/apic-msidef.h" +#include "qapi/error.h" +#include "qom/object.h" + +#define MAX_APICS 255 +#define MAX_APIC_WORDS 8 + +#define SYNC_FROM_VAPIC 0x1 +#define SYNC_TO_VAPIC 0x2 +#define SYNC_ISR_IRR_TO_VAPIC 0x4 + +static APICCommonState *local_apics[MAX_APICS + 1]; + +#define TYPE_APIC "apic" +/*This is reusing the APICCommonState typedef from APIC_COMMON */ +DECLARE_INSTANCE_CHECKER(APICCommonState, APIC, + TYPE_APIC) + +static void apic_set_irq(APICCommonState *s, int vector_num, int trigger_mode); +static void apic_update_irq(APICCommonState *s); +static void apic_get_delivery_bitmask(uint32_t *deliver_bitmask, + uint8_t dest, uint8_t dest_mode); + +/* Find first bit starting from msb */ +static int apic_fls_bit(uint32_t value) +{ + return 31 - clz32(value); +} + +/* Find first bit starting from lsb */ +static int apic_ffs_bit(uint32_t value) +{ + return ctz32(value); +} + +static inline void apic_reset_bit(uint32_t *tab, int index) +{ + int i, mask; + i = index >> 5; + mask = 1 << (index & 0x1f); + tab[i] &= ~mask; +} + +/* return -1 if no bit is set */ +static int get_highest_priority_int(uint32_t *tab) +{ + int i; + for (i = 7; i >= 0; i--) { + if (tab[i] != 0) { + return i * 32 + apic_fls_bit(tab[i]); + } + } + return -1; +} + +static void apic_sync_vapic(APICCommonState *s, int sync_type) +{ + VAPICState vapic_state; + size_t length; + off_t start; + int vector; + + if (!s->vapic_paddr) { + return; + } + if (sync_type & SYNC_FROM_VAPIC) { + cpu_physical_memory_read(s->vapic_paddr, &vapic_state, + sizeof(vapic_state)); + s->tpr = vapic_state.tpr; + } + if (sync_type & (SYNC_TO_VAPIC | SYNC_ISR_IRR_TO_VAPIC)) { + start = offsetof(VAPICState, isr); + length = offsetof(VAPICState, enabled) - offsetof(VAPICState, isr); + + if (sync_type & SYNC_TO_VAPIC) { + assert(qemu_cpu_is_self(CPU(s->cpu))); + + vapic_state.tpr = s->tpr; + vapic_state.enabled = 1; + start = 0; + length = sizeof(VAPICState); + } + + vector = get_highest_priority_int(s->isr); + if (vector < 0) { + vector = 0; + } + vapic_state.isr = vector & 0xf0; + + vapic_state.zero = 0; + + vector = get_highest_priority_int(s->irr); + if (vector < 0) { + vector = 0; + } + vapic_state.irr = vector & 0xff; + + address_space_write_rom(&address_space_memory, + s->vapic_paddr + start, + MEMTXATTRS_UNSPECIFIED, + ((void *)&vapic_state) + start, length); + } +} + +static void apic_vapic_base_update(APICCommonState *s) +{ + apic_sync_vapic(s, SYNC_TO_VAPIC); +} + +static void apic_local_deliver(APICCommonState *s, int vector) +{ + uint32_t lvt = s->lvt[vector]; + int trigger_mode; + + trace_apic_local_deliver(vector, (lvt >> 8) & 7); + + if (lvt & APIC_LVT_MASKED) + return; + + switch ((lvt >> 8) & 7) { + case APIC_DM_SMI: + cpu_interrupt(CPU(s->cpu), CPU_INTERRUPT_SMI); + break; + + case APIC_DM_NMI: + cpu_interrupt(CPU(s->cpu), CPU_INTERRUPT_NMI); + break; + + case APIC_DM_EXTINT: + cpu_interrupt(CPU(s->cpu), CPU_INTERRUPT_HARD); + break; + + case APIC_DM_FIXED: + trigger_mode = APIC_TRIGGER_EDGE; + if ((vector == APIC_LVT_LINT0 || vector == APIC_LVT_LINT1) && + (lvt & APIC_LVT_LEVEL_TRIGGER)) + trigger_mode = APIC_TRIGGER_LEVEL; 
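+        /*
+         * LVT entry layout (per the Intel SDM): bits 0-7 vector,
+         * bits 8-10 delivery mode, bit 15 trigger mode, bit 16 mask --
+         * which is what the (lvt >> 8) & 7 and APIC_LVT_MASKED tests
+         * above decode.  Only LINT0/LINT1 honour the level-trigger bit
+         * here; the other local sources are delivered edge-triggered.
+         */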
+ apic_set_irq(s, lvt & 0xff, trigger_mode); + } +} + +void apic_deliver_pic_intr(DeviceState *dev, int level) +{ + APICCommonState *s = APIC(dev); + + if (level) { + apic_local_deliver(s, APIC_LVT_LINT0); + } else { + uint32_t lvt = s->lvt[APIC_LVT_LINT0]; + + switch ((lvt >> 8) & 7) { + case APIC_DM_FIXED: + if (!(lvt & APIC_LVT_LEVEL_TRIGGER)) + break; + apic_reset_bit(s->irr, lvt & 0xff); + /* fall through */ + case APIC_DM_EXTINT: + apic_update_irq(s); + break; + } + } +} + +static void apic_external_nmi(APICCommonState *s) +{ + apic_local_deliver(s, APIC_LVT_LINT1); +} + +#define foreach_apic(apic, deliver_bitmask, code) \ +{\ + int __i, __j;\ + for(__i = 0; __i < MAX_APIC_WORDS; __i++) {\ + uint32_t __mask = deliver_bitmask[__i];\ + if (__mask) {\ + for(__j = 0; __j < 32; __j++) {\ + if (__mask & (1U << __j)) {\ + apic = local_apics[__i * 32 + __j];\ + if (apic) {\ + code;\ + }\ + }\ + }\ + }\ + }\ +} + +static void apic_bus_deliver(const uint32_t *deliver_bitmask, + uint8_t delivery_mode, uint8_t vector_num, + uint8_t trigger_mode) +{ + APICCommonState *apic_iter; + + switch (delivery_mode) { + case APIC_DM_LOWPRI: + /* XXX: search for focus processor, arbitration */ + { + int i, d; + d = -1; + for(i = 0; i < MAX_APIC_WORDS; i++) { + if (deliver_bitmask[i]) { + d = i * 32 + apic_ffs_bit(deliver_bitmask[i]); + break; + } + } + if (d >= 0) { + apic_iter = local_apics[d]; + if (apic_iter) { + apic_set_irq(apic_iter, vector_num, trigger_mode); + } + } + } + return; + + case APIC_DM_FIXED: + break; + + case APIC_DM_SMI: + foreach_apic(apic_iter, deliver_bitmask, + cpu_interrupt(CPU(apic_iter->cpu), CPU_INTERRUPT_SMI) + ); + return; + + case APIC_DM_NMI: + foreach_apic(apic_iter, deliver_bitmask, + cpu_interrupt(CPU(apic_iter->cpu), CPU_INTERRUPT_NMI) + ); + return; + + case APIC_DM_INIT: + /* normal INIT IPI sent to processors */ + foreach_apic(apic_iter, deliver_bitmask, + cpu_interrupt(CPU(apic_iter->cpu), + CPU_INTERRUPT_INIT) + ); + return; + + case APIC_DM_EXTINT: + /* handled in I/O APIC code */ + break; + + default: + return; + } + + foreach_apic(apic_iter, deliver_bitmask, + apic_set_irq(apic_iter, vector_num, trigger_mode) ); +} + +void apic_deliver_irq(uint8_t dest, uint8_t dest_mode, uint8_t delivery_mode, + uint8_t vector_num, uint8_t trigger_mode) +{ + uint32_t deliver_bitmask[MAX_APIC_WORDS]; + + trace_apic_deliver_irq(dest, dest_mode, delivery_mode, vector_num, + trigger_mode); + + apic_get_delivery_bitmask(deliver_bitmask, dest, dest_mode); + apic_bus_deliver(deliver_bitmask, delivery_mode, vector_num, trigger_mode); +} + +static void apic_set_base(APICCommonState *s, uint64_t val) +{ + s->apicbase = (val & 0xfffff000) | + (s->apicbase & (MSR_IA32_APICBASE_BSP | MSR_IA32_APICBASE_ENABLE)); + /* if disabled, cannot be enabled again */ + if (!(val & MSR_IA32_APICBASE_ENABLE)) { + s->apicbase &= ~MSR_IA32_APICBASE_ENABLE; + cpu_clear_apic_feature(&s->cpu->env); + s->spurious_vec &= ~APIC_SV_ENABLE; + } +} + +static void apic_set_tpr(APICCommonState *s, uint8_t val) +{ + /* Updates from cr8 are ignored while the VAPIC is active */ + if (!s->vapic_paddr) { + s->tpr = val << 4; + apic_update_irq(s); + } +} + +int apic_get_highest_priority_irr(DeviceState *dev) +{ + APICCommonState *s; + + if (!dev) { + /* no interrupts */ + return -1; + } + s = APIC_COMMON(dev); + return get_highest_priority_int(s->irr); +} + +static uint8_t apic_get_tpr(APICCommonState *s) +{ + apic_sync_vapic(s, SYNC_FROM_VAPIC); + return s->tpr >> 4; +} + +int apic_get_ppr(APICCommonState *s) +{ + int tpr, 
isrv, ppr;
+
+    tpr = (s->tpr >> 4);
+    isrv = get_highest_priority_int(s->isr);
+    if (isrv < 0)
+        isrv = 0;
+    isrv >>= 4;
+    if (tpr >= isrv)
+        ppr = s->tpr;
+    else
+        ppr = isrv << 4;
+    return ppr;
+}
+
+static int apic_get_arb_pri(APICCommonState *s)
+{
+    /* XXX: arbitration */
+    return 0;
+}
+
+
+/*
+ * <0 - low prio interrupt,
+ *  0 - no interrupt,
+ * >0 - interrupt number
+ */
+static int apic_irq_pending(APICCommonState *s)
+{
+    int irrv, ppr;
+
+    if (!(s->spurious_vec & APIC_SV_ENABLE)) {
+        return 0;
+    }
+
+    irrv = get_highest_priority_int(s->irr);
+    if (irrv < 0) {
+        return 0;
+    }
+    ppr = apic_get_ppr(s);
+    if (ppr && (irrv & 0xf0) <= (ppr & 0xf0)) {
+        return -1;
+    }
+
+    return irrv;
+}
+
+/* signal the CPU if an irq is pending */
+static void apic_update_irq(APICCommonState *s)
+{
+    CPUState *cpu;
+    DeviceState *dev = (DeviceState *)s;
+
+    cpu = CPU(s->cpu);
+    if (!qemu_cpu_is_self(cpu)) {
+        cpu_interrupt(cpu, CPU_INTERRUPT_POLL);
+    } else if (apic_irq_pending(s) > 0) {
+        cpu_interrupt(cpu, CPU_INTERRUPT_HARD);
+    } else if (!apic_accept_pic_intr(dev) || !pic_get_output(isa_pic)) {
+        cpu_reset_interrupt(cpu, CPU_INTERRUPT_HARD);
+    }
+}
+
+void apic_poll_irq(DeviceState *dev)
+{
+    APICCommonState *s = APIC(dev);
+
+    apic_sync_vapic(s, SYNC_FROM_VAPIC);
+    apic_update_irq(s);
+}
+
+static void apic_set_irq(APICCommonState *s, int vector_num, int trigger_mode)
+{
+    apic_report_irq_delivered(!apic_get_bit(s->irr, vector_num));
+
+    apic_set_bit(s->irr, vector_num);
+    if (trigger_mode)
+        apic_set_bit(s->tmr, vector_num);
+    else
+        apic_reset_bit(s->tmr, vector_num);
+    if (s->vapic_paddr) {
+        apic_sync_vapic(s, SYNC_ISR_IRR_TO_VAPIC);
+        /*
+         * The vcpu thread needs to see the new IRR before we pull its current
+         * TPR value. That way, if we miss a lowering of the TPR, the guest
+         * has the chance to notice the new IRR and poll for IRQs on its own.
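+         * (The smp_wmb() below is the write side of that ordering: the
+         * IRR update must be globally visible before the TPR is re-read
+         * via SYNC_FROM_VAPIC.)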
+ */ + smp_wmb(); + apic_sync_vapic(s, SYNC_FROM_VAPIC); + } + apic_update_irq(s); +} + +static void apic_eoi(APICCommonState *s) +{ + int isrv; + isrv = get_highest_priority_int(s->isr); + if (isrv < 0) + return; + apic_reset_bit(s->isr, isrv); + if (!(s->spurious_vec & APIC_SV_DIRECTED_IO) && apic_get_bit(s->tmr, isrv)) { + ioapic_eoi_broadcast(isrv); + } + apic_sync_vapic(s, SYNC_FROM_VAPIC | SYNC_TO_VAPIC); + apic_update_irq(s); +} + +static int apic_find_dest(uint8_t dest) +{ + APICCommonState *apic = local_apics[dest]; + int i; + + if (apic && apic->id == dest) + return dest; /* shortcut in case apic->id == local_apics[dest]->id */ + + for (i = 0; i < MAX_APICS; i++) { + apic = local_apics[i]; + if (apic && apic->id == dest) + return i; + if (!apic) + break; + } + + return -1; +} + +static void apic_get_delivery_bitmask(uint32_t *deliver_bitmask, + uint8_t dest, uint8_t dest_mode) +{ + APICCommonState *apic_iter; + int i; + + if (dest_mode == 0) { + if (dest == 0xff) { + memset(deliver_bitmask, 0xff, MAX_APIC_WORDS * sizeof(uint32_t)); + } else { + int idx = apic_find_dest(dest); + memset(deliver_bitmask, 0x00, MAX_APIC_WORDS * sizeof(uint32_t)); + if (idx >= 0) + apic_set_bit(deliver_bitmask, idx); + } + } else { + /* XXX: cluster mode */ + memset(deliver_bitmask, 0x00, MAX_APIC_WORDS * sizeof(uint32_t)); + for(i = 0; i < MAX_APICS; i++) { + apic_iter = local_apics[i]; + if (apic_iter) { + if (apic_iter->dest_mode == 0xf) { + if (dest & apic_iter->log_dest) + apic_set_bit(deliver_bitmask, i); + } else if (apic_iter->dest_mode == 0x0) { + if ((dest & 0xf0) == (apic_iter->log_dest & 0xf0) && + (dest & apic_iter->log_dest & 0x0f)) { + apic_set_bit(deliver_bitmask, i); + } + } + } else { + break; + } + } + } +} + +static void apic_startup(APICCommonState *s, int vector_num) +{ + s->sipi_vector = vector_num; + cpu_interrupt(CPU(s->cpu), CPU_INTERRUPT_SIPI); +} + +void apic_sipi(DeviceState *dev) +{ + APICCommonState *s = APIC(dev); + + cpu_reset_interrupt(CPU(s->cpu), CPU_INTERRUPT_SIPI); + + if (!s->wait_for_sipi) + return; + cpu_x86_load_seg_cache_sipi(s->cpu, s->sipi_vector); + s->wait_for_sipi = 0; +} + +static void apic_deliver(DeviceState *dev, uint8_t dest, uint8_t dest_mode, + uint8_t delivery_mode, uint8_t vector_num, + uint8_t trigger_mode) +{ + APICCommonState *s = APIC(dev); + uint32_t deliver_bitmask[MAX_APIC_WORDS]; + int dest_shorthand = (s->icr[0] >> 18) & 3; + APICCommonState *apic_iter; + + switch (dest_shorthand) { + case 0: + apic_get_delivery_bitmask(deliver_bitmask, dest, dest_mode); + break; + case 1: + memset(deliver_bitmask, 0x00, sizeof(deliver_bitmask)); + apic_set_bit(deliver_bitmask, s->id); + break; + case 2: + memset(deliver_bitmask, 0xff, sizeof(deliver_bitmask)); + break; + case 3: + memset(deliver_bitmask, 0xff, sizeof(deliver_bitmask)); + apic_reset_bit(deliver_bitmask, s->id); + break; + } + + switch (delivery_mode) { + case APIC_DM_INIT: + { + int trig_mode = (s->icr[0] >> 15) & 1; + int level = (s->icr[0] >> 14) & 1; + if (level == 0 && trig_mode == 1) { + foreach_apic(apic_iter, deliver_bitmask, + apic_iter->arb_id = apic_iter->id ); + return; + } + } + break; + + case APIC_DM_SIPI: + foreach_apic(apic_iter, deliver_bitmask, + apic_startup(apic_iter, vector_num) ); + return; + } + + apic_bus_deliver(deliver_bitmask, delivery_mode, vector_num, trigger_mode); +} + +static bool apic_check_pic(APICCommonState *s) +{ + DeviceState *dev = (DeviceState *)s; + + if (!apic_accept_pic_intr(dev) || !pic_get_output(isa_pic)) { + return false; + } + 
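+    /*
+     * The 8259's output is high and the APIC accepts ExtINT: forward it.
+     * The level argument 1 below makes apic_deliver_pic_intr() route the
+     * request through LINT0's programmed delivery mode.
+     */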
apic_deliver_pic_intr(dev, 1); + return true; +} + +int apic_get_interrupt(DeviceState *dev) +{ + APICCommonState *s = APIC(dev); + int intno; + + /* if the APIC is installed or enabled, we let the 8259 handle the + IRQs */ + if (!s) + return -1; + if (!(s->spurious_vec & APIC_SV_ENABLE)) + return -1; + + apic_sync_vapic(s, SYNC_FROM_VAPIC); + intno = apic_irq_pending(s); + + /* if there is an interrupt from the 8259, let the caller handle + * that first since ExtINT interrupts ignore the priority. + */ + if (intno == 0 || apic_check_pic(s)) { + apic_sync_vapic(s, SYNC_TO_VAPIC); + return -1; + } else if (intno < 0) { + apic_sync_vapic(s, SYNC_TO_VAPIC); + return s->spurious_vec & 0xff; + } + apic_reset_bit(s->irr, intno); + apic_set_bit(s->isr, intno); + apic_sync_vapic(s, SYNC_TO_VAPIC); + + apic_update_irq(s); + + return intno; +} + +int apic_accept_pic_intr(DeviceState *dev) +{ + APICCommonState *s = APIC(dev); + uint32_t lvt0; + + if (!s) + return -1; + + lvt0 = s->lvt[APIC_LVT_LINT0]; + + if ((s->apicbase & MSR_IA32_APICBASE_ENABLE) == 0 || + (lvt0 & APIC_LVT_MASKED) == 0) + return isa_pic != NULL; + + return 0; +} + +static void apic_timer_update(APICCommonState *s, int64_t current_time) +{ + if (apic_next_timer(s, current_time)) { + timer_mod(s->timer, s->next_time); + } else { + timer_del(s->timer); + } +} + +static void apic_timer(void *opaque) +{ + APICCommonState *s = opaque; + + apic_local_deliver(s, APIC_LVT_TIMER); + apic_timer_update(s, s->next_time); +} + +static uint64_t apic_mem_read(void *opaque, hwaddr addr, unsigned size) +{ + DeviceState *dev; + APICCommonState *s; + uint32_t val; + int index; + + if (size < 4) { + return 0; + } + + dev = cpu_get_current_apic(); + if (!dev) { + return 0; + } + s = APIC(dev); + + index = (addr >> 4) & 0xff; + switch(index) { + case 0x02: /* id */ + val = s->id << 24; + break; + case 0x03: /* version */ + val = s->version | ((APIC_LVT_NB - 1) << 16); + break; + case 0x08: + apic_sync_vapic(s, SYNC_FROM_VAPIC); + if (apic_report_tpr_access) { + cpu_report_tpr_access(&s->cpu->env, TPR_ACCESS_READ); + } + val = s->tpr; + break; + case 0x09: + val = apic_get_arb_pri(s); + break; + case 0x0a: + /* ppr */ + val = apic_get_ppr(s); + break; + case 0x0b: + val = 0; + break; + case 0x0d: + val = s->log_dest << 24; + break; + case 0x0e: + val = (s->dest_mode << 28) | 0xfffffff; + break; + case 0x0f: + val = s->spurious_vec; + break; + case 0x10 ... 0x17: + val = s->isr[index & 7]; + break; + case 0x18 ... 0x1f: + val = s->tmr[index & 7]; + break; + case 0x20 ... 0x27: + val = s->irr[index & 7]; + break; + case 0x28: + val = s->esr; + break; + case 0x30: + case 0x31: + val = s->icr[index & 1]; + break; + case 0x32 ... 0x37: + val = s->lvt[index - 0x32]; + break; + case 0x38: + val = s->initial_count; + break; + case 0x39: + val = apic_get_current_count(s); + break; + case 0x3e: + val = s->divide_conf; + break; + default: + s->esr |= APIC_ESR_ILLEGAL_ADDRESS; + val = 0; + break; + } + trace_apic_mem_readl(addr, val); + return val; +} + +static void apic_send_msi(MSIMessage *msi) +{ + uint64_t addr = msi->address; + uint32_t data = msi->data; + uint8_t dest = (addr & MSI_ADDR_DEST_ID_MASK) >> MSI_ADDR_DEST_ID_SHIFT; + uint8_t vector = (data & MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT; + uint8_t dest_mode = (addr >> MSI_ADDR_DEST_MODE_SHIFT) & 0x1; + uint8_t trigger_mode = (data >> MSI_DATA_TRIGGER_SHIFT) & 0x1; + uint8_t delivery = (data >> MSI_DATA_DELIVERY_MODE_SHIFT) & 0x7; + /* XXX: Ignore redirection hint. 
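+     * Address/data are decoded per the standard MSI format used above:
+     * address bits 19:12 carry the destination ID, bit 3 the redirection
+     * hint, bit 2 the destination mode; data bits 7:0 carry the vector,
+     * bits 10:8 the delivery mode, and bit 15 the trigger mode.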
*/ + apic_deliver_irq(dest, dest_mode, delivery, vector, trigger_mode); +} + +static void apic_mem_write(void *opaque, hwaddr addr, uint64_t val, + unsigned size) +{ + DeviceState *dev; + APICCommonState *s; + int index = (addr >> 4) & 0xff; + + if (size < 4) { + return; + } + + if (addr > 0xfff || !index) { + /* MSI and MMIO APIC are at the same memory location, + * but actually not on the global bus: MSI is on PCI bus + * APIC is connected directly to the CPU. + * Mapping them on the global bus happens to work because + * MSI registers are reserved in APIC MMIO and vice versa. */ + MSIMessage msi = { .address = addr, .data = val }; + apic_send_msi(&msi); + return; + } + + dev = cpu_get_current_apic(); + if (!dev) { + return; + } + s = APIC(dev); + + trace_apic_mem_writel(addr, val); + + switch(index) { + case 0x02: + s->id = (val >> 24); + break; + case 0x03: + break; + case 0x08: + if (apic_report_tpr_access) { + cpu_report_tpr_access(&s->cpu->env, TPR_ACCESS_WRITE); + } + s->tpr = val; + apic_sync_vapic(s, SYNC_TO_VAPIC); + apic_update_irq(s); + break; + case 0x09: + case 0x0a: + break; + case 0x0b: /* EOI */ + apic_eoi(s); + break; + case 0x0d: + s->log_dest = val >> 24; + break; + case 0x0e: + s->dest_mode = val >> 28; + break; + case 0x0f: + s->spurious_vec = val & 0x1ff; + apic_update_irq(s); + break; + case 0x10 ... 0x17: + case 0x18 ... 0x1f: + case 0x20 ... 0x27: + case 0x28: + break; + case 0x30: + s->icr[0] = val; + apic_deliver(dev, (s->icr[1] >> 24) & 0xff, (s->icr[0] >> 11) & 1, + (s->icr[0] >> 8) & 7, (s->icr[0] & 0xff), + (s->icr[0] >> 15) & 1); + break; + case 0x31: + s->icr[1] = val; + break; + case 0x32 ... 0x37: + { + int n = index - 0x32; + s->lvt[n] = val; + if (n == APIC_LVT_TIMER) { + apic_timer_update(s, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL)); + } else if (n == APIC_LVT_LINT0 && apic_check_pic(s)) { + apic_update_irq(s); + } + } + break; + case 0x38: + s->initial_count = val; + s->initial_count_load_time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + apic_timer_update(s, s->initial_count_load_time); + break; + case 0x39: + break; + case 0x3e: + { + int v; + s->divide_conf = val & 0xb; + v = (s->divide_conf & 3) | ((s->divide_conf >> 1) & 4); + s->count_shift = (v + 1) & 7; + } + break; + default: + s->esr |= APIC_ESR_ILLEGAL_ADDRESS; + break; + } +} + +static void apic_pre_save(APICCommonState *s) +{ + apic_sync_vapic(s, SYNC_FROM_VAPIC); +} + +static void apic_post_load(APICCommonState *s) +{ + if (s->timer_expiry != -1) { + timer_mod(s->timer, s->timer_expiry); + } else { + timer_del(s->timer); + } +} + +static const MemoryRegionOps apic_io_ops = { + .read = apic_mem_read, + .write = apic_mem_write, + .impl.min_access_size = 1, + .impl.max_access_size = 4, + .valid.min_access_size = 1, + .valid.max_access_size = 4, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void apic_realize(DeviceState *dev, Error **errp) +{ + APICCommonState *s = APIC(dev); + + if (s->id >= MAX_APICS) { + error_setg(errp, "%s initialization failed. 
APIC ID %d is invalid", + object_get_typename(OBJECT(dev)), s->id); + return; + } + + if (kvm_enabled()) { + warn_report("Userspace local APIC is deprecated for KVM."); + warn_report("Do not use kernel-irqchip except for the -M isapc machine type."); + } + + memory_region_init_io(&s->io_memory, OBJECT(s), &apic_io_ops, s, "apic-msi", + APIC_SPACE_SIZE); + + s->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, apic_timer, s); + local_apics[s->id] = s; + + msi_nonbroken = true; +} + +static void apic_unrealize(DeviceState *dev) +{ + APICCommonState *s = APIC(dev); + + timer_free(s->timer); + local_apics[s->id] = NULL; +} + +static void apic_class_init(ObjectClass *klass, void *data) +{ + APICCommonClass *k = APIC_COMMON_CLASS(klass); + + k->realize = apic_realize; + k->unrealize = apic_unrealize; + k->set_base = apic_set_base; + k->set_tpr = apic_set_tpr; + k->get_tpr = apic_get_tpr; + k->vapic_base_update = apic_vapic_base_update; + k->external_nmi = apic_external_nmi; + k->pre_save = apic_pre_save; + k->post_load = apic_post_load; + k->send_msi = apic_send_msi; +} + +static const TypeInfo apic_info = { + .name = TYPE_APIC, + .instance_size = sizeof(APICCommonState), + .parent = TYPE_APIC_COMMON, + .class_init = apic_class_init, +}; + +static void apic_register_types(void) +{ + type_register_static(&apic_info); +} + +type_init(apic_register_types) diff --git a/hw/intc/apic_common.c b/hw/intc/apic_common.c new file mode 100644 index 000000000..2a2098206 --- /dev/null +++ b/hw/intc/apic_common.c @@ -0,0 +1,497 @@ +/* + * APIC support - common bits of emulated and KVM kernel model + * + * Copyright (c) 2004-2005 Fabrice Bellard + * Copyright (c) 2011 Jan Kiszka, Siemens AG + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see + */ + +#include "qemu/osdep.h" +#include "qemu/error-report.h" +#include "qemu/module.h" +#include "qapi/error.h" +#include "qapi/visitor.h" +#include "hw/i386/apic.h" +#include "hw/i386/apic_internal.h" +#include "trace.h" +#include "hw/boards.h" +#include "sysemu/hax.h" +#include "sysemu/kvm.h" +#include "hw/qdev-properties.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" + +static int apic_irq_delivered; +bool apic_report_tpr_access; + +void cpu_set_apic_base(DeviceState *dev, uint64_t val) +{ + trace_cpu_set_apic_base(val); + + if (dev) { + APICCommonState *s = APIC_COMMON(dev); + APICCommonClass *info = APIC_COMMON_GET_CLASS(s); + /* switching to x2APIC, reset possibly modified xAPIC ID */ + if (!(s->apicbase & MSR_IA32_APICBASE_EXTD) && + (val & MSR_IA32_APICBASE_EXTD)) { + s->id = s->initial_apic_id; + } + info->set_base(s, val); + } +} + +uint64_t cpu_get_apic_base(DeviceState *dev) +{ + if (dev) { + APICCommonState *s = APIC_COMMON(dev); + trace_cpu_get_apic_base((uint64_t)s->apicbase); + return s->apicbase; + } else { + trace_cpu_get_apic_base(MSR_IA32_APICBASE_BSP); + return MSR_IA32_APICBASE_BSP; + } +} + +void cpu_set_apic_tpr(DeviceState *dev, uint8_t val) +{ + APICCommonState *s; + APICCommonClass *info; + + if (!dev) { + return; + } + + s = APIC_COMMON(dev); + info = APIC_COMMON_GET_CLASS(s); + + info->set_tpr(s, val); +} + +uint8_t cpu_get_apic_tpr(DeviceState *dev) +{ + APICCommonState *s; + APICCommonClass *info; + + if (!dev) { + return 0; + } + + s = APIC_COMMON(dev); + info = APIC_COMMON_GET_CLASS(s); + + return info->get_tpr(s); +} + +void apic_enable_tpr_access_reporting(DeviceState *dev, bool enable) +{ + APICCommonState *s = APIC_COMMON(dev); + APICCommonClass *info = APIC_COMMON_GET_CLASS(s); + + apic_report_tpr_access = enable; + if (info->enable_tpr_reporting) { + info->enable_tpr_reporting(s, enable); + } +} + +void apic_enable_vapic(DeviceState *dev, hwaddr paddr) +{ + APICCommonState *s = APIC_COMMON(dev); + APICCommonClass *info = APIC_COMMON_GET_CLASS(s); + + s->vapic_paddr = paddr; + info->vapic_base_update(s); +} + +void apic_handle_tpr_access_report(DeviceState *dev, target_ulong ip, + TPRAccess access) +{ + APICCommonState *s = APIC_COMMON(dev); + + vapic_report_tpr_access(s->vapic, CPU(s->cpu), ip, access); +} + +void apic_report_irq_delivered(int delivered) +{ + apic_irq_delivered += delivered; + + trace_apic_report_irq_delivered(apic_irq_delivered); +} + +void apic_reset_irq_delivered(void) +{ + /* Copy this into a local variable to encourage gcc to emit a plain + * register for a sys/sdt.h marker. For details on this workaround, see: + * https://sourceware.org/bugzilla/show_bug.cgi?id=13296 + */ + volatile int a_i_d = apic_irq_delivered; + trace_apic_reset_irq_delivered(a_i_d); + + apic_irq_delivered = 0; +} + +int apic_get_irq_delivered(void) +{ + trace_apic_get_irq_delivered(apic_irq_delivered); + + return apic_irq_delivered; +} + +void apic_deliver_nmi(DeviceState *dev) +{ + APICCommonState *s = APIC_COMMON(dev); + APICCommonClass *info = APIC_COMMON_GET_CLASS(s); + + info->external_nmi(s); +} + +bool apic_next_timer(APICCommonState *s, int64_t current_time) +{ + int64_t d; + + /* We need to store the timer state separately to support APIC + * implementations that maintain a non-QEMU timer, e.g. inside the + * host kernel. This open-coded state allows us to migrate between + * both models. 
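+     *
+     * Illustrative example (editor's note, mirroring the code below): with
+     * count_shift = 0 and initial_count = 99, the period is
+     * initial_count + 1 = 100 ticks. In periodic mode an elapsed count of
+     * d = 250 is rounded up to the next period boundary,
+     * (250 / 100 + 1) * 100 = 300, so the timer is re-armed 50 ticks in
+     * the future; in one-shot mode the same d >= initial_count means the
+     * timer has already fired and false is returned.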
*/ + s->timer_expiry = -1; + + if (s->lvt[APIC_LVT_TIMER] & APIC_LVT_MASKED) { + return false; + } + + d = (current_time - s->initial_count_load_time) >> s->count_shift; + + if (s->lvt[APIC_LVT_TIMER] & APIC_LVT_TIMER_PERIODIC) { + if (!s->initial_count) { + return false; + } + d = ((d / ((uint64_t)s->initial_count + 1)) + 1) * + ((uint64_t)s->initial_count + 1); + } else { + if (d >= s->initial_count) { + return false; + } + d = (uint64_t)s->initial_count + 1; + } + s->next_time = s->initial_count_load_time + (d << s->count_shift); + s->timer_expiry = s->next_time; + return true; +} + +uint32_t apic_get_current_count(APICCommonState *s) +{ + int64_t d; + uint32_t val; + d = (qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - s->initial_count_load_time) >> + s->count_shift; + if (s->lvt[APIC_LVT_TIMER] & APIC_LVT_TIMER_PERIODIC) { + /* periodic */ + val = s->initial_count - (d % ((uint64_t)s->initial_count + 1)); + } else { + if (d >= s->initial_count) { + val = 0; + } else { + val = s->initial_count - d; + } + } + return val; +} + +void apic_init_reset(DeviceState *dev) +{ + APICCommonState *s; + APICCommonClass *info; + int i; + + if (!dev) { + return; + } + s = APIC_COMMON(dev); + s->tpr = 0; + s->spurious_vec = 0xff; + s->log_dest = 0; + s->dest_mode = 0xf; + memset(s->isr, 0, sizeof(s->isr)); + memset(s->tmr, 0, sizeof(s->tmr)); + memset(s->irr, 0, sizeof(s->irr)); + for (i = 0; i < APIC_LVT_NB; i++) { + s->lvt[i] = APIC_LVT_MASKED; + } + s->esr = 0; + memset(s->icr, 0, sizeof(s->icr)); + s->divide_conf = 0; + s->count_shift = 0; + s->initial_count = 0; + s->initial_count_load_time = 0; + s->next_time = 0; + s->wait_for_sipi = !cpu_is_bsp(s->cpu); + + if (s->timer) { + timer_del(s->timer); + } + s->timer_expiry = -1; + + info = APIC_COMMON_GET_CLASS(s); + if (info->reset) { + info->reset(s); + } +} + +void apic_designate_bsp(DeviceState *dev, bool bsp) +{ + if (dev == NULL) { + return; + } + + APICCommonState *s = APIC_COMMON(dev); + if (bsp) { + s->apicbase |= MSR_IA32_APICBASE_BSP; + } else { + s->apicbase &= ~MSR_IA32_APICBASE_BSP; + } +} + +static void apic_reset_common(DeviceState *dev) +{ + APICCommonState *s = APIC_COMMON(dev); + APICCommonClass *info = APIC_COMMON_GET_CLASS(s); + uint32_t bsp; + + bsp = s->apicbase & MSR_IA32_APICBASE_BSP; + s->apicbase = APIC_DEFAULT_ADDRESS | bsp | MSR_IA32_APICBASE_ENABLE; + s->id = s->initial_apic_id; + + apic_reset_irq_delivered(); + + s->vapic_paddr = 0; + info->vapic_base_update(s); + + apic_init_reset(dev); +} + +static const VMStateDescription vmstate_apic_common; + +static void apic_common_realize(DeviceState *dev, Error **errp) +{ + APICCommonState *s = APIC_COMMON(dev); + APICCommonClass *info; + static DeviceState *vapic; + uint32_t instance_id = s->initial_apic_id; + + /* Normally initial APIC ID should be no more than hundreds */ + assert(instance_id != VMSTATE_INSTANCE_ID_ANY); + + info = APIC_COMMON_GET_CLASS(s); + info->realize(dev, errp); + + /* Note: We need at least 1M to map the VAPIC option ROM */ + if (!vapic && s->vapic_control & VAPIC_ENABLE_MASK && + !hax_enabled() && current_machine->ram_size >= 1024 * 1024) { + vapic = sysbus_create_simple("kvmvapic", -1, NULL); + } + s->vapic = vapic; + if (apic_report_tpr_access && info->enable_tpr_reporting) { + info->enable_tpr_reporting(s, true); + } + + if (s->legacy_instance_id) { + instance_id = VMSTATE_INSTANCE_ID_ANY; + } + vmstate_register_with_alias_id(NULL, instance_id, &vmstate_apic_common, + s, -1, 0, NULL); +} + +static void apic_common_unrealize(DeviceState *dev) +{ + 
APICCommonState *s = APIC_COMMON(dev); + APICCommonClass *info = APIC_COMMON_GET_CLASS(s); + + vmstate_unregister(NULL, &vmstate_apic_common, s); + info->unrealize(dev); + + if (apic_report_tpr_access && info->enable_tpr_reporting) { + info->enable_tpr_reporting(s, false); + } +} + +static int apic_pre_load(void *opaque) +{ + APICCommonState *s = APIC_COMMON(opaque); + + /* The default is !cpu_is_bsp(s->cpu), but the common value is 0 + * so that's what apic_common_sipi_needed checks for. Reset to + * the value that is assumed when the apic_sipi subsection is + * absent. + */ + s->wait_for_sipi = 0; + return 0; +} + +static int apic_dispatch_pre_save(void *opaque) +{ + APICCommonState *s = APIC_COMMON(opaque); + APICCommonClass *info = APIC_COMMON_GET_CLASS(s); + + if (info->pre_save) { + info->pre_save(s); + } + + return 0; +} + +static int apic_dispatch_post_load(void *opaque, int version_id) +{ + APICCommonState *s = APIC_COMMON(opaque); + APICCommonClass *info = APIC_COMMON_GET_CLASS(s); + + if (info->post_load) { + info->post_load(s); + } + return 0; +} + +static bool apic_common_sipi_needed(void *opaque) +{ + APICCommonState *s = APIC_COMMON(opaque); + return s->wait_for_sipi != 0; +} + +static const VMStateDescription vmstate_apic_common_sipi = { + .name = "apic_sipi", + .version_id = 1, + .minimum_version_id = 1, + .needed = apic_common_sipi_needed, + .fields = (VMStateField[]) { + VMSTATE_INT32(sipi_vector, APICCommonState), + VMSTATE_INT32(wait_for_sipi, APICCommonState), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_apic_common = { + .name = "apic", + .version_id = 3, + .minimum_version_id = 3, + .pre_load = apic_pre_load, + .pre_save = apic_dispatch_pre_save, + .post_load = apic_dispatch_post_load, + .fields = (VMStateField[]) { + VMSTATE_UINT32(apicbase, APICCommonState), + VMSTATE_UINT8(id, APICCommonState), + VMSTATE_UINT8(arb_id, APICCommonState), + VMSTATE_UINT8(tpr, APICCommonState), + VMSTATE_UINT32(spurious_vec, APICCommonState), + VMSTATE_UINT8(log_dest, APICCommonState), + VMSTATE_UINT8(dest_mode, APICCommonState), + VMSTATE_UINT32_ARRAY(isr, APICCommonState, 8), + VMSTATE_UINT32_ARRAY(tmr, APICCommonState, 8), + VMSTATE_UINT32_ARRAY(irr, APICCommonState, 8), + VMSTATE_UINT32_ARRAY(lvt, APICCommonState, APIC_LVT_NB), + VMSTATE_UINT32(esr, APICCommonState), + VMSTATE_UINT32_ARRAY(icr, APICCommonState, 2), + VMSTATE_UINT32(divide_conf, APICCommonState), + VMSTATE_INT32(count_shift, APICCommonState), + VMSTATE_UINT32(initial_count, APICCommonState), + VMSTATE_INT64(initial_count_load_time, APICCommonState), + VMSTATE_INT64(next_time, APICCommonState), + VMSTATE_INT64(timer_expiry, + APICCommonState), /* open-coded timer state */ + VMSTATE_END_OF_LIST() + }, + .subsections = (const VMStateDescription*[]) { + &vmstate_apic_common_sipi, + NULL + } +}; + +static Property apic_properties_common[] = { + DEFINE_PROP_UINT8("version", APICCommonState, version, 0x14), + DEFINE_PROP_BIT("vapic", APICCommonState, vapic_control, VAPIC_ENABLE_BIT, + true), + DEFINE_PROP_BOOL("legacy-instance-id", APICCommonState, legacy_instance_id, + false), + DEFINE_PROP_END_OF_LIST(), +}; + +static void apic_common_get_id(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + APICCommonState *s = APIC_COMMON(obj); + uint32_t value; + + value = s->apicbase & MSR_IA32_APICBASE_EXTD ? 
s->initial_apic_id : s->id; + visit_type_uint32(v, name, &value, errp); +} + +static void apic_common_set_id(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + APICCommonState *s = APIC_COMMON(obj); + DeviceState *dev = DEVICE(obj); + uint32_t value; + + if (dev->realized) { + qdev_prop_set_after_realize(dev, name, errp); + return; + } + + if (!visit_type_uint32(v, name, &value, errp)) { + return; + } + + s->initial_apic_id = value; + s->id = (uint8_t)value; +} + +static void apic_common_initfn(Object *obj) +{ + APICCommonState *s = APIC_COMMON(obj); + + s->id = s->initial_apic_id = -1; + object_property_add(obj, "id", "uint32", + apic_common_get_id, + apic_common_set_id, NULL, NULL); +} + +static void apic_common_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = apic_reset_common; + device_class_set_props(dc, apic_properties_common); + dc->realize = apic_common_realize; + dc->unrealize = apic_common_unrealize; + /* + * Reason: APIC and CPU need to be wired up by + * x86_cpu_apic_create() + */ + dc->user_creatable = false; +} + +static const TypeInfo apic_common_type = { + .name = TYPE_APIC_COMMON, + .parent = TYPE_DEVICE, + .instance_size = sizeof(APICCommonState), + .instance_init = apic_common_initfn, + .class_size = sizeof(APICCommonClass), + .class_init = apic_common_class_init, + .abstract = true, +}; + +static void apic_common_register_types(void) +{ + type_register_static(&apic_common_type); +} + +type_init(apic_common_register_types) diff --git a/hw/intc/arm_gic.c b/hw/intc/arm_gic.c new file mode 100644 index 000000000..a994b1f02 --- /dev/null +++ b/hw/intc/arm_gic.c @@ -0,0 +1,2146 @@ +/* + * ARM Generic/Distributed Interrupt Controller + * + * Copyright (c) 2006-2007 CodeSourcery. + * Written by Paul Brook + * + * This code is licensed under the GPL. + */ + +/* This file contains implementation code for the RealView EB interrupt + * controller, MPCore distributed interrupt controller and ARMv7-M + * Nested Vectored Interrupt Controller. + * It is compiled in two ways: + * (1) as a standalone file to produce a sysbus device which is a GIC + * that can be used on the realview board and as one of the builtin + * private peripherals for the ARM MP CPUs (11MPCore, A9, etc) + * (2) by being directly #included into armv7m_nvic.c to produce the + * armv7m_nvic device. + */ + +#include "qemu/osdep.h" +#include "hw/irq.h" +#include "hw/sysbus.h" +#include "gic_internal.h" +#include "qapi/error.h" +#include "hw/core/cpu.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "trace.h" +#include "sysemu/kvm.h" +#include "sysemu/qtest.h" + +/* #define DEBUG_GIC */ + +#ifdef DEBUG_GIC +#define DEBUG_GIC_GATE 1 +#else +#define DEBUG_GIC_GATE 0 +#endif + +#define DPRINTF(fmt, ...) 
do { \ + if (DEBUG_GIC_GATE) { \ + fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \ + } \ + } while (0) + +static const uint8_t gic_id_11mpcore[] = { + 0x00, 0x00, 0x00, 0x00, 0x90, 0x13, 0x04, 0x00, 0x0d, 0xf0, 0x05, 0xb1 +}; + +static const uint8_t gic_id_gicv1[] = { + 0x04, 0x00, 0x00, 0x00, 0x90, 0xb3, 0x1b, 0x00, 0x0d, 0xf0, 0x05, 0xb1 +}; + +static const uint8_t gic_id_gicv2[] = { + 0x04, 0x00, 0x00, 0x00, 0x90, 0xb4, 0x2b, 0x00, 0x0d, 0xf0, 0x05, 0xb1 +}; + +static inline int gic_get_current_cpu(GICState *s) +{ + if (!qtest_enabled() && s->num_cpu > 1) { + return current_cpu->cpu_index; + } + return 0; +} + +static inline int gic_get_current_vcpu(GICState *s) +{ + return gic_get_current_cpu(s) + GIC_NCPU; +} + +/* Return true if this GIC config has interrupt groups, which is + * true if we're a GICv2, or a GICv1 with the security extensions. + */ +static inline bool gic_has_groups(GICState *s) +{ + return s->revision == 2 || s->security_extn; +} + +static inline bool gic_cpu_ns_access(GICState *s, int cpu, MemTxAttrs attrs) +{ + return !gic_is_vcpu(cpu) && s->security_extn && !attrs.secure; +} + +static inline void gic_get_best_irq(GICState *s, int cpu, + int *best_irq, int *best_prio, int *group) +{ + int irq; + int cm = 1 << cpu; + + *best_irq = 1023; + *best_prio = 0x100; + + for (irq = 0; irq < s->num_irq; irq++) { + if (GIC_DIST_TEST_ENABLED(irq, cm) && gic_test_pending(s, irq, cm) && + (!GIC_DIST_TEST_ACTIVE(irq, cm)) && + (irq < GIC_INTERNAL || GIC_DIST_TARGET(irq) & cm)) { + if (GIC_DIST_GET_PRIORITY(irq, cpu) < *best_prio) { + *best_prio = GIC_DIST_GET_PRIORITY(irq, cpu); + *best_irq = irq; + } + } + } + + if (*best_irq < 1023) { + *group = GIC_DIST_TEST_GROUP(*best_irq, cm); + } +} + +static inline void gic_get_best_virq(GICState *s, int cpu, + int *best_irq, int *best_prio, int *group) +{ + int lr_idx = 0; + + *best_irq = 1023; + *best_prio = 0x100; + + for (lr_idx = 0; lr_idx < s->num_lrs; lr_idx++) { + uint32_t lr_entry = s->h_lr[lr_idx][cpu]; + int state = GICH_LR_STATE(lr_entry); + + if (state == GICH_LR_STATE_PENDING) { + int prio = GICH_LR_PRIORITY(lr_entry); + + if (prio < *best_prio) { + *best_prio = prio; + *best_irq = GICH_LR_VIRT_ID(lr_entry); + *group = GICH_LR_GROUP(lr_entry); + } + } + } +} + +/* Return true if IRQ signaling is enabled for the given cpu and at least one + * of the given groups: + * - in the non-virt case, the distributor must be enabled for one of the + * given groups + * - in the virt case, the virtual interface must be enabled. + * - in all cases, the (v)CPU interface must be enabled for one of the given + * groups. + */ +static inline bool gic_irq_signaling_enabled(GICState *s, int cpu, bool virt, + int group_mask) +{ + int cpu_iface = virt ? (cpu + GIC_NCPU) : cpu; + + if (!virt && !(s->ctlr & group_mask)) { + return false; + } + + if (virt && !(s->h_hcr[cpu] & R_GICH_HCR_EN_MASK)) { + return false; + } + + if (!(s->cpu_ctlr[cpu_iface] & group_mask)) { + return false; + } + + return true; +} + +/* TODO: Many places that call this routine could be optimized. */ +/* Update interrupt status after enabled or pending bits have been changed. */ +static inline void gic_update_internal(GICState *s, bool virt) +{ + int best_irq; + int best_prio; + int irq_level, fiq_level; + int cpu, cpu_iface; + int group = 0; + qemu_irq *irq_lines = virt ? s->parent_virq : s->parent_irq; + qemu_irq *fiq_lines = virt ? s->parent_vfiq : s->parent_fiq; + + for (cpu = 0; cpu < s->num_cpu; cpu++) { + cpu_iface = virt ? 
(cpu + GIC_NCPU) : cpu;
+
+        s->current_pending[cpu_iface] = 1023;
+        if (!gic_irq_signaling_enabled(s, cpu, virt,
+                                       GICD_CTLR_EN_GRP0 | GICD_CTLR_EN_GRP1)) {
+            qemu_irq_lower(irq_lines[cpu]);
+            qemu_irq_lower(fiq_lines[cpu]);
+            continue;
+        }
+
+        if (virt) {
+            gic_get_best_virq(s, cpu, &best_irq, &best_prio, &group);
+        } else {
+            gic_get_best_irq(s, cpu, &best_irq, &best_prio, &group);
+        }
+
+        if (best_irq != 1023) {
+            trace_gic_update_bestirq(virt ? "vcpu" : "cpu", cpu,
+                                     best_irq, best_prio,
+                                     s->priority_mask[cpu_iface],
+                                     s->running_priority[cpu_iface]);
+        }
+
+        irq_level = fiq_level = 0;
+
+        if (best_prio < s->priority_mask[cpu_iface]) {
+            s->current_pending[cpu_iface] = best_irq;
+            if (best_prio < s->running_priority[cpu_iface]) {
+                if (gic_irq_signaling_enabled(s, cpu, virt, 1 << group)) {
+                    if (group == 0 &&
+                        s->cpu_ctlr[cpu_iface] & GICC_CTLR_FIQ_EN) {
+                        DPRINTF("Raised pending FIQ %d (cpu %d)\n",
+                                best_irq, cpu_iface);
+                        fiq_level = 1;
+                        trace_gic_update_set_irq(cpu, virt ? "vfiq" : "fiq",
+                                                 fiq_level);
+                    } else {
+                        DPRINTF("Raised pending IRQ %d (cpu %d)\n",
+                                best_irq, cpu_iface);
+                        irq_level = 1;
+                        trace_gic_update_set_irq(cpu, virt ? "virq" : "irq",
+                                                 irq_level);
+                    }
+                }
+            }
+        }
+
+        qemu_set_irq(irq_lines[cpu], irq_level);
+        qemu_set_irq(fiq_lines[cpu], fiq_level);
+    }
+}
+
+static void gic_update(GICState *s)
+{
+    gic_update_internal(s, false);
+}
+
+/* Return true if this LR is empty, i.e. the corresponding bit
+ * in ELRSR is set.
+ */
+static inline bool gic_lr_entry_is_free(uint32_t entry)
+{
+    return (GICH_LR_STATE(entry) == GICH_LR_STATE_INVALID)
+        && (GICH_LR_HW(entry) || !GICH_LR_EOI(entry));
+}
+
+/* Return true if this LR should trigger an EOI maintenance interrupt, i.e. the
+ * corresponding bit in EISR is set.
+ */
+static inline bool gic_lr_entry_is_eoi(uint32_t entry)
+{
+    return (GICH_LR_STATE(entry) == GICH_LR_STATE_INVALID)
+        && !GICH_LR_HW(entry) && GICH_LR_EOI(entry);
+}
+
+static inline void gic_extract_lr_info(GICState *s, int cpu,
+        int *num_eoi, int *num_valid, int *num_pending)
+{
+    int lr_idx;
+
+    *num_eoi = 0;
+    *num_valid = 0;
+    *num_pending = 0;
+
+    for (lr_idx = 0; lr_idx < s->num_lrs; lr_idx++) {
+        uint32_t *entry = &s->h_lr[lr_idx][cpu];
+
+        if (gic_lr_entry_is_eoi(*entry)) {
+            (*num_eoi)++;
+        }
+
+        if (GICH_LR_STATE(*entry) != GICH_LR_STATE_INVALID) {
+            (*num_valid)++;
+        }
+
+        if (GICH_LR_STATE(*entry) == GICH_LR_STATE_PENDING) {
+            (*num_pending)++;
+        }
+    }
+}
+
+static void gic_compute_misr(GICState *s, int cpu)
+{
+    uint32_t value = 0;
+    int vcpu = cpu + GIC_NCPU;
+
+    int num_eoi, num_valid, num_pending;
+
+    gic_extract_lr_info(s, cpu, &num_eoi, &num_valid, &num_pending);
+
+    /* EOI */
+    if (num_eoi) {
+        value |= R_GICH_MISR_EOI_MASK;
+    }
+
+    /* U: true if only 0 or 1 LR entry is valid */
+    if ((s->h_hcr[cpu] & R_GICH_HCR_UIE_MASK) && (num_valid < 2)) {
+        value |= R_GICH_MISR_U_MASK;
+    }
+
+    /* LRENP: EOICount is not 0 */
+    if ((s->h_hcr[cpu] & R_GICH_HCR_LRENPIE_MASK) &&
+        ((s->h_hcr[cpu] & R_GICH_HCR_EOICount_MASK) != 0)) {
+        value |= R_GICH_MISR_LRENP_MASK;
+    }
+
+    /* NP: no pending interrupts */
+    if ((s->h_hcr[cpu] & R_GICH_HCR_NPIE_MASK) && (num_pending == 0)) {
+        value |= R_GICH_MISR_NP_MASK;
+    }
+
+    /* VGrp0E: group0 virq signaling enabled */
+    if ((s->h_hcr[cpu] & R_GICH_HCR_VGRP0EIE_MASK) &&
+        (s->cpu_ctlr[vcpu] & GICC_CTLR_EN_GRP0)) {
+        value |= R_GICH_MISR_VGrp0E_MASK;
+    }
+
+    /* VGrp0D: group0 virq signaling disabled */
+    if ((s->h_hcr[cpu] & R_GICH_HCR_VGRP0DIE_MASK) &&
+        !(s->cpu_ctlr[vcpu] & GICC_CTLR_EN_GRP0)) {
+        value |= R_GICH_MISR_VGrp0D_MASK;
+    }
+
+    /* VGrp1E: group1 virq signaling enabled */
+    if ((s->h_hcr[cpu] & R_GICH_HCR_VGRP1EIE_MASK) &&
+        (s->cpu_ctlr[vcpu] & GICC_CTLR_EN_GRP1)) {
+        value |= R_GICH_MISR_VGrp1E_MASK;
+    }
+
+    /* VGrp1D: group1 virq signaling disabled */
+    if ((s->h_hcr[cpu] & R_GICH_HCR_VGRP1DIE_MASK) &&
+        !(s->cpu_ctlr[vcpu] & GICC_CTLR_EN_GRP1)) {
+        value |= R_GICH_MISR_VGrp1D_MASK;
+    }
+
+    s->h_misr[cpu] = value;
+}
+
+static void gic_update_maintenance(GICState *s)
+{
+    int cpu = 0;
+    int maint_level;
+
+    for (cpu = 0; cpu < s->num_cpu; cpu++) {
+        gic_compute_misr(s, cpu);
+        maint_level = (s->h_hcr[cpu] & R_GICH_HCR_EN_MASK) && s->h_misr[cpu];
+
+        trace_gic_update_maintenance_irq(cpu, maint_level);
+        qemu_set_irq(s->maintenance_irq[cpu], maint_level);
+    }
+}
+
+static void gic_update_virt(GICState *s)
+{
+    gic_update_internal(s, true);
+    gic_update_maintenance(s);
+}
+
+static void gic_set_irq_11mpcore(GICState *s, int irq, int level,
+                                 int cm, int target)
+{
+    if (level) {
+        GIC_DIST_SET_LEVEL(irq, cm);
+        if (GIC_DIST_TEST_EDGE_TRIGGER(irq) || GIC_DIST_TEST_ENABLED(irq, cm)) {
+            DPRINTF("Set %d pending mask %x\n", irq, target);
+            GIC_DIST_SET_PENDING(irq, target);
+        }
+    } else {
+        GIC_DIST_CLEAR_LEVEL(irq, cm);
+    }
+}
+
+static void gic_set_irq_generic(GICState *s, int irq, int level,
+                                int cm, int target)
+{
+    if (level) {
+        GIC_DIST_SET_LEVEL(irq, cm);
+        DPRINTF("Set %d pending mask %x\n", irq, target);
+        if (GIC_DIST_TEST_EDGE_TRIGGER(irq)) {
+            GIC_DIST_SET_PENDING(irq, target);
+        }
+    } else {
+        GIC_DIST_CLEAR_LEVEL(irq, cm);
+    }
+}
+
+/* Process a change in an external IRQ input. */
+static void gic_set_irq(void *opaque, int irq, int level)
+{
+    /* Meaning of the 'irq' parameter:
+     *  [0..N-1]     : external interrupts
+     *  [N..N+31]    : PPI (internal) interrupts for CPU 0
+     *  [N+32..N+63] : PPI (internal) interrupts for CPU 1
+     *  ...
+     */
+    GICState *s = (GICState *)opaque;
+    int cm, target;
+    if (irq < (s->num_irq - GIC_INTERNAL)) {
+        /* The first external input line is internal interrupt 32. */
+        cm = ALL_CPU_MASK;
+        irq += GIC_INTERNAL;
+        target = GIC_DIST_TARGET(irq);
+    } else {
+        int cpu;
+        irq -= (s->num_irq - GIC_INTERNAL);
+        cpu = irq / GIC_INTERNAL;
+        irq %= GIC_INTERNAL;
+        cm = 1 << cpu;
+        target = cm;
+    }
+
+    assert(irq >= GIC_NR_SGIS);
+
+    if (level == GIC_DIST_TEST_LEVEL(irq, cm)) {
+        return;
+    }
+
+    if (s->revision == REV_11MPCORE) {
+        gic_set_irq_11mpcore(s, irq, level, cm, target);
+    } else {
+        gic_set_irq_generic(s, irq, level, cm, target);
+    }
+    trace_gic_set_irq(irq, level, cm, target);
+
+    gic_update(s);
+}
+
+static uint16_t gic_get_current_pending_irq(GICState *s, int cpu,
+                                            MemTxAttrs attrs)
+{
+    uint16_t pending_irq = s->current_pending[cpu];
+
+    if (pending_irq < GIC_MAXIRQ && gic_has_groups(s)) {
+        int group = gic_test_group(s, pending_irq, cpu);
+
+        /* On a GIC without the security extensions, reading this register
+         * behaves in the same way as a secure access to a GIC with them.
+         */
+        bool secure = !gic_cpu_ns_access(s, cpu, attrs);
+
+        if (group == 0 && !secure) {
+            /* Group0 interrupts hidden from Non-secure access */
+            return 1023;
+        }
+        if (group == 1 && secure && !(s->cpu_ctlr[cpu] & GICC_CTLR_ACK_CTL)) {
+            /* Group1 interrupts only seen by Secure access if
+             * AckCtl bit set.
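+             * (Editor's note: 1022 is the architected "Group 1 pending"
+             * special INTID a Secure reader sees when AckCtl is clear,
+             * while 1023 is the spurious INTID used above to hide Group0
+             * interrupts from Non-secure readers.)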
+ */ + return 1022; + } + } + return pending_irq; +} + +static int gic_get_group_priority(GICState *s, int cpu, int irq) +{ + /* Return the group priority of the specified interrupt + * (which is the top bits of its priority, with the number + * of bits masked determined by the applicable binary point register). + */ + int bpr; + uint32_t mask; + + if (gic_has_groups(s) && + !(s->cpu_ctlr[cpu] & GICC_CTLR_CBPR) && + gic_test_group(s, irq, cpu)) { + bpr = s->abpr[cpu] - 1; + assert(bpr >= 0); + } else { + bpr = s->bpr[cpu]; + } + + /* a BPR of 0 means the group priority bits are [7:1]; + * a BPR of 1 means they are [7:2], and so on down to + * a BPR of 7 meaning no group priority bits at all. + */ + mask = ~0U << ((bpr & 7) + 1); + + return gic_get_priority(s, irq, cpu) & mask; +} + +static void gic_activate_irq(GICState *s, int cpu, int irq) +{ + /* Set the appropriate Active Priority Register bit for this IRQ, + * and update the running priority. + */ + int prio = gic_get_group_priority(s, cpu, irq); + int min_bpr = gic_is_vcpu(cpu) ? GIC_VIRT_MIN_BPR : GIC_MIN_BPR; + int preemption_level = prio >> (min_bpr + 1); + int regno = preemption_level / 32; + int bitno = preemption_level % 32; + uint32_t *papr = NULL; + + if (gic_is_vcpu(cpu)) { + assert(regno == 0); + papr = &s->h_apr[gic_get_vcpu_real_id(cpu)]; + } else if (gic_has_groups(s) && gic_test_group(s, irq, cpu)) { + papr = &s->nsapr[regno][cpu]; + } else { + papr = &s->apr[regno][cpu]; + } + + *papr |= (1 << bitno); + + s->running_priority[cpu] = prio; + gic_set_active(s, irq, cpu); +} + +static int gic_get_prio_from_apr_bits(GICState *s, int cpu) +{ + /* Recalculate the current running priority for this CPU based + * on the set bits in the Active Priority Registers. + */ + int i; + + if (gic_is_vcpu(cpu)) { + uint32_t apr = s->h_apr[gic_get_vcpu_real_id(cpu)]; + if (apr) { + return ctz32(apr) << (GIC_VIRT_MIN_BPR + 1); + } else { + return 0x100; + } + } + + for (i = 0; i < GIC_NR_APRS; i++) { + uint32_t apr = s->apr[i][cpu] | s->nsapr[i][cpu]; + if (!apr) { + continue; + } + return (i * 32 + ctz32(apr)) << (GIC_MIN_BPR + 1); + } + return 0x100; +} + +static void gic_drop_prio(GICState *s, int cpu, int group) +{ + /* Drop the priority of the currently active interrupt in the + * specified group. + * + * Note that we can guarantee (because of the requirement to nest + * GICC_IAR reads [which activate an interrupt and raise priority] + * with GICC_EOIR writes [which drop the priority for the interrupt]) + * that the interrupt we're being called for is the highest priority + * active interrupt, meaning that it has the lowest set bit in the + * APR registers. + * + * If the guest does not honour the ordering constraints then the + * behaviour of the GIC is UNPREDICTABLE, which for us means that + * the values of the APR registers might become incorrect and the + * running priority will be wrong, so interrupts that should preempt + * might not do so, and interrupts that should not preempt might do so. + */ + if (gic_is_vcpu(cpu)) { + int rcpu = gic_get_vcpu_real_id(cpu); + + if (s->h_apr[rcpu]) { + /* Clear lowest set bit */ + s->h_apr[rcpu] &= s->h_apr[rcpu] - 1; + } + } else { + int i; + + for (i = 0; i < GIC_NR_APRS; i++) { + uint32_t *papr = group ? 
&s->nsapr[i][cpu] : &s->apr[i][cpu]; + if (!*papr) { + continue; + } + /* Clear lowest set bit */ + *papr &= *papr - 1; + break; + } + } + + s->running_priority[cpu] = gic_get_prio_from_apr_bits(s, cpu); +} + +static inline uint32_t gic_clear_pending_sgi(GICState *s, int irq, int cpu) +{ + int src; + uint32_t ret; + + if (!gic_is_vcpu(cpu)) { + /* Lookup the source CPU for the SGI and clear this in the + * sgi_pending map. Return the src and clear the overall pending + * state on this CPU if the SGI is not pending from any CPUs. + */ + assert(s->sgi_pending[irq][cpu] != 0); + src = ctz32(s->sgi_pending[irq][cpu]); + s->sgi_pending[irq][cpu] &= ~(1 << src); + if (s->sgi_pending[irq][cpu] == 0) { + gic_clear_pending(s, irq, cpu); + } + ret = irq | ((src & 0x7) << 10); + } else { + uint32_t *lr_entry = gic_get_lr_entry(s, irq, cpu); + src = GICH_LR_CPUID(*lr_entry); + + gic_clear_pending(s, irq, cpu); + ret = irq | (src << 10); + } + + return ret; +} + +uint32_t gic_acknowledge_irq(GICState *s, int cpu, MemTxAttrs attrs) +{ + int ret, irq; + + /* gic_get_current_pending_irq() will return 1022 or 1023 appropriately + * for the case where this GIC supports grouping and the pending interrupt + * is in the wrong group. + */ + irq = gic_get_current_pending_irq(s, cpu, attrs); + trace_gic_acknowledge_irq(gic_is_vcpu(cpu) ? "vcpu" : "cpu", + gic_get_vcpu_real_id(cpu), irq); + + if (irq >= GIC_MAXIRQ) { + DPRINTF("ACK, no pending interrupt or it is hidden: %d\n", irq); + return irq; + } + + if (gic_get_priority(s, irq, cpu) >= s->running_priority[cpu]) { + DPRINTF("ACK, pending interrupt (%d) has insufficient priority\n", irq); + return 1023; + } + + gic_activate_irq(s, cpu, irq); + + if (s->revision == REV_11MPCORE) { + /* Clear pending flags for both level and edge triggered interrupts. + * Level triggered IRQs will be reasserted once they become inactive. + */ + gic_clear_pending(s, irq, cpu); + ret = irq; + } else { + if (irq < GIC_NR_SGIS) { + ret = gic_clear_pending_sgi(s, irq, cpu); + } else { + gic_clear_pending(s, irq, cpu); + ret = irq; + } + } + + if (gic_is_vcpu(cpu)) { + gic_update_virt(s); + } else { + gic_update(s); + } + DPRINTF("ACK %d\n", irq); + return ret; +} + +static uint32_t gic_fullprio_mask(GICState *s, int cpu) +{ + /* + * Return a mask word which clears the unimplemented priority + * bits from a priority value for an interrupt. (Not to be + * confused with the group priority, whose mask depends on BPR.) 
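+     *
+     * Illustrative example (editor's note): with n_prio_bits = 5 the mask
+     * computed below is ~0U << (8 - 5) = 0xf8, so a priority value of
+     * 0xab is masked to 0xa8 and the bottom three bits always read as
+     * zero.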
+ */ + int priBits; + + if (gic_is_vcpu(cpu)) { + priBits = GIC_VIRT_MAX_GROUP_PRIO_BITS; + } else { + priBits = s->n_prio_bits; + } + return ~0U << (8 - priBits); +} + +void gic_dist_set_priority(GICState *s, int cpu, int irq, uint8_t val, + MemTxAttrs attrs) +{ + if (s->security_extn && !attrs.secure) { + if (!GIC_DIST_TEST_GROUP(irq, (1 << cpu))) { + return; /* Ignore Non-secure access of Group0 IRQ */ + } + val = 0x80 | (val >> 1); /* Non-secure view */ + } + + val &= gic_fullprio_mask(s, cpu); + + if (irq < GIC_INTERNAL) { + s->priority1[irq][cpu] = val; + } else { + s->priority2[(irq) - GIC_INTERNAL] = val; + } +} + +static uint32_t gic_dist_get_priority(GICState *s, int cpu, int irq, + MemTxAttrs attrs) +{ + uint32_t prio = GIC_DIST_GET_PRIORITY(irq, cpu); + + if (s->security_extn && !attrs.secure) { + if (!GIC_DIST_TEST_GROUP(irq, (1 << cpu))) { + return 0; /* Non-secure access cannot read priority of Group0 IRQ */ + } + prio = (prio << 1) & 0xff; /* Non-secure view */ + } + return prio & gic_fullprio_mask(s, cpu); +} + +static void gic_set_priority_mask(GICState *s, int cpu, uint8_t pmask, + MemTxAttrs attrs) +{ + if (gic_cpu_ns_access(s, cpu, attrs)) { + if (s->priority_mask[cpu] & 0x80) { + /* Priority Mask in upper half */ + pmask = 0x80 | (pmask >> 1); + } else { + /* Non-secure write ignored if priority mask is in lower half */ + return; + } + } + s->priority_mask[cpu] = pmask & gic_fullprio_mask(s, cpu); +} + +static uint32_t gic_get_priority_mask(GICState *s, int cpu, MemTxAttrs attrs) +{ + uint32_t pmask = s->priority_mask[cpu]; + + if (gic_cpu_ns_access(s, cpu, attrs)) { + if (pmask & 0x80) { + /* Priority Mask in upper half, return Non-secure view */ + pmask = (pmask << 1) & 0xff; + } else { + /* Priority Mask in lower half, RAZ */ + pmask = 0; + } + } + return pmask; +} + +static uint32_t gic_get_cpu_control(GICState *s, int cpu, MemTxAttrs attrs) +{ + uint32_t ret = s->cpu_ctlr[cpu]; + + if (gic_cpu_ns_access(s, cpu, attrs)) { + /* Construct the NS banked view of GICC_CTLR from the correct + * bits of the S banked view. We don't need to move the bypass + * control bits because we don't implement that (IMPDEF) part + * of the GIC architecture. + */ + ret = (ret & (GICC_CTLR_EN_GRP1 | GICC_CTLR_EOIMODE_NS)) >> 1; + } + return ret; +} + +static void gic_set_cpu_control(GICState *s, int cpu, uint32_t value, + MemTxAttrs attrs) +{ + uint32_t mask; + + if (gic_cpu_ns_access(s, cpu, attrs)) { + /* The NS view can only write certain bits in the register; + * the rest are unchanged + */ + mask = GICC_CTLR_EN_GRP1; + if (s->revision == 2) { + mask |= GICC_CTLR_EOIMODE_NS; + } + s->cpu_ctlr[cpu] &= ~mask; + s->cpu_ctlr[cpu] |= (value << 1) & mask; + } else { + if (s->revision == 2) { + mask = s->security_extn ? GICC_CTLR_V2_S_MASK : GICC_CTLR_V2_MASK; + } else { + mask = s->security_extn ? GICC_CTLR_V1_S_MASK : GICC_CTLR_V1_MASK; + } + s->cpu_ctlr[cpu] = value & mask; + } + DPRINTF("CPU Interface %d: Group0 Interrupts %sabled, " + "Group1 Interrupts %sabled\n", cpu, + (s->cpu_ctlr[cpu] & GICC_CTLR_EN_GRP0) ? "En" : "Dis", + (s->cpu_ctlr[cpu] & GICC_CTLR_EN_GRP1) ? "En" : "Dis"); +} + +static uint8_t gic_get_running_priority(GICState *s, int cpu, MemTxAttrs attrs) +{ + if ((s->revision != REV_11MPCORE) && (s->running_priority[cpu] > 0xff)) { + /* Idle priority */ + return 0xff; + } + + if (gic_cpu_ns_access(s, cpu, attrs)) { + if (s->running_priority[cpu] & 0x80) { + /* Running priority in upper half of range: return the Non-secure + * view of the priority. 
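+             * (Editor's example: a Secure running priority of 0xd0 is
+             * reported to a Non-secure reader as (0xd0 << 1) & 0xff = 0xa0,
+             * via the uint8_t return truncation below.)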
+ */ + return s->running_priority[cpu] << 1; + } else { + /* Running priority in lower half of range: RAZ */ + return 0; + } + } else { + return s->running_priority[cpu]; + } +} + +/* Return true if we should split priority drop and interrupt deactivation, + * ie whether the relevant EOIMode bit is set. + */ +static bool gic_eoi_split(GICState *s, int cpu, MemTxAttrs attrs) +{ + if (s->revision != 2) { + /* Before GICv2 prio-drop and deactivate are not separable */ + return false; + } + if (gic_cpu_ns_access(s, cpu, attrs)) { + return s->cpu_ctlr[cpu] & GICC_CTLR_EOIMODE_NS; + } + return s->cpu_ctlr[cpu] & GICC_CTLR_EOIMODE; +} + +static void gic_deactivate_irq(GICState *s, int cpu, int irq, MemTxAttrs attrs) +{ + int group; + + if (irq >= GIC_MAXIRQ || (!gic_is_vcpu(cpu) && irq >= s->num_irq)) { + /* + * This handles two cases: + * 1. If software writes the ID of a spurious interrupt [ie 1023] + * to the GICC_DIR, the GIC ignores that write. + * 2. If software writes the number of a non-existent interrupt + * this must be a subcase of "value written is not an active interrupt" + * and so this is UNPREDICTABLE. We choose to ignore it. For vCPUs, + * all IRQs potentially exist, so this limit does not apply. + */ + return; + } + + if (!gic_eoi_split(s, cpu, attrs)) { + /* This is UNPREDICTABLE; we choose to ignore it */ + qemu_log_mask(LOG_GUEST_ERROR, + "gic_deactivate_irq: GICC_DIR write when EOIMode clear"); + return; + } + + if (gic_is_vcpu(cpu) && !gic_virq_is_valid(s, irq, cpu)) { + /* This vIRQ does not have an LR entry which is either active or + * pending and active. Increment EOICount and ignore the write. + */ + int rcpu = gic_get_vcpu_real_id(cpu); + s->h_hcr[rcpu] += 1 << R_GICH_HCR_EOICount_SHIFT; + + /* Update the virtual interface in case a maintenance interrupt should + * be raised. + */ + gic_update_virt(s); + return; + } + + group = gic_has_groups(s) && gic_test_group(s, irq, cpu); + + if (gic_cpu_ns_access(s, cpu, attrs) && !group) { + DPRINTF("Non-secure DI for Group0 interrupt %d ignored\n", irq); + return; + } + + gic_clear_active(s, irq, cpu); +} + +static void gic_complete_irq(GICState *s, int cpu, int irq, MemTxAttrs attrs) +{ + int cm = 1 << cpu; + int group; + + DPRINTF("EOI %d\n", irq); + if (gic_is_vcpu(cpu)) { + /* The call to gic_prio_drop() will clear a bit in GICH_APR iff the + * running prio is < 0x100. + */ + bool prio_drop = s->running_priority[cpu] < 0x100; + + if (irq >= GIC_MAXIRQ) { + /* Ignore spurious interrupt */ + return; + } + + gic_drop_prio(s, cpu, 0); + + if (!gic_eoi_split(s, cpu, attrs)) { + bool valid = gic_virq_is_valid(s, irq, cpu); + if (prio_drop && !valid) { + /* We are in a situation where: + * - V_CTRL.EOIMode is false (no EOI split), + * - The call to gic_drop_prio() cleared a bit in GICH_APR, + * - This vIRQ does not have an LR entry which is either + * active or pending and active. + * In that case, we must increment EOICount. + */ + int rcpu = gic_get_vcpu_real_id(cpu); + s->h_hcr[rcpu] += 1 << R_GICH_HCR_EOICount_SHIFT; + } else if (valid) { + gic_clear_active(s, irq, cpu); + } + } + + gic_update_virt(s); + return; + } + + if (irq >= s->num_irq) { + /* This handles two cases: + * 1. If software writes the ID of a spurious interrupt [ie 1023] + * to the GICC_EOIR, the GIC ignores that write. + * 2. If software writes the number of a non-existent interrupt + * this must be a subcase of "value written does not match the last + * valid interrupt value read from the Interrupt Acknowledge + * register" and so this is UNPREDICTABLE. 
We choose to ignore it. + */ + return; + } + if (s->running_priority[cpu] == 0x100) { + return; /* No active IRQ. */ + } + + if (s->revision == REV_11MPCORE) { + /* Mark level triggered interrupts as pending if they are still + raised. */ + if (!GIC_DIST_TEST_EDGE_TRIGGER(irq) && GIC_DIST_TEST_ENABLED(irq, cm) + && GIC_DIST_TEST_LEVEL(irq, cm) + && (GIC_DIST_TARGET(irq) & cm) != 0) { + DPRINTF("Set %d pending mask %x\n", irq, cm); + GIC_DIST_SET_PENDING(irq, cm); + } + } + + group = gic_has_groups(s) && gic_test_group(s, irq, cpu); + + if (gic_cpu_ns_access(s, cpu, attrs) && !group) { + DPRINTF("Non-secure EOI for Group0 interrupt %d ignored\n", irq); + return; + } + + /* Secure EOI with GICC_CTLR.AckCtl == 0 when the IRQ is a Group 1 + * interrupt is UNPREDICTABLE. We choose to handle it as if AckCtl == 1, + * i.e. go ahead and complete the irq anyway. + */ + + gic_drop_prio(s, cpu, group); + + /* In GICv2 the guest can choose to split priority-drop and deactivate */ + if (!gic_eoi_split(s, cpu, attrs)) { + gic_clear_active(s, irq, cpu); + } + gic_update(s); +} + +static uint32_t gic_dist_readb(void *opaque, hwaddr offset, MemTxAttrs attrs) +{ + GICState *s = (GICState *)opaque; + uint32_t res; + int irq; + int i; + int cpu; + int cm; + int mask; + + cpu = gic_get_current_cpu(s); + cm = 1 << cpu; + if (offset < 0x100) { + if (offset == 0) { /* GICD_CTLR */ + if (s->security_extn && !attrs.secure) { + /* The NS bank of this register is just an alias of the + * EnableGrp1 bit in the S bank version. + */ + return extract32(s->ctlr, 1, 1); + } else { + return s->ctlr; + } + } + if (offset == 4) + /* Interrupt Controller Type Register */ + return ((s->num_irq / 32) - 1) + | ((s->num_cpu - 1) << 5) + | (s->security_extn << 10); + if (offset < 0x08) + return 0; + if (offset >= 0x80) { + /* Interrupt Group Registers: these RAZ/WI if this is an NS + * access to a GIC with the security extensions, or if the GIC + * doesn't have groups at all. + */ + res = 0; + if (!(s->security_extn && !attrs.secure) && gic_has_groups(s)) { + /* Every byte offset holds 8 group status bits */ + irq = (offset - 0x080) * 8; + if (irq >= s->num_irq) { + goto bad_reg; + } + for (i = 0; i < 8; i++) { + if (GIC_DIST_TEST_GROUP(irq + i, cm)) { + res |= (1 << i); + } + } + } + return res; + } + goto bad_reg; + } else if (offset < 0x200) { + /* Interrupt Set/Clear Enable. */ + if (offset < 0x180) + irq = (offset - 0x100) * 8; + else + irq = (offset - 0x180) * 8; + if (irq >= s->num_irq) + goto bad_reg; + res = 0; + for (i = 0; i < 8; i++) { + if (s->security_extn && !attrs.secure && + !GIC_DIST_TEST_GROUP(irq + i, 1 << cpu)) { + continue; /* Ignore Non-secure access of Group0 IRQ */ + } + + if (GIC_DIST_TEST_ENABLED(irq + i, cm)) { + res |= (1 << i); + } + } + } else if (offset < 0x300) { + /* Interrupt Set/Clear Pending. */ + if (offset < 0x280) + irq = (offset - 0x200) * 8; + else + irq = (offset - 0x280) * 8; + if (irq >= s->num_irq) + goto bad_reg; + res = 0; + mask = (irq < GIC_INTERNAL) ? cm : ALL_CPU_MASK; + for (i = 0; i < 8; i++) { + if (s->security_extn && !attrs.secure && + !GIC_DIST_TEST_GROUP(irq + i, 1 << cpu)) { + continue; /* Ignore Non-secure access of Group0 IRQ */ + } + + if (gic_test_pending(s, irq + i, mask)) { + res |= (1 << i); + } + } + } else if (offset < 0x400) { + /* Interrupt Set/Clear Active. 
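+         * (Editor's note: GICD_ISACTIVERn live at 0x300-0x37c and
+         * GICD_ICACTIVERn at 0x380-0x3fc; the code below accepts the
+         * latter only on a GICv2.)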
*/ + if (offset < 0x380) { + irq = (offset - 0x300) * 8; + } else if (s->revision == 2) { + irq = (offset - 0x380) * 8; + } else { + goto bad_reg; + } + + if (irq >= s->num_irq) + goto bad_reg; + res = 0; + mask = (irq < GIC_INTERNAL) ? cm : ALL_CPU_MASK; + for (i = 0; i < 8; i++) { + if (s->security_extn && !attrs.secure && + !GIC_DIST_TEST_GROUP(irq + i, 1 << cpu)) { + continue; /* Ignore Non-secure access of Group0 IRQ */ + } + + if (GIC_DIST_TEST_ACTIVE(irq + i, mask)) { + res |= (1 << i); + } + } + } else if (offset < 0x800) { + /* Interrupt Priority. */ + irq = (offset - 0x400); + if (irq >= s->num_irq) + goto bad_reg; + res = gic_dist_get_priority(s, cpu, irq, attrs); + } else if (offset < 0xc00) { + /* Interrupt CPU Target. */ + if (s->num_cpu == 1 && s->revision != REV_11MPCORE) { + /* For uniprocessor GICs these RAZ/WI */ + res = 0; + } else { + irq = (offset - 0x800); + if (irq >= s->num_irq) { + goto bad_reg; + } + if (irq < 29 && s->revision == REV_11MPCORE) { + res = 0; + } else if (irq < GIC_INTERNAL) { + res = cm; + } else { + res = GIC_DIST_TARGET(irq); + } + } + } else if (offset < 0xf00) { + /* Interrupt Configuration. */ + irq = (offset - 0xc00) * 4; + if (irq >= s->num_irq) + goto bad_reg; + res = 0; + for (i = 0; i < 4; i++) { + if (s->security_extn && !attrs.secure && + !GIC_DIST_TEST_GROUP(irq + i, 1 << cpu)) { + continue; /* Ignore Non-secure access of Group0 IRQ */ + } + + if (GIC_DIST_TEST_MODEL(irq + i)) { + res |= (1 << (i * 2)); + } + if (GIC_DIST_TEST_EDGE_TRIGGER(irq + i)) { + res |= (2 << (i * 2)); + } + } + } else if (offset < 0xf10) { + goto bad_reg; + } else if (offset < 0xf30) { + if (s->revision == REV_11MPCORE) { + goto bad_reg; + } + + if (offset < 0xf20) { + /* GICD_CPENDSGIRn */ + irq = (offset - 0xf10); + } else { + irq = (offset - 0xf20); + /* GICD_SPENDSGIRn */ + } + + if (s->security_extn && !attrs.secure && + !GIC_DIST_TEST_GROUP(irq, 1 << cpu)) { + res = 0; /* Ignore Non-secure access of Group0 IRQ */ + } else { + res = s->sgi_pending[irq][cpu]; + } + } else if (offset < 0xfd0) { + goto bad_reg; + } else if (offset < 0x1000) { + if (offset & 3) { + res = 0; + } else { + switch (s->revision) { + case REV_11MPCORE: + res = gic_id_11mpcore[(offset - 0xfd0) >> 2]; + break; + case 1: + res = gic_id_gicv1[(offset - 0xfd0) >> 2]; + break; + case 2: + res = gic_id_gicv2[(offset - 0xfd0) >> 2]; + break; + default: + res = 0; + } + } + } else { + g_assert_not_reached(); + } + return res; +bad_reg: + qemu_log_mask(LOG_GUEST_ERROR, + "gic_dist_readb: Bad offset %x\n", (int)offset); + return 0; +} + +static MemTxResult gic_dist_read(void *opaque, hwaddr offset, uint64_t *data, + unsigned size, MemTxAttrs attrs) +{ + switch (size) { + case 1: + *data = gic_dist_readb(opaque, offset, attrs); + break; + case 2: + *data = gic_dist_readb(opaque, offset, attrs); + *data |= gic_dist_readb(opaque, offset + 1, attrs) << 8; + break; + case 4: + *data = gic_dist_readb(opaque, offset, attrs); + *data |= gic_dist_readb(opaque, offset + 1, attrs) << 8; + *data |= gic_dist_readb(opaque, offset + 2, attrs) << 16; + *data |= gic_dist_readb(opaque, offset + 3, attrs) << 24; + break; + default: + return MEMTX_ERROR; + } + + trace_gic_dist_read(offset, size, *data); + return MEMTX_OK; +} + +static void gic_dist_writeb(void *opaque, hwaddr offset, + uint32_t value, MemTxAttrs attrs) +{ + GICState *s = (GICState *)opaque; + int irq; + int i; + int cpu; + + cpu = gic_get_current_cpu(s); + if (offset < 0x100) { + if (offset == 0) { + if (s->security_extn && !attrs.secure) { + /* 
NS version is just an alias of the S version's bit 1 */
+                s->ctlr = deposit32(s->ctlr, 1, 1, value);
+            } else if (gic_has_groups(s)) {
+                s->ctlr = value & (GICD_CTLR_EN_GRP0 | GICD_CTLR_EN_GRP1);
+            } else {
+                s->ctlr = value & GICD_CTLR_EN_GRP0;
+            }
+            DPRINTF("Distributor: Group0 %sabled; Group 1 %sabled\n",
+                    s->ctlr & GICD_CTLR_EN_GRP0 ? "En" : "Dis",
+                    s->ctlr & GICD_CTLR_EN_GRP1 ? "En" : "Dis");
+        } else if (offset < 4) {
+            /* ignored. */
+        } else if (offset >= 0x80) {
+            /* Interrupt Group Registers: RAZ/WI for NS access to secure
+             * GIC, or for GICs without groups.
+             */
+            if (!(s->security_extn && !attrs.secure) && gic_has_groups(s)) {
+                /* Every byte offset holds 8 group status bits */
+                irq = (offset - 0x80) * 8;
+                if (irq >= s->num_irq) {
+                    goto bad_reg;
+                }
+                for (i = 0; i < 8; i++) {
+                    /* Group bits are banked for private interrupts */
+                    int cm = (irq < GIC_INTERNAL) ? (1 << cpu) : ALL_CPU_MASK;
+                    if (value & (1 << i)) {
+                        /* Group1 (Non-secure) */
+                        GIC_DIST_SET_GROUP(irq + i, cm);
+                    } else {
+                        /* Group0 (Secure) */
+                        GIC_DIST_CLEAR_GROUP(irq + i, cm);
+                    }
+                }
+            }
+        } else {
+            goto bad_reg;
+        }
+    } else if (offset < 0x180) {
+        /* Interrupt Set Enable. */
+        irq = (offset - 0x100) * 8;
+        if (irq >= s->num_irq)
+            goto bad_reg;
+        if (irq < GIC_NR_SGIS) {
+            value = 0xff;
+        }
+
+        for (i = 0; i < 8; i++) {
+            if (value & (1 << i)) {
+                int mask =
+                    (irq < GIC_INTERNAL) ? (1 << cpu)
+                                         : GIC_DIST_TARGET(irq + i);
+                int cm = (irq < GIC_INTERNAL) ? (1 << cpu) : ALL_CPU_MASK;
+
+                if (s->security_extn && !attrs.secure &&
+                    !GIC_DIST_TEST_GROUP(irq + i, 1 << cpu)) {
+                    continue; /* Ignore Non-secure access of Group0 IRQ */
+                }
+
+                if (!GIC_DIST_TEST_ENABLED(irq + i, cm)) {
+                    DPRINTF("Enabled IRQ %d\n", irq + i);
+                    trace_gic_enable_irq(irq + i);
+                }
+                GIC_DIST_SET_ENABLED(irq + i, cm);
+                /* If a raised level triggered IRQ is enabled then mark
+                   it as pending. */
+                if (GIC_DIST_TEST_LEVEL(irq + i, mask)
+                        && !GIC_DIST_TEST_EDGE_TRIGGER(irq + i)) {
+                    DPRINTF("Set %d pending mask %x\n", irq + i, mask);
+                    GIC_DIST_SET_PENDING(irq + i, mask);
+                }
+            }
+        }
+    } else if (offset < 0x200) {
+        /* Interrupt Clear Enable. */
+        irq = (offset - 0x180) * 8;
+        if (irq >= s->num_irq)
+            goto bad_reg;
+        if (irq < GIC_NR_SGIS) {
+            value = 0;
+        }
+
+        for (i = 0; i < 8; i++) {
+            if (value & (1 << i)) {
+                int cm = (irq < GIC_INTERNAL) ? (1 << cpu) : ALL_CPU_MASK;
+
+                if (s->security_extn && !attrs.secure &&
+                    !GIC_DIST_TEST_GROUP(irq + i, 1 << cpu)) {
+                    continue; /* Ignore Non-secure access of Group0 IRQ */
+                }
+
+                if (GIC_DIST_TEST_ENABLED(irq + i, cm)) {
+                    DPRINTF("Disabled IRQ %d\n", irq + i);
+                    trace_gic_disable_irq(irq + i);
+                }
+                GIC_DIST_CLEAR_ENABLED(irq + i, cm);
+            }
+        }
+    } else if (offset < 0x280) {
+        /* Interrupt Set Pending. */
+        irq = (offset - 0x200) * 8;
+        if (irq >= s->num_irq)
+            goto bad_reg;
+        if (irq < GIC_NR_SGIS) {
+            value = 0;
+        }
+
+        for (i = 0; i < 8; i++) {
+            if (value & (1 << i)) {
+                if (s->security_extn && !attrs.secure &&
+                    !GIC_DIST_TEST_GROUP(irq + i, 1 << cpu)) {
+                    continue; /* Ignore Non-secure access of Group0 IRQ */
+                }
+
+                GIC_DIST_SET_PENDING(irq + i, GIC_DIST_TARGET(irq + i));
+            }
+        }
+    } else if (offset < 0x300) {
+        /* Interrupt Clear Pending. */
+        irq = (offset - 0x280) * 8;
+        if (irq >= s->num_irq)
+            goto bad_reg;
+        if (irq < GIC_NR_SGIS) {
+            value = 0;
+        }
+
+        for (i = 0; i < 8; i++) {
+            if (s->security_extn && !attrs.secure &&
+                !GIC_DIST_TEST_GROUP(irq + i, 1 << cpu)) {
+                continue; /* Ignore Non-secure access of Group0 IRQ */
+            }
+
+            /* ??? This currently clears the pending bit for all CPUs, even
+               for per-CPU interrupts. It's unclear whether this is the
+               correct behavior. */
+            if (value & (1 << i)) {
+                GIC_DIST_CLEAR_PENDING(irq + i, ALL_CPU_MASK);
+            }
+        }
+    } else if (offset < 0x380) {
+        /* Interrupt Set Active. */
+        if (s->revision != 2) {
+            goto bad_reg;
+        }
+
+        irq = (offset - 0x300) * 8;
+        if (irq >= s->num_irq) {
+            goto bad_reg;
+        }
+
+        /* This register is banked per-cpu for PPIs */
+        int cm = irq < GIC_INTERNAL ? (1 << cpu) : ALL_CPU_MASK;
+
+        for (i = 0; i < 8; i++) {
+            if (s->security_extn && !attrs.secure &&
+                !GIC_DIST_TEST_GROUP(irq + i, 1 << cpu)) {
+                continue; /* Ignore Non-secure access of Group0 IRQ */
+            }
+
+            if (value & (1 << i)) {
+                GIC_DIST_SET_ACTIVE(irq + i, cm);
+            }
+        }
+    } else if (offset < 0x400) {
+        /* Interrupt Clear Active. */
+        if (s->revision != 2) {
+            goto bad_reg;
+        }
+
+        irq = (offset - 0x380) * 8;
+        if (irq >= s->num_irq) {
+            goto bad_reg;
+        }
+
+        /* This register is banked per-cpu for PPIs */
+        int cm = irq < GIC_INTERNAL ? (1 << cpu) : ALL_CPU_MASK;
+
+        for (i = 0; i < 8; i++) {
+            if (s->security_extn && !attrs.secure &&
+                !GIC_DIST_TEST_GROUP(irq + i, 1 << cpu)) {
+                continue; /* Ignore Non-secure access of Group0 IRQ */
+            }
+
+            if (value & (1 << i)) {
+                GIC_DIST_CLEAR_ACTIVE(irq + i, cm);
+            }
+        }
+    } else if (offset < 0x800) {
+        /* Interrupt Priority. */
+        irq = (offset - 0x400);
+        if (irq >= s->num_irq)
+            goto bad_reg;
+        gic_dist_set_priority(s, cpu, irq, value, attrs);
+    } else if (offset < 0xc00) {
+        /* Interrupt CPU Target. RAZ/WI on uniprocessor GICs, with the
+         * annoying exception of the 11MPCore's GIC.
+         */
+        if (s->num_cpu != 1 || s->revision == REV_11MPCORE) {
+            irq = (offset - 0x800);
+            if (irq >= s->num_irq) {
+                goto bad_reg;
+            }
+            if (irq < 29 && s->revision == REV_11MPCORE) {
+                value = 0;
+            } else if (irq < GIC_INTERNAL) {
+                value = ALL_CPU_MASK;
+            }
+            s->irq_target[irq] = value & ALL_CPU_MASK;
+        }
+    } else if (offset < 0xf00) {
+        /* Interrupt Configuration. */
+        irq = (offset - 0xc00) * 4;
+        if (irq >= s->num_irq)
+            goto bad_reg;
+        if (irq < GIC_NR_SGIS)
+            value |= 0xaa;
+        for (i = 0; i < 4; i++) {
+            if (s->security_extn && !attrs.secure &&
+                !GIC_DIST_TEST_GROUP(irq + i, 1 << cpu)) {
+                continue; /* Ignore Non-secure access of Group0 IRQ */
+            }
+
+            if (s->revision == REV_11MPCORE) {
+                if (value & (1 << (i * 2))) {
+                    GIC_DIST_SET_MODEL(irq + i);
+                } else {
+                    GIC_DIST_CLEAR_MODEL(irq + i);
+                }
+            }
+            if (value & (2 << (i * 2))) {
+                GIC_DIST_SET_EDGE_TRIGGER(irq + i);
+            } else {
+                GIC_DIST_CLEAR_EDGE_TRIGGER(irq + i);
+            }
+        }
+    } else if (offset < 0xf10) {
+        /* 0xf00 is only handled for 32-bit writes.
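+         * (Editor's note: byte and halfword accesses to GICD_SGIR fall
+         * through to bad_reg here; the 32-bit path is handled in
+         * gic_dist_writel() below, which extracts the target list filter,
+         * CPU target mask and SGI number from the written value.)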
*/
+        goto bad_reg;
+    } else if (offset < 0xf20) {
+        /* GICD_CPENDSGIRn */
+        if (s->revision == REV_11MPCORE) {
+            goto bad_reg;
+        }
+        irq = (offset - 0xf10);
+
+        if (!s->security_extn || attrs.secure ||
+            GIC_DIST_TEST_GROUP(irq, 1 << cpu)) {
+            s->sgi_pending[irq][cpu] &= ~value;
+            if (s->sgi_pending[irq][cpu] == 0) {
+                GIC_DIST_CLEAR_PENDING(irq, 1 << cpu);
+            }
+        }
+    } else if (offset < 0xf30) {
+        /* GICD_SPENDSGIRn */
+        if (s->revision == REV_11MPCORE) {
+            goto bad_reg;
+        }
+        irq = (offset - 0xf20);
+
+        if (!s->security_extn || attrs.secure ||
+            GIC_DIST_TEST_GROUP(irq, 1 << cpu)) {
+            GIC_DIST_SET_PENDING(irq, 1 << cpu);
+            s->sgi_pending[irq][cpu] |= value;
+        }
+    } else {
+        goto bad_reg;
+    }
+    gic_update(s);
+    return;
+bad_reg:
+    qemu_log_mask(LOG_GUEST_ERROR,
+                  "gic_dist_writeb: Bad offset %x\n", (int)offset);
+}
+
+static void gic_dist_writew(void *opaque, hwaddr offset,
+                            uint32_t value, MemTxAttrs attrs)
+{
+    gic_dist_writeb(opaque, offset, value & 0xff, attrs);
+    gic_dist_writeb(opaque, offset + 1, value >> 8, attrs);
+}
+
+static void gic_dist_writel(void *opaque, hwaddr offset,
+                            uint32_t value, MemTxAttrs attrs)
+{
+    GICState *s = (GICState *)opaque;
+    if (offset == 0xf00) {
+        int cpu;
+        int irq;
+        int mask;
+        int target_cpu;
+
+        cpu = gic_get_current_cpu(s);
+        irq = value & 0xf;
+        switch ((value >> 24) & 3) {
+        case 0:
+            mask = (value >> 16) & ALL_CPU_MASK;
+            break;
+        case 1:
+            mask = ALL_CPU_MASK ^ (1 << cpu);
+            break;
+        case 2:
+            mask = 1 << cpu;
+            break;
+        default:
+            DPRINTF("Bad Soft Int target filter\n");
+            mask = ALL_CPU_MASK;
+            break;
+        }
+        GIC_DIST_SET_PENDING(irq, mask);
+        target_cpu = ctz32(mask);
+        while (target_cpu < GIC_NCPU) {
+            s->sgi_pending[irq][target_cpu] |= (1 << cpu);
+            mask &= ~(1 << target_cpu);
+            target_cpu = ctz32(mask);
+        }
+        gic_update(s);
+        return;
+    }
+    gic_dist_writew(opaque, offset, value & 0xffff, attrs);
+    gic_dist_writew(opaque, offset + 2, value >> 16, attrs);
+}
+
+static MemTxResult gic_dist_write(void *opaque, hwaddr offset, uint64_t data,
+                                  unsigned size, MemTxAttrs attrs)
+{
+    trace_gic_dist_write(offset, size, data);
+
+    switch (size) {
+    case 1:
+        gic_dist_writeb(opaque, offset, data, attrs);
+        return MEMTX_OK;
+    case 2:
+        gic_dist_writew(opaque, offset, data, attrs);
+        return MEMTX_OK;
+    case 4:
+        gic_dist_writel(opaque, offset, data, attrs);
+        return MEMTX_OK;
+    default:
+        return MEMTX_ERROR;
+    }
+}
+
+static inline uint32_t gic_apr_ns_view(GICState *s, int regno, int cpu)
+{
+    /* Return the Nonsecure view of GICC_APR. This is the
+     * second half of GICC_NSAPR.
+     */
+    switch (GIC_MIN_BPR) {
+    case 0:
+        if (regno < 2) {
+            return s->nsapr[regno + 2][cpu];
+        }
+        break;
+    case 1:
+        if (regno == 0) {
+            return s->nsapr[regno + 1][cpu];
+        }
+        break;
+    case 2:
+        if (regno == 0) {
+            return extract32(s->nsapr[0][cpu], 16, 16);
+        }
+        break;
+    case 3:
+        if (regno == 0) {
+            return extract32(s->nsapr[0][cpu], 8, 8);
+        }
+        break;
+    default:
+        g_assert_not_reached();
+    }
+    return 0;
+}
+
+static inline void gic_apr_write_ns_view(GICState *s, int regno, int cpu,
+                                         uint32_t value)
+{
+    /* Write the Nonsecure view of GICC_APR.
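+     * This mirrors gic_apr_ns_view() above: the Non-secure view is the
+     * second half of GICC_NSAPR, so e.g. with GIC_MIN_BPR == 0 an NS
+     * write to GICC_APR0 lands in nsapr[2] (editor's note).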
*/ + switch (GIC_MIN_BPR) { + case 0: + if (regno < 2) { + s->nsapr[regno + 2][cpu] = value; + } + break; + case 1: + if (regno == 0) { + s->nsapr[regno + 1][cpu] = value; + } + break; + case 2: + if (regno == 0) { + s->nsapr[0][cpu] = deposit32(s->nsapr[0][cpu], 16, 16, value); + } + break; + case 3: + if (regno == 0) { + s->nsapr[0][cpu] = deposit32(s->nsapr[0][cpu], 8, 8, value); + } + break; + default: + g_assert_not_reached(); + } +} + +static MemTxResult gic_cpu_read(GICState *s, int cpu, int offset, + uint64_t *data, MemTxAttrs attrs) +{ + switch (offset) { + case 0x00: /* Control */ + *data = gic_get_cpu_control(s, cpu, attrs); + break; + case 0x04: /* Priority mask */ + *data = gic_get_priority_mask(s, cpu, attrs); + break; + case 0x08: /* Binary Point */ + if (gic_cpu_ns_access(s, cpu, attrs)) { + if (s->cpu_ctlr[cpu] & GICC_CTLR_CBPR) { + /* NS view of BPR when CBPR is 1 */ + *data = MIN(s->bpr[cpu] + 1, 7); + } else { + /* BPR is banked. Non-secure copy stored in ABPR. */ + *data = s->abpr[cpu]; + } + } else { + *data = s->bpr[cpu]; + } + break; + case 0x0c: /* Acknowledge */ + *data = gic_acknowledge_irq(s, cpu, attrs); + break; + case 0x14: /* Running Priority */ + *data = gic_get_running_priority(s, cpu, attrs); + break; + case 0x18: /* Highest Pending Interrupt */ + *data = gic_get_current_pending_irq(s, cpu, attrs); + break; + case 0x1c: /* Aliased Binary Point */ + /* GIC v2, no security: ABPR + * GIC v1, no security: not implemented (RAZ/WI) + * With security extensions, secure access: ABPR (alias of NS BPR) + * With security extensions, nonsecure access: RAZ/WI + */ + if (!gic_has_groups(s) || (gic_cpu_ns_access(s, cpu, attrs))) { + *data = 0; + } else { + *data = s->abpr[cpu]; + } + break; + case 0xd0: case 0xd4: case 0xd8: case 0xdc: + { + int regno = (offset - 0xd0) / 4; + int nr_aprs = gic_is_vcpu(cpu) ? GIC_VIRT_NR_APRS : GIC_NR_APRS; + + if (regno >= nr_aprs || s->revision != 2) { + *data = 0; + } else if (gic_is_vcpu(cpu)) { + *data = s->h_apr[gic_get_vcpu_real_id(cpu)]; + } else if (gic_cpu_ns_access(s, cpu, attrs)) { + /* NS view of GICC_APR is the top half of GIC_NSAPR */ + *data = gic_apr_ns_view(s, regno, cpu); + } else { + *data = s->apr[regno][cpu]; + } + break; + } + case 0xe0: case 0xe4: case 0xe8: case 0xec: + { + int regno = (offset - 0xe0) / 4; + + if (regno >= GIC_NR_APRS || s->revision != 2 || !gic_has_groups(s) || + gic_cpu_ns_access(s, cpu, attrs) || gic_is_vcpu(cpu)) { + *data = 0; + } else { + *data = s->nsapr[regno][cpu]; + } + break; + } + default: + qemu_log_mask(LOG_GUEST_ERROR, + "gic_cpu_read: Bad offset %x\n", (int)offset); + *data = 0; + break; + } + + trace_gic_cpu_read(gic_is_vcpu(cpu) ? "vcpu" : "cpu", + gic_get_vcpu_real_id(cpu), offset, *data); + return MEMTX_OK; +} + +static MemTxResult gic_cpu_write(GICState *s, int cpu, int offset, + uint32_t value, MemTxAttrs attrs) +{ + trace_gic_cpu_write(gic_is_vcpu(cpu) ? "vcpu" : "cpu", + gic_get_vcpu_real_id(cpu), offset, value); + + switch (offset) { + case 0x00: /* Control */ + gic_set_cpu_control(s, cpu, value, attrs); + break; + case 0x04: /* Priority mask */ + gic_set_priority_mask(s, cpu, value, attrs); + break; + case 0x08: /* Binary Point */ + if (gic_cpu_ns_access(s, cpu, attrs)) { + if (s->cpu_ctlr[cpu] & GICC_CTLR_CBPR) { + /* WI when CBPR is 1 */ + return MEMTX_OK; + } else { + s->abpr[cpu] = MAX(value & 0x7, GIC_MIN_ABPR); + } + } else { + int min_bpr = gic_is_vcpu(cpu) ? 
GIC_VIRT_MIN_BPR : GIC_MIN_BPR; + s->bpr[cpu] = MAX(value & 0x7, min_bpr); + } + break; + case 0x10: /* End Of Interrupt */ + gic_complete_irq(s, cpu, value & 0x3ff, attrs); + return MEMTX_OK; + case 0x1c: /* Aliased Binary Point */ + if (!gic_has_groups(s) || (gic_cpu_ns_access(s, cpu, attrs))) { + /* unimplemented, or NS access: RAZ/WI */ + return MEMTX_OK; + } else { + s->abpr[cpu] = MAX(value & 0x7, GIC_MIN_ABPR); + } + break; + case 0xd0: case 0xd4: case 0xd8: case 0xdc: + { + int regno = (offset - 0xd0) / 4; + int nr_aprs = gic_is_vcpu(cpu) ? GIC_VIRT_NR_APRS : GIC_NR_APRS; + + if (regno >= nr_aprs || s->revision != 2) { + return MEMTX_OK; + } + if (gic_is_vcpu(cpu)) { + s->h_apr[gic_get_vcpu_real_id(cpu)] = value; + } else if (gic_cpu_ns_access(s, cpu, attrs)) { + /* NS view of GICC_APR is the top half of GIC_NSAPR */ + gic_apr_write_ns_view(s, regno, cpu, value); + } else { + s->apr[regno][cpu] = value; + } + break; + } + case 0xe0: case 0xe4: case 0xe8: case 0xec: + { + int regno = (offset - 0xe0) / 4; + + if (regno >= GIC_NR_APRS || s->revision != 2) { + return MEMTX_OK; + } + if (gic_is_vcpu(cpu)) { + return MEMTX_OK; + } + if (!gic_has_groups(s) || (gic_cpu_ns_access(s, cpu, attrs))) { + return MEMTX_OK; + } + s->nsapr[regno][cpu] = value; + break; + } + case 0x1000: + /* GICC_DIR */ + gic_deactivate_irq(s, cpu, value & 0x3ff, attrs); + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "gic_cpu_write: Bad offset %x\n", (int)offset); + return MEMTX_OK; + } + + if (gic_is_vcpu(cpu)) { + gic_update_virt(s); + } else { + gic_update(s); + } + + return MEMTX_OK; +} + +/* Wrappers to read/write the GIC CPU interface for the current CPU */ +static MemTxResult gic_thiscpu_read(void *opaque, hwaddr addr, uint64_t *data, + unsigned size, MemTxAttrs attrs) +{ + GICState *s = (GICState *)opaque; + return gic_cpu_read(s, gic_get_current_cpu(s), addr, data, attrs); +} + +static MemTxResult gic_thiscpu_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size, + MemTxAttrs attrs) +{ + GICState *s = (GICState *)opaque; + return gic_cpu_write(s, gic_get_current_cpu(s), addr, value, attrs); +} + +/* Wrappers to read/write the GIC CPU interface for a specific CPU. + * These just decode the opaque pointer into GICState* + cpu id. 
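The comment above describes the decode these wrappers perform: every element of s->backref[] points back at the owning GICState, so subtracting the array base from the opaque pointer yields the CPU index with no extra bookkeeping. A self-contained sketch of the same pattern (names here are illustrative, not from the patch):

#include <stdio.h>

#define NUM_CPU 4

typedef struct Dev {
    int state;
    struct Dev *backref[NUM_CPU];   /* backref[i] == &dev for every i */
} Dev;

/* Same decode as gic_do_cpu_read()/gic_do_cpu_write(): the opaque pointer
 * is &dev->backref[id], so the element offset is the CPU id.
 */
static int decode_cpu_id(void *opaque)
{
    Dev **backref = (Dev **)opaque;
    Dev *d = *backref;
    return backref - d->backref;
}

int main(void)
{
    Dev d;

    for (int i = 0; i < NUM_CPU; i++) {
        d.backref[i] = &d;
    }
    /* Registering &d.backref[2] as the opaque for CPU 2's region lets
     * the shared callback recover both the device and the index:
     */
    printf("cpu id = %d\n", decode_cpu_id(&d.backref[2])); /* prints 2 */
    return 0;
}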
+ */ +static MemTxResult gic_do_cpu_read(void *opaque, hwaddr addr, uint64_t *data, + unsigned size, MemTxAttrs attrs) +{ + GICState **backref = (GICState **)opaque; + GICState *s = *backref; + int id = (backref - s->backref); + return gic_cpu_read(s, id, addr, data, attrs); +} + +static MemTxResult gic_do_cpu_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size, + MemTxAttrs attrs) +{ + GICState **backref = (GICState **)opaque; + GICState *s = *backref; + int id = (backref - s->backref); + return gic_cpu_write(s, id, addr, value, attrs); +} + +static MemTxResult gic_thisvcpu_read(void *opaque, hwaddr addr, uint64_t *data, + unsigned size, MemTxAttrs attrs) +{ + GICState *s = (GICState *)opaque; + + return gic_cpu_read(s, gic_get_current_vcpu(s), addr, data, attrs); +} + +static MemTxResult gic_thisvcpu_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size, + MemTxAttrs attrs) +{ + GICState *s = (GICState *)opaque; + + return gic_cpu_write(s, gic_get_current_vcpu(s), addr, value, attrs); +} + +static uint32_t gic_compute_eisr(GICState *s, int cpu, int lr_start) +{ + int lr_idx; + uint32_t ret = 0; + + for (lr_idx = lr_start; lr_idx < s->num_lrs; lr_idx++) { + uint32_t *entry = &s->h_lr[lr_idx][cpu]; + ret = deposit32(ret, lr_idx - lr_start, 1, + gic_lr_entry_is_eoi(*entry)); + } + + return ret; +} + +static uint32_t gic_compute_elrsr(GICState *s, int cpu, int lr_start) +{ + int lr_idx; + uint32_t ret = 0; + + for (lr_idx = lr_start; lr_idx < s->num_lrs; lr_idx++) { + uint32_t *entry = &s->h_lr[lr_idx][cpu]; + ret = deposit32(ret, lr_idx - lr_start, 1, + gic_lr_entry_is_free(*entry)); + } + + return ret; +} + +static void gic_vmcr_write(GICState *s, uint32_t value, MemTxAttrs attrs) +{ + int vcpu = gic_get_current_vcpu(s); + uint32_t ctlr; + uint32_t abpr; + uint32_t bpr; + uint32_t prio_mask; + + ctlr = FIELD_EX32(value, GICH_VMCR, VMCCtlr); + abpr = FIELD_EX32(value, GICH_VMCR, VMABP); + bpr = FIELD_EX32(value, GICH_VMCR, VMBP); + prio_mask = FIELD_EX32(value, GICH_VMCR, VMPriMask) << 3; + + gic_set_cpu_control(s, vcpu, ctlr, attrs); + s->abpr[vcpu] = MAX(abpr, GIC_VIRT_MIN_ABPR); + s->bpr[vcpu] = MAX(bpr, GIC_VIRT_MIN_BPR); + gic_set_priority_mask(s, vcpu, prio_mask, attrs); +} + +static MemTxResult gic_hyp_read(void *opaque, int cpu, hwaddr addr, + uint64_t *data, MemTxAttrs attrs) +{ + GICState *s = ARM_GIC(opaque); + int vcpu = cpu + GIC_NCPU; + + switch (addr) { + case A_GICH_HCR: /* Hypervisor Control */ + *data = s->h_hcr[cpu]; + break; + + case A_GICH_VTR: /* VGIC Type */ + *data = FIELD_DP32(0, GICH_VTR, ListRegs, s->num_lrs - 1); + *data = FIELD_DP32(*data, GICH_VTR, PREbits, + GIC_VIRT_MAX_GROUP_PRIO_BITS - 1); + *data = FIELD_DP32(*data, GICH_VTR, PRIbits, + (7 - GIC_VIRT_MIN_BPR) - 1); + break; + + case A_GICH_VMCR: /* Virtual Machine Control */ + *data = FIELD_DP32(0, GICH_VMCR, VMCCtlr, + extract32(s->cpu_ctlr[vcpu], 0, 10)); + *data = FIELD_DP32(*data, GICH_VMCR, VMABP, s->abpr[vcpu]); + *data = FIELD_DP32(*data, GICH_VMCR, VMBP, s->bpr[vcpu]); + *data = FIELD_DP32(*data, GICH_VMCR, VMPriMask, + extract32(s->priority_mask[vcpu], 3, 5)); + break; + + case A_GICH_MISR: /* Maintenance Interrupt Status */ + *data = s->h_misr[cpu]; + break; + + case A_GICH_EISR0: /* End of Interrupt Status 0 and 1 */ + case A_GICH_EISR1: + *data = gic_compute_eisr(s, cpu, (addr - A_GICH_EISR0) * 8); + break; + + case A_GICH_ELRSR0: /* Empty List Status 0 and 1 */ + case A_GICH_ELRSR1: + *data = gic_compute_elrsr(s, cpu, (addr - A_GICH_ELRSR0) * 8); + break; + + case 
A_GICH_APR: /* Active Priorities */ + *data = s->h_apr[cpu]; + break; + + case A_GICH_LR0 ... A_GICH_LR63: /* List Registers */ + { + int lr_idx = (addr - A_GICH_LR0) / 4; + + if (lr_idx > s->num_lrs) { + *data = 0; + } else { + *data = s->h_lr[lr_idx][cpu]; + } + break; + } + + default: + qemu_log_mask(LOG_GUEST_ERROR, + "gic_hyp_read: Bad offset %" HWADDR_PRIx "\n", addr); + return MEMTX_OK; + } + + trace_gic_hyp_read(addr, *data); + return MEMTX_OK; +} + +static MemTxResult gic_hyp_write(void *opaque, int cpu, hwaddr addr, + uint64_t value, MemTxAttrs attrs) +{ + GICState *s = ARM_GIC(opaque); + int vcpu = cpu + GIC_NCPU; + + trace_gic_hyp_write(addr, value); + + switch (addr) { + case A_GICH_HCR: /* Hypervisor Control */ + s->h_hcr[cpu] = value & GICH_HCR_MASK; + break; + + case A_GICH_VMCR: /* Virtual Machine Control */ + gic_vmcr_write(s, value, attrs); + break; + + case A_GICH_APR: /* Active Priorities */ + s->h_apr[cpu] = value; + s->running_priority[vcpu] = gic_get_prio_from_apr_bits(s, vcpu); + break; + + case A_GICH_LR0 ... A_GICH_LR63: /* List Registers */ + { + int lr_idx = (addr - A_GICH_LR0) / 4; + + if (lr_idx > s->num_lrs) { + return MEMTX_OK; + } + + s->h_lr[lr_idx][cpu] = value & GICH_LR_MASK; + trace_gic_lr_entry(cpu, lr_idx, s->h_lr[lr_idx][cpu]); + break; + } + + default: + qemu_log_mask(LOG_GUEST_ERROR, + "gic_hyp_write: Bad offset %" HWADDR_PRIx "\n", addr); + return MEMTX_OK; + } + + gic_update_virt(s); + return MEMTX_OK; +} + +static MemTxResult gic_thiscpu_hyp_read(void *opaque, hwaddr addr, uint64_t *data, + unsigned size, MemTxAttrs attrs) +{ + GICState *s = (GICState *)opaque; + + return gic_hyp_read(s, gic_get_current_cpu(s), addr, data, attrs); +} + +static MemTxResult gic_thiscpu_hyp_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size, + MemTxAttrs attrs) +{ + GICState *s = (GICState *)opaque; + + return gic_hyp_write(s, gic_get_current_cpu(s), addr, value, attrs); +} + +static MemTxResult gic_do_hyp_read(void *opaque, hwaddr addr, uint64_t *data, + unsigned size, MemTxAttrs attrs) +{ + GICState **backref = (GICState **)opaque; + GICState *s = *backref; + int id = (backref - s->backref); + + return gic_hyp_read(s, id, addr, data, attrs); +} + +static MemTxResult gic_do_hyp_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size, + MemTxAttrs attrs) +{ + GICState **backref = (GICState **)opaque; + GICState *s = *backref; + int id = (backref - s->backref); + + return gic_hyp_write(s, id, addr, value, attrs); + +} + +static const MemoryRegionOps gic_ops[2] = { + { + .read_with_attrs = gic_dist_read, + .write_with_attrs = gic_dist_write, + .endianness = DEVICE_NATIVE_ENDIAN, + }, + { + .read_with_attrs = gic_thiscpu_read, + .write_with_attrs = gic_thiscpu_write, + .endianness = DEVICE_NATIVE_ENDIAN, + } +}; + +static const MemoryRegionOps gic_cpu_ops = { + .read_with_attrs = gic_do_cpu_read, + .write_with_attrs = gic_do_cpu_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static const MemoryRegionOps gic_virt_ops[2] = { + { + .read_with_attrs = gic_thiscpu_hyp_read, + .write_with_attrs = gic_thiscpu_hyp_write, + .endianness = DEVICE_NATIVE_ENDIAN, + }, + { + .read_with_attrs = gic_thisvcpu_read, + .write_with_attrs = gic_thisvcpu_write, + .endianness = DEVICE_NATIVE_ENDIAN, + } +}; + +static const MemoryRegionOps gic_viface_ops = { + .read_with_attrs = gic_do_hyp_read, + .write_with_attrs = gic_do_hyp_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void arm_gic_realize(DeviceState *dev, Error **errp) +{ + /* Device
instance realize function for the GIC sysbus device */ + int i; + GICState *s = ARM_GIC(dev); + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + ARMGICClass *agc = ARM_GIC_GET_CLASS(s); + Error *local_err = NULL; + + agc->parent_realize(dev, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + + if (kvm_enabled() && !kvm_arm_supports_user_irq()) { + error_setg(errp, "KVM with user space irqchip only works when the " + "host kernel supports KVM_CAP_ARM_USER_IRQ"); + return; + } + + if (s->n_prio_bits > GIC_MAX_PRIORITY_BITS || + (s->virt_extn ? s->n_prio_bits < GIC_VIRT_MAX_GROUP_PRIO_BITS : + s->n_prio_bits < GIC_MIN_PRIORITY_BITS)) { + error_setg(errp, "num-priority-bits cannot be greater than %d" + " or less than %d", GIC_MAX_PRIORITY_BITS, + s->virt_extn ? GIC_VIRT_MAX_GROUP_PRIO_BITS : + GIC_MIN_PRIORITY_BITS); + return; + } + + /* This creates distributor, main CPU interface (s->cpuiomem[0]) and if + * enabled, virtualization extensions related interfaces (main virtual + * interface (s->vifaceiomem[0]) and virtual CPU interface). + */ + gic_init_irqs_and_mmio(s, gic_set_irq, gic_ops, gic_virt_ops); + + /* Extra core-specific regions for the CPU interfaces. This is + * necessary for "franken-GIC" implementations, for example on + * Exynos 4. + * NB that the memory region size of 0x100 applies for the 11MPCore + * and also cores following the GIC v1 spec (ie A9). + * GIC v2 defines a larger memory region (0x1000) so this will need + * to be extended when we implement A15. + */ + for (i = 0; i < s->num_cpu; i++) { + s->backref[i] = s; + memory_region_init_io(&s->cpuiomem[i+1], OBJECT(s), &gic_cpu_ops, + &s->backref[i], "gic_cpu", 0x100); + sysbus_init_mmio(sbd, &s->cpuiomem[i+1]); + } + + /* Extra core-specific regions for virtual interfaces. This is required by + * the GICv2 specification. + */ + if (s->virt_extn) { + for (i = 0; i < s->num_cpu; i++) { + memory_region_init_io(&s->vifaceiomem[i + 1], OBJECT(s), + &gic_viface_ops, &s->backref[i], + "gic_viface", 0x200); + sysbus_init_mmio(sbd, &s->vifaceiomem[i + 1]); + } + } + +} + +static void arm_gic_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + ARMGICClass *agc = ARM_GIC_CLASS(klass); + + device_class_set_parent_realize(dc, arm_gic_realize, &agc->parent_realize); +} + +static const TypeInfo arm_gic_info = { + .name = TYPE_ARM_GIC, + .parent = TYPE_ARM_GIC_COMMON, + .instance_size = sizeof(GICState), + .class_init = arm_gic_class_init, + .class_size = sizeof(ARMGICClass), +}; + +static void arm_gic_register_types(void) +{ + type_register_static(&arm_gic_info); +} + +type_init(arm_gic_register_types) diff --git a/hw/intc/arm_gic_common.c b/hw/intc/arm_gic_common.c new file mode 100644 index 000000000..7b44d5625 --- /dev/null +++ b/hw/intc/arm_gic_common.c @@ -0,0 +1,394 @@ +/* + * ARM GIC support - common bits of emulated and KVM kernel model + * + * Copyright (c) 2012 Linaro Limited + * Written by Peter Maydell + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see <http://www.gnu.org/licenses/>. + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/module.h" +#include "gic_internal.h" +#include "hw/arm/linux-boot-if.h" +#include "hw/qdev-properties.h" +#include "migration/vmstate.h" + +static int gic_pre_save(void *opaque) +{ + GICState *s = (GICState *)opaque; + ARMGICCommonClass *c = ARM_GIC_COMMON_GET_CLASS(s); + + if (c->pre_save) { + c->pre_save(s); + } + + return 0; +} + +static int gic_post_load(void *opaque, int version_id) +{ + GICState *s = (GICState *)opaque; + ARMGICCommonClass *c = ARM_GIC_COMMON_GET_CLASS(s); + + if (c->post_load) { + c->post_load(s); + } + return 0; +} + +static bool gic_virt_state_needed(void *opaque) +{ + GICState *s = (GICState *)opaque; + + return s->virt_extn; +} + +static const VMStateDescription vmstate_gic_irq_state = { + .name = "arm_gic_irq_state", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT8(enabled, gic_irq_state), + VMSTATE_UINT8(pending, gic_irq_state), + VMSTATE_UINT8(active, gic_irq_state), + VMSTATE_UINT8(level, gic_irq_state), + VMSTATE_BOOL(model, gic_irq_state), + VMSTATE_BOOL(edge_trigger, gic_irq_state), + VMSTATE_UINT8(group, gic_irq_state), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_gic_virt_state = { + .name = "arm_gic_virt_state", + .version_id = 1, + .minimum_version_id = 1, + .needed = gic_virt_state_needed, + .fields = (VMStateField[]) { + /* Virtual interface */ + VMSTATE_UINT32_ARRAY(h_hcr, GICState, GIC_NCPU), + VMSTATE_UINT32_ARRAY(h_misr, GICState, GIC_NCPU), + VMSTATE_UINT32_2DARRAY(h_lr, GICState, GIC_MAX_LR, GIC_NCPU), + VMSTATE_UINT32_ARRAY(h_apr, GICState, GIC_NCPU), + + /* Virtual CPU interfaces */ + VMSTATE_UINT32_SUB_ARRAY(cpu_ctlr, GICState, GIC_NCPU, GIC_NCPU), + VMSTATE_UINT16_SUB_ARRAY(priority_mask, GICState, GIC_NCPU, GIC_NCPU), + VMSTATE_UINT16_SUB_ARRAY(running_priority, GICState, GIC_NCPU, GIC_NCPU), + VMSTATE_UINT16_SUB_ARRAY(current_pending, GICState, GIC_NCPU, GIC_NCPU), + VMSTATE_UINT8_SUB_ARRAY(bpr, GICState, GIC_NCPU, GIC_NCPU), + VMSTATE_UINT8_SUB_ARRAY(abpr, GICState, GIC_NCPU, GIC_NCPU), + + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_gic = { + .name = "arm_gic", + .version_id = 12, + .minimum_version_id = 12, + .pre_save = gic_pre_save, + .post_load = gic_post_load, + .fields = (VMStateField[]) { + VMSTATE_UINT32(ctlr, GICState), + VMSTATE_UINT32_SUB_ARRAY(cpu_ctlr, GICState, 0, GIC_NCPU), + VMSTATE_STRUCT_ARRAY(irq_state, GICState, GIC_MAXIRQ, 1, + vmstate_gic_irq_state, gic_irq_state), + VMSTATE_UINT8_ARRAY(irq_target, GICState, GIC_MAXIRQ), + VMSTATE_UINT8_2DARRAY(priority1, GICState, GIC_INTERNAL, GIC_NCPU), + VMSTATE_UINT8_ARRAY(priority2, GICState, GIC_MAXIRQ - GIC_INTERNAL), + VMSTATE_UINT8_2DARRAY(sgi_pending, GICState, GIC_NR_SGIS, GIC_NCPU), + VMSTATE_UINT16_SUB_ARRAY(priority_mask, GICState, 0, GIC_NCPU), + VMSTATE_UINT16_SUB_ARRAY(running_priority, GICState, 0, GIC_NCPU), + VMSTATE_UINT16_SUB_ARRAY(current_pending, GICState, 0, GIC_NCPU), + VMSTATE_UINT8_SUB_ARRAY(bpr, GICState, 0, GIC_NCPU), + VMSTATE_UINT8_SUB_ARRAY(abpr, GICState, 0, GIC_NCPU), + VMSTATE_UINT32_2DARRAY(apr, GICState, GIC_NR_APRS, GIC_NCPU), + VMSTATE_UINT32_2DARRAY(nsapr, GICState, GIC_NR_APRS, GIC_NCPU), + VMSTATE_END_OF_LIST() + }, + .subsections = (const VMStateDescription * []) { + &vmstate_gic_virt_state, + NULL + } +}; + +void gic_init_irqs_and_mmio(GICState *s,
qemu_irq_handler handler, + const MemoryRegionOps *ops, + const MemoryRegionOps *virt_ops) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(s); + int i = s->num_irq - GIC_INTERNAL; + + /* For the GIC, also expose incoming GPIO lines for PPIs for each CPU. + * GPIO array layout is thus: + * [0..N-1] SPIs + * [N..N+31] PPIs for CPU 0 + * [N+32..N+63] PPIs for CPU 1 + * ... + */ + i += (GIC_INTERNAL * s->num_cpu); + qdev_init_gpio_in(DEVICE(s), handler, i); + + for (i = 0; i < s->num_cpu; i++) { + sysbus_init_irq(sbd, &s->parent_irq[i]); + } + for (i = 0; i < s->num_cpu; i++) { + sysbus_init_irq(sbd, &s->parent_fiq[i]); + } + for (i = 0; i < s->num_cpu; i++) { + sysbus_init_irq(sbd, &s->parent_virq[i]); + } + for (i = 0; i < s->num_cpu; i++) { + sysbus_init_irq(sbd, &s->parent_vfiq[i]); + } + if (s->virt_extn) { + for (i = 0; i < s->num_cpu; i++) { + sysbus_init_irq(sbd, &s->maintenance_irq[i]); + } + } + + /* Distributor */ + memory_region_init_io(&s->iomem, OBJECT(s), ops, s, "gic_dist", 0x1000); + sysbus_init_mmio(sbd, &s->iomem); + + /* This is the main CPU interface "for this core". It is always + * present because it is required by both software emulation and KVM. + */ + memory_region_init_io(&s->cpuiomem[0], OBJECT(s), ops ? &ops[1] : NULL, + s, "gic_cpu", s->revision == 2 ? 0x2000 : 0x100); + sysbus_init_mmio(sbd, &s->cpuiomem[0]); + + if (s->virt_extn) { + memory_region_init_io(&s->vifaceiomem[0], OBJECT(s), virt_ops, + s, "gic_viface", 0x1000); + sysbus_init_mmio(sbd, &s->vifaceiomem[0]); + + memory_region_init_io(&s->vcpuiomem, OBJECT(s), + virt_ops ? &virt_ops[1] : NULL, + s, "gic_vcpu", 0x2000); + sysbus_init_mmio(sbd, &s->vcpuiomem); + } +} + +static void arm_gic_common_realize(DeviceState *dev, Error **errp) +{ + GICState *s = ARM_GIC_COMMON(dev); + int num_irq = s->num_irq; + + if (s->num_cpu > GIC_NCPU) { + error_setg(errp, "requested %u CPUs exceeds GIC maximum %d", + s->num_cpu, GIC_NCPU); + return; + } + if (s->num_irq > GIC_MAXIRQ) { + error_setg(errp, + "requested %u interrupt lines exceeds GIC maximum %d", + num_irq, GIC_MAXIRQ); + return; + } + /* ITLinesNumber is represented as (N / 32) - 1 (see + * gic_dist_readb) so this is an implementation imposed + * restriction, not an architectural one: + */ + if (s->num_irq < 32 || (s->num_irq % 32)) { + error_setg(errp, + "%d interrupt lines unsupported: not divisible by 32", + num_irq); + return; + } + + if (s->security_extn && + (s->revision == REV_11MPCORE)) { + error_setg(errp, "this GIC revision does not implement " + "the security extensions"); + return; + } + + if (s->virt_extn) { + if (s->revision != 2) { + error_setg(errp, "GIC virtualization extensions are only " + "supported by revision 2"); + return; + } + + /* For now, set the number of implemented LRs to 4, as found in most + * real GICv2. This could be promoted as a QOM property if we need to + * emulate a variant with another num_lrs. + */ + s->num_lrs = 4; + } +} + +static inline void arm_gic_common_reset_irq_state(GICState *s, int first_cpu, + int resetprio) +{ + int i, j; + + for (i = first_cpu; i < first_cpu + s->num_cpu; i++) { + if (s->revision == REV_11MPCORE) { + s->priority_mask[i] = 0xf0; + } else { + s->priority_mask[i] = resetprio; + } + s->current_pending[i] = 1023; + s->running_priority[i] = 0x100; + s->cpu_ctlr[i] = 0; + s->bpr[i] = gic_is_vcpu(i) ? GIC_VIRT_MIN_BPR : GIC_MIN_BPR; + s->abpr[i] = gic_is_vcpu(i) ? 
GIC_VIRT_MIN_ABPR : GIC_MIN_ABPR; + + if (!gic_is_vcpu(i)) { + for (j = 0; j < GIC_INTERNAL; j++) { + s->priority1[j][i] = resetprio; + } + for (j = 0; j < GIC_NR_SGIS; j++) { + s->sgi_pending[j][i] = 0; + } + } + } +} + +static void arm_gic_common_reset(DeviceState *dev) +{ + GICState *s = ARM_GIC_COMMON(dev); + int i, j; + int resetprio; + + /* If we're resetting a TZ-aware GIC as if secure firmware + * had set it up ready to start a kernel in non-secure, + * we need to set interrupt priorities to a "zero for the + * NS view" value. This is particularly critical for the + * priority_mask[] values, because if they are zero then NS + * code cannot ever rewrite the priority to anything else. + */ + if (s->security_extn && s->irq_reset_nonsecure) { + resetprio = 0x80; + } else { + resetprio = 0; + } + + memset(s->irq_state, 0, GIC_MAXIRQ * sizeof(gic_irq_state)); + arm_gic_common_reset_irq_state(s, 0, resetprio); + + if (s->virt_extn) { + /* vCPU states are stored at indexes GIC_NCPU .. GIC_NCPU+num_cpu. + * The exposed vCPU interface does not have security extensions. + */ + arm_gic_common_reset_irq_state(s, GIC_NCPU, 0); + } + + for (i = 0; i < GIC_NR_SGIS; i++) { + GIC_DIST_SET_ENABLED(i, ALL_CPU_MASK); + GIC_DIST_SET_EDGE_TRIGGER(i); + } + + for (i = 0; i < ARRAY_SIZE(s->priority2); i++) { + s->priority2[i] = resetprio; + } + + for (i = 0; i < GIC_MAXIRQ; i++) { + /* For uniprocessor GICs all interrupts always target the sole CPU */ + if (s->num_cpu == 1) { + s->irq_target[i] = 1; + } else { + s->irq_target[i] = 0; + } + } + if (s->security_extn && s->irq_reset_nonsecure) { + for (i = 0; i < GIC_MAXIRQ; i++) { + GIC_DIST_SET_GROUP(i, ALL_CPU_MASK); + } + } + + if (s->virt_extn) { + for (i = 0; i < s->num_lrs; i++) { + for (j = 0; j < s->num_cpu; j++) { + s->h_lr[i][j] = 0; + } + } + + for (i = 0; i < s->num_cpu; i++) { + s->h_hcr[i] = 0; + s->h_misr[i] = 0; + } + } + + s->ctlr = 0; +} + +static void arm_gic_common_linux_init(ARMLinuxBootIf *obj, + bool secure_boot) +{ + GICState *s = ARM_GIC_COMMON(obj); + + if (s->security_extn && !secure_boot) { + /* We're directly booting a kernel into NonSecure. If this GIC + * implements the security extensions then we must configure it + * to have all the interrupts be NonSecure (this is a job that + * is done by the Secure boot firmware in real hardware, and in + * this mode QEMU is acting as a minimalist firmware-and-bootloader + * equivalent). + */ + s->irq_reset_nonsecure = true; + } +} + +static Property arm_gic_common_properties[] = { + DEFINE_PROP_UINT32("num-cpu", GICState, num_cpu, 1), + DEFINE_PROP_UINT32("num-irq", GICState, num_irq, 32), + /* Revision can be 1 or 2 for GIC architecture specification + * versions 1 or 2, or 0 to indicate the legacy 11MPCore GIC. 
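For context, a board model would set these qdev properties before realizing the device; the fragment below is a hedged sketch of the usual idiom (the property names are the ones defined in this array, while the function name, CPU/IRQ counts and base addresses are placeholders):

/* Sketch only: wiring this GIC into a hypothetical board model. */
static void example_board_create_gic(void)
{
    DeviceState *gicdev = qdev_new(TYPE_ARM_GIC);
    SysBusDevice *gicbusdev = SYS_BUS_DEVICE(gicdev);

    qdev_prop_set_uint32(gicdev, "num-cpu", 2);
    qdev_prop_set_uint32(gicdev, "num-irq", 96); /* 64 SPIs + 32 internal */
    qdev_prop_set_uint32(gicdev, "revision", 2);
    qdev_prop_set_bit(gicdev, "has-security-extensions", true);

    sysbus_realize_and_unref(gicbusdev, &error_fatal);
    sysbus_mmio_map(gicbusdev, 0, 0x08000000); /* distributor   */
    sysbus_mmio_map(gicbusdev, 1, 0x08010000); /* CPU interface */
}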
+ */ + DEFINE_PROP_UINT32("revision", GICState, revision, 1), + /* True if the GIC should implement the security extensions */ + DEFINE_PROP_BOOL("has-security-extensions", GICState, security_extn, 0), + /* True if the GIC should implement the virtualization extensions */ + DEFINE_PROP_BOOL("has-virtualization-extensions", GICState, virt_extn, 0), + DEFINE_PROP_UINT32("num-priority-bits", GICState, n_prio_bits, 8), + DEFINE_PROP_END_OF_LIST(), +}; + +static void arm_gic_common_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + ARMLinuxBootIfClass *albifc = ARM_LINUX_BOOT_IF_CLASS(klass); + + dc->reset = arm_gic_common_reset; + dc->realize = arm_gic_common_realize; + device_class_set_props(dc, arm_gic_common_properties); + dc->vmsd = &vmstate_gic; + albifc->arm_linux_init = arm_gic_common_linux_init; +} + +static const TypeInfo arm_gic_common_type = { + .name = TYPE_ARM_GIC_COMMON, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(GICState), + .class_size = sizeof(ARMGICCommonClass), + .class_init = arm_gic_common_class_init, + .abstract = true, + .interfaces = (InterfaceInfo []) { + { TYPE_ARM_LINUX_BOOT_IF }, + { }, + }, +}; + +static void register_types(void) +{ + type_register_static(&arm_gic_common_type); +} + +type_init(register_types) diff --git a/hw/intc/arm_gic_kvm.c b/hw/intc/arm_gic_kvm.c new file mode 100644 index 000000000..7d2a13273 --- /dev/null +++ b/hw/intc/arm_gic_kvm.c @@ -0,0 +1,619 @@ +/* + * ARM Generic Interrupt Controller using KVM in-kernel support + * + * Copyright (c) 2012 Linaro Limited + * Written by Peter Maydell + * Save/Restore logic added by Christoffer Dall. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see <http://www.gnu.org/licenses/>. + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/module.h" +#include "migration/blocker.h" +#include "sysemu/kvm.h" +#include "kvm_arm.h" +#include "gic_internal.h" +#include "vgic_common.h" +#include "qom/object.h" + +#define TYPE_KVM_ARM_GIC "kvm-arm-gic" +typedef struct KVMARMGICClass KVMARMGICClass; +/* This is reusing the GICState typedef from ARM_GIC_COMMON */ +DECLARE_OBJ_CHECKERS(GICState, KVMARMGICClass, + KVM_ARM_GIC, TYPE_KVM_ARM_GIC) + +struct KVMARMGICClass { + ARMGICCommonClass parent_class; + DeviceRealize parent_realize; + void (*parent_reset)(DeviceState *dev); +}; + +void kvm_arm_gic_set_irq(uint32_t num_irq, int irq, int level) +{ + /* Meaning of the 'irq' parameter: + * [0..N-1] : external interrupts + * [N..N+31] : PPI (internal) interrupts for CPU 0 + * [N+32..N+63] : PPI (internal) interrupts for CPU 1 + * ... + * Convert this to the kernel's desired encoding, which + * has separate fields in the irq number for type, + * CPU number and interrupt number. + */ + int irqtype, cpu; + + if (irq < (num_irq - GIC_INTERNAL)) { + /* External interrupt. The kernel numbers these like the GIC + * hardware, with external interrupt IDs starting after the + * internal ones.
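A worked example of the flat-to-(type, cpu, intid) decode described above and implemented just below, as a standalone program (assuming num_irq = 96, i.e. N = 64 external lines):

#include <stdio.h>

#define GIC_INTERNAL 32

/* Mirrors the decode in kvm_arm_gic_set_irq(). */
static void decode(unsigned num_irq, int irq)
{
    int nr_ext = num_irq - GIC_INTERNAL;

    if (irq < nr_ext) {
        /* SPI: kernel interrupt IDs start after the 32 internal ones */
        printf("GPIO %3d -> SPI, intid %d\n", irq, irq + GIC_INTERNAL);
    } else {
        int ppi = irq - nr_ext;

        printf("GPIO %3d -> PPI %d on CPU %d\n",
               irq, ppi % GIC_INTERNAL, ppi / GIC_INTERNAL);
    }
}

int main(void)
{
    decode(96, 0);   /* first SPI: intid 32 */
    decode(96, 63);  /* last SPI: intid 95  */
    decode(96, 64);  /* PPI 0 on CPU 0      */
    decode(96, 102); /* PPI 6 on CPU 1      */
    return 0;
}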
+ */ + irqtype = KVM_ARM_IRQ_TYPE_SPI; + cpu = 0; + irq += GIC_INTERNAL; + } else { + /* Internal interrupt: decode into (cpu, interrupt id) */ + irqtype = KVM_ARM_IRQ_TYPE_PPI; + irq -= (num_irq - GIC_INTERNAL); + cpu = irq / GIC_INTERNAL; + irq %= GIC_INTERNAL; + } + kvm_arm_set_irq(cpu, irqtype, irq, !!level); +} + +static void kvm_arm_gicv2_set_irq(void *opaque, int irq, int level) +{ + GICState *s = (GICState *)opaque; + + kvm_arm_gic_set_irq(s->num_irq, irq, level); +} + +static bool kvm_arm_gic_can_save_restore(GICState *s) +{ + return s->dev_fd >= 0; +} + +#define KVM_VGIC_ATTR(offset, cpu) \ + ((((uint64_t)(cpu) << KVM_DEV_ARM_VGIC_CPUID_SHIFT) & \ + KVM_DEV_ARM_VGIC_CPUID_MASK) | \ + (((uint64_t)(offset) << KVM_DEV_ARM_VGIC_OFFSET_SHIFT) & \ + KVM_DEV_ARM_VGIC_OFFSET_MASK)) + +static void kvm_gicd_access(GICState *s, int offset, int cpu, + uint32_t *val, bool write) +{ + kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_DIST_REGS, + KVM_VGIC_ATTR(offset, cpu), val, write, &error_abort); +} + +static void kvm_gicc_access(GICState *s, int offset, int cpu, + uint32_t *val, bool write) +{ + kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_CPU_REGS, + KVM_VGIC_ATTR(offset, cpu), val, write, &error_abort); +} + +#define for_each_irq_reg(_ctr, _max_irq, _field_width) \ + for (_ctr = 0; _ctr < ((_max_irq) / (32 / (_field_width))); _ctr++) + +/* + * Translate from the in-kernel field for an IRQ value to/from the qemu + * representation. + */ +typedef void (*vgic_translate_fn)(GICState *s, int irq, int cpu, + uint32_t *field, bool to_kernel); + +/* synthetic translate function used for clear/set registers to completely + * clear a setting using a clear-register before setting the remaining bits + * using a set-register */ +static void translate_clear(GICState *s, int irq, int cpu, + uint32_t *field, bool to_kernel) +{ + if (to_kernel) { + *field = ~0; + } else { + /* does not make sense: qemu model doesn't use set/clear regs */ + abort(); + } +} + +static void translate_group(GICState *s, int irq, int cpu, + uint32_t *field, bool to_kernel) +{ + int cm = (irq < GIC_INTERNAL) ? (1 << cpu) : ALL_CPU_MASK; + + if (to_kernel) { + *field = GIC_DIST_TEST_GROUP(irq, cm); + } else { + if (*field & 1) { + GIC_DIST_SET_GROUP(irq, cm); + } + } +} + +static void translate_enabled(GICState *s, int irq, int cpu, + uint32_t *field, bool to_kernel) +{ + int cm = (irq < GIC_INTERNAL) ? (1 << cpu) : ALL_CPU_MASK; + + if (to_kernel) { + *field = GIC_DIST_TEST_ENABLED(irq, cm); + } else { + if (*field & 1) { + GIC_DIST_SET_ENABLED(irq, cm); + } + } +} + +static void translate_pending(GICState *s, int irq, int cpu, + uint32_t *field, bool to_kernel) +{ + int cm = (irq < GIC_INTERNAL) ? (1 << cpu) : ALL_CPU_MASK; + + if (to_kernel) { + *field = gic_test_pending(s, irq, cm); + } else { + if (*field & 1) { + GIC_DIST_SET_PENDING(irq, cm); + /* TODO: Capture whether a level-triggered line is held high in the kernel */ + } + } +} + +static void translate_active(GICState *s, int irq, int cpu, + uint32_t *field, bool to_kernel) +{ + int cm = (irq < GIC_INTERNAL) ? (1 << cpu) : ALL_CPU_MASK; + + if (to_kernel) { + *field = GIC_DIST_TEST_ACTIVE(irq, cm); + } else { + if (*field & 1) { + GIC_DIST_SET_ACTIVE(irq, cm); + } + } +} + +static void translate_trigger(GICState *s, int irq, int cpu, + uint32_t *field, bool to_kernel) +{ + if (to_kernel) { + *field = (GIC_DIST_TEST_EDGE_TRIGGER(irq)) ?
0x2 : 0x0; + } else { + if (*field & 0x2) { + GIC_DIST_SET_EDGE_TRIGGER(irq); + } + } +} + +static void translate_priority(GICState *s, int irq, int cpu, + uint32_t *field, bool to_kernel) +{ + if (to_kernel) { + *field = GIC_DIST_GET_PRIORITY(irq, cpu) & 0xff; + } else { + gic_dist_set_priority(s, cpu, irq, + *field & 0xff, MEMTXATTRS_UNSPECIFIED); + } +} + +static void translate_targets(GICState *s, int irq, int cpu, + uint32_t *field, bool to_kernel) +{ + if (to_kernel) { + *field = s->irq_target[irq] & 0xff; + } else { + s->irq_target[irq] = *field & 0xff; + } +} + +static void translate_sgisource(GICState *s, int irq, int cpu, + uint32_t *field, bool to_kernel) +{ + if (to_kernel) { + *field = s->sgi_pending[irq][cpu] & 0xff; + } else { + s->sgi_pending[irq][cpu] = *field & 0xff; + } +} + +/* Read a register group from the kernel VGIC */ +static void kvm_dist_get(GICState *s, uint32_t offset, int width, + int maxirq, vgic_translate_fn translate_fn) +{ + uint32_t reg; + int i; + int j; + int irq; + int cpu; + int regsz = 32 / width; /* irqs per kernel register */ + uint32_t field; + + for_each_irq_reg(i, maxirq, width) { + irq = i * regsz; + cpu = 0; + while ((cpu < s->num_cpu && irq < GIC_INTERNAL) || cpu == 0) { + kvm_gicd_access(s, offset, cpu, &reg, false); + for (j = 0; j < regsz; j++) { + field = extract32(reg, j * width, width); + translate_fn(s, irq + j, cpu, &field, false); + } + + cpu++; + } + offset += 4; + } +} + +/* Write a register group to the kernel VGIC */ +static void kvm_dist_put(GICState *s, uint32_t offset, int width, + int maxirq, vgic_translate_fn translate_fn) +{ + uint32_t reg; + int i; + int j; + int irq; + int cpu; + int regsz = 32 / width; /* irqs per kernel register */ + uint32_t field; + + for_each_irq_reg(i, maxirq, width) { + irq = i * regsz; + cpu = 0; + while ((cpu < s->num_cpu && irq < GIC_INTERNAL) || cpu == 0) { + reg = 0; + for (j = 0; j < regsz; j++) { + translate_fn(s, irq + j, cpu, &field, true); + reg = deposit32(reg, j * width, width, field); + } + kvm_gicd_access(s, offset, cpu, &reg, true); + + cpu++; + } + offset += 4; + } +} + +static void kvm_arm_gic_put(GICState *s) +{ + uint32_t reg; + int i; + int cpu; + int num_cpu; + int num_irq; + + /* Note: We do the restore in a slightly different order than the save + * (where the order doesn't matter and is simply ordered according to the + * register offset values) */ + + /***************************************************************** + * Distributor State + */ + + /* s->ctlr -> GICD_CTLR */ + reg = s->ctlr; + kvm_gicd_access(s, 0x0, 0, &reg, true); + + /* Sanity checking on GICD_TYPER and s->num_irq, s->num_cpu */ + kvm_gicd_access(s, 0x4, 0, &reg, false); + num_irq = ((reg & 0x1f) + 1) * 32; + num_cpu = ((reg & 0xe0) >> 5) + 1; + + if (num_irq < s->num_irq) { + fprintf(stderr, "Restoring %u IRQs, but kernel supports max %d\n", + s->num_irq, num_irq); + abort(); + } else if (num_cpu != s->num_cpu) { + fprintf(stderr, "Restoring %u CPU interfaces, kernel only has %d\n", + s->num_cpu, num_cpu); + /* Did we not create the VCPUs in the kernel yet? */ + abort(); + } + + /* TODO: Consider checking compatibility with the IIDR ?
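Each distributor register bank walked by kvm_dist_get()/kvm_dist_put() packs 32/width per-interrupt fields into consecutive 32-bit registers. The standalone sketch below reproduces the offset and bit-position arithmetic for a few of the banks used in this file:

#include <stdint.h>
#include <stdio.h>

/* Locate the field for 'irq' in a bank at 'base' with 'width'-bit fields. */
static void locate(const char *name, uint32_t base, int width, int irq)
{
    int per_reg = 32 / width;                 /* fields per register */
    uint32_t offset = base + 4 * (irq / per_reg);
    int shift = (irq % per_reg) * width;

    printf("%-17s irq %2d: offset 0x%03x, bits [%d:%d]\n",
           name, irq, offset, shift + width - 1, shift);
}

int main(void)
{
    locate("GICD_ISENABLERn",  0x100, 1, 70); /* 0x108, bit 6        */
    locate("GICD_ICFGRn",      0xc00, 2, 70); /* 0xc10, bits [13:12] */
    locate("GICD_IPRIORITYRn", 0x400, 8, 70); /* 0x444, bits [23:16] */
    return 0;
}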
*/ + + /* irq_state[n].enabled -> GICD_ISENABLERn */ + kvm_dist_put(s, 0x180, 1, s->num_irq, translate_clear); + kvm_dist_put(s, 0x100, 1, s->num_irq, translate_enabled); + + /* irq_state[n].group -> GICD_IGROUPRn */ + kvm_dist_put(s, 0x80, 1, s->num_irq, translate_group); + + /* s->irq_target[irq] -> GICD_ITARGETSRn + * (restore targets before pending to ensure the pending state is set on + * the appropriate CPU interfaces in the kernel) */ + kvm_dist_put(s, 0x800, 8, s->num_irq, translate_targets); + + /* irq_state[n].trigger -> GICD_ICFGRn + * (restore configuration registers before pending IRQs so we treat + * level/edge correctly) */ + kvm_dist_put(s, 0xc00, 2, s->num_irq, translate_trigger); + + /* irq_state[n].pending + irq_state[n].level -> GICD_ISPENDRn */ + kvm_dist_put(s, 0x280, 1, s->num_irq, translate_clear); + kvm_dist_put(s, 0x200, 1, s->num_irq, translate_pending); + + /* irq_state[n].active -> GICD_ISACTIVERn */ + kvm_dist_put(s, 0x380, 1, s->num_irq, translate_clear); + kvm_dist_put(s, 0x300, 1, s->num_irq, translate_active); + + + /* s->priorityX[irq] -> ICD_IPRIORITYRn */ + kvm_dist_put(s, 0x400, 8, s->num_irq, translate_priority); + + /* s->sgi_pending -> ICD_CPENDSGIRn */ + kvm_dist_put(s, 0xf10, 8, GIC_NR_SGIS, translate_clear); + kvm_dist_put(s, 0xf20, 8, GIC_NR_SGIS, translate_sgisource); + + + /***************************************************************** + * CPU Interface(s) State + */ + + for (cpu = 0; cpu < s->num_cpu; cpu++) { + /* s->cpu_ctlr[cpu] -> GICC_CTLR */ + reg = s->cpu_ctlr[cpu]; + kvm_gicc_access(s, 0x00, cpu, &reg, true); + + /* s->priority_mask[cpu] -> GICC_PMR */ + reg = (s->priority_mask[cpu] & 0xff); + kvm_gicc_access(s, 0x04, cpu, &reg, true); + + /* s->bpr[cpu] -> GICC_BPR */ + reg = (s->bpr[cpu] & 0x7); + kvm_gicc_access(s, 0x08, cpu, &reg, true); + + /* s->abpr[cpu] -> GICC_ABPR */ + reg = (s->abpr[cpu] & 0x7); + kvm_gicc_access(s, 0x1c, cpu, &reg, true); + + /* s->apr[n][cpu] -> GICC_APRn */ + for (i = 0; i < 4; i++) { + reg = s->apr[i][cpu]; + kvm_gicc_access(s, 0xd0 + i * 4, cpu, &reg, true); + } + } +} + +static void kvm_arm_gic_get(GICState *s) +{ + uint32_t reg; + int i; + int cpu; + + /***************************************************************** + * Distributor State + */ + + /* GICD_CTLR -> s->ctlr */ + kvm_gicd_access(s, 0x0, 0, &reg, false); + s->ctlr = reg; + + /* Sanity checking on GICD_TYPER -> s->num_irq, s->num_cpu */ + kvm_gicd_access(s, 0x4, 0, &reg, false); + s->num_irq = ((reg & 0x1f) + 1) * 32; + s->num_cpu = ((reg & 0xe0) >> 5) + 1; + + if (s->num_irq > GIC_MAXIRQ) { + fprintf(stderr, "Too many IRQs reported from the kernel: %d\n", + s->num_irq); + abort(); + } + + /* GICD_IIDR -> ?
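Both the save and restore paths read GICD_TYPER (offset 0x4) and decode the geometry the kernel reports, exactly as in the sanity checks above; a tiny standalone illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* ITLinesNumber = bits [4:0] as (lines/32)-1; CPUNumber = bits [7:5]
     * as ncpus-1.
     */
    uint32_t typer = 0x22; /* example value: 0b00100010 */
    unsigned num_irq = ((typer & 0x1f) + 1) * 32;
    unsigned num_cpu = ((typer & 0xe0) >> 5) + 1;

    printf("GICD_TYPER 0x%02x -> %u interrupt lines, %u CPU interfaces\n",
           typer, num_irq, num_cpu); /* 96 lines, 2 CPUs */
    return 0;
}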
*/ + kvm_gicd_access(s, 0x8, 0, &reg, false); + + /* Clear all the IRQ settings */ + for (i = 0; i < s->num_irq; i++) { + memset(&s->irq_state[i], 0, sizeof(s->irq_state[0])); + } + + /* GICD_IGROUPRn -> irq_state[n].group */ + kvm_dist_get(s, 0x80, 1, s->num_irq, translate_group); + + /* GICD_ISENABLERn -> irq_state[n].enabled */ + kvm_dist_get(s, 0x100, 1, s->num_irq, translate_enabled); + + /* GICD_ISPENDRn -> irq_state[n].pending + irq_state[n].level */ + kvm_dist_get(s, 0x200, 1, s->num_irq, translate_pending); + + /* GICD_ISACTIVERn -> irq_state[n].active */ + kvm_dist_get(s, 0x300, 1, s->num_irq, translate_active); + + /* GICD_ICFGRn -> irq_state[n].trigger */ + kvm_dist_get(s, 0xc00, 2, s->num_irq, translate_trigger); + + /* GICD_IPRIORITYRn -> s->priorityX[irq] */ + kvm_dist_get(s, 0x400, 8, s->num_irq, translate_priority); + + /* GICD_ITARGETSRn -> s->irq_target[irq] */ + kvm_dist_get(s, 0x800, 8, s->num_irq, translate_targets); + + /* GICD_CPENDSGIRn -> s->sgi_pending */ + kvm_dist_get(s, 0xf10, 8, GIC_NR_SGIS, translate_sgisource); + + + /***************************************************************** + * CPU Interface(s) State + */ + + for (cpu = 0; cpu < s->num_cpu; cpu++) { + /* GICC_CTLR -> s->cpu_ctlr[cpu] */ + kvm_gicc_access(s, 0x00, cpu, &reg, false); + s->cpu_ctlr[cpu] = reg; + + /* GICC_PMR -> s->priority_mask[cpu] */ + kvm_gicc_access(s, 0x04, cpu, &reg, false); + s->priority_mask[cpu] = (reg & 0xff); + + /* GICC_BPR -> s->bpr[cpu] */ + kvm_gicc_access(s, 0x08, cpu, &reg, false); + s->bpr[cpu] = (reg & 0x7); + + /* GICC_ABPR -> s->abpr[cpu] */ + kvm_gicc_access(s, 0x1c, cpu, &reg, false); + s->abpr[cpu] = (reg & 0x7); + + /* GICC_APRn -> s->apr[n][cpu] */ + for (i = 0; i < 4; i++) { + kvm_gicc_access(s, 0xd0 + i * 4, cpu, &reg, false); + s->apr[i][cpu] = reg; + } + } +} + +static void kvm_arm_gic_reset(DeviceState *dev) +{ + GICState *s = ARM_GIC_COMMON(dev); + KVMARMGICClass *kgc = KVM_ARM_GIC_GET_CLASS(s); + + kgc->parent_reset(dev); + + if (kvm_arm_gic_can_save_restore(s)) { + kvm_arm_gic_put(s); + } +} + +static void kvm_arm_gic_realize(DeviceState *dev, Error **errp) +{ + int i; + GICState *s = KVM_ARM_GIC(dev); + KVMARMGICClass *kgc = KVM_ARM_GIC_GET_CLASS(s); + Error *local_err = NULL; + int ret; + + kgc->parent_realize(dev, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + + if (s->security_extn) { + error_setg(errp, "the in-kernel VGIC does not implement the " + "security extensions"); + return; + } + + if (s->virt_extn) { + error_setg(errp, "the in-kernel VGIC does not implement the " + "virtualization extensions"); + return; + } + + if (!kvm_arm_gic_can_save_restore(s)) { + error_setg(&s->migration_blocker, "This operating system kernel does " + "not support vGICv2 migration"); + if (migrate_add_blocker(s->migration_blocker, errp) < 0) { + error_free(s->migration_blocker); + return; + } + } + + gic_init_irqs_and_mmio(s, kvm_arm_gicv2_set_irq, NULL, NULL); + + for (i = 0; i < s->num_irq - GIC_INTERNAL; i++) { + qemu_irq irq = qdev_get_gpio_in(dev, i); + kvm_irqchip_set_qemuirq_gsi(kvm_state, irq, i); + } + + /* Try to create the device via the device control API */ + s->dev_fd = -1; + ret = kvm_create_device(kvm_state, KVM_DEV_TYPE_ARM_VGIC_V2, false); + if (ret >= 0) { + s->dev_fd = ret; + + /* Newstyle API is used, we may have attributes */ + if (kvm_device_check_attr(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_NR_IRQS, 0)) { + uint32_t numirqs = s->num_irq; + kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_NR_IRQS, 0, + &numirqs, true,
&error_abort); + } + /* Tell the kernel to complete VGIC initialization now */ + if (kvm_device_check_attr(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_CTRL, + KVM_DEV_ARM_VGIC_CTRL_INIT)) { + kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_CTRL, + KVM_DEV_ARM_VGIC_CTRL_INIT, NULL, true, + &error_abort); + } + } else if (kvm_check_extension(kvm_state, KVM_CAP_DEVICE_CTRL)) { + error_setg_errno(errp, -ret, "error creating in-kernel VGIC"); + error_append_hint(errp, + "Perhaps the host CPU does not support GICv2?\n"); + } else if (ret != -ENODEV && ret != -ENOTSUP) { + /* + * Very ancient kernel without KVM_CAP_DEVICE_CTRL: assume that + * ENODEV or ENOTSUP mean "can't create GICv2 with KVM_CREATE_DEVICE", + * and that we will get a GICv2 via KVM_CREATE_IRQCHIP. + */ + error_setg_errno(errp, -ret, "error creating in-kernel VGIC"); + return; + } + + /* Distributor */ + kvm_arm_register_device(&s->iomem, + (KVM_ARM_DEVICE_VGIC_V2 << KVM_ARM_DEVICE_ID_SHIFT) + | KVM_VGIC_V2_ADDR_TYPE_DIST, + KVM_DEV_ARM_VGIC_GRP_ADDR, + KVM_VGIC_V2_ADDR_TYPE_DIST, + s->dev_fd, 0); + /* CPU interface for current core. Unlike arm_gic, we don't + * provide the "interface for core #N" memory regions, because + * cores with a VGIC don't have those. + */ + kvm_arm_register_device(&s->cpuiomem[0], + (KVM_ARM_DEVICE_VGIC_V2 << KVM_ARM_DEVICE_ID_SHIFT) + | KVM_VGIC_V2_ADDR_TYPE_CPU, + KVM_DEV_ARM_VGIC_GRP_ADDR, + KVM_VGIC_V2_ADDR_TYPE_CPU, + s->dev_fd, 0); + + if (kvm_has_gsi_routing()) { + /* set up irq routing */ + for (i = 0; i < s->num_irq - GIC_INTERNAL; ++i) { + kvm_irqchip_add_irq_route(kvm_state, i, 0, i); + } + + kvm_gsi_routing_allowed = true; + + kvm_irqchip_commit_routes(kvm_state); + } +} + +static void kvm_arm_gic_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + ARMGICCommonClass *agcc = ARM_GIC_COMMON_CLASS(klass); + KVMARMGICClass *kgc = KVM_ARM_GIC_CLASS(klass); + + agcc->pre_save = kvm_arm_gic_get; + agcc->post_load = kvm_arm_gic_put; + device_class_set_parent_realize(dc, kvm_arm_gic_realize, + &kgc->parent_realize); + device_class_set_parent_reset(dc, kvm_arm_gic_reset, &kgc->parent_reset); +} + +static const TypeInfo kvm_arm_gic_info = { + .name = TYPE_KVM_ARM_GIC, + .parent = TYPE_ARM_GIC_COMMON, + .instance_size = sizeof(GICState), + .class_init = kvm_arm_gic_class_init, + .class_size = sizeof(KVMARMGICClass), +}; + +static void kvm_arm_gic_register_types(void) +{ + type_register_static(&kvm_arm_gic_info); +} + +type_init(kvm_arm_gic_register_types) diff --git a/hw/intc/arm_gicv2m.c b/hw/intc/arm_gicv2m.c new file mode 100644 index 000000000..d564b857e --- /dev/null +++ b/hw/intc/arm_gicv2m.c @@ -0,0 +1,200 @@ +/* + * GICv2m extension for MSI/MSI-x support with a GICv2-based system + * + * Copyright (C) 2015 Linaro, All rights reserved. + * + * Author: Christoffer Dall + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */ + +/* This file implements an emulated GICv2m widget as described in the ARM + * Server Base System Architecture (SBSA) specification Version 2.2 + * (ARM-DEN-0029 v2.2) pages 35-39 without any optional implementation defined + * identification registers and with a single non-secure MSI register frame. + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "hw/sysbus.h" +#include "hw/irq.h" +#include "hw/pci/msi.h" +#include "hw/qdev-properties.h" +#include "sysemu/kvm.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "qom/object.h" + +#define TYPE_ARM_GICV2M "arm-gicv2m" +OBJECT_DECLARE_SIMPLE_TYPE(ARMGICv2mState, ARM_GICV2M) + +#define GICV2M_NUM_SPI_MAX 128 + +#define V2M_MSI_TYPER 0x008 +#define V2M_MSI_SETSPI_NS 0x040 +#define V2M_MSI_IIDR 0xFCC +#define V2M_IIDR0 0xFD0 +#define V2M_IIDR11 0xFFC + +#define PRODUCT_ID_QEMU 0x51 /* ASCII code Q */ + +struct ARMGICv2mState { + SysBusDevice parent_obj; + + MemoryRegion iomem; + qemu_irq spi[GICV2M_NUM_SPI_MAX]; + + uint32_t base_spi; + uint32_t num_spi; +}; + +static void gicv2m_set_irq(void *opaque, int irq) +{ + ARMGICv2mState *s = (ARMGICv2mState *)opaque; + + qemu_irq_pulse(s->spi[irq]); +} + +static uint64_t gicv2m_read(void *opaque, hwaddr offset, + unsigned size) +{ + ARMGICv2mState *s = (ARMGICv2mState *)opaque; + uint32_t val; + + if (size != 4) { + qemu_log_mask(LOG_GUEST_ERROR, "gicv2m_read: bad size %u\n", size); + return 0; + } + + switch (offset) { + case V2M_MSI_TYPER: + val = (s->base_spi + 32) << 16; + val |= s->num_spi; + return val; + case V2M_MSI_IIDR: + /* We don't have any valid implementor so we leave that field as zero + * and we return 0 in the arch revision as per the spec. + */ + return (PRODUCT_ID_QEMU << 20); + case V2M_IIDR0 ... V2M_IIDR11: + /* We do not implement any optional identification registers and the + * mandatory MSI_PIDR2 register reads as 0x0, so we capture all + * implementation defined registers here. + */ + return 0; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "gicv2m_read: Bad offset %x\n", (int)offset); + return 0; + } +} + +static void gicv2m_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + ARMGICv2mState *s = (ARMGICv2mState *)opaque; + + if (size != 2 && size != 4) { + qemu_log_mask(LOG_GUEST_ERROR, "gicv2m_write: bad size %u\n", size); + return; + } + + switch (offset) { + case V2M_MSI_SETSPI_NS: { + int spi; + + spi = (value & 0x3ff) - (s->base_spi + 32); + if (spi >= 0 && spi < s->num_spi) { + gicv2m_set_irq(s, spi); + } + return; + } + default: + qemu_log_mask(LOG_GUEST_ERROR, + "gicv2m_write: Bad offset %x\n", (int)offset); + } +} + +static const MemoryRegionOps gicv2m_ops = { + .read = gicv2m_read, + .write = gicv2m_write, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static void gicv2m_realize(DeviceState *dev, Error **errp) +{ + ARMGICv2mState *s = ARM_GICV2M(dev); + int i; + + if (s->num_spi > GICV2M_NUM_SPI_MAX) { + error_setg(errp, + "requested %u SPIs exceeds GICv2m frame maximum %d", + s->num_spi, GICV2M_NUM_SPI_MAX); + return; + } + + if (s->base_spi + 32 > 1020 - s->num_spi) { + error_setg(errp, + "requested base SPI %u+%u exceeds max. 
number 1020", + s->base_spi + 32, s->num_spi); + return; + } + + for (i = 0; i < s->num_spi; i++) { + sysbus_init_irq(SYS_BUS_DEVICE(dev), &s->spi[i]); + } + + msi_nonbroken = true; + kvm_gsi_direct_mapping = true; + kvm_msi_via_irqfd_allowed = kvm_irqfds_enabled(); +} + +static void gicv2m_init(Object *obj) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + ARMGICv2mState *s = ARM_GICV2M(obj); + + memory_region_init_io(&s->iomem, OBJECT(s), &gicv2m_ops, s, + "gicv2m", 0x1000); + sysbus_init_mmio(sbd, &s->iomem); +} + +static Property gicv2m_properties[] = { + DEFINE_PROP_UINT32("base-spi", ARMGICv2mState, base_spi, 0), + DEFINE_PROP_UINT32("num-spi", ARMGICv2mState, num_spi, 64), + DEFINE_PROP_END_OF_LIST(), +}; + +static void gicv2m_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + device_class_set_props(dc, gicv2m_properties); + dc->realize = gicv2m_realize; +} + +static const TypeInfo gicv2m_info = { + .name = TYPE_ARM_GICV2M, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(ARMGICv2mState), + .instance_init = gicv2m_init, + .class_init = gicv2m_class_init, +}; + +static void gicv2m_register_types(void) +{ + type_register_static(&gicv2m_info); +} + +type_init(gicv2m_register_types) diff --git a/hw/intc/arm_gicv3.c b/hw/intc/arm_gicv3.c new file mode 100644 index 000000000..9f5f815db --- /dev/null +++ b/hw/intc/arm_gicv3.c @@ -0,0 +1,420 @@ +/* + * ARM Generic Interrupt Controller v3 + * + * Copyright (c) 2015 Huawei. + * Copyright (c) 2016 Linaro Limited + * Written by Shlomo Pongratz, Peter Maydell + * + * This code is licensed under the GPL, version 2 or (at your option) + * any later version. + */ + +/* This file contains implementation code for an interrupt controller + * which implements the GICv3 architecture. Specifically this is where + * the device class itself and the functions for handling interrupts + * coming in and going out live. + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/module.h" +#include "hw/intc/arm_gicv3.h" +#include "gicv3_internal.h" + +static bool irqbetter(GICv3CPUState *cs, int irq, uint8_t prio) +{ + /* Return true if this IRQ at this priority should take + * precedence over the current recorded highest priority + * pending interrupt for this CPU. We also return true if + * the current recorded highest priority pending interrupt + * is the same as this one (a property which the calling code + * relies on). + */ + if (prio < cs->hppi.prio) { + return true; + } + /* If multiple pending interrupts have the same priority then it is an + * IMPDEF choice which of them to signal to the CPU. We choose to + * signal the one with the lowest interrupt number. + */ + if (prio == cs->hppi.prio && irq <= cs->hppi.irq) { + return true; + } + return false; +} + +static uint32_t gicd_int_pending(GICv3State *s, int irq) +{ + /* Recalculate which distributor interrupts are actually pending + * in the group of 32 interrupts starting at irq (which should be a multiple + * of 32), and return a 32-bit integer which has a bit set for each + * interrupt that is eligible to be signaled to the CPU interface. + * + * An interrupt is pending if: + * + the PENDING latch is set OR it is level triggered and the input is 1 + * + its ENABLE bit is set + * + the GICD enable bit for its group is set + * + its ACTIVE bit is not set (otherwise it would be Active+Pending) + * Conveniently we can bulk-calculate this with bitwise operations. 
+ */ + uint32_t pend, grpmask; + uint32_t pending = *gic_bmp_ptr32(s->pending, irq); + uint32_t edge_trigger = *gic_bmp_ptr32(s->edge_trigger, irq); + uint32_t level = *gic_bmp_ptr32(s->level, irq); + uint32_t group = *gic_bmp_ptr32(s->group, irq); + uint32_t grpmod = *gic_bmp_ptr32(s->grpmod, irq); + uint32_t enable = *gic_bmp_ptr32(s->enabled, irq); + uint32_t active = *gic_bmp_ptr32(s->active, irq); + + pend = pending | (~edge_trigger & level); + pend &= enable; + pend &= ~active; + + if (s->gicd_ctlr & GICD_CTLR_DS) { + grpmod = 0; + } + + grpmask = 0; + if (s->gicd_ctlr & GICD_CTLR_EN_GRP1NS) { + grpmask |= group; + } + if (s->gicd_ctlr & GICD_CTLR_EN_GRP1S) { + grpmask |= (~group & grpmod); + } + if (s->gicd_ctlr & GICD_CTLR_EN_GRP0) { + grpmask |= (~group & ~grpmod); + } + pend &= grpmask; + + return pend; +} + +static uint32_t gicr_int_pending(GICv3CPUState *cs) +{ + /* Recalculate which redistributor interrupts are actually pending, + * and return a 32-bit integer which has a bit set for each interrupt + * that is eligible to be signaled to the CPU interface. + * + * An interrupt is pending if: + * + the PENDING latch is set OR it is level triggered and the input is 1 + * + its ENABLE bit is set + * + the GICD enable bit for its group is set + * + its ACTIVE bit is not set (otherwise it would be Active+Pending) + * Conveniently we can bulk-calculate this with bitwise operations. + */ + uint32_t pend, grpmask, grpmod; + + pend = cs->gicr_ipendr0 | (~cs->edge_trigger & cs->level); + pend &= cs->gicr_ienabler0; + pend &= ~cs->gicr_iactiver0; + + if (cs->gic->gicd_ctlr & GICD_CTLR_DS) { + grpmod = 0; + } else { + grpmod = cs->gicr_igrpmodr0; + } + + grpmask = 0; + if (cs->gic->gicd_ctlr & GICD_CTLR_EN_GRP1NS) { + grpmask |= cs->gicr_igroupr0; + } + if (cs->gic->gicd_ctlr & GICD_CTLR_EN_GRP1S) { + grpmask |= (~cs->gicr_igroupr0 & grpmod); + } + if (cs->gic->gicd_ctlr & GICD_CTLR_EN_GRP0) { + grpmask |= (~cs->gicr_igroupr0 & ~grpmod); + } + pend &= grpmask; + + return pend; +} + +/* Update the interrupt status after state in a redistributor + * or CPU interface has changed, but don't tell the CPU i/f. + */ +static void gicv3_redist_update_noirqset(GICv3CPUState *cs) +{ + /* Find the highest priority pending interrupt among the + * redistributor interrupts (SGIs and PPIs). + */ + bool seenbetter = false; + uint8_t prio; + int i; + uint32_t pend; + + /* Find out which redistributor interrupts are eligible to be + * signaled to the CPU interface. + */ + pend = gicr_int_pending(cs); + + if (pend) { + for (i = 0; i < GIC_INTERNAL; i++) { + if (!(pend & (1 << i))) { + continue; + } + prio = cs->gicr_ipriorityr[i]; + if (irqbetter(cs, i, prio)) { + cs->hppi.irq = i; + cs->hppi.prio = prio; + seenbetter = true; + } + } + } + + if (seenbetter) { + cs->hppi.grp = gicv3_irq_group(cs->gic, cs, cs->hppi.irq); + } + + if ((cs->gicr_ctlr & GICR_CTLR_ENABLE_LPIS) && cs->gic->lpi_enable && + (cs->hpplpi.prio != 0xff)) { + if (irqbetter(cs, cs->hpplpi.irq, cs->hpplpi.prio)) { + cs->hppi.irq = cs->hpplpi.irq; + cs->hppi.prio = cs->hpplpi.prio; + cs->hppi.grp = cs->hpplpi.grp; + seenbetter = true; + } + } + + /* If the best interrupt we just found would preempt whatever + * was the previous best interrupt before this update, then + * we know it's definitely the best one now. + * If we didn't find an interrupt that would preempt the previous + * best, and the previous best is outside our range (or there was no + * previous pending interrupt at all), then that is still valid, and + * we leave it as the best. 
+ * Otherwise, we need to do a full update (because the previous best + * interrupt has reduced in priority and any other interrupt could + * now be the new best one). + */ + if (!seenbetter && cs->hppi.prio != 0xff && + (cs->hppi.irq < GIC_INTERNAL || + cs->hppi.irq >= GICV3_LPI_INTID_START)) { + gicv3_full_update_noirqset(cs->gic); + } +} + +/* Update the GIC status after state in a redistributor or + * CPU interface has changed, and inform the CPU i/f of + * its new highest priority pending interrupt. + */ +void gicv3_redist_update(GICv3CPUState *cs) +{ + gicv3_redist_update_noirqset(cs); + gicv3_cpuif_update(cs); +} + +/* Update the GIC status after state in the distributor has + * changed affecting @len interrupts starting at @start, + * but don't tell the CPU i/f. + */ +static void gicv3_update_noirqset(GICv3State *s, int start, int len) +{ + int i; + uint8_t prio; + uint32_t pend = 0; + + assert(start >= GIC_INTERNAL); + assert(len > 0); + + for (i = 0; i < s->num_cpu; i++) { + s->cpu[i].seenbetter = false; + } + + /* Find the highest priority pending interrupt in this range. */ + for (i = start; i < start + len; i++) { + GICv3CPUState *cs; + + if (i == start || (i & 0x1f) == 0) { + /* Calculate the next 32 bits worth of pending status */ + pend = gicd_int_pending(s, i & ~0x1f); + } + + if (!(pend & (1 << (i & 0x1f)))) { + continue; + } + cs = s->gicd_irouter_target[i]; + if (!cs) { + /* Interrupts targeting no implemented CPU should remain pending + * and not be forwarded to any CPU. + */ + continue; + } + prio = s->gicd_ipriority[i]; + if (irqbetter(cs, i, prio)) { + cs->hppi.irq = i; + cs->hppi.prio = prio; + cs->seenbetter = true; + } + } + + /* If the best interrupt we just found would preempt whatever + * was the previous best interrupt before this update, then + * we know it's definitely the best one now. + * If we didn't find an interrupt that would preempt the previous + * best, and the previous best is outside our range (or there was + * no previous pending interrupt at all), then that + * is still valid, and we leave it as the best. + * Otherwise, we need to do a full update (because the previous best + * interrupt has reduced in priority and any other interrupt could + * now be the new best one). + */ + for (i = 0; i < s->num_cpu; i++) { + GICv3CPUState *cs = &s->cpu[i]; + + if (cs->seenbetter) { + cs->hppi.grp = gicv3_irq_group(cs->gic, cs, cs->hppi.irq); + } + + if (!cs->seenbetter && cs->hppi.prio != 0xff && + cs->hppi.irq >= start && cs->hppi.irq < start + len) { + gicv3_full_update_noirqset(s); + break; + } + } +} + +void gicv3_update(GICv3State *s, int start, int len) +{ + int i; + + gicv3_update_noirqset(s, start, len); + for (i = 0; i < s->num_cpu; i++) { + gicv3_cpuif_update(&s->cpu[i]); + } +} + +void gicv3_full_update_noirqset(GICv3State *s) +{ + /* Completely recalculate the GIC status from scratch, but + * don't update any outbound IRQ lines. + */ + int i; + + for (i = 0; i < s->num_cpu; i++) { + s->cpu[i].hppi.prio = 0xff; + } + + /* Note that we can guarantee that these functions will not + * recursively call back into gicv3_full_update(), because + * at each point the "previous best" is always outside the + * range we ask them to update. + */ + gicv3_update_noirqset(s, GIC_INTERNAL, s->num_irq - GIC_INTERNAL); + + for (i = 0; i < s->num_cpu; i++) { + gicv3_redist_update_noirqset(&s->cpu[i]); + } +} + +void gicv3_full_update(GICv3State *s) +{ + /* Completely recalculate the GIC status from scratch, including + * updating outbound IRQ lines. 
+ */ + int i; + + gicv3_full_update_noirqset(s); + for (i = 0; i < s->num_cpu; i++) { + gicv3_cpuif_update(&s->cpu[i]); + } +} + +/* Process a change in an external IRQ input. */ +static void gicv3_set_irq(void *opaque, int irq, int level) +{ + /* Meaning of the 'irq' parameter: + * [0..N-1] : external interrupts + * [N..N+31] : PPI (internal) interrupts for CPU 0 + * [N+32..N+63] : PPI (internal) interrupts for CPU 1 + * ... + */ + GICv3State *s = opaque; + + if (irq < (s->num_irq - GIC_INTERNAL)) { + /* external interrupt (SPI) */ + gicv3_dist_set_irq(s, irq + GIC_INTERNAL, level); + } else { + /* per-cpu interrupt (PPI) */ + int cpu; + + irq -= (s->num_irq - GIC_INTERNAL); + cpu = irq / GIC_INTERNAL; + irq %= GIC_INTERNAL; + assert(cpu < s->num_cpu); + /* Raising SGIs via this function would be a bug in how the board + * model wires up interrupts. + */ + assert(irq >= GIC_NR_SGIS); + gicv3_redist_set_irq(&s->cpu[cpu], irq, level); + } +} + +static void arm_gicv3_post_load(GICv3State *s) +{ + int i; + /* Recalculate our cached idea of the current highest priority + * pending interrupt, but don't set IRQ or FIQ lines. + */ + for (i = 0; i < s->num_cpu; i++) { + gicv3_redist_update_lpi_only(&s->cpu[i]); + } + gicv3_full_update_noirqset(s); + /* Repopulate the cache of GICv3CPUState pointers for target CPUs */ + gicv3_cache_all_target_cpustates(s); +} + +static const MemoryRegionOps gic_ops[] = { + { + .read_with_attrs = gicv3_dist_read, + .write_with_attrs = gicv3_dist_write, + .endianness = DEVICE_NATIVE_ENDIAN, + }, + { + .read_with_attrs = gicv3_redist_read, + .write_with_attrs = gicv3_redist_write, + .endianness = DEVICE_NATIVE_ENDIAN, + } +}; + +static void arm_gic_realize(DeviceState *dev, Error **errp) +{ + /* Device instance realize function for the GIC sysbus device */ + GICv3State *s = ARM_GICV3(dev); + ARMGICv3Class *agc = ARM_GICV3_GET_CLASS(s); + Error *local_err = NULL; + + agc->parent_realize(dev, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + + gicv3_init_irqs_and_mmio(s, gicv3_set_irq, gic_ops); + + gicv3_init_cpuif(s); +} + +static void arm_gicv3_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + ARMGICv3CommonClass *agcc = ARM_GICV3_COMMON_CLASS(klass); + ARMGICv3Class *agc = ARM_GICV3_CLASS(klass); + + agcc->post_load = arm_gicv3_post_load; + device_class_set_parent_realize(dc, arm_gic_realize, &agc->parent_realize); +} + +static const TypeInfo arm_gicv3_info = { + .name = TYPE_ARM_GICV3, + .parent = TYPE_ARM_GICV3_COMMON, + .instance_size = sizeof(GICv3State), + .class_init = arm_gicv3_class_init, + .class_size = sizeof(ARMGICv3Class), +}; + +static void arm_gicv3_register_types(void) +{ + type_register_static(&arm_gicv3_info); +} + +type_init(arm_gicv3_register_types) diff --git a/hw/intc/arm_gicv3_common.c b/hw/intc/arm_gicv3_common.c new file mode 100644 index 000000000..9884d2e39 --- /dev/null +++ b/hw/intc/arm_gicv3_common.c @@ -0,0 +1,561 @@ +/* + * ARM GICv3 support - common bits of emulated and KVM kernel model + * + * Copyright (c) 2012 Linaro Limited + * Copyright (c) 2015 Huawei. + * Copyright (c) 2015 Samsung Electronics Co., Ltd. + * Written by Peter Maydell + * Reworked for GICv3 by Shlomo Pongratz and Pavel Fedin + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version.
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see <http://www.gnu.org/licenses/>. + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/module.h" +#include "hw/core/cpu.h" +#include "hw/intc/arm_gicv3_common.h" +#include "hw/qdev-properties.h" +#include "migration/vmstate.h" +#include "gicv3_internal.h" +#include "hw/arm/linux-boot-if.h" +#include "sysemu/kvm.h" + + +static void gicv3_gicd_no_migration_shift_bug_post_load(GICv3State *cs) +{ + if (cs->gicd_no_migration_shift_bug) { + return; + } + + /* Older versions of QEMU had a bug in the handling of state save/restore + * to the KVM GICv3: they got the offset in the bitmap arrays wrong, + * so that instead of the data for external interrupts 32 and up + * starting at bit position 32 in the bitmap, it started at bit + * position 64. If we're receiving data from a QEMU with that bug, + * we must move the data down into the right place. + */ + memmove(cs->group, (uint8_t *)cs->group + GIC_INTERNAL / 8, + sizeof(cs->group) - GIC_INTERNAL / 8); + memmove(cs->grpmod, (uint8_t *)cs->grpmod + GIC_INTERNAL / 8, + sizeof(cs->grpmod) - GIC_INTERNAL / 8); + memmove(cs->enabled, (uint8_t *)cs->enabled + GIC_INTERNAL / 8, + sizeof(cs->enabled) - GIC_INTERNAL / 8); + memmove(cs->pending, (uint8_t *)cs->pending + GIC_INTERNAL / 8, + sizeof(cs->pending) - GIC_INTERNAL / 8); + memmove(cs->active, (uint8_t *)cs->active + GIC_INTERNAL / 8, + sizeof(cs->active) - GIC_INTERNAL / 8); + memmove(cs->edge_trigger, (uint8_t *)cs->edge_trigger + GIC_INTERNAL / 8, + sizeof(cs->edge_trigger) - GIC_INTERNAL / 8); + + /* + * This version of QEMU does not have the bug (we have just fixed up + * the data layout above), so set the flag to true; this is needed for + * a subsequent migration from this QEMU to work correctly. + */ + cs->gicd_no_migration_shift_bug = true; +} + +static int gicv3_pre_save(void *opaque) +{ + GICv3State *s = (GICv3State *)opaque; + ARMGICv3CommonClass *c = ARM_GICV3_COMMON_GET_CLASS(s); + + if (c->pre_save) { + c->pre_save(s); + } + + return 0; +} + +static int gicv3_post_load(void *opaque, int version_id) +{ + GICv3State *s = (GICv3State *)opaque; + ARMGICv3CommonClass *c = ARM_GICV3_COMMON_GET_CLASS(s); + + gicv3_gicd_no_migration_shift_bug_post_load(s); + + if (c->post_load) { + c->post_load(s); + } + return 0; +} + +static bool virt_state_needed(void *opaque) +{ + GICv3CPUState *cs = opaque; + + return cs->num_list_regs != 0; +} + +static const VMStateDescription vmstate_gicv3_cpu_virt = { + .name = "arm_gicv3_cpu/virt", + .version_id = 1, + .minimum_version_id = 1, + .needed = virt_state_needed, + .fields = (VMStateField[]) { + VMSTATE_UINT64_2DARRAY(ich_apr, GICv3CPUState, 3, 4), + VMSTATE_UINT64(ich_hcr_el2, GICv3CPUState), + VMSTATE_UINT64_ARRAY(ich_lr_el2, GICv3CPUState, GICV3_LR_MAX), + VMSTATE_UINT64(ich_vmcr_el2, GICv3CPUState), + VMSTATE_END_OF_LIST() + } +}; + +static int vmstate_gicv3_cpu_pre_load(void *opaque) +{ + GICv3CPUState *cs = opaque; + + /* + * If the sre_el1 subsection is not transferred this + * means SRE_EL1 is 0x7 (which might not be the same as + * our reset value).
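+ * (0x7 is SRE=1, DFB=1 and DIB=1: system register access enabled, + * with both FIQ and IRQ bypass disabled.)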
+ */ + cs->icc_sre_el1 = 0x7; + return 0; +} + +static bool icc_sre_el1_reg_needed(void *opaque) +{ + GICv3CPUState *cs = opaque; + + return cs->icc_sre_el1 != 7; +} + +const VMStateDescription vmstate_gicv3_cpu_sre_el1 = { + .name = "arm_gicv3_cpu/sre_el1", + .version_id = 1, + .minimum_version_id = 1, + .needed = icc_sre_el1_reg_needed, + .fields = (VMStateField[]) { + VMSTATE_UINT64(icc_sre_el1, GICv3CPUState), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_gicv3_cpu = { + .name = "arm_gicv3_cpu", + .version_id = 1, + .minimum_version_id = 1, + .pre_load = vmstate_gicv3_cpu_pre_load, + .fields = (VMStateField[]) { + VMSTATE_UINT32(level, GICv3CPUState), + VMSTATE_UINT32(gicr_ctlr, GICv3CPUState), + VMSTATE_UINT32_ARRAY(gicr_statusr, GICv3CPUState, 2), + VMSTATE_UINT32(gicr_waker, GICv3CPUState), + VMSTATE_UINT64(gicr_propbaser, GICv3CPUState), + VMSTATE_UINT64(gicr_pendbaser, GICv3CPUState), + VMSTATE_UINT32(gicr_igroupr0, GICv3CPUState), + VMSTATE_UINT32(gicr_ienabler0, GICv3CPUState), + VMSTATE_UINT32(gicr_ipendr0, GICv3CPUState), + VMSTATE_UINT32(gicr_iactiver0, GICv3CPUState), + VMSTATE_UINT32(edge_trigger, GICv3CPUState), + VMSTATE_UINT32(gicr_igrpmodr0, GICv3CPUState), + VMSTATE_UINT32(gicr_nsacr, GICv3CPUState), + VMSTATE_UINT8_ARRAY(gicr_ipriorityr, GICv3CPUState, GIC_INTERNAL), + VMSTATE_UINT64_ARRAY(icc_ctlr_el1, GICv3CPUState, 2), + VMSTATE_UINT64(icc_pmr_el1, GICv3CPUState), + VMSTATE_UINT64_ARRAY(icc_bpr, GICv3CPUState, 3), + VMSTATE_UINT64_2DARRAY(icc_apr, GICv3CPUState, 3, 4), + VMSTATE_UINT64_ARRAY(icc_igrpen, GICv3CPUState, 3), + VMSTATE_UINT64(icc_ctlr_el3, GICv3CPUState), + VMSTATE_END_OF_LIST() + }, + .subsections = (const VMStateDescription * []) { + &vmstate_gicv3_cpu_virt, + &vmstate_gicv3_cpu_sre_el1, + NULL + } +}; + +static int gicv3_pre_load(void *opaque) +{ + GICv3State *cs = opaque; + + /* + * The gicd_no_migration_shift_bug flag is used for migration compatibility + * with older versions of QEMU, which may have the GICD bitmap shift bug + * when using KVM. Strictly, what we want to know is whether the migration + * source is using KVM. Since we don't have any way to determine that, we + * look at whether the destination is using KVM; this is close enough + * because for the older QEMU versions with this bug KVM -> TCG migration + * didn't work anyway. If the source is a newer QEMU without this bug it + * will transmit the migration subsection which sets the flag to true; + * otherwise it will remain set to the value we select here.
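+ * (In short: under KVM we assume the incoming data has the bug unless + * the subsection tells us otherwise; under TCG we assume it does not.)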
+ */ + if (kvm_enabled()) { + cs->gicd_no_migration_shift_bug = false; + } + + return 0; +} + +static bool needed_always(void *opaque) +{ + return true; +} + +const VMStateDescription vmstate_gicv3_gicd_no_migration_shift_bug = { + .name = "arm_gicv3/gicd_no_migration_shift_bug", + .version_id = 1, + .minimum_version_id = 1, + .needed = needed_always, + .fields = (VMStateField[]) { + VMSTATE_BOOL(gicd_no_migration_shift_bug, GICv3State), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_gicv3 = { + .name = "arm_gicv3", + .version_id = 1, + .minimum_version_id = 1, + .pre_load = gicv3_pre_load, + .pre_save = gicv3_pre_save, + .post_load = gicv3_post_load, + .priority = MIG_PRI_GICV3, + .fields = (VMStateField[]) { + VMSTATE_UINT32(gicd_ctlr, GICv3State), + VMSTATE_UINT32_ARRAY(gicd_statusr, GICv3State, 2), + VMSTATE_UINT32_ARRAY(group, GICv3State, GICV3_BMP_SIZE), + VMSTATE_UINT32_ARRAY(grpmod, GICv3State, GICV3_BMP_SIZE), + VMSTATE_UINT32_ARRAY(enabled, GICv3State, GICV3_BMP_SIZE), + VMSTATE_UINT32_ARRAY(pending, GICv3State, GICV3_BMP_SIZE), + VMSTATE_UINT32_ARRAY(active, GICv3State, GICV3_BMP_SIZE), + VMSTATE_UINT32_ARRAY(level, GICv3State, GICV3_BMP_SIZE), + VMSTATE_UINT32_ARRAY(edge_trigger, GICv3State, GICV3_BMP_SIZE), + VMSTATE_UINT8_ARRAY(gicd_ipriority, GICv3State, GICV3_MAXIRQ), + VMSTATE_UINT64_ARRAY(gicd_irouter, GICv3State, GICV3_MAXIRQ), + VMSTATE_UINT32_ARRAY(gicd_nsacr, GICv3State, + DIV_ROUND_UP(GICV3_MAXIRQ, 16)), + VMSTATE_STRUCT_VARRAY_POINTER_UINT32(cpu, GICv3State, num_cpu, + vmstate_gicv3_cpu, GICv3CPUState), + VMSTATE_END_OF_LIST() + }, + .subsections = (const VMStateDescription * []) { + &vmstate_gicv3_gicd_no_migration_shift_bug, + NULL + } +}; + +void gicv3_init_irqs_and_mmio(GICv3State *s, qemu_irq_handler handler, + const MemoryRegionOps *ops) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(s); + int i; + int cpuidx; + + /* For the GIC, also expose incoming GPIO lines for PPIs for each CPU. + * GPIO array layout is thus: + * [0..N-1] spi + * [N..N+31] PPIs for CPU 0 + * [N+32..N+63] PPIs for CPU 1 + * ... + */ + i = s->num_irq - GIC_INTERNAL + GIC_INTERNAL * s->num_cpu; + qdev_init_gpio_in(DEVICE(s), handler, i); + + for (i = 0; i < s->num_cpu; i++) { + sysbus_init_irq(sbd, &s->cpu[i].parent_irq); + } + for (i = 0; i < s->num_cpu; i++) { + sysbus_init_irq(sbd, &s->cpu[i].parent_fiq); + } + for (i = 0; i < s->num_cpu; i++) { + sysbus_init_irq(sbd, &s->cpu[i].parent_virq); + } + for (i = 0; i < s->num_cpu; i++) { + sysbus_init_irq(sbd, &s->cpu[i].parent_vfiq); + } + + memory_region_init_io(&s->iomem_dist, OBJECT(s), ops, s, + "gicv3_dist", 0x10000); + sysbus_init_mmio(sbd, &s->iomem_dist); + + s->redist_regions = g_new0(GICv3RedistRegion, s->nb_redist_regions); + cpuidx = 0; + for (i = 0; i < s->nb_redist_regions; i++) { + char *name = g_strdup_printf("gicv3_redist_region[%d]", i); + GICv3RedistRegion *region = &s->redist_regions[i]; + + region->gic = s; + region->cpuidx = cpuidx; + cpuidx += s->redist_region_count[i]; + + memory_region_init_io(®ion->iomem, OBJECT(s), + ops ? &ops[1] : NULL, region, name, + s->redist_region_count[i] * GICV3_REDIST_SIZE); + sysbus_init_mmio(sbd, ®ion->iomem); + g_free(name); + } +} + +static void arm_gicv3_common_realize(DeviceState *dev, Error **errp) +{ + GICv3State *s = ARM_GICV3_COMMON(dev); + int i, rdist_capacity, cpuidx; + + /* revision property is actually reserved and currently used only in order + * to keep the interface compatible with GICv2 code, avoiding extra + * conditions. 
However, in future it could be used, for example, if we + * implement GICv4. + */ + if (s->revision != 3) { + error_setg(errp, "unsupported GIC revision %d", s->revision); + return; + } + + if (s->num_irq > GICV3_MAXIRQ) { + error_setg(errp, + "requested %u interrupt lines exceeds GIC maximum %d", + s->num_irq, GICV3_MAXIRQ); + return; + } + if (s->num_irq < GIC_INTERNAL) { + error_setg(errp, + "requested %u interrupt lines is below GIC minimum %d", + s->num_irq, GIC_INTERNAL); + return; + } + + /* ITLinesNumber is represented as (N / 32) - 1, so this is an + * implementation imposed restriction, not an architectural one, + * so we don't have to deal with bitfields where only some of the + * bits in a 32-bit word should be valid. + */ + if (s->num_irq % 32) { + error_setg(errp, + "%d interrupt lines unsupported: not divisible by 32", + s->num_irq); + return; + } + + if (s->lpi_enable && !s->dma) { + error_setg(errp, "Redist-ITS: Guest 'sysmem' reference link not set"); + return; + } + + rdist_capacity = 0; + for (i = 0; i < s->nb_redist_regions; i++) { + rdist_capacity += s->redist_region_count[i]; + } + if (rdist_capacity < s->num_cpu) { + error_setg(errp, "Capacity of the redist regions(%d) " + "is less than number of vcpus(%d)", + rdist_capacity, s->num_cpu); + return; + } + + s->cpu = g_new0(GICv3CPUState, s->num_cpu); + + for (i = 0; i < s->num_cpu; i++) { + CPUState *cpu = qemu_get_cpu(i); + uint64_t cpu_affid; + + s->cpu[i].cpu = cpu; + s->cpu[i].gic = s; + /* Store GICv3CPUState in CPUARMState gicv3state pointer */ + gicv3_set_gicv3state(cpu, &s->cpu[i]); + + /* Pre-construct the GICR_TYPER: + * For our implementation: + * Top 32 bits are the affinity value of the associated CPU + * CommonLPIAff == 01 (redistributors with same Aff3 share LPI table) + * Processor_Number == CPU index starting from 0 + * DPGS == 0 (GICR_CTLR.DPG* not supported) + * Last == 1 if this is the last redistributor in a series of + * contiguous redistributor pages + * DirectLPI == 0 (direct injection of LPIs not supported) + * VLPIS == 0 (virtual LPIs not supported) + * PLPIS == 0 (physical LPIs not supported) + */ + cpu_affid = object_property_get_uint(OBJECT(cpu), "mp-affinity", NULL); + + /* The CPU mp-affinity property is in MPIDR register format; squash + * the affinity bytes into 32 bits as the GICR_TYPER has them. + */ + cpu_affid = ((cpu_affid & 0xFF00000000ULL) >> 8) | + (cpu_affid & 0xFFFFFF); + s->cpu[i].gicr_typer = (cpu_affid << 32) | + (1 << 24) | + (i << 8); + + if (s->lpi_enable) { + s->cpu[i].gicr_typer |= GICR_TYPER_PLPIS; + } + } + + /* + * Now go through and set GICR_TYPER.Last for the final + * redistributor in each region. 
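+ * The Last bit tells the guest where a contiguous block of + * redistributor pages ends, so it is set once per redistributor region.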
+ */ + cpuidx = 0; + for (i = 0; i < s->nb_redist_regions; i++) { + cpuidx += s->redist_region_count[i]; + s->cpu[cpuidx - 1].gicr_typer |= GICR_TYPER_LAST; + } +} + +static void arm_gicv3_finalize(Object *obj) +{ + GICv3State *s = ARM_GICV3_COMMON(obj); + + g_free(s->redist_region_count); +} + +static void arm_gicv3_common_reset(DeviceState *dev) +{ + GICv3State *s = ARM_GICV3_COMMON(dev); + int i; + + for (i = 0; i < s->num_cpu; i++) { + GICv3CPUState *cs = &s->cpu[i]; + + cs->level = 0; + cs->gicr_ctlr = 0; + cs->gicr_statusr[GICV3_S] = 0; + cs->gicr_statusr[GICV3_NS] = 0; + cs->gicr_waker = GICR_WAKER_ProcessorSleep | GICR_WAKER_ChildrenAsleep; + cs->gicr_propbaser = 0; + cs->gicr_pendbaser = 0; + /* If we're resetting a TZ-aware GIC as if secure firmware + * had set it up ready to start a kernel in non-secure, we + * need to set interrupts to group 1 so the kernel can use them. + * Otherwise they reset to group 0 like the hardware. + */ + if (s->irq_reset_nonsecure) { + cs->gicr_igroupr0 = 0xffffffff; + } else { + cs->gicr_igroupr0 = 0; + } + + cs->gicr_ienabler0 = 0; + cs->gicr_ipendr0 = 0; + cs->gicr_iactiver0 = 0; + cs->edge_trigger = 0xffff; + cs->gicr_igrpmodr0 = 0; + cs->gicr_nsacr = 0; + memset(cs->gicr_ipriorityr, 0, sizeof(cs->gicr_ipriorityr)); + + cs->hppi.prio = 0xff; + cs->hpplpi.prio = 0xff; + + /* State in the CPU interface must *not* be reset here, because it + * is part of the CPU's reset domain, not the GIC device's. + */ + } + + /* For our implementation affinity routing is always enabled */ + if (s->security_extn) { + s->gicd_ctlr = GICD_CTLR_ARE_S | GICD_CTLR_ARE_NS; + } else { + s->gicd_ctlr = GICD_CTLR_DS | GICD_CTLR_ARE; + } + + s->gicd_statusr[GICV3_S] = 0; + s->gicd_statusr[GICV3_NS] = 0; + + memset(s->group, 0, sizeof(s->group)); + memset(s->grpmod, 0, sizeof(s->grpmod)); + memset(s->enabled, 0, sizeof(s->enabled)); + memset(s->pending, 0, sizeof(s->pending)); + memset(s->active, 0, sizeof(s->active)); + memset(s->level, 0, sizeof(s->level)); + memset(s->edge_trigger, 0, sizeof(s->edge_trigger)); + memset(s->gicd_ipriority, 0, sizeof(s->gicd_ipriority)); + memset(s->gicd_irouter, 0, sizeof(s->gicd_irouter)); + memset(s->gicd_nsacr, 0, sizeof(s->gicd_nsacr)); + /* GICD_IROUTER are UNKNOWN at reset so in theory the guest must + * write these to get sane behaviour and we need not populate the + * pointer cache here; however having the cache be different for + * "happened to be 0 from reset" and "guest wrote 0" would be + * too confusing. + */ + gicv3_cache_all_target_cpustates(s); + + if (s->irq_reset_nonsecure) { + /* If we're resetting a TZ-aware GIC as if secure firmware + * had set it up ready to start a kernel in non-secure, we + * need to set interrupts to group 1 so the kernel can use them. + * Otherwise they reset to group 0 like the hardware. + */ + for (i = GIC_INTERNAL; i < s->num_irq; i++) { + gicv3_gicd_group_set(s, i); + } + } + s->gicd_no_migration_shift_bug = true; +} + +static void arm_gic_common_linux_init(ARMLinuxBootIf *obj, + bool secure_boot) +{ + GICv3State *s = ARM_GICV3_COMMON(obj); + + if (s->security_extn && !secure_boot) { + /* We're directly booting a kernel into NonSecure. If this GIC + * implements the security extensions then we must configure it + * to have all the interrupts be NonSecure (this is a job that + * is done by the Secure boot firmware in real hardware, and in + * this mode QEMU is acting as a minimalist firmware-and-bootloader + * equivalent). 
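+ * The flag set below is consumed by arm_gicv3_common_reset(), which + * then resets the interrupts into Group 1.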
+ */ + s->irq_reset_nonsecure = true; + } +} + +static Property arm_gicv3_common_properties[] = { + DEFINE_PROP_UINT32("num-cpu", GICv3State, num_cpu, 1), + DEFINE_PROP_UINT32("num-irq", GICv3State, num_irq, 32), + DEFINE_PROP_UINT32("revision", GICv3State, revision, 3), + DEFINE_PROP_BOOL("has-lpi", GICv3State, lpi_enable, 0), + DEFINE_PROP_BOOL("has-security-extensions", GICv3State, security_extn, 0), + DEFINE_PROP_ARRAY("redist-region-count", GICv3State, nb_redist_regions, + redist_region_count, qdev_prop_uint32, uint32_t), + DEFINE_PROP_LINK("sysmem", GICv3State, dma, TYPE_MEMORY_REGION, + MemoryRegion *), + DEFINE_PROP_END_OF_LIST(), +}; + +static void arm_gicv3_common_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + ARMLinuxBootIfClass *albifc = ARM_LINUX_BOOT_IF_CLASS(klass); + + dc->reset = arm_gicv3_common_reset; + dc->realize = arm_gicv3_common_realize; + device_class_set_props(dc, arm_gicv3_common_properties); + dc->vmsd = &vmstate_gicv3; + albifc->arm_linux_init = arm_gic_common_linux_init; +} + +static const TypeInfo arm_gicv3_common_type = { + .name = TYPE_ARM_GICV3_COMMON, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(GICv3State), + .class_size = sizeof(ARMGICv3CommonClass), + .class_init = arm_gicv3_common_class_init, + .instance_finalize = arm_gicv3_finalize, + .abstract = true, + .interfaces = (InterfaceInfo []) { + { TYPE_ARM_LINUX_BOOT_IF }, + { }, + }, +}; + +static void register_types(void) +{ + type_register_static(&arm_gicv3_common_type); +} + +type_init(register_types) diff --git a/hw/intc/arm_gicv3_cpuif.c b/hw/intc/arm_gicv3_cpuif.c new file mode 100644 index 000000000..85fc369e5 --- /dev/null +++ b/hw/intc/arm_gicv3_cpuif.c @@ -0,0 +1,2700 @@ +/* + * ARM Generic Interrupt Controller v3 + * + * Copyright (c) 2016 Linaro Limited + * Written by Peter Maydell + * + * This code is licensed under the GPL, version 2 or (at your option) + * any later version. + */ + +/* This file contains the code for the system register interface + * portions of the GICv3. + */ + +#include "qemu/osdep.h" +#include "qemu/bitops.h" +#include "qemu/log.h" +#include "qemu/main-loop.h" +#include "trace.h" +#include "gicv3_internal.h" +#include "hw/irq.h" +#include "cpu.h" + +void gicv3_set_gicv3state(CPUState *cpu, GICv3CPUState *s) +{ + ARMCPU *arm_cpu = ARM_CPU(cpu); + CPUARMState *env = &arm_cpu->env; + + env->gicv3state = (void *)s; +}; + +static GICv3CPUState *icc_cs_from_env(CPUARMState *env) +{ + return env->gicv3state; +} + +static bool gicv3_use_ns_bank(CPUARMState *env) +{ + /* Return true if we should use the NonSecure bank for a banked GIC + * CPU interface register. Note that this differs from the + * access_secure_reg() function because GICv3 banked registers are + * banked even for AArch64, unlike the other CPU system registers. 
+ */ + return !arm_is_secure_below_el3(env); +} + +/* The minimum BPR for the virtual interface is a configurable property */ +static inline int icv_min_vbpr(GICv3CPUState *cs) +{ + return 7 - cs->vprebits; +} + +/* Simple accessor functions for LR fields */ +static uint32_t ich_lr_vintid(uint64_t lr) +{ + return extract64(lr, ICH_LR_EL2_VINTID_SHIFT, ICH_LR_EL2_VINTID_LENGTH); +} + +static uint32_t ich_lr_pintid(uint64_t lr) +{ + return extract64(lr, ICH_LR_EL2_PINTID_SHIFT, ICH_LR_EL2_PINTID_LENGTH); +} + +static uint32_t ich_lr_prio(uint64_t lr) +{ + return extract64(lr, ICH_LR_EL2_PRIORITY_SHIFT, ICH_LR_EL2_PRIORITY_LENGTH); +} + +static int ich_lr_state(uint64_t lr) +{ + return extract64(lr, ICH_LR_EL2_STATE_SHIFT, ICH_LR_EL2_STATE_LENGTH); +} + +static bool icv_access(CPUARMState *env, int hcr_flags) +{ + /* Return true if this ICC_ register access should really be + * directed to an ICV_ access. hcr_flags is a mask of + * HCR_EL2 bits to check: we treat this as an ICV_ access + * if we are in NS EL1 and at least one of the specified + * HCR_EL2 bits is set. + * + * ICV registers fall into three categories: + * * access if NS EL1 and HCR_EL2.FMO == 1: + * all ICV regs with '0' in their name + * * access if NS EL1 and HCR_EL2.IMO == 1: + * all ICV regs with '1' in their name + * * access if NS EL1 and either IMO or FMO == 1: + * CTLR, DIR, PMR, RPR + */ + uint64_t hcr_el2 = arm_hcr_el2_eff(env); + bool flagmatch = hcr_el2 & hcr_flags & (HCR_IMO | HCR_FMO); + + return flagmatch && arm_current_el(env) == 1 + && !arm_is_secure_below_el3(env); +} + +static int read_vbpr(GICv3CPUState *cs, int grp) +{ + /* Read VBPR value out of the VMCR field (caller must handle + * VCBPR effects if required) + */ + if (grp == GICV3_G0) { + return extract64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VBPR0_SHIFT, + ICH_VMCR_EL2_VBPR0_LENGTH); + } else { + return extract64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VBPR1_SHIFT, + ICH_VMCR_EL2_VBPR1_LENGTH); + } +} + +static void write_vbpr(GICv3CPUState *cs, int grp, int value) +{ + /* Write the new VBPR value, handling the "writing a value less than + * the minimum sets it to the minimum" semantics. + */ + int min = icv_min_vbpr(cs); + + if (grp != GICV3_G0) { + min++; + } + + value = MAX(value, min); + + if (grp == GICV3_G0) { + cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VBPR0_SHIFT, + ICH_VMCR_EL2_VBPR0_LENGTH, value); + } else { + cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VBPR1_SHIFT, + ICH_VMCR_EL2_VBPR1_LENGTH, value); + } +} + +static uint32_t icv_fullprio_mask(GICv3CPUState *cs) +{ + /* Return a mask word which clears the unimplemented priority bits + * from a priority value for a virtual interrupt. (Not to be confused + * with the group priority, whose mask depends on the value of VBPR + * for the interrupt group.) + */ + return ~0U << (8 - cs->vpribits); +} + +static int ich_highest_active_virt_prio(GICv3CPUState *cs) +{ + /* Calculate the current running priority based on the set bits + * in the ICH Active Priority Registers.
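+ * A set bit n in the APRs corresponds to an active group priority of + * n << (icv_min_vbpr(cs) + 1), so the lowest set bit across the APR + * words gives the current running priority.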
+ */ + int i; + int aprmax = 1 << (cs->vprebits - 5); + + assert(aprmax <= ARRAY_SIZE(cs->ich_apr[0])); + + for (i = 0; i < aprmax; i++) { + uint32_t apr = cs->ich_apr[GICV3_G0][i] | + cs->ich_apr[GICV3_G1NS][i]; + + if (!apr) { + continue; + } + return (i * 32 + ctz32(apr)) << (icv_min_vbpr(cs) + 1); + } + /* No current active interrupts: return idle priority */ + return 0xff; +} + +static int hppvi_index(GICv3CPUState *cs) +{ + /* Return the list register index of the highest priority pending + * virtual interrupt, as per the HighestPriorityVirtualInterrupt + * pseudocode. If no pending virtual interrupts, return -1. + */ + int idx = -1; + int i; + /* Note that a list register entry with a priority of 0xff will + * never be reported by this function; this is the architecturally + * correct behaviour. + */ + int prio = 0xff; + + if (!(cs->ich_vmcr_el2 & (ICH_VMCR_EL2_VENG0 | ICH_VMCR_EL2_VENG1))) { + /* Both groups disabled, definitely nothing to do */ + return idx; + } + + for (i = 0; i < cs->num_list_regs; i++) { + uint64_t lr = cs->ich_lr_el2[i]; + int thisprio; + + if (ich_lr_state(lr) != ICH_LR_EL2_STATE_PENDING) { + /* Not Pending */ + continue; + } + + /* Ignore interrupts if relevant group enable not set */ + if (lr & ICH_LR_EL2_GROUP) { + if (!(cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG1)) { + continue; + } + } else { + if (!(cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG0)) { + continue; + } + } + + thisprio = ich_lr_prio(lr); + + if (thisprio < prio) { + prio = thisprio; + idx = i; + } + } + + return idx; +} + +static uint32_t icv_gprio_mask(GICv3CPUState *cs, int group) +{ + /* Return a mask word which clears the subpriority bits from + * a priority value for a virtual interrupt in the specified group. + * This depends on the VBPR value. + * If using VBPR0 then: + * a BPR of 0 means the group priority bits are [7:1]; + * a BPR of 1 means they are [7:2], and so on down to + * a BPR of 7 meaning no group priority bits at all. + * If using VBPR1 then: + * a BPR of 0 is impossible (the minimum value is 1) + * a BPR of 1 means the group priority bits are [7:1]; + * a BPR of 2 means they are [7:2], and so on down to + * a BPR of 7 meaning the group priority is [7]. + * + * Which BPR to use depends on the group of the interrupt and + * the current ICH_VMCR_EL2.VCBPR settings. + * + * This corresponds to the VGroupBits() pseudocode. + */ + int bpr; + + if (group == GICV3_G1NS && cs->ich_vmcr_el2 & ICH_VMCR_EL2_VCBPR) { + group = GICV3_G0; + } + + bpr = read_vbpr(cs, group); + if (group == GICV3_G1NS) { + assert(bpr > 0); + bpr--; + } + + return ~0U << (bpr + 1); +} + +static bool icv_hppi_can_preempt(GICv3CPUState *cs, uint64_t lr) +{ + /* Return true if we can signal this virtual interrupt defined by + * the given list register value; see the pseudocode functions + * CanSignalVirtualInterrupt and CanSignalVirtualInt. + * Compare also icc_hppi_can_preempt() which is the non-virtual + * equivalent of these checks. + */ + int grp; + uint32_t mask, prio, rprio, vpmr; + + if (!(cs->ich_hcr_el2 & ICH_HCR_EL2_EN)) { + /* Virtual interface disabled */ + return false; + } + + /* We don't need to check that this LR is in Pending state because + * that has already been done in hppvi_index(). 
+ */ + + prio = ich_lr_prio(lr); + vpmr = extract64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VPMR_SHIFT, + ICH_VMCR_EL2_VPMR_LENGTH); + + if (prio >= vpmr) { + /* Priority mask masks this interrupt */ + return false; + } + + rprio = ich_highest_active_virt_prio(cs); + if (rprio == 0xff) { + /* No running interrupt so we can preempt */ + return true; + } + + grp = (lr & ICH_LR_EL2_GROUP) ? GICV3_G1NS : GICV3_G0; + + mask = icv_gprio_mask(cs, grp); + + /* We only preempt a running interrupt if the pending interrupt's + * group priority is sufficient (the subpriorities are not considered). + */ + if ((prio & mask) < (rprio & mask)) { + return true; + } + + return false; +} + +static uint32_t eoi_maintenance_interrupt_state(GICv3CPUState *cs, + uint32_t *misr) +{ + /* Return a set of bits indicating the EOI maintenance interrupt status + * for each list register. The EOI maintenance interrupt status is + * 1 if LR.State == 0 && LR.HW == 0 && LR.EOI == 1 + * (see the GICv3 spec for the ICH_EISR_EL2 register). + * If misr is not NULL then we should also collect the information + * about the MISR.EOI, MISR.NP and MISR.U bits. + */ + uint32_t value = 0; + int validcount = 0; + bool seenpending = false; + int i; + + for (i = 0; i < cs->num_list_regs; i++) { + uint64_t lr = cs->ich_lr_el2[i]; + + if ((lr & (ICH_LR_EL2_STATE_MASK | ICH_LR_EL2_HW | ICH_LR_EL2_EOI)) + == ICH_LR_EL2_EOI) { + value |= (1 << i); + } + if ((lr & ICH_LR_EL2_STATE_MASK)) { + validcount++; + } + if (ich_lr_state(lr) == ICH_LR_EL2_STATE_PENDING) { + seenpending = true; + } + } + + if (misr) { + if (validcount < 2 && (cs->ich_hcr_el2 & ICH_HCR_EL2_UIE)) { + *misr |= ICH_MISR_EL2_U; + } + if (!seenpending && (cs->ich_hcr_el2 & ICH_HCR_EL2_NPIE)) { + *misr |= ICH_MISR_EL2_NP; + } + if (value) { + *misr |= ICH_MISR_EL2_EOI; + } + } + return value; +} + +static uint32_t maintenance_interrupt_state(GICv3CPUState *cs) +{ + /* Return a set of bits indicating the maintenance interrupt status + * (as seen in the ICH_MISR_EL2 register). + */ + uint32_t value = 0; + + /* Scan list registers and fill in the U, NP and EOI bits */ + eoi_maintenance_interrupt_state(cs, &value); + + if ((cs->ich_hcr_el2 & ICH_HCR_EL2_LRENPIE) && + (cs->ich_hcr_el2 & ICH_HCR_EL2_EOICOUNT_MASK)) { + value |= ICH_MISR_EL2_LRENP; + } + + if ((cs->ich_hcr_el2 & ICH_HCR_EL2_VGRP0EIE) && + (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG0)) { + value |= ICH_MISR_EL2_VGRP0E; + } + + if ((cs->ich_hcr_el2 & ICH_HCR_EL2_VGRP0DIE) && + !(cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG0)) { + value |= ICH_MISR_EL2_VGRP0D; + } + + if ((cs->ich_hcr_el2 & ICH_HCR_EL2_VGRP1EIE) && + (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG1)) { + value |= ICH_MISR_EL2_VGRP1E; + } + + if ((cs->ich_hcr_el2 & ICH_HCR_EL2_VGRP1DIE) && + !(cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG1)) { + value |= ICH_MISR_EL2_VGRP1D; + } + + return value; +} + +static void gicv3_cpuif_virt_update(GICv3CPUState *cs) +{ + /* Tell the CPU about any pending virtual interrupts or + * maintenance interrupts, following a change to the state + * of the CPU interface relevant to virtual interrupts. + * + * CAUTION: this function will call qemu_set_irq() on the + * CPU maintenance IRQ line, which is typically wired up + * to the GIC as a per-CPU interrupt. This means that it + * will recursively call back into the GIC code via + * gicv3_redist_set_irq() and thus into the CPU interface code's + * gicv3_cpuif_update().
It is therefore important that this + * function is only called as the final action of a CPU interface + * register write implementation, after all the GIC state + * fields have been updated. gicv3_cpuif_update() also must + * not cause this function to be called, but that happens + * naturally as a result of there being no architectural + * linkage between the physical and virtual GIC logic. + */ + int idx; + int irqlevel = 0; + int fiqlevel = 0; + int maintlevel = 0; + ARMCPU *cpu = ARM_CPU(cs->cpu); + + idx = hppvi_index(cs); + trace_gicv3_cpuif_virt_update(gicv3_redist_affid(cs), idx); + if (idx >= 0) { + uint64_t lr = cs->ich_lr_el2[idx]; + + if (icv_hppi_can_preempt(cs, lr)) { + /* Virtual interrupts are simple: G0 are always FIQ, and G1 IRQ */ + if (lr & ICH_LR_EL2_GROUP) { + irqlevel = 1; + } else { + fiqlevel = 1; + } + } + } + + if ((cs->ich_hcr_el2 & ICH_HCR_EL2_EN) && + maintenance_interrupt_state(cs) != 0) { + maintlevel = 1; + } + + trace_gicv3_cpuif_virt_set_irqs(gicv3_redist_affid(cs), fiqlevel, + irqlevel, maintlevel); + + qemu_set_irq(cs->parent_vfiq, fiqlevel); + qemu_set_irq(cs->parent_virq, irqlevel); + qemu_set_irq(cpu->gicv3_maintenance_interrupt, maintlevel); +} + +static uint64_t icv_ap_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ + GICv3CPUState *cs = icc_cs_from_env(env); + int regno = ri->opc2 & 3; + int grp = (ri->crm & 1) ? GICV3_G1NS : GICV3_G0; + uint64_t value = cs->ich_apr[grp][regno]; + + trace_gicv3_icv_ap_read(ri->crm & 1, regno, gicv3_redist_affid(cs), value); + return value; +} + +static void icv_ap_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + GICv3CPUState *cs = icc_cs_from_env(env); + int regno = ri->opc2 & 3; + int grp = (ri->crm & 1) ? GICV3_G1NS : GICV3_G0; + + trace_gicv3_icv_ap_write(ri->crm & 1, regno, gicv3_redist_affid(cs), value); + + cs->ich_apr[grp][regno] = value & 0xFFFFFFFFU; + + gicv3_cpuif_virt_update(cs); + return; +} + +static uint64_t icv_bpr_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ + GICv3CPUState *cs = icc_cs_from_env(env); + int grp = (ri->crm == 8) ? GICV3_G0 : GICV3_G1NS; + uint64_t bpr; + bool satinc = false; + + if (grp == GICV3_G1NS && (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VCBPR)) { + /* reads return bpr0 + 1 saturated to 7, writes ignored */ + grp = GICV3_G0; + satinc = true; + } + + bpr = read_vbpr(cs, grp); + + if (satinc) { + bpr++; + bpr = MIN(bpr, 7); + } + + trace_gicv3_icv_bpr_read(ri->crm == 8 ? 0 : 1, gicv3_redist_affid(cs), bpr); + + return bpr; +} + +static void icv_bpr_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + GICv3CPUState *cs = icc_cs_from_env(env); + int grp = (ri->crm == 8) ? GICV3_G0 : GICV3_G1NS; + + trace_gicv3_icv_bpr_write(ri->crm == 8 ? 
0 : 1, + gicv3_redist_affid(cs), value); + + if (grp == GICV3_G1NS && (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VCBPR)) { + /* reads return bpr0 + 1 saturated to 7, writes ignored */ + return; + } + + write_vbpr(cs, grp, value); + + gicv3_cpuif_virt_update(cs); +} + +static uint64_t icv_pmr_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ + GICv3CPUState *cs = icc_cs_from_env(env); + uint64_t value; + + value = extract64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VPMR_SHIFT, + ICH_VMCR_EL2_VPMR_LENGTH); + + trace_gicv3_icv_pmr_read(gicv3_redist_affid(cs), value); + return value; +} + +static void icv_pmr_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + GICv3CPUState *cs = icc_cs_from_env(env); + + trace_gicv3_icv_pmr_write(gicv3_redist_affid(cs), value); + + value &= icv_fullprio_mask(cs); + + cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VPMR_SHIFT, + ICH_VMCR_EL2_VPMR_LENGTH, value); + + gicv3_cpuif_virt_update(cs); +} + +static uint64_t icv_igrpen_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ + GICv3CPUState *cs = icc_cs_from_env(env); + int enbit; + uint64_t value; + + enbit = ri->opc2 & 1 ? ICH_VMCR_EL2_VENG1_SHIFT : ICH_VMCR_EL2_VENG0_SHIFT; + value = extract64(cs->ich_vmcr_el2, enbit, 1); + + trace_gicv3_icv_igrpen_read(ri->opc2 & 1 ? 1 : 0, + gicv3_redist_affid(cs), value); + return value; +} + +static void icv_igrpen_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + GICv3CPUState *cs = icc_cs_from_env(env); + int enbit; + + trace_gicv3_icv_igrpen_write(ri->opc2 & 1 ? 1 : 0, + gicv3_redist_affid(cs), value); + + enbit = ri->opc2 & 1 ? ICH_VMCR_EL2_VENG1_SHIFT : ICH_VMCR_EL2_VENG0_SHIFT; + + cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, enbit, 1, value); + gicv3_cpuif_virt_update(cs); +} + +static uint64_t icv_ctlr_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ + GICv3CPUState *cs = icc_cs_from_env(env); + uint64_t value; + + /* Note that the fixed fields here (A3V, SEIS, IDbits, PRIbits) + * should match the ones reported in ich_vtr_read(). + */ + value = ICC_CTLR_EL1_A3V | (1 << ICC_CTLR_EL1_IDBITS_SHIFT) | + (7 << ICC_CTLR_EL1_PRIBITS_SHIFT); + + if (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VEOIM) { + value |= ICC_CTLR_EL1_EOIMODE; + } + + if (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VCBPR) { + value |= ICC_CTLR_EL1_CBPR; + } + + trace_gicv3_icv_ctlr_read(gicv3_redist_affid(cs), value); + return value; +} + +static void icv_ctlr_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + GICv3CPUState *cs = icc_cs_from_env(env); + + trace_gicv3_icv_ctlr_write(gicv3_redist_affid(cs), value); + + cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VCBPR_SHIFT, + 1, value & ICC_CTLR_EL1_CBPR ? 1 : 0); + cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VEOIM_SHIFT, + 1, value & ICC_CTLR_EL1_EOIMODE ? 1 : 0); + + gicv3_cpuif_virt_update(cs); +} + +static uint64_t icv_rpr_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ + GICv3CPUState *cs = icc_cs_from_env(env); + int prio = ich_highest_active_virt_prio(cs); + + trace_gicv3_icv_rpr_read(gicv3_redist_affid(cs), prio); + return prio; +} + +static uint64_t icv_hppir_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ + GICv3CPUState *cs = icc_cs_from_env(env); + int grp = ri->crm == 8 ? GICV3_G0 : GICV3_G1NS; + int idx = hppvi_index(cs); + uint64_t value = INTID_SPURIOUS; + + if (idx >= 0) { + uint64_t lr = cs->ich_lr_el2[idx]; + int thisgrp = (lr & ICH_LR_EL2_GROUP) ? 
GICV3_G1NS : GICV3_G0; + + if (grp == thisgrp) { + value = ich_lr_vintid(lr); + } + } + + trace_gicv3_icv_hppir_read(grp, gicv3_redist_affid(cs), value); + return value; +} + +static void icv_activate_irq(GICv3CPUState *cs, int idx, int grp) +{ + /* Activate the interrupt in the specified list register + * by moving it from Pending to Active state, and update the + * Active Priority Registers. + */ + uint32_t mask = icv_gprio_mask(cs, grp); + int prio = ich_lr_prio(cs->ich_lr_el2[idx]) & mask; + int aprbit = prio >> (8 - cs->vprebits); + int regno = aprbit / 32; + int regbit = aprbit % 32; + + cs->ich_lr_el2[idx] &= ~ICH_LR_EL2_STATE_PENDING_BIT; + cs->ich_lr_el2[idx] |= ICH_LR_EL2_STATE_ACTIVE_BIT; + cs->ich_apr[grp][regno] |= (1 << regbit); +} + +static uint64_t icv_iar_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ + GICv3CPUState *cs = icc_cs_from_env(env); + int grp = ri->crm == 8 ? GICV3_G0 : GICV3_G1NS; + int idx = hppvi_index(cs); + uint64_t intid = INTID_SPURIOUS; + + if (idx >= 0) { + uint64_t lr = cs->ich_lr_el2[idx]; + int thisgrp = (lr & ICH_LR_EL2_GROUP) ? GICV3_G1NS : GICV3_G0; + + if (thisgrp == grp && icv_hppi_can_preempt(cs, lr)) { + intid = ich_lr_vintid(lr); + if (!gicv3_intid_is_special(intid)) { + icv_activate_irq(cs, idx, grp); + } else { + /* Interrupt goes from Pending to Invalid */ + cs->ich_lr_el2[idx] &= ~ICH_LR_EL2_STATE_PENDING_BIT; + /* We will now return the (bogus) ID from the list register, + * as per the pseudocode. + */ + } + } + } + + trace_gicv3_icv_iar_read(ri->crm == 8 ? 0 : 1, + gicv3_redist_affid(cs), intid); + + gicv3_cpuif_virt_update(cs); + + return intid; +} + +static int icc_highest_active_prio(GICv3CPUState *cs) +{ + /* Calculate the current running priority based on the set bits + * in the Active Priority Registers. + */ + int i; + + for (i = 0; i < ARRAY_SIZE(cs->icc_apr[0]); i++) { + uint32_t apr = cs->icc_apr[GICV3_G0][i] | + cs->icc_apr[GICV3_G1][i] | cs->icc_apr[GICV3_G1NS][i]; + + if (!apr) { + continue; + } + return (i * 32 + ctz32(apr)) << (GIC_MIN_BPR + 1); + } + /* No current active interrupts: return idle priority */ + return 0xff; +} + +static uint32_t icc_gprio_mask(GICv3CPUState *cs, int group) +{ + /* Return a mask word which clears the subpriority bits from + * a priority value for an interrupt in the specified group. + * This depends on the BPR value. For CBPR0 (S or NS): + * a BPR of 0 means the group priority bits are [7:1]; + * a BPR of 1 means they are [7:2], and so on down to + * a BPR of 7 meaning no group priority bits at all. + * For CBPR1 NS: + * a BPR of 0 is impossible (the minimum value is 1) + * a BPR of 1 means the group priority bits are [7:1]; + * a BPR of 2 means they are [7:2], and so on down to + * a BPR of 7 meaning the group priority is [7]. + * + * Which BPR to use depends on the group of the interrupt and + * the current ICC_CTLR.CBPR settings. + * + * This corresponds to the GroupBits() pseudocode. + */ + int bpr; + + if ((group == GICV3_G1 && cs->icc_ctlr_el1[GICV3_S] & ICC_CTLR_EL1_CBPR) || + (group == GICV3_G1NS && + cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_CBPR)) { + group = GICV3_G0; + } + + bpr = cs->icc_bpr[group] & 7; + + if (group == GICV3_G1NS) { + assert(bpr > 0); + bpr--; + } + + return ~0U << (bpr + 1); +} + +static bool icc_no_enabled_hppi(GICv3CPUState *cs) +{ + /* Return true if there is no pending interrupt, or the + * highest priority pending interrupt is in a group which has been + * disabled at the CPU interface by the ICC_IGRPEN* register enable bits. 
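+ * (A stored priority of 0xff is our marker for "no pending interrupt".)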
+ */ + return cs->hppi.prio == 0xff || (cs->icc_igrpen[cs->hppi.grp] == 0); +} + +static bool icc_hppi_can_preempt(GICv3CPUState *cs) +{ + /* Return true if we have a pending interrupt of sufficient + * priority to preempt. + */ + int rprio; + uint32_t mask; + + if (icc_no_enabled_hppi(cs)) { + return false; + } + + if (cs->hppi.prio >= cs->icc_pmr_el1) { + /* Priority mask masks this interrupt */ + return false; + } + + rprio = icc_highest_active_prio(cs); + if (rprio == 0xff) { + /* No currently running interrupt so we can preempt */ + return true; + } + + mask = icc_gprio_mask(cs, cs->hppi.grp); + + /* We only preempt a running interrupt if the pending interrupt's + * group priority is sufficient (the subpriorities are not considered). + */ + if ((cs->hppi.prio & mask) < (rprio & mask)) { + return true; + } + + return false; +} + +void gicv3_cpuif_update(GICv3CPUState *cs) +{ + /* Tell the CPU about its highest priority pending interrupt */ + int irqlevel = 0; + int fiqlevel = 0; + ARMCPU *cpu = ARM_CPU(cs->cpu); + CPUARMState *env = &cpu->env; + + g_assert(qemu_mutex_iothread_locked()); + + trace_gicv3_cpuif_update(gicv3_redist_affid(cs), cs->hppi.irq, + cs->hppi.grp, cs->hppi.prio); + + if (cs->hppi.grp == GICV3_G1 && !arm_feature(env, ARM_FEATURE_EL3)) { + /* If a Security-enabled GIC sends a G1S interrupt to a + * Security-disabled CPU, we must treat it as if it were G0. + */ + cs->hppi.grp = GICV3_G0; + } + + if (icc_hppi_can_preempt(cs)) { + /* We have an interrupt: should we signal it as IRQ or FIQ? + * This is described in the GICv3 spec section 4.6.2. + */ + bool isfiq; + + switch (cs->hppi.grp) { + case GICV3_G0: + isfiq = true; + break; + case GICV3_G1: + isfiq = (!arm_is_secure(env) || + (arm_current_el(env) == 3 && arm_el_is_aa64(env, 3))); + break; + case GICV3_G1NS: + isfiq = arm_is_secure(env); + break; + default: + g_assert_not_reached(); + } + + if (isfiq) { + fiqlevel = 1; + } else { + irqlevel = 1; + } + } + + trace_gicv3_cpuif_set_irqs(gicv3_redist_affid(cs), fiqlevel, irqlevel); + + qemu_set_irq(cs->parent_fiq, fiqlevel); + qemu_set_irq(cs->parent_irq, irqlevel); +} + +static uint64_t icc_pmr_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ + GICv3CPUState *cs = icc_cs_from_env(env); + uint32_t value = cs->icc_pmr_el1; + + if (icv_access(env, HCR_FMO | HCR_IMO)) { + return icv_pmr_read(env, ri); + } + + if (arm_feature(env, ARM_FEATURE_EL3) && !arm_is_secure(env) && + (env->cp15.scr_el3 & SCR_FIQ)) { + /* NS access and Group 0 is inaccessible to NS: return the + * NS view of the current priority + */ + if ((value & 0x80) == 0) { + /* Secure priorities not visible to NS */ + value = 0; + } else if (value != 0xff) { + value = (value << 1) & 0xff; + } + } + + trace_gicv3_icc_pmr_read(gicv3_redist_affid(cs), value); + + return value; +} + +static void icc_pmr_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + GICv3CPUState *cs = icc_cs_from_env(env); + + if (icv_access(env, HCR_FMO | HCR_IMO)) { + return icv_pmr_write(env, ri, value); + } + + trace_gicv3_icc_pmr_write(gicv3_redist_affid(cs), value); + + value &= 0xff; + + if (arm_feature(env, ARM_FEATURE_EL3) && !arm_is_secure(env) && + (env->cp15.scr_el3 & SCR_FIQ)) { + /* NS access and Group 0 is inaccessible to NS: return the + * NS view of the current priority + */ + if (!(cs->icc_pmr_el1 & 0x80)) { + /* Current PMR in the secure range, don't allow NS to change it */ + return; + } + value = (value >> 1) | 0x80; + } + cs->icc_pmr_el1 = value; + gicv3_cpuif_update(cs); +} + +static void 
icc_activate_irq(GICv3CPUState *cs, int irq) +{ + /* Move the interrupt from the Pending state to Active, and update + * the Active Priority Registers + */ + uint32_t mask = icc_gprio_mask(cs, cs->hppi.grp); + int prio = cs->hppi.prio & mask; + int aprbit = prio >> 1; + int regno = aprbit / 32; + int regbit = aprbit % 32; + + cs->icc_apr[cs->hppi.grp][regno] |= (1 << regbit); + + if (irq < GIC_INTERNAL) { + cs->gicr_iactiver0 = deposit32(cs->gicr_iactiver0, irq, 1, 1); + cs->gicr_ipendr0 = deposit32(cs->gicr_ipendr0, irq, 1, 0); + gicv3_redist_update(cs); + } else if (irq < GICV3_LPI_INTID_START) { + gicv3_gicd_active_set(cs->gic, irq); + gicv3_gicd_pending_clear(cs->gic, irq); + gicv3_update(cs->gic, irq, 1); + } else { + gicv3_redist_lpi_pending(cs, irq, 0); + } +} + +static uint64_t icc_hppir0_value(GICv3CPUState *cs, CPUARMState *env) +{ + /* Return the highest priority pending interrupt register value + * for group 0. + */ + bool irq_is_secure; + + if (cs->hppi.prio == 0xff) { + return INTID_SPURIOUS; + } + + /* Check whether we can return the interrupt or if we should return + * a special identifier, as per the CheckGroup0ForSpecialIdentifiers + * pseudocode. (We can simplify a little because for us ICC_SRE_EL1.RM + * is always zero.) + */ + irq_is_secure = (!(cs->gic->gicd_ctlr & GICD_CTLR_DS) && + (cs->hppi.grp != GICV3_G1NS)); + + if (cs->hppi.grp != GICV3_G0 && !arm_is_el3_or_mon(env)) { + return INTID_SPURIOUS; + } + if (irq_is_secure && !arm_is_secure(env)) { + /* Secure interrupts not visible to Nonsecure */ + return INTID_SPURIOUS; + } + + if (cs->hppi.grp != GICV3_G0) { + /* Indicate to EL3 that there's a Group 1 interrupt for the other + * state pending. + */ + return irq_is_secure ? INTID_SECURE : INTID_NONSECURE; + } + + return cs->hppi.irq; +} + +static uint64_t icc_hppir1_value(GICv3CPUState *cs, CPUARMState *env) +{ + /* Return the highest priority pending interrupt register value + * for group 1. + */ + bool irq_is_secure; + + if (cs->hppi.prio == 0xff) { + return INTID_SPURIOUS; + } + + /* Check whether we can return the interrupt or if we should return + * a special identifier, as per the CheckGroup1ForSpecialIdentifiers + * pseudocode. (We can simplify a little because for us ICC_SRE_EL1.RM + * is always zero.) 
+ */ + irq_is_secure = (!(cs->gic->gicd_ctlr & GICD_CTLR_DS) && + (cs->hppi.grp != GICV3_G1NS)); + + if (cs->hppi.grp == GICV3_G0) { + /* Group 0 interrupts not visible via HPPIR1 */ + return INTID_SPURIOUS; + } + if (irq_is_secure) { + if (!arm_is_secure(env)) { + /* Secure interrupts not visible in Non-secure */ + return INTID_SPURIOUS; + } + } else if (!arm_is_el3_or_mon(env) && arm_is_secure(env)) { + /* Group 1 non-secure interrupts not visible in Secure EL1 */ + return INTID_SPURIOUS; + } + + return cs->hppi.irq; +} + +static uint64_t icc_iar0_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ + GICv3CPUState *cs = icc_cs_from_env(env); + uint64_t intid; + + if (icv_access(env, HCR_FMO)) { + return icv_iar_read(env, ri); + } + + if (!icc_hppi_can_preempt(cs)) { + intid = INTID_SPURIOUS; + } else { + intid = icc_hppir0_value(cs, env); + } + + if (!gicv3_intid_is_special(intid)) { + icc_activate_irq(cs, intid); + } + + trace_gicv3_icc_iar0_read(gicv3_redist_affid(cs), intid); + return intid; +} + +static uint64_t icc_iar1_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ + GICv3CPUState *cs = icc_cs_from_env(env); + uint64_t intid; + + if (icv_access(env, HCR_IMO)) { + return icv_iar_read(env, ri); + } + + if (!icc_hppi_can_preempt(cs)) { + intid = INTID_SPURIOUS; + } else { + intid = icc_hppir1_value(cs, env); + } + + if (!gicv3_intid_is_special(intid)) { + icc_activate_irq(cs, intid); + } + + trace_gicv3_icc_iar1_read(gicv3_redist_affid(cs), intid); + return intid; +} + +static void icc_drop_prio(GICv3CPUState *cs, int grp) +{ + /* Drop the priority of the currently active interrupt in + * the specified group. + * + * Note that we can guarantee (because of the requirement to nest + * ICC_IAR reads [which activate an interrupt and raise priority] + * with ICC_EOIR writes [which drop the priority for the interrupt]) + * that the interrupt we're being called for is the highest priority + * active interrupt, meaning that it has the lowest set bit in the + * APR registers. + * + * If the guest does not honour the ordering constraints then the + * behaviour of the GIC is UNPREDICTABLE, which for us means that + * the values of the APR registers might become incorrect and the + * running priority will be wrong, so interrupts that should preempt + * might not do so, and interrupts that should not preempt might do so. + */ + int i; + + for (i = 0; i < ARRAY_SIZE(cs->icc_apr[grp]); i++) { + uint64_t *papr = &cs->icc_apr[grp][i]; + + if (!*papr) { + continue; + } + /* Clear the lowest set bit */ + *papr &= *papr - 1; + break; + } + + /* running priority change means we need an update for this cpu i/f */ + gicv3_cpuif_update(cs); +} + +static bool icc_eoi_split(CPUARMState *env, GICv3CPUState *cs) +{ + /* Return true if we should split priority drop and interrupt + * deactivation, ie whether the relevant EOIMode bit is set. + */ + if (arm_is_el3_or_mon(env)) { + return cs->icc_ctlr_el3 & ICC_CTLR_EL3_EOIMODE_EL3; + } + if (arm_is_secure_below_el3(env)) { + return cs->icc_ctlr_el1[GICV3_S] & ICC_CTLR_EL1_EOIMODE; + } else { + return cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_EOIMODE; + } +} + +static int icc_highest_active_group(GICv3CPUState *cs) +{ + /* Return the group with the highest priority active interrupt. + * We can do this by just comparing the APRs to see which one + * has the lowest set bit. + * (If more than one group is active at the same priority then + * we're in UNPREDICTABLE territory.) 
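+ * (ctz32() returns 32 for a zero word, so a group with no active bits + * set in this word never wins the comparisons below.)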
+ */ + int i; + + for (i = 0; i < ARRAY_SIZE(cs->icc_apr[0]); i++) { + int g0ctz = ctz32(cs->icc_apr[GICV3_G0][i]); + int g1ctz = ctz32(cs->icc_apr[GICV3_G1][i]); + int g1nsctz = ctz32(cs->icc_apr[GICV3_G1NS][i]); + + if (g1nsctz < g0ctz && g1nsctz < g1ctz) { + return GICV3_G1NS; + } + if (g1ctz < g0ctz) { + return GICV3_G1; + } + if (g0ctz < 32) { + return GICV3_G0; + } + } + /* No set active bits? UNPREDICTABLE; return -1 so the caller + * ignores the spurious EOI attempt. + */ + return -1; +} + +static void icc_deactivate_irq(GICv3CPUState *cs, int irq) +{ + if (irq < GIC_INTERNAL) { + cs->gicr_iactiver0 = deposit32(cs->gicr_iactiver0, irq, 1, 0); + gicv3_redist_update(cs); + } else { + gicv3_gicd_active_clear(cs->gic, irq); + gicv3_update(cs->gic, irq, 1); + } +} + +static bool icv_eoi_split(CPUARMState *env, GICv3CPUState *cs) +{ + /* Return true if we should split priority drop and interrupt + * deactivation, ie whether the virtual EOIMode bit is set. + */ + return cs->ich_vmcr_el2 & ICH_VMCR_EL2_VEOIM; +} + +static int icv_find_active(GICv3CPUState *cs, int irq) +{ + /* Given an interrupt number for an active interrupt, return the index + * of the corresponding list register, or -1 if there is no match. + * Corresponds to FindActiveVirtualInterrupt pseudocode. + */ + int i; + + for (i = 0; i < cs->num_list_regs; i++) { + uint64_t lr = cs->ich_lr_el2[i]; + + if ((lr & ICH_LR_EL2_STATE_ACTIVE_BIT) && ich_lr_vintid(lr) == irq) { + return i; + } + } + + return -1; +} + +static void icv_deactivate_irq(GICv3CPUState *cs, int idx) +{ + /* Deactivate the interrupt in the specified list register index */ + uint64_t lr = cs->ich_lr_el2[idx]; + + if (lr & ICH_LR_EL2_HW) { + /* Deactivate the associated physical interrupt */ + int pirq = ich_lr_pintid(lr); + + if (pirq < INTID_SECURE) { + icc_deactivate_irq(cs, pirq); + } + } + + /* Clear the 'active' part of the state, so ActivePending->Pending + * and Active->Invalid. + */ + lr &= ~ICH_LR_EL2_STATE_ACTIVE_BIT; + cs->ich_lr_el2[idx] = lr; +} + +static void icv_increment_eoicount(GICv3CPUState *cs) +{ + /* Increment the EOICOUNT field in ICH_HCR_EL2 */ + int eoicount = extract64(cs->ich_hcr_el2, ICH_HCR_EL2_EOICOUNT_SHIFT, + ICH_HCR_EL2_EOICOUNT_LENGTH); + + cs->ich_hcr_el2 = deposit64(cs->ich_hcr_el2, ICH_HCR_EL2_EOICOUNT_SHIFT, + ICH_HCR_EL2_EOICOUNT_LENGTH, eoicount + 1); +} + +static int icv_drop_prio(GICv3CPUState *cs) +{ + /* Drop the priority of the currently active virtual interrupt + * (favouring group 0 if there is a set active bit at + * the same priority for both group 0 and group 1). + * Return the priority value for the bit we just cleared, + * or 0xff if no bits were set in the AP registers at all. + * Note that though the ich_apr[] are uint64_t only the low + * 32 bits are actually relevant. + */ + int i; + int aprmax = 1 << (cs->vprebits - 5); + + assert(aprmax <= ARRAY_SIZE(cs->ich_apr[0])); + + for (i = 0; i < aprmax; i++) { + uint64_t *papr0 = &cs->ich_apr[GICV3_G0][i]; + uint64_t *papr1 = &cs->ich_apr[GICV3_G1NS][i]; + int apr0count, apr1count; + + if (!*papr0 && !*papr1) { + continue; + } + + /* We can't just use the bit-twiddling hack icc_drop_prio() does + * because we need to return the bit number we cleared so + * it can be compared against the list register's priority field. 
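+ * Taking ctz32() first records which bit is about to be cleared; the + * "x &= x - 1" trick below then clears that lowest set bit.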
+ */ + apr0count = ctz32(*papr0); + apr1count = ctz32(*papr1); + + if (apr0count <= apr1count) { + *papr0 &= *papr0 - 1; + return (apr0count + i * 32) << (icv_min_vbpr(cs) + 1); + } else { + *papr1 &= *papr1 - 1; + return (apr1count + i * 32) << (icv_min_vbpr(cs) + 1); + } + } + return 0xff; +} + +static void icv_dir_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + /* Deactivate interrupt */ + GICv3CPUState *cs = icc_cs_from_env(env); + int idx; + int irq = value & 0xffffff; + + trace_gicv3_icv_dir_write(gicv3_redist_affid(cs), value); + + if (irq >= GICV3_MAXIRQ) { + /* Also catches special interrupt numbers and LPIs */ + return; + } + + if (!icv_eoi_split(env, cs)) { + return; + } + + idx = icv_find_active(cs, irq); + + if (idx < 0) { + /* No list register matching this, so increment the EOI count + * (might trigger a maintenance interrupt) + */ + icv_increment_eoicount(cs); + } else { + icv_deactivate_irq(cs, idx); + } + + gicv3_cpuif_virt_update(cs); +} + +static void icv_eoir_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + /* End of Interrupt */ + GICv3CPUState *cs = icc_cs_from_env(env); + int irq = value & 0xffffff; + int grp = ri->crm == 8 ? GICV3_G0 : GICV3_G1NS; + int idx, dropprio; + + trace_gicv3_icv_eoir_write(ri->crm == 8 ? 0 : 1, + gicv3_redist_affid(cs), value); + + if (gicv3_intid_is_special(irq)) { + return; + } + + /* We implement the IMPDEF choice of "drop priority before doing + * error checks" (because that lets us avoid scanning the AP + * registers twice). + */ + dropprio = icv_drop_prio(cs); + if (dropprio == 0xff) { + /* No active interrupt. It is CONSTRAINED UNPREDICTABLE + * whether the list registers are checked in this + * situation; we choose not to. + */ + return; + } + + idx = icv_find_active(cs, irq); + + if (idx < 0) { + /* No valid list register corresponding to EOI ID */ + icv_increment_eoicount(cs); + } else { + uint64_t lr = cs->ich_lr_el2[idx]; + int thisgrp = (lr & ICH_LR_EL2_GROUP) ? GICV3_G1NS : GICV3_G0; + int lr_gprio = ich_lr_prio(lr) & icv_gprio_mask(cs, grp); + + if (thisgrp == grp && lr_gprio == dropprio) { + if (!icv_eoi_split(env, cs)) { + /* Priority drop and deactivate not split: deactivate irq now */ + icv_deactivate_irq(cs, idx); + } + } + } + + gicv3_cpuif_virt_update(cs); +} + +static void icc_eoir_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + /* End of Interrupt */ + GICv3CPUState *cs = icc_cs_from_env(env); + int irq = value & 0xffffff; + int grp; + bool is_eoir0 = ri->crm == 8; + + if (icv_access(env, is_eoir0 ? HCR_FMO : HCR_IMO)) { + icv_eoir_write(env, ri, value); + return; + } + + trace_gicv3_icc_eoir_write(is_eoir0 ? 0 : 1, + gicv3_redist_affid(cs), value); + + if ((irq >= cs->gic->num_irq) && + !(cs->gic->lpi_enable && (irq >= GICV3_LPI_INTID_START))) { + /* This handles two cases: + * 1. If software writes the ID of a spurious interrupt [ie 1020-1023] + * to the GICC_EOIR, the GIC ignores that write. + * 2. If software writes the number of a non-existent interrupt + * this must be a subcase of "value written does not match the last + * valid interrupt value read from the Interrupt Acknowledge + * register" and so this is UNPREDICTABLE. We choose to ignore it. 
+ */ + return; + } + + grp = icc_highest_active_group(cs); + switch (grp) { + case GICV3_G0: + if (!is_eoir0) { + return; + } + if (!(cs->gic->gicd_ctlr & GICD_CTLR_DS) + && arm_feature(env, ARM_FEATURE_EL3) && !arm_is_secure(env)) { + return; + } + break; + case GICV3_G1: + if (is_eoir0) { + return; + } + if (!arm_is_secure(env)) { + return; + } + break; + case GICV3_G1NS: + if (is_eoir0) { + return; + } + if (!arm_is_el3_or_mon(env) && arm_is_secure(env)) { + return; + } + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: IRQ %d isn't active\n", __func__, irq); + return; + } + + icc_drop_prio(cs, grp); + + if (!icc_eoi_split(env, cs)) { + /* Priority drop and deactivate not split: deactivate irq now */ + icc_deactivate_irq(cs, irq); + } +} + +static uint64_t icc_hppir0_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ + GICv3CPUState *cs = icc_cs_from_env(env); + uint64_t value; + + if (icv_access(env, HCR_FMO)) { + return icv_hppir_read(env, ri); + } + + value = icc_hppir0_value(cs, env); + trace_gicv3_icc_hppir0_read(gicv3_redist_affid(cs), value); + return value; +} + +static uint64_t icc_hppir1_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ + GICv3CPUState *cs = icc_cs_from_env(env); + uint64_t value; + + if (icv_access(env, HCR_IMO)) { + return icv_hppir_read(env, ri); + } + + value = icc_hppir1_value(cs, env); + trace_gicv3_icc_hppir1_read(gicv3_redist_affid(cs), value); + return value; +} + +static uint64_t icc_bpr_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ + GICv3CPUState *cs = icc_cs_from_env(env); + int grp = (ri->crm == 8) ? GICV3_G0 : GICV3_G1; + bool satinc = false; + uint64_t bpr; + + if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) { + return icv_bpr_read(env, ri); + } + + if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) { + grp = GICV3_G1NS; + } + + if (grp == GICV3_G1 && !arm_is_el3_or_mon(env) && + (cs->icc_ctlr_el1[GICV3_S] & ICC_CTLR_EL1_CBPR)) { + /* CBPR_EL1S means secure EL1 or AArch32 EL3 !Mon BPR1 accesses + * modify BPR0 + */ + grp = GICV3_G0; + } + + if (grp == GICV3_G1NS && arm_current_el(env) < 3 && + (cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_CBPR)) { + /* reads return bpr0 + 1 sat to 7, writes ignored */ + grp = GICV3_G0; + satinc = true; + } + + bpr = cs->icc_bpr[grp]; + if (satinc) { + bpr++; + bpr = MIN(bpr, 7); + } + + trace_gicv3_icc_bpr_read(ri->crm == 8 ? 0 : 1, gicv3_redist_affid(cs), bpr); + + return bpr; +} + +static void icc_bpr_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + GICv3CPUState *cs = icc_cs_from_env(env); + int grp = (ri->crm == 8) ? GICV3_G0 : GICV3_G1; + uint64_t minval; + + if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) { + icv_bpr_write(env, ri, value); + return; + } + + trace_gicv3_icc_bpr_write(ri->crm == 8 ? 0 : 1, + gicv3_redist_affid(cs), value); + + if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) { + grp = GICV3_G1NS; + } + + if (grp == GICV3_G1 && !arm_is_el3_or_mon(env) && + (cs->icc_ctlr_el1[GICV3_S] & ICC_CTLR_EL1_CBPR)) { + /* CBPR_EL1S means secure EL1 or AArch32 EL3 !Mon BPR1 accesses + * modify BPR0 + */ + grp = GICV3_G0; + } + + if (grp == GICV3_G1NS && arm_current_el(env) < 3 && + (cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_CBPR)) { + /* reads return bpr0 + 1 sat to 7, writes ignored */ + return; + } + + minval = (grp == GICV3_G1NS) ? 
GIC_MIN_BPR_NS : GIC_MIN_BPR; + if (value < minval) { + value = minval; + } + + cs->icc_bpr[grp] = value & 7; + gicv3_cpuif_update(cs); +} + +static uint64_t icc_ap_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ + GICv3CPUState *cs = icc_cs_from_env(env); + uint64_t value; + + int regno = ri->opc2 & 3; + int grp = (ri->crm & 1) ? GICV3_G1 : GICV3_G0; + + if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) { + return icv_ap_read(env, ri); + } + + if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) { + grp = GICV3_G1NS; + } + + value = cs->icc_apr[grp][regno]; + + trace_gicv3_icc_ap_read(ri->crm & 1, regno, gicv3_redist_affid(cs), value); + return value; +} + +static void icc_ap_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + GICv3CPUState *cs = icc_cs_from_env(env); + + int regno = ri->opc2 & 3; + int grp = (ri->crm & 1) ? GICV3_G1 : GICV3_G0; + + if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) { + icv_ap_write(env, ri, value); + return; + } + + trace_gicv3_icc_ap_write(ri->crm & 1, regno, gicv3_redist_affid(cs), value); + + if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) { + grp = GICV3_G1NS; + } + + /* It's not possible to claim that a Non-secure interrupt is active + * at a priority outside the Non-secure range (128..255), since this + * would otherwise allow malicious NS code to block delivery of S interrupts + * by writing a bad value to these registers. + */ + if (grp == GICV3_G1NS && regno < 2 && arm_feature(env, ARM_FEATURE_EL3)) { + return; + } + + cs->icc_apr[grp][regno] = value & 0xFFFFFFFFU; + gicv3_cpuif_update(cs); +} + +static void icc_dir_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + /* Deactivate interrupt */ + GICv3CPUState *cs = icc_cs_from_env(env); + int irq = value & 0xffffff; + bool irq_is_secure, single_sec_state, irq_is_grp0; + bool route_fiq_to_el3, route_irq_to_el3, route_fiq_to_el2, route_irq_to_el2; + + if (icv_access(env, HCR_FMO | HCR_IMO)) { + icv_dir_write(env, ri, value); + return; + } + + trace_gicv3_icc_dir_write(gicv3_redist_affid(cs), value); + + if (irq >= cs->gic->num_irq) { + /* Also catches special interrupt numbers and LPIs */ + return; + } + + if (!icc_eoi_split(env, cs)) { + return; + } + + int grp = gicv3_irq_group(cs->gic, cs, irq); + + single_sec_state = cs->gic->gicd_ctlr & GICD_CTLR_DS; + irq_is_secure = !single_sec_state && (grp != GICV3_G1NS); + irq_is_grp0 = grp == GICV3_G0; + + /* Check whether we're allowed to deactivate this interrupt based + * on its group and the current CPU state. + * These checks are laid out to correspond to the spec's pseudocode. + */ + route_fiq_to_el3 = env->cp15.scr_el3 & SCR_FIQ; + route_irq_to_el3 = env->cp15.scr_el3 & SCR_IRQ; + /* No need to include !IsSecure in route_*_to_el2 as it's only + * tested in cases where we know !IsSecure is true. 
+ */ + uint64_t hcr_el2 = arm_hcr_el2_eff(env); + route_fiq_to_el2 = hcr_el2 & HCR_FMO; + route_irq_to_el2 = hcr_el2 & HCR_IMO; + + switch (arm_current_el(env)) { + case 3: + break; + case 2: + if (single_sec_state && irq_is_grp0 && !route_fiq_to_el3) { + break; + } + if (!irq_is_secure && !irq_is_grp0 && !route_irq_to_el3) { + break; + } + return; + case 1: + if (!arm_is_secure_below_el3(env)) { + if (single_sec_state && irq_is_grp0 && + !route_fiq_to_el3 && !route_fiq_to_el2) { + break; + } + if (!irq_is_secure && !irq_is_grp0 && + !route_irq_to_el3 && !route_irq_to_el2) { + break; + } + } else { + if (irq_is_grp0 && !route_fiq_to_el3) { + break; + } + if (!irq_is_grp0 && + (!irq_is_secure || !single_sec_state) && + !route_irq_to_el3) { + break; + } + } + return; + default: + g_assert_not_reached(); + } + + icc_deactivate_irq(cs, irq); +} + +static uint64_t icc_rpr_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ + GICv3CPUState *cs = icc_cs_from_env(env); + int prio; + + if (icv_access(env, HCR_FMO | HCR_IMO)) { + return icv_rpr_read(env, ri); + } + + prio = icc_highest_active_prio(cs); + + if (arm_feature(env, ARM_FEATURE_EL3) && + !arm_is_secure(env) && (env->cp15.scr_el3 & SCR_FIQ)) { + /* NS GIC access and Group 0 is inaccessible to NS */ + if ((prio & 0x80) == 0) { + /* NS mustn't see priorities in the Secure half of the range */ + prio = 0; + } else if (prio != 0xff) { + /* Non-idle priority: show the Non-secure view of it */ + prio = (prio << 1) & 0xff; + } + } + + trace_gicv3_icc_rpr_read(gicv3_redist_affid(cs), prio); + return prio; +} + +static void icc_generate_sgi(CPUARMState *env, GICv3CPUState *cs, + uint64_t value, int grp, bool ns) +{ + GICv3State *s = cs->gic; + + /* Extract Aff3/Aff2/Aff1 and shift into the bottom 24 bits */ + uint64_t aff = extract64(value, 48, 8) << 16 | + extract64(value, 32, 8) << 8 | + extract64(value, 16, 8); + uint32_t targetlist = extract64(value, 0, 16); + uint32_t irq = extract64(value, 24, 4); + bool irm = extract64(value, 40, 1); + int i; + + if (grp == GICV3_G1 && s->gicd_ctlr & GICD_CTLR_DS) { + /* If GICD_CTLR.DS == 1, the Distributor treats Secure Group 1 + * interrupts as Group 0 interrupts and must send Secure Group 0 + * interrupts to the target CPUs. + */ + grp = GICV3_G0; + } + + trace_gicv3_icc_generate_sgi(gicv3_redist_affid(cs), irq, irm, + aff, targetlist); + + for (i = 0; i < s->num_cpu; i++) { + GICv3CPUState *ocs = &s->cpu[i]; + + if (irm) { + /* IRM == 1 : route to all CPUs except self */ + if (cs == ocs) { + continue; + } + } else { + /* IRM == 0 : route to Aff3.Aff2.Aff1.n for all n in [0..15] + * where the corresponding bit is set in targetlist + */ + int aff0; + + if (ocs->gicr_typer >> 40 != aff) { + continue; + } + aff0 = extract64(ocs->gicr_typer, 32, 8); + if (aff0 > 15 || extract32(targetlist, aff0, 1) == 0) { + continue; + } + } + + /* The redistributor will check against its own GICR_NSACR as needed */ + gicv3_redist_send_sgi(ocs, grp, irq, ns); + } +} + +static void icc_sgi0r_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + /* Generate Secure Group 0 SGI. */ + GICv3CPUState *cs = icc_cs_from_env(env); + bool ns = !arm_is_secure(env); + + icc_generate_sgi(env, cs, value, GICV3_G0, ns); +} + +static void icc_sgi1r_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + /* Generate Group 1 SGI for the current Security state */ + GICv3CPUState *cs = icc_cs_from_env(env); + int grp; + bool ns = !arm_is_secure(env); + + grp = ns ? 
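/* that is, NS Group 1 when executing in Non-secure state, Secure Group 1 when executing in Secure state */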
GICV3_G1NS : GICV3_G1; + icc_generate_sgi(env, cs, value, grp, ns); +} + +static void icc_asgi1r_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + /* Generate Group 1 SGI for the Security state that is not + * the current state + */ + GICv3CPUState *cs = icc_cs_from_env(env); + int grp; + bool ns = !arm_is_secure(env); + + grp = ns ? GICV3_G1 : GICV3_G1NS; + icc_generate_sgi(env, cs, value, grp, ns); +} + +static uint64_t icc_igrpen_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ + GICv3CPUState *cs = icc_cs_from_env(env); + int grp = ri->opc2 & 1 ? GICV3_G1 : GICV3_G0; + uint64_t value; + + if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) { + return icv_igrpen_read(env, ri); + } + + if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) { + grp = GICV3_G1NS; + } + + value = cs->icc_igrpen[grp]; + trace_gicv3_icc_igrpen_read(ri->opc2 & 1 ? 1 : 0, + gicv3_redist_affid(cs), value); + return value; +} + +static void icc_igrpen_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + GICv3CPUState *cs = icc_cs_from_env(env); + int grp = ri->opc2 & 1 ? GICV3_G1 : GICV3_G0; + + if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) { + icv_igrpen_write(env, ri, value); + return; + } + + trace_gicv3_icc_igrpen_write(ri->opc2 & 1 ? 1 : 0, + gicv3_redist_affid(cs), value); + + if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) { + grp = GICV3_G1NS; + } + + cs->icc_igrpen[grp] = value & ICC_IGRPEN_ENABLE; + gicv3_cpuif_update(cs); +} + +static uint64_t icc_igrpen1_el3_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ + GICv3CPUState *cs = icc_cs_from_env(env); + uint64_t value; + + /* IGRPEN1_EL3 bits 0 and 1 are r/w aliases into IGRPEN1_EL1 NS and S */ + value = cs->icc_igrpen[GICV3_G1NS] | (cs->icc_igrpen[GICV3_G1] << 1); + trace_gicv3_icc_igrpen1_el3_read(gicv3_redist_affid(cs), value); + return value; +} + +static void icc_igrpen1_el3_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + GICv3CPUState *cs = icc_cs_from_env(env); + + trace_gicv3_icc_igrpen1_el3_write(gicv3_redist_affid(cs), value); + + /* IGRPEN1_EL3 bits 0 and 1 are r/w aliases into IGRPEN1_EL1 NS and S */ + cs->icc_igrpen[GICV3_G1NS] = extract32(value, 0, 1); + cs->icc_igrpen[GICV3_G1] = extract32(value, 1, 1); + gicv3_cpuif_update(cs); +} + +static uint64_t icc_ctlr_el1_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ + GICv3CPUState *cs = icc_cs_from_env(env); + int bank = gicv3_use_ns_bank(env) ? GICV3_NS : GICV3_S; + uint64_t value; + + if (icv_access(env, HCR_FMO | HCR_IMO)) { + return icv_ctlr_read(env, ri); + } + + value = cs->icc_ctlr_el1[bank]; + trace_gicv3_icc_ctlr_read(gicv3_redist_affid(cs), value); + return value; +} + +static void icc_ctlr_el1_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + GICv3CPUState *cs = icc_cs_from_env(env); + int bank = gicv3_use_ns_bank(env) ? GICV3_NS : GICV3_S; + uint64_t mask; + + if (icv_access(env, HCR_FMO | HCR_IMO)) { + icv_ctlr_write(env, ri, value); + return; + } + + trace_gicv3_icc_ctlr_write(gicv3_redist_affid(cs), value); + + /* Only CBPR and EOIMODE can be RW; + * for us PMHE is RAZ/WI (we don't implement 1-of-N interrupts or + * the associated priority-based routing of them); + * if EL3 is implemented and GICD_CTLR.DS == 0, then PMHE and CBPR are RO.
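+ * (In that case an EL1 write can change only EOIMODE; the CBPR bits + * are then controlled from EL3 via the CBPR_EL1S and CBPR_EL1NS + * aliases in ICC_CTLR_EL3.)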
+ */ + if (arm_feature(env, ARM_FEATURE_EL3) && + ((cs->gic->gicd_ctlr & GICD_CTLR_DS) == 0)) { + mask = ICC_CTLR_EL1_EOIMODE; + } else { + mask = ICC_CTLR_EL1_CBPR | ICC_CTLR_EL1_EOIMODE; + } + + cs->icc_ctlr_el1[bank] &= ~mask; + cs->icc_ctlr_el1[bank] |= (value & mask); + gicv3_cpuif_update(cs); +} + + +static uint64_t icc_ctlr_el3_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ + GICv3CPUState *cs = icc_cs_from_env(env); + uint64_t value; + + value = cs->icc_ctlr_el3; + if (cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_EOIMODE) { + value |= ICC_CTLR_EL3_EOIMODE_EL1NS; + } + if (cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_CBPR) { + value |= ICC_CTLR_EL3_CBPR_EL1NS; + } + if (cs->icc_ctlr_el1[GICV3_S] & ICC_CTLR_EL1_EOIMODE) { + value |= ICC_CTLR_EL3_EOIMODE_EL1S; + } + if (cs->icc_ctlr_el1[GICV3_S] & ICC_CTLR_EL1_CBPR) { + value |= ICC_CTLR_EL3_CBPR_EL1S; + } + + trace_gicv3_icc_ctlr_el3_read(gicv3_redist_affid(cs), value); + return value; +} + +static void icc_ctlr_el3_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + GICv3CPUState *cs = icc_cs_from_env(env); + uint64_t mask; + + trace_gicv3_icc_ctlr_el3_write(gicv3_redist_affid(cs), value); + + /* *_EL1NS and *_EL1S bits are aliases into the ICC_CTLR_EL1 bits. */ + cs->icc_ctlr_el1[GICV3_NS] &= ~(ICC_CTLR_EL1_CBPR | ICC_CTLR_EL1_EOIMODE); + if (value & ICC_CTLR_EL3_EOIMODE_EL1NS) { + cs->icc_ctlr_el1[GICV3_NS] |= ICC_CTLR_EL1_EOIMODE; + } + if (value & ICC_CTLR_EL3_CBPR_EL1NS) { + cs->icc_ctlr_el1[GICV3_NS] |= ICC_CTLR_EL1_CBPR; + } + + cs->icc_ctlr_el1[GICV3_S] &= ~(ICC_CTLR_EL1_CBPR | ICC_CTLR_EL1_EOIMODE); + if (value & ICC_CTLR_EL3_EOIMODE_EL1S) { + cs->icc_ctlr_el1[GICV3_S] |= ICC_CTLR_EL1_EOIMODE; + } + if (value & ICC_CTLR_EL3_CBPR_EL1S) { + cs->icc_ctlr_el1[GICV3_S] |= ICC_CTLR_EL1_CBPR; + } + + /* The only bit stored in icc_ctlr_el3 which is writeable is EOIMODE_EL3: */ + mask = ICC_CTLR_EL3_EOIMODE_EL3; + + cs->icc_ctlr_el3 &= ~mask; + cs->icc_ctlr_el3 |= (value & mask); + gicv3_cpuif_update(cs); +} + +static CPAccessResult gicv3_irqfiq_access(CPUARMState *env, + const ARMCPRegInfo *ri, bool isread) +{ + CPAccessResult r = CP_ACCESS_OK; + GICv3CPUState *cs = icc_cs_from_env(env); + int el = arm_current_el(env); + + if ((cs->ich_hcr_el2 & ICH_HCR_EL2_TC) && + el == 1 && !arm_is_secure_below_el3(env)) { + /* Takes priority over a possible EL3 trap */ + return CP_ACCESS_TRAP_EL2; + } + + if ((env->cp15.scr_el3 & (SCR_FIQ | SCR_IRQ)) == (SCR_FIQ | SCR_IRQ)) { + switch (el) { + case 1: + /* Note that arm_hcr_el2_eff takes secure state into account.
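+ * (Its IMO and FMO bits read as zero when EL2 is not enabled for + * the current security state.)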
*/ + if ((arm_hcr_el2_eff(env) & (HCR_IMO | HCR_FMO)) == 0) { + r = CP_ACCESS_TRAP_EL3; + } + break; + case 2: + r = CP_ACCESS_TRAP_EL3; + break; + case 3: + if (!is_a64(env) && !arm_is_el3_or_mon(env)) { + r = CP_ACCESS_TRAP_EL3; + } + break; + default: + g_assert_not_reached(); + } + } + + if (r == CP_ACCESS_TRAP_EL3 && !arm_el_is_aa64(env, 3)) { + r = CP_ACCESS_TRAP; + } + return r; +} + +static CPAccessResult gicv3_dir_access(CPUARMState *env, + const ARMCPRegInfo *ri, bool isread) +{ + GICv3CPUState *cs = icc_cs_from_env(env); + + if ((cs->ich_hcr_el2 & ICH_HCR_EL2_TDIR) && + arm_current_el(env) == 1 && !arm_is_secure_below_el3(env)) { + /* Takes priority over a possible EL3 trap */ + return CP_ACCESS_TRAP_EL2; + } + + return gicv3_irqfiq_access(env, ri, isread); +} + +static CPAccessResult gicv3_sgi_access(CPUARMState *env, + const ARMCPRegInfo *ri, bool isread) +{ + if (arm_current_el(env) == 1 && + (arm_hcr_el2_eff(env) & (HCR_IMO | HCR_FMO)) != 0) { + /* Takes priority over a possible EL3 trap */ + return CP_ACCESS_TRAP_EL2; + } + + return gicv3_irqfiq_access(env, ri, isread); +} + +static CPAccessResult gicv3_fiq_access(CPUARMState *env, + const ARMCPRegInfo *ri, bool isread) +{ + CPAccessResult r = CP_ACCESS_OK; + GICv3CPUState *cs = icc_cs_from_env(env); + int el = arm_current_el(env); + + if ((cs->ich_hcr_el2 & ICH_HCR_EL2_TALL0) && + el == 1 && !arm_is_secure_below_el3(env)) { + /* Takes priority over a possible EL3 trap */ + return CP_ACCESS_TRAP_EL2; + } + + if (env->cp15.scr_el3 & SCR_FIQ) { + switch (el) { + case 1: + if ((arm_hcr_el2_eff(env) & HCR_FMO) == 0) { + r = CP_ACCESS_TRAP_EL3; + } + break; + case 2: + r = CP_ACCESS_TRAP_EL3; + break; + case 3: + if (!is_a64(env) && !arm_is_el3_or_mon(env)) { + r = CP_ACCESS_TRAP_EL3; + } + break; + default: + g_assert_not_reached(); + } + } + + if (r == CP_ACCESS_TRAP_EL3 && !arm_el_is_aa64(env, 3)) { + r = CP_ACCESS_TRAP; + } + return r; +} + +static CPAccessResult gicv3_irq_access(CPUARMState *env, + const ARMCPRegInfo *ri, bool isread) +{ + CPAccessResult r = CP_ACCESS_OK; + GICv3CPUState *cs = icc_cs_from_env(env); + int el = arm_current_el(env); + + if ((cs->ich_hcr_el2 & ICH_HCR_EL2_TALL1) && + el == 1 && !arm_is_secure_below_el3(env)) { + /* Takes priority over a possible EL3 trap */ + return CP_ACCESS_TRAP_EL2; + } + + if (env->cp15.scr_el3 & SCR_IRQ) { + switch (el) { + case 1: + if ((arm_hcr_el2_eff(env) & HCR_IMO) == 0) { + r = CP_ACCESS_TRAP_EL3; + } + break; + case 2: + r = CP_ACCESS_TRAP_EL3; + break; + case 3: + if (!is_a64(env) && !arm_is_el3_or_mon(env)) { + r = CP_ACCESS_TRAP_EL3; + } + break; + default: + g_assert_not_reached(); + } + } + + if (r == CP_ACCESS_TRAP_EL3 && !arm_el_is_aa64(env, 3)) { + r = CP_ACCESS_TRAP; + } + return r; +} + +static void icc_reset(CPUARMState *env, const ARMCPRegInfo *ri) +{ + GICv3CPUState *cs = icc_cs_from_env(env); + + cs->icc_ctlr_el1[GICV3_S] = ICC_CTLR_EL1_A3V | + (1 << ICC_CTLR_EL1_IDBITS_SHIFT) | + (7 << ICC_CTLR_EL1_PRIBITS_SHIFT); + cs->icc_ctlr_el1[GICV3_NS] = ICC_CTLR_EL1_A3V | + (1 << ICC_CTLR_EL1_IDBITS_SHIFT) | + (7 << ICC_CTLR_EL1_PRIBITS_SHIFT); + cs->icc_pmr_el1 = 0; + cs->icc_bpr[GICV3_G0] = GIC_MIN_BPR; + cs->icc_bpr[GICV3_G1] = GIC_MIN_BPR; + cs->icc_bpr[GICV3_G1NS] = GIC_MIN_BPR_NS; + memset(cs->icc_apr, 0, sizeof(cs->icc_apr)); + memset(cs->icc_igrpen, 0, sizeof(cs->icc_igrpen)); + cs->icc_ctlr_el3 = ICC_CTLR_EL3_NDS | ICC_CTLR_EL3_A3V | + (1 << ICC_CTLR_EL3_IDBITS_SHIFT) | + (7 << ICC_CTLR_EL3_PRIBITS_SHIFT); + + memset(cs->ich_apr, 0, 
sizeof(cs->ich_apr)); + cs->ich_hcr_el2 = 0; + memset(cs->ich_lr_el2, 0, sizeof(cs->ich_lr_el2)); + cs->ich_vmcr_el2 = ICH_VMCR_EL2_VFIQEN | + ((icv_min_vbpr(cs) + 1) << ICH_VMCR_EL2_VBPR1_SHIFT) | + (icv_min_vbpr(cs) << ICH_VMCR_EL2_VBPR0_SHIFT); +} + +static const ARMCPRegInfo gicv3_cpuif_reginfo[] = { + { .name = "ICC_PMR_EL1", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 6, .opc2 = 0, + .type = ARM_CP_IO | ARM_CP_NO_RAW, + .access = PL1_RW, .accessfn = gicv3_irqfiq_access, + .readfn = icc_pmr_read, + .writefn = icc_pmr_write, + /* We hang the whole cpu interface reset routine off here + * rather than parcelling it out into one little function + * per register + */ + .resetfn = icc_reset, + }, + { .name = "ICC_IAR0_EL1", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 0, + .type = ARM_CP_IO | ARM_CP_NO_RAW, + .access = PL1_R, .accessfn = gicv3_fiq_access, + .readfn = icc_iar0_read, + }, + { .name = "ICC_EOIR0_EL1", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 1, + .type = ARM_CP_IO | ARM_CP_NO_RAW, + .access = PL1_W, .accessfn = gicv3_fiq_access, + .writefn = icc_eoir_write, + }, + { .name = "ICC_HPPIR0_EL1", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 2, + .type = ARM_CP_IO | ARM_CP_NO_RAW, + .access = PL1_R, .accessfn = gicv3_fiq_access, + .readfn = icc_hppir0_read, + }, + { .name = "ICC_BPR0_EL1", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 3, + .type = ARM_CP_IO | ARM_CP_NO_RAW, + .access = PL1_RW, .accessfn = gicv3_fiq_access, + .readfn = icc_bpr_read, + .writefn = icc_bpr_write, + }, + { .name = "ICC_AP0R0_EL1", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 4, + .type = ARM_CP_IO | ARM_CP_NO_RAW, + .access = PL1_RW, .accessfn = gicv3_fiq_access, + .readfn = icc_ap_read, + .writefn = icc_ap_write, + }, + { .name = "ICC_AP0R1_EL1", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 5, + .type = ARM_CP_IO | ARM_CP_NO_RAW, + .access = PL1_RW, .accessfn = gicv3_fiq_access, + .readfn = icc_ap_read, + .writefn = icc_ap_write, + }, + { .name = "ICC_AP0R2_EL1", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 6, + .type = ARM_CP_IO | ARM_CP_NO_RAW, + .access = PL1_RW, .accessfn = gicv3_fiq_access, + .readfn = icc_ap_read, + .writefn = icc_ap_write, + }, + { .name = "ICC_AP0R3_EL1", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 7, + .type = ARM_CP_IO | ARM_CP_NO_RAW, + .access = PL1_RW, .accessfn = gicv3_fiq_access, + .readfn = icc_ap_read, + .writefn = icc_ap_write, + }, + /* All the ICC_AP1R*_EL1 registers are banked */ + { .name = "ICC_AP1R0_EL1", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 9, .opc2 = 0, + .type = ARM_CP_IO | ARM_CP_NO_RAW, + .access = PL1_RW, .accessfn = gicv3_irq_access, + .readfn = icc_ap_read, + .writefn = icc_ap_write, + }, + { .name = "ICC_AP1R1_EL1", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 9, .opc2 = 1, + .type = ARM_CP_IO | ARM_CP_NO_RAW, + .access = PL1_RW, .accessfn = gicv3_irq_access, + .readfn = icc_ap_read, + .writefn = icc_ap_write, + }, + { .name = "ICC_AP1R2_EL1", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 9, .opc2 = 2, + .type = ARM_CP_IO | ARM_CP_NO_RAW, + .access = PL1_RW, .accessfn = gicv3_irq_access, + .readfn = icc_ap_read, + .writefn = icc_ap_write, + }, + { .name = 
"ICC_AP1R3_EL1", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 9, .opc2 = 3, + .type = ARM_CP_IO | ARM_CP_NO_RAW, + .access = PL1_RW, .accessfn = gicv3_irq_access, + .readfn = icc_ap_read, + .writefn = icc_ap_write, + }, + { .name = "ICC_DIR_EL1", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 11, .opc2 = 1, + .type = ARM_CP_IO | ARM_CP_NO_RAW, + .access = PL1_W, .accessfn = gicv3_dir_access, + .writefn = icc_dir_write, + }, + { .name = "ICC_RPR_EL1", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 11, .opc2 = 3, + .type = ARM_CP_IO | ARM_CP_NO_RAW, + .access = PL1_R, .accessfn = gicv3_irqfiq_access, + .readfn = icc_rpr_read, + }, + { .name = "ICC_SGI1R_EL1", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 11, .opc2 = 5, + .type = ARM_CP_IO | ARM_CP_NO_RAW, + .access = PL1_W, .accessfn = gicv3_sgi_access, + .writefn = icc_sgi1r_write, + }, + { .name = "ICC_SGI1R", + .cp = 15, .opc1 = 0, .crm = 12, + .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_NO_RAW, + .access = PL1_W, .accessfn = gicv3_sgi_access, + .writefn = icc_sgi1r_write, + }, + { .name = "ICC_ASGI1R_EL1", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 11, .opc2 = 6, + .type = ARM_CP_IO | ARM_CP_NO_RAW, + .access = PL1_W, .accessfn = gicv3_sgi_access, + .writefn = icc_asgi1r_write, + }, + { .name = "ICC_ASGI1R", + .cp = 15, .opc1 = 1, .crm = 12, + .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_NO_RAW, + .access = PL1_W, .accessfn = gicv3_sgi_access, + .writefn = icc_asgi1r_write, + }, + { .name = "ICC_SGI0R_EL1", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 11, .opc2 = 7, + .type = ARM_CP_IO | ARM_CP_NO_RAW, + .access = PL1_W, .accessfn = gicv3_sgi_access, + .writefn = icc_sgi0r_write, + }, + { .name = "ICC_SGI0R", + .cp = 15, .opc1 = 2, .crm = 12, + .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_NO_RAW, + .access = PL1_W, .accessfn = gicv3_sgi_access, + .writefn = icc_sgi0r_write, + }, + { .name = "ICC_IAR1_EL1", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 0, + .type = ARM_CP_IO | ARM_CP_NO_RAW, + .access = PL1_R, .accessfn = gicv3_irq_access, + .readfn = icc_iar1_read, + }, + { .name = "ICC_EOIR1_EL1", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 1, + .type = ARM_CP_IO | ARM_CP_NO_RAW, + .access = PL1_W, .accessfn = gicv3_irq_access, + .writefn = icc_eoir_write, + }, + { .name = "ICC_HPPIR1_EL1", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 2, + .type = ARM_CP_IO | ARM_CP_NO_RAW, + .access = PL1_R, .accessfn = gicv3_irq_access, + .readfn = icc_hppir1_read, + }, + /* This register is banked */ + { .name = "ICC_BPR1_EL1", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 3, + .type = ARM_CP_IO | ARM_CP_NO_RAW, + .access = PL1_RW, .accessfn = gicv3_irq_access, + .readfn = icc_bpr_read, + .writefn = icc_bpr_write, + }, + /* This register is banked */ + { .name = "ICC_CTLR_EL1", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 4, + .type = ARM_CP_IO | ARM_CP_NO_RAW, + .access = PL1_RW, .accessfn = gicv3_irqfiq_access, + .readfn = icc_ctlr_el1_read, + .writefn = icc_ctlr_el1_write, + }, + { .name = "ICC_SRE_EL1", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 5, + .type = ARM_CP_NO_RAW | ARM_CP_CONST, + .access = PL1_RW, + /* We don't support IRQ/FIQ bypass and system registers are + * always enabled, so 
all our bits are RAZ/WI or RAO/WI. + * This register is banked but since it's constant we don't + * need to do anything special. + */ + .resetvalue = 0x7, + }, + { .name = "ICC_IGRPEN0_EL1", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 6, + .type = ARM_CP_IO | ARM_CP_NO_RAW, + .access = PL1_RW, .accessfn = gicv3_fiq_access, + .readfn = icc_igrpen_read, + .writefn = icc_igrpen_write, + }, + /* This register is banked */ + { .name = "ICC_IGRPEN1_EL1", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 7, + .type = ARM_CP_IO | ARM_CP_NO_RAW, + .access = PL1_RW, .accessfn = gicv3_irq_access, + .readfn = icc_igrpen_read, + .writefn = icc_igrpen_write, + }, + { .name = "ICC_SRE_EL2", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 9, .opc2 = 5, + .type = ARM_CP_NO_RAW | ARM_CP_CONST, + .access = PL2_RW, + /* We don't support IRQ/FIQ bypass and system registers are + * always enabled, so all our bits are RAZ/WI or RAO/WI. + */ + .resetvalue = 0xf, + }, + { .name = "ICC_CTLR_EL3", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 12, .opc2 = 4, + .type = ARM_CP_IO | ARM_CP_NO_RAW, + .access = PL3_RW, + .readfn = icc_ctlr_el3_read, + .writefn = icc_ctlr_el3_write, + }, + { .name = "ICC_SRE_EL3", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 12, .opc2 = 5, + .type = ARM_CP_NO_RAW | ARM_CP_CONST, + .access = PL3_RW, + /* We don't support IRQ/FIQ bypass and system registers are + * always enabled, so all our bits are RAZ/WI or RAO/WI. + */ + .resetvalue = 0xf, + }, + { .name = "ICC_IGRPEN1_EL3", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 12, .opc2 = 7, + .type = ARM_CP_IO | ARM_CP_NO_RAW, + .access = PL3_RW, + .readfn = icc_igrpen1_el3_read, + .writefn = icc_igrpen1_el3_write, + }, + REGINFO_SENTINEL +}; + +static uint64_t ich_ap_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ + GICv3CPUState *cs = icc_cs_from_env(env); + int regno = ri->opc2 & 3; + int grp = (ri->crm & 1) ? GICV3_G1NS : GICV3_G0; + uint64_t value; + + value = cs->ich_apr[grp][regno]; + trace_gicv3_ich_ap_read(ri->crm & 1, regno, gicv3_redist_affid(cs), value); + return value; +} + +static void ich_ap_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + GICv3CPUState *cs = icc_cs_from_env(env); + int regno = ri->opc2 & 3; + int grp = (ri->crm & 1) ? 
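/* crm == 8 encodes ICH_AP0Rn and crm == 9 encodes ICH_AP1Rn, so bit 0 of crm selects the group */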
GICV3_G1NS : GICV3_G0; + + trace_gicv3_ich_ap_write(ri->crm & 1, regno, gicv3_redist_affid(cs), value); + + cs->ich_apr[grp][regno] = value & 0xFFFFFFFFU; + gicv3_cpuif_virt_update(cs); +} + +static uint64_t ich_hcr_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ + GICv3CPUState *cs = icc_cs_from_env(env); + uint64_t value = cs->ich_hcr_el2; + + trace_gicv3_ich_hcr_read(gicv3_redist_affid(cs), value); + return value; +} + +static void ich_hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + GICv3CPUState *cs = icc_cs_from_env(env); + + trace_gicv3_ich_hcr_write(gicv3_redist_affid(cs), value); + + value &= ICH_HCR_EL2_EN | ICH_HCR_EL2_UIE | ICH_HCR_EL2_LRENPIE | + ICH_HCR_EL2_NPIE | ICH_HCR_EL2_VGRP0EIE | ICH_HCR_EL2_VGRP0DIE | + ICH_HCR_EL2_VGRP1EIE | ICH_HCR_EL2_VGRP1DIE | ICH_HCR_EL2_TC | + ICH_HCR_EL2_TALL0 | ICH_HCR_EL2_TALL1 | ICH_HCR_EL2_TSEI | + ICH_HCR_EL2_TDIR | ICH_HCR_EL2_EOICOUNT_MASK; + + cs->ich_hcr_el2 = value; + gicv3_cpuif_virt_update(cs); +} + +static uint64_t ich_vmcr_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ + GICv3CPUState *cs = icc_cs_from_env(env); + uint64_t value = cs->ich_vmcr_el2; + + trace_gicv3_ich_vmcr_read(gicv3_redist_affid(cs), value); + return value; +} + +static void ich_vmcr_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + GICv3CPUState *cs = icc_cs_from_env(env); + + trace_gicv3_ich_vmcr_write(gicv3_redist_affid(cs), value); + + value &= ICH_VMCR_EL2_VENG0 | ICH_VMCR_EL2_VENG1 | ICH_VMCR_EL2_VCBPR | + ICH_VMCR_EL2_VEOIM | ICH_VMCR_EL2_VBPR1_MASK | + ICH_VMCR_EL2_VBPR0_MASK | ICH_VMCR_EL2_VPMR_MASK; + value |= ICH_VMCR_EL2_VFIQEN; + + cs->ich_vmcr_el2 = value; + /* Enforce "writing BPRs to less than minimum sets them to the minimum" + * by reading and writing back the fields. 
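+ * (For example, if icv_min_vbpr() is 2, which is the vprebits == 5 + * case, then a guest write of 0 to the VBPR0 field reads back as 2.)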
+ */ + write_vbpr(cs, GICV3_G0, read_vbpr(cs, GICV3_G0)); + write_vbpr(cs, GICV3_G1, read_vbpr(cs, GICV3_G1)); + + gicv3_cpuif_virt_update(cs); +} + +static uint64_t ich_lr_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ + GICv3CPUState *cs = icc_cs_from_env(env); + int regno = ri->opc2 | ((ri->crm & 1) << 3); + uint64_t value; + + /* This read function handles all of: + * 64-bit reads of the whole LR + * 32-bit reads of the low half of the LR + * 32-bit reads of the high half of the LR + */ + if (ri->state == ARM_CP_STATE_AA32) { + if (ri->crm >= 14) { + value = extract64(cs->ich_lr_el2[regno], 32, 32); + trace_gicv3_ich_lrc_read(regno, gicv3_redist_affid(cs), value); + } else { + value = extract64(cs->ich_lr_el2[regno], 0, 32); + trace_gicv3_ich_lr32_read(regno, gicv3_redist_affid(cs), value); + } + } else { + value = cs->ich_lr_el2[regno]; + trace_gicv3_ich_lr_read(regno, gicv3_redist_affid(cs), value); + } + + return value; +} + +static void ich_lr_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + GICv3CPUState *cs = icc_cs_from_env(env); + int regno = ri->opc2 | ((ri->crm & 1) << 3); + + /* This write function handles all of: + * 64-bit writes to the whole LR + * 32-bit writes to the low half of the LR + * 32-bit writes to the high half of the LR + */ + if (ri->state == ARM_CP_STATE_AA32) { + if (ri->crm >= 14) { + trace_gicv3_ich_lrc_write(regno, gicv3_redist_affid(cs), value); + value = deposit64(cs->ich_lr_el2[regno], 32, 32, value); + } else { + trace_gicv3_ich_lr32_write(regno, gicv3_redist_affid(cs), value); + value = deposit64(cs->ich_lr_el2[regno], 0, 32, value); + } + } else { + trace_gicv3_ich_lr_write(regno, gicv3_redist_affid(cs), value); + } + + /* Enforce RES0 bits in priority field */ + if (cs->vpribits < 8) { + value = deposit64(value, ICH_LR_EL2_PRIORITY_SHIFT, + 8 - cs->vpribits, 0); + } + + cs->ich_lr_el2[regno] = value; + gicv3_cpuif_virt_update(cs); +} + +static uint64_t ich_vtr_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ + GICv3CPUState *cs = icc_cs_from_env(env); + uint64_t value; + + value = ((cs->num_list_regs - 1) << ICH_VTR_EL2_LISTREGS_SHIFT) + | ICH_VTR_EL2_TDS | ICH_VTR_EL2_NV4 | ICH_VTR_EL2_A3V + | (1 << ICH_VTR_EL2_IDBITS_SHIFT) + | ((cs->vprebits - 1) << ICH_VTR_EL2_PREBITS_SHIFT) + | ((cs->vpribits - 1) << ICH_VTR_EL2_PRIBITS_SHIFT); + + trace_gicv3_ich_vtr_read(gicv3_redist_affid(cs), value); + return value; +} + +static uint64_t ich_misr_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ + GICv3CPUState *cs = icc_cs_from_env(env); + uint64_t value = maintenance_interrupt_state(cs); + + trace_gicv3_ich_misr_read(gicv3_redist_affid(cs), value); + return value; +} + +static uint64_t ich_eisr_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ + GICv3CPUState *cs = icc_cs_from_env(env); + uint64_t value = eoi_maintenance_interrupt_state(cs, NULL); + + trace_gicv3_ich_eisr_read(gicv3_redist_affid(cs), value); + return value; +} + +static uint64_t ich_elrsr_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ + GICv3CPUState *cs = icc_cs_from_env(env); + uint64_t value = 0; + int i; + + for (i = 0; i < cs->num_list_regs; i++) { + uint64_t lr = cs->ich_lr_el2[i]; + + if ((lr & ICH_LR_EL2_STATE_MASK) == 0 && + ((lr & ICH_LR_EL2_HW) != 0 || (lr & ICH_LR_EL2_EOI) == 0)) { + value |= (1 << i); + } + } + + trace_gicv3_ich_elrsr_read(gicv3_redist_affid(cs), value); + return value; +} + +static const ARMCPRegInfo gicv3_cpuif_hcr_reginfo[] = { + { .name = "ICH_AP0R0_EL2", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 
8, .opc2 = 0, + .type = ARM_CP_IO | ARM_CP_NO_RAW, + .access = PL2_RW, + .readfn = ich_ap_read, + .writefn = ich_ap_write, + }, + { .name = "ICH_AP1R0_EL2", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 9, .opc2 = 0, + .type = ARM_CP_IO | ARM_CP_NO_RAW, + .access = PL2_RW, + .readfn = ich_ap_read, + .writefn = ich_ap_write, + }, + { .name = "ICH_HCR_EL2", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 0, + .type = ARM_CP_IO | ARM_CP_NO_RAW, + .access = PL2_RW, + .readfn = ich_hcr_read, + .writefn = ich_hcr_write, + }, + { .name = "ICH_VTR_EL2", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 1, + .type = ARM_CP_IO | ARM_CP_NO_RAW, + .access = PL2_R, + .readfn = ich_vtr_read, + }, + { .name = "ICH_MISR_EL2", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 2, + .type = ARM_CP_IO | ARM_CP_NO_RAW, + .access = PL2_R, + .readfn = ich_misr_read, + }, + { .name = "ICH_EISR_EL2", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 3, + .type = ARM_CP_IO | ARM_CP_NO_RAW, + .access = PL2_R, + .readfn = ich_eisr_read, + }, + { .name = "ICH_ELRSR_EL2", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 5, + .type = ARM_CP_IO | ARM_CP_NO_RAW, + .access = PL2_R, + .readfn = ich_elrsr_read, + }, + { .name = "ICH_VMCR_EL2", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 7, + .type = ARM_CP_IO | ARM_CP_NO_RAW, + .access = PL2_RW, + .readfn = ich_vmcr_read, + .writefn = ich_vmcr_write, + }, + REGINFO_SENTINEL +}; + +static const ARMCPRegInfo gicv3_cpuif_ich_apxr1_reginfo[] = { + { .name = "ICH_AP0R1_EL2", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 8, .opc2 = 1, + .type = ARM_CP_IO | ARM_CP_NO_RAW, + .access = PL2_RW, + .readfn = ich_ap_read, + .writefn = ich_ap_write, + }, + { .name = "ICH_AP1R1_EL2", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 9, .opc2 = 1, + .type = ARM_CP_IO | ARM_CP_NO_RAW, + .access = PL2_RW, + .readfn = ich_ap_read, + .writefn = ich_ap_write, + }, + REGINFO_SENTINEL +}; + +static const ARMCPRegInfo gicv3_cpuif_ich_apxr23_reginfo[] = { + { .name = "ICH_AP0R2_EL2", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 8, .opc2 = 2, + .type = ARM_CP_IO | ARM_CP_NO_RAW, + .access = PL2_RW, + .readfn = ich_ap_read, + .writefn = ich_ap_write, + }, + { .name = "ICH_AP0R3_EL2", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 8, .opc2 = 3, + .type = ARM_CP_IO | ARM_CP_NO_RAW, + .access = PL2_RW, + .readfn = ich_ap_read, + .writefn = ich_ap_write, + }, + { .name = "ICH_AP1R2_EL2", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 9, .opc2 = 2, + .type = ARM_CP_IO | ARM_CP_NO_RAW, + .access = PL2_RW, + .readfn = ich_ap_read, + .writefn = ich_ap_write, + }, + { .name = "ICH_AP1R3_EL2", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 9, .opc2 = 3, + .type = ARM_CP_IO | ARM_CP_NO_RAW, + .access = PL2_RW, + .readfn = ich_ap_read, + .writefn = ich_ap_write, + }, + REGINFO_SENTINEL +}; + +static void gicv3_cpuif_el_change_hook(ARMCPU *cpu, void *opaque) +{ + GICv3CPUState *cs = opaque; + + gicv3_cpuif_update(cs); +} + +void gicv3_init_cpuif(GICv3State *s) +{ + /* Called from the GICv3 realize function; register our system + * registers with the CPU + */ + int i; + + for (i = 0; i < s->num_cpu; i++) { + ARMCPU *cpu = ARM_CPU(qemu_get_cpu(i)); + 
GICv3CPUState *cs = &s->cpu[i]; + + /* Note that we can't just use the GICv3CPUState as an opaque pointer + * in define_arm_cp_regs_with_opaque(), because when we're called back + * it might be with code translated by CPU 0 but run by CPU 1, in + * which case we'd get the wrong value. + * So instead we define the regs with no ri->opaque info, and + * get back to the GICv3CPUState from the CPUARMState. + */ + define_arm_cp_regs(cpu, gicv3_cpuif_reginfo); + if (arm_feature(&cpu->env, ARM_FEATURE_EL2) + && cpu->gic_num_lrs) { + int j; + + cs->num_list_regs = cpu->gic_num_lrs; + cs->vpribits = cpu->gic_vpribits; + cs->vprebits = cpu->gic_vprebits; + + /* Check against architectural constraints: getting these + * wrong would be a bug in the CPU code defining these, + * and the implementation relies on them holding. + */ + g_assert(cs->vprebits <= cs->vpribits); + g_assert(cs->vprebits >= 5 && cs->vprebits <= 7); + g_assert(cs->vpribits >= 5 && cs->vpribits <= 8); + + define_arm_cp_regs(cpu, gicv3_cpuif_hcr_reginfo); + + for (j = 0; j < cs->num_list_regs; j++) { + /* Note that the AArch64 LRs are 64-bit; the AArch32 LRs + * are split into two cp15 regs, LR (the low part, with the + * same encoding as the AArch64 LR) and LRC (the high part). + */ + ARMCPRegInfo lr_regset[] = { + { .name = "ICH_LRn_EL2", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 4, .crn = 12, + .crm = 12 + (j >> 3), .opc2 = j & 7, + .type = ARM_CP_IO | ARM_CP_NO_RAW, + .access = PL2_RW, + .readfn = ich_lr_read, + .writefn = ich_lr_write, + }, + { .name = "ICH_LRCn_EL2", .state = ARM_CP_STATE_AA32, + .cp = 15, .opc1 = 4, .crn = 12, + .crm = 14 + (j >> 3), .opc2 = j & 7, + .type = ARM_CP_IO | ARM_CP_NO_RAW, + .access = PL2_RW, + .readfn = ich_lr_read, + .writefn = ich_lr_write, + }, + REGINFO_SENTINEL + }; + define_arm_cp_regs(cpu, lr_regset); + } + if (cs->vprebits >= 6) { + define_arm_cp_regs(cpu, gicv3_cpuif_ich_apxr1_reginfo); + } + if (cs->vprebits == 7) { + define_arm_cp_regs(cpu, gicv3_cpuif_ich_apxr23_reginfo); + } + } + arm_register_el_change_hook(cpu, gicv3_cpuif_el_change_hook, cs); + } +} diff --git a/hw/intc/arm_gicv3_dist.c b/hw/intc/arm_gicv3_dist.c new file mode 100644 index 000000000..4164500ea --- /dev/null +++ b/hw/intc/arm_gicv3_dist.c @@ -0,0 +1,914 @@ +/* + * ARM GICv3 emulation: Distributor + * + * Copyright (c) 2015 Huawei. + * Copyright (c) 2016 Linaro Limited. + * Written by Shlomo Pongratz, Peter Maydell + * + * This code is licensed under the GPL, version 2 or (at your option) + * any later version. + */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "trace.h" +#include "gicv3_internal.h" + +/* The GICD_NSACR registers contain a two bit field for each interrupt which + * allows the guest to give NonSecure code access to registers controlling + * Secure interrupts: + * 0b00: no access (NS accesses to bits for Secure interrupts will RAZ/WI) + * 0b01: NS r/w accesses permitted to ISPENDR, SETSPI_NSR, SGIR + * 0b10: as 0b01, and also r/w to ICPENDR, r/o to ISACTIVER/ICACTIVER, + * and w/o to CLRSPI_NSR + * 0b11: as 0b10, and also r/w to IROUTER and ITARGETSR + * + * Given a (multiple-of-32) interrupt number, these mask functions return + * a mask word where each bit is 1 if the NSACR settings permit access + * to the interrupt. The mask returned can then be ORed with the GICD_GROUP + * word for this set of interrupts to give an overall mask. 
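+ * (Worked example: with an NSACR field of 0b01 for IRQ 32 and 0b10 for + * IRQ 33, mask_nsacr_ge1() returns 0b11 and mask_nsacr_ge2() returns + * 0b10, where bit 0 corresponds to IRQ 32.)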
+ */ + +typedef uint32_t maskfn(GICv3State *s, int irq); + +static uint32_t mask_nsacr_ge1(GICv3State *s, int irq) +{ + /* Return a mask where each bit is set if the NSACR field is >= 1 */ + uint64_t raw_nsacr = s->gicd_nsacr[irq / 16 + 1]; + + raw_nsacr = raw_nsacr << 32 | s->gicd_nsacr[irq / 16]; + raw_nsacr = (raw_nsacr >> 1) | raw_nsacr; + return half_unshuffle64(raw_nsacr); +} + +static uint32_t mask_nsacr_ge2(GICv3State *s, int irq) +{ + /* Return a mask where each bit is set if the NSACR field is >= 2 */ + uint64_t raw_nsacr = s->gicd_nsacr[irq / 16 + 1]; + + raw_nsacr = raw_nsacr << 32 | s->gicd_nsacr[irq / 16]; + raw_nsacr = raw_nsacr >> 1; + return half_unshuffle64(raw_nsacr); +} + +/* We don't need a mask_nsacr_ge3() because IROUTER isn't a bitmap register, + * but it would be implemented using: + * raw_nsacr = (raw_nsacr >> 1) & raw_nsacr; + */ + +static uint32_t mask_group_and_nsacr(GICv3State *s, MemTxAttrs attrs, + maskfn *maskfn, int irq) +{ + /* Return a 32-bit mask which should be applied for this set of 32 + * interrupts; each bit is 1 if access is permitted by the + * combination of attrs.secure, GICD_GROUPR and GICD_NSACR. + */ + uint32_t mask; + + if (!attrs.secure && !(s->gicd_ctlr & GICD_CTLR_DS)) { + /* bits for Group 0 or Secure Group 1 interrupts are RAZ/WI + * unless the NSACR bits permit access. + */ + mask = *gic_bmp_ptr32(s->group, irq); + if (maskfn) { + mask |= maskfn(s, irq); + } + return mask; + } + return 0xFFFFFFFFU; +} + +static int gicd_ns_access(GICv3State *s, int irq) +{ + /* Return the 2 bit NS_access field from GICD_NSACR for the + * specified interrupt. + */ + if (irq < GIC_INTERNAL || irq >= s->num_irq) { + return 0; + } + return extract32(s->gicd_nsacr[irq / 16], (irq % 16) * 2, 2); +} + +static void gicd_write_set_bitmap_reg(GICv3State *s, MemTxAttrs attrs, + uint32_t *bmp, + maskfn *maskfn, + int offset, uint32_t val) +{ + /* Helper routine to implement writing to a "set-bitmap" register + * (GICD_ISENABLER, GICD_ISPENDR, etc). + * Semantics implemented here: + * RAZ/WI for SGIs, PPIs, unimplemented IRQs + * Bits corresponding to Group 0 or Secure Group 1 interrupts RAZ/WI. + * Writing 1 means "set bit in bitmap"; writing 0 is ignored. + * offset should be the offset in bytes of the register from the start + * of its group. + */ + int irq = offset * 8; + + if (irq < GIC_INTERNAL || irq >= s->num_irq) { + return; + } + val &= mask_group_and_nsacr(s, attrs, maskfn, irq); + *gic_bmp_ptr32(bmp, irq) |= val; + gicv3_update(s, irq, 32); +} + +static void gicd_write_clear_bitmap_reg(GICv3State *s, MemTxAttrs attrs, + uint32_t *bmp, + maskfn *maskfn, + int offset, uint32_t val) +{ + /* Helper routine to implement writing to a "clear-bitmap" register + * (GICD_ICENABLER, GICD_ICPENDR, etc). + * Semantics implemented here: + * RAZ/WI for SGIs, PPIs, unimplemented IRQs + * Bits corresponding to Group 0 or Secure Group 1 interrupts RAZ/WI. + * Writing 1 means "clear bit in bitmap"; writing 0 is ignored. + * offset should be the offset in bytes of the register from the start + * of its group. + */ + int irq = offset * 8; + + if (irq < GIC_INTERNAL || irq >= s->num_irq) { + return; + } + val &= mask_group_and_nsacr(s, attrs, maskfn, irq); + *gic_bmp_ptr32(bmp, irq) &= ~val; + gicv3_update(s, irq, 32); +} + +static uint32_t gicd_read_bitmap_reg(GICv3State *s, MemTxAttrs attrs, + uint32_t *bmp, + maskfn *maskfn, + int offset) +{ + /* Helper routine to implement reading a "set/clear-bitmap" register + * (GICD_ICENABLER, GICD_ISENABLER, GICD_ICPENDR, etc). 
+ * Semantics implemented here: + * RAZ/WI for SGIs, PPIs, unimplemented IRQs + * Bits corresponding to Group 0 or Secure Group 1 interrupts RAZ/WI. + * offset should be the offset in bytes of the register from the start + * of its group. + */ + int irq = offset * 8; + uint32_t val; + + if (irq < GIC_INTERNAL || irq >= s->num_irq) { + return 0; + } + val = *gic_bmp_ptr32(bmp, irq); + if (bmp == s->pending) { + /* The PENDING register is a special case -- for level triggered + * interrupts, the PENDING state is the logical OR of the state of + * the PENDING latch with the input line level. + */ + uint32_t edge = *gic_bmp_ptr32(s->edge_trigger, irq); + uint32_t level = *gic_bmp_ptr32(s->level, irq); + val |= (~edge & level); + } + val &= mask_group_and_nsacr(s, attrs, maskfn, irq); + return val; +} + +static uint8_t gicd_read_ipriorityr(GICv3State *s, MemTxAttrs attrs, int irq) +{ + /* Read the value of GICD_IPRIORITYR for the specified interrupt, + * honouring security state (these are RAZ/WI for Group 0 or Secure + * Group 1 interrupts). + */ + uint32_t prio; + + if (irq < GIC_INTERNAL || irq >= s->num_irq) { + return 0; + } + + prio = s->gicd_ipriority[irq]; + + if (!attrs.secure && !(s->gicd_ctlr & GICD_CTLR_DS)) { + if (!gicv3_gicd_group_test(s, irq)) { + /* Fields for Group 0 or Secure Group 1 interrupts are RAZ/WI */ + return 0; + } + /* NS view of the interrupt priority */ + prio = (prio << 1) & 0xff; + } + return prio; +} + +static void gicd_write_ipriorityr(GICv3State *s, MemTxAttrs attrs, int irq, + uint8_t value) +{ + /* Write the value of GICD_IPRIORITYR for the specified interrupt, + * honouring security state (these are RAZ/WI for Group 0 or Secure + * Group 1 interrupts). + */ + if (irq < GIC_INTERNAL || irq >= s->num_irq) { + return; + } + + if (!attrs.secure && !(s->gicd_ctlr & GICD_CTLR_DS)) { + if (!gicv3_gicd_group_test(s, irq)) { + /* Fields for Group 0 or Secure Group 1 interrupts are RAZ/WI */ + return; + } + /* NS view of the interrupt priority */ + value = 0x80 | (value >> 1); + } + s->gicd_ipriority[irq] = value; +} + +static uint64_t gicd_read_irouter(GICv3State *s, MemTxAttrs attrs, int irq) +{ + /* Read the value of GICD_IROUTER for the specified interrupt, + * honouring security state. + */ + if (irq < GIC_INTERNAL || irq >= s->num_irq) { + return 0; + } + + if (!attrs.secure && !(s->gicd_ctlr & GICD_CTLR_DS)) { + /* RAZ/WI for NS accesses to secure interrupts */ + if (!gicv3_gicd_group_test(s, irq)) { + if (gicd_ns_access(s, irq) != 3) { + return 0; + } + } + } + + return s->gicd_irouter[irq]; +} + +static void gicd_write_irouter(GICv3State *s, MemTxAttrs attrs, int irq, + uint64_t val) +{ + /* Write the value of GICD_IROUTER for the specified interrupt, + * honouring security state. + */ + if (irq < GIC_INTERNAL || irq >= s->num_irq) { + return; + } + + if (!attrs.secure && !(s->gicd_ctlr & GICD_CTLR_DS)) { + /* RAZ/WI for NS accesses to secure interrupts */ + if (!gicv3_gicd_group_test(s, irq)) { + if (gicd_ns_access(s, irq) != 3) { + return; + } + } + } + + s->gicd_irouter[irq] = val; + gicv3_cache_target_cpustate(s, irq); + gicv3_update(s, irq, 1); +} + +/** + * gicd_readb + * gicd_readw + * gicd_readl + * gicd_readq + * gicd_writeb + * gicd_writew + * gicd_writel + * gicd_writeq + * + * Return %true if the operation succeeded, %false otherwise. + */ + +static bool gicd_readb(GICv3State *s, hwaddr offset, + uint64_t *data, MemTxAttrs attrs) +{ + /* Most GICv3 distributor registers do not support byte accesses. 
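The exceptions, handled below, are GICD_IPRIORITYR, which is genuinely byte-accessible, and the legacy SGI pending and ITARGETSR ranges, which for us are RAZ/WI.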
*/ + switch (offset) { + case GICD_CPENDSGIR ... GICD_CPENDSGIR + 0xf: + case GICD_SPENDSGIR ... GICD_SPENDSGIR + 0xf: + case GICD_ITARGETSR ... GICD_ITARGETSR + 0x3ff: + /* This GIC implementation always has affinity routing enabled, + * so these registers are all RAZ/WI. + */ + return true; + case GICD_IPRIORITYR ... GICD_IPRIORITYR + 0x3ff: + *data = gicd_read_ipriorityr(s, attrs, offset - GICD_IPRIORITYR); + return true; + default: + return false; + } +} + +static bool gicd_writeb(GICv3State *s, hwaddr offset, + uint64_t value, MemTxAttrs attrs) +{ + /* Most GICv3 distributor registers do not support byte accesses. */ + switch (offset) { + case GICD_CPENDSGIR ... GICD_CPENDSGIR + 0xf: + case GICD_SPENDSGIR ... GICD_SPENDSGIR + 0xf: + case GICD_ITARGETSR ... GICD_ITARGETSR + 0x3ff: + /* This GIC implementation always has affinity routing enabled, + * so these registers are all RAZ/WI. + */ + return true; + case GICD_IPRIORITYR ... GICD_IPRIORITYR + 0x3ff: + { + int irq = offset - GICD_IPRIORITYR; + + if (irq < GIC_INTERNAL || irq >= s->num_irq) { + return true; + } + gicd_write_ipriorityr(s, attrs, irq, value); + gicv3_update(s, irq, 1); + return true; + } + default: + return false; + } +} + +static bool gicd_readw(GICv3State *s, hwaddr offset, + uint64_t *data, MemTxAttrs attrs) +{ + /* Only GICD_SETSPI_NSR, GICD_CLRSPI_NSR, GICD_SETSPI_SR and GICD_CLRSPI_SR + * support 16 bit accesses, and those registers are all part of the + * optional message-based SPI feature which this GIC does not currently + * implement (ie for us GICD_TYPER.MBIS == 0), so for us they are + * reserved. + */ + return false; +} + +static bool gicd_writew(GICv3State *s, hwaddr offset, + uint64_t value, MemTxAttrs attrs) +{ + /* Only GICD_SETSPI_NSR, GICD_CLRSPI_NSR, GICD_SETSPI_SR and GICD_CLRSPI_SR + * support 16 bit accesses, and those registers are all part of the + * optional message-based SPI feature which this GIC does not currently + * implement (ie for us GICD_TYPER.MBIS == 0), so for us they are + * reserved. + */ + return false; +} + +static bool gicd_readl(GICv3State *s, hwaddr offset, + uint64_t *data, MemTxAttrs attrs) +{ + /* Almost all GICv3 distributor registers are 32-bit. + * Note that WO registers must return an UNKNOWN value on reads, + * not an abort. + */ + + switch (offset) { + case GICD_CTLR: + if (!attrs.secure && !(s->gicd_ctlr & GICD_CTLR_DS)) { + /* The NS view of the GICD_CTLR sees only certain bits: + * + bit [31] (RWP) is an alias of the Secure bit [31] + * + bit [4] (ARE_NS) is an alias of Secure bit [5] + * + bit [1] (EnableGrp1A) is an alias of Secure bit [1] if + * NS affinity routing is enabled, otherwise RES0 + * + bit [0] (EnableGrp1) is an alias of Secure bit [1] if + * NS affinity routing is not enabled, otherwise RES0 + * Since for QEMU affinity routing is always enabled + * for both S and NS this means that bits [4] and [5] are + * both always 1, and we can simply make the NS view + * be bits 31, 4 and 1 of the S view.
+ */ + *data = s->gicd_ctlr & (GICD_CTLR_ARE_S | + GICD_CTLR_EN_GRP1NS | + GICD_CTLR_RWP); + } else { + *data = s->gicd_ctlr; + } + return true; + case GICD_TYPER: + { + /* For this implementation: + * No1N == 1 (1-of-N SPI interrupts not supported) + * A3V == 1 (non-zero values of Affinity level 3 supported) + * IDbits == 0xf (we support 16-bit interrupt identifiers) + * DVIS == 0 (Direct virtual LPI injection not supported) + * LPIS == 1 (LPIs are supported if affinity routing is enabled) + * num_LPIs == 0b00000 (bits [15:11],Number of LPIs as indicated + * by GICD_TYPER.IDbits) + * MBIS == 0 (message-based SPIs not supported) + * SecurityExtn == 1 if security extns supported + * CPUNumber == 0 since for us ARE is always 1 + * ITLinesNumber == (num external irqs / 32) - 1 + */ + int itlinesnumber = ((s->num_irq - GIC_INTERNAL) / 32) - 1; + /* + * SecurityExtn must be RAZ if GICD_CTLR.DS == 1, and + * "security extensions not supported" always implies DS == 1, + * so we only need to check the DS bit. + */ + bool sec_extn = !(s->gicd_ctlr & GICD_CTLR_DS); + + *data = (1 << 25) | (1 << 24) | (sec_extn << 10) | + (s->lpi_enable << GICD_TYPER_LPIS_SHIFT) | + (0xf << 19) | itlinesnumber; + return true; + } + case GICD_IIDR: + /* We claim to be an ARM r0p0 with a zero ProductID. + * This is the same as an r0p0 GIC-500. + */ + *data = gicv3_iidr(); + return true; + case GICD_STATUSR: + /* RAZ/WI for us (this is an optional register and our implementation + * does not track RO/WO/reserved violations to report them to the guest) + */ + *data = 0; + return true; + case GICD_IGROUPR ... GICD_IGROUPR + 0x7f: + { + int irq; + + if (!attrs.secure && !(s->gicd_ctlr & GICD_CTLR_DS)) { + *data = 0; + return true; + } + /* RAZ/WI for SGIs, PPIs, unimplemented irqs */ + irq = (offset - GICD_IGROUPR) * 8; + if (irq < GIC_INTERNAL || irq >= s->num_irq) { + *data = 0; + return true; + } + *data = *gic_bmp_ptr32(s->group, irq); + return true; + } + case GICD_ISENABLER ... GICD_ISENABLER + 0x7f: + *data = gicd_read_bitmap_reg(s, attrs, s->enabled, NULL, + offset - GICD_ISENABLER); + return true; + case GICD_ICENABLER ... GICD_ICENABLER + 0x7f: + *data = gicd_read_bitmap_reg(s, attrs, s->enabled, NULL, + offset - GICD_ICENABLER); + return true; + case GICD_ISPENDR ... GICD_ISPENDR + 0x7f: + *data = gicd_read_bitmap_reg(s, attrs, s->pending, mask_nsacr_ge1, + offset - GICD_ISPENDR); + return true; + case GICD_ICPENDR ... GICD_ICPENDR + 0x7f: + *data = gicd_read_bitmap_reg(s, attrs, s->pending, mask_nsacr_ge2, + offset - GICD_ICPENDR); + return true; + case GICD_ISACTIVER ... GICD_ISACTIVER + 0x7f: + *data = gicd_read_bitmap_reg(s, attrs, s->active, mask_nsacr_ge2, + offset - GICD_ISACTIVER); + return true; + case GICD_ICACTIVER ... GICD_ICACTIVER + 0x7f: + *data = gicd_read_bitmap_reg(s, attrs, s->active, mask_nsacr_ge2, + offset - GICD_ICACTIVER); + return true; + case GICD_IPRIORITYR ... GICD_IPRIORITYR + 0x3ff: + { + int i, irq = offset - GICD_IPRIORITYR; + uint32_t value = 0; + + for (i = irq + 3; i >= irq; i--) { + value <<= 8; + value |= gicd_read_ipriorityr(s, attrs, i); + } + *data = value; + return true; + } + case GICD_ITARGETSR ... GICD_ITARGETSR + 0x3ff: + /* RAZ/WI since affinity routing is always enabled */ + *data = 0; + return true; + case GICD_ICFGR ... 
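/* Each interrupt has a 2-bit Int_config field where 0b00 means level-sensitive and 0b10 edge-triggered; e.g. edge_trigger bits 0b101 for three consecutive IRQs read back here as 0b100010 */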
GICD_ICFGR + 0xff: + { + /* Here only the odd bits are used; even bits are RES0 */ + int irq = (offset - GICD_ICFGR) * 4; + uint32_t value = 0; + + if (irq < GIC_INTERNAL || irq >= s->num_irq) { + *data = 0; + return true; + } + + /* Since our edge_trigger bitmap is one bit per irq, we only need + * half of the 32-bit word, which we can then spread out + * into the odd bits. + */ + value = *gic_bmp_ptr32(s->edge_trigger, irq & ~0x1f); + value &= mask_group_and_nsacr(s, attrs, NULL, irq & ~0x1f); + value = extract32(value, (irq & 0x1f) ? 16 : 0, 16); + value = half_shuffle32(value) << 1; + *data = value; + return true; + } + case GICD_IGRPMODR ... GICD_IGRPMODR + 0xff: + { + int irq; + + if ((s->gicd_ctlr & GICD_CTLR_DS) || !attrs.secure) { + /* RAZ/WI if security disabled, or if + * security enabled and this is an NS access + */ + *data = 0; + return true; + } + /* RAZ/WI for SGIs, PPIs, unimplemented irqs */ + irq = (offset - GICD_IGRPMODR) * 8; + if (irq < GIC_INTERNAL || irq >= s->num_irq) { + *data = 0; + return true; + } + *data = *gic_bmp_ptr32(s->grpmod, irq); + return true; + } + case GICD_NSACR ... GICD_NSACR + 0xff: + { + /* Two bits per interrupt */ + int irq = (offset - GICD_NSACR) * 4; + + if (irq < GIC_INTERNAL || irq >= s->num_irq) { + *data = 0; + return true; + } + + if ((s->gicd_ctlr & GICD_CTLR_DS) || !attrs.secure) { + /* RAZ/WI if security disabled, or if + * security enabled and this is an NS access + */ + *data = 0; + return true; + } + + *data = s->gicd_nsacr[irq / 16]; + return true; + } + case GICD_CPENDSGIR ... GICD_CPENDSGIR + 0xf: + case GICD_SPENDSGIR ... GICD_SPENDSGIR + 0xf: + /* RAZ/WI since affinity routing is always enabled */ + *data = 0; + return true; + case GICD_IROUTER ... GICD_IROUTER + 0x1fdf: + { + uint64_t r; + int irq = (offset - GICD_IROUTER) / 8; + + r = gicd_read_irouter(s, attrs, irq); + if (offset & 7) { + *data = r >> 32; + } else { + *data = (uint32_t)r; + } + return true; + } + case GICD_IDREGS ... GICD_IDREGS + 0x2f: + /* ID registers */ + *data = gicv3_idreg(offset - GICD_IDREGS); + return true; + case GICD_SGIR: + /* WO registers, return unknown value */ + qemu_log_mask(LOG_GUEST_ERROR, + "%s: invalid guest read from WO register at offset " + TARGET_FMT_plx "\n", __func__, offset); + *data = 0; + return true; + default: + return false; + } +} + +static bool gicd_writel(GICv3State *s, hwaddr offset, + uint64_t value, MemTxAttrs attrs) +{ + /* Almost all GICv3 distributor registers are 32-bit. Note that + * RO registers must ignore writes, not abort. + */ + + switch (offset) { + case GICD_CTLR: + { + uint32_t mask; + /* GICv3 5.3.20 */ + if (s->gicd_ctlr & GICD_CTLR_DS) { + /* With only one security state, E1NWF is RAZ/WI, DS is RAO/WI, + * ARE is RAO/WI (affinity routing always on), and only + * bits 0 and 1 (group enables) are writable. + */ + mask = GICD_CTLR_EN_GRP0 | GICD_CTLR_EN_GRP1NS; + } else { + if (attrs.secure) { + /* for secure access: + * ARE_NS and ARE_S are RAO/WI (affinity routing always on) + * E1NWF is RAZ/WI (we don't support enable-1-of-n-wakeup) + * + * We can only modify bits[2:0] (the group enables). + */ + mask = GICD_CTLR_DS | GICD_CTLR_EN_GRP0 | GICD_CTLR_EN_GRP1_ALL; + } else { + /* For non secure access ARE_NS is RAO/WI and EnableGrp1 + * is RES0. The only writable bit is [1] (EnableGrp1A), which + * is an alias of the Secure bit [1].
+ */ + mask = GICD_CTLR_EN_GRP1NS; + } + } + s->gicd_ctlr = (s->gicd_ctlr & ~mask) | (value & mask); + if (value & mask & GICD_CTLR_DS) { + /* We just set DS, so the ARE_NS and EnG1S bits are now RES0. + * Note that this is a one-way transition because if DS is set + * then it's not writeable, so it can only go back to 0 with a + * hardware reset. + */ + s->gicd_ctlr &= ~(GICD_CTLR_EN_GRP1S | GICD_CTLR_ARE_NS); + } + gicv3_full_update(s); + return true; + } + case GICD_STATUSR: + /* RAZ/WI for our implementation */ + return true; + case GICD_IGROUPR ... GICD_IGROUPR + 0x7f: + { + int irq; + + if (!attrs.secure && !(s->gicd_ctlr & GICD_CTLR_DS)) { + return true; + } + /* RAZ/WI for SGIs, PPIs, unimplemented irqs */ + irq = (offset - GICD_IGROUPR) * 8; + if (irq < GIC_INTERNAL || irq >= s->num_irq) { + return true; + } + *gic_bmp_ptr32(s->group, irq) = value; + gicv3_update(s, irq, 32); + return true; + } + case GICD_ISENABLER ... GICD_ISENABLER + 0x7f: + gicd_write_set_bitmap_reg(s, attrs, s->enabled, NULL, + offset - GICD_ISENABLER, value); + return true; + case GICD_ICENABLER ... GICD_ICENABLER + 0x7f: + gicd_write_clear_bitmap_reg(s, attrs, s->enabled, NULL, + offset - GICD_ICENABLER, value); + return true; + case GICD_ISPENDR ... GICD_ISPENDR + 0x7f: + gicd_write_set_bitmap_reg(s, attrs, s->pending, mask_nsacr_ge1, + offset - GICD_ISPENDR, value); + return true; + case GICD_ICPENDR ... GICD_ICPENDR + 0x7f: + gicd_write_clear_bitmap_reg(s, attrs, s->pending, mask_nsacr_ge2, + offset - GICD_ICPENDR, value); + return true; + case GICD_ISACTIVER ... GICD_ISACTIVER + 0x7f: + gicd_write_set_bitmap_reg(s, attrs, s->active, NULL, + offset - GICD_ISACTIVER, value); + return true; + case GICD_ICACTIVER ... GICD_ICACTIVER + 0x7f: + gicd_write_clear_bitmap_reg(s, attrs, s->active, NULL, + offset - GICD_ICACTIVER, value); + return true; + case GICD_IPRIORITYR ... GICD_IPRIORITYR + 0x3ff: + { + int i, irq = offset - GICD_IPRIORITYR; + + if (irq < GIC_INTERNAL || irq + 3 >= s->num_irq) { + return true; + } + + for (i = irq; i < irq + 4; i++, value >>= 8) { + gicd_write_ipriorityr(s, attrs, i, value); + } + gicv3_update(s, irq, 4); + return true; + } + case GICD_ITARGETSR ... GICD_ITARGETSR + 0x3ff: + /* RAZ/WI since affinity routing is always enabled */ + return true; + case GICD_ICFGR ... GICD_ICFGR + 0xff: + { + /* Here only the odd bits are used; even bits are RES0 */ + int irq = (offset - GICD_ICFGR) * 4; + uint32_t mask, oldval; + + if (irq < GIC_INTERNAL || irq >= s->num_irq) { + return true; + } + + /* Since our edge_trigger bitmap is one bit per irq, our input + * 32-bits will compress down into 16 bits which we need + * to write into the bitmap. + */ + value = half_unshuffle32(value >> 1); + mask = mask_group_and_nsacr(s, attrs, NULL, irq & ~0x1f); + if (irq & 0x1f) { + value <<= 16; + mask &= 0xffff0000U; + } else { + mask &= 0xffff; + } + oldval = *gic_bmp_ptr32(s->edge_trigger, (irq & ~0x1f)); + value = (oldval & ~mask) | (value & mask); + *gic_bmp_ptr32(s->edge_trigger, irq & ~0x1f) = value; + return true; + } + case GICD_IGRPMODR ... 
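/* Together with GICD_IGROUPR this selects the interrupt group: {grpmod, group} == {0, 0} is Secure Group 0, {1, 0} is Secure Group 1, and group == 1 is Non-secure Group 1 */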
GICD_IGRPMODR + 0xff: + { + int irq; + + if ((s->gicd_ctlr & GICD_CTLR_DS) || !attrs.secure) { + /* RAZ/WI if security disabled, or if + * security enabled and this is an NS access + */ + return true; + } + /* RAZ/WI for SGIs, PPIs, unimplemented irqs */ + irq = (offset - GICD_IGRPMODR) * 8; + if (irq < GIC_INTERNAL || irq >= s->num_irq) { + return true; + } + *gic_bmp_ptr32(s->grpmod, irq) = value; + gicv3_update(s, irq, 32); + return true; + } + case GICD_NSACR ... GICD_NSACR + 0xff: + { + /* Two bits per interrupt */ + int irq = (offset - GICD_NSACR) * 4; + + if (irq < GIC_INTERNAL || irq >= s->num_irq) { + return true; + } + + if ((s->gicd_ctlr & GICD_CTLR_DS) || !attrs.secure) { + /* RAZ/WI if security disabled, or if + * security enabled and this is an NS access + */ + return true; + } + + s->gicd_nsacr[irq / 16] = value; + /* No update required as this only affects access permission checks */ + return true; + } + case GICD_SGIR: + /* RES0 if affinity routing is enabled */ + return true; + case GICD_CPENDSGIR ... GICD_CPENDSGIR + 0xf: + case GICD_SPENDSGIR ... GICD_SPENDSGIR + 0xf: + /* RAZ/WI since affinity routing is always enabled */ + return true; + case GICD_IROUTER ... GICD_IROUTER + 0x1fdf: + { + uint64_t r; + int irq = (offset - GICD_IROUTER) / 8; + + if (irq < GIC_INTERNAL || irq >= s->num_irq) { + return true; + } + + /* Write half of the 64-bit register */ + r = gicd_read_irouter(s, attrs, irq); + r = deposit64(r, (offset & 7) ? 32 : 0, 32, value); + gicd_write_irouter(s, attrs, irq, r); + return true; + } + case GICD_IDREGS ... GICD_IDREGS + 0x2f: + case GICD_TYPER: + case GICD_IIDR: + /* RO registers, ignore the write */ + qemu_log_mask(LOG_GUEST_ERROR, + "%s: invalid guest write to RO register at offset " + TARGET_FMT_plx "\n", __func__, offset); + return true; + default: + return false; + } +} + +static bool gicd_writeq(GICv3State *s, hwaddr offset, + uint64_t value, MemTxAttrs attrs) +{ + /* Our only 64-bit registers are GICD_IROUTER */ + int irq; + + switch (offset) { + case GICD_IROUTER ... GICD_IROUTER + 0x1fdf: + irq = (offset - GICD_IROUTER) / 8; + gicd_write_irouter(s, attrs, irq, value); + return true; + default: + return false; + } +} + +static bool gicd_readq(GICv3State *s, hwaddr offset, + uint64_t *data, MemTxAttrs attrs) +{ + /* Our only 64-bit registers are GICD_IROUTER */ + int irq; + + switch (offset) { + case GICD_IROUTER ... GICD_IROUTER + 0x1fdf: + irq = (offset - GICD_IROUTER) / 8; + *data = gicd_read_irouter(s, attrs, irq); + return true; + default: + return false; + } +} + +MemTxResult gicv3_dist_read(void *opaque, hwaddr offset, uint64_t *data, + unsigned size, MemTxAttrs attrs) +{ + GICv3State *s = (GICv3State *)opaque; + bool r; + + switch (size) { + case 1: + r = gicd_readb(s, offset, data, attrs); + break; + case 2: + r = gicd_readw(s, offset, data, attrs); + break; + case 4: + r = gicd_readl(s, offset, data, attrs); + break; + case 8: + r = gicd_readq(s, offset, data, attrs); + break; + default: + r = false; + break; + } + + if (!r) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: invalid guest read at offset " TARGET_FMT_plx + "size %u\n", __func__, offset, size); + trace_gicv3_dist_badread(offset, size, attrs.secure); + /* The spec requires that reserved registers are RAZ/WI; + * so use MEMTX_ERROR returns from leaf functions as a way to + * trigger the guest-error logging but don't return it to + * the caller, or we'll cause a spurious guest data abort. 
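GICD_IROUTER is the one 64-bit register block in the distributor, so the 32-bit write path above read-modify-writes one half at a time with deposit64(). The sketch below uses local stand-ins for QEMU's extract64()/deposit64() (assuming field lengths below 64) to show that two 32-bit writes rebuild the same 64-bit route.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Local stand-ins for the qemu/bitops.h helpers. */
static uint64_t extract64(uint64_t value, int start, int length)
{
    return (value >> start) & (~0ULL >> (64 - length));
}

static uint64_t deposit64(uint64_t value, int start, int length,
                          uint64_t fieldval)
{
    uint64_t mask = (~0ULL >> (64 - length)) << start;
    return (value & ~mask) | ((fieldval << start) & mask);
}

int main(void)
{
    uint64_t irouter = 0x0000000100af0003ULL;  /* hypothetical routing value */
    uint64_t r = 0;

    /* Guest writes the low word ((offset & 7) == 0), then the high word */
    r = deposit64(r, 0, 32, (uint32_t)irouter);
    r = deposit64(r, 32, 32, irouter >> 32);

    assert(r == irouter);
    printf("lo=0x%llx hi=0x%llx -> 0x%llx\n",
           (unsigned long long)extract64(r, 0, 32),
           (unsigned long long)extract64(r, 32, 32),
           (unsigned long long)r);
    return 0;
}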
+ */ + *data = 0; + } else { + trace_gicv3_dist_read(offset, *data, size, attrs.secure); + } + return MEMTX_OK; +} + +MemTxResult gicv3_dist_write(void *opaque, hwaddr offset, uint64_t data, + unsigned size, MemTxAttrs attrs) +{ + GICv3State *s = (GICv3State *)opaque; + bool r; + + switch (size) { + case 1: + r = gicd_writeb(s, offset, data, attrs); + break; + case 2: + r = gicd_writew(s, offset, data, attrs); + break; + case 4: + r = gicd_writel(s, offset, data, attrs); + break; + case 8: + r = gicd_writeq(s, offset, data, attrs); + break; + default: + r = false; + break; + } + + if (!r) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: invalid guest write at offset " TARGET_FMT_plx + "size %u\n", __func__, offset, size); + trace_gicv3_dist_badwrite(offset, data, size, attrs.secure); + /* The spec requires that reserved registers are RAZ/WI; + * so use MEMTX_ERROR returns from leaf functions as a way to + * trigger the guest-error logging but don't return it to + * the caller, or we'll cause a spurious guest data abort. + */ + } else { + trace_gicv3_dist_write(offset, data, size, attrs.secure); + } + return MEMTX_OK; +} + +void gicv3_dist_set_irq(GICv3State *s, int irq, int level) +{ + /* Update distributor state for a change in an external SPI input line */ + if (level == gicv3_gicd_level_test(s, irq)) { + return; + } + + trace_gicv3_dist_set_irq(irq, level); + + gicv3_gicd_level_replace(s, irq, level); + + if (level) { + /* 0->1 edges latch the pending bit for edge-triggered interrupts */ + if (gicv3_gicd_edge_trigger_test(s, irq)) { + gicv3_gicd_pending_set(s, irq); + } + } + + gicv3_update(s, irq, 1); +} diff --git a/hw/intc/arm_gicv3_its.c b/hw/intc/arm_gicv3_its.c new file mode 100644 index 000000000..c929a9cb5 --- /dev/null +++ b/hw/intc/arm_gicv3_its.c @@ -0,0 +1,1323 @@ +/* + * ITS emulation for a GICv3-based system + * + * Copyright Linaro.org 2021 + * + * Authors: + * Shashi Mallela + * + * This work is licensed under the terms of the GNU GPL, version 2 or (at your + * option) any later version. See the COPYING file in the top-level directory. + * + */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "hw/qdev-properties.h" +#include "hw/intc/arm_gicv3_its_common.h" +#include "gicv3_internal.h" +#include "qom/object.h" +#include "qapi/error.h" + +typedef struct GICv3ITSClass GICv3ITSClass; +/* This is reusing the GICv3ITSState typedef from ARM_GICV3_ITS_COMMON */ +DECLARE_OBJ_CHECKERS(GICv3ITSState, GICv3ITSClass, + ARM_GICV3_ITS, TYPE_ARM_GICV3_ITS) + +struct GICv3ITSClass { + GICv3ITSCommonClass parent_class; + void (*parent_reset)(DeviceState *dev); +}; + +/* + * This is an internal enum used to distinguish between LPI triggered + * via command queue and LPI triggered via gits_translater write. 
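The dispatch pattern in gicv3_dist_read()/gicv3_dist_write() above never surfaces an error to the bus for reserved offsets: leaf handlers return a bool, and the wrapper logs the guest error but applies RAZ/WI and reports success. A toy version of that contract, with hypothetical register offsets and a plain int standing in for MemTxResult:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical leaf handler: returns false for reserved offsets. */
static bool toy_readl(uint64_t offset, uint64_t *data)
{
    if (offset == 0x0) {        /* e.g. a CTLR-like register */
        *data = 0x12345678;
        return true;
    }
    return false;               /* reserved: caller applies RAZ/WI */
}

/* Mirror of the pattern above: log the bad access, but still return
 * "OK" with zeroed data so the guest does not take a data abort. */
static int toy_mmio_read(uint64_t offset, uint64_t *data)
{
    if (!toy_readl(offset, data)) {
        fprintf(stderr, "guest-error: invalid read at 0x%llx\n",
                (unsigned long long)offset);
        *data = 0;              /* RAZ */
    }
    return 0;                   /* success either way */
}

int main(void)
{
    uint64_t v;
    toy_mmio_read(0x0, &v);
    printf("0x0  -> 0x%llx\n", (unsigned long long)v);
    toy_mmio_read(0x40, &v);
    printf("0x40 -> 0x%llx\n", (unsigned long long)v);
    return 0;
}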
+ */ +typedef enum ItsCmdType { + NONE = 0, /* internal indication for GITS_TRANSLATER write */ + CLEAR = 1, + DISCARD = 2, + INTERRUPT = 3, +} ItsCmdType; + +typedef struct { + uint32_t iteh; + uint64_t itel; +} IteEntry; + +static uint64_t baser_base_addr(uint64_t value, uint32_t page_sz) +{ + uint64_t result = 0; + + switch (page_sz) { + case GITS_PAGE_SIZE_4K: + case GITS_PAGE_SIZE_16K: + result = FIELD_EX64(value, GITS_BASER, PHYADDR) << 12; + break; + + case GITS_PAGE_SIZE_64K: + result = FIELD_EX64(value, GITS_BASER, PHYADDRL_64K) << 16; + result |= FIELD_EX64(value, GITS_BASER, PHYADDRH_64K) << 48; + break; + + default: + break; + } + return result; +} + +static bool get_cte(GICv3ITSState *s, uint16_t icid, uint64_t *cte, + MemTxResult *res) +{ + AddressSpace *as = &s->gicv3->dma_as; + uint64_t l2t_addr; + uint64_t value; + bool valid_l2t; + uint32_t l2t_id; + uint32_t max_l2_entries; + + if (s->ct.indirect) { + l2t_id = icid / (s->ct.page_sz / L1TABLE_ENTRY_SIZE); + + value = address_space_ldq_le(as, + s->ct.base_addr + + (l2t_id * L1TABLE_ENTRY_SIZE), + MEMTXATTRS_UNSPECIFIED, res); + + if (*res == MEMTX_OK) { + valid_l2t = (value & L2_TABLE_VALID_MASK) != 0; + + if (valid_l2t) { + max_l2_entries = s->ct.page_sz / s->ct.entry_sz; + + l2t_addr = value & ((1ULL << 51) - 1); + + *cte = address_space_ldq_le(as, l2t_addr + + ((icid % max_l2_entries) * GITS_CTE_SIZE), + MEMTXATTRS_UNSPECIFIED, res); + } + } + } else { + /* Flat level table */ + *cte = address_space_ldq_le(as, s->ct.base_addr + + (icid * GITS_CTE_SIZE), + MEMTXATTRS_UNSPECIFIED, res); + } + + return (*cte & TABLE_ENTRY_VALID_MASK) != 0; +} + +static bool update_ite(GICv3ITSState *s, uint32_t eventid, uint64_t dte, + IteEntry ite) +{ + AddressSpace *as = &s->gicv3->dma_as; + uint64_t itt_addr; + MemTxResult res = MEMTX_OK; + + itt_addr = (dte & GITS_DTE_ITTADDR_MASK) >> GITS_DTE_ITTADDR_SHIFT; + itt_addr <<= ITTADDR_SHIFT; /* 256 byte aligned */ + + address_space_stq_le(as, itt_addr + (eventid * (sizeof(uint64_t) + + sizeof(uint32_t))), ite.itel, MEMTXATTRS_UNSPECIFIED, + &res); + + if (res == MEMTX_OK) { + address_space_stl_le(as, itt_addr + (eventid * (sizeof(uint64_t) + + sizeof(uint32_t))) + sizeof(uint32_t), ite.iteh, + MEMTXATTRS_UNSPECIFIED, &res); + } + if (res != MEMTX_OK) { + return false; + } else { + return true; + } +} + +static bool get_ite(GICv3ITSState *s, uint32_t eventid, uint64_t dte, + uint16_t *icid, uint32_t *pIntid, MemTxResult *res) +{ + AddressSpace *as = &s->gicv3->dma_as; + uint64_t itt_addr; + bool status = false; + IteEntry ite = {}; + + itt_addr = (dte & GITS_DTE_ITTADDR_MASK) >> GITS_DTE_ITTADDR_SHIFT; + itt_addr <<= ITTADDR_SHIFT; /* 256 byte aligned */ + + ite.itel = address_space_ldq_le(as, itt_addr + + (eventid * (sizeof(uint64_t) + + sizeof(uint32_t))), MEMTXATTRS_UNSPECIFIED, + res); + + if (*res == MEMTX_OK) { + ite.iteh = address_space_ldl_le(as, itt_addr + + (eventid * (sizeof(uint64_t) + + sizeof(uint32_t))) + sizeof(uint32_t), + MEMTXATTRS_UNSPECIFIED, res); + + if (*res == MEMTX_OK) { + if (ite.itel & TABLE_ENTRY_VALID_MASK) { + if ((ite.itel >> ITE_ENTRY_INTTYPE_SHIFT) & + GITS_TYPE_PHYSICAL) { + *pIntid = (ite.itel & ITE_ENTRY_INTID_MASK) >> + ITE_ENTRY_INTID_SHIFT; + *icid = ite.iteh & ITE_ENTRY_ICID_MASK; + status = true; + } + } + } + } + return status; +} + +static uint64_t get_dte(GICv3ITSState *s, uint32_t devid, MemTxResult *res) +{ + AddressSpace *as = &s->gicv3->dma_as; + uint64_t l2t_addr; + uint64_t value; + bool valid_l2t; + uint32_t l2t_id; + uint32_t 
max_l2_entries; + + if (s->dt.indirect) { + l2t_id = devid / (s->dt.page_sz / L1TABLE_ENTRY_SIZE); + + value = address_space_ldq_le(as, + s->dt.base_addr + + (l2t_id * L1TABLE_ENTRY_SIZE), + MEMTXATTRS_UNSPECIFIED, res); + + if (*res == MEMTX_OK) { + valid_l2t = (value & L2_TABLE_VALID_MASK) != 0; + + if (valid_l2t) { + max_l2_entries = s->dt.page_sz / s->dt.entry_sz; + + l2t_addr = value & ((1ULL << 51) - 1); + + value = address_space_ldq_le(as, l2t_addr + + ((devid % max_l2_entries) * GITS_DTE_SIZE), + MEMTXATTRS_UNSPECIFIED, res); + } + } + } else { + /* Flat level table */ + value = address_space_ldq_le(as, s->dt.base_addr + + (devid * GITS_DTE_SIZE), + MEMTXATTRS_UNSPECIFIED, res); + } + + return value; +} + +/* + * This function handles the processing of the following commands, based on + * the ItsCmdType parameter passed: + * 1. triggering of lpi interrupt translation via ITS INT command + * 2. triggering of lpi interrupt translation via gits_translater register + * 3. handling of ITS CLEAR command + * 4. handling of ITS DISCARD command + */ +static bool process_its_cmd(GICv3ITSState *s, uint64_t value, uint32_t offset, + ItsCmdType cmd) +{ + AddressSpace *as = &s->gicv3->dma_as; + uint32_t devid, eventid; + MemTxResult res = MEMTX_OK; + bool dte_valid; + uint64_t dte = 0; + uint32_t max_eventid; + uint16_t icid = 0; + uint32_t pIntid = 0; + bool ite_valid = false; + uint64_t cte = 0; + bool cte_valid = false; + bool result = false; + uint64_t rdbase; + + if (cmd == NONE) { + devid = offset; + } else { + devid = ((value & DEVID_MASK) >> DEVID_SHIFT); + + offset += NUM_BYTES_IN_DW; + value = address_space_ldq_le(as, s->cq.base_addr + offset, + MEMTXATTRS_UNSPECIFIED, &res); + } + + if (res != MEMTX_OK) { + return result; + } + + eventid = (value & EVENTID_MASK); + + dte = get_dte(s, devid, &res); + + if (res != MEMTX_OK) { + return result; + } + dte_valid = dte & TABLE_ENTRY_VALID_MASK; + + if (dte_valid) { + max_eventid = (1UL << (((dte >> 1U) & SIZE_MASK) + 1)); + + ite_valid = get_ite(s, eventid, dte, &icid, &pIntid, &res); + + if (res != MEMTX_OK) { + return result; + } + + if (ite_valid) { + cte_valid = get_cte(s, icid, &cte, &res); + } + + if (res != MEMTX_OK) { + return result; + } + } + + if ((devid > s->dt.maxids.max_devids) || !dte_valid || !ite_valid || + !cte_valid || (eventid > max_eventid)) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: invalid command attributes " + "devid %d or eventid %d or invalid dte %d or " + "invalid cte %d or invalid ite %d\n", + __func__, devid, eventid, dte_valid, cte_valid, + ite_valid); + /* + * in this implementation, in case of error + * we ignore this command and move on to the next + * command in the queue + */ + } else { + /* + * Current implementation only supports rdbase == procnum + * Hence rdbase physical address is ignored + */ + rdbase = (cte & GITS_CTE_RDBASE_PROCNUM_MASK) >> 1U; + + if (rdbase >= s->gicv3->num_cpu) { + return result; + } + + if ((cmd == CLEAR) || (cmd == DISCARD)) { + gicv3_redist_process_lpi(&s->gicv3->cpu[rdbase], pIntid, 0); + } else { + gicv3_redist_process_lpi(&s->gicv3->cpu[rdbase], pIntid, 1); + } + + if (cmd == DISCARD) { + IteEntry ite = {}; + /* remove mapping from interrupt translation table */ + result = update_ite(s, eventid, dte, ite); + } + } + + return result; +} + +static bool process_mapti(GICv3ITSState *s, uint64_t value, uint32_t offset, + bool ignore_pInt) +{ + AddressSpace *as = &s->gicv3->dma_as; + uint32_t devid, eventid; + uint32_t pIntid = 0; + uint32_t max_eventid, max_Intid; + bool dte_valid; +
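Each interrupt translation entry handled by update_ite()/get_ite() above is a 64-bit ITEL word plus a 32-bit ITEH word, so EventIDs index the ITT with a 12-byte stride, with ITEH at the same +4 offset the code uses. A small sketch of that addressing, with a hypothetical ITT base:

#include <stdint.h>
#include <stdio.h>

/* 12 bytes per entry, as in update_ite()/get_ite() above. */
#define ITE_SIZE (sizeof(uint64_t) + sizeof(uint32_t))

int main(void)
{
    uint64_t itt_base = 0x40000000;   /* hypothetical, 256-byte aligned per MAPD */
    uint32_t eventid;

    for (eventid = 0; eventid < 3; eventid++) {
        uint64_t entry = itt_base + eventid * ITE_SIZE;
        printf("event %u: itel @ 0x%llx, iteh @ 0x%llx\n", eventid,
               (unsigned long long)entry,
               (unsigned long long)(entry + sizeof(uint32_t)));
    }
    return 0;
}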
MemTxResult res = MEMTX_OK; + uint16_t icid = 0; + uint64_t dte = 0; + IteEntry ite; + uint32_t int_spurious = INTID_SPURIOUS; + bool result = false; + + devid = ((value & DEVID_MASK) >> DEVID_SHIFT); + offset += NUM_BYTES_IN_DW; + value = address_space_ldq_le(as, s->cq.base_addr + offset, + MEMTXATTRS_UNSPECIFIED, &res); + + if (res != MEMTX_OK) { + return result; + } + + eventid = (value & EVENTID_MASK); + + if (!ignore_pInt) { + pIntid = ((value & pINTID_MASK) >> pINTID_SHIFT); + } + + offset += NUM_BYTES_IN_DW; + value = address_space_ldq_le(as, s->cq.base_addr + offset, + MEMTXATTRS_UNSPECIFIED, &res); + + if (res != MEMTX_OK) { + return result; + } + + icid = value & ICID_MASK; + + dte = get_dte(s, devid, &res); + + if (res != MEMTX_OK) { + return result; + } + dte_valid = dte & TABLE_ENTRY_VALID_MASK; + + max_eventid = (1UL << (((dte >> 1U) & SIZE_MASK) + 1)); + + if (!ignore_pInt) { + max_Intid = (1ULL << (GICD_TYPER_IDBITS + 1)) - 1; + } + + if ((devid > s->dt.maxids.max_devids) || (icid > s->ct.maxids.max_collids) + || !dte_valid || (eventid > max_eventid) || + (!ignore_pInt && (((pIntid < GICV3_LPI_INTID_START) || + (pIntid > max_Intid)) && (pIntid != INTID_SPURIOUS)))) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: invalid command attributes " + "devid %d or icid %d or eventid %d or pIntid %d or " + "unmapped dte %d\n", __func__, devid, icid, eventid, + pIntid, dte_valid); + /* + * in this implementation, in case of error + * we ignore this command and move on to the next + * command in the queue + */ + } else { + /* add ite entry to interrupt translation table */ + ite.itel = (dte_valid & TABLE_ENTRY_VALID_MASK) | + (GITS_TYPE_PHYSICAL << ITE_ENTRY_INTTYPE_SHIFT); + + if (ignore_pInt) { + ite.itel |= (eventid << ITE_ENTRY_INTID_SHIFT); + } else { + ite.itel |= (pIntid << ITE_ENTRY_INTID_SHIFT); + } + ite.itel |= (int_spurious << ITE_ENTRY_INTSP_SHIFT); + ite.iteh = icid; + + result = update_ite(s, eventid, dte, ite); + } + + return result; +} + +static bool update_cte(GICv3ITSState *s, uint16_t icid, bool valid, + uint64_t rdbase) +{ + AddressSpace *as = &s->gicv3->dma_as; + uint64_t value; + uint64_t l2t_addr; + bool valid_l2t; + uint32_t l2t_id; + uint32_t max_l2_entries; + uint64_t cte = 0; + MemTxResult res = MEMTX_OK; + + if (!s->ct.valid) { + return true; + } + + if (valid) { + /* add mapping entry to collection table */ + cte = (valid & TABLE_ENTRY_VALID_MASK) | (rdbase << 1ULL); + } + + /* + * The specification defines the format of level 1 entries of a + * 2-level table, but the format of level 2 entries and the format + * of flat-mapped tables is IMPDEF.
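MAPTI pulls its fields from three consecutive command doublewords. The sketch below decodes a hand-built command with local mask/shift stand-ins for the gicv3_internal.h definitions; the field positions shown (DeviceID in DW0[63:32], EventID in DW1[31:0], pINTID in DW1[63:32], ICID in DW2[15:0]) follow the GICv3 ITS command encoding.

#include <stdint.h>
#include <stdio.h>

/* Local stand-ins for the gicv3_internal.h mask/shift definitions. */
#define CMD_MASK      0xffULL
#define DEVID_SHIFT   32
#define EVENTID_MASK  0xffffffffULL
#define PINTID_SHIFT  32
#define ICID_MASK     0xffffULL

int main(void)
{
    uint64_t dw0 = (0x10ULL << DEVID_SHIFT) | 0x0a; /* devid 0x10, MAPTI opcode */
    uint64_t dw1 = (8192ULL << PINTID_SHIFT) | 7;   /* LPI 8192, event 7 */
    uint64_t dw2 = 3;                               /* collection 3 */

    printf("cmd 0x%02x: devid %u eventid %u pintid %u icid %u\n",
           (unsigned)(dw0 & CMD_MASK), (unsigned)(dw0 >> DEVID_SHIFT),
           (unsigned)(dw1 & EVENTID_MASK), (unsigned)(dw1 >> PINTID_SHIFT),
           (unsigned)(dw2 & ICID_MASK));
    return 0;
}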
+ */ + if (s->ct.indirect) { + l2t_id = icid / (s->ct.page_sz / L1TABLE_ENTRY_SIZE); + + value = address_space_ldq_le(as, + s->ct.base_addr + + (l2t_id * L1TABLE_ENTRY_SIZE), + MEMTXATTRS_UNSPECIFIED, &res); + + if (res != MEMTX_OK) { + return false; + } + + valid_l2t = (value & L2_TABLE_VALID_MASK) != 0; + + if (valid_l2t) { + max_l2_entries = s->ct.page_sz / s->ct.entry_sz; + + l2t_addr = value & ((1ULL << 51) - 1); + + address_space_stq_le(as, l2t_addr + + ((icid % max_l2_entries) * GITS_CTE_SIZE), + cte, MEMTXATTRS_UNSPECIFIED, &res); + } + } else { + /* Flat level table */ + address_space_stq_le(as, s->ct.base_addr + (icid * GITS_CTE_SIZE), + cte, MEMTXATTRS_UNSPECIFIED, &res); + } + if (res != MEMTX_OK) { + return false; + } else { + return true; + } +} + +static bool process_mapc(GICv3ITSState *s, uint32_t offset) +{ + AddressSpace *as = &s->gicv3->dma_as; + uint16_t icid; + uint64_t rdbase; + bool valid; + MemTxResult res = MEMTX_OK; + bool result = false; + uint64_t value; + + offset += NUM_BYTES_IN_DW; + offset += NUM_BYTES_IN_DW; + + value = address_space_ldq_le(as, s->cq.base_addr + offset, + MEMTXATTRS_UNSPECIFIED, &res); + + if (res != MEMTX_OK) { + return result; + } + + icid = value & ICID_MASK; + + rdbase = (value & R_MAPC_RDBASE_MASK) >> R_MAPC_RDBASE_SHIFT; + rdbase &= RDBASE_PROCNUM_MASK; + + valid = (value & CMD_FIELD_VALID_MASK); + + if ((icid > s->ct.maxids.max_collids) || (rdbase >= s->gicv3->num_cpu)) { + qemu_log_mask(LOG_GUEST_ERROR, + "ITS MAPC: invalid collection table attributes " + "icid %d rdbase %" PRIu64 "\n", icid, rdbase); + /* + * in this implementation, in case of error + * we ignore this command and move on to the next + * command in the queue + */ + } else { + result = update_cte(s, icid, valid, rdbase); + } + + return result; +} + +static bool update_dte(GICv3ITSState *s, uint32_t devid, bool valid, + uint8_t size, uint64_t itt_addr) +{ + AddressSpace *as = &s->gicv3->dma_as; + uint64_t value; + uint64_t l2t_addr; + bool valid_l2t; + uint32_t l2t_id; + uint32_t max_l2_entries; + uint64_t dte = 0; + MemTxResult res = MEMTX_OK; + + if (s->dt.valid) { + if (valid) { + /* add mapping entry to device table */ + dte = (valid & TABLE_ENTRY_VALID_MASK) | + ((size & SIZE_MASK) << 1U) | + (itt_addr << GITS_DTE_ITTADDR_SHIFT); + } + } else { + return true; + } + + /* + * The specification defines the format of level 1 entries of a + * 2-level table, but the format of level 2 entries and the format + * of flat-mapped tables is IMPDEF.
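The table walkers above (get_cte(), get_dte(), update_cte(), update_dte()) share one piece of indirect-table arithmetic: an ID selects an 8-byte L1 slot, whose (valid) contents point at an L2 page of real entries. A sketch of just that index math, assuming the 64KB pages and 8-byte entries this model resets to:

#include <stdint.h>
#include <stdio.h>

#define L1TABLE_ENTRY_SIZE 8

/* Mirror of the two-level lookup arithmetic above.  The L1 entry's
 * valid bit and 51-bit address field are checked/masked in the real
 * code; here we only locate the slots. */
static void locate(uint32_t id, uint32_t page_sz, uint32_t entry_sz)
{
    uint32_t l1_index = id / (page_sz / L1TABLE_ENTRY_SIZE);
    uint32_t entries_per_l2 = page_sz / entry_sz;
    uint32_t l2_offset = (id % entries_per_l2) * entry_sz;

    printf("id %5u -> L1 slot %u, byte 0x%x of its L2 page\n",
           id, l1_index, l2_offset);
}

int main(void)
{
    locate(0, 0x10000, 8);
    locate(9000, 0x10000, 8);
    return 0;
}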
+ */ + if (s->dt.indirect) { + l2t_id = devid / (s->dt.page_sz / L1TABLE_ENTRY_SIZE); + + value = address_space_ldq_le(as, + s->dt.base_addr + + (l2t_id * L1TABLE_ENTRY_SIZE), + MEMTXATTRS_UNSPECIFIED, &res); + + if (res != MEMTX_OK) { + return false; + } + + valid_l2t = (value & L2_TABLE_VALID_MASK) != 0; + + if (valid_l2t) { + max_l2_entries = s->dt.page_sz / s->dt.entry_sz; + + l2t_addr = value & ((1ULL << 51) - 1); + + address_space_stq_le(as, l2t_addr + + ((devid % max_l2_entries) * GITS_DTE_SIZE), + dte, MEMTXATTRS_UNSPECIFIED, &res); + } + } else { + /* Flat level table */ + address_space_stq_le(as, s->dt.base_addr + (devid * GITS_DTE_SIZE), + dte, MEMTXATTRS_UNSPECIFIED, &res); + } + if (res != MEMTX_OK) { + return false; + } else { + return true; + } +} + +static bool process_mapd(GICv3ITSState *s, uint64_t value, uint32_t offset) +{ + AddressSpace *as = &s->gicv3->dma_as; + uint32_t devid; + uint8_t size; + uint64_t itt_addr; + bool valid; + MemTxResult res = MEMTX_OK; + bool result = false; + + devid = ((value & DEVID_MASK) >> DEVID_SHIFT); + + offset += NUM_BYTES_IN_DW; + value = address_space_ldq_le(as, s->cq.base_addr + offset, + MEMTXATTRS_UNSPECIFIED, &res); + + if (res != MEMTX_OK) { + return result; + } + + size = (value & SIZE_MASK); + + offset += NUM_BYTES_IN_DW; + value = address_space_ldq_le(as, s->cq.base_addr + offset, + MEMTXATTRS_UNSPECIFIED, &res); + + if (res != MEMTX_OK) { + return result; + } + + itt_addr = (value & ITTADDR_MASK) >> ITTADDR_SHIFT; + + valid = (value & CMD_FIELD_VALID_MASK); + + if ((devid > s->dt.maxids.max_devids) || + (size > FIELD_EX64(s->typer, GITS_TYPER, IDBITS))) { + qemu_log_mask(LOG_GUEST_ERROR, + "ITS MAPD: invalid device table attributes " + "devid %d or size %d\n", devid, size); + /* + * in this implementation, in case of error + * we ignore this command and move onto the next + * command in the queue + */ + } else { + result = update_dte(s, devid, valid, size, itt_addr); + } + + return result; +} + +/* + * Current implementation blocks until all + * commands are processed + */ +static void process_cmdq(GICv3ITSState *s) +{ + uint32_t wr_offset = 0; + uint32_t rd_offset = 0; + uint32_t cq_offset = 0; + uint64_t data; + AddressSpace *as = &s->gicv3->dma_as; + MemTxResult res = MEMTX_OK; + bool result = true; + uint8_t cmd; + int i; + + if (!(s->ctlr & ITS_CTLR_ENABLED)) { + return; + } + + wr_offset = FIELD_EX64(s->cwriter, GITS_CWRITER, OFFSET); + + if (wr_offset > s->cq.max_entries) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: invalid write offset " + "%d\n", __func__, wr_offset); + return; + } + + rd_offset = FIELD_EX64(s->creadr, GITS_CREADR, OFFSET); + + if (rd_offset > s->cq.max_entries) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: invalid read offset " + "%d\n", __func__, rd_offset); + return; + } + + while (wr_offset != rd_offset) { + cq_offset = (rd_offset * GITS_CMDQ_ENTRY_SIZE); + data = address_space_ldq_le(as, s->cq.base_addr + cq_offset, + MEMTXATTRS_UNSPECIFIED, &res); + if (res != MEMTX_OK) { + result = false; + } + cmd = (data & CMD_MASK); + + switch (cmd) { + case GITS_CMD_INT: + res = process_its_cmd(s, data, cq_offset, INTERRUPT); + break; + case GITS_CMD_CLEAR: + res = process_its_cmd(s, data, cq_offset, CLEAR); + break; + case GITS_CMD_SYNC: + /* + * Current implementation makes a blocking synchronous call + * for every command issued earlier, hence the internal state + * is already consistent by the time SYNC command is executed. + * Hence no further processing is required for SYNC command. 
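process_cmdq() below walks CREADR toward CWRITER in fixed-size steps and wraps at the end of the command queue; GITS_CMDQ_ENTRY_SIZE is 32 because every ITS command is four 64-bit words. The ring arithmetic in isolation, with an assumed one-page queue:

#include <stdint.h>
#include <stdio.h>

#define GITS_CMDQ_ENTRY_SIZE 32   /* four 64-bit words per ITS command */

int main(void)
{
    uint32_t max_entries = 4096 / GITS_CMDQ_ENTRY_SIZE;  /* one 4K page */
    uint32_t rd = max_entries - 2;   /* pretend CREADR is near the end */
    uint32_t wr = 1;                 /* CWRITER has already wrapped */

    while (rd != wr) {
        printf("fetch command at byte offset 0x%x\n",
               rd * GITS_CMDQ_ENTRY_SIZE);
        rd = (rd + 1) % max_entries; /* same advance as process_cmdq() */
    }
    return 0;
}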
+ */ + break; + case GITS_CMD_MAPD: + result = process_mapd(s, data, cq_offset); + break; + case GITS_CMD_MAPC: + result = process_mapc(s, cq_offset); + break; + case GITS_CMD_MAPTI: + result = process_mapti(s, data, cq_offset, false); + break; + case GITS_CMD_MAPI: + result = process_mapti(s, data, cq_offset, true); + break; + case GITS_CMD_DISCARD: + result = process_its_cmd(s, data, cq_offset, DISCARD); + break; + case GITS_CMD_INV: + case GITS_CMD_INVALL: + /* + * Current implementation doesn't cache any ITS tables, + * but the calculated lpi priority information. We only + * need to trigger lpi priority re-calculation to be in + * sync with LPI config table or pending table changes. + */ + for (i = 0; i < s->gicv3->num_cpu; i++) { + gicv3_redist_update_lpi(&s->gicv3->cpu[i]); + } + break; + default: + break; + } + if (result) { + rd_offset++; + rd_offset %= s->cq.max_entries; + s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, OFFSET, rd_offset); + } else { + /* + * in this implementation, in case of dma read/write error + * we stall the command processing + */ + s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, STALLED, 1); + qemu_log_mask(LOG_GUEST_ERROR, + "%s: %x cmd processing failed\n", __func__, cmd); + break; + } + } +} + +/* + * This function extracts the ITS Device and Collection table specific + * parameters (like base_addr, size etc) from GITS_BASER register. + * It is called during ITS enable and also during post_load migration + */ +static void extract_table_params(GICv3ITSState *s) +{ + uint16_t num_pages = 0; + uint8_t page_sz_type; + uint8_t type; + uint32_t page_sz = 0; + uint64_t value; + + for (int i = 0; i < 8; i++) { + value = s->baser[i]; + + if (!value) { + continue; + } + + page_sz_type = FIELD_EX64(value, GITS_BASER, PAGESIZE); + + switch (page_sz_type) { + case 0: + page_sz = GITS_PAGE_SIZE_4K; + break; + + case 1: + page_sz = GITS_PAGE_SIZE_16K; + break; + + case 2: + case 3: + page_sz = GITS_PAGE_SIZE_64K; + break; + + default: + g_assert_not_reached(); + } + + num_pages = FIELD_EX64(value, GITS_BASER, SIZE) + 1; + + type = FIELD_EX64(value, GITS_BASER, TYPE); + + switch (type) { + + case GITS_BASER_TYPE_DEVICE: + memset(&s->dt, 0 , sizeof(s->dt)); + s->dt.valid = FIELD_EX64(value, GITS_BASER, VALID); + + if (!s->dt.valid) { + return; + } + + s->dt.page_sz = page_sz; + s->dt.indirect = FIELD_EX64(value, GITS_BASER, INDIRECT); + s->dt.entry_sz = FIELD_EX64(value, GITS_BASER, ENTRYSIZE); + + if (!s->dt.indirect) { + s->dt.max_entries = (num_pages * page_sz) / s->dt.entry_sz; + } else { + s->dt.max_entries = (((num_pages * page_sz) / + L1TABLE_ENTRY_SIZE) * + (page_sz / s->dt.entry_sz)); + } + + s->dt.maxids.max_devids = (1UL << (FIELD_EX64(s->typer, GITS_TYPER, + DEVBITS) + 1)); + + s->dt.base_addr = baser_base_addr(value, page_sz); + + break; + + case GITS_BASER_TYPE_COLLECTION: + memset(&s->ct, 0 , sizeof(s->ct)); + s->ct.valid = FIELD_EX64(value, GITS_BASER, VALID); + + /* + * GITS_TYPER.HCC is 0 for this implementation + * hence writes are discarded if ct.valid is 0 + */ + if (!s->ct.valid) { + return; + } + + s->ct.page_sz = page_sz; + s->ct.indirect = FIELD_EX64(value, GITS_BASER, INDIRECT); + s->ct.entry_sz = FIELD_EX64(value, GITS_BASER, ENTRYSIZE); + + if (!s->ct.indirect) { + s->ct.max_entries = (num_pages * page_sz) / s->ct.entry_sz; + } else { + s->ct.max_entries = (((num_pages * page_sz) / + L1TABLE_ENTRY_SIZE) * + (page_sz / s->ct.entry_sz)); + } + + if (FIELD_EX64(s->typer, GITS_TYPER, CIL)) { + s->ct.maxids.max_collids = (1UL << 
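The max_entries computation in extract_table_params() above is easy to check by hand. A sketch assuming 64KB pages and 8-byte device-table entries (the sizes this model resets to); in the indirect case each 8-byte L1 slot buys a whole L2 page of entries:

#include <stdint.h>
#include <stdio.h>

#define L1TABLE_ENTRY_SIZE 8

/* Mirrors extract_table_params(): entries yielded by a GITS_BASER
 * programming, flat vs. indirect. */
static uint64_t max_entries(uint32_t num_pages, uint32_t page_sz,
                            uint32_t entry_sz, int indirect)
{
    if (!indirect) {
        return ((uint64_t)num_pages * page_sz) / entry_sz;
    }
    /* L1 pages hold 8-byte pointers to L2 pages of real entries */
    return (((uint64_t)num_pages * page_sz) / L1TABLE_ENTRY_SIZE) *
           (page_sz / entry_sz);
}

int main(void)
{
    /* 2 pages of 64K, 8-byte entries */
    printf("flat:     %llu entries\n",
           (unsigned long long)max_entries(2, 0x10000, 8, 0));
    printf("indirect: %llu entries\n",
           (unsigned long long)max_entries(2, 0x10000, 8, 1));
    return 0;
}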
(FIELD_EX64(s->typer, + GITS_TYPER, CIDBITS) + 1)); + } else { + /* 16-bit CollectionId supported when CIL == 0 */ + s->ct.maxids.max_collids = (1UL << 16); + } + + s->ct.base_addr = baser_base_addr(value, page_sz); + + break; + + default: + break; + } + } +} + +static void extract_cmdq_params(GICv3ITSState *s) +{ + uint16_t num_pages = 0; + uint64_t value = s->cbaser; + + num_pages = FIELD_EX64(value, GITS_CBASER, SIZE) + 1; + + memset(&s->cq, 0 , sizeof(s->cq)); + s->cq.valid = FIELD_EX64(value, GITS_CBASER, VALID); + + if (s->cq.valid) { + s->cq.max_entries = (num_pages * GITS_PAGE_SIZE_4K) / + GITS_CMDQ_ENTRY_SIZE; + s->cq.base_addr = FIELD_EX64(value, GITS_CBASER, PHYADDR); + s->cq.base_addr <<= R_GITS_CBASER_PHYADDR_SHIFT; + } +} + +static MemTxResult gicv3_its_translation_write(void *opaque, hwaddr offset, + uint64_t data, unsigned size, + MemTxAttrs attrs) +{ + GICv3ITSState *s = (GICv3ITSState *)opaque; + bool result = true; + uint32_t devid = 0; + + switch (offset) { + case GITS_TRANSLATER: + if (s->ctlr & ITS_CTLR_ENABLED) { + devid = attrs.requester_id; + result = process_its_cmd(s, data, devid, NONE); + } + break; + default: + break; + } + + if (result) { + return MEMTX_OK; + } else { + return MEMTX_ERROR; + } +} + +static bool its_writel(GICv3ITSState *s, hwaddr offset, + uint64_t value, MemTxAttrs attrs) +{ + bool result = true; + int index; + + switch (offset) { + case GITS_CTLR: + if (value & R_GITS_CTLR_ENABLED_MASK) { + s->ctlr |= ITS_CTLR_ENABLED; + extract_table_params(s); + extract_cmdq_params(s); + s->creadr = 0; + process_cmdq(s); + } else { + s->ctlr &= ~ITS_CTLR_ENABLED; + } + break; + case GITS_CBASER: + /* + * IMPDEF choice:- GITS_CBASER register becomes RO if ITS is + * already enabled + */ + if (!(s->ctlr & ITS_CTLR_ENABLED)) { + s->cbaser = deposit64(s->cbaser, 0, 32, value); + s->creadr = 0; + s->cwriter = s->creadr; + } + break; + case GITS_CBASER + 4: + /* + * IMPDEF choice:- GITS_CBASER register becomes RO if ITS is + * already enabled + */ + if (!(s->ctlr & ITS_CTLR_ENABLED)) { + s->cbaser = deposit64(s->cbaser, 32, 32, value); + s->creadr = 0; + s->cwriter = s->creadr; + } + break; + case GITS_CWRITER: + s->cwriter = deposit64(s->cwriter, 0, 32, + (value & ~R_GITS_CWRITER_RETRY_MASK)); + if (s->cwriter != s->creadr) { + process_cmdq(s); + } + break; + case GITS_CWRITER + 4: + s->cwriter = deposit64(s->cwriter, 32, 32, value); + break; + case GITS_CREADR: + if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) { + s->creadr = deposit64(s->creadr, 0, 32, + (value & ~R_GITS_CREADR_STALLED_MASK)); + } else { + /* RO register, ignore the write */ + qemu_log_mask(LOG_GUEST_ERROR, + "%s: invalid guest write to RO register at offset " + TARGET_FMT_plx "\n", __func__, offset); + } + break; + case GITS_CREADR + 4: + if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) { + s->creadr = deposit64(s->creadr, 32, 32, value); + } else { + /* RO register, ignore the write */ + qemu_log_mask(LOG_GUEST_ERROR, + "%s: invalid guest write to RO register at offset " + TARGET_FMT_plx "\n", __func__, offset); + } + break; + case GITS_BASER ... 
GITS_BASER + 0x3f: + /* + * IMPDEF choice:- GITS_BASERn register becomes RO if ITS is + * already enabled + */ + if (!(s->ctlr & ITS_CTLR_ENABLED)) { + index = (offset - GITS_BASER) / 8; + + if (offset & 7) { + value <<= 32; + value &= ~GITS_BASER_RO_MASK; + s->baser[index] &= GITS_BASER_RO_MASK | MAKE_64BIT_MASK(0, 32); + s->baser[index] |= value; + } else { + value &= ~GITS_BASER_RO_MASK; + s->baser[index] &= GITS_BASER_RO_MASK | MAKE_64BIT_MASK(32, 32); + s->baser[index] |= value; + } + } + break; + case GITS_IIDR: + case GITS_IDREGS ... GITS_IDREGS + 0x2f: + /* RO registers, ignore the write */ + qemu_log_mask(LOG_GUEST_ERROR, + "%s: invalid guest write to RO register at offset " + TARGET_FMT_plx "\n", __func__, offset); + break; + default: + result = false; + break; + } + return result; +} + +static bool its_readl(GICv3ITSState *s, hwaddr offset, + uint64_t *data, MemTxAttrs attrs) +{ + bool result = true; + int index; + + switch (offset) { + case GITS_CTLR: + *data = s->ctlr; + break; + case GITS_IIDR: + *data = gicv3_iidr(); + break; + case GITS_IDREGS ... GITS_IDREGS + 0x2f: + /* ID registers */ + *data = gicv3_idreg(offset - GITS_IDREGS); + break; + case GITS_TYPER: + *data = extract64(s->typer, 0, 32); + break; + case GITS_TYPER + 4: + *data = extract64(s->typer, 32, 32); + break; + case GITS_CBASER: + *data = extract64(s->cbaser, 0, 32); + break; + case GITS_CBASER + 4: + *data = extract64(s->cbaser, 32, 32); + break; + case GITS_CREADR: + *data = extract64(s->creadr, 0, 32); + break; + case GITS_CREADR + 4: + *data = extract64(s->creadr, 32, 32); + break; + case GITS_CWRITER: + *data = extract64(s->cwriter, 0, 32); + break; + case GITS_CWRITER + 4: + *data = extract64(s->cwriter, 32, 32); + break; + case GITS_BASER ... GITS_BASER + 0x3f: + index = (offset - GITS_BASER) / 8; + if (offset & 7) { + *data = extract64(s->baser[index], 32, 32); + } else { + *data = extract64(s->baser[index], 0, 32); + } + break; + default: + result = false; + break; + } + return result; +} + +static bool its_writell(GICv3ITSState *s, hwaddr offset, + uint64_t value, MemTxAttrs attrs) +{ + bool result = true; + int index; + + switch (offset) { + case GITS_BASER ... 
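The GITS_BASER 32-bit write path above has to merge the written half with the untouched half while dropping writes to read-only fields. A minimal sketch of that merge; the RO_MASK bit positions here are illustrative stand-ins for GITS_BASER_RO_MASK in gicv3_internal.h:

#include <stdint.h>
#include <stdio.h>

#define MAKE_64BIT_MASK(shift, length) \
    (((~0ULL) >> (64 - (length))) << (shift))

/* Illustrative RO mask: assume some high control fields are read-only. */
#define RO_MASK (MAKE_64BIT_MASK(56, 3) | MAKE_64BIT_MASK(48, 5))

static uint64_t write_half(uint64_t reg, uint32_t value, int high)
{
    uint64_t v = (uint64_t)value << (high ? 32 : 0);
    uint64_t keep = RO_MASK | MAKE_64BIT_MASK(high ? 0 : 32, 32);

    v &= ~RO_MASK;              /* writes to RO bits are dropped */
    return (reg & keep) | v;    /* keep RO bits and the other half */
}

int main(void)
{
    uint64_t baser = 0x0700000000000000ULL;  /* RO type field already set */

    baser = write_half(baser, 0xffffffff, 1);
    baser = write_half(baser, 0xfffff000, 0);
    printf("baser = 0x%016llx\n", (unsigned long long)baser);
    return 0;
}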
GITS_BASER + 0x3f: + /* + * IMPDEF choice:- GITS_BASERn register becomes RO if ITS is + * already enabled + */ + if (!(s->ctlr & ITS_CTLR_ENABLED)) { + index = (offset - GITS_BASER) / 8; + s->baser[index] &= GITS_BASER_RO_MASK; + s->baser[index] |= (value & ~GITS_BASER_RO_MASK); + } + break; + case GITS_CBASER: + /* + * IMPDEF choice:- GITS_CBASER register becomes RO if ITS is + * already enabled + */ + if (!(s->ctlr & ITS_CTLR_ENABLED)) { + s->cbaser = value; + s->creadr = 0; + s->cwriter = s->creadr; + } + break; + case GITS_CWRITER: + s->cwriter = value & ~R_GITS_CWRITER_RETRY_MASK; + if (s->cwriter != s->creadr) { + process_cmdq(s); + } + break; + case GITS_CREADR: + if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) { + s->creadr = value & ~R_GITS_CREADR_STALLED_MASK; + } else { + /* RO register, ignore the write */ + qemu_log_mask(LOG_GUEST_ERROR, + "%s: invalid guest write to RO register at offset " + TARGET_FMT_plx "\n", __func__, offset); + } + break; + case GITS_TYPER: + /* RO registers, ignore the write */ + qemu_log_mask(LOG_GUEST_ERROR, + "%s: invalid guest write to RO register at offset " + TARGET_FMT_plx "\n", __func__, offset); + break; + default: + result = false; + break; + } + return result; +} + +static bool its_readll(GICv3ITSState *s, hwaddr offset, + uint64_t *data, MemTxAttrs attrs) +{ + bool result = true; + int index; + + switch (offset) { + case GITS_TYPER: + *data = s->typer; + break; + case GITS_BASER ... GITS_BASER + 0x3f: + index = (offset - GITS_BASER) / 8; + *data = s->baser[index]; + break; + case GITS_CBASER: + *data = s->cbaser; + break; + case GITS_CREADR: + *data = s->creadr; + break; + case GITS_CWRITER: + *data = s->cwriter; + break; + default: + result = false; + break; + } + return result; +} + +static MemTxResult gicv3_its_read(void *opaque, hwaddr offset, uint64_t *data, + unsigned size, MemTxAttrs attrs) +{ + GICv3ITSState *s = (GICv3ITSState *)opaque; + bool result; + + switch (size) { + case 4: + result = its_readl(s, offset, data, attrs); + break; + case 8: + result = its_readll(s, offset, data, attrs); + break; + default: + result = false; + break; + } + + if (!result) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: invalid guest read at offset " TARGET_FMT_plx + "size %u\n", __func__, offset, size); + /* + * The spec requires that reserved registers are RAZ/WI; + * so use false returns from leaf functions as a way to + * trigger the guest-error logging but don't return it to + * the caller, or we'll cause a spurious guest data abort. + */ + *data = 0; + } + return MEMTX_OK; +} + +static MemTxResult gicv3_its_write(void *opaque, hwaddr offset, uint64_t data, + unsigned size, MemTxAttrs attrs) +{ + GICv3ITSState *s = (GICv3ITSState *)opaque; + bool result; + + switch (size) { + case 4: + result = its_writel(s, offset, data, attrs); + break; + case 8: + result = its_writell(s, offset, data, attrs); + break; + default: + result = false; + break; + } + + if (!result) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: invalid guest write at offset " TARGET_FMT_plx + "size %u\n", __func__, offset, size); + /* + * The spec requires that reserved registers are RAZ/WI; + * so use false returns from leaf functions as a way to + * trigger the guest-error logging but don't return it to + * the caller, or we'll cause a spurious guest data abort. 
+ */ + } + return MEMTX_OK; +} + +static const MemoryRegionOps gicv3_its_control_ops = { + .read_with_attrs = gicv3_its_read, + .write_with_attrs = gicv3_its_write, + .valid.min_access_size = 4, + .valid.max_access_size = 8, + .impl.min_access_size = 4, + .impl.max_access_size = 8, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static const MemoryRegionOps gicv3_its_translation_ops = { + .write_with_attrs = gicv3_its_translation_write, + .valid.min_access_size = 2, + .valid.max_access_size = 4, + .impl.min_access_size = 2, + .impl.max_access_size = 4, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void gicv3_arm_its_realize(DeviceState *dev, Error **errp) +{ + GICv3ITSState *s = ARM_GICV3_ITS_COMMON(dev); + int i; + + for (i = 0; i < s->gicv3->num_cpu; i++) { + if (!(s->gicv3->cpu[i].gicr_typer & GICR_TYPER_PLPIS)) { + error_setg(errp, "Physical LPI not supported by CPU %d", i); + return; + } + } + + gicv3_its_init_mmio(s, &gicv3_its_control_ops, &gicv3_its_translation_ops); + + address_space_init(&s->gicv3->dma_as, s->gicv3->dma, + "gicv3-its-sysmem"); + + /* set the ITS default features supported */ + s->typer = FIELD_DP64(s->typer, GITS_TYPER, PHYSICAL, + GITS_TYPE_PHYSICAL); + s->typer = FIELD_DP64(s->typer, GITS_TYPER, ITT_ENTRY_SIZE, + ITS_ITT_ENTRY_SIZE - 1); + s->typer = FIELD_DP64(s->typer, GITS_TYPER, IDBITS, ITS_IDBITS); + s->typer = FIELD_DP64(s->typer, GITS_TYPER, DEVBITS, ITS_DEVBITS); + s->typer = FIELD_DP64(s->typer, GITS_TYPER, CIL, 1); + s->typer = FIELD_DP64(s->typer, GITS_TYPER, CIDBITS, ITS_CIDBITS); +} + +static void gicv3_its_reset(DeviceState *dev) +{ + GICv3ITSState *s = ARM_GICV3_ITS_COMMON(dev); + GICv3ITSClass *c = ARM_GICV3_ITS_GET_CLASS(s); + + c->parent_reset(dev); + + /* Quiescent bit reset to 1 */ + s->ctlr = FIELD_DP32(s->ctlr, GITS_CTLR, QUIESCENT, 1); + + /* + * setting GITS_BASER0.Type = 0b001 (Device) + * GITS_BASER1.Type = 0b100 (Collection Table) + * GITS_BASER<n>.Type, where n = 2 to 7, is 0b00 (Unimplemented) + * GITS_BASER<0,1>.Page_Size = 64KB + * and default translation table entry size to 16 bytes + */ + s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, TYPE, + GITS_BASER_TYPE_DEVICE); + s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, PAGESIZE, + GITS_BASER_PAGESIZE_64K); + s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, ENTRYSIZE, + GITS_DTE_SIZE - 1); + + s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, TYPE, + GITS_BASER_TYPE_COLLECTION); + s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, PAGESIZE, + GITS_BASER_PAGESIZE_64K); + s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, ENTRYSIZE, + GITS_CTE_SIZE - 1); +} + +static void gicv3_its_post_load(GICv3ITSState *s) +{ + if (s->ctlr & ITS_CTLR_ENABLED) { + extract_table_params(s); + extract_cmdq_params(s); + } +} + +static Property gicv3_its_props[] = { + DEFINE_PROP_LINK("parent-gicv3", GICv3ITSState, gicv3, "arm-gicv3", + GICv3State *), + DEFINE_PROP_END_OF_LIST(), +}; + +static void gicv3_its_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + GICv3ITSClass *ic = ARM_GICV3_ITS_CLASS(klass); + GICv3ITSCommonClass *icc = ARM_GICV3_ITS_COMMON_CLASS(klass); + + dc->realize = gicv3_arm_its_realize; + device_class_set_props(dc, gicv3_its_props); + device_class_set_parent_reset(dc, gicv3_its_reset, &ic->parent_reset); + icc->post_load = gicv3_its_post_load; +} + +static const TypeInfo gicv3_its_info = { + .name = TYPE_ARM_GICV3_ITS, + .parent = TYPE_ARM_GICV3_ITS_COMMON, + .instance_size = sizeof(GICv3ITSState), + .class_init = gicv3_its_class_init, +
.class_size = sizeof(GICv3ITSClass), +}; + +static void gicv3_its_register_types(void) +{ + type_register_static(&gicv3_its_info); +} + +type_init(gicv3_its_register_types) diff --git a/hw/intc/arm_gicv3_its_common.c b/hw/intc/arm_gicv3_its_common.c new file mode 100644 index 000000000..90b85f1e2 --- /dev/null +++ b/hw/intc/arm_gicv3_its_common.c @@ -0,0 +1,159 @@ +/* + * ITS base class for a GICv3-based system + * + * Copyright (c) 2015 Samsung Electronics Co., Ltd. + * Written by Pavel Fedin + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . + */ + +#include "qemu/osdep.h" +#include "hw/pci/msi.h" +#include "migration/vmstate.h" +#include "hw/intc/arm_gicv3_its_common.h" +#include "qemu/log.h" +#include "qemu/module.h" + +static int gicv3_its_pre_save(void *opaque) +{ + GICv3ITSState *s = (GICv3ITSState *)opaque; + GICv3ITSCommonClass *c = ARM_GICV3_ITS_COMMON_GET_CLASS(s); + + if (c->pre_save) { + c->pre_save(s); + } + + return 0; +} + +static int gicv3_its_post_load(void *opaque, int version_id) +{ + GICv3ITSState *s = (GICv3ITSState *)opaque; + GICv3ITSCommonClass *c = ARM_GICV3_ITS_COMMON_GET_CLASS(s); + + if (c->post_load) { + c->post_load(s); + } + return 0; +} + +static const VMStateDescription vmstate_its = { + .name = "arm_gicv3_its", + .pre_save = gicv3_its_pre_save, + .post_load = gicv3_its_post_load, + .priority = MIG_PRI_GICV3_ITS, + .fields = (VMStateField[]) { + VMSTATE_UINT32(ctlr, GICv3ITSState), + VMSTATE_UINT32(iidr, GICv3ITSState), + VMSTATE_UINT64(cbaser, GICv3ITSState), + VMSTATE_UINT64(cwriter, GICv3ITSState), + VMSTATE_UINT64(creadr, GICv3ITSState), + VMSTATE_UINT64_ARRAY(baser, GICv3ITSState, 8), + VMSTATE_END_OF_LIST() + }, +}; + +static MemTxResult gicv3_its_trans_read(void *opaque, hwaddr offset, + uint64_t *data, unsigned size, + MemTxAttrs attrs) +{ + qemu_log_mask(LOG_GUEST_ERROR, "ITS read at offset 0x%"PRIx64"\n", offset); + *data = 0; + return MEMTX_OK; +} + +static MemTxResult gicv3_its_trans_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size, + MemTxAttrs attrs) +{ + if (offset == 0x0040 && ((size == 2) || (size == 4))) { + GICv3ITSState *s = ARM_GICV3_ITS_COMMON(opaque); + GICv3ITSCommonClass *c = ARM_GICV3_ITS_COMMON_GET_CLASS(s); + int ret = c->send_msi(s, le64_to_cpu(value), attrs.requester_id); + + if (ret <= 0) { + qemu_log_mask(LOG_GUEST_ERROR, + "ITS: Error sending MSI: %s\n", strerror(-ret)); + } + } else { + qemu_log_mask(LOG_GUEST_ERROR, + "ITS write at bad offset 0x%"PRIx64"\n", offset); + } + return MEMTX_OK; +} + +static const MemoryRegionOps gicv3_its_trans_ops = { + .read_with_attrs = gicv3_its_trans_read, + .write_with_attrs = gicv3_its_trans_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +void gicv3_its_init_mmio(GICv3ITSState *s, const MemoryRegionOps *ops, + const MemoryRegionOps *tops) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(s); + + memory_region_init_io(&s->iomem_its_cntrl, OBJECT(s), ops, s, + "control", ITS_CONTROL_SIZE); + 
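The region setup being built in gicv3_its_init_mmio() here places the translation frame immediately after the control frame, which is why a guest (or the KVM forwarding code later in this patch) can locate GITS_TRANSLATER at control_base + ITS_CONTROL_SIZE + 0x40. A sketch of that layout arithmetic, assuming 64KB frames as in this patch and using the virt board's 0x08080000 as an example base:

#include <stdint.h>
#include <stdio.h>

#define ITS_CONTROL_SIZE 0x10000   /* 64K control frame, per this patch */
#define GITS_TRANSLATER  0x0040    /* offset within the translation frame */

int main(void)
{
    uint64_t its_base = 0x08080000;   /* example: QEMU virt board ITS base */

    printf("control frame:     0x%llx\n", (unsigned long long)its_base);
    printf("translation frame: 0x%llx\n",
           (unsigned long long)(its_base + ITS_CONTROL_SIZE));
    printf("MSI doorbell:      0x%llx\n",
           (unsigned long long)(its_base + ITS_CONTROL_SIZE + GITS_TRANSLATER));
    return 0;
}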
memory_region_init_io(&s->iomem_its_translation, OBJECT(s), + tops ? tops : &gicv3_its_trans_ops, s, + "translation", ITS_TRANS_SIZE); + + /* Our two regions are always adjacent, therefore we now combine them + * into a single one in order to make our users' life easier. + */ + memory_region_init(&s->iomem_main, OBJECT(s), "gicv3_its", ITS_SIZE); + memory_region_add_subregion(&s->iomem_main, 0, &s->iomem_its_cntrl); + memory_region_add_subregion(&s->iomem_main, ITS_CONTROL_SIZE, + &s->iomem_its_translation); + sysbus_init_mmio(sbd, &s->iomem_main); + + msi_nonbroken = true; +} + +static void gicv3_its_common_reset(DeviceState *dev) +{ + GICv3ITSState *s = ARM_GICV3_ITS_COMMON(dev); + + s->ctlr = 0; + s->cbaser = 0; + s->cwriter = 0; + s->creadr = 0; + s->iidr = 0; + memset(&s->baser, 0, sizeof(s->baser)); +} + +static void gicv3_its_common_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = gicv3_its_common_reset; + dc->vmsd = &vmstate_its; +} + +static const TypeInfo gicv3_its_common_info = { + .name = TYPE_ARM_GICV3_ITS_COMMON, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(GICv3ITSState), + .class_size = sizeof(GICv3ITSCommonClass), + .class_init = gicv3_its_common_class_init, + .abstract = true, +}; + +static void gicv3_its_common_register_types(void) +{ + type_register_static(&gicv3_its_common_info); +} + +type_init(gicv3_its_common_register_types) diff --git a/hw/intc/arm_gicv3_its_kvm.c b/hw/intc/arm_gicv3_its_kvm.c new file mode 100644 index 000000000..0b4cbed28 --- /dev/null +++ b/hw/intc/arm_gicv3_its_kvm.c @@ -0,0 +1,266 @@ +/* + * KVM-based ITS implementation for a GICv3-based system + * + * Copyright (c) 2015 Samsung Electronics Co., Ltd. + * Written by Pavel Fedin + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . 
+ */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/module.h" +#include "hw/intc/arm_gicv3_its_common.h" +#include "hw/qdev-properties.h" +#include "sysemu/runstate.h" +#include "sysemu/kvm.h" +#include "kvm_arm.h" +#include "migration/blocker.h" +#include "qom/object.h" + +#define TYPE_KVM_ARM_ITS "arm-its-kvm" +typedef struct KVMARMITSClass KVMARMITSClass; +/* This is reusing the GICv3ITSState typedef from ARM_GICV3_ITS_COMMON */ +DECLARE_OBJ_CHECKERS(GICv3ITSState, KVMARMITSClass, + KVM_ARM_ITS, TYPE_KVM_ARM_ITS) + +struct KVMARMITSClass { + GICv3ITSCommonClass parent_class; + void (*parent_reset)(DeviceState *dev); +}; + + +static int kvm_its_send_msi(GICv3ITSState *s, uint32_t value, uint16_t devid) +{ + struct kvm_msi msi; + + if (unlikely(!s->translater_gpa_known)) { + MemoryRegion *mr = &s->iomem_its_translation; + MemoryRegionSection mrs; + + mrs = memory_region_find(mr, 0, 1); + memory_region_unref(mrs.mr); + s->gits_translater_gpa = mrs.offset_within_address_space + 0x40; + s->translater_gpa_known = true; + } + + msi.address_lo = extract64(s->gits_translater_gpa, 0, 32); + msi.address_hi = extract64(s->gits_translater_gpa, 32, 32); + msi.data = le32_to_cpu(value); + msi.flags = KVM_MSI_VALID_DEVID; + msi.devid = devid; + memset(msi.pad, 0, sizeof(msi.pad)); + + return kvm_vm_ioctl(kvm_state, KVM_SIGNAL_MSI, &msi); +} + +/** + * vm_change_state_handler - VM change state callback aiming at flushing + * ITS tables into guest RAM + * + * The tables get flushed to guest RAM whenever the VM gets stopped. + */ +static void vm_change_state_handler(void *opaque, bool running, + RunState state) +{ + GICv3ITSState *s = (GICv3ITSState *)opaque; + Error *err = NULL; + + if (running) { + return; + } + + kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_CTRL, + KVM_DEV_ARM_ITS_SAVE_TABLES, NULL, true, &err); + if (err) { + error_report_err(err); + } +} + +static void kvm_arm_its_realize(DeviceState *dev, Error **errp) +{ + GICv3ITSState *s = ARM_GICV3_ITS_COMMON(dev); + + s->dev_fd = kvm_create_device(kvm_state, KVM_DEV_TYPE_ARM_VGIC_ITS, false); + if (s->dev_fd < 0) { + error_setg_errno(errp, -s->dev_fd, "error creating in-kernel ITS"); + return; + } + + /* explicit init of the ITS */ + kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_CTRL, + KVM_DEV_ARM_VGIC_CTRL_INIT, NULL, true, &error_abort); + + /* register the base address */ + kvm_arm_register_device(&s->iomem_its_cntrl, -1, KVM_DEV_ARM_VGIC_GRP_ADDR, + KVM_VGIC_ITS_ADDR_TYPE, s->dev_fd, 0); + + gicv3_its_init_mmio(s, NULL, NULL); + + if (!kvm_device_check_attr(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_ITS_REGS, + GITS_CTLR)) { + error_setg(&s->migration_blocker, "This operating system kernel " + "does not support vITS migration"); + if (migrate_add_blocker(s->migration_blocker, errp) < 0) { + error_free(s->migration_blocker); + return; + } + } else { + qemu_add_vm_change_state_handler(vm_change_state_handler, s); + } + + kvm_msi_use_devid = true; + kvm_gsi_direct_mapping = false; + kvm_msi_via_irqfd_allowed = kvm_irqfds_enabled(); +} + +/** + * kvm_arm_its_pre_save - handles the saving of ITS registers. + * ITS tables are flushed into guest RAM separately and earlier, + * through the VM change state handler, since at the moment pre_save() + * is called, the guest RAM has already been saved. 
+ */ +static void kvm_arm_its_pre_save(GICv3ITSState *s) +{ + int i; + + for (i = 0; i < 8; i++) { + kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_ITS_REGS, + GITS_BASER + i * 8, &s->baser[i], false, + &error_abort); + } + + kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_ITS_REGS, + GITS_CTLR, &s->ctlr, false, &error_abort); + + kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_ITS_REGS, + GITS_CBASER, &s->cbaser, false, &error_abort); + + kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_ITS_REGS, + GITS_CREADR, &s->creadr, false, &error_abort); + + kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_ITS_REGS, + GITS_CWRITER, &s->cwriter, false, &error_abort); + + kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_ITS_REGS, + GITS_IIDR, &s->iidr, false, &error_abort); +} + +/** + * kvm_arm_its_post_load - Restore both the ITS registers and tables + */ +static void kvm_arm_its_post_load(GICv3ITSState *s) +{ + int i; + + kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_ITS_REGS, + GITS_IIDR, &s->iidr, true, &error_abort); + + /* + * must be written before GITS_CREADR since GITS_CBASER write + * access resets GITS_CREADR. + */ + kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_ITS_REGS, + GITS_CBASER, &s->cbaser, true, &error_abort); + + kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_ITS_REGS, + GITS_CREADR, &s->creadr, true, &error_abort); + + kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_ITS_REGS, + GITS_CWRITER, &s->cwriter, true, &error_abort); + + + for (i = 0; i < 8; i++) { + kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_ITS_REGS, + GITS_BASER + i * 8, &s->baser[i], true, + &error_abort); + } + + kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_CTRL, + KVM_DEV_ARM_ITS_RESTORE_TABLES, NULL, true, + &error_abort); + + kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_ITS_REGS, + GITS_CTLR, &s->ctlr, true, &error_abort); +} + +static void kvm_arm_its_reset(DeviceState *dev) +{ + GICv3ITSState *s = ARM_GICV3_ITS_COMMON(dev); + KVMARMITSClass *c = KVM_ARM_ITS_GET_CLASS(s); + int i; + + c->parent_reset(dev); + + if (kvm_device_check_attr(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_CTRL, + KVM_DEV_ARM_ITS_CTRL_RESET)) { + kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_CTRL, + KVM_DEV_ARM_ITS_CTRL_RESET, NULL, true, &error_abort); + return; + } + + warn_report("ITS KVM: full reset is not supported by the host kernel"); + + if (!kvm_device_check_attr(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_ITS_REGS, + GITS_CTLR)) { + return; + } + + kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_ITS_REGS, + GITS_CTLR, &s->ctlr, true, &error_abort); + + kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_ITS_REGS, + GITS_CBASER, &s->cbaser, true, &error_abort); + + for (i = 0; i < 8; i++) { + kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_ITS_REGS, + GITS_BASER + i * 8, &s->baser[i], true, + &error_abort); + } +} + +static Property kvm_arm_its_props[] = { + DEFINE_PROP_LINK("parent-gicv3", GICv3ITSState, gicv3, "kvm-arm-gicv3", + GICv3State *), + DEFINE_PROP_END_OF_LIST(), +}; + +static void kvm_arm_its_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + GICv3ITSCommonClass *icc = ARM_GICV3_ITS_COMMON_CLASS(klass); + KVMARMITSClass *ic = KVM_ARM_ITS_CLASS(klass); + + dc->realize = kvm_arm_its_realize; + device_class_set_props(dc, kvm_arm_its_props); + device_class_set_parent_reset(dc, kvm_arm_its_reset, &ic->parent_reset); + icc->send_msi = kvm_its_send_msi; + icc->pre_save = kvm_arm_its_pre_save; + icc->post_load = kvm_arm_its_post_load; +} + +static const TypeInfo 
kvm_arm_its_info = { + .name = TYPE_KVM_ARM_ITS, + .parent = TYPE_ARM_GICV3_ITS_COMMON, + .instance_size = sizeof(GICv3ITSState), + .class_init = kvm_arm_its_class_init, + .class_size = sizeof(KVMARMITSClass), +}; + +static void kvm_arm_its_register_types(void) +{ + type_register_static(&kvm_arm_its_info); +} + +type_init(kvm_arm_its_register_types) diff --git a/hw/intc/arm_gicv3_kvm.c b/hw/intc/arm_gicv3_kvm.c new file mode 100644 index 000000000..5ec5ff9ef --- /dev/null +++ b/hw/intc/arm_gicv3_kvm.c @@ -0,0 +1,900 @@ +/* + * ARM Generic Interrupt Controller using KVM in-kernel support + * + * Copyright (c) 2015 Samsung Electronics Co., Ltd. + * Written by Pavel Fedin + * Based on vGICv2 code by Peter Maydell + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "hw/intc/arm_gicv3_common.h" +#include "qemu/error-report.h" +#include "qemu/module.h" +#include "sysemu/kvm.h" +#include "sysemu/runstate.h" +#include "kvm_arm.h" +#include "gicv3_internal.h" +#include "vgic_common.h" +#include "migration/blocker.h" +#include "qom/object.h" + +#ifdef DEBUG_GICV3_KVM +#define DPRINTF(fmt, ...) \ + do { fprintf(stderr, "kvm_gicv3: " fmt, ## __VA_ARGS__); } while (0) +#else +#define DPRINTF(fmt, ...) 
\ + do { } while (0) +#endif + +#define TYPE_KVM_ARM_GICV3 "kvm-arm-gicv3" +typedef struct KVMARMGICv3Class KVMARMGICv3Class; +/* This is reusing the GICv3State typedef from ARM_GICV3_ITS_COMMON */ +DECLARE_OBJ_CHECKERS(GICv3State, KVMARMGICv3Class, + KVM_ARM_GICV3, TYPE_KVM_ARM_GICV3) + +#define KVM_DEV_ARM_VGIC_SYSREG(op0, op1, crn, crm, op2) \ + (ARM64_SYS_REG_SHIFT_MASK(op0, OP0) | \ + ARM64_SYS_REG_SHIFT_MASK(op1, OP1) | \ + ARM64_SYS_REG_SHIFT_MASK(crn, CRN) | \ + ARM64_SYS_REG_SHIFT_MASK(crm, CRM) | \ + ARM64_SYS_REG_SHIFT_MASK(op2, OP2)) + +#define ICC_PMR_EL1 \ + KVM_DEV_ARM_VGIC_SYSREG(3, 0, 4, 6, 0) +#define ICC_BPR0_EL1 \ + KVM_DEV_ARM_VGIC_SYSREG(3, 0, 12, 8, 3) +#define ICC_AP0R_EL1(n) \ + KVM_DEV_ARM_VGIC_SYSREG(3, 0, 12, 8, 4 | n) +#define ICC_AP1R_EL1(n) \ + KVM_DEV_ARM_VGIC_SYSREG(3, 0, 12, 9, n) +#define ICC_BPR1_EL1 \ + KVM_DEV_ARM_VGIC_SYSREG(3, 0, 12, 12, 3) +#define ICC_CTLR_EL1 \ + KVM_DEV_ARM_VGIC_SYSREG(3, 0, 12, 12, 4) +#define ICC_SRE_EL1 \ + KVM_DEV_ARM_VGIC_SYSREG(3, 0, 12, 12, 5) +#define ICC_IGRPEN0_EL1 \ + KVM_DEV_ARM_VGIC_SYSREG(3, 0, 12, 12, 6) +#define ICC_IGRPEN1_EL1 \ + KVM_DEV_ARM_VGIC_SYSREG(3, 0, 12, 12, 7) + +struct KVMARMGICv3Class { + ARMGICv3CommonClass parent_class; + DeviceRealize parent_realize; + void (*parent_reset)(DeviceState *dev); +}; + +static void kvm_arm_gicv3_set_irq(void *opaque, int irq, int level) +{ + GICv3State *s = (GICv3State *)opaque; + + kvm_arm_gic_set_irq(s->num_irq, irq, level); +} + +#define KVM_VGIC_ATTR(reg, typer) \ + ((typer & KVM_DEV_ARM_VGIC_V3_MPIDR_MASK) | (reg)) + +static inline void kvm_gicd_access(GICv3State *s, int offset, + uint32_t *val, bool write) +{ + kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_DIST_REGS, + KVM_VGIC_ATTR(offset, 0), + val, write, &error_abort); +} + +static inline void kvm_gicr_access(GICv3State *s, int offset, int cpu, + uint32_t *val, bool write) +{ + kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_REDIST_REGS, + KVM_VGIC_ATTR(offset, s->cpu[cpu].gicr_typer), + val, write, &error_abort); +} + +static inline void kvm_gicc_access(GICv3State *s, uint64_t reg, int cpu, + uint64_t *val, bool write) +{ + kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS, + KVM_VGIC_ATTR(reg, s->cpu[cpu].gicr_typer), + val, write, &error_abort); +} + +static inline void kvm_gic_line_level_access(GICv3State *s, int irq, int cpu, + uint32_t *val, bool write) +{ + kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO, + KVM_VGIC_ATTR(irq, s->cpu[cpu].gicr_typer) | + (VGIC_LEVEL_INFO_LINE_LEVEL << + KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT), + val, write, &error_abort); +} + +/* Loop through each distributor IRQ related register; since bits + * corresponding to SPIs and PPIs are RAZ/WI when affinity routing + * is enabled, we skip those. + */ +#define for_each_dist_irq_reg(_irq, _max, _field_width) \ + for (_irq = GIC_INTERNAL; _irq < _max; _irq += (32 / _field_width)) + +static void kvm_dist_get_priority(GICv3State *s, uint32_t offset, uint8_t *bmp) +{ + uint32_t reg, *field; + int irq; + + /* For the KVM GICv3, affinity routing is always enabled, and the first 8 + * GICD_IPRIORITYR registers are always RAZ/WI. The corresponding + * functionality is replaced by GICR_IPRIORITYR. It doesn't need to + * sync them. So it needs to skip the field of GIC_INTERNAL irqs in bmp and + * offset. 
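The for_each_dist_irq_reg() walk defined above advances one 32-bit register per step, with 32/field_width interrupts per register, starting past the GIC_INTERNAL (SGI/PPI) range that the redistributor owns. Standalone, for the 8-bit GICD_IPRIORITYR layout:

#include <stdio.h>

#define GIC_INTERNAL 32

/* Same iteration as the for_each_dist_irq_reg() macro above. */
int main(void)
{
    int num_irq = 96, field_width = 8;            /* GICD_IPRIORITYR-style */
    unsigned offset = (GIC_INTERNAL * field_width) / 8;  /* skip irqs < 32 */
    int irq;

    for (irq = GIC_INTERNAL; irq < num_irq; irq += 32 / field_width) {
        printf("irq %2d..%2d -> register at +0x%x\n",
               irq, irq + 32 / field_width - 1, offset);
        offset += 4;
    }
    return 0;
}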
+ */ + field = (uint32_t *)(bmp + GIC_INTERNAL); + offset += (GIC_INTERNAL * 8) / 8; + for_each_dist_irq_reg(irq, s->num_irq, 8) { + kvm_gicd_access(s, offset, &reg, false); + *field = reg; + offset += 4; + field++; + } +} + +static void kvm_dist_put_priority(GICv3State *s, uint32_t offset, uint8_t *bmp) +{ + uint32_t reg, *field; + int irq; + + /* For the KVM GICv3, affinity routing is always enabled, and the first 8 + * GICD_IPRIORITYR registers are always RAZ/WI. The corresponding + * functionality is replaced by GICR_IPRIORITYR. It doesn't need to + * sync them. So it needs to skip the field of GIC_INTERNAL irqs in bmp and + * offset. + */ + field = (uint32_t *)(bmp + GIC_INTERNAL); + offset += (GIC_INTERNAL * 8) / 8; + for_each_dist_irq_reg(irq, s->num_irq, 8) { + reg = *field; + kvm_gicd_access(s, offset, &reg, true); + offset += 4; + field++; + } +} + +static void kvm_dist_get_edge_trigger(GICv3State *s, uint32_t offset, + uint32_t *bmp) +{ + uint32_t reg; + int irq; + + /* For the KVM GICv3, affinity routing is always enabled, and the first 2 + * GICD_ICFGR registers are always RAZ/WI. The corresponding + * functionality is replaced by GICR_ICFGR. It doesn't need to sync + * them. So it should increase the offset to skip GIC_INTERNAL irqs. + * This matches the for_each_dist_irq_reg() macro which also skips the + * first GIC_INTERNAL irqs. + */ + offset += (GIC_INTERNAL * 2) / 8; + for_each_dist_irq_reg(irq, s->num_irq, 2) { + kvm_gicd_access(s, offset, &reg, false); + reg = half_unshuffle32(reg >> 1); + if (irq % 32 != 0) { + reg = (reg << 16); + } + *gic_bmp_ptr32(bmp, irq) |= reg; + offset += 4; + } +} + +static void kvm_dist_put_edge_trigger(GICv3State *s, uint32_t offset, + uint32_t *bmp) +{ + uint32_t reg; + int irq; + + /* For the KVM GICv3, affinity routing is always enabled, and the first 2 + * GICD_ICFGR registers are always RAZ/WI. The corresponding + * functionality is replaced by GICR_ICFGR. It doesn't need to sync + * them. So it should increase the offset to skip GIC_INTERNAL irqs. + * This matches the for_each_dist_irq_reg() macro which also skips the + * first GIC_INTERNAL irqs. + */ + offset += (GIC_INTERNAL * 2) / 8; + for_each_dist_irq_reg(irq, s->num_irq, 2) { + reg = *gic_bmp_ptr32(bmp, irq); + if (irq % 32 != 0) { + reg = (reg & 0xffff0000) >> 16; + } else { + reg = reg & 0xffff; + } + reg = half_shuffle32(reg) << 1; + kvm_gicd_access(s, offset, &reg, true); + offset += 4; + } +} + +static void kvm_gic_get_line_level_bmp(GICv3State *s, uint32_t *bmp) +{ + uint32_t reg; + int irq; + + for_each_dist_irq_reg(irq, s->num_irq, 1) { + kvm_gic_line_level_access(s, irq, 0, &reg, false); + *gic_bmp_ptr32(bmp, irq) = reg; + } +} + +static void kvm_gic_put_line_level_bmp(GICv3State *s, uint32_t *bmp) +{ + uint32_t reg; + int irq; + + for_each_dist_irq_reg(irq, s->num_irq, 1) { + reg = *gic_bmp_ptr32(bmp, irq); + kvm_gic_line_level_access(s, irq, 0, &reg, true); + } +} + +/* Read a bitmap register group from the kernel VGIC. */ +static void kvm_dist_getbmp(GICv3State *s, uint32_t offset, uint32_t *bmp) +{ + uint32_t reg; + int irq; + + /* For the KVM GICv3, affinity routing is always enabled, and the + * GICD_IGROUPR0/GICD_IGRPMODR0/GICD_ISENABLER0/GICD_ISPENDR0/ + * GICD_ISACTIVER0 registers are always RAZ/WI. The corresponding + * functionality is replaced by the GICR registers. It doesn't need to sync + * them. So it should increase the offset to skip GIC_INTERNAL irqs. + * This matches the for_each_dist_irq_reg() macro which also skips the + * first GIC_INTERNAL irqs.
+ */ + offset += (GIC_INTERNAL * 1) / 8; + for_each_dist_irq_reg(irq, s->num_irq, 1) { + kvm_gicd_access(s, offset, &reg, false); + *gic_bmp_ptr32(bmp, irq) = reg; + offset += 4; + } +} + +static void kvm_dist_putbmp(GICv3State *s, uint32_t offset, + uint32_t clroffset, uint32_t *bmp) +{ + uint32_t reg; + int irq; + + /* For the KVM GICv3, affinity routing is always enabled, and the + * GICD_IGROUPR0/GICD_IGRPMODR0/GICD_ISENABLER0/GICD_ISPENDR0/ + * GICD_ISACTIVER0 registers are always RAZ/WI. The corresponding + * functionality is replaced by the GICR registers. It doesn't need to sync + * them. So it should increase the offset and clroffset to skip GIC_INTERNAL + * irqs. This matches the for_each_dist_irq_reg() macro which also skips the + * first GIC_INTERNAL irqs. + */ + offset += (GIC_INTERNAL * 1) / 8; + if (clroffset != 0) { + clroffset += (GIC_INTERNAL * 1) / 8; + } + + for_each_dist_irq_reg(irq, s->num_irq, 1) { + /* If this bitmap is a set/clear register pair, first write to the + * clear-reg to clear all bits before using the set-reg to write + * the 1 bits. + */ + if (clroffset != 0) { + reg = 0; + kvm_gicd_access(s, clroffset, &reg, true); + clroffset += 4; + } + reg = *gic_bmp_ptr32(bmp, irq); + kvm_gicd_access(s, offset, &reg, true); + offset += 4; + } +} + +static void kvm_arm_gicv3_check(GICv3State *s) +{ + uint32_t reg; + uint32_t num_irq; + + /* Sanity checking s->num_irq */ + kvm_gicd_access(s, GICD_TYPER, &reg, false); + num_irq = ((reg & 0x1f) + 1) * 32; + + if (num_irq < s->num_irq) { + error_report("Model requests %u IRQs, but kernel supports max %u", + s->num_irq, num_irq); + abort(); + } +} + +static void kvm_arm_gicv3_put(GICv3State *s) +{ + uint32_t regl, regh, reg; + uint64_t reg64, redist_typer; + int ncpu, i; + + kvm_arm_gicv3_check(s); + + kvm_gicr_access(s, GICR_TYPER, 0, &regl, false); + kvm_gicr_access(s, GICR_TYPER + 4, 0, &regh, false); + redist_typer = ((uint64_t)regh << 32) | regl; + + reg = s->gicd_ctlr; + kvm_gicd_access(s, GICD_CTLR, &reg, true); + + if (redist_typer & GICR_TYPER_PLPIS) { + /* + * Restore base addresses before LPIs are potentially enabled by + * GICR_CTLR write + */ + for (ncpu = 0; ncpu < s->num_cpu; ncpu++) { + GICv3CPUState *c = &s->cpu[ncpu]; + + reg64 = c->gicr_propbaser; + regl = (uint32_t)reg64; + kvm_gicr_access(s, GICR_PROPBASER, ncpu, &regl, true); + regh = (uint32_t)(reg64 >> 32); + kvm_gicr_access(s, GICR_PROPBASER + 4, ncpu, &regh, true); + + reg64 = c->gicr_pendbaser; + regl = (uint32_t)reg64; + kvm_gicr_access(s, GICR_PENDBASER, ncpu, &regl, true); + regh = (uint32_t)(reg64 >> 32); + kvm_gicr_access(s, GICR_PENDBASER + 4, ncpu, &regh, true); + } + } + + /* Redistributor state (one per CPU) */ + + for (ncpu = 0; ncpu < s->num_cpu; ncpu++) { + GICv3CPUState *c = &s->cpu[ncpu]; + + reg = c->gicr_ctlr; + kvm_gicr_access(s, GICR_CTLR, ncpu, &reg, true); + + reg = c->gicr_statusr[GICV3_NS]; + kvm_gicr_access(s, GICR_STATUSR, ncpu, &reg, true); + + reg = c->gicr_waker; + kvm_gicr_access(s, GICR_WAKER, ncpu, &reg, true); + + reg = c->gicr_igroupr0; + kvm_gicr_access(s, GICR_IGROUPR0, ncpu, &reg, true); + + reg = ~0; + kvm_gicr_access(s, GICR_ICENABLER0, ncpu, &reg, true); + reg = c->gicr_ienabler0; + kvm_gicr_access(s, GICR_ISENABLER0, ncpu, &reg, true); + + /* Restore config before pending so we treat level/edge correctly */ + reg = half_shuffle32(c->edge_trigger >> 16) << 1; + kvm_gicr_access(s, GICR_ICFGR1, ncpu, &reg, true); + + reg = c->level; + kvm_gic_line_level_access(s, 0, ncpu, &reg, true); + + reg = ~0; + kvm_gicr_access(s, GICR_ICPENDR0, ncpu, &reg, true); + reg =
c->gicr_ipendr0; + kvm_gicr_access(s, GICR_ISPENDR0, ncpu, &reg, true); + + reg = ~0; + kvm_gicr_access(s, GICR_ICACTIVER0, ncpu, &reg, true); + reg = c->gicr_iactiver0; + kvm_gicr_access(s, GICR_ISACTIVER0, ncpu, &reg, true); + + for (i = 0; i < GIC_INTERNAL; i += 4) { + reg = c->gicr_ipriorityr[i] | + (c->gicr_ipriorityr[i + 1] << 8) | + (c->gicr_ipriorityr[i + 2] << 16) | + (c->gicr_ipriorityr[i + 3] << 24); + kvm_gicr_access(s, GICR_IPRIORITYR + i, ncpu, &reg, true); + } + } + + /* Distributor state (shared between all CPUs) */ + reg = s->gicd_statusr[GICV3_NS]; + kvm_gicd_access(s, GICD_STATUSR, &reg, true); + + /* s->enable bitmap -> GICD_ISENABLERn */ + kvm_dist_putbmp(s, GICD_ISENABLER, GICD_ICENABLER, s->enabled); + + /* s->group bitmap -> GICD_IGROUPRn */ + kvm_dist_putbmp(s, GICD_IGROUPR, 0, s->group); + + /* Restore targets before pending to ensure the pending state is set on + * the appropriate CPU interfaces in the kernel + */ + + /* s->gicd_irouter[irq] -> GICD_IROUTERn + * We can't use kvm_dist_put() here because the registers are 64-bit + */ + for (i = GIC_INTERNAL; i < s->num_irq; i++) { + uint32_t offset; + + offset = GICD_IROUTER + (sizeof(uint64_t) * i); + reg = (uint32_t)s->gicd_irouter[i]; + kvm_gicd_access(s, offset, &reg, true); + + offset = GICD_IROUTER + (sizeof(uint64_t) * i) + 4; + reg = (uint32_t)(s->gicd_irouter[i] >> 32); + kvm_gicd_access(s, offset, &reg, true); + } + + /* s->trigger bitmap -> GICD_ICFGRn + * (restore configuration registers before pending IRQs so we treat + * level/edge correctly) + */ + kvm_dist_put_edge_trigger(s, GICD_ICFGR, s->edge_trigger); + + /* s->level bitmap -> line_level */ + kvm_gic_put_line_level_bmp(s, s->level); + + /* s->pending bitmap -> GICD_ISPENDRn */ + kvm_dist_putbmp(s, GICD_ISPENDR, GICD_ICPENDR, s->pending); + + /* s->active bitmap -> GICD_ISACTIVERn */ + kvm_dist_putbmp(s, GICD_ISACTIVER, GICD_ICACTIVER, s->active); + + /* s->gicd_ipriority[] -> GICD_IPRIORITYRn */ + kvm_dist_put_priority(s, GICD_IPRIORITYR, s->gicd_ipriority); + + /* CPU Interface state (one per CPU) */ + + for (ncpu = 0; ncpu < s->num_cpu; ncpu++) { + GICv3CPUState *c = &s->cpu[ncpu]; + int num_pri_bits; + + kvm_gicc_access(s, ICC_SRE_EL1, ncpu, &c->icc_sre_el1, true); + kvm_gicc_access(s, ICC_CTLR_EL1, ncpu, + &c->icc_ctlr_el1[GICV3_NS], true); + kvm_gicc_access(s, ICC_IGRPEN0_EL1, ncpu, + &c->icc_igrpen[GICV3_G0], true); + kvm_gicc_access(s, ICC_IGRPEN1_EL1, ncpu, + &c->icc_igrpen[GICV3_G1NS], true); + kvm_gicc_access(s, ICC_PMR_EL1, ncpu, &c->icc_pmr_el1, true); + kvm_gicc_access(s, ICC_BPR0_EL1, ncpu, &c->icc_bpr[GICV3_G0], true); + kvm_gicc_access(s, ICC_BPR1_EL1, ncpu, &c->icc_bpr[GICV3_G1NS], true); + + num_pri_bits = ((c->icc_ctlr_el1[GICV3_NS] & + ICC_CTLR_EL1_PRIBITS_MASK) >> + ICC_CTLR_EL1_PRIBITS_SHIFT) + 1; + + switch (num_pri_bits) { + case 7: + reg64 = c->icc_apr[GICV3_G0][3]; + kvm_gicc_access(s, ICC_AP0R_EL1(3), ncpu, &reg64, true); + reg64 = c->icc_apr[GICV3_G0][2]; + kvm_gicc_access(s, ICC_AP0R_EL1(2), ncpu, &reg64, true); + /* fall through */ + case 6: + reg64 = c->icc_apr[GICV3_G0][1]; + kvm_gicc_access(s, ICC_AP0R_EL1(1), ncpu, &reg64, true); + /* fall through */ + default: + reg64 = c->icc_apr[GICV3_G0][0]; + kvm_gicc_access(s, ICC_AP0R_EL1(0), ncpu, &reg64, true); + } + + switch (num_pri_bits) { + case 7: + reg64 = c->icc_apr[GICV3_G1NS][3]; + kvm_gicc_access(s, ICC_AP1R_EL1(3), ncpu, &reg64, true); + reg64 = c->icc_apr[GICV3_G1NS][2]; + kvm_gicc_access(s, ICC_AP1R_EL1(2), ncpu, &reg64, true); + /* fall through */ + case 6: + reg64 = c->icc_apr[GICV3_G1NS][1]; +
kvm_gicc_access(s, ICC_AP1R_EL1(1), ncpu, &reg64, true); + /* fall through */ + default: + reg64 = c->icc_apr[GICV3_G1NS][0]; + kvm_gicc_access(s, ICC_AP1R_EL1(0), ncpu, &reg64, true); + } + } +} + +static void kvm_arm_gicv3_get(GICv3State *s) +{ + uint32_t regl, regh, reg; + uint64_t reg64, redist_typer; + int ncpu, i; + + kvm_arm_gicv3_check(s); + + kvm_gicr_access(s, GICR_TYPER, 0, &regl, false); + kvm_gicr_access(s, GICR_TYPER + 4, 0, &regh, false); + redist_typer = ((uint64_t)regh << 32) | regl; + + kvm_gicd_access(s, GICD_CTLR, &reg, false); + s->gicd_ctlr = reg; + + /* Redistributor state (one per CPU) */ + + for (ncpu = 0; ncpu < s->num_cpu; ncpu++) { + GICv3CPUState *c = &s->cpu[ncpu]; + + kvm_gicr_access(s, GICR_CTLR, ncpu, &reg, false); + c->gicr_ctlr = reg; + + kvm_gicr_access(s, GICR_STATUSR, ncpu, &reg, false); + c->gicr_statusr[GICV3_NS] = reg; + + kvm_gicr_access(s, GICR_WAKER, ncpu, &reg, false); + c->gicr_waker = reg; + + kvm_gicr_access(s, GICR_IGROUPR0, ncpu, &reg, false); + c->gicr_igroupr0 = reg; + kvm_gicr_access(s, GICR_ISENABLER0, ncpu, &reg, false); + c->gicr_ienabler0 = reg; + kvm_gicr_access(s, GICR_ICFGR1, ncpu, &reg, false); + c->edge_trigger = half_unshuffle32(reg >> 1) << 16; + kvm_gic_line_level_access(s, 0, ncpu, &reg, false); + c->level = reg; + kvm_gicr_access(s, GICR_ISPENDR0, ncpu, &reg, false); + c->gicr_ipendr0 = reg; + kvm_gicr_access(s, GICR_ISACTIVER0, ncpu, &reg, false); + c->gicr_iactiver0 = reg; + + for (i = 0; i < GIC_INTERNAL; i += 4) { + kvm_gicr_access(s, GICR_IPRIORITYR + i, ncpu, &reg, false); + c->gicr_ipriorityr[i] = extract32(reg, 0, 8); + c->gicr_ipriorityr[i + 1] = extract32(reg, 8, 8); + c->gicr_ipriorityr[i + 2] = extract32(reg, 16, 8); + c->gicr_ipriorityr[i + 3] = extract32(reg, 24, 8); + } + } + + if (redist_typer & GICR_TYPER_PLPIS) { + for (ncpu = 0; ncpu < s->num_cpu; ncpu++) { + GICv3CPUState *c = &s->cpu[ncpu]; + + kvm_gicr_access(s, GICR_PROPBASER, ncpu, &regl, false); + kvm_gicr_access(s, GICR_PROPBASER + 4, ncpu, &regh, false); + c->gicr_propbaser = ((uint64_t)regh << 32) | regl; + + kvm_gicr_access(s, GICR_PENDBASER, ncpu, &regl, false); + kvm_gicr_access(s, GICR_PENDBASER + 4, ncpu, &regh, false); + c->gicr_pendbaser = ((uint64_t)regh << 32) | regl; + } + } + + /* Distributor state (shared between all CPUs) */ + + kvm_gicd_access(s, GICD_STATUSR, &reg, false); + s->gicd_statusr[GICV3_NS] = reg; + + /* GICD_IGROUPRn -> s->group bitmap */ + kvm_dist_getbmp(s, GICD_IGROUPR, s->group); + + /* GICD_ISENABLERn -> s->enabled bitmap */ + kvm_dist_getbmp(s, GICD_ISENABLER, s->enabled); + + /* Line level of irq */ + kvm_gic_get_line_level_bmp(s, s->level); + /* GICD_ISPENDRn -> s->pending bitmap */ + kvm_dist_getbmp(s, GICD_ISPENDR, s->pending); + + /* GICD_ISACTIVERn -> s->active bitmap */ + kvm_dist_getbmp(s, GICD_ISACTIVER, s->active); + + /* GICD_ICFGRn -> s->trigger bitmap */ + kvm_dist_get_edge_trigger(s, GICD_ICFGR, s->edge_trigger); + + /* GICD_IPRIORITYRn -> s->gicd_ipriority[] */ + kvm_dist_get_priority(s, GICD_IPRIORITYR, s->gicd_ipriority); + + /* GICD_IROUTERn -> s->gicd_irouter[irq] */ + for (i = GIC_INTERNAL; i < s->num_irq; i++) { + uint32_t offset; + + offset = GICD_IROUTER + (sizeof(uint64_t) * i); + kvm_gicd_access(s, offset, &regl, false); + offset = GICD_IROUTER + (sizeof(uint64_t) * i) + 4; + kvm_gicd_access(s, offset, &regh, false); + s->gicd_irouter[i] = ((uint64_t)regh << 32) | regl; + } + + /***************************************************************** + * CPU Interface(s) State + */ + + for (ncpu = 0; ncpu < s->num_cpu; ncpu++) { + GICv3CPUState *c =
&s->cpu[ncpu]; + int num_pri_bits; + + kvm_gicc_access(s, ICC_SRE_EL1, ncpu, &c->icc_sre_el1, false); + kvm_gicc_access(s, ICC_CTLR_EL1, ncpu, + &c->icc_ctlr_el1[GICV3_NS], false); + kvm_gicc_access(s, ICC_IGRPEN0_EL1, ncpu, + &c->icc_igrpen[GICV3_G0], false); + kvm_gicc_access(s, ICC_IGRPEN1_EL1, ncpu, + &c->icc_igrpen[GICV3_G1NS], false); + kvm_gicc_access(s, ICC_PMR_EL1, ncpu, &c->icc_pmr_el1, false); + kvm_gicc_access(s, ICC_BPR0_EL1, ncpu, &c->icc_bpr[GICV3_G0], false); + kvm_gicc_access(s, ICC_BPR1_EL1, ncpu, &c->icc_bpr[GICV3_G1NS], false); + num_pri_bits = ((c->icc_ctlr_el1[GICV3_NS] & + ICC_CTLR_EL1_PRIBITS_MASK) >> + ICC_CTLR_EL1_PRIBITS_SHIFT) + 1; + + switch (num_pri_bits) { + case 7: + kvm_gicc_access(s, ICC_AP0R_EL1(3), ncpu, &reg64, false); + c->icc_apr[GICV3_G0][3] = reg64; + kvm_gicc_access(s, ICC_AP0R_EL1(2), ncpu, &reg64, false); + c->icc_apr[GICV3_G0][2] = reg64; + /* fall through */ + case 6: + kvm_gicc_access(s, ICC_AP0R_EL1(1), ncpu, &reg64, false); + c->icc_apr[GICV3_G0][1] = reg64; + /* fall through */ + default: + kvm_gicc_access(s, ICC_AP0R_EL1(0), ncpu, &reg64, false); + c->icc_apr[GICV3_G0][0] = reg64; + } + + switch (num_pri_bits) { + case 7: + kvm_gicc_access(s, ICC_AP1R_EL1(3), ncpu, &reg64, false); + c->icc_apr[GICV3_G1NS][3] = reg64; + kvm_gicc_access(s, ICC_AP1R_EL1(2), ncpu, &reg64, false); + c->icc_apr[GICV3_G1NS][2] = reg64; + /* fall through */ + case 6: + kvm_gicc_access(s, ICC_AP1R_EL1(1), ncpu, &reg64, false); + c->icc_apr[GICV3_G1NS][1] = reg64; + /* fall through */ + default: + kvm_gicc_access(s, ICC_AP1R_EL1(0), ncpu, &reg64, false); + c->icc_apr[GICV3_G1NS][0] = reg64; + } + } +} + +static void arm_gicv3_icc_reset(CPUARMState *env, const ARMCPRegInfo *ri) +{ + GICv3State *s; + GICv3CPUState *c; + + c = (GICv3CPUState *)env->gicv3state; + s = c->gic; + + c->icc_pmr_el1 = 0; + c->icc_bpr[GICV3_G0] = GIC_MIN_BPR; + c->icc_bpr[GICV3_G1] = GIC_MIN_BPR; + c->icc_bpr[GICV3_G1NS] = GIC_MIN_BPR; + + c->icc_sre_el1 = 0x7; + memset(c->icc_apr, 0, sizeof(c->icc_apr)); + memset(c->icc_igrpen, 0, sizeof(c->icc_igrpen)); + + if (s->migration_blocker) { + return; + } + + /* Initialize to actual HW supported configuration */ + kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS, + KVM_VGIC_ATTR(ICC_CTLR_EL1, c->gicr_typer), + &c->icc_ctlr_el1[GICV3_NS], false, &error_abort); + + c->icc_ctlr_el1[GICV3_S] = c->icc_ctlr_el1[GICV3_NS]; +} + +static void kvm_arm_gicv3_reset(DeviceState *dev) +{ + GICv3State *s = ARM_GICV3_COMMON(dev); + KVMARMGICv3Class *kgc = KVM_ARM_GICV3_GET_CLASS(s); + + DPRINTF("Reset\n"); + + kgc->parent_reset(dev); + + if (s->migration_blocker) { + DPRINTF("Cannot put kernel gic state, no kernel interface\n"); + return; + } + + kvm_arm_gicv3_put(s); +} + +/* + * The CPU interface registers of the GIC need to be reset on CPU reset. + * To have arm_gicv3_icc_reset() called on CPU reset, we register the + * ARMCPRegInfo below. As we reset the whole CPU interface in a single + * register reset, we define only one register of the CPU interface + * instead of defining all the registers. + */ +static const ARMCPRegInfo gicv3_cpuif_reginfo[] = { + { .name = "ICC_CTLR_EL1", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 4, + /* + * If ARM_CP_NOP is used, resetfn is not called, + * so ARM_CP_NO_RAW is the appropriate type.
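+ * (ARM_CP_NO_RAW registers are also excluded from raw state + * save/restore for migration, which is what we want here since the + * kernel holds the real state.)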
+ */ + .type = ARM_CP_NO_RAW, + .access = PL1_RW, + .readfn = arm_cp_read_zero, + .writefn = arm_cp_write_ignore, + /* + * We hang the whole cpu interface reset routine off here + * rather than parcelling it out into one little function + * per register + */ + .resetfn = arm_gicv3_icc_reset, + }, + REGINFO_SENTINEL +}; + +/** + * vm_change_state_handler - VM change state callback aiming at flushing + * RDIST pending tables into guest RAM + * + * The tables get flushed to guest RAM whenever the VM gets stopped. + */ +static void vm_change_state_handler(void *opaque, bool running, + RunState state) +{ + GICv3State *s = (GICv3State *)opaque; + Error *err = NULL; + int ret; + + if (running) { + return; + } + + ret = kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_CTRL, + KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES, + NULL, true, &err); + if (err) { + error_report_err(err); + } + if (ret < 0 && ret != -EFAULT) { + abort(); + } +} + + +static void kvm_arm_gicv3_realize(DeviceState *dev, Error **errp) +{ + GICv3State *s = KVM_ARM_GICV3(dev); + KVMARMGICv3Class *kgc = KVM_ARM_GICV3_GET_CLASS(s); + bool multiple_redist_region_allowed; + Error *local_err = NULL; + int i; + + DPRINTF("kvm_arm_gicv3_realize\n"); + + kgc->parent_realize(dev, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + + if (s->security_extn) { + error_setg(errp, "the in-kernel VGICv3 does not implement the " + "security extensions"); + return; + } + + gicv3_init_irqs_and_mmio(s, kvm_arm_gicv3_set_irq, NULL); + + for (i = 0; i < s->num_cpu; i++) { + ARMCPU *cpu = ARM_CPU(qemu_get_cpu(i)); + + define_arm_cp_regs(cpu, gicv3_cpuif_reginfo); + } + + /* Try to create the device via the device control API */ + s->dev_fd = kvm_create_device(kvm_state, KVM_DEV_TYPE_ARM_VGIC_V3, false); + if (s->dev_fd < 0) { + error_setg_errno(errp, -s->dev_fd, "error creating in-kernel VGIC"); + return; + } + + multiple_redist_region_allowed = + kvm_device_check_attr(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, + KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION); + + if (!multiple_redist_region_allowed && s->nb_redist_regions > 1) { + error_setg(errp, "Multiple VGICv3 redistributor regions are not " + "supported by this host kernel"); + error_append_hint(errp, "A maximum of %d VCPUs can be used", + s->redist_region_count[0]); + return; + } + + kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_NR_IRQS, + 0, &s->num_irq, true, &error_abort); + + /* Tell the kernel to complete VGIC initialization now */ + kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_CTRL, + KVM_DEV_ARM_VGIC_CTRL_INIT, NULL, true, &error_abort); + + kvm_arm_register_device(&s->iomem_dist, -1, KVM_DEV_ARM_VGIC_GRP_ADDR, + KVM_VGIC_V3_ADDR_TYPE_DIST, s->dev_fd, 0); + + if (!multiple_redist_region_allowed) { + kvm_arm_register_device(&s->redist_regions[0].iomem, -1, + KVM_DEV_ARM_VGIC_GRP_ADDR, + KVM_VGIC_V3_ADDR_TYPE_REDIST, s->dev_fd, 0); + } else { + /* we register regions in reverse order as "devices" are inserted at + * the head of a QSLIST and the list is then popped from the head + * onwards by kvm_arm_machine_init_done() + */ + for (i = s->nb_redist_regions - 1; i >= 0; i--) { + /* Address mask made of the rdist region index and count */ + uint64_t addr_ormask = + i | ((uint64_t)s->redist_region_count[i] << 52); + + kvm_arm_register_device(&s->redist_regions[i].iomem, -1, + KVM_DEV_ARM_VGIC_GRP_ADDR, + KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, + s->dev_fd, addr_ormask); + } + } + + if (kvm_has_gsi_routing()) { + /* set up irq routing */ + for (i = 0; i < s->num_irq - 
GIC_INTERNAL; ++i) { + kvm_irqchip_add_irq_route(kvm_state, i, 0, i); + } + + kvm_gsi_routing_allowed = true; + + kvm_irqchip_commit_routes(kvm_state); + } + + if (!kvm_device_check_attr(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_DIST_REGS, + GICD_CTLR)) { + error_setg(&s->migration_blocker, "This operating system kernel does " + "not support vGICv3 migration"); + if (migrate_add_blocker(s->migration_blocker, errp) < 0) { + error_free(s->migration_blocker); + return; + } + } + if (kvm_device_check_attr(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_CTRL, + KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES)) { + qemu_add_vm_change_state_handler(vm_change_state_handler, s); + } +} + +static void kvm_arm_gicv3_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + ARMGICv3CommonClass *agcc = ARM_GICV3_COMMON_CLASS(klass); + KVMARMGICv3Class *kgc = KVM_ARM_GICV3_CLASS(klass); + + agcc->pre_save = kvm_arm_gicv3_get; + agcc->post_load = kvm_arm_gicv3_put; + device_class_set_parent_realize(dc, kvm_arm_gicv3_realize, + &kgc->parent_realize); + device_class_set_parent_reset(dc, kvm_arm_gicv3_reset, &kgc->parent_reset); +} + +static const TypeInfo kvm_arm_gicv3_info = { + .name = TYPE_KVM_ARM_GICV3, + .parent = TYPE_ARM_GICV3_COMMON, + .instance_size = sizeof(GICv3State), + .class_init = kvm_arm_gicv3_class_init, + .class_size = sizeof(KVMARMGICv3Class), +}; + +static void kvm_arm_gicv3_register_types(void) +{ + type_register_static(&kvm_arm_gicv3_info); +} + +type_init(kvm_arm_gicv3_register_types) diff --git a/hw/intc/arm_gicv3_redist.c b/hw/intc/arm_gicv3_redist.c new file mode 100644 index 000000000..c8ff3eca0 --- /dev/null +++ b/hw/intc/arm_gicv3_redist.c @@ -0,0 +1,738 @@ +/* + * ARM GICv3 emulation: Redistributor + * + * Copyright (c) 2015 Huawei. + * Copyright (c) 2016 Linaro Limited. + * Written by Shlomo Pongratz, Peter Maydell + * + * This code is licensed under the GPL, version 2 or (at your option) + * any later version. + */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "trace.h" +#include "gicv3_internal.h" + +static uint32_t mask_group(GICv3CPUState *cs, MemTxAttrs attrs) +{ + /* Return a 32-bit mask which should be applied for this set of 32 + * interrupts; each bit is 1 if access is permitted by the + * combination of attrs.secure and GICR_GROUPR. (GICR_NSACR does + * not affect config register accesses, unlike GICD_NSACR.) 
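+ * For example, a Non-secure access with GICD_CTLR.DS == 0 only sees + * interrupts whose GICR_IGROUPR0 bit is 1 (Non-secure Group 1); all + * other bits read as zero and ignore writes.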
+ */ + if (!attrs.secure && !(cs->gic->gicd_ctlr & GICD_CTLR_DS)) { + /* bits for Group 0 or Secure Group 1 interrupts are RAZ/WI */ + return cs->gicr_igroupr0; + } + return 0xFFFFFFFFU; +} + +static int gicr_ns_access(GICv3CPUState *cs, int irq) +{ + /* Return the 2 bit NSACR.NS_access field for this SGI */ + assert(irq < 16); + return extract32(cs->gicr_nsacr, irq * 2, 2); +} + +static void gicr_write_set_bitmap_reg(GICv3CPUState *cs, MemTxAttrs attrs, + uint32_t *reg, uint32_t val) +{ + /* Helper routine to implement writing to a "set-bitmap" register */ + val &= mask_group(cs, attrs); + *reg |= val; + gicv3_redist_update(cs); +} + +static void gicr_write_clear_bitmap_reg(GICv3CPUState *cs, MemTxAttrs attrs, + uint32_t *reg, uint32_t val) +{ + /* Helper routine to implement writing to a "clear-bitmap" register */ + val &= mask_group(cs, attrs); + *reg &= ~val; + gicv3_redist_update(cs); +} + +static uint32_t gicr_read_bitmap_reg(GICv3CPUState *cs, MemTxAttrs attrs, + uint32_t reg) +{ + reg &= mask_group(cs, attrs); + return reg; +} + +static uint8_t gicr_read_ipriorityr(GICv3CPUState *cs, MemTxAttrs attrs, + int irq) +{ + /* Read the value of GICR_IPRIORITYR for the specified interrupt, + * honouring security state (these are RAZ/WI for Group 0 or Secure + * Group 1 interrupts). + */ + uint32_t prio; + + prio = cs->gicr_ipriorityr[irq]; + + if (!attrs.secure && !(cs->gic->gicd_ctlr & GICD_CTLR_DS)) { + if (!(cs->gicr_igroupr0 & (1U << irq))) { + /* Fields for Group 0 or Secure Group 1 interrupts are RAZ/WI */ + return 0; + } + /* NS view of the interrupt priority */ + prio = (prio << 1) & 0xff; + } + return prio; +} + +static void gicr_write_ipriorityr(GICv3CPUState *cs, MemTxAttrs attrs, int irq, + uint8_t value) +{ + /* Write the value of GICD_IPRIORITYR for the specified interrupt, + * honouring security state (these are RAZ/WI for Group 0 or Secure + * Group 1 interrupts). + */ + if (!attrs.secure && !(cs->gic->gicd_ctlr & GICD_CTLR_DS)) { + if (!(cs->gicr_igroupr0 & (1U << irq))) { + /* Fields for Group 0 or Secure Group 1 interrupts are RAZ/WI */ + return; + } + /* NS view of the interrupt priority */ + value = 0x80 | (value >> 1); + } + cs->gicr_ipriorityr[irq] = value; +} + +static MemTxResult gicr_readb(GICv3CPUState *cs, hwaddr offset, + uint64_t *data, MemTxAttrs attrs) +{ + switch (offset) { + case GICR_IPRIORITYR ... GICR_IPRIORITYR + 0x1f: + *data = gicr_read_ipriorityr(cs, attrs, offset - GICR_IPRIORITYR); + return MEMTX_OK; + default: + return MEMTX_ERROR; + } +} + +static MemTxResult gicr_writeb(GICv3CPUState *cs, hwaddr offset, + uint64_t value, MemTxAttrs attrs) +{ + switch (offset) { + case GICR_IPRIORITYR ... 
GICR_IPRIORITYR + 0x1f: + gicr_write_ipriorityr(cs, attrs, offset - GICR_IPRIORITYR, value); + gicv3_redist_update(cs); + return MEMTX_OK; + default: + return MEMTX_ERROR; + } +} + +static MemTxResult gicr_readl(GICv3CPUState *cs, hwaddr offset, + uint64_t *data, MemTxAttrs attrs) +{ + switch (offset) { + case GICR_CTLR: + *data = cs->gicr_ctlr; + return MEMTX_OK; + case GICR_IIDR: + *data = gicv3_iidr(); + return MEMTX_OK; + case GICR_TYPER: + *data = extract64(cs->gicr_typer, 0, 32); + return MEMTX_OK; + case GICR_TYPER + 4: + *data = extract64(cs->gicr_typer, 32, 32); + return MEMTX_OK; + case GICR_STATUSR: + /* RAZ/WI for us (this is an optional register and our implementation + * does not track RO/WO/reserved violations to report them to the guest) + */ + *data = 0; + return MEMTX_OK; + case GICR_WAKER: + *data = cs->gicr_waker; + return MEMTX_OK; + case GICR_PROPBASER: + *data = extract64(cs->gicr_propbaser, 0, 32); + return MEMTX_OK; + case GICR_PROPBASER + 4: + *data = extract64(cs->gicr_propbaser, 32, 32); + return MEMTX_OK; + case GICR_PENDBASER: + *data = extract64(cs->gicr_pendbaser, 0, 32); + return MEMTX_OK; + case GICR_PENDBASER + 4: + *data = extract64(cs->gicr_pendbaser, 32, 32); + return MEMTX_OK; + case GICR_IGROUPR0: + if (!attrs.secure && !(cs->gic->gicd_ctlr & GICD_CTLR_DS)) { + *data = 0; + return MEMTX_OK; + } + *data = cs->gicr_igroupr0; + return MEMTX_OK; + case GICR_ISENABLER0: + case GICR_ICENABLER0: + *data = gicr_read_bitmap_reg(cs, attrs, cs->gicr_ienabler0); + return MEMTX_OK; + case GICR_ISPENDR0: + case GICR_ICPENDR0: + { + /* The pending register reads as the logical OR of the pending + * latch and the input line level for level-triggered interrupts. + */ + uint32_t val = cs->gicr_ipendr0 | (~cs->edge_trigger & cs->level); + *data = gicr_read_bitmap_reg(cs, attrs, val); + return MEMTX_OK; + } + case GICR_ISACTIVER0: + case GICR_ICACTIVER0: + *data = gicr_read_bitmap_reg(cs, attrs, cs->gicr_iactiver0); + return MEMTX_OK; + case GICR_IPRIORITYR ... GICR_IPRIORITYR + 0x1f: + { + int i, irq = offset - GICR_IPRIORITYR; + uint32_t value = 0; + + for (i = irq + 3; i >= irq; i--) { + value <<= 8; + value |= gicr_read_ipriorityr(cs, attrs, i); + } + *data = value; + return MEMTX_OK; + } + case GICR_ICFGR0: + case GICR_ICFGR1: + { + /* Our edge_trigger bitmap is one bit per irq; take the correct + * half of it, and spread it out into the odd bits. + */ + uint32_t value; + + value = cs->edge_trigger & mask_group(cs, attrs); + value = extract32(value, (offset == GICR_ICFGR1) ? 16 : 0, 16); + value = half_shuffle32(value) << 1; + *data = value; + return MEMTX_OK; + } + case GICR_IGRPMODR0: + if ((cs->gic->gicd_ctlr & GICD_CTLR_DS) || !attrs.secure) { + /* RAZ/WI if security disabled, or if + * security enabled and this is an NS access + */ + *data = 0; + return MEMTX_OK; + } + *data = cs->gicr_igrpmodr0; + return MEMTX_OK; + case GICR_NSACR: + if ((cs->gic->gicd_ctlr & GICD_CTLR_DS) || !attrs.secure) { + /* RAZ/WI if security disabled, or if + * security enabled and this is an NS access + */ + *data = 0; + return MEMTX_OK; + } + *data = cs->gicr_nsacr; + return MEMTX_OK; + case GICR_IDREGS ... GICR_IDREGS + 0x2f: + *data = gicv3_idreg(offset - GICR_IDREGS); + return MEMTX_OK; + default: + return MEMTX_ERROR; + } +} + +static MemTxResult gicr_writel(GICv3CPUState *cs, hwaddr offset, + uint64_t value, MemTxAttrs attrs) +{ + switch (offset) { + case GICR_CTLR: + /* For our implementation, GICR_TYPER.DPGS is 0 and so all + * the DPG bits are RAZ/WI. 
We don't do anything asynchronously, + * so UWP and RWP are RAZ/WI. GICR_TYPER.LPIS is 1 (we + * implement LPIs) so Enable_LPIs is programmable. + */ + if (cs->gicr_typer & GICR_TYPER_PLPIS) { + if (value & GICR_CTLR_ENABLE_LPIS) { + cs->gicr_ctlr |= GICR_CTLR_ENABLE_LPIS; + /* Check for any pending interrupts in the pending table */ + gicv3_redist_update_lpi(cs); + } else { + cs->gicr_ctlr &= ~GICR_CTLR_ENABLE_LPIS; + /* cs->hppi might have been an LPI; recalculate */ + gicv3_redist_update(cs); + } + } + return MEMTX_OK; + case GICR_STATUSR: + /* RAZ/WI for our implementation */ + return MEMTX_OK; + case GICR_WAKER: + /* Only the ProcessorSleep bit is writeable. When the guest sets + * it, it requests that we transition the channel between the + * redistributor and the cpu interface to quiescent, and that + * we set the ChildrenAsleep bit once the interface has reached the + * quiescent state. + * Setting the ProcessorSleep to 0 reverses the quiescing, and + * ChildrenAsleep is cleared once the transition is complete. + * Since our interface is not asynchronous, we complete these + * transitions instantaneously, so we set ChildrenAsleep to the + * same value as ProcessorSleep here. + */ + value &= GICR_WAKER_ProcessorSleep; + if (value & GICR_WAKER_ProcessorSleep) { + value |= GICR_WAKER_ChildrenAsleep; + } + cs->gicr_waker = value; + return MEMTX_OK; + case GICR_PROPBASER: + cs->gicr_propbaser = deposit64(cs->gicr_propbaser, 0, 32, value); + return MEMTX_OK; + case GICR_PROPBASER + 4: + cs->gicr_propbaser = deposit64(cs->gicr_propbaser, 32, 32, value); + return MEMTX_OK; + case GICR_PENDBASER: + cs->gicr_pendbaser = deposit64(cs->gicr_pendbaser, 0, 32, value); + return MEMTX_OK; + case GICR_PENDBASER + 4: + cs->gicr_pendbaser = deposit64(cs->gicr_pendbaser, 32, 32, value); + return MEMTX_OK; + case GICR_IGROUPR0: + if (!attrs.secure && !(cs->gic->gicd_ctlr & GICD_CTLR_DS)) { + return MEMTX_OK; + } + cs->gicr_igroupr0 = value; + gicv3_redist_update(cs); + return MEMTX_OK; + case GICR_ISENABLER0: + gicr_write_set_bitmap_reg(cs, attrs, &cs->gicr_ienabler0, value); + return MEMTX_OK; + case GICR_ICENABLER0: + gicr_write_clear_bitmap_reg(cs, attrs, &cs->gicr_ienabler0, value); + return MEMTX_OK; + case GICR_ISPENDR0: + gicr_write_set_bitmap_reg(cs, attrs, &cs->gicr_ipendr0, value); + return MEMTX_OK; + case GICR_ICPENDR0: + gicr_write_clear_bitmap_reg(cs, attrs, &cs->gicr_ipendr0, value); + return MEMTX_OK; + case GICR_ISACTIVER0: + gicr_write_set_bitmap_reg(cs, attrs, &cs->gicr_iactiver0, value); + return MEMTX_OK; + case GICR_ICACTIVER0: + gicr_write_clear_bitmap_reg(cs, attrs, &cs->gicr_iactiver0, value); + return MEMTX_OK; + case GICR_IPRIORITYR ... GICR_IPRIORITYR + 0x1f: + { + int i, irq = offset - GICR_IPRIORITYR; + + for (i = irq; i < irq + 4; i++, value >>= 8) { + gicr_write_ipriorityr(cs, attrs, i, value); + } + gicv3_redist_update(cs); + return MEMTX_OK; + } + case GICR_ICFGR0: + /* Register is all RAZ/WI or RAO/WI bits */ + return MEMTX_OK; + case GICR_ICFGR1: + { + uint32_t mask; + + /* Since our edge_trigger bitmap is one bit per irq, our input + * 32-bits will compress down into 16 bits which we need + * to write into the bitmap.
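+ * Concretely: (value >> 1) moves each 2-bit field's config bit to the + * even bit positions, half_unshuffle32() packs those even bits into + * bits [15:0], and the << 16 below places them in the PPI half of + * cs->edge_trigger.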
+ */ + value = half_unshuffle32(value >> 1) << 16; + mask = mask_group(cs, attrs) & 0xffff0000U; + + cs->edge_trigger &= ~mask; + cs->edge_trigger |= (value & mask); + + gicv3_redist_update(cs); + return MEMTX_OK; + } + case GICR_IGRPMODR0: + if ((cs->gic->gicd_ctlr & GICD_CTLR_DS) || !attrs.secure) { + /* RAZ/WI if security disabled, or if + * security enabled and this is an NS access + */ + return MEMTX_OK; + } + cs->gicr_igrpmodr0 = value; + gicv3_redist_update(cs); + return MEMTX_OK; + case GICR_NSACR: + if ((cs->gic->gicd_ctlr & GICD_CTLR_DS) || !attrs.secure) { + /* RAZ/WI if security disabled, or if + * security enabled and this is an NS access + */ + return MEMTX_OK; + } + cs->gicr_nsacr = value; + /* no update required as this only affects access permission checks */ + return MEMTX_OK; + case GICR_IIDR: + case GICR_TYPER: + case GICR_IDREGS ... GICR_IDREGS + 0x2f: + /* RO registers, ignore the write */ + qemu_log_mask(LOG_GUEST_ERROR, + "%s: invalid guest write to RO register at offset " + TARGET_FMT_plx "\n", __func__, offset); + return MEMTX_OK; + default: + return MEMTX_ERROR; + } +} + +static MemTxResult gicr_readll(GICv3CPUState *cs, hwaddr offset, + uint64_t *data, MemTxAttrs attrs) +{ + switch (offset) { + case GICR_TYPER: + *data = cs->gicr_typer; + return MEMTX_OK; + case GICR_PROPBASER: + *data = cs->gicr_propbaser; + return MEMTX_OK; + case GICR_PENDBASER: + *data = cs->gicr_pendbaser; + return MEMTX_OK; + default: + return MEMTX_ERROR; + } +} + +static MemTxResult gicr_writell(GICv3CPUState *cs, hwaddr offset, + uint64_t value, MemTxAttrs attrs) +{ + switch (offset) { + case GICR_PROPBASER: + cs->gicr_propbaser = value; + return MEMTX_OK; + case GICR_PENDBASER: + cs->gicr_pendbaser = value; + return MEMTX_OK; + case GICR_TYPER: + /* RO register, ignore the write */ + qemu_log_mask(LOG_GUEST_ERROR, + "%s: invalid guest write to RO register at offset " + TARGET_FMT_plx "\n", __func__, offset); + return MEMTX_OK; + default: + return MEMTX_ERROR; + } +} + +MemTxResult gicv3_redist_read(void *opaque, hwaddr offset, uint64_t *data, + unsigned size, MemTxAttrs attrs) +{ + GICv3RedistRegion *region = opaque; + GICv3State *s = region->gic; + GICv3CPUState *cs; + MemTxResult r; + int cpuidx; + + assert((offset & (size - 1)) == 0); + + /* + * There are (for GICv3) two 64K redistributor pages per CPU. + * In some cases the redistributor pages for all CPUs are not + * contiguous (eg on the virt board they are split into two + * parts if there are too many CPUs to all fit in the same place + * in the memory map); if so then the GIC has multiple MemoryRegions + * for the redistributors. + */ + cpuidx = region->cpuidx + offset / GICV3_REDIST_SIZE; + offset %= GICV3_REDIST_SIZE; + + cs = &s->cpu[cpuidx]; + + switch (size) { + case 1: + r = gicr_readb(cs, offset, data, attrs); + break; + case 4: + r = gicr_readl(cs, offset, data, attrs); + break; + case 8: + r = gicr_readll(cs, offset, data, attrs); + break; + default: + r = MEMTX_ERROR; + break; + } + + if (r == MEMTX_ERROR) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: invalid guest read at offset " TARGET_FMT_plx + " size %u\n", __func__, offset, size); + trace_gicv3_redist_badread(gicv3_redist_affid(cs), offset, + size, attrs.secure); + /* The spec requires that reserved registers are RAZ/WI; + * so use MEMTX_ERROR returns from leaf functions as a way to + * trigger the guest-error logging but don't return it to + * the caller, or we'll cause a spurious guest data abort. 
+ */ + r = MEMTX_OK; + *data = 0; + } else { + trace_gicv3_redist_read(gicv3_redist_affid(cs), offset, *data, + size, attrs.secure); + } + return r; +} + +MemTxResult gicv3_redist_write(void *opaque, hwaddr offset, uint64_t data, + unsigned size, MemTxAttrs attrs) +{ + GICv3RedistRegion *region = opaque; + GICv3State *s = region->gic; + GICv3CPUState *cs; + MemTxResult r; + int cpuidx; + + assert((offset & (size - 1)) == 0); + + /* + * There are (for GICv3) two 64K redistributor pages per CPU. + * In some cases the redistributor pages for all CPUs are not + * contiguous (eg on the virt board they are split into two + * parts if there are too many CPUs to all fit in the same place + * in the memory map); if so then the GIC has multiple MemoryRegions + * for the redistributors. + */ + cpuidx = region->cpuidx + offset / GICV3_REDIST_SIZE; + offset %= GICV3_REDIST_SIZE; + + cs = &s->cpu[cpuidx]; + + switch (size) { + case 1: + r = gicr_writeb(cs, offset, data, attrs); + break; + case 4: + r = gicr_writel(cs, offset, data, attrs); + break; + case 8: + r = gicr_writell(cs, offset, data, attrs); + break; + default: + r = MEMTX_ERROR; + break; + } + + if (r == MEMTX_ERROR) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: invalid guest write at offset " TARGET_FMT_plx + " size %u\n", __func__, offset, size); + trace_gicv3_redist_badwrite(gicv3_redist_affid(cs), offset, data, + size, attrs.secure); + /* The spec requires that reserved registers are RAZ/WI; + * so use MEMTX_ERROR returns from leaf functions as a way to + * trigger the guest-error logging but don't return it to + * the caller, or we'll cause a spurious guest data abort. + */ + r = MEMTX_OK; + } else { + trace_gicv3_redist_write(gicv3_redist_affid(cs), offset, data, + size, attrs.secure); + } + return r; +} + +static void gicv3_redist_check_lpi_priority(GICv3CPUState *cs, int irq) +{ + AddressSpace *as = &cs->gic->dma_as; + uint64_t lpict_baddr; + uint8_t lpite; + uint8_t prio; + + lpict_baddr = cs->gicr_propbaser & R_GICR_PROPBASER_PHYADDR_MASK; + + address_space_read(as, lpict_baddr + ((irq - GICV3_LPI_INTID_START) * + sizeof(lpite)), MEMTXATTRS_UNSPECIFIED, &lpite, + sizeof(lpite)); + + if (!(lpite & LPI_CTE_ENABLED)) { + return; + } + + if (cs->gic->gicd_ctlr & GICD_CTLR_DS) { + prio = lpite & LPI_PRIORITY_MASK; + } else { + prio = ((lpite & LPI_PRIORITY_MASK) >> 1) | 0x80; + } + + if ((prio < cs->hpplpi.prio) || + ((prio == cs->hpplpi.prio) && (irq <= cs->hpplpi.irq))) { + cs->hpplpi.irq = irq; + cs->hpplpi.prio = prio; + /* LPIs are always non-secure Grp1 interrupts */ + cs->hpplpi.grp = GICV3_G1NS; + } +} + +void gicv3_redist_update_lpi_only(GICv3CPUState *cs) +{ + /* + * This function scans the LPI pending table and for each pending + * LPI, reads the corresponding entry from LPI configuration table + * to extract the priority info and determine if the current LPI + * priority is lower than the last computed high priority lpi interrupt. + * If yes, replace current LPI as the new high priority lpi interrupt. 
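+ * ("Lower priority" here means a numerically smaller value, since + * smaller GIC priority values are more urgent.)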
+ */ + AddressSpace *as = &cs->gic->dma_as; + uint64_t lpipt_baddr; + uint32_t pendt_size = 0; + uint8_t pend; + int i, bit; + uint64_t idbits; + + idbits = MIN(FIELD_EX64(cs->gicr_propbaser, GICR_PROPBASER, IDBITS), + GICD_TYPER_IDBITS); + + if (!(cs->gicr_ctlr & GICR_CTLR_ENABLE_LPIS) || !cs->gicr_propbaser || + !cs->gicr_pendbaser) { + return; + } + + cs->hpplpi.prio = 0xff; + + lpipt_baddr = cs->gicr_pendbaser & R_GICR_PENDBASER_PHYADDR_MASK; + + /* Determine the highest priority pending interrupt among LPIs */ + pendt_size = (1ULL << (idbits + 1)); + + for (i = GICV3_LPI_INTID_START / 8; i < pendt_size / 8; i++) { + address_space_read(as, lpipt_baddr + i, MEMTXATTRS_UNSPECIFIED, &pend, + sizeof(pend)); + + while (pend) { + bit = ctz32(pend); + gicv3_redist_check_lpi_priority(cs, i * 8 + bit); + pend &= ~(1 << bit); + } + } +} + +void gicv3_redist_update_lpi(GICv3CPUState *cs) +{ + gicv3_redist_update_lpi_only(cs); + gicv3_redist_update(cs); +} + +void gicv3_redist_lpi_pending(GICv3CPUState *cs, int irq, int level) +{ + /* + * This function updates the pending bit in lpi pending table for + * the irq being activated or deactivated. + */ + AddressSpace *as = &cs->gic->dma_as; + uint64_t lpipt_baddr; + bool ispend = false; + uint8_t pend; + + /* + * get the bit value corresponding to this irq in the + * lpi pending table + */ + lpipt_baddr = cs->gicr_pendbaser & R_GICR_PENDBASER_PHYADDR_MASK; + + address_space_read(as, lpipt_baddr + ((irq / 8) * sizeof(pend)), + MEMTXATTRS_UNSPECIFIED, &pend, sizeof(pend)); + + ispend = extract32(pend, irq % 8, 1); + + /* no change in the value of pending bit, return */ + if (ispend == level) { + return; + } + pend = deposit32(pend, irq % 8, 1, level ? 1 : 0); + + address_space_write(as, lpipt_baddr + ((irq / 8) * sizeof(pend)), + MEMTXATTRS_UNSPECIFIED, &pend, sizeof(pend)); + + /* + * check if this LPI is better than the current hpplpi, if yes + * just set hpplpi.prio and .irq without doing a full rescan + */ + if (level) { + gicv3_redist_check_lpi_priority(cs, irq); + gicv3_redist_update(cs); + } else { + if (irq == cs->hpplpi.irq) { + gicv3_redist_update_lpi(cs); + } + } +} + +void gicv3_redist_process_lpi(GICv3CPUState *cs, int irq, int level) +{ + uint64_t idbits; + + idbits = MIN(FIELD_EX64(cs->gicr_propbaser, GICR_PROPBASER, IDBITS), + GICD_TYPER_IDBITS); + + if (!(cs->gicr_ctlr & GICR_CTLR_ENABLE_LPIS) || !cs->gicr_propbaser || + !cs->gicr_pendbaser || (irq > (1ULL << (idbits + 1)) - 1) || + irq < GICV3_LPI_INTID_START) { + return; + } + + /* set/clear the pending bit for this irq */ + gicv3_redist_lpi_pending(cs, irq, level); +} + +void gicv3_redist_set_irq(GICv3CPUState *cs, int irq, int level) +{ + /* Update redistributor state for a change in an external PPI input line */ + if (level == extract32(cs->level, irq, 1)) { + return; + } + + trace_gicv3_redist_set_irq(gicv3_redist_affid(cs), irq, level); + + cs->level = deposit32(cs->level, irq, 1, level); + + if (level) { + /* 0->1 edges latch the pending bit for edge-triggered interrupts */ + if (extract32(cs->edge_trigger, irq, 1)) { + cs->gicr_ipendr0 = deposit32(cs->gicr_ipendr0, irq, 1, 1); + } + } + + gicv3_redist_update(cs); +} + +void gicv3_redist_send_sgi(GICv3CPUState *cs, int grp, int irq, bool ns) +{ + /* Update redistributor state for a generated SGI */ + int irqgrp = gicv3_irq_group(cs->gic, cs, irq); + + /* If we are asked for a Secure Group 1 SGI and it's actually + * configured as Secure Group 0 this is OK (subject to the usual + * NSACR checks). 
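+ * The 2-bit GICR_NSACR field checked below encodes that permission: + * 0 forbids Non-secure SGI generation entirely, 1 permits generating + * Group 0 SGIs only, and 2 or 3 additionally permit Secure Group 1.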
+ */ + if (grp == GICV3_G1 && irqgrp == GICV3_G0) { + grp = GICV3_G0; + } + + if (grp != irqgrp) { + return; + } + + if (ns && !(cs->gic->gicd_ctlr & GICD_CTLR_DS)) { + /* If security is enabled we must test the NSACR bits */ + int nsaccess = gicr_ns_access(cs, irq); + + if ((irqgrp == GICV3_G0 && nsaccess < 1) || + (irqgrp == GICV3_G1 && nsaccess < 2)) { + return; + } + } + + /* OK, we can accept the SGI */ + trace_gicv3_redist_send_sgi(gicv3_redist_affid(cs), irq); + cs->gicr_ipendr0 = deposit32(cs->gicr_ipendr0, irq, 1, 1); + gicv3_redist_update(cs); +} diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c new file mode 100644 index 000000000..13df002ce --- /dev/null +++ b/hw/intc/armv7m_nvic.c @@ -0,0 +1,2735 @@ +/* + * ARM Nested Vectored Interrupt Controller + * + * Copyright (c) 2006-2007 CodeSourcery. + * Written by Paul Brook + * + * This code is licensed under the GPL. + * + * The ARMv7M System controller is fairly tightly tied in with the + * NVIC. Much of that is also implemented here. + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "qemu/timer.h" +#include "hw/intc/armv7m_nvic.h" +#include "hw/irq.h" +#include "hw/qdev-properties.h" +#include "sysemu/runstate.h" +#include "target/arm/cpu.h" +#include "exec/exec-all.h" +#include "exec/memop.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "trace.h" + +/* IRQ number counting: + * + * the num-irq property counts the number of external IRQ lines + * + * NVICState::num_irq counts the total number of exceptions + * (external IRQs, the 15 internal exceptions including reset, + * and one for the unused exception number 0). + * + * NVIC_MAX_IRQ is the highest permitted number of external IRQ lines. + * + * NVIC_MAX_VECTORS is the highest permitted number of exceptions. + * + * Iterating through all exceptions should typically be done with + * for (i = 1; i < s->num_irq; i++) to avoid the unused slot 0. + * + * The external qemu_irq lines are the NVIC's external IRQ lines, + * so line 0 is exception 16. + * + * In the terminology of the architecture manual, "interrupts" are + * a subcategory of exception referring to the external interrupts + * (which are exception numbers NVIC_FIRST_IRQ and upward). + * For historical reasons QEMU tends to use "interrupt" and + * "exception" more or less interchangeably. + */ +#define NVIC_FIRST_IRQ NVIC_INTERNAL_VECTORS +#define NVIC_MAX_IRQ (NVIC_MAX_VECTORS - NVIC_FIRST_IRQ) + +/* Effective running priority of the CPU when no exception is active + * (higher than the highest possible priority value) + */ +#define NVIC_NOEXC_PRIO 0x100 +/* Maximum priority of non-secure exceptions when AIRCR.PRIS is set */ +#define NVIC_NS_PRIO_LIMIT 0x80 + +static const uint8_t nvic_id[] = { + 0x00, 0xb0, 0x1b, 0x00, 0x0d, 0xe0, 0x05, 0xb1 +}; + +static void signal_sysresetreq(NVICState *s) +{ + if (qemu_irq_is_connected(s->sysresetreq)) { + qemu_irq_pulse(s->sysresetreq); + } else { + /* + * Default behaviour if the SoC doesn't need to wire up + * SYSRESETREQ (eg to a system reset controller of some kind): + * perform a system reset via the usual QEMU API. 
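+ * (Boards that do need more specific behaviour, e.g. routing the + * request to a reset controller model, connect the sysresetreq + * qemu_irq and handle the pulse themselves.)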
+ */ + qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); + } +} + +static int nvic_pending_prio(NVICState *s) +{ + /* return the group priority of the current pending interrupt, + * or NVIC_NOEXC_PRIO if no interrupt is pending + */ + return s->vectpending_prio; +} + +/* Return the value of the ISCR RETTOBASE bit: + * 1 if there is exactly one active exception + * 0 if there is more than one active exception + * UNKNOWN if there are no active exceptions (we choose 1, + * which matches the choice Cortex-M3 is documented as making). + * + * NB: some versions of the documentation talk about this + * counting "active exceptions other than the one shown by IPSR"; + * this is only different in the obscure corner case where guest + * code has manually deactivated an exception and is about + * to fail an exception-return integrity check. The definition + * above is the one from the v8M ARM ARM and is also in line + * with the behaviour documented for the Cortex-M3. + */ +static bool nvic_rettobase(NVICState *s) +{ + int irq, nhand = 0; + bool check_sec = arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY); + + for (irq = ARMV7M_EXCP_RESET; irq < s->num_irq; irq++) { + if (s->vectors[irq].active || + (check_sec && irq < NVIC_INTERNAL_VECTORS && + s->sec_vectors[irq].active)) { + nhand++; + if (nhand == 2) { + return 0; + } + } + } + + return 1; +} + +/* Return the value of the ISCR ISRPENDING bit: + * 1 if an external interrupt is pending + * 0 if no external interrupt is pending + */ +static bool nvic_isrpending(NVICState *s) +{ + int irq; + + /* + * We can shortcut if the highest priority pending interrupt + * happens to be external; if not we need to check the whole + * vectors[] array. + */ + if (s->vectpending > NVIC_FIRST_IRQ) { + return true; + } + + for (irq = NVIC_FIRST_IRQ; irq < s->num_irq; irq++) { + if (s->vectors[irq].pending) { + return true; + } + } + return false; +} + +static bool exc_is_banked(int exc) +{ + /* Return true if this is one of the limited set of exceptions which + * are banked (and thus have state in sec_vectors[]) + */ + return exc == ARMV7M_EXCP_HARD || + exc == ARMV7M_EXCP_MEM || + exc == ARMV7M_EXCP_USAGE || + exc == ARMV7M_EXCP_SVC || + exc == ARMV7M_EXCP_PENDSV || + exc == ARMV7M_EXCP_SYSTICK; +} + +/* Return a mask word which clears the subpriority bits from + * a priority value for an M-profile exception, leaving only + * the group priority. + */ +static inline uint32_t nvic_gprio_mask(NVICState *s, bool secure) +{ + return ~0U << (s->prigroup[secure] + 1); +} + +static bool exc_targets_secure(NVICState *s, int exc) +{ + /* Return true if this non-banked exception targets Secure state. */ + if (!arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY)) { + return false; + } + + if (exc >= NVIC_FIRST_IRQ) { + return !s->itns[exc]; + } + + /* Function shouldn't be called for banked exceptions. */ + assert(!exc_is_banked(exc)); + + switch (exc) { + case ARMV7M_EXCP_NMI: + case ARMV7M_EXCP_BUS: + return !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK); + case ARMV7M_EXCP_SECURE: + return true; + case ARMV7M_EXCP_DEBUG: + /* TODO: controlled by DEMCR.SDME, which we don't yet implement */ + return false; + default: + /* reset, and reserved (unused) low exception numbers. + * We'll get called by code that loops through all the exception + * numbers, but it doesn't matter what we return here as these + * non-existent exceptions will never be pended or active. 
+ */ + return true; + } +} + +static int exc_group_prio(NVICState *s, int rawprio, bool targets_secure) +{ + /* Return the group priority for this exception, given its raw + * (group-and-subgroup) priority value and whether it is targeting + * secure state or not. + */ + if (rawprio < 0) { + return rawprio; + } + rawprio &= nvic_gprio_mask(s, targets_secure); + /* AIRCR.PRIS causes us to squash all NS priorities into the + * lower half of the total range + */ + if (!targets_secure && + (s->cpu->env.v7m.aircr & R_V7M_AIRCR_PRIS_MASK)) { + rawprio = (rawprio >> 1) + NVIC_NS_PRIO_LIMIT; + } + return rawprio; +} + +/* Recompute vectpending and exception_prio for a CPU which implements + * the Security extension + */ +static void nvic_recompute_state_secure(NVICState *s) +{ + int i, bank; + int pend_prio = NVIC_NOEXC_PRIO; + int active_prio = NVIC_NOEXC_PRIO; + int pend_irq = 0; + bool pending_is_s_banked = false; + int pend_subprio = 0; + + /* R_CQRV: precedence is by: + * - lowest group priority; if both the same then + * - lowest subpriority; if both the same then + * - lowest exception number; if both the same (ie banked) then + * - secure exception takes precedence + * Compare pseudocode RawExecutionPriority. + * Annoyingly, now we have two prigroup values (for S and NS) + * we can't do the loop comparison on raw priority values. + */ + for (i = 1; i < s->num_irq; i++) { + for (bank = M_REG_S; bank >= M_REG_NS; bank--) { + VecInfo *vec; + int prio, subprio; + bool targets_secure; + + if (bank == M_REG_S) { + if (!exc_is_banked(i)) { + continue; + } + vec = &s->sec_vectors[i]; + targets_secure = true; + } else { + vec = &s->vectors[i]; + targets_secure = !exc_is_banked(i) && exc_targets_secure(s, i); + } + + prio = exc_group_prio(s, vec->prio, targets_secure); + subprio = vec->prio & ~nvic_gprio_mask(s, targets_secure); + if (vec->enabled && vec->pending && + ((prio < pend_prio) || + (prio == pend_prio && prio >= 0 && subprio < pend_subprio))) { + pend_prio = prio; + pend_subprio = subprio; + pend_irq = i; + pending_is_s_banked = (bank == M_REG_S); + } + if (vec->active && prio < active_prio) { + active_prio = prio; + } + } + } + + s->vectpending_is_s_banked = pending_is_s_banked; + s->vectpending = pend_irq; + s->vectpending_prio = pend_prio; + s->exception_prio = active_prio; + + trace_nvic_recompute_state_secure(s->vectpending, + s->vectpending_is_s_banked, + s->vectpending_prio, + s->exception_prio); +} + +/* Recompute vectpending and exception_prio */ +static void nvic_recompute_state(NVICState *s) +{ + int i; + int pend_prio = NVIC_NOEXC_PRIO; + int active_prio = NVIC_NOEXC_PRIO; + int pend_irq = 0; + + /* In theory we could write one function that handled both + * the "security extension present" and "not present"; however + * the security related changes significantly complicate the + * recomputation just by themselves and mixing both cases together + * would be even worse, so we retain a separate non-secure-only + * version for CPUs which don't implement the security extension. 
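+ * (The non-secure-only loop below can compare raw vec->prio values + * directly because there is a single PRIGROUP setting; the group + * priority mask is applied just once at the end.)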
+ */ + if (arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY)) { + nvic_recompute_state_secure(s); + return; + } + + for (i = 1; i < s->num_irq; i++) { + VecInfo *vec = &s->vectors[i]; + + if (vec->enabled && vec->pending && vec->prio < pend_prio) { + pend_prio = vec->prio; + pend_irq = i; + } + if (vec->active && vec->prio < active_prio) { + active_prio = vec->prio; + } + } + + if (active_prio > 0) { + active_prio &= nvic_gprio_mask(s, false); + } + + if (pend_prio > 0) { + pend_prio &= nvic_gprio_mask(s, false); + } + + s->vectpending = pend_irq; + s->vectpending_prio = pend_prio; + s->exception_prio = active_prio; + + trace_nvic_recompute_state(s->vectpending, + s->vectpending_prio, + s->exception_prio); +} + +/* Return the current execution priority of the CPU + * (equivalent to the pseudocode ExecutionPriority function). + * This is a value between -2 (NMI priority) and NVIC_NOEXC_PRIO. + */ +static inline int nvic_exec_prio(NVICState *s) +{ + CPUARMState *env = &s->cpu->env; + int running = NVIC_NOEXC_PRIO; + + if (env->v7m.basepri[M_REG_NS] > 0) { + running = exc_group_prio(s, env->v7m.basepri[M_REG_NS], M_REG_NS); + } + + if (env->v7m.basepri[M_REG_S] > 0) { + int basepri = exc_group_prio(s, env->v7m.basepri[M_REG_S], M_REG_S); + if (running > basepri) { + running = basepri; + } + } + + if (env->v7m.primask[M_REG_NS]) { + if (env->v7m.aircr & R_V7M_AIRCR_PRIS_MASK) { + if (running > NVIC_NS_PRIO_LIMIT) { + running = NVIC_NS_PRIO_LIMIT; + } + } else { + running = 0; + } + } + + if (env->v7m.primask[M_REG_S]) { + running = 0; + } + + if (env->v7m.faultmask[M_REG_NS]) { + if (env->v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) { + running = -1; + } else { + if (env->v7m.aircr & R_V7M_AIRCR_PRIS_MASK) { + if (running > NVIC_NS_PRIO_LIMIT) { + running = NVIC_NS_PRIO_LIMIT; + } + } else { + running = 0; + } + } + } + + if (env->v7m.faultmask[M_REG_S]) { + running = (env->v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) ? -3 : -1; + } + + /* consider priority of active handler */ + return MIN(running, s->exception_prio); +} + +bool armv7m_nvic_neg_prio_requested(void *opaque, bool secure) +{ + /* Return true if the requested execution priority is negative + * for the specified security state, ie that security state + * has an active NMI or HardFault or has set its FAULTMASK. + * Note that this is not the same as whether the execution + * priority is actually negative (for instance AIRCR.PRIS may + * mean we don't allow FAULTMASK_NS to actually make the execution + * priority negative). Compare pseudocode IsReqExcPriNeg(). + */ + NVICState *s = opaque; + + if (s->cpu->env.v7m.faultmask[secure]) { + return true; + } + + if (secure ? s->sec_vectors[ARMV7M_EXCP_HARD].active : + s->vectors[ARMV7M_EXCP_HARD].active) { + return true; + } + + if (s->vectors[ARMV7M_EXCP_NMI].active && + exc_targets_secure(s, ARMV7M_EXCP_NMI) == secure) { + return true; + } + + return false; +} + +bool armv7m_nvic_can_take_pending_exception(void *opaque) +{ + NVICState *s = opaque; + + return nvic_exec_prio(s) > nvic_pending_prio(s); +} + +int armv7m_nvic_raw_execution_priority(void *opaque) +{ + NVICState *s = opaque; + + return s->exception_prio; +} + +/* caller must call nvic_irq_update() after this. + * secure indicates the bank to use for banked exceptions (we assert if + * we are passed secure=true for a non-banked exception). 
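+ * The masking below drops unimplemented low-order priority bits: with, + * e.g., num_prio_bits == 3 only bits [7:5] of the written value are + * kept.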
+ */ +static void set_prio(NVICState *s, unsigned irq, bool secure, uint8_t prio) +{ + assert(irq > ARMV7M_EXCP_NMI); /* only use for configurable prios */ + assert(irq < s->num_irq); + + prio &= MAKE_64BIT_MASK(8 - s->num_prio_bits, s->num_prio_bits); + + if (secure) { + assert(exc_is_banked(irq)); + s->sec_vectors[irq].prio = prio; + } else { + s->vectors[irq].prio = prio; + } + + trace_nvic_set_prio(irq, secure, prio); +} + +/* Return the current raw priority register value. + * secure indicates the bank to use for banked exceptions (we assert if + * we are passed secure=true for a non-banked exception). + */ +static int get_prio(NVICState *s, unsigned irq, bool secure) +{ + assert(irq > ARMV7M_EXCP_NMI); /* only use for configurable prios */ + assert(irq < s->num_irq); + + if (secure) { + assert(exc_is_banked(irq)); + return s->sec_vectors[irq].prio; + } else { + return s->vectors[irq].prio; + } +} + +/* Recompute state and assert irq line accordingly. + * Must be called after changes to: + * vec->active, vec->enabled, vec->pending or vec->prio for any vector + * prigroup + */ +static void nvic_irq_update(NVICState *s) +{ + int lvl; + int pend_prio; + + nvic_recompute_state(s); + pend_prio = nvic_pending_prio(s); + + /* Raise NVIC output if this IRQ would be taken, except that we + * ignore the effects of the BASEPRI, FAULTMASK and PRIMASK (which + * will be checked for in arm_v7m_cpu_exec_interrupt()); changes + * to those CPU registers don't cause us to recalculate the NVIC + * pending info. + */ + lvl = (pend_prio < s->exception_prio); + trace_nvic_irq_update(s->vectpending, pend_prio, s->exception_prio, lvl); + qemu_set_irq(s->excpout, lvl); +} + +/** + * armv7m_nvic_clear_pending: mark the specified exception as not pending + * @opaque: the NVIC + * @irq: the exception number to mark as not pending + * @secure: false for non-banked exceptions or for the nonsecure + * version of a banked exception, true for the secure version of a banked + * exception. + * + * Marks the specified exception as not pending. Note that we will assert() + * if @secure is true and @irq does not specify one of the fixed set + * of architecturally banked exceptions. + */ +static void armv7m_nvic_clear_pending(void *opaque, int irq, bool secure) +{ + NVICState *s = (NVICState *)opaque; + VecInfo *vec; + + assert(irq > ARMV7M_EXCP_RESET && irq < s->num_irq); + + if (secure) { + assert(exc_is_banked(irq)); + vec = &s->sec_vectors[irq]; + } else { + vec = &s->vectors[irq]; + } + trace_nvic_clear_pending(irq, secure, vec->enabled, vec->prio); + if (vec->pending) { + vec->pending = 0; + nvic_irq_update(s); + } +} + +static void do_armv7m_nvic_set_pending(void *opaque, int irq, bool secure, + bool derived) +{ + /* Pend an exception, including possibly escalating it to HardFault. + * + * This function handles both "normal" pending of interrupts and + * exceptions, and also derived exceptions (ones which occur as + * a result of trying to take some other exception). + * + * If derived == true, the caller guarantees that we are part way through + * trying to take an exception (but have not yet called + * armv7m_nvic_acknowledge_irq() to make it active), and so: + * - s->vectpending is the "original exception" we were trying to take + * - irq is the "derived exception" + * - nvic_exec_prio(s) gives the priority before exception entry + * Here we handle the prioritization logic which the pseudocode puts + * in the DerivedLateArrival() function. 
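+ * (A typical derived exception is a MemManage or BusFault raised while + * stacking registers for entry to the original exception.)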
+ */ + + NVICState *s = (NVICState *)opaque; + bool banked = exc_is_banked(irq); + VecInfo *vec; + bool targets_secure; + + assert(irq > ARMV7M_EXCP_RESET && irq < s->num_irq); + assert(!secure || banked); + + vec = (banked && secure) ? &s->sec_vectors[irq] : &s->vectors[irq]; + + targets_secure = banked ? secure : exc_targets_secure(s, irq); + + trace_nvic_set_pending(irq, secure, targets_secure, + derived, vec->enabled, vec->prio); + + if (derived) { + /* Derived exceptions are always synchronous. */ + assert(irq >= ARMV7M_EXCP_HARD && irq < ARMV7M_EXCP_PENDSV); + + if (irq == ARMV7M_EXCP_DEBUG && + exc_group_prio(s, vec->prio, secure) >= nvic_exec_prio(s)) { + /* DebugMonitorFault, but its priority is lower than the + * preempted exception priority: just ignore it. + */ + return; + } + + if (irq == ARMV7M_EXCP_HARD && vec->prio >= s->vectpending_prio) { + /* If this is a terminal exception (one which means we cannot + * take the original exception, like a failure to read its + * vector table entry), then we must take the derived exception. + * If the derived exception can't take priority over the + * original exception, then we go into Lockup. + * + * For QEMU, we rely on the fact that a derived exception is + * terminal if and only if it's reported to us as HardFault, + * which saves having to have an extra argument is_terminal + * that we'd only use in one place. + */ + cpu_abort(&s->cpu->parent_obj, + "Lockup: can't take terminal derived exception " + "(original exception priority %d)\n", + s->vectpending_prio); + } + /* We now continue with the same code as for a normal pending + * exception, which will cause us to pend the derived exception. + * We'll then take either the original or the derived exception + * based on which is higher priority by the usual mechanism + * for selecting the highest priority pending interrupt. + */ + } + + if (irq >= ARMV7M_EXCP_HARD && irq < ARMV7M_EXCP_PENDSV) { + /* If a synchronous exception is pending then it may be + * escalated to HardFault if: + * * it is equal or lower priority to current execution + * * it is disabled + * (ie we need to take it immediately but we can't do so). + * Asynchronous exceptions (and interrupts) simply remain pending. + * + * For QEMU, we don't have any imprecise (asynchronous) faults, + * so we can assume that PREFETCH_ABORT and DATA_ABORT are always + * synchronous. + * Debug exceptions are awkward because only Debug exceptions + * resulting from the BKPT instruction should be escalated, + * but we don't currently implement any Debug exceptions other + * than those that result from BKPT, so we treat all debug exceptions + * as needing escalation. + * + * This all means we can identify whether to escalate based only on + * the exception number and don't (yet) need the caller to explicitly + * tell us whether this exception is synchronous or not. + */ + int running = nvic_exec_prio(s); + bool escalate = false; + + if (exc_group_prio(s, vec->prio, secure) >= running) { + trace_nvic_escalate_prio(irq, vec->prio, running); + escalate = true; + } else if (!vec->enabled) { + trace_nvic_escalate_disabled(irq); + escalate = true; + } + + if (escalate) { + + /* We need to escalate this exception to a synchronous HardFault. + * If BFHFNMINS is set then we escalate to the banked HF for + * the target security state of the original exception; otherwise + * we take a Secure HardFault. 
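+ * (When BFHFNMINS is 0, HardFault is not banked and always targets + * Secure state, so the Secure HardFault vector is the only valid + * choice.)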
+ */ + irq = ARMV7M_EXCP_HARD; + if (arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY) && + (targets_secure || + !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK))) { + vec = &s->sec_vectors[irq]; + } else { + vec = &s->vectors[irq]; + } + if (running <= vec->prio) { + /* We want to escalate to HardFault but we can't take the + * synchronous HardFault at this point either. This is a + * Lockup condition due to a guest bug. We don't model + * Lockup, so report via cpu_abort() instead. + */ + cpu_abort(&s->cpu->parent_obj, + "Lockup: can't escalate %d to HardFault " + "(current priority %d)\n", irq, running); + } + + /* HF may be banked but there is only one shared HFSR */ + s->cpu->env.v7m.hfsr |= R_V7M_HFSR_FORCED_MASK; + } + } + + if (!vec->pending) { + vec->pending = 1; + nvic_irq_update(s); + } +} + +void armv7m_nvic_set_pending(void *opaque, int irq, bool secure) +{ + do_armv7m_nvic_set_pending(opaque, irq, secure, false); +} + +void armv7m_nvic_set_pending_derived(void *opaque, int irq, bool secure) +{ + do_armv7m_nvic_set_pending(opaque, irq, secure, true); +} + +void armv7m_nvic_set_pending_lazyfp(void *opaque, int irq, bool secure) +{ + /* + * Pend an exception during lazy FP stacking. This differs + * from the usual exception pending because the logic for + * whether we should escalate depends on the saved context + * in the FPCCR register, not on the current state of the CPU/NVIC. + */ + NVICState *s = (NVICState *)opaque; + bool banked = exc_is_banked(irq); + VecInfo *vec; + bool targets_secure; + bool escalate = false; + /* + * We will only look at bits in fpccr if this is a banked exception + * (in which case 'secure' tells us whether it is the S or NS version). + * All the bits for the non-banked exceptions are in fpccr_s. + */ + uint32_t fpccr_s = s->cpu->env.v7m.fpccr[M_REG_S]; + uint32_t fpccr = s->cpu->env.v7m.fpccr[secure]; + + assert(irq > ARMV7M_EXCP_RESET && irq < s->num_irq); + assert(!secure || banked); + + vec = (banked && secure) ? &s->sec_vectors[irq] : &s->vectors[irq]; + + targets_secure = banked ? secure : exc_targets_secure(s, irq); + + switch (irq) { + case ARMV7M_EXCP_DEBUG: + if (!(fpccr_s & R_V7M_FPCCR_MONRDY_MASK)) { + /* Ignore DebugMonitor exception */ + return; + } + break; + case ARMV7M_EXCP_MEM: + escalate = !(fpccr & R_V7M_FPCCR_MMRDY_MASK); + break; + case ARMV7M_EXCP_USAGE: + escalate = !(fpccr & R_V7M_FPCCR_UFRDY_MASK); + break; + case ARMV7M_EXCP_BUS: + escalate = !(fpccr_s & R_V7M_FPCCR_BFRDY_MASK); + break; + case ARMV7M_EXCP_SECURE: + escalate = !(fpccr_s & R_V7M_FPCCR_SFRDY_MASK); + break; + default: + g_assert_not_reached(); + } + + if (escalate) { + /* + * Escalate to HardFault: faults that initially targeted Secure + * continue to do so, even if HF normally targets NonSecure. + */ + irq = ARMV7M_EXCP_HARD; + if (arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY) && + (targets_secure || + !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK))) { + vec = &s->sec_vectors[irq]; + } else { + vec = &s->vectors[irq]; + } + } + + if (!vec->enabled || + nvic_exec_prio(s) <= exc_group_prio(s, vec->prio, secure)) { + if (!(fpccr_s & R_V7M_FPCCR_HFRDY_MASK)) { + /* + * We want to escalate to HardFault but the context the + * FP state belongs to prevents the exception pre-empting. 
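+             * (FPCCR.HFRDY == 0 records that not even HardFault could
+             * be pended when the FP context was created, so there is
+             * no legal exception left for us to take now.)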
+ */ + cpu_abort(&s->cpu->parent_obj, + "Lockup: can't escalate to HardFault during " + "lazy FP register stacking\n"); + } + } + + if (escalate) { + s->cpu->env.v7m.hfsr |= R_V7M_HFSR_FORCED_MASK; + } + if (!vec->pending) { + vec->pending = 1; + /* + * We do not call nvic_irq_update(), because we know our caller + * is going to handle causing us to take the exception by + * raising EXCP_LAZYFP, so raising the IRQ line would be + * pointless extra work. We just need to recompute the + * priorities so that armv7m_nvic_can_take_pending_exception() + * returns the right answer. + */ + nvic_recompute_state(s); + } +} + +/* Make pending IRQ active. */ +void armv7m_nvic_acknowledge_irq(void *opaque) +{ + NVICState *s = (NVICState *)opaque; + CPUARMState *env = &s->cpu->env; + const int pending = s->vectpending; + const int running = nvic_exec_prio(s); + VecInfo *vec; + + assert(pending > ARMV7M_EXCP_RESET && pending < s->num_irq); + + if (s->vectpending_is_s_banked) { + vec = &s->sec_vectors[pending]; + } else { + vec = &s->vectors[pending]; + } + + assert(vec->enabled); + assert(vec->pending); + + assert(s->vectpending_prio < running); + + trace_nvic_acknowledge_irq(pending, s->vectpending_prio); + + vec->active = 1; + vec->pending = 0; + + write_v7m_exception(env, s->vectpending); + + nvic_irq_update(s); +} + +static bool vectpending_targets_secure(NVICState *s) +{ + /* Return true if s->vectpending targets Secure state */ + if (s->vectpending_is_s_banked) { + return true; + } + return !exc_is_banked(s->vectpending) && + exc_targets_secure(s, s->vectpending); +} + +void armv7m_nvic_get_pending_irq_info(void *opaque, + int *pirq, bool *ptargets_secure) +{ + NVICState *s = (NVICState *)opaque; + const int pending = s->vectpending; + bool targets_secure; + + assert(pending > ARMV7M_EXCP_RESET && pending < s->num_irq); + + targets_secure = vectpending_targets_secure(s); + + trace_nvic_get_pending_irq_info(pending, targets_secure); + + *ptargets_secure = targets_secure; + *pirq = pending; +} + +int armv7m_nvic_complete_irq(void *opaque, int irq, bool secure) +{ + NVICState *s = (NVICState *)opaque; + VecInfo *vec = NULL; + int ret = 0; + + assert(irq > ARMV7M_EXCP_RESET && irq < s->num_irq); + + trace_nvic_complete_irq(irq, secure); + + if (secure && exc_is_banked(irq)) { + vec = &s->sec_vectors[irq]; + } else { + vec = &s->vectors[irq]; + } + + /* + * Identify illegal exception return cases. We can't immediately + * return at this point because we still need to deactivate + * (either this exception or NMI/HardFault) first. + */ + if (!exc_is_banked(irq) && exc_targets_secure(s, irq) != secure) { + /* + * Return from a configurable exception targeting the opposite + * security state from the one we're trying to complete it for. + * Clear vec because it's not really the VecInfo for this + * (irq, secstate) so we mustn't deactivate it. + */ + ret = -1; + vec = NULL; + } else if (!vec->active) { + /* Return from an inactive interrupt */ + ret = -1; + } else { + /* Legal return, we will return the RETTOBASE bit value to the caller */ + ret = nvic_rettobase(s); + } + + /* + * For negative priorities, v8M will forcibly deactivate the appropriate + * NMI or HardFault regardless of what interrupt we're being asked to + * deactivate (compare the DeActivate() pseudocode). This is a guard + * against software returning from NMI or HardFault with a corrupted + * IPSR and leaving the CPU in a negative-priority state. + * v7M does not do this, but simply deactivates the requested interrupt. 
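+     * (For example, a raw execution priority of -2 means we are in the
+     * NMI handler, so it is the NMI vector that is deactivated below,
+     * whatever exception number the guest asked to complete.)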
+ */
+    if (arm_feature(&s->cpu->env, ARM_FEATURE_V8)) {
+        switch (armv7m_nvic_raw_execution_priority(s)) {
+        case -1:
+            if (s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) {
+                vec = &s->vectors[ARMV7M_EXCP_HARD];
+            } else {
+                vec = &s->sec_vectors[ARMV7M_EXCP_HARD];
+            }
+            break;
+        case -2:
+            vec = &s->vectors[ARMV7M_EXCP_NMI];
+            break;
+        case -3:
+            vec = &s->sec_vectors[ARMV7M_EXCP_HARD];
+            break;
+        default:
+            break;
+        }
+    }
+
+    if (!vec) {
+        return ret;
+    }
+
+    vec->active = 0;
+    if (vec->level) {
+        /* Re-pend the exception if it's still held high; only
+         * happens for external IRQs
+         */
+        assert(irq >= NVIC_FIRST_IRQ);
+        vec->pending = 1;
+    }
+
+    nvic_irq_update(s);
+
+    return ret;
+}
+
+bool armv7m_nvic_get_ready_status(void *opaque, int irq, bool secure)
+{
+    /*
+     * Return whether an exception is "ready", i.e. it is enabled and is
+     * configured at a priority which would allow it to interrupt the
+     * current execution priority.
+     *
+     * irq and secure have the same semantics as for armv7m_nvic_set_pending():
+     * for non-banked exceptions secure is always false; for banked exceptions
+     * it indicates which of the exceptions is required.
+     */
+    NVICState *s = (NVICState *)opaque;
+    bool banked = exc_is_banked(irq);
+    VecInfo *vec;
+    int running = nvic_exec_prio(s);
+
+    assert(irq > ARMV7M_EXCP_RESET && irq < s->num_irq);
+    assert(!secure || banked);
+
+    /*
+     * HardFault is an odd special case: we always check against -1,
+     * even if we're secure and HardFault has priority -3; we never
+     * need to check for enabled state.
+     */
+    if (irq == ARMV7M_EXCP_HARD) {
+        return running > -1;
+    }
+
+    vec = (banked && secure) ? &s->sec_vectors[irq] : &s->vectors[irq];
+
+    return vec->enabled &&
+        exc_group_prio(s, vec->prio, secure) < running;
+}
+
+/* callback when external interrupt line is changed */
+static void set_irq_level(void *opaque, int n, int level)
+{
+    NVICState *s = opaque;
+    VecInfo *vec;
+
+    n += NVIC_FIRST_IRQ;
+
+    assert(n >= NVIC_FIRST_IRQ && n < s->num_irq);
+
+    trace_nvic_set_irq_level(n, level);
+
+    /* The pending status of an external interrupt is
+     * latched on rising edge and exception handler return.
+     *
+     * Pulsing the IRQ will always run the handler
+     * once, and the handler will re-run until the
+     * level is low when the handler completes.
+     */
+    vec = &s->vectors[n];
+    if (level != vec->level) {
+        vec->level = level;
+        if (level) {
+            armv7m_nvic_set_pending(s, n, false);
+        }
+    }
+}
+
+/* callback when external NMI line is changed */
+static void nvic_nmi_trigger(void *opaque, int n, int level)
+{
+    NVICState *s = opaque;
+
+    trace_nvic_set_nmi_level(level);
+
+    /*
+     * The architecture doesn't specify whether NMI should share
+     * the normal-interrupt behaviour of being resampled on
+     * exception handler return. We choose not to, so just
+     * set NMI pending here and don't track the current level.
+     */
+    if (level) {
+        armv7m_nvic_set_pending(s, ARMV7M_EXCP_NMI, false);
+    }
+}
+
+static uint32_t nvic_readl(NVICState *s, uint32_t offset, MemTxAttrs attrs)
+{
+    ARMCPU *cpu = s->cpu;
+    uint32_t val;
+
+    switch (offset) {
+    case 4: /* Interrupt Control Type. */
+        if (!arm_feature(&cpu->env, ARM_FEATURE_V7)) {
+            goto bad_offset;
+        }
+        return ((s->num_irq - NVIC_FIRST_IRQ) / 32) - 1;
+    case 0xc: /* CPPWR */
+        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
+            goto bad_offset;
+        }
+        /* We make the IMPDEF choice that nothing can ever go into a
+         * non-retentive power state, which allows us to RAZ/WI this.
+         */
+        return 0;
+    case 0x380 ...
0x3bf: /* NVIC_ITNS */ + { + int startvec = 8 * (offset - 0x380) + NVIC_FIRST_IRQ; + int i; + + if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { + goto bad_offset; + } + if (!attrs.secure) { + return 0; + } + val = 0; + for (i = 0; i < 32 && startvec + i < s->num_irq; i++) { + if (s->itns[startvec + i]) { + val |= (1 << i); + } + } + return val; + } + case 0xcfc: + if (!arm_feature(&cpu->env, ARM_FEATURE_V8_1M)) { + goto bad_offset; + } + return cpu->revidr; + case 0xd00: /* CPUID Base. */ + return cpu->midr; + case 0xd04: /* Interrupt Control State (ICSR) */ + /* VECTACTIVE */ + val = cpu->env.v7m.exception; + /* VECTPENDING */ + if (s->vectpending) { + /* + * From v8.1M VECTPENDING must read as 1 if accessed as + * NonSecure and the highest priority pending and enabled + * exception targets Secure. + */ + int vp = s->vectpending; + if (!attrs.secure && arm_feature(&cpu->env, ARM_FEATURE_V8_1M) && + vectpending_targets_secure(s)) { + vp = 1; + } + val |= (vp & 0x1ff) << 12; + } + /* ISRPENDING - set if any external IRQ is pending */ + if (nvic_isrpending(s)) { + val |= (1 << 22); + } + /* RETTOBASE - set if only one handler is active */ + if (nvic_rettobase(s)) { + val |= (1 << 11); + } + if (attrs.secure) { + /* PENDSTSET */ + if (s->sec_vectors[ARMV7M_EXCP_SYSTICK].pending) { + val |= (1 << 26); + } + /* PENDSVSET */ + if (s->sec_vectors[ARMV7M_EXCP_PENDSV].pending) { + val |= (1 << 28); + } + } else { + /* PENDSTSET */ + if (s->vectors[ARMV7M_EXCP_SYSTICK].pending) { + val |= (1 << 26); + } + /* PENDSVSET */ + if (s->vectors[ARMV7M_EXCP_PENDSV].pending) { + val |= (1 << 28); + } + } + /* NMIPENDSET */ + if ((attrs.secure || (cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) + && s->vectors[ARMV7M_EXCP_NMI].pending) { + val |= (1 << 31); + } + /* ISRPREEMPT: RES0 when halting debug not implemented */ + /* STTNS: RES0 for the Main Extension */ + return val; + case 0xd08: /* Vector Table Offset. */ + return cpu->env.v7m.vecbase[attrs.secure]; + case 0xd0c: /* Application Interrupt/Reset Control (AIRCR) */ + val = 0xfa050000 | (s->prigroup[attrs.secure] << 8); + if (attrs.secure) { + /* s->aircr stores PRIS, BFHFNMINS, SYSRESETREQS */ + val |= cpu->env.v7m.aircr; + } else { + if (arm_feature(&cpu->env, ARM_FEATURE_V8)) { + /* BFHFNMINS is R/O from NS; other bits are RAZ/WI. If + * security isn't supported then BFHFNMINS is RAO (and + * the bit in env.v7m.aircr is always set). + */ + val |= cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK; + } + } + return val; + case 0xd10: /* System Control. */ + if (!arm_feature(&cpu->env, ARM_FEATURE_V7)) { + goto bad_offset; + } + return cpu->env.v7m.scr[attrs.secure]; + case 0xd14: /* Configuration Control. 
*/ + /* + * Non-banked bits: BFHFNMIGN (stored in the NS copy of the register) + * and TRD (stored in the S copy of the register) + */ + val = cpu->env.v7m.ccr[attrs.secure]; + val |= cpu->env.v7m.ccr[M_REG_NS] & R_V7M_CCR_BFHFNMIGN_MASK; + /* BFHFNMIGN is RAZ/WI from NS if AIRCR.BFHFNMINS is 0 */ + if (!attrs.secure) { + if (!(cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) { + val &= ~R_V7M_CCR_BFHFNMIGN_MASK; + } + } + return val; + case 0xd24: /* System Handler Control and State (SHCSR) */ + if (!arm_feature(&cpu->env, ARM_FEATURE_V7)) { + goto bad_offset; + } + val = 0; + if (attrs.secure) { + if (s->sec_vectors[ARMV7M_EXCP_MEM].active) { + val |= (1 << 0); + } + if (s->sec_vectors[ARMV7M_EXCP_HARD].active) { + val |= (1 << 2); + } + if (s->sec_vectors[ARMV7M_EXCP_USAGE].active) { + val |= (1 << 3); + } + if (s->sec_vectors[ARMV7M_EXCP_SVC].active) { + val |= (1 << 7); + } + if (s->sec_vectors[ARMV7M_EXCP_PENDSV].active) { + val |= (1 << 10); + } + if (s->sec_vectors[ARMV7M_EXCP_SYSTICK].active) { + val |= (1 << 11); + } + if (s->sec_vectors[ARMV7M_EXCP_USAGE].pending) { + val |= (1 << 12); + } + if (s->sec_vectors[ARMV7M_EXCP_MEM].pending) { + val |= (1 << 13); + } + if (s->sec_vectors[ARMV7M_EXCP_SVC].pending) { + val |= (1 << 15); + } + if (s->sec_vectors[ARMV7M_EXCP_MEM].enabled) { + val |= (1 << 16); + } + if (s->sec_vectors[ARMV7M_EXCP_USAGE].enabled) { + val |= (1 << 18); + } + if (s->sec_vectors[ARMV7M_EXCP_HARD].pending) { + val |= (1 << 21); + } + /* SecureFault is not banked but is always RAZ/WI to NS */ + if (s->vectors[ARMV7M_EXCP_SECURE].active) { + val |= (1 << 4); + } + if (s->vectors[ARMV7M_EXCP_SECURE].enabled) { + val |= (1 << 19); + } + if (s->vectors[ARMV7M_EXCP_SECURE].pending) { + val |= (1 << 20); + } + } else { + if (s->vectors[ARMV7M_EXCP_MEM].active) { + val |= (1 << 0); + } + if (arm_feature(&cpu->env, ARM_FEATURE_V8)) { + /* HARDFAULTACT, HARDFAULTPENDED not present in v7M */ + if (s->vectors[ARMV7M_EXCP_HARD].active) { + val |= (1 << 2); + } + if (s->vectors[ARMV7M_EXCP_HARD].pending) { + val |= (1 << 21); + } + } + if (s->vectors[ARMV7M_EXCP_USAGE].active) { + val |= (1 << 3); + } + if (s->vectors[ARMV7M_EXCP_SVC].active) { + val |= (1 << 7); + } + if (s->vectors[ARMV7M_EXCP_PENDSV].active) { + val |= (1 << 10); + } + if (s->vectors[ARMV7M_EXCP_SYSTICK].active) { + val |= (1 << 11); + } + if (s->vectors[ARMV7M_EXCP_USAGE].pending) { + val |= (1 << 12); + } + if (s->vectors[ARMV7M_EXCP_MEM].pending) { + val |= (1 << 13); + } + if (s->vectors[ARMV7M_EXCP_SVC].pending) { + val |= (1 << 15); + } + if (s->vectors[ARMV7M_EXCP_MEM].enabled) { + val |= (1 << 16); + } + if (s->vectors[ARMV7M_EXCP_USAGE].enabled) { + val |= (1 << 18); + } + } + if (attrs.secure || (cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) { + if (s->vectors[ARMV7M_EXCP_BUS].active) { + val |= (1 << 1); + } + if (s->vectors[ARMV7M_EXCP_BUS].pending) { + val |= (1 << 14); + } + if (s->vectors[ARMV7M_EXCP_BUS].enabled) { + val |= (1 << 17); + } + if (arm_feature(&cpu->env, ARM_FEATURE_V8) && + s->vectors[ARMV7M_EXCP_NMI].active) { + /* NMIACT is not present in v7M */ + val |= (1 << 5); + } + } + + /* TODO: this is RAZ/WI from NS if DEMCR.SDME is set */ + if (s->vectors[ARMV7M_EXCP_DEBUG].active) { + val |= (1 << 8); + } + return val; + case 0xd2c: /* Hard Fault Status. */ + if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) { + goto bad_offset; + } + return cpu->env.v7m.hfsr; + case 0xd30: /* Debug Fault Status. 
*/
+        return cpu->env.v7m.dfsr;
+    case 0xd34: /* MMFAR MemManage Fault Address */
+        if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
+            goto bad_offset;
+        }
+        return cpu->env.v7m.mmfar[attrs.secure];
+    case 0xd38: /* Bus Fault Address. */
+        if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
+            goto bad_offset;
+        }
+        if (!attrs.secure &&
+            !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
+            return 0;
+        }
+        return cpu->env.v7m.bfar;
+    case 0xd3c: /* Aux Fault Status. */
+        /* TODO: Implement fault status registers. */
+        qemu_log_mask(LOG_UNIMP,
+                      "Aux Fault status registers unimplemented\n");
+        return 0;
+    case 0xd40: /* PFR0. */
+        if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
+            goto bad_offset;
+        }
+        return cpu->isar.id_pfr0;
+    case 0xd44: /* PFR1. */
+        if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
+            goto bad_offset;
+        }
+        return cpu->isar.id_pfr1;
+    case 0xd48: /* DFR0. */
+        if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
+            goto bad_offset;
+        }
+        return cpu->isar.id_dfr0;
+    case 0xd4c: /* AFR0. */
+        if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
+            goto bad_offset;
+        }
+        return cpu->id_afr0;
+    case 0xd50: /* MMFR0. */
+        if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
+            goto bad_offset;
+        }
+        return cpu->isar.id_mmfr0;
+    case 0xd54: /* MMFR1. */
+        if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
+            goto bad_offset;
+        }
+        return cpu->isar.id_mmfr1;
+    case 0xd58: /* MMFR2. */
+        if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
+            goto bad_offset;
+        }
+        return cpu->isar.id_mmfr2;
+    case 0xd5c: /* MMFR3. */
+        if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
+            goto bad_offset;
+        }
+        return cpu->isar.id_mmfr3;
+    case 0xd60: /* ISAR0. */
+        if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
+            goto bad_offset;
+        }
+        return cpu->isar.id_isar0;
+    case 0xd64: /* ISAR1. */
+        if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
+            goto bad_offset;
+        }
+        return cpu->isar.id_isar1;
+    case 0xd68: /* ISAR2. */
+        if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
+            goto bad_offset;
+        }
+        return cpu->isar.id_isar2;
+    case 0xd6c: /* ISAR3. */
+        if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
+            goto bad_offset;
+        }
+        return cpu->isar.id_isar3;
+    case 0xd70: /* ISAR4. */
+        if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
+            goto bad_offset;
+        }
+        return cpu->isar.id_isar4;
+    case 0xd74: /* ISAR5. */
+        if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
+            goto bad_offset;
+        }
+        return cpu->isar.id_isar5;
+    case 0xd78: /* CLIDR */
+        return cpu->clidr;
+    case 0xd7c: /* CTR */
+        return cpu->ctr;
+    case 0xd80: /* CCSIDR */
+    {
+        int idx = cpu->env.v7m.csselr[attrs.secure] & R_V7M_CSSELR_INDEX_MASK;
+        return cpu->ccsidr[idx];
+    }
+    case 0xd84: /* CSSELR */
+        return cpu->env.v7m.csselr[attrs.secure];
+    case 0xd88: /* CPACR */
+        if (!cpu_isar_feature(aa32_vfp_simd, cpu)) {
+            return 0;
+        }
+        return cpu->env.v7m.cpacr[attrs.secure];
+    case 0xd8c: /* NSACR */
+        if (!attrs.secure || !cpu_isar_feature(aa32_vfp_simd, cpu)) {
+            return 0;
+        }
+        return cpu->env.v7m.nsacr;
+    /* TODO: Implement debug registers.
*/ + case 0xd90: /* MPU_TYPE */ + /* Unified MPU; if the MPU is not present this value is zero */ + return cpu->pmsav7_dregion << 8; + case 0xd94: /* MPU_CTRL */ + return cpu->env.v7m.mpu_ctrl[attrs.secure]; + case 0xd98: /* MPU_RNR */ + return cpu->env.pmsav7.rnr[attrs.secure]; + case 0xd9c: /* MPU_RBAR */ + case 0xda4: /* MPU_RBAR_A1 */ + case 0xdac: /* MPU_RBAR_A2 */ + case 0xdb4: /* MPU_RBAR_A3 */ + { + int region = cpu->env.pmsav7.rnr[attrs.secure]; + + if (arm_feature(&cpu->env, ARM_FEATURE_V8)) { + /* PMSAv8M handling of the aliases is different from v7M: + * aliases A1, A2, A3 override the low two bits of the region + * number in MPU_RNR, and there is no 'region' field in the + * RBAR register. + */ + int aliasno = (offset - 0xd9c) / 8; /* 0..3 */ + if (aliasno) { + region = deposit32(region, 0, 2, aliasno); + } + if (region >= cpu->pmsav7_dregion) { + return 0; + } + return cpu->env.pmsav8.rbar[attrs.secure][region]; + } + + if (region >= cpu->pmsav7_dregion) { + return 0; + } + return (cpu->env.pmsav7.drbar[region] & ~0x1f) | (region & 0xf); + } + case 0xda0: /* MPU_RASR (v7M), MPU_RLAR (v8M) */ + case 0xda8: /* MPU_RASR_A1 (v7M), MPU_RLAR_A1 (v8M) */ + case 0xdb0: /* MPU_RASR_A2 (v7M), MPU_RLAR_A2 (v8M) */ + case 0xdb8: /* MPU_RASR_A3 (v7M), MPU_RLAR_A3 (v8M) */ + { + int region = cpu->env.pmsav7.rnr[attrs.secure]; + + if (arm_feature(&cpu->env, ARM_FEATURE_V8)) { + /* PMSAv8M handling of the aliases is different from v7M: + * aliases A1, A2, A3 override the low two bits of the region + * number in MPU_RNR. + */ + int aliasno = (offset - 0xda0) / 8; /* 0..3 */ + if (aliasno) { + region = deposit32(region, 0, 2, aliasno); + } + if (region >= cpu->pmsav7_dregion) { + return 0; + } + return cpu->env.pmsav8.rlar[attrs.secure][region]; + } + + if (region >= cpu->pmsav7_dregion) { + return 0; + } + return ((cpu->env.pmsav7.dracr[region] & 0xffff) << 16) | + (cpu->env.pmsav7.drsr[region] & 0xffff); + } + case 0xdc0: /* MPU_MAIR0 */ + if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { + goto bad_offset; + } + return cpu->env.pmsav8.mair0[attrs.secure]; + case 0xdc4: /* MPU_MAIR1 */ + if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { + goto bad_offset; + } + return cpu->env.pmsav8.mair1[attrs.secure]; + case 0xdd0: /* SAU_CTRL */ + if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { + goto bad_offset; + } + if (!attrs.secure) { + return 0; + } + return cpu->env.sau.ctrl; + case 0xdd4: /* SAU_TYPE */ + if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { + goto bad_offset; + } + if (!attrs.secure) { + return 0; + } + return cpu->sau_sregion; + case 0xdd8: /* SAU_RNR */ + if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { + goto bad_offset; + } + if (!attrs.secure) { + return 0; + } + return cpu->env.sau.rnr; + case 0xddc: /* SAU_RBAR */ + { + int region = cpu->env.sau.rnr; + + if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { + goto bad_offset; + } + if (!attrs.secure) { + return 0; + } + if (region >= cpu->sau_sregion) { + return 0; + } + return cpu->env.sau.rbar[region]; + } + case 0xde0: /* SAU_RLAR */ + { + int region = cpu->env.sau.rnr; + + if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { + goto bad_offset; + } + if (!attrs.secure) { + return 0; + } + if (region >= cpu->sau_sregion) { + return 0; + } + return cpu->env.sau.rlar[region]; + } + case 0xde4: /* SFSR */ + if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { + goto bad_offset; + } + if (!attrs.secure) { + return 0; + } + return cpu->env.v7m.sfsr; + case 0xde8: /* SFAR */ + if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { + goto bad_offset; + } + if 
(!attrs.secure) { + return 0; + } + return cpu->env.v7m.sfar; + case 0xf04: /* RFSR */ + if (!cpu_isar_feature(aa32_ras, cpu)) { + goto bad_offset; + } + /* We provide minimal-RAS only: RFSR is RAZ/WI */ + return 0; + case 0xf34: /* FPCCR */ + if (!cpu_isar_feature(aa32_vfp_simd, cpu)) { + return 0; + } + if (attrs.secure) { + return cpu->env.v7m.fpccr[M_REG_S]; + } else { + /* + * NS can read LSPEN, CLRONRET and MONRDY. It can read + * BFRDY and HFRDY if AIRCR.BFHFNMINS != 0; + * other non-banked bits RAZ. + * TODO: MONRDY should RAZ/WI if DEMCR.SDME is set. + */ + uint32_t value = cpu->env.v7m.fpccr[M_REG_S]; + uint32_t mask = R_V7M_FPCCR_LSPEN_MASK | + R_V7M_FPCCR_CLRONRET_MASK | + R_V7M_FPCCR_MONRDY_MASK; + + if (s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) { + mask |= R_V7M_FPCCR_BFRDY_MASK | R_V7M_FPCCR_HFRDY_MASK; + } + + value &= mask; + + value |= cpu->env.v7m.fpccr[M_REG_NS]; + return value; + } + case 0xf38: /* FPCAR */ + if (!cpu_isar_feature(aa32_vfp_simd, cpu)) { + return 0; + } + return cpu->env.v7m.fpcar[attrs.secure]; + case 0xf3c: /* FPDSCR */ + if (!cpu_isar_feature(aa32_vfp_simd, cpu)) { + return 0; + } + return cpu->env.v7m.fpdscr[attrs.secure]; + case 0xf40: /* MVFR0 */ + return cpu->isar.mvfr0; + case 0xf44: /* MVFR1 */ + return cpu->isar.mvfr1; + case 0xf48: /* MVFR2 */ + return cpu->isar.mvfr2; + default: + bad_offset: + qemu_log_mask(LOG_GUEST_ERROR, "NVIC: Bad read offset 0x%x\n", offset); + return 0; + } +} + +static void nvic_writel(NVICState *s, uint32_t offset, uint32_t value, + MemTxAttrs attrs) +{ + ARMCPU *cpu = s->cpu; + + switch (offset) { + case 0xc: /* CPPWR */ + if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { + goto bad_offset; + } + /* Make the IMPDEF choice to RAZ/WI this. */ + break; + case 0x380 ... 0x3bf: /* NVIC_ITNS */ + { + int startvec = 8 * (offset - 0x380) + NVIC_FIRST_IRQ; + int i; + + if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { + goto bad_offset; + } + if (!attrs.secure) { + break; + } + for (i = 0; i < 32 && startvec + i < s->num_irq; i++) { + s->itns[startvec + i] = (value >> i) & 1; + } + nvic_irq_update(s); + break; + } + case 0xd04: /* Interrupt Control State (ICSR) */ + if (attrs.secure || cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) { + if (value & (1 << 31)) { + armv7m_nvic_set_pending(s, ARMV7M_EXCP_NMI, false); + } else if (value & (1 << 30) && + arm_feature(&cpu->env, ARM_FEATURE_V8)) { + /* PENDNMICLR didn't exist in v7M */ + armv7m_nvic_clear_pending(s, ARMV7M_EXCP_NMI, false); + } + } + if (value & (1 << 28)) { + armv7m_nvic_set_pending(s, ARMV7M_EXCP_PENDSV, attrs.secure); + } else if (value & (1 << 27)) { + armv7m_nvic_clear_pending(s, ARMV7M_EXCP_PENDSV, attrs.secure); + } + if (value & (1 << 26)) { + armv7m_nvic_set_pending(s, ARMV7M_EXCP_SYSTICK, attrs.secure); + } else if (value & (1 << 25)) { + armv7m_nvic_clear_pending(s, ARMV7M_EXCP_SYSTICK, attrs.secure); + } + break; + case 0xd08: /* Vector Table Offset. 
*/ + cpu->env.v7m.vecbase[attrs.secure] = value & 0xffffff80; + break; + case 0xd0c: /* Application Interrupt/Reset Control (AIRCR) */ + if ((value >> R_V7M_AIRCR_VECTKEY_SHIFT) == 0x05fa) { + if (value & R_V7M_AIRCR_SYSRESETREQ_MASK) { + if (attrs.secure || + !(cpu->env.v7m.aircr & R_V7M_AIRCR_SYSRESETREQS_MASK)) { + signal_sysresetreq(s); + } + } + if (value & R_V7M_AIRCR_VECTCLRACTIVE_MASK) { + qemu_log_mask(LOG_GUEST_ERROR, + "Setting VECTCLRACTIVE when not in DEBUG mode " + "is UNPREDICTABLE\n"); + } + if (value & R_V7M_AIRCR_VECTRESET_MASK) { + /* NB: this bit is RES0 in v8M */ + qemu_log_mask(LOG_GUEST_ERROR, + "Setting VECTRESET when not in DEBUG mode " + "is UNPREDICTABLE\n"); + } + if (arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) { + s->prigroup[attrs.secure] = + extract32(value, + R_V7M_AIRCR_PRIGROUP_SHIFT, + R_V7M_AIRCR_PRIGROUP_LENGTH); + } + /* AIRCR.IESB is RAZ/WI because we implement only minimal RAS */ + if (attrs.secure) { + /* These bits are only writable by secure */ + cpu->env.v7m.aircr = value & + (R_V7M_AIRCR_SYSRESETREQS_MASK | + R_V7M_AIRCR_BFHFNMINS_MASK | + R_V7M_AIRCR_PRIS_MASK); + /* BFHFNMINS changes the priority of Secure HardFault, and + * allows a pending Non-secure HardFault to preempt (which + * we implement by marking it enabled). + */ + if (cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) { + s->sec_vectors[ARMV7M_EXCP_HARD].prio = -3; + s->vectors[ARMV7M_EXCP_HARD].enabled = 1; + } else { + s->sec_vectors[ARMV7M_EXCP_HARD].prio = -1; + s->vectors[ARMV7M_EXCP_HARD].enabled = 0; + } + } + nvic_irq_update(s); + } + break; + case 0xd10: /* System Control. */ + if (!arm_feature(&cpu->env, ARM_FEATURE_V7)) { + goto bad_offset; + } + /* We don't implement deep-sleep so these bits are RAZ/WI. + * The other bits in the register are banked. + * QEMU's implementation ignores SEVONPEND and SLEEPONEXIT, which + * is architecturally permitted. + */ + value &= ~(R_V7M_SCR_SLEEPDEEP_MASK | R_V7M_SCR_SLEEPDEEPS_MASK); + cpu->env.v7m.scr[attrs.secure] = value; + break; + case 0xd14: /* Configuration Control. 
*/ + { + uint32_t mask; + + if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) { + goto bad_offset; + } + + /* Enforce RAZ/WI on reserved and must-RAZ/WI bits */ + mask = R_V7M_CCR_STKALIGN_MASK | + R_V7M_CCR_BFHFNMIGN_MASK | + R_V7M_CCR_DIV_0_TRP_MASK | + R_V7M_CCR_UNALIGN_TRP_MASK | + R_V7M_CCR_USERSETMPEND_MASK | + R_V7M_CCR_NONBASETHRDENA_MASK; + if (arm_feature(&cpu->env, ARM_FEATURE_V8_1M) && attrs.secure) { + /* TRD is always RAZ/WI from NS */ + mask |= R_V7M_CCR_TRD_MASK; + } + value &= mask; + + if (arm_feature(&cpu->env, ARM_FEATURE_V8)) { + /* v8M makes NONBASETHRDENA and STKALIGN be RES1 */ + value |= R_V7M_CCR_NONBASETHRDENA_MASK + | R_V7M_CCR_STKALIGN_MASK; + } + if (attrs.secure) { + /* the BFHFNMIGN bit is not banked; keep that in the NS copy */ + cpu->env.v7m.ccr[M_REG_NS] = + (cpu->env.v7m.ccr[M_REG_NS] & ~R_V7M_CCR_BFHFNMIGN_MASK) + | (value & R_V7M_CCR_BFHFNMIGN_MASK); + value &= ~R_V7M_CCR_BFHFNMIGN_MASK; + } else { + /* + * BFHFNMIGN is RAZ/WI from NS if AIRCR.BFHFNMINS is 0, so + * preserve the state currently in the NS element of the array + */ + if (!(cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) { + value &= ~R_V7M_CCR_BFHFNMIGN_MASK; + value |= cpu->env.v7m.ccr[M_REG_NS] & R_V7M_CCR_BFHFNMIGN_MASK; + } + } + + cpu->env.v7m.ccr[attrs.secure] = value; + break; + } + case 0xd24: /* System Handler Control and State (SHCSR) */ + if (!arm_feature(&cpu->env, ARM_FEATURE_V7)) { + goto bad_offset; + } + if (attrs.secure) { + s->sec_vectors[ARMV7M_EXCP_MEM].active = (value & (1 << 0)) != 0; + /* Secure HardFault active bit cannot be written */ + s->sec_vectors[ARMV7M_EXCP_USAGE].active = (value & (1 << 3)) != 0; + s->sec_vectors[ARMV7M_EXCP_SVC].active = (value & (1 << 7)) != 0; + s->sec_vectors[ARMV7M_EXCP_PENDSV].active = + (value & (1 << 10)) != 0; + s->sec_vectors[ARMV7M_EXCP_SYSTICK].active = + (value & (1 << 11)) != 0; + s->sec_vectors[ARMV7M_EXCP_USAGE].pending = + (value & (1 << 12)) != 0; + s->sec_vectors[ARMV7M_EXCP_MEM].pending = (value & (1 << 13)) != 0; + s->sec_vectors[ARMV7M_EXCP_SVC].pending = (value & (1 << 15)) != 0; + s->sec_vectors[ARMV7M_EXCP_MEM].enabled = (value & (1 << 16)) != 0; + s->sec_vectors[ARMV7M_EXCP_BUS].enabled = (value & (1 << 17)) != 0; + s->sec_vectors[ARMV7M_EXCP_USAGE].enabled = + (value & (1 << 18)) != 0; + s->sec_vectors[ARMV7M_EXCP_HARD].pending = (value & (1 << 21)) != 0; + /* SecureFault not banked, but RAZ/WI to NS */ + s->vectors[ARMV7M_EXCP_SECURE].active = (value & (1 << 4)) != 0; + s->vectors[ARMV7M_EXCP_SECURE].enabled = (value & (1 << 19)) != 0; + s->vectors[ARMV7M_EXCP_SECURE].pending = (value & (1 << 20)) != 0; + } else { + s->vectors[ARMV7M_EXCP_MEM].active = (value & (1 << 0)) != 0; + if (arm_feature(&cpu->env, ARM_FEATURE_V8)) { + /* HARDFAULTPENDED is not present in v7M */ + s->vectors[ARMV7M_EXCP_HARD].pending = (value & (1 << 21)) != 0; + } + s->vectors[ARMV7M_EXCP_USAGE].active = (value & (1 << 3)) != 0; + s->vectors[ARMV7M_EXCP_SVC].active = (value & (1 << 7)) != 0; + s->vectors[ARMV7M_EXCP_PENDSV].active = (value & (1 << 10)) != 0; + s->vectors[ARMV7M_EXCP_SYSTICK].active = (value & (1 << 11)) != 0; + s->vectors[ARMV7M_EXCP_USAGE].pending = (value & (1 << 12)) != 0; + s->vectors[ARMV7M_EXCP_MEM].pending = (value & (1 << 13)) != 0; + s->vectors[ARMV7M_EXCP_SVC].pending = (value & (1 << 15)) != 0; + s->vectors[ARMV7M_EXCP_MEM].enabled = (value & (1 << 16)) != 0; + s->vectors[ARMV7M_EXCP_USAGE].enabled = (value & (1 << 18)) != 0; + } + if (attrs.secure || (cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) { + 
s->vectors[ARMV7M_EXCP_BUS].active = (value & (1 << 1)) != 0; + s->vectors[ARMV7M_EXCP_BUS].pending = (value & (1 << 14)) != 0; + s->vectors[ARMV7M_EXCP_BUS].enabled = (value & (1 << 17)) != 0; + } + /* NMIACT can only be written if the write is of a zero, with + * BFHFNMINS 1, and by the CPU in secure state via the NS alias. + */ + if (!attrs.secure && cpu->env.v7m.secure && + (cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) && + (value & (1 << 5)) == 0) { + s->vectors[ARMV7M_EXCP_NMI].active = 0; + } + /* HARDFAULTACT can only be written if the write is of a zero + * to the non-secure HardFault state by the CPU in secure state. + * The only case where we can be targeting the non-secure HF state + * when in secure state is if this is a write via the NS alias + * and BFHFNMINS is 1. + */ + if (!attrs.secure && cpu->env.v7m.secure && + (cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) && + (value & (1 << 2)) == 0) { + s->vectors[ARMV7M_EXCP_HARD].active = 0; + } + + /* TODO: this is RAZ/WI from NS if DEMCR.SDME is set */ + s->vectors[ARMV7M_EXCP_DEBUG].active = (value & (1 << 8)) != 0; + nvic_irq_update(s); + break; + case 0xd2c: /* Hard Fault Status. */ + if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) { + goto bad_offset; + } + cpu->env.v7m.hfsr &= ~value; /* W1C */ + break; + case 0xd30: /* Debug Fault Status. */ + cpu->env.v7m.dfsr &= ~value; /* W1C */ + break; + case 0xd34: /* Mem Manage Address. */ + if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) { + goto bad_offset; + } + cpu->env.v7m.mmfar[attrs.secure] = value; + return; + case 0xd38: /* Bus Fault Address. */ + if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) { + goto bad_offset; + } + if (!attrs.secure && + !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) { + return; + } + cpu->env.v7m.bfar = value; + return; + case 0xd3c: /* Aux Fault Status. 
*/ + qemu_log_mask(LOG_UNIMP, + "NVIC: Aux fault status registers unimplemented\n"); + break; + case 0xd84: /* CSSELR */ + if (!arm_v7m_csselr_razwi(cpu)) { + cpu->env.v7m.csselr[attrs.secure] = value & R_V7M_CSSELR_INDEX_MASK; + } + break; + case 0xd88: /* CPACR */ + if (cpu_isar_feature(aa32_vfp_simd, cpu)) { + /* We implement only the Floating Point extension's CP10/CP11 */ + cpu->env.v7m.cpacr[attrs.secure] = value & (0xf << 20); + } + break; + case 0xd8c: /* NSACR */ + if (attrs.secure && cpu_isar_feature(aa32_vfp_simd, cpu)) { + /* We implement only the Floating Point extension's CP10/CP11 */ + cpu->env.v7m.nsacr = value & (3 << 10); + } + break; + case 0xd90: /* MPU_TYPE */ + return; /* RO */ + case 0xd94: /* MPU_CTRL */ + if ((value & + (R_V7M_MPU_CTRL_HFNMIENA_MASK | R_V7M_MPU_CTRL_ENABLE_MASK)) + == R_V7M_MPU_CTRL_HFNMIENA_MASK) { + qemu_log_mask(LOG_GUEST_ERROR, "MPU_CTRL: HFNMIENA and !ENABLE is " + "UNPREDICTABLE\n"); + } + cpu->env.v7m.mpu_ctrl[attrs.secure] + = value & (R_V7M_MPU_CTRL_ENABLE_MASK | + R_V7M_MPU_CTRL_HFNMIENA_MASK | + R_V7M_MPU_CTRL_PRIVDEFENA_MASK); + tlb_flush(CPU(cpu)); + break; + case 0xd98: /* MPU_RNR */ + if (value >= cpu->pmsav7_dregion) { + qemu_log_mask(LOG_GUEST_ERROR, "MPU region out of range %" + PRIu32 "/%" PRIu32 "\n", + value, cpu->pmsav7_dregion); + } else { + cpu->env.pmsav7.rnr[attrs.secure] = value; + } + break; + case 0xd9c: /* MPU_RBAR */ + case 0xda4: /* MPU_RBAR_A1 */ + case 0xdac: /* MPU_RBAR_A2 */ + case 0xdb4: /* MPU_RBAR_A3 */ + { + int region; + + if (arm_feature(&cpu->env, ARM_FEATURE_V8)) { + /* PMSAv8M handling of the aliases is different from v7M: + * aliases A1, A2, A3 override the low two bits of the region + * number in MPU_RNR, and there is no 'region' field in the + * RBAR register. + */ + int aliasno = (offset - 0xd9c) / 8; /* 0..3 */ + + region = cpu->env.pmsav7.rnr[attrs.secure]; + if (aliasno) { + region = deposit32(region, 0, 2, aliasno); + } + if (region >= cpu->pmsav7_dregion) { + return; + } + cpu->env.pmsav8.rbar[attrs.secure][region] = value; + tlb_flush(CPU(cpu)); + return; + } + + if (value & (1 << 4)) { + /* VALID bit means use the region number specified in this + * value and also update MPU_RNR.REGION with that value. + */ + region = extract32(value, 0, 4); + if (region >= cpu->pmsav7_dregion) { + qemu_log_mask(LOG_GUEST_ERROR, + "MPU region out of range %u/%" PRIu32 "\n", + region, cpu->pmsav7_dregion); + return; + } + cpu->env.pmsav7.rnr[attrs.secure] = region; + } else { + region = cpu->env.pmsav7.rnr[attrs.secure]; + } + + if (region >= cpu->pmsav7_dregion) { + return; + } + + cpu->env.pmsav7.drbar[region] = value & ~0x1f; + tlb_flush(CPU(cpu)); + break; + } + case 0xda0: /* MPU_RASR (v7M), MPU_RLAR (v8M) */ + case 0xda8: /* MPU_RASR_A1 (v7M), MPU_RLAR_A1 (v8M) */ + case 0xdb0: /* MPU_RASR_A2 (v7M), MPU_RLAR_A2 (v8M) */ + case 0xdb8: /* MPU_RASR_A3 (v7M), MPU_RLAR_A3 (v8M) */ + { + int region = cpu->env.pmsav7.rnr[attrs.secure]; + + if (arm_feature(&cpu->env, ARM_FEATURE_V8)) { + /* PMSAv8M handling of the aliases is different from v7M: + * aliases A1, A2, A3 override the low two bits of the region + * number in MPU_RNR. 
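+             * For example, with MPU_RNR == 5 a write via the A2 alias
+             * targets region 6: deposit32() below replaces the low two
+             * bits 0b01 with the alias number 0b10.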
+ */ + int aliasno = (offset - 0xd9c) / 8; /* 0..3 */ + + region = cpu->env.pmsav7.rnr[attrs.secure]; + if (aliasno) { + region = deposit32(region, 0, 2, aliasno); + } + if (region >= cpu->pmsav7_dregion) { + return; + } + cpu->env.pmsav8.rlar[attrs.secure][region] = value; + tlb_flush(CPU(cpu)); + return; + } + + if (region >= cpu->pmsav7_dregion) { + return; + } + + cpu->env.pmsav7.drsr[region] = value & 0xff3f; + cpu->env.pmsav7.dracr[region] = (value >> 16) & 0x173f; + tlb_flush(CPU(cpu)); + break; + } + case 0xdc0: /* MPU_MAIR0 */ + if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { + goto bad_offset; + } + if (cpu->pmsav7_dregion) { + /* Register is RES0 if no MPU regions are implemented */ + cpu->env.pmsav8.mair0[attrs.secure] = value; + } + /* We don't need to do anything else because memory attributes + * only affect cacheability, and we don't implement caching. + */ + break; + case 0xdc4: /* MPU_MAIR1 */ + if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { + goto bad_offset; + } + if (cpu->pmsav7_dregion) { + /* Register is RES0 if no MPU regions are implemented */ + cpu->env.pmsav8.mair1[attrs.secure] = value; + } + /* We don't need to do anything else because memory attributes + * only affect cacheability, and we don't implement caching. + */ + break; + case 0xdd0: /* SAU_CTRL */ + if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { + goto bad_offset; + } + if (!attrs.secure) { + return; + } + cpu->env.sau.ctrl = value & 3; + break; + case 0xdd4: /* SAU_TYPE */ + if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { + goto bad_offset; + } + break; + case 0xdd8: /* SAU_RNR */ + if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { + goto bad_offset; + } + if (!attrs.secure) { + return; + } + if (value >= cpu->sau_sregion) { + qemu_log_mask(LOG_GUEST_ERROR, "SAU region out of range %" + PRIu32 "/%" PRIu32 "\n", + value, cpu->sau_sregion); + } else { + cpu->env.sau.rnr = value; + } + break; + case 0xddc: /* SAU_RBAR */ + { + int region = cpu->env.sau.rnr; + + if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { + goto bad_offset; + } + if (!attrs.secure) { + return; + } + if (region >= cpu->sau_sregion) { + return; + } + cpu->env.sau.rbar[region] = value & ~0x1f; + tlb_flush(CPU(cpu)); + break; + } + case 0xde0: /* SAU_RLAR */ + { + int region = cpu->env.sau.rnr; + + if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { + goto bad_offset; + } + if (!attrs.secure) { + return; + } + if (region >= cpu->sau_sregion) { + return; + } + cpu->env.sau.rlar[region] = value & ~0x1c; + tlb_flush(CPU(cpu)); + break; + } + case 0xde4: /* SFSR */ + if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { + goto bad_offset; + } + if (!attrs.secure) { + return; + } + cpu->env.v7m.sfsr &= ~value; /* W1C */ + break; + case 0xde8: /* SFAR */ + if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { + goto bad_offset; + } + if (!attrs.secure) { + return; + } + cpu->env.v7m.sfsr = value; + break; + case 0xf00: /* Software Triggered Interrupt Register */ + { + int excnum = (value & 0x1ff) + NVIC_FIRST_IRQ; + + if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) { + goto bad_offset; + } + + if (excnum < s->num_irq) { + armv7m_nvic_set_pending(s, excnum, false); + } + break; + } + case 0xf04: /* RFSR */ + if (!cpu_isar_feature(aa32_ras, cpu)) { + goto bad_offset; + } + /* We provide minimal-RAS only: RFSR is RAZ/WI */ + break; + case 0xf34: /* FPCCR */ + if (cpu_isar_feature(aa32_vfp_simd, cpu)) { + /* Not all bits here are banked. 
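+             * Only the bits in R_V7M_FPCCR_BANKED_MASK are banked
+             * between security states; everything else lives in the S
+             * copy of the register, with limited NS write access
+             * merged in below.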
*/ + uint32_t fpccr_s; + + if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { + /* Don't allow setting of bits not present in v7M */ + value &= (R_V7M_FPCCR_LSPACT_MASK | + R_V7M_FPCCR_USER_MASK | + R_V7M_FPCCR_THREAD_MASK | + R_V7M_FPCCR_HFRDY_MASK | + R_V7M_FPCCR_MMRDY_MASK | + R_V7M_FPCCR_BFRDY_MASK | + R_V7M_FPCCR_MONRDY_MASK | + R_V7M_FPCCR_LSPEN_MASK | + R_V7M_FPCCR_ASPEN_MASK); + } + value &= ~R_V7M_FPCCR_RES0_MASK; + + if (!attrs.secure) { + /* Some non-banked bits are configurably writable by NS */ + fpccr_s = cpu->env.v7m.fpccr[M_REG_S]; + if (!(fpccr_s & R_V7M_FPCCR_LSPENS_MASK)) { + uint32_t lspen = FIELD_EX32(value, V7M_FPCCR, LSPEN); + fpccr_s = FIELD_DP32(fpccr_s, V7M_FPCCR, LSPEN, lspen); + } + if (!(fpccr_s & R_V7M_FPCCR_CLRONRETS_MASK)) { + uint32_t cor = FIELD_EX32(value, V7M_FPCCR, CLRONRET); + fpccr_s = FIELD_DP32(fpccr_s, V7M_FPCCR, CLRONRET, cor); + } + if ((s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) { + uint32_t hfrdy = FIELD_EX32(value, V7M_FPCCR, HFRDY); + uint32_t bfrdy = FIELD_EX32(value, V7M_FPCCR, BFRDY); + fpccr_s = FIELD_DP32(fpccr_s, V7M_FPCCR, HFRDY, hfrdy); + fpccr_s = FIELD_DP32(fpccr_s, V7M_FPCCR, BFRDY, bfrdy); + } + /* TODO MONRDY should RAZ/WI if DEMCR.SDME is set */ + { + uint32_t monrdy = FIELD_EX32(value, V7M_FPCCR, MONRDY); + fpccr_s = FIELD_DP32(fpccr_s, V7M_FPCCR, MONRDY, monrdy); + } + + /* + * All other non-banked bits are RAZ/WI from NS; write + * just the banked bits to fpccr[M_REG_NS]. + */ + value &= R_V7M_FPCCR_BANKED_MASK; + cpu->env.v7m.fpccr[M_REG_NS] = value; + } else { + fpccr_s = value; + } + cpu->env.v7m.fpccr[M_REG_S] = fpccr_s; + } + break; + case 0xf38: /* FPCAR */ + if (cpu_isar_feature(aa32_vfp_simd, cpu)) { + value &= ~7; + cpu->env.v7m.fpcar[attrs.secure] = value; + } + break; + case 0xf3c: /* FPDSCR */ + if (cpu_isar_feature(aa32_vfp_simd, cpu)) { + uint32_t mask = FPCR_AHP | FPCR_DN | FPCR_FZ | FPCR_RMODE_MASK; + if (cpu_isar_feature(any_fp16, cpu)) { + mask |= FPCR_FZ16; + } + value &= mask; + if (cpu_isar_feature(aa32_lob, cpu)) { + value |= 4 << FPCR_LTPSIZE_SHIFT; + } + cpu->env.v7m.fpdscr[attrs.secure] = value; + } + break; + case 0xf50: /* ICIALLU */ + case 0xf58: /* ICIMVAU */ + case 0xf5c: /* DCIMVAC */ + case 0xf60: /* DCISW */ + case 0xf64: /* DCCMVAU */ + case 0xf68: /* DCCMVAC */ + case 0xf6c: /* DCCSW */ + case 0xf70: /* DCCIMVAC */ + case 0xf74: /* DCCISW */ + case 0xf78: /* BPIALL */ + /* Cache and branch predictor maintenance: for QEMU these always NOP */ + break; + default: + bad_offset: + qemu_log_mask(LOG_GUEST_ERROR, + "NVIC: Bad write offset 0x%x\n", offset); + } +} + +static bool nvic_user_access_ok(NVICState *s, hwaddr offset, MemTxAttrs attrs) +{ + /* Return true if unprivileged access to this register is permitted. */ + switch (offset) { + case 0xf00: /* STIR: accessible only if CCR.USERSETMPEND permits */ + /* For access via STIR_NS it is the NS CCR.USERSETMPEND that + * controls access even though the CPU is in Secure state (I_QDKX). + */ + return s->cpu->env.v7m.ccr[attrs.secure] & R_V7M_CCR_USERSETMPEND_MASK; + default: + /* All other user accesses cause a BusFault unconditionally */ + return false; + } +} + +static int shpr_bank(NVICState *s, int exc, MemTxAttrs attrs) +{ + /* Behaviour for the SHPR register field for this exception: + * return M_REG_NS to use the nonsecure vector (including for + * non-banked exceptions), M_REG_S for the secure version of + * a banked exception, and -1 if this field should RAZ/WI. 
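+     *
+     * For example, an access to the SVCall priority field returns
+     * M_REG_NS or M_REG_S according to attrs.secure, while a
+     * nonsecure access to the SecureFault field returns -1.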
+ */
+    switch (exc) {
+    case ARMV7M_EXCP_MEM:
+    case ARMV7M_EXCP_USAGE:
+    case ARMV7M_EXCP_SVC:
+    case ARMV7M_EXCP_PENDSV:
+    case ARMV7M_EXCP_SYSTICK:
+        /* Banked exceptions */
+        return attrs.secure;
+    case ARMV7M_EXCP_BUS:
+        /* Not banked, RAZ/WI from nonsecure if BFHFNMINS is zero */
+        if (!attrs.secure &&
+            !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
+            return -1;
+        }
+        return M_REG_NS;
+    case ARMV7M_EXCP_SECURE:
+        /* Not banked, RAZ/WI from nonsecure */
+        if (!attrs.secure) {
+            return -1;
+        }
+        return M_REG_NS;
+    case ARMV7M_EXCP_DEBUG:
+        /* Not banked. TODO should RAZ/WI if DEMCR.SDME is set */
+        return M_REG_NS;
+    case 8 ... 10:
+    case 13:
+        /* RES0 */
+        return -1;
+    default:
+        /* Not reachable due to decode of SHPR register addresses */
+        g_assert_not_reached();
+    }
+}
+
+static MemTxResult nvic_sysreg_read(void *opaque, hwaddr addr,
+                                    uint64_t *data, unsigned size,
+                                    MemTxAttrs attrs)
+{
+    NVICState *s = (NVICState *)opaque;
+    uint32_t offset = addr;
+    unsigned i, startvec, end;
+    uint32_t val;
+
+    if (attrs.user && !nvic_user_access_ok(s, addr, attrs)) {
+        /* Generate BusFault for unprivileged accesses */
+        return MEMTX_ERROR;
+    }
+
+    switch (offset) {
+    /* reads of set and clear both return the status */
+    case 0x100 ... 0x13f: /* NVIC Set enable */
+        offset += 0x80;
+        /* fall through */
+    case 0x180 ... 0x1bf: /* NVIC Clear enable */
+        val = 0;
+        startvec = 8 * (offset - 0x180) + NVIC_FIRST_IRQ; /* vector # */
+
+        for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) {
+            if (s->vectors[startvec + i].enabled &&
+                (attrs.secure || s->itns[startvec + i])) {
+                val |= (1 << i);
+            }
+        }
+        break;
+    case 0x200 ... 0x23f: /* NVIC Set pend */
+        offset += 0x80;
+        /* fall through */
+    case 0x280 ... 0x2bf: /* NVIC Clear pend */
+        val = 0;
+        startvec = 8 * (offset - 0x280) + NVIC_FIRST_IRQ; /* vector # */
+        for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) {
+            if (s->vectors[startvec + i].pending &&
+                (attrs.secure || s->itns[startvec + i])) {
+                val |= (1 << i);
+            }
+        }
+        break;
+    case 0x300 ... 0x33f: /* NVIC Active */
+        val = 0;
+
+        if (!arm_feature(&s->cpu->env, ARM_FEATURE_V7)) {
+            break;
+        }
+
+        startvec = 8 * (offset - 0x300) + NVIC_FIRST_IRQ; /* vector # */
+
+        for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) {
+            if (s->vectors[startvec + i].active &&
+                (attrs.secure || s->itns[startvec + i])) {
+                val |= (1 << i);
+            }
+        }
+        break;
+    case 0x400 ... 0x5ef: /* NVIC Priority */
+        val = 0;
+        startvec = offset - 0x400 + NVIC_FIRST_IRQ; /* vector # */
+
+        for (i = 0; i < size && startvec + i < s->num_irq; i++) {
+            if (attrs.secure || s->itns[startvec + i]) {
+                val |= s->vectors[startvec + i].prio << (8 * i);
+            }
+        }
+        break;
+    case 0xd18 ... 0xd1b: /* System Handler Priority (SHPR1) */
+        if (!arm_feature(&s->cpu->env, ARM_FEATURE_M_MAIN)) {
+            val = 0;
+            break;
+        }
+        /* fall through */
+    case 0xd1c ... 0xd23: /* System Handler Priority (SHPR2, SHPR3) */
+        val = 0;
+        for (i = 0; i < size; i++) {
+            unsigned hdlidx = (offset - 0xd14) + i;
+            int sbank = shpr_bank(s, hdlidx, attrs);
+
+            if (sbank < 0) {
+                continue;
+            }
+            val = deposit32(val, i * 8, 8, get_prio(s, hdlidx, sbank));
+        }
+        break;
+    case 0xd28 ... 0xd2b: /* Configurable Fault Status (CFSR) */
+        if (!arm_feature(&s->cpu->env, ARM_FEATURE_M_MAIN)) {
+            val = 0;
+            break;
+        }
+        /*
+         * The BFSR bits [15:8] are shared between security states
+         * and we store them in the NS copy. They are RAZ/WI for
+         * NS code if AIRCR.BFHFNMINS is 0.
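+         * The code below therefore merges the shared BFSR byte from
+         * the NS copy into secure reads, and masks it out of
+         * nonsecure reads when BFHFNMINS is 0.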
+ */ + val = s->cpu->env.v7m.cfsr[attrs.secure]; + if (!attrs.secure && + !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) { + val &= ~R_V7M_CFSR_BFSR_MASK; + } else { + val |= s->cpu->env.v7m.cfsr[M_REG_NS] & R_V7M_CFSR_BFSR_MASK; + } + val = extract32(val, (offset - 0xd28) * 8, size * 8); + break; + case 0xfe0 ... 0xfff: /* ID. */ + if (offset & 3) { + val = 0; + } else { + val = nvic_id[(offset - 0xfe0) >> 2]; + } + break; + default: + if (size == 4) { + val = nvic_readl(s, offset, attrs); + } else { + qemu_log_mask(LOG_GUEST_ERROR, + "NVIC: Bad read of size %d at offset 0x%x\n", + size, offset); + val = 0; + } + } + + trace_nvic_sysreg_read(addr, val, size); + *data = val; + return MEMTX_OK; +} + +static MemTxResult nvic_sysreg_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size, + MemTxAttrs attrs) +{ + NVICState *s = (NVICState *)opaque; + uint32_t offset = addr; + unsigned i, startvec, end; + unsigned setval = 0; + + trace_nvic_sysreg_write(addr, value, size); + + if (attrs.user && !nvic_user_access_ok(s, addr, attrs)) { + /* Generate BusFault for unprivileged accesses */ + return MEMTX_ERROR; + } + + switch (offset) { + case 0x100 ... 0x13f: /* NVIC Set enable */ + offset += 0x80; + setval = 1; + /* fall through */ + case 0x180 ... 0x1bf: /* NVIC Clear enable */ + startvec = 8 * (offset - 0x180) + NVIC_FIRST_IRQ; + + for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) { + if (value & (1 << i) && + (attrs.secure || s->itns[startvec + i])) { + s->vectors[startvec + i].enabled = setval; + } + } + nvic_irq_update(s); + goto exit_ok; + case 0x200 ... 0x23f: /* NVIC Set pend */ + /* the special logic in armv7m_nvic_set_pending() + * is not needed since IRQs are never escalated + */ + offset += 0x80; + setval = 1; + /* fall through */ + case 0x280 ... 0x2bf: /* NVIC Clear pend */ + startvec = 8 * (offset - 0x280) + NVIC_FIRST_IRQ; /* vector # */ + + for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) { + if (value & (1 << i) && + (attrs.secure || s->itns[startvec + i])) { + s->vectors[startvec + i].pending = setval; + } + } + nvic_irq_update(s); + goto exit_ok; + case 0x300 ... 0x33f: /* NVIC Active */ + goto exit_ok; /* R/O */ + case 0x400 ... 0x5ef: /* NVIC Priority */ + startvec = (offset - 0x400) + NVIC_FIRST_IRQ; /* vector # */ + + for (i = 0; i < size && startvec + i < s->num_irq; i++) { + if (attrs.secure || s->itns[startvec + i]) { + set_prio(s, startvec + i, false, (value >> (i * 8)) & 0xff); + } + } + nvic_irq_update(s); + goto exit_ok; + case 0xd18 ... 0xd1b: /* System Handler Priority (SHPR1) */ + if (!arm_feature(&s->cpu->env, ARM_FEATURE_M_MAIN)) { + goto exit_ok; + } + /* fall through */ + case 0xd1c ... 0xd23: /* System Handler Priority (SHPR2, SHPR3) */ + for (i = 0; i < size; i++) { + unsigned hdlidx = (offset - 0xd14) + i; + int newprio = extract32(value, i * 8, 8); + int sbank = shpr_bank(s, hdlidx, attrs); + + if (sbank < 0) { + continue; + } + set_prio(s, hdlidx, sbank, newprio); + } + nvic_irq_update(s); + goto exit_ok; + case 0xd28 ... 
0xd2b: /* Configurable Fault Status (CFSR) */ + if (!arm_feature(&s->cpu->env, ARM_FEATURE_M_MAIN)) { + goto exit_ok; + } + /* All bits are W1C, so construct 32 bit value with 0s in + * the parts not written by the access size + */ + value <<= ((offset - 0xd28) * 8); + + if (!attrs.secure && + !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) { + /* BFSR bits are RAZ/WI for NS if BFHFNMINS is set */ + value &= ~R_V7M_CFSR_BFSR_MASK; + } + + s->cpu->env.v7m.cfsr[attrs.secure] &= ~value; + if (attrs.secure) { + /* The BFSR bits [15:8] are shared between security states + * and we store them in the NS copy. + */ + s->cpu->env.v7m.cfsr[M_REG_NS] &= ~(value & R_V7M_CFSR_BFSR_MASK); + } + goto exit_ok; + } + if (size == 4) { + nvic_writel(s, offset, value, attrs); + goto exit_ok; + } + qemu_log_mask(LOG_GUEST_ERROR, + "NVIC: Bad write of size %d at offset 0x%x\n", size, offset); + /* This is UNPREDICTABLE; treat as RAZ/WI */ + + exit_ok: + /* Ensure any changes made are reflected in the cached hflags. */ + arm_rebuild_hflags(&s->cpu->env); + return MEMTX_OK; +} + +static const MemoryRegionOps nvic_sysreg_ops = { + .read_with_attrs = nvic_sysreg_read, + .write_with_attrs = nvic_sysreg_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static int nvic_post_load(void *opaque, int version_id) +{ + NVICState *s = opaque; + unsigned i; + int resetprio; + + /* Check for out of range priority settings */ + resetprio = arm_feature(&s->cpu->env, ARM_FEATURE_V8) ? -4 : -3; + + if (s->vectors[ARMV7M_EXCP_RESET].prio != resetprio || + s->vectors[ARMV7M_EXCP_NMI].prio != -2 || + s->vectors[ARMV7M_EXCP_HARD].prio != -1) { + return 1; + } + for (i = ARMV7M_EXCP_MEM; i < s->num_irq; i++) { + if (s->vectors[i].prio & ~0xff) { + return 1; + } + } + + nvic_recompute_state(s); + + return 0; +} + +static const VMStateDescription vmstate_VecInfo = { + .name = "armv7m_nvic_info", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_INT16(prio, VecInfo), + VMSTATE_UINT8(enabled, VecInfo), + VMSTATE_UINT8(pending, VecInfo), + VMSTATE_UINT8(active, VecInfo), + VMSTATE_UINT8(level, VecInfo), + VMSTATE_END_OF_LIST() + } +}; + +static bool nvic_security_needed(void *opaque) +{ + NVICState *s = opaque; + + return arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY); +} + +static int nvic_security_post_load(void *opaque, int version_id) +{ + NVICState *s = opaque; + int i; + + /* Check for out of range priority settings */ + if (s->sec_vectors[ARMV7M_EXCP_HARD].prio != -1 + && s->sec_vectors[ARMV7M_EXCP_HARD].prio != -3) { + /* We can't cross-check against AIRCR.BFHFNMINS as we don't know + * if the CPU state has been migrated yet; a mismatch won't + * cause the emulation to blow up, though. 
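+         * (The only values this field can legitimately hold are -1,
+         * for AIRCR.BFHFNMINS == 0, and -3 for BFHFNMINS == 1;
+         * anything else means the inbound migration data is corrupt.)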
+ */ + return 1; + } + for (i = ARMV7M_EXCP_MEM; i < ARRAY_SIZE(s->sec_vectors); i++) { + if (s->sec_vectors[i].prio & ~0xff) { + return 1; + } + } + return 0; +} + +static const VMStateDescription vmstate_nvic_security = { + .name = "armv7m_nvic/m-security", + .version_id = 1, + .minimum_version_id = 1, + .needed = nvic_security_needed, + .post_load = &nvic_security_post_load, + .fields = (VMStateField[]) { + VMSTATE_STRUCT_ARRAY(sec_vectors, NVICState, NVIC_INTERNAL_VECTORS, 1, + vmstate_VecInfo, VecInfo), + VMSTATE_UINT32(prigroup[M_REG_S], NVICState), + VMSTATE_BOOL_ARRAY(itns, NVICState, NVIC_MAX_VECTORS), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_nvic = { + .name = "armv7m_nvic", + .version_id = 4, + .minimum_version_id = 4, + .post_load = &nvic_post_load, + .fields = (VMStateField[]) { + VMSTATE_STRUCT_ARRAY(vectors, NVICState, NVIC_MAX_VECTORS, 1, + vmstate_VecInfo, VecInfo), + VMSTATE_UINT32(prigroup[M_REG_NS], NVICState), + VMSTATE_END_OF_LIST() + }, + .subsections = (const VMStateDescription*[]) { + &vmstate_nvic_security, + NULL + } +}; + +static Property props_nvic[] = { + /* Number of external IRQ lines (so excluding the 16 internal exceptions) */ + DEFINE_PROP_UINT32("num-irq", NVICState, num_irq, 64), + DEFINE_PROP_END_OF_LIST() +}; + +static void armv7m_nvic_reset(DeviceState *dev) +{ + int resetprio; + NVICState *s = NVIC(dev); + + memset(s->vectors, 0, sizeof(s->vectors)); + memset(s->sec_vectors, 0, sizeof(s->sec_vectors)); + s->prigroup[M_REG_NS] = 0; + s->prigroup[M_REG_S] = 0; + + s->vectors[ARMV7M_EXCP_NMI].enabled = 1; + /* MEM, BUS, and USAGE are enabled through + * the System Handler Control register + */ + s->vectors[ARMV7M_EXCP_SVC].enabled = 1; + s->vectors[ARMV7M_EXCP_PENDSV].enabled = 1; + s->vectors[ARMV7M_EXCP_SYSTICK].enabled = 1; + + /* DebugMonitor is enabled via DEMCR.MON_EN */ + s->vectors[ARMV7M_EXCP_DEBUG].enabled = 0; + + resetprio = arm_feature(&s->cpu->env, ARM_FEATURE_V8) ? -4 : -3; + s->vectors[ARMV7M_EXCP_RESET].prio = resetprio; + s->vectors[ARMV7M_EXCP_NMI].prio = -2; + s->vectors[ARMV7M_EXCP_HARD].prio = -1; + + if (arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY)) { + s->sec_vectors[ARMV7M_EXCP_HARD].enabled = 1; + s->sec_vectors[ARMV7M_EXCP_SVC].enabled = 1; + s->sec_vectors[ARMV7M_EXCP_PENDSV].enabled = 1; + s->sec_vectors[ARMV7M_EXCP_SYSTICK].enabled = 1; + + /* AIRCR.BFHFNMINS resets to 0 so Secure HF is priority -1 (R_CMTC) */ + s->sec_vectors[ARMV7M_EXCP_HARD].prio = -1; + /* If AIRCR.BFHFNMINS is 0 then NS HF is (effectively) disabled */ + s->vectors[ARMV7M_EXCP_HARD].enabled = 0; + } else { + s->vectors[ARMV7M_EXCP_HARD].enabled = 1; + } + + /* Strictly speaking the reset handler should be enabled. + * However, we don't simulate soft resets through the NVIC, + * and the reset vector should never be pended. + * So we leave it disabled to catch logic errors. + */ + + s->exception_prio = NVIC_NOEXC_PRIO; + s->vectpending = 0; + s->vectpending_is_s_banked = false; + s->vectpending_prio = NVIC_NOEXC_PRIO; + + if (arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY)) { + memset(s->itns, 0, sizeof(s->itns)); + } else { + /* This state is constant and not guest accessible in a non-security + * NVIC; we set the bits to true to avoid having to do a feature + * bit check in the NVIC enable/pend/etc register accessors. 
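+         * (Those accessors test "attrs.secure || s->itns[n]", which
+         * then always passes when the NVIC has no Security Extension.)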
+ */ + int i; + + for (i = NVIC_FIRST_IRQ; i < ARRAY_SIZE(s->itns); i++) { + s->itns[i] = true; + } + } + + /* + * We updated state that affects the CPU's MMUidx and thus its hflags; + * and we can't guarantee that we run before the CPU reset function. + */ + arm_rebuild_hflags(&s->cpu->env); +} + +static void nvic_systick_trigger(void *opaque, int n, int level) +{ + NVICState *s = opaque; + + if (level) { + /* SysTick just asked us to pend its exception. + * (This is different from an external interrupt line's + * behaviour.) + * n == 0 : NonSecure systick + * n == 1 : Secure systick + */ + armv7m_nvic_set_pending(s, ARMV7M_EXCP_SYSTICK, n); + } +} + +static void armv7m_nvic_realize(DeviceState *dev, Error **errp) +{ + NVICState *s = NVIC(dev); + + /* The armv7m container object will have set our CPU pointer */ + if (!s->cpu || !arm_feature(&s->cpu->env, ARM_FEATURE_M)) { + error_setg(errp, "The NVIC can only be used with a Cortex-M CPU"); + return; + } + + if (s->num_irq > NVIC_MAX_IRQ) { + error_setg(errp, "num-irq %d exceeds NVIC maximum", s->num_irq); + return; + } + + qdev_init_gpio_in(dev, set_irq_level, s->num_irq); + + /* include space for internal exception vectors */ + s->num_irq += NVIC_FIRST_IRQ; + + s->num_prio_bits = arm_feature(&s->cpu->env, ARM_FEATURE_V7) ? 8 : 2; + + /* + * This device provides a single memory region which covers the + * sysreg/NVIC registers from 0xE000E000 .. 0xE000EFFF, with the + * exception of the systick timer registers 0xE000E010 .. 0xE000E0FF. + */ + memory_region_init_io(&s->sysregmem, OBJECT(s), &nvic_sysreg_ops, s, + "nvic_sysregs", 0x1000); + sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->sysregmem); +} + +static void armv7m_nvic_instance_init(Object *obj) +{ + DeviceState *dev = DEVICE(obj); + NVICState *nvic = NVIC(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + + sysbus_init_irq(sbd, &nvic->excpout); + qdev_init_gpio_out_named(dev, &nvic->sysresetreq, "SYSRESETREQ", 1); + qdev_init_gpio_in_named(dev, nvic_systick_trigger, "systick-trigger", + M_REG_NUM_BANKS); + qdev_init_gpio_in_named(dev, nvic_nmi_trigger, "NMI", 1); +} + +static void armv7m_nvic_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->vmsd = &vmstate_nvic; + device_class_set_props(dc, props_nvic); + dc->reset = armv7m_nvic_reset; + dc->realize = armv7m_nvic_realize; +} + +static const TypeInfo armv7m_nvic_info = { + .name = TYPE_NVIC, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_init = armv7m_nvic_instance_init, + .instance_size = sizeof(NVICState), + .class_init = armv7m_nvic_class_init, + .class_size = sizeof(SysBusDeviceClass), +}; + +static void armv7m_nvic_register_types(void) +{ + type_register_static(&armv7m_nvic_info); +} + +type_init(armv7m_nvic_register_types) diff --git a/hw/intc/aspeed_vic.c b/hw/intc/aspeed_vic.c new file mode 100644 index 000000000..5ba06c526 --- /dev/null +++ b/hw/intc/aspeed_vic.c @@ -0,0 +1,363 @@ +/* + * ASPEED Interrupt Controller (New) + * + * Andrew Jeffery + * + * Copyright 2015, 2016 IBM Corp. + * + * This code is licensed under the GPL version 2 or later. See + * the COPYING file in the top-level directory. + */ + +/* The hardware exposes two register sets, a legacy set and a 'new' set. The + * model implements the 'new' register set, and logs warnings on accesses to + * the legacy IO space. + * + * The hardware uses 32bit registers to manage 51 IRQs, with low and high + * registers for each conceptual register. 
The device model's implementation + * uses 64bit data types to store both low and high register values (in the one + * member), but must cope with access offset values in multiples of 4 passed to + * the callbacks. As such the read() and write() implementations process the + * provided offset to understand whether the access is requesting the lower or + * upper 32 bits of the 64bit member. + * + * Additionally, the "Interrupt Enable", "Edge Status" and "Software Interrupt" + * fields have separate "enable"/"status" and "clear" registers, where set bits + * are written to one or the other to change state (avoiding a + * read-modify-write sequence). + */ + +#include "qemu/osdep.h" +#include "hw/intc/aspeed_vic.h" +#include "hw/irq.h" +#include "migration/vmstate.h" +#include "qemu/bitops.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "trace.h" + +#define AVIC_NEW_BASE_OFFSET 0x80 + +#define AVIC_L_MASK 0xFFFFFFFFU +#define AVIC_H_MASK 0x0007FFFFU +#define AVIC_EVENT_W_MASK (0x78000ULL << 32) + +static void aspeed_vic_update(AspeedVICState *s) +{ + uint64_t new = (s->raw & s->enable); + uint64_t flags; + + flags = new & s->select; + trace_aspeed_vic_update_fiq(!!flags); + qemu_set_irq(s->fiq, !!flags); + + flags = new & ~s->select; + trace_aspeed_vic_update_irq(!!flags); + qemu_set_irq(s->irq, !!flags); +} + +static void aspeed_vic_set_irq(void *opaque, int irq, int level) +{ + uint64_t irq_mask; + bool raise; + AspeedVICState *s = (AspeedVICState *)opaque; + + if (irq > ASPEED_VIC_NR_IRQS) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: Invalid interrupt number: %d\n", + __func__, irq); + return; + } + + trace_aspeed_vic_set_irq(irq, level); + + irq_mask = BIT(irq); + if (s->sense & irq_mask) { + /* level-triggered */ + if (s->event & irq_mask) { + /* high-sensitive */ + raise = level; + } else { + /* low-sensitive */ + raise = !level; + } + s->raw = deposit64(s->raw, irq, 1, raise); + } else { + uint64_t old_level = s->level & irq_mask; + + /* edge-triggered */ + if (s->dual_edge & irq_mask) { + raise = (!!old_level) != (!!level); + } else { + if (s->event & irq_mask) { + /* rising-sensitive */ + raise = !old_level && level; + } else { + /* falling-sensitive */ + raise = old_level && !level; + } + } + if (raise) { + s->raw = deposit64(s->raw, irq, 1, raise); + } + } + s->level = deposit64(s->level, irq, 1, level); + aspeed_vic_update(s); +} + +static uint64_t aspeed_vic_read(void *opaque, hwaddr offset, unsigned size) +{ + AspeedVICState *s = (AspeedVICState *)opaque; + hwaddr n_offset; + uint64_t val; + bool high; + + if (offset < AVIC_NEW_BASE_OFFSET) { + high = false; + n_offset = offset; + } else { + high = !!(offset & 0x4); + n_offset = (offset & ~0x4); + } + + switch (n_offset) { + case 0x80: /* IRQ Status */ + case 0x00: + val = s->raw & ~s->select & s->enable; + break; + case 0x88: /* FIQ Status */ + case 0x04: + val = s->raw & s->select & s->enable; + break; + case 0x90: /* Raw Interrupt Status */ + case 0x08: + val = s->raw; + break; + case 0x98: /* Interrupt Selection */ + case 0x0c: + val = s->select; + break; + case 0xa0: /* Interrupt Enable */ + case 0x10: + val = s->enable; + break; + case 0xb0: /* Software Interrupt */ + case 0x18: + val = s->trigger; + break; + case 0xc0: /* Interrupt Sensitivity */ + case 0x24: + val = s->sense; + break; + case 0xc8: /* Interrupt Both Edge Trigger Control */ + case 0x28: + val = s->dual_edge; + break; + case 0xd0: /* Interrupt Event */ + case 0x2c: + val = s->event; + break; + case 0xe0: /* Edge Triggered Interrupt Status */ + val = 
s->raw & ~s->sense; + break; + /* Illegal */ + case 0xa8: /* Interrupt Enable Clear */ + case 0xb8: /* Software Interrupt Clear */ + case 0xd8: /* Edge Triggered Interrupt Clear */ + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Read of write-only register with offset 0x%" + HWADDR_PRIx "\n", __func__, offset); + val = 0; + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad register at offset 0x%" HWADDR_PRIx "\n", + __func__, offset); + val = 0; + break; + } + if (high) { + val = extract64(val, 32, 19); + } else { + val = extract64(val, 0, 32); + } + trace_aspeed_vic_read(offset, size, val); + return val; +} + +static void aspeed_vic_write(void *opaque, hwaddr offset, uint64_t data, + unsigned size) +{ + AspeedVICState *s = (AspeedVICState *)opaque; + hwaddr n_offset; + bool high; + + if (offset < AVIC_NEW_BASE_OFFSET) { + high = false; + n_offset = offset; + } else { + high = !!(offset & 0x4); + n_offset = (offset & ~0x4); + } + + trace_aspeed_vic_write(offset, size, data); + + /* Given we have members using separate enable/clear registers, deposit64() + * isn't quite the tool for the job. Instead, relocate the incoming bits to + * the required bit offset based on the provided access address + */ + if (high) { + data &= AVIC_H_MASK; + data <<= 32; + } else { + data &= AVIC_L_MASK; + } + + switch (n_offset) { + case 0x98: /* Interrupt Selection */ + case 0x0c: + /* Register has deposit64() semantics - overwrite requested 32 bits */ + if (high) { + s->select &= AVIC_L_MASK; + } else { + s->select &= ((uint64_t) AVIC_H_MASK) << 32; + } + s->select |= data; + break; + case 0xa0: /* Interrupt Enable */ + case 0x10: + s->enable |= data; + break; + case 0xa8: /* Interrupt Enable Clear */ + case 0x14: + s->enable &= ~data; + break; + case 0xb0: /* Software Interrupt */ + case 0x18: + qemu_log_mask(LOG_UNIMP, "%s: Software interrupts unavailable. " + "IRQs requested: 0x%016" PRIx64 "\n", __func__, data); + break; + case 0xb8: /* Software Interrupt Clear */ + case 0x1c: + qemu_log_mask(LOG_UNIMP, "%s: Software interrupts unavailable. 
" + "IRQs to be cleared: 0x%016" PRIx64 "\n", __func__, data); + break; + case 0xd0: /* Interrupt Event */ + /* Register has deposit64() semantics - overwrite the top four valid + * IRQ bits, as only the top four IRQs (GPIOs) can change their event + * type */ + if (high) { + s->event &= ~AVIC_EVENT_W_MASK; + s->event |= (data & AVIC_EVENT_W_MASK); + } else { + qemu_log_mask(LOG_GUEST_ERROR, + "Ignoring invalid write to interrupt event register"); + } + break; + case 0xd8: /* Edge Triggered Interrupt Clear */ + case 0x38: + s->raw &= ~(data & ~s->sense); + break; + case 0x80: /* IRQ Status */ + case 0x00: + case 0x88: /* FIQ Status */ + case 0x04: + case 0x90: /* Raw Interrupt Status */ + case 0x08: + case 0xc0: /* Interrupt Sensitivity */ + case 0x24: + case 0xc8: /* Interrupt Both Edge Trigger Control */ + case 0x28: + case 0xe0: /* Edge Triggered Interrupt Status */ + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Write of read-only register with offset 0x%" + HWADDR_PRIx "\n", __func__, offset); + break; + + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad register at offset 0x%" HWADDR_PRIx "\n", + __func__, offset); + break; + } + aspeed_vic_update(s); +} + +static const MemoryRegionOps aspeed_vic_ops = { + .read = aspeed_vic_read, + .write = aspeed_vic_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid.min_access_size = 4, + .valid.max_access_size = 4, + .valid.unaligned = false, +}; + +static void aspeed_vic_reset(DeviceState *dev) +{ + AspeedVICState *s = ASPEED_VIC(dev); + + s->level = 0; + s->raw = 0; + s->select = 0; + s->enable = 0; + s->trigger = 0; + s->sense = 0x1F07FFF8FFFFULL; + s->dual_edge = 0xF800070000ULL; + s->event = 0x5F07FFF8FFFFULL; +} + +#define AVIC_IO_REGION_SIZE 0x20000 + +static void aspeed_vic_realize(DeviceState *dev, Error **errp) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + AspeedVICState *s = ASPEED_VIC(dev); + + memory_region_init_io(&s->iomem, OBJECT(s), &aspeed_vic_ops, s, + TYPE_ASPEED_VIC, AVIC_IO_REGION_SIZE); + + sysbus_init_mmio(sbd, &s->iomem); + + qdev_init_gpio_in(dev, aspeed_vic_set_irq, ASPEED_VIC_NR_IRQS); + sysbus_init_irq(sbd, &s->irq); + sysbus_init_irq(sbd, &s->fiq); +} + +static const VMStateDescription vmstate_aspeed_vic = { + .name = "aspeed.new-vic", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT64(level, AspeedVICState), + VMSTATE_UINT64(raw, AspeedVICState), + VMSTATE_UINT64(select, AspeedVICState), + VMSTATE_UINT64(enable, AspeedVICState), + VMSTATE_UINT64(trigger, AspeedVICState), + VMSTATE_UINT64(sense, AspeedVICState), + VMSTATE_UINT64(dual_edge, AspeedVICState), + VMSTATE_UINT64(event, AspeedVICState), + VMSTATE_END_OF_LIST() + } +}; + +static void aspeed_vic_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + dc->realize = aspeed_vic_realize; + dc->reset = aspeed_vic_reset; + dc->desc = "ASPEED Interrupt Controller (New)"; + dc->vmsd = &vmstate_aspeed_vic; +} + +static const TypeInfo aspeed_vic_info = { + .name = TYPE_ASPEED_VIC, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(AspeedVICState), + .class_init = aspeed_vic_class_init, +}; + +static void aspeed_vic_register_types(void) +{ + type_register_static(&aspeed_vic_info); +} + +type_init(aspeed_vic_register_types); diff --git a/hw/intc/bcm2835_ic.c b/hw/intc/bcm2835_ic.c new file mode 100644 index 000000000..9000d995e --- /dev/null +++ b/hw/intc/bcm2835_ic.c @@ -0,0 +1,243 @@ +/* + * Raspberry Pi emulation (c) 2012 Gregory Estrade + * Refactoring for Pi2 Copyright (c) 2015, 
Microsoft. Written by Andrew Baumann. + * Heavily based on pl190.c, copyright terms below: + * + * Arm PrimeCell PL190 Vector Interrupt Controller + * + * Copyright (c) 2006 CodeSourcery. + * Written by Paul Brook + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "hw/intc/bcm2835_ic.h" +#include "hw/irq.h" +#include "migration/vmstate.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "trace.h" + +#define GPU_IRQS 64 +#define ARM_IRQS 8 + +#define IRQ_PENDING_BASIC 0x00 /* IRQ basic pending */ +#define IRQ_PENDING_1 0x04 /* IRQ pending 1 */ +#define IRQ_PENDING_2 0x08 /* IRQ pending 2 */ +#define FIQ_CONTROL 0x0C /* FIQ register */ +#define IRQ_ENABLE_1 0x10 /* Interrupt enable register 1 */ +#define IRQ_ENABLE_2 0x14 /* Interrupt enable register 2 */ +#define IRQ_ENABLE_BASIC 0x18 /* Base interrupt enable register */ +#define IRQ_DISABLE_1 0x1C /* Interrupt disable register 1 */ +#define IRQ_DISABLE_2 0x20 /* Interrupt disable register 2 */ +#define IRQ_DISABLE_BASIC 0x24 /* Base interrupt disable register */ + +/* Update interrupts. */ +static void bcm2835_ic_update(BCM2835ICState *s) +{ + bool set = false; + + if (s->fiq_enable) { + if (s->fiq_select >= GPU_IRQS) { + /* ARM IRQ */ + set = extract32(s->arm_irq_level, s->fiq_select - GPU_IRQS, 1); + } else { + set = extract64(s->gpu_irq_level, s->fiq_select, 1); + } + } + qemu_set_irq(s->fiq, set); + + set = (s->gpu_irq_level & s->gpu_irq_enable) + || (s->arm_irq_level & s->arm_irq_enable); + qemu_set_irq(s->irq, set); +} + +static void bcm2835_ic_set_gpu_irq(void *opaque, int irq, int level) +{ + BCM2835ICState *s = opaque; + + assert(irq >= 0 && irq < 64); + trace_bcm2835_ic_set_gpu_irq(irq, level); + s->gpu_irq_level = deposit64(s->gpu_irq_level, irq, 1, level != 0); + bcm2835_ic_update(s); +} + +static void bcm2835_ic_set_arm_irq(void *opaque, int irq, int level) +{ + BCM2835ICState *s = opaque; + + assert(irq >= 0 && irq < 8); + trace_bcm2835_ic_set_cpu_irq(irq, level); + s->arm_irq_level = deposit32(s->arm_irq_level, irq, 1, level != 0); + bcm2835_ic_update(s); +} + +static const int irq_dups[] = { 7, 9, 10, 18, 19, 53, 54, 55, 56, 57, 62 }; + +static uint64_t bcm2835_ic_read(void *opaque, hwaddr offset, unsigned size) +{ + BCM2835ICState *s = opaque; + uint32_t res = 0; + uint64_t gpu_pending = s->gpu_irq_level & s->gpu_irq_enable; + int i; + + switch (offset) { + case IRQ_PENDING_BASIC: + /* bits 0-7: ARM irqs */ + res = s->arm_irq_level & s->arm_irq_enable; + + /* bits 8 & 9: pending registers 1 & 2 */ + res |= (((uint32_t)gpu_pending) != 0) << 8; + res |= ((gpu_pending >> 32) != 0) << 9; + + /* bits 10-20: selected GPU IRQs */ + for (i = 0; i < ARRAY_SIZE(irq_dups); i++) { + res |= extract64(gpu_pending, irq_dups[i], 1) << (i + 10); + } + break; + case IRQ_PENDING_1: + res = gpu_pending; + break; + case IRQ_PENDING_2: + res = gpu_pending >> 32; + break; + case FIQ_CONTROL: + res = (s->fiq_enable << 7) | s->fiq_select; + break; + case IRQ_ENABLE_1: + res = s->gpu_irq_enable; + break; + case IRQ_ENABLE_2: + res = s->gpu_irq_enable >> 32; + break; + case IRQ_ENABLE_BASIC: + res = s->arm_irq_enable; + break; + case IRQ_DISABLE_1: + res = ~s->gpu_irq_enable; + break; + case IRQ_DISABLE_2: + res = ~s->gpu_irq_enable >> 32; + break; + case IRQ_DISABLE_BASIC: + res = ~s->arm_irq_enable; + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset %"HWADDR_PRIx"\n", + __func__, offset); + 
return 0; + } + + return res; +} + +static void bcm2835_ic_write(void *opaque, hwaddr offset, uint64_t val, + unsigned size) +{ + BCM2835ICState *s = opaque; + + switch (offset) { + case FIQ_CONTROL: + s->fiq_select = extract32(val, 0, 7); + s->fiq_enable = extract32(val, 7, 1); + break; + case IRQ_ENABLE_1: + s->gpu_irq_enable |= val; + break; + case IRQ_ENABLE_2: + s->gpu_irq_enable |= val << 32; + break; + case IRQ_ENABLE_BASIC: + s->arm_irq_enable |= val & 0xff; + break; + case IRQ_DISABLE_1: + s->gpu_irq_enable &= ~val; + break; + case IRQ_DISABLE_2: + s->gpu_irq_enable &= ~(val << 32); + break; + case IRQ_DISABLE_BASIC: + s->arm_irq_enable &= ~val & 0xff; + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset %"HWADDR_PRIx"\n", + __func__, offset); + return; + } + bcm2835_ic_update(s); +} + +static const MemoryRegionOps bcm2835_ic_ops = { + .read = bcm2835_ic_read, + .write = bcm2835_ic_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid.min_access_size = 4, + .valid.max_access_size = 4, +}; + +static void bcm2835_ic_reset(DeviceState *d) +{ + BCM2835ICState *s = BCM2835_IC(d); + + s->gpu_irq_enable = 0; + s->arm_irq_enable = 0; + s->fiq_enable = false; + s->fiq_select = 0; +} + +static void bcm2835_ic_init(Object *obj) +{ + BCM2835ICState *s = BCM2835_IC(obj); + + memory_region_init_io(&s->iomem, obj, &bcm2835_ic_ops, s, TYPE_BCM2835_IC, + 0x200); + sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->iomem); + + qdev_init_gpio_in_named(DEVICE(s), bcm2835_ic_set_gpu_irq, + BCM2835_IC_GPU_IRQ, GPU_IRQS); + qdev_init_gpio_in_named(DEVICE(s), bcm2835_ic_set_arm_irq, + BCM2835_IC_ARM_IRQ, ARM_IRQS); + + sysbus_init_irq(SYS_BUS_DEVICE(s), &s->irq); + sysbus_init_irq(SYS_BUS_DEVICE(s), &s->fiq); +} + +static const VMStateDescription vmstate_bcm2835_ic = { + .name = TYPE_BCM2835_IC, + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT64(gpu_irq_level, BCM2835ICState), + VMSTATE_UINT64(gpu_irq_enable, BCM2835ICState), + VMSTATE_UINT8(arm_irq_level, BCM2835ICState), + VMSTATE_UINT8(arm_irq_enable, BCM2835ICState), + VMSTATE_BOOL(fiq_enable, BCM2835ICState), + VMSTATE_UINT8(fiq_select, BCM2835ICState), + VMSTATE_END_OF_LIST() + } +}; + +static void bcm2835_ic_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = bcm2835_ic_reset; + dc->vmsd = &vmstate_bcm2835_ic; +} + +static TypeInfo bcm2835_ic_info = { + .name = TYPE_BCM2835_IC, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(BCM2835ICState), + .class_init = bcm2835_ic_class_init, + .instance_init = bcm2835_ic_init, +}; + +static void bcm2835_ic_register_types(void) +{ + type_register_static(&bcm2835_ic_info); +} + +type_init(bcm2835_ic_register_types) diff --git a/hw/intc/bcm2836_control.c b/hw/intc/bcm2836_control.c new file mode 100644 index 000000000..2ead76ffd --- /dev/null +++ b/hw/intc/bcm2836_control.c @@ -0,0 +1,408 @@ +/* + * Raspberry Pi 2 emulation ARM control logic module. + * Copyright (c) 2015, Microsoft + * Written by Andrew Baumann + * + * Based on bcm2835_ic.c (Raspberry Pi emulation) (c) 2012 Gregory Estrade + * + * At present, this only implements interrupt routing and mailboxes (i.e., + * not the PMU interrupt or AXI counters). + * + * ARM Local Timer IRQ Copyright (c) 2019. Zoltán Baldaszti + * + * Ref: + * https://www.raspberrypi.org/documentation/hardware/raspberrypi/bcm2836/QA7_rev3.4.pdf + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory.
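The IRQ_ENABLE_*/IRQ_DISABLE_* pairs above are the usual write-one-to-set / write-one-to-clear idiom, which lets a guest update the enable mask without a read-modify-write cycle. A minimal sketch of the pattern (hypothetical helper names, not from this patch):

    /* sketch: set/clear register pair maintaining one enable mask */
    static void enable_reg_write(uint32_t *enable, uint32_t val)
    {
        *enable |= val;    /* ENABLE: written 1-bits turn sources on */
    }

    static void disable_reg_write(uint32_t *enable, uint32_t val)
    {
        *enable &= ~val;   /* DISABLE: written 1-bits turn sources off */
    }
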
+ */ + +#include "qemu/osdep.h" +#include "hw/intc/bcm2836_control.h" +#include "hw/irq.h" +#include "migration/vmstate.h" +#include "qemu/log.h" +#include "qemu/module.h" + +#define REG_GPU_ROUTE 0x0c +#define REG_LOCALTIMERROUTING 0x24 +#define REG_LOCALTIMERCONTROL 0x34 +#define REG_LOCALTIMERACK 0x38 +#define REG_TIMERCONTROL 0x40 +#define REG_MBOXCONTROL 0x50 +#define REG_IRQSRC 0x60 +#define REG_FIQSRC 0x70 +#define REG_MBOX0_WR 0x80 +#define REG_MBOX0_RDCLR 0xc0 +#define REG_LIMIT 0x100 + +#define IRQ_BIT(cntrl, num) (((cntrl) & (1 << (num))) != 0) +#define FIQ_BIT(cntrl, num) (((cntrl) & (1 << ((num) + 4))) != 0) + +#define IRQ_CNTPSIRQ 0 +#define IRQ_CNTPNSIRQ 1 +#define IRQ_CNTHPIRQ 2 +#define IRQ_CNTVIRQ 3 +#define IRQ_MAILBOX0 4 +#define IRQ_MAILBOX1 5 +#define IRQ_MAILBOX2 6 +#define IRQ_MAILBOX3 7 +#define IRQ_GPU 8 +#define IRQ_PMU 9 +#define IRQ_AXI 10 +#define IRQ_TIMER 11 +#define IRQ_MAX IRQ_TIMER + +#define LOCALTIMER_FREQ 38400000 +#define LOCALTIMER_INTFLAG (1 << 31) +#define LOCALTIMER_RELOAD (1 << 30) +#define LOCALTIMER_INTENABLE (1 << 29) +#define LOCALTIMER_ENABLE (1 << 28) +#define LOCALTIMER_VALUE(x) ((x) & 0xfffffff) + +static void deliver_local(BCM2836ControlState *s, uint8_t core, uint8_t irq, + uint32_t controlreg, uint8_t controlidx) +{ + if (FIQ_BIT(controlreg, controlidx)) { + /* deliver a FIQ */ + s->fiqsrc[core] |= (uint32_t)1 << irq; + } else if (IRQ_BIT(controlreg, controlidx)) { + /* deliver an IRQ */ + s->irqsrc[core] |= (uint32_t)1 << irq; + } else { + /* the interrupt is masked */ + } +} + +/* Update interrupts. */ +static void bcm2836_control_update(BCM2836ControlState *s) +{ + int i, j; + + /* reset pending IRQs/FIQs */ + for (i = 0; i < BCM2836_NCORES; i++) { + s->irqsrc[i] = s->fiqsrc[i] = 0; + } + + /* apply routing logic, update status regs */ + if (s->gpu_irq) { + assert(s->route_gpu_irq < BCM2836_NCORES); + s->irqsrc[s->route_gpu_irq] |= (uint32_t)1 << IRQ_GPU; + } + + if (s->gpu_fiq) { + assert(s->route_gpu_fiq < BCM2836_NCORES); + s->fiqsrc[s->route_gpu_fiq] |= (uint32_t)1 << IRQ_GPU; + } + + /* + * handle the control module 'local timer' interrupt for one of the + * cores' IRQ/FIQ; this is distinct from the per-CPU timer + * interrupts handled below. + */ + if ((s->local_timer_control & LOCALTIMER_INTENABLE) && + (s->local_timer_control & LOCALTIMER_INTFLAG)) { + if (s->route_localtimer & 4) { + s->fiqsrc[(s->route_localtimer & 3)] |= (uint32_t)1 << IRQ_TIMER; + } else { + s->irqsrc[(s->route_localtimer & 3)] |= (uint32_t)1 << IRQ_TIMER; + } + } + + for (i = 0; i < BCM2836_NCORES; i++) { + /* handle local timer interrupts for this core */ + if (s->timerirqs[i]) { + assert(s->timerirqs[i] < (1 << (IRQ_CNTVIRQ + 1))); /* sane mask? 
*/ + for (j = 0; j <= IRQ_CNTVIRQ; j++) { + if ((s->timerirqs[i] & (1 << j)) != 0) { + /* local interrupt j is set */ + deliver_local(s, i, j, s->timercontrol[i], j); + } + } + } + + /* handle mailboxes for this core */ + for (j = 0; j < BCM2836_MBPERCORE; j++) { + if (s->mailboxes[i * BCM2836_MBPERCORE + j] != 0) { + /* mailbox j is set */ + deliver_local(s, i, j + IRQ_MAILBOX0, s->mailboxcontrol[i], j); + } + } + } + + /* call set_irq appropriately for each output */ + for (i = 0; i < BCM2836_NCORES; i++) { + qemu_set_irq(s->irq[i], s->irqsrc[i] != 0); + qemu_set_irq(s->fiq[i], s->fiqsrc[i] != 0); + } +} + +static void bcm2836_control_set_local_irq(void *opaque, int core, int local_irq, + int level) +{ + BCM2836ControlState *s = opaque; + + assert(core >= 0 && core < BCM2836_NCORES); + assert(local_irq >= 0 && local_irq <= IRQ_CNTVIRQ); + + s->timerirqs[core] = deposit32(s->timerirqs[core], local_irq, 1, !!level); + + bcm2836_control_update(s); +} + +/* XXX: the following wrapper functions are a kludgy workaround, + * needed because I can't seem to pass useful information in the "irq" + * parameter when using named interrupts. Feel free to clean this up! + */ + +static void bcm2836_control_set_local_irq0(void *opaque, int core, int level) +{ + bcm2836_control_set_local_irq(opaque, core, IRQ_CNTPSIRQ, level); +} + +static void bcm2836_control_set_local_irq1(void *opaque, int core, int level) +{ + bcm2836_control_set_local_irq(opaque, core, IRQ_CNTPNSIRQ, level); +} + +static void bcm2836_control_set_local_irq2(void *opaque, int core, int level) +{ + bcm2836_control_set_local_irq(opaque, core, IRQ_CNTHPIRQ, level); +} + +static void bcm2836_control_set_local_irq3(void *opaque, int core, int level) +{ + bcm2836_control_set_local_irq(opaque, core, IRQ_CNTVIRQ, level); +} + +static void bcm2836_control_set_gpu_irq(void *opaque, int irq, int level) +{ + BCM2836ControlState *s = opaque; + + s->gpu_irq = level; + + bcm2836_control_update(s); +} + +static void bcm2836_control_set_gpu_fiq(void *opaque, int irq, int level) +{ + BCM2836ControlState *s = opaque; + + s->gpu_fiq = level; + + bcm2836_control_update(s); +} + +static void bcm2836_control_local_timer_set_next(void *opaque) +{ + BCM2836ControlState *s = opaque; + uint64_t next_event; + + assert(LOCALTIMER_VALUE(s->local_timer_control) > 0); + + next_event = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + + muldiv64(LOCALTIMER_VALUE(s->local_timer_control), + NANOSECONDS_PER_SECOND, LOCALTIMER_FREQ); + timer_mod(&s->timer, next_event); +} + +static void bcm2836_control_local_timer_tick(void *opaque) +{ + BCM2836ControlState *s = opaque; + + bcm2836_control_local_timer_set_next(s); + + s->local_timer_control |= LOCALTIMER_INTFLAG; + bcm2836_control_update(s); +} + +static void bcm2836_control_local_timer_control(void *opaque, uint32_t val) +{ + BCM2836ControlState *s = opaque; + + s->local_timer_control = val; + if (val & LOCALTIMER_ENABLE) { + bcm2836_control_local_timer_set_next(s); + } else { + timer_del(&s->timer); + } +} + +static void bcm2836_control_local_timer_ack(void *opaque, uint32_t val) +{ + BCM2836ControlState *s = opaque; + + if (val & LOCALTIMER_INTFLAG) { + s->local_timer_control &= ~LOCALTIMER_INTFLAG; + } + if ((val & LOCALTIMER_RELOAD) && + (s->local_timer_control & LOCALTIMER_ENABLE)) { + bcm2836_control_local_timer_set_next(s); + } +} + +static uint64_t bcm2836_control_read(void *opaque, hwaddr offset, unsigned size) +{ + BCM2836ControlState *s = opaque; + + if (offset == REG_GPU_ROUTE) { + assert(s->route_gpu_fiq < 
BCM2836_NCORES + && s->route_gpu_irq < BCM2836_NCORES); + return ((uint32_t)s->route_gpu_fiq << 2) | s->route_gpu_irq; + } else if (offset == REG_LOCALTIMERROUTING) { + return s->route_localtimer; + } else if (offset == REG_LOCALTIMERCONTROL) { + return s->local_timer_control; + } else if (offset == REG_LOCALTIMERACK) { + return 0; + } else if (offset >= REG_TIMERCONTROL && offset < REG_MBOXCONTROL) { + return s->timercontrol[(offset - REG_TIMERCONTROL) >> 2]; + } else if (offset >= REG_MBOXCONTROL && offset < REG_IRQSRC) { + return s->mailboxcontrol[(offset - REG_MBOXCONTROL) >> 2]; + } else if (offset >= REG_IRQSRC && offset < REG_FIQSRC) { + return s->irqsrc[(offset - REG_IRQSRC) >> 2]; + } else if (offset >= REG_FIQSRC && offset < REG_MBOX0_WR) { + return s->fiqsrc[(offset - REG_FIQSRC) >> 2]; + } else if (offset >= REG_MBOX0_RDCLR && offset < REG_LIMIT) { + return s->mailboxes[(offset - REG_MBOX0_RDCLR) >> 2]; + } else { + qemu_log_mask(LOG_UNIMP, "%s: Unsupported offset 0x%"HWADDR_PRIx"\n", + __func__, offset); + return 0; + } +} + +static void bcm2836_control_write(void *opaque, hwaddr offset, + uint64_t val, unsigned size) +{ + BCM2836ControlState *s = opaque; + + if (offset == REG_GPU_ROUTE) { + s->route_gpu_irq = val & 0x3; + s->route_gpu_fiq = (val >> 2) & 0x3; + } else if (offset == REG_LOCALTIMERROUTING) { + s->route_localtimer = val & 7; + } else if (offset == REG_LOCALTIMERCONTROL) { + bcm2836_control_local_timer_control(s, val); + } else if (offset == REG_LOCALTIMERACK) { + bcm2836_control_local_timer_ack(s, val); + } else if (offset >= REG_TIMERCONTROL && offset < REG_MBOXCONTROL) { + s->timercontrol[(offset - REG_TIMERCONTROL) >> 2] = val & 0xff; + } else if (offset >= REG_MBOXCONTROL && offset < REG_IRQSRC) { + s->mailboxcontrol[(offset - REG_MBOXCONTROL) >> 2] = val & 0xff; + } else if (offset >= REG_MBOX0_WR && offset < REG_MBOX0_RDCLR) { + s->mailboxes[(offset - REG_MBOX0_WR) >> 2] |= val; + } else if (offset >= REG_MBOX0_RDCLR && offset < REG_LIMIT) { + s->mailboxes[(offset - REG_MBOX0_RDCLR) >> 2] &= ~val; + } else { + qemu_log_mask(LOG_UNIMP, "%s: Unsupported offset 0x%"HWADDR_PRIx + " value 0x%"PRIx64"\n", + __func__, offset, val); + return; + } + + bcm2836_control_update(s); +} + +static const MemoryRegionOps bcm2836_control_ops = { + .read = bcm2836_control_read, + .write = bcm2836_control_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid.min_access_size = 4, + .valid.max_access_size = 4, +}; + +static void bcm2836_control_reset(DeviceState *d) +{ + BCM2836ControlState *s = BCM2836_CONTROL(d); + int i; + + s->route_gpu_irq = s->route_gpu_fiq = 0; + + timer_del(&s->timer); + s->route_localtimer = 0; + s->local_timer_control = 0; + + for (i = 0; i < BCM2836_NCORES; i++) { + s->timercontrol[i] = 0; + s->mailboxcontrol[i] = 0; + } + + for (i = 0; i < BCM2836_NCORES * BCM2836_MBPERCORE; i++) { + s->mailboxes[i] = 0; + } +} + +static void bcm2836_control_init(Object *obj) +{ + BCM2836ControlState *s = BCM2836_CONTROL(obj); + DeviceState *dev = DEVICE(obj); + + memory_region_init_io(&s->iomem, obj, &bcm2836_control_ops, s, + TYPE_BCM2836_CONTROL, REG_LIMIT); + sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->iomem); + + /* inputs from each CPU core */ + qdev_init_gpio_in_named(dev, bcm2836_control_set_local_irq0, "cntpsirq", + BCM2836_NCORES); + qdev_init_gpio_in_named(dev, bcm2836_control_set_local_irq1, "cntpnsirq", + BCM2836_NCORES); + qdev_init_gpio_in_named(dev, bcm2836_control_set_local_irq2, "cnthpirq", + BCM2836_NCORES); + qdev_init_gpio_in_named(dev, 
bcm2836_control_set_local_irq3, "cntvirq", + BCM2836_NCORES); + + /* IRQ and FIQ inputs from upstream bcm2835 controller */ + qdev_init_gpio_in_named(dev, bcm2836_control_set_gpu_irq, "gpu-irq", 1); + qdev_init_gpio_in_named(dev, bcm2836_control_set_gpu_fiq, "gpu-fiq", 1); + + /* outputs to CPU cores */ + qdev_init_gpio_out_named(dev, s->irq, "irq", BCM2836_NCORES); + qdev_init_gpio_out_named(dev, s->fiq, "fiq", BCM2836_NCORES); + + /* create a qemu virtual timer */ + timer_init_ns(&s->timer, QEMU_CLOCK_VIRTUAL, + bcm2836_control_local_timer_tick, s); +} + +static const VMStateDescription vmstate_bcm2836_control = { + .name = TYPE_BCM2836_CONTROL, + .version_id = 2, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32_ARRAY(mailboxes, BCM2836ControlState, + BCM2836_NCORES * BCM2836_MBPERCORE), + VMSTATE_UINT8(route_gpu_irq, BCM2836ControlState), + VMSTATE_UINT8(route_gpu_fiq, BCM2836ControlState), + VMSTATE_UINT32_ARRAY(timercontrol, BCM2836ControlState, BCM2836_NCORES), + VMSTATE_UINT32_ARRAY(mailboxcontrol, BCM2836ControlState, + BCM2836_NCORES), + VMSTATE_TIMER_V(timer, BCM2836ControlState, 2), + VMSTATE_UINT32_V(local_timer_control, BCM2836ControlState, 2), + VMSTATE_UINT8_V(route_localtimer, BCM2836ControlState, 2), + VMSTATE_END_OF_LIST() + } +}; + +static void bcm2836_control_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = bcm2836_control_reset; + dc->vmsd = &vmstate_bcm2836_control; +} + +static TypeInfo bcm2836_control_info = { + .name = TYPE_BCM2836_CONTROL, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(BCM2836ControlState), + .class_init = bcm2836_control_class_init, + .instance_init = bcm2836_control_init, +}; + +static void bcm2836_control_register_types(void) +{ + type_register_static(&bcm2836_control_info); +} + +type_init(bcm2836_control_register_types) diff --git a/hw/intc/etraxfs_pic.c b/hw/intc/etraxfs_pic.c new file mode 100644 index 000000000..bd37d1cca --- /dev/null +++ b/hw/intc/etraxfs_pic.c @@ -0,0 +1,172 @@ +/* + * QEMU ETRAX Interrupt Controller. + * + * Copyright (c) 2008 Edgar E. Iglesias, Axis Communications AB. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
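As an aside on the BCM2836 local timer above: bcm2836_control_local_timer_set_next() turns the 28-bit reload value into a QEMU virtual-clock deadline. A self-contained sketch of that arithmetic (illustrative only; LT_FREQ mirrors LOCALTIMER_FREQ):

    #include "qemu/osdep.h"
    #include "qemu/timer.h"

    #define LT_FREQ 38400000    /* 38.4 MHz reference, as LOCALTIMER_FREQ */

    /* sketch: arm t to fire after `reload` ticks of the reference clock */
    static void arm_local_timer(QEMUTimer *t, uint32_t reload)
    {
        int64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
        /* muldiv64() evaluates reload * NANOSECONDS_PER_SECOND / LT_FREQ
         * with a wide intermediate, avoiding 64-bit overflow */
        timer_mod(t, now + muldiv64(reload, NANOSECONDS_PER_SECOND, LT_FREQ));
    }
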
+ */ + +#include "qemu/osdep.h" +#include "hw/sysbus.h" +#include "qemu/module.h" +#include "hw/irq.h" +#include "hw/qdev-properties.h" +#include "qom/object.h" + +#define D(x) + +#define R_RW_MASK 0 +#define R_R_VECT 1 +#define R_R_MASKED_VECT 2 +#define R_R_NMI 3 +#define R_R_GURU 4 +#define R_MAX 5 + +#define TYPE_ETRAX_FS_PIC "etraxfs-pic" +DECLARE_INSTANCE_CHECKER(struct etrax_pic, ETRAX_FS_PIC, + TYPE_ETRAX_FS_PIC) + +struct etrax_pic +{ + SysBusDevice parent_obj; + + MemoryRegion mmio; + qemu_irq parent_irq; + qemu_irq parent_nmi; + uint32_t regs[R_MAX]; +}; + +static void pic_update(struct etrax_pic *fs) +{ + uint32_t vector = 0; + int i; + + fs->regs[R_R_MASKED_VECT] = fs->regs[R_R_VECT] & fs->regs[R_RW_MASK]; + + /* The ETRAX interrupt controller signals interrupts to the core + through an interrupt request wire and an irq vector bus. If + multiple interrupts are simultaneously active it chooses vector + 0x30 and lets the sw choose the priorities. */ + if (fs->regs[R_R_MASKED_VECT]) { + uint32_t mv = fs->regs[R_R_MASKED_VECT]; + for (i = 0; i < 31; i++) { + if (mv & 1) { + vector = 0x31 + i; + /* Check for multiple interrupts. */ + if (mv > 1) + vector = 0x30; + break; + } + mv >>= 1; + } + } + + qemu_set_irq(fs->parent_irq, vector); +} + +static uint64_t +pic_read(void *opaque, hwaddr addr, unsigned int size) +{ + struct etrax_pic *fs = opaque; + uint32_t rval; + + rval = fs->regs[addr >> 2]; + D(printf("%s %x=%x\n", __func__, addr, rval)); + return rval; +} + +static void pic_write(void *opaque, hwaddr addr, + uint64_t value, unsigned int size) +{ + struct etrax_pic *fs = opaque; + D(printf("%s addr=%x val=%x\n", __func__, addr, value)); + + if (addr == R_RW_MASK) { + fs->regs[R_RW_MASK] = value; + pic_update(fs); + } +} + +static const MemoryRegionOps pic_ops = { + .read = pic_read, + .write = pic_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4 + } +}; + +static void nmi_handler(void *opaque, int irq, int level) +{ + struct etrax_pic *fs = (void *)opaque; + uint32_t mask; + + mask = 1 << irq; + if (level) + fs->regs[R_R_NMI] |= mask; + else + fs->regs[R_R_NMI] &= ~mask; + + qemu_set_irq(fs->parent_nmi, !!fs->regs[R_R_NMI]); +} + +static void irq_handler(void *opaque, int irq, int level) +{ + struct etrax_pic *fs = (void *)opaque; + + if (irq >= 30) { + nmi_handler(opaque, irq, level); + return; + } + + irq -= 1; + fs->regs[R_R_VECT] &= ~(1 << irq); + fs->regs[R_R_VECT] |= (!!level << irq); + pic_update(fs); +} + +static void etraxfs_pic_init(Object *obj) +{ + DeviceState *dev = DEVICE(obj); + struct etrax_pic *s = ETRAX_FS_PIC(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + + qdev_init_gpio_in(dev, irq_handler, 32); + sysbus_init_irq(sbd, &s->parent_irq); + sysbus_init_irq(sbd, &s->parent_nmi); + + memory_region_init_io(&s->mmio, obj, &pic_ops, s, + "etraxfs-pic", R_MAX * 4); + sysbus_init_mmio(sbd, &s->mmio); +} + +static const TypeInfo etraxfs_pic_info = { + .name = TYPE_ETRAX_FS_PIC, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(struct etrax_pic), + .instance_init = etraxfs_pic_init, +}; + +static void etraxfs_pic_register_types(void) +{ + type_register_static(&etraxfs_pic_info); +} + +type_init(etraxfs_pic_register_types) diff --git a/hw/intc/exynos4210_combiner.c b/hw/intc/exynos4210_combiner.c new file mode 100644 index 000000000..4534ee248 --- /dev/null +++ b/hw/intc/exynos4210_combiner.c @@ -0,0 +1,461 @@ +/* + * Samsung exynos4210 Interrupt Combiner + * + * Copyright (c) 2000 - 2011 Samsung 
Electronics Co., Ltd. + * All rights reserved. + * + * Evgeny Voevodin + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * See the GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . + */ + +/* + * The Exynos4210 Combiner is an OR gate for the SoC's IRQ lines. It combines + * IRQ sources into groups and provides a signal output to the GIC for each + * group. It is driven by common mask and enable/disable logic. Note that not + * all IRQs are passed to the GIC through the Combiner. + */ + +#include "qemu/osdep.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "qemu/module.h" + +#include "hw/arm/exynos4210.h" +#include "hw/hw.h" +#include "hw/irq.h" +#include "hw/qdev-properties.h" +#include "qom/object.h" + +//#define DEBUG_COMBINER + +#ifdef DEBUG_COMBINER +#define DPRINTF(fmt, ...) \ + do { fprintf(stdout, "COMBINER: [%s:%d] " fmt, __func__ , __LINE__, \ + ## __VA_ARGS__); } while (0) +#else +#define DPRINTF(fmt, ...) do {} while (0) +#endif + +#define IIC_NGRP 64 /* Internal Interrupt Combiner + Groups number */ +#define IIC_NIRQ (IIC_NGRP * 8)/* Internal Interrupt Combiner + Interrupts number */ +#define IIC_REGION_SIZE 0x108 /* Size of memory mapped region */ +#define IIC_REGSET_SIZE 0x41 + +/* + * State for each output signal of internal combiner + */ +typedef struct CombinerGroupState { + uint8_t src_mask; /* 1 - source enabled, 0 - disabled */ + uint8_t src_pending; /* Pending source interrupts before masking */ +} CombinerGroupState; + +#define TYPE_EXYNOS4210_COMBINER "exynos4210.combiner" +OBJECT_DECLARE_SIMPLE_TYPE(Exynos4210CombinerState, EXYNOS4210_COMBINER) + +struct Exynos4210CombinerState { + SysBusDevice parent_obj; + + MemoryRegion iomem; + + struct CombinerGroupState group[IIC_NGRP]; + uint32_t reg_set[IIC_REGSET_SIZE]; + uint32_t icipsr[2]; + uint32_t external; /* 1 means that this combiner is external */ + + qemu_irq output_irq[IIC_NGRP]; +}; + +static const VMStateDescription vmstate_exynos4210_combiner_group_state = { + .name = "exynos4210.combiner.groupstate", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT8(src_mask, CombinerGroupState), + VMSTATE_UINT8(src_pending, CombinerGroupState), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_exynos4210_combiner = { + .name = "exynos4210.combiner", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_STRUCT_ARRAY(group, Exynos4210CombinerState, IIC_NGRP, 0, + vmstate_exynos4210_combiner_group_state, CombinerGroupState), + VMSTATE_UINT32_ARRAY(reg_set, Exynos4210CombinerState, + IIC_REGSET_SIZE), + VMSTATE_UINT32_ARRAY(icipsr, Exynos4210CombinerState, 2), + VMSTATE_UINT32(external, Exynos4210CombinerState), + VMSTATE_END_OF_LIST() + } +}; + +/* + * Get Combiner input GPIO into irqs structure + */ +void exynos4210_combiner_get_gpioin(Exynos4210Irq *irqs, DeviceState *dev, + int ext) +{ + int n; + int bit; + int max; + qemu_irq *irq; + + max = ext ?
EXYNOS4210_MAX_EXT_COMBINER_IN_IRQ : + EXYNOS4210_MAX_INT_COMBINER_IN_IRQ; + irq = ext ? irqs->ext_combiner_irq : irqs->int_combiner_irq; + + /* + * Some IRQs of the Int/External Combiner go to two Combiner groups, + * so we split them. + */ + for (n = 0; n < max; n++) { + + bit = EXYNOS4210_COMBINER_GET_BIT_NUM(n); + + switch (n) { + /* MDNIE_LCD1 INTG1 */ + case EXYNOS4210_COMBINER_GET_IRQ_NUM(1, 0) ... + EXYNOS4210_COMBINER_GET_IRQ_NUM(1, 3): + irq[n] = qemu_irq_split(qdev_get_gpio_in(dev, n), + irq[EXYNOS4210_COMBINER_GET_IRQ_NUM(0, bit + 4)]); + continue; + + /* TMU INTG3 */ + case EXYNOS4210_COMBINER_GET_IRQ_NUM(3, 4): + irq[n] = qemu_irq_split(qdev_get_gpio_in(dev, n), + irq[EXYNOS4210_COMBINER_GET_IRQ_NUM(2, bit)]); + continue; + + /* LCD1 INTG12 */ + case EXYNOS4210_COMBINER_GET_IRQ_NUM(12, 0) ... + EXYNOS4210_COMBINER_GET_IRQ_NUM(12, 3): + irq[n] = qemu_irq_split(qdev_get_gpio_in(dev, n), + irq[EXYNOS4210_COMBINER_GET_IRQ_NUM(11, bit + 4)]); + continue; + + /* Multi-Core Timer INTG12 */ + case EXYNOS4210_COMBINER_GET_IRQ_NUM(12, 4) ... + EXYNOS4210_COMBINER_GET_IRQ_NUM(12, 8): + irq[n] = qemu_irq_split(qdev_get_gpio_in(dev, n), + irq[EXYNOS4210_COMBINER_GET_IRQ_NUM(1, bit + 4)]); + continue; + + /* Multi-Core Timer INTG35 */ + case EXYNOS4210_COMBINER_GET_IRQ_NUM(35, 4) ... + EXYNOS4210_COMBINER_GET_IRQ_NUM(35, 8): + irq[n] = qemu_irq_split(qdev_get_gpio_in(dev, n), + irq[EXYNOS4210_COMBINER_GET_IRQ_NUM(1, bit + 4)]); + continue; + + /* Multi-Core Timer INTG51 */ + case EXYNOS4210_COMBINER_GET_IRQ_NUM(51, 4) ... + EXYNOS4210_COMBINER_GET_IRQ_NUM(51, 8): + irq[n] = qemu_irq_split(qdev_get_gpio_in(dev, n), + irq[EXYNOS4210_COMBINER_GET_IRQ_NUM(1, bit + 4)]); + continue; + + /* Multi-Core Timer INTG53 */ + case EXYNOS4210_COMBINER_GET_IRQ_NUM(53, 4) ... + EXYNOS4210_COMBINER_GET_IRQ_NUM(53, 8): + irq[n] = qemu_irq_split(qdev_get_gpio_in(dev, n), + irq[EXYNOS4210_COMBINER_GET_IRQ_NUM(1, bit + 4)]); + continue; + } + + irq[n] = qdev_get_gpio_in(dev, n); + } +} + +static uint64_t +exynos4210_combiner_read(void *opaque, hwaddr offset, unsigned size) +{ + struct Exynos4210CombinerState *s = + (struct Exynos4210CombinerState *)opaque; + uint32_t req_quad_base_n; /* Base of registers quad.
Multiply it by 4 and + get a start of corresponding group quad */ + uint32_t grp_quad_base_n; /* Base of group quad */ + uint32_t reg_n; /* Register number inside the quad */ + uint32_t val; + + req_quad_base_n = offset >> 4; + grp_quad_base_n = req_quad_base_n << 2; + reg_n = (offset - (req_quad_base_n << 4)) >> 2; + + if (req_quad_base_n >= IIC_NGRP) { + /* Read of ICIPSR register */ + return s->icipsr[reg_n]; + } + + val = 0; + + switch (reg_n) { + /* IISTR */ + case 2: + val |= s->group[grp_quad_base_n].src_pending; + val |= s->group[grp_quad_base_n + 1].src_pending << 8; + val |= s->group[grp_quad_base_n + 2].src_pending << 16; + val |= s->group[grp_quad_base_n + 3].src_pending << 24; + break; + /* IIMSR */ + case 3: + val |= s->group[grp_quad_base_n].src_mask & + s->group[grp_quad_base_n].src_pending; + val |= (s->group[grp_quad_base_n + 1].src_mask & + s->group[grp_quad_base_n + 1].src_pending) << 8; + val |= (s->group[grp_quad_base_n + 2].src_mask & + s->group[grp_quad_base_n + 2].src_pending) << 16; + val |= (s->group[grp_quad_base_n + 3].src_mask & + s->group[grp_quad_base_n + 3].src_pending) << 24; + break; + default: + if (offset >> 2 >= IIC_REGSET_SIZE) { + hw_error("exynos4210.combiner: overflow of reg_set by 0x" + TARGET_FMT_plx "offset\n", offset); + } + val = s->reg_set[offset >> 2]; + } + return val; +} + +static void exynos4210_combiner_update(void *opaque, uint8_t group_n) +{ + struct Exynos4210CombinerState *s = + (struct Exynos4210CombinerState *)opaque; + + /* Send interrupt if needed */ + if (s->group[group_n].src_mask & s->group[group_n].src_pending) { +#ifdef DEBUG_COMBINER + if (group_n != 26) { + /* skip uart */ + DPRINTF("%s raise IRQ[%d]\n", s->external ? "EXT" : "INT", group_n); + } +#endif + + /* Set Combiner interrupt pending status after masking */ + if (group_n >= 32) { + s->icipsr[1] |= 1 << (group_n - 32); + } else { + s->icipsr[0] |= 1 << group_n; + } + + qemu_irq_raise(s->output_irq[group_n]); + } else { +#ifdef DEBUG_COMBINER + if (group_n != 26) { + /* skip uart */ + DPRINTF("%s lower IRQ[%d]\n", s->external ? "EXT" : "INT", group_n); + } +#endif + + /* Set Combiner interrupt pending status after masking */ + if (group_n >= 32) { + s->icipsr[1] &= ~(1 << (group_n - 32)); + } else { + s->icipsr[0] &= ~(1 << group_n); + } + + qemu_irq_lower(s->output_irq[group_n]); + } +} + +static void exynos4210_combiner_write(void *opaque, hwaddr offset, + uint64_t val, unsigned size) +{ + struct Exynos4210CombinerState *s = + (struct Exynos4210CombinerState *)opaque; + uint32_t req_quad_base_n; /* Base of registers quad. Multiply it by 4 and + get a start of corresponding group quad */ + uint32_t grp_quad_base_n; /* Base of group quad */ + uint32_t reg_n; /* Register number inside the quad */ + + req_quad_base_n = offset >> 4; + grp_quad_base_n = req_quad_base_n << 2; + reg_n = (offset - (req_quad_base_n << 4)) >> 2; + + if (req_quad_base_n >= IIC_NGRP) { + hw_error("exynos4210.combiner: unallowed write access at offset 0x" + TARGET_FMT_plx "\n", offset); + return; + } + + if (reg_n > 1) { + hw_error("exynos4210.combiner: unallowed write access at offset 0x" + TARGET_FMT_plx "\n", offset); + return; + } + + if (offset >> 2 >= IIC_REGSET_SIZE) { + hw_error("exynos4210.combiner: overflow of reg_set by 0x" + TARGET_FMT_plx "offset\n", offset); + } + s->reg_set[offset >> 2] = val; + + switch (reg_n) { + /* IIESR */ + case 0: + /* FIXME: what if irq is pending, allowed by mask, and we allow it + * again. Interrupt will rise again! 
*/ + + DPRINTF("%s enable IRQ for groups %d, %d, %d, %d\n", + s->external ? "EXT" : "INT", + grp_quad_base_n, + grp_quad_base_n + 1, + grp_quad_base_n + 2, + grp_quad_base_n + 3); + + /* Enable interrupt sources */ + s->group[grp_quad_base_n].src_mask |= val & 0xFF; + s->group[grp_quad_base_n + 1].src_mask |= (val & 0xFF00) >> 8; + s->group[grp_quad_base_n + 2].src_mask |= (val & 0xFF0000) >> 16; + s->group[grp_quad_base_n + 3].src_mask |= (val & 0xFF000000) >> 24; + + exynos4210_combiner_update(s, grp_quad_base_n); + exynos4210_combiner_update(s, grp_quad_base_n + 1); + exynos4210_combiner_update(s, grp_quad_base_n + 2); + exynos4210_combiner_update(s, grp_quad_base_n + 3); + break; + /* IIECR */ + case 1: + DPRINTF("%s disable IRQ for groups %d, %d, %d, %d\n", + s->external ? "EXT" : "INT", + grp_quad_base_n, + grp_quad_base_n + 1, + grp_quad_base_n + 2, + grp_quad_base_n + 3); + + /* Disable interrupt sources */ + s->group[grp_quad_base_n].src_mask &= ~(val & 0xFF); + s->group[grp_quad_base_n + 1].src_mask &= ~((val & 0xFF00) >> 8); + s->group[grp_quad_base_n + 2].src_mask &= ~((val & 0xFF0000) >> 16); + s->group[grp_quad_base_n + 3].src_mask &= ~((val & 0xFF000000) >> 24); + + exynos4210_combiner_update(s, grp_quad_base_n); + exynos4210_combiner_update(s, grp_quad_base_n + 1); + exynos4210_combiner_update(s, grp_quad_base_n + 2); + exynos4210_combiner_update(s, grp_quad_base_n + 3); + break; + default: + hw_error("exynos4210.combiner: unallowed write access at offset 0x" + TARGET_FMT_plx "\n", offset); + break; + } +} + +/* Get combiner group and bit from irq number */ +static uint8_t get_combiner_group_and_bit(int irq, uint8_t *bit) +{ + *bit = irq - ((irq >> 3) << 3); + return irq >> 3; +} + +/* Process a change in an external IRQ input. */ +static void exynos4210_combiner_handler(void *opaque, int irq, int level) +{ + struct Exynos4210CombinerState *s = + (struct Exynos4210CombinerState *)opaque; + uint8_t bit_n, group_n; + + group_n = get_combiner_group_and_bit(irq, &bit_n); + + if (s->external && group_n >= EXYNOS4210_MAX_EXT_COMBINER_OUT_IRQ) { + DPRINTF("%s unallowed IRQ group 0x%x\n", s->external ? "EXT" : "INT" + , group_n); + return; + } + + if (level) { + s->group[group_n].src_pending |= 1 << bit_n; + } else { + s->group[group_n].src_pending &= ~(1 << bit_n); + } + + exynos4210_combiner_update(s, group_n); +} + +static void exynos4210_combiner_reset(DeviceState *d) +{ + struct Exynos4210CombinerState *s = (struct Exynos4210CombinerState *)d; + + memset(&s->group, 0, sizeof(s->group)); + memset(&s->reg_set, 0, sizeof(s->reg_set)); + + s->reg_set[0xC0 >> 2] = 0x01010101; + s->reg_set[0xC4 >> 2] = 0x01010101; + s->reg_set[0xD0 >> 2] = 0x01010101; + s->reg_set[0xD4 >> 2] = 0x01010101; +} + +static const MemoryRegionOps exynos4210_combiner_ops = { + .read = exynos4210_combiner_read, + .write = exynos4210_combiner_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +/* + * Internal Combiner initialization. 
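For reference, get_combiner_group_and_bit() above is just a divide/modulo by eight, since each combiner group has eight sources; an equivalent formulation (sketch, not the patch's code):

    /* sketch: equivalent group/bit split, 8 interrupt sources per group */
    static inline uint8_t combiner_group(int irq) { return irq >> 3; } /* irq / 8 */
    static inline uint8_t combiner_bit(int irq)   { return irq & 7; }  /* irq % 8 */
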
+ */ +static void exynos4210_combiner_init(Object *obj) +{ + DeviceState *dev = DEVICE(obj); + Exynos4210CombinerState *s = EXYNOS4210_COMBINER(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + unsigned int i; + + /* Allocate general purpose input signals and connect a handler to each of + * them */ + qdev_init_gpio_in(dev, exynos4210_combiner_handler, IIC_NIRQ); + + /* Connect SysBusDev irqs to device specific irqs */ + for (i = 0; i < IIC_NGRP; i++) { + sysbus_init_irq(sbd, &s->output_irq[i]); + } + + memory_region_init_io(&s->iomem, obj, &exynos4210_combiner_ops, s, + "exynos4210-combiner", IIC_REGION_SIZE); + sysbus_init_mmio(sbd, &s->iomem); +} + +static Property exynos4210_combiner_properties[] = { + DEFINE_PROP_UINT32("external", Exynos4210CombinerState, external, 0), + DEFINE_PROP_END_OF_LIST(), +}; + +static void exynos4210_combiner_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = exynos4210_combiner_reset; + device_class_set_props(dc, exynos4210_combiner_properties); + dc->vmsd = &vmstate_exynos4210_combiner; +} + +static const TypeInfo exynos4210_combiner_info = { + .name = TYPE_EXYNOS4210_COMBINER, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(Exynos4210CombinerState), + .instance_init = exynos4210_combiner_init, + .class_init = exynos4210_combiner_class_init, +}; + +static void exynos4210_combiner_register_types(void) +{ + type_register_static(&exynos4210_combiner_info); +} + +type_init(exynos4210_combiner_register_types) diff --git a/hw/intc/exynos4210_gic.c b/hw/intc/exynos4210_gic.c new file mode 100644 index 000000000..bc73d1f11 --- /dev/null +++ b/hw/intc/exynos4210_gic.c @@ -0,0 +1,482 @@ +/* + * Samsung exynos4210 GIC implementation. Based on hw/arm_gic.c + * + * Copyright (c) 2000 - 2011 Samsung Electronics Co., Ltd. + * All rights reserved. + * + * Evgeny Voevodin + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * See the GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . 
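One more note on the combiner above: exynos4210_combiner_update() mirrors each group's pending state into the two 32-bit ICIPSR words with explicit shift/mask arithmetic; the same update can be phrased with deposit32() from qemu/bitops.h (sketch, hypothetical helper):

    /* sketch: track one pending bit per group across a 2x32-bit bitmap */
    static void icipsr_set(uint32_t icipsr[2], unsigned group, bool pending)
    {
        icipsr[group >> 5] = deposit32(icipsr[group >> 5], group & 31, 1,
                                       pending);
    }
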
+ */ + +#include "qemu/osdep.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "qapi/error.h" +#include "qemu/module.h" +#include "hw/irq.h" +#include "hw/qdev-properties.h" +#include "hw/arm/exynos4210.h" +#include "qom/object.h" + +enum ExtGicId { + EXT_GIC_ID_MDMA_LCD0 = 66, + EXT_GIC_ID_PDMA0, + EXT_GIC_ID_PDMA1, + EXT_GIC_ID_TIMER0, + EXT_GIC_ID_TIMER1, + EXT_GIC_ID_TIMER2, + EXT_GIC_ID_TIMER3, + EXT_GIC_ID_TIMER4, + EXT_GIC_ID_MCT_L0, + EXT_GIC_ID_WDT, + EXT_GIC_ID_RTC_ALARM, + EXT_GIC_ID_RTC_TIC, + EXT_GIC_ID_GPIO_XB, + EXT_GIC_ID_GPIO_XA, + EXT_GIC_ID_MCT_L1, + EXT_GIC_ID_IEM_APC, + EXT_GIC_ID_IEM_IEC, + EXT_GIC_ID_NFC, + EXT_GIC_ID_UART0, + EXT_GIC_ID_UART1, + EXT_GIC_ID_UART2, + EXT_GIC_ID_UART3, + EXT_GIC_ID_UART4, + EXT_GIC_ID_MCT_G0, + EXT_GIC_ID_I2C0, + EXT_GIC_ID_I2C1, + EXT_GIC_ID_I2C2, + EXT_GIC_ID_I2C3, + EXT_GIC_ID_I2C4, + EXT_GIC_ID_I2C5, + EXT_GIC_ID_I2C6, + EXT_GIC_ID_I2C7, + EXT_GIC_ID_SPI0, + EXT_GIC_ID_SPI1, + EXT_GIC_ID_SPI2, + EXT_GIC_ID_MCT_G1, + EXT_GIC_ID_USB_HOST, + EXT_GIC_ID_USB_DEVICE, + EXT_GIC_ID_MODEMIF, + EXT_GIC_ID_HSMMC0, + EXT_GIC_ID_HSMMC1, + EXT_GIC_ID_HSMMC2, + EXT_GIC_ID_HSMMC3, + EXT_GIC_ID_SDMMC, + EXT_GIC_ID_MIPI_CSI_4LANE, + EXT_GIC_ID_MIPI_DSI_4LANE, + EXT_GIC_ID_MIPI_CSI_2LANE, + EXT_GIC_ID_MIPI_DSI_2LANE, + EXT_GIC_ID_ONENAND_AUDI, + EXT_GIC_ID_ROTATOR, + EXT_GIC_ID_FIMC0, + EXT_GIC_ID_FIMC1, + EXT_GIC_ID_FIMC2, + EXT_GIC_ID_FIMC3, + EXT_GIC_ID_JPEG, + EXT_GIC_ID_2D, + EXT_GIC_ID_PCIe, + EXT_GIC_ID_MIXER, + EXT_GIC_ID_HDMI, + EXT_GIC_ID_HDMI_I2C, + EXT_GIC_ID_MFC, + EXT_GIC_ID_TVENC, +}; + +enum ExtInt { + EXT_GIC_ID_EXTINT0 = 48, + EXT_GIC_ID_EXTINT1, + EXT_GIC_ID_EXTINT2, + EXT_GIC_ID_EXTINT3, + EXT_GIC_ID_EXTINT4, + EXT_GIC_ID_EXTINT5, + EXT_GIC_ID_EXTINT6, + EXT_GIC_ID_EXTINT7, + EXT_GIC_ID_EXTINT8, + EXT_GIC_ID_EXTINT9, + EXT_GIC_ID_EXTINT10, + EXT_GIC_ID_EXTINT11, + EXT_GIC_ID_EXTINT12, + EXT_GIC_ID_EXTINT13, + EXT_GIC_ID_EXTINT14, + EXT_GIC_ID_EXTINT15 +}; + +/* + * External GIC sources which are not from External Interrupt Combiner or + * External Interrupts are starting from EXYNOS4210_MAX_EXT_COMBINER_OUT_IRQ, + * which is INTG16 in Internal Interrupt Combiner. 
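The combiner_grp_to_gic_id[][] table below is indexed by (combiner group - EXYNOS4210_MAX_EXT_COMBINER_OUT_IRQ) and source bit, with 0 meaning the source has no external GIC routing. A usage sketch (this mirrors the lookup exynos4210_init_board_irqs() performs later):

    /* sketch: resolve combiner source (grp, bit) to an external GIC ID */
    uint32_t id = combiner_grp_to_gic_id[grp -
                      EXYNOS4210_MAX_EXT_COMBINER_OUT_IRQ][bit];
    if (id) {
        /* source is also wired to the external GIC input (id - 32) */
    }
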
+ */ + +static const uint32_t +combiner_grp_to_gic_id[64-EXYNOS4210_MAX_EXT_COMBINER_OUT_IRQ][8] = { + /* int combiner groups 16-19 */ + { }, { }, { }, { }, + /* int combiner group 20 */ + { 0, EXT_GIC_ID_MDMA_LCD0 }, + /* int combiner group 21 */ + { EXT_GIC_ID_PDMA0, EXT_GIC_ID_PDMA1 }, + /* int combiner group 22 */ + { EXT_GIC_ID_TIMER0, EXT_GIC_ID_TIMER1, EXT_GIC_ID_TIMER2, + EXT_GIC_ID_TIMER3, EXT_GIC_ID_TIMER4 }, + /* int combiner group 23 */ + { EXT_GIC_ID_RTC_ALARM, EXT_GIC_ID_RTC_TIC }, + /* int combiner group 24 */ + { EXT_GIC_ID_GPIO_XB, EXT_GIC_ID_GPIO_XA }, + /* int combiner group 25 */ + { EXT_GIC_ID_IEM_APC, EXT_GIC_ID_IEM_IEC }, + /* int combiner group 26 */ + { EXT_GIC_ID_UART0, EXT_GIC_ID_UART1, EXT_GIC_ID_UART2, EXT_GIC_ID_UART3, + EXT_GIC_ID_UART4 }, + /* int combiner group 27 */ + { EXT_GIC_ID_I2C0, EXT_GIC_ID_I2C1, EXT_GIC_ID_I2C2, EXT_GIC_ID_I2C3, + EXT_GIC_ID_I2C4, EXT_GIC_ID_I2C5, EXT_GIC_ID_I2C6, + EXT_GIC_ID_I2C7 }, + /* int combiner group 28 */ + { EXT_GIC_ID_SPI0, EXT_GIC_ID_SPI1, EXT_GIC_ID_SPI2, EXT_GIC_ID_USB_HOST }, + /* int combiner group 29 */ + { EXT_GIC_ID_HSMMC0, EXT_GIC_ID_HSMMC1, EXT_GIC_ID_HSMMC2, + EXT_GIC_ID_HSMMC3, EXT_GIC_ID_SDMMC }, + /* int combiner group 30 */ + { EXT_GIC_ID_MIPI_CSI_4LANE, EXT_GIC_ID_MIPI_CSI_2LANE }, + /* int combiner group 31 */ + { EXT_GIC_ID_MIPI_DSI_4LANE, EXT_GIC_ID_MIPI_DSI_2LANE }, + /* int combiner group 32 */ + { EXT_GIC_ID_FIMC0, EXT_GIC_ID_FIMC1 }, + /* int combiner group 33 */ + { EXT_GIC_ID_FIMC2, EXT_GIC_ID_FIMC3 }, + /* int combiner group 34 */ + { EXT_GIC_ID_ONENAND_AUDI, EXT_GIC_ID_NFC }, + /* int combiner group 35 */ + { 0, 0, 0, EXT_GIC_ID_MCT_L1, EXT_GIC_ID_MCT_G0, EXT_GIC_ID_MCT_G1 }, + /* int combiner group 36 */ + { EXT_GIC_ID_MIXER }, + /* int combiner group 37 */ + { EXT_GIC_ID_EXTINT4, EXT_GIC_ID_EXTINT5, EXT_GIC_ID_EXTINT6, + EXT_GIC_ID_EXTINT7 }, + /* groups 38-50 */ + { }, { }, { }, { }, { }, { }, { }, { }, { }, { }, { }, { }, { }, + /* int combiner group 51 */ + { EXT_GIC_ID_MCT_L0, 0, 0, 0, EXT_GIC_ID_MCT_G0, EXT_GIC_ID_MCT_G1 }, + /* group 52 */ + { }, + /* int combiner group 53 */ + { EXT_GIC_ID_WDT, 0, 0, 0, EXT_GIC_ID_MCT_G0, EXT_GIC_ID_MCT_G1 }, + /* groups 54-63 */ + { }, { }, { }, { }, { }, { }, { }, { }, { }, { } +}; + +#define EXYNOS4210_GIC_NIRQ 160 + +#define EXYNOS4210_EXT_GIC_CPU_REGION_SIZE 0x10000 +#define EXYNOS4210_EXT_GIC_DIST_REGION_SIZE 0x10000 + +#define EXYNOS4210_EXT_GIC_PER_CPU_OFFSET 0x8000 +#define EXYNOS4210_EXT_GIC_CPU_GET_OFFSET(n) \ + ((n) * EXYNOS4210_EXT_GIC_PER_CPU_OFFSET) +#define EXYNOS4210_EXT_GIC_DIST_GET_OFFSET(n) \ + ((n) * EXYNOS4210_EXT_GIC_PER_CPU_OFFSET) + +#define EXYNOS4210_GIC_CPU_REGION_SIZE 0x100 +#define EXYNOS4210_GIC_DIST_REGION_SIZE 0x1000 + +static void exynos4210_irq_handler(void *opaque, int irq, int level) +{ + Exynos4210Irq *s = (Exynos4210Irq *)opaque; + + /* Bypass */ + qemu_set_irq(s->board_irqs[irq], level); +} + +/* + * Initialize exynos4210 IRQ subsystem stub. + */ +qemu_irq *exynos4210_init_irq(Exynos4210Irq *s) +{ + return qemu_allocate_irqs(exynos4210_irq_handler, s, + EXYNOS4210_MAX_INT_COMBINER_IN_IRQ); +} + +/* + * Initialize board IRQs. + * These IRQs contain split Int/External Combiner and External GIC IRQs.
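qemu_irq_split() from hw/irq.h provides the one-source, two-sinks fan-out used by exynos4210_combiner_get_gpioin() above and exynos4210_init_board_irqs() below; minimal usage sketch (sink names hypothetical):

    /* sketch: one input line driving two destinations */
    qemu_irq both = qemu_irq_split(sink_a, sink_b);
    qemu_set_irq(both, 1);    /* raises sink_a and sink_b */
    qemu_set_irq(both, 0);    /* lowers both */
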
+ */ +void exynos4210_init_board_irqs(Exynos4210Irq *s) +{ + uint32_t grp, bit, irq_id, n; + + for (n = 0; n < EXYNOS4210_MAX_EXT_COMBINER_IN_IRQ; n++) { + irq_id = 0; + if (n == EXYNOS4210_COMBINER_GET_IRQ_NUM(1, 4) || + n == EXYNOS4210_COMBINER_GET_IRQ_NUM(12, 4)) { + /* MCT_G0 is passed to External GIC */ + irq_id = EXT_GIC_ID_MCT_G0; + } + if (n == EXYNOS4210_COMBINER_GET_IRQ_NUM(1, 5) || + n == EXYNOS4210_COMBINER_GET_IRQ_NUM(12, 5)) { + /* MCT_G1 is passed to External GIC */ + irq_id = EXT_GIC_ID_MCT_G1; + } + if (irq_id) { + s->board_irqs[n] = qemu_irq_split(s->int_combiner_irq[n], + s->ext_gic_irq[irq_id-32]); + } else { + s->board_irqs[n] = qemu_irq_split(s->int_combiner_irq[n], + s->ext_combiner_irq[n]); + } + } + for (; n < EXYNOS4210_MAX_INT_COMBINER_IN_IRQ; n++) { + /* these IDs are passed to Internal Combiner and External GIC */ + grp = EXYNOS4210_COMBINER_GET_GRP_NUM(n); + bit = EXYNOS4210_COMBINER_GET_BIT_NUM(n); + irq_id = combiner_grp_to_gic_id[grp - + EXYNOS4210_MAX_EXT_COMBINER_OUT_IRQ][bit]; + + if (irq_id) { + s->board_irqs[n] = qemu_irq_split(s->int_combiner_irq[n], + s->ext_gic_irq[irq_id-32]); + } + } +} + +/* + * Get IRQ number from exynos4210 IRQ subsystem stub. + * To identify IRQ source use internal combiner group and bit number + * grp - group number + * bit - bit number inside group + */ +uint32_t exynos4210_get_irq(uint32_t grp, uint32_t bit) +{ + return EXYNOS4210_COMBINER_GET_IRQ_NUM(grp, bit); +} + +/********* GIC part *********/ + +#define TYPE_EXYNOS4210_GIC "exynos4210.gic" +OBJECT_DECLARE_SIMPLE_TYPE(Exynos4210GicState, EXYNOS4210_GIC) + +struct Exynos4210GicState { + SysBusDevice parent_obj; + + MemoryRegion cpu_container; + MemoryRegion dist_container; + MemoryRegion cpu_alias[EXYNOS4210_NCPUS]; + MemoryRegion dist_alias[EXYNOS4210_NCPUS]; + uint32_t num_cpu; + DeviceState *gic; +}; + +static void exynos4210_gic_set_irq(void *opaque, int irq, int level) +{ + Exynos4210GicState *s = (Exynos4210GicState *)opaque; + qemu_set_irq(qdev_get_gpio_in(s->gic, irq), level); +} + +static void exynos4210_gic_realize(DeviceState *dev, Error **errp) +{ + Object *obj = OBJECT(dev); + Exynos4210GicState *s = EXYNOS4210_GIC(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + const char cpu_prefix[] = "exynos4210-gic-alias_cpu"; + const char dist_prefix[] = "exynos4210-gic-alias_dist"; + char cpu_alias_name[sizeof(cpu_prefix) + 3]; + char dist_alias_name[sizeof(cpu_prefix) + 3]; + SysBusDevice *gicbusdev; + uint32_t n = s->num_cpu; + uint32_t i; + + s->gic = qdev_new("arm_gic"); + qdev_prop_set_uint32(s->gic, "num-cpu", s->num_cpu); + qdev_prop_set_uint32(s->gic, "num-irq", EXYNOS4210_GIC_NIRQ); + gicbusdev = SYS_BUS_DEVICE(s->gic); + sysbus_realize_and_unref(gicbusdev, &error_fatal); + + /* Pass through outbound IRQ lines from the GIC */ + sysbus_pass_irq(sbd, gicbusdev); + + /* Pass through inbound GPIO lines to the GIC */ + qdev_init_gpio_in(dev, exynos4210_gic_set_irq, + EXYNOS4210_GIC_NIRQ - 32); + + memory_region_init(&s->cpu_container, obj, "exynos4210-cpu-container", + EXYNOS4210_EXT_GIC_CPU_REGION_SIZE); + memory_region_init(&s->dist_container, obj, "exynos4210-dist-container", + EXYNOS4210_EXT_GIC_DIST_REGION_SIZE); + + /* + * This clues in gcc that our on-stack buffers do, in fact, have + * enough room for the cpu numbers. gcc 9.2.1 on 32-bit x86 + * doesn't figure this out otherwise, and gives spurious warnings.
+ */ + assert(n <= EXYNOS4210_NCPUS); + for (i = 0; i < n; i++) { + /* Map CPU interface per SMP Core */ + sprintf(cpu_alias_name, "%s%x", cpu_prefix, i); + memory_region_init_alias(&s->cpu_alias[i], obj, + cpu_alias_name, + sysbus_mmio_get_region(gicbusdev, 1), + 0, + EXYNOS4210_GIC_CPU_REGION_SIZE); + memory_region_add_subregion(&s->cpu_container, + EXYNOS4210_EXT_GIC_CPU_GET_OFFSET(i), &s->cpu_alias[i]); + + /* Map Distributor per SMP Core */ + sprintf(dist_alias_name, "%s%x", dist_prefix, i); + memory_region_init_alias(&s->dist_alias[i], obj, + dist_alias_name, + sysbus_mmio_get_region(gicbusdev, 0), + 0, + EXYNOS4210_GIC_DIST_REGION_SIZE); + memory_region_add_subregion(&s->dist_container, + EXYNOS4210_EXT_GIC_DIST_GET_OFFSET(i), &s->dist_alias[i]); + } + + sysbus_init_mmio(sbd, &s->cpu_container); + sysbus_init_mmio(sbd, &s->dist_container); +} + +static Property exynos4210_gic_properties[] = { + DEFINE_PROP_UINT32("num-cpu", Exynos4210GicState, num_cpu, 1), + DEFINE_PROP_END_OF_LIST(), +}; + +static void exynos4210_gic_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + device_class_set_props(dc, exynos4210_gic_properties); + dc->realize = exynos4210_gic_realize; +} + +static const TypeInfo exynos4210_gic_info = { + .name = TYPE_EXYNOS4210_GIC, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(Exynos4210GicState), + .class_init = exynos4210_gic_class_init, +}; + +static void exynos4210_gic_register_types(void) +{ + type_register_static(&exynos4210_gic_info); +} + +type_init(exynos4210_gic_register_types) + +/* IRQ OR Gate struct. + * + * This device models an OR gate. There are n_in input qdev gpio lines and one + * output sysbus IRQ line. The output IRQ level is formed as OR between all + * gpio inputs. + */ + +#define TYPE_EXYNOS4210_IRQ_GATE "exynos4210.irq_gate" +OBJECT_DECLARE_SIMPLE_TYPE(Exynos4210IRQGateState, EXYNOS4210_IRQ_GATE) + +struct Exynos4210IRQGateState { + SysBusDevice parent_obj; + + uint32_t n_in; /* inputs amount */ + uint32_t *level; /* input levels */ + qemu_irq out; /* output IRQ */ +}; + +static Property exynos4210_irq_gate_properties[] = { + DEFINE_PROP_UINT32("n_in", Exynos4210IRQGateState, n_in, 1), + DEFINE_PROP_END_OF_LIST(), +}; + +static const VMStateDescription vmstate_exynos4210_irq_gate = { + .name = "exynos4210.irq_gate", + .version_id = 2, + .minimum_version_id = 2, + .fields = (VMStateField[]) { + VMSTATE_VBUFFER_UINT32(level, Exynos4210IRQGateState, 1, NULL, n_in), + VMSTATE_END_OF_LIST() + } +}; + +/* Process a change in IRQ input. */ +static void exynos4210_irq_gate_handler(void *opaque, int irq, int level) +{ + Exynos4210IRQGateState *s = (Exynos4210IRQGateState *)opaque; + uint32_t i; + + assert(irq < s->n_in); + + s->level[irq] = level; + + for (i = 0; i < s->n_in; i++) { + if (s->level[i] >= 1) { + qemu_irq_raise(s->out); + return; + } + } + + qemu_irq_lower(s->out); +} + +static void exynos4210_irq_gate_reset(DeviceState *d) +{ + Exynos4210IRQGateState *s = EXYNOS4210_IRQ_GATE(d); + + memset(s->level, 0, s->n_in * sizeof(*s->level)); +} + +/* + * IRQ Gate initialization. 
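The gate handler above raises the output as soon as any latched input level is non-zero; the decision reduces to an OR across the level array (sketch):

    /* sketch: OR-gate output over the latched input levels */
    static bool gate_output(const uint32_t *level, uint32_t n_in)
    {
        for (uint32_t i = 0; i < n_in; i++) {
            if (level[i]) {
                return true;
            }
        }
        return false;
    }
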
+ */
+static void exynos4210_irq_gate_init(Object *obj)
+{
+    Exynos4210IRQGateState *s = EXYNOS4210_IRQ_GATE(obj);
+    SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
+
+    sysbus_init_irq(sbd, &s->out);
+}
+
+static void exynos4210_irq_gate_realize(DeviceState *dev, Error **errp)
+{
+    Exynos4210IRQGateState *s = EXYNOS4210_IRQ_GATE(dev);
+
+    /*
+     * Allocate the general purpose input signals and connect a handler
+     * to each of them.
+     */
+    qdev_init_gpio_in(dev, exynos4210_irq_gate_handler, s->n_in);
+
+    s->level = g_malloc0(s->n_in * sizeof(*s->level));
+}
+
+static void exynos4210_irq_gate_class_init(ObjectClass *klass, void *data)
+{
+    DeviceClass *dc = DEVICE_CLASS(klass);
+
+    dc->reset = exynos4210_irq_gate_reset;
+    dc->vmsd = &vmstate_exynos4210_irq_gate;
+    device_class_set_props(dc, exynos4210_irq_gate_properties);
+    dc->realize = exynos4210_irq_gate_realize;
+}
+
+static const TypeInfo exynos4210_irq_gate_info = {
+    .name = TYPE_EXYNOS4210_IRQ_GATE,
+    .parent = TYPE_SYS_BUS_DEVICE,
+    .instance_size = sizeof(Exynos4210IRQGateState),
+    .instance_init = exynos4210_irq_gate_init,
+    .class_init = exynos4210_irq_gate_class_init,
+};
+
+static void exynos4210_irq_gate_register_types(void)
+{
+    type_register_static(&exynos4210_irq_gate_info);
+}
+
+type_init(exynos4210_irq_gate_register_types)
diff --git a/hw/intc/gic_internal.h b/hw/intc/gic_internal.h
new file mode 100644
index 000000000..8d29b40ca
--- /dev/null
+++ b/hw/intc/gic_internal.h
@@ -0,0 +1,322 @@
+/*
+ * ARM GIC support - internal interfaces
+ *
+ * Copyright (c) 2012 Linaro Limited
+ * Written by Peter Maydell
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */ + +#ifndef QEMU_ARM_GIC_INTERNAL_H +#define QEMU_ARM_GIC_INTERNAL_H + +#include "hw/registerfields.h" +#include "hw/intc/arm_gic.h" + +#define ALL_CPU_MASK ((unsigned)(((1 << GIC_NCPU) - 1))) + +#define GIC_DIST_SET_ENABLED(irq, cm) (s->irq_state[irq].enabled |= (cm)) +#define GIC_DIST_CLEAR_ENABLED(irq, cm) (s->irq_state[irq].enabled &= ~(cm)) +#define GIC_DIST_TEST_ENABLED(irq, cm) ((s->irq_state[irq].enabled & (cm)) != 0) +#define GIC_DIST_SET_PENDING(irq, cm) (s->irq_state[irq].pending |= (cm)) +#define GIC_DIST_CLEAR_PENDING(irq, cm) (s->irq_state[irq].pending &= ~(cm)) +#define GIC_DIST_SET_ACTIVE(irq, cm) (s->irq_state[irq].active |= (cm)) +#define GIC_DIST_CLEAR_ACTIVE(irq, cm) (s->irq_state[irq].active &= ~(cm)) +#define GIC_DIST_TEST_ACTIVE(irq, cm) ((s->irq_state[irq].active & (cm)) != 0) +#define GIC_DIST_SET_MODEL(irq) (s->irq_state[irq].model = true) +#define GIC_DIST_CLEAR_MODEL(irq) (s->irq_state[irq].model = false) +#define GIC_DIST_TEST_MODEL(irq) (s->irq_state[irq].model) +#define GIC_DIST_SET_LEVEL(irq, cm) (s->irq_state[irq].level |= (cm)) +#define GIC_DIST_CLEAR_LEVEL(irq, cm) (s->irq_state[irq].level &= ~(cm)) +#define GIC_DIST_TEST_LEVEL(irq, cm) ((s->irq_state[irq].level & (cm)) != 0) +#define GIC_DIST_SET_EDGE_TRIGGER(irq) (s->irq_state[irq].edge_trigger = true) +#define GIC_DIST_CLEAR_EDGE_TRIGGER(irq) \ + (s->irq_state[irq].edge_trigger = false) +#define GIC_DIST_TEST_EDGE_TRIGGER(irq) (s->irq_state[irq].edge_trigger) +#define GIC_DIST_GET_PRIORITY(irq, cpu) (((irq) < GIC_INTERNAL) ? \ + s->priority1[irq][cpu] : \ + s->priority2[(irq) - GIC_INTERNAL]) +#define GIC_DIST_TARGET(irq) (s->irq_target[irq]) +#define GIC_DIST_CLEAR_GROUP(irq, cm) (s->irq_state[irq].group &= ~(cm)) +#define GIC_DIST_SET_GROUP(irq, cm) (s->irq_state[irq].group |= (cm)) +#define GIC_DIST_TEST_GROUP(irq, cm) ((s->irq_state[irq].group & (cm)) != 0) + +#define GICD_CTLR_EN_GRP0 (1U << 0) +#define GICD_CTLR_EN_GRP1 (1U << 1) + +#define GICC_CTLR_EN_GRP0 (1U << 0) +#define GICC_CTLR_EN_GRP1 (1U << 1) +#define GICC_CTLR_ACK_CTL (1U << 2) +#define GICC_CTLR_FIQ_EN (1U << 3) +#define GICC_CTLR_CBPR (1U << 4) /* GICv1: SBPR */ +#define GICC_CTLR_EOIMODE (1U << 9) +#define GICC_CTLR_EOIMODE_NS (1U << 10) + +REG32(GICH_HCR, 0x0) + FIELD(GICH_HCR, EN, 0, 1) + FIELD(GICH_HCR, UIE, 1, 1) + FIELD(GICH_HCR, LRENPIE, 2, 1) + FIELD(GICH_HCR, NPIE, 3, 1) + FIELD(GICH_HCR, VGRP0EIE, 4, 1) + FIELD(GICH_HCR, VGRP0DIE, 5, 1) + FIELD(GICH_HCR, VGRP1EIE, 6, 1) + FIELD(GICH_HCR, VGRP1DIE, 7, 1) + FIELD(GICH_HCR, EOICount, 27, 5) + +#define GICH_HCR_MASK \ + (R_GICH_HCR_EN_MASK | R_GICH_HCR_UIE_MASK | \ + R_GICH_HCR_LRENPIE_MASK | R_GICH_HCR_NPIE_MASK | \ + R_GICH_HCR_VGRP0EIE_MASK | R_GICH_HCR_VGRP0DIE_MASK | \ + R_GICH_HCR_VGRP1EIE_MASK | R_GICH_HCR_VGRP1DIE_MASK | \ + R_GICH_HCR_EOICount_MASK) + +REG32(GICH_VTR, 0x4) + FIELD(GICH_VTR, ListRegs, 0, 6) + FIELD(GICH_VTR, PREbits, 26, 3) + FIELD(GICH_VTR, PRIbits, 29, 3) + +REG32(GICH_VMCR, 0x8) + FIELD(GICH_VMCR, VMCCtlr, 0, 10) + FIELD(GICH_VMCR, VMABP, 18, 3) + FIELD(GICH_VMCR, VMBP, 21, 3) + FIELD(GICH_VMCR, VMPriMask, 27, 5) + +REG32(GICH_MISR, 0x10) + FIELD(GICH_MISR, EOI, 0, 1) + FIELD(GICH_MISR, U, 1, 1) + FIELD(GICH_MISR, LRENP, 2, 1) + FIELD(GICH_MISR, NP, 3, 1) + FIELD(GICH_MISR, VGrp0E, 4, 1) + FIELD(GICH_MISR, VGrp0D, 5, 1) + FIELD(GICH_MISR, VGrp1E, 6, 1) + FIELD(GICH_MISR, VGrp1D, 7, 1) + +REG32(GICH_EISR0, 0x20) +REG32(GICH_EISR1, 0x24) +REG32(GICH_ELRSR0, 0x30) +REG32(GICH_ELRSR1, 0x34) +REG32(GICH_APR, 0xf0) + +REG32(GICH_LR0, 0x100) + 
FIELD(GICH_LR0, VirtualID, 0, 10)
+    FIELD(GICH_LR0, PhysicalID, 10, 10)
+    FIELD(GICH_LR0, CPUID, 10, 3)
+    FIELD(GICH_LR0, EOI, 19, 1)
+    FIELD(GICH_LR0, Priority, 23, 5)
+    FIELD(GICH_LR0, State, 28, 2)
+    FIELD(GICH_LR0, Grp1, 30, 1)
+    FIELD(GICH_LR0, HW, 31, 1)
+
+/* Last LR register */
+REG32(GICH_LR63, 0x1fc)
+
+#define GICH_LR_MASK \
+    (R_GICH_LR0_VirtualID_MASK | R_GICH_LR0_PhysicalID_MASK | \
+     R_GICH_LR0_CPUID_MASK | R_GICH_LR0_EOI_MASK | \
+     R_GICH_LR0_Priority_MASK | R_GICH_LR0_State_MASK | \
+     R_GICH_LR0_Grp1_MASK | R_GICH_LR0_HW_MASK)
+
+#define GICH_LR_STATE_INVALID 0
+#define GICH_LR_STATE_PENDING 1
+#define GICH_LR_STATE_ACTIVE 2
+#define GICH_LR_STATE_ACTIVE_PENDING 3
+
+#define GICH_LR_VIRT_ID(entry) (FIELD_EX32(entry, GICH_LR0, VirtualID))
+#define GICH_LR_PHYS_ID(entry) (FIELD_EX32(entry, GICH_LR0, PhysicalID))
+#define GICH_LR_CPUID(entry) (FIELD_EX32(entry, GICH_LR0, CPUID))
+#define GICH_LR_EOI(entry) (FIELD_EX32(entry, GICH_LR0, EOI))
+#define GICH_LR_PRIORITY(entry) (FIELD_EX32(entry, GICH_LR0, Priority) << 3)
+#define GICH_LR_STATE(entry) (FIELD_EX32(entry, GICH_LR0, State))
+#define GICH_LR_GROUP(entry) (FIELD_EX32(entry, GICH_LR0, Grp1))
+#define GICH_LR_HW(entry) (FIELD_EX32(entry, GICH_LR0, HW))
+
+#define GICH_LR_CLEAR_PENDING(entry) \
+    ((entry) &= ~(GICH_LR_STATE_PENDING << R_GICH_LR0_State_SHIFT))
+#define GICH_LR_SET_ACTIVE(entry) \
+    ((entry) |= (GICH_LR_STATE_ACTIVE << R_GICH_LR0_State_SHIFT))
+#define GICH_LR_CLEAR_ACTIVE(entry) \
+    ((entry) &= ~(GICH_LR_STATE_ACTIVE << R_GICH_LR0_State_SHIFT))
+
+/* Valid bits for GICC_CTLR for GICv1, v1 with security extensions,
+ * GICv2 and GICv2 with security extensions:
+ */
+#define GICC_CTLR_V1_MASK 0x1
+#define GICC_CTLR_V1_S_MASK 0x1f
+#define GICC_CTLR_V2_MASK 0x21f
+#define GICC_CTLR_V2_S_MASK 0x61f
+
+/* The special cases for the revision property: */
+#define REV_11MPCORE 0
+
+uint32_t gic_acknowledge_irq(GICState *s, int cpu, MemTxAttrs attrs);
+void gic_dist_set_priority(GICState *s, int cpu, int irq, uint8_t val,
+                           MemTxAttrs attrs);
+
+static inline bool gic_test_pending(GICState *s, int irq, int cm)
+{
+    if (s->revision == REV_11MPCORE) {
+        return s->irq_state[irq].pending & cm;
+    } else {
+        /* Edge-triggered interrupts are marked pending on a rising edge, but
+         * level-triggered interrupts are either considered pending when the
+         * level is active or if software has explicitly written to
+         * GICD_ISPENDR to set the state pending.
+         */
+        return (s->irq_state[irq].pending & cm) ||
+            (!GIC_DIST_TEST_EDGE_TRIGGER(irq) && GIC_DIST_TEST_LEVEL(irq, cm));
+    }
+}
+
+static inline bool gic_is_vcpu(int cpu)
+{
+    return cpu >= GIC_NCPU;
+}
+
+static inline int gic_get_vcpu_real_id(int cpu)
+{
+    return (cpu >= GIC_NCPU) ? (cpu - GIC_NCPU) : cpu;
+}
+
+/* Return true if the given vIRQ state exists in an LR and is either active
+ * or pending and active.
+ *
+ * This function is used to check that a guest's `end of interrupt' or
+ * `interrupts deactivation' request is valid, and matches an LR of an
+ * already acknowledged vIRQ (i.e. has the active bit set in its state).
+ */
+static inline bool gic_virq_is_valid(GICState *s, int irq, int vcpu)
+{
+    int cpu = gic_get_vcpu_real_id(vcpu);
+    int lr_idx;
+
+    for (lr_idx = 0; lr_idx < s->num_lrs; lr_idx++) {
+        uint32_t *entry = &s->h_lr[lr_idx][cpu];
+
+        if ((GICH_LR_VIRT_ID(*entry) == irq) &&
+            (GICH_LR_STATE(*entry) & GICH_LR_STATE_ACTIVE)) {
+            return true;
+        }
+    }
+
+    return false;
+}
+
+/* Return a pointer to the LR entry matching the given vIRQ.
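+ * (The LR arrays are indexed by the real CPU number, so a vCPU index is
+ * first mapped down with gic_get_vcpu_real_id().)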
+ * + * This function is used to retrieve an LR for which we know for sure that the + * corresponding vIRQ exists in the current context (i.e. its current state is + * not `invalid'): + * - Either the corresponding vIRQ has been validated with gic_virq_is_valid() + * so it is `active' or `active and pending', + * - Or it was pending and has been selected by gic_get_best_virq(). It is now + * `pending', `active' or `active and pending', depending on what the guest + * already did with this vIRQ. + * + * Having multiple LRs with the same VirtualID leads to UNPREDICTABLE + * behaviour in the GIC. We choose to return the first one that matches. + */ +static inline uint32_t *gic_get_lr_entry(GICState *s, int irq, int vcpu) +{ + int cpu = gic_get_vcpu_real_id(vcpu); + int lr_idx; + + for (lr_idx = 0; lr_idx < s->num_lrs; lr_idx++) { + uint32_t *entry = &s->h_lr[lr_idx][cpu]; + + if ((GICH_LR_VIRT_ID(*entry) == irq) && + (GICH_LR_STATE(*entry) != GICH_LR_STATE_INVALID)) { + return entry; + } + } + + g_assert_not_reached(); +} + +static inline bool gic_test_group(GICState *s, int irq, int cpu) +{ + if (gic_is_vcpu(cpu)) { + uint32_t *entry = gic_get_lr_entry(s, irq, cpu); + return GICH_LR_GROUP(*entry); + } else { + return GIC_DIST_TEST_GROUP(irq, 1 << cpu); + } +} + +static inline void gic_clear_pending(GICState *s, int irq, int cpu) +{ + if (gic_is_vcpu(cpu)) { + uint32_t *entry = gic_get_lr_entry(s, irq, cpu); + GICH_LR_CLEAR_PENDING(*entry); + } else { + /* Clear pending state for both level and edge triggered + * interrupts. (level triggered interrupts with an active line + * remain pending, see gic_test_pending) + */ + GIC_DIST_CLEAR_PENDING(irq, GIC_DIST_TEST_MODEL(irq) ? ALL_CPU_MASK + : (1 << cpu)); + } +} + +static inline void gic_set_active(GICState *s, int irq, int cpu) +{ + if (gic_is_vcpu(cpu)) { + uint32_t *entry = gic_get_lr_entry(s, irq, cpu); + GICH_LR_SET_ACTIVE(*entry); + } else { + GIC_DIST_SET_ACTIVE(irq, 1 << cpu); + } +} + +static inline void gic_clear_active(GICState *s, int irq, int cpu) +{ + if (gic_is_vcpu(cpu)) { + uint32_t *entry = gic_get_lr_entry(s, irq, cpu); + GICH_LR_CLEAR_ACTIVE(*entry); + + if (GICH_LR_HW(*entry)) { + /* Hardware interrupt. We must forward the deactivation request to + * the distributor. + */ + int phys_irq = GICH_LR_PHYS_ID(*entry); + int rcpu = gic_get_vcpu_real_id(cpu); + + if (phys_irq < GIC_NR_SGIS || phys_irq >= GIC_MAXIRQ) { + /* UNPREDICTABLE behaviour, we choose to ignore the request */ + return; + } + + /* This is equivalent to a NS write to DIR on the physical CPU + * interface. Hence group0 interrupt deactivation is ignored if + * the GIC is secure. + */ + if (!s->security_extn || GIC_DIST_TEST_GROUP(phys_irq, 1 << rcpu)) { + GIC_DIST_CLEAR_ACTIVE(phys_irq, 1 << rcpu); + } + } + } else { + GIC_DIST_CLEAR_ACTIVE(irq, 1 << cpu); + } +} + +static inline int gic_get_priority(GICState *s, int irq, int cpu) +{ + if (gic_is_vcpu(cpu)) { + uint32_t *entry = gic_get_lr_entry(s, irq, cpu); + return GICH_LR_PRIORITY(*entry); + } else { + return GIC_DIST_GET_PRIORITY(irq, cpu); + } +} + +#endif /* QEMU_ARM_GIC_INTERNAL_H */ diff --git a/hw/intc/gicv3_internal.h b/hw/intc/gicv3_internal.h new file mode 100644 index 000000000..b9c37453b --- /dev/null +++ b/hw/intc/gicv3_internal.h @@ -0,0 +1,611 @@ +/* + * ARM GICv3 support - internal interfaces + * + * Copyright (c) 2012 Linaro Limited + * Copyright (c) 2015 Huawei. + * Copyright (c) 2015 Samsung Electronics Co., Ltd. 
+ * Written by Peter Maydell
+ * Reworked for GICv3 by Shlomo Pongratz and Pavel Fedin
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef QEMU_ARM_GICV3_INTERNAL_H
+#define QEMU_ARM_GICV3_INTERNAL_H
+
+#include "hw/registerfields.h"
+#include "hw/intc/arm_gicv3_common.h"
+
+/* Distributor registers, as offsets from the distributor base address */
+#define GICD_CTLR 0x0000
+#define GICD_TYPER 0x0004
+#define GICD_IIDR 0x0008
+#define GICD_STATUSR 0x0010
+#define GICD_SETSPI_NSR 0x0040
+#define GICD_CLRSPI_NSR 0x0048
+#define GICD_SETSPI_SR 0x0050
+#define GICD_CLRSPI_SR 0x0058
+#define GICD_SEIR 0x0068
+#define GICD_IGROUPR 0x0080
+#define GICD_ISENABLER 0x0100
+#define GICD_ICENABLER 0x0180
+#define GICD_ISPENDR 0x0200
+#define GICD_ICPENDR 0x0280
+#define GICD_ISACTIVER 0x0300
+#define GICD_ICACTIVER 0x0380
+#define GICD_IPRIORITYR 0x0400
+#define GICD_ITARGETSR 0x0800
+#define GICD_ICFGR 0x0C00
+#define GICD_IGRPMODR 0x0D00
+#define GICD_NSACR 0x0E00
+#define GICD_SGIR 0x0F00
+#define GICD_CPENDSGIR 0x0F10
+#define GICD_SPENDSGIR 0x0F20
+#define GICD_IROUTER 0x6000
+#define GICD_IDREGS 0xFFD0
+
+/* GICD_CTLR fields */
+#define GICD_CTLR_EN_GRP0 (1U << 0)
+#define GICD_CTLR_EN_GRP1NS (1U << 1) /* GICv3 5.3.20 */
+#define GICD_CTLR_EN_GRP1S (1U << 2)
+#define GICD_CTLR_EN_GRP1_ALL (GICD_CTLR_EN_GRP1NS | GICD_CTLR_EN_GRP1S)
+/* Bit 4 is ARE if the system doesn't support TrustZone, ARE_S otherwise */
+#define GICD_CTLR_ARE (1U << 4)
+#define GICD_CTLR_ARE_S (1U << 4)
+#define GICD_CTLR_ARE_NS (1U << 5)
+#define GICD_CTLR_DS (1U << 6)
+#define GICD_CTLR_E1NWF (1U << 7)
+#define GICD_CTLR_RWP (1U << 31)
+
+#define GICD_TYPER_LPIS_SHIFT 17
+
+/* 16 bits EventId */
+#define GICD_TYPER_IDBITS 0xf
+
+/*
+ * Redistributor frame offsets from RD_base
+ */
+#define GICR_SGI_OFFSET 0x10000
+
+/*
+ * Redistributor registers, offsets from RD_base
+ */
+#define GICR_CTLR 0x0000
+#define GICR_IIDR 0x0004
+#define GICR_TYPER 0x0008
+#define GICR_STATUSR 0x0010
+#define GICR_WAKER 0x0014
+#define GICR_SETLPIR 0x0040
+#define GICR_CLRLPIR 0x0048
+#define GICR_PROPBASER 0x0070
+#define GICR_PENDBASER 0x0078
+#define GICR_INVLPIR 0x00A0
+#define GICR_INVALLR 0x00B0
+#define GICR_SYNCR 0x00C0
+#define GICR_IDREGS 0xFFD0
+
+/* SGI and PPI Redistributor registers, offsets from RD_base */
+#define GICR_IGROUPR0 (GICR_SGI_OFFSET + 0x0080)
+#define GICR_ISENABLER0 (GICR_SGI_OFFSET + 0x0100)
+#define GICR_ICENABLER0 (GICR_SGI_OFFSET + 0x0180)
+#define GICR_ISPENDR0 (GICR_SGI_OFFSET + 0x0200)
+#define GICR_ICPENDR0 (GICR_SGI_OFFSET + 0x0280)
+#define GICR_ISACTIVER0 (GICR_SGI_OFFSET + 0x0300)
+#define GICR_ICACTIVER0 (GICR_SGI_OFFSET + 0x0380)
+#define GICR_IPRIORITYR (GICR_SGI_OFFSET + 0x0400)
+#define GICR_ICFGR0 (GICR_SGI_OFFSET + 0x0C00)
+#define GICR_ICFGR1 (GICR_SGI_OFFSET + 0x0C04)
+#define GICR_IGRPMODR0 (GICR_SGI_OFFSET + 0x0D00)
+#define GICR_NSACR (GICR_SGI_OFFSET + 0x0E00)
+
+#define GICR_CTLR_ENABLE_LPIS (1U << 0)
+#define 
GICR_CTLR_RWP (1U << 3) +#define GICR_CTLR_DPG0 (1U << 24) +#define GICR_CTLR_DPG1NS (1U << 25) +#define GICR_CTLR_DPG1S (1U << 26) +#define GICR_CTLR_UWP (1U << 31) + +#define GICR_TYPER_PLPIS (1U << 0) +#define GICR_TYPER_VLPIS (1U << 1) +#define GICR_TYPER_DIRECTLPI (1U << 3) +#define GICR_TYPER_LAST (1U << 4) +#define GICR_TYPER_DPGS (1U << 5) +#define GICR_TYPER_PROCNUM (0xFFFFU << 8) +#define GICR_TYPER_COMMONLPIAFF (0x3 << 24) +#define GICR_TYPER_AFFINITYVALUE (0xFFFFFFFFULL << 32) + +#define GICR_WAKER_ProcessorSleep (1U << 1) +#define GICR_WAKER_ChildrenAsleep (1U << 2) + +FIELD(GICR_PROPBASER, IDBITS, 0, 5) +FIELD(GICR_PROPBASER, INNERCACHE, 7, 3) +FIELD(GICR_PROPBASER, SHAREABILITY, 10, 2) +FIELD(GICR_PROPBASER, PHYADDR, 12, 40) +FIELD(GICR_PROPBASER, OUTERCACHE, 56, 3) + +FIELD(GICR_PENDBASER, INNERCACHE, 7, 3) +FIELD(GICR_PENDBASER, SHAREABILITY, 10, 2) +FIELD(GICR_PENDBASER, PHYADDR, 16, 36) +FIELD(GICR_PENDBASER, OUTERCACHE, 56, 3) +FIELD(GICR_PENDBASER, PTZ, 62, 1) + +#define GICR_PROPBASER_IDBITS_THRESHOLD 0xd + +#define ICC_CTLR_EL1_CBPR (1U << 0) +#define ICC_CTLR_EL1_EOIMODE (1U << 1) +#define ICC_CTLR_EL1_PMHE (1U << 6) +#define ICC_CTLR_EL1_PRIBITS_SHIFT 8 +#define ICC_CTLR_EL1_PRIBITS_MASK (7U << ICC_CTLR_EL1_PRIBITS_SHIFT) +#define ICC_CTLR_EL1_IDBITS_SHIFT 11 +#define ICC_CTLR_EL1_SEIS (1U << 14) +#define ICC_CTLR_EL1_A3V (1U << 15) + +#define ICC_PMR_PRIORITY_MASK 0xff +#define ICC_BPR_BINARYPOINT_MASK 0x07 +#define ICC_IGRPEN_ENABLE 0x01 + +#define ICC_CTLR_EL3_CBPR_EL1S (1U << 0) +#define ICC_CTLR_EL3_CBPR_EL1NS (1U << 1) +#define ICC_CTLR_EL3_EOIMODE_EL3 (1U << 2) +#define ICC_CTLR_EL3_EOIMODE_EL1S (1U << 3) +#define ICC_CTLR_EL3_EOIMODE_EL1NS (1U << 4) +#define ICC_CTLR_EL3_RM (1U << 5) +#define ICC_CTLR_EL3_PMHE (1U << 6) +#define ICC_CTLR_EL3_PRIBITS_SHIFT 8 +#define ICC_CTLR_EL3_IDBITS_SHIFT 11 +#define ICC_CTLR_EL3_SEIS (1U << 14) +#define ICC_CTLR_EL3_A3V (1U << 15) +#define ICC_CTLR_EL3_NDS (1U << 17) + +#define ICH_VMCR_EL2_VENG0_SHIFT 0 +#define ICH_VMCR_EL2_VENG0 (1U << ICH_VMCR_EL2_VENG0_SHIFT) +#define ICH_VMCR_EL2_VENG1_SHIFT 1 +#define ICH_VMCR_EL2_VENG1 (1U << ICH_VMCR_EL2_VENG1_SHIFT) +#define ICH_VMCR_EL2_VACKCTL (1U << 2) +#define ICH_VMCR_EL2_VFIQEN (1U << 3) +#define ICH_VMCR_EL2_VCBPR_SHIFT 4 +#define ICH_VMCR_EL2_VCBPR (1U << ICH_VMCR_EL2_VCBPR_SHIFT) +#define ICH_VMCR_EL2_VEOIM_SHIFT 9 +#define ICH_VMCR_EL2_VEOIM (1U << ICH_VMCR_EL2_VEOIM_SHIFT) +#define ICH_VMCR_EL2_VBPR1_SHIFT 18 +#define ICH_VMCR_EL2_VBPR1_LENGTH 3 +#define ICH_VMCR_EL2_VBPR1_MASK (0x7U << ICH_VMCR_EL2_VBPR1_SHIFT) +#define ICH_VMCR_EL2_VBPR0_SHIFT 21 +#define ICH_VMCR_EL2_VBPR0_LENGTH 3 +#define ICH_VMCR_EL2_VBPR0_MASK (0x7U << ICH_VMCR_EL2_VBPR0_SHIFT) +#define ICH_VMCR_EL2_VPMR_SHIFT 24 +#define ICH_VMCR_EL2_VPMR_LENGTH 8 +#define ICH_VMCR_EL2_VPMR_MASK (0xffU << ICH_VMCR_EL2_VPMR_SHIFT) + +#define ICH_HCR_EL2_EN (1U << 0) +#define ICH_HCR_EL2_UIE (1U << 1) +#define ICH_HCR_EL2_LRENPIE (1U << 2) +#define ICH_HCR_EL2_NPIE (1U << 3) +#define ICH_HCR_EL2_VGRP0EIE (1U << 4) +#define ICH_HCR_EL2_VGRP0DIE (1U << 5) +#define ICH_HCR_EL2_VGRP1EIE (1U << 6) +#define ICH_HCR_EL2_VGRP1DIE (1U << 7) +#define ICH_HCR_EL2_TC (1U << 10) +#define ICH_HCR_EL2_TALL0 (1U << 11) +#define ICH_HCR_EL2_TALL1 (1U << 12) +#define ICH_HCR_EL2_TSEI (1U << 13) +#define ICH_HCR_EL2_TDIR (1U << 14) +#define ICH_HCR_EL2_EOICOUNT_SHIFT 27 +#define ICH_HCR_EL2_EOICOUNT_LENGTH 5 +#define ICH_HCR_EL2_EOICOUNT_MASK (0x1fU << ICH_HCR_EL2_EOICOUNT_SHIFT) + +#define ICH_LR_EL2_VINTID_SHIFT 0 +#define 
ICH_LR_EL2_VINTID_LENGTH 32 +#define ICH_LR_EL2_VINTID_MASK (0xffffffffULL << ICH_LR_EL2_VINTID_SHIFT) +#define ICH_LR_EL2_PINTID_SHIFT 32 +#define ICH_LR_EL2_PINTID_LENGTH 10 +#define ICH_LR_EL2_PINTID_MASK (0x3ffULL << ICH_LR_EL2_PINTID_SHIFT) +/* Note that EOI shares with the top bit of the pINTID field */ +#define ICH_LR_EL2_EOI (1ULL << 41) +#define ICH_LR_EL2_PRIORITY_SHIFT 48 +#define ICH_LR_EL2_PRIORITY_LENGTH 8 +#define ICH_LR_EL2_PRIORITY_MASK (0xffULL << ICH_LR_EL2_PRIORITY_SHIFT) +#define ICH_LR_EL2_GROUP (1ULL << 60) +#define ICH_LR_EL2_HW (1ULL << 61) +#define ICH_LR_EL2_STATE_SHIFT 62 +#define ICH_LR_EL2_STATE_LENGTH 2 +#define ICH_LR_EL2_STATE_MASK (3ULL << ICH_LR_EL2_STATE_SHIFT) +/* values for the state field: */ +#define ICH_LR_EL2_STATE_INVALID 0 +#define ICH_LR_EL2_STATE_PENDING 1 +#define ICH_LR_EL2_STATE_ACTIVE 2 +#define ICH_LR_EL2_STATE_ACTIVE_PENDING 3 +#define ICH_LR_EL2_STATE_PENDING_BIT (1ULL << ICH_LR_EL2_STATE_SHIFT) +#define ICH_LR_EL2_STATE_ACTIVE_BIT (2ULL << ICH_LR_EL2_STATE_SHIFT) + +#define ICH_MISR_EL2_EOI (1U << 0) +#define ICH_MISR_EL2_U (1U << 1) +#define ICH_MISR_EL2_LRENP (1U << 2) +#define ICH_MISR_EL2_NP (1U << 3) +#define ICH_MISR_EL2_VGRP0E (1U << 4) +#define ICH_MISR_EL2_VGRP0D (1U << 5) +#define ICH_MISR_EL2_VGRP1E (1U << 6) +#define ICH_MISR_EL2_VGRP1D (1U << 7) + +#define ICH_VTR_EL2_LISTREGS_SHIFT 0 +#define ICH_VTR_EL2_TDS (1U << 19) +#define ICH_VTR_EL2_NV4 (1U << 20) +#define ICH_VTR_EL2_A3V (1U << 21) +#define ICH_VTR_EL2_SEIS (1U << 22) +#define ICH_VTR_EL2_IDBITS_SHIFT 23 +#define ICH_VTR_EL2_PREBITS_SHIFT 26 +#define ICH_VTR_EL2_PRIBITS_SHIFT 29 + +/* ITS Registers */ + +FIELD(GITS_BASER, SIZE, 0, 8) +FIELD(GITS_BASER, PAGESIZE, 8, 2) +FIELD(GITS_BASER, SHAREABILITY, 10, 2) +FIELD(GITS_BASER, PHYADDR, 12, 36) +FIELD(GITS_BASER, PHYADDRL_64K, 16, 32) +FIELD(GITS_BASER, PHYADDRH_64K, 12, 4) +FIELD(GITS_BASER, ENTRYSIZE, 48, 5) +FIELD(GITS_BASER, OUTERCACHE, 53, 3) +FIELD(GITS_BASER, TYPE, 56, 3) +FIELD(GITS_BASER, INNERCACHE, 59, 3) +FIELD(GITS_BASER, INDIRECT, 62, 1) +FIELD(GITS_BASER, VALID, 63, 1) + +FIELD(GITS_CBASER, SIZE, 0, 8) +FIELD(GITS_CBASER, SHAREABILITY, 10, 2) +FIELD(GITS_CBASER, PHYADDR, 12, 40) +FIELD(GITS_CBASER, OUTERCACHE, 53, 3) +FIELD(GITS_CBASER, INNERCACHE, 59, 3) +FIELD(GITS_CBASER, VALID, 63, 1) + +FIELD(GITS_CREADR, STALLED, 0, 1) +FIELD(GITS_CREADR, OFFSET, 5, 15) + +FIELD(GITS_CWRITER, RETRY, 0, 1) +FIELD(GITS_CWRITER, OFFSET, 5, 15) + +FIELD(GITS_CTLR, ENABLED, 0, 1) +FIELD(GITS_CTLR, QUIESCENT, 31, 1) + +FIELD(GITS_TYPER, PHYSICAL, 0, 1) +FIELD(GITS_TYPER, ITT_ENTRY_SIZE, 4, 4) +FIELD(GITS_TYPER, IDBITS, 8, 5) +FIELD(GITS_TYPER, DEVBITS, 13, 5) +FIELD(GITS_TYPER, SEIS, 18, 1) +FIELD(GITS_TYPER, PTA, 19, 1) +FIELD(GITS_TYPER, CIDBITS, 32, 4) +FIELD(GITS_TYPER, CIL, 36, 1) + +#define GITS_IDREGS 0xFFD0 + +#define ITS_CTLR_ENABLED (1U) /* ITS Enabled */ + +#define GITS_BASER_RO_MASK (R_GITS_BASER_ENTRYSIZE_MASK | \ + R_GITS_BASER_TYPE_MASK) + +#define GITS_BASER_PAGESIZE_4K 0 +#define GITS_BASER_PAGESIZE_16K 1 +#define GITS_BASER_PAGESIZE_64K 2 + +#define GITS_BASER_TYPE_DEVICE 1ULL +#define GITS_BASER_TYPE_COLLECTION 4ULL + +#define GITS_PAGE_SIZE_4K 0x1000 +#define GITS_PAGE_SIZE_16K 0x4000 +#define GITS_PAGE_SIZE_64K 0x10000 + +#define L1TABLE_ENTRY_SIZE 8 + +#define LPI_CTE_ENABLED TABLE_ENTRY_VALID_MASK +#define LPI_PRIORITY_MASK 0xfc + +#define GITS_CMDQ_ENTRY_SIZE 32 +#define NUM_BYTES_IN_DW 8 + +#define CMD_MASK 0xff + +/* ITS Commands */ +#define GITS_CMD_CLEAR 0x04 +#define GITS_CMD_DISCARD 0x0F 
+#define GITS_CMD_INT 0x03 +#define GITS_CMD_MAPC 0x09 +#define GITS_CMD_MAPD 0x08 +#define GITS_CMD_MAPI 0x0B +#define GITS_CMD_MAPTI 0x0A +#define GITS_CMD_INV 0x0C +#define GITS_CMD_INVALL 0x0D +#define GITS_CMD_SYNC 0x05 + +/* MAPC command fields */ +#define ICID_LENGTH 16 +#define ICID_MASK ((1U << ICID_LENGTH) - 1) +FIELD(MAPC, RDBASE, 16, 32) + +#define RDBASE_PROCNUM_LENGTH 16 +#define RDBASE_PROCNUM_MASK ((1ULL << RDBASE_PROCNUM_LENGTH) - 1) + +/* MAPD command fields */ +#define ITTADDR_LENGTH 44 +#define ITTADDR_SHIFT 8 +#define ITTADDR_MASK MAKE_64BIT_MASK(ITTADDR_SHIFT, ITTADDR_LENGTH) +#define SIZE_MASK 0x1f + +/* MAPI command fields */ +#define EVENTID_MASK ((1ULL << 32) - 1) + +/* MAPTI command fields */ +#define pINTID_SHIFT 32 +#define pINTID_MASK MAKE_64BIT_MASK(32, 32) + +#define DEVID_SHIFT 32 +#define DEVID_MASK MAKE_64BIT_MASK(32, 32) + +#define VALID_SHIFT 63 +#define CMD_FIELD_VALID_MASK (1ULL << VALID_SHIFT) +#define L2_TABLE_VALID_MASK CMD_FIELD_VALID_MASK +#define TABLE_ENTRY_VALID_MASK (1ULL << 0) + +/** + * Default features advertised by this version of ITS + */ +/* Physical LPIs supported */ +#define GITS_TYPE_PHYSICAL (1U << 0) + +/* + * 12 bytes Interrupt translation Table Entry size + * as per Table 5.3 in GICv3 spec + * ITE Lower 8 Bytes + * Bits: | 49 ... 26 | 25 ... 2 | 1 | 0 | + * Values: | 1023 | IntNum | IntType | Valid | + * ITE Higher 4 Bytes + * Bits: | 31 ... 16 | 15 ...0 | + * Values: | vPEID | ICID | + */ +#define ITS_ITT_ENTRY_SIZE 0xC +#define ITE_ENTRY_INTTYPE_SHIFT 1 +#define ITE_ENTRY_INTID_SHIFT 2 +#define ITE_ENTRY_INTID_MASK MAKE_64BIT_MASK(2, 24) +#define ITE_ENTRY_INTSP_SHIFT 26 +#define ITE_ENTRY_ICID_MASK MAKE_64BIT_MASK(0, 16) + +/* 16 bits EventId */ +#define ITS_IDBITS GICD_TYPER_IDBITS + +/* 16 bits DeviceId */ +#define ITS_DEVBITS 0xF + +/* 16 bits CollectionId */ +#define ITS_CIDBITS 0xF + +/* + * 8 bytes Device Table Entry size + * Valid = 1 bit,ITTAddr = 44 bits,Size = 5 bits + */ +#define GITS_DTE_SIZE (0x8ULL) +#define GITS_DTE_ITTADDR_SHIFT 6 +#define GITS_DTE_ITTADDR_MASK MAKE_64BIT_MASK(GITS_DTE_ITTADDR_SHIFT, \ + ITTADDR_LENGTH) + +/* + * 8 bytes Collection Table Entry size + * Valid = 1 bit,RDBase = 36 bits(considering max RDBASE) + */ +#define GITS_CTE_SIZE (0x8ULL) +#define GITS_CTE_RDBASE_PROCNUM_MASK MAKE_64BIT_MASK(1, RDBASE_PROCNUM_LENGTH) + +/* Special interrupt IDs */ +#define INTID_SECURE 1020 +#define INTID_NONSECURE 1021 +#define INTID_SPURIOUS 1023 + +/* Functions internal to the emulated GICv3 */ + +/** + * gicv3_intid_is_special: + * @intid: interrupt ID + * + * Return true if @intid is a special interrupt ID (1020 to + * 1023 inclusive). This corresponds to the GIC spec pseudocode + * IsSpecial() function. + */ +static inline bool gicv3_intid_is_special(int intid) +{ + return intid >= INTID_SECURE && intid <= INTID_SPURIOUS; +} + +/** + * gicv3_redist_update: + * @cs: GICv3CPUState for this redistributor + * + * Recalculate the highest priority pending interrupt after a + * change to redistributor state, and inform the CPU accordingly. + */ +void gicv3_redist_update(GICv3CPUState *cs); + +/** + * gicv3_update: + * @s: GICv3State + * @start: first interrupt whose state changed + * @len: length of the range of interrupts whose state changed + * + * Recalculate the highest priority pending interrupts after a + * change to the distributor state affecting @len interrupts + * starting at @start, and inform the CPUs accordingly. 
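+ *
+ * For example, a distributor write that changes the priority byte of a
+ * single interrupt would typically be followed by a call along the lines
+ * of gicv3_update(s, irq, 1) (illustrative only; the actual call sites
+ * live in the distributor implementation).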
+ */ +void gicv3_update(GICv3State *s, int start, int len); + +/** + * gicv3_full_update_noirqset: + * @s: GICv3State + * + * Recalculate the cached information about highest priority + * pending interrupts, but don't inform the CPUs. This should be + * called after an incoming migration has loaded new state. + */ +void gicv3_full_update_noirqset(GICv3State *s); + +/** + * gicv3_full_update: + * @s: GICv3State + * + * Recalculate the highest priority pending interrupts after + * a change that could affect the status of all interrupts, + * and inform the CPUs accordingly. + */ +void gicv3_full_update(GICv3State *s); +MemTxResult gicv3_dist_read(void *opaque, hwaddr offset, uint64_t *data, + unsigned size, MemTxAttrs attrs); +MemTxResult gicv3_dist_write(void *opaque, hwaddr addr, uint64_t data, + unsigned size, MemTxAttrs attrs); +MemTxResult gicv3_redist_read(void *opaque, hwaddr offset, uint64_t *data, + unsigned size, MemTxAttrs attrs); +MemTxResult gicv3_redist_write(void *opaque, hwaddr offset, uint64_t data, + unsigned size, MemTxAttrs attrs); +void gicv3_dist_set_irq(GICv3State *s, int irq, int level); +void gicv3_redist_set_irq(GICv3CPUState *cs, int irq, int level); +void gicv3_redist_process_lpi(GICv3CPUState *cs, int irq, int level); +void gicv3_redist_lpi_pending(GICv3CPUState *cs, int irq, int level); +/** + * gicv3_redist_update_lpi: + * @cs: GICv3CPUState + * + * Scan the LPI pending table and recalculate the highest priority + * pending LPI and also the overall highest priority pending interrupt. + */ +void gicv3_redist_update_lpi(GICv3CPUState *cs); +/** + * gicv3_redist_update_lpi_only: + * @cs: GICv3CPUState + * + * Scan the LPI pending table and recalculate cs->hpplpi only, + * without calling gicv3_redist_update() to recalculate the overall + * highest priority pending interrupt. This should be called after + * an incoming migration has loaded new state. + */ +void gicv3_redist_update_lpi_only(GICv3CPUState *cs); +void gicv3_redist_send_sgi(GICv3CPUState *cs, int grp, int irq, bool ns); +void gicv3_init_cpuif(GICv3State *s); + +/** + * gicv3_cpuif_update: + * @cs: GICv3CPUState for the CPU to update + * + * Recalculate whether to assert the IRQ or FIQ lines after a change + * to the current highest priority pending interrupt, the CPU's + * current running priority or the CPU's current exception level or + * security state. + */ +void gicv3_cpuif_update(GICv3CPUState *cs); + +static inline uint32_t gicv3_iidr(void) +{ + /* Return the Implementer Identification Register value + * for the emulated GICv3, as reported in GICD_IIDR and GICR_IIDR. + * + * We claim to be an ARM r0p0 with a zero ProductID. + * This is the same as an r0p0 GIC-500. + */ + return 0x43b; +} + +static inline uint32_t gicv3_idreg(int regoffset) +{ + /* Return the value of the CoreSight ID register at the specified + * offset from the first ID register (as found in the distributor + * and redistributor register banks). + * These values indicate an ARM implementation of a GICv3. + */ + static const uint8_t gicd_ids[] = { + 0x44, 0x00, 0x00, 0x00, 0x92, 0xB4, 0x3B, 0x00, 0x0D, 0xF0, 0x05, 0xB1 + }; + return gicd_ids[regoffset / 4]; +} + +/** + * gicv3_irq_group: + * + * Return the group which this interrupt is configured as (GICV3_G0, + * GICV3_G1 or GICV3_G1NS). 
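+ *
+ * The decoding implemented below follows the architected meaning of the
+ * IGROUPR/IGRPMODR bit pair:
+ *
+ *   IGROUPR  IGRPMODR  group
+ *      1        x      GICV3_G1NS
+ *      0        1      GICV3_G1 (GICV3_G0 when GICD_CTLR.DS is set)
+ *      0        0      GICV3_G0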
+ */ +static inline int gicv3_irq_group(GICv3State *s, GICv3CPUState *cs, int irq) +{ + bool grpbit, grpmodbit; + + if (irq < GIC_INTERNAL) { + grpbit = extract32(cs->gicr_igroupr0, irq, 1); + grpmodbit = extract32(cs->gicr_igrpmodr0, irq, 1); + } else { + grpbit = gicv3_gicd_group_test(s, irq); + grpmodbit = gicv3_gicd_grpmod_test(s, irq); + } + if (grpbit) { + return GICV3_G1NS; + } + if (s->gicd_ctlr & GICD_CTLR_DS) { + return GICV3_G0; + } + return grpmodbit ? GICV3_G1 : GICV3_G0; +} + +/** + * gicv3_redist_affid: + * + * Return the 32-bit affinity ID of the CPU connected to this redistributor + */ +static inline uint32_t gicv3_redist_affid(GICv3CPUState *cs) +{ + return cs->gicr_typer >> 32; +} + +/** + * gicv3_cache_target_cpustate: + * + * Update the cached CPU state corresponding to the target for this interrupt + * (which is kept in s->gicd_irouter_target[]). + */ +static inline void gicv3_cache_target_cpustate(GICv3State *s, int irq) +{ + GICv3CPUState *cs = NULL; + int i; + uint32_t tgtaff = extract64(s->gicd_irouter[irq], 0, 24) | + extract64(s->gicd_irouter[irq], 32, 8) << 24; + + for (i = 0; i < s->num_cpu; i++) { + if (s->cpu[i].gicr_typer >> 32 == tgtaff) { + cs = &s->cpu[i]; + break; + } + } + + s->gicd_irouter_target[irq] = cs; +} + +/** + * gicv3_cache_all_target_cpustates: + * + * Populate the entire cache of CPU state pointers for interrupt targets + * (eg after inbound migration or CPU reset) + */ +static inline void gicv3_cache_all_target_cpustates(GICv3State *s) +{ + int irq; + + for (irq = GIC_INTERNAL; irq < GICV3_MAXIRQ; irq++) { + gicv3_cache_target_cpustate(s, irq); + } +} + +void gicv3_set_gicv3state(CPUState *cpu, GICv3CPUState *s); + +#endif /* QEMU_ARM_GICV3_INTERNAL_H */ diff --git a/hw/intc/goldfish_pic.c b/hw/intc/goldfish_pic.c new file mode 100644 index 000000000..dfd53275f --- /dev/null +++ b/hw/intc/goldfish_pic.c @@ -0,0 +1,219 @@ +/* + * SPDX-License-Identifier: GPL-2.0-or-later + * + * Goldfish PIC + * + * (c) 2020 Laurent Vivier + * + */ + +#include "qemu/osdep.h" +#include "hw/irq.h" +#include "hw/qdev-properties.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "monitor/monitor.h" +#include "qemu/log.h" +#include "trace.h" +#include "hw/intc/intc.h" +#include "hw/intc/goldfish_pic.h" + +/* registers */ + +enum { + REG_STATUS = 0x00, + REG_IRQ_PENDING = 0x04, + REG_IRQ_DISABLE_ALL = 0x08, + REG_DISABLE = 0x0c, + REG_ENABLE = 0x10, +}; + +static bool goldfish_pic_get_statistics(InterruptStatsProvider *obj, + uint64_t **irq_counts, + unsigned int *nb_irqs) +{ + GoldfishPICState *s = GOLDFISH_PIC(obj); + + *irq_counts = s->stats_irq_count; + *nb_irqs = ARRAY_SIZE(s->stats_irq_count); + return true; +} + +static void goldfish_pic_print_info(InterruptStatsProvider *obj, Monitor *mon) +{ + GoldfishPICState *s = GOLDFISH_PIC(obj); + monitor_printf(mon, "goldfish-pic.%d: pending=0x%08x enabled=0x%08x\n", + s->idx, s->pending, s->enabled); +} + +static void goldfish_pic_update(GoldfishPICState *s) +{ + if (s->pending & s->enabled) { + qemu_irq_raise(s->irq); + } else { + qemu_irq_lower(s->irq); + } +} + +static void goldfish_irq_request(void *opaque, int irq, int level) +{ + GoldfishPICState *s = opaque; + + trace_goldfish_irq_request(s, s->idx, irq, level); + + if (level) { + s->pending |= 1 << irq; + s->stats_irq_count[irq]++; + } else { + s->pending &= ~(1 << irq); + } + goldfish_pic_update(s); +} + +static uint64_t goldfish_pic_read(void *opaque, hwaddr addr, + unsigned size) +{ + GoldfishPICState *s = opaque; + uint64_t value = 0; 
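+
+    /*
+     * Only REG_STATUS and REG_IRQ_PENDING are readable in this model;
+     * the enable/disable registers are write-only, so reads of any other
+     * offset fall through to the LOG_UNIMP case and return 0.
+     */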
+ + switch (addr) { + case REG_STATUS: + /* The number of pending interrupts (0 to 32) */ + value = ctpop32(s->pending & s->enabled); + break; + case REG_IRQ_PENDING: + /* The pending interrupt mask */ + value = s->pending & s->enabled; + break; + default: + qemu_log_mask(LOG_UNIMP, + "%s: unimplemented register read 0x%02"HWADDR_PRIx"\n", + __func__, addr); + break; + } + + trace_goldfish_pic_read(s, s->idx, addr, size, value); + + return value; +} + +static void goldfish_pic_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + GoldfishPICState *s = opaque; + + trace_goldfish_pic_write(s, s->idx, addr, size, value); + + switch (addr) { + case REG_IRQ_DISABLE_ALL: + s->enabled = 0; + s->pending = 0; + break; + case REG_DISABLE: + s->enabled &= ~value; + break; + case REG_ENABLE: + s->enabled |= value; + break; + default: + qemu_log_mask(LOG_UNIMP, + "%s: unimplemented register write 0x%02"HWADDR_PRIx"\n", + __func__, addr); + break; + } + goldfish_pic_update(s); +} + +static const MemoryRegionOps goldfish_pic_ops = { + .read = goldfish_pic_read, + .write = goldfish_pic_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid.max_access_size = 4, + .impl.min_access_size = 4, + .impl.max_access_size = 4, +}; + +static void goldfish_pic_reset(DeviceState *dev) +{ + GoldfishPICState *s = GOLDFISH_PIC(dev); + int i; + + trace_goldfish_pic_reset(s, s->idx); + s->pending = 0; + s->enabled = 0; + + for (i = 0; i < ARRAY_SIZE(s->stats_irq_count); i++) { + s->stats_irq_count[i] = 0; + } +} + +static void goldfish_pic_realize(DeviceState *dev, Error **errp) +{ + GoldfishPICState *s = GOLDFISH_PIC(dev); + + trace_goldfish_pic_realize(s, s->idx); + + memory_region_init_io(&s->iomem, OBJECT(s), &goldfish_pic_ops, s, + "goldfish_pic", 0x24); +} + +static const VMStateDescription vmstate_goldfish_pic = { + .name = "goldfish_pic", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(pending, GoldfishPICState), + VMSTATE_UINT32(enabled, GoldfishPICState), + VMSTATE_END_OF_LIST() + } +}; + +static void goldfish_pic_instance_init(Object *obj) +{ + SysBusDevice *dev = SYS_BUS_DEVICE(obj); + GoldfishPICState *s = GOLDFISH_PIC(obj); + + trace_goldfish_pic_instance_init(s); + + sysbus_init_mmio(dev, &s->iomem); + sysbus_init_irq(dev, &s->irq); + + qdev_init_gpio_in(DEVICE(obj), goldfish_irq_request, GOLDFISH_PIC_IRQ_NB); +} + +static Property goldfish_pic_properties[] = { + DEFINE_PROP_UINT8("index", GoldfishPICState, idx, 0), + DEFINE_PROP_END_OF_LIST(), +}; + +static void goldfish_pic_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + InterruptStatsProviderClass *ic = INTERRUPT_STATS_PROVIDER_CLASS(oc); + + dc->reset = goldfish_pic_reset; + dc->realize = goldfish_pic_realize; + dc->vmsd = &vmstate_goldfish_pic; + ic->get_statistics = goldfish_pic_get_statistics; + ic->print_info = goldfish_pic_print_info; + device_class_set_props(dc, goldfish_pic_properties); +} + +static const TypeInfo goldfish_pic_info = { + .name = TYPE_GOLDFISH_PIC, + .parent = TYPE_SYS_BUS_DEVICE, + .class_init = goldfish_pic_class_init, + .instance_init = goldfish_pic_instance_init, + .instance_size = sizeof(GoldfishPICState), + .interfaces = (InterfaceInfo[]) { + { TYPE_INTERRUPT_STATS_PROVIDER }, + { } + }, +}; + +static void goldfish_pic_register_types(void) +{ + type_register_static(&goldfish_pic_info); +} + +type_init(goldfish_pic_register_types) diff --git a/hw/intc/grlib_irqmp.c b/hw/intc/grlib_irqmp.c new file mode 100644 index 
000000000..3bfe2544b --- /dev/null +++ b/hw/intc/grlib_irqmp.c @@ -0,0 +1,362 @@ +/* + * QEMU GRLIB IRQMP Emulator + * + * (Multiprocessor and extended interrupt not supported) + * + * Copyright (c) 2010-2019 AdaCore + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "hw/irq.h" +#include "hw/sysbus.h" + +#include "hw/qdev-properties.h" +#include "hw/sparc/grlib.h" + +#include "trace.h" +#include "qapi/error.h" +#include "qemu/module.h" +#include "qom/object.h" + +#define IRQMP_MAX_CPU 16 +#define IRQMP_REG_SIZE 256 /* Size of memory mapped registers */ + +/* Memory mapped register offsets */ +#define LEVEL_OFFSET 0x00 +#define PENDING_OFFSET 0x04 +#define FORCE0_OFFSET 0x08 +#define CLEAR_OFFSET 0x0C +#define MP_STATUS_OFFSET 0x10 +#define BROADCAST_OFFSET 0x14 +#define MASK_OFFSET 0x40 +#define FORCE_OFFSET 0x80 +#define EXTENDED_OFFSET 0xC0 + +#define MAX_PILS 16 + +OBJECT_DECLARE_SIMPLE_TYPE(IRQMP, GRLIB_IRQMP) + +typedef struct IRQMPState IRQMPState; + +struct IRQMP { + SysBusDevice parent_obj; + + MemoryRegion iomem; + + IRQMPState *state; + qemu_irq irq; +}; + +struct IRQMPState { + uint32_t level; + uint32_t pending; + uint32_t clear; + uint32_t broadcast; + + uint32_t mask[IRQMP_MAX_CPU]; + uint32_t force[IRQMP_MAX_CPU]; + uint32_t extended[IRQMP_MAX_CPU]; + + IRQMP *parent; +}; + +static void grlib_irqmp_check_irqs(IRQMPState *state) +{ + uint32_t pend = 0; + uint32_t level0 = 0; + uint32_t level1 = 0; + + assert(state != NULL); + assert(state->parent != NULL); + + /* IRQ for CPU 0 (no SMP support) */ + pend = (state->pending | state->force[0]) + & state->mask[0]; + + level0 = pend & ~state->level; + level1 = pend & state->level; + + trace_grlib_irqmp_check_irqs(state->pending, state->force[0], + state->mask[0], level1, level0); + + /* Trigger level1 interrupt first and level0 if there is no level1 */ + qemu_set_irq(state->parent->irq, level1 ?: level0); +} + +static void grlib_irqmp_ack_mask(IRQMPState *state, uint32_t mask) +{ + /* Clear registers */ + state->pending &= ~mask; + state->force[0] &= ~mask; /* Only CPU 0 (No SMP support) */ + + grlib_irqmp_check_irqs(state); +} + +void grlib_irqmp_ack(DeviceState *dev, int intno) +{ + IRQMP *irqmp = GRLIB_IRQMP(dev); + IRQMPState *state; + uint32_t mask; + + state = irqmp->state; + assert(state != NULL); + + intno &= 15; + mask = 1 << intno; + + trace_grlib_irqmp_ack(intno); + + grlib_irqmp_ack_mask(state, mask); +} + +static void grlib_irqmp_set_irq(void *opaque, int 
irq, int level) +{ + IRQMP *irqmp = GRLIB_IRQMP(opaque); + IRQMPState *s; + int i = 0; + + s = irqmp->state; + assert(s != NULL); + assert(s->parent != NULL); + + + if (level) { + trace_grlib_irqmp_set_irq(irq); + + if (s->broadcast & 1 << irq) { + /* Broadcasted IRQ */ + for (i = 0; i < IRQMP_MAX_CPU; i++) { + s->force[i] |= 1 << irq; + } + } else { + s->pending |= 1 << irq; + } + grlib_irqmp_check_irqs(s); + + } +} + +static uint64_t grlib_irqmp_read(void *opaque, hwaddr addr, + unsigned size) +{ + IRQMP *irqmp = opaque; + IRQMPState *state; + + assert(irqmp != NULL); + state = irqmp->state; + assert(state != NULL); + + addr &= 0xff; + + /* global registers */ + switch (addr) { + case LEVEL_OFFSET: + return state->level; + + case PENDING_OFFSET: + return state->pending; + + case FORCE0_OFFSET: + /* This register is an "alias" for the force register of CPU 0 */ + return state->force[0]; + + case CLEAR_OFFSET: + case MP_STATUS_OFFSET: + /* Always read as 0 */ + return 0; + + case BROADCAST_OFFSET: + return state->broadcast; + + default: + break; + } + + /* mask registers */ + if (addr >= MASK_OFFSET && addr < FORCE_OFFSET) { + int cpu = (addr - MASK_OFFSET) / 4; + assert(cpu >= 0 && cpu < IRQMP_MAX_CPU); + + return state->mask[cpu]; + } + + /* force registers */ + if (addr >= FORCE_OFFSET && addr < EXTENDED_OFFSET) { + int cpu = (addr - FORCE_OFFSET) / 4; + assert(cpu >= 0 && cpu < IRQMP_MAX_CPU); + + return state->force[cpu]; + } + + /* extended (not supported) */ + if (addr >= EXTENDED_OFFSET && addr < IRQMP_REG_SIZE) { + int cpu = (addr - EXTENDED_OFFSET) / 4; + assert(cpu >= 0 && cpu < IRQMP_MAX_CPU); + + return state->extended[cpu]; + } + + trace_grlib_irqmp_readl_unknown(addr); + return 0; +} + +static void grlib_irqmp_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + IRQMP *irqmp = opaque; + IRQMPState *state; + + assert(irqmp != NULL); + state = irqmp->state; + assert(state != NULL); + + addr &= 0xff; + + /* global registers */ + switch (addr) { + case LEVEL_OFFSET: + value &= 0xFFFF << 1; /* clean up the value */ + state->level = value; + return; + + case PENDING_OFFSET: + /* Read Only */ + return; + + case FORCE0_OFFSET: + /* This register is an "alias" for the force register of CPU 0 */ + + value &= 0xFFFE; /* clean up the value */ + state->force[0] = value; + grlib_irqmp_check_irqs(irqmp->state); + return; + + case CLEAR_OFFSET: + value &= ~1; /* clean up the value */ + grlib_irqmp_ack_mask(state, value); + return; + + case MP_STATUS_OFFSET: + /* Read Only (no SMP support) */ + return; + + case BROADCAST_OFFSET: + value &= 0xFFFE; /* clean up the value */ + state->broadcast = value; + return; + + default: + break; + } + + /* mask registers */ + if (addr >= MASK_OFFSET && addr < FORCE_OFFSET) { + int cpu = (addr - MASK_OFFSET) / 4; + assert(cpu >= 0 && cpu < IRQMP_MAX_CPU); + + value &= ~1; /* clean up the value */ + state->mask[cpu] = value; + grlib_irqmp_check_irqs(irqmp->state); + return; + } + + /* force registers */ + if (addr >= FORCE_OFFSET && addr < EXTENDED_OFFSET) { + int cpu = (addr - FORCE_OFFSET) / 4; + assert(cpu >= 0 && cpu < IRQMP_MAX_CPU); + + uint32_t force = value & 0xFFFE; + uint32_t clear = (value >> 16) & 0xFFFE; + uint32_t old = state->force[cpu]; + + state->force[cpu] = (old | force) & ~clear; + grlib_irqmp_check_irqs(irqmp->state); + return; + } + + /* extended (not supported) */ + if (addr >= EXTENDED_OFFSET && addr < IRQMP_REG_SIZE) { + int cpu = (addr - EXTENDED_OFFSET) / 4; + assert(cpu >= 0 && cpu < IRQMP_MAX_CPU); + + value &= 
0xF; /* clean up the value */ + state->extended[cpu] = value; + return; + } + + trace_grlib_irqmp_writel_unknown(addr, value); +} + +static const MemoryRegionOps grlib_irqmp_ops = { + .read = grlib_irqmp_read, + .write = grlib_irqmp_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +static void grlib_irqmp_reset(DeviceState *d) +{ + IRQMP *irqmp = GRLIB_IRQMP(d); + assert(irqmp->state != NULL); + + memset(irqmp->state, 0, sizeof *irqmp->state); + irqmp->state->parent = irqmp; +} + +static void grlib_irqmp_init(Object *obj) +{ + IRQMP *irqmp = GRLIB_IRQMP(obj); + SysBusDevice *dev = SYS_BUS_DEVICE(obj); + + qdev_init_gpio_in(DEVICE(obj), grlib_irqmp_set_irq, MAX_PILS); + qdev_init_gpio_out_named(DEVICE(obj), &irqmp->irq, "grlib-irq", 1); + memory_region_init_io(&irqmp->iomem, obj, &grlib_irqmp_ops, irqmp, + "irqmp", IRQMP_REG_SIZE); + + irqmp->state = g_malloc0(sizeof *irqmp->state); + + sysbus_init_mmio(dev, &irqmp->iomem); +} + +static void grlib_irqmp_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = grlib_irqmp_reset; +} + +static const TypeInfo grlib_irqmp_info = { + .name = TYPE_GRLIB_IRQMP, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(IRQMP), + .instance_init = grlib_irqmp_init, + .class_init = grlib_irqmp_class_init, +}; + +static void grlib_irqmp_register_types(void) +{ + type_register_static(&grlib_irqmp_info); +} + +type_init(grlib_irqmp_register_types) diff --git a/hw/intc/heathrow_pic.c b/hw/intc/heathrow_pic.c new file mode 100644 index 000000000..cb97c315d --- /dev/null +++ b/hw/intc/heathrow_pic.c @@ -0,0 +1,210 @@ +/* + * Heathrow PIC support (OldWorld PowerMac) + * + * Copyright (c) 2005-2007 Fabrice Bellard + * Copyright (c) 2007 Jocelyn Mayer + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include "hw/ppc/mac.h" +#include "migration/vmstate.h" +#include "qemu/module.h" +#include "hw/intc/heathrow_pic.h" +#include "hw/irq.h" +#include "trace.h" + +static inline int heathrow_check_irq(HeathrowPICState *pic) +{ + return (pic->events | (pic->levels & pic->level_triggered)) & pic->mask; +} + +/* update the CPU irq state */ +static void heathrow_update_irq(HeathrowState *s) +{ + if (heathrow_check_irq(&s->pics[0]) || + heathrow_check_irq(&s->pics[1])) { + qemu_irq_raise(s->irqs[0]); + } else { + qemu_irq_lower(s->irqs[0]); + } +} + +static void heathrow_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + HeathrowState *s = opaque; + HeathrowPICState *pic; + unsigned int n; + + n = ((addr & 0xfff) - 0x10) >> 4; + trace_heathrow_write(addr, n, value); + if (n >= 2) + return; + pic = &s->pics[n]; + switch(addr & 0xf) { + case 0x04: + pic->mask = value; + heathrow_update_irq(s); + break; + case 0x08: + /* do not reset level triggered IRQs */ + value &= ~pic->level_triggered; + pic->events &= ~value; + heathrow_update_irq(s); + break; + default: + break; + } +} + +static uint64_t heathrow_read(void *opaque, hwaddr addr, + unsigned size) +{ + HeathrowState *s = opaque; + HeathrowPICState *pic; + unsigned int n; + uint32_t value; + + n = ((addr & 0xfff) - 0x10) >> 4; + if (n >= 2) { + value = 0; + } else { + pic = &s->pics[n]; + switch(addr & 0xf) { + case 0x0: + value = pic->events; + break; + case 0x4: + value = pic->mask; + break; + case 0xc: + value = pic->levels; + break; + default: + value = 0; + break; + } + } + trace_heathrow_read(addr, n, value); + return value; +} + +static const MemoryRegionOps heathrow_ops = { + .read = heathrow_read, + .write = heathrow_write, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static void heathrow_set_irq(void *opaque, int num, int level) +{ + HeathrowState *s = opaque; + HeathrowPICState *pic; + unsigned int irq_bit; + int last_level; + + pic = &s->pics[1 - (num >> 5)]; + irq_bit = 1 << (num & 0x1f); + last_level = (pic->levels & irq_bit) ? 
1 : 0; + + if (level) { + pic->events |= irq_bit & ~pic->level_triggered; + pic->levels |= irq_bit; + } else { + pic->levels &= ~irq_bit; + } + + if (last_level != level) { + trace_heathrow_set_irq(num, level); + } + + heathrow_update_irq(s); +} + +static const VMStateDescription vmstate_heathrow_pic_one = { + .name = "heathrow_pic_one", + .version_id = 0, + .minimum_version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_UINT32(events, HeathrowPICState), + VMSTATE_UINT32(mask, HeathrowPICState), + VMSTATE_UINT32(levels, HeathrowPICState), + VMSTATE_UINT32(level_triggered, HeathrowPICState), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_heathrow = { + .name = "heathrow_pic", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_STRUCT_ARRAY(pics, HeathrowState, 2, 1, + vmstate_heathrow_pic_one, HeathrowPICState), + VMSTATE_END_OF_LIST() + } +}; + +static void heathrow_reset(DeviceState *d) +{ + HeathrowState *s = HEATHROW(d); + + s->pics[0].level_triggered = 0; + s->pics[1].level_triggered = 0x1ff00000; +} + +static void heathrow_init(Object *obj) +{ + HeathrowState *s = HEATHROW(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + + /* only 1 CPU */ + qdev_init_gpio_out(DEVICE(obj), s->irqs, 1); + + qdev_init_gpio_in(DEVICE(obj), heathrow_set_irq, HEATHROW_NUM_IRQS); + + memory_region_init_io(&s->mem, OBJECT(s), &heathrow_ops, s, + "heathrow-pic", 0x1000); + sysbus_init_mmio(sbd, &s->mem); +} + +static void heathrow_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + + dc->reset = heathrow_reset; + dc->vmsd = &vmstate_heathrow; + set_bit(DEVICE_CATEGORY_MISC, dc->categories); +} + +static const TypeInfo heathrow_type_info = { + .name = TYPE_HEATHROW, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(HeathrowState), + .instance_init = heathrow_init, + .class_init = heathrow_class_init, +}; + +static void heathrow_register_types(void) +{ + type_register_static(&heathrow_type_info); +} + +type_init(heathrow_register_types) diff --git a/hw/intc/i8259.c b/hw/intc/i8259.c new file mode 100644 index 000000000..cc4e21ffe --- /dev/null +++ b/hw/intc/i8259.c @@ -0,0 +1,466 @@ +/* + * QEMU 8259 interrupt controller emulation + * + * Copyright (c) 2003-2004 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include "hw/intc/i8259.h" +#include "hw/irq.h" +#include "hw/isa/isa.h" +#include "qemu/timer.h" +#include "qemu/log.h" +#include "hw/isa/i8259_internal.h" +#include "trace.h" +#include "qom/object.h" + +/* debug PIC */ +//#define DEBUG_PIC + +//#define DEBUG_IRQ_LATENCY + +#define TYPE_I8259 "isa-i8259" +typedef struct PICClass PICClass; +DECLARE_CLASS_CHECKERS(PICClass, PIC, + TYPE_I8259) + +/** + * PICClass: + * @parent_realize: The parent's realizefn. + */ +struct PICClass { + PICCommonClass parent_class; + + DeviceRealize parent_realize; +}; + +#ifdef DEBUG_IRQ_LATENCY +static int64_t irq_time[16]; +#endif +DeviceState *isa_pic; +static PICCommonState *slave_pic; + +/* return the highest priority found in mask (highest = smallest + number). Return 8 if no irq */ +static int get_priority(PICCommonState *s, int mask) +{ + int priority; + + if (mask == 0) { + return 8; + } + priority = 0; + while ((mask & (1 << ((priority + s->priority_add) & 7))) == 0) { + priority++; + } + return priority; +} + +/* return the pic wanted interrupt. return -1 if none */ +static int pic_get_irq(PICCommonState *s) +{ + int mask, cur_priority, priority; + + mask = s->irr & ~s->imr; + priority = get_priority(s, mask); + if (priority == 8) { + return -1; + } + /* compute current priority. If special fully nested mode on the + master, the IRQ coming from the slave is not taken into account + for the priority computation. */ + mask = s->isr; + if (s->special_mask) { + mask &= ~s->imr; + } + if (s->special_fully_nested_mode && s->master) { + mask &= ~(1 << 2); + } + cur_priority = get_priority(s, mask); + if (priority < cur_priority) { + /* higher priority found: an irq should be generated */ + return (priority + s->priority_add) & 7; + } else { + return -1; + } +} + +/* Update INT output. Must be called every time the output may have changed. */ +static void pic_update_irq(PICCommonState *s) +{ + int irq; + + irq = pic_get_irq(s); + if (irq >= 0) { + trace_pic_update_irq(s->master, s->imr, s->irr, s->priority_add); + qemu_irq_raise(s->int_out[0]); + } else { + qemu_irq_lower(s->int_out[0]); + } +} + +/* set irq level. If an edge is detected, then the IRR is set to 1 */ +static void pic_set_irq(void *opaque, int irq, int level) +{ + PICCommonState *s = opaque; + int mask = 1 << irq; + int irq_index = s->master ? 
irq : irq + 8; + + trace_pic_set_irq(s->master, irq, level); + pic_stat_update_irq(irq_index, level); + +#ifdef DEBUG_IRQ_LATENCY + if (level) { + irq_time[irq_index] = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + } +#endif + + if (s->elcr & mask) { + /* level triggered */ + if (level) { + s->irr |= mask; + s->last_irr |= mask; + } else { + s->irr &= ~mask; + s->last_irr &= ~mask; + } + } else { + /* edge triggered */ + if (level) { + if ((s->last_irr & mask) == 0) { + s->irr |= mask; + } + s->last_irr |= mask; + } else { + s->last_irr &= ~mask; + } + } + pic_update_irq(s); +} + +/* acknowledge interrupt 'irq' */ +static void pic_intack(PICCommonState *s, int irq) +{ + if (s->auto_eoi) { + if (s->rotate_on_auto_eoi) { + s->priority_add = (irq + 1) & 7; + } + } else { + s->isr |= (1 << irq); + } + /* We don't clear a level sensitive interrupt here */ + if (!(s->elcr & (1 << irq))) { + s->irr &= ~(1 << irq); + } + pic_update_irq(s); +} + +int pic_read_irq(DeviceState *d) +{ + PICCommonState *s = PIC_COMMON(d); + int irq, intno; + + irq = pic_get_irq(s); + if (irq >= 0) { + int irq2; + + if (irq == 2) { + irq2 = pic_get_irq(slave_pic); + if (irq2 >= 0) { + pic_intack(slave_pic, irq2); + } else { + /* spurious IRQ on slave controller */ + irq2 = 7; + } + intno = slave_pic->irq_base + irq2; + pic_intack(s, irq); + irq = irq2 + 8; + } else { + intno = s->irq_base + irq; + pic_intack(s, irq); + } + } else { + /* spurious IRQ on host controller */ + irq = 7; + intno = s->irq_base + irq; + } + +#ifdef DEBUG_IRQ_LATENCY + printf("IRQ%d latency=%0.3fus\n", + irq, + (double)(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - + irq_time[irq]) * 1000000.0 / NANOSECONDS_PER_SECOND); +#endif + + trace_pic_interrupt(irq, intno); + return intno; +} + +static void pic_init_reset(PICCommonState *s) +{ + pic_reset_common(s); + pic_update_irq(s); +} + +static void pic_reset(DeviceState *dev) +{ + PICCommonState *s = PIC_COMMON(dev); + + s->elcr = 0; + pic_init_reset(s); +} + +static void pic_ioport_write(void *opaque, hwaddr addr64, + uint64_t val64, unsigned size) +{ + PICCommonState *s = opaque; + uint32_t addr = addr64; + uint32_t val = val64; + int priority, cmd, irq; + + trace_pic_ioport_write(s->master, addr, val); + + if (addr == 0) { + if (val & 0x10) { + pic_init_reset(s); + s->init_state = 1; + s->init4 = val & 1; + s->single_mode = val & 2; + if (val & 0x08) { + qemu_log_mask(LOG_UNIMP, + "i8259: level sensitive irq not supported\n"); + } + } else if (val & 0x08) { + if (val & 0x04) { + s->poll = 1; + } + if (val & 0x02) { + s->read_reg_select = val & 1; + } + if (val & 0x40) { + s->special_mask = (val >> 5) & 1; + } + } else { + cmd = val >> 5; + switch (cmd) { + case 0: + case 4: + s->rotate_on_auto_eoi = cmd >> 2; + break; + case 1: /* end of interrupt */ + case 5: + priority = get_priority(s, s->isr); + if (priority != 8) { + irq = (priority + s->priority_add) & 7; + s->isr &= ~(1 << irq); + if (cmd == 5) { + s->priority_add = (irq + 1) & 7; + } + pic_update_irq(s); + } + break; + case 3: + irq = val & 7; + s->isr &= ~(1 << irq); + pic_update_irq(s); + break; + case 6: + s->priority_add = (val + 1) & 7; + pic_update_irq(s); + break; + case 7: + irq = val & 7; + s->isr &= ~(1 << irq); + s->priority_add = (irq + 1) & 7; + pic_update_irq(s); + break; + default: + /* no operation */ + break; + } + } + } else { + switch (s->init_state) { + case 0: + /* normal mode */ + s->imr = val; + pic_update_irq(s); + break; + case 1: + s->irq_base = val & 0xf8; + s->init_state = s->single_mode ? (s->init4 ? 
3 : 0) : 2; + break; + case 2: + if (s->init4) { + s->init_state = 3; + } else { + s->init_state = 0; + } + break; + case 3: + s->special_fully_nested_mode = (val >> 4) & 1; + s->auto_eoi = (val >> 1) & 1; + s->init_state = 0; + break; + } + } +} + +static uint64_t pic_ioport_read(void *opaque, hwaddr addr, + unsigned size) +{ + PICCommonState *s = opaque; + int ret; + + if (s->poll) { + ret = pic_get_irq(s); + if (ret >= 0) { + pic_intack(s, ret); + ret |= 0x80; + } else { + ret = 0; + } + s->poll = 0; + } else { + if (addr == 0) { + if (s->read_reg_select) { + ret = s->isr; + } else { + ret = s->irr; + } + } else { + ret = s->imr; + } + } + trace_pic_ioport_read(s->master, addr, ret); + return ret; +} + +int pic_get_output(DeviceState *d) +{ + PICCommonState *s = PIC_COMMON(d); + + return (pic_get_irq(s) >= 0); +} + +static void elcr_ioport_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + PICCommonState *s = opaque; + s->elcr = val & s->elcr_mask; +} + +static uint64_t elcr_ioport_read(void *opaque, hwaddr addr, + unsigned size) +{ + PICCommonState *s = opaque; + return s->elcr; +} + +static const MemoryRegionOps pic_base_ioport_ops = { + .read = pic_ioport_read, + .write = pic_ioport_write, + .impl = { + .min_access_size = 1, + .max_access_size = 1, + }, +}; + +static const MemoryRegionOps pic_elcr_ioport_ops = { + .read = elcr_ioport_read, + .write = elcr_ioport_write, + .impl = { + .min_access_size = 1, + .max_access_size = 1, + }, +}; + +static void pic_realize(DeviceState *dev, Error **errp) +{ + PICCommonState *s = PIC_COMMON(dev); + PICClass *pc = PIC_GET_CLASS(dev); + + memory_region_init_io(&s->base_io, OBJECT(s), &pic_base_ioport_ops, s, + "pic", 2); + memory_region_init_io(&s->elcr_io, OBJECT(s), &pic_elcr_ioport_ops, s, + "elcr", 1); + + qdev_init_gpio_out(dev, s->int_out, ARRAY_SIZE(s->int_out)); + qdev_init_gpio_in(dev, pic_set_irq, 8); + + pc->parent_realize(dev, errp); +} + +qemu_irq *i8259_init(ISABus *bus, qemu_irq parent_irq) +{ + qemu_irq *irq_set; + DeviceState *dev; + ISADevice *isadev; + int i; + + irq_set = g_new0(qemu_irq, ISA_NUM_IRQS); + + isadev = i8259_init_chip(TYPE_I8259, bus, true); + dev = DEVICE(isadev); + + qdev_connect_gpio_out(dev, 0, parent_irq); + for (i = 0 ; i < 8; i++) { + irq_set[i] = qdev_get_gpio_in(dev, i); + } + + isa_pic = dev; + + isadev = i8259_init_chip(TYPE_I8259, bus, false); + dev = DEVICE(isadev); + + qdev_connect_gpio_out(dev, 0, irq_set[2]); + for (i = 0 ; i < 8; i++) { + irq_set[i + 8] = qdev_get_gpio_in(dev, i); + } + + slave_pic = PIC_COMMON(dev); + + return irq_set; +} + +static void i8259_class_init(ObjectClass *klass, void *data) +{ + PICClass *k = PIC_CLASS(klass); + DeviceClass *dc = DEVICE_CLASS(klass); + + device_class_set_parent_realize(dc, pic_realize, &k->parent_realize); + dc->reset = pic_reset; +} + +static const TypeInfo i8259_info = { + .name = TYPE_I8259, + .instance_size = sizeof(PICCommonState), + .parent = TYPE_PIC_COMMON, + .class_init = i8259_class_init, + .class_size = sizeof(PICClass), +}; + +static void pic_register_types(void) +{ + type_register_static(&i8259_info); +} + +type_init(pic_register_types) diff --git a/hw/intc/i8259_common.c b/hw/intc/i8259_common.c new file mode 100644 index 000000000..d90b40fe4 --- /dev/null +++ b/hw/intc/i8259_common.c @@ -0,0 +1,219 @@ +/* + * QEMU 8259 - common bits of emulated and KVM kernel model + * + * Copyright (c) 2003-2004 Fabrice Bellard + * Copyright (c) 2011 Jan Kiszka, Siemens AG + * + * Permission is hereby granted, free of charge, to any 
person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "hw/intc/i8259.h" +#include "hw/isa/i8259_internal.h" +#include "hw/qdev-properties.h" +#include "migration/vmstate.h" +#include "monitor/monitor.h" +#include "qapi/error.h" + +static int irq_level[16]; +static uint64_t irq_count[16]; + +void pic_reset_common(PICCommonState *s) +{ + s->last_irr = 0; + s->irr &= s->elcr; + s->imr = 0; + s->isr = 0; + s->priority_add = 0; + s->irq_base = 0; + s->read_reg_select = 0; + s->poll = 0; + s->special_mask = 0; + s->init_state = 0; + s->auto_eoi = 0; + s->rotate_on_auto_eoi = 0; + s->special_fully_nested_mode = 0; + s->init4 = 0; + s->single_mode = 0; + /* Note: ELCR is not reset */ +} + +static int pic_dispatch_pre_save(void *opaque) +{ + PICCommonState *s = opaque; + PICCommonClass *info = PIC_COMMON_GET_CLASS(s); + + if (info->pre_save) { + info->pre_save(s); + } + + return 0; +} + +static int pic_dispatch_post_load(void *opaque, int version_id) +{ + PICCommonState *s = opaque; + PICCommonClass *info = PIC_COMMON_GET_CLASS(s); + + if (info->post_load) { + info->post_load(s); + } + return 0; +} + +static void pic_common_realize(DeviceState *dev, Error **errp) +{ + PICCommonState *s = PIC_COMMON(dev); + ISADevice *isa = ISA_DEVICE(dev); + + isa_register_ioport(isa, &s->base_io, s->iobase); + if (s->elcr_addr != -1) { + isa_register_ioport(isa, &s->elcr_io, s->elcr_addr); + } + + qdev_set_legacy_instance_id(dev, s->iobase, 1); +} + +ISADevice *i8259_init_chip(const char *name, ISABus *bus, bool master) +{ + DeviceState *dev; + ISADevice *isadev; + + isadev = isa_new(name); + dev = DEVICE(isadev); + qdev_prop_set_uint32(dev, "iobase", master ? 0x20 : 0xa0); + qdev_prop_set_uint32(dev, "elcr_addr", master ? 0x4d0 : 0x4d1); + qdev_prop_set_uint8(dev, "elcr_mask", master ? 
0xf8 : 0xde); + qdev_prop_set_bit(dev, "master", master); + isa_realize_and_unref(isadev, bus, &error_fatal); + + return isadev; +} + +void pic_stat_update_irq(int irq, int level) +{ + if (level != irq_level[irq]) { + irq_level[irq] = level; + if (level == 1) { + irq_count[irq]++; + } + } +} + +bool pic_get_statistics(InterruptStatsProvider *obj, + uint64_t **irq_counts, unsigned int *nb_irqs) +{ + PICCommonState *s = PIC_COMMON(obj); + + if (s->master) { + *irq_counts = irq_count; + *nb_irqs = ARRAY_SIZE(irq_count); + } else { + *irq_counts = NULL; + *nb_irqs = 0; + } + + return true; +} + +void pic_print_info(InterruptStatsProvider *obj, Monitor *mon) +{ + PICCommonState *s = PIC_COMMON(obj); + + pic_dispatch_pre_save(s); + monitor_printf(mon, "pic%d: irr=%02x imr=%02x isr=%02x hprio=%d " + "irq_base=%02x rr_sel=%d elcr=%02x fnm=%d\n", + s->master ? 0 : 1, s->irr, s->imr, s->isr, s->priority_add, + s->irq_base, s->read_reg_select, s->elcr, + s->special_fully_nested_mode); +} + +static const VMStateDescription vmstate_pic_common = { + .name = "i8259", + .version_id = 1, + .minimum_version_id = 1, + .pre_save = pic_dispatch_pre_save, + .post_load = pic_dispatch_post_load, + .fields = (VMStateField[]) { + VMSTATE_UINT8(last_irr, PICCommonState), + VMSTATE_UINT8(irr, PICCommonState), + VMSTATE_UINT8(imr, PICCommonState), + VMSTATE_UINT8(isr, PICCommonState), + VMSTATE_UINT8(priority_add, PICCommonState), + VMSTATE_UINT8(irq_base, PICCommonState), + VMSTATE_UINT8(read_reg_select, PICCommonState), + VMSTATE_UINT8(poll, PICCommonState), + VMSTATE_UINT8(special_mask, PICCommonState), + VMSTATE_UINT8(init_state, PICCommonState), + VMSTATE_UINT8(auto_eoi, PICCommonState), + VMSTATE_UINT8(rotate_on_auto_eoi, PICCommonState), + VMSTATE_UINT8(special_fully_nested_mode, PICCommonState), + VMSTATE_UINT8(init4, PICCommonState), + VMSTATE_UINT8(single_mode, PICCommonState), + VMSTATE_UINT8(elcr, PICCommonState), + VMSTATE_END_OF_LIST() + } +}; + +static Property pic_properties_common[] = { + DEFINE_PROP_UINT32("iobase", PICCommonState, iobase, -1), + DEFINE_PROP_UINT32("elcr_addr", PICCommonState, elcr_addr, -1), + DEFINE_PROP_UINT8("elcr_mask", PICCommonState, elcr_mask, -1), + DEFINE_PROP_BIT("master", PICCommonState, master, 0, false), + DEFINE_PROP_END_OF_LIST(), +}; + +static void pic_common_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + InterruptStatsProviderClass *ic = INTERRUPT_STATS_PROVIDER_CLASS(klass); + + dc->vmsd = &vmstate_pic_common; + device_class_set_props(dc, pic_properties_common); + dc->realize = pic_common_realize; + /* + * Reason: unlike ordinary ISA devices, the PICs need additional + * wiring: its IRQ input lines are set up by board code, and the + * wiring of the slave to the master is hard-coded in device model + * code. 
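
The wiring described here is what pic_read_irq() above depends on: lines 0..7 vector through the master's irq_base, while lines 8..15 assert master pin 2 and take their vector from the slave's irq_base. A minimal standalone sketch of that vector computation; the 0x08/0x70 bases are the conventional PC values, assumed purely for illustration and not taken from this patch:

#include <stdio.h>

#define MASTER_BASE 0x08  /* assumed: conventional PC-style bases, */
#define SLAVE_BASE  0x70  /* not values defined in this patch      */

/* Vector for a pending line 0..15, mirroring the cascade in pic_read_irq(). */
static int vector_for_line(int line)
{
    if (line < 8) {
        return MASTER_BASE + line;   /* master handles IRQ0..7 itself */
    }
    /* IRQ8..15 assert master pin 2; the slave supplies the vector. */
    return SLAVE_BASE + (line - 8);
}

int main(void)
{
    printf("IRQ1  -> vector 0x%02x\n", vector_for_line(1));  /* 0x09 */
    printf("IRQ14 -> vector 0x%02x\n", vector_for_line(14)); /* 0x76 */
    return 0;
}
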
+ */ + dc->user_creatable = false; + ic->get_statistics = pic_get_statistics; + ic->print_info = pic_print_info; +} + +static const TypeInfo pic_common_type = { + .name = TYPE_PIC_COMMON, + .parent = TYPE_ISA_DEVICE, + .instance_size = sizeof(PICCommonState), + .class_size = sizeof(PICCommonClass), + .class_init = pic_common_class_init, + .abstract = true, + .interfaces = (InterfaceInfo[]) { + { TYPE_INTERRUPT_STATS_PROVIDER }, + { } + }, +}; + +static void pic_common_register_types(void) +{ + type_register_static(&pic_common_type); +} + +type_init(pic_common_register_types) diff --git a/hw/intc/imx_avic.c b/hw/intc/imx_avic.c new file mode 100644 index 000000000..63fc602a1 --- /dev/null +++ b/hw/intc/imx_avic.c @@ -0,0 +1,366 @@ +/* + * i.MX31 Vectored Interrupt Controller + * + * Note this is NOT the PL192 provided by ARM, but + * a custom implementation by Freescale. + * + * Copyright (c) 2008 OKL + * Copyright (c) 2011 NICTA Pty Ltd + * Originally written by Hans Jiang + * Updated by Jean-Christophe Dubois + * + * This code is licensed under the GPL version 2 or later. See + * the COPYING file in the top-level directory. + * + * TODO: implement vectors. + */ + +#include "qemu/osdep.h" +#include "hw/intc/imx_avic.h" +#include "hw/irq.h" +#include "migration/vmstate.h" +#include "qemu/log.h" +#include "qemu/module.h" + +#ifndef DEBUG_IMX_AVIC +#define DEBUG_IMX_AVIC 0 +#endif + +#define DPRINTF(fmt, args...) \ + do { \ + if (DEBUG_IMX_AVIC) { \ + fprintf(stderr, "[%s]%s: " fmt , TYPE_IMX_AVIC, \ + __func__, ##args); \ + } \ + } while (0) + +static const VMStateDescription vmstate_imx_avic = { + .name = TYPE_IMX_AVIC, + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT64(pending, IMXAVICState), + VMSTATE_UINT64(enabled, IMXAVICState), + VMSTATE_UINT64(is_fiq, IMXAVICState), + VMSTATE_UINT32(intcntl, IMXAVICState), + VMSTATE_UINT32(intmask, IMXAVICState), + VMSTATE_UINT32_ARRAY(prio, IMXAVICState, PRIO_WORDS), + VMSTATE_END_OF_LIST() + }, +}; + +static inline int imx_avic_prio(IMXAVICState *s, int irq) +{ + uint32_t word = irq / PRIO_PER_WORD; + uint32_t part = 4 * (irq % PRIO_PER_WORD); + return 0xf & (s->prio[word] >> part); +} + +/* Update interrupts. 
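
imx_avic_prio() above unpacks one of eight 4-bit priority fields from each 32-bit NIPRIORITY word. A standalone check of that extraction, as a sketch with the device state stripped away (PRIO_PER_WORD mirrors the constant used above):

#include <assert.h>
#include <stdint.h>

#define PRIO_PER_WORD 8  /* eight 4-bit fields per 32-bit NIPRIORITY word */

/* Same extraction as imx_avic_prio() above, minus the device state. */
static int prio_of(const uint32_t *prio, int irq)
{
    uint32_t word = irq / PRIO_PER_WORD;
    uint32_t part = 4 * (irq % PRIO_PER_WORD);
    return 0xf & (prio[word] >> part);
}

int main(void)
{
    uint32_t prio[8] = { 0 };
    prio[1] = 0xA << 12;               /* irq 11 lives in word 1, nibble 3 */
    assert(prio_of(prio, 11) == 0xA);
    assert(prio_of(prio, 0) == 0);
    return 0;
}
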
*/ +static void imx_avic_update(IMXAVICState *s) +{ + int i; + uint64_t new = s->pending & s->enabled; + uint64_t flags; + + flags = new & s->is_fiq; + qemu_set_irq(s->fiq, !!flags); + + flags = new & ~s->is_fiq; + if (!flags || (s->intmask == 0x1f)) { + qemu_set_irq(s->irq, !!flags); + return; + } + + /* + * Take interrupt if there's a pending interrupt with + * priority higher than the value of intmask + */ + for (i = 0; i < IMX_AVIC_NUM_IRQS; i++) { + if (flags & (1UL << i)) { + if (imx_avic_prio(s, i) > s->intmask) { + qemu_set_irq(s->irq, 1); + return; + } + } + } + qemu_set_irq(s->irq, 0); +} + +static void imx_avic_set_irq(void *opaque, int irq, int level) +{ + IMXAVICState *s = (IMXAVICState *)opaque; + + if (level) { + DPRINTF("Raising IRQ %d, prio %d\n", + irq, imx_avic_prio(s, irq)); + s->pending |= (1ULL << irq); + } else { + DPRINTF("Clearing IRQ %d, prio %d\n", + irq, imx_avic_prio(s, irq)); + s->pending &= ~(1ULL << irq); + } + + imx_avic_update(s); +} + + +static uint64_t imx_avic_read(void *opaque, + hwaddr offset, unsigned size) +{ + IMXAVICState *s = (IMXAVICState *)opaque; + + DPRINTF("read(offset = 0x%" HWADDR_PRIx ")\n", offset); + + switch (offset >> 2) { + case 0: /* INTCNTL */ + return s->intcntl; + + case 1: /* Normal Interrupt Mask Register, NIMASK */ + return s->intmask; + + case 2: /* Interrupt Enable Number Register, INTENNUM */ + case 3: /* Interrupt Disable Number Register, INTDISNUM */ + return 0; + + case 4: /* Interrupt Enabled Number Register High */ + return s->enabled >> 32; + + case 5: /* Interrupt Enabled Number Register Low */ + return s->enabled & 0xffffffffULL; + + case 6: /* Interrupt Type Register High */ + return s->is_fiq >> 32; + + case 7: /* Interrupt Type Register Low */ + return s->is_fiq & 0xffffffffULL; + + case 8: /* Normal Interrupt Priority Register 7 */ + case 9: /* Normal Interrupt Priority Register 6 */ + case 10:/* Normal Interrupt Priority Register 5 */ + case 11:/* Normal Interrupt Priority Register 4 */ + case 12:/* Normal Interrupt Priority Register 3 */ + case 13:/* Normal Interrupt Priority Register 2 */ + case 14:/* Normal Interrupt Priority Register 1 */ + case 15:/* Normal Interrupt Priority Register 0 */ + return s->prio[15-(offset>>2)]; + + case 16: /* Normal interrupt vector and status register */ + { + /* + * This returns the highest priority + * outstanding interrupt. Where there is more than + * one pending IRQ with the same priority, + * take the highest numbered one. 
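
A standalone restatement of that tie-break rule, checkable in isolation: scanning from bit 63 down with a strict greater-than comparison makes the highest-numbered IRQ win among equals, which is what the loop below implements.

#include <assert.h>
#include <stdint.h>

/* Highest priority wins; on a tie, the higher IRQ number wins. */
static int best_irq(uint64_t flags, const int *prio_tbl)
{
    int best = -1, best_prio = -1;
    for (int i = 63; i >= 0; --i) {
        if ((flags & (1ULL << i)) && prio_tbl[i] > best_prio) {
            best = i;
            best_prio = prio_tbl[i];
        }
    }
    return best;
}

int main(void)
{
    int prio[64] = { 0 };
    prio[5] = 3;
    prio[9] = 3;  /* same priority as irq 5 */
    assert(best_irq((1ULL << 5) | (1ULL << 9), prio) == 9);
    return 0;
}
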
+ */
+        uint64_t flags = s->pending & s->enabled & ~s->is_fiq;
+        int i;
+        int prio = -1;
+        int irq = -1;
+        for (i = 63; i >= 0; --i) {
+            if (flags & (1ULL << i)) {
+                int irq_prio = imx_avic_prio(s, i);
+                if (irq_prio > prio) {
+                    irq = i;
+                    prio = irq_prio;
+                }
+            }
+        }
+        if (irq >= 0) {
+            imx_avic_set_irq(s, irq, 0);
+            return irq << 16 | prio;
+        }
+        return 0xffffffffULL;
+    }
+    case 17:/* Fast Interrupt vector and status register */
+    {
+        uint64_t flags = s->pending & s->enabled & s->is_fiq;
+        int i = ctz64(flags);
+        if (i < 64) {
+            imx_avic_set_irq(opaque, i, 0);
+            return i;
+        }
+        return 0xffffffffULL;
+    }
+    case 18:/* Interrupt source register high */
+        return s->pending >> 32;
+
+    case 19:/* Interrupt source register low */
+        return s->pending & 0xffffffffULL;
+
+    case 20:/* Interrupt Force Register high */
+    case 21:/* Interrupt Force Register low */
+        return 0;
+
+    case 22:/* Normal Interrupt Pending Register High */
+        return (s->pending & s->enabled & ~s->is_fiq) >> 32;
+
+    case 23:/* Normal Interrupt Pending Register Low */
+        return (s->pending & s->enabled & ~s->is_fiq) & 0xffffffffULL;
+
+    case 24: /* Fast Interrupt Pending Register High */
+        return (s->pending & s->enabled & s->is_fiq) >> 32;
+
+    case 25: /* Fast Interrupt Pending Register Low */
+        return (s->pending & s->enabled & s->is_fiq) & 0xffffffffULL;
+
+    case 0x40: /* AVIC vector 0, use for WFI WAR */
+        return 0x4;
+
+    default:
+        qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Bad register at offset 0x%"
+                      HWADDR_PRIx "\n", TYPE_IMX_AVIC, __func__, offset);
+        return 0;
+    }
+}
+
+static void imx_avic_write(void *opaque, hwaddr offset,
+                           uint64_t val, unsigned size)
+{
+    IMXAVICState *s = (IMXAVICState *)opaque;
+
+    /* Vector Registers not yet supported */
+    if (offset >= 0x100 && offset <= 0x2fc) {
+        qemu_log_mask(LOG_UNIMP, "[%s]%s: vector %d ignored\n",
+                      TYPE_IMX_AVIC, __func__, (int)((offset - 0x100) >> 2));
+        return;
+    }
+
+    DPRINTF("(0x%" HWADDR_PRIx ") = 0x%x\n", offset, (unsigned int)val);
+
+    switch (offset >> 2) {
+    case 0: /* Interrupt Control Register, INTCNTL */
+        s->intcntl = val & (ABFEN | NIDIS | FIDIS | NIAD | FIAD | NM);
+        if (s->intcntl & ABFEN) {
+            s->intcntl &= ~(val & ABFLAG);
+        }
+        break;
+
+    case 1: /* Normal Interrupt Mask Register, NIMASK */
+        s->intmask = val & 0x1f;
+        break;
+
+    case 2: /* Interrupt Enable Number Register, INTENNUM */
+        DPRINTF("enable(%d)\n", (int)val);
+        val &= 0x3f;
+        s->enabled |= (1ULL << val);
+        break;
+
+    case 3: /* Interrupt Disable Number Register, INTDISNUM */
+        DPRINTF("disable(%d)\n", (int)val);
+        val &= 0x3f;
+        s->enabled &= ~(1ULL << val);
+        break;
+
+    case 4: /* Interrupt Enable Number Register High */
+        s->enabled = (s->enabled & 0xffffffffULL) | (val << 32);
+        break;
+
+    case 5: /* Interrupt Enable Number Register Low */
+        s->enabled = (s->enabled & 0xffffffff00000000ULL) | val;
+        break;
+
+    case 6: /* Interrupt Type Register High */
+        s->is_fiq = (s->is_fiq & 0xffffffffULL) | (val << 32);
+        break;
+
+    case 7: /* Interrupt Type Register Low */
+        s->is_fiq = (s->is_fiq & 0xffffffff00000000ULL) | val;
+        break;
+
+    case 8: /* Normal Interrupt Priority Register 7 */
+    case 9: /* Normal Interrupt Priority Register 6 */
+    case 10:/* Normal Interrupt Priority Register 5 */
+    case 11:/* Normal Interrupt Priority Register 4 */
+    case 12:/* Normal Interrupt Priority Register 3 */
+    case 13:/* Normal Interrupt Priority Register 2 */
+    case 14:/* Normal Interrupt Priority Register 1 */
+    case 15:/* Normal Interrupt Priority Register 0 */
+        s->prio[15-(offset>>2)] = val;
+        break;
+
+    /* Read-only registers, writes ignored */
+    case 16:/* Normal
Interrupt Vector and Status register */ + case 17:/* Fast Interrupt vector and status register */ + case 18:/* Interrupt source register high */ + case 19:/* Interrupt source register low */ + return; + + case 20:/* Interrupt Force Register high */ + s->pending = (s->pending & 0xffffffffULL) | (val << 32); + break; + + case 21:/* Interrupt Force Register low */ + s->pending = (s->pending & 0xffffffff00000000ULL) | val; + break; + + case 22:/* Normal Interrupt Pending Register High */ + case 23:/* Normal Interrupt Pending Register Low */ + case 24: /* Fast Interrupt Pending Register High */ + case 25: /* Fast Interrupt Pending Register Low */ + return; + + default: + qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Bad register at offset 0x%" + HWADDR_PRIx "\n", TYPE_IMX_AVIC, __func__, offset); + } + imx_avic_update(s); +} + +static const MemoryRegionOps imx_avic_ops = { + .read = imx_avic_read, + .write = imx_avic_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void imx_avic_reset(DeviceState *dev) +{ + IMXAVICState *s = IMX_AVIC(dev); + + s->pending = 0; + s->enabled = 0; + s->is_fiq = 0; + s->intmask = 0x1f; + s->intcntl = 0; + memset(s->prio, 0, sizeof s->prio); +} + +static void imx_avic_init(Object *obj) +{ + DeviceState *dev = DEVICE(obj); + IMXAVICState *s = IMX_AVIC(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + + memory_region_init_io(&s->iomem, obj, &imx_avic_ops, s, + TYPE_IMX_AVIC, 0x1000); + sysbus_init_mmio(sbd, &s->iomem); + + qdev_init_gpio_in(dev, imx_avic_set_irq, IMX_AVIC_NUM_IRQS); + sysbus_init_irq(sbd, &s->irq); + sysbus_init_irq(sbd, &s->fiq); +} + + +static void imx_avic_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->vmsd = &vmstate_imx_avic; + dc->reset = imx_avic_reset; + dc->desc = "i.MX Advanced Vector Interrupt Controller"; +} + +static const TypeInfo imx_avic_info = { + .name = TYPE_IMX_AVIC, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(IMXAVICState), + .instance_init = imx_avic_init, + .class_init = imx_avic_class_init, +}; + +static void imx_avic_register_types(void) +{ + type_register_static(&imx_avic_info); +} + +type_init(imx_avic_register_types) diff --git a/hw/intc/imx_gpcv2.c b/hw/intc/imx_gpcv2.c new file mode 100644 index 000000000..237d5f97e --- /dev/null +++ b/hw/intc/imx_gpcv2.c @@ -0,0 +1,126 @@ +/* + * Copyright (c) 2018, Impinj, Inc. + * + * i.MX7 GPCv2 block emulation code + * + * Author: Andrey Smirnov + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
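
The write handler further below models the GPCv2 power-gating handshake: the five PHY power-up/down request bits clear immediately to signal completion. A minimal standalone model of that self-clearing acknowledge; REQ_MASK assumes the five Pxx_REQ bits 0..4 defined below:

#include <assert.h>
#include <stdint.h>

#define REQ_MASK 0x1fu  /* assumed: the five Pxx_REQ bits 0..4 defined below */

/* The "hardware" acknowledges by clearing the request bits it accepted. */
static uint32_t pgc_req_write(uint32_t value)
{
    return value & ~REQ_MASK;
}

int main(void)
{
    assert(pgc_req_write(0x1f) == 0);       /* all five requests complete */
    assert(pgc_req_write(0x100) == 0x100);  /* unrelated bits are preserved */
    return 0;
}
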
+ */ + +#include "qemu/osdep.h" +#include "hw/intc/imx_gpcv2.h" +#include "migration/vmstate.h" +#include "qemu/module.h" + +#define GPC_PU_PGC_SW_PUP_REQ 0x0f8 +#define GPC_PU_PGC_SW_PDN_REQ 0x104 + +#define USB_HSIC_PHY_SW_Pxx_REQ BIT(4) +#define USB_OTG2_PHY_SW_Pxx_REQ BIT(3) +#define USB_OTG1_PHY_SW_Pxx_REQ BIT(2) +#define PCIE_PHY_SW_Pxx_REQ BIT(1) +#define MIPI_PHY_SW_Pxx_REQ BIT(0) + + +static void imx_gpcv2_reset(DeviceState *dev) +{ + IMXGPCv2State *s = IMX_GPCV2(dev); + + memset(s->regs, 0, sizeof(s->regs)); +} + +static uint64_t imx_gpcv2_read(void *opaque, hwaddr offset, + unsigned size) +{ + IMXGPCv2State *s = opaque; + + return s->regs[offset / sizeof(uint32_t)]; +} + +static void imx_gpcv2_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + IMXGPCv2State *s = opaque; + const size_t idx = offset / sizeof(uint32_t); + + s->regs[idx] = value; + + /* + * Real HW will clear those bits once as a way to indicate that + * power up request is complete + */ + if (offset == GPC_PU_PGC_SW_PUP_REQ || + offset == GPC_PU_PGC_SW_PDN_REQ) { + s->regs[idx] &= ~(USB_HSIC_PHY_SW_Pxx_REQ | + USB_OTG2_PHY_SW_Pxx_REQ | + USB_OTG1_PHY_SW_Pxx_REQ | + PCIE_PHY_SW_Pxx_REQ | + MIPI_PHY_SW_Pxx_REQ); + } +} + +static const struct MemoryRegionOps imx_gpcv2_ops = { + .read = imx_gpcv2_read, + .write = imx_gpcv2_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .impl = { + /* + * Our device would not work correctly if the guest was doing + * unaligned access. This might not be a limitation on the real + * device but in practice there is no reason for a guest to access + * this device unaligned. + */ + .min_access_size = 4, + .max_access_size = 4, + .unaligned = false, + }, +}; + +static void imx_gpcv2_init(Object *obj) +{ + SysBusDevice *sd = SYS_BUS_DEVICE(obj); + IMXGPCv2State *s = IMX_GPCV2(obj); + + memory_region_init_io(&s->iomem, + obj, + &imx_gpcv2_ops, + s, + TYPE_IMX_GPCV2 ".iomem", + sizeof(s->regs)); + sysbus_init_mmio(sd, &s->iomem); +} + +static const VMStateDescription vmstate_imx_gpcv2 = { + .name = TYPE_IMX_GPCV2, + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32_ARRAY(regs, IMXGPCv2State, GPC_NUM), + VMSTATE_END_OF_LIST() + }, +}; + +static void imx_gpcv2_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = imx_gpcv2_reset; + dc->vmsd = &vmstate_imx_gpcv2; + dc->desc = "i.MX GPCv2 Module"; +} + +static const TypeInfo imx_gpcv2_info = { + .name = TYPE_IMX_GPCV2, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(IMXGPCv2State), + .instance_init = imx_gpcv2_init, + .class_init = imx_gpcv2_class_init, +}; + +static void imx_gpcv2_register_type(void) +{ + type_register_static(&imx_gpcv2_info); +} +type_init(imx_gpcv2_register_type) diff --git a/hw/intc/intc.c b/hw/intc/intc.c new file mode 100644 index 000000000..2e1e29e75 --- /dev/null +++ b/hw/intc/intc.c @@ -0,0 +1,41 @@ +/* + * QEMU Generic Interrupt Controller + * + * Copyright (c) 2016 Hervé Poussineau + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall 
be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "hw/intc/intc.h" +#include "qemu/module.h" + +static const TypeInfo intctrl_info = { + .name = TYPE_INTERRUPT_STATS_PROVIDER, + .parent = TYPE_INTERFACE, + .class_size = sizeof(InterruptStatsProviderClass), +}; + +static void intc_register_types(void) +{ + type_register_static(&intctrl_info); +} + +type_init(intc_register_types) + diff --git a/hw/intc/ioapic.c b/hw/intc/ioapic.c new file mode 100644 index 000000000..264262959 --- /dev/null +++ b/hw/intc/ioapic.c @@ -0,0 +1,513 @@ +/* + * ioapic.c IOAPIC emulation logic + * + * Copyright (c) 2004-2005 Fabrice Bellard + * + * Split the ioapic logic from apic.c + * Xiantao Zhang + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "monitor/monitor.h" +#include "hw/i386/apic.h" +#include "hw/i386/ioapic.h" +#include "hw/i386/ioapic_internal.h" +#include "hw/i386/x86.h" +#include "hw/intc/i8259.h" +#include "hw/pci/msi.h" +#include "hw/qdev-properties.h" +#include "sysemu/kvm.h" +#include "sysemu/sysemu.h" +#include "hw/i386/apic-msidef.h" +#include "hw/i386/x86-iommu.h" +#include "trace.h" + +#define APIC_DELIVERY_MODE_SHIFT 8 +#define APIC_POLARITY_SHIFT 14 +#define APIC_TRIG_MODE_SHIFT 15 + +static IOAPICCommonState *ioapics[MAX_IOAPICS]; + +/* global variable from ioapic_common.c */ +extern int ioapic_no; + +struct ioapic_entry_info { + /* fields parsed from IOAPIC entries */ + uint8_t masked; + uint8_t trig_mode; + uint16_t dest_idx; + uint8_t dest_mode; + uint8_t delivery_mode; + uint8_t vector; + + /* MSI message generated from above parsed fields */ + uint32_t addr; + uint32_t data; +}; + +static void ioapic_entry_parse(uint64_t entry, struct ioapic_entry_info *info) +{ + memset(info, 0, sizeof(*info)); + info->masked = (entry >> IOAPIC_LVT_MASKED_SHIFT) & 1; + info->trig_mode = (entry >> IOAPIC_LVT_TRIGGER_MODE_SHIFT) & 1; + /* + * By default, this would be dest_id[8] + reserved[8]. When IR + * is enabled, this would be interrupt_index[15] + + * interrupt_format[1]. This field never means anything, but + * only used to generate corresponding MSI. 
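
A standalone sketch of how such a redirection entry is folded into an MSI, in the spirit of the parse below. The shift values are assumptions taken from the 82093AA entry layout and the usual MSI encoding; treat them as illustrative, not authoritative, and note that the delivery-mode field is omitted for brevity:

#include <stdint.h>
#include <stdio.h>

/* Assumed bit positions, mirroring the entry layout used above. */
#define LVT_TRIGGER_SHIFT   15
#define LVT_DEST_MODE_SHIFT 11
#define LVT_DEST_IDX_SHIFT  48
#define VECTOR_MASK         0xff
#define APIC_BASE           0xfee00000u

int main(void)
{
    /* vector 0x31, level-triggered, physical destination index 0x02 */
    uint64_t entry = ((uint64_t)0x02 << LVT_DEST_IDX_SHIFT)
                     | (1ULL << LVT_TRIGGER_SHIFT) | 0x31;
    uint32_t addr = APIC_BASE
                    | (uint32_t)(((entry >> LVT_DEST_IDX_SHIFT) & 0xffff) << 4)
                    | (uint32_t)(((entry >> LVT_DEST_MODE_SHIFT) & 1) << 2);
    uint32_t data = (uint32_t)(entry & VECTOR_MASK)
                    | (uint32_t)(((entry >> LVT_TRIGGER_SHIFT) & 1) << 15);
    printf("MSI addr=0x%08x data=0x%08x\n", addr, data); /* fee00020 / 00008031 */
    return 0;
}
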
+ */ + info->dest_idx = (entry >> IOAPIC_LVT_DEST_IDX_SHIFT) & 0xffff; + info->dest_mode = (entry >> IOAPIC_LVT_DEST_MODE_SHIFT) & 1; + info->delivery_mode = (entry >> IOAPIC_LVT_DELIV_MODE_SHIFT) \ + & IOAPIC_DM_MASK; + if (info->delivery_mode == IOAPIC_DM_EXTINT) { + info->vector = pic_read_irq(isa_pic); + } else { + info->vector = entry & IOAPIC_VECTOR_MASK; + } + + info->addr = APIC_DEFAULT_ADDRESS | \ + (info->dest_idx << MSI_ADDR_DEST_IDX_SHIFT) | \ + (info->dest_mode << MSI_ADDR_DEST_MODE_SHIFT); + info->data = (info->vector << MSI_DATA_VECTOR_SHIFT) | \ + (info->trig_mode << MSI_DATA_TRIGGER_SHIFT) | \ + (info->delivery_mode << MSI_DATA_DELIVERY_MODE_SHIFT); +} + +static void ioapic_service(IOAPICCommonState *s) +{ + AddressSpace *ioapic_as = X86_MACHINE(qdev_get_machine())->ioapic_as; + struct ioapic_entry_info info; + uint8_t i; + uint32_t mask; + uint64_t entry; + + for (i = 0; i < IOAPIC_NUM_PINS; i++) { + mask = 1 << i; + if (s->irr & mask) { + int coalesce = 0; + + entry = s->ioredtbl[i]; + ioapic_entry_parse(entry, &info); + if (!info.masked) { + if (info.trig_mode == IOAPIC_TRIGGER_EDGE) { + s->irr &= ~mask; + } else { + coalesce = s->ioredtbl[i] & IOAPIC_LVT_REMOTE_IRR; + trace_ioapic_set_remote_irr(i); + s->ioredtbl[i] |= IOAPIC_LVT_REMOTE_IRR; + } + + if (coalesce) { + /* We are level triggered interrupts, and the + * guest should be still working on previous one, + * so skip it. */ + continue; + } + +#ifdef CONFIG_KVM + if (kvm_irqchip_is_split()) { + if (info.trig_mode == IOAPIC_TRIGGER_EDGE) { + kvm_set_irq(kvm_state, i, 1); + kvm_set_irq(kvm_state, i, 0); + } else { + kvm_set_irq(kvm_state, i, 1); + } + continue; + } +#endif + + /* No matter whether IR is enabled, we translate + * the IOAPIC message into a MSI one, and its + * address space will decide whether we need a + * translation. */ + stl_le_phys(ioapic_as, info.addr, info.data); + } + } + } +} + +#define SUCCESSIVE_IRQ_MAX_COUNT 10000 + +static void delayed_ioapic_service_cb(void *opaque) +{ + IOAPICCommonState *s = opaque; + + ioapic_service(s); +} + +static void ioapic_set_irq(void *opaque, int vector, int level) +{ + IOAPICCommonState *s = opaque; + + /* ISA IRQs map to GSI 1-1 except for IRQ0 which maps + * to GSI 2. GSI maps to ioapic 1-1. This is not + * the cleanest way of doing it but it should work. */ + + trace_ioapic_set_irq(vector, level); + ioapic_stat_update_irq(s, vector, level); + if (vector == 0) { + vector = 2; + } + if (vector < IOAPIC_NUM_PINS) { + uint32_t mask = 1 << vector; + uint64_t entry = s->ioredtbl[vector]; + + if (((entry >> IOAPIC_LVT_TRIGGER_MODE_SHIFT) & 1) == + IOAPIC_TRIGGER_LEVEL) { + /* level triggered */ + if (level) { + s->irr |= mask; + if (!(entry & IOAPIC_LVT_REMOTE_IRR)) { + ioapic_service(s); + } + } else { + s->irr &= ~mask; + } + } else { + /* According to the 82093AA manual, we must ignore edge requests + * if the input pin is masked. 
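
The masked-edge rule stated here, together with the level rule in the branch just above it, fits in a few lines. A standalone restatement of those semantics, a sketch rather than the device code itself:

#include <assert.h>
#include <stdint.h>

/* New irr for one pin, restating the level/edge rules above. */
static uint32_t pin_update(uint32_t irr, uint32_t mask, int level,
                           int level_triggered, int masked)
{
    if (level_triggered) {
        return level ? (irr | mask) : (irr & ~mask);
    }
    /* edge: only an asserting edge on an unmasked pin latches irr */
    return (level && !masked) ? (irr | mask) : irr;
}

int main(void)
{
    assert(pin_update(0, 1 << 4, 1, 0, 1) == 0);         /* masked edge: dropped */
    assert(pin_update(0, 1 << 4, 1, 0, 0) == (1 << 4));  /* unmasked edge: latched */
    assert(pin_update(1 << 4, 1 << 4, 0, 1, 0) == 0);    /* level deassert clears */
    return 0;
}
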
*/ + if (level && !(entry & IOAPIC_LVT_MASKED)) { + s->irr |= mask; + ioapic_service(s); + } + } + } +} + +static void ioapic_update_kvm_routes(IOAPICCommonState *s) +{ +#ifdef CONFIG_KVM + int i; + + if (kvm_irqchip_is_split()) { + for (i = 0; i < IOAPIC_NUM_PINS; i++) { + MSIMessage msg; + struct ioapic_entry_info info; + ioapic_entry_parse(s->ioredtbl[i], &info); + if (!info.masked) { + msg.address = info.addr; + msg.data = info.data; + kvm_irqchip_update_msi_route(kvm_state, i, msg, NULL); + } + } + kvm_irqchip_commit_routes(kvm_state); + } +#endif +} + +#ifdef CONFIG_KVM +static void ioapic_iec_notifier(void *private, bool global, + uint32_t index, uint32_t mask) +{ + IOAPICCommonState *s = (IOAPICCommonState *)private; + /* For simplicity, we just update all the routes */ + ioapic_update_kvm_routes(s); +} +#endif + +void ioapic_eoi_broadcast(int vector) +{ + IOAPICCommonState *s; + uint64_t entry; + int i, n; + + trace_ioapic_eoi_broadcast(vector); + + for (i = 0; i < MAX_IOAPICS; i++) { + s = ioapics[i]; + if (!s) { + continue; + } + for (n = 0; n < IOAPIC_NUM_PINS; n++) { + entry = s->ioredtbl[n]; + + if ((entry & IOAPIC_VECTOR_MASK) != vector || + ((entry >> IOAPIC_LVT_TRIGGER_MODE_SHIFT) & 1) != IOAPIC_TRIGGER_LEVEL) { + continue; + } + +#ifdef CONFIG_KVM + /* + * When IOAPIC is in the userspace while APIC is still in + * the kernel (i.e., split irqchip), we have a trick to + * kick the resamplefd logic for registered irqfds from + * userspace to deactivate the IRQ. When that happens, it + * means the irq bypassed userspace IOAPIC (so the irr and + * remote-irr of the table entry should be bypassed too + * even if interrupt come). Still kick the resamplefds if + * they're bound to the IRQ, to make sure to EOI the + * interrupt for the hardware correctly. + * + * Note: We still need to go through the irr & remote-irr + * operations below because we don't know whether there're + * emulated devices that are using/sharing the same IRQ. + */ + kvm_resample_fd_notify(n); +#endif + + if (!(entry & IOAPIC_LVT_REMOTE_IRR)) { + continue; + } + + trace_ioapic_clear_remote_irr(n, vector); + s->ioredtbl[n] = entry & ~IOAPIC_LVT_REMOTE_IRR; + + if (!(entry & IOAPIC_LVT_MASKED) && (s->irr & (1 << n))) { + ++s->irq_eoi[n]; + if (s->irq_eoi[n] >= SUCCESSIVE_IRQ_MAX_COUNT) { + /* + * Real hardware does not deliver the interrupt immediately + * during eoi broadcast, and this lets a buggy guest make + * slow progress even if it does not correctly handle a + * level-triggered interrupt. Emulate this behavior if we + * detect an interrupt storm. 
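
The storm detector counts consecutive EOIs that immediately re-raise the same line and backs off once the count crosses the threshold. A standalone sketch of that counting rule; the threshold mirrors SUCCESSIVE_IRQ_MAX_COUNT defined earlier, everything else is illustrative:

#include <assert.h>

#define MAX_SUCCESSIVE 10000  /* mirrors SUCCESSIVE_IRQ_MAX_COUNT above */

/* Returns 1 when the re-assert should be deferred rather than immediate. */
static int note_eoi(int *count, int still_pending)
{
    if (!still_pending) {
        *count = 0;
        return 0;
    }
    if (++*count >= MAX_SUCCESSIVE) {
        *count = 0;  /* reset and back off, as ioapic_eoi_broadcast() does */
        return 1;
    }
    return 0;
}

int main(void)
{
    int c = 0, deferred = 0;
    for (int i = 0; i < MAX_SUCCESSIVE; i++) {
        deferred = note_eoi(&c, 1);
    }
    assert(deferred == 1);  /* the 10000th consecutive EOI trips the detector */
    return 0;
}
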
+ */ + s->irq_eoi[n] = 0; + timer_mod_anticipate(s->delayed_ioapic_service_timer, + qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + + NANOSECONDS_PER_SECOND / 100); + trace_ioapic_eoi_delayed_reassert(n); + } else { + ioapic_service(s); + } + } else { + s->irq_eoi[n] = 0; + } + } + } +} + +static uint64_t +ioapic_mem_read(void *opaque, hwaddr addr, unsigned int size) +{ + IOAPICCommonState *s = opaque; + int index; + uint32_t val = 0; + + addr &= 0xff; + + switch (addr) { + case IOAPIC_IOREGSEL: + val = s->ioregsel; + break; + case IOAPIC_IOWIN: + if (size != 4) { + break; + } + switch (s->ioregsel) { + case IOAPIC_REG_ID: + case IOAPIC_REG_ARB: + val = s->id << IOAPIC_ID_SHIFT; + break; + case IOAPIC_REG_VER: + val = s->version | + ((IOAPIC_NUM_PINS - 1) << IOAPIC_VER_ENTRIES_SHIFT); + break; + default: + index = (s->ioregsel - IOAPIC_REG_REDTBL_BASE) >> 1; + if (index >= 0 && index < IOAPIC_NUM_PINS) { + if (s->ioregsel & 1) { + val = s->ioredtbl[index] >> 32; + } else { + val = s->ioredtbl[index] & 0xffffffff; + } + } + } + break; + } + + trace_ioapic_mem_read(addr, s->ioregsel, size, val); + + return val; +} + +/* + * This is to satisfy the hack in Linux kernel. One hack of it is to + * simulate clearing the Remote IRR bit of IOAPIC entry using the + * following: + * + * "For IO-APIC's with EOI register, we use that to do an explicit EOI. + * Otherwise, we simulate the EOI message manually by changing the trigger + * mode to edge and then back to level, with RTE being masked during + * this." + * + * (See linux kernel __eoi_ioapic_pin() comment in commit c0205701) + * + * This is based on the assumption that, Remote IRR bit will be + * cleared by IOAPIC hardware when configured as edge-triggered + * interrupts. + * + * Without this, level-triggered interrupts in IR mode might fail to + * work correctly. 
+ */ +static inline void +ioapic_fix_edge_remote_irr(uint64_t *entry) +{ + if (!(*entry & IOAPIC_LVT_TRIGGER_MODE)) { + /* Edge-triggered interrupts, make sure remote IRR is zero */ + *entry &= ~((uint64_t)IOAPIC_LVT_REMOTE_IRR); + } +} + +static void +ioapic_mem_write(void *opaque, hwaddr addr, uint64_t val, + unsigned int size) +{ + IOAPICCommonState *s = opaque; + int index; + + addr &= 0xff; + trace_ioapic_mem_write(addr, s->ioregsel, size, val); + + switch (addr) { + case IOAPIC_IOREGSEL: + s->ioregsel = val; + break; + case IOAPIC_IOWIN: + if (size != 4) { + break; + } + switch (s->ioregsel) { + case IOAPIC_REG_ID: + s->id = (val >> IOAPIC_ID_SHIFT) & IOAPIC_ID_MASK; + break; + case IOAPIC_REG_VER: + case IOAPIC_REG_ARB: + break; + default: + index = (s->ioregsel - IOAPIC_REG_REDTBL_BASE) >> 1; + if (index >= 0 && index < IOAPIC_NUM_PINS) { + uint64_t ro_bits = s->ioredtbl[index] & IOAPIC_RO_BITS; + if (s->ioregsel & 1) { + s->ioredtbl[index] &= 0xffffffff; + s->ioredtbl[index] |= (uint64_t)val << 32; + } else { + s->ioredtbl[index] &= ~0xffffffffULL; + s->ioredtbl[index] |= val; + } + /* restore RO bits */ + s->ioredtbl[index] &= IOAPIC_RW_BITS; + s->ioredtbl[index] |= ro_bits; + s->irq_eoi[index] = 0; + ioapic_fix_edge_remote_irr(&s->ioredtbl[index]); + ioapic_service(s); + } + } + break; + case IOAPIC_EOI: + /* Explicit EOI is only supported for IOAPIC version 0x20 */ + if (size != 4 || s->version != 0x20) { + break; + } + ioapic_eoi_broadcast(val); + break; + } + + ioapic_update_kvm_routes(s); +} + +static const MemoryRegionOps ioapic_io_ops = { + .read = ioapic_mem_read, + .write = ioapic_mem_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void ioapic_machine_done_notify(Notifier *notifier, void *data) +{ +#ifdef CONFIG_KVM + IOAPICCommonState *s = container_of(notifier, IOAPICCommonState, + machine_done); + + if (kvm_irqchip_is_split()) { + X86IOMMUState *iommu = x86_iommu_get_default(); + if (iommu) { + /* Register this IOAPIC with IOMMU IEC notifier, so that + * when there are IR invalidates, we can be notified to + * update kernel IR cache. 
*/ + x86_iommu_iec_register_notifier(iommu, ioapic_iec_notifier, s); + } + } +#endif +} + +#define IOAPIC_VER_DEF 0x20 + +static void ioapic_realize(DeviceState *dev, Error **errp) +{ + IOAPICCommonState *s = IOAPIC_COMMON(dev); + + if (s->version != 0x11 && s->version != 0x20) { + error_setg(errp, "IOAPIC only supports version 0x11 or 0x20 " + "(default: 0x%x).", IOAPIC_VER_DEF); + return; + } + + memory_region_init_io(&s->io_memory, OBJECT(s), &ioapic_io_ops, s, + "ioapic", 0x1000); + + s->delayed_ioapic_service_timer = + timer_new_ns(QEMU_CLOCK_VIRTUAL, delayed_ioapic_service_cb, s); + + qdev_init_gpio_in(dev, ioapic_set_irq, IOAPIC_NUM_PINS); + + ioapics[ioapic_no] = s; + s->machine_done.notify = ioapic_machine_done_notify; + qemu_add_machine_init_done_notifier(&s->machine_done); +} + +static void ioapic_unrealize(DeviceState *dev) +{ + IOAPICCommonState *s = IOAPIC_COMMON(dev); + + timer_free(s->delayed_ioapic_service_timer); +} + +static Property ioapic_properties[] = { + DEFINE_PROP_UINT8("version", IOAPICCommonState, version, IOAPIC_VER_DEF), + DEFINE_PROP_END_OF_LIST(), +}; + +static void ioapic_class_init(ObjectClass *klass, void *data) +{ + IOAPICCommonClass *k = IOAPIC_COMMON_CLASS(klass); + DeviceClass *dc = DEVICE_CLASS(klass); + + k->realize = ioapic_realize; + k->unrealize = ioapic_unrealize; + /* + * If APIC is in kernel, we need to update the kernel cache after + * migration, otherwise first 24 gsi routes will be invalid. + */ + k->post_load = ioapic_update_kvm_routes; + dc->reset = ioapic_reset_common; + device_class_set_props(dc, ioapic_properties); +} + +static const TypeInfo ioapic_info = { + .name = TYPE_IOAPIC, + .parent = TYPE_IOAPIC_COMMON, + .instance_size = sizeof(IOAPICCommonState), + .class_init = ioapic_class_init, +}; + +static void ioapic_register_types(void) +{ + type_register_static(&ioapic_info); +} + +type_init(ioapic_register_types) diff --git a/hw/intc/ioapic_common.c b/hw/intc/ioapic_common.c new file mode 100644 index 000000000..3cccfc155 --- /dev/null +++ b/hw/intc/ioapic_common.c @@ -0,0 +1,224 @@ +/* + * IOAPIC emulation logic - common bits of emulated and KVM kernel model + * + * Copyright (c) 2004-2005 Fabrice Bellard + * Copyright (c) 2009 Xiantao Zhang, Intel + * Copyright (c) 2011 Jan Kiszka, Siemens AG + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/module.h" +#include "migration/vmstate.h" +#include "monitor/monitor.h" +#include "hw/i386/ioapic.h" +#include "hw/i386/ioapic_internal.h" +#include "hw/intc/intc.h" +#include "hw/sysbus.h" + +/* ioapic_no count start from 0 to MAX_IOAPICS, + * remove as static variable from ioapic_common_init. 
+ * now as a global variable, let child to increase the counter + * then we can drop the 'instance_no' argument + * and convert to our QOM's realize function + */ +int ioapic_no; + +void ioapic_stat_update_irq(IOAPICCommonState *s, int irq, int level) +{ + if (level != s->irq_level[irq]) { + s->irq_level[irq] = level; + if (level == 1) { + s->irq_count[irq]++; + } + } +} + +static bool ioapic_get_statistics(InterruptStatsProvider *obj, + uint64_t **irq_counts, + unsigned int *nb_irqs) +{ + IOAPICCommonState *s = IOAPIC_COMMON(obj); + + *irq_counts = s->irq_count; + *nb_irqs = IOAPIC_NUM_PINS; + + return true; +} + +static void ioapic_irr_dump(Monitor *mon, const char *name, uint32_t bitmap) +{ + int i; + + monitor_printf(mon, "%-10s ", name); + if (bitmap == 0) { + monitor_printf(mon, "(none)\n"); + return; + } + for (i = 0; i < IOAPIC_NUM_PINS; i++) { + if (bitmap & (1 << i)) { + monitor_printf(mon, "%-2u ", i); + } + } + monitor_printf(mon, "\n"); +} + +void ioapic_print_redtbl(Monitor *mon, IOAPICCommonState *s) +{ + static const char *delm_str[] = { + "fixed", "lowest", "SMI", "...", "NMI", "INIT", "...", "extINT"}; + uint32_t remote_irr = 0; + int i; + + monitor_printf(mon, "ioapic0: ver=0x%x id=0x%02x sel=0x%02x", + s->version, s->id, s->ioregsel); + if (s->ioregsel) { + monitor_printf(mon, " (redir[%u])\n", + (s->ioregsel - IOAPIC_REG_REDTBL_BASE) >> 1); + } else { + monitor_printf(mon, "\n"); + } + for (i = 0; i < IOAPIC_NUM_PINS; i++) { + uint64_t entry = s->ioredtbl[i]; + uint32_t delm = (uint32_t)((entry & IOAPIC_LVT_DELIV_MODE) >> + IOAPIC_LVT_DELIV_MODE_SHIFT); + monitor_printf(mon, " pin %-2u 0x%016"PRIx64" dest=%"PRIx64 + " vec=%-3"PRIu64" %s %-5s %-6s %-6s %s\n", + i, entry, + (entry >> IOAPIC_LVT_DEST_SHIFT) & + (entry & IOAPIC_LVT_DEST_MODE ? 0xff : 0xf), + entry & IOAPIC_VECTOR_MASK, + entry & IOAPIC_LVT_POLARITY ? "active-lo" : "active-hi", + entry & IOAPIC_LVT_TRIGGER_MODE ? "level" : "edge", + entry & IOAPIC_LVT_MASKED ? "masked" : "", + delm_str[delm], + entry & IOAPIC_LVT_DEST_MODE ? "logical" : "physical"); + + remote_irr |= entry & IOAPIC_LVT_TRIGGER_MODE ? + (entry & IOAPIC_LVT_REMOTE_IRR ? 
(1 << i) : 0) : 0; + } + ioapic_irr_dump(mon, " IRR", s->irr); + ioapic_irr_dump(mon, " Remote IRR", remote_irr); +} + +void ioapic_reset_common(DeviceState *dev) +{ + IOAPICCommonState *s = IOAPIC_COMMON(dev); + int i; + + s->id = 0; + s->ioregsel = 0; + s->irr = 0; + for (i = 0; i < IOAPIC_NUM_PINS; i++) { + s->ioredtbl[i] = 1 << IOAPIC_LVT_MASKED_SHIFT; + } +} + +static int ioapic_dispatch_pre_save(void *opaque) +{ + IOAPICCommonState *s = IOAPIC_COMMON(opaque); + IOAPICCommonClass *info = IOAPIC_COMMON_GET_CLASS(s); + + if (info->pre_save) { + info->pre_save(s); + } + + return 0; +} + +static int ioapic_dispatch_post_load(void *opaque, int version_id) +{ + IOAPICCommonState *s = IOAPIC_COMMON(opaque); + IOAPICCommonClass *info = IOAPIC_COMMON_GET_CLASS(s); + + if (info->post_load) { + info->post_load(s); + } + return 0; +} + +static void ioapic_common_realize(DeviceState *dev, Error **errp) +{ + IOAPICCommonState *s = IOAPIC_COMMON(dev); + IOAPICCommonClass *info; + + if (ioapic_no >= MAX_IOAPICS) { + error_setg(errp, "Only %d ioapics allowed", MAX_IOAPICS); + return; + } + + info = IOAPIC_COMMON_GET_CLASS(s); + info->realize(dev, errp); + + sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->io_memory); + ioapic_no++; +} + +static void ioapic_print_info(InterruptStatsProvider *obj, + Monitor *mon) +{ + IOAPICCommonState *s = IOAPIC_COMMON(obj); + + ioapic_dispatch_pre_save(s); + ioapic_print_redtbl(mon, s); +} + +static const VMStateDescription vmstate_ioapic_common = { + .name = "ioapic", + .version_id = 3, + .minimum_version_id = 1, + .pre_save = ioapic_dispatch_pre_save, + .post_load = ioapic_dispatch_post_load, + .fields = (VMStateField[]) { + VMSTATE_UINT8(id, IOAPICCommonState), + VMSTATE_UINT8(ioregsel, IOAPICCommonState), + VMSTATE_UNUSED_V(2, 8), /* to account for qemu-kvm's v2 format */ + VMSTATE_UINT32_V(irr, IOAPICCommonState, 2), + VMSTATE_UINT64_ARRAY(ioredtbl, IOAPICCommonState, IOAPIC_NUM_PINS), + VMSTATE_END_OF_LIST() + } +}; + +static void ioapic_common_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + InterruptStatsProviderClass *ic = INTERRUPT_STATS_PROVIDER_CLASS(klass); + + dc->realize = ioapic_common_realize; + dc->vmsd = &vmstate_ioapic_common; + ic->print_info = ioapic_print_info; + ic->get_statistics = ioapic_get_statistics; +} + +static const TypeInfo ioapic_common_type = { + .name = TYPE_IOAPIC_COMMON, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(IOAPICCommonState), + .class_size = sizeof(IOAPICCommonClass), + .class_init = ioapic_common_class_init, + .abstract = true, + .interfaces = (InterfaceInfo[]) { + { TYPE_INTERRUPT_STATS_PROVIDER }, + { } + }, +}; + +static void ioapic_common_register_types(void) +{ + type_register_static(&ioapic_common_type); +} + +type_init(ioapic_common_register_types) diff --git a/hw/intc/loongson_liointc.c b/hw/intc/loongson_liointc.c new file mode 100644 index 000000000..cc11b544c --- /dev/null +++ b/hw/intc/loongson_liointc.c @@ -0,0 +1,249 @@ +/* + * QEMU Loongson Local I/O interrupt controler. + * + * Copyright (c) 2020 Huacai Chen + * Copyright (c) 2020 Jiaxun Yang + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + */ + +#include "qemu/osdep.h" +#include "qemu/module.h" +#include "qemu/log.h" +#include "hw/irq.h" +#include "hw/qdev-properties.h" +#include "hw/intc/loongson_liointc.h" + +#define NUM_IRQS 32 + +#define NUM_CORES 4 +#define NUM_IPS 4 +#define NUM_PARENTS (NUM_CORES * NUM_IPS) +#define PARENT_COREx_IPy(x, y) (NUM_IPS * x + y) + +#define R_MAPPER_START 0x0 +#define R_MAPPER_END 0x20 +#define R_ISR R_MAPPER_END +#define R_IEN 0x24 +#define R_IEN_SET 0x28 +#define R_IEN_CLR 0x2c +#define R_ISR_SIZE 0x8 +#define R_START 0x40 +#define R_END (R_START + R_ISR_SIZE * NUM_CORES) + +struct loongson_liointc { + SysBusDevice parent_obj; + + MemoryRegion mmio; + qemu_irq parent_irq[NUM_PARENTS]; + + uint8_t mapper[NUM_IRQS]; /* 0:3 for core, 4:7 for IP */ + uint32_t isr; + uint32_t ien; + uint32_t per_core_isr[NUM_CORES]; + + /* state of the interrupt input pins */ + uint32_t pin_state; + bool parent_state[NUM_PARENTS]; +}; + +static void update_irq(struct loongson_liointc *p) +{ + uint32_t irq, core, ip; + uint32_t per_ip_isr[NUM_IPS] = {0}; + + /* level triggered interrupt */ + p->isr = p->pin_state; + + /* Clear disabled IRQs */ + p->isr &= p->ien; + + /* Clear per_core_isr */ + for (core = 0; core < NUM_CORES; core++) { + p->per_core_isr[core] = 0; + } + + /* Update per_core_isr and per_ip_isr */ + for (irq = 0; irq < NUM_IRQS; irq++) { + if (!(p->isr & (1 << irq))) { + continue; + } + + for (core = 0; core < NUM_CORES; core++) { + if ((p->mapper[irq] & (1 << core))) { + p->per_core_isr[core] |= (1 << irq); + } + } + + for (ip = 0; ip < NUM_IPS; ip++) { + if ((p->mapper[irq] & (1 << (ip + 4)))) { + per_ip_isr[ip] |= (1 << irq); + } + } + } + + /* Emit IRQ to parent! 
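
Each mapper[] byte routes one input line: the low nibble selects target core(s), the high nibble selects the IP line, and update_irq() below ANDs the two. A standalone sketch of that nibble test with a sample value:

#include <assert.h>
#include <stdint.h>

/* mapper byte: bits 0..3 select target core(s), bits 4..7 select the IP line. */
static int routes_to(uint8_t mapper, int core, int ip)
{
    return (mapper & (1 << core)) && (mapper & (1 << (ip + 4)));
}

int main(void)
{
    uint8_t m = 0x21;  /* core 0, IP 1 */
    assert(routes_to(m, 0, 1));
    assert(!routes_to(m, 1, 1));
    assert(!routes_to(m, 0, 0));
    return 0;
}
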
*/ + for (core = 0; core < NUM_CORES; core++) { + for (ip = 0; ip < NUM_IPS; ip++) { + int parent = PARENT_COREx_IPy(core, ip); + if (p->parent_state[parent] != + (!!p->per_core_isr[core] && !!per_ip_isr[ip])) { + p->parent_state[parent] = !p->parent_state[parent]; + qemu_set_irq(p->parent_irq[parent], p->parent_state[parent]); + } + } + } +} + +static uint64_t +liointc_read(void *opaque, hwaddr addr, unsigned int size) +{ + struct loongson_liointc *p = opaque; + uint32_t r = 0; + + /* Mapper is 1 byte */ + if (size == 1 && addr < R_MAPPER_END) { + r = p->mapper[addr]; + goto out; + } + + /* Rest are 4 bytes */ + if (size != 4 || (addr % 4)) { + goto out; + } + + if (addr >= R_START && addr < R_END) { + hwaddr offset = addr - R_START; + int core = offset / R_ISR_SIZE; + + if (offset % R_ISR_SIZE) { + goto out; + } + r = p->per_core_isr[core]; + goto out; + } + + switch (addr) { + case R_ISR: + r = p->isr; + break; + case R_IEN: + r = p->ien; + break; + default: + break; + } + +out: + qemu_log_mask(CPU_LOG_INT, "%s: size=%d, addr=%"HWADDR_PRIx", val=%x\n", + __func__, size, addr, r); + return r; +} + +static void +liointc_write(void *opaque, hwaddr addr, + uint64_t val64, unsigned int size) +{ + struct loongson_liointc *p = opaque; + uint32_t value = val64; + + qemu_log_mask(CPU_LOG_INT, "%s: size=%d, addr=%"HWADDR_PRIx", val=%x\n", + __func__, size, addr, value); + + /* Mapper is 1 byte */ + if (size == 1 && addr < R_MAPPER_END) { + p->mapper[addr] = value; + goto out; + } + + /* Rest are 4 bytes */ + if (size != 4 || (addr % 4)) { + goto out; + } + + if (addr >= R_START && addr < R_END) { + hwaddr offset = addr - R_START; + int core = offset / R_ISR_SIZE; + + if (offset % R_ISR_SIZE) { + goto out; + } + p->per_core_isr[core] = value; + goto out; + } + + switch (addr) { + case R_IEN_SET: + p->ien |= value; + break; + case R_IEN_CLR: + p->ien &= ~value; + break; + default: + break; + } + +out: + update_irq(p); +} + +static const MemoryRegionOps pic_ops = { + .read = liointc_read, + .write = liointc_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + .min_access_size = 1, + .max_access_size = 4 + } +}; + +static void irq_handler(void *opaque, int irq, int level) +{ + struct loongson_liointc *p = opaque; + + p->pin_state &= ~(1 << irq); + p->pin_state |= level << irq; + update_irq(p); +} + +static void loongson_liointc_init(Object *obj) +{ + struct loongson_liointc *p = LOONGSON_LIOINTC(obj); + int i; + + qdev_init_gpio_in(DEVICE(obj), irq_handler, 32); + + for (i = 0; i < NUM_PARENTS; i++) { + sysbus_init_irq(SYS_BUS_DEVICE(obj), &p->parent_irq[i]); + } + + memory_region_init_io(&p->mmio, obj, &pic_ops, p, + TYPE_LOONGSON_LIOINTC, R_END); + sysbus_init_mmio(SYS_BUS_DEVICE(obj), &p->mmio); +} + +static const TypeInfo loongson_liointc_info = { + .name = TYPE_LOONGSON_LIOINTC, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(struct loongson_liointc), + .instance_init = loongson_liointc_init, +}; + +static void loongson_liointc_register_types(void) +{ + type_register_static(&loongson_liointc_info); +} + +type_init(loongson_liointc_register_types) diff --git a/hw/intc/m68k_irqc.c b/hw/intc/m68k_irqc.c new file mode 100644 index 000000000..0c515e4ec --- /dev/null +++ b/hw/intc/m68k_irqc.c @@ -0,0 +1,119 @@ +/* + * SPDX-License-Identifier: GPL-2.0-or-later + * + * QEMU Motorola 680x0 IRQ Controller + * + * (c) 2020 Laurent Vivier + * + */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "migration/vmstate.h" +#include "monitor/monitor.h" +#include "hw/nmi.h" +#include 
"hw/intc/intc.h" +#include "hw/intc/m68k_irqc.h" + + +static bool m68k_irqc_get_statistics(InterruptStatsProvider *obj, + uint64_t **irq_counts, unsigned int *nb_irqs) +{ + M68KIRQCState *s = M68K_IRQC(obj); + + *irq_counts = s->stats_irq_count; + *nb_irqs = ARRAY_SIZE(s->stats_irq_count); + return true; +} + +static void m68k_irqc_print_info(InterruptStatsProvider *obj, Monitor *mon) +{ + M68KIRQCState *s = M68K_IRQC(obj); + monitor_printf(mon, "m68k-irqc: ipr=0x%x\n", s->ipr); +} + +static void m68k_set_irq(void *opaque, int irq, int level) +{ + M68KIRQCState *s = opaque; + M68kCPU *cpu = M68K_CPU(first_cpu); + int i; + + if (level) { + s->ipr |= 1 << irq; + s->stats_irq_count[irq]++; + } else { + s->ipr &= ~(1 << irq); + } + + for (i = M68K_IRQC_LEVEL_7; i >= M68K_IRQC_LEVEL_1; i--) { + if ((s->ipr >> i) & 1) { + m68k_set_irq_level(cpu, i + 1, i + M68K_IRQC_AUTOVECTOR_BASE); + return; + } + } + m68k_set_irq_level(cpu, 0, 0); +} + +static void m68k_irqc_reset(DeviceState *d) +{ + M68KIRQCState *s = M68K_IRQC(d); + int i; + + s->ipr = 0; + for (i = 0; i < ARRAY_SIZE(s->stats_irq_count); i++) { + s->stats_irq_count[i] = 0; + } +} + +static void m68k_irqc_instance_init(Object *obj) +{ + qdev_init_gpio_in(DEVICE(obj), m68k_set_irq, M68K_IRQC_LEVEL_NUM); +} + +static void m68k_nmi(NMIState *n, int cpu_index, Error **errp) +{ + m68k_set_irq(n, M68K_IRQC_LEVEL_7, 1); +} + +static const VMStateDescription vmstate_m68k_irqc = { + .name = "m68k-irqc", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT8(ipr, M68KIRQCState), + VMSTATE_END_OF_LIST() + } +}; + +static void m68k_irqc_class_init(ObjectClass *oc, void *data) + { + DeviceClass *dc = DEVICE_CLASS(oc); + NMIClass *nc = NMI_CLASS(oc); + InterruptStatsProviderClass *ic = INTERRUPT_STATS_PROVIDER_CLASS(oc); + + nc->nmi_monitor_handler = m68k_nmi; + dc->reset = m68k_irqc_reset; + dc->vmsd = &vmstate_m68k_irqc; + ic->get_statistics = m68k_irqc_get_statistics; + ic->print_info = m68k_irqc_print_info; +} + +static const TypeInfo m68k_irqc_type_info = { + .name = TYPE_M68K_IRQC, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(M68KIRQCState), + .instance_init = m68k_irqc_instance_init, + .class_init = m68k_irqc_class_init, + .interfaces = (InterfaceInfo[]) { + { TYPE_NMI }, + { TYPE_INTERRUPT_STATS_PROVIDER }, + { } + }, +}; + +static void q800_irq_register_types(void) +{ + type_register_static(&m68k_irqc_type_info); +} + +type_init(q800_irq_register_types); diff --git a/hw/intc/meson.build b/hw/intc/meson.build new file mode 100644 index 000000000..c89d2ca18 --- /dev/null +++ b/hw/intc/meson.build @@ -0,0 +1,59 @@ +softmmu_ss.add(files('intc.c')) +softmmu_ss.add(when: 'CONFIG_ARM_GIC', if_true: files( + 'arm_gic.c', + 'arm_gic_common.c', + 'arm_gicv2m.c', + 'arm_gicv3.c', + 'arm_gicv3_common.c', + 'arm_gicv3_dist.c', + 'arm_gicv3_its_common.c', + 'arm_gicv3_redist.c', + 'arm_gicv3_its.c', +)) +softmmu_ss.add(when: 'CONFIG_ETRAXFS', if_true: files('etraxfs_pic.c')) +softmmu_ss.add(when: 'CONFIG_HEATHROW_PIC', if_true: files('heathrow_pic.c')) +softmmu_ss.add(when: 'CONFIG_I8259', if_true: files('i8259_common.c', 'i8259.c')) +softmmu_ss.add(when: 'CONFIG_IMX', if_true: files('imx_avic.c', 'imx_gpcv2.c')) +softmmu_ss.add(when: 'CONFIG_IOAPIC', if_true: files('ioapic_common.c')) +softmmu_ss.add(when: 'CONFIG_OPENPIC', if_true: files('openpic.c')) +softmmu_ss.add(when: 'CONFIG_PL190', if_true: files('pl190.c')) +softmmu_ss.add(when: 'CONFIG_REALVIEW', if_true: files('realview_gic.c')) 
+softmmu_ss.add(when: 'CONFIG_SLAVIO', if_true: files('slavio_intctl.c')) +softmmu_ss.add(when: 'CONFIG_XILINX', if_true: files('xilinx_intc.c')) +softmmu_ss.add(when: 'CONFIG_XLNX_ZYNQMP', if_true: files('xlnx-zynqmp-ipi.c')) +softmmu_ss.add(when: 'CONFIG_XLNX_ZYNQMP_PMU', if_true: files('xlnx-pmu-iomod-intc.c')) + +specific_ss.add(when: 'CONFIG_ALLWINNER_A10_PIC', if_true: files('allwinner-a10-pic.c')) +specific_ss.add(when: 'CONFIG_APIC', if_true: files('apic.c', 'apic_common.c')) +specific_ss.add(when: 'CONFIG_ARM_GIC', if_true: files('arm_gicv3_cpuif.c')) +specific_ss.add(when: 'CONFIG_ARM_GIC_KVM', if_true: files('arm_gic_kvm.c')) +specific_ss.add(when: ['CONFIG_ARM_GIC_KVM', 'TARGET_AARCH64'], if_true: files('arm_gicv3_kvm.c', 'arm_gicv3_its_kvm.c')) +specific_ss.add(when: 'CONFIG_ARM_V7M', if_true: files('armv7m_nvic.c')) +specific_ss.add(when: 'CONFIG_ASPEED_SOC', if_true: files('aspeed_vic.c')) +specific_ss.add(when: 'CONFIG_EXYNOS4', if_true: files('exynos4210_gic.c', 'exynos4210_combiner.c')) +specific_ss.add(when: 'CONFIG_GRLIB', if_true: files('grlib_irqmp.c')) +specific_ss.add(when: 'CONFIG_IOAPIC', if_true: files('ioapic.c')) +specific_ss.add(when: 'CONFIG_LOONGSON_LIOINTC', if_true: files('loongson_liointc.c')) +specific_ss.add(when: 'CONFIG_MIPS_CPS', if_true: files('mips_gic.c')) +specific_ss.add(when: 'CONFIG_OMAP', if_true: files('omap_intc.c')) +specific_ss.add(when: 'CONFIG_OMPIC', if_true: files('ompic.c')) +specific_ss.add(when: ['CONFIG_KVM', 'CONFIG_OPENPIC'], + if_true: files('openpic_kvm.c')) +specific_ss.add(when: 'CONFIG_POWERNV', if_true: files('xics_pnv.c', 'pnv_xive.c')) +specific_ss.add(when: 'CONFIG_PPC_UIC', if_true: files('ppc-uic.c')) +specific_ss.add(when: 'CONFIG_RASPI', if_true: files('bcm2835_ic.c', 'bcm2836_control.c')) +specific_ss.add(when: 'CONFIG_RX_ICU', if_true: files('rx_icu.c')) +specific_ss.add(when: 'CONFIG_S390_FLIC', if_true: files('s390_flic.c')) +specific_ss.add(when: 'CONFIG_S390_FLIC_KVM', if_true: files('s390_flic_kvm.c')) +specific_ss.add(when: 'CONFIG_SH_INTC', if_true: files('sh_intc.c')) +specific_ss.add(when: 'CONFIG_RISCV_ACLINT', if_true: files('riscv_aclint.c')) +specific_ss.add(when: 'CONFIG_SIFIVE_PLIC', if_true: files('sifive_plic.c')) +specific_ss.add(when: 'CONFIG_XICS', if_true: files('xics.c')) +specific_ss.add(when: ['CONFIG_KVM', 'CONFIG_XICS'], + if_true: files('xics_kvm.c')) +specific_ss.add(when: 'CONFIG_PSERIES', if_true: files('xics_spapr.c', 'spapr_xive.c')) +specific_ss.add(when: 'CONFIG_XIVE', if_true: files('xive.c')) +specific_ss.add(when: ['CONFIG_KVM', 'CONFIG_XIVE'], + if_true: files('spapr_xive_kvm.c')) +specific_ss.add(when: 'CONFIG_GOLDFISH_PIC', if_true: files('goldfish_pic.c')) +specific_ss.add(when: 'CONFIG_M68K_IRQC', if_true: files('m68k_irqc.c')) diff --git a/hw/intc/mips_gic.c b/hw/intc/mips_gic.c new file mode 100644 index 000000000..bda454992 --- /dev/null +++ b/hw/intc/mips_gic.c @@ -0,0 +1,468 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. 
+ * Authors: Sanjay Lal + * + * Copyright (C) 2016 Imagination Technologies + */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "qapi/error.h" +#include "hw/sysbus.h" +#include "exec/memory.h" +#include "sysemu/kvm.h" +#include "sysemu/reset.h" +#include "kvm_mips.h" +#include "hw/intc/mips_gic.h" +#include "hw/irq.h" +#include "hw/qdev-properties.h" + +static void mips_gic_set_vp_irq(MIPSGICState *gic, int vp, int pin) +{ + int ored_level = 0; + int i; + + /* ORing pending registers sharing same pin */ + for (i = 0; i < gic->num_irq; i++) { + if ((gic->irq_state[i].map_pin & GIC_MAP_MSK) == pin && + gic->irq_state[i].map_vp == vp && + gic->irq_state[i].enabled) { + ored_level |= gic->irq_state[i].pending; + } + if (ored_level) { + /* no need to iterate all interrupts */ + break; + } + } + if (((gic->vps[vp].compare_map & GIC_MAP_MSK) == pin) && + (gic->vps[vp].mask & GIC_VP_MASK_CMP_MSK)) { + /* ORing with local pending register (count/compare) */ + ored_level |= (gic->vps[vp].pend & GIC_VP_MASK_CMP_MSK) >> + GIC_VP_MASK_CMP_SHF; + } + if (kvm_enabled()) { + kvm_mips_set_ipi_interrupt(env_archcpu(gic->vps[vp].env), + pin + GIC_CPU_PIN_OFFSET, + ored_level); + } else { + qemu_set_irq(gic->vps[vp].env->irq[pin + GIC_CPU_PIN_OFFSET], + ored_level); + } +} + +static void gic_update_pin_for_irq(MIPSGICState *gic, int n_IRQ) +{ + int vp = gic->irq_state[n_IRQ].map_vp; + int pin = gic->irq_state[n_IRQ].map_pin & GIC_MAP_MSK; + + if (vp < 0 || vp >= gic->num_vps) { + return; + } + mips_gic_set_vp_irq(gic, vp, pin); +} + +static void gic_set_irq(void *opaque, int n_IRQ, int level) +{ + MIPSGICState *gic = (MIPSGICState *) opaque; + + gic->irq_state[n_IRQ].pending = (uint8_t) level; + if (!gic->irq_state[n_IRQ].enabled) { + /* GIC interrupt source disabled */ + return; + } + gic_update_pin_for_irq(gic, n_IRQ); +} + +#define OFFSET_CHECK(c) \ + do { \ + if (!(c)) { \ + goto bad_offset; \ + } \ + } while (0) + +/* GIC Read VP Local/Other Registers */ +static uint64_t gic_read_vp(MIPSGICState *gic, uint32_t vp_index, hwaddr addr, + unsigned size) +{ + switch (addr) { + case GIC_VP_CTL_OFS: + return gic->vps[vp_index].ctl; + case GIC_VP_PEND_OFS: + mips_gictimer_get_sh_count(gic->gic_timer); + return gic->vps[vp_index].pend; + case GIC_VP_MASK_OFS: + return gic->vps[vp_index].mask; + case GIC_VP_COMPARE_MAP_OFS: + return gic->vps[vp_index].compare_map; + case GIC_VP_OTHER_ADDR_OFS: + return gic->vps[vp_index].other_addr; + case GIC_VP_IDENT_OFS: + return vp_index; + case GIC_VP_COMPARE_LO_OFS: + return mips_gictimer_get_vp_compare(gic->gic_timer, vp_index); + case GIC_VP_COMPARE_HI_OFS: + return 0; + default: + qemu_log_mask(LOG_UNIMP, "Read %d bytes at GIC offset LOCAL/OTHER 0x%" + PRIx64 "\n", size, addr); + break; + } + return 0; +} + +static uint64_t gic_read(void *opaque, hwaddr addr, unsigned size) +{ + MIPSGICState *gic = (MIPSGICState *) opaque; + uint32_t vp_index = current_cpu->cpu_index; + uint64_t ret = 0; + int i, base, irq_src; + uint32_t other_index; + + switch (addr) { + case GIC_SH_CONFIG_OFS: + ret = gic->sh_config | (mips_gictimer_get_countstop(gic->gic_timer) << + GIC_SH_CONFIG_COUNTSTOP_SHF); + break; + case GIC_SH_COUNTERLO_OFS: + ret = mips_gictimer_get_sh_count(gic->gic_timer); + break; + case GIC_SH_COUNTERHI_OFS: + ret = 0; + break; + case GIC_SH_PEND_OFS ... 
GIC_SH_PEND_LAST_OFS: + /* each bit represents pending status for an interrupt pin */ + base = (addr - GIC_SH_PEND_OFS) * 8; + OFFSET_CHECK((base + size * 8) <= gic->num_irq); + for (i = 0; i < size * 8; i++) { + ret |= (uint64_t) (gic->irq_state[base + i].pending) << i; + } + break; + case GIC_SH_MASK_OFS ... GIC_SH_MASK_LAST_OFS: + /* each bit represents status for an interrupt pin */ + base = (addr - GIC_SH_MASK_OFS) * 8; + OFFSET_CHECK((base + size * 8) <= gic->num_irq); + for (i = 0; i < size * 8; i++) { + ret |= (uint64_t) (gic->irq_state[base + i].enabled) << i; + } + break; + case GIC_SH_MAP0_PIN_OFS ... GIC_SH_MAP255_PIN_OFS: + /* 32 bits per a pin */ + irq_src = (addr - GIC_SH_MAP0_PIN_OFS) / 4; + OFFSET_CHECK(irq_src < gic->num_irq); + ret = gic->irq_state[irq_src].map_pin; + break; + case GIC_SH_MAP0_VP_OFS ... GIC_SH_MAP255_VP_LAST_OFS: + /* up to 32 bytes per a pin */ + irq_src = (addr - GIC_SH_MAP0_VP_OFS) / 32; + OFFSET_CHECK(irq_src < gic->num_irq); + if ((gic->irq_state[irq_src].map_vp) >= 0) { + ret = (uint64_t) 1 << (gic->irq_state[irq_src].map_vp); + } else { + ret = 0; + } + break; + /* VP-Local Register */ + case VP_LOCAL_SECTION_OFS ... (VP_LOCAL_SECTION_OFS + GIC_VL_BRK_GROUP): + ret = gic_read_vp(gic, vp_index, addr - VP_LOCAL_SECTION_OFS, size); + break; + /* VP-Other Register */ + case VP_OTHER_SECTION_OFS ... (VP_OTHER_SECTION_OFS + GIC_VL_BRK_GROUP): + other_index = gic->vps[vp_index].other_addr; + ret = gic_read_vp(gic, other_index, addr - VP_OTHER_SECTION_OFS, size); + break; + /* User-Mode Visible section */ + case USM_VISIBLE_SECTION_OFS + GIC_USER_MODE_COUNTERLO: + ret = mips_gictimer_get_sh_count(gic->gic_timer); + break; + case USM_VISIBLE_SECTION_OFS + GIC_USER_MODE_COUNTERHI: + ret = 0; + break; + default: + qemu_log_mask(LOG_UNIMP, "Read %d bytes at GIC offset 0x%" PRIx64 "\n", + size, addr); + break; + } + return ret; +bad_offset: + qemu_log_mask(LOG_GUEST_ERROR, "Wrong GIC offset at 0x%" PRIx64 "\n", addr); + return 0; +} + +static void gic_timer_expire_cb(void *opaque, uint32_t vp_index) +{ + MIPSGICState *gic = opaque; + + gic->vps[vp_index].pend |= (1 << GIC_LOCAL_INT_COMPARE); + if (gic->vps[vp_index].pend & + (gic->vps[vp_index].mask & GIC_VP_MASK_CMP_MSK)) { + if (gic->vps[vp_index].compare_map & GIC_MAP_TO_PIN_MSK) { + /* it is safe to set the irq high regardless of other GIC IRQs */ + uint32_t pin = (gic->vps[vp_index].compare_map & GIC_MAP_MSK); + qemu_irq_raise(gic->vps[vp_index].env->irq + [pin + GIC_CPU_PIN_OFFSET]); + } + } +} + +static void gic_timer_store_vp_compare(MIPSGICState *gic, uint32_t vp_index, + uint64_t compare) +{ + gic->vps[vp_index].pend &= ~(1 << GIC_LOCAL_INT_COMPARE); + if (gic->vps[vp_index].compare_map & GIC_MAP_TO_PIN_MSK) { + uint32_t pin = (gic->vps[vp_index].compare_map & GIC_MAP_MSK); + mips_gic_set_vp_irq(gic, vp_index, pin); + } + mips_gictimer_store_vp_compare(gic->gic_timer, vp_index, compare); +} + +/* GIC Write VP Local/Other Registers */ +static void gic_write_vp(MIPSGICState *gic, uint32_t vp_index, hwaddr addr, + uint64_t data, unsigned size) +{ + switch (addr) { + case GIC_VP_CTL_OFS: + /* EIC isn't supported */ + break; + case GIC_VP_RMASK_OFS: + gic->vps[vp_index].mask &= ~(data & GIC_VP_SET_RESET_MSK) & + GIC_VP_SET_RESET_MSK; + break; + case GIC_VP_SMASK_OFS: + gic->vps[vp_index].mask |= data & GIC_VP_SET_RESET_MSK; + break; + case GIC_VP_COMPARE_MAP_OFS: + /* EIC isn't supported */ + OFFSET_CHECK((data & GIC_MAP_MSK) <= GIC_CPU_INT_MAX); + gic->vps[vp_index].compare_map = data & 
GIC_MAP_TO_PIN_REG_MSK; + break; + case GIC_VP_OTHER_ADDR_OFS: + OFFSET_CHECK(data < gic->num_vps); + gic->vps[vp_index].other_addr = data; + break; + case GIC_VP_COMPARE_LO_OFS: + gic_timer_store_vp_compare(gic, vp_index, data); + break; + default: + qemu_log_mask(LOG_UNIMP, "Write %d bytes at GIC offset LOCAL/OTHER " + "0x%" PRIx64" 0x%08" PRIx64 "\n", size, addr, data); + break; + } + return; +bad_offset: + qemu_log_mask(LOG_GUEST_ERROR, "Wrong GIC offset at 0x%" PRIx64 "\n", addr); + return; +} + +static void gic_write(void *opaque, hwaddr addr, uint64_t data, unsigned size) +{ + int intr; + MIPSGICState *gic = (MIPSGICState *) opaque; + uint32_t vp_index = current_cpu->cpu_index; + int i, base, irq_src; + uint32_t other_index; + + switch (addr) { + case GIC_SH_CONFIG_OFS: + { + uint32_t pre_cntstop = mips_gictimer_get_countstop(gic->gic_timer); + uint32_t new_cntstop = (data & GIC_SH_CONFIG_COUNTSTOP_MSK) >> + GIC_SH_CONFIG_COUNTSTOP_SHF; + if (pre_cntstop != new_cntstop) { + if (new_cntstop == 1) { + mips_gictimer_stop_count(gic->gic_timer); + } else { + mips_gictimer_start_count(gic->gic_timer); + } + } + } + break; + case GIC_SH_COUNTERLO_OFS: + if (mips_gictimer_get_countstop(gic->gic_timer)) { + mips_gictimer_store_sh_count(gic->gic_timer, data); + } + break; + case GIC_SH_RMASK_OFS ... GIC_SH_RMASK_LAST_OFS: + /* up to 64 bits per a pin */ + base = (addr - GIC_SH_RMASK_OFS) * 8; + OFFSET_CHECK((base + size * 8) <= gic->num_irq); + for (i = 0; i < size * 8; i++) { + gic->irq_state[base + i].enabled &= !((data >> i) & 1); + gic_update_pin_for_irq(gic, base + i); + } + break; + case GIC_SH_WEDGE_OFS: + /* Figure out which VP/HW Interrupt this maps to */ + intr = data & ~GIC_SH_WEDGE_RW_MSK; + /* Mask/Enabled Checks */ + OFFSET_CHECK(intr < gic->num_irq); + if (data & GIC_SH_WEDGE_RW_MSK) { + gic_set_irq(gic, intr, 1); + } else { + gic_set_irq(gic, intr, 0); + } + break; + case GIC_SH_SMASK_OFS ... GIC_SH_SMASK_LAST_OFS: + /* up to 64 bits per a pin */ + base = (addr - GIC_SH_SMASK_OFS) * 8; + OFFSET_CHECK((base + size * 8) <= gic->num_irq); + for (i = 0; i < size * 8; i++) { + gic->irq_state[base + i].enabled |= (data >> i) & 1; + gic_update_pin_for_irq(gic, base + i); + } + break; + case GIC_SH_MAP0_PIN_OFS ... GIC_SH_MAP255_PIN_OFS: + /* 32 bits per a pin */ + irq_src = (addr - GIC_SH_MAP0_PIN_OFS) / 4; + OFFSET_CHECK(irq_src < gic->num_irq); + /* EIC isn't supported */ + OFFSET_CHECK((data & GIC_MAP_MSK) <= GIC_CPU_INT_MAX); + gic->irq_state[irq_src].map_pin = data & GIC_MAP_TO_PIN_REG_MSK; + break; + case GIC_SH_MAP0_VP_OFS ... GIC_SH_MAP255_VP_LAST_OFS: + /* up to 32 bytes per a pin */ + irq_src = (addr - GIC_SH_MAP0_VP_OFS) / 32; + OFFSET_CHECK(irq_src < gic->num_irq); + data = data ? ctz64(data) : -1; + OFFSET_CHECK(data < gic->num_vps); + gic->irq_state[irq_src].map_vp = data; + break; + case VP_LOCAL_SECTION_OFS ... (VP_LOCAL_SECTION_OFS + GIC_VL_BRK_GROUP): + gic_write_vp(gic, vp_index, addr - VP_LOCAL_SECTION_OFS, data, size); + break; + case VP_OTHER_SECTION_OFS ... (VP_OTHER_SECTION_OFS + GIC_VL_BRK_GROUP): + other_index = gic->vps[vp_index].other_addr; + gic_write_vp(gic, other_index, addr - VP_OTHER_SECTION_OFS, data, size); + break; + case USM_VISIBLE_SECTION_OFS + GIC_USER_MODE_COUNTERLO: + case USM_VISIBLE_SECTION_OFS + GIC_USER_MODE_COUNTERHI: + /* do nothing. 
Read-only section */ + break; + default: + qemu_log_mask(LOG_UNIMP, "Write %d bytes at GIC offset 0x%" PRIx64 + " 0x%08" PRIx64 "\n", size, addr, data); + break; + } + return; +bad_offset: + qemu_log_mask(LOG_GUEST_ERROR, "Wrong GIC offset at 0x%" PRIx64 "\n", addr); +} + +static void gic_reset(void *opaque) +{ + int i; + MIPSGICState *gic = (MIPSGICState *) opaque; + int numintrs = (gic->num_irq / 8) - 1; + + gic->sh_config = /* COUNTSTOP = 0 it is accessible via MIPSGICTimer*/ + /* CounterHi not implemented */ + (0 << GIC_SH_CONFIG_COUNTBITS_SHF) | + (numintrs << GIC_SH_CONFIG_NUMINTRS_SHF) | + (gic->num_vps << GIC_SH_CONFIG_PVPS_SHF); + for (i = 0; i < gic->num_vps; i++) { + gic->vps[i].ctl = 0x0; + gic->vps[i].pend = 0x0; + /* PERFCNT, TIMER and WD not implemented */ + gic->vps[i].mask = 0x32; + gic->vps[i].compare_map = GIC_MAP_TO_PIN_MSK; + mips_gictimer_store_vp_compare(gic->gic_timer, i, 0xffffffff); + gic->vps[i].other_addr = 0x0; + } + for (i = 0; i < gic->num_irq; i++) { + gic->irq_state[i].enabled = 0; + gic->irq_state[i].pending = 0; + gic->irq_state[i].map_pin = GIC_MAP_TO_PIN_MSK; + gic->irq_state[i].map_vp = -1; + } + mips_gictimer_store_sh_count(gic->gic_timer, 0); + /* COUNTSTOP = 0 */ + mips_gictimer_start_count(gic->gic_timer); +} + +static const MemoryRegionOps gic_ops = { + .read = gic_read, + .write = gic_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .impl = { + .max_access_size = 8, + }, +}; + +static void mips_gic_init(Object *obj) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + MIPSGICState *s = MIPS_GIC(obj); + + memory_region_init_io(&s->mr, OBJECT(s), &gic_ops, s, + "mips-gic", GIC_ADDRSPACE_SZ); + sysbus_init_mmio(sbd, &s->mr); + qemu_register_reset(gic_reset, s); +} + +static void mips_gic_realize(DeviceState *dev, Error **errp) +{ + MIPSGICState *s = MIPS_GIC(dev); + CPUState *cs = first_cpu; + int i; + + if (s->num_vps > GIC_MAX_VPS) { + error_setg(errp, "Exceeded maximum CPUs %d", s->num_vps); + return; + } + if ((s->num_irq > GIC_MAX_INTRS) || (s->num_irq % 8) || (s->num_irq <= 0)) { + error_setg(errp, "GIC supports up to %d external interrupts in " + "multiples of 8 : %d", GIC_MAX_INTRS, s->num_irq); + return; + } + s->vps = g_new(MIPSGICVPState, s->num_vps); + s->irq_state = g_new(MIPSGICIRQState, s->num_irq); + /* Register the env for all VPs with the GIC */ + for (i = 0; i < s->num_vps; i++) { + if (cs != NULL) { + s->vps[i].env = cs->env_ptr; + cs = CPU_NEXT(cs); + } else { + error_setg(errp, + "Unable to initialize GIC, CPUState for CPU#%d not valid.", i); + return; + } + } + s->gic_timer = mips_gictimer_init(s, s->num_vps, gic_timer_expire_cb); + qdev_init_gpio_in(dev, gic_set_irq, s->num_irq); + for (i = 0; i < s->num_irq; i++) { + s->irq_state[i].irq = qdev_get_gpio_in(dev, i); + } +} + +static Property mips_gic_properties[] = { + DEFINE_PROP_INT32("num-vp", MIPSGICState, num_vps, 1), + DEFINE_PROP_INT32("num-irq", MIPSGICState, num_irq, 256), + DEFINE_PROP_END_OF_LIST(), +}; + +static void mips_gic_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + device_class_set_props(dc, mips_gic_properties); + dc->realize = mips_gic_realize; +} + +static const TypeInfo mips_gic_info = { + .name = TYPE_MIPS_GIC, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(MIPSGICState), + .instance_init = mips_gic_init, + .class_init = mips_gic_class_init, +}; + +static void mips_gic_register_types(void) +{ + type_register_static(&mips_gic_info); +} + +type_init(mips_gic_register_types) diff --git a/hw/intc/omap_intc.c 
b/hw/intc/omap_intc.c new file mode 100644 index 000000000..d7183d035 --- /dev/null +++ b/hw/intc/omap_intc.c @@ -0,0 +1,690 @@ +/* + * TI OMAP interrupt controller emulation. + * + * Copyright (C) 2006-2008 Andrzej Zaborowski + * Copyright (C) 2007-2008 Nokia Corporation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 or + * (at your option) version 3 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . + */ + +#include "qemu/osdep.h" +#include "hw/irq.h" +#include "hw/qdev-properties.h" +#include "hw/arm/omap.h" +#include "hw/sysbus.h" +#include "qemu/error-report.h" +#include "qemu/module.h" +#include "qapi/error.h" + +/* Interrupt Handlers */ +struct omap_intr_handler_bank_s { + uint32_t irqs; + uint32_t inputs; + uint32_t mask; + uint32_t fiq; + uint32_t sens_edge; + uint32_t swi; + unsigned char priority[32]; +}; + +struct omap_intr_handler_s { + SysBusDevice parent_obj; + + qemu_irq *pins; + qemu_irq parent_intr[2]; + MemoryRegion mmio; + void *iclk; + void *fclk; + unsigned char nbanks; + int level_only; + uint32_t size; + + uint8_t revision; + + /* state */ + uint32_t new_agr[2]; + int sir_intr[2]; + int autoidle; + uint32_t mask; + struct omap_intr_handler_bank_s bank[3]; +}; + +static void omap_inth_sir_update(struct omap_intr_handler_s *s, int is_fiq) +{ + int i, j, sir_intr, p_intr, p; + uint32_t level; + sir_intr = 0; + p_intr = 255; + + /* Find the interrupt line with the highest dynamic priority. + * Note: 0 denotes the hightest priority. + * If all interrupts have the same priority, the default order is IRQ_N, + * IRQ_N-1,...,IRQ_0. */ + for (j = 0; j < s->nbanks; ++j) { + level = s->bank[j].irqs & ~s->bank[j].mask & + (is_fiq ? s->bank[j].fiq : ~s->bank[j].fiq); + + while (level != 0) { + i = ctz32(level); + p = s->bank[j].priority[i]; + if (p <= p_intr) { + p_intr = p; + sir_intr = 32 * j + i; + } + level &= level - 1; + } + } + s->sir_intr[is_fiq] = sir_intr; +} + +static inline void omap_inth_update(struct omap_intr_handler_s *s, int is_fiq) +{ + int i; + uint32_t has_intr = 0; + + for (i = 0; i < s->nbanks; ++i) + has_intr |= s->bank[i].irqs & ~s->bank[i].mask & + (is_fiq ? 
s->bank[i].fiq : ~s->bank[i].fiq); + + if (s->new_agr[is_fiq] & has_intr & s->mask) { + s->new_agr[is_fiq] = 0; + omap_inth_sir_update(s, is_fiq); + qemu_set_irq(s->parent_intr[is_fiq], 1); + } +} + +#define INT_FALLING_EDGE 0 +#define INT_LOW_LEVEL 1 + +static void omap_set_intr(void *opaque, int irq, int req) +{ + struct omap_intr_handler_s *ih = (struct omap_intr_handler_s *) opaque; + uint32_t rise; + + struct omap_intr_handler_bank_s *bank = &ih->bank[irq >> 5]; + int n = irq & 31; + + if (req) { + rise = ~bank->irqs & (1 << n); + if (~bank->sens_edge & (1 << n)) + rise &= ~bank->inputs; + + bank->inputs |= (1 << n); + if (rise) { + bank->irqs |= rise; + omap_inth_update(ih, 0); + omap_inth_update(ih, 1); + } + } else { + rise = bank->sens_edge & bank->irqs & (1 << n); + bank->irqs &= ~rise; + bank->inputs &= ~(1 << n); + } +} + +/* Simplified version with no edge detection */ +static void omap_set_intr_noedge(void *opaque, int irq, int req) +{ + struct omap_intr_handler_s *ih = (struct omap_intr_handler_s *) opaque; + uint32_t rise; + + struct omap_intr_handler_bank_s *bank = &ih->bank[irq >> 5]; + int n = irq & 31; + + if (req) { + rise = ~bank->inputs & (1 << n); + if (rise) { + bank->irqs |= bank->inputs |= rise; + omap_inth_update(ih, 0); + omap_inth_update(ih, 1); + } + } else + bank->irqs = (bank->inputs &= ~(1 << n)) | bank->swi; +} + +static uint64_t omap_inth_read(void *opaque, hwaddr addr, + unsigned size) +{ + struct omap_intr_handler_s *s = (struct omap_intr_handler_s *) opaque; + int i, offset = addr; + int bank_no = offset >> 8; + int line_no; + struct omap_intr_handler_bank_s *bank = &s->bank[bank_no]; + offset &= 0xff; + + switch (offset) { + case 0x00: /* ITR */ + return bank->irqs; + + case 0x04: /* MIR */ + return bank->mask; + + case 0x10: /* SIR_IRQ_CODE */ + case 0x14: /* SIR_FIQ_CODE */ + if (bank_no != 0) + break; + line_no = s->sir_intr[(offset - 0x10) >> 2]; + bank = &s->bank[line_no >> 5]; + i = line_no & 31; + if (((bank->sens_edge >> i) & 1) == INT_FALLING_EDGE) + bank->irqs &= ~(1 << i); + return line_no; + + case 0x18: /* CONTROL_REG */ + if (bank_no != 0) + break; + return 0; + + case 0x1c: /* ILR0 */ + case 0x20: /* ILR1 */ + case 0x24: /* ILR2 */ + case 0x28: /* ILR3 */ + case 0x2c: /* ILR4 */ + case 0x30: /* ILR5 */ + case 0x34: /* ILR6 */ + case 0x38: /* ILR7 */ + case 0x3c: /* ILR8 */ + case 0x40: /* ILR9 */ + case 0x44: /* ILR10 */ + case 0x48: /* ILR11 */ + case 0x4c: /* ILR12 */ + case 0x50: /* ILR13 */ + case 0x54: /* ILR14 */ + case 0x58: /* ILR15 */ + case 0x5c: /* ILR16 */ + case 0x60: /* ILR17 */ + case 0x64: /* ILR18 */ + case 0x68: /* ILR19 */ + case 0x6c: /* ILR20 */ + case 0x70: /* ILR21 */ + case 0x74: /* ILR22 */ + case 0x78: /* ILR23 */ + case 0x7c: /* ILR24 */ + case 0x80: /* ILR25 */ + case 0x84: /* ILR26 */ + case 0x88: /* ILR27 */ + case 0x8c: /* ILR28 */ + case 0x90: /* ILR29 */ + case 0x94: /* ILR30 */ + case 0x98: /* ILR31 */ + i = (offset - 0x1c) >> 2; + return (bank->priority[i] << 2) | + (((bank->sens_edge >> i) & 1) << 1) | + ((bank->fiq >> i) & 1); + + case 0x9c: /* ISR */ + return 0x00000000; + + } + OMAP_BAD_REG(addr); + return 0; +} + +static void omap_inth_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + struct omap_intr_handler_s *s = (struct omap_intr_handler_s *) opaque; + int i, offset = addr; + int bank_no = offset >> 8; + struct omap_intr_handler_bank_s *bank = &s->bank[bank_no]; + offset &= 0xff; + + switch (offset) { + case 0x00: /* ITR */ + /* Important: ignore the clearing if the IRQ 
is level-triggered and + the input bit is 1 */ + bank->irqs &= value | (bank->inputs & bank->sens_edge); + return; + + case 0x04: /* MIR */ + bank->mask = value; + omap_inth_update(s, 0); + omap_inth_update(s, 1); + return; + + case 0x10: /* SIR_IRQ_CODE */ + case 0x14: /* SIR_FIQ_CODE */ + OMAP_RO_REG(addr); + break; + + case 0x18: /* CONTROL_REG */ + if (bank_no != 0) + break; + if (value & 2) { + qemu_set_irq(s->parent_intr[1], 0); + s->new_agr[1] = ~0; + omap_inth_update(s, 1); + } + if (value & 1) { + qemu_set_irq(s->parent_intr[0], 0); + s->new_agr[0] = ~0; + omap_inth_update(s, 0); + } + return; + + case 0x1c: /* ILR0 */ + case 0x20: /* ILR1 */ + case 0x24: /* ILR2 */ + case 0x28: /* ILR3 */ + case 0x2c: /* ILR4 */ + case 0x30: /* ILR5 */ + case 0x34: /* ILR6 */ + case 0x38: /* ILR7 */ + case 0x3c: /* ILR8 */ + case 0x40: /* ILR9 */ + case 0x44: /* ILR10 */ + case 0x48: /* ILR11 */ + case 0x4c: /* ILR12 */ + case 0x50: /* ILR13 */ + case 0x54: /* ILR14 */ + case 0x58: /* ILR15 */ + case 0x5c: /* ILR16 */ + case 0x60: /* ILR17 */ + case 0x64: /* ILR18 */ + case 0x68: /* ILR19 */ + case 0x6c: /* ILR20 */ + case 0x70: /* ILR21 */ + case 0x74: /* ILR22 */ + case 0x78: /* ILR23 */ + case 0x7c: /* ILR24 */ + case 0x80: /* ILR25 */ + case 0x84: /* ILR26 */ + case 0x88: /* ILR27 */ + case 0x8c: /* ILR28 */ + case 0x90: /* ILR29 */ + case 0x94: /* ILR30 */ + case 0x98: /* ILR31 */ + i = (offset - 0x1c) >> 2; + bank->priority[i] = (value >> 2) & 0x1f; + bank->sens_edge &= ~(1 << i); + bank->sens_edge |= ((value >> 1) & 1) << i; + bank->fiq &= ~(1 << i); + bank->fiq |= (value & 1) << i; + return; + + case 0x9c: /* ISR */ + for (i = 0; i < 32; i ++) + if (value & (1 << i)) { + omap_set_intr(s, 32 * bank_no + i, 1); + return; + } + return; + } + OMAP_BAD_REG(addr); +} + +static const MemoryRegionOps omap_inth_mem_ops = { + .read = omap_inth_read, + .write = omap_inth_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +static void omap_inth_reset(DeviceState *dev) +{ + struct omap_intr_handler_s *s = OMAP_INTC(dev); + int i; + + for (i = 0; i < s->nbanks; ++i){ + s->bank[i].irqs = 0x00000000; + s->bank[i].mask = 0xffffffff; + s->bank[i].sens_edge = 0x00000000; + s->bank[i].fiq = 0x00000000; + s->bank[i].inputs = 0x00000000; + s->bank[i].swi = 0x00000000; + memset(s->bank[i].priority, 0, sizeof(s->bank[i].priority)); + + if (s->level_only) + s->bank[i].sens_edge = 0xffffffff; + } + + s->new_agr[0] = ~0; + s->new_agr[1] = ~0; + s->sir_intr[0] = 0; + s->sir_intr[1] = 0; + s->autoidle = 0; + s->mask = ~0; + + qemu_set_irq(s->parent_intr[0], 0); + qemu_set_irq(s->parent_intr[1], 0); +} + +static void omap_intc_init(Object *obj) +{ + DeviceState *dev = DEVICE(obj); + struct omap_intr_handler_s *s = OMAP_INTC(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + + s->nbanks = 1; + sysbus_init_irq(sbd, &s->parent_intr[0]); + sysbus_init_irq(sbd, &s->parent_intr[1]); + qdev_init_gpio_in(dev, omap_set_intr, s->nbanks * 32); + memory_region_init_io(&s->mmio, obj, &omap_inth_mem_ops, s, + "omap-intc", s->size); + sysbus_init_mmio(sbd, &s->mmio); +} + +static void omap_intc_realize(DeviceState *dev, Error **errp) +{ + struct omap_intr_handler_s *s = OMAP_INTC(dev); + + if (!s->iclk) { + error_setg(errp, "omap-intc: clk not connected"); + } +} + +void omap_intc_set_iclk(omap_intr_handler *intc, omap_clk clk) +{ + intc->iclk = clk; +} + +void omap_intc_set_fclk(omap_intr_handler *intc, omap_clk clk) +{ + intc->fclk = clk; +} + +static Property 
omap_intc_properties[] = { + DEFINE_PROP_UINT32("size", struct omap_intr_handler_s, size, 0x100), + DEFINE_PROP_END_OF_LIST(), +}; + +static void omap_intc_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = omap_inth_reset; + device_class_set_props(dc, omap_intc_properties); + /* Reason: pointer property "clk" */ + dc->user_creatable = false; + dc->realize = omap_intc_realize; +} + +static const TypeInfo omap_intc_info = { + .name = "omap-intc", + .parent = TYPE_OMAP_INTC, + .instance_init = omap_intc_init, + .class_init = omap_intc_class_init, +}; + +static uint64_t omap2_inth_read(void *opaque, hwaddr addr, + unsigned size) +{ + struct omap_intr_handler_s *s = (struct omap_intr_handler_s *) opaque; + int offset = addr; + int bank_no, line_no; + struct omap_intr_handler_bank_s *bank = NULL; + + if ((offset & 0xf80) == 0x80) { + bank_no = (offset & 0x60) >> 5; + if (bank_no < s->nbanks) { + offset &= ~0x60; + bank = &s->bank[bank_no]; + } else { + OMAP_BAD_REG(addr); + return 0; + } + } + + switch (offset) { + case 0x00: /* INTC_REVISION */ + return s->revision; + + case 0x10: /* INTC_SYSCONFIG */ + return (s->autoidle >> 2) & 1; + + case 0x14: /* INTC_SYSSTATUS */ + return 1; /* RESETDONE */ + + case 0x40: /* INTC_SIR_IRQ */ + return s->sir_intr[0]; + + case 0x44: /* INTC_SIR_FIQ */ + return s->sir_intr[1]; + + case 0x48: /* INTC_CONTROL */ + return (!s->mask) << 2; /* GLOBALMASK */ + + case 0x4c: /* INTC_PROTECTION */ + return 0; + + case 0x50: /* INTC_IDLE */ + return s->autoidle & 3; + + /* Per-bank registers */ + case 0x80: /* INTC_ITR */ + return bank->inputs; + + case 0x84: /* INTC_MIR */ + return bank->mask; + + case 0x88: /* INTC_MIR_CLEAR */ + case 0x8c: /* INTC_MIR_SET */ + return 0; + + case 0x90: /* INTC_ISR_SET */ + return bank->swi; + + case 0x94: /* INTC_ISR_CLEAR */ + return 0; + + case 0x98: /* INTC_PENDING_IRQ */ + return bank->irqs & ~bank->mask & ~bank->fiq; + + case 0x9c: /* INTC_PENDING_FIQ */ + return bank->irqs & ~bank->mask & bank->fiq; + + /* Per-line registers */ + case 0x100 ... 0x300: /* INTC_ILR */ + bank_no = (offset - 0x100) >> 7; + if (bank_no > s->nbanks) + break; + bank = &s->bank[bank_no]; + line_no = (offset & 0x7f) >> 2; + return (bank->priority[line_no] << 2) | + ((bank->fiq >> line_no) & 1); + } + OMAP_BAD_REG(addr); + return 0; +} + +static void omap2_inth_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + struct omap_intr_handler_s *s = (struct omap_intr_handler_s *) opaque; + int offset = addr; + int bank_no, line_no; + struct omap_intr_handler_bank_s *bank = NULL; + + if ((offset & 0xf80) == 0x80) { + bank_no = (offset & 0x60) >> 5; + if (bank_no < s->nbanks) { + offset &= ~0x60; + bank = &s->bank[bank_no]; + } else { + OMAP_BAD_REG(addr); + return; + } + } + + switch (offset) { + case 0x10: /* INTC_SYSCONFIG */ + s->autoidle &= 4; + s->autoidle |= (value & 1) << 2; + if (value & 2) { /* SOFTRESET */ + omap_inth_reset(DEVICE(s)); + } + return; + + case 0x48: /* INTC_CONTROL */ + s->mask = (value & 4) ? 0 : ~0; /* GLOBALMASK */ + if (value & 2) { /* NEWFIQAGR */ + qemu_set_irq(s->parent_intr[1], 0); + s->new_agr[1] = ~0; + omap_inth_update(s, 1); + } + if (value & 1) { /* NEWIRQAGR */ + qemu_set_irq(s->parent_intr[0], 0); + s->new_agr[0] = ~0; + omap_inth_update(s, 0); + } + return; + + case 0x4c: /* INTC_PROTECTION */ + /* TODO: Make a bitmap (or sizeof(char)map) of access privileges + * for every register, see Chapter 3 and 4 for privileged mode. 
*/ + if (value & 1) + fprintf(stderr, "%s: protection mode enable attempt\n", + __func__); + return; + + case 0x50: /* INTC_IDLE */ + s->autoidle &= ~3; + s->autoidle |= value & 3; + return; + + /* Per-bank registers */ + case 0x84: /* INTC_MIR */ + bank->mask = value; + omap_inth_update(s, 0); + omap_inth_update(s, 1); + return; + + case 0x88: /* INTC_MIR_CLEAR */ + bank->mask &= ~value; + omap_inth_update(s, 0); + omap_inth_update(s, 1); + return; + + case 0x8c: /* INTC_MIR_SET */ + bank->mask |= value; + return; + + case 0x90: /* INTC_ISR_SET */ + bank->irqs |= bank->swi |= value; + omap_inth_update(s, 0); + omap_inth_update(s, 1); + return; + + case 0x94: /* INTC_ISR_CLEAR */ + bank->swi &= ~value; + bank->irqs = bank->swi & bank->inputs; + return; + + /* Per-line registers */ + case 0x100 ... 0x300: /* INTC_ILR */ + bank_no = (offset - 0x100) >> 7; + if (bank_no > s->nbanks) + break; + bank = &s->bank[bank_no]; + line_no = (offset & 0x7f) >> 2; + bank->priority[line_no] = (value >> 2) & 0x3f; + bank->fiq &= ~(1 << line_no); + bank->fiq |= (value & 1) << line_no; + return; + + case 0x00: /* INTC_REVISION */ + case 0x14: /* INTC_SYSSTATUS */ + case 0x40: /* INTC_SIR_IRQ */ + case 0x44: /* INTC_SIR_FIQ */ + case 0x80: /* INTC_ITR */ + case 0x98: /* INTC_PENDING_IRQ */ + case 0x9c: /* INTC_PENDING_FIQ */ + OMAP_RO_REG(addr); + return; + } + OMAP_BAD_REG(addr); +} + +static const MemoryRegionOps omap2_inth_mem_ops = { + .read = omap2_inth_read, + .write = omap2_inth_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +static void omap2_intc_init(Object *obj) +{ + DeviceState *dev = DEVICE(obj); + struct omap_intr_handler_s *s = OMAP_INTC(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + + s->level_only = 1; + s->nbanks = 3; + sysbus_init_irq(sbd, &s->parent_intr[0]); + sysbus_init_irq(sbd, &s->parent_intr[1]); + qdev_init_gpio_in(dev, omap_set_intr_noedge, s->nbanks * 32); + memory_region_init_io(&s->mmio, obj, &omap2_inth_mem_ops, s, + "omap2-intc", 0x1000); + sysbus_init_mmio(sbd, &s->mmio); +} + +static void omap2_intc_realize(DeviceState *dev, Error **errp) +{ + struct omap_intr_handler_s *s = OMAP_INTC(dev); + + if (!s->iclk) { + error_setg(errp, "omap2-intc: iclk not connected"); + return; + } + if (!s->fclk) { + error_setg(errp, "omap2-intc: fclk not connected"); + return; + } +} + +static Property omap2_intc_properties[] = { + DEFINE_PROP_UINT8("revision", struct omap_intr_handler_s, + revision, 0x21), + DEFINE_PROP_END_OF_LIST(), +}; + +static void omap2_intc_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = omap_inth_reset; + device_class_set_props(dc, omap2_intc_properties); + /* Reason: pointer property "iclk", "fclk" */ + dc->user_creatable = false; + dc->realize = omap2_intc_realize; +} + +static const TypeInfo omap2_intc_info = { + .name = "omap2-intc", + .parent = TYPE_OMAP_INTC, + .instance_init = omap2_intc_init, + .class_init = omap2_intc_class_init, +}; + +static const TypeInfo omap_intc_type_info = { + .name = TYPE_OMAP_INTC, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(omap_intr_handler), + .abstract = true, +}; + +static void omap_intc_register_types(void) +{ + type_register_static(&omap_intc_type_info); + type_register_static(&omap_intc_info); + type_register_static(&omap2_intc_info); +} + +type_init(omap_intc_register_types) diff --git a/hw/intc/ompic.c b/hw/intc/ompic.c new file mode 100644 index 000000000..1f1031480 --- /dev/null 
+++ b/hw/intc/ompic.c @@ -0,0 +1,181 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Authors: Stafford Horne + */ + +#include "qemu/osdep.h" +#include "qemu/module.h" +#include "qapi/error.h" +#include "hw/irq.h" +#include "hw/qdev-properties.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "exec/memory.h" +#include "qom/object.h" + +#define TYPE_OR1K_OMPIC "or1k-ompic" +OBJECT_DECLARE_SIMPLE_TYPE(OR1KOMPICState, OR1K_OMPIC) + +#define OMPIC_CTRL_IRQ_ACK (1 << 31) +#define OMPIC_CTRL_IRQ_GEN (1 << 30) +#define OMPIC_CTRL_DST(cpu) (((cpu) >> 16) & 0x3fff) + +#define OMPIC_REG(addr) (((addr) >> 2) & 0x1) +#define OMPIC_SRC_CPU(addr) (((addr) >> 3) & 0x4f) +#define OMPIC_DST_CPU(addr) (((addr) >> 3) & 0x4f) + +#define OMPIC_STATUS_IRQ_PENDING (1 << 30) +#define OMPIC_STATUS_SRC(cpu) (((cpu) & 0x3fff) << 16) +#define OMPIC_STATUS_DATA(data) ((data) & 0xffff) + +#define OMPIC_CONTROL 0 +#define OMPIC_STATUS 1 + +#define OMPIC_MAX_CPUS 4 /* Real max is much higher, but dont waste memory */ +#define OMPIC_ADDRSPACE_SZ (OMPIC_MAX_CPUS * 2 * 4) /* 2 32-bit regs per cpu */ + +typedef struct OR1KOMPICCPUState OR1KOMPICCPUState; + +struct OR1KOMPICCPUState { + qemu_irq irq; + uint32_t status; + uint32_t control; +}; + +struct OR1KOMPICState { + SysBusDevice parent_obj; + MemoryRegion mr; + + OR1KOMPICCPUState cpus[OMPIC_MAX_CPUS]; + + uint32_t num_cpus; +}; + +static uint64_t ompic_read(void *opaque, hwaddr addr, unsigned size) +{ + OR1KOMPICState *s = opaque; + int src_cpu = OMPIC_SRC_CPU(addr); + + /* We can only write to control control, write control + update status */ + if (OMPIC_REG(addr) == OMPIC_CONTROL) { + return s->cpus[src_cpu].control; + } else { + return s->cpus[src_cpu].status; + } + +} + +static void ompic_write(void *opaque, hwaddr addr, uint64_t data, unsigned size) +{ + OR1KOMPICState *s = opaque; + /* We can only write to control control, write control + update status */ + if (OMPIC_REG(addr) == OMPIC_CONTROL) { + int src_cpu = OMPIC_SRC_CPU(addr); + + s->cpus[src_cpu].control = data; + + if (data & OMPIC_CTRL_IRQ_GEN) { + int dst_cpu = OMPIC_CTRL_DST(data); + + s->cpus[dst_cpu].status = OMPIC_STATUS_IRQ_PENDING | + OMPIC_STATUS_SRC(src_cpu) | + OMPIC_STATUS_DATA(data); + + qemu_irq_raise(s->cpus[dst_cpu].irq); + } + if (data & OMPIC_CTRL_IRQ_ACK) { + s->cpus[src_cpu].status &= ~OMPIC_STATUS_IRQ_PENDING; + qemu_irq_lower(s->cpus[src_cpu].irq); + } + } +} + +static const MemoryRegionOps ompic_ops = { + .read = ompic_read, + .write = ompic_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .impl = { + .max_access_size = 8, + }, +}; + +static void or1k_ompic_init(Object *obj) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + OR1KOMPICState *s = OR1K_OMPIC(obj); + + memory_region_init_io(&s->mr, OBJECT(s), &ompic_ops, s, + "or1k-ompic", OMPIC_ADDRSPACE_SZ); + sysbus_init_mmio(sbd, &s->mr); +} + +static void or1k_ompic_realize(DeviceState *dev, Error **errp) +{ + OR1KOMPICState *s = OR1K_OMPIC(dev); + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + int i; + + if (s->num_cpus > OMPIC_MAX_CPUS) { + error_setg(errp, "Exceeded maximum CPUs %d", s->num_cpus); + return; + } + /* Init IRQ sources for all CPUs */ + for (i = 0; i < s->num_cpus; i++) { + sysbus_init_irq(sbd, &s->cpus[i].irq); + } +} + +static Property or1k_ompic_properties[] = { + DEFINE_PROP_UINT32("num-cpus", OR1KOMPICState, num_cpus, 1), + DEFINE_PROP_END_OF_LIST(), +}; + 
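+/*
+ * Rough sketch of the IPI handshake the read/write handlers above
+ * implement: a sender writes OMPIC_CTRL_IRQ_GEN together with a
+ * destination CPU and 16 bits of data into its own control register;
+ * that latches pending/source/data into the destination's status
+ * register and raises its IRQ. The receiver reads its status register,
+ * then writes OMPIC_CTRL_IRQ_ACK to clear the pending bit and lower the
+ * line. From the guest side (writel/readl and the base/cpu variables
+ * here are illustrative, not taken from a real guest driver):
+ *
+ *   // send 0xbeef to cpu 2: control reg is at src_cpu * 8, status at +4
+ *   writel(OMPIC_CTRL_IRQ_GEN | (2 << 16) | 0xbeef, base + src_cpu * 8);
+ *   // receiver: read status, then ack to drop the pending bit
+ *   u32 st = readl(base + dst_cpu * 8 + 4);
+ *   writel(OMPIC_CTRL_IRQ_ACK, base + dst_cpu * 8);
+ */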
+static const VMStateDescription vmstate_or1k_ompic_cpu = { + .name = "or1k_ompic_cpu", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(status, OR1KOMPICCPUState), + VMSTATE_UINT32(control, OR1KOMPICCPUState), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_or1k_ompic = { + .name = TYPE_OR1K_OMPIC, + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_STRUCT_ARRAY(cpus, OR1KOMPICState, OMPIC_MAX_CPUS, 1, + vmstate_or1k_ompic_cpu, OR1KOMPICCPUState), + VMSTATE_UINT32(num_cpus, OR1KOMPICState), + VMSTATE_END_OF_LIST() + } +}; + +static void or1k_ompic_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + device_class_set_props(dc, or1k_ompic_properties); + dc->realize = or1k_ompic_realize; + dc->vmsd = &vmstate_or1k_ompic; +} + +static const TypeInfo or1k_ompic_info = { + .name = TYPE_OR1K_OMPIC, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(OR1KOMPICState), + .instance_init = or1k_ompic_init, + .class_init = or1k_ompic_class_init, +}; + +static void or1k_ompic_register_types(void) +{ + type_register_static(&or1k_ompic_info); +} + +type_init(or1k_ompic_register_types) diff --git a/hw/intc/openpic.c b/hw/intc/openpic.c new file mode 100644 index 000000000..49504e740 --- /dev/null +++ b/hw/intc/openpic.c @@ -0,0 +1,1645 @@ +/* + * OpenPIC emulation + * + * Copyright (c) 2004 Jocelyn Mayer + * 2011 Alexander Graf + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ +/* + * + * Based on OpenPic implementations: + * - Motorola MPC8245 & MPC8540 user manuals. + * - Motorola Harrier programmer manual + * + */ + +#include "qemu/osdep.h" +#include "hw/irq.h" +#include "hw/ppc/mac.h" +#include "hw/pci/pci.h" +#include "hw/ppc/openpic.h" +#include "hw/ppc/ppc_e500.h" +#include "hw/qdev-properties.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "hw/pci/msi.h" +#include "qapi/error.h" +#include "qemu/bitops.h" +#include "qapi/qmp/qerror.h" +#include "qemu/module.h" +#include "qemu/timer.h" +#include "qemu/error-report.h" + +/* #define DEBUG_OPENPIC */ + +#ifdef DEBUG_OPENPIC +static const int debug_openpic = 1; +#else +static const int debug_openpic = 0; +#endif + +static int get_current_cpu(void); +#define DPRINTF(fmt, ...) 
do { \ + if (debug_openpic) { \ + info_report("Core%d: " fmt, get_current_cpu(), ## __VA_ARGS__); \ + } \ + } while (0) + +/* OpenPIC capability flags */ +#define OPENPIC_FLAG_IDR_CRIT (1 << 0) +#define OPENPIC_FLAG_ILR (2 << 0) + +/* OpenPIC address map */ +#define OPENPIC_GLB_REG_START 0x0 +#define OPENPIC_GLB_REG_SIZE 0x10F0 +#define OPENPIC_TMR_REG_START 0x10F0 +#define OPENPIC_TMR_REG_SIZE 0x220 +#define OPENPIC_MSI_REG_START 0x1600 +#define OPENPIC_MSI_REG_SIZE 0x200 +#define OPENPIC_SUMMARY_REG_START 0x3800 +#define OPENPIC_SUMMARY_REG_SIZE 0x800 +#define OPENPIC_SRC_REG_START 0x10000 +#define OPENPIC_SRC_REG_SIZE (OPENPIC_MAX_SRC * 0x20) +#define OPENPIC_CPU_REG_START 0x20000 +#define OPENPIC_CPU_REG_SIZE 0x100 + ((MAX_CPU - 1) * 0x1000) + +static FslMpicInfo fsl_mpic_20 = { + .max_ext = 12, +}; + +static FslMpicInfo fsl_mpic_42 = { + .max_ext = 12, +}; + +#define FRR_NIRQ_SHIFT 16 +#define FRR_NCPU_SHIFT 8 +#define FRR_VID_SHIFT 0 + +#define VID_REVISION_1_2 2 +#define VID_REVISION_1_3 3 + +#define VIR_GENERIC 0x00000000 /* Generic Vendor ID */ +#define VIR_MPIC2A 0x00004614 /* IBM MPIC-2A */ + +#define GCR_RESET 0x80000000 +#define GCR_MODE_PASS 0x00000000 +#define GCR_MODE_MIXED 0x20000000 +#define GCR_MODE_PROXY 0x60000000 + +#define TBCR_CI 0x80000000 /* count inhibit */ +#define TCCR_TOG 0x80000000 /* toggles when decrement to zero */ + +#define IDR_EP_SHIFT 31 +#define IDR_EP_MASK (1U << IDR_EP_SHIFT) +#define IDR_CI0_SHIFT 30 +#define IDR_CI1_SHIFT 29 +#define IDR_P1_SHIFT 1 +#define IDR_P0_SHIFT 0 + +#define ILR_INTTGT_MASK 0x000000ff +#define ILR_INTTGT_INT 0x00 +#define ILR_INTTGT_CINT 0x01 /* critical */ +#define ILR_INTTGT_MCP 0x02 /* machine check */ + +/* + * The currently supported INTTGT values happen to be the same as QEMU's + * openpic output codes, but don't depend on this. The output codes + * could change (unlikely, but...) or support could be added for + * more INTTGT values. + */ +static const int inttgt_output[][2] = { + { ILR_INTTGT_INT, OPENPIC_OUTPUT_INT }, + { ILR_INTTGT_CINT, OPENPIC_OUTPUT_CINT }, + { ILR_INTTGT_MCP, OPENPIC_OUTPUT_MCK }, +}; + +static int inttgt_to_output(int inttgt) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(inttgt_output); i++) { + if (inttgt_output[i][0] == inttgt) { + return inttgt_output[i][1]; + } + } + + error_report("%s: unsupported inttgt %d", __func__, inttgt); + return OPENPIC_OUTPUT_INT; +} + +static int output_to_inttgt(int output) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(inttgt_output); i++) { + if (inttgt_output[i][1] == output) { + return inttgt_output[i][0]; + } + } + + abort(); +} + +#define MSIIR_OFFSET 0x140 +#define MSIIR_SRS_SHIFT 29 +#define MSIIR_SRS_MASK (0x7 << MSIIR_SRS_SHIFT) +#define MSIIR_IBS_SHIFT 24 +#define MSIIR_IBS_MASK (0x1f << MSIIR_IBS_SHIFT) + +static int get_current_cpu(void) +{ + if (!current_cpu) { + return -1; + } + + return current_cpu->cpu_index; +} + +static uint32_t openpic_cpu_read_internal(void *opaque, hwaddr addr, + int idx); +static void openpic_cpu_write_internal(void *opaque, hwaddr addr, + uint32_t val, int idx); +static void openpic_reset(DeviceState *d); + +/* + * Convert between openpic clock ticks and nanosecs. In the hardware the clock + * frequency is driven by board inputs to the PIC which the PIC would then + * divide by 4 or 8. For now hard code to 25MZ. 
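+ * (25 MHz makes one tick 1000 / 25 = 40 ns, which is what
+ * OPENPIC_TIMER_NS_PER_TICK below evaluates to.)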
+ */ +#define OPENPIC_TIMER_FREQ_MHZ 25 +#define OPENPIC_TIMER_NS_PER_TICK (1000 / OPENPIC_TIMER_FREQ_MHZ) +static inline uint64_t ns_to_ticks(uint64_t ns) +{ + return ns / OPENPIC_TIMER_NS_PER_TICK; +} +static inline uint64_t ticks_to_ns(uint64_t ticks) +{ + return ticks * OPENPIC_TIMER_NS_PER_TICK; +} + +static inline void IRQ_setbit(IRQQueue *q, int n_IRQ) +{ + set_bit(n_IRQ, q->queue); +} + +static inline void IRQ_resetbit(IRQQueue *q, int n_IRQ) +{ + clear_bit(n_IRQ, q->queue); +} + +static void IRQ_check(OpenPICState *opp, IRQQueue *q) +{ + int irq = -1; + int next = -1; + int priority = -1; + + for (;;) { + irq = find_next_bit(q->queue, opp->max_irq, irq + 1); + if (irq == opp->max_irq) { + break; + } + + DPRINTF("IRQ_check: irq %d set ivpr_pr=%d pr=%d", + irq, IVPR_PRIORITY(opp->src[irq].ivpr), priority); + + if (IVPR_PRIORITY(opp->src[irq].ivpr) > priority) { + next = irq; + priority = IVPR_PRIORITY(opp->src[irq].ivpr); + } + } + + q->next = next; + q->priority = priority; +} + +static int IRQ_get_next(OpenPICState *opp, IRQQueue *q) +{ + /* XXX: optimize */ + IRQ_check(opp, q); + + return q->next; +} + +static void IRQ_local_pipe(OpenPICState *opp, int n_CPU, int n_IRQ, + bool active, bool was_active) +{ + IRQDest *dst; + IRQSource *src; + int priority; + + dst = &opp->dst[n_CPU]; + src = &opp->src[n_IRQ]; + + DPRINTF("%s: IRQ %d active %d was %d", + __func__, n_IRQ, active, was_active); + + if (src->output != OPENPIC_OUTPUT_INT) { + DPRINTF("%s: output %d irq %d active %d was %d count %d", + __func__, src->output, n_IRQ, active, was_active, + dst->outputs_active[src->output]); + + /* + * On Freescale MPIC, critical interrupts ignore priority, + * IACK, EOI, etc. Before MPIC v4.1 they also ignore + * masking. + */ + if (active) { + if (!was_active && dst->outputs_active[src->output]++ == 0) { + DPRINTF("%s: Raise OpenPIC output %d cpu %d irq %d", + __func__, src->output, n_CPU, n_IRQ); + qemu_irq_raise(dst->irqs[src->output]); + } + } else { + if (was_active && --dst->outputs_active[src->output] == 0) { + DPRINTF("%s: Lower OpenPIC output %d cpu %d irq %d", + __func__, src->output, n_CPU, n_IRQ); + qemu_irq_lower(dst->irqs[src->output]); + } + } + + return; + } + + priority = IVPR_PRIORITY(src->ivpr); + + /* + * Even if the interrupt doesn't have enough priority, + * it is still raised, in case ctpr is lowered later. 
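+ * (ctpr is the destination CPU's current task priority: the check below
+ * drops delivery when priority <= ctpr, so an interrupt suppressed here
+ * must still sit in the raised queue for a later ctpr write to expose.)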
+ */ + if (active) { + IRQ_setbit(&dst->raised, n_IRQ); + } else { + IRQ_resetbit(&dst->raised, n_IRQ); + } + + IRQ_check(opp, &dst->raised); + + if (active && priority <= dst->ctpr) { + DPRINTF("%s: IRQ %d priority %d too low for ctpr %d on CPU %d", + __func__, n_IRQ, priority, dst->ctpr, n_CPU); + active = 0; + } + + if (active) { + if (IRQ_get_next(opp, &dst->servicing) >= 0 && + priority <= dst->servicing.priority) { + DPRINTF("%s: IRQ %d is hidden by servicing IRQ %d on CPU %d", + __func__, n_IRQ, dst->servicing.next, n_CPU); + } else { + DPRINTF("%s: Raise OpenPIC INT output cpu %d irq %d/%d", + __func__, n_CPU, n_IRQ, dst->raised.next); + qemu_irq_raise(opp->dst[n_CPU].irqs[OPENPIC_OUTPUT_INT]); + } + } else { + IRQ_get_next(opp, &dst->servicing); + if (dst->raised.priority > dst->ctpr && + dst->raised.priority > dst->servicing.priority) { + DPRINTF("%s: IRQ %d inactive, IRQ %d prio %d above %d/%d, CPU %d", + __func__, n_IRQ, dst->raised.next, dst->raised.priority, + dst->ctpr, dst->servicing.priority, n_CPU); + /* IRQ line stays asserted */ + } else { + DPRINTF("%s: IRQ %d inactive, current prio %d/%d, CPU %d", + __func__, n_IRQ, dst->ctpr, dst->servicing.priority, n_CPU); + qemu_irq_lower(opp->dst[n_CPU].irqs[OPENPIC_OUTPUT_INT]); + } + } +} + +/* update pic state because registers for n_IRQ have changed value */ +static void openpic_update_irq(OpenPICState *opp, int n_IRQ) +{ + IRQSource *src; + bool active, was_active; + int i; + + src = &opp->src[n_IRQ]; + active = src->pending; + + if ((src->ivpr & IVPR_MASK_MASK) && !src->nomask) { + /* Interrupt source is disabled */ + DPRINTF("%s: IRQ %d is disabled", __func__, n_IRQ); + active = false; + } + + was_active = !!(src->ivpr & IVPR_ACTIVITY_MASK); + + /* + * We don't have a similar check for already-active because + * ctpr may have changed and we need to withdraw the interrupt. 
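+ * (The withdraw path, !active && was_active, must fall through to
+ * IRQ_local_pipe() below so the output can be lowered again.)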
+ */ + if (!active && !was_active) { + DPRINTF("%s: IRQ %d is already inactive", __func__, n_IRQ); + return; + } + + if (active) { + src->ivpr |= IVPR_ACTIVITY_MASK; + } else { + src->ivpr &= ~IVPR_ACTIVITY_MASK; + } + + if (src->destmask == 0) { + /* No target */ + DPRINTF("%s: IRQ %d has no target", __func__, n_IRQ); + return; + } + + if (src->destmask == (1 << src->last_cpu)) { + /* Only one CPU is allowed to receive this IRQ */ + IRQ_local_pipe(opp, src->last_cpu, n_IRQ, active, was_active); + } else if (!(src->ivpr & IVPR_MODE_MASK)) { + /* Directed delivery mode */ + for (i = 0; i < opp->nb_cpus; i++) { + if (src->destmask & (1 << i)) { + IRQ_local_pipe(opp, i, n_IRQ, active, was_active); + } + } + } else { + /* Distributed delivery mode */ + for (i = src->last_cpu + 1; i != src->last_cpu; i++) { + if (i == opp->nb_cpus) { + i = 0; + } + if (src->destmask & (1 << i)) { + IRQ_local_pipe(opp, i, n_IRQ, active, was_active); + src->last_cpu = i; + break; + } + } + } +} + +static void openpic_set_irq(void *opaque, int n_IRQ, int level) +{ + OpenPICState *opp = opaque; + IRQSource *src; + + if (n_IRQ >= OPENPIC_MAX_IRQ) { + error_report("%s: IRQ %d out of range", __func__, n_IRQ); + abort(); + } + + src = &opp->src[n_IRQ]; + DPRINTF("openpic: set irq %d = %d ivpr=0x%08x", + n_IRQ, level, src->ivpr); + if (src->level) { + /* level-sensitive irq */ + src->pending = level; + openpic_update_irq(opp, n_IRQ); + } else { + /* edge-sensitive irq */ + if (level) { + src->pending = 1; + openpic_update_irq(opp, n_IRQ); + } + + if (src->output != OPENPIC_OUTPUT_INT) { + /* + * Edge-triggered interrupts shouldn't be used + * with non-INT delivery, but just in case, + * try to make it do something sane rather than + * cause an interrupt storm. This is close to + * what you'd probably see happen in real hardware. 
+ */ + src->pending = 0; + openpic_update_irq(opp, n_IRQ); + } + } +} + +static inline uint32_t read_IRQreg_idr(OpenPICState *opp, int n_IRQ) +{ + return opp->src[n_IRQ].idr; +} + +static inline uint32_t read_IRQreg_ilr(OpenPICState *opp, int n_IRQ) +{ + if (opp->flags & OPENPIC_FLAG_ILR) { + return output_to_inttgt(opp->src[n_IRQ].output); + } + + return 0xffffffff; +} + +static inline uint32_t read_IRQreg_ivpr(OpenPICState *opp, int n_IRQ) +{ + return opp->src[n_IRQ].ivpr; +} + +static inline void write_IRQreg_idr(OpenPICState *opp, int n_IRQ, uint32_t val) +{ + IRQSource *src = &opp->src[n_IRQ]; + uint32_t normal_mask = (1UL << opp->nb_cpus) - 1; + uint32_t crit_mask = 0; + uint32_t mask = normal_mask; + int crit_shift = IDR_EP_SHIFT - opp->nb_cpus; + int i; + + if (opp->flags & OPENPIC_FLAG_IDR_CRIT) { + crit_mask = mask << crit_shift; + mask |= crit_mask | IDR_EP; + } + + src->idr = val & mask; + DPRINTF("Set IDR %d to 0x%08x", n_IRQ, src->idr); + + if (opp->flags & OPENPIC_FLAG_IDR_CRIT) { + if (src->idr & crit_mask) { + if (src->idr & normal_mask) { + DPRINTF("%s: IRQ configured for multiple output types, using " + "critical", __func__); + } + + src->output = OPENPIC_OUTPUT_CINT; + src->nomask = true; + src->destmask = 0; + + for (i = 0; i < opp->nb_cpus; i++) { + int n_ci = IDR_CI0_SHIFT - i; + + if (src->idr & (1UL << n_ci)) { + src->destmask |= 1UL << i; + } + } + } else { + src->output = OPENPIC_OUTPUT_INT; + src->nomask = false; + src->destmask = src->idr & normal_mask; + } + } else { + src->destmask = src->idr; + } +} + +static inline void write_IRQreg_ilr(OpenPICState *opp, int n_IRQ, uint32_t val) +{ + if (opp->flags & OPENPIC_FLAG_ILR) { + IRQSource *src = &opp->src[n_IRQ]; + + src->output = inttgt_to_output(val & ILR_INTTGT_MASK); + DPRINTF("Set ILR %d to 0x%08x, output %d", n_IRQ, src->idr, + src->output); + + /* TODO: on MPIC v4.0 only, set nomask for non-INT */ + } +} + +static inline void write_IRQreg_ivpr(OpenPICState *opp, int n_IRQ, uint32_t val) +{ + uint32_t mask; + + /* + * NOTE when implementing newer FSL MPIC models: starting with v4.0, + * the polarity bit is read-only on internal interrupts. + */ + mask = IVPR_MASK_MASK | IVPR_PRIORITY_MASK | IVPR_SENSE_MASK | + IVPR_POLARITY_MASK | opp->vector_mask; + + /* ACTIVITY bit is read-only */ + opp->src[n_IRQ].ivpr = + (opp->src[n_IRQ].ivpr & IVPR_ACTIVITY_MASK) | (val & mask); + + /* + * For FSL internal interrupts, The sense bit is reserved and zero, + * and the interrupt is always level-triggered. Timers and IPIs + * have no sense or polarity bits, and are edge-triggered. 
+ */ + switch (opp->src[n_IRQ].type) { + case IRQ_TYPE_NORMAL: + opp->src[n_IRQ].level = !!(opp->src[n_IRQ].ivpr & IVPR_SENSE_MASK); + break; + + case IRQ_TYPE_FSLINT: + opp->src[n_IRQ].ivpr &= ~IVPR_SENSE_MASK; + break; + + case IRQ_TYPE_FSLSPECIAL: + opp->src[n_IRQ].ivpr &= ~(IVPR_POLARITY_MASK | IVPR_SENSE_MASK); + break; + } + + openpic_update_irq(opp, n_IRQ); + DPRINTF("Set IVPR %d to 0x%08x -> 0x%08x", n_IRQ, val, + opp->src[n_IRQ].ivpr); +} + +static void openpic_gcr_write(OpenPICState *opp, uint64_t val) +{ + bool mpic_proxy = false; + + if (val & GCR_RESET) { + openpic_reset(DEVICE(opp)); + return; + } + + opp->gcr &= ~opp->mpic_mode_mask; + opp->gcr |= val & opp->mpic_mode_mask; + + /* Set external proxy mode */ + if ((val & opp->mpic_mode_mask) == GCR_MODE_PROXY) { + mpic_proxy = true; + } + + ppce500_set_mpic_proxy(mpic_proxy); +} + +static void openpic_gbl_write(void *opaque, hwaddr addr, uint64_t val, + unsigned len) +{ + OpenPICState *opp = opaque; + IRQDest *dst; + int idx; + + DPRINTF("%s: addr %#" HWADDR_PRIx " <= %08" PRIx64, + __func__, addr, val); + if (addr & 0xF) { + return; + } + switch (addr) { + case 0x00: /* Block Revision Register1 (BRR1) is Readonly */ + break; + case 0x40: + case 0x50: + case 0x60: + case 0x70: + case 0x80: + case 0x90: + case 0xA0: + case 0xB0: + openpic_cpu_write_internal(opp, addr, val, get_current_cpu()); + break; + case 0x1000: /* FRR */ + break; + case 0x1020: /* GCR */ + openpic_gcr_write(opp, val); + break; + case 0x1080: /* VIR */ + break; + case 0x1090: /* PIR */ + for (idx = 0; idx < opp->nb_cpus; idx++) { + if ((val & (1 << idx)) && !(opp->pir & (1 << idx))) { + DPRINTF("Raise OpenPIC RESET output for CPU %d", idx); + dst = &opp->dst[idx]; + qemu_irq_raise(dst->irqs[OPENPIC_OUTPUT_RESET]); + } else if (!(val & (1 << idx)) && (opp->pir & (1 << idx))) { + DPRINTF("Lower OpenPIC RESET output for CPU %d", idx); + dst = &opp->dst[idx]; + qemu_irq_lower(dst->irqs[OPENPIC_OUTPUT_RESET]); + } + } + opp->pir = val; + break; + case 0x10A0: /* IPI_IVPR */ + case 0x10B0: + case 0x10C0: + case 0x10D0: + { + int idx; + idx = (addr - 0x10A0) >> 4; + write_IRQreg_ivpr(opp, opp->irq_ipi0 + idx, val); + } + break; + case 0x10E0: /* SPVE */ + opp->spve = val & opp->vector_mask; + break; + default: + break; + } +} + +static uint64_t openpic_gbl_read(void *opaque, hwaddr addr, unsigned len) +{ + OpenPICState *opp = opaque; + uint32_t retval; + + DPRINTF("%s: addr %#" HWADDR_PRIx, __func__, addr); + retval = 0xFFFFFFFF; + if (addr & 0xF) { + return retval; + } + switch (addr) { + case 0x1000: /* FRR */ + retval = opp->frr; + break; + case 0x1020: /* GCR */ + retval = opp->gcr; + break; + case 0x1080: /* VIR */ + retval = opp->vir; + break; + case 0x1090: /* PIR */ + retval = 0x00000000; + break; + case 0x00: /* Block Revision Register1 (BRR1) */ + retval = opp->brr1; + break; + case 0x40: + case 0x50: + case 0x60: + case 0x70: + case 0x80: + case 0x90: + case 0xA0: + case 0xB0: + retval = openpic_cpu_read_internal(opp, addr, get_current_cpu()); + break; + case 0x10A0: /* IPI_IVPR */ + case 0x10B0: + case 0x10C0: + case 0x10D0: + { + int idx; + idx = (addr - 0x10A0) >> 4; + retval = read_IRQreg_ivpr(opp, opp->irq_ipi0 + idx); + } + break; + case 0x10E0: /* SPVE */ + retval = opp->spve; + break; + default: + break; + } + DPRINTF("%s: => 0x%08x", __func__, retval); + + return retval; +} + +static void openpic_tmr_set_tmr(OpenPICTimer *tmr, uint32_t val, bool enabled); + +static void qemu_timer_cb(void *opaque) +{ + OpenPICTimer *tmr = opaque; + 
OpenPICState *opp = tmr->opp; + uint32_t n_IRQ = tmr->n_IRQ; + uint32_t val = tmr->tbcr & ~TBCR_CI; + uint32_t tog = ((tmr->tccr & TCCR_TOG) ^ TCCR_TOG); /* invert toggle. */ + + DPRINTF("%s n_IRQ=%d", __func__, n_IRQ); + /* Reload current count from base count and setup timer. */ + tmr->tccr = val | tog; + openpic_tmr_set_tmr(tmr, val, /*enabled=*/true); + /* Raise the interrupt. */ + opp->src[n_IRQ].destmask = read_IRQreg_idr(opp, n_IRQ); + openpic_set_irq(opp, n_IRQ, 1); + openpic_set_irq(opp, n_IRQ, 0); +} + +/* + * If enabled is true, arranges for an interrupt to be raised val clocks into + * the future, if enabled is false cancels the timer. + */ +static void openpic_tmr_set_tmr(OpenPICTimer *tmr, uint32_t val, bool enabled) +{ + uint64_t ns = ticks_to_ns(val & ~TCCR_TOG); + /* + * A count of zero causes a timer to be set to expire immediately. This + * effectively stops the simulation since the timer is constantly expiring + * which prevents guest code execution, so we don't honor that + * configuration. On real hardware, this situation would generate an + * interrupt on every clock cycle if the interrupt was unmasked. + */ + if ((ns == 0) || !enabled) { + tmr->qemu_timer_active = false; + tmr->tccr = tmr->tccr & TCCR_TOG; + timer_del(tmr->qemu_timer); /* set timer to never expire. */ + } else { + tmr->qemu_timer_active = true; + uint64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + tmr->origin_time = now; + timer_mod(tmr->qemu_timer, now + ns); /* set timer expiration. */ + } +} + +/* + * Returns the currrent tccr value, i.e., timer value (in clocks) with + * appropriate TOG. + */ +static uint64_t openpic_tmr_get_timer(OpenPICTimer *tmr) +{ + uint64_t retval; + if (!tmr->qemu_timer_active) { + retval = tmr->tccr; + } else { + uint64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + uint64_t used = now - tmr->origin_time; /* nsecs */ + uint32_t used_ticks = (uint32_t)ns_to_ticks(used); + uint32_t count = (tmr->tccr & ~TCCR_TOG) - used_ticks; + retval = (uint32_t)((tmr->tccr & TCCR_TOG) | (count & ~TCCR_TOG)); + } + return retval; +} + +static void openpic_tmr_write(void *opaque, hwaddr addr, uint64_t val, + unsigned len) +{ + OpenPICState *opp = opaque; + int idx; + + DPRINTF("%s: addr %#" HWADDR_PRIx " <= %08" PRIx64, + __func__, (addr + 0x10f0), val); + if (addr & 0xF) { + return; + } + + if (addr == 0) { + /* TFRR */ + opp->tfrr = val; + return; + } + addr -= 0x10; /* correct for TFRR */ + idx = (addr >> 6) & 0x3; + + switch (addr & 0x30) { + case 0x00: /* TCCR */ + break; + case 0x10: /* TBCR */ + /* Did the enable status change? */ + if ((opp->timers[idx].tbcr & TBCR_CI) != (val & TBCR_CI)) { + /* Did "Count Inhibit" transition from 1 to 0? 
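+ * If so the timer is being started: reload TCCR from the new base
+ * count before re-arming (or cancelling) the QEMU timer below.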
*/ + if ((val & TBCR_CI) == 0) { + opp->timers[idx].tccr = val & ~TCCR_TOG; + } + openpic_tmr_set_tmr(&opp->timers[idx], + (val & ~TBCR_CI), + /*enabled=*/((val & TBCR_CI) == 0)); + } + opp->timers[idx].tbcr = val; + break; + case 0x20: /* TVPR */ + write_IRQreg_ivpr(opp, opp->irq_tim0 + idx, val); + break; + case 0x30: /* TDR */ + write_IRQreg_idr(opp, opp->irq_tim0 + idx, val); + break; + } +} + +static uint64_t openpic_tmr_read(void *opaque, hwaddr addr, unsigned len) +{ + OpenPICState *opp = opaque; + uint32_t retval = -1; + int idx; + + DPRINTF("%s: addr %#" HWADDR_PRIx, __func__, addr + 0x10f0); + if (addr & 0xF) { + goto out; + } + if (addr == 0) { + /* TFRR */ + retval = opp->tfrr; + goto out; + } + addr -= 0x10; /* correct for TFRR */ + idx = (addr >> 6) & 0x3; + switch (addr & 0x30) { + case 0x00: /* TCCR */ + retval = openpic_tmr_get_timer(&opp->timers[idx]); + break; + case 0x10: /* TBCR */ + retval = opp->timers[idx].tbcr; + break; + case 0x20: /* TVPR */ + retval = read_IRQreg_ivpr(opp, opp->irq_tim0 + idx); + break; + case 0x30: /* TDR */ + retval = read_IRQreg_idr(opp, opp->irq_tim0 + idx); + break; + } + +out: + DPRINTF("%s: => 0x%08x", __func__, retval); + + return retval; +} + +static void openpic_src_write(void *opaque, hwaddr addr, uint64_t val, + unsigned len) +{ + OpenPICState *opp = opaque; + int idx; + + DPRINTF("%s: addr %#" HWADDR_PRIx " <= %08" PRIx64, + __func__, addr, val); + + addr = addr & 0xffff; + idx = addr >> 5; + + switch (addr & 0x1f) { + case 0x00: + write_IRQreg_ivpr(opp, idx, val); + break; + case 0x10: + write_IRQreg_idr(opp, idx, val); + break; + case 0x18: + write_IRQreg_ilr(opp, idx, val); + break; + } +} + +static uint64_t openpic_src_read(void *opaque, uint64_t addr, unsigned len) +{ + OpenPICState *opp = opaque; + uint32_t retval; + int idx; + + DPRINTF("%s: addr %#" HWADDR_PRIx, __func__, addr); + retval = 0xFFFFFFFF; + + addr = addr & 0xffff; + idx = addr >> 5; + + switch (addr & 0x1f) { + case 0x00: + retval = read_IRQreg_ivpr(opp, idx); + break; + case 0x10: + retval = read_IRQreg_idr(opp, idx); + break; + case 0x18: + retval = read_IRQreg_ilr(opp, idx); + break; + } + + DPRINTF("%s: => 0x%08x", __func__, retval); + return retval; +} + +static void openpic_msi_write(void *opaque, hwaddr addr, uint64_t val, + unsigned size) +{ + OpenPICState *opp = opaque; + int idx = opp->irq_msi; + int srs, ibs; + + DPRINTF("%s: addr %#" HWADDR_PRIx " <= 0x%08" PRIx64, + __func__, addr, val); + if (addr & 0xF) { + return; + } + + switch (addr) { + case MSIIR_OFFSET: + srs = val >> MSIIR_SRS_SHIFT; + idx += srs; + ibs = (val & MSIIR_IBS_MASK) >> MSIIR_IBS_SHIFT; + opp->msi[srs].msir |= 1 << ibs; + openpic_set_irq(opp, idx, 1); + break; + default: + /* most registers are read-only, thus ignored */ + break; + } +} + +static uint64_t openpic_msi_read(void *opaque, hwaddr addr, unsigned size) +{ + OpenPICState *opp = opaque; + uint64_t r = 0; + int i, srs; + + DPRINTF("%s: addr %#" HWADDR_PRIx, __func__, addr); + if (addr & 0xF) { + return -1; + } + + srs = addr >> 4; + + switch (addr) { + case 0x00: + case 0x10: + case 0x20: + case 0x30: + case 0x40: + case 0x50: + case 0x60: + case 0x70: /* MSIRs */ + r = opp->msi[srs].msir; + /* Clear on read */ + opp->msi[srs].msir = 0; + openpic_set_irq(opp, opp->irq_msi + srs, 0); + break; + case 0x120: /* MSISR */ + for (i = 0; i < MAX_MSI; i++) { + r |= (opp->msi[i].msir ? 
1 : 0) << i; + } + break; + } + + return r; +} + +static uint64_t openpic_summary_read(void *opaque, hwaddr addr, unsigned size) +{ + uint64_t r = 0; + + DPRINTF("%s: addr %#" HWADDR_PRIx, __func__, addr); + + /* TODO: EISR/EIMR */ + + return r; +} + +static void openpic_summary_write(void *opaque, hwaddr addr, uint64_t val, + unsigned size) +{ + DPRINTF("%s: addr %#" HWADDR_PRIx " <= 0x%08" PRIx64, + __func__, addr, val); + + /* TODO: EISR/EIMR */ +} + +static void openpic_cpu_write_internal(void *opaque, hwaddr addr, + uint32_t val, int idx) +{ + OpenPICState *opp = opaque; + IRQSource *src; + IRQDest *dst; + int s_IRQ, n_IRQ; + + DPRINTF("%s: cpu %d addr %#" HWADDR_PRIx " <= 0x%08x", __func__, idx, + addr, val); + + if (idx < 0 || idx >= opp->nb_cpus) { + return; + } + + if (addr & 0xF) { + return; + } + dst = &opp->dst[idx]; + addr &= 0xFF0; + switch (addr) { + case 0x40: /* IPIDR */ + case 0x50: + case 0x60: + case 0x70: + idx = (addr - 0x40) >> 4; + /* we still use the IDE value as a mask of which CPUs to deliver the IPI to. */ + opp->src[opp->irq_ipi0 + idx].destmask |= val; + openpic_set_irq(opp, opp->irq_ipi0 + idx, 1); + openpic_set_irq(opp, opp->irq_ipi0 + idx, 0); + break; + case 0x80: /* CTPR */ + dst->ctpr = val & 0x0000000F; + + DPRINTF("%s: set CPU %d ctpr to %d, raised %d servicing %d", + __func__, idx, dst->ctpr, dst->raised.priority, + dst->servicing.priority); + + if (dst->raised.priority <= dst->ctpr) { + DPRINTF("%s: Lower OpenPIC INT output cpu %d due to ctpr", + __func__, idx); + qemu_irq_lower(dst->irqs[OPENPIC_OUTPUT_INT]); + } else if (dst->raised.priority > dst->servicing.priority) { + DPRINTF("%s: Raise OpenPIC INT output cpu %d irq %d", + __func__, idx, dst->raised.next); + qemu_irq_raise(dst->irqs[OPENPIC_OUTPUT_INT]); + } + + break; + case 0x90: /* WHOAMI */ + /* Read-only register */ + break; + case 0xA0: /* IACK */ + /* Read-only register */ + break; + case 0xB0: /* EOI */ + DPRINTF("EOI"); + s_IRQ = IRQ_get_next(opp, &dst->servicing); + + if (s_IRQ < 0) { + DPRINTF("%s: EOI with no interrupt in service", __func__); + break; + } + + IRQ_resetbit(&dst->servicing, s_IRQ); + /* Set up next servicing IRQ */ + s_IRQ = IRQ_get_next(opp, &dst->servicing); + /* Check queued interrupts.
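After the in-service interrupt retires, the highest-priority raised interrupt (if any) is compared below against the next servicing priority, and the INT output is raised again if it wins; this is how nested interrupts drain in priority order.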
*/ + n_IRQ = IRQ_get_next(opp, &dst->raised); + src = &opp->src[n_IRQ]; + if (n_IRQ != -1 && + (s_IRQ == -1 || + IVPR_PRIORITY(src->ivpr) > dst->servicing.priority)) { + DPRINTF("Raise OpenPIC INT output cpu %d irq %d", + idx, n_IRQ); + qemu_irq_raise(opp->dst[idx].irqs[OPENPIC_OUTPUT_INT]); + } + break; + default: + break; + } +} + +static void openpic_cpu_write(void *opaque, hwaddr addr, uint64_t val, + unsigned len) +{ + openpic_cpu_write_internal(opaque, addr, val, (addr & 0x1f000) >> 12); +} + + +static uint32_t openpic_iack(OpenPICState *opp, IRQDest *dst, int cpu) +{ + IRQSource *src; + int retval, irq; + + DPRINTF("Lower OpenPIC INT output"); + qemu_irq_lower(dst->irqs[OPENPIC_OUTPUT_INT]); + + irq = IRQ_get_next(opp, &dst->raised); + DPRINTF("IACK: irq=%d", irq); + + if (irq == -1) { + /* No more interrupt pending */ + return opp->spve; + } + + src = &opp->src[irq]; + if (!(src->ivpr & IVPR_ACTIVITY_MASK) || + !(IVPR_PRIORITY(src->ivpr) > dst->ctpr)) { + error_report("%s: bad raised IRQ %d ctpr %d ivpr 0x%08x", + __func__, irq, dst->ctpr, src->ivpr); + openpic_update_irq(opp, irq); + retval = opp->spve; + } else { + /* IRQ enter servicing state */ + IRQ_setbit(&dst->servicing, irq); + retval = IVPR_VECTOR(opp, src->ivpr); + } + + if (!src->level) { + /* edge-sensitive IRQ */ + src->ivpr &= ~IVPR_ACTIVITY_MASK; + src->pending = 0; + IRQ_resetbit(&dst->raised, irq); + } + + /* Timers and IPIs support multicast. */ + if (((irq >= opp->irq_ipi0) && (irq < (opp->irq_ipi0 + OPENPIC_MAX_IPI))) || + ((irq >= opp->irq_tim0) && (irq < (opp->irq_tim0 + OPENPIC_MAX_TMR)))) { + DPRINTF("irq is IPI or TMR"); + src->destmask &= ~(1 << cpu); + if (src->destmask && !src->level) { + /* trigger on CPUs that didn't know about it yet */ + openpic_set_irq(opp, irq, 1); + openpic_set_irq(opp, irq, 0); + /* if all CPUs knew about it, set active bit again */ + src->ivpr |= IVPR_ACTIVITY_MASK; + } + } + + return retval; +} + +static uint32_t openpic_cpu_read_internal(void *opaque, hwaddr addr, + int idx) +{ + OpenPICState *opp = opaque; + IRQDest *dst; + uint32_t retval; + + DPRINTF("%s: cpu %d addr %#" HWADDR_PRIx, __func__, idx, addr); + retval = 0xFFFFFFFF; + + if (idx < 0 || idx >= opp->nb_cpus) { + return retval; + } + + if (addr & 0xF) { + return retval; + } + dst = &opp->dst[idx]; + addr &= 0xFF0; + switch (addr) { + case 0x80: /* CTPR */ + retval = dst->ctpr; + break; + case 0x90: /* WHOAMI */ + retval = idx; + break; + case 0xA0: /* IACK */ + retval = openpic_iack(opp, dst, idx); + break; + case 0xB0: /* EOI */ + retval = 0; + break; + default: + break; + } + DPRINTF("%s: => 0x%08x", __func__, retval); + + return retval; +} + +static uint64_t openpic_cpu_read(void *opaque, hwaddr addr, unsigned len) +{ + return openpic_cpu_read_internal(opaque, addr, (addr & 0x1f000) >> 12); +} + +static const MemoryRegionOps openpic_glb_ops_le = { + .write = openpic_gbl_write, + .read = openpic_gbl_read, + .endianness = DEVICE_LITTLE_ENDIAN, + .impl = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +static const MemoryRegionOps openpic_glb_ops_be = { + .write = openpic_gbl_write, + .read = openpic_gbl_read, + .endianness = DEVICE_BIG_ENDIAN, + .impl = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +static const MemoryRegionOps openpic_tmr_ops_le = { + .write = openpic_tmr_write, + .read = openpic_tmr_read, + .endianness = DEVICE_LITTLE_ENDIAN, + .impl = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +static const MemoryRegionOps openpic_tmr_ops_be = { + .write = 
openpic_tmr_write, + .read = openpic_tmr_read, + .endianness = DEVICE_BIG_ENDIAN, + .impl = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +static const MemoryRegionOps openpic_cpu_ops_le = { + .write = openpic_cpu_write, + .read = openpic_cpu_read, + .endianness = DEVICE_LITTLE_ENDIAN, + .impl = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +static const MemoryRegionOps openpic_cpu_ops_be = { + .write = openpic_cpu_write, + .read = openpic_cpu_read, + .endianness = DEVICE_BIG_ENDIAN, + .impl = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +static const MemoryRegionOps openpic_src_ops_le = { + .write = openpic_src_write, + .read = openpic_src_read, + .endianness = DEVICE_LITTLE_ENDIAN, + .impl = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +static const MemoryRegionOps openpic_src_ops_be = { + .write = openpic_src_write, + .read = openpic_src_read, + .endianness = DEVICE_BIG_ENDIAN, + .impl = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +static const MemoryRegionOps openpic_msi_ops_be = { + .read = openpic_msi_read, + .write = openpic_msi_write, + .endianness = DEVICE_BIG_ENDIAN, + .impl = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +static const MemoryRegionOps openpic_summary_ops_be = { + .read = openpic_summary_read, + .write = openpic_summary_write, + .endianness = DEVICE_BIG_ENDIAN, + .impl = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +static void openpic_reset(DeviceState *d) +{ + OpenPICState *opp = OPENPIC(d); + int i; + + opp->gcr = GCR_RESET; + /* Initialise controller registers */ + opp->frr = ((opp->nb_irqs - 1) << FRR_NIRQ_SHIFT) | + ((opp->nb_cpus - 1) << FRR_NCPU_SHIFT) | + (opp->vid << FRR_VID_SHIFT); + + opp->pir = 0; + opp->spve = -1 & opp->vector_mask; + opp->tfrr = opp->tfrr_reset; + /* Initialise IRQ sources */ + for (i = 0; i < opp->max_irq; i++) { + opp->src[i].ivpr = opp->ivpr_reset; + switch (opp->src[i].type) { + case IRQ_TYPE_NORMAL: + opp->src[i].level = !!(opp->ivpr_reset & IVPR_SENSE_MASK); + break; + + case IRQ_TYPE_FSLINT: + opp->src[i].ivpr |= IVPR_POLARITY_MASK; + break; + + case IRQ_TYPE_FSLSPECIAL: + break; + } + + /* Mask all IPI interrupts for Freescale OpenPIC */ + if ((opp->model == OPENPIC_MODEL_FSL_MPIC_20) || + (opp->model == OPENPIC_MODEL_FSL_MPIC_42)) { + if (i >= opp->irq_ipi0 && i < opp->irq_tim0) { + write_IRQreg_idr(opp, i, 0); + continue; + } + } + + write_IRQreg_idr(opp, i, opp->idr_reset); + } + /* Initialise IRQ destinations */ + for (i = 0; i < opp->nb_cpus; i++) { + opp->dst[i].ctpr = 15; + opp->dst[i].raised.next = -1; + opp->dst[i].raised.priority = 0; + bitmap_clear(opp->dst[i].raised.queue, 0, IRQQUEUE_SIZE_BITS); + opp->dst[i].servicing.next = -1; + opp->dst[i].servicing.priority = 0; + bitmap_clear(opp->dst[i].servicing.queue, 0, IRQQUEUE_SIZE_BITS); + } + /* Initialise timers */ + for (i = 0; i < OPENPIC_MAX_TMR; i++) { + opp->timers[i].tccr = 0; + opp->timers[i].tbcr = TBCR_CI; + if (opp->timers[i].qemu_timer_active) { + timer_del(opp->timers[i].qemu_timer); /* Inhibit timer */ + opp->timers[i].qemu_timer_active = false; + } + } + /* Go out of RESET state */ + opp->gcr = 0; +} + +typedef struct MemReg { + const char *name; + MemoryRegionOps const *ops; + hwaddr start_addr; + ram_addr_t size; +} MemReg; + +static void fsl_common_init(OpenPICState *opp) +{ + int i; + int virq = OPENPIC_MAX_SRC; + + opp->vid = VID_REVISION_1_2; + opp->vir = VIR_GENERIC; + opp->vector_mask = 0xFFFF; + opp->tfrr_reset = 0; + opp->ivpr_reset 
= IVPR_MASK_MASK; + opp->idr_reset = 1 << 0; + opp->max_irq = OPENPIC_MAX_IRQ; + + opp->irq_ipi0 = virq; + virq += OPENPIC_MAX_IPI; + opp->irq_tim0 = virq; + virq += OPENPIC_MAX_TMR; + + assert(virq <= OPENPIC_MAX_IRQ); + + opp->irq_msi = 224; + + msi_nonbroken = true; + for (i = 0; i < opp->fsl->max_ext; i++) { + opp->src[i].level = false; + } + + /* Internal interrupts, including message and MSI */ + for (i = 16; i < OPENPIC_MAX_SRC; i++) { + opp->src[i].type = IRQ_TYPE_FSLINT; + opp->src[i].level = true; + } + + /* timers and IPIs */ + for (i = OPENPIC_MAX_SRC; i < virq; i++) { + opp->src[i].type = IRQ_TYPE_FSLSPECIAL; + opp->src[i].level = false; + } + + for (i = 0; i < OPENPIC_MAX_TMR; i++) { + opp->timers[i].n_IRQ = opp->irq_tim0 + i; + opp->timers[i].qemu_timer_active = false; + opp->timers[i].qemu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, + &qemu_timer_cb, + &opp->timers[i]); + opp->timers[i].opp = opp; + } +} + +static void map_list(OpenPICState *opp, const MemReg *list, int *count) +{ + while (list->name) { + assert(*count < ARRAY_SIZE(opp->sub_io_mem)); + + memory_region_init_io(&opp->sub_io_mem[*count], OBJECT(opp), list->ops, + opp, list->name, list->size); + + memory_region_add_subregion(&opp->mem, list->start_addr, + &opp->sub_io_mem[*count]); + + (*count)++; + list++; + } +} + +static const VMStateDescription vmstate_openpic_irq_queue = { + .name = "openpic_irq_queue", + .version_id = 0, + .minimum_version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_BITMAP(queue, IRQQueue, 0, queue_size), + VMSTATE_INT32(next, IRQQueue), + VMSTATE_INT32(priority, IRQQueue), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_openpic_irqdest = { + .name = "openpic_irqdest", + .version_id = 0, + .minimum_version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_INT32(ctpr, IRQDest), + VMSTATE_STRUCT(raised, IRQDest, 0, vmstate_openpic_irq_queue, + IRQQueue), + VMSTATE_STRUCT(servicing, IRQDest, 0, vmstate_openpic_irq_queue, + IRQQueue), + VMSTATE_UINT32_ARRAY(outputs_active, IRQDest, OPENPIC_OUTPUT_NB), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_openpic_irqsource = { + .name = "openpic_irqsource", + .version_id = 0, + .minimum_version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_UINT32(ivpr, IRQSource), + VMSTATE_UINT32(idr, IRQSource), + VMSTATE_UINT32(destmask, IRQSource), + VMSTATE_INT32(last_cpu, IRQSource), + VMSTATE_INT32(pending, IRQSource), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_openpic_timer = { + .name = "openpic_timer", + .version_id = 0, + .minimum_version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_UINT32(tccr, OpenPICTimer), + VMSTATE_UINT32(tbcr, OpenPICTimer), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_openpic_msi = { + .name = "openpic_msi", + .version_id = 0, + .minimum_version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_UINT32(msir, OpenPICMSI), + VMSTATE_END_OF_LIST() + } +}; + +static int openpic_post_load(void *opaque, int version_id) +{ + OpenPICState *opp = (OpenPICState *)opaque; + int i; + + /* Update internal ivpr and idr variables */ + for (i = 0; i < opp->max_irq; i++) { + write_IRQreg_idr(opp, i, opp->src[i].idr); + write_IRQreg_ivpr(opp, i, opp->src[i].ivpr); + } + + return 0; +} + +static const VMStateDescription vmstate_openpic = { + .name = "openpic", + .version_id = 3, + .minimum_version_id = 3, + .post_load = openpic_post_load, + .fields = (VMStateField[]) { + VMSTATE_UINT32(gcr, OpenPICState), + VMSTATE_UINT32(vir, 
OpenPICState), + VMSTATE_UINT32(pir, OpenPICState), + VMSTATE_UINT32(spve, OpenPICState), + VMSTATE_UINT32(tfrr, OpenPICState), + VMSTATE_UINT32(max_irq, OpenPICState), + VMSTATE_STRUCT_VARRAY_UINT32(src, OpenPICState, max_irq, 0, + vmstate_openpic_irqsource, IRQSource), + VMSTATE_UINT32_EQUAL(nb_cpus, OpenPICState, NULL), + VMSTATE_STRUCT_VARRAY_UINT32(dst, OpenPICState, nb_cpus, 0, + vmstate_openpic_irqdest, IRQDest), + VMSTATE_STRUCT_ARRAY(timers, OpenPICState, OPENPIC_MAX_TMR, 0, + vmstate_openpic_timer, OpenPICTimer), + VMSTATE_STRUCT_ARRAY(msi, OpenPICState, MAX_MSI, 0, + vmstate_openpic_msi, OpenPICMSI), + VMSTATE_UINT32(irq_ipi0, OpenPICState), + VMSTATE_UINT32(irq_tim0, OpenPICState), + VMSTATE_UINT32(irq_msi, OpenPICState), + VMSTATE_END_OF_LIST() + } +}; + +static void openpic_init(Object *obj) +{ + OpenPICState *opp = OPENPIC(obj); + + memory_region_init(&opp->mem, obj, "openpic", 0x40000); +} + +static void openpic_realize(DeviceState *dev, Error **errp) +{ + SysBusDevice *d = SYS_BUS_DEVICE(dev); + OpenPICState *opp = OPENPIC(dev); + int i, j; + int list_count = 0; + static const MemReg list_le[] = { + {"glb", &openpic_glb_ops_le, + OPENPIC_GLB_REG_START, OPENPIC_GLB_REG_SIZE}, + {"tmr", &openpic_tmr_ops_le, + OPENPIC_TMR_REG_START, OPENPIC_TMR_REG_SIZE}, + {"src", &openpic_src_ops_le, + OPENPIC_SRC_REG_START, OPENPIC_SRC_REG_SIZE}, + {"cpu", &openpic_cpu_ops_le, + OPENPIC_CPU_REG_START, OPENPIC_CPU_REG_SIZE}, + {NULL} + }; + static const MemReg list_be[] = { + {"glb", &openpic_glb_ops_be, + OPENPIC_GLB_REG_START, OPENPIC_GLB_REG_SIZE}, + {"tmr", &openpic_tmr_ops_be, + OPENPIC_TMR_REG_START, OPENPIC_TMR_REG_SIZE}, + {"src", &openpic_src_ops_be, + OPENPIC_SRC_REG_START, OPENPIC_SRC_REG_SIZE}, + {"cpu", &openpic_cpu_ops_be, + OPENPIC_CPU_REG_START, OPENPIC_CPU_REG_SIZE}, + {NULL} + }; + static const MemReg list_fsl[] = { + {"msi", &openpic_msi_ops_be, + OPENPIC_MSI_REG_START, OPENPIC_MSI_REG_SIZE}, + {"summary", &openpic_summary_ops_be, + OPENPIC_SUMMARY_REG_START, OPENPIC_SUMMARY_REG_SIZE}, + {NULL} + }; + + if (opp->nb_cpus > MAX_CPU) { + error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, + TYPE_OPENPIC, "nb_cpus", (uint64_t)opp->nb_cpus, + (uint64_t)0, (uint64_t)MAX_CPU); + return; + } + + switch (opp->model) { + case OPENPIC_MODEL_FSL_MPIC_20: + default: + opp->fsl = &fsl_mpic_20; + opp->brr1 = 0x00400200; + opp->flags |= OPENPIC_FLAG_IDR_CRIT; + opp->nb_irqs = 80; + opp->mpic_mode_mask = GCR_MODE_MIXED; + + fsl_common_init(opp); + map_list(opp, list_be, &list_count); + map_list(opp, list_fsl, &list_count); + + break; + + case OPENPIC_MODEL_FSL_MPIC_42: + opp->fsl = &fsl_mpic_42; + opp->brr1 = 0x00400402; + opp->flags |= OPENPIC_FLAG_ILR; + opp->nb_irqs = 196; + opp->mpic_mode_mask = GCR_MODE_PROXY; + + fsl_common_init(opp); + map_list(opp, list_be, &list_count); + map_list(opp, list_fsl, &list_count); + + break; + + case OPENPIC_MODEL_KEYLARGO: + opp->nb_irqs = KEYLARGO_MAX_EXT; + opp->vid = VID_REVISION_1_2; + opp->vir = VIR_GENERIC; + opp->vector_mask = 0xFF; + opp->tfrr_reset = 4160000; + opp->ivpr_reset = IVPR_MASK_MASK | IVPR_MODE_MASK; + opp->idr_reset = 0; + opp->max_irq = KEYLARGO_MAX_IRQ; + opp->irq_ipi0 = KEYLARGO_IPI_IRQ; + opp->irq_tim0 = KEYLARGO_TMR_IRQ; + opp->brr1 = -1; + opp->mpic_mode_mask = GCR_MODE_MIXED; + + if (opp->nb_cpus != 1) { + error_setg(errp, "Only UP supported today"); + return; + } + + map_list(opp, list_le, &list_count); + break; + } + + for (i = 0; i < opp->nb_cpus; i++) { + opp->dst[i].irqs = g_new0(qemu_irq, OPENPIC_OUTPUT_NB); + for (j 
= 0; j < OPENPIC_OUTPUT_NB; j++) { + sysbus_init_irq(d, &opp->dst[i].irqs[j]); + } + + opp->dst[i].raised.queue_size = IRQQUEUE_SIZE_BITS; + opp->dst[i].raised.queue = bitmap_new(IRQQUEUE_SIZE_BITS); + opp->dst[i].servicing.queue_size = IRQQUEUE_SIZE_BITS; + opp->dst[i].servicing.queue = bitmap_new(IRQQUEUE_SIZE_BITS); + } + + sysbus_init_mmio(d, &opp->mem); + qdev_init_gpio_in(dev, openpic_set_irq, opp->max_irq); +} + +static Property openpic_properties[] = { + DEFINE_PROP_UINT32("model", OpenPICState, model, OPENPIC_MODEL_FSL_MPIC_20), + DEFINE_PROP_UINT32("nb_cpus", OpenPICState, nb_cpus, 1), + DEFINE_PROP_END_OF_LIST(), +}; + +static void openpic_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + + dc->realize = openpic_realize; + device_class_set_props(dc, openpic_properties); + dc->reset = openpic_reset; + dc->vmsd = &vmstate_openpic; + set_bit(DEVICE_CATEGORY_MISC, dc->categories); +} + +static const TypeInfo openpic_info = { + .name = TYPE_OPENPIC, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(OpenPICState), + .instance_init = openpic_init, + .class_init = openpic_class_init, +}; + +static void openpic_register_types(void) +{ + type_register_static(&openpic_info); +} + +type_init(openpic_register_types) diff --git a/hw/intc/openpic_kvm.c b/hw/intc/openpic_kvm.c new file mode 100644 index 000000000..557dd0c2b --- /dev/null +++ b/hw/intc/openpic_kvm.c @@ -0,0 +1,294 @@ +/* + * KVM in-kernel OpenPIC + * + * Copyright 2013 Freescale Semiconductor, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include <sys/ioctl.h> +#include "hw/ppc/openpic.h" +#include "hw/ppc/openpic_kvm.h" +#include "hw/pci/msi.h" +#include "hw/qdev-properties.h" +#include "hw/sysbus.h" +#include "sysemu/kvm.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "qom/object.h" + +#define GCR_RESET 0x80000000 + +OBJECT_DECLARE_SIMPLE_TYPE(KVMOpenPICState, KVM_OPENPIC) + +struct KVMOpenPICState { + /*< private >*/ + SysBusDevice parent_obj; + /*< public >*/ + + MemoryRegion mem; + MemoryListener mem_listener; + uint32_t fd; + uint32_t model; + hwaddr mapped; +}; + +static void kvm_openpic_set_irq(void *opaque, int n_IRQ, int level) +{ + kvm_set_irq(kvm_state, n_IRQ, level); +} + +static void kvm_openpic_write(void *opaque, hwaddr addr, uint64_t val, + unsigned size) +{ + KVMOpenPICState *opp = opaque; + struct kvm_device_attr attr; + uint32_t val32 = val; + int ret; + + attr.group = KVM_DEV_MPIC_GRP_REGISTER; + attr.attr = addr; + attr.addr = (uint64_t)(unsigned long)&val32; + + ret = ioctl(opp->fd, KVM_SET_DEVICE_ATTR, &attr); + if (ret < 0) { + qemu_log_mask(LOG_UNIMP, "%s: %s %" PRIx64 "\n", __func__, + strerror(errno), attr.attr); + } +} + +static void kvm_openpic_reset(DeviceState *d) +{ + KVMOpenPICState *opp = KVM_OPENPIC(d); + + /* Trigger the GCR.RESET bit to reset the PIC */ + kvm_openpic_write(opp, 0x1020, GCR_RESET, sizeof(uint32_t)); +} + +static uint64_t kvm_openpic_read(void *opaque, hwaddr addr, unsigned size) +{ + KVMOpenPICState *opp = opaque; + struct kvm_device_attr attr; + uint32_t val = 0xdeadbeef; + int ret; + + attr.group = KVM_DEV_MPIC_GRP_REGISTER; + attr.attr = addr; + attr.addr = (uint64_t)(unsigned long)&val; + + ret = ioctl(opp->fd, KVM_GET_DEVICE_ATTR, &attr); + if (ret < 0) { + qemu_log_mask(LOG_UNIMP, "%s: %s %" PRIx64 "\n", __func__, + strerror(errno), attr.attr); + return 0; + } + + return val; +} + +static const MemoryRegionOps kvm_openpic_mem_ops = { + .write = kvm_openpic_write, + .read = kvm_openpic_read, + .endianness = DEVICE_BIG_ENDIAN, + .impl = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +static void kvm_openpic_region_add(MemoryListener *listener, + MemoryRegionSection *section) +{ + KVMOpenPICState *opp = container_of(listener, KVMOpenPICState, + mem_listener); + struct kvm_device_attr attr; + uint64_t reg_base; + int ret; + + /* Ignore events on regions that are not us */ + if (section->mr != &opp->mem) { + return; + } + + if (opp->mapped) { + /* + * We can only map the MPIC once. Since we are already mapped, + * the best we can do is ignore new maps. + */ + return; + } + + reg_base = section->offset_within_address_space; + opp->mapped = reg_base; + + attr.group = KVM_DEV_MPIC_GRP_MISC; + attr.attr = KVM_DEV_MPIC_BASE_ADDR; + attr.addr = (uint64_t)(unsigned long)&reg_base; + + ret = ioctl(opp->fd, KVM_SET_DEVICE_ATTR, &attr); + if (ret < 0) { + fprintf(stderr, "%s: %s %" PRIx64 "\n", __func__, + strerror(errno), reg_base); + } +} + +static void kvm_openpic_region_del(MemoryListener *listener, + MemoryRegionSection *section) +{ + KVMOpenPICState *opp = container_of(listener, KVMOpenPICState, + mem_listener); + struct kvm_device_attr attr; + uint64_t reg_base = 0; + int ret; + + /* Ignore events on regions that are not us */ + if (section->mr != &opp->mem) { + return; + } + + if (section->offset_within_address_space != opp->mapped) { + /* + * We can only map the MPIC once. This mapping was a secondary + * one that we couldn't fulfill. Ignore it.
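The in-kernel MPIC keeps the base address that was programmed first via KVM_DEV_MPIC_BASE_ADDR, so only the region recorded in opp->mapped is unmapped below.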
+ */ + return; + } + opp->mapped = 0; + + attr.group = KVM_DEV_MPIC_GRP_MISC; + attr.attr = KVM_DEV_MPIC_BASE_ADDR; + attr.addr = (uint64_t)(unsigned long)&reg_base; + + ret = ioctl(opp->fd, KVM_SET_DEVICE_ATTR, &attr); + if (ret < 0) { + fprintf(stderr, "%s: %s %" PRIx64 "\n", __func__, + strerror(errno), reg_base); + } +} + +static void kvm_openpic_init(Object *obj) +{ + KVMOpenPICState *opp = KVM_OPENPIC(obj); + + memory_region_init_io(&opp->mem, OBJECT(opp), &kvm_openpic_mem_ops, opp, + "kvm-openpic", 0x40000); +} + +static void kvm_openpic_realize(DeviceState *dev, Error **errp) +{ + SysBusDevice *d = SYS_BUS_DEVICE(dev); + KVMOpenPICState *opp = KVM_OPENPIC(dev); + KVMState *s = kvm_state; + int kvm_openpic_model; + struct kvm_create_device cd = {0}; + int ret, i; + + if (!kvm_check_extension(s, KVM_CAP_DEVICE_CTRL)) { + error_setg(errp, "Kernel is lacking Device Control API"); + return; + } + + switch (opp->model) { + case OPENPIC_MODEL_FSL_MPIC_20: + kvm_openpic_model = KVM_DEV_TYPE_FSL_MPIC_20; + break; + + case OPENPIC_MODEL_FSL_MPIC_42: + kvm_openpic_model = KVM_DEV_TYPE_FSL_MPIC_42; + break; + + default: + error_setg(errp, "Unsupported OpenPIC model %" PRIu32, opp->model); + return; + } + + cd.type = kvm_openpic_model; + ret = kvm_vm_ioctl(s, KVM_CREATE_DEVICE, &cd); + if (ret < 0) { + error_setg(errp, "Can't create device %d: %s", + cd.type, strerror(errno)); + return; + } + opp->fd = cd.fd; + + sysbus_init_mmio(d, &opp->mem); + qdev_init_gpio_in(dev, kvm_openpic_set_irq, OPENPIC_MAX_IRQ); + + opp->mem_listener.region_add = kvm_openpic_region_add; + opp->mem_listener.region_del = kvm_openpic_region_del; + opp->mem_listener.name = "openpic-kvm"; + memory_listener_register(&opp->mem_listener, &address_space_memory); + + /* indicate pic capabilities */ + msi_nonbroken = true; + kvm_kernel_irqchip = true; + kvm_async_interrupts_allowed = true; + + /* set up irq routing */ + kvm_init_irq_routing(kvm_state); + for (i = 0; i < 256; ++i) { + kvm_irqchip_add_irq_route(kvm_state, i, 0, i); + } + + kvm_msi_via_irqfd_allowed = true; + kvm_gsi_routing_allowed = true; + + kvm_irqchip_commit_routes(s); +} + +int kvm_openpic_connect_vcpu(DeviceState *d, CPUState *cs) +{ + KVMOpenPICState *opp = KVM_OPENPIC(d); + + return kvm_vcpu_enable_cap(cs, KVM_CAP_IRQ_MPIC, 0, opp->fd, + kvm_arch_vcpu_id(cs)); +} + +static Property kvm_openpic_properties[] = { + DEFINE_PROP_UINT32("model", KVMOpenPICState, model, + OPENPIC_MODEL_FSL_MPIC_20), + DEFINE_PROP_END_OF_LIST(), +}; + +static void kvm_openpic_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + + dc->realize = kvm_openpic_realize; + device_class_set_props(dc, kvm_openpic_properties); + dc->reset = kvm_openpic_reset; + set_bit(DEVICE_CATEGORY_MISC, dc->categories); +} + +static const TypeInfo kvm_openpic_info = { + .name = TYPE_KVM_OPENPIC, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(KVMOpenPICState), + .instance_init = kvm_openpic_init, + .class_init = kvm_openpic_class_init, +}; + +static void kvm_openpic_register_types(void) +{ + type_register_static(&kvm_openpic_info); +} + +type_init(kvm_openpic_register_types) diff --git a/hw/intc/pl190.c b/hw/intc/pl190.c new file mode 100644 index 000000000..cd8844360 --- /dev/null +++ b/hw/intc/pl190.c @@ -0,0 +1,297 @@ +/* + * Arm PrimeCell PL190 Vector Interrupt Controller + * + * Copyright (c) 2006 CodeSourcery. + * Written by Paul Brook + * + * This code is licensed under the GPL.
+ */ + +#include "qemu/osdep.h" +#include "hw/irq.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "qom/object.h" + +/* The number of virtual priority levels. 16 user vectors plus the + unvectored IRQ. Chained interrupts would require an additional level + if implemented. */ + +#define PL190_NUM_PRIO 17 + +#define TYPE_PL190 "pl190" +OBJECT_DECLARE_SIMPLE_TYPE(PL190State, PL190) + +struct PL190State { + SysBusDevice parent_obj; + + MemoryRegion iomem; + uint32_t level; + uint32_t soft_level; + uint32_t irq_enable; + uint32_t fiq_select; + uint8_t vect_control[16]; + uint32_t vect_addr[PL190_NUM_PRIO]; + /* Mask containing interrupts with higher priority than this one. */ + uint32_t prio_mask[PL190_NUM_PRIO + 1]; + int protected; + /* Current priority level. */ + int priority; + int prev_prio[PL190_NUM_PRIO]; + qemu_irq irq; + qemu_irq fiq; +}; + +static const unsigned char pl190_id[] = +{ 0x90, 0x11, 0x04, 0x00, 0x0D, 0xf0, 0x05, 0xb1 }; + +static inline uint32_t pl190_irq_level(PL190State *s) +{ + return (s->level | s->soft_level) & s->irq_enable & ~s->fiq_select; +} + +/* Update interrupts. */ +static void pl190_update(PL190State *s) +{ + uint32_t level = pl190_irq_level(s); + int set; + + set = (level & s->prio_mask[s->priority]) != 0; + qemu_set_irq(s->irq, set); + set = ((s->level | s->soft_level) & s->fiq_select) != 0; + qemu_set_irq(s->fiq, set); +} + +static void pl190_set_irq(void *opaque, int irq, int level) +{ + PL190State *s = (PL190State *)opaque; + + if (level) + s->level |= 1u << irq; + else + s->level &= ~(1u << irq); + pl190_update(s); +} + +static void pl190_update_vectors(PL190State *s) +{ + uint32_t mask; + int i; + int n; + + mask = 0; + for (i = 0; i < 16; i++) + { + s->prio_mask[i] = mask; + if (s->vect_control[i] & 0x20) + { + n = s->vect_control[i] & 0x1f; + mask |= 1 << n; + } + } + s->prio_mask[16] = mask; + pl190_update(s); +} + +static uint64_t pl190_read(void *opaque, hwaddr offset, + unsigned size) +{ + PL190State *s = (PL190State *)opaque; + int i; + + if (offset >= 0xfe0 && offset < 0x1000) { + return pl190_id[(offset - 0xfe0) >> 2]; + } + if (offset >= 0x100 && offset < 0x140) { + return s->vect_addr[(offset - 0x100) >> 2]; + } + if (offset >= 0x200 && offset < 0x240) { + return s->vect_control[(offset - 0x200) >> 2]; + } + switch (offset >> 2) { + case 0: /* IRQSTATUS */ + return pl190_irq_level(s); + case 1: /* FIQSTATUS */ + return (s->level | s->soft_level) & s->fiq_select; + case 2: /* RAWINTR */ + return s->level | s->soft_level; + case 3: /* INTSELECT */ + return s->fiq_select; + case 4: /* INTENABLE */ + return s->irq_enable; + case 6: /* SOFTINT */ + return s->soft_level; + case 8: /* PROTECTION */ + return s->protected; + case 12: /* VECTADDR */ + /* Read vector address at the start of an ISR. Increases the + * current priority level to that of the current interrupt. + * + * Since an enabled interrupt X at priority P causes prio_mask[Y] + * to have bit X set for all Y > P, this loop will stop with + * i == the priority of the highest priority set interrupt. + */ + for (i = 0; i < s->priority; i++) { + if ((s->level | s->soft_level) & s->prio_mask[i + 1]) { + break; + } + } + + /* Reading this value with no pending interrupts is undefined. + We return the default address.
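In the usual flow an ISR reads VECTADDR once on entry (which raises the current priority here) and writes it once on exit (which restores the previous priority from prev_prio in pl190_write() below), so reads and writes must stay balanced.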
*/ + if (i == PL190_NUM_PRIO) + return s->vect_addr[16]; + if (i < s->priority) + { + s->prev_prio[i] = s->priority; + s->priority = i; + pl190_update(s); + } + return s->vect_addr[s->priority]; + case 13: /* DEFVECTADDR */ + return s->vect_addr[16]; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "pl190_read: Bad offset %x\n", (int)offset); + return 0; + } +} + +static void pl190_write(void *opaque, hwaddr offset, + uint64_t val, unsigned size) +{ + PL190State *s = (PL190State *)opaque; + + if (offset >= 0x100 && offset < 0x140) { + s->vect_addr[(offset - 0x100) >> 2] = val; + pl190_update_vectors(s); + return; + } + if (offset >= 0x200 && offset < 0x240) { + s->vect_control[(offset - 0x200) >> 2] = val; + pl190_update_vectors(s); + return; + } + switch (offset >> 2) { + case 0: /* SELECT */ + /* This is a readonly register, but linux tries to write to it + anyway. Ignore the write. */ + break; + case 3: /* INTSELECT */ + s->fiq_select = val; + break; + case 4: /* INTENABLE */ + s->irq_enable |= val; + break; + case 5: /* INTENCLEAR */ + s->irq_enable &= ~val; + break; + case 6: /* SOFTINT */ + s->soft_level |= val; + break; + case 7: /* SOFTINTCLEAR */ + s->soft_level &= ~val; + break; + case 8: /* PROTECTION */ + /* TODO: Protection (supervisor only access) is not implemented. */ + s->protected = val & 1; + break; + case 12: /* VECTADDR */ + /* Restore the previous priority level. The value written is + ignored. */ + if (s->priority < PL190_NUM_PRIO) + s->priority = s->prev_prio[s->priority]; + break; + case 13: /* DEFVECTADDR */ + s->vect_addr[16] = val; + break; + case 0xc0: /* ITCR */ + if (val) { + qemu_log_mask(LOG_UNIMP, "pl190: Test mode not implemented\n"); + } + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "pl190_write: Bad offset %x\n", (int)offset); + return; + } + pl190_update(s); +} + +static const MemoryRegionOps pl190_ops = { + .read = pl190_read, + .write = pl190_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void pl190_reset(DeviceState *d) +{ + PL190State *s = PL190(d); + int i; + + for (i = 0; i < 16; i++) { + s->vect_addr[i] = 0; + s->vect_control[i] = 0; + } + s->vect_addr[16] = 0; + s->prio_mask[17] = 0xffffffff; + s->priority = PL190_NUM_PRIO; + pl190_update_vectors(s); +} + +static void pl190_init(Object *obj) +{ + DeviceState *dev = DEVICE(obj); + PL190State *s = PL190(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + + memory_region_init_io(&s->iomem, obj, &pl190_ops, s, "pl190", 0x1000); + sysbus_init_mmio(sbd, &s->iomem); + qdev_init_gpio_in(dev, pl190_set_irq, 32); + sysbus_init_irq(sbd, &s->irq); + sysbus_init_irq(sbd, &s->fiq); +} + +static const VMStateDescription vmstate_pl190 = { + .name = "pl190", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(level, PL190State), + VMSTATE_UINT32(soft_level, PL190State), + VMSTATE_UINT32(irq_enable, PL190State), + VMSTATE_UINT32(fiq_select, PL190State), + VMSTATE_UINT8_ARRAY(vect_control, PL190State, 16), + VMSTATE_UINT32_ARRAY(vect_addr, PL190State, PL190_NUM_PRIO), + VMSTATE_UINT32_ARRAY(prio_mask, PL190State, PL190_NUM_PRIO+1), + VMSTATE_INT32(protected, PL190State), + VMSTATE_INT32(priority, PL190State), + VMSTATE_INT32_ARRAY(prev_prio, PL190State, PL190_NUM_PRIO), + VMSTATE_END_OF_LIST() + } +}; + +static void pl190_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = pl190_reset; + dc->vmsd = &vmstate_pl190; +} + +static const TypeInfo pl190_info = { + .name = TYPE_PL190, + .parent = 
TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(PL190State), + .instance_init = pl190_init, + .class_init = pl190_class_init, +}; + +static void pl190_register_types(void) +{ + type_register_static(&pl190_info); +} + +type_init(pl190_register_types) diff --git a/hw/intc/pnv_xive.c b/hw/intc/pnv_xive.c new file mode 100644 index 000000000..ad4348361 --- /dev/null +++ b/hw/intc/pnv_xive.c @@ -0,0 +1,1987 @@ +/* + * QEMU PowerPC XIVE interrupt controller model + * + * Copyright (c) 2017-2019, IBM Corporation. + * + * This code is licensed under the GPL version 2 or later. See the + * COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "qapi/error.h" +#include "target/ppc/cpu.h" +#include "sysemu/cpus.h" +#include "sysemu/dma.h" +#include "sysemu/reset.h" +#include "monitor/monitor.h" +#include "hw/ppc/fdt.h" +#include "hw/ppc/pnv.h" +#include "hw/ppc/pnv_core.h" +#include "hw/ppc/pnv_xscom.h" +#include "hw/ppc/pnv_xive.h" +#include "hw/ppc/xive_regs.h" +#include "hw/qdev-properties.h" +#include "hw/ppc/ppc.h" +#include "trace.h" + +#include <libfdt.h> + +#include "pnv_xive_regs.h" + +#undef XIVE_DEBUG + +/* + * Virtual structures table (VST) + */ +#define SBE_PER_BYTE 4 + +typedef struct XiveVstInfo { + const char *name; + uint32_t size; + uint32_t max_blocks; +} XiveVstInfo; + +static const XiveVstInfo vst_infos[] = { + [VST_TSEL_IVT] = { "EAT", sizeof(XiveEAS), 16 }, + [VST_TSEL_SBE] = { "SBE", 1, 16 }, + [VST_TSEL_EQDT] = { "ENDT", sizeof(XiveEND), 16 }, + [VST_TSEL_VPDT] = { "VPDT", sizeof(XiveNVT), 32 }, + + /* + * Interrupt fifo backing store table (not modeled): + * + * 0 - IPI, + * 1 - HWD, + * 2 - First escalate, + * 3 - Second escalate, + * 4 - Redistribution, + * 5 - IPI cascaded queue? + */ + [VST_TSEL_IRQ] = { "IRQ", 1, 6 }, +}; + +#define xive_error(xive, fmt, ...) \ + qemu_log_mask(LOG_GUEST_ERROR, "XIVE[%x] - " fmt "\n", \ + (xive)->chip->chip_id, ## __VA_ARGS__); + +/* + * QEMU version of the GETFIELD/SETFIELD macros + * + * TODO: It might be better to use the existing extract64() and + * deposit64() but this means that all the register definitions will + * change and become incompatible with the ones found in skiboot. + * + * Keep it as it is for now until we find a common ground. + */ +static inline uint64_t GETFIELD(uint64_t mask, uint64_t word) +{ + return (word & mask) >> ctz64(mask); +} + +static inline uint64_t SETFIELD(uint64_t mask, uint64_t word, + uint64_t value) +{ + return (word & ~mask) | ((value << ctz64(mask)) & mask); +} + +/* + * When PC_TCTXT_CHIPID_OVERRIDE is configured, the PC_TCTXT_CHIPID + * field overrides the hardwired chip ID in the Powerbus operations + * and for CAM compares + */ +static uint8_t pnv_xive_block_id(PnvXive *xive) +{ + uint8_t blk = xive->chip->chip_id; + uint64_t cfg_val = xive->regs[PC_TCTXT_CFG >> 3]; + + if (cfg_val & PC_TCTXT_CHIPID_OVERRIDE) { + blk = GETFIELD(PC_TCTXT_CHIPID, cfg_val); + } + + return blk; +} + +/* + * Remote access to controllers. HW uses MMIOs. For now, a simple scan + * of the chips is good enough.
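Each chip's controller answers for a single block id (pnv_xive_block_id()), so a linear walk over the machine's chips is sufficient to resolve a forwarded VSD.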
+ * + * TODO: Block scope support + */ +static PnvXive *pnv_xive_get_remote(uint8_t blk) +{ + PnvMachineState *pnv = PNV_MACHINE(qdev_get_machine()); + int i; + + for (i = 0; i < pnv->num_chips; i++) { + Pnv9Chip *chip9 = PNV9_CHIP(pnv->chips[i]); + PnvXive *xive = &chip9->xive; + + if (pnv_xive_block_id(xive) == blk) { + return xive; + } + } + return NULL; +} + +/* + * VST accessors for SBE, EAT, ENDT, NVT + * + * Indirect VST tables are arrays of VSDs pointing to a page (of same + * size). Each page is a direct VST table. + */ + +#define XIVE_VSD_SIZE 8 + +/* Indirect page size can be 4K, 64K, 2M, 16M. */ +static uint64_t pnv_xive_vst_page_size_allowed(uint32_t page_shift) +{ + return page_shift == 12 || page_shift == 16 || + page_shift == 21 || page_shift == 24; +} + +static uint64_t pnv_xive_vst_addr_direct(PnvXive *xive, uint32_t type, + uint64_t vsd, uint32_t idx) +{ + const XiveVstInfo *info = &vst_infos[type]; + uint64_t vst_addr = vsd & VSD_ADDRESS_MASK; + uint64_t vst_tsize = 1ull << (GETFIELD(VSD_TSIZE, vsd) + 12); + uint32_t idx_max; + + idx_max = vst_tsize / info->size - 1; + if (idx > idx_max) { +#ifdef XIVE_DEBUG + xive_error(xive, "VST: %s entry %x out of range [ 0 .. %x ] !?", + info->name, idx, idx_max); +#endif + return 0; + } + + return vst_addr + idx * info->size; +} + +static uint64_t pnv_xive_vst_addr_indirect(PnvXive *xive, uint32_t type, + uint64_t vsd, uint32_t idx) +{ + const XiveVstInfo *info = &vst_infos[type]; + uint64_t vsd_addr; + uint32_t vsd_idx; + uint32_t page_shift; + uint32_t vst_per_page; + + /* Get the page size of the indirect table. */ + vsd_addr = vsd & VSD_ADDRESS_MASK; + vsd = ldq_be_dma(&address_space_memory, vsd_addr); + + if (!(vsd & VSD_ADDRESS_MASK)) { +#ifdef XIVE_DEBUG + xive_error(xive, "VST: invalid %s entry %x !?", info->name, idx); +#endif + return 0; + } + + page_shift = GETFIELD(VSD_TSIZE, vsd) + 12; + + if (!pnv_xive_vst_page_size_allowed(page_shift)) { + xive_error(xive, "VST: invalid %s page shift %d", info->name, + page_shift); + return 0; + } + + vst_per_page = (1ull << page_shift) / info->size; + vsd_idx = idx / vst_per_page; + + /* Load the VSD we are looking for, if not already done */ + if (vsd_idx) { + vsd_addr = vsd_addr + vsd_idx * XIVE_VSD_SIZE; + vsd = ldq_be_dma(&address_space_memory, vsd_addr); + + if (!(vsd & VSD_ADDRESS_MASK)) { +#ifdef XIVE_DEBUG + xive_error(xive, "VST: invalid %s entry %x !?", info->name, idx); +#endif + return 0; + } + + /* + * Check that the pages have a consistent size across the + * indirect table + */ + if (page_shift != GETFIELD(VSD_TSIZE, vsd) + 12) { + xive_error(xive, "VST: %s entry %x indirect page size differ !?", + info->name, idx); + return 0; + } + } + + return pnv_xive_vst_addr_direct(xive, type, vsd, (idx % vst_per_page)); +} + +static uint64_t pnv_xive_vst_addr(PnvXive *xive, uint32_t type, uint8_t blk, + uint32_t idx) +{ + const XiveVstInfo *info = &vst_infos[type]; + uint64_t vsd; + + if (blk >= info->max_blocks) { + xive_error(xive, "VST: invalid block id %d for VST %s %d !?", + blk, info->name, idx); + return 0; + } + + vsd = xive->vsds[type][blk]; + + /* Remote VST access */ + if (GETFIELD(VSD_MODE, vsd) == VSD_MODE_FORWARD) { + xive = pnv_xive_get_remote(blk); + + return xive ? 
pnv_xive_vst_addr(xive, type, blk, idx) : 0; + } + + if (VSD_INDIRECT & vsd) { + return pnv_xive_vst_addr_indirect(xive, type, vsd, idx); + } + + return pnv_xive_vst_addr_direct(xive, type, vsd, idx); +} + +static int pnv_xive_vst_read(PnvXive *xive, uint32_t type, uint8_t blk, + uint32_t idx, void *data) +{ + const XiveVstInfo *info = &vst_infos[type]; + uint64_t addr = pnv_xive_vst_addr(xive, type, blk, idx); + + if (!addr) { + return -1; + } + + cpu_physical_memory_read(addr, data, info->size); + return 0; +} + +#define XIVE_VST_WORD_ALL -1 + +static int pnv_xive_vst_write(PnvXive *xive, uint32_t type, uint8_t blk, + uint32_t idx, void *data, uint32_t word_number) +{ + const XiveVstInfo *info = &vst_infos[type]; + uint64_t addr = pnv_xive_vst_addr(xive, type, blk, idx); + + if (!addr) { + return -1; + } + + if (word_number == XIVE_VST_WORD_ALL) { + cpu_physical_memory_write(addr, data, info->size); + } else { + cpu_physical_memory_write(addr + word_number * 4, + data + word_number * 4, 4); + } + return 0; +} + +static int pnv_xive_get_end(XiveRouter *xrtr, uint8_t blk, uint32_t idx, + XiveEND *end) +{ + return pnv_xive_vst_read(PNV_XIVE(xrtr), VST_TSEL_EQDT, blk, idx, end); +} + +static int pnv_xive_write_end(XiveRouter *xrtr, uint8_t blk, uint32_t idx, + XiveEND *end, uint8_t word_number) +{ + return pnv_xive_vst_write(PNV_XIVE(xrtr), VST_TSEL_EQDT, blk, idx, end, + word_number); +} + +static int pnv_xive_end_update(PnvXive *xive) +{ + uint8_t blk = GETFIELD(VC_EQC_CWATCH_BLOCKID, + xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]); + uint32_t idx = GETFIELD(VC_EQC_CWATCH_OFFSET, + xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]); + int i; + uint64_t eqc_watch[4]; + + for (i = 0; i < ARRAY_SIZE(eqc_watch); i++) { + eqc_watch[i] = cpu_to_be64(xive->regs[(VC_EQC_CWATCH_DAT0 >> 3) + i]); + } + + return pnv_xive_vst_write(xive, VST_TSEL_EQDT, blk, idx, eqc_watch, + XIVE_VST_WORD_ALL); +} + +static void pnv_xive_end_cache_load(PnvXive *xive) +{ + uint8_t blk = GETFIELD(VC_EQC_CWATCH_BLOCKID, + xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]); + uint32_t idx = GETFIELD(VC_EQC_CWATCH_OFFSET, + xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]); + uint64_t eqc_watch[4] = { 0 }; + int i; + + if (pnv_xive_vst_read(xive, VST_TSEL_EQDT, blk, idx, eqc_watch)) { + xive_error(xive, "VST: no END entry %x/%x !?", blk, idx); + } + + for (i = 0; i < ARRAY_SIZE(eqc_watch); i++) { + xive->regs[(VC_EQC_CWATCH_DAT0 >> 3) + i] = be64_to_cpu(eqc_watch[i]); + } +} + +static int pnv_xive_get_nvt(XiveRouter *xrtr, uint8_t blk, uint32_t idx, + XiveNVT *nvt) +{ + return pnv_xive_vst_read(PNV_XIVE(xrtr), VST_TSEL_VPDT, blk, idx, nvt); +} + +static int pnv_xive_write_nvt(XiveRouter *xrtr, uint8_t blk, uint32_t idx, + XiveNVT *nvt, uint8_t word_number) +{ + return pnv_xive_vst_write(PNV_XIVE(xrtr), VST_TSEL_VPDT, blk, idx, nvt, + word_number); +} + +static int pnv_xive_nvt_update(PnvXive *xive) +{ + uint8_t blk = GETFIELD(PC_VPC_CWATCH_BLOCKID, + xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]); + uint32_t idx = GETFIELD(PC_VPC_CWATCH_OFFSET, + xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]); + int i; + uint64_t vpc_watch[8]; + + for (i = 0; i < ARRAY_SIZE(vpc_watch); i++) { + vpc_watch[i] = cpu_to_be64(xive->regs[(PC_VPC_CWATCH_DAT0 >> 3) + i]); + } + + return pnv_xive_vst_write(xive, VST_TSEL_VPDT, blk, idx, vpc_watch, + XIVE_VST_WORD_ALL); +} + +static void pnv_xive_nvt_cache_load(PnvXive *xive) +{ + uint8_t blk = GETFIELD(PC_VPC_CWATCH_BLOCKID, + xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]); + uint32_t idx = GETFIELD(PC_VPC_CWATCH_OFFSET, + xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]); 
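+ /* + * The cache watch mirrors one NVT entry: SPEC selects the blk/idx to + * load, and DAT0-7 receive its eight words for the guest to read. + */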
+ uint64_t vpc_watch[8] = { 0 }; + int i; + + if (pnv_xive_vst_read(xive, VST_TSEL_VPDT, blk, idx, vpc_watch)) { + xive_error(xive, "VST: no NVT entry %x/%x !?", blk, idx); + } + + for (i = 0; i < ARRAY_SIZE(vpc_watch); i++) { + xive->regs[(PC_VPC_CWATCH_DAT0 >> 3) + i] = be64_to_cpu(vpc_watch[i]); + } +} + +static int pnv_xive_get_eas(XiveRouter *xrtr, uint8_t blk, uint32_t idx, + XiveEAS *eas) +{ + PnvXive *xive = PNV_XIVE(xrtr); + + /* + * EAT lookups should be local to the IC + */ + if (pnv_xive_block_id(xive) != blk) { + xive_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx)); + return -1; + } + + return pnv_xive_vst_read(xive, VST_TSEL_IVT, blk, idx, eas); +} + +/* + * One bit per thread id. The first register PC_THREAD_EN_REG0 covers + * the first cores 0-15 (normal) of the chip or 0-7 (fused). The + * second register covers cores 16-23 (normal) or 8-11 (fused). + */ +static bool pnv_xive_is_cpu_enabled(PnvXive *xive, PowerPCCPU *cpu) +{ + int pir = ppc_cpu_pir(cpu); + uint32_t fc = PNV9_PIR2FUSEDCORE(pir); + uint64_t reg = fc < 8 ? PC_THREAD_EN_REG0 : PC_THREAD_EN_REG1; + uint32_t bit = pir & 0x3f; + + return xive->regs[reg >> 3] & PPC_BIT(bit); +} + +static int pnv_xive_match_nvt(XivePresenter *xptr, uint8_t format, + uint8_t nvt_blk, uint32_t nvt_idx, + bool cam_ignore, uint8_t priority, + uint32_t logic_serv, XiveTCTXMatch *match) +{ + PnvXive *xive = PNV_XIVE(xptr); + PnvChip *chip = xive->chip; + int count = 0; + int i, j; + + for (i = 0; i < chip->nr_cores; i++) { + PnvCore *pc = chip->cores[i]; + CPUCore *cc = CPU_CORE(pc); + + for (j = 0; j < cc->nr_threads; j++) { + PowerPCCPU *cpu = pc->threads[j]; + XiveTCTX *tctx; + int ring; + + if (!pnv_xive_is_cpu_enabled(xive, cpu)) { + continue; + } + + tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc); + + /* + * Check the thread context CAM lines and record matches. + */ + ring = xive_presenter_tctx_match(xptr, tctx, format, nvt_blk, + nvt_idx, cam_ignore, logic_serv); + /* + * Save the context and follow on to catch duplicates, that we + * don't support yet. + */ + if (ring != -1) { + if (match->tctx) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: already found a " + "thread context NVT %x/%x\n", + nvt_blk, nvt_idx); + return -1; + } + + match->ring = ring; + match->tctx = tctx; + count++; + } + } + } + + return count; +} + +static uint8_t pnv_xive_get_block_id(XiveRouter *xrtr) +{ + return pnv_xive_block_id(PNV_XIVE(xrtr)); +} + +/* + * The TIMA MMIO space is shared among the chips and to identify the + * chip from which the access is being done, we extract the chip id + * from the PIR. + */ +static PnvXive *pnv_xive_tm_get_xive(PowerPCCPU *cpu) +{ + int pir = ppc_cpu_pir(cpu); + XivePresenter *xptr = XIVE_TCTX(pnv_cpu_state(cpu)->intc)->xptr; + PnvXive *xive = PNV_XIVE(xptr); + + if (!pnv_xive_is_cpu_enabled(xive, cpu)) { + xive_error(xive, "IC: CPU %x is not enabled", pir); + } + return xive; +} + +/* + * The internal sources (IPIs) of the interrupt controller have no + * knowledge of the XIVE chip on which they reside. Encode the block + * id in the source interrupt number before forwarding the source + * event notification to the Router. This is required on a multichip + * system. 
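The XIVE_EAS() macro packs the block id into the upper bits of the 32-bit EAS number and the source index into the lower bits, so sources on different chips never alias.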
+ */ +static void pnv_xive_notify(XiveNotifier *xn, uint32_t srcno) +{ + PnvXive *xive = PNV_XIVE(xn); + uint8_t blk = pnv_xive_block_id(xive); + + xive_router_notify(xn, XIVE_EAS(blk, srcno)); +} + +/* + * XIVE helpers + */ + +static uint64_t pnv_xive_vc_size(PnvXive *xive) +{ + return (~xive->regs[CQ_VC_BARM >> 3] + 1) & CQ_VC_BARM_MASK; +} + +static uint64_t pnv_xive_edt_shift(PnvXive *xive) +{ + return ctz64(pnv_xive_vc_size(xive) / XIVE_TABLE_EDT_MAX); +} + +static uint64_t pnv_xive_pc_size(PnvXive *xive) +{ + return (~xive->regs[CQ_PC_BARM >> 3] + 1) & CQ_PC_BARM_MASK; +} + +static uint32_t pnv_xive_nr_ipis(PnvXive *xive, uint8_t blk) +{ + uint64_t vsd = xive->vsds[VST_TSEL_SBE][blk]; + uint64_t vst_tsize = 1ull << (GETFIELD(VSD_TSIZE, vsd) + 12); + + return VSD_INDIRECT & vsd ? 0 : vst_tsize * SBE_PER_BYTE; +} + +/* + * Compute the number of entries per indirect subpage. + */ +static uint64_t pnv_xive_vst_per_subpage(PnvXive *xive, uint32_t type) +{ + uint8_t blk = pnv_xive_block_id(xive); + uint64_t vsd = xive->vsds[type][blk]; + const XiveVstInfo *info = &vst_infos[type]; + uint64_t vsd_addr; + uint32_t page_shift; + + /* For direct tables, fake a valid value */ + if (!(VSD_INDIRECT & vsd)) { + return 1; + } + + /* Get the page size of the indirect table. */ + vsd_addr = vsd & VSD_ADDRESS_MASK; + vsd = ldq_be_dma(&address_space_memory, vsd_addr); + + if (!(vsd & VSD_ADDRESS_MASK)) { +#ifdef XIVE_DEBUG + xive_error(xive, "VST: invalid %s entry %x !?", info->name, idx); +#endif + return 0; + } + + page_shift = GETFIELD(VSD_TSIZE, vsd) + 12; + + if (!pnv_xive_vst_page_size_allowed(page_shift)) { + xive_error(xive, "VST: invalid %s page shift %d", info->name, + page_shift); + return 0; + } + + return (1ull << page_shift) / info->size; +} + +/* + * EDT Table + * + * The Virtualization Controller MMIO region containing the IPI ESB + * pages and END ESB pages is sub-divided into "sets" which map + * portions of the VC region to the different ESB pages. It is + * configured at runtime through the EDT "Domain Table" to let the + * firmware decide how to split the VC address space between IPI ESB + * pages and END ESB pages. 
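Each of the XIVE_TABLE_EDT_MAX sets covers vc_size / XIVE_TABLE_EDT_MAX bytes (see pnv_xive_edt_shift() above); pnv_xive_edt_size() below accumulates the per-type total.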
+ */ + +/* + * Computes the overall size of the IPI or the END ESB pages + */ +static uint64_t pnv_xive_edt_size(PnvXive *xive, uint64_t type) +{ + uint64_t edt_size = 1ull << pnv_xive_edt_shift(xive); + uint64_t size = 0; + int i; + + for (i = 0; i < XIVE_TABLE_EDT_MAX; i++) { + uint64_t edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[i]); + + if (edt_type == type) { + size += edt_size; + } + } + + return size; +} + +/* + * Maps an offset of the VC region in the IPI or END region using the + * layout defined by the EDT "Domain Table" + */ +static uint64_t pnv_xive_edt_offset(PnvXive *xive, uint64_t vc_offset, + uint64_t type) +{ + int i; + uint64_t edt_size = 1ull << pnv_xive_edt_shift(xive); + uint64_t edt_offset = vc_offset; + + for (i = 0; i < XIVE_TABLE_EDT_MAX && (i * edt_size) < vc_offset; i++) { + uint64_t edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[i]); + + if (edt_type != type) { + edt_offset -= edt_size; + } + } + + return edt_offset; +} + +static void pnv_xive_edt_resize(PnvXive *xive) +{ + uint64_t ipi_edt_size = pnv_xive_edt_size(xive, CQ_TDR_EDT_IPI); + uint64_t end_edt_size = pnv_xive_edt_size(xive, CQ_TDR_EDT_EQ); + + memory_region_set_size(&xive->ipi_edt_mmio, ipi_edt_size); + memory_region_add_subregion(&xive->ipi_mmio, 0, &xive->ipi_edt_mmio); + + memory_region_set_size(&xive->end_edt_mmio, end_edt_size); + memory_region_add_subregion(&xive->end_mmio, 0, &xive->end_edt_mmio); +} + +/* + * XIVE Table configuration. Only EDT is supported. + */ +static int pnv_xive_table_set_data(PnvXive *xive, uint64_t val) +{ + uint64_t tsel = xive->regs[CQ_TAR >> 3] & CQ_TAR_TSEL; + uint8_t tsel_index = GETFIELD(CQ_TAR_TSEL_INDEX, xive->regs[CQ_TAR >> 3]); + uint64_t *xive_table; + uint8_t max_index; + + switch (tsel) { + case CQ_TAR_TSEL_BLK: + max_index = ARRAY_SIZE(xive->blk); + xive_table = xive->blk; + break; + case CQ_TAR_TSEL_MIG: + max_index = ARRAY_SIZE(xive->mig); + xive_table = xive->mig; + break; + case CQ_TAR_TSEL_EDT: + max_index = ARRAY_SIZE(xive->edt); + xive_table = xive->edt; + break; + case CQ_TAR_TSEL_VDT: + max_index = ARRAY_SIZE(xive->vdt); + xive_table = xive->vdt; + break; + default: + xive_error(xive, "IC: invalid table %d", (int) tsel); + return -1; + } + + if (tsel_index >= max_index) { + xive_error(xive, "IC: invalid index %d", (int) tsel_index); + return -1; + } + + xive_table[tsel_index] = val; + + if (xive->regs[CQ_TAR >> 3] & CQ_TAR_TBL_AUTOINC) { + xive->regs[CQ_TAR >> 3] = + SETFIELD(CQ_TAR_TSEL_INDEX, xive->regs[CQ_TAR >> 3], ++tsel_index); + } + + /* + * EDT configuration is complete. Resize the MMIO windows exposing + * the IPI and the END ESBs in the VC region.
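With CQ_TAR_TBL_AUTOINC set, firmware writes CQ_TDR once per entry while the index advances automatically; the resize below triggers once the last EDT entry has been written.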
+ */ + if (tsel == CQ_TAR_TSEL_EDT && tsel_index == ARRAY_SIZE(xive->edt)) { + pnv_xive_edt_resize(xive); + } + + return 0; +} + +/* + * Virtual Structure Tables (VST) configuration + */ +static void pnv_xive_vst_set_exclusive(PnvXive *xive, uint8_t type, + uint8_t blk, uint64_t vsd) +{ + XiveENDSource *end_xsrc = &xive->end_source; + XiveSource *xsrc = &xive->ipi_source; + const XiveVstInfo *info = &vst_infos[type]; + uint32_t page_shift = GETFIELD(VSD_TSIZE, vsd) + 12; + uint64_t vst_tsize = 1ull << page_shift; + uint64_t vst_addr = vsd & VSD_ADDRESS_MASK; + + /* Basic checks */ + + if (VSD_INDIRECT & vsd) { + if (!(xive->regs[VC_GLOBAL_CONFIG >> 3] & VC_GCONF_INDIRECT)) { + xive_error(xive, "VST: %s indirect tables are not enabled", + info->name); + return; + } + + if (!pnv_xive_vst_page_size_allowed(page_shift)) { + xive_error(xive, "VST: invalid %s page shift %d", info->name, + page_shift); + return; + } + } + + if (!QEMU_IS_ALIGNED(vst_addr, 1ull << page_shift)) { + xive_error(xive, "VST: %s table address 0x%"PRIx64" is not aligned with" + " page shift %d", info->name, vst_addr, page_shift); + return; + } + + /* Record the table configuration (in SRAM on HW) */ + xive->vsds[type][blk] = vsd; + + /* Now tune the models with the configuration provided by the FW */ + + switch (type) { + case VST_TSEL_IVT: /* Nothing to be done */ + break; + + case VST_TSEL_EQDT: + /* + * Backing store pages for the END. + * + * If the table is direct, we can compute the number of PQ + * entries provisioned by FW (such as skiboot) and resize the + * END ESB window accordingly. + */ + if (!(VSD_INDIRECT & vsd)) { + memory_region_set_size(&end_xsrc->esb_mmio, (vst_tsize / info->size) + * (1ull << xsrc->esb_shift)); + } + memory_region_add_subregion(&xive->end_edt_mmio, 0, + &end_xsrc->esb_mmio); + break; + + case VST_TSEL_SBE: + /* + * Backing store pages for the source PQ bits. The model does + * not use these PQ bits backed in RAM because the XiveSource + * model has its own. + * + * If the table is direct, we can compute the number of PQ + * entries provisioned by FW (such as skiboot) and resize the + * ESB window accordingly. + */ + if (!(VSD_INDIRECT & vsd)) { + memory_region_set_size(&xsrc->esb_mmio, vst_tsize * SBE_PER_BYTE + * (1ull << xsrc->esb_shift)); + } + memory_region_add_subregion(&xive->ipi_edt_mmio, 0, &xsrc->esb_mmio); + break; + + case VST_TSEL_VPDT: /* Not modeled */ + case VST_TSEL_IRQ: /* Not modeled */ + /* + * These tables contain the backing store pages for the + * interrupt fifos of the VC sub-engine in case of overflow. + */ + break; + + default: + g_assert_not_reached(); + } +} + +/* + * Both PC and VC sub-engines are configured the same way, as each uses + * the Virtual Structure Tables: SBE, EAS, END and NVT.
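Since the XiveRouter model merges the two sub-engines, pnv_xive_vst_set_data() below intentionally ignores the PC-side writes and records only the VC-side VSDs.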
+ */ +static void pnv_xive_vst_set_data(PnvXive *xive, uint64_t vsd, bool pc_engine) +{ + uint8_t mode = GETFIELD(VSD_MODE, vsd); + uint8_t type = GETFIELD(VST_TABLE_SELECT, + xive->regs[VC_VSD_TABLE_ADDR >> 3]); + uint8_t blk = GETFIELD(VST_TABLE_BLOCK, + xive->regs[VC_VSD_TABLE_ADDR >> 3]); + uint64_t vst_addr = vsd & VSD_ADDRESS_MASK; + + if (type > VST_TSEL_IRQ) { + xive_error(xive, "VST: invalid table type %d", type); + return; + } + + if (blk >= vst_infos[type].max_blocks) { + xive_error(xive, "VST: invalid block id %d for" + " %s table", blk, vst_infos[type].name); + return; + } + + /* + * Only take the VC sub-engine configuration into account because + * the XiveRouter model combines both VC and PC sub-engines + */ + if (pc_engine) { + return; + } + + if (!vst_addr) { + xive_error(xive, "VST: invalid %s table address", vst_infos[type].name); + return; + } + + switch (mode) { + case VSD_MODE_FORWARD: + xive->vsds[type][blk] = vsd; + break; + + case VSD_MODE_EXCLUSIVE: + pnv_xive_vst_set_exclusive(xive, type, blk, vsd); + break; + + default: + xive_error(xive, "VST: unsupported table mode %d", mode); + return; + } +} + +/* + * Interrupt controller MMIO region. The layout is compatible between + * 4K and 64K pages: + * + * Page 0 sub-engine BARs + * 0x000 - 0x3FF IC registers + * 0x400 - 0x7FF PC registers + * 0x800 - 0xFFF VC registers + * + * Page 1 Notify page (writes only) + * 0x000 - 0x7FF HW interrupt triggers (PSI, PHB) + * 0x800 - 0xFFF forwards and syncs + * + * Page 2 LSI Trigger page (writes only) (not modeled) + * Page 3 LSI SB EOI page (reads only) (not modeled) + * + * Page 4-7 indirect TIMA + */ + +/* + * IC - registers MMIO + */ +static void pnv_xive_ic_reg_write(void *opaque, hwaddr offset, + uint64_t val, unsigned size) +{ + PnvXive *xive = PNV_XIVE(opaque); + MemoryRegion *sysmem = get_system_memory(); + uint32_t reg = offset >> 3; + bool is_chip0 = xive->chip->chip_id == 0; + + switch (offset) { + + /* + * XIVE CQ (PowerBus bridge) settings + */ + case CQ_MSGSND: /* msgsnd for doorbells */ + case CQ_FIRMASK_OR: /* FIR error reporting */ + break; + case CQ_PBI_CTL: + if (val & CQ_PBI_PC_64K) { + xive->pc_shift = 16; + } + if (val & CQ_PBI_VC_64K) { + xive->vc_shift = 16; + } + break; + case CQ_CFG_PB_GEN: /* PowerBus General Configuration */ + /* + * TODO: CQ_INT_ADDR_OPT for 1-block-per-chip mode + */ + break; + + /* + * XIVE Virtualization Controller settings + */ + case VC_GLOBAL_CONFIG: + break; + + /* + * XIVE Presenter Controller settings + */ + case PC_GLOBAL_CONFIG: + /* + * PC_GCONF_CHIPID_OVR + * Overrides Int command Chip ID with the Chip ID field (DEBUG) + */ + break; + case PC_TCTXT_CFG: + /* + * TODO: block group support + */ + break; + case PC_TCTXT_TRACK: + /* + * PC_TCTXT_TRACK_EN: + * enable block tracking and exchange of block ownership + * information between Interrupt controllers + */ + break; + + /* + * Misc settings + */ + case VC_SBC_CONFIG: /* Store EOI configuration */ + /* + * Configure store EOI if required by firmware (skiboot has removed + * support recently though) + */ + if (val & (VC_SBC_CONF_CPLX_CIST | VC_SBC_CONF_CIST_BOTH)) { + xive->ipi_source.esb_flags |= XIVE_SRC_STORE_EOI; + } + break; + + case VC_EQC_CONFIG: /* TODO: silent escalation */ + case VC_AIB_TX_ORDER_TAG2: /* relax ordering */ + break; + + /* + * XIVE BAR settings (XSCOM only) + */ + case CQ_RST_CTL: + /* bit4: resets all BAR registers */ + break; + + case CQ_IC_BAR: /* IC BAR. 8 pages */ + xive->ic_shift = val & CQ_IC_BAR_64K ?
16 : 12; + if (!(val & CQ_IC_BAR_VALID)) { + xive->ic_base = 0; + if (xive->regs[reg] & CQ_IC_BAR_VALID) { + memory_region_del_subregion(&xive->ic_mmio, + &xive->ic_reg_mmio); + memory_region_del_subregion(&xive->ic_mmio, + &xive->ic_notify_mmio); + memory_region_del_subregion(&xive->ic_mmio, + &xive->ic_lsi_mmio); + memory_region_del_subregion(&xive->ic_mmio, + &xive->tm_indirect_mmio); + + memory_region_del_subregion(sysmem, &xive->ic_mmio); + } + } else { + xive->ic_base = val & ~(CQ_IC_BAR_VALID | CQ_IC_BAR_64K); + if (!(xive->regs[reg] & CQ_IC_BAR_VALID)) { + memory_region_add_subregion(sysmem, xive->ic_base, + &xive->ic_mmio); + + memory_region_add_subregion(&xive->ic_mmio, 0, + &xive->ic_reg_mmio); + memory_region_add_subregion(&xive->ic_mmio, + 1ul << xive->ic_shift, + &xive->ic_notify_mmio); + memory_region_add_subregion(&xive->ic_mmio, + 2ul << xive->ic_shift, + &xive->ic_lsi_mmio); + memory_region_add_subregion(&xive->ic_mmio, + 4ull << xive->ic_shift, + &xive->tm_indirect_mmio); + } + } + break; + + case CQ_TM1_BAR: /* TM BAR. 4 pages. Map only once */ + case CQ_TM2_BAR: /* second TM BAR. for hotplug. Not modeled */ + xive->tm_shift = val & CQ_TM_BAR_64K ? 16 : 12; + if (!(val & CQ_TM_BAR_VALID)) { + xive->tm_base = 0; + if (xive->regs[reg] & CQ_TM_BAR_VALID && is_chip0) { + memory_region_del_subregion(sysmem, &xive->tm_mmio); + } + } else { + xive->tm_base = val & ~(CQ_TM_BAR_VALID | CQ_TM_BAR_64K); + if (!(xive->regs[reg] & CQ_TM_BAR_VALID) && is_chip0) { + memory_region_add_subregion(sysmem, xive->tm_base, + &xive->tm_mmio); + } + } + break; + + case CQ_PC_BARM: + xive->regs[reg] = val; + memory_region_set_size(&xive->pc_mmio, pnv_xive_pc_size(xive)); + break; + case CQ_PC_BAR: /* From 32M to 512G */ + if (!(val & CQ_PC_BAR_VALID)) { + xive->pc_base = 0; + if (xive->regs[reg] & CQ_PC_BAR_VALID) { + memory_region_del_subregion(sysmem, &xive->pc_mmio); + } + } else { + xive->pc_base = val & ~(CQ_PC_BAR_VALID); + if (!(xive->regs[reg] & CQ_PC_BAR_VALID)) { + memory_region_add_subregion(sysmem, xive->pc_base, + &xive->pc_mmio); + } + } + break; + + case CQ_VC_BARM: + xive->regs[reg] = val; + memory_region_set_size(&xive->vc_mmio, pnv_xive_vc_size(xive)); + break; + case CQ_VC_BAR: /* From 64M to 4TB */ + if (!(val & CQ_VC_BAR_VALID)) { + xive->vc_base = 0; + if (xive->regs[reg] & CQ_VC_BAR_VALID) { + memory_region_del_subregion(sysmem, &xive->vc_mmio); + } + } else { + xive->vc_base = val & ~(CQ_VC_BAR_VALID); + if (!(xive->regs[reg] & CQ_VC_BAR_VALID)) { + memory_region_add_subregion(sysmem, xive->vc_base, + &xive->vc_mmio); + } + } + break; + + /* + * XIVE Table settings. 
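For illustration, a self-contained sketch (not part of the patch) of the BAR decoding pattern shared by the CQ_IC_BAR, CQ_TM*_BAR, CQ_PC_BAR and CQ_VC_BAR cases above: bit 0 is a valid bit, bit 1 selects 64K pages, and the remaining bits carry the base address; the model maps or unmaps the region only when the valid bit changes. The sample value written here is arbitrary:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BAR_VALID 0x8000000000000000ULL  /* PPC_BIT(0) */
#define BAR_64K   0x4000000000000000ULL  /* PPC_BIT(1) */

int main(void)
{
    uint64_t val = BAR_VALID | BAR_64K | 0x0006030200000000ULL; /* sample */

    bool     valid = val & BAR_VALID;
    unsigned shift = (val & BAR_64K) ? 16 : 12;  /* page size of the window */
    uint64_t base  = val & ~(BAR_VALID | BAR_64K);

    printf("valid=%d page_shift=%u base=0x%llx\n",
           valid, shift, (unsigned long long)base);
    return 0;
}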
+ */ + case CQ_TAR: /* Table Address */ + break; + case CQ_TDR: /* Table Data */ + pnv_xive_table_set_data(xive, val); + break; + + /* + * XIVE VC & PC Virtual Structure Table settings + */ + case VC_VSD_TABLE_ADDR: + case PC_VSD_TABLE_ADDR: /* Virtual table selector */ + break; + case VC_VSD_TABLE_DATA: /* Virtual table setting */ + case PC_VSD_TABLE_DATA: + pnv_xive_vst_set_data(xive, val, offset == PC_VSD_TABLE_DATA); + break; + + /* + * Interrupt fifo overflow in memory backing store (Not modeled) + */ + case VC_IRQ_CONFIG_IPI: + case VC_IRQ_CONFIG_HW: + case VC_IRQ_CONFIG_CASCADE1: + case VC_IRQ_CONFIG_CASCADE2: + case VC_IRQ_CONFIG_REDIST: + case VC_IRQ_CONFIG_IPI_CASC: + break; + + /* + * XIVE hardware thread enablement + */ + case PC_THREAD_EN_REG0: /* Physical Thread Enable */ + case PC_THREAD_EN_REG1: /* Physical Thread Enable (fused core) */ + break; + + case PC_THREAD_EN_REG0_SET: + xive->regs[PC_THREAD_EN_REG0 >> 3] |= val; + break; + case PC_THREAD_EN_REG1_SET: + xive->regs[PC_THREAD_EN_REG1 >> 3] |= val; + break; + case PC_THREAD_EN_REG0_CLR: + xive->regs[PC_THREAD_EN_REG0 >> 3] &= ~val; + break; + case PC_THREAD_EN_REG1_CLR: + xive->regs[PC_THREAD_EN_REG1 >> 3] &= ~val; + break; + + /* + * Indirect TIMA access set up. Defines the PIR of the HW thread + * to use. + */ + case PC_TCTXT_INDIR0 ... PC_TCTXT_INDIR3: + break; + + /* + * XIVE PC & VC cache updates for EAS, NVT and END + */ + case VC_IVC_SCRUB_MASK: + case VC_IVC_SCRUB_TRIG: + break; + + case VC_EQC_CWATCH_SPEC: + val &= ~VC_EQC_CWATCH_CONFLICT; /* HW resets this bit */ + break; + case VC_EQC_CWATCH_DAT1 ... VC_EQC_CWATCH_DAT3: + break; + case VC_EQC_CWATCH_DAT0: + /* writing to DATA0 triggers the cache write */ + xive->regs[reg] = val; + pnv_xive_end_update(xive); + break; + case VC_EQC_SCRUB_MASK: + case VC_EQC_SCRUB_TRIG: + /* + * The scrubbing registers flush the cache in RAM and can also + * invalidate. + */ + break; + + case PC_VPC_CWATCH_SPEC: + val &= ~PC_VPC_CWATCH_CONFLICT; /* HW resets this bit */ + break; + case PC_VPC_CWATCH_DAT1 ... PC_VPC_CWATCH_DAT7: + break; + case PC_VPC_CWATCH_DAT0: + /* writing to DATA0 triggers the cache write */ + xive->regs[reg] = val; + pnv_xive_nvt_update(xive); + break; + case PC_VPC_SCRUB_MASK: + case PC_VPC_SCRUB_TRIG: + /* + * The scrubbing registers flush the cache in RAM and can also + * invalidate. 
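A standalone sketch (not part of the patch) of the cache-watch handshake modeled by the VC_EQC_CWATCH_* and PC_VPC_CWATCH_* cases above: SPEC selects the cached entry, the DATn registers are simply latched, and the write to DAT0 commits the update; the register indices used here are hypothetical:

#include <stdint.h>
#include <stdio.h>

static uint64_t spec, dat[4];   /* zero-initialized, like the model's regs[] */

static void cwatch_write(unsigned reg, uint64_t val)
{
    if (reg == 0) {             /* SPEC: selects the cached entry */
        spec = val;
    } else if (reg == 1) {      /* DAT0: triggers the cache update */
        dat[0] = val;
        printf("commit entry 0x%llx = %llx %llx\n",
               (unsigned long long)spec,
               (unsigned long long)dat[0], (unsigned long long)dat[1]);
    } else {                    /* DAT1..DAT3: simply latched */
        dat[reg - 1] = val;
    }
}

int main(void)
{
    cwatch_write(0, 0x42);      /* select block/offset of the entry */
    cwatch_write(2, 0x1111);    /* DAT1 first ... */
    cwatch_write(1, 0x2222);    /* ... DAT0 last, which commits */
    return 0;
}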
+ */ + break; + + + /* + * XIVE PC & VC cache invalidation + */ + case PC_AT_KILL: + break; + case VC_AT_MACRO_KILL: + break; + case PC_AT_KILL_MASK: + case VC_AT_MACRO_KILL_MASK: + break; + + default: + xive_error(xive, "IC: invalid write to reg=0x%"HWADDR_PRIx, offset); + return; + } + + xive->regs[reg] = val; +} + +static uint64_t pnv_xive_ic_reg_read(void *opaque, hwaddr offset, unsigned size) +{ + PnvXive *xive = PNV_XIVE(opaque); + uint64_t val = 0; + uint32_t reg = offset >> 3; + + switch (offset) { + case CQ_CFG_PB_GEN: + case CQ_IC_BAR: + case CQ_TM1_BAR: + case CQ_TM2_BAR: + case CQ_PC_BAR: + case CQ_PC_BARM: + case CQ_VC_BAR: + case CQ_VC_BARM: + case CQ_TAR: + case CQ_TDR: + case CQ_PBI_CTL: + + case PC_TCTXT_CFG: + case PC_TCTXT_TRACK: + case PC_TCTXT_INDIR0: + case PC_TCTXT_INDIR1: + case PC_TCTXT_INDIR2: + case PC_TCTXT_INDIR3: + case PC_GLOBAL_CONFIG: + + case PC_VPC_SCRUB_MASK: + + case VC_GLOBAL_CONFIG: + case VC_AIB_TX_ORDER_TAG2: + + case VC_IRQ_CONFIG_IPI: + case VC_IRQ_CONFIG_HW: + case VC_IRQ_CONFIG_CASCADE1: + case VC_IRQ_CONFIG_CASCADE2: + case VC_IRQ_CONFIG_REDIST: + case VC_IRQ_CONFIG_IPI_CASC: + + case VC_EQC_SCRUB_MASK: + case VC_IVC_SCRUB_MASK: + case VC_SBC_CONFIG: + case VC_AT_MACRO_KILL_MASK: + case VC_VSD_TABLE_ADDR: + case PC_VSD_TABLE_ADDR: + case VC_VSD_TABLE_DATA: + case PC_VSD_TABLE_DATA: + case PC_THREAD_EN_REG0: + case PC_THREAD_EN_REG1: + val = xive->regs[reg]; + break; + + /* + * XIVE hardware thread enablement + */ + case PC_THREAD_EN_REG0_SET: + case PC_THREAD_EN_REG0_CLR: + val = xive->regs[PC_THREAD_EN_REG0 >> 3]; + break; + case PC_THREAD_EN_REG1_SET: + case PC_THREAD_EN_REG1_CLR: + val = xive->regs[PC_THREAD_EN_REG1 >> 3]; + break; + + case CQ_MSGSND: /* Identifies which cores have msgsnd enabled. */ + val = 0xffffff0000000000; + break; + + /* + * XIVE PC & VC cache updates for EAS, NVT and END + */ + case VC_EQC_CWATCH_SPEC: + xive->regs[reg] = ~(VC_EQC_CWATCH_FULL | VC_EQC_CWATCH_CONFLICT); + val = xive->regs[reg]; + break; + case VC_EQC_CWATCH_DAT0: + /* + * Load DATA registers from cache with data requested by the + * SPEC register + */ + pnv_xive_end_cache_load(xive); + val = xive->regs[reg]; + break; + case VC_EQC_CWATCH_DAT1 ... VC_EQC_CWATCH_DAT3: + val = xive->regs[reg]; + break; + + case PC_VPC_CWATCH_SPEC: + xive->regs[reg] = ~(PC_VPC_CWATCH_FULL | PC_VPC_CWATCH_CONFLICT); + val = xive->regs[reg]; + break; + case PC_VPC_CWATCH_DAT0: + /* + * Load DATA registers from cache with data requested by the + * SPEC register + */ + pnv_xive_nvt_cache_load(xive); + val = xive->regs[reg]; + break; + case PC_VPC_CWATCH_DAT1 ... 
PC_VPC_CWATCH_DAT7: + val = xive->regs[reg]; + break; + + case PC_VPC_SCRUB_TRIG: + case VC_IVC_SCRUB_TRIG: + case VC_EQC_SCRUB_TRIG: + xive->regs[reg] &= ~VC_SCRUB_VALID; + val = xive->regs[reg]; + break; + + /* + * XIVE PC & VC cache invalidation + */ + case PC_AT_KILL: + xive->regs[reg] &= ~PC_AT_KILL_VALID; + val = xive->regs[reg]; + break; + case VC_AT_MACRO_KILL: + xive->regs[reg] &= ~VC_KILL_VALID; + val = xive->regs[reg]; + break; + + /* + * XIVE synchronisation + */ + case VC_EQC_CONFIG: + val = VC_EQC_SYNC_MASK; + break; + + default: + xive_error(xive, "IC: invalid read reg=0x%"HWADDR_PRIx, offset); + } + + return val; +} + +static const MemoryRegionOps pnv_xive_ic_reg_ops = { + .read = pnv_xive_ic_reg_read, + .write = pnv_xive_ic_reg_write, + .endianness = DEVICE_BIG_ENDIAN, + .valid = { + .min_access_size = 8, + .max_access_size = 8, + }, + .impl = { + .min_access_size = 8, + .max_access_size = 8, + }, +}; + +/* + * IC - Notify MMIO port page (write only) + */ +#define PNV_XIVE_FORWARD_IPI 0x800 /* Forward IPI */ +#define PNV_XIVE_FORWARD_HW 0x880 /* Forward HW */ +#define PNV_XIVE_FORWARD_OS_ESC 0x900 /* Forward OS escalation */ +#define PNV_XIVE_FORWARD_HW_ESC 0x980 /* Forward Hyp escalation */ +#define PNV_XIVE_FORWARD_REDIS 0xa00 /* Forward Redistribution */ +#define PNV_XIVE_RESERVED5 0xa80 /* Cache line 5 PowerBUS operation */ +#define PNV_XIVE_RESERVED6 0xb00 /* Cache line 6 PowerBUS operation */ +#define PNV_XIVE_RESERVED7 0xb80 /* Cache line 7 PowerBUS operation */ + +/* VC synchronisation */ +#define PNV_XIVE_SYNC_IPI 0xc00 /* Sync IPI */ +#define PNV_XIVE_SYNC_HW 0xc80 /* Sync HW */ +#define PNV_XIVE_SYNC_OS_ESC 0xd00 /* Sync OS escalation */ +#define PNV_XIVE_SYNC_HW_ESC 0xd80 /* Sync Hyp escalation */ +#define PNV_XIVE_SYNC_REDIS 0xe00 /* Sync Redistribution */ + +/* PC synchronisation */ +#define PNV_XIVE_SYNC_PULL 0xe80 /* Sync pull context */ +#define PNV_XIVE_SYNC_PUSH 0xf00 /* Sync push context */ +#define PNV_XIVE_SYNC_VPC 0xf80 /* Sync remove VPC store */ + +static void pnv_xive_ic_hw_trigger(PnvXive *xive, hwaddr addr, uint64_t val) +{ + uint8_t blk; + uint32_t idx; + + trace_pnv_xive_ic_hw_trigger(addr, val); + + if (val & XIVE_TRIGGER_END) { + xive_error(xive, "IC: END trigger at @0x%"HWADDR_PRIx" data 0x%"PRIx64, + addr, val); + return; + } + + /* + * Forward the source event notification directly to the Router. + * The source interrupt number should already be correctly encoded + * with the chip block id by the sending device (PHB, PSI). + */ + blk = XIVE_EAS_BLOCK(val); + idx = XIVE_EAS_INDEX(val); + + xive_router_notify(XIVE_NOTIFIER(xive), XIVE_EAS(blk, idx)); +} + +static void pnv_xive_ic_notify_write(void *opaque, hwaddr addr, uint64_t val, + unsigned size) +{ + PnvXive *xive = PNV_XIVE(opaque); + + /* VC: HW triggers */ + switch (addr) { + case 0x000 ... 0x7FF: + pnv_xive_ic_hw_trigger(opaque, addr, val); + break; + + /* VC: Forwarded IRQs */ + case PNV_XIVE_FORWARD_IPI: + case PNV_XIVE_FORWARD_HW: + case PNV_XIVE_FORWARD_OS_ESC: + case PNV_XIVE_FORWARD_HW_ESC: + case PNV_XIVE_FORWARD_REDIS: + /* TODO: forwarded IRQs. 
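A standalone sketch (not part of the patch) of the trigger-data layout consumed by pnv_xive_ic_hw_trigger() above, assuming the usual XIVE encoding of a 4-bit block id above a 28-bit source index:

#include <stdint.h>
#include <stdio.h>

#define EAS_BLOCK(n)  (((n) >> 28) & 0xf)
#define EAS_INDEX(n)  ((n) & 0x0fffffff)
#define EAS(blk, idx) (((uint32_t)(blk) << 28) | (idx))

int main(void)
{
    uint32_t val = EAS(0x1, 0x23);  /* chip block 1, source number 0x23 */

    printf("blk=%u idx=0x%x\n", EAS_BLOCK(val), EAS_INDEX(val));
    return 0;
}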
Should be like HW triggers */ + xive_error(xive, "IC: forwarded at @0x%"HWADDR_PRIx" IRQ 0x%"PRIx64, + addr, val); + break; + + /* VC syncs */ + case PNV_XIVE_SYNC_IPI: + case PNV_XIVE_SYNC_HW: + case PNV_XIVE_SYNC_OS_ESC: + case PNV_XIVE_SYNC_HW_ESC: + case PNV_XIVE_SYNC_REDIS: + break; + + /* PC syncs */ + case PNV_XIVE_SYNC_PULL: + case PNV_XIVE_SYNC_PUSH: + case PNV_XIVE_SYNC_VPC: + break; + + default: + xive_error(xive, "IC: invalid notify write @%"HWADDR_PRIx, addr); + } +} + +static uint64_t pnv_xive_ic_notify_read(void *opaque, hwaddr addr, + unsigned size) +{ + PnvXive *xive = PNV_XIVE(opaque); + + /* loads are invalid */ + xive_error(xive, "IC: invalid notify read @%"HWADDR_PRIx, addr); + return -1; +} + +static const MemoryRegionOps pnv_xive_ic_notify_ops = { + .read = pnv_xive_ic_notify_read, + .write = pnv_xive_ic_notify_write, + .endianness = DEVICE_BIG_ENDIAN, + .valid = { + .min_access_size = 8, + .max_access_size = 8, + }, + .impl = { + .min_access_size = 8, + .max_access_size = 8, + }, +}; + +/* + * IC - LSI MMIO handlers (not modeled) + */ + +static void pnv_xive_ic_lsi_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + PnvXive *xive = PNV_XIVE(opaque); + + xive_error(xive, "IC: LSI invalid write @%"HWADDR_PRIx, addr); +} + +static uint64_t pnv_xive_ic_lsi_read(void *opaque, hwaddr addr, unsigned size) +{ + PnvXive *xive = PNV_XIVE(opaque); + + xive_error(xive, "IC: LSI invalid read @%"HWADDR_PRIx, addr); + return -1; +} + +static const MemoryRegionOps pnv_xive_ic_lsi_ops = { + .read = pnv_xive_ic_lsi_read, + .write = pnv_xive_ic_lsi_write, + .endianness = DEVICE_BIG_ENDIAN, + .valid = { + .min_access_size = 8, + .max_access_size = 8, + }, + .impl = { + .min_access_size = 8, + .max_access_size = 8, + }, +}; + +/* + * IC - Indirect TIMA MMIO handlers + */ + +/* + * When the TIMA is accessed from the indirect page, the thread id of + * the target CPU is configured in the PC_TCTXT_INDIR0 register before + * use. This is used for resets and for debug purpose also. 
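A standalone sketch (not part of the patch) of the thread selection done below: the PIR is rebuilt from the chip id and the THRDID field of PC_TCTXT_INDIR0; the shift follows the PPC_BITMASK(9, 15) definition of that field, and the (chip_id << 8) composition is the P9 PIR layout used by the model:

#include <stdint.h>
#include <stdio.h>

#define INDIR_VALID     (1ULL << 63)  /* PC_TCTXT_INDIR_VALID, PPC_BIT(0) */
#define INDIR_THRDID_SH 48            /* PC_TCTXT_INDIR_THRDID, bits 9-15 */

int main(void)
{
    uint8_t  chip_id = 0x1;
    uint64_t tctxt_indir = INDIR_VALID | (0x25ULL << INDIR_THRDID_SH);

    if (tctxt_indir & INDIR_VALID) {
        uint32_t thrdid = (tctxt_indir >> INDIR_THRDID_SH) & 0x7f;
        uint32_t pir = ((uint32_t)chip_id << 8) | thrdid; /* P9 PIR layout */
        printf("indirect TIMA targets PIR 0x%x\n", pir);
    }
    return 0;
}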
+ */ +static XiveTCTX *pnv_xive_get_indirect_tctx(PnvXive *xive) +{ + PnvChip *chip = xive->chip; + uint64_t tctxt_indir = xive->regs[PC_TCTXT_INDIR0 >> 3]; + PowerPCCPU *cpu = NULL; + int pir; + + if (!(tctxt_indir & PC_TCTXT_INDIR_VALID)) { + xive_error(xive, "IC: no indirect TIMA access in progress"); + return NULL; + } + + pir = (chip->chip_id << 8) | GETFIELD(PC_TCTXT_INDIR_THRDID, tctxt_indir); + cpu = pnv_chip_find_cpu(chip, pir); + if (!cpu) { + xive_error(xive, "IC: invalid PIR %x for indirect access", pir); + return NULL; + } + + /* Check that HW thread is XIVE enabled */ + if (!pnv_xive_is_cpu_enabled(xive, cpu)) { + xive_error(xive, "IC: CPU %x is not enabled", pir); + } + + return XIVE_TCTX(pnv_cpu_state(cpu)->intc); +} + +static void xive_tm_indirect_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + XiveTCTX *tctx = pnv_xive_get_indirect_tctx(PNV_XIVE(opaque)); + + xive_tctx_tm_write(XIVE_PRESENTER(opaque), tctx, offset, value, size); +} + +static uint64_t xive_tm_indirect_read(void *opaque, hwaddr offset, + unsigned size) +{ + XiveTCTX *tctx = pnv_xive_get_indirect_tctx(PNV_XIVE(opaque)); + + return xive_tctx_tm_read(XIVE_PRESENTER(opaque), tctx, offset, size); +} + +static const MemoryRegionOps xive_tm_indirect_ops = { + .read = xive_tm_indirect_read, + .write = xive_tm_indirect_write, + .endianness = DEVICE_BIG_ENDIAN, + .valid = { + .min_access_size = 1, + .max_access_size = 8, + }, + .impl = { + .min_access_size = 1, + .max_access_size = 8, + }, +}; + +static void pnv_xive_tm_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + PowerPCCPU *cpu = POWERPC_CPU(current_cpu); + PnvXive *xive = pnv_xive_tm_get_xive(cpu); + XiveTCTX *tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc); + + xive_tctx_tm_write(XIVE_PRESENTER(xive), tctx, offset, value, size); +} + +static uint64_t pnv_xive_tm_read(void *opaque, hwaddr offset, unsigned size) +{ + PowerPCCPU *cpu = POWERPC_CPU(current_cpu); + PnvXive *xive = pnv_xive_tm_get_xive(cpu); + XiveTCTX *tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc); + + return xive_tctx_tm_read(XIVE_PRESENTER(xive), tctx, offset, size); +} + +const MemoryRegionOps pnv_xive_tm_ops = { + .read = pnv_xive_tm_read, + .write = pnv_xive_tm_write, + .endianness = DEVICE_BIG_ENDIAN, + .valid = { + .min_access_size = 1, + .max_access_size = 8, + }, + .impl = { + .min_access_size = 1, + .max_access_size = 8, + }, +}; + +/* + * Interrupt controller XSCOM region. + */ +static uint64_t pnv_xive_xscom_read(void *opaque, hwaddr addr, unsigned size) +{ + switch (addr >> 3) { + case X_VC_EQC_CONFIG: + /* FIXME (skiboot): This is the only XSCOM load. Bizarre. 
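A minimal standalone sketch (not part of the patch) of the register-file convention shared by pnv_xive_ic_reg_read()/write() above and the XSCOM handlers below: the 8-byte registers live in a flat regs[] array indexed by byte offset >> 3:

#include <stdint.h>
#include <stdio.h>

#define PC_GLOBAL_CONFIG 0x480          /* byte offset of one register */

static uint64_t regs[0x1000 >> 3];      /* one slot per 8-byte register */

static void reg_write(uint64_t offset, uint64_t val)
{
    regs[offset >> 3] = val;
}

static uint64_t reg_read(uint64_t offset)
{
    return regs[offset >> 3];
}

int main(void)
{
    reg_write(PC_GLOBAL_CONFIG, 0x123);
    printf("PC_GLOBAL_CONFIG = 0x%llx\n",
           (unsigned long long)reg_read(PC_GLOBAL_CONFIG));
    return 0;
}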
*/ + return VC_EQC_SYNC_MASK; + default: + return pnv_xive_ic_reg_read(opaque, addr, size); + } +} + +static void pnv_xive_xscom_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + pnv_xive_ic_reg_write(opaque, addr, val, size); +} + +static const MemoryRegionOps pnv_xive_xscom_ops = { + .read = pnv_xive_xscom_read, + .write = pnv_xive_xscom_write, + .endianness = DEVICE_BIG_ENDIAN, + .valid = { + .min_access_size = 8, + .max_access_size = 8, + }, + .impl = { + .min_access_size = 8, + .max_access_size = 8, + } +}; + +/* + * Virtualization Controller MMIO region containing the IPI and END ESB pages + */ +static uint64_t pnv_xive_vc_read(void *opaque, hwaddr offset, + unsigned size) +{ + PnvXive *xive = PNV_XIVE(opaque); + uint64_t edt_index = offset >> pnv_xive_edt_shift(xive); + uint64_t edt_type = 0; + uint64_t edt_offset; + MemTxResult result; + AddressSpace *edt_as = NULL; + uint64_t ret = -1; + + if (edt_index < XIVE_TABLE_EDT_MAX) { + edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[edt_index]); + } + + switch (edt_type) { + case CQ_TDR_EDT_IPI: + edt_as = &xive->ipi_as; + break; + case CQ_TDR_EDT_EQ: + edt_as = &xive->end_as; + break; + default: + xive_error(xive, "VC: invalid EDT type for read @%"HWADDR_PRIx, offset); + return -1; + } + + /* Remap the offset for the targeted address space */ + edt_offset = pnv_xive_edt_offset(xive, offset, edt_type); + + ret = address_space_ldq(edt_as, edt_offset, MEMTXATTRS_UNSPECIFIED, + &result); + + if (result != MEMTX_OK) { + xive_error(xive, "VC: %s read failed at @0x%"HWADDR_PRIx " -> @0x%" + HWADDR_PRIx, edt_type == CQ_TDR_EDT_IPI ? "IPI" : "END", + offset, edt_offset); + return -1; + } + + return ret; +} + +static void pnv_xive_vc_write(void *opaque, hwaddr offset, + uint64_t val, unsigned size) +{ + PnvXive *xive = PNV_XIVE(opaque); + uint64_t edt_index = offset >> pnv_xive_edt_shift(xive); + uint64_t edt_type = 0; + uint64_t edt_offset; + MemTxResult result; + AddressSpace *edt_as = NULL; + + if (edt_index < XIVE_TABLE_EDT_MAX) { + edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[edt_index]); + } + + switch (edt_type) { + case CQ_TDR_EDT_IPI: + edt_as = &xive->ipi_as; + break; + case CQ_TDR_EDT_EQ: + edt_as = &xive->end_as; + break; + default: + xive_error(xive, "VC: invalid EDT type for write @%"HWADDR_PRIx, + offset); + return; + } + + /* Remap the offset for the targeted address space */ + edt_offset = pnv_xive_edt_offset(xive, offset, edt_type); + + address_space_stq(edt_as, edt_offset, val, MEMTXATTRS_UNSPECIFIED, &result); + if (result != MEMTX_OK) { + xive_error(xive, "VC: write failed at @0x%"HWADDR_PRIx, edt_offset); + } +} + +static const MemoryRegionOps pnv_xive_vc_ops = { + .read = pnv_xive_vc_read, + .write = pnv_xive_vc_write, + .endianness = DEVICE_BIG_ENDIAN, + .valid = { + .min_access_size = 8, + .max_access_size = 8, + }, + .impl = { + .min_access_size = 8, + .max_access_size = 8, + }, +}; + +/* + * Presenter Controller MMIO region. The Virtualization Controller + * updates the IPB in the NVT table when required. Not modeled. 
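A standalone sketch (not part of the patch) of the EDT-based dispatch implemented by pnv_xive_vc_read()/write() above: the VC window is split into equal segments typed IPI or EQ by the EDT, and an access is remapped into the per-type address space by skipping preceding segments of the other type. The remap rule below is what pnv_xive_edt_offset() is expected to compute; the table contents and segment shift are hypothetical:

#include <stdint.h>
#include <stdio.h>

#define EDT_MAX 64
#define EDT_IPI 1
#define EDT_EQ  2

static unsigned edt[EDT_MAX] = { EDT_IPI, EDT_IPI, EDT_EQ, EDT_IPI, EDT_EQ };

static uint64_t edt_offset(uint64_t offset, unsigned type, unsigned shift)
{
    uint64_t seg = offset >> shift, rel = 0;

    for (uint64_t i = 0; i < seg; i++) {   /* earlier segments of same type */
        if (edt[i] == type) {
            rel += 1ULL << shift;
        }
    }
    return rel + (offset & ((1ULL << shift) - 1));
}

int main(void)
{
    /* an access in segment 4 (EQ) lands after one earlier EQ segment */
    printf("0x%llx\n", (unsigned long long)
           edt_offset(4ULL << 24, EDT_EQ, 24));   /* -> 0x1000000 */
    return 0;
}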
+ */ +static uint64_t pnv_xive_pc_read(void *opaque, hwaddr addr, + unsigned size) +{ + PnvXive *xive = PNV_XIVE(opaque); + + xive_error(xive, "PC: invalid read @%"HWADDR_PRIx, addr); + return -1; +} + +static void pnv_xive_pc_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + PnvXive *xive = PNV_XIVE(opaque); + + xive_error(xive, "PC: invalid write to VC @%"HWADDR_PRIx, addr); +} + +static const MemoryRegionOps pnv_xive_pc_ops = { + .read = pnv_xive_pc_read, + .write = pnv_xive_pc_write, + .endianness = DEVICE_BIG_ENDIAN, + .valid = { + .min_access_size = 8, + .max_access_size = 8, + }, + .impl = { + .min_access_size = 8, + .max_access_size = 8, + }, +}; + +static void xive_nvt_pic_print_info(XiveNVT *nvt, uint32_t nvt_idx, + Monitor *mon) +{ + uint8_t eq_blk = xive_get_field32(NVT_W1_EQ_BLOCK, nvt->w1); + uint32_t eq_idx = xive_get_field32(NVT_W1_EQ_INDEX, nvt->w1); + + if (!xive_nvt_is_valid(nvt)) { + return; + } + + monitor_printf(mon, " %08x end:%02x/%04x IPB:%02x\n", nvt_idx, + eq_blk, eq_idx, + xive_get_field32(NVT_W4_IPB, nvt->w4)); +} + +void pnv_xive_pic_print_info(PnvXive *xive, Monitor *mon) +{ + XiveRouter *xrtr = XIVE_ROUTER(xive); + uint8_t blk = pnv_xive_block_id(xive); + uint8_t chip_id = xive->chip->chip_id; + uint32_t srcno0 = XIVE_EAS(blk, 0); + uint32_t nr_ipis = pnv_xive_nr_ipis(xive, blk); + XiveEAS eas; + XiveEND end; + XiveNVT nvt; + int i; + uint64_t xive_nvt_per_subpage; + + monitor_printf(mon, "XIVE[%x] #%d Source %08x .. %08x\n", chip_id, blk, + srcno0, srcno0 + nr_ipis - 1); + xive_source_pic_print_info(&xive->ipi_source, srcno0, mon); + + monitor_printf(mon, "XIVE[%x] #%d EAT %08x .. %08x\n", chip_id, blk, + srcno0, srcno0 + nr_ipis - 1); + for (i = 0; i < nr_ipis; i++) { + if (xive_router_get_eas(xrtr, blk, i, &eas)) { + break; + } + if (!xive_eas_is_masked(&eas)) { + xive_eas_pic_print_info(&eas, i, mon); + } + } + + monitor_printf(mon, "XIVE[%x] #%d ENDT\n", chip_id, blk); + i = 0; + while (!xive_router_get_end(xrtr, blk, i, &end)) { + xive_end_pic_print_info(&end, i++, mon); + } + + monitor_printf(mon, "XIVE[%x] #%d END Escalation EAT\n", chip_id, blk); + i = 0; + while (!xive_router_get_end(xrtr, blk, i, &end)) { + xive_end_eas_pic_print_info(&end, i++, mon); + } + + monitor_printf(mon, "XIVE[%x] #%d NVTT %08x .. 
%08x\n", chip_id, blk, + 0, XIVE_NVT_COUNT - 1); + xive_nvt_per_subpage = pnv_xive_vst_per_subpage(xive, VST_TSEL_VPDT); + for (i = 0; i < XIVE_NVT_COUNT; i += xive_nvt_per_subpage) { + while (!xive_router_get_nvt(xrtr, blk, i, &nvt)) { + xive_nvt_pic_print_info(&nvt, i++, mon); + } + } +} + +static void pnv_xive_reset(void *dev) +{ + PnvXive *xive = PNV_XIVE(dev); + XiveSource *xsrc = &xive->ipi_source; + XiveENDSource *end_xsrc = &xive->end_source; + + /* Default page size (Should be changed at runtime to 64k) */ + xive->ic_shift = xive->vc_shift = xive->pc_shift = 12; + + /* Clear subregions */ + if (memory_region_is_mapped(&xsrc->esb_mmio)) { + memory_region_del_subregion(&xive->ipi_edt_mmio, &xsrc->esb_mmio); + } + + if (memory_region_is_mapped(&xive->ipi_edt_mmio)) { + memory_region_del_subregion(&xive->ipi_mmio, &xive->ipi_edt_mmio); + } + + if (memory_region_is_mapped(&end_xsrc->esb_mmio)) { + memory_region_del_subregion(&xive->end_edt_mmio, &end_xsrc->esb_mmio); + } + + if (memory_region_is_mapped(&xive->end_edt_mmio)) { + memory_region_del_subregion(&xive->end_mmio, &xive->end_edt_mmio); + } +} + +static void pnv_xive_init(Object *obj) +{ + PnvXive *xive = PNV_XIVE(obj); + + object_initialize_child(obj, "ipi_source", &xive->ipi_source, + TYPE_XIVE_SOURCE); + object_initialize_child(obj, "end_source", &xive->end_source, + TYPE_XIVE_END_SOURCE); +} + +/* + * Maximum number of IRQs and ENDs supported by HW + */ +#define PNV_XIVE_NR_IRQS (PNV9_XIVE_VC_SIZE / (1ull << XIVE_ESB_64K_2PAGE)) +#define PNV_XIVE_NR_ENDS (PNV9_XIVE_VC_SIZE / (1ull << XIVE_ESB_64K_2PAGE)) + +static void pnv_xive_realize(DeviceState *dev, Error **errp) +{ + PnvXive *xive = PNV_XIVE(dev); + PnvXiveClass *pxc = PNV_XIVE_GET_CLASS(dev); + XiveSource *xsrc = &xive->ipi_source; + XiveENDSource *end_xsrc = &xive->end_source; + Error *local_err = NULL; + + pxc->parent_realize(dev, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + + assert(xive->chip); + + /* + * The XiveSource and XiveENDSource objects are realized with the + * maximum allowed HW configuration. The ESB MMIO regions will be + * resized dynamically when the controller is configured by the FW + * to limit accesses to resources not provisioned. + */ + object_property_set_int(OBJECT(xsrc), "nr-irqs", PNV_XIVE_NR_IRQS, + &error_fatal); + object_property_set_link(OBJECT(xsrc), "xive", OBJECT(xive), &error_abort); + if (!qdev_realize(DEVICE(xsrc), NULL, errp)) { + return; + } + + object_property_set_int(OBJECT(end_xsrc), "nr-ends", PNV_XIVE_NR_ENDS, + &error_fatal); + object_property_set_link(OBJECT(end_xsrc), "xive", OBJECT(xive), + &error_abort); + if (!qdev_realize(DEVICE(end_xsrc), NULL, errp)) { + return; + } + + /* Default page size. 
Generally changed at runtime to 64k */ + xive->ic_shift = xive->vc_shift = xive->pc_shift = 12; + + /* XSCOM region, used for initial configuration of the BARs */ + memory_region_init_io(&xive->xscom_regs, OBJECT(dev), &pnv_xive_xscom_ops, + xive, "xscom-xive", PNV9_XSCOM_XIVE_SIZE << 3); + + /* Interrupt controller MMIO regions */ + memory_region_init(&xive->ic_mmio, OBJECT(dev), "xive-ic", + PNV9_XIVE_IC_SIZE); + + memory_region_init_io(&xive->ic_reg_mmio, OBJECT(dev), &pnv_xive_ic_reg_ops, + xive, "xive-ic-reg", 1 << xive->ic_shift); + memory_region_init_io(&xive->ic_notify_mmio, OBJECT(dev), + &pnv_xive_ic_notify_ops, + xive, "xive-ic-notify", 1 << xive->ic_shift); + + /* The Pervasive LSI trigger and EOI pages (not modeled) */ + memory_region_init_io(&xive->ic_lsi_mmio, OBJECT(dev), &pnv_xive_ic_lsi_ops, + xive, "xive-ic-lsi", 2 << xive->ic_shift); + + /* Thread Interrupt Management Area (Indirect) */ + memory_region_init_io(&xive->tm_indirect_mmio, OBJECT(dev), + &xive_tm_indirect_ops, + xive, "xive-tima-indirect", PNV9_XIVE_TM_SIZE); + /* + * Overall Virtualization Controller MMIO region containing the + * IPI ESB pages and END ESB pages. The layout is defined by the + * EDT "Domain table" and the accesses are dispatched using + * address spaces for each. + */ + memory_region_init_io(&xive->vc_mmio, OBJECT(xive), &pnv_xive_vc_ops, xive, + "xive-vc", PNV9_XIVE_VC_SIZE); + + memory_region_init(&xive->ipi_mmio, OBJECT(xive), "xive-vc-ipi", + PNV9_XIVE_VC_SIZE); + address_space_init(&xive->ipi_as, &xive->ipi_mmio, "xive-vc-ipi"); + memory_region_init(&xive->end_mmio, OBJECT(xive), "xive-vc-end", + PNV9_XIVE_VC_SIZE); + address_space_init(&xive->end_as, &xive->end_mmio, "xive-vc-end"); + + /* + * The MMIO windows exposing the IPI ESBs and the END ESBs in the + * VC region. Their size is configured by the FW in the EDT table. + */ + memory_region_init(&xive->ipi_edt_mmio, OBJECT(xive), "xive-vc-ipi-edt", 0); + memory_region_init(&xive->end_edt_mmio, OBJECT(xive), "xive-vc-end-edt", 0); + + /* Presenter Controller MMIO region (not modeled) */ + memory_region_init_io(&xive->pc_mmio, OBJECT(xive), &pnv_xive_pc_ops, xive, + "xive-pc", PNV9_XIVE_PC_SIZE); + + /* Thread Interrupt Management Area (Direct) */ + memory_region_init_io(&xive->tm_mmio, OBJECT(xive), &pnv_xive_tm_ops, + xive, "xive-tima", PNV9_XIVE_TM_SIZE); + + qemu_register_reset(pnv_xive_reset, dev); +} + +static int pnv_xive_dt_xscom(PnvXScomInterface *dev, void *fdt, + int xscom_offset) +{ + const char compat[] = "ibm,power9-xive-x"; + char *name; + int offset; + uint32_t lpc_pcba = PNV9_XSCOM_XIVE_BASE; + uint32_t reg[] = { + cpu_to_be32(lpc_pcba), + cpu_to_be32(PNV9_XSCOM_XIVE_SIZE) + }; + + name = g_strdup_printf("xive@%x", lpc_pcba); + offset = fdt_add_subnode(fdt, xscom_offset, name); + _FDT(offset); + g_free(name); + + _FDT((fdt_setprop(fdt, offset, "reg", reg, sizeof(reg)))); + _FDT((fdt_setprop(fdt, offset, "compatible", compat, + sizeof(compat)))); + return 0; +} + +static Property pnv_xive_properties[] = { + DEFINE_PROP_UINT64("ic-bar", PnvXive, ic_base, 0), + DEFINE_PROP_UINT64("vc-bar", PnvXive, vc_base, 0), + DEFINE_PROP_UINT64("pc-bar", PnvXive, pc_base, 0), + DEFINE_PROP_UINT64("tm-bar", PnvXive, tm_base, 0), + /* The PnvChip id identifies the XIVE interrupt controller. 
*/ + DEFINE_PROP_LINK("chip", PnvXive, chip, TYPE_PNV_CHIP, PnvChip *), + DEFINE_PROP_END_OF_LIST(), +}; + +static void pnv_xive_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PnvXScomInterfaceClass *xdc = PNV_XSCOM_INTERFACE_CLASS(klass); + XiveRouterClass *xrc = XIVE_ROUTER_CLASS(klass); + XiveNotifierClass *xnc = XIVE_NOTIFIER_CLASS(klass); + XivePresenterClass *xpc = XIVE_PRESENTER_CLASS(klass); + PnvXiveClass *pxc = PNV_XIVE_CLASS(klass); + + xdc->dt_xscom = pnv_xive_dt_xscom; + + dc->desc = "PowerNV XIVE Interrupt Controller"; + device_class_set_parent_realize(dc, pnv_xive_realize, &pxc->parent_realize); + dc->realize = pnv_xive_realize; + device_class_set_props(dc, pnv_xive_properties); + + xrc->get_eas = pnv_xive_get_eas; + xrc->get_end = pnv_xive_get_end; + xrc->write_end = pnv_xive_write_end; + xrc->get_nvt = pnv_xive_get_nvt; + xrc->write_nvt = pnv_xive_write_nvt; + xrc->get_block_id = pnv_xive_get_block_id; + + xnc->notify = pnv_xive_notify; + xpc->match_nvt = pnv_xive_match_nvt; +}; + +static const TypeInfo pnv_xive_info = { + .name = TYPE_PNV_XIVE, + .parent = TYPE_XIVE_ROUTER, + .instance_init = pnv_xive_init, + .instance_size = sizeof(PnvXive), + .class_init = pnv_xive_class_init, + .class_size = sizeof(PnvXiveClass), + .interfaces = (InterfaceInfo[]) { + { TYPE_PNV_XSCOM_INTERFACE }, + { } + } +}; + +static void pnv_xive_register_types(void) +{ + type_register_static(&pnv_xive_info); +} + +type_init(pnv_xive_register_types) diff --git a/hw/intc/pnv_xive_regs.h b/hw/intc/pnv_xive_regs.h new file mode 100644 index 000000000..c78f030c0 --- /dev/null +++ b/hw/intc/pnv_xive_regs.h @@ -0,0 +1,248 @@ +/* + * QEMU PowerPC XIVE interrupt controller model + * + * Copyright (c) 2017-2018, IBM Corporation. + * + * This code is licensed under the GPL version 2 or later. See the + * COPYING file in the top-level directory. 
+ */ + +#ifndef PPC_PNV_XIVE_REGS_H +#define PPC_PNV_XIVE_REGS_H + +/* IC register offsets 0x0 - 0x400 */ +#define CQ_SWI_CMD_HIST 0x020 +#define CQ_SWI_CMD_POLL 0x028 +#define CQ_SWI_CMD_BCAST 0x030 +#define CQ_SWI_CMD_ASSIGN 0x038 +#define CQ_SWI_CMD_BLK_UPD 0x040 +#define CQ_SWI_RSP 0x048 +#define CQ_CFG_PB_GEN 0x050 +#define CQ_INT_ADDR_OPT PPC_BITMASK(14, 15) +#define CQ_MSGSND 0x058 +#define CQ_CNPM_SEL 0x078 +#define CQ_IC_BAR 0x080 +#define CQ_IC_BAR_VALID PPC_BIT(0) +#define CQ_IC_BAR_64K PPC_BIT(1) +#define CQ_TM1_BAR 0x90 +#define CQ_TM2_BAR 0x0a0 +#define CQ_TM_BAR_VALID PPC_BIT(0) +#define CQ_TM_BAR_64K PPC_BIT(1) +#define CQ_PC_BAR 0x0b0 +#define CQ_PC_BAR_VALID PPC_BIT(0) +#define CQ_PC_BARM 0x0b8 +#define CQ_PC_BARM_MASK PPC_BITMASK(26, 38) +#define CQ_VC_BAR 0x0c0 +#define CQ_VC_BAR_VALID PPC_BIT(0) +#define CQ_VC_BARM 0x0c8 +#define CQ_VC_BARM_MASK PPC_BITMASK(21, 37) +#define CQ_TAR 0x0f0 +#define CQ_TAR_TBL_AUTOINC PPC_BIT(0) +#define CQ_TAR_TSEL PPC_BITMASK(12, 15) +#define CQ_TAR_TSEL_BLK PPC_BIT(12) +#define CQ_TAR_TSEL_MIG PPC_BIT(13) +#define CQ_TAR_TSEL_VDT PPC_BIT(14) +#define CQ_TAR_TSEL_EDT PPC_BIT(15) +#define CQ_TAR_TSEL_INDEX PPC_BITMASK(26, 31) +#define CQ_TDR 0x0f8 +#define CQ_TDR_VDT_VALID PPC_BIT(0) +#define CQ_TDR_VDT_BLK PPC_BITMASK(11, 15) +#define CQ_TDR_VDT_INDEX PPC_BITMASK(28, 31) +#define CQ_TDR_EDT_TYPE PPC_BITMASK(0, 1) +#define CQ_TDR_EDT_INVALID 0 +#define CQ_TDR_EDT_IPI 1 +#define CQ_TDR_EDT_EQ 2 +#define CQ_TDR_EDT_BLK PPC_BITMASK(12, 15) +#define CQ_TDR_EDT_INDEX PPC_BITMASK(26, 31) +#define CQ_PBI_CTL 0x100 +#define CQ_PBI_PC_64K PPC_BIT(5) +#define CQ_PBI_VC_64K PPC_BIT(6) +#define CQ_PBI_LNX_TRIG PPC_BIT(7) +#define CQ_PBI_FORCE_TM_LOCAL PPC_BIT(22) +#define CQ_PBO_CTL 0x108 +#define CQ_AIB_CTL 0x110 +#define CQ_RST_CTL 0x118 +#define CQ_FIRMASK 0x198 +#define CQ_FIRMASK_AND 0x1a0 +#define CQ_FIRMASK_OR 0x1a8 + +/* PC LBS1 register offsets 0x400 - 0x800 */ +#define PC_TCTXT_CFG 0x400 +#define PC_TCTXT_CFG_BLKGRP_EN PPC_BIT(0) +#define PC_TCTXT_CFG_TARGET_EN PPC_BIT(1) +#define PC_TCTXT_CFG_LGS_EN PPC_BIT(2) +#define PC_TCTXT_CFG_STORE_ACK PPC_BIT(3) +#define PC_TCTXT_CFG_HARD_CHIPID_BLK PPC_BIT(8) +#define PC_TCTXT_CHIPID_OVERRIDE PPC_BIT(9) +#define PC_TCTXT_CHIPID PPC_BITMASK(12, 15) +#define PC_TCTXT_INIT_AGE PPC_BITMASK(30, 31) +#define PC_TCTXT_TRACK 0x408 +#define PC_TCTXT_TRACK_EN PPC_BIT(0) +#define PC_TCTXT_INDIR0 0x420 +#define PC_TCTXT_INDIR_VALID PPC_BIT(0) +#define PC_TCTXT_INDIR_THRDID PPC_BITMASK(9, 15) +#define PC_TCTXT_INDIR1 0x428 +#define PC_TCTXT_INDIR2 0x430 +#define PC_TCTXT_INDIR3 0x438 +#define PC_THREAD_EN_REG0 0x440 +#define PC_THREAD_EN_REG0_SET 0x448 +#define PC_THREAD_EN_REG0_CLR 0x450 +#define PC_THREAD_EN_REG1 0x460 +#define PC_THREAD_EN_REG1_SET 0x468 +#define PC_THREAD_EN_REG1_CLR 0x470 +#define PC_GLOBAL_CONFIG 0x480 +#define PC_GCONF_INDIRECT PPC_BIT(32) +#define PC_GCONF_CHIPID_OVR PPC_BIT(40) +#define PC_GCONF_CHIPID PPC_BITMASK(44, 47) +#define PC_VSD_TABLE_ADDR 0x488 +#define PC_VSD_TABLE_DATA 0x490 +#define PC_AT_KILL 0x4b0 +#define PC_AT_KILL_VALID PPC_BIT(0) +#define PC_AT_KILL_BLOCK_ID PPC_BITMASK(27, 31) +#define PC_AT_KILL_OFFSET PPC_BITMASK(48, 60) +#define PC_AT_KILL_MASK 0x4b8 + +/* PC LBS2 register offsets */ +#define PC_VPC_CACHE_ENABLE 0x708 +#define PC_VPC_CACHE_EN_MASK PPC_BITMASK(0, 31) +#define PC_VPC_SCRUB_TRIG 0x710 +#define PC_VPC_SCRUB_MASK 0x718 +#define PC_SCRUB_VALID PPC_BIT(0) +#define PC_SCRUB_WANT_DISABLE PPC_BIT(1) +#define PC_SCRUB_WANT_INVAL PPC_BIT(2) +#define 
PC_SCRUB_BLOCK_ID PPC_BITMASK(27, 31) +#define PC_SCRUB_OFFSET PPC_BITMASK(45, 63) +#define PC_VPC_CWATCH_SPEC 0x738 +#define PC_VPC_CWATCH_CONFLICT PPC_BIT(0) +#define PC_VPC_CWATCH_FULL PPC_BIT(8) +#define PC_VPC_CWATCH_BLOCKID PPC_BITMASK(27, 31) +#define PC_VPC_CWATCH_OFFSET PPC_BITMASK(45, 63) +#define PC_VPC_CWATCH_DAT0 0x740 +#define PC_VPC_CWATCH_DAT1 0x748 +#define PC_VPC_CWATCH_DAT2 0x750 +#define PC_VPC_CWATCH_DAT3 0x758 +#define PC_VPC_CWATCH_DAT4 0x760 +#define PC_VPC_CWATCH_DAT5 0x768 +#define PC_VPC_CWATCH_DAT6 0x770 +#define PC_VPC_CWATCH_DAT7 0x778 + +/* VC0 register offsets 0x800 - 0xFFF */ +#define VC_GLOBAL_CONFIG 0x800 +#define VC_GCONF_INDIRECT PPC_BIT(32) +#define VC_VSD_TABLE_ADDR 0x808 +#define VC_VSD_TABLE_DATA 0x810 +#define VC_IVE_ISB_BLOCK_MODE 0x818 +#define VC_EQD_BLOCK_MODE 0x820 +#define VC_VPS_BLOCK_MODE 0x828 +#define VC_IRQ_CONFIG_IPI 0x840 +#define VC_IRQ_CONFIG_MEMB_EN PPC_BIT(45) +#define VC_IRQ_CONFIG_MEMB_SZ PPC_BITMASK(46, 51) +#define VC_IRQ_CONFIG_HW 0x848 +#define VC_IRQ_CONFIG_CASCADE1 0x850 +#define VC_IRQ_CONFIG_CASCADE2 0x858 +#define VC_IRQ_CONFIG_REDIST 0x860 +#define VC_IRQ_CONFIG_IPI_CASC 0x868 +#define VC_AIB_TX_ORDER_TAG2_REL_TF PPC_BIT(20) +#define VC_AIB_TX_ORDER_TAG2 0x890 +#define VC_AT_MACRO_KILL 0x8b0 +#define VC_AT_MACRO_KILL_MASK 0x8b8 +#define VC_KILL_VALID PPC_BIT(0) +#define VC_KILL_TYPE PPC_BITMASK(14, 15) +#define VC_KILL_IRQ 0 +#define VC_KILL_IVC 1 +#define VC_KILL_SBC 2 +#define VC_KILL_EQD 3 +#define VC_KILL_BLOCK_ID PPC_BITMASK(27, 31) +#define VC_KILL_OFFSET PPC_BITMASK(48, 60) +#define VC_EQC_CACHE_ENABLE 0x908 +#define VC_EQC_CACHE_EN_MASK PPC_BITMASK(0, 15) +#define VC_EQC_SCRUB_TRIG 0x910 +#define VC_EQC_SCRUB_MASK 0x918 +#define VC_EQC_CONFIG 0x920 +#define X_VC_EQC_CONFIG 0x214 /* XSCOM register */ +#define VC_EQC_CONF_SYNC_IPI PPC_BIT(32) +#define VC_EQC_CONF_SYNC_HW PPC_BIT(33) +#define VC_EQC_CONF_SYNC_ESC1 PPC_BIT(34) +#define VC_EQC_CONF_SYNC_ESC2 PPC_BIT(35) +#define VC_EQC_CONF_SYNC_REDI PPC_BIT(36) +#define VC_EQC_CONF_EQP_INTERLEAVE PPC_BIT(38) +#define VC_EQC_CONF_ENABLE_END_s_BIT PPC_BIT(39) +#define VC_EQC_CONF_ENABLE_END_u_BIT PPC_BIT(40) +#define VC_EQC_CONF_ENABLE_END_c_BIT PPC_BIT(41) +#define VC_EQC_CONF_ENABLE_MORE_QSZ PPC_BIT(42) +#define VC_EQC_CONF_SKIP_ESCALATE PPC_BIT(43) +#define VC_EQC_CWATCH_SPEC 0x928 +#define VC_EQC_CWATCH_CONFLICT PPC_BIT(0) +#define VC_EQC_CWATCH_FULL PPC_BIT(8) +#define VC_EQC_CWATCH_BLOCKID PPC_BITMASK(28, 31) +#define VC_EQC_CWATCH_OFFSET PPC_BITMASK(40, 63) +#define VC_EQC_CWATCH_DAT0 0x930 +#define VC_EQC_CWATCH_DAT1 0x938 +#define VC_EQC_CWATCH_DAT2 0x940 +#define VC_EQC_CWATCH_DAT3 0x948 +#define VC_IVC_SCRUB_TRIG 0x990 +#define VC_IVC_SCRUB_MASK 0x998 +#define VC_SBC_SCRUB_TRIG 0xa10 +#define VC_SBC_SCRUB_MASK 0xa18 +#define VC_SCRUB_VALID PPC_BIT(0) +#define VC_SCRUB_WANT_DISABLE PPC_BIT(1) +#define VC_SCRUB_WANT_INVAL PPC_BIT(2) /* EQC and SBC only */ +#define VC_SCRUB_BLOCK_ID PPC_BITMASK(28, 31) +#define VC_SCRUB_OFFSET PPC_BITMASK(40, 63) +#define VC_IVC_CACHE_ENABLE 0x988 +#define VC_IVC_CACHE_EN_MASK PPC_BITMASK(0, 15) +#define VC_SBC_CACHE_ENABLE 0xa08 +#define VC_SBC_CACHE_EN_MASK PPC_BITMASK(0, 15) +#define VC_IVC_CACHE_SCRUB_TRIG 0x990 +#define VC_IVC_CACHE_SCRUB_MASK 0x998 +#define VC_SBC_CACHE_ENABLE 0xa08 +#define VC_SBC_CACHE_SCRUB_TRIG 0xa10 +#define VC_SBC_CACHE_SCRUB_MASK 0xa18 +#define VC_SBC_CONFIG 0xa20 +#define VC_SBC_CONF_CPLX_CIST PPC_BIT(44) +#define VC_SBC_CONF_CIST_BOTH PPC_BIT(45) +#define VC_SBC_CONF_NO_UPD_PRF PPC_BIT(59) + +/* 
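A standalone sketch (not part of the patch) of the IBM bit-numbering convention these definitions rely on, with the helpers re-implemented as in QEMU: bit 0 is the most-significant bit of the 64-bit doubleword, so PPC_BITMASK(59, 63) is the low five bits, and GETFIELD right-justifies a field by shifting out the mask's trailing zeros:

#include <assert.h>
#include <stdint.h>

#define PPC_BIT(bit)        (0x8000000000000000ULL >> (bit))
#define PPC_BITMASK(bs, be) ((PPC_BIT(bs) - PPC_BIT(be)) | PPC_BIT(bs))
#define GETFIELD(m, v)      (((v) & (m)) >> __builtin_ctzll(m))

int main(void)
{
    assert(PPC_BIT(0)          == 0x8000000000000000ULL);
    assert(PPC_BIT(63)         == 0x1ULL);
    assert(PPC_BITMASK(59, 63) == 0x1fULL);                /* VSD_TSIZE */
    assert(PPC_BITMASK(0, 1)   == 0xc000000000000000ULL);  /* VSD_MODE  */
    assert(GETFIELD(PPC_BITMASK(59, 63), 0x2aULL) == 0x0a);
    return 0;
}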
VC1 register offsets */ + +/* VSD Table address register definitions (shared) */ +#define VST_ADDR_AUTOINC PPC_BIT(0) +#define VST_TABLE_SELECT PPC_BITMASK(13, 15) +#define VST_TSEL_IVT 0 +#define VST_TSEL_SBE 1 +#define VST_TSEL_EQDT 2 +#define VST_TSEL_VPDT 3 +#define VST_TSEL_IRQ 4 /* VC only */ +#define VST_TABLE_BLOCK PPC_BITMASK(27, 31) + +/* Number of queue overflow pages */ +#define VC_QUEUE_OVF_COUNT 6 + +/* + * Bits in a VSD entry. + * + * Note: the address is naturally aligned, we don't use a PPC_BITMASK, + * but just a mask to apply to the address before OR'ing it in. + * + * Note: VSD_FIRMWARE is a SW bit ! It hijacks an unused bit in the + * VSD and is only meant to be used in indirect mode ! + */ +#define VSD_MODE PPC_BITMASK(0, 1) +#define VSD_MODE_SHARED 1 +#define VSD_MODE_EXCLUSIVE 2 +#define VSD_MODE_FORWARD 3 +#define VSD_ADDRESS_MASK 0x0ffffffffffff000ull +#define VSD_MIGRATION_REG PPC_BITMASK(52, 55) +#define VSD_INDIRECT PPC_BIT(56) +#define VSD_TSIZE PPC_BITMASK(59, 63) +#define VSD_FIRMWARE PPC_BIT(2) /* Read warning above */ + +#define VC_EQC_SYNC_MASK \ + (VC_EQC_CONF_SYNC_IPI | \ + VC_EQC_CONF_SYNC_HW | \ + VC_EQC_CONF_SYNC_ESC1 | \ + VC_EQC_CONF_SYNC_ESC2 | \ + VC_EQC_CONF_SYNC_REDI) + + +#endif /* PPC_PNV_XIVE_REGS_H */ diff --git a/hw/intc/ppc-uic.c b/hw/intc/ppc-uic.c new file mode 100644 index 000000000..60013f2dd --- /dev/null +++ b/hw/intc/ppc-uic.c @@ -0,0 +1,321 @@ +/* + * "Universal" Interrupt Controller for PowerPPC 4xx embedded processors + * + * Copyright (c) 2007 Jocelyn Mayer + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "hw/intc/ppc-uic.h" +#include "hw/irq.h" +#include "cpu.h" +#include "hw/ppc/ppc.h" +#include "hw/qdev-properties.h" +#include "migration/vmstate.h" +#include "qapi/error.h" + +enum { + DCR_UICSR = 0x000, + DCR_UICSRS = 0x001, + DCR_UICER = 0x002, + DCR_UICCR = 0x003, + DCR_UICPR = 0x004, + DCR_UICTR = 0x005, + DCR_UICMSR = 0x006, + DCR_UICVR = 0x007, + DCR_UICVCR = 0x008, + DCR_UICMAX = 0x009, +}; + +/*#define DEBUG_UIC*/ + +#ifdef DEBUG_UIC +# define LOG_UIC(...) qemu_log_mask(CPU_LOG_INT, ## __VA_ARGS__) +#else +# define LOG_UIC(...) 
do { } while (0) +#endif + +static void ppcuic_trigger_irq(PPCUIC *uic) +{ + uint32_t ir, cr; + int start, end, inc, i; + + /* Trigger interrupt if any is pending */ + ir = uic->uicsr & uic->uicer & (~uic->uiccr); + cr = uic->uicsr & uic->uicer & uic->uiccr; + LOG_UIC("%s: uicsr %08" PRIx32 " uicer %08" PRIx32 + " uiccr %08" PRIx32 "\n" + " %08" PRIx32 " ir %08" PRIx32 " cr %08" PRIx32 "\n", + __func__, uic->uicsr, uic->uicer, uic->uiccr, + uic->uicsr & uic->uicer, ir, cr); + if (ir != 0x0000000) { + LOG_UIC("Raise UIC interrupt\n"); + qemu_irq_raise(uic->output_int); + } else { + LOG_UIC("Lower UIC interrupt\n"); + qemu_irq_lower(uic->output_int); + } + /* Trigger critical interrupt if any is pending and update vector */ + if (cr != 0x0000000) { + qemu_irq_raise(uic->output_cint); + if (uic->use_vectors) { + /* Compute critical IRQ vector */ + if (uic->uicvcr & 1) { + start = 31; + end = 0; + inc = -1; + } else { + start = 0; + end = 31; + inc = 1; + } + uic->uicvr = uic->uicvcr & 0xFFFFFFFC; + for (i = start; i <= end; i += inc) { + if (cr & (1 << i)) { + uic->uicvr += (i - start) * 512 * inc; + break; + } + } + } + LOG_UIC("Raise UIC critical interrupt - " + "vector %08" PRIx32 "\n", uic->uicvr); + } else { + LOG_UIC("Lower UIC critical interrupt\n"); + qemu_irq_lower(uic->output_cint); + uic->uicvr = 0x00000000; + } +} + +static void ppcuic_set_irq(void *opaque, int irq_num, int level) +{ + PPCUIC *uic; + uint32_t mask, sr; + + uic = opaque; + mask = 1U << (31 - irq_num); + LOG_UIC("%s: irq %d level %d uicsr %08" PRIx32 + " mask %08" PRIx32 " => %08" PRIx32 " %08" PRIx32 "\n", + __func__, irq_num, level, + uic->uicsr, mask, uic->uicsr & mask, level << irq_num); + if (irq_num < 0 || irq_num > 31) { + return; + } + sr = uic->uicsr; + + /* Update status register */ + if (uic->uictr & mask) { + /* Edge sensitive interrupt */ + if (level == 1) { + uic->uicsr |= mask; + } + } else { + /* Level sensitive interrupt */ + if (level == 1) { + uic->uicsr |= mask; + uic->level |= mask; + } else { + uic->uicsr &= ~mask; + uic->level &= ~mask; + } + } + LOG_UIC("%s: irq %d level %d sr %" PRIx32 " => " + "%08" PRIx32 "\n", __func__, irq_num, level, uic->uicsr, sr); + if (sr != uic->uicsr) { + ppcuic_trigger_irq(uic); + } +} + +static uint32_t dcr_read_uic(void *opaque, int dcrn) +{ + PPCUIC *uic; + uint32_t ret; + + uic = opaque; + dcrn -= uic->dcr_base; + switch (dcrn) { + case DCR_UICSR: + case DCR_UICSRS: + ret = uic->uicsr; + break; + case DCR_UICER: + ret = uic->uicer; + break; + case DCR_UICCR: + ret = uic->uiccr; + break; + case DCR_UICPR: + ret = uic->uicpr; + break; + case DCR_UICTR: + ret = uic->uictr; + break; + case DCR_UICMSR: + ret = uic->uicsr & uic->uicer; + break; + case DCR_UICVR: + if (!uic->use_vectors) { + goto no_read; + } + ret = uic->uicvr; + break; + case DCR_UICVCR: + if (!uic->use_vectors) { + goto no_read; + } + ret = uic->uicvcr; + break; + default: + no_read: + ret = 0x00000000; + break; + } + + return ret; +} + +static void dcr_write_uic(void *opaque, int dcrn, uint32_t val) +{ + PPCUIC *uic; + + uic = opaque; + dcrn -= uic->dcr_base; + LOG_UIC("%s: dcr %d val 0x%x\n", __func__, dcrn, val); + switch (dcrn) { + case DCR_UICSR: + uic->uicsr &= ~val; + uic->uicsr |= uic->level; + ppcuic_trigger_irq(uic); + break; + case DCR_UICSRS: + uic->uicsr |= val; + ppcuic_trigger_irq(uic); + break; + case DCR_UICER: + uic->uicer = val; + ppcuic_trigger_irq(uic); + break; + case DCR_UICCR: + uic->uiccr = val; + ppcuic_trigger_irq(uic); + break; + case DCR_UICPR: + uic->uicpr = val; + 
break; + case DCR_UICTR: + uic->uictr = val; + ppcuic_trigger_irq(uic); + break; + case DCR_UICMSR: + break; + case DCR_UICVR: + break; + case DCR_UICVCR: + uic->uicvcr = val & 0xFFFFFFFD; + ppcuic_trigger_irq(uic); + break; + } +} + +static void ppc_uic_reset(DeviceState *dev) +{ + PPCUIC *uic = PPC_UIC(dev); + + uic->uiccr = 0x00000000; + uic->uicer = 0x00000000; + uic->uicpr = 0x00000000; + uic->uicsr = 0x00000000; + uic->uictr = 0x00000000; + if (uic->use_vectors) { + uic->uicvcr = 0x00000000; + uic->uicvr = 0x0000000; + } +} + +static void ppc_uic_realize(DeviceState *dev, Error **errp) +{ + PPCUIC *uic = PPC_UIC(dev); + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + PowerPCCPU *cpu; + int i; + + if (!uic->cpu) { + /* This is a programming error in the code using this device */ + error_setg(errp, "ppc-uic 'cpu' link property was not set"); + return; + } + + cpu = POWERPC_CPU(uic->cpu); + for (i = 0; i < DCR_UICMAX; i++) { + ppc_dcr_register(&cpu->env, uic->dcr_base + i, uic, + &dcr_read_uic, &dcr_write_uic); + } + + sysbus_init_irq(sbd, &uic->output_int); + sysbus_init_irq(sbd, &uic->output_cint); + qdev_init_gpio_in(dev, ppcuic_set_irq, UIC_MAX_IRQ); +} + +static Property ppc_uic_properties[] = { + DEFINE_PROP_LINK("cpu", PPCUIC, cpu, TYPE_CPU, CPUState *), + DEFINE_PROP_UINT32("dcr-base", PPCUIC, dcr_base, 0xc0), + DEFINE_PROP_BOOL("use-vectors", PPCUIC, use_vectors, true), + DEFINE_PROP_END_OF_LIST() +}; + +static const VMStateDescription ppc_uic_vmstate = { + .name = "ppc-uic", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(level, PPCUIC), + VMSTATE_UINT32(uicsr, PPCUIC), + VMSTATE_UINT32(uicer, PPCUIC), + VMSTATE_UINT32(uiccr, PPCUIC), + VMSTATE_UINT32(uicpr, PPCUIC), + VMSTATE_UINT32(uictr, PPCUIC), + VMSTATE_UINT32(uicvcr, PPCUIC), + VMSTATE_UINT32(uicvr, PPCUIC), + VMSTATE_END_OF_LIST() + }, +}; + +static void ppc_uic_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = ppc_uic_reset; + dc->realize = ppc_uic_realize; + dc->vmsd = &ppc_uic_vmstate; + device_class_set_props(dc, ppc_uic_properties); +} + +static const TypeInfo ppc_uic_info = { + .name = TYPE_PPC_UIC, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(PPCUIC), + .class_init = ppc_uic_class_init, +}; + +static void ppc_uic_register_types(void) +{ + type_register_static(&ppc_uic_info); +} + +type_init(ppc_uic_register_types); diff --git a/hw/intc/realview_gic.c b/hw/intc/realview_gic.c new file mode 100644 index 000000000..9b12116b2 --- /dev/null +++ b/hw/intc/realview_gic.c @@ -0,0 +1,86 @@ +/* + * ARM RealView Emulation Baseboard Interrupt Controller + * + * Copyright (c) 2006-2007 CodeSourcery. + * Written by Paul Brook + * + * This code is licensed under the GPL. + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/module.h" +#include "hw/intc/realview_gic.h" +#include "hw/irq.h" +#include "hw/qdev-properties.h" + +static void realview_gic_set_irq(void *opaque, int irq, int level) +{ + RealViewGICState *s = (RealViewGICState *)opaque; + + qemu_set_irq(qdev_get_gpio_in(DEVICE(&s->gic), irq), level); +} + +static void realview_gic_realize(DeviceState *dev, Error **errp) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + RealViewGICState *s = REALVIEW_GIC(dev); + SysBusDevice *busdev; + /* The GICs on the RealView boards have a fixed nonconfigurable + * number of interrupt lines, so we don't need to expose this as + * a qdev property. 
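As a standalone illustration (not part of the patch), the 8K container layout assembled below: the GIC CPU interface (sysbus region 1) sits at offset 0x0 and the distributor (region 0) at offset 0x1000:

#include <stdint.h>
#include <stdio.h>

static const char *realview_gic_decode(uint32_t offset)
{
    if (offset < 0x1000) {
        return "cpu-interface";     /* sysbus_mmio_get_region(busdev, 1) */
    } else if (offset < 0x2000) {
        return "distributor";       /* sysbus_mmio_get_region(busdev, 0) */
    }
    return "out-of-range";
}

int main(void)
{
    printf("0x0    -> %s\n", realview_gic_decode(0x0));
    printf("0x1000 -> %s\n", realview_gic_decode(0x1000));
    return 0;
}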
+ */ + int numirq = 96; + + qdev_prop_set_uint32(DEVICE(&s->gic), "num-irq", numirq); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->gic), errp)) { + return; + } + busdev = SYS_BUS_DEVICE(&s->gic); + + /* Pass through outbound IRQ lines from the GIC */ + sysbus_pass_irq(sbd, busdev); + + /* Pass through inbound GPIO lines to the GIC */ + qdev_init_gpio_in(dev, realview_gic_set_irq, numirq - 32); + + memory_region_add_subregion(&s->container, 0, + sysbus_mmio_get_region(busdev, 1)); + memory_region_add_subregion(&s->container, 0x1000, + sysbus_mmio_get_region(busdev, 0)); +} + +static void realview_gic_init(Object *obj) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + RealViewGICState *s = REALVIEW_GIC(obj); + + memory_region_init(&s->container, OBJECT(s), + "realview-gic-container", 0x2000); + sysbus_init_mmio(sbd, &s->container); + + object_initialize_child(obj, "gic", &s->gic, TYPE_ARM_GIC); + qdev_prop_set_uint32(DEVICE(&s->gic), "num-cpu", 1); +} + +static void realview_gic_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + + dc->realize = realview_gic_realize; +} + +static const TypeInfo realview_gic_info = { + .name = TYPE_REALVIEW_GIC, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(RealViewGICState), + .instance_init = realview_gic_init, + .class_init = realview_gic_class_init, +}; + +static void realview_gic_register_types(void) +{ + type_register_static(&realview_gic_info); +} + +type_init(realview_gic_register_types) diff --git a/hw/intc/riscv_aclint.c b/hw/intc/riscv_aclint.c new file mode 100644 index 000000000..f1a5d3d28 --- /dev/null +++ b/hw/intc/riscv_aclint.c @@ -0,0 +1,460 @@ +/* + * RISC-V ACLINT (Advanced Core Local Interruptor) + * URL: https://github.com/riscv/riscv-aclint + * + * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu + * Copyright (c) 2017 SiFive, Inc. + * Copyright (c) 2021 Western Digital Corporation or its affiliates. + * + * This provides real-time clock, timer and interprocessor interrupts. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/error-report.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "hw/sysbus.h" +#include "target/riscv/cpu.h" +#include "hw/qdev-properties.h" +#include "hw/intc/riscv_aclint.h" +#include "qemu/timer.h" +#include "hw/irq.h" + +typedef struct riscv_aclint_mtimer_callback { + RISCVAclintMTimerState *s; + int num; +} riscv_aclint_mtimer_callback; + +static uint64_t cpu_riscv_read_rtc(uint32_t timebase_freq) +{ + return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), + timebase_freq, NANOSECONDS_PER_SECOND); +} + +/* + * Called when timecmp is written to update the QEMU timer or immediately + * trigger timer interrupt if mtimecmp <= current timer value. 
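A standalone sketch (not part of the patch) of the tick-to-deadline conversion done below, re-implementing QEMU's muldiv64() with a 128-bit intermediate (GCC/Clang __int128); the frequency and tick values are illustrative:

#include <stdint.h>
#include <stdio.h>

#define NS_PER_SEC 1000000000ULL

static uint64_t muldiv64(uint64_t a, uint32_t b, uint32_t c)
{
    return (uint64_t)(((unsigned __int128)a * b) / c);
}

int main(void)
{
    uint32_t timebase_freq = 10000000;          /* 10 MHz, illustrative */
    uint64_t now_ticks = 500, timecmp = 1500;

    uint64_t diff = timecmp - now_ticks;
    uint64_t ns = muldiv64(diff, NS_PER_SEC, timebase_freq);

    /* clamp if the multiply wrapped, mirroring the check in the model */
    if (NS_PER_SEC > timebase_freq && ns < diff) {
        ns = INT64_MAX;
    }
    printf("fire in %llu ns\n", (unsigned long long)ns);
    return 0;
}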
+ */ +static void riscv_aclint_mtimer_write_timecmp(RISCVAclintMTimerState *mtimer, + RISCVCPU *cpu, + int hartid, + uint64_t value, + uint32_t timebase_freq) +{ + uint64_t next; + uint64_t diff; + + uint64_t rtc_r = cpu_riscv_read_rtc(timebase_freq); + + cpu->env.timecmp = value; + if (cpu->env.timecmp <= rtc_r) { + /* + * If we're setting an MTIMECMP value in the "past", + * immediately raise the timer interrupt + */ + qemu_irq_raise(mtimer->timer_irqs[hartid - mtimer->hartid_base]); + return; + } + + /* otherwise, set up the future timer interrupt */ + qemu_irq_lower(mtimer->timer_irqs[hartid - mtimer->hartid_base]); + diff = cpu->env.timecmp - rtc_r; + /* back to ns (note args switched in muldiv64) */ + uint64_t ns_diff = muldiv64(diff, NANOSECONDS_PER_SECOND, timebase_freq); + + /* + * check if ns_diff overflowed and check if the addition would potentially + * overflow + */ + if ((NANOSECONDS_PER_SECOND > timebase_freq && ns_diff < diff) || + ns_diff > INT64_MAX) { + next = INT64_MAX; + } else { + /* + * as it is very unlikely qemu_clock_get_ns will return a value + * greater than INT64_MAX, no additional check is needed for an + * unsigned integer overflow. + */ + next = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + ns_diff; + /* + * if ns_diff is INT64_MAX next may still be outside the range + * of a signed integer. + */ + next = MIN(next, INT64_MAX); + } + + timer_mod(cpu->env.timer, next); +} + +/* + * Callback used when the timer set using timer_mod expires. + * Should raise the timer interrupt line + */ +static void riscv_aclint_mtimer_cb(void *opaque) +{ + riscv_aclint_mtimer_callback *state = opaque; + + qemu_irq_raise(state->s->timer_irqs[state->num]); +} + +/* CPU read MTIMER register */ +static uint64_t riscv_aclint_mtimer_read(void *opaque, hwaddr addr, + unsigned size) +{ + RISCVAclintMTimerState *mtimer = opaque; + + if (addr >= mtimer->timecmp_base && + addr < (mtimer->timecmp_base + (mtimer->num_harts << 3))) { + size_t hartid = mtimer->hartid_base + + ((addr - mtimer->timecmp_base) >> 3); + CPUState *cpu = qemu_get_cpu(hartid); + CPURISCVState *env = cpu ? cpu->env_ptr : NULL; + if (!env) { + qemu_log_mask(LOG_GUEST_ERROR, + "aclint-mtimer: invalid hartid: %zu", hartid); + } else if ((addr & 0x7) == 0) { + /* timecmp_lo */ + uint64_t timecmp = env->timecmp; + return timecmp & 0xFFFFFFFF; + } else if ((addr & 0x7) == 4) { + /* timecmp_hi */ + uint64_t timecmp = env->timecmp; + return (timecmp >> 32) & 0xFFFFFFFF; + } else { + qemu_log_mask(LOG_UNIMP, + "aclint-mtimer: invalid read: %08x", (uint32_t)addr); + return 0; + } + } else if (addr == mtimer->time_base) { + /* time_lo */ + return cpu_riscv_read_rtc(mtimer->timebase_freq) & 0xFFFFFFFF; + } else if (addr == mtimer->time_base + 4) { + /* time_hi */ + return (cpu_riscv_read_rtc(mtimer->timebase_freq) >> 32) & 0xFFFFFFFF; + } + + qemu_log_mask(LOG_UNIMP, + "aclint-mtimer: invalid read: %08x", (uint32_t)addr); + return 0; +} + +/* CPU write MTIMER register */ +static void riscv_aclint_mtimer_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + RISCVAclintMTimerState *mtimer = opaque; + + if (addr >= mtimer->timecmp_base && + addr < (mtimer->timecmp_base + (mtimer->num_harts << 3))) { + size_t hartid = mtimer->hartid_base + + ((addr - mtimer->timecmp_base) >> 3); + CPUState *cpu = qemu_get_cpu(hartid); + CPURISCVState *env = cpu ? 
cpu->env_ptr : NULL; + if (!env) { + qemu_log_mask(LOG_GUEST_ERROR, + "aclint-mtimer: invalid hartid: %zu", hartid); + } else if ((addr & 0x7) == 0) { + /* timecmp_lo */ + uint64_t timecmp_hi = env->timecmp >> 32; + riscv_aclint_mtimer_write_timecmp(mtimer, RISCV_CPU(cpu), hartid, + timecmp_hi << 32 | (value & 0xFFFFFFFF), + mtimer->timebase_freq); + return; + } else if ((addr & 0x7) == 4) { + /* timecmp_hi */ + uint64_t timecmp_lo = env->timecmp; + riscv_aclint_mtimer_write_timecmp(mtimer, RISCV_CPU(cpu), hartid, + value << 32 | (timecmp_lo & 0xFFFFFFFF), + mtimer->timebase_freq); + } else { + qemu_log_mask(LOG_UNIMP, + "aclint-mtimer: invalid timecmp write: %08x", + (uint32_t)addr); + } + return; + } else if (addr == mtimer->time_base) { + /* time_lo */ + qemu_log_mask(LOG_UNIMP, + "aclint-mtimer: time_lo write not implemented"); + return; + } else if (addr == mtimer->time_base + 4) { + /* time_hi */ + qemu_log_mask(LOG_UNIMP, + "aclint-mtimer: time_hi write not implemented"); + return; + } + + qemu_log_mask(LOG_UNIMP, + "aclint-mtimer: invalid write: %08x", (uint32_t)addr); +} + +static const MemoryRegionOps riscv_aclint_mtimer_ops = { + .read = riscv_aclint_mtimer_read, + .write = riscv_aclint_mtimer_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 8 + } +}; + +static Property riscv_aclint_mtimer_properties[] = { + DEFINE_PROP_UINT32("hartid-base", RISCVAclintMTimerState, + hartid_base, 0), + DEFINE_PROP_UINT32("num-harts", RISCVAclintMTimerState, num_harts, 1), + DEFINE_PROP_UINT32("timecmp-base", RISCVAclintMTimerState, + timecmp_base, RISCV_ACLINT_DEFAULT_MTIMECMP), + DEFINE_PROP_UINT32("time-base", RISCVAclintMTimerState, + time_base, RISCV_ACLINT_DEFAULT_MTIME), + DEFINE_PROP_UINT32("aperture-size", RISCVAclintMTimerState, + aperture_size, RISCV_ACLINT_DEFAULT_MTIMER_SIZE), + DEFINE_PROP_UINT32("timebase-freq", RISCVAclintMTimerState, + timebase_freq, 0), + DEFINE_PROP_END_OF_LIST(), +}; + +static void riscv_aclint_mtimer_realize(DeviceState *dev, Error **errp) +{ + RISCVAclintMTimerState *s = RISCV_ACLINT_MTIMER(dev); + int i; + + memory_region_init_io(&s->mmio, OBJECT(dev), &riscv_aclint_mtimer_ops, + s, TYPE_RISCV_ACLINT_MTIMER, s->aperture_size); + sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->mmio); + + s->timer_irqs = g_malloc(sizeof(qemu_irq) * s->num_harts); + qdev_init_gpio_out(dev, s->timer_irqs, s->num_harts); + + /* Claim timer interrupt bits */ + for (i = 0; i < s->num_harts; i++) { + RISCVCPU *cpu = RISCV_CPU(qemu_get_cpu(s->hartid_base + i)); + if (riscv_cpu_claim_interrupts(cpu, MIP_MTIP) < 0) { + error_report("MTIP already claimed"); + exit(1); + } + } +} + +static void riscv_aclint_mtimer_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + dc->realize = riscv_aclint_mtimer_realize; + device_class_set_props(dc, riscv_aclint_mtimer_properties); +} + +static const TypeInfo riscv_aclint_mtimer_info = { + .name = TYPE_RISCV_ACLINT_MTIMER, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(RISCVAclintMTimerState), + .class_init = riscv_aclint_mtimer_class_init, +}; + +/* + * Create ACLINT MTIMER device. 
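A standalone sketch (not part of the patch) of the register map decoded by the MTIMER handlers above: each hart's 8-byte mtimecmp sits at timecmp_base + hartid * 8, and the shared 64-bit mtime lives at time_base; 0x0 and 0x7ff8 are the ACLINT defaults used by this model:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t timecmp_base = 0x0000, time_base = 0x7ff8, num_harts = 4;
    uint32_t addrs[] = { 0x0008, 0x7ff8 };   /* sample accesses */

    for (unsigned i = 0; i < 2; i++) {
        uint32_t a = addrs[i];
        if (a >= timecmp_base && a < timecmp_base + (num_harts << 3)) {
            printf("0x%04x -> mtimecmp[hart %u]\n", a,
                   (a - timecmp_base) >> 3);
        } else if (a == time_base || a == time_base + 4) {
            printf("0x%04x -> mtime\n", a);
        }
    }
    return 0;
}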
+ */ +DeviceState *riscv_aclint_mtimer_create(hwaddr addr, hwaddr size, + uint32_t hartid_base, uint32_t num_harts, + uint32_t timecmp_base, uint32_t time_base, uint32_t timebase_freq, + bool provide_rdtime) +{ + int i; + DeviceState *dev = qdev_new(TYPE_RISCV_ACLINT_MTIMER); + + assert(num_harts <= RISCV_ACLINT_MAX_HARTS); + assert(!(addr & 0x7)); + assert(!(timecmp_base & 0x7)); + assert(!(time_base & 0x7)); + + qdev_prop_set_uint32(dev, "hartid-base", hartid_base); + qdev_prop_set_uint32(dev, "num-harts", num_harts); + qdev_prop_set_uint32(dev, "timecmp-base", timecmp_base); + qdev_prop_set_uint32(dev, "time-base", time_base); + qdev_prop_set_uint32(dev, "aperture-size", size); + qdev_prop_set_uint32(dev, "timebase-freq", timebase_freq); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, addr); + + for (i = 0; i < num_harts; i++) { + CPUState *cpu = qemu_get_cpu(hartid_base + i); + RISCVCPU *rvcpu = RISCV_CPU(cpu); + CPURISCVState *env = cpu ? cpu->env_ptr : NULL; + riscv_aclint_mtimer_callback *cb = + g_malloc0(sizeof(riscv_aclint_mtimer_callback)); + + if (!env) { + g_free(cb); + continue; + } + if (provide_rdtime) { + riscv_cpu_set_rdtime_fn(env, cpu_riscv_read_rtc, timebase_freq); + } + + cb->s = RISCV_ACLINT_MTIMER(dev); + cb->num = i; + env->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, + &riscv_aclint_mtimer_cb, cb); + env->timecmp = 0; + + qdev_connect_gpio_out(dev, i, + qdev_get_gpio_in(DEVICE(rvcpu), IRQ_M_TIMER)); + } + + return dev; +} + +/* CPU read [M|S]SWI register */ +static uint64_t riscv_aclint_swi_read(void *opaque, hwaddr addr, + unsigned size) +{ + RISCVAclintSwiState *swi = opaque; + + if (addr < (swi->num_harts << 2)) { + size_t hartid = swi->hartid_base + (addr >> 2); + CPUState *cpu = qemu_get_cpu(hartid); + CPURISCVState *env = cpu ? cpu->env_ptr : NULL; + if (!env) { + qemu_log_mask(LOG_GUEST_ERROR, + "aclint-swi: invalid hartid: %zu", hartid); + } else if ((addr & 0x3) == 0) { + return (swi->sswi) ? 0 : ((env->mip & MIP_MSIP) > 0); + } + } + + qemu_log_mask(LOG_UNIMP, + "aclint-swi: invalid read: %08x", (uint32_t)addr); + return 0; +} + +/* CPU write [M|S]SWI register */ +static void riscv_aclint_swi_write(void *opaque, hwaddr addr, uint64_t value, + unsigned size) +{ + RISCVAclintSwiState *swi = opaque; + + if (addr < (swi->num_harts << 2)) { + size_t hartid = swi->hartid_base + (addr >> 2); + CPUState *cpu = qemu_get_cpu(hartid); + CPURISCVState *env = cpu ? 
cpu->env_ptr : NULL; + if (!env) { + qemu_log_mask(LOG_GUEST_ERROR, + "aclint-swi: invalid hartid: %zu", hartid); + } else if ((addr & 0x3) == 0) { + if (value & 0x1) { + qemu_irq_raise(swi->soft_irqs[hartid - swi->hartid_base]); + } else { + if (!swi->sswi) { + qemu_irq_lower(swi->soft_irqs[hartid - swi->hartid_base]); + } + } + return; + } + } + + qemu_log_mask(LOG_UNIMP, + "aclint-swi: invalid write: %08x", (uint32_t)addr); +} + +static const MemoryRegionOps riscv_aclint_swi_ops = { + .read = riscv_aclint_swi_read, + .write = riscv_aclint_swi_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4 + } +}; + +static Property riscv_aclint_swi_properties[] = { + DEFINE_PROP_UINT32("hartid-base", RISCVAclintSwiState, hartid_base, 0), + DEFINE_PROP_UINT32("num-harts", RISCVAclintSwiState, num_harts, 1), + DEFINE_PROP_UINT32("sswi", RISCVAclintSwiState, sswi, false), + DEFINE_PROP_END_OF_LIST(), +}; + +static void riscv_aclint_swi_realize(DeviceState *dev, Error **errp) +{ + RISCVAclintSwiState *swi = RISCV_ACLINT_SWI(dev); + int i; + + memory_region_init_io(&swi->mmio, OBJECT(dev), &riscv_aclint_swi_ops, swi, + TYPE_RISCV_ACLINT_SWI, RISCV_ACLINT_SWI_SIZE); + sysbus_init_mmio(SYS_BUS_DEVICE(dev), &swi->mmio); + + swi->soft_irqs = g_malloc(sizeof(qemu_irq) * swi->num_harts); + qdev_init_gpio_out(dev, swi->soft_irqs, swi->num_harts); + + /* Claim software interrupt bits */ + for (i = 0; i < swi->num_harts; i++) { + RISCVCPU *cpu = RISCV_CPU(qemu_get_cpu(swi->hartid_base + i)); + /* We don't claim mip.SSIP because it is writeable by software */ + if (riscv_cpu_claim_interrupts(cpu, swi->sswi ? 0 : MIP_MSIP) < 0) { + error_report("MSIP already claimed"); + exit(1); + } + } +} + +static void riscv_aclint_swi_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + dc->realize = riscv_aclint_swi_realize; + device_class_set_props(dc, riscv_aclint_swi_properties); +} + +static const TypeInfo riscv_aclint_swi_info = { + .name = TYPE_RISCV_ACLINT_SWI, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(RISCVAclintSwiState), + .class_init = riscv_aclint_swi_class_init, +}; + +/* + * Create ACLINT [M|S]SWI device. + */ +DeviceState *riscv_aclint_swi_create(hwaddr addr, uint32_t hartid_base, + uint32_t num_harts, bool sswi) +{ + int i; + DeviceState *dev = qdev_new(TYPE_RISCV_ACLINT_SWI); + + assert(num_harts <= RISCV_ACLINT_MAX_HARTS); + assert(!(addr & 0x3)); + + qdev_prop_set_uint32(dev, "hartid-base", hartid_base); + qdev_prop_set_uint32(dev, "num-harts", num_harts); + qdev_prop_set_uint32(dev, "sswi", sswi ? true : false); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, addr); + + for (i = 0; i < num_harts; i++) { + CPUState *cpu = qemu_get_cpu(hartid_base + i); + RISCVCPU *rvcpu = RISCV_CPU(cpu); + + qdev_connect_gpio_out(dev, i, + qdev_get_gpio_in(DEVICE(rvcpu), + (sswi) ? IRQ_S_SOFT : IRQ_M_SOFT)); + } + + return dev; +} + +static void riscv_aclint_register_types(void) +{ + type_register_static(&riscv_aclint_mtimer_info); + type_register_static(&riscv_aclint_swi_info); +} + +type_init(riscv_aclint_register_types) diff --git a/hw/intc/rx_icu.c b/hw/intc/rx_icu.c new file mode 100644 index 000000000..e5c01807b --- /dev/null +++ b/hw/intc/rx_icu.c @@ -0,0 +1,395 @@ +/* + * RX Interrupt Control Unit + * + * Warning: Only ICUa is supported. 
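+ * The ICUa register map modeled below places IR at offset 0x000, DTCER
+ * at 0x100, IER at 0x200, IPR at 0x300, DMRSR at 0x400 and IRQCR at
+ * 0x500, all relative to the ICU base.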
+ *
+ * Datasheet: RX62N Group, RX621 Group User's Manual: Hardware
+ *            (Rev.1.40 R01UH0033EJ0140)
+ *
+ * Copyright (c) 2019 Yoshinori Sato
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/log.h"
+#include "qemu/error-report.h"
+#include "hw/irq.h"
+#include "hw/registerfields.h"
+#include "hw/qdev-properties.h"
+#include "hw/intc/rx_icu.h"
+#include "migration/vmstate.h"
+
+REG8(IR, 0)
+  FIELD(IR, IR, 0, 1)
+REG8(DTCER, 0x100)
+  FIELD(DTCER, DTCE, 0, 1)
+REG8(IER, 0x200)
+REG8(SWINTR, 0x2e0)
+  FIELD(SWINTR, SWINT, 0, 1)
+REG16(FIR, 0x2f0)
+  FIELD(FIR, FVCT, 0, 8)
+  FIELD(FIR, FIEN, 15, 1)
+REG8(IPR, 0x300)
+  FIELD(IPR, IPR, 0, 4)
+REG8(DMRSR, 0x400)
+REG8(IRQCR, 0x500)
+  FIELD(IRQCR, IRQMD, 2, 2)
+REG8(NMISR, 0x580)
+  FIELD(NMISR, NMIST, 0, 1)
+  FIELD(NMISR, LVDST, 1, 1)
+  FIELD(NMISR, OSTST, 2, 1)
+REG8(NMIER, 0x581)
+  FIELD(NMIER, NMIEN, 0, 1)
+  FIELD(NMIER, LVDEN, 1, 1)
+  FIELD(NMIER, OSTEN, 2, 1)
+REG8(NMICLR, 0x582)
+  FIELD(NMICLR, NMICLR, 0, 1)
+  FIELD(NMICLR, OSTCLR, 2, 1)
+REG8(NMICR, 0x583)
+  FIELD(NMICR, NMIMD, 3, 1)
+
+static void set_irq(RXICUState *icu, int n_IRQ, int req)
+{
+    if ((icu->fir & R_FIR_FIEN_MASK) &&
+        (icu->fir & R_FIR_FVCT_MASK) == n_IRQ) {
+        qemu_set_irq(icu->_fir, req);
+    } else {
+        qemu_set_irq(icu->_irq, req);
+    }
+}
+
+static uint16_t rxicu_level(RXICUState *icu, unsigned n)
+{
+    return (icu->ipr[icu->map[n]] << 8) | n;
+}
+
+static void rxicu_request(RXICUState *icu, int n_IRQ)
+{
+    int enable;
+
+    enable = icu->ier[n_IRQ / 8] & (1 << (n_IRQ & 7));
+    if (n_IRQ > 0 && enable != 0 && qatomic_read(&icu->req_irq) < 0) {
+        qatomic_set(&icu->req_irq, n_IRQ);
+        set_irq(icu, n_IRQ, rxicu_level(icu, n_IRQ));
+    }
+}
+
+static void rxicu_set_irq(void *opaque, int n_IRQ, int level)
+{
+    RXICUState *icu = opaque;
+    struct IRQSource *src;
+    int issue;
+
+    if (n_IRQ >= NR_IRQS) {
+        error_report("%s: IRQ %d out of range", __func__, n_IRQ);
+        return;
+    }
+
+    src = &icu->src[n_IRQ];
+
+    level = (level != 0);
+    switch (src->sense) {
+    case TRG_LEVEL:
+        /* level-sensitive irq */
+        issue = level;
+        src->level = level;
+        break;
+    case TRG_NEDGE:
+        issue = (level == 0 && src->level == 1);
+        src->level = level;
+        break;
+    case TRG_PEDGE:
+        issue = (level == 1 && src->level == 0);
+        src->level = level;
+        break;
+    case TRG_BEDGE:
+        issue = ((level ^ src->level) & 1);
+        src->level = level;
+        break;
+    default:
+        g_assert_not_reached();
+    }
+    if (issue == 0 && src->sense == TRG_LEVEL) {
+        icu->ir[n_IRQ] = 0;
+        if (qatomic_read(&icu->req_irq) == n_IRQ) {
+            /* clear request */
+            set_irq(icu, n_IRQ, 0);
+            qatomic_set(&icu->req_irq, -1);
+        }
+        return;
+    }
+    if (issue) {
+        icu->ir[n_IRQ] = 1;
+        rxicu_request(icu, n_IRQ);
+    }
+}
+
+static void rxicu_ack_irq(void *opaque, int no, int level)
+{
+    RXICUState *icu = opaque;
+    int i;
+    int n_IRQ;
+    int max_pri;
+
+    n_IRQ = qatomic_read(&icu->req_irq);
+    if (n_IRQ < 0) {
+        return;
+    }
+    qatomic_set(&icu->req_irq, -1);
+    if
(icu->src[n_IRQ].sense != TRG_LEVEL) { + icu->ir[n_IRQ] = 0; + } + + max_pri = 0; + n_IRQ = -1; + for (i = 0; i < NR_IRQS; i++) { + if (icu->ir[i]) { + if (max_pri < icu->ipr[icu->map[i]]) { + n_IRQ = i; + max_pri = icu->ipr[icu->map[i]]; + } + } + } + + if (n_IRQ >= 0) { + rxicu_request(icu, n_IRQ); + } +} + +static uint64_t icu_read(void *opaque, hwaddr addr, unsigned size) +{ + RXICUState *icu = opaque; + int reg = addr & 0xff; + + if ((addr != A_FIR && size != 1) || + (addr == A_FIR && size != 2)) { + qemu_log_mask(LOG_GUEST_ERROR, "rx_icu: Invalid read size 0x%" + HWADDR_PRIX "\n", + addr); + return UINT64_MAX; + } + switch (addr) { + case A_IR ... A_IR + 0xff: + return icu->ir[reg] & R_IR_IR_MASK; + case A_DTCER ... A_DTCER + 0xff: + return icu->dtcer[reg] & R_DTCER_DTCE_MASK; + case A_IER ... A_IER + 0x1f: + return icu->ier[reg]; + case A_SWINTR: + return 0; + case A_FIR: + return icu->fir & (R_FIR_FIEN_MASK | R_FIR_FVCT_MASK); + case A_IPR ... A_IPR + 0x8f: + return icu->ipr[reg] & R_IPR_IPR_MASK; + case A_DMRSR: + case A_DMRSR + 4: + case A_DMRSR + 8: + case A_DMRSR + 12: + return icu->dmasr[reg >> 2]; + case A_IRQCR ... A_IRQCR + 0x1f: + return icu->src[64 + reg].sense << R_IRQCR_IRQMD_SHIFT; + case A_NMISR: + case A_NMICLR: + return 0; + case A_NMIER: + return icu->nmier; + case A_NMICR: + return icu->nmicr; + default: + qemu_log_mask(LOG_UNIMP, "rx_icu: Register 0x%" HWADDR_PRIX " " + "not implemented.\n", + addr); + break; + } + return UINT64_MAX; +} + +static void icu_write(void *opaque, hwaddr addr, uint64_t val, unsigned size) +{ + RXICUState *icu = opaque; + int reg = addr & 0xff; + + if ((addr != A_FIR && size != 1) || + (addr == A_FIR && size != 2)) { + qemu_log_mask(LOG_GUEST_ERROR, "rx_icu: Invalid write size at " + "0x%" HWADDR_PRIX "\n", + addr); + return; + } + switch (addr) { + case A_IR ... A_IR + 0xff: + if (icu->src[reg].sense != TRG_LEVEL && val == 0) { + icu->ir[reg] = 0; + } + break; + case A_DTCER ... A_DTCER + 0xff: + icu->dtcer[reg] = val & R_DTCER_DTCE_MASK; + qemu_log_mask(LOG_UNIMP, "rx_icu: DTC not implemented\n"); + break; + case A_IER ... A_IER + 0x1f: + icu->ier[reg] = val; + break; + case A_SWINTR: + if (val & R_SWINTR_SWINT_MASK) { + qemu_irq_pulse(icu->_swi); + } + break; + case A_FIR: + icu->fir = val & (R_FIR_FIEN_MASK | R_FIR_FVCT_MASK); + break; + case A_IPR ... A_IPR + 0x8f: + icu->ipr[reg] = val & R_IPR_IPR_MASK; + break; + case A_DMRSR: + case A_DMRSR + 4: + case A_DMRSR + 8: + case A_DMRSR + 12: + icu->dmasr[reg >> 2] = val; + qemu_log_mask(LOG_UNIMP, "rx_icu: DMAC not implemented\n"); + break; + case A_IRQCR ... 
A_IRQCR + 0x1f: + icu->src[64 + reg].sense = val >> R_IRQCR_IRQMD_SHIFT; + break; + case A_NMICLR: + break; + case A_NMIER: + icu->nmier |= val & (R_NMIER_NMIEN_MASK | + R_NMIER_LVDEN_MASK | + R_NMIER_OSTEN_MASK); + break; + case A_NMICR: + if ((icu->nmier & R_NMIER_NMIEN_MASK) == 0) { + icu->nmicr = val & R_NMICR_NMIMD_MASK; + } + break; + default: + qemu_log_mask(LOG_UNIMP, "rx_icu: Register 0x%" HWADDR_PRIX " " + "not implemented\n", + addr); + break; + } +} + +static const MemoryRegionOps icu_ops = { + .write = icu_write, + .read = icu_read, + .endianness = DEVICE_LITTLE_ENDIAN, + .impl = { + .min_access_size = 1, + .max_access_size = 2, + }, + .valid = { + .min_access_size = 1, + .max_access_size = 2, + }, +}; + +static void rxicu_realize(DeviceState *dev, Error **errp) +{ + RXICUState *icu = RX_ICU(dev); + int i; + + if (icu->init_sense == NULL) { + qemu_log_mask(LOG_GUEST_ERROR, + "rx_icu: trigger-level property must be set."); + return; + } + + for (i = 0; i < NR_IRQS; i++) { + icu->src[i].sense = TRG_PEDGE; + } + for (i = 0; i < icu->nr_sense; i++) { + uint8_t irqno = icu->init_sense[i]; + icu->src[irqno].sense = TRG_LEVEL; + } + icu->req_irq = -1; +} + +static void rxicu_init(Object *obj) +{ + SysBusDevice *d = SYS_BUS_DEVICE(obj); + RXICUState *icu = RX_ICU(obj); + + memory_region_init_io(&icu->memory, OBJECT(icu), &icu_ops, + icu, "rx-icu", 0x600); + sysbus_init_mmio(d, &icu->memory); + + qdev_init_gpio_in(DEVICE(d), rxicu_set_irq, NR_IRQS); + qdev_init_gpio_in_named(DEVICE(d), rxicu_ack_irq, "ack", 1); + sysbus_init_irq(d, &icu->_irq); + sysbus_init_irq(d, &icu->_fir); + sysbus_init_irq(d, &icu->_swi); +} + +static void rxicu_fini(Object *obj) +{ + RXICUState *icu = RX_ICU(obj); + g_free(icu->map); + g_free(icu->init_sense); +} + +static const VMStateDescription vmstate_rxicu = { + .name = "rx-icu", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT8_ARRAY(ir, RXICUState, NR_IRQS), + VMSTATE_UINT8_ARRAY(dtcer, RXICUState, NR_IRQS), + VMSTATE_UINT8_ARRAY(ier, RXICUState, NR_IRQS / 8), + VMSTATE_UINT8_ARRAY(ipr, RXICUState, 142), + VMSTATE_UINT8_ARRAY(dmasr, RXICUState, 4), + VMSTATE_UINT16(fir, RXICUState), + VMSTATE_UINT8(nmisr, RXICUState), + VMSTATE_UINT8(nmier, RXICUState), + VMSTATE_UINT8(nmiclr, RXICUState), + VMSTATE_UINT8(nmicr, RXICUState), + VMSTATE_INT16(req_irq, RXICUState), + VMSTATE_END_OF_LIST() + } +}; + +static Property rxicu_properties[] = { + DEFINE_PROP_ARRAY("ipr-map", RXICUState, nr_irqs, map, + qdev_prop_uint8, uint8_t), + DEFINE_PROP_ARRAY("trigger-level", RXICUState, nr_sense, init_sense, + qdev_prop_uint8, uint8_t), + DEFINE_PROP_END_OF_LIST(), +}; + +static void rxicu_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = rxicu_realize; + dc->vmsd = &vmstate_rxicu; + device_class_set_props(dc, rxicu_properties); +} + +static const TypeInfo rxicu_info = { + .name = TYPE_RX_ICU, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(RXICUState), + .instance_init = rxicu_init, + .instance_finalize = rxicu_fini, + .class_init = rxicu_class_init, +}; + +static void rxicu_register_types(void) +{ + type_register_static(&rxicu_info); +} + +type_init(rxicu_register_types) diff --git a/hw/intc/s390_flic.c b/hw/intc/s390_flic.c new file mode 100644 index 000000000..74e02858d --- /dev/null +++ b/hw/intc/s390_flic.c @@ -0,0 +1,503 @@ +/* + * QEMU S390x floating interrupt controller (flic) + * + * Copyright 2014 IBM Corp. 
+ * Author(s): Jens Freimann + * Cornelia Huck + * + * This work is licensed under the terms of the GNU GPL, version 2 or (at + * your option) any later version. See the COPYING file in the top-level + * directory. + */ + +#include "qemu/osdep.h" +#include "qemu/error-report.h" +#include "qemu/main-loop.h" +#include "qemu/module.h" +#include "hw/sysbus.h" +#include "hw/s390x/ioinst.h" +#include "hw/s390x/s390_flic.h" +#include "hw/qdev-properties.h" +#include "hw/s390x/css.h" +#include "trace.h" +#include "qapi/error.h" +#include "hw/s390x/s390-virtio-ccw.h" + +S390FLICStateClass *s390_get_flic_class(S390FLICState *fs) +{ + static S390FLICStateClass *class; + + if (!class) { + /* we only have one flic device, so this is fine to cache */ + class = S390_FLIC_COMMON_GET_CLASS(fs); + } + return class; +} + +QEMUS390FLICState *s390_get_qemu_flic(S390FLICState *fs) +{ + static QEMUS390FLICState *flic; + + if (!flic) { + /* we only have one flic device, so this is fine to cache */ + flic = QEMU_S390_FLIC(fs); + } + return flic; +} + +S390FLICState *s390_get_flic(void) +{ + static S390FLICState *fs; + + if (!fs) { + fs = S390_FLIC_COMMON(object_resolve_path_type("", + TYPE_S390_FLIC_COMMON, + NULL)); + } + return fs; +} + +void s390_flic_init(void) +{ + DeviceState *dev; + + if (kvm_enabled()) { + dev = qdev_new(TYPE_KVM_S390_FLIC); + object_property_add_child(qdev_get_machine(), TYPE_KVM_S390_FLIC, + OBJECT(dev)); + } else { + dev = qdev_new(TYPE_QEMU_S390_FLIC); + object_property_add_child(qdev_get_machine(), TYPE_QEMU_S390_FLIC, + OBJECT(dev)); + } + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); +} + +static int qemu_s390_register_io_adapter(S390FLICState *fs, uint32_t id, + uint8_t isc, bool swap, + bool is_maskable, uint8_t flags) +{ + /* nothing to do */ + return 0; +} + +static int qemu_s390_io_adapter_map(S390FLICState *fs, uint32_t id, + uint64_t map_addr, bool do_map) +{ + /* nothing to do */ + return 0; +} + +static int qemu_s390_add_adapter_routes(S390FLICState *fs, + AdapterRoutes *routes) +{ + return -ENOSYS; +} + +static void qemu_s390_release_adapter_routes(S390FLICState *fs, + AdapterRoutes *routes) +{ +} + +static int qemu_s390_clear_io_flic(S390FLICState *fs, uint16_t subchannel_id, + uint16_t subchannel_nr) +{ + QEMUS390FLICState *flic = s390_get_qemu_flic(fs); + QEMUS390FlicIO *cur, *next; + uint8_t isc; + + g_assert(qemu_mutex_iothread_locked()); + if (!(flic->pending & FLIC_PENDING_IO)) { + return 0; + } + + /* check all iscs */ + for (isc = 0; isc < 8; isc++) { + if (QLIST_EMPTY(&flic->io[isc])) { + continue; + } + + /* search and delete any matching one */ + QLIST_FOREACH_SAFE(cur, &flic->io[isc], next, next) { + if (cur->id == subchannel_id && cur->nr == subchannel_nr) { + QLIST_REMOVE(cur, next); + g_free(cur); + } + } + + /* update our indicator bit */ + if (QLIST_EMPTY(&flic->io[isc])) { + flic->pending &= ~ISC_TO_PENDING_IO(isc); + } + } + return 0; +} + +static int qemu_s390_modify_ais_mode(S390FLICState *fs, uint8_t isc, + uint16_t mode) +{ + QEMUS390FLICState *flic = s390_get_qemu_flic(fs); + + switch (mode) { + case SIC_IRQ_MODE_ALL: + flic->simm &= ~AIS_MODE_MASK(isc); + flic->nimm &= ~AIS_MODE_MASK(isc); + break; + case SIC_IRQ_MODE_SINGLE: + flic->simm |= AIS_MODE_MASK(isc); + flic->nimm &= ~AIS_MODE_MASK(isc); + break; + default: + return -EINVAL; + } + + return 0; +} + +static int qemu_s390_inject_airq(S390FLICState *fs, uint8_t type, + uint8_t isc, uint8_t flags) +{ + QEMUS390FLICState *flic = s390_get_qemu_flic(fs); + S390FLICStateClass *fsc 
= s390_get_flic_class(fs); + bool flag = flags & S390_ADAPTER_SUPPRESSIBLE; + uint32_t io_int_word = (isc << 27) | IO_INT_WORD_AI; + + if (flag && (flic->nimm & AIS_MODE_MASK(isc))) { + trace_qemu_s390_airq_suppressed(type, isc); + return 0; + } + + fsc->inject_io(fs, 0, 0, 0, io_int_word); + + if (flag && (flic->simm & AIS_MODE_MASK(isc))) { + flic->nimm |= AIS_MODE_MASK(isc); + trace_qemu_s390_suppress_airq(isc, "Single-Interruption Mode", + "NO-Interruptions Mode"); + } + + return 0; +} + +static void qemu_s390_flic_notify(uint32_t type) +{ + CPUState *cs; + + /* + * We have to make all CPUs see CPU_INTERRUPT_HARD, so they might + * consider it. We will kick all running CPUs and only relevant + * sleeping ones. + */ + CPU_FOREACH(cs) { + S390CPU *cpu = S390_CPU(cs); + + cs->interrupt_request |= CPU_INTERRUPT_HARD; + + /* ignore CPUs that are not sleeping */ + if (s390_cpu_get_state(cpu) != S390_CPU_STATE_OPERATING && + s390_cpu_get_state(cpu) != S390_CPU_STATE_LOAD) { + continue; + } + + /* we always kick running CPUs for now, this is tricky */ + if (cs->halted) { + /* don't check for subclasses, CPUs double check when waking up */ + if (type & FLIC_PENDING_SERVICE) { + if (!(cpu->env.psw.mask & PSW_MASK_EXT)) { + continue; + } + } else if (type & FLIC_PENDING_IO) { + if (!(cpu->env.psw.mask & PSW_MASK_IO)) { + continue; + } + } else if (type & FLIC_PENDING_MCHK_CR) { + if (!(cpu->env.psw.mask & PSW_MASK_MCHECK)) { + continue; + } + } + } + cpu_interrupt(cs, CPU_INTERRUPT_HARD); + } +} + +uint32_t qemu_s390_flic_dequeue_service(QEMUS390FLICState *flic) +{ + uint32_t tmp; + + g_assert(qemu_mutex_iothread_locked()); + g_assert(flic->pending & FLIC_PENDING_SERVICE); + tmp = flic->service_param; + flic->service_param = 0; + flic->pending &= ~FLIC_PENDING_SERVICE; + + return tmp; +} + +/* caller has to free the returned object */ +QEMUS390FlicIO *qemu_s390_flic_dequeue_io(QEMUS390FLICState *flic, uint64_t cr6) +{ + QEMUS390FlicIO *io; + uint8_t isc; + + g_assert(qemu_mutex_iothread_locked()); + if (!(flic->pending & CR6_TO_PENDING_IO(cr6))) { + return NULL; + } + + for (isc = 0; isc < 8; isc++) { + if (QLIST_EMPTY(&flic->io[isc]) || !(cr6 & ISC_TO_ISC_BITS(isc))) { + continue; + } + io = QLIST_FIRST(&flic->io[isc]); + QLIST_REMOVE(io, next); + + /* update our indicator bit */ + if (QLIST_EMPTY(&flic->io[isc])) { + flic->pending &= ~ISC_TO_PENDING_IO(isc); + } + return io; + } + + return NULL; +} + +void qemu_s390_flic_dequeue_crw_mchk(QEMUS390FLICState *flic) +{ + g_assert(qemu_mutex_iothread_locked()); + g_assert(flic->pending & FLIC_PENDING_MCHK_CR); + flic->pending &= ~FLIC_PENDING_MCHK_CR; +} + +static void qemu_s390_inject_service(S390FLICState *fs, uint32_t parm) +{ + QEMUS390FLICState *flic = s390_get_qemu_flic(fs); + + g_assert(qemu_mutex_iothread_locked()); + /* multiplexing is good enough for sclp - kvm does it internally as well */ + flic->service_param |= parm; + flic->pending |= FLIC_PENDING_SERVICE; + + qemu_s390_flic_notify(FLIC_PENDING_SERVICE); +} + +static void qemu_s390_inject_io(S390FLICState *fs, uint16_t subchannel_id, + uint16_t subchannel_nr, uint32_t io_int_parm, + uint32_t io_int_word) +{ + const uint8_t isc = IO_INT_WORD_ISC(io_int_word); + QEMUS390FLICState *flic = s390_get_qemu_flic(fs); + QEMUS390FlicIO *io; + + g_assert(qemu_mutex_iothread_locked()); + io = g_new0(QEMUS390FlicIO, 1); + io->id = subchannel_id; + io->nr = subchannel_nr; + io->parm = io_int_parm; + io->word = io_int_word; + + QLIST_INSERT_HEAD(&flic->io[isc], io, next); + flic->pending |= 
ISC_TO_PENDING_IO(isc); + + qemu_s390_flic_notify(ISC_TO_PENDING_IO(isc)); +} + +static void qemu_s390_inject_crw_mchk(S390FLICState *fs) +{ + QEMUS390FLICState *flic = s390_get_qemu_flic(fs); + + g_assert(qemu_mutex_iothread_locked()); + flic->pending |= FLIC_PENDING_MCHK_CR; + + qemu_s390_flic_notify(FLIC_PENDING_MCHK_CR); +} + +bool qemu_s390_flic_has_service(QEMUS390FLICState *flic) +{ + /* called without lock via cc->has_work, will be validated under lock */ + return !!(flic->pending & FLIC_PENDING_SERVICE); +} + +bool qemu_s390_flic_has_io(QEMUS390FLICState *flic, uint64_t cr6) +{ + /* called without lock via cc->has_work, will be validated under lock */ + return !!(flic->pending & CR6_TO_PENDING_IO(cr6)); +} + +bool qemu_s390_flic_has_crw_mchk(QEMUS390FLICState *flic) +{ + /* called without lock via cc->has_work, will be validated under lock */ + return !!(flic->pending & FLIC_PENDING_MCHK_CR); +} + +bool qemu_s390_flic_has_any(QEMUS390FLICState *flic) +{ + g_assert(qemu_mutex_iothread_locked()); + return !!flic->pending; +} + +static void qemu_s390_flic_reset(DeviceState *dev) +{ + QEMUS390FLICState *flic = QEMU_S390_FLIC(dev); + QEMUS390FlicIO *cur, *next; + int isc; + + g_assert(qemu_mutex_iothread_locked()); + flic->simm = 0; + flic->nimm = 0; + flic->pending = 0; + + /* remove all pending io interrupts */ + for (isc = 0; isc < 8; isc++) { + QLIST_FOREACH_SAFE(cur, &flic->io[isc], next, next) { + QLIST_REMOVE(cur, next); + g_free(cur); + } + } +} + +bool ais_needed(void *opaque) +{ + S390FLICState *s = opaque; + + return s->ais_supported; +} + +static const VMStateDescription qemu_s390_flic_vmstate = { + .name = "qemu-s390-flic", + .version_id = 1, + .minimum_version_id = 1, + .needed = ais_needed, + .fields = (VMStateField[]) { + VMSTATE_UINT8(simm, QEMUS390FLICState), + VMSTATE_UINT8(nimm, QEMUS390FLICState), + VMSTATE_END_OF_LIST() + } +}; + +static void qemu_s390_flic_instance_init(Object *obj) +{ + QEMUS390FLICState *flic = QEMU_S390_FLIC(obj); + int isc; + + for (isc = 0; isc < 8; isc++) { + QLIST_INIT(&flic->io[isc]); + } +} + +static void qemu_s390_flic_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + S390FLICStateClass *fsc = S390_FLIC_COMMON_CLASS(oc); + + dc->reset = qemu_s390_flic_reset; + dc->vmsd = &qemu_s390_flic_vmstate; + fsc->register_io_adapter = qemu_s390_register_io_adapter; + fsc->io_adapter_map = qemu_s390_io_adapter_map; + fsc->add_adapter_routes = qemu_s390_add_adapter_routes; + fsc->release_adapter_routes = qemu_s390_release_adapter_routes; + fsc->clear_io_irq = qemu_s390_clear_io_flic; + fsc->modify_ais_mode = qemu_s390_modify_ais_mode; + fsc->inject_airq = qemu_s390_inject_airq; + fsc->inject_service = qemu_s390_inject_service; + fsc->inject_io = qemu_s390_inject_io; + fsc->inject_crw_mchk = qemu_s390_inject_crw_mchk; +} + +static Property s390_flic_common_properties[] = { + DEFINE_PROP_UINT32("adapter_routes_max_batch", S390FLICState, + adapter_routes_max_batch, ADAPTER_ROUTES_MAX_GSI), + DEFINE_PROP_END_OF_LIST(), +}; + +static void s390_flic_common_realize(DeviceState *dev, Error **errp) +{ + S390FLICState *fs = S390_FLIC_COMMON(dev); + uint32_t max_batch = fs->adapter_routes_max_batch; + + if (max_batch > ADAPTER_ROUTES_MAX_GSI) { + error_setg(errp, "flic property adapter_routes_max_batch too big" + " (%d > %d)", max_batch, ADAPTER_ROUTES_MAX_GSI); + return; + } + + fs->ais_supported = s390_has_feat(S390_FEAT_ADAPTER_INT_SUPPRESSION); +} + +static void s390_flic_class_init(ObjectClass *oc, void *data) +{ + 
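/*
+     * Base-class init shared by the QEMU and KVM FLIC variants: only the
+     * common properties and the common realize method are installed here.
+     */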
+    DeviceClass *dc = DEVICE_CLASS(oc);
+
+    device_class_set_props(dc, s390_flic_common_properties);
+    dc->realize = s390_flic_common_realize;
+}
+
+static const TypeInfo qemu_s390_flic_info = {
+    .name = TYPE_QEMU_S390_FLIC,
+    .parent = TYPE_S390_FLIC_COMMON,
+    .instance_size = sizeof(QEMUS390FLICState),
+    .instance_init = qemu_s390_flic_instance_init,
+    .class_init = qemu_s390_flic_class_init,
+};
+
+static const TypeInfo s390_flic_common_info = {
+    .name = TYPE_S390_FLIC_COMMON,
+    .parent = TYPE_SYS_BUS_DEVICE,
+    .instance_size = sizeof(S390FLICState),
+    .class_init = s390_flic_class_init,
+    .class_size = sizeof(S390FLICStateClass),
+};
+
+static void qemu_s390_flic_register_types(void)
+{
+    type_register_static(&s390_flic_common_info);
+    type_register_static(&qemu_s390_flic_info);
+}
+
+type_init(qemu_s390_flic_register_types)
+
+static bool adapter_info_so_needed(void *opaque)
+{
+    return css_migration_enabled();
+}
+
+const VMStateDescription vmstate_adapter_info_so = {
+    .name = "s390_adapter_info/summary_offset",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .needed = adapter_info_so_needed,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT32(summary_offset, AdapterInfo),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+const VMStateDescription vmstate_adapter_info = {
+    .name = "s390_adapter_info",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT64(ind_offset, AdapterInfo),
+        /*
+         * We do not have to migrate either the id or the addresses.
+         * The id is set by css_register_io_adapter and the addresses
+         * are set based on the IndAddr objects after those get mapped.
+         */
+        VMSTATE_END_OF_LIST()
+    },
+    .subsections = (const VMStateDescription * []) {
+        &vmstate_adapter_info_so,
+        NULL
+    }
+};
+
+const VMStateDescription vmstate_adapter_routes = {
+    .name = "s390_adapter_routes",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .fields = (VMStateField[]) {
+        VMSTATE_STRUCT(adapter, AdapterRoutes, 1, vmstate_adapter_info,
+                       AdapterInfo),
+        VMSTATE_END_OF_LIST()
+    }
+};
diff --git a/hw/intc/s390_flic_kvm.c b/hw/intc/s390_flic_kvm.c
new file mode 100644
index 000000000..efe505418
--- /dev/null
+++ b/hw/intc/s390_flic_kvm.c
@@ -0,0 +1,679 @@
+/*
+ * QEMU S390x KVM floating interrupt controller (flic)
+ *
+ * Copyright 2014 IBM Corp.
+ * Author(s): Jens Freimann
+ *            Cornelia Huck
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or (at
+ * your option) any later version. See the COPYING file in the top-level
+ * directory.
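+ *
+ * Implementation note: unlike the QEMU (non-KVM) FLIC in s390_flic.c,
+ * this variant does not queue interrupts in QEMU; it hands them to the
+ * in-kernel FLIC through KVM_DEV_FLIC_* device attributes via
+ * KVM_SET_DEVICE_ATTR / KVM_GET_DEVICE_ATTR ioctls.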
+ */
+
+#include "qemu/osdep.h"
+#include "kvm/kvm_s390x.h"
+#include <sys/ioctl.h>
+#include "qemu/error-report.h"
+#include "qemu/module.h"
+#include "qapi/error.h"
+#include "sysemu/kvm.h"
+#include "hw/s390x/s390_flic.h"
+#include "hw/s390x/adapter.h"
+#include "hw/s390x/css.h"
+#include "migration/qemu-file-types.h"
+#include "trace.h"
+#include "qom/object.h"
+
+#define FLIC_SAVE_INITIAL_SIZE qemu_real_host_page_size
+#define FLIC_FAILED (-1UL)
+#define FLIC_SAVEVM_VERSION 1
+
+struct KVMS390FLICState {
+    S390FLICState parent_obj;
+
+    uint32_t fd;
+    bool clear_io_supported;
+};
+
+static KVMS390FLICState *s390_get_kvm_flic(S390FLICState *fs)
+{
+    static KVMS390FLICState *flic;
+
+    if (!flic) {
+        /* we only have one flic device, so this is fine to cache */
+        flic = KVM_S390_FLIC(fs);
+    }
+    return flic;
+}
+
+/**
+ * flic_get_all_irqs - store all pending irqs in buffer
+ * @buf: pointer to buffer which is passed to kernel
+ * @len: length of buffer
+ * @flic: pointer to flic device state
+ *
+ * Returns: -ENOMEM if buffer is too small,
+ * -EINVAL if attr.group is invalid,
+ * -EFAULT if copying to userspace failed,
+ * on success return number of stored interrupts
+ */
+static int flic_get_all_irqs(KVMS390FLICState *flic,
+                             void *buf, int len)
+{
+    struct kvm_device_attr attr = {
+        .group = KVM_DEV_FLIC_GET_ALL_IRQS,
+        .addr = (uint64_t) buf,
+        .attr = len,
+    };
+    int rc;
+
+    rc = ioctl(flic->fd, KVM_GET_DEVICE_ATTR, &attr);
+
+    return rc == -1 ? -errno : rc;
+}
+
+static void flic_enable_pfault(KVMS390FLICState *flic)
+{
+    struct kvm_device_attr attr = {
+        .group = KVM_DEV_FLIC_APF_ENABLE,
+    };
+    int rc;
+
+    rc = ioctl(flic->fd, KVM_SET_DEVICE_ATTR, &attr);
+
+    if (rc) {
+        fprintf(stderr, "flic: couldn't enable pfault\n");
+    }
+}
+
+static void flic_disable_wait_pfault(KVMS390FLICState *flic)
+{
+    struct kvm_device_attr attr = {
+        .group = KVM_DEV_FLIC_APF_DISABLE_WAIT,
+    };
+    int rc;
+
+    rc = ioctl(flic->fd, KVM_SET_DEVICE_ATTR, &attr);
+
+    if (rc) {
+        fprintf(stderr, "flic: couldn't disable pfault\n");
+    }
+}
+
+/** flic_enqueue_irqs - returns 0 on success
+ * @buf: pointer to buffer which is passed to kernel
+ * @len: length of buffer
+ * @flic: pointer to flic device state
+ *
+ * Returns: -EINVAL if attr.group is unknown
+ */
+static int flic_enqueue_irqs(void *buf, uint64_t len,
+                             KVMS390FLICState *flic)
+{
+    int rc;
+    struct kvm_device_attr attr = {
+        .group = KVM_DEV_FLIC_ENQUEUE,
+        .addr = (uint64_t) buf,
+        .attr = len,
+    };
+
+    rc = ioctl(flic->fd, KVM_SET_DEVICE_ATTR, &attr);
+
+    return rc ?
-errno : 0; +} + +static void kvm_s390_inject_flic(S390FLICState *fs, struct kvm_s390_irq *irq) +{ + static bool use_flic = true; + int r; + + if (use_flic) { + r = flic_enqueue_irqs(irq, sizeof(*irq), s390_get_kvm_flic(fs)); + if (r == -ENOSYS) { + use_flic = false; + } + if (!r) { + return; + } + } + /* fallback to legacy KVM IOCTL in case FLIC fails */ + kvm_s390_floating_interrupt_legacy(irq); +} + +static void kvm_s390_inject_service(S390FLICState *fs, uint32_t parm) +{ + struct kvm_s390_irq irq = { + .type = KVM_S390_INT_SERVICE, + .u.ext.ext_params = parm, + }; + + kvm_s390_inject_flic(fs, &irq); +} + +static void kvm_s390_inject_io(S390FLICState *fs, uint16_t subchannel_id, + uint16_t subchannel_nr, uint32_t io_int_parm, + uint32_t io_int_word) +{ + struct kvm_s390_irq irq = { + .u.io.subchannel_id = subchannel_id, + .u.io.subchannel_nr = subchannel_nr, + .u.io.io_int_parm = io_int_parm, + .u.io.io_int_word = io_int_word, + }; + + if (io_int_word & IO_INT_WORD_AI) { + irq.type = KVM_S390_INT_IO(1, 0, 0, 0); + } else { + irq.type = KVM_S390_INT_IO(0, (subchannel_id & 0xff00) >> 8, + (subchannel_id & 0x0006), + subchannel_nr); + } + kvm_s390_inject_flic(fs, &irq); +} + +static void kvm_s390_inject_crw_mchk(S390FLICState *fs) +{ + struct kvm_s390_irq irq = { + .type = KVM_S390_MCHK, + .u.mchk.cr14 = CR14_CHANNEL_REPORT_SC, + .u.mchk.mcic = s390_build_validity_mcic() | MCIC_SC_CP, + }; + + kvm_s390_inject_flic(fs, &irq); +} + +static int kvm_s390_clear_io_flic(S390FLICState *fs, uint16_t subchannel_id, + uint16_t subchannel_nr) +{ + KVMS390FLICState *flic = s390_get_kvm_flic(fs); + int rc; + uint32_t sid = subchannel_id << 16 | subchannel_nr; + struct kvm_device_attr attr = { + .group = KVM_DEV_FLIC_CLEAR_IO_IRQ, + .addr = (uint64_t) &sid, + .attr = sizeof(sid), + }; + if (unlikely(!flic->clear_io_supported)) { + return -ENOSYS; + } + rc = ioctl(flic->fd, KVM_SET_DEVICE_ATTR, &attr); + return rc ? -errno : 0; +} + +static int kvm_s390_modify_ais_mode(S390FLICState *fs, uint8_t isc, + uint16_t mode) +{ + KVMS390FLICState *flic = s390_get_kvm_flic(fs); + struct kvm_s390_ais_req req = { + .isc = isc, + .mode = mode, + }; + struct kvm_device_attr attr = { + .group = KVM_DEV_FLIC_AISM, + .addr = (uint64_t)&req, + }; + + if (!fs->ais_supported) { + return -ENOSYS; + } + + return ioctl(flic->fd, KVM_SET_DEVICE_ATTR, &attr) ? -errno : 0; +} + +static int kvm_s390_inject_airq(S390FLICState *fs, uint8_t type, + uint8_t isc, uint8_t flags) +{ + KVMS390FLICState *flic = s390_get_kvm_flic(fs); + uint32_t id = css_get_adapter_id(type, isc); + struct kvm_device_attr attr = { + .group = KVM_DEV_FLIC_AIRQ_INJECT, + .attr = id, + }; + + if (!fs->ais_supported) { + return -ENOSYS; + } + + return ioctl(flic->fd, KVM_SET_DEVICE_ATTR, &attr) ? -errno : 0; +} + +/** + * __get_all_irqs - store all pending irqs in buffer + * @flic: pointer to flic device state + * @buf: pointer to pointer to a buffer + * @len: length of buffer + * + * Returns: return value of flic_get_all_irqs + * Note: Retry and increase buffer size until flic_get_all_irqs + * either returns a value >= 0 or a negative error code. + * -ENOMEM is an exception, which means the buffer is too small + * and we should try again. 
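Growth stops once len would exceed KVM_S390_FLIC_MAX_BUFFER, at which
+ * point the loop gives up.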
Other negative error codes can be
+ * -EFAULT and -EINVAL, which we ignore at this point
+ */
+static int __get_all_irqs(KVMS390FLICState *flic,
+                          void **buf, int len)
+{
+    int r;
+
+    do {
+        /* returns -ENOMEM if the buffer is too small, or the number
+         * of queued interrupts on success */
+        r = flic_get_all_irqs(flic, *buf, len);
+        if (r >= 0) {
+            break;
+        }
+        len *= 2;
+        *buf = g_try_realloc(*buf, len);
+        if (!*buf) {
+            return -ENOMEM;
+        }
+    } while (r == -ENOMEM && len <= KVM_S390_FLIC_MAX_BUFFER);
+
+    return r;
+}
+
+static int kvm_s390_register_io_adapter(S390FLICState *fs, uint32_t id,
+                                        uint8_t isc, bool swap,
+                                        bool is_maskable, uint8_t flags)
+{
+    struct kvm_s390_io_adapter adapter = {
+        .id = id,
+        .isc = isc,
+        .maskable = is_maskable,
+        .swap = swap,
+        .flags = flags,
+    };
+    KVMS390FLICState *flic = KVM_S390_FLIC(fs);
+    int r;
+    struct kvm_device_attr attr = {
+        .group = KVM_DEV_FLIC_ADAPTER_REGISTER,
+        .addr = (uint64_t)&adapter,
+    };
+
+    if (!kvm_gsi_routing_enabled()) {
+        /* nothing to do */
+        return 0;
+    }
+
+    r = ioctl(flic->fd, KVM_SET_DEVICE_ATTR, &attr);
+
+    return r ? -errno : 0;
+}
+
+static int kvm_s390_io_adapter_map(S390FLICState *fs, uint32_t id,
+                                   uint64_t map_addr, bool do_map)
+{
+    struct kvm_s390_io_adapter_req req = {
+        .id = id,
+        .type = do_map ? KVM_S390_IO_ADAPTER_MAP : KVM_S390_IO_ADAPTER_UNMAP,
+        .addr = map_addr,
+    };
+    struct kvm_device_attr attr = {
+        .group = KVM_DEV_FLIC_ADAPTER_MODIFY,
+        .addr = (uint64_t)&req,
+    };
+    KVMS390FLICState *flic = s390_get_kvm_flic(fs);
+    int r;
+
+    if (!kvm_gsi_routing_enabled()) {
+        /* nothing to do */
+        return 0;
+    }
+
+    r = ioctl(flic->fd, KVM_SET_DEVICE_ATTR, &attr);
+    return r ? -errno : 0;
+}
+
+static int kvm_s390_add_adapter_routes(S390FLICState *fs,
+                                       AdapterRoutes *routes)
+{
+    int ret, i;
+    uint64_t ind_offset = routes->adapter.ind_offset;
+
+    if (!kvm_gsi_routing_enabled()) {
+        return -ENOSYS;
+    }
+
+    for (i = 0; i < routes->num_routes; i++) {
+        ret = kvm_irqchip_add_adapter_route(kvm_state, &routes->adapter);
+        if (ret < 0) {
+            goto out_undo;
+        }
+        routes->gsi[i] = ret;
+        routes->adapter.ind_offset++;
+    }
+    kvm_irqchip_commit_routes(kvm_state);
+
+    /* Restore passed-in structure to original state. */
+    routes->adapter.ind_offset = ind_offset;
+    return 0;
+out_undo:
+    while (--i >= 0) {
+        kvm_irqchip_release_virq(kvm_state, routes->gsi[i]);
+        routes->gsi[i] = -1;
+    }
+    routes->adapter.ind_offset = ind_offset;
+    return ret;
+}
+
+static void kvm_s390_release_adapter_routes(S390FLICState *fs,
+                                            AdapterRoutes *routes)
+{
+    int i;
+
+    if (!kvm_gsi_routing_enabled()) {
+        return;
+    }
+
+    for (i = 0; i < routes->num_routes; i++) {
+        if (routes->gsi[i] >= 0) {
+            kvm_irqchip_release_virq(kvm_state, routes->gsi[i]);
+            routes->gsi[i] = -1;
+        }
+    }
+}
+
+/**
+ * kvm_flic_save - Save pending floating interrupts
+ * @f: QEMUFile containing migration state
+ * @opaque: pointer to flic device state
+ * @size: ignored
+ *
+ * Note: Pass buf and len to kernel.
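+ * The kernel fills the buffer with struct kvm_s390_irq entries.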
+ * Start with one page and
+ * increase until the buffer is sufficient or the maximum size is
+ * reached.
+ */
+static int kvm_flic_save(QEMUFile *f, void *opaque, size_t size,
+                         const VMStateField *field, JSONWriter *vmdesc)
+{
+    KVMS390FLICState *flic = opaque;
+    int len = FLIC_SAVE_INITIAL_SIZE;
+    void *buf;
+    int count;
+    int r = 0;
+
+    flic_disable_wait_pfault((struct KVMS390FLICState *) opaque);
+
+    buf = g_try_malloc0(len);
+    if (!buf) {
+        /* Storing FLIC_FAILED into the count field here will cause the
+         * target system to fail when attempting to load irqs from the
+         * migration state */
+        error_report("flic: couldn't allocate memory");
+        qemu_put_be64(f, FLIC_FAILED);
+        return -ENOMEM;
+    }
+
+    count = __get_all_irqs(flic, &buf, len);
+    if (count < 0) {
+        error_report("flic: couldn't retrieve irqs from kernel, rc %d",
+                     count);
+        /* Storing FLIC_FAILED into the count field here will cause the
+         * target system to fail when attempting to load irqs from the
+         * migration state */
+        qemu_put_be64(f, FLIC_FAILED);
+        r = count;
+    } else {
+        qemu_put_be64(f, count);
+        qemu_put_buffer(f, (uint8_t *) buf,
+                        count * sizeof(struct kvm_s390_irq));
+    }
+    g_free(buf);
+
+    return r;
+}
+
+/**
+ * kvm_flic_load - Load pending floating interrupts
+ * @f: QEMUFile containing migration state
+ * @opaque: pointer to flic device state
+ * @size: ignored
+ *
+ * Returns: value of flic_enqueue_irqs, -EINVAL on error
+ * Note: Do nothing when no interrupts were stored
+ * in QEMUFile
+ */
+static int kvm_flic_load(QEMUFile *f, void *opaque, size_t size,
+                         const VMStateField *field)
+{
+    uint64_t len = 0;
+    uint64_t count = 0;
+    void *buf = NULL;
+    int r = 0;
+
+    flic_enable_pfault((struct KVMS390FLICState *) opaque);
+
+    count = qemu_get_be64(f);
+    len = count * sizeof(struct kvm_s390_irq);
+    if (count == FLIC_FAILED) {
+        return -EINVAL;
+    }
+    if (count == 0) {
+        return 0;
+    }
+    buf = g_try_malloc0(len);
+    if (!buf) {
+        return -ENOMEM;
+    }
+
+    if (qemu_get_buffer(f, (uint8_t *) buf, len) != len) {
+        r = -EINVAL;
+        goto out_free;
+    }
+    r = flic_enqueue_irqs(buf, len, (struct KVMS390FLICState *) opaque);
+
+out_free:
+    g_free(buf);
+    return r;
+}
+
+typedef struct KVMS390FLICStateMigTmp {
+    KVMS390FLICState *parent;
+    uint8_t simm;
+    uint8_t nimm;
+} KVMS390FLICStateMigTmp;
+
+static int kvm_flic_ais_pre_save(void *opaque)
+{
+    KVMS390FLICStateMigTmp *tmp = opaque;
+    KVMS390FLICState *flic = tmp->parent;
+    struct kvm_s390_ais_all ais;
+    struct kvm_device_attr attr = {
+        .group = KVM_DEV_FLIC_AISM_ALL,
+        .addr = (uint64_t)&ais,
+        .attr = sizeof(ais),
+    };
+
+    if (ioctl(flic->fd, KVM_GET_DEVICE_ATTR, &attr)) {
+        error_report("Failed to retrieve kvm flic ais states");
+        return -EINVAL;
+    }
+
+    tmp->simm = ais.simm;
+    tmp->nimm = ais.nimm;
+
+    return 0;
+}
+
+static int kvm_flic_ais_post_load(void *opaque, int version_id)
+{
+    KVMS390FLICStateMigTmp *tmp = opaque;
+    KVMS390FLICState *flic = tmp->parent;
+    struct kvm_s390_ais_all ais = {
+        .simm = tmp->simm,
+        .nimm = tmp->nimm,
+    };
+    struct kvm_device_attr attr = {
+        .group = KVM_DEV_FLIC_AISM_ALL,
+        .addr = (uint64_t)&ais,
+    };
+
+    /* This can happen when the user misconfigures their guests in an
+     * incompatible fashion or without a CPU model. For example, using
+     * qemu with -cpu host (which is not migration safe) and then
+     * migrating from a host that has AIS to a host that has no AIS.
+     * In that case the target system will reject the migration here.
+     */
+    if (!ais_needed(flic)) {
+        return -ENOSYS;
+    }
+
+    return ioctl(flic->fd, KVM_SET_DEVICE_ATTR, &attr) ?
-errno : 0; +} + +static const VMStateDescription kvm_s390_flic_ais_tmp = { + .name = "s390-flic-ais-tmp", + .pre_save = kvm_flic_ais_pre_save, + .post_load = kvm_flic_ais_post_load, + .fields = (VMStateField[]) { + VMSTATE_UINT8(simm, KVMS390FLICStateMigTmp), + VMSTATE_UINT8(nimm, KVMS390FLICStateMigTmp), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription kvm_s390_flic_vmstate_ais = { + .name = "s390-flic/ais", + .version_id = 1, + .minimum_version_id = 1, + .needed = ais_needed, + .fields = (VMStateField[]) { + VMSTATE_WITH_TMP(KVMS390FLICState, KVMS390FLICStateMigTmp, + kvm_s390_flic_ais_tmp), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription kvm_s390_flic_vmstate = { + /* should have been like kvm-s390-flic, + * can't change without breaking compat */ + .name = "s390-flic", + .version_id = FLIC_SAVEVM_VERSION, + .minimum_version_id = FLIC_SAVEVM_VERSION, + .fields = (VMStateField[]) { + { + .name = "irqs", + .info = &(const VMStateInfo) { + .name = "irqs", + .get = kvm_flic_load, + .put = kvm_flic_save, + }, + .flags = VMS_SINGLE, + }, + VMSTATE_END_OF_LIST() + }, + .subsections = (const VMStateDescription * []) { + &kvm_s390_flic_vmstate_ais, + NULL + } +}; + +struct KVMS390FLICStateClass { + S390FLICStateClass parent_class; + DeviceRealize parent_realize; +}; +typedef struct KVMS390FLICStateClass KVMS390FLICStateClass; + +DECLARE_CLASS_CHECKERS(KVMS390FLICStateClass, KVM_S390_FLIC, + TYPE_KVM_S390_FLIC) + + +static void kvm_s390_flic_realize(DeviceState *dev, Error **errp) +{ + KVMS390FLICState *flic_state = KVM_S390_FLIC(dev); + struct kvm_create_device cd = {0}; + struct kvm_device_attr test_attr = {0}; + int ret; + Error *err = NULL; + + KVM_S390_FLIC_GET_CLASS(dev)->parent_realize(dev, &err); + if (err) { + error_propagate(errp, err); + return; + } + flic_state->fd = -1; + + cd.type = KVM_DEV_TYPE_FLIC; + ret = kvm_vm_ioctl(kvm_state, KVM_CREATE_DEVICE, &cd); + if (ret < 0) { + error_setg_errno(errp, errno, "Creating the KVM device failed"); + trace_flic_create_device(errno); + return; + } + flic_state->fd = cd.fd; + + /* Check clear_io_irq support */ + test_attr.group = KVM_DEV_FLIC_CLEAR_IO_IRQ; + flic_state->clear_io_supported = !ioctl(flic_state->fd, + KVM_HAS_DEVICE_ATTR, test_attr); +} + +static void kvm_s390_flic_reset(DeviceState *dev) +{ + KVMS390FLICState *flic = KVM_S390_FLIC(dev); + S390FLICState *fs = S390_FLIC_COMMON(dev); + struct kvm_device_attr attr = { + .group = KVM_DEV_FLIC_CLEAR_IRQS, + }; + int rc = 0; + uint8_t isc; + + if (flic->fd == -1) { + return; + } + + flic_disable_wait_pfault(flic); + + if (fs->ais_supported) { + for (isc = 0; isc <= MAX_ISC; isc++) { + rc = kvm_s390_modify_ais_mode(fs, isc, SIC_IRQ_MODE_ALL); + if (rc) { + error_report("Failed to reset ais mode for isc %d: %s", + isc, strerror(-rc)); + } + } + } + + rc = ioctl(flic->fd, KVM_SET_DEVICE_ATTR, &attr); + if (rc) { + trace_flic_reset_failed(errno); + } + + flic_enable_pfault(flic); +} + +static void kvm_s390_flic_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + S390FLICStateClass *fsc = S390_FLIC_COMMON_CLASS(oc); + + KVM_S390_FLIC_CLASS(oc)->parent_realize = dc->realize; + dc->realize = kvm_s390_flic_realize; + dc->vmsd = &kvm_s390_flic_vmstate; + dc->reset = kvm_s390_flic_reset; + fsc->register_io_adapter = kvm_s390_register_io_adapter; + fsc->io_adapter_map = kvm_s390_io_adapter_map; + fsc->add_adapter_routes = kvm_s390_add_adapter_routes; + fsc->release_adapter_routes = kvm_s390_release_adapter_routes; + 
fsc->clear_io_irq = kvm_s390_clear_io_flic; + fsc->modify_ais_mode = kvm_s390_modify_ais_mode; + fsc->inject_airq = kvm_s390_inject_airq; + fsc->inject_service = kvm_s390_inject_service; + fsc->inject_io = kvm_s390_inject_io; + fsc->inject_crw_mchk = kvm_s390_inject_crw_mchk; +} + +static const TypeInfo kvm_s390_flic_info = { + .name = TYPE_KVM_S390_FLIC, + .parent = TYPE_S390_FLIC_COMMON, + .instance_size = sizeof(KVMS390FLICState), + .class_size = sizeof(KVMS390FLICStateClass), + .class_init = kvm_s390_flic_class_init, +}; + +static void kvm_s390_flic_register_types(void) +{ + type_register_static(&kvm_s390_flic_info); +} + +type_init(kvm_s390_flic_register_types) diff --git a/hw/intc/sh_intc.c b/hw/intc/sh_intc.c new file mode 100644 index 000000000..c9b0b0c1e --- /dev/null +++ b/hw/intc/sh_intc.c @@ -0,0 +1,449 @@ +/* + * SuperH interrupt controller module + * + * Copyright (c) 2007 Magnus Damm + * Based on sh_timer.c and arm_timer.c by Paul Brook + * Copyright (c) 2005-2006 CodeSourcery. + * + * This code is licensed under the GPL. + */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "cpu.h" +#include "hw/sh4/sh_intc.h" +#include "hw/irq.h" +#include "hw/sh4/sh.h" +#include "trace.h" + +void sh_intc_toggle_source(struct intc_source *source, + int enable_adj, int assert_adj) +{ + int enable_changed = 0; + int pending_changed = 0; + int old_pending; + + if (source->enable_count == source->enable_max && enable_adj == -1) { + enable_changed = -1; + } + source->enable_count += enable_adj; + + if (source->enable_count == source->enable_max) { + enable_changed = 1; + } + source->asserted += assert_adj; + + old_pending = source->pending; + source->pending = source->asserted && + (source->enable_count == source->enable_max); + + if (old_pending != source->pending) { + pending_changed = 1; + } + if (pending_changed) { + if (source->pending) { + source->parent->pending++; + if (source->parent->pending == 1) { + cpu_interrupt(first_cpu, CPU_INTERRUPT_HARD); + } + } else { + source->parent->pending--; + if (source->parent->pending == 0) { + cpu_reset_interrupt(first_cpu, CPU_INTERRUPT_HARD); + } + } + } + + if (enable_changed || assert_adj || pending_changed) { + trace_sh_intc_sources(source->parent->pending, source->asserted, + source->enable_count, source->enable_max, + source->vect, source->asserted ? "asserted " : + assert_adj ? "deasserted" : "", + enable_changed == 1 ? "enabled " : + enable_changed == -1 ? "disabled " : "", + source->pending ? 
"pending" : ""); + } +} + +static void sh_intc_set_irq(void *opaque, int n, int level) +{ + struct intc_desc *desc = opaque; + struct intc_source *source = &desc->sources[n]; + + if (level && !source->asserted) { + sh_intc_toggle_source(source, 0, 1); + } else if (!level && source->asserted) { + sh_intc_toggle_source(source, 0, -1); + } +} + +int sh_intc_get_pending_vector(struct intc_desc *desc, int imask) +{ + unsigned int i; + + /* slow: use a linked lists of pending sources instead */ + /* wrong: take interrupt priority into account (one list per priority) */ + + if (imask == 0x0f) { + return -1; /* FIXME, update code to include priority per source */ + } + + for (i = 0; i < desc->nr_sources; i++) { + struct intc_source *source = &desc->sources[i]; + + if (source->pending) { + trace_sh_intc_pending(desc->pending, source->vect); + return source->vect; + } + } + g_assert_not_reached(); +} + +typedef enum { + INTC_MODE_NONE, + INTC_MODE_DUAL_SET, + INTC_MODE_DUAL_CLR, + INTC_MODE_ENABLE_REG, + INTC_MODE_MASK_REG, +} SHIntCMode; +#define INTC_MODE_IS_PRIO 0x80 + +static SHIntCMode sh_intc_mode(unsigned long address, unsigned long set_reg, + unsigned long clr_reg) +{ + if (address != A7ADDR(set_reg) && address != A7ADDR(clr_reg)) { + return INTC_MODE_NONE; + } + if (set_reg && clr_reg) { + return address == A7ADDR(set_reg) ? + INTC_MODE_DUAL_SET : INTC_MODE_DUAL_CLR; + } + return set_reg ? INTC_MODE_ENABLE_REG : INTC_MODE_MASK_REG; +} + +static void sh_intc_locate(struct intc_desc *desc, + unsigned long address, + unsigned long **datap, + intc_enum **enums, + unsigned int *first, + unsigned int *width, + unsigned int *modep) +{ + SHIntCMode mode; + unsigned int i; + + /* this is slow but works for now */ + + if (desc->mask_regs) { + for (i = 0; i < desc->nr_mask_regs; i++) { + struct intc_mask_reg *mr = &desc->mask_regs[i]; + + mode = sh_intc_mode(address, mr->set_reg, mr->clr_reg); + if (mode != INTC_MODE_NONE) { + *modep = mode; + *datap = &mr->value; + *enums = mr->enum_ids; + *first = mr->reg_width - 1; + *width = 1; + return; + } + } + } + + if (desc->prio_regs) { + for (i = 0; i < desc->nr_prio_regs; i++) { + struct intc_prio_reg *pr = &desc->prio_regs[i]; + + mode = sh_intc_mode(address, pr->set_reg, pr->clr_reg); + if (mode != INTC_MODE_NONE) { + *modep = mode | INTC_MODE_IS_PRIO; + *datap = &pr->value; + *enums = pr->enum_ids; + *first = pr->reg_width / pr->field_width - 1; + *width = pr->field_width; + return; + } + } + } + g_assert_not_reached(); +} + +static void sh_intc_toggle_mask(struct intc_desc *desc, intc_enum id, + int enable, int is_group) +{ + struct intc_source *source = &desc->sources[id]; + + if (!id) { + return; + } + if (!source->next_enum_id && (!source->enable_max || !source->vect)) { + qemu_log_mask(LOG_UNIMP, + "sh_intc: reserved interrupt source %d modified\n", id); + return; + } + + if (source->vect) { + sh_intc_toggle_source(source, enable ? 
1 : -1, 0); + } + + if ((is_group || !source->vect) && source->next_enum_id) { + sh_intc_toggle_mask(desc, source->next_enum_id, enable, 1); + } + + if (!source->vect) { + trace_sh_intc_set(id, !!enable); + } +} + +static uint64_t sh_intc_read(void *opaque, hwaddr offset, unsigned size) +{ + struct intc_desc *desc = opaque; + intc_enum *enum_ids; + unsigned int first; + unsigned int width; + unsigned int mode; + unsigned long *valuep; + + sh_intc_locate(desc, (unsigned long)offset, &valuep, + &enum_ids, &first, &width, &mode); + trace_sh_intc_read(size, (uint64_t)offset, *valuep); + return *valuep; +} + +static void sh_intc_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + struct intc_desc *desc = opaque; + intc_enum *enum_ids; + unsigned int first; + unsigned int width; + unsigned int mode; + unsigned long *valuep; + unsigned int k; + unsigned long mask; + + trace_sh_intc_write(size, (uint64_t)offset, value); + sh_intc_locate(desc, (unsigned long)offset, &valuep, + &enum_ids, &first, &width, &mode); + switch (mode) { + case INTC_MODE_ENABLE_REG | INTC_MODE_IS_PRIO: + break; + case INTC_MODE_DUAL_SET: + value |= *valuep; + break; + case INTC_MODE_DUAL_CLR: + value = *valuep & ~value; + break; + default: + g_assert_not_reached(); + } + + for (k = 0; k <= first; k++) { + mask = (1 << width) - 1; + mask <<= (first - k) * width; + + if ((*valuep & mask) != (value & mask)) { + sh_intc_toggle_mask(desc, enum_ids[k], value & mask, 0); + } + } + + *valuep = value; +} + +static const MemoryRegionOps sh_intc_ops = { + .read = sh_intc_read, + .write = sh_intc_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void sh_intc_register_source(struct intc_desc *desc, + intc_enum source, + struct intc_group *groups, + int nr_groups) +{ + unsigned int i, k; + intc_enum id; + + if (desc->mask_regs) { + for (i = 0; i < desc->nr_mask_regs; i++) { + struct intc_mask_reg *mr = &desc->mask_regs[i]; + + for (k = 0; k < ARRAY_SIZE(mr->enum_ids); k++) { + id = mr->enum_ids[k]; + if (id && id == source) { + desc->sources[id].enable_max++; + } + } + } + } + + if (desc->prio_regs) { + for (i = 0; i < desc->nr_prio_regs; i++) { + struct intc_prio_reg *pr = &desc->prio_regs[i]; + + for (k = 0; k < ARRAY_SIZE(pr->enum_ids); k++) { + id = pr->enum_ids[k]; + if (id && id == source) { + desc->sources[id].enable_max++; + } + } + } + } + + if (groups) { + for (i = 0; i < nr_groups; i++) { + struct intc_group *gr = &groups[i]; + + for (k = 0; k < ARRAY_SIZE(gr->enum_ids); k++) { + id = gr->enum_ids[k]; + if (id && id == source) { + desc->sources[id].enable_max++; + } + } + } + } + +} + +void sh_intc_register_sources(struct intc_desc *desc, + struct intc_vect *vectors, + int nr_vectors, + struct intc_group *groups, + int nr_groups) +{ + unsigned int i, k; + intc_enum id; + struct intc_source *s; + + for (i = 0; i < nr_vectors; i++) { + struct intc_vect *vect = &vectors[i]; + + sh_intc_register_source(desc, vect->enum_id, groups, nr_groups); + id = vect->enum_id; + if (id) { + s = &desc->sources[id]; + s->vect = vect->vect; + trace_sh_intc_register("source", vect->enum_id, s->vect, + s->enable_count, s->enable_max); + } + } + + if (groups) { + for (i = 0; i < nr_groups; i++) { + struct intc_group *gr = &groups[i]; + + id = gr->enum_id; + s = &desc->sources[id]; + s->next_enum_id = gr->enum_ids[0]; + + for (k = 1; k < ARRAY_SIZE(gr->enum_ids); k++) { + if (gr->enum_ids[k]) { + id = gr->enum_ids[k - 1]; + s = &desc->sources[id]; + s->next_enum_id = gr->enum_ids[k]; + } + } + 
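/* groups have no vector of their own; trace them with a dummy 0xffff */
+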
trace_sh_intc_register("group", gr->enum_id, 0xffff, + s->enable_count, s->enable_max); + } + } +} + +static unsigned int sh_intc_register(MemoryRegion *sysmem, + struct intc_desc *desc, + const unsigned long address, + const char *type, + const char *action, + const unsigned int index) +{ + char name[60]; + MemoryRegion *iomem, *iomem_p4, *iomem_a7; + + if (!address) { + return 0; + } + + iomem = &desc->iomem; + iomem_p4 = &desc->iomem_aliases[index]; + iomem_a7 = iomem_p4 + 1; + + snprintf(name, sizeof(name), "intc-%s-%s-%s", type, action, "p4"); + memory_region_init_alias(iomem_p4, NULL, name, iomem, A7ADDR(address), 4); + memory_region_add_subregion(sysmem, P4ADDR(address), iomem_p4); + + snprintf(name, sizeof(name), "intc-%s-%s-%s", type, action, "a7"); + memory_region_init_alias(iomem_a7, NULL, name, iomem, A7ADDR(address), 4); + memory_region_add_subregion(sysmem, A7ADDR(address), iomem_a7); + + /* used to increment aliases index */ + return 2; +} + +int sh_intc_init(MemoryRegion *sysmem, + struct intc_desc *desc, + int nr_sources, + struct intc_mask_reg *mask_regs, + int nr_mask_regs, + struct intc_prio_reg *prio_regs, + int nr_prio_regs) +{ + unsigned int i, j; + + desc->pending = 0; + desc->nr_sources = nr_sources; + desc->mask_regs = mask_regs; + desc->nr_mask_regs = nr_mask_regs; + desc->prio_regs = prio_regs; + desc->nr_prio_regs = nr_prio_regs; + /* Allocate 4 MemoryRegions per register (2 actions * 2 aliases) */ + desc->iomem_aliases = g_new0(MemoryRegion, + (nr_mask_regs + nr_prio_regs) * 4); + desc->sources = g_new0(struct intc_source, nr_sources); + for (i = 0; i < nr_sources; i++) { + desc->sources[i].parent = desc; + } + desc->irqs = qemu_allocate_irqs(sh_intc_set_irq, desc, nr_sources); + memory_region_init_io(&desc->iomem, NULL, &sh_intc_ops, desc, "intc", + 0x100000000ULL); + j = 0; + if (desc->mask_regs) { + for (i = 0; i < desc->nr_mask_regs; i++) { + struct intc_mask_reg *mr = &desc->mask_regs[i]; + + j += sh_intc_register(sysmem, desc, mr->set_reg, "mask", "set", j); + j += sh_intc_register(sysmem, desc, mr->clr_reg, "mask", "clr", j); + } + } + + if (desc->prio_regs) { + for (i = 0; i < desc->nr_prio_regs; i++) { + struct intc_prio_reg *pr = &desc->prio_regs[i]; + + j += sh_intc_register(sysmem, desc, pr->set_reg, "prio", "set", j); + j += sh_intc_register(sysmem, desc, pr->clr_reg, "prio", "clr", j); + } + } + + return 0; +} + +/* + * Assert level IRL interrupt. + * 0:deassert. 1:lowest priority,... 15:highest priority + */ +void sh_intc_set_irl(void *opaque, int n, int level) +{ + struct intc_source *s = opaque; + int i, irl = level ^ 15; + intc_enum id = s->next_enum_id; + + for (i = 0; id; id = s->next_enum_id, i++) { + s = &s->parent->sources[id]; + if (i == irl) { + sh_intc_toggle_source(s, s->enable_count ? 0 : 1, + s->asserted ? 0 : 1); + } else if (s->asserted) { + sh_intc_toggle_source(s, 0, -1); + } + } +} diff --git a/hw/intc/sifive_plic.c b/hw/intc/sifive_plic.c new file mode 100644 index 000000000..877e76877 --- /dev/null +++ b/hw/intc/sifive_plic.c @@ -0,0 +1,563 @@ +/* + * SiFive PLIC (Platform Level Interrupt Controller) + * + * Copyright (c) 2017 SiFive, Inc. + * + * This provides a parameterizable interrupt controller based on SiFive's PLIC. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. 
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "qemu/log.h"
+#include "qemu/module.h"
+#include "qemu/error-report.h"
+#include "hw/sysbus.h"
+#include "hw/pci/msi.h"
+#include "hw/qdev-properties.h"
+#include "hw/intc/sifive_plic.h"
+#include "target/riscv/cpu.h"
+#include "migration/vmstate.h"
+#include "hw/irq.h"
+
+#define RISCV_DEBUG_PLIC 0
+
+static PLICMode char_to_mode(char c)
+{
+    switch (c) {
+    case 'U': return PLICMode_U;
+    case 'S': return PLICMode_S;
+    case 'H': return PLICMode_H;
+    case 'M': return PLICMode_M;
+    default:
+        error_report("plic: invalid mode '%c'", c);
+        exit(1);
+    }
+}
+
+static char mode_to_char(PLICMode m)
+{
+    switch (m) {
+    case PLICMode_U: return 'U';
+    case PLICMode_S: return 'S';
+    case PLICMode_H: return 'H';
+    case PLICMode_M: return 'M';
+    default: return '?';
+    }
+}
+
+static void sifive_plic_print_state(SiFivePLICState *plic)
+{
+    int i;
+    int addrid;
+
+    /* pending */
+    qemu_log("pending : ");
+    for (i = plic->bitfield_words - 1; i >= 0; i--) {
+        qemu_log("%08x", plic->pending[i]);
+    }
+    qemu_log("\n");
+
+    /* claimed */
+    qemu_log("claimed : ");
+    for (i = plic->bitfield_words - 1; i >= 0; i--) {
+        qemu_log("%08x", plic->claimed[i]);
+    }
+    qemu_log("\n");
+
+    for (addrid = 0; addrid < plic->num_addrs; addrid++) {
+        qemu_log("hart%d-%c enable: ",
+                 plic->addr_config[addrid].hartid,
+                 mode_to_char(plic->addr_config[addrid].mode));
+        for (i = plic->bitfield_words - 1; i >= 0; i--) {
+            qemu_log("%08x", plic->enable[addrid * plic->bitfield_words + i]);
+        }
+        qemu_log("\n");
+    }
+}
+
+static uint32_t atomic_set_masked(uint32_t *a, uint32_t mask, uint32_t value)
+{
+    uint32_t old, new, cmp = qatomic_read(a);
+
+    do {
+        old = cmp;
+        new = (old & ~mask) | (value & mask);
+        cmp = qatomic_cmpxchg(a, old, new);
+    } while (old != cmp);
+
+    return old;
+}
+
+static void sifive_plic_set_pending(SiFivePLICState *plic, int irq, bool level)
+{
+    atomic_set_masked(&plic->pending[irq >> 5], 1 << (irq & 31), -!!level);
+}
+
+static void sifive_plic_set_claimed(SiFivePLICState *plic, int irq, bool level)
+{
+    atomic_set_masked(&plic->claimed[irq >> 5], 1 << (irq & 31), -!!level);
+}
+
+static int sifive_plic_irqs_pending(SiFivePLICState *plic, uint32_t addrid)
+{
+    int i, j;
+    for (i = 0; i < plic->bitfield_words; i++) {
+        uint32_t pending_enabled_not_claimed =
+            (plic->pending[i] & ~plic->claimed[i]) &
+            plic->enable[addrid * plic->bitfield_words + i];
+        if (!pending_enabled_not_claimed) {
+            continue;
+        }
+        for (j = 0; j < 32; j++) {
+            int irq = (i << 5) + j;
+            uint32_t prio = plic->source_priority[irq];
+            int enabled = pending_enabled_not_claimed & (1 << j);
+            if (enabled && prio > plic->target_priority[addrid]) {
+                return 1;
+            }
+        }
+    }
+    return 0;
+}
+
+static void sifive_plic_update(SiFivePLICState *plic)
+{
+    int addrid;
+
+    /* raise irq on harts where this irq is enabled */
+    for (addrid = 0; addrid < plic->num_addrs; addrid++) {
+        uint32_t hartid = plic->addr_config[addrid].hartid;
+        PLICMode mode = plic->addr_config[addrid].mode;
+        int level = sifive_plic_irqs_pending(plic, addrid);
+
+        switch (mode) {
+        case PLICMode_M:
+            qemu_set_irq(plic->m_external_irqs[hartid -
plic->hartid_base], level);
+            break;
+        case PLICMode_S:
+            qemu_set_irq(plic->s_external_irqs[hartid - plic->hartid_base], level);
+            break;
+        default:
+            break;
+        }
+    }
+
+    if (RISCV_DEBUG_PLIC) {
+        sifive_plic_print_state(plic);
+    }
+}
+
+static uint32_t sifive_plic_claim(SiFivePLICState *plic, uint32_t addrid)
+{
+    int i, j;
+    uint32_t max_irq = 0;
+    uint32_t max_prio = plic->target_priority[addrid];
+
+    for (i = 0; i < plic->bitfield_words; i++) {
+        uint32_t pending_enabled_not_claimed =
+            (plic->pending[i] & ~plic->claimed[i]) &
+            plic->enable[addrid * plic->bitfield_words + i];
+        if (!pending_enabled_not_claimed) {
+            continue;
+        }
+        for (j = 0; j < 32; j++) {
+            int irq = (i << 5) + j;
+            uint32_t prio = plic->source_priority[irq];
+            int enabled = pending_enabled_not_claimed & (1 << j);
+            if (enabled && prio > max_prio) {
+                max_irq = irq;
+                max_prio = prio;
+            }
+        }
+    }
+
+    if (max_irq) {
+        sifive_plic_set_pending(plic, max_irq, false);
+        sifive_plic_set_claimed(plic, max_irq, true);
+    }
+    return max_irq;
+}
+
+static uint64_t sifive_plic_read(void *opaque, hwaddr addr, unsigned size)
+{
+    SiFivePLICState *plic = opaque;
+
+    /* reads must be 4 byte words */
+    if ((addr & 0x3) != 0) {
+        goto err;
+    }
+
+    if (addr >= plic->priority_base && /* 4 bytes per source */
+        addr < plic->priority_base + (plic->num_sources << 2))
+    {
+        uint32_t irq = ((addr - plic->priority_base) >> 2) + 1;
+        if (RISCV_DEBUG_PLIC) {
+            qemu_log("plic: read priority: irq=%d priority=%d\n",
+                irq, plic->source_priority[irq]);
+        }
+        return plic->source_priority[irq];
+    } else if (addr >= plic->pending_base && /* 1 bit per source */
+               addr < plic->pending_base + (plic->num_sources >> 3))
+    {
+        uint32_t word = (addr - plic->pending_base) >> 2;
+        if (RISCV_DEBUG_PLIC) {
+            qemu_log("plic: read pending: word=%d value=%d\n",
+                word, plic->pending[word]);
+        }
+        return plic->pending[word];
+    } else if (addr >= plic->enable_base && /* 1 bit per source */
+               addr < plic->enable_base + plic->num_addrs * plic->enable_stride)
+    {
+        uint32_t addrid = (addr - plic->enable_base) / plic->enable_stride;
+        uint32_t wordid = (addr & (plic->enable_stride - 1)) >> 2;
+        if (wordid < plic->bitfield_words) {
+            if (RISCV_DEBUG_PLIC) {
+                qemu_log("plic: read enable: hart%d-%c word=%d value=%x\n",
+                    plic->addr_config[addrid].hartid,
+                    mode_to_char(plic->addr_config[addrid].mode), wordid,
+                    plic->enable[addrid * plic->bitfield_words + wordid]);
+            }
+            return plic->enable[addrid * plic->bitfield_words + wordid];
+        }
+    } else if (addr >= plic->context_base && /* 4 bytes per reg */
+               addr < plic->context_base + plic->num_addrs * plic->context_stride)
+    {
+        uint32_t addrid = (addr - plic->context_base) / plic->context_stride;
+        uint32_t contextid = (addr & (plic->context_stride - 1));
+        if (contextid == 0) {
+            if (RISCV_DEBUG_PLIC) {
+                qemu_log("plic: read priority: hart%d-%c priority=%x\n",
+                    plic->addr_config[addrid].hartid,
+                    mode_to_char(plic->addr_config[addrid].mode),
+                    plic->target_priority[addrid]);
+            }
+            return plic->target_priority[addrid];
+        } else if (contextid == 4) {
+            uint32_t value = sifive_plic_claim(plic, addrid);
+            if (RISCV_DEBUG_PLIC) {
+                qemu_log("plic: read claim: hart%d-%c irq=%x\n",
+                    plic->addr_config[addrid].hartid,
+                    mode_to_char(plic->addr_config[addrid].mode),
+                    value);
+            }
+            sifive_plic_update(plic);
+            return value;
+        }
+    }
+
+err:
+    qemu_log_mask(LOG_GUEST_ERROR,
+                  "%s: Invalid register read 0x%" HWADDR_PRIx "\n",
+                  __func__, addr);
+    return 0;
+}
+
+static void sifive_plic_write(void *opaque, hwaddr addr, uint64_t value,
+                              unsigned size)
+{
+    SiFivePLICState *plic = opaque;
+
+    /* writes must be 4 byte words */
+    if ((addr & 0x3) != 0) {
+        goto err;
+    }
+
+    if (addr >= plic->priority_base && /* 4 bytes per source */
+        addr < plic->priority_base + (plic->num_sources << 2))
+    {
+        uint32_t irq = ((addr - plic->priority_base) >> 2) + 1;
+        plic->source_priority[irq] = value & 7;
+        if (RISCV_DEBUG_PLIC) {
+            qemu_log("plic: write priority: irq=%d priority=%d\n",
+                irq, plic->source_priority[irq]);
+        }
+        sifive_plic_update(plic);
+        return;
+    } else if (addr >= plic->pending_base && /* 1 bit per source */
+               addr < plic->pending_base + (plic->num_sources >> 3))
+    {
+        qemu_log_mask(LOG_GUEST_ERROR,
+                      "%s: invalid pending write: 0x%" HWADDR_PRIx "",
+                      __func__, addr);
+        return;
+    } else if (addr >= plic->enable_base && /* 1 bit per source */
+               addr < plic->enable_base + plic->num_addrs * plic->enable_stride)
+    {
+        uint32_t addrid = (addr - plic->enable_base) / plic->enable_stride;
+        uint32_t wordid = (addr & (plic->enable_stride - 1)) >> 2;
+        if (wordid < plic->bitfield_words) {
+            plic->enable[addrid * plic->bitfield_words + wordid] = value;
+            if (RISCV_DEBUG_PLIC) {
+                qemu_log("plic: write enable: hart%d-%c word=%d value=%x\n",
+                    plic->addr_config[addrid].hartid,
+                    mode_to_char(plic->addr_config[addrid].mode), wordid,
+                    plic->enable[addrid * plic->bitfield_words + wordid]);
+            }
+            return;
+        }
+    } else if (addr >= plic->context_base && /* 4 bytes per reg */
+               addr < plic->context_base + plic->num_addrs * plic->context_stride)
+    {
+        uint32_t addrid = (addr - plic->context_base) / plic->context_stride;
+        uint32_t contextid = (addr & (plic->context_stride - 1));
+        if (contextid == 0) {
+            if (RISCV_DEBUG_PLIC) {
+                qemu_log("plic: write priority: hart%d-%c priority=%x\n",
+                    plic->addr_config[addrid].hartid,
+                    mode_to_char(plic->addr_config[addrid].mode),
+                    plic->target_priority[addrid]);
+            }
+            if (value <= plic->num_priorities) {
+                plic->target_priority[addrid] = value;
+                sifive_plic_update(plic);
+            }
+            return;
+        } else if (contextid == 4) {
+            if (RISCV_DEBUG_PLIC) {
+                qemu_log("plic: write claim: hart%d-%c irq=%x\n",
+                    plic->addr_config[addrid].hartid,
+                    mode_to_char(plic->addr_config[addrid].mode),
+                    (uint32_t)value);
+            }
+            if (value < plic->num_sources) {
+                sifive_plic_set_claimed(plic, value, false);
+                sifive_plic_update(plic);
+            }
+            return;
+        }
+    }
+
+err:
+    qemu_log_mask(LOG_GUEST_ERROR,
+                  "%s: Invalid register write 0x%" HWADDR_PRIx "\n",
+                  __func__, addr);
+}
+
+static const MemoryRegionOps sifive_plic_ops = {
+    .read = sifive_plic_read,
+    .write = sifive_plic_write,
+    .endianness = DEVICE_LITTLE_ENDIAN,
+    .valid = {
+        .min_access_size = 4,
+        .max_access_size = 4
+    }
+};
+
+/*
+ * parse PLIC hart/mode address offset config
+ *
+ * "M"              1 hart with M mode
+ * "MS,MS"          2 harts, 0-1 with M and S mode
+ * "M,MS,MS,MS,MS"  5 harts, 0 with M mode, 1-4 with M and S mode
+ */
+static void parse_hart_config(SiFivePLICState *plic)
+{
+    int addrid, hartid, modes;
+    const char *p;
+    char c;
+
+    /* count and validate hart/mode combinations */
+    addrid = 0, hartid = 0, modes = 0;
+    p = plic->hart_config;
+    while ((c = *p++)) {
+        if (c == ',') {
+            addrid += ctpop8(modes);
+            modes = 0;
+            hartid++;
+        } else {
+            int m = 1 << char_to_mode(c);
+            if (modes == (modes | m)) {
+                error_report("plic: duplicate mode '%c' in config: %s",
+                             c, plic->hart_config);
+                exit(1);
+            }
+            modes |= m;
+        }
+    }
+    if (modes) {
+        addrid += ctpop8(modes);
+    }
+    hartid++;
+
+    plic->num_addrs = addrid;
+    plic->num_harts = hartid;
+
+    /* store hart/mode
combinations */
+    plic->addr_config = g_new(PLICAddr, plic->num_addrs);
+    addrid = 0, hartid = plic->hartid_base;
+    p = plic->hart_config;
+    while ((c = *p++)) {
+        if (c == ',') {
+            hartid++;
+        } else {
+            plic->addr_config[addrid].addrid = addrid;
+            plic->addr_config[addrid].hartid = hartid;
+            plic->addr_config[addrid].mode = char_to_mode(c);
+            addrid++;
+        }
+    }
+}
+
+static void sifive_plic_irq_request(void *opaque, int irq, int level)
+{
+    SiFivePLICState *s = opaque;
+
+    sifive_plic_set_pending(s, irq, level > 0);
+    sifive_plic_update(s);
+}
+
+static void sifive_plic_realize(DeviceState *dev, Error **errp)
+{
+    SiFivePLICState *s = SIFIVE_PLIC(dev);
+    int i;
+
+    memory_region_init_io(&s->mmio, OBJECT(dev), &sifive_plic_ops, s,
+                          TYPE_SIFIVE_PLIC, s->aperture_size);
+    sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->mmio);
+
+    parse_hart_config(s);
+
+    s->bitfield_words = (s->num_sources + 31) >> 5;
+    s->num_enables = s->bitfield_words * s->num_addrs;
+    s->source_priority = g_new0(uint32_t, s->num_sources);
+    s->target_priority = g_new(uint32_t, s->num_addrs);
+    s->pending = g_new0(uint32_t, s->bitfield_words);
+    s->claimed = g_new0(uint32_t, s->bitfield_words);
+    s->enable = g_new0(uint32_t, s->num_enables);
+
+    qdev_init_gpio_in(dev, sifive_plic_irq_request, s->num_sources);
+
+    s->s_external_irqs = g_malloc(sizeof(qemu_irq) * s->num_harts);
+    qdev_init_gpio_out(dev, s->s_external_irqs, s->num_harts);
+
+    s->m_external_irqs = g_malloc(sizeof(qemu_irq) * s->num_harts);
+    qdev_init_gpio_out(dev, s->m_external_irqs, s->num_harts);
+
+    /* We can't allow the supervisor to control SEIP as this would allow the
+     * supervisor to clear a pending external interrupt which will result in
+     * a lost interrupt in the case a PLIC is attached. The SEIP bit must be
+     * hardware controlled when a PLIC is attached.
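+     * (riscv_cpu_claim_interrupts() below reserves MIP_SEIP so that only
+     * this device controls it.)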
+ */ + for (i = 0; i < s->num_harts; i++) { + RISCVCPU *cpu = RISCV_CPU(qemu_get_cpu(s->hartid_base + i)); + if (riscv_cpu_claim_interrupts(cpu, MIP_SEIP) < 0) { + error_report("SEIP already claimed"); + exit(1); + } + } + + msi_nonbroken = true; +} + +static const VMStateDescription vmstate_sifive_plic = { + .name = "riscv_sifive_plic", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_VARRAY_UINT32(source_priority, SiFivePLICState, + num_sources, 0, + vmstate_info_uint32, uint32_t), + VMSTATE_VARRAY_UINT32(target_priority, SiFivePLICState, + num_addrs, 0, + vmstate_info_uint32, uint32_t), + VMSTATE_VARRAY_UINT32(pending, SiFivePLICState, bitfield_words, 0, + vmstate_info_uint32, uint32_t), + VMSTATE_VARRAY_UINT32(claimed, SiFivePLICState, bitfield_words, 0, + vmstate_info_uint32, uint32_t), + VMSTATE_VARRAY_UINT32(enable, SiFivePLICState, num_enables, 0, + vmstate_info_uint32, uint32_t), + VMSTATE_END_OF_LIST() + } +}; + +static Property sifive_plic_properties[] = { + DEFINE_PROP_STRING("hart-config", SiFivePLICState, hart_config), + DEFINE_PROP_UINT32("hartid-base", SiFivePLICState, hartid_base, 0), + DEFINE_PROP_UINT32("num-sources", SiFivePLICState, num_sources, 0), + DEFINE_PROP_UINT32("num-priorities", SiFivePLICState, num_priorities, 0), + DEFINE_PROP_UINT32("priority-base", SiFivePLICState, priority_base, 0), + DEFINE_PROP_UINT32("pending-base", SiFivePLICState, pending_base, 0), + DEFINE_PROP_UINT32("enable-base", SiFivePLICState, enable_base, 0), + DEFINE_PROP_UINT32("enable-stride", SiFivePLICState, enable_stride, 0), + DEFINE_PROP_UINT32("context-base", SiFivePLICState, context_base, 0), + DEFINE_PROP_UINT32("context-stride", SiFivePLICState, context_stride, 0), + DEFINE_PROP_UINT32("aperture-size", SiFivePLICState, aperture_size, 0), + DEFINE_PROP_END_OF_LIST(), +}; + +static void sifive_plic_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + device_class_set_props(dc, sifive_plic_properties); + dc->realize = sifive_plic_realize; + dc->vmsd = &vmstate_sifive_plic; +} + +static const TypeInfo sifive_plic_info = { + .name = TYPE_SIFIVE_PLIC, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(SiFivePLICState), + .class_init = sifive_plic_class_init, +}; + +static void sifive_plic_register_types(void) +{ + type_register_static(&sifive_plic_info); +} + +type_init(sifive_plic_register_types) + +/* + * Create PLIC device. 
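+ *
+ * A minimal usage sketch (illustrative values only, loosely following
+ * the standard SiFive PLIC memory map; the real offsets and strides
+ * come from the board model being built):
+ *
+ *   DeviceState *plic = sifive_plic_create(0xc000000, (char *)"MS",
+ *                                          1, 0, 53, 7, 0x04,
+ *                                          0x1000, 0x2000, 0x80,
+ *                                          0x200000, 0x1000,
+ *                                          0x4000000);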
+ */ +DeviceState *sifive_plic_create(hwaddr addr, char *hart_config, + uint32_t num_harts, + uint32_t hartid_base, uint32_t num_sources, + uint32_t num_priorities, uint32_t priority_base, + uint32_t pending_base, uint32_t enable_base, + uint32_t enable_stride, uint32_t context_base, + uint32_t context_stride, uint32_t aperture_size) +{ + DeviceState *dev = qdev_new(TYPE_SIFIVE_PLIC); + int i; + + assert(enable_stride == (enable_stride & -enable_stride)); + assert(context_stride == (context_stride & -context_stride)); + qdev_prop_set_string(dev, "hart-config", hart_config); + qdev_prop_set_uint32(dev, "hartid-base", hartid_base); + qdev_prop_set_uint32(dev, "num-sources", num_sources); + qdev_prop_set_uint32(dev, "num-priorities", num_priorities); + qdev_prop_set_uint32(dev, "priority-base", priority_base); + qdev_prop_set_uint32(dev, "pending-base", pending_base); + qdev_prop_set_uint32(dev, "enable-base", enable_base); + qdev_prop_set_uint32(dev, "enable-stride", enable_stride); + qdev_prop_set_uint32(dev, "context-base", context_base); + qdev_prop_set_uint32(dev, "context-stride", context_stride); + qdev_prop_set_uint32(dev, "aperture-size", aperture_size); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, addr); + + for (i = 0; i < num_harts; i++) { + CPUState *cpu = qemu_get_cpu(hartid_base + i); + + qdev_connect_gpio_out(dev, i, + qdev_get_gpio_in(DEVICE(cpu), IRQ_S_EXT)); + qdev_connect_gpio_out(dev, num_harts + i, + qdev_get_gpio_in(DEVICE(cpu), IRQ_M_EXT)); + } + + return dev; +} diff --git a/hw/intc/slavio_intctl.c b/hw/intc/slavio_intctl.c new file mode 100644 index 000000000..f7e59ba64 --- /dev/null +++ b/hw/intc/slavio_intctl.c @@ -0,0 +1,475 @@ +/* + * QEMU Sparc SLAVIO interrupt controller emulation + * + * Copyright (c) 2003-2005 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "migration/vmstate.h" +#include "monitor/monitor.h" +#include "qemu/module.h" +#include "hw/sysbus.h" +#include "hw/intc/intc.h" +#include "hw/irq.h" +#include "trace.h" +#include "qom/object.h" + +//#define DEBUG_IRQ_COUNT + +/* + * Registers of interrupt controller in sun4m. + * + * This is the interrupt controller part of chip STP2001 (Slave I/O), also + * produced as NCR89C105. See + * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR89C105.txt + * + * There is a system master controller and one for each cpu. 
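+ * The master controller dispatches hard (device) interrupts to the cpu
+ * currently selected as target, while each per-cpu controller handles
+ * its software interrupts and local timer line.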
+ * + */ + +#define MAX_CPUS 16 +#define MAX_PILS 16 + +struct SLAVIO_INTCTLState; + +typedef struct SLAVIO_CPUINTCTLState { + MemoryRegion iomem; + struct SLAVIO_INTCTLState *master; + uint32_t intreg_pending; + uint32_t cpu; + uint32_t irl_out; +} SLAVIO_CPUINTCTLState; + +#define TYPE_SLAVIO_INTCTL "slavio_intctl" +OBJECT_DECLARE_SIMPLE_TYPE(SLAVIO_INTCTLState, SLAVIO_INTCTL) + +struct SLAVIO_INTCTLState { + SysBusDevice parent_obj; + + MemoryRegion iomem; +#ifdef DEBUG_IRQ_COUNT + uint64_t irq_count[32]; +#endif + qemu_irq cpu_irqs[MAX_CPUS][MAX_PILS]; + SLAVIO_CPUINTCTLState slaves[MAX_CPUS]; + uint32_t intregm_pending; + uint32_t intregm_disabled; + uint32_t target_cpu; +}; + +#define INTCTL_MAXADDR 0xf +#define INTCTL_SIZE (INTCTL_MAXADDR + 1) +#define INTCTLM_SIZE 0x14 +#define MASTER_IRQ_MASK ~0x0fa2007f +#define MASTER_DISABLE 0x80000000 +#define CPU_SOFTIRQ_MASK 0xfffe0000 +#define CPU_IRQ_INT15_IN (1 << 15) +#define CPU_IRQ_TIMER_IN (1 << 14) + +static void slavio_check_interrupts(SLAVIO_INTCTLState *s, int set_irqs); + +// per-cpu interrupt controller +static uint64_t slavio_intctl_mem_readl(void *opaque, hwaddr addr, + unsigned size) +{ + SLAVIO_CPUINTCTLState *s = opaque; + uint32_t saddr, ret; + + saddr = addr >> 2; + switch (saddr) { + case 0: + ret = s->intreg_pending; + break; + default: + ret = 0; + break; + } + trace_slavio_intctl_mem_readl(s->cpu, addr, ret); + + return ret; +} + +static void slavio_intctl_mem_writel(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + SLAVIO_CPUINTCTLState *s = opaque; + uint32_t saddr; + + saddr = addr >> 2; + trace_slavio_intctl_mem_writel(s->cpu, addr, val); + switch (saddr) { + case 1: // clear pending softints + val &= CPU_SOFTIRQ_MASK | CPU_IRQ_INT15_IN; + s->intreg_pending &= ~val; + slavio_check_interrupts(s->master, 1); + trace_slavio_intctl_mem_writel_clear(s->cpu, val, s->intreg_pending); + break; + case 2: // set softint + val &= CPU_SOFTIRQ_MASK; + s->intreg_pending |= val; + slavio_check_interrupts(s->master, 1); + trace_slavio_intctl_mem_writel_set(s->cpu, val, s->intreg_pending); + break; + default: + break; + } +} + +static const MemoryRegionOps slavio_intctl_mem_ops = { + .read = slavio_intctl_mem_readl, + .write = slavio_intctl_mem_writel, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +// master system interrupt controller +static uint64_t slavio_intctlm_mem_readl(void *opaque, hwaddr addr, + unsigned size) +{ + SLAVIO_INTCTLState *s = opaque; + uint32_t saddr, ret; + + saddr = addr >> 2; + switch (saddr) { + case 0: + ret = s->intregm_pending & ~MASTER_DISABLE; + break; + case 1: + ret = s->intregm_disabled & MASTER_IRQ_MASK; + break; + case 4: + ret = s->target_cpu; + break; + default: + ret = 0; + break; + } + trace_slavio_intctlm_mem_readl(addr, ret); + + return ret; +} + +static void slavio_intctlm_mem_writel(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + SLAVIO_INTCTLState *s = opaque; + uint32_t saddr; + + saddr = addr >> 2; + trace_slavio_intctlm_mem_writel(addr, val); + switch (saddr) { + case 2: // clear (enable) + // Force clear unused bits + val &= MASTER_IRQ_MASK; + s->intregm_disabled &= ~val; + trace_slavio_intctlm_mem_writel_enable(val, s->intregm_disabled); + slavio_check_interrupts(s, 1); + break; + case 3: // set (disable; doesn't affect pending) + // Force clear unused bits + val &= MASTER_IRQ_MASK; + s->intregm_disabled |= val; + slavio_check_interrupts(s, 1); + trace_slavio_intctlm_mem_writel_disable(val, 
s->intregm_disabled); + break; + case 4: + s->target_cpu = val & (MAX_CPUS - 1); + slavio_check_interrupts(s, 1); + trace_slavio_intctlm_mem_writel_target(s->target_cpu); + break; + default: + break; + } +} + +static const MemoryRegionOps slavio_intctlm_mem_ops = { + .read = slavio_intctlm_mem_readl, + .write = slavio_intctlm_mem_writel, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +static const uint32_t intbit_to_level[] = { + 2, 3, 5, 7, 9, 11, 13, 2, 3, 5, 7, 9, 11, 13, 12, 12, + 6, 13, 4, 10, 8, 9, 11, 0, 0, 0, 0, 15, 15, 15, 15, 0, +}; + +static void slavio_check_interrupts(SLAVIO_INTCTLState *s, int set_irqs) +{ + uint32_t pending = s->intregm_pending, pil_pending; + unsigned int i, j; + + pending &= ~s->intregm_disabled; + + trace_slavio_check_interrupts(pending, s->intregm_disabled); + for (i = 0; i < MAX_CPUS; i++) { + pil_pending = 0; + + /* If we are the current interrupt target, get hard interrupts */ + if (pending && !(s->intregm_disabled & MASTER_DISABLE) && + (i == s->target_cpu)) { + for (j = 0; j < 32; j++) { + if ((pending & (1 << j)) && intbit_to_level[j]) { + pil_pending |= 1 << intbit_to_level[j]; + } + } + } + + /* Calculate current pending hard interrupts for display */ + s->slaves[i].intreg_pending &= CPU_SOFTIRQ_MASK | CPU_IRQ_INT15_IN | + CPU_IRQ_TIMER_IN; + if (i == s->target_cpu) { + for (j = 0; j < 32; j++) { + if ((s->intregm_pending & (1U << j)) && intbit_to_level[j]) { + s->slaves[i].intreg_pending |= 1 << intbit_to_level[j]; + } + } + } + + /* Level 15 and CPU timer interrupts are only masked when + the MASTER_DISABLE bit is set */ + if (!(s->intregm_disabled & MASTER_DISABLE)) { + pil_pending |= s->slaves[i].intreg_pending & + (CPU_IRQ_INT15_IN | CPU_IRQ_TIMER_IN); + } + + /* Add soft interrupts */ + pil_pending |= (s->slaves[i].intreg_pending & CPU_SOFTIRQ_MASK) >> 16; + + if (set_irqs) { + /* Since there is not really an interrupt 0 (and pil_pending + * and irl_out bit zero are thus always zero) there is no need + * to do anything with cpu_irqs[i][0] and it is OK not to do + * the j=0 iteration of this loop. + */ + for (j = MAX_PILS-1; j > 0; j--) { + if (pil_pending & (1 << j)) { + if (!(s->slaves[i].irl_out & (1 << j))) { + qemu_irq_raise(s->cpu_irqs[i][j]); + } + } else { + if (s->slaves[i].irl_out & (1 << j)) { + qemu_irq_lower(s->cpu_irqs[i][j]); + } + } + } + } + s->slaves[i].irl_out = pil_pending; + } +} + +/* + * "irq" here is the bit number in the system interrupt register to + * separate serial and keyboard interrupts sharing a level. 
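+ * For example, the serial and keyboard sources both map to processor
+ * interrupt level 12 in intbit_to_level[] above, but occupy distinct
+ * bits in the system interrupt register.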
+ */ +static void slavio_set_irq(void *opaque, int irq, int level) +{ + SLAVIO_INTCTLState *s = opaque; + uint32_t mask = 1 << irq; + uint32_t pil = intbit_to_level[irq]; + unsigned int i; + + trace_slavio_set_irq(s->target_cpu, irq, pil, level); + if (pil > 0) { + if (level) { +#ifdef DEBUG_IRQ_COUNT + s->irq_count[pil]++; +#endif + s->intregm_pending |= mask; + if (pil == 15) { + for (i = 0; i < MAX_CPUS; i++) { + s->slaves[i].intreg_pending |= 1 << pil; + } + } + } else { + s->intregm_pending &= ~mask; + if (pil == 15) { + for (i = 0; i < MAX_CPUS; i++) { + s->slaves[i].intreg_pending &= ~(1 << pil); + } + } + } + slavio_check_interrupts(s, 1); + } +} + +static void slavio_set_timer_irq_cpu(void *opaque, int cpu, int level) +{ + SLAVIO_INTCTLState *s = opaque; + + trace_slavio_set_timer_irq_cpu(cpu, level); + + if (level) { + s->slaves[cpu].intreg_pending |= CPU_IRQ_TIMER_IN; + } else { + s->slaves[cpu].intreg_pending &= ~CPU_IRQ_TIMER_IN; + } + + slavio_check_interrupts(s, 1); +} + +static void slavio_set_irq_all(void *opaque, int irq, int level) +{ + if (irq < 32) { + slavio_set_irq(opaque, irq, level); + } else { + slavio_set_timer_irq_cpu(opaque, irq - 32, level); + } +} + +static int vmstate_intctl_post_load(void *opaque, int version_id) +{ + SLAVIO_INTCTLState *s = opaque; + + slavio_check_interrupts(s, 0); + return 0; +} + +static const VMStateDescription vmstate_intctl_cpu = { + .name ="slavio_intctl_cpu", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(intreg_pending, SLAVIO_CPUINTCTLState), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_intctl = { + .name ="slavio_intctl", + .version_id = 1, + .minimum_version_id = 1, + .post_load = vmstate_intctl_post_load, + .fields = (VMStateField[]) { + VMSTATE_STRUCT_ARRAY(slaves, SLAVIO_INTCTLState, MAX_CPUS, 1, + vmstate_intctl_cpu, SLAVIO_CPUINTCTLState), + VMSTATE_UINT32(intregm_pending, SLAVIO_INTCTLState), + VMSTATE_UINT32(intregm_disabled, SLAVIO_INTCTLState), + VMSTATE_UINT32(target_cpu, SLAVIO_INTCTLState), + VMSTATE_END_OF_LIST() + } +}; + +static void slavio_intctl_reset(DeviceState *d) +{ + SLAVIO_INTCTLState *s = SLAVIO_INTCTL(d); + int i; + + for (i = 0; i < MAX_CPUS; i++) { + s->slaves[i].intreg_pending = 0; + s->slaves[i].irl_out = 0; + } + s->intregm_disabled = ~MASTER_IRQ_MASK; + s->intregm_pending = 0; + s->target_cpu = 0; + slavio_check_interrupts(s, 0); +} + +#ifdef DEBUG_IRQ_COUNT +static bool slavio_intctl_get_statistics(InterruptStatsProvider *obj, + uint64_t **irq_counts, + unsigned int *nb_irqs) +{ + SLAVIO_INTCTLState *s = SLAVIO_INTCTL(obj); + *irq_counts = s->irq_count; + *nb_irqs = ARRAY_SIZE(s->irq_count); + return true; +} +#endif + +static void slavio_intctl_print_info(InterruptStatsProvider *obj, Monitor *mon) +{ + SLAVIO_INTCTLState *s = SLAVIO_INTCTL(obj); + int i; + + for (i = 0; i < MAX_CPUS; i++) { + monitor_printf(mon, "per-cpu %d: pending 0x%08x\n", i, + s->slaves[i].intreg_pending); + } + monitor_printf(mon, "master: pending 0x%08x, disabled 0x%08x\n", + s->intregm_pending, s->intregm_disabled); +} + +static void slavio_intctl_init(Object *obj) +{ + DeviceState *dev = DEVICE(obj); + SLAVIO_INTCTLState *s = SLAVIO_INTCTL(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + unsigned int i, j; + char slave_name[45]; + + qdev_init_gpio_in(dev, slavio_set_irq_all, 32 + MAX_CPUS); + memory_region_init_io(&s->iomem, obj, &slavio_intctlm_mem_ops, s, + "master-interrupt-controller", INTCTLM_SIZE); + sysbus_init_mmio(sbd, &s->iomem); + 
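+    /*
+     * One slave (per-cpu) controller per possible CPU: each gets its
+     * own MMIO region and a bank of MAX_PILS outgoing IRQ lines.
+     */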
+    for (i = 0; i < MAX_CPUS; i++) {
+        snprintf(slave_name, sizeof(slave_name),
+                 "slave-interrupt-controller-%i", i);
+        for (j = 0; j < MAX_PILS; j++) {
+            sysbus_init_irq(sbd, &s->cpu_irqs[i][j]);
+        }
+        memory_region_init_io(&s->slaves[i].iomem, OBJECT(s),
+                              &slavio_intctl_mem_ops,
+                              &s->slaves[i], slave_name, INTCTL_SIZE);
+        sysbus_init_mmio(sbd, &s->slaves[i].iomem);
+        s->slaves[i].cpu = i;
+        s->slaves[i].master = s;
+    }
+}
+
+static void slavio_intctl_class_init(ObjectClass *klass, void *data)
+{
+    DeviceClass *dc = DEVICE_CLASS(klass);
+    InterruptStatsProviderClass *ic = INTERRUPT_STATS_PROVIDER_CLASS(klass);
+
+    dc->reset = slavio_intctl_reset;
+    dc->vmsd = &vmstate_intctl;
+#ifdef DEBUG_IRQ_COUNT
+    ic->get_statistics = slavio_intctl_get_statistics;
+#endif
+    ic->print_info = slavio_intctl_print_info;
+}
+
+static const TypeInfo slavio_intctl_info = {
+    .name = TYPE_SLAVIO_INTCTL,
+    .parent = TYPE_SYS_BUS_DEVICE,
+    .instance_size = sizeof(SLAVIO_INTCTLState),
+    .instance_init = slavio_intctl_init,
+    .class_init = slavio_intctl_class_init,
+    .interfaces = (InterfaceInfo[]) {
+        { TYPE_INTERRUPT_STATS_PROVIDER },
+        { }
+    },
+};
+
+static void slavio_intctl_register_types(void)
+{
+    type_register_static(&slavio_intctl_info);
+}
+
+type_init(slavio_intctl_register_types)
diff --git a/hw/intc/spapr_xive.c b/hw/intc/spapr_xive.c
new file mode 100644
index 000000000..4ec659b93
--- /dev/null
+++ b/hw/intc/spapr_xive.c
@@ -0,0 +1,1830 @@
+/*
+ * QEMU PowerPC sPAPR XIVE interrupt controller model
+ *
+ * Copyright (c) 2017-2018, IBM Corporation.
+ *
+ * This code is licensed under the GPL version 2 or later. See the
+ * COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/log.h"
+#include "qemu/module.h"
+#include "qapi/error.h"
+#include "qemu/error-report.h"
+#include "target/ppc/cpu.h"
+#include "sysemu/cpus.h"
+#include "sysemu/reset.h"
+#include "migration/vmstate.h"
+#include "monitor/monitor.h"
+#include "hw/ppc/fdt.h"
+#include "hw/ppc/spapr.h"
+#include "hw/ppc/spapr_cpu_core.h"
+#include "hw/ppc/spapr_xive.h"
+#include "hw/ppc/xive.h"
+#include "hw/ppc/xive_regs.h"
+#include "hw/qdev-properties.h"
+#include "trace.h"
+
+/*
+ * XIVE Virtualization Controller BAR and Thread Management BAR that we
+ * use for the ESB pages and the TIMA pages
+ */
+#define SPAPR_XIVE_VC_BASE 0x0006010000000000ull
+#define SPAPR_XIVE_TM_BASE 0x0006030203180000ull
+
+/*
+ * The allocation of VP blocks is a complex operation in OPAL and the
+ * VP identifiers have a relation with the number of HW chips, the
+ * size of the VP blocks, VP grouping, etc. The QEMU sPAPR XIVE
+ * controller model does not have the same constraints and can use a
+ * simple mapping scheme of the CPU vcpu_id
+ *
+ * These identifiers are never returned to the OS.
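+ *
+ * For example, with SPAPR_XIVE_NVT_BASE at 0x400 below, vcpu_id 2 is
+ * assigned NVT index 0x402 in block SPAPR_XIVE_BLOCK_ID, and
+ * spapr_xive_nvt_to_target() simply subtracts the base again.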
+ */
+
+#define SPAPR_XIVE_NVT_BASE 0x400
+
+/*
+ * sPAPR NVT and END indexing helpers
+ */
+static uint32_t spapr_xive_nvt_to_target(uint8_t nvt_blk, uint32_t nvt_idx)
+{
+    return nvt_idx - SPAPR_XIVE_NVT_BASE;
+}
+
+static void spapr_xive_cpu_to_nvt(PowerPCCPU *cpu,
+                                  uint8_t *out_nvt_blk, uint32_t *out_nvt_idx)
+{
+    assert(cpu);
+
+    if (out_nvt_blk) {
+        *out_nvt_blk = SPAPR_XIVE_BLOCK_ID;
+    }
+
+    if (out_nvt_idx) {
+        *out_nvt_idx = SPAPR_XIVE_NVT_BASE + cpu->vcpu_id;
+    }
+}
+
+static int spapr_xive_target_to_nvt(uint32_t target,
+                                    uint8_t *out_nvt_blk, uint32_t *out_nvt_idx)
+{
+    PowerPCCPU *cpu = spapr_find_cpu(target);
+
+    if (!cpu) {
+        return -1;
+    }
+
+    spapr_xive_cpu_to_nvt(cpu, out_nvt_blk, out_nvt_idx);
+    return 0;
+}
+
+/*
+ * sPAPR END indexing uses a simple mapping of the CPU vcpu_id, 8
+ * priorities per CPU
+ */
+int spapr_xive_end_to_target(uint8_t end_blk, uint32_t end_idx,
+                             uint32_t *out_server, uint8_t *out_prio)
+{
+
+    assert(end_blk == SPAPR_XIVE_BLOCK_ID);
+
+    if (out_server) {
+        *out_server = end_idx >> 3;
+    }
+
+    if (out_prio) {
+        *out_prio = end_idx & 0x7;
+    }
+    return 0;
+}
+
+static void spapr_xive_cpu_to_end(PowerPCCPU *cpu, uint8_t prio,
+                                  uint8_t *out_end_blk, uint32_t *out_end_idx)
+{
+    assert(cpu);
+
+    if (out_end_blk) {
+        *out_end_blk = SPAPR_XIVE_BLOCK_ID;
+    }
+
+    if (out_end_idx) {
+        *out_end_idx = (cpu->vcpu_id << 3) + prio;
+    }
+}
+
+static int spapr_xive_target_to_end(uint32_t target, uint8_t prio,
+                                    uint8_t *out_end_blk, uint32_t *out_end_idx)
+{
+    PowerPCCPU *cpu = spapr_find_cpu(target);
+
+    if (!cpu) {
+        return -1;
+    }
+
+    spapr_xive_cpu_to_end(cpu, prio, out_end_blk, out_end_idx);
+    return 0;
+}
+
+/*
+ * On sPAPR machines, use a simplified output for the XIVE END
+ * structure dumping only the information related to the OS EQ.
+ */
+static void spapr_xive_end_pic_print_info(SpaprXive *xive, XiveEND *end,
+                                          Monitor *mon)
+{
+    uint64_t qaddr_base = xive_end_qaddr(end);
+    uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1);
+    uint32_t qgen = xive_get_field32(END_W1_GENERATION, end->w1);
+    uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0);
+    uint32_t qentries = 1 << (qsize + 10);
+    uint32_t nvt = xive_get_field32(END_W6_NVT_INDEX, end->w6);
+    uint8_t priority = xive_get_field32(END_W7_F0_PRIORITY, end->w7);
+
+    monitor_printf(mon, "%3d/%d % 6d/%5d @%"PRIx64" ^%d",
+                   spapr_xive_nvt_to_target(0, nvt),
+                   priority, qindex, qentries, qaddr_base, qgen);
+
+    xive_end_queue_pic_print_info(end, 6, mon);
+}
+
+/*
+ * kvm_irqchip_in_kernel() will cause the compiler to turn this
+ * into a nop if CONFIG_KVM isn't defined.
+ */
+#define spapr_xive_in_kernel(xive) \
+    (kvm_irqchip_in_kernel() && (xive)->fd != -1)
+
+static void spapr_xive_pic_print_info(SpaprXive *xive, Monitor *mon)
+{
+    XiveSource *xsrc = &xive->source;
+    int i;
+
+    if (spapr_xive_in_kernel(xive)) {
+        Error *local_err = NULL;
+
+        kvmppc_xive_synchronize_state(xive, &local_err);
+        if (local_err) {
+            error_report_err(local_err);
+            return;
+        }
+    }
+
+    monitor_printf(mon, " LISN PQ EISN CPU/PRIO EQ\n");
+
+    for (i = 0; i < xive->nr_irqs; i++) {
+        uint8_t pq = xive_source_esb_get(xsrc, i);
+        XiveEAS *eas = &xive->eat[i];
+
+        if (!xive_eas_is_valid(eas)) {
+            continue;
+        }
+
+        monitor_printf(mon, " %08x %s %c%c%c %s %08x ", i,
+                       xive_source_irq_is_lsi(xsrc, i) ? "LSI" : "MSI",
+                       pq & XIVE_ESB_VAL_P ? 'P' : '-',
+                       pq & XIVE_ESB_VAL_Q ? 'Q' : '-',
+                       xive_source_is_asserted(xsrc, i) ? 'A' : ' ',
+                       xive_eas_is_masked(eas) ?
"M" : " ", + (int) xive_get_field64(EAS_END_DATA, eas->w)); + + if (!xive_eas_is_masked(eas)) { + uint32_t end_idx = xive_get_field64(EAS_END_INDEX, eas->w); + XiveEND *end; + + assert(end_idx < xive->nr_ends); + end = &xive->endt[end_idx]; + + if (xive_end_is_valid(end)) { + spapr_xive_end_pic_print_info(xive, end, mon); + } + } + monitor_printf(mon, "\n"); + } +} + +void spapr_xive_mmio_set_enabled(SpaprXive *xive, bool enable) +{ + memory_region_set_enabled(&xive->source.esb_mmio, enable); + memory_region_set_enabled(&xive->tm_mmio, enable); + + /* Disable the END ESBs until a guest OS makes use of them */ + memory_region_set_enabled(&xive->end_source.esb_mmio, false); +} + +static void spapr_xive_tm_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + XiveTCTX *tctx = spapr_cpu_state(POWERPC_CPU(current_cpu))->tctx; + + xive_tctx_tm_write(XIVE_PRESENTER(opaque), tctx, offset, value, size); +} + +static uint64_t spapr_xive_tm_read(void *opaque, hwaddr offset, unsigned size) +{ + XiveTCTX *tctx = spapr_cpu_state(POWERPC_CPU(current_cpu))->tctx; + + return xive_tctx_tm_read(XIVE_PRESENTER(opaque), tctx, offset, size); +} + +const MemoryRegionOps spapr_xive_tm_ops = { + .read = spapr_xive_tm_read, + .write = spapr_xive_tm_write, + .endianness = DEVICE_BIG_ENDIAN, + .valid = { + .min_access_size = 1, + .max_access_size = 8, + }, + .impl = { + .min_access_size = 1, + .max_access_size = 8, + }, +}; + +static void spapr_xive_end_reset(XiveEND *end) +{ + memset(end, 0, sizeof(*end)); + + /* switch off the escalation and notification ESBs */ + end->w1 = cpu_to_be32(END_W1_ESe_Q | END_W1_ESn_Q); +} + +static void spapr_xive_reset(void *dev) +{ + SpaprXive *xive = SPAPR_XIVE(dev); + int i; + + /* + * The XiveSource has its own reset handler, which mask off all + * IRQs (!P|Q) + */ + + /* Mask all valid EASs in the IRQ number space. */ + for (i = 0; i < xive->nr_irqs; i++) { + XiveEAS *eas = &xive->eat[i]; + if (xive_eas_is_valid(eas)) { + eas->w = cpu_to_be64(EAS_VALID | EAS_MASKED); + } else { + eas->w = 0; + } + } + + /* Clear all ENDs */ + for (i = 0; i < xive->nr_ends; i++) { + spapr_xive_end_reset(&xive->endt[i]); + } +} + +static void spapr_xive_instance_init(Object *obj) +{ + SpaprXive *xive = SPAPR_XIVE(obj); + + object_initialize_child(obj, "source", &xive->source, TYPE_XIVE_SOURCE); + + object_initialize_child(obj, "end_source", &xive->end_source, + TYPE_XIVE_END_SOURCE); + + /* Not connected to the KVM XIVE device */ + xive->fd = -1; +} + +static void spapr_xive_realize(DeviceState *dev, Error **errp) +{ + SpaprXive *xive = SPAPR_XIVE(dev); + SpaprXiveClass *sxc = SPAPR_XIVE_GET_CLASS(xive); + XiveSource *xsrc = &xive->source; + XiveENDSource *end_xsrc = &xive->end_source; + Error *local_err = NULL; + + /* Set by spapr_irq_init() */ + g_assert(xive->nr_irqs); + g_assert(xive->nr_ends); + + sxc->parent_realize(dev, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + + /* + * Initialize the internal sources, for IPIs and virtual devices. 
+     */
+    object_property_set_int(OBJECT(xsrc), "nr-irqs", xive->nr_irqs,
+                            &error_fatal);
+    object_property_set_link(OBJECT(xsrc), "xive", OBJECT(xive), &error_abort);
+    if (!qdev_realize(DEVICE(xsrc), NULL, errp)) {
+        return;
+    }
+    sysbus_init_mmio(SYS_BUS_DEVICE(xive), &xsrc->esb_mmio);
+
+    /*
+     * Initialize the END ESB source
+     */
+    object_property_set_int(OBJECT(end_xsrc), "nr-ends", xive->nr_irqs,
+                            &error_fatal);
+    object_property_set_link(OBJECT(end_xsrc), "xive", OBJECT(xive),
+                             &error_abort);
+    if (!qdev_realize(DEVICE(end_xsrc), NULL, errp)) {
+        return;
+    }
+    sysbus_init_mmio(SYS_BUS_DEVICE(xive), &end_xsrc->esb_mmio);
+
+    /* Set the mapping address of the END ESB pages after the source ESBs */
+    xive->end_base = xive->vc_base + xive_source_esb_len(xsrc);
+
+    /*
+     * Allocate the routing tables
+     */
+    xive->eat = g_new0(XiveEAS, xive->nr_irqs);
+    xive->endt = g_new0(XiveEND, xive->nr_ends);
+
+    xive->nodename = g_strdup_printf("interrupt-controller@%" PRIx64,
+                           xive->tm_base + XIVE_TM_USER_PAGE * (1 << TM_SHIFT));
+
+    qemu_register_reset(spapr_xive_reset, dev);
+
+    /* TIMA initialization */
+    memory_region_init_io(&xive->tm_mmio, OBJECT(xive), &spapr_xive_tm_ops,
+                          xive, "xive.tima", 4ull << TM_SHIFT);
+    sysbus_init_mmio(SYS_BUS_DEVICE(xive), &xive->tm_mmio);
+
+    /*
+     * Map all regions. These will be enabled or disabled at reset and
+     * can also be overridden by KVM memory regions if active
+     */
+    sysbus_mmio_map(SYS_BUS_DEVICE(xive), 0, xive->vc_base);
+    sysbus_mmio_map(SYS_BUS_DEVICE(xive), 1, xive->end_base);
+    sysbus_mmio_map(SYS_BUS_DEVICE(xive), 2, xive->tm_base);
+}
+
+static int spapr_xive_get_eas(XiveRouter *xrtr, uint8_t eas_blk,
+                              uint32_t eas_idx, XiveEAS *eas)
+{
+    SpaprXive *xive = SPAPR_XIVE(xrtr);
+
+    if (eas_idx >= xive->nr_irqs) {
+        return -1;
+    }
+
+    *eas = xive->eat[eas_idx];
+    return 0;
+}
+
+static int spapr_xive_get_end(XiveRouter *xrtr,
+                              uint8_t end_blk, uint32_t end_idx, XiveEND *end)
+{
+    SpaprXive *xive = SPAPR_XIVE(xrtr);
+
+    if (end_idx >= xive->nr_ends) {
+        return -1;
+    }
+
+    memcpy(end, &xive->endt[end_idx], sizeof(XiveEND));
+    return 0;
+}
+
+static int spapr_xive_write_end(XiveRouter *xrtr, uint8_t end_blk,
+                                uint32_t end_idx, XiveEND *end,
+                                uint8_t word_number)
+{
+    SpaprXive *xive = SPAPR_XIVE(xrtr);
+
+    if (end_idx >= xive->nr_ends) {
+        return -1;
+    }
+
+    memcpy(&xive->endt[end_idx], end, sizeof(XiveEND));
+    return 0;
+}
+
+static int spapr_xive_get_nvt(XiveRouter *xrtr,
+                              uint8_t nvt_blk, uint32_t nvt_idx, XiveNVT *nvt)
+{
+    uint32_t vcpu_id = spapr_xive_nvt_to_target(nvt_blk, nvt_idx);
+    PowerPCCPU *cpu = spapr_find_cpu(vcpu_id);
+
+    if (!cpu) {
+        /* TODO: should we assert() if we can't find an NVT? */
+        return -1;
+    }
+
+    /*
+     * sPAPR does not maintain a NVT table. Return that the NVT is
+     * valid if we have found a matching CPU
+     */
+    nvt->w0 = cpu_to_be32(NVT_W0_VALID);
+    return 0;
+}
+
+static int spapr_xive_write_nvt(XiveRouter *xrtr, uint8_t nvt_blk,
+                                uint32_t nvt_idx, XiveNVT *nvt,
+                                uint8_t word_number)
+{
+    /*
+     * We don't need to write back to the NVTs because the sPAPR
+     * machine should never hit a non-scheduled NVT, so this handler
+     * should never get called.
+ */ + g_assert_not_reached(); +} + +static int spapr_xive_match_nvt(XivePresenter *xptr, uint8_t format, + uint8_t nvt_blk, uint32_t nvt_idx, + bool cam_ignore, uint8_t priority, + uint32_t logic_serv, XiveTCTXMatch *match) +{ + CPUState *cs; + int count = 0; + + CPU_FOREACH(cs) { + PowerPCCPU *cpu = POWERPC_CPU(cs); + XiveTCTX *tctx = spapr_cpu_state(cpu)->tctx; + int ring; + + /* + * Skip partially initialized vCPUs. This can happen when + * vCPUs are hotplugged. + */ + if (!tctx) { + continue; + } + + /* + * Check the thread context CAM lines and record matches. + */ + ring = xive_presenter_tctx_match(xptr, tctx, format, nvt_blk, nvt_idx, + cam_ignore, logic_serv); + /* + * Save the matching thread interrupt context and follow on to + * check for duplicates which are invalid. + */ + if (ring != -1) { + if (match->tctx) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: already found a thread " + "context NVT %x/%x\n", nvt_blk, nvt_idx); + return -1; + } + + match->ring = ring; + match->tctx = tctx; + count++; + } + } + + return count; +} + +static uint8_t spapr_xive_get_block_id(XiveRouter *xrtr) +{ + return SPAPR_XIVE_BLOCK_ID; +} + +static const VMStateDescription vmstate_spapr_xive_end = { + .name = TYPE_SPAPR_XIVE "/end", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField []) { + VMSTATE_UINT32(w0, XiveEND), + VMSTATE_UINT32(w1, XiveEND), + VMSTATE_UINT32(w2, XiveEND), + VMSTATE_UINT32(w3, XiveEND), + VMSTATE_UINT32(w4, XiveEND), + VMSTATE_UINT32(w5, XiveEND), + VMSTATE_UINT32(w6, XiveEND), + VMSTATE_UINT32(w7, XiveEND), + VMSTATE_END_OF_LIST() + }, +}; + +static const VMStateDescription vmstate_spapr_xive_eas = { + .name = TYPE_SPAPR_XIVE "/eas", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField []) { + VMSTATE_UINT64(w, XiveEAS), + VMSTATE_END_OF_LIST() + }, +}; + +static int vmstate_spapr_xive_pre_save(void *opaque) +{ + SpaprXive *xive = SPAPR_XIVE(opaque); + + if (spapr_xive_in_kernel(xive)) { + return kvmppc_xive_pre_save(xive); + } + + return 0; +} + +/* + * Called by the sPAPR IRQ backend 'post_load' method at the machine + * level. 
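+ *
+ * When the in-kernel KVM XIVE device is in use, the state restored by
+ * vmstate_spapr_xive below is pushed back to KVM from here.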
+ */ +static int spapr_xive_post_load(SpaprInterruptController *intc, int version_id) +{ + SpaprXive *xive = SPAPR_XIVE(intc); + + if (spapr_xive_in_kernel(xive)) { + return kvmppc_xive_post_load(xive, version_id); + } + + return 0; +} + +static const VMStateDescription vmstate_spapr_xive = { + .name = TYPE_SPAPR_XIVE, + .version_id = 1, + .minimum_version_id = 1, + .pre_save = vmstate_spapr_xive_pre_save, + .post_load = NULL, /* handled at the machine level */ + .fields = (VMStateField[]) { + VMSTATE_UINT32_EQUAL(nr_irqs, SpaprXive, NULL), + VMSTATE_STRUCT_VARRAY_POINTER_UINT32(eat, SpaprXive, nr_irqs, + vmstate_spapr_xive_eas, XiveEAS), + VMSTATE_STRUCT_VARRAY_POINTER_UINT32(endt, SpaprXive, nr_ends, + vmstate_spapr_xive_end, XiveEND), + VMSTATE_END_OF_LIST() + }, +}; + +static int spapr_xive_claim_irq(SpaprInterruptController *intc, int lisn, + bool lsi, Error **errp) +{ + SpaprXive *xive = SPAPR_XIVE(intc); + XiveSource *xsrc = &xive->source; + + assert(lisn < xive->nr_irqs); + + trace_spapr_xive_claim_irq(lisn, lsi); + + if (xive_eas_is_valid(&xive->eat[lisn])) { + error_setg(errp, "IRQ %d is not free", lisn); + return -EBUSY; + } + + /* + * Set default values when allocating an IRQ number + */ + xive->eat[lisn].w |= cpu_to_be64(EAS_VALID | EAS_MASKED); + if (lsi) { + xive_source_irq_set_lsi(xsrc, lisn); + } + + if (spapr_xive_in_kernel(xive)) { + return kvmppc_xive_source_reset_one(xsrc, lisn, errp); + } + + return 0; +} + +static void spapr_xive_free_irq(SpaprInterruptController *intc, int lisn) +{ + SpaprXive *xive = SPAPR_XIVE(intc); + assert(lisn < xive->nr_irqs); + + trace_spapr_xive_free_irq(lisn); + + xive->eat[lisn].w &= cpu_to_be64(~EAS_VALID); +} + +static Property spapr_xive_properties[] = { + DEFINE_PROP_UINT32("nr-irqs", SpaprXive, nr_irqs, 0), + DEFINE_PROP_UINT32("nr-ends", SpaprXive, nr_ends, 0), + DEFINE_PROP_UINT64("vc-base", SpaprXive, vc_base, SPAPR_XIVE_VC_BASE), + DEFINE_PROP_UINT64("tm-base", SpaprXive, tm_base, SPAPR_XIVE_TM_BASE), + DEFINE_PROP_UINT8("hv-prio", SpaprXive, hv_prio, 7), + DEFINE_PROP_END_OF_LIST(), +}; + +static int spapr_xive_cpu_intc_create(SpaprInterruptController *intc, + PowerPCCPU *cpu, Error **errp) +{ + SpaprXive *xive = SPAPR_XIVE(intc); + Object *obj; + SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu); + + obj = xive_tctx_create(OBJECT(cpu), XIVE_PRESENTER(xive), errp); + if (!obj) { + return -1; + } + + spapr_cpu->tctx = XIVE_TCTX(obj); + return 0; +} + +static void xive_tctx_set_os_cam(XiveTCTX *tctx, uint32_t os_cam) +{ + uint32_t qw1w2 = cpu_to_be32(TM_QW1W2_VO | os_cam); + memcpy(&tctx->regs[TM_QW1_OS + TM_WORD2], &qw1w2, 4); +} + +static void spapr_xive_cpu_intc_reset(SpaprInterruptController *intc, + PowerPCCPU *cpu) +{ + XiveTCTX *tctx = spapr_cpu_state(cpu)->tctx; + uint8_t nvt_blk; + uint32_t nvt_idx; + + xive_tctx_reset(tctx); + + /* + * When a Virtual Processor is scheduled to run on a HW thread, + * the hypervisor pushes its identifier in the OS CAM line. + * Emulate the same behavior under QEMU. 
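+ *
+ * (e.g. the vCPU with vcpu_id 0 gets the CAM line for NVT index 0x400
+ * in block SPAPR_XIVE_BLOCK_ID pushed into its OS CAM word.)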
+ */ + spapr_xive_cpu_to_nvt(cpu, &nvt_blk, &nvt_idx); + + xive_tctx_set_os_cam(tctx, xive_nvt_cam_line(nvt_blk, nvt_idx)); +} + +static void spapr_xive_cpu_intc_destroy(SpaprInterruptController *intc, + PowerPCCPU *cpu) +{ + SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu); + + xive_tctx_destroy(spapr_cpu->tctx); + spapr_cpu->tctx = NULL; +} + +static void spapr_xive_set_irq(SpaprInterruptController *intc, int irq, int val) +{ + SpaprXive *xive = SPAPR_XIVE(intc); + + trace_spapr_xive_set_irq(irq, val); + + if (spapr_xive_in_kernel(xive)) { + kvmppc_xive_source_set_irq(&xive->source, irq, val); + } else { + xive_source_set_irq(&xive->source, irq, val); + } +} + +static void spapr_xive_print_info(SpaprInterruptController *intc, Monitor *mon) +{ + SpaprXive *xive = SPAPR_XIVE(intc); + CPUState *cs; + + CPU_FOREACH(cs) { + PowerPCCPU *cpu = POWERPC_CPU(cs); + + xive_tctx_pic_print_info(spapr_cpu_state(cpu)->tctx, mon); + } + + spapr_xive_pic_print_info(xive, mon); +} + +static void spapr_xive_dt(SpaprInterruptController *intc, uint32_t nr_servers, + void *fdt, uint32_t phandle) +{ + SpaprXive *xive = SPAPR_XIVE(intc); + int node; + uint64_t timas[2 * 2]; + /* Interrupt number ranges for the IPIs */ + uint32_t lisn_ranges[] = { + cpu_to_be32(SPAPR_IRQ_IPI), + cpu_to_be32(SPAPR_IRQ_IPI + nr_servers), + }; + /* + * EQ size - the sizes of pages supported by the system 4K, 64K, + * 2M, 16M. We only advertise 64K for the moment. + */ + uint32_t eq_sizes[] = { + cpu_to_be32(16), /* 64K */ + }; + /* + * QEMU/KVM only needs to define a single range to reserve the + * escalation priority. A priority bitmask would have been more + * appropriate. + */ + uint32_t plat_res_int_priorities[] = { + cpu_to_be32(xive->hv_prio), /* start */ + cpu_to_be32(0xff - xive->hv_prio), /* count */ + }; + + /* Thread Interrupt Management Area : User (ring 3) and OS (ring 2) */ + timas[0] = cpu_to_be64(xive->tm_base + + XIVE_TM_USER_PAGE * (1ull << TM_SHIFT)); + timas[1] = cpu_to_be64(1ull << TM_SHIFT); + timas[2] = cpu_to_be64(xive->tm_base + + XIVE_TM_OS_PAGE * (1ull << TM_SHIFT)); + timas[3] = cpu_to_be64(1ull << TM_SHIFT); + + _FDT(node = fdt_add_subnode(fdt, 0, xive->nodename)); + + _FDT(fdt_setprop_string(fdt, node, "device_type", "power-ivpe")); + _FDT(fdt_setprop(fdt, node, "reg", timas, sizeof(timas))); + + _FDT(fdt_setprop_string(fdt, node, "compatible", "ibm,power-ivpe")); + _FDT(fdt_setprop(fdt, node, "ibm,xive-eq-sizes", eq_sizes, + sizeof(eq_sizes))); + _FDT(fdt_setprop(fdt, node, "ibm,xive-lisn-ranges", lisn_ranges, + sizeof(lisn_ranges))); + + /* For Linux to link the LSIs to the interrupt controller. 
*/ + _FDT(fdt_setprop(fdt, node, "interrupt-controller", NULL, 0)); + _FDT(fdt_setprop_cell(fdt, node, "#interrupt-cells", 2)); + + /* For SLOF */ + _FDT(fdt_setprop_cell(fdt, node, "linux,phandle", phandle)); + _FDT(fdt_setprop_cell(fdt, node, "phandle", phandle)); + + /* + * The "ibm,plat-res-int-priorities" property defines the priority + * ranges reserved by the hypervisor + */ + _FDT(fdt_setprop(fdt, 0, "ibm,plat-res-int-priorities", + plat_res_int_priorities, sizeof(plat_res_int_priorities))); +} + +static int spapr_xive_activate(SpaprInterruptController *intc, + uint32_t nr_servers, Error **errp) +{ + SpaprXive *xive = SPAPR_XIVE(intc); + + if (kvm_enabled()) { + int rc = spapr_irq_init_kvm(kvmppc_xive_connect, intc, nr_servers, + errp); + if (rc < 0) { + return rc; + } + } + + /* Activate the XIVE MMIOs */ + spapr_xive_mmio_set_enabled(xive, true); + + return 0; +} + +static void spapr_xive_deactivate(SpaprInterruptController *intc) +{ + SpaprXive *xive = SPAPR_XIVE(intc); + + spapr_xive_mmio_set_enabled(xive, false); + + if (spapr_xive_in_kernel(xive)) { + kvmppc_xive_disconnect(intc); + } +} + +static bool spapr_xive_in_kernel_xptr(const XivePresenter *xptr) +{ + return spapr_xive_in_kernel(SPAPR_XIVE(xptr)); +} + +static void spapr_xive_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + XiveRouterClass *xrc = XIVE_ROUTER_CLASS(klass); + SpaprInterruptControllerClass *sicc = SPAPR_INTC_CLASS(klass); + XivePresenterClass *xpc = XIVE_PRESENTER_CLASS(klass); + SpaprXiveClass *sxc = SPAPR_XIVE_CLASS(klass); + + dc->desc = "sPAPR XIVE Interrupt Controller"; + device_class_set_props(dc, spapr_xive_properties); + device_class_set_parent_realize(dc, spapr_xive_realize, + &sxc->parent_realize); + dc->vmsd = &vmstate_spapr_xive; + + xrc->get_eas = spapr_xive_get_eas; + xrc->get_end = spapr_xive_get_end; + xrc->write_end = spapr_xive_write_end; + xrc->get_nvt = spapr_xive_get_nvt; + xrc->write_nvt = spapr_xive_write_nvt; + xrc->get_block_id = spapr_xive_get_block_id; + + sicc->activate = spapr_xive_activate; + sicc->deactivate = spapr_xive_deactivate; + sicc->cpu_intc_create = spapr_xive_cpu_intc_create; + sicc->cpu_intc_reset = spapr_xive_cpu_intc_reset; + sicc->cpu_intc_destroy = spapr_xive_cpu_intc_destroy; + sicc->claim_irq = spapr_xive_claim_irq; + sicc->free_irq = spapr_xive_free_irq; + sicc->set_irq = spapr_xive_set_irq; + sicc->print_info = spapr_xive_print_info; + sicc->dt = spapr_xive_dt; + sicc->post_load = spapr_xive_post_load; + + xpc->match_nvt = spapr_xive_match_nvt; + xpc->in_kernel = spapr_xive_in_kernel_xptr; +} + +static const TypeInfo spapr_xive_info = { + .name = TYPE_SPAPR_XIVE, + .parent = TYPE_XIVE_ROUTER, + .instance_init = spapr_xive_instance_init, + .instance_size = sizeof(SpaprXive), + .class_init = spapr_xive_class_init, + .class_size = sizeof(SpaprXiveClass), + .interfaces = (InterfaceInfo[]) { + { TYPE_SPAPR_INTC }, + { } + }, +}; + +static void spapr_xive_register_types(void) +{ + type_register_static(&spapr_xive_info); +} + +type_init(spapr_xive_register_types) + +/* + * XIVE hcalls + * + * The terminology used by the XIVE hcalls is the following : + * + * TARGET vCPU number + * EQ Event Queue assigned by OS to receive event data + * ESB page for source interrupt management + * LISN Logical Interrupt Source Number identifying a source in the + * machine + * EISN Effective Interrupt Source Number used by guest OS to + * identify source in the guest + * + * The EAS, END, NVT structures are not exposed. 
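+ *
+ * A trigger on a LISN thus follows the chain: the EAS for the LISN
+ * names an END, the END is the OS event queue of a target/priority
+ * pair, and the END's NVT fields name the vCPU to notify.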
+ */ + +/* + * On POWER9, the KVM XIVE device uses priority 7 for the escalation + * interrupts. So we only allow the guest to use priorities [0..6]. + */ +static bool spapr_xive_priority_is_reserved(SpaprXive *xive, uint8_t priority) +{ + return priority >= xive->hv_prio; +} + +/* + * The H_INT_GET_SOURCE_INFO hcall() is used to obtain the logical + * real address of the MMIO page through which the Event State Buffer + * entry associated with the value of the "lisn" parameter is managed. + * + * Parameters: + * Input + * - R4: "flags" + * Bits 0-63 reserved + * - R5: "lisn" is per "interrupts", "interrupt-map", or + * "ibm,xive-lisn-ranges" properties, or as returned by the + * ibm,query-interrupt-source-number RTAS call, or as returned + * by the H_ALLOCATE_VAS_WINDOW hcall + * + * Output + * - R4: "flags" + * Bits 0-59: Reserved + * Bit 60: H_INT_ESB must be used for Event State Buffer + * management + * Bit 61: 1 == LSI 0 == MSI + * Bit 62: the full function page supports trigger + * Bit 63: Store EOI Supported + * - R5: Logical Real address of full function Event State Buffer + * management page, -1 if H_INT_ESB hcall flag is set to 1. + * - R6: Logical Real Address of trigger only Event State Buffer + * management page or -1. + * - R7: Power of 2 page size for the ESB management pages returned in + * R5 and R6. + */ + +#define SPAPR_XIVE_SRC_H_INT_ESB PPC_BIT(60) /* ESB manage with H_INT_ESB */ +#define SPAPR_XIVE_SRC_LSI PPC_BIT(61) /* Virtual LSI type */ +#define SPAPR_XIVE_SRC_TRIGGER PPC_BIT(62) /* Trigger and management + on same page */ +#define SPAPR_XIVE_SRC_STORE_EOI PPC_BIT(63) /* Store EOI support */ + +static target_ulong h_int_get_source_info(PowerPCCPU *cpu, + SpaprMachineState *spapr, + target_ulong opcode, + target_ulong *args) +{ + SpaprXive *xive = spapr->xive; + XiveSource *xsrc = &xive->source; + target_ulong flags = args[0]; + target_ulong lisn = args[1]; + + trace_spapr_xive_get_source_info(flags, lisn); + + if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) { + return H_FUNCTION; + } + + if (flags) { + return H_PARAMETER; + } + + if (lisn >= xive->nr_irqs) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN " TARGET_FMT_lx "\n", + lisn); + return H_P2; + } + + if (!xive_eas_is_valid(&xive->eat[lisn])) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN " TARGET_FMT_lx "\n", + lisn); + return H_P2; + } + + /* + * All sources are emulated under the main XIVE object and share + * the same characteristics. + */ + args[0] = 0; + if (!xive_source_esb_has_2page(xsrc)) { + args[0] |= SPAPR_XIVE_SRC_TRIGGER; + } + if (xsrc->esb_flags & XIVE_SRC_STORE_EOI) { + args[0] |= SPAPR_XIVE_SRC_STORE_EOI; + } + + /* + * Force the use of the H_INT_ESB hcall in case of an LSI + * interrupt. This is necessary under KVM to re-trigger the + * interrupt if the level is still asserted + */ + if (xive_source_irq_is_lsi(xsrc, lisn)) { + args[0] |= SPAPR_XIVE_SRC_H_INT_ESB | SPAPR_XIVE_SRC_LSI; + } + + if (!(args[0] & SPAPR_XIVE_SRC_H_INT_ESB)) { + args[1] = xive->vc_base + xive_source_esb_mgmt(xsrc, lisn); + } else { + args[1] = -1; + } + + if (xive_source_esb_has_2page(xsrc) && + !(args[0] & SPAPR_XIVE_SRC_H_INT_ESB)) { + args[2] = xive->vc_base + xive_source_esb_page(xsrc, lisn); + } else { + args[2] = -1; + } + + if (xive_source_esb_has_2page(xsrc)) { + args[3] = xsrc->esb_shift - 1; + } else { + args[3] = xsrc->esb_shift; + } + + return H_SUCCESS; +} + +/* + * The H_INT_SET_SOURCE_CONFIG hcall() is used to assign a Logical + * Interrupt Source to a target. 
The Logical Interrupt Source is + * designated with the "lisn" parameter and the target is designated + * with the "target" and "priority" parameters. Upon return from the + * hcall(), no additional interrupts will be directed to the old EQ. + * + * Parameters: + * Input: + * - R4: "flags" + * Bits 0-61: Reserved + * Bit 62: set the "eisn" in the EAS + * Bit 63: masks the interrupt source in the hardware interrupt + * control structure. An interrupt masked by this mechanism will + * be dropped, but it's source state bits will still be + * set. There is no race-free way of unmasking and restoring the + * source. Thus this should only be used in interrupts that are + * also masked at the source, and only in cases where the + * interrupt is not meant to be used for a large amount of time + * because no valid target exists for it for example + * - R5: "lisn" is per "interrupts", "interrupt-map", or + * "ibm,xive-lisn-ranges" properties, or as returned by the + * ibm,query-interrupt-source-number RTAS call, or as returned by + * the H_ALLOCATE_VAS_WINDOW hcall + * - R6: "target" is per "ibm,ppc-interrupt-server#s" or + * "ibm,ppc-interrupt-gserver#s" + * - R7: "priority" is a valid priority not in + * "ibm,plat-res-int-priorities" + * - R8: "eisn" is the guest EISN associated with the "lisn" + * + * Output: + * - None + */ + +#define SPAPR_XIVE_SRC_SET_EISN PPC_BIT(62) +#define SPAPR_XIVE_SRC_MASK PPC_BIT(63) + +static target_ulong h_int_set_source_config(PowerPCCPU *cpu, + SpaprMachineState *spapr, + target_ulong opcode, + target_ulong *args) +{ + SpaprXive *xive = spapr->xive; + XiveEAS eas, new_eas; + target_ulong flags = args[0]; + target_ulong lisn = args[1]; + target_ulong target = args[2]; + target_ulong priority = args[3]; + target_ulong eisn = args[4]; + uint8_t end_blk; + uint32_t end_idx; + + trace_spapr_xive_set_source_config(flags, lisn, target, priority, eisn); + + if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) { + return H_FUNCTION; + } + + if (flags & ~(SPAPR_XIVE_SRC_SET_EISN | SPAPR_XIVE_SRC_MASK)) { + return H_PARAMETER; + } + + if (lisn >= xive->nr_irqs) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN " TARGET_FMT_lx "\n", + lisn); + return H_P2; + } + + eas = xive->eat[lisn]; + if (!xive_eas_is_valid(&eas)) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN " TARGET_FMT_lx "\n", + lisn); + return H_P2; + } + + /* priority 0xff is used to reset the EAS */ + if (priority == 0xff) { + new_eas.w = cpu_to_be64(EAS_VALID | EAS_MASKED); + goto out; + } + + if (flags & SPAPR_XIVE_SRC_MASK) { + new_eas.w = eas.w | cpu_to_be64(EAS_MASKED); + } else { + new_eas.w = eas.w & cpu_to_be64(~EAS_MASKED); + } + + if (spapr_xive_priority_is_reserved(xive, priority)) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: priority " TARGET_FMT_ld + " is reserved\n", priority); + return H_P4; + } + + /* + * Validate that "target" is part of the list of threads allocated + * to the partition. For that, find the END corresponding to the + * target. 
+     */
+    if (spapr_xive_target_to_end(target, priority, &end_blk, &end_idx)) {
+        return H_P3;
+    }
+
+    new_eas.w = xive_set_field64(EAS_END_BLOCK, new_eas.w, end_blk);
+    new_eas.w = xive_set_field64(EAS_END_INDEX, new_eas.w, end_idx);
+
+    if (flags & SPAPR_XIVE_SRC_SET_EISN) {
+        new_eas.w = xive_set_field64(EAS_END_DATA, new_eas.w, eisn);
+    }
+
+    if (spapr_xive_in_kernel(xive)) {
+        Error *local_err = NULL;
+
+        kvmppc_xive_set_source_config(xive, lisn, &new_eas, &local_err);
+        if (local_err) {
+            error_report_err(local_err);
+            return H_HARDWARE;
+        }
+    }
+
+out:
+    xive->eat[lisn] = new_eas;
+    return H_SUCCESS;
+}
+
+/*
+ * The H_INT_GET_SOURCE_CONFIG hcall() is used to determine which
+ * target/priority pair is assigned to the specified Logical Interrupt
+ * Source.
+ *
+ * Parameters:
+ * Input:
+ * - R4: "flags"
+ *         Bits 0-63 Reserved
+ * - R5: "lisn" is per "interrupts", "interrupt-map", or
+ *       "ibm,xive-lisn-ranges" properties, or as returned by the
+ *       ibm,query-interrupt-source-number RTAS call, or as
+ *       returned by the H_ALLOCATE_VAS_WINDOW hcall
+ *
+ * Output:
+ * - R4: Target to which the specified Logical Interrupt Source is
+ *       assigned
+ * - R5: Priority to which the specified Logical Interrupt Source is
+ *       assigned
+ * - R6: EISN for the specified Logical Interrupt Source (this will be
+ *       equivalent to the LISN if not changed by H_INT_SET_SOURCE_CONFIG)
+ */
+static target_ulong h_int_get_source_config(PowerPCCPU *cpu,
+                                            SpaprMachineState *spapr,
+                                            target_ulong opcode,
+                                            target_ulong *args)
+{
+    SpaprXive *xive = spapr->xive;
+    target_ulong flags = args[0];
+    target_ulong lisn = args[1];
+    XiveEAS eas;
+    XiveEND *end;
+    uint8_t nvt_blk;
+    uint32_t end_idx, nvt_idx;
+
+    trace_spapr_xive_get_source_config(flags, lisn);
+
+    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
+        return H_FUNCTION;
+    }
+
+    if (flags) {
+        return H_PARAMETER;
+    }
+
+    if (lisn >= xive->nr_irqs) {
+        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN " TARGET_FMT_lx "\n",
+                      lisn);
+        return H_P2;
+    }
+
+    eas = xive->eat[lisn];
+    if (!xive_eas_is_valid(&eas)) {
+        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN " TARGET_FMT_lx "\n",
+                      lisn);
+        return H_P2;
+    }
+
+    /* EAS_END_BLOCK is unused on sPAPR */
+    end_idx = xive_get_field64(EAS_END_INDEX, eas.w);
+
+    assert(end_idx < xive->nr_ends);
+    end = &xive->endt[end_idx];
+
+    nvt_blk = xive_get_field32(END_W6_NVT_BLOCK, end->w6);
+    nvt_idx = xive_get_field32(END_W6_NVT_INDEX, end->w6);
+    args[0] = spapr_xive_nvt_to_target(nvt_blk, nvt_idx);
+
+    if (xive_eas_is_masked(&eas)) {
+        args[1] = 0xff;
+    } else {
+        args[1] = xive_get_field32(END_W7_F0_PRIORITY, end->w7);
+    }
+
+    args[2] = xive_get_field64(EAS_END_DATA, eas.w);
+
+    return H_SUCCESS;
+}
+
+/*
+ * The H_INT_GET_QUEUE_INFO hcall() is used to get the logical real
+ * address of the notification management page associated with the
+ * specified target and priority.
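+ * (The page returned below lives in the END ESB space, which realize()
+ * maps right after the source ESB pages.)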
+ * + * Parameters: + * Input: + * - R4: "flags" + * Bits 0-63 Reserved + * - R5: "target" is per "ibm,ppc-interrupt-server#s" or + * "ibm,ppc-interrupt-gserver#s" + * - R6: "priority" is a valid priority not in + * "ibm,plat-res-int-priorities" + * + * Output: + * - R4: Logical real address of notification page + * - R5: Power of 2 page size of the notification page + */ +static target_ulong h_int_get_queue_info(PowerPCCPU *cpu, + SpaprMachineState *spapr, + target_ulong opcode, + target_ulong *args) +{ + SpaprXive *xive = spapr->xive; + XiveENDSource *end_xsrc = &xive->end_source; + target_ulong flags = args[0]; + target_ulong target = args[1]; + target_ulong priority = args[2]; + XiveEND *end; + uint8_t end_blk; + uint32_t end_idx; + + trace_spapr_xive_get_queue_info(flags, target, priority); + + if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) { + return H_FUNCTION; + } + + if (flags) { + return H_PARAMETER; + } + + /* + * H_STATE should be returned if a H_INT_RESET is in progress. + * This is not needed when running the emulation under QEMU + */ + + if (spapr_xive_priority_is_reserved(xive, priority)) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: priority " TARGET_FMT_ld + " is reserved\n", priority); + return H_P3; + } + + /* + * Validate that "target" is part of the list of threads allocated + * to the partition. For that, find the END corresponding to the + * target. + */ + if (spapr_xive_target_to_end(target, priority, &end_blk, &end_idx)) { + return H_P2; + } + + assert(end_idx < xive->nr_ends); + end = &xive->endt[end_idx]; + + args[0] = xive->end_base + (1ull << (end_xsrc->esb_shift + 1)) * end_idx; + if (xive_end_is_enqueue(end)) { + args[1] = xive_get_field32(END_W0_QSIZE, end->w0) + 12; + } else { + args[1] = 0; + } + + return H_SUCCESS; +} + +/* + * The H_INT_SET_QUEUE_CONFIG hcall() is used to set or reset a EQ for + * a given "target" and "priority". It is also used to set the + * notification config associated with the EQ. An EQ size of 0 is + * used to reset the EQ config for a given target and priority. If + * resetting the EQ config, the END associated with the given "target" + * and "priority" will be changed to disable queueing. + * + * Upon return from the hcall(), no additional interrupts will be + * directed to the old EQ (if one was set). The old EQ (if one was + * set) should be investigated for interrupts that occurred prior to + * or during the hcall(). 
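+ *
+ * (For illustration, with made-up register values: a guest tears down
+ * the priority-5 EQ of thread 0 by passing "eventQueueSize" = 0, i.e.
+ * R4..R8 = 0, 0, 5, 0, 0; the END associated with that target and
+ * priority is then reset and queueing is disabled, as described above.)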
+ * + * Parameters: + * Input: + * - R4: "flags" + * Bits 0-62: Reserved + * Bit 63: Unconditional Notify (n) per the XIVE spec + * - R5: "target" is per "ibm,ppc-interrupt-server#s" or + * "ibm,ppc-interrupt-gserver#s" + * - R6: "priority" is a valid priority not in + * "ibm,plat-res-int-priorities" + * - R7: "eventQueue": The logical real address of the start of the EQ + * - R8: "eventQueueSize": The power of 2 EQ size per "ibm,xive-eq-sizes" + * + * Output: + * - None + */ + +#define SPAPR_XIVE_END_ALWAYS_NOTIFY PPC_BIT(63) + +static target_ulong h_int_set_queue_config(PowerPCCPU *cpu, + SpaprMachineState *spapr, + target_ulong opcode, + target_ulong *args) +{ + SpaprXive *xive = spapr->xive; + target_ulong flags = args[0]; + target_ulong target = args[1]; + target_ulong priority = args[2]; + target_ulong qpage = args[3]; + target_ulong qsize = args[4]; + XiveEND end; + uint8_t end_blk, nvt_blk; + uint32_t end_idx, nvt_idx; + + trace_spapr_xive_set_queue_config(flags, target, priority, qpage, qsize); + + if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) { + return H_FUNCTION; + } + + if (flags & ~SPAPR_XIVE_END_ALWAYS_NOTIFY) { + return H_PARAMETER; + } + + /* + * H_STATE should be returned if a H_INT_RESET is in progress. + * This is not needed when running the emulation under QEMU + */ + + if (spapr_xive_priority_is_reserved(xive, priority)) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: priority " TARGET_FMT_ld + " is reserved\n", priority); + return H_P3; + } + + /* + * Validate that "target" is part of the list of threads allocated + * to the partition. For that, find the END corresponding to the + * target. + */ + + if (spapr_xive_target_to_end(target, priority, &end_blk, &end_idx)) { + return H_P2; + } + + assert(end_idx < xive->nr_ends); + memcpy(&end, &xive->endt[end_idx], sizeof(XiveEND)); + + switch (qsize) { + case 12: + case 16: + case 21: + case 24: + if (!QEMU_IS_ALIGNED(qpage, 1ul << qsize)) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: EQ @0x%" HWADDR_PRIx + " is not naturally aligned with %" HWADDR_PRIx "\n", + qpage, (hwaddr)1 << qsize); + return H_P4; + } + end.w2 = cpu_to_be32((qpage >> 32) & 0x0fffffff); + end.w3 = cpu_to_be32(qpage & 0xffffffff); + end.w0 |= cpu_to_be32(END_W0_ENQUEUE); + end.w0 = xive_set_field32(END_W0_QSIZE, end.w0, qsize - 12); + break; + case 0: + /* reset queue and disable queueing */ + spapr_xive_end_reset(&end); + goto out; + + default: + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid EQ size %"PRIx64"\n", + qsize); + return H_P5; + } + + if (qsize) { + hwaddr plen = 1 << qsize; + void *eq; + + /* + * Validate the guest EQ. We should also check that the queue + * has been zeroed by the OS. 
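+     *
+     * (Worked example for the encoding above: a 64kB queue is passed
+     * as qsize = 16, must be aligned on a 64kB boundary, and is
+     * stored in the END as END_W0_QSIZE = 16 - 12 = 4.)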
+ */ + eq = address_space_map(CPU(cpu)->as, qpage, &plen, true, + MEMTXATTRS_UNSPECIFIED); + if (plen != 1 << qsize) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to map EQ @0x%" + HWADDR_PRIx "\n", qpage); + return H_P4; + } + address_space_unmap(CPU(cpu)->as, eq, plen, true, plen); + } + + /* "target" should have been validated above */ + if (spapr_xive_target_to_nvt(target, &nvt_blk, &nvt_idx)) { + g_assert_not_reached(); + } + + /* + * Ensure the priority and target are correctly set (they will not + * be right after allocation) + */ + end.w6 = xive_set_field32(END_W6_NVT_BLOCK, 0ul, nvt_blk) | + xive_set_field32(END_W6_NVT_INDEX, 0ul, nvt_idx); + end.w7 = xive_set_field32(END_W7_F0_PRIORITY, 0ul, priority); + + if (flags & SPAPR_XIVE_END_ALWAYS_NOTIFY) { + end.w0 |= cpu_to_be32(END_W0_UCOND_NOTIFY); + } else { + end.w0 &= cpu_to_be32((uint32_t)~END_W0_UCOND_NOTIFY); + } + + /* + * The generation bit for the END starts at 1 and The END page + * offset counter starts at 0. + */ + end.w1 = cpu_to_be32(END_W1_GENERATION) | + xive_set_field32(END_W1_PAGE_OFF, 0ul, 0ul); + end.w0 |= cpu_to_be32(END_W0_VALID); + + /* + * TODO: issue syncs required to ensure all in-flight interrupts + * are complete on the old END + */ + +out: + if (spapr_xive_in_kernel(xive)) { + Error *local_err = NULL; + + kvmppc_xive_set_queue_config(xive, end_blk, end_idx, &end, &local_err); + if (local_err) { + error_report_err(local_err); + return H_HARDWARE; + } + } + + /* Update END */ + memcpy(&xive->endt[end_idx], &end, sizeof(XiveEND)); + return H_SUCCESS; +} + +/* + * The H_INT_GET_QUEUE_CONFIG hcall() is used to get a EQ for a given + * target and priority. + * + * Parameters: + * Input: + * - R4: "flags" + * Bits 0-62: Reserved + * Bit 63: Debug: Return debug data + * - R5: "target" is per "ibm,ppc-interrupt-server#s" or + * "ibm,ppc-interrupt-gserver#s" + * - R6: "priority" is a valid priority not in + * "ibm,plat-res-int-priorities" + * + * Output: + * - R4: "flags": + * Bits 0-61: Reserved + * Bit 62: The value of Event Queue Generation Number (g) per + * the XIVE spec if "Debug" = 1 + * Bit 63: The value of Unconditional Notify (n) per the XIVE spec + * - R5: The logical real address of the start of the EQ + * - R6: The power of 2 EQ size per "ibm,xive-eq-sizes" + * - R7: The value of Event Queue Offset Counter per XIVE spec + * if "Debug" = 1, else 0 + * + */ + +#define SPAPR_XIVE_END_DEBUG PPC_BIT(63) + +static target_ulong h_int_get_queue_config(PowerPCCPU *cpu, + SpaprMachineState *spapr, + target_ulong opcode, + target_ulong *args) +{ + SpaprXive *xive = spapr->xive; + target_ulong flags = args[0]; + target_ulong target = args[1]; + target_ulong priority = args[2]; + XiveEND *end; + uint8_t end_blk; + uint32_t end_idx; + + trace_spapr_xive_get_queue_config(flags, target, priority); + + if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) { + return H_FUNCTION; + } + + if (flags & ~SPAPR_XIVE_END_DEBUG) { + return H_PARAMETER; + } + + /* + * H_STATE should be returned if a H_INT_RESET is in progress. + * This is not needed when running the emulation under QEMU + */ + + if (spapr_xive_priority_is_reserved(xive, priority)) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: priority " TARGET_FMT_ld + " is reserved\n", priority); + return H_P3; + } + + /* + * Validate that "target" is part of the list of threads allocated + * to the partition. For that, find the END corresponding to the + * target. 
+ */ + if (spapr_xive_target_to_end(target, priority, &end_blk, &end_idx)) { + return H_P2; + } + + assert(end_idx < xive->nr_ends); + end = &xive->endt[end_idx]; + + args[0] = 0; + if (xive_end_is_notify(end)) { + args[0] |= SPAPR_XIVE_END_ALWAYS_NOTIFY; + } + + if (xive_end_is_enqueue(end)) { + args[1] = xive_end_qaddr(end); + args[2] = xive_get_field32(END_W0_QSIZE, end->w0) + 12; + } else { + args[1] = 0; + args[2] = 0; + } + + if (spapr_xive_in_kernel(xive)) { + Error *local_err = NULL; + + kvmppc_xive_get_queue_config(xive, end_blk, end_idx, end, &local_err); + if (local_err) { + error_report_err(local_err); + return H_HARDWARE; + } + } + + /* TODO: do we need any locking on the END ? */ + if (flags & SPAPR_XIVE_END_DEBUG) { + /* Load the event queue generation number into the return flags */ + args[0] |= (uint64_t)xive_get_field32(END_W1_GENERATION, end->w1) << 62; + + /* Load R7 with the event queue offset counter */ + args[3] = xive_get_field32(END_W1_PAGE_OFF, end->w1); + } else { + args[3] = 0; + } + + return H_SUCCESS; +} + +/* + * The H_INT_SET_OS_REPORTING_LINE hcall() is used to set the + * reporting cache line pair for the calling thread. The reporting + * cache lines will contain the OS interrupt context when the OS + * issues a CI store byte to @TIMA+0xC10 to acknowledge the OS + * interrupt. The reporting cache lines can be reset by inputting -1 + * in "reportingLine". Issuing the CI store byte without reporting + * cache lines registered will result in the data not being accessible + * to the OS. + * + * Parameters: + * Input: + * - R4: "flags" + * Bits 0-63: Reserved + * - R5: "reportingLine": The logical real address of the reporting cache + * line pair + * + * Output: + * - None + */ +static target_ulong h_int_set_os_reporting_line(PowerPCCPU *cpu, + SpaprMachineState *spapr, + target_ulong opcode, + target_ulong *args) +{ + target_ulong flags = args[0]; + + trace_spapr_xive_set_os_reporting_line(flags); + + if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) { + return H_FUNCTION; + } + + /* + * H_STATE should be returned if a H_INT_RESET is in progress. + * This is not needed when running the emulation under QEMU + */ + + /* TODO: H_INT_SET_OS_REPORTING_LINE */ + return H_FUNCTION; +} + +/* + * The H_INT_GET_OS_REPORTING_LINE hcall() is used to get the logical + * real address of the reporting cache line pair set for the input + * "target". If no reporting cache line pair has been set, -1 is + * returned. + * + * Parameters: + * Input: + * - R4: "flags" + * Bits 0-63: Reserved + * - R5: "target" is per "ibm,ppc-interrupt-server#s" or + * "ibm,ppc-interrupt-gserver#s" + * - R6: "reportingLine": The logical real address of the reporting + * cache line pair + * + * Output: + * - R4: The logical real address of the reporting line if set, else -1 + */ +static target_ulong h_int_get_os_reporting_line(PowerPCCPU *cpu, + SpaprMachineState *spapr, + target_ulong opcode, + target_ulong *args) +{ + target_ulong flags = args[0]; + + trace_spapr_xive_get_os_reporting_line(flags); + + if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) { + return H_FUNCTION; + } + + /* + * H_STATE should be returned if a H_INT_RESET is in progress. + * This is not needed when running the emulation under QEMU + */ + + /* TODO: H_INT_GET_OS_REPORTING_LINE */ + return H_FUNCTION; +} + +/* + * The H_INT_ESB hcall() is used to issue a load or store to the ESB + * page for the input "lisn". 
This hcall is only supported for LISNs + * that have the ESB hcall flag set to 1 when returned from hcall() + * H_INT_GET_SOURCE_INFO. + * + * Parameters: + * Input: + * - R4: "flags" + * Bits 0-62: Reserved + * bit 63: Store: Store=1, store operation, else load operation + * - R5: "lisn" is per "interrupts", "interrupt-map", or + * "ibm,xive-lisn-ranges" properties, or as returned by the + * ibm,query-interrupt-source-number RTAS call, or as + * returned by the H_ALLOCATE_VAS_WINDOW hcall + * - R6: "esbOffset" is the offset into the ESB page for the load or + * store operation + * - R7: "storeData" is the data to write for a store operation + * + * Output: + * - R4: The value of the load if load operation, else -1 + */ + +#define SPAPR_XIVE_ESB_STORE PPC_BIT(63) + +static target_ulong h_int_esb(PowerPCCPU *cpu, + SpaprMachineState *spapr, + target_ulong opcode, + target_ulong *args) +{ + SpaprXive *xive = spapr->xive; + XiveEAS eas; + target_ulong flags = args[0]; + target_ulong lisn = args[1]; + target_ulong offset = args[2]; + target_ulong data = args[3]; + hwaddr mmio_addr; + XiveSource *xsrc = &xive->source; + + trace_spapr_xive_esb(flags, lisn, offset, data); + + if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) { + return H_FUNCTION; + } + + if (flags & ~SPAPR_XIVE_ESB_STORE) { + return H_PARAMETER; + } + + if (lisn >= xive->nr_irqs) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN " TARGET_FMT_lx "\n", + lisn); + return H_P2; + } + + eas = xive->eat[lisn]; + if (!xive_eas_is_valid(&eas)) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN " TARGET_FMT_lx "\n", + lisn); + return H_P2; + } + + if (offset > (1ull << xsrc->esb_shift)) { + return H_P3; + } + + if (spapr_xive_in_kernel(xive)) { + args[0] = kvmppc_xive_esb_rw(xsrc, lisn, offset, data, + flags & SPAPR_XIVE_ESB_STORE); + } else { + mmio_addr = xive->vc_base + xive_source_esb_mgmt(xsrc, lisn) + offset; + + if (dma_memory_rw(&address_space_memory, mmio_addr, &data, 8, + (flags & SPAPR_XIVE_ESB_STORE))) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to access ESB @0x%" + HWADDR_PRIx "\n", mmio_addr); + return H_HARDWARE; + } + args[0] = (flags & SPAPR_XIVE_ESB_STORE) ? -1 : data; + } + return H_SUCCESS; +} + +/* + * The H_INT_SYNC hcall() is used to issue hardware syncs that will + * ensure any in flight events for the input lisn are in the event + * queue. + * + * Parameters: + * Input: + * - R4: "flags" + * Bits 0-63: Reserved + * - R5: "lisn" is per "interrupts", "interrupt-map", or + * "ibm,xive-lisn-ranges" properties, or as returned by the + * ibm,query-interrupt-source-number RTAS call, or as + * returned by the H_ALLOCATE_VAS_WINDOW hcall + * + * Output: + * - None + */ +static target_ulong h_int_sync(PowerPCCPU *cpu, + SpaprMachineState *spapr, + target_ulong opcode, + target_ulong *args) +{ + SpaprXive *xive = spapr->xive; + XiveEAS eas; + target_ulong flags = args[0]; + target_ulong lisn = args[1]; + + trace_spapr_xive_sync(flags, lisn); + + if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) { + return H_FUNCTION; + } + + if (flags) { + return H_PARAMETER; + } + + if (lisn >= xive->nr_irqs) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN " TARGET_FMT_lx "\n", + lisn); + return H_P2; + } + + eas = xive->eat[lisn]; + if (!xive_eas_is_valid(&eas)) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN " TARGET_FMT_lx "\n", + lisn); + return H_P2; + } + + /* + * H_STATE should be returned if a H_INT_RESET is in progress. 
+ * This is not needed when running the emulation under QEMU + */ + + /* + * This is not real hardware. Nothing to be done unless when + * under KVM + */ + + if (spapr_xive_in_kernel(xive)) { + Error *local_err = NULL; + + kvmppc_xive_sync_source(xive, lisn, &local_err); + if (local_err) { + error_report_err(local_err); + return H_HARDWARE; + } + } + return H_SUCCESS; +} + +/* + * The H_INT_RESET hcall() is used to reset all of the partition's + * interrupt exploitation structures to their initial state. This + * means losing all previously set interrupt state set via + * H_INT_SET_SOURCE_CONFIG and H_INT_SET_QUEUE_CONFIG. + * + * Parameters: + * Input: + * - R4: "flags" + * Bits 0-63: Reserved + * + * Output: + * - None + */ +static target_ulong h_int_reset(PowerPCCPU *cpu, + SpaprMachineState *spapr, + target_ulong opcode, + target_ulong *args) +{ + SpaprXive *xive = spapr->xive; + target_ulong flags = args[0]; + + trace_spapr_xive_reset(flags); + + if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) { + return H_FUNCTION; + } + + if (flags) { + return H_PARAMETER; + } + + device_cold_reset(DEVICE(xive)); + + if (spapr_xive_in_kernel(xive)) { + Error *local_err = NULL; + + kvmppc_xive_reset(xive, &local_err); + if (local_err) { + error_report_err(local_err); + return H_HARDWARE; + } + } + return H_SUCCESS; +} + +void spapr_xive_hcall_init(SpaprMachineState *spapr) +{ + spapr_register_hypercall(H_INT_GET_SOURCE_INFO, h_int_get_source_info); + spapr_register_hypercall(H_INT_SET_SOURCE_CONFIG, h_int_set_source_config); + spapr_register_hypercall(H_INT_GET_SOURCE_CONFIG, h_int_get_source_config); + spapr_register_hypercall(H_INT_GET_QUEUE_INFO, h_int_get_queue_info); + spapr_register_hypercall(H_INT_SET_QUEUE_CONFIG, h_int_set_queue_config); + spapr_register_hypercall(H_INT_GET_QUEUE_CONFIG, h_int_get_queue_config); + spapr_register_hypercall(H_INT_SET_OS_REPORTING_LINE, + h_int_set_os_reporting_line); + spapr_register_hypercall(H_INT_GET_OS_REPORTING_LINE, + h_int_get_os_reporting_line); + spapr_register_hypercall(H_INT_ESB, h_int_esb); + spapr_register_hypercall(H_INT_SYNC, h_int_sync); + spapr_register_hypercall(H_INT_RESET, h_int_reset); +} diff --git a/hw/intc/spapr_xive_kvm.c b/hw/intc/spapr_xive_kvm.c new file mode 100644 index 000000000..61fe7bd2d --- /dev/null +++ b/hw/intc/spapr_xive_kvm.c @@ -0,0 +1,869 @@ +/* + * QEMU PowerPC sPAPR XIVE interrupt controller model + * + * Copyright (c) 2017-2019, IBM Corporation. + * + * This code is licensed under the GPL version 2 or later. See the + * COPYING file in the top-level directory. 
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/log.h"
+#include "qemu/error-report.h"
+#include "qapi/error.h"
+#include "target/ppc/cpu.h"
+#include "sysemu/cpus.h"
+#include "sysemu/kvm.h"
+#include "sysemu/runstate.h"
+#include "hw/ppc/spapr.h"
+#include "hw/ppc/spapr_cpu_core.h"
+#include "hw/ppc/spapr_xive.h"
+#include "hw/ppc/xive.h"
+#include "kvm_ppc.h"
+#include "trace.h"
+
+#include <sys/ioctl.h>
+
+/*
+ * Helpers for CPU hotplug
+ *
+ * TODO: make a common KVMEnabledCPU layer for XICS and XIVE
+ */
+typedef struct KVMEnabledCPU {
+    unsigned long vcpu_id;
+    QLIST_ENTRY(KVMEnabledCPU) node;
+} KVMEnabledCPU;
+
+static QLIST_HEAD(, KVMEnabledCPU)
+    kvm_enabled_cpus = QLIST_HEAD_INITIALIZER(&kvm_enabled_cpus);
+
+static bool kvm_cpu_is_enabled(CPUState *cs)
+{
+    KVMEnabledCPU *enabled_cpu;
+    unsigned long vcpu_id = kvm_arch_vcpu_id(cs);
+
+    QLIST_FOREACH(enabled_cpu, &kvm_enabled_cpus, node) {
+        if (enabled_cpu->vcpu_id == vcpu_id) {
+            return true;
+        }
+    }
+    return false;
+}
+
+static void kvm_cpu_enable(CPUState *cs)
+{
+    KVMEnabledCPU *enabled_cpu;
+    unsigned long vcpu_id = kvm_arch_vcpu_id(cs);
+
+    enabled_cpu = g_malloc(sizeof(*enabled_cpu));
+    enabled_cpu->vcpu_id = vcpu_id;
+    QLIST_INSERT_HEAD(&kvm_enabled_cpus, enabled_cpu, node);
+}
+
+static void kvm_cpu_disable_all(void)
+{
+    KVMEnabledCPU *enabled_cpu, *next;
+
+    QLIST_FOREACH_SAFE(enabled_cpu, &kvm_enabled_cpus, node, next) {
+        QLIST_REMOVE(enabled_cpu, node);
+        g_free(enabled_cpu);
+    }
+}
+
+/*
+ * XIVE Thread Interrupt Management context (KVM)
+ */
+
+int kvmppc_xive_cpu_set_state(XiveTCTX *tctx, Error **errp)
+{
+    SpaprXive *xive = SPAPR_XIVE(tctx->xptr);
+    uint64_t state[2];
+    int ret;
+
+    assert(xive->fd != -1);
+
+    /* word0 and word1 of the OS ring. */
+    state[0] = *((uint64_t *) &tctx->regs[TM_QW1_OS]);
+
+    ret = kvm_set_one_reg(tctx->cs, KVM_REG_PPC_VP_STATE, state);
+    if (ret != 0) {
+        error_setg_errno(errp, -ret,
+                         "XIVE: could not restore KVM state of CPU %ld",
+                         kvm_arch_vcpu_id(tctx->cs));
+        return ret;
+    }
+
+    return 0;
+}
+
+int kvmppc_xive_cpu_get_state(XiveTCTX *tctx, Error **errp)
+{
+    SpaprXive *xive = SPAPR_XIVE(tctx->xptr);
+    uint64_t state[2] = { 0 };
+    int ret;
+
+    assert(xive->fd != -1);
+
+    ret = kvm_get_one_reg(tctx->cs, KVM_REG_PPC_VP_STATE, state);
+    if (ret != 0) {
+        error_setg_errno(errp, -ret,
+                         "XIVE: could not capture KVM state of CPU %ld",
+                         kvm_arch_vcpu_id(tctx->cs));
+        return ret;
+    }
+
+    /* word0 and word1 of the OS ring. */
+    *((uint64_t *) &tctx->regs[TM_QW1_OS]) = state[0];
+
+    return 0;
+}
+
+typedef struct {
+    XiveTCTX *tctx;
+    Error **errp;
+    int ret;
+} XiveCpuGetState;
+
+static void kvmppc_xive_cpu_do_synchronize_state(CPUState *cpu,
+                                                 run_on_cpu_data arg)
+{
+    XiveCpuGetState *s = arg.host_ptr;
+
+    s->ret = kvmppc_xive_cpu_get_state(s->tctx, s->errp);
+}
+
+int kvmppc_xive_cpu_synchronize_state(XiveTCTX *tctx, Error **errp)
+{
+    XiveCpuGetState s = {
+        .tctx = tctx,
+        .errp = errp,
+    };
+
+    /*
+     * Kick the vCPU to make sure it is available for the KVM ioctl.
+     */
+    run_on_cpu(tctx->cs, kvmppc_xive_cpu_do_synchronize_state,
+               RUN_ON_CPU_HOST_PTR(&s));
+
+    return s.ret;
+}
+
+int kvmppc_xive_cpu_connect(XiveTCTX *tctx, Error **errp)
+{
+    ERRP_GUARD();
+    SpaprXive *xive = SPAPR_XIVE(tctx->xptr);
+    unsigned long vcpu_id;
+    int ret;
+
+    assert(xive->fd != -1);
+
+    /* Check if CPU was hot unplugged and replugged.
*/ + if (kvm_cpu_is_enabled(tctx->cs)) { + return 0; + } + + vcpu_id = kvm_arch_vcpu_id(tctx->cs); + + trace_kvm_xive_cpu_connect(vcpu_id); + + ret = kvm_vcpu_enable_cap(tctx->cs, KVM_CAP_PPC_IRQ_XIVE, 0, xive->fd, + vcpu_id, 0); + if (ret < 0) { + error_setg_errno(errp, -ret, + "XIVE: unable to connect CPU%ld to KVM device", + vcpu_id); + if (ret == -ENOSPC) { + error_append_hint(errp, "Try -smp maxcpus=N with N < %u\n", + MACHINE(qdev_get_machine())->smp.max_cpus); + } + return ret; + } + + kvm_cpu_enable(tctx->cs); + return 0; +} + +/* + * XIVE Interrupt Source (KVM) + */ + +int kvmppc_xive_set_source_config(SpaprXive *xive, uint32_t lisn, XiveEAS *eas, + Error **errp) +{ + uint32_t end_idx; + uint32_t end_blk; + uint8_t priority; + uint32_t server; + bool masked; + uint32_t eisn; + uint64_t kvm_src; + + assert(xive_eas_is_valid(eas)); + + end_idx = xive_get_field64(EAS_END_INDEX, eas->w); + end_blk = xive_get_field64(EAS_END_BLOCK, eas->w); + eisn = xive_get_field64(EAS_END_DATA, eas->w); + masked = xive_eas_is_masked(eas); + + spapr_xive_end_to_target(end_blk, end_idx, &server, &priority); + + kvm_src = priority << KVM_XIVE_SOURCE_PRIORITY_SHIFT & + KVM_XIVE_SOURCE_PRIORITY_MASK; + kvm_src |= server << KVM_XIVE_SOURCE_SERVER_SHIFT & + KVM_XIVE_SOURCE_SERVER_MASK; + kvm_src |= ((uint64_t) masked << KVM_XIVE_SOURCE_MASKED_SHIFT) & + KVM_XIVE_SOURCE_MASKED_MASK; + kvm_src |= ((uint64_t)eisn << KVM_XIVE_SOURCE_EISN_SHIFT) & + KVM_XIVE_SOURCE_EISN_MASK; + + return kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_SOURCE_CONFIG, lisn, + &kvm_src, true, errp); +} + +void kvmppc_xive_sync_source(SpaprXive *xive, uint32_t lisn, Error **errp) +{ + kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_SOURCE_SYNC, lisn, + NULL, true, errp); +} + +/* + * At reset, the interrupt sources are simply created and MASKED. We + * only need to inform the KVM XIVE device about their type: LSI or + * MSI. + */ +int kvmppc_xive_source_reset_one(XiveSource *xsrc, int srcno, Error **errp) +{ + SpaprXive *xive = SPAPR_XIVE(xsrc->xive); + uint64_t state = 0; + + trace_kvm_xive_source_reset(srcno); + + assert(xive->fd != -1); + + if (xive_source_irq_is_lsi(xsrc, srcno)) { + state |= KVM_XIVE_LEVEL_SENSITIVE; + if (xive_source_is_asserted(xsrc, srcno)) { + state |= KVM_XIVE_LEVEL_ASSERTED; + } + } + + return kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_SOURCE, srcno, &state, + true, errp); +} + +static int kvmppc_xive_source_reset(XiveSource *xsrc, Error **errp) +{ + SpaprXive *xive = SPAPR_XIVE(xsrc->xive); + int i; + + for (i = 0; i < xsrc->nr_irqs; i++) { + int ret; + + if (!xive_eas_is_valid(&xive->eat[i])) { + continue; + } + + ret = kvmppc_xive_source_reset_one(xsrc, i, errp); + if (ret < 0) { + return ret; + } + } + + return 0; +} + +/* + * This is used to perform the magic loads on the ESB pages, described + * in xive.h. + * + * Memory barriers should not be needed for loads (no store for now). 
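+ *
+ * (Sketch of how the magic offsets are used further down in this
+ * file: a load at XIVE_ESB_GET returns the current PQ bits without
+ * changing them, while a load at XIVE_ESB_SET_PQ_10 atomically sets
+ * PQ to "pending" and returns the previous value:
+ *
+ *   pq  = xive_esb_rw(xsrc, srcno, XIVE_ESB_GET, 0, 0) & 0x3;
+ *   old = xive_esb_rw(xsrc, srcno, XIVE_ESB_SET_PQ_10, 0, 0) & 0x3;
+ *
+ * both patterns appear in the state save/restore code below.)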
+ */ +static uint64_t xive_esb_rw(XiveSource *xsrc, int srcno, uint32_t offset, + uint64_t data, bool write) +{ + uint64_t *addr = xsrc->esb_mmap + xive_source_esb_mgmt(xsrc, srcno) + + offset; + + if (write) { + *addr = cpu_to_be64(data); + return -1; + } else { + /* Prevent the compiler from optimizing away the load */ + volatile uint64_t value = be64_to_cpu(*addr); + return value; + } +} + +static uint8_t xive_esb_read(XiveSource *xsrc, int srcno, uint32_t offset) +{ + return xive_esb_rw(xsrc, srcno, offset, 0, 0) & 0x3; +} + +static void kvmppc_xive_esb_trigger(XiveSource *xsrc, int srcno) +{ + xive_esb_rw(xsrc, srcno, 0, 0, true); +} + +uint64_t kvmppc_xive_esb_rw(XiveSource *xsrc, int srcno, uint32_t offset, + uint64_t data, bool write) +{ + if (write) { + return xive_esb_rw(xsrc, srcno, offset, data, 1); + } + + /* + * Special Load EOI handling for LSI sources. Q bit is never set + * and the interrupt should be re-triggered if the level is still + * asserted. + */ + if (xive_source_irq_is_lsi(xsrc, srcno) && + offset == XIVE_ESB_LOAD_EOI) { + xive_esb_read(xsrc, srcno, XIVE_ESB_SET_PQ_00); + if (xive_source_is_asserted(xsrc, srcno)) { + kvmppc_xive_esb_trigger(xsrc, srcno); + } + return 0; + } else { + return xive_esb_rw(xsrc, srcno, offset, 0, 0); + } +} + +static void kvmppc_xive_source_get_state(XiveSource *xsrc) +{ + SpaprXive *xive = SPAPR_XIVE(xsrc->xive); + int i; + + for (i = 0; i < xsrc->nr_irqs; i++) { + uint8_t pq; + + if (!xive_eas_is_valid(&xive->eat[i])) { + continue; + } + + /* Perform a load without side effect to retrieve the PQ bits */ + pq = xive_esb_read(xsrc, i, XIVE_ESB_GET); + + /* and save PQ locally */ + xive_source_esb_set(xsrc, i, pq); + } +} + +void kvmppc_xive_source_set_irq(void *opaque, int srcno, int val) +{ + XiveSource *xsrc = opaque; + + if (!xive_source_irq_is_lsi(xsrc, srcno)) { + if (!val) { + return; + } + } else { + xive_source_set_asserted(xsrc, srcno, val); + } + + kvmppc_xive_esb_trigger(xsrc, srcno); +} + +/* + * sPAPR XIVE interrupt controller (KVM) + */ +int kvmppc_xive_get_queue_config(SpaprXive *xive, uint8_t end_blk, + uint32_t end_idx, XiveEND *end, + Error **errp) +{ + struct kvm_ppc_xive_eq kvm_eq = { 0 }; + uint64_t kvm_eq_idx; + uint8_t priority; + uint32_t server; + int ret; + + assert(xive_end_is_valid(end)); + + /* Encode the tuple (server, prio) as a KVM EQ index */ + spapr_xive_end_to_target(end_blk, end_idx, &server, &priority); + + kvm_eq_idx = priority << KVM_XIVE_EQ_PRIORITY_SHIFT & + KVM_XIVE_EQ_PRIORITY_MASK; + kvm_eq_idx |= server << KVM_XIVE_EQ_SERVER_SHIFT & + KVM_XIVE_EQ_SERVER_MASK; + + ret = kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_EQ_CONFIG, kvm_eq_idx, + &kvm_eq, false, errp); + if (ret < 0) { + return ret; + } + + /* + * The EQ index and toggle bit are updated by HW. These are the + * only fields from KVM we want to update QEMU with. The other END + * fields should already be in the QEMU END table. + */ + end->w1 = xive_set_field32(END_W1_GENERATION, 0ul, kvm_eq.qtoggle) | + xive_set_field32(END_W1_PAGE_OFF, 0ul, kvm_eq.qindex); + + return 0; +} + +int kvmppc_xive_set_queue_config(SpaprXive *xive, uint8_t end_blk, + uint32_t end_idx, XiveEND *end, + Error **errp) +{ + struct kvm_ppc_xive_eq kvm_eq = { 0 }; + uint64_t kvm_eq_idx; + uint8_t priority; + uint32_t server; + + /* + * Build the KVM state from the local END structure. 
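+     *
+     * (Sketch with made-up values: server 3, priority 5 are encoded
+     * below as
+     *
+     *   kvm_eq_idx = (5 << KVM_XIVE_EQ_PRIORITY_SHIFT)
+     *                    & KVM_XIVE_EQ_PRIORITY_MASK
+     *              | (3 << KVM_XIVE_EQ_SERVER_SHIFT)
+     *                    & KVM_XIVE_EQ_SERVER_MASK;
+     *
+     * the same (server, priority) -> EQ index encoding used by
+     * kvmppc_xive_get_queue_config().)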
+     */
+
+    kvm_eq.flags = 0;
+    if (xive_get_field32(END_W0_UCOND_NOTIFY, end->w0)) {
+        kvm_eq.flags |= KVM_XIVE_EQ_ALWAYS_NOTIFY;
+    }
+
+    /*
+     * If the hcall is disabling the EQ, set the size and page address
+     * to zero. When migrating, only valid ENDs are taken into
+     * account.
+     */
+    if (xive_end_is_valid(end)) {
+        kvm_eq.qshift = xive_get_field32(END_W0_QSIZE, end->w0) + 12;
+        kvm_eq.qaddr = xive_end_qaddr(end);
+        /*
+         * The EQ toggle bit and index should only be relevant when
+         * restoring the EQ state
+         */
+        kvm_eq.qtoggle = xive_get_field32(END_W1_GENERATION, end->w1);
+        kvm_eq.qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1);
+    } else {
+        kvm_eq.qshift = 0;
+        kvm_eq.qaddr = 0;
+    }
+
+    /* Encode the tuple (server, prio) as a KVM EQ index */
+    spapr_xive_end_to_target(end_blk, end_idx, &server, &priority);
+
+    kvm_eq_idx = priority << KVM_XIVE_EQ_PRIORITY_SHIFT &
+        KVM_XIVE_EQ_PRIORITY_MASK;
+    kvm_eq_idx |= server << KVM_XIVE_EQ_SERVER_SHIFT &
+        KVM_XIVE_EQ_SERVER_MASK;
+
+    return
+        kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_EQ_CONFIG, kvm_eq_idx,
+                          &kvm_eq, true, errp);
+}
+
+void kvmppc_xive_reset(SpaprXive *xive, Error **errp)
+{
+    kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_CTRL, KVM_DEV_XIVE_RESET,
+                      NULL, true, errp);
+}
+
+static int kvmppc_xive_get_queues(SpaprXive *xive, Error **errp)
+{
+    int i;
+    int ret;
+
+    for (i = 0; i < xive->nr_ends; i++) {
+        if (!xive_end_is_valid(&xive->endt[i])) {
+            continue;
+        }
+
+        ret = kvmppc_xive_get_queue_config(xive, SPAPR_XIVE_BLOCK_ID, i,
+                                           &xive->endt[i], errp);
+        if (ret < 0) {
+            return ret;
+        }
+    }
+
+    return 0;
+}
+
+/*
+ * The primary goal of the XIVE VM change handler is to mark the EQ
+ * pages dirty when all XIVE event notifications have stopped.
+ *
+ * Whenever the VM is stopped, the VM change handler sets the source
+ * PQs to PENDING to stop the flow of events and to possibly catch a
+ * triggered interrupt occurring while the VM is stopped. The previous
+ * state is saved in anticipation of a migration. The XIVE controller
+ * is then synced through KVM to flush any in-flight event
+ * notification and stabilize the EQs.
+ *
+ * At this stage, we can mark the EQ page dirty and let a migration
+ * sequence transfer the EQ pages to the destination, which is done
+ * just after the stop state.
+ *
+ * The previous configuration of the sources is restored when the VM
+ * runs again. If an interrupt was queued while the VM was stopped,
+ * simply generate a trigger.
+ */
+static void kvmppc_xive_change_state_handler(void *opaque, bool running,
+                                             RunState state)
+{
+    SpaprXive *xive = opaque;
+    XiveSource *xsrc = &xive->source;
+    Error *local_err = NULL;
+    int i;
+
+    /*
+     * Restore the sources to their initial state. This is called when
+     * the VM resumes after a stop or a migration.
+     */
+    if (running) {
+        for (i = 0; i < xsrc->nr_irqs; i++) {
+            uint8_t pq;
+            uint8_t old_pq;
+
+            if (!xive_eas_is_valid(&xive->eat[i])) {
+                continue;
+            }
+
+            pq = xive_source_esb_get(xsrc, i);
+            old_pq = xive_esb_read(xsrc, i, XIVE_ESB_SET_PQ_00 + (pq << 8));
+
+            /*
+             * An interrupt was queued while the VM was stopped,
+             * generate a trigger.
+             */
+            if (pq == XIVE_ESB_RESET && old_pq == XIVE_ESB_QUEUED) {
+                kvmppc_xive_esb_trigger(xsrc, i);
+            }
+        }
+
+        return;
+    }
+
+    /*
+     * Mask the sources, to stop the flow of event notifications, and
+     * save the PQs locally in the XiveSource object. The XiveSource
+     * state will be collected later on by its vmstate handler if a
+     * migration is in progress.
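+     *
+     * (Example of the round trip: a source found in RESET (PQ=00) is
+     * masked to PENDING (PQ=10) below, with 00 saved locally. If the
+     * source fires while the VM is stopped, the hardware state moves
+     * to QUEUED (PQ=11); on resume, the restore loop above sees a
+     * saved 00 but reads back 11, and re-generates a trigger.)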
+     */
+    for (i = 0; i < xsrc->nr_irqs; i++) {
+        uint8_t pq;
+
+        if (!xive_eas_is_valid(&xive->eat[i])) {
+            continue;
+        }
+
+        pq = xive_esb_read(xsrc, i, XIVE_ESB_GET);
+
+        /*
+         * PQ is set to PENDING to possibly catch a triggered
+         * interrupt occurring while the VM is stopped (a hotplug
+         * event, for instance).
+         */
+        if (pq != XIVE_ESB_OFF) {
+            pq = xive_esb_read(xsrc, i, XIVE_ESB_SET_PQ_10);
+        }
+        xive_source_esb_set(xsrc, i, pq);
+    }
+
+    /*
+     * Sync the XIVE controller in KVM, to flush in-flight event
+     * notification that should be enqueued in the EQs and mark the
+     * XIVE EQ pages dirty to collect all updates.
+     */
+    kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_CTRL,
+                      KVM_DEV_XIVE_EQ_SYNC, NULL, true, &local_err);
+    if (local_err) {
+        error_report_err(local_err);
+        return;
+    }
+}
+
+void kvmppc_xive_synchronize_state(SpaprXive *xive, Error **errp)
+{
+    assert(xive->fd != -1);
+
+    /*
+     * When the VM is stopped, the sources are masked and the previous
+     * state is saved in anticipation of a migration. We should not
+     * synchronize the source state in that case, or else we will
+     * override the saved state.
+     */
+    if (runstate_is_running()) {
+        kvmppc_xive_source_get_state(&xive->source);
+    }
+
+    /* EAT: there is no extra state to query from KVM */
+
+    /* ENDT */
+    kvmppc_xive_get_queues(xive, errp);
+}
+
+/*
+ * The SpaprXive 'pre_save' method is called by the vmstate handler of
+ * the SpaprXive model, after the XIVE controller is synced in the VM
+ * change handler.
+ */
+int kvmppc_xive_pre_save(SpaprXive *xive)
+{
+    Error *local_err = NULL;
+    int ret;
+
+    assert(xive->fd != -1);
+
+    /* EAT: there is no extra state to query from KVM */
+
+    /* ENDT */
+    ret = kvmppc_xive_get_queues(xive, &local_err);
+    if (ret < 0) {
+        error_report_err(local_err);
+        return ret;
+    }
+
+    return 0;
+}
+
+/*
+ * The SpaprXive 'post_load' method is not called by a vmstate
+ * handler. It is called at the sPAPR machine level at the end of the
+ * migration sequence by the sPAPR IRQ backend 'post_load' method,
+ * when all XIVE states have been transferred and loaded.
+ */
+int kvmppc_xive_post_load(SpaprXive *xive, int version_id)
+{
+    Error *local_err = NULL;
+    CPUState *cs;
+    int i;
+    int ret;
+
+    /* The KVM XIVE device should be in use */
+    assert(xive->fd != -1);
+
+    /* Restore the ENDT first. The targeting depends on it. */
+    for (i = 0; i < xive->nr_ends; i++) {
+        if (!xive_end_is_valid(&xive->endt[i])) {
+            continue;
+        }
+
+        ret = kvmppc_xive_set_queue_config(xive, SPAPR_XIVE_BLOCK_ID, i,
+                                           &xive->endt[i], &local_err);
+        if (ret < 0) {
+            goto fail;
+        }
+    }
+
+    /* Restore the EAT */
+    for (i = 0; i < xive->nr_irqs; i++) {
+        if (!xive_eas_is_valid(&xive->eat[i])) {
+            continue;
+        }
+
+        /*
+         * We can only restore the source config if the source has been
+         * previously set in KVM. Since we don't do that for all interrupts
+         * at reset time anymore, let's do it now.
+         */
+        ret = kvmppc_xive_source_reset_one(&xive->source, i, &local_err);
+        if (ret < 0) {
+            goto fail;
+        }
+
+        ret = kvmppc_xive_set_source_config(xive, i, &xive->eat[i], &local_err);
+        if (ret < 0) {
+            goto fail;
+        }
+    }
+
+    /*
+     * Restore the thread interrupt contexts of initial CPUs.
+     *
+     * The context of hotplugged CPUs is restored later, by the
+     * 'post_load' handler of the XiveTCTX model because they are not
+     * available at the time the SpaprXive 'post_load' method is
+     * called. We cannot restore the context of all CPUs in the
+     * 'post_load' handler of XiveTCTX because the machine is not
+     * necessarily connected to the KVM device at that time.
+     */
+    CPU_FOREACH(cs) {
+        PowerPCCPU *cpu = POWERPC_CPU(cs);
+
+        ret = kvmppc_xive_cpu_set_state(spapr_cpu_state(cpu)->tctx, &local_err);
+        if (ret < 0) {
+            goto fail;
+        }
+    }
+
+    /* The source states will be restored when the machine starts running */
+    return 0;
+
+fail:
+    error_report_err(local_err);
+    return ret;
+}
+
+/* Returns MAP_FAILED on error and sets errno */
+static void *kvmppc_xive_mmap(SpaprXive *xive, int pgoff, size_t len,
+                              Error **errp)
+{
+    void *addr;
+    uint32_t page_shift = 16; /* TODO: fix page_shift */
+
+    addr = mmap(NULL, len, PROT_WRITE | PROT_READ, MAP_SHARED, xive->fd,
+                pgoff << page_shift);
+    if (addr == MAP_FAILED) {
+        error_setg_errno(errp, errno, "XIVE: unable to set memory mapping");
+    }
+
+    return addr;
+}
+
+/*
+ * All the XIVE memory regions are now backed by mappings from the KVM
+ * XIVE device.
+ */
+int kvmppc_xive_connect(SpaprInterruptController *intc, uint32_t nr_servers,
+                        Error **errp)
+{
+    SpaprXive *xive = SPAPR_XIVE(intc);
+    XiveSource *xsrc = &xive->source;
+    size_t esb_len = xive_source_esb_len(xsrc);
+    size_t tima_len = 4ull << TM_SHIFT;
+    CPUState *cs;
+    int fd;
+    void *addr;
+    int ret;
+
+    /*
+     * The KVM XIVE device is already in use. This is the case when
+     * rebooting under the XIVE-only interrupt mode.
+     */
+    if (xive->fd != -1) {
+        return 0;
+    }
+
+    if (!kvmppc_has_cap_xive()) {
+        error_setg(errp, "IRQ_XIVE capability must be present for KVM");
+        return -1;
+    }
+
+    /* First, create the KVM XIVE device */
+    fd = kvm_create_device(kvm_state, KVM_DEV_TYPE_XIVE, false);
+    if (fd < 0) {
+        error_setg_errno(errp, -fd, "XIVE: error creating KVM device");
+        return -1;
+    }
+    xive->fd = fd;
+
+    /* Tell KVM about the # of VCPUs we may have */
+    if (kvm_device_check_attr(xive->fd, KVM_DEV_XIVE_GRP_CTRL,
+                              KVM_DEV_XIVE_NR_SERVERS)) {
+        ret = kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_CTRL,
+                                KVM_DEV_XIVE_NR_SERVERS, &nr_servers, true,
+                                errp);
+        if (ret < 0) {
+            goto fail;
+        }
+    }
+
+    /*
+     * 1. Source ESB pages - KVM mapping
+     */
+    addr = kvmppc_xive_mmap(xive, KVM_XIVE_ESB_PAGE_OFFSET, esb_len, errp);
+    if (addr == MAP_FAILED) {
+        goto fail;
+    }
+    xsrc->esb_mmap = addr;
+
+    memory_region_init_ram_device_ptr(&xsrc->esb_mmio_kvm, OBJECT(xsrc),
+                                      "xive.esb-kvm", esb_len, xsrc->esb_mmap);
+    memory_region_add_subregion_overlap(&xsrc->esb_mmio, 0,
+                                        &xsrc->esb_mmio_kvm, 1);
+
+    /*
+     * 2. END ESB pages (No KVM support yet)
+     */
+
+    /*
+     * 3.
TIMA pages - KVM mapping + */ + addr = kvmppc_xive_mmap(xive, KVM_XIVE_TIMA_PAGE_OFFSET, tima_len, errp); + if (addr == MAP_FAILED) { + goto fail; + } + xive->tm_mmap = addr; + + memory_region_init_ram_device_ptr(&xive->tm_mmio_kvm, OBJECT(xive), + "xive.tima", tima_len, xive->tm_mmap); + memory_region_add_subregion_overlap(&xive->tm_mmio, 0, + &xive->tm_mmio_kvm, 1); + + xive->change = qemu_add_vm_change_state_handler( + kvmppc_xive_change_state_handler, xive); + + /* Connect the presenters to the initial VCPUs of the machine */ + CPU_FOREACH(cs) { + PowerPCCPU *cpu = POWERPC_CPU(cs); + + ret = kvmppc_xive_cpu_connect(spapr_cpu_state(cpu)->tctx, errp); + if (ret < 0) { + goto fail; + } + } + + /* Update the KVM sources */ + ret = kvmppc_xive_source_reset(xsrc, errp); + if (ret < 0) { + goto fail; + } + + kvm_kernel_irqchip = true; + kvm_msi_via_irqfd_allowed = true; + kvm_gsi_direct_mapping = true; + return 0; + +fail: + kvmppc_xive_disconnect(intc); + return -1; +} + +void kvmppc_xive_disconnect(SpaprInterruptController *intc) +{ + SpaprXive *xive = SPAPR_XIVE(intc); + XiveSource *xsrc; + size_t esb_len; + + assert(xive->fd != -1); + + /* Clear the KVM mapping */ + xsrc = &xive->source; + esb_len = xive_source_esb_len(xsrc); + + if (xsrc->esb_mmap) { + memory_region_del_subregion(&xsrc->esb_mmio, &xsrc->esb_mmio_kvm); + object_unparent(OBJECT(&xsrc->esb_mmio_kvm)); + munmap(xsrc->esb_mmap, esb_len); + xsrc->esb_mmap = NULL; + } + + if (xive->tm_mmap) { + memory_region_del_subregion(&xive->tm_mmio, &xive->tm_mmio_kvm); + object_unparent(OBJECT(&xive->tm_mmio_kvm)); + munmap(xive->tm_mmap, 4ull << TM_SHIFT); + xive->tm_mmap = NULL; + } + + /* + * When the KVM device fd is closed, the KVM device is destroyed + * and removed from the list of devices of the VM. The VCPU + * presenters are also detached from the device. + */ + close(xive->fd); + xive->fd = -1; + + kvm_kernel_irqchip = false; + kvm_msi_via_irqfd_allowed = false; + kvm_gsi_direct_mapping = false; + + /* Clear the local list of presenter (hotplug) */ + kvm_cpu_disable_all(); + + /* VM Change state handler is not needed anymore */ + if (xive->change) { + qemu_del_vm_change_state_handler(xive->change); + xive->change = NULL; + } +} diff --git a/hw/intc/trace-events b/hw/intc/trace-events new file mode 100644 index 000000000..9aba7e3a7 --- /dev/null +++ b/hw/intc/trace-events @@ -0,0 +1,248 @@ +# See docs/devel/tracing.rst for syntax documentation. 
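+#
+# A declaration below such as
+#   spapr_xive_esb(uint64_t flags, uint64_t lisn, uint64_t offset, uint64_t data) "flags=0x%"PRIx64" ..."
+# generates a trace_spapr_xive_esb() helper; the C code then emits the
+# event with a plain call, e.g. trace_spapr_xive_esb(flags, lisn,
+# offset, data) as hw/intc/spapr_xive.c does.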
+ +# i8259.c +pic_update_irq(bool master, uint8_t imr, uint8_t irr, uint8_t padd) "master %d imr %"PRIu8" irr %"PRIu8" padd %"PRIu8 +pic_set_irq(bool master, int irq, int level) "master %d irq %d level %d" +pic_interrupt(int irq, int intno) "irq %d intno %d" +pic_ioport_write(bool master, uint64_t addr, uint64_t val) "master %d addr 0x%"PRIx64" val 0x%"PRIx64 +pic_ioport_read(bool master, uint64_t addr, int val) "master %d addr 0x%"PRIx64" val 0x%x" + +# apic_common.c +cpu_set_apic_base(uint64_t val) "0x%016"PRIx64 +cpu_get_apic_base(uint64_t val) "0x%016"PRIx64 +# coalescing +apic_report_irq_delivered(int apic_irq_delivered) "coalescing %d" +apic_reset_irq_delivered(int apic_irq_delivered) "old coalescing %d" +apic_get_irq_delivered(int apic_irq_delivered) "returning coalescing %d" + +# apic.c +apic_local_deliver(int vector, uint32_t lvt) "vector %d delivery mode %d" +apic_deliver_irq(uint8_t dest, uint8_t dest_mode, uint8_t delivery_mode, uint8_t vector_num, uint8_t trigger_mode) "dest %d dest_mode %d delivery_mode %d vector %d trigger_mode %d" +apic_mem_readl(uint64_t addr, uint32_t val) "0x%"PRIx64" = 0x%08x" +apic_mem_writel(uint64_t addr, uint32_t val) "0x%"PRIx64" = 0x%08x" + +# ioapic.c +ioapic_set_remote_irr(int n) "set remote irr for pin %d" +ioapic_clear_remote_irr(int n, int vector) "clear remote irr for pin %d vector %d" +ioapic_eoi_broadcast(int vector) "EOI broadcast for vector %d" +ioapic_eoi_delayed_reassert(int vector) "delayed reassert on EOI broadcast for vector %d" +ioapic_mem_read(uint8_t addr, uint8_t regsel, uint8_t size, uint32_t val) "ioapic mem read addr 0x%"PRIx8" regsel: 0x%"PRIx8" size 0x%"PRIx8" retval 0x%"PRIx32 +ioapic_mem_write(uint8_t addr, uint8_t regsel, uint8_t size, uint32_t val) "ioapic mem write addr 0x%"PRIx8" regsel: 0x%"PRIx8" size 0x%"PRIx8" val 0x%"PRIx32 +ioapic_set_irq(int vector, int level) "vector: %d level: %d" + +# slavio_intctl.c +slavio_intctl_mem_readl(uint32_t cpu, uint64_t addr, uint32_t ret) "read cpu %d reg 0x%"PRIx64" = 0x%x" +slavio_intctl_mem_writel(uint32_t cpu, uint64_t addr, uint32_t val) "write cpu %d reg 0x%"PRIx64" = 0x%x" +slavio_intctl_mem_writel_clear(uint32_t cpu, uint32_t val, uint32_t intreg_pending) "Cleared cpu %d irq mask 0x%x, curmask 0x%x" +slavio_intctl_mem_writel_set(uint32_t cpu, uint32_t val, uint32_t intreg_pending) "Set cpu %d irq mask 0x%x, curmask 0x%x" +slavio_intctlm_mem_readl(uint64_t addr, uint32_t ret) "read system reg 0x%"PRIx64" = 0x%x" +slavio_intctlm_mem_writel(uint64_t addr, uint32_t val) "write system reg 0x%"PRIx64" = 0x%x" +slavio_intctlm_mem_writel_enable(uint32_t val, uint32_t intregm_disabled) "Enabled master irq mask 0x%x, curmask 0x%x" +slavio_intctlm_mem_writel_disable(uint32_t val, uint32_t intregm_disabled) "Disabled master irq mask 0x%x, curmask 0x%x" +slavio_intctlm_mem_writel_target(uint32_t cpu) "Set master irq cpu %d" +slavio_check_interrupts(uint32_t pending, uint32_t intregm_disabled) "pending 0x%x disabled 0x%x" +slavio_set_irq(uint32_t target_cpu, int irq, uint32_t pil, int level) "Set cpu %d irq %d -> pil %d level %d" +slavio_set_timer_irq_cpu(int cpu, int level) "Set cpu %d local timer level %d" + +# grlib_irqmp.c +grlib_irqmp_check_irqs(uint32_t pend, uint32_t force, uint32_t mask, uint32_t lvl1, uint32_t lvl2) "pend:0x%04x force:0x%04x mask:0x%04x lvl1:0x%04x lvl0:0x%04x" +grlib_irqmp_ack(int intno) "interrupt:%d" +grlib_irqmp_set_irq(int irq) "Raise CPU IRQ %d" +grlib_irqmp_readl_unknown(uint64_t addr) "addr 0x%"PRIx64 +grlib_irqmp_writel_unknown(uint64_t addr, uint32_t 
value) "addr 0x%"PRIx64" value 0x%x" + +# xics.c +xics_icp_check_ipi(int server, uint8_t mfrr) "CPU %d can take IPI mfrr=0x%x" +xics_icp_accept(uint32_t old_xirr, uint32_t new_xirr) "icp_accept: XIRR 0x%"PRIx32"->0x%"PRIx32 +xics_icp_eoi(int server, uint32_t xirr, uint32_t new_xirr) "icp_eoi: server %d given XIRR 0x%"PRIx32" new XIRR 0x%"PRIx32 +xics_icp_irq(int server, int nr, uint8_t priority) "cpu %d trying to deliver irq 0x%"PRIx32" priority 0x%x" +xics_icp_raise(uint32_t xirr, uint8_t pending_priority) "raising IRQ new XIRR=0x%x new pending priority=0x%x" +xics_ics_set_irq_msi(int srcno, int nr) "set_irq_msi: srcno %d [irq 0x%x]" +xics_masked_pending(void) "set_irq_msi: masked pending" +xics_ics_set_irq_lsi(int srcno, int nr) "set_irq_lsi: srcno %d [irq 0x%x]" +xics_ics_write_xive(int nr, int srcno, int server, uint8_t priority) "ics_write_xive: irq 0x%x [src %d] server 0x%x prio 0x%x" +xics_ics_reject(int nr, int srcno) "reject irq 0x%x [src %d]" +xics_ics_eoi(int nr) "ics_eoi: irq 0x%x" + +# s390_flic_kvm.c +flic_create_device(int err) "flic: create device failed %d" +flic_reset_failed(int err) "flic: reset failed %d" + +# s390_flic.c +qemu_s390_airq_suppressed(uint8_t type, uint8_t isc) "flic: adapter I/O interrupt suppressed (type 0x%x isc 0x%x)" +qemu_s390_suppress_airq(uint8_t isc, const char *from, const char *to) "flic: for isc 0x%x, suppress airq by modifying ais mode from %s to %s" + +# aspeed_vic.c +aspeed_vic_set_irq(int irq, int level) "Enabling IRQ %d: %d" +aspeed_vic_update_fiq(int flags) "Raising FIQ: %d" +aspeed_vic_update_irq(int flags) "Raising IRQ: %d" +aspeed_vic_read(uint64_t offset, unsigned size, uint32_t value) "From 0x%" PRIx64 " of size %u: 0x%" PRIx32 +aspeed_vic_write(uint64_t offset, unsigned size, uint32_t data) "To 0x%" PRIx64 " of size %u: 0x%" PRIx32 + +# arm_gic.c +gic_enable_irq(int irq) "irq %d enabled" +gic_disable_irq(int irq) "irq %d disabled" +gic_set_irq(int irq, int level, int cpumask, int target) "irq %d level %d cpumask 0x%x target 0x%x" +gic_update_bestirq(const char *s, int cpu, int irq, int prio, int priority_mask, int running_priority) "%s %d irq %d priority %d cpu priority mask %d cpu running priority %d" +gic_update_set_irq(int cpu, const char *name, int level) "cpu[%d]: %s = %d" +gic_acknowledge_irq(const char *s, int cpu, int irq) "%s %d acknowledged irq %d" +gic_cpu_write(const char *s, int cpu, int addr, uint32_t val) "%s %d iface write at 0x%08x 0x%08" PRIx32 +gic_cpu_read(const char *s, int cpu, int addr, uint32_t val) "%s %d iface read at 0x%08x: 0x%08" PRIx32 +gic_hyp_read(int addr, uint32_t val) "hyp read at 0x%08x: 0x%08" PRIx32 +gic_hyp_write(int addr, uint32_t val) "hyp write at 0x%08x: 0x%08" PRIx32 +gic_dist_read(int addr, unsigned int size, uint32_t val) "dist read at 0x%08x size %u: 0x%08" PRIx32 +gic_dist_write(int addr, unsigned int size, uint32_t val) "dist write at 0x%08x size %u: 0x%08" PRIx32 +gic_lr_entry(int cpu, int entry, uint32_t val) "cpu %d: new lr entry %d: 0x%08" PRIx32 +gic_update_maintenance_irq(int cpu, int val) "cpu %d: maintenance = %d" + +# arm_gicv3_cpuif.c +gicv3_icc_pmr_read(uint32_t cpu, uint64_t val) "GICv3 ICC_PMR read cpu 0x%x value 0x%" PRIx64 +gicv3_icc_pmr_write(uint32_t cpu, uint64_t val) "GICv3 ICC_PMR write cpu 0x%x value 0x%" PRIx64 +gicv3_icc_bpr_read(int grp, uint32_t cpu, uint64_t val) "GICv3 ICC_BPR%d read cpu 0x%x value 0x%" PRIx64 +gicv3_icc_bpr_write(int grp, uint32_t cpu, uint64_t val) "GICv3 ICC_BPR%d write cpu 0x%x value 0x%" PRIx64 +gicv3_icc_ap_read(int grp, int regno, 
uint32_t cpu, uint64_t val) "GICv3 ICC_AP%dR%d read cpu 0x%x value 0x%" PRIx64 +gicv3_icc_ap_write(int grp, int regno, uint32_t cpu, uint64_t val) "GICv3 ICC_AP%dR%d write cpu 0x%x value 0x%" PRIx64 +gicv3_icc_igrpen_read(int grp, uint32_t cpu, uint64_t val) "GICv3 ICC_IGRPEN%d read cpu 0x%x value 0x%" PRIx64 +gicv3_icc_igrpen_write(int grp, uint32_t cpu, uint64_t val) "GICv3 ICC_IGRPEN%d write cpu 0x%x value 0x%" PRIx64 +gicv3_icc_igrpen1_el3_read(uint32_t cpu, uint64_t val) "GICv3 ICC_IGRPEN1_EL3 read cpu 0x%x value 0x%" PRIx64 +gicv3_icc_igrpen1_el3_write(uint32_t cpu, uint64_t val) "GICv3 ICC_IGRPEN1_EL3 write cpu 0x%x value 0x%" PRIx64 +gicv3_icc_ctlr_read(uint32_t cpu, uint64_t val) "GICv3 ICC_CTLR read cpu 0x%x value 0x%" PRIx64 +gicv3_icc_ctlr_write(uint32_t cpu, uint64_t val) "GICv3 ICC_CTLR write cpu 0x%x value 0x%" PRIx64 +gicv3_icc_ctlr_el3_read(uint32_t cpu, uint64_t val) "GICv3 ICC_CTLR_EL3 read cpu 0x%x value 0x%" PRIx64 +gicv3_icc_ctlr_el3_write(uint32_t cpu, uint64_t val) "GICv3 ICC_CTLR_EL3 write cpu 0x%x value 0x%" PRIx64 +gicv3_cpuif_update(uint32_t cpuid, int irq, int grp, int prio) "GICv3 CPU i/f 0x%x HPPI update: irq %d group %d prio %d" +gicv3_cpuif_set_irqs(uint32_t cpuid, int fiqlevel, int irqlevel) "GICv3 CPU i/f 0x%x HPPI update: setting FIQ %d IRQ %d" +gicv3_icc_generate_sgi(uint32_t cpuid, int irq, int irm, uint32_t aff, uint32_t targetlist) "GICv3 CPU i/f 0x%x generating SGI %d IRM %d target affinity 0x%xxx targetlist 0x%x" +gicv3_icc_iar0_read(uint32_t cpu, uint64_t val) "GICv3 ICC_IAR0 read cpu 0x%x value 0x%" PRIx64 +gicv3_icc_iar1_read(uint32_t cpu, uint64_t val) "GICv3 ICC_IAR1 read cpu 0x%x value 0x%" PRIx64 +gicv3_icc_eoir_write(int grp, uint32_t cpu, uint64_t val) "GICv3 ICC_EOIR%d write cpu 0x%x value 0x%" PRIx64 +gicv3_icc_hppir0_read(uint32_t cpu, uint64_t val) "GICv3 ICC_HPPIR0 read cpu 0x%x value 0x%" PRIx64 +gicv3_icc_hppir1_read(uint32_t cpu, uint64_t val) "GICv3 ICC_HPPIR1 read cpu 0x%x value 0x%" PRIx64 +gicv3_icc_dir_write(uint32_t cpu, uint64_t val) "GICv3 ICC_DIR write cpu 0x%x value 0x%" PRIx64 +gicv3_icc_rpr_read(uint32_t cpu, uint64_t val) "GICv3 ICC_RPR read cpu 0x%x value 0x%" PRIx64 +gicv3_ich_ap_read(int grp, int regno, uint32_t cpu, uint64_t val) "GICv3 ICH_AP%dR%d read cpu 0x%x value 0x%" PRIx64 +gicv3_ich_ap_write(int grp, int regno, uint32_t cpu, uint64_t val) "GICv3 ICH_AP%dR%d write cpu 0x%x value 0x%" PRIx64 +gicv3_ich_hcr_read(uint32_t cpu, uint64_t val) "GICv3 ICH_HCR_EL2 read cpu 0x%x value 0x%" PRIx64 +gicv3_ich_hcr_write(uint32_t cpu, uint64_t val) "GICv3 ICH_HCR_EL2 write cpu 0x%x value 0x%" PRIx64 +gicv3_ich_vmcr_read(uint32_t cpu, uint64_t val) "GICv3 ICH_VMCR_EL2 read cpu 0x%x value 0x%" PRIx64 +gicv3_ich_vmcr_write(uint32_t cpu, uint64_t val) "GICv3 ICH_VMCR_EL2 write cpu 0x%x value 0x%" PRIx64 +gicv3_ich_lr_read(int regno, uint32_t cpu, uint64_t val) "GICv3 ICH_LR%d_EL2 read cpu 0x%x value 0x%" PRIx64 +gicv3_ich_lr32_read(int regno, uint32_t cpu, uint32_t val) "GICv3 ICH_LR%d read cpu 0x%x value 0x%" PRIx32 +gicv3_ich_lrc_read(int regno, uint32_t cpu, uint32_t val) "GICv3 ICH_LRC%d read cpu 0x%x value 0x%" PRIx32 +gicv3_ich_lr_write(int regno, uint32_t cpu, uint64_t val) "GICv3 ICH_LR%d_EL2 write cpu 0x%x value 0x%" PRIx64 +gicv3_ich_lr32_write(int regno, uint32_t cpu, uint32_t val) "GICv3 ICH_LR%d write cpu 0x%x value 0x%" PRIx32 +gicv3_ich_lrc_write(int regno, uint32_t cpu, uint32_t val) "GICv3 ICH_LRC%d write cpu 0x%x value 0x%" PRIx32 +gicv3_ich_vtr_read(uint32_t cpu, uint64_t val) "GICv3 ICH_VTR read cpu 0x%x 
value 0x%" PRIx64 +gicv3_ich_misr_read(uint32_t cpu, uint64_t val) "GICv3 ICH_MISR read cpu 0x%x value 0x%" PRIx64 +gicv3_ich_eisr_read(uint32_t cpu, uint64_t val) "GICv3 ICH_EISR read cpu 0x%x value 0x%" PRIx64 +gicv3_ich_elrsr_read(uint32_t cpu, uint64_t val) "GICv3 ICH_ELRSR read cpu 0x%x value 0x%" PRIx64 +gicv3_icv_ap_read(int grp, int regno, uint32_t cpu, uint64_t val) "GICv3 ICV_AP%dR%d read cpu 0x%x value 0x%" PRIx64 +gicv3_icv_ap_write(int grp, int regno, uint32_t cpu, uint64_t val) "GICv3 ICV_AP%dR%d write cpu 0x%x value 0x%" PRIx64 +gicv3_icv_bpr_read(int grp, uint32_t cpu, uint64_t val) "GICv3 ICV_BPR%d read cpu 0x%x value 0x%" PRIx64 +gicv3_icv_bpr_write(int grp, uint32_t cpu, uint64_t val) "GICv3 ICV_BPR%d write cpu 0x%x value 0x%" PRIx64 +gicv3_icv_pmr_read(uint32_t cpu, uint64_t val) "GICv3 ICV_PMR read cpu 0x%x value 0x%" PRIx64 +gicv3_icv_pmr_write(uint32_t cpu, uint64_t val) "GICv3 ICV_PMR write cpu 0x%x value 0x%" PRIx64 +gicv3_icv_igrpen_read(int grp, uint32_t cpu, uint64_t val) "GICv3 ICV_IGRPEN%d read cpu 0x%x value 0x%" PRIx64 +gicv3_icv_igrpen_write(int grp, uint32_t cpu, uint64_t val) "GICv3 ICV_IGRPEN%d write cpu 0x%x value 0x%" PRIx64 +gicv3_icv_ctlr_read(uint32_t cpu, uint64_t val) "GICv3 ICV_CTLR read cpu 0x%x value 0x%" PRIx64 +gicv3_icv_ctlr_write(uint32_t cpu, uint64_t val) "GICv3 ICV_CTLR write cpu 0x%x value 0x%" PRIx64 +gicv3_icv_rpr_read(uint32_t cpu, uint64_t val) "GICv3 ICV_RPR read cpu 0x%x value 0x%" PRIx64 +gicv3_icv_hppir_read(int grp, uint32_t cpu, uint64_t val) "GICv3 ICV_HPPIR%d read cpu 0x%x value 0x%" PRIx64 +gicv3_icv_dir_write(uint32_t cpu, uint64_t val) "GICv3 ICV_DIR write cpu 0x%x value 0x%" PRIx64 +gicv3_icv_iar_read(int grp, uint32_t cpu, uint64_t val) "GICv3 ICV_IAR%d read cpu 0x%x value 0x%" PRIx64 +gicv3_icv_eoir_write(int grp, uint32_t cpu, uint64_t val) "GICv3 ICV_EOIR%d write cpu 0x%x value 0x%" PRIx64 +gicv3_cpuif_virt_update(uint32_t cpuid, int idx) "GICv3 CPU i/f 0x%x virt HPPI update LR index %d" +gicv3_cpuif_virt_set_irqs(uint32_t cpuid, int fiqlevel, int irqlevel, int maintlevel) "GICv3 CPU i/f 0x%x virt HPPI update: setting FIQ %d IRQ %d maintenance-irq %d" + +# arm_gicv3_dist.c +gicv3_dist_read(uint64_t offset, uint64_t data, unsigned size, bool secure) "GICv3 distributor read: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u secure %d" +gicv3_dist_badread(uint64_t offset, unsigned size, bool secure) "GICv3 distributor read: offset 0x%" PRIx64 " size %u secure %d: error" +gicv3_dist_write(uint64_t offset, uint64_t data, unsigned size, bool secure) "GICv3 distributor write: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u secure %d" +gicv3_dist_badwrite(uint64_t offset, uint64_t data, unsigned size, bool secure) "GICv3 distributor write: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u secure %d: error" +gicv3_dist_set_irq(int irq, int level) "GICv3 distributor interrupt %d level changed to %d" + +# arm_gicv3_redist.c +gicv3_redist_read(uint32_t cpu, uint64_t offset, uint64_t data, unsigned size, bool secure) "GICv3 redistributor 0x%x read: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u secure %d" +gicv3_redist_badread(uint32_t cpu, uint64_t offset, unsigned size, bool secure) "GICv3 redistributor 0x%x read: offset 0x%" PRIx64 " size %u secure %d: error" +gicv3_redist_write(uint32_t cpu, uint64_t offset, uint64_t data, unsigned size, bool secure) "GICv3 redistributor 0x%x write: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u secure %d" +gicv3_redist_badwrite(uint32_t cpu, uint64_t offset, uint64_t data, unsigned size, bool 
secure) "GICv3 redistributor 0x%x write: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u secure %d: error" +gicv3_redist_set_irq(uint32_t cpu, int irq, int level) "GICv3 redistributor 0x%x interrupt %d level changed to %d" +gicv3_redist_send_sgi(uint32_t cpu, int irq) "GICv3 redistributor 0x%x pending SGI %d" + +# armv7m_nvic.c +nvic_recompute_state(int vectpending, int vectpending_prio, int exception_prio) "NVIC state recomputed: vectpending %d vectpending_prio %d exception_prio %d" +nvic_recompute_state_secure(int vectpending, bool vectpending_is_s_banked, int vectpending_prio, int exception_prio) "NVIC state recomputed: vectpending %d is_s_banked %d vectpending_prio %d exception_prio %d" +nvic_set_prio(int irq, bool secure, uint8_t prio) "NVIC set irq %d secure-bank %d priority %d" +nvic_irq_update(int vectpending, int pendprio, int exception_prio, int level) "NVIC vectpending %d pending prio %d exception_prio %d: setting irq line to %d" +nvic_escalate_prio(int irq, int irqprio, int runprio) "NVIC escalating irq %d to HardFault: insufficient priority %d >= %d" +nvic_escalate_disabled(int irq) "NVIC escalating irq %d to HardFault: disabled" +nvic_set_pending(int irq, bool secure, bool targets_secure, bool derived, int en, int prio) "NVIC set pending irq %d secure-bank %d targets_secure %d derived %d (enabled: %d priority %d)" +nvic_clear_pending(int irq, bool secure, int en, int prio) "NVIC clear pending irq %d secure-bank %d (enabled: %d priority %d)" +nvic_acknowledge_irq(int irq, int prio) "NVIC acknowledge IRQ: %d now active (prio %d)" +nvic_get_pending_irq_info(int irq, bool secure) "NVIC next IRQ %d: targets_secure: %d" +nvic_complete_irq(int irq, bool secure) "NVIC complete IRQ %d (secure %d)" +nvic_set_irq_level(int irq, int level) "NVIC external irq %d level set to %d" +nvic_set_nmi_level(int level) "NVIC external NMI level set to %d" +nvic_sysreg_read(uint64_t addr, uint32_t value, unsigned size) "NVIC sysreg read addr 0x%" PRIx64 " data 0x%" PRIx32 " size %u" +nvic_sysreg_write(uint64_t addr, uint32_t value, unsigned size) "NVIC sysreg write addr 0x%" PRIx64 " data 0x%" PRIx32 " size %u" + +# heathrow_pic.c +heathrow_write(uint64_t addr, unsigned int n, uint64_t value) "0x%"PRIx64" %u: 0x%"PRIx64 +heathrow_read(uint64_t addr, unsigned int n, uint64_t value) "0x%"PRIx64" %u: 0x%"PRIx64 +heathrow_set_irq(int num, int level) "set_irq: num=0x%02x level=%d" + +# bcm2835_ic.c +bcm2835_ic_set_gpu_irq(int irq, int level) "GPU irq #%d level %d" +bcm2835_ic_set_cpu_irq(int irq, int level) "CPU irq #%d level %d" + +# spapr_xive.c +spapr_xive_claim_irq(uint32_t lisn, bool lsi) "lisn=0x%x lsi=%d" +spapr_xive_free_irq(uint32_t lisn) "lisn=0x%x" +spapr_xive_set_irq(uint32_t lisn, uint32_t val) "lisn=0x%x val=%d" +spapr_xive_get_source_info(uint64_t flags, uint64_t lisn) "flags=0x%"PRIx64" lisn=0x%"PRIx64 +spapr_xive_set_source_config(uint64_t flags, uint64_t lisn, uint64_t target, uint64_t priority, uint64_t eisn) "flags=0x%"PRIx64" lisn=0x%"PRIx64" target=0x%"PRIx64" priority=0x%"PRIx64" eisn=0x%"PRIx64 +spapr_xive_get_source_config(uint64_t flags, uint64_t lisn) "flags=0x%"PRIx64" lisn=0x%"PRIx64 +spapr_xive_get_queue_info(uint64_t flags, uint64_t target, uint64_t priority) "flags=0x%"PRIx64" target=0x%"PRIx64" priority=0x%"PRIx64 +spapr_xive_set_queue_config(uint64_t flags, uint64_t target, uint64_t priority, uint64_t qpage, uint64_t qsize) "flags=0x%"PRIx64" target=0x%"PRIx64" priority=0x%"PRIx64" qpage=0x%"PRIx64" qsize=0x%"PRIx64 +spapr_xive_get_queue_config(uint64_t flags, uint64_t 
target, uint64_t priority) "flags=0x%"PRIx64" target=0x%"PRIx64" priority=0x%"PRIx64 +spapr_xive_set_os_reporting_line(uint64_t flags) "flags=0x%"PRIx64 +spapr_xive_get_os_reporting_line(uint64_t flags) "flags=0x%"PRIx64 +spapr_xive_esb(uint64_t flags, uint64_t lisn, uint64_t offset, uint64_t data) "flags=0x%"PRIx64" lisn=0x%"PRIx64" offset=0x%"PRIx64" data=0x%"PRIx64 +spapr_xive_sync(uint64_t flags, uint64_t lisn) "flags=0x%"PRIx64" lisn=0x%"PRIx64 +spapr_xive_reset(uint64_t flags) "flags=0x%"PRIx64 + +# spapr_xive_kvm.c +kvm_xive_cpu_connect(uint32_t id) "connect CPU%d to KVM device" +kvm_xive_source_reset(uint32_t srcno) "IRQ 0x%x" + +# xive.c +xive_tctx_accept(uint32_t index, uint8_t ring, uint8_t ipb, uint8_t pipr, uint8_t cppr, uint8_t nsr) "target=%d ring=0x%x IBP=0x%02x PIPR=0x%02x CPPR=0x%02x NSR=0x%02x ACK" +xive_tctx_notify(uint32_t index, uint8_t ring, uint8_t ipb, uint8_t pipr, uint8_t cppr, uint8_t nsr) "target=%d ring=0x%x IBP=0x%02x PIPR=0x%02x CPPR=0x%02x NSR=0x%02x raise !" +xive_tctx_set_cppr(uint32_t index, uint8_t ring, uint8_t ipb, uint8_t pipr, uint8_t cppr, uint8_t nsr) "target=%d ring=0x%x IBP=0x%02x PIPR=0x%02x new CPPR=0x%02x NSR=0x%02x" +xive_source_esb_read(uint64_t addr, uint32_t srcno, uint64_t value) "@0x%"PRIx64" IRQ 0x%x val=0x%"PRIx64 +xive_source_esb_write(uint64_t addr, uint32_t srcno, uint64_t value) "@0x%"PRIx64" IRQ 0x%x val=0x%"PRIx64 +xive_router_end_notify(uint8_t end_blk, uint32_t end_idx, uint32_t end_data) "END 0x%02x/0x%04x -> enqueue 0x%08x" +xive_router_end_escalate(uint8_t end_blk, uint32_t end_idx, uint8_t esc_blk, uint32_t esc_idx, uint32_t end_data) "END 0x%02x/0x%04x -> escalate END 0x%02x/0x%04x data 0x%08x" +xive_tctx_tm_write(uint64_t offset, unsigned int size, uint64_t value) "@0x%"PRIx64" sz=%d val=0x%" PRIx64 +xive_tctx_tm_read(uint64_t offset, unsigned int size, uint64_t value) "@0x%"PRIx64" sz=%d val=0x%" PRIx64 +xive_presenter_notify(uint8_t nvt_blk, uint32_t nvt_idx, uint8_t ring) "found NVT 0x%x/0x%x ring=0x%x" +xive_end_source_read(uint8_t end_blk, uint32_t end_idx, uint64_t addr) "END 0x%x/0x%x @0x%"PRIx64 + +# pnv_xive.c +pnv_xive_ic_hw_trigger(uint64_t addr, uint64_t val) "@0x%"PRIx64" val=0x%"PRIx64 + +# goldfish_pic.c +goldfish_irq_request(void *dev, int idx, int irq, int level) "pic: %p goldfish-irq.%d irq: %d level: %d" +goldfish_pic_read(void *dev, int idx, unsigned int addr, unsigned int size, uint64_t value) "pic: %p goldfish-irq.%d reg: 0x%02x size: %d value: 0x%"PRIx64 +goldfish_pic_write(void *dev, int idx, unsigned int addr, unsigned int size, uint64_t value) "pic: %p goldfish-irq.%d reg: 0x%02x size: %d value: 0x%"PRIx64 +goldfish_pic_reset(void *dev, int idx) "pic: %p goldfish-irq.%d" +goldfish_pic_realize(void *dev, int idx) "pic: %p goldfish-irq.%d" +goldfish_pic_instance_init(void *dev) "pic: %p goldfish-irq" + +# sh_intc.c +sh_intc_sources(int p, int a, int c, int m, unsigned short v, const char *s1, const char *s2, const char *s3) "(%d/%d/%d/%d) interrupt source 0x%x %s%s%s" +sh_intc_pending(int p, unsigned short v) "(%d) returning interrupt source 0x%x" +sh_intc_register(const char *s, int id, unsigned short v, int c, int m) "%s %u -> 0x%04x (%d/%d)" +sh_intc_read(unsigned size, uint64_t offset, unsigned long val) "size %u 0x%" PRIx64 " -> 0x%lx" +sh_intc_write(unsigned size, uint64_t offset, unsigned long val) "size %u 0x%" PRIx64 " <- 0x%lx" +sh_intc_set(int id, int enable) "setting interrupt group %d to %d" diff --git a/hw/intc/trace.h b/hw/intc/trace.h new file mode 100644 index 000000000..02394aea2 
--- /dev/null
+++ b/hw/intc/trace.h
@@ -0,0 +1 @@
+#include "trace/trace-hw_intc.h"
diff --git a/hw/intc/vgic_common.h b/hw/intc/vgic_common.h
new file mode 100644
index 000000000..80d919eb9
--- /dev/null
+++ b/hw/intc/vgic_common.h
@@ -0,0 +1,35 @@
+/*
+ * ARM KVM vGIC utility functions
+ *
+ * Copyright (c) 2015 Samsung Electronics
+ * Written by Pavel Fedin
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef QEMU_ARM_VGIC_COMMON_H
+#define QEMU_ARM_VGIC_COMMON_H
+
+/**
+ * kvm_arm_gic_set_irq - Send an IRQ to the in-kernel vGIC
+ * @num_irq: Total number of IRQs configured for the GIC instance
+ * @irq: qemu internal IRQ line number:
+ *  [0..N-1] : external interrupts
+ *  [N..N+31] : PPI (internal) interrupts for CPU 0
+ *  [N+32..N+63] : PPI (internal) interrupts for CPU 1
+ * @level: level of the IRQ line.
+ */
+void kvm_arm_gic_set_irq(uint32_t num_irq, int irq, int level);
+
+#endif
diff --git a/hw/intc/xics.c b/hw/intc/xics.c
new file mode 100644
index 000000000..48a835eab
--- /dev/null
+++ b/hw/intc/xics.c
@@ -0,0 +1,751 @@
+/*
+ * QEMU PowerPC pSeries Logical Partition (aka sPAPR) hardware System Emulator
+ *
+ * PAPR Virtualized Interrupt System, aka ICS/ICP aka xics
+ *
+ * Copyright (c) 2010,2011 David Gibson, IBM Corporation.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "trace.h"
+#include "qemu/timer.h"
+#include "hw/ppc/xics.h"
+#include "hw/qdev-properties.h"
+#include "qemu/error-report.h"
+#include "qemu/module.h"
+#include "qapi/visitor.h"
+#include "migration/vmstate.h"
+#include "monitor/monitor.h"
+#include "hw/intc/intc.h"
+#include "hw/irq.h"
+#include "sysemu/kvm.h"
+#include "sysemu/reset.h"
+
+void icp_pic_print_info(ICPState *icp, Monitor *mon)
+{
+    int cpu_index;
+
+    /* Skip partially initialized vCPUs. This can happen on sPAPR when vCPUs
+     * are hot plugged or unplugged.
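+     * A NULL ICP here just means no presenter has been created for
+     * that vCPU yet, so there is nothing to print.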
+ */ + if (!icp) { + return; + } + + cpu_index = icp->cs ? icp->cs->cpu_index : -1; + + if (!icp->output) { + return; + } + + if (kvm_irqchip_in_kernel()) { + icp_synchronize_state(icp); + } + + monitor_printf(mon, "CPU %d XIRR=%08x (%p) PP=%02x MFRR=%02x\n", + cpu_index, icp->xirr, icp->xirr_owner, + icp->pending_priority, icp->mfrr); +} + +void ics_pic_print_info(ICSState *ics, Monitor *mon) +{ + uint32_t i; + + monitor_printf(mon, "ICS %4x..%4x %p\n", + ics->offset, ics->offset + ics->nr_irqs - 1, ics); + + if (!ics->irqs) { + return; + } + + if (kvm_irqchip_in_kernel()) { + ics_synchronize_state(ics); + } + + for (i = 0; i < ics->nr_irqs; i++) { + ICSIRQState *irq = ics->irqs + i; + + if (!(irq->flags & XICS_FLAGS_IRQ_MASK)) { + continue; + } + monitor_printf(mon, " %4x %s %02x %02x\n", + ics->offset + i, + (irq->flags & XICS_FLAGS_IRQ_LSI) ? + "LSI" : "MSI", + irq->priority, irq->status); + } +} + +/* + * ICP: Presentation layer + */ + +#define XISR_MASK 0x00ffffff +#define CPPR_MASK 0xff000000 + +#define XISR(icp) (((icp)->xirr) & XISR_MASK) +#define CPPR(icp) (((icp)->xirr) >> 24) + +static void ics_reject(ICSState *ics, uint32_t nr); +static void ics_eoi(ICSState *ics, uint32_t nr); + +static void icp_check_ipi(ICPState *icp) +{ + if (XISR(icp) && (icp->pending_priority <= icp->mfrr)) { + return; + } + + trace_xics_icp_check_ipi(icp->cs->cpu_index, icp->mfrr); + + if (XISR(icp) && icp->xirr_owner) { + ics_reject(icp->xirr_owner, XISR(icp)); + } + + icp->xirr = (icp->xirr & ~XISR_MASK) | XICS_IPI; + icp->pending_priority = icp->mfrr; + icp->xirr_owner = NULL; + qemu_irq_raise(icp->output); +} + +void icp_resend(ICPState *icp) +{ + XICSFabric *xi = icp->xics; + XICSFabricClass *xic = XICS_FABRIC_GET_CLASS(xi); + + if (icp->mfrr < CPPR(icp)) { + icp_check_ipi(icp); + } + + xic->ics_resend(xi); +} + +void icp_set_cppr(ICPState *icp, uint8_t cppr) +{ + uint8_t old_cppr; + uint32_t old_xisr; + + old_cppr = CPPR(icp); + icp->xirr = (icp->xirr & ~CPPR_MASK) | (cppr << 24); + + if (cppr < old_cppr) { + if (XISR(icp) && (cppr <= icp->pending_priority)) { + old_xisr = XISR(icp); + icp->xirr &= ~XISR_MASK; /* Clear XISR */ + icp->pending_priority = 0xff; + qemu_irq_lower(icp->output); + if (icp->xirr_owner) { + ics_reject(icp->xirr_owner, old_xisr); + icp->xirr_owner = NULL; + } + } + } else { + if (!XISR(icp)) { + icp_resend(icp); + } + } +} + +void icp_set_mfrr(ICPState *icp, uint8_t mfrr) +{ + icp->mfrr = mfrr; + if (mfrr < CPPR(icp)) { + icp_check_ipi(icp); + } +} + +uint32_t icp_accept(ICPState *icp) +{ + uint32_t xirr = icp->xirr; + + qemu_irq_lower(icp->output); + icp->xirr = icp->pending_priority << 24; + icp->pending_priority = 0xff; + icp->xirr_owner = NULL; + + trace_xics_icp_accept(xirr, icp->xirr); + + return xirr; +} + +uint32_t icp_ipoll(ICPState *icp, uint32_t *mfrr) +{ + if (mfrr) { + *mfrr = icp->mfrr; + } + return icp->xirr; +} + +void icp_eoi(ICPState *icp, uint32_t xirr) +{ + XICSFabric *xi = icp->xics; + XICSFabricClass *xic = XICS_FABRIC_GET_CLASS(xi); + ICSState *ics; + uint32_t irq; + + /* Send EOI -> ICS */ + icp->xirr = (icp->xirr & ~CPPR_MASK) | (xirr & CPPR_MASK); + trace_xics_icp_eoi(icp->cs->cpu_index, xirr, icp->xirr); + irq = xirr & XISR_MASK; + + ics = xic->ics_get(xi, irq); + if (ics) { + ics_eoi(ics, irq); + } + if (!XISR(icp)) { + icp_resend(icp); + } +} + +void icp_irq(ICSState *ics, int server, int nr, uint8_t priority) +{ + ICPState *icp = xics_icp_get(ics->xics, server); + + trace_xics_icp_irq(server, nr, priority); + + if ((priority >= CPPR(icp)) + || 
(XISR(icp) && (icp->pending_priority <= priority))) { + ics_reject(ics, nr); + } else { + if (XISR(icp) && icp->xirr_owner) { + ics_reject(icp->xirr_owner, XISR(icp)); + icp->xirr_owner = NULL; + } + icp->xirr = (icp->xirr & ~XISR_MASK) | (nr & XISR_MASK); + icp->xirr_owner = ics; + icp->pending_priority = priority; + trace_xics_icp_raise(icp->xirr, icp->pending_priority); + qemu_irq_raise(icp->output); + } +} + +static int icp_pre_save(void *opaque) +{ + ICPState *icp = opaque; + + if (kvm_irqchip_in_kernel()) { + icp_get_kvm_state(icp); + } + + return 0; +} + +static int icp_post_load(void *opaque, int version_id) +{ + ICPState *icp = opaque; + + if (kvm_irqchip_in_kernel()) { + Error *local_err = NULL; + int ret; + + ret = icp_set_kvm_state(icp, &local_err); + if (ret < 0) { + error_report_err(local_err); + return ret; + } + } + + return 0; +} + +static const VMStateDescription vmstate_icp_server = { + .name = "icp/server", + .version_id = 1, + .minimum_version_id = 1, + .pre_save = icp_pre_save, + .post_load = icp_post_load, + .fields = (VMStateField[]) { + /* Sanity check */ + VMSTATE_UINT32(xirr, ICPState), + VMSTATE_UINT8(pending_priority, ICPState), + VMSTATE_UINT8(mfrr, ICPState), + VMSTATE_END_OF_LIST() + }, +}; + +void icp_reset(ICPState *icp) +{ + icp->xirr = 0; + icp->pending_priority = 0xff; + icp->mfrr = 0xff; + + if (kvm_irqchip_in_kernel()) { + Error *local_err = NULL; + + icp_set_kvm_state(icp, &local_err); + if (local_err) { + error_report_err(local_err); + } + } +} + +static void icp_realize(DeviceState *dev, Error **errp) +{ + ICPState *icp = ICP(dev); + CPUPPCState *env; + Error *err = NULL; + + assert(icp->xics); + assert(icp->cs); + + env = &POWERPC_CPU(icp->cs)->env; + switch (PPC_INPUT(env)) { + case PPC_FLAGS_INPUT_POWER7: + icp->output = env->irq_inputs[POWER7_INPUT_INT]; + break; + case PPC_FLAGS_INPUT_POWER9: /* For SPAPR xics emulation */ + icp->output = env->irq_inputs[POWER9_INPUT_INT]; + break; + + case PPC_FLAGS_INPUT_970: + icp->output = env->irq_inputs[PPC970_INPUT_INT]; + break; + + default: + error_setg(errp, "XICS interrupt controller does not support this CPU bus model"); + return; + } + + /* Connect the presenter to the VCPU (required for CPU hotplug) */ + if (kvm_irqchip_in_kernel()) { + icp_kvm_realize(dev, &err); + if (err) { + error_propagate(errp, err); + return; + } + } + + vmstate_register(NULL, icp->cs->cpu_index, &vmstate_icp_server, icp); +} + +static void icp_unrealize(DeviceState *dev) +{ + ICPState *icp = ICP(dev); + + vmstate_unregister(NULL, &vmstate_icp_server, icp); +} + +static Property icp_properties[] = { + DEFINE_PROP_LINK(ICP_PROP_XICS, ICPState, xics, TYPE_XICS_FABRIC, + XICSFabric *), + DEFINE_PROP_LINK(ICP_PROP_CPU, ICPState, cs, TYPE_CPU, CPUState *), + DEFINE_PROP_END_OF_LIST(), +}; + +static void icp_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = icp_realize; + dc->unrealize = icp_unrealize; + device_class_set_props(dc, icp_properties); + /* + * Reason: part of XICS interrupt controller, needs to be wired up + * by icp_create(). 
+ */ + dc->user_creatable = false; +} + +static const TypeInfo icp_info = { + .name = TYPE_ICP, + .parent = TYPE_DEVICE, + .instance_size = sizeof(ICPState), + .class_init = icp_class_init, + .class_size = sizeof(ICPStateClass), +}; + +Object *icp_create(Object *cpu, const char *type, XICSFabric *xi, Error **errp) +{ + Object *obj; + + obj = object_new(type); + object_property_add_child(cpu, type, obj); + object_unref(obj); + object_property_set_link(obj, ICP_PROP_XICS, OBJECT(xi), &error_abort); + object_property_set_link(obj, ICP_PROP_CPU, cpu, &error_abort); + if (!qdev_realize(DEVICE(obj), NULL, errp)) { + object_unparent(obj); + obj = NULL; + } + + return obj; +} + +void icp_destroy(ICPState *icp) +{ + Object *obj = OBJECT(icp); + + object_unparent(obj); +} + +/* + * ICS: Source layer + */ +static void ics_resend_msi(ICSState *ics, int srcno) +{ + ICSIRQState *irq = ics->irqs + srcno; + + /* FIXME: filter by server#? */ + if (irq->status & XICS_STATUS_REJECTED) { + irq->status &= ~XICS_STATUS_REJECTED; + if (irq->priority != 0xff) { + icp_irq(ics, irq->server, srcno + ics->offset, irq->priority); + } + } +} + +static void ics_resend_lsi(ICSState *ics, int srcno) +{ + ICSIRQState *irq = ics->irqs + srcno; + + if ((irq->priority != 0xff) + && (irq->status & XICS_STATUS_ASSERTED) + && !(irq->status & XICS_STATUS_SENT)) { + irq->status |= XICS_STATUS_SENT; + icp_irq(ics, irq->server, srcno + ics->offset, irq->priority); + } +} + +static void ics_set_irq_msi(ICSState *ics, int srcno, int val) +{ + ICSIRQState *irq = ics->irqs + srcno; + + trace_xics_ics_set_irq_msi(srcno, srcno + ics->offset); + + if (val) { + if (irq->priority == 0xff) { + irq->status |= XICS_STATUS_MASKED_PENDING; + trace_xics_masked_pending(); + } else { + icp_irq(ics, irq->server, srcno + ics->offset, irq->priority); + } + } +} + +static void ics_set_irq_lsi(ICSState *ics, int srcno, int val) +{ + ICSIRQState *irq = ics->irqs + srcno; + + trace_xics_ics_set_irq_lsi(srcno, srcno + ics->offset); + if (val) { + irq->status |= XICS_STATUS_ASSERTED; + } else { + irq->status &= ~XICS_STATUS_ASSERTED; + } + ics_resend_lsi(ics, srcno); +} + +void ics_set_irq(void *opaque, int srcno, int val) +{ + ICSState *ics = (ICSState *)opaque; + + if (kvm_irqchip_in_kernel()) { + ics_kvm_set_irq(ics, srcno, val); + return; + } + + if (ics->irqs[srcno].flags & XICS_FLAGS_IRQ_LSI) { + ics_set_irq_lsi(ics, srcno, val); + } else { + ics_set_irq_msi(ics, srcno, val); + } +} + +static void ics_write_xive_msi(ICSState *ics, int srcno) +{ + ICSIRQState *irq = ics->irqs + srcno; + + if (!(irq->status & XICS_STATUS_MASKED_PENDING) + || (irq->priority == 0xff)) { + return; + } + + irq->status &= ~XICS_STATUS_MASKED_PENDING; + icp_irq(ics, irq->server, srcno + ics->offset, irq->priority); +} + +static void ics_write_xive_lsi(ICSState *ics, int srcno) +{ + ics_resend_lsi(ics, srcno); +} + +void ics_write_xive(ICSState *ics, int srcno, int server, + uint8_t priority, uint8_t saved_priority) +{ + ICSIRQState *irq = ics->irqs + srcno; + + irq->server = server; + irq->priority = priority; + irq->saved_priority = saved_priority; + + trace_xics_ics_write_xive(ics->offset + srcno, srcno, server, priority); + + if (ics->irqs[srcno].flags & XICS_FLAGS_IRQ_LSI) { + ics_write_xive_lsi(ics, srcno); + } else { + ics_write_xive_msi(ics, srcno); + } +} + +static void ics_reject(ICSState *ics, uint32_t nr) +{ + ICSStateClass *isc = ICS_GET_CLASS(ics); + ICSIRQState *irq = ics->irqs + nr - ics->offset; + + if (isc->reject) { + isc->reject(ics, nr); + return; + } + + 
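+    /*
+     * Software-emulation fallback: record the rejection so that a
+     * later ics_resend() can redeliver the interrupt. ics_resend_msi()
+     * keys on XICS_STATUS_REJECTED, while clearing XICS_STATUS_SENT
+     * lets ics_resend_lsi() re-send a still-asserted LSI.
+     */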
+    trace_xics_ics_reject(nr, nr - ics->offset);
+    if (irq->flags & XICS_FLAGS_IRQ_MSI) {
+        irq->status |= XICS_STATUS_REJECTED;
+    } else if (irq->flags & XICS_FLAGS_IRQ_LSI) {
+        irq->status &= ~XICS_STATUS_SENT;
+    }
+}
+
+void ics_resend(ICSState *ics)
+{
+    ICSStateClass *isc = ICS_GET_CLASS(ics);
+    int i;
+
+    if (isc->resend) {
+        isc->resend(ics);
+        return;
+    }
+
+    for (i = 0; i < ics->nr_irqs; i++) {
+        /* FIXME: filter by server#? */
+        if (ics->irqs[i].flags & XICS_FLAGS_IRQ_LSI) {
+            ics_resend_lsi(ics, i);
+        } else {
+            ics_resend_msi(ics, i);
+        }
+    }
+}
+
+static void ics_eoi(ICSState *ics, uint32_t nr)
+{
+    int srcno = nr - ics->offset;
+    ICSIRQState *irq = ics->irqs + srcno;
+
+    trace_xics_ics_eoi(nr);
+
+    if (ics->irqs[srcno].flags & XICS_FLAGS_IRQ_LSI) {
+        irq->status &= ~XICS_STATUS_SENT;
+    }
+}
+
+static void ics_reset_irq(ICSIRQState *irq)
+{
+    irq->priority = 0xff;
+    irq->saved_priority = 0xff;
+}
+
+static void ics_reset(DeviceState *dev)
+{
+    ICSState *ics = ICS(dev);
+    int i;
+    uint8_t flags[ics->nr_irqs];
+
+    for (i = 0; i < ics->nr_irqs; i++) {
+        flags[i] = ics->irqs[i].flags;
+    }
+
+    memset(ics->irqs, 0, sizeof(ICSIRQState) * ics->nr_irqs);
+
+    for (i = 0; i < ics->nr_irqs; i++) {
+        ics_reset_irq(ics->irqs + i);
+        ics->irqs[i].flags = flags[i];
+    }
+
+    if (kvm_irqchip_in_kernel()) {
+        Error *local_err = NULL;
+
+        ics_set_kvm_state(ICS(dev), &local_err);
+        if (local_err) {
+            error_report_err(local_err);
+        }
+    }
+}
+
+static void ics_reset_handler(void *dev)
+{
+    ics_reset(dev);
+}
+
+static void ics_realize(DeviceState *dev, Error **errp)
+{
+    ICSState *ics = ICS(dev);
+
+    assert(ics->xics);
+
+    if (!ics->nr_irqs) {
+        error_setg(errp, "Number of interrupts needs to be greater than 0");
+        return;
+    }
+    ics->irqs = g_malloc0(ics->nr_irqs * sizeof(ICSIRQState));
+
+    qemu_register_reset(ics_reset_handler, ics);
+}
+
+static void ics_instance_init(Object *obj)
+{
+    ICSState *ics = ICS(obj);
+
+    ics->offset = XICS_IRQ_BASE;
+}
+
+static int ics_pre_save(void *opaque)
+{
+    ICSState *ics = opaque;
+
+    if (kvm_irqchip_in_kernel()) {
+        ics_get_kvm_state(ics);
+    }
+
+    return 0;
+}
+
+static int ics_post_load(void *opaque, int version_id)
+{
+    ICSState *ics = opaque;
+
+    if (kvm_irqchip_in_kernel()) {
+        Error *local_err = NULL;
+        int ret;
+
+        ret = ics_set_kvm_state(ics, &local_err);
+        if (ret < 0) {
+            error_report_err(local_err);
+            return ret;
+        }
+    }
+
+    return 0;
+}
+
+static const VMStateDescription vmstate_ics_irq = {
+    .name = "ics/irq",
+    .version_id = 2,
+    .minimum_version_id = 1,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT32(server, ICSIRQState),
+        VMSTATE_UINT8(priority, ICSIRQState),
+        VMSTATE_UINT8(saved_priority, ICSIRQState),
+        VMSTATE_UINT8(status, ICSIRQState),
+        VMSTATE_UINT8(flags, ICSIRQState),
+        VMSTATE_END_OF_LIST()
+    },
+};
+
+static const VMStateDescription vmstate_ics = {
+    .name = "ics",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .pre_save = ics_pre_save,
+    .post_load = ics_post_load,
+    .fields = (VMStateField[]) {
+        /* Sanity check */
+        VMSTATE_UINT32_EQUAL(nr_irqs, ICSState, NULL),
+
+        VMSTATE_STRUCT_VARRAY_POINTER_UINT32(irqs, ICSState, nr_irqs,
+                                             vmstate_ics_irq,
+                                             ICSIRQState),
+        VMSTATE_END_OF_LIST()
+    },
+};
+
+static Property ics_properties[] = {
+    DEFINE_PROP_UINT32("nr-irqs", ICSState, nr_irqs, 0),
+    DEFINE_PROP_LINK(ICS_PROP_XICS, ICSState, xics, TYPE_XICS_FABRIC,
+                     XICSFabric *),
+    DEFINE_PROP_END_OF_LIST(),
+};
+
+static void ics_class_init(ObjectClass *klass, void *data)
+{
+    DeviceClass *dc = DEVICE_CLASS(klass);
+
+    dc->realize = ics_realize;
device_class_set_props(dc, ics_properties); + dc->reset = ics_reset; + dc->vmsd = &vmstate_ics; + /* + * Reason: part of XICS interrupt controller, needs to be wired up, + * e.g. by spapr_irq_init(). + */ + dc->user_creatable = false; +} + +static const TypeInfo ics_info = { + .name = TYPE_ICS, + .parent = TYPE_DEVICE, + .instance_size = sizeof(ICSState), + .instance_init = ics_instance_init, + .class_init = ics_class_init, + .class_size = sizeof(ICSStateClass), +}; + +static const TypeInfo xics_fabric_info = { + .name = TYPE_XICS_FABRIC, + .parent = TYPE_INTERFACE, + .class_size = sizeof(XICSFabricClass), +}; + +/* + * Exported functions + */ +ICPState *xics_icp_get(XICSFabric *xi, int server) +{ + XICSFabricClass *xic = XICS_FABRIC_GET_CLASS(xi); + + return xic->icp_get(xi, server); +} + +void ics_set_irq_type(ICSState *ics, int srcno, bool lsi) +{ + assert(!(ics->irqs[srcno].flags & XICS_FLAGS_IRQ_MASK)); + + ics->irqs[srcno].flags |= + lsi ? XICS_FLAGS_IRQ_LSI : XICS_FLAGS_IRQ_MSI; + + if (kvm_irqchip_in_kernel()) { + Error *local_err = NULL; + + ics_reset_irq(ics->irqs + srcno); + ics_set_kvm_state_one(ics, srcno, &local_err); + if (local_err) { + error_report_err(local_err); + } + } +} + +static void xics_register_types(void) +{ + type_register_static(&ics_info); + type_register_static(&icp_info); + type_register_static(&xics_fabric_info); +} + +type_init(xics_register_types) diff --git a/hw/intc/xics_kvm.c b/hw/intc/xics_kvm.c new file mode 100644 index 000000000..f5bfc501b --- /dev/null +++ b/hw/intc/xics_kvm.c @@ -0,0 +1,509 @@ +/* + * QEMU PowerPC pSeries Logical Partition (aka sPAPR) hardware System Emulator + * + * PAPR Virtualized Interrupt System, aka ICS/ICP aka xics, in-kernel emulation + * + * Copyright (c) 2013 David Gibson, IBM Corporation. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "qemu-common.h"
+#include "trace.h"
+#include "sysemu/kvm.h"
+#include "hw/ppc/spapr.h"
+#include "hw/ppc/spapr_cpu_core.h"
+#include "hw/ppc/xics.h"
+#include "hw/ppc/xics_spapr.h"
+#include "kvm_ppc.h"
+#include "qemu/config-file.h"
+#include "qemu/error-report.h"
+
+#include <sys/ioctl.h>
+
+static int kernel_xics_fd = -1;
+
+typedef struct KVMEnabledICP {
+    unsigned long vcpu_id;
+    QLIST_ENTRY(KVMEnabledICP) node;
+} KVMEnabledICP;
+
+static QLIST_HEAD(, KVMEnabledICP)
+    kvm_enabled_icps = QLIST_HEAD_INITIALIZER(&kvm_enabled_icps);
+
+static void kvm_disable_icps(void)
+{
+    KVMEnabledICP *enabled_icp, *next;
+
+    QLIST_FOREACH_SAFE(enabled_icp, &kvm_enabled_icps, node, next) {
+        QLIST_REMOVE(enabled_icp, node);
+        g_free(enabled_icp);
+    }
+}
+
+/*
+ * ICP-KVM
+ */
+void icp_get_kvm_state(ICPState *icp)
+{
+    uint64_t state;
+    int ret;
+
+    /* The KVM XICS device is not in use */
+    if (kernel_xics_fd == -1) {
+        return;
+    }
+
+    /* ICP for this CPU thread is not in use, exiting */
+    if (!icp->cs) {
+        return;
+    }
+
+    ret = kvm_get_one_reg(icp->cs, KVM_REG_PPC_ICP_STATE, &state);
+    if (ret != 0) {
+        error_report("Unable to retrieve KVM interrupt controller state"
+                     " for CPU %ld: %s", kvm_arch_vcpu_id(icp->cs), strerror(errno));
+        exit(1);
+    }
+
+    icp->xirr = state >> KVM_REG_PPC_ICP_XISR_SHIFT;
+    icp->mfrr = (state >> KVM_REG_PPC_ICP_MFRR_SHIFT)
+        & KVM_REG_PPC_ICP_MFRR_MASK;
+    icp->pending_priority = (state >> KVM_REG_PPC_ICP_PPRI_SHIFT)
+        & KVM_REG_PPC_ICP_PPRI_MASK;
+}
+
+static void do_icp_synchronize_state(CPUState *cpu, run_on_cpu_data arg)
+{
+    icp_get_kvm_state(arg.host_ptr);
+}
+
+void icp_synchronize_state(ICPState *icp)
+{
+    if (icp->cs) {
+        run_on_cpu(icp->cs, do_icp_synchronize_state, RUN_ON_CPU_HOST_PTR(icp));
+    }
+}
+
+int icp_set_kvm_state(ICPState *icp, Error **errp)
+{
+    uint64_t state;
+    int ret;
+
+    /* The KVM XICS device is not in use */
+    if (kernel_xics_fd == -1) {
+        return 0;
+    }
+
+    /* ICP for this CPU thread is not in use, exiting */
+    if (!icp->cs) {
+        return 0;
+    }
+
+    state = ((uint64_t)icp->xirr << KVM_REG_PPC_ICP_XISR_SHIFT)
+          | ((uint64_t)icp->mfrr << KVM_REG_PPC_ICP_MFRR_SHIFT)
+          | ((uint64_t)icp->pending_priority << KVM_REG_PPC_ICP_PPRI_SHIFT);
+
+    ret = kvm_set_one_reg(icp->cs, KVM_REG_PPC_ICP_STATE, &state);
+    if (ret < 0) {
+        error_setg_errno(errp, -ret,
+                         "Unable to restore KVM interrupt controller state (0x%"
+                         PRIx64 ") for CPU %ld", state,
+                         kvm_arch_vcpu_id(icp->cs));
+        return ret;
+    }
+
+    return 0;
+}
+
+void icp_kvm_realize(DeviceState *dev, Error **errp)
+{
+    ICPState *icp = ICP(dev);
+    CPUState *cs;
+    KVMEnabledICP *enabled_icp;
+    unsigned long vcpu_id;
+    int ret;
+
+    /* The KVM XICS device is not in use */
+    if (kernel_xics_fd == -1) {
+        return;
+    }
+
+    cs = icp->cs;
+    vcpu_id = kvm_arch_vcpu_id(cs);
+
+    /*
+     * If we are reusing a parked vCPU fd corresponding to the CPU
+     * which was hot-removed earlier we don't have to re-enable
+     * KVM_CAP_IRQ_XICS capability again.
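+     * The kvm_enabled_icps list below records every vcpu_id that has
+     * already been connected, so a hit in the lookup that follows
+     * means the kernel side is already set up.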
+ */ + QLIST_FOREACH(enabled_icp, &kvm_enabled_icps, node) { + if (enabled_icp->vcpu_id == vcpu_id) { + return; + } + } + + ret = kvm_vcpu_enable_cap(cs, KVM_CAP_IRQ_XICS, 0, kernel_xics_fd, vcpu_id); + if (ret < 0) { + Error *local_err = NULL; + + error_setg(&local_err, "Unable to connect CPU%ld to kernel XICS: %s", + vcpu_id, strerror(errno)); + if (errno == ENOSPC) { + error_append_hint(&local_err, "Try -smp maxcpus=N with N < %u\n", + MACHINE(qdev_get_machine())->smp.max_cpus); + } + error_propagate(errp, local_err); + return; + } + enabled_icp = g_malloc(sizeof(*enabled_icp)); + enabled_icp->vcpu_id = vcpu_id; + QLIST_INSERT_HEAD(&kvm_enabled_icps, enabled_icp, node); +} + +/* + * ICS-KVM + */ +void ics_get_kvm_state(ICSState *ics) +{ + uint64_t state; + int i; + + /* The KVM XICS device is not in use */ + if (kernel_xics_fd == -1) { + return; + } + + for (i = 0; i < ics->nr_irqs; i++) { + ICSIRQState *irq = &ics->irqs[i]; + + if (ics_irq_free(ics, i)) { + continue; + } + + kvm_device_access(kernel_xics_fd, KVM_DEV_XICS_GRP_SOURCES, + i + ics->offset, &state, false, &error_fatal); + + irq->server = state & KVM_XICS_DESTINATION_MASK; + irq->saved_priority = (state >> KVM_XICS_PRIORITY_SHIFT) + & KVM_XICS_PRIORITY_MASK; + /* + * To be consistent with the software emulation in xics.c, we + * split out the masked state + priority that we get from the + * kernel into 'current priority' (0xff if masked) and + * 'saved priority' (if masked, this is the priority the + * interrupt had before it was masked). Masking and unmasking + * are done with the ibm,int-off and ibm,int-on RTAS calls. + */ + if (state & KVM_XICS_MASKED) { + irq->priority = 0xff; + } else { + irq->priority = irq->saved_priority; + } + + irq->status = 0; + if (state & KVM_XICS_PENDING) { + if (state & KVM_XICS_LEVEL_SENSITIVE) { + irq->status |= XICS_STATUS_ASSERTED; + } else { + /* + * A pending edge-triggered interrupt (or MSI) + * must have been rejected previously when we + * first detected it and tried to deliver it, + * so mark it as pending and previously rejected + * for consistency with how xics.c works. 
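+                 * ics_resend_msi() keys on XICS_STATUS_REJECTED to
+                 * redeliver such an interrupt, while MASKED_PENDING
+                 * lets ics_write_xive_msi() deliver it once it is
+                 * unmasked again.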
+ */ + irq->status |= XICS_STATUS_MASKED_PENDING + | XICS_STATUS_REJECTED; + } + } + if (state & KVM_XICS_PRESENTED) { + irq->status |= XICS_STATUS_PRESENTED; + } + if (state & KVM_XICS_QUEUED) { + irq->status |= XICS_STATUS_QUEUED; + } + } +} + +void ics_synchronize_state(ICSState *ics) +{ + ics_get_kvm_state(ics); +} + +int ics_set_kvm_state_one(ICSState *ics, int srcno, Error **errp) +{ + uint64_t state; + ICSIRQState *irq = &ics->irqs[srcno]; + int ret; + + /* The KVM XICS device is not in use */ + if (kernel_xics_fd == -1) { + return 0; + } + + state = irq->server; + state |= (uint64_t)(irq->saved_priority & KVM_XICS_PRIORITY_MASK) + << KVM_XICS_PRIORITY_SHIFT; + if (irq->priority != irq->saved_priority) { + assert(irq->priority == 0xff); + } + + if (irq->priority == 0xff) { + state |= KVM_XICS_MASKED; + } + + if (irq->flags & XICS_FLAGS_IRQ_LSI) { + state |= KVM_XICS_LEVEL_SENSITIVE; + if (irq->status & XICS_STATUS_ASSERTED) { + state |= KVM_XICS_PENDING; + } + } else { + if (irq->status & XICS_STATUS_MASKED_PENDING) { + state |= KVM_XICS_PENDING; + } + } + if (irq->status & XICS_STATUS_PRESENTED) { + state |= KVM_XICS_PRESENTED; + } + if (irq->status & XICS_STATUS_QUEUED) { + state |= KVM_XICS_QUEUED; + } + + ret = kvm_device_access(kernel_xics_fd, KVM_DEV_XICS_GRP_SOURCES, + srcno + ics->offset, &state, true, errp); + if (ret < 0) { + return ret; + } + + return 0; +} + +int ics_set_kvm_state(ICSState *ics, Error **errp) +{ + int i; + + /* The KVM XICS device is not in use */ + if (kernel_xics_fd == -1) { + return 0; + } + + for (i = 0; i < ics->nr_irqs; i++) { + int ret; + + if (ics_irq_free(ics, i)) { + continue; + } + + ret = ics_set_kvm_state_one(ics, i, errp); + if (ret < 0) { + return ret; + } + } + + return 0; +} + +void ics_kvm_set_irq(ICSState *ics, int srcno, int val) +{ + struct kvm_irq_level args; + int rc; + + /* The KVM XICS device should be in use */ + assert(kernel_xics_fd != -1); + + args.irq = srcno + ics->offset; + if (ics->irqs[srcno].flags & XICS_FLAGS_IRQ_MSI) { + if (!val) { + return; + } + args.level = KVM_INTERRUPT_SET; + } else { + args.level = val ? KVM_INTERRUPT_SET_LEVEL : KVM_INTERRUPT_UNSET; + } + rc = kvm_vm_ioctl(kvm_state, KVM_IRQ_LINE, &args); + if (rc < 0) { + perror("kvm_irq_line"); + } +} + +int xics_kvm_connect(SpaprInterruptController *intc, uint32_t nr_servers, + Error **errp) +{ + ICSState *ics = ICS_SPAPR(intc); + int rc; + CPUState *cs; + Error *local_err = NULL; + + /* + * The KVM XICS device already in use. This is the case when + * rebooting under the XICS-only interrupt mode. 
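+     * Nothing needs to be recreated in that case; the existing
+     * device fd is simply kept.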
+ */ + if (kernel_xics_fd != -1) { + return 0; + } + + if (!kvm_enabled() || !kvm_check_extension(kvm_state, KVM_CAP_IRQ_XICS)) { + error_setg(errp, + "KVM and IRQ_XICS capability must be present for in-kernel XICS"); + return -1; + } + + rc = kvmppc_define_rtas_kernel_token(RTAS_IBM_SET_XIVE, "ibm,set-xive"); + if (rc < 0) { + error_setg_errno(&local_err, -rc, + "kvmppc_define_rtas_kernel_token: ibm,set-xive"); + goto fail; + } + + rc = kvmppc_define_rtas_kernel_token(RTAS_IBM_GET_XIVE, "ibm,get-xive"); + if (rc < 0) { + error_setg_errno(&local_err, -rc, + "kvmppc_define_rtas_kernel_token: ibm,get-xive"); + goto fail; + } + + rc = kvmppc_define_rtas_kernel_token(RTAS_IBM_INT_ON, "ibm,int-on"); + if (rc < 0) { + error_setg_errno(&local_err, -rc, + "kvmppc_define_rtas_kernel_token: ibm,int-on"); + goto fail; + } + + rc = kvmppc_define_rtas_kernel_token(RTAS_IBM_INT_OFF, "ibm,int-off"); + if (rc < 0) { + error_setg_errno(&local_err, -rc, + "kvmppc_define_rtas_kernel_token: ibm,int-off"); + goto fail; + } + + /* Create the KVM XICS device */ + rc = kvm_create_device(kvm_state, KVM_DEV_TYPE_XICS, false); + if (rc < 0) { + error_setg_errno(&local_err, -rc, "Error on KVM_CREATE_DEVICE for XICS"); + goto fail; + } + + /* Tell KVM about the # of VCPUs we may have (POWER9 and newer only) */ + if (kvm_device_check_attr(rc, KVM_DEV_XICS_GRP_CTRL, + KVM_DEV_XICS_NR_SERVERS)) { + if (kvm_device_access(rc, KVM_DEV_XICS_GRP_CTRL, + KVM_DEV_XICS_NR_SERVERS, &nr_servers, true, + &local_err)) { + goto fail; + } + } + + kernel_xics_fd = rc; + kvm_kernel_irqchip = true; + kvm_msi_via_irqfd_allowed = true; + kvm_gsi_direct_mapping = true; + + /* Create the presenters */ + CPU_FOREACH(cs) { + PowerPCCPU *cpu = POWERPC_CPU(cs); + + icp_kvm_realize(DEVICE(spapr_cpu_state(cpu)->icp), &local_err); + if (local_err) { + goto fail; + } + } + + /* Update the KVM sources */ + ics_set_kvm_state(ics, &local_err); + if (local_err) { + goto fail; + } + + /* Connect the presenters to the initial VCPUs of the machine */ + CPU_FOREACH(cs) { + PowerPCCPU *cpu = POWERPC_CPU(cs); + icp_set_kvm_state(spapr_cpu_state(cpu)->icp, &local_err); + if (local_err) { + goto fail; + } + } + + return 0; + +fail: + error_propagate(errp, local_err); + xics_kvm_disconnect(intc); + return -1; +} + +void xics_kvm_disconnect(SpaprInterruptController *intc) +{ + /* + * Only on P9 using the XICS-on XIVE KVM device: + * + * When the KVM device fd is closed, the device is destroyed and + * removed from the list of devices of the VM. The VCPU presenters + * are also detached from the device. + */ + if (kernel_xics_fd != -1) { + close(kernel_xics_fd); + kernel_xics_fd = -1; + } + + kvmppc_define_rtas_kernel_token(0, "ibm,set-xive"); + kvmppc_define_rtas_kernel_token(0, "ibm,get-xive"); + kvmppc_define_rtas_kernel_token(0, "ibm,int-on"); + kvmppc_define_rtas_kernel_token(0, "ibm,int-off"); + + kvm_kernel_irqchip = false; + kvm_msi_via_irqfd_allowed = false; + kvm_gsi_direct_mapping = false; + + /* Clear the presenter from the VCPUs */ + kvm_disable_icps(); +} + +/* + * This is a heuristic to detect older KVMs on POWER9 hosts that don't + * support destruction of a KVM XICS device while the VM is running. + * Required to start a spapr machine with ic-mode=dual,kernel-irqchip=on. + */ +bool xics_kvm_has_broken_disconnect(void) +{ + int rc; + + rc = kvm_create_device(kvm_state, KVM_DEV_TYPE_XICS, false); + if (rc < 0) { + /* + * The error is ignored on purpose. The KVM XICS setup code + * will catch it again anyway. 
The goal here is to see if
+         * close() actually destroys the device or not.
+         */
+        return false;
+    }
+
+    close(rc);
+
+    rc = kvm_create_device(kvm_state, KVM_DEV_TYPE_XICS, false);
+    if (rc >= 0) {
+        close(rc);
+        return false;
+    }
+
+    return errno == EEXIST;
+}
diff --git a/hw/intc/xics_pnv.c b/hw/intc/xics_pnv.c
new file mode 100644
index 000000000..753c067f1
--- /dev/null
+++ b/hw/intc/xics_pnv.c
@@ -0,0 +1,202 @@
+/*
+ * QEMU PowerPC PowerNV Interrupt Control Presenter (ICP) model
+ *
+ * Copyright (c) 2017, IBM Corporation.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public License
+ * as published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "qemu/log.h"
+#include "qemu/module.h"
+#include "hw/ppc/xics.h"
+
+#define ICP_XIRR_POLL    0 /* 1 byte (CPPR) or 4 bytes */
+#define ICP_XIRR         4 /* 1 byte (CPPR) or 4 bytes */
+#define ICP_MFRR        12 /* 1 byte access only */
+
+#define ICP_LINKA       16 /* unused */
+#define ICP_LINKB       20 /* unused */
+#define ICP_LINKC       24 /* unused */
+
+static uint64_t pnv_icp_read(void *opaque, hwaddr addr, unsigned width)
+{
+    ICPState *icp = ICP(opaque);
+    PnvICPState *picp = PNV_ICP(opaque);
+    bool byte0 = (width == 1 && (addr & 0x3) == 0);
+    uint64_t val = 0xffffffff;
+
+    switch (addr & 0xffc) {
+    case ICP_XIRR_POLL:
+        val = icp_ipoll(icp, NULL);
+        if (byte0) {
+            val >>= 24;
+        } else if (width != 4) {
+            goto bad_access;
+        }
+        break;
+    case ICP_XIRR:
+        if (byte0) {
+            val = icp_ipoll(icp, NULL) >> 24;
+        } else if (width == 4) {
+            val = icp_accept(icp);
+        } else {
+            goto bad_access;
+        }
+        break;
+    case ICP_MFRR:
+        if (byte0) {
+            val = icp->mfrr;
+        } else {
+            goto bad_access;
+        }
+        break;
+    case ICP_LINKA:
+        if (width == 4) {
+            val = picp->links[0];
+        } else {
+            goto bad_access;
+        }
+        break;
+    case ICP_LINKB:
+        if (width == 4) {
+            val = picp->links[1];
+        } else {
+            goto bad_access;
+        }
+        break;
+    case ICP_LINKC:
+        if (width == 4) {
+            val = picp->links[2];
+        } else {
+            goto bad_access;
+        }
+        break;
+    default:
+bad_access:
+        qemu_log_mask(LOG_GUEST_ERROR, "XICS: Bad ICP access 0x%"
+                      HWADDR_PRIx"/%d\n", addr, width);
+    }
+
+    return val;
+}
+
+static void pnv_icp_write(void *opaque, hwaddr addr, uint64_t val,
+                          unsigned width)
+{
+    ICPState *icp = ICP(opaque);
+    PnvICPState *picp = PNV_ICP(opaque);
+    bool byte0 = (width == 1 && (addr & 0x3) == 0);
+
+    switch (addr & 0xffc) {
+    case ICP_XIRR:
+        if (byte0) {
+            icp_set_cppr(icp, val);
+        } else if (width == 4) {
+            icp_eoi(icp, val);
+        } else {
+            goto bad_access;
+        }
+        break;
+    case ICP_MFRR:
+        if (byte0) {
+            icp_set_mfrr(icp, val);
+        } else {
+            goto bad_access;
+        }
+        break;
+    case ICP_LINKA:
+        if (width == 4) {
+            picp->links[0] = val;
+        } else {
+            goto bad_access;
+        }
+        break;
+    case ICP_LINKB:
+        if (width == 4) {
+            picp->links[1] = val;
+        } else {
+            goto bad_access;
+        }
+        break;
+    case ICP_LINKC:
+        if (width == 4) {
+            picp->links[2] = val;
+        } else {
+            goto bad_access;
+        }
+        break;
+    default:
+bad_access:
+        qemu_log_mask(LOG_GUEST_ERROR,
"XICS: Bad ICP access 0x%" + HWADDR_PRIx"/%d\n", addr, width); + } +} + +static const MemoryRegionOps pnv_icp_ops = { + .read = pnv_icp_read, + .write = pnv_icp_write, + .endianness = DEVICE_BIG_ENDIAN, + .valid = { + .min_access_size = 1, + .max_access_size = 4, + }, + .impl = { + .min_access_size = 1, + .max_access_size = 4, + }, +}; + +static void pnv_icp_realize(DeviceState *dev, Error **errp) +{ + ICPState *icp = ICP(dev); + PnvICPState *pnv_icp = PNV_ICP(icp); + ICPStateClass *icpc = ICP_GET_CLASS(icp); + Error *local_err = NULL; + + icpc->parent_realize(dev, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + + memory_region_init_io(&pnv_icp->mmio, OBJECT(icp), &pnv_icp_ops, + icp, "icp-thread", 0x1000); +} + +static void pnv_icp_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + ICPStateClass *icpc = ICP_CLASS(klass); + + device_class_set_parent_realize(dc, pnv_icp_realize, + &icpc->parent_realize); + dc->desc = "PowerNV ICP"; +} + +static const TypeInfo pnv_icp_info = { + .name = TYPE_PNV_ICP, + .parent = TYPE_ICP, + .instance_size = sizeof(PnvICPState), + .class_init = pnv_icp_class_init, + .class_size = sizeof(ICPStateClass), +}; + +static void pnv_icp_register_types(void) +{ + type_register_static(&pnv_icp_info); +} + +type_init(pnv_icp_register_types) diff --git a/hw/intc/xics_spapr.c b/hw/intc/xics_spapr.c new file mode 100644 index 000000000..37b2d9997 --- /dev/null +++ b/hw/intc/xics_spapr.c @@ -0,0 +1,476 @@ +/* + * QEMU PowerPC pSeries Logical Partition (aka sPAPR) hardware System Emulator + * + * PAPR Virtualized Interrupt System, aka ICS/ICP aka xics + * + * Copyright (c) 2010,2011 David Gibson, IBM Corporation. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ * + */ + +#include "qemu/osdep.h" +#include "trace.h" +#include "qemu/timer.h" +#include "hw/ppc/spapr.h" +#include "hw/ppc/spapr_cpu_core.h" +#include "hw/ppc/xics.h" +#include "hw/ppc/xics_spapr.h" +#include "hw/ppc/fdt.h" +#include "qapi/visitor.h" + +/* + * Guest interfaces + */ + +static bool check_emulated_xics(SpaprMachineState *spapr, const char *func) +{ + if (spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT) || + kvm_irqchip_in_kernel()) { + error_report("pseries: %s must only be called for emulated XICS", + func); + return false; + } + + return true; +} + +#define CHECK_EMULATED_XICS_HCALL(spapr) \ + do { \ + if (!check_emulated_xics((spapr), __func__)) { \ + return H_HARDWARE; \ + } \ + } while (0) + +static target_ulong h_cppr(PowerPCCPU *cpu, SpaprMachineState *spapr, + target_ulong opcode, target_ulong *args) +{ + target_ulong cppr = args[0]; + + CHECK_EMULATED_XICS_HCALL(spapr); + + icp_set_cppr(spapr_cpu_state(cpu)->icp, cppr); + return H_SUCCESS; +} + +static target_ulong h_ipi(PowerPCCPU *cpu, SpaprMachineState *spapr, + target_ulong opcode, target_ulong *args) +{ + target_ulong mfrr = args[1]; + ICPState *icp = xics_icp_get(XICS_FABRIC(spapr), args[0]); + + CHECK_EMULATED_XICS_HCALL(spapr); + + if (!icp) { + return H_PARAMETER; + } + + icp_set_mfrr(icp, mfrr); + return H_SUCCESS; +} + +static target_ulong h_xirr(PowerPCCPU *cpu, SpaprMachineState *spapr, + target_ulong opcode, target_ulong *args) +{ + uint32_t xirr = icp_accept(spapr_cpu_state(cpu)->icp); + + CHECK_EMULATED_XICS_HCALL(spapr); + + args[0] = xirr; + return H_SUCCESS; +} + +static target_ulong h_xirr_x(PowerPCCPU *cpu, SpaprMachineState *spapr, + target_ulong opcode, target_ulong *args) +{ + uint32_t xirr = icp_accept(spapr_cpu_state(cpu)->icp); + + CHECK_EMULATED_XICS_HCALL(spapr); + + args[0] = xirr; + args[1] = cpu_get_host_ticks(); + return H_SUCCESS; +} + +static target_ulong h_eoi(PowerPCCPU *cpu, SpaprMachineState *spapr, + target_ulong opcode, target_ulong *args) +{ + target_ulong xirr = args[0]; + + CHECK_EMULATED_XICS_HCALL(spapr); + + icp_eoi(spapr_cpu_state(cpu)->icp, xirr); + return H_SUCCESS; +} + +static target_ulong h_ipoll(PowerPCCPU *cpu, SpaprMachineState *spapr, + target_ulong opcode, target_ulong *args) +{ + ICPState *icp = xics_icp_get(XICS_FABRIC(spapr), args[0]); + uint32_t mfrr; + uint32_t xirr; + + CHECK_EMULATED_XICS_HCALL(spapr); + + if (!icp) { + return H_PARAMETER; + } + + xirr = icp_ipoll(icp, &mfrr); + + args[0] = xirr; + args[1] = mfrr; + + return H_SUCCESS; +} + +#define CHECK_EMULATED_XICS_RTAS(spapr, rets) \ + do { \ + if (!check_emulated_xics((spapr), __func__)) { \ + rtas_st((rets), 0, RTAS_OUT_HW_ERROR); \ + return; \ + } \ + } while (0) + +static void rtas_set_xive(PowerPCCPU *cpu, SpaprMachineState *spapr, + uint32_t token, + uint32_t nargs, target_ulong args, + uint32_t nret, target_ulong rets) +{ + ICSState *ics = spapr->ics; + uint32_t nr, srcno, server, priority; + + CHECK_EMULATED_XICS_RTAS(spapr, rets); + + if ((nargs != 3) || (nret != 1)) { + rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); + return; + } + if (!ics) { + rtas_st(rets, 0, RTAS_OUT_HW_ERROR); + return; + } + + nr = rtas_ld(args, 0); + server = rtas_ld(args, 1); + priority = rtas_ld(args, 2); + + if (!ics_valid_irq(ics, nr) || !xics_icp_get(XICS_FABRIC(spapr), server) + || (priority > 0xff)) { + rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); + return; + } + + srcno = nr - ics->offset; + ics_write_xive(ics, srcno, server, priority, priority); + + rtas_st(rets, 0, RTAS_OUT_SUCCESS); +} + +static void 
rtas_get_xive(PowerPCCPU *cpu, SpaprMachineState *spapr, + uint32_t token, + uint32_t nargs, target_ulong args, + uint32_t nret, target_ulong rets) +{ + ICSState *ics = spapr->ics; + uint32_t nr, srcno; + + CHECK_EMULATED_XICS_RTAS(spapr, rets); + + if ((nargs != 1) || (nret != 3)) { + rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); + return; + } + if (!ics) { + rtas_st(rets, 0, RTAS_OUT_HW_ERROR); + return; + } + + nr = rtas_ld(args, 0); + + if (!ics_valid_irq(ics, nr)) { + rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); + return; + } + + rtas_st(rets, 0, RTAS_OUT_SUCCESS); + srcno = nr - ics->offset; + rtas_st(rets, 1, ics->irqs[srcno].server); + rtas_st(rets, 2, ics->irqs[srcno].priority); +} + +static void rtas_int_off(PowerPCCPU *cpu, SpaprMachineState *spapr, + uint32_t token, + uint32_t nargs, target_ulong args, + uint32_t nret, target_ulong rets) +{ + ICSState *ics = spapr->ics; + uint32_t nr, srcno; + + CHECK_EMULATED_XICS_RTAS(spapr, rets); + + if ((nargs != 1) || (nret != 1)) { + rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); + return; + } + if (!ics) { + rtas_st(rets, 0, RTAS_OUT_HW_ERROR); + return; + } + + nr = rtas_ld(args, 0); + + if (!ics_valid_irq(ics, nr)) { + rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); + return; + } + + srcno = nr - ics->offset; + ics_write_xive(ics, srcno, ics->irqs[srcno].server, 0xff, + ics->irqs[srcno].priority); + + rtas_st(rets, 0, RTAS_OUT_SUCCESS); +} + +static void rtas_int_on(PowerPCCPU *cpu, SpaprMachineState *spapr, + uint32_t token, + uint32_t nargs, target_ulong args, + uint32_t nret, target_ulong rets) +{ + ICSState *ics = spapr->ics; + uint32_t nr, srcno; + + CHECK_EMULATED_XICS_RTAS(spapr, rets); + + if ((nargs != 1) || (nret != 1)) { + rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); + return; + } + if (!ics) { + rtas_st(rets, 0, RTAS_OUT_HW_ERROR); + return; + } + + nr = rtas_ld(args, 0); + + if (!ics_valid_irq(ics, nr)) { + rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); + return; + } + + srcno = nr - ics->offset; + ics_write_xive(ics, srcno, ics->irqs[srcno].server, + ics->irqs[srcno].saved_priority, + ics->irqs[srcno].saved_priority); + + rtas_st(rets, 0, RTAS_OUT_SUCCESS); +} + +static void ics_spapr_realize(DeviceState *dev, Error **errp) +{ + ICSState *ics = ICS_SPAPR(dev); + ICSStateClass *icsc = ICS_GET_CLASS(ics); + Error *local_err = NULL; + + icsc->parent_realize(dev, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + + spapr_rtas_register(RTAS_IBM_SET_XIVE, "ibm,set-xive", rtas_set_xive); + spapr_rtas_register(RTAS_IBM_GET_XIVE, "ibm,get-xive", rtas_get_xive); + spapr_rtas_register(RTAS_IBM_INT_OFF, "ibm,int-off", rtas_int_off); + spapr_rtas_register(RTAS_IBM_INT_ON, "ibm,int-on", rtas_int_on); + + spapr_register_hypercall(H_CPPR, h_cppr); + spapr_register_hypercall(H_IPI, h_ipi); + spapr_register_hypercall(H_XIRR, h_xirr); + spapr_register_hypercall(H_XIRR_X, h_xirr_x); + spapr_register_hypercall(H_EOI, h_eoi); + spapr_register_hypercall(H_IPOLL, h_ipoll); +} + +static void xics_spapr_dt(SpaprInterruptController *intc, uint32_t nr_servers, + void *fdt, uint32_t phandle) +{ + uint32_t interrupt_server_ranges_prop[] = { + 0, cpu_to_be32(nr_servers), + }; + int node; + + _FDT(node = fdt_add_subnode(fdt, 0, "interrupt-controller")); + + _FDT(fdt_setprop_string(fdt, node, "device_type", + "PowerPC-External-Interrupt-Presentation")); + _FDT(fdt_setprop_string(fdt, node, "compatible", "IBM,ppc-xicp")); + _FDT(fdt_setprop(fdt, node, "interrupt-controller", NULL, 0)); + _FDT(fdt_setprop(fdt, node, "ibm,interrupt-server-ranges", + 
interrupt_server_ranges_prop, + sizeof(interrupt_server_ranges_prop))); + _FDT(fdt_setprop_cell(fdt, node, "#interrupt-cells", 2)); + _FDT(fdt_setprop_cell(fdt, node, "linux,phandle", phandle)); + _FDT(fdt_setprop_cell(fdt, node, "phandle", phandle)); +} + +static int xics_spapr_cpu_intc_create(SpaprInterruptController *intc, + PowerPCCPU *cpu, Error **errp) +{ + ICSState *ics = ICS_SPAPR(intc); + Object *obj; + SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu); + + obj = icp_create(OBJECT(cpu), TYPE_ICP, ics->xics, errp); + if (!obj) { + return -1; + } + + spapr_cpu->icp = ICP(obj); + return 0; +} + +static void xics_spapr_cpu_intc_reset(SpaprInterruptController *intc, + PowerPCCPU *cpu) +{ + icp_reset(spapr_cpu_state(cpu)->icp); +} + +static void xics_spapr_cpu_intc_destroy(SpaprInterruptController *intc, + PowerPCCPU *cpu) +{ + SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu); + + icp_destroy(spapr_cpu->icp); + spapr_cpu->icp = NULL; +} + +static int xics_spapr_claim_irq(SpaprInterruptController *intc, int irq, + bool lsi, Error **errp) +{ + ICSState *ics = ICS_SPAPR(intc); + + assert(ics); + assert(ics_valid_irq(ics, irq)); + + if (!ics_irq_free(ics, irq - ics->offset)) { + error_setg(errp, "IRQ %d is not free", irq); + return -EBUSY; + } + + ics_set_irq_type(ics, irq - ics->offset, lsi); + return 0; +} + +static void xics_spapr_free_irq(SpaprInterruptController *intc, int irq) +{ + ICSState *ics = ICS_SPAPR(intc); + uint32_t srcno = irq - ics->offset; + + assert(ics_valid_irq(ics, irq)); + + memset(&ics->irqs[srcno], 0, sizeof(ICSIRQState)); +} + +static void xics_spapr_set_irq(SpaprInterruptController *intc, int irq, int val) +{ + ICSState *ics = ICS_SPAPR(intc); + uint32_t srcno = irq - ics->offset; + + ics_set_irq(ics, srcno, val); +} + +static void xics_spapr_print_info(SpaprInterruptController *intc, Monitor *mon) +{ + ICSState *ics = ICS_SPAPR(intc); + CPUState *cs; + + CPU_FOREACH(cs) { + PowerPCCPU *cpu = POWERPC_CPU(cs); + + icp_pic_print_info(spapr_cpu_state(cpu)->icp, mon); + } + + ics_pic_print_info(ics, mon); +} + +static int xics_spapr_post_load(SpaprInterruptController *intc, int version_id) +{ + if (!kvm_irqchip_in_kernel()) { + CPUState *cs; + CPU_FOREACH(cs) { + PowerPCCPU *cpu = POWERPC_CPU(cs); + icp_resend(spapr_cpu_state(cpu)->icp); + } + } + return 0; +} + +static int xics_spapr_activate(SpaprInterruptController *intc, + uint32_t nr_servers, Error **errp) +{ + if (kvm_enabled()) { + return spapr_irq_init_kvm(xics_kvm_connect, intc, nr_servers, errp); + } + return 0; +} + +static void xics_spapr_deactivate(SpaprInterruptController *intc) +{ + if (kvm_irqchip_in_kernel()) { + xics_kvm_disconnect(intc); + } +} + +static void ics_spapr_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + ICSStateClass *isc = ICS_CLASS(klass); + SpaprInterruptControllerClass *sicc = SPAPR_INTC_CLASS(klass); + + device_class_set_parent_realize(dc, ics_spapr_realize, + &isc->parent_realize); + sicc->activate = xics_spapr_activate; + sicc->deactivate = xics_spapr_deactivate; + sicc->cpu_intc_create = xics_spapr_cpu_intc_create; + sicc->cpu_intc_reset = xics_spapr_cpu_intc_reset; + sicc->cpu_intc_destroy = xics_spapr_cpu_intc_destroy; + sicc->claim_irq = xics_spapr_claim_irq; + sicc->free_irq = xics_spapr_free_irq; + sicc->set_irq = xics_spapr_set_irq; + sicc->print_info = xics_spapr_print_info; + sicc->dt = xics_spapr_dt; + sicc->post_load = xics_spapr_post_load; +} + +static const TypeInfo ics_spapr_info = { + .name = TYPE_ICS_SPAPR, + .parent = 
TYPE_ICS, + .class_init = ics_spapr_class_init, + .interfaces = (InterfaceInfo[]) { + { TYPE_SPAPR_INTC }, + { } + }, +}; + +static void xics_spapr_register_types(void) +{ + type_register_static(&ics_spapr_info); +} + +type_init(xics_spapr_register_types) diff --git a/hw/intc/xilinx_intc.c b/hw/intc/xilinx_intc.c new file mode 100644 index 000000000..4c4397b3d --- /dev/null +++ b/hw/intc/xilinx_intc.c @@ -0,0 +1,206 @@ +/* + * QEMU Xilinx OPB Interrupt Controller. + * + * Copyright (c) 2009 Edgar E. Iglesias. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "hw/sysbus.h" +#include "qemu/module.h" +#include "hw/irq.h" +#include "hw/qdev-properties.h" +#include "qom/object.h" + +#define D(x) + +#define R_ISR 0 +#define R_IPR 1 +#define R_IER 2 +#define R_IAR 3 +#define R_SIE 4 +#define R_CIE 5 +#define R_IVR 6 +#define R_MER 7 +#define R_MAX 8 + +#define TYPE_XILINX_INTC "xlnx.xps-intc" +DECLARE_INSTANCE_CHECKER(struct xlx_pic, XILINX_INTC, + TYPE_XILINX_INTC) + +struct xlx_pic +{ + SysBusDevice parent_obj; + + MemoryRegion mmio; + qemu_irq parent_irq; + + /* Configuration reg chosen at synthesis-time. QEMU populates + the bits at board-setup. */ + uint32_t c_kind_of_intr; + + /* Runtime control registers. */ + uint32_t regs[R_MAX]; + /* state of the interrupt input pins */ + uint32_t irq_pin_state; +}; + +static void update_irq(struct xlx_pic *p) +{ + uint32_t i; + + /* level triggered interrupt */ + if (p->regs[R_MER] & 2) { + p->regs[R_ISR] |= p->irq_pin_state & ~p->c_kind_of_intr; + } + + /* Update the pending register. */ + p->regs[R_IPR] = p->regs[R_ISR] & p->regs[R_IER]; + + /* Update the vector register. */ + for (i = 0; i < 32; i++) { + if (p->regs[R_IPR] & (1U << i)) { + break; + } + } + if (i == 32) + i = ~0; + + p->regs[R_IVR] = i; + qemu_set_irq(p->parent_irq, (p->regs[R_MER] & 1) && p->regs[R_IPR]); +} + +static uint64_t +pic_read(void *opaque, hwaddr addr, unsigned int size) +{ + struct xlx_pic *p = opaque; + uint32_t r = 0; + + addr >>= 2; + switch (addr) + { + default: + if (addr < ARRAY_SIZE(p->regs)) + r = p->regs[addr]; + break; + + } + D(printf("%s %x=%x\n", __func__, addr * 4, r)); + return r; +} + +static void +pic_write(void *opaque, hwaddr addr, + uint64_t val64, unsigned int size) +{ + struct xlx_pic *p = opaque; + uint32_t value = val64; + + addr >>= 2; + D(qemu_log("%s addr=%x val=%x\n", __func__, addr * 4, value)); + switch (addr) + { + case R_IAR: + p->regs[R_ISR] &= ~value; /* ACK. 
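Writing a 1 to a bit position clears the corresponding ISR bit.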
*/ + break; + case R_SIE: + p->regs[R_IER] |= value; /* Atomic set ie. */ + break; + case R_CIE: + p->regs[R_IER] &= ~value; /* Atomic clear ie. */ + break; + case R_MER: + p->regs[R_MER] = value & 0x3; + break; + case R_ISR: + if ((p->regs[R_MER] & 2)) { + break; + } + /* fallthrough */ + default: + if (addr < ARRAY_SIZE(p->regs)) + p->regs[addr] = value; + break; + } + update_irq(p); +} + +static const MemoryRegionOps pic_ops = { + .read = pic_read, + .write = pic_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4 + } +}; + +static void irq_handler(void *opaque, int irq, int level) +{ + struct xlx_pic *p = opaque; + + /* edge triggered interrupt */ + if (p->c_kind_of_intr & (1 << irq) && p->regs[R_MER] & 2) { + p->regs[R_ISR] |= (level << irq); + } + + p->irq_pin_state &= ~(1 << irq); + p->irq_pin_state |= level << irq; + update_irq(p); +} + +static void xilinx_intc_init(Object *obj) +{ + struct xlx_pic *p = XILINX_INTC(obj); + + qdev_init_gpio_in(DEVICE(obj), irq_handler, 32); + sysbus_init_irq(SYS_BUS_DEVICE(obj), &p->parent_irq); + + memory_region_init_io(&p->mmio, obj, &pic_ops, p, "xlnx.xps-intc", + R_MAX * 4); + sysbus_init_mmio(SYS_BUS_DEVICE(obj), &p->mmio); +} + +static Property xilinx_intc_properties[] = { + DEFINE_PROP_UINT32("kind-of-intr", struct xlx_pic, c_kind_of_intr, 0), + DEFINE_PROP_END_OF_LIST(), +}; + +static void xilinx_intc_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + device_class_set_props(dc, xilinx_intc_properties); +} + +static const TypeInfo xilinx_intc_info = { + .name = TYPE_XILINX_INTC, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(struct xlx_pic), + .instance_init = xilinx_intc_init, + .class_init = xilinx_intc_class_init, +}; + +static void xilinx_intc_register_types(void) +{ + type_register_static(&xilinx_intc_info); +} + +type_init(xilinx_intc_register_types) diff --git a/hw/intc/xive.c b/hw/intc/xive.c new file mode 100644 index 000000000..190194d27 --- /dev/null +++ b/hw/intc/xive.c @@ -0,0 +1,1983 @@ +/* + * QEMU PowerPC XIVE interrupt controller model + * + * Copyright (c) 2017-2018, IBM Corporation. + * + * This code is licensed under the GPL version 2 or later. See the + * COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "qapi/error.h" +#include "target/ppc/cpu.h" +#include "sysemu/cpus.h" +#include "sysemu/dma.h" +#include "sysemu/reset.h" +#include "hw/qdev-properties.h" +#include "migration/vmstate.h" +#include "monitor/monitor.h" +#include "hw/irq.h" +#include "hw/ppc/xive.h" +#include "hw/ppc/xive_regs.h" +#include "trace.h" + +/* + * XIVE Thread Interrupt Management context + */ + +/* + * Convert an Interrupt Pending Buffer (IPB) register to a Pending + * Interrupt Priority Register (PIPR), which contains the priority of + * the most favored pending notification. + */ +static uint8_t ipb_to_pipr(uint8_t ibp) +{ + return ibp ? 
clz32((uint32_t)ibp << 24) : 0xff; +} + +static uint8_t exception_mask(uint8_t ring) +{ + switch (ring) { + case TM_QW1_OS: + return TM_QW1_NSR_EO; + case TM_QW3_HV_PHYS: + return TM_QW3_NSR_HE; + default: + g_assert_not_reached(); + } +} + +static qemu_irq xive_tctx_output(XiveTCTX *tctx, uint8_t ring) +{ + switch (ring) { + case TM_QW0_USER: + return 0; /* Not supported */ + case TM_QW1_OS: + return tctx->os_output; + case TM_QW2_HV_POOL: + case TM_QW3_HV_PHYS: + return tctx->hv_output; + default: + return 0; + } +} + +static uint64_t xive_tctx_accept(XiveTCTX *tctx, uint8_t ring) +{ + uint8_t *regs = &tctx->regs[ring]; + uint8_t nsr = regs[TM_NSR]; + uint8_t mask = exception_mask(ring); + + qemu_irq_lower(xive_tctx_output(tctx, ring)); + + if (regs[TM_NSR] & mask) { + uint8_t cppr = regs[TM_PIPR]; + + regs[TM_CPPR] = cppr; + + /* Reset the pending buffer bit */ + regs[TM_IPB] &= ~xive_priority_to_ipb(cppr); + regs[TM_PIPR] = ipb_to_pipr(regs[TM_IPB]); + + /* Drop Exception bit */ + regs[TM_NSR] &= ~mask; + + trace_xive_tctx_accept(tctx->cs->cpu_index, ring, + regs[TM_IPB], regs[TM_PIPR], + regs[TM_CPPR], regs[TM_NSR]); + } + + return (nsr << 8) | regs[TM_CPPR]; +} + +static void xive_tctx_notify(XiveTCTX *tctx, uint8_t ring) +{ + uint8_t *regs = &tctx->regs[ring]; + + if (regs[TM_PIPR] < regs[TM_CPPR]) { + switch (ring) { + case TM_QW1_OS: + regs[TM_NSR] |= TM_QW1_NSR_EO; + break; + case TM_QW3_HV_PHYS: + regs[TM_NSR] |= (TM_QW3_NSR_HE_PHYS << 6); + break; + default: + g_assert_not_reached(); + } + trace_xive_tctx_notify(tctx->cs->cpu_index, ring, + regs[TM_IPB], regs[TM_PIPR], + regs[TM_CPPR], regs[TM_NSR]); + qemu_irq_raise(xive_tctx_output(tctx, ring)); + } +} + +static void xive_tctx_set_cppr(XiveTCTX *tctx, uint8_t ring, uint8_t cppr) +{ + uint8_t *regs = &tctx->regs[ring]; + + trace_xive_tctx_set_cppr(tctx->cs->cpu_index, ring, + regs[TM_IPB], regs[TM_PIPR], + cppr, regs[TM_NSR]); + + if (cppr > XIVE_PRIORITY_MAX) { + cppr = 0xff; + } + + tctx->regs[ring + TM_CPPR] = cppr; + + /* CPPR has changed, check if we need to raise a pending exception */ + xive_tctx_notify(tctx, ring); +} + +void xive_tctx_ipb_update(XiveTCTX *tctx, uint8_t ring, uint8_t ipb) +{ + uint8_t *regs = &tctx->regs[ring]; + + regs[TM_IPB] |= ipb; + regs[TM_PIPR] = ipb_to_pipr(regs[TM_IPB]); + xive_tctx_notify(tctx, ring); +} + +/* + * XIVE Thread Interrupt Management Area (TIMA) + */ + +static void xive_tm_set_hv_cppr(XivePresenter *xptr, XiveTCTX *tctx, + hwaddr offset, uint64_t value, unsigned size) +{ + xive_tctx_set_cppr(tctx, TM_QW3_HV_PHYS, value & 0xff); +} + +static uint64_t xive_tm_ack_hv_reg(XivePresenter *xptr, XiveTCTX *tctx, + hwaddr offset, unsigned size) +{ + return xive_tctx_accept(tctx, TM_QW3_HV_PHYS); +} + +static uint64_t xive_tm_pull_pool_ctx(XivePresenter *xptr, XiveTCTX *tctx, + hwaddr offset, unsigned size) +{ + uint32_t qw2w2_prev = xive_tctx_word2(&tctx->regs[TM_QW2_HV_POOL]); + uint32_t qw2w2; + + qw2w2 = xive_set_field32(TM_QW2W2_VP, qw2w2_prev, 0); + memcpy(&tctx->regs[TM_QW2_HV_POOL + TM_WORD2], &qw2w2, 4); + return qw2w2; +} + +static void xive_tm_vt_push(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset, + uint64_t value, unsigned size) +{ + tctx->regs[TM_QW3_HV_PHYS + TM_WORD2] = value & 0xff; +} + +static uint64_t xive_tm_vt_poll(XivePresenter *xptr, XiveTCTX *tctx, + hwaddr offset, unsigned size) +{ + return tctx->regs[TM_QW3_HV_PHYS + TM_WORD2] & 0xff; +} + +/* + * Define an access map for each page of the TIMA that we will use in + * the memory region ops to filter values 
when doing loads and stores + * of raw registers values + * + * Registers accessibility bits : + * + * 0x0 - no access + * 0x1 - write only + * 0x2 - read only + * 0x3 - read/write + */ + +static const uint8_t xive_tm_hw_view[] = { + 3, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-0 User */ + 3, 3, 3, 3, 3, 3, 0, 2, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-1 OS */ + 0, 0, 3, 3, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-2 POOL */ + 3, 3, 3, 3, 0, 3, 0, 2, 3, 0, 0, 3, 3, 3, 3, 0, /* QW-3 PHYS */ +}; + +static const uint8_t xive_tm_hv_view[] = { + 3, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-0 User */ + 3, 3, 3, 3, 3, 3, 0, 2, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-1 OS */ + 0, 0, 3, 3, 0, 0, 0, 0, 0, 3, 3, 3, 0, 0, 0, 0, /* QW-2 POOL */ + 3, 3, 3, 3, 0, 3, 0, 2, 3, 0, 0, 3, 0, 0, 0, 0, /* QW-3 PHYS */ +}; + +static const uint8_t xive_tm_os_view[] = { + 3, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-0 User */ + 2, 3, 2, 2, 2, 2, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, /* QW-1 OS */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* QW-2 POOL */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* QW-3 PHYS */ +}; + +static const uint8_t xive_tm_user_view[] = { + 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* QW-0 User */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* QW-1 OS */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* QW-2 POOL */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* QW-3 PHYS */ +}; + +/* + * Overall TIMA access map for the thread interrupt management context + * registers + */ +static const uint8_t *xive_tm_views[] = { + [XIVE_TM_HW_PAGE] = xive_tm_hw_view, + [XIVE_TM_HV_PAGE] = xive_tm_hv_view, + [XIVE_TM_OS_PAGE] = xive_tm_os_view, + [XIVE_TM_USER_PAGE] = xive_tm_user_view, +}; + +/* + * Computes a register access mask for a given offset in the TIMA + */ +static uint64_t xive_tm_mask(hwaddr offset, unsigned size, bool write) +{ + uint8_t page_offset = (offset >> TM_SHIFT) & 0x3; + uint8_t reg_offset = offset & 0x3F; + uint8_t reg_mask = write ? 
0x1 : 0x2; + uint64_t mask = 0x0; + int i; + + for (i = 0; i < size; i++) { + if (xive_tm_views[page_offset][reg_offset + i] & reg_mask) { + mask |= (uint64_t) 0xff << (8 * (size - i - 1)); + } + } + + return mask; +} + +static void xive_tm_raw_write(XiveTCTX *tctx, hwaddr offset, uint64_t value, + unsigned size) +{ + uint8_t ring_offset = offset & 0x30; + uint8_t reg_offset = offset & 0x3F; + uint64_t mask = xive_tm_mask(offset, size, true); + int i; + + /* + * Only 4 or 8 bytes stores are allowed and the User ring is + * excluded + */ + if (size < 4 || !mask || ring_offset == TM_QW0_USER) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid write access at TIMA @%" + HWADDR_PRIx"\n", offset); + return; + } + + /* + * Use the register offset for the raw values and filter out + * reserved values + */ + for (i = 0; i < size; i++) { + uint8_t byte_mask = (mask >> (8 * (size - i - 1))); + if (byte_mask) { + tctx->regs[reg_offset + i] = (value >> (8 * (size - i - 1))) & + byte_mask; + } + } +} + +static uint64_t xive_tm_raw_read(XiveTCTX *tctx, hwaddr offset, unsigned size) +{ + uint8_t ring_offset = offset & 0x30; + uint8_t reg_offset = offset & 0x3F; + uint64_t mask = xive_tm_mask(offset, size, false); + uint64_t ret; + int i; + + /* + * Only 4 or 8 bytes loads are allowed and the User ring is + * excluded + */ + if (size < 4 || !mask || ring_offset == TM_QW0_USER) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid read access at TIMA @%" + HWADDR_PRIx"\n", offset); + return -1; + } + + /* Use the register offset for the raw values */ + ret = 0; + for (i = 0; i < size; i++) { + ret |= (uint64_t) tctx->regs[reg_offset + i] << (8 * (size - i - 1)); + } + + /* filter out reserved values */ + return ret & mask; +} + +/* + * The TM context is mapped twice within each page. Stores and loads + * to the first mapping below 2K write and read the specified values + * without modification. The second mapping above 2K performs specific + * state changes (side effects) in addition to setting/returning the + * interrupt management area context of the processor thread. + */ +static uint64_t xive_tm_ack_os_reg(XivePresenter *xptr, XiveTCTX *tctx, + hwaddr offset, unsigned size) +{ + return xive_tctx_accept(tctx, TM_QW1_OS); +} + +static void xive_tm_set_os_cppr(XivePresenter *xptr, XiveTCTX *tctx, + hwaddr offset, uint64_t value, unsigned size) +{ + xive_tctx_set_cppr(tctx, TM_QW1_OS, value & 0xff); +} + +/* + * Adjust the IPB to allow a CPU to process event queues of other + * priorities during one physical interrupt cycle. 
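+ * A store of priority P sets the IPB bit matching P through + * xive_priority_to_ipb() and may raise a new OS exception via + * xive_tctx_ipb_update() below.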
+ */ +static void xive_tm_set_os_pending(XivePresenter *xptr, XiveTCTX *tctx, + hwaddr offset, uint64_t value, unsigned size) +{ + xive_tctx_ipb_update(tctx, TM_QW1_OS, xive_priority_to_ipb(value & 0xff)); +} + +static void xive_os_cam_decode(uint32_t cam, uint8_t *nvt_blk, + uint32_t *nvt_idx, bool *vo) +{ + if (nvt_blk) { + *nvt_blk = xive_nvt_blk(cam); + } + if (nvt_idx) { + *nvt_idx = xive_nvt_idx(cam); + } + if (vo) { + *vo = !!(cam & TM_QW1W2_VO); + } +} + +static uint32_t xive_tctx_get_os_cam(XiveTCTX *tctx, uint8_t *nvt_blk, + uint32_t *nvt_idx, bool *vo) +{ + uint32_t qw1w2 = xive_tctx_word2(&tctx->regs[TM_QW1_OS]); + uint32_t cam = be32_to_cpu(qw1w2); + + xive_os_cam_decode(cam, nvt_blk, nvt_idx, vo); + return qw1w2; +} + +static void xive_tctx_set_os_cam(XiveTCTX *tctx, uint32_t qw1w2) +{ + memcpy(&tctx->regs[TM_QW1_OS + TM_WORD2], &qw1w2, 4); +} + +static uint64_t xive_tm_pull_os_ctx(XivePresenter *xptr, XiveTCTX *tctx, + hwaddr offset, unsigned size) +{ + uint32_t qw1w2; + uint32_t qw1w2_new; + uint8_t nvt_blk; + uint32_t nvt_idx; + bool vo; + + qw1w2 = xive_tctx_get_os_cam(tctx, &nvt_blk, &nvt_idx, &vo); + + if (!vo) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: pulling invalid NVT %x/%x !?\n", + nvt_blk, nvt_idx); + } + + /* Invalidate CAM line */ + qw1w2_new = xive_set_field32(TM_QW1W2_VO, qw1w2, 0); + xive_tctx_set_os_cam(tctx, qw1w2_new); + return qw1w2; +} + +static void xive_tctx_need_resend(XiveRouter *xrtr, XiveTCTX *tctx, + uint8_t nvt_blk, uint32_t nvt_idx) +{ + XiveNVT nvt; + uint8_t ipb; + + /* + * Grab the associated NVT to pull the pending bits, and merge + * them with the IPB of the thread interrupt context registers + */ + if (xive_router_get_nvt(xrtr, nvt_blk, nvt_idx, &nvt)) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid NVT %x/%x\n", + nvt_blk, nvt_idx); + return; + } + + ipb = xive_get_field32(NVT_W4_IPB, nvt.w4); + + if (ipb) { + /* Reset the NVT value */ + nvt.w4 = xive_set_field32(NVT_W4_IPB, nvt.w4, 0); + xive_router_write_nvt(xrtr, nvt_blk, nvt_idx, &nvt, 4); + + /* Merge in current context */ + xive_tctx_ipb_update(tctx, TM_QW1_OS, ipb); + } +} + +/* + * Updating the OS CAM line can trigger a resend of interrupt + */ +static void xive_tm_push_os_ctx(XivePresenter *xptr, XiveTCTX *tctx, + hwaddr offset, uint64_t value, unsigned size) +{ + uint32_t cam = value; + uint32_t qw1w2 = cpu_to_be32(cam); + uint8_t nvt_blk; + uint32_t nvt_idx; + bool vo; + + xive_os_cam_decode(cam, &nvt_blk, &nvt_idx, &vo); + + /* First update the registers */ + xive_tctx_set_os_cam(tctx, qw1w2); + + /* Check the interrupt pending bits */ + if (vo) { + xive_tctx_need_resend(XIVE_ROUTER(xptr), tctx, nvt_blk, nvt_idx); + } +} + +/* + * Define a mapping of "special" operations depending on the TIMA page + * offset and the size of the operation. 
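+ * xive_tm_find_op() scans this table first; an entry also matches + * accesses coming from a more privileged (lower numbered) TIMA page. + * Sub-2K accesses that match no entry fall back to the raw register + * accessors xive_tm_raw_read() and xive_tm_raw_write().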
+ */ +typedef struct XiveTmOp { + uint8_t page_offset; + uint32_t op_offset; + unsigned size; + void (*write_handler)(XivePresenter *xptr, XiveTCTX *tctx, + hwaddr offset, + uint64_t value, unsigned size); + uint64_t (*read_handler)(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset, + unsigned size); +} XiveTmOp; + +static const XiveTmOp xive_tm_operations[] = { + /* + * MMIOs below 2K : raw values and special operations without side + * effects + */ + { XIVE_TM_OS_PAGE, TM_QW1_OS + TM_CPPR, 1, xive_tm_set_os_cppr, NULL }, + { XIVE_TM_HV_PAGE, TM_QW1_OS + TM_WORD2, 4, xive_tm_push_os_ctx, NULL }, + { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_CPPR, 1, xive_tm_set_hv_cppr, NULL }, + { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, xive_tm_vt_push, NULL }, + { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, NULL, xive_tm_vt_poll }, + + /* MMIOs above 2K : special operations with side effects */ + { XIVE_TM_OS_PAGE, TM_SPC_ACK_OS_REG, 2, NULL, xive_tm_ack_os_reg }, + { XIVE_TM_OS_PAGE, TM_SPC_SET_OS_PENDING, 1, xive_tm_set_os_pending, NULL }, + { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 4, NULL, xive_tm_pull_os_ctx }, + { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 8, NULL, xive_tm_pull_os_ctx }, + { XIVE_TM_HV_PAGE, TM_SPC_ACK_HV_REG, 2, NULL, xive_tm_ack_hv_reg }, + { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 4, NULL, xive_tm_pull_pool_ctx }, + { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 8, NULL, xive_tm_pull_pool_ctx }, +}; + +static const XiveTmOp *xive_tm_find_op(hwaddr offset, unsigned size, bool write) +{ + uint8_t page_offset = (offset >> TM_SHIFT) & 0x3; + uint32_t op_offset = offset & 0xFFF; + int i; + + for (i = 0; i < ARRAY_SIZE(xive_tm_operations); i++) { + const XiveTmOp *xto = &xive_tm_operations[i]; + + /* Accesses done from a more privileged TIMA page is allowed */ + if (xto->page_offset >= page_offset && + xto->op_offset == op_offset && + xto->size == size && + ((write && xto->write_handler) || (!write && xto->read_handler))) { + return xto; + } + } + return NULL; +} + +/* + * TIMA MMIO handlers + */ +void xive_tctx_tm_write(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset, + uint64_t value, unsigned size) +{ + const XiveTmOp *xto; + + trace_xive_tctx_tm_write(offset, size, value); + + /* + * TODO: check V bit in Q[0-3]W2 + */ + + /* + * First, check for special operations in the 2K region + */ + if (offset & 0x800) { + xto = xive_tm_find_op(offset, size, true); + if (!xto) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid write access at TIMA " + "@%"HWADDR_PRIx"\n", offset); + } else { + xto->write_handler(xptr, tctx, offset, value, size); + } + return; + } + + /* + * Then, for special operations in the region below 2K. + */ + xto = xive_tm_find_op(offset, size, true); + if (xto) { + xto->write_handler(xptr, tctx, offset, value, size); + return; + } + + /* + * Finish with raw access to the register values + */ + xive_tm_raw_write(tctx, offset, value, size); +} + +uint64_t xive_tctx_tm_read(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset, + unsigned size) +{ + const XiveTmOp *xto; + uint64_t ret; + + /* + * TODO: check V bit in Q[0-3]W2 + */ + + /* + * First, check for special operations in the 2K region + */ + if (offset & 0x800) { + xto = xive_tm_find_op(offset, size, false); + if (!xto) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid read access to TIMA" + "@%"HWADDR_PRIx"\n", offset); + return -1; + } + ret = xto->read_handler(xptr, tctx, offset, size); + goto out; + } + + /* + * Then, for special operations in the region below 2K. 
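+ * (Currently the only below-2K read operation in xive_tm_operations[] + * is the VT poll at TM_QW3_HV_PHYS + TM_WORD2.)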
+ */ + xto = xive_tm_find_op(offset, size, false); + if (xto) { + ret = xto->read_handler(xptr, tctx, offset, size); + goto out; + } + + /* + * Finish with raw access to the register values + */ + ret = xive_tm_raw_read(tctx, offset, size); +out: + trace_xive_tctx_tm_read(offset, size, ret); + return ret; +} + +static char *xive_tctx_ring_print(uint8_t *ring) +{ + uint32_t w2 = xive_tctx_word2(ring); + + return g_strdup_printf("%02x %02x %02x %02x %02x " + "%02x %02x %02x %08x", + ring[TM_NSR], ring[TM_CPPR], ring[TM_IPB], ring[TM_LSMFB], + ring[TM_ACK_CNT], ring[TM_INC], ring[TM_AGE], ring[TM_PIPR], + be32_to_cpu(w2)); +} + +static const char * const xive_tctx_ring_names[] = { + "USER", "OS", "POOL", "PHYS", +}; + +/* + * kvm_irqchip_in_kernel() will cause the compiler to turn this + * info a nop if CONFIG_KVM isn't defined. + */ +#define xive_in_kernel(xptr) \ + (kvm_irqchip_in_kernel() && \ + ({ \ + XivePresenterClass *xpc = XIVE_PRESENTER_GET_CLASS(xptr); \ + xpc->in_kernel ? xpc->in_kernel(xptr) : false; \ + })) + +void xive_tctx_pic_print_info(XiveTCTX *tctx, Monitor *mon) +{ + int cpu_index; + int i; + + /* Skip partially initialized vCPUs. This can happen on sPAPR when vCPUs + * are hot plugged or unplugged. + */ + if (!tctx) { + return; + } + + cpu_index = tctx->cs ? tctx->cs->cpu_index : -1; + + if (xive_in_kernel(tctx->xptr)) { + Error *local_err = NULL; + + kvmppc_xive_cpu_synchronize_state(tctx, &local_err); + if (local_err) { + error_report_err(local_err); + return; + } + } + + monitor_printf(mon, "CPU[%04x]: QW NSR CPPR IPB LSMFB ACK# INC AGE PIPR" + " W2\n", cpu_index); + + for (i = 0; i < XIVE_TM_RING_COUNT; i++) { + char *s = xive_tctx_ring_print(&tctx->regs[i * XIVE_TM_RING_SIZE]); + monitor_printf(mon, "CPU[%04x]: %4s %s\n", cpu_index, + xive_tctx_ring_names[i], s); + g_free(s); + } +} + +void xive_tctx_reset(XiveTCTX *tctx) +{ + memset(tctx->regs, 0, sizeof(tctx->regs)); + + /* Set some defaults */ + tctx->regs[TM_QW1_OS + TM_LSMFB] = 0xFF; + tctx->regs[TM_QW1_OS + TM_ACK_CNT] = 0xFF; + tctx->regs[TM_QW1_OS + TM_AGE] = 0xFF; + + /* + * Initialize PIPR to 0xFF to avoid phantom interrupts when the + * CPPR is first set. 
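+ * ipb_to_pipr() maps an empty IPB to 0xFF, and xive_tctx_notify() + * only fires when PIPR is strictly more favored (lower) than CPPR, + * so nothing can be presented until an IPB bit is set.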
+ */ + tctx->regs[TM_QW1_OS + TM_PIPR] = + ipb_to_pipr(tctx->regs[TM_QW1_OS + TM_IPB]); + tctx->regs[TM_QW3_HV_PHYS + TM_PIPR] = + ipb_to_pipr(tctx->regs[TM_QW3_HV_PHYS + TM_IPB]); +} + +static void xive_tctx_realize(DeviceState *dev, Error **errp) +{ + XiveTCTX *tctx = XIVE_TCTX(dev); + PowerPCCPU *cpu; + CPUPPCState *env; + + assert(tctx->cs); + assert(tctx->xptr); + + cpu = POWERPC_CPU(tctx->cs); + env = &cpu->env; + switch (PPC_INPUT(env)) { + case PPC_FLAGS_INPUT_POWER9: + tctx->hv_output = env->irq_inputs[POWER9_INPUT_HINT]; + tctx->os_output = env->irq_inputs[POWER9_INPUT_INT]; + break; + + default: + error_setg(errp, "XIVE interrupt controller does not support " + "this CPU bus model"); + return; + } + + /* Connect the presenter to the VCPU (required for CPU hotplug) */ + if (xive_in_kernel(tctx->xptr)) { + if (kvmppc_xive_cpu_connect(tctx, errp) < 0) { + return; + } + } +} + +static int vmstate_xive_tctx_pre_save(void *opaque) +{ + XiveTCTX *tctx = XIVE_TCTX(opaque); + Error *local_err = NULL; + int ret; + + if (xive_in_kernel(tctx->xptr)) { + ret = kvmppc_xive_cpu_get_state(tctx, &local_err); + if (ret < 0) { + error_report_err(local_err); + return ret; + } + } + + return 0; +} + +static int vmstate_xive_tctx_post_load(void *opaque, int version_id) +{ + XiveTCTX *tctx = XIVE_TCTX(opaque); + Error *local_err = NULL; + int ret; + + if (xive_in_kernel(tctx->xptr)) { + /* + * Required for hotplugged CPU, for which the state comes + * after all states of the machine. + */ + ret = kvmppc_xive_cpu_set_state(tctx, &local_err); + if (ret < 0) { + error_report_err(local_err); + return ret; + } + } + + return 0; +} + +static const VMStateDescription vmstate_xive_tctx = { + .name = TYPE_XIVE_TCTX, + .version_id = 1, + .minimum_version_id = 1, + .pre_save = vmstate_xive_tctx_pre_save, + .post_load = vmstate_xive_tctx_post_load, + .fields = (VMStateField[]) { + VMSTATE_BUFFER(regs, XiveTCTX), + VMSTATE_END_OF_LIST() + }, +}; + +static Property xive_tctx_properties[] = { + DEFINE_PROP_LINK("cpu", XiveTCTX, cs, TYPE_CPU, CPUState *), + DEFINE_PROP_LINK("presenter", XiveTCTX, xptr, TYPE_XIVE_PRESENTER, + XivePresenter *), + DEFINE_PROP_END_OF_LIST(), +}; + +static void xive_tctx_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->desc = "XIVE Interrupt Thread Context"; + dc->realize = xive_tctx_realize; + dc->vmsd = &vmstate_xive_tctx; + device_class_set_props(dc, xive_tctx_properties); + /* + * Reason: part of XIVE interrupt controller, needs to be wired up + * by xive_tctx_create(). 
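+ * xive_tctx_create() below sets the "cpu" and "presenter" links and + * realizes the device, one thread context per vCPU.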
+ */ + dc->user_creatable = false; +} + +static const TypeInfo xive_tctx_info = { + .name = TYPE_XIVE_TCTX, + .parent = TYPE_DEVICE, + .instance_size = sizeof(XiveTCTX), + .class_init = xive_tctx_class_init, +}; + +Object *xive_tctx_create(Object *cpu, XivePresenter *xptr, Error **errp) +{ + Object *obj; + + obj = object_new(TYPE_XIVE_TCTX); + object_property_add_child(cpu, TYPE_XIVE_TCTX, obj); + object_unref(obj); + object_property_set_link(obj, "cpu", cpu, &error_abort); + object_property_set_link(obj, "presenter", OBJECT(xptr), &error_abort); + if (!qdev_realize(DEVICE(obj), NULL, errp)) { + object_unparent(obj); + return NULL; + } + return obj; +} + +void xive_tctx_destroy(XiveTCTX *tctx) +{ + Object *obj = OBJECT(tctx); + + object_unparent(obj); +} + +/* + * XIVE ESB helpers + */ + +uint8_t xive_esb_set(uint8_t *pq, uint8_t value) +{ + uint8_t old_pq = *pq & 0x3; + + *pq &= ~0x3; + *pq |= value & 0x3; + + return old_pq; +} + +bool xive_esb_trigger(uint8_t *pq) +{ + uint8_t old_pq = *pq & 0x3; + + switch (old_pq) { + case XIVE_ESB_RESET: + xive_esb_set(pq, XIVE_ESB_PENDING); + return true; + case XIVE_ESB_PENDING: + case XIVE_ESB_QUEUED: + xive_esb_set(pq, XIVE_ESB_QUEUED); + return false; + case XIVE_ESB_OFF: + xive_esb_set(pq, XIVE_ESB_OFF); + return false; + default: + g_assert_not_reached(); + } +} + +bool xive_esb_eoi(uint8_t *pq) +{ + uint8_t old_pq = *pq & 0x3; + + switch (old_pq) { + case XIVE_ESB_RESET: + case XIVE_ESB_PENDING: + xive_esb_set(pq, XIVE_ESB_RESET); + return false; + case XIVE_ESB_QUEUED: + xive_esb_set(pq, XIVE_ESB_PENDING); + return true; + case XIVE_ESB_OFF: + xive_esb_set(pq, XIVE_ESB_OFF); + return false; + default: + g_assert_not_reached(); + } +} + +/* + * XIVE Interrupt Source (or IVSE) + */ + +uint8_t xive_source_esb_get(XiveSource *xsrc, uint32_t srcno) +{ + assert(srcno < xsrc->nr_irqs); + + return xsrc->status[srcno] & 0x3; +} + +uint8_t xive_source_esb_set(XiveSource *xsrc, uint32_t srcno, uint8_t pq) +{ + assert(srcno < xsrc->nr_irqs); + + return xive_esb_set(&xsrc->status[srcno], pq); +} + +/* + * Returns whether the event notification should be forwarded. + */ +static bool xive_source_lsi_trigger(XiveSource *xsrc, uint32_t srcno) +{ + uint8_t old_pq = xive_source_esb_get(xsrc, srcno); + + xive_source_set_asserted(xsrc, srcno, true); + + switch (old_pq) { + case XIVE_ESB_RESET: + xive_source_esb_set(xsrc, srcno, XIVE_ESB_PENDING); + return true; + default: + return false; + } +} + +/* + * Returns whether the event notification should be forwarded. + */ +static bool xive_source_esb_trigger(XiveSource *xsrc, uint32_t srcno) +{ + bool ret; + + assert(srcno < xsrc->nr_irqs); + + ret = xive_esb_trigger(&xsrc->status[srcno]); + + if (xive_source_irq_is_lsi(xsrc, srcno) && + xive_source_esb_get(xsrc, srcno) == XIVE_ESB_QUEUED) { + qemu_log_mask(LOG_GUEST_ERROR, + "XIVE: queued an event on LSI IRQ %d\n", srcno); + } + + return ret; +} + +/* + * Returns whether the event notification should be forwarded. 
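+ * PQ transitions on an EOI (see xive_esb_eoi() above): + * 00 (RESET) -> 00 : not forwarded + * 10 (PENDING) -> 00 : not forwarded + * 11 (QUEUED) -> 10 : forwarded, a coalesced event is replayed + * 01 (OFF) -> 01 : not forwarded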
+ */ +static bool xive_source_esb_eoi(XiveSource *xsrc, uint32_t srcno) +{ + bool ret; + + assert(srcno < xsrc->nr_irqs); + + ret = xive_esb_eoi(&xsrc->status[srcno]); + + /* + * LSI sources do not set the Q bit but they can still be + * asserted, in which case we should forward a new event + * notification + */ + if (xive_source_irq_is_lsi(xsrc, srcno) && + xive_source_is_asserted(xsrc, srcno)) { + ret = xive_source_lsi_trigger(xsrc, srcno); + } + + return ret; +} + +/* + * Forward the source event notification to the Router + */ +static void xive_source_notify(XiveSource *xsrc, int srcno) +{ + XiveNotifierClass *xnc = XIVE_NOTIFIER_GET_CLASS(xsrc->xive); + + if (xnc->notify) { + xnc->notify(xsrc->xive, srcno); + } +} + +/* + * In a two pages ESB MMIO setting, even page is the trigger page, odd + * page is for management + */ +static inline bool addr_is_even(hwaddr addr, uint32_t shift) +{ + return !((addr >> shift) & 1); +} + +static inline bool xive_source_is_trigger_page(XiveSource *xsrc, hwaddr addr) +{ + return xive_source_esb_has_2page(xsrc) && + addr_is_even(addr, xsrc->esb_shift - 1); +} + +/* + * ESB MMIO loads + * Trigger page Management/EOI page + * + * ESB MMIO setting 2 pages 1 or 2 pages + * + * 0x000 .. 0x3FF -1 EOI and return 0|1 + * 0x400 .. 0x7FF -1 EOI and return 0|1 + * 0x800 .. 0xBFF -1 return PQ + * 0xC00 .. 0xCFF -1 return PQ and atomically PQ=00 + * 0xD00 .. 0xDFF -1 return PQ and atomically PQ=01 + * 0xE00 .. 0xDFF -1 return PQ and atomically PQ=10 + * 0xF00 .. 0xDFF -1 return PQ and atomically PQ=11 + */ +static uint64_t xive_source_esb_read(void *opaque, hwaddr addr, unsigned size) +{ + XiveSource *xsrc = XIVE_SOURCE(opaque); + uint32_t offset = addr & 0xFFF; + uint32_t srcno = addr >> xsrc->esb_shift; + uint64_t ret = -1; + + /* In a two pages ESB MMIO setting, trigger page should not be read */ + if (xive_source_is_trigger_page(xsrc, addr)) { + qemu_log_mask(LOG_GUEST_ERROR, + "XIVE: invalid load on IRQ %d trigger page at " + "0x%"HWADDR_PRIx"\n", srcno, addr); + return -1; + } + + switch (offset) { + case XIVE_ESB_LOAD_EOI ... XIVE_ESB_LOAD_EOI + 0x7FF: + ret = xive_source_esb_eoi(xsrc, srcno); + + /* Forward the source event notification for routing */ + if (ret) { + xive_source_notify(xsrc, srcno); + } + break; + + case XIVE_ESB_GET ... XIVE_ESB_GET + 0x3FF: + ret = xive_source_esb_get(xsrc, srcno); + break; + + case XIVE_ESB_SET_PQ_00 ... XIVE_ESB_SET_PQ_00 + 0x0FF: + case XIVE_ESB_SET_PQ_01 ... XIVE_ESB_SET_PQ_01 + 0x0FF: + case XIVE_ESB_SET_PQ_10 ... XIVE_ESB_SET_PQ_10 + 0x0FF: + case XIVE_ESB_SET_PQ_11 ... XIVE_ESB_SET_PQ_11 + 0x0FF: + ret = xive_source_esb_set(xsrc, srcno, (offset >> 8) & 0x3); + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid ESB load addr %x\n", + offset); + } + + trace_xive_source_esb_read(addr, srcno, ret); + + return ret; +} + +/* + * ESB MMIO stores + * Trigger page Management/EOI page + * + * ESB MMIO setting 2 pages 1 or 2 pages + * + * 0x000 .. 0x3FF Trigger Trigger + * 0x400 .. 0x7FF Trigger EOI + * 0x800 .. 0xBFF Trigger undefined + * 0xC00 .. 0xCFF Trigger PQ=00 + * 0xD00 .. 0xDFF Trigger PQ=01 + * 0xE00 .. 0xDFF Trigger PQ=10 + * 0xF00 .. 
0xDFF Trigger PQ=11 + */ +static void xive_source_esb_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + XiveSource *xsrc = XIVE_SOURCE(opaque); + uint32_t offset = addr & 0xFFF; + uint32_t srcno = addr >> xsrc->esb_shift; + bool notify = false; + + trace_xive_source_esb_write(addr, srcno, value); + + /* In a two pages ESB MMIO setting, trigger page only triggers */ + if (xive_source_is_trigger_page(xsrc, addr)) { + notify = xive_source_esb_trigger(xsrc, srcno); + goto out; + } + + switch (offset) { + case 0 ... 0x3FF: + notify = xive_source_esb_trigger(xsrc, srcno); + break; + + case XIVE_ESB_STORE_EOI ... XIVE_ESB_STORE_EOI + 0x3FF: + if (!(xsrc->esb_flags & XIVE_SRC_STORE_EOI)) { + qemu_log_mask(LOG_GUEST_ERROR, + "XIVE: invalid Store EOI for IRQ %d\n", srcno); + return; + } + + notify = xive_source_esb_eoi(xsrc, srcno); + break; + + case XIVE_ESB_SET_PQ_00 ... XIVE_ESB_SET_PQ_00 + 0x0FF: + case XIVE_ESB_SET_PQ_01 ... XIVE_ESB_SET_PQ_01 + 0x0FF: + case XIVE_ESB_SET_PQ_10 ... XIVE_ESB_SET_PQ_10 + 0x0FF: + case XIVE_ESB_SET_PQ_11 ... XIVE_ESB_SET_PQ_11 + 0x0FF: + xive_source_esb_set(xsrc, srcno, (offset >> 8) & 0x3); + break; + + default: + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid ESB write addr %x\n", + offset); + return; + } + +out: + /* Forward the source event notification for routing */ + if (notify) { + xive_source_notify(xsrc, srcno); + } +} + +static const MemoryRegionOps xive_source_esb_ops = { + .read = xive_source_esb_read, + .write = xive_source_esb_write, + .endianness = DEVICE_BIG_ENDIAN, + .valid = { + .min_access_size = 8, + .max_access_size = 8, + }, + .impl = { + .min_access_size = 8, + .max_access_size = 8, + }, +}; + +void xive_source_set_irq(void *opaque, int srcno, int val) +{ + XiveSource *xsrc = XIVE_SOURCE(opaque); + bool notify = false; + + if (xive_source_irq_is_lsi(xsrc, srcno)) { + if (val) { + notify = xive_source_lsi_trigger(xsrc, srcno); + } else { + xive_source_set_asserted(xsrc, srcno, false); + } + } else { + if (val) { + notify = xive_source_esb_trigger(xsrc, srcno); + } + } + + /* Forward the source event notification for routing */ + if (notify) { + xive_source_notify(xsrc, srcno); + } +} + +void xive_source_pic_print_info(XiveSource *xsrc, uint32_t offset, Monitor *mon) +{ + int i; + + for (i = 0; i < xsrc->nr_irqs; i++) { + uint8_t pq = xive_source_esb_get(xsrc, i); + + if (pq == XIVE_ESB_OFF) { + continue; + } + + monitor_printf(mon, " %08x %s %c%c%c\n", i + offset, + xive_source_irq_is_lsi(xsrc, i) ? "LSI" : "MSI", + pq & XIVE_ESB_VAL_P ? 'P' : '-', + pq & XIVE_ESB_VAL_Q ? 'Q' : '-', + xive_source_is_asserted(xsrc, i) ? 
'A' : ' '); + } +} + +static void xive_source_reset(void *dev) +{ + XiveSource *xsrc = XIVE_SOURCE(dev); + + /* Do not clear the LSI bitmap */ + + /* PQs are initialized to 0b01 (Q=1) which corresponds to "ints off" */ + memset(xsrc->status, XIVE_ESB_OFF, xsrc->nr_irqs); +} + +static void xive_source_realize(DeviceState *dev, Error **errp) +{ + XiveSource *xsrc = XIVE_SOURCE(dev); + size_t esb_len = xive_source_esb_len(xsrc); + + assert(xsrc->xive); + + if (!xsrc->nr_irqs) { + error_setg(errp, "Number of interrupt needs to be greater than 0"); + return; + } + + if (xsrc->esb_shift != XIVE_ESB_4K && + xsrc->esb_shift != XIVE_ESB_4K_2PAGE && + xsrc->esb_shift != XIVE_ESB_64K && + xsrc->esb_shift != XIVE_ESB_64K_2PAGE) { + error_setg(errp, "Invalid ESB shift setting"); + return; + } + + xsrc->status = g_malloc0(xsrc->nr_irqs); + xsrc->lsi_map = bitmap_new(xsrc->nr_irqs); + + memory_region_init(&xsrc->esb_mmio, OBJECT(xsrc), "xive.esb", esb_len); + memory_region_init_io(&xsrc->esb_mmio_emulated, OBJECT(xsrc), + &xive_source_esb_ops, xsrc, "xive.esb-emulated", + esb_len); + memory_region_add_subregion(&xsrc->esb_mmio, 0, &xsrc->esb_mmio_emulated); + + qemu_register_reset(xive_source_reset, dev); +} + +static const VMStateDescription vmstate_xive_source = { + .name = TYPE_XIVE_SOURCE, + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32_EQUAL(nr_irqs, XiveSource, NULL), + VMSTATE_VBUFFER_UINT32(status, XiveSource, 1, NULL, nr_irqs), + VMSTATE_END_OF_LIST() + }, +}; + +/* + * The default XIVE interrupt source setting for the ESB MMIOs is two + * 64k pages without Store EOI, to be in sync with KVM. + */ +static Property xive_source_properties[] = { + DEFINE_PROP_UINT64("flags", XiveSource, esb_flags, 0), + DEFINE_PROP_UINT32("nr-irqs", XiveSource, nr_irqs, 0), + DEFINE_PROP_UINT32("shift", XiveSource, esb_shift, XIVE_ESB_64K_2PAGE), + DEFINE_PROP_LINK("xive", XiveSource, xive, TYPE_XIVE_NOTIFIER, + XiveNotifier *), + DEFINE_PROP_END_OF_LIST(), +}; + +static void xive_source_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->desc = "XIVE Interrupt Source"; + device_class_set_props(dc, xive_source_properties); + dc->realize = xive_source_realize; + dc->vmsd = &vmstate_xive_source; + /* + * Reason: part of XIVE interrupt controller, needs to be wired up, + * e.g. by spapr_xive_instance_init(). + */ + dc->user_creatable = false; +} + +static const TypeInfo xive_source_info = { + .name = TYPE_XIVE_SOURCE, + .parent = TYPE_DEVICE, + .instance_size = sizeof(XiveSource), + .class_init = xive_source_class_init, +}; + +/* + * XiveEND helpers + */ + +void xive_end_queue_pic_print_info(XiveEND *end, uint32_t width, Monitor *mon) +{ + uint64_t qaddr_base = xive_end_qaddr(end); + uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0); + uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1); + uint32_t qentries = 1 << (qsize + 10); + int i; + + /* + * print out the [ (qindex - (width - 1)) .. (qindex + 1)] window + */ + monitor_printf(mon, " [ "); + qindex = (qindex - (width - 1)) & (qentries - 1); + for (i = 0; i < width; i++) { + uint64_t qaddr = qaddr_base + (qindex << 2); + uint32_t qdata = -1; + + if (dma_memory_read(&address_space_memory, qaddr, &qdata, + sizeof(qdata))) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to read EQ @0x%" + HWADDR_PRIx "\n", qaddr); + return; + } + monitor_printf(mon, "%s%08x ", i == width - 1 ? 
"^" : "", + be32_to_cpu(qdata)); + qindex = (qindex + 1) & (qentries - 1); + } + monitor_printf(mon, "]"); +} + +void xive_end_pic_print_info(XiveEND *end, uint32_t end_idx, Monitor *mon) +{ + uint64_t qaddr_base = xive_end_qaddr(end); + uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1); + uint32_t qgen = xive_get_field32(END_W1_GENERATION, end->w1); + uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0); + uint32_t qentries = 1 << (qsize + 10); + + uint32_t nvt_blk = xive_get_field32(END_W6_NVT_BLOCK, end->w6); + uint32_t nvt_idx = xive_get_field32(END_W6_NVT_INDEX, end->w6); + uint8_t priority = xive_get_field32(END_W7_F0_PRIORITY, end->w7); + uint8_t pq; + + if (!xive_end_is_valid(end)) { + return; + } + + pq = xive_get_field32(END_W1_ESn, end->w1); + + monitor_printf(mon, " %08x %c%c %c%c%c%c%c%c%c%c prio:%d nvt:%02x/%04x", + end_idx, + pq & XIVE_ESB_VAL_P ? 'P' : '-', + pq & XIVE_ESB_VAL_Q ? 'Q' : '-', + xive_end_is_valid(end) ? 'v' : '-', + xive_end_is_enqueue(end) ? 'q' : '-', + xive_end_is_notify(end) ? 'n' : '-', + xive_end_is_backlog(end) ? 'b' : '-', + xive_end_is_escalate(end) ? 'e' : '-', + xive_end_is_uncond_escalation(end) ? 'u' : '-', + xive_end_is_silent_escalation(end) ? 's' : '-', + xive_end_is_firmware(end) ? 'f' : '-', + priority, nvt_blk, nvt_idx); + + if (qaddr_base) { + monitor_printf(mon, " eq:@%08"PRIx64"% 6d/%5d ^%d", + qaddr_base, qindex, qentries, qgen); + xive_end_queue_pic_print_info(end, 6, mon); + } + monitor_printf(mon, "\n"); +} + +static void xive_end_enqueue(XiveEND *end, uint32_t data) +{ + uint64_t qaddr_base = xive_end_qaddr(end); + uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0); + uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1); + uint32_t qgen = xive_get_field32(END_W1_GENERATION, end->w1); + + uint64_t qaddr = qaddr_base + (qindex << 2); + uint32_t qdata = cpu_to_be32((qgen << 31) | (data & 0x7fffffff)); + uint32_t qentries = 1 << (qsize + 10); + + if (dma_memory_write(&address_space_memory, qaddr, &qdata, sizeof(qdata))) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to write END data @0x%" + HWADDR_PRIx "\n", qaddr); + return; + } + + qindex = (qindex + 1) & (qentries - 1); + if (qindex == 0) { + qgen ^= 1; + end->w1 = xive_set_field32(END_W1_GENERATION, end->w1, qgen); + } + end->w1 = xive_set_field32(END_W1_PAGE_OFF, end->w1, qindex); +} + +void xive_end_eas_pic_print_info(XiveEND *end, uint32_t end_idx, + Monitor *mon) +{ + XiveEAS *eas = (XiveEAS *) &end->w4; + uint8_t pq; + + if (!xive_end_is_escalate(end)) { + return; + } + + pq = xive_get_field32(END_W1_ESe, end->w1); + + monitor_printf(mon, " %08x %c%c %c%c end:%02x/%04x data:%08x\n", + end_idx, + pq & XIVE_ESB_VAL_P ? 'P' : '-', + pq & XIVE_ESB_VAL_Q ? 'Q' : '-', + xive_eas_is_valid(eas) ? 'V' : ' ', + xive_eas_is_masked(eas) ? 'M' : ' ', + (uint8_t) xive_get_field64(EAS_END_BLOCK, eas->w), + (uint32_t) xive_get_field64(EAS_END_INDEX, eas->w), + (uint32_t) xive_get_field64(EAS_END_DATA, eas->w)); +} + +/* + * XIVE Router (aka. 
Virtualization Controller or IVRE) + */ + +int xive_router_get_eas(XiveRouter *xrtr, uint8_t eas_blk, uint32_t eas_idx, + XiveEAS *eas) +{ + XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr); + + return xrc->get_eas(xrtr, eas_blk, eas_idx, eas); +} + +int xive_router_get_end(XiveRouter *xrtr, uint8_t end_blk, uint32_t end_idx, + XiveEND *end) +{ + XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr); + + return xrc->get_end(xrtr, end_blk, end_idx, end); +} + +int xive_router_write_end(XiveRouter *xrtr, uint8_t end_blk, uint32_t end_idx, + XiveEND *end, uint8_t word_number) +{ + XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr); + + return xrc->write_end(xrtr, end_blk, end_idx, end, word_number); +} + +int xive_router_get_nvt(XiveRouter *xrtr, uint8_t nvt_blk, uint32_t nvt_idx, + XiveNVT *nvt) +{ + XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr); + + return xrc->get_nvt(xrtr, nvt_blk, nvt_idx, nvt); +} + +int xive_router_write_nvt(XiveRouter *xrtr, uint8_t nvt_blk, uint32_t nvt_idx, + XiveNVT *nvt, uint8_t word_number) +{ + XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr); + + return xrc->write_nvt(xrtr, nvt_blk, nvt_idx, nvt, word_number); +} + +static int xive_router_get_block_id(XiveRouter *xrtr) +{ + XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr); + + return xrc->get_block_id(xrtr); +} + +static void xive_router_realize(DeviceState *dev, Error **errp) +{ + XiveRouter *xrtr = XIVE_ROUTER(dev); + + assert(xrtr->xfb); +} + +/* + * Encode the HW CAM line in the block group mode format : + * + * chip << 19 | 0000000 0 0001 thread (7Bit) + */ +static uint32_t xive_tctx_hw_cam_line(XivePresenter *xptr, XiveTCTX *tctx) +{ + CPUPPCState *env = &POWERPC_CPU(tctx->cs)->env; + uint32_t pir = env->spr_cb[SPR_PIR].default_value; + uint8_t blk = xive_router_get_block_id(XIVE_ROUTER(xptr)); + + return xive_nvt_cam_line(blk, 1 << 7 | (pir & 0x7f)); +} + +/* + * The thread context register words are in big-endian format. + */ +int xive_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx, + uint8_t format, + uint8_t nvt_blk, uint32_t nvt_idx, + bool cam_ignore, uint32_t logic_serv) +{ + uint32_t cam = xive_nvt_cam_line(nvt_blk, nvt_idx); + uint32_t qw3w2 = xive_tctx_word2(&tctx->regs[TM_QW3_HV_PHYS]); + uint32_t qw2w2 = xive_tctx_word2(&tctx->regs[TM_QW2_HV_POOL]); + uint32_t qw1w2 = xive_tctx_word2(&tctx->regs[TM_QW1_OS]); + uint32_t qw0w2 = xive_tctx_word2(&tctx->regs[TM_QW0_USER]); + + /* + * TODO (PowerNV): ignore mode. The low order bits of the NVT + * identifier are ignored in the "CAM" match. 
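+ * The rings are checked below from most to least privileged: PHYS, + * then POOL, then OS. The USER ring is only reachable with format 1.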
+ */ + + if (format == 0) { + if (cam_ignore == true) { + /* + * F=0 & i=1: Logical server notification (bits ignored at + * the end of the NVT identifier) + */ + qemu_log_mask(LOG_UNIMP, "XIVE: no support for LS NVT %x/%x\n", + nvt_blk, nvt_idx); + return -1; + } + + /* F=0 & i=0: Specific NVT notification */ + + /* PHYS ring */ + if ((be32_to_cpu(qw3w2) & TM_QW3W2_VT) && + cam == xive_tctx_hw_cam_line(xptr, tctx)) { + return TM_QW3_HV_PHYS; + } + + /* HV POOL ring */ + if ((be32_to_cpu(qw2w2) & TM_QW2W2_VP) && + cam == xive_get_field32(TM_QW2W2_POOL_CAM, qw2w2)) { + return TM_QW2_HV_POOL; + } + + /* OS ring */ + if ((be32_to_cpu(qw1w2) & TM_QW1W2_VO) && + cam == xive_get_field32(TM_QW1W2_OS_CAM, qw1w2)) { + return TM_QW1_OS; + } + } else { + /* F=1 : User level Event-Based Branch (EBB) notification */ + + /* USER ring */ + if ((be32_to_cpu(qw1w2) & TM_QW1W2_VO) && + (cam == xive_get_field32(TM_QW1W2_OS_CAM, qw1w2)) && + (be32_to_cpu(qw0w2) & TM_QW0W2_VU) && + (logic_serv == xive_get_field32(TM_QW0W2_LOGIC_SERV, qw0w2))) { + return TM_QW0_USER; + } + } + return -1; +} + +/* + * This is our simple Xive Presenter Engine model. It is merged in the + * Router as it does not require an extra object. + * + * It receives notification requests sent by the IVRE to find one + * matching NVT (or more) dispatched on the processor threads. In case + * of a single NVT notification, the process is abreviated and the + * thread is signaled if a match is found. In case of a logical server + * notification (bits ignored at the end of the NVT identifier), the + * IVPE and IVRE select a winning thread using different filters. This + * involves 2 or 3 exchanges on the PowerBus that the model does not + * support. + * + * The parameters represent what is sent on the PowerBus + */ +bool xive_presenter_notify(XiveFabric *xfb, uint8_t format, + uint8_t nvt_blk, uint32_t nvt_idx, + bool cam_ignore, uint8_t priority, + uint32_t logic_serv) +{ + XiveFabricClass *xfc = XIVE_FABRIC_GET_CLASS(xfb); + XiveTCTXMatch match = { .tctx = NULL, .ring = 0 }; + int count; + + /* + * Ask the machine to scan the interrupt controllers for a match + */ + count = xfc->match_nvt(xfb, format, nvt_blk, nvt_idx, cam_ignore, + priority, logic_serv, &match); + if (count < 0) { + return false; + } + + /* handle CPU exception delivery */ + if (count) { + trace_xive_presenter_notify(nvt_blk, nvt_idx, match.ring); + xive_tctx_ipb_update(match.tctx, match.ring, + xive_priority_to_ipb(priority)); + } + + return !!count; +} + +/* + * Notification using the END ESe/ESn bit (Event State Buffer for + * escalation and notification). Provide further coalescing in the + * Router. + */ +static bool xive_router_end_es_notify(XiveRouter *xrtr, uint8_t end_blk, + uint32_t end_idx, XiveEND *end, + uint32_t end_esmask) +{ + uint8_t pq = xive_get_field32(end_esmask, end->w1); + bool notify = xive_esb_trigger(&pq); + + if (pq != xive_get_field32(end_esmask, end->w1)) { + end->w1 = xive_set_field32(end_esmask, end->w1, pq); + xive_router_write_end(xrtr, end_blk, end_idx, end, 1); + } + + /* ESe/n[Q]=1 : end of notification */ + return notify; +} + +/* + * An END trigger can come from an event trigger (IPI or HW) or from + * another chip. We don't model the PowerBus but the END trigger + * message has the same parameters than in the function below. 
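+ * In short, the routing below performs: END lookup, optional enqueue + * of the event data, ESn coalescing, NVT lookup, presenter match, and + * finally backlog recording and/or ESe escalation when no matching + * thread is dispatched.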
+ */ +static void xive_router_end_notify(XiveRouter *xrtr, uint8_t end_blk, + uint32_t end_idx, uint32_t end_data) +{ + XiveEND end; + uint8_t priority; + uint8_t format; + uint8_t nvt_blk; + uint32_t nvt_idx; + XiveNVT nvt; + bool found; + + /* END cache lookup */ + if (xive_router_get_end(xrtr, end_blk, end_idx, &end)) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No END %x/%x\n", end_blk, + end_idx); + return; + } + + if (!xive_end_is_valid(&end)) { + trace_xive_router_end_notify(end_blk, end_idx, end_data); + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: END %x/%x is invalid\n", + end_blk, end_idx); + return; + } + + if (xive_end_is_enqueue(&end)) { + xive_end_enqueue(&end, end_data); + /* Enqueuing event data modifies the EQ toggle and index */ + xive_router_write_end(xrtr, end_blk, end_idx, &end, 1); + } + + /* + * When the END is silent, we skip the notification part. + */ + if (xive_end_is_silent_escalation(&end)) { + goto do_escalation; + } + + /* + * The W7 format depends on the F bit in W6. It defines the type + * of the notification : + * + * F=0 : single or multiple NVT notification + * F=1 : User level Event-Based Branch (EBB) notification, no + * priority + */ + format = xive_get_field32(END_W6_FORMAT_BIT, end.w6); + priority = xive_get_field32(END_W7_F0_PRIORITY, end.w7); + + /* The END is masked */ + if (format == 0 && priority == 0xff) { + return; + } + + /* + * Check the END ESn (Event State Buffer for notification) for + * even further coalescing in the Router + */ + if (!xive_end_is_notify(&end)) { + /* ESn[Q]=1 : end of notification */ + if (!xive_router_end_es_notify(xrtr, end_blk, end_idx, + &end, END_W1_ESn)) { + return; + } + } + + /* + * Follows IVPE notification + */ + nvt_blk = xive_get_field32(END_W6_NVT_BLOCK, end.w6); + nvt_idx = xive_get_field32(END_W6_NVT_INDEX, end.w6); + + /* NVT cache lookup */ + if (xive_router_get_nvt(xrtr, nvt_blk, nvt_idx, &nvt)) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: no NVT %x/%x\n", + nvt_blk, nvt_idx); + return; + } + + if (!xive_nvt_is_valid(&nvt)) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVT %x/%x is invalid\n", + nvt_blk, nvt_idx); + return; + } + + found = xive_presenter_notify(xrtr->xfb, format, nvt_blk, nvt_idx, + xive_get_field32(END_W7_F0_IGNORE, end.w7), + priority, + xive_get_field32(END_W7_F1_LOG_SERVER_ID, end.w7)); + + /* TODO: Auto EOI. */ + + if (found) { + return; + } + + /* + * If no matching NVT is dispatched on a HW thread : + * - specific VP: update the NVT structure if backlog is activated + * - logical server : forward request to IVPE (not supported) + */ + if (xive_end_is_backlog(&end)) { + uint8_t ipb; + + if (format == 1) { + qemu_log_mask(LOG_GUEST_ERROR, + "XIVE: END %x/%x invalid config: F1 & backlog\n", + end_blk, end_idx); + return; + } + /* + * Record the IPB in the associated NVT structure for later + * use. The presenter will resend the interrupt when the vCPU + * is dispatched again on a HW thread. 
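+ * (The resend path is xive_tctx_need_resend(), run when the OS CAM + * line is pushed at vCPU dispatch.)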
+ */ + ipb = xive_get_field32(NVT_W4_IPB, nvt.w4) | + xive_priority_to_ipb(priority); + nvt.w4 = xive_set_field32(NVT_W4_IPB, nvt.w4, ipb); + xive_router_write_nvt(xrtr, nvt_blk, nvt_idx, &nvt, 4); + + /* + * On HW, follows a "Broadcast Backlog" to IVPEs + */ + } + +do_escalation: + /* + * If activated, escalate notification using the ESe PQ bits and + * the EAS in w4-5 + */ + if (!xive_end_is_escalate(&end)) { + return; + } + + /* + * Check the END ESe (Event State Buffer for escalation) for even + * further coalescing in the Router + */ + if (!xive_end_is_uncond_escalation(&end)) { + /* ESe[Q]=1 : end of notification */ + if (!xive_router_end_es_notify(xrtr, end_blk, end_idx, + &end, END_W1_ESe)) { + return; + } + } + + trace_xive_router_end_escalate(end_blk, end_idx, + (uint8_t) xive_get_field32(END_W4_ESC_END_BLOCK, end.w4), + (uint32_t) xive_get_field32(END_W4_ESC_END_INDEX, end.w4), + (uint32_t) xive_get_field32(END_W5_ESC_END_DATA, end.w5)); + /* + * The END trigger becomes an Escalation trigger + */ + xive_router_end_notify(xrtr, + xive_get_field32(END_W4_ESC_END_BLOCK, end.w4), + xive_get_field32(END_W4_ESC_END_INDEX, end.w4), + xive_get_field32(END_W5_ESC_END_DATA, end.w5)); +} + +void xive_router_notify(XiveNotifier *xn, uint32_t lisn) +{ + XiveRouter *xrtr = XIVE_ROUTER(xn); + uint8_t eas_blk = XIVE_EAS_BLOCK(lisn); + uint32_t eas_idx = XIVE_EAS_INDEX(lisn); + XiveEAS eas; + + /* EAS cache lookup */ + if (xive_router_get_eas(xrtr, eas_blk, eas_idx, &eas)) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN %x\n", lisn); + return; + } + + /* + * The IVRE checks the State Bit Cache at this point. We skip the + * SBC lookup because the state bits of the sources are modeled + * internally in QEMU. + */ + + if (!xive_eas_is_valid(&eas)) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid LISN %x\n", lisn); + return; + } + + if (xive_eas_is_masked(&eas)) { + /* Notification completed */ + return; + } + + /* + * The event trigger becomes an END trigger + */ + xive_router_end_notify(xrtr, + xive_get_field64(EAS_END_BLOCK, eas.w), + xive_get_field64(EAS_END_INDEX, eas.w), + xive_get_field64(EAS_END_DATA, eas.w)); +} + +static Property xive_router_properties[] = { + DEFINE_PROP_LINK("xive-fabric", XiveRouter, xfb, + TYPE_XIVE_FABRIC, XiveFabric *), + DEFINE_PROP_END_OF_LIST(), +}; + +static void xive_router_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + XiveNotifierClass *xnc = XIVE_NOTIFIER_CLASS(klass); + + dc->desc = "XIVE Router Engine"; + device_class_set_props(dc, xive_router_properties); + /* Parent is SysBusDeviceClass. No need to call its realize hook */ + dc->realize = xive_router_realize; + xnc->notify = xive_router_notify; +} + +static const TypeInfo xive_router_info = { + .name = TYPE_XIVE_ROUTER, + .parent = TYPE_SYS_BUS_DEVICE, + .abstract = true, + .instance_size = sizeof(XiveRouter), + .class_size = sizeof(XiveRouterClass), + .class_init = xive_router_class_init, + .interfaces = (InterfaceInfo[]) { + { TYPE_XIVE_NOTIFIER }, + { TYPE_XIVE_PRESENTER }, + { } + } +}; + +void xive_eas_pic_print_info(XiveEAS *eas, uint32_t lisn, Monitor *mon) +{ + if (!xive_eas_is_valid(eas)) { + return; + } + + monitor_printf(mon, " %08x %s end:%02x/%04x data:%08x\n", + lisn, xive_eas_is_masked(eas) ? 
"M" : " ", + (uint8_t) xive_get_field64(EAS_END_BLOCK, eas->w), + (uint32_t) xive_get_field64(EAS_END_INDEX, eas->w), + (uint32_t) xive_get_field64(EAS_END_DATA, eas->w)); +} + +/* + * END ESB MMIO loads + */ +static uint64_t xive_end_source_read(void *opaque, hwaddr addr, unsigned size) +{ + XiveENDSource *xsrc = XIVE_END_SOURCE(opaque); + uint32_t offset = addr & 0xFFF; + uint8_t end_blk; + uint32_t end_idx; + XiveEND end; + uint32_t end_esmask; + uint8_t pq; + uint64_t ret = -1; + + /* + * The block id should be deduced from the load address on the END + * ESB MMIO but our model only supports a single block per XIVE chip. + */ + end_blk = xive_router_get_block_id(xsrc->xrtr); + end_idx = addr >> (xsrc->esb_shift + 1); + + trace_xive_end_source_read(end_blk, end_idx, addr); + + if (xive_router_get_end(xsrc->xrtr, end_blk, end_idx, &end)) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No END %x/%x\n", end_blk, + end_idx); + return -1; + } + + if (!xive_end_is_valid(&end)) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: END %x/%x is invalid\n", + end_blk, end_idx); + return -1; + } + + end_esmask = addr_is_even(addr, xsrc->esb_shift) ? END_W1_ESn : END_W1_ESe; + pq = xive_get_field32(end_esmask, end.w1); + + switch (offset) { + case XIVE_ESB_LOAD_EOI ... XIVE_ESB_LOAD_EOI + 0x7FF: + ret = xive_esb_eoi(&pq); + + /* Forward the source event notification for routing ?? */ + break; + + case XIVE_ESB_GET ... XIVE_ESB_GET + 0x3FF: + ret = pq; + break; + + case XIVE_ESB_SET_PQ_00 ... XIVE_ESB_SET_PQ_00 + 0x0FF: + case XIVE_ESB_SET_PQ_01 ... XIVE_ESB_SET_PQ_01 + 0x0FF: + case XIVE_ESB_SET_PQ_10 ... XIVE_ESB_SET_PQ_10 + 0x0FF: + case XIVE_ESB_SET_PQ_11 ... XIVE_ESB_SET_PQ_11 + 0x0FF: + ret = xive_esb_set(&pq, (offset >> 8) & 0x3); + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid END ESB load addr %d\n", + offset); + return -1; + } + + if (pq != xive_get_field32(end_esmask, end.w1)) { + end.w1 = xive_set_field32(end_esmask, end.w1, pq); + xive_router_write_end(xsrc->xrtr, end_blk, end_idx, &end, 1); + } + + return ret; +} + +/* + * END ESB MMIO stores are invalid + */ +static void xive_end_source_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid ESB write addr 0x%" + HWADDR_PRIx"\n", addr); +} + +static const MemoryRegionOps xive_end_source_ops = { + .read = xive_end_source_read, + .write = xive_end_source_write, + .endianness = DEVICE_BIG_ENDIAN, + .valid = { + .min_access_size = 8, + .max_access_size = 8, + }, + .impl = { + .min_access_size = 8, + .max_access_size = 8, + }, +}; + +static void xive_end_source_realize(DeviceState *dev, Error **errp) +{ + XiveENDSource *xsrc = XIVE_END_SOURCE(dev); + + assert(xsrc->xrtr); + + if (!xsrc->nr_ends) { + error_setg(errp, "Number of interrupt needs to be greater than 0"); + return; + } + + if (xsrc->esb_shift != XIVE_ESB_4K && + xsrc->esb_shift != XIVE_ESB_64K) { + error_setg(errp, "Invalid ESB shift setting"); + return; + } + + /* + * Each END is assigned an even/odd pair of MMIO pages, the even page + * manages the ESn field while the odd page manages the ESe field. 
+ */ + memory_region_init_io(&xsrc->esb_mmio, OBJECT(xsrc), + &xive_end_source_ops, xsrc, "xive.end", + (1ull << (xsrc->esb_shift + 1)) * xsrc->nr_ends); +} + +static Property xive_end_source_properties[] = { + DEFINE_PROP_UINT32("nr-ends", XiveENDSource, nr_ends, 0), + DEFINE_PROP_UINT32("shift", XiveENDSource, esb_shift, XIVE_ESB_64K), + DEFINE_PROP_LINK("xive", XiveENDSource, xrtr, TYPE_XIVE_ROUTER, + XiveRouter *), + DEFINE_PROP_END_OF_LIST(), +}; + +static void xive_end_source_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->desc = "XIVE END Source"; + device_class_set_props(dc, xive_end_source_properties); + dc->realize = xive_end_source_realize; + /* + * Reason: part of XIVE interrupt controller, needs to be wired up, + * e.g. by spapr_xive_instance_init(). + */ + dc->user_creatable = false; +} + +static const TypeInfo xive_end_source_info = { + .name = TYPE_XIVE_END_SOURCE, + .parent = TYPE_DEVICE, + .instance_size = sizeof(XiveENDSource), + .class_init = xive_end_source_class_init, +}; + +/* + * XIVE Notifier + */ +static const TypeInfo xive_notifier_info = { + .name = TYPE_XIVE_NOTIFIER, + .parent = TYPE_INTERFACE, + .class_size = sizeof(XiveNotifierClass), +}; + +/* + * XIVE Presenter + */ +static const TypeInfo xive_presenter_info = { + .name = TYPE_XIVE_PRESENTER, + .parent = TYPE_INTERFACE, + .class_size = sizeof(XivePresenterClass), +}; + +/* + * XIVE Fabric + */ +static const TypeInfo xive_fabric_info = { + .name = TYPE_XIVE_FABRIC, + .parent = TYPE_INTERFACE, + .class_size = sizeof(XiveFabricClass), +}; + +static void xive_register_types(void) +{ + type_register_static(&xive_fabric_info); + type_register_static(&xive_source_info); + type_register_static(&xive_notifier_info); + type_register_static(&xive_presenter_info); + type_register_static(&xive_router_info); + type_register_static(&xive_end_source_info); + type_register_static(&xive_tctx_info); +} + +type_init(xive_register_types) diff --git a/hw/intc/xlnx-pmu-iomod-intc.c b/hw/intc/xlnx-pmu-iomod-intc.c new file mode 100644 index 000000000..acaa1c3e6 --- /dev/null +++ b/hw/intc/xlnx-pmu-iomod-intc.c @@ -0,0 +1,558 @@ +/* + * QEMU model of Xilinx I/O Module Interrupt Controller + * + * Copyright (c) 2013 Xilinx Inc + * Written by Edgar E. Iglesias + * Written by Alistair Francis + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "hw/register.h" +#include "qemu/bitops.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "hw/intc/xlnx-pmu-iomod-intc.h" +#include "hw/irq.h" +#include "hw/qdev-properties.h" + +#ifndef XLNX_PMU_IO_INTC_ERR_DEBUG +#define XLNX_PMU_IO_INTC_ERR_DEBUG 0 +#endif + +#define DB_PRINT_L(lvl, fmt, args...) do {\ + if (XLNX_PMU_IO_INTC_ERR_DEBUG >= lvl) {\ + qemu_log(TYPE_XLNX_PMU_IO_INTC ": %s:" fmt, __func__, ## args);\ + } \ +} while (0) + +#define DB_PRINT(fmt, args...) DB_PRINT_L(1, fmt, ## args) + +REG32(IRQ_MODE, 0xc) +REG32(GPO0, 0x10) + FIELD(GPO0, MAGIC_WORD_1, 24, 8) + FIELD(GPO0, MAGIC_WORD_2, 16, 8) + FIELD(GPO0, FT_INJECT_FAILURE, 13, 3) + FIELD(GPO0, DISABLE_RST_FTSM, 12, 1) + FIELD(GPO0, RST_FTSM, 11, 1) + FIELD(GPO0, CLR_FTSTS, 10, 1) + FIELD(GPO0, RST_ON_SLEEP, 9, 1) + FIELD(GPO0, DISABLE_TRACE_COMP, 8, 1) + FIELD(GPO0, PIT3_PRESCALE, 7, 1) + FIELD(GPO0, PIT2_PRESCALE, 5, 2) + FIELD(GPO0, PIT1_PRESCALE, 3, 2) + FIELD(GPO0, PIT0_PRESCALE, 1, 2) + FIELD(GPO0, DEBUG_REMAP, 0, 1) +REG32(GPO1, 0x14) + FIELD(GPO1, MIO_5, 5, 1) + FIELD(GPO1, MIO_4, 4, 1) + FIELD(GPO1, MIO_3, 3, 1) + FIELD(GPO1, MIO_2, 2, 1) + FIELD(GPO1, MIO_1, 1, 1) + FIELD(GPO1, MIO_0, 0, 1) +REG32(GPO2, 0x18) + FIELD(GPO2, DAP_RPU_WAKE_ACK, 9, 1) + FIELD(GPO2, DAP_FP_WAKE_ACK, 8, 1) + FIELD(GPO2, PS_STATUS, 7, 1) + FIELD(GPO2, PCAP_EN, 6, 1) +REG32(GPO3, 0x1c) + FIELD(GPO3, PL_GPO_31, 31, 1) + FIELD(GPO3, PL_GPO_30, 30, 1) + FIELD(GPO3, PL_GPO_29, 29, 1) + FIELD(GPO3, PL_GPO_28, 28, 1) + FIELD(GPO3, PL_GPO_27, 27, 1) + FIELD(GPO3, PL_GPO_26, 26, 1) + FIELD(GPO3, PL_GPO_25, 25, 1) + FIELD(GPO3, PL_GPO_24, 24, 1) + FIELD(GPO3, PL_GPO_23, 23, 1) + FIELD(GPO3, PL_GPO_22, 22, 1) + FIELD(GPO3, PL_GPO_21, 21, 1) + FIELD(GPO3, PL_GPO_20, 20, 1) + FIELD(GPO3, PL_GPO_19, 19, 1) + FIELD(GPO3, PL_GPO_18, 18, 1) + FIELD(GPO3, PL_GPO_17, 17, 1) + FIELD(GPO3, PL_GPO_16, 16, 1) + FIELD(GPO3, PL_GPO_15, 15, 1) + FIELD(GPO3, PL_GPO_14, 14, 1) + FIELD(GPO3, PL_GPO_13, 13, 1) + FIELD(GPO3, PL_GPO_12, 12, 1) + FIELD(GPO3, PL_GPO_11, 11, 1) + FIELD(GPO3, PL_GPO_10, 10, 1) + FIELD(GPO3, PL_GPO_9, 9, 1) + FIELD(GPO3, PL_GPO_8, 8, 1) + FIELD(GPO3, PL_GPO_7, 7, 1) + FIELD(GPO3, PL_GPO_6, 6, 1) + FIELD(GPO3, PL_GPO_5, 5, 1) + FIELD(GPO3, PL_GPO_4, 4, 1) + FIELD(GPO3, PL_GPO_3, 3, 1) + FIELD(GPO3, PL_GPO_2, 2, 1) + FIELD(GPO3, PL_GPO_1, 1, 1) + FIELD(GPO3, PL_GPO_0, 0, 1) +REG32(GPI0, 0x20) + FIELD(GPI0, RFT_ECC_FATAL_ERR, 31, 1) + FIELD(GPI0, RFT_VOTER_ERR, 30, 1) + FIELD(GPI0, RFT_COMPARE_ERR_23, 29, 1) + FIELD(GPI0, RFT_COMPARE_ERR_13, 28, 1) + FIELD(GPI0, RFT_COMPARE_ERR_12, 27, 1) + FIELD(GPI0, RFT_LS_MISMATCH_23_B, 26, 1) + FIELD(GPI0, RFT_LS_MISMATCH_13_B, 25, 1) + FIELD(GPI0, RFT_LS_MISMATCH_12_B, 24, 1) + FIELD(GPI0, RFT_MISMATCH_STATE, 23, 1) + FIELD(GPI0, RFT_MISMATCH_CPU, 22, 1) + FIELD(GPI0, RFT_SLEEP_RESET, 19, 1) + FIELD(GPI0, RFT_LS_MISMATCH_23_A, 18, 1) + FIELD(GPI0, RFT_LS_MISMATCH_13_A, 17, 1) + FIELD(GPI0, RFT_LS_MISMATCH_12_A, 16, 1) + FIELD(GPI0, NFT_ECC_FATAL_ERR, 15, 1) + FIELD(GPI0, NFT_VOTER_ERR, 14, 1) + FIELD(GPI0, NFT_COMPARE_ERR_23, 13, 1) + FIELD(GPI0, NFT_COMPARE_ERR_13, 12, 1) + FIELD(GPI0, NFT_COMPARE_ERR_12, 11, 1) + FIELD(GPI0, NFT_LS_MISMATCH_23_B, 10, 1) + FIELD(GPI0, NFT_LS_MISMATCH_13_B, 9, 1) + FIELD(GPI0, NFT_LS_MISMATCH_12_B, 8, 1) + FIELD(GPI0, NFT_MISMATCH_STATE, 7, 1) + FIELD(GPI0, NFT_MISMATCH_CPU, 6, 1) + FIELD(GPI0, NFT_SLEEP_RESET, 3, 1) + FIELD(GPI0, NFT_LS_MISMATCH_23_A, 2, 1) + 
FIELD(GPI0, NFT_LS_MISMATCH_13_A, 1, 1) + FIELD(GPI0, NFT_LS_MISMATCH_12_A, 0, 1) +REG32(GPI1, 0x24) + FIELD(GPI1, APB_AIB_ERROR, 31, 1) + FIELD(GPI1, AXI_AIB_ERROR, 30, 1) + FIELD(GPI1, ERROR_2, 29, 1) + FIELD(GPI1, ERROR_1, 28, 1) + FIELD(GPI1, ACPU_3_DBG_PWRUP, 23, 1) + FIELD(GPI1, ACPU_2_DBG_PWRUP, 22, 1) + FIELD(GPI1, ACPU_1_DBG_PWRUP, 21, 1) + FIELD(GPI1, ACPU_0_DBG_PWRUP, 20, 1) + FIELD(GPI1, FPD_WAKE_GIC_PROXY, 16, 1) + FIELD(GPI1, MIO_WAKE_5, 15, 1) + FIELD(GPI1, MIO_WAKE_4, 14, 1) + FIELD(GPI1, MIO_WAKE_3, 13, 1) + FIELD(GPI1, MIO_WAKE_2, 12, 1) + FIELD(GPI1, MIO_WAKE_1, 11, 1) + FIELD(GPI1, MIO_WAKE_0, 10, 1) + FIELD(GPI1, DAP_RPU_WAKE, 9, 1) + FIELD(GPI1, DAP_FPD_WAKE, 8, 1) + FIELD(GPI1, USB_1_WAKE, 7, 1) + FIELD(GPI1, USB_0_WAKE, 6, 1) + FIELD(GPI1, R5_1_WAKE, 5, 1) + FIELD(GPI1, R5_0_WAKE, 4, 1) + FIELD(GPI1, ACPU_3_WAKE, 3, 1) + FIELD(GPI1, ACPU_2_WAKE, 2, 1) + FIELD(GPI1, ACPU_1_WAKE, 1, 1) + FIELD(GPI1, ACPU_0_WAKE, 0, 1) +REG32(GPI2, 0x28) + FIELD(GPI2, VCC_INT_FP_DISCONNECT, 31, 1) + FIELD(GPI2, VCC_INT_DISCONNECT, 30, 1) + FIELD(GPI2, VCC_AUX_DISCONNECT, 29, 1) + FIELD(GPI2, DBG_ACPU3_RST_REQ, 23, 1) + FIELD(GPI2, DBG_ACPU2_RST_REQ, 22, 1) + FIELD(GPI2, DBG_ACPU1_RST_REQ, 21, 1) + FIELD(GPI2, DBG_ACPU0_RST_REQ, 20, 1) + FIELD(GPI2, CP_ACPU3_RST_REQ, 19, 1) + FIELD(GPI2, CP_ACPU2_RST_REQ, 18, 1) + FIELD(GPI2, CP_ACPU1_RST_REQ, 17, 1) + FIELD(GPI2, CP_ACPU0_RST_REQ, 16, 1) + FIELD(GPI2, DBG_RCPU1_RST_REQ, 9, 1) + FIELD(GPI2, DBG_RCPU0_RST_REQ, 8, 1) + FIELD(GPI2, R5_1_SLEEP, 5, 1) + FIELD(GPI2, R5_0_SLEEP, 4, 1) + FIELD(GPI2, ACPU_3_SLEEP, 3, 1) + FIELD(GPI2, ACPU_2_SLEEP, 2, 1) + FIELD(GPI2, ACPU_1_SLEEP, 1, 1) + FIELD(GPI2, ACPU_0_SLEEP, 0, 1) +REG32(GPI3, 0x2c) + FIELD(GPI3, PL_GPI_31, 31, 1) + FIELD(GPI3, PL_GPI_30, 30, 1) + FIELD(GPI3, PL_GPI_29, 29, 1) + FIELD(GPI3, PL_GPI_28, 28, 1) + FIELD(GPI3, PL_GPI_27, 27, 1) + FIELD(GPI3, PL_GPI_26, 26, 1) + FIELD(GPI3, PL_GPI_25, 25, 1) + FIELD(GPI3, PL_GPI_24, 24, 1) + FIELD(GPI3, PL_GPI_23, 23, 1) + FIELD(GPI3, PL_GPI_22, 22, 1) + FIELD(GPI3, PL_GPI_21, 21, 1) + FIELD(GPI3, PL_GPI_20, 20, 1) + FIELD(GPI3, PL_GPI_19, 19, 1) + FIELD(GPI3, PL_GPI_18, 18, 1) + FIELD(GPI3, PL_GPI_17, 17, 1) + FIELD(GPI3, PL_GPI_16, 16, 1) + FIELD(GPI3, PL_GPI_15, 15, 1) + FIELD(GPI3, PL_GPI_14, 14, 1) + FIELD(GPI3, PL_GPI_13, 13, 1) + FIELD(GPI3, PL_GPI_12, 12, 1) + FIELD(GPI3, PL_GPI_11, 11, 1) + FIELD(GPI3, PL_GPI_10, 10, 1) + FIELD(GPI3, PL_GPI_9, 9, 1) + FIELD(GPI3, PL_GPI_8, 8, 1) + FIELD(GPI3, PL_GPI_7, 7, 1) + FIELD(GPI3, PL_GPI_6, 6, 1) + FIELD(GPI3, PL_GPI_5, 5, 1) + FIELD(GPI3, PL_GPI_4, 4, 1) + FIELD(GPI3, PL_GPI_3, 3, 1) + FIELD(GPI3, PL_GPI_2, 2, 1) + FIELD(GPI3, PL_GPI_1, 1, 1) + FIELD(GPI3, PL_GPI_0, 0, 1) +REG32(IRQ_STATUS, 0x30) + FIELD(IRQ_STATUS, CSU_PMU_SEC_LOCK, 31, 1) + FIELD(IRQ_STATUS, INV_ADDR, 29, 1) + FIELD(IRQ_STATUS, PWR_DN_REQ, 28, 1) + FIELD(IRQ_STATUS, PWR_UP_REQ, 27, 1) + FIELD(IRQ_STATUS, SW_RST_REQ, 26, 1) + FIELD(IRQ_STATUS, HW_RST_REQ, 25, 1) + FIELD(IRQ_STATUS, ISO_REQ, 24, 1) + FIELD(IRQ_STATUS, FW_REQ, 23, 1) + FIELD(IRQ_STATUS, IPI3, 22, 1) + FIELD(IRQ_STATUS, IPI2, 21, 1) + FIELD(IRQ_STATUS, IPI1, 20, 1) + FIELD(IRQ_STATUS, IPI0, 19, 1) + FIELD(IRQ_STATUS, RTC_ALARM, 18, 1) + FIELD(IRQ_STATUS, RTC_EVERY_SECOND, 17, 1) + FIELD(IRQ_STATUS, CORRECTABLE_ECC, 16, 1) + FIELD(IRQ_STATUS, GPI3, 14, 1) + FIELD(IRQ_STATUS, GPI2, 13, 1) + FIELD(IRQ_STATUS, GPI1, 12, 1) + FIELD(IRQ_STATUS, GPI0, 11, 1) + FIELD(IRQ_STATUS, PIT3, 6, 1) + FIELD(IRQ_STATUS, PIT2, 5, 1) + FIELD(IRQ_STATUS, PIT1, 4, 1) + 
FIELD(IRQ_STATUS, PIT0, 3, 1) +REG32(IRQ_PENDING, 0x34) + FIELD(IRQ_PENDING, CSU_PMU_SEC_LOCK, 31, 1) + FIELD(IRQ_PENDING, INV_ADDR, 29, 1) + FIELD(IRQ_PENDING, PWR_DN_REQ, 28, 1) + FIELD(IRQ_PENDING, PWR_UP_REQ, 27, 1) + FIELD(IRQ_PENDING, SW_RST_REQ, 26, 1) + FIELD(IRQ_PENDING, HW_RST_REQ, 25, 1) + FIELD(IRQ_PENDING, ISO_REQ, 24, 1) + FIELD(IRQ_PENDING, FW_REQ, 23, 1) + FIELD(IRQ_PENDING, IPI3, 22, 1) + FIELD(IRQ_PENDING, IPI2, 21, 1) + FIELD(IRQ_PENDING, IPI1, 20, 1) + FIELD(IRQ_PENDING, IPI0, 19, 1) + FIELD(IRQ_PENDING, RTC_ALARM, 18, 1) + FIELD(IRQ_PENDING, RTC_EVERY_SECOND, 17, 1) + FIELD(IRQ_PENDING, CORRECTABLE_ECC, 16, 1) + FIELD(IRQ_PENDING, GPI3, 14, 1) + FIELD(IRQ_PENDING, GPI2, 13, 1) + FIELD(IRQ_PENDING, GPI1, 12, 1) + FIELD(IRQ_PENDING, GPI0, 11, 1) + FIELD(IRQ_PENDING, PIT3, 6, 1) + FIELD(IRQ_PENDING, PIT2, 5, 1) + FIELD(IRQ_PENDING, PIT1, 4, 1) + FIELD(IRQ_PENDING, PIT0, 3, 1) +REG32(IRQ_ENABLE, 0x38) + FIELD(IRQ_ENABLE, CSU_PMU_SEC_LOCK, 31, 1) + FIELD(IRQ_ENABLE, INV_ADDR, 29, 1) + FIELD(IRQ_ENABLE, PWR_DN_REQ, 28, 1) + FIELD(IRQ_ENABLE, PWR_UP_REQ, 27, 1) + FIELD(IRQ_ENABLE, SW_RST_REQ, 26, 1) + FIELD(IRQ_ENABLE, HW_RST_REQ, 25, 1) + FIELD(IRQ_ENABLE, ISO_REQ, 24, 1) + FIELD(IRQ_ENABLE, FW_REQ, 23, 1) + FIELD(IRQ_ENABLE, IPI3, 22, 1) + FIELD(IRQ_ENABLE, IPI2, 21, 1) + FIELD(IRQ_ENABLE, IPI1, 20, 1) + FIELD(IRQ_ENABLE, IPI0, 19, 1) + FIELD(IRQ_ENABLE, RTC_ALARM, 18, 1) + FIELD(IRQ_ENABLE, RTC_EVERY_SECOND, 17, 1) + FIELD(IRQ_ENABLE, CORRECTABLE_ECC, 16, 1) + FIELD(IRQ_ENABLE, GPI3, 14, 1) + FIELD(IRQ_ENABLE, GPI2, 13, 1) + FIELD(IRQ_ENABLE, GPI1, 12, 1) + FIELD(IRQ_ENABLE, GPI0, 11, 1) + FIELD(IRQ_ENABLE, PIT3, 6, 1) + FIELD(IRQ_ENABLE, PIT2, 5, 1) + FIELD(IRQ_ENABLE, PIT1, 4, 1) + FIELD(IRQ_ENABLE, PIT0, 3, 1) +REG32(IRQ_ACK, 0x3c) + FIELD(IRQ_ACK, CSU_PMU_SEC_LOCK, 31, 1) + FIELD(IRQ_ACK, INV_ADDR, 29, 1) + FIELD(IRQ_ACK, PWR_DN_REQ, 28, 1) + FIELD(IRQ_ACK, PWR_UP_REQ, 27, 1) + FIELD(IRQ_ACK, SW_RST_REQ, 26, 1) + FIELD(IRQ_ACK, HW_RST_REQ, 25, 1) + FIELD(IRQ_ACK, ISO_REQ, 24, 1) + FIELD(IRQ_ACK, FW_REQ, 23, 1) + FIELD(IRQ_ACK, IPI3, 22, 1) + FIELD(IRQ_ACK, IPI2, 21, 1) + FIELD(IRQ_ACK, IPI1, 20, 1) + FIELD(IRQ_ACK, IPI0, 19, 1) + FIELD(IRQ_ACK, RTC_ALARM, 18, 1) + FIELD(IRQ_ACK, RTC_EVERY_SECOND, 17, 1) + FIELD(IRQ_ACK, CORRECTABLE_ECC, 16, 1) + FIELD(IRQ_ACK, GPI3, 14, 1) + FIELD(IRQ_ACK, GPI2, 13, 1) + FIELD(IRQ_ACK, GPI1, 12, 1) + FIELD(IRQ_ACK, GPI0, 11, 1) + FIELD(IRQ_ACK, PIT3, 6, 1) + FIELD(IRQ_ACK, PIT2, 5, 1) + FIELD(IRQ_ACK, PIT1, 4, 1) + FIELD(IRQ_ACK, PIT0, 3, 1) +REG32(PIT0_PRELOAD, 0x40) +REG32(PIT0_COUNTER, 0x44) +REG32(PIT0_CONTROL, 0x48) + FIELD(PIT0_CONTROL, PRELOAD, 1, 1) + FIELD(PIT0_CONTROL, EN, 0, 1) +REG32(PIT1_PRELOAD, 0x50) +REG32(PIT1_COUNTER, 0x54) +REG32(PIT1_CONTROL, 0x58) + FIELD(PIT1_CONTROL, PRELOAD, 1, 1) + FIELD(PIT1_CONTROL, EN, 0, 1) +REG32(PIT2_PRELOAD, 0x60) +REG32(PIT2_COUNTER, 0x64) +REG32(PIT2_CONTROL, 0x68) + FIELD(PIT2_CONTROL, PRELOAD, 1, 1) + FIELD(PIT2_CONTROL, EN, 0, 1) +REG32(PIT3_PRELOAD, 0x70) +REG32(PIT3_COUNTER, 0x74) +REG32(PIT3_CONTROL, 0x78) + FIELD(PIT3_CONTROL, PRELOAD, 1, 1) + FIELD(PIT3_CONTROL, EN, 0, 1) + +static void xlnx_pmu_io_irq_update(XlnxPMUIOIntc *s) +{ + bool irq_out; + + s->regs[R_IRQ_PENDING] = s->regs[R_IRQ_STATUS] & s->regs[R_IRQ_ENABLE]; + irq_out = !!s->regs[R_IRQ_PENDING]; + + DB_PRINT("Setting IRQ output = %d\n", irq_out); + + qemu_set_irq(s->parent_irq, irq_out); +} + +static void xlnx_pmu_io_irq_enable_postw(RegisterInfo *reg, uint64_t val64) +{ + XlnxPMUIOIntc *s = 
XLNX_PMU_IO_INTC(reg->opaque); + + xlnx_pmu_io_irq_update(s); +} + +static void xlnx_pmu_io_irq_ack_postw(RegisterInfo *reg, uint64_t val64) +{ + XlnxPMUIOIntc *s = XLNX_PMU_IO_INTC(reg->opaque); + uint32_t val = val64; + + /* Only clear */ + val &= s->regs[R_IRQ_STATUS]; + s->regs[R_IRQ_STATUS] ^= val; + + /* Active level triggered interrupts stay high. */ + s->regs[R_IRQ_STATUS] |= s->irq_raw & ~s->cfg.level_edge; + + xlnx_pmu_io_irq_update(s); +} + +static const RegisterAccessInfo xlnx_pmu_io_intc_regs_info[] = { + { .name = "IRQ_MODE", .addr = A_IRQ_MODE, + .rsvd = 0xffffffff, + },{ .name = "GPO0", .addr = A_GPO0, + },{ .name = "GPO1", .addr = A_GPO1, + .rsvd = 0xffffffc0, + },{ .name = "GPO2", .addr = A_GPO2, + .rsvd = 0xfffffc3f, + },{ .name = "GPO3", .addr = A_GPO3, + },{ .name = "GPI0", .addr = A_GPI0, + .rsvd = 0x300030, + .ro = 0xffcfffcf, + },{ .name = "GPI1", .addr = A_GPI1, + .rsvd = 0xf0e0000, + .ro = 0xf0f1ffff, + },{ .name = "GPI2", .addr = A_GPI2, + .rsvd = 0x1f00fcc0, + .ro = 0xe0ff033f, + },{ .name = "GPI3", .addr = A_GPI3, + .ro = 0xffffffff, + },{ .name = "IRQ_STATUS", .addr = A_IRQ_STATUS, + .rsvd = 0x40008787, + .ro = 0xbfff7878, + },{ .name = "IRQ_PENDING", .addr = A_IRQ_PENDING, + .rsvd = 0x40008787, + .ro = 0xdfff7ff8, + },{ .name = "IRQ_ENABLE", .addr = A_IRQ_ENABLE, + .rsvd = 0x40008787, + .ro = 0x7800, + .post_write = xlnx_pmu_io_irq_enable_postw, + },{ .name = "IRQ_ACK", .addr = A_IRQ_ACK, + .rsvd = 0x40008787, + .post_write = xlnx_pmu_io_irq_ack_postw, + },{ .name = "PIT0_PRELOAD", .addr = A_PIT0_PRELOAD, + .ro = 0xffffffff, + },{ .name = "PIT0_COUNTER", .addr = A_PIT0_COUNTER, + .ro = 0xffffffff, + },{ .name = "PIT0_CONTROL", .addr = A_PIT0_CONTROL, + .rsvd = 0xfffffffc, + },{ .name = "PIT1_PRELOAD", .addr = A_PIT1_PRELOAD, + .ro = 0xffffffff, + },{ .name = "PIT1_COUNTER", .addr = A_PIT1_COUNTER, + .ro = 0xffffffff, + },{ .name = "PIT1_CONTROL", .addr = A_PIT1_CONTROL, + .rsvd = 0xfffffffc, + },{ .name = "PIT2_PRELOAD", .addr = A_PIT2_PRELOAD, + .ro = 0xffffffff, + },{ .name = "PIT2_COUNTER", .addr = A_PIT2_COUNTER, + .ro = 0xffffffff, + },{ .name = "PIT2_CONTROL", .addr = A_PIT2_CONTROL, + .rsvd = 0xfffffffc, + },{ .name = "PIT3_PRELOAD", .addr = A_PIT3_PRELOAD, + .ro = 0xffffffff, + },{ .name = "PIT3_COUNTER", .addr = A_PIT3_COUNTER, + .ro = 0xffffffff, + },{ .name = "PIT3_CONTROL", .addr = A_PIT3_CONTROL, + .rsvd = 0xfffffffc, + } +}; + +static void irq_handler(void *opaque, int irq, int level) +{ + XlnxPMUIOIntc *s = XLNX_PMU_IO_INTC(opaque); + uint32_t mask = 1 << irq; + uint32_t prev = s->irq_raw; + uint32_t temp; + + s->irq_raw &= ~mask; + s->irq_raw |= (!!level) << irq; + + /* Turn active-low into active-high. */ + s->irq_raw ^= (~s->cfg.positive); + s->irq_raw &= mask; + + if (s->cfg.level_edge & mask) { + /* Edge triggered. */ + temp = (prev ^ s->irq_raw) & s->irq_raw; + } else { + /* Level triggered. 
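The status bit is set from the normalized raw input, and xlnx_pmu_io_irq_ack_postw() above re-asserts it from irq_raw, so an ACK has no effect while the line remains asserted.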
*/ + temp = s->irq_raw; + } + s->regs[R_IRQ_STATUS] |= temp; + + xlnx_pmu_io_irq_update(s); +} + +static void xlnx_pmu_io_intc_reset(DeviceState *dev) +{ + XlnxPMUIOIntc *s = XLNX_PMU_IO_INTC(dev); + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(s->regs_info); ++i) { + register_reset(&s->regs_info[i]); + } + + xlnx_pmu_io_irq_update(s); +} + +static const MemoryRegionOps xlnx_pmu_io_intc_ops = { + .read = register_read_memory, + .write = register_write_memory, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +static Property xlnx_pmu_io_intc_properties[] = { + DEFINE_PROP_UINT32("intc-intr-size", XlnxPMUIOIntc, cfg.intr_size, 0), + DEFINE_PROP_UINT32("intc-level-edge", XlnxPMUIOIntc, cfg.level_edge, 0), + DEFINE_PROP_UINT32("intc-positive", XlnxPMUIOIntc, cfg.positive, 0), + DEFINE_PROP_END_OF_LIST(), +}; + +static void xlnx_pmu_io_intc_realize(DeviceState *dev, Error **errp) +{ + XlnxPMUIOIntc *s = XLNX_PMU_IO_INTC(dev); + + /* Internal interrupts are edge triggered */ + s->cfg.level_edge <<= 16; + s->cfg.level_edge |= 0xffff; + + /* Internal interrupts are positive. */ + s->cfg.positive <<= 16; + s->cfg.positive |= 0xffff; + + /* Max 16 external interrupts. */ + assert(s->cfg.intr_size <= 16); + + qdev_init_gpio_in(dev, irq_handler, 16 + s->cfg.intr_size); +} + +static void xlnx_pmu_io_intc_init(Object *obj) +{ + XlnxPMUIOIntc *s = XLNX_PMU_IO_INTC(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + RegisterInfoArray *reg_array; + + memory_region_init(&s->iomem, obj, TYPE_XLNX_PMU_IO_INTC, + XLNXPMUIOINTC_R_MAX * 4); + reg_array = + register_init_block32(DEVICE(obj), xlnx_pmu_io_intc_regs_info, + ARRAY_SIZE(xlnx_pmu_io_intc_regs_info), + s->regs_info, s->regs, + &xlnx_pmu_io_intc_ops, + XLNX_PMU_IO_INTC_ERR_DEBUG, + XLNXPMUIOINTC_R_MAX * 4); + memory_region_add_subregion(&s->iomem, + 0x0, + ®_array->mem); + sysbus_init_mmio(sbd, &s->iomem); + + sysbus_init_irq(sbd, &s->parent_irq); +} + +static const VMStateDescription vmstate_xlnx_pmu_io_intc = { + .name = TYPE_XLNX_PMU_IO_INTC, + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32_ARRAY(regs, XlnxPMUIOIntc, XLNXPMUIOINTC_R_MAX), + VMSTATE_END_OF_LIST(), + } +}; + +static void xlnx_pmu_io_intc_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = xlnx_pmu_io_intc_reset; + dc->realize = xlnx_pmu_io_intc_realize; + dc->vmsd = &vmstate_xlnx_pmu_io_intc; + device_class_set_props(dc, xlnx_pmu_io_intc_properties); +} + +static const TypeInfo xlnx_pmu_io_intc_info = { + .name = TYPE_XLNX_PMU_IO_INTC, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(XlnxPMUIOIntc), + .class_init = xlnx_pmu_io_intc_class_init, + .instance_init = xlnx_pmu_io_intc_init, +}; + +static void xlnx_pmu_io_intc_register_types(void) +{ + type_register_static(&xlnx_pmu_io_intc_info); +} + +type_init(xlnx_pmu_io_intc_register_types) diff --git a/hw/intc/xlnx-zynqmp-ipi.c b/hw/intc/xlnx-zynqmp-ipi.c new file mode 100644 index 000000000..adc117901 --- /dev/null +++ b/hw/intc/xlnx-zynqmp-ipi.c @@ -0,0 +1,380 @@ +/* + * QEMU model of the IPI Inter Processor Interrupt block + * + * Copyright (c) 2014 Xilinx Inc. + * + * Written by Edgar E. 
Iglesias + * Written by Alistair Francis + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "hw/register.h" +#include "qemu/bitops.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "hw/intc/xlnx-zynqmp-ipi.h" +#include "hw/irq.h" + +#ifndef XLNX_ZYNQMP_IPI_ERR_DEBUG +#define XLNX_ZYNQMP_IPI_ERR_DEBUG 0 +#endif + +#define DB_PRINT_L(lvl, fmt, args...) do {\ + if (XLNX_ZYNQMP_IPI_ERR_DEBUG >= lvl) {\ + qemu_log(TYPE_XLNX_ZYNQMP_IPI ": %s:" fmt, __func__, ## args);\ + } \ +} while (0) + +#define DB_PRINT(fmt, args...) DB_PRINT_L(1, fmt, ## args) + +REG32(IPI_TRIG, 0x0) + FIELD(IPI_TRIG, PL_3, 27, 1) + FIELD(IPI_TRIG, PL_2, 26, 1) + FIELD(IPI_TRIG, PL_1, 25, 1) + FIELD(IPI_TRIG, PL_0, 24, 1) + FIELD(IPI_TRIG, PMU_3, 19, 1) + FIELD(IPI_TRIG, PMU_2, 18, 1) + FIELD(IPI_TRIG, PMU_1, 17, 1) + FIELD(IPI_TRIG, PMU_0, 16, 1) + FIELD(IPI_TRIG, RPU_1, 9, 1) + FIELD(IPI_TRIG, RPU_0, 8, 1) + FIELD(IPI_TRIG, APU, 0, 1) +REG32(IPI_OBS, 0x4) + FIELD(IPI_OBS, PL_3, 27, 1) + FIELD(IPI_OBS, PL_2, 26, 1) + FIELD(IPI_OBS, PL_1, 25, 1) + FIELD(IPI_OBS, PL_0, 24, 1) + FIELD(IPI_OBS, PMU_3, 19, 1) + FIELD(IPI_OBS, PMU_2, 18, 1) + FIELD(IPI_OBS, PMU_1, 17, 1) + FIELD(IPI_OBS, PMU_0, 16, 1) + FIELD(IPI_OBS, RPU_1, 9, 1) + FIELD(IPI_OBS, RPU_0, 8, 1) + FIELD(IPI_OBS, APU, 0, 1) +REG32(IPI_ISR, 0x10) + FIELD(IPI_ISR, PL_3, 27, 1) + FIELD(IPI_ISR, PL_2, 26, 1) + FIELD(IPI_ISR, PL_1, 25, 1) + FIELD(IPI_ISR, PL_0, 24, 1) + FIELD(IPI_ISR, PMU_3, 19, 1) + FIELD(IPI_ISR, PMU_2, 18, 1) + FIELD(IPI_ISR, PMU_1, 17, 1) + FIELD(IPI_ISR, PMU_0, 16, 1) + FIELD(IPI_ISR, RPU_1, 9, 1) + FIELD(IPI_ISR, RPU_0, 8, 1) + FIELD(IPI_ISR, APU, 0, 1) +REG32(IPI_IMR, 0x14) + FIELD(IPI_IMR, PL_3, 27, 1) + FIELD(IPI_IMR, PL_2, 26, 1) + FIELD(IPI_IMR, PL_1, 25, 1) + FIELD(IPI_IMR, PL_0, 24, 1) + FIELD(IPI_IMR, PMU_3, 19, 1) + FIELD(IPI_IMR, PMU_2, 18, 1) + FIELD(IPI_IMR, PMU_1, 17, 1) + FIELD(IPI_IMR, PMU_0, 16, 1) + FIELD(IPI_IMR, RPU_1, 9, 1) + FIELD(IPI_IMR, RPU_0, 8, 1) + FIELD(IPI_IMR, APU, 0, 1) +REG32(IPI_IER, 0x18) + FIELD(IPI_IER, PL_3, 27, 1) + FIELD(IPI_IER, PL_2, 26, 1) + FIELD(IPI_IER, PL_1, 25, 1) + FIELD(IPI_IER, PL_0, 24, 1) + FIELD(IPI_IER, PMU_3, 19, 1) + FIELD(IPI_IER, PMU_2, 18, 1) + FIELD(IPI_IER, PMU_1, 17, 1) + FIELD(IPI_IER, PMU_0, 16, 1) + FIELD(IPI_IER, RPU_1, 9, 1) + FIELD(IPI_IER, RPU_0, 8, 1) + FIELD(IPI_IER, APU, 0, 1) +REG32(IPI_IDR, 0x1c) + FIELD(IPI_IDR, PL_3, 27, 1) + FIELD(IPI_IDR, 
PL_2, 26, 1) + FIELD(IPI_IDR, PL_1, 25, 1) + FIELD(IPI_IDR, PL_0, 24, 1) + FIELD(IPI_IDR, PMU_3, 19, 1) + FIELD(IPI_IDR, PMU_2, 18, 1) + FIELD(IPI_IDR, PMU_1, 17, 1) + FIELD(IPI_IDR, PMU_0, 16, 1) + FIELD(IPI_IDR, RPU_1, 9, 1) + FIELD(IPI_IDR, RPU_0, 8, 1) + FIELD(IPI_IDR, APU, 0, 1) + +/* APU + * RPU_0 + * RPU_1 + * PMU_0 + * PMU_1 + * PMU_2 + * PMU_3 + * PL_0 + * PL_1 + * PL_2 + * PL_3 + */ +int index_array[NUM_IPIS] = {0, 8, 9, 16, 17, 18, 19, 24, 25, 26, 27}; +static const char *index_array_names[NUM_IPIS] = {"APU", "RPU_0", "RPU_1", + "PMU_0", "PMU_1", "PMU_2", + "PMU_3", "PL_0", "PL_1", + "PL_2", "PL_3"}; + +static void xlnx_zynqmp_ipi_set_trig(XlnxZynqMPIPI *s, uint32_t val) +{ + int i, ipi_index, ipi_mask; + + for (i = 0; i < NUM_IPIS; i++) { + ipi_index = index_array[i]; + ipi_mask = (1 << ipi_index); + DB_PRINT("Setting %s=%d\n", index_array_names[i], + !!(val & ipi_mask)); + qemu_set_irq(s->irq_trig_out[i], !!(val & ipi_mask)); + } +} + +static void xlnx_zynqmp_ipi_set_obs(XlnxZynqMPIPI *s, uint32_t val) +{ + int i, ipi_index, ipi_mask; + + for (i = 0; i < NUM_IPIS; i++) { + ipi_index = index_array[i]; + ipi_mask = (1 << ipi_index); + DB_PRINT("Setting %s=%d\n", index_array_names[i], + !!(val & ipi_mask)); + qemu_set_irq(s->irq_obs_out[i], !!(val & ipi_mask)); + } +} + +static void xlnx_zynqmp_ipi_update_irq(XlnxZynqMPIPI *s) +{ + bool pending = s->regs[R_IPI_ISR] & ~s->regs[R_IPI_IMR]; + + DB_PRINT("irq=%d isr=%x mask=%x\n", + pending, s->regs[R_IPI_ISR], s->regs[R_IPI_IMR]); + qemu_set_irq(s->irq, pending); +} + +static uint64_t xlnx_zynqmp_ipi_trig_prew(RegisterInfo *reg, uint64_t val64) +{ + XlnxZynqMPIPI *s = XLNX_ZYNQMP_IPI(reg->opaque); + + xlnx_zynqmp_ipi_set_trig(s, val64); + + return val64; +} + +static void xlnx_zynqmp_ipi_trig_postw(RegisterInfo *reg, uint64_t val64) +{ + XlnxZynqMPIPI *s = XLNX_ZYNQMP_IPI(reg->opaque); + + /* TRIG generates a pulse on the outbound signals. We use the + * post-write callback to bring the signal back-down. 
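+ * The pre-write callback above has already driven the outbound lines + * high for every bit set in the written value, so clearing the + * register and the lines here completes the pulse.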
+ */ + s->regs[R_IPI_TRIG] = 0; + + xlnx_zynqmp_ipi_set_trig(s, 0); +} + +static uint64_t xlnx_zynqmp_ipi_isr_prew(RegisterInfo *reg, uint64_t val64) +{ + XlnxZynqMPIPI *s = XLNX_ZYNQMP_IPI(reg->opaque); + + xlnx_zynqmp_ipi_set_obs(s, val64); + + return val64; +} + +static void xlnx_zynqmp_ipi_isr_postw(RegisterInfo *reg, uint64_t val64) +{ + XlnxZynqMPIPI *s = XLNX_ZYNQMP_IPI(reg->opaque); + + xlnx_zynqmp_ipi_update_irq(s); +} + +static uint64_t xlnx_zynqmp_ipi_ier_prew(RegisterInfo *reg, uint64_t val64) +{ + XlnxZynqMPIPI *s = XLNX_ZYNQMP_IPI(reg->opaque); + uint32_t val = val64; + + s->regs[R_IPI_IMR] &= ~val; + xlnx_zynqmp_ipi_update_irq(s); + return 0; +} + +static uint64_t xlnx_zynqmp_ipi_idr_prew(RegisterInfo *reg, uint64_t val64) +{ + XlnxZynqMPIPI *s = XLNX_ZYNQMP_IPI(reg->opaque); + uint32_t val = val64; + + s->regs[R_IPI_IMR] |= val; + xlnx_zynqmp_ipi_update_irq(s); + return 0; +} + +static const RegisterAccessInfo xlnx_zynqmp_ipi_regs_info[] = { + { .name = "IPI_TRIG", .addr = A_IPI_TRIG, + .rsvd = 0xf0f0fcfe, + .ro = 0xf0f0fcfe, + .pre_write = xlnx_zynqmp_ipi_trig_prew, + .post_write = xlnx_zynqmp_ipi_trig_postw, + },{ .name = "IPI_OBS", .addr = A_IPI_OBS, + .rsvd = 0xf0f0fcfe, + .ro = 0xffffffff, + },{ .name = "IPI_ISR", .addr = A_IPI_ISR, + .rsvd = 0xf0f0fcfe, + .ro = 0xf0f0fcfe, + .w1c = 0xf0f0301, + .pre_write = xlnx_zynqmp_ipi_isr_prew, + .post_write = xlnx_zynqmp_ipi_isr_postw, + },{ .name = "IPI_IMR", .addr = A_IPI_IMR, + .reset = 0xf0f0301, + .rsvd = 0xf0f0fcfe, + .ro = 0xffffffff, + },{ .name = "IPI_IER", .addr = A_IPI_IER, + .rsvd = 0xf0f0fcfe, + .ro = 0xf0f0fcfe, + .pre_write = xlnx_zynqmp_ipi_ier_prew, + },{ .name = "IPI_IDR", .addr = A_IPI_IDR, + .rsvd = 0xf0f0fcfe, + .ro = 0xf0f0fcfe, + .pre_write = xlnx_zynqmp_ipi_idr_prew, + } +}; + +static void xlnx_zynqmp_ipi_reset(DeviceState *dev) +{ + XlnxZynqMPIPI *s = XLNX_ZYNQMP_IPI(dev); + int i; + + for (i = 0; i < ARRAY_SIZE(s->regs_info); ++i) { + register_reset(&s->regs_info[i]); + } + + xlnx_zynqmp_ipi_update_irq(s); +} + +static void xlnx_zynqmp_ipi_handler(void *opaque, int n, int level) +{ + XlnxZynqMPIPI *s = XLNX_ZYNQMP_IPI(opaque); + uint32_t val = (!!level) << n; + + DB_PRINT("IPI input irq[%d]=%d\n", n, level); + + s->regs[R_IPI_ISR] |= val; + xlnx_zynqmp_ipi_set_obs(s, s->regs[R_IPI_ISR]); + xlnx_zynqmp_ipi_update_irq(s); +} + +static void xlnx_zynqmp_obs_handler(void *opaque, int n, int level) +{ + XlnxZynqMPIPI *s = XLNX_ZYNQMP_IPI(opaque); + + DB_PRINT("OBS input irq[%d]=%d\n", n, level); + + s->regs[R_IPI_OBS] &= ~(1ULL << n); + s->regs[R_IPI_OBS] |= (level << n); +} + +static const MemoryRegionOps xlnx_zynqmp_ipi_ops = { + .read = register_read_memory, + .write = register_write_memory, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +static void xlnx_zynqmp_ipi_realize(DeviceState *dev, Error **errp) +{ + qdev_init_gpio_in_named(dev, xlnx_zynqmp_ipi_handler, "IPI_INPUTS", 32); + qdev_init_gpio_in_named(dev, xlnx_zynqmp_obs_handler, "OBS_INPUTS", 32); +} + +static void xlnx_zynqmp_ipi_init(Object *obj) +{ + XlnxZynqMPIPI *s = XLNX_ZYNQMP_IPI(obj); + DeviceState *dev = DEVICE(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + RegisterInfoArray *reg_array; + char *irq_name; + int i; + + memory_region_init(&s->iomem, obj, TYPE_XLNX_ZYNQMP_IPI, + R_XLNX_ZYNQMP_IPI_MAX * 4); + reg_array = + register_init_block32(DEVICE(obj), xlnx_zynqmp_ipi_regs_info, + ARRAY_SIZE(xlnx_zynqmp_ipi_regs_info), + s->regs_info, s->regs, + &xlnx_zynqmp_ipi_ops, + 
XLNX_ZYNQMP_IPI_ERR_DEBUG, + R_XLNX_ZYNQMP_IPI_MAX * 4); + memory_region_add_subregion(&s->iomem, + 0x0, + ®_array->mem); + sysbus_init_mmio(sbd, &s->iomem); + sysbus_init_irq(sbd, &s->irq); + + for (i = 0; i < NUM_IPIS; i++) { + qdev_init_gpio_out_named(dev, &s->irq_trig_out[i], + index_array_names[i], 1); + + irq_name = g_strdup_printf("OBS_%s", index_array_names[i]); + qdev_init_gpio_out_named(dev, &s->irq_obs_out[i], + irq_name, 1); + g_free(irq_name); + } +} + +static const VMStateDescription vmstate_zynqmp_pmu_ipi = { + .name = TYPE_XLNX_ZYNQMP_IPI, + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32_ARRAY(regs, XlnxZynqMPIPI, R_XLNX_ZYNQMP_IPI_MAX), + VMSTATE_END_OF_LIST(), + } +}; + +static void xlnx_zynqmp_ipi_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = xlnx_zynqmp_ipi_reset; + dc->realize = xlnx_zynqmp_ipi_realize; + dc->vmsd = &vmstate_zynqmp_pmu_ipi; +} + +static const TypeInfo xlnx_zynqmp_ipi_info = { + .name = TYPE_XLNX_ZYNQMP_IPI, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(XlnxZynqMPIPI), + .class_init = xlnx_zynqmp_ipi_class_init, + .instance_init = xlnx_zynqmp_ipi_init, +}; + +static void xlnx_zynqmp_ipi_register_types(void) +{ + type_register_static(&xlnx_zynqmp_ipi_info); +} + +type_init(xlnx_zynqmp_ipi_register_types) diff --git a/hw/ipack/Kconfig b/hw/ipack/Kconfig new file mode 100644 index 000000000..f8da24a62 --- /dev/null +++ b/hw/ipack/Kconfig @@ -0,0 +1,4 @@ +config IPACK + bool + default y if PCI_DEVICES + depends on PCI diff --git a/hw/ipack/ipack.c b/hw/ipack/ipack.c new file mode 100644 index 000000000..ae20f36da --- /dev/null +++ b/hw/ipack/ipack.c @@ -0,0 +1,123 @@ +/* + * QEMU IndustryPack emulation + * + * Copyright (C) 2012 Igalia, S.L. + * Author: Alberto Garcia + * + * This code is licensed under the GNU GPL v2 or (at your option) any + * later version. 
+ */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/module.h" +#include "hw/ipack/ipack.h" +#include "hw/irq.h" +#include "hw/qdev-properties.h" +#include "migration/vmstate.h" + +IPackDevice *ipack_device_find(IPackBus *bus, int32_t slot) +{ + BusChild *kid; + + QTAILQ_FOREACH(kid, &BUS(bus)->children, sibling) { + DeviceState *qdev = kid->child; + IPackDevice *ip = IPACK_DEVICE(qdev); + if (ip->slot == slot) { + return ip; + } + } + return NULL; +} + +void ipack_bus_init(IPackBus *bus, size_t bus_size, + DeviceState *parent, + uint8_t n_slots, + qemu_irq_handler handler) +{ + qbus_init(bus, bus_size, TYPE_IPACK_BUS, parent, NULL); + bus->n_slots = n_slots; + bus->set_irq = handler; +} + +static void ipack_device_realize(DeviceState *dev, Error **errp) +{ + IPackDevice *idev = IPACK_DEVICE(dev); + IPackBus *bus = IPACK_BUS(qdev_get_parent_bus(dev)); + IPackDeviceClass *k = IPACK_DEVICE_GET_CLASS(dev); + + if (idev->slot < 0) { + idev->slot = bus->free_slot; + } + if (idev->slot >= bus->n_slots) { + error_setg(errp, "Only %" PRIu8 " slots available.", bus->n_slots); + return; + } + bus->free_slot = idev->slot + 1; + + idev->irq = qemu_allocate_irqs(bus->set_irq, idev, 2); + + k->realize(dev, errp); +} + +static void ipack_device_unrealize(DeviceState *dev) +{ + IPackDevice *idev = IPACK_DEVICE(dev); + IPackDeviceClass *k = IPACK_DEVICE_GET_CLASS(dev); + + if (k->unrealize) { + k->unrealize(dev); + return; + } + + qemu_free_irqs(idev->irq, 2); +} + +static Property ipack_device_props[] = { + DEFINE_PROP_INT32("slot", IPackDevice, slot, -1), + DEFINE_PROP_END_OF_LIST() +}; + +static void ipack_device_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *k = DEVICE_CLASS(klass); + + set_bit(DEVICE_CATEGORY_INPUT, k->categories); + k->bus_type = TYPE_IPACK_BUS; + k->realize = ipack_device_realize; + k->unrealize = ipack_device_unrealize; + device_class_set_props(k, ipack_device_props); +} + +const VMStateDescription vmstate_ipack_device = { + .name = "ipack_device", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_INT32(slot, IPackDevice), + VMSTATE_END_OF_LIST() + } +}; + +static const TypeInfo ipack_device_info = { + .name = TYPE_IPACK_DEVICE, + .parent = TYPE_DEVICE, + .instance_size = sizeof(IPackDevice), + .class_size = sizeof(IPackDeviceClass), + .class_init = ipack_device_class_init, + .abstract = true, +}; + +static const TypeInfo ipack_bus_info = { + .name = TYPE_IPACK_BUS, + .parent = TYPE_BUS, + .instance_size = sizeof(IPackBus), +}; + +static void ipack_register_types(void) +{ + type_register_static(&ipack_device_info); + type_register_static(&ipack_bus_info); +} + +type_init(ipack_register_types) diff --git a/hw/ipack/meson.build b/hw/ipack/meson.build new file mode 100644 index 000000000..3f8138b6f --- /dev/null +++ b/hw/ipack/meson.build @@ -0,0 +1 @@ +softmmu_ss.add(when: 'CONFIG_IPACK', if_true: files('ipack.c', 'tpci200.c')) diff --git a/hw/ipack/tpci200.c b/hw/ipack/tpci200.c new file mode 100644 index 000000000..1f764fc85 --- /dev/null +++ b/hw/ipack/tpci200.c @@ -0,0 +1,664 @@ +/* + * QEMU TEWS TPCI200 IndustryPack carrier emulation + * + * Copyright (C) 2012 Igalia, S.L. + * Author: Alberto Garcia + * + * This code is licensed under the GNU GPL v2 or (at your option) any + * later version. 
+ */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "hw/ipack/ipack.h" +#include "hw/irq.h" +#include "hw/pci/pci.h" +#include "migration/vmstate.h" +#include "qemu/bitops.h" +#include "qemu/module.h" +#include "qom/object.h" + +/* #define DEBUG_TPCI */ + +#ifdef DEBUG_TPCI +#define DPRINTF(fmt, ...) \ + do { fprintf(stderr, "TPCI200: " fmt, ## __VA_ARGS__); } while (0) +#else +#define DPRINTF(fmt, ...) do { } while (0) +#endif + +#define N_MODULES 4 + +#define IP_ID_SPACE 2 +#define IP_INT_SPACE 3 +#define IP_IO_SPACE_ADDR_MASK 0x7F +#define IP_ID_SPACE_ADDR_MASK 0x3F +#define IP_INT_SPACE_ADDR_MASK 0x3F + +#define STATUS_INT(IP, INTNO) BIT((IP) * 2 + (INTNO)) +#define STATUS_TIME(IP) BIT((IP) + 12) +#define STATUS_ERR_ANY 0xF00 + +#define CTRL_CLKRATE BIT(0) +#define CTRL_RECOVER BIT(1) +#define CTRL_TIME_INT BIT(2) +#define CTRL_ERR_INT BIT(3) +#define CTRL_INT_EDGE(INTNO) BIT(4 + (INTNO)) +#define CTRL_INT(INTNO) BIT(6 + (INTNO)) + +#define REG_REV_ID 0x00 +#define REG_IP_A_CTRL 0x02 +#define REG_IP_B_CTRL 0x04 +#define REG_IP_C_CTRL 0x06 +#define REG_IP_D_CTRL 0x08 +#define REG_RESET 0x0A +#define REG_STATUS 0x0C +#define IP_N_FROM_REG(REG) ((REG) / 2 - 1) + +struct TPCI200State { + PCIDevice dev; + IPackBus bus; + MemoryRegion mmio; + MemoryRegion io; + MemoryRegion las0; + MemoryRegion las1; + MemoryRegion las2; + MemoryRegion las3; + bool big_endian[3]; + uint8_t ctrl[N_MODULES]; + uint16_t status; + uint8_t int_set; +}; + +#define TYPE_TPCI200 "tpci200" + +OBJECT_DECLARE_SIMPLE_TYPE(TPCI200State, TPCI200) + +static const uint8_t local_config_regs[] = { + 0x00, 0xFF, 0xFF, 0x0F, 0x00, 0xFC, 0xFF, 0x0F, 0x00, 0x00, 0x00, + 0x0E, 0x00, 0x00, 0x00, 0x0F, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, + 0x00, 0x08, 0x01, 0x00, 0x00, 0x04, 0x01, 0x00, 0x00, 0x00, 0x01, + 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0xA0, 0x60, 0x41, 0xD4, + 0xA2, 0x20, 0x41, 0x14, 0xA2, 0x20, 0x41, 0x14, 0xA2, 0x20, 0x01, + 0x14, 0x00, 0x00, 0x00, 0x00, 0x81, 0x00, 0x00, 0x08, 0x01, 0x02, + 0x00, 0x04, 0x01, 0x00, 0x00, 0x01, 0x01, 0x00, 0x80, 0x02, 0x41, + 0x00, 0x00, 0x00, 0x00, 0x40, 0x7A, 0x00, 0x52, 0x92, 0x24, 0x02 +}; + +static void adjust_addr(bool big_endian, hwaddr *addr, unsigned size) +{ + /* During 8 bit access in big endian mode, + odd and even addresses are swapped */ + if (big_endian && size == 1) { + *addr ^= 1; + } +} + +static uint64_t adjust_value(bool big_endian, uint64_t *val, unsigned size) +{ + /* Local spaces only support 8/16 bit access, + * so there's no need to care for sizes > 2 */ + if (big_endian && size == 2) { + *val = bswap16(*val); + } + return *val; +} + +static void tpci200_set_irq(void *opaque, int intno, int level) +{ + IPackDevice *ip = opaque; + IPackBus *bus = IPACK_BUS(qdev_get_parent_bus(DEVICE(ip))); + PCIDevice *pcidev = PCI_DEVICE(BUS(bus)->parent); + TPCI200State *dev = TPCI200(pcidev); + unsigned ip_n = ip->slot; + uint16_t prev_status = dev->status; + + assert(ip->slot >= 0 && ip->slot < N_MODULES); + + /* The requested interrupt must be enabled in the IP CONTROL + * register */ + if (!(dev->ctrl[ip_n] & CTRL_INT(intno))) { + return; + } + + /* Update the interrupt status in the IP STATUS register */ + if (level) { + dev->status |= STATUS_INT(ip_n, intno); + } else { + dev->status &= ~STATUS_INT(ip_n, intno); + } + + /* Return if there are no changes */ + if (dev->status == prev_status) { + return; + } + + DPRINTF("IP %u INT%u#: %u\n", ip_n, intno, level); + + /* Check if the interrupt is edge sensitive */ + if (dev->ctrl[ip_n] & CTRL_INT_EDGE(intno)) { 
+ if (level) { + pci_set_irq(&dev->dev, !dev->int_set); + pci_set_irq(&dev->dev, dev->int_set); + } + } else { + unsigned i, j; + uint16_t level_status = dev->status; + + /* Check if there are any level sensitive interrupts set by + removing the ones that are edge sensitive from the status + register */ + for (i = 0; i < N_MODULES; i++) { + for (j = 0; j < 2; j++) { + if (dev->ctrl[i] & CTRL_INT_EDGE(j)) { + level_status &= ~STATUS_INT(i, j); + } + } + } + + if (level_status && !dev->int_set) { + pci_irq_assert(&dev->dev); + dev->int_set = 1; + } else if (!level_status && dev->int_set) { + pci_irq_deassert(&dev->dev); + dev->int_set = 0; + } + } +} + +static uint64_t tpci200_read_cfg(void *opaque, hwaddr addr, unsigned size) +{ + TPCI200State *s = opaque; + uint8_t ret = 0; + if (addr < ARRAY_SIZE(local_config_regs)) { + ret = local_config_regs[addr]; + } + /* Endianness is stored in the first bit of these registers */ + if ((addr == 0x2b && s->big_endian[0]) || + (addr == 0x2f && s->big_endian[1]) || + (addr == 0x33 && s->big_endian[2])) { + ret |= 1; + } + DPRINTF("Read from LCR 0x%x: 0x%x\n", (unsigned) addr, (unsigned) ret); + return ret; +} + +static void tpci200_write_cfg(void *opaque, hwaddr addr, uint64_t val, + unsigned size) +{ + TPCI200State *s = opaque; + /* Endianness is stored in the first bit of these registers */ + if (addr == 0x2b || addr == 0x2f || addr == 0x33) { + unsigned las = (addr - 0x2b) / 4; + s->big_endian[las] = val & 1; + DPRINTF("LAS%u big endian mode: %u\n", las, (unsigned) val & 1); + } else { + DPRINTF("Write to LCR 0x%x: 0x%x\n", (unsigned) addr, (unsigned) val); + } +} + +static uint64_t tpci200_read_las0(void *opaque, hwaddr addr, unsigned size) +{ + TPCI200State *s = opaque; + uint64_t ret = 0; + + switch (addr) { + + case REG_REV_ID: + DPRINTF("Read REVISION ID\n"); /* Current value is 0x00 */ + break; + + case REG_IP_A_CTRL: + case REG_IP_B_CTRL: + case REG_IP_C_CTRL: + case REG_IP_D_CTRL: + { + unsigned ip_n = IP_N_FROM_REG(addr); + ret = s->ctrl[ip_n]; + DPRINTF("Read IP %c CONTROL: 0x%x\n", 'A' + ip_n, (unsigned) ret); + } + break; + + case REG_RESET: + DPRINTF("Read RESET\n"); /* Not implemented */ + break; + + case REG_STATUS: + ret = s->status; + DPRINTF("Read STATUS: 0x%x\n", (unsigned) ret); + break; + + /* Reserved */ + default: + DPRINTF("Unsupported read from LAS0 0x%x\n", (unsigned) addr); + break; + } + + return adjust_value(s->big_endian[0], &ret, size); +} + +static void tpci200_write_las0(void *opaque, hwaddr addr, uint64_t val, + unsigned size) +{ + TPCI200State *s = opaque; + + adjust_value(s->big_endian[0], &val, size); + + switch (addr) { + + case REG_REV_ID: + DPRINTF("Write Revision ID: 0x%x\n", (unsigned) val); /* No effect */ + break; + + case REG_IP_A_CTRL: + case REG_IP_B_CTRL: + case REG_IP_C_CTRL: + case REG_IP_D_CTRL: + { + unsigned ip_n = IP_N_FROM_REG(addr); + s->ctrl[ip_n] = val; + DPRINTF("Write IP %c CONTROL: 0x%x\n", 'A' + ip_n, (unsigned) val); + } + break; + + case REG_RESET: + DPRINTF("Write RESET: 0x%x\n", (unsigned) val); /* Not implemented */ + break; + + case REG_STATUS: + { + unsigned i; + + for (i = 0; i < N_MODULES; i++) { + IPackDevice *ip = ipack_device_find(&s->bus, i); + + if (ip != NULL) { + if (val & STATUS_INT(i, 0)) { + DPRINTF("Clear IP %c INT0# status\n", 'A' + i); + qemu_irq_lower(ip->irq[0]); + } + if (val & STATUS_INT(i, 1)) { + DPRINTF("Clear IP %c INT1# status\n", 'A' + i); + qemu_irq_lower(ip->irq[1]); + } + } + + if (val & STATUS_TIME(i)) { + DPRINTF("Clear IP %c timeout\n", 'A' + i); + 
s->status &= ~STATUS_TIME(i); + } + } + + if (val & STATUS_ERR_ANY) { + DPRINTF("Unexpected write to STATUS register: 0x%x\n", + (unsigned) val); + } + } + break; + + /* Reserved */ + default: + DPRINTF("Unsupported write to LAS0 0x%x: 0x%x\n", + (unsigned) addr, (unsigned) val); + break; + } +} + +static uint64_t tpci200_read_las1(void *opaque, hwaddr addr, unsigned size) +{ + TPCI200State *s = opaque; + IPackDevice *ip; + uint64_t ret = 0; + unsigned ip_n, space; + uint8_t offset; + + adjust_addr(s->big_endian[1], &addr, size); + + /* + * The address is divided into the IP module number (0-4), the IP + * address space (I/O, ID, INT) and the offset within that space. + */ + ip_n = addr >> 8; + space = (addr >> 6) & 3; + ip = ipack_device_find(&s->bus, ip_n); + + if (ip == NULL) { + DPRINTF("Read LAS1: IP module %u not installed\n", ip_n); + } else { + IPackDeviceClass *k = IPACK_DEVICE_GET_CLASS(ip); + switch (space) { + + case IP_ID_SPACE: + offset = addr & IP_ID_SPACE_ADDR_MASK; + if (k->id_read) { + ret = k->id_read(ip, offset); + } + break; + + case IP_INT_SPACE: + offset = addr & IP_INT_SPACE_ADDR_MASK; + + /* Read address 0 to ACK IP INT0# and address 2 to ACK IP INT1# */ + if (offset == 0 || offset == 2) { + unsigned intno = offset / 2; + bool int_set = s->status & STATUS_INT(ip_n, intno); + bool int_edge_sensitive = s->ctrl[ip_n] & CTRL_INT_EDGE(intno); + if (int_set && !int_edge_sensitive) { + qemu_irq_lower(ip->irq[intno]); + } + } + + if (k->int_read) { + ret = k->int_read(ip, offset); + } + break; + + default: + offset = addr & IP_IO_SPACE_ADDR_MASK; + if (k->io_read) { + ret = k->io_read(ip, offset); + } + break; + } + } + + return adjust_value(s->big_endian[1], &ret, size); +} + +static void tpci200_write_las1(void *opaque, hwaddr addr, uint64_t val, + unsigned size) +{ + TPCI200State *s = opaque; + IPackDevice *ip; + unsigned ip_n, space; + uint8_t offset; + + adjust_addr(s->big_endian[1], &addr, size); + adjust_value(s->big_endian[1], &val, size); + + /* + * The address is divided into the IP module number, the IP + * address space (I/O, ID, INT) and the offset within that space. + */ + ip_n = addr >> 8; + space = (addr >> 6) & 3; + ip = ipack_device_find(&s->bus, ip_n); + + if (ip == NULL) { + DPRINTF("Write LAS1: IP module %u not installed\n", ip_n); + } else { + IPackDeviceClass *k = IPACK_DEVICE_GET_CLASS(ip); + switch (space) { + + case IP_ID_SPACE: + offset = addr & IP_ID_SPACE_ADDR_MASK; + if (k->id_write) { + k->id_write(ip, offset, val); + } + break; + + case IP_INT_SPACE: + offset = addr & IP_INT_SPACE_ADDR_MASK; + if (k->int_write) { + k->int_write(ip, offset, val); + } + break; + + default: + offset = addr & IP_IO_SPACE_ADDR_MASK; + if (k->io_write) { + k->io_write(ip, offset, val); + } + break; + } + } +} + +static uint64_t tpci200_read_las2(void *opaque, hwaddr addr, unsigned size) +{ + TPCI200State *s = opaque; + IPackDevice *ip; + uint64_t ret = 0; + unsigned ip_n; + uint32_t offset; + + adjust_addr(s->big_endian[2], &addr, size); + + /* + * The address is divided into the IP module number and the offset + * within the IP module MEM space. 
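+ * Each of the four modules gets an 8 MiB window here: bits [24:23] + * select the module and bits [22:0] are the offset within it.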
+ */ + ip_n = addr >> 23; + offset = addr & 0x7fffff; + ip = ipack_device_find(&s->bus, ip_n); + + if (ip == NULL) { + DPRINTF("Read LAS2: IP module %u not installed\n", ip_n); + } else { + IPackDeviceClass *k = IPACK_DEVICE_GET_CLASS(ip); + if (k->mem_read16) { + ret = k->mem_read16(ip, offset); + } + } + + return adjust_value(s->big_endian[2], &ret, size); +} + +static void tpci200_write_las2(void *opaque, hwaddr addr, uint64_t val, + unsigned size) +{ + TPCI200State *s = opaque; + IPackDevice *ip; + unsigned ip_n; + uint32_t offset; + + adjust_addr(s->big_endian[2], &addr, size); + adjust_value(s->big_endian[2], &val, size); + + /* + * The address is divided into the IP module number and the offset + * within the IP module MEM space. + */ + ip_n = addr >> 23; + offset = addr & 0x7fffff; + ip = ipack_device_find(&s->bus, ip_n); + + if (ip == NULL) { + DPRINTF("Write LAS2: IP module %u not installed\n", ip_n); + } else { + IPackDeviceClass *k = IPACK_DEVICE_GET_CLASS(ip); + if (k->mem_write16) { + k->mem_write16(ip, offset, val); + } + } +} + +static uint64_t tpci200_read_las3(void *opaque, hwaddr addr, unsigned size) +{ + TPCI200State *s = opaque; + IPackDevice *ip; + uint64_t ret = 0; + /* + * The address is divided into the IP module number and the offset + * within the IP module MEM space. + */ + unsigned ip_n = addr >> 22; + uint32_t offset = addr & 0x3fffff; + + ip = ipack_device_find(&s->bus, ip_n); + + if (ip == NULL) { + DPRINTF("Read LAS3: IP module %u not installed\n", ip_n); + } else { + IPackDeviceClass *k = IPACK_DEVICE_GET_CLASS(ip); + if (k->mem_read8) { + ret = k->mem_read8(ip, offset); + } + } + + return ret; +} + +static void tpci200_write_las3(void *opaque, hwaddr addr, uint64_t val, + unsigned size) +{ + TPCI200State *s = opaque; + IPackDevice *ip; + /* + * The address is divided into the IP module number and the offset + * within the IP module MEM space. 
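+ * LAS3 gives each module a 4 MiB window: bits [23:22] select the + * module and bits [21:0] are the byte offset.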
+ */ + unsigned ip_n = addr >> 22; + uint32_t offset = addr & 0x3fffff; + + ip = ipack_device_find(&s->bus, ip_n); + + if (ip == NULL) { + DPRINTF("Write LAS3: IP module %u not installed\n", ip_n); + } else { + IPackDeviceClass *k = IPACK_DEVICE_GET_CLASS(ip); + if (k->mem_write8) { + k->mem_write8(ip, offset, val); + } + } +} + +static const MemoryRegionOps tpci200_cfg_ops = { + .read = tpci200_read_cfg, + .write = tpci200_write_cfg, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + .min_access_size = 1, + .max_access_size = 4 + }, + .impl = { + .min_access_size = 1, + .max_access_size = 1 + } +}; + +static const MemoryRegionOps tpci200_las0_ops = { + .read = tpci200_read_las0, + .write = tpci200_write_las0, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + .min_access_size = 2, + .max_access_size = 2 + } +}; + +static const MemoryRegionOps tpci200_las1_ops = { + .read = tpci200_read_las1, + .write = tpci200_write_las1, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + .min_access_size = 1, + .max_access_size = 2 + } +}; + +static const MemoryRegionOps tpci200_las2_ops = { + .read = tpci200_read_las2, + .write = tpci200_write_las2, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + .min_access_size = 1, + .max_access_size = 2 + } +}; + +static const MemoryRegionOps tpci200_las3_ops = { + .read = tpci200_read_las3, + .write = tpci200_write_las3, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + .min_access_size = 1, + .max_access_size = 1 + } +}; + +static void tpci200_realize(PCIDevice *pci_dev, Error **errp) +{ + TPCI200State *s = TPCI200(pci_dev); + uint8_t *c = s->dev.config; + + pci_set_word(c + PCI_COMMAND, 0x0003); + pci_set_word(c + PCI_STATUS, 0x0280); + + pci_set_byte(c + PCI_INTERRUPT_PIN, 0x01); /* Interrupt pin A */ + + pci_set_byte(c + PCI_CAPABILITY_LIST, 0x40); + pci_set_long(c + 0x40, 0x48014801); + pci_set_long(c + 0x48, 0x00024C06); + pci_set_long(c + 0x4C, 0x00000003); + + memory_region_init_io(&s->mmio, OBJECT(s), &tpci200_cfg_ops, + s, "tpci200_mmio", 128); + memory_region_init_io(&s->io, OBJECT(s), &tpci200_cfg_ops, + s, "tpci200_io", 128); + memory_region_init_io(&s->las0, OBJECT(s), &tpci200_las0_ops, + s, "tpci200_las0", 256); + memory_region_init_io(&s->las1, OBJECT(s), &tpci200_las1_ops, + s, "tpci200_las1", 1024); + memory_region_init_io(&s->las2, OBJECT(s), &tpci200_las2_ops, + s, "tpci200_las2", 32 * MiB); + memory_region_init_io(&s->las3, OBJECT(s), &tpci200_las3_ops, + s, "tpci200_las3", 16 * MiB); + pci_register_bar(&s->dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &s->mmio); + pci_register_bar(&s->dev, 1, PCI_BASE_ADDRESS_SPACE_IO, &s->io); + pci_register_bar(&s->dev, 2, PCI_BASE_ADDRESS_SPACE_MEMORY, &s->las0); + pci_register_bar(&s->dev, 3, PCI_BASE_ADDRESS_SPACE_MEMORY, &s->las1); + pci_register_bar(&s->dev, 4, PCI_BASE_ADDRESS_SPACE_MEMORY, &s->las2); + pci_register_bar(&s->dev, 5, PCI_BASE_ADDRESS_SPACE_MEMORY, &s->las3); + + ipack_bus_init(&s->bus, sizeof(s->bus), DEVICE(pci_dev), + N_MODULES, tpci200_set_irq); +} + +static const VMStateDescription vmstate_tpci200 = { + .name = "tpci200", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_PCI_DEVICE(dev, TPCI200State), + VMSTATE_BOOL_ARRAY(big_endian, TPCI200State, 3), + VMSTATE_UINT8_ARRAY(ctrl, TPCI200State, N_MODULES), + VMSTATE_UINT16(status, TPCI200State), + VMSTATE_UINT8(int_set, TPCI200State), + VMSTATE_END_OF_LIST() + } +}; + +static void tpci200_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass 
*k = PCI_DEVICE_CLASS(klass); + + k->realize = tpci200_realize; + k->vendor_id = PCI_VENDOR_ID_TEWS; + k->device_id = PCI_DEVICE_ID_TEWS_TPCI200; + k->class_id = PCI_CLASS_BRIDGE_OTHER; + k->subsystem_vendor_id = PCI_VENDOR_ID_TEWS; + k->subsystem_id = 0x300A; + set_bit(DEVICE_CATEGORY_INPUT, dc->categories); + dc->desc = "TEWS TPCI200 IndustryPack carrier"; + dc->vmsd = &vmstate_tpci200; +} + +static const TypeInfo tpci200_info = { + .name = TYPE_TPCI200, + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(TPCI200State), + .class_init = tpci200_class_init, + .interfaces = (InterfaceInfo[]) { + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { }, + }, +}; + +static void tpci200_register_types(void) +{ + type_register_static(&tpci200_info); +} + +type_init(tpci200_register_types) diff --git a/hw/ipmi/Kconfig b/hw/ipmi/Kconfig new file mode 100644 index 000000000..9befd4f42 --- /dev/null +++ b/hw/ipmi/Kconfig @@ -0,0 +1,37 @@ +config IPMI + bool + +config IPMI_LOCAL + bool + default y + depends on IPMI + +config IPMI_EXTERN + bool + default y + depends on IPMI + +config ISA_IPMI_KCS + bool + depends on ISA_BUS + select IPMI + +config ISA_IPMI_BT + bool + depends on ISA_BUS + select IPMI + +config PCI_IPMI_KCS + bool + depends on PCI + select IPMI + +config PCI_IPMI_BT + bool + depends on PCI + select IPMI + +config IPMI_SSIF + bool + depends on I2C + select IPMI diff --git a/hw/ipmi/ipmi.c b/hw/ipmi/ipmi.c new file mode 100644 index 000000000..8d35c9fdd --- /dev/null +++ b/hw/ipmi/ipmi.c @@ -0,0 +1,138 @@ +/* + * QEMU IPMI emulation + * + * Copyright (c) 2015 Corey Minyard, MontaVista Software, LLC + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "hw/ipmi/ipmi.h" +#include "hw/qdev-properties.h" +#include "qom/object_interfaces.h" +#include "sysemu/runstate.h" +#include "qapi/error.h" +#include "qemu/module.h" +#include "hw/nmi.h" + +static uint32_t ipmi_current_uuid = 1; + +uint32_t ipmi_next_uuid(void) +{ + return ipmi_current_uuid++; +} + +static int ipmi_do_hw_op(IPMIInterface *s, enum ipmi_op op, int checkonly) +{ + switch (op) { + case IPMI_RESET_CHASSIS: + if (checkonly) { + return 0; + } + qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); + return 0; + + case IPMI_POWEROFF_CHASSIS: + if (checkonly) { + return 0; + } + qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN); + return 0; + + case IPMI_SEND_NMI: + if (checkonly) { + return 0; + } + /* We don't care what CPU we use. 
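Any CPU index is acceptable for this, so 0 is passed to nmi_monitor_handle() below.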
*/ + nmi_monitor_handle(0, NULL); + return 0; + + case IPMI_SHUTDOWN_VIA_ACPI_OVERTEMP: + if (checkonly) { + return 0; + } + qemu_system_powerdown_request(); + return 0; + + case IPMI_POWERCYCLE_CHASSIS: + case IPMI_PULSE_DIAG_IRQ: + case IPMI_POWERON_CHASSIS: + default: + return IPMI_CC_COMMAND_NOT_SUPPORTED; + } +} + +static void ipmi_interface_class_init(ObjectClass *class, void *data) +{ + IPMIInterfaceClass *ik = IPMI_INTERFACE_CLASS(class); + + ik->do_hw_op = ipmi_do_hw_op; +} + +static TypeInfo ipmi_interface_type_info = { + .name = TYPE_IPMI_INTERFACE, + .parent = TYPE_INTERFACE, + .class_size = sizeof(IPMIInterfaceClass), + .class_init = ipmi_interface_class_init, +}; + +static void isa_ipmi_bmc_check(const Object *obj, const char *name, + Object *val, Error **errp) +{ + IPMIBmc *bmc = IPMI_BMC(val); + + if (bmc->intf) { + error_setg(errp, "BMC object is already in use"); + } +} + +void ipmi_bmc_find_and_link(Object *obj, Object **bmc) +{ + object_property_add_link(obj, "bmc", TYPE_IPMI_BMC, bmc, + isa_ipmi_bmc_check, + OBJ_PROP_LINK_STRONG); +} + +static Property ipmi_bmc_properties[] = { + DEFINE_PROP_UINT8("slave_addr", IPMIBmc, slave_addr, 0x20), + DEFINE_PROP_END_OF_LIST(), +}; + +static void bmc_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + + device_class_set_props(dc, ipmi_bmc_properties); +} + +static TypeInfo ipmi_bmc_type_info = { + .name = TYPE_IPMI_BMC, + .parent = TYPE_DEVICE, + .instance_size = sizeof(IPMIBmc), + .abstract = true, + .class_size = sizeof(IPMIBmcClass), + .class_init = bmc_class_init, +}; + +static void ipmi_register_types(void) +{ + type_register_static(&ipmi_interface_type_info); + type_register_static(&ipmi_bmc_type_info); +} + +type_init(ipmi_register_types) diff --git a/hw/ipmi/ipmi_bmc_extern.c b/hw/ipmi/ipmi_bmc_extern.c new file mode 100644 index 000000000..acf2bab35 --- /dev/null +++ b/hw/ipmi/ipmi_bmc_extern.c @@ -0,0 +1,548 @@ +/* + * IPMI BMC external connection + * + * Copyright (c) 2015 Corey Minyard, MontaVista Software, LLC + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +/* + * This is designed to connect with OpenIPMI's lanserv serial interface + * using the "VM" connection type. See that for details.
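+ * + * In short: bytes flow over a chardev; 0xA0 terminates an IPMI + * message, 0xA1 terminates a control command, and 0xAA escapes a + * data byte that matches one of these three special values (the + * escaped byte is sent with bit 4 set and has it cleared again on + * receive, so 0xA0 travels as 0xAA 0xB0).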
+ */ + +#include "qemu/osdep.h" +#include "qemu/error-report.h" +#include "qemu/module.h" +#include "qapi/error.h" +#include "qemu/timer.h" +#include "chardev/char-fe.h" +#include "hw/ipmi/ipmi.h" +#include "hw/qdev-properties.h" +#include "hw/qdev-properties-system.h" +#include "migration/vmstate.h" +#include "qom/object.h" + +#define VM_MSG_CHAR 0xA0 /* Marks end of message */ +#define VM_CMD_CHAR 0xA1 /* Marks end of a command */ +#define VM_ESCAPE_CHAR 0xAA /* Set bit 4 from the next byte to 0 */ + +#define VM_PROTOCOL_VERSION 1 +#define VM_CMD_VERSION 0xff /* A version number byte follows */ +#define VM_CMD_NOATTN 0x00 +#define VM_CMD_ATTN 0x01 +#define VM_CMD_ATTN_IRQ 0x02 +#define VM_CMD_POWEROFF 0x03 +#define VM_CMD_RESET 0x04 +#define VM_CMD_ENABLE_IRQ 0x05 /* Enable/disable the messaging irq */ +#define VM_CMD_DISABLE_IRQ 0x06 +#define VM_CMD_SEND_NMI 0x07 +#define VM_CMD_CAPABILITIES 0x08 +#define VM_CAPABILITIES_POWER 0x01 +#define VM_CAPABILITIES_RESET 0x02 +#define VM_CAPABILITIES_IRQ 0x04 +#define VM_CAPABILITIES_NMI 0x08 +#define VM_CAPABILITIES_ATTN 0x10 +#define VM_CAPABILITIES_GRACEFUL_SHUTDOWN 0x20 +#define VM_CMD_GRACEFUL_SHUTDOWN 0x09 + +#define TYPE_IPMI_BMC_EXTERN "ipmi-bmc-extern" +OBJECT_DECLARE_SIMPLE_TYPE(IPMIBmcExtern, IPMI_BMC_EXTERN) +struct IPMIBmcExtern { + IPMIBmc parent; + + CharBackend chr; + + bool connected; + + unsigned char inbuf[MAX_IPMI_MSG_SIZE + 2]; + unsigned int inpos; + bool in_escape; + bool in_too_many; + bool waiting_rsp; + bool sending_cmd; + + unsigned char outbuf[(MAX_IPMI_MSG_SIZE + 2) * 2 + 1]; + unsigned int outpos; + unsigned int outlen; + + struct QEMUTimer *extern_timer; + + /* A reset event is pending to be sent upstream. */ + bool send_reset; +}; + +static unsigned char +ipmb_checksum(const unsigned char *data, int size, unsigned char start) +{ + unsigned char csum = start; + + for (; size > 0; size--, data++) { + csum += *data; + } + return csum; +} + +static void continue_send(IPMIBmcExtern *ibe) +{ + int ret; + if (ibe->outlen == 0) { + goto check_reset; + } + send: + ret = qemu_chr_fe_write(&ibe->chr, ibe->outbuf + ibe->outpos, + ibe->outlen - ibe->outpos); + if (ret > 0) { + ibe->outpos += ret; + } + if (ibe->outpos < ibe->outlen) { + /* Not fully transmitted, try again in a 10ms */ + timer_mod_ns(ibe->extern_timer, + qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 10000000); + } else { + /* Sent */ + ibe->outlen = 0; + ibe->outpos = 0; + if (!ibe->sending_cmd) { + ibe->waiting_rsp = true; + } else { + ibe->sending_cmd = false; + } + check_reset: + if (ibe->connected && ibe->send_reset) { + /* Send the reset */ + ibe->outbuf[0] = VM_CMD_RESET; + ibe->outbuf[1] = VM_CMD_CHAR; + ibe->outlen = 2; + ibe->outpos = 0; + ibe->send_reset = false; + ibe->sending_cmd = true; + goto send; + } + + if (ibe->waiting_rsp) { + /* Make sure we get a response within 4 seconds. */ + timer_mod_ns(ibe->extern_timer, + qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 4000000000ULL); + } + } + return; +} + +static void extern_timeout(void *opaque) +{ + IPMIBmcExtern *ibe = opaque; + IPMIInterface *s = ibe->parent.intf; + + if (ibe->connected) { + if (ibe->waiting_rsp && (ibe->outlen == 0)) { + IPMIInterfaceClass *k = IPMI_INTERFACE_GET_CLASS(s); + /* The message response timed out, return an error. 
*/ + ibe->waiting_rsp = false; + ibe->inbuf[1] = ibe->outbuf[1] | 0x04; + ibe->inbuf[2] = ibe->outbuf[2]; + ibe->inbuf[3] = IPMI_CC_TIMEOUT; + k->handle_rsp(s, ibe->outbuf[0], ibe->inbuf + 1, 3); + } else { + continue_send(ibe); + } + } +} + +static void addchar(IPMIBmcExtern *ibe, unsigned char ch) +{ + switch (ch) { + case VM_MSG_CHAR: + case VM_CMD_CHAR: + case VM_ESCAPE_CHAR: + ibe->outbuf[ibe->outlen] = VM_ESCAPE_CHAR; + ibe->outlen++; + ch |= 0x10; + /* fall through */ + default: + ibe->outbuf[ibe->outlen] = ch; + ibe->outlen++; + } +} + +static void ipmi_bmc_extern_handle_command(IPMIBmc *b, + uint8_t *cmd, unsigned int cmd_len, + unsigned int max_cmd_len, + uint8_t msg_id) +{ + IPMIBmcExtern *ibe = IPMI_BMC_EXTERN(b); + IPMIInterface *s = ibe->parent.intf; + uint8_t err = 0, csum; + unsigned int i; + + if (ibe->outlen) { + /* We already have a command queued. Shouldn't ever happen. */ + error_report("IPMI KCS: Got command when not finished with the" + " previous command"); + abort(); + } + + /* If it's too short or it was truncated, return an error. */ + if (cmd_len < 2) { + err = IPMI_CC_REQUEST_DATA_LENGTH_INVALID; + } else if ((cmd_len > max_cmd_len) || (cmd_len > MAX_IPMI_MSG_SIZE)) { + err = IPMI_CC_REQUEST_DATA_TRUNCATED; + } else if (!ibe->connected) { + err = IPMI_CC_BMC_INIT_IN_PROGRESS; + } + if (err) { + IPMIInterfaceClass *k = IPMI_INTERFACE_GET_CLASS(s); + unsigned char rsp[3]; + rsp[0] = cmd[0] | 0x04; + rsp[1] = cmd[1]; + rsp[2] = err; + ibe->waiting_rsp = false; + k->handle_rsp(s, msg_id, rsp, 3); + goto out; + } + + addchar(ibe, msg_id); + for (i = 0; i < cmd_len; i++) { + addchar(ibe, cmd[i]); + } + csum = ipmb_checksum(&msg_id, 1, 0); + addchar(ibe, -ipmb_checksum(cmd, cmd_len, csum)); + + ibe->outbuf[ibe->outlen] = VM_MSG_CHAR; + ibe->outlen++; + + /* Start the transmit */ + continue_send(ibe); + + out: + return; +} + +static void handle_hw_op(IPMIBmcExtern *ibe, unsigned char hw_op) +{ + IPMIInterface *s = ibe->parent.intf; + IPMIInterfaceClass *k = IPMI_INTERFACE_GET_CLASS(s); + + switch (hw_op) { + case VM_CMD_VERSION: + /* We only support one version at this time. 
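The protocol version byte that follows the command on the wire is simply ignored.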
*/ + break; + + case VM_CMD_NOATTN: + k->set_atn(s, 0, 0); + break; + + case VM_CMD_ATTN: + k->set_atn(s, 1, 0); + break; + + case VM_CMD_ATTN_IRQ: + k->set_atn(s, 1, 1); + break; + + case VM_CMD_POWEROFF: + k->do_hw_op(s, IPMI_POWEROFF_CHASSIS, 0); + break; + + case VM_CMD_RESET: + k->do_hw_op(s, IPMI_RESET_CHASSIS, 0); + break; + + case VM_CMD_ENABLE_IRQ: + k->set_irq_enable(s, 1); + break; + + case VM_CMD_DISABLE_IRQ: + k->set_irq_enable(s, 0); + break; + + case VM_CMD_SEND_NMI: + k->do_hw_op(s, IPMI_SEND_NMI, 0); + break; + + case VM_CMD_GRACEFUL_SHUTDOWN: + k->do_hw_op(s, IPMI_SHUTDOWN_VIA_ACPI_OVERTEMP, 0); + break; + } +} + +static void handle_msg(IPMIBmcExtern *ibe) +{ + IPMIInterfaceClass *k = IPMI_INTERFACE_GET_CLASS(ibe->parent.intf); + + if (ibe->in_escape) { + ipmi_debug("msg escape not ended\n"); + return; + } + if (ibe->inpos < 5) { + ipmi_debug("msg too short\n"); + return; + } + if (ibe->in_too_many) { + ibe->inbuf[3] = IPMI_CC_REQUEST_DATA_TRUNCATED; + ibe->inpos = 4; + } else if (ipmb_checksum(ibe->inbuf, ibe->inpos, 0) != 0) { + ipmi_debug("msg checksum failure\n"); + return; + } else { + ibe->inpos--; /* Remove checksum */ + } + + timer_del(ibe->extern_timer); + ibe->waiting_rsp = false; + k->handle_rsp(ibe->parent.intf, ibe->inbuf[0], ibe->inbuf + 1, ibe->inpos - 1); +} + +static int can_receive(void *opaque) +{ + return 1; +} + +static void receive(void *opaque, const uint8_t *buf, int size) +{ + IPMIBmcExtern *ibe = opaque; + int i; + unsigned char hw_op; + + for (i = 0; i < size; i++) { + unsigned char ch = buf[i]; + + switch (ch) { + case VM_MSG_CHAR: + handle_msg(ibe); + ibe->in_too_many = false; + ibe->inpos = 0; + break; + + case VM_CMD_CHAR: + if (ibe->in_too_many) { + ipmi_debug("cmd in too many\n"); + ibe->in_too_many = false; + ibe->inpos = 0; + break; + } + if (ibe->in_escape) { + ipmi_debug("cmd in escape\n"); + ibe->in_too_many = false; + ibe->inpos = 0; + ibe->in_escape = false; + break; + } + ibe->in_too_many = false; + if (ibe->inpos < 1) { + break; + } + hw_op = ibe->inbuf[0]; + ibe->inpos = 0; + goto out_hw_op; + break; + + case VM_ESCAPE_CHAR: + ibe->in_escape = true; + break; + + default: + if (ibe->in_escape) { + ch &= ~0x10; + ibe->in_escape = false; + } + if (ibe->in_too_many) { + break; + } + if (ibe->inpos >= sizeof(ibe->inbuf)) { + ibe->in_too_many = true; + break; + } + ibe->inbuf[ibe->inpos] = ch; + ibe->inpos++; + break; + } + } + return; + + out_hw_op: + handle_hw_op(ibe, hw_op); +} + +static void chr_event(void *opaque, QEMUChrEvent event) +{ + IPMIBmcExtern *ibe = opaque; + IPMIInterface *s = ibe->parent.intf; + IPMIInterfaceClass *k = IPMI_INTERFACE_GET_CLASS(s); + unsigned char v; + + switch (event) { + case CHR_EVENT_OPENED: + ibe->connected = true; + ibe->outpos = 0; + ibe->outlen = 0; + addchar(ibe, VM_CMD_VERSION); + addchar(ibe, VM_PROTOCOL_VERSION); + ibe->outbuf[ibe->outlen] = VM_CMD_CHAR; + ibe->outlen++; + addchar(ibe, VM_CMD_CAPABILITIES); + v = VM_CAPABILITIES_IRQ | VM_CAPABILITIES_ATTN; + if (k->do_hw_op(ibe->parent.intf, IPMI_POWEROFF_CHASSIS, 1) == 0) { + v |= VM_CAPABILITIES_POWER; + } + if (k->do_hw_op(ibe->parent.intf, IPMI_SHUTDOWN_VIA_ACPI_OVERTEMP, 1) + == 0) { + v |= VM_CAPABILITIES_GRACEFUL_SHUTDOWN; + } + if (k->do_hw_op(ibe->parent.intf, IPMI_RESET_CHASSIS, 1) == 0) { + v |= VM_CAPABILITIES_RESET; + } + if (k->do_hw_op(ibe->parent.intf, IPMI_SEND_NMI, 1) == 0) { + v |= VM_CAPABILITIES_NMI; + } + addchar(ibe, v); + ibe->outbuf[ibe->outlen] = VM_CMD_CHAR; + ibe->outlen++; + ibe->sending_cmd = false; +
continue_send(ibe); + break; + + case CHR_EVENT_CLOSED: + if (!ibe->connected) { + return; + } + ibe->connected = false; + /* + * Don't hang the OS trying to handle the ATN bit, other end will + * resend on a reconnect. + */ + k->set_atn(s, 0, 0); + if (ibe->waiting_rsp) { + ibe->waiting_rsp = false; + ibe->inbuf[1] = ibe->outbuf[1] | 0x04; + ibe->inbuf[2] = ibe->outbuf[2]; + ibe->inbuf[3] = IPMI_CC_BMC_INIT_IN_PROGRESS; + k->handle_rsp(s, ibe->outbuf[0], ibe->inbuf + 1, 3); + } + break; + + case CHR_EVENT_BREAK: + case CHR_EVENT_MUX_IN: + case CHR_EVENT_MUX_OUT: + /* Ignore */ + break; + } +} + +static void ipmi_bmc_extern_handle_reset(IPMIBmc *b) +{ + IPMIBmcExtern *ibe = IPMI_BMC_EXTERN(b); + + ibe->send_reset = true; + continue_send(ibe); +} + +static void ipmi_bmc_extern_realize(DeviceState *dev, Error **errp) +{ + IPMIBmcExtern *ibe = IPMI_BMC_EXTERN(dev); + + if (!qemu_chr_fe_backend_connected(&ibe->chr)) { + error_setg(errp, "IPMI external bmc requires chardev attribute"); + return; + } + + qemu_chr_fe_set_handlers(&ibe->chr, can_receive, receive, + chr_event, NULL, ibe, NULL, true); +} + +static int ipmi_bmc_extern_post_migrate(void *opaque, int version_id) +{ + IPMIBmcExtern *ibe = opaque; + + /* + * We don't directly restore waiting_rsp, Instead, we return an + * error on the interface if a response was being waited for. + */ + if (ibe->waiting_rsp) { + IPMIInterface *ii = ibe->parent.intf; + IPMIInterfaceClass *iic = IPMI_INTERFACE_GET_CLASS(ii); + + ibe->waiting_rsp = false; + ibe->inbuf[1] = ibe->outbuf[1] | 0x04; + ibe->inbuf[2] = ibe->outbuf[2]; + ibe->inbuf[3] = IPMI_CC_BMC_INIT_IN_PROGRESS; + iic->handle_rsp(ii, ibe->outbuf[0], ibe->inbuf + 1, 3); + } + return 0; +} + +static const VMStateDescription vmstate_ipmi_bmc_extern = { + .name = TYPE_IPMI_BMC_EXTERN, + .version_id = 1, + .minimum_version_id = 1, + .post_load = ipmi_bmc_extern_post_migrate, + .fields = (VMStateField[]) { + VMSTATE_BOOL(send_reset, IPMIBmcExtern), + VMSTATE_BOOL(waiting_rsp, IPMIBmcExtern), + VMSTATE_END_OF_LIST() + } +}; + +static void ipmi_bmc_extern_init(Object *obj) +{ + IPMIBmcExtern *ibe = IPMI_BMC_EXTERN(obj); + + ibe->extern_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, extern_timeout, ibe); + vmstate_register(NULL, 0, &vmstate_ipmi_bmc_extern, ibe); +} + +static void ipmi_bmc_extern_finalize(Object *obj) +{ + IPMIBmcExtern *ibe = IPMI_BMC_EXTERN(obj); + + timer_free(ibe->extern_timer); +} + +static Property ipmi_bmc_extern_properties[] = { + DEFINE_PROP_CHR("chardev", IPMIBmcExtern, chr), + DEFINE_PROP_END_OF_LIST(), +}; + +static void ipmi_bmc_extern_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + IPMIBmcClass *bk = IPMI_BMC_CLASS(oc); + + bk->handle_command = ipmi_bmc_extern_handle_command; + bk->handle_reset = ipmi_bmc_extern_handle_reset; + dc->hotpluggable = false; + dc->realize = ipmi_bmc_extern_realize; + device_class_set_props(dc, ipmi_bmc_extern_properties); +} + +static const TypeInfo ipmi_bmc_extern_type = { + .name = TYPE_IPMI_BMC_EXTERN, + .parent = TYPE_IPMI_BMC, + .instance_size = sizeof(IPMIBmcExtern), + .instance_init = ipmi_bmc_extern_init, + .instance_finalize = ipmi_bmc_extern_finalize, + .class_init = ipmi_bmc_extern_class_init, + }; + +static void ipmi_bmc_extern_register_types(void) +{ + type_register_static(&ipmi_bmc_extern_type); +} + +type_init(ipmi_bmc_extern_register_types) diff --git a/hw/ipmi/ipmi_bmc_sim.c b/hw/ipmi/ipmi_bmc_sim.c new file mode 100644 index 000000000..905e09109 --- /dev/null +++ b/hw/ipmi/ipmi_bmc_sim.c @@ -0,0 
+1,2232 @@ +/* + * IPMI BMC emulation + * + * Copyright (c) 2015 Corey Minyard, MontaVista Software, LLC + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "sysemu/sysemu.h" +#include "qemu/timer.h" +#include "hw/ipmi/ipmi.h" +#include "qemu/error-report.h" +#include "qemu/module.h" +#include "hw/loader.h" +#include "hw/qdev-properties.h" +#include "hw/qdev-properties-system.h" +#include "migration/vmstate.h" + +#define IPMI_NETFN_CHASSIS 0x00 + +#define IPMI_CMD_GET_CHASSIS_CAPABILITIES 0x00 +#define IPMI_CMD_GET_CHASSIS_STATUS 0x01 +#define IPMI_CMD_CHASSIS_CONTROL 0x02 +#define IPMI_CMD_GET_SYS_RESTART_CAUSE 0x09 + +#define IPMI_NETFN_SENSOR_EVENT 0x04 + +#define IPMI_CMD_PLATFORM_EVENT_MSG 0x02 +#define IPMI_CMD_SET_SENSOR_EVT_ENABLE 0x28 +#define IPMI_CMD_GET_SENSOR_EVT_ENABLE 0x29 +#define IPMI_CMD_REARM_SENSOR_EVTS 0x2a +#define IPMI_CMD_GET_SENSOR_EVT_STATUS 0x2b +#define IPMI_CMD_GET_SENSOR_READING 0x2d +#define IPMI_CMD_SET_SENSOR_TYPE 0x2e +#define IPMI_CMD_GET_SENSOR_TYPE 0x2f +#define IPMI_CMD_SET_SENSOR_READING 0x30 + +/* #define IPMI_NETFN_APP 0x06 In ipmi.h */ + +#define IPMI_CMD_GET_DEVICE_ID 0x01 +#define IPMI_CMD_COLD_RESET 0x02 +#define IPMI_CMD_WARM_RESET 0x03 +#define IPMI_CMD_SET_ACPI_POWER_STATE 0x06 +#define IPMI_CMD_GET_ACPI_POWER_STATE 0x07 +#define IPMI_CMD_GET_DEVICE_GUID 0x08 +#define IPMI_CMD_RESET_WATCHDOG_TIMER 0x22 +#define IPMI_CMD_SET_WATCHDOG_TIMER 0x24 +#define IPMI_CMD_GET_WATCHDOG_TIMER 0x25 +#define IPMI_CMD_SET_BMC_GLOBAL_ENABLES 0x2e +#define IPMI_CMD_GET_BMC_GLOBAL_ENABLES 0x2f +#define IPMI_CMD_CLR_MSG_FLAGS 0x30 +#define IPMI_CMD_GET_MSG_FLAGS 0x31 +#define IPMI_CMD_GET_MSG 0x33 +#define IPMI_CMD_SEND_MSG 0x34 +#define IPMI_CMD_READ_EVT_MSG_BUF 0x35 + +#define IPMI_NETFN_STORAGE 0x0a + +#define IPMI_CMD_GET_SDR_REP_INFO 0x20 +#define IPMI_CMD_GET_SDR_REP_ALLOC_INFO 0x21 +#define IPMI_CMD_RESERVE_SDR_REP 0x22 +#define IPMI_CMD_GET_SDR 0x23 +#define IPMI_CMD_ADD_SDR 0x24 +#define IPMI_CMD_PARTIAL_ADD_SDR 0x25 +#define IPMI_CMD_DELETE_SDR 0x26 +#define IPMI_CMD_CLEAR_SDR_REP 0x27 +#define IPMI_CMD_GET_SDR_REP_TIME 0x28 +#define IPMI_CMD_SET_SDR_REP_TIME 0x29 +#define IPMI_CMD_ENTER_SDR_REP_UPD_MODE 0x2A +#define IPMI_CMD_EXIT_SDR_REP_UPD_MODE 0x2B +#define IPMI_CMD_RUN_INIT_AGENT 0x2C +#define IPMI_CMD_GET_FRU_AREA_INFO 0x10 +#define IPMI_CMD_READ_FRU_DATA 0x11 +#define IPMI_CMD_WRITE_FRU_DATA 0x12 +#define IPMI_CMD_GET_SEL_INFO 0x40 +#define IPMI_CMD_GET_SEL_ALLOC_INFO 0x41 +#define 
IPMI_CMD_RESERVE_SEL 0x42 +#define IPMI_CMD_GET_SEL_ENTRY 0x43 +#define IPMI_CMD_ADD_SEL_ENTRY 0x44 +#define IPMI_CMD_PARTIAL_ADD_SEL_ENTRY 0x45 +#define IPMI_CMD_DELETE_SEL_ENTRY 0x46 +#define IPMI_CMD_CLEAR_SEL 0x47 +#define IPMI_CMD_GET_SEL_TIME 0x48 +#define IPMI_CMD_SET_SEL_TIME 0x49 + + +/* Same as a timespec struct. */ +struct ipmi_time { + long tv_sec; + long tv_nsec; +}; + +#define MAX_SEL_SIZE 128 + +typedef struct IPMISel { + uint8_t sel[MAX_SEL_SIZE][16]; + unsigned int next_free; + long time_offset; + uint16_t reservation; + uint8_t last_addition[4]; + uint8_t last_clear[4]; + uint8_t overflow; +} IPMISel; + +#define MAX_SDR_SIZE 16384 + +typedef struct IPMISdr { + uint8_t sdr[MAX_SDR_SIZE]; + unsigned int next_free; + uint16_t next_rec_id; + uint16_t reservation; + uint8_t last_addition[4]; + uint8_t last_clear[4]; + uint8_t overflow; +} IPMISdr; + +typedef struct IPMIFru { + char *filename; + unsigned int nentries; + uint16_t areasize; + uint8_t *data; +} IPMIFru; + +typedef struct IPMISensor { + uint8_t status; + uint8_t reading; + uint16_t states_suppt; + uint16_t assert_suppt; + uint16_t deassert_suppt; + uint16_t states; + uint16_t assert_states; + uint16_t deassert_states; + uint16_t assert_enable; + uint16_t deassert_enable; + uint8_t sensor_type; + uint8_t evt_reading_type_code; +} IPMISensor; +#define IPMI_SENSOR_GET_PRESENT(s) ((s)->status & 0x01) +#define IPMI_SENSOR_SET_PRESENT(s, v) ((s)->status = (s->status & ~0x01) | \ + !!(v)) +#define IPMI_SENSOR_GET_SCAN_ON(s) ((s)->status & 0x40) +#define IPMI_SENSOR_SET_SCAN_ON(s, v) ((s)->status = (s->status & ~0x40) | \ + ((!!(v)) << 6)) +#define IPMI_SENSOR_GET_EVENTS_ON(s) ((s)->status & 0x80) +#define IPMI_SENSOR_SET_EVENTS_ON(s, v) ((s)->status = (s->status & ~0x80) | \ + ((!!(v)) << 7)) +#define IPMI_SENSOR_GET_RET_STATUS(s) ((s)->status & 0xc0) +#define IPMI_SENSOR_SET_RET_STATUS(s, v) ((s)->status = (s->status & ~0xc0) | \ + (v & 0xc0)) +#define IPMI_SENSOR_IS_DISCRETE(s) ((s)->evt_reading_type_code != 1) + +#define MAX_SENSORS 20 +#define IPMI_WATCHDOG_SENSOR 0 + +#define MAX_NETFNS 64 + +typedef struct IPMIRcvBufEntry { + QTAILQ_ENTRY(IPMIRcvBufEntry) entry; + uint8_t len; + uint8_t buf[MAX_IPMI_MSG_SIZE]; +} IPMIRcvBufEntry; + +struct IPMIBmcSim { + IPMIBmc parent; + + QEMUTimer *timer; + + uint8_t bmc_global_enables; + uint8_t msg_flags; + + bool watchdog_initialized; + uint8_t watchdog_use; + uint8_t watchdog_action; + uint8_t watchdog_pretimeout; /* In seconds */ + uint8_t watchdog_expired; + uint16_t watchdog_timeout; /* in 100's of milliseconds */ + + bool watchdog_running; + bool watchdog_preaction_ran; + int64_t watchdog_expiry; + + uint8_t device_id; + uint8_t ipmi_version; + uint8_t device_rev; + uint8_t fwrev1; + uint8_t fwrev2; + uint32_t mfg_id; + uint16_t product_id; + + uint8_t restart_cause; + + uint8_t acpi_power_state[2]; + QemuUUID uuid; + + IPMISel sel; + IPMISdr sdr; + IPMIFru fru; + IPMISensor sensors[MAX_SENSORS]; + char *sdr_filename; + + /* Odd netfns are for responses, so we only need the even ones. 
*/ + const IPMINetfn *netfns[MAX_NETFNS / 2]; + + /* We allow one event in the buffer */ + uint8_t evtbuf[16]; + + QTAILQ_HEAD(, IPMIRcvBufEntry) rcvbufs; +}; + +#define IPMI_BMC_MSG_FLAG_WATCHDOG_TIMEOUT_MASK (1 << 3) +#define IPMI_BMC_MSG_FLAG_EVT_BUF_FULL (1 << 1) +#define IPMI_BMC_MSG_FLAG_RCV_MSG_QUEUE (1 << 0) +#define IPMI_BMC_MSG_FLAG_WATCHDOG_TIMEOUT_MASK_SET(s) \ + (IPMI_BMC_MSG_FLAG_WATCHDOG_TIMEOUT_MASK & (s)->msg_flags) +#define IPMI_BMC_MSG_FLAG_EVT_BUF_FULL_SET(s) \ + (IPMI_BMC_MSG_FLAG_EVT_BUF_FULL & (s)->msg_flags) +#define IPMI_BMC_MSG_FLAG_RCV_MSG_QUEUE_SET(s) \ + (IPMI_BMC_MSG_FLAG_RCV_MSG_QUEUE & (s)->msg_flags) + +#define IPMI_BMC_RCV_MSG_QUEUE_INT_BIT 0 +#define IPMI_BMC_EVBUF_FULL_INT_BIT 1 +#define IPMI_BMC_EVENT_MSG_BUF_BIT 2 +#define IPMI_BMC_EVENT_LOG_BIT 3 +#define IPMI_BMC_MSG_INTS_ON(s) ((s)->bmc_global_enables & \ + (1 << IPMI_BMC_RCV_MSG_QUEUE_INT_BIT)) +#define IPMI_BMC_EVBUF_FULL_INT_ENABLED(s) ((s)->bmc_global_enables & \ + (1 << IPMI_BMC_EVBUF_FULL_INT_BIT)) +#define IPMI_BMC_EVENT_LOG_ENABLED(s) ((s)->bmc_global_enables & \ + (1 << IPMI_BMC_EVENT_LOG_BIT)) +#define IPMI_BMC_EVENT_MSG_BUF_ENABLED(s) ((s)->bmc_global_enables & \ + (1 << IPMI_BMC_EVENT_MSG_BUF_BIT)) + +#define IPMI_BMC_WATCHDOG_USE_MASK 0xc7 +#define IPMI_BMC_WATCHDOG_ACTION_MASK 0x77 +#define IPMI_BMC_WATCHDOG_GET_USE(s) ((s)->watchdog_use & 0x7) +#define IPMI_BMC_WATCHDOG_GET_DONT_LOG(s) (((s)->watchdog_use >> 7) & 0x1) +#define IPMI_BMC_WATCHDOG_GET_DONT_STOP(s) (((s)->watchdog_use >> 6) & 0x1) +#define IPMI_BMC_WATCHDOG_GET_PRE_ACTION(s) (((s)->watchdog_action >> 4) & 0x7) +#define IPMI_BMC_WATCHDOG_PRE_NONE 0 +#define IPMI_BMC_WATCHDOG_PRE_SMI 1 +#define IPMI_BMC_WATCHDOG_PRE_NMI 2 +#define IPMI_BMC_WATCHDOG_PRE_MSG_INT 3 +#define IPMI_BMC_WATCHDOG_GET_ACTION(s) ((s)->watchdog_action & 0x7) +#define IPMI_BMC_WATCHDOG_ACTION_NONE 0 +#define IPMI_BMC_WATCHDOG_ACTION_RESET 1 +#define IPMI_BMC_WATCHDOG_ACTION_POWER_DOWN 2 +#define IPMI_BMC_WATCHDOG_ACTION_POWER_CYCLE 3 + +#define RSP_BUFFER_INITIALIZER { } + +static inline void rsp_buffer_pushmore(RspBuffer *rsp, uint8_t *bytes, + unsigned int n) +{ + if (rsp->len + n >= sizeof(rsp->buffer)) { + rsp_buffer_set_error(rsp, IPMI_CC_REQUEST_DATA_TRUNCATED); + return; + } + + memcpy(&rsp->buffer[rsp->len], bytes, n); + rsp->len += n; +} + +static void ipmi_sim_handle_timeout(IPMIBmcSim *ibs); + +static void ipmi_gettime(struct ipmi_time *time) +{ + int64_t stime; + + stime = qemu_clock_get_ns(QEMU_CLOCK_HOST); + time->tv_sec = stime / 1000000000LL; + time->tv_nsec = stime % 1000000000LL; +} + +static int64_t ipmi_getmonotime(void) +{ + return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); +} + +static void ipmi_timeout(void *opaque) +{ + IPMIBmcSim *ibs = opaque; + + ipmi_sim_handle_timeout(ibs); +} + +static void set_timestamp(IPMIBmcSim *ibs, uint8_t *ts) +{ + unsigned int val; + struct ipmi_time now; + + ipmi_gettime(&now); + val = now.tv_sec + ibs->sel.time_offset; + ts[0] = val & 0xff; + ts[1] = (val >> 8) & 0xff; + ts[2] = (val >> 16) & 0xff; + ts[3] = (val >> 24) & 0xff; +} + +static void sdr_inc_reservation(IPMISdr *sdr) +{ + sdr->reservation++; + if (sdr->reservation == 0) { + sdr->reservation = 1; + } +} + +static int sdr_add_entry(IPMIBmcSim *ibs, + const struct ipmi_sdr_header *sdrh_entry, + unsigned int len, uint16_t *recid) +{ + struct ipmi_sdr_header *sdrh = + (struct ipmi_sdr_header *) &ibs->sdr.sdr[ibs->sdr.next_free]; + + if ((len < IPMI_SDR_HEADER_SIZE) || (len > 255)) { + return 1; + } + + if (ipmi_sdr_length(sdrh_entry) != len) { 
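+ /* The length in the record's own header must match the supplied length. */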
+ return 1; + } + + if (ibs->sdr.next_free + len > MAX_SDR_SIZE) { + ibs->sdr.overflow = 1; + return 1; + } + + memcpy(sdrh, sdrh_entry, len); + sdrh->rec_id[0] = ibs->sdr.next_rec_id & 0xff; + sdrh->rec_id[1] = (ibs->sdr.next_rec_id >> 8) & 0xff; + sdrh->sdr_version = 0x51; /* Conform to IPMI 1.5 spec */ + + if (recid) { + *recid = ibs->sdr.next_rec_id; + } + ibs->sdr.next_rec_id++; + set_timestamp(ibs, ibs->sdr.last_addition); + ibs->sdr.next_free += len; + sdr_inc_reservation(&ibs->sdr); + return 0; +} + +static int sdr_find_entry(IPMISdr *sdr, uint16_t recid, + unsigned int *retpos, uint16_t *nextrec) +{ + unsigned int pos = *retpos; + + while (pos < sdr->next_free) { + struct ipmi_sdr_header *sdrh = + (struct ipmi_sdr_header *) &sdr->sdr[pos]; + uint16_t trec = ipmi_sdr_recid(sdrh); + unsigned int nextpos = pos + ipmi_sdr_length(sdrh); + + if (trec == recid) { + if (nextrec) { + if (nextpos >= sdr->next_free) { + *nextrec = 0xffff; + } else { + *nextrec = (sdr->sdr[nextpos] | + (sdr->sdr[nextpos + 1] << 8)); + } + } + *retpos = pos; + return 0; + } + pos = nextpos; + } + return 1; +} + +int ipmi_bmc_sdr_find(IPMIBmc *b, uint16_t recid, + const struct ipmi_sdr_compact **sdr, uint16_t *nextrec) + +{ + IPMIBmcSim *ibs = IPMI_BMC_SIMULATOR(b); + unsigned int pos; + + pos = 0; + if (sdr_find_entry(&ibs->sdr, recid, &pos, nextrec)) { + return -1; + } + + *sdr = (const struct ipmi_sdr_compact *) &ibs->sdr.sdr[pos]; + return 0; +} + +static void sel_inc_reservation(IPMISel *sel) +{ + sel->reservation++; + if (sel->reservation == 0) { + sel->reservation = 1; + } +} + +/* Returns 1 if the SEL is full and can't hold the event. */ +static int sel_add_event(IPMIBmcSim *ibs, uint8_t *event) +{ + uint8_t ts[4]; + + event[0] = 0xff; + event[1] = 0xff; + set_timestamp(ibs, ts); + if (event[2] < 0xe0) { /* Don't set timestamps for type 0xe0-0xff. 
*/ + memcpy(event + 3, ts, 4); + } + if (ibs->sel.next_free == MAX_SEL_SIZE) { + ibs->sel.overflow = 1; + return 1; + } + event[0] = ibs->sel.next_free & 0xff; + event[1] = (ibs->sel.next_free >> 8) & 0xff; + memcpy(ibs->sel.last_addition, ts, 4); + memcpy(ibs->sel.sel[ibs->sel.next_free], event, 16); + ibs->sel.next_free++; + sel_inc_reservation(&ibs->sel); + return 0; +} + +static int attn_set(IPMIBmcSim *ibs) +{ + return IPMI_BMC_MSG_FLAG_RCV_MSG_QUEUE_SET(ibs) + || IPMI_BMC_MSG_FLAG_EVT_BUF_FULL_SET(ibs) + || IPMI_BMC_MSG_FLAG_WATCHDOG_TIMEOUT_MASK_SET(ibs); +} + +static int attn_irq_enabled(IPMIBmcSim *ibs) +{ + return (IPMI_BMC_MSG_INTS_ON(ibs) && + (IPMI_BMC_MSG_FLAG_RCV_MSG_QUEUE_SET(ibs) || + IPMI_BMC_MSG_FLAG_WATCHDOG_TIMEOUT_MASK_SET(ibs))) + || (IPMI_BMC_EVBUF_FULL_INT_ENABLED(ibs) && + IPMI_BMC_MSG_FLAG_EVT_BUF_FULL_SET(ibs)); +} + +void ipmi_bmc_gen_event(IPMIBmc *b, uint8_t *evt, bool log) +{ + IPMIBmcSim *ibs = IPMI_BMC_SIMULATOR(b); + IPMIInterface *s = ibs->parent.intf; + IPMIInterfaceClass *k = IPMI_INTERFACE_GET_CLASS(s); + + if (!IPMI_BMC_EVENT_MSG_BUF_ENABLED(ibs)) { + return; + } + + if (log && IPMI_BMC_EVENT_LOG_ENABLED(ibs)) { + sel_add_event(ibs, evt); + } + + if (ibs->msg_flags & IPMI_BMC_MSG_FLAG_EVT_BUF_FULL) { + goto out; + } + + memcpy(ibs->evtbuf, evt, 16); + ibs->msg_flags |= IPMI_BMC_MSG_FLAG_EVT_BUF_FULL; + k->set_atn(s, 1, attn_irq_enabled(ibs)); + out: + return; +} +static void gen_event(IPMIBmcSim *ibs, unsigned int sens_num, uint8_t deassert, + uint8_t evd1, uint8_t evd2, uint8_t evd3) +{ + IPMIInterface *s = ibs->parent.intf; + IPMIInterfaceClass *k = IPMI_INTERFACE_GET_CLASS(s); + uint8_t evt[16]; + IPMISensor *sens = ibs->sensors + sens_num; + + if (!IPMI_BMC_EVENT_MSG_BUF_ENABLED(ibs)) { + return; + } + if (!IPMI_SENSOR_GET_EVENTS_ON(sens)) { + return; + } + + evt[2] = 0x2; /* System event record */ + evt[7] = ibs->parent.slave_addr; + evt[8] = 0; + evt[9] = 0x04; /* Format version */ + evt[10] = sens->sensor_type; + evt[11] = sens_num; + evt[12] = sens->evt_reading_type_code | (!!deassert << 7); + evt[13] = evd1; + evt[14] = evd2; + evt[15] = evd3; + + if (IPMI_BMC_EVENT_LOG_ENABLED(ibs)) { + sel_add_event(ibs, evt); + } + + if (ibs->msg_flags & IPMI_BMC_MSG_FLAG_EVT_BUF_FULL) { + return; + } + + memcpy(ibs->evtbuf, evt, 16); + ibs->msg_flags |= IPMI_BMC_MSG_FLAG_EVT_BUF_FULL; + k->set_atn(s, 1, attn_irq_enabled(ibs)); +} + +static void sensor_set_discrete_bit(IPMIBmcSim *ibs, unsigned int sensor, + unsigned int bit, unsigned int val, + uint8_t evd1, uint8_t evd2, uint8_t evd3) +{ + IPMISensor *sens; + uint16_t mask; + + if (sensor >= MAX_SENSORS) { + return; + } + if (bit >= 16) { + return; + } + + mask = (1 << bit); + sens = ibs->sensors + sensor; + if (val) { + sens->states |= mask & sens->states_suppt; + if (sens->assert_states & mask) { + return; /* Already asserted */ + } + sens->assert_states |= mask & sens->assert_suppt; + if (sens->assert_enable & mask & sens->assert_states) { + /* Send an event on assert */ + gen_event(ibs, sensor, 0, evd1, evd2, evd3); + } + } else { + sens->states &= ~(mask & sens->states_suppt); + if (sens->deassert_states & mask) { + return; /* Already deasserted */ + } + sens->deassert_states |= mask & sens->deassert_suppt; + if (sens->deassert_enable & mask & sens->deassert_states) { + /* Send an event on deassert */ + gen_event(ibs, sensor, 1, evd1, evd2, evd3); + } + } +} + +static void ipmi_init_sensors_from_sdrs(IPMIBmcSim *s) +{ + unsigned int i, pos; + IPMISensor *sens; + + for (i = 0; i < MAX_SENSORS; i++) 
{ + memset(s->sensors + i, 0, sizeof(*sens)); + } + + pos = 0; + for (i = 0; !sdr_find_entry(&s->sdr, i, &pos, NULL); i++) { + struct ipmi_sdr_compact *sdr = + (struct ipmi_sdr_compact *) &s->sdr.sdr[pos]; + unsigned int len = sdr->header.rec_length; + + if (len < 20) { + continue; + } + if (sdr->header.rec_type != IPMI_SDR_COMPACT_TYPE) { + continue; /* Not a sensor SDR we set from */ + } + + if (sdr->sensor_owner_number >= MAX_SENSORS) { + continue; + } + sens = s->sensors + sdr->sensor_owner_number; + + IPMI_SENSOR_SET_PRESENT(sens, 1); + IPMI_SENSOR_SET_SCAN_ON(sens, (sdr->sensor_init >> 6) & 1); + IPMI_SENSOR_SET_EVENTS_ON(sens, (sdr->sensor_init >> 5) & 1); + sens->assert_suppt = sdr->assert_mask[0] | (sdr->assert_mask[1] << 8); + sens->deassert_suppt = + sdr->deassert_mask[0] | (sdr->deassert_mask[1] << 8); + sens->states_suppt = + sdr->discrete_mask[0] | (sdr->discrete_mask[1] << 8); + sens->sensor_type = sdr->sensor_type; + sens->evt_reading_type_code = sdr->reading_type & 0x7f; + + /* Enable all the events that are supported. */ + sens->assert_enable = sens->assert_suppt; + sens->deassert_enable = sens->deassert_suppt; + } +} + +int ipmi_sim_register_netfn(IPMIBmcSim *s, unsigned int netfn, + const IPMINetfn *netfnd) +{ + if ((netfn & 1) || (netfn >= MAX_NETFNS) || (s->netfns[netfn / 2])) { + return -1; + } + s->netfns[netfn / 2] = netfnd; + return 0; +} + +static const IPMICmdHandler *ipmi_get_handler(IPMIBmcSim *ibs, + unsigned int netfn, + unsigned int cmd) +{ + const IPMICmdHandler *hdl; + + if (netfn & 1 || netfn >= MAX_NETFNS || !ibs->netfns[netfn / 2]) { + return NULL; + } + + if (cmd >= ibs->netfns[netfn / 2]->cmd_nums) { + return NULL; + } + + hdl = &ibs->netfns[netfn / 2]->cmd_handlers[cmd]; + if (!hdl->cmd_handler) { + return NULL; + } + + return hdl; +} + +static void next_timeout(IPMIBmcSim *ibs) +{ + int64_t next; + if (ibs->watchdog_running) { + next = ibs->watchdog_expiry; + } else { + /* Wait a minute */ + next = ipmi_getmonotime() + 60 * 1000000000LL; + } + timer_mod_ns(ibs->timer, next); +} + +static void ipmi_sim_handle_command(IPMIBmc *b, + uint8_t *cmd, unsigned int cmd_len, + unsigned int max_cmd_len, + uint8_t msg_id) +{ + IPMIBmcSim *ibs = IPMI_BMC_SIMULATOR(b); + IPMIInterface *s = ibs->parent.intf; + IPMIInterfaceClass *k = IPMI_INTERFACE_GET_CLASS(s); + const IPMICmdHandler *hdl; + RspBuffer rsp = RSP_BUFFER_INITIALIZER; + + /* Set up the response, set the low bit of NETFN. */ + /* Note that max_rsp_len must be at least 3 */ + if (sizeof(rsp.buffer) < 3) { + rsp_buffer_set_error(&rsp, IPMI_CC_REQUEST_DATA_TRUNCATED); + goto out; + } + + rsp_buffer_push(&rsp, cmd[0] | 0x04); + rsp_buffer_push(&rsp, cmd[1]); + rsp_buffer_push(&rsp, 0); /* Assume success */ + + /* If it's too short or it was truncated, return an error. 
*/ + if (cmd_len < 2) { + rsp_buffer_set_error(&rsp, IPMI_CC_REQUEST_DATA_LENGTH_INVALID); + goto out; + } + if (cmd_len > max_cmd_len) { + rsp_buffer_set_error(&rsp, IPMI_CC_REQUEST_DATA_TRUNCATED); + goto out; + } + + if ((cmd[0] & 0x03) != 0) { + /* Only have stuff on LUN 0 */ + rsp_buffer_set_error(&rsp, IPMI_CC_COMMAND_INVALID_FOR_LUN); + goto out; + } + + hdl = ipmi_get_handler(ibs, cmd[0] >> 2, cmd[1]); + if (!hdl) { + rsp_buffer_set_error(&rsp, IPMI_CC_INVALID_CMD); + goto out; + } + + if (cmd_len < hdl->cmd_len_min) { + rsp_buffer_set_error(&rsp, IPMI_CC_REQUEST_DATA_LENGTH_INVALID); + goto out; + } + + hdl->cmd_handler(ibs, cmd, cmd_len, &rsp); + + out: + k->handle_rsp(s, msg_id, rsp.buffer, rsp.len); + + next_timeout(ibs); +} + +static void ipmi_sim_handle_timeout(IPMIBmcSim *ibs) +{ + IPMIInterface *s = ibs->parent.intf; + IPMIInterfaceClass *k = IPMI_INTERFACE_GET_CLASS(s); + + if (!ibs->watchdog_running) { + goto out; + } + + if (!ibs->watchdog_preaction_ran) { + switch (IPMI_BMC_WATCHDOG_GET_PRE_ACTION(ibs)) { + case IPMI_BMC_WATCHDOG_PRE_NMI: + ibs->msg_flags |= IPMI_BMC_MSG_FLAG_WATCHDOG_TIMEOUT_MASK; + k->do_hw_op(s, IPMI_SEND_NMI, 0); + sensor_set_discrete_bit(ibs, IPMI_WATCHDOG_SENSOR, 8, 1, + 0xc8, (2 << 4) | 0xf, 0xff); + break; + + case IPMI_BMC_WATCHDOG_PRE_MSG_INT: + ibs->msg_flags |= IPMI_BMC_MSG_FLAG_WATCHDOG_TIMEOUT_MASK; + k->set_atn(s, 1, attn_irq_enabled(ibs)); + sensor_set_discrete_bit(ibs, IPMI_WATCHDOG_SENSOR, 8, 1, + 0xc8, (3 << 4) | 0xf, 0xff); + break; + + default: + goto do_full_expiry; + } + + ibs->watchdog_preaction_ran = 1; + /* Issued the pretimeout, do the rest of the timeout now. */ + ibs->watchdog_expiry = ipmi_getmonotime(); + ibs->watchdog_expiry += ibs->watchdog_pretimeout * 1000000000LL; + goto out; + } + + do_full_expiry: + ibs->watchdog_running = 0; /* Stop the watchdog on a timeout */ + ibs->watchdog_expired |= (1 << IPMI_BMC_WATCHDOG_GET_USE(ibs)); + switch (IPMI_BMC_WATCHDOG_GET_ACTION(ibs)) { + case IPMI_BMC_WATCHDOG_ACTION_NONE: + sensor_set_discrete_bit(ibs, IPMI_WATCHDOG_SENSOR, 0, 1, + 0xc0, ibs->watchdog_use & 0xf, 0xff); + break; + + case IPMI_BMC_WATCHDOG_ACTION_RESET: + sensor_set_discrete_bit(ibs, IPMI_WATCHDOG_SENSOR, 1, 1, + 0xc1, ibs->watchdog_use & 0xf, 0xff); + k->do_hw_op(s, IPMI_RESET_CHASSIS, 0); + break; + + case IPMI_BMC_WATCHDOG_ACTION_POWER_DOWN: + sensor_set_discrete_bit(ibs, IPMI_WATCHDOG_SENSOR, 2, 1, + 0xc2, ibs->watchdog_use & 0xf, 0xff); + k->do_hw_op(s, IPMI_POWEROFF_CHASSIS, 0); + break; + + case IPMI_BMC_WATCHDOG_ACTION_POWER_CYCLE: + sensor_set_discrete_bit(ibs, IPMI_WATCHDOG_SENSOR, 2, 1, + 0xc3, ibs->watchdog_use & 0xf, 0xff); + k->do_hw_op(s, IPMI_POWERCYCLE_CHASSIS, 0); + break; + } + + out: + next_timeout(ibs); +} + +static void chassis_capabilities(IPMIBmcSim *ibs, + uint8_t *cmd, unsigned int cmd_len, + RspBuffer *rsp) +{ + rsp_buffer_push(rsp, 0); + rsp_buffer_push(rsp, ibs->parent.slave_addr); + rsp_buffer_push(rsp, ibs->parent.slave_addr); + rsp_buffer_push(rsp, ibs->parent.slave_addr); + rsp_buffer_push(rsp, ibs->parent.slave_addr); +} + +static void chassis_status(IPMIBmcSim *ibs, + uint8_t *cmd, unsigned int cmd_len, + RspBuffer *rsp) +{ + rsp_buffer_push(rsp, 0x61); /* Unknown power restore, power is on */ + rsp_buffer_push(rsp, 0); + rsp_buffer_push(rsp, 0); + rsp_buffer_push(rsp, 0); +} + +static void chassis_control(IPMIBmcSim *ibs, + uint8_t *cmd, unsigned int cmd_len, + RspBuffer *rsp) +{ + IPMIInterface *s = ibs->parent.intf; + IPMIInterfaceClass *k = IPMI_INTERFACE_GET_CLASS(s); + + 
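+ /* + * The low nibble of the request byte selects the chassis action; each + * action is forwarded to the interface as a hardware op, and the op's + * return value is reported as the IPMI completion code. + */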
switch (cmd[2] & 0xf) { + case 0: /* power down */ + rsp_buffer_set_error(rsp, k->do_hw_op(s, IPMI_POWEROFF_CHASSIS, 0)); + break; + case 1: /* power up */ + rsp_buffer_set_error(rsp, k->do_hw_op(s, IPMI_POWERON_CHASSIS, 0)); + break; + case 2: /* power cycle */ + rsp_buffer_set_error(rsp, k->do_hw_op(s, IPMI_POWERCYCLE_CHASSIS, 0)); + break; + case 3: /* hard reset */ + rsp_buffer_set_error(rsp, k->do_hw_op(s, IPMI_RESET_CHASSIS, 0)); + break; + case 4: /* pulse diagnostic interrupt */ + rsp_buffer_set_error(rsp, k->do_hw_op(s, IPMI_PULSE_DIAG_IRQ, 0)); + break; + case 5: /* soft shutdown via ACPI by overtemp emulation */ + rsp_buffer_set_error(rsp, k->do_hw_op(s, + IPMI_SHUTDOWN_VIA_ACPI_OVERTEMP, 0)); + break; + default: + rsp_buffer_set_error(rsp, IPMI_CC_INVALID_DATA_FIELD); + return; + } +} + +static void chassis_get_sys_restart_cause(IPMIBmcSim *ibs, + uint8_t *cmd, unsigned int cmd_len, + RspBuffer *rsp) + +{ + rsp_buffer_push(rsp, ibs->restart_cause & 0xf); /* Restart Cause */ + rsp_buffer_push(rsp, 0); /* Channel 0 */ +} + +static void get_device_id(IPMIBmcSim *ibs, + uint8_t *cmd, unsigned int cmd_len, + RspBuffer *rsp) +{ + rsp_buffer_push(rsp, ibs->device_id); + rsp_buffer_push(rsp, ibs->device_rev & 0xf); + rsp_buffer_push(rsp, ibs->fwrev1 & 0x7f); + rsp_buffer_push(rsp, ibs->fwrev2); + rsp_buffer_push(rsp, ibs->ipmi_version); + rsp_buffer_push(rsp, 0x07); /* sensor, SDR, and SEL. */ + rsp_buffer_push(rsp, ibs->mfg_id & 0xff); + rsp_buffer_push(rsp, (ibs->mfg_id >> 8) & 0xff); + rsp_buffer_push(rsp, (ibs->mfg_id >> 16) & 0xff); + rsp_buffer_push(rsp, ibs->product_id & 0xff); + rsp_buffer_push(rsp, (ibs->product_id >> 8) & 0xff); +} + +static void set_global_enables(IPMIBmcSim *ibs, uint8_t val) +{ + IPMIInterface *s = ibs->parent.intf; + IPMIInterfaceClass *k = IPMI_INTERFACE_GET_CLASS(s); + bool irqs_on; + + ibs->bmc_global_enables = val; + + /* The *_INT_BIT macros are bit numbers, not masks, so shift them. */ + irqs_on = val & ((1 << IPMI_BMC_EVBUF_FULL_INT_BIT) | + (1 << IPMI_BMC_RCV_MSG_QUEUE_INT_BIT)); + + k->set_irq_enable(s, irqs_on); +} + +static void cold_reset(IPMIBmcSim *ibs, + uint8_t *cmd, unsigned int cmd_len, + RspBuffer *rsp) +{ + IPMIInterface *s = ibs->parent.intf; + IPMIInterfaceClass *k = IPMI_INTERFACE_GET_CLASS(s); + + /* Disable all interrupts */ + set_global_enables(ibs, 1 << IPMI_BMC_EVENT_LOG_BIT); + + if (k->reset) { + k->reset(s, true); + } +} + +static void warm_reset(IPMIBmcSim *ibs, + uint8_t *cmd, unsigned int cmd_len, + RspBuffer *rsp) +{ + IPMIInterface *s = ibs->parent.intf; + IPMIInterfaceClass *k = IPMI_INTERFACE_GET_CLASS(s); + + if (k->reset) { + k->reset(s, false); + } +} + +static void set_acpi_power_state(IPMIBmcSim *ibs, + uint8_t *cmd, unsigned int cmd_len, + RspBuffer *rsp) +{ + ibs->acpi_power_state[0] = cmd[2]; + ibs->acpi_power_state[1] = cmd[3]; +} + +static void get_acpi_power_state(IPMIBmcSim *ibs, + uint8_t *cmd, unsigned int cmd_len, + RspBuffer *rsp) +{ + rsp_buffer_push(rsp, ibs->acpi_power_state[0]); + rsp_buffer_push(rsp, ibs->acpi_power_state[1]); +} + +static void get_device_guid(IPMIBmcSim *ibs, + uint8_t *cmd, unsigned int cmd_len, + RspBuffer *rsp) +{ + unsigned int i; + + /* An uninitialized uuid is all zeros; use that to know if it is set. */ + for (i = 0; i < 16; i++) { + if (ibs->uuid.data[i]) { + goto uuid_set; + } + } + /* No uuid is set; return an error. 
*/ + rsp_buffer_set_error(rsp, IPMI_CC_INVALID_CMD); + return; + + uuid_set: + for (i = 0; i < 16; i++) { + rsp_buffer_push(rsp, ibs->uuid.data[i]); + } +} + +static void set_bmc_global_enables(IPMIBmcSim *ibs, + uint8_t *cmd, unsigned int cmd_len, + RspBuffer *rsp) +{ + set_global_enables(ibs, cmd[2]); +} + +static void get_bmc_global_enables(IPMIBmcSim *ibs, + uint8_t *cmd, unsigned int cmd_len, + RspBuffer *rsp) +{ + rsp_buffer_push(rsp, ibs->bmc_global_enables); +} + +static void clr_msg_flags(IPMIBmcSim *ibs, + uint8_t *cmd, unsigned int cmd_len, + RspBuffer *rsp) +{ + IPMIInterface *s = ibs->parent.intf; + IPMIInterfaceClass *k = IPMI_INTERFACE_GET_CLASS(s); + + ibs->msg_flags &= ~cmd[2]; + k->set_atn(s, attn_set(ibs), attn_irq_enabled(ibs)); +} + +static void get_msg_flags(IPMIBmcSim *ibs, + uint8_t *cmd, unsigned int cmd_len, + RspBuffer *rsp) +{ + rsp_buffer_push(rsp, ibs->msg_flags); +} + +static void read_evt_msg_buf(IPMIBmcSim *ibs, + uint8_t *cmd, unsigned int cmd_len, + RspBuffer *rsp) +{ + IPMIInterface *s = ibs->parent.intf; + IPMIInterfaceClass *k = IPMI_INTERFACE_GET_CLASS(s); + unsigned int i; + + if (!(ibs->msg_flags & IPMI_BMC_MSG_FLAG_EVT_BUF_FULL)) { + rsp_buffer_set_error(rsp, 0x80); + return; + } + for (i = 0; i < 16; i++) { + rsp_buffer_push(rsp, ibs->evtbuf[i]); + } + ibs->msg_flags &= ~IPMI_BMC_MSG_FLAG_EVT_BUF_FULL; + k->set_atn(s, attn_set(ibs), attn_irq_enabled(ibs)); +} + +static void get_msg(IPMIBmcSim *ibs, + uint8_t *cmd, unsigned int cmd_len, + RspBuffer *rsp) +{ + IPMIRcvBufEntry *msg; + + if (QTAILQ_EMPTY(&ibs->rcvbufs)) { + rsp_buffer_set_error(rsp, 0x80); /* Queue empty */ + goto out; + } + rsp_buffer_push(rsp, 0); /* Channel 0 */ + msg = QTAILQ_FIRST(&ibs->rcvbufs); + rsp_buffer_pushmore(rsp, msg->buf, msg->len); + QTAILQ_REMOVE(&ibs->rcvbufs, msg, entry); + g_free(msg); + + if (QTAILQ_EMPTY(&ibs->rcvbufs)) { + IPMIInterface *s = ibs->parent.intf; + IPMIInterfaceClass *k = IPMI_INTERFACE_GET_CLASS(s); + + ibs->msg_flags &= ~IPMI_BMC_MSG_FLAG_RCV_MSG_QUEUE; + k->set_atn(s, attn_set(ibs), attn_irq_enabled(ibs)); + } + +out: + return; +} + +static unsigned char +ipmb_checksum(unsigned char *data, int size, unsigned char csum) +{ + for (; size > 0; size--, data++) { + csum += *data; + } + + return -csum; +} + +static void send_msg(IPMIBmcSim *ibs, + uint8_t *cmd, unsigned int cmd_len, + RspBuffer *rsp) +{ + IPMIInterface *s = ibs->parent.intf; + IPMIInterfaceClass *k = IPMI_INTERFACE_GET_CLASS(s); + IPMIRcvBufEntry *msg; + uint8_t *buf; + uint8_t netfn, rqLun, rsLun, rqSeq; + + if (cmd[2] != 0) { + /* We only handle channel 0 with no options */ + rsp_buffer_set_error(rsp, IPMI_CC_INVALID_DATA_FIELD); + return; + } + + if (cmd_len < 10) { + rsp_buffer_set_error(rsp, IPMI_CC_REQUEST_DATA_LENGTH_INVALID); + return; + } + + if (cmd[3] != 0x40) { + /* We only emulate an MC at address 0x40. */ + rsp_buffer_set_error(rsp, 0x83); /* NAK on write */ + return; + } + + cmd += 3; /* Skip the header. */ + cmd_len -= 3; + + /* + * At this point we "send" the message successfully. Any error will + * be returned in the response. + */ + if (ipmb_checksum(cmd, cmd_len, 0) != 0 || + cmd[3] != 0x20) { /* Improper response address */ + return; /* No response */ + } + + netfn = cmd[1] >> 2; + rqLun = cmd[4] & 0x3; + rsLun = cmd[1] & 0x3; + rqSeq = cmd[4] >> 2; + + if (rqLun != 2) { + /* We only support LUN 2 coming back to us. 
*/ + return; + } + + msg = g_malloc(sizeof(*msg)); + msg->buf[0] = ((netfn | 1) << 2) | rqLun; /* NetFN, and make a response */ + msg->buf[1] = ipmb_checksum(msg->buf, 1, 0); + msg->buf[2] = cmd[0]; /* rsSA */ + msg->buf[3] = (rqSeq << 2) | rsLun; + msg->buf[4] = cmd[5]; /* Cmd */ + msg->buf[5] = 0; /* Completion Code */ + msg->len = 6; + + if ((cmd[1] >> 2) != IPMI_NETFN_APP || cmd[5] != IPMI_CMD_GET_DEVICE_ID) { + /* Not a command we handle. */ + msg->buf[5] = IPMI_CC_INVALID_CMD; + goto end_msg; + } + + buf = msg->buf + msg->len; /* After the CC */ + buf[0] = 0; + buf[1] = 0; + buf[2] = 0; + buf[3] = 0; + buf[4] = 0x51; + buf[5] = 0; + buf[6] = 0; + buf[7] = 0; + buf[8] = 0; + buf[9] = 0; + buf[10] = 0; + msg->len += 11; + + end_msg: + msg->buf[msg->len] = ipmb_checksum(msg->buf, msg->len, 0); + msg->len++; + QTAILQ_INSERT_TAIL(&ibs->rcvbufs, msg, entry); + ibs->msg_flags |= IPMI_BMC_MSG_FLAG_RCV_MSG_QUEUE; + k->set_atn(s, 1, attn_irq_enabled(ibs)); +} + +static void do_watchdog_reset(IPMIBmcSim *ibs) +{ + if (IPMI_BMC_WATCHDOG_GET_ACTION(ibs) == + IPMI_BMC_WATCHDOG_ACTION_NONE) { + ibs->watchdog_running = 0; + return; + } + ibs->watchdog_preaction_ran = 0; + + + /* Timeout is in tenths of a second, offset is in seconds */ + ibs->watchdog_expiry = ipmi_getmonotime(); + ibs->watchdog_expiry += ibs->watchdog_timeout * 100000000LL; + if (IPMI_BMC_WATCHDOG_GET_PRE_ACTION(ibs) != IPMI_BMC_WATCHDOG_PRE_NONE) { + ibs->watchdog_expiry -= ibs->watchdog_pretimeout * 1000000000LL; + } + ibs->watchdog_running = 1; +} + +static void reset_watchdog_timer(IPMIBmcSim *ibs, + uint8_t *cmd, unsigned int cmd_len, + RspBuffer *rsp) +{ + if (!ibs->watchdog_initialized) { + rsp_buffer_set_error(rsp, 0x80); + return; + } + do_watchdog_reset(ibs); +} + +static void set_watchdog_timer(IPMIBmcSim *ibs, + uint8_t *cmd, unsigned int cmd_len, + RspBuffer *rsp) +{ + IPMIInterface *s = ibs->parent.intf; + IPMIInterfaceClass *k = IPMI_INTERFACE_GET_CLASS(s); + unsigned int val; + + val = cmd[2] & 0x7; /* Validate use */ + if (val == 0 || val > 5) { + rsp_buffer_set_error(rsp, IPMI_CC_INVALID_DATA_FIELD); + return; + } + val = cmd[3] & 0x7; /* Validate action */ + switch (val) { + case IPMI_BMC_WATCHDOG_ACTION_NONE: + break; + + case IPMI_BMC_WATCHDOG_ACTION_RESET: + rsp_buffer_set_error(rsp, k->do_hw_op(s, IPMI_RESET_CHASSIS, 1)); + break; + + case IPMI_BMC_WATCHDOG_ACTION_POWER_DOWN: + rsp_buffer_set_error(rsp, k->do_hw_op(s, IPMI_POWEROFF_CHASSIS, 1)); + break; + + case IPMI_BMC_WATCHDOG_ACTION_POWER_CYCLE: + rsp_buffer_set_error(rsp, k->do_hw_op(s, IPMI_POWERCYCLE_CHASSIS, 1)); + break; + + default: + rsp_buffer_set_error(rsp, IPMI_CC_INVALID_DATA_FIELD); + } + if (rsp->buffer[2]) { + rsp_buffer_set_error(rsp, IPMI_CC_INVALID_DATA_FIELD); + return; + } + + val = (cmd[3] >> 4) & 0x7; /* Validate preaction */ + switch (val) { + case IPMI_BMC_WATCHDOG_PRE_MSG_INT: + case IPMI_BMC_WATCHDOG_PRE_NONE: + break; + + case IPMI_BMC_WATCHDOG_PRE_NMI: + if (k->do_hw_op(s, IPMI_SEND_NMI, 1)) { + /* NMI not supported. 
*/ + rsp_buffer_set_error(rsp, IPMI_CC_INVALID_DATA_FIELD); + return; + } + break; + + default: + /* We don't support PRE_SMI */ + rsp_buffer_set_error(rsp, IPMI_CC_INVALID_DATA_FIELD); + return; + } + + ibs->watchdog_initialized = 1; + ibs->watchdog_use = cmd[2] & IPMI_BMC_WATCHDOG_USE_MASK; + ibs->watchdog_action = cmd[3] & IPMI_BMC_WATCHDOG_ACTION_MASK; + ibs->watchdog_pretimeout = cmd[4]; + ibs->watchdog_expired &= ~cmd[5]; + ibs->watchdog_timeout = cmd[6] | (((uint16_t) cmd[7]) << 8); + if (ibs->watchdog_running && IPMI_BMC_WATCHDOG_GET_DONT_STOP(ibs)) { + do_watchdog_reset(ibs); + } else { + ibs->watchdog_running = 0; + } +} + +static void get_watchdog_timer(IPMIBmcSim *ibs, + uint8_t *cmd, unsigned int cmd_len, + RspBuffer *rsp) +{ + rsp_buffer_push(rsp, ibs->watchdog_use); + rsp_buffer_push(rsp, ibs->watchdog_action); + rsp_buffer_push(rsp, ibs->watchdog_pretimeout); + rsp_buffer_push(rsp, ibs->watchdog_expired); + rsp_buffer_push(rsp, ibs->watchdog_timeout & 0xff); + rsp_buffer_push(rsp, (ibs->watchdog_timeout >> 8) & 0xff); + if (ibs->watchdog_running) { + long timeout; + timeout = ((ibs->watchdog_expiry - ipmi_getmonotime() + 50000000) + / 100000000); + rsp_buffer_push(rsp, timeout & 0xff); + rsp_buffer_push(rsp, (timeout >> 8) & 0xff); + } else { + rsp_buffer_push(rsp, 0); + rsp_buffer_push(rsp, 0); + } +} + +static void get_sdr_rep_info(IPMIBmcSim *ibs, + uint8_t *cmd, unsigned int cmd_len, + RspBuffer *rsp) +{ + unsigned int i; + + rsp_buffer_push(rsp, 0x51); /* Conform to IPMI 1.5 spec */ + rsp_buffer_push(rsp, ibs->sdr.next_rec_id & 0xff); + rsp_buffer_push(rsp, (ibs->sdr.next_rec_id >> 8) & 0xff); + rsp_buffer_push(rsp, (MAX_SDR_SIZE - ibs->sdr.next_free) & 0xff); + rsp_buffer_push(rsp, ((MAX_SDR_SIZE - ibs->sdr.next_free) >> 8) & 0xff); + for (i = 0; i < 4; i++) { + rsp_buffer_push(rsp, ibs->sdr.last_addition[i]); + } + for (i = 0; i < 4; i++) { + rsp_buffer_push(rsp, ibs->sdr.last_clear[i]); + } + /* Only modal support, reserve supported */ + rsp_buffer_push(rsp, (ibs->sdr.overflow << 7) | 0x22); +} + +static void reserve_sdr_rep(IPMIBmcSim *ibs, + uint8_t *cmd, unsigned int cmd_len, + RspBuffer *rsp) +{ + rsp_buffer_push(rsp, ibs->sdr.reservation & 0xff); + rsp_buffer_push(rsp, (ibs->sdr.reservation >> 8) & 0xff); +} + +static void get_sdr(IPMIBmcSim *ibs, + uint8_t *cmd, unsigned int cmd_len, + RspBuffer *rsp) +{ + unsigned int pos; + uint16_t nextrec; + struct ipmi_sdr_header *sdrh; + + if (cmd[6]) { + if ((cmd[2] | (cmd[3] << 8)) != ibs->sdr.reservation) { + rsp_buffer_set_error(rsp, IPMI_CC_INVALID_RESERVATION); + return; + } + } + + pos = 0; + if (sdr_find_entry(&ibs->sdr, cmd[4] | (cmd[5] << 8), + &pos, &nextrec)) { + rsp_buffer_set_error(rsp, IPMI_CC_REQ_ENTRY_NOT_PRESENT); + return; + } + + sdrh = (struct ipmi_sdr_header *) &ibs->sdr.sdr[pos]; + + if (cmd[6] > ipmi_sdr_length(sdrh)) { + rsp_buffer_set_error(rsp, IPMI_CC_PARM_OUT_OF_RANGE); + return; + } + + rsp_buffer_push(rsp, nextrec & 0xff); + rsp_buffer_push(rsp, (nextrec >> 8) & 0xff); + + if (cmd[7] == 0xff) { + cmd[7] = ipmi_sdr_length(sdrh) - cmd[6]; + } + + if ((cmd[7] + rsp->len) > sizeof(rsp->buffer)) { + rsp_buffer_set_error(rsp, IPMI_CC_CANNOT_RETURN_REQ_NUM_BYTES); + return; + } + + rsp_buffer_pushmore(rsp, ibs->sdr.sdr + pos + cmd[6], cmd[7]); +} + +static void add_sdr(IPMIBmcSim *ibs, + uint8_t *cmd, unsigned int cmd_len, + RspBuffer *rsp) +{ + uint16_t recid; + /* The record data follows the two command bytes. */ + struct ipmi_sdr_header *sdrh = (struct ipmi_sdr_header *) (cmd + 2); + + if (sdr_add_entry(ibs, sdrh, cmd_len - 2, &recid)) {
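+ /* Rejected: bad length, header/length mismatch, or repository full. */ +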
rsp_buffer_set_error(rsp, IPMI_CC_INVALID_DATA_FIELD); + return; + } + rsp_buffer_push(rsp, recid & 0xff); + rsp_buffer_push(rsp, (recid >> 8) & 0xff); +} + +static void clear_sdr_rep(IPMIBmcSim *ibs, + uint8_t *cmd, unsigned int cmd_len, + RspBuffer *rsp) +{ + if ((cmd[2] | (cmd[3] << 8)) != ibs->sdr.reservation) { + rsp_buffer_set_error(rsp, IPMI_CC_INVALID_RESERVATION); + return; + } + + if (cmd[4] != 'C' || cmd[5] != 'L' || cmd[6] != 'R') { + rsp_buffer_set_error(rsp, IPMI_CC_INVALID_DATA_FIELD); + return; + } + if (cmd[7] == 0xaa) { + ibs->sdr.next_free = 0; + ibs->sdr.overflow = 0; + set_timestamp(ibs, ibs->sdr.last_clear); + rsp_buffer_push(rsp, 1); /* Erasure complete */ + sdr_inc_reservation(&ibs->sdr); + } else if (cmd[7] == 0) { + rsp_buffer_push(rsp, 1); /* Erasure complete */ + } else { + rsp_buffer_set_error(rsp, IPMI_CC_INVALID_DATA_FIELD); + return; + } +} + +static void get_sel_info(IPMIBmcSim *ibs, + uint8_t *cmd, unsigned int cmd_len, + RspBuffer *rsp) +{ + unsigned int i, val; + + rsp_buffer_push(rsp, 0x51); /* Conform to IPMI 1.5 */ + rsp_buffer_push(rsp, ibs->sel.next_free & 0xff); + rsp_buffer_push(rsp, (ibs->sel.next_free >> 8) & 0xff); + val = (MAX_SEL_SIZE - ibs->sel.next_free) * 16; + rsp_buffer_push(rsp, val & 0xff); + rsp_buffer_push(rsp, (val >> 8) & 0xff); + for (i = 0; i < 4; i++) { + rsp_buffer_push(rsp, ibs->sel.last_addition[i]); + } + for (i = 0; i < 4; i++) { + rsp_buffer_push(rsp, ibs->sel.last_clear[i]); + } + /* Only support Reserve SEL */ + rsp_buffer_push(rsp, (ibs->sel.overflow << 7) | 0x02); +} + +static void get_fru_area_info(IPMIBmcSim *ibs, + uint8_t *cmd, unsigned int cmd_len, + RspBuffer *rsp) +{ + uint8_t fruid; + uint16_t fru_entry_size; + + fruid = cmd[2]; + + if (fruid >= ibs->fru.nentries) { + rsp_buffer_set_error(rsp, IPMI_CC_INVALID_DATA_FIELD); + return; + } + + fru_entry_size = ibs->fru.areasize; + + rsp_buffer_push(rsp, fru_entry_size & 0xff); + rsp_buffer_push(rsp, fru_entry_size >> 8 & 0xff); + rsp_buffer_push(rsp, 0x0); +} + +static void read_fru_data(IPMIBmcSim *ibs, + uint8_t *cmd, unsigned int cmd_len, + RspBuffer *rsp) +{ + uint8_t fruid; + uint16_t offset; + int i; + uint8_t *fru_entry; + unsigned int count; + + fruid = cmd[2]; + offset = (cmd[3] | cmd[4] << 8); + + if (fruid >= ibs->fru.nentries) { + rsp_buffer_set_error(rsp, IPMI_CC_INVALID_DATA_FIELD); + return; + } + + if (offset >= ibs->fru.areasize - 1) { + rsp_buffer_set_error(rsp, IPMI_CC_INVALID_DATA_FIELD); + return; + } + + fru_entry = &ibs->fru.data[fruid * ibs->fru.areasize]; + + count = MIN(cmd[5], ibs->fru.areasize - offset); + + rsp_buffer_push(rsp, count & 0xff); + for (i = 0; i < count; i++) { + rsp_buffer_push(rsp, fru_entry[offset + i]); + } +} + +static void write_fru_data(IPMIBmcSim *ibs, + uint8_t *cmd, unsigned int cmd_len, + RspBuffer *rsp) +{ + uint8_t fruid; + uint16_t offset; + uint8_t *fru_entry; + unsigned int count; + + fruid = cmd[2]; + offset = (cmd[3] | cmd[4] << 8); + + if (fruid >= ibs->fru.nentries) { + rsp_buffer_set_error(rsp, IPMI_CC_INVALID_DATA_FIELD); + return; + } + + if (offset >= ibs->fru.areasize - 1) { + rsp_buffer_set_error(rsp, IPMI_CC_INVALID_DATA_FIELD); + return; + } + + fru_entry = &ibs->fru.data[fruid * ibs->fru.areasize]; + + count = MIN(cmd_len - 5, ibs->fru.areasize - offset); + + memcpy(fru_entry + offset, cmd + 5, count); + + rsp_buffer_push(rsp, count & 0xff); +} + +static void reserve_sel(IPMIBmcSim *ibs, + uint8_t *cmd, unsigned int cmd_len, + RspBuffer *rsp) +{ + rsp_buffer_push(rsp, ibs->sel.reservation & 
0xff); + rsp_buffer_push(rsp, (ibs->sel.reservation >> 8) & 0xff); +} + +static void get_sel_entry(IPMIBmcSim *ibs, + uint8_t *cmd, unsigned int cmd_len, + RspBuffer *rsp) +{ + unsigned int val; + + if (cmd[6]) { + if ((cmd[2] | (cmd[3] << 8)) != ibs->sel.reservation) { + rsp_buffer_set_error(rsp, IPMI_CC_INVALID_RESERVATION); + return; + } + } + if (ibs->sel.next_free == 0) { + rsp_buffer_set_error(rsp, IPMI_CC_REQ_ENTRY_NOT_PRESENT); + return; + } + if (cmd[6] > 15) { + rsp_buffer_set_error(rsp, IPMI_CC_INVALID_DATA_FIELD); + return; + } + if (cmd[7] == 0xff) { + cmd[7] = 16; + } else if ((cmd[7] + cmd[6]) > 16) { + rsp_buffer_set_error(rsp, IPMI_CC_INVALID_DATA_FIELD); + return; + } else { + cmd[7] += cmd[6]; + } + + val = cmd[4] | (cmd[5] << 8); + if (val == 0xffff) { + val = ibs->sel.next_free - 1; + } else if (val >= ibs->sel.next_free) { + rsp_buffer_set_error(rsp, IPMI_CC_REQ_ENTRY_NOT_PRESENT); + return; + } + if ((val + 1) == ibs->sel.next_free) { + rsp_buffer_push(rsp, 0xff); + rsp_buffer_push(rsp, 0xff); + } else { + rsp_buffer_push(rsp, (val + 1) & 0xff); + rsp_buffer_push(rsp, ((val + 1) >> 8) & 0xff); + } + for (; cmd[6] < cmd[7]; cmd[6]++) { + rsp_buffer_push(rsp, ibs->sel.sel[val][cmd[6]]); + } +} + +static void add_sel_entry(IPMIBmcSim *ibs, + uint8_t *cmd, unsigned int cmd_len, + RspBuffer *rsp) +{ + if (sel_add_event(ibs, cmd + 2)) { + rsp_buffer_set_error(rsp, IPMI_CC_OUT_OF_SPACE); + return; + } + /* sel_add_event fills in the record number. */ + rsp_buffer_push(rsp, cmd[2]); + rsp_buffer_push(rsp, cmd[3]); +} + +static void clear_sel(IPMIBmcSim *ibs, + uint8_t *cmd, unsigned int cmd_len, + RspBuffer *rsp) +{ + if ((cmd[2] | (cmd[3] << 8)) != ibs->sel.reservation) { + rsp_buffer_set_error(rsp, IPMI_CC_INVALID_RESERVATION); + return; + } + + if (cmd[4] != 'C' || cmd[5] != 'L' || cmd[6] != 'R') { + rsp_buffer_set_error(rsp, IPMI_CC_INVALID_DATA_FIELD); + return; + } + if (cmd[7] == 0xaa) { + ibs->sel.next_free = 0; + ibs->sel.overflow = 0; + set_timestamp(ibs, ibs->sel.last_clear); + rsp_buffer_push(rsp, 1); /* Erasure complete */ + sel_inc_reservation(&ibs->sel); + } else if (cmd[7] == 0) { + rsp_buffer_push(rsp, 1); /* Erasure complete */ + } else { + rsp_buffer_set_error(rsp, IPMI_CC_INVALID_DATA_FIELD); + return; + } +} + +static void get_sel_time(IPMIBmcSim *ibs, + uint8_t *cmd, unsigned int cmd_len, + RspBuffer *rsp) +{ + uint32_t val; + struct ipmi_time now; + + ipmi_gettime(&now); + val = now.tv_sec + ibs->sel.time_offset; + rsp_buffer_push(rsp, val & 0xff); + rsp_buffer_push(rsp, (val >> 8) & 0xff); + rsp_buffer_push(rsp, (val >> 16) & 0xff); + rsp_buffer_push(rsp, (val >> 24) & 0xff); +} + +static void set_sel_time(IPMIBmcSim *ibs, + uint8_t *cmd, unsigned int cmd_len, + RspBuffer *rsp) +{ + uint32_t val; + struct ipmi_time now; + + val = cmd[2] | (cmd[3] << 8) | (cmd[4] << 16) | (cmd[5] << 24); + ipmi_gettime(&now); + ibs->sel.time_offset = now.tv_sec - ((long) val); +} + +static void platform_event_msg(IPMIBmcSim *ibs, + uint8_t *cmd, unsigned int cmd_len, + RspBuffer *rsp) +{ + uint8_t event[16]; + + event[2] = 2; /* System event record */ + event[7] = cmd[2]; /* Generator ID */ + event[8] = 0; + event[9] = cmd[3]; /* EvMRev */ + event[10] = cmd[4]; /* Sensor type */ + event[11] = cmd[5]; /* Sensor number */ + event[12] = cmd[6]; /* Event dir / Event type */ + event[13] = cmd[7]; /* Event data 1 */ + event[14] = cmd[8]; /* Event data 2 */ + event[15] = cmd[9]; /* Event data 3 */ + + if (sel_add_event(ibs, event)) { + rsp_buffer_set_error(rsp, 
IPMI_CC_OUT_OF_SPACE); + } +} + +static void set_sensor_evt_enable(IPMIBmcSim *ibs, + uint8_t *cmd, unsigned int cmd_len, + RspBuffer *rsp) +{ + IPMISensor *sens; + + if ((cmd[2] >= MAX_SENSORS) || + !IPMI_SENSOR_GET_PRESENT(ibs->sensors + cmd[2])) { + rsp_buffer_set_error(rsp, IPMI_CC_REQ_ENTRY_NOT_PRESENT); + return; + } + sens = ibs->sensors + cmd[2]; + switch ((cmd[3] >> 4) & 0x3) { + case 0: /* Do not change */ + break; + case 1: /* Enable bits */ + if (cmd_len > 4) { + sens->assert_enable |= cmd[4]; + } + if (cmd_len > 5) { + sens->assert_enable |= cmd[5] << 8; + } + if (cmd_len > 6) { + sens->deassert_enable |= cmd[6]; + } + if (cmd_len > 7) { + sens->deassert_enable |= cmd[7] << 8; + } + break; + case 2: /* Disable bits */ + if (cmd_len > 4) { + sens->assert_enable &= ~cmd[4]; + } + if (cmd_len > 5) { + sens->assert_enable &= ~(cmd[5] << 8); + } + if (cmd_len > 6) { + sens->deassert_enable &= ~cmd[6]; + } + if (cmd_len > 7) { + sens->deassert_enable &= ~(cmd[7] << 8); + } + break; + case 3: + rsp_buffer_set_error(rsp, IPMI_CC_INVALID_DATA_FIELD); + return; + } + IPMI_SENSOR_SET_RET_STATUS(sens, cmd[3]); +} + +static void get_sensor_evt_enable(IPMIBmcSim *ibs, + uint8_t *cmd, unsigned int cmd_len, + RspBuffer *rsp) +{ + IPMISensor *sens; + + if ((cmd[2] >= MAX_SENSORS) || + !IPMI_SENSOR_GET_PRESENT(ibs->sensors + cmd[2])) { + rsp_buffer_set_error(rsp, IPMI_CC_REQ_ENTRY_NOT_PRESENT); + return; + } + sens = ibs->sensors + cmd[2]; + rsp_buffer_push(rsp, IPMI_SENSOR_GET_RET_STATUS(sens)); + rsp_buffer_push(rsp, sens->assert_enable & 0xff); + rsp_buffer_push(rsp, (sens->assert_enable >> 8) & 0xff); + rsp_buffer_push(rsp, sens->deassert_enable & 0xff); + rsp_buffer_push(rsp, (sens->deassert_enable >> 8) & 0xff); +} + +static void rearm_sensor_evts(IPMIBmcSim *ibs, + uint8_t *cmd, unsigned int cmd_len, + RspBuffer *rsp) +{ + IPMISensor *sens; + + if ((cmd[2] >= MAX_SENSORS) || + !IPMI_SENSOR_GET_PRESENT(ibs->sensors + cmd[2])) { + rsp_buffer_set_error(rsp, IPMI_CC_REQ_ENTRY_NOT_PRESENT); + return; + } + sens = ibs->sensors + cmd[2]; + + if ((cmd[3] & 0x80) == 0) { + /* Just clear everything */ + sens->states = 0; + return; + } +} + +static void get_sensor_evt_status(IPMIBmcSim *ibs, + uint8_t *cmd, unsigned int cmd_len, + RspBuffer *rsp) +{ + IPMISensor *sens; + + if ((cmd[2] >= MAX_SENSORS) || + !IPMI_SENSOR_GET_PRESENT(ibs->sensors + cmd[2])) { + rsp_buffer_set_error(rsp, IPMI_CC_REQ_ENTRY_NOT_PRESENT); + return; + } + sens = ibs->sensors + cmd[2]; + rsp_buffer_push(rsp, sens->reading); + rsp_buffer_push(rsp, IPMI_SENSOR_GET_RET_STATUS(sens)); + rsp_buffer_push(rsp, sens->assert_states & 0xff); + rsp_buffer_push(rsp, (sens->assert_states >> 8) & 0xff); + rsp_buffer_push(rsp, sens->deassert_states & 0xff); + rsp_buffer_push(rsp, (sens->deassert_states >> 8) & 0xff); +} + +static void get_sensor_reading(IPMIBmcSim *ibs, + uint8_t *cmd, unsigned int cmd_len, + RspBuffer *rsp) +{ + IPMISensor *sens; + + if ((cmd[2] >= MAX_SENSORS) || + !IPMI_SENSOR_GET_PRESENT(ibs->sensors + cmd[2])) { + rsp_buffer_set_error(rsp, IPMI_CC_REQ_ENTRY_NOT_PRESENT); + return; + } + sens = ibs->sensors + cmd[2]; + rsp_buffer_push(rsp, sens->reading); + rsp_buffer_push(rsp, IPMI_SENSOR_GET_RET_STATUS(sens)); + rsp_buffer_push(rsp, sens->states & 0xff); + if (IPMI_SENSOR_IS_DISCRETE(sens)) { + rsp_buffer_push(rsp, (sens->states >> 8) & 0xff); + } +} + +static void set_sensor_type(IPMIBmcSim *ibs, + uint8_t *cmd, unsigned int cmd_len, + RspBuffer *rsp) +{ + IPMISensor *sens; + + + if ((cmd[2] >= MAX_SENSORS) || + 
!IPMI_SENSOR_GET_PRESENT(ibs->sensors + cmd[2])) { + rsp_buffer_set_error(rsp, IPMI_CC_REQ_ENTRY_NOT_PRESENT); + return; + } + sens = ibs->sensors + cmd[2]; + sens->sensor_type = cmd[3]; + sens->evt_reading_type_code = cmd[4] & 0x7f; +} + +static void get_sensor_type(IPMIBmcSim *ibs, + uint8_t *cmd, unsigned int cmd_len, + RspBuffer *rsp) +{ + IPMISensor *sens; + + + if ((cmd[2] >= MAX_SENSORS) || + !IPMI_SENSOR_GET_PRESENT(ibs->sensors + cmd[2])) { + rsp_buffer_set_error(rsp, IPMI_CC_REQ_ENTRY_NOT_PRESENT); + return; + } + sens = ibs->sensors + cmd[2]; + rsp_buffer_push(rsp, sens->sensor_type); + rsp_buffer_push(rsp, sens->evt_reading_type_code); +} + +/* + * bytes parameter + * 1 sensor number + * 2 operation (see below for bits meaning) + * 3 sensor reading + * 4:5 assertion states (optional) + * 6:7 deassertion states (optional) + * 8:10 event data 1,2,3 (optional) + */ +static void set_sensor_reading(IPMIBmcSim *ibs, + uint8_t *cmd, unsigned int cmd_len, + RspBuffer *rsp) +{ + IPMISensor *sens; + uint8_t evd1 = 0; + uint8_t evd2 = 0; + uint8_t evd3 = 0; + uint8_t new_reading = 0; + uint16_t new_assert_states = 0; + uint16_t new_deassert_states = 0; + bool change_reading = false; + bool change_assert = false; + bool change_deassert = false; + enum { + SENSOR_GEN_EVENT_NONE, + SENSOR_GEN_EVENT_DATA, + SENSOR_GEN_EVENT_BMC, + } do_gen_event = SENSOR_GEN_EVENT_NONE; + + if ((cmd[2] >= MAX_SENSORS) || + !IPMI_SENSOR_GET_PRESENT(ibs->sensors + cmd[2])) { + rsp_buffer_set_error(rsp, IPMI_CC_REQ_ENTRY_NOT_PRESENT); + return; + } + + sens = ibs->sensors + cmd[2]; + + /* [1:0] Sensor Reading operation */ + switch ((cmd[3]) & 0x3) { + case 0: /* Do not change */ + break; + case 1: /* write given value to sensor reading byte */ + new_reading = cmd[4]; + if (sens->reading != new_reading) { + change_reading = true; + } + break; + case 2: + case 3: + rsp_buffer_set_error(rsp, IPMI_CC_INVALID_DATA_FIELD); + return; + } + + /* [3:2] Deassertion bits operation */ + switch ((cmd[3] >> 2) & 0x3) { + case 0: /* Do not change */ + break; + case 1: /* write given value */ + if (cmd_len > 7) { + new_deassert_states = cmd[7]; + change_deassert = true; + } + if (cmd_len > 8) { + new_deassert_states |= (cmd[8] << 8); + } + break; + + case 2: /* mask on */ + if (cmd_len > 7) { + new_deassert_states = (sens->deassert_states | cmd[7]); + change_deassert = true; + } + if (cmd_len > 8) { + new_deassert_states |= (sens->deassert_states | (cmd[8] << 8)); + } + break; + + case 3: /* mask off */ + if (cmd_len > 7) { + new_deassert_states = (sens->deassert_states & cmd[7]); + change_deassert = true; + } + if (cmd_len > 8) { + new_deassert_states |= (sens->deassert_states & (cmd[8] << 8)); + } + break; + } + + if (change_deassert && (new_deassert_states == sens->deassert_states)) { + change_deassert = false; + } + + /* [5:4] Assertion bits operation */ + switch ((cmd[3] >> 4) & 0x3) { + case 0: /* Do not change */ + break; + case 1: /* write given value */ + if (cmd_len > 5) { + new_assert_states = cmd[5]; + change_assert = true; + } + if (cmd_len > 6) { + new_assert_states |= (cmd[6] << 8); + } + break; + + case 2: /* mask on */ + if (cmd_len > 5) { + new_assert_states = (sens->assert_states | cmd[5]); + change_assert = true; + } + if (cmd_len > 6) { + new_assert_states |= (sens->assert_states | (cmd[6] << 8)); + } + break; + + case 3: /* mask off */ + if (cmd_len > 5) { + new_assert_states = (sens->assert_states & cmd[5]); + change_assert = true; + } + if (cmd_len > 6) { + new_assert_states |= (sens->assert_states & 
(cmd[6] << 8)); + } + break; + } + + if (change_assert && (new_assert_states == sens->assert_states)) { + change_assert = false; + } + + if (cmd_len > 9) { + evd1 = cmd[9]; + } + if (cmd_len > 10) { + evd2 = cmd[10]; + } + if (cmd_len > 11) { + evd3 = cmd[11]; + } + + /* [7:6] Event Data Bytes operation */ + switch ((cmd[3] >> 6) & 0x3) { + case 0: /* + * Don't use Event Data bytes from this command. The BMC will + * generate its own Event Data bytes based on its sensor + * implementation. + */ + evd1 = evd2 = evd3 = 0x0; + do_gen_event = SENSOR_GEN_EVENT_BMC; + break; + case 1: /* + * Write given values to event data bytes including bits + * [3:0] Event Data 1. + */ + do_gen_event = SENSOR_GEN_EVENT_DATA; + break; + case 2: /* + * Write given values to event data bytes excluding bits + * [3:0] Event Data 1. + */ + evd1 &= 0xf0; + do_gen_event = SENSOR_GEN_EVENT_DATA; + break; + case 3: + rsp_buffer_set_error(rsp, IPMI_CC_INVALID_DATA_FIELD); + return; + } + + /* + * The Event Data Bytes operation and the supplied parameters are + * inconsistent. The specs are not clear on this point, but generating + * an error seems correct. + */ + if (do_gen_event == SENSOR_GEN_EVENT_DATA && cmd_len < 10) { + rsp_buffer_set_error(rsp, IPMI_CC_INVALID_DATA_FIELD); + return; + } + + /* commit values */ + if (change_reading) { + sens->reading = new_reading; + } + + if (change_assert) { + sens->assert_states = new_assert_states; + } + + if (change_deassert) { + sens->deassert_states = new_deassert_states; + } + + /* TODO: handle threshold sensor */ + if (!IPMI_SENSOR_IS_DISCRETE(sens)) { + return; + } + + switch (do_gen_event) { + case SENSOR_GEN_EVENT_DATA: { + unsigned int bit = evd1 & 0xf; + uint16_t mask = (1 << bit); + + if (sens->assert_states & mask & sens->assert_enable) { + gen_event(ibs, cmd[2], 0, evd1, evd2, evd3); + } + + if (sens->deassert_states & mask & sens->deassert_enable) { + gen_event(ibs, cmd[2], 1, evd1, evd2, evd3); + } + break; + } + case SENSOR_GEN_EVENT_BMC: + /* + * TODO: generate event and event data bytes depending on the + * sensor + */ + break; + case SENSOR_GEN_EVENT_NONE: + break; + } +} + +static const IPMICmdHandler chassis_cmds[] = { + [IPMI_CMD_GET_CHASSIS_CAPABILITIES] = { chassis_capabilities }, + [IPMI_CMD_GET_CHASSIS_STATUS] = { chassis_status }, + [IPMI_CMD_CHASSIS_CONTROL] = { chassis_control, 3 }, + [IPMI_CMD_GET_SYS_RESTART_CAUSE] = { chassis_get_sys_restart_cause } +}; +static const IPMINetfn chassis_netfn = { + .cmd_nums = ARRAY_SIZE(chassis_cmds), + .cmd_handlers = chassis_cmds +}; + +static const IPMICmdHandler sensor_event_cmds[] = { + [IPMI_CMD_PLATFORM_EVENT_MSG] = { platform_event_msg, 10 }, + [IPMI_CMD_SET_SENSOR_EVT_ENABLE] = { set_sensor_evt_enable, 4 }, + [IPMI_CMD_GET_SENSOR_EVT_ENABLE] = { get_sensor_evt_enable, 3 }, + [IPMI_CMD_REARM_SENSOR_EVTS] = { rearm_sensor_evts, 4 }, + [IPMI_CMD_GET_SENSOR_EVT_STATUS] = { get_sensor_evt_status, 3 }, + [IPMI_CMD_GET_SENSOR_READING] = { get_sensor_reading, 3 }, + [IPMI_CMD_SET_SENSOR_TYPE] = { set_sensor_type, 5 }, + [IPMI_CMD_GET_SENSOR_TYPE] = { get_sensor_type, 3 }, + [IPMI_CMD_SET_SENSOR_READING] = { set_sensor_reading, 5 }, +}; +static const IPMINetfn sensor_event_netfn = { + .cmd_nums = ARRAY_SIZE(sensor_event_cmds), + .cmd_handlers = sensor_event_cmds +}; + +static const IPMICmdHandler app_cmds[] = { + [IPMI_CMD_GET_DEVICE_ID] = { get_device_id }, + [IPMI_CMD_COLD_RESET] = { cold_reset }, + [IPMI_CMD_WARM_RESET] = { warm_reset }, + [IPMI_CMD_SET_ACPI_POWER_STATE] = { set_acpi_power_state, 4 }, + 
[IPMI_CMD_GET_ACPI_POWER_STATE] = { get_acpi_power_state }, + [IPMI_CMD_GET_DEVICE_GUID] = { get_device_guid }, + [IPMI_CMD_SET_BMC_GLOBAL_ENABLES] = { set_bmc_global_enables, 3 }, + [IPMI_CMD_GET_BMC_GLOBAL_ENABLES] = { get_bmc_global_enables }, + [IPMI_CMD_CLR_MSG_FLAGS] = { clr_msg_flags, 3 }, + [IPMI_CMD_GET_MSG_FLAGS] = { get_msg_flags }, + [IPMI_CMD_GET_MSG] = { get_msg }, + [IPMI_CMD_SEND_MSG] = { send_msg, 3 }, + [IPMI_CMD_READ_EVT_MSG_BUF] = { read_evt_msg_buf }, + [IPMI_CMD_RESET_WATCHDOG_TIMER] = { reset_watchdog_timer }, + [IPMI_CMD_SET_WATCHDOG_TIMER] = { set_watchdog_timer, 8 }, + [IPMI_CMD_GET_WATCHDOG_TIMER] = { get_watchdog_timer }, +}; +static const IPMINetfn app_netfn = { + .cmd_nums = ARRAY_SIZE(app_cmds), + .cmd_handlers = app_cmds +}; + +static const IPMICmdHandler storage_cmds[] = { + [IPMI_CMD_GET_FRU_AREA_INFO] = { get_fru_area_info, 3 }, + [IPMI_CMD_READ_FRU_DATA] = { read_fru_data, 5 }, + [IPMI_CMD_WRITE_FRU_DATA] = { write_fru_data, 5 }, + [IPMI_CMD_GET_SDR_REP_INFO] = { get_sdr_rep_info }, + [IPMI_CMD_RESERVE_SDR_REP] = { reserve_sdr_rep }, + [IPMI_CMD_GET_SDR] = { get_sdr, 8 }, + [IPMI_CMD_ADD_SDR] = { add_sdr }, + [IPMI_CMD_CLEAR_SDR_REP] = { clear_sdr_rep, 8 }, + [IPMI_CMD_GET_SEL_INFO] = { get_sel_info }, + [IPMI_CMD_RESERVE_SEL] = { reserve_sel }, + [IPMI_CMD_GET_SEL_ENTRY] = { get_sel_entry, 8 }, + [IPMI_CMD_ADD_SEL_ENTRY] = { add_sel_entry, 18 }, + [IPMI_CMD_CLEAR_SEL] = { clear_sel, 8 }, + [IPMI_CMD_GET_SEL_TIME] = { get_sel_time }, + [IPMI_CMD_SET_SEL_TIME] = { set_sel_time, 6 }, +}; + +static const IPMINetfn storage_netfn = { + .cmd_nums = ARRAY_SIZE(storage_cmds), + .cmd_handlers = storage_cmds +}; + +static void register_cmds(IPMIBmcSim *s) +{ + ipmi_sim_register_netfn(s, IPMI_NETFN_CHASSIS, &chassis_netfn); + ipmi_sim_register_netfn(s, IPMI_NETFN_SENSOR_EVENT, &sensor_event_netfn); + ipmi_sim_register_netfn(s, IPMI_NETFN_APP, &app_netfn); + ipmi_sim_register_netfn(s, IPMI_NETFN_STORAGE, &storage_netfn); +} + +static uint8_t init_sdrs[] = { + /* Watchdog device */ + 0x00, 0x00, 0x51, 0x02, 35, 0x20, 0x00, 0x00, + 0x23, 0x01, 0x63, 0x00, 0x23, 0x6f, 0x0f, 0x01, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc8, + 'W', 'a', 't', 'c', 'h', 'd', 'o', 'g', +}; + +static void ipmi_sdr_init(IPMIBmcSim *ibs) +{ + unsigned int i; + int len; + size_t sdrs_size; + uint8_t *sdrs; + + sdrs_size = sizeof(init_sdrs); + sdrs = init_sdrs; + if (ibs->sdr_filename && + !g_file_get_contents(ibs->sdr_filename, (gchar **) &sdrs, &sdrs_size, + NULL)) { + error_report("failed to load sdr file '%s'", ibs->sdr_filename); + sdrs_size = sizeof(init_sdrs); + sdrs = init_sdrs; + } + + for (i = 0; i < sdrs_size; i += len) { + struct ipmi_sdr_header *sdrh; + + if (i + IPMI_SDR_HEADER_SIZE > sdrs_size) { + error_report("Problem with recid 0x%4.4x", i); + break; + } + sdrh = (struct ipmi_sdr_header *) &sdrs[i]; + len = ipmi_sdr_length(sdrh); + if (i + len > sdrs_size) { + error_report("Problem with recid 0x%4.4x", i); + break; + } + sdr_add_entry(ibs, sdrh, len, NULL); + } + + if (sdrs != init_sdrs) { + g_free(sdrs); + } +} + +static const VMStateDescription vmstate_ipmi_sim = { + .name = TYPE_IPMI_BMC_SIMULATOR, + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT8(bmc_global_enables, IPMIBmcSim), + VMSTATE_UINT8(msg_flags, IPMIBmcSim), + VMSTATE_BOOL(watchdog_initialized, IPMIBmcSim), + VMSTATE_UINT8(watchdog_use, IPMIBmcSim), + VMSTATE_UINT8(watchdog_action, IPMIBmcSim), + 
VMSTATE_UINT8(watchdog_pretimeout, IPMIBmcSim), + VMSTATE_UINT8(watchdog_expired, IPMIBmcSim), + VMSTATE_UINT16(watchdog_timeout, IPMIBmcSim), + VMSTATE_BOOL(watchdog_running, IPMIBmcSim), + VMSTATE_BOOL(watchdog_preaction_ran, IPMIBmcSim), + VMSTATE_INT64(watchdog_expiry, IPMIBmcSim), + VMSTATE_UINT8_ARRAY(evtbuf, IPMIBmcSim, 16), + VMSTATE_UINT8(sensors[IPMI_WATCHDOG_SENSOR].status, IPMIBmcSim), + VMSTATE_UINT8(sensors[IPMI_WATCHDOG_SENSOR].reading, IPMIBmcSim), + VMSTATE_UINT16(sensors[IPMI_WATCHDOG_SENSOR].states, IPMIBmcSim), + VMSTATE_UINT16(sensors[IPMI_WATCHDOG_SENSOR].assert_states, IPMIBmcSim), + VMSTATE_UINT16(sensors[IPMI_WATCHDOG_SENSOR].deassert_states, + IPMIBmcSim), + VMSTATE_UINT16(sensors[IPMI_WATCHDOG_SENSOR].assert_enable, IPMIBmcSim), + VMSTATE_END_OF_LIST() + } +}; + +static void ipmi_fru_init(IPMIFru *fru) +{ + int fsize; + int size = 0; + + if (!fru->filename) { + goto out; + } + + fsize = get_image_size(fru->filename); + if (fsize > 0) { + size = QEMU_ALIGN_UP(fsize, fru->areasize); + fru->data = g_malloc0(size); + if (load_image_size(fru->filename, fru->data, fsize) != fsize) { + error_report("Could not load file '%s'", fru->filename); + g_free(fru->data); + fru->data = NULL; + } + } + +out: + if (!fru->data) { + /* give one default FRU */ + size = fru->areasize; + fru->data = g_malloc0(size); + } + + fru->nentries = size / fru->areasize; +} + +static void ipmi_sim_realize(DeviceState *dev, Error **errp) +{ + IPMIBmc *b = IPMI_BMC(dev); + unsigned int i; + IPMIBmcSim *ibs = IPMI_BMC_SIMULATOR(b); + + QTAILQ_INIT(&ibs->rcvbufs); + + ibs->bmc_global_enables = (1 << IPMI_BMC_EVENT_LOG_BIT); + ibs->device_id = 0x20; + ibs->ipmi_version = 0x02; /* IPMI 2.0 */ + ibs->restart_cause = 0; + for (i = 0; i < 4; i++) { + ibs->sel.last_addition[i] = 0xff; + ibs->sel.last_clear[i] = 0xff; + ibs->sdr.last_addition[i] = 0xff; + ibs->sdr.last_clear[i] = 0xff; + } + + ipmi_sdr_init(ibs); + + ipmi_fru_init(&ibs->fru); + + ibs->acpi_power_state[0] = 0; + ibs->acpi_power_state[1] = 0; + + ipmi_init_sensors_from_sdrs(ibs); + register_cmds(ibs); + + ibs->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, ipmi_timeout, ibs); + + vmstate_register(NULL, 0, &vmstate_ipmi_sim, ibs); +} + +static Property ipmi_sim_properties[] = { + DEFINE_PROP_UINT16("fruareasize", IPMIBmcSim, fru.areasize, 1024), + DEFINE_PROP_STRING("frudatafile", IPMIBmcSim, fru.filename), + DEFINE_PROP_STRING("sdrfile", IPMIBmcSim, sdr_filename), + DEFINE_PROP_UINT8("device_id", IPMIBmcSim, device_id, 0x20), + DEFINE_PROP_UINT8("ipmi_version", IPMIBmcSim, ipmi_version, 0x02), + DEFINE_PROP_UINT8("device_rev", IPMIBmcSim, device_rev, 0), + DEFINE_PROP_UINT8("fwrev1", IPMIBmcSim, fwrev1, 0), + DEFINE_PROP_UINT8("fwrev2", IPMIBmcSim, fwrev2, 0), + DEFINE_PROP_UINT32("mfg_id", IPMIBmcSim, mfg_id, 0), + DEFINE_PROP_UINT16("product_id", IPMIBmcSim, product_id, 0), + DEFINE_PROP_UUID_NODEFAULT("guid", IPMIBmcSim, uuid), + DEFINE_PROP_END_OF_LIST(), +}; + +static void ipmi_sim_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + IPMIBmcClass *bk = IPMI_BMC_CLASS(oc); + + dc->hotpluggable = false; + dc->realize = ipmi_sim_realize; + device_class_set_props(dc, ipmi_sim_properties); + bk->handle_command = ipmi_sim_handle_command; +} + +static const TypeInfo ipmi_sim_type = { + .name = TYPE_IPMI_BMC_SIMULATOR, + .parent = TYPE_IPMI_BMC, + .instance_size = sizeof(IPMIBmcSim), + .class_init = ipmi_sim_class_init, +}; + +static void ipmi_sim_register_types(void) +{ + type_register_static(&ipmi_sim_type); +} + 
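+/* + * Example (not part of the original sources): the simulated BMC is + * typically paired with a system interface on the QEMU command line, + * e.g. -device ipmi-bmc-sim,id=bmc0 -device isa-ipmi-kcs,bmc=bmc0 + */ +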
+type_init(ipmi_sim_register_types) diff --git a/hw/ipmi/ipmi_bt.c b/hw/ipmi/ipmi_bt.c new file mode 100644 index 000000000..22f94fb98 --- /dev/null +++ b/hw/ipmi/ipmi_bt.c @@ -0,0 +1,437 @@ +/* + * QEMU IPMI BT emulation + * + * Copyright (c) 2015 Corey Minyard, MontaVista Software, LLC + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ +#include "qemu/osdep.h" +#include "migration/vmstate.h" +#include "qemu/log.h" +#include "qapi/error.h" +#include "hw/ipmi/ipmi_bt.h" + +/* Control register */ +#define IPMI_BT_CLR_WR_BIT 0 +#define IPMI_BT_CLR_RD_BIT 1 +#define IPMI_BT_H2B_ATN_BIT 2 +#define IPMI_BT_B2H_ATN_BIT 3 +#define IPMI_BT_SMS_ATN_BIT 4 +#define IPMI_BT_HBUSY_BIT 6 +#define IPMI_BT_BBUSY_BIT 7 + +#define IPMI_BT_GET_CLR_WR(d) (((d) >> IPMI_BT_CLR_WR_BIT) & 0x1) + +#define IPMI_BT_GET_CLR_RD(d) (((d) >> IPMI_BT_CLR_RD_BIT) & 0x1) + +#define IPMI_BT_GET_H2B_ATN(d) (((d) >> IPMI_BT_H2B_ATN_BIT) & 0x1) + +#define IPMI_BT_B2H_ATN_MASK (1 << IPMI_BT_B2H_ATN_BIT) +#define IPMI_BT_GET_B2H_ATN(d) (((d) >> IPMI_BT_B2H_ATN_BIT) & 0x1) +#define IPMI_BT_SET_B2H_ATN(d, v) ((d) = (((d) & ~IPMI_BT_B2H_ATN_MASK) | \ + (!!(v) << IPMI_BT_B2H_ATN_BIT))) + +#define IPMI_BT_SMS_ATN_MASK (1 << IPMI_BT_SMS_ATN_BIT) +#define IPMI_BT_GET_SMS_ATN(d) (((d) >> IPMI_BT_SMS_ATN_BIT) & 0x1) +#define IPMI_BT_SET_SMS_ATN(d, v) ((d) = (((d) & ~IPMI_BT_SMS_ATN_MASK) | \ + (!!(v) << IPMI_BT_SMS_ATN_BIT))) + +#define IPMI_BT_HBUSY_MASK (1 << IPMI_BT_HBUSY_BIT) +#define IPMI_BT_GET_HBUSY(d) (((d) >> IPMI_BT_HBUSY_BIT) & 0x1) +#define IPMI_BT_SET_HBUSY(d, v) ((d) = (((d) & ~IPMI_BT_HBUSY_MASK) | \ + (!!(v) << IPMI_BT_HBUSY_BIT))) + +#define IPMI_BT_BBUSY_MASK (1 << IPMI_BT_BBUSY_BIT) +#define IPMI_BT_SET_BBUSY(d, v) ((d) = (((d) & ~IPMI_BT_BBUSY_MASK) | \ + (!!(v) << IPMI_BT_BBUSY_BIT))) + + +/* Mask register */ +#define IPMI_BT_B2H_IRQ_EN_BIT 0 +#define IPMI_BT_B2H_IRQ_BIT 1 + +#define IPMI_BT_B2H_IRQ_EN_MASK (1 << IPMI_BT_B2H_IRQ_EN_BIT) +#define IPMI_BT_GET_B2H_IRQ_EN(d) (((d) >> IPMI_BT_B2H_IRQ_EN_BIT) & 0x1) +#define IPMI_BT_SET_B2H_IRQ_EN(d, v) ((d) = (((d) & ~IPMI_BT_B2H_IRQ_EN_MASK) |\ + (!!(v) << IPMI_BT_B2H_IRQ_EN_BIT))) + +#define IPMI_BT_B2H_IRQ_MASK (1 << IPMI_BT_B2H_IRQ_BIT) +#define IPMI_BT_GET_B2H_IRQ(d) (((d) >> IPMI_BT_B2H_IRQ_BIT) & 0x1) +#define IPMI_BT_SET_B2H_IRQ(d, v) ((d) = (((d) & ~IPMI_BT_B2H_IRQ_MASK) | \ + (!!(v) << IPMI_BT_B2H_IRQ_BIT))) + +#define IPMI_CMD_GET_BT_INTF_CAP 0x36 + +static void ipmi_bt_raise_irq(IPMIBT *ib) +{ + if (ib->use_irq && ib->irqs_enabled && 
ib->raise_irq) { + ib->raise_irq(ib); + } +} + +static void ipmi_bt_lower_irq(IPMIBT *ib) +{ + if (ib->lower_irq) { + ib->lower_irq(ib); + } +} + +static void ipmi_bt_handle_event(IPMIInterface *ii) +{ + IPMIInterfaceClass *iic = IPMI_INTERFACE_GET_CLASS(ii); + IPMIBT *ib = iic->get_backend_data(ii); + + if (ib->inlen < 4) { + goto out; + } + /* Note that overruns are handled by handle_command */ + if (ib->inmsg[0] != (ib->inlen - 1)) { + /* Length mismatch, just ignore. */ + IPMI_BT_SET_BBUSY(ib->control_reg, 1); + ib->inlen = 0; + goto out; + } + if ((ib->inmsg[1] == (IPMI_NETFN_APP << 2)) && + (ib->inmsg[3] == IPMI_CMD_GET_BT_INTF_CAP)) { + /* We handle this one ourselves. */ + ib->outmsg[0] = 9; + ib->outmsg[1] = ib->inmsg[1] | 0x04; + ib->outmsg[2] = ib->inmsg[2]; + ib->outmsg[3] = ib->inmsg[3]; + ib->outmsg[4] = 0; + ib->outmsg[5] = 1; /* Only support 1 outstanding request. */ + if (sizeof(ib->inmsg) > 0xff) { /* Input buffer size */ + ib->outmsg[6] = 0xff; + } else { + ib->outmsg[6] = (unsigned char) sizeof(ib->inmsg); + } + if (sizeof(ib->outmsg) > 0xff) { /* Output buffer size */ + ib->outmsg[7] = 0xff; + } else { + ib->outmsg[7] = (unsigned char) sizeof(ib->outmsg); + } + ib->outmsg[8] = 10; /* Max request to response time */ + ib->outmsg[9] = 0; /* Don't recommend retries */ + ib->outlen = 10; + IPMI_BT_SET_BBUSY(ib->control_reg, 0); + IPMI_BT_SET_B2H_ATN(ib->control_reg, 1); + if (!IPMI_BT_GET_B2H_IRQ(ib->mask_reg) && + IPMI_BT_GET_B2H_IRQ_EN(ib->mask_reg)) { + IPMI_BT_SET_B2H_IRQ(ib->mask_reg, 1); + ipmi_bt_raise_irq(ib); + } + goto out; + } + ib->waiting_seq = ib->inmsg[2]; + ib->inmsg[2] = ib->inmsg[1]; + { + IPMIBmcClass *bk = IPMI_BMC_GET_CLASS(ib->bmc); + bk->handle_command(ib->bmc, ib->inmsg + 2, ib->inlen - 2, + sizeof(ib->inmsg), ib->waiting_rsp); + } + out: + return; +} + +static void ipmi_bt_handle_rsp(IPMIInterface *ii, uint8_t msg_id, + unsigned char *rsp, unsigned int rsp_len) +{ + IPMIInterfaceClass *iic = IPMI_INTERFACE_GET_CLASS(ii); + IPMIBT *ib = iic->get_backend_data(ii); + + if (ib->waiting_rsp == msg_id) { + ib->waiting_rsp++; + if (rsp_len > (sizeof(ib->outmsg) - 2)) { + ib->outmsg[0] = 4; + ib->outmsg[1] = rsp[0]; + ib->outmsg[2] = ib->waiting_seq; + ib->outmsg[3] = rsp[1]; + ib->outmsg[4] = IPMI_CC_CANNOT_RETURN_REQ_NUM_BYTES; + ib->outlen = 5; + } else { + ib->outmsg[0] = rsp_len + 1; + ib->outmsg[1] = rsp[0]; + ib->outmsg[2] = ib->waiting_seq; + memcpy(ib->outmsg + 3, rsp + 1, rsp_len - 1); + ib->outlen = rsp_len + 2; + } + IPMI_BT_SET_BBUSY(ib->control_reg, 0); + IPMI_BT_SET_B2H_ATN(ib->control_reg, 1); + if (!IPMI_BT_GET_B2H_IRQ(ib->mask_reg) && + IPMI_BT_GET_B2H_IRQ_EN(ib->mask_reg)) { + IPMI_BT_SET_B2H_IRQ(ib->mask_reg, 1); + ipmi_bt_raise_irq(ib); + } + } +} + + +static uint64_t ipmi_bt_ioport_read(void *opaque, hwaddr addr, unsigned size) +{ + IPMIInterface *ii = opaque; + IPMIInterfaceClass *iic = IPMI_INTERFACE_GET_CLASS(ii); + IPMIBT *ib = iic->get_backend_data(ii); + uint32_t ret = 0xff; + + switch (addr & ib->size_mask) { + case 0: + ret = ib->control_reg; + break; + case 1: + if (ib->outpos < ib->outlen) { + ret = ib->outmsg[ib->outpos]; + ib->outpos++; + if (ib->outpos == ib->outlen) { + ib->outpos = 0; + ib->outlen = 0; + } + } else { + ret = 0xff; + } + break; + case 2: + ret = ib->mask_reg; + break; + default: + ret = 0xff; + break; + } + return ret; +} + +static void ipmi_bt_signal(IPMIBT *ib, IPMIInterface *ii) +{ + IPMIInterfaceClass *iic = IPMI_INTERFACE_GET_CLASS(ii); + + ib->do_wake = 1; + while (ib->do_wake) { + ib->do_wake = 0; 
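+        /*
+         * handle_if_event() may set do_wake again while it runs, so
+         * keep looping until no further wakeup has been requested.
+         */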
+ iic->handle_if_event(ii); + } +} + +static void ipmi_bt_ioport_write(void *opaque, hwaddr addr, uint64_t val, + unsigned size) +{ + IPMIInterface *ii = opaque; + IPMIInterfaceClass *iic = IPMI_INTERFACE_GET_CLASS(ii); + IPMIBT *ib = iic->get_backend_data(ii); + + switch (addr & ib->size_mask) { + case 0: + if (IPMI_BT_GET_CLR_WR(val)) { + ib->inlen = 0; + } + if (IPMI_BT_GET_CLR_RD(val)) { + ib->outpos = 0; + } + if (IPMI_BT_GET_B2H_ATN(val)) { + IPMI_BT_SET_B2H_ATN(ib->control_reg, 0); + } + if (IPMI_BT_GET_SMS_ATN(val)) { + IPMI_BT_SET_SMS_ATN(ib->control_reg, 0); + } + if (IPMI_BT_GET_HBUSY(val)) { + /* Toggle */ + IPMI_BT_SET_HBUSY(ib->control_reg, + !IPMI_BT_GET_HBUSY(ib->control_reg)); + } + if (IPMI_BT_GET_H2B_ATN(val)) { + IPMI_BT_SET_BBUSY(ib->control_reg, 1); + ipmi_bt_signal(ib, ii); + } + break; + + case 1: + if (ib->inlen < sizeof(ib->inmsg)) { + ib->inmsg[ib->inlen] = val; + } + ib->inlen++; + break; + + case 2: + if (IPMI_BT_GET_B2H_IRQ_EN(val) != + IPMI_BT_GET_B2H_IRQ_EN(ib->mask_reg)) { + if (IPMI_BT_GET_B2H_IRQ_EN(val)) { + if (IPMI_BT_GET_B2H_ATN(ib->control_reg) || + IPMI_BT_GET_SMS_ATN(ib->control_reg)) { + IPMI_BT_SET_B2H_IRQ(ib->mask_reg, 1); + ipmi_bt_raise_irq(ib); + } + IPMI_BT_SET_B2H_IRQ_EN(ib->mask_reg, 1); + } else { + if (IPMI_BT_GET_B2H_IRQ(ib->mask_reg)) { + IPMI_BT_SET_B2H_IRQ(ib->mask_reg, 0); + ipmi_bt_lower_irq(ib); + } + IPMI_BT_SET_B2H_IRQ_EN(ib->mask_reg, 0); + } + } + if (IPMI_BT_GET_B2H_IRQ(val) && IPMI_BT_GET_B2H_IRQ(ib->mask_reg)) { + IPMI_BT_SET_B2H_IRQ(ib->mask_reg, 0); + ipmi_bt_lower_irq(ib); + } + break; + default: + /* Ignore. */ + break; + } +} + +static const MemoryRegionOps ipmi_bt_io_ops = { + .read = ipmi_bt_ioport_read, + .write = ipmi_bt_ioport_write, + .impl = { + .min_access_size = 1, + .max_access_size = 1, + }, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static void ipmi_bt_set_atn(IPMIInterface *ii, int val, int irq) +{ + IPMIInterfaceClass *iic = IPMI_INTERFACE_GET_CLASS(ii); + IPMIBT *ib = iic->get_backend_data(ii); + + if (!!val == IPMI_BT_GET_SMS_ATN(ib->control_reg)) { + return; + } + + IPMI_BT_SET_SMS_ATN(ib->control_reg, val); + if (val) { + if (irq && !IPMI_BT_GET_B2H_ATN(ib->control_reg) && + IPMI_BT_GET_B2H_IRQ_EN(ib->mask_reg)) { + IPMI_BT_SET_B2H_IRQ(ib->mask_reg, 1); + ipmi_bt_raise_irq(ib); + } + } else { + if (!IPMI_BT_GET_B2H_ATN(ib->control_reg) && + IPMI_BT_GET_B2H_IRQ(ib->mask_reg)) { + IPMI_BT_SET_B2H_IRQ(ib->mask_reg, 0); + ipmi_bt_lower_irq(ib); + } + } +} + +static void ipmi_bt_handle_reset(IPMIInterface *ii, bool is_cold) +{ + IPMIInterfaceClass *iic = IPMI_INTERFACE_GET_CLASS(ii); + IPMIBT *ib = iic->get_backend_data(ii); + + if (is_cold) { + /* Disable the BT interrupt on reset */ + if (IPMI_BT_GET_B2H_IRQ(ib->mask_reg)) { + IPMI_BT_SET_B2H_IRQ(ib->mask_reg, 0); + ipmi_bt_lower_irq(ib); + } + IPMI_BT_SET_B2H_IRQ_EN(ib->mask_reg, 0); + } +} + +static void ipmi_bt_set_irq_enable(IPMIInterface *ii, int val) +{ + IPMIInterfaceClass *iic = IPMI_INTERFACE_GET_CLASS(ii); + IPMIBT *ib = iic->get_backend_data(ii); + + ib->irqs_enabled = val; +} + +static void ipmi_bt_init(IPMIInterface *ii, unsigned int min_size, Error **errp) +{ + IPMIInterfaceClass *iic = IPMI_INTERFACE_GET_CLASS(ii); + IPMIBT *ib = iic->get_backend_data(ii); + + if (min_size == 0) { + min_size = 4; + } + ib->size_mask = min_size - 1; + ib->io_length = 3; + + memory_region_init_io(&ib->io, NULL, &ipmi_bt_io_ops, ii, "ipmi-bt", + min_size); +} + +int ipmi_bt_vmstate_post_load(void *opaque, int version) +{ + IPMIBT *ib = opaque; + + /* Make 
sure all the values are sane. */ + if (ib->outpos >= MAX_IPMI_MSG_SIZE || ib->outlen >= MAX_IPMI_MSG_SIZE || + ib->outpos >= ib->outlen) { + qemu_log_mask(LOG_GUEST_ERROR, + "ipmi:bt: vmstate transfer received bad out values: %d %d\n", + ib->outpos, ib->outlen); + ib->outpos = 0; + ib->outlen = 0; + } + + if (ib->inlen >= MAX_IPMI_MSG_SIZE) { + qemu_log_mask(LOG_GUEST_ERROR, + "ipmi:bt: vmstate transfer received bad in value: %d\n", + ib->inlen); + ib->inlen = 0; + } + + return 0; +} + +const VMStateDescription vmstate_IPMIBT = { + .name = TYPE_IPMI_INTERFACE_PREFIX "bt", + .version_id = 1, + .minimum_version_id = 1, + .post_load = ipmi_bt_vmstate_post_load, + .fields = (VMStateField[]) { + VMSTATE_BOOL(obf_irq_set, IPMIBT), + VMSTATE_BOOL(atn_irq_set, IPMIBT), + VMSTATE_BOOL(irqs_enabled, IPMIBT), + VMSTATE_UINT32(outpos, IPMIBT), + VMSTATE_UINT32(outlen, IPMIBT), + VMSTATE_UINT8_ARRAY(outmsg, IPMIBT, MAX_IPMI_MSG_SIZE), + VMSTATE_UINT32(inlen, IPMIBT), + VMSTATE_UINT8_ARRAY(inmsg, IPMIBT, MAX_IPMI_MSG_SIZE), + VMSTATE_UINT8(control_reg, IPMIBT), + VMSTATE_UINT8(mask_reg, IPMIBT), + VMSTATE_UINT8(waiting_rsp, IPMIBT), + VMSTATE_UINT8(waiting_seq, IPMIBT), + VMSTATE_END_OF_LIST() + } +}; + +void ipmi_bt_get_fwinfo(struct IPMIBT *ib, IPMIFwInfo *info) +{ + info->interface_name = "bt"; + info->interface_type = IPMI_SMBIOS_BT; + info->ipmi_spec_major_revision = 2; + info->ipmi_spec_minor_revision = 0; + info->base_address = ib->io_base; + info->register_length = ib->io_length; + info->register_spacing = 1; + info->memspace = IPMI_MEMSPACE_IO; + info->irq_type = IPMI_LEVEL_IRQ; +} + +void ipmi_bt_class_init(IPMIInterfaceClass *iic) +{ + iic->init = ipmi_bt_init; + iic->set_atn = ipmi_bt_set_atn; + iic->handle_rsp = ipmi_bt_handle_rsp; + iic->handle_if_event = ipmi_bt_handle_event; + iic->set_irq_enable = ipmi_bt_set_irq_enable; + iic->reset = ipmi_bt_handle_reset; +} diff --git a/hw/ipmi/ipmi_kcs.c b/hw/ipmi/ipmi_kcs.c new file mode 100644 index 000000000..a77612946 --- /dev/null +++ b/hw/ipmi/ipmi_kcs.c @@ -0,0 +1,423 @@ +/* + * QEMU IPMI KCS emulation + * + * Copyright (c) 2015,2017 Corey Minyard, MontaVista Software, LLC + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ +#include "qemu/osdep.h" +#include "migration/vmstate.h" +#include "qemu/log.h" +#include "qapi/error.h" +#include "hw/ipmi/ipmi_kcs.h" + +#define IPMI_KCS_OBF_BIT 0 +#define IPMI_KCS_IBF_BIT 1 +#define IPMI_KCS_SMS_ATN_BIT 2 +#define IPMI_KCS_CD_BIT 3 + +#define IPMI_KCS_OBF_MASK (1 << IPMI_KCS_OBF_BIT) +#define IPMI_KCS_GET_OBF(d) (((d) >> IPMI_KCS_OBF_BIT) & 0x1) +#define IPMI_KCS_SET_OBF(d, v) (d) = (((d) & ~IPMI_KCS_OBF_MASK) | \ + (((v) & 1) << IPMI_KCS_OBF_BIT)) +#define IPMI_KCS_IBF_MASK (1 << IPMI_KCS_IBF_BIT) +#define IPMI_KCS_GET_IBF(d) (((d) >> IPMI_KCS_IBF_BIT) & 0x1) +#define IPMI_KCS_SET_IBF(d, v) (d) = (((d) & ~IPMI_KCS_IBF_MASK) | \ + (((v) & 1) << IPMI_KCS_IBF_BIT)) +#define IPMI_KCS_SMS_ATN_MASK (1 << IPMI_KCS_SMS_ATN_BIT) +#define IPMI_KCS_GET_SMS_ATN(d) (((d) >> IPMI_KCS_SMS_ATN_BIT) & 0x1) +#define IPMI_KCS_SET_SMS_ATN(d, v) (d) = (((d) & ~IPMI_KCS_SMS_ATN_MASK) | \ + (((v) & 1) << IPMI_KCS_SMS_ATN_BIT)) +#define IPMI_KCS_CD_MASK (1 << IPMI_KCS_CD_BIT) +#define IPMI_KCS_GET_CD(d) (((d) >> IPMI_KCS_CD_BIT) & 0x1) +#define IPMI_KCS_SET_CD(d, v) (d) = (((d) & ~IPMI_KCS_CD_MASK) | \ + (((v) & 1) << IPMI_KCS_CD_BIT)) + +#define IPMI_KCS_IDLE_STATE 0 +#define IPMI_KCS_READ_STATE 1 +#define IPMI_KCS_WRITE_STATE 2 +#define IPMI_KCS_ERROR_STATE 3 + +#define IPMI_KCS_GET_STATE(d) (((d) >> 6) & 0x3) +#define IPMI_KCS_SET_STATE(d, v) ((d) = ((d) & ~0xc0) | (((v) & 0x3) << 6)) + +#define IPMI_KCS_ABORT_STATUS_CMD 0x60 +#define IPMI_KCS_WRITE_START_CMD 0x61 +#define IPMI_KCS_WRITE_END_CMD 0x62 +#define IPMI_KCS_READ_CMD 0x68 + +#define IPMI_KCS_STATUS_NO_ERR 0x00 +#define IPMI_KCS_STATUS_ABORTED_ERR 0x01 +#define IPMI_KCS_STATUS_BAD_CC_ERR 0x02 +#define IPMI_KCS_STATUS_LENGTH_ERR 0x06 + +static void ipmi_kcs_raise_irq(IPMIKCS *ik) +{ + if (ik->use_irq && ik->irqs_enabled && ik->raise_irq) { + ik->raise_irq(ik); + } +} + +static void ipmi_kcs_lower_irq(IPMIKCS *ik) +{ + if (ik->lower_irq) { + ik->lower_irq(ik); + } +} + +#define SET_OBF() \ + do { \ + IPMI_KCS_SET_OBF(ik->status_reg, 1); \ + if (!ik->obf_irq_set) { \ + ik->obf_irq_set = 1; \ + if (!ik->atn_irq_set) { \ + ipmi_kcs_raise_irq(ik); \ + } \ + } \ + } while (0) + +static void ipmi_kcs_signal(IPMIKCS *ik, IPMIInterface *ii) +{ + IPMIInterfaceClass *iic = IPMI_INTERFACE_GET_CLASS(ii); + + ik->do_wake = 1; + while (ik->do_wake) { + ik->do_wake = 0; + iic->handle_if_event(ii); + } +} + +static void ipmi_kcs_handle_event(IPMIInterface *ii) +{ + IPMIInterfaceClass *iic = IPMI_INTERFACE_GET_CLASS(ii); + IPMIKCS *ik = iic->get_backend_data(ii); + + if (ik->cmd_reg == IPMI_KCS_ABORT_STATUS_CMD) { + if (IPMI_KCS_GET_STATE(ik->status_reg) != IPMI_KCS_ERROR_STATE) { + ik->waiting_rsp++; /* Invalidate the message */ + ik->outmsg[0] = IPMI_KCS_STATUS_ABORTED_ERR; + ik->outlen = 1; + ik->outpos = 0; + IPMI_KCS_SET_STATE(ik->status_reg, IPMI_KCS_ERROR_STATE); + SET_OBF(); + } + goto out; + } + + switch (IPMI_KCS_GET_STATE(ik->status_reg)) { + case IPMI_KCS_IDLE_STATE: + if (ik->cmd_reg == IPMI_KCS_WRITE_START_CMD) { + IPMI_KCS_SET_STATE(ik->status_reg, IPMI_KCS_WRITE_STATE); + ik->cmd_reg = -1; + ik->write_end = 0; + ik->inlen = 0; + SET_OBF(); + } + break; + + case IPMI_KCS_READ_STATE: + handle_read: + if (ik->outpos >= ik->outlen) { + IPMI_KCS_SET_STATE(ik->status_reg, IPMI_KCS_IDLE_STATE); + SET_OBF(); + } else if (ik->data_in_reg == IPMI_KCS_READ_CMD) { + ik->data_out_reg = ik->outmsg[ik->outpos]; + ik->outpos++; + SET_OBF(); + } else { + ik->outmsg[0] = IPMI_KCS_STATUS_BAD_CC_ERR; + ik->outlen = 1; + ik->outpos = 0; + 
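+            /*
+             * Anything other than a READ during the read phase is a
+             * protocol error: report an illegal control code and drop
+             * into the error state.
+             */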
IPMI_KCS_SET_STATE(ik->status_reg, IPMI_KCS_ERROR_STATE); + SET_OBF(); + goto out; + } + break; + + case IPMI_KCS_WRITE_STATE: + if (ik->data_in_reg != -1) { + /* + * Don't worry about input overrun here, that will be + * handled in the BMC. + */ + if (ik->inlen < sizeof(ik->inmsg)) { + ik->inmsg[ik->inlen] = ik->data_in_reg; + } + ik->inlen++; + } + if (ik->write_end) { + IPMIBmcClass *bk = IPMI_BMC_GET_CLASS(ik->bmc); + ik->outlen = 0; + ik->write_end = 0; + ik->outpos = 0; + bk->handle_command(ik->bmc, ik->inmsg, ik->inlen, sizeof(ik->inmsg), + ik->waiting_rsp); + goto out_noibf; + } else if (ik->cmd_reg == IPMI_KCS_WRITE_END_CMD) { + ik->cmd_reg = -1; + ik->write_end = 1; + } + SET_OBF(); + break; + + case IPMI_KCS_ERROR_STATE: + if (ik->data_in_reg != -1) { + IPMI_KCS_SET_STATE(ik->status_reg, IPMI_KCS_READ_STATE); + ik->data_in_reg = IPMI_KCS_READ_CMD; + goto handle_read; + } + break; + } + + if (ik->cmd_reg != -1) { + /* Got an invalid command */ + ik->outmsg[0] = IPMI_KCS_STATUS_BAD_CC_ERR; + ik->outlen = 1; + ik->outpos = 0; + IPMI_KCS_SET_STATE(ik->status_reg, IPMI_KCS_ERROR_STATE); + } + + out: + ik->cmd_reg = -1; + ik->data_in_reg = -1; + IPMI_KCS_SET_IBF(ik->status_reg, 0); + out_noibf: + return; +} + +static void ipmi_kcs_handle_rsp(IPMIInterface *ii, uint8_t msg_id, + unsigned char *rsp, unsigned int rsp_len) +{ + IPMIInterfaceClass *iic = IPMI_INTERFACE_GET_CLASS(ii); + IPMIKCS *ik = iic->get_backend_data(ii); + + if (ik->waiting_rsp == msg_id) { + ik->waiting_rsp++; + if (rsp_len > sizeof(ik->outmsg)) { + ik->outmsg[0] = rsp[0]; + ik->outmsg[1] = rsp[1]; + ik->outmsg[2] = IPMI_CC_CANNOT_RETURN_REQ_NUM_BYTES; + ik->outlen = 3; + } else { + memcpy(ik->outmsg, rsp, rsp_len); + ik->outlen = rsp_len; + } + IPMI_KCS_SET_STATE(ik->status_reg, IPMI_KCS_READ_STATE); + ik->data_in_reg = IPMI_KCS_READ_CMD; + ipmi_kcs_signal(ik, ii); + } +} + + +static uint64_t ipmi_kcs_ioport_read(void *opaque, hwaddr addr, unsigned size) +{ + IPMIInterface *ii = opaque; + IPMIInterfaceClass *iic = IPMI_INTERFACE_GET_CLASS(ii); + IPMIKCS *ik = iic->get_backend_data(ii); + uint32_t ret; + + switch (addr & ik->size_mask) { + case 0: + ret = ik->data_out_reg; + IPMI_KCS_SET_OBF(ik->status_reg, 0); + if (ik->obf_irq_set) { + ik->obf_irq_set = 0; + if (!ik->atn_irq_set) { + ipmi_kcs_lower_irq(ik); + } + } + break; + + case 1: + ret = ik->status_reg; + if (ik->atn_irq_set) { + ik->atn_irq_set = 0; + if (!ik->obf_irq_set) { + ipmi_kcs_lower_irq(ik); + } + } + break; + + default: + ret = 0xff; + } + return ret; +} + +static void ipmi_kcs_ioport_write(void *opaque, hwaddr addr, uint64_t val, + unsigned size) +{ + IPMIInterface *ii = opaque; + IPMIInterfaceClass *iic = IPMI_INTERFACE_GET_CLASS(ii); + IPMIKCS *ik = iic->get_backend_data(ii); + + if (IPMI_KCS_GET_IBF(ik->status_reg)) { + return; + } + + switch (addr & ik->size_mask) { + case 0: + ik->data_in_reg = val; + break; + + case 1: + ik->cmd_reg = val; + break; + + default: + /* Ignore. 
*/ + break; + } + IPMI_KCS_SET_IBF(ik->status_reg, 1); + ipmi_kcs_signal(ik, ii); +} + +const MemoryRegionOps ipmi_kcs_io_ops = { + .read = ipmi_kcs_ioport_read, + .write = ipmi_kcs_ioport_write, + .impl = { + .min_access_size = 1, + .max_access_size = 1, + }, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static void ipmi_kcs_set_atn(IPMIInterface *ii, int val, int irq) +{ + IPMIInterfaceClass *iic = IPMI_INTERFACE_GET_CLASS(ii); + IPMIKCS *ik = iic->get_backend_data(ii); + + IPMI_KCS_SET_SMS_ATN(ik->status_reg, val); + if (val) { + if (irq && !ik->atn_irq_set) { + ik->atn_irq_set = 1; + if (!ik->obf_irq_set) { + ipmi_kcs_raise_irq(ik); + } + } + } else { + if (ik->atn_irq_set) { + ik->atn_irq_set = 0; + if (!ik->obf_irq_set) { + ipmi_kcs_lower_irq(ik); + } + } + } +} + +static void ipmi_kcs_set_irq_enable(IPMIInterface *ii, int val) +{ + IPMIInterfaceClass *iic = IPMI_INTERFACE_GET_CLASS(ii); + IPMIKCS *ik = iic->get_backend_data(ii); + + ik->irqs_enabled = val; +} + +/* min_size must be a power of 2. */ +static void ipmi_kcs_init(IPMIInterface *ii, unsigned int min_size, + Error **errp) +{ + IPMIInterfaceClass *iic = IPMI_INTERFACE_GET_CLASS(ii); + IPMIKCS *ik = iic->get_backend_data(ii); + + if (min_size == 0) { + min_size = 2; + } + ik->size_mask = min_size - 1; + ik->io_length = 2; + memory_region_init_io(&ik->io, NULL, &ipmi_kcs_io_ops, ii, "ipmi-kcs", + min_size); +} + +int ipmi_kcs_vmstate_post_load(void *opaque, int version) +{ + IPMIKCS *ik = opaque; + + /* Make sure all the values are sane. */ + if (ik->outpos >= MAX_IPMI_MSG_SIZE || ik->outlen >= MAX_IPMI_MSG_SIZE || + ik->outpos >= ik->outlen) { + qemu_log_mask(LOG_GUEST_ERROR, + "ipmi:kcs: vmstate transfer received bad out values: %d %d\n", + ik->outpos, ik->outlen); + ik->outpos = 0; + ik->outlen = 0; + } + + if (ik->inlen >= MAX_IPMI_MSG_SIZE) { + qemu_log_mask(LOG_GUEST_ERROR, + "ipmi:kcs: vmstate transfer received bad in value: %d\n", + ik->inlen); + ik->inlen = 0; + } + + return 0; +} + +static bool vmstate_kcs_before_version2(void *opaque, int version) +{ + return version <= 1; +} + +const VMStateDescription vmstate_IPMIKCS = { + .name = TYPE_IPMI_INTERFACE_PREFIX "kcs", + .version_id = 2, + .minimum_version_id = 1, + .post_load = ipmi_kcs_vmstate_post_load, + .fields = (VMStateField[]) { + VMSTATE_BOOL(obf_irq_set, IPMIKCS), + VMSTATE_BOOL(atn_irq_set, IPMIKCS), + VMSTATE_UNUSED_TEST(vmstate_kcs_before_version2, 1), /* Was use_irq */ + VMSTATE_BOOL(irqs_enabled, IPMIKCS), + VMSTATE_UINT32(outpos, IPMIKCS), + VMSTATE_UINT32_V(outlen, IPMIKCS, 2), + VMSTATE_UINT8_ARRAY(outmsg, IPMIKCS, MAX_IPMI_MSG_SIZE), + VMSTATE_UINT32_V(inlen, IPMIKCS, 2), + VMSTATE_UINT8_ARRAY(inmsg, IPMIKCS, MAX_IPMI_MSG_SIZE), + VMSTATE_BOOL(write_end, IPMIKCS), + VMSTATE_UINT8(status_reg, IPMIKCS), + VMSTATE_UINT8(data_out_reg, IPMIKCS), + VMSTATE_INT16(data_in_reg, IPMIKCS), + VMSTATE_INT16(cmd_reg, IPMIKCS), + VMSTATE_UINT8(waiting_rsp, IPMIKCS), + VMSTATE_END_OF_LIST() + } +}; + +void ipmi_kcs_get_fwinfo(IPMIKCS *ik, IPMIFwInfo *info) +{ + info->interface_name = "kcs"; + info->interface_type = IPMI_SMBIOS_KCS; + info->ipmi_spec_major_revision = 2; + info->ipmi_spec_minor_revision = 0; + info->base_address = ik->io_base; + info->i2c_slave_address = ik->bmc->slave_addr; + info->register_length = ik->io_length; + info->register_spacing = 1; + info->memspace = IPMI_MEMSPACE_IO; + info->irq_type = IPMI_LEVEL_IRQ; +} + +void ipmi_kcs_class_init(IPMIInterfaceClass *iic) +{ + iic->init = ipmi_kcs_init; + iic->set_atn = ipmi_kcs_set_atn; + 
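+    /* handle_rsp feeds the BMC's response back into the KCS read state. */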
iic->handle_rsp = ipmi_kcs_handle_rsp; + iic->handle_if_event = ipmi_kcs_handle_event; + iic->set_irq_enable = ipmi_kcs_set_irq_enable; +} diff --git a/hw/ipmi/isa_ipmi_bt.c b/hw/ipmi/isa_ipmi_bt.c new file mode 100644 index 000000000..02625eb94 --- /dev/null +++ b/hw/ipmi/isa_ipmi_bt.c @@ -0,0 +1,173 @@ +/* + * QEMU ISA IPMI BT emulation + * + * Copyright (c) 2015 Corey Minyard, MontaVista Software, LLC + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "qemu/module.h" +#include "qapi/error.h" +#include "hw/irq.h" +#include "hw/ipmi/ipmi_bt.h" +#include "hw/isa/isa.h" +#include "hw/qdev-properties.h" +#include "migration/vmstate.h" +#include "qom/object.h" + +#define TYPE_ISA_IPMI_BT "isa-ipmi-bt" +OBJECT_DECLARE_SIMPLE_TYPE(ISAIPMIBTDevice, ISA_IPMI_BT) + +struct ISAIPMIBTDevice { + ISADevice dev; + int32_t isairq; + qemu_irq irq; + IPMIBT bt; + uint32_t uuid; +}; + +static void isa_ipmi_bt_get_fwinfo(struct IPMIInterface *ii, IPMIFwInfo *info) +{ + ISAIPMIBTDevice *iib = ISA_IPMI_BT(ii); + + ipmi_bt_get_fwinfo(&iib->bt, info); + info->interrupt_number = iib->isairq; + info->i2c_slave_address = iib->bt.bmc->slave_addr; + info->uuid = iib->uuid; +} + +static void isa_ipmi_bt_raise_irq(IPMIBT *ib) +{ + ISAIPMIBTDevice *iib = ib->opaque; + + qemu_irq_raise(iib->irq); +} + +static void isa_ipmi_bt_lower_irq(IPMIBT *ib) +{ + ISAIPMIBTDevice *iib = ib->opaque; + + qemu_irq_lower(iib->irq); +} + +static void isa_ipmi_bt_realize(DeviceState *dev, Error **errp) +{ + Error *err = NULL; + ISADevice *isadev = ISA_DEVICE(dev); + ISAIPMIBTDevice *iib = ISA_IPMI_BT(dev); + IPMIInterface *ii = IPMI_INTERFACE(dev); + IPMIInterfaceClass *iic = IPMI_INTERFACE_GET_CLASS(ii); + + if (!iib->bt.bmc) { + error_setg(errp, "IPMI device requires a bmc attribute to be set"); + return; + } + + iib->uuid = ipmi_next_uuid(); + + iib->bt.bmc->intf = ii; + iib->bt.opaque = iib; + + iic->init(ii, 0, &err); + if (err) { + error_propagate(errp, err); + return; + } + + if (iib->isairq > 0) { + isa_init_irq(isadev, &iib->irq, iib->isairq); + iib->bt.use_irq = 1; + iib->bt.raise_irq = isa_ipmi_bt_raise_irq; + iib->bt.lower_irq = isa_ipmi_bt_lower_irq; + } + + qdev_set_legacy_instance_id(dev, iib->bt.io_base, iib->bt.io_length); + + isa_register_ioport(isadev, &iib->bt.io, iib->bt.io_base); +} + +static const VMStateDescription vmstate_ISAIPMIBTDevice = { + .name = TYPE_IPMI_INTERFACE_PREFIX "isa-bt", + .version_id = 2, + .minimum_version_id = 2, + /* + * Version 
1 had messed up the array transfer, it's not even usable + * because it used VMSTATE_VBUFFER_UINT32, but it did not transfer + * the buffer length, so random things would happen. + */ + .fields = (VMStateField[]) { + VMSTATE_STRUCT(bt, ISAIPMIBTDevice, 1, vmstate_IPMIBT, IPMIBT), + VMSTATE_END_OF_LIST() + } +}; + +static void isa_ipmi_bt_init(Object *obj) +{ + ISAIPMIBTDevice *iib = ISA_IPMI_BT(obj); + + ipmi_bmc_find_and_link(obj, (Object **) &iib->bt.bmc); + + vmstate_register(NULL, 0, &vmstate_ISAIPMIBTDevice, iib); +} + +static void *isa_ipmi_bt_get_backend_data(IPMIInterface *ii) +{ + ISAIPMIBTDevice *iib = ISA_IPMI_BT(ii); + + return &iib->bt; +} + +static Property ipmi_isa_properties[] = { + DEFINE_PROP_UINT32("ioport", ISAIPMIBTDevice, bt.io_base, 0xe4), + DEFINE_PROP_INT32("irq", ISAIPMIBTDevice, isairq, 5), + DEFINE_PROP_END_OF_LIST(), +}; + +static void isa_ipmi_bt_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + IPMIInterfaceClass *iic = IPMI_INTERFACE_CLASS(oc); + + dc->realize = isa_ipmi_bt_realize; + device_class_set_props(dc, ipmi_isa_properties); + + iic->get_backend_data = isa_ipmi_bt_get_backend_data; + ipmi_bt_class_init(iic); + iic->get_fwinfo = isa_ipmi_bt_get_fwinfo; +} + +static const TypeInfo isa_ipmi_bt_info = { + .name = TYPE_ISA_IPMI_BT, + .parent = TYPE_ISA_DEVICE, + .instance_size = sizeof(ISAIPMIBTDevice), + .instance_init = isa_ipmi_bt_init, + .class_init = isa_ipmi_bt_class_init, + .interfaces = (InterfaceInfo[]) { + { TYPE_IPMI_INTERFACE }, + { } + } +}; + +static void ipmi_register_types(void) +{ + type_register_static(&isa_ipmi_bt_info); +} + +type_init(ipmi_register_types) diff --git a/hw/ipmi/isa_ipmi_kcs.c b/hw/ipmi/isa_ipmi_kcs.c new file mode 100644 index 000000000..3b23ad08b --- /dev/null +++ b/hw/ipmi/isa_ipmi_kcs.c @@ -0,0 +1,180 @@ +/* + * QEMU ISA IPMI KCS emulation + * + * Copyright (c) 2015,2017 Corey Minyard, MontaVista Software, LLC + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include "qemu/module.h" +#include "qapi/error.h" +#include "hw/irq.h" +#include "hw/ipmi/ipmi_kcs.h" +#include "hw/isa/isa.h" +#include "hw/qdev-properties.h" +#include "migration/vmstate.h" +#include "qom/object.h" + +#define TYPE_ISA_IPMI_KCS "isa-ipmi-kcs" +OBJECT_DECLARE_SIMPLE_TYPE(ISAIPMIKCSDevice, ISA_IPMI_KCS) + +struct ISAIPMIKCSDevice { + ISADevice dev; + int32_t isairq; + qemu_irq irq; + IPMIKCS kcs; + uint32_t uuid; +}; + +static void isa_ipmi_kcs_get_fwinfo(IPMIInterface *ii, IPMIFwInfo *info) +{ + ISAIPMIKCSDevice *iik = ISA_IPMI_KCS(ii); + + ipmi_kcs_get_fwinfo(&iik->kcs, info); + info->interrupt_number = iik->isairq; + info->uuid = iik->uuid; +} + +static void isa_ipmi_kcs_raise_irq(IPMIKCS *ik) +{ + ISAIPMIKCSDevice *iik = ik->opaque; + + qemu_irq_raise(iik->irq); +} + +static void isa_ipmi_kcs_lower_irq(IPMIKCS *ik) +{ + ISAIPMIKCSDevice *iik = ik->opaque; + + qemu_irq_lower(iik->irq); +} + +static void ipmi_isa_realize(DeviceState *dev, Error **errp) +{ + Error *err = NULL; + ISADevice *isadev = ISA_DEVICE(dev); + ISAIPMIKCSDevice *iik = ISA_IPMI_KCS(dev); + IPMIInterface *ii = IPMI_INTERFACE(dev); + IPMIInterfaceClass *iic = IPMI_INTERFACE_GET_CLASS(ii); + + if (!iik->kcs.bmc) { + error_setg(errp, "IPMI device requires a bmc attribute to be set"); + return; + } + + iik->uuid = ipmi_next_uuid(); + + iik->kcs.bmc->intf = ii; + iik->kcs.opaque = iik; + + iic->init(ii, 0, &err); + if (err) { + error_propagate(errp, err); + return; + } + + if (iik->isairq > 0) { + isa_init_irq(isadev, &iik->irq, iik->isairq); + iik->kcs.use_irq = 1; + iik->kcs.raise_irq = isa_ipmi_kcs_raise_irq; + iik->kcs.lower_irq = isa_ipmi_kcs_lower_irq; + } + + qdev_set_legacy_instance_id(dev, iik->kcs.io_base, iik->kcs.io_length); + + isa_register_ioport(isadev, &iik->kcs.io, iik->kcs.io_base); +} + +static bool vmstate_kcs_before_version2(void *opaque, int version) +{ + return version <= 1; +} + +static const VMStateDescription vmstate_ISAIPMIKCSDevice = { + .name = TYPE_IPMI_INTERFACE, + .version_id = 2, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_VSTRUCT_TEST(kcs, ISAIPMIKCSDevice, vmstate_kcs_before_version2, + 0, vmstate_IPMIKCS, IPMIKCS, 1), + VMSTATE_VSTRUCT_V(kcs, ISAIPMIKCSDevice, 2, vmstate_IPMIKCS, + IPMIKCS, 2), + VMSTATE_END_OF_LIST() + } +}; + +static void isa_ipmi_kcs_init(Object *obj) +{ + ISAIPMIKCSDevice *iik = ISA_IPMI_KCS(obj); + + ipmi_bmc_find_and_link(obj, (Object **) &iik->kcs.bmc); + + /* + * Version 1 had an incorrect name, it clashed with the BT + * IPMI device, so receive it, but transmit a different + * version. 
+ */ + vmstate_register(NULL, 0, &vmstate_ISAIPMIKCSDevice, iik); +} + +static void *isa_ipmi_kcs_get_backend_data(IPMIInterface *ii) +{ + ISAIPMIKCSDevice *iik = ISA_IPMI_KCS(ii); + + return &iik->kcs; +} + +static Property ipmi_isa_properties[] = { + DEFINE_PROP_UINT32("ioport", ISAIPMIKCSDevice, kcs.io_base, 0xca2), + DEFINE_PROP_INT32("irq", ISAIPMIKCSDevice, isairq, 5), + DEFINE_PROP_END_OF_LIST(), +}; + +static void isa_ipmi_kcs_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + IPMIInterfaceClass *iic = IPMI_INTERFACE_CLASS(oc); + + dc->realize = ipmi_isa_realize; + device_class_set_props(dc, ipmi_isa_properties); + + iic->get_backend_data = isa_ipmi_kcs_get_backend_data; + ipmi_kcs_class_init(iic); + iic->get_fwinfo = isa_ipmi_kcs_get_fwinfo; +} + +static const TypeInfo isa_ipmi_kcs_info = { + .name = TYPE_ISA_IPMI_KCS, + .parent = TYPE_ISA_DEVICE, + .instance_size = sizeof(ISAIPMIKCSDevice), + .instance_init = isa_ipmi_kcs_init, + .class_init = isa_ipmi_kcs_class_init, + .interfaces = (InterfaceInfo[]) { + { TYPE_IPMI_INTERFACE }, + { } + } +}; + +static void ipmi_register_types(void) +{ + type_register_static(&isa_ipmi_kcs_info); +} + +type_init(ipmi_register_types) diff --git a/hw/ipmi/meson.build b/hw/ipmi/meson.build new file mode 100644 index 000000000..9622ea2a2 --- /dev/null +++ b/hw/ipmi/meson.build @@ -0,0 +1,11 @@ +ipmi_ss = ss.source_set() +ipmi_ss.add(when: 'CONFIG_IPMI', if_true: files('ipmi.c', 'ipmi_kcs.c', 'ipmi_bt.c')) +ipmi_ss.add(when: 'CONFIG_IPMI_LOCAL', if_true: files('ipmi_bmc_sim.c')) +ipmi_ss.add(when: 'CONFIG_IPMI_EXTERN', if_true: files('ipmi_bmc_extern.c')) +ipmi_ss.add(when: 'CONFIG_ISA_IPMI_KCS', if_true: files('isa_ipmi_kcs.c')) +ipmi_ss.add(when: 'CONFIG_PCI_IPMI_KCS', if_true: files('pci_ipmi_kcs.c')) +ipmi_ss.add(when: 'CONFIG_ISA_IPMI_BT', if_true: files('isa_ipmi_bt.c')) +ipmi_ss.add(when: 'CONFIG_PCI_IPMI_BT', if_true: files('pci_ipmi_bt.c')) +ipmi_ss.add(when: 'CONFIG_IPMI_SSIF', if_true: files('smbus_ipmi.c')) + +softmmu_ss.add_all(when: 'CONFIG_IPMI', if_true: ipmi_ss) diff --git a/hw/ipmi/pci_ipmi_bt.c b/hw/ipmi/pci_ipmi_bt.c new file mode 100644 index 000000000..b6e52730d --- /dev/null +++ b/hw/ipmi/pci_ipmi_bt.c @@ -0,0 +1,148 @@ +/* + * QEMU PCI IPMI BT emulation + * + * Copyright (c) 2017 Corey Minyard, MontaVista Software, LLC + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ +#include "qemu/osdep.h" +#include "migration/vmstate.h" +#include "qapi/error.h" +#include "hw/ipmi/ipmi_bt.h" +#include "hw/pci/pci.h" +#include "qom/object.h" + +#define TYPE_PCI_IPMI_BT "pci-ipmi-bt" +OBJECT_DECLARE_SIMPLE_TYPE(PCIIPMIBTDevice, PCI_IPMI_BT) + +struct PCIIPMIBTDevice { + PCIDevice dev; + IPMIBT bt; + bool irq_enabled; + uint32_t uuid; +}; + +static void pci_ipmi_raise_irq(IPMIBT *ik) +{ + PCIIPMIBTDevice *pik = ik->opaque; + + pci_set_irq(&pik->dev, true); +} + +static void pci_ipmi_lower_irq(IPMIBT *ik) +{ + PCIIPMIBTDevice *pik = ik->opaque; + + pci_set_irq(&pik->dev, false); +} + +static void pci_ipmi_bt_realize(PCIDevice *pd, Error **errp) +{ + Error *err = NULL; + PCIIPMIBTDevice *pik = PCI_IPMI_BT(pd); + IPMIInterface *ii = IPMI_INTERFACE(pd); + IPMIInterfaceClass *iic = IPMI_INTERFACE_GET_CLASS(ii); + + if (!pik->bt.bmc) { + error_setg(errp, "IPMI device requires a bmc attribute to be set"); + return; + } + + pik->uuid = ipmi_next_uuid(); + + pik->bt.bmc->intf = ii; + pik->bt.opaque = pik; + + pci_config_set_prog_interface(pd->config, 0x02); /* BT */ + pci_config_set_interrupt_pin(pd->config, 0x01); + pik->bt.use_irq = 1; + pik->bt.raise_irq = pci_ipmi_raise_irq; + pik->bt.lower_irq = pci_ipmi_lower_irq; + + iic->init(ii, 8, &err); + if (err) { + error_propagate(errp, err); + return; + } + pci_register_bar(pd, 0, PCI_BASE_ADDRESS_SPACE_IO, &pik->bt.io); +} + +const VMStateDescription vmstate_PCIIPMIBTDevice = { + .name = TYPE_IPMI_INTERFACE_PREFIX "pci-bt", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_PCI_DEVICE(dev, PCIIPMIBTDevice), + VMSTATE_STRUCT(bt, PCIIPMIBTDevice, 1, vmstate_IPMIBT, IPMIBT), + VMSTATE_END_OF_LIST() + } +}; + +static void pci_ipmi_bt_instance_init(Object *obj) +{ + PCIIPMIBTDevice *pik = PCI_IPMI_BT(obj); + + ipmi_bmc_find_and_link(obj, (Object **) &pik->bt.bmc); +} + +static void *pci_ipmi_bt_get_backend_data(IPMIInterface *ii) +{ + PCIIPMIBTDevice *pik = PCI_IPMI_BT(ii); + + return &pik->bt; +} + +static void pci_ipmi_bt_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + PCIDeviceClass *pdc = PCI_DEVICE_CLASS(oc); + IPMIInterfaceClass *iic = IPMI_INTERFACE_CLASS(oc); + + pdc->vendor_id = PCI_VENDOR_ID_QEMU; + pdc->device_id = PCI_DEVICE_ID_QEMU_IPMI; + pdc->revision = 1; + pdc->class_id = PCI_CLASS_SERIAL_IPMI; + + dc->vmsd = &vmstate_PCIIPMIBTDevice; + dc->desc = "PCI IPMI BT"; + pdc->realize = pci_ipmi_bt_realize; + + iic->get_backend_data = pci_ipmi_bt_get_backend_data; + ipmi_bt_class_init(iic); +} + +static const TypeInfo pci_ipmi_bt_info = { + .name = TYPE_PCI_IPMI_BT, + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(PCIIPMIBTDevice), + .instance_init = pci_ipmi_bt_instance_init, + .class_init = pci_ipmi_bt_class_init, + .interfaces = (InterfaceInfo[]) { + { TYPE_IPMI_INTERFACE }, + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { } + } +}; + +static void pci_ipmi_bt_register_types(void) +{ + type_register_static(&pci_ipmi_bt_info); +} + +type_init(pci_ipmi_bt_register_types) diff --git a/hw/ipmi/pci_ipmi_kcs.c b/hw/ipmi/pci_ipmi_kcs.c new file mode 100644 index 000000000..de1341886 --- /dev/null +++ b/hw/ipmi/pci_ipmi_kcs.c @@ -0,0 +1,148 @@ +/* + * QEMU PCI IPMI KCS emulation + * + * Copyright (c) 2017 Corey Minyard, MontaVista Software, LLC + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, 
including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ +#include "qemu/osdep.h" +#include "migration/vmstate.h" +#include "qapi/error.h" +#include "hw/ipmi/ipmi_kcs.h" +#include "hw/pci/pci.h" +#include "qom/object.h" + +#define TYPE_PCI_IPMI_KCS "pci-ipmi-kcs" +OBJECT_DECLARE_SIMPLE_TYPE(PCIIPMIKCSDevice, PCI_IPMI_KCS) + +struct PCIIPMIKCSDevice { + PCIDevice dev; + IPMIKCS kcs; + bool irq_enabled; + uint32_t uuid; +}; + +static void pci_ipmi_raise_irq(IPMIKCS *ik) +{ + PCIIPMIKCSDevice *pik = ik->opaque; + + pci_set_irq(&pik->dev, true); +} + +static void pci_ipmi_lower_irq(IPMIKCS *ik) +{ + PCIIPMIKCSDevice *pik = ik->opaque; + + pci_set_irq(&pik->dev, false); +} + +static void pci_ipmi_kcs_realize(PCIDevice *pd, Error **errp) +{ + Error *err = NULL; + PCIIPMIKCSDevice *pik = PCI_IPMI_KCS(pd); + IPMIInterface *ii = IPMI_INTERFACE(pd); + IPMIInterfaceClass *iic = IPMI_INTERFACE_GET_CLASS(ii); + + if (!pik->kcs.bmc) { + error_setg(errp, "IPMI device requires a bmc attribute to be set"); + return; + } + + pik->uuid = ipmi_next_uuid(); + + pik->kcs.bmc->intf = ii; + pik->kcs.opaque = pik; + + pci_config_set_prog_interface(pd->config, 0x01); /* KCS */ + pci_config_set_interrupt_pin(pd->config, 0x01); + pik->kcs.use_irq = 1; + pik->kcs.raise_irq = pci_ipmi_raise_irq; + pik->kcs.lower_irq = pci_ipmi_lower_irq; + + iic->init(ii, 8, &err); + if (err) { + error_propagate(errp, err); + return; + } + pci_register_bar(pd, 0, PCI_BASE_ADDRESS_SPACE_IO, &pik->kcs.io); +} + +const VMStateDescription vmstate_PCIIPMIKCSDevice = { + .name = TYPE_IPMI_INTERFACE_PREFIX "pci-kcs", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_PCI_DEVICE(dev, PCIIPMIKCSDevice), + VMSTATE_STRUCT(kcs, PCIIPMIKCSDevice, 1, vmstate_IPMIKCS, IPMIKCS), + VMSTATE_END_OF_LIST() + } +}; + +static void pci_ipmi_kcs_instance_init(Object *obj) +{ + PCIIPMIKCSDevice *pik = PCI_IPMI_KCS(obj); + + ipmi_bmc_find_and_link(obj, (Object **) &pik->kcs.bmc); +} + +static void *pci_ipmi_kcs_get_backend_data(IPMIInterface *ii) +{ + PCIIPMIKCSDevice *pik = PCI_IPMI_KCS(ii); + + return &pik->kcs; +} + +static void pci_ipmi_kcs_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + PCIDeviceClass *pdc = PCI_DEVICE_CLASS(oc); + IPMIInterfaceClass *iic = IPMI_INTERFACE_CLASS(oc); + + pdc->vendor_id = PCI_VENDOR_ID_QEMU; + pdc->device_id = PCI_DEVICE_ID_QEMU_IPMI; + pdc->revision = 1; + pdc->class_id = PCI_CLASS_SERIAL_IPMI; + + dc->vmsd = &vmstate_PCIIPMIKCSDevice; + dc->desc = "PCI IPMI KCS"; + pdc->realize = pci_ipmi_kcs_realize; + + iic->get_backend_data = pci_ipmi_kcs_get_backend_data; + ipmi_kcs_class_init(iic); +} + +static const TypeInfo pci_ipmi_kcs_info = { 
+ .name = TYPE_PCI_IPMI_KCS, + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(PCIIPMIKCSDevice), + .instance_init = pci_ipmi_kcs_instance_init, + .class_init = pci_ipmi_kcs_class_init, + .interfaces = (InterfaceInfo[]) { + { TYPE_IPMI_INTERFACE }, + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { } + } +}; + +static void pci_ipmi_kcs_register_types(void) +{ + type_register_static(&pci_ipmi_kcs_info); +} + +type_init(pci_ipmi_kcs_register_types) diff --git a/hw/ipmi/smbus_ipmi.c b/hw/ipmi/smbus_ipmi.c new file mode 100644 index 000000000..1fdf0a66b --- /dev/null +++ b/hw/ipmi/smbus_ipmi.c @@ -0,0 +1,385 @@ +/* + * QEMU IPMI SMBus (SSIF) emulation + * + * Copyright (c) 2015,2016 Corey Minyard, MontaVista Software, LLC + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ +#include "qemu/osdep.h" +#include "migration/vmstate.h" +#include "hw/i2c/smbus_slave.h" +#include "qapi/error.h" +#include "qemu/error-report.h" +#include "hw/ipmi/ipmi.h" +#include "qom/object.h" + +#define TYPE_SMBUS_IPMI "smbus-ipmi" +OBJECT_DECLARE_SIMPLE_TYPE(SMBusIPMIDevice, SMBUS_IPMI) + +#define SSIF_IPMI_REQUEST 2 +#define SSIF_IPMI_MULTI_PART_REQUEST_START 6 +#define SSIF_IPMI_MULTI_PART_REQUEST_MIDDLE 7 +#define SSIF_IPMI_MULTI_PART_REQUEST_END 8 +#define SSIF_IPMI_RESPONSE 3 +#define SSIF_IPMI_MULTI_PART_RESPONSE_MIDDLE 9 +#define SSIF_IPMI_MULTI_PART_RETRY 0xa + +#define MAX_SSIF_IPMI_MSG_SIZE 255 +#define MAX_SSIF_IPMI_MSG_CHUNK 32 + +#define IPMI_GET_SYS_INTF_CAP_CMD 0x57 + +struct SMBusIPMIDevice { + SMBusDevice parent; + + IPMIBmc *bmc; + + uint8_t outmsg[MAX_SSIF_IPMI_MSG_SIZE]; + uint32_t outlen; + uint32_t currblk; + + /* Holds the SMBUS message currently being sent to the host. */ + uint8_t outbuf[MAX_SSIF_IPMI_MSG_CHUNK + 1]; /* len + message. */ + uint32_t outpos; + + uint8_t inmsg[MAX_SSIF_IPMI_MSG_SIZE]; + uint32_t inlen; + + /* + * This is a response number that we send with the command to make + * sure that the response matches the command. + */ + uint8_t waiting_rsp; + + uint32_t uuid; +}; + +static void smbus_ipmi_handle_event(IPMIInterface *ii) +{ + /* No interrupts, so nothing to do here. 
*/
+}
+
+static void smbus_ipmi_handle_rsp(IPMIInterface *ii, uint8_t msg_id,
+                                  unsigned char *rsp, unsigned int rsp_len)
+{
+    SMBusIPMIDevice *sid = SMBUS_IPMI(ii);
+
+    if (sid->waiting_rsp == msg_id) {
+        sid->waiting_rsp++;
+
+        if (rsp_len > MAX_SSIF_IPMI_MSG_SIZE) {
+            rsp[2] = IPMI_CC_REQUEST_DATA_TRUNCATED;
+            rsp_len = MAX_SSIF_IPMI_MSG_SIZE;
+        }
+        memcpy(sid->outmsg, rsp, rsp_len);
+        sid->outlen = rsp_len;
+        sid->outpos = 0;
+        sid->currblk = 0;
+    }
+}
+
+static void smbus_ipmi_set_atn(IPMIInterface *ii, int val, int irq)
+{
+    /* This is where PEC would go. */
+}
+
+static void smbus_ipmi_set_irq_enable(IPMIInterface *ii, int val)
+{
+}
+
+static void smbus_ipmi_send_msg(SMBusIPMIDevice *sid)
+{
+    uint8_t *msg = sid->inmsg;
+    uint32_t len = sid->inlen;
+    IPMIBmcClass *bk = IPMI_BMC_GET_CLASS(sid->bmc);
+
+    sid->outlen = 0;
+    sid->outpos = 0;
+    sid->currblk = 0;
+
+    if (msg[0] == (IPMI_NETFN_APP << 2) && msg[1] == IPMI_GET_SYS_INTF_CAP_CMD)
+    {
+        /* We handle this ourselves. */
+        sid->outmsg[0] = (IPMI_NETFN_APP + 1) << 2;
+        sid->outmsg[1] = msg[1];
+        if (len < 3) {
+            sid->outmsg[2] = IPMI_CC_REQUEST_DATA_LENGTH_INVALID;
+            sid->outlen = 3;
+        } else if ((msg[2] & 0x0f) != 0) {
+            sid->outmsg[2] = IPMI_CC_INVALID_DATA_FIELD;
+            sid->outlen = 3;
+        } else {
+            sid->outmsg[2] = 0;
+            sid->outmsg[3] = 0;
+            sid->outmsg[4] = (2 << 6); /* Multi-part supported. */
+            sid->outmsg[5] = MAX_SSIF_IPMI_MSG_SIZE;
+            sid->outmsg[6] = MAX_SSIF_IPMI_MSG_SIZE;
+            sid->outlen = 7;
+        }
+        return;
+    }
+
+    bk->handle_command(sid->bmc, sid->inmsg, sid->inlen, sizeof(sid->inmsg),
+                       sid->waiting_rsp);
+}
+
+static uint8_t ipmi_receive_byte(SMBusDevice *dev)
+{
+    SMBusIPMIDevice *sid = SMBUS_IPMI(dev);
+
+    if (sid->outpos >= sizeof(sid->outbuf)) {
+        return 0xff;
+    }
+
+    return sid->outbuf[sid->outpos++];
+}
+
+static int ipmi_load_readbuf(SMBusIPMIDevice *sid)
+{
+    unsigned int block = sid->currblk, pos, len;
+
+    if (sid->outlen == 0) {
+        return -1;
+    }
+
+    if (sid->outlen <= 32) {
+        if (block != 0) {
+            return -1;
+        }
+        sid->outbuf[0] = sid->outlen;
+        memcpy(sid->outbuf + 1, sid->outmsg, sid->outlen);
+        sid->outpos = 0;
+        return 0;
+    }
+
+    if (block == 0) {
+        sid->outbuf[0] = 32;
+        sid->outbuf[1] = 0;
+        sid->outbuf[2] = 1;
+        memcpy(sid->outbuf + 3, sid->outmsg, 30);
+        sid->outpos = 0;
+        return 0;
+    }
+
+    /*
+     * Calculate the position in outmsg. 30 for the first block, 31
+     * for the rest of the blocks.
+     */
+    pos = 30 + (block - 1) * 31;
+
+    if (pos >= sid->outlen) {
+        return -1;
+    }
+
+    len = sid->outlen - pos;
+    if (len > 31) {
+        /* More chunks after this. */
+        len = 31;
+        /* Blocks start at 0 for the first middle transaction. */
+        sid->outbuf[1] = block - 1;
+    } else {
+        sid->outbuf[1] = 0xff; /* End of message marker. */
+    }
+
+    sid->outbuf[0] = len + 1;
+    memcpy(sid->outbuf + 2, sid->outmsg + pos, len);
+    sid->outpos = 0;
+    return 0;
+}
+
+static int ipmi_write_data(SMBusDevice *dev, uint8_t *buf, uint8_t len)
+{
+    SMBusIPMIDevice *sid = SMBUS_IPMI(dev);
+    bool send = false;
+    uint8_t cmd;
+    int ret = 0;
+
+    /* length is guaranteed to be >= 1. */
+    cmd = *buf++;
+    len--;
+
+    /* Handle read requests, which don't have any data in the write part. */
+    switch (cmd) {
+    case SSIF_IPMI_RESPONSE:
+        sid->currblk = 0;
+        ret = ipmi_load_readbuf(sid);
+        break;
+
+    case SSIF_IPMI_MULTI_PART_RESPONSE_MIDDLE:
+        sid->currblk++;
+        ret = ipmi_load_readbuf(sid);
+        break;
+
+    case SSIF_IPMI_MULTI_PART_RETRY:
+        if (len >= 1) {
+            sid->currblk = buf[0];
+            ret = ipmi_load_readbuf(sid);
+        } else {
+            ret = -1;
+        }
+        break;
+
+    default:
+        break;
+    }
+
+    /* This should be a message write; make sure the length is there and correct. */
+    if (len >= 1) {
+        if (*buf != len - 1 || *buf > MAX_SSIF_IPMI_MSG_CHUNK) {
+            return -1; /* Bogus message */
+        }
+        buf++;
+        len--;
+    }
+
+    switch (cmd) {
+    case SSIF_IPMI_REQUEST:
+        send = true;
+        /* FALLTHRU */
+    case SSIF_IPMI_MULTI_PART_REQUEST_START:
+        if (len < 2) {
+            return -1; /* Bogus. */
+        }
+        memcpy(sid->inmsg, buf, len);
+        sid->inlen = len;
+        break;
+
+    case SSIF_IPMI_MULTI_PART_REQUEST_END:
+        send = true;
+        /* FALLTHRU */
+    case SSIF_IPMI_MULTI_PART_REQUEST_MIDDLE:
+        if (!sid->inlen) {
+            return -1; /* Bogus. */
+        }
+        if (sid->inlen + len > MAX_SSIF_IPMI_MSG_SIZE) {
+            sid->inlen = 0; /* Discard the message. */
+            return -1; /* Bogus. */
+        }
+        if (len < 32) {
+            /*
+             * Special hack, a multi-part middle that is less than 32 bytes
+             * marks the end of a message. The specification is fairly
+             * confusing, so some systems do this, even sending a zero
+             * length end message to mark the end.
+             */
+            send = true;
+        }
+        memcpy(sid->inmsg + sid->inlen, buf, len);
+        sid->inlen += len;
+        break;
+    }
+
+    if (send && sid->inlen) {
+        smbus_ipmi_send_msg(sid);
+    }
+
+    return ret;
+}
+
+static const VMStateDescription vmstate_smbus_ipmi = {
+    .name = TYPE_SMBUS_IPMI,
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .fields = (VMStateField[]) {
+        VMSTATE_SMBUS_DEVICE(parent, SMBusIPMIDevice),
+        VMSTATE_UINT8(waiting_rsp, SMBusIPMIDevice),
+        VMSTATE_UINT32(outlen, SMBusIPMIDevice),
+        VMSTATE_UINT32(currblk, SMBusIPMIDevice),
+        VMSTATE_UINT8_ARRAY(outmsg, SMBusIPMIDevice, MAX_SSIF_IPMI_MSG_SIZE),
+        VMSTATE_UINT32(outpos, SMBusIPMIDevice),
+        VMSTATE_UINT8_ARRAY(outbuf, SMBusIPMIDevice,
+                            MAX_SSIF_IPMI_MSG_CHUNK + 1),
+        VMSTATE_UINT32(inlen, SMBusIPMIDevice),
+        VMSTATE_UINT8_ARRAY(inmsg, SMBusIPMIDevice, MAX_SSIF_IPMI_MSG_SIZE),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+static void smbus_ipmi_realize(DeviceState *dev, Error **errp)
+{
+    SMBusIPMIDevice *sid = SMBUS_IPMI(dev);
+    IPMIInterface *ii = IPMI_INTERFACE(dev);
+
+    if (!sid->bmc) {
+        error_setg(errp, "IPMI device requires a bmc attribute to be set");
+        return;
+    }
+
+    sid->uuid = ipmi_next_uuid();
+
+    sid->bmc->intf = ii;
+}
+
+static void smbus_ipmi_init(Object *obj)
+{
+    SMBusIPMIDevice *sid = SMBUS_IPMI(obj);
+
+    ipmi_bmc_find_and_link(obj, (Object **) &sid->bmc);
+}
+
+static void smbus_ipmi_get_fwinfo(struct IPMIInterface *ii, IPMIFwInfo *info)
+{
+    SMBusIPMIDevice *sid = SMBUS_IPMI(ii);
+
+    info->interface_name = "smbus";
+    info->interface_type = IPMI_SMBIOS_SSIF;
+    info->ipmi_spec_major_revision = 2;
+    info->ipmi_spec_minor_revision = 0;
+    info->i2c_slave_address = sid->bmc->slave_addr;
+    info->base_address = sid->parent.i2c.address;
+    info->memspace = IPMI_MEMSPACE_SMBUS;
+    info->register_spacing = 1;
+    info->uuid = sid->uuid;
+}
+
+static void smbus_ipmi_class_init(ObjectClass *oc, void *data)
+{
+    DeviceClass *dc = DEVICE_CLASS(oc);
+    IPMIInterfaceClass *iic = IPMI_INTERFACE_CLASS(oc);
+    SMBusDeviceClass *sc = SMBUS_DEVICE_CLASS(oc);
+
+    sc->receive_byte = ipmi_receive_byte;
+    sc->write_data = ipmi_write_data;
+    dc->vmsd = &vmstate_smbus_ipmi;
+    dc->realize = smbus_ipmi_realize;
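+    /*
+     * SSIF has no interrupt line to the host, so the set_atn,
+     * set_irq_enable and handle_if_event hooks wired up below are
+     * effectively no-ops.
+     */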
iic->set_atn = smbus_ipmi_set_atn; + iic->handle_rsp = smbus_ipmi_handle_rsp; + iic->handle_if_event = smbus_ipmi_handle_event; + iic->set_irq_enable = smbus_ipmi_set_irq_enable; + iic->get_fwinfo = smbus_ipmi_get_fwinfo; +} + +static const TypeInfo smbus_ipmi_info = { + .name = TYPE_SMBUS_IPMI, + .parent = TYPE_SMBUS_DEVICE, + .instance_size = sizeof(SMBusIPMIDevice), + .instance_init = smbus_ipmi_init, + .class_init = smbus_ipmi_class_init, + .interfaces = (InterfaceInfo[]) { + { TYPE_IPMI_INTERFACE }, + { } + } +}; + +static void smbus_ipmi_register_types(void) +{ + type_register_static(&smbus_ipmi_info); +} + +type_init(smbus_ipmi_register_types) diff --git a/hw/isa/Kconfig b/hw/isa/Kconfig new file mode 100644 index 000000000..d42143a99 --- /dev/null +++ b/hw/isa/Kconfig @@ -0,0 +1,72 @@ +config ISA_BUS + bool + +config APM + bool + +config I82378 + bool + select ISA_BUS + select I8259 + select I8254 + select I82374 + select MC146818RTC + select PCSPK + +config ISA_SUPERIO + bool + select ISA_BUS + select PCKBD + select FDC_ISA + +config PC87312 + bool + select ISA_SUPERIO + select I8259 + select I8254 + select I8257 + select MC146818RTC + select SERIAL_ISA + select PARALLEL + select FDC_ISA + select IDE_ISA + +config PIIX3 + bool + select ISA_BUS + +config PIIX4 + bool + # For historical reasons, SuperIO devices are created in the board + # for PIIX4. + select ISA_BUS + select USB_UHCI + +config VT82C686 + bool + select ISA_SUPERIO + select ACPI_SMBUS + select SERIAL_ISA + select FDC_ISA + select USB_UHCI + select APM + select I8254 + select I8257 + select I8259 + select MC146818RTC + select PARALLEL + +config SMC37C669 + bool + select ISA_SUPERIO + select SERIAL_ISA + select PARALLEL + select FDC_ISA + +config LPC_ICH9 + bool + # For historical reasons, SuperIO devices are created in the board + # for ICH9. + select ISA_BUS + select ACPI_SMBUS + select ACPI_X86_ICH diff --git a/hw/isa/apm.c b/hw/isa/apm.c new file mode 100644 index 000000000..dfe9020d3 --- /dev/null +++ b/hw/isa/apm.c @@ -0,0 +1,97 @@ +/* + * QEMU PC APM controller Emulation + * This is split out from acpi.c + * + * Copyright (c) 2006 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License version 2.1 as published by the Free Software Foundation. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see + * + * Contributions after 2012-01-13 are licensed under the terms of the + * GNU GPL, version 2 or (at your option) any later version. 
+ */ + +#include "qemu/osdep.h" +#include "hw/isa/apm.h" +#include "hw/pci/pci.h" +#include "migration/vmstate.h" +#include "trace.h" + + +/* fixed I/O location */ +#define APM_STS_IOPORT 0xb3 + +static void apm_ioport_writeb(void *opaque, hwaddr addr, uint64_t val, + unsigned size) +{ + APMState *apm = opaque; + addr &= 1; + + trace_apm_io_write(addr, val); + if (addr == 0) { + apm->apmc = val; + + if (apm->callback) { + (apm->callback)(val, apm->arg); + } + } else { + apm->apms = val; + } +} + +static uint64_t apm_ioport_readb(void *opaque, hwaddr addr, unsigned size) +{ + APMState *apm = opaque; + uint32_t val; + + addr &= 1; + if (addr == 0) { + val = apm->apmc; + } else { + val = apm->apms; + } + trace_apm_io_read(addr, val); + + return val; +} + +const VMStateDescription vmstate_apm = { + .name = "APM State", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT8(apmc, APMState), + VMSTATE_UINT8(apms, APMState), + VMSTATE_END_OF_LIST() + } +}; + +static const MemoryRegionOps apm_ops = { + .read = apm_ioport_readb, + .write = apm_ioport_writeb, + .impl = { + .min_access_size = 1, + .max_access_size = 1, + }, +}; + +void apm_init(PCIDevice *dev, APMState *apm, apm_ctrl_changed_t callback, + void *arg) +{ + apm->callback = callback; + apm->arg = arg; + + /* ioport 0xb2, 0xb3 */ + memory_region_init_io(&apm->io, OBJECT(dev), &apm_ops, apm, "apm-io", 2); + memory_region_add_subregion(pci_address_space_io(dev), APM_CNT_IOPORT, + &apm->io); +} diff --git a/hw/isa/i82378.c b/hw/isa/i82378.c new file mode 100644 index 000000000..2a2ff05b9 --- /dev/null +++ b/hw/isa/i82378.c @@ -0,0 +1,151 @@ +/* + * QEMU Intel i82378 emulation (PCI to ISA bridge) + * + * Copyright (c) 2010-2011 Hervé Poussineau + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . 
+ */ + +#include "qemu/osdep.h" +#include "hw/pci/pci.h" +#include "hw/irq.h" +#include "hw/intc/i8259.h" +#include "hw/timer/i8254.h" +#include "migration/vmstate.h" +#include "hw/audio/pcspk.h" +#include "qom/object.h" + +#define TYPE_I82378 "i82378" +OBJECT_DECLARE_SIMPLE_TYPE(I82378State, I82378) + +struct I82378State { + PCIDevice parent_obj; + + qemu_irq out[2]; + qemu_irq *i8259; + MemoryRegion io; +}; + +static const VMStateDescription vmstate_i82378 = { + .name = "pci-i82378", + .version_id = 0, + .minimum_version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_PCI_DEVICE(parent_obj, I82378State), + VMSTATE_END_OF_LIST() + }, +}; + +static void i82378_request_out0_irq(void *opaque, int irq, int level) +{ + I82378State *s = opaque; + qemu_set_irq(s->out[0], level); +} + +static void i82378_request_pic_irq(void *opaque, int irq, int level) +{ + DeviceState *dev = opaque; + I82378State *s = I82378(dev); + + qemu_set_irq(s->i8259[irq], level); +} + +static void i82378_realize(PCIDevice *pci, Error **errp) +{ + DeviceState *dev = DEVICE(pci); + I82378State *s = I82378(dev); + uint8_t *pci_conf; + ISABus *isabus; + ISADevice *pit; + + pci_conf = pci->config; + pci_set_word(pci_conf + PCI_COMMAND, + PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER); + pci_set_word(pci_conf + PCI_STATUS, + PCI_STATUS_DEVSEL_MEDIUM); + + pci_config_set_interrupt_pin(pci_conf, 1); /* interrupt pin 0 */ + + isabus = isa_bus_new(dev, get_system_memory(), + pci_address_space_io(pci), errp); + if (!isabus) { + return; + } + + /* This device has: + 2 82C59 (irq) + 1 82C54 (pit) + 2 82C37 (dma) + NMI + Utility Bus Support Registers + + All devices accept byte access only, except timer + */ + + /* 2 82C59 (irq) */ + s->i8259 = i8259_init(isabus, + qemu_allocate_irq(i82378_request_out0_irq, s, 0)); + isa_bus_irqs(isabus, s->i8259); + + /* 1 82C54 (pit) */ + pit = i8254_pit_init(isabus, 0x40, 0, NULL); + + /* speaker */ + pcspk_init(isa_new(TYPE_PC_SPEAKER), isabus, pit); + + /* 2 82C37 (dma) */ + isa_create_simple(isabus, "i82374"); +} + +static void i82378_init(Object *obj) +{ + DeviceState *dev = DEVICE(obj); + I82378State *s = I82378(obj); + + qdev_init_gpio_out(dev, s->out, 1); + qdev_init_gpio_in(dev, i82378_request_pic_irq, 16); +} + +static void i82378_class_init(ObjectClass *klass, void *data) +{ + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + DeviceClass *dc = DEVICE_CLASS(klass); + + k->realize = i82378_realize; + k->vendor_id = PCI_VENDOR_ID_INTEL; + k->device_id = PCI_DEVICE_ID_INTEL_82378; + k->revision = 0x03; + k->class_id = PCI_CLASS_BRIDGE_ISA; + dc->vmsd = &vmstate_i82378; + set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); +} + +static const TypeInfo i82378_type_info = { + .name = TYPE_I82378, + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(I82378State), + .instance_init = i82378_init, + .class_init = i82378_class_init, + .interfaces = (InterfaceInfo[]) { + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { }, + }, +}; + +static void i82378_register_types(void) +{ + type_register_static(&i82378_type_info); +} + +type_init(i82378_register_types) diff --git a/hw/isa/isa-bus.c b/hw/isa/isa-bus.c new file mode 100644 index 000000000..6c31398dd --- /dev/null +++ b/hw/isa/isa-bus.c @@ -0,0 +1,309 @@ +/* + * isa bus support for qdev. 
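+ * At most one ISA bus can exist per machine; isa_bus_new() below rejects any attempt to create a second one.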
+ * + * Copyright (c) 2009 Gerd Hoffmann + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ + +#include "qemu/osdep.h" +#include "qemu/error-report.h" +#include "qemu/module.h" +#include "qapi/error.h" +#include "monitor/monitor.h" +#include "hw/sysbus.h" +#include "sysemu/sysemu.h" +#include "hw/isa/isa.h" + +static ISABus *isabus; + +static void isabus_dev_print(Monitor *mon, DeviceState *dev, int indent); +static char *isabus_get_fw_dev_path(DeviceState *dev); + +static void isa_bus_class_init(ObjectClass *klass, void *data) +{ + BusClass *k = BUS_CLASS(klass); + + k->print_dev = isabus_dev_print; + k->get_fw_dev_path = isabus_get_fw_dev_path; +} + +static const TypeInfo isa_dma_info = { + .name = TYPE_ISADMA, + .parent = TYPE_INTERFACE, + .class_size = sizeof(IsaDmaClass), +}; + +static const TypeInfo isa_bus_info = { + .name = TYPE_ISA_BUS, + .parent = TYPE_BUS, + .instance_size = sizeof(ISABus), + .class_init = isa_bus_class_init, +}; + +ISABus *isa_bus_new(DeviceState *dev, MemoryRegion *address_space, + MemoryRegion *address_space_io, Error **errp) +{ + if (isabus) { + error_setg(errp, "Can't create a second ISA bus"); + return NULL; + } + if (!dev) { + dev = qdev_new("isabus-bridge"); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + } + + isabus = ISA_BUS(qbus_new(TYPE_ISA_BUS, dev, NULL)); + isabus->address_space = address_space; + isabus->address_space_io = address_space_io; + return isabus; +} + +void isa_bus_irqs(ISABus *bus, qemu_irq *irqs) +{ + bus->irqs = irqs; +} + +/* + * isa_get_irq() returns the corresponding qemu_irq entry for the i8259. + * + * This function is only for special cases such as 'ferr' and for + * temporary use by normal devices until they are converted to qdev. + */ +qemu_irq isa_get_irq(ISADevice *dev, unsigned isairq) +{ + assert(!dev || ISA_BUS(qdev_get_parent_bus(DEVICE(dev))) == isabus); + assert(isairq < ISA_NUM_IRQS); + return isabus->irqs[isairq]; +} + +void isa_init_irq(ISADevice *dev, qemu_irq *p, unsigned isairq) +{ + assert(dev->nirqs < ARRAY_SIZE(dev->isairq)); + assert(isairq < ISA_NUM_IRQS); + dev->isairq[dev->nirqs] = isairq; + *p = isa_get_irq(dev, isairq); + dev->nirqs++; +} + +void isa_connect_gpio_out(ISADevice *isadev, int gpioirq, unsigned isairq) +{ + qemu_irq irq; + isa_init_irq(isadev, &irq, isairq); + qdev_connect_gpio_out(DEVICE(isadev), gpioirq, irq); +} + +void isa_bus_dma(ISABus *bus, IsaDma *dma8, IsaDma *dma16) +{ + assert(bus && dma8 && dma16); + assert(!bus->dma[0] && !bus->dma[1]); + bus->dma[0] = dma8; + bus->dma[1] = dma16; +} + +IsaDma *isa_get_dma(ISABus *bus, int nchan) +{ + assert(bus); + return bus->dma[nchan > 3 ?
1 : 0]; +} + +static inline void isa_init_ioport(ISADevice *dev, uint16_t ioport) +{ + if (dev && (dev->ioport_id == 0 || ioport < dev->ioport_id)) { + dev->ioport_id = ioport; + } +} + +void isa_register_ioport(ISADevice *dev, MemoryRegion *io, uint16_t start) +{ + memory_region_add_subregion(isabus->address_space_io, start, io); + isa_init_ioport(dev, start); +} + +int isa_register_portio_list(ISADevice *dev, + PortioList *piolist, uint16_t start, + const MemoryRegionPortio *pio_start, + void *opaque, const char *name) +{ + assert(piolist && !piolist->owner); + + if (!isabus) { + return -ENODEV; + } + + /* START is how we should treat DEV, regardless of the actual + contents of the portio array. This is how the old code + actually handled e.g. the FDC device. */ + isa_init_ioport(dev, start); + + portio_list_init(piolist, OBJECT(dev), pio_start, opaque, name); + portio_list_add(piolist, isabus->address_space_io, start); + + return 0; +} + +static void isa_device_init(Object *obj) +{ + ISADevice *dev = ISA_DEVICE(obj); + + dev->isairq[0] = -1; + dev->isairq[1] = -1; +} + +ISADevice *isa_new(const char *name) +{ + return ISA_DEVICE(qdev_new(name)); +} + +ISADevice *isa_try_new(const char *name) +{ + return ISA_DEVICE(qdev_try_new(name)); +} + +ISADevice *isa_create_simple(ISABus *bus, const char *name) +{ + ISADevice *dev; + + dev = isa_new(name); + isa_realize_and_unref(dev, bus, &error_fatal); + return dev; +} + +bool isa_realize_and_unref(ISADevice *dev, ISABus *bus, Error **errp) +{ + return qdev_realize_and_unref(&dev->parent_obj, &bus->parent_obj, errp); +} + +ISADevice *isa_vga_init(ISABus *bus) +{ + switch (vga_interface_type) { + case VGA_CIRRUS: + return isa_create_simple(bus, "isa-cirrus-vga"); + case VGA_QXL: + error_report("%s: qxl: no PCI bus", __func__); + return NULL; + case VGA_STD: + return isa_create_simple(bus, "isa-vga"); + case VGA_VMWARE: + error_report("%s: vmware_vga: no PCI bus", __func__); + return NULL; + case VGA_VIRTIO: + error_report("%s: virtio-vga: no PCI bus", __func__); + return NULL; + case VGA_NONE: + default: + return NULL; + } +} + +void isa_build_aml(ISABus *bus, Aml *scope) +{ + BusChild *kid; + ISADevice *dev; + ISADeviceClass *dc; + + QTAILQ_FOREACH(kid, &bus->parent_obj.children, sibling) { + dev = ISA_DEVICE(kid->child); + dc = ISA_DEVICE_GET_CLASS(dev); + if (dc->build_aml) { + dc->build_aml(dev, scope); + } + } +} + +static void isabus_dev_print(Monitor *mon, DeviceState *dev, int indent) +{ + ISADevice *d = ISA_DEVICE(dev); + + if (d->isairq[1] != -1) { + monitor_printf(mon, "%*sisa irqs %d,%d\n", indent, "", + d->isairq[0], d->isairq[1]); + } else if (d->isairq[0] != -1) { + monitor_printf(mon, "%*sisa irq %d\n", indent, "", + d->isairq[0]); + } +} + +static void isabus_bridge_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); + dc->fw_name = "isa"; +} + +static const TypeInfo isabus_bridge_info = { + .name = "isabus-bridge", + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(SysBusDevice), + .class_init = isabus_bridge_class_init, +}; + +static void isa_device_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *k = DEVICE_CLASS(klass); + k->bus_type = TYPE_ISA_BUS; +} + +static const TypeInfo isa_device_type_info = { + .name = TYPE_ISA_DEVICE, + .parent = TYPE_DEVICE, + .instance_size = sizeof(ISADevice), + .instance_init = isa_device_init, + .abstract = true, + .class_size = sizeof(ISADeviceClass), + .class_init = 
isa_device_class_init, +}; + +static void isabus_register_types(void) +{ + type_register_static(&isa_dma_info); + type_register_static(&isa_bus_info); + type_register_static(&isabus_bridge_info); + type_register_static(&isa_device_type_info); +} + +static char *isabus_get_fw_dev_path(DeviceState *dev) +{ + ISADevice *d = ISA_DEVICE(dev); + char path[40]; + int off; + + off = snprintf(path, sizeof(path), "%s", qdev_fw_name(dev)); + if (d->ioport_id) { + snprintf(path + off, sizeof(path) - off, "@%04x", d->ioport_id); + } + + return g_strdup(path); +} + +MemoryRegion *isa_address_space(ISADevice *dev) +{ + if (dev) { + return isa_bus_from_device(dev)->address_space; + } + + return isabus->address_space; +} + +MemoryRegion *isa_address_space_io(ISADevice *dev) +{ + if (dev) { + return isa_bus_from_device(dev)->address_space_io; + } + + return isabus->address_space_io; +} + +type_init(isabus_register_types) diff --git a/hw/isa/isa-superio.c b/hw/isa/isa-superio.c new file mode 100644 index 000000000..c81bfe58e --- /dev/null +++ b/hw/isa/isa-superio.c @@ -0,0 +1,212 @@ +/* + * Generic ISA Super I/O + * + * Copyright (c) 2010-2012 Herve Poussineau + * Copyright (c) 2011-2012 Andreas Färber + * Copyright (c) 2018 Philippe Mathieu-Daudé + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "qemu/error-report.h" +#include "qemu/module.h" +#include "qapi/error.h" +#include "sysemu/blockdev.h" +#include "chardev/char.h" +#include "hw/block/fdc.h" +#include "hw/isa/superio.h" +#include "hw/qdev-properties.h" +#include "hw/input/i8042.h" +#include "hw/char/serial.h" +#include "trace.h" + +static void isa_superio_realize(DeviceState *dev, Error **errp) +{ + ISASuperIODevice *sio = ISA_SUPERIO(dev); + ISASuperIOClass *k = ISA_SUPERIO_GET_CLASS(sio); + ISABus *bus = isa_bus_from_device(ISA_DEVICE(dev)); + ISADevice *isa; + DeviceState *d; + Chardev *chr; + DriveInfo *fd[MAX_FD]; + char *name; + int i; + + /* Parallel port */ + for (i = 0; i < k->parallel.count; i++) { + if (i >= ARRAY_SIZE(sio->parallel)) { + warn_report("superio: ignoring %td parallel controllers", + k->parallel.count - ARRAY_SIZE(sio->parallel)); + break; + } + if (!k->parallel.is_enabled || k->parallel.is_enabled(sio, i)) { + /* FIXME use a qdev chardev prop instead of parallel_hds[] */ + chr = parallel_hds[i]; + if (chr == NULL) { + name = g_strdup_printf("discarding-parallel%d", i); + chr = qemu_chr_new(name, "null", NULL); + } else { + name = g_strdup_printf("parallel%d", i); + } + isa = isa_new("isa-parallel"); + d = DEVICE(isa); + qdev_prop_set_uint32(d, "index", i); + if (k->parallel.get_iobase) { + qdev_prop_set_uint32(d, "iobase", + k->parallel.get_iobase(sio, i)); + } + if (k->parallel.get_irq) { + qdev_prop_set_uint32(d, "irq", k->parallel.get_irq(sio, i)); + } + qdev_prop_set_chr(d, "chardev", chr); + object_property_add_child(OBJECT(dev), name, OBJECT(isa)); + isa_realize_and_unref(isa, bus, &error_fatal); + sio->parallel[i] = isa; + trace_superio_create_parallel(i, + k->parallel.get_iobase ? + k->parallel.get_iobase(sio, i) : -1, + k->parallel.get_irq ? 
+ k->parallel.get_irq(sio, i) : -1); + g_free(name); + } + } + + /* Serial */ + for (i = 0; i < k->serial.count; i++) { + if (i >= ARRAY_SIZE(sio->serial)) { + warn_report("superio: ignoring %td serial controllers", + k->serial.count - ARRAY_SIZE(sio->serial)); + break; + } + if (!k->serial.is_enabled || k->serial.is_enabled(sio, i)) { + /* FIXME use a qdev chardev prop instead of serial_hd() */ + chr = serial_hd(i); + if (chr == NULL) { + name = g_strdup_printf("discarding-serial%d", i); + chr = qemu_chr_new(name, "null", NULL); + } else { + name = g_strdup_printf("serial%d", i); + } + isa = isa_new(TYPE_ISA_SERIAL); + d = DEVICE(isa); + qdev_prop_set_uint32(d, "index", i); + if (k->serial.get_iobase) { + qdev_prop_set_uint32(d, "iobase", + k->serial.get_iobase(sio, i)); + } + if (k->serial.get_irq) { + qdev_prop_set_uint32(d, "irq", k->serial.get_irq(sio, i)); + } + qdev_prop_set_chr(d, "chardev", chr); + object_property_add_child(OBJECT(dev), name, OBJECT(isa)); + isa_realize_and_unref(isa, bus, &error_fatal); + sio->serial[i] = isa; + trace_superio_create_serial(i, + k->serial.get_iobase ? + k->serial.get_iobase(sio, i) : -1, + k->serial.get_irq ? + k->serial.get_irq(sio, i) : -1); + g_free(name); + } + } + + /* Floppy disc */ + if (!k->floppy.is_enabled || k->floppy.is_enabled(sio, 0)) { + isa = isa_new(TYPE_ISA_FDC); + d = DEVICE(isa); + if (k->floppy.get_iobase) { + qdev_prop_set_uint32(d, "iobase", k->floppy.get_iobase(sio, 0)); + } + if (k->floppy.get_irq) { + qdev_prop_set_uint32(d, "irq", k->floppy.get_irq(sio, 0)); + } + /* FIXME use a qdev drive property instead of drive_get() */ + for (i = 0; i < MAX_FD; i++) { + fd[i] = drive_get(IF_FLOPPY, 0, i); + } + object_property_add_child(OBJECT(sio), "isa-fdc", OBJECT(isa)); + isa_realize_and_unref(isa, bus, &error_fatal); + isa_fdc_init_drives(isa, fd); + sio->floppy = isa; + trace_superio_create_floppy(0, + k->floppy.get_iobase ? + k->floppy.get_iobase(sio, 0) : -1, + k->floppy.get_irq ? + k->floppy.get_irq(sio, 0) : -1); + } + + /* Keyboard, mouse */ + isa = isa_new(TYPE_I8042); + object_property_add_child(OBJECT(sio), TYPE_I8042, OBJECT(isa)); + isa_realize_and_unref(isa, bus, &error_fatal); + sio->kbc = isa; + + /* IDE */ + if (k->ide.count && (!k->ide.is_enabled || k->ide.is_enabled(sio, 0))) { + isa = isa_new("isa-ide"); + d = DEVICE(isa); + if (k->ide.get_iobase) { + qdev_prop_set_uint32(d, "iobase", k->ide.get_iobase(sio, 0)); + } + if (k->ide.get_iobase) { + qdev_prop_set_uint32(d, "iobase2", k->ide.get_iobase(sio, 1)); + } + if (k->ide.get_irq) { + qdev_prop_set_uint32(d, "irq", k->ide.get_irq(sio, 0)); + } + object_property_add_child(OBJECT(sio), "isa-ide", OBJECT(isa)); + isa_realize_and_unref(isa, bus, &error_fatal); + sio->ide = isa; + trace_superio_create_ide(0, + k->ide.get_iobase ? + k->ide.get_iobase(sio, 0) : -1, + k->ide.get_irq ? 
+ k->ide.get_irq(sio, 0) : -1); + } +} + +static void isa_superio_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + + dc->realize = isa_superio_realize; + /* Reason: Uses parallel_hds[0] in realize(), so it can't be used twice */ + dc->user_creatable = false; +} + +static const TypeInfo isa_superio_type_info = { + .name = TYPE_ISA_SUPERIO, + .parent = TYPE_ISA_DEVICE, + .abstract = true, + .class_size = sizeof(ISASuperIOClass), + .class_init = isa_superio_class_init, +}; + +/* SMS FDC37M817 Super I/O */ +static void fdc37m81x_class_init(ObjectClass *klass, void *data) +{ + ISASuperIOClass *sc = ISA_SUPERIO_CLASS(klass); + + sc->serial.count = 2; /* NS16C550A */ + sc->parallel.count = 1; + sc->floppy.count = 1; /* SMSC 82077AA Compatible */ + sc->ide.count = 0; +} + +static const TypeInfo fdc37m81x_type_info = { + .name = TYPE_FDC37M81X_SUPERIO, + .parent = TYPE_ISA_SUPERIO, + .instance_size = sizeof(ISASuperIODevice), + .class_init = fdc37m81x_class_init, +}; + +static void isa_superio_register_types(void) +{ + type_register_static(&isa_superio_type_info); + type_register_static(&fdc37m81x_type_info); +} + +type_init(isa_superio_register_types) diff --git a/hw/isa/lpc_ich9.c b/hw/isa/lpc_ich9.c new file mode 100644 index 000000000..5f143dca1 --- /dev/null +++ b/hw/isa/lpc_ich9.c @@ -0,0 +1,857 @@ +/* + * QEMU ICH9 Emulation + * + * Copyright (c) 2006 Fabrice Bellard + * Copyright (c) 2009, 2010, 2011 + * Isaku Yamahata + * VA Linux Systems Japan K.K. + * Copyright (C) 2012 Jason Baron + * + * This is based on piix.c, but heavily modified. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "cpu.h" +#include "qapi/error.h" +#include "qapi/visitor.h" +#include "qemu/range.h" +#include "hw/isa/isa.h" +#include "migration/vmstate.h" +#include "hw/irq.h" +#include "hw/isa/apm.h" +#include "hw/pci/pci.h" +#include "hw/pci/pci_bridge.h" +#include "hw/i386/ich9.h" +#include "hw/acpi/acpi.h" +#include "hw/acpi/ich9.h" +#include "hw/pci/pci_bus.h" +#include "hw/qdev-properties.h" +#include "sysemu/runstate.h" +#include "sysemu/sysemu.h" +#include "hw/core/cpu.h" +#include "hw/nvram/fw_cfg.h" +#include "qemu/cutils.h" + +/*****************************************************************************/ +/* ICH9 LPC PCI to ISA bridge */ + +static void ich9_lpc_reset(DeviceState *qdev); + +/* chipset configuration registers + * pci_[sg]et_{byte, word, long} are used to access the chipset + * configuration registers. + * Although this is not PCI configuration space, it is little endian, + * as is the Intel convention. + */ + +static void ich9_cc_update_ir(uint8_t irr[PCI_NUM_PINS], uint16_t ir) +{ + int intx; + for (intx = 0; intx < PCI_NUM_PINS; intx++) { + irr[intx] = (ir >> (intx * ICH9_CC_DIR_SHIFT)) & ICH9_CC_DIR_MASK; + } +} + +static void ich9_cc_update(ICH9LPCState *lpc) +{ + int slot; + int pci_intx; + + const int reg_offsets[] = { + ICH9_CC_D25IR, + ICH9_CC_D26IR, + ICH9_CC_D27IR, + ICH9_CC_D28IR, + ICH9_CC_D29IR, + ICH9_CC_D30IR, + ICH9_CC_D31IR, + }; + const int *offset; + + /* D{25 - 31}IR, but D30IR is read-only and reads as 0. */ + for (slot = 25, offset = reg_offsets; slot < 32; slot++, offset++) { + if (slot == 30) { + continue; + } + ich9_cc_update_ir(lpc->irr[slot], + pci_get_word(lpc->chip_config + *offset)); + } + + /* + * D30: DMI2PCI bridge + * It is arbitrarily decided how INTx lines of PCI devices behind + * the bridge are connected to pirq lines. Our choice is PIRQ[E-H]. + * INT[A-D] are connected to PIRQ[E-H] + */ + for (pci_intx = 0; pci_intx < PCI_NUM_PINS; pci_intx++) { + lpc->irr[30][pci_intx] = pci_intx + 4; + } +} + +static void ich9_cc_init(ICH9LPCState *lpc) +{ + int slot; + int intx; + + /* The default irq routing is arbitrary as long as it matches the + * ACPI irq routing table. + * A routing incompatible with the piix_pci (= bochs) one is + * intentionally chosen, to let users know that a different + * board is in use.
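+ * For example, with the (slot + intx) % 4 + 4 rotation below, a device in slot 1 gets INTA -> PIRQF, INTB -> PIRQG, INTC -> PIRQH, INTD -> PIRQE.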
+ * + * int[A-D] -> pirq[E-H] + * avoid pirq A-D because they are used for PCI Express ports + */ + for (slot = 0; slot < PCI_SLOT_MAX; slot++) { + for (intx = 0; intx < PCI_NUM_PINS; intx++) { + lpc->irr[slot][intx] = (slot + intx) % 4 + 4; + } + } + ich9_cc_update(lpc); +} + +static void ich9_cc_reset(ICH9LPCState *lpc) +{ + uint8_t *c = lpc->chip_config; + + memset(lpc->chip_config, 0, sizeof(lpc->chip_config)); + + pci_set_long(c + ICH9_CC_D31IR, ICH9_CC_DIR_DEFAULT); + pci_set_long(c + ICH9_CC_D30IR, ICH9_CC_D30IR_DEFAULT); + pci_set_long(c + ICH9_CC_D29IR, ICH9_CC_DIR_DEFAULT); + pci_set_long(c + ICH9_CC_D28IR, ICH9_CC_DIR_DEFAULT); + pci_set_long(c + ICH9_CC_D27IR, ICH9_CC_DIR_DEFAULT); + pci_set_long(c + ICH9_CC_D26IR, ICH9_CC_DIR_DEFAULT); + pci_set_long(c + ICH9_CC_D25IR, ICH9_CC_DIR_DEFAULT); + pci_set_long(c + ICH9_CC_GCS, ICH9_CC_GCS_DEFAULT); + + ich9_cc_update(lpc); +} + +static void ich9_cc_addr_len(uint64_t *addr, unsigned *len) +{ + *addr &= ICH9_CC_ADDR_MASK; + if (*addr + *len >= ICH9_CC_SIZE) { + *len = ICH9_CC_SIZE - *addr; + } +} + +/* val: little endian */ +static void ich9_cc_write(void *opaque, hwaddr addr, + uint64_t val, unsigned len) +{ + ICH9LPCState *lpc = (ICH9LPCState *)opaque; + + ich9_cc_addr_len(&addr, &len); + memcpy(lpc->chip_config + addr, &val, len); + pci_bus_fire_intx_routing_notifier(pci_get_bus(&lpc->d)); + ich9_cc_update(lpc); +} + +/* return value: little endian */ +static uint64_t ich9_cc_read(void *opaque, hwaddr addr, + unsigned len) +{ + ICH9LPCState *lpc = (ICH9LPCState *)opaque; + + uint32_t val = 0; + ich9_cc_addr_len(&addr, &len); + memcpy(&val, lpc->chip_config + addr, len); + return val; +} + +/* IRQ routing */ +static void ich9_lpc_rout(uint8_t pirq_rout, int *pic_irq, int *pic_dis) +{ + *pic_irq = pirq_rout & ICH9_LPC_PIRQ_ROUT_MASK; + *pic_dis = pirq_rout & ICH9_LPC_PIRQ_ROUT_IRQEN; +} + +static void ich9_lpc_pic_irq(ICH9LPCState *lpc, int pirq_num, + int *pic_irq, int *pic_dis) +{ + switch (pirq_num) { + case 0 ... 3: /* A-D */ + ich9_lpc_rout(lpc->d.config[ICH9_LPC_PIRQA_ROUT + pirq_num], + pic_irq, pic_dis); + return; + case 4 ... 7: /* E-H */ + ich9_lpc_rout(lpc->d.config[ICH9_LPC_PIRQE_ROUT + (pirq_num - 4)], + pic_irq, pic_dis); + return; + default: + break; + } + abort(); +} + +/* gsi: i8259+ioapic irq 0-15, otherwise assert */ +static void ich9_lpc_update_pic(ICH9LPCState *lpc, int gsi) +{ + int i, pic_level; + + assert(gsi < ICH9_LPC_PIC_NUM_PINS); + + /* The pic level is the logical OR of all the PCI irqs mapped to it */ + pic_level = 0; + for (i = 0; i < ICH9_LPC_NB_PIRQS; i++) { + int tmp_irq; + int tmp_dis; + ich9_lpc_pic_irq(lpc, i, &tmp_irq, &tmp_dis); + if (!tmp_dis && tmp_irq == gsi) { + pic_level |= pci_bus_get_irq_level(pci_get_bus(&lpc->d), i); + } + } + if (gsi == lpc->sci_gsi) { + pic_level |= lpc->sci_level; + } + + qemu_set_irq(lpc->gsi[gsi], pic_level); +} + +/* APIC mode: GSIx: PIRQ[A-H] -> GSI 16, ... no pirq shares same APIC pins.
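+ * In other words GSI = 16 + pirq, so PIRQ[A-H] map to GSI [16-23]; see ich9_pirq_to_gsi() and ich9_gsi_to_pirq() below.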
*/ +static int ich9_pirq_to_gsi(int pirq) +{ + return pirq + ICH9_LPC_PIC_NUM_PINS; +} + +static int ich9_gsi_to_pirq(int gsi) +{ + return gsi - ICH9_LPC_PIC_NUM_PINS; +} + +/* gsi: ioapic irq 16-23, otherwise assert */ +static void ich9_lpc_update_apic(ICH9LPCState *lpc, int gsi) +{ + int level = 0; + + assert(gsi >= ICH9_LPC_PIC_NUM_PINS); + + level |= pci_bus_get_irq_level(pci_get_bus(&lpc->d), ich9_gsi_to_pirq(gsi)); + if (gsi == lpc->sci_gsi) { + level |= lpc->sci_level; + } + + qemu_set_irq(lpc->gsi[gsi], level); +} + +void ich9_lpc_set_irq(void *opaque, int pirq, int level) +{ + ICH9LPCState *lpc = opaque; + int pic_irq, pic_dis; + + assert(0 <= pirq); + assert(pirq < ICH9_LPC_NB_PIRQS); + + ich9_lpc_update_apic(lpc, ich9_pirq_to_gsi(pirq)); + ich9_lpc_pic_irq(lpc, pirq, &pic_irq, &pic_dis); + ich9_lpc_update_pic(lpc, pic_irq); +} + +/* return the pirq number (PIRQ[A-H]:0-7) corresponding to + * a given device irq pin. + */ +int ich9_lpc_map_irq(PCIDevice *pci_dev, int intx) +{ + BusState *bus = qdev_get_parent_bus(&pci_dev->qdev); + PCIBus *pci_bus = PCI_BUS(bus); + PCIDevice *lpc_pdev = + pci_bus->devices[PCI_DEVFN(ICH9_LPC_DEV, ICH9_LPC_FUNC)]; + ICH9LPCState *lpc = ICH9_LPC_DEVICE(lpc_pdev); + + return lpc->irr[PCI_SLOT(pci_dev->devfn)][intx]; +} + +PCIINTxRoute ich9_route_intx_pin_to_irq(void *opaque, int pirq_pin) +{ + ICH9LPCState *lpc = opaque; + PCIINTxRoute route; + int pic_irq; + int pic_dis; + + assert(0 <= pirq_pin); + assert(pirq_pin < ICH9_LPC_NB_PIRQS); + + route.mode = PCI_INTX_ENABLED; + ich9_lpc_pic_irq(lpc, pirq_pin, &pic_irq, &pic_dis); + if (!pic_dis) { + if (pic_irq < ICH9_LPC_PIC_NUM_PINS) { + route.irq = pic_irq; + } else { + route.mode = PCI_INTX_DISABLED; + route.irq = -1; + } + } else { + route.irq = ich9_pirq_to_gsi(pirq_pin); + } + + return route; +} + +void ich9_generate_smi(void) +{ + cpu_interrupt(first_cpu, CPU_INTERRUPT_SMI); +} + +/* Returns -1 on error, IRQ number on success */ +static int ich9_lpc_sci_irq(ICH9LPCState *lpc) +{ + uint8_t sel = lpc->d.config[ICH9_LPC_ACPI_CTRL] & + ICH9_LPC_ACPI_CTRL_SCI_IRQ_SEL_MASK; + switch (sel) { + case ICH9_LPC_ACPI_CTRL_9: + return 9; + case ICH9_LPC_ACPI_CTRL_10: + return 10; + case ICH9_LPC_ACPI_CTRL_11: + return 11; + case ICH9_LPC_ACPI_CTRL_20: + return 20; + case ICH9_LPC_ACPI_CTRL_21: + return 21; + default: + /* reserved */ + qemu_log_mask(LOG_GUEST_ERROR, + "ICH9 LPC: SCI IRQ SEL #%u is reserved\n", sel); + break; + } + return -1; +} + +static void ich9_set_sci(void *opaque, int irq_num, int level) +{ + ICH9LPCState *lpc = opaque; + int irq; + + assert(irq_num == 0); + level = !!level; + if (level == lpc->sci_level) { + return; + } + lpc->sci_level = level; + + irq = lpc->sci_gsi; + if (irq < 0) { + return; + } + + if (irq >= ICH9_LPC_PIC_NUM_PINS) { + ich9_lpc_update_apic(lpc, irq); + } else { + ich9_lpc_update_pic(lpc, irq); + } +} + +static void smi_features_ok_callback(void *opaque) +{ + ICH9LPCState *lpc = opaque; + uint64_t guest_features; + uint64_t guest_cpu_hotplug_features; + + if (lpc->smi_features_ok) { + /* negotiation already complete, features locked */ + return; + } + + memcpy(&guest_features, lpc->smi_guest_features_le, sizeof guest_features); + le64_to_cpus(&guest_features); + if (guest_features & ~lpc->smi_host_features) { + /* guest requests invalid features, leave @features_ok at zero */ + return; + } + + guest_cpu_hotplug_features = guest_features & + (BIT_ULL(ICH9_LPC_SMI_F_CPU_HOTPLUG_BIT) | + BIT_ULL(ICH9_LPC_SMI_F_CPU_HOT_UNPLUG_BIT)); + if (!(guest_features & 
BIT_ULL(ICH9_LPC_SMI_F_BROADCAST_BIT)) && + guest_cpu_hotplug_features) { + /* + * cpu hot-[un]plug with SMI requires SMI broadcast, + * leave @features_ok at zero + */ + return; + } + + if (guest_cpu_hotplug_features == + BIT_ULL(ICH9_LPC_SMI_F_CPU_HOT_UNPLUG_BIT)) { + /* cpu hot-unplug is unsupported without cpu-hotplug */ + return; + } + + /* valid feature subset requested, lock it down, report success */ + lpc->smi_negotiated_features = guest_features; + lpc->smi_features_ok = 1; +} + +void ich9_lpc_pm_init(PCIDevice *lpc_pci, bool smm_enabled) +{ + ICH9LPCState *lpc = ICH9_LPC_DEVICE(lpc_pci); + qemu_irq sci_irq; + FWCfgState *fw_cfg = fw_cfg_find(); + + sci_irq = qemu_allocate_irq(ich9_set_sci, lpc, 0); + ich9_pm_init(lpc_pci, &lpc->pm, smm_enabled, sci_irq); + + if (lpc->smi_host_features && fw_cfg) { + uint64_t host_features_le; + + host_features_le = cpu_to_le64(lpc->smi_host_features); + memcpy(lpc->smi_host_features_le, &host_features_le, + sizeof host_features_le); + fw_cfg_add_file(fw_cfg, "etc/smi/supported-features", + lpc->smi_host_features_le, + sizeof lpc->smi_host_features_le); + + /* The other two guest-visible fields are cleared on device reset, we + * just link them into fw_cfg here. + */ + fw_cfg_add_file_callback(fw_cfg, "etc/smi/requested-features", + NULL, NULL, NULL, + lpc->smi_guest_features_le, + sizeof lpc->smi_guest_features_le, + false); + fw_cfg_add_file_callback(fw_cfg, "etc/smi/features-ok", + smi_features_ok_callback, NULL, lpc, + &lpc->smi_features_ok, + sizeof lpc->smi_features_ok, + true); + } + + ich9_lpc_reset(DEVICE(lpc)); +} + +/* APM */ + +static void ich9_apm_ctrl_changed(uint32_t val, void *arg) +{ + ICH9LPCState *lpc = arg; + + /* ACPI specs 3.0, 4.7.2.5 */ + acpi_pm1_cnt_update(&lpc->pm.acpi_regs, + val == ICH9_APM_ACPI_ENABLE, + val == ICH9_APM_ACPI_DISABLE); + if (val == ICH9_APM_ACPI_ENABLE || val == ICH9_APM_ACPI_DISABLE) { + return; + } + + /* SMI_EN = PMBASE + 30. 
SMI control and enable register */ + if (lpc->pm.smi_en & ICH9_PMIO_SMI_EN_APMC_EN) { + if (lpc->smi_negotiated_features & + (UINT64_C(1) << ICH9_LPC_SMI_F_BROADCAST_BIT)) { + CPUState *cs; + CPU_FOREACH(cs) { + cpu_interrupt(cs, CPU_INTERRUPT_SMI); + } + } else { + cpu_interrupt(current_cpu, CPU_INTERRUPT_SMI); + } + } +} + +/* config:PMBASE */ +static void +ich9_lpc_pmbase_sci_update(ICH9LPCState *lpc) +{ + uint32_t pm_io_base = pci_get_long(lpc->d.config + ICH9_LPC_PMBASE); + uint8_t acpi_cntl = pci_get_long(lpc->d.config + ICH9_LPC_ACPI_CTRL); + int new_gsi; + + if (acpi_cntl & ICH9_LPC_ACPI_CTRL_ACPI_EN) { + pm_io_base &= ICH9_LPC_PMBASE_BASE_ADDRESS_MASK; + } else { + pm_io_base = 0; + } + + ich9_pm_iospace_update(&lpc->pm, pm_io_base); + + new_gsi = ich9_lpc_sci_irq(lpc); + if (new_gsi == -1) { + return; + } + if (lpc->sci_level && new_gsi != lpc->sci_gsi) { + qemu_set_irq(lpc->pm.irq, 0); + lpc->sci_gsi = new_gsi; + qemu_set_irq(lpc->pm.irq, 1); + } + lpc->sci_gsi = new_gsi; +} + +/* config:RCBA */ +static void ich9_lpc_rcba_update(ICH9LPCState *lpc, uint32_t rcba_old) +{ + uint32_t rcba = pci_get_long(lpc->d.config + ICH9_LPC_RCBA); + + if (rcba_old & ICH9_LPC_RCBA_EN) { + memory_region_del_subregion(get_system_memory(), &lpc->rcrb_mem); + } + if (rcba & ICH9_LPC_RCBA_EN) { + memory_region_add_subregion_overlap(get_system_memory(), + rcba & ICH9_LPC_RCBA_BA_MASK, + &lpc->rcrb_mem, 1); + } +} + +/* config:GEN_PMCON* */ +static void +ich9_lpc_pmcon_update(ICH9LPCState *lpc) +{ + uint16_t gen_pmcon_1 = pci_get_word(lpc->d.config + ICH9_LPC_GEN_PMCON_1); + uint16_t wmask; + + if (gen_pmcon_1 & ICH9_LPC_GEN_PMCON_1_SMI_LOCK) { + wmask = pci_get_word(lpc->d.wmask + ICH9_LPC_GEN_PMCON_1); + wmask &= ~ICH9_LPC_GEN_PMCON_1_SMI_LOCK; + pci_set_word(lpc->d.wmask + ICH9_LPC_GEN_PMCON_1, wmask); + lpc->pm.smi_en_wmask &= ~1; + } +} + +static int ich9_lpc_post_load(void *opaque, int version_id) +{ + ICH9LPCState *lpc = opaque; + + ich9_lpc_pmbase_sci_update(lpc); + ich9_lpc_rcba_update(lpc, 0 /* disabled ICH9_LPC_RCBA_EN */); + ich9_lpc_pmcon_update(lpc); + return 0; +} + +static void ich9_lpc_config_write(PCIDevice *d, + uint32_t addr, uint32_t val, int len) +{ + ICH9LPCState *lpc = ICH9_LPC_DEVICE(d); + uint32_t rcba_old = pci_get_long(d->config + ICH9_LPC_RCBA); + + pci_default_write_config(d, addr, val, len); + if (ranges_overlap(addr, len, ICH9_LPC_PMBASE, 4) || + ranges_overlap(addr, len, ICH9_LPC_ACPI_CTRL, 1)) { + ich9_lpc_pmbase_sci_update(lpc); + } + if (ranges_overlap(addr, len, ICH9_LPC_RCBA, 4)) { + ich9_lpc_rcba_update(lpc, rcba_old); + } + if (ranges_overlap(addr, len, ICH9_LPC_PIRQA_ROUT, 4)) { + pci_bus_fire_intx_routing_notifier(pci_get_bus(&lpc->d)); + } + if (ranges_overlap(addr, len, ICH9_LPC_PIRQE_ROUT, 4)) { + pci_bus_fire_intx_routing_notifier(pci_get_bus(&lpc->d)); + } + if (ranges_overlap(addr, len, ICH9_LPC_GEN_PMCON_1, 8)) { + ich9_lpc_pmcon_update(lpc); + } +} + +static void ich9_lpc_reset(DeviceState *qdev) +{ + PCIDevice *d = PCI_DEVICE(qdev); + ICH9LPCState *lpc = ICH9_LPC_DEVICE(d); + uint32_t rcba_old = pci_get_long(d->config + ICH9_LPC_RCBA); + int i; + + for (i = 0; i < 4; i++) { + pci_set_byte(d->config + ICH9_LPC_PIRQA_ROUT + i, + ICH9_LPC_PIRQ_ROUT_DEFAULT); + } + for (i = 0; i < 4; i++) { + pci_set_byte(d->config + ICH9_LPC_PIRQE_ROUT + i, + ICH9_LPC_PIRQ_ROUT_DEFAULT); + } + pci_set_byte(d->config + ICH9_LPC_ACPI_CTRL, ICH9_LPC_ACPI_CTRL_DEFAULT); + + pci_set_long(d->config + ICH9_LPC_PMBASE, ICH9_LPC_PMBASE_DEFAULT); + pci_set_long(d->config + 
ICH9_LPC_RCBA, ICH9_LPC_RCBA_DEFAULT); + + ich9_cc_reset(lpc); + + ich9_lpc_pmbase_sci_update(lpc); + ich9_lpc_rcba_update(lpc, rcba_old); + + lpc->sci_level = 0; + lpc->rst_cnt = 0; + + memset(lpc->smi_guest_features_le, 0, sizeof lpc->smi_guest_features_le); + lpc->smi_features_ok = 0; + lpc->smi_negotiated_features = 0; +} + +/* root complex register block is mapped into memory space */ +static const MemoryRegionOps rcrb_mmio_ops = { + .read = ich9_cc_read, + .write = ich9_cc_write, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static void ich9_lpc_machine_ready(Notifier *n, void *opaque) +{ + ICH9LPCState *s = container_of(n, ICH9LPCState, machine_ready); + MemoryRegion *io_as = pci_address_space_io(&s->d); + uint8_t *pci_conf; + + pci_conf = s->d.config; + if (memory_region_present(io_as, 0x3f8)) { + /* com1 */ + pci_conf[0x82] |= 0x01; + } + if (memory_region_present(io_as, 0x2f8)) { + /* com2 */ + pci_conf[0x82] |= 0x02; + } + if (memory_region_present(io_as, 0x378)) { + /* lpt */ + pci_conf[0x82] |= 0x04; + } + if (memory_region_present(io_as, 0x3f2)) { + /* floppy */ + pci_conf[0x82] |= 0x08; + } +} + +/* reset control */ +static void ich9_rst_cnt_write(void *opaque, hwaddr addr, uint64_t val, + unsigned len) +{ + ICH9LPCState *lpc = opaque; + + if (val & 4) { + qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); + return; + } + lpc->rst_cnt = val & 0xA; /* keep FULL_RST (bit 3) and SYS_RST (bit 1) */ +} + +static uint64_t ich9_rst_cnt_read(void *opaque, hwaddr addr, unsigned len) +{ + ICH9LPCState *lpc = opaque; + + return lpc->rst_cnt; +} + +static const MemoryRegionOps ich9_rst_cnt_ops = { + .read = ich9_rst_cnt_read, + .write = ich9_rst_cnt_write, + .endianness = DEVICE_LITTLE_ENDIAN +}; + +static void ich9_lpc_initfn(Object *obj) +{ + ICH9LPCState *lpc = ICH9_LPC_DEVICE(obj); + + static const uint8_t acpi_enable_cmd = ICH9_APM_ACPI_ENABLE; + static const uint8_t acpi_disable_cmd = ICH9_APM_ACPI_DISABLE; + + object_property_add_uint8_ptr(obj, ACPI_PM_PROP_SCI_INT, + &lpc->sci_gsi, OBJ_PROP_FLAG_READ); + object_property_add_uint8_ptr(OBJECT(lpc), ACPI_PM_PROP_ACPI_ENABLE_CMD, + &acpi_enable_cmd, OBJ_PROP_FLAG_READ); + object_property_add_uint8_ptr(OBJECT(lpc), ACPI_PM_PROP_ACPI_DISABLE_CMD, + &acpi_disable_cmd, OBJ_PROP_FLAG_READ); + object_property_add_uint64_ptr(obj, ICH9_LPC_SMI_NEGOTIATED_FEAT_PROP, + &lpc->smi_negotiated_features, + OBJ_PROP_FLAG_READ); + + ich9_pm_add_properties(obj, &lpc->pm); +} + +static void ich9_lpc_realize(PCIDevice *d, Error **errp) +{ + ICH9LPCState *lpc = ICH9_LPC_DEVICE(d); + DeviceState *dev = DEVICE(d); + ISABus *isa_bus; + + if ((lpc->smi_host_features & BIT_ULL(ICH9_LPC_SMI_F_CPU_HOT_UNPLUG_BIT)) && + !(lpc->smi_host_features & BIT_ULL(ICH9_LPC_SMI_F_CPU_HOTPLUG_BIT))) { + /* + * smi_features_ok_callback() rejects this combination (it leaves + * @features_ok at zero). + * + * So bail out here instead of advertising the invalid + * configuration and getting obscure firmware failures from it.
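+ * (In property terms: enabling "x-smi-cpu-hotunplug" requires "x-smi-cpu-hotplug" to be enabled as well.)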
+ */ + error_setg(errp, "cpu hot-unplug requires cpu hot-plug"); + return; + } + + isa_bus = isa_bus_new(DEVICE(d), get_system_memory(), get_system_io(), + errp); + if (!isa_bus) { + return; + } + + pci_set_long(d->wmask + ICH9_LPC_PMBASE, + ICH9_LPC_PMBASE_BASE_ADDRESS_MASK); + pci_set_byte(d->wmask + ICH9_LPC_ACPI_CTRL, + ICH9_LPC_ACPI_CTRL_ACPI_EN | + ICH9_LPC_ACPI_CTRL_SCI_IRQ_SEL_MASK); + + memory_region_init_io(&lpc->rcrb_mem, OBJECT(d), &rcrb_mmio_ops, lpc, + "lpc-rcrb-mmio", ICH9_CC_SIZE); + + lpc->isa_bus = isa_bus; + + ich9_cc_init(lpc); + apm_init(d, &lpc->apm, ich9_apm_ctrl_changed, lpc); + + lpc->machine_ready.notify = ich9_lpc_machine_ready; + qemu_add_machine_init_done_notifier(&lpc->machine_ready); + + memory_region_init_io(&lpc->rst_cnt_mem, OBJECT(d), &ich9_rst_cnt_ops, lpc, + "lpc-reset-control", 1); + memory_region_add_subregion_overlap(pci_address_space_io(d), + ICH9_RST_CNT_IOPORT, &lpc->rst_cnt_mem, + 1); + + qdev_init_gpio_out_named(dev, lpc->gsi, ICH9_GPIO_GSI, GSI_NUM_PINS); + + isa_bus_irqs(isa_bus, lpc->gsi); +} + +static bool ich9_rst_cnt_needed(void *opaque) +{ + ICH9LPCState *lpc = opaque; + + return (lpc->rst_cnt != 0); +} + +static const VMStateDescription vmstate_ich9_rst_cnt = { + .name = "ICH9LPC/rst_cnt", + .version_id = 1, + .minimum_version_id = 1, + .needed = ich9_rst_cnt_needed, + .fields = (VMStateField[]) { + VMSTATE_UINT8(rst_cnt, ICH9LPCState), + VMSTATE_END_OF_LIST() + } +}; + +static bool ich9_smi_feat_needed(void *opaque) +{ + ICH9LPCState *lpc = opaque; + + return !buffer_is_zero(lpc->smi_guest_features_le, + sizeof lpc->smi_guest_features_le) || + lpc->smi_features_ok; +} + +static const VMStateDescription vmstate_ich9_smi_feat = { + .name = "ICH9LPC/smi_feat", + .version_id = 1, + .minimum_version_id = 1, + .needed = ich9_smi_feat_needed, + .fields = (VMStateField[]) { + VMSTATE_UINT8_ARRAY(smi_guest_features_le, ICH9LPCState, + sizeof(uint64_t)), + VMSTATE_UINT8(smi_features_ok, ICH9LPCState), + VMSTATE_UINT64(smi_negotiated_features, ICH9LPCState), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_ich9_lpc = { + .name = "ICH9LPC", + .version_id = 1, + .minimum_version_id = 1, + .post_load = ich9_lpc_post_load, + .fields = (VMStateField[]) { + VMSTATE_PCI_DEVICE(d, ICH9LPCState), + VMSTATE_STRUCT(apm, ICH9LPCState, 0, vmstate_apm, APMState), + VMSTATE_STRUCT(pm, ICH9LPCState, 0, vmstate_ich9_pm, ICH9LPCPMRegs), + VMSTATE_UINT8_ARRAY(chip_config, ICH9LPCState, ICH9_CC_SIZE), + VMSTATE_UINT32(sci_level, ICH9LPCState), + VMSTATE_END_OF_LIST() + }, + .subsections = (const VMStateDescription*[]) { + &vmstate_ich9_rst_cnt, + &vmstate_ich9_smi_feat, + NULL + } +}; + +static Property ich9_lpc_properties[] = { + DEFINE_PROP_BOOL("noreboot", ICH9LPCState, pin_strap.spkr_hi, true), + DEFINE_PROP_BOOL("smm-compat", ICH9LPCState, pm.smm_compat, false), + DEFINE_PROP_BIT64("x-smi-broadcast", ICH9LPCState, smi_host_features, + ICH9_LPC_SMI_F_BROADCAST_BIT, true), + DEFINE_PROP_BIT64("x-smi-cpu-hotplug", ICH9LPCState, smi_host_features, + ICH9_LPC_SMI_F_CPU_HOTPLUG_BIT, true), + DEFINE_PROP_BIT64("x-smi-cpu-hotunplug", ICH9LPCState, smi_host_features, + ICH9_LPC_SMI_F_CPU_HOT_UNPLUG_BIT, true), + DEFINE_PROP_END_OF_LIST(), +}; + +static void ich9_send_gpe(AcpiDeviceIf *adev, AcpiEventStatusBits ev) +{ + ICH9LPCState *s = ICH9_LPC_DEVICE(adev); + + acpi_send_gpe_event(&s->pm.acpi_regs, s->pm.irq, ev); +} + +static void ich9_lpc_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k
= PCI_DEVICE_CLASS(klass); + HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(klass); + AcpiDeviceIfClass *adevc = ACPI_DEVICE_IF_CLASS(klass); + + set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); + dc->reset = ich9_lpc_reset; + k->realize = ich9_lpc_realize; + dc->vmsd = &vmstate_ich9_lpc; + device_class_set_props(dc, ich9_lpc_properties); + k->config_write = ich9_lpc_config_write; + dc->desc = "ICH9 LPC bridge"; + k->vendor_id = PCI_VENDOR_ID_INTEL; + k->device_id = PCI_DEVICE_ID_INTEL_ICH9_8; + k->revision = ICH9_A2_LPC_REVISION; + k->class_id = PCI_CLASS_BRIDGE_ISA; + /* + * Reason: part of ICH9 southbridge, needs to be wired up by + * pc_q35_init() + */ + dc->user_creatable = false; + hc->pre_plug = ich9_pm_device_pre_plug_cb; + hc->plug = ich9_pm_device_plug_cb; + hc->unplug_request = ich9_pm_device_unplug_request_cb; + hc->unplug = ich9_pm_device_unplug_cb; + adevc->ospm_status = ich9_pm_ospm_status; + adevc->send_event = ich9_send_gpe; + adevc->madt_cpu = pc_madt_cpu_entry; +} + +static const TypeInfo ich9_lpc_info = { + .name = TYPE_ICH9_LPC_DEVICE, + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(ICH9LPCState), + .instance_init = ich9_lpc_initfn, + .class_init = ich9_lpc_class_init, + .interfaces = (InterfaceInfo[]) { + { TYPE_HOTPLUG_HANDLER }, + { TYPE_ACPI_DEVICE_IF }, + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { } + } +}; + +static void ich9_lpc_register(void) +{ + type_register_static(&ich9_lpc_info); +} + +type_init(ich9_lpc_register); diff --git a/hw/isa/meson.build b/hw/isa/meson.build new file mode 100644 index 000000000..8bf678ca0 --- /dev/null +++ b/hw/isa/meson.build @@ -0,0 +1,11 @@ +softmmu_ss.add(when: 'CONFIG_APM', if_true: files('apm.c')) +softmmu_ss.add(when: 'CONFIG_I82378', if_true: files('i82378.c')) +softmmu_ss.add(when: 'CONFIG_ISA_BUS', if_true: files('isa-bus.c')) +softmmu_ss.add(when: 'CONFIG_ISA_SUPERIO', if_true: files('isa-superio.c')) +softmmu_ss.add(when: 'CONFIG_PC87312', if_true: files('pc87312.c')) +softmmu_ss.add(when: 'CONFIG_PIIX3', if_true: files('piix3.c')) +softmmu_ss.add(when: 'CONFIG_PIIX4', if_true: files('piix4.c')) +softmmu_ss.add(when: 'CONFIG_SMC37C669', if_true: files('smc37c669-superio.c')) +softmmu_ss.add(when: 'CONFIG_VT82C686', if_true: files('vt82c686.c')) + +specific_ss.add(when: 'CONFIG_LPC_ICH9', if_true: files('lpc_ich9.c')) diff --git a/hw/isa/pc87312.c b/hw/isa/pc87312.c new file mode 100644 index 000000000..8d7b8d3db --- /dev/null +++ b/hw/isa/pc87312.c @@ -0,0 +1,387 @@ +/* + * QEMU National Semiconductor PC87312 (Super I/O) + * + * Copyright (c) 2010-2012 Herve Poussineau + * Copyright (c) 2011-2012 Andreas Färber + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "hw/isa/pc87312.h" +#include "hw/qdev-properties.h" +#include "migration/vmstate.h" +#include "qapi/error.h" +#include "qemu/error-report.h" +#include "qemu/module.h" +#include "trace.h" + + +#define REG_FER 0 +#define REG_FAR 1 +#define REG_PTR 2 + +#define FER_PARALLEL_EN 0x01 +#define FER_UART1_EN 0x02 +#define FER_UART2_EN 0x04 +#define FER_FDC_EN 0x08 +#define FER_FDC_4 0x10 +#define FER_FDC_ADDR 0x20 +#define FER_IDE_EN 0x40 +#define FER_IDE_ADDR 0x80 + +#define FAR_PARALLEL_ADDR 0x03 +#define FAR_UART1_ADDR 0x0C +#define FAR_UART2_ADDR 0x30 +#define FAR_UART_3_4 0xC0 + +#define PTR_POWER_DOWN 0x01 +#define PTR_CLOCK_DOWN 0x02 +#define PTR_PWDN 0x04 +#define PTR_IRQ_5_7 0x08 +#define PTR_UART1_TEST 0x10 +#define PTR_UART2_TEST 0x20 +#define PTR_LOCK_CONF 0x40 +#define PTR_EPP_MODE 0x80 + + +/* Parallel port */ + +static bool is_parallel_enabled(ISASuperIODevice *sio, uint8_t index) +{ + PC87312State *s = PC87312(sio); + return index ? false : s->regs[REG_FER] & FER_PARALLEL_EN; +} + +static const uint16_t parallel_base[] = { 0x378, 0x3bc, 0x278, 0x00 }; + +static uint16_t get_parallel_iobase(ISASuperIODevice *sio, uint8_t index) +{ + PC87312State *s = PC87312(sio); + return parallel_base[s->regs[REG_FAR] & FAR_PARALLEL_ADDR]; +} + +static const unsigned int parallel_irq[] = { 5, 7, 5, 0 }; + +static unsigned int get_parallel_irq(ISASuperIODevice *sio, uint8_t index) +{ + PC87312State *s = PC87312(sio); + int idx; + idx = (s->regs[REG_FAR] & FAR_PARALLEL_ADDR); + if (idx == 0) { + return (s->regs[REG_PTR] & PTR_IRQ_5_7) ? 7 : 5; + } else { + return parallel_irq[idx]; + } +} + + +/* UARTs */ + +static const uint16_t uart_base[2][4] = { + { 0x3e8, 0x338, 0x2e8, 0x220 }, + { 0x2e8, 0x238, 0x2e0, 0x228 } +}; + +static uint16_t get_uart_iobase(ISASuperIODevice *sio, uint8_t i) +{ + PC87312State *s = PC87312(sio); + int idx; + idx = (s->regs[REG_FAR] >> (2 * i + 2)) & 0x3; + if (idx == 0) { + return 0x3f8; + } else if (idx == 1) { + return 0x2f8; + } else { + return uart_base[idx & 1][(s->regs[REG_FAR] & FAR_UART_3_4) >> 6]; + } +} + +static unsigned int get_uart_irq(ISASuperIODevice *sio, uint8_t i) +{ + PC87312State *s = PC87312(sio); + int idx; + idx = (s->regs[REG_FAR] >> (2 * i + 2)) & 0x3; + return (idx & 1) ? 3 : 4; +} + +static bool is_uart_enabled(ISASuperIODevice *sio, uint8_t i) +{ + PC87312State *s = PC87312(sio); + return s->regs[REG_FER] & (FER_UART1_EN << i); +} + + +/* Floppy controller */ + +static bool is_fdc_enabled(ISASuperIODevice *sio, uint8_t index) +{ + PC87312State *s = PC87312(sio); + assert(!index); + return s->regs[REG_FER] & FER_FDC_EN; +} + +static uint16_t get_fdc_iobase(ISASuperIODevice *sio, uint8_t index) +{ + PC87312State *s = PC87312(sio); + assert(!index); + return (s->regs[REG_FER] & FER_FDC_ADDR) ? 
0x370 : 0x3f0; +} + +static unsigned int get_fdc_irq(ISASuperIODevice *sio, uint8_t index) +{ + assert(!index); + return 6; +} + + +/* IDE controller */ + +static bool is_ide_enabled(ISASuperIODevice *sio, uint8_t index) +{ + PC87312State *s = PC87312(sio); + + return s->regs[REG_FER] & FER_IDE_EN; +} + +static uint16_t get_ide_iobase(ISASuperIODevice *sio, uint8_t index) +{ + PC87312State *s = PC87312(sio); + + if (index == 1) { + return get_ide_iobase(sio, 0) + 0x206; + } + return (s->regs[REG_FER] & FER_IDE_ADDR) ? 0x170 : 0x1f0; +} + +static unsigned int get_ide_irq(ISASuperIODevice *sio, uint8_t index) +{ + assert(index == 0); + return 14; +} + +static void reconfigure_devices(PC87312State *s) +{ + error_report("pc87312: unsupported device reconfiguration (%02x %02x %02x)", + s->regs[REG_FER], s->regs[REG_FAR], s->regs[REG_PTR]); +} + +static void pc87312_soft_reset(PC87312State *s) +{ + static const uint8_t fer_init[] = { + 0x4f, 0x4f, 0x4f, 0x4f, 0x4f, 0x4f, 0x4b, 0x4b, + 0x4b, 0x4b, 0x4b, 0x4b, 0x0f, 0x0f, 0x0f, 0x0f, + 0x49, 0x49, 0x49, 0x49, 0x07, 0x07, 0x07, 0x07, + 0x47, 0x47, 0x47, 0x47, 0x47, 0x47, 0x08, 0x00, + }; + static const uint8_t far_init[] = { + 0x10, 0x11, 0x11, 0x39, 0x24, 0x38, 0x00, 0x01, + 0x01, 0x09, 0x08, 0x08, 0x10, 0x11, 0x39, 0x24, + 0x00, 0x01, 0x01, 0x00, 0x10, 0x11, 0x39, 0x24, + 0x10, 0x11, 0x11, 0x39, 0x24, 0x38, 0x10, 0x10, + }; + static const uint8_t ptr_init[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + }; + + s->read_id_step = 0; + s->selected_index = REG_FER; + + s->regs[REG_FER] = fer_init[s->config & 0x1f]; + s->regs[REG_FAR] = far_init[s->config & 0x1f]; + s->regs[REG_PTR] = ptr_init[s->config & 0x1f]; +} + +static void pc87312_hard_reset(PC87312State *s) +{ + pc87312_soft_reset(s); +} + +static void pc87312_io_write(void *opaque, hwaddr addr, uint64_t val, + unsigned int size) +{ + PC87312State *s = opaque; + + trace_pc87312_io_write(addr, val); + + if ((addr & 1) == 0) { + /* Index register */ + s->read_id_step = 2; + s->selected_index = val; + } else { + /* Data register */ + if (s->selected_index < 3) { + s->regs[s->selected_index] = val; + reconfigure_devices(s); + } + } +} + +static uint64_t pc87312_io_read(void *opaque, hwaddr addr, unsigned int size) +{ + PC87312State *s = opaque; + uint32_t val; + + if ((addr & 1) == 0) { + /* Index register */ + if (s->read_id_step++ == 0) { + val = 0x88; + } else if (s->read_id_step++ == 1) { + val = 0; + } else { + val = s->selected_index; + } + } else { + /* Data register */ + if (s->selected_index < 3) { + val = s->regs[s->selected_index]; + } else { + /* Invalid selected index */ + val = 0; + } + } + + trace_pc87312_io_read(addr, val); + return val; +} + +static const MemoryRegionOps pc87312_io_ops = { + .read = pc87312_io_read, + .write = pc87312_io_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 1, + .max_access_size = 1, + }, +}; + +static int pc87312_post_load(void *opaque, int version_id) +{ + PC87312State *s = opaque; + + reconfigure_devices(s); + return 0; +} + +static void pc87312_reset(DeviceState *d) +{ + PC87312State *s = PC87312(d); + + pc87312_soft_reset(s); +} + +static void pc87312_realize(DeviceState *dev, Error **errp) +{ + PC87312State *s; + ISADevice *isa; + Error *local_err = NULL; + + s = PC87312(dev); + isa = ISA_DEVICE(dev); + isa_register_ioport(isa, &s->io, s->iobase); + 
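+ /* pc87312_hard_reset() loads the power-on defaults selected by the "config" property, so the Super I/O sub-devices created by parent_realize() below see the intended iobase/irq settings. */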
pc87312_hard_reset(s); + + ISA_SUPERIO_GET_CLASS(dev)->parent_realize(dev, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } +} + +static void pc87312_initfn(Object *obj) +{ + PC87312State *s = PC87312(obj); + + memory_region_init_io(&s->io, obj, &pc87312_io_ops, s, "pc87312", 2); +} + +static const VMStateDescription vmstate_pc87312 = { + .name = "pc87312", + .version_id = 1, + .minimum_version_id = 1, + .post_load = pc87312_post_load, + .fields = (VMStateField[]) { + VMSTATE_UINT8(read_id_step, PC87312State), + VMSTATE_UINT8(selected_index, PC87312State), + VMSTATE_UINT8_ARRAY(regs, PC87312State, 3), + VMSTATE_END_OF_LIST() + } +}; + +static Property pc87312_properties[] = { + DEFINE_PROP_UINT16("iobase", PC87312State, iobase, 0x398), + DEFINE_PROP_UINT8("config", PC87312State, config, 1), + DEFINE_PROP_END_OF_LIST() +}; + +static void pc87312_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + ISASuperIOClass *sc = ISA_SUPERIO_CLASS(klass); + + sc->parent_realize = dc->realize; + dc->realize = pc87312_realize; + dc->reset = pc87312_reset; + dc->vmsd = &vmstate_pc87312; + device_class_set_props(dc, pc87312_properties); + + sc->parallel = (ISASuperIOFuncs){ + .count = 1, + .is_enabled = is_parallel_enabled, + .get_iobase = get_parallel_iobase, + .get_irq = get_parallel_irq, + }; + sc->serial = (ISASuperIOFuncs){ + .count = 2, + .is_enabled = is_uart_enabled, + .get_iobase = get_uart_iobase, + .get_irq = get_uart_irq, + }; + sc->floppy = (ISASuperIOFuncs){ + .count = 1, + .is_enabled = is_fdc_enabled, + .get_iobase = get_fdc_iobase, + .get_irq = get_fdc_irq, + }; + sc->ide = (ISASuperIOFuncs){ + .count = 1, + .is_enabled = is_ide_enabled, + .get_iobase = get_ide_iobase, + .get_irq = get_ide_irq, + }; +} + +static const TypeInfo pc87312_type_info = { + .name = TYPE_PC87312, + .parent = TYPE_ISA_SUPERIO, + .instance_size = sizeof(PC87312State), + .instance_init = pc87312_initfn, + .class_init = pc87312_class_init, + /* FIXME use a qdev drive property instead of drive_get() */ +}; + +static void pc87312_register_types(void) +{ + type_register_static(&pc87312_type_info); +} + +type_init(pc87312_register_types) diff --git a/hw/isa/piix3.c b/hw/isa/piix3.c new file mode 100644 index 000000000..dab901c9a --- /dev/null +++ b/hw/isa/piix3.c @@ -0,0 +1,395 @@ +/* + * QEMU PIIX PCI ISA Bridge Emulation + * + * Copyright (c) 2006 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include "qemu/range.h" +#include "hw/southbridge/piix.h" +#include "hw/irq.h" +#include "hw/isa/isa.h" +#include "hw/xen/xen.h" +#include "sysemu/xen.h" +#include "sysemu/reset.h" +#include "sysemu/runstate.h" +#include "migration/vmstate.h" + +#define XEN_PIIX_NUM_PIRQS 128ULL + +#define TYPE_PIIX3_DEVICE "PIIX3" +#define TYPE_PIIX3_XEN_DEVICE "PIIX3-xen" + +static void piix3_set_irq_pic(PIIX3State *piix3, int pic_irq) +{ + qemu_set_irq(piix3->pic[pic_irq], + !!(piix3->pic_levels & + (((1ULL << PIIX_NUM_PIRQS) - 1) << + (pic_irq * PIIX_NUM_PIRQS)))); +} + +static void piix3_set_irq_level_internal(PIIX3State *piix3, int pirq, int level) +{ + int pic_irq; + uint64_t mask; + + pic_irq = piix3->dev.config[PIIX_PIRQCA + pirq]; + if (pic_irq >= PIIX_NUM_PIC_IRQS) { + return; + } + + mask = 1ULL << ((pic_irq * PIIX_NUM_PIRQS) + pirq); + piix3->pic_levels &= ~mask; + piix3->pic_levels |= mask * !!level; +} + +static void piix3_set_irq_level(PIIX3State *piix3, int pirq, int level) +{ + int pic_irq; + + pic_irq = piix3->dev.config[PIIX_PIRQCA + pirq]; + if (pic_irq >= PIIX_NUM_PIC_IRQS) { + return; + } + + piix3_set_irq_level_internal(piix3, pirq, level); + + piix3_set_irq_pic(piix3, pic_irq); +} + +static void piix3_set_irq(void *opaque, int pirq, int level) +{ + PIIX3State *piix3 = opaque; + piix3_set_irq_level(piix3, pirq, level); +} + +static PCIINTxRoute piix3_route_intx_pin_to_irq(void *opaque, int pin) +{ + PIIX3State *piix3 = opaque; + int irq = piix3->dev.config[PIIX_PIRQCA + pin]; + PCIINTxRoute route; + + if (irq < PIIX_NUM_PIC_IRQS) { + route.mode = PCI_INTX_ENABLED; + route.irq = irq; + } else { + route.mode = PCI_INTX_DISABLED; + route.irq = -1; + } + return route; +} + +/* irq routing is changed. so rebuild bitmap */ +static void piix3_update_irq_levels(PIIX3State *piix3) +{ + PCIBus *bus = pci_get_bus(&piix3->dev); + int pirq; + + piix3->pic_levels = 0; + for (pirq = 0; pirq < PIIX_NUM_PIRQS; pirq++) { + piix3_set_irq_level(piix3, pirq, pci_bus_get_irq_level(bus, pirq)); + } +} + +static void piix3_write_config(PCIDevice *dev, + uint32_t address, uint32_t val, int len) +{ + pci_default_write_config(dev, address, val, len); + if (ranges_overlap(address, len, PIIX_PIRQCA, 4)) { + PIIX3State *piix3 = PIIX3_PCI_DEVICE(dev); + int pic_irq; + + pci_bus_fire_intx_routing_notifier(pci_get_bus(&piix3->dev)); + piix3_update_irq_levels(piix3); + for (pic_irq = 0; pic_irq < PIIX_NUM_PIC_IRQS; pic_irq++) { + piix3_set_irq_pic(piix3, pic_irq); + } + } +} + +static void piix3_write_config_xen(PCIDevice *dev, + uint32_t address, uint32_t val, int len) +{ + xen_piix_pci_write_config_client(address, val, len); + piix3_write_config(dev, address, val, len); +} + +static void piix3_reset(void *opaque) +{ + PIIX3State *d = opaque; + uint8_t *pci_conf = d->dev.config; + + pci_conf[0x04] = 0x07; /* master, memory and I/O */ + pci_conf[0x05] = 0x00; + pci_conf[0x06] = 0x00; + pci_conf[0x07] = 0x02; /* PCI_status_devsel_medium */ + pci_conf[0x4c] = 0x4d; + pci_conf[0x4e] = 0x03; + pci_conf[0x4f] = 0x00; + pci_conf[0x60] = 0x80; + pci_conf[0x61] = 0x80; + pci_conf[0x62] = 0x80; + pci_conf[0x63] = 0x80; + pci_conf[0x69] = 0x02; + pci_conf[0x70] = 0x80; + pci_conf[0x76] = 0x0c; + pci_conf[0x77] = 0x0c; + pci_conf[0x78] = 0x02; + pci_conf[0x79] = 0x00; + pci_conf[0x80] = 0x00; + pci_conf[0x82] = 0x00; + pci_conf[0xa0] = 0x08; + pci_conf[0xa2] = 0x00; + pci_conf[0xa3] = 0x00; + pci_conf[0xa4] = 0x00; + pci_conf[0xa5] = 0x00; + pci_conf[0xa6] = 0x00; + pci_conf[0xa7] = 0x00; + 
pci_conf[0xa8] = 0x0f; + pci_conf[0xaa] = 0x00; + pci_conf[0xab] = 0x00; + pci_conf[0xac] = 0x00; + pci_conf[0xae] = 0x00; + + d->pic_levels = 0; + d->rcr = 0; +} + +static int piix3_post_load(void *opaque, int version_id) +{ + PIIX3State *piix3 = opaque; + int pirq; + + /* + * Because the i8259 has not been deserialized yet, qemu_irq_raise + * might bring the system to a different state than the saved one; + * for example, the interrupt could be masked but the i8259 would + * not know that yet and would trigger an interrupt in the CPU. + * + * Here, we update irq levels without raising the interrupt. + * Interrupt state will be deserialized separately through the i8259. + */ + piix3->pic_levels = 0; + for (pirq = 0; pirq < PIIX_NUM_PIRQS; pirq++) { + piix3_set_irq_level_internal(piix3, pirq, + pci_bus_get_irq_level(pci_get_bus(&piix3->dev), pirq)); + } + return 0; +} + +static int piix3_pre_save(void *opaque) +{ + int i; + PIIX3State *piix3 = opaque; + + for (i = 0; i < ARRAY_SIZE(piix3->pci_irq_levels_vmstate); i++) { + piix3->pci_irq_levels_vmstate[i] = + pci_bus_get_irq_level(pci_get_bus(&piix3->dev), i); + } + + return 0; +} + +static bool piix3_rcr_needed(void *opaque) +{ + PIIX3State *piix3 = opaque; + + return (piix3->rcr != 0); +} + +static const VMStateDescription vmstate_piix3_rcr = { + .name = "PIIX3/rcr", + .version_id = 1, + .minimum_version_id = 1, + .needed = piix3_rcr_needed, + .fields = (VMStateField[]) { + VMSTATE_UINT8(rcr, PIIX3State), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_piix3 = { + .name = "PIIX3", + .version_id = 3, + .minimum_version_id = 2, + .post_load = piix3_post_load, + .pre_save = piix3_pre_save, + .fields = (VMStateField[]) { + VMSTATE_PCI_DEVICE(dev, PIIX3State), + VMSTATE_INT32_ARRAY_V(pci_irq_levels_vmstate, PIIX3State, + PIIX_NUM_PIRQS, 3), + VMSTATE_END_OF_LIST() + }, + .subsections = (const VMStateDescription*[]) { + &vmstate_piix3_rcr, + NULL + } +}; + + +static void rcr_write(void *opaque, hwaddr addr, uint64_t val, unsigned len) +{ + PIIX3State *d = opaque; + + if (val & 4) { + qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); + return; + } + d->rcr = val & 2; /* keep System Reset type only */ +} + +static uint64_t rcr_read(void *opaque, hwaddr addr, unsigned len) +{ + PIIX3State *d = opaque; + + return d->rcr; +} + +static const MemoryRegionOps rcr_ops = { + .read = rcr_read, + .write = rcr_write, + .endianness = DEVICE_LITTLE_ENDIAN +}; + +static void piix3_realize(PCIDevice *dev, Error **errp) +{ + PIIX3State *d = PIIX3_PCI_DEVICE(dev); + + if (!isa_bus_new(DEVICE(d), get_system_memory(), + pci_address_space_io(dev), errp)) { + return; + } + + memory_region_init_io(&d->rcr_mem, OBJECT(dev), &rcr_ops, d, + "piix3-reset-control", 1); + memory_region_add_subregion_overlap(pci_address_space_io(dev), + PIIX_RCR_IOPORT, &d->rcr_mem, 1); + + qemu_register_reset(piix3_reset, d); +} + +static void pci_piix3_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + + dc->desc = "ISA bridge"; + dc->vmsd = &vmstate_piix3; + dc->hotpluggable = false; + k->realize = piix3_realize; + k->vendor_id = PCI_VENDOR_ID_INTEL; + /* 82371SB PIIX3 PCI-to-ISA bridge (Step A1) */ + k->device_id = PCI_DEVICE_ID_INTEL_82371SB_0; + k->class_id = PCI_CLASS_BRIDGE_ISA; + /* + * Reason: part of PIIX3 southbridge, needs to be wired up by + * pc_piix.c's pc_init1() + */ + dc->user_creatable = false; +} + +static const TypeInfo piix3_pci_type_info = { + .name = 
TYPE_PIIX3_PCI_DEVICE,
+    .parent = TYPE_PCI_DEVICE,
+    .instance_size = sizeof(PIIX3State),
+    .abstract = true,
+    .class_init = pci_piix3_class_init,
+    .interfaces = (InterfaceInfo[]) {
+        { INTERFACE_CONVENTIONAL_PCI_DEVICE },
+        { },
+    },
+};
+
+static void piix3_class_init(ObjectClass *klass, void *data)
+{
+    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
+
+    k->config_write = piix3_write_config;
+}
+
+static const TypeInfo piix3_info = {
+    .name = TYPE_PIIX3_DEVICE,
+    .parent = TYPE_PIIX3_PCI_DEVICE,
+    .class_init = piix3_class_init,
+};
+
+static void piix3_xen_class_init(ObjectClass *klass, void *data)
+{
+    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
+
+    k->config_write = piix3_write_config_xen;
+}
+
+static const TypeInfo piix3_xen_info = {
+    .name = TYPE_PIIX3_XEN_DEVICE,
+    .parent = TYPE_PIIX3_PCI_DEVICE,
+    .class_init = piix3_xen_class_init,
+};
+
+static void piix3_register_types(void)
+{
+    type_register_static(&piix3_pci_type_info);
+    type_register_static(&piix3_info);
+    type_register_static(&piix3_xen_info);
+}
+
+type_init(piix3_register_types)
+
+/*
+ * Return the global irq number corresponding to a given device irq
+ * pin. We could also use the bus number to have a more precise mapping.
+ */
+static int pci_slot_get_pirq(PCIDevice *pci_dev, int pci_intx)
+{
+    int slot_addend;
+    slot_addend = PCI_SLOT(pci_dev->devfn) - 1;
+    return (pci_intx + slot_addend) & 3;
+}
+
+PIIX3State *piix3_create(PCIBus *pci_bus, ISABus **isa_bus)
+{
+    PIIX3State *piix3;
+    PCIDevice *pci_dev;
+
+    /*
+     * Xen supports additional interrupt routes from the PCI devices to
+     * the IOAPIC: the four pins of each PCI device on the bus are also
+     * connected to the IOAPIC directly.
+     * These additional routes can be discovered through ACPI.
+     */
+    if (xen_enabled()) {
+        pci_dev = pci_create_simple_multifunction(pci_bus, -1, true,
+                                                  TYPE_PIIX3_XEN_DEVICE);
+        piix3 = PIIX3_PCI_DEVICE(pci_dev);
+        pci_bus_irqs(pci_bus, xen_piix3_set_irq, xen_pci_slot_get_pirq,
+                     piix3, XEN_PIIX_NUM_PIRQS);
+    } else {
+        pci_dev = pci_create_simple_multifunction(pci_bus, -1, true,
+                                                  TYPE_PIIX3_DEVICE);
+        piix3 = PIIX3_PCI_DEVICE(pci_dev);
+        pci_bus_irqs(pci_bus, piix3_set_irq, pci_slot_get_pirq,
+                     piix3, PIIX_NUM_PIRQS);
+        pci_bus_set_route_irq_fn(pci_bus, piix3_route_intx_pin_to_irq);
+    }
+    *isa_bus = ISA_BUS(qdev_get_child_bus(DEVICE(piix3), "isa.0"));
+
+    return piix3;
+}
diff --git a/hw/isa/piix4.c b/hw/isa/piix4.c
new file mode 100644
index 000000000..0fe7b69bc
--- /dev/null
+++ b/hw/isa/piix4.c
@@ -0,0 +1,275 @@
+/*
+ * QEMU PIIX4 PCI Bridge Emulation
+ *
+ * Copyright (c) 2006 Fabrice Bellard
+ * Copyright (c) 2018 Hervé Poussineau
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "hw/irq.h"
+#include "hw/southbridge/piix.h"
+#include "hw/pci/pci.h"
+#include "hw/isa/isa.h"
+#include "hw/intc/i8259.h"
+#include "hw/dma/i8257.h"
+#include "hw/timer/i8254.h"
+#include "hw/rtc/mc146818rtc.h"
+#include "hw/ide/pci.h"
+#include "migration/vmstate.h"
+#include "sysemu/reset.h"
+#include "sysemu/runstate.h"
+#include "qom/object.h"
+
+PCIDevice *piix4_dev;
+
+struct PIIX4State {
+    PCIDevice dev;
+    qemu_irq cpu_intr;
+    qemu_irq *isa;
+
+    RTCState rtc;
+    /* Reset Control Register */
+    MemoryRegion rcr_mem;
+    uint8_t rcr;
+};
+
+OBJECT_DECLARE_SIMPLE_TYPE(PIIX4State, PIIX4_PCI_DEVICE)
+
+static void piix4_isa_reset(DeviceState *dev)
+{
+    PIIX4State *d = PIIX4_PCI_DEVICE(dev);
+    uint8_t *pci_conf = d->dev.config;
+
+    pci_conf[0x04] = 0x07; /* master, memory and I/O */
+    pci_conf[0x05] = 0x00;
+    pci_conf[0x06] = 0x00;
+    pci_conf[0x07] = 0x02; /* PCI_status_devsel_medium */
+    pci_conf[0x4c] = 0x4d;
+    pci_conf[0x4e] = 0x03;
+    pci_conf[0x4f] = 0x00;
+    pci_conf[0x60] = 0x0a; /* PCI A -> IRQ 10 */
+    pci_conf[0x61] = 0x0a; /* PCI B -> IRQ 10 */
+    pci_conf[0x62] = 0x0b; /* PCI C -> IRQ 11 */
+    pci_conf[0x63] = 0x0b; /* PCI D -> IRQ 11 */
+    pci_conf[0x69] = 0x02;
+    pci_conf[0x70] = 0x80;
+    pci_conf[0x76] = 0x0c;
+    pci_conf[0x77] = 0x0c;
+    pci_conf[0x78] = 0x02;
+    pci_conf[0x79] = 0x00;
+    pci_conf[0x80] = 0x00;
+    pci_conf[0x82] = 0x00;
+    pci_conf[0xa0] = 0x08;
+    pci_conf[0xa2] = 0x00;
+    pci_conf[0xa3] = 0x00;
+    pci_conf[0xa4] = 0x00;
+    pci_conf[0xa5] = 0x00;
+    pci_conf[0xa6] = 0x00;
+    pci_conf[0xa7] = 0x00;
+    pci_conf[0xa8] = 0x0f;
+    pci_conf[0xaa] = 0x00;
+    pci_conf[0xab] = 0x00;
+    pci_conf[0xac] = 0x00;
+    pci_conf[0xae] = 0x00;
+}
+
+static int piix4_ide_post_load(void *opaque, int version_id)
+{
+    PIIX4State *s = opaque;
+
+    if (version_id == 2) {
+        s->rcr = 0;
+    }
+
+    return 0;
+}
+
+static const VMStateDescription vmstate_piix4 = {
+    .name = "PIIX4",
+    .version_id = 3,
+    .minimum_version_id = 2,
+    .post_load = piix4_ide_post_load,
+    .fields = (VMStateField[]) {
+        VMSTATE_PCI_DEVICE(dev, PIIX4State),
+        VMSTATE_UINT8_V(rcr, PIIX4State, 3),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+static void piix4_request_i8259_irq(void *opaque, int irq, int level)
+{
+    PIIX4State *s = opaque;
+    qemu_set_irq(s->cpu_intr, level);
+}
+
+static void piix4_set_i8259_irq(void *opaque, int irq, int level)
+{
+    PIIX4State *s = opaque;
+    qemu_set_irq(s->isa[irq], level);
+}
+
+static void piix4_rcr_write(void *opaque, hwaddr addr, uint64_t val,
+                            unsigned int len)
+{
+    PIIX4State *s = opaque;
+
+    if (val & 4) {
+        qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
+        return;
+    }
+
+    s->rcr = val & 2; /* keep System Reset type only */
+}
+
+static uint64_t piix4_rcr_read(void *opaque, hwaddr addr, unsigned int len)
+{
+    PIIX4State *s = opaque;
+
+    return s->rcr;
+}
+
+static const MemoryRegionOps piix4_rcr_ops = {
+    .read = piix4_rcr_read,
+    .write = piix4_rcr_write,
+    .endianness = DEVICE_LITTLE_ENDIAN,
+    .impl = {
+        .min_access_size = 1,
+        .max_access_size = 1,
+    },
+};
+
+static void piix4_realize(PCIDevice *dev, Error **errp)
+{
+    PIIX4State *s = PIIX4_PCI_DEVICE(dev);
+    ISABus *isa_bus;
+    qemu_irq *i8259_out_irq;
+
+    isa_bus = isa_bus_new(DEVICE(dev), pci_address_space(dev),
+
pci_address_space_io(dev), errp); + if (!isa_bus) { + return; + } + + qdev_init_gpio_in_named(DEVICE(dev), piix4_set_i8259_irq, + "isa", ISA_NUM_IRQS); + qdev_init_gpio_out_named(DEVICE(dev), &s->cpu_intr, + "intr", 1); + + memory_region_init_io(&s->rcr_mem, OBJECT(dev), &piix4_rcr_ops, s, + "reset-control", 1); + memory_region_add_subregion_overlap(pci_address_space_io(dev), + PIIX_RCR_IOPORT, &s->rcr_mem, 1); + + /* initialize i8259 pic */ + i8259_out_irq = qemu_allocate_irqs(piix4_request_i8259_irq, s, 1); + s->isa = i8259_init(isa_bus, *i8259_out_irq); + + /* initialize ISA irqs */ + isa_bus_irqs(isa_bus, s->isa); + + /* initialize pit */ + i8254_pit_init(isa_bus, 0x40, 0, NULL); + + /* DMA */ + i8257_dma_init(isa_bus, 0); + + /* RTC */ + qdev_prop_set_int32(DEVICE(&s->rtc), "base_year", 2000); + if (!qdev_realize(DEVICE(&s->rtc), BUS(isa_bus), errp)) { + return; + } + isa_init_irq(ISA_DEVICE(&s->rtc), &s->rtc.irq, RTC_ISA_IRQ); + + piix4_dev = dev; +} + +static void piix4_init(Object *obj) +{ + PIIX4State *s = PIIX4_PCI_DEVICE(obj); + + object_initialize(&s->rtc, sizeof(s->rtc), TYPE_MC146818_RTC); +} + +static void piix4_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + + k->realize = piix4_realize; + k->vendor_id = PCI_VENDOR_ID_INTEL; + k->device_id = PCI_DEVICE_ID_INTEL_82371AB_0; + k->class_id = PCI_CLASS_BRIDGE_ISA; + dc->reset = piix4_isa_reset; + dc->desc = "ISA bridge"; + dc->vmsd = &vmstate_piix4; + /* + * Reason: part of PIIX4 southbridge, needs to be wired up, + * e.g. by mips_malta_init() + */ + dc->user_creatable = false; + dc->hotpluggable = false; +} + +static const TypeInfo piix4_info = { + .name = TYPE_PIIX4_PCI_DEVICE, + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(PIIX4State), + .instance_init = piix4_init, + .class_init = piix4_class_init, + .interfaces = (InterfaceInfo[]) { + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { }, + }, +}; + +static void piix4_register_types(void) +{ + type_register_static(&piix4_info); +} + +type_init(piix4_register_types) + +DeviceState *piix4_create(PCIBus *pci_bus, ISABus **isa_bus, I2CBus **smbus) +{ + PCIDevice *pci; + DeviceState *dev; + int devfn = PCI_DEVFN(10, 0); + + pci = pci_create_simple_multifunction(pci_bus, devfn, true, + TYPE_PIIX4_PCI_DEVICE); + dev = DEVICE(pci); + if (isa_bus) { + *isa_bus = ISA_BUS(qdev_get_child_bus(dev, "isa.0")); + } + + pci = pci_create_simple(pci_bus, devfn + 1, "piix4-ide"); + pci_ide_create_devs(pci); + + pci_create_simple(pci_bus, devfn + 2, "piix4-usb-uhci"); + if (smbus) { + *smbus = piix4_pm_init(pci_bus, devfn + 3, 0x1100, + qdev_get_gpio_in_named(dev, "isa", 9), + NULL, 0, NULL); + } + + return dev; +} diff --git a/hw/isa/smc37c669-superio.c b/hw/isa/smc37c669-superio.c new file mode 100644 index 000000000..18287741c --- /dev/null +++ b/hw/isa/smc37c669-superio.c @@ -0,0 +1,116 @@ +/* + * SMC FDC37C669 Super I/O controller + * + * Copyright (c) 2018 Philippe Mathieu-Daudé + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "hw/isa/superio.h" +#include "qemu/module.h" + +/* UARTs (compatible with NS16450 or PC16550) */ + +static bool is_serial_enabled(ISASuperIODevice *sio, uint8_t index) +{ + return index < 2; +} + +static uint16_t get_serial_iobase(ISASuperIODevice *sio, uint8_t index) +{ + return index ? 
0x2f8 : 0x3f8; +} + +static unsigned int get_serial_irq(ISASuperIODevice *sio, uint8_t index) +{ + return index ? 3 : 4; +} + +/* Parallel port */ + +static bool is_parallel_enabled(ISASuperIODevice *sio, uint8_t index) +{ + return index < 1; +} + +static uint16_t get_parallel_iobase(ISASuperIODevice *sio, uint8_t index) +{ + return 0x378; +} + +static unsigned int get_parallel_irq(ISASuperIODevice *sio, uint8_t index) +{ + return 7; +} + +static unsigned int get_parallel_dma(ISASuperIODevice *sio, uint8_t index) +{ + return 3; +} + +/* Diskette controller (Software compatible with the Intel PC8477) */ + +static bool is_fdc_enabled(ISASuperIODevice *sio, uint8_t index) +{ + return index < 1; +} + +static uint16_t get_fdc_iobase(ISASuperIODevice *sio, uint8_t index) +{ + return 0x3f0; +} + +static unsigned int get_fdc_irq(ISASuperIODevice *sio, uint8_t index) +{ + return 6; +} + +static unsigned int get_fdc_dma(ISASuperIODevice *sio, uint8_t index) +{ + return 2; +} + +static void smc37c669_class_init(ObjectClass *klass, void *data) +{ + ISASuperIOClass *sc = ISA_SUPERIO_CLASS(klass); + + sc->parallel = (ISASuperIOFuncs){ + .count = 1, + .is_enabled = is_parallel_enabled, + .get_iobase = get_parallel_iobase, + .get_irq = get_parallel_irq, + .get_dma = get_parallel_dma, + }; + sc->serial = (ISASuperIOFuncs){ + .count = 2, + .is_enabled = is_serial_enabled, + .get_iobase = get_serial_iobase, + .get_irq = get_serial_irq, + }; + sc->floppy = (ISASuperIOFuncs){ + .count = 1, + .is_enabled = is_fdc_enabled, + .get_iobase = get_fdc_iobase, + .get_irq = get_fdc_irq, + .get_dma = get_fdc_dma, + }; + sc->ide.count = 0; +} + +static const TypeInfo smc37c669_type_info = { + .name = TYPE_SMC37C669_SUPERIO, + .parent = TYPE_ISA_SUPERIO, + .instance_size = sizeof(ISASuperIODevice), + .class_size = sizeof(ISASuperIOClass), + .class_init = smc37c669_class_init, +}; + +static void smc37c669_register_types(void) +{ + type_register_static(&smc37c669_type_info); +} + +type_init(smc37c669_register_types) diff --git a/hw/isa/trace-events b/hw/isa/trace-events new file mode 100644 index 000000000..b8f877e1e --- /dev/null +++ b/hw/isa/trace-events @@ -0,0 +1,23 @@ +# See docs/devel/tracing.rst for syntax documentation. 
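+#
+# Each entry below declares one trace point: the prototype-style line gives
+# the event name and its typed arguments, and the quoted string is the
+# printf-style format used to render those arguments when the event fires.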
+ +# isa-superio.c +superio_create_parallel(int id, uint16_t base, unsigned int irq) "id=%d, base 0x%03x, irq %u" +superio_create_serial(int id, uint16_t base, unsigned int irq) "id=%d, base 0x%03x, irq %u" +superio_create_floppy(int id, uint16_t base, unsigned int irq) "id=%d, base 0x%03x, irq %u" +superio_create_ide(int id, uint16_t base, unsigned int irq) "id=%d, base 0x%03x, irq %u" + +# pc87312.c +pc87312_io_read(uint32_t addr, uint32_t val) "read addr=0x%x val=0x%x" +pc87312_io_write(uint32_t addr, uint32_t val) "write addr=0x%x val=0x%x" + +# apm.c +apm_io_read(uint8_t addr, uint8_t val) "read addr=0x%x val=0x%02x" +apm_io_write(uint8_t addr, uint8_t val) "write addr=0x%x val=0x%02x" + +# vt82c686.c +via_isa_write(uint32_t addr, uint32_t val, int len) "addr 0x%x val 0x%x len 0x%x" +via_pm_write(uint32_t addr, uint32_t val, int len) "addr 0x%x val 0x%x len 0x%x" +via_pm_io_read(uint32_t addr, uint32_t val, int len) "addr 0x%x val 0x%x len 0x%x" +via_pm_io_write(uint32_t addr, uint32_t val, int len) "addr 0x%x val 0x%x len 0x%x" +via_superio_read(uint8_t addr, uint8_t val) "addr 0x%x val 0x%x" +via_superio_write(uint8_t addr, uint32_t val) "addr 0x%x val 0x%x" diff --git a/hw/isa/trace.h b/hw/isa/trace.h new file mode 100644 index 000000000..501205cfc --- /dev/null +++ b/hw/isa/trace.h @@ -0,0 +1 @@ +#include "trace/trace-hw_isa.h" diff --git a/hw/isa/vt82c686.c b/hw/isa/vt82c686.c new file mode 100644 index 000000000..8f656251b --- /dev/null +++ b/hw/isa/vt82c686.c @@ -0,0 +1,754 @@ +/* + * VT82C686B south bridge support + * + * Copyright (c) 2008 yajin (yajin@vm-kernel.org) + * Copyright (c) 2009 chenming (chenming@rdc.faw.com.cn) + * Copyright (c) 2010 Huacai Chen (zltjiangshi@gmail.com) + * This code is licensed under the GNU GPL v2. + * + * Contributions after 2012-01-13 are licensed under the terms of the + * GNU GPL, version 2 or (at your option) any later version. 
+ * + * VT8231 south bridge support and general clean up to allow it + * Copyright (c) 2018-2020 BALATON Zoltan + */ + +#include "qemu/osdep.h" +#include "hw/isa/vt82c686.h" +#include "hw/pci/pci.h" +#include "hw/qdev-properties.h" +#include "hw/isa/isa.h" +#include "hw/isa/superio.h" +#include "hw/intc/i8259.h" +#include "hw/irq.h" +#include "hw/dma/i8257.h" +#include "hw/timer/i8254.h" +#include "hw/rtc/mc146818rtc.h" +#include "migration/vmstate.h" +#include "hw/isa/apm.h" +#include "hw/acpi/acpi.h" +#include "hw/i2c/pm_smbus.h" +#include "qapi/error.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "qemu/range.h" +#include "qemu/timer.h" +#include "trace.h" + +#define TYPE_VIA_PM "via-pm" +OBJECT_DECLARE_SIMPLE_TYPE(ViaPMState, VIA_PM) + +struct ViaPMState { + PCIDevice dev; + MemoryRegion io; + ACPIREGS ar; + APMState apm; + PMSMBus smb; +}; + +static void pm_io_space_update(ViaPMState *s) +{ + uint32_t pmbase = pci_get_long(s->dev.config + 0x48) & 0xff80UL; + + memory_region_transaction_begin(); + memory_region_set_address(&s->io, pmbase); + memory_region_set_enabled(&s->io, s->dev.config[0x41] & BIT(7)); + memory_region_transaction_commit(); +} + +static void smb_io_space_update(ViaPMState *s) +{ + uint32_t smbase = pci_get_long(s->dev.config + 0x90) & 0xfff0UL; + + memory_region_transaction_begin(); + memory_region_set_address(&s->smb.io, smbase); + memory_region_set_enabled(&s->smb.io, s->dev.config[0xd2] & BIT(0)); + memory_region_transaction_commit(); +} + +static int vmstate_acpi_post_load(void *opaque, int version_id) +{ + ViaPMState *s = opaque; + + pm_io_space_update(s); + smb_io_space_update(s); + return 0; +} + +static const VMStateDescription vmstate_acpi = { + .name = "vt82c686b_pm", + .version_id = 1, + .minimum_version_id = 1, + .post_load = vmstate_acpi_post_load, + .fields = (VMStateField[]) { + VMSTATE_PCI_DEVICE(dev, ViaPMState), + VMSTATE_UINT16(ar.pm1.evt.sts, ViaPMState), + VMSTATE_UINT16(ar.pm1.evt.en, ViaPMState), + VMSTATE_UINT16(ar.pm1.cnt.cnt, ViaPMState), + VMSTATE_STRUCT(apm, ViaPMState, 0, vmstate_apm, APMState), + VMSTATE_TIMER_PTR(ar.tmr.timer, ViaPMState), + VMSTATE_INT64(ar.tmr.overflow_time, ViaPMState), + VMSTATE_END_OF_LIST() + } +}; + +static void pm_write_config(PCIDevice *d, uint32_t addr, uint32_t val, int len) +{ + ViaPMState *s = VIA_PM(d); + + trace_via_pm_write(addr, val, len); + pci_default_write_config(d, addr, val, len); + if (ranges_overlap(addr, len, 0x48, 4)) { + uint32_t v = pci_get_long(s->dev.config + 0x48); + pci_set_long(s->dev.config + 0x48, (v & 0xff80UL) | 1); + } + if (range_covers_byte(addr, len, 0x41)) { + pm_io_space_update(s); + } + if (ranges_overlap(addr, len, 0x90, 4)) { + uint32_t v = pci_get_long(s->dev.config + 0x90); + pci_set_long(s->dev.config + 0x90, (v & 0xfff0UL) | 1); + } + if (range_covers_byte(addr, len, 0xd2)) { + s->dev.config[0xd2] &= 0xf; + smb_io_space_update(s); + } +} + +static void pm_io_write(void *op, hwaddr addr, uint64_t data, unsigned size) +{ + trace_via_pm_io_write(addr, data, size); +} + +static uint64_t pm_io_read(void *op, hwaddr addr, unsigned size) +{ + trace_via_pm_io_read(addr, 0, size); + return 0; +} + +static const MemoryRegionOps pm_io_ops = { + .read = pm_io_read, + .write = pm_io_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .impl = { + .min_access_size = 1, + .max_access_size = 1, + }, +}; + +static void pm_update_sci(ViaPMState *s) +{ + int sci_level, pmsts; + + pmsts = acpi_pm1_evt_get_sts(&s->ar); + sci_level = (((pmsts & s->ar.pm1.evt.en) & + 
(ACPI_BITMASK_RT_CLOCK_ENABLE |
+                   ACPI_BITMASK_POWER_BUTTON_ENABLE |
+                   ACPI_BITMASK_GLOBAL_LOCK_ENABLE |
+                   ACPI_BITMASK_TIMER_ENABLE)) != 0);
+    if (pci_get_byte(s->dev.config + PCI_INTERRUPT_PIN)) {
+        /*
+         * FIXME:
+         * Fix the device model that realizes this PM device and remove
+         * this workaround.
+         * The device model should wire up SCI and set up
+         * PCI_INTERRUPT_PIN properly.
+         * As a workaround, don't raise SCI when PIN# = 0 (i.e. the
+         * interrupt pin is unused).
+         */
+        pci_set_irq(&s->dev, sci_level);
+    }
+    /* schedule a timer interrupt if needed */
+    acpi_pm_tmr_update(&s->ar, (s->ar.pm1.evt.en & ACPI_BITMASK_TIMER_ENABLE) &&
+                       !(pmsts & ACPI_BITMASK_TIMER_STATUS));
+}
+
+static void pm_tmr_timer(ACPIREGS *ar)
+{
+    ViaPMState *s = container_of(ar, ViaPMState, ar);
+    pm_update_sci(s);
+}
+
+static void via_pm_reset(DeviceState *d)
+{
+    ViaPMState *s = VIA_PM(d);
+
+    memset(s->dev.config + PCI_CONFIG_HEADER_SIZE, 0,
+           PCI_CONFIG_SPACE_SIZE - PCI_CONFIG_HEADER_SIZE);
+    /* Power Management IO base */
+    pci_set_long(s->dev.config + 0x48, 1);
+    /* SMBus IO base */
+    pci_set_long(s->dev.config + 0x90, 1);
+
+    acpi_pm1_evt_reset(&s->ar);
+    acpi_pm1_cnt_reset(&s->ar);
+    acpi_pm_tmr_reset(&s->ar);
+    pm_update_sci(s);
+
+    pm_io_space_update(s);
+    smb_io_space_update(s);
+}
+
+static void via_pm_realize(PCIDevice *dev, Error **errp)
+{
+    ViaPMState *s = VIA_PM(dev);
+
+    pci_set_word(dev->config + PCI_STATUS, PCI_STATUS_FAST_BACK |
+                 PCI_STATUS_DEVSEL_MEDIUM);
+
+    pm_smbus_init(DEVICE(s), &s->smb, false);
+    memory_region_add_subregion(pci_address_space_io(dev), 0, &s->smb.io);
+    memory_region_set_enabled(&s->smb.io, false);
+
+    apm_init(dev, &s->apm, NULL, s);
+
+    memory_region_init_io(&s->io, OBJECT(dev), &pm_io_ops, s, "via-pm", 128);
+    memory_region_add_subregion(pci_address_space_io(dev), 0, &s->io);
+    memory_region_set_enabled(&s->io, false);
+
+    acpi_pm_tmr_init(&s->ar, pm_tmr_timer, &s->io);
+    acpi_pm1_evt_init(&s->ar, pm_tmr_timer, &s->io);
+    acpi_pm1_cnt_init(&s->ar, &s->io, false, false, 2, false);
+}
+
+typedef struct via_pm_init_info {
+    uint16_t device_id;
+} ViaPMInitInfo;
+
+static void via_pm_class_init(ObjectClass *klass, void *data)
+{
+    DeviceClass *dc = DEVICE_CLASS(klass);
+    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
+    ViaPMInitInfo *info = data;
+
+    k->realize = via_pm_realize;
+    k->config_write = pm_write_config;
+    k->vendor_id = PCI_VENDOR_ID_VIA;
+    k->device_id = info->device_id;
+    k->class_id = PCI_CLASS_BRIDGE_OTHER;
+    k->revision = 0x40;
+    dc->reset = via_pm_reset;
+    /* Reason: part of the VIA south bridge, does not exist as a stand-alone device */
+    dc->user_creatable = false;
+    dc->vmsd = &vmstate_acpi;
+}
+
+static const TypeInfo via_pm_info = {
+    .name = TYPE_VIA_PM,
+    .parent = TYPE_PCI_DEVICE,
+    .instance_size = sizeof(ViaPMState),
+    .abstract = true,
+    .interfaces = (InterfaceInfo[]) {
+        { INTERFACE_CONVENTIONAL_PCI_DEVICE },
+        { },
+    },
+};
+
+static const ViaPMInitInfo vt82c686b_pm_init_info = {
+    .device_id = PCI_DEVICE_ID_VIA_82C686B_PM,
+};
+
+static const TypeInfo vt82c686b_pm_info = {
+    .name = TYPE_VT82C686B_PM,
+    .parent = TYPE_VIA_PM,
+    .class_init = via_pm_class_init,
+    .class_data = (void *)&vt82c686b_pm_init_info,
+};
+
+static const ViaPMInitInfo vt8231_pm_init_info = {
+    .device_id = PCI_DEVICE_ID_VIA_8231_PM,
+};
+
+static const TypeInfo vt8231_pm_info = {
+    .name = TYPE_VT8231_PM,
+    .parent = TYPE_VIA_PM,
+    .class_init = via_pm_class_init,
+    .class_data = (void *)&vt8231_pm_init_info,
+};
+
+
+#define TYPE_VIA_SUPERIO "via-superio"
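+/*
+ * QOM boilerplate: OBJECT_DECLARE_SIMPLE_TYPE() below generates the
+ * ViaSuperIOState typedef together with the VIA_SUPERIO() instance-cast
+ * macro for the "via-superio" type name defined above.
+ */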
+OBJECT_DECLARE_SIMPLE_TYPE(ViaSuperIOState, VIA_SUPERIO) + +struct ViaSuperIOState { + ISASuperIODevice superio; + uint8_t regs[0x100]; + const MemoryRegionOps *io_ops; + MemoryRegion io; +}; + +static inline void via_superio_io_enable(ViaSuperIOState *s, bool enable) +{ + memory_region_set_enabled(&s->io, enable); +} + +static void via_superio_realize(DeviceState *d, Error **errp) +{ + ViaSuperIOState *s = VIA_SUPERIO(d); + ISASuperIOClass *ic = ISA_SUPERIO_GET_CLASS(s); + Error *local_err = NULL; + + assert(s->io_ops); + ic->parent_realize(d, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + memory_region_init_io(&s->io, OBJECT(d), s->io_ops, s, "via-superio", 2); + memory_region_set_enabled(&s->io, false); + /* The floppy also uses 0x3f0 and 0x3f1 but this seems to work anyway */ + memory_region_add_subregion(isa_address_space_io(ISA_DEVICE(s)), 0x3f0, + &s->io); +} + +static uint64_t via_superio_cfg_read(void *opaque, hwaddr addr, unsigned size) +{ + ViaSuperIOState *sc = opaque; + uint8_t idx = sc->regs[0]; + uint8_t val = sc->regs[idx]; + + if (addr == 0) { + return idx; + } + if (addr == 1 && idx == 0) { + val = 0; /* reading reg 0 where we store index value */ + } + trace_via_superio_read(idx, val); + return val; +} + +static void via_superio_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + ISASuperIOClass *sc = ISA_SUPERIO_CLASS(klass); + + sc->parent_realize = dc->realize; + dc->realize = via_superio_realize; +} + +static const TypeInfo via_superio_info = { + .name = TYPE_VIA_SUPERIO, + .parent = TYPE_ISA_SUPERIO, + .instance_size = sizeof(ViaSuperIOState), + .class_size = sizeof(ISASuperIOClass), + .class_init = via_superio_class_init, + .abstract = true, +}; + +#define TYPE_VT82C686B_SUPERIO "vt82c686b-superio" + +static void vt82c686b_superio_cfg_write(void *opaque, hwaddr addr, + uint64_t data, unsigned size) +{ + ViaSuperIOState *sc = opaque; + uint8_t idx = sc->regs[0]; + + if (addr == 0) { /* config index register */ + sc->regs[0] = data; + return; + } + + /* config data register */ + trace_via_superio_write(idx, data); + switch (idx) { + case 0x00 ... 0xdf: + case 0xe4: + case 0xe5: + case 0xe9 ... 0xed: + case 0xf3: + case 0xf5: + case 0xf7: + case 0xf9 ... 0xfb: + case 0xfd ... 0xff: + /* ignore write to read only registers */ + return; + /* case 0xe6 ... 
0xe8: Should set base port of parallel and serial */ + default: + qemu_log_mask(LOG_UNIMP, + "via_superio_cfg: unimplemented register 0x%x\n", idx); + break; + } + sc->regs[idx] = data; +} + +static const MemoryRegionOps vt82c686b_superio_cfg_ops = { + .read = via_superio_cfg_read, + .write = vt82c686b_superio_cfg_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .impl = { + .min_access_size = 1, + .max_access_size = 1, + }, +}; + +static void vt82c686b_superio_reset(DeviceState *dev) +{ + ViaSuperIOState *s = VIA_SUPERIO(dev); + + memset(s->regs, 0, sizeof(s->regs)); + /* Device ID */ + vt82c686b_superio_cfg_write(s, 0, 0xe0, 1); + vt82c686b_superio_cfg_write(s, 1, 0x3c, 1); + /* Function select - all disabled */ + vt82c686b_superio_cfg_write(s, 0, 0xe2, 1); + vt82c686b_superio_cfg_write(s, 1, 0x03, 1); + /* Floppy ctrl base addr 0x3f0-7 */ + vt82c686b_superio_cfg_write(s, 0, 0xe3, 1); + vt82c686b_superio_cfg_write(s, 1, 0xfc, 1); + /* Parallel port base addr 0x378-f */ + vt82c686b_superio_cfg_write(s, 0, 0xe6, 1); + vt82c686b_superio_cfg_write(s, 1, 0xde, 1); + /* Serial port 1 base addr 0x3f8-f */ + vt82c686b_superio_cfg_write(s, 0, 0xe7, 1); + vt82c686b_superio_cfg_write(s, 1, 0xfe, 1); + /* Serial port 2 base addr 0x2f8-f */ + vt82c686b_superio_cfg_write(s, 0, 0xe8, 1); + vt82c686b_superio_cfg_write(s, 1, 0xbe, 1); + + vt82c686b_superio_cfg_write(s, 0, 0, 1); +} + +static void vt82c686b_superio_init(Object *obj) +{ + VIA_SUPERIO(obj)->io_ops = &vt82c686b_superio_cfg_ops; +} + +static void vt82c686b_superio_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + ISASuperIOClass *sc = ISA_SUPERIO_CLASS(klass); + + dc->reset = vt82c686b_superio_reset; + sc->serial.count = 2; + sc->parallel.count = 1; + sc->ide.count = 0; /* emulated by via-ide */ + sc->floppy.count = 1; +} + +static const TypeInfo vt82c686b_superio_info = { + .name = TYPE_VT82C686B_SUPERIO, + .parent = TYPE_VIA_SUPERIO, + .instance_size = sizeof(ViaSuperIOState), + .instance_init = vt82c686b_superio_init, + .class_size = sizeof(ISASuperIOClass), + .class_init = vt82c686b_superio_class_init, +}; + + +#define TYPE_VT8231_SUPERIO "vt8231-superio" + +static void vt8231_superio_cfg_write(void *opaque, hwaddr addr, + uint64_t data, unsigned size) +{ + ViaSuperIOState *sc = opaque; + uint8_t idx = sc->regs[0]; + + if (addr == 0) { /* config index register */ + sc->regs[0] = data; + return; + } + + /* config data register */ + trace_via_superio_write(idx, data); + switch (idx) { + case 0x00 ... 0xdf: + case 0xe7 ... 0xef: + case 0xf0 ... 
0xf1: + case 0xf5: + case 0xf8: + case 0xfd: + /* ignore write to read only registers */ + return; + default: + qemu_log_mask(LOG_UNIMP, + "via_superio_cfg: unimplemented register 0x%x\n", idx); + break; + } + sc->regs[idx] = data; +} + +static const MemoryRegionOps vt8231_superio_cfg_ops = { + .read = via_superio_cfg_read, + .write = vt8231_superio_cfg_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .impl = { + .min_access_size = 1, + .max_access_size = 1, + }, +}; + +static void vt8231_superio_reset(DeviceState *dev) +{ + ViaSuperIOState *s = VIA_SUPERIO(dev); + + memset(s->regs, 0, sizeof(s->regs)); + /* Device ID */ + s->regs[0xf0] = 0x3c; + /* Device revision */ + s->regs[0xf1] = 0x01; + /* Function select - all disabled */ + vt8231_superio_cfg_write(s, 0, 0xf2, 1); + vt8231_superio_cfg_write(s, 1, 0x03, 1); + /* Serial port base addr */ + vt8231_superio_cfg_write(s, 0, 0xf4, 1); + vt8231_superio_cfg_write(s, 1, 0xfe, 1); + /* Parallel port base addr */ + vt8231_superio_cfg_write(s, 0, 0xf6, 1); + vt8231_superio_cfg_write(s, 1, 0xde, 1); + /* Floppy ctrl base addr */ + vt8231_superio_cfg_write(s, 0, 0xf7, 1); + vt8231_superio_cfg_write(s, 1, 0xfc, 1); + + vt8231_superio_cfg_write(s, 0, 0, 1); +} + +static void vt8231_superio_init(Object *obj) +{ + VIA_SUPERIO(obj)->io_ops = &vt8231_superio_cfg_ops; +} + +static uint16_t vt8231_superio_serial_iobase(ISASuperIODevice *sio, + uint8_t index) +{ + return 0x2f8; /* FIXME: This should be settable via registers f2-f4 */ +} + +static void vt8231_superio_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + ISASuperIOClass *sc = ISA_SUPERIO_CLASS(klass); + + dc->reset = vt8231_superio_reset; + sc->serial.count = 1; + sc->serial.get_iobase = vt8231_superio_serial_iobase; + sc->parallel.count = 1; + sc->ide.count = 0; /* emulated by via-ide */ + sc->floppy.count = 1; +} + +static const TypeInfo vt8231_superio_info = { + .name = TYPE_VT8231_SUPERIO, + .parent = TYPE_VIA_SUPERIO, + .instance_size = sizeof(ViaSuperIOState), + .instance_init = vt8231_superio_init, + .class_size = sizeof(ISASuperIOClass), + .class_init = vt8231_superio_class_init, +}; + + +#define TYPE_VIA_ISA "via-isa" +OBJECT_DECLARE_SIMPLE_TYPE(ViaISAState, VIA_ISA) + +struct ViaISAState { + PCIDevice dev; + qemu_irq cpu_intr; + qemu_irq *isa_irqs; + ISABus *isa_bus; + ViaSuperIOState *via_sio; +}; + +static const VMStateDescription vmstate_via = { + .name = "via-isa", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_PCI_DEVICE(dev, ViaISAState), + VMSTATE_END_OF_LIST() + } +}; + +static const TypeInfo via_isa_info = { + .name = TYPE_VIA_ISA, + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(ViaISAState), + .abstract = true, + .interfaces = (InterfaceInfo[]) { + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { }, + }, +}; + +void via_isa_set_irq(PCIDevice *d, int n, int level) +{ + ViaISAState *s = VIA_ISA(d); + qemu_set_irq(s->isa_irqs[n], level); +} + +static void via_isa_request_i8259_irq(void *opaque, int irq, int level) +{ + ViaISAState *s = opaque; + qemu_set_irq(s->cpu_intr, level); +} + +static void via_isa_realize(PCIDevice *d, Error **errp) +{ + ViaISAState *s = VIA_ISA(d); + DeviceState *dev = DEVICE(d); + qemu_irq *isa_irq; + int i; + + qdev_init_gpio_out(dev, &s->cpu_intr, 1); + isa_irq = qemu_allocate_irqs(via_isa_request_i8259_irq, s, 1); + s->isa_bus = isa_bus_new(dev, get_system_memory(), pci_address_space_io(d), + &error_fatal); + s->isa_irqs = i8259_init(s->isa_bus, *isa_irq); + 
isa_bus_irqs(s->isa_bus, s->isa_irqs); + i8254_pit_init(s->isa_bus, 0x40, 0, NULL); + i8257_dma_init(s->isa_bus, 0); + mc146818_rtc_init(s->isa_bus, 2000, NULL); + + for (i = 0; i < PCI_CONFIG_HEADER_SIZE; i++) { + if (i < PCI_COMMAND || i >= PCI_REVISION_ID) { + d->wmask[i] = 0; + } + } +} + +/* TYPE_VT82C686B_ISA */ + +static void vt82c686b_write_config(PCIDevice *d, uint32_t addr, + uint32_t val, int len) +{ + ViaISAState *s = VIA_ISA(d); + + trace_via_isa_write(addr, val, len); + pci_default_write_config(d, addr, val, len); + if (addr == 0x85) { + /* BIT(1): enable or disable superio config io ports */ + via_superio_io_enable(s->via_sio, val & BIT(1)); + } +} + +static void vt82c686b_isa_reset(DeviceState *dev) +{ + ViaISAState *s = VIA_ISA(dev); + uint8_t *pci_conf = s->dev.config; + + pci_set_long(pci_conf + PCI_CAPABILITY_LIST, 0x000000c0); + pci_set_word(pci_conf + PCI_COMMAND, PCI_COMMAND_IO | PCI_COMMAND_MEMORY | + PCI_COMMAND_MASTER | PCI_COMMAND_SPECIAL); + pci_set_word(pci_conf + PCI_STATUS, PCI_STATUS_DEVSEL_MEDIUM); + + pci_conf[0x48] = 0x01; /* Miscellaneous Control 3 */ + pci_conf[0x4a] = 0x04; /* IDE interrupt Routing */ + pci_conf[0x4f] = 0x03; /* DMA/Master Mem Access Control 3 */ + pci_conf[0x50] = 0x2d; /* PnP DMA Request Control */ + pci_conf[0x59] = 0x04; + pci_conf[0x5a] = 0x04; /* KBC/RTC Control*/ + pci_conf[0x5f] = 0x04; + pci_conf[0x77] = 0x10; /* GPIO Control 1/2/3/4 */ +} + +static void vt82c686b_realize(PCIDevice *d, Error **errp) +{ + ViaISAState *s = VIA_ISA(d); + + via_isa_realize(d, errp); + s->via_sio = VIA_SUPERIO(isa_create_simple(s->isa_bus, + TYPE_VT82C686B_SUPERIO)); +} + +static void vt82c686b_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + + k->realize = vt82c686b_realize; + k->config_write = vt82c686b_write_config; + k->vendor_id = PCI_VENDOR_ID_VIA; + k->device_id = PCI_DEVICE_ID_VIA_82C686B_ISA; + k->class_id = PCI_CLASS_BRIDGE_ISA; + k->revision = 0x40; + dc->reset = vt82c686b_isa_reset; + dc->desc = "ISA bridge"; + dc->vmsd = &vmstate_via; + /* Reason: part of VIA VT82C686 southbridge, needs to be wired up */ + dc->user_creatable = false; +} + +static const TypeInfo vt82c686b_isa_info = { + .name = TYPE_VT82C686B_ISA, + .parent = TYPE_VIA_ISA, + .instance_size = sizeof(ViaISAState), + .class_init = vt82c686b_class_init, +}; + +/* TYPE_VT8231_ISA */ + +static void vt8231_write_config(PCIDevice *d, uint32_t addr, + uint32_t val, int len) +{ + ViaISAState *s = VIA_ISA(d); + + trace_via_isa_write(addr, val, len); + pci_default_write_config(d, addr, val, len); + if (addr == 0x50) { + /* BIT(2): enable or disable superio config io ports */ + via_superio_io_enable(s->via_sio, val & BIT(2)); + } +} + +static void vt8231_isa_reset(DeviceState *dev) +{ + ViaISAState *s = VIA_ISA(dev); + uint8_t *pci_conf = s->dev.config; + + pci_set_long(pci_conf + PCI_CAPABILITY_LIST, 0x000000c0); + pci_set_word(pci_conf + PCI_COMMAND, PCI_COMMAND_IO | PCI_COMMAND_MEMORY | + PCI_COMMAND_MASTER | PCI_COMMAND_SPECIAL); + pci_set_word(pci_conf + PCI_STATUS, PCI_STATUS_DEVSEL_MEDIUM); + + pci_conf[0x58] = 0x40; /* Miscellaneous Control 0 */ + pci_conf[0x67] = 0x08; /* Fast IR Config */ + pci_conf[0x6b] = 0x01; /* Fast IR I/O Base */ +} + +static void vt8231_realize(PCIDevice *d, Error **errp) +{ + ViaISAState *s = VIA_ISA(d); + + via_isa_realize(d, errp); + s->via_sio = VIA_SUPERIO(isa_create_simple(s->isa_bus, + TYPE_VT8231_SUPERIO)); +} + +static void vt8231_class_init(ObjectClass 
*klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + + k->realize = vt8231_realize; + k->config_write = vt8231_write_config; + k->vendor_id = PCI_VENDOR_ID_VIA; + k->device_id = PCI_DEVICE_ID_VIA_8231_ISA; + k->class_id = PCI_CLASS_BRIDGE_ISA; + k->revision = 0x10; + dc->reset = vt8231_isa_reset; + dc->desc = "ISA bridge"; + dc->vmsd = &vmstate_via; + /* Reason: part of VIA VT8231 southbridge, needs to be wired up */ + dc->user_creatable = false; +} + +static const TypeInfo vt8231_isa_info = { + .name = TYPE_VT8231_ISA, + .parent = TYPE_VIA_ISA, + .instance_size = sizeof(ViaISAState), + .class_init = vt8231_class_init, +}; + + +static void vt82c686b_register_types(void) +{ + type_register_static(&via_pm_info); + type_register_static(&vt82c686b_pm_info); + type_register_static(&vt8231_pm_info); + type_register_static(&via_superio_info); + type_register_static(&vt82c686b_superio_info); + type_register_static(&vt8231_superio_info); + type_register_static(&via_isa_info); + type_register_static(&vt82c686b_isa_info); + type_register_static(&vt8231_isa_info); +} + +type_init(vt82c686b_register_types) diff --git a/hw/m68k/Kconfig b/hw/m68k/Kconfig new file mode 100644 index 000000000..f839f8a03 --- /dev/null +++ b/hw/m68k/Kconfig @@ -0,0 +1,34 @@ +config AN5206 + bool + select COLDFIRE + select PTIMER + +config MCF5208 + bool + select COLDFIRE + select PTIMER + +config NEXTCUBE + bool + select FRAMEBUFFER + select ESCC + +config Q800 + bool + select MAC_VIA + select NUBUS + select MACFB + select SWIM + select ESCC + select ESP + select DP8393X + select OR_IRQ + +config M68K_VIRT + bool + select M68K_IRQC + select VIRT_CTRL + select GOLDFISH_PIC + select GOLDFISH_TTY + select GOLDFISH_RTC + select VIRTIO_MMIO diff --git a/hw/m68k/an5206.c b/hw/m68k/an5206.c new file mode 100644 index 000000000..11ae4c979 --- /dev/null +++ b/hw/m68k/an5206.c @@ -0,0 +1,102 @@ +/* + * Arnewsh 5206 ColdFire system emulation. + * + * Copyright (c) 2007 CodeSourcery. + * + * This code is licensed under the GPL + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "cpu.h" +#include "hw/m68k/mcf.h" +#include "hw/boards.h" +#include "hw/loader.h" +#include "elf.h" +#include "qemu/error-report.h" +#include "sysemu/qtest.h" + +#define KERNEL_LOAD_ADDR 0x10000 +#define AN5206_MBAR_ADDR 0x10000000 +#define AN5206_RAMBAR_ADDR 0x20000000 + +static void mcf5206_init(MemoryRegion *sysmem, uint32_t base) +{ + DeviceState *dev; + SysBusDevice *s; + + dev = qdev_new(TYPE_MCF5206_MBAR); + s = SYS_BUS_DEVICE(dev); + sysbus_realize_and_unref(s, &error_fatal); + + memory_region_add_subregion(sysmem, base, sysbus_mmio_get_region(s, 0)); +} + +static void an5206_init(MachineState *machine) +{ + ram_addr_t ram_size = machine->ram_size; + const char *kernel_filename = machine->kernel_filename; + M68kCPU *cpu; + CPUM68KState *env; + int kernel_size; + uint64_t elf_entry; + hwaddr entry; + MemoryRegion *address_space_mem = get_system_memory(); + MemoryRegion *sram = g_new(MemoryRegion, 1); + + cpu = M68K_CPU(cpu_create(machine->cpu_type)); + env = &cpu->env; + + /* Initialize CPU registers. */ + env->vbr = 0; + /* TODO: allow changing MBAR and RAMBAR. */ + env->mbar = AN5206_MBAR_ADDR | 1; + env->rambar0 = AN5206_RAMBAR_ADDR | 1; + + /* DRAM at address zero */ + memory_region_add_subregion(address_space_mem, 0, machine->ram); + + /* Internal SRAM. 
*/
+    memory_region_init_ram(sram, NULL, "an5206.sram", 512, &error_fatal);
+    memory_region_add_subregion(address_space_mem, AN5206_RAMBAR_ADDR, sram);
+
+    mcf5206_init(address_space_mem, AN5206_MBAR_ADDR);
+
+    /* Load kernel. */
+    if (!kernel_filename) {
+        if (qtest_enabled()) {
+            return;
+        }
+        error_report("Kernel image must be specified");
+        exit(1);
+    }
+
+    kernel_size = load_elf(kernel_filename, NULL, NULL, NULL, &elf_entry,
+                           NULL, NULL, NULL, 1, EM_68K, 0, 0);
+    entry = elf_entry;
+    if (kernel_size < 0) {
+        kernel_size = load_uimage(kernel_filename, &entry, NULL, NULL,
+                                  NULL, NULL);
+    }
+    if (kernel_size < 0) {
+        kernel_size = load_image_targphys(kernel_filename, KERNEL_LOAD_ADDR,
+                                          ram_size - KERNEL_LOAD_ADDR);
+        entry = KERNEL_LOAD_ADDR;
+    }
+    if (kernel_size < 0) {
+        error_report("Could not load kernel '%s'", kernel_filename);
+        exit(1);
+    }
+
+    env->pc = entry;
+}
+
+static void an5206_machine_init(MachineClass *mc)
+{
+    mc->desc = "Arnewsh 5206";
+    mc->init = an5206_init;
+    mc->default_cpu_type = M68K_CPU_TYPE_NAME("m5206");
+    mc->default_ram_id = "an5206.ram";
+}
+
+DEFINE_MACHINE("an5206", an5206_machine_init)
diff --git a/hw/m68k/bootinfo.h b/hw/m68k/bootinfo.h
new file mode 100644
index 000000000..adbf0c552
--- /dev/null
+++ b/hw/m68k/bootinfo.h
@@ -0,0 +1,59 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+ *
+ * Bootinfo tags from linux bootinfo.h and bootinfo-mac.h:
+ * This is an easily parsable and extendable structure containing all
+ * information to be passed from the bootstrap to the kernel
+ *
+ * This structure is copied right after the kernel by the bootstrap
+ * routine.
+ */
+
+#ifndef HW_M68K_BOOTINFO_H
+#define HW_M68K_BOOTINFO_H
+
+#define BOOTINFO0(as, base, id) \
+    do { \
+        stw_phys(as, base, id); \
+        base += 2; \
+        stw_phys(as, base, sizeof(struct bi_record)); \
+        base += 2; \
+    } while (0)
+
+#define BOOTINFO1(as, base, id, value) \
+    do { \
+        stw_phys(as, base, id); \
+        base += 2; \
+        stw_phys(as, base, sizeof(struct bi_record) + 4); \
+        base += 2; \
+        stl_phys(as, base, value); \
+        base += 4; \
+    } while (0)
+
+#define BOOTINFO2(as, base, id, value1, value2) \
+    do { \
+        stw_phys(as, base, id); \
+        base += 2; \
+        stw_phys(as, base, sizeof(struct bi_record) + 8); \
+        base += 2; \
+        stl_phys(as, base, value1); \
+        base += 4; \
+        stl_phys(as, base, value2); \
+        base += 4; \
+    } while (0)
+
+#define BOOTINFOSTR(as, base, id, string) \
+    do { \
+        int i; \
+        stw_phys(as, base, id); \
+        base += 2; \
+        stw_phys(as, base, \
+                 (sizeof(struct bi_record) + strlen(string) + 2) & ~1); \
+        base += 2; \
+        for (i = 0; string[i]; i++) { \
+            stb_phys(as, base++, string[i]); \
+        } \
+        stb_phys(as, base++, 0); \
+        base = (base + 1) & ~1; /* keep the record stream 16-bit aligned */ \
+    } while (0)
+#endif
diff --git a/hw/m68k/mcf5206.c b/hw/m68k/mcf5206.c
new file mode 100644
index 000000000..6d93d761a
--- /dev/null
+++ b/hw/m68k/mcf5206.c
@@ -0,0 +1,629 @@
+/*
+ * Motorola ColdFire MCF5206 SoC embedded peripheral emulation.
+ *
+ * Copyright (c) 2007 CodeSourcery.
+ *
+ * This code is licensed under the GPL
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/error-report.h"
+#include "qemu/log.h"
+#include "cpu.h"
+#include "hw/boards.h"
+#include "hw/irq.h"
+#include "hw/m68k/mcf.h"
+#include "qemu/timer.h"
+#include "hw/ptimer.h"
+#include "sysemu/sysemu.h"
+#include "hw/sysbus.h"
+
+/* General purpose timer module.
*/ +typedef struct { + uint16_t tmr; + uint16_t trr; + uint16_t tcr; + uint16_t ter; + ptimer_state *timer; + qemu_irq irq; + int irq_state; +} m5206_timer_state; + +#define TMR_RST 0x01 +#define TMR_CLK 0x06 +#define TMR_FRR 0x08 +#define TMR_ORI 0x10 +#define TMR_OM 0x20 +#define TMR_CE 0xc0 + +#define TER_CAP 0x01 +#define TER_REF 0x02 + +static void m5206_timer_update(m5206_timer_state *s) +{ + if ((s->tmr & TMR_ORI) != 0 && (s->ter & TER_REF)) + qemu_irq_raise(s->irq); + else + qemu_irq_lower(s->irq); +} + +static void m5206_timer_reset(m5206_timer_state *s) +{ + s->tmr = 0; + s->trr = 0; +} + +static void m5206_timer_recalibrate(m5206_timer_state *s) +{ + int prescale; + int mode; + + ptimer_transaction_begin(s->timer); + ptimer_stop(s->timer); + + if ((s->tmr & TMR_RST) == 0) { + goto exit; + } + + prescale = (s->tmr >> 8) + 1; + mode = (s->tmr >> 1) & 3; + if (mode == 2) + prescale *= 16; + + if (mode == 3 || mode == 0) { + qemu_log_mask(LOG_UNIMP, "m5206_timer: mode %d not implemented\n", + mode); + goto exit; + } + if ((s->tmr & TMR_FRR) == 0) { + qemu_log_mask(LOG_UNIMP, + "m5206_timer: free running mode not implemented\n"); + goto exit; + } + + /* Assume 66MHz system clock. */ + ptimer_set_freq(s->timer, 66000000 / prescale); + + ptimer_set_limit(s->timer, s->trr, 0); + + ptimer_run(s->timer, 0); +exit: + ptimer_transaction_commit(s->timer); +} + +static void m5206_timer_trigger(void *opaque) +{ + m5206_timer_state *s = (m5206_timer_state *)opaque; + s->ter |= TER_REF; + m5206_timer_update(s); +} + +static uint32_t m5206_timer_read(m5206_timer_state *s, uint32_t addr) +{ + switch (addr) { + case 0: + return s->tmr; + case 4: + return s->trr; + case 8: + return s->tcr; + case 0xc: + return s->trr - ptimer_get_count(s->timer); + case 0x11: + return s->ter; + default: + return 0; + } +} + +static void m5206_timer_write(m5206_timer_state *s, uint32_t addr, uint32_t val) +{ + switch (addr) { + case 0: + if ((s->tmr & TMR_RST) != 0 && (val & TMR_RST) == 0) { + m5206_timer_reset(s); + } + s->tmr = val; + m5206_timer_recalibrate(s); + break; + case 4: + s->trr = val; + m5206_timer_recalibrate(s); + break; + case 8: + s->tcr = val; + break; + case 0xc: + ptimer_transaction_begin(s->timer); + ptimer_set_count(s->timer, val); + ptimer_transaction_commit(s->timer); + break; + case 0x11: + s->ter &= ~val; + break; + default: + break; + } + m5206_timer_update(s); +} + +static m5206_timer_state *m5206_timer_init(qemu_irq irq) +{ + m5206_timer_state *s; + + s = g_new0(m5206_timer_state, 1); + s->timer = ptimer_init(m5206_timer_trigger, s, PTIMER_POLICY_DEFAULT); + s->irq = irq; + m5206_timer_reset(s); + return s; +} + +/* System Integration Module. */ + +typedef struct { + SysBusDevice parent_obj; + + M68kCPU *cpu; + MemoryRegion iomem; + qemu_irq *pic; + m5206_timer_state *timer[2]; + void *uart[2]; + uint8_t scr; + uint8_t icr[14]; + uint16_t imr; /* 1 == interrupt is masked. */ + uint16_t ipr; + uint8_t rsr; + uint8_t swivr; + uint8_t par; + /* Include the UART vector registers here. */ + uint8_t uivr[2]; +} m5206_mbar_state; + +#define MCF5206_MBAR(obj) OBJECT_CHECK(m5206_mbar_state, (obj), TYPE_MCF5206_MBAR) + +/* Interrupt controller. 
*/ + +static int m5206_find_pending_irq(m5206_mbar_state *s) +{ + int level; + int vector; + uint16_t active; + int i; + + level = 0; + vector = 0; + active = s->ipr & ~s->imr; + if (!active) + return 0; + + for (i = 1; i < 14; i++) { + if (active & (1 << i)) { + if ((s->icr[i] & 0x1f) > level) { + level = s->icr[i] & 0x1f; + vector = i; + } + } + } + + if (level < 4) + vector = 0; + + return vector; +} + +static void m5206_mbar_update(m5206_mbar_state *s) +{ + int irq; + int vector; + int level; + + irq = m5206_find_pending_irq(s); + if (irq) { + int tmp; + tmp = s->icr[irq]; + level = (tmp >> 2) & 7; + if (tmp & 0x80) { + /* Autovector. */ + vector = 24 + level; + } else { + switch (irq) { + case 8: /* SWT */ + vector = s->swivr; + break; + case 12: /* UART1 */ + vector = s->uivr[0]; + break; + case 13: /* UART2 */ + vector = s->uivr[1]; + break; + default: + /* Unknown vector. */ + qemu_log_mask(LOG_UNIMP, "%s: Unhandled vector for IRQ %d\n", + __func__, irq); + vector = 0xf; + break; + } + } + } else { + level = 0; + vector = 0; + } + m68k_set_irq_level(s->cpu, level, vector); +} + +static void m5206_mbar_set_irq(void *opaque, int irq, int level) +{ + m5206_mbar_state *s = (m5206_mbar_state *)opaque; + if (level) { + s->ipr |= 1 << irq; + } else { + s->ipr &= ~(1 << irq); + } + m5206_mbar_update(s); +} + +/* System Integration Module. */ + +static void m5206_mbar_reset(DeviceState *dev) +{ + m5206_mbar_state *s = MCF5206_MBAR(dev); + + s->scr = 0xc0; + s->icr[1] = 0x04; + s->icr[2] = 0x08; + s->icr[3] = 0x0c; + s->icr[4] = 0x10; + s->icr[5] = 0x14; + s->icr[6] = 0x18; + s->icr[7] = 0x1c; + s->icr[8] = 0x1c; + s->icr[9] = 0x80; + s->icr[10] = 0x80; + s->icr[11] = 0x80; + s->icr[12] = 0x00; + s->icr[13] = 0x00; + s->imr = 0x3ffe; + s->rsr = 0x80; + s->swivr = 0x0f; + s->par = 0; +} + +static uint64_t m5206_mbar_read(m5206_mbar_state *s, + uint16_t offset, unsigned size) +{ + if (offset >= 0x100 && offset < 0x120) { + return m5206_timer_read(s->timer[0], offset - 0x100); + } else if (offset >= 0x120 && offset < 0x140) { + return m5206_timer_read(s->timer[1], offset - 0x120); + } else if (offset >= 0x140 && offset < 0x160) { + return mcf_uart_read(s->uart[0], offset - 0x140, size); + } else if (offset >= 0x180 && offset < 0x1a0) { + return mcf_uart_read(s->uart[1], offset - 0x180, size); + } + switch (offset) { + case 0x03: return s->scr; + case 0x14 ... 0x20: return s->icr[offset - 0x13]; + case 0x36: return s->imr; + case 0x3a: return s->ipr; + case 0x40: return s->rsr; + case 0x41: return 0; + case 0x42: return s->swivr; + case 0x50: + /* DRAM mask register. */ + /* FIXME: currently hardcoded to 128Mb. */ + { + uint32_t mask = ~0; + while (mask > current_machine->ram_size) { + mask >>= 1; + } + return mask & 0x0ffe0000; + } + case 0x5c: return 1; /* DRAM bank 1 empty. 
*/ + case 0xcb: return s->par; + case 0x170: return s->uivr[0]; + case 0x1b0: return s->uivr[1]; + } + qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad MBAR offset 0x%"PRIx16"\n", + __func__, offset); + return 0; +} + +static void m5206_mbar_write(m5206_mbar_state *s, uint16_t offset, + uint64_t value, unsigned size) +{ + if (offset >= 0x100 && offset < 0x120) { + m5206_timer_write(s->timer[0], offset - 0x100, value); + return; + } else if (offset >= 0x120 && offset < 0x140) { + m5206_timer_write(s->timer[1], offset - 0x120, value); + return; + } else if (offset >= 0x140 && offset < 0x160) { + mcf_uart_write(s->uart[0], offset - 0x140, value, size); + return; + } else if (offset >= 0x180 && offset < 0x1a0) { + mcf_uart_write(s->uart[1], offset - 0x180, value, size); + return; + } + switch (offset) { + case 0x03: + s->scr = value; + break; + case 0x14 ... 0x20: + s->icr[offset - 0x13] = value; + m5206_mbar_update(s); + break; + case 0x36: + s->imr = value; + m5206_mbar_update(s); + break; + case 0x40: + s->rsr &= ~value; + break; + case 0x41: + /* TODO: implement watchdog. */ + break; + case 0x42: + s->swivr = value; + break; + case 0xcb: + s->par = value; + break; + case 0x170: + s->uivr[0] = value; + break; + case 0x178: case 0x17c: case 0x1c8: case 0x1bc: + /* Not implemented: UART Output port bits. */ + break; + case 0x1b0: + s->uivr[1] = value; + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad MBAR offset 0x%"PRIx16"\n", + __func__, offset); + break; + } +} + +/* Internal peripherals use a variety of register widths. + This lookup table allows a single routine to handle all of them. */ +static const uint8_t m5206_mbar_width[] = +{ + /* 000-040 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, + /* 040-080 */ 1, 2, 2, 2, 4, 1, 2, 4, 1, 2, 4, 2, 2, 4, 2, 2, + /* 080-0c0 */ 4, 2, 2, 4, 2, 2, 4, 2, 2, 4, 2, 2, 4, 2, 2, 4, + /* 0c0-100 */ 2, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + /* 100-140 */ 2, 2, 2, 2, 1, 0, 0, 0, 2, 2, 2, 2, 1, 0, 0, 0, + /* 140-180 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + /* 180-1c0 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + /* 1c0-200 */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +}; + +static uint32_t m5206_mbar_readw(void *opaque, hwaddr offset); +static uint32_t m5206_mbar_readl(void *opaque, hwaddr offset); + +static uint32_t m5206_mbar_readb(void *opaque, hwaddr offset) +{ + m5206_mbar_state *s = (m5206_mbar_state *)opaque; + offset &= 0x3ff; + if (offset >= 0x200) { + qemu_log_mask(LOG_GUEST_ERROR, "Bad MBAR read offset 0x%" HWADDR_PRIX, + offset); + return 0; + } + if (m5206_mbar_width[offset >> 2] > 1) { + uint16_t val; + val = m5206_mbar_readw(opaque, offset & ~1); + if ((offset & 1) == 0) { + val >>= 8; + } + return val & 0xff; + } + return m5206_mbar_read(s, offset, 1); +} + +static uint32_t m5206_mbar_readw(void *opaque, hwaddr offset) +{ + m5206_mbar_state *s = (m5206_mbar_state *)opaque; + int width; + offset &= 0x3ff; + if (offset >= 0x200) { + qemu_log_mask(LOG_GUEST_ERROR, "Bad MBAR read offset 0x%" HWADDR_PRIX, + offset); + return 0; + } + width = m5206_mbar_width[offset >> 2]; + if (width > 2) { + uint32_t val; + val = m5206_mbar_readl(opaque, offset & ~3); + if ((offset & 3) == 0) + val >>= 16; + return val & 0xffff; + } else if (width < 2) { + uint16_t val; + val = m5206_mbar_readb(opaque, offset) << 8; + val |= m5206_mbar_readb(opaque, offset + 1); + return val; + } + return m5206_mbar_read(s, offset, 2); +} + +static uint32_t m5206_mbar_readl(void *opaque, hwaddr offset) +{ + m5206_mbar_state *s = 
(m5206_mbar_state *)opaque; + int width; + offset &= 0x3ff; + if (offset >= 0x200) { + qemu_log_mask(LOG_GUEST_ERROR, "Bad MBAR read offset 0x%" HWADDR_PRIX, + offset); + return 0; + } + width = m5206_mbar_width[offset >> 2]; + if (width < 4) { + uint32_t val; + val = m5206_mbar_readw(opaque, offset) << 16; + val |= m5206_mbar_readw(opaque, offset + 2); + return val; + } + return m5206_mbar_read(s, offset, 4); +} + +static void m5206_mbar_writew(void *opaque, hwaddr offset, + uint32_t value); +static void m5206_mbar_writel(void *opaque, hwaddr offset, + uint32_t value); + +static void m5206_mbar_writeb(void *opaque, hwaddr offset, + uint32_t value) +{ + m5206_mbar_state *s = (m5206_mbar_state *)opaque; + int width; + offset &= 0x3ff; + if (offset >= 0x200) { + qemu_log_mask(LOG_GUEST_ERROR, "Bad MBAR write offset 0x%" HWADDR_PRIX, + offset); + return; + } + width = m5206_mbar_width[offset >> 2]; + if (width > 1) { + uint32_t tmp; + tmp = m5206_mbar_readw(opaque, offset & ~1); + if (offset & 1) { + tmp = (tmp & 0xff00) | value; + } else { + tmp = (tmp & 0x00ff) | (value << 8); + } + m5206_mbar_writew(opaque, offset & ~1, tmp); + return; + } + m5206_mbar_write(s, offset, value, 1); +} + +static void m5206_mbar_writew(void *opaque, hwaddr offset, + uint32_t value) +{ + m5206_mbar_state *s = (m5206_mbar_state *)opaque; + int width; + offset &= 0x3ff; + if (offset >= 0x200) { + qemu_log_mask(LOG_GUEST_ERROR, "Bad MBAR write offset 0x%" HWADDR_PRIX, + offset); + return; + } + width = m5206_mbar_width[offset >> 2]; + if (width > 2) { + uint32_t tmp; + tmp = m5206_mbar_readl(opaque, offset & ~3); + if (offset & 3) { + tmp = (tmp & 0xffff0000) | value; + } else { + tmp = (tmp & 0x0000ffff) | (value << 16); + } + m5206_mbar_writel(opaque, offset & ~3, tmp); + return; + } else if (width < 2) { + m5206_mbar_writeb(opaque, offset, value >> 8); + m5206_mbar_writeb(opaque, offset + 1, value & 0xff); + return; + } + m5206_mbar_write(s, offset, value, 2); +} + +static void m5206_mbar_writel(void *opaque, hwaddr offset, + uint32_t value) +{ + m5206_mbar_state *s = (m5206_mbar_state *)opaque; + int width; + offset &= 0x3ff; + if (offset >= 0x200) { + qemu_log_mask(LOG_GUEST_ERROR, "Bad MBAR write offset 0x%" HWADDR_PRIX, + offset); + return; + } + width = m5206_mbar_width[offset >> 2]; + if (width < 4) { + m5206_mbar_writew(opaque, offset, value >> 16); + m5206_mbar_writew(opaque, offset + 2, value & 0xffff); + return; + } + m5206_mbar_write(s, offset, value, 4); +} + +static uint64_t m5206_mbar_readfn(void *opaque, hwaddr addr, unsigned size) +{ + switch (size) { + case 1: + return m5206_mbar_readb(opaque, addr); + case 2: + return m5206_mbar_readw(opaque, addr); + case 4: + return m5206_mbar_readl(opaque, addr); + default: + g_assert_not_reached(); + } +} + +static void m5206_mbar_writefn(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + switch (size) { + case 1: + m5206_mbar_writeb(opaque, addr, value); + break; + case 2: + m5206_mbar_writew(opaque, addr, value); + break; + case 4: + m5206_mbar_writel(opaque, addr, value); + break; + default: + g_assert_not_reached(); + } +} + +static const MemoryRegionOps m5206_mbar_ops = { + .read = m5206_mbar_readfn, + .write = m5206_mbar_writefn, + .valid.min_access_size = 1, + .valid.max_access_size = 4, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void mcf5206_mbar_realize(DeviceState *dev, Error **errp) +{ + m5206_mbar_state *s = MCF5206_MBAR(dev); + + memory_region_init_io(&s->iomem, NULL, &m5206_mbar_ops, s, + "mbar", 0x00001000); + 
sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->iomem); + + s->pic = qemu_allocate_irqs(m5206_mbar_set_irq, s, 14); + s->timer[0] = m5206_timer_init(s->pic[9]); + s->timer[1] = m5206_timer_init(s->pic[10]); + s->uart[0] = mcf_uart_init(s->pic[12], serial_hd(0)); + s->uart[1] = mcf_uart_init(s->pic[13], serial_hd(1)); + s->cpu = M68K_CPU(qemu_get_cpu(0)); +} + +static void mcf5206_mbar_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + + set_bit(DEVICE_CATEGORY_MISC, dc->categories); + dc->desc = "MCF5206 system integration module"; + dc->realize = mcf5206_mbar_realize; + dc->reset = m5206_mbar_reset; +} + +static const TypeInfo mcf5206_mbar_info = { + .name = TYPE_MCF5206_MBAR, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(m5206_mbar_state), + .class_init = mcf5206_mbar_class_init, +}; + +static void mcf5206_mbar_register_types(void) +{ + type_register_static(&mcf5206_mbar_info); +} + +type_init(mcf5206_mbar_register_types) diff --git a/hw/m68k/mcf5208.c b/hw/m68k/mcf5208.c new file mode 100644 index 000000000..93812ee20 --- /dev/null +++ b/hw/m68k/mcf5208.c @@ -0,0 +1,363 @@ +/* + * Motorola ColdFire MCF5208 SoC emulation. + * + * Copyright (c) 2007 CodeSourcery. + * + * This code is licensed under the GPL + */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "qemu/error-report.h" +#include "qemu/log.h" +#include "qapi/error.h" +#include "qemu-common.h" +#include "qemu/datadir.h" +#include "cpu.h" +#include "hw/irq.h" +#include "hw/m68k/mcf.h" +#include "hw/m68k/mcf_fec.h" +#include "qemu/timer.h" +#include "hw/ptimer.h" +#include "sysemu/sysemu.h" +#include "sysemu/qtest.h" +#include "net/net.h" +#include "hw/boards.h" +#include "hw/loader.h" +#include "hw/sysbus.h" +#include "elf.h" + +#define SYS_FREQ 166666666 + +#define ROM_SIZE 0x200000 + +#define PCSR_EN 0x0001 +#define PCSR_RLD 0x0002 +#define PCSR_PIF 0x0004 +#define PCSR_PIE 0x0008 +#define PCSR_OVW 0x0010 +#define PCSR_DBG 0x0020 +#define PCSR_DOZE 0x0040 +#define PCSR_PRE_SHIFT 8 +#define PCSR_PRE_MASK 0x0f00 + +typedef struct { + MemoryRegion iomem; + qemu_irq irq; + ptimer_state *timer; + uint16_t pcsr; + uint16_t pmr; + uint16_t pcntr; +} m5208_timer_state; + +static void m5208_timer_update(m5208_timer_state *s) +{ + if ((s->pcsr & (PCSR_PIE | PCSR_PIF)) == (PCSR_PIE | PCSR_PIF)) + qemu_irq_raise(s->irq); + else + qemu_irq_lower(s->irq); +} + +static void m5208_timer_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + m5208_timer_state *s = (m5208_timer_state *)opaque; + int prescale; + int limit; + switch (offset) { + case 0: + /* The PIF bit is set-to-clear. */ + if (value & PCSR_PIF) { + s->pcsr &= ~PCSR_PIF; + value &= ~PCSR_PIF; + } + /* Avoid frobbing the timer if we're just twiddling IRQ bits. 
*/ + if (((s->pcsr ^ value) & ~PCSR_PIE) == 0) { + s->pcsr = value; + m5208_timer_update(s); + return; + } + + ptimer_transaction_begin(s->timer); + if (s->pcsr & PCSR_EN) + ptimer_stop(s->timer); + + s->pcsr = value; + + prescale = 1 << ((s->pcsr & PCSR_PRE_MASK) >> PCSR_PRE_SHIFT); + ptimer_set_freq(s->timer, (SYS_FREQ / 2) / prescale); + if (s->pcsr & PCSR_RLD) + limit = s->pmr; + else + limit = 0xffff; + ptimer_set_limit(s->timer, limit, 0); + + if (s->pcsr & PCSR_EN) + ptimer_run(s->timer, 0); + ptimer_transaction_commit(s->timer); + break; + case 2: + ptimer_transaction_begin(s->timer); + s->pmr = value; + s->pcsr &= ~PCSR_PIF; + if ((s->pcsr & PCSR_RLD) == 0) { + if (s->pcsr & PCSR_OVW) + ptimer_set_count(s->timer, value); + } else { + ptimer_set_limit(s->timer, value, s->pcsr & PCSR_OVW); + } + ptimer_transaction_commit(s->timer); + break; + case 4: + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" HWADDR_PRIX "\n", + __func__, offset); + return; + } + m5208_timer_update(s); +} + +static void m5208_timer_trigger(void *opaque) +{ + m5208_timer_state *s = (m5208_timer_state *)opaque; + s->pcsr |= PCSR_PIF; + m5208_timer_update(s); +} + +static uint64_t m5208_timer_read(void *opaque, hwaddr addr, + unsigned size) +{ + m5208_timer_state *s = (m5208_timer_state *)opaque; + switch (addr) { + case 0: + return s->pcsr; + case 2: + return s->pmr; + case 4: + return ptimer_get_count(s->timer); + default: + qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" HWADDR_PRIX "\n", + __func__, addr); + return 0; + } +} + +static const MemoryRegionOps m5208_timer_ops = { + .read = m5208_timer_read, + .write = m5208_timer_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static uint64_t m5208_sys_read(void *opaque, hwaddr addr, + unsigned size) +{ + switch (addr) { + case 0x110: /* SDCS0 */ + { + int n; + for (n = 0; n < 32; n++) { + if (current_machine->ram_size < (2u << n)) { + break; + } + } + return (n - 1) | 0x40000000; + } + case 0x114: /* SDCS1 */ + return 0; + + default: + qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" HWADDR_PRIX "\n", + __func__, addr); + return 0; + } +} + +static void m5208_sys_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" HWADDR_PRIX "\n", + __func__, addr); +} + +static const MemoryRegionOps m5208_sys_ops = { + .read = m5208_sys_read, + .write = m5208_sys_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void mcf5208_sys_init(MemoryRegion *address_space, qemu_irq *pic) +{ + MemoryRegion *iomem = g_new(MemoryRegion, 1); + m5208_timer_state *s; + int i; + + /* SDRAMC. */ + memory_region_init_io(iomem, NULL, &m5208_sys_ops, NULL, "m5208-sys", 0x00004000); + memory_region_add_subregion(address_space, 0xfc0a8000, iomem); + /* Timers. 
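m5208_timer_write() above derives the PIT tick rate from the PRE field of PCSR: a power-of-two prescaler divides half the system clock. A small standalone sketch of that arithmetic, reusing the defines from this file (editorial example, not part of the patch):

#include <stdint.h>
#include <stdio.h>

#define SYS_FREQ       166666666
#define PCSR_PRE_SHIFT 8
#define PCSR_PRE_MASK  0x0f00

static uint32_t pit_freq(uint16_t pcsr)
{
    int prescale = 1 << ((pcsr & PCSR_PRE_MASK) >> PCSR_PRE_SHIFT);
    return (SYS_FREQ / 2) / prescale;
}

int main(void)
{
    /* PRE = 5 -> prescale 32 -> ~2.6 MHz timer clock */
    printf("%u Hz\n", (unsigned)pit_freq(5 << PCSR_PRE_SHIFT));
    return 0;
}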
 */
+    for (i = 0; i < 2; i++) {
+        s = g_new0(m5208_timer_state, 1);
+        s->timer = ptimer_init(m5208_timer_trigger, s, PTIMER_POLICY_DEFAULT);
+        memory_region_init_io(&s->iomem, NULL, &m5208_timer_ops, s,
+                              "m5208-timer", 0x00004000);
+        memory_region_add_subregion(address_space, 0xfc080000 + 0x4000 * i,
+                                    &s->iomem);
+        s->irq = pic[4 + i];
+    }
+}
+
+static void mcf_fec_init(MemoryRegion *sysmem, NICInfo *nd, hwaddr base,
+                         qemu_irq *irqs)
+{
+    DeviceState *dev;
+    SysBusDevice *s;
+    int i;
+
+    qemu_check_nic_model(nd, TYPE_MCF_FEC_NET);
+    dev = qdev_new(TYPE_MCF_FEC_NET);
+    qdev_set_nic_properties(dev, nd);
+
+    s = SYS_BUS_DEVICE(dev);
+    sysbus_realize_and_unref(s, &error_fatal);
+    for (i = 0; i < FEC_NUM_IRQ; i++) {
+        sysbus_connect_irq(s, i, irqs[i]);
+    }
+
+    memory_region_add_subregion(sysmem, base, sysbus_mmio_get_region(s, 0));
+}
+
+static void mcf5208evb_init(MachineState *machine)
+{
+    ram_addr_t ram_size = machine->ram_size;
+    const char *kernel_filename = machine->kernel_filename;
+    M68kCPU *cpu;
+    CPUM68KState *env;
+    int kernel_size;
+    uint64_t elf_entry;
+    hwaddr entry;
+    qemu_irq *pic;
+    MemoryRegion *address_space_mem = get_system_memory();
+    MemoryRegion *rom = g_new(MemoryRegion, 1);
+    MemoryRegion *sram = g_new(MemoryRegion, 1);
+
+    cpu = M68K_CPU(cpu_create(machine->cpu_type));
+    env = &cpu->env;
+
+    /* Initialize CPU registers. */
+    env->vbr = 0;
+    /* TODO: Configure BARs. */
+
+    /* ROM at 0x00000000 */
+    memory_region_init_rom(rom, NULL, "mcf5208.rom", ROM_SIZE, &error_fatal);
+    memory_region_add_subregion(address_space_mem, 0x00000000, rom);
+
+    /* DRAM at 0x40000000 */
+    memory_region_add_subregion(address_space_mem, 0x40000000, machine->ram);
+
+    /* Internal SRAM. */
+    memory_region_init_ram(sram, NULL, "mcf5208.sram", 16 * KiB, &error_fatal);
+    memory_region_add_subregion(address_space_mem, 0x80000000, sram);
+
+    /* Internal peripherals. */
+    pic = mcf_intc_init(address_space_mem, 0xfc048000, cpu);
+
+    mcf_uart_mm_init(0xfc060000, pic[26], serial_hd(0));
+    mcf_uart_mm_init(0xfc064000, pic[27], serial_hd(1));
+    mcf_uart_mm_init(0xfc068000, pic[28], serial_hd(2));
+
+    mcf5208_sys_init(address_space_mem, pic);
+
+    if (nb_nics > 1) {
+        error_report("Too many NICs");
+        exit(1);
+    }
+    if (nd_table[0].used) {
+        mcf_fec_init(address_space_mem, &nd_table[0],
+                     0xfc030000, pic + 36);
+    }
+
+    g_free(pic);
+
+    /* 0xfc000000 SCM. */
+    /* 0xfc004000 XBS. */
+    /* 0xfc008000 FlexBus CS. */
+    /* 0xfc030000 FEC. */
+    /* 0xfc040000 SCM + Power management. */
+    /* 0xfc044000 eDMA. */
+    /* 0xfc048000 INTC. */
+    /* 0xfc058000 I2C. */
+    /* 0xfc05c000 QSPI. */
+    /* 0xfc060000 UART0. */
+    /* 0xfc064000 UART1. */
+    /* 0xfc068000 UART2. */
+    /* 0xfc070000 DMA timers. */
+    /* 0xfc080000 PIT0. */
+    /* 0xfc084000 PIT1. */
+    /* 0xfc088000 EPORT. */
+    /* 0xfc08c000 Watchdog. */
+    /* 0xfc090000 clock module. */
+    /* 0xfc0a0000 CCM + reset. */
+    /* 0xfc0a4000 GPIO. */
+    /* 0xfc0a8000 SDRAM controller. */
+
+    /* Load firmware */
+    if (machine->firmware) {
+        char *fn;
+        uint8_t *ptr;
+
+        fn = qemu_find_file(QEMU_FILE_TYPE_BIOS, machine->firmware);
+        if (!fn) {
+            error_report("Could not find ROM image '%s'", machine->firmware);
+            exit(1);
+        }
+        if (load_image_targphys(fn, 0x0, ROM_SIZE) < 8) {
+            error_report("Could not load ROM image '%s'", machine->firmware);
+            exit(1);
+        }
+        g_free(fn);
+        /* Initial PC is always at offset 4 in firmware binaries */
+        ptr = rom_ptr(0x4, 4);
+        assert(ptr != NULL);
+        env->pc = ldl_p(ptr);
+    }
+
+    /* Load kernel.
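The firmware path above takes the reset PC from offset 4 of the ROM image, read target-endian (big-endian on m68k) via rom_ptr()/ldl_p(). A standalone sketch of that probe; the image bytes are made up for illustration:

#include <assert.h>
#include <stdint.h>

static uint32_t ldl_be(const uint8_t *p)
{
    return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
           ((uint32_t)p[2] << 8) | (uint32_t)p[3];
}

int main(void)
{
    /* vector 0: initial SP, vector 1: initial PC */
    const uint8_t rom[8] = { 0x41, 0x00, 0x00, 0x00,    /* SP */
                             0x00, 0x00, 0x04, 0x00 };  /* PC = 0x400 */
    assert(ldl_be(rom + 4) == 0x400);
    return 0;
}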
*/ + if (!kernel_filename) { + if (qtest_enabled() || machine->firmware) { + return; + } + error_report("Kernel image must be specified"); + exit(1); + } + + kernel_size = load_elf(kernel_filename, NULL, NULL, NULL, &elf_entry, + NULL, NULL, NULL, 1, EM_68K, 0, 0); + entry = elf_entry; + if (kernel_size < 0) { + kernel_size = load_uimage(kernel_filename, &entry, NULL, NULL, + NULL, NULL); + } + if (kernel_size < 0) { + kernel_size = load_image_targphys(kernel_filename, 0x40000000, + ram_size); + entry = 0x40000000; + } + if (kernel_size < 0) { + error_report("Could not load kernel '%s'", kernel_filename); + exit(1); + } + + env->pc = entry; +} + +static void mcf5208evb_machine_init(MachineClass *mc) +{ + mc->desc = "MCF5208EVB"; + mc->init = mcf5208evb_init; + mc->is_default = true; + mc->default_cpu_type = M68K_CPU_TYPE_NAME("m5208"); + mc->default_ram_id = "mcf5208.ram"; +} + +DEFINE_MACHINE("mcf5208evb", mcf5208evb_machine_init) diff --git a/hw/m68k/mcf_intc.c b/hw/m68k/mcf_intc.c new file mode 100644 index 000000000..4cd30188c --- /dev/null +++ b/hw/m68k/mcf_intc.c @@ -0,0 +1,217 @@ +/* + * ColdFire Interrupt Controller emulation. + * + * Copyright (c) 2007 CodeSourcery. + * + * This code is licensed under the GPL + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/module.h" +#include "qemu/log.h" +#include "cpu.h" +#include "hw/irq.h" +#include "hw/sysbus.h" +#include "hw/m68k/mcf.h" +#include "qom/object.h" + +#define TYPE_MCF_INTC "mcf-intc" +OBJECT_DECLARE_SIMPLE_TYPE(mcf_intc_state, MCF_INTC) + +struct mcf_intc_state { + SysBusDevice parent_obj; + + MemoryRegion iomem; + uint64_t ipr; + uint64_t imr; + uint64_t ifr; + uint64_t enabled; + uint8_t icr[64]; + M68kCPU *cpu; + int active_vector; +}; + +static void mcf_intc_update(mcf_intc_state *s) +{ + uint64_t active; + int i; + int best; + int best_level; + + active = (s->ipr | s->ifr) & s->enabled & ~s->imr; + best_level = 0; + best = 64; + if (active) { + for (i = 0; i < 64; i++) { + if ((active & 1) != 0 && s->icr[i] >= best_level) { + best_level = s->icr[i]; + best = i; + } + active >>= 1; + } + } + s->active_vector = ((best == 64) ? 24 : (best + 64)); + m68k_set_irq_level(s->cpu, best_level, s->active_vector); +} + +static uint64_t mcf_intc_read(void *opaque, hwaddr addr, + unsigned size) +{ + int offset; + mcf_intc_state *s = (mcf_intc_state *)opaque; + offset = addr & 0xff; + if (offset >= 0x40 && offset < 0x80) { + return s->icr[offset - 0x40]; + } + switch (offset) { + case 0x00: + return (uint32_t)(s->ipr >> 32); + case 0x04: + return (uint32_t)s->ipr; + case 0x08: + return (uint32_t)(s->imr >> 32); + case 0x0c: + return (uint32_t)s->imr; + case 0x10: + return (uint32_t)(s->ifr >> 32); + case 0x14: + return (uint32_t)s->ifr; + case 0xe0: /* SWIACK. */ + return s->active_vector; + case 0xe1: case 0xe2: case 0xe3: case 0xe4: + case 0xe5: case 0xe6: case 0xe7: + /* LnIACK */ + qemu_log_mask(LOG_UNIMP, "%s: LnIACK not implemented (offset 0x%02x)\n", + __func__, offset); + /* fallthru */ + default: + return 0; + } +} + +static void mcf_intc_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + int offset; + mcf_intc_state *s = (mcf_intc_state *)opaque; + offset = addr & 0xff; + if (offset >= 0x40 && offset < 0x80) { + int n = offset - 0x40; + s->icr[n] = val; + if (val == 0) + s->enabled &= ~(1ull << n); + else + s->enabled |= (1ull << n); + mcf_intc_update(s); + return; + } + switch (offset) { + case 0x00: case 0x04: + /* Ignore IPR writes. 
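mcf_intc_update() above arbitrates the 64 sources: among the pending, enabled, unmasked sources, the one with the highest ICR priority wins, ties going to the higher source number because of the >= comparison; the vector is source + 64, or 24 (the spurious vector) when nothing qualifies. A standalone sketch of just that arbitration step:

#include <stdint.h>
#include <stdio.h>

static int arbitrate(uint64_t active, const uint8_t icr[64], int *level)
{
    int best = 64, best_level = 0;

    for (int i = 0; i < 64; i++) {
        if ((active & 1) && icr[i] >= best_level) {
            best_level = icr[i];
            best = i;
        }
        active >>= 1;
    }
    *level = best_level;
    return (best == 64) ? 24 : (best + 64);   /* vector number */
}

int main(void)
{
    uint8_t icr[64] = { [26] = 3, [27] = 3 }; /* two UARTs, equal priority */
    int level;

    /* both pending: source 27 wins the tie, vector 91, level 3 */
    printf("vector %d level %d\n",
           arbitrate((1ull << 26) | (1ull << 27), icr, &level), level);
    return 0;
}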
*/ + return; + case 0x08: + s->imr = (s->imr & 0xffffffff) | ((uint64_t)val << 32); + break; + case 0x0c: + s->imr = (s->imr & 0xffffffff00000000ull) | (uint32_t)val; + break; + case 0x1c: + if (val & 0x40) { + s->imr = ~0ull; + } else { + s->imr |= (0x1ull << (val & 0x3f)); + } + break; + case 0x1d: + if (val & 0x40) { + s->imr = 0ull; + } else { + s->imr &= ~(0x1ull << (val & 0x3f)); + } + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%02x\n", + __func__, offset); + return; + } + mcf_intc_update(s); +} + +static void mcf_intc_set_irq(void *opaque, int irq, int level) +{ + mcf_intc_state *s = (mcf_intc_state *)opaque; + if (irq >= 64) + return; + if (level) + s->ipr |= 1ull << irq; + else + s->ipr &= ~(1ull << irq); + mcf_intc_update(s); +} + +static void mcf_intc_reset(DeviceState *dev) +{ + mcf_intc_state *s = MCF_INTC(dev); + + s->imr = ~0ull; + s->ipr = 0; + s->ifr = 0; + s->enabled = 0; + memset(s->icr, 0, 64); + s->active_vector = 24; +} + +static const MemoryRegionOps mcf_intc_ops = { + .read = mcf_intc_read, + .write = mcf_intc_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void mcf_intc_instance_init(Object *obj) +{ + mcf_intc_state *s = MCF_INTC(obj); + + memory_region_init_io(&s->iomem, obj, &mcf_intc_ops, s, "mcf", 0x100); +} + +static void mcf_intc_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + + set_bit(DEVICE_CATEGORY_MISC, dc->categories); + dc->reset = mcf_intc_reset; +} + +static const TypeInfo mcf_intc_gate_info = { + .name = TYPE_MCF_INTC, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(mcf_intc_state), + .instance_init = mcf_intc_instance_init, + .class_init = mcf_intc_class_init, +}; + +static void mcf_intc_register_types(void) +{ + type_register_static(&mcf_intc_gate_info); +} + +type_init(mcf_intc_register_types) + +qemu_irq *mcf_intc_init(MemoryRegion *sysmem, + hwaddr base, + M68kCPU *cpu) +{ + DeviceState *dev; + mcf_intc_state *s; + + dev = qdev_new(TYPE_MCF_INTC); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + + s = MCF_INTC(dev); + s->cpu = cpu; + + memory_region_add_subregion(sysmem, base, &s->iomem); + + return qemu_allocate_irqs(mcf_intc_set_irq, s, 64); +} diff --git a/hw/m68k/meson.build b/hw/m68k/meson.build new file mode 100644 index 000000000..31248641d --- /dev/null +++ b/hw/m68k/meson.build @@ -0,0 +1,8 @@ +m68k_ss = ss.source_set() +m68k_ss.add(when: 'CONFIG_AN5206', if_true: files('an5206.c', 'mcf5206.c')) +m68k_ss.add(when: 'CONFIG_MCF5208', if_true: files('mcf5208.c', 'mcf_intc.c')) +m68k_ss.add(when: 'CONFIG_NEXTCUBE', if_true: files('next-kbd.c', 'next-cube.c')) +m68k_ss.add(when: 'CONFIG_Q800', if_true: files('q800.c')) +m68k_ss.add(when: 'CONFIG_M68K_VIRT', if_true: files('virt.c')) + +hw_arch += {'m68k': m68k_ss} diff --git a/hw/m68k/next-cube.c b/hw/m68k/next-cube.c new file mode 100644 index 000000000..e0d4a94f9 --- /dev/null +++ b/hw/m68k/next-cube.c @@ -0,0 +1,1056 @@ +/* + * NeXT Cube System Driver + * + * Copyright (c) 2011 Bryce Lanham + * + * This code is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. 
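Offsets 0x1c and 0x1d in mcf_intc_write() above behave as set/clear command registers for the interrupt mask: bit 6 of the written value means "all sources", otherwise the low six bits select a single source. Sketched standalone:

#include <assert.h>
#include <stdint.h>

static uint64_t imr;

static void mask_set(uint8_t val)     /* offset 0x1c */
{
    imr = (val & 0x40) ? ~0ull : (imr | (1ull << (val & 0x3f)));
}

static void mask_clear(uint8_t val)   /* offset 0x1d */
{
    imr = (val & 0x40) ? 0ull : (imr & ~(1ull << (val & 0x3f)));
}

int main(void)
{
    mask_set(0x40);          /* mask everything */
    mask_clear(27);          /* unmask source 27 only */
    assert(imr == ~(1ull << 27));
    return 0;
}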
+ */ + +#include "qemu/osdep.h" +#include "exec/hwaddr.h" +#include "sysemu/sysemu.h" +#include "sysemu/qtest.h" +#include "hw/irq.h" +#include "hw/m68k/next-cube.h" +#include "hw/boards.h" +#include "hw/loader.h" +#include "hw/scsi/esp.h" +#include "hw/sysbus.h" +#include "qom/object.h" +#include "hw/char/escc.h" /* ZILOG 8530 Serial Emulation */ +#include "hw/block/fdc.h" +#include "hw/qdev-properties.h" +#include "qapi/error.h" +#include "ui/console.h" +#include "target/m68k/cpu.h" +#include "migration/vmstate.h" + +/* #define DEBUG_NEXT */ +#ifdef DEBUG_NEXT +#define DPRINTF(fmt, ...) \ + do { printf("NeXT: " fmt , ## __VA_ARGS__); } while (0) +#else +#define DPRINTF(fmt, ...) do { } while (0) +#endif + +#define TYPE_NEXT_MACHINE MACHINE_TYPE_NAME("next-cube") +OBJECT_DECLARE_SIMPLE_TYPE(NeXTState, NEXT_MACHINE) + +#define ENTRY 0x0100001e +#define RAM_SIZE 0x4000000 +#define ROM_FILE "Rev_2.5_v66.bin" + +typedef struct next_dma { + uint32_t csr; + + uint32_t saved_next; + uint32_t saved_limit; + uint32_t saved_start; + uint32_t saved_stop; + + uint32_t next; + uint32_t limit; + uint32_t start; + uint32_t stop; + + uint32_t next_initbuf; + uint32_t size; +} next_dma; + +typedef struct NextRtc { + uint8_t ram[32]; + uint8_t command; + uint8_t value; + uint8_t status; + uint8_t control; + uint8_t retval; +} NextRtc; + +struct NeXTState { + MachineState parent; + + next_dma dma[10]; +}; + +#define TYPE_NEXT_PC "next-pc" +OBJECT_DECLARE_SIMPLE_TYPE(NeXTPC, NEXT_PC) + +/* NeXT Peripheral Controller */ +struct NeXTPC { + SysBusDevice parent_obj; + + M68kCPU *cpu; + + MemoryRegion mmiomem; + MemoryRegion scrmem; + + uint32_t scr1; + uint32_t scr2; + uint8_t scsi_csr_1; + uint8_t scsi_csr_2; + uint32_t int_mask; + uint32_t int_status; + + NextRtc rtc; +}; + +/* Thanks to NeXT forums for this */ +/* +static const uint8_t rtc_ram3[32] = { + 0x94, 0x0f, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0xfb, 0x6d, 0x00, 0x00, 0x7B, 0x00, + 0x00, 0x00, 0x65, 0x6e, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, 0x13 +}; +*/ +static const uint8_t rtc_ram2[32] = { + 0x94, 0x0f, 0x40, 0x03, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0xfb, 0x6d, 0x00, 0x00, 0x4b, 0x00, + 0x41, 0x00, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x84, 0x7e, +}; + +#define SCR2_RTCLK 0x2 +#define SCR2_RTDATA 0x4 +#define SCR2_TOBCD(x) (((x / 10) << 4) + (x % 10)) + +static void nextscr2_write(NeXTPC *s, uint32_t val, int size) +{ + static int led; + static int phase; + static uint8_t old_scr2; + uint8_t scr2_2; + NextRtc *rtc = &s->rtc; + + if (size == 4) { + scr2_2 = (val >> 8) & 0xFF; + } else { + scr2_2 = val & 0xFF; + } + + if (val & 0x1) { + DPRINTF("fault!\n"); + led++; + if (led == 10) { + DPRINTF("LED flashing, possible fault!\n"); + led = 0; + } + } + + if (scr2_2 & 0x1) { + /* DPRINTF("RTC %x phase %i\n", scr2_2, phase); */ + if (phase == -1) { + phase = 0; + } + /* If we are in going down clock... do something */ + if (((old_scr2 & SCR2_RTCLK) != (scr2_2 & SCR2_RTCLK)) && + ((scr2_2 & SCR2_RTCLK) == 0)) { + if (phase < 8) { + rtc->command = (rtc->command << 1) | + ((scr2_2 & SCR2_RTDATA) ? 1 : 0); + } + if (phase >= 8 && phase < 16) { + rtc->value = (rtc->value << 1) | + ((scr2_2 & SCR2_RTDATA) ? 
1 : 0); + + /* if we read RAM register, output RT_DATA bit */ + if (rtc->command <= 0x1F) { + scr2_2 = scr2_2 & (~SCR2_RTDATA); + if (rtc->ram[rtc->command] & (0x80 >> (phase - 8))) { + scr2_2 |= SCR2_RTDATA; + } + + rtc->retval = (rtc->retval << 1) | + ((scr2_2 & SCR2_RTDATA) ? 1 : 0); + } + /* read the status 0x30 */ + if (rtc->command == 0x30) { + scr2_2 = scr2_2 & (~SCR2_RTDATA); + /* for now status = 0x98 (new rtc + FTU) */ + if (rtc->status & (0x80 >> (phase - 8))) { + scr2_2 |= SCR2_RTDATA; + } + + rtc->retval = (rtc->retval << 1) | + ((scr2_2 & SCR2_RTDATA) ? 1 : 0); + } + /* read the status 0x31 */ + if (rtc->command == 0x31) { + scr2_2 = scr2_2 & (~SCR2_RTDATA); + if (rtc->control & (0x80 >> (phase - 8))) { + scr2_2 |= SCR2_RTDATA; + } + rtc->retval = (rtc->retval << 1) | + ((scr2_2 & SCR2_RTDATA) ? 1 : 0); + } + + if ((rtc->command >= 0x20) && (rtc->command <= 0x2F)) { + scr2_2 = scr2_2 & (~SCR2_RTDATA); + /* for now 0x00 */ + time_t time_h = time(NULL); + struct tm *info = localtime(&time_h); + int ret = 0; + + switch (rtc->command) { + case 0x20: + ret = SCR2_TOBCD(info->tm_sec); + break; + case 0x21: + ret = SCR2_TOBCD(info->tm_min); + break; + case 0x22: + ret = SCR2_TOBCD(info->tm_hour); + break; + case 0x24: + ret = SCR2_TOBCD(info->tm_mday); + break; + case 0x25: + ret = SCR2_TOBCD((info->tm_mon + 1)); + break; + case 0x26: + ret = SCR2_TOBCD((info->tm_year - 100)); + break; + + } + + if (ret & (0x80 >> (phase - 8))) { + scr2_2 |= SCR2_RTDATA; + } + rtc->retval = (rtc->retval << 1) | + ((scr2_2 & SCR2_RTDATA) ? 1 : 0); + } + + } + + phase++; + if (phase == 16) { + if (rtc->command >= 0x80 && rtc->command <= 0x9F) { + rtc->ram[rtc->command - 0x80] = rtc->value; + } + /* write to x30 register */ + if (rtc->command == 0xB1) { + /* clear FTU */ + if (rtc->value & 0x04) { + rtc->status = rtc->status & (~0x18); + s->int_status = s->int_status & (~0x04); + } + } + } + } + } else { + /* else end or abort */ + phase = -1; + rtc->command = 0; + rtc->value = 0; + } + s->scr2 = val & 0xFFFF00FF; + s->scr2 |= scr2_2 << 8; + old_scr2 = scr2_2; +} + +static uint32_t mmio_readb(NeXTPC *s, hwaddr addr) +{ + switch (addr) { + case 0xc000: + return (s->scr1 >> 24) & 0xFF; + case 0xc001: + return (s->scr1 >> 16) & 0xFF; + case 0xc002: + return (s->scr1 >> 8) & 0xFF; + case 0xc003: + return (s->scr1 >> 0) & 0xFF; + + case 0xd000: + return (s->scr2 >> 24) & 0xFF; + case 0xd001: + return (s->scr2 >> 16) & 0xFF; + case 0xd002: + return (s->scr2 >> 8) & 0xFF; + case 0xd003: + return (s->scr2 >> 0) & 0xFF; + case 0x14020: + DPRINTF("MMIO Read 0x4020\n"); + return 0x7f; + + default: + DPRINTF("MMIO Read B @ %"HWADDR_PRIx"\n", addr); + return 0x0; + } +} + +static uint32_t mmio_readw(NeXTPC *s, hwaddr addr) +{ + switch (addr) { + default: + DPRINTF("MMIO Read W @ %"HWADDR_PRIx"\n", addr); + return 0x0; + } +} + +static uint32_t mmio_readl(NeXTPC *s, hwaddr addr) +{ + switch (addr) { + case 0x7000: + /* DPRINTF("Read INT status: %x\n", s->int_status); */ + return s->int_status; + + case 0x7800: + DPRINTF("MMIO Read INT mask: %x\n", s->int_mask); + return s->int_mask; + + case 0xc000: + return s->scr1; + + case 0xd000: + return s->scr2; + + default: + DPRINTF("MMIO Read L @ %"HWADDR_PRIx"\n", addr); + return 0x0; + } +} + +static void mmio_writeb(NeXTPC *s, hwaddr addr, uint32_t val) +{ + switch (addr) { + case 0xd003: + nextscr2_write(s, val, 1); + break; + default: + DPRINTF("MMIO Write B @ %x with %x\n", (unsigned int)addr, val); + } + +} + +static void mmio_writew(NeXTPC *s, hwaddr addr, uint32_t 
val) +{ + DPRINTF("MMIO Write W\n"); +} + +static void mmio_writel(NeXTPC *s, hwaddr addr, uint32_t val) +{ + switch (addr) { + case 0x7000: + DPRINTF("INT Status old: %x new: %x\n", s->int_status, val); + s->int_status = val; + break; + case 0x7800: + DPRINTF("INT Mask old: %x new: %x\n", s->int_mask, val); + s->int_mask = val; + break; + case 0xc000: + DPRINTF("SCR1 Write: %x\n", val); + break; + case 0xd000: + nextscr2_write(s, val, 4); + break; + + default: + DPRINTF("MMIO Write l @ %x with %x\n", (unsigned int)addr, val); + } +} + +static uint64_t mmio_readfn(void *opaque, hwaddr addr, unsigned size) +{ + NeXTPC *s = NEXT_PC(opaque); + + switch (size) { + case 1: + return mmio_readb(s, addr); + case 2: + return mmio_readw(s, addr); + case 4: + return mmio_readl(s, addr); + default: + g_assert_not_reached(); + } +} + +static void mmio_writefn(void *opaque, hwaddr addr, uint64_t value, + unsigned size) +{ + NeXTPC *s = NEXT_PC(opaque); + + switch (size) { + case 1: + mmio_writeb(s, addr, value); + break; + case 2: + mmio_writew(s, addr, value); + break; + case 4: + mmio_writel(s, addr, value); + break; + default: + g_assert_not_reached(); + } +} + +static const MemoryRegionOps mmio_ops = { + .read = mmio_readfn, + .write = mmio_writefn, + .valid.min_access_size = 1, + .valid.max_access_size = 4, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static uint32_t scr_readb(NeXTPC *s, hwaddr addr) +{ + switch (addr) { + case 0x14108: + DPRINTF("FD read @ %x\n", (unsigned int)addr); + return 0x40 | 0x04 | 0x2 | 0x1; + case 0x14020: + DPRINTF("SCSI 4020 STATUS READ %X\n", s->scsi_csr_1); + return s->scsi_csr_1; + + case 0x14021: + DPRINTF("SCSI 4021 STATUS READ %X\n", s->scsi_csr_2); + return 0x40; + + /* + * These 4 registers are the hardware timer, not sure which register + * is the latch instead of data, but no problems so far + */ + case 0x1a000: + return 0xff & (clock() >> 24); + case 0x1a001: + return 0xff & (clock() >> 16); + case 0x1a002: + return 0xff & (clock() >> 8); + case 0x1a003: + /* Hack: We need to have this change consistently to make it work */ + return 0xFF & clock(); + + default: + DPRINTF("BMAP Read B @ %x\n", (unsigned int)addr); + return 0; + } +} + +static uint32_t scr_readw(NeXTPC *s, hwaddr addr) +{ + DPRINTF("BMAP Read W @ %x\n", (unsigned int)addr); + return 0; +} + +static uint32_t scr_readl(NeXTPC *s, hwaddr addr) +{ + DPRINTF("BMAP Read L @ %x\n", (unsigned int)addr); + return 0; +} + +#define SCSICSR_ENABLE 0x01 +#define SCSICSR_RESET 0x02 /* reset scsi dma */ +#define SCSICSR_FIFOFL 0x04 +#define SCSICSR_DMADIR 0x08 /* if set, scsi to mem */ +#define SCSICSR_CPUDMA 0x10 /* if set, dma enabled */ +#define SCSICSR_INTMASK 0x20 /* if set, interrupt enabled */ + +static void scr_writeb(NeXTPC *s, hwaddr addr, uint32_t value) +{ + switch (addr) { + case 0x14108: + DPRINTF("FDCSR Write: %x\n", value); + + if (value == 0x0) { + /* qemu_irq_raise(s->fd_irq[0]); */ + } + break; + case 0x14020: /* SCSI Control Register */ + if (value & SCSICSR_FIFOFL) { + DPRINTF("SCSICSR FIFO Flush\n"); + /* will have to add another irq to the esp if this is needed */ + /* esp_puflush_fifo(esp_g); */ + /* qemu_irq_pulse(s->scsi_dma); */ + } + + if (value & SCSICSR_ENABLE) { + DPRINTF("SCSICSR Enable\n"); + /* + * qemu_irq_raise(s->scsi_dma); + * s->scsi_csr_1 = 0xc0; + * s->scsi_csr_1 |= 0x1; + * qemu_irq_pulse(s->scsi_dma); + */ + } + /* + * else + * s->scsi_csr_1 &= ~SCSICSR_ENABLE; + */ + + if (value & SCSICSR_RESET) { + DPRINTF("SCSICSR Reset\n"); + /* I think this should set DMADIR. 
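scr_writeb() above tests the SCSICSR_* bits one at a time; a standalone sketch that decodes the same register layout:

#include <stdint.h>
#include <stdio.h>

#define SCSICSR_ENABLE  0x01
#define SCSICSR_RESET   0x02  /* reset scsi dma */
#define SCSICSR_FIFOFL  0x04
#define SCSICSR_DMADIR  0x08  /* if set, scsi to mem */
#define SCSICSR_CPUDMA  0x10  /* if set, dma enabled */
#define SCSICSR_INTMASK 0x20  /* if set, interrupt enabled */

static void decode_csr(uint8_t v)
{
    printf("enable=%d reset=%d fifofl=%d dmadir=%d cpudma=%d intmask=%d\n",
           !!(v & SCSICSR_ENABLE), !!(v & SCSICSR_RESET),
           !!(v & SCSICSR_FIFOFL), !!(v & SCSICSR_DMADIR),
           !!(v & SCSICSR_CPUDMA), !!(v & SCSICSR_INTMASK));
}

int main(void)
{
    decode_csr(SCSICSR_ENABLE | SCSICSR_CPUDMA);
    return 0;
}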
CPUDMA and INTMASK to 0 */ + /* qemu_irq_raise(s->scsi_reset); */ + /* s->scsi_csr_1 &= ~(SCSICSR_INTMASK |0x80|0x1); */ + + } + if (value & SCSICSR_DMADIR) { + DPRINTF("SCSICSR DMAdir\n"); + } + if (value & SCSICSR_CPUDMA) { + DPRINTF("SCSICSR CPUDMA\n"); + /* qemu_irq_raise(s->scsi_dma); */ + + s->int_status |= 0x4000000; + } else { + s->int_status &= ~(0x4000000); + } + if (value & SCSICSR_INTMASK) { + DPRINTF("SCSICSR INTMASK\n"); + /* + * int_mask &= ~0x1000; + * s->scsi_csr_1 |= value; + * s->scsi_csr_1 &= ~SCSICSR_INTMASK; + * if (s->scsi_queued) { + * s->scsi_queued = 0; + * next_irq(s, NEXT_SCSI_I, level); + * } + */ + } else { + /* int_mask |= 0x1000; */ + } + if (value & 0x80) { + /* int_mask |= 0x1000; */ + /* s->scsi_csr_1 |= 0x80; */ + } + DPRINTF("SCSICSR Write: %x\n", value); + /* s->scsi_csr_1 = value; */ + return; + /* Hardware timer latch - not implemented yet */ + case 0x1a000: + default: + DPRINTF("BMAP Write B @ %x with %x\n", (unsigned int)addr, value); + } +} + +static void scr_writew(NeXTPC *s, hwaddr addr, uint32_t value) +{ + DPRINTF("BMAP Write W @ %x with %x\n", (unsigned int)addr, value); +} + +static void scr_writel(NeXTPC *s, hwaddr addr, uint32_t value) +{ + DPRINTF("BMAP Write L @ %x with %x\n", (unsigned int)addr, value); +} + +static uint64_t scr_readfn(void *opaque, hwaddr addr, unsigned size) +{ + NeXTPC *s = NEXT_PC(opaque); + + switch (size) { + case 1: + return scr_readb(s, addr); + case 2: + return scr_readw(s, addr); + case 4: + return scr_readl(s, addr); + default: + g_assert_not_reached(); + } +} + +static void scr_writefn(void *opaque, hwaddr addr, uint64_t value, + unsigned size) +{ + NeXTPC *s = NEXT_PC(opaque); + + switch (size) { + case 1: + scr_writeb(s, addr, value); + break; + case 2: + scr_writew(s, addr, value); + break; + case 4: + scr_writel(s, addr, value); + break; + default: + g_assert_not_reached(); + } +} + +static const MemoryRegionOps scr_ops = { + .read = scr_readfn, + .write = scr_writefn, + .valid.min_access_size = 1, + .valid.max_access_size = 4, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +#define NEXTDMA_SCSI(x) (0x10 + x) +#define NEXTDMA_FD(x) (0x10 + x) +#define NEXTDMA_ENTX(x) (0x110 + x) +#define NEXTDMA_ENRX(x) (0x150 + x) +#define NEXTDMA_CSR 0x0 +#define NEXTDMA_NEXT 0x4000 +#define NEXTDMA_LIMIT 0x4004 +#define NEXTDMA_START 0x4008 +#define NEXTDMA_STOP 0x400c +#define NEXTDMA_NEXT_INIT 0x4200 +#define NEXTDMA_SIZE 0x4204 + +static void dma_writel(void *opaque, hwaddr addr, uint64_t value, + unsigned int size) +{ + NeXTState *next_state = NEXT_MACHINE(opaque); + + switch (addr) { + case NEXTDMA_ENRX(NEXTDMA_CSR): + if (value & DMA_DEV2M) { + next_state->dma[NEXTDMA_ENRX].csr |= DMA_DEV2M; + } + + if (value & DMA_SETENABLE) { + /* DPRINTF("SCSI DMA ENABLE\n"); */ + next_state->dma[NEXTDMA_ENRX].csr |= DMA_ENABLE; + } + if (value & DMA_SETSUPDATE) { + next_state->dma[NEXTDMA_ENRX].csr |= DMA_SUPDATE; + } + if (value & DMA_CLRCOMPLETE) { + next_state->dma[NEXTDMA_ENRX].csr &= ~DMA_COMPLETE; + } + + if (value & DMA_RESET) { + next_state->dma[NEXTDMA_ENRX].csr &= ~(DMA_COMPLETE | DMA_SUPDATE | + DMA_ENABLE | DMA_DEV2M); + } + /* DPRINTF("RXCSR \tWrite: %x\n",value); */ + break; + case NEXTDMA_ENRX(NEXTDMA_NEXT_INIT): + next_state->dma[NEXTDMA_ENRX].next_initbuf = value; + break; + case NEXTDMA_ENRX(NEXTDMA_NEXT): + next_state->dma[NEXTDMA_ENRX].next = value; + break; + case NEXTDMA_ENRX(NEXTDMA_LIMIT): + next_state->dma[NEXTDMA_ENRX].limit = value; + break; + case NEXTDMA_SCSI(NEXTDMA_CSR): + if (value & DMA_DEV2M) { 
+ next_state->dma[NEXTDMA_SCSI].csr |= DMA_DEV2M; + } + if (value & DMA_SETENABLE) { + /* DPRINTF("SCSI DMA ENABLE\n"); */ + next_state->dma[NEXTDMA_SCSI].csr |= DMA_ENABLE; + } + if (value & DMA_SETSUPDATE) { + next_state->dma[NEXTDMA_SCSI].csr |= DMA_SUPDATE; + } + if (value & DMA_CLRCOMPLETE) { + next_state->dma[NEXTDMA_SCSI].csr &= ~DMA_COMPLETE; + } + + if (value & DMA_RESET) { + next_state->dma[NEXTDMA_SCSI].csr &= ~(DMA_COMPLETE | DMA_SUPDATE | + DMA_ENABLE | DMA_DEV2M); + /* DPRINTF("SCSI DMA RESET\n"); */ + } + /* DPRINTF("RXCSR \tWrite: %x\n",value); */ + break; + + case NEXTDMA_SCSI(NEXTDMA_NEXT): + next_state->dma[NEXTDMA_SCSI].next = value; + break; + + case NEXTDMA_SCSI(NEXTDMA_LIMIT): + next_state->dma[NEXTDMA_SCSI].limit = value; + break; + + case NEXTDMA_SCSI(NEXTDMA_START): + next_state->dma[NEXTDMA_SCSI].start = value; + break; + + case NEXTDMA_SCSI(NEXTDMA_STOP): + next_state->dma[NEXTDMA_SCSI].stop = value; + break; + + case NEXTDMA_SCSI(NEXTDMA_NEXT_INIT): + next_state->dma[NEXTDMA_SCSI].next_initbuf = value; + break; + + default: + DPRINTF("DMA write @ %x w/ %x\n", (unsigned)addr, (unsigned)value); + } +} + +static uint64_t dma_readl(void *opaque, hwaddr addr, unsigned int size) +{ + NeXTState *next_state = NEXT_MACHINE(opaque); + + switch (addr) { + case NEXTDMA_SCSI(NEXTDMA_CSR): + DPRINTF("SCSI DMA CSR READ\n"); + return next_state->dma[NEXTDMA_SCSI].csr; + case NEXTDMA_ENRX(NEXTDMA_CSR): + return next_state->dma[NEXTDMA_ENRX].csr; + case NEXTDMA_ENRX(NEXTDMA_NEXT_INIT): + return next_state->dma[NEXTDMA_ENRX].next_initbuf; + case NEXTDMA_ENRX(NEXTDMA_NEXT): + return next_state->dma[NEXTDMA_ENRX].next; + case NEXTDMA_ENRX(NEXTDMA_LIMIT): + return next_state->dma[NEXTDMA_ENRX].limit; + + case NEXTDMA_SCSI(NEXTDMA_NEXT): + return next_state->dma[NEXTDMA_SCSI].next; + case NEXTDMA_SCSI(NEXTDMA_NEXT_INIT): + return next_state->dma[NEXTDMA_SCSI].next_initbuf; + case NEXTDMA_SCSI(NEXTDMA_LIMIT): + return next_state->dma[NEXTDMA_SCSI].limit; + case NEXTDMA_SCSI(NEXTDMA_START): + return next_state->dma[NEXTDMA_SCSI].start; + case NEXTDMA_SCSI(NEXTDMA_STOP): + return next_state->dma[NEXTDMA_SCSI].stop; + + default: + DPRINTF("DMA read @ %x\n", (unsigned int)addr); + return 0; + } + + /* + * once the csr's are done, subtract 0x3FEC from the addr, and that will + * normalize the upper registers + */ +} + +static const MemoryRegionOps dma_ops = { + .read = dma_readl, + .write = dma_writel, + .impl.min_access_size = 4, + .valid.min_access_size = 4, + .valid.max_access_size = 4, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void next_irq(void *opaque, int number, int level) +{ + NeXTPC *s = NEXT_PC(opaque); + M68kCPU *cpu = s->cpu; + int shift = 0; + + /* first switch sets interupt status */ + /* DPRINTF("IRQ %i\n",number); */ + switch (number) { + /* level 3 - floppy, kbd/mouse, power, ether rx/tx, scsi, clock */ + case NEXT_FD_I: + shift = 7; + break; + case NEXT_KBD_I: + shift = 3; + break; + case NEXT_PWR_I: + shift = 2; + break; + case NEXT_ENRX_I: + shift = 9; + break; + case NEXT_ENTX_I: + shift = 10; + break; + case NEXT_SCSI_I: + shift = 12; + break; + case NEXT_CLK_I: + shift = 5; + break; + + /* level 5 - scc (serial) */ + case NEXT_SCC_I: + shift = 17; + break; + + /* level 6 - audio etherrx/tx dma */ + case NEXT_ENTX_DMA_I: + shift = 28; + break; + case NEXT_ENRX_DMA_I: + shift = 27; + break; + case NEXT_SCSI_DMA_I: + shift = 26; + break; + case NEXT_SND_I: + shift = 23; + break; + case NEXT_SCC_DMA_I: + shift = 21; + break; + + } + /* + * this HAS to be 
wrong, the interrupt handlers in mach and together + * int_status and int_mask and return if there is a hit + */ + if (s->int_mask & (1 << shift)) { + DPRINTF("%x interrupt masked @ %x\n", 1 << shift, cpu->env.pc); + /* return; */ + } + + /* second switch triggers the correct interrupt */ + if (level) { + s->int_status |= 1 << shift; + + switch (number) { + /* level 3 - floppy, kbd/mouse, power, ether rx/tx, scsi, clock */ + case NEXT_FD_I: + case NEXT_KBD_I: + case NEXT_PWR_I: + case NEXT_ENRX_I: + case NEXT_ENTX_I: + case NEXT_SCSI_I: + case NEXT_CLK_I: + m68k_set_irq_level(cpu, 3, 27); + break; + + /* level 5 - scc (serial) */ + case NEXT_SCC_I: + m68k_set_irq_level(cpu, 5, 29); + break; + + /* level 6 - audio etherrx/tx dma */ + case NEXT_ENTX_DMA_I: + case NEXT_ENRX_DMA_I: + case NEXT_SCSI_DMA_I: + case NEXT_SND_I: + case NEXT_SCC_DMA_I: + m68k_set_irq_level(cpu, 6, 30); + break; + } + } else { + s->int_status &= ~(1 << shift); + cpu_reset_interrupt(CPU(cpu), CPU_INTERRUPT_HARD); + } +} + +static void next_escc_init(DeviceState *pcdev) +{ + DeviceState *dev; + SysBusDevice *s; + + dev = qdev_new(TYPE_ESCC); + qdev_prop_set_uint32(dev, "disabled", 0); + qdev_prop_set_uint32(dev, "frequency", 9600 * 384); + qdev_prop_set_uint32(dev, "it_shift", 0); + qdev_prop_set_bit(dev, "bit_swap", true); + qdev_prop_set_chr(dev, "chrB", serial_hd(1)); + qdev_prop_set_chr(dev, "chrA", serial_hd(0)); + qdev_prop_set_uint32(dev, "chnBtype", escc_serial); + qdev_prop_set_uint32(dev, "chnAtype", escc_serial); + + s = SYS_BUS_DEVICE(dev); + sysbus_realize_and_unref(s, &error_fatal); + sysbus_connect_irq(s, 0, qdev_get_gpio_in(pcdev, NEXT_SCC_I)); + sysbus_connect_irq(s, 1, qdev_get_gpio_in(pcdev, NEXT_SCC_DMA_I)); + sysbus_mmio_map(s, 0, 0x2118000); +} + +static void next_pc_reset(DeviceState *dev) +{ + NeXTPC *s = NEXT_PC(dev); + + /* Set internal registers to initial values */ + /* 0x0000XX00 << vital bits */ + s->scr1 = 0x00011102; + s->scr2 = 0x00ff0c80; + + s->rtc.status = 0x90; + + /* Load RTC RAM - TODO: provide possibility to load contents from file */ + memcpy(s->rtc.ram, rtc_ram2, 32); +} + +static void next_pc_realize(DeviceState *dev, Error **errp) +{ + NeXTPC *s = NEXT_PC(dev); + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + + qdev_init_gpio_in(dev, next_irq, NEXT_NUM_IRQS); + + memory_region_init_io(&s->mmiomem, OBJECT(s), &mmio_ops, s, + "next.mmio", 0xD0000); + memory_region_init_io(&s->scrmem, OBJECT(s), &scr_ops, s, + "next.scr", 0x20000); + sysbus_init_mmio(sbd, &s->mmiomem); + sysbus_init_mmio(sbd, &s->scrmem); +} + +/* + * If the m68k CPU implemented its inbound irq lines as GPIO lines + * rather than via the m68k_set_irq_level() function we would not need + * this cpu link property and could instead provide outbound IRQ lines + * that the board could wire up to the CPU. 
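dma_writel() earlier in this file treats CSR writes as commands: the DMA_SET*/DMA_CLRCOMPLETE/DMA_RESET bits set or clear the corresponding status bits rather than overwriting the register. A standalone sketch of that behavior; the DMA_* values below are illustrative placeholders, the real definitions live in the next-cube headers:

#include <assert.h>
#include <stdint.h>

#define DMA_ENABLE      0x01
#define DMA_SUPDATE     0x02
#define DMA_COMPLETE    0x04
#define DMA_DEV2M       0x08
#define DMA_SETENABLE   0x10
#define DMA_SETSUPDATE  0x20
#define DMA_CLRCOMPLETE 0x40
#define DMA_RESET       0x80

static uint32_t csr_write(uint32_t csr, uint32_t value)
{
    if (value & DMA_DEV2M)       csr |= DMA_DEV2M;
    if (value & DMA_SETENABLE)   csr |= DMA_ENABLE;
    if (value & DMA_SETSUPDATE)  csr |= DMA_SUPDATE;
    if (value & DMA_CLRCOMPLETE) csr &= ~DMA_COMPLETE;
    if (value & DMA_RESET)       csr &= ~(DMA_COMPLETE | DMA_SUPDATE |
                                          DMA_ENABLE | DMA_DEV2M);
    return csr;
}

int main(void)
{
    uint32_t csr = csr_write(0, DMA_SETENABLE | DMA_DEV2M);
    assert(csr == (DMA_ENABLE | DMA_DEV2M));
    assert(csr_write(csr, DMA_RESET) == 0);
    return 0;
}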
+ */ +static Property next_pc_properties[] = { + DEFINE_PROP_LINK("cpu", NeXTPC, cpu, TYPE_M68K_CPU, M68kCPU *), + DEFINE_PROP_END_OF_LIST(), +}; + +static const VMStateDescription next_rtc_vmstate = { + .name = "next-rtc", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT8_ARRAY(ram, NextRtc, 32), + VMSTATE_UINT8(command, NextRtc), + VMSTATE_UINT8(value, NextRtc), + VMSTATE_UINT8(status, NextRtc), + VMSTATE_UINT8(control, NextRtc), + VMSTATE_UINT8(retval, NextRtc), + VMSTATE_END_OF_LIST() + }, +}; + +static const VMStateDescription next_pc_vmstate = { + .name = "next-pc", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(scr1, NeXTPC), + VMSTATE_UINT32(scr2, NeXTPC), + VMSTATE_UINT32(int_mask, NeXTPC), + VMSTATE_UINT32(int_status, NeXTPC), + VMSTATE_UINT8(scsi_csr_1, NeXTPC), + VMSTATE_UINT8(scsi_csr_2, NeXTPC), + VMSTATE_STRUCT(rtc, NeXTPC, 0, next_rtc_vmstate, NextRtc), + VMSTATE_END_OF_LIST() + }, +}; + +static void next_pc_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->desc = "NeXT Peripheral Controller"; + dc->realize = next_pc_realize; + dc->reset = next_pc_reset; + device_class_set_props(dc, next_pc_properties); + dc->vmsd = &next_pc_vmstate; +} + +static const TypeInfo next_pc_info = { + .name = TYPE_NEXT_PC, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(NeXTPC), + .class_init = next_pc_class_init, +}; + +static void next_cube_init(MachineState *machine) +{ + M68kCPU *cpu; + CPUM68KState *env; + MemoryRegion *rom = g_new(MemoryRegion, 1); + MemoryRegion *dmamem = g_new(MemoryRegion, 1); + MemoryRegion *bmapm1 = g_new(MemoryRegion, 1); + MemoryRegion *bmapm2 = g_new(MemoryRegion, 1); + MemoryRegion *sysmem = get_system_memory(); + const char *bios_name = machine->firmware ?: ROM_FILE; + DeviceState *dev; + DeviceState *pcdev; + + /* Initialize the cpu core */ + cpu = M68K_CPU(cpu_create(machine->cpu_type)); + if (!cpu) { + error_report("Unable to find m68k CPU definition"); + exit(1); + } + env = &cpu->env; + + /* Initialize CPU registers. 
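For reference, next_irq() earlier in this file maps each source to one bit of int_status and groups sources onto CPU interrupt levels 3, 5 and 6 with fixed vectors 27, 29 and 30. A few representative entries, tabulated in a standalone sketch:

#include <stdio.h>

struct map { const char *name; int shift; int level; int vector; };

static const struct map next_irq_map[] = {
    { "KBD",      3,  3, 27 },
    { "SCSI",     12, 3, 27 },
    { "SCC",      17, 5, 29 },
    { "SCSI DMA", 26, 6, 30 },
};

int main(void)
{
    for (unsigned i = 0; i < sizeof(next_irq_map) / sizeof(next_irq_map[0]); i++) {
        printf("%-8s status bit %2d -> ipl %d vector %d\n",
               next_irq_map[i].name, next_irq_map[i].shift,
               next_irq_map[i].level, next_irq_map[i].vector);
    }
    return 0;
}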
*/ + env->vbr = 0; + env->sr = 0x2700; + + /* Peripheral Controller */ + pcdev = qdev_new(TYPE_NEXT_PC); + object_property_set_link(OBJECT(pcdev), "cpu", OBJECT(cpu), &error_abort); + sysbus_realize_and_unref(SYS_BUS_DEVICE(pcdev), &error_fatal); + + /* 64MB RAM starting at 0x04000000 */ + memory_region_add_subregion(sysmem, 0x04000000, machine->ram); + + /* Framebuffer */ + dev = qdev_new(TYPE_NEXTFB); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, 0x0B000000); + + /* MMIO */ + sysbus_mmio_map(SYS_BUS_DEVICE(pcdev), 0, 0x02000000); + + /* BMAP IO - acts as a catch-all for now */ + sysbus_mmio_map(SYS_BUS_DEVICE(pcdev), 1, 0x02100000); + + /* BMAP memory */ + memory_region_init_ram_flags_nomigrate(bmapm1, NULL, "next.bmapmem", 64, + RAM_SHARED, &error_fatal); + memory_region_add_subregion(sysmem, 0x020c0000, bmapm1); + /* The Rev_2.5_v66.bin firmware accesses it at 0x820c0020, too */ + memory_region_init_alias(bmapm2, NULL, "next.bmapmem2", bmapm1, 0x0, 64); + memory_region_add_subregion(sysmem, 0x820c0000, bmapm2); + + /* KBD */ + dev = qdev_new(TYPE_NEXTKBD); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, 0x0200e000); + + /* Load ROM here */ + /* still not sure if the rom should also be mapped at 0x0*/ + memory_region_init_rom(rom, NULL, "next.rom", 0x20000, &error_fatal); + memory_region_add_subregion(sysmem, 0x01000000, rom); + if (load_image_targphys(bios_name, 0x01000000, 0x20000) < 8) { + if (!qtest_enabled()) { + error_report("Failed to load firmware '%s'.", bios_name); + } + } else { + uint8_t *ptr; + /* Initial PC is always at offset 4 in firmware binaries */ + ptr = rom_ptr(0x01000004, 4); + g_assert(ptr != NULL); + env->pc = ldl_p(ptr); + if (env->pc >= 0x01020000) { + error_report("'%s' does not seem to be a valid firmware image.", + bios_name); + exit(1); + } + } + + /* Serial */ + next_escc_init(pcdev); + + /* TODO: */ + /* Network */ + /* SCSI */ + + /* DMA */ + memory_region_init_io(dmamem, NULL, &dma_ops, machine, "next.dma", 0x5000); + memory_region_add_subregion(sysmem, 0x02000000, dmamem); +} + +static void next_machine_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + + mc->desc = "NeXT Cube"; + mc->init = next_cube_init; + mc->default_ram_size = RAM_SIZE; + mc->default_ram_id = "next.ram"; + mc->default_cpu_type = M68K_CPU_TYPE_NAME("m68040"); +} + +static const TypeInfo next_typeinfo = { + .name = TYPE_NEXT_MACHINE, + .parent = TYPE_MACHINE, + .class_init = next_machine_class_init, + .instance_size = sizeof(NeXTState), +}; + +static void next_register_type(void) +{ + type_register_static(&next_typeinfo); + type_register_static(&next_pc_info); +} + +type_init(next_register_type) diff --git a/hw/m68k/next-kbd.c b/hw/m68k/next-kbd.c new file mode 100644 index 000000000..0544160e9 --- /dev/null +++ b/hw/m68k/next-kbd.c @@ -0,0 +1,289 @@ +/* + * QEMU NeXT Keyboard/Mouse emulation + * + * Copyright (c) 2011 Bryce Lanham + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this 
permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +/* + * This is admittedly hackish, but works well enough for basic input. Mouse + * support will be added once we can boot something that needs the mouse. + */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "hw/sysbus.h" +#include "hw/m68k/next-cube.h" +#include "ui/console.h" +#include "migration/vmstate.h" +#include "qom/object.h" + +OBJECT_DECLARE_SIMPLE_TYPE(NextKBDState, NEXTKBD) + +/* following defintions from next68k netbsd */ +#define CSR_INT 0x00800000 +#define CSR_DATA 0x00400000 + +#define KD_KEYMASK 0x007f +#define KD_DIRECTION 0x0080 /* pressed or released */ +#define KD_CNTL 0x0100 +#define KD_LSHIFT 0x0200 +#define KD_RSHIFT 0x0400 +#define KD_LCOMM 0x0800 +#define KD_RCOMM 0x1000 +#define KD_LALT 0x2000 +#define KD_RALT 0x4000 +#define KD_VALID 0x8000 /* only set for scancode keys ? */ +#define KD_MODS 0x4f00 + +#define KBD_QUEUE_SIZE 256 + +typedef struct { + uint8_t data[KBD_QUEUE_SIZE]; + int rptr, wptr, count; +} KBDQueue; + + +struct NextKBDState { + SysBusDevice sbd; + MemoryRegion mr; + KBDQueue queue; + uint16_t shift; +}; + +static void queue_code(void *opaque, int code); + +/* lots of magic numbers here */ +static uint32_t kbd_read_byte(void *opaque, hwaddr addr) +{ + switch (addr & 0x3) { + case 0x0: /* 0xe000 */ + return 0x80 | 0x20; + + case 0x1: /* 0xe001 */ + return 0x80 | 0x40 | 0x20 | 0x10; + + case 0x2: /* 0xe002 */ + /* returning 0x40 caused mach to hang */ + return 0x10 | 0x2 | 0x1; + + default: + qemu_log_mask(LOG_UNIMP, "NeXT kbd read byte %"HWADDR_PRIx"\n", addr); + } + + return 0; +} + +static uint32_t kbd_read_word(void *opaque, hwaddr addr) +{ + qemu_log_mask(LOG_UNIMP, "NeXT kbd read word %"HWADDR_PRIx"\n", addr); + return 0; +} + +/* even more magic numbers */ +static uint32_t kbd_read_long(void *opaque, hwaddr addr) +{ + int key = 0; + NextKBDState *s = NEXTKBD(opaque); + KBDQueue *q = &s->queue; + + switch (addr & 0xf) { + case 0x0: /* 0xe000 */ + return 0xA0F09300; + + case 0x8: /* 0xe008 */ + /* get keycode from buffer */ + if (q->count > 0) { + key = q->data[q->rptr]; + if (++q->rptr == KBD_QUEUE_SIZE) { + q->rptr = 0; + } + + q->count--; + + if (s->shift) { + key |= s->shift; + } + + if (key & 0x80) { + return 0; + } else { + return 0x10000000 | KD_VALID | key; + } + } else { + return 0; + } + + default: + qemu_log_mask(LOG_UNIMP, "NeXT kbd read long %"HWADDR_PRIx"\n", addr); + return 0; + } +} + +static uint64_t kbd_readfn(void *opaque, hwaddr addr, unsigned size) +{ + switch (size) { + case 1: + return kbd_read_byte(opaque, addr); + case 2: + return kbd_read_word(opaque, addr); + case 4: + return kbd_read_long(opaque, addr); + default: + g_assert_not_reached(); + } +} + +static void kbd_writefn(void *opaque, hwaddr addr, uint64_t value, + unsigned size) +{ + qemu_log_mask(LOG_UNIMP, "NeXT kbd write: size=%u addr=0x%"HWADDR_PRIx + "val=0x%"PRIx64"\n", size, addr, value); +} + +static const MemoryRegionOps kbd_ops = { + .read = kbd_readfn, + 
.write = kbd_writefn, + .valid.min_access_size = 1, + .valid.max_access_size = 4, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void nextkbd_event(void *opaque, int ch) +{ + /* + * Will want to set vars for caps/num lock + * if (ch & 0x80) -> key release + * there's also e0 escaped scancodes that might need to be handled + */ + queue_code(opaque, ch); +} + +static const unsigned char next_keycodes[128] = { + 0x00, 0x49, 0x4A, 0x4B, 0x4C, 0x4D, 0x50, 0x4F, + 0x4E, 0x1E, 0x1F, 0x20, 0x1D, 0x1C, 0x1B, 0x00, + 0x42, 0x43, 0x44, 0x45, 0x48, 0x47, 0x46, 0x06, + 0x07, 0x08, 0x00, 0x00, 0x2A, 0x00, 0x39, 0x3A, + 0x3B, 0x3C, 0x3D, 0x40, 0x3F, 0x3E, 0x2D, 0x2C, + 0x2B, 0x26, 0x00, 0x00, 0x31, 0x32, 0x33, 0x34, + 0x35, 0x37, 0x36, 0x2e, 0x2f, 0x30, 0x00, 0x00, + 0x00, 0x38, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 +}; + +static void queue_code(void *opaque, int code) +{ + NextKBDState *s = NEXTKBD(opaque); + KBDQueue *q = &s->queue; + int key = code & KD_KEYMASK; + int release = code & 0x80; + static int ext; + + if (code == 0xE0) { + ext = 1; + } + + if (code == 0x2A || code == 0x1D || code == 0x36) { + if (code == 0x2A) { + s->shift = KD_LSHIFT; + } else if (code == 0x36) { + s->shift = KD_RSHIFT; + ext = 0; + } else if (code == 0x1D && !ext) { + s->shift = KD_LCOMM; + } else if (code == 0x1D && ext) { + ext = 0; + s->shift = KD_RCOMM; + } + return; + } else if (code == (0x2A | 0x80) || code == (0x1D | 0x80) || + code == (0x36 | 0x80)) { + s->shift = 0; + return; + } + + if (q->count >= KBD_QUEUE_SIZE) { + return; + } + + q->data[q->wptr] = next_keycodes[key] | release; + + if (++q->wptr == KBD_QUEUE_SIZE) { + q->wptr = 0; + } + + q->count++; + + /* + * might need to actually trigger the NeXT irq, but as the keyboard works + * at the moment, I'll worry about it later + */ + /* s->update_irq(s->update_arg, 1); */ +} + +static void nextkbd_reset(DeviceState *dev) +{ + NextKBDState *nks = NEXTKBD(dev); + + memset(&nks->queue, 0, sizeof(KBDQueue)); + nks->shift = 0; +} + +static void nextkbd_realize(DeviceState *dev, Error **errp) +{ + NextKBDState *s = NEXTKBD(dev); + + memory_region_init_io(&s->mr, OBJECT(dev), &kbd_ops, s, "next.kbd", 0x1000); + sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->mr); + + qemu_add_kbd_event_handler(nextkbd_event, s); +} + +static const VMStateDescription nextkbd_vmstate = { + .name = TYPE_NEXTKBD, + .unmigratable = 1, /* TODO: Implement this when m68k CPU is migratable */ +}; + +static void nextkbd_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + + set_bit(DEVICE_CATEGORY_INPUT, dc->categories); + dc->vmsd = &nextkbd_vmstate; + dc->realize = nextkbd_realize; + dc->reset = nextkbd_reset; +} + +static const TypeInfo nextkbd_info = { + .name = TYPE_NEXTKBD, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(NextKBDState), + .class_init = nextkbd_class_init, +}; + +static void nextkbd_register_types(void) +{ + type_register_static(&nextkbd_info); +} + +type_init(nextkbd_register_types) diff --git a/hw/m68k/q800.c b/hw/m68k/q800.c new file mode 100644 index 000000000..e4c7c9b88 --- /dev/null +++ b/hw/m68k/q800.c @@ -0,0 +1,711 @@ +/* + * QEMU Motorla 680x0 Macintosh hardware System Emulator + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the 
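queue_code() and kbd_read_long() above share a fixed-size scancode queue (rptr/wptr/count) that silently drops input when full. A minimal standalone version of the same ring-buffer structure:

#include <assert.h>
#include <stdint.h>

#define KBD_QUEUE_SIZE 256

typedef struct {
    uint8_t data[KBD_QUEUE_SIZE];
    int rptr, wptr, count;
} KBDQueue;

static void kbd_push(KBDQueue *q, uint8_t code)
{
    if (q->count >= KBD_QUEUE_SIZE) {
        return;                       /* queue full: drop the scancode */
    }
    q->data[q->wptr] = code;
    if (++q->wptr == KBD_QUEUE_SIZE) {
        q->wptr = 0;
    }
    q->count++;
}

static int kbd_pop(KBDQueue *q)
{
    if (q->count == 0) {
        return -1;                    /* nothing buffered */
    }
    int code = q->data[q->rptr];
    if (++q->rptr == KBD_QUEUE_SIZE) {
        q->rptr = 0;
    }
    q->count--;
    return code;
}

int main(void)
{
    KBDQueue q = { 0 };

    kbd_push(&q, 0x42);
    assert(kbd_pop(&q) == 0x42);
    assert(kbd_pop(&q) == -1);
    return 0;
}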
"Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "qemu-common.h" +#include "qemu/datadir.h" +#include "sysemu/sysemu.h" +#include "cpu.h" +#include "hw/boards.h" +#include "hw/or-irq.h" +#include "hw/nmi.h" +#include "elf.h" +#include "hw/loader.h" +#include "ui/console.h" +#include "hw/char/escc.h" +#include "hw/sysbus.h" +#include "hw/scsi/esp.h" +#include "standard-headers/asm-m68k/bootinfo.h" +#include "standard-headers/asm-m68k/bootinfo-mac.h" +#include "bootinfo.h" +#include "hw/misc/mac_via.h" +#include "hw/input/adb.h" +#include "hw/nubus/mac-nubus-bridge.h" +#include "hw/display/macfb.h" +#include "hw/block/swim.h" +#include "net/net.h" +#include "qapi/error.h" +#include "sysemu/qtest.h" +#include "sysemu/runstate.h" +#include "sysemu/reset.h" +#include "migration/vmstate.h" + +#define MACROM_ADDR 0x40800000 +#define MACROM_SIZE 0x00100000 + +#define MACROM_FILENAME "MacROM.bin" + +#define IO_BASE 0x50000000 +#define IO_SLICE 0x00040000 +#define IO_SIZE 0x04000000 + +#define VIA_BASE (IO_BASE + 0x00000) +#define SONIC_PROM_BASE (IO_BASE + 0x08000) +#define SONIC_BASE (IO_BASE + 0x0a000) +#define SCC_BASE (IO_BASE + 0x0c020) +#define ESP_BASE (IO_BASE + 0x10000) +#define ESP_PDMA (IO_BASE + 0x10100) +#define ASC_BASE (IO_BASE + 0x14000) +#define SWIM_BASE (IO_BASE + 0x1E000) + +#define SONIC_PROM_SIZE 0x1000 + +/* + * the video base, whereas it a Nubus address, + * is needed by the kernel to have early display and + * thus provided by the bootloader + */ +#define VIDEO_BASE 0xf9000000 + +#define MAC_CLOCK 3686418 + +/* + * Slot 0x9 is reserved for use by the in-built framebuffer whilst only + * slots 0xc, 0xd and 0xe physically exist on the Quadra 800 + */ +#define Q800_NUBUS_SLOTS_AVAILABLE (BIT(0x9) | BIT(0xc) | BIT(0xd) | \ + BIT(0xe)) + +/* + * The GLUE (General Logic Unit) is an Apple custom integrated circuit chip + * that performs a variety of functions (RAM management, clock generation, ...). + * The GLUE chip receives interrupt requests from various devices, + * assign priority to each, and asserts one or more interrupt line to the + * CPU. 
+ */ + +#define TYPE_GLUE "q800-glue" +OBJECT_DECLARE_SIMPLE_TYPE(GLUEState, GLUE) + +struct GLUEState { + SysBusDevice parent_obj; + M68kCPU *cpu; + uint8_t ipr; + uint8_t auxmode; + qemu_irq irqs[1]; + QEMUTimer *nmi_release; +}; + +#define GLUE_IRQ_IN_VIA1 0 +#define GLUE_IRQ_IN_VIA2 1 +#define GLUE_IRQ_IN_SONIC 2 +#define GLUE_IRQ_IN_ESCC 3 +#define GLUE_IRQ_IN_NMI 4 + +#define GLUE_IRQ_NUBUS_9 0 + +/* + * The GLUE logic on the Quadra 800 supports 2 different IRQ routing modes + * controlled from the VIA1 auxmode GPIO (port B bit 6) which are documented + * in NetBSD as follows: + * + * A/UX mode (Linux, NetBSD, auxmode GPIO low) + * + * Level 0: Spurious: ignored + * Level 1: Software + * Level 2: VIA2 (except ethernet, sound) + * Level 3: Ethernet + * Level 4: Serial (SCC) + * Level 5: Sound + * Level 6: VIA1 + * Level 7: NMIs: parity errors, RESET button, YANCC error + * + * Classic mode (default: used by MacOS, A/UX 3.0.1, auxmode GPIO high) + * + * Level 0: Spurious: ignored + * Level 1: VIA1 (clock, ADB) + * Level 2: VIA2 (NuBus, SCSI) + * Level 3: + * Level 4: Serial (SCC) + * Level 5: + * Level 6: + * Level 7: Non-maskable: parity errors, RESET button + * + * Note that despite references to A/UX mode in Linux and NetBSD, at least + * A/UX 3.0.1 still uses Classic mode. + */ + +static void GLUE_set_irq(void *opaque, int irq, int level) +{ + GLUEState *s = opaque; + int i; + + if (s->auxmode) { + /* Classic mode */ + switch (irq) { + case GLUE_IRQ_IN_VIA1: + irq = 0; + break; + + case GLUE_IRQ_IN_VIA2: + irq = 1; + break; + + case GLUE_IRQ_IN_SONIC: + /* Route to VIA2 instead */ + qemu_set_irq(s->irqs[GLUE_IRQ_NUBUS_9], level); + return; + + case GLUE_IRQ_IN_ESCC: + irq = 3; + break; + + case GLUE_IRQ_IN_NMI: + irq = 6; + break; + + default: + g_assert_not_reached(); + } + } else { + /* A/UX mode */ + switch (irq) { + case GLUE_IRQ_IN_VIA1: + irq = 5; + break; + + case GLUE_IRQ_IN_VIA2: + irq = 1; + break; + + case GLUE_IRQ_IN_SONIC: + irq = 2; + break; + + case GLUE_IRQ_IN_ESCC: + irq = 3; + break; + + case GLUE_IRQ_IN_NMI: + irq = 6; + break; + + default: + g_assert_not_reached(); + } + } + + if (level) { + s->ipr |= 1 << irq; + } else { + s->ipr &= ~(1 << irq); + } + + for (i = 7; i >= 0; i--) { + if ((s->ipr >> i) & 1) { + m68k_set_irq_level(s->cpu, i + 1, i + 25); + return; + } + } + m68k_set_irq_level(s->cpu, 0, 0); +} + +static void glue_auxmode_set_irq(void *opaque, int irq, int level) +{ + GLUEState *s = GLUE(opaque); + + s->auxmode = level; +} + +static void glue_nmi(NMIState *n, int cpu_index, Error **errp) +{ + GLUEState *s = GLUE(n); + + /* Hold NMI active for 100ms */ + GLUE_set_irq(s, GLUE_IRQ_IN_NMI, 1); + timer_mod(s->nmi_release, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 100); +} + +static void glue_nmi_release(void *opaque) +{ + GLUEState *s = GLUE(opaque); + + GLUE_set_irq(s, GLUE_IRQ_IN_NMI, 0); +} + +static void glue_reset(DeviceState *dev) +{ + GLUEState *s = GLUE(dev); + + s->ipr = 0; + s->auxmode = 0; + + timer_del(s->nmi_release); +} + +static const VMStateDescription vmstate_glue = { + .name = "q800-glue", + .version_id = 0, + .minimum_version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_UINT8(ipr, GLUEState), + VMSTATE_UINT8(auxmode, GLUEState), + VMSTATE_TIMER_PTR(nmi_release, GLUEState), + VMSTATE_END_OF_LIST(), + }, +}; + +/* + * If the m68k CPU implemented its inbound irq lines as GPIO lines + * rather than via the m68k_set_irq_level() function we would not need + * this cpu link property and could instead provide outbound IRQ lines + * that the 
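GLUE_set_irq() above finishes with a priority encoder over ipr: the highest pending bit drives the CPU at level bit + 1 with vector bit + 25, and an empty ipr drops the CPU back to level 0. A standalone sketch of that final step:

#include <assert.h>
#include <stdint.h>

static int glue_pick(uint8_t ipr, int *vector)
{
    for (int i = 7; i >= 0; i--) {
        if ((ipr >> i) & 1) {
            *vector = i + 25;
            return i + 1;     /* interrupt level */
        }
    }
    *vector = 0;
    return 0;                 /* nothing pending */
}

int main(void)
{
    int vec;

    /* A/UX mode: ESCC (bit 3) and VIA2 (bit 1) pending -> ESCC wins */
    assert(glue_pick((1 << 3) | (1 << 1), &vec) == 4 && vec == 28);
    return 0;
}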
board could wire up to the CPU. + */ +static Property glue_properties[] = { + DEFINE_PROP_LINK("cpu", GLUEState, cpu, TYPE_M68K_CPU, M68kCPU *), + DEFINE_PROP_END_OF_LIST(), +}; + +static void glue_finalize(Object *obj) +{ + GLUEState *s = GLUE(obj); + + timer_free(s->nmi_release); +} + +static void glue_init(Object *obj) +{ + DeviceState *dev = DEVICE(obj); + GLUEState *s = GLUE(dev); + + qdev_init_gpio_in(dev, GLUE_set_irq, 8); + qdev_init_gpio_in_named(dev, glue_auxmode_set_irq, "auxmode", 1); + + qdev_init_gpio_out(dev, s->irqs, 1); + + /* NMI release timer */ + s->nmi_release = timer_new_ms(QEMU_CLOCK_VIRTUAL, glue_nmi_release, s); +} + +static void glue_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + NMIClass *nc = NMI_CLASS(klass); + + dc->vmsd = &vmstate_glue; + dc->reset = glue_reset; + device_class_set_props(dc, glue_properties); + nc->nmi_monitor_handler = glue_nmi; +} + +static const TypeInfo glue_info = { + .name = TYPE_GLUE, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(GLUEState), + .instance_init = glue_init, + .instance_finalize = glue_finalize, + .class_init = glue_class_init, + .interfaces = (InterfaceInfo[]) { + { TYPE_NMI }, + { } + }, +}; + +static void main_cpu_reset(void *opaque) +{ + M68kCPU *cpu = opaque; + CPUState *cs = CPU(cpu); + + cpu_reset(cs); + cpu->env.aregs[7] = ldl_phys(cs->as, 0); + cpu->env.pc = ldl_phys(cs->as, 4); +} + +static uint8_t fake_mac_rom[] = { + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + + /* offset: 0xa - mac_reset */ + + /* via2[vDirB] |= VIA2B_vPower */ + 0x20, 0x7C, 0x50, 0xF0, 0x24, 0x00, /* moveal VIA2_BASE+vDirB,%a0 */ + 0x10, 0x10, /* moveb %a0@,%d0 */ + 0x00, 0x00, 0x00, 0x04, /* orib #4,%d0 */ + 0x10, 0x80, /* moveb %d0,%a0@ */ + + /* via2[vBufB] &= ~VIA2B_vPower */ + 0x20, 0x7C, 0x50, 0xF0, 0x20, 0x00, /* moveal VIA2_BASE+vBufB,%a0 */ + 0x10, 0x10, /* moveb %a0@,%d0 */ + 0x02, 0x00, 0xFF, 0xFB, /* andib #-5,%d0 */ + 0x10, 0x80, /* moveb %d0,%a0@ */ + + /* while (true) ; */ + 0x60, 0xFE /* bras [self] */ +}; + +static void q800_init(MachineState *machine) +{ + M68kCPU *cpu = NULL; + int linux_boot; + int32_t kernel_size; + uint64_t elf_entry; + char *filename; + int bios_size; + ram_addr_t initrd_base; + int32_t initrd_size; + MemoryRegion *rom; + MemoryRegion *io; + MemoryRegion *dp8393x_prom = g_new(MemoryRegion, 1); + uint8_t *prom; + const int io_slice_nb = (IO_SIZE / IO_SLICE) - 1; + int i, checksum; + MacFbMode *macfb_mode; + ram_addr_t ram_size = machine->ram_size; + const char *kernel_filename = machine->kernel_filename; + const char *initrd_filename = machine->initrd_filename; + const char *kernel_cmdline = machine->kernel_cmdline; + const char *bios_name = machine->firmware ?: MACROM_FILENAME; + hwaddr parameters_base; + CPUState *cs; + DeviceState *dev; + DeviceState *via1_dev, *via2_dev; + DeviceState *escc_orgate; + SysBusESPState *sysbus_esp; + ESPState *esp; + SysBusDevice *sysbus; + BusState *adb_bus; + NubusBus *nubus; + DeviceState *glue; + DriveInfo *dinfo; + + linux_boot = (kernel_filename != NULL); + + if (ram_size > 1 * GiB) { + error_report("Too much memory for this machine: %" PRId64 " MiB, " + "maximum 1024 MiB", ram_size / MiB); + exit(1); + } + + /* init CPUs */ + cpu = M68K_CPU(cpu_create(machine->cpu_type)); + qemu_register_reset(main_cpu_reset, cpu); + + /* RAM */ + memory_region_add_subregion(get_system_memory(), 0, machine->ram); + + /* + * Memory from IO_BASE to IO_BASE + IO_SLICE is repeated + * from IO_BASE + IO_SLICE to IO_BASE + IO_SIZE + */ + io = 
g_new(MemoryRegion, io_slice_nb); + for (i = 0; i < io_slice_nb; i++) { + char *name = g_strdup_printf("mac_m68k.io[%d]", i + 1); + + memory_region_init_alias(&io[i], NULL, name, get_system_memory(), + IO_BASE, IO_SLICE); + memory_region_add_subregion(get_system_memory(), + IO_BASE + (i + 1) * IO_SLICE, &io[i]); + g_free(name); + } + + /* IRQ Glue */ + glue = qdev_new(TYPE_GLUE); + object_property_set_link(OBJECT(glue), "cpu", OBJECT(cpu), &error_abort); + sysbus_realize_and_unref(SYS_BUS_DEVICE(glue), &error_fatal); + + /* VIA 1 */ + via1_dev = qdev_new(TYPE_MOS6522_Q800_VIA1); + dinfo = drive_get(IF_MTD, 0, 0); + if (dinfo) { + qdev_prop_set_drive(via1_dev, "drive", blk_by_legacy_dinfo(dinfo)); + } + sysbus = SYS_BUS_DEVICE(via1_dev); + sysbus_realize_and_unref(sysbus, &error_fatal); + sysbus_mmio_map(sysbus, 1, VIA_BASE); + sysbus_connect_irq(sysbus, 0, qdev_get_gpio_in(glue, GLUE_IRQ_IN_VIA1)); + /* A/UX mode */ + qdev_connect_gpio_out(via1_dev, 0, + qdev_get_gpio_in_named(glue, "auxmode", 0)); + + adb_bus = qdev_get_child_bus(via1_dev, "adb.0"); + dev = qdev_new(TYPE_ADB_KEYBOARD); + qdev_realize_and_unref(dev, adb_bus, &error_fatal); + dev = qdev_new(TYPE_ADB_MOUSE); + qdev_realize_and_unref(dev, adb_bus, &error_fatal); + + /* VIA 2 */ + via2_dev = qdev_new(TYPE_MOS6522_Q800_VIA2); + sysbus = SYS_BUS_DEVICE(via2_dev); + sysbus_realize_and_unref(sysbus, &error_fatal); + sysbus_mmio_map(sysbus, 1, VIA_BASE + VIA_SIZE); + sysbus_connect_irq(sysbus, 0, qdev_get_gpio_in(glue, GLUE_IRQ_IN_VIA2)); + + /* MACSONIC */ + + if (nb_nics > 1) { + error_report("q800 can only have one ethernet interface"); + exit(1); + } + + qemu_check_nic_model(&nd_table[0], "dp83932"); + + /* + * The MacSonic driver needs an Apple MAC address + * Valid prefixes are: + * 00:05:02 Apple + * 00:80:19 Dayna Communications, Inc. 
+ * 00:A0:40 Apple + * 08:00:07 Apple + * (Q800 uses the last one) + */ + nd_table[0].macaddr.a[0] = 0x08; + nd_table[0].macaddr.a[1] = 0x00; + nd_table[0].macaddr.a[2] = 0x07; + + dev = qdev_new("dp8393x"); + qdev_set_nic_properties(dev, &nd_table[0]); + qdev_prop_set_uint8(dev, "it_shift", 2); + qdev_prop_set_bit(dev, "big_endian", true); + object_property_set_link(OBJECT(dev), "dma_mr", + OBJECT(get_system_memory()), &error_abort); + sysbus = SYS_BUS_DEVICE(dev); + sysbus_realize_and_unref(sysbus, &error_fatal); + sysbus_mmio_map(sysbus, 0, SONIC_BASE); + sysbus_connect_irq(sysbus, 0, qdev_get_gpio_in(glue, GLUE_IRQ_IN_SONIC)); + + memory_region_init_rom(dp8393x_prom, NULL, "dp8393x-q800.prom", + SONIC_PROM_SIZE, &error_fatal); + memory_region_add_subregion(get_system_memory(), SONIC_PROM_BASE, + dp8393x_prom); + + /* Add MAC address with valid checksum to PROM */ + prom = memory_region_get_ram_ptr(dp8393x_prom); + checksum = 0; + for (i = 0; i < 6; i++) { + prom[i] = revbit8(nd_table[0].macaddr.a[i]); + checksum ^= prom[i]; + } + prom[7] = 0xff - checksum; + + /* SCC */ + + dev = qdev_new(TYPE_ESCC); + qdev_prop_set_uint32(dev, "disabled", 0); + qdev_prop_set_uint32(dev, "frequency", MAC_CLOCK); + qdev_prop_set_uint32(dev, "it_shift", 1); + qdev_prop_set_bit(dev, "bit_swap", true); + qdev_prop_set_chr(dev, "chrA", serial_hd(0)); + qdev_prop_set_chr(dev, "chrB", serial_hd(1)); + qdev_prop_set_uint32(dev, "chnBtype", 0); + qdev_prop_set_uint32(dev, "chnAtype", 0); + sysbus = SYS_BUS_DEVICE(dev); + sysbus_realize_and_unref(sysbus, &error_fatal); + + /* Logically OR both its IRQs together */ + escc_orgate = DEVICE(object_new(TYPE_OR_IRQ)); + object_property_set_int(OBJECT(escc_orgate), "num-lines", 2, &error_fatal); + qdev_realize_and_unref(escc_orgate, NULL, &error_fatal); + sysbus_connect_irq(sysbus, 0, qdev_get_gpio_in(escc_orgate, 0)); + sysbus_connect_irq(sysbus, 1, qdev_get_gpio_in(escc_orgate, 1)); + qdev_connect_gpio_out(DEVICE(escc_orgate), 0, + qdev_get_gpio_in(glue, GLUE_IRQ_IN_ESCC)); + sysbus_mmio_map(sysbus, 0, SCC_BASE); + + /* SCSI */ + + dev = qdev_new(TYPE_SYSBUS_ESP); + sysbus_esp = SYSBUS_ESP(dev); + esp = &sysbus_esp->esp; + esp->dma_memory_read = NULL; + esp->dma_memory_write = NULL; + esp->dma_opaque = NULL; + sysbus_esp->it_shift = 4; + esp->dma_enabled = 1; + + sysbus = SYS_BUS_DEVICE(dev); + sysbus_realize_and_unref(sysbus, &error_fatal); + sysbus_connect_irq(sysbus, 0, qdev_get_gpio_in(via2_dev, + VIA2_IRQ_SCSI_BIT)); + sysbus_connect_irq(sysbus, 1, qdev_get_gpio_in(via2_dev, + VIA2_IRQ_SCSI_DATA_BIT)); + sysbus_mmio_map(sysbus, 0, ESP_BASE); + sysbus_mmio_map(sysbus, 1, ESP_PDMA); + + scsi_bus_legacy_handle_cmdline(&esp->bus); + + /* SWIM floppy controller */ + + dev = qdev_new(TYPE_SWIM); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, SWIM_BASE); + + /* NuBus */ + + dev = qdev_new(TYPE_MAC_NUBUS_BRIDGE); + qdev_prop_set_uint32(dev, "slot-available-mask", + Q800_NUBUS_SLOTS_AVAILABLE); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, + MAC_NUBUS_FIRST_SLOT * NUBUS_SUPER_SLOT_SIZE); + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 1, NUBUS_SLOT_BASE + + MAC_NUBUS_FIRST_SLOT * NUBUS_SLOT_SIZE); + qdev_connect_gpio_out(dev, 9, + qdev_get_gpio_in_named(via2_dev, "nubus-irq", + VIA2_NUBUS_IRQ_INTVIDEO)); + for (i = 1; i < VIA2_NUBUS_IRQ_NB; i++) { + qdev_connect_gpio_out(dev, 9 + i, + qdev_get_gpio_in_named(via2_dev, "nubus-irq", + VIA2_NUBUS_IRQ_9 + i)); + } + + /* 
+ * Since the framebuffer in slot 0x9 uses a separate IRQ, wire the unused + * IRQ via GLUE for use by SONIC Ethernet in classic mode + */ + qdev_connect_gpio_out(glue, GLUE_IRQ_NUBUS_9, + qdev_get_gpio_in_named(via2_dev, "nubus-irq", + VIA2_NUBUS_IRQ_9)); + + nubus = &NUBUS_BRIDGE(dev)->bus; + + /* framebuffer in nubus slot #9 */ + + dev = qdev_new(TYPE_NUBUS_MACFB); + qdev_prop_set_uint32(dev, "slot", 9); + qdev_prop_set_uint32(dev, "width", graphic_width); + qdev_prop_set_uint32(dev, "height", graphic_height); + qdev_prop_set_uint8(dev, "depth", graphic_depth); + if (graphic_width == 1152 && graphic_height == 870) { + qdev_prop_set_uint8(dev, "display", MACFB_DISPLAY_APPLE_21_COLOR); + } else { + qdev_prop_set_uint8(dev, "display", MACFB_DISPLAY_VGA); + } + qdev_realize_and_unref(dev, BUS(nubus), &error_fatal); + + macfb_mode = (NUBUS_MACFB(dev)->macfb).mode; + + cs = CPU(cpu); + if (linux_boot) { + uint64_t high; + kernel_size = load_elf(kernel_filename, NULL, NULL, NULL, + &elf_entry, NULL, &high, NULL, 1, + EM_68K, 0, 0); + if (kernel_size < 0) { + error_report("could not load kernel '%s'", kernel_filename); + exit(1); + } + stl_phys(cs->as, 4, elf_entry); /* reset initial PC */ + parameters_base = (high + 1) & ~1; + + BOOTINFO1(cs->as, parameters_base, BI_MACHTYPE, MACH_MAC); + BOOTINFO1(cs->as, parameters_base, BI_FPUTYPE, FPU_68040); + BOOTINFO1(cs->as, parameters_base, BI_MMUTYPE, MMU_68040); + BOOTINFO1(cs->as, parameters_base, BI_CPUTYPE, CPU_68040); + BOOTINFO1(cs->as, parameters_base, BI_MAC_CPUID, CPUB_68040); + BOOTINFO1(cs->as, parameters_base, BI_MAC_MODEL, MAC_MODEL_Q800); + BOOTINFO1(cs->as, parameters_base, + BI_MAC_MEMSIZE, ram_size >> 20); /* in MB */ + BOOTINFO2(cs->as, parameters_base, BI_MEMCHUNK, 0, ram_size); + BOOTINFO1(cs->as, parameters_base, BI_MAC_VADDR, + VIDEO_BASE + macfb_mode->offset); + BOOTINFO1(cs->as, parameters_base, BI_MAC_VDEPTH, graphic_depth); + BOOTINFO1(cs->as, parameters_base, BI_MAC_VDIM, + (graphic_height << 16) | graphic_width); + BOOTINFO1(cs->as, parameters_base, BI_MAC_VROW, macfb_mode->stride); + BOOTINFO1(cs->as, parameters_base, BI_MAC_SCCBASE, SCC_BASE); + + rom = g_malloc(sizeof(*rom)); + memory_region_init_ram_ptr(rom, NULL, "m68k_fake_mac.rom", + sizeof(fake_mac_rom), fake_mac_rom); + memory_region_set_readonly(rom, true); + memory_region_add_subregion(get_system_memory(), MACROM_ADDR, rom); + + if (kernel_cmdline) { + BOOTINFOSTR(cs->as, parameters_base, BI_COMMAND_LINE, + kernel_cmdline); + } + + /* load initrd */ + if (initrd_filename) { + initrd_size = get_image_size(initrd_filename); + if (initrd_size < 0) { + error_report("could not load initial ram disk '%s'", + initrd_filename); + exit(1); + } + + initrd_base = (ram_size - initrd_size) & TARGET_PAGE_MASK; + load_image_targphys(initrd_filename, initrd_base, + ram_size - initrd_base); + BOOTINFO2(cs->as, parameters_base, BI_RAMDISK, initrd_base, + initrd_size); + } else { + initrd_base = 0; + initrd_size = 0; + } + BOOTINFO0(cs->as, parameters_base, BI_LAST); + } else { + uint8_t *ptr; + /* allocate and load BIOS */ + rom = g_malloc(sizeof(*rom)); + memory_region_init_rom(rom, NULL, "m68k_mac.rom", MACROM_SIZE, + &error_abort); + filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name); + memory_region_add_subregion(get_system_memory(), MACROM_ADDR, rom); + + /* Load MacROM binary */ + if (filename) { + bios_size = load_image_targphys(filename, MACROM_ADDR, MACROM_SIZE); + g_free(filename); + } else { + bios_size = -1; + } + + /* Remove qtest_enabled() check once firmware 
files are in the tree */ + if (!qtest_enabled()) { + if (bios_size < 0 || bios_size > MACROM_SIZE) { + error_report("could not load MacROM '%s'", bios_name); + exit(1); + } + + ptr = rom_ptr(MACROM_ADDR, MACROM_SIZE); + stl_phys(cs->as, 0, ldl_p(ptr)); /* reset initial SP */ + stl_phys(cs->as, 4, + MACROM_ADDR + ldl_p(ptr + 4)); /* reset initial PC */ + } + } +} + +static void q800_machine_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + mc->desc = "Macintosh Quadra 800"; + mc->init = q800_init; + mc->default_cpu_type = M68K_CPU_TYPE_NAME("m68040"); + mc->max_cpus = 1; + mc->block_default_type = IF_SCSI; + mc->default_ram_id = "m68k_mac.ram"; +} + +static const TypeInfo q800_machine_typeinfo = { + .name = MACHINE_TYPE_NAME("q800"), + .parent = TYPE_MACHINE, + .class_init = q800_machine_class_init, +}; + +static void q800_machine_register_types(void) +{ + type_register_static(&q800_machine_typeinfo); + type_register_static(&glue_info); +} + +type_init(q800_machine_register_types) diff --git a/hw/m68k/virt.c b/hw/m68k/virt.c new file mode 100644 index 000000000..0efa4a45c --- /dev/null +++ b/hw/m68k/virt.c @@ -0,0 +1,324 @@ +/* + * SPDX-License-Identifier: GPL-2.0-or-later + * + * QEMU Virtual M68K Machine + * + * (c) 2020 Laurent Vivier + * + */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "qemu-common.h" +#include "sysemu/sysemu.h" +#include "cpu.h" +#include "hw/boards.h" +#include "hw/qdev-properties.h" +#include "elf.h" +#include "hw/loader.h" +#include "ui/console.h" +#include "hw/sysbus.h" +#include "standard-headers/asm-m68k/bootinfo.h" +#include "standard-headers/asm-m68k/bootinfo-virt.h" +#include "bootinfo.h" +#include "net/net.h" +#include "qapi/error.h" +#include "sysemu/qtest.h" +#include "sysemu/runstate.h" +#include "sysemu/reset.h" + +#include "hw/intc/m68k_irqc.h" +#include "hw/misc/virt_ctrl.h" +#include "hw/char/goldfish_tty.h" +#include "hw/rtc/goldfish_rtc.h" +#include "hw/intc/goldfish_pic.h" +#include "hw/virtio/virtio-mmio.h" +#include "hw/virtio/virtio-blk.h" + +/* + * 6 goldfish-pic for CPU IRQ #1 to IRQ #6 + * CPU IRQ #1 -> PIC #1 + * IRQ #1 to IRQ #31 -> unused + * IRQ #32 -> goldfish-tty + * CPU IRQ #2 -> PIC #2 + * IRQ #1 to IRQ #32 -> virtio-mmio from 1 to 32 + * CPU IRQ #3 -> PIC #3 + * IRQ #1 to IRQ #32 -> virtio-mmio from 33 to 64 + * CPU IRQ #4 -> PIC #4 + * IRQ #1 to IRQ #32 -> virtio-mmio from 65 to 96 + * CPU IRQ #5 -> PIC #5 + * IRQ #1 to IRQ #32 -> virtio-mmio from 97 to 128 + * CPU IRQ #6 -> PIC #6 + * IRQ #1 -> goldfish-rtc + * IRQ #2 to IRQ #32 -> unused + * CPU IRQ #7 -> NMI + */ + +#define PIC_IRQ_BASE(num) (8 + (num - 1) * 32) +#define PIC_IRQ(num, irq) (PIC_IRQ_BASE(num) + irq - 1) +#define PIC_GPIO(pic_irq) (qdev_get_gpio_in(pic_dev[(pic_irq - 8) / 32], \ + (pic_irq - 8) % 32)) + +#define VIRT_GF_PIC_MMIO_BASE 0xff000000 /* MMIO: 0xff000000 - 0xff005fff */ +#define VIRT_GF_PIC_IRQ_BASE 1 /* IRQ: #1 -> #6 */ +#define VIRT_GF_PIC_NB 6 + +/* 2 goldfish-rtc (and timer) */ +#define VIRT_GF_RTC_MMIO_BASE 0xff006000 /* MMIO: 0xff006000 - 0xff007fff */ +#define VIRT_GF_RTC_IRQ_BASE PIC_IRQ(6, 1) /* PIC: #6, IRQ: #1 */ +#define VIRT_GF_RTC_NB 2 + +/* 1 goldfish-tty */ +#define VIRT_GF_TTY_MMIO_BASE 0xff008000 /* MMIO: 0xff008000 - 0xff008fff */ +#define VIRT_GF_TTY_IRQ_BASE PIC_IRQ(1, 32) /* PIC: #1, IRQ: #32 */ + +/* 1 virt-ctrl */ +#define VIRT_CTRL_MMIO_BASE 0xff009000 /* MMIO: 0xff009000 - 0xff009fff */ +#define VIRT_CTRL_IRQ_BASE PIC_IRQ(1, 1) /* PIC: #1, IRQ: #1 */ + +/* + * virtio-mmio size is 
0x200 bytes + * we use 4 goldfish-pic to attach them, + * we can attach 32 virtio devices / goldfish-pic + * -> we can manage 32 * 4 = 128 virtio devices + */ +#define VIRT_VIRTIO_MMIO_BASE 0xff010000 /* MMIO: 0xff010000 - 0xff01ffff */ +#define VIRT_VIRTIO_IRQ_BASE PIC_IRQ(2, 1) /* PIC: 2, 3, 4, 5, IRQ: ALL */ + +static void main_cpu_reset(void *opaque) +{ + M68kCPU *cpu = opaque; + CPUState *cs = CPU(cpu); + + cpu_reset(cs); + cpu->env.aregs[7] = ldl_phys(cs->as, 0); + cpu->env.pc = ldl_phys(cs->as, 4); +} + +static void virt_init(MachineState *machine) +{ + M68kCPU *cpu = NULL; + int32_t kernel_size; + uint64_t elf_entry; + ram_addr_t initrd_base; + int32_t initrd_size; + ram_addr_t ram_size = machine->ram_size; + const char *kernel_filename = machine->kernel_filename; + const char *initrd_filename = machine->initrd_filename; + const char *kernel_cmdline = machine->kernel_cmdline; + hwaddr parameters_base; + DeviceState *dev; + DeviceState *irqc_dev; + DeviceState *pic_dev[VIRT_GF_PIC_NB]; + SysBusDevice *sysbus; + hwaddr io_base; + int i; + + if (ram_size > 3399672 * KiB) { + /* + * The physical memory can be up to 4 GiB - 16 MiB, but linux + * kernel crashes after this limit (~ 3.2 GiB) + */ + error_report("Too much memory for this machine: %" PRId64 " KiB, " + "maximum 3399672 KiB", ram_size / KiB); + exit(1); + } + + /* init CPUs */ + cpu = M68K_CPU(cpu_create(machine->cpu_type)); + qemu_register_reset(main_cpu_reset, cpu); + + /* RAM */ + memory_region_add_subregion(get_system_memory(), 0, machine->ram); + + /* IRQ Controller */ + + irqc_dev = qdev_new(TYPE_M68K_IRQC); + sysbus_realize_and_unref(SYS_BUS_DEVICE(irqc_dev), &error_fatal); + + /* + * 6 goldfish-pic + * + * map: 0xff000000 - 0xff006fff = 28 KiB + * IRQ: #1 (lower priority) -> #6 (higher priority) + * + */ + io_base = VIRT_GF_PIC_MMIO_BASE; + for (i = 0; i < VIRT_GF_PIC_NB; i++) { + pic_dev[i] = qdev_new(TYPE_GOLDFISH_PIC); + sysbus = SYS_BUS_DEVICE(pic_dev[i]); + qdev_prop_set_uint8(pic_dev[i], "index", i); + sysbus_realize_and_unref(sysbus, &error_fatal); + + sysbus_mmio_map(sysbus, 0, io_base); + sysbus_connect_irq(sysbus, 0, qdev_get_gpio_in(irqc_dev, i)); + + io_base += 0x1000; + } + + /* goldfish-rtc */ + io_base = VIRT_GF_RTC_MMIO_BASE; + for (i = 0; i < VIRT_GF_RTC_NB; i++) { + dev = qdev_new(TYPE_GOLDFISH_RTC); + sysbus = SYS_BUS_DEVICE(dev); + sysbus_realize_and_unref(sysbus, &error_fatal); + sysbus_mmio_map(sysbus, 0, io_base); + sysbus_connect_irq(sysbus, 0, PIC_GPIO(VIRT_GF_RTC_IRQ_BASE + i)); + + io_base += 0x1000; + } + + /* goldfish-tty */ + dev = qdev_new(TYPE_GOLDFISH_TTY); + sysbus = SYS_BUS_DEVICE(dev); + qdev_prop_set_chr(dev, "chardev", serial_hd(0)); + sysbus_realize_and_unref(sysbus, &error_fatal); + sysbus_mmio_map(sysbus, 0, VIRT_GF_TTY_MMIO_BASE); + sysbus_connect_irq(sysbus, 0, PIC_GPIO(VIRT_GF_TTY_IRQ_BASE)); + + /* virt controller */ + dev = qdev_new(TYPE_VIRT_CTRL); + sysbus = SYS_BUS_DEVICE(dev); + sysbus_realize_and_unref(sysbus, &error_fatal); + sysbus_mmio_map(sysbus, 0, VIRT_CTRL_MMIO_BASE); + sysbus_connect_irq(sysbus, 0, PIC_GPIO(VIRT_CTRL_IRQ_BASE)); + + /* virtio-mmio */ + io_base = VIRT_VIRTIO_MMIO_BASE; + for (i = 0; i < 128; i++) { + dev = qdev_new(TYPE_VIRTIO_MMIO); + qdev_prop_set_bit(dev, "force-legacy", false); + sysbus = SYS_BUS_DEVICE(dev); + sysbus_realize_and_unref(sysbus, &error_fatal); + sysbus_connect_irq(sysbus, 0, PIC_GPIO(VIRT_VIRTIO_IRQ_BASE + i)); + sysbus_mmio_map(sysbus, 0, io_base); + io_base += 0x200; + } + + if (kernel_filename) { + CPUState *cs = CPU(cpu); 
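+        /*
+         * main_cpu_reset() above reloads the initial SP from physical
+         * address 0 and the initial PC from physical address 4, which is
+         * why the ELF entry point is stored at address 4 below. The BI_*
+         * bootinfo records describing RAM and the goldfish/virtio devices
+         * are then written starting at parameters_base, just past the
+         * loaded kernel image (rounded to an even address). As a worked
+         * example of the IRQ encoding in those records:
+         * VIRT_VIRTIO_IRQ_BASE = PIC_IRQ(2, 1) = 8 + (2 - 1) * 32 + 1 - 1
+         * = 40, which PIC_GPIO() decodes back to input 0 of pic_dev[1]
+         * (PIC #2).
+         */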
+ uint64_t high; + + kernel_size = load_elf(kernel_filename, NULL, NULL, NULL, + &elf_entry, NULL, &high, NULL, 1, + EM_68K, 0, 0); + if (kernel_size < 0) { + error_report("could not load kernel '%s'", kernel_filename); + exit(1); + } + stl_phys(cs->as, 4, elf_entry); /* reset initial PC */ + parameters_base = (high + 1) & ~1; + + BOOTINFO1(cs->as, parameters_base, BI_MACHTYPE, MACH_VIRT); + BOOTINFO1(cs->as, parameters_base, BI_FPUTYPE, FPU_68040); + BOOTINFO1(cs->as, parameters_base, BI_MMUTYPE, MMU_68040); + BOOTINFO1(cs->as, parameters_base, BI_CPUTYPE, CPU_68040); + BOOTINFO2(cs->as, parameters_base, BI_MEMCHUNK, 0, ram_size); + + BOOTINFO1(cs->as, parameters_base, BI_VIRT_QEMU_VERSION, + ((QEMU_VERSION_MAJOR << 24) | (QEMU_VERSION_MINOR << 16) | + (QEMU_VERSION_MICRO << 8))); + BOOTINFO2(cs->as, parameters_base, BI_VIRT_GF_PIC_BASE, + VIRT_GF_PIC_MMIO_BASE, VIRT_GF_PIC_IRQ_BASE); + BOOTINFO2(cs->as, parameters_base, BI_VIRT_GF_RTC_BASE, + VIRT_GF_RTC_MMIO_BASE, VIRT_GF_RTC_IRQ_BASE); + BOOTINFO2(cs->as, parameters_base, BI_VIRT_GF_TTY_BASE, + VIRT_GF_TTY_MMIO_BASE, VIRT_GF_TTY_IRQ_BASE); + BOOTINFO2(cs->as, parameters_base, BI_VIRT_CTRL_BASE, + VIRT_CTRL_MMIO_BASE, VIRT_CTRL_IRQ_BASE); + BOOTINFO2(cs->as, parameters_base, BI_VIRT_VIRTIO_BASE, + VIRT_VIRTIO_MMIO_BASE, VIRT_VIRTIO_IRQ_BASE); + + if (kernel_cmdline) { + BOOTINFOSTR(cs->as, parameters_base, BI_COMMAND_LINE, + kernel_cmdline); + } + + /* load initrd */ + if (initrd_filename) { + initrd_size = get_image_size(initrd_filename); + if (initrd_size < 0) { + error_report("could not load initial ram disk '%s'", + initrd_filename); + exit(1); + } + + initrd_base = (ram_size - initrd_size) & TARGET_PAGE_MASK; + load_image_targphys(initrd_filename, initrd_base, + ram_size - initrd_base); + BOOTINFO2(cs->as, parameters_base, BI_RAMDISK, initrd_base, + initrd_size); + } else { + initrd_base = 0; + initrd_size = 0; + } + BOOTINFO0(cs->as, parameters_base, BI_LAST); + } +} + +static void virt_machine_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + mc->desc = "QEMU M68K Virtual Machine"; + mc->init = virt_init; + mc->default_cpu_type = M68K_CPU_TYPE_NAME("m68040"); + mc->max_cpus = 1; + mc->no_floppy = 1; + mc->no_parallel = 1; + mc->default_ram_id = "m68k_virt.ram"; +} + +static const TypeInfo virt_machine_info = { + .name = MACHINE_TYPE_NAME("virt"), + .parent = TYPE_MACHINE, + .abstract = true, + .class_init = virt_machine_class_init, +}; + +static void virt_machine_register_types(void) +{ + type_register_static(&virt_machine_info); +} + +type_init(virt_machine_register_types) + +#define DEFINE_VIRT_MACHINE(major, minor, latest) \ + static void virt_##major##_##minor##_class_init(ObjectClass *oc, \ + void *data) \ + { \ + MachineClass *mc = MACHINE_CLASS(oc); \ + virt_machine_##major##_##minor##_options(mc); \ + mc->desc = "QEMU " # major "." # minor " M68K Virtual Machine"; \ + if (latest) { \ + mc->alias = "virt"; \ + } \ + } \ + static const TypeInfo machvirt_##major##_##minor##_info = { \ + .name = MACHINE_TYPE_NAME("virt-" # major "." 
# minor), \ + .parent = MACHINE_TYPE_NAME("virt"), \ + .class_init = virt_##major##_##minor##_class_init, \ + }; \ + static void machvirt_machine_##major##_##minor##_init(void) \ + { \ + type_register_static(&machvirt_##major##_##minor##_info); \ + } \ + type_init(machvirt_machine_##major##_##minor##_init); + +static void virt_machine_6_2_options(MachineClass *mc) +{ +} +DEFINE_VIRT_MACHINE(6, 2, true) + +static void virt_machine_6_1_options(MachineClass *mc) +{ + virt_machine_6_2_options(mc); + compat_props_add(mc->compat_props, hw_compat_6_1, hw_compat_6_1_len); +} +DEFINE_VIRT_MACHINE(6, 1, false) + +static void virt_machine_6_0_options(MachineClass *mc) +{ + virt_machine_6_1_options(mc); + compat_props_add(mc->compat_props, hw_compat_6_0, hw_compat_6_0_len); +} +DEFINE_VIRT_MACHINE(6, 0, false) diff --git a/hw/mem/Kconfig b/hw/mem/Kconfig new file mode 100644 index 000000000..03dbb3c7d --- /dev/null +++ b/hw/mem/Kconfig @@ -0,0 +1,13 @@ +config DIMM + bool + select MEM_DEVICE + +config MEM_DEVICE + bool + +config NVDIMM + bool + select MEM_DEVICE + +config SPARSE_MEM + bool diff --git a/hw/mem/memory-device.c b/hw/mem/memory-device.c new file mode 100644 index 000000000..d9f830171 --- /dev/null +++ b/hw/mem/memory-device.c @@ -0,0 +1,346 @@ +/* + * Memory Device Interface + * + * Copyright ProfitBricks GmbH 2012 + * Copyright (C) 2014 Red Hat Inc + * Copyright (c) 2018 Red Hat Inc + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "hw/mem/memory-device.h" +#include "qapi/error.h" +#include "hw/boards.h" +#include "qemu/range.h" +#include "hw/virtio/vhost.h" +#include "sysemu/kvm.h" +#include "trace.h" + +static gint memory_device_addr_sort(gconstpointer a, gconstpointer b) +{ + const MemoryDeviceState *md_a = MEMORY_DEVICE(a); + const MemoryDeviceState *md_b = MEMORY_DEVICE(b); + const MemoryDeviceClass *mdc_a = MEMORY_DEVICE_GET_CLASS(a); + const MemoryDeviceClass *mdc_b = MEMORY_DEVICE_GET_CLASS(b); + const uint64_t addr_a = mdc_a->get_addr(md_a); + const uint64_t addr_b = mdc_b->get_addr(md_b); + + if (addr_a > addr_b) { + return 1; + } else if (addr_a < addr_b) { + return -1; + } + return 0; +} + +static int memory_device_build_list(Object *obj, void *opaque) +{ + GSList **list = opaque; + + if (object_dynamic_cast(obj, TYPE_MEMORY_DEVICE)) { + DeviceState *dev = DEVICE(obj); + if (dev->realized) { /* only realized memory devices matter */ + *list = g_slist_insert_sorted(*list, dev, memory_device_addr_sort); + } + } + + object_child_foreach(obj, memory_device_build_list, opaque); + return 0; +} + +static int memory_device_used_region_size(Object *obj, void *opaque) +{ + uint64_t *size = opaque; + + if (object_dynamic_cast(obj, TYPE_MEMORY_DEVICE)) { + const DeviceState *dev = DEVICE(obj); + const MemoryDeviceState *md = MEMORY_DEVICE(obj); + + if (dev->realized) { + *size += memory_device_get_region_size(md, &error_abort); + } + } + + object_child_foreach(obj, memory_device_used_region_size, opaque); + return 0; +} + +static void memory_device_check_addable(MachineState *ms, uint64_t size, + Error **errp) +{ + uint64_t used_region_size = 0; + + /* we will need a new memory slot for kvm and vhost */ + if (kvm_enabled() && !kvm_has_free_slot(ms)) { + error_setg(errp, "hypervisor has no free memory slots left"); + return; + } + if (!vhost_has_free_slot()) { + error_setg(errp, "a used vhost backend has no free memory slots left"); + return; + } + + /* will we 
exceed the total amount of memory specified */ + memory_device_used_region_size(OBJECT(ms), &used_region_size); + if (used_region_size + size < used_region_size || + used_region_size + size > ms->maxram_size - ms->ram_size) { + error_setg(errp, "not enough space, currently 0x%" PRIx64 + " in use of total space for memory devices 0x" RAM_ADDR_FMT, + used_region_size, ms->maxram_size - ms->ram_size); + return; + } + +} + +static uint64_t memory_device_get_free_addr(MachineState *ms, + const uint64_t *hint, + uint64_t align, uint64_t size, + Error **errp) +{ + Error *err = NULL; + GSList *list = NULL, *item; + Range as, new = range_empty; + + if (!ms->device_memory) { + error_setg(errp, "memory devices (e.g. for memory hotplug) are not " + "supported by the machine"); + return 0; + } + + if (!memory_region_size(&ms->device_memory->mr)) { + error_setg(errp, "memory devices (e.g. for memory hotplug) are not " + "enabled, please specify the maxmem option"); + return 0; + } + range_init_nofail(&as, ms->device_memory->base, + memory_region_size(&ms->device_memory->mr)); + + /* start of address space indicates the maximum alignment we expect */ + if (!QEMU_IS_ALIGNED(range_lob(&as), align)) { + warn_report("the alignment (0x%" PRIx64 ") exceeds the expected" + " maximum alignment, memory will get fragmented and not" + " all 'maxmem' might be usable for memory devices.", + align); + } + + memory_device_check_addable(ms, size, &err); + if (err) { + error_propagate(errp, err); + return 0; + } + + if (hint && !QEMU_IS_ALIGNED(*hint, align)) { + error_setg(errp, "address must be aligned to 0x%" PRIx64 " bytes", + align); + return 0; + } + + if (!QEMU_IS_ALIGNED(size, align)) { + error_setg(errp, "backend memory size must be multiple of 0x%" + PRIx64, align); + return 0; + } + + if (hint) { + if (range_init(&new, *hint, size) || !range_contains_range(&as, &new)) { + error_setg(errp, "can't add memory device [0x%" PRIx64 ":0x%" PRIx64 + "], usable range for memory devices [0x%" PRIx64 ":0x%" + PRIx64 "]", *hint, size, range_lob(&as), + range_size(&as)); + return 0; + } + } else { + if (range_init(&new, QEMU_ALIGN_UP(range_lob(&as), align), size)) { + error_setg(errp, "can't add memory device, device too big"); + return 0; + } + } + + /* find address range that will fit new memory device */ + object_child_foreach(OBJECT(ms), memory_device_build_list, &list); + for (item = list; item; item = g_slist_next(item)) { + const MemoryDeviceState *md = item->data; + const MemoryDeviceClass *mdc = MEMORY_DEVICE_GET_CLASS(OBJECT(md)); + uint64_t next_addr; + Range tmp; + + range_init_nofail(&tmp, mdc->get_addr(md), + memory_device_get_region_size(md, &error_abort)); + + if (range_overlaps_range(&tmp, &new)) { + if (hint) { + const DeviceState *d = DEVICE(md); + error_setg(errp, "address range conflicts with memory device" + " id='%s'", d->id ? 
d->id : "(unnamed)"); + goto out; + } + + next_addr = QEMU_ALIGN_UP(range_upb(&tmp) + 1, align); + if (!next_addr || range_init(&new, next_addr, range_size(&new))) { + range_make_empty(&new); + break; + } + } else if (range_lob(&tmp) > range_upb(&new)) { + break; + } + } + + if (!range_contains_range(&as, &new)) { + error_setg(errp, "could not find position in guest address space for " + "memory device - memory fragmented due to alignments"); + } +out: + g_slist_free(list); + return range_lob(&new); +} + +MemoryDeviceInfoList *qmp_memory_device_list(void) +{ + GSList *devices = NULL, *item; + MemoryDeviceInfoList *list = NULL, **tail = &list; + + object_child_foreach(qdev_get_machine(), memory_device_build_list, + &devices); + + for (item = devices; item; item = g_slist_next(item)) { + const MemoryDeviceState *md = MEMORY_DEVICE(item->data); + const MemoryDeviceClass *mdc = MEMORY_DEVICE_GET_CLASS(item->data); + MemoryDeviceInfo *info = g_new0(MemoryDeviceInfo, 1); + + mdc->fill_device_info(md, info); + + QAPI_LIST_APPEND(tail, info); + } + + g_slist_free(devices); + + return list; +} + +static int memory_device_plugged_size(Object *obj, void *opaque) +{ + uint64_t *size = opaque; + + if (object_dynamic_cast(obj, TYPE_MEMORY_DEVICE)) { + const DeviceState *dev = DEVICE(obj); + const MemoryDeviceState *md = MEMORY_DEVICE(obj); + const MemoryDeviceClass *mdc = MEMORY_DEVICE_GET_CLASS(obj); + + if (dev->realized) { + *size += mdc->get_plugged_size(md, &error_abort); + } + } + + object_child_foreach(obj, memory_device_plugged_size, opaque); + return 0; +} + +uint64_t get_plugged_memory_size(void) +{ + uint64_t size = 0; + + memory_device_plugged_size(qdev_get_machine(), &size); + + return size; +} + +void memory_device_pre_plug(MemoryDeviceState *md, MachineState *ms, + const uint64_t *legacy_align, Error **errp) +{ + const MemoryDeviceClass *mdc = MEMORY_DEVICE_GET_CLASS(md); + Error *local_err = NULL; + uint64_t addr, align = 0; + MemoryRegion *mr; + + mr = mdc->get_memory_region(md, &local_err); + if (local_err) { + goto out; + } + + if (legacy_align) { + align = *legacy_align; + } else { + if (mdc->get_min_alignment) { + align = mdc->get_min_alignment(md); + } + align = MAX(align, memory_region_get_alignment(mr)); + } + addr = mdc->get_addr(md); + addr = memory_device_get_free_addr(ms, !addr ? NULL : &addr, align, + memory_region_size(mr), &local_err); + if (local_err) { + goto out; + } + mdc->set_addr(md, addr, &local_err); + if (!local_err) { + trace_memory_device_pre_plug(DEVICE(md)->id ? DEVICE(md)->id : "", + addr); + } +out: + error_propagate(errp, local_err); +} + +void memory_device_plug(MemoryDeviceState *md, MachineState *ms) +{ + const MemoryDeviceClass *mdc = MEMORY_DEVICE_GET_CLASS(md); + const uint64_t addr = mdc->get_addr(md); + MemoryRegion *mr; + + /* + * We expect that a previous call to memory_device_pre_plug() succeeded, so + * it can't fail at this point. + */ + mr = mdc->get_memory_region(md, &error_abort); + g_assert(ms->device_memory); + + memory_region_add_subregion(&ms->device_memory->mr, + addr - ms->device_memory->base, mr); + trace_memory_device_plug(DEVICE(md)->id ? DEVICE(md)->id : "", addr); +} + +void memory_device_unplug(MemoryDeviceState *md, MachineState *ms) +{ + const MemoryDeviceClass *mdc = MEMORY_DEVICE_GET_CLASS(md); + MemoryRegion *mr; + + /* + * We expect that a previous call to memory_device_pre_plug() succeeded, so + * it can't fail at this point. 
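+     * Passing &error_abort below simply asserts that expectation.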
+ */ + mr = mdc->get_memory_region(md, &error_abort); + g_assert(ms->device_memory); + + memory_region_del_subregion(&ms->device_memory->mr, mr); + trace_memory_device_unplug(DEVICE(md)->id ? DEVICE(md)->id : "", + mdc->get_addr(md)); +} + +uint64_t memory_device_get_region_size(const MemoryDeviceState *md, + Error **errp) +{ + const MemoryDeviceClass *mdc = MEMORY_DEVICE_GET_CLASS(md); + MemoryRegion *mr; + + /* dropping const here is fine as we don't touch the memory region */ + mr = mdc->get_memory_region((MemoryDeviceState *)md, errp); + if (!mr) { + return 0; + } + + return memory_region_size(mr); +} + +static const TypeInfo memory_device_info = { + .name = TYPE_MEMORY_DEVICE, + .parent = TYPE_INTERFACE, + .class_size = sizeof(MemoryDeviceClass), +}; + +static void memory_device_register_types(void) +{ + type_register_static(&memory_device_info); +} + +type_init(memory_device_register_types) diff --git a/hw/mem/meson.build b/hw/mem/meson.build new file mode 100644 index 000000000..82f86d117 --- /dev/null +++ b/hw/mem/meson.build @@ -0,0 +1,9 @@ +mem_ss = ss.source_set() +mem_ss.add(files('memory-device.c')) +mem_ss.add(when: 'CONFIG_DIMM', if_true: files('pc-dimm.c')) +mem_ss.add(when: 'CONFIG_NPCM7XX', if_true: files('npcm7xx_mc.c')) +mem_ss.add(when: 'CONFIG_NVDIMM', if_true: files('nvdimm.c')) + +softmmu_ss.add_all(when: 'CONFIG_MEM_DEVICE', if_true: mem_ss) + +softmmu_ss.add(when: 'CONFIG_SPARSE_MEM', if_true: files('sparse-mem.c')) diff --git a/hw/mem/npcm7xx_mc.c b/hw/mem/npcm7xx_mc.c new file mode 100644 index 000000000..abc5af562 --- /dev/null +++ b/hw/mem/npcm7xx_mc.c @@ -0,0 +1,84 @@ +/* + * Nuvoton NPCM7xx Memory Controller stub + * + * Copyright 2020 Google LLC + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + */ + +#include "qemu/osdep.h" + +#include "hw/mem/npcm7xx_mc.h" +#include "qapi/error.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "qemu/units.h" + +#define NPCM7XX_MC_REGS_SIZE (4 * KiB) + +static uint64_t npcm7xx_mc_read(void *opaque, hwaddr addr, unsigned int size) +{ + /* + * If bits 8..11 @ offset 0 are not zero, the boot block thinks the memory + * controller has already been initialized and will skip DDR training. 
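+     * Returning 0x100 (bit 8 set) therefore makes the boot block skip
+     * DDR training.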
+ */ + if (addr == 0) { + return 0x100; + } + + qemu_log_mask(LOG_UNIMP, "%s: mostly unimplemented\n", __func__); + + return 0; +} + +static void npcm7xx_mc_write(void *opaque, hwaddr addr, uint64_t v, + unsigned int size) +{ + qemu_log_mask(LOG_UNIMP, "%s: mostly unimplemented\n", __func__); +} + +static const MemoryRegionOps npcm7xx_mc_ops = { + .read = npcm7xx_mc_read, + .write = npcm7xx_mc_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + .unaligned = false, + }, +}; + +static void npcm7xx_mc_realize(DeviceState *dev, Error **errp) +{ + NPCM7xxMCState *s = NPCM7XX_MC(dev); + + memory_region_init_io(&s->mmio, OBJECT(s), &npcm7xx_mc_ops, s, "regs", + NPCM7XX_MC_REGS_SIZE); + sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->mmio); +} + +static void npcm7xx_mc_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->desc = "NPCM7xx Memory Controller stub"; + dc->realize = npcm7xx_mc_realize; +} + +static const TypeInfo npcm7xx_mc_types[] = { + { + .name = TYPE_NPCM7XX_MC, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(NPCM7xxMCState), + .class_init = npcm7xx_mc_class_init, + }, +}; +DEFINE_TYPES(npcm7xx_mc_types); diff --git a/hw/mem/nvdimm.c b/hw/mem/nvdimm.c new file mode 100644 index 000000000..7397b6715 --- /dev/null +++ b/hw/mem/nvdimm.c @@ -0,0 +1,266 @@ +/* + * Non-Volatile Dual In-line Memory Module Virtualization Implementation + * + * Copyright(C) 2015 Intel Corporation. + * + * Author: + * Xiao Guangrong + * + * Currently, it only supports PMEM Virtualization. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see + */ + +#include "qemu/osdep.h" +#include "qemu/module.h" +#include "qemu/pmem.h" +#include "qapi/error.h" +#include "qapi/visitor.h" +#include "hw/mem/nvdimm.h" +#include "hw/qdev-properties.h" +#include "hw/mem/memory-device.h" +#include "sysemu/hostmem.h" + +static void nvdimm_get_label_size(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + NVDIMMDevice *nvdimm = NVDIMM(obj); + uint64_t value = nvdimm->label_size; + + visit_type_size(v, name, &value, errp); +} + +static void nvdimm_set_label_size(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + NVDIMMDevice *nvdimm = NVDIMM(obj); + uint64_t value; + + if (nvdimm->nvdimm_mr) { + error_setg(errp, "cannot change property value"); + return; + } + + if (!visit_type_size(v, name, &value, errp)) { + return; + } + if (value < MIN_NAMESPACE_LABEL_SIZE) { + error_setg(errp, "Property '%s.%s' (0x%" PRIx64 ") is required" + " at least 0x%lx", object_get_typename(obj), name, value, + MIN_NAMESPACE_LABEL_SIZE); + return; + } + + nvdimm->label_size = value; +} + +static void nvdimm_get_uuid(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + NVDIMMDevice *nvdimm = NVDIMM(obj); + char *value = NULL; + + value = qemu_uuid_unparse_strdup(&nvdimm->uuid); + + visit_type_str(v, name, &value, errp); + g_free(value); +} + + +static void nvdimm_set_uuid(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + NVDIMMDevice *nvdimm = NVDIMM(obj); + char *value; + + if (!visit_type_str(v, name, &value, errp)) { + return; + } + + if (qemu_uuid_parse(value, &nvdimm->uuid) != 0) { + error_setg(errp, "Property '%s.%s' has invalid value", + object_get_typename(obj), name); + } + + g_free(value); +} + + +static void nvdimm_init(Object *obj) +{ + object_property_add(obj, NVDIMM_LABEL_SIZE_PROP, "int", + nvdimm_get_label_size, nvdimm_set_label_size, NULL, + NULL); + + object_property_add(obj, NVDIMM_UUID_PROP, "QemuUUID", nvdimm_get_uuid, + nvdimm_set_uuid, NULL, NULL); +} + +static void nvdimm_finalize(Object *obj) +{ + NVDIMMDevice *nvdimm = NVDIMM(obj); + + g_free(nvdimm->nvdimm_mr); +} + +static void nvdimm_prepare_memory_region(NVDIMMDevice *nvdimm, Error **errp) +{ + PCDIMMDevice *dimm = PC_DIMM(nvdimm); + uint64_t align, pmem_size, size; + MemoryRegion *mr; + + g_assert(!nvdimm->nvdimm_mr); + + if (!dimm->hostmem) { + error_setg(errp, "'" PC_DIMM_MEMDEV_PROP "' property must be set"); + return; + } + + mr = host_memory_backend_get_memory(dimm->hostmem); + align = memory_region_get_alignment(mr); + size = memory_region_size(mr); + + pmem_size = size - nvdimm->label_size; + nvdimm->label_data = memory_region_get_ram_ptr(mr) + pmem_size; + pmem_size = QEMU_ALIGN_DOWN(pmem_size, align); + + if (size <= nvdimm->label_size || !pmem_size) { + HostMemoryBackend *hostmem = dimm->hostmem; + + error_setg(errp, "the size of memdev %s (0x%" PRIx64 ") is too " + "small to contain nvdimm label (0x%" PRIx64 ") and " + "aligned PMEM (0x%" PRIx64 ")", + object_get_canonical_path_component(OBJECT(hostmem)), + memory_region_size(mr), nvdimm->label_size, align); + return; + } + + if (!nvdimm->unarmed && memory_region_is_rom(mr)) { + HostMemoryBackend *hostmem = dimm->hostmem; + + error_setg(errp, "'unarmed' property must be off since memdev %s " + "is read-only", + object_get_canonical_path_component(OBJECT(hostmem))); + return; + } + + nvdimm->nvdimm_mr = 
g_new(MemoryRegion, 1); + memory_region_init_alias(nvdimm->nvdimm_mr, OBJECT(dimm), + "nvdimm-memory", mr, 0, pmem_size); + memory_region_set_nonvolatile(nvdimm->nvdimm_mr, true); + nvdimm->nvdimm_mr->align = align; +} + +static MemoryRegion *nvdimm_md_get_memory_region(MemoryDeviceState *md, + Error **errp) +{ + NVDIMMDevice *nvdimm = NVDIMM(md); + Error *local_err = NULL; + + if (!nvdimm->nvdimm_mr) { + nvdimm_prepare_memory_region(nvdimm, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return NULL; + } + } + return nvdimm->nvdimm_mr; +} + +static void nvdimm_realize(PCDIMMDevice *dimm, Error **errp) +{ + NVDIMMDevice *nvdimm = NVDIMM(dimm); + + if (!nvdimm->nvdimm_mr) { + nvdimm_prepare_memory_region(nvdimm, errp); + } +} + +/* + * the caller should check the input parameters before calling + * label read/write functions. + */ +static void nvdimm_validate_rw_label_data(NVDIMMDevice *nvdimm, uint64_t size, + uint64_t offset) +{ + assert((nvdimm->label_size >= size + offset) && (offset + size > offset)); +} + +static void nvdimm_read_label_data(NVDIMMDevice *nvdimm, void *buf, + uint64_t size, uint64_t offset) +{ + nvdimm_validate_rw_label_data(nvdimm, size, offset); + + memcpy(buf, nvdimm->label_data + offset, size); +} + +static void nvdimm_write_label_data(NVDIMMDevice *nvdimm, const void *buf, + uint64_t size, uint64_t offset) +{ + MemoryRegion *mr; + PCDIMMDevice *dimm = PC_DIMM(nvdimm); + bool is_pmem = object_property_get_bool(OBJECT(dimm->hostmem), + "pmem", NULL); + uint64_t backend_offset; + + nvdimm_validate_rw_label_data(nvdimm, size, offset); + + if (!is_pmem) { + memcpy(nvdimm->label_data + offset, buf, size); + } else { + pmem_memcpy_persist(nvdimm->label_data + offset, buf, size); + } + + mr = host_memory_backend_get_memory(dimm->hostmem); + backend_offset = memory_region_size(mr) - nvdimm->label_size + offset; + memory_region_set_dirty(mr, backend_offset, size); +} + +static Property nvdimm_properties[] = { + DEFINE_PROP_BOOL(NVDIMM_UNARMED_PROP, NVDIMMDevice, unarmed, false), + DEFINE_PROP_END_OF_LIST(), +}; + +static void nvdimm_class_init(ObjectClass *oc, void *data) +{ + PCDIMMDeviceClass *ddc = PC_DIMM_CLASS(oc); + MemoryDeviceClass *mdc = MEMORY_DEVICE_CLASS(oc); + NVDIMMClass *nvc = NVDIMM_CLASS(oc); + DeviceClass *dc = DEVICE_CLASS(oc); + + ddc->realize = nvdimm_realize; + mdc->get_memory_region = nvdimm_md_get_memory_region; + device_class_set_props(dc, nvdimm_properties); + + nvc->read_label_data = nvdimm_read_label_data; + nvc->write_label_data = nvdimm_write_label_data; + set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); +} + +static TypeInfo nvdimm_info = { + .name = TYPE_NVDIMM, + .parent = TYPE_PC_DIMM, + .class_size = sizeof(NVDIMMClass), + .class_init = nvdimm_class_init, + .instance_size = sizeof(NVDIMMDevice), + .instance_init = nvdimm_init, + .instance_finalize = nvdimm_finalize, +}; + +static void nvdimm_register_types(void) +{ + type_register_static(&nvdimm_info); +} + +type_init(nvdimm_register_types) diff --git a/hw/mem/pc-dimm.c b/hw/mem/pc-dimm.c new file mode 100644 index 000000000..48b913aba --- /dev/null +++ b/hw/mem/pc-dimm.c @@ -0,0 +1,307 @@ +/* + * Dimm device for Memory Hotplug + * + * Copyright ProfitBricks GmbH 2012 + * Copyright (C) 2014 Red Hat Inc + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
+ * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see + */ + +#include "qemu/osdep.h" +#include "hw/boards.h" +#include "hw/mem/pc-dimm.h" +#include "hw/qdev-properties.h" +#include "migration/vmstate.h" +#include "hw/mem/nvdimm.h" +#include "hw/mem/memory-device.h" +#include "qapi/error.h" +#include "qapi/visitor.h" +#include "qemu/module.h" +#include "sysemu/hostmem.h" +#include "sysemu/numa.h" +#include "trace.h" + +static int pc_dimm_get_free_slot(const int *hint, int max_slots, Error **errp); + +static MemoryRegion *pc_dimm_get_memory_region(PCDIMMDevice *dimm, Error **errp) +{ + if (!dimm->hostmem) { + error_setg(errp, "'" PC_DIMM_MEMDEV_PROP "' property must be set"); + return NULL; + } + + return host_memory_backend_get_memory(dimm->hostmem); +} + +void pc_dimm_pre_plug(PCDIMMDevice *dimm, MachineState *machine, + const uint64_t *legacy_align, Error **errp) +{ + Error *local_err = NULL; + int slot; + + slot = object_property_get_int(OBJECT(dimm), PC_DIMM_SLOT_PROP, + &error_abort); + if ((slot < 0 || slot >= machine->ram_slots) && + slot != PC_DIMM_UNASSIGNED_SLOT) { + error_setg(errp, + "invalid slot number %d, valid range is [0-%" PRIu64 "]", + slot, machine->ram_slots - 1); + return; + } + + slot = pc_dimm_get_free_slot(slot == PC_DIMM_UNASSIGNED_SLOT ? NULL : &slot, + machine->ram_slots, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + object_property_set_int(OBJECT(dimm), PC_DIMM_SLOT_PROP, slot, + &error_abort); + trace_mhp_pc_dimm_assigned_slot(slot); + + memory_device_pre_plug(MEMORY_DEVICE(dimm), machine, legacy_align, + errp); +} + +void pc_dimm_plug(PCDIMMDevice *dimm, MachineState *machine) +{ + MemoryRegion *vmstate_mr = pc_dimm_get_memory_region(dimm, + &error_abort); + + memory_device_plug(MEMORY_DEVICE(dimm), machine); + vmstate_register_ram(vmstate_mr, DEVICE(dimm)); +} + +void pc_dimm_unplug(PCDIMMDevice *dimm, MachineState *machine) +{ + MemoryRegion *vmstate_mr = pc_dimm_get_memory_region(dimm, + &error_abort); + + memory_device_unplug(MEMORY_DEVICE(dimm), machine); + vmstate_unregister_ram(vmstate_mr, DEVICE(dimm)); +} + +static int pc_dimm_slot2bitmap(Object *obj, void *opaque) +{ + unsigned long *bitmap = opaque; + + if (object_dynamic_cast(obj, TYPE_PC_DIMM)) { + DeviceState *dev = DEVICE(obj); + if (dev->realized) { /* count only realized DIMMs */ + PCDIMMDevice *d = PC_DIMM(obj); + set_bit(d->slot, bitmap); + } + } + + object_child_foreach(obj, pc_dimm_slot2bitmap, opaque); + return 0; +} + +static int pc_dimm_get_free_slot(const int *hint, int max_slots, Error **errp) +{ + unsigned long *bitmap; + int slot = 0; + + if (max_slots <= 0) { + error_setg(errp, "no slots were allocated, please specify " + "the 'slots' option"); + return slot; + } + + bitmap = bitmap_new(max_slots); + object_child_foreach(qdev_get_machine(), pc_dimm_slot2bitmap, bitmap); + + /* check if requested slot is not occupied */ + if (hint) { + if (*hint >= max_slots) { + error_setg(errp, "invalid slot# %d, should be less than %d", + *hint, max_slots); + } else if (!test_bit(*hint, bitmap)) { + slot = *hint; + } else { + error_setg(errp, "slot %d is busy", *hint); + } + goto out; + } + + /* search for free slot */ + slot = 
find_first_zero_bit(bitmap, max_slots); + if (slot == max_slots) { + error_setg(errp, "no free slots available"); + } +out: + g_free(bitmap); + return slot; +} + +static Property pc_dimm_properties[] = { + DEFINE_PROP_UINT64(PC_DIMM_ADDR_PROP, PCDIMMDevice, addr, 0), + DEFINE_PROP_UINT32(PC_DIMM_NODE_PROP, PCDIMMDevice, node, 0), + DEFINE_PROP_INT32(PC_DIMM_SLOT_PROP, PCDIMMDevice, slot, + PC_DIMM_UNASSIGNED_SLOT), + DEFINE_PROP_LINK(PC_DIMM_MEMDEV_PROP, PCDIMMDevice, hostmem, + TYPE_MEMORY_BACKEND, HostMemoryBackend *), + DEFINE_PROP_END_OF_LIST(), +}; + +static void pc_dimm_get_size(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + Error *local_err = NULL; + uint64_t value; + + value = memory_device_get_region_size(MEMORY_DEVICE(obj), &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + + visit_type_uint64(v, name, &value, errp); +} + +static void pc_dimm_init(Object *obj) +{ + object_property_add(obj, PC_DIMM_SIZE_PROP, "uint64", pc_dimm_get_size, + NULL, NULL, NULL); +} + +static void pc_dimm_realize(DeviceState *dev, Error **errp) +{ + PCDIMMDevice *dimm = PC_DIMM(dev); + PCDIMMDeviceClass *ddc = PC_DIMM_GET_CLASS(dimm); + MachineState *ms = MACHINE(qdev_get_machine()); + + if (ms->numa_state) { + int nb_numa_nodes = ms->numa_state->num_nodes; + + if (((nb_numa_nodes > 0) && (dimm->node >= nb_numa_nodes)) || + (!nb_numa_nodes && dimm->node)) { + error_setg(errp, "'DIMM property " PC_DIMM_NODE_PROP " has value %" + PRIu32 "' which exceeds the number of numa nodes: %d", + dimm->node, nb_numa_nodes ? nb_numa_nodes : 1); + return; + } + } else if (dimm->node > 0) { + error_setg(errp, "machine doesn't support NUMA"); + return; + } + + if (!dimm->hostmem) { + error_setg(errp, "'" PC_DIMM_MEMDEV_PROP "' property is not set"); + return; + } else if (host_memory_backend_is_mapped(dimm->hostmem)) { + error_setg(errp, "can't use already busy memdev: %s", + object_get_canonical_path_component(OBJECT(dimm->hostmem))); + return; + } + + if (ddc->realize) { + ddc->realize(dimm, errp); + } + + host_memory_backend_set_mapped(dimm->hostmem, true); +} + +static void pc_dimm_unrealize(DeviceState *dev) +{ + PCDIMMDevice *dimm = PC_DIMM(dev); + + host_memory_backend_set_mapped(dimm->hostmem, false); +} + +static uint64_t pc_dimm_md_get_addr(const MemoryDeviceState *md) +{ + return object_property_get_uint(OBJECT(md), PC_DIMM_ADDR_PROP, + &error_abort); +} + +static void pc_dimm_md_set_addr(MemoryDeviceState *md, uint64_t addr, + Error **errp) +{ + object_property_set_uint(OBJECT(md), PC_DIMM_ADDR_PROP, addr, errp); +} + +static MemoryRegion *pc_dimm_md_get_memory_region(MemoryDeviceState *md, + Error **errp) +{ + return pc_dimm_get_memory_region(PC_DIMM(md), errp); +} + +static void pc_dimm_md_fill_device_info(const MemoryDeviceState *md, + MemoryDeviceInfo *info) +{ + PCDIMMDeviceInfo *di = g_new0(PCDIMMDeviceInfo, 1); + const DeviceClass *dc = DEVICE_GET_CLASS(md); + const PCDIMMDevice *dimm = PC_DIMM(md); + const DeviceState *dev = DEVICE(md); + + if (dev->id) { + di->has_id = true; + di->id = g_strdup(dev->id); + } + di->hotplugged = dev->hotplugged; + di->hotpluggable = dc->hotpluggable; + di->addr = dimm->addr; + di->slot = dimm->slot; + di->node = dimm->node; + di->size = object_property_get_uint(OBJECT(dimm), PC_DIMM_SIZE_PROP, + NULL); + di->memdev = object_get_canonical_path(OBJECT(dimm->hostmem)); + + if (object_dynamic_cast(OBJECT(dev), TYPE_NVDIMM)) { + info->u.nvdimm.data = di; + info->type = MEMORY_DEVICE_INFO_KIND_NVDIMM; + } else 
{ + info->u.dimm.data = di; + info->type = MEMORY_DEVICE_INFO_KIND_DIMM; + } +} + +static void pc_dimm_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + MemoryDeviceClass *mdc = MEMORY_DEVICE_CLASS(oc); + + dc->realize = pc_dimm_realize; + dc->unrealize = pc_dimm_unrealize; + device_class_set_props(dc, pc_dimm_properties); + dc->desc = "DIMM memory module"; + + mdc->get_addr = pc_dimm_md_get_addr; + mdc->set_addr = pc_dimm_md_set_addr; + /* for a dimm plugged_size == region_size */ + mdc->get_plugged_size = memory_device_get_region_size; + mdc->get_memory_region = pc_dimm_md_get_memory_region; + mdc->fill_device_info = pc_dimm_md_fill_device_info; +} + +static TypeInfo pc_dimm_info = { + .name = TYPE_PC_DIMM, + .parent = TYPE_DEVICE, + .instance_size = sizeof(PCDIMMDevice), + .instance_init = pc_dimm_init, + .class_init = pc_dimm_class_init, + .class_size = sizeof(PCDIMMDeviceClass), + .interfaces = (InterfaceInfo[]) { + { TYPE_MEMORY_DEVICE }, + { } + }, +}; + +static void pc_dimm_register_types(void) +{ + type_register_static(&pc_dimm_info); +} + +type_init(pc_dimm_register_types) diff --git a/hw/mem/sparse-mem.c b/hw/mem/sparse-mem.c new file mode 100644 index 000000000..e6640eb8e --- /dev/null +++ b/hw/mem/sparse-mem.c @@ -0,0 +1,150 @@ +/* + * A sparse memory device. Useful for fuzzing + * + * Copyright Red Hat Inc., 2021 + * + * Authors: + * Alexander Bulekov + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" + +#include "hw/qdev-properties.h" +#include "hw/sysbus.h" +#include "qapi/error.h" +#include "qemu/units.h" +#include "sysemu/qtest.h" +#include "hw/mem/sparse-mem.h" + +#define SPARSE_MEM(obj) OBJECT_CHECK(SparseMemState, (obj), TYPE_SPARSE_MEM) +#define SPARSE_BLOCK_SIZE 0x1000 + +typedef struct SparseMemState { + SysBusDevice parent_obj; + MemoryRegion mmio; + uint64_t baseaddr; + uint64_t length; + uint64_t size_used; + uint64_t maxsize; + GHashTable *mapped; +} SparseMemState; + +typedef struct sparse_mem_block { + uint8_t data[SPARSE_BLOCK_SIZE]; +} sparse_mem_block; + +static uint64_t sparse_mem_read(void *opaque, hwaddr addr, unsigned int size) +{ + SparseMemState *s = opaque; + uint64_t ret = 0; + size_t pfn = addr / SPARSE_BLOCK_SIZE; + size_t offset = addr % SPARSE_BLOCK_SIZE; + sparse_mem_block *block; + + block = g_hash_table_lookup(s->mapped, (void *)pfn); + if (block) { + assert(offset + size <= sizeof(block->data)); + memcpy(&ret, block->data + offset, size); + } + return ret; +} + +static void sparse_mem_write(void *opaque, hwaddr addr, uint64_t v, + unsigned int size) +{ + SparseMemState *s = opaque; + size_t pfn = addr / SPARSE_BLOCK_SIZE; + size_t offset = addr % SPARSE_BLOCK_SIZE; + sparse_mem_block *block; + + if (!g_hash_table_lookup(s->mapped, (void *)pfn) && + s->size_used + SPARSE_BLOCK_SIZE < s->maxsize && v) { + g_hash_table_insert(s->mapped, (void *)pfn, + g_new0(sparse_mem_block, 1)); + s->size_used += sizeof(block->data); + } + block = g_hash_table_lookup(s->mapped, (void *)pfn); + if (!block) { + return; + } + + assert(offset + size <= sizeof(block->data)); + + memcpy(block->data + offset, &v, size); + +} + +static const MemoryRegionOps sparse_mem_ops = { + .read = sparse_mem_read, + .write = sparse_mem_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 1, + .max_access_size = 8, + .unaligned = false, + }, +}; + +static Property sparse_mem_properties[] = { + /* The base 
address of the memory */ + DEFINE_PROP_UINT64("baseaddr", SparseMemState, baseaddr, 0x0), + /* The length of the sparse memory region */ + DEFINE_PROP_UINT64("length", SparseMemState, length, UINT64_MAX), + /* Max amount of actual memory that can be used to back the sparse memory */ + DEFINE_PROP_UINT64("maxsize", SparseMemState, maxsize, 10 * MiB), + DEFINE_PROP_END_OF_LIST(), +}; + +MemoryRegion *sparse_mem_init(uint64_t addr, uint64_t length) +{ + DeviceState *dev; + + dev = qdev_new(TYPE_SPARSE_MEM); + qdev_prop_set_uint64(dev, "baseaddr", addr); + qdev_prop_set_uint64(dev, "length", length); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + sysbus_mmio_map_overlap(SYS_BUS_DEVICE(dev), 0, addr, -10000); + return &SPARSE_MEM(dev)->mmio; +} + +static void sparse_mem_realize(DeviceState *dev, Error **errp) +{ + SparseMemState *s = SPARSE_MEM(dev); + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + + if (!qtest_enabled()) { + error_setg(errp, "sparse_mem device should only be used " + "for testing with QTest"); + return; + } + + assert(s->baseaddr + s->length > s->baseaddr); + + s->mapped = g_hash_table_new(NULL, NULL); + memory_region_init_io(&s->mmio, OBJECT(s), &sparse_mem_ops, s, + "sparse-mem", s->length); + sysbus_init_mmio(sbd, &s->mmio); +} + +static void sparse_mem_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + device_class_set_props(dc, sparse_mem_properties); + + dc->desc = "Sparse Memory Device"; + dc->realize = sparse_mem_realize; +} + +static const TypeInfo sparse_mem_types[] = { + { + .name = TYPE_SPARSE_MEM, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(SparseMemState), + .class_init = sparse_mem_class_init, + }, +}; +DEFINE_TYPES(sparse_mem_types); diff --git a/hw/mem/trace-events b/hw/mem/trace-events new file mode 100644 index 000000000..8b6b02b5b --- /dev/null +++ b/hw/mem/trace-events @@ -0,0 +1,8 @@ +# See docs/devel/tracing.rst for syntax documentation. 
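+# Each line below names a trace point, its typed arguments, and the
+# printf-style format string used to render those arguments when the
+# event fires.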
+ +# pc-dimm.c +mhp_pc_dimm_assigned_slot(int slot) "%d" +# memory-device.c +memory_device_pre_plug(const char *id, uint64_t addr) "id=%s addr=0x%"PRIx64 +memory_device_plug(const char *id, uint64_t addr) "id=%s addr=0x%"PRIx64 +memory_device_unplug(const char *id, uint64_t addr) "id=%s addr=0x%"PRIx64 diff --git a/hw/mem/trace.h b/hw/mem/trace.h new file mode 100644 index 000000000..2f2c94540 --- /dev/null +++ b/hw/mem/trace.h @@ -0,0 +1 @@ +#include "trace/trace-hw_mem.h" diff --git a/hw/meson.build b/hw/meson.build new file mode 100644 index 000000000..b3366c888 --- /dev/null +++ b/hw/meson.build @@ -0,0 +1,66 @@ +subdir('9pfs') +subdir('acpi') +subdir('adc') +subdir('audio') +subdir('block') +subdir('char') +subdir('core') +subdir('cpu') +subdir('display') +subdir('dma') +subdir('gpio') +subdir('hyperv') +subdir('i2c') +subdir('ide') +subdir('input') +subdir('intc') +subdir('ipack') +subdir('ipmi') +subdir('isa') +subdir('mem') +subdir('misc') +subdir('net') +subdir('nubus') +subdir('nvme') +subdir('nvram') +subdir('pci') +subdir('pci-bridge') +subdir('pci-host') +subdir('pcmcia') +subdir('rdma') +subdir('rtc') +subdir('scsi') +subdir('sd') +subdir('sensor') +subdir('smbios') +subdir('ssi') +subdir('timer') +subdir('tpm') +subdir('usb') +subdir('vfio') +subdir('virtio') +subdir('watchdog') +subdir('xen') +subdir('xenpv') + +subdir('alpha') +subdir('arm') +subdir('avr') +subdir('cris') +subdir('hppa') +subdir('i386') +subdir('m68k') +subdir('microblaze') +subdir('mips') +subdir('nios2') +subdir('openrisc') +subdir('ppc') +subdir('remote') +subdir('riscv') +subdir('rx') +subdir('s390x') +subdir('sh4') +subdir('sparc') +subdir('sparc64') +subdir('tricore') +subdir('xtensa') diff --git a/hw/microblaze/Kconfig b/hw/microblaze/Kconfig new file mode 100644 index 000000000..e2697ced9 --- /dev/null +++ b/hw/microblaze/Kconfig @@ -0,0 +1,21 @@ +config PETALOGIX_S3ADSP1800 + bool + select PFLASH_CFI01 + select XILINX + select XILINX_AXI + select XILINX_ETHLITE + select UNIMP + +config PETALOGIX_ML605 + bool + select PFLASH_CFI01 + select SERIAL + select SSI_M25P80 + select XILINX + select XILINX_AXI + select XILINX_ETHLITE + select XILINX_SPI + +config XLNX_ZYNQMP_PMU + bool + select XLNX_ZYNQMP diff --git a/hw/microblaze/boot.c b/hw/microblaze/boot.c new file mode 100644 index 000000000..8821d009f --- /dev/null +++ b/hw/microblaze/boot.c @@ -0,0 +1,216 @@ +/* + * Microblaze kernel loader + * + * Copyright (c) 2012 Peter Crosthwaite + * Copyright (c) 2012 PetaLogix + * Copyright (c) 2009 Edgar E. Iglesias. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "qemu-common.h" +#include "qemu/datadir.h" +#include "cpu.h" +#include "qemu/option.h" +#include "qemu/config-file.h" +#include "qemu/error-report.h" +#include "sysemu/device_tree.h" +#include "sysemu/reset.h" +#include "hw/boards.h" +#include "hw/loader.h" +#include "elf.h" +#include "qemu/cutils.h" + +#include "boot.h" + +static struct +{ + void (*machine_cpu_reset)(MicroBlazeCPU *); + uint32_t bootstrap_pc; + uint32_t cmdline; + uint32_t initrd_start; + uint32_t initrd_end; + uint32_t fdt; +} boot_info; + +static void main_cpu_reset(void *opaque) +{ + MicroBlazeCPU *cpu = opaque; + CPUState *cs = CPU(cpu); + CPUMBState *env = &cpu->env; + + cpu_reset(cs); + env->regs[5] = boot_info.cmdline; + env->regs[6] = boot_info.initrd_start; + env->regs[7] = boot_info.fdt; + cpu_set_pc(cs, boot_info.bootstrap_pc); + if (boot_info.machine_cpu_reset) { + boot_info.machine_cpu_reset(cpu); + } +} + +static int microblaze_load_dtb(hwaddr addr, + uint32_t ramsize, + uint32_t initrd_start, + uint32_t initrd_end, + const char *kernel_cmdline, + const char *dtb_filename) +{ + int fdt_size; + void *fdt = NULL; + int r; + + if (dtb_filename) { + fdt = load_device_tree(dtb_filename, &fdt_size); + } + if (!fdt) { + return 0; + } + + if (kernel_cmdline) { + r = qemu_fdt_setprop_string(fdt, "/chosen", "bootargs", + kernel_cmdline); + if (r < 0) { + fprintf(stderr, "couldn't set /chosen/bootargs\n"); + } + } + + if (initrd_start) { + qemu_fdt_setprop_cell(fdt, "/chosen", "linux,initrd-start", + initrd_start); + + qemu_fdt_setprop_cell(fdt, "/chosen", "linux,initrd-end", + initrd_end); + } + + cpu_physical_memory_write(addr, fdt, fdt_size); + g_free(fdt); + return fdt_size; +} + +static uint64_t translate_kernel_address(void *opaque, uint64_t addr) +{ + return addr - 0x30000000LL; +} + +void microblaze_load_kernel(MicroBlazeCPU *cpu, hwaddr ddr_base, + uint32_t ramsize, + const char *initrd_filename, + const char *dtb_filename, + void (*machine_cpu_reset)(MicroBlazeCPU *)) +{ + const char *kernel_filename; + const char *kernel_cmdline; + const char *dtb_arg; + char *filename = NULL; + + kernel_filename = current_machine->kernel_filename; + kernel_cmdline = current_machine->kernel_cmdline; + dtb_arg = current_machine->dtb; + /* default to pcbios dtb as passed by machine_init */ + if (!dtb_arg && dtb_filename) { + filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, dtb_filename); + } + + boot_info.machine_cpu_reset = machine_cpu_reset; + qemu_register_reset(main_cpu_reset, cpu); + + if (kernel_filename) { + int kernel_size; + uint64_t entry, high; + uint32_t base32; + int big_endian = 0; + +#ifdef TARGET_WORDS_BIGENDIAN + big_endian = 1; +#endif + + /* Boots a kernel elf binary. */ + kernel_size = load_elf(kernel_filename, NULL, NULL, NULL, + &entry, NULL, &high, NULL, + big_endian, EM_MICROBLAZE, 0, 0); + base32 = entry; + if (base32 == 0xc0000000) { + kernel_size = load_elf(kernel_filename, NULL, + translate_kernel_address, NULL, + &entry, NULL, NULL, NULL, + big_endian, EM_MICROBLAZE, 0, 0); + } + /* Always boot into physical ram. */ + boot_info.bootstrap_pc = (uint32_t)entry; + + /* If it wasn't an ELF image, try a u-boot image.
*/ + if (kernel_size < 0) { + hwaddr uentry, loadaddr = LOAD_UIMAGE_LOADADDR_INVALID; + + kernel_size = load_uimage(kernel_filename, &uentry, &loadaddr, 0, + NULL, NULL); + boot_info.bootstrap_pc = uentry; + high = (loadaddr + kernel_size + 3) & ~3; + } + + /* Not an ELF image nor a u-boot image, try a RAW image. */ + if (kernel_size < 0) { + kernel_size = load_image_targphys(kernel_filename, ddr_base, + ramsize); + boot_info.bootstrap_pc = ddr_base; + high = (ddr_base + kernel_size + 3) & ~3; + } + + if (initrd_filename) { + int initrd_size; + uint32_t initrd_offset; + + high = ROUND_UP(high + kernel_size, 4); + boot_info.initrd_start = high; + initrd_offset = boot_info.initrd_start - ddr_base; + + initrd_size = load_ramdisk(initrd_filename, + boot_info.initrd_start, + ramsize - initrd_offset); + if (initrd_size < 0) { + initrd_size = load_image_targphys(initrd_filename, + boot_info.initrd_start, + ramsize - initrd_offset); + } + if (initrd_size < 0) { + error_report("could not load initrd '%s'", + initrd_filename); + exit(EXIT_FAILURE); + } + boot_info.initrd_end = boot_info.initrd_start + initrd_size; + high = ROUND_UP(high + initrd_size, 4); + } + + boot_info.cmdline = high + 4096; + if (kernel_cmdline && strlen(kernel_cmdline)) { + pstrcpy_targphys("cmdline", boot_info.cmdline, 256, kernel_cmdline); + } + /* Provide a device-tree. */ + boot_info.fdt = boot_info.cmdline + 4096; + microblaze_load_dtb(boot_info.fdt, ramsize, + boot_info.initrd_start, + boot_info.initrd_end, + kernel_cmdline, + /* Prefer a -dtb argument */ + dtb_arg ? dtb_arg : filename); + } + g_free(filename); +} diff --git a/hw/microblaze/boot.h b/hw/microblaze/boot.h new file mode 100644 index 000000000..5a8c2f797 --- /dev/null +++ b/hw/microblaze/boot.h @@ -0,0 +1,11 @@ +#ifndef MICROBLAZE_BOOT_H +#define MICROBLAZE_BOOT_H + + +void microblaze_load_kernel(MicroBlazeCPU *cpu, hwaddr ddr_base, + uint32_t ramsize, + const char *initrd_filename, + const char *dtb_filename, + void (*machine_cpu_reset)(MicroBlazeCPU *)); + +#endif /* MICROBLAZE_BOOT_H */ diff --git a/hw/microblaze/meson.build b/hw/microblaze/meson.build new file mode 100644 index 000000000..bb9e4eb8f --- /dev/null +++ b/hw/microblaze/meson.build @@ -0,0 +1,7 @@ +microblaze_ss = ss.source_set() +microblaze_ss.add(files('boot.c')) +microblaze_ss.add(when: 'CONFIG_PETALOGIX_S3ADSP1800', if_true: files('petalogix_s3adsp1800_mmu.c')) +microblaze_ss.add(when: 'CONFIG_PETALOGIX_ML605', if_true: files('petalogix_ml605_mmu.c')) +microblaze_ss.add(when: 'CONFIG_XLNX_ZYNQMP_PMU', if_true: files('xlnx-zynqmp-pmu.c')) + +hw_arch += {'microblaze': microblaze_ss} diff --git a/hw/microblaze/petalogix_ml605_mmu.c b/hw/microblaze/petalogix_ml605_mmu.c new file mode 100644 index 000000000..159db6cbe --- /dev/null +++ b/hw/microblaze/petalogix_ml605_mmu.c @@ -0,0 +1,220 @@ +/* + * Model of Petalogix linux reference design targeting Xilinx Spartan ml605 + * board. + * + * Copyright (c) 2011 Michal Simek + * Copyright (c) 2011 PetaLogix + * Copyright (c) 2009 Edgar E. Iglesias.
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "qapi/error.h" +#include "cpu.h" +#include "hw/sysbus.h" +#include "net/net.h" +#include "hw/block/flash.h" +#include "sysemu/sysemu.h" +#include "hw/boards.h" +#include "hw/char/serial.h" +#include "hw/qdev-properties.h" +#include "exec/address-spaces.h" +#include "hw/ssi/ssi.h" + +#include "boot.h" + +#include "hw/stream.h" + +#define LMB_BRAM_SIZE (128 * KiB) +#define FLASH_SIZE (32 * MiB) + +#define BINARY_DEVICE_TREE_FILE "petalogix-ml605.dtb" + +#define NUM_SPI_FLASHES 4 + +#define SPI_BASEADDR 0x40a00000 +#define MEMORY_BASEADDR 0x50000000 +#define FLASH_BASEADDR 0x86000000 +#define INTC_BASEADDR 0x81800000 +#define TIMER_BASEADDR 0x83c00000 +#define UART16550_BASEADDR 0x83e00000 +#define AXIENET_BASEADDR 0x82780000 +#define AXIDMA_BASEADDR 0x84600000 + +#define AXIDMA_IRQ1 0 +#define AXIDMA_IRQ0 1 +#define TIMER_IRQ 2 +#define AXIENET_IRQ 3 +#define SPI_IRQ 4 +#define UART16550_IRQ 5 + +static void +petalogix_ml605_init(MachineState *machine) +{ + ram_addr_t ram_size = machine->ram_size; + MemoryRegion *address_space_mem = get_system_memory(); + DeviceState *dev, *dma, *eth0; + Object *ds, *cs; + MicroBlazeCPU *cpu; + SysBusDevice *busdev; + DriveInfo *dinfo; + int i; + MemoryRegion *phys_lmb_bram = g_new(MemoryRegion, 1); + MemoryRegion *phys_ram = g_new(MemoryRegion, 1); + qemu_irq irq[32]; + + /* init CPUs */ + cpu = MICROBLAZE_CPU(object_new(TYPE_MICROBLAZE_CPU)); + object_property_set_str(OBJECT(cpu), "version", "8.10.a", &error_abort); + /* Use FPU but don't use floating point conversion and square + * root instructions + */ + object_property_set_int(OBJECT(cpu), "use-fpu", 1, &error_abort); + object_property_set_bool(OBJECT(cpu), "dcache-writeback", true, + &error_abort); + object_property_set_bool(OBJECT(cpu), "endianness", true, &error_abort); + qdev_realize(DEVICE(cpu), NULL, &error_abort); + + /* Attach emulated BRAM through the LMB. 
*/ + memory_region_init_ram(phys_lmb_bram, NULL, "petalogix_ml605.lmb_bram", + LMB_BRAM_SIZE, &error_fatal); + memory_region_add_subregion(address_space_mem, 0x00000000, phys_lmb_bram); + + memory_region_init_ram(phys_ram, NULL, "petalogix_ml605.ram", ram_size, + &error_fatal); + memory_region_add_subregion(address_space_mem, MEMORY_BASEADDR, phys_ram); + + dinfo = drive_get(IF_PFLASH, 0, 0); + /* 5th parameter 2 means bank-width + * 10th parameter 0 means little-endian */ + pflash_cfi01_register(FLASH_BASEADDR, "petalogix_ml605.flash", FLASH_SIZE, + dinfo ? blk_by_legacy_dinfo(dinfo) : NULL, + 64 * KiB, 2, 0x89, 0x18, 0x0000, 0x0, 0); + + + dev = qdev_new("xlnx.xps-intc"); + qdev_prop_set_uint32(dev, "kind-of-intr", 1 << TIMER_IRQ); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, INTC_BASEADDR); + sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, + qdev_get_gpio_in(DEVICE(cpu), MB_CPU_IRQ)); + for (i = 0; i < 32; i++) { + irq[i] = qdev_get_gpio_in(dev, i); + } + + serial_mm_init(address_space_mem, UART16550_BASEADDR + 0x1000, 2, + irq[UART16550_IRQ], 115200, serial_hd(0), + DEVICE_LITTLE_ENDIAN); + + /* 2 timers at irq 2 @ 100 MHz. */ + dev = qdev_new("xlnx.xps-timer"); + qdev_prop_set_uint32(dev, "one-timer-only", 0); + qdev_prop_set_uint32(dev, "clock-frequency", 100 * 1000000); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, TIMER_BASEADDR); + sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, irq[TIMER_IRQ]); + + /* axi ethernet and dma initialization. */ + qemu_check_nic_model(&nd_table[0], "xlnx.axi-ethernet"); + eth0 = qdev_new("xlnx.axi-ethernet"); + dma = qdev_new("xlnx.axi-dma"); + + /* FIXME: attach to the sysbus instead */ + object_property_add_child(qdev_get_machine(), "xilinx-eth", OBJECT(eth0)); + object_property_add_child(qdev_get_machine(), "xilinx-dma", OBJECT(dma)); + + ds = object_property_get_link(OBJECT(dma), + "axistream-connected-target", NULL); + cs = object_property_get_link(OBJECT(dma), + "axistream-control-connected-target", NULL); + qdev_set_nic_properties(eth0, &nd_table[0]); + qdev_prop_set_uint32(eth0, "rxmem", 0x1000); + qdev_prop_set_uint32(eth0, "txmem", 0x1000); + object_property_set_link(OBJECT(eth0), "axistream-connected", ds, + &error_abort); + object_property_set_link(OBJECT(eth0), "axistream-control-connected", cs, + &error_abort); + sysbus_realize_and_unref(SYS_BUS_DEVICE(eth0), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(eth0), 0, AXIENET_BASEADDR); + sysbus_connect_irq(SYS_BUS_DEVICE(eth0), 0, irq[AXIENET_IRQ]); + + ds = object_property_get_link(OBJECT(eth0), + "axistream-connected-target", NULL); + cs = object_property_get_link(OBJECT(eth0), + "axistream-control-connected-target", NULL); + qdev_prop_set_uint32(dma, "freqhz", 100 * 1000000); + object_property_set_link(OBJECT(dma), "axistream-connected", ds, + &error_abort); + object_property_set_link(OBJECT(dma), "axistream-control-connected", cs, + &error_abort); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dma), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(dma), 0, AXIDMA_BASEADDR); + sysbus_connect_irq(SYS_BUS_DEVICE(dma), 0, irq[AXIDMA_IRQ0]); + sysbus_connect_irq(SYS_BUS_DEVICE(dma), 1, irq[AXIDMA_IRQ1]); + + { + SSIBus *spi; + + dev = qdev_new("xlnx.xps-spi"); + qdev_prop_set_uint8(dev, "num-ss-bits", NUM_SPI_FLASHES); + busdev = SYS_BUS_DEVICE(dev); + sysbus_realize_and_unref(busdev, &error_fatal); + sysbus_mmio_map(busdev, 0, SPI_BASEADDR); + sysbus_connect_irq(busdev, 0, irq[SPI_IRQ]);
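+ /* The SPI controller's sysbus IRQ 0 (connected above) is its interrupt line; outputs 1..NUM_SPI_FLASHES are wired up below as the chip-select lines of the attached flashes. */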
+ + spi = (SSIBus *)qdev_get_child_bus(dev, "spi"); + + for (i = 0; i < NUM_SPI_FLASHES; i++) { + DriveInfo *dinfo = drive_get_next(IF_MTD); + qemu_irq cs_line; + + dev = qdev_new("n25q128"); + if (dinfo) { + qdev_prop_set_drive_err(dev, "drive", + blk_by_legacy_dinfo(dinfo), + &error_fatal); + } + qdev_realize_and_unref(dev, BUS(spi), &error_fatal); + + cs_line = qdev_get_gpio_in_named(dev, SSI_GPIO_CS, 0); + sysbus_connect_irq(busdev, i+1, cs_line); + } + } + + /* setup PVR to match kernel settings */ + cpu->cfg.pvr_regs[4] = 0xc56b8000; + cpu->cfg.pvr_regs[5] = 0xc56be000; + cpu->cfg.pvr_regs[10] = 0x0e000000; /* virtex 6 */ + + microblaze_load_kernel(cpu, MEMORY_BASEADDR, ram_size, + machine->initrd_filename, + BINARY_DEVICE_TREE_FILE, + NULL); + +} + +static void petalogix_ml605_machine_init(MachineClass *mc) +{ + mc->desc = "PetaLogix linux refdesign for xilinx ml605 little endian"; + mc->init = petalogix_ml605_init; +} + +DEFINE_MACHINE("petalogix-ml605", petalogix_ml605_machine_init) diff --git a/hw/microblaze/petalogix_s3adsp1800_mmu.c b/hw/microblaze/petalogix_s3adsp1800_mmu.c new file mode 100644 index 000000000..9d959d1ad --- /dev/null +++ b/hw/microblaze/petalogix_s3adsp1800_mmu.c @@ -0,0 +1,138 @@ +/* + * Model of Petalogix linux reference design targeting Xilinx Spartan 3ADSP-1800 + * boards. + * + * Copyright (c) 2009 Edgar E. Iglesias. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "qapi/error.h" +#include "cpu.h" +#include "hw/sysbus.h" +#include "net/net.h" +#include "hw/block/flash.h" +#include "sysemu/sysemu.h" +#include "hw/boards.h" +#include "hw/misc/unimp.h" +#include "exec/address-spaces.h" +#include "hw/char/xilinx_uartlite.h" + +#include "boot.h" + +#define LMB_BRAM_SIZE (128 * KiB) +#define FLASH_SIZE (16 * MiB) + +#define BINARY_DEVICE_TREE_FILE "petalogix-s3adsp1800.dtb" + +#define MEMORY_BASEADDR 0x90000000 +#define FLASH_BASEADDR 0xa0000000 +#define GPIO_BASEADDR 0x81400000 +#define INTC_BASEADDR 0x81800000 +#define TIMER_BASEADDR 0x83c00000 +#define UARTLITE_BASEADDR 0x84000000 +#define ETHLITE_BASEADDR 0x81000000 + +#define TIMER_IRQ 0 +#define ETHLITE_IRQ 1 +#define UARTLITE_IRQ 3 + +static void +petalogix_s3adsp1800_init(MachineState *machine) +{ + ram_addr_t ram_size = machine->ram_size; + DeviceState *dev; + MicroBlazeCPU *cpu; + DriveInfo *dinfo; + int i; + hwaddr ddr_base = MEMORY_BASEADDR; + MemoryRegion *phys_lmb_bram = g_new(MemoryRegion, 1); + MemoryRegion *phys_ram = g_new(MemoryRegion, 1); + qemu_irq irq[32]; + MemoryRegion *sysmem = get_system_memory(); + + cpu = MICROBLAZE_CPU(object_new(TYPE_MICROBLAZE_CPU)); + object_property_set_str(OBJECT(cpu), "version", "7.10.d", &error_abort); + qdev_realize(DEVICE(cpu), NULL, &error_abort); + + /* Attach emulated BRAM through the LMB. */ + memory_region_init_ram(phys_lmb_bram, NULL, + "petalogix_s3adsp1800.lmb_bram", LMB_BRAM_SIZE, + &error_fatal); + memory_region_add_subregion(sysmem, 0x00000000, phys_lmb_bram); + + memory_region_init_ram(phys_ram, NULL, "petalogix_s3adsp1800.ram", + ram_size, &error_fatal); + memory_region_add_subregion(sysmem, ddr_base, phys_ram); + + dinfo = drive_get(IF_PFLASH, 0, 0); + pflash_cfi01_register(FLASH_BASEADDR, + "petalogix_s3adsp1800.flash", FLASH_SIZE, + dinfo ? blk_by_legacy_dinfo(dinfo) : NULL, + 64 * KiB, 1, 0x89, 0x18, 0x0000, 0x0, 1); + + dev = qdev_new("xlnx.xps-intc"); + qdev_prop_set_uint32(dev, "kind-of-intr", + 1 << ETHLITE_IRQ | 1 << UARTLITE_IRQ); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, INTC_BASEADDR); + sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, + qdev_get_gpio_in(DEVICE(cpu), MB_CPU_IRQ)); + for (i = 0; i < 32; i++) { + irq[i] = qdev_get_gpio_in(dev, i); + } + + xilinx_uartlite_create(UARTLITE_BASEADDR, irq[UARTLITE_IRQ], + serial_hd(0)); + + /* 2 timers at irq 0 @ 62 MHz.
*/ + dev = qdev_new("xlnx.xps-timer"); + qdev_prop_set_uint32(dev, "one-timer-only", 0); + qdev_prop_set_uint32(dev, "clock-frequency", 62 * 1000000); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, TIMER_BASEADDR); + sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, irq[TIMER_IRQ]); + + qemu_check_nic_model(&nd_table[0], "xlnx.xps-ethernetlite"); + dev = qdev_new("xlnx.xps-ethernetlite"); + qdev_set_nic_properties(dev, &nd_table[0]); + qdev_prop_set_uint32(dev, "tx-ping-pong", 0); + qdev_prop_set_uint32(dev, "rx-ping-pong", 0); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, ETHLITE_BASEADDR); + sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, irq[ETHLITE_IRQ]); + + create_unimplemented_device("gpio", GPIO_BASEADDR, 0x10000); + + microblaze_load_kernel(cpu, ddr_base, ram_size, + machine->initrd_filename, + BINARY_DEVICE_TREE_FILE, + NULL); +} + +static void petalogix_s3adsp1800_machine_init(MachineClass *mc) +{ + mc->desc = "PetaLogix linux refdesign for xilinx Spartan 3ADSP1800"; + mc->init = petalogix_s3adsp1800_init; + mc->is_default = true; +} + +DEFINE_MACHINE("petalogix-s3adsp1800", petalogix_s3adsp1800_machine_init) diff --git a/hw/microblaze/xlnx-zynqmp-pmu.c b/hw/microblaze/xlnx-zynqmp-pmu.c new file mode 100644 index 000000000..5a2016672 --- /dev/null +++ b/hw/microblaze/xlnx-zynqmp-pmu.c @@ -0,0 +1,187 @@ +/* + * Xilinx Zynq MPSoC PMU (Power Management Unit) emulation + * + * Copyright (C) 2017 Xilinx Inc + * Written by Alistair Francis + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "exec/address-spaces.h" +#include "hw/boards.h" +#include "cpu.h" +#include "boot.h" + +#include "hw/intc/xlnx-zynqmp-ipi.h" +#include "hw/intc/xlnx-pmu-iomod-intc.h" +#include "qom/object.h" + +/* Define the PMU device */ + +#define TYPE_XLNX_ZYNQMP_PMU_SOC "xlnx-zynqmp-pmu-soc" +OBJECT_DECLARE_SIMPLE_TYPE(XlnxZynqMPPMUSoCState, XLNX_ZYNQMP_PMU_SOC) + +#define XLNX_ZYNQMP_PMU_ROM_SIZE 0x8000 +#define XLNX_ZYNQMP_PMU_ROM_ADDR 0xFFD00000 +#define XLNX_ZYNQMP_PMU_RAM_ADDR 0xFFDC0000 + +#define XLNX_ZYNQMP_PMU_INTC_ADDR 0xFFD40000 + +#define XLNX_ZYNQMP_PMU_NUM_IPIS 4 + +static const uint64_t ipi_addr[XLNX_ZYNQMP_PMU_NUM_IPIS] = { + 0xFF340000, 0xFF350000, 0xFF360000, 0xFF370000, +}; +static const uint64_t ipi_irq[XLNX_ZYNQMP_PMU_NUM_IPIS] = { + 19, 20, 21, 22, +}; + +struct XlnxZynqMPPMUSoCState { + /*< private >*/ + DeviceState parent_obj; + + /*< public >*/ + MicroBlazeCPU cpu; + XlnxPMUIOIntc intc; + XlnxZynqMPIPI ipi[XLNX_ZYNQMP_PMU_NUM_IPIS]; +}; + + +static void xlnx_zynqmp_pmu_soc_init(Object *obj) +{ + XlnxZynqMPPMUSoCState *s = XLNX_ZYNQMP_PMU_SOC(obj); + + object_initialize_child(obj, "pmu-cpu", &s->cpu, TYPE_MICROBLAZE_CPU); + + object_initialize_child(obj, "intc", &s->intc, TYPE_XLNX_PMU_IO_INTC); + + /* Create the IPI device */ + for (int i = 0; i < XLNX_ZYNQMP_PMU_NUM_IPIS; i++) { + char *name = g_strdup_printf("ipi%d", i); + object_initialize_child(obj, name, &s->ipi[i], TYPE_XLNX_ZYNQMP_IPI); + g_free(name); + } +} + +static void xlnx_zynqmp_pmu_soc_realize(DeviceState *dev, Error **errp) +{ + XlnxZynqMPPMUSoCState *s = XLNX_ZYNQMP_PMU_SOC(dev); + + object_property_set_uint(OBJECT(&s->cpu), "base-vectors", + XLNX_ZYNQMP_PMU_ROM_ADDR, &error_abort); + object_property_set_bool(OBJECT(&s->cpu), "use-stack-protection", true, + &error_abort); + object_property_set_uint(OBJECT(&s->cpu), "use-fpu", 0, &error_abort); + object_property_set_uint(OBJECT(&s->cpu), "use-hw-mul", 0, &error_abort); + object_property_set_bool(OBJECT(&s->cpu), "use-barrel", true, + &error_abort); + object_property_set_bool(OBJECT(&s->cpu), "use-msr-instr", true, + &error_abort); + object_property_set_bool(OBJECT(&s->cpu), "use-pcmp-instr", true, + &error_abort); + object_property_set_bool(OBJECT(&s->cpu), "use-mmu", false, &error_abort); + object_property_set_bool(OBJECT(&s->cpu), "endianness", true, + &error_abort); + object_property_set_str(OBJECT(&s->cpu), "version", "8.40.b", + &error_abort); + object_property_set_uint(OBJECT(&s->cpu), "pvr", 0, &error_abort); + if (!qdev_realize(DEVICE(&s->cpu), NULL, errp)) { + return; + } + + object_property_set_uint(OBJECT(&s->intc), "intc-intr-size", 0x10, + &error_abort); + object_property_set_uint(OBJECT(&s->intc), "intc-level-edge", 0x0, + &error_abort); + object_property_set_uint(OBJECT(&s->intc), "intc-positive", 0xffff, + &error_abort); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->intc), errp)) { + return; + } + sysbus_mmio_map(SYS_BUS_DEVICE(&s->intc), 0, XLNX_ZYNQMP_PMU_INTC_ADDR); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->intc), 0, + qdev_get_gpio_in(DEVICE(&s->cpu), MB_CPU_IRQ)); + + /* Connect the IPI device */ + for (int i = 0; i < XLNX_ZYNQMP_PMU_NUM_IPIS; i++) { + sysbus_realize(SYS_BUS_DEVICE(&s->ipi[i]), &error_abort); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->ipi[i]), 0, ipi_addr[i]); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->ipi[i]), 0, + qdev_get_gpio_in(DEVICE(&s->intc), ipi_irq[i])); + } +} + +static void xlnx_zynqmp_pmu_soc_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = 
DEVICE_CLASS(oc); + + dc->realize = xlnx_zynqmp_pmu_soc_realize; +} + +static const TypeInfo xlnx_zynqmp_pmu_soc_type_info = { + .name = TYPE_XLNX_ZYNQMP_PMU_SOC, + .parent = TYPE_DEVICE, + .instance_size = sizeof(XlnxZynqMPPMUSoCState), + .instance_init = xlnx_zynqmp_pmu_soc_init, + .class_init = xlnx_zynqmp_pmu_soc_class_init, +}; + +static void xlnx_zynqmp_pmu_soc_register_types(void) +{ + type_register_static(&xlnx_zynqmp_pmu_soc_type_info); +} + +type_init(xlnx_zynqmp_pmu_soc_register_types) + +/* Define the PMU Machine */ + +static void xlnx_zynqmp_pmu_init(MachineState *machine) +{ + XlnxZynqMPPMUSoCState *pmu = g_new0(XlnxZynqMPPMUSoCState, 1); + MemoryRegion *address_space_mem = get_system_memory(); + MemoryRegion *pmu_rom = g_new(MemoryRegion, 1); + MemoryRegion *pmu_ram = g_new(MemoryRegion, 1); + + /* Create the ROM */ + memory_region_init_rom(pmu_rom, NULL, "xlnx-zynqmp-pmu.rom", + XLNX_ZYNQMP_PMU_ROM_SIZE, &error_fatal); + memory_region_add_subregion(address_space_mem, XLNX_ZYNQMP_PMU_ROM_ADDR, + pmu_rom); + + /* Create the RAM */ + memory_region_init_ram(pmu_ram, NULL, "xlnx-zynqmp-pmu.ram", + machine->ram_size, &error_fatal); + memory_region_add_subregion(address_space_mem, XLNX_ZYNQMP_PMU_RAM_ADDR, + pmu_ram); + + /* Create the PMU device */ + object_initialize_child(OBJECT(machine), "pmu", pmu, + TYPE_XLNX_ZYNQMP_PMU_SOC); + qdev_realize(DEVICE(pmu), NULL, &error_fatal); + + /* Load the kernel */ + microblaze_load_kernel(&pmu->cpu, XLNX_ZYNQMP_PMU_RAM_ADDR, + machine->ram_size, + machine->initrd_filename, + machine->dtb, + NULL); +} + +static void xlnx_zynqmp_pmu_machine_init(MachineClass *mc) +{ + mc->desc = "Xilinx ZynqMP PMU machine"; + mc->init = xlnx_zynqmp_pmu_init; +} + +DEFINE_MACHINE("xlnx-zynqmp-pmu", xlnx_zynqmp_pmu_machine_init) + diff --git a/hw/mips/Kconfig b/hw/mips/Kconfig new file mode 100644 index 000000000..b4c5549ce --- /dev/null +++ b/hw/mips/Kconfig @@ -0,0 +1,61 @@ +config MALTA + bool + select ISA_SUPERIO + +config MIPSSIM + bool + select ISA_BUS + select SERIAL_ISA + select MIPSNET + +config JAZZ + bool + select ISA_BUS + select RC4030 + select I8259 + select I8254 + select I8257 + select PCSPK + select VGA_ISA_MM + select G364FB + select DP8393X + select ESP + select FDC_SYSBUS + select MC146818RTC + select PCKBD + select SERIAL + select PARALLEL + select DS1225Y + select JAZZ_LED + +config FULOONG + bool + select PCI_BONITO + +config LOONGSON3V + bool + imply VIRTIO_VGA + imply QXL if SPICE + select SERIAL + select GOLDFISH_RTC + select LOONGSON_LIOINTC + select PCI_DEVICES + select PCI_EXPRESS_GENERIC_BRIDGE + select MSI_NONBROKEN + select FW_CFG_MIPS + +config MIPS_CPS + bool + select PTIMER + select MIPS_ITU + +config MIPS_BOSTON + bool + select FITLOADER + select MIPS_CPS + select PCI_EXPRESS_XILINX + select AHCI_ICH9 + select SERIAL + +config FW_CFG_MIPS + bool diff --git a/hw/mips/bootloader.c b/hw/mips/bootloader.c new file mode 100644 index 000000000..99991f8b2 --- /dev/null +++ b/hw/mips/bootloader.c @@ -0,0 +1,204 @@ +/* + * Utility for QEMU MIPS to generate its simple bootloader + * + * Instructions used here are carefully selected to keep compatibility with + * MIPS Release 6.
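+ * Only encodings that remain valid in Release 6 are emitted directly (lui, ori, sw, jalr, nop); the 64-bit dsll and sd forms are guarded below by an ISA_MIPS3 check.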
+ * + * Copyright (C) 2020 Jiaxun Yang + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "qemu/bitops.h" +#include "cpu.h" +#include "hw/mips/bootloader.h" + +typedef enum bl_reg { + BL_REG_ZERO = 0, + BL_REG_AT = 1, + BL_REG_V0 = 2, + BL_REG_V1 = 3, + BL_REG_A0 = 4, + BL_REG_A1 = 5, + BL_REG_A2 = 6, + BL_REG_A3 = 7, + BL_REG_T0 = 8, + BL_REG_T1 = 9, + BL_REG_T2 = 10, + BL_REG_T3 = 11, + BL_REG_T4 = 12, + BL_REG_T5 = 13, + BL_REG_T6 = 14, + BL_REG_T7 = 15, + BL_REG_S0 = 16, + BL_REG_S1 = 17, + BL_REG_S2 = 18, + BL_REG_S3 = 19, + BL_REG_S4 = 20, + BL_REG_S5 = 21, + BL_REG_S6 = 22, + BL_REG_S7 = 23, + BL_REG_T8 = 24, + BL_REG_T9 = 25, + BL_REG_K0 = 26, + BL_REG_K1 = 27, + BL_REG_GP = 28, + BL_REG_SP = 29, + BL_REG_FP = 30, + BL_REG_RA = 31, +} bl_reg; + +static bool bootcpu_supports_isa(uint64_t isa_mask) +{ + return cpu_supports_isa(&MIPS_CPU(first_cpu)->env, isa_mask); +} + +/* Base types */ +static void bl_gen_nop(uint32_t **p) +{ + stl_p(*p, 0); + *p = *p + 1; +} + +static void bl_gen_r_type(uint32_t **p, uint8_t opcode, + bl_reg rs, bl_reg rt, bl_reg rd, + uint8_t shift, uint8_t funct) +{ + uint32_t insn = 0; + + insn = deposit32(insn, 26, 6, opcode); + insn = deposit32(insn, 21, 5, rs); + insn = deposit32(insn, 16, 5, rt); + insn = deposit32(insn, 11, 5, rd); + insn = deposit32(insn, 6, 5, shift); + insn = deposit32(insn, 0, 6, funct); + + stl_p(*p, insn); + *p = *p + 1; +} + +static void bl_gen_i_type(uint32_t **p, uint8_t opcode, + bl_reg rs, bl_reg rt, uint16_t imm) +{ + uint32_t insn = 0; + + insn = deposit32(insn, 26, 6, opcode); + insn = deposit32(insn, 21, 5, rs); + insn = deposit32(insn, 16, 5, rt); + insn = deposit32(insn, 0, 16, imm); + + stl_p(*p, insn); + *p = *p + 1; +} + +/* Single instructions */ +static void bl_gen_dsll(uint32_t **p, bl_reg rd, bl_reg rt, uint8_t sa) +{ + if (bootcpu_supports_isa(ISA_MIPS3)) { + bl_gen_r_type(p, 0, 0, rt, rd, sa, 0x38); + } else { + g_assert_not_reached(); /* unsupported */ + } +} + +static void bl_gen_jalr(uint32_t **p, bl_reg rs) +{ + bl_gen_r_type(p, 0, rs, 0, BL_REG_RA, 0, 0x09); +} + +static void bl_gen_lui(uint32_t **p, bl_reg rt, uint16_t imm) +{ + /* R6: It's an alias of AUI with RS = 0 */ + bl_gen_i_type(p, 0x0f, 0, rt, imm); +} + +static void bl_gen_ori(uint32_t **p, bl_reg rt, bl_reg rs, uint16_t imm) +{ + bl_gen_i_type(p, 0x0d, rs, rt, imm); +} + +static void bl_gen_sw(uint32_t **p, bl_reg rt, uint8_t base, uint16_t offset) +{ + bl_gen_i_type(p, 0x2b, base, rt, offset); +} + +static void bl_gen_sd(uint32_t **p, bl_reg rt, uint8_t base, uint16_t offset) +{ + if (bootcpu_supports_isa(ISA_MIPS3)) { + bl_gen_i_type(p, 0x3f, base, rt, offset); + } else { + g_assert_not_reached(); /* unsupported */ + } +} + +/* Pseudo instructions */ +static void bl_gen_li(uint32_t **p, bl_reg rt, uint32_t imm) +{ + bl_gen_lui(p, rt, extract32(imm, 16, 16)); + bl_gen_ori(p, rt, rt, extract32(imm, 0, 16)); +} + +static void bl_gen_dli(uint32_t **p, bl_reg rt, uint64_t imm) +{ + bl_gen_li(p, rt, extract64(imm, 32, 32)); + bl_gen_dsll(p, rt, rt, 16); + bl_gen_ori(p, rt, rt, extract64(imm, 16, 16)); + bl_gen_dsll(p, rt, rt, 16); + bl_gen_ori(p, rt, rt, extract64(imm, 0, 16)); +} + +static void bl_gen_load_ulong(uint32_t **p, bl_reg rt, target_ulong imm) +{ + if (bootcpu_supports_isa(ISA_MIPS3)) { + bl_gen_dli(p, rt, imm); /* 64bit */ + } else { + bl_gen_li(p, rt, imm); /* 32bit */ + } +} + +/* Helpers */ +void bl_gen_jump_to(uint32_t **p, target_ulong jump_addr) +{ + bl_gen_load_ulong(p, BL_REG_T9, jump_addr);
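+ /* Call through t9: position-independent MIPS code expects a function's own address in t9 at entry, so this matches the standard jalr t9 call sequence. */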
+ bl_gen_jalr(p, BL_REG_T9); + bl_gen_nop(p); /* delay slot */ +} + +void bl_gen_jump_kernel(uint32_t **p, target_ulong sp, target_ulong a0, + target_ulong a1, target_ulong a2, target_ulong a3, + target_ulong kernel_addr) +{ + bl_gen_load_ulong(p, BL_REG_SP, sp); + bl_gen_load_ulong(p, BL_REG_A0, a0); + bl_gen_load_ulong(p, BL_REG_A1, a1); + bl_gen_load_ulong(p, BL_REG_A2, a2); + bl_gen_load_ulong(p, BL_REG_A3, a3); + + bl_gen_jump_to(p, kernel_addr); +} + +void bl_gen_write_ulong(uint32_t **p, target_ulong addr, target_ulong val) +{ + bl_gen_load_ulong(p, BL_REG_K0, val); + bl_gen_load_ulong(p, BL_REG_K1, addr); + if (bootcpu_supports_isa(ISA_MIPS3)) { + bl_gen_sd(p, BL_REG_K0, BL_REG_K1, 0x0); + } else { + bl_gen_sw(p, BL_REG_K0, BL_REG_K1, 0x0); + } +} + +void bl_gen_write_u32(uint32_t **p, target_ulong addr, uint32_t val) +{ + bl_gen_li(p, BL_REG_K0, val); + bl_gen_load_ulong(p, BL_REG_K1, addr); + bl_gen_sw(p, BL_REG_K0, BL_REG_K1, 0x0); +} + +void bl_gen_write_u64(uint32_t **p, target_ulong addr, uint64_t val) +{ + bl_gen_dli(p, BL_REG_K0, val); + bl_gen_load_ulong(p, BL_REG_K1, addr); + bl_gen_sd(p, BL_REG_K0, BL_REG_K1, 0x0); +} diff --git a/hw/mips/boston.c b/hw/mips/boston.c new file mode 100644 index 000000000..59ca08b93 --- /dev/null +++ b/hw/mips/boston.c @@ -0,0 +1,835 @@ +/* + * MIPS Boston development board emulation. + * + * Copyright (c) 2016 Imagination Technologies + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */ + +#include "qemu/osdep.h" +#include "qemu/units.h" + +#include "elf.h" +#include "hw/boards.h" +#include "hw/char/serial.h" +#include "hw/ide/pci.h" +#include "hw/ide/ahci.h" +#include "hw/loader.h" +#include "hw/loader-fit.h" +#include "hw/mips/bootloader.h" +#include "hw/mips/cps.h" +#include "hw/pci-host/xilinx-pcie.h" +#include "hw/qdev-clock.h" +#include "hw/qdev-properties.h" +#include "qapi/error.h" +#include "qemu/error-report.h" +#include "qemu/log.h" +#include "chardev/char.h" +#include "sysemu/device_tree.h" +#include "sysemu/sysemu.h" +#include "sysemu/qtest.h" +#include "sysemu/runstate.h" + +#include <libfdt.h> +#include "qom/object.h" + +#define TYPE_BOSTON "mips-boston" +typedef struct BostonState BostonState; +DECLARE_INSTANCE_CHECKER(BostonState, BOSTON, + TYPE_BOSTON) + +#define FDT_IRQ_TYPE_NONE 0 +#define FDT_IRQ_TYPE_LEVEL_HIGH 4 +#define FDT_GIC_SHARED 0 +#define FDT_GIC_LOCAL 1 +#define FDT_BOSTON_CLK_SYS 1 +#define FDT_BOSTON_CLK_CPU 2 +#define FDT_PCI_IRQ_MAP_PINS 4 +#define FDT_PCI_IRQ_MAP_DESCS 6 + +struct BostonState { + SysBusDevice parent_obj; + + MachineState *mach; + MIPSCPSState cps; + SerialMM *uart; + Clock *cpuclk; + + CharBackend lcd_display; + char lcd_content[8]; + bool lcd_inited; + + hwaddr kernel_entry; + hwaddr fdt_base; +}; + +enum { + BOSTON_LOWDDR, + BOSTON_PCIE0, + BOSTON_PCIE1, + BOSTON_PCIE2, + BOSTON_PCIE2_MMIO, + BOSTON_CM, + BOSTON_GIC, + BOSTON_CDMM, + BOSTON_CPC, + BOSTON_PLATREG, + BOSTON_UART, + BOSTON_LCD, + BOSTON_FLASH, + BOSTON_PCIE1_MMIO, + BOSTON_PCIE0_MMIO, + BOSTON_HIGHDDR, +}; + +static const MemMapEntry boston_memmap[] = { + [BOSTON_LOWDDR] = { 0x0, 0x10000000 }, + [BOSTON_PCIE0] = { 0x10000000, 0x2000000 }, + [BOSTON_PCIE1] = { 0x12000000, 0x2000000 }, + [BOSTON_PCIE2] = { 0x14000000, 0x2000000 }, + [BOSTON_PCIE2_MMIO] = { 0x16000000, 0x100000 }, + [BOSTON_CM] = { 0x16100000, 0x20000 }, + [BOSTON_GIC] = { 0x16120000, 0x20000 }, + [BOSTON_CDMM] = { 0x16140000, 0x8000 }, + [BOSTON_CPC] = { 0x16200000, 0x8000 }, + [BOSTON_PLATREG] = { 0x17ffd000, 0x1000 }, + [BOSTON_UART] = { 0x17ffe000, 0x20 }, + [BOSTON_LCD] = { 0x17fff000, 0x8 }, + [BOSTON_FLASH] = { 0x18000000, 0x8000000 }, + [BOSTON_PCIE1_MMIO] = { 0x20000000, 0x20000000 }, + [BOSTON_PCIE0_MMIO] = { 0x40000000, 0x40000000 }, + [BOSTON_HIGHDDR] = { 0x80000000, 0x0 }, +}; + +enum boston_plat_reg { + PLAT_FPGA_BUILD = 0x00, + PLAT_CORE_CL = 0x04, + PLAT_WRAPPER_CL = 0x08, + PLAT_SYSCLK_STATUS = 0x0c, + PLAT_SOFTRST_CTL = 0x10, +#define PLAT_SOFTRST_CTL_SYSRESET (1 << 4) + PLAT_DDR3_STATUS = 0x14, +#define PLAT_DDR3_STATUS_LOCKED (1 << 0) +#define PLAT_DDR3_STATUS_CALIBRATED (1 << 2) + PLAT_PCIE_STATUS = 0x18, +#define PLAT_PCIE_STATUS_PCIE0_LOCKED (1 << 0) +#define PLAT_PCIE_STATUS_PCIE1_LOCKED (1 << 8) +#define PLAT_PCIE_STATUS_PCIE2_LOCKED (1 << 16) + PLAT_FLASH_CTL = 0x1c, + PLAT_SPARE0 = 0x20, + PLAT_SPARE1 = 0x24, + PLAT_SPARE2 = 0x28, + PLAT_SPARE3 = 0x2c, + PLAT_MMCM_DIV = 0x30, +#define PLAT_MMCM_DIV_CLK0DIV_SHIFT 0 +#define PLAT_MMCM_DIV_INPUT_SHIFT 8 +#define PLAT_MMCM_DIV_MUL_SHIFT 16 +#define PLAT_MMCM_DIV_CLK1DIV_SHIFT 24 + PLAT_BUILD_CFG = 0x34, +#define PLAT_BUILD_CFG_IOCU_EN (1 << 0) +#define PLAT_BUILD_CFG_PCIE0_EN (1 << 1) +#define PLAT_BUILD_CFG_PCIE1_EN (1 << 2) +#define PLAT_BUILD_CFG_PCIE2_EN (1 << 3) + PLAT_DDR_CFG = 0x38, +#define PLAT_DDR_CFG_SIZE (0xf << 0) +#define PLAT_DDR_CFG_MHZ (0xfff << 4) + PLAT_NOC_PCIE0_ADDR = 0x3c, + PLAT_NOC_PCIE1_ADDR = 0x40, + PLAT_NOC_PCIE2_ADDR = 0x44, + PLAT_SYS_CTL = 0x48, +}; + +static void boston_lcd_event(void
*opaque, QEMUChrEvent event) +{ + BostonState *s = opaque; + if (event == CHR_EVENT_OPENED && !s->lcd_inited) { + qemu_chr_fe_printf(&s->lcd_display, " "); + s->lcd_inited = true; + } +} + +static uint64_t boston_lcd_read(void *opaque, hwaddr addr, + unsigned size) +{ + BostonState *s = opaque; + uint64_t val = 0; + + switch (size) { + case 8: + val |= (uint64_t)s->lcd_content[(addr + 7) & 0x7] << 56; + val |= (uint64_t)s->lcd_content[(addr + 6) & 0x7] << 48; + val |= (uint64_t)s->lcd_content[(addr + 5) & 0x7] << 40; + val |= (uint64_t)s->lcd_content[(addr + 4) & 0x7] << 32; + /* fall through */ + case 4: + val |= (uint64_t)s->lcd_content[(addr + 3) & 0x7] << 24; + val |= (uint64_t)s->lcd_content[(addr + 2) & 0x7] << 16; + /* fall through */ + case 2: + val |= (uint64_t)s->lcd_content[(addr + 1) & 0x7] << 8; + /* fall through */ + case 1: + val |= (uint64_t)s->lcd_content[(addr + 0) & 0x7]; + break; + } + + return val; +} + +static void boston_lcd_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + BostonState *s = opaque; + + switch (size) { + case 8: + s->lcd_content[(addr + 7) & 0x7] = val >> 56; + s->lcd_content[(addr + 6) & 0x7] = val >> 48; + s->lcd_content[(addr + 5) & 0x7] = val >> 40; + s->lcd_content[(addr + 4) & 0x7] = val >> 32; + /* fall through */ + case 4: + s->lcd_content[(addr + 3) & 0x7] = val >> 24; + s->lcd_content[(addr + 2) & 0x7] = val >> 16; + /* fall through */ + case 2: + s->lcd_content[(addr + 1) & 0x7] = val >> 8; + /* fall through */ + case 1: + s->lcd_content[(addr + 0) & 0x7] = val; + break; + } + + qemu_chr_fe_printf(&s->lcd_display, + "\r%-8.8s", s->lcd_content); +} + +static const MemoryRegionOps boston_lcd_ops = { + .read = boston_lcd_read, + .write = boston_lcd_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static uint64_t boston_platreg_read(void *opaque, hwaddr addr, + unsigned size) +{ + BostonState *s = opaque; + uint32_t gic_freq, val; + + if (size != 4) { + qemu_log_mask(LOG_UNIMP, "%uB platform register read\n", size); + return 0; + } + + switch (addr & 0xffff) { + case PLAT_FPGA_BUILD: + case PLAT_CORE_CL: + case PLAT_WRAPPER_CL: + return 0; + case PLAT_DDR3_STATUS: + return PLAT_DDR3_STATUS_LOCKED | PLAT_DDR3_STATUS_CALIBRATED; + case PLAT_MMCM_DIV: + gic_freq = mips_gictimer_get_freq(s->cps.gic.gic_timer) / 1000000; + val = gic_freq << PLAT_MMCM_DIV_INPUT_SHIFT; + val |= 1 << PLAT_MMCM_DIV_MUL_SHIFT; + val |= 1 << PLAT_MMCM_DIV_CLK0DIV_SHIFT; + val |= 1 << PLAT_MMCM_DIV_CLK1DIV_SHIFT; + return val; + case PLAT_BUILD_CFG: + val = PLAT_BUILD_CFG_PCIE0_EN; + val |= PLAT_BUILD_CFG_PCIE1_EN; + val |= PLAT_BUILD_CFG_PCIE2_EN; + return val; + case PLAT_DDR_CFG: + val = s->mach->ram_size / GiB; + assert(!(val & ~PLAT_DDR_CFG_SIZE)); + val |= PLAT_DDR_CFG_MHZ; + return val; + default: + qemu_log_mask(LOG_UNIMP, "Read platform register 0x%" HWADDR_PRIx "\n", + addr & 0xffff); + return 0; + } +} + +static void boston_platreg_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + if (size != 4) { + qemu_log_mask(LOG_UNIMP, "%uB platform register write\n", size); + return; + } + + switch (addr & 0xffff) { + case PLAT_FPGA_BUILD: + case PLAT_CORE_CL: + case PLAT_WRAPPER_CL: + case PLAT_DDR3_STATUS: + case PLAT_PCIE_STATUS: + case PLAT_MMCM_DIV: + case PLAT_BUILD_CFG: + case PLAT_DDR_CFG: + /* read only */ + break; + case PLAT_SOFTRST_CTL: + if (val & PLAT_SOFTRST_CTL_SYSRESET) { + qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); + } + break; + default: + qemu_log_mask(LOG_UNIMP, "Write platform register 0x%" 
HWADDR_PRIx + " = 0x%" PRIx64 "\n", addr & 0xffff, val); + break; + } +} + +static const MemoryRegionOps boston_platreg_ops = { + .read = boston_platreg_read, + .write = boston_platreg_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void mips_boston_instance_init(Object *obj) +{ + BostonState *s = BOSTON(obj); + + s->cpuclk = qdev_init_clock_out(DEVICE(obj), "cpu-refclk"); + clock_set_hz(s->cpuclk, 1000000000); /* 1 GHz */ +} + +static const TypeInfo boston_device = { + .name = TYPE_BOSTON, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(BostonState), + .instance_init = mips_boston_instance_init, +}; + +static void boston_register_types(void) +{ + type_register_static(&boston_device); +} +type_init(boston_register_types) + +static void gen_firmware(uint32_t *p, hwaddr kernel_entry, hwaddr fdt_addr) +{ + uint64_t regaddr; + + /* Move CM GCRs */ + regaddr = cpu_mips_phys_to_kseg1(NULL, GCR_BASE_ADDR + GCR_BASE_OFS), + bl_gen_write_ulong(&p, regaddr, + boston_memmap[BOSTON_CM].base); + + /* Move & enable GIC GCRs */ + regaddr = cpu_mips_phys_to_kseg1(NULL, boston_memmap[BOSTON_CM].base + + GCR_GIC_BASE_OFS), + bl_gen_write_ulong(&p, regaddr, + boston_memmap[BOSTON_GIC].base | GCR_GIC_BASE_GICEN_MSK); + + /* Move & enable CPC GCRs */ + regaddr = cpu_mips_phys_to_kseg1(NULL, boston_memmap[BOSTON_CM].base + + GCR_CPC_BASE_OFS), + bl_gen_write_ulong(&p, regaddr, + boston_memmap[BOSTON_CPC].base | GCR_CPC_BASE_CPCEN_MSK); + + /* + * Setup argument registers to follow the UHI boot protocol: + * + * a0/$4 = -2 + * a1/$5 = virtual address of FDT + * a2/$6 = 0 + * a3/$7 = 0 + */ + bl_gen_jump_kernel(&p, 0, (int32_t)-2, fdt_addr, 0, 0, kernel_entry); +} + +static const void *boston_fdt_filter(void *opaque, const void *fdt_orig, + const void *match_data, hwaddr *load_addr) +{ + BostonState *s = BOSTON(opaque); + MachineState *machine = s->mach; + const char *cmdline; + int err; + size_t ram_low_sz, ram_high_sz; + size_t fdt_sz = fdt_totalsize(fdt_orig) * 2; + g_autofree void *fdt = g_malloc0(fdt_sz); + + err = fdt_open_into(fdt_orig, fdt, fdt_sz); + if (err) { + fprintf(stderr, "unable to open FDT\n"); + return NULL; + } + + cmdline = (machine->kernel_cmdline && machine->kernel_cmdline[0]) + ? 
machine->kernel_cmdline : " "; + err = qemu_fdt_setprop_string(fdt, "/chosen", "bootargs", cmdline); + if (err < 0) { + fprintf(stderr, "couldn't set /chosen/bootargs\n"); + return NULL; + } + + ram_low_sz = MIN(256 * MiB, machine->ram_size); + ram_high_sz = machine->ram_size - ram_low_sz; + qemu_fdt_setprop_sized_cells(fdt, "/memory@0", "reg", + 1, boston_memmap[BOSTON_LOWDDR].base, 1, ram_low_sz, + 1, boston_memmap[BOSTON_HIGHDDR].base + ram_low_sz, + 1, ram_high_sz); + + fdt = g_realloc(fdt, fdt_totalsize(fdt)); + qemu_fdt_dumpdtb(fdt, fdt_sz); + + s->fdt_base = *load_addr; + + return g_steal_pointer(&fdt); +} + +static const void *boston_kernel_filter(void *opaque, const void *kernel, + hwaddr *load_addr, hwaddr *entry_addr) +{ + BostonState *s = BOSTON(opaque); + + s->kernel_entry = *entry_addr; + + return kernel; +} + +static const struct fit_loader_match boston_matches[] = { + { "img,boston" }, + { NULL }, +}; + +static const struct fit_loader boston_fit_loader = { + .matches = boston_matches, + .addr_to_phys = cpu_mips_kseg0_to_phys, + .fdt_filter = boston_fdt_filter, + .kernel_filter = boston_kernel_filter, +}; + +static inline XilinxPCIEHost * +xilinx_pcie_init(MemoryRegion *sys_mem, uint32_t bus_nr, + hwaddr cfg_base, uint64_t cfg_size, + hwaddr mmio_base, uint64_t mmio_size, + qemu_irq irq, bool link_up) +{ + DeviceState *dev; + MemoryRegion *cfg, *mmio; + + dev = qdev_new(TYPE_XILINX_PCIE_HOST); + + qdev_prop_set_uint32(dev, "bus_nr", bus_nr); + qdev_prop_set_uint64(dev, "cfg_base", cfg_base); + qdev_prop_set_uint64(dev, "cfg_size", cfg_size); + qdev_prop_set_uint64(dev, "mmio_base", mmio_base); + qdev_prop_set_uint64(dev, "mmio_size", mmio_size); + qdev_prop_set_bit(dev, "link_up", link_up); + + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + + cfg = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 0); + memory_region_add_subregion_overlap(sys_mem, cfg_base, cfg, 0); + + mmio = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 1); + memory_region_add_subregion_overlap(sys_mem, 0, mmio, 0); + + qdev_connect_gpio_out_named(dev, "interrupt_out", 0, irq); + + return XILINX_PCIE_HOST(dev); +} + + +static void fdt_create_pcie(void *fdt, int gic_ph, int irq, hwaddr reg_base, + hwaddr reg_size, hwaddr mmio_base, hwaddr mmio_size) +{ + int i; + char *name, *intc_name; + uint32_t intc_ph; + uint32_t interrupt_map[FDT_PCI_IRQ_MAP_PINS][FDT_PCI_IRQ_MAP_DESCS]; + + intc_ph = qemu_fdt_alloc_phandle(fdt); + name = g_strdup_printf("/soc/pci@%" HWADDR_PRIx, reg_base); + qemu_fdt_add_subnode(fdt, name); + qemu_fdt_setprop_string(fdt, name, "compatible", + "xlnx,axi-pcie-host-1.00.a"); + qemu_fdt_setprop_string(fdt, name, "device_type", "pci"); + qemu_fdt_setprop_cells(fdt, name, "reg", reg_base, reg_size); + + qemu_fdt_setprop_cell(fdt, name, "#address-cells", 3); + qemu_fdt_setprop_cell(fdt, name, "#size-cells", 2); + qemu_fdt_setprop_cell(fdt, name, "#interrupt-cells", 1); + + qemu_fdt_setprop_cell(fdt, name, "interrupt-parent", gic_ph); + qemu_fdt_setprop_cells(fdt, name, "interrupts", FDT_GIC_SHARED, irq, + FDT_IRQ_TYPE_LEVEL_HIGH); + + qemu_fdt_setprop_cells(fdt, name, "ranges", 0x02000000, 0, mmio_base, + mmio_base, 0, mmio_size); + qemu_fdt_setprop_cells(fdt, name, "bus-range", 0x00, 0xff); + + + + intc_name = g_strdup_printf("%s/interrupt-controller", name); + qemu_fdt_add_subnode(fdt, intc_name); + qemu_fdt_setprop(fdt, intc_name, "interrupt-controller", NULL, 0); + qemu_fdt_setprop_cell(fdt, intc_name, "#address-cells", 0); + qemu_fdt_setprop_cell(fdt, intc_name,
"#interrupt-cells", 1); + qemu_fdt_setprop_cell(fdt, intc_name, "phandle", intc_ph); + + qemu_fdt_setprop_cells(fdt, name, "interrupt-map-mask", 0, 0, 0, 7); + for (i = 0; i < FDT_PCI_IRQ_MAP_PINS; i++) { + uint32_t *irqmap = interrupt_map[i]; + + irqmap[0] = cpu_to_be32(0); + irqmap[1] = cpu_to_be32(0); + irqmap[2] = cpu_to_be32(0); + irqmap[3] = cpu_to_be32(i + 1); + irqmap[4] = cpu_to_be32(intc_ph); + irqmap[5] = cpu_to_be32(i + 1); + } + qemu_fdt_setprop(fdt, name, "interrupt-map", + &interrupt_map, sizeof(interrupt_map)); + + g_free(intc_name); + g_free(name); +} + +static const void *create_fdt(BostonState *s, + const MemMapEntry *memmap, int *dt_size) +{ + void *fdt; + int cpu; + MachineState *mc = s->mach; + uint32_t platreg_ph, gic_ph, clk_ph; + char *name, *gic_name, *platreg_name, *stdout_name; + static const char * const syscon_compat[2] = { + "img,boston-platform-regs", "syscon" + }; + + fdt = create_device_tree(dt_size); + if (!fdt) { + error_report("create_device_tree() failed"); + exit(1); + } + + platreg_ph = qemu_fdt_alloc_phandle(fdt); + gic_ph = qemu_fdt_alloc_phandle(fdt); + clk_ph = qemu_fdt_alloc_phandle(fdt); + + qemu_fdt_setprop_string(fdt, "/", "model", "img,boston"); + qemu_fdt_setprop_string(fdt, "/", "compatible", "img,boston"); + qemu_fdt_setprop_cell(fdt, "/", "#size-cells", 0x1); + qemu_fdt_setprop_cell(fdt, "/", "#address-cells", 0x1); + + + qemu_fdt_add_subnode(fdt, "/cpus"); + qemu_fdt_setprop_cell(fdt, "/cpus", "#size-cells", 0x0); + qemu_fdt_setprop_cell(fdt, "/cpus", "#address-cells", 0x1); + + for (cpu = 0; cpu < mc->smp.cpus; cpu++) { + name = g_strdup_printf("/cpus/cpu@%d", cpu); + qemu_fdt_add_subnode(fdt, name); + qemu_fdt_setprop_string(fdt, name, "compatible", "img,mips"); + qemu_fdt_setprop_string(fdt, name, "status", "okay"); + qemu_fdt_setprop_cell(fdt, name, "reg", cpu); + qemu_fdt_setprop_string(fdt, name, "device_type", "cpu"); + qemu_fdt_setprop_cells(fdt, name, "clocks", clk_ph, FDT_BOSTON_CLK_CPU); + g_free(name); + } + + qemu_fdt_add_subnode(fdt, "/soc"); + qemu_fdt_setprop(fdt, "/soc", "ranges", NULL, 0); + qemu_fdt_setprop_string(fdt, "/soc", "compatible", "simple-bus"); + qemu_fdt_setprop_cell(fdt, "/soc", "#size-cells", 0x1); + qemu_fdt_setprop_cell(fdt, "/soc", "#address-cells", 0x1); + + fdt_create_pcie(fdt, gic_ph, 2, + memmap[BOSTON_PCIE0].base, memmap[BOSTON_PCIE0].size, + memmap[BOSTON_PCIE0_MMIO].base, memmap[BOSTON_PCIE0_MMIO].size); + + fdt_create_pcie(fdt, gic_ph, 1, + memmap[BOSTON_PCIE1].base, memmap[BOSTON_PCIE1].size, + memmap[BOSTON_PCIE1_MMIO].base, memmap[BOSTON_PCIE1_MMIO].size); + + fdt_create_pcie(fdt, gic_ph, 0, + memmap[BOSTON_PCIE2].base, memmap[BOSTON_PCIE2].size, + memmap[BOSTON_PCIE2_MMIO].base, memmap[BOSTON_PCIE2_MMIO].size); + + /* GIC with its timer node */ + gic_name = g_strdup_printf("/soc/interrupt-controller@%" HWADDR_PRIx, + memmap[BOSTON_GIC].base); + qemu_fdt_add_subnode(fdt, gic_name); + qemu_fdt_setprop_string(fdt, gic_name, "compatible", "mti,gic"); + qemu_fdt_setprop_cells(fdt, gic_name, "reg", memmap[BOSTON_GIC].base, + memmap[BOSTON_GIC].size); + qemu_fdt_setprop(fdt, gic_name, "interrupt-controller", NULL, 0); + qemu_fdt_setprop_cell(fdt, gic_name, "#interrupt-cells", 3); + qemu_fdt_setprop_cell(fdt, gic_name, "phandle", gic_ph); + + name = g_strdup_printf("%s/timer", gic_name); + qemu_fdt_add_subnode(fdt, name); + qemu_fdt_setprop_string(fdt, name, "compatible", "mti,gic-timer"); + qemu_fdt_setprop_cells(fdt, name, "interrupts", FDT_GIC_LOCAL, 1, + FDT_IRQ_TYPE_NONE); +
qemu_fdt_setprop_cells(fdt, name, "clocks", clk_ph, FDT_BOSTON_CLK_CPU); + g_free(name); + g_free(gic_name); + + /* CDMM node */ + name = g_strdup_printf("/soc/cdmm@%" HWADDR_PRIx, memmap[BOSTON_CDMM].base); + qemu_fdt_add_subnode(fdt, name); + qemu_fdt_setprop_string(fdt, name, "compatible", "mti,mips-cdmm"); + qemu_fdt_setprop_cells(fdt, name, "reg", memmap[BOSTON_CDMM].base, + memmap[BOSTON_CDMM].size); + g_free(name); + + /* CPC node */ + name = g_strdup_printf("/soc/cpc@%" HWADDR_PRIx, memmap[BOSTON_CPC].base); + qemu_fdt_add_subnode(fdt, name); + qemu_fdt_setprop_string(fdt, name, "compatible", "mti,mips-cpc"); + qemu_fdt_setprop_cells(fdt, name, "reg", memmap[BOSTON_CPC].base, + memmap[BOSTON_CPC].size); + g_free(name); + + /* platreg and its clk node */ + platreg_name = g_strdup_printf("/soc/system-controller@%" HWADDR_PRIx, + memmap[BOSTON_PLATREG].base); + qemu_fdt_add_subnode(fdt, platreg_name); + qemu_fdt_setprop_string_array(fdt, platreg_name, "compatible", + (char **)&syscon_compat, + ARRAY_SIZE(syscon_compat)); + qemu_fdt_setprop_cells(fdt, platreg_name, "reg", + memmap[BOSTON_PLATREG].base, + memmap[BOSTON_PLATREG].size); + qemu_fdt_setprop_cell(fdt, platreg_name, "phandle", platreg_ph); + + name = g_strdup_printf("%s/clock", platreg_name); + qemu_fdt_add_subnode(fdt, name); + qemu_fdt_setprop_string(fdt, name, "compatible", "img,boston-clock"); + qemu_fdt_setprop_cell(fdt, name, "#clock-cells", 1); + qemu_fdt_setprop_cell(fdt, name, "phandle", clk_ph); + g_free(name); + g_free(platreg_name); + + /* reboot node */ + name = g_strdup_printf("/soc/reboot"); + qemu_fdt_add_subnode(fdt, name); + qemu_fdt_setprop_string(fdt, name, "compatible", "syscon-reboot"); + qemu_fdt_setprop_cell(fdt, name, "regmap", platreg_ph); + qemu_fdt_setprop_cell(fdt, name, "offset", 0x10); + qemu_fdt_setprop_cell(fdt, name, "mask", 0x10); + g_free(name); + + /* uart node */ + name = g_strdup_printf("/soc/uart@%" HWADDR_PRIx, memmap[BOSTON_UART].base); + qemu_fdt_add_subnode(fdt, name); + qemu_fdt_setprop_string(fdt, name, "compatible", "ns16550a"); + qemu_fdt_setprop_cells(fdt, name, "reg", memmap[BOSTON_UART].base, + memmap[BOSTON_UART].size); + qemu_fdt_setprop_cell(fdt, name, "reg-shift", 0x2); + qemu_fdt_setprop_cell(fdt, name, "interrupt-parent", gic_ph); + qemu_fdt_setprop_cells(fdt, name, "interrupts", FDT_GIC_SHARED, 3, + FDT_IRQ_TYPE_LEVEL_HIGH); + qemu_fdt_setprop_cells(fdt, name, "clocks", clk_ph, FDT_BOSTON_CLK_SYS); + + qemu_fdt_add_subnode(fdt, "/chosen"); + stdout_name = g_strdup_printf("%s:115200", name); + qemu_fdt_setprop_string(fdt, "/chosen", "stdout-path", stdout_name); + g_free(stdout_name); + g_free(name); + + /* lcd node */ + name = g_strdup_printf("/soc/lcd@%" HWADDR_PRIx, memmap[BOSTON_LCD].base); + qemu_fdt_add_subnode(fdt, name); + qemu_fdt_setprop_string(fdt, name, "compatible", "img,boston-lcd"); + qemu_fdt_setprop_cells(fdt, name, "reg", memmap[BOSTON_LCD].base, + memmap[BOSTON_LCD].size); + g_free(name); + + name = g_strdup_printf("/memory@0"); + qemu_fdt_add_subnode(fdt, name); + qemu_fdt_setprop_string(fdt, name, "device_type", "memory"); + g_free(name); + + return fdt; +} + +static void boston_mach_init(MachineState *machine) +{ + DeviceState *dev; + BostonState *s; + MemoryRegion *flash, *ddr_low_alias, *lcd, *platreg; + MemoryRegion *sys_mem = get_system_memory(); + XilinxPCIEHost *pcie2; + PCIDevice *ahci; + DriveInfo *hd[6]; + Chardev *chr; + int fw_size, fit_err; + + if ((machine->ram_size % GiB) || + (machine->ram_size > (2 * GiB))) { + error_report("Memory
size must be 1GB or 2GB"); + exit(1); + } + + dev = qdev_new(TYPE_BOSTON); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + + s = BOSTON(dev); + s->mach = machine; + + if (!cpu_type_supports_cps_smp(machine->cpu_type)) { + error_report("Boston requires CPUs which support CPS"); + exit(1); + } + + object_initialize_child(OBJECT(machine), "cps", &s->cps, TYPE_MIPS_CPS); + object_property_set_str(OBJECT(&s->cps), "cpu-type", machine->cpu_type, + &error_fatal); + object_property_set_int(OBJECT(&s->cps), "num-vp", machine->smp.cpus, + &error_fatal); + qdev_connect_clock_in(DEVICE(&s->cps), "clk-in", + qdev_get_clock_out(dev, "cpu-refclk")); + sysbus_realize(SYS_BUS_DEVICE(&s->cps), &error_fatal); + + sysbus_mmio_map_overlap(SYS_BUS_DEVICE(&s->cps), 0, 0, 1); + + flash = g_new(MemoryRegion, 1); + memory_region_init_rom(flash, NULL, "boston.flash", + boston_memmap[BOSTON_FLASH].size, &error_fatal); + memory_region_add_subregion_overlap(sys_mem, + boston_memmap[BOSTON_FLASH].base, + flash, 0); + + memory_region_add_subregion_overlap(sys_mem, + boston_memmap[BOSTON_HIGHDDR].base, + machine->ram, 0); + + ddr_low_alias = g_new(MemoryRegion, 1); + memory_region_init_alias(ddr_low_alias, NULL, "boston_low.ddr", + machine->ram, 0, + MIN(machine->ram_size, (256 * MiB))); + memory_region_add_subregion_overlap(sys_mem, 0, ddr_low_alias, 0); + + xilinx_pcie_init(sys_mem, 0, + boston_memmap[BOSTON_PCIE0].base, + boston_memmap[BOSTON_PCIE0].size, + boston_memmap[BOSTON_PCIE0_MMIO].base, + boston_memmap[BOSTON_PCIE0_MMIO].size, + get_cps_irq(&s->cps, 2), false); + + xilinx_pcie_init(sys_mem, 1, + boston_memmap[BOSTON_PCIE1].base, + boston_memmap[BOSTON_PCIE1].size, + boston_memmap[BOSTON_PCIE1_MMIO].base, + boston_memmap[BOSTON_PCIE1_MMIO].size, + get_cps_irq(&s->cps, 1), false); + + pcie2 = xilinx_pcie_init(sys_mem, 2, + boston_memmap[BOSTON_PCIE2].base, + boston_memmap[BOSTON_PCIE2].size, + boston_memmap[BOSTON_PCIE2_MMIO].base, + boston_memmap[BOSTON_PCIE2_MMIO].size, + get_cps_irq(&s->cps, 0), true); + + platreg = g_new(MemoryRegion, 1); + memory_region_init_io(platreg, NULL, &boston_platreg_ops, s, + "boston-platregs", + boston_memmap[BOSTON_PLATREG].size); + memory_region_add_subregion_overlap(sys_mem, + boston_memmap[BOSTON_PLATREG].base, platreg, 0); + + s->uart = serial_mm_init(sys_mem, boston_memmap[BOSTON_UART].base, 2, + get_cps_irq(&s->cps, 3), 10000000, + serial_hd(0), DEVICE_NATIVE_ENDIAN); + + lcd = g_new(MemoryRegion, 1); + memory_region_init_io(lcd, NULL, &boston_lcd_ops, s, "boston-lcd", 0x8); + memory_region_add_subregion_overlap(sys_mem, + boston_memmap[BOSTON_LCD].base, lcd, 0); + + chr = qemu_chr_new("lcd", "vc:320x240", NULL); + qemu_chr_fe_init(&s->lcd_display, chr, NULL); + qemu_chr_fe_set_handlers(&s->lcd_display, NULL, NULL, + boston_lcd_event, NULL, s, NULL, true); + + ahci = pci_create_simple_multifunction(&PCI_BRIDGE(&pcie2->root)->sec_bus, + PCI_DEVFN(0, 0), + true, TYPE_ICH9_AHCI); + g_assert(ARRAY_SIZE(hd) == ahci_get_num_ports(ahci)); + ide_drive_get(hd, ahci_get_num_ports(ahci)); + ahci_ide_create_devs(ahci, hd); + + if (machine->firmware) { + fw_size = load_image_targphys(machine->firmware, + 0x1fc00000, 4 * MiB); + if (fw_size == -1) { + error_report("unable to load firmware image '%s'", + machine->firmware); + exit(1); + } + } else if (machine->kernel_filename) { + uint64_t kernel_entry, kernel_high; + ssize_t kernel_size; + + kernel_size = load_elf(machine->kernel_filename, NULL, + cpu_mips_kseg0_to_phys, NULL, + &kernel_entry, NULL, &kernel_high, + NULL, 
0, EM_MIPS, 1, 0); + + if (kernel_size > 0) { + int dt_size; + g_autofree const void *dtb_file_data, *dtb_load_data; + hwaddr dtb_paddr = QEMU_ALIGN_UP(kernel_high, 64 * KiB); + hwaddr dtb_vaddr = cpu_mips_phys_to_kseg0(NULL, dtb_paddr); + + s->kernel_entry = kernel_entry; + if (machine->dtb) { + dtb_file_data = load_device_tree(machine->dtb, &dt_size); + } else { + dtb_file_data = create_fdt(s, boston_memmap, &dt_size); + } + + dtb_load_data = boston_fdt_filter(s, dtb_file_data, + NULL, &dtb_vaddr); + + /* Calculate real fdt size after filter */ + dt_size = fdt_totalsize(dtb_load_data); + rom_add_blob_fixed("dtb", dtb_load_data, dt_size, dtb_paddr); + } else { + /* Try to load file as FIT */ + fit_err = load_fit(&boston_fit_loader, machine->kernel_filename, s); + if (fit_err) { + error_report("unable to load kernel image"); + exit(1); + } + } + + gen_firmware(memory_region_get_ram_ptr(flash) + 0x7c00000, + s->kernel_entry, s->fdt_base); + } else if (!qtest_enabled()) { + error_report("Please provide either a -kernel or -bios argument"); + exit(1); + } +} + +static void boston_mach_class_init(MachineClass *mc) +{ + mc->desc = "MIPS Boston"; + mc->init = boston_mach_init; + mc->block_default_type = IF_IDE; + mc->default_ram_size = 1 * GiB; + mc->default_ram_id = "boston.ddr"; + mc->max_cpus = 16; + mc->default_cpu_type = MIPS_CPU_TYPE_NAME("I6400"); +} + +DEFINE_MACHINE("boston", boston_mach_class_init) diff --git a/hw/mips/cps.c b/hw/mips/cps.c new file mode 100644 index 000000000..2b436700c --- /dev/null +++ b/hw/mips/cps.c @@ -0,0 +1,210 @@ +/* + * Coherent Processing System emulation. + * + * Copyright (c) 2016 Imagination Technologies + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/module.h" +#include "hw/mips/cps.h" +#include "hw/mips/mips.h" +#include "hw/qdev-clock.h" +#include "hw/qdev-properties.h" +#include "hw/mips/cpudevs.h" +#include "sysemu/kvm.h" +#include "sysemu/reset.h" + +qemu_irq get_cps_irq(MIPSCPSState *s, int pin_number) +{ + assert(pin_number < s->num_irq); + return s->gic.irq_state[pin_number].irq; +} + +static void mips_cps_init(Object *obj) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + MIPSCPSState *s = MIPS_CPS(obj); + + s->clock = qdev_init_clock_in(DEVICE(obj), "clk-in", NULL, NULL, 0); + /* + * Cover entire address space as there do not seem to be any + * constraints for the base address of CPC and GIC.
+ */ + memory_region_init(&s->container, obj, "mips-cps-container", UINT64_MAX); + sysbus_init_mmio(sbd, &s->container); +} + +static void main_cpu_reset(void *opaque) +{ + MIPSCPU *cpu = opaque; + CPUState *cs = CPU(cpu); + + cpu_reset(cs); +} + +static bool cpu_mips_itu_supported(CPUMIPSState *env) +{ + bool is_mt = (env->CP0_Config5 & (1 << CP0C5_VP)) || ase_mt_available(env); + + return is_mt && !kvm_enabled(); +} + +static void mips_cps_realize(DeviceState *dev, Error **errp) +{ + MIPSCPSState *s = MIPS_CPS(dev); + CPUMIPSState *env; + MIPSCPU *cpu; + int i; + target_ulong gcr_base; + bool itu_present = false; + bool saar_present = false; + + if (!clock_get(s->clock)) { + error_setg(errp, "CPS input clock is not connected to an output clock"); + return; + } + + for (i = 0; i < s->num_vp; i++) { + cpu = MIPS_CPU(object_new(s->cpu_type)); + + /* All VPs are halted on reset. Leave powering up to CPC. */ + if (!object_property_set_bool(OBJECT(cpu), "start-powered-off", true, + errp)) { + return; + } + /* All cores use the same clock tree */ + qdev_connect_clock_in(DEVICE(cpu), "clk-in", s->clock); + + if (!qdev_realize_and_unref(DEVICE(cpu), NULL, errp)) { + return; + } + + /* Init internal devices */ + cpu_mips_irq_init_cpu(cpu); + cpu_mips_clock_init(cpu); + + env = &cpu->env; + if (cpu_mips_itu_supported(env)) { + itu_present = true; + /* Attach ITC Tag to the VP */ + env->itc_tag = mips_itu_get_tag_region(&s->itu); + env->itu = &s->itu; + } + qemu_register_reset(main_cpu_reset, cpu); + } + + cpu = MIPS_CPU(first_cpu); + env = &cpu->env; + saar_present = (bool)env->saarp; + + /* Inter-Thread Communication Unit */ + if (itu_present) { + object_initialize_child(OBJECT(dev), "itu", &s->itu, TYPE_MIPS_ITU); + object_property_set_int(OBJECT(&s->itu), "num-fifo", 16, + &error_abort); + object_property_set_int(OBJECT(&s->itu), "num-semaphores", 16, + &error_abort); + object_property_set_bool(OBJECT(&s->itu), "saar-present", saar_present, + &error_abort); + if (saar_present) { + s->itu.saar = &env->CP0_SAAR; + } + if (!sysbus_realize(SYS_BUS_DEVICE(&s->itu), errp)) { + return; + } + + memory_region_add_subregion(&s->container, 0, + sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->itu), 0)); + } + + /* Cluster Power Controller */ + object_initialize_child(OBJECT(dev), "cpc", &s->cpc, TYPE_MIPS_CPC); + object_property_set_int(OBJECT(&s->cpc), "num-vp", s->num_vp, + &error_abort); + object_property_set_int(OBJECT(&s->cpc), "vp-start-running", 1, + &error_abort); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->cpc), errp)) { + return; + } + + memory_region_add_subregion(&s->container, 0, + sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->cpc), 0)); + + /* Global Interrupt Controller */ + object_initialize_child(OBJECT(dev), "gic", &s->gic, TYPE_MIPS_GIC); + object_property_set_int(OBJECT(&s->gic), "num-vp", s->num_vp, + &error_abort); + object_property_set_int(OBJECT(&s->gic), "num-irq", 128, + &error_abort); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->gic), errp)) { + return; + } + + memory_region_add_subregion(&s->container, 0, + sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->gic), 0)); + + /* Global Configuration Registers */ + gcr_base = env->CP0_CMGCRBase << 4; + + object_initialize_child(OBJECT(dev), "gcr", &s->gcr, TYPE_MIPS_GCR); + object_property_set_int(OBJECT(&s->gcr), "num-vp", s->num_vp, + &error_abort); + object_property_set_int(OBJECT(&s->gcr), "gcr-rev", 0x800, + &error_abort); + object_property_set_int(OBJECT(&s->gcr), "gcr-base", gcr_base, + &error_abort); + object_property_set_link(OBJECT(&s->gcr), "gic", 
OBJECT(&s->gic.mr), + &error_abort); + object_property_set_link(OBJECT(&s->gcr), "cpc", OBJECT(&s->cpc.mr), + &error_abort); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->gcr), errp)) { + return; + } + + memory_region_add_subregion(&s->container, gcr_base, + sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->gcr), 0)); +} + +static Property mips_cps_properties[] = { + DEFINE_PROP_UINT32("num-vp", MIPSCPSState, num_vp, 1), + DEFINE_PROP_UINT32("num-irq", MIPSCPSState, num_irq, 256), + DEFINE_PROP_STRING("cpu-type", MIPSCPSState, cpu_type), + DEFINE_PROP_END_OF_LIST() +}; + +static void mips_cps_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = mips_cps_realize; + device_class_set_props(dc, mips_cps_properties); +} + +static const TypeInfo mips_cps_info = { + .name = TYPE_MIPS_CPS, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(MIPSCPSState), + .instance_init = mips_cps_init, + .class_init = mips_cps_class_init, +}; + +static void mips_cps_register_types(void) +{ + type_register_static(&mips_cps_info); +} + +type_init(mips_cps_register_types) diff --git a/hw/mips/fuloong2e.c b/hw/mips/fuloong2e.c new file mode 100644 index 000000000..c1b8066a1 --- /dev/null +++ b/hw/mips/fuloong2e.c @@ -0,0 +1,350 @@ +/* + * QEMU fuloong 2e mini pc support + * + * Copyright (c) 2008 yajin (yajin@vm-kernel.org) + * Copyright (c) 2009 chenming (chenming@rdc.faw.com.cn) + * Copyright (c) 2010 Huacai Chen (zltjiangshi@gmail.com) + * This code is licensed under the GNU GPL v2. + * + * Contributions after 2012-01-13 are licensed under the terms of the + * GNU GPL, version 2 or (at your option) any later version. + */ + +/* + * Fuloong 2e mini pc is based on ICT/ST Loongson 2e CPU (MIPS III like, 800MHz) + * https://www.linux-mips.org/wiki/Fuloong_2E + * + * Loongson 2e manuals: + * https://github.com/loongson-community/docs/tree/master/2E + */ + +#include "qemu/osdep.h" +#include "qemu-common.h" +#include "qemu/datadir.h" +#include "qemu/units.h" +#include "qapi/error.h" +#include "cpu.h" +#include "hw/clock.h" +#include "net/net.h" +#include "hw/boards.h" +#include "hw/i2c/smbus_eeprom.h" +#include "hw/block/flash.h" +#include "hw/mips/mips.h" +#include "hw/mips/bootloader.h" +#include "hw/mips/cpudevs.h" +#include "hw/pci/pci.h" +#include "hw/loader.h" +#include "hw/ide/pci.h" +#include "hw/qdev-properties.h" +#include "elf.h" +#include "hw/isa/vt82c686.h" +#include "sysemu/qtest.h" +#include "sysemu/reset.h" +#include "sysemu/sysemu.h" +#include "qemu/error-report.h" + +#define ENVP_PADDR 0x2000 +#define ENVP_VADDR cpu_mips_phys_to_kseg0(NULL, ENVP_PADDR) +#define ENVP_NB_ENTRIES 16 +#define ENVP_ENTRY_SIZE 256 + +/* Fuloong 2e has a 512k flash: Winbond W39L040AP70Z */ +#define BIOS_SIZE (512 * KiB) +#define MAX_IDE_BUS 2 + +/* + * PMON is not part of qemu and released with BSD license, anyone + * who want to build a pmon binary please first git-clone the source + * from the git repository at: + * https://github.com/loongson-community/pmon + */ +#define FULOONG_BIOSNAME "pmon_2e.bin" + +/* PCI SLOT in Fuloong 2e */ +#define FULOONG2E_VIA_SLOT 5 +#define FULOONG2E_ATI_SLOT 6 +#define FULOONG2E_RTL8139_SLOT 7 + +static struct _loaderparams { + int ram_size; + const char *kernel_filename; + const char *kernel_cmdline; + const char *initrd_filename; +} loaderparams; + +static void GCC_FMT_ATTR(3, 4) prom_set(uint32_t *prom_buf, int index, + const char *string, ...) 
+{ + va_list ap; + int32_t table_addr; + + if (index >= ENVP_NB_ENTRIES) { + return; + } + + if (string == NULL) { + prom_buf[index] = 0; + return; + } + + table_addr = sizeof(int32_t) * ENVP_NB_ENTRIES + index * ENVP_ENTRY_SIZE; + prom_buf[index] = tswap32(ENVP_VADDR + table_addr); + + va_start(ap, string); + vsnprintf((char *)prom_buf + table_addr, ENVP_ENTRY_SIZE, string, ap); + va_end(ap); +} + +static uint64_t load_kernel(MIPSCPU *cpu) +{ + uint64_t kernel_entry, kernel_high, initrd_size; + int index = 0; + long kernel_size; + ram_addr_t initrd_offset; + uint32_t *prom_buf; + long prom_size; + + kernel_size = load_elf(loaderparams.kernel_filename, NULL, + cpu_mips_kseg0_to_phys, NULL, + &kernel_entry, NULL, + &kernel_high, NULL, + 0, EM_MIPS, 1, 0); + if (kernel_size < 0) { + error_report("could not load kernel '%s': %s", + loaderparams.kernel_filename, + load_elf_strerror(kernel_size)); + exit(1); + } + + /* load initrd */ + initrd_size = 0; + initrd_offset = 0; + if (loaderparams.initrd_filename) { + initrd_size = get_image_size(loaderparams.initrd_filename); + if (initrd_size > 0) { + initrd_offset = ROUND_UP(kernel_high, INITRD_PAGE_SIZE); + if (initrd_offset + initrd_size > loaderparams.ram_size) { + error_report("memory too small for initial ram disk '%s'", + loaderparams.initrd_filename); + exit(1); + } + initrd_size = load_image_targphys(loaderparams.initrd_filename, + initrd_offset, + loaderparams.ram_size - initrd_offset); + } + if (initrd_size == (target_ulong) -1) { + error_report("could not load initial ram disk '%s'", + loaderparams.initrd_filename); + exit(1); + } + } + + /* Setup prom parameters. */ + prom_size = ENVP_NB_ENTRIES * (sizeof(int32_t) + ENVP_ENTRY_SIZE); + prom_buf = g_malloc(prom_size); + + prom_set(prom_buf, index++, "%s", loaderparams.kernel_filename); + if (initrd_size > 0) { + prom_set(prom_buf, index++, + "rd_start=0x%" PRIx64 " rd_size=%" PRId64 " %s", + cpu_mips_phys_to_kseg0(NULL, initrd_offset), + initrd_size, loaderparams.kernel_cmdline); + } else { + prom_set(prom_buf, index++, "%s", loaderparams.kernel_cmdline); + } + + /* Setup minimum environment variables */ + prom_set(prom_buf, index++, "busclock=33000000"); + prom_set(prom_buf, index++, "cpuclock=%u", clock_get_hz(cpu->clock)); + prom_set(prom_buf, index++, "memsize=%"PRIi64, loaderparams.ram_size / MiB); + prom_set(prom_buf, index++, NULL); + + rom_add_blob_fixed("prom", prom_buf, prom_size, ENVP_PADDR); + + g_free(prom_buf); + return kernel_entry; +} + +static void write_bootloader(CPUMIPSState *env, uint8_t *base, + uint64_t kernel_addr) +{ + uint32_t *p; + + /* Small bootloader */ + p = (uint32_t *)base; + + /* j 0x1fc00040 */ + stl_p(p++, 0x0bf00010); + /* nop */ + stl_p(p++, 0x00000000); + + /* Second part of the bootloader */ + p = (uint32_t *)(base + 0x040); + + bl_gen_jump_kernel(&p, ENVP_VADDR - 64, 2, ENVP_VADDR, ENVP_VADDR + 8, + loaderparams.ram_size, kernel_addr); +} + +static void main_cpu_reset(void *opaque) +{ + MIPSCPU *cpu = opaque; + CPUMIPSState *env = &cpu->env; + + cpu_reset(CPU(cpu)); + /* TODO: 2E reset stuff */ + if (loaderparams.kernel_filename) { + env->CP0_Status &= ~((1 << CP0St_BEV) | (1 << CP0St_ERL)); + } +} + +static void vt82c686b_southbridge_init(PCIBus *pci_bus, int slot, qemu_irq intc, + I2CBus **i2c_bus) +{ + PCIDevice *dev; + + dev = pci_create_simple_multifunction(pci_bus, PCI_DEVFN(slot, 0), true, + TYPE_VT82C686B_ISA); + qdev_connect_gpio_out(DEVICE(dev), 0, intc); + + dev = pci_create_simple(pci_bus, PCI_DEVFN(slot, 1), "via-ide"); + 
pci_ide_create_devs(dev); + + pci_create_simple(pci_bus, PCI_DEVFN(slot, 2), "vt82c686b-usb-uhci"); + pci_create_simple(pci_bus, PCI_DEVFN(slot, 3), "vt82c686b-usb-uhci"); + + dev = pci_create_simple(pci_bus, PCI_DEVFN(slot, 4), TYPE_VT82C686B_PM); + *i2c_bus = I2C_BUS(qdev_get_child_bus(DEVICE(dev), "i2c")); + + /* Audio support */ + pci_create_simple(pci_bus, PCI_DEVFN(slot, 5), TYPE_VIA_AC97); + pci_create_simple(pci_bus, PCI_DEVFN(slot, 6), TYPE_VIA_MC97); +} + +/* Network support */ +static void network_init(PCIBus *pci_bus) +{ + int i; + + for (i = 0; i < nb_nics; i++) { + NICInfo *nd = &nd_table[i]; + const char *default_devaddr = NULL; + + if (i == 0 && (!nd->model || strcmp(nd->model, "rtl8139") == 0)) { + /* The Fuloong board has a RTL8139 card using PCI SLOT 7 */ + default_devaddr = "07"; + } + + pci_nic_init_nofail(nd, pci_bus, "rtl8139", default_devaddr); + } +} + +static void mips_fuloong2e_init(MachineState *machine) +{ + const char *kernel_filename = machine->kernel_filename; + const char *kernel_cmdline = machine->kernel_cmdline; + const char *initrd_filename = machine->initrd_filename; + char *filename; + MemoryRegion *address_space_mem = get_system_memory(); + MemoryRegion *bios = g_new(MemoryRegion, 1); + long bios_size; + uint8_t *spd_data; + uint64_t kernel_entry; + PCIDevice *pci_dev; + PCIBus *pci_bus; + I2CBus *smbus; + Clock *cpuclk; + MIPSCPU *cpu; + CPUMIPSState *env; + DeviceState *dev; + + cpuclk = clock_new(OBJECT(machine), "cpu-refclk"); + clock_set_hz(cpuclk, 533080000); /* ~533 MHz */ + + /* init CPUs */ + cpu = mips_cpu_create_with_clock(machine->cpu_type, cpuclk); + env = &cpu->env; + + qemu_register_reset(main_cpu_reset, cpu); + + /* TODO: support more than 256M RAM as highmem */ + if (machine->ram_size != 256 * MiB) { + error_report("Invalid RAM size, should be 256MB"); + exit(EXIT_FAILURE); + } + memory_region_add_subregion(address_space_mem, 0, machine->ram); + + /* Boot ROM */ + memory_region_init_rom(bios, NULL, "fuloong2e.bios", BIOS_SIZE, + &error_fatal); + memory_region_add_subregion(address_space_mem, 0x1fc00000LL, bios); + + /* + * We do not support flash operation, just loading pmon.bin as raw BIOS. + * Please use -L to set the BIOS path and -bios to set bios name. 
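+ * For instance (the directory path is illustrative): + * qemu-system-mips64el -M fuloong2e -L /path/to/pmon-dir -bios pmon_2e.bin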
+ */ + + if (kernel_filename) { + loaderparams.ram_size = machine->ram_size; + loaderparams.kernel_filename = kernel_filename; + loaderparams.kernel_cmdline = kernel_cmdline; + loaderparams.initrd_filename = initrd_filename; + kernel_entry = load_kernel(cpu); + write_bootloader(env, memory_region_get_ram_ptr(bios), kernel_entry); + } else { + filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, + machine->firmware ?: FULOONG_BIOSNAME); + if (filename) { + bios_size = load_image_targphys(filename, 0x1fc00000LL, + BIOS_SIZE); + g_free(filename); + } else { + bios_size = -1; + } + + if ((bios_size < 0 || bios_size > BIOS_SIZE) && + machine->firmware && !qtest_enabled()) { + error_report("Could not load MIPS bios '%s'", machine->firmware); + exit(1); + } + } + + /* Init internal devices */ + cpu_mips_irq_init_cpu(cpu); + cpu_mips_clock_init(cpu); + + /* North bridge, Bonito --> IP2 */ + pci_bus = bonito_init((qemu_irq *)&(env->irq[2])); + + /* South bridge -> IP5 */ + vt82c686b_southbridge_init(pci_bus, FULOONG2E_VIA_SLOT, env->irq[5], + &smbus); + + /* GPU */ + if (vga_interface_type != VGA_NONE) { + pci_dev = pci_new(-1, "ati-vga"); + dev = DEVICE(pci_dev); + qdev_prop_set_uint32(dev, "vgamem_mb", 16); + qdev_prop_set_uint16(dev, "x-device-id", 0x5159); + pci_realize_and_unref(pci_dev, pci_bus, &error_fatal); + } + + /* Populate SPD eeprom data */ + spd_data = spd_data_generate(DDR, machine->ram_size); + smbus_eeprom_init_one(smbus, 0x50, spd_data); + + /* Network card: RTL8139D */ + network_init(pci_bus); +} + +static void mips_fuloong2e_machine_init(MachineClass *mc) +{ + mc->desc = "Fuloong 2e mini pc"; + mc->init = mips_fuloong2e_init; + mc->block_default_type = IF_IDE; + mc->default_cpu_type = MIPS_CPU_TYPE_NAME("Loongson-2E"); + mc->default_ram_size = 256 * MiB; + mc->default_ram_id = "fuloong2e.ram"; + mc->minimum_page_bits = 14; +} + +DEFINE_MACHINE("fuloong2e", mips_fuloong2e_machine_init) diff --git a/hw/mips/fw_cfg.c b/hw/mips/fw_cfg.c new file mode 100644 index 000000000..67c4a74f4 --- /dev/null +++ b/hw/mips/fw_cfg.c @@ -0,0 +1,35 @@ +/* + * QEMU fw_cfg helpers (MIPS specific) + * + * Copyright (c) 2020 Lemote, Inc. + * + * Author: + * Huacai Chen (chenhc@lemote.com) + * + * SPDX-License-Identifier: GPL-2.0-or-later + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ */ + +#include "qemu/osdep.h" +#include "hw/mips/fw_cfg.h" +#include "hw/nvram/fw_cfg.h" + +const char *fw_cfg_arch_key_name(uint16_t key) +{ + static const struct { + uint16_t key; + const char *name; + } fw_cfg_arch_wellknown_keys[] = { + {FW_CFG_MACHINE_VERSION, "machine_version"}, + {FW_CFG_CPU_FREQ, "cpu_frequency"}, + }; + + for (size_t i = 0; i < ARRAY_SIZE(fw_cfg_arch_wellknown_keys); i++) { + if (fw_cfg_arch_wellknown_keys[i].key == key) { + return fw_cfg_arch_wellknown_keys[i].name; + } + } + return NULL; +} diff --git a/hw/mips/fw_cfg.h b/hw/mips/fw_cfg.h new file mode 100644 index 000000000..e317d5b9a --- /dev/null +++ b/hw/mips/fw_cfg.h @@ -0,0 +1,19 @@ +/* + * QEMU fw_cfg helpers (MIPS specific) + * + * Copyright (c) 2020 Huacai Chen + * + * SPDX-License-Identifier: MIT + */ + +#ifndef HW_MIPS_FW_CFG_H +#define HW_MIPS_FW_CFG_H + +#include "hw/boards.h" +#include "hw/nvram/fw_cfg.h" + +/* Data for BIOS to identify machine */ +#define FW_CFG_MACHINE_VERSION (FW_CFG_ARCH_LOCAL + 0) +#define FW_CFG_CPU_FREQ (FW_CFG_ARCH_LOCAL + 1) + +#endif diff --git a/hw/mips/gt64xxx_pci.c b/hw/mips/gt64xxx_pci.c new file mode 100644 index 000000000..c7480bd01 --- /dev/null +++ b/hw/mips/gt64xxx_pci.c @@ -0,0 +1,1300 @@ +/* + * QEMU GT64120 PCI host + * + * Copyright (c) 2006,2007 Aurelien Jarno + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/units.h" +#include "qemu/log.h" +#include "hw/mips/mips.h" +#include "hw/pci/pci.h" +#include "hw/pci/pci_host.h" +#include "hw/southbridge/piix.h" +#include "migration/vmstate.h" +#include "hw/intc/i8259.h" +#include "hw/irq.h" +#include "trace.h" +#include "qom/object.h" + +#define GT_REGS (0x1000 >> 2) + +/* CPU Configuration */ +#define GT_CPU (0x000 >> 2) +#define GT_MULTI (0x120 >> 2) + +/* CPU Address Decode */ +#define GT_SCS10LD (0x008 >> 2) +#define GT_SCS10HD (0x010 >> 2) +#define GT_SCS32LD (0x018 >> 2) +#define GT_SCS32HD (0x020 >> 2) +#define GT_CS20LD (0x028 >> 2) +#define GT_CS20HD (0x030 >> 2) +#define GT_CS3BOOTLD (0x038 >> 2) +#define GT_CS3BOOTHD (0x040 >> 2) +#define GT_PCI0IOLD (0x048 >> 2) +#define GT_PCI0IOHD (0x050 >> 2) +#define GT_PCI0M0LD (0x058 >> 2) +#define GT_PCI0M0HD (0x060 >> 2) +#define GT_PCI0M1LD (0x080 >> 2) +#define GT_PCI0M1HD (0x088 >> 2) +#define GT_PCI1IOLD (0x090 >> 2) +#define GT_PCI1IOHD (0x098 >> 2) +#define GT_PCI1M0LD (0x0a0 >> 2) +#define GT_PCI1M0HD (0x0a8 >> 2) +#define GT_PCI1M1LD (0x0b0 >> 2) +#define GT_PCI1M1HD (0x0b8 >> 2) +#define GT_ISD (0x068 >> 2) + +#define GT_SCS10AR (0x0d0 >> 2) +#define GT_SCS32AR (0x0d8 >> 2) +#define GT_CS20R (0x0e0 >> 2) +#define GT_CS3BOOTR (0x0e8 >> 2) + +#define GT_PCI0IOREMAP (0x0f0 >> 2) +#define GT_PCI0M0REMAP (0x0f8 >> 2) +#define GT_PCI0M1REMAP (0x100 >> 2) +#define GT_PCI1IOREMAP (0x108 >> 2) +#define GT_PCI1M0REMAP (0x110 >> 2) +#define GT_PCI1M1REMAP (0x118 >> 2) + +/* CPU Error Report */ +#define GT_CPUERR_ADDRLO (0x070 >> 2) +#define GT_CPUERR_ADDRHI (0x078 >> 2) +#define GT_CPUERR_DATALO (0x128 >> 2) /* GT-64120A only */ +#define GT_CPUERR_DATAHI (0x130 >> 2) /* GT-64120A only */ +#define GT_CPUERR_PARITY (0x138 >> 2) /* GT-64120A only */ + +/* CPU Sync Barrier */ +#define GT_PCI0SYNC (0x0c0 >> 2) +#define GT_PCI1SYNC (0x0c8 >> 2) + +/* SDRAM and Device Address Decode */ +#define GT_SCS0LD (0x400 >> 2) +#define GT_SCS0HD (0x404 >> 2) +#define GT_SCS1LD (0x408 >> 2) +#define GT_SCS1HD (0x40c >> 2) +#define GT_SCS2LD (0x410 >> 2) +#define GT_SCS2HD (0x414 >> 2) +#define GT_SCS3LD (0x418 >> 2) +#define GT_SCS3HD (0x41c >> 2) +#define GT_CS0LD (0x420 >> 2) +#define GT_CS0HD (0x424 >> 2) +#define GT_CS1LD (0x428 >> 2) +#define GT_CS1HD (0x42c >> 2) +#define GT_CS2LD (0x430 >> 2) +#define GT_CS2HD (0x434 >> 2) +#define GT_CS3LD (0x438 >> 2) +#define GT_CS3HD (0x43c >> 2) +#define GT_BOOTLD (0x440 >> 2) +#define GT_BOOTHD (0x444 >> 2) +#define GT_ADERR (0x470 >> 2) + +/* SDRAM Configuration */ +#define GT_SDRAM_CFG (0x448 >> 2) +#define GT_SDRAM_OPMODE (0x474 >> 2) +#define GT_SDRAM_BM (0x478 >> 2) +#define GT_SDRAM_ADDRDECODE (0x47c >> 2) + +/* SDRAM Parameters */ +#define GT_SDRAM_B0 (0x44c >> 2) +#define GT_SDRAM_B1 (0x450 >> 2) +#define GT_SDRAM_B2 (0x454 >> 2) +#define GT_SDRAM_B3 (0x458 >> 2) + +/* Device Parameters */ +#define GT_DEV_B0 (0x45c >> 2) +#define GT_DEV_B1 (0x460 >> 2) +#define GT_DEV_B2 (0x464 >> 2) +#define GT_DEV_B3 (0x468 >> 2) +#define GT_DEV_BOOT (0x46c >> 2) + +/* ECC */ +#define GT_ECC_ERRDATALO (0x480 >> 2) /* GT-64120A only */ +#define GT_ECC_ERRDATAHI (0x484 >> 2) /* GT-64120A only */ +#define GT_ECC_MEM (0x488 >> 2) /* GT-64120A only */ +#define GT_ECC_CALC (0x48c >> 2) /* GT-64120A only */ +#define GT_ECC_ERRADDR (0x490 >> 2) /* GT-64120A only */ + +/* DMA Record */ +#define GT_DMA0_CNT (0x800 >> 2) +#define GT_DMA1_CNT (0x804 >> 2) +#define GT_DMA2_CNT (0x808 >> 2) +#define GT_DMA3_CNT (0x80c >> 
2) +#define GT_DMA0_SA (0x810 >> 2) +#define GT_DMA1_SA (0x814 >> 2) +#define GT_DMA2_SA (0x818 >> 2) +#define GT_DMA3_SA (0x81c >> 2) +#define GT_DMA0_DA (0x820 >> 2) +#define GT_DMA1_DA (0x824 >> 2) +#define GT_DMA2_DA (0x828 >> 2) +#define GT_DMA3_DA (0x82c >> 2) +#define GT_DMA0_NEXT (0x830 >> 2) +#define GT_DMA1_NEXT (0x834 >> 2) +#define GT_DMA2_NEXT (0x838 >> 2) +#define GT_DMA3_NEXT (0x83c >> 2) +#define GT_DMA0_CUR (0x870 >> 2) +#define GT_DMA1_CUR (0x874 >> 2) +#define GT_DMA2_CUR (0x878 >> 2) +#define GT_DMA3_CUR (0x87c >> 2) + +/* DMA Channel Control */ +#define GT_DMA0_CTRL (0x840 >> 2) +#define GT_DMA1_CTRL (0x844 >> 2) +#define GT_DMA2_CTRL (0x848 >> 2) +#define GT_DMA3_CTRL (0x84c >> 2) + +/* DMA Arbiter */ +#define GT_DMA_ARB (0x860 >> 2) + +/* Timer/Counter */ +#define GT_TC0 (0x850 >> 2) +#define GT_TC1 (0x854 >> 2) +#define GT_TC2 (0x858 >> 2) +#define GT_TC3 (0x85c >> 2) +#define GT_TC_CONTROL (0x864 >> 2) + +/* PCI Internal */ +#define GT_PCI0_CMD (0xc00 >> 2) +#define GT_PCI0_TOR (0xc04 >> 2) +#define GT_PCI0_BS_SCS10 (0xc08 >> 2) +#define GT_PCI0_BS_SCS32 (0xc0c >> 2) +#define GT_PCI0_BS_CS20 (0xc10 >> 2) +#define GT_PCI0_BS_CS3BT (0xc14 >> 2) +#define GT_PCI1_IACK (0xc30 >> 2) +#define GT_PCI0_IACK (0xc34 >> 2) +#define GT_PCI0_BARE (0xc3c >> 2) +#define GT_PCI0_PREFMBR (0xc40 >> 2) +#define GT_PCI0_SCS10_BAR (0xc48 >> 2) +#define GT_PCI0_SCS32_BAR (0xc4c >> 2) +#define GT_PCI0_CS20_BAR (0xc50 >> 2) +#define GT_PCI0_CS3BT_BAR (0xc54 >> 2) +#define GT_PCI0_SSCS10_BAR (0xc58 >> 2) +#define GT_PCI0_SSCS32_BAR (0xc5c >> 2) +#define GT_PCI0_SCS3BT_BAR (0xc64 >> 2) +#define GT_PCI1_CMD (0xc80 >> 2) +#define GT_PCI1_TOR (0xc84 >> 2) +#define GT_PCI1_BS_SCS10 (0xc88 >> 2) +#define GT_PCI1_BS_SCS32 (0xc8c >> 2) +#define GT_PCI1_BS_CS20 (0xc90 >> 2) +#define GT_PCI1_BS_CS3BT (0xc94 >> 2) +#define GT_PCI1_BARE (0xcbc >> 2) +#define GT_PCI1_PREFMBR (0xcc0 >> 2) +#define GT_PCI1_SCS10_BAR (0xcc8 >> 2) +#define GT_PCI1_SCS32_BAR (0xccc >> 2) +#define GT_PCI1_CS20_BAR (0xcd0 >> 2) +#define GT_PCI1_CS3BT_BAR (0xcd4 >> 2) +#define GT_PCI1_SSCS10_BAR (0xcd8 >> 2) +#define GT_PCI1_SSCS32_BAR (0xcdc >> 2) +#define GT_PCI1_SCS3BT_BAR (0xce4 >> 2) +#define GT_PCI1_CFGADDR (0xcf0 >> 2) +#define GT_PCI1_CFGDATA (0xcf4 >> 2) +#define GT_PCI0_CFGADDR (0xcf8 >> 2) +#define GT_PCI0_CFGDATA (0xcfc >> 2) + +/* Interrupts */ +#define GT_INTRCAUSE (0xc18 >> 2) +#define GT_INTRMASK (0xc1c >> 2) +#define GT_PCI0_ICMASK (0xc24 >> 2) +#define GT_PCI0_SERR0MASK (0xc28 >> 2) +#define GT_CPU_INTSEL (0xc70 >> 2) +#define GT_PCI0_INTSEL (0xc74 >> 2) +#define GT_HINTRCAUSE (0xc98 >> 2) +#define GT_HINTRMASK (0xc9c >> 2) +#define GT_PCI0_HICMASK (0xca4 >> 2) +#define GT_PCI1_SERR1MASK (0xca8 >> 2) + +#define PCI_MAPPING_ENTRY(regname) \ + hwaddr regname ##_start; \ + hwaddr regname ##_length; \ + MemoryRegion regname ##_mem + +#define TYPE_GT64120_PCI_HOST_BRIDGE "gt64120" + +OBJECT_DECLARE_SIMPLE_TYPE(GT64120State, GT64120_PCI_HOST_BRIDGE) + +struct GT64120State { + PCIHostState parent_obj; + + uint32_t regs[GT_REGS]; + PCI_MAPPING_ENTRY(PCI0IO); + PCI_MAPPING_ENTRY(PCI0M0); + PCI_MAPPING_ENTRY(PCI0M1); + PCI_MAPPING_ENTRY(ISD); + MemoryRegion pci0_mem; + AddressSpace pci0_mem_as; +}; + +/* Adjust range to avoid touching space which isn't mappable via PCI */ +/* + * XXX: Hardcoded values for Malta: 0x1e000000 - 0x1f100000 + * 0x1fc00000 - 0x1fd00000 + */ +static void check_reserved_space(hwaddr *start, hwaddr *length) +{ + hwaddr begin = *start; + hwaddr end = *start + *length; + + if (end >= 0x1e000000LL 
&& end < 0x1f100000LL) { + end = 0x1e000000LL; + } + if (begin >= 0x1e000000LL && begin < 0x1f100000LL) { + begin = 0x1f100000LL; + } + if (end >= 0x1fc00000LL && end < 0x1fd00000LL) { + end = 0x1fc00000LL; + } + if (begin >= 0x1fc00000LL && begin < 0x1fd00000LL) { + begin = 0x1fd00000LL; + } + /* XXX: This is broken when a reserved range splits the requested range */ + if (end >= 0x1f100000LL && begin < 0x1e000000LL) { + end = 0x1e000000LL; + } + if (end >= 0x1fd00000LL && begin < 0x1fc00000LL) { + end = 0x1fc00000LL; + } + + *start = begin; + *length = end - begin; +} + +static void gt64120_isd_mapping(GT64120State *s) +{ + /* Bits 14:0 of ISD map to bits 35:21 of the start address. */ + hwaddr start = ((hwaddr)s->regs[GT_ISD] << 21) & 0xFFFE00000ull; + hwaddr length = 0x1000; + + if (s->ISD_length) { + memory_region_del_subregion(get_system_memory(), &s->ISD_mem); + } + check_reserved_space(&start, &length); + length = 0x1000; + /* Map new address */ + trace_gt64120_isd_remap(s->ISD_length, s->ISD_start, length, start); + s->ISD_start = start; + s->ISD_length = length; + memory_region_add_subregion(get_system_memory(), s->ISD_start, &s->ISD_mem); +} + +static void gt64120_pci_mapping(GT64120State *s) +{ + /* Update PCI0IO mapping */ + if ((s->regs[GT_PCI0IOLD] & 0x7f) <= s->regs[GT_PCI0IOHD]) { + /* Unmap old IO address */ + if (s->PCI0IO_length) { + memory_region_del_subregion(get_system_memory(), &s->PCI0IO_mem); + object_unparent(OBJECT(&s->PCI0IO_mem)); + } + /* Map new IO address */ + s->PCI0IO_start = s->regs[GT_PCI0IOLD] << 21; + s->PCI0IO_length = ((s->regs[GT_PCI0IOHD] + 1) - + (s->regs[GT_PCI0IOLD] & 0x7f)) << 21; + if (s->PCI0IO_length) { + memory_region_init_alias(&s->PCI0IO_mem, OBJECT(s), "pci0-io", + get_system_io(), 0, s->PCI0IO_length); + memory_region_add_subregion(get_system_memory(), s->PCI0IO_start, + &s->PCI0IO_mem); + } + } + + /* Update PCI0M0 mapping */ + if ((s->regs[GT_PCI0M0LD] & 0x7f) <= s->regs[GT_PCI0M0HD]) { + /* Unmap old MEM address */ + if (s->PCI0M0_length) { + memory_region_del_subregion(get_system_memory(), &s->PCI0M0_mem); + object_unparent(OBJECT(&s->PCI0M0_mem)); + } + /* Map new mem address */ + s->PCI0M0_start = s->regs[GT_PCI0M0LD] << 21; + s->PCI0M0_length = ((s->regs[GT_PCI0M0HD] + 1) - + (s->regs[GT_PCI0M0LD] & 0x7f)) << 21; + if (s->PCI0M0_length) { + memory_region_init_alias(&s->PCI0M0_mem, OBJECT(s), "pci0-mem0", + &s->pci0_mem, s->PCI0M0_start, + s->PCI0M0_length); + memory_region_add_subregion(get_system_memory(), s->PCI0M0_start, + &s->PCI0M0_mem); + } + } + + /* Update PCI0M1 mapping */ + if ((s->regs[GT_PCI0M1LD] & 0x7f) <= s->regs[GT_PCI0M1HD]) { + /* Unmap old MEM address */ + if (s->PCI0M1_length) { + memory_region_del_subregion(get_system_memory(), &s->PCI0M1_mem); + object_unparent(OBJECT(&s->PCI0M1_mem)); + } + /* Map new mem address */ + s->PCI0M1_start = s->regs[GT_PCI0M1LD] << 21; + s->PCI0M1_length = ((s->regs[GT_PCI0M1HD] + 1) - + (s->regs[GT_PCI0M1LD] & 0x7f)) << 21; + if (s->PCI0M1_length) { + memory_region_init_alias(&s->PCI0M1_mem, OBJECT(s), "pci0-mem1", + &s->pci0_mem, s->PCI0M1_start, + s->PCI0M1_length); + memory_region_add_subregion(get_system_memory(), s->PCI0M1_start, + &s->PCI0M1_mem); + } + } +} + +static int gt64120_post_load(void *opaque, int version_id) +{ + GT64120State *s = opaque; + + gt64120_isd_mapping(s); + gt64120_pci_mapping(s); + + return 0; +} + +static const VMStateDescription vmstate_gt64120 = { + .name = "gt64120", + .version_id = 1, + .minimum_version_id = 1, + .post_load = gt64120_post_load, 
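+ /* The ISD/PCI windows are derived state: only regs[] is migrated, + * and gt64120_post_load() above rebuilds the mappings from it. */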
+ .fields = (VMStateField[]) { + VMSTATE_UINT32_ARRAY(regs, GT64120State, GT_REGS), + VMSTATE_END_OF_LIST() + } +}; + +static void gt64120_writel(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + GT64120State *s = opaque; + PCIHostState *phb = PCI_HOST_BRIDGE(s); + uint32_t saddr = addr >> 2; + + trace_gt64120_write(addr, val); + if (!(s->regs[GT_CPU] & 0x00001000)) { + val = bswap32(val); + } + + switch (saddr) { + + /* CPU Configuration */ + case GT_CPU: + s->regs[GT_CPU] = val; + break; + case GT_MULTI: + /* Read-only register as only one GT64xxx is present on the CPU bus */ + break; + + /* CPU Address Decode */ + case GT_PCI0IOLD: + s->regs[GT_PCI0IOLD] = val & 0x00007fff; + s->regs[GT_PCI0IOREMAP] = val & 0x000007ff; + gt64120_pci_mapping(s); + break; + case GT_PCI0M0LD: + s->regs[GT_PCI0M0LD] = val & 0x00007fff; + s->regs[GT_PCI0M0REMAP] = val & 0x000007ff; + gt64120_pci_mapping(s); + break; + case GT_PCI0M1LD: + s->regs[GT_PCI0M1LD] = val & 0x00007fff; + s->regs[GT_PCI0M1REMAP] = val & 0x000007ff; + gt64120_pci_mapping(s); + break; + case GT_PCI1IOLD: + s->regs[GT_PCI1IOLD] = val & 0x00007fff; + s->regs[GT_PCI1IOREMAP] = val & 0x000007ff; + break; + case GT_PCI1M0LD: + s->regs[GT_PCI1M0LD] = val & 0x00007fff; + s->regs[GT_PCI1M0REMAP] = val & 0x000007ff; + break; + case GT_PCI1M1LD: + s->regs[GT_PCI1M1LD] = val & 0x00007fff; + s->regs[GT_PCI1M1REMAP] = val & 0x000007ff; + break; + case GT_PCI0M0HD: + case GT_PCI0M1HD: + case GT_PCI0IOHD: + s->regs[saddr] = val & 0x0000007f; + gt64120_pci_mapping(s); + break; + case GT_PCI1IOHD: + case GT_PCI1M0HD: + case GT_PCI1M1HD: + s->regs[saddr] = val & 0x0000007f; + break; + case GT_ISD: + s->regs[saddr] = val & 0x00007fff; + gt64120_isd_mapping(s); + break; + + case GT_PCI0IOREMAP: + case GT_PCI0M0REMAP: + case GT_PCI0M1REMAP: + case GT_PCI1IOREMAP: + case GT_PCI1M0REMAP: + case GT_PCI1M1REMAP: + s->regs[saddr] = val & 0x000007ff; + break; + + /* CPU Error Report */ + case GT_CPUERR_ADDRLO: + case GT_CPUERR_ADDRHI: + case GT_CPUERR_DATALO: + case GT_CPUERR_DATAHI: + case GT_CPUERR_PARITY: + /* Read-only registers, do nothing */ + qemu_log_mask(LOG_GUEST_ERROR, + "gt64120: Read-only register write " + "reg:0x%03x size:%u value:0x%0*" PRIx64 "\n", + saddr << 2, size, size << 1, val); + break; + + /* CPU Sync Barrier */ + case GT_PCI0SYNC: + case GT_PCI1SYNC: + /* Read-only registers, do nothing */ + qemu_log_mask(LOG_GUEST_ERROR, + "gt64120: Read-only register write " + "reg:0x%03x size:%u value:0x%0*" PRIx64 "\n", + saddr << 2, size, size << 1, val); + break; + + /* SDRAM and Device Address Decode */ + case GT_SCS0LD: + case GT_SCS0HD: + case GT_SCS1LD: + case GT_SCS1HD: + case GT_SCS2LD: + case GT_SCS2HD: + case GT_SCS3LD: + case GT_SCS3HD: + case GT_CS0LD: + case GT_CS0HD: + case GT_CS1LD: + case GT_CS1HD: + case GT_CS2LD: + case GT_CS2HD: + case GT_CS3LD: + case GT_CS3HD: + case GT_BOOTLD: + case GT_BOOTHD: + case GT_ADERR: + /* SDRAM Configuration */ + case GT_SDRAM_CFG: + case GT_SDRAM_OPMODE: + case GT_SDRAM_BM: + case GT_SDRAM_ADDRDECODE: + /* Accept and ignore SDRAM interleave configuration */ + s->regs[saddr] = val; + break; + + /* Device Parameters */ + case GT_DEV_B0: + case GT_DEV_B1: + case GT_DEV_B2: + case GT_DEV_B3: + case GT_DEV_BOOT: + /* Not implemented */ + qemu_log_mask(LOG_UNIMP, + "gt64120: Unimplemented device register write " + "reg:0x%03x size:%u value:0x%0*" PRIx64 "\n", + saddr << 2, size, size << 1, val); + break; + + /* ECC */ + case GT_ECC_ERRDATALO: + case GT_ECC_ERRDATAHI: + case GT_ECC_MEM: + case 
GT_ECC_CALC: + case GT_ECC_ERRADDR: + /* Read-only registers, do nothing */ + qemu_log_mask(LOG_GUEST_ERROR, + "gt64120: Read-only register write " + "reg:0x%03x size:%u value:0x%0*" PRIx64 "\n", + saddr << 2, size, size << 1, val); + break; + + /* DMA Record */ + case GT_DMA0_CNT: + case GT_DMA1_CNT: + case GT_DMA2_CNT: + case GT_DMA3_CNT: + case GT_DMA0_SA: + case GT_DMA1_SA: + case GT_DMA2_SA: + case GT_DMA3_SA: + case GT_DMA0_DA: + case GT_DMA1_DA: + case GT_DMA2_DA: + case GT_DMA3_DA: + case GT_DMA0_NEXT: + case GT_DMA1_NEXT: + case GT_DMA2_NEXT: + case GT_DMA3_NEXT: + case GT_DMA0_CUR: + case GT_DMA1_CUR: + case GT_DMA2_CUR: + case GT_DMA3_CUR: + + /* DMA Channel Control */ + case GT_DMA0_CTRL: + case GT_DMA1_CTRL: + case GT_DMA2_CTRL: + case GT_DMA3_CTRL: + + /* DMA Arbiter */ + case GT_DMA_ARB: + /* Not implemented */ + qemu_log_mask(LOG_UNIMP, + "gt64120: Unimplemented DMA register write " + "reg:0x%03x size:%u value:0x%0*" PRIx64 "\n", + saddr << 2, size, size << 1, val); + break; + + /* Timer/Counter */ + case GT_TC0: + case GT_TC1: + case GT_TC2: + case GT_TC3: + case GT_TC_CONTROL: + /* Not implemented */ + qemu_log_mask(LOG_UNIMP, + "gt64120: Unimplemented timer register write " + "reg:0x%03x size:%u value:0x%0*" PRIx64 "\n", + saddr << 2, size, size << 1, val); + break; + + /* PCI Internal */ + case GT_PCI0_CMD: + case GT_PCI1_CMD: + s->regs[saddr] = val & 0x0401fc0f; + break; + case GT_PCI0_TOR: + case GT_PCI0_BS_SCS10: + case GT_PCI0_BS_SCS32: + case GT_PCI0_BS_CS20: + case GT_PCI0_BS_CS3BT: + case GT_PCI1_IACK: + case GT_PCI0_IACK: + case GT_PCI0_BARE: + case GT_PCI0_PREFMBR: + case GT_PCI0_SCS10_BAR: + case GT_PCI0_SCS32_BAR: + case GT_PCI0_CS20_BAR: + case GT_PCI0_CS3BT_BAR: + case GT_PCI0_SSCS10_BAR: + case GT_PCI0_SSCS32_BAR: + case GT_PCI0_SCS3BT_BAR: + case GT_PCI1_TOR: + case GT_PCI1_BS_SCS10: + case GT_PCI1_BS_SCS32: + case GT_PCI1_BS_CS20: + case GT_PCI1_BS_CS3BT: + case GT_PCI1_BARE: + case GT_PCI1_PREFMBR: + case GT_PCI1_SCS10_BAR: + case GT_PCI1_SCS32_BAR: + case GT_PCI1_CS20_BAR: + case GT_PCI1_CS3BT_BAR: + case GT_PCI1_SSCS10_BAR: + case GT_PCI1_SSCS32_BAR: + case GT_PCI1_SCS3BT_BAR: + case GT_PCI1_CFGADDR: + case GT_PCI1_CFGDATA: + /* not implemented */ + qemu_log_mask(LOG_UNIMP, + "gt64120: Unimplemented PCI register write " + "reg:0x%03x size:%u value:0x%0*" PRIx64 "\n", + saddr << 2, size, size << 1, val); + break; + case GT_PCI0_CFGADDR: + phb->config_reg = val & 0x80fffffc; + break; + case GT_PCI0_CFGDATA: + if (!(s->regs[GT_PCI0_CMD] & 1) && (phb->config_reg & 0x00fff800)) { + val = bswap32(val); + } + if (phb->config_reg & (1u << 31)) { + pci_data_write(phb->bus, phb->config_reg, val, 4); + } + break; + + /* Interrupts */ + case GT_INTRCAUSE: + /* not really implemented */ + s->regs[saddr] = ~(~(s->regs[saddr]) | ~(val & 0xfffffffe)); + s->regs[saddr] |= !!(s->regs[saddr] & 0xfffffffe); + trace_gt64120_write_intreg("INTRCAUSE", size, val); + break; + case GT_INTRMASK: + s->regs[saddr] = val & 0x3c3ffffe; + trace_gt64120_write_intreg("INTRMASK", size, val); + break; + case GT_PCI0_ICMASK: + s->regs[saddr] = val & 0x03fffffe; + trace_gt64120_write_intreg("ICMASK", size, val); + break; + case GT_PCI0_SERR0MASK: + s->regs[saddr] = val & 0x0000003f; + trace_gt64120_write_intreg("SERR0MASK", size, val); + break; + + /* Reserved when only PCI_0 is configured. 
*/ + case GT_HINTRCAUSE: + case GT_CPU_INTSEL: + case GT_PCI0_INTSEL: + case GT_HINTRMASK: + case GT_PCI0_HICMASK: + case GT_PCI1_SERR1MASK: + /* not implemented */ + break; + + /* SDRAM Parameters */ + case GT_SDRAM_B0: + case GT_SDRAM_B1: + case GT_SDRAM_B2: + case GT_SDRAM_B3: + /* + * We don't simulate electrical parameters of the SDRAM. + * Accept, but ignore the values. + */ + s->regs[saddr] = val; + break; + + default: + qemu_log_mask(LOG_GUEST_ERROR, + "gt64120: Illegal register write " + "reg:0x%03x size:%u value:0x%0*" PRIx64 "\n", + saddr << 2, size, size << 1, val); + break; + } +} + +static uint64_t gt64120_readl(void *opaque, + hwaddr addr, unsigned size) +{ + GT64120State *s = opaque; + PCIHostState *phb = PCI_HOST_BRIDGE(s); + uint32_t val; + uint32_t saddr = addr >> 2; + + switch (saddr) { + + /* CPU Configuration */ + case GT_MULTI: + /* + * Only one GT64xxx is present on the CPU bus, return + * the initial value. + */ + val = s->regs[saddr]; + break; + + /* CPU Error Report */ + case GT_CPUERR_ADDRLO: + case GT_CPUERR_ADDRHI: + case GT_CPUERR_DATALO: + case GT_CPUERR_DATAHI: + case GT_CPUERR_PARITY: + /* Emulated memory has no error, always return the initial values. */ + val = s->regs[saddr]; + break; + + /* CPU Sync Barrier */ + case GT_PCI0SYNC: + case GT_PCI1SYNC: + /* + * Reading those register should empty all FIFO on the PCI + * bus, which are not emulated. The return value should be + * a random value that should be ignored. + */ + val = 0xc000ffee; + break; + + /* ECC */ + case GT_ECC_ERRDATALO: + case GT_ECC_ERRDATAHI: + case GT_ECC_MEM: + case GT_ECC_CALC: + case GT_ECC_ERRADDR: + /* Emulated memory has no error, always return the initial values. */ + val = s->regs[saddr]; + break; + + case GT_CPU: + case GT_SCS10LD: + case GT_SCS10HD: + case GT_SCS32LD: + case GT_SCS32HD: + case GT_CS20LD: + case GT_CS20HD: + case GT_CS3BOOTLD: + case GT_CS3BOOTHD: + case GT_SCS10AR: + case GT_SCS32AR: + case GT_CS20R: + case GT_CS3BOOTR: + case GT_PCI0IOLD: + case GT_PCI0M0LD: + case GT_PCI0M1LD: + case GT_PCI1IOLD: + case GT_PCI1M0LD: + case GT_PCI1M1LD: + case GT_PCI0IOHD: + case GT_PCI0M0HD: + case GT_PCI0M1HD: + case GT_PCI1IOHD: + case GT_PCI1M0HD: + case GT_PCI1M1HD: + case GT_PCI0IOREMAP: + case GT_PCI0M0REMAP: + case GT_PCI0M1REMAP: + case GT_PCI1IOREMAP: + case GT_PCI1M0REMAP: + case GT_PCI1M1REMAP: + case GT_ISD: + val = s->regs[saddr]; + break; + case GT_PCI0_IACK: + /* Read the IRQ number */ + val = pic_read_irq(isa_pic); + break; + + /* SDRAM and Device Address Decode */ + case GT_SCS0LD: + case GT_SCS0HD: + case GT_SCS1LD: + case GT_SCS1HD: + case GT_SCS2LD: + case GT_SCS2HD: + case GT_SCS3LD: + case GT_SCS3HD: + case GT_CS0LD: + case GT_CS0HD: + case GT_CS1LD: + case GT_CS1HD: + case GT_CS2LD: + case GT_CS2HD: + case GT_CS3LD: + case GT_CS3HD: + case GT_BOOTLD: + case GT_BOOTHD: + case GT_ADERR: + val = s->regs[saddr]; + break; + + /* SDRAM Configuration */ + case GT_SDRAM_CFG: + case GT_SDRAM_OPMODE: + case GT_SDRAM_BM: + case GT_SDRAM_ADDRDECODE: + val = s->regs[saddr]; + break; + + /* SDRAM Parameters */ + case GT_SDRAM_B0: + case GT_SDRAM_B1: + case GT_SDRAM_B2: + case GT_SDRAM_B3: + /* + * We don't simulate electrical parameters of the SDRAM. + * Just return the last written value. 
+ */ + val = s->regs[saddr]; + break; + + /* Device Parameters */ + case GT_DEV_B0: + case GT_DEV_B1: + case GT_DEV_B2: + case GT_DEV_B3: + case GT_DEV_BOOT: + val = s->regs[saddr]; + break; + + /* DMA Record */ + case GT_DMA0_CNT: + case GT_DMA1_CNT: + case GT_DMA2_CNT: + case GT_DMA3_CNT: + case GT_DMA0_SA: + case GT_DMA1_SA: + case GT_DMA2_SA: + case GT_DMA3_SA: + case GT_DMA0_DA: + case GT_DMA1_DA: + case GT_DMA2_DA: + case GT_DMA3_DA: + case GT_DMA0_NEXT: + case GT_DMA1_NEXT: + case GT_DMA2_NEXT: + case GT_DMA3_NEXT: + case GT_DMA0_CUR: + case GT_DMA1_CUR: + case GT_DMA2_CUR: + case GT_DMA3_CUR: + val = s->regs[saddr]; + break; + + /* DMA Channel Control */ + case GT_DMA0_CTRL: + case GT_DMA1_CTRL: + case GT_DMA2_CTRL: + case GT_DMA3_CTRL: + val = s->regs[saddr]; + break; + + /* DMA Arbiter */ + case GT_DMA_ARB: + val = s->regs[saddr]; + break; + + /* Timer/Counter */ + case GT_TC0: + case GT_TC1: + case GT_TC2: + case GT_TC3: + case GT_TC_CONTROL: + val = s->regs[saddr]; + break; + + /* PCI Internal */ + case GT_PCI0_CFGADDR: + val = phb->config_reg; + break; + case GT_PCI0_CFGDATA: + if (!(phb->config_reg & (1 << 31))) { + val = 0xffffffff; + } else { + val = pci_data_read(phb->bus, phb->config_reg, 4); + } + if (!(s->regs[GT_PCI0_CMD] & 1) && (phb->config_reg & 0x00fff800)) { + val = bswap32(val); + } + break; + + case GT_PCI0_CMD: + case GT_PCI0_TOR: + case GT_PCI0_BS_SCS10: + case GT_PCI0_BS_SCS32: + case GT_PCI0_BS_CS20: + case GT_PCI0_BS_CS3BT: + case GT_PCI1_IACK: + case GT_PCI0_BARE: + case GT_PCI0_PREFMBR: + case GT_PCI0_SCS10_BAR: + case GT_PCI0_SCS32_BAR: + case GT_PCI0_CS20_BAR: + case GT_PCI0_CS3BT_BAR: + case GT_PCI0_SSCS10_BAR: + case GT_PCI0_SSCS32_BAR: + case GT_PCI0_SCS3BT_BAR: + case GT_PCI1_CMD: + case GT_PCI1_TOR: + case GT_PCI1_BS_SCS10: + case GT_PCI1_BS_SCS32: + case GT_PCI1_BS_CS20: + case GT_PCI1_BS_CS3BT: + case GT_PCI1_BARE: + case GT_PCI1_PREFMBR: + case GT_PCI1_SCS10_BAR: + case GT_PCI1_SCS32_BAR: + case GT_PCI1_CS20_BAR: + case GT_PCI1_CS3BT_BAR: + case GT_PCI1_SSCS10_BAR: + case GT_PCI1_SSCS32_BAR: + case GT_PCI1_SCS3BT_BAR: + case GT_PCI1_CFGADDR: + case GT_PCI1_CFGDATA: + val = s->regs[saddr]; + break; + + /* Interrupts */ + case GT_INTRCAUSE: + val = s->regs[saddr]; + trace_gt64120_read_intreg("INTRCAUSE", size, val); + break; + case GT_INTRMASK: + val = s->regs[saddr]; + trace_gt64120_read_intreg("INTRMASK", size, val); + break; + case GT_PCI0_ICMASK: + val = s->regs[saddr]; + trace_gt64120_read_intreg("ICMASK", size, val); + break; + case GT_PCI0_SERR0MASK: + val = s->regs[saddr]; + trace_gt64120_read_intreg("SERR0MASK", size, val); + break; + + /* Reserved when only PCI_0 is configured. 
*/ + case GT_HINTRCAUSE: + case GT_CPU_INTSEL: + case GT_PCI0_INTSEL: + case GT_HINTRMASK: + case GT_PCI0_HICMASK: + case GT_PCI1_SERR1MASK: + val = s->regs[saddr]; + break; + + default: + val = s->regs[saddr]; + qemu_log_mask(LOG_GUEST_ERROR, + "gt64120: Illegal register read " + "reg:0x%03x size:%u value:0x%0*x\n", + saddr << 2, size, size << 1, val); + break; + } + + if (!(s->regs[GT_CPU] & 0x00001000)) { + val = bswap32(val); + } + trace_gt64120_read(addr, val); + + return val; +} + +static const MemoryRegionOps isd_mem_ops = { + .read = gt64120_readl, + .write = gt64120_writel, + .endianness = DEVICE_NATIVE_ENDIAN, + .impl = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +static int gt64120_pci_map_irq(PCIDevice *pci_dev, int irq_num) +{ + int slot; + + slot = PCI_SLOT(pci_dev->devfn); + + switch (slot) { + /* PIIX4 USB */ + case 10: + return 3; + /* AMD 79C973 Ethernet */ + case 11: + return 1; + /* Crystal 4281 Sound */ + case 12: + return 2; + /* PCI slot 1 to 4 */ + case 18 ... 21: + return ((slot - 18) + irq_num) & 0x03; + /* Unknown device, don't do any translation */ + default: + return irq_num; + } +} + +static int pci_irq_levels[4]; + +static void gt64120_pci_set_irq(void *opaque, int irq_num, int level) +{ + int i, pic_irq, pic_level; + qemu_irq *pic = opaque; + + pci_irq_levels[irq_num] = level; + + /* now we change the pic irq level according to the piix irq mappings */ + /* XXX: optimize */ + pic_irq = piix4_dev->config[PIIX_PIRQCA + irq_num]; + if (pic_irq < 16) { + /* The pic level is the logical OR of all the PCI irqs mapped to it. */ + pic_level = 0; + for (i = 0; i < 4; i++) { + if (pic_irq == piix4_dev->config[PIIX_PIRQCA + i]) { + pic_level |= pci_irq_levels[i]; + } + } + qemu_set_irq(pic[pic_irq], pic_level); + } +} + + +static void gt64120_reset(DeviceState *dev) +{ + GT64120State *s = GT64120_PCI_HOST_BRIDGE(dev); + + /* FIXME: Malta specific hw assumptions ahead */ + + /* CPU Configuration */ +#ifdef TARGET_WORDS_BIGENDIAN + s->regs[GT_CPU] = 0x00000000; +#else + s->regs[GT_CPU] = 0x00001000; +#endif + s->regs[GT_MULTI] = 0x00000003; + + /* CPU Address decode */ + s->regs[GT_SCS10LD] = 0x00000000; + s->regs[GT_SCS10HD] = 0x00000007; + s->regs[GT_SCS32LD] = 0x00000008; + s->regs[GT_SCS32HD] = 0x0000000f; + s->regs[GT_CS20LD] = 0x000000e0; + s->regs[GT_CS20HD] = 0x00000070; + s->regs[GT_CS3BOOTLD] = 0x000000f8; + s->regs[GT_CS3BOOTHD] = 0x0000007f; + + s->regs[GT_PCI0IOLD] = 0x00000080; + s->regs[GT_PCI0IOHD] = 0x0000000f; + s->regs[GT_PCI0M0LD] = 0x00000090; + s->regs[GT_PCI0M0HD] = 0x0000001f; + s->regs[GT_ISD] = 0x000000a0; + s->regs[GT_PCI0M1LD] = 0x00000790; + s->regs[GT_PCI0M1HD] = 0x0000001f; + s->regs[GT_PCI1IOLD] = 0x00000100; + s->regs[GT_PCI1IOHD] = 0x0000000f; + s->regs[GT_PCI1M0LD] = 0x00000110; + s->regs[GT_PCI1M0HD] = 0x0000001f; + s->regs[GT_PCI1M1LD] = 0x00000120; + s->regs[GT_PCI1M1HD] = 0x0000002f; + + s->regs[GT_SCS10AR] = 0x00000000; + s->regs[GT_SCS32AR] = 0x00000008; + s->regs[GT_CS20R] = 0x000000e0; + s->regs[GT_CS3BOOTR] = 0x000000f8; + + s->regs[GT_PCI0IOREMAP] = 0x00000080; + s->regs[GT_PCI0M0REMAP] = 0x00000090; + s->regs[GT_PCI0M1REMAP] = 0x00000790; + s->regs[GT_PCI1IOREMAP] = 0x00000100; + s->regs[GT_PCI1M0REMAP] = 0x00000110; + s->regs[GT_PCI1M1REMAP] = 0x00000120; + + /* CPU Error Report */ + s->regs[GT_CPUERR_ADDRLO] = 0x00000000; + s->regs[GT_CPUERR_ADDRHI] = 0x00000000; + s->regs[GT_CPUERR_DATALO] = 0xffffffff; + s->regs[GT_CPUERR_DATAHI] = 0xffffffff; + s->regs[GT_CPUERR_PARITY] = 0x000000ff; + + /* CPU Sync 
Barrier */ + s->regs[GT_PCI0SYNC] = 0x00000000; + s->regs[GT_PCI1SYNC] = 0x00000000; + + /* SDRAM and Device Address Decode */ + s->regs[GT_SCS0LD] = 0x00000000; + s->regs[GT_SCS0HD] = 0x00000007; + s->regs[GT_SCS1LD] = 0x00000008; + s->regs[GT_SCS1HD] = 0x0000000f; + s->regs[GT_SCS2LD] = 0x00000010; + s->regs[GT_SCS2HD] = 0x00000017; + s->regs[GT_SCS3LD] = 0x00000018; + s->regs[GT_SCS3HD] = 0x0000001f; + s->regs[GT_CS0LD] = 0x000000c0; + s->regs[GT_CS0HD] = 0x000000c7; + s->regs[GT_CS1LD] = 0x000000c8; + s->regs[GT_CS1HD] = 0x000000cf; + s->regs[GT_CS2LD] = 0x000000d0; + s->regs[GT_CS2HD] = 0x000000df; + s->regs[GT_CS3LD] = 0x000000f0; + s->regs[GT_CS3HD] = 0x000000fb; + s->regs[GT_BOOTLD] = 0x000000fc; + s->regs[GT_BOOTHD] = 0x000000ff; + s->regs[GT_ADERR] = 0xffffffff; + + /* SDRAM Configuration */ + s->regs[GT_SDRAM_CFG] = 0x00000200; + s->regs[GT_SDRAM_OPMODE] = 0x00000000; + s->regs[GT_SDRAM_BM] = 0x00000007; + s->regs[GT_SDRAM_ADDRDECODE] = 0x00000002; + + /* SDRAM Parameters */ + s->regs[GT_SDRAM_B0] = 0x00000005; + s->regs[GT_SDRAM_B1] = 0x00000005; + s->regs[GT_SDRAM_B2] = 0x00000005; + s->regs[GT_SDRAM_B3] = 0x00000005; + + /* ECC */ + s->regs[GT_ECC_ERRDATALO] = 0x00000000; + s->regs[GT_ECC_ERRDATAHI] = 0x00000000; + s->regs[GT_ECC_MEM] = 0x00000000; + s->regs[GT_ECC_CALC] = 0x00000000; + s->regs[GT_ECC_ERRADDR] = 0x00000000; + + /* Device Parameters */ + s->regs[GT_DEV_B0] = 0x386fffff; + s->regs[GT_DEV_B1] = 0x386fffff; + s->regs[GT_DEV_B2] = 0x386fffff; + s->regs[GT_DEV_B3] = 0x386fffff; + s->regs[GT_DEV_BOOT] = 0x146fffff; + + /* DMA registers are all zeroed at reset */ + + /* Timer/Counter */ + s->regs[GT_TC0] = 0xffffffff; + s->regs[GT_TC1] = 0x00ffffff; + s->regs[GT_TC2] = 0x00ffffff; + s->regs[GT_TC3] = 0x00ffffff; + s->regs[GT_TC_CONTROL] = 0x00000000; + + /* PCI Internal */ +#ifdef TARGET_WORDS_BIGENDIAN + s->regs[GT_PCI0_CMD] = 0x00000000; +#else + s->regs[GT_PCI0_CMD] = 0x00010001; +#endif + s->regs[GT_PCI0_TOR] = 0x0000070f; + s->regs[GT_PCI0_BS_SCS10] = 0x00fff000; + s->regs[GT_PCI0_BS_SCS32] = 0x00fff000; + s->regs[GT_PCI0_BS_CS20] = 0x01fff000; + s->regs[GT_PCI0_BS_CS3BT] = 0x00fff000; + s->regs[GT_PCI1_IACK] = 0x00000000; + s->regs[GT_PCI0_IACK] = 0x00000000; + s->regs[GT_PCI0_BARE] = 0x0000000f; + s->regs[GT_PCI0_PREFMBR] = 0x00000040; + s->regs[GT_PCI0_SCS10_BAR] = 0x00000000; + s->regs[GT_PCI0_SCS32_BAR] = 0x01000000; + s->regs[GT_PCI0_CS20_BAR] = 0x1c000000; + s->regs[GT_PCI0_CS3BT_BAR] = 0x1f000000; + s->regs[GT_PCI0_SSCS10_BAR] = 0x00000000; + s->regs[GT_PCI0_SSCS32_BAR] = 0x01000000; + s->regs[GT_PCI0_SCS3BT_BAR] = 0x1f000000; +#ifdef TARGET_WORDS_BIGENDIAN + s->regs[GT_PCI1_CMD] = 0x00000000; +#else + s->regs[GT_PCI1_CMD] = 0x00010001; +#endif + s->regs[GT_PCI1_TOR] = 0x0000070f; + s->regs[GT_PCI1_BS_SCS10] = 0x00fff000; + s->regs[GT_PCI1_BS_SCS32] = 0x00fff000; + s->regs[GT_PCI1_BS_CS20] = 0x01fff000; + s->regs[GT_PCI1_BS_CS3BT] = 0x00fff000; + s->regs[GT_PCI1_BARE] = 0x0000000f; + s->regs[GT_PCI1_PREFMBR] = 0x00000040; + s->regs[GT_PCI1_SCS10_BAR] = 0x00000000; + s->regs[GT_PCI1_SCS32_BAR] = 0x01000000; + s->regs[GT_PCI1_CS20_BAR] = 0x1c000000; + s->regs[GT_PCI1_CS3BT_BAR] = 0x1f000000; + s->regs[GT_PCI1_SSCS10_BAR] = 0x00000000; + s->regs[GT_PCI1_SSCS32_BAR] = 0x01000000; + s->regs[GT_PCI1_SCS3BT_BAR] = 0x1f000000; + s->regs[GT_PCI1_CFGADDR] = 0x00000000; + s->regs[GT_PCI1_CFGDATA] = 0x00000000; + s->regs[GT_PCI0_CFGADDR] = 0x00000000; + + /* Interrupt registers are all zeroed at reset */ + + gt64120_isd_mapping(s); + gt64120_pci_mapping(s); +} + 
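The reset defaults above are easier to read alongside the address arithmetic in gt64120_isd_mapping(): bits 14:0 of GT_ISD become bits 35:21 of the register window's base. The following minimal standalone C sketch (an illustration only, not part of this patch) reproduces that computation for the reset value 0xa0:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Same arithmetic as gt64120_isd_mapping(): bits 14:0 of the ISD
     * register select bits 35:21 of the 4 KiB window's physical base. */
    static uint64_t gt64120_isd_base(uint32_t isd_reg)
    {
        return ((uint64_t)isd_reg << 21) & 0xFFFE00000ull;
    }

    int main(void)
    {
        /* Reset default from gt64120_reset(): GT_ISD = 0x000000a0 */
        printf("ISD base: 0x%" PRIx64 "\n",
               gt64120_isd_base(0xa0)); /* prints 0x14000000 */
        return 0;
    }

So on this model the ISD registers initially sit at physical 0x14000000 until firmware reprograms GT_ISD, which is exactly what gt64120_isd_mapping() handles at run time.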
+static void gt64120_realize(DeviceState *dev, Error **errp) +{ + GT64120State *s = GT64120_PCI_HOST_BRIDGE(dev); + + memory_region_init_io(&s->ISD_mem, OBJECT(dev), &isd_mem_ops, s, + "gt64120-isd", 0x1000); +} + +PCIBus *gt64120_register(qemu_irq *pic) +{ + GT64120State *d; + PCIHostState *phb; + DeviceState *dev; + + dev = qdev_new(TYPE_GT64120_PCI_HOST_BRIDGE); + d = GT64120_PCI_HOST_BRIDGE(dev); + phb = PCI_HOST_BRIDGE(dev); + memory_region_init(&d->pci0_mem, OBJECT(dev), "pci0-mem", 4 * GiB); + address_space_init(&d->pci0_mem_as, &d->pci0_mem, "pci0-mem"); + phb->bus = pci_register_root_bus(dev, "pci", + gt64120_pci_set_irq, gt64120_pci_map_irq, + pic, + &d->pci0_mem, + get_system_io(), + PCI_DEVFN(18, 0), 4, TYPE_PCI_BUS); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + + pci_create_simple(phb->bus, PCI_DEVFN(0, 0), "gt64120_pci"); + return phb->bus; +} + +static void gt64120_pci_realize(PCIDevice *d, Error **errp) +{ + /* FIXME: Malta specific hw assumptions ahead */ + pci_set_word(d->config + PCI_COMMAND, 0); + pci_set_word(d->config + PCI_STATUS, + PCI_STATUS_FAST_BACK | PCI_STATUS_DEVSEL_MEDIUM); + pci_config_set_prog_interface(d->config, 0); + pci_set_long(d->config + PCI_BASE_ADDRESS_0, 0x00000008); + pci_set_long(d->config + PCI_BASE_ADDRESS_1, 0x01000008); + pci_set_long(d->config + PCI_BASE_ADDRESS_2, 0x1c000000); + pci_set_long(d->config + PCI_BASE_ADDRESS_3, 0x1f000000); + pci_set_long(d->config + PCI_BASE_ADDRESS_4, 0x14000000); + pci_set_long(d->config + PCI_BASE_ADDRESS_5, 0x14000001); + pci_set_byte(d->config + 0x3d, 0x01); +} + +static void gt64120_pci_class_init(ObjectClass *klass, void *data) +{ + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + DeviceClass *dc = DEVICE_CLASS(klass); + + k->realize = gt64120_pci_realize; + k->vendor_id = PCI_VENDOR_ID_MARVELL; + k->device_id = PCI_DEVICE_ID_MARVELL_GT6412X; + k->revision = 0x10; + k->class_id = PCI_CLASS_BRIDGE_HOST; + /* + * PCI-facing part of the host bridge, not usable without the + * host-facing part, which can't be device_add'ed, yet. 
+ */ + dc->user_creatable = false; +} + +static const TypeInfo gt64120_pci_info = { + .name = "gt64120_pci", + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(PCIDevice), + .class_init = gt64120_pci_class_init, + .interfaces = (InterfaceInfo[]) { + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { }, + }, +}; + +static void gt64120_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); + dc->realize = gt64120_realize; + dc->reset = gt64120_reset; + dc->vmsd = &vmstate_gt64120; +} + +static const TypeInfo gt64120_info = { + .name = TYPE_GT64120_PCI_HOST_BRIDGE, + .parent = TYPE_PCI_HOST_BRIDGE, + .instance_size = sizeof(GT64120State), + .class_init = gt64120_class_init, +}; + +static void gt64120_pci_register_types(void) +{ + type_register_static(&gt64120_info); + type_register_static(&gt64120_pci_info); +} + +type_init(gt64120_pci_register_types) diff --git a/hw/mips/jazz.c b/hw/mips/jazz.c new file mode 100644 index 000000000..f5a26e174 --- /dev/null +++ b/hw/mips/jazz.c @@ -0,0 +1,441 @@ +/* + * QEMU MIPS Jazz support + * + * Copyright (c) 2007-2008 Hervé Poussineau + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include "qemu-common.h" +#include "qemu/datadir.h" +#include "hw/clock.h" +#include "hw/mips/mips.h" +#include "hw/mips/cpudevs.h" +#include "hw/intc/i8259.h" +#include "hw/dma/i8257.h" +#include "hw/char/serial.h" +#include "hw/char/parallel.h" +#include "hw/isa/isa.h" +#include "hw/block/fdc.h" +#include "sysemu/sysemu.h" +#include "hw/boards.h" +#include "net/net.h" +#include "hw/scsi/esp.h" +#include "hw/mips/bios.h" +#include "hw/loader.h" +#include "hw/rtc/mc146818rtc.h" +#include "hw/timer/i8254.h" +#include "hw/display/vga.h" +#include "hw/audio/pcspk.h" +#include "hw/input/i8042.h" +#include "hw/sysbus.h" +#include "sysemu/qtest.h" +#include "sysemu/reset.h" +#include "qapi/error.h" +#include "qemu/error-report.h" +#include "qemu/help_option.h" +#ifdef CONFIG_TCG +#include "hw/core/tcg-cpu-ops.h" +#endif /* CONFIG_TCG */ + +enum jazz_model_e { + JAZZ_MAGNUM, + JAZZ_PICA61, +}; + +static void main_cpu_reset(void *opaque) +{ + MIPSCPU *cpu = opaque; + + cpu_reset(CPU(cpu)); +} + +static uint64_t rtc_read(void *opaque, hwaddr addr, unsigned size) +{ + uint8_t val; + address_space_read(&address_space_memory, 0x90000071, + MEMTXATTRS_UNSPECIFIED, &val, 1); + return val; +} + +static void rtc_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + uint8_t buf = val & 0xff; + address_space_write(&address_space_memory, 0x90000071, + MEMTXATTRS_UNSPECIFIED, &buf, 1); +} + +static const MemoryRegionOps rtc_ops = { + .read = rtc_read, + .write = rtc_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static uint64_t dma_dummy_read(void *opaque, hwaddr addr, + unsigned size) +{ + /* + * Nothing to do. That is only to ensure that + * the current DMA acknowledge cycle is completed. + */ + return 0xff; +} + +static void dma_dummy_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + /* + * Nothing to do. That is only to ensure that + * the current DMA acknowledge cycle is completed. + */ +} + +static const MemoryRegionOps dma_dummy_ops = { + .read = dma_dummy_read, + .write = dma_dummy_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +#define MAGNUM_BIOS_SIZE_MAX 0x7e000 +#define MAGNUM_BIOS_SIZE \ + (BIOS_SIZE < MAGNUM_BIOS_SIZE_MAX ? 
BIOS_SIZE : MAGNUM_BIOS_SIZE_MAX) + +#define SONIC_PROM_SIZE 0x1000 + +static void mips_jazz_init(MachineState *machine, + enum jazz_model_e jazz_model) +{ + MemoryRegion *address_space = get_system_memory(); + char *filename; + int bios_size, n, big_endian; + Clock *cpuclk; + MIPSCPU *cpu; + MIPSCPUClass *mcc; + CPUMIPSState *env; + qemu_irq *i8259; + rc4030_dma *dmas; + IOMMUMemoryRegion *rc4030_dma_mr; + MemoryRegion *isa_mem = g_new(MemoryRegion, 1); + MemoryRegion *isa_io = g_new(MemoryRegion, 1); + MemoryRegion *rtc = g_new(MemoryRegion, 1); + MemoryRegion *i8042 = g_new(MemoryRegion, 1); + MemoryRegion *dma_dummy = g_new(MemoryRegion, 1); + MemoryRegion *dp8393x_prom = g_new(MemoryRegion, 1); + NICInfo *nd; + DeviceState *dev, *rc4030; + SysBusDevice *sysbus; + ISABus *isa_bus; + ISADevice *pit; + DriveInfo *fds[MAX_FD]; + MemoryRegion *bios = g_new(MemoryRegion, 1); + MemoryRegion *bios2 = g_new(MemoryRegion, 1); + SysBusESPState *sysbus_esp; + ESPState *esp; + static const struct { + unsigned freq_hz; + unsigned pll_mult; + } ext_clk[] = { + [JAZZ_MAGNUM] = {50000000, 2}, + [JAZZ_PICA61] = {33333333, 4}, + }; + +#ifdef TARGET_WORDS_BIGENDIAN + big_endian = 1; +#else + big_endian = 0; +#endif + + if (machine->ram_size > 256 * MiB) { + error_report("RAM size more than 256Mb is not supported"); + exit(EXIT_FAILURE); + } + + cpuclk = clock_new(OBJECT(machine), "cpu-refclk"); + clock_set_hz(cpuclk, ext_clk[jazz_model].freq_hz + * ext_clk[jazz_model].pll_mult); + + /* init CPUs */ + cpu = mips_cpu_create_with_clock(machine->cpu_type, cpuclk); + env = &cpu->env; + qemu_register_reset(main_cpu_reset, cpu); + + /* + * Chipset returns 0 in invalid reads and do not raise data exceptions. + * However, we can't simply add a global memory region to catch + * everything, as this would make all accesses including instruction + * accesses be ignored and not raise exceptions. + * + * NOTE: this behaviour of raising exceptions for bad instruction + * fetches but not bad data accesses was added in commit 54e755588cf1e9 + * to restore behaviour broken by c658b94f6e8c206, but it is not clear + * whether the real hardware behaves this way. It is possible that + * real hardware ignores bad instruction fetches as well -- if so then + * we could replace this hijacking of CPU methods with a simple global + * memory region that catches all memory accesses, as we do on Malta. + */ + mcc = MIPS_CPU_GET_CLASS(cpu); + mcc->no_data_aborts = true; + + /* allocate RAM */ + memory_region_add_subregion(address_space, 0, machine->ram); + + memory_region_init_rom(bios, NULL, "mips_jazz.bios", MAGNUM_BIOS_SIZE, + &error_fatal); + memory_region_init_alias(bios2, NULL, "mips_jazz.bios", bios, + 0, MAGNUM_BIOS_SIZE); + memory_region_add_subregion(address_space, 0x1fc00000LL, bios); + memory_region_add_subregion(address_space, 0xfff00000LL, bios2); + + /* load the BIOS image. 
*/ + filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, machine->firmware ?: BIOS_FILENAME); + if (filename) { + bios_size = load_image_targphys(filename, 0xfff00000LL, + MAGNUM_BIOS_SIZE); + g_free(filename); + } else { + bios_size = -1; + } + if ((bios_size < 0 || bios_size > MAGNUM_BIOS_SIZE) + && machine->firmware && !qtest_enabled()) { + error_report("Could not load MIPS bios '%s'", machine->firmware); + exit(1); + } + + /* Init CPU internal devices */ + cpu_mips_irq_init_cpu(cpu); + cpu_mips_clock_init(cpu); + + /* Chipset */ + rc4030 = rc4030_init(&dmas, &rc4030_dma_mr); + sysbus = SYS_BUS_DEVICE(rc4030); + sysbus_connect_irq(sysbus, 0, env->irq[6]); + sysbus_connect_irq(sysbus, 1, env->irq[3]); + memory_region_add_subregion(address_space, 0x80000000, + sysbus_mmio_get_region(sysbus, 0)); + memory_region_add_subregion(address_space, 0xf0000000, + sysbus_mmio_get_region(sysbus, 1)); + memory_region_init_io(dma_dummy, NULL, &dma_dummy_ops, + NULL, "dummy_dma", 0x1000); + memory_region_add_subregion(address_space, 0x8000d000, dma_dummy); + + memory_region_init_rom(dp8393x_prom, NULL, "dp8393x-jazz.prom", + SONIC_PROM_SIZE, &error_fatal); + memory_region_add_subregion(address_space, 0x8000b000, dp8393x_prom); + + /* ISA bus: IO space at 0x90000000, mem space at 0x91000000 */ + memory_region_init(isa_io, NULL, "isa-io", 0x00010000); + memory_region_init(isa_mem, NULL, "isa-mem", 0x01000000); + memory_region_add_subregion(address_space, 0x90000000, isa_io); + memory_region_add_subregion(address_space, 0x91000000, isa_mem); + isa_bus = isa_bus_new(NULL, isa_mem, isa_io, &error_abort); + + /* ISA devices */ + i8259 = i8259_init(isa_bus, env->irq[4]); + isa_bus_irqs(isa_bus, i8259); + i8257_dma_init(isa_bus, 0); + pit = i8254_pit_init(isa_bus, 0x40, 0, NULL); + pcspk_init(isa_new(TYPE_PC_SPEAKER), isa_bus, pit); + + /* Video card */ + switch (jazz_model) { + case JAZZ_MAGNUM: + dev = qdev_new("sysbus-g364"); + sysbus = SYS_BUS_DEVICE(dev); + sysbus_realize_and_unref(sysbus, &error_fatal); + sysbus_mmio_map(sysbus, 0, 0x60080000); + sysbus_mmio_map(sysbus, 1, 0x40000000); + sysbus_connect_irq(sysbus, 0, qdev_get_gpio_in(rc4030, 3)); + { + /* Simple ROM, so user doesn't have to provide one */ + MemoryRegion *rom_mr = g_new(MemoryRegion, 1); + memory_region_init_rom(rom_mr, NULL, "g364fb.rom", 0x80000, + &error_fatal); + uint8_t *rom = memory_region_get_ram_ptr(rom_mr); + memory_region_add_subregion(address_space, 0x60000000, rom_mr); + rom[0] = 0x10; /* Mips G364 */ + } + break; + case JAZZ_PICA61: + isa_vga_mm_init(0x40000000, 0x60000000, 0, get_system_memory()); + break; + default: + break; + } + + /* Network controller */ + for (n = 0; n < nb_nics; n++) { + nd = &nd_table[n]; + if (!nd->model) { + nd->model = g_strdup("dp83932"); + } + if (strcmp(nd->model, "dp83932") == 0) { + int checksum, i; + uint8_t *prom; + + qemu_check_nic_model(nd, "dp83932"); + + dev = qdev_new("dp8393x"); + qdev_set_nic_properties(dev, nd); + qdev_prop_set_uint8(dev, "it_shift", 2); + qdev_prop_set_bit(dev, "big_endian", big_endian > 0); + object_property_set_link(OBJECT(dev), "dma_mr", + OBJECT(rc4030_dma_mr), &error_abort); + sysbus = SYS_BUS_DEVICE(dev); + sysbus_realize_and_unref(sysbus, &error_fatal); + sysbus_mmio_map(sysbus, 0, 0x80001000); + sysbus_connect_irq(sysbus, 0, qdev_get_gpio_in(rc4030, 4)); + + /* Add MAC address with valid checksum to PROM */ + prom = memory_region_get_ram_ptr(dp8393x_prom); + checksum = 0; + for (i = 0; i < 6; i++) { + prom[i] = nd->macaddr.a[i]; + checksum += prom[i]; + if 
(checksum > 0xff) { + checksum = (checksum + 1) & 0xff; + } + } + prom[7] = 0xff - checksum; + break; + } else if (is_help_option(nd->model)) { + error_report("Supported NICs: dp83932"); + exit(1); + } else { + error_report("Unsupported NIC: %s", nd->model); + exit(1); + } + } + + /* SCSI adapter */ + dev = qdev_new(TYPE_SYSBUS_ESP); + sysbus_esp = SYSBUS_ESP(dev); + esp = &sysbus_esp->esp; + esp->dma_memory_read = rc4030_dma_read; + esp->dma_memory_write = rc4030_dma_write; + esp->dma_opaque = dmas[0]; + sysbus_esp->it_shift = 0; + /* XXX for now until rc4030 has been changed to use DMA enable signal */ + esp->dma_enabled = 1; + + sysbus = SYS_BUS_DEVICE(dev); + sysbus_realize_and_unref(sysbus, &error_fatal); + sysbus_connect_irq(sysbus, 0, qdev_get_gpio_in(rc4030, 5)); + sysbus_mmio_map(sysbus, 0, 0x80002000); + + scsi_bus_legacy_handle_cmdline(&esp->bus); + + /* Floppy */ + for (n = 0; n < MAX_FD; n++) { + fds[n] = drive_get(IF_FLOPPY, 0, n); + } + /* FIXME: we should enable DMA with a custom IsaDma device */ + fdctrl_init_sysbus(qdev_get_gpio_in(rc4030, 1), -1, 0x80003000, fds); + + /* Real time clock */ + mc146818_rtc_init(isa_bus, 1980, NULL); + memory_region_init_io(rtc, NULL, &rtc_ops, NULL, "rtc", 0x1000); + memory_region_add_subregion(address_space, 0x80004000, rtc); + + /* Keyboard (i8042) */ + i8042_mm_init(qdev_get_gpio_in(rc4030, 6), qdev_get_gpio_in(rc4030, 7), + i8042, 0x1000, 0x1); + memory_region_add_subregion(address_space, 0x80005000, i8042); + + /* Serial ports */ + serial_mm_init(address_space, 0x80006000, 0, + qdev_get_gpio_in(rc4030, 8), 8000000 / 16, + serial_hd(0), DEVICE_NATIVE_ENDIAN); + serial_mm_init(address_space, 0x80007000, 0, + qdev_get_gpio_in(rc4030, 9), 8000000 / 16, + serial_hd(1), DEVICE_NATIVE_ENDIAN); + + /* Parallel port */ + if (parallel_hds[0]) + parallel_mm_init(address_space, 0x80008000, 0, + qdev_get_gpio_in(rc4030, 0), parallel_hds[0]); + + /* FIXME: missing Jazz sound at 0x8000c000, rc4030[2] */ + + /* NVRAM */ + dev = qdev_new("ds1225y"); + sysbus = SYS_BUS_DEVICE(dev); + sysbus_realize_and_unref(sysbus, &error_fatal); + sysbus_mmio_map(sysbus, 0, 0x80009000); + + /* LED indicator */ + sysbus_create_simple("jazz-led", 0x8000f000, NULL); + + g_free(dmas); +} + +static +void mips_magnum_init(MachineState *machine) +{ + mips_jazz_init(machine, JAZZ_MAGNUM); +} + +static +void mips_pica61_init(MachineState *machine) +{ + mips_jazz_init(machine, JAZZ_PICA61); +} + +static void mips_magnum_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + + mc->desc = "MIPS Magnum"; + mc->init = mips_magnum_init; + mc->block_default_type = IF_SCSI; + mc->default_cpu_type = MIPS_CPU_TYPE_NAME("R4000"); + mc->default_ram_id = "mips_jazz.ram"; +} + +static const TypeInfo mips_magnum_type = { + .name = MACHINE_TYPE_NAME("magnum"), + .parent = TYPE_MACHINE, + .class_init = mips_magnum_class_init, +}; + +static void mips_pica61_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + + mc->desc = "Acer Pica 61"; + mc->init = mips_pica61_init; + mc->block_default_type = IF_SCSI; + mc->default_cpu_type = MIPS_CPU_TYPE_NAME("R4000"); + mc->default_ram_id = "mips_jazz.ram"; +} + +static const TypeInfo mips_pica61_type = { + .name = MACHINE_TYPE_NAME("pica61"), + .parent = TYPE_MACHINE, + .class_init = mips_pica61_class_init, +}; + +static void mips_jazz_machine_init(void) +{ + type_register_static(&mips_magnum_type); + type_register_static(&mips_pica61_type); +} + +type_init(mips_jazz_machine_init) diff --git 
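/*
 * (Sketch, not part of this patch.)  The dp8393x PROM bytes written above
 * use end-around-carry addition over the six MAC bytes, with 0xff minus
 * the folded sum stored in byte 7.  The same computation, self-contained:
 *
 *     static uint8_t sonic_prom_checksum(const uint8_t mac[6])
 *     {
 *         unsigned checksum = 0;
 *         int i;
 *
 *         for (i = 0; i < 6; i++) {
 *             checksum += mac[i];
 *             if (checksum > 0xff) {
 *                 checksum = (checksum + 1) & 0xff;
 *             }
 *         }
 *         return 0xff - checksum;
 *     }
 *
 * A guest driver can validate the PROM by recomputing this value and
 * comparing it against byte 7.
 */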
a/hw/mips/loongson3_bootp.c b/hw/mips/loongson3_bootp.c new file mode 100644 index 000000000..f99af2293 --- /dev/null +++ b/hw/mips/loongson3_bootp.c @@ -0,0 +1,151 @@ +/* + * LEFI (a UEFI-like interface for BIOS-Kernel boot parameters) helpers + * + * Copyright (c) 2018-2020 Huacai Chen (chenhc@lemote.com) + * Copyright (c) 2018-2020 Jiaxun Yang + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "qemu/cutils.h" +#include "cpu.h" +#include "hw/boards.h" +#include "hw/mips/loongson3_bootp.h" + +#define LOONGSON3_CORE_PER_NODE 4 + +static void init_cpu_info(void *g_cpuinfo, uint64_t cpu_freq) +{ + struct efi_cpuinfo_loongson *c = g_cpuinfo; + + c->cputype = cpu_to_le32(Loongson_3A); + c->processor_id = cpu_to_le32(MIPS_CPU(first_cpu)->env.CP0_PRid); + if (cpu_freq > UINT_MAX) { + c->cpu_clock_freq = cpu_to_le32(UINT_MAX); + } else { + c->cpu_clock_freq = cpu_to_le32(cpu_freq); + } + + c->cpu_startup_core_id = cpu_to_le16(0); + c->nr_cpus = cpu_to_le32(current_machine->smp.cpus); + c->total_node = cpu_to_le32(DIV_ROUND_UP(current_machine->smp.cpus, + LOONGSON3_CORE_PER_NODE)); +} + +static void init_memory_map(void *g_map, uint64_t ram_size) +{ + struct efi_memory_map_loongson *emap = g_map; + + emap->nr_map = cpu_to_le32(2); + emap->mem_freq = cpu_to_le32(300000000); + + emap->map[0].node_id = cpu_to_le32(0); + emap->map[0].mem_type = cpu_to_le32(1); + emap->map[0].mem_start = cpu_to_le64(0x0); + emap->map[0].mem_size = cpu_to_le32(240); + + emap->map[1].node_id = cpu_to_le32(0); + emap->map[1].mem_type = cpu_to_le32(2); + emap->map[1].mem_start = cpu_to_le64(0x90000000); + emap->map[1].mem_size = cpu_to_le32((ram_size / MiB) - 256); +} + +static void init_system_loongson(void *g_system) +{ + struct system_loongson *s = g_system; + + s->ccnuma_smp = cpu_to_le32(0); + s->sing_double_channel = cpu_to_le32(1); + s->nr_uarts = cpu_to_le32(1); + s->uarts[0].iotype = cpu_to_le32(2); + s->uarts[0].int_offset = cpu_to_le32(2); + s->uarts[0].uartclk = cpu_to_le32(25000000); /* Random value */ + s->uarts[0].uart_base = cpu_to_le64(virt_memmap[VIRT_UART].base); +} + +static void init_irq_source(void *g_irq_source) +{ + struct irq_source_routing_table *irq_info = g_irq_source; + + irq_info->node_id = cpu_to_le32(0); + irq_info->PIC_type = cpu_to_le32(0); + irq_info->dma_mask_bits = cpu_to_le16(64); + irq_info->pci_mem_start_addr = cpu_to_le64(virt_memmap[VIRT_PCIE_MMIO].base); + irq_info->pci_mem_end_addr = cpu_to_le64(virt_memmap[VIRT_PCIE_MMIO].base + + virt_memmap[VIRT_PCIE_MMIO].size - 1); + irq_info->pci_io_start_addr = cpu_to_le64(virt_memmap[VIRT_PCIE_PIO].base); +} + +static void init_interface_info(void *g_interface) +{ + struct interface_info *interface = g_interface; + + interface->vers = cpu_to_le16(0x01); + strpadcpy(interface->description, 64, "UEFI_Version_v1.0", '\0'); +} + +static void board_devices_info(void *g_board) +{ + struct board_devices *bd = 
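/*
 * (Sketch, not part of this patch.)  Each block filled in by these init_*
 * helpers is appended on a 64-byte boundary by init_loongson_params()
 * just below, which records each block's little-endian offset relative to
 * the loongson_params header itself.  A guest-side consumer would resolve
 * a field roughly like this, assuming glibc's le64toh():
 *
 *     static void *lefi_field(struct loongson_params *lp, uint64_t le_off)
 *     {
 *         return (char *)lp + le64toh(le_off);
 *     }
 *
 *     struct efi_cpuinfo_loongson *c = lefi_field(lp, lp->cpu_offset);
 */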
g_board; + + strpadcpy(bd->name, 64, "Loongson-3A-VIRT-1w-V1.00-demo", '\0'); +} + +static void init_special_info(void *g_special) +{ + struct loongson_special_attribute *special = g_special; + + strpadcpy(special->special_name, 64, "2018-05-01", '\0'); +} + +void init_loongson_params(struct loongson_params *lp, void *p, + uint64_t cpu_freq, uint64_t ram_size) +{ + init_cpu_info(p, cpu_freq); + lp->cpu_offset = cpu_to_le64((uintptr_t)p - (uintptr_t)lp); + p += ROUND_UP(sizeof(struct efi_cpuinfo_loongson), 64); + + init_memory_map(p, ram_size); + lp->memory_offset = cpu_to_le64((uintptr_t)p - (uintptr_t)lp); + p += ROUND_UP(sizeof(struct efi_memory_map_loongson), 64); + + init_system_loongson(p); + lp->system_offset = cpu_to_le64((uintptr_t)p - (uintptr_t)lp); + p += ROUND_UP(sizeof(struct system_loongson), 64); + + init_irq_source(p); + lp->irq_offset = cpu_to_le64((uintptr_t)p - (uintptr_t)lp); + p += ROUND_UP(sizeof(struct irq_source_routing_table), 64); + + init_interface_info(p); + lp->interface_offset = cpu_to_le64((uintptr_t)p - (uintptr_t)lp); + p += ROUND_UP(sizeof(struct interface_info), 64); + + board_devices_info(p); + lp->boarddev_table_offset = cpu_to_le64((uintptr_t)p - (uintptr_t)lp); + p += ROUND_UP(sizeof(struct board_devices), 64); + + init_special_info(p); + lp->special_offset = cpu_to_le64((uintptr_t)p - (uintptr_t)lp); + p += ROUND_UP(sizeof(struct loongson_special_attribute), 64); +} + +void init_reset_system(struct efi_reset_system_t *reset) +{ + reset->Shutdown = cpu_to_le64(0xffffffffbfc000a8); + reset->ResetCold = cpu_to_le64(0xffffffffbfc00080); + reset->ResetWarm = cpu_to_le64(0xffffffffbfc00080); +} diff --git a/hw/mips/loongson3_bootp.h b/hw/mips/loongson3_bootp.h new file mode 100644 index 000000000..d525ab745 --- /dev/null +++ b/hw/mips/loongson3_bootp.h @@ -0,0 +1,236 @@ +/* + * LEFI (a UEFI-like interface for BIOS-Kernel boot parameters) data structures + * defined at arch/mips/include/asm/mach-loongson64/boot_param.h in the Linux kernel + * + * Copyright (c) 2017-2020 Huacai Chen (chenhc@lemote.com) + * Copyright (c) 2017-2020 Jiaxun Yang + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#ifndef HW_MIPS_LOONGSON3_BOOTP_H +#define HW_MIPS_LOONGSON3_BOOTP_H + +struct efi_memory_map_loongson { + uint16_t vers; /* version of efi_memory_map */ + uint32_t nr_map; /* number of memory_maps */ + uint32_t mem_freq; /* memory frequency */ + struct mem_map { + uint32_t node_id; /* node_id the memory is attached to */ + uint32_t mem_type; /* system memory, pci memory, pci io, etc. 
*/ + uint64_t mem_start; /* memory map start address */ + uint32_t mem_size; /* each memory_map size, not the total size */ + } map[128]; +} QEMU_PACKED; + +enum loongson_cpu_type { + Legacy_2E = 0x0, + Legacy_2F = 0x1, + Legacy_3A = 0x2, + Legacy_3B = 0x3, + Legacy_1A = 0x4, + Legacy_1B = 0x5, + Legacy_2G = 0x6, + Legacy_2H = 0x7, + Loongson_1A = 0x100, + Loongson_1B = 0x101, + Loongson_2E = 0x200, + Loongson_2F = 0x201, + Loongson_2G = 0x202, + Loongson_2H = 0x203, + Loongson_3A = 0x300, + Loongson_3B = 0x301 +}; + +/* + * Capability and feature descriptor structure for MIPS CPU + */ +struct efi_cpuinfo_loongson { + uint16_t vers; /* version of efi_cpuinfo_loongson */ + uint32_t processor_id; /* PRID, e.g. 6305, 6306 */ + uint32_t cputype; /* Loongson_3A/3B, etc. */ + uint32_t total_node; /* num of total numa nodes */ + uint16_t cpu_startup_core_id; /* Boot core id */ + uint16_t reserved_cores_mask; + uint32_t cpu_clock_freq; /* cpu_clock */ + uint32_t nr_cpus; + char cpuname[64]; +} QEMU_PACKED; + +#define MAX_UARTS 64 +struct uart_device { + uint32_t iotype; + uint32_t uartclk; + uint32_t int_offset; + uint64_t uart_base; +} QEMU_PACKED; + +#define MAX_SENSORS 64 +#define SENSOR_TEMPER 0x00000001 +#define SENSOR_VOLTAGE 0x00000002 +#define SENSOR_FAN 0x00000004 +struct sensor_device { + char name[32]; /* a formal name */ + char label[64]; /* a flexible description */ + uint32_t type; /* SENSOR_* */ + uint32_t id; /* instance id of a sensor-class */ + uint32_t fan_policy; /* step speed or constant speed */ + uint32_t fan_percent;/* only for constant speed policy */ + uint64_t base_addr; /* base address of device registers */ +} QEMU_PACKED; + +struct system_loongson { + uint16_t vers; /* version of system_loongson */ + uint32_t ccnuma_smp; /* 0: no numa; 1: has numa */ + uint32_t sing_double_channel;/* 1: single; 2: double */ + uint32_t nr_uarts; + struct uart_device uarts[MAX_UARTS]; + uint32_t nr_sensors; + struct sensor_device sensors[MAX_SENSORS]; + char has_ec; + char ec_name[32]; + uint64_t ec_base_addr; + char has_tcm; + char tcm_name[32]; + uint64_t tcm_base_addr; + uint64_t workarounds; + uint64_t of_dtb_addr; /* NULL if not supported */ +} QEMU_PACKED; + +struct irq_source_routing_table { + uint16_t vers; + uint16_t size; + uint16_t rtr_bus; + uint16_t rtr_devfn; + uint32_t vendor; + uint32_t device; + uint32_t PIC_type; /* whether HT or PCI is used to route to the CPU PIC */ + uint64_t ht_int_bit; /* 3A: 1<<24; 3B: 1<<16 */ + uint64_t ht_enable; /* irqs used in this PIC */ + uint32_t node_id; /* node id: 0x0-0; 0x1-1; 0x10-2; 0x11-3 */ + uint64_t pci_mem_start_addr; + uint64_t pci_mem_end_addr; + uint64_t pci_io_start_addr; + uint64_t pci_io_end_addr; + uint64_t pci_config_addr; + uint16_t dma_mask_bits; + uint16_t dma_noncoherent; +} QEMU_PACKED; + +struct interface_info { + uint16_t vers; /* version of the specification */ + uint16_t size; + uint8_t flag; + char description[64]; +} QEMU_PACKED; + +#define MAX_RESOURCE_NUMBER 128 +struct resource_loongson { + uint64_t start; /* resource start address */ + uint64_t end; /* resource end address */ + char name[64]; + uint32_t flags; +}; + +struct archdev_data {}; /* arch specific additions */ + +struct board_devices { + char name[64]; /* hold the device name */ + uint32_t num_resources; /* number of device_resource */ + /* for each device's resource */ + struct resource_loongson resource[MAX_RESOURCE_NUMBER]; + /* arch specific additions */ + struct archdev_data archdata; +}; + +struct loongson_special_attribute { + uint16_t vers; /* 
version of this special attribute */ + char special_name[64]; /* special_attribute_name */ + uint32_t loongson_special_type; /* type of special device */ + /* for each device's resource */ + struct resource_loongson resource[MAX_RESOURCE_NUMBER]; +}; + +struct loongson_params { + uint64_t memory_offset; /* efi_memory_map_loongson struct offset */ + uint64_t cpu_offset; /* efi_cpuinfo_loongson struct offset */ + uint64_t system_offset; /* system_loongson struct offset */ + uint64_t irq_offset; /* irq_source_routing_table struct offset */ + uint64_t interface_offset; /* interface_info struct offset */ + uint64_t special_offset; /* loongson_special_attribute struct offset */ + uint64_t boarddev_table_offset; /* board_devices offset */ +}; + +struct smbios_tables { + uint16_t vers; /* version of smbios */ + uint64_t vga_bios; /* vga_bios address */ + struct loongson_params lp; +}; + +struct efi_reset_system_t { + uint64_t ResetCold; + uint64_t ResetWarm; + uint64_t ResetType; + uint64_t Shutdown; + uint64_t DoSuspend; /* NULL if not supported */ +}; + +struct efi_loongson { + uint64_t mps; /* MPS table */ + uint64_t acpi; /* ACPI table (IA64 ext 0.71) */ + uint64_t acpi20; /* ACPI table (ACPI 2.0) */ + struct smbios_tables smbios; /* SM BIOS table */ + uint64_t sal_systab; /* SAL system table */ + uint64_t boot_info; /* boot info table */ +}; + +struct boot_params { + struct efi_loongson efi; + struct efi_reset_system_t reset_system; +}; + +/* Overall MMIO & Memory layout */ +enum { + VIRT_LOWMEM, + VIRT_PM, + VIRT_FW_CFG, + VIRT_RTC, + VIRT_PCIE_PIO, + VIRT_PCIE_ECAM, + VIRT_BIOS_ROM, + VIRT_UART, + VIRT_LIOINTC, + VIRT_PCIE_MMIO, + VIRT_HIGHMEM +}; + +/* Low MEM layout for QEMU kernel loader */ +enum { + LOADER_KERNEL, + LOADER_INITRD, + LOADER_CMDLINE +}; + +/* BIOS ROM layout for QEMU kernel loader */ +enum { + LOADER_BOOTROM, + LOADER_PARAM, +}; + +extern const MemMapEntry virt_memmap[]; +void init_loongson_params(struct loongson_params *lp, void *p, + uint64_t cpu_freq, uint64_t ram_size); +void init_reset_system(struct efi_reset_system_t *reset); + +#endif diff --git a/hw/mips/loongson3_virt.c b/hw/mips/loongson3_virt.c new file mode 100644 index 000000000..ae192db0c --- /dev/null +++ b/hw/mips/loongson3_virt.c @@ -0,0 +1,634 @@ +/* + * Generic Loongson-3 Platform support + * + * Copyright (c) 2018-2020 Huacai Chen (chenhc@lemote.com) + * Copyright (c) 2018-2020 Jiaxun Yang + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +/* + * Generic virtualized PC Platform based on Loongson-3 CPU (MIPS64R2 with + * extensions, 800~2000MHz) + */ + +#include "qemu/osdep.h" +#include "qemu-common.h" +#include "qemu/units.h" +#include "qemu/cutils.h" +#include "qemu/datadir.h" +#include "qapi/error.h" +#include "elf.h" +#include "kvm_mips.h" +#include "hw/char/serial.h" +#include "hw/intc/loongson_liointc.h" +#include "hw/mips/mips.h" +#include "hw/mips/cpudevs.h" +#include "hw/mips/fw_cfg.h" +#include "hw/mips/loongson3_bootp.h" +#include "hw/misc/unimp.h" +#include "hw/intc/i8259.h" +#include "hw/loader.h" +#include "hw/isa/superio.h" +#include "hw/pci/msi.h" +#include "hw/pci/pci.h" +#include "hw/pci/pci_host.h" +#include "hw/pci-host/gpex.h" +#include "hw/usb.h" +#include "net/net.h" +#include "sysemu/kvm.h" +#include "sysemu/qtest.h" +#include "sysemu/reset.h" +#include "sysemu/runstate.h" +#include "qemu/error-report.h" + +#define PM_CNTL_MODE 0x10 + +#define LOONGSON_MAX_VCPUS 16 + +/* + * Loongson-3's virtual machine BIOS can be obtained here: + * 1, https://github.com/loongson-community/firmware-nonfree + * 2, http://dev.lemote.com:8000/files/firmware/UEFI/KVM/bios_loongson3.bin + */ +#define LOONGSON3_BIOSNAME "bios_loongson3.bin" + +#define UART_IRQ 0 +#define RTC_IRQ 1 +#define PCIE_IRQ_BASE 2 + +const MemMapEntry virt_memmap[] = { + [VIRT_LOWMEM] = { 0x00000000, 0x10000000 }, + [VIRT_PM] = { 0x10080000, 0x100 }, + [VIRT_FW_CFG] = { 0x10080100, 0x100 }, + [VIRT_RTC] = { 0x10081000, 0x1000 }, + [VIRT_PCIE_PIO] = { 0x18000000, 0x80000 }, + [VIRT_PCIE_ECAM] = { 0x1a000000, 0x2000000 }, + [VIRT_BIOS_ROM] = { 0x1fc00000, 0x200000 }, + [VIRT_UART] = { 0x1fe001e0, 0x8 }, + [VIRT_LIOINTC] = { 0x3ff01400, 0x64 }, + [VIRT_PCIE_MMIO] = { 0x40000000, 0x40000000 }, + [VIRT_HIGHMEM] = { 0x80000000, 0x0 }, /* Variable */ +}; + +static const MemMapEntry loader_memmap[] = { + [LOADER_KERNEL] = { 0x00000000, 0x4000000 }, + [LOADER_INITRD] = { 0x04000000, 0x0 }, /* Variable */ + [LOADER_CMDLINE] = { 0x0ff00000, 0x100000 }, +}; + +static const MemMapEntry loader_rommap[] = { + [LOADER_BOOTROM] = { 0x1fc00000, 0x1000 }, + [LOADER_PARAM] = { 0x1fc01000, 0x10000 }, +}; + +struct LoongsonMachineState { + MachineState parent_obj; + MemoryRegion *pio_alias; + MemoryRegion *mmio_alias; + MemoryRegion *ecam_alias; +}; +typedef struct LoongsonMachineState LoongsonMachineState; + +#define TYPE_LOONGSON_MACHINE MACHINE_TYPE_NAME("loongson3-virt") +DECLARE_INSTANCE_CHECKER(LoongsonMachineState, LOONGSON_MACHINE, TYPE_LOONGSON_MACHINE) + +static struct _loaderparams { + uint64_t cpu_freq; + uint64_t ram_size; + const char *kernel_cmdline; + const char *kernel_filename; + const char *initrd_filename; + uint64_t kernel_entry; + uint64_t a0, a1, a2; +} loaderparams; + +static uint64_t loongson3_pm_read(void *opaque, hwaddr addr, unsigned size) +{ + return 0; +} + +static void loongson3_pm_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + if (addr != PM_CNTL_MODE) { + return; + } + + switch (val) { + case 0x00: + qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); + return; + case 0xff: + qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN); + return; + default: + return; + } +} + +static const MemoryRegionOps loongson3_pm_ops = { + .read = loongson3_pm_read, + .write = loongson3_pm_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + .min_access_size = 1, + .max_access_size = 1 + } +}; + +#define DEF_LOONGSON3_FREQ (800 * 1000 * 1000) + +static uint64_t get_cpu_freq_hz(void) +{ +#ifdef CONFIG_KVM + int 
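/*
 * (Illustration, not part of this patch.)  Through loongson3_pm_ops above,
 * a guest resets or powers off the machine with a single byte store to
 * VIRT_PM base (0x10080000) + PM_CNTL_MODE (0x10); the built-in boot ROM
 * further below does exactly this via the uncached XKPHYS window:
 *
 *     volatile uint8_t *pm = (volatile uint8_t *)0x9000000010080010UL;
 *
 *     *pm = 0x00;     // qemu_system_reset_request()
 *     *pm = 0xff;     // qemu_system_shutdown_request()
 */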
ret; + uint64_t freq; + struct kvm_one_reg freq_reg = { + .id = KVM_REG_MIPS_COUNT_HZ, + .addr = (uintptr_t)(&freq) + }; + + if (kvm_enabled()) { + ret = kvm_vcpu_ioctl(first_cpu, KVM_GET_ONE_REG, &freq_reg); + if (ret >= 0) { + return freq * 2; + } + } +#endif + return DEF_LOONGSON3_FREQ; +} + +static void init_boot_param(void) +{ + static void *p; + struct boot_params *bp; + + p = g_malloc0(loader_rommap[LOADER_PARAM].size); + bp = p; + + bp->efi.smbios.vers = cpu_to_le16(1); + init_reset_system(&(bp->reset_system)); + p += ROUND_UP(sizeof(struct boot_params), 64); + init_loongson_params(&(bp->efi.smbios.lp), p, + loaderparams.cpu_freq, loaderparams.ram_size); + + rom_add_blob_fixed("params_rom", bp, + loader_rommap[LOADER_PARAM].size, + loader_rommap[LOADER_PARAM].base); + + g_free(bp); + + loaderparams.a2 = cpu_mips_phys_to_kseg0(NULL, + loader_rommap[LOADER_PARAM].base); +} + +static void init_boot_rom(void) +{ + const unsigned int boot_code[] = { + 0x40086000, /* mfc0 t0, CP0_STATUS */ + 0x240900E4, /* li t1, 0xe4 #set kx, sx, ux, erl */ + 0x01094025, /* or t0, t0, t1 */ + 0x3C090040, /* lui t1, 0x40 #set bev */ + 0x01094025, /* or t0, t0, t1 */ + 0x40886000, /* mtc0 t0, CP0_STATUS */ + 0x00000000, + 0x40806800, /* mtc0 zero, CP0_CAUSE */ + 0x00000000, + 0x400A7801, /* mfc0 t2, $15, 1 */ + 0x314A00FF, /* andi t2, 0x0ff */ + 0x3C089000, /* dli t0, 0x900000003ff01000 */ + 0x00084438, + 0x35083FF0, + 0x00084438, + 0x35081000, + 0x314B0003, /* andi t3, t2, 0x3 #local cpuid */ + 0x000B5A00, /* sll t3, 8 */ + 0x010B4025, /* or t0, t0, t3 */ + 0x314C000C, /* andi t4, t2, 0xc #node id */ + 0x000C62BC, /* dsll t4, 42 */ + 0x010C4025, /* or t0, t0, t4 */ + /* WaitForInit: */ + 0xDD020020, /* ld v0, FN_OFF(t0) #FN_OFF 0x020 */ + 0x1040FFFE, /* beqz v0, WaitForInit */ + 0x00000000, /* nop */ + 0xDD1D0028, /* ld sp, SP_OFF(t0) #FN_OFF 0x028 */ + 0xDD1C0030, /* ld gp, GP_OFF(t0) #FN_OFF 0x030 */ + 0xDD050038, /* ld a1, A1_OFF(t0) #FN_OFF 0x038 */ + 0x00400008, /* jr v0 #byebye */ + 0x00000000, /* nop */ + 0x1000FFFF, /* 1: b 1b */ + 0x00000000, /* nop */ + + /* Reset */ + 0x3C0C9000, /* dli t0, 0x9000000010080010 */ + 0x358C0000, + 0x000C6438, + 0x358C1008, + 0x000C6438, + 0x358C0010, + 0x240D0000, /* li t1, 0x00 */ + 0xA18D0000, /* sb t1, (t0) */ + 0x1000FFFF, /* 1: b 1b */ + 0x00000000, /* nop */ + + /* Shutdown */ + 0x3C0C9000, /* dli t0, 0x9000000010080010 */ + 0x358C0000, + 0x000C6438, + 0x358C1008, + 0x000C6438, + 0x358C0010, + 0x240D00FF, /* li t1, 0xff */ + 0xA18D0000, /* sb t1, (t0) */ + 0x1000FFFF, /* 1: b 1b */ + 0x00000000 /* nop */ + }; + + rom_add_blob_fixed("boot_rom", boot_code, sizeof(boot_code), + loader_rommap[LOADER_BOOTROM].base); +} + +static void fw_cfg_boot_set(void *opaque, const char *boot_device, + Error **errp) +{ + fw_cfg_modify_i16(opaque, FW_CFG_BOOT_DEVICE, boot_device[0]); +} + +static void fw_conf_init(unsigned long ram_size) +{ + FWCfgState *fw_cfg; + hwaddr cfg_addr = virt_memmap[VIRT_FW_CFG].base; + + fw_cfg = fw_cfg_init_mem_wide(cfg_addr, cfg_addr + 8, 8, 0, NULL); + fw_cfg_add_i16(fw_cfg, FW_CFG_NB_CPUS, (uint16_t)current_machine->smp.cpus); + fw_cfg_add_i16(fw_cfg, FW_CFG_MAX_CPUS, (uint16_t)current_machine->smp.max_cpus); + fw_cfg_add_i64(fw_cfg, FW_CFG_RAM_SIZE, (uint64_t)ram_size); + fw_cfg_add_i32(fw_cfg, FW_CFG_MACHINE_VERSION, 1); + fw_cfg_add_i64(fw_cfg, FW_CFG_CPU_FREQ, get_cpu_freq_hz()); + qemu_register_boot_set(fw_cfg_boot_set, fw_cfg); +} + +static int set_prom_cmdline(ram_addr_t initrd_offset, long initrd_size) +{ + int ret = 0; + void 
*cmdline_buf; + hwaddr cmdline_vaddr; + unsigned int *parg_env; + + /* Allocate cmdline_buf for command line. */ + cmdline_buf = g_malloc0(loader_memmap[LOADER_CMDLINE].size); + cmdline_vaddr = cpu_mips_phys_to_kseg0(NULL, + loader_memmap[LOADER_CMDLINE].base); + + /* + * Layout of cmdline_buf looks like this: + * argv[0], argv[1], 0, env[0], env[1], ... env[i], 0, + * argv[0]'s data, argv[1]'s data, env[0]'s data, ..., env[i]'s data, 0 + */ + parg_env = (void *)cmdline_buf; + + ret = (3 + 1) * 4; + *parg_env++ = cmdline_vaddr + ret; + ret += (1 + snprintf(cmdline_buf + ret, 256 - ret, "g")); + + /* argv1 */ + *parg_env++ = cmdline_vaddr + ret; + if (initrd_size > 0) + ret += (1 + snprintf(cmdline_buf + ret, 256 - ret, + "rd_start=0x" TARGET_FMT_lx " rd_size=%li %s", + cpu_mips_phys_to_kseg0(NULL, initrd_offset), + initrd_size, loaderparams.kernel_cmdline)); + else + ret += (1 + snprintf(cmdline_buf + ret, 256 - ret, "%s", + loaderparams.kernel_cmdline)); + + /* argv2 */ + *parg_env++ = cmdline_vaddr + 4 * ret; + + rom_add_blob_fixed("cmdline", cmdline_buf, + loader_memmap[LOADER_CMDLINE].size, + loader_memmap[LOADER_CMDLINE].base); + + g_free(cmdline_buf); + + loaderparams.a0 = 2; + loaderparams.a1 = cmdline_vaddr; + + return 0; +} + +static uint64_t load_kernel(CPUMIPSState *env) +{ + long kernel_size; + ram_addr_t initrd_offset; + uint64_t kernel_entry, kernel_low, kernel_high, initrd_size; + + kernel_size = load_elf(loaderparams.kernel_filename, NULL, + cpu_mips_kseg0_to_phys, NULL, + (uint64_t *)&kernel_entry, + (uint64_t *)&kernel_low, (uint64_t *)&kernel_high, + NULL, 0, EM_MIPS, 1, 0); + if (kernel_size < 0) { + error_report("could not load kernel '%s': %s", + loaderparams.kernel_filename, + load_elf_strerror(kernel_size)); + exit(1); + } + + /* load initrd */ + initrd_size = 0; + initrd_offset = 0; + if (loaderparams.initrd_filename) { + initrd_size = get_image_size(loaderparams.initrd_filename); + if (initrd_size > 0) { + initrd_offset = MAX(loader_memmap[LOADER_INITRD].base, + ROUND_UP(kernel_high, INITRD_PAGE_SIZE)); + + if (initrd_offset + initrd_size > loaderparams.ram_size) { + error_report("memory too small for initial ram disk '%s'", + loaderparams.initrd_filename); + exit(1); + } + + initrd_size = load_image_targphys(loaderparams.initrd_filename, + initrd_offset, + loaderparams.ram_size - initrd_offset); + } + + if (initrd_size == (target_ulong) -1) { + error_report("could not load initial ram disk '%s'", + loaderparams.initrd_filename); + exit(1); + } + } + + /* Set up the prom cmdline. 
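 *
 * For illustration (values implied by the code above, with the kernel
 * name fixed to "g" and argc fixed to 2), the blob at cmdline_vaddr is:
 *   +0x00: u32 -> cmdline_vaddr + 0x10   (argv[0], the name "g")
 *   +0x04: u32 -> cmdline_vaddr + 0x12   (argv[1], the command line)
 *   +0x10: "g\0"
 *   +0x12: "[rd_start=... rd_size=...] <kernel_cmdline>\0"
 * main_cpu_reset() below then hands the kernel a0 = 2 (argc) and
 * a1 = cmdline_vaddr (argv).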
*/ + set_prom_cmdline(initrd_offset, initrd_size); + + return kernel_entry; +} + +static void main_cpu_reset(void *opaque) +{ + MIPSCPU *cpu = opaque; + CPUMIPSState *env = &cpu->env; + + cpu_reset(CPU(cpu)); + + /* Loongson-3 reset stuff */ + if (loaderparams.kernel_filename) { + if (cpu == MIPS_CPU(first_cpu)) { + env->active_tc.gpr[4] = loaderparams.a0; + env->active_tc.gpr[5] = loaderparams.a1; + env->active_tc.gpr[6] = loaderparams.a2; + env->active_tc.PC = loaderparams.kernel_entry; + } + env->CP0_Status &= ~((1 << CP0St_BEV) | (1 << CP0St_ERL)); + } +} + +static inline void loongson3_virt_devices_init(MachineState *machine, + DeviceState *pic) +{ + int i; + qemu_irq irq; + PCIBus *pci_bus; + DeviceState *dev; + MemoryRegion *mmio_reg, *ecam_reg; + LoongsonMachineState *s = LOONGSON_MACHINE(machine); + + dev = qdev_new(TYPE_GPEX_HOST); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + pci_bus = PCI_HOST_BRIDGE(dev)->bus; + + s->ecam_alias = g_new0(MemoryRegion, 1); + ecam_reg = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 0); + memory_region_init_alias(s->ecam_alias, OBJECT(dev), "pcie-ecam", + ecam_reg, 0, virt_memmap[VIRT_PCIE_ECAM].size); + memory_region_add_subregion(get_system_memory(), + virt_memmap[VIRT_PCIE_ECAM].base, + s->ecam_alias); + + s->mmio_alias = g_new0(MemoryRegion, 1); + mmio_reg = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 1); + memory_region_init_alias(s->mmio_alias, OBJECT(dev), "pcie-mmio", + mmio_reg, virt_memmap[VIRT_PCIE_MMIO].base, + virt_memmap[VIRT_PCIE_MMIO].size); + memory_region_add_subregion(get_system_memory(), + virt_memmap[VIRT_PCIE_MMIO].base, + s->mmio_alias); + + s->pio_alias = g_new0(MemoryRegion, 1); + memory_region_init_alias(s->pio_alias, OBJECT(dev), "pcie-pio", + get_system_io(), 0, + virt_memmap[VIRT_PCIE_PIO].size); + memory_region_add_subregion(get_system_memory(), + virt_memmap[VIRT_PCIE_PIO].base, s->pio_alias); + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 2, virt_memmap[VIRT_PCIE_PIO].base); + + for (i = 0; i < GPEX_NUM_IRQS; i++) { + irq = qdev_get_gpio_in(pic, PCIE_IRQ_BASE + i); + sysbus_connect_irq(SYS_BUS_DEVICE(dev), i, irq); + gpex_set_irq_num(GPEX_HOST(dev), i, PCIE_IRQ_BASE + i); + } + msi_nonbroken = true; + + pci_vga_init(pci_bus); + + if (defaults_enabled()) { + pci_create_simple(pci_bus, -1, "pci-ohci"); + usb_create_simple(usb_bus_find(-1), "usb-kbd"); + usb_create_simple(usb_bus_find(-1), "usb-tablet"); + } + + for (i = 0; i < nb_nics; i++) { + NICInfo *nd = &nd_table[i]; + + if (!nd->model) { + nd->model = g_strdup("virtio"); + } + + pci_nic_init_nofail(nd, pci_bus, nd->model, NULL); + } +} + +static void mips_loongson3_virt_init(MachineState *machine) +{ + int i; + long bios_size; + MIPSCPU *cpu; + Clock *cpuclk; + CPUMIPSState *env; + DeviceState *liointc; + char *filename; + const char *kernel_cmdline = machine->kernel_cmdline; + const char *kernel_filename = machine->kernel_filename; + const char *initrd_filename = machine->initrd_filename; + ram_addr_t ram_size = machine->ram_size; + MemoryRegion *address_space_mem = get_system_memory(); + MemoryRegion *ram = g_new(MemoryRegion, 1); + MemoryRegion *bios = g_new(MemoryRegion, 1); + MemoryRegion *iomem = g_new(MemoryRegion, 1); + + /* TODO: TCG will support all CPU types */ + if (!kvm_enabled()) { + if (!machine->cpu_type) { + machine->cpu_type = MIPS_CPU_TYPE_NAME("Loongson-3A1000"); + } + if (!strstr(machine->cpu_type, "Loongson-3A1000")) { + error_report("Loongson-3/TCG needs cpu type Loongson-3A1000"); + exit(1); + } + } else { + if 
(!machine->cpu_type) { + machine->cpu_type = MIPS_CPU_TYPE_NAME("Loongson-3A4000"); + } + if (!strstr(machine->cpu_type, "Loongson-3A4000")) { + error_report("Loongson-3/KVM needs cpu type Loongson-3A4000"); + exit(1); + } + } + + if (ram_size < 512 * MiB) { + error_report("Loongson-3 machine needs at least 512 MiB of memory"); + exit(1); + } + + /* + * The whole MMIO range among the configuration registers doesn't + * generate exceptions when invalid memory is accessed. Create some + * unimplemented devices to emulate this behaviour. + */ + create_unimplemented_device("mmio fallback 0", 0x10000000, 256 * MiB); + create_unimplemented_device("mmio fallback 1", 0x30000000, 256 * MiB); + + liointc = qdev_new("loongson.liointc"); + sysbus_realize_and_unref(SYS_BUS_DEVICE(liointc), &error_fatal); + + sysbus_mmio_map(SYS_BUS_DEVICE(liointc), 0, virt_memmap[VIRT_LIOINTC].base); + + serial_mm_init(address_space_mem, virt_memmap[VIRT_UART].base, 0, + qdev_get_gpio_in(liointc, UART_IRQ), 115200, serial_hd(0), + DEVICE_NATIVE_ENDIAN); + + sysbus_create_simple("goldfish_rtc", virt_memmap[VIRT_RTC].base, + qdev_get_gpio_in(liointc, RTC_IRQ)); + + cpuclk = clock_new(OBJECT(machine), "cpu-refclk"); + clock_set_hz(cpuclk, DEF_LOONGSON3_FREQ); + + for (i = 0; i < machine->smp.cpus; i++) { + int ip; + + /* init CPUs */ + cpu = mips_cpu_create_with_clock(machine->cpu_type, cpuclk); + + /* Init internal devices */ + cpu_mips_irq_init_cpu(cpu); + cpu_mips_clock_init(cpu); + qemu_register_reset(main_cpu_reset, cpu); + + if (i >= 4) { + continue; /* Only node-0 can be connected to LIOINTC */ + } + + for (ip = 0; ip < 4 ; ip++) { + int pin = i * 4 + ip; + sysbus_connect_irq(SYS_BUS_DEVICE(liointc), + pin, cpu->env.irq[ip + 2]); + } + } + env = &MIPS_CPU(first_cpu)->env; + + /* Allocate RAM/BIOS, 0x00000000~0x10000000 is alias of 0x80000000~0x90000000 */ + memory_region_init_rom(bios, NULL, "loongson3.bios", + virt_memmap[VIRT_BIOS_ROM].size, &error_fatal); + memory_region_init_alias(ram, NULL, "loongson3.lowmem", + machine->ram, 0, virt_memmap[VIRT_LOWMEM].size); + memory_region_init_io(iomem, NULL, &loongson3_pm_ops, + NULL, "loongson3_pm", virt_memmap[VIRT_PM].size); + + memory_region_add_subregion(address_space_mem, + virt_memmap[VIRT_LOWMEM].base, ram); + memory_region_add_subregion(address_space_mem, + virt_memmap[VIRT_BIOS_ROM].base, bios); + memory_region_add_subregion(address_space_mem, + virt_memmap[VIRT_HIGHMEM].base, machine->ram); + memory_region_add_subregion(address_space_mem, + virt_memmap[VIRT_PM].base, iomem); + + /* + * We do not support flash operation, just loading bios.bin as a raw BIOS image. + * Use -L to set the BIOS directory and -bios to set the BIOS file name. 
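 *
 * An illustrative invocation (TCG; paths are placeholders):
 *   qemu-system-mips64el -M loongson3-virt -cpu Loongson-3A1000 \
 *       -L /path/to/firmware -bios bios_loongson3.bin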
+ */ + + if (kernel_filename) { + loaderparams.cpu_freq = get_cpu_freq_hz(); + loaderparams.ram_size = ram_size; + loaderparams.kernel_filename = kernel_filename; + loaderparams.kernel_cmdline = kernel_cmdline; + loaderparams.initrd_filename = initrd_filename; + loaderparams.kernel_entry = load_kernel(env); + + init_boot_rom(); + init_boot_param(); + } else { + filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, + machine->firmware ?: LOONGSON3_BIOSNAME); + if (filename) { + bios_size = load_image_targphys(filename, + virt_memmap[VIRT_BIOS_ROM].base, + virt_memmap[VIRT_BIOS_ROM].size); + g_free(filename); + } else { + bios_size = -1; + } + + if ((bios_size < 0 || bios_size > virt_memmap[VIRT_BIOS_ROM].size) && + !kernel_filename && !qtest_enabled()) { + error_report("Could not load MIPS bios '%s'", machine->firmware); + exit(1); + } + + fw_conf_init(ram_size); + } + + loongson3_virt_devices_init(machine, liointc); +} + +static void loongson3v_machine_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + + mc->desc = "Loongson-3 Virtualization Platform"; + mc->init = mips_loongson3_virt_init; + mc->block_default_type = IF_IDE; + mc->max_cpus = LOONGSON_MAX_VCPUS; + mc->default_ram_id = "loongson3.highram"; + mc->default_ram_size = 1600 * MiB; + mc->kvm_type = mips_kvm_type; + mc->minimum_page_bits = 14; +} + +static const TypeInfo loongson3_machine_types[] = { + { + .name = TYPE_LOONGSON_MACHINE, + .parent = TYPE_MACHINE, + .instance_size = sizeof(LoongsonMachineState), + .class_init = loongson3v_machine_class_init, + } +}; + +DEFINE_TYPES(loongson3_machine_types) diff --git a/hw/mips/malta.c b/hw/mips/malta.c new file mode 100644 index 000000000..b770b8d36 --- /dev/null +++ b/hw/mips/malta.c @@ -0,0 +1,1464 @@ +/* + * QEMU Malta board support + * + * Copyright (c) 2006 Aurelien Jarno + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "qemu/bitops.h" +#include "qemu-common.h" +#include "qemu/datadir.h" +#include "hw/clock.h" +#include "hw/southbridge/piix.h" +#include "hw/isa/superio.h" +#include "hw/char/serial.h" +#include "net/net.h" +#include "hw/boards.h" +#include "hw/i2c/smbus_eeprom.h" +#include "hw/block/flash.h" +#include "hw/mips/mips.h" +#include "hw/mips/cpudevs.h" +#include "hw/pci/pci.h" +#include "qemu/log.h" +#include "hw/mips/bios.h" +#include "hw/ide.h" +#include "hw/irq.h" +#include "hw/loader.h" +#include "elf.h" +#include "qom/object.h" +#include "hw/sysbus.h" /* SysBusDevice */ +#include "qemu/host-utils.h" +#include "sysemu/qtest.h" +#include "sysemu/reset.h" +#include "sysemu/runstate.h" +#include "qapi/error.h" +#include "qemu/error-report.h" +#include "hw/misc/empty_slot.h" +#include "sysemu/kvm.h" +#include "semihosting/semihost.h" +#include "hw/mips/cps.h" +#include "hw/qdev-clock.h" + +#define ENVP_PADDR 0x2000 +#define ENVP_VADDR cpu_mips_phys_to_kseg0(NULL, ENVP_PADDR) +#define ENVP_NB_ENTRIES 16 +#define ENVP_ENTRY_SIZE 256 + +/* Hardware addresses */ +#define FLASH_ADDRESS 0x1e000000ULL +#define FPGA_ADDRESS 0x1f000000ULL +#define RESET_ADDRESS 0x1fc00000ULL + +#define FLASH_SIZE 0x400000 + +#define MAX_IDE_BUS 2 + +typedef struct { + MemoryRegion iomem; + MemoryRegion iomem_lo; /* 0 - 0x900 */ + MemoryRegion iomem_hi; /* 0xa00 - 0x100000 */ + uint32_t leds; + uint32_t brk; + uint32_t gpout; + uint32_t i2cin; + uint32_t i2coe; + uint32_t i2cout; + uint32_t i2csel; + CharBackend display; + char display_text[9]; + SerialMM *uart; + bool display_inited; +} MaltaFPGAState; + +#define TYPE_MIPS_MALTA "mips-malta" +OBJECT_DECLARE_SIMPLE_TYPE(MaltaState, MIPS_MALTA) + +struct MaltaState { + SysBusDevice parent_obj; + + Clock *cpuclk; + MIPSCPSState cps; + qemu_irq i8259[ISA_NUM_IRQS]; +}; + +static struct _loaderparams { + int ram_size, ram_low_size; + const char *kernel_filename; + const char *kernel_cmdline; + const char *initrd_filename; +} loaderparams; + +/* Malta FPGA */ +static void malta_fpga_update_display(void *opaque) +{ + char leds_text[9]; + int i; + MaltaFPGAState *s = opaque; + + for (i = 7 ; i >= 0 ; i--) { + if (s->leds & (1 << i)) { + leds_text[i] = '#'; + } else { + leds_text[i] = ' '; + } + } + leds_text[8] = '\0'; + + qemu_chr_fe_printf(&s->display, "\e[H\n\n|\e[32m%-8.8s\e[00m|\r\n", + leds_text); + qemu_chr_fe_printf(&s->display, "\n\n\n\n|\e[31m%-8.8s\e[00m|", + s->display_text); +} + +/* + * EEPROM 24C01 / 24C02 emulation. + * + * Emulation for serial EEPROMs: + * 24C01 - 1024 bit (128 x 8) + * 24C02 - 2048 bit (256 x 8) + * + * Typical device names include Microchip 24C02SC or SGS Thomson ST24C02. + */ + +#if defined(DEBUG) +# define logout(fmt, ...) \ + fprintf(stderr, "MALTA\t%-24s" fmt, __func__, ## __VA_ARGS__) +#else +# define logout(fmt, ...) 
((void)0) +#endif + +struct _eeprom24c0x_t { + uint8_t tick; + uint8_t address; + uint8_t command; + uint8_t ack; + uint8_t scl; + uint8_t sda; + uint8_t data; + /* uint16_t size; */ + uint8_t contents[256]; +}; + +typedef struct _eeprom24c0x_t eeprom24c0x_t; + +static eeprom24c0x_t spd_eeprom = { + .contents = { + /* 00000000: */ + 0x80, 0x08, 0xFF, 0x0D, 0x0A, 0xFF, 0x40, 0x00, + /* 00000008: */ + 0x01, 0x75, 0x54, 0x00, 0x82, 0x08, 0x00, 0x01, + /* 00000010: */ + 0x8F, 0x04, 0x02, 0x01, 0x01, 0x00, 0x00, 0x00, + /* 00000018: */ + 0x00, 0x00, 0x00, 0x14, 0x0F, 0x14, 0x2D, 0xFF, + /* 00000020: */ + 0x15, 0x08, 0x15, 0x08, 0x00, 0x00, 0x00, 0x00, + /* 00000028: */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + /* 00000030: */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + /* 00000038: */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x12, 0xD0, + /* 00000040: */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + /* 00000048: */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + /* 00000050: */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + /* 00000058: */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + /* 00000060: */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + /* 00000068: */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + /* 00000070: */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + /* 00000078: */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x64, 0xF4, + }, +}; + +static void generate_eeprom_spd(uint8_t *eeprom, ram_addr_t ram_size) +{ + enum { SDR = 0x4, DDR2 = 0x8 } type; + uint8_t *spd = spd_eeprom.contents; + uint8_t nbanks = 0; + uint16_t density = 0; + int i; + + /* work in terms of MB */ + ram_size /= MiB; + + while ((ram_size >= 4) && (nbanks <= 2)) { + int sz_log2 = MIN(31 - clz32(ram_size), 14); + nbanks++; + density |= 1 << (sz_log2 - 2); + ram_size -= 1 << sz_log2; + } + + /* split to 2 banks if possible */ + if ((nbanks == 1) && (density > 1)) { + nbanks++; + density >>= 1; + } + + if (density & 0xff00) { + density = (density & 0xe0) | ((density >> 8) & 0x1f); + type = DDR2; + } else if (!(density & 0x1f)) { + type = DDR2; + } else { + type = SDR; + } + + if (ram_size) { + warn_report("SPD cannot represent final " RAM_ADDR_FMT "MB" + " of SDRAM", ram_size); + } + + /* fill in SPD memory information */ + spd[2] = type; + spd[5] = nbanks; + spd[31] = density; + + /* checksum */ + spd[63] = 0; + for (i = 0; i < 63; i++) { + spd[63] += spd[i]; + } + + /* copy for SMBUS */ + memcpy(eeprom, spd, sizeof(spd_eeprom.contents)); +} + +static void generate_eeprom_serial(uint8_t *eeprom) +{ + int i, pos = 0; + uint8_t mac[6] = { 0x00 }; + uint8_t sn[5] = { 0x01, 0x23, 0x45, 0x67, 0x89 }; + + /* version */ + eeprom[pos++] = 0x01; + + /* count */ + eeprom[pos++] = 0x02; + + /* MAC address */ + eeprom[pos++] = 0x01; /* MAC */ + eeprom[pos++] = 0x06; /* length */ + memcpy(&eeprom[pos], mac, sizeof(mac)); + pos += sizeof(mac); + + /* serial number */ + eeprom[pos++] = 0x02; /* serial */ + eeprom[pos++] = 0x05; /* length */ + memcpy(&eeprom[pos], sn, sizeof(sn)); + pos += sizeof(sn); + + /* checksum */ + eeprom[pos] = 0; + for (i = 0; i < pos; i++) { + eeprom[pos] += eeprom[i]; + } +} + +static uint8_t eeprom24c0x_read(eeprom24c0x_t *eeprom) +{ + logout("%u: scl = %u, sda = %u, data = 0x%02x\n", + eeprom->tick, eeprom->scl, eeprom->sda, eeprom->data); + return eeprom->sda; +} + +static void eeprom24c0x_write(eeprom24c0x_t *eeprom, int scl, int sda) +{ + if (eeprom->scl && scl && (eeprom->sda != sda)) { + logout("%u: scl = %u->%u, sda = %u->%u i2c %s\n", + 
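/*
 * (Worked example, not part of this patch.)  Tracing generate_eeprom_spd()
 * above for a 256 MiB module:
 *     ram_size = 256 -> sz_log2 = MIN(31 - clz32(256), 14) = 8,
 *                       nbanks = 1, density = 1 << 6 = 0x40, ram_size = 0
 *     the single bank is then split: nbanks = 2, density = 0x20
 *     (density & 0xff00) == 0 and (density & 0x1f) == 0 -> type = DDR2
 * so spd[2] = 0x08 (DDR2), spd[5] = 2 (banks), spd[31] = 0x20 (density).
 */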
eeprom->tick, eeprom->scl, scl, eeprom->sda, sda, + sda ? "stop" : "start"); + if (!sda) { + eeprom->tick = 1; + eeprom->command = 0; + } + } else if (eeprom->tick == 0 && !eeprom->ack) { + /* Waiting for start. */ + logout("%u: scl = %u->%u, sda = %u->%u wait for i2c start\n", + eeprom->tick, eeprom->scl, scl, eeprom->sda, sda); + } else if (!eeprom->scl && scl) { + logout("%u: scl = %u->%u, sda = %u->%u trigger bit\n", + eeprom->tick, eeprom->scl, scl, eeprom->sda, sda); + if (eeprom->ack) { + logout("\ti2c ack bit = 0\n"); + sda = 0; + eeprom->ack = 0; + } else if (eeprom->sda == sda) { + uint8_t bit = (sda != 0); + logout("\ti2c bit = %d\n", bit); + if (eeprom->tick < 9) { + eeprom->command <<= 1; + eeprom->command += bit; + eeprom->tick++; + if (eeprom->tick == 9) { + logout("\tcommand 0x%04x, %s\n", eeprom->command, + bit ? "read" : "write"); + eeprom->ack = 1; + } + } else if (eeprom->tick < 17) { + if (eeprom->command & 1) { + sda = ((eeprom->data & 0x80) != 0); + } + eeprom->address <<= 1; + eeprom->address += bit; + eeprom->tick++; + eeprom->data <<= 1; + if (eeprom->tick == 17) { + eeprom->data = eeprom->contents[eeprom->address]; + logout("\taddress 0x%04x, data 0x%02x\n", + eeprom->address, eeprom->data); + eeprom->ack = 1; + eeprom->tick = 0; + } + } else if (eeprom->tick >= 17) { + sda = 0; + } + } else { + logout("\tsda changed with raising scl\n"); + } + } else { + logout("%u: scl = %u->%u, sda = %u->%u\n", eeprom->tick, eeprom->scl, + scl, eeprom->sda, sda); + } + eeprom->scl = scl; + eeprom->sda = sda; +} + +static uint64_t malta_fpga_read(void *opaque, hwaddr addr, + unsigned size) +{ + MaltaFPGAState *s = opaque; + uint32_t val = 0; + uint32_t saddr; + + saddr = (addr & 0xfffff); + + switch (saddr) { + + /* SWITCH Register */ + case 0x00200: + val = 0x00000000; + break; + + /* STATUS Register */ + case 0x00208: +#ifdef TARGET_WORDS_BIGENDIAN + val = 0x00000012; +#else + val = 0x00000010; +#endif + break; + + /* JMPRS Register */ + case 0x00210: + val = 0x00; + break; + + /* LEDBAR Register */ + case 0x00408: + val = s->leds; + break; + + /* BRKRES Register */ + case 0x00508: + val = s->brk; + break; + + /* UART Registers are handled directly by the serial device */ + + /* GPOUT Register */ + case 0x00a00: + val = s->gpout; + break; + + /* XXX: implement a real I2C controller */ + + /* GPINP Register */ + case 0x00a08: + /* IN = OUT until a real I2C control is implemented */ + if (s->i2csel) { + val = s->i2cout; + } else { + val = 0x00; + } + break; + + /* I2CINP Register */ + case 0x00b00: + val = ((s->i2cin & ~1) | eeprom24c0x_read(&spd_eeprom)); + break; + + /* I2COE Register */ + case 0x00b08: + val = s->i2coe; + break; + + /* I2COUT Register */ + case 0x00b10: + val = s->i2cout; + break; + + /* I2CSEL Register */ + case 0x00b18: + val = s->i2csel; + break; + + default: + qemu_log_mask(LOG_GUEST_ERROR, + "malta_fpga_read: Bad register addr 0x%"HWADDR_PRIX"\n", + addr); + break; + } + return val; +} + +static void malta_fpga_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + MaltaFPGAState *s = opaque; + uint32_t saddr; + + saddr = (addr & 0xfffff); + + switch (saddr) { + + /* SWITCH Register */ + case 0x00200: + break; + + /* JMPRS Register */ + case 0x00210: + break; + + /* LEDBAR Register */ + case 0x00408: + s->leds = val & 0xff; + malta_fpga_update_display(s); + break; + + /* ASCIIWORD Register */ + case 0x00410: + snprintf(s->display_text, 9, "%08X", (uint32_t)val); + malta_fpga_update_display(s); + break; + + /* ASCIIPOS0 to ASCIIPOS7 
Registers */ + case 0x00418: + case 0x00420: + case 0x00428: + case 0x00430: + case 0x00438: + case 0x00440: + case 0x00448: + case 0x00450: + s->display_text[(saddr - 0x00418) >> 3] = (char) val; + malta_fpga_update_display(s); + break; + + /* SOFTRES Register */ + case 0x00500: + if (val == 0x42) { + qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); + } + break; + + /* BRKRES Register */ + case 0x00508: + s->brk = val & 0xff; + break; + + /* UART Registers are handled directly by the serial device */ + + /* GPOUT Register */ + case 0x00a00: + s->gpout = val & 0xff; + break; + + /* I2COE Register */ + case 0x00b08: + s->i2coe = val & 0x03; + break; + + /* I2COUT Register */ + case 0x00b10: + eeprom24c0x_write(&spd_eeprom, val & 0x02, val & 0x01); + s->i2cout = val; + break; + + /* I2CSEL Register */ + case 0x00b18: + s->i2csel = val & 0x01; + break; + + default: + qemu_log_mask(LOG_GUEST_ERROR, + "malta_fpga_write: Bad register addr 0x%"HWADDR_PRIX"\n", + addr); + break; + } +} + +static const MemoryRegionOps malta_fpga_ops = { + .read = malta_fpga_read, + .write = malta_fpga_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void malta_fpga_reset(void *opaque) +{ + MaltaFPGAState *s = opaque; + + s->leds = 0x00; + s->brk = 0x0a; + s->gpout = 0x00; + s->i2cin = 0x3; + s->i2coe = 0x0; + s->i2cout = 0x3; + s->i2csel = 0x1; + + s->display_text[8] = '\0'; + snprintf(s->display_text, 9, " "); +} + +static void malta_fgpa_display_event(void *opaque, QEMUChrEvent event) +{ + MaltaFPGAState *s = opaque; + + if (event == CHR_EVENT_OPENED && !s->display_inited) { + qemu_chr_fe_printf(&s->display, "\e[HMalta LEDBAR\r\n"); + qemu_chr_fe_printf(&s->display, "+--------+\r\n"); + qemu_chr_fe_printf(&s->display, "+ +\r\n"); + qemu_chr_fe_printf(&s->display, "+--------+\r\n"); + qemu_chr_fe_printf(&s->display, "\n"); + qemu_chr_fe_printf(&s->display, "Malta ASCII\r\n"); + qemu_chr_fe_printf(&s->display, "+--------+\r\n"); + qemu_chr_fe_printf(&s->display, "+ +\r\n"); + qemu_chr_fe_printf(&s->display, "+--------+\r\n"); + s->display_inited = true; + } +} + +static MaltaFPGAState *malta_fpga_init(MemoryRegion *address_space, + hwaddr base, qemu_irq uart_irq, Chardev *uart_chr) +{ + MaltaFPGAState *s; + Chardev *chr; + + s = g_new0(MaltaFPGAState, 1); + + memory_region_init_io(&s->iomem, NULL, &malta_fpga_ops, s, + "malta-fpga", 0x100000); + memory_region_init_alias(&s->iomem_lo, NULL, "malta-fpga", + &s->iomem, 0, 0x900); + memory_region_init_alias(&s->iomem_hi, NULL, "malta-fpga", + &s->iomem, 0xa00, 0x100000 - 0xa00); + + memory_region_add_subregion(address_space, base, &s->iomem_lo); + memory_region_add_subregion(address_space, base + 0xa00, &s->iomem_hi); + + chr = qemu_chr_new("fpga", "vc:320x200", NULL); + qemu_chr_fe_init(&s->display, chr, NULL); + qemu_chr_fe_set_handlers(&s->display, NULL, NULL, + malta_fgpa_display_event, NULL, s, NULL, true); + + s->uart = serial_mm_init(address_space, base + 0x900, 3, uart_irq, + 230400, uart_chr, DEVICE_NATIVE_ENDIAN); + + malta_fpga_reset(s); + qemu_register_reset(malta_fpga_reset, s); + + return s; +} + +/* Network support */ +static void network_init(PCIBus *pci_bus) +{ + int i; + + for (i = 0; i < nb_nics; i++) { + NICInfo *nd = &nd_table[i]; + const char *default_devaddr = NULL; + + if (i == 0 && (!nd->model || strcmp(nd->model, "pcnet") == 0)) + /* The malta board has a PCNet card using PCI SLOT 11 */ + default_devaddr = "0b"; + + pci_nic_init_nofail(nd, pci_bus, "pcnet", default_devaddr); + } +} + +static void 
write_bootloader_nanomips(uint8_t *base, uint64_t run_addr, + uint64_t kernel_entry) +{ + uint16_t *p; + + /* Small bootloader */ + p = (uint16_t *)base; + +#define NM_HI1(VAL) (((VAL) >> 16) & 0x1f) +#define NM_HI2(VAL) \ + (((VAL) & 0xf000) | (((VAL) >> 19) & 0xffc) | (((VAL) >> 31) & 0x1)) +#define NM_LO(VAL) ((VAL) & 0xfff) + + stw_p(p++, 0x2800); stw_p(p++, 0x001c); + /* bc to_here */ + stw_p(p++, 0x8000); stw_p(p++, 0xc000); + /* nop */ + stw_p(p++, 0x8000); stw_p(p++, 0xc000); + /* nop */ + stw_p(p++, 0x8000); stw_p(p++, 0xc000); + /* nop */ + stw_p(p++, 0x8000); stw_p(p++, 0xc000); + /* nop */ + stw_p(p++, 0x8000); stw_p(p++, 0xc000); + /* nop */ + stw_p(p++, 0x8000); stw_p(p++, 0xc000); + /* nop */ + stw_p(p++, 0x8000); stw_p(p++, 0xc000); + /* nop */ + + /* to_here: */ + if (semihosting_get_argc()) { + /* Preserve a0 content as arguments have been passed */ + stw_p(p++, 0x8000); stw_p(p++, 0xc000); + /* nop */ + } else { + stw_p(p++, 0x0080); stw_p(p++, 0x0002); + /* li a0,2 */ + } + + stw_p(p++, 0xe3a0 | NM_HI1(ENVP_VADDR - 64)); + + stw_p(p++, NM_HI2(ENVP_VADDR - 64)); + /* lui sp,%hi(ENVP_VADDR - 64) */ + + stw_p(p++, 0x83bd); stw_p(p++, NM_LO(ENVP_VADDR - 64)); + /* ori sp,sp,%lo(ENVP_VADDR - 64) */ + + stw_p(p++, 0xe0a0 | NM_HI1(ENVP_VADDR)); + + stw_p(p++, NM_HI2(ENVP_VADDR)); + /* lui a1,%hi(ENVP_VADDR) */ + + stw_p(p++, 0x80a5); stw_p(p++, NM_LO(ENVP_VADDR)); + /* ori a1,a1,%lo(ENVP_VADDR) */ + + stw_p(p++, 0xe0c0 | NM_HI1(ENVP_VADDR + 8)); + + stw_p(p++, NM_HI2(ENVP_VADDR + 8)); + /* lui a2,%hi(ENVP_VADDR + 8) */ + + stw_p(p++, 0x80c6); stw_p(p++, NM_LO(ENVP_VADDR + 8)); + /* ori a2,a2,%lo(ENVP_VADDR + 8) */ + + stw_p(p++, 0xe0e0 | NM_HI1(loaderparams.ram_low_size)); + + stw_p(p++, NM_HI2(loaderparams.ram_low_size)); + /* lui a3,%hi(loaderparams.ram_low_size) */ + + stw_p(p++, 0x80e7); stw_p(p++, NM_LO(loaderparams.ram_low_size)); + /* ori a3,a3,%lo(loaderparams.ram_low_size) */ + + /* + * Load BAR registers as done by YAMON: + * + * - set up PCI0 I/O BARs from 0x18000000 to 0x181fffff + * - set up PCI0 MEM0 at 0x10000000, size 0x8000000 + * - set up PCI0 MEM1 at 0x18200000, size 0xbe00000 + * + */ + stw_p(p++, 0xe040); stw_p(p++, 0x0681); + /* lui t1, %hi(0xb4000000) */ + +#ifdef TARGET_WORDS_BIGENDIAN + + stw_p(p++, 0xe020); stw_p(p++, 0x0be1); + /* lui t0, %hi(0xdf000000) */ + + /* 0x68 corresponds to GT_ISD (from hw/mips/gt64xxx_pci.c) */ + stw_p(p++, 0x8422); stw_p(p++, 0x9068); + /* sw t0, 0x68(t1) */ + + stw_p(p++, 0xe040); stw_p(p++, 0x077d); + /* lui t1, %hi(0xbbe00000) */ + + stw_p(p++, 0xe020); stw_p(p++, 0x0801); + /* lui t0, %hi(0xc0000000) */ + + /* 0x48 corresponds to GT_PCI0IOLD */ + stw_p(p++, 0x8422); stw_p(p++, 0x9048); + /* sw t0, 0x48(t1) */ + + stw_p(p++, 0xe020); stw_p(p++, 0x0800); + /* lui t0, %hi(0x40000000) */ + + /* 0x50 corresponds to GT_PCI0IOHD */ + stw_p(p++, 0x8422); stw_p(p++, 0x9050); + /* sw t0, 0x50(t1) */ + + stw_p(p++, 0xe020); stw_p(p++, 0x0001); + /* lui t0, %hi(0x80000000) */ + + /* 0x58 corresponds to GT_PCI0M0LD */ + stw_p(p++, 0x8422); stw_p(p++, 0x9058); + /* sw t0, 0x58(t1) */ + + stw_p(p++, 0xe020); stw_p(p++, 0x07e0); + /* lui t0, %hi(0x3f000000) */ + + /* 0x60 corresponds to GT_PCI0M0HD */ + stw_p(p++, 0x8422); stw_p(p++, 0x9060); + /* sw t0, 0x60(t1) */ + + stw_p(p++, 0xe020); stw_p(p++, 0x0821); + /* lui t0, %hi(0xc1000000) */ + + /* 0x80 corresponds to GT_PCI0M1LD */ + stw_p(p++, 0x8422); stw_p(p++, 0x9080); + /* sw t0, 0x80(t1) */ + + stw_p(p++, 0xe020); stw_p(p++, 0x0bc0); + /* lui t0, %hi(0x5e000000) */ + +#else + + 
stw_p(p++, 0x0020); stw_p(p++, 0x00df); + /* addiu[32] t0, $0, 0xdf */ + + /* 0x68 corresponds to GT_ISD */ + stw_p(p++, 0x8422); stw_p(p++, 0x9068); + /* sw t0, 0x68(t1) */ + + /* Use kseg2 remapped address 0x1be00000 */ + stw_p(p++, 0xe040); stw_p(p++, 0x077d); + /* lui t1, %hi(0xbbe00000) */ + + stw_p(p++, 0x0020); stw_p(p++, 0x00c0); + /* addiu[32] t0, $0, 0xc0 */ + + /* 0x48 corresponds to GT_PCI0IOLD */ + stw_p(p++, 0x8422); stw_p(p++, 0x9048); + /* sw t0, 0x48(t1) */ + + stw_p(p++, 0x0020); stw_p(p++, 0x0040); + /* addiu[32] t0, $0, 0x40 */ + + /* 0x50 corresponds to GT_PCI0IOHD */ + stw_p(p++, 0x8422); stw_p(p++, 0x9050); + /* sw t0, 0x50(t1) */ + + stw_p(p++, 0x0020); stw_p(p++, 0x0080); + /* addiu[32] t0, $0, 0x80 */ + + /* 0x58 corresponds to GT_PCI0M0LD */ + stw_p(p++, 0x8422); stw_p(p++, 0x9058); + /* sw t0, 0x58(t1) */ + + stw_p(p++, 0x0020); stw_p(p++, 0x003f); + /* addiu[32] t0, $0, 0x3f */ + + /* 0x60 corresponds to GT_PCI0M0HD */ + stw_p(p++, 0x8422); stw_p(p++, 0x9060); + /* sw t0, 0x60(t1) */ + + stw_p(p++, 0x0020); stw_p(p++, 0x00c1); + /* addiu[32] t0, $0, 0xc1 */ + + /* 0x80 corresponds to GT_PCI0M1LD */ + stw_p(p++, 0x8422); stw_p(p++, 0x9080); + /* sw t0, 0x80(t1) */ + + stw_p(p++, 0x0020); stw_p(p++, 0x005e); + /* addiu[32] t0, $0, 0x5e */ + +#endif + + /* 0x88 corresponds to GT_PCI0M1HD */ + stw_p(p++, 0x8422); stw_p(p++, 0x9088); + /* sw t0, 0x88(t1) */ + + stw_p(p++, 0xe320 | NM_HI1(kernel_entry)); + + stw_p(p++, NM_HI2(kernel_entry)); + /* lui t9,%hi(kernel_entry) */ + + stw_p(p++, 0x8339); stw_p(p++, NM_LO(kernel_entry)); + /* ori t9,t9,%lo(kernel_entry) */ + + stw_p(p++, 0x4bf9); stw_p(p++, 0x0000); + /* jalrc t8 */ +} + +/* + * ROM and pseudo bootloader + * + * The following code implements a very very simple bootloader. It first + * loads the registers a0 to a3 to the values expected by the OS, and + * then jump at the kernel address. + * + * The bootloader should pass the locations of the kernel arguments and + * environment variables tables. Those tables contain the 32-bit address + * of NULL terminated strings. The environment variables table should be + * terminated by a NULL address. + * + * For a simpler implementation, the number of kernel arguments is fixed + * to two (the name of the kernel and the command line), and the two + * tables are actually the same one. 
+static void write_bootloader(uint8_t *base, uint64_t run_addr,
+                             uint64_t kernel_entry)
+{
+    uint32_t *p;
+
+    /* Small bootloader */
+    p = (uint32_t *)base;
+
+    stl_p(p++, 0x08000000 |                  /* j 0x1fc00580 */
+          ((run_addr + 0x580) & 0x0fffffff) >> 2);
+    stl_p(p++, 0x00000000);                  /* nop */
+
+    /* YAMON service vector */
+    stl_p(base + 0x500, run_addr + 0x0580);  /* start: */
+    stl_p(base + 0x504, run_addr + 0x083c);  /* print_count: */
+    stl_p(base + 0x520, run_addr + 0x0580);  /* start: */
+    stl_p(base + 0x52c, run_addr + 0x0800);  /* flush_cache: */
+    stl_p(base + 0x534, run_addr + 0x0808);  /* print: */
+    stl_p(base + 0x538, run_addr + 0x0800);  /* reg_cpu_isr: */
+    stl_p(base + 0x53c, run_addr + 0x0800);  /* unreg_cpu_isr: */
+    stl_p(base + 0x540, run_addr + 0x0800);  /* reg_ic_isr: */
+    stl_p(base + 0x544, run_addr + 0x0800);  /* unreg_ic_isr: */
+    stl_p(base + 0x548, run_addr + 0x0800);  /* reg_esr: */
+    stl_p(base + 0x54c, run_addr + 0x0800);  /* unreg_esr: */
+    stl_p(base + 0x550, run_addr + 0x0800);  /* getchar: */
+    stl_p(base + 0x554, run_addr + 0x0800);  /* syscon_read: */
+
+    /* Second part of the bootloader */
+    p = (uint32_t *) (base + 0x580);
+
+    if (semihosting_get_argc()) {
+        /* Preserve a0 content as arguments have been passed */
+        stl_p(p++, 0x00000000);              /* nop */
+    } else {
+        stl_p(p++, 0x24040002);              /* addiu a0, zero, 2 */
+    }
+
+    /* lui sp, high(ENVP_VADDR) */
+    stl_p(p++, 0x3c1d0000 | (((ENVP_VADDR - 64) >> 16) & 0xffff));
+    /* ori sp, sp, low(ENVP_VADDR) */
+    stl_p(p++, 0x37bd0000 | ((ENVP_VADDR - 64) & 0xffff));
+    /* lui a1, high(ENVP_VADDR) */
+    stl_p(p++, 0x3c050000 | ((ENVP_VADDR >> 16) & 0xffff));
+    /* ori a1, a1, low(ENVP_VADDR) */
+    stl_p(p++, 0x34a50000 | (ENVP_VADDR & 0xffff));
+    /* lui a2, high(ENVP_VADDR + 8) */
+    stl_p(p++, 0x3c060000 | (((ENVP_VADDR + 8) >> 16) & 0xffff));
+    /* ori a2, a2, low(ENVP_VADDR + 8) */
+    stl_p(p++, 0x34c60000 | ((ENVP_VADDR + 8) & 0xffff));
+    /* lui a3, high(ram_low_size) */
+    stl_p(p++, 0x3c070000 | (loaderparams.ram_low_size >> 16));
+    /* ori a3, a3, low(ram_low_size) */
+    stl_p(p++, 0x34e70000 | (loaderparams.ram_low_size & 0xffff));
+
+    /* Load BAR registers as done by YAMON */
+    stl_p(p++, 0x3c09b400);                  /* lui t1, 0xb400 */
+
+#ifdef TARGET_WORDS_BIGENDIAN
+    stl_p(p++, 0x3c08df00);                  /* lui t0, 0xdf00 */
+#else
+    stl_p(p++, 0x340800df);                  /* ori t0, r0, 0x00df */
+#endif
+    stl_p(p++, 0xad280068);                  /* sw t0, 0x0068(t1) */
+
+    stl_p(p++, 0x3c09bbe0);                  /* lui t1, 0xbbe0 */
+
+#ifdef TARGET_WORDS_BIGENDIAN
+    stl_p(p++, 0x3c08c000);                  /* lui t0, 0xc000 */
+#else
+    stl_p(p++, 0x340800c0);                  /* ori t0, r0, 0x00c0 */
+#endif
+    stl_p(p++, 0xad280048);                  /* sw t0, 0x0048(t1) */
+#ifdef TARGET_WORDS_BIGENDIAN
+    stl_p(p++, 0x3c084000);                  /* lui t0, 0x4000 */
+#else
+    stl_p(p++, 0x34080040);                  /* ori t0, r0, 0x0040 */
+#endif
+    stl_p(p++, 0xad280050);                  /* sw t0, 0x0050(t1) */
+
+#ifdef TARGET_WORDS_BIGENDIAN
+    stl_p(p++, 0x3c088000);                  /* lui t0, 0x8000 */
+#else
+    stl_p(p++, 0x34080080);                  /* ori t0, r0, 0x0080 */
+#endif
+    stl_p(p++, 0xad280058);                  /* sw t0, 0x0058(t1) */
+#ifdef TARGET_WORDS_BIGENDIAN
+    stl_p(p++, 0x3c083f00);                  /* lui t0, 0x3f00 */
+#else
+    stl_p(p++, 0x3408003f);                  /* ori t0, r0, 0x003f */
+#endif
+    stl_p(p++, 0xad280060);                  /* sw t0, 0x0060(t1) */
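
The TARGET_WORDS_BIGENDIAN arms above emit different instructions, but they program the same GT-64120 register bytes: the big-endian build stores a value that differs from the little-endian one by exactly a 32-bit byte swap, matching the bi-endian firmware handling noted later in this file. A minimal standalone check of that relationship (a sketch, not part of the patch):

    #include <assert.h>
    #include <stdint.h>

    static uint32_t bswap32(uint32_t v)
    {
        return (v >> 24) | ((v >> 8) & 0x0000ff00) |
               ((v << 8) & 0x00ff0000) | (v << 24);
    }

    int main(void)
    {
        /* BE arm: lui t0, 0xdf00 -> 0xdf000000; LE arm: ori t0, r0, 0xdf */
        assert(bswap32(0xdf000000u) == 0x000000dfu);
        /* and likewise for the other BAR values programmed here */
        assert(bswap32(0xc0000000u) == 0x000000c0u);
        assert(bswap32(0x3f000000u) == 0x0000003fu);
        return 0;
    }
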
+
+#ifdef TARGET_WORDS_BIGENDIAN
+    stl_p(p++, 0x3c08c100);                  /* lui t0, 0xc100 */
+#else
+    stl_p(p++, 0x340800c1);                  /* ori t0, r0, 0x00c1 */
+#endif
+    stl_p(p++, 0xad280080);                  /* sw t0, 0x0080(t1) */
+#ifdef TARGET_WORDS_BIGENDIAN
+    stl_p(p++, 0x3c085e00);                  /* lui t0, 0x5e00 */
+#else
+    stl_p(p++, 0x3408005e);                  /* ori t0, r0, 0x005e */
+#endif
+    stl_p(p++, 0xad280088);                  /* sw t0, 0x0088(t1) */
+
+    /* Jump to kernel code */
+    stl_p(p++, 0x3c1f0000 |
+          ((kernel_entry >> 16) & 0xffff));  /* lui ra, high(kernel_entry) */
+    stl_p(p++, 0x37ff0000 |
+          (kernel_entry & 0xffff));          /* ori ra, ra, low(kernel_entry) */
+    stl_p(p++, 0x03e00009);                  /* jalr ra */
+    stl_p(p++, 0x00000000);                  /* nop */
+
+    /* YAMON subroutines */
+    p = (uint32_t *) (base + 0x800);
+    stl_p(p++, 0x03e00009);                  /* jalr ra */
+    stl_p(p++, 0x24020000);                  /* li v0,0 */
+    /* 808 YAMON print */
+    stl_p(p++, 0x03e06821);                  /* move t5,ra */
+    stl_p(p++, 0x00805821);                  /* move t3,a0 */
+    stl_p(p++, 0x00a05021);                  /* move t2,a1 */
+    stl_p(p++, 0x91440000);                  /* lbu a0,0(t2) */
+    stl_p(p++, 0x254a0001);                  /* addiu t2,t2,1 */
+    stl_p(p++, 0x10800005);                  /* beqz a0,834 */
+    stl_p(p++, 0x00000000);                  /* nop */
+    stl_p(p++, 0x0ff0021c);                  /* jal 870 */
+    stl_p(p++, 0x00000000);                  /* nop */
+    stl_p(p++, 0x1000fff9);                  /* b 814 */
+    stl_p(p++, 0x00000000);                  /* nop */
+    stl_p(p++, 0x01a00009);                  /* jalr t5 */
+    stl_p(p++, 0x01602021);                  /* move a0,t3 */
+    /* 0x83c YAMON print_count */
+    stl_p(p++, 0x03e06821);                  /* move t5,ra */
+    stl_p(p++, 0x00805821);                  /* move t3,a0 */
+    stl_p(p++, 0x00a05021);                  /* move t2,a1 */
+    stl_p(p++, 0x00c06021);                  /* move t4,a2 */
+    stl_p(p++, 0x91440000);                  /* lbu a0,0(t2) */
+    stl_p(p++, 0x0ff0021c);                  /* jal 870 */
+    stl_p(p++, 0x00000000);                  /* nop */
+    stl_p(p++, 0x254a0001);                  /* addiu t2,t2,1 */
+    stl_p(p++, 0x258cffff);                  /* addiu t4,t4,-1 */
+    stl_p(p++, 0x1580fffa);                  /* bnez t4,84c */
+    stl_p(p++, 0x00000000);                  /* nop */
+    stl_p(p++, 0x01a00009);                  /* jalr t5 */
+    stl_p(p++, 0x01602021);                  /* move a0,t3 */
+    /* 0x870 */
+    stl_p(p++, 0x3c08b800);                  /* lui t0,0xb800 */
+    stl_p(p++, 0x350803f8);                  /* ori t0,t0,0x3f8 */
+    stl_p(p++, 0x91090005);                  /* lbu t1,5(t0) */
+    stl_p(p++, 0x00000000);                  /* nop */
+    stl_p(p++, 0x31290040);                  /* andi t1,t1,0x40 */
+    stl_p(p++, 0x1120fffc);                  /* beqz t1,878 */
+    stl_p(p++, 0x00000000);                  /* nop */
+    stl_p(p++, 0x03e00009);                  /* jalr ra */
+    stl_p(p++, 0xa1040000);                  /* sb a0,0(t0) */
+
+}
+
+static void GCC_FMT_ATTR(3, 4) prom_set(uint32_t *prom_buf, int index,
+                                        const char *string, ...)
+{ + va_list ap; + uint32_t table_addr; + + if (index >= ENVP_NB_ENTRIES) { + return; + } + + if (string == NULL) { + prom_buf[index] = 0; + return; + } + + table_addr = sizeof(uint32_t) * ENVP_NB_ENTRIES + index * ENVP_ENTRY_SIZE; + prom_buf[index] = tswap32(ENVP_VADDR + table_addr); + + va_start(ap, string); + vsnprintf((char *)prom_buf + table_addr, ENVP_ENTRY_SIZE, string, ap); + va_end(ap); +} + +/* Kernel */ +static uint64_t load_kernel(void) +{ + uint64_t kernel_entry, kernel_high, initrd_size; + long kernel_size; + ram_addr_t initrd_offset; + int big_endian; + uint32_t *prom_buf; + long prom_size; + int prom_index = 0; + uint64_t (*xlate_to_kseg0) (void *opaque, uint64_t addr); + +#ifdef TARGET_WORDS_BIGENDIAN + big_endian = 1; +#else + big_endian = 0; +#endif + + kernel_size = load_elf(loaderparams.kernel_filename, NULL, + cpu_mips_kseg0_to_phys, NULL, + &kernel_entry, NULL, + &kernel_high, NULL, big_endian, EM_MIPS, + 1, 0); + if (kernel_size < 0) { + error_report("could not load kernel '%s': %s", + loaderparams.kernel_filename, + load_elf_strerror(kernel_size)); + exit(1); + } + + /* Check where the kernel has been linked */ + if (kernel_entry & 0x80000000ll) { + if (kvm_enabled()) { + error_report("KVM guest kernels must be linked in useg. " + "Did you forget to enable CONFIG_KVM_GUEST?"); + exit(1); + } + + xlate_to_kseg0 = cpu_mips_phys_to_kseg0; + } else { + /* if kernel entry is in useg it is probably a KVM T&E kernel */ + mips_um_ksegs_enable(); + + xlate_to_kseg0 = cpu_mips_kvm_um_phys_to_kseg0; + } + + /* load initrd */ + initrd_size = 0; + initrd_offset = 0; + if (loaderparams.initrd_filename) { + initrd_size = get_image_size(loaderparams.initrd_filename); + if (initrd_size > 0) { + /* + * The kernel allocates the bootmap memory in the low memory after + * the initrd. It takes at most 128kiB for 2GB RAM and 4kiB + * pages. + */ + initrd_offset = ROUND_UP(loaderparams.ram_low_size + - (initrd_size + 128 * KiB), + INITRD_PAGE_SIZE); + if (kernel_high >= initrd_offset) { + error_report("memory too small for initial ram disk '%s'", + loaderparams.initrd_filename); + exit(1); + } + initrd_size = load_image_targphys(loaderparams.initrd_filename, + initrd_offset, + loaderparams.ram_size - initrd_offset); + } + if (initrd_size == (target_ulong) -1) { + error_report("could not load initial ram disk '%s'", + loaderparams.initrd_filename); + exit(1); + } + } + + /* Setup prom parameters. 
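
prom_set() above builds a table whose first ENVP_NB_ENTRIES words are guest pointers and whose remainder holds fixed-size string slots; entry i's pointer targets slot i at ENVP_VADDR + table_addr. A standalone sketch of that layout (an illustration only; the ENVP_* values are assumptions matching the definitions earlier in malta.c, and the pointer is kept in host byte order here, whereas prom_set() stores it target-endian via tswap32()):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define ENVP_VADDR      0x80002000u /* assumed: kseg0 alias of the PROM area */
    #define ENVP_NB_ENTRIES 16          /* assumed, as defined earlier in malta.c */
    #define ENVP_ENTRY_SIZE 256         /* assumed, as defined earlier in malta.c */

    int main(void)
    {
        /* pointer words first, then the fixed-size string slots */
        uint32_t prom[ENVP_NB_ENTRIES + ENVP_NB_ENTRIES * ENVP_ENTRY_SIZE / 4];
        int index = 1;                  /* e.g. the command-line entry */

        uint32_t table_addr = sizeof(uint32_t) * ENVP_NB_ENTRIES
                              + index * ENVP_ENTRY_SIZE;
        prom[index] = ENVP_VADDR + table_addr;  /* guest-visible pointer */
        strcpy((char *)prom + table_addr, "console=ttyS0");

        printf("entry %d -> guest 0x%08x: \"%s\"\n",
               index, (unsigned)prom[index], (char *)prom + table_addr);
        return 0;
    }
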
*/
+    prom_size = ENVP_NB_ENTRIES * (sizeof(int32_t) + ENVP_ENTRY_SIZE);
+    prom_buf = g_malloc(prom_size);
+
+    prom_set(prom_buf, prom_index++, "%s", loaderparams.kernel_filename);
+    if (initrd_size > 0) {
+        prom_set(prom_buf, prom_index++,
+                 "rd_start=0x%" PRIx64 " rd_size=%" PRId64 " %s",
+                 xlate_to_kseg0(NULL, initrd_offset),
+                 initrd_size, loaderparams.kernel_cmdline);
+    } else {
+        prom_set(prom_buf, prom_index++, "%s", loaderparams.kernel_cmdline);
+    }
+
+    prom_set(prom_buf, prom_index++, "memsize");
+    prom_set(prom_buf, prom_index++, "%u", loaderparams.ram_low_size);
+
+    prom_set(prom_buf, prom_index++, "ememsize");
+    prom_set(prom_buf, prom_index++, "%u", loaderparams.ram_size);
+
+    prom_set(prom_buf, prom_index++, "modetty0");
+    prom_set(prom_buf, prom_index++, "38400n8r");
+    prom_set(prom_buf, prom_index++, NULL);
+
+    rom_add_blob_fixed("prom", prom_buf, prom_size, ENVP_PADDR);
+
+    g_free(prom_buf);
+    return kernel_entry;
+}
+
+static void malta_mips_config(MIPSCPU *cpu)
+{
+    MachineState *ms = MACHINE(qdev_get_machine());
+    unsigned int smp_cpus = ms->smp.cpus;
+    CPUMIPSState *env = &cpu->env;
+    CPUState *cs = CPU(cpu);
+
+    if (ase_mt_available(env)) {
+        env->mvp->CP0_MVPConf0 = deposit32(env->mvp->CP0_MVPConf0,
+                                           CP0MVPC0_PTC, 8,
+                                           smp_cpus * cs->nr_threads - 1);
+        env->mvp->CP0_MVPConf0 = deposit32(env->mvp->CP0_MVPConf0,
+                                           CP0MVPC0_PVPE, 4, smp_cpus - 1);
+    }
+}
+
+static void main_cpu_reset(void *opaque)
+{
+    MIPSCPU *cpu = opaque;
+    CPUMIPSState *env = &cpu->env;
+
+    cpu_reset(CPU(cpu));
+
+    /*
+     * The bootloader does not need to be rewritten as it is located in a
+     * read-only location. The kernel location and the arguments table
+     * location do not change.
+     */
+    if (loaderparams.kernel_filename) {
+        env->CP0_Status &= ~(1 << CP0St_ERL);
+    }
+
+    malta_mips_config(cpu);
+
+    if (kvm_enabled()) {
+        /* Start running from the bootloader we wrote to end of RAM */
+        env->active_tc.PC = 0x40000000 + loaderparams.ram_low_size;
+    }
+}
+
+static void create_cpu_without_cps(MachineState *ms, MaltaState *s,
+                                   qemu_irq *cbus_irq, qemu_irq *i8259_irq)
+{
+    CPUMIPSState *env;
+    MIPSCPU *cpu;
+    int i;
+
+    for (i = 0; i < ms->smp.cpus; i++) {
+        cpu = mips_cpu_create_with_clock(ms->cpu_type, s->cpuclk);
+
+        /* Init internal devices */
+        cpu_mips_irq_init_cpu(cpu);
+        cpu_mips_clock_init(cpu);
+        qemu_register_reset(main_cpu_reset, cpu);
+    }
+
+    cpu = MIPS_CPU(first_cpu);
+    env = &cpu->env;
+    *i8259_irq = env->irq[2];
+    *cbus_irq = env->irq[4];
+}
+
+static void create_cps(MachineState *ms, MaltaState *s,
+                       qemu_irq *cbus_irq, qemu_irq *i8259_irq)
+{
+    object_initialize_child(OBJECT(s), "cps", &s->cps, TYPE_MIPS_CPS);
+    object_property_set_str(OBJECT(&s->cps), "cpu-type", ms->cpu_type,
+                            &error_fatal);
+    object_property_set_int(OBJECT(&s->cps), "num-vp", ms->smp.cpus,
+                            &error_fatal);
+    qdev_connect_clock_in(DEVICE(&s->cps), "clk-in", s->cpuclk);
+    sysbus_realize(SYS_BUS_DEVICE(&s->cps), &error_fatal);
+
+    sysbus_mmio_map_overlap(SYS_BUS_DEVICE(&s->cps), 0, 0, 1);
+
+    *i8259_irq = get_cps_irq(&s->cps, 3);
+    *cbus_irq = NULL;
+}
+
+static void mips_create_cpu(MachineState *ms, MaltaState *s,
+                            qemu_irq *cbus_irq, qemu_irq *i8259_irq)
+{
+    if ((ms->smp.cpus > 1) && cpu_type_supports_cps_smp(ms->cpu_type)) {
+        create_cps(ms, s, cbus_irq, i8259_irq);
+    } else {
+        create_cpu_without_cps(ms, s, cbus_irq, i8259_irq);
+    }
+}
+
+static
+void mips_malta_init(MachineState *machine)
+{
+    ram_addr_t ram_size = machine->ram_size;
+    ram_addr_t ram_low_size;
+    const char *kernel_filename =
machine->kernel_filename; + const char *kernel_cmdline = machine->kernel_cmdline; + const char *initrd_filename = machine->initrd_filename; + char *filename; + PFlashCFI01 *fl; + MemoryRegion *system_memory = get_system_memory(); + MemoryRegion *ram_low_preio = g_new(MemoryRegion, 1); + MemoryRegion *ram_low_postio; + MemoryRegion *bios, *bios_copy = g_new(MemoryRegion, 1); + const size_t smbus_eeprom_size = 8 * 256; + uint8_t *smbus_eeprom_buf = g_malloc0(smbus_eeprom_size); + uint64_t kernel_entry, bootloader_run_addr; + PCIBus *pci_bus; + ISABus *isa_bus; + qemu_irq cbus_irq, i8259_irq; + I2CBus *smbus; + DriveInfo *dinfo; + int fl_idx = 0; + int be; + MaltaState *s; + DeviceState *dev; + + s = MIPS_MALTA(qdev_new(TYPE_MIPS_MALTA)); + sysbus_realize_and_unref(SYS_BUS_DEVICE(s), &error_fatal); + + /* create CPU */ + mips_create_cpu(machine, s, &cbus_irq, &i8259_irq); + + /* allocate RAM */ + if (ram_size > 2 * GiB) { + error_report("Too much memory for this machine: %" PRId64 "MB," + " maximum 2048MB", ram_size / MiB); + exit(1); + } + + /* register RAM at high address where it is undisturbed by IO */ + memory_region_add_subregion(system_memory, 0x80000000, machine->ram); + + /* alias for pre IO hole access */ + memory_region_init_alias(ram_low_preio, NULL, "mips_malta_low_preio.ram", + machine->ram, 0, MIN(ram_size, 256 * MiB)); + memory_region_add_subregion(system_memory, 0, ram_low_preio); + + /* alias for post IO hole access, if there is enough RAM */ + if (ram_size > 512 * MiB) { + ram_low_postio = g_new(MemoryRegion, 1); + memory_region_init_alias(ram_low_postio, NULL, + "mips_malta_low_postio.ram", + machine->ram, 512 * MiB, + ram_size - 512 * MiB); + memory_region_add_subregion(system_memory, 512 * MiB, + ram_low_postio); + } + +#ifdef TARGET_WORDS_BIGENDIAN + be = 1; +#else + be = 0; +#endif + + /* FPGA */ + + /* The CBUS UART is attached to the MIPS CPU INT2 pin, ie interrupt 4 */ + malta_fpga_init(system_memory, FPGA_ADDRESS, cbus_irq, serial_hd(2)); + + /* Load firmware in flash / BIOS. */ + dinfo = drive_get(IF_PFLASH, 0, fl_idx); + fl = pflash_cfi01_register(FLASH_ADDRESS, "mips_malta.bios", + FLASH_SIZE, + dinfo ? blk_by_legacy_dinfo(dinfo) : NULL, + 65536, + 4, 0x0000, 0x0000, 0x0000, 0x0000, be); + bios = pflash_cfi01_get_memory(fl); + fl_idx++; + if (kernel_filename) { + ram_low_size = MIN(ram_size, 256 * MiB); + /* For KVM we reserve 1MB of RAM for running bootloader */ + if (kvm_enabled()) { + ram_low_size -= 0x100000; + bootloader_run_addr = cpu_mips_kvm_um_phys_to_kseg0(NULL, ram_low_size); + } else { + bootloader_run_addr = cpu_mips_phys_to_kseg0(NULL, RESET_ADDRESS); + } + + /* Write a small bootloader to the flash location. 
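
The three mappings just created give Malta its low-memory layout: RAM proper lives at 0x80000000, its first 256 MiB are aliased at address 0, and RAM beyond 512 MiB is aliased in place, leaving the 256 MiB IO hole in between. A hypothetical helper restating that decode (a sketch; malta_low_ram_offset() is not a QEMU function):

    #include <assert.h>
    #include <stdint.h>

    #define MiB (1024 * 1024)

    /* Returns the RAM offset backing a low physical address, or -1 for the
       IO hole / unbacked space, mirroring the two aliases set up above. */
    static int64_t malta_low_ram_offset(uint64_t addr, uint64_t ram_size)
    {
        uint64_t pre_io = ram_size < 256 * MiB ? ram_size : 256 * MiB;

        if (addr < pre_io) {
            return addr;              /* "mips_malta_low_preio.ram" alias */
        }
        if (addr >= 512 * (uint64_t)MiB && addr < ram_size) {
            return addr;              /* "mips_malta_low_postio.ram" alias */
        }
        return -1;                    /* IO hole, or beyond the aliases */
    }

    int main(void)
    {
        assert(malta_low_ram_offset(0x00000000, 2048ULL * MiB) == 0);
        assert(malta_low_ram_offset(0x10000000, 2048ULL * MiB) == -1); /* hole */
        assert(malta_low_ram_offset(0x20000000, 2048ULL * MiB) == 0x20000000);
        return 0;
    }
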
*/ + loaderparams.ram_size = ram_size; + loaderparams.ram_low_size = ram_low_size; + loaderparams.kernel_filename = kernel_filename; + loaderparams.kernel_cmdline = kernel_cmdline; + loaderparams.initrd_filename = initrd_filename; + kernel_entry = load_kernel(); + + if (!cpu_type_supports_isa(machine->cpu_type, ISA_NANOMIPS32)) { + write_bootloader(memory_region_get_ram_ptr(bios), + bootloader_run_addr, kernel_entry); + } else { + write_bootloader_nanomips(memory_region_get_ram_ptr(bios), + bootloader_run_addr, kernel_entry); + } + if (kvm_enabled()) { + /* Write the bootloader code @ the end of RAM, 1MB reserved */ + write_bootloader(memory_region_get_ram_ptr(ram_low_preio) + + ram_low_size, + bootloader_run_addr, kernel_entry); + } + } else { + target_long bios_size = FLASH_SIZE; + /* The flash region isn't executable from a KVM guest */ + if (kvm_enabled()) { + error_report("KVM enabled but no -kernel argument was specified. " + "Booting from flash is not supported with KVM."); + exit(1); + } + /* Load firmware from flash. */ + if (!dinfo) { + /* Load a BIOS image. */ + filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, + machine->firmware ?: BIOS_FILENAME); + if (filename) { + bios_size = load_image_targphys(filename, FLASH_ADDRESS, + BIOS_SIZE); + g_free(filename); + } else { + bios_size = -1; + } + if ((bios_size < 0 || bios_size > BIOS_SIZE) && + machine->firmware && !qtest_enabled()) { + error_report("Could not load MIPS bios '%s'", machine->firmware); + exit(1); + } + } + /* + * In little endian mode the 32bit words in the bios are swapped, + * a neat trick which allows bi-endian firmware. + */ +#ifndef TARGET_WORDS_BIGENDIAN + { + uint32_t *end, *addr; + const size_t swapsize = MIN(bios_size, 0x3e0000); + addr = rom_ptr(FLASH_ADDRESS, swapsize); + if (!addr) { + addr = memory_region_get_ram_ptr(bios); + } + end = (void *)addr + swapsize; + while (addr < end) { + bswap32s(addr); + addr++; + } + } +#endif + } + + /* + * Map the BIOS at a 2nd physical location, as on the real board. + * Copy it so that we can patch in the MIPS revision, which cannot be + * handled by an overlapping region as the resulting ROM code subpage + * regions are not executable. + */ + memory_region_init_ram(bios_copy, NULL, "bios.1fc", BIOS_SIZE, + &error_fatal); + if (!rom_copy(memory_region_get_ram_ptr(bios_copy), + FLASH_ADDRESS, BIOS_SIZE)) { + memcpy(memory_region_get_ram_ptr(bios_copy), + memory_region_get_ram_ptr(bios), BIOS_SIZE); + } + memory_region_set_readonly(bios_copy, true); + memory_region_add_subregion(system_memory, RESET_ADDRESS, bios_copy); + + /* Board ID = 0x420 (Malta Board with CoreLV) */ + stl_p(memory_region_get_ram_ptr(bios_copy) + 0x10, 0x00000420); + + /* Northbridge */ + pci_bus = gt64120_register(s->i8259); + /* + * The whole address space decoded by the GT-64120A doesn't generate + * exception when accessing invalid memory. Create an empty slot to + * emulate this feature. 
+ */ + empty_slot_init("GT64120", 0, 0x20000000); + + /* Southbridge */ + dev = piix4_create(pci_bus, &isa_bus, &smbus); + + /* Interrupt controller */ + qdev_connect_gpio_out_named(dev, "intr", 0, i8259_irq); + for (int i = 0; i < ISA_NUM_IRQS; i++) { + s->i8259[i] = qdev_get_gpio_in_named(dev, "isa", i); + } + + /* generate SPD EEPROM data */ + generate_eeprom_spd(&smbus_eeprom_buf[0 * 256], ram_size); + generate_eeprom_serial(&smbus_eeprom_buf[6 * 256]); + smbus_eeprom_init(smbus, 8, smbus_eeprom_buf, smbus_eeprom_size); + g_free(smbus_eeprom_buf); + + /* Super I/O: SMS FDC37M817 */ + isa_create_simple(isa_bus, TYPE_FDC37M81X_SUPERIO); + + /* Network card */ + network_init(pci_bus); + + /* Optional PCI video card */ + pci_vga_init(pci_bus); +} + +static void mips_malta_instance_init(Object *obj) +{ + MaltaState *s = MIPS_MALTA(obj); + + s->cpuclk = qdev_init_clock_out(DEVICE(obj), "cpu-refclk"); + clock_set_hz(s->cpuclk, 320000000); /* 320 MHz */ +} + +static const TypeInfo mips_malta_device = { + .name = TYPE_MIPS_MALTA, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(MaltaState), + .instance_init = mips_malta_instance_init, +}; + +static void mips_malta_machine_init(MachineClass *mc) +{ + mc->desc = "MIPS Malta Core LV"; + mc->init = mips_malta_init; + mc->block_default_type = IF_IDE; + mc->max_cpus = 16; + mc->is_default = true; +#ifdef TARGET_MIPS64 + mc->default_cpu_type = MIPS_CPU_TYPE_NAME("20Kc"); +#else + mc->default_cpu_type = MIPS_CPU_TYPE_NAME("24Kf"); +#endif + mc->default_ram_id = "mips_malta.ram"; +} + +DEFINE_MACHINE("malta", mips_malta_machine_init) + +static void mips_malta_register_types(void) +{ + type_register_static(&mips_malta_device); +} + +type_init(mips_malta_register_types) diff --git a/hw/mips/meson.build b/hw/mips/meson.build new file mode 100644 index 000000000..dd0101ad4 --- /dev/null +++ b/hw/mips/meson.build @@ -0,0 +1,15 @@ +mips_ss = ss.source_set() +mips_ss.add(files('bootloader.c', 'mips_int.c')) +mips_ss.add(when: 'CONFIG_FW_CFG_MIPS', if_true: files('fw_cfg.c')) +mips_ss.add(when: 'CONFIG_LOONGSON3V', if_true: files('loongson3_bootp.c', 'loongson3_virt.c')) +mips_ss.add(when: 'CONFIG_MALTA', if_true: files('gt64xxx_pci.c', 'malta.c')) +mips_ss.add(when: 'CONFIG_MIPS_CPS', if_true: files('cps.c')) + +if 'CONFIG_TCG' in config_all +mips_ss.add(when: 'CONFIG_JAZZ', if_true: files('jazz.c')) +mips_ss.add(when: 'CONFIG_MIPSSIM', if_true: files('mipssim.c')) +mips_ss.add(when: 'CONFIG_FULOONG', if_true: files('fuloong2e.c')) +mips_ss.add(when: 'CONFIG_MIPS_BOSTON', if_true: [files('boston.c'), fdt]) +endif + +hw_arch += {'mips': mips_ss} diff --git a/hw/mips/mips_int.c b/hw/mips/mips_int.c new file mode 100644 index 000000000..2db5e10fe --- /dev/null +++ b/hw/mips/mips_int.c @@ -0,0 +1,88 @@ +/* + * QEMU MIPS interrupt support + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "qemu/main-loop.h" +#include "hw/irq.h" +#include "hw/mips/cpudevs.h" +#include "sysemu/kvm.h" +#include "kvm_mips.h" + +static void cpu_mips_irq_request(void *opaque, int irq, int level) +{ + MIPSCPU *cpu = opaque; + CPUMIPSState *env = &cpu->env; + CPUState *cs = CPU(cpu); + bool locked = false; + + if (irq < 0 || irq > 7) { + return; + } + + /* Make sure locking works even if BQL is already held by the caller */ + if (!qemu_mutex_iothread_locked()) { + locked = true; + qemu_mutex_lock_iothread(); + } + + if (level) { + env->CP0_Cause |= 1 << (irq + CP0Ca_IP); + } else { + env->CP0_Cause &= ~(1 << (irq + CP0Ca_IP)); + } + + if (kvm_enabled() && (irq == 2 || irq == 3)) { + kvm_mips_set_interrupt(cpu, irq, level); + } + + if (env->CP0_Cause & CP0Ca_IP_mask) { + cpu_interrupt(cs, CPU_INTERRUPT_HARD); + } else { + cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD); + } + + if (locked) { + qemu_mutex_unlock_iothread(); + } +} + +void cpu_mips_irq_init_cpu(MIPSCPU *cpu) +{ + CPUMIPSState *env = &cpu->env; + qemu_irq *qi; + int i; + + qi = qemu_allocate_irqs(cpu_mips_irq_request, cpu, 8); + for (i = 0; i < 8; i++) { + env->irq[i] = qi[i]; + } + g_free(qi); +} + +void cpu_mips_soft_irq(CPUMIPSState *env, int irq, int level) +{ + if (irq < 0 || irq > 2) { + return; + } + + qemu_set_irq(env->irq[irq], level); +} diff --git a/hw/mips/mipssim.c b/hw/mips/mipssim.c new file mode 100644 index 000000000..2325e7e05 --- /dev/null +++ b/hw/mips/mipssim.c @@ -0,0 +1,246 @@ +/* + * QEMU/mipssim emulation + * + * Emulates a very simple machine model similar to the one used by the + * proprietary MIPS emulator. + * + * Copyright (c) 2007 Thiemo Seufer + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
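
cpu_mips_irq_request() above keeps the pending state in CP0 Cause.IP and raises or clears CPU_INTERRUPT_HARD from the aggregate mask. A standalone sketch of that bookkeeping (an illustration; CP0Ca_IP = 8 and the 0x0000ff00 IP mask follow QEMU's MIPS CPU headers):

    #include <assert.h>
    #include <stdint.h>

    #define CP0Ca_IP      8             /* Cause.IP field starts at bit 8 */
    #define CP0Ca_IP_mask 0x0000ff00

    int main(void)
    {
        uint32_t cause = 0;
        int irq = 2;                    /* e.g. the i8259 output on Malta */

        cause |= 1u << (irq + CP0Ca_IP);            /* level raised */
        assert(cause & CP0Ca_IP_mask);              /* -> interrupt pending */

        cause &= ~(1u << (irq + CP0Ca_IP));         /* level lowered */
        assert((cause & CP0Ca_IP_mask) == 0);       /* -> nothing pending */
        return 0;
    }
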
+ */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu-common.h" +#include "qemu/datadir.h" +#include "hw/clock.h" +#include "hw/mips/mips.h" +#include "hw/mips/cpudevs.h" +#include "hw/char/serial.h" +#include "hw/isa/isa.h" +#include "net/net.h" +#include "sysemu/sysemu.h" +#include "hw/boards.h" +#include "hw/mips/bios.h" +#include "hw/loader.h" +#include "elf.h" +#include "hw/sysbus.h" +#include "hw/qdev-properties.h" +#include "qemu/error-report.h" +#include "sysemu/qtest.h" +#include "sysemu/reset.h" + +static struct _loaderparams { + int ram_size; + const char *kernel_filename; + const char *kernel_cmdline; + const char *initrd_filename; +} loaderparams; + +typedef struct ResetData { + MIPSCPU *cpu; + uint64_t vector; +} ResetData; + +static uint64_t load_kernel(void) +{ + uint64_t entry, kernel_high, initrd_size; + long kernel_size; + ram_addr_t initrd_offset; + int big_endian; + +#ifdef TARGET_WORDS_BIGENDIAN + big_endian = 1; +#else + big_endian = 0; +#endif + + kernel_size = load_elf(loaderparams.kernel_filename, NULL, + cpu_mips_kseg0_to_phys, NULL, + &entry, NULL, + &kernel_high, NULL, big_endian, + EM_MIPS, 1, 0); + if (kernel_size < 0) { + error_report("could not load kernel '%s': %s", + loaderparams.kernel_filename, + load_elf_strerror(kernel_size)); + exit(1); + } + + /* load initrd */ + initrd_size = 0; + initrd_offset = 0; + if (loaderparams.initrd_filename) { + initrd_size = get_image_size(loaderparams.initrd_filename); + if (initrd_size > 0) { + initrd_offset = ROUND_UP(kernel_high, INITRD_PAGE_SIZE); + if (initrd_offset + initrd_size > loaderparams.ram_size) { + error_report("memory too small for initial ram disk '%s'", + loaderparams.initrd_filename); + exit(1); + } + initrd_size = load_image_targphys(loaderparams.initrd_filename, + initrd_offset, loaderparams.ram_size - initrd_offset); + } + if (initrd_size == (target_ulong) -1) { + error_report("could not load initial ram disk '%s'", + loaderparams.initrd_filename); + exit(1); + } + } + return entry; +} + +static void main_cpu_reset(void *opaque) +{ + ResetData *s = (ResetData *)opaque; + CPUMIPSState *env = &s->cpu->env; + + cpu_reset(CPU(s->cpu)); + env->active_tc.PC = s->vector & ~(target_ulong)1; + if (s->vector & 1) { + env->hflags |= MIPS_HFLAG_M16; + } +} + +static void mipsnet_init(int base, qemu_irq irq, NICInfo *nd) +{ + DeviceState *dev; + SysBusDevice *s; + + dev = qdev_new("mipsnet"); + qdev_set_nic_properties(dev, nd); + + s = SYS_BUS_DEVICE(dev); + sysbus_realize_and_unref(s, &error_fatal); + sysbus_connect_irq(s, 0, irq); + memory_region_add_subregion(get_system_io(), + base, + sysbus_mmio_get_region(s, 0)); +} + +static void +mips_mipssim_init(MachineState *machine) +{ + const char *kernel_filename = machine->kernel_filename; + const char *kernel_cmdline = machine->kernel_cmdline; + const char *initrd_filename = machine->initrd_filename; + char *filename; + MemoryRegion *address_space_mem = get_system_memory(); + MemoryRegion *isa = g_new(MemoryRegion, 1); + MemoryRegion *bios = g_new(MemoryRegion, 1); + Clock *cpuclk; + MIPSCPU *cpu; + CPUMIPSState *env; + ResetData *reset_info; + int bios_size; + + cpuclk = clock_new(OBJECT(machine), "cpu-refclk"); +#ifdef TARGET_MIPS64 + clock_set_hz(cpuclk, 6000000); /* 6 MHz */ +#else + clock_set_hz(cpuclk, 12000000); /* 12 MHz */ +#endif + + /* Init CPUs. 
*/ + cpu = mips_cpu_create_with_clock(machine->cpu_type, cpuclk); + env = &cpu->env; + + reset_info = g_malloc0(sizeof(ResetData)); + reset_info->cpu = cpu; + reset_info->vector = env->active_tc.PC; + qemu_register_reset(main_cpu_reset, reset_info); + + /* Allocate RAM. */ + memory_region_init_rom(bios, NULL, "mips_mipssim.bios", BIOS_SIZE, + &error_fatal); + + memory_region_add_subregion(address_space_mem, 0, machine->ram); + + /* Map the BIOS / boot exception handler. */ + memory_region_add_subregion(address_space_mem, 0x1fc00000LL, bios); + /* Load a BIOS / boot exception handler image. */ + filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, machine->firmware ?: BIOS_FILENAME); + if (filename) { + bios_size = load_image_targphys(filename, 0x1fc00000LL, BIOS_SIZE); + g_free(filename); + } else { + bios_size = -1; + } + if ((bios_size < 0 || bios_size > BIOS_SIZE) && + machine->firmware && !qtest_enabled()) { + /* Bail out if we have neither a kernel image nor boot vector code. */ + error_report("Could not load MIPS bios '%s'", machine->firmware); + exit(1); + } else { + /* We have a boot vector start address. */ + env->active_tc.PC = (target_long)(int32_t)0xbfc00000; + } + + if (kernel_filename) { + loaderparams.ram_size = machine->ram_size; + loaderparams.kernel_filename = kernel_filename; + loaderparams.kernel_cmdline = kernel_cmdline; + loaderparams.initrd_filename = initrd_filename; + reset_info->vector = load_kernel(); + } + + /* Init CPU internal devices. */ + cpu_mips_irq_init_cpu(cpu); + cpu_mips_clock_init(cpu); + + /* Register 64 KB of ISA IO space at 0x1fd00000. */ + memory_region_init_alias(isa, NULL, "isa_mmio", + get_system_io(), 0, 0x00010000); + memory_region_add_subregion(get_system_memory(), 0x1fd00000, isa); + + /* + * A single 16450 sits at offset 0x3f8. It is attached to + * MIPS CPU INT2, which is interrupt 4. + */ + if (serial_hd(0)) { + DeviceState *dev = qdev_new(TYPE_SERIAL_MM); + + qdev_prop_set_chr(dev, "chardev", serial_hd(0)); + qdev_prop_set_uint8(dev, "regshift", 0); + qdev_prop_set_uint8(dev, "endianness", DEVICE_LITTLE_ENDIAN); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, env->irq[4]); + sysbus_add_io(SYS_BUS_DEVICE(dev), 0x3f8, + sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 0)); + } + + if (nd_table[0].used) + /* MIPSnet uses the MIPS CPU INT0, which is interrupt 2. 
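
The boot vector set above, 0xbfc00000, is the kseg1 (uncached, unmapped) alias of physical 0x1fc00000, which is exactly where the BIOS region was mapped a few lines earlier. A quick check of that arithmetic (a sketch, not part of the patch):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t kseg1_base  = 0xa0000000; /* uncached window over low 512 MiB */
        uint32_t boot_vector = 0xbfc00000; /* env->active_tc.PC above */
        uint32_t bios_phys   = 0x1fc00000; /* BIOS mapping above */

        assert(boot_vector - kseg1_base == bios_phys);
        return 0;
    }
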
*/ + mipsnet_init(0x4200, env->irq[2], &nd_table[0]); +} + +static void mips_mipssim_machine_init(MachineClass *mc) +{ + mc->desc = "MIPS MIPSsim platform"; + mc->init = mips_mipssim_init; +#ifdef TARGET_MIPS64 + mc->default_cpu_type = MIPS_CPU_TYPE_NAME("5Kf"); +#else + mc->default_cpu_type = MIPS_CPU_TYPE_NAME("24Kf"); +#endif + mc->default_ram_id = "mips_mipssim.ram"; +} + +DEFINE_MACHINE("mipssim", mips_mipssim_machine_init) diff --git a/hw/mips/trace-events b/hw/mips/trace-events new file mode 100644 index 000000000..13ee731a4 --- /dev/null +++ b/hw/mips/trace-events @@ -0,0 +1,6 @@ +# gt64xxx_pci.c +gt64120_read(uint64_t addr, uint64_t value) "gt64120 read 0x%03"PRIx64" value:0x%08" PRIx64 +gt64120_write(uint64_t addr, uint64_t value) "gt64120 write 0x%03"PRIx64" value:0x%08" PRIx64 +gt64120_read_intreg(const char *regname, unsigned size, uint64_t value) "gt64120 read %s size:%u value:0x%08" PRIx64 +gt64120_write_intreg(const char *regname, unsigned size, uint64_t value) "gt64120 write %s size:%u value:0x%08" PRIx64 +gt64120_isd_remap(uint64_t from_length, uint64_t from_addr, uint64_t to_length, uint64_t to_addr) "ISD: 0x%08" PRIx64 "@0x%08" PRIx64 " -> 0x%08" PRIx64 "@0x%08" PRIx64 diff --git a/hw/mips/trace.h b/hw/mips/trace.h new file mode 100644 index 000000000..8d1fd7c9e --- /dev/null +++ b/hw/mips/trace.h @@ -0,0 +1 @@ +#include "trace/trace-hw_mips.h" diff --git a/hw/misc/Kconfig b/hw/misc/Kconfig new file mode 100644 index 000000000..507058d8b --- /dev/null +++ b/hw/misc/Kconfig @@ -0,0 +1,174 @@ +config APPLESMC + bool + depends on ISA_BUS + +config ARMSSE_CPUID + bool + +config ARMSSE_MHU + bool + +config ARMSSE_CPU_PWRCTRL + bool + +config ISA_DEBUG + bool + depends on ISA_BUS + +config SGA + bool + depends on ISA_BUS + +config ISA_TESTDEV + bool + default y if TEST_DEVICES + depends on ISA_BUS + +config PCI_TESTDEV + bool + default y if TEST_DEVICES + depends on PCI + +config EDU + bool + default y if TEST_DEVICES + depends on PCI && MSI_NONBROKEN + +config PCA9552 + bool + depends on I2C + +config PL310 + bool + +config INTEGRATOR_DEBUG + bool + +config A9SCU + bool + +config ARM11SCU + bool + +config MOS6522 + bool + +config MACIO + bool + select CUDA + select ESCC + select IDE_MACIO + select MAC_DBDMA + select MAC_NVRAM + select MOS6522 + +config IVSHMEM_DEVICE + bool + default y if PCI_DEVICES + depends on PCI && LINUX && IVSHMEM && MSI_NONBROKEN + +config ECCMEMCTL + bool + select ECC + +config IMX + bool + select PTIMER + select SSI + select USB_EHCI_SYSBUS + +config STM32F2XX_SYSCFG + bool + +config STM32F4XX_SYSCFG + bool + +config STM32F4XX_EXTI + bool + +config MIPS_ITU + bool + +config MPS2_FPGAIO + bool + select LED + +config MPS2_SCC + bool + select LED + +config TZ_MPC + bool + +config TZ_MSC + bool + +config TZ_PPC + bool + +config IOTKIT_SECCTL + bool + +config IOTKIT_SYSCTL + bool + +config IOTKIT_SYSINFO + bool + +config PVPANIC_COMMON + bool + +config PVPANIC_PCI + bool + default y if PCI_DEVICES + depends on PCI + select PVPANIC_COMMON + +config PVPANIC_ISA + bool + depends on ISA_BUS + select PVPANIC_COMMON + +config AUX + bool + select I2C + +config UNIMP + bool + +config LED + bool + +config MAC_VIA + bool + select MOS6522 + select ADB + +config AVR_POWER + bool + +config MCHP_PFSOC_DMC + bool + +config MCHP_PFSOC_IOSCB + bool + +config MCHP_PFSOC_SYSREG + bool + +config SIFIVE_TEST + bool + +config SIFIVE_E_PRCI + bool + +config SIFIVE_U_OTP + bool + +config SIFIVE_U_PRCI + bool + +config VIRT_CTRL + bool + +source macio/Kconfig diff --git 
a/hw/misc/a9scu.c b/hw/misc/a9scu.c new file mode 100644 index 000000000..a375ebc98 --- /dev/null +++ b/hw/misc/a9scu.c @@ -0,0 +1,153 @@ +/* + * Cortex-A9MPCore Snoop Control Unit (SCU) emulation. + * + * Copyright (c) 2009 CodeSourcery. + * Copyright (c) 2011 Linaro Limited. + * Written by Paul Brook, Peter Maydell. + * + * This code is licensed under the GPL. + */ + +#include "qemu/osdep.h" +#include "hw/misc/a9scu.h" +#include "hw/qdev-properties.h" +#include "migration/vmstate.h" +#include "qapi/error.h" +#include "qemu/log.h" +#include "qemu/module.h" + +#define A9_SCU_CPU_MAX 4 + +static uint64_t a9_scu_read(void *opaque, hwaddr offset, + unsigned size) +{ + A9SCUState *s = (A9SCUState *)opaque; + switch (offset) { + case 0x00: /* Control */ + return s->control; + case 0x04: /* Configuration */ + return (((1 << s->num_cpu) - 1) << 4) | (s->num_cpu - 1); + case 0x08: /* CPU Power Status */ + return s->status; + case 0x0c: /* Invalidate All Registers In Secure State */ + return 0; + case 0x40: /* Filtering Start Address Register */ + case 0x44: /* Filtering End Address Register */ + /* RAZ/WI, like an implementation with only one AXI master */ + return 0; + case 0x50: /* SCU Access Control Register */ + case 0x54: /* SCU Non-secure Access Control Register */ + /* unimplemented, fall through */ + default: + qemu_log_mask(LOG_UNIMP, "%s: Unsupported offset 0x%"HWADDR_PRIx"\n", + __func__, offset); + return 0; + } +} + +static void a9_scu_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + A9SCUState *s = (A9SCUState *)opaque; + + switch (offset) { + case 0x00: /* Control */ + s->control = value & 1; + break; + case 0x4: /* Configuration: RO */ + break; + case 0x08: case 0x09: case 0x0A: case 0x0B: /* Power Control */ + s->status = value; + break; + case 0x0c: /* Invalidate All Registers In Secure State */ + /* no-op as we do not implement caches */ + break; + case 0x40: /* Filtering Start Address Register */ + case 0x44: /* Filtering End Address Register */ + /* RAZ/WI, like an implementation with only one AXI master */ + break; + case 0x50: /* SCU Access Control Register */ + case 0x54: /* SCU Non-secure Access Control Register */ + /* unimplemented, fall through */ + default: + qemu_log_mask(LOG_UNIMP, "%s: Unsupported offset 0x%"HWADDR_PRIx + " value 0x%"PRIx64"\n", + __func__, offset, value); + break; + } +} + +static const MemoryRegionOps a9_scu_ops = { + .read = a9_scu_read, + .write = a9_scu_write, + .impl = { + .min_access_size = 4, + .max_access_size = 4, + }, + .valid = { + .min_access_size = 1, + .max_access_size = 4, + }, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void a9_scu_reset(DeviceState *dev) +{ + A9SCUState *s = A9_SCU(dev); + s->control = 0; +} + +static void a9_scu_realize(DeviceState *dev, Error **errp) +{ + A9SCUState *s = A9_SCU(dev); + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + + if (!s->num_cpu || s->num_cpu > A9_SCU_CPU_MAX) { + error_setg(errp, "Illegal CPU count: %u", s->num_cpu); + return; + } + + memory_region_init_io(&s->iomem, OBJECT(s), &a9_scu_ops, s, + "a9-scu", 0x100); + sysbus_init_mmio(sbd, &s->iomem); +} + +static const VMStateDescription vmstate_a9_scu = { + .name = "a9-scu", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(control, A9SCUState), + VMSTATE_UINT32(status, A9SCUState), + VMSTATE_END_OF_LIST() + } +}; + +static Property a9_scu_properties[] = { + DEFINE_PROP_UINT32("num-cpu", A9SCUState, num_cpu, 1), + DEFINE_PROP_END_OF_LIST(), +}; + +static void 
a9_scu_class_init(ObjectClass *klass, void *data)
+{
+    DeviceClass *dc = DEVICE_CLASS(klass);
+
+    device_class_set_props(dc, a9_scu_properties);
+    dc->vmsd = &vmstate_a9_scu;
+    dc->reset = a9_scu_reset;
+    dc->realize = a9_scu_realize;
+}
+
+static const TypeInfo a9_scu_info = {
+    .name = TYPE_A9_SCU,
+    .parent = TYPE_SYS_BUS_DEVICE,
+    .instance_size = sizeof(A9SCUState),
+    .class_init = a9_scu_class_init,
+};
+
+static void a9mp_register_types(void)
+{
+    type_register_static(&a9_scu_info);
+}
+
+type_init(a9mp_register_types)
diff --git a/hw/misc/allwinner-cpucfg.c b/hw/misc/allwinner-cpucfg.c
new file mode 100644
index 000000000..bbd33a7da
--- /dev/null
+++ b/hw/misc/allwinner-cpucfg.c
@@ -0,0 +1,282 @@
+/*
+ * Allwinner CPU Configuration Module emulation
+ *
+ * Copyright (C) 2019 Niek Linnenbank
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/units.h"
+#include "hw/sysbus.h"
+#include "migration/vmstate.h"
+#include "qemu/log.h"
+#include "qemu/module.h"
+#include "qemu/error-report.h"
+#include "qemu/timer.h"
+#include "hw/core/cpu.h"
+#include "target/arm/arm-powerctl.h"
+#include "target/arm/cpu.h"
+#include "hw/misc/allwinner-cpucfg.h"
+#include "trace.h"
+
+/* CPUCFG register offsets */
+enum {
+    REG_CPUS_RST_CTRL = 0x0000, /* CPUs Reset Control */
+    REG_CPU0_RST_CTRL = 0x0040, /* CPU#0 Reset Control */
+    REG_CPU0_CTRL     = 0x0044, /* CPU#0 Control */
+    REG_CPU0_STATUS   = 0x0048, /* CPU#0 Status */
+    REG_CPU1_RST_CTRL = 0x0080, /* CPU#1 Reset Control */
+    REG_CPU1_CTRL     = 0x0084, /* CPU#1 Control */
+    REG_CPU1_STATUS   = 0x0088, /* CPU#1 Status */
+    REG_CPU2_RST_CTRL = 0x00C0, /* CPU#2 Reset Control */
+    REG_CPU2_CTRL     = 0x00C4, /* CPU#2 Control */
+    REG_CPU2_STATUS   = 0x00C8, /* CPU#2 Status */
+    REG_CPU3_RST_CTRL = 0x0100, /* CPU#3 Reset Control */
+    REG_CPU3_CTRL     = 0x0104, /* CPU#3 Control */
+    REG_CPU3_STATUS   = 0x0108, /* CPU#3 Status */
+    REG_CPU_SYS_RST   = 0x0140, /* CPU System Reset */
+    REG_CLK_GATING    = 0x0144, /* CPU Clock Gating */
+    REG_GEN_CTRL      = 0x0184, /* General Control */
+    REG_SUPER_STANDBY = 0x01A0, /* Super Standby Flag */
+    REG_ENTRY_ADDR    = 0x01A4, /* Reset Entry Address */
+    REG_DBG_EXTERN    = 0x01E4, /* Debug External */
+    REG_CNT64_CTRL    = 0x0280, /* 64-bit Counter Control */
+    REG_CNT64_LOW     = 0x0284, /* 64-bit Counter Low */
+    REG_CNT64_HIGH    = 0x0288, /* 64-bit Counter High */
+};
+
+/* CPUCFG register flags */
+enum {
+    CPUX_RESET_RELEASED    = ((1 << 1) | (1 << 0)),
+    CPUX_STATUS_SMP        = (1 << 0),
+    CPU_SYS_RESET_RELEASED = (1 << 0),
+    CLK_GATING_ENABLE      = ((1 << 8) | 0xF),
+};
+
+/* CPUCFG register reset values */
+enum {
+    REG_CLK_GATING_RST    = 0x0000010F,
+    REG_GEN_CTRL_RST      = 0x00000020,
+    REG_SUPER_STANDBY_RST = 0x0,
+    REG_CNT64_CTRL_RST    = 0x0,
+};
+
+/* CPUCFG constants */
+enum {
+    CPU_EXCEPTION_LEVEL_ON_RESET = 3, /* EL3 */
+};
+
+static void allwinner_cpucfg_cpu_reset(AwCpuCfgState *s, uint8_t cpu_id)
+{
+    int ret;
+
trace_allwinner_cpucfg_cpu_reset(cpu_id, s->entry_addr); + + ARMCPU *target_cpu = ARM_CPU(arm_get_cpu_by_id(cpu_id)); + if (!target_cpu) { + /* + * Called with a bogus value for cpu_id. Guest error will + * already have been logged, we can simply return here. + */ + return; + } + bool target_aa64 = arm_feature(&target_cpu->env, ARM_FEATURE_AARCH64); + + ret = arm_set_cpu_on(cpu_id, s->entry_addr, 0, + CPU_EXCEPTION_LEVEL_ON_RESET, target_aa64); + if (ret != QEMU_ARM_POWERCTL_RET_SUCCESS) { + error_report("%s: failed to bring up CPU %d: err %d", + __func__, cpu_id, ret); + return; + } +} + +static uint64_t allwinner_cpucfg_read(void *opaque, hwaddr offset, + unsigned size) +{ + const AwCpuCfgState *s = AW_CPUCFG(opaque); + uint64_t val = 0; + + switch (offset) { + case REG_CPUS_RST_CTRL: /* CPUs Reset Control */ + case REG_CPU_SYS_RST: /* CPU System Reset */ + val = CPU_SYS_RESET_RELEASED; + break; + case REG_CPU0_RST_CTRL: /* CPU#0 Reset Control */ + case REG_CPU1_RST_CTRL: /* CPU#1 Reset Control */ + case REG_CPU2_RST_CTRL: /* CPU#2 Reset Control */ + case REG_CPU3_RST_CTRL: /* CPU#3 Reset Control */ + val = CPUX_RESET_RELEASED; + break; + case REG_CPU0_CTRL: /* CPU#0 Control */ + case REG_CPU1_CTRL: /* CPU#1 Control */ + case REG_CPU2_CTRL: /* CPU#2 Control */ + case REG_CPU3_CTRL: /* CPU#3 Control */ + val = 0; + break; + case REG_CPU0_STATUS: /* CPU#0 Status */ + case REG_CPU1_STATUS: /* CPU#1 Status */ + case REG_CPU2_STATUS: /* CPU#2 Status */ + case REG_CPU3_STATUS: /* CPU#3 Status */ + val = CPUX_STATUS_SMP; + break; + case REG_CLK_GATING: /* CPU Clock Gating */ + val = CLK_GATING_ENABLE; + break; + case REG_GEN_CTRL: /* General Control */ + val = s->gen_ctrl; + break; + case REG_SUPER_STANDBY: /* Super Standby Flag */ + val = s->super_standby; + break; + case REG_ENTRY_ADDR: /* Reset Entry Address */ + val = s->entry_addr; + break; + case REG_DBG_EXTERN: /* Debug External */ + case REG_CNT64_CTRL: /* 64-bit Counter Control */ + case REG_CNT64_LOW: /* 64-bit Counter Low */ + case REG_CNT64_HIGH: /* 64-bit Counter High */ + qemu_log_mask(LOG_UNIMP, "%s: unimplemented register at 0x%04x\n", + __func__, (uint32_t)offset); + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, "%s: out-of-bounds offset 0x%04x\n", + __func__, (uint32_t)offset); + break; + } + + trace_allwinner_cpucfg_read(offset, val, size); + + return val; +} + +static void allwinner_cpucfg_write(void *opaque, hwaddr offset, + uint64_t val, unsigned size) +{ + AwCpuCfgState *s = AW_CPUCFG(opaque); + + trace_allwinner_cpucfg_write(offset, val, size); + + switch (offset) { + case REG_CPUS_RST_CTRL: /* CPUs Reset Control */ + case REG_CPU_SYS_RST: /* CPU System Reset */ + break; + case REG_CPU0_RST_CTRL: /* CPU#0 Reset Control */ + case REG_CPU1_RST_CTRL: /* CPU#1 Reset Control */ + case REG_CPU2_RST_CTRL: /* CPU#2 Reset Control */ + case REG_CPU3_RST_CTRL: /* CPU#3 Reset Control */ + if (val) { + allwinner_cpucfg_cpu_reset(s, (offset - REG_CPU0_RST_CTRL) >> 6); + } + break; + case REG_CPU0_CTRL: /* CPU#0 Control */ + case REG_CPU1_CTRL: /* CPU#1 Control */ + case REG_CPU2_CTRL: /* CPU#2 Control */ + case REG_CPU3_CTRL: /* CPU#3 Control */ + case REG_CPU0_STATUS: /* CPU#0 Status */ + case REG_CPU1_STATUS: /* CPU#1 Status */ + case REG_CPU2_STATUS: /* CPU#2 Status */ + case REG_CPU3_STATUS: /* CPU#3 Status */ + case REG_CLK_GATING: /* CPU Clock Gating */ + break; + case REG_GEN_CTRL: /* General Control */ + s->gen_ctrl = val; + break; + case REG_SUPER_STANDBY: /* Super Standby Flag */ + s->super_standby = val; + break; + 
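
Each CPU's register bank in the enum above is 0x40 bytes wide (0x0040, 0x0080, 0x00C0, 0x0100), which is why the reset-control case above recovers the CPU index as (offset - REG_CPU0_RST_CTRL) >> 6. A standalone check of that arithmetic (a sketch, not part of the patch):

    #include <assert.h>

    int main(void)
    {
        /* REG_CPUn_RST_CTRL offsets from the enum above */
        unsigned offsets[] = { 0x0040, 0x0080, 0x00C0, 0x0100 };

        for (unsigned i = 0; i < 4; i++) {
            assert(((offsets[i] - 0x0040) >> 6) == i);  /* 0x40-byte stride */
        }
        return 0;
    }
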
case REG_ENTRY_ADDR:    /* Reset Entry Address */
+        s->entry_addr = val;
+        break;
+    case REG_DBG_EXTERN:    /* Debug External */
+    case REG_CNT64_CTRL:    /* 64-bit Counter Control */
+    case REG_CNT64_LOW:     /* 64-bit Counter Low */
+    case REG_CNT64_HIGH:    /* 64-bit Counter High */
+        qemu_log_mask(LOG_UNIMP, "%s: unimplemented register at 0x%04x\n",
+                      __func__, (uint32_t)offset);
+        break;
+    default:
+        qemu_log_mask(LOG_GUEST_ERROR, "%s: out-of-bounds offset 0x%04x\n",
+                      __func__, (uint32_t)offset);
+        break;
+    }
+}
+
+static const MemoryRegionOps allwinner_cpucfg_ops = {
+    .read = allwinner_cpucfg_read,
+    .write = allwinner_cpucfg_write,
+    .endianness = DEVICE_NATIVE_ENDIAN,
+    .valid = {
+        .min_access_size = 4,
+        .max_access_size = 4,
+    },
+    .impl.min_access_size = 4,
+};
+
+static void allwinner_cpucfg_reset(DeviceState *dev)
+{
+    AwCpuCfgState *s = AW_CPUCFG(dev);
+
+    /* Set default values for registers */
+    s->gen_ctrl = REG_GEN_CTRL_RST;
+    s->super_standby = REG_SUPER_STANDBY_RST;
+    s->entry_addr = 0;
+}
+
+static void allwinner_cpucfg_init(Object *obj)
+{
+    SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
+    AwCpuCfgState *s = AW_CPUCFG(obj);
+
+    /* Memory mapping */
+    memory_region_init_io(&s->iomem, OBJECT(s), &allwinner_cpucfg_ops, s,
+                          TYPE_AW_CPUCFG, 1 * KiB);
+    sysbus_init_mmio(sbd, &s->iomem);
+}
+
+static const VMStateDescription allwinner_cpucfg_vmstate = {
+    .name = "allwinner-cpucfg",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT32(gen_ctrl, AwCpuCfgState),
+        VMSTATE_UINT32(super_standby, AwCpuCfgState),
+        VMSTATE_UINT32(entry_addr, AwCpuCfgState),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+static void allwinner_cpucfg_class_init(ObjectClass *klass, void *data)
+{
+    DeviceClass *dc = DEVICE_CLASS(klass);
+
+    dc->reset = allwinner_cpucfg_reset;
+    dc->vmsd = &allwinner_cpucfg_vmstate;
+}
+
+static const TypeInfo allwinner_cpucfg_info = {
+    .name = TYPE_AW_CPUCFG,
+    .parent = TYPE_SYS_BUS_DEVICE,
+    .instance_init = allwinner_cpucfg_init,
+    .instance_size = sizeof(AwCpuCfgState),
+    .class_init = allwinner_cpucfg_class_init,
+};
+
+static void allwinner_cpucfg_register(void)
+{
+    type_register_static(&allwinner_cpucfg_info);
+}
+
+type_init(allwinner_cpucfg_register)
diff --git a/hw/misc/allwinner-h3-ccu.c b/hw/misc/allwinner-h3-ccu.c
new file mode 100644
index 000000000..18d107454
--- /dev/null
+++ b/hw/misc/allwinner-h3-ccu.c
@@ -0,0 +1,242 @@
+/*
+ * Allwinner H3 Clock Control Unit emulation
+ *
+ * Copyright (C) 2019 Niek Linnenbank
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "hw/misc/allwinner-h3-ccu.h" + +/* CCU register offsets */ +enum { + REG_PLL_CPUX = 0x0000, /* PLL CPUX Control */ + REG_PLL_AUDIO = 0x0008, /* PLL Audio Control */ + REG_PLL_VIDEO = 0x0010, /* PLL Video Control */ + REG_PLL_VE = 0x0018, /* PLL VE Control */ + REG_PLL_DDR = 0x0020, /* PLL DDR Control */ + REG_PLL_PERIPH0 = 0x0028, /* PLL Peripherals 0 Control */ + REG_PLL_GPU = 0x0038, /* PLL GPU Control */ + REG_PLL_PERIPH1 = 0x0044, /* PLL Peripherals 1 Control */ + REG_PLL_DE = 0x0048, /* PLL Display Engine Control */ + REG_CPUX_AXI = 0x0050, /* CPUX/AXI Configuration */ + REG_APB1 = 0x0054, /* ARM Peripheral Bus 1 Config */ + REG_APB2 = 0x0058, /* ARM Peripheral Bus 2 Config */ + REG_DRAM_CFG = 0x00F4, /* DRAM Configuration */ + REG_MBUS = 0x00FC, /* MBUS Reset */ + REG_PLL_TIME0 = 0x0200, /* PLL Stable Time 0 */ + REG_PLL_TIME1 = 0x0204, /* PLL Stable Time 1 */ + REG_PLL_CPUX_BIAS = 0x0220, /* PLL CPUX Bias */ + REG_PLL_AUDIO_BIAS = 0x0224, /* PLL Audio Bias */ + REG_PLL_VIDEO_BIAS = 0x0228, /* PLL Video Bias */ + REG_PLL_VE_BIAS = 0x022C, /* PLL VE Bias */ + REG_PLL_DDR_BIAS = 0x0230, /* PLL DDR Bias */ + REG_PLL_PERIPH0_BIAS = 0x0234, /* PLL Peripherals 0 Bias */ + REG_PLL_GPU_BIAS = 0x023C, /* PLL GPU Bias */ + REG_PLL_PERIPH1_BIAS = 0x0244, /* PLL Peripherals 1 Bias */ + REG_PLL_DE_BIAS = 0x0248, /* PLL Display Engine Bias */ + REG_PLL_CPUX_TUNING = 0x0250, /* PLL CPUX Tuning */ + REG_PLL_DDR_TUNING = 0x0260, /* PLL DDR Tuning */ +}; + +#define REG_INDEX(offset) (offset / sizeof(uint32_t)) + +/* CCU register flags */ +enum { + REG_DRAM_CFG_UPDATE = (1 << 16), +}; + +enum { + REG_PLL_ENABLE = (1 << 31), + REG_PLL_LOCK = (1 << 28), +}; + + +/* CCU register reset values */ +enum { + REG_PLL_CPUX_RST = 0x00001000, + REG_PLL_AUDIO_RST = 0x00035514, + REG_PLL_VIDEO_RST = 0x03006207, + REG_PLL_VE_RST = 0x03006207, + REG_PLL_DDR_RST = 0x00001000, + REG_PLL_PERIPH0_RST = 0x00041811, + REG_PLL_GPU_RST = 0x03006207, + REG_PLL_PERIPH1_RST = 0x00041811, + REG_PLL_DE_RST = 0x03006207, + REG_CPUX_AXI_RST = 0x00010000, + REG_APB1_RST = 0x00001010, + REG_APB2_RST = 0x01000000, + REG_DRAM_CFG_RST = 0x00000000, + REG_MBUS_RST = 0x80000000, + REG_PLL_TIME0_RST = 0x000000FF, + REG_PLL_TIME1_RST = 0x000000FF, + REG_PLL_CPUX_BIAS_RST = 0x08100200, + REG_PLL_AUDIO_BIAS_RST = 0x10100000, + REG_PLL_VIDEO_BIAS_RST = 0x10100000, + REG_PLL_VE_BIAS_RST = 0x10100000, + REG_PLL_DDR_BIAS_RST = 0x81104000, + REG_PLL_PERIPH0_BIAS_RST = 0x10100010, + REG_PLL_GPU_BIAS_RST = 0x10100000, + REG_PLL_PERIPH1_BIAS_RST = 0x10100010, + REG_PLL_DE_BIAS_RST = 0x10100000, + REG_PLL_CPUX_TUNING_RST = 0x0A101000, + REG_PLL_DDR_TUNING_RST = 0x14880000, +}; + +static uint64_t allwinner_h3_ccu_read(void *opaque, hwaddr offset, + unsigned size) +{ + const AwH3ClockCtlState *s = AW_H3_CCU(opaque); + const uint32_t idx = REG_INDEX(offset); + + switch (offset) { + case 0x308 ... 
AW_H3_CCU_IOSIZE: + qemu_log_mask(LOG_GUEST_ERROR, "%s: out-of-bounds offset 0x%04x\n", + __func__, (uint32_t)offset); + return 0; + } + + return s->regs[idx]; +} + +static void allwinner_h3_ccu_write(void *opaque, hwaddr offset, + uint64_t val, unsigned size) +{ + AwH3ClockCtlState *s = AW_H3_CCU(opaque); + const uint32_t idx = REG_INDEX(offset); + + switch (offset) { + case REG_DRAM_CFG: /* DRAM Configuration */ + val &= ~REG_DRAM_CFG_UPDATE; + break; + case REG_PLL_CPUX: /* PLL CPUX Control */ + case REG_PLL_AUDIO: /* PLL Audio Control */ + case REG_PLL_VIDEO: /* PLL Video Control */ + case REG_PLL_VE: /* PLL VE Control */ + case REG_PLL_DDR: /* PLL DDR Control */ + case REG_PLL_PERIPH0: /* PLL Peripherals 0 Control */ + case REG_PLL_GPU: /* PLL GPU Control */ + case REG_PLL_PERIPH1: /* PLL Peripherals 1 Control */ + case REG_PLL_DE: /* PLL Display Engine Control */ + if (val & REG_PLL_ENABLE) { + val |= REG_PLL_LOCK; + } + break; + case 0x308 ... AW_H3_CCU_IOSIZE: + qemu_log_mask(LOG_GUEST_ERROR, "%s: out-of-bounds offset 0x%04x\n", + __func__, (uint32_t)offset); + break; + default: + qemu_log_mask(LOG_UNIMP, "%s: unimplemented write offset 0x%04x\n", + __func__, (uint32_t)offset); + break; + } + + s->regs[idx] = (uint32_t) val; +} + +static const MemoryRegionOps allwinner_h3_ccu_ops = { + .read = allwinner_h3_ccu_read, + .write = allwinner_h3_ccu_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + }, + .impl.min_access_size = 4, +}; + +static void allwinner_h3_ccu_reset(DeviceState *dev) +{ + AwH3ClockCtlState *s = AW_H3_CCU(dev); + + /* Set default values for registers */ + s->regs[REG_INDEX(REG_PLL_CPUX)] = REG_PLL_CPUX_RST; + s->regs[REG_INDEX(REG_PLL_AUDIO)] = REG_PLL_AUDIO_RST; + s->regs[REG_INDEX(REG_PLL_VIDEO)] = REG_PLL_VIDEO_RST; + s->regs[REG_INDEX(REG_PLL_VE)] = REG_PLL_VE_RST; + s->regs[REG_INDEX(REG_PLL_DDR)] = REG_PLL_DDR_RST; + s->regs[REG_INDEX(REG_PLL_PERIPH0)] = REG_PLL_PERIPH0_RST; + s->regs[REG_INDEX(REG_PLL_GPU)] = REG_PLL_GPU_RST; + s->regs[REG_INDEX(REG_PLL_PERIPH1)] = REG_PLL_PERIPH1_RST; + s->regs[REG_INDEX(REG_PLL_DE)] = REG_PLL_DE_RST; + s->regs[REG_INDEX(REG_CPUX_AXI)] = REG_CPUX_AXI_RST; + s->regs[REG_INDEX(REG_APB1)] = REG_APB1_RST; + s->regs[REG_INDEX(REG_APB2)] = REG_APB2_RST; + s->regs[REG_INDEX(REG_DRAM_CFG)] = REG_DRAM_CFG_RST; + s->regs[REG_INDEX(REG_MBUS)] = REG_MBUS_RST; + s->regs[REG_INDEX(REG_PLL_TIME0)] = REG_PLL_TIME0_RST; + s->regs[REG_INDEX(REG_PLL_TIME1)] = REG_PLL_TIME1_RST; + s->regs[REG_INDEX(REG_PLL_CPUX_BIAS)] = REG_PLL_CPUX_BIAS_RST; + s->regs[REG_INDEX(REG_PLL_AUDIO_BIAS)] = REG_PLL_AUDIO_BIAS_RST; + s->regs[REG_INDEX(REG_PLL_VIDEO_BIAS)] = REG_PLL_VIDEO_BIAS_RST; + s->regs[REG_INDEX(REG_PLL_VE_BIAS)] = REG_PLL_VE_BIAS_RST; + s->regs[REG_INDEX(REG_PLL_DDR_BIAS)] = REG_PLL_DDR_BIAS_RST; + s->regs[REG_INDEX(REG_PLL_PERIPH0_BIAS)] = REG_PLL_PERIPH0_BIAS_RST; + s->regs[REG_INDEX(REG_PLL_GPU_BIAS)] = REG_PLL_GPU_BIAS_RST; + s->regs[REG_INDEX(REG_PLL_PERIPH1_BIAS)] = REG_PLL_PERIPH1_BIAS_RST; + s->regs[REG_INDEX(REG_PLL_DE_BIAS)] = REG_PLL_DE_BIAS_RST; + s->regs[REG_INDEX(REG_PLL_CPUX_TUNING)] = REG_PLL_CPUX_TUNING_RST; + s->regs[REG_INDEX(REG_PLL_DDR_TUNING)] = REG_PLL_DDR_TUNING_RST; +} + +static void allwinner_h3_ccu_init(Object *obj) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + AwH3ClockCtlState *s = AW_H3_CCU(obj); + + /* Memory mapping */ + memory_region_init_io(&s->iomem, OBJECT(s), &allwinner_h3_ccu_ops, s, + TYPE_AW_H3_CCU, AW_H3_CCU_IOSIZE); + sysbus_init_mmio(sbd, 
&s->iomem);
+}
+
+static const VMStateDescription allwinner_h3_ccu_vmstate = {
+    .name = "allwinner-h3-ccu",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT32_ARRAY(regs, AwH3ClockCtlState, AW_H3_CCU_REGS_NUM),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+static void allwinner_h3_ccu_class_init(ObjectClass *klass, void *data)
+{
+    DeviceClass *dc = DEVICE_CLASS(klass);
+
+    dc->reset = allwinner_h3_ccu_reset;
+    dc->vmsd = &allwinner_h3_ccu_vmstate;
+}
+
+static const TypeInfo allwinner_h3_ccu_info = {
+    .name = TYPE_AW_H3_CCU,
+    .parent = TYPE_SYS_BUS_DEVICE,
+    .instance_init = allwinner_h3_ccu_init,
+    .instance_size = sizeof(AwH3ClockCtlState),
+    .class_init = allwinner_h3_ccu_class_init,
+};
+
+static void allwinner_h3_ccu_register(void)
+{
+    type_register_static(&allwinner_h3_ccu_info);
+}
+
+type_init(allwinner_h3_ccu_register)
diff --git a/hw/misc/allwinner-h3-dramc.c b/hw/misc/allwinner-h3-dramc.c
new file mode 100644
index 000000000..1d37cf422
--- /dev/null
+++ b/hw/misc/allwinner-h3-dramc.c
@@ -0,0 +1,358 @@
+/*
+ * Allwinner H3 SDRAM Controller emulation
+ *
+ * Copyright (C) 2019 Niek Linnenbank
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/units.h"
+#include "qemu/error-report.h"
+#include "hw/sysbus.h"
+#include "migration/vmstate.h"
+#include "qemu/log.h"
+#include "qemu/module.h"
+#include "exec/address-spaces.h"
+#include "hw/qdev-properties.h"
+#include "qapi/error.h"
+#include "hw/misc/allwinner-h3-dramc.h"
+#include "trace.h"
+
+#define REG_INDEX(offset) (offset / sizeof(uint32_t))
+
+/* DRAMCOM register offsets */
+enum {
+    REG_DRAMCOM_CR = 0x0000, /* Control Register */
+};
+
+/* DRAMCTL register offsets */
+enum {
+    REG_DRAMCTL_PIR   = 0x0000, /* PHY Initialization Register */
+    REG_DRAMCTL_PGSR  = 0x0010, /* PHY General Status Register */
+    REG_DRAMCTL_STATR = 0x0018, /* Status Register */
+};
+
+/* DRAMCTL register flags */
+enum {
+    REG_DRAMCTL_PGSR_INITDONE = (1 << 0),
+};
+
+enum {
+    REG_DRAMCTL_STATR_ACTIVE = (1 << 0),
+};
+
+static void allwinner_h3_dramc_map_rows(AwH3DramCtlState *s, uint8_t row_bits,
+                                        uint8_t bank_bits, uint16_t page_size)
+{
+    /*
+     * This function simulates row addressing behavior when bootloader
+     * software attempts to detect the amount of available SDRAM. In U-Boot
+     * the controller is configured with the widest row addressing available.
+     * Then a pattern is written to RAM at an offset equal to the row
+     * boundary size. If the value read back equals the value read back from
+     * the start of RAM, the bootloader knows the number of row bits.
+     *
+     * This function inserts a mirrored memory region when the configured row
+     * bits do not match the actual emulated memory, to simulate the
+     * same behavior on hardware as expected by the bootloader.
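
To make the mirror placement concrete, here is the formula used in this function, ram_addr + (1 << (row_bits_actual + bank_bits)) * page_size, evaluated for one illustrative geometry (a sketch; the 0x40000000 RAM base and the field values are assumptions, not taken from this patch):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        /* Illustrative geometry only: 12 actual row bits, 3 bank bits,
           4 KiB pages, RAM base at 0x40000000. */
        uint64_t ram_addr = 0x40000000;
        uint8_t  row_bits_actual = 12, bank_bits = 3;
        uint16_t page_size = 4096;

        uint64_t row_mirror = ram_addr
            + ((1ULL << (row_bits_actual + bank_bits)) * page_size);

        assert(row_mirror == 0x40000000 + (128ULL << 20)); /* base + 128 MiB */
        return 0;
    }
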
+ */ + uint8_t row_bits_actual = 0; + + /* Calculate the actual row bits using the ram_size property */ + for (uint8_t i = 8; i < 12; i++) { + if (1 << i == s->ram_size) { + row_bits_actual = i + 3; + break; + } + } + + if (s->ram_size == (1 << (row_bits - 3))) { + /* When row bits is the expected value, remove the mirror */ + memory_region_set_enabled(&s->row_mirror_alias, false); + trace_allwinner_h3_dramc_rowmirror_disable(); + + } else if (row_bits_actual) { + /* Row bits not matching ram_size, install the rows mirror */ + hwaddr row_mirror = s->ram_addr + ((1ULL << (row_bits_actual + + bank_bits)) * page_size); + + memory_region_set_enabled(&s->row_mirror_alias, true); + memory_region_set_address(&s->row_mirror_alias, row_mirror); + + trace_allwinner_h3_dramc_rowmirror_enable(row_mirror); + } +} + +static uint64_t allwinner_h3_dramcom_read(void *opaque, hwaddr offset, + unsigned size) +{ + const AwH3DramCtlState *s = AW_H3_DRAMC(opaque); + const uint32_t idx = REG_INDEX(offset); + + if (idx >= AW_H3_DRAMCOM_REGS_NUM) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: out-of-bounds offset 0x%04x\n", + __func__, (uint32_t)offset); + return 0; + } + + trace_allwinner_h3_dramcom_read(offset, s->dramcom[idx], size); + + return s->dramcom[idx]; +} + +static void allwinner_h3_dramcom_write(void *opaque, hwaddr offset, + uint64_t val, unsigned size) +{ + AwH3DramCtlState *s = AW_H3_DRAMC(opaque); + const uint32_t idx = REG_INDEX(offset); + + trace_allwinner_h3_dramcom_write(offset, val, size); + + if (idx >= AW_H3_DRAMCOM_REGS_NUM) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: out-of-bounds offset 0x%04x\n", + __func__, (uint32_t)offset); + return; + } + + switch (offset) { + case REG_DRAMCOM_CR: /* Control Register */ + allwinner_h3_dramc_map_rows(s, ((val >> 4) & 0xf) + 1, + ((val >> 2) & 0x1) + 2, + 1 << (((val >> 8) & 0xf) + 3)); + break; + default: + break; + }; + + s->dramcom[idx] = (uint32_t) val; +} + +static uint64_t allwinner_h3_dramctl_read(void *opaque, hwaddr offset, + unsigned size) +{ + const AwH3DramCtlState *s = AW_H3_DRAMC(opaque); + const uint32_t idx = REG_INDEX(offset); + + if (idx >= AW_H3_DRAMCTL_REGS_NUM) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: out-of-bounds offset 0x%04x\n", + __func__, (uint32_t)offset); + return 0; + } + + trace_allwinner_h3_dramctl_read(offset, s->dramctl[idx], size); + + return s->dramctl[idx]; +} + +static void allwinner_h3_dramctl_write(void *opaque, hwaddr offset, + uint64_t val, unsigned size) +{ + AwH3DramCtlState *s = AW_H3_DRAMC(opaque); + const uint32_t idx = REG_INDEX(offset); + + trace_allwinner_h3_dramctl_write(offset, val, size); + + if (idx >= AW_H3_DRAMCTL_REGS_NUM) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: out-of-bounds offset 0x%04x\n", + __func__, (uint32_t)offset); + return; + } + + switch (offset) { + case REG_DRAMCTL_PIR: /* PHY Initialization Register */ + s->dramctl[REG_INDEX(REG_DRAMCTL_PGSR)] |= REG_DRAMCTL_PGSR_INITDONE; + s->dramctl[REG_INDEX(REG_DRAMCTL_STATR)] |= REG_DRAMCTL_STATR_ACTIVE; + break; + default: + break; + } + + s->dramctl[idx] = (uint32_t) val; +} + +static uint64_t allwinner_h3_dramphy_read(void *opaque, hwaddr offset, + unsigned size) +{ + const AwH3DramCtlState *s = AW_H3_DRAMC(opaque); + const uint32_t idx = REG_INDEX(offset); + + if (idx >= AW_H3_DRAMPHY_REGS_NUM) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: out-of-bounds offset 0x%04x\n", + __func__, (uint32_t)offset); + return 0; + } + + trace_allwinner_h3_dramphy_read(offset, s->dramphy[idx], size); + + return s->dramphy[idx]; +} + +static void 
allwinner_h3_dramphy_write(void *opaque, hwaddr offset, + uint64_t val, unsigned size) +{ + AwH3DramCtlState *s = AW_H3_DRAMC(opaque); + const uint32_t idx = REG_INDEX(offset); + + trace_allwinner_h3_dramphy_write(offset, val, size); + + if (idx >= AW_H3_DRAMPHY_REGS_NUM) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: out-of-bounds offset 0x%04x\n", + __func__, (uint32_t)offset); + return; + } + + s->dramphy[idx] = (uint32_t) val; +} + +static const MemoryRegionOps allwinner_h3_dramcom_ops = { + .read = allwinner_h3_dramcom_read, + .write = allwinner_h3_dramcom_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + }, + .impl.min_access_size = 4, +}; + +static const MemoryRegionOps allwinner_h3_dramctl_ops = { + .read = allwinner_h3_dramctl_read, + .write = allwinner_h3_dramctl_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + }, + .impl.min_access_size = 4, +}; + +static const MemoryRegionOps allwinner_h3_dramphy_ops = { + .read = allwinner_h3_dramphy_read, + .write = allwinner_h3_dramphy_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + }, + .impl.min_access_size = 4, +}; + +static void allwinner_h3_dramc_reset(DeviceState *dev) +{ + AwH3DramCtlState *s = AW_H3_DRAMC(dev); + + /* Set default values for registers */ + memset(&s->dramcom, 0, sizeof(s->dramcom)); + memset(&s->dramctl, 0, sizeof(s->dramctl)); + memset(&s->dramphy, 0, sizeof(s->dramphy)); +} + +static void allwinner_h3_dramc_realize(DeviceState *dev, Error **errp) +{ + AwH3DramCtlState *s = AW_H3_DRAMC(dev); + + /* Only power of 2 RAM sizes from 256MiB up to 2048MiB are supported */ + for (uint8_t i = 8; i < 13; i++) { + if (1 << i == s->ram_size) { + break; + } else if (i == 12) { + error_report("%s: ram-size %u MiB is not supported", + __func__, s->ram_size); + exit(1); + } + } + + /* Setup row mirror mappings */ + memory_region_init_ram(&s->row_mirror, OBJECT(s), + "allwinner-h3-dramc.row-mirror", + 4 * KiB, &error_abort); + memory_region_add_subregion_overlap(get_system_memory(), s->ram_addr, + &s->row_mirror, 10); + + memory_region_init_alias(&s->row_mirror_alias, OBJECT(s), + "allwinner-h3-dramc.row-mirror-alias", + &s->row_mirror, 0, 4 * KiB); + memory_region_add_subregion_overlap(get_system_memory(), + s->ram_addr + 1 * MiB, + &s->row_mirror_alias, 10); + memory_region_set_enabled(&s->row_mirror_alias, false); +} + +static void allwinner_h3_dramc_init(Object *obj) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + AwH3DramCtlState *s = AW_H3_DRAMC(obj); + + /* DRAMCOM registers */ + memory_region_init_io(&s->dramcom_iomem, OBJECT(s), + &allwinner_h3_dramcom_ops, s, + TYPE_AW_H3_DRAMC, 4 * KiB); + sysbus_init_mmio(sbd, &s->dramcom_iomem); + + /* DRAMCTL registers */ + memory_region_init_io(&s->dramctl_iomem, OBJECT(s), + &allwinner_h3_dramctl_ops, s, + TYPE_AW_H3_DRAMC, 4 * KiB); + sysbus_init_mmio(sbd, &s->dramctl_iomem); + + /* DRAMPHY registers */ + memory_region_init_io(&s->dramphy_iomem, OBJECT(s), + &allwinner_h3_dramphy_ops, s, + TYPE_AW_H3_DRAMC, 4 * KiB); + sysbus_init_mmio(sbd, &s->dramphy_iomem); +} + +static Property allwinner_h3_dramc_properties[] = { + DEFINE_PROP_UINT64("ram-addr", AwH3DramCtlState, ram_addr, 0x0), + DEFINE_PROP_UINT32("ram-size", AwH3DramCtlState, ram_size, 256 * MiB), + DEFINE_PROP_END_OF_LIST() +}; + +static const VMStateDescription allwinner_h3_dramc_vmstate = { + .name = "allwinner-h3-dramc", + .version_id = 1, + 
.minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32_ARRAY(dramcom, AwH3DramCtlState, AW_H3_DRAMCOM_REGS_NUM), + VMSTATE_UINT32_ARRAY(dramctl, AwH3DramCtlState, AW_H3_DRAMCTL_REGS_NUM), + VMSTATE_UINT32_ARRAY(dramphy, AwH3DramCtlState, AW_H3_DRAMPHY_REGS_NUM), + VMSTATE_END_OF_LIST() + } +}; + +static void allwinner_h3_dramc_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = allwinner_h3_dramc_reset; + dc->vmsd = &allwinner_h3_dramc_vmstate; + dc->realize = allwinner_h3_dramc_realize; + device_class_set_props(dc, allwinner_h3_dramc_properties); +} + +static const TypeInfo allwinner_h3_dramc_info = { + .name = TYPE_AW_H3_DRAMC, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_init = allwinner_h3_dramc_init, + .instance_size = sizeof(AwH3DramCtlState), + .class_init = allwinner_h3_dramc_class_init, +}; + +static void allwinner_h3_dramc_register(void) +{ + type_register_static(&allwinner_h3_dramc_info); +} + +type_init(allwinner_h3_dramc_register) diff --git a/hw/misc/allwinner-h3-sysctrl.c b/hw/misc/allwinner-h3-sysctrl.c new file mode 100644 index 000000000..1d07efa88 --- /dev/null +++ b/hw/misc/allwinner-h3-sysctrl.c @@ -0,0 +1,140 @@ +/* + * Allwinner H3 System Control emulation + * + * Copyright (C) 2019 Niek Linnenbank + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "hw/misc/allwinner-h3-sysctrl.h" + +/* System Control register offsets */ +enum { + REG_VER = 0x24, /* Version */ + REG_EMAC_PHY_CLK = 0x30, /* EMAC PHY Clock */ +}; + +#define REG_INDEX(offset) (offset / sizeof(uint32_t)) + +/* System Control register reset values */ +enum { + REG_VER_RST = 0x0, + REG_EMAC_PHY_CLK_RST = 0x58000, +}; + +static uint64_t allwinner_h3_sysctrl_read(void *opaque, hwaddr offset, + unsigned size) +{ + const AwH3SysCtrlState *s = AW_H3_SYSCTRL(opaque); + const uint32_t idx = REG_INDEX(offset); + + if (idx >= AW_H3_SYSCTRL_REGS_NUM) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: out-of-bounds offset 0x%04x\n", + __func__, (uint32_t)offset); + return 0; + } + + return s->regs[idx]; +} + +static void allwinner_h3_sysctrl_write(void *opaque, hwaddr offset, + uint64_t val, unsigned size) +{ + AwH3SysCtrlState *s = AW_H3_SYSCTRL(opaque); + const uint32_t idx = REG_INDEX(offset); + + if (idx >= AW_H3_SYSCTRL_REGS_NUM) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: out-of-bounds offset 0x%04x\n", + __func__, (uint32_t)offset); + return; + } + + switch (offset) { + case REG_VER: /* Version */ + break; + default: + s->regs[idx] = (uint32_t) val; + break; + } +} + +static const MemoryRegionOps allwinner_h3_sysctrl_ops = { + .read = allwinner_h3_sysctrl_read, + .write = allwinner_h3_sysctrl_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + }, + .impl.min_access_size = 4, +}; + +static void allwinner_h3_sysctrl_reset(DeviceState *dev) +{ + AwH3SysCtrlState *s = AW_H3_SYSCTRL(dev); + + /* Set default values for registers */ + s->regs[REG_INDEX(REG_VER)] = REG_VER_RST; + s->regs[REG_INDEX(REG_EMAC_PHY_CLK)] = REG_EMAC_PHY_CLK_RST; +} + +static void allwinner_h3_sysctrl_init(Object *obj) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + AwH3SysCtrlState *s = AW_H3_SYSCTRL(obj); + + /* Memory mapping */ + memory_region_init_io(&s->iomem, OBJECT(s), &allwinner_h3_sysctrl_ops, s, + TYPE_AW_H3_SYSCTRL, 4 * KiB); + sysbus_init_mmio(sbd, &s->iomem); +} + +static const VMStateDescription allwinner_h3_sysctrl_vmstate = { + .name = "allwinner-h3-sysctrl", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32_ARRAY(regs, AwH3SysCtrlState, AW_H3_SYSCTRL_REGS_NUM), + VMSTATE_END_OF_LIST() + } +}; + +static void allwinner_h3_sysctrl_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = allwinner_h3_sysctrl_reset; + dc->vmsd = &allwinner_h3_sysctrl_vmstate; +} + +static const TypeInfo allwinner_h3_sysctrl_info = { + .name = TYPE_AW_H3_SYSCTRL, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_init = allwinner_h3_sysctrl_init, + .instance_size = sizeof(AwH3SysCtrlState), + .class_init = allwinner_h3_sysctrl_class_init, +}; + +static void allwinner_h3_sysctrl_register(void) +{ + type_register_static(&allwinner_h3_sysctrl_info); +} + +type_init(allwinner_h3_sysctrl_register) diff --git a/hw/misc/allwinner-sid.c b/hw/misc/allwinner-sid.c new file mode 100644 index 000000000..6d61f55b1 --- /dev/null +++ b/hw/misc/allwinner-sid.c @@ -0,0 +1,169 @@ +/* + * Allwinner Security ID emulation + * + * Copyright (C) 2019 Niek Linnenbank + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software 
Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "qemu/guest-random.h" +#include "qapi/error.h" +#include "hw/qdev-properties.h" +#include "hw/qdev-properties-system.h" +#include "hw/misc/allwinner-sid.h" +#include "trace.h" + +/* SID register offsets */ +enum { + REG_PRCTL = 0x40, /* Control */ + REG_RDKEY = 0x60, /* Read Key */ +}; + +/* SID register flags */ +enum { + REG_PRCTL_WRITE = 0x0002, /* Unknown write flag */ + REG_PRCTL_OP_LOCK = 0xAC00, /* Lock operation */ +}; + +static uint64_t allwinner_sid_read(void *opaque, hwaddr offset, + unsigned size) +{ + const AwSidState *s = AW_SID(opaque); + uint64_t val = 0; + + switch (offset) { + case REG_PRCTL: /* Control */ + val = s->control; + break; + case REG_RDKEY: /* Read Key */ + val = s->rdkey; + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, "%s: out-of-bounds offset 0x%04x\n", + __func__, (uint32_t)offset); + return 0; + } + + trace_allwinner_sid_read(offset, val, size); + + return val; +} + +static void allwinner_sid_write(void *opaque, hwaddr offset, + uint64_t val, unsigned size) +{ + AwSidState *s = AW_SID(opaque); + + trace_allwinner_sid_write(offset, val, size); + + switch (offset) { + case REG_PRCTL: /* Control */ + s->control = val; + + if ((s->control & REG_PRCTL_OP_LOCK) && + (s->control & REG_PRCTL_WRITE)) { + uint32_t id = s->control >> 16; + + if (id <= sizeof(QemuUUID) - sizeof(s->rdkey)) { + s->rdkey = ldl_be_p(&s->identifier.data[id]); + } + } + s->control &= ~REG_PRCTL_WRITE; + break; + case REG_RDKEY: /* Read Key */ + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, "%s: out-of-bounds offset 0x%04x\n", + __func__, (uint32_t)offset); + break; + } +} + +static const MemoryRegionOps allwinner_sid_ops = { + .read = allwinner_sid_read, + .write = allwinner_sid_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + }, + .impl.min_access_size = 4, +}; + +static void allwinner_sid_reset(DeviceState *dev) +{ + AwSidState *s = AW_SID(dev); + + /* Set default values for registers */ + s->control = 0; + s->rdkey = 0; +} + +static void allwinner_sid_init(Object *obj) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + AwSidState *s = AW_SID(obj); + + /* Memory mapping */ + memory_region_init_io(&s->iomem, OBJECT(s), &allwinner_sid_ops, s, + TYPE_AW_SID, 1 * KiB); + sysbus_init_mmio(sbd, &s->iomem); +} + +static Property allwinner_sid_properties[] = { + DEFINE_PROP_UUID_NODEFAULT("identifier", AwSidState, identifier), + DEFINE_PROP_END_OF_LIST() +}; + +static const VMStateDescription allwinner_sid_vmstate = { + .name = "allwinner-sid", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(control, AwSidState), + VMSTATE_UINT32(rdkey, AwSidState), + VMSTATE_UINT8_ARRAY_V(identifier.data, AwSidState, sizeof(QemuUUID), 1), + VMSTATE_END_OF_LIST() + } +}; + +static void allwinner_sid_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); 
+ + dc->reset = allwinner_sid_reset; + dc->vmsd = &allwinner_sid_vmstate; + device_class_set_props(dc, allwinner_sid_properties); +} + +static const TypeInfo allwinner_sid_info = { + .name = TYPE_AW_SID, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_init = allwinner_sid_init, + .instance_size = sizeof(AwSidState), + .class_init = allwinner_sid_class_init, +}; + +static void allwinner_sid_register(void) +{ + type_register_static(&allwinner_sid_info); +} + +type_init(allwinner_sid_register) diff --git a/hw/misc/applesmc.c b/hw/misc/applesmc.c new file mode 100644 index 000000000..1b9acaf1d --- /dev/null +++ b/hw/misc/applesmc.c @@ -0,0 +1,372 @@ +/* + * Apple SMC controller + * + * Copyright (c) 2007 Alexander Graf + * + * Authors: Alexander Graf + * Susanne Graf + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + * + * ***************************************************************** + * + * In all Intel-based Apple hardware there is an SMC chip to control the + * backlight, fans and several other generic device parameters. It also + * contains the magic keys used to dongle Mac OS X to the device. + * + * This driver was mostly created by looking at the Linux AppleSMC driver + * implementation and does not support IRQ. + * + */ + +#include "qemu/osdep.h" +#include "hw/isa/isa.h" +#include "hw/qdev-properties.h" +#include "ui/console.h" +#include "qemu/module.h" +#include "qemu/timer.h" +#include "qom/object.h" + +/* #define DEBUG_SMC */ + +#define APPLESMC_DEFAULT_IOBASE 0x300 + +enum { + APPLESMC_DATA_PORT = 0x00, + APPLESMC_CMD_PORT = 0x04, + APPLESMC_ERR_PORT = 0x1e, + APPLESMC_NUM_PORTS = 0x20, +}; + +enum { + APPLESMC_READ_CMD = 0x10, + APPLESMC_WRITE_CMD = 0x11, + APPLESMC_GET_KEY_BY_INDEX_CMD = 0x12, + APPLESMC_GET_KEY_TYPE_CMD = 0x13, +}; + +enum { + APPLESMC_ST_CMD_DONE = 0x00, + APPLESMC_ST_DATA_READY = 0x01, + APPLESMC_ST_BUSY = 0x02, + APPLESMC_ST_ACK = 0x04, + APPLESMC_ST_NEW_CMD = 0x08, +}; + +enum { + APPLESMC_ST_1E_CMD_INTRUPTED = 0x80, + APPLESMC_ST_1E_STILL_BAD_CMD = 0x81, + APPLESMC_ST_1E_BAD_CMD = 0x82, + APPLESMC_ST_1E_NOEXIST = 0x84, + APPLESMC_ST_1E_WRITEONLY = 0x85, + APPLESMC_ST_1E_READONLY = 0x86, + APPLESMC_ST_1E_BAD_INDEX = 0xb8, +}; + +#ifdef DEBUG_SMC +#define smc_debug(...) fprintf(stderr, "AppleSMC: " __VA_ARGS__) +#else +#define smc_debug(...) do { } while (0) +#endif + +static char default_osk[64] = "This is a dummy key. 
Enter the real key " + "using the -osk parameter"; + +struct AppleSMCData { + uint8_t len; + const char *key; + const char *data; + QLIST_ENTRY(AppleSMCData) node; +}; + +OBJECT_DECLARE_SIMPLE_TYPE(AppleSMCState, APPLE_SMC) + +struct AppleSMCState { + ISADevice parent_obj; + + MemoryRegion io_data; + MemoryRegion io_cmd; + MemoryRegion io_err; + uint32_t iobase; + uint8_t cmd; + uint8_t status; + uint8_t status_1e; + uint8_t last_ret; + char key[4]; + uint8_t read_pos; + uint8_t data_len; + uint8_t data_pos; + uint8_t data[255]; + char *osk; + QLIST_HEAD(, AppleSMCData) data_def; +}; + +static void applesmc_io_cmd_write(void *opaque, hwaddr addr, uint64_t val, + unsigned size) +{ + AppleSMCState *s = opaque; + uint8_t status = s->status & 0x0f; + + smc_debug("CMD received: 0x%02x\n", (uint8_t)val); + switch (val) { + case APPLESMC_READ_CMD: + /* did last command run through OK? */ + if (status == APPLESMC_ST_CMD_DONE || status == APPLESMC_ST_NEW_CMD) { + s->cmd = val; + s->status = APPLESMC_ST_NEW_CMD | APPLESMC_ST_ACK; + } else { + smc_debug("ERROR: previous command interrupted!\n"); + s->status = APPLESMC_ST_NEW_CMD; + s->status_1e = APPLESMC_ST_1E_CMD_INTRUPTED; + } + break; + default: + smc_debug("UNEXPECTED CMD 0x%02x\n", (uint8_t)val); + s->status = APPLESMC_ST_NEW_CMD; + s->status_1e = APPLESMC_ST_1E_BAD_CMD; + } + s->read_pos = 0; + s->data_pos = 0; +} + +static struct AppleSMCData *applesmc_find_key(AppleSMCState *s) +{ + struct AppleSMCData *d; + + QLIST_FOREACH(d, &s->data_def, node) { + if (!memcmp(d->key, s->key, 4)) { + return d; + } + } + return NULL; +} + +static void applesmc_io_data_write(void *opaque, hwaddr addr, uint64_t val, + unsigned size) +{ + AppleSMCState *s = opaque; + struct AppleSMCData *d; + + smc_debug("DATA received: 0x%02x\n", (uint8_t)val); + switch (s->cmd) { + case APPLESMC_READ_CMD: + if ((s->status & 0x0f) == APPLESMC_ST_CMD_DONE) { + break; + } + if (s->read_pos < 4) { + s->key[s->read_pos] = val; + s->status = APPLESMC_ST_ACK; + } else if (s->read_pos == 4) { + d = applesmc_find_key(s); + if (d != NULL) { + memcpy(s->data, d->data, d->len); + s->data_len = d->len; + s->data_pos = 0; + s->status = APPLESMC_ST_ACK | APPLESMC_ST_DATA_READY; + s->status_1e = APPLESMC_ST_CMD_DONE; /* clear on valid key */ + } else { + smc_debug("READ_CMD: key '%c%c%c%c' not found!\n", + s->key[0], s->key[1], s->key[2], s->key[3]); + s->status = APPLESMC_ST_CMD_DONE; + s->status_1e = APPLESMC_ST_1E_NOEXIST; + } + } + s->read_pos++; + break; + default: + s->status = APPLESMC_ST_CMD_DONE; + s->status_1e = APPLESMC_ST_1E_STILL_BAD_CMD; + } +} + +static void applesmc_io_err_write(void *opaque, hwaddr addr, uint64_t val, + unsigned size) +{ + smc_debug("ERR_CODE received: 0x%02x, ignoring!\n", (uint8_t)val); + /* NOTE: writing to the error port not supported! 
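+ * The value is logged and dropped; the most recent error code stays
+ * readable from this same port.
+ *
+ * For reference, a complete key read against this model goes:
+ *   1. write APPLESMC_READ_CMD (0x10) to iobase + APPLESMC_CMD_PORT
+ *   2. write the four key bytes, then one length byte (its value is
+ *      ignored by this model), to iobase + APPLESMC_DATA_PORT
+ *   3. read the data back byte by byte from the data port,
+ * polling the ACK/DATA_READY status bits in between. An unknown key
+ * makes this port report APPLESMC_ST_1E_NOEXIST (0x84).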
*/ +} + +static uint64_t applesmc_io_data_read(void *opaque, hwaddr addr, unsigned size) +{ + AppleSMCState *s = opaque; + + switch (s->cmd) { + case APPLESMC_READ_CMD: + if (!(s->status & APPLESMC_ST_DATA_READY)) { + break; + } + if (s->data_pos < s->data_len) { + s->last_ret = s->data[s->data_pos]; + smc_debug("READ '%c%c%c%c'[%d] = %02x\n", + s->key[0], s->key[1], s->key[2], s->key[3], + s->data_pos, s->last_ret); + s->data_pos++; + if (s->data_pos == s->data_len) { + s->status = APPLESMC_ST_CMD_DONE; + smc_debug("READ '%c%c%c%c' Len=%d complete!\n", + s->key[0], s->key[1], s->key[2], s->key[3], + s->data_len); + } else { + s->status = APPLESMC_ST_ACK | APPLESMC_ST_DATA_READY; + } + } + break; + default: + s->status = APPLESMC_ST_CMD_DONE; + s->status_1e = APPLESMC_ST_1E_STILL_BAD_CMD; + } + smc_debug("DATA sent: 0x%02x\n", s->last_ret); + + return s->last_ret; +} + +static uint64_t applesmc_io_cmd_read(void *opaque, hwaddr addr, unsigned size) +{ + AppleSMCState *s = opaque; + + smc_debug("CMD sent: 0x%02x\n", s->status); + return s->status; +} + +static uint64_t applesmc_io_err_read(void *opaque, hwaddr addr, unsigned size) +{ + AppleSMCState *s = opaque; + + /* NOTE: read does not clear the 1e status */ + smc_debug("ERR_CODE sent: 0x%02x\n", s->status_1e); + return s->status_1e; +} + +static void applesmc_add_key(AppleSMCState *s, const char *key, + int len, const char *data) +{ + struct AppleSMCData *def; + + def = g_malloc0(sizeof(struct AppleSMCData)); + def->key = key; + def->len = len; + def->data = data; + + QLIST_INSERT_HEAD(&s->data_def, def, node); +} + +static void qdev_applesmc_isa_reset(DeviceState *dev) +{ + AppleSMCState *s = APPLE_SMC(dev); + struct AppleSMCData *d, *next; + + /* Remove existing entries */ + QLIST_FOREACH_SAFE(d, &s->data_def, node, next) { + QLIST_REMOVE(d, node); + } + s->status = 0x00; + s->status_1e = 0x00; + s->last_ret = 0x00; + + applesmc_add_key(s, "REV ", 6, "\x01\x13\x0f\x00\x00\x03"); + applesmc_add_key(s, "OSK0", 32, s->osk); + applesmc_add_key(s, "OSK1", 32, s->osk + 32); + applesmc_add_key(s, "NATJ", 1, "\0"); + applesmc_add_key(s, "MSSP", 1, "\0"); + applesmc_add_key(s, "MSSD", 1, "\0x3"); +} + +static const MemoryRegionOps applesmc_data_io_ops = { + .write = applesmc_io_data_write, + .read = applesmc_io_data_read, + .endianness = DEVICE_NATIVE_ENDIAN, + .impl = { + .min_access_size = 1, + .max_access_size = 1, + }, +}; + +static const MemoryRegionOps applesmc_cmd_io_ops = { + .write = applesmc_io_cmd_write, + .read = applesmc_io_cmd_read, + .endianness = DEVICE_NATIVE_ENDIAN, + .impl = { + .min_access_size = 1, + .max_access_size = 1, + }, +}; + +static const MemoryRegionOps applesmc_err_io_ops = { + .write = applesmc_io_err_write, + .read = applesmc_io_err_read, + .endianness = DEVICE_NATIVE_ENDIAN, + .impl = { + .min_access_size = 1, + .max_access_size = 1, + }, +}; + +static void applesmc_isa_realize(DeviceState *dev, Error **errp) +{ + AppleSMCState *s = APPLE_SMC(dev); + + memory_region_init_io(&s->io_data, OBJECT(s), &applesmc_data_io_ops, s, + "applesmc-data", 1); + isa_register_ioport(&s->parent_obj, &s->io_data, + s->iobase + APPLESMC_DATA_PORT); + + memory_region_init_io(&s->io_cmd, OBJECT(s), &applesmc_cmd_io_ops, s, + "applesmc-cmd", 1); + isa_register_ioport(&s->parent_obj, &s->io_cmd, + s->iobase + APPLESMC_CMD_PORT); + + memory_region_init_io(&s->io_err, OBJECT(s), &applesmc_err_io_ops, s, + "applesmc-err", 1); + isa_register_ioport(&s->parent_obj, &s->io_err, + s->iobase + APPLESMC_ERR_PORT); + + if (!s->osk || 
(strlen(s->osk) != 64)) { + warn_report("Using AppleSMC with invalid key"); + s->osk = default_osk; + } + + QLIST_INIT(&s->data_def); + qdev_applesmc_isa_reset(dev); +} + +static Property applesmc_isa_properties[] = { + DEFINE_PROP_UINT32(APPLESMC_PROP_IO_BASE, AppleSMCState, iobase, + APPLESMC_DEFAULT_IOBASE), + DEFINE_PROP_STRING("osk", AppleSMCState, osk), + DEFINE_PROP_END_OF_LIST(), +}; + +static void qdev_applesmc_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = applesmc_isa_realize; + dc->reset = qdev_applesmc_isa_reset; + device_class_set_props(dc, applesmc_isa_properties); + set_bit(DEVICE_CATEGORY_MISC, dc->categories); +} + +static const TypeInfo applesmc_isa_info = { + .name = TYPE_APPLE_SMC, + .parent = TYPE_ISA_DEVICE, + .instance_size = sizeof(AppleSMCState), + .class_init = qdev_applesmc_class_init, +}; + +static void applesmc_register_types(void) +{ + type_register_static(&applesmc_isa_info); +} + +type_init(applesmc_register_types) diff --git a/hw/misc/arm11scu.c b/hw/misc/arm11scu.c new file mode 100644 index 000000000..17c36a054 --- /dev/null +++ b/hw/misc/arm11scu.c @@ -0,0 +1,104 @@ +/* + * ARM11MPCore Snoop Control Unit (SCU) emulation + * + * Copyright (c) 2006-2007 CodeSourcery. + * Copyright (c) 2013 SUSE LINUX Products GmbH + * Written by Paul Brook and Andreas Färber + * + * This code is licensed under the GPL. + */ + +#include "qemu/osdep.h" +#include "hw/misc/arm11scu.h" +#include "hw/qdev-properties.h" +#include "qemu/log.h" +#include "qemu/module.h" + +static uint64_t mpcore_scu_read(void *opaque, hwaddr offset, + unsigned size) +{ + ARM11SCUState *s = (ARM11SCUState *)opaque; + int id; + /* SCU */ + switch (offset) { + case 0x00: /* Control. */ + return s->control; + case 0x04: /* Configuration. */ + id = ((1 << s->num_cpu) - 1) << 4; + return id | (s->num_cpu - 1); + case 0x08: /* CPU status. */ + return 0; + case 0x0c: /* Invalidate all. */ + return 0; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "mpcore_priv_read: Bad offset %x\n", (int)offset); + return 0; + } +} + +static void mpcore_scu_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + ARM11SCUState *s = (ARM11SCUState *)opaque; + /* SCU */ + switch (offset) { + case 0: /* Control register. */ + s->control = value & 1; + break; + case 0x0c: /* Invalidate all. */ + /* This is a no-op as cache is not emulated. 
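+ * QEMU does not model L1/L2 cache contents, so guest-visible memory
+ * is always coherent and an "invalidate all" can complete instantly;
+ * the read handler above already returns 0 for this offset to
+ * signal completion.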
*/ + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "mpcore_priv_read: Bad offset %x\n", (int)offset); + } +} + +static const MemoryRegionOps mpcore_scu_ops = { + .read = mpcore_scu_read, + .write = mpcore_scu_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void arm11_scu_realize(DeviceState *dev, Error **errp) +{ +} + +static void arm11_scu_init(Object *obj) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + ARM11SCUState *s = ARM11_SCU(obj); + + memory_region_init_io(&s->iomem, OBJECT(s), + &mpcore_scu_ops, s, "mpcore-scu", 0x100); + sysbus_init_mmio(sbd, &s->iomem); +} + +static Property arm11_scu_properties[] = { + DEFINE_PROP_UINT32("num-cpu", ARM11SCUState, num_cpu, 1), + DEFINE_PROP_END_OF_LIST() +}; + +static void arm11_scu_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + + dc->realize = arm11_scu_realize; + device_class_set_props(dc, arm11_scu_properties); +} + +static const TypeInfo arm11_scu_type_info = { + .name = TYPE_ARM11_SCU, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(ARM11SCUState), + .instance_init = arm11_scu_init, + .class_init = arm11_scu_class_init, +}; + +static void arm11_scu_register_types(void) +{ + type_register_static(&arm11_scu_type_info); +} + +type_init(arm11_scu_register_types) diff --git a/hw/misc/arm_integrator_debug.c b/hw/misc/arm_integrator_debug.c new file mode 100644 index 000000000..9a1972782 --- /dev/null +++ b/hw/misc/arm_integrator_debug.c @@ -0,0 +1,100 @@ +/* + * LED, Switch and Debug control registers for ARM Integrator Boards + * + * This is currently a stub for this functionality but at least + * ensures something other than unassigned_mem_read() handles access + * to this area. + * + * The real h/w is described at: + * https://developer.arm.com/documentation/dui0159/b/peripherals-and-interfaces/debug-leds-and-dip-switch-interface + * + * Copyright (c) 2013 Alex Bennée + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "hw/sysbus.h" +#include "hw/misc/arm_integrator_debug.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "qom/object.h" + +OBJECT_DECLARE_SIMPLE_TYPE(IntegratorDebugState, INTEGRATOR_DEBUG) + +struct IntegratorDebugState { + SysBusDevice parent_obj; + + MemoryRegion iomem; +}; + +static uint64_t intdbg_control_read(void *opaque, hwaddr offset, + unsigned size) +{ + switch (offset >> 2) { + case 0: /* ALPHA */ + case 1: /* LEDS */ + case 2: /* SWITCHES */ + qemu_log_mask(LOG_UNIMP, + "%s: returning zero from %" HWADDR_PRIx ":%u\n", + __func__, offset, size); + return 0; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad offset %" HWADDR_PRIx, + __func__, offset); + return 0; + } +} + +static void intdbg_control_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + switch (offset >> 2) { + case 1: /* ALPHA */ + case 2: /* LEDS */ + case 3: /* SWITCHES */ + /* Nothing interesting implemented yet. 
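+ * A fuller model could latch the LED/switch state so that board
+ * code or tests could observe it; for now the value is reported
+ * via LOG_UNIMP and discarded.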
*/ + qemu_log_mask(LOG_UNIMP, + "%s: ignoring write of %" PRIu64 + " to %" HWADDR_PRIx ":%u\n", + __func__, value, offset, size); + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: write of %" PRIu64 + " to bad offset %" HWADDR_PRIx "\n", + __func__, value, offset); + } +} + +static const MemoryRegionOps intdbg_control_ops = { + .read = intdbg_control_read, + .write = intdbg_control_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void intdbg_control_init(Object *obj) +{ + SysBusDevice *sd = SYS_BUS_DEVICE(obj); + IntegratorDebugState *s = INTEGRATOR_DEBUG(obj); + + memory_region_init_io(&s->iomem, obj, &intdbg_control_ops, + NULL, "dbg-leds", 0x1000000); + sysbus_init_mmio(sd, &s->iomem); +} + +static const TypeInfo intdbg_info = { + .name = TYPE_INTEGRATOR_DEBUG, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(IntegratorDebugState), + .instance_init = intdbg_control_init, +}; + +static void intdbg_register_types(void) +{ + type_register_static(&intdbg_info); +} + +type_init(intdbg_register_types) diff --git a/hw/misc/arm_l2x0.c b/hw/misc/arm_l2x0.c new file mode 100644 index 000000000..75c3eb898 --- /dev/null +++ b/hw/misc/arm_l2x0.c @@ -0,0 +1,203 @@ +/* + * ARM dummy L210, L220, PL310 cache controller. + * + * Copyright (c) 2010-2012 Calxeda + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or any later version, as published by the Free Software + * Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . 
+ * + */ + +#include "qemu/osdep.h" +#include "hw/qdev-properties.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "qom/object.h" + +/* L2C-310 r3p2 */ +#define CACHE_ID 0x410000c8 + +#define TYPE_ARM_L2X0 "l2x0" +OBJECT_DECLARE_SIMPLE_TYPE(L2x0State, ARM_L2X0) + +struct L2x0State { + SysBusDevice parent_obj; + + MemoryRegion iomem; + uint32_t cache_type; + uint32_t ctrl; + uint32_t aux_ctrl; + uint32_t data_ctrl; + uint32_t tag_ctrl; + uint32_t filter_start; + uint32_t filter_end; +}; + +static const VMStateDescription vmstate_l2x0 = { + .name = "l2x0", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(ctrl, L2x0State), + VMSTATE_UINT32(aux_ctrl, L2x0State), + VMSTATE_UINT32(data_ctrl, L2x0State), + VMSTATE_UINT32(tag_ctrl, L2x0State), + VMSTATE_UINT32(filter_start, L2x0State), + VMSTATE_UINT32(filter_end, L2x0State), + VMSTATE_END_OF_LIST() + } +}; + + +static uint64_t l2x0_priv_read(void *opaque, hwaddr offset, + unsigned size) +{ + uint32_t cache_data; + L2x0State *s = (L2x0State *)opaque; + offset &= 0xfff; + if (offset >= 0x730 && offset < 0x800) { + return 0; /* cache ops complete */ + } + switch (offset) { + case 0: + return CACHE_ID; + case 0x4: + /* aux_ctrl values affect cache_type values */ + cache_data = (s->aux_ctrl & (7 << 17)) >> 15; + cache_data |= (s->aux_ctrl & (1 << 16)) >> 16; + return s->cache_type |= (cache_data << 18) | (cache_data << 6); + case 0x100: + return s->ctrl; + case 0x104: + return s->aux_ctrl; + case 0x108: + return s->tag_ctrl; + case 0x10C: + return s->data_ctrl; + case 0xC00: + return s->filter_start; + case 0xC04: + return s->filter_end; + case 0xF40: + return 0; + case 0xF60: + return 0; + case 0xF80: + return 0; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "l2x0_priv_read: Bad offset %x\n", (int)offset); + break; + } + return 0; +} + +static void l2x0_priv_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + L2x0State *s = (L2x0State *)opaque; + offset &= 0xfff; + if (offset >= 0x730 && offset < 0x800) { + /* ignore */ + return; + } + switch (offset) { + case 0x100: + s->ctrl = value & 1; + break; + case 0x104: + s->aux_ctrl = value; + break; + case 0x108: + s->tag_ctrl = value; + break; + case 0x10C: + s->data_ctrl = value; + break; + case 0xC00: + s->filter_start = value; + break; + case 0xC04: + s->filter_end = value; + break; + case 0xF40: + return; + case 0xF60: + return; + case 0xF80: + return; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "l2x0_priv_write: Bad offset %x\n", (int)offset); + break; + } +} + +static void l2x0_priv_reset(DeviceState *dev) +{ + L2x0State *s = ARM_L2X0(dev); + + s->ctrl = 0; + s->aux_ctrl = 0x02020000; + s->tag_ctrl = 0; + s->data_ctrl = 0; + s->filter_start = 0; + s->filter_end = 0; +} + +static const MemoryRegionOps l2x0_mem_ops = { + .read = l2x0_priv_read, + .write = l2x0_priv_write, + .endianness = DEVICE_NATIVE_ENDIAN, + }; + +static void l2x0_priv_init(Object *obj) +{ + L2x0State *s = ARM_L2X0(obj); + SysBusDevice *dev = SYS_BUS_DEVICE(obj); + + memory_region_init_io(&s->iomem, obj, &l2x0_mem_ops, s, + "l2x0_cc", 0x1000); + sysbus_init_mmio(dev, &s->iomem); +} + +static Property l2x0_properties[] = { + DEFINE_PROP_UINT32("cache-type", L2x0State, cache_type, 0x1c100100), + DEFINE_PROP_END_OF_LIST(), +}; + +static void l2x0_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->vmsd = &vmstate_l2x0; + device_class_set_props(dc, 
l2x0_properties); + dc->reset = l2x0_priv_reset; +} + +static const TypeInfo l2x0_info = { + .name = TYPE_ARM_L2X0, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(L2x0State), + .instance_init = l2x0_priv_init, + .class_init = l2x0_class_init, +}; + +static void l2x0_register_types(void) +{ + type_register_static(&l2x0_info); +} + +type_init(l2x0_register_types) diff --git a/hw/misc/arm_sysctl.c b/hw/misc/arm_sysctl.c new file mode 100644 index 000000000..42d469385 --- /dev/null +++ b/hw/misc/arm_sysctl.c @@ -0,0 +1,662 @@ +/* + * Status and system control registers for ARM RealView/Versatile boards. + * + * Copyright (c) 2006-2007 CodeSourcery. + * Written by Paul Brook + * + * This code is licensed under the GPL. + */ + +#include "qemu/osdep.h" +#include "hw/irq.h" +#include "hw/qdev-properties.h" +#include "qemu/timer.h" +#include "sysemu/runstate.h" +#include "qemu/bitops.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "hw/arm/primecell.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "qom/object.h" + +#define LOCK_VALUE 0xa05f + +#define TYPE_ARM_SYSCTL "realview_sysctl" +OBJECT_DECLARE_SIMPLE_TYPE(arm_sysctl_state, ARM_SYSCTL) + +struct arm_sysctl_state { + SysBusDevice parent_obj; + + MemoryRegion iomem; + qemu_irq pl110_mux_ctrl; + + uint32_t sys_id; + uint32_t leds; + uint16_t lockval; + uint32_t cfgdata1; + uint32_t cfgdata2; + uint32_t flags; + uint32_t nvflags; + uint32_t resetlevel; + uint32_t proc_id; + uint32_t sys_mci; + uint32_t sys_cfgdata; + uint32_t sys_cfgctrl; + uint32_t sys_cfgstat; + uint32_t sys_clcd; + uint32_t mb_clock[6]; + uint32_t *db_clock; + uint32_t db_num_vsensors; + uint32_t *db_voltage; + uint32_t db_num_clocks; + uint32_t *db_clock_reset; +}; + +static const VMStateDescription vmstate_arm_sysctl = { + .name = "realview_sysctl", + .version_id = 4, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(leds, arm_sysctl_state), + VMSTATE_UINT16(lockval, arm_sysctl_state), + VMSTATE_UINT32(cfgdata1, arm_sysctl_state), + VMSTATE_UINT32(cfgdata2, arm_sysctl_state), + VMSTATE_UINT32(flags, arm_sysctl_state), + VMSTATE_UINT32(nvflags, arm_sysctl_state), + VMSTATE_UINT32(resetlevel, arm_sysctl_state), + VMSTATE_UINT32_V(sys_mci, arm_sysctl_state, 2), + VMSTATE_UINT32_V(sys_cfgdata, arm_sysctl_state, 2), + VMSTATE_UINT32_V(sys_cfgctrl, arm_sysctl_state, 2), + VMSTATE_UINT32_V(sys_cfgstat, arm_sysctl_state, 2), + VMSTATE_UINT32_V(sys_clcd, arm_sysctl_state, 3), + VMSTATE_UINT32_ARRAY_V(mb_clock, arm_sysctl_state, 6, 4), + VMSTATE_VARRAY_UINT32(db_clock, arm_sysctl_state, db_num_clocks, + 4, vmstate_info_uint32, uint32_t), + VMSTATE_END_OF_LIST() + } +}; + +/* The PB926 actually uses a different format for + * its SYS_ID register. Fortunately the bits which are + * board type on later boards are distinct. 
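+ *
+ * board_id() below extracts bits [27:16] of SYS_ID; for
+ * illustration, a sys_id property of 0x01900000 would classify the
+ * board as BOARD_ID_VEXPRESS (0x190).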
+ */ +#define BOARD_ID_PB926 0x100 +#define BOARD_ID_EB 0x140 +#define BOARD_ID_PBA8 0x178 +#define BOARD_ID_PBX 0x182 +#define BOARD_ID_VEXPRESS 0x190 + +static int board_id(arm_sysctl_state *s) +{ + /* Extract the board ID field from the SYS_ID register value */ + return (s->sys_id >> 16) & 0xfff; +} + +static void arm_sysctl_reset(DeviceState *d) +{ + arm_sysctl_state *s = ARM_SYSCTL(d); + int i; + + s->leds = 0; + s->lockval = 0; + s->cfgdata1 = 0; + s->cfgdata2 = 0; + s->flags = 0; + s->resetlevel = 0; + /* Motherboard oscillators (in Hz) */ + s->mb_clock[0] = 50000000; /* Static memory clock: 50MHz */ + s->mb_clock[1] = 23750000; /* motherboard CLCD clock: 23.75MHz */ + s->mb_clock[2] = 24000000; /* IO FPGA peripheral clock: 24MHz */ + s->mb_clock[3] = 24000000; /* IO FPGA reserved clock: 24MHz */ + s->mb_clock[4] = 24000000; /* System bus global clock: 24MHz */ + s->mb_clock[5] = 24000000; /* IO FPGA reserved clock: 24MHz */ + /* Daughterboard oscillators: reset from property values */ + for (i = 0; i < s->db_num_clocks; i++) { + s->db_clock[i] = s->db_clock_reset[i]; + } + if (board_id(s) == BOARD_ID_VEXPRESS) { + /* On VExpress this register will RAZ/WI */ + s->sys_clcd = 0; + } else { + /* All others: CLCDID 0x1f, indicating VGA */ + s->sys_clcd = 0x1f00; + } +} + +static uint64_t arm_sysctl_read(void *opaque, hwaddr offset, + unsigned size) +{ + arm_sysctl_state *s = (arm_sysctl_state *)opaque; + + switch (offset) { + case 0x00: /* ID */ + return s->sys_id; + case 0x04: /* SW */ + /* General purpose hardware switches. + We don't have a useful way of exposing these to the user. */ + return 0; + case 0x08: /* LED */ + return s->leds; + case 0x20: /* LOCK */ + return s->lockval; + case 0x0c: /* OSC0 */ + case 0x10: /* OSC1 */ + case 0x14: /* OSC2 */ + case 0x18: /* OSC3 */ + case 0x1c: /* OSC4 */ + case 0x24: /* 100HZ */ + /* ??? Implement these. 
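+ * For now these read as zero, which is consistent with the write
+ * side below where OSC0..OSC4 writes are silently ignored.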
*/ + return 0; + case 0x28: /* CFGDATA1 */ + return s->cfgdata1; + case 0x2c: /* CFGDATA2 */ + return s->cfgdata2; + case 0x30: /* FLAGS */ + return s->flags; + case 0x38: /* NVFLAGS */ + return s->nvflags; + case 0x40: /* RESETCTL */ + if (board_id(s) == BOARD_ID_VEXPRESS) { + /* reserved: RAZ/WI */ + return 0; + } + return s->resetlevel; + case 0x44: /* PCICTL */ + return 1; + case 0x48: /* MCI */ + return s->sys_mci; + case 0x4c: /* FLASH */ + return 0; + case 0x50: /* CLCD */ + return s->sys_clcd; + case 0x54: /* CLCDSER */ + return 0; + case 0x58: /* BOOTCS */ + return 0; + case 0x5c: /* 24MHz */ + return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), 24000000, + NANOSECONDS_PER_SECOND); + case 0x60: /* MISC */ + return 0; + case 0x84: /* PROCID0 */ + return s->proc_id; + case 0x88: /* PROCID1 */ + return 0xff000000; + case 0x64: /* DMAPSR0 */ + case 0x68: /* DMAPSR1 */ + case 0x6c: /* DMAPSR2 */ + case 0x70: /* IOSEL */ + case 0x74: /* PLDCTL */ + case 0x80: /* BUSID */ + case 0x8c: /* OSCRESET0 */ + case 0x90: /* OSCRESET1 */ + case 0x94: /* OSCRESET2 */ + case 0x98: /* OSCRESET3 */ + case 0x9c: /* OSCRESET4 */ + case 0xc0: /* SYS_TEST_OSC0 */ + case 0xc4: /* SYS_TEST_OSC1 */ + case 0xc8: /* SYS_TEST_OSC2 */ + case 0xcc: /* SYS_TEST_OSC3 */ + case 0xd0: /* SYS_TEST_OSC4 */ + return 0; + case 0xa0: /* SYS_CFGDATA */ + if (board_id(s) != BOARD_ID_VEXPRESS) { + goto bad_reg; + } + return s->sys_cfgdata; + case 0xa4: /* SYS_CFGCTRL */ + if (board_id(s) != BOARD_ID_VEXPRESS) { + goto bad_reg; + } + return s->sys_cfgctrl; + case 0xa8: /* SYS_CFGSTAT */ + if (board_id(s) != BOARD_ID_VEXPRESS) { + goto bad_reg; + } + return s->sys_cfgstat; + default: + bad_reg: + qemu_log_mask(LOG_GUEST_ERROR, + "arm_sysctl_read: Bad register offset 0x%x\n", + (int)offset); + return 0; + } +} + +/* SYS_CFGCTRL functions */ +#define SYS_CFG_OSC 1 +#define SYS_CFG_VOLT 2 +#define SYS_CFG_AMP 3 +#define SYS_CFG_TEMP 4 +#define SYS_CFG_RESET 5 +#define SYS_CFG_SCC 6 +#define SYS_CFG_MUXFPGA 7 +#define SYS_CFG_SHUTDOWN 8 +#define SYS_CFG_REBOOT 9 +#define SYS_CFG_DVIMODE 11 +#define SYS_CFG_POWER 12 +#define SYS_CFG_ENERGY 13 + +/* SYS_CFGCTRL site field values */ +#define SYS_CFG_SITE_MB 0 +#define SYS_CFG_SITE_DB1 1 +#define SYS_CFG_SITE_DB2 2 + +/** + * vexpress_cfgctrl_read: + * @s: arm_sysctl_state pointer + * @dcc, @function, @site, @position, @device: split out values from + * SYS_CFGCTRL register + * @val: pointer to where to put the read data on success + * + * Handle a VExpress SYS_CFGCTRL register read. On success, return true and + * write the read value to *val. On failure, return false (and val may + * or may not be written to). 
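+ *
+ * As an illustration of the SYS_CFGCTRL encoding decoded by the
+ * caller: a guest asking for daughterboard oscillator 2 would use
+ * the value 0x80110002, i.e. start (bit 31), read (bit 30 clear),
+ * function SYS_CFG_OSC (bits [25:20] = 1), site SYS_CFG_SITE_DB1
+ * (bits [17:16] = 1) and device 2 (bits [11:0]).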
+ */ +static bool vexpress_cfgctrl_read(arm_sysctl_state *s, unsigned int dcc, + unsigned int function, unsigned int site, + unsigned int position, unsigned int device, + uint32_t *val) +{ + /* We don't support anything other than DCC 0, board stack position 0 + * or sites other than motherboard/daughterboard: + */ + if (dcc != 0 || position != 0 || + (site != SYS_CFG_SITE_MB && site != SYS_CFG_SITE_DB1)) { + goto cfgctrl_unimp; + } + + switch (function) { + case SYS_CFG_VOLT: + if (site == SYS_CFG_SITE_DB1 && device < s->db_num_vsensors) { + *val = s->db_voltage[device]; + return true; + } + if (site == SYS_CFG_SITE_MB && device == 0) { + /* There is only one motherboard voltage sensor: + * VIO : 3.3V : bus voltage between mother and daughterboard + */ + *val = 3300000; + return true; + } + break; + case SYS_CFG_OSC: + if (site == SYS_CFG_SITE_MB && device < ARRAY_SIZE(s->mb_clock)) { + /* motherboard clock */ + *val = s->mb_clock[device]; + return true; + } + if (site == SYS_CFG_SITE_DB1 && device < s->db_num_clocks) { + /* daughterboard clock */ + *val = s->db_clock[device]; + return true; + } + break; + default: + break; + } + +cfgctrl_unimp: + qemu_log_mask(LOG_UNIMP, + "arm_sysctl: Unimplemented SYS_CFGCTRL read of function " + "0x%x DCC 0x%x site 0x%x position 0x%x device 0x%x\n", + function, dcc, site, position, device); + return false; +} + +/** + * vexpress_cfgctrl_write: + * @s: arm_sysctl_state pointer + * @dcc, @function, @site, @position, @device: split out values from + * SYS_CFGCTRL register + * @val: data to write + * + * Handle a VExpress SYS_CFGCTRL register write. On success, return true. + * On failure, return false. + */ +static bool vexpress_cfgctrl_write(arm_sysctl_state *s, unsigned int dcc, + unsigned int function, unsigned int site, + unsigned int position, unsigned int device, + uint32_t val) +{ + /* We don't support anything other than DCC 0, board stack position 0 + * or sites other than motherboard/daughterboard: + */ + if (dcc != 0 || position != 0 || + (site != SYS_CFG_SITE_MB && site != SYS_CFG_SITE_DB1)) { + goto cfgctrl_unimp; + } + + switch (function) { + case SYS_CFG_OSC: + if (site == SYS_CFG_SITE_MB && device < ARRAY_SIZE(s->mb_clock)) { + /* motherboard clock */ + s->mb_clock[device] = val; + return true; + } + if (site == SYS_CFG_SITE_DB1 && device < s->db_num_clocks) { + /* daughterboard clock */ + s->db_clock[device] = val; + return true; + } + break; + case SYS_CFG_MUXFPGA: + if (site == SYS_CFG_SITE_MB && device == 0) { + /* Select whether video output comes from motherboard + * or daughterboard: log and ignore as QEMU doesn't + * support this. + */ + qemu_log_mask(LOG_UNIMP, "arm_sysctl: selection of video output " + "not supported, ignoring\n"); + return true; + } + break; + case SYS_CFG_SHUTDOWN: + if (site == SYS_CFG_SITE_MB && device == 0) { + qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN); + return true; + } + break; + case SYS_CFG_REBOOT: + if (site == SYS_CFG_SITE_MB && device == 0) { + qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); + return true; + } + break; + case SYS_CFG_DVIMODE: + if (site == SYS_CFG_SITE_MB && device == 0) { + /* Selecting DVI mode is meaningless for QEMU: we will + * always display the output correctly according to the + * pixel height/width programmed into the CLCD controller. 
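+ * The value is therefore accepted and discarded, so the guest sees
+ * the operation complete successfully via SYS_CFGSTAT.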
+ */ + return true; + } + default: + break; + } + +cfgctrl_unimp: + qemu_log_mask(LOG_UNIMP, + "arm_sysctl: Unimplemented SYS_CFGCTRL write of function " + "0x%x DCC 0x%x site 0x%x position 0x%x device 0x%x\n", + function, dcc, site, position, device); + return false; +} + +static void arm_sysctl_write(void *opaque, hwaddr offset, + uint64_t val, unsigned size) +{ + arm_sysctl_state *s = (arm_sysctl_state *)opaque; + + switch (offset) { + case 0x08: /* LED */ + s->leds = val; + break; + case 0x0c: /* OSC0 */ + case 0x10: /* OSC1 */ + case 0x14: /* OSC2 */ + case 0x18: /* OSC3 */ + case 0x1c: /* OSC4 */ + /* ??? */ + break; + case 0x20: /* LOCK */ + if (val == LOCK_VALUE) + s->lockval = val; + else + s->lockval = val & 0x7fff; + break; + case 0x28: /* CFGDATA1 */ + /* ??? Need to implement this. */ + s->cfgdata1 = val; + break; + case 0x2c: /* CFGDATA2 */ + /* ??? Need to implement this. */ + s->cfgdata2 = val; + break; + case 0x30: /* FLAGSSET */ + s->flags |= val; + break; + case 0x34: /* FLAGSCLR */ + s->flags &= ~val; + break; + case 0x38: /* NVFLAGSSET */ + s->nvflags |= val; + break; + case 0x3c: /* NVFLAGSCLR */ + s->nvflags &= ~val; + break; + case 0x40: /* RESETCTL */ + switch (board_id(s)) { + case BOARD_ID_PB926: + if (s->lockval == LOCK_VALUE) { + s->resetlevel = val; + if (val & 0x100) { + qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); + } + } + break; + case BOARD_ID_PBX: + case BOARD_ID_PBA8: + if (s->lockval == LOCK_VALUE) { + s->resetlevel = val; + if (val & 0x04) { + qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); + } + } + break; + case BOARD_ID_VEXPRESS: + case BOARD_ID_EB: + default: + /* reserved: RAZ/WI */ + break; + } + break; + case 0x44: /* PCICTL */ + /* nothing to do. */ + break; + case 0x4c: /* FLASH */ + break; + case 0x50: /* CLCD */ + switch (board_id(s)) { + case BOARD_ID_PB926: + /* On 926 bits 13:8 are R/O, bits 1:0 control + * the mux that defines how to interpret the PL110 + * graphics format, and other bits are r/w but we + * don't implement them to do anything. + */ + s->sys_clcd &= 0x3f00; + s->sys_clcd |= val & ~0x3f00; + qemu_set_irq(s->pl110_mux_ctrl, val & 3); + break; + case BOARD_ID_EB: + /* The EB is the same except that there is no mux since + * the EB has a PL111. + */ + s->sys_clcd &= 0x3f00; + s->sys_clcd |= val & ~0x3f00; + break; + case BOARD_ID_PBA8: + case BOARD_ID_PBX: + /* On PBA8 and PBX bit 7 is r/w and all other bits + * are either r/o or RAZ/WI. + */ + s->sys_clcd &= (1 << 7); + s->sys_clcd |= val & ~(1 << 7); + break; + case BOARD_ID_VEXPRESS: + default: + /* On VExpress this register is unimplemented and will RAZ/WI */ + break; + } + break; + case 0x54: /* CLCDSER */ + case 0x64: /* DMAPSR0 */ + case 0x68: /* DMAPSR1 */ + case 0x6c: /* DMAPSR2 */ + case 0x70: /* IOSEL */ + case 0x74: /* PLDCTL */ + case 0x80: /* BUSID */ + case 0x84: /* PROCID0 */ + case 0x88: /* PROCID1 */ + case 0x8c: /* OSCRESET0 */ + case 0x90: /* OSCRESET1 */ + case 0x94: /* OSCRESET2 */ + case 0x98: /* OSCRESET3 */ + case 0x9c: /* OSCRESET4 */ + break; + case 0xa0: /* SYS_CFGDATA */ + if (board_id(s) != BOARD_ID_VEXPRESS) { + goto bad_reg; + } + s->sys_cfgdata = val; + return; + case 0xa4: /* SYS_CFGCTRL */ + if (board_id(s) != BOARD_ID_VEXPRESS) { + goto bad_reg; + } + /* Undefined bits [19:18] are RAZ/WI, and writing to + * the start bit just triggers the action; it always reads + * as zero. 
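+ *
+ * Worked example: a guest reboot request writes 0xc0900000, i.e.
+ * start (bit 31) | write (bit 30) | SYS_CFG_REBOOT (9 << 20) with
+ * site, position and device all zero; the decode below hands it to
+ * vexpress_cfgctrl_write(), which calls qemu_system_reset_request().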
+ */ + s->sys_cfgctrl = val & ~((3 << 18) | (1 << 31)); + if (val & (1 << 31)) { + /* Start bit set -- actually do something */ + unsigned int dcc = extract32(s->sys_cfgctrl, 26, 4); + unsigned int function = extract32(s->sys_cfgctrl, 20, 6); + unsigned int site = extract32(s->sys_cfgctrl, 16, 2); + unsigned int position = extract32(s->sys_cfgctrl, 12, 4); + unsigned int device = extract32(s->sys_cfgctrl, 0, 12); + s->sys_cfgstat = 1; /* complete */ + if (s->sys_cfgctrl & (1 << 30)) { + if (!vexpress_cfgctrl_write(s, dcc, function, site, position, + device, s->sys_cfgdata)) { + s->sys_cfgstat |= 2; /* error */ + } + } else { + uint32_t val; + if (!vexpress_cfgctrl_read(s, dcc, function, site, position, + device, &val)) { + s->sys_cfgstat |= 2; /* error */ + } else { + s->sys_cfgdata = val; + } + } + } + s->sys_cfgctrl &= ~(1 << 31); + return; + case 0xa8: /* SYS_CFGSTAT */ + if (board_id(s) != BOARD_ID_VEXPRESS) { + goto bad_reg; + } + s->sys_cfgstat = val & 3; + return; + default: + bad_reg: + qemu_log_mask(LOG_GUEST_ERROR, + "arm_sysctl_write: Bad register offset 0x%x\n", + (int)offset); + return; + } +} + +static const MemoryRegionOps arm_sysctl_ops = { + .read = arm_sysctl_read, + .write = arm_sysctl_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void arm_sysctl_gpio_set(void *opaque, int line, int level) +{ + arm_sysctl_state *s = (arm_sysctl_state *)opaque; + switch (line) { + case ARM_SYSCTL_GPIO_MMC_WPROT: + { + /* For PB926 and EB write-protect is bit 2 of SYS_MCI; + * for all later boards it is bit 1. + */ + int bit = 2; + if ((board_id(s) == BOARD_ID_PB926) || (board_id(s) == BOARD_ID_EB)) { + bit = 4; + } + s->sys_mci &= ~bit; + if (level) { + s->sys_mci |= bit; + } + break; + } + case ARM_SYSCTL_GPIO_MMC_CARDIN: + s->sys_mci &= ~1; + if (level) { + s->sys_mci |= 1; + } + break; + } +} + +static void arm_sysctl_init(Object *obj) +{ + DeviceState *dev = DEVICE(obj); + SysBusDevice *sd = SYS_BUS_DEVICE(obj); + arm_sysctl_state *s = ARM_SYSCTL(obj); + + memory_region_init_io(&s->iomem, OBJECT(dev), &arm_sysctl_ops, s, + "arm-sysctl", 0x1000); + sysbus_init_mmio(sd, &s->iomem); + qdev_init_gpio_in(dev, arm_sysctl_gpio_set, 2); + qdev_init_gpio_out(dev, &s->pl110_mux_ctrl, 1); +} + +static void arm_sysctl_realize(DeviceState *d, Error **errp) +{ + arm_sysctl_state *s = ARM_SYSCTL(d); + + s->db_clock = g_new0(uint32_t, s->db_num_clocks); +} + +static void arm_sysctl_finalize(Object *obj) +{ + arm_sysctl_state *s = ARM_SYSCTL(obj); + + g_free(s->db_voltage); + g_free(s->db_clock); + g_free(s->db_clock_reset); +} + +static Property arm_sysctl_properties[] = { + DEFINE_PROP_UINT32("sys_id", arm_sysctl_state, sys_id, 0), + DEFINE_PROP_UINT32("proc_id", arm_sysctl_state, proc_id, 0), + /* Daughterboard power supply voltages (as reported via SYS_CFG) */ + DEFINE_PROP_ARRAY("db-voltage", arm_sysctl_state, db_num_vsensors, + db_voltage, qdev_prop_uint32, uint32_t), + /* Daughterboard clock reset values (as reported via SYS_CFG) */ + DEFINE_PROP_ARRAY("db-clock", arm_sysctl_state, db_num_clocks, + db_clock_reset, qdev_prop_uint32, uint32_t), + DEFINE_PROP_END_OF_LIST(), +}; + +static void arm_sysctl_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = arm_sysctl_realize; + dc->reset = arm_sysctl_reset; + dc->vmsd = &vmstate_arm_sysctl; + device_class_set_props(dc, arm_sysctl_properties); +} + +static const TypeInfo arm_sysctl_info = { + .name = TYPE_ARM_SYSCTL, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = 
sizeof(arm_sysctl_state), + .instance_init = arm_sysctl_init, + .instance_finalize = arm_sysctl_finalize, + .class_init = arm_sysctl_class_init, +}; + +static void arm_sysctl_register_types(void) +{ + type_register_static(&arm_sysctl_info); +} + +type_init(arm_sysctl_register_types) diff --git a/hw/misc/armsse-cpu-pwrctrl.c b/hw/misc/armsse-cpu-pwrctrl.c new file mode 100644 index 000000000..42fc38879 --- /dev/null +++ b/hw/misc/armsse-cpu-pwrctrl.c @@ -0,0 +1,149 @@ +/* + * Arm SSE CPU PWRCTRL register block + * + * Copyright (c) 2021 Linaro Limited + * Written by Peter Maydell + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 or + * (at your option) any later version. + */ + +/* + * This is a model of the "CPU_PWRCTRL block" which is part of the + * Arm Corstone SSE-300 Example Subsystem and documented in + * https://developer.arm.com/documentation/101773/0000 + */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "trace.h" +#include "qapi/error.h" +#include "migration/vmstate.h" +#include "hw/sysbus.h" +#include "hw/registerfields.h" +#include "hw/misc/armsse-cpu-pwrctrl.h" + +REG32(CPUPWRCFG, 0x0) +REG32(PID4, 0xfd0) +REG32(PID5, 0xfd4) +REG32(PID6, 0xfd8) +REG32(PID7, 0xfdc) +REG32(PID0, 0xfe0) +REG32(PID1, 0xfe4) +REG32(PID2, 0xfe8) +REG32(PID3, 0xfec) +REG32(CID0, 0xff0) +REG32(CID1, 0xff4) +REG32(CID2, 0xff8) +REG32(CID3, 0xffc) + +/* PID/CID values */ +static const int cpu_pwrctrl_id[] = { + 0x04, 0x00, 0x00, 0x00, /* PID4..PID7 */ + 0x5a, 0xb8, 0x0b, 0x00, /* PID0..PID3 */ + 0x0d, 0xf0, 0x05, 0xb1, /* CID0..CID3 */ +}; + +static uint64_t pwrctrl_read(void *opaque, hwaddr offset, unsigned size) +{ + ARMSSECPUPwrCtrl *s = ARMSSE_CPU_PWRCTRL(opaque); + uint64_t r; + + switch (offset) { + case A_CPUPWRCFG: + r = s->cpupwrcfg; + break; + case A_PID4 ... 
A_CID3: + r = cpu_pwrctrl_id[(offset - A_PID4) / 4]; + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "SSE CPU_PWRCTRL read: bad offset %x\n", (int)offset); + r = 0; + break; + } + trace_armsse_cpu_pwrctrl_read(offset, r, size); + return r; +} + +static void pwrctrl_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + ARMSSECPUPwrCtrl *s = ARMSSE_CPU_PWRCTRL(opaque); + + trace_armsse_cpu_pwrctrl_write(offset, value, size); + + switch (offset) { + case A_CPUPWRCFG: + qemu_log_mask(LOG_UNIMP, + "SSE CPU_PWRCTRL: CPUPWRCFG unimplemented\n"); + s->cpupwrcfg = value; + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "SSE CPU_PWRCTRL write: bad offset 0x%x\n", (int)offset); + break; + } +} + +static const MemoryRegionOps pwrctrl_ops = { + .read = pwrctrl_read, + .write = pwrctrl_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .impl.min_access_size = 4, + .impl.max_access_size = 4, + .valid.min_access_size = 4, + .valid.max_access_size = 4, +}; + +static void pwrctrl_reset(DeviceState *dev) +{ + ARMSSECPUPwrCtrl *s = ARMSSE_CPU_PWRCTRL(dev); + + s->cpupwrcfg = 0; +} + +static const VMStateDescription pwrctrl_vmstate = { + .name = "armsse-cpu-pwrctrl", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(cpupwrcfg, ARMSSECPUPwrCtrl), + VMSTATE_END_OF_LIST() + }, +}; + +static void pwrctrl_init(Object *obj) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + ARMSSECPUPwrCtrl *s = ARMSSE_CPU_PWRCTRL(obj); + + memory_region_init_io(&s->iomem, obj, &pwrctrl_ops, + s, "armsse-cpu-pwrctrl", 0x1000); + sysbus_init_mmio(sbd, &s->iomem); +} + +static void pwrctrl_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = pwrctrl_reset; + dc->vmsd = &pwrctrl_vmstate; +} + +static const TypeInfo pwrctrl_info = { + .name = TYPE_ARMSSE_CPU_PWRCTRL, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(ARMSSECPUPwrCtrl), + .instance_init = pwrctrl_init, + .class_init = pwrctrl_class_init, +}; + +static void pwrctrl_register_types(void) +{ + type_register_static(&pwrctrl_info); +} + +type_init(pwrctrl_register_types); diff --git a/hw/misc/armsse-cpuid.c b/hw/misc/armsse-cpuid.c new file mode 100644 index 000000000..e785a0905 --- /dev/null +++ b/hw/misc/armsse-cpuid.c @@ -0,0 +1,135 @@ +/* + * ARM SSE-200 CPU_IDENTITY register block + * + * Copyright (c) 2019 Linaro Limited + * Written by Peter Maydell + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 or + * (at your option) any later version. + */ + +/* + * This is a model of the "CPU_IDENTITY" register block which is part of the + * Arm SSE-200 and documented in + * https://developer.arm.com/documentation/101104/latest/ + * + * It consists of one read-only CPUID register (set by QOM property), plus the + * usual ID registers. 
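+ * The ID registers are the conventional Arm PID/CID words at
+ * offsets 0xfd0..0xffc, served from the constant sysinfo_id[] table
+ * below; reads of any other offset log a guest error and return 0.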
+ */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "trace.h" +#include "qapi/error.h" +#include "hw/sysbus.h" +#include "hw/registerfields.h" +#include "hw/misc/armsse-cpuid.h" +#include "hw/qdev-properties.h" + +REG32(CPUID, 0x0) +REG32(PID4, 0xfd0) +REG32(PID5, 0xfd4) +REG32(PID6, 0xfd8) +REG32(PID7, 0xfdc) +REG32(PID0, 0xfe0) +REG32(PID1, 0xfe4) +REG32(PID2, 0xfe8) +REG32(PID3, 0xfec) +REG32(CID0, 0xff0) +REG32(CID1, 0xff4) +REG32(CID2, 0xff8) +REG32(CID3, 0xffc) + +/* PID/CID values */ +static const int sysinfo_id[] = { + 0x04, 0x00, 0x00, 0x00, /* PID4..PID7 */ + 0x58, 0xb8, 0x0b, 0x00, /* PID0..PID3 */ + 0x0d, 0xf0, 0x05, 0xb1, /* CID0..CID3 */ +}; + +static uint64_t armsse_cpuid_read(void *opaque, hwaddr offset, + unsigned size) +{ + ARMSSECPUID *s = ARMSSE_CPUID(opaque); + uint64_t r; + + switch (offset) { + case A_CPUID: + r = s->cpuid; + break; + case A_PID4 ... A_CID3: + r = sysinfo_id[(offset - A_PID4) / 4]; + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "SSE CPU_IDENTITY read: bad offset 0x%x\n", (int)offset); + r = 0; + break; + } + trace_armsse_cpuid_read(offset, r, size); + return r; +} + +static void armsse_cpuid_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + trace_armsse_cpuid_write(offset, value, size); + + qemu_log_mask(LOG_GUEST_ERROR, + "SSE CPU_IDENTITY: write to RO offset 0x%x\n", (int)offset); +} + +static const MemoryRegionOps armsse_cpuid_ops = { + .read = armsse_cpuid_read, + .write = armsse_cpuid_write, + .endianness = DEVICE_LITTLE_ENDIAN, + /* byte/halfword accesses are just zero-padded on reads and writes */ + .impl.min_access_size = 4, + .impl.max_access_size = 4, + .valid.min_access_size = 1, + .valid.max_access_size = 4, +}; + +static Property armsse_cpuid_props[] = { + DEFINE_PROP_UINT32("CPUID", ARMSSECPUID, cpuid, 0), + DEFINE_PROP_END_OF_LIST() +}; + +static void armsse_cpuid_init(Object *obj) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + ARMSSECPUID *s = ARMSSE_CPUID(obj); + + memory_region_init_io(&s->iomem, obj, &armsse_cpuid_ops, + s, "armsse-cpuid", 0x1000); + sysbus_init_mmio(sbd, &s->iomem); +} + +static void armsse_cpuid_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + /* + * This device has no guest-modifiable state and so it + * does not need a reset function or VMState. + */ + + device_class_set_props(dc, armsse_cpuid_props); +} + +static const TypeInfo armsse_cpuid_info = { + .name = TYPE_ARMSSE_CPUID, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(ARMSSECPUID), + .instance_init = armsse_cpuid_init, + .class_init = armsse_cpuid_class_init, +}; + +static void armsse_cpuid_register_types(void) +{ + type_register_static(&armsse_cpuid_info); +} + +type_init(armsse_cpuid_register_types); diff --git a/hw/misc/armsse-mhu.c b/hw/misc/armsse-mhu.c new file mode 100644 index 000000000..0be7f0fc8 --- /dev/null +++ b/hw/misc/armsse-mhu.c @@ -0,0 +1,200 @@ +/* + * ARM SSE-200 Message Handling Unit (MHU) + * + * Copyright (c) 2019 Linaro Limited + * Written by Peter Maydell + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 or + * (at your option) any later version. 
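+ *
+ * Each direction has a SET/CLR register pair: writing CPU<n>INTR_SET
+ * raises doorbell bits (up to four, see INTR_MASK) in CPU<n>INTR_STAT
+ * and asserts that CPU's interrupt line until the bits are cleared via
+ * CPU<n>INTR_CLR.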
+ */ + +/* + * This is a model of the Message Handling Unit (MHU) which is part of the + * Arm SSE-200 and documented in + * https://developer.arm.com/documentation/101104/latest/ + */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "trace.h" +#include "qapi/error.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "hw/registerfields.h" +#include "hw/irq.h" +#include "hw/misc/armsse-mhu.h" + +REG32(CPU0INTR_STAT, 0x0) +REG32(CPU0INTR_SET, 0x4) +REG32(CPU0INTR_CLR, 0x8) +REG32(CPU1INTR_STAT, 0x10) +REG32(CPU1INTR_SET, 0x14) +REG32(CPU1INTR_CLR, 0x18) +REG32(PID4, 0xfd0) +REG32(PID5, 0xfd4) +REG32(PID6, 0xfd8) +REG32(PID7, 0xfdc) +REG32(PID0, 0xfe0) +REG32(PID1, 0xfe4) +REG32(PID2, 0xfe8) +REG32(PID3, 0xfec) +REG32(CID0, 0xff0) +REG32(CID1, 0xff4) +REG32(CID2, 0xff8) +REG32(CID3, 0xffc) + +/* Valid bits in the interrupt registers. If any are set the IRQ is raised */ +#define INTR_MASK 0xf + +/* PID/CID values */ +static const int armsse_mhu_id[] = { + 0x04, 0x00, 0x00, 0x00, /* PID4..PID7 */ + 0x56, 0xb8, 0x0b, 0x00, /* PID0..PID3 */ + 0x0d, 0xf0, 0x05, 0xb1, /* CID0..CID3 */ +}; + +static void armsse_mhu_update(ARMSSEMHU *s) +{ + qemu_set_irq(s->cpu0irq, s->cpu0intr != 0); + qemu_set_irq(s->cpu1irq, s->cpu1intr != 0); +} + +static uint64_t armsse_mhu_read(void *opaque, hwaddr offset, unsigned size) +{ + ARMSSEMHU *s = ARMSSE_MHU(opaque); + uint64_t r; + + switch (offset) { + case A_CPU0INTR_STAT: + r = s->cpu0intr; + break; + + case A_CPU1INTR_STAT: + r = s->cpu1intr; + break; + + case A_PID4 ... A_CID3: + r = armsse_mhu_id[(offset - A_PID4) / 4]; + break; + + case A_CPU0INTR_SET: + case A_CPU0INTR_CLR: + case A_CPU1INTR_SET: + case A_CPU1INTR_CLR: + qemu_log_mask(LOG_GUEST_ERROR, + "SSE MHU: read of write-only register at offset 0x%x\n", + (int)offset); + r = 0; + break; + + default: + qemu_log_mask(LOG_GUEST_ERROR, + "SSE MHU read: bad offset 0x%x\n", (int)offset); + r = 0; + break; + } + trace_armsse_mhu_read(offset, r, size); + return r; +} + +static void armsse_mhu_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + ARMSSEMHU *s = ARMSSE_MHU(opaque); + + trace_armsse_mhu_write(offset, value, size); + + switch (offset) { + case A_CPU0INTR_SET: + s->cpu0intr |= (value & INTR_MASK); + break; + case A_CPU0INTR_CLR: + s->cpu0intr &= ~(value & INTR_MASK); + break; + case A_CPU1INTR_SET: + s->cpu1intr |= (value & INTR_MASK); + break; + case A_CPU1INTR_CLR: + s->cpu1intr &= ~(value & INTR_MASK); + break; + + case A_CPU0INTR_STAT: + case A_CPU1INTR_STAT: + case A_PID4 ... 
A_CID3: + qemu_log_mask(LOG_GUEST_ERROR, + "SSE MHU: write to read-only register at offset 0x%x\n", + (int)offset); + break; + + default: + qemu_log_mask(LOG_GUEST_ERROR, + "SSE MHU write: bad offset 0x%x\n", (int)offset); + break; + } + + armsse_mhu_update(s); +} + +static const MemoryRegionOps armsse_mhu_ops = { + .read = armsse_mhu_read, + .write = armsse_mhu_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid.min_access_size = 4, + .valid.max_access_size = 4, +}; + +static void armsse_mhu_reset(DeviceState *dev) +{ + ARMSSEMHU *s = ARMSSE_MHU(dev); + + s->cpu0intr = 0; + s->cpu1intr = 0; +} + +static const VMStateDescription armsse_mhu_vmstate = { + .name = "armsse-mhu", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(cpu0intr, ARMSSEMHU), + VMSTATE_UINT32(cpu1intr, ARMSSEMHU), + VMSTATE_END_OF_LIST() + }, +}; + +static void armsse_mhu_init(Object *obj) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + ARMSSEMHU *s = ARMSSE_MHU(obj); + + memory_region_init_io(&s->iomem, obj, &armsse_mhu_ops, + s, "armsse-mhu", 0x1000); + sysbus_init_mmio(sbd, &s->iomem); + sysbus_init_irq(sbd, &s->cpu0irq); + sysbus_init_irq(sbd, &s->cpu1irq); +} + +static void armsse_mhu_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = armsse_mhu_reset; + dc->vmsd = &armsse_mhu_vmstate; +} + +static const TypeInfo armsse_mhu_info = { + .name = TYPE_ARMSSE_MHU, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(ARMSSEMHU), + .instance_init = armsse_mhu_init, + .class_init = armsse_mhu_class_init, +}; + +static void armsse_mhu_register_types(void) +{ + type_register_static(&armsse_mhu_info); +} + +type_init(armsse_mhu_register_types); diff --git a/hw/misc/armv7m_ras.c b/hw/misc/armv7m_ras.c new file mode 100644 index 000000000..de24922c9 --- /dev/null +++ b/hw/misc/armv7m_ras.c @@ -0,0 +1,93 @@ +/* + * Arm M-profile RAS (Reliability, Availability and Serviceability) block + * + * Copyright (c) 2021 Linaro Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 or + * (at your option) any later version. 
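+ *
+ * This is a minimal model: it implements just enough of the register map
+ * (ERRIIDR and ERRDEVID) for guests that probe the RAS block, reports
+ * zero error records, and rejects unprivileged accesses.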
+ */ + +#include "qemu/osdep.h" +#include "hw/misc/armv7m_ras.h" +#include "qemu/log.h" + +static MemTxResult ras_read(void *opaque, hwaddr addr, + uint64_t *data, unsigned size, + MemTxAttrs attrs) +{ + if (attrs.user) { + return MEMTX_ERROR; + } + + switch (addr) { + case 0xe10: /* ERRIIDR */ + /* architect field = Arm; product/variant/revision 0 */ + *data = 0x43b; + break; + case 0xfc8: /* ERRDEVID */ + /* Minimal RAS: we implement 0 error record indexes */ + *data = 0; + break; + default: + qemu_log_mask(LOG_UNIMP, "Read RAS register offset 0x%x\n", + (uint32_t)addr); + *data = 0; + break; + } + return MEMTX_OK; +} + +static MemTxResult ras_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size, + MemTxAttrs attrs) +{ + if (attrs.user) { + return MEMTX_ERROR; + } + + switch (addr) { + default: + qemu_log_mask(LOG_UNIMP, "Write to RAS register offset 0x%x\n", + (uint32_t)addr); + break; + } + return MEMTX_OK; +} + +static const MemoryRegionOps ras_ops = { + .read_with_attrs = ras_read, + .write_with_attrs = ras_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + + +static void armv7m_ras_init(Object *obj) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + ARMv7MRAS *s = ARMV7M_RAS(obj); + + memory_region_init_io(&s->iomem, obj, &ras_ops, + s, "armv7m-ras", 0x1000); + sysbus_init_mmio(sbd, &s->iomem); +} + +static void armv7m_ras_class_init(ObjectClass *klass, void *data) +{ + /* This device has no state: no need for vmstate or reset */ +} + +static const TypeInfo armv7m_ras_info = { + .name = TYPE_ARMV7M_RAS, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(ARMv7MRAS), + .instance_init = armv7m_ras_init, + .class_init = armv7m_ras_class_init, +}; + +static void armv7m_ras_register_types(void) +{ + type_register_static(&armv7m_ras_info); +} + +type_init(armv7m_ras_register_types); diff --git a/hw/misc/aspeed_hace.c b/hw/misc/aspeed_hace.c new file mode 100644 index 000000000..10f00e65f --- /dev/null +++ b/hw/misc/aspeed_hace.c @@ -0,0 +1,389 @@ +/* + * ASPEED Hash and Crypto Engine + * + * Copyright (C) 2021 IBM Corp. 
+ * + * Joel Stanley + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "qemu/error-report.h" +#include "hw/misc/aspeed_hace.h" +#include "qapi/error.h" +#include "migration/vmstate.h" +#include "crypto/hash.h" +#include "hw/qdev-properties.h" +#include "hw/irq.h" + +#define R_CRYPT_CMD (0x10 / 4) + +#define R_STATUS (0x1c / 4) +#define HASH_IRQ BIT(9) +#define CRYPT_IRQ BIT(12) +#define TAG_IRQ BIT(15) + +#define R_HASH_SRC (0x20 / 4) +#define R_HASH_DEST (0x24 / 4) +#define R_HASH_SRC_LEN (0x2c / 4) + +#define R_HASH_CMD (0x30 / 4) +/* Hash algorithm selection */ +#define HASH_ALGO_MASK (BIT(4) | BIT(5) | BIT(6)) +#define HASH_ALGO_MD5 0 +#define HASH_ALGO_SHA1 BIT(5) +#define HASH_ALGO_SHA224 BIT(6) +#define HASH_ALGO_SHA256 (BIT(4) | BIT(6)) +#define HASH_ALGO_SHA512_SERIES (BIT(5) | BIT(6)) +/* SHA512 algorithm selection */ +#define SHA512_HASH_ALGO_MASK (BIT(10) | BIT(11) | BIT(12)) +#define HASH_ALGO_SHA512_SHA512 0 +#define HASH_ALGO_SHA512_SHA384 BIT(10) +#define HASH_ALGO_SHA512_SHA256 BIT(11) +#define HASH_ALGO_SHA512_SHA224 (BIT(10) | BIT(11)) +/* HMAC modes */ +#define HASH_HMAC_MASK (BIT(7) | BIT(8)) +#define HASH_DIGEST 0 +#define HASH_DIGEST_HMAC BIT(7) +#define HASH_DIGEST_ACCUM BIT(8) +#define HASH_HMAC_KEY (BIT(7) | BIT(8)) +/* Cascaded operation modes */ +#define HASH_ONLY 0 +#define HASH_ONLY2 BIT(0) +#define HASH_CRYPT_THEN_HASH BIT(1) +#define HASH_HASH_THEN_CRYPT (BIT(0) | BIT(1)) +/* Other cmd bits */ +#define HASH_IRQ_EN BIT(9) +#define HASH_SG_EN BIT(18) +/* Scatter-gather data list */ +#define SG_LIST_LEN_SIZE 4 +#define SG_LIST_LEN_MASK 0x0FFFFFFF +#define SG_LIST_LEN_LAST BIT(31) +#define SG_LIST_ADDR_SIZE 4 +#define SG_LIST_ADDR_MASK 0x7FFFFFFF +#define SG_LIST_ENTRY_SIZE (SG_LIST_LEN_SIZE + SG_LIST_ADDR_SIZE) +#define ASPEED_HACE_MAX_SG 256 /* max number of entries */ + +static const struct { + uint32_t mask; + QCryptoHashAlgorithm algo; +} hash_algo_map[] = { + { HASH_ALGO_MD5, QCRYPTO_HASH_ALG_MD5 }, + { HASH_ALGO_SHA1, QCRYPTO_HASH_ALG_SHA1 }, + { HASH_ALGO_SHA224, QCRYPTO_HASH_ALG_SHA224 }, + { HASH_ALGO_SHA256, QCRYPTO_HASH_ALG_SHA256 }, + { HASH_ALGO_SHA512_SERIES | HASH_ALGO_SHA512_SHA512, QCRYPTO_HASH_ALG_SHA512 }, + { HASH_ALGO_SHA512_SERIES | HASH_ALGO_SHA512_SHA384, QCRYPTO_HASH_ALG_SHA384 }, + { HASH_ALGO_SHA512_SERIES | HASH_ALGO_SHA512_SHA256, QCRYPTO_HASH_ALG_SHA256 }, +}; + +static int hash_algo_lookup(uint32_t reg) +{ + int i; + + reg &= HASH_ALGO_MASK | SHA512_HASH_ALGO_MASK; + + for (i = 0; i < ARRAY_SIZE(hash_algo_map); i++) { + if (reg == hash_algo_map[i].mask) { + return hash_algo_map[i].algo; + } + } + + return -1; +} + +static void do_hash_operation(AspeedHACEState *s, int algo, bool sg_mode) +{ + struct iovec iov[ASPEED_HACE_MAX_SG]; + g_autofree uint8_t *digest_buf; + size_t digest_len = 0; + int i; + + if (sg_mode) { + uint32_t len = 0; + + for (i = 0; !(len & SG_LIST_LEN_LAST); i++) { + uint32_t addr, src; + hwaddr plen; + + if (i == ASPEED_HACE_MAX_SG) { + qemu_log_mask(LOG_GUEST_ERROR, + "aspeed_hace: guest failed to set end of sg list marker\n"); + break; + } + + src = s->regs[R_HASH_SRC] + (i * SG_LIST_ENTRY_SIZE); + + len = address_space_ldl_le(&s->dram_as, src, + MEMTXATTRS_UNSPECIFIED, NULL); + + addr = address_space_ldl_le(&s->dram_as, src + SG_LIST_LEN_SIZE, + MEMTXATTRS_UNSPECIFIED, NULL); + addr &= SG_LIST_ADDR_MASK; + + iov[i].iov_len = len & SG_LIST_LEN_MASK; + plen = iov[i].iov_len; + iov[i].iov_base = address_space_map(&s->dram_as, addr, &plen, false, + 
MEMTXATTRS_UNSPECIFIED); + } + } else { + hwaddr len = s->regs[R_HASH_SRC_LEN]; + + iov[0].iov_len = len; + iov[0].iov_base = address_space_map(&s->dram_as, s->regs[R_HASH_SRC], + &len, false, + MEMTXATTRS_UNSPECIFIED); + i = 1; + } + + if (qcrypto_hash_bytesv(algo, iov, i, &digest_buf, &digest_len, NULL) < 0) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: qcrypto failed\n", __func__); + return; + } + + if (address_space_write(&s->dram_as, s->regs[R_HASH_DEST], + MEMTXATTRS_UNSPECIFIED, + digest_buf, digest_len)) { + qemu_log_mask(LOG_GUEST_ERROR, + "aspeed_hace: address space write failed\n"); + } + + for (; i > 0; i--) { + address_space_unmap(&s->dram_as, iov[i - 1].iov_base, + iov[i - 1].iov_len, false, + iov[i - 1].iov_len); + } + + /* + * Set status bits to indicate completion. Testing shows hardware sets + * these irrespective of HASH_IRQ_EN. + */ + s->regs[R_STATUS] |= HASH_IRQ; +} + +static uint64_t aspeed_hace_read(void *opaque, hwaddr addr, unsigned int size) +{ + AspeedHACEState *s = ASPEED_HACE(opaque); + + addr >>= 2; + + if (addr >= ASPEED_HACE_NR_REGS) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Out-of-bounds read at offset 0x%" HWADDR_PRIx "\n", + __func__, addr << 2); + return 0; + } + + return s->regs[addr]; +} + +static void aspeed_hace_write(void *opaque, hwaddr addr, uint64_t data, + unsigned int size) +{ + AspeedHACEState *s = ASPEED_HACE(opaque); + AspeedHACEClass *ahc = ASPEED_HACE_GET_CLASS(s); + + addr >>= 2; + + if (addr >= ASPEED_HACE_NR_REGS) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Out-of-bounds write at offset 0x%" HWADDR_PRIx "\n", + __func__, addr << 2); + return; + } + + switch (addr) { + case R_STATUS: + if (data & HASH_IRQ) { + data &= ~HASH_IRQ; + + if (s->regs[addr] & HASH_IRQ) { + qemu_irq_lower(s->irq); + } + } + break; + case R_HASH_SRC: + data &= ahc->src_mask; + break; + case R_HASH_DEST: + data &= ahc->dest_mask; + break; + case R_HASH_SRC_LEN: + data &= 0x0FFFFFFF; + break; + case R_HASH_CMD: { + int algo; + data &= ahc->hash_mask; + + if ((data & HASH_HMAC_MASK)) { + qemu_log_mask(LOG_UNIMP, + "%s: HMAC engine command mode %"PRIx64" not implemented", + __func__, (data & HASH_HMAC_MASK) >> 8); + } + if (data & BIT(1)) { + qemu_log_mask(LOG_UNIMP, + "%s: Cascaded mode not implemented", + __func__); + } + algo = hash_algo_lookup(data); + if (algo < 0) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Invalid hash algorithm selection 0x%"PRIx64"\n", + __func__, data & ahc->hash_mask); + break; + } + do_hash_operation(s, algo, data & HASH_SG_EN); + + if (data & HASH_IRQ_EN) { + qemu_irq_raise(s->irq); + } + break; + } + case R_CRYPT_CMD: + qemu_log_mask(LOG_UNIMP, "%s: Crypt commands not implemented\n", + __func__); + break; + default: + break; + } + + s->regs[addr] = data; +} + +static const MemoryRegionOps aspeed_hace_ops = { + .read = aspeed_hace_read, + .write = aspeed_hace_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 1, + .max_access_size = 4, + }, +}; + +static void aspeed_hace_reset(DeviceState *dev) +{ + struct AspeedHACEState *s = ASPEED_HACE(dev); + + memset(s->regs, 0, sizeof(s->regs)); +} + +static void aspeed_hace_realize(DeviceState *dev, Error **errp) +{ + AspeedHACEState *s = ASPEED_HACE(dev); + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + + sysbus_init_irq(sbd, &s->irq); + + memory_region_init_io(&s->iomem, OBJECT(s), &aspeed_hace_ops, s, + TYPE_ASPEED_HACE, 0x1000); + + if (!s->dram_mr) { + error_setg(errp, TYPE_ASPEED_HACE ": 'dram' link not set"); + return; + } + + address_space_init(&s->dram_as, s->dram_mr, 
"dram"); + + sysbus_init_mmio(sbd, &s->iomem); +} + +static Property aspeed_hace_properties[] = { + DEFINE_PROP_LINK("dram", AspeedHACEState, dram_mr, + TYPE_MEMORY_REGION, MemoryRegion *), + DEFINE_PROP_END_OF_LIST(), +}; + + +static const VMStateDescription vmstate_aspeed_hace = { + .name = TYPE_ASPEED_HACE, + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32_ARRAY(regs, AspeedHACEState, ASPEED_HACE_NR_REGS), + VMSTATE_END_OF_LIST(), + } +}; + +static void aspeed_hace_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = aspeed_hace_realize; + dc->reset = aspeed_hace_reset; + device_class_set_props(dc, aspeed_hace_properties); + dc->vmsd = &vmstate_aspeed_hace; +} + +static const TypeInfo aspeed_hace_info = { + .name = TYPE_ASPEED_HACE, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(AspeedHACEState), + .class_init = aspeed_hace_class_init, + .class_size = sizeof(AspeedHACEClass) +}; + +static void aspeed_ast2400_hace_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + AspeedHACEClass *ahc = ASPEED_HACE_CLASS(klass); + + dc->desc = "AST2400 Hash and Crypto Engine"; + + ahc->src_mask = 0x0FFFFFFF; + ahc->dest_mask = 0x0FFFFFF8; + ahc->hash_mask = 0x000003ff; /* No SG or SHA512 modes */ +} + +static const TypeInfo aspeed_ast2400_hace_info = { + .name = TYPE_ASPEED_AST2400_HACE, + .parent = TYPE_ASPEED_HACE, + .class_init = aspeed_ast2400_hace_class_init, +}; + +static void aspeed_ast2500_hace_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + AspeedHACEClass *ahc = ASPEED_HACE_CLASS(klass); + + dc->desc = "AST2500 Hash and Crypto Engine"; + + ahc->src_mask = 0x3fffffff; + ahc->dest_mask = 0x3ffffff8; + ahc->hash_mask = 0x000003ff; /* No SG or SHA512 modes */ +} + +static const TypeInfo aspeed_ast2500_hace_info = { + .name = TYPE_ASPEED_AST2500_HACE, + .parent = TYPE_ASPEED_HACE, + .class_init = aspeed_ast2500_hace_class_init, +}; + +static void aspeed_ast2600_hace_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + AspeedHACEClass *ahc = ASPEED_HACE_CLASS(klass); + + dc->desc = "AST2600 Hash and Crypto Engine"; + + ahc->src_mask = 0x7FFFFFFF; + ahc->dest_mask = 0x7FFFFFF8; + ahc->hash_mask = 0x00147FFF; +} + +static const TypeInfo aspeed_ast2600_hace_info = { + .name = TYPE_ASPEED_AST2600_HACE, + .parent = TYPE_ASPEED_HACE, + .class_init = aspeed_ast2600_hace_class_init, +}; + +static void aspeed_hace_register_types(void) +{ + type_register_static(&aspeed_ast2400_hace_info); + type_register_static(&aspeed_ast2500_hace_info); + type_register_static(&aspeed_ast2600_hace_info); + type_register_static(&aspeed_hace_info); +} + +type_init(aspeed_hace_register_types); diff --git a/hw/misc/aspeed_lpc.c b/hw/misc/aspeed_lpc.c new file mode 100644 index 000000000..2dddb27c3 --- /dev/null +++ b/hw/misc/aspeed_lpc.c @@ -0,0 +1,486 @@ +/* + * ASPEED LPC Controller + * + * Copyright (C) 2017-2018 IBM Corp. + * + * This code is licensed under the GPL version 2 or later. See + * the COPYING file in the top-level directory. 
+ */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "qemu/error-report.h" +#include "hw/misc/aspeed_lpc.h" +#include "qapi/error.h" +#include "qapi/visitor.h" +#include "hw/irq.h" +#include "hw/qdev-properties.h" +#include "migration/vmstate.h" + +#define TO_REG(offset) ((offset) >> 2) + +#define HICR0 TO_REG(0x00) +#define HICR0_LPC3E BIT(7) +#define HICR0_LPC2E BIT(6) +#define HICR0_LPC1E BIT(5) +#define HICR1 TO_REG(0x04) +#define HICR2 TO_REG(0x08) +#define HICR2_IBFIE3 BIT(3) +#define HICR2_IBFIE2 BIT(2) +#define HICR2_IBFIE1 BIT(1) +#define HICR3 TO_REG(0x0C) +#define HICR4 TO_REG(0x10) +#define HICR4_KCSENBL BIT(2) +#define IDR1 TO_REG(0x24) +#define IDR2 TO_REG(0x28) +#define IDR3 TO_REG(0x2C) +#define ODR1 TO_REG(0x30) +#define ODR2 TO_REG(0x34) +#define ODR3 TO_REG(0x38) +#define STR1 TO_REG(0x3C) +#define STR_OBF BIT(0) +#define STR_IBF BIT(1) +#define STR_CMD_DATA BIT(3) +#define STR2 TO_REG(0x40) +#define STR3 TO_REG(0x44) +#define HICR5 TO_REG(0x80) +#define HICR6 TO_REG(0x84) +#define HICR7 TO_REG(0x88) +#define HICR8 TO_REG(0x8C) +#define HICRB TO_REG(0x100) +#define HICRB_IBFIE4 BIT(1) +#define HICRB_LPC4E BIT(0) +#define IDR4 TO_REG(0x114) +#define ODR4 TO_REG(0x118) +#define STR4 TO_REG(0x11C) + +enum aspeed_kcs_channel_id { + kcs_channel_1 = 0, + kcs_channel_2, + kcs_channel_3, + kcs_channel_4, +}; + +static const enum aspeed_lpc_subdevice aspeed_kcs_subdevice_map[] = { + [kcs_channel_1] = aspeed_lpc_kcs_1, + [kcs_channel_2] = aspeed_lpc_kcs_2, + [kcs_channel_3] = aspeed_lpc_kcs_3, + [kcs_channel_4] = aspeed_lpc_kcs_4, +}; + +struct aspeed_kcs_channel { + enum aspeed_kcs_channel_id id; + + int idr; + int odr; + int str; +}; + +static const struct aspeed_kcs_channel aspeed_kcs_channel_map[] = { + [kcs_channel_1] = { + .id = kcs_channel_1, + .idr = IDR1, + .odr = ODR1, + .str = STR1 + }, + + [kcs_channel_2] = { + .id = kcs_channel_2, + .idr = IDR2, + .odr = ODR2, + .str = STR2 + }, + + [kcs_channel_3] = { + .id = kcs_channel_3, + .idr = IDR3, + .odr = ODR3, + .str = STR3 + }, + + [kcs_channel_4] = { + .id = kcs_channel_4, + .idr = IDR4, + .odr = ODR4, + .str = STR4 + }, +}; + +struct aspeed_kcs_register_data { + const char *name; + int reg; + const struct aspeed_kcs_channel *chan; +}; + +static const struct aspeed_kcs_register_data aspeed_kcs_registers[] = { + { + .name = "idr1", + .reg = IDR1, + .chan = &aspeed_kcs_channel_map[kcs_channel_1], + }, + { + .name = "odr1", + .reg = ODR1, + .chan = &aspeed_kcs_channel_map[kcs_channel_1], + }, + { + .name = "str1", + .reg = STR1, + .chan = &aspeed_kcs_channel_map[kcs_channel_1], + }, + { + .name = "idr2", + .reg = IDR2, + .chan = &aspeed_kcs_channel_map[kcs_channel_2], + }, + { + .name = "odr2", + .reg = ODR2, + .chan = &aspeed_kcs_channel_map[kcs_channel_2], + }, + { + .name = "str2", + .reg = STR2, + .chan = &aspeed_kcs_channel_map[kcs_channel_2], + }, + { + .name = "idr3", + .reg = IDR3, + .chan = &aspeed_kcs_channel_map[kcs_channel_3], + }, + { + .name = "odr3", + .reg = ODR3, + .chan = &aspeed_kcs_channel_map[kcs_channel_3], + }, + { + .name = "str3", + .reg = STR3, + .chan = &aspeed_kcs_channel_map[kcs_channel_3], + }, + { + .name = "idr4", + .reg = IDR4, + .chan = &aspeed_kcs_channel_map[kcs_channel_4], + }, + { + .name = "odr4", + .reg = ODR4, + .chan = &aspeed_kcs_channel_map[kcs_channel_4], + }, + { + .name = "str4", + .reg = STR4, + .chan = &aspeed_kcs_channel_map[kcs_channel_4], + }, + { }, +}; + +static const struct aspeed_kcs_register_data * +aspeed_kcs_get_register_data_by_name(const char *name) +{ 
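+    /*
+     * Linear scan of the sentinel-terminated aspeed_kcs_registers table;
+     * 'name' is the QOM property name ("idr1" ... "str4").
+     */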
+ const struct aspeed_kcs_register_data *pos = aspeed_kcs_registers; + + while (pos->name) { + if (!strcmp(pos->name, name)) { + return pos; + } + pos++; + } + + return NULL; +} + +static const struct aspeed_kcs_channel * +aspeed_kcs_get_channel_by_register(int reg) +{ + const struct aspeed_kcs_register_data *pos = aspeed_kcs_registers; + + while (pos->name) { + if (pos->reg == reg) { + return pos->chan; + } + pos++; + } + + return NULL; +} + +static void aspeed_kcs_get_register_property(Object *obj, + Visitor *v, + const char *name, + void *opaque, + Error **errp) +{ + const struct aspeed_kcs_register_data *data; + AspeedLPCState *s = ASPEED_LPC(obj); + uint32_t val; + + data = aspeed_kcs_get_register_data_by_name(name); + if (!data) { + return; + } + + if (!strncmp("odr", name, 3)) { + s->regs[data->chan->str] &= ~STR_OBF; + } + + val = s->regs[data->reg]; + + visit_type_uint32(v, name, &val, errp); +} + +static bool aspeed_kcs_channel_enabled(AspeedLPCState *s, + const struct aspeed_kcs_channel *channel) +{ + switch (channel->id) { + case kcs_channel_1: return s->regs[HICR0] & HICR0_LPC1E; + case kcs_channel_2: return s->regs[HICR0] & HICR0_LPC2E; + case kcs_channel_3: + return (s->regs[HICR0] & HICR0_LPC3E) && + (s->regs[HICR4] & HICR4_KCSENBL); + case kcs_channel_4: return s->regs[HICRB] & HICRB_LPC4E; + default: return false; + } +} + +static bool +aspeed_kcs_channel_ibf_irq_enabled(AspeedLPCState *s, + const struct aspeed_kcs_channel *channel) +{ + if (!aspeed_kcs_channel_enabled(s, channel)) { + return false; + } + + switch (channel->id) { + case kcs_channel_1: return s->regs[HICR2] & HICR2_IBFIE1; + case kcs_channel_2: return s->regs[HICR2] & HICR2_IBFIE2; + case kcs_channel_3: return s->regs[HICR2] & HICR2_IBFIE3; + case kcs_channel_4: return s->regs[HICRB] & HICRB_IBFIE4; + default: return false; + } +} + +static void aspeed_kcs_set_register_property(Object *obj, + Visitor *v, + const char *name, + void *opaque, + Error **errp) +{ + const struct aspeed_kcs_register_data *data; + AspeedLPCState *s = ASPEED_LPC(obj); + uint32_t val; + + data = aspeed_kcs_get_register_data_by_name(name); + if (!data) { + return; + } + + if (!visit_type_uint32(v, name, &val, errp)) { + return; + } + + if (strncmp("str", name, 3)) { + s->regs[data->reg] = val; + } + + if (!strncmp("idr", name, 3)) { + s->regs[data->chan->str] |= STR_IBF; + if (aspeed_kcs_channel_ibf_irq_enabled(s, data->chan)) { + enum aspeed_lpc_subdevice subdev; + + subdev = aspeed_kcs_subdevice_map[data->chan->id]; + qemu_irq_raise(s->subdevice_irqs[subdev]); + } + } +} + +static void aspeed_lpc_set_irq(void *opaque, int irq, int level) +{ + AspeedLPCState *s = (AspeedLPCState *)opaque; + + if (level) { + s->subdevice_irqs_pending |= BIT(irq); + } else { + s->subdevice_irqs_pending &= ~BIT(irq); + } + + qemu_set_irq(s->irq, !!s->subdevice_irqs_pending); +} + +static uint64_t aspeed_lpc_read(void *opaque, hwaddr offset, unsigned size) +{ + AspeedLPCState *s = ASPEED_LPC(opaque); + int reg = TO_REG(offset); + + if (reg >= ARRAY_SIZE(s->regs)) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Out-of-bounds read at offset 0x%" HWADDR_PRIx "\n", + __func__, offset); + return 0; + } + + switch (reg) { + case IDR1: + case IDR2: + case IDR3: + case IDR4: + { + const struct aspeed_kcs_channel *channel; + + channel = aspeed_kcs_get_channel_by_register(reg); + if (s->regs[channel->str] & STR_IBF) { + enum aspeed_lpc_subdevice subdev; + + subdev = aspeed_kcs_subdevice_map[channel->id]; + qemu_irq_lower(s->subdevice_irqs[subdev]); + } + + 
s->regs[channel->str] &= ~STR_IBF; + break; + } + default: + break; + } + + return s->regs[reg]; +} + +static void aspeed_lpc_write(void *opaque, hwaddr offset, uint64_t data, + unsigned int size) +{ + AspeedLPCState *s = ASPEED_LPC(opaque); + int reg = TO_REG(offset); + + if (reg >= ARRAY_SIZE(s->regs)) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Out-of-bounds write at offset 0x%" HWADDR_PRIx "\n", + __func__, offset); + return; + } + + + switch (reg) { + case ODR1: + case ODR2: + case ODR3: + case ODR4: + s->regs[aspeed_kcs_get_channel_by_register(reg)->str] |= STR_OBF; + break; + default: + break; + } + + s->regs[reg] = data; +} + +static const MemoryRegionOps aspeed_lpc_ops = { + .read = aspeed_lpc_read, + .write = aspeed_lpc_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 1, + .max_access_size = 4, + }, +}; + +static void aspeed_lpc_reset(DeviceState *dev) +{ + struct AspeedLPCState *s = ASPEED_LPC(dev); + + s->subdevice_irqs_pending = 0; + + memset(s->regs, 0, sizeof(s->regs)); + + s->regs[HICR7] = s->hicr7; +} + +static void aspeed_lpc_realize(DeviceState *dev, Error **errp) +{ + AspeedLPCState *s = ASPEED_LPC(dev); + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + + sysbus_init_irq(sbd, &s->irq); + sysbus_init_irq(sbd, &s->subdevice_irqs[aspeed_lpc_kcs_1]); + sysbus_init_irq(sbd, &s->subdevice_irqs[aspeed_lpc_kcs_2]); + sysbus_init_irq(sbd, &s->subdevice_irqs[aspeed_lpc_kcs_3]); + sysbus_init_irq(sbd, &s->subdevice_irqs[aspeed_lpc_kcs_4]); + sysbus_init_irq(sbd, &s->subdevice_irqs[aspeed_lpc_ibt]); + + memory_region_init_io(&s->iomem, OBJECT(s), &aspeed_lpc_ops, s, + TYPE_ASPEED_LPC, 0x1000); + + sysbus_init_mmio(sbd, &s->iomem); + + qdev_init_gpio_in(dev, aspeed_lpc_set_irq, ASPEED_LPC_NR_SUBDEVS); +} + +static void aspeed_lpc_init(Object *obj) +{ + object_property_add(obj, "idr1", "uint32", aspeed_kcs_get_register_property, + aspeed_kcs_set_register_property, NULL, NULL); + object_property_add(obj, "odr1", "uint32", aspeed_kcs_get_register_property, + aspeed_kcs_set_register_property, NULL, NULL); + object_property_add(obj, "str1", "uint32", aspeed_kcs_get_register_property, + aspeed_kcs_set_register_property, NULL, NULL); + object_property_add(obj, "idr2", "uint32", aspeed_kcs_get_register_property, + aspeed_kcs_set_register_property, NULL, NULL); + object_property_add(obj, "odr2", "uint32", aspeed_kcs_get_register_property, + aspeed_kcs_set_register_property, NULL, NULL); + object_property_add(obj, "str2", "uint32", aspeed_kcs_get_register_property, + aspeed_kcs_set_register_property, NULL, NULL); + object_property_add(obj, "idr3", "uint32", aspeed_kcs_get_register_property, + aspeed_kcs_set_register_property, NULL, NULL); + object_property_add(obj, "odr3", "uint32", aspeed_kcs_get_register_property, + aspeed_kcs_set_register_property, NULL, NULL); + object_property_add(obj, "str3", "uint32", aspeed_kcs_get_register_property, + aspeed_kcs_set_register_property, NULL, NULL); + object_property_add(obj, "idr4", "uint32", aspeed_kcs_get_register_property, + aspeed_kcs_set_register_property, NULL, NULL); + object_property_add(obj, "odr4", "uint32", aspeed_kcs_get_register_property, + aspeed_kcs_set_register_property, NULL, NULL); + object_property_add(obj, "str4", "uint32", aspeed_kcs_get_register_property, + aspeed_kcs_set_register_property, NULL, NULL); +} + +static const VMStateDescription vmstate_aspeed_lpc = { + .name = TYPE_ASPEED_LPC, + .version_id = 2, + .minimum_version_id = 2, + .fields = (VMStateField[]) { + VMSTATE_UINT32_ARRAY(regs, 
AspeedLPCState, ASPEED_LPC_NR_REGS), + VMSTATE_UINT32(subdevice_irqs_pending, AspeedLPCState), + VMSTATE_END_OF_LIST(), + } +}; + +static Property aspeed_lpc_properties[] = { + DEFINE_PROP_UINT32("hicr7", AspeedLPCState, hicr7, 0), + DEFINE_PROP_END_OF_LIST(), +}; + +static void aspeed_lpc_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = aspeed_lpc_realize; + dc->reset = aspeed_lpc_reset; + dc->desc = "Aspeed LPC Controller", + dc->vmsd = &vmstate_aspeed_lpc; + device_class_set_props(dc, aspeed_lpc_properties); +} + +static const TypeInfo aspeed_lpc_info = { + .name = TYPE_ASPEED_LPC, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(AspeedLPCState), + .class_init = aspeed_lpc_class_init, + .instance_init = aspeed_lpc_init, +}; + +static void aspeed_lpc_register_types(void) +{ + type_register_static(&aspeed_lpc_info); +} + +type_init(aspeed_lpc_register_types); diff --git a/hw/misc/aspeed_scu.c b/hw/misc/aspeed_scu.c new file mode 100644 index 000000000..d06e179a6 --- /dev/null +++ b/hw/misc/aspeed_scu.c @@ -0,0 +1,740 @@ +/* + * ASPEED System Control Unit + * + * Andrew Jeffery + * + * Copyright 2016 IBM Corp. + * + * This code is licensed under the GPL version 2 or later. See + * the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "hw/misc/aspeed_scu.h" +#include "hw/qdev-properties.h" +#include "migration/vmstate.h" +#include "qapi/error.h" +#include "qapi/visitor.h" +#include "qemu/bitops.h" +#include "qemu/log.h" +#include "qemu/guest-random.h" +#include "qemu/module.h" +#include "trace.h" + +#define TO_REG(offset) ((offset) >> 2) + +#define PROT_KEY TO_REG(0x00) +#define SYS_RST_CTRL TO_REG(0x04) +#define CLK_SEL TO_REG(0x08) +#define CLK_STOP_CTRL TO_REG(0x0C) +#define FREQ_CNTR_CTRL TO_REG(0x10) +#define FREQ_CNTR_EVAL TO_REG(0x14) +#define IRQ_CTRL TO_REG(0x18) +#define D2PLL_PARAM TO_REG(0x1C) +#define MPLL_PARAM TO_REG(0x20) +#define HPLL_PARAM TO_REG(0x24) +#define FREQ_CNTR_RANGE TO_REG(0x28) +#define MISC_CTRL1 TO_REG(0x2C) +#define PCI_CTRL1 TO_REG(0x30) +#define PCI_CTRL2 TO_REG(0x34) +#define PCI_CTRL3 TO_REG(0x38) +#define SYS_RST_STATUS TO_REG(0x3C) +#define SOC_SCRATCH1 TO_REG(0x40) +#define SOC_SCRATCH2 TO_REG(0x44) +#define MAC_CLK_DELAY TO_REG(0x48) +#define MISC_CTRL2 TO_REG(0x4C) +#define VGA_SCRATCH1 TO_REG(0x50) +#define VGA_SCRATCH2 TO_REG(0x54) +#define VGA_SCRATCH3 TO_REG(0x58) +#define VGA_SCRATCH4 TO_REG(0x5C) +#define VGA_SCRATCH5 TO_REG(0x60) +#define VGA_SCRATCH6 TO_REG(0x64) +#define VGA_SCRATCH7 TO_REG(0x68) +#define VGA_SCRATCH8 TO_REG(0x6C) +#define HW_STRAP1 TO_REG(0x70) +#define RNG_CTRL TO_REG(0x74) +#define RNG_DATA TO_REG(0x78) +#define SILICON_REV TO_REG(0x7C) +#define PINMUX_CTRL1 TO_REG(0x80) +#define PINMUX_CTRL2 TO_REG(0x84) +#define PINMUX_CTRL3 TO_REG(0x88) +#define PINMUX_CTRL4 TO_REG(0x8C) +#define PINMUX_CTRL5 TO_REG(0x90) +#define PINMUX_CTRL6 TO_REG(0x94) +#define WDT_RST_CTRL TO_REG(0x9C) +#define PINMUX_CTRL7 TO_REG(0xA0) +#define PINMUX_CTRL8 TO_REG(0xA4) +#define PINMUX_CTRL9 TO_REG(0xA8) +#define WAKEUP_EN TO_REG(0xC0) +#define WAKEUP_CTRL TO_REG(0xC4) +#define HW_STRAP2 TO_REG(0xD0) +#define FREE_CNTR4 TO_REG(0xE0) +#define FREE_CNTR4_EXT TO_REG(0xE4) +#define CPU2_CTRL TO_REG(0x100) +#define CPU2_BASE_SEG1 TO_REG(0x104) +#define CPU2_BASE_SEG2 TO_REG(0x108) +#define CPU2_BASE_SEG3 TO_REG(0x10C) +#define CPU2_BASE_SEG4 TO_REG(0x110) +#define CPU2_BASE_SEG5 TO_REG(0x114) +#define CPU2_CACHE_CTRL TO_REG(0x118) +#define CHIP_ID0 
TO_REG(0x150) +#define CHIP_ID1 TO_REG(0x154) +#define UART_HPLL_CLK TO_REG(0x160) +#define PCIE_CTRL TO_REG(0x180) +#define BMC_MMIO_CTRL TO_REG(0x184) +#define RELOC_DECODE_BASE1 TO_REG(0x188) +#define RELOC_DECODE_BASE2 TO_REG(0x18C) +#define MAILBOX_DECODE_BASE TO_REG(0x190) +#define SRAM_DECODE_BASE1 TO_REG(0x194) +#define SRAM_DECODE_BASE2 TO_REG(0x198) +#define BMC_REV TO_REG(0x19C) +#define BMC_DEV_ID TO_REG(0x1A4) + +#define AST2600_PROT_KEY TO_REG(0x00) +#define AST2600_SILICON_REV TO_REG(0x04) +#define AST2600_SILICON_REV2 TO_REG(0x14) +#define AST2600_SYS_RST_CTRL TO_REG(0x40) +#define AST2600_SYS_RST_CTRL_CLR TO_REG(0x44) +#define AST2600_SYS_RST_CTRL2 TO_REG(0x50) +#define AST2600_SYS_RST_CTRL2_CLR TO_REG(0x54) +#define AST2600_CLK_STOP_CTRL TO_REG(0x80) +#define AST2600_CLK_STOP_CTRL_CLR TO_REG(0x84) +#define AST2600_CLK_STOP_CTRL2 TO_REG(0x90) +#define AST2600_CLK_STOP_CTRL2_CLR TO_REG(0x94) +#define AST2600_DEBUG_CTRL TO_REG(0xC8) +#define AST2600_DEBUG_CTRL2 TO_REG(0xD8) +#define AST2600_SDRAM_HANDSHAKE TO_REG(0x100) +#define AST2600_HPLL_PARAM TO_REG(0x200) +#define AST2600_HPLL_EXT TO_REG(0x204) +#define AST2600_APLL_PARAM TO_REG(0x210) +#define AST2600_APLL_EXT TO_REG(0x214) +#define AST2600_MPLL_PARAM TO_REG(0x220) +#define AST2600_MPLL_EXT TO_REG(0x224) +#define AST2600_EPLL_PARAM TO_REG(0x240) +#define AST2600_EPLL_EXT TO_REG(0x244) +#define AST2600_DPLL_PARAM TO_REG(0x260) +#define AST2600_DPLL_EXT TO_REG(0x264) +#define AST2600_CLK_SEL TO_REG(0x300) +#define AST2600_CLK_SEL2 TO_REG(0x304) +#define AST2600_CLK_SEL3 TO_REG(0x308) +#define AST2600_CLK_SEL4 TO_REG(0x310) +#define AST2600_CLK_SEL5 TO_REG(0x314) +#define AST2600_UARTCLK TO_REG(0x338) +#define AST2600_HUARTCLK TO_REG(0x33C) +#define AST2600_HW_STRAP1 TO_REG(0x500) +#define AST2600_HW_STRAP1_CLR TO_REG(0x504) +#define AST2600_HW_STRAP1_PROT TO_REG(0x508) +#define AST2600_HW_STRAP2 TO_REG(0x510) +#define AST2600_HW_STRAP2_CLR TO_REG(0x514) +#define AST2600_HW_STRAP2_PROT TO_REG(0x518) +#define AST2600_RNG_CTRL TO_REG(0x524) +#define AST2600_RNG_DATA TO_REG(0x540) +#define AST2600_CHIP_ID0 TO_REG(0x5B0) +#define AST2600_CHIP_ID1 TO_REG(0x5B4) + +#define AST2600_CLK TO_REG(0x40) + +#define SCU_IO_REGION_SIZE 0x1000 + +static const uint32_t ast2400_a0_resets[ASPEED_SCU_NR_REGS] = { + [SYS_RST_CTRL] = 0xFFCFFEDCU, + [CLK_SEL] = 0xF3F40000U, + [CLK_STOP_CTRL] = 0x19FC3E8BU, + [D2PLL_PARAM] = 0x00026108U, + [MPLL_PARAM] = 0x00030291U, + [HPLL_PARAM] = 0x00000291U, + [MISC_CTRL1] = 0x00000010U, + [PCI_CTRL1] = 0x20001A03U, + [PCI_CTRL2] = 0x20001A03U, + [PCI_CTRL3] = 0x04000030U, + [SYS_RST_STATUS] = 0x00000001U, + [SOC_SCRATCH1] = 0x000000C0U, /* SoC completed DRAM init */ + [MISC_CTRL2] = 0x00000023U, + [RNG_CTRL] = 0x0000000EU, + [PINMUX_CTRL2] = 0x0000F000U, + [PINMUX_CTRL3] = 0x01000000U, + [PINMUX_CTRL4] = 0x000000FFU, + [PINMUX_CTRL5] = 0x0000A000U, + [WDT_RST_CTRL] = 0x003FFFF3U, + [PINMUX_CTRL8] = 0xFFFF0000U, + [PINMUX_CTRL9] = 0x000FFFFFU, + [FREE_CNTR4] = 0x000000FFU, + [FREE_CNTR4_EXT] = 0x000000FFU, + [CPU2_BASE_SEG1] = 0x80000000U, + [CPU2_BASE_SEG4] = 0x1E600000U, + [CPU2_BASE_SEG5] = 0xC0000000U, + [UART_HPLL_CLK] = 0x00001903U, + [PCIE_CTRL] = 0x0000007BU, + [BMC_DEV_ID] = 0x00002402U +}; + +/* SCU70 bit 23: 0 24Mhz. 
bit 11:9: 0b001 AXI:ABH ratio 2:1 */ +/* AST2500 revision A1 */ + +static const uint32_t ast2500_a1_resets[ASPEED_SCU_NR_REGS] = { + [SYS_RST_CTRL] = 0xFFCFFEDCU, + [CLK_SEL] = 0xF3F40000U, + [CLK_STOP_CTRL] = 0x19FC3E8BU, + [D2PLL_PARAM] = 0x00026108U, + [MPLL_PARAM] = 0x00030291U, + [HPLL_PARAM] = 0x93000400U, + [MISC_CTRL1] = 0x00000010U, + [PCI_CTRL1] = 0x20001A03U, + [PCI_CTRL2] = 0x20001A03U, + [PCI_CTRL3] = 0x04000030U, + [SYS_RST_STATUS] = 0x00000001U, + [SOC_SCRATCH1] = 0x000000C0U, /* SoC completed DRAM init */ + [MISC_CTRL2] = 0x00000023U, + [RNG_CTRL] = 0x0000000EU, + [PINMUX_CTRL2] = 0x0000F000U, + [PINMUX_CTRL3] = 0x03000000U, + [PINMUX_CTRL4] = 0x00000000U, + [PINMUX_CTRL5] = 0x0000A000U, + [WDT_RST_CTRL] = 0x023FFFF3U, + [PINMUX_CTRL8] = 0xFFFF0000U, + [PINMUX_CTRL9] = 0x000FFFFFU, + [FREE_CNTR4] = 0x000000FFU, + [FREE_CNTR4_EXT] = 0x000000FFU, + [CPU2_BASE_SEG1] = 0x80000000U, + [CPU2_BASE_SEG4] = 0x1E600000U, + [CPU2_BASE_SEG5] = 0xC0000000U, + [CHIP_ID0] = 0x1234ABCDU, + [CHIP_ID1] = 0x88884444U, + [UART_HPLL_CLK] = 0x00001903U, + [PCIE_CTRL] = 0x0000007BU, + [BMC_DEV_ID] = 0x00002402U +}; + +static uint32_t aspeed_scu_get_random(void) +{ + uint32_t num; + qemu_guest_getrandom_nofail(&num, sizeof(num)); + return num; +} + +uint32_t aspeed_scu_get_apb_freq(AspeedSCUState *s) +{ + AspeedSCUClass *asc = ASPEED_SCU_GET_CLASS(s); + uint32_t hpll = asc->calc_hpll(s, s->regs[HPLL_PARAM]); + + return hpll / (SCU_CLK_GET_PCLK_DIV(s->regs[CLK_SEL]) + 1) + / asc->apb_divider; +} + +static uint64_t aspeed_scu_read(void *opaque, hwaddr offset, unsigned size) +{ + AspeedSCUState *s = ASPEED_SCU(opaque); + int reg = TO_REG(offset); + + if (reg >= ASPEED_SCU_NR_REGS) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Out-of-bounds read at offset 0x%" HWADDR_PRIx "\n", + __func__, offset); + return 0; + } + + switch (reg) { + case RNG_DATA: + /* On hardware, RNG_DATA works regardless of + * the state of the enable bit in RNG_CTRL + */ + s->regs[RNG_DATA] = aspeed_scu_get_random(); + break; + case WAKEUP_EN: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Read of write-only offset 0x%" HWADDR_PRIx "\n", + __func__, offset); + break; + } + + return s->regs[reg]; +} + +static void aspeed_ast2400_scu_write(void *opaque, hwaddr offset, + uint64_t data, unsigned size) +{ + AspeedSCUState *s = ASPEED_SCU(opaque); + int reg = TO_REG(offset); + + if (reg >= ASPEED_SCU_NR_REGS) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Out-of-bounds write at offset 0x%" HWADDR_PRIx "\n", + __func__, offset); + return; + } + + if (reg > PROT_KEY && reg < CPU2_BASE_SEG1 && + !s->regs[PROT_KEY]) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: SCU is locked!\n", __func__); + } + + trace_aspeed_scu_write(offset, size, data); + + switch (reg) { + case PROT_KEY: + s->regs[reg] = (data == ASPEED_SCU_PROT_KEY) ? 1 : 0; + return; + case SILICON_REV: + case FREQ_CNTR_EVAL: + case VGA_SCRATCH1 ... 
VGA_SCRATCH8: + case RNG_DATA: + case FREE_CNTR4: + case FREE_CNTR4_EXT: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Write to read-only offset 0x%" HWADDR_PRIx "\n", + __func__, offset); + return; + } + + s->regs[reg] = data; +} + +static void aspeed_ast2500_scu_write(void *opaque, hwaddr offset, + uint64_t data, unsigned size) +{ + AspeedSCUState *s = ASPEED_SCU(opaque); + int reg = TO_REG(offset); + + if (reg >= ASPEED_SCU_NR_REGS) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Out-of-bounds write at offset 0x%" HWADDR_PRIx "\n", + __func__, offset); + return; + } + + if (reg > PROT_KEY && reg < CPU2_BASE_SEG1 && + !s->regs[PROT_KEY]) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: SCU is locked!\n", __func__); + return; + } + + trace_aspeed_scu_write(offset, size, data); + + switch (reg) { + case PROT_KEY: + s->regs[reg] = (data == ASPEED_SCU_PROT_KEY) ? 1 : 0; + return; + case HW_STRAP1: + s->regs[HW_STRAP1] |= data; + return; + case SILICON_REV: + s->regs[HW_STRAP1] &= ~data; + return; + case FREQ_CNTR_EVAL: + case VGA_SCRATCH1 ... VGA_SCRATCH8: + case RNG_DATA: + case FREE_CNTR4: + case FREE_CNTR4_EXT: + case CHIP_ID0: + case CHIP_ID1: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Write to read-only offset 0x%" HWADDR_PRIx "\n", + __func__, offset); + return; + } + + s->regs[reg] = data; +} + +static const MemoryRegionOps aspeed_ast2400_scu_ops = { + .read = aspeed_scu_read, + .write = aspeed_ast2400_scu_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 1, + .max_access_size = 4, + }, +}; + +static const MemoryRegionOps aspeed_ast2500_scu_ops = { + .read = aspeed_scu_read, + .write = aspeed_ast2500_scu_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid.min_access_size = 4, + .valid.max_access_size = 4, + .valid.unaligned = false, +}; + +static uint32_t aspeed_scu_get_clkin(AspeedSCUState *s) +{ + if (s->hw_strap1 & SCU_HW_STRAP_CLK_25M_IN) { + return 25000000; + } else if (s->hw_strap1 & SCU_HW_STRAP_CLK_48M_IN) { + return 48000000; + } else { + return 24000000; + } +} + +/* + * Strapped frequencies for the AST2400 in MHz. They depend on the + * clkin frequency. 
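+ *
+ * For example, with a 24 MHz or 48 MHz CLKIN and a strapped frequency
+ * select of 0, hpll_ast2400_freqs[0][0] yields 384 MHz; with the 25 MHz
+ * strap the same select yields 400 MHz.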
+ */ +static const uint32_t hpll_ast2400_freqs[][4] = { + { 384, 360, 336, 408 }, /* 24MHz or 48MHz */ + { 400, 375, 350, 425 }, /* 25MHz */ +}; + +static uint32_t aspeed_2400_scu_calc_hpll(AspeedSCUState *s, uint32_t hpll_reg) +{ + uint8_t freq_select; + bool clk_25m_in; + uint32_t clkin = aspeed_scu_get_clkin(s); + + if (hpll_reg & SCU_AST2400_H_PLL_OFF) { + return 0; + } + + if (hpll_reg & SCU_AST2400_H_PLL_PROGRAMMED) { + uint32_t multiplier = 1; + + if (!(hpll_reg & SCU_AST2400_H_PLL_BYPASS_EN)) { + uint32_t n = (hpll_reg >> 5) & 0x3f; + uint32_t od = (hpll_reg >> 4) & 0x1; + uint32_t d = hpll_reg & 0xf; + + multiplier = (2 - od) * ((n + 2) / (d + 1)); + } + + return clkin * multiplier; + } + + /* HW strapping */ + clk_25m_in = !!(s->hw_strap1 & SCU_HW_STRAP_CLK_25M_IN); + freq_select = SCU_AST2400_HW_STRAP_GET_H_PLL_CLK(s->hw_strap1); + + return hpll_ast2400_freqs[clk_25m_in][freq_select] * 1000000; +} + +static uint32_t aspeed_2500_scu_calc_hpll(AspeedSCUState *s, uint32_t hpll_reg) +{ + uint32_t multiplier = 1; + uint32_t clkin = aspeed_scu_get_clkin(s); + + if (hpll_reg & SCU_H_PLL_OFF) { + return 0; + } + + if (!(hpll_reg & SCU_H_PLL_BYPASS_EN)) { + uint32_t p = (hpll_reg >> 13) & 0x3f; + uint32_t m = (hpll_reg >> 5) & 0xff; + uint32_t n = hpll_reg & 0x1f; + + multiplier = ((m + 1) / (n + 1)) / (p + 1); + } + + return clkin * multiplier; +} + +static void aspeed_scu_reset(DeviceState *dev) +{ + AspeedSCUState *s = ASPEED_SCU(dev); + AspeedSCUClass *asc = ASPEED_SCU_GET_CLASS(dev); + + memcpy(s->regs, asc->resets, asc->nr_regs * 4); + s->regs[SILICON_REV] = s->silicon_rev; + s->regs[HW_STRAP1] = s->hw_strap1; + s->regs[HW_STRAP2] = s->hw_strap2; + s->regs[PROT_KEY] = s->hw_prot_key; +} + +static uint32_t aspeed_silicon_revs[] = { + AST2400_A0_SILICON_REV, + AST2400_A1_SILICON_REV, + AST2500_A0_SILICON_REV, + AST2500_A1_SILICON_REV, + AST2600_A0_SILICON_REV, + AST2600_A1_SILICON_REV, + AST2600_A2_SILICON_REV, + AST2600_A3_SILICON_REV, +}; + +bool is_supported_silicon_rev(uint32_t silicon_rev) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(aspeed_silicon_revs); i++) { + if (silicon_rev == aspeed_silicon_revs[i]) { + return true; + } + } + + return false; +} + +static void aspeed_scu_realize(DeviceState *dev, Error **errp) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + AspeedSCUState *s = ASPEED_SCU(dev); + AspeedSCUClass *asc = ASPEED_SCU_GET_CLASS(dev); + + if (!is_supported_silicon_rev(s->silicon_rev)) { + error_setg(errp, "Unknown silicon revision: 0x%" PRIx32, + s->silicon_rev); + return; + } + + memory_region_init_io(&s->iomem, OBJECT(s), asc->ops, s, + TYPE_ASPEED_SCU, SCU_IO_REGION_SIZE); + + sysbus_init_mmio(sbd, &s->iomem); +} + +static const VMStateDescription vmstate_aspeed_scu = { + .name = "aspeed.scu", + .version_id = 2, + .minimum_version_id = 2, + .fields = (VMStateField[]) { + VMSTATE_UINT32_ARRAY(regs, AspeedSCUState, ASPEED_AST2600_SCU_NR_REGS), + VMSTATE_END_OF_LIST() + } +}; + +static Property aspeed_scu_properties[] = { + DEFINE_PROP_UINT32("silicon-rev", AspeedSCUState, silicon_rev, 0), + DEFINE_PROP_UINT32("hw-strap1", AspeedSCUState, hw_strap1, 0), + DEFINE_PROP_UINT32("hw-strap2", AspeedSCUState, hw_strap2, 0), + DEFINE_PROP_UINT32("hw-prot-key", AspeedSCUState, hw_prot_key, 0), + DEFINE_PROP_END_OF_LIST(), +}; + +static void aspeed_scu_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + dc->realize = aspeed_scu_realize; + dc->reset = aspeed_scu_reset; + dc->desc = "ASPEED System Control Unit"; + dc->vmsd = 
&vmstate_aspeed_scu; + device_class_set_props(dc, aspeed_scu_properties); +} + +static const TypeInfo aspeed_scu_info = { + .name = TYPE_ASPEED_SCU, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(AspeedSCUState), + .class_init = aspeed_scu_class_init, + .class_size = sizeof(AspeedSCUClass), + .abstract = true, +}; + +static void aspeed_2400_scu_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + AspeedSCUClass *asc = ASPEED_SCU_CLASS(klass); + + dc->desc = "ASPEED 2400 System Control Unit"; + asc->resets = ast2400_a0_resets; + asc->calc_hpll = aspeed_2400_scu_calc_hpll; + asc->apb_divider = 2; + asc->nr_regs = ASPEED_SCU_NR_REGS; + asc->ops = &aspeed_ast2400_scu_ops; +} + +static const TypeInfo aspeed_2400_scu_info = { + .name = TYPE_ASPEED_2400_SCU, + .parent = TYPE_ASPEED_SCU, + .instance_size = sizeof(AspeedSCUState), + .class_init = aspeed_2400_scu_class_init, +}; + +static void aspeed_2500_scu_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + AspeedSCUClass *asc = ASPEED_SCU_CLASS(klass); + + dc->desc = "ASPEED 2500 System Control Unit"; + asc->resets = ast2500_a1_resets; + asc->calc_hpll = aspeed_2500_scu_calc_hpll; + asc->apb_divider = 4; + asc->nr_regs = ASPEED_SCU_NR_REGS; + asc->ops = &aspeed_ast2500_scu_ops; +} + +static const TypeInfo aspeed_2500_scu_info = { + .name = TYPE_ASPEED_2500_SCU, + .parent = TYPE_ASPEED_SCU, + .instance_size = sizeof(AspeedSCUState), + .class_init = aspeed_2500_scu_class_init, +}; + +static uint64_t aspeed_ast2600_scu_read(void *opaque, hwaddr offset, + unsigned size) +{ + AspeedSCUState *s = ASPEED_SCU(opaque); + int reg = TO_REG(offset); + + if (reg >= ASPEED_AST2600_SCU_NR_REGS) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Out-of-bounds read at offset 0x%" HWADDR_PRIx "\n", + __func__, offset); + return 0; + } + + switch (reg) { + case AST2600_HPLL_EXT: + case AST2600_EPLL_EXT: + case AST2600_MPLL_EXT: + /* PLLs are always "locked" */ + return s->regs[reg] | BIT(31); + case AST2600_RNG_DATA: + /* + * On hardware, RNG_DATA works regardless of the state of the + * enable bit in RNG_CTRL + * + * TODO: Check this is true for ast2600 + */ + s->regs[AST2600_RNG_DATA] = aspeed_scu_get_random(); + break; + } + + return s->regs[reg]; +} + +static void aspeed_ast2600_scu_write(void *opaque, hwaddr offset, + uint64_t data64, unsigned size) +{ + AspeedSCUState *s = ASPEED_SCU(opaque); + int reg = TO_REG(offset); + /* Truncate here so bitwise operations below behave as expected */ + uint32_t data = data64; + + if (reg >= ASPEED_AST2600_SCU_NR_REGS) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Out-of-bounds write at offset 0x%" HWADDR_PRIx "\n", + __func__, offset); + return; + } + + if (reg > PROT_KEY && !s->regs[PROT_KEY]) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: SCU is locked!\n", __func__); + } + + trace_aspeed_scu_write(offset, size, data); + + switch (reg) { + case AST2600_PROT_KEY: + s->regs[reg] = (data == ASPEED_SCU_PROT_KEY) ? 
1 : 0; + return; + case AST2600_HW_STRAP1: + case AST2600_HW_STRAP2: + if (s->regs[reg + 2]) { + return; + } + /* fall through */ + case AST2600_SYS_RST_CTRL: + case AST2600_SYS_RST_CTRL2: + case AST2600_CLK_STOP_CTRL: + case AST2600_CLK_STOP_CTRL2: + /* W1S (Write 1 to set) registers */ + s->regs[reg] |= data; + return; + case AST2600_SYS_RST_CTRL_CLR: + case AST2600_SYS_RST_CTRL2_CLR: + case AST2600_CLK_STOP_CTRL_CLR: + case AST2600_CLK_STOP_CTRL2_CLR: + case AST2600_HW_STRAP1_CLR: + case AST2600_HW_STRAP2_CLR: + /* + * W1C (Write 1 to clear) registers are offset by one address from + * the data register + */ + s->regs[reg - 1] &= ~data; + return; + + case AST2600_RNG_DATA: + case AST2600_SILICON_REV: + case AST2600_SILICON_REV2: + case AST2600_CHIP_ID0: + case AST2600_CHIP_ID1: + /* Add read only registers here */ + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Write to read-only offset 0x%" HWADDR_PRIx "\n", + __func__, offset); + return; + } + + s->regs[reg] = data; +} + +static const MemoryRegionOps aspeed_ast2600_scu_ops = { + .read = aspeed_ast2600_scu_read, + .write = aspeed_ast2600_scu_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid.min_access_size = 4, + .valid.max_access_size = 4, + .valid.unaligned = false, +}; + +static const uint32_t ast2600_a3_resets[ASPEED_AST2600_SCU_NR_REGS] = { + [AST2600_SYS_RST_CTRL] = 0xF7C3FED8, + [AST2600_SYS_RST_CTRL2] = 0x0DFFFFFC, + [AST2600_CLK_STOP_CTRL] = 0xFFFF7F8A, + [AST2600_CLK_STOP_CTRL2] = 0xFFF0FFF0, + [AST2600_DEBUG_CTRL] = 0x00000FFF, + [AST2600_DEBUG_CTRL2] = 0x000000FF, + [AST2600_SDRAM_HANDSHAKE] = 0x00000000, + [AST2600_HPLL_PARAM] = 0x1000408F, + [AST2600_APLL_PARAM] = 0x1000405F, + [AST2600_MPLL_PARAM] = 0x1008405F, + [AST2600_EPLL_PARAM] = 0x1004077F, + [AST2600_DPLL_PARAM] = 0x1078405F, + [AST2600_CLK_SEL] = 0xF3940000, + [AST2600_CLK_SEL2] = 0x00700000, + [AST2600_CLK_SEL3] = 0x00000000, + [AST2600_CLK_SEL4] = 0xF3F40000, + [AST2600_CLK_SEL5] = 0x30000000, + [AST2600_UARTCLK] = 0x00014506, + [AST2600_HUARTCLK] = 0x000145C0, + [AST2600_CHIP_ID0] = 0x1234ABCD, + [AST2600_CHIP_ID1] = 0x88884444, +}; + +static void aspeed_ast2600_scu_reset(DeviceState *dev) +{ + AspeedSCUState *s = ASPEED_SCU(dev); + AspeedSCUClass *asc = ASPEED_SCU_GET_CLASS(dev); + + memcpy(s->regs, asc->resets, asc->nr_regs * 4); + + /* + * A0 reports A0 in _REV, but subsequent revisions report A1 regardless + * of actual revision. QEMU and Linux only support A1 onwards so this is + * sufficient. 
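+ *
+ * The exact revision configured via the "silicon-rev" property remains
+ * guest-visible through SCU014 (AST2600_SILICON_REV2), set just below.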
+ */ + s->regs[AST2600_SILICON_REV] = AST2600_A3_SILICON_REV; + s->regs[AST2600_SILICON_REV2] = s->silicon_rev; + s->regs[AST2600_HW_STRAP1] = s->hw_strap1; + s->regs[AST2600_HW_STRAP2] = s->hw_strap2; + s->regs[PROT_KEY] = s->hw_prot_key; +} + +static void aspeed_2600_scu_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + AspeedSCUClass *asc = ASPEED_SCU_CLASS(klass); + + dc->desc = "ASPEED 2600 System Control Unit"; + dc->reset = aspeed_ast2600_scu_reset; + asc->resets = ast2600_a3_resets; + asc->calc_hpll = aspeed_2500_scu_calc_hpll; /* No change since AST2500 */ + asc->apb_divider = 4; + asc->nr_regs = ASPEED_AST2600_SCU_NR_REGS; + asc->ops = &aspeed_ast2600_scu_ops; +} + +static const TypeInfo aspeed_2600_scu_info = { + .name = TYPE_ASPEED_2600_SCU, + .parent = TYPE_ASPEED_SCU, + .instance_size = sizeof(AspeedSCUState), + .class_init = aspeed_2600_scu_class_init, +}; + +static void aspeed_scu_register_types(void) +{ + type_register_static(&aspeed_scu_info); + type_register_static(&aspeed_2400_scu_info); + type_register_static(&aspeed_2500_scu_info); + type_register_static(&aspeed_2600_scu_info); +} + +type_init(aspeed_scu_register_types); diff --git a/hw/misc/aspeed_sdmc.c b/hw/misc/aspeed_sdmc.c new file mode 100644 index 000000000..08f856cbd --- /dev/null +++ b/hw/misc/aspeed_sdmc.c @@ -0,0 +1,522 @@ +/* + * ASPEED SDRAM Memory Controller + * + * Copyright (C) 2016 IBM Corp. + * + * This code is licensed under the GPL version 2 or later. See + * the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "qemu/error-report.h" +#include "hw/misc/aspeed_sdmc.h" +#include "hw/misc/aspeed_scu.h" +#include "hw/qdev-properties.h" +#include "migration/vmstate.h" +#include "qapi/error.h" +#include "trace.h" +#include "qemu/units.h" +#include "qemu/cutils.h" +#include "qapi/visitor.h" + +/* Protection Key Register */ +#define R_PROT (0x00 / 4) +#define PROT_UNLOCKED 0x01 +#define PROT_HARDLOCKED 0x10 /* AST2600 */ +#define PROT_SOFTLOCKED 0x00 + +#define PROT_KEY_UNLOCK 0xFC600309 +#define PROT_KEY_HARDLOCK 0xDEADDEAD /* AST2600 */ + +/* Configuration Register */ +#define R_CONF (0x04 / 4) + +/* Interrupt control/status */ +#define R_ISR (0x50 / 4) + +/* Control/Status Register #1 (ast2500) */ +#define R_STATUS1 (0x60 / 4) +#define PHY_BUSY_STATE BIT(0) +#define PHY_PLL_LOCK_STATUS BIT(4) + +/* Reserved */ +#define R_MCR6C (0x6c / 4) + +#define R_ECC_TEST_CTRL (0x70 / 4) +#define ECC_TEST_FINISHED BIT(12) +#define ECC_TEST_FAIL BIT(13) + +#define R_TEST_START_LEN (0x74 / 4) +#define R_TEST_FAIL_DQ (0x78 / 4) +#define R_TEST_INIT_VAL (0x7c / 4) +#define R_DRAM_SW (0x88 / 4) +#define R_DRAM_TIME (0x8c / 4) +#define R_ECC_ERR_INJECT (0xb4 / 4) + +/* + * Configuration register Ox4 (for Aspeed AST2400 SOC) + * + * These are for the record and future use. ASPEED_SDMC_DRAM_SIZE is + * what we care about right now as it is checked by U-Boot to + * determine the RAM size. 
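+ *
+ * For example, a DRAM size field value of 0x2 in this register encodes
+ * 256MB on the AST2400 (ASPEED_SDMC_DRAM_256MB below).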
+ */ + +#define ASPEED_SDMC_RESERVED 0xFFFFF800 /* 31:11 reserved */ +#define ASPEED_SDMC_AST2300_COMPAT (1 << 10) +#define ASPEED_SDMC_SCRAMBLE_PATTERN (1 << 9) +#define ASPEED_SDMC_DATA_SCRAMBLE (1 << 8) +#define ASPEED_SDMC_ECC_ENABLE (1 << 7) +#define ASPEED_SDMC_VGA_COMPAT (1 << 6) /* readonly */ +#define ASPEED_SDMC_DRAM_BANK (1 << 5) +#define ASPEED_SDMC_DRAM_BURST (1 << 4) +#define ASPEED_SDMC_VGA_APERTURE(x) ((x & 0x3) << 2) /* readonly */ +#define ASPEED_SDMC_VGA_8MB 0x0 +#define ASPEED_SDMC_VGA_16MB 0x1 +#define ASPEED_SDMC_VGA_32MB 0x2 +#define ASPEED_SDMC_VGA_64MB 0x3 +#define ASPEED_SDMC_DRAM_SIZE(x) (x & 0x3) +#define ASPEED_SDMC_DRAM_64MB 0x0 +#define ASPEED_SDMC_DRAM_128MB 0x1 +#define ASPEED_SDMC_DRAM_256MB 0x2 +#define ASPEED_SDMC_DRAM_512MB 0x3 + +#define ASPEED_SDMC_READONLY_MASK \ + (ASPEED_SDMC_RESERVED | ASPEED_SDMC_VGA_COMPAT | \ + ASPEED_SDMC_VGA_APERTURE(ASPEED_SDMC_VGA_64MB)) +/* + * Configuration register Ox4 (for Aspeed AST2500 SOC and higher) + * + * Incompatibilities are annotated in the list. ASPEED_SDMC_HW_VERSION + * should be set to 1 for the AST2500 SOC. + */ +#define ASPEED_SDMC_HW_VERSION(x) ((x & 0xf) << 28) /* readonly */ +#define ASPEED_SDMC_SW_VERSION ((x & 0xff) << 20) +#define ASPEED_SDMC_CACHE_INITIAL_DONE (1 << 19) /* readonly */ +#define ASPEED_SDMC_AST2500_RESERVED 0x7C000 /* 18:14 reserved */ +#define ASPEED_SDMC_CACHE_DDR4_CONF (1 << 13) +#define ASPEED_SDMC_CACHE_INITIAL (1 << 12) +#define ASPEED_SDMC_CACHE_RANGE_CTRL (1 << 11) +#define ASPEED_SDMC_CACHE_ENABLE (1 << 10) /* differs from AST2400 */ +#define ASPEED_SDMC_DRAM_TYPE (1 << 4) /* differs from AST2400 */ + +/* DRAM size definitions differs */ +#define ASPEED_SDMC_AST2500_128MB 0x0 +#define ASPEED_SDMC_AST2500_256MB 0x1 +#define ASPEED_SDMC_AST2500_512MB 0x2 +#define ASPEED_SDMC_AST2500_1024MB 0x3 + +#define ASPEED_SDMC_AST2600_256MB 0x0 +#define ASPEED_SDMC_AST2600_512MB 0x1 +#define ASPEED_SDMC_AST2600_1024MB 0x2 +#define ASPEED_SDMC_AST2600_2048MB 0x3 + +#define ASPEED_SDMC_AST2500_READONLY_MASK \ + (ASPEED_SDMC_HW_VERSION(0xf) | ASPEED_SDMC_CACHE_INITIAL_DONE | \ + ASPEED_SDMC_AST2500_RESERVED | ASPEED_SDMC_VGA_COMPAT | \ + ASPEED_SDMC_VGA_APERTURE(ASPEED_SDMC_VGA_64MB)) + +static uint64_t aspeed_sdmc_read(void *opaque, hwaddr addr, unsigned size) +{ + AspeedSDMCState *s = ASPEED_SDMC(opaque); + + addr >>= 2; + + if (addr >= ARRAY_SIZE(s->regs)) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Out-of-bounds read at offset 0x%" HWADDR_PRIx "\n", + __func__, addr * 4); + return 0; + } + + return s->regs[addr]; +} + +static void aspeed_sdmc_write(void *opaque, hwaddr addr, uint64_t data, + unsigned int size) +{ + AspeedSDMCState *s = ASPEED_SDMC(opaque); + AspeedSDMCClass *asc = ASPEED_SDMC_GET_CLASS(s); + + addr >>= 2; + + if (addr >= ARRAY_SIZE(s->regs)) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Out-of-bounds write at offset 0x%" HWADDR_PRIx "\n", + __func__, addr); + return; + } + + asc->write(s, addr, data); +} + +static const MemoryRegionOps aspeed_sdmc_ops = { + .read = aspeed_sdmc_read, + .write = aspeed_sdmc_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid.min_access_size = 4, + .valid.max_access_size = 4, +}; + +static void aspeed_sdmc_reset(DeviceState *dev) +{ + AspeedSDMCState *s = ASPEED_SDMC(dev); + AspeedSDMCClass *asc = ASPEED_SDMC_GET_CLASS(s); + + memset(s->regs, 0, sizeof(s->regs)); + + /* Set ram size bit and defaults values */ + s->regs[R_CONF] = asc->compute_conf(s, 0); + + /* + * PHY status: + * - set phy status ok (set bit 1) + * - initial PVT calibration ok 
(clear bit 3) + * - runtime calibration ok (clear bit 5) + */ + s->regs[0x100] = BIT(1); + + /* PHY eye window: set all as passing */ + s->regs[0x100 | (0x68 / 4)] = 0xff; + s->regs[0x100 | (0x7c / 4)] = 0xff; + s->regs[0x100 | (0x50 / 4)] = 0xfffffff; +} + +static void aspeed_sdmc_get_ram_size(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + AspeedSDMCState *s = ASPEED_SDMC(obj); + int64_t value = s->ram_size; + + visit_type_int(v, name, &value, errp); +} + +static void aspeed_sdmc_set_ram_size(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + int i; + char *sz; + int64_t value; + AspeedSDMCState *s = ASPEED_SDMC(obj); + AspeedSDMCClass *asc = ASPEED_SDMC_GET_CLASS(s); + + if (!visit_type_int(v, name, &value, errp)) { + return; + } + + for (i = 0; asc->valid_ram_sizes[i]; i++) { + if (value == asc->valid_ram_sizes[i]) { + s->ram_size = value; + return; + } + } + + sz = size_to_str(value); + error_setg(errp, "Invalid RAM size %s", sz); + g_free(sz); +} + +static void aspeed_sdmc_initfn(Object *obj) +{ + object_property_add(obj, "ram-size", "int", + aspeed_sdmc_get_ram_size, aspeed_sdmc_set_ram_size, + NULL, NULL); +} + +static void aspeed_sdmc_realize(DeviceState *dev, Error **errp) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + AspeedSDMCState *s = ASPEED_SDMC(dev); + AspeedSDMCClass *asc = ASPEED_SDMC_GET_CLASS(s); + + assert(asc->max_ram_size < 4 * GiB); /* 32-bit address bus */ + s->max_ram_size = asc->max_ram_size; + + memory_region_init_io(&s->iomem, OBJECT(s), &aspeed_sdmc_ops, s, + TYPE_ASPEED_SDMC, 0x1000); + sysbus_init_mmio(sbd, &s->iomem); +} + +static const VMStateDescription vmstate_aspeed_sdmc = { + .name = "aspeed.sdmc", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32_ARRAY(regs, AspeedSDMCState, ASPEED_SDMC_NR_REGS), + VMSTATE_END_OF_LIST() + } +}; + +static Property aspeed_sdmc_properties[] = { + DEFINE_PROP_UINT64("max-ram-size", AspeedSDMCState, max_ram_size, 0), + DEFINE_PROP_END_OF_LIST(), +}; + +static void aspeed_sdmc_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + dc->realize = aspeed_sdmc_realize; + dc->reset = aspeed_sdmc_reset; + dc->desc = "ASPEED SDRAM Memory Controller"; + dc->vmsd = &vmstate_aspeed_sdmc; + device_class_set_props(dc, aspeed_sdmc_properties); +} + +static const TypeInfo aspeed_sdmc_info = { + .name = TYPE_ASPEED_SDMC, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(AspeedSDMCState), + .instance_init = aspeed_sdmc_initfn, + .class_init = aspeed_sdmc_class_init, + .class_size = sizeof(AspeedSDMCClass), + .abstract = true, +}; + +static int aspeed_sdmc_get_ram_bits(AspeedSDMCState *s) +{ + AspeedSDMCClass *asc = ASPEED_SDMC_GET_CLASS(s); + int i; + + /* + * The bitfield value encoding the RAM size is the index of the + * possible RAM size array + */ + for (i = 0; asc->valid_ram_sizes[i]; i++) { + if (s->ram_size == asc->valid_ram_sizes[i]) { + return i; + } + } + + /* + * Invalid RAM sizes should have been excluded when setting the + * SoC RAM size. 
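+     * (For instance, with the AST2400 table {64, 128, 256, 512} MiB
+     * defined below in aspeed_2400_ram_sizes, a 256 MiB ram_size is
+     * encoded as index 2, i.e. ASPEED_SDMC_DRAM_256MB.)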
+ */ + g_assert_not_reached(); +} + +static uint32_t aspeed_2400_sdmc_compute_conf(AspeedSDMCState *s, uint32_t data) +{ + uint32_t fixed_conf = ASPEED_SDMC_VGA_COMPAT | + ASPEED_SDMC_DRAM_SIZE(aspeed_sdmc_get_ram_bits(s)); + + /* Make sure readonly bits are kept */ + data &= ~ASPEED_SDMC_READONLY_MASK; + + return data | fixed_conf; +} + +static void aspeed_2400_sdmc_write(AspeedSDMCState *s, uint32_t reg, + uint32_t data) +{ + if (reg == R_PROT) { + s->regs[reg] = (data == PROT_KEY_UNLOCK) ? PROT_UNLOCKED : PROT_SOFTLOCKED; + return; + } + + if (!s->regs[R_PROT]) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: SDMC is locked!\n", __func__); + return; + } + + switch (reg) { + case R_CONF: + data = aspeed_2400_sdmc_compute_conf(s, data); + break; + default: + break; + } + + s->regs[reg] = data; +} + +static const uint64_t +aspeed_2400_ram_sizes[] = { 64 * MiB, 128 * MiB, 256 * MiB, 512 * MiB, 0}; + +static void aspeed_2400_sdmc_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + AspeedSDMCClass *asc = ASPEED_SDMC_CLASS(klass); + + dc->desc = "ASPEED 2400 SDRAM Memory Controller"; + asc->max_ram_size = 512 * MiB; + asc->compute_conf = aspeed_2400_sdmc_compute_conf; + asc->write = aspeed_2400_sdmc_write; + asc->valid_ram_sizes = aspeed_2400_ram_sizes; +} + +static const TypeInfo aspeed_2400_sdmc_info = { + .name = TYPE_ASPEED_2400_SDMC, + .parent = TYPE_ASPEED_SDMC, + .class_init = aspeed_2400_sdmc_class_init, +}; + +static uint32_t aspeed_2500_sdmc_compute_conf(AspeedSDMCState *s, uint32_t data) +{ + uint32_t fixed_conf = ASPEED_SDMC_HW_VERSION(1) | + ASPEED_SDMC_VGA_APERTURE(ASPEED_SDMC_VGA_64MB) | + ASPEED_SDMC_CACHE_INITIAL_DONE | + ASPEED_SDMC_DRAM_SIZE(aspeed_sdmc_get_ram_bits(s)); + + /* Make sure readonly bits are kept */ + data &= ~ASPEED_SDMC_AST2500_READONLY_MASK; + + return data | fixed_conf; +} + +static void aspeed_2500_sdmc_write(AspeedSDMCState *s, uint32_t reg, + uint32_t data) +{ + if (reg == R_PROT) { + s->regs[reg] = (data == PROT_KEY_UNLOCK) ? 
PROT_UNLOCKED : PROT_SOFTLOCKED; + return; + } + + if (!s->regs[R_PROT]) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: SDMC is locked!\n", __func__); + return; + } + + switch (reg) { + case R_CONF: + data = aspeed_2500_sdmc_compute_conf(s, data); + break; + case R_STATUS1: + /* Will never return 'busy' */ + data &= ~PHY_BUSY_STATE; + break; + case R_ECC_TEST_CTRL: + /* Always done, always happy */ + data |= ECC_TEST_FINISHED; + data &= ~ECC_TEST_FAIL; + break; + default: + break; + } + + s->regs[reg] = data; +} + +static const uint64_t +aspeed_2500_ram_sizes[] = { 128 * MiB, 256 * MiB, 512 * MiB, 1024 * MiB, 0}; + +static void aspeed_2500_sdmc_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + AspeedSDMCClass *asc = ASPEED_SDMC_CLASS(klass); + + dc->desc = "ASPEED 2500 SDRAM Memory Controller"; + asc->max_ram_size = 1 * GiB; + asc->compute_conf = aspeed_2500_sdmc_compute_conf; + asc->write = aspeed_2500_sdmc_write; + asc->valid_ram_sizes = aspeed_2500_ram_sizes; +} + +static const TypeInfo aspeed_2500_sdmc_info = { + .name = TYPE_ASPEED_2500_SDMC, + .parent = TYPE_ASPEED_SDMC, + .class_init = aspeed_2500_sdmc_class_init, +}; + +static uint32_t aspeed_2600_sdmc_compute_conf(AspeedSDMCState *s, uint32_t data) +{ + uint32_t fixed_conf = ASPEED_SDMC_HW_VERSION(3) | + ASPEED_SDMC_VGA_APERTURE(ASPEED_SDMC_VGA_64MB) | + ASPEED_SDMC_DRAM_SIZE(aspeed_sdmc_get_ram_bits(s)); + + /* Make sure readonly bits are kept (use ast2500 mask) */ + data &= ~ASPEED_SDMC_AST2500_READONLY_MASK; + + return data | fixed_conf; +} + +static void aspeed_2600_sdmc_write(AspeedSDMCState *s, uint32_t reg, + uint32_t data) +{ + /* Unprotected registers */ + switch (reg) { + case R_ISR: + case R_MCR6C: + case R_TEST_START_LEN: + case R_TEST_FAIL_DQ: + case R_TEST_INIT_VAL: + case R_DRAM_SW: + case R_DRAM_TIME: + case R_ECC_ERR_INJECT: + s->regs[reg] = data; + return; + } + + if (s->regs[R_PROT] == PROT_HARDLOCKED) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: SDMC is locked until system reset!\n", + __func__); + return; + } + + if (reg != R_PROT && s->regs[R_PROT] == PROT_SOFTLOCKED) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: SDMC is locked! (write to MCR%02x blocked)\n", + __func__, reg * 4); + return; + } + + switch (reg) { + case R_PROT: + if (data == PROT_KEY_UNLOCK) { + data = PROT_UNLOCKED; + } else if (data == PROT_KEY_HARDLOCK) { + data = PROT_HARDLOCKED; + } else { + data = PROT_SOFTLOCKED; + } + break; + case R_CONF: + data = aspeed_2600_sdmc_compute_conf(s, data); + break; + case R_STATUS1: + /* Will never return 'busy'. 
'lock status' is always set */ + data &= ~PHY_BUSY_STATE; + data |= PHY_PLL_LOCK_STATUS; + break; + case R_ECC_TEST_CTRL: + /* Always done, always happy */ + data |= ECC_TEST_FINISHED; + data &= ~ECC_TEST_FAIL; + break; + default: + break; + } + + s->regs[reg] = data; +} + +static const uint64_t +aspeed_2600_ram_sizes[] = { 256 * MiB, 512 * MiB, 1024 * MiB, 2048 * MiB, 0}; + +static void aspeed_2600_sdmc_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + AspeedSDMCClass *asc = ASPEED_SDMC_CLASS(klass); + + dc->desc = "ASPEED 2600 SDRAM Memory Controller"; + asc->max_ram_size = 2 * GiB; + asc->compute_conf = aspeed_2600_sdmc_compute_conf; + asc->write = aspeed_2600_sdmc_write; + asc->valid_ram_sizes = aspeed_2600_ram_sizes; +} + +static const TypeInfo aspeed_2600_sdmc_info = { + .name = TYPE_ASPEED_2600_SDMC, + .parent = TYPE_ASPEED_SDMC, + .class_init = aspeed_2600_sdmc_class_init, +}; + +static void aspeed_sdmc_register_types(void) +{ + type_register_static(&aspeed_sdmc_info); + type_register_static(&aspeed_2400_sdmc_info); + type_register_static(&aspeed_2500_sdmc_info); + type_register_static(&aspeed_2600_sdmc_info); +} + +type_init(aspeed_sdmc_register_types); diff --git a/hw/misc/aspeed_xdma.c b/hw/misc/aspeed_xdma.c new file mode 100644 index 000000000..1c21577c9 --- /dev/null +++ b/hw/misc/aspeed_xdma.c @@ -0,0 +1,245 @@ +/* + * ASPEED XDMA Controller + * Eddie James + * + * Copyright (C) 2019 IBM Corp + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "qemu/error-report.h" +#include "hw/irq.h" +#include "hw/misc/aspeed_xdma.h" +#include "migration/vmstate.h" +#include "qapi/error.h" + +#include "trace.h" + +#define XDMA_BMC_CMDQ_ADDR 0x10 +#define XDMA_BMC_CMDQ_ENDP 0x14 +#define XDMA_BMC_CMDQ_WRP 0x18 +#define XDMA_BMC_CMDQ_W_MASK 0x0003FFFF +#define XDMA_BMC_CMDQ_RDP 0x1C +#define XDMA_BMC_CMDQ_RDP_MAGIC 0xEE882266 +#define XDMA_IRQ_ENG_CTRL 0x20 +#define XDMA_IRQ_ENG_CTRL_US_COMP BIT(4) +#define XDMA_IRQ_ENG_CTRL_DS_COMP BIT(5) +#define XDMA_IRQ_ENG_CTRL_W_MASK 0xBFEFF07F +#define XDMA_IRQ_ENG_STAT 0x24 +#define XDMA_IRQ_ENG_STAT_US_COMP BIT(4) +#define XDMA_IRQ_ENG_STAT_DS_COMP BIT(5) +#define XDMA_IRQ_ENG_STAT_RESET 0xF8000000 + +#define XDMA_AST2600_BMC_CMDQ_ADDR 0x14 +#define XDMA_AST2600_BMC_CMDQ_ENDP 0x18 +#define XDMA_AST2600_BMC_CMDQ_WRP 0x1c +#define XDMA_AST2600_BMC_CMDQ_RDP 0x20 +#define XDMA_AST2600_IRQ_CTRL 0x38 +#define XDMA_AST2600_IRQ_CTRL_US_COMP BIT(16) +#define XDMA_AST2600_IRQ_CTRL_DS_COMP BIT(17) +#define XDMA_AST2600_IRQ_CTRL_W_MASK 0x017003FF +#define XDMA_AST2600_IRQ_STATUS 0x3c +#define XDMA_AST2600_IRQ_STATUS_US_COMP BIT(16) +#define XDMA_AST2600_IRQ_STATUS_DS_COMP BIT(17) + +#define XDMA_MEM_SIZE 0x1000 + +#define TO_REG(addr) ((addr) / sizeof(uint32_t)) + +static uint64_t aspeed_xdma_read(void *opaque, hwaddr addr, unsigned int size) +{ + uint32_t val = 0; + AspeedXDMAState *xdma = opaque; + + if (addr < ASPEED_XDMA_REG_SIZE) { + val = xdma->regs[TO_REG(addr)]; + } + + return (uint64_t)val; +} + +static void aspeed_xdma_write(void *opaque, hwaddr addr, uint64_t val, + unsigned int size) +{ + unsigned int idx; + uint32_t val32 = (uint32_t)val; + AspeedXDMAState *xdma = opaque; + AspeedXDMAClass *axc = ASPEED_XDMA_GET_CLASS(xdma); + + if (addr >= ASPEED_XDMA_REG_SIZE) { + return; + } + + if (addr == axc->cmdq_endp) { + xdma->regs[TO_REG(addr)] = val32 & XDMA_BMC_CMDQ_W_MASK; + } else if (addr == axc->cmdq_wrp) { + idx = TO_REG(addr); + xdma->regs[idx] = 
val32 & XDMA_BMC_CMDQ_W_MASK; + xdma->regs[TO_REG(axc->cmdq_rdp)] = xdma->regs[idx]; + + trace_aspeed_xdma_write(addr, val); + + if (xdma->bmc_cmdq_readp_set) { + xdma->bmc_cmdq_readp_set = 0; + } else { + xdma->regs[TO_REG(axc->intr_status)] |= axc->intr_complete; + + if (xdma->regs[TO_REG(axc->intr_ctrl)] & axc->intr_complete) { + qemu_irq_raise(xdma->irq); + } + } + } else if (addr == axc->cmdq_rdp) { + trace_aspeed_xdma_write(addr, val); + + if (val32 == XDMA_BMC_CMDQ_RDP_MAGIC) { + xdma->bmc_cmdq_readp_set = 1; + } + } else if (addr == axc->intr_ctrl) { + xdma->regs[TO_REG(addr)] = val32 & axc->intr_ctrl_mask; + } else if (addr == axc->intr_status) { + trace_aspeed_xdma_write(addr, val); + + idx = TO_REG(addr); + if (val32 & axc->intr_complete) { + xdma->regs[idx] &= ~axc->intr_complete; + qemu_irq_lower(xdma->irq); + } + } else { + xdma->regs[TO_REG(addr)] = val32; + } +} + +static const MemoryRegionOps aspeed_xdma_ops = { + .read = aspeed_xdma_read, + .write = aspeed_xdma_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid.min_access_size = 4, + .valid.max_access_size = 4, +}; + +static void aspeed_xdma_realize(DeviceState *dev, Error **errp) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + AspeedXDMAState *xdma = ASPEED_XDMA(dev); + + sysbus_init_irq(sbd, &xdma->irq); + memory_region_init_io(&xdma->iomem, OBJECT(xdma), &aspeed_xdma_ops, xdma, + TYPE_ASPEED_XDMA, XDMA_MEM_SIZE); + sysbus_init_mmio(sbd, &xdma->iomem); +} + +static void aspeed_xdma_reset(DeviceState *dev) +{ + AspeedXDMAState *xdma = ASPEED_XDMA(dev); + AspeedXDMAClass *axc = ASPEED_XDMA_GET_CLASS(xdma); + + xdma->bmc_cmdq_readp_set = 0; + memset(xdma->regs, 0, ASPEED_XDMA_REG_SIZE); + xdma->regs[TO_REG(axc->intr_status)] = XDMA_IRQ_ENG_STAT_RESET; + + qemu_irq_lower(xdma->irq); +} + +static const VMStateDescription aspeed_xdma_vmstate = { + .name = TYPE_ASPEED_XDMA, + .version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32_ARRAY(regs, AspeedXDMAState, ASPEED_XDMA_NUM_REGS), + VMSTATE_END_OF_LIST(), + }, +}; + +static void aspeed_2600_xdma_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + AspeedXDMAClass *axc = ASPEED_XDMA_CLASS(klass); + + dc->desc = "ASPEED 2600 XDMA Controller"; + + axc->cmdq_endp = XDMA_AST2600_BMC_CMDQ_ENDP; + axc->cmdq_wrp = XDMA_AST2600_BMC_CMDQ_WRP; + axc->cmdq_rdp = XDMA_AST2600_BMC_CMDQ_RDP; + axc->intr_ctrl = XDMA_AST2600_IRQ_CTRL; + axc->intr_ctrl_mask = XDMA_AST2600_IRQ_CTRL_W_MASK; + axc->intr_status = XDMA_AST2600_IRQ_STATUS; + axc->intr_complete = XDMA_AST2600_IRQ_STATUS_US_COMP | + XDMA_AST2600_IRQ_STATUS_DS_COMP; +} + +static const TypeInfo aspeed_2600_xdma_info = { + .name = TYPE_ASPEED_2600_XDMA, + .parent = TYPE_ASPEED_XDMA, + .class_init = aspeed_2600_xdma_class_init, +}; + +static void aspeed_2500_xdma_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + AspeedXDMAClass *axc = ASPEED_XDMA_CLASS(klass); + + dc->desc = "ASPEED 2500 XDMA Controller"; + + axc->cmdq_endp = XDMA_BMC_CMDQ_ENDP; + axc->cmdq_wrp = XDMA_BMC_CMDQ_WRP; + axc->cmdq_rdp = XDMA_BMC_CMDQ_RDP; + axc->intr_ctrl = XDMA_IRQ_ENG_CTRL; + axc->intr_ctrl_mask = XDMA_IRQ_ENG_CTRL_W_MASK; + axc->intr_status = XDMA_IRQ_ENG_STAT; + axc->intr_complete = XDMA_IRQ_ENG_STAT_US_COMP | XDMA_IRQ_ENG_STAT_DS_COMP; +}; + +static const TypeInfo aspeed_2500_xdma_info = { + .name = TYPE_ASPEED_2500_XDMA, + .parent = TYPE_ASPEED_XDMA, + .class_init = aspeed_2500_xdma_class_init, +}; + +static void aspeed_2400_xdma_class_init(ObjectClass *klass, void 
*data)
+{
+    DeviceClass *dc = DEVICE_CLASS(klass);
+    AspeedXDMAClass *axc = ASPEED_XDMA_CLASS(klass);
+
+    dc->desc = "ASPEED 2400 XDMA Controller";
+
+    axc->cmdq_endp = XDMA_BMC_CMDQ_ENDP;
+    axc->cmdq_wrp = XDMA_BMC_CMDQ_WRP;
+    axc->cmdq_rdp = XDMA_BMC_CMDQ_RDP;
+    axc->intr_ctrl = XDMA_IRQ_ENG_CTRL;
+    axc->intr_ctrl_mask = XDMA_IRQ_ENG_CTRL_W_MASK;
+    axc->intr_status = XDMA_IRQ_ENG_STAT;
+    axc->intr_complete = XDMA_IRQ_ENG_STAT_US_COMP | XDMA_IRQ_ENG_STAT_DS_COMP;
+};
+
+static const TypeInfo aspeed_2400_xdma_info = {
+    .name = TYPE_ASPEED_2400_XDMA,
+    .parent = TYPE_ASPEED_XDMA,
+    .class_init = aspeed_2400_xdma_class_init,
+};
+
+static void aspeed_xdma_class_init(ObjectClass *classp, void *data)
+{
+    DeviceClass *dc = DEVICE_CLASS(classp);
+
+    dc->realize = aspeed_xdma_realize;
+    dc->reset = aspeed_xdma_reset;
+    dc->vmsd = &aspeed_xdma_vmstate;
+}
+
+static const TypeInfo aspeed_xdma_info = {
+    .name = TYPE_ASPEED_XDMA,
+    .parent = TYPE_SYS_BUS_DEVICE,
+    .instance_size = sizeof(AspeedXDMAState),
+    .class_init = aspeed_xdma_class_init,
+    .class_size = sizeof(AspeedXDMAClass),
+    .abstract = true,
+};
+
+static void aspeed_xdma_register_type(void)
+{
+    type_register_static(&aspeed_xdma_info);
+    type_register_static(&aspeed_2400_xdma_info);
+    type_register_static(&aspeed_2500_xdma_info);
+    type_register_static(&aspeed_2600_xdma_info);
+}
+type_init(aspeed_xdma_register_type);
diff --git a/hw/misc/auxbus.c b/hw/misc/auxbus.c
new file mode 100644
index 000000000..8a8012f5f
--- /dev/null
+++ b/hw/misc/auxbus.c
@@ -0,0 +1,337 @@
+/*
+ * auxbus.c
+ *
+ * Copyright 2015 : GreenSocs Ltd
+ * http://www.greensocs.com/ , email: info@greensocs.com
+ *
+ * Developed by :
+ * Frederic Konrad
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+/*
+ * This is an implementation of the AUX bus for VESA Display Port v1.1a.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/units.h"
+#include "qemu/log.h"
+#include "qemu/module.h"
+#include "hw/misc/auxbus.h"
+#include "hw/i2c/i2c.h"
+#include "monitor/monitor.h"
+#include "qapi/error.h"
+
+#ifndef DEBUG_AUX
+#define DEBUG_AUX 0
+#endif
+
+#define DPRINTF(fmt, ...) do { \
+    if (DEBUG_AUX) { \
+        qemu_log("aux: " fmt , ## __VA_ARGS__); \
+    } \
+} while (0)
+
+
+static void aux_slave_dev_print(Monitor *mon, DeviceState *dev, int indent);
+static inline I2CBus *aux_bridge_get_i2c_bus(AUXTOI2CState *bridge);
+
+/* aux-bus implementation (internal not public) */
+static void aux_bus_class_init(ObjectClass *klass, void *data)
+{
+    BusClass *k = BUS_CLASS(klass);
+
+    /* AUXSlave has an MMIO so we need to change the way we print information
+     * in monitor.
+ */ + k->print_dev = aux_slave_dev_print; +} + +AUXBus *aux_bus_init(DeviceState *parent, const char *name) +{ + AUXBus *bus; + Object *auxtoi2c; + + bus = AUX_BUS(qbus_new(TYPE_AUX_BUS, parent, name)); + auxtoi2c = object_new_with_props(TYPE_AUXTOI2C, OBJECT(bus), "i2c", + &error_abort, NULL); + + bus->bridge = AUXTOI2C(auxtoi2c); + + /* Memory related. */ + bus->aux_io = g_malloc(sizeof(*bus->aux_io)); + memory_region_init(bus->aux_io, OBJECT(bus), "aux-io", 1 * MiB); + address_space_init(&bus->aux_addr_space, bus->aux_io, "aux-io"); + return bus; +} + +void aux_bus_realize(AUXBus *bus) +{ + qdev_realize(DEVICE(bus->bridge), BUS(bus), &error_fatal); +} + +void aux_map_slave(AUXSlave *aux_dev, hwaddr addr) +{ + DeviceState *dev = DEVICE(aux_dev); + AUXBus *bus = AUX_BUS(qdev_get_parent_bus(dev)); + memory_region_add_subregion(bus->aux_io, addr, aux_dev->mmio); +} + +static bool aux_bus_is_bridge(AUXBus *bus, DeviceState *dev) +{ + return (dev == DEVICE(bus->bridge)); +} + +I2CBus *aux_get_i2c_bus(AUXBus *bus) +{ + return aux_bridge_get_i2c_bus(bus->bridge); +} + +AUXReply aux_request(AUXBus *bus, AUXCommand cmd, uint32_t address, + uint8_t len, uint8_t *data) +{ + AUXReply ret = AUX_NACK; + I2CBus *i2c_bus = aux_get_i2c_bus(bus); + size_t i; + + DPRINTF("request at address 0x%" PRIX32 ", command %u, len %u\n", address, + cmd, len); + + switch (cmd) { + /* + * Forward the request on the AUX bus.. + */ + case WRITE_AUX: + case READ_AUX: + for (i = 0; i < len; i++) { + if (!address_space_rw(&bus->aux_addr_space, address++, + MEMTXATTRS_UNSPECIFIED, data++, 1, + cmd == WRITE_AUX)) { + ret = AUX_I2C_ACK; + } else { + ret = AUX_NACK; + break; + } + } + break; + /* + * Classic I2C transactions.. + */ + case READ_I2C: + if (i2c_bus_busy(i2c_bus)) { + i2c_end_transfer(i2c_bus); + } + + if (i2c_start_recv(i2c_bus, address)) { + ret = AUX_I2C_NACK; + break; + } + + ret = AUX_I2C_ACK; + for (i = 0; i < len; i++) { + data[i] = i2c_recv(i2c_bus); + } + i2c_end_transfer(i2c_bus); + break; + case WRITE_I2C: + if (i2c_bus_busy(i2c_bus)) { + i2c_end_transfer(i2c_bus); + } + + if (i2c_start_send(i2c_bus, address)) { + ret = AUX_I2C_NACK; + break; + } + + ret = AUX_I2C_ACK; + for (i = 0; i < len; i++) { + if (i2c_send(i2c_bus, data[i]) < 0) { + ret = AUX_I2C_NACK; + break; + } + } + i2c_end_transfer(i2c_bus); + break; + /* + * I2C MOT transactions. + * + * Here we send a start when: + * - We didn't start transaction yet. + * - We had a READ and we do a WRITE. + * - We changed the address. + */ + case WRITE_I2C_MOT: + ret = AUX_I2C_NACK; + if (!i2c_bus_busy(i2c_bus)) { + /* + * No transactions started.. + */ + if (i2c_start_send(i2c_bus, address)) { + break; + } + } else if ((address != bus->last_i2c_address) || + (bus->last_transaction != cmd)) { + /* + * Transaction started but we need to restart.. + */ + i2c_end_transfer(i2c_bus); + if (i2c_start_send(i2c_bus, address)) { + break; + } + } + + bus->last_transaction = cmd; + bus->last_i2c_address = address; + ret = AUX_I2C_ACK; + for (i = 0; i < len; i++) { + if (i2c_send(i2c_bus, data[i]) < 0) { + i2c_end_transfer(i2c_bus); + ret = AUX_I2C_NACK; + break; + } + } + break; + case READ_I2C_MOT: + ret = AUX_I2C_NACK; + if (!i2c_bus_busy(i2c_bus)) { + /* + * No transactions started.. + */ + if (i2c_start_recv(i2c_bus, address)) { + break; + } + } else if ((address != bus->last_i2c_address) || + (bus->last_transaction != cmd)) { + /* + * Transaction started but we need to restart.. 
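+             * (either the guest changed direction since the last MOT
+             * command or it now targets a different address, so the
+             * ongoing transfer is ended before a new one is started)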
+ */ + i2c_end_transfer(i2c_bus); + if (i2c_start_recv(i2c_bus, address)) { + break; + } + } + + bus->last_transaction = cmd; + bus->last_i2c_address = address; + for (i = 0; i < len; i++) { + data[i] = i2c_recv(i2c_bus); + } + ret = AUX_I2C_ACK; + break; + default: + qemu_log_mask(LOG_UNIMP, "AUX cmd=%u not implemented\n", cmd); + return AUX_NACK; + } + + DPRINTF("reply: %u\n", ret); + return ret; +} + +static const TypeInfo aux_bus_info = { + .name = TYPE_AUX_BUS, + .parent = TYPE_BUS, + .instance_size = sizeof(AUXBus), + .class_init = aux_bus_class_init +}; + +/* aux-i2c implementation (internal not public) */ +struct AUXTOI2CState { + /*< private >*/ + DeviceState parent_obj; + + /*< public >*/ + I2CBus *i2c_bus; +}; + +static void aux_bridge_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + + /* This device is private and is created only once for each + * aux-bus in aux_bus_init(..). So don't allow the user to add one. + */ + dc->user_creatable = false; +} + +static void aux_bridge_init(Object *obj) +{ + AUXTOI2CState *s = AUXTOI2C(obj); + + s->i2c_bus = i2c_init_bus(DEVICE(obj), "aux-i2c"); +} + +static inline I2CBus *aux_bridge_get_i2c_bus(AUXTOI2CState *bridge) +{ + return bridge->i2c_bus; +} + +static const TypeInfo aux_to_i2c_type_info = { + .name = TYPE_AUXTOI2C, + .parent = TYPE_AUX_SLAVE, + .class_init = aux_bridge_class_init, + .instance_size = sizeof(AUXTOI2CState), + .instance_init = aux_bridge_init +}; + +/* aux-slave implementation */ +static void aux_slave_dev_print(Monitor *mon, DeviceState *dev, int indent) +{ + AUXBus *bus = AUX_BUS(qdev_get_parent_bus(dev)); + AUXSlave *s; + + /* Don't print anything if the device is I2C "bridge". */ + if (aux_bus_is_bridge(bus, dev)) { + return; + } + + s = AUX_SLAVE(dev); + + monitor_printf(mon, "%*smemory " TARGET_FMT_plx "/" TARGET_FMT_plx "\n", + indent, "", + object_property_get_uint(OBJECT(s->mmio), "addr", NULL), + memory_region_size(s->mmio)); +} + +void aux_init_mmio(AUXSlave *aux_slave, MemoryRegion *mmio) +{ + assert(!aux_slave->mmio); + aux_slave->mmio = mmio; +} + +static void aux_slave_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *k = DEVICE_CLASS(klass); + + set_bit(DEVICE_CATEGORY_MISC, k->categories); + k->bus_type = TYPE_AUX_BUS; +} + +static const TypeInfo aux_slave_type_info = { + .name = TYPE_AUX_SLAVE, + .parent = TYPE_DEVICE, + .instance_size = sizeof(AUXSlave), + .abstract = true, + .class_init = aux_slave_class_init, +}; + +static void aux_register_types(void) +{ + type_register_static(&aux_bus_info); + type_register_static(&aux_slave_type_info); + type_register_static(&aux_to_i2c_type_info); +} + +type_init(aux_register_types) diff --git a/hw/misc/avr_power.c b/hw/misc/avr_power.c new file mode 100644 index 000000000..a5412f2cf --- /dev/null +++ b/hw/misc/avr_power.c @@ -0,0 +1,113 @@ +/* + * AVR Power Reduction Management + * + * Copyright (c) 2019-2020 Michael Rolnik + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/misc/avr_power.h"
+#include "qemu/log.h"
+#include "hw/qdev-properties.h"
+#include "hw/irq.h"
+#include "trace.h"
+
+static void avr_mask_reset(DeviceState *dev)
+{
+    AVRMaskState *s = AVR_MASK(dev);
+
+    s->val = 0x00;
+
+    for (int i = 0; i < 8; i++) {
+        qemu_set_irq(s->irq[i], 0);
+    }
+}
+
+static uint64_t avr_mask_read(void *opaque, hwaddr offset, unsigned size)
+{
+    assert(size == 1);
+    assert(offset == 0);
+    AVRMaskState *s = opaque;
+
+    trace_avr_power_read(s->val);
+
+    return (uint64_t)s->val;
+}
+
+static void avr_mask_write(void *opaque, hwaddr offset,
+                           uint64_t val64, unsigned size)
+{
+    assert(size == 1);
+    assert(offset == 0);
+    AVRMaskState *s = opaque;
+    uint8_t val8 = val64;
+
+    trace_avr_power_write(val8);
+    s->val = val8;
+    for (int i = 0; i < 8; i++) {
+        qemu_set_irq(s->irq[i], (val8 & (1 << i)) != 0);
+    }
+}
+
+static const MemoryRegionOps avr_mask_ops = {
+    .read = avr_mask_read,
+    .write = avr_mask_write,
+    .endianness = DEVICE_NATIVE_ENDIAN,
+    .impl = {
+        .max_access_size = 1,
+    },
+};
+
+static void avr_mask_init(Object *dev)
+{
+    AVRMaskState *s = AVR_MASK(dev);
+    SysBusDevice *busdev = SYS_BUS_DEVICE(dev);
+
+    memory_region_init_io(&s->iomem, dev, &avr_mask_ops, s, TYPE_AVR_MASK,
+                          0x01);
+    sysbus_init_mmio(busdev, &s->iomem);
+
+    for (int i = 0; i < 8; i++) {
+        sysbus_init_irq(busdev, &s->irq[i]);
+    }
+    s->val = 0x00;
+}
+
+static void avr_mask_class_init(ObjectClass *klass, void *data)
+{
+    DeviceClass *dc = DEVICE_CLASS(klass);
+
+    dc->reset = avr_mask_reset;
+}
+
+static const TypeInfo avr_mask_info = {
+    .name = TYPE_AVR_MASK,
+    .parent = TYPE_SYS_BUS_DEVICE,
+    .instance_size = sizeof(AVRMaskState),
+    .class_init = avr_mask_class_init,
+    .instance_init = avr_mask_init,
+};
+
+static void avr_mask_register_types(void)
+{
+    type_register_static(&avr_mask_info);
+}
+
+type_init(avr_mask_register_types)
diff --git a/hw/misc/bcm2835_cprman.c b/hw/misc/bcm2835_cprman.c
new file mode 100644
index 000000000..75e6c574d
--- /dev/null
+++ b/hw/misc/bcm2835_cprman.c
@@ -0,0 +1,813 @@
+/*
+ * BCM2835 CPRMAN clock manager
+ *
+ * Copyright (c) 2020 Luc Michel
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+/*
+ * This peripheral is roughly divided into 3 main parts:
+ * - the PLLs
+ * - the PLL channels
+ * - the clock muxes
+ *
+ * A main oscillator (xosc) feeds all the PLLs. Each PLL has one or more
+ * channels. Those channels are then connected to the clock muxes. Each mux
+ * has multiple sources (usually the xosc, some of the PLL channels and some
+ * "test debug" clocks). A mux is configured to select a given source through
+ * its control register. Each mux has one output clock that also goes out of
+ * the CPRMAN. This output clock usually connects to another peripheral in the
+ * SoC (so a given mux is dedicated to a peripheral).
+ *
+ * At each level (PLL, channel and mux), the clock can be altered through
+ * dividers (and multipliers in case of the PLLs), and can be disabled (in this
+ * case, the next levels see no clock).
+ *
+ * This can be summed up as follows (this is an example and not the actual
+ * BCM2835 clock tree):
+ *
+ *          /-->[PLL]-|->[PLL channel]--...            [mux]--> to peripherals
+ *          |         |->[PLL channel]  muxes take     [mux]
+ *          |         \->[PLL channel]  inputs from    [mux]
+ *          |                           some channels  [mux]
+ * [xosc]---|-->[PLL]-|->[PLL channel]  and other srcs [mux]
+ *          |         \->[PLL channel]           ...-->[mux]
+ *          |                                          [mux]
+ *          \-->[PLL]--->[PLL channel]                 [mux]
+ *
+ * The page at https://elinux.org/The_Undocumented_Pi gives the actual clock
+ * tree configuration.
+ *
+ * The CPRMAN exposes clock outputs with the name of the clock mux suffixed
+ * with "-out" (e.g. "uart-out", "h264-out", ...).
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/log.h"
+#include "migration/vmstate.h"
+#include "hw/qdev-properties.h"
+#include "hw/misc/bcm2835_cprman.h"
+#include "hw/misc/bcm2835_cprman_internals.h"
+#include "trace.h"
+
+/* PLL */
+
+static void pll_reset(DeviceState *dev)
+{
+    CprmanPllState *s = CPRMAN_PLL(dev);
+    const PLLResetInfo *info = &PLL_RESET_INFO[s->id];
+
+    *s->reg_cm = info->cm;
+    *s->reg_a2w_ctrl = info->a2w_ctrl;
+    memcpy(s->reg_a2w_ana, info->a2w_ana, sizeof(info->a2w_ana));
+    *s->reg_a2w_frac = info->a2w_frac;
+}
+
+static bool pll_is_locked(const CprmanPllState *pll)
+{
+    return !FIELD_EX32(*pll->reg_a2w_ctrl, A2W_PLLx_CTRL, PWRDN)
+        && !FIELD_EX32(*pll->reg_cm, CM_PLLx, ANARST);
+}
+
+static void pll_update(CprmanPllState *pll)
+{
+    uint64_t freq, ndiv, fdiv, pdiv;
+
+    if (!pll_is_locked(pll)) {
+        clock_update(pll->out, 0);
+        return;
+    }
+
+    pdiv = FIELD_EX32(*pll->reg_a2w_ctrl, A2W_PLLx_CTRL, PDIV);
+
+    if (!pdiv) {
+        clock_update(pll->out, 0);
+        return;
+    }
+
+    ndiv = FIELD_EX32(*pll->reg_a2w_ctrl, A2W_PLLx_CTRL, NDIV);
+    fdiv = FIELD_EX32(*pll->reg_a2w_frac, A2W_PLLx_FRAC, FRAC);
+
+    if (pll->reg_a2w_ana[1] & pll->prediv_mask) {
+        /* The prescaler doubles the parent frequency */
+        ndiv *= 2;
+        fdiv *= 2;
+    }
+
+    /*
+     * We have a multiplier with an integer part (ndiv) and a fractional part
+     * (fdiv), and a divider (pdiv).
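+     * The fixed-point computation below is therefore equivalent to:
+     *
+     *   freq = xosc * (ndiv + fdiv / 2^FRAC_LEN) / pdiv
+     *
+     * with FRAC_LEN = R_A2W_PLLx_FRAC_FRAC_LENGTH. For example (illustrative
+     * values only), a 19.2 MHz xosc with ndiv = 52, fdiv = 0 and pdiv = 1
+     * gives a 998.4 MHz PLL output.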
+ */ + freq = clock_get_hz(pll->xosc_in) * + ((ndiv << R_A2W_PLLx_FRAC_FRAC_LENGTH) + fdiv); + freq /= pdiv; + freq >>= R_A2W_PLLx_FRAC_FRAC_LENGTH; + + clock_update_hz(pll->out, freq); +} + +static void pll_xosc_update(void *opaque, ClockEvent event) +{ + pll_update(CPRMAN_PLL(opaque)); +} + +static void pll_init(Object *obj) +{ + CprmanPllState *s = CPRMAN_PLL(obj); + + s->xosc_in = qdev_init_clock_in(DEVICE(s), "xosc-in", pll_xosc_update, + s, ClockUpdate); + s->out = qdev_init_clock_out(DEVICE(s), "out"); +} + +static const VMStateDescription pll_vmstate = { + .name = TYPE_CPRMAN_PLL, + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_CLOCK(xosc_in, CprmanPllState), + VMSTATE_END_OF_LIST() + } +}; + +static void pll_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = pll_reset; + dc->vmsd = &pll_vmstate; +} + +static const TypeInfo cprman_pll_info = { + .name = TYPE_CPRMAN_PLL, + .parent = TYPE_DEVICE, + .instance_size = sizeof(CprmanPllState), + .class_init = pll_class_init, + .instance_init = pll_init, +}; + + +/* PLL channel */ + +static void pll_channel_reset(DeviceState *dev) +{ + CprmanPllChannelState *s = CPRMAN_PLL_CHANNEL(dev); + const PLLChannelResetInfo *info = &PLL_CHANNEL_RESET_INFO[s->id]; + + *s->reg_a2w_ctrl = info->a2w_ctrl; +} + +static bool pll_channel_is_enabled(CprmanPllChannelState *channel) +{ + /* + * XXX I'm not sure of the purpose of the LOAD field. The Linux driver does + * not set it when enabling the channel, but does clear it when disabling + * it. + */ + return !FIELD_EX32(*channel->reg_a2w_ctrl, A2W_PLLx_CHANNELy, DISABLE) + && !(*channel->reg_cm & channel->hold_mask); +} + +static void pll_channel_update(CprmanPllChannelState *channel) +{ + uint64_t freq, div; + + if (!pll_channel_is_enabled(channel)) { + clock_update(channel->out, 0); + return; + } + + div = FIELD_EX32(*channel->reg_a2w_ctrl, A2W_PLLx_CHANNELy, DIV); + + if (!div) { + /* + * It seems that when the divider value is 0, it is considered as + * being maximum by the hardware (see the Linux driver). 
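+         * Substituting the all-ones DIV_MASK below yields the lowest
+         * possible output frequency rather than a division by zero.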
+ */ + div = R_A2W_PLLx_CHANNELy_DIV_MASK; + } + + /* Some channels have an additional fixed divider */ + freq = clock_get_hz(channel->pll_in) / (div * channel->fixed_divider); + + clock_update_hz(channel->out, freq); +} + +/* Update a PLL and all its channels */ +static void pll_update_all_channels(BCM2835CprmanState *s, + CprmanPllState *pll) +{ + size_t i; + + pll_update(pll); + + for (i = 0; i < CPRMAN_NUM_PLL_CHANNEL; i++) { + CprmanPllChannelState *channel = &s->channels[i]; + if (channel->parent == pll->id) { + pll_channel_update(channel); + } + } +} + +static void pll_channel_pll_in_update(void *opaque, ClockEvent event) +{ + pll_channel_update(CPRMAN_PLL_CHANNEL(opaque)); +} + +static void pll_channel_init(Object *obj) +{ + CprmanPllChannelState *s = CPRMAN_PLL_CHANNEL(obj); + + s->pll_in = qdev_init_clock_in(DEVICE(s), "pll-in", + pll_channel_pll_in_update, s, + ClockUpdate); + s->out = qdev_init_clock_out(DEVICE(s), "out"); +} + +static const VMStateDescription pll_channel_vmstate = { + .name = TYPE_CPRMAN_PLL_CHANNEL, + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_CLOCK(pll_in, CprmanPllChannelState), + VMSTATE_END_OF_LIST() + } +}; + +static void pll_channel_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = pll_channel_reset; + dc->vmsd = &pll_channel_vmstate; +} + +static const TypeInfo cprman_pll_channel_info = { + .name = TYPE_CPRMAN_PLL_CHANNEL, + .parent = TYPE_DEVICE, + .instance_size = sizeof(CprmanPllChannelState), + .class_init = pll_channel_class_init, + .instance_init = pll_channel_init, +}; + + +/* clock mux */ + +static bool clock_mux_is_enabled(CprmanClockMuxState *mux) +{ + return FIELD_EX32(*mux->reg_ctl, CM_CLOCKx_CTL, ENABLE); +} + +static void clock_mux_update(CprmanClockMuxState *mux) +{ + uint64_t freq; + uint32_t div, src = FIELD_EX32(*mux->reg_ctl, CM_CLOCKx_CTL, SRC); + bool enabled = clock_mux_is_enabled(mux); + + *mux->reg_ctl = FIELD_DP32(*mux->reg_ctl, CM_CLOCKx_CTL, BUSY, enabled); + + if (!enabled) { + clock_update(mux->out, 0); + return; + } + + freq = clock_get_hz(mux->srcs[src]); + + if (mux->int_bits == 0 && mux->frac_bits == 0) { + clock_update_hz(mux->out, freq); + return; + } + + /* + * The divider has an integer and a fractional part. The size of each part + * varies with the muxes (int_bits and frac_bits). Both parts are + * concatenated, with the integer part always starting at bit 12. 
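+     * For example (illustrative values), a mux with int_bits = 4 and
+     * frac_bits = 8 programmed with div = 0x280 divides its source clock
+     * by 0x280 / 2^8 = 2.5.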
+ * + * 31 12 11 0 + * ------------------------------ + * CM_DIV | | int | frac | | + * ------------------------------ + * <-----> <------> + * int_bits frac_bits + */ + div = extract32(*mux->reg_div, + R_CM_CLOCKx_DIV_FRAC_LENGTH - mux->frac_bits, + mux->int_bits + mux->frac_bits); + + if (!div) { + clock_update(mux->out, 0); + return; + } + + freq = muldiv64(freq, 1 << mux->frac_bits, div); + + clock_update_hz(mux->out, freq); +} + +static void clock_mux_src_update(void *opaque, ClockEvent event) +{ + CprmanClockMuxState **backref = opaque; + CprmanClockMuxState *s = *backref; + CprmanClockMuxSource src = backref - s->backref; + + if (FIELD_EX32(*s->reg_ctl, CM_CLOCKx_CTL, SRC) != src) { + return; + } + + clock_mux_update(s); +} + +static void clock_mux_reset(DeviceState *dev) +{ + CprmanClockMuxState *clock = CPRMAN_CLOCK_MUX(dev); + const ClockMuxResetInfo *info = &CLOCK_MUX_RESET_INFO[clock->id]; + + *clock->reg_ctl = info->cm_ctl; + *clock->reg_div = info->cm_div; +} + +static void clock_mux_init(Object *obj) +{ + CprmanClockMuxState *s = CPRMAN_CLOCK_MUX(obj); + size_t i; + + for (i = 0; i < CPRMAN_NUM_CLOCK_MUX_SRC; i++) { + char *name = g_strdup_printf("srcs[%zu]", i); + s->backref[i] = s; + s->srcs[i] = qdev_init_clock_in(DEVICE(s), name, + clock_mux_src_update, + &s->backref[i], + ClockUpdate); + g_free(name); + } + + s->out = qdev_init_clock_out(DEVICE(s), "out"); +} + +static const VMStateDescription clock_mux_vmstate = { + .name = TYPE_CPRMAN_CLOCK_MUX, + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_ARRAY_CLOCK(srcs, CprmanClockMuxState, + CPRMAN_NUM_CLOCK_MUX_SRC), + VMSTATE_END_OF_LIST() + } +}; + +static void clock_mux_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = clock_mux_reset; + dc->vmsd = &clock_mux_vmstate; +} + +static const TypeInfo cprman_clock_mux_info = { + .name = TYPE_CPRMAN_CLOCK_MUX, + .parent = TYPE_DEVICE, + .instance_size = sizeof(CprmanClockMuxState), + .class_init = clock_mux_class_init, + .instance_init = clock_mux_init, +}; + + +/* DSI0HSCK mux */ + +static void dsi0hsck_mux_update(CprmanDsi0HsckMuxState *s) +{ + bool src_is_plld = FIELD_EX32(*s->reg_cm, CM_DSI0HSCK, SELPLLD); + Clock *src = src_is_plld ? 
s->plld_in : s->plla_in; + + clock_update(s->out, clock_get(src)); +} + +static void dsi0hsck_mux_in_update(void *opaque, ClockEvent event) +{ + dsi0hsck_mux_update(CPRMAN_DSI0HSCK_MUX(opaque)); +} + +static void dsi0hsck_mux_init(Object *obj) +{ + CprmanDsi0HsckMuxState *s = CPRMAN_DSI0HSCK_MUX(obj); + DeviceState *dev = DEVICE(obj); + + s->plla_in = qdev_init_clock_in(dev, "plla-in", dsi0hsck_mux_in_update, + s, ClockUpdate); + s->plld_in = qdev_init_clock_in(dev, "plld-in", dsi0hsck_mux_in_update, + s, ClockUpdate); + s->out = qdev_init_clock_out(DEVICE(s), "out"); +} + +static const VMStateDescription dsi0hsck_mux_vmstate = { + .name = TYPE_CPRMAN_DSI0HSCK_MUX, + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_CLOCK(plla_in, CprmanDsi0HsckMuxState), + VMSTATE_CLOCK(plld_in, CprmanDsi0HsckMuxState), + VMSTATE_END_OF_LIST() + } +}; + +static void dsi0hsck_mux_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->vmsd = &dsi0hsck_mux_vmstate; +} + +static const TypeInfo cprman_dsi0hsck_mux_info = { + .name = TYPE_CPRMAN_DSI0HSCK_MUX, + .parent = TYPE_DEVICE, + .instance_size = sizeof(CprmanDsi0HsckMuxState), + .class_init = dsi0hsck_mux_class_init, + .instance_init = dsi0hsck_mux_init, +}; + + +/* CPRMAN "top level" model */ + +static uint32_t get_cm_lock(const BCM2835CprmanState *s) +{ + static const int CM_LOCK_MAPPING[CPRMAN_NUM_PLL] = { + [CPRMAN_PLLA] = R_CM_LOCK_FLOCKA_SHIFT, + [CPRMAN_PLLC] = R_CM_LOCK_FLOCKC_SHIFT, + [CPRMAN_PLLD] = R_CM_LOCK_FLOCKD_SHIFT, + [CPRMAN_PLLH] = R_CM_LOCK_FLOCKH_SHIFT, + [CPRMAN_PLLB] = R_CM_LOCK_FLOCKB_SHIFT, + }; + + uint32_t r = 0; + size_t i; + + for (i = 0; i < CPRMAN_NUM_PLL; i++) { + r |= pll_is_locked(&s->plls[i]) << CM_LOCK_MAPPING[i]; + } + + return r; +} + +static uint64_t cprman_read(void *opaque, hwaddr offset, + unsigned size) +{ + BCM2835CprmanState *s = CPRMAN(opaque); + uint64_t r = 0; + size_t idx = offset / sizeof(uint32_t); + + switch (idx) { + case R_CM_LOCK: + r = get_cm_lock(s); + break; + + default: + r = s->regs[idx]; + } + + trace_bcm2835_cprman_read(offset, r); + return r; +} + +static inline void update_pll_and_channels_from_cm(BCM2835CprmanState *s, + size_t idx) +{ + size_t i; + + for (i = 0; i < CPRMAN_NUM_PLL; i++) { + if (PLL_INIT_INFO[i].cm_offset == idx) { + pll_update_all_channels(s, &s->plls[i]); + return; + } + } +} + +static inline void update_channel_from_a2w(BCM2835CprmanState *s, size_t idx) +{ + size_t i; + + for (i = 0; i < CPRMAN_NUM_PLL_CHANNEL; i++) { + if (PLL_CHANNEL_INIT_INFO[i].a2w_ctrl_offset == idx) { + pll_channel_update(&s->channels[i]); + return; + } + } +} + +static inline void update_mux_from_cm(BCM2835CprmanState *s, size_t idx) +{ + size_t i; + + for (i = 0; i < CPRMAN_NUM_CLOCK_MUX; i++) { + if ((CLOCK_MUX_INIT_INFO[i].cm_offset == idx) || + (CLOCK_MUX_INIT_INFO[i].cm_offset + 4 == idx)) { + /* matches CM_CTL or CM_DIV mux register */ + clock_mux_update(&s->clock_muxes[i]); + return; + } + } +} + +#define CASE_PLL_A2W_REGS(pll_) \ + case R_A2W_ ## pll_ ## _CTRL: \ + case R_A2W_ ## pll_ ## _ANA0: \ + case R_A2W_ ## pll_ ## _ANA1: \ + case R_A2W_ ## pll_ ## _ANA2: \ + case R_A2W_ ## pll_ ## _ANA3: \ + case R_A2W_ ## pll_ ## _FRAC + +static void cprman_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + BCM2835CprmanState *s = CPRMAN(opaque); + size_t idx = offset / sizeof(uint32_t); + + if (FIELD_EX32(value, CPRMAN, PASSWORD) != CPRMAN_PASSWORD) { + trace_bcm2835_cprman_write_invalid_magic(offset, 
value);
+        return;
+    }
+
+    value &= ~R_CPRMAN_PASSWORD_MASK;
+
+    trace_bcm2835_cprman_write(offset, value);
+    s->regs[idx] = value;
+
+    switch (idx) {
+    case R_CM_PLLA ... R_CM_PLLH:
+    case R_CM_PLLB:
+        /*
+         * A given CM_PLLx register is shared by both the PLL and the channels
+         * of this PLL.
+         */
+        update_pll_and_channels_from_cm(s, idx);
+        break;
+
+    CASE_PLL_A2W_REGS(PLLA) :
+        pll_update(&s->plls[CPRMAN_PLLA]);
+        break;
+
+    CASE_PLL_A2W_REGS(PLLC) :
+        pll_update(&s->plls[CPRMAN_PLLC]);
+        break;
+
+    CASE_PLL_A2W_REGS(PLLD) :
+        pll_update(&s->plls[CPRMAN_PLLD]);
+        break;
+
+    CASE_PLL_A2W_REGS(PLLH) :
+        pll_update(&s->plls[CPRMAN_PLLH]);
+        break;
+
+    CASE_PLL_A2W_REGS(PLLB) :
+        pll_update(&s->plls[CPRMAN_PLLB]);
+        break;
+
+    case R_A2W_PLLA_DSI0:
+    case R_A2W_PLLA_CORE:
+    case R_A2W_PLLA_PER:
+    case R_A2W_PLLA_CCP2:
+    case R_A2W_PLLC_CORE2:
+    case R_A2W_PLLC_CORE1:
+    case R_A2W_PLLC_PER:
+    case R_A2W_PLLC_CORE0:
+    case R_A2W_PLLD_DSI0:
+    case R_A2W_PLLD_CORE:
+    case R_A2W_PLLD_PER:
+    case R_A2W_PLLD_DSI1:
+    case R_A2W_PLLH_AUX:
+    case R_A2W_PLLH_RCAL:
+    case R_A2W_PLLH_PIX:
+    case R_A2W_PLLB_ARM:
+        update_channel_from_a2w(s, idx);
+        break;
+
+    case R_CM_GNRICCTL ... R_CM_SMIDIV:
+    case R_CM_TCNTCNT ... R_CM_VECDIV:
+    case R_CM_PULSECTL ... R_CM_PULSEDIV:
+    case R_CM_SDCCTL ... R_CM_ARMCTL:
+    case R_CM_AVEOCTL ... R_CM_EMMCDIV:
+    case R_CM_EMMC2CTL ... R_CM_EMMC2DIV:
+        update_mux_from_cm(s, idx);
+        break;
+
+    case R_CM_DSI0HSCK:
+        dsi0hsck_mux_update(&s->dsi0hsck_mux);
+        break;
+    }
+}
+
+#undef CASE_PLL_A2W_REGS
+
+static const MemoryRegionOps cprman_ops = {
+    .read = cprman_read,
+    .write = cprman_write,
+    .endianness = DEVICE_LITTLE_ENDIAN,
+    .valid = {
+        /*
+         * Although this hasn't been checked against real hardware, nor can
+         * the information be found in a datasheet, it seems reasonable
+         * because of the "PASSWORD" magic value found in every register.
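+         * In any case, cprman_write() above only accepts stores whose
+         * PASSWORD field matches CPRMAN_PASSWORD and discards (with a
+         * trace event) everything else.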
+ */ + .min_access_size = 4, + .max_access_size = 4, + .unaligned = false, + }, + .impl = { + .max_access_size = 4, + }, +}; + +static void cprman_reset(DeviceState *dev) +{ + BCM2835CprmanState *s = CPRMAN(dev); + size_t i; + + memset(s->regs, 0, sizeof(s->regs)); + + for (i = 0; i < CPRMAN_NUM_PLL; i++) { + device_cold_reset(DEVICE(&s->plls[i])); + } + + for (i = 0; i < CPRMAN_NUM_PLL_CHANNEL; i++) { + device_cold_reset(DEVICE(&s->channels[i])); + } + + device_cold_reset(DEVICE(&s->dsi0hsck_mux)); + + for (i = 0; i < CPRMAN_NUM_CLOCK_MUX; i++) { + device_cold_reset(DEVICE(&s->clock_muxes[i])); + } + + clock_update_hz(s->xosc, s->xosc_freq); +} + +static void cprman_init(Object *obj) +{ + BCM2835CprmanState *s = CPRMAN(obj); + size_t i; + + for (i = 0; i < CPRMAN_NUM_PLL; i++) { + object_initialize_child(obj, PLL_INIT_INFO[i].name, + &s->plls[i], TYPE_CPRMAN_PLL); + set_pll_init_info(s, &s->plls[i], i); + } + + for (i = 0; i < CPRMAN_NUM_PLL_CHANNEL; i++) { + object_initialize_child(obj, PLL_CHANNEL_INIT_INFO[i].name, + &s->channels[i], + TYPE_CPRMAN_PLL_CHANNEL); + set_pll_channel_init_info(s, &s->channels[i], i); + } + + object_initialize_child(obj, "dsi0hsck-mux", + &s->dsi0hsck_mux, TYPE_CPRMAN_DSI0HSCK_MUX); + s->dsi0hsck_mux.reg_cm = &s->regs[R_CM_DSI0HSCK]; + + for (i = 0; i < CPRMAN_NUM_CLOCK_MUX; i++) { + char *alias; + + object_initialize_child(obj, CLOCK_MUX_INIT_INFO[i].name, + &s->clock_muxes[i], + TYPE_CPRMAN_CLOCK_MUX); + set_clock_mux_init_info(s, &s->clock_muxes[i], i); + + /* Expose muxes output as CPRMAN outputs */ + alias = g_strdup_printf("%s-out", CLOCK_MUX_INIT_INFO[i].name); + qdev_alias_clock(DEVICE(&s->clock_muxes[i]), "out", DEVICE(obj), alias); + g_free(alias); + } + + s->xosc = clock_new(obj, "xosc"); + s->gnd = clock_new(obj, "gnd"); + + clock_set(s->gnd, 0); + + memory_region_init_io(&s->iomem, obj, &cprman_ops, + s, "bcm2835-cprman", 0x2000); + sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->iomem); +} + +static void connect_mux_sources(BCM2835CprmanState *s, + CprmanClockMuxState *mux, + const CprmanPllChannel *clk_mapping) +{ + size_t i; + Clock *td0 = s->clock_muxes[CPRMAN_CLOCK_TD0].out; + Clock *td1 = s->clock_muxes[CPRMAN_CLOCK_TD1].out; + + /* For sources from 0 to 3. 
Source 4 to 9 are mux specific */ + Clock * const CLK_SRC_MAPPING[] = { + [CPRMAN_CLOCK_SRC_GND] = s->gnd, + [CPRMAN_CLOCK_SRC_XOSC] = s->xosc, + [CPRMAN_CLOCK_SRC_TD0] = td0, + [CPRMAN_CLOCK_SRC_TD1] = td1, + }; + + for (i = 0; i < CPRMAN_NUM_CLOCK_MUX_SRC; i++) { + CprmanPllChannel mapping = clk_mapping[i]; + Clock *src; + + if (mapping == CPRMAN_CLOCK_SRC_FORCE_GROUND) { + src = s->gnd; + } else if (mapping == CPRMAN_CLOCK_SRC_DSI0HSCK) { + src = s->dsi0hsck_mux.out; + } else if (i < CPRMAN_CLOCK_SRC_PLLA) { + src = CLK_SRC_MAPPING[i]; + } else { + src = s->channels[mapping].out; + } + + clock_set_source(mux->srcs[i], src); + } +} + +static void cprman_realize(DeviceState *dev, Error **errp) +{ + BCM2835CprmanState *s = CPRMAN(dev); + size_t i; + + for (i = 0; i < CPRMAN_NUM_PLL; i++) { + CprmanPllState *pll = &s->plls[i]; + + clock_set_source(pll->xosc_in, s->xosc); + + if (!qdev_realize(DEVICE(pll), NULL, errp)) { + return; + } + } + + for (i = 0; i < CPRMAN_NUM_PLL_CHANNEL; i++) { + CprmanPllChannelState *channel = &s->channels[i]; + CprmanPll parent = PLL_CHANNEL_INIT_INFO[i].parent; + Clock *parent_clk = s->plls[parent].out; + + clock_set_source(channel->pll_in, parent_clk); + + if (!qdev_realize(DEVICE(channel), NULL, errp)) { + return; + } + } + + clock_set_source(s->dsi0hsck_mux.plla_in, + s->channels[CPRMAN_PLLA_CHANNEL_DSI0].out); + clock_set_source(s->dsi0hsck_mux.plld_in, + s->channels[CPRMAN_PLLD_CHANNEL_DSI0].out); + + if (!qdev_realize(DEVICE(&s->dsi0hsck_mux), NULL, errp)) { + return; + } + + for (i = 0; i < CPRMAN_NUM_CLOCK_MUX; i++) { + CprmanClockMuxState *clock_mux = &s->clock_muxes[i]; + + connect_mux_sources(s, clock_mux, CLOCK_MUX_INIT_INFO[i].src_mapping); + + if (!qdev_realize(DEVICE(clock_mux), NULL, errp)) { + return; + } + } +} + +static const VMStateDescription cprman_vmstate = { + .name = TYPE_BCM2835_CPRMAN, + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32_ARRAY(regs, BCM2835CprmanState, CPRMAN_NUM_REGS), + VMSTATE_END_OF_LIST() + } +}; + +static Property cprman_properties[] = { + DEFINE_PROP_UINT32("xosc-freq-hz", BCM2835CprmanState, xosc_freq, 19200000), + DEFINE_PROP_END_OF_LIST() +}; + +static void cprman_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = cprman_realize; + dc->reset = cprman_reset; + dc->vmsd = &cprman_vmstate; + device_class_set_props(dc, cprman_properties); +} + +static const TypeInfo cprman_info = { + .name = TYPE_BCM2835_CPRMAN, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(BCM2835CprmanState), + .class_init = cprman_class_init, + .instance_init = cprman_init, +}; + +static void cprman_register_types(void) +{ + type_register_static(&cprman_info); + type_register_static(&cprman_pll_info); + type_register_static(&cprman_pll_channel_info); + type_register_static(&cprman_clock_mux_info); + type_register_static(&cprman_dsi0hsck_mux_info); +} + +type_init(cprman_register_types); diff --git a/hw/misc/bcm2835_mbox.c b/hw/misc/bcm2835_mbox.c new file mode 100644 index 000000000..9f73cbd5e --- /dev/null +++ b/hw/misc/bcm2835_mbox.c @@ -0,0 +1,340 @@ +/* + * Raspberry Pi emulation (c) 2012 Gregory Estrade + * + * This file models the system mailboxes, which are used for + * communication with low-bandwidth GPU peripherals. 
Refs: + * https://github.com/raspberrypi/firmware/wiki/Mailboxes + * https://github.com/raspberrypi/firmware/wiki/Accessing-mailboxes + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "hw/irq.h" +#include "hw/misc/bcm2835_mbox.h" +#include "migration/vmstate.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "trace.h" + +#define MAIL0_PEEK 0x90 +#define MAIL0_SENDER 0x94 +#define MAIL1_STATUS 0xb8 + +/* Mailbox status register */ +#define MAIL0_STATUS 0x98 +#define ARM_MS_FULL 0x80000000 +#define ARM_MS_EMPTY 0x40000000 +#define ARM_MS_LEVEL 0x400000FF /* Max. value depends on mailbox depth */ + +/* MAILBOX config/status register */ +#define MAIL0_CONFIG 0x9c +/* ANY write to this register clears the error bits! */ +#define ARM_MC_IHAVEDATAIRQEN 0x00000001 /* mbox irq enable: has data */ +#define ARM_MC_IHAVESPACEIRQEN 0x00000002 /* mbox irq enable: has space */ +#define ARM_MC_OPPISEMPTYIRQEN 0x00000004 /* mbox irq enable: Opp is empty */ +#define ARM_MC_MAIL_CLEAR 0x00000008 /* mbox clear write 1, then 0 */ +#define ARM_MC_IHAVEDATAIRQPEND 0x00000010 /* mbox irq pending: has space */ +#define ARM_MC_IHAVESPACEIRQPEND 0x00000020 /* mbox irq pending: Opp is empty */ +#define ARM_MC_OPPISEMPTYIRQPEND 0x00000040 /* mbox irq pending */ +/* Bit 7 is unused */ +#define ARM_MC_ERRNOOWN 0x00000100 /* error : none owner read from mailbox */ +#define ARM_MC_ERROVERFLW 0x00000200 /* error : write to fill mailbox */ +#define ARM_MC_ERRUNDRFLW 0x00000400 /* error : read from empty mailbox */ + +static void mbox_update_status(BCM2835Mbox *mb) +{ + mb->status &= ~(ARM_MS_EMPTY | ARM_MS_FULL); + if (mb->count == 0) { + mb->status |= ARM_MS_EMPTY; + } else if (mb->count == MBOX_SIZE) { + mb->status |= ARM_MS_FULL; + } +} + +static void mbox_reset(BCM2835Mbox *mb) +{ + int n; + + mb->count = 0; + mb->config = 0; + for (n = 0; n < MBOX_SIZE; n++) { + mb->reg[n] = MBOX_INVALID_DATA; + } + mbox_update_status(mb); +} + +static uint32_t mbox_pull(BCM2835Mbox *mb, int index) +{ + int n; + uint32_t val; + + assert(mb->count > 0); + assert(index < mb->count); + + val = mb->reg[index]; + for (n = index + 1; n < mb->count; n++) { + mb->reg[n - 1] = mb->reg[n]; + } + mb->count--; + mb->reg[mb->count] = MBOX_INVALID_DATA; + + mbox_update_status(mb); + + return val; +} + +static void mbox_push(BCM2835Mbox *mb, uint32_t val) +{ + assert(mb->count < MBOX_SIZE); + mb->reg[mb->count++] = val; + mbox_update_status(mb); +} + +static void bcm2835_mbox_update(BCM2835MboxState *s) +{ + uint32_t value; + bool set; + int n; + + s->mbox_irq_disabled = true; + + /* Get pending responses and put them in the vc->arm mbox, + * as long as it's not full + */ + for (n = 0; n < MBOX_CHAN_COUNT; n++) { + while (s->available[n] && !(s->mbox[0].status & ARM_MS_FULL)) { + value = ldl_le_phys(&s->mbox_as, n << MBOX_AS_CHAN_SHIFT); + assert(value != MBOX_INVALID_DATA); /* Pending interrupt but no data */ + mbox_push(&s->mbox[0], value); + } + } + + /* TODO (?): Try to push pending requests from the arm->vc mbox */ + + /* Re-enable calls from the IRQ routine */ + s->mbox_irq_disabled = false; + + /* Update ARM IRQ status */ + set = false; + s->mbox[0].config &= ~ARM_MC_IHAVEDATAIRQPEND; + if (!(s->mbox[0].status & ARM_MS_EMPTY)) { + s->mbox[0].config |= ARM_MC_IHAVEDATAIRQPEND; + if (s->mbox[0].config & ARM_MC_IHAVEDATAIRQEN) { + set = true; + } + } + trace_bcm2835_mbox_irq(set); + 
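+    /* Drive the ARM IRQ line: it is raised exactly while enabled
+     * mailbox 0 data is pending.
+     */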
qemu_set_irq(s->arm_irq, set); +} + +static void bcm2835_mbox_set_irq(void *opaque, int irq, int level) +{ + BCM2835MboxState *s = opaque; + + s->available[irq] = level; + + /* avoid recursively calling bcm2835_mbox_update when the interrupt + * status changes due to the ldl_phys call within that function + */ + if (!s->mbox_irq_disabled) { + bcm2835_mbox_update(s); + } +} + +static uint64_t bcm2835_mbox_read(void *opaque, hwaddr offset, unsigned size) +{ + BCM2835MboxState *s = opaque; + uint32_t res = 0; + + offset &= 0xff; + + switch (offset) { + case 0x80 ... 0x8c: /* MAIL0_READ */ + if (s->mbox[0].status & ARM_MS_EMPTY) { + res = MBOX_INVALID_DATA; + } else { + res = mbox_pull(&s->mbox[0], 0); + } + break; + + case MAIL0_PEEK: + res = s->mbox[0].reg[0]; + break; + + case MAIL0_SENDER: + break; + + case MAIL0_STATUS: + res = s->mbox[0].status; + break; + + case MAIL0_CONFIG: + res = s->mbox[0].config; + break; + + case MAIL1_STATUS: + res = s->mbox[1].status; + break; + + default: + qemu_log_mask(LOG_UNIMP, "%s: Unsupported offset 0x%"HWADDR_PRIx"\n", + __func__, offset); + trace_bcm2835_mbox_read(size, offset, res); + return 0; + } + trace_bcm2835_mbox_read(size, offset, res); + + bcm2835_mbox_update(s); + + return res; +} + +static void bcm2835_mbox_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + BCM2835MboxState *s = opaque; + hwaddr childaddr; + uint8_t ch; + + offset &= 0xff; + + trace_bcm2835_mbox_write(size, offset, value); + switch (offset) { + case MAIL0_SENDER: + break; + + case MAIL0_CONFIG: + s->mbox[0].config &= ~ARM_MC_IHAVEDATAIRQEN; + s->mbox[0].config |= value & ARM_MC_IHAVEDATAIRQEN; + break; + + case 0xa0 ... 0xac: /* MAIL1_WRITE */ + if (s->mbox[1].status & ARM_MS_FULL) { + /* Mailbox full */ + qemu_log_mask(LOG_GUEST_ERROR, "%s: mailbox full\n", __func__); + } else { + ch = value & 0xf; + if (ch < MBOX_CHAN_COUNT) { + childaddr = ch << MBOX_AS_CHAN_SHIFT; + if (ldl_le_phys(&s->mbox_as, childaddr + MBOX_AS_PENDING)) { + /* Child busy, push delayed. 
Push it in the arm->vc mbox */ + mbox_push(&s->mbox[1], value); + } else { + /* Push it directly to the child device */ + stl_le_phys(&s->mbox_as, childaddr, value); + } + } else { + /* Invalid channel number */ + qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid channel %u\n", + __func__, ch); + } + } + break; + + default: + qemu_log_mask(LOG_UNIMP, "%s: Unsupported offset 0x%"HWADDR_PRIx + " value 0x%"PRIx64"\n", + __func__, offset, value); + return; + } + + bcm2835_mbox_update(s); +} + +static const MemoryRegionOps bcm2835_mbox_ops = { + .read = bcm2835_mbox_read, + .write = bcm2835_mbox_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid.min_access_size = 4, + .valid.max_access_size = 4, +}; + +/* vmstate of a single mailbox */ +static const VMStateDescription vmstate_bcm2835_mbox_box = { + .name = TYPE_BCM2835_MBOX "_box", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32_ARRAY(reg, BCM2835Mbox, MBOX_SIZE), + VMSTATE_UINT32(count, BCM2835Mbox), + VMSTATE_UINT32(status, BCM2835Mbox), + VMSTATE_UINT32(config, BCM2835Mbox), + VMSTATE_END_OF_LIST() + } +}; + +/* vmstate of the entire device */ +static const VMStateDescription vmstate_bcm2835_mbox = { + .name = TYPE_BCM2835_MBOX, + .version_id = 1, + .minimum_version_id = 1, + .minimum_version_id_old = 1, + .fields = (VMStateField[]) { + VMSTATE_BOOL_ARRAY(available, BCM2835MboxState, MBOX_CHAN_COUNT), + VMSTATE_STRUCT_ARRAY(mbox, BCM2835MboxState, 2, 1, + vmstate_bcm2835_mbox_box, BCM2835Mbox), + VMSTATE_END_OF_LIST() + } +}; + +static void bcm2835_mbox_init(Object *obj) +{ + BCM2835MboxState *s = BCM2835_MBOX(obj); + + memory_region_init_io(&s->iomem, obj, &bcm2835_mbox_ops, s, + TYPE_BCM2835_MBOX, 0x400); + sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->iomem); + sysbus_init_irq(SYS_BUS_DEVICE(s), &s->arm_irq); + qdev_init_gpio_in(DEVICE(s), bcm2835_mbox_set_irq, MBOX_CHAN_COUNT); +} + +static void bcm2835_mbox_reset(DeviceState *dev) +{ + BCM2835MboxState *s = BCM2835_MBOX(dev); + int n; + + mbox_reset(&s->mbox[0]); + mbox_reset(&s->mbox[1]); + s->mbox_irq_disabled = false; + for (n = 0; n < MBOX_CHAN_COUNT; n++) { + s->available[n] = false; + } +} + +static void bcm2835_mbox_realize(DeviceState *dev, Error **errp) +{ + BCM2835MboxState *s = BCM2835_MBOX(dev); + Object *obj; + + obj = object_property_get_link(OBJECT(dev), "mbox-mr", &error_abort); + s->mbox_mr = MEMORY_REGION(obj); + address_space_init(&s->mbox_as, s->mbox_mr, TYPE_BCM2835_MBOX "-memory"); + bcm2835_mbox_reset(dev); +} + +static void bcm2835_mbox_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = bcm2835_mbox_realize; + dc->reset = bcm2835_mbox_reset; + dc->vmsd = &vmstate_bcm2835_mbox; +} + +static TypeInfo bcm2835_mbox_info = { + .name = TYPE_BCM2835_MBOX, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(BCM2835MboxState), + .class_init = bcm2835_mbox_class_init, + .instance_init = bcm2835_mbox_init, +}; + +static void bcm2835_mbox_register_types(void) +{ + type_register_static(&bcm2835_mbox_info); +} + +type_init(bcm2835_mbox_register_types) diff --git a/hw/misc/bcm2835_mphi.c b/hw/misc/bcm2835_mphi.c new file mode 100644 index 000000000..0428e10ba --- /dev/null +++ b/hw/misc/bcm2835_mphi.c @@ -0,0 +1,191 @@ +/* + * BCM2835 SOC MPHI emulation + * + * Very basic emulation, only providing the FIQ interrupt needed to + * allow the dwc-otg USB host controller driver in the Raspbian kernel + * to function. 
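+ * Accesses outside the handful of registers modelled here are logged as
+ * unimplemented.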
+ * + * Copyright (c) 2020 Paul Zimmerman + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "hw/misc/bcm2835_mphi.h" +#include "migration/vmstate.h" +#include "qemu/error-report.h" +#include "qemu/log.h" +#include "qemu/main-loop.h" + +static inline void mphi_raise_irq(BCM2835MphiState *s) +{ + qemu_set_irq(s->irq, 1); +} + +static inline void mphi_lower_irq(BCM2835MphiState *s) +{ + qemu_set_irq(s->irq, 0); +} + +static uint64_t mphi_reg_read(void *ptr, hwaddr addr, unsigned size) +{ + BCM2835MphiState *s = ptr; + uint32_t val = 0; + + switch (addr) { + case 0x28: /* outdda */ + val = s->outdda; + break; + case 0x2c: /* outddb */ + val = s->outddb; + break; + case 0x4c: /* ctrl */ + val = s->ctrl; + val |= 1 << 17; + break; + case 0x50: /* intstat */ + val = s->intstat; + break; + case 0x1f0: /* swirq_set */ + val = s->swirq; + break; + case 0x1f4: /* swirq_clr */ + val = s->swirq; + break; + default: + qemu_log_mask(LOG_UNIMP, "read from unknown register"); + break; + } + + return val; +} + +static void mphi_reg_write(void *ptr, hwaddr addr, uint64_t val, unsigned size) +{ + BCM2835MphiState *s = ptr; + int do_irq = 0; + + switch (addr) { + case 0x28: /* outdda */ + s->outdda = val; + break; + case 0x2c: /* outddb */ + s->outddb = val; + if (val & (1 << 29)) { + do_irq = 1; + } + break; + case 0x4c: /* ctrl */ + s->ctrl = val; + if (val & (1 << 16)) { + do_irq = -1; + } + break; + case 0x50: /* intstat */ + s->intstat = val; + if (val & ((1 << 16) | (1 << 29))) { + do_irq = -1; + } + break; + case 0x1f0: /* swirq_set */ + s->swirq |= val; + do_irq = 1; + break; + case 0x1f4: /* swirq_clr */ + s->swirq &= ~val; + do_irq = -1; + break; + default: + qemu_log_mask(LOG_UNIMP, "write to unknown register"); + return; + } + + if (do_irq > 0) { + mphi_raise_irq(s); + } else if (do_irq < 0) { + mphi_lower_irq(s); + } +} + +static const MemoryRegionOps mphi_mmio_ops = { + .read = mphi_reg_read, + .write = mphi_reg_write, + .impl.min_access_size = 4, + .impl.max_access_size = 4, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static void mphi_reset(DeviceState *dev) +{ + BCM2835MphiState *s = BCM2835_MPHI(dev); + + s->outdda = 0; + s->outddb = 0; + s->ctrl = 0; + s->intstat = 0; + s->swirq = 0; +} + +static void mphi_realize(DeviceState *dev, Error **errp) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + BCM2835MphiState *s = BCM2835_MPHI(dev); + + sysbus_init_irq(sbd, &s->irq); +} + +static void mphi_init(Object *obj) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + BCM2835MphiState *s = BCM2835_MPHI(obj); + + memory_region_init_io(&s->iomem, obj, &mphi_mmio_ops, s, "mphi", MPHI_MMIO_SIZE); + sysbus_init_mmio(sbd, &s->iomem); +} + +const VMStateDescription vmstate_mphi_state = { + .name = "mphi", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(outdda, BCM2835MphiState), + VMSTATE_UINT32(outddb, BCM2835MphiState), + VMSTATE_UINT32(ctrl, BCM2835MphiState), + VMSTATE_UINT32(intstat, BCM2835MphiState), + VMSTATE_UINT32(swirq, 
BCM2835MphiState), + VMSTATE_END_OF_LIST() + } +}; + +static void mphi_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = mphi_realize; + dc->reset = mphi_reset; + dc->vmsd = &vmstate_mphi_state; +} + +static const TypeInfo bcm2835_mphi_type_info = { + .name = TYPE_BCM2835_MPHI, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(BCM2835MphiState), + .instance_init = mphi_init, + .class_init = mphi_class_init, +}; + +static void bcm2835_mphi_register_types(void) +{ + type_register_static(&bcm2835_mphi_type_info); +} + +type_init(bcm2835_mphi_register_types) diff --git a/hw/misc/bcm2835_powermgt.c b/hw/misc/bcm2835_powermgt.c new file mode 100644 index 000000000..25fa804cb --- /dev/null +++ b/hw/misc/bcm2835_powermgt.c @@ -0,0 +1,160 @@ +/* + * BCM2835 Power Management emulation + * + * Copyright (C) 2017 Marcin Chojnacki + * Copyright (C) 2021 Nolan Leake + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "hw/misc/bcm2835_powermgt.h" +#include "migration/vmstate.h" +#include "sysemu/runstate.h" + +#define PASSWORD 0x5a000000 +#define PASSWORD_MASK 0xff000000 + +#define R_RSTC 0x1c +#define V_RSTC_RESET 0x20 +#define R_RSTS 0x20 +#define V_RSTS_POWEROFF 0x555 /* Linux uses partition 63 to indicate halt. */ +#define R_WDOG 0x24 + +static uint64_t bcm2835_powermgt_read(void *opaque, hwaddr offset, + unsigned size) +{ + BCM2835PowerMgtState *s = (BCM2835PowerMgtState *)opaque; + uint32_t res = 0; + + switch (offset) { + case R_RSTC: + res = s->rstc; + break; + case R_RSTS: + res = s->rsts; + break; + case R_WDOG: + res = s->wdog; + break; + + default: + qemu_log_mask(LOG_UNIMP, + "bcm2835_powermgt_read: Unknown offset 0x%08"HWADDR_PRIx + "\n", offset); + res = 0; + break; + } + + return res; +} + +static void bcm2835_powermgt_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + BCM2835PowerMgtState *s = (BCM2835PowerMgtState *)opaque; + + if ((value & PASSWORD_MASK) != PASSWORD) { + qemu_log_mask(LOG_GUEST_ERROR, + "bcm2835_powermgt_write: Bad password 0x%"PRIx64 + " at offset 0x%08"HWADDR_PRIx"\n", + value, offset); + return; + } + + value = value & ~PASSWORD_MASK; + + switch (offset) { + case R_RSTC: + s->rstc = value; + if (value & V_RSTC_RESET) { + if ((s->rsts & 0xfff) == V_RSTS_POWEROFF) { + qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN); + } else { + qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); + } + } + break; + case R_RSTS: + qemu_log_mask(LOG_UNIMP, + "bcm2835_powermgt_write: RSTS\n"); + s->rsts = value; + break; + case R_WDOG: + qemu_log_mask(LOG_UNIMP, + "bcm2835_powermgt_write: WDOG\n"); + s->wdog = value; + break; + + default: + qemu_log_mask(LOG_UNIMP, + "bcm2835_powermgt_write: Unknown offset 0x%08"HWADDR_PRIx + "\n", offset); + break; + } +} + +static const MemoryRegionOps bcm2835_powermgt_ops = { + .read = bcm2835_powermgt_read, + .write = bcm2835_powermgt_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .impl.min_access_size = 4, + .impl.max_access_size = 4, +}; + +static const VMStateDescription vmstate_bcm2835_powermgt = { + .name = TYPE_BCM2835_POWERMGT, + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(rstc, BCM2835PowerMgtState), + VMSTATE_UINT32(rsts, BCM2835PowerMgtState), + VMSTATE_UINT32(wdog, BCM2835PowerMgtState), + VMSTATE_END_OF_LIST() + } +}; + 
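+/*
+ * Usage sketch (editor's illustration, not part of the original patch):
+ * a guest reboots or powers off through this block by writing the
+ * password-tagged registers defined above, e.g.
+ *
+ *   writel(0x5a000000 | 0x555, base + 0x20);  /* RSTS: partition 63 = halt */
+ *   writel(0x5a000000 | 0x20,  base + 0x1c);  /* RSTC: trigger reset */
+ *
+ * Because (rsts & 0xfff) == V_RSTS_POWEROFF after the first store, the
+ * RSTC write ends in qemu_system_shutdown_request() rather than a system
+ * reset; without it the same RSTC write reboots the machine. writel()
+ * stands in for the guest's 32-bit MMIO store.
+ */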
+static void bcm2835_powermgt_init(Object *obj) +{ + BCM2835PowerMgtState *s = BCM2835_POWERMGT(obj); + + memory_region_init_io(&s->iomem, obj, &bcm2835_powermgt_ops, s, + TYPE_BCM2835_POWERMGT, 0x200); + sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->iomem); +} + +static void bcm2835_powermgt_reset(DeviceState *dev) +{ + BCM2835PowerMgtState *s = BCM2835_POWERMGT(dev); + + /* https://elinux.org/BCM2835_registers#PM */ + s->rstc = 0x00000102; + s->rsts = 0x00001000; + s->wdog = 0x00000000; +} + +static void bcm2835_powermgt_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = bcm2835_powermgt_reset; + dc->vmsd = &vmstate_bcm2835_powermgt; +} + +static TypeInfo bcm2835_powermgt_info = { + .name = TYPE_BCM2835_POWERMGT, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(BCM2835PowerMgtState), + .class_init = bcm2835_powermgt_class_init, + .instance_init = bcm2835_powermgt_init, +}; + +static void bcm2835_powermgt_register_types(void) +{ + type_register_static(&bcm2835_powermgt_info); +} + +type_init(bcm2835_powermgt_register_types) diff --git a/hw/misc/bcm2835_property.c b/hw/misc/bcm2835_property.c new file mode 100644 index 000000000..73941bdae --- /dev/null +++ b/hw/misc/bcm2835_property.c @@ -0,0 +1,436 @@ +/* + * Raspberry Pi emulation (c) 2012 Gregory Estrade + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "hw/misc/bcm2835_property.h" +#include "hw/qdev-properties.h" +#include "migration/vmstate.h" +#include "hw/irq.h" +#include "hw/misc/bcm2835_mbox_defs.h" +#include "sysemu/dma.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "trace.h" + +/* https://github.com/raspberrypi/firmware/wiki/Mailbox-property-interface */ + +static void bcm2835_property_mbox_push(BCM2835PropertyState *s, uint32_t value) +{ + uint32_t tag; + uint32_t bufsize; + uint32_t tot_len; + size_t resplen; + uint32_t tmp; + int n; + uint32_t offset, length, color; + + /* + * Copy the current state of the framebuffer config; we will update + * this copy as we process tags and then ask the framebuffer to use + * it at the end. 
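+     *
+     * (Editor's note: each tag in the guest's buffer is laid out as
+     * { u32 tag; u32 bufsize; u32 req/resp code; u8 payload[bufsize] },
+     * which is why the loop below reads the header fields at value + 0,
+     * + 4 and + 8, writes response data from value + 12 onwards, and
+     * advances by bufsize + 12 after every tag.)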
+ */ + BCM2835FBConfig fbconfig = s->fbdev->config; + bool fbconfig_updated = false; + + value &= ~0xf; + + s->addr = value; + + tot_len = ldl_le_phys(&s->dma_as, value); + + /* @(addr + 4) : Buffer response code */ + value = s->addr + 8; + while (value + 8 <= s->addr + tot_len) { + tag = ldl_le_phys(&s->dma_as, value); + bufsize = ldl_le_phys(&s->dma_as, value + 4); + /* @(value + 8) : Request/response indicator */ + resplen = 0; + switch (tag) { + case 0x00000000: /* End tag */ + break; + case 0x00000001: /* Get firmware revision */ + stl_le_phys(&s->dma_as, value + 12, 346337); + resplen = 4; + break; + case 0x00010001: /* Get board model */ + qemu_log_mask(LOG_UNIMP, + "bcm2835_property: 0x%08x get board model NYI\n", + tag); + resplen = 4; + break; + case 0x00010002: /* Get board revision */ + stl_le_phys(&s->dma_as, value + 12, s->board_rev); + resplen = 4; + break; + case 0x00010003: /* Get board MAC address */ + resplen = sizeof(s->macaddr.a); + dma_memory_write(&s->dma_as, value + 12, s->macaddr.a, resplen); + break; + case 0x00010004: /* Get board serial */ + qemu_log_mask(LOG_UNIMP, + "bcm2835_property: 0x%08x get board serial NYI\n", + tag); + resplen = 8; + break; + case 0x00010005: /* Get ARM memory */ + /* base */ + stl_le_phys(&s->dma_as, value + 12, 0); + /* size */ + stl_le_phys(&s->dma_as, value + 16, s->fbdev->vcram_base); + resplen = 8; + break; + case 0x00010006: /* Get VC memory */ + /* base */ + stl_le_phys(&s->dma_as, value + 12, s->fbdev->vcram_base); + /* size */ + stl_le_phys(&s->dma_as, value + 16, s->fbdev->vcram_size); + resplen = 8; + break; + case 0x00028001: /* Set power state */ + /* Assume that whatever device they asked for exists, + * and we'll just claim we set it to the desired state + */ + tmp = ldl_le_phys(&s->dma_as, value + 16); + stl_le_phys(&s->dma_as, value + 16, (tmp & 1)); + resplen = 8; + break; + + /* Clocks */ + + case 0x00030001: /* Get clock state */ + stl_le_phys(&s->dma_as, value + 16, 0x1); + resplen = 8; + break; + + case 0x00038001: /* Set clock state */ + qemu_log_mask(LOG_UNIMP, + "bcm2835_property: 0x%08x set clock state NYI\n", + tag); + resplen = 8; + break; + + case 0x00030002: /* Get clock rate */ + case 0x00030004: /* Get max clock rate */ + case 0x00030007: /* Get min clock rate */ + switch (ldl_le_phys(&s->dma_as, value + 12)) { + case 1: /* EMMC */ + stl_le_phys(&s->dma_as, value + 16, 50000000); + break; + case 2: /* UART */ + stl_le_phys(&s->dma_as, value + 16, 3000000); + break; + default: + stl_le_phys(&s->dma_as, value + 16, 700000000); + break; + } + resplen = 8; + break; + + case 0x00038002: /* Set clock rate */ + case 0x00038004: /* Set max clock rate */ + case 0x00038007: /* Set min clock rate */ + qemu_log_mask(LOG_UNIMP, + "bcm2835_property: 0x%08x set clock rate NYI\n", + tag); + resplen = 8; + break; + + /* Temperature */ + + case 0x00030006: /* Get temperature */ + stl_le_phys(&s->dma_as, value + 16, 25000); + resplen = 8; + break; + + case 0x0003000A: /* Get max temperature */ + stl_le_phys(&s->dma_as, value + 16, 99000); + resplen = 8; + break; + + /* Frame buffer */ + + case 0x00040001: /* Allocate buffer */ + stl_le_phys(&s->dma_as, value + 12, fbconfig.base); + stl_le_phys(&s->dma_as, value + 16, + bcm2835_fb_get_size(&fbconfig)); + resplen = 8; + break; + case 0x00048001: /* Release buffer */ + resplen = 0; + break; + case 0x00040002: /* Blank screen */ + resplen = 4; + break; + case 0x00044003: /* Test physical display width/height */ + case 0x00044004: /* Test virtual display width/height */ + 
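+            /*
+             * Test tags only ask whether the proposed setting would be
+             * accepted; the request payload is echoed back unchanged and
+             * acknowledging with the 8-byte response length is enough.
+             * (Editor's clarification.)
+             */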
resplen = 8; + break; + case 0x00048003: /* Set physical display width/height */ + fbconfig.xres = ldl_le_phys(&s->dma_as, value + 12); + fbconfig.yres = ldl_le_phys(&s->dma_as, value + 16); + bcm2835_fb_validate_config(&fbconfig); + fbconfig_updated = true; + /* fall through */ + case 0x00040003: /* Get physical display width/height */ + stl_le_phys(&s->dma_as, value + 12, fbconfig.xres); + stl_le_phys(&s->dma_as, value + 16, fbconfig.yres); + resplen = 8; + break; + case 0x00048004: /* Set virtual display width/height */ + fbconfig.xres_virtual = ldl_le_phys(&s->dma_as, value + 12); + fbconfig.yres_virtual = ldl_le_phys(&s->dma_as, value + 16); + bcm2835_fb_validate_config(&fbconfig); + fbconfig_updated = true; + /* fall through */ + case 0x00040004: /* Get virtual display width/height */ + stl_le_phys(&s->dma_as, value + 12, fbconfig.xres_virtual); + stl_le_phys(&s->dma_as, value + 16, fbconfig.yres_virtual); + resplen = 8; + break; + case 0x00044005: /* Test depth */ + resplen = 4; + break; + case 0x00048005: /* Set depth */ + fbconfig.bpp = ldl_le_phys(&s->dma_as, value + 12); + bcm2835_fb_validate_config(&fbconfig); + fbconfig_updated = true; + /* fall through */ + case 0x00040005: /* Get depth */ + stl_le_phys(&s->dma_as, value + 12, fbconfig.bpp); + resplen = 4; + break; + case 0x00044006: /* Test pixel order */ + resplen = 4; + break; + case 0x00048006: /* Set pixel order */ + fbconfig.pixo = ldl_le_phys(&s->dma_as, value + 12); + bcm2835_fb_validate_config(&fbconfig); + fbconfig_updated = true; + /* fall through */ + case 0x00040006: /* Get pixel order */ + stl_le_phys(&s->dma_as, value + 12, fbconfig.pixo); + resplen = 4; + break; + case 0x00044007: /* Test pixel alpha */ + resplen = 4; + break; + case 0x00048007: /* Set alpha */ + fbconfig.alpha = ldl_le_phys(&s->dma_as, value + 12); + bcm2835_fb_validate_config(&fbconfig); + fbconfig_updated = true; + /* fall through */ + case 0x00040007: /* Get alpha */ + stl_le_phys(&s->dma_as, value + 12, fbconfig.alpha); + resplen = 4; + break; + case 0x00040008: /* Get pitch */ + stl_le_phys(&s->dma_as, value + 12, + bcm2835_fb_get_pitch(&fbconfig)); + resplen = 4; + break; + case 0x00044009: /* Test virtual offset */ + resplen = 8; + break; + case 0x00048009: /* Set virtual offset */ + fbconfig.xoffset = ldl_le_phys(&s->dma_as, value + 12); + fbconfig.yoffset = ldl_le_phys(&s->dma_as, value + 16); + bcm2835_fb_validate_config(&fbconfig); + fbconfig_updated = true; + /* fall through */ + case 0x00040009: /* Get virtual offset */ + stl_le_phys(&s->dma_as, value + 12, fbconfig.xoffset); + stl_le_phys(&s->dma_as, value + 16, fbconfig.yoffset); + resplen = 8; + break; + case 0x0004000a: /* Get/Test/Set overscan */ + case 0x0004400a: + case 0x0004800a: + stl_le_phys(&s->dma_as, value + 12, 0); + stl_le_phys(&s->dma_as, value + 16, 0); + stl_le_phys(&s->dma_as, value + 20, 0); + stl_le_phys(&s->dma_as, value + 24, 0); + resplen = 16; + break; + case 0x0004800b: /* Set palette */ + offset = ldl_le_phys(&s->dma_as, value + 12); + length = ldl_le_phys(&s->dma_as, value + 16); + n = 0; + while (n < length - offset) { + color = ldl_le_phys(&s->dma_as, value + 20 + (n << 2)); + stl_le_phys(&s->dma_as, + s->fbdev->vcram_base + ((offset + n) << 2), color); + n++; + } + stl_le_phys(&s->dma_as, value + 12, 0); + resplen = 4; + break; + + case 0x00060001: /* Get DMA channels */ + /* channels 2-5 */ + stl_le_phys(&s->dma_as, value + 12, 0x003C); + resplen = 4; + break; + + case 0x00050001: /* Get command line */ + resplen = 0; + break; + + default: + 
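+            /*
+             * Unknown tags are logged but still completed: resplen stays
+             * 0, so the response code written below is (1 << 31) with no
+             * payload and the loop moves on to the next tag.
+             * (Editor's clarification.)
+             */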
qemu_log_mask(LOG_UNIMP, + "bcm2835_property: unhandled tag 0x%08x\n", tag); + break; + } + + trace_bcm2835_mbox_property(tag, bufsize, resplen); + if (tag == 0) { + break; + } + + stl_le_phys(&s->dma_as, value + 8, (1 << 31) | resplen); + value += bufsize + 12; + } + + /* Reconfigure framebuffer if required */ + if (fbconfig_updated) { + bcm2835_fb_reconfigure(s->fbdev, &fbconfig); + } + + /* Buffer response code */ + stl_le_phys(&s->dma_as, s->addr + 4, (1 << 31)); +} + +static uint64_t bcm2835_property_read(void *opaque, hwaddr offset, + unsigned size) +{ + BCM2835PropertyState *s = opaque; + uint32_t res = 0; + + switch (offset) { + case MBOX_AS_DATA: + res = MBOX_CHAN_PROPERTY | s->addr; + s->pending = false; + qemu_set_irq(s->mbox_irq, 0); + break; + + case MBOX_AS_PENDING: + res = s->pending; + break; + + default: + qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset %"HWADDR_PRIx"\n", + __func__, offset); + return 0; + } + + return res; +} + +static void bcm2835_property_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + BCM2835PropertyState *s = opaque; + + switch (offset) { + case MBOX_AS_DATA: + /* bcm2835_mbox should check our pending status before pushing */ + assert(!s->pending); + s->pending = true; + bcm2835_property_mbox_push(s, value); + qemu_set_irq(s->mbox_irq, 1); + break; + + default: + qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset %"HWADDR_PRIx"\n", + __func__, offset); + return; + } +} + +static const MemoryRegionOps bcm2835_property_ops = { + .read = bcm2835_property_read, + .write = bcm2835_property_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid.min_access_size = 4, + .valid.max_access_size = 4, +}; + +static const VMStateDescription vmstate_bcm2835_property = { + .name = TYPE_BCM2835_PROPERTY, + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_MACADDR(macaddr, BCM2835PropertyState), + VMSTATE_UINT32(addr, BCM2835PropertyState), + VMSTATE_BOOL(pending, BCM2835PropertyState), + VMSTATE_END_OF_LIST() + } +}; + +static void bcm2835_property_init(Object *obj) +{ + BCM2835PropertyState *s = BCM2835_PROPERTY(obj); + + memory_region_init_io(&s->iomem, OBJECT(s), &bcm2835_property_ops, s, + TYPE_BCM2835_PROPERTY, 0x10); + sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->iomem); + sysbus_init_irq(SYS_BUS_DEVICE(s), &s->mbox_irq); +} + +static void bcm2835_property_reset(DeviceState *dev) +{ + BCM2835PropertyState *s = BCM2835_PROPERTY(dev); + + s->pending = false; +} + +static void bcm2835_property_realize(DeviceState *dev, Error **errp) +{ + BCM2835PropertyState *s = BCM2835_PROPERTY(dev); + Object *obj; + + obj = object_property_get_link(OBJECT(dev), "fb", &error_abort); + s->fbdev = BCM2835_FB(obj); + + obj = object_property_get_link(OBJECT(dev), "dma-mr", &error_abort); + s->dma_mr = MEMORY_REGION(obj); + address_space_init(&s->dma_as, s->dma_mr, TYPE_BCM2835_PROPERTY "-memory"); + + /* TODO: connect to MAC address of USB NIC device, once we emulate it */ + qemu_macaddr_default_if_unset(&s->macaddr); + + bcm2835_property_reset(dev); +} + +static Property bcm2835_property_props[] = { + DEFINE_PROP_UINT32("board-rev", BCM2835PropertyState, board_rev, 0), + DEFINE_PROP_END_OF_LIST() +}; + +static void bcm2835_property_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + device_class_set_props(dc, bcm2835_property_props); + dc->realize = bcm2835_property_realize; + dc->vmsd = &vmstate_bcm2835_property; +} + +static TypeInfo bcm2835_property_info = { + .name = TYPE_BCM2835_PROPERTY, + 
.parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(BCM2835PropertyState), + .class_init = bcm2835_property_class_init, + .instance_init = bcm2835_property_init, +}; + +static void bcm2835_property_register_types(void) +{ + type_register_static(&bcm2835_property_info); +} + +type_init(bcm2835_property_register_types) diff --git a/hw/misc/bcm2835_rng.c b/hw/misc/bcm2835_rng.c new file mode 100644 index 000000000..d0c4e64e8 --- /dev/null +++ b/hw/misc/bcm2835_rng.c @@ -0,0 +1,147 @@ +/* + * BCM2835 Random Number Generator emulation + * + * Copyright (C) 2017 Marcin Chojnacki + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "qemu/guest-random.h" +#include "qemu/module.h" +#include "hw/misc/bcm2835_rng.h" +#include "migration/vmstate.h" + +static uint32_t get_random_bytes(void) +{ + uint32_t res; + + /* + * On failure we don't want to return the guest a non-random + * value in case they're really using it for cryptographic + * purposes, so the best we can do is die here. + * This shouldn't happen unless something's broken. + * In theory we could implement this device's full FIFO + * and interrupt semantics and then just stop filling the + * FIFO. That's a lot of work, though, so we assume any + * errors are systematic problems and trust that if we didn't + * fail as the guest inited then we won't fail later on + * mid-run. + */ + qemu_guest_getrandom_nofail(&res, sizeof(res)); + return res; +} + +static uint64_t bcm2835_rng_read(void *opaque, hwaddr offset, + unsigned size) +{ + BCM2835RngState *s = (BCM2835RngState *)opaque; + uint32_t res = 0; + + assert(size == 4); + + switch (offset) { + case 0x0: /* rng_ctrl */ + res = s->rng_ctrl; + break; + case 0x4: /* rng_status */ + res = s->rng_status | (1 << 24); + break; + case 0x8: /* rng_data */ + res = get_random_bytes(); + break; + + default: + qemu_log_mask(LOG_GUEST_ERROR, + "bcm2835_rng_read: Bad offset %x\n", + (int)offset); + res = 0; + break; + } + + return res; +} + +static void bcm2835_rng_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + BCM2835RngState *s = (BCM2835RngState *)opaque; + + assert(size == 4); + + switch (offset) { + case 0x0: /* rng_ctrl */ + s->rng_ctrl = value; + break; + case 0x4: /* rng_status */ + /* we shouldn't let the guest write to bits [31..20] */ + s->rng_status &= ~0xFFFFF; /* clear 20 lower bits */ + s->rng_status |= value & 0xFFFFF; /* set them to new value */ + break; + + default: + qemu_log_mask(LOG_GUEST_ERROR, + "bcm2835_rng_write: Bad offset %x\n", + (int)offset); + break; + } +} + +static const MemoryRegionOps bcm2835_rng_ops = { + .read = bcm2835_rng_read, + .write = bcm2835_rng_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static const VMStateDescription vmstate_bcm2835_rng = { + .name = TYPE_BCM2835_RNG, + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(rng_ctrl, BCM2835RngState), + VMSTATE_UINT32(rng_status, BCM2835RngState), + VMSTATE_END_OF_LIST() + } +}; + +static void bcm2835_rng_init(Object *obj) +{ + BCM2835RngState *s = BCM2835_RNG(obj); + + memory_region_init_io(&s->iomem, obj, &bcm2835_rng_ops, s, + TYPE_BCM2835_RNG, 0x10); + sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->iomem); +} + +static void bcm2835_rng_reset(DeviceState *dev) +{ + BCM2835RngState *s = BCM2835_RNG(dev); + + s->rng_ctrl = 0; + s->rng_status = 0; +} + +static void bcm2835_rng_class_init(ObjectClass 
*klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = bcm2835_rng_reset; + dc->vmsd = &vmstate_bcm2835_rng; +} + +static TypeInfo bcm2835_rng_info = { + .name = TYPE_BCM2835_RNG, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(BCM2835RngState), + .class_init = bcm2835_rng_class_init, + .instance_init = bcm2835_rng_init, +}; + +static void bcm2835_rng_register_types(void) +{ + type_register_static(&bcm2835_rng_info); +} + +type_init(bcm2835_rng_register_types) diff --git a/hw/misc/bcm2835_thermal.c b/hw/misc/bcm2835_thermal.c new file mode 100644 index 000000000..c6f3b1ad6 --- /dev/null +++ b/hw/misc/bcm2835_thermal.c @@ -0,0 +1,135 @@ +/* + * BCM2835 dummy thermal sensor + * + * Copyright (C) 2019 Philippe Mathieu-Daudé + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "qapi/error.h" +#include "hw/misc/bcm2835_thermal.h" +#include "hw/registerfields.h" +#include "migration/vmstate.h" + +REG32(CTL, 0) +FIELD(CTL, POWER_DOWN, 0, 1) +FIELD(CTL, RESET, 1, 1) +FIELD(CTL, BANDGAP_CTRL, 2, 3) +FIELD(CTL, INTERRUPT_ENABLE, 5, 1) +FIELD(CTL, DIRECT, 6, 1) +FIELD(CTL, INTERRUPT_CLEAR, 7, 1) +FIELD(CTL, HOLD, 8, 10) +FIELD(CTL, RESET_DELAY, 18, 8) +FIELD(CTL, REGULATOR_ENABLE, 26, 1) + +REG32(STAT, 4) +FIELD(STAT, DATA, 0, 10) +FIELD(STAT, VALID, 10, 1) +FIELD(STAT, INTERRUPT, 11, 1) + +#define THERMAL_OFFSET_C 412 +#define THERMAL_COEFF (-0.538f) + +static uint16_t bcm2835_thermal_temp2adc(int temp_C) +{ + return (temp_C - THERMAL_OFFSET_C) / THERMAL_COEFF; +} + +static uint64_t bcm2835_thermal_read(void *opaque, hwaddr addr, unsigned size) +{ + Bcm2835ThermalState *s = BCM2835_THERMAL(opaque); + uint32_t val = 0; + + switch (addr) { + case A_CTL: + val = s->ctl; + break; + case A_STAT: + /* Temperature is constantly 25°C. */ + val = FIELD_DP32(bcm2835_thermal_temp2adc(25), STAT, VALID, true); + break; + default: + /* MemoryRegionOps are aligned, so this can not happen. */ + g_assert_not_reached(); + } + return val; +} + +static void bcm2835_thermal_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + Bcm2835ThermalState *s = BCM2835_THERMAL(opaque); + + switch (addr) { + case A_CTL: + s->ctl = value; + break; + case A_STAT: + qemu_log_mask(LOG_GUEST_ERROR, "%s: write 0x%" PRIx64 + " to 0x%" HWADDR_PRIx "\n", + __func__, value, addr); + break; + default: + /* MemoryRegionOps are aligned, so this can not happen. 
*/ + g_assert_not_reached(); + } +} + +static const MemoryRegionOps bcm2835_thermal_ops = { + .read = bcm2835_thermal_read, + .write = bcm2835_thermal_write, + .impl.max_access_size = 4, + .valid.min_access_size = 4, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void bcm2835_thermal_reset(DeviceState *dev) +{ + Bcm2835ThermalState *s = BCM2835_THERMAL(dev); + + s->ctl = 0; +} + +static void bcm2835_thermal_realize(DeviceState *dev, Error **errp) +{ + Bcm2835ThermalState *s = BCM2835_THERMAL(dev); + + memory_region_init_io(&s->iomem, OBJECT(s), &bcm2835_thermal_ops, + s, TYPE_BCM2835_THERMAL, 8); + sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->iomem); +} + +static const VMStateDescription bcm2835_thermal_vmstate = { + .name = "bcm2835_thermal", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(ctl, Bcm2835ThermalState), + VMSTATE_END_OF_LIST() + } +}; + +static void bcm2835_thermal_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = bcm2835_thermal_realize; + dc->reset = bcm2835_thermal_reset; + dc->vmsd = &bcm2835_thermal_vmstate; +} + +static const TypeInfo bcm2835_thermal_info = { + .name = TYPE_BCM2835_THERMAL, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(Bcm2835ThermalState), + .class_init = bcm2835_thermal_class_init, +}; + +static void bcm2835_thermal_register_types(void) +{ + type_register_static(&bcm2835_thermal_info); +} + +type_init(bcm2835_thermal_register_types) diff --git a/hw/misc/cbus.c b/hw/misc/cbus.c new file mode 100644 index 000000000..3c3721ad2 --- /dev/null +++ b/hw/misc/cbus.c @@ -0,0 +1,619 @@ +/* + * CBUS three-pin bus and the Retu / Betty / Tahvo / Vilma / Avilma / + * Hinku / Vinku / Ahne / Pihi chips used in various Nokia platforms. + * Based on reverse-engineering of a linux driver. + * + * Copyright (C) 2008 Nokia Corporation + * Written by Andrzej Zaborowski + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 or + * (at your option) version 3 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . 
+ */ + +#include "qemu/osdep.h" +#include "hw/hw.h" +#include "hw/irq.h" +#include "hw/misc/cbus.h" +#include "sysemu/runstate.h" + +//#define DEBUG + +typedef struct { + void *opaque; + void (*io)(void *opaque, int rw, int reg, uint16_t *val); + int addr; +} CBusSlave; + +typedef struct { + CBus cbus; + + int sel; + int dat; + int clk; + int bit; + int dir; + uint16_t val; + qemu_irq dat_out; + + int addr; + int reg; + int rw; + enum { + cbus_address, + cbus_value, + } cycle; + + CBusSlave *slave[8]; +} CBusPriv; + +static void cbus_io(CBusPriv *s) +{ + if (s->slave[s->addr]) + s->slave[s->addr]->io(s->slave[s->addr]->opaque, + s->rw, s->reg, &s->val); + else + hw_error("%s: bad slave address %i\n", __func__, s->addr); +} + +static void cbus_cycle(CBusPriv *s) +{ + switch (s->cycle) { + case cbus_address: + s->addr = (s->val >> 6) & 7; + s->rw = (s->val >> 5) & 1; + s->reg = (s->val >> 0) & 0x1f; + + s->cycle = cbus_value; + s->bit = 15; + s->dir = !s->rw; + s->val = 0; + + if (s->rw) + cbus_io(s); + break; + + case cbus_value: + if (!s->rw) + cbus_io(s); + + s->cycle = cbus_address; + s->bit = 8; + s->dir = 1; + s->val = 0; + break; + } +} + +static void cbus_clk(void *opaque, int line, int level) +{ + CBusPriv *s = (CBusPriv *) opaque; + + if (!s->sel && level && !s->clk) { + if (s->dir) + s->val |= s->dat << (s->bit --); + else + qemu_set_irq(s->dat_out, (s->val >> (s->bit --)) & 1); + + if (s->bit < 0) + cbus_cycle(s); + } + + s->clk = level; +} + +static void cbus_dat(void *opaque, int line, int level) +{ + CBusPriv *s = (CBusPriv *) opaque; + + s->dat = level; +} + +static void cbus_sel(void *opaque, int line, int level) +{ + CBusPriv *s = (CBusPriv *) opaque; + + if (!level) { + s->dir = 1; + s->bit = 8; + s->val = 0; + } + + s->sel = level; +} + +CBus *cbus_init(qemu_irq dat) +{ + CBusPriv *s = (CBusPriv *) g_malloc0(sizeof(*s)); + + s->dat_out = dat; + s->cbus.clk = qemu_allocate_irq(cbus_clk, s, 0); + s->cbus.dat = qemu_allocate_irq(cbus_dat, s, 0); + s->cbus.sel = qemu_allocate_irq(cbus_sel, s, 0); + + s->sel = 1; + s->clk = 0; + s->dat = 0; + + return &s->cbus; +} + +void cbus_attach(CBus *bus, void *slave_opaque) +{ + CBusSlave *slave = (CBusSlave *) slave_opaque; + CBusPriv *s = (CBusPriv *) bus; + + s->slave[slave->addr] = slave; +} + +/* Retu/Vilma */ +typedef struct { + uint16_t irqst; + uint16_t irqen; + uint16_t cc[2]; + int channel; + uint16_t result[16]; + uint16_t sample; + uint16_t status; + + struct { + uint16_t cal; + } rtc; + + int is_vilma; + qemu_irq irq; + CBusSlave cbus; +} CBusRetu; + +static void retu_interrupt_update(CBusRetu *s) +{ + qemu_set_irq(s->irq, s->irqst & ~s->irqen); +} + +#define RETU_REG_ASICR 0x00 /* (RO) ASIC ID & revision */ +#define RETU_REG_IDR 0x01 /* (T) Interrupt ID */ +#define RETU_REG_IMR 0x02 /* (RW) Interrupt mask */ +#define RETU_REG_RTCDSR 0x03 /* (RW) RTC seconds register */ +#define RETU_REG_RTCHMR 0x04 /* (RO) RTC hours and minutes reg */ +#define RETU_REG_RTCHMAR 0x05 /* (RW) RTC hours and minutes set reg */ +#define RETU_REG_RTCCALR 0x06 /* (RW) RTC calibration register */ +#define RETU_REG_ADCR 0x08 /* (RW) ADC result register */ +#define RETU_REG_ADCSCR 0x09 /* (RW) ADC sample control register */ +#define RETU_REG_AFCR 0x0a /* (RW) AFC register */ +#define RETU_REG_ANTIFR 0x0b /* (RW) AntiF register */ +#define RETU_REG_CALIBR 0x0c /* (RW) CalibR register*/ +#define RETU_REG_CCR1 0x0d /* (RW) Common control register 1 */ +#define RETU_REG_CCR2 0x0e /* (RW) Common control register 2 */ +#define RETU_REG_RCTRL_CLR 0x0f /* 
(T) Regulator clear register */ +#define RETU_REG_RCTRL_SET 0x10 /* (T) Regulator set register */ +#define RETU_REG_TXCR 0x11 /* (RW) TxC register */ +#define RETU_REG_STATUS 0x16 /* (RO) Status register */ +#define RETU_REG_WATCHDOG 0x17 /* (RW) Watchdog register */ +#define RETU_REG_AUDTXR 0x18 /* (RW) Audio Codec Tx register */ +#define RETU_REG_AUDPAR 0x19 /* (RW) AudioPA register */ +#define RETU_REG_AUDRXR1 0x1a /* (RW) Audio receive register 1 */ +#define RETU_REG_AUDRXR2 0x1b /* (RW) Audio receive register 2 */ +#define RETU_REG_SGR1 0x1c /* (RW) */ +#define RETU_REG_SCR1 0x1d /* (RW) */ +#define RETU_REG_SGR2 0x1e /* (RW) */ +#define RETU_REG_SCR2 0x1f /* (RW) */ + +/* Retu Interrupt sources */ +enum { + retu_int_pwr = 0, /* Power button */ + retu_int_char = 1, /* Charger */ + retu_int_rtcs = 2, /* Seconds */ + retu_int_rtcm = 3, /* Minutes */ + retu_int_rtcd = 4, /* Days */ + retu_int_rtca = 5, /* Alarm */ + retu_int_hook = 6, /* Hook */ + retu_int_head = 7, /* Headset */ + retu_int_adcs = 8, /* ADC sample */ +}; + +/* Retu ADC channel wiring */ +enum { + retu_adc_bsi = 1, /* BSI */ + retu_adc_batt_temp = 2, /* Battery temperature */ + retu_adc_chg_volt = 3, /* Charger voltage */ + retu_adc_head_det = 4, /* Headset detection */ + retu_adc_hook_det = 5, /* Hook detection */ + retu_adc_rf_gp = 6, /* RF GP */ + retu_adc_tx_det = 7, /* Wideband Tx detection */ + retu_adc_batt_volt = 8, /* Battery voltage */ + retu_adc_sens = 10, /* Light sensor */ + retu_adc_sens_temp = 11, /* Light sensor temperature */ + retu_adc_bbatt_volt = 12, /* Backup battery voltage */ + retu_adc_self_temp = 13, /* RETU temperature */ +}; + +static inline uint16_t retu_read(CBusRetu *s, int reg) +{ +#ifdef DEBUG + printf("RETU read at %02x\n", reg); +#endif + + switch (reg) { + case RETU_REG_ASICR: + return 0x0215 | (s->is_vilma << 7); + + case RETU_REG_IDR: /* TODO: Or is this ffs(s->irqst)? 
*/ + return s->irqst; + + case RETU_REG_IMR: + return s->irqen; + + case RETU_REG_RTCDSR: + case RETU_REG_RTCHMR: + case RETU_REG_RTCHMAR: + /* TODO */ + return 0x0000; + + case RETU_REG_RTCCALR: + return s->rtc.cal; + + case RETU_REG_ADCR: + return (s->channel << 10) | s->result[s->channel]; + case RETU_REG_ADCSCR: + return s->sample; + + case RETU_REG_AFCR: + case RETU_REG_ANTIFR: + case RETU_REG_CALIBR: + /* TODO */ + return 0x0000; + + case RETU_REG_CCR1: + return s->cc[0]; + case RETU_REG_CCR2: + return s->cc[1]; + + case RETU_REG_RCTRL_CLR: + case RETU_REG_RCTRL_SET: + case RETU_REG_TXCR: + /* TODO */ + return 0x0000; + + case RETU_REG_STATUS: + return s->status; + + case RETU_REG_WATCHDOG: + case RETU_REG_AUDTXR: + case RETU_REG_AUDPAR: + case RETU_REG_AUDRXR1: + case RETU_REG_AUDRXR2: + case RETU_REG_SGR1: + case RETU_REG_SCR1: + case RETU_REG_SGR2: + case RETU_REG_SCR2: + /* TODO */ + return 0x0000; + + default: + hw_error("%s: bad register %02x\n", __func__, reg); + } +} + +static inline void retu_write(CBusRetu *s, int reg, uint16_t val) +{ +#ifdef DEBUG + printf("RETU write of %04x at %02x\n", val, reg); +#endif + + switch (reg) { + case RETU_REG_IDR: + s->irqst ^= val; + retu_interrupt_update(s); + break; + + case RETU_REG_IMR: + s->irqen = val; + retu_interrupt_update(s); + break; + + case RETU_REG_RTCDSR: + case RETU_REG_RTCHMAR: + /* TODO */ + break; + + case RETU_REG_RTCCALR: + s->rtc.cal = val; + break; + + case RETU_REG_ADCR: + s->channel = (val >> 10) & 0xf; + s->irqst |= 1 << retu_int_adcs; + retu_interrupt_update(s); + break; + case RETU_REG_ADCSCR: + s->sample &= ~val; + break; + + case RETU_REG_AFCR: + case RETU_REG_ANTIFR: + case RETU_REG_CALIBR: + + case RETU_REG_CCR1: + s->cc[0] = val; + break; + case RETU_REG_CCR2: + s->cc[1] = val; + break; + + case RETU_REG_RCTRL_CLR: + case RETU_REG_RCTRL_SET: + /* TODO */ + break; + + case RETU_REG_WATCHDOG: + if (val == 0 && (s->cc[0] & 2)) + qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN); + break; + + case RETU_REG_TXCR: + case RETU_REG_AUDTXR: + case RETU_REG_AUDPAR: + case RETU_REG_AUDRXR1: + case RETU_REG_AUDRXR2: + case RETU_REG_SGR1: + case RETU_REG_SCR1: + case RETU_REG_SGR2: + case RETU_REG_SCR2: + /* TODO */ + break; + + default: + hw_error("%s: bad register %02x\n", __func__, reg); + } +} + +static void retu_io(void *opaque, int rw, int reg, uint16_t *val) +{ + CBusRetu *s = (CBusRetu *) opaque; + + if (rw) + *val = retu_read(s, reg); + else + retu_write(s, reg, *val); +} + +void *retu_init(qemu_irq irq, int vilma) +{ + CBusRetu *s = (CBusRetu *) g_malloc0(sizeof(*s)); + + s->irq = irq; + s->irqen = 0xffff; + s->irqst = 0x0000; + s->status = 0x0020; + s->is_vilma = !!vilma; + s->rtc.cal = 0x01; + s->result[retu_adc_bsi] = 0x3c2; + s->result[retu_adc_batt_temp] = 0x0fc; + s->result[retu_adc_chg_volt] = 0x165; + s->result[retu_adc_head_det] = 123; + s->result[retu_adc_hook_det] = 1023; + s->result[retu_adc_rf_gp] = 0x11; + s->result[retu_adc_tx_det] = 0x11; + s->result[retu_adc_batt_volt] = 0x250; + s->result[retu_adc_sens] = 2; + s->result[retu_adc_sens_temp] = 0x11; + s->result[retu_adc_bbatt_volt] = 0x3d0; + s->result[retu_adc_self_temp] = 0x330; + + s->cbus.opaque = s; + s->cbus.io = retu_io; + s->cbus.addr = 1; + + return &s->cbus; +} + +void retu_key_event(void *retu, int state) +{ + CBusSlave *slave = (CBusSlave *) retu; + CBusRetu *s = (CBusRetu *) slave->opaque; + + s->irqst |= 1 << retu_int_pwr; + retu_interrupt_update(s); + + if (state) + s->status &= ~(1 << 5); + else + s->status |= 1 << 5; 
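+
+    /*
+     * Editor's note: STATUS bit 5 appears to track the power key level,
+     * active low (a press clears it, a release sets it); the
+     * retu_int_pwr interrupt raised above tells the guest driver to
+     * resample it.
+     */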
+} + +#if 0 +static void retu_head_event(void *retu, int state) +{ + CBusSlave *slave = (CBusSlave *) retu; + CBusRetu *s = (CBusRetu *) slave->opaque; + + if ((s->cc[0] & 0x500) == 0x500) { /* TODO: Which bits? */ + /* TODO: reissue the interrupt every 100ms or so. */ + s->irqst |= 1 << retu_int_head; + retu_interrupt_update(s); + } + + if (state) + s->result[retu_adc_head_det] = 50; + else + s->result[retu_adc_head_det] = 123; +} + +static void retu_hook_event(void *retu, int state) +{ + CBusSlave *slave = (CBusSlave *) retu; + CBusRetu *s = (CBusRetu *) slave->opaque; + + if ((s->cc[0] & 0x500) == 0x500) { + /* TODO: reissue the interrupt every 100ms or so. */ + s->irqst |= 1 << retu_int_hook; + retu_interrupt_update(s); + } + + if (state) + s->result[retu_adc_hook_det] = 50; + else + s->result[retu_adc_hook_det] = 123; +} +#endif + +/* Tahvo/Betty */ +typedef struct { + uint16_t irqst; + uint16_t irqen; + uint8_t charger; + uint8_t backlight; + uint16_t usbr; + uint16_t power; + + int is_betty; + qemu_irq irq; + CBusSlave cbus; +} CBusTahvo; + +static void tahvo_interrupt_update(CBusTahvo *s) +{ + qemu_set_irq(s->irq, s->irqst & ~s->irqen); +} + +#define TAHVO_REG_ASICR 0x00 /* (RO) ASIC ID & revision */ +#define TAHVO_REG_IDR 0x01 /* (T) Interrupt ID */ +#define TAHVO_REG_IDSR 0x02 /* (RO) Interrupt status */ +#define TAHVO_REG_IMR 0x03 /* (RW) Interrupt mask */ +#define TAHVO_REG_CHAPWMR 0x04 /* (RW) Charger PWM */ +#define TAHVO_REG_LEDPWMR 0x05 /* (RW) LED PWM */ +#define TAHVO_REG_USBR 0x06 /* (RW) USB control */ +#define TAHVO_REG_RCR 0x07 /* (RW) Some kind of power management */ +#define TAHVO_REG_CCR1 0x08 /* (RW) Common control register 1 */ +#define TAHVO_REG_CCR2 0x09 /* (RW) Common control register 2 */ +#define TAHVO_REG_TESTR1 0x0a /* (RW) Test register 1 */ +#define TAHVO_REG_TESTR2 0x0b /* (RW) Test register 2 */ +#define TAHVO_REG_NOPR 0x0c /* (RW) Number of periods */ +#define TAHVO_REG_FRR 0x0d /* (RO) FR */ + +static inline uint16_t tahvo_read(CBusTahvo *s, int reg) +{ +#ifdef DEBUG + printf("TAHVO read at %02x\n", reg); +#endif + + switch (reg) { + case TAHVO_REG_ASICR: + return 0x0021 | (s->is_betty ? 0x0b00 : 0x0300); /* 22 in N810 */ + + case TAHVO_REG_IDR: + case TAHVO_REG_IDSR: /* XXX: what does this do? 
*/ + return s->irqst; + + case TAHVO_REG_IMR: + return s->irqen; + + case TAHVO_REG_CHAPWMR: + return s->charger; + + case TAHVO_REG_LEDPWMR: + return s->backlight; + + case TAHVO_REG_USBR: + return s->usbr; + + case TAHVO_REG_RCR: + return s->power; + + case TAHVO_REG_CCR1: + case TAHVO_REG_CCR2: + case TAHVO_REG_TESTR1: + case TAHVO_REG_TESTR2: + case TAHVO_REG_NOPR: + case TAHVO_REG_FRR: + return 0x0000; + + default: + hw_error("%s: bad register %02x\n", __func__, reg); + } +} + +static inline void tahvo_write(CBusTahvo *s, int reg, uint16_t val) +{ +#ifdef DEBUG + printf("TAHVO write of %04x at %02x\n", val, reg); +#endif + + switch (reg) { + case TAHVO_REG_IDR: + s->irqst ^= val; + tahvo_interrupt_update(s); + break; + + case TAHVO_REG_IMR: + s->irqen = val; + tahvo_interrupt_update(s); + break; + + case TAHVO_REG_CHAPWMR: + s->charger = val; + break; + + case TAHVO_REG_LEDPWMR: + if (s->backlight != (val & 0x7f)) { + s->backlight = val & 0x7f; + printf("%s: LCD backlight now at %i / 127\n", + __func__, s->backlight); + } + break; + + case TAHVO_REG_USBR: + s->usbr = val; + break; + + case TAHVO_REG_RCR: + s->power = val; + break; + + case TAHVO_REG_CCR1: + case TAHVO_REG_CCR2: + case TAHVO_REG_TESTR1: + case TAHVO_REG_TESTR2: + case TAHVO_REG_NOPR: + case TAHVO_REG_FRR: + break; + + default: + hw_error("%s: bad register %02x\n", __func__, reg); + } +} + +static void tahvo_io(void *opaque, int rw, int reg, uint16_t *val) +{ + CBusTahvo *s = (CBusTahvo *) opaque; + + if (rw) + *val = tahvo_read(s, reg); + else + tahvo_write(s, reg, *val); +} + +void *tahvo_init(qemu_irq irq, int betty) +{ + CBusTahvo *s = (CBusTahvo *) g_malloc0(sizeof(*s)); + + s->irq = irq; + s->irqen = 0xffff; + s->irqst = 0x0000; + s->is_betty = !!betty; + + s->cbus.opaque = s; + s->cbus.io = tahvo_io; + s->cbus.addr = 2; + + return &s->cbus; +} diff --git a/hw/misc/debugexit.c b/hw/misc/debugexit.c new file mode 100644 index 000000000..ab6de69ce --- /dev/null +++ b/hw/misc/debugexit.c @@ -0,0 +1,84 @@ +/* + * debug exit port emulation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 or + * (at your option) any later version. 
+ */ + +#include "qemu/osdep.h" +#include "hw/isa/isa.h" +#include "hw/qdev-properties.h" +#include "qemu/module.h" +#include "qom/object.h" + +#define TYPE_ISA_DEBUG_EXIT_DEVICE "isa-debug-exit" +OBJECT_DECLARE_SIMPLE_TYPE(ISADebugExitState, ISA_DEBUG_EXIT_DEVICE) + +struct ISADebugExitState { + ISADevice parent_obj; + + uint32_t iobase; + uint32_t iosize; + MemoryRegion io; +}; + +static uint64_t debug_exit_read(void *opaque, hwaddr addr, unsigned size) +{ + return 0; +} + +static void debug_exit_write(void *opaque, hwaddr addr, uint64_t val, + unsigned width) +{ + exit((val << 1) | 1); +} + +static const MemoryRegionOps debug_exit_ops = { + .read = debug_exit_read, + .write = debug_exit_write, + .valid.min_access_size = 1, + .valid.max_access_size = 4, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static void debug_exit_realizefn(DeviceState *d, Error **errp) +{ + ISADevice *dev = ISA_DEVICE(d); + ISADebugExitState *isa = ISA_DEBUG_EXIT_DEVICE(d); + + memory_region_init_io(&isa->io, OBJECT(dev), &debug_exit_ops, isa, + TYPE_ISA_DEBUG_EXIT_DEVICE, isa->iosize); + memory_region_add_subregion(isa_address_space_io(dev), + isa->iobase, &isa->io); +} + +static Property debug_exit_properties[] = { + DEFINE_PROP_UINT32("iobase", ISADebugExitState, iobase, 0x501), + DEFINE_PROP_UINT32("iosize", ISADebugExitState, iosize, 0x02), + DEFINE_PROP_END_OF_LIST(), +}; + +static void debug_exit_class_initfn(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = debug_exit_realizefn; + device_class_set_props(dc, debug_exit_properties); + set_bit(DEVICE_CATEGORY_MISC, dc->categories); +} + +static const TypeInfo debug_exit_info = { + .name = TYPE_ISA_DEBUG_EXIT_DEVICE, + .parent = TYPE_ISA_DEVICE, + .instance_size = sizeof(ISADebugExitState), + .class_init = debug_exit_class_initfn, +}; + +static void debug_exit_register_types(void) +{ + type_register_static(&debug_exit_info); +} + +type_init(debug_exit_register_types) diff --git a/hw/misc/eccmemctl.c b/hw/misc/eccmemctl.c new file mode 100644 index 000000000..c65806e3d --- /dev/null +++ b/hw/misc/eccmemctl.c @@ -0,0 +1,357 @@ +/* + * QEMU Sparc Sun4m ECC memory controller emulation + * + * Copyright (c) 2007 Robert Reif + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include "hw/irq.h" +#include "hw/qdev-properties.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "qemu/module.h" +#include "trace.h" +#include "qom/object.h" + +/* There are 3 versions of this chip used in SMP sun4m systems: + * MCC (version 0, implementation 0) SS-600MP + * EMC (version 0, implementation 1) SS-10 + * SMC (version 0, implementation 2) SS-10SX and SS-20 + * + * Chipset docs: + * "Sun-4M System Architecture (revision 2.0) by Chuck Narad", 950-1373-01, + * http://mediacast.sun.com/users/Barton808/media/Sun4M_SystemArchitecture_edited2.pdf + */ + +#define ECC_MCC 0x00000000 +#define ECC_EMC 0x10000000 +#define ECC_SMC 0x20000000 + +/* Register indexes */ +#define ECC_MER 0 /* Memory Enable Register */ +#define ECC_MDR 1 /* Memory Delay Register */ +#define ECC_MFSR 2 /* Memory Fault Status Register */ +#define ECC_VCR 3 /* Video Configuration Register */ +#define ECC_MFAR0 4 /* Memory Fault Address Register 0 */ +#define ECC_MFAR1 5 /* Memory Fault Address Register 1 */ +#define ECC_DR 6 /* Diagnostic Register */ +#define ECC_ECR0 7 /* Event Count Register 0 */ +#define ECC_ECR1 8 /* Event Count Register 1 */ + +/* ECC fault control register */ +#define ECC_MER_EE 0x00000001 /* Enable ECC checking */ +#define ECC_MER_EI 0x00000002 /* Enable Interrupts on + correctable errors */ +#define ECC_MER_MRR0 0x00000004 /* SIMM 0 */ +#define ECC_MER_MRR1 0x00000008 /* SIMM 1 */ +#define ECC_MER_MRR2 0x00000010 /* SIMM 2 */ +#define ECC_MER_MRR3 0x00000020 /* SIMM 3 */ +#define ECC_MER_MRR4 0x00000040 /* SIMM 4 */ +#define ECC_MER_MRR5 0x00000080 /* SIMM 5 */ +#define ECC_MER_MRR6 0x00000100 /* SIMM 6 */ +#define ECC_MER_MRR7 0x00000200 /* SIMM 7 */ +#define ECC_MER_REU 0x00000100 /* Memory Refresh Enable (600MP) */ +#define ECC_MER_MRR 0x000003fc /* MRR mask */ +#define ECC_MER_A 0x00000400 /* Memory controller addr map select */ +#define ECC_MER_DCI 0x00000800 /* Disables Coherent Invalidate ACK */ +#define ECC_MER_VER 0x0f000000 /* Version */ +#define ECC_MER_IMPL 0xf0000000 /* Implementation */ +#define ECC_MER_MASK_0 0x00000103 /* Version 0 (MCC) mask */ +#define ECC_MER_MASK_1 0x00000bff /* Version 1 (EMC) mask */ +#define ECC_MER_MASK_2 0x00000bff /* Version 2 (SMC) mask */ + +/* ECC memory delay register */ +#define ECC_MDR_RRI 0x000003ff /* Refresh Request Interval */ +#define ECC_MDR_MI 0x00001c00 /* MIH Delay */ +#define ECC_MDR_CI 0x0000e000 /* Coherent Invalidate Delay */ +#define ECC_MDR_MDL 0x001f0000 /* MBus Master arbitration delay */ +#define ECC_MDR_MDH 0x03e00000 /* MBus Master arbitration delay */ +#define ECC_MDR_GAD 0x7c000000 /* Graphics Arbitration Delay */ +#define ECC_MDR_RSC 0x80000000 /* Refresh load control */ +#define ECC_MDR_MASK 0x7fffffff + +/* ECC fault status register */ +#define ECC_MFSR_CE 0x00000001 /* Correctable error */ +#define ECC_MFSR_BS 0x00000002 /* C2 graphics bad slot access */ +#define ECC_MFSR_TO 0x00000004 /* Timeout on write */ +#define ECC_MFSR_UE 0x00000008 /* Uncorrectable error */ +#define ECC_MFSR_DW 0x000000f0 /* Index of double word in block */ +#define ECC_MFSR_SYND 0x0000ff00 /* Syndrome for correctable error */ +#define ECC_MFSR_ME 0x00010000 /* Multiple errors */ +#define ECC_MFSR_C2ERR 0x00020000 /* C2 graphics error */ + +/* ECC fault address register 0 */ +#define ECC_MFAR0_PADDR 0x0000000f /* PA[32-35] */ +#define ECC_MFAR0_TYPE 0x000000f0 /* Transaction type */ +#define ECC_MFAR0_SIZE 0x00000700 /* Transaction size */ +#define ECC_MFAR0_CACHE 0x00000800 /* Mapped 
cacheable */ +#define ECC_MFAR0_LOCK 0x00001000 /* Error occurred in atomic cycle */ +#define ECC_MFAR0_BMODE 0x00002000 /* Boot mode */ +#define ECC_MFAR0_VADDR 0x003fc000 /* VA[12-19] (superset bits) */ +#define ECC_MFAR0_S 0x08000000 /* Supervisor mode */ +#define ECC_MFARO_MID 0xf0000000 /* Module ID */ + +/* ECC diagnostic register */ +#define ECC_DR_CBX 0x00000001 +#define ECC_DR_CB0 0x00000002 +#define ECC_DR_CB1 0x00000004 +#define ECC_DR_CB2 0x00000008 +#define ECC_DR_CB4 0x00000010 +#define ECC_DR_CB8 0x00000020 +#define ECC_DR_CB16 0x00000040 +#define ECC_DR_CB32 0x00000080 +#define ECC_DR_DMODE 0x00000c00 + +#define ECC_NREGS 9 +#define ECC_SIZE (ECC_NREGS * sizeof(uint32_t)) + +#define ECC_DIAG_SIZE 4 +#define ECC_DIAG_MASK (ECC_DIAG_SIZE - 1) + +#define TYPE_ECC_MEMCTL "eccmemctl" +OBJECT_DECLARE_SIMPLE_TYPE(ECCState, ECC_MEMCTL) + +struct ECCState { + SysBusDevice parent_obj; + + MemoryRegion iomem, iomem_diag; + qemu_irq irq; + uint32_t regs[ECC_NREGS]; + uint8_t diag[ECC_DIAG_SIZE]; + uint32_t version; +}; + +static void ecc_mem_write(void *opaque, hwaddr addr, uint64_t val, + unsigned size) +{ + ECCState *s = opaque; + + switch (addr >> 2) { + case ECC_MER: + if (s->version == ECC_MCC) + s->regs[ECC_MER] = (val & ECC_MER_MASK_0); + else if (s->version == ECC_EMC) + s->regs[ECC_MER] = s->version | (val & ECC_MER_MASK_1); + else if (s->version == ECC_SMC) + s->regs[ECC_MER] = s->version | (val & ECC_MER_MASK_2); + trace_ecc_mem_writel_mer(val); + break; + case ECC_MDR: + s->regs[ECC_MDR] = val & ECC_MDR_MASK; + trace_ecc_mem_writel_mdr(val); + break; + case ECC_MFSR: + s->regs[ECC_MFSR] = val; + qemu_irq_lower(s->irq); + trace_ecc_mem_writel_mfsr(val); + break; + case ECC_VCR: + s->regs[ECC_VCR] = val; + trace_ecc_mem_writel_vcr(val); + break; + case ECC_DR: + s->regs[ECC_DR] = val; + trace_ecc_mem_writel_dr(val); + break; + case ECC_ECR0: + s->regs[ECC_ECR0] = val; + trace_ecc_mem_writel_ecr0(val); + break; + case ECC_ECR1: + s->regs[ECC_ECR0] = val; + trace_ecc_mem_writel_ecr1(val); + break; + } +} + +static uint64_t ecc_mem_read(void *opaque, hwaddr addr, + unsigned size) +{ + ECCState *s = opaque; + uint32_t ret = 0; + + switch (addr >> 2) { + case ECC_MER: + ret = s->regs[ECC_MER]; + trace_ecc_mem_readl_mer(ret); + break; + case ECC_MDR: + ret = s->regs[ECC_MDR]; + trace_ecc_mem_readl_mdr(ret); + break; + case ECC_MFSR: + ret = s->regs[ECC_MFSR]; + trace_ecc_mem_readl_mfsr(ret); + break; + case ECC_VCR: + ret = s->regs[ECC_VCR]; + trace_ecc_mem_readl_vcr(ret); + break; + case ECC_MFAR0: + ret = s->regs[ECC_MFAR0]; + trace_ecc_mem_readl_mfar0(ret); + break; + case ECC_MFAR1: + ret = s->regs[ECC_MFAR1]; + trace_ecc_mem_readl_mfar1(ret); + break; + case ECC_DR: + ret = s->regs[ECC_DR]; + trace_ecc_mem_readl_dr(ret); + break; + case ECC_ECR0: + ret = s->regs[ECC_ECR0]; + trace_ecc_mem_readl_ecr0(ret); + break; + case ECC_ECR1: + ret = s->regs[ECC_ECR0]; + trace_ecc_mem_readl_ecr1(ret); + break; + } + return ret; +} + +static const MemoryRegionOps ecc_mem_ops = { + .read = ecc_mem_read, + .write = ecc_mem_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +static void ecc_diag_mem_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + ECCState *s = opaque; + + trace_ecc_diag_mem_writeb(addr, val); + s->diag[addr & ECC_DIAG_MASK] = val; +} + +static uint64_t ecc_diag_mem_read(void *opaque, hwaddr addr, + unsigned size) +{ + ECCState *s = opaque; + uint32_t ret = s->diag[(int)addr]; + + 
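+    /*
+     * Editor's note: unlike ecc_diag_mem_write() this read indexes
+     * s->diag[] with the raw offset instead of (addr & ECC_DIAG_MASK);
+     * that is only safe because the "ecc.diag" region is registered
+     * with size ECC_DIAG_SIZE, so addr cannot step outside the array.
+     */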
trace_ecc_diag_mem_readb(addr, ret); + return ret; +} + +static const MemoryRegionOps ecc_diag_mem_ops = { + .read = ecc_diag_mem_read, + .write = ecc_diag_mem_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + .min_access_size = 1, + .max_access_size = 1, + }, +}; + +static const VMStateDescription vmstate_ecc = { + .name ="ECC", + .version_id = 3, + .minimum_version_id = 3, + .fields = (VMStateField[]) { + VMSTATE_UINT32_ARRAY(regs, ECCState, ECC_NREGS), + VMSTATE_BUFFER(diag, ECCState), + VMSTATE_UINT32(version, ECCState), + VMSTATE_END_OF_LIST() + } +}; + +static void ecc_reset(DeviceState *d) +{ + ECCState *s = ECC_MEMCTL(d); + + if (s->version == ECC_MCC) { + s->regs[ECC_MER] &= ECC_MER_REU; + } else { + s->regs[ECC_MER] &= (ECC_MER_VER | ECC_MER_IMPL | ECC_MER_MRR | + ECC_MER_DCI); + } + s->regs[ECC_MDR] = 0x20; + s->regs[ECC_MFSR] = 0; + s->regs[ECC_VCR] = 0; + s->regs[ECC_MFAR0] = 0x07c00000; + s->regs[ECC_MFAR1] = 0; + s->regs[ECC_DR] = 0; + s->regs[ECC_ECR0] = 0; + s->regs[ECC_ECR1] = 0; +} + +static void ecc_init(Object *obj) +{ + ECCState *s = ECC_MEMCTL(obj); + SysBusDevice *dev = SYS_BUS_DEVICE(obj); + + sysbus_init_irq(dev, &s->irq); + + memory_region_init_io(&s->iomem, obj, &ecc_mem_ops, s, "ecc", ECC_SIZE); + sysbus_init_mmio(dev, &s->iomem); +} + +static void ecc_realize(DeviceState *dev, Error **errp) +{ + ECCState *s = ECC_MEMCTL(dev); + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + + s->regs[0] = s->version; + + if (s->version == ECC_MCC) { // SS-600MP only + memory_region_init_io(&s->iomem_diag, OBJECT(dev), &ecc_diag_mem_ops, s, + "ecc.diag", ECC_DIAG_SIZE); + sysbus_init_mmio(sbd, &s->iomem_diag); + } +} + +static Property ecc_properties[] = { + DEFINE_PROP_UINT32("version", ECCState, version, -1), + DEFINE_PROP_END_OF_LIST(), +}; + +static void ecc_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = ecc_realize; + dc->reset = ecc_reset; + dc->vmsd = &vmstate_ecc; + device_class_set_props(dc, ecc_properties); +} + +static const TypeInfo ecc_info = { + .name = TYPE_ECC_MEMCTL, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(ECCState), + .instance_init = ecc_init, + .class_init = ecc_class_init, +}; + + +static void ecc_register_types(void) +{ + type_register_static(&ecc_info); +} + +type_init(ecc_register_types) diff --git a/hw/misc/edu.c b/hw/misc/edu.c new file mode 100644 index 000000000..e935c418d --- /dev/null +++ b/hw/misc/edu.c @@ -0,0 +1,442 @@ +/* + * QEMU educational PCI device + * + * Copyright (c) 2012-2015 Jiri Slaby + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "hw/pci/pci.h" +#include "hw/hw.h" +#include "hw/pci/msi.h" +#include "qemu/timer.h" +#include "qom/object.h" +#include "qemu/main-loop.h" /* iothread mutex */ +#include "qemu/module.h" +#include "qapi/visitor.h" + +#define TYPE_PCI_EDU_DEVICE "edu" +typedef struct EduState EduState; +DECLARE_INSTANCE_CHECKER(EduState, EDU, + TYPE_PCI_EDU_DEVICE) + +#define FACT_IRQ 0x00000001 +#define DMA_IRQ 0x00000100 + +#define DMA_START 0x40000 +#define DMA_SIZE 4096 + +struct EduState { + PCIDevice pdev; + MemoryRegion mmio; + + QemuThread thread; + QemuMutex thr_mutex; + QemuCond thr_cond; + bool stopping; + + uint32_t addr4; + uint32_t fact; +#define EDU_STATUS_COMPUTING 0x01 +#define EDU_STATUS_IRQFACT 0x80 + uint32_t status; + + uint32_t irq_status; + +#define EDU_DMA_RUN 0x1 +#define EDU_DMA_DIR(cmd) (((cmd) & 0x2) >> 1) +# define EDU_DMA_FROM_PCI 0 +# define EDU_DMA_TO_PCI 1 +#define EDU_DMA_IRQ 0x4 + struct dma_state { + dma_addr_t src; + dma_addr_t dst; + dma_addr_t cnt; + dma_addr_t cmd; + } dma; + QEMUTimer dma_timer; + char dma_buf[DMA_SIZE]; + uint64_t dma_mask; +}; + +static bool edu_msi_enabled(EduState *edu) +{ + return msi_enabled(&edu->pdev); +} + +static void edu_raise_irq(EduState *edu, uint32_t val) +{ + edu->irq_status |= val; + if (edu->irq_status) { + if (edu_msi_enabled(edu)) { + msi_notify(&edu->pdev, 0); + } else { + pci_set_irq(&edu->pdev, 1); + } + } +} + +static void edu_lower_irq(EduState *edu, uint32_t val) +{ + edu->irq_status &= ~val; + + if (!edu->irq_status && !edu_msi_enabled(edu)) { + pci_set_irq(&edu->pdev, 0); + } +} + +static bool within(uint64_t addr, uint64_t start, uint64_t end) +{ + return start <= addr && addr < end; +} + +static void edu_check_range(uint64_t addr, uint64_t size1, uint64_t start, + uint64_t size2) +{ + uint64_t end1 = addr + size1; + uint64_t end2 = start + size2; + + if (within(addr, start, end2) && + end1 > addr && within(end1, start, end2)) { + return; + } + + hw_error("EDU: DMA range 0x%016"PRIx64"-0x%016"PRIx64 + " out of bounds (0x%016"PRIx64"-0x%016"PRIx64")!", + addr, end1 - 1, start, end2 - 1); +} + +static dma_addr_t edu_clamp_addr(const EduState *edu, dma_addr_t addr) +{ + dma_addr_t res = addr & edu->dma_mask; + + if (addr != res) { + printf("EDU: clamping DMA %#.16"PRIx64" to %#.16"PRIx64"!\n", addr, res); + } + + return res; +} + +static void edu_dma_timer(void *opaque) +{ + EduState *edu = opaque; + bool raise_irq = false; + + if (!(edu->dma.cmd & EDU_DMA_RUN)) { + return; + } + + if (EDU_DMA_DIR(edu->dma.cmd) == EDU_DMA_FROM_PCI) { + uint64_t dst = edu->dma.dst; + edu_check_range(dst, edu->dma.cnt, DMA_START, DMA_SIZE); + dst -= DMA_START; + pci_dma_read(&edu->pdev, edu_clamp_addr(edu, edu->dma.src), + edu->dma_buf + dst, edu->dma.cnt); + } else { + uint64_t src = edu->dma.src; + edu_check_range(src, edu->dma.cnt, DMA_START, DMA_SIZE); + src -= DMA_START; + pci_dma_write(&edu->pdev, edu_clamp_addr(edu, edu->dma.dst), + edu->dma_buf + src, edu->dma.cnt); + } + + edu->dma.cmd &= ~EDU_DMA_RUN; + if (edu->dma.cmd & EDU_DMA_IRQ) { + raise_irq = true; + } + + if (raise_irq) { + edu_raise_irq(edu, DMA_IRQ); + } +} + +static void dma_rw(EduState *edu, bool write, dma_addr_t *val, dma_addr_t *dma, 
+ bool timer) +{ + if (write && (edu->dma.cmd & EDU_DMA_RUN)) { + return; + } + + if (write) { + *dma = *val; + } else { + *val = *dma; + } + + if (timer) { + timer_mod(&edu->dma_timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 100); + } +} + +static uint64_t edu_mmio_read(void *opaque, hwaddr addr, unsigned size) +{ + EduState *edu = opaque; + uint64_t val = ~0ULL; + + if (addr < 0x80 && size != 4) { + return val; + } + + if (addr >= 0x80 && size != 4 && size != 8) { + return val; + } + + switch (addr) { + case 0x00: + val = 0x010000edu; + break; + case 0x04: + val = edu->addr4; + break; + case 0x08: + qemu_mutex_lock(&edu->thr_mutex); + val = edu->fact; + qemu_mutex_unlock(&edu->thr_mutex); + break; + case 0x20: + val = qatomic_read(&edu->status); + break; + case 0x24: + val = edu->irq_status; + break; + case 0x80: + dma_rw(edu, false, &val, &edu->dma.src, false); + break; + case 0x88: + dma_rw(edu, false, &val, &edu->dma.dst, false); + break; + case 0x90: + dma_rw(edu, false, &val, &edu->dma.cnt, false); + break; + case 0x98: + dma_rw(edu, false, &val, &edu->dma.cmd, false); + break; + } + + return val; +} + +static void edu_mmio_write(void *opaque, hwaddr addr, uint64_t val, + unsigned size) +{ + EduState *edu = opaque; + + if (addr < 0x80 && size != 4) { + return; + } + + if (addr >= 0x80 && size != 4 && size != 8) { + return; + } + + switch (addr) { + case 0x04: + edu->addr4 = ~val; + break; + case 0x08: + if (qatomic_read(&edu->status) & EDU_STATUS_COMPUTING) { + break; + } + /* EDU_STATUS_COMPUTING cannot go 0->1 concurrently, because it is only + * set in this function and it is under the iothread mutex. + */ + qemu_mutex_lock(&edu->thr_mutex); + edu->fact = val; + qatomic_or(&edu->status, EDU_STATUS_COMPUTING); + qemu_cond_signal(&edu->thr_cond); + qemu_mutex_unlock(&edu->thr_mutex); + break; + case 0x20: + if (val & EDU_STATUS_IRQFACT) { + qatomic_or(&edu->status, EDU_STATUS_IRQFACT); + } else { + qatomic_and(&edu->status, ~EDU_STATUS_IRQFACT); + } + break; + case 0x60: + edu_raise_irq(edu, val); + break; + case 0x64: + edu_lower_irq(edu, val); + break; + case 0x80: + dma_rw(edu, true, &val, &edu->dma.src, false); + break; + case 0x88: + dma_rw(edu, true, &val, &edu->dma.dst, false); + break; + case 0x90: + dma_rw(edu, true, &val, &edu->dma.cnt, false); + break; + case 0x98: + if (!(val & EDU_DMA_RUN)) { + break; + } + dma_rw(edu, true, &val, &edu->dma.cmd, true); + break; + } +} + +static const MemoryRegionOps edu_mmio_ops = { + .read = edu_mmio_read, + .write = edu_mmio_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 8, + }, + .impl = { + .min_access_size = 4, + .max_access_size = 8, + }, + +}; + +/* + * We purposely use a thread, so that users are forced to wait for the status + * register. + */ +static void *edu_fact_thread(void *opaque) +{ + EduState *edu = opaque; + + while (1) { + uint32_t val, ret = 1; + + qemu_mutex_lock(&edu->thr_mutex); + while ((qatomic_read(&edu->status) & EDU_STATUS_COMPUTING) == 0 && + !edu->stopping) { + qemu_cond_wait(&edu->thr_cond, &edu->thr_mutex); + } + + if (edu->stopping) { + qemu_mutex_unlock(&edu->thr_mutex); + break; + } + + val = edu->fact; + qemu_mutex_unlock(&edu->thr_mutex); + + while (val > 0) { + ret *= val--; + } + + /* + * We should sleep for a random period here, so that students are + * forced to check the status properly. 
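+         * A randomized delay would do it; for instance the (hypothetical)
+         *     g_usleep(g_random_int_range(1000, 100000));
+         * could be dropped in here. As shipped, the result may appear
+         * almost immediately.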
+ */ + + qemu_mutex_lock(&edu->thr_mutex); + edu->fact = ret; + qemu_mutex_unlock(&edu->thr_mutex); + qatomic_and(&edu->status, ~EDU_STATUS_COMPUTING); + + if (qatomic_read(&edu->status) & EDU_STATUS_IRQFACT) { + qemu_mutex_lock_iothread(); + edu_raise_irq(edu, FACT_IRQ); + qemu_mutex_unlock_iothread(); + } + } + + return NULL; +} + +static void pci_edu_realize(PCIDevice *pdev, Error **errp) +{ + EduState *edu = EDU(pdev); + uint8_t *pci_conf = pdev->config; + + pci_config_set_interrupt_pin(pci_conf, 1); + + if (msi_init(pdev, 0, 1, true, false, errp)) { + return; + } + + timer_init_ms(&edu->dma_timer, QEMU_CLOCK_VIRTUAL, edu_dma_timer, edu); + + qemu_mutex_init(&edu->thr_mutex); + qemu_cond_init(&edu->thr_cond); + qemu_thread_create(&edu->thread, "edu", edu_fact_thread, + edu, QEMU_THREAD_JOINABLE); + + memory_region_init_io(&edu->mmio, OBJECT(edu), &edu_mmio_ops, edu, + "edu-mmio", 1 * MiB); + pci_register_bar(pdev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &edu->mmio); +} + +static void pci_edu_uninit(PCIDevice *pdev) +{ + EduState *edu = EDU(pdev); + + qemu_mutex_lock(&edu->thr_mutex); + edu->stopping = true; + qemu_mutex_unlock(&edu->thr_mutex); + qemu_cond_signal(&edu->thr_cond); + qemu_thread_join(&edu->thread); + + qemu_cond_destroy(&edu->thr_cond); + qemu_mutex_destroy(&edu->thr_mutex); + + timer_del(&edu->dma_timer); + msi_uninit(pdev); +} + +static void edu_instance_init(Object *obj) +{ + EduState *edu = EDU(obj); + + edu->dma_mask = (1UL << 28) - 1; + object_property_add_uint64_ptr(obj, "dma_mask", + &edu->dma_mask, OBJ_PROP_FLAG_READWRITE); +} + +static void edu_class_init(ObjectClass *class, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(class); + PCIDeviceClass *k = PCI_DEVICE_CLASS(class); + + k->realize = pci_edu_realize; + k->exit = pci_edu_uninit; + k->vendor_id = PCI_VENDOR_ID_QEMU; + k->device_id = 0x11e8; + k->revision = 0x10; + k->class_id = PCI_CLASS_OTHERS; + set_bit(DEVICE_CATEGORY_MISC, dc->categories); +} + +static void pci_edu_register_types(void) +{ + static InterfaceInfo interfaces[] = { + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { }, + }; + static const TypeInfo edu_info = { + .name = TYPE_PCI_EDU_DEVICE, + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(EduState), + .instance_init = edu_instance_init, + .class_init = edu_class_init, + .interfaces = interfaces, + }; + + type_register_static(&edu_info); +} +type_init(pci_edu_register_types) diff --git a/hw/misc/empty_slot.c b/hw/misc/empty_slot.c new file mode 100644 index 000000000..37b0ddfb0 --- /dev/null +++ b/hw/misc/empty_slot.c @@ -0,0 +1,109 @@ +/* + * QEMU Empty Slot + * + * The empty_slot device emulates known to a bus but not connected devices. + * + * Copyright (c) 2010 Artyom Tarasenko + * + * This code is licensed under the GNU GPL v2 or (at your option) any later + * version. 
+ */ + +#include "qemu/osdep.h" +#include "hw/sysbus.h" +#include "hw/qdev-properties.h" +#include "hw/misc/empty_slot.h" +#include "qapi/error.h" +#include "trace.h" +#include "qom/object.h" + +#define TYPE_EMPTY_SLOT "empty_slot" +OBJECT_DECLARE_SIMPLE_TYPE(EmptySlot, EMPTY_SLOT) + +struct EmptySlot { + SysBusDevice parent_obj; + + MemoryRegion iomem; + char *name; + uint64_t size; +}; + +static uint64_t empty_slot_read(void *opaque, hwaddr addr, + unsigned size) +{ + EmptySlot *s = EMPTY_SLOT(opaque); + + trace_empty_slot_write(addr, size << 1, 0, size, s->name); + + return 0; +} + +static void empty_slot_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + EmptySlot *s = EMPTY_SLOT(opaque); + + trace_empty_slot_write(addr, size << 1, val, size, s->name); +} + +static const MemoryRegionOps empty_slot_ops = { + .read = empty_slot_read, + .write = empty_slot_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +void empty_slot_init(const char *name, hwaddr addr, uint64_t slot_size) +{ + if (slot_size > 0) { + /* Only empty slots larger than 0 byte need handling. */ + DeviceState *dev; + + dev = qdev_new(TYPE_EMPTY_SLOT); + + qdev_prop_set_uint64(dev, "size", slot_size); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + + sysbus_mmio_map_overlap(SYS_BUS_DEVICE(dev), 0, addr, -10000); + } +} + +static void empty_slot_realize(DeviceState *dev, Error **errp) +{ + EmptySlot *s = EMPTY_SLOT(dev); + + if (s->name == NULL) { + s->name = g_strdup("empty-slot"); + } + memory_region_init_io(&s->iomem, OBJECT(s), &empty_slot_ops, s, + s->name, s->size); + sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->iomem); +} + +static Property empty_slot_properties[] = { + DEFINE_PROP_UINT64("size", EmptySlot, size, 0), + DEFINE_PROP_STRING("name", EmptySlot, name), + DEFINE_PROP_END_OF_LIST(), +}; + +static void empty_slot_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = empty_slot_realize; + device_class_set_props(dc, empty_slot_properties); + set_bit(DEVICE_CATEGORY_MISC, dc->categories); +} + +static const TypeInfo empty_slot_info = { + .name = TYPE_EMPTY_SLOT, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(EmptySlot), + .class_init = empty_slot_class_init, +}; + +static void empty_slot_register_types(void) +{ + type_register_static(&empty_slot_info); +} + +type_init(empty_slot_register_types) diff --git a/hw/misc/exynos4210_clk.c b/hw/misc/exynos4210_clk.c new file mode 100644 index 000000000..58cec282f --- /dev/null +++ b/hw/misc/exynos4210_clk.c @@ -0,0 +1,166 @@ +/* + * Exynos4210 Clock Controller Emulation + * + * Copyright (c) 2017 Krzysztof Kozlowski + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . 
+ */ + +#include "qemu/osdep.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "qom/object.h" + +#define TYPE_EXYNOS4210_CLK "exynos4210.clk" +OBJECT_DECLARE_SIMPLE_TYPE(Exynos4210ClkState, EXYNOS4210_CLK) + +#define CLK_PLL_LOCKED BIT(29) + +#define EXYNOS4210_CLK_REGS_MEM_SIZE 0x15104 + +typedef struct Exynos4210Reg { + const char *name; /* for debug only */ + uint32_t offset; + uint32_t reset_value; +} Exynos4210Reg; + +/* Clock controller register base: 0x10030000 */ +static const Exynos4210Reg exynos4210_clk_regs[] = { + {"EPLL_LOCK", 0xc010, 0x00000fff}, + {"VPLL_LOCK", 0xc020, 0x00000fff}, + {"EPLL_CON0", 0xc110, 0x00300301 | CLK_PLL_LOCKED}, + {"EPLL_CON1", 0xc114, 0x00000000}, + {"VPLL_CON0", 0xc120, 0x00240201 | CLK_PLL_LOCKED}, + {"VPLL_CON1", 0xc124, 0x66010464}, + {"APLL_LOCK", 0x14000, 0x00000fff}, + {"MPLL_LOCK", 0x14004, 0x00000fff}, + {"APLL_CON0", 0x14100, 0x00c80601 | CLK_PLL_LOCKED}, + {"APLL_CON1", 0x14104, 0x0000001c}, + {"MPLL_CON0", 0x14108, 0x00c80601 | CLK_PLL_LOCKED}, + {"MPLL_CON1", 0x1410c, 0x0000001c}, +}; + +#define EXYNOS4210_REGS_NUM ARRAY_SIZE(exynos4210_clk_regs) + +struct Exynos4210ClkState { + SysBusDevice parent_obj; + + MemoryRegion iomem; + uint32_t reg[EXYNOS4210_REGS_NUM]; +}; + +static uint64_t exynos4210_clk_read(void *opaque, hwaddr offset, + unsigned size) +{ + const Exynos4210ClkState *s = (Exynos4210ClkState *)opaque; + const Exynos4210Reg *regs = exynos4210_clk_regs; + unsigned int i; + + for (i = 0; i < EXYNOS4210_REGS_NUM; i++) { + if (regs->offset == offset) { + return s->reg[i]; + } + regs++; + } + qemu_log_mask(LOG_GUEST_ERROR, "%s: bad read offset 0x%04x\n", + __func__, (uint32_t)offset); + return 0; +} + +static void exynos4210_clk_write(void *opaque, hwaddr offset, + uint64_t val, unsigned size) +{ + Exynos4210ClkState *s = (Exynos4210ClkState *)opaque; + const Exynos4210Reg *regs = exynos4210_clk_regs; + unsigned int i; + + for (i = 0; i < EXYNOS4210_REGS_NUM; i++) { + if (regs->offset == offset) { + s->reg[i] = val; + return; + } + regs++; + } + qemu_log_mask(LOG_GUEST_ERROR, "%s: bad write offset 0x%04x\n", + __func__, (uint32_t)offset); +} + +static const MemoryRegionOps exynos4210_clk_ops = { + .read = exynos4210_clk_read, + .write = exynos4210_clk_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + .unaligned = false + } +}; + +static void exynos4210_clk_reset(DeviceState *dev) +{ + Exynos4210ClkState *s = EXYNOS4210_CLK(dev); + unsigned int i; + + /* Set default values for registers */ + for (i = 0; i < EXYNOS4210_REGS_NUM; i++) { + s->reg[i] = exynos4210_clk_regs[i].reset_value; + } +} + +static void exynos4210_clk_init(Object *obj) +{ + Exynos4210ClkState *s = EXYNOS4210_CLK(obj); + SysBusDevice *dev = SYS_BUS_DEVICE(obj); + + /* memory mapping */ + memory_region_init_io(&s->iomem, obj, &exynos4210_clk_ops, s, + TYPE_EXYNOS4210_CLK, EXYNOS4210_CLK_REGS_MEM_SIZE); + sysbus_init_mmio(dev, &s->iomem); +} + +static const VMStateDescription exynos4210_clk_vmstate = { + .name = TYPE_EXYNOS4210_CLK, + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32_ARRAY(reg, Exynos4210ClkState, EXYNOS4210_REGS_NUM), + VMSTATE_END_OF_LIST() + } +}; + +static void exynos4210_clk_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = exynos4210_clk_reset; + dc->vmsd = &exynos4210_clk_vmstate; +} + +static const TypeInfo 
exynos4210_clk_info = { + .name = TYPE_EXYNOS4210_CLK, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(Exynos4210ClkState), + .instance_init = exynos4210_clk_init, + .class_init = exynos4210_clk_class_init, +}; + +static void exynos4210_clk_register(void) +{ + qemu_log_mask(LOG_GUEST_ERROR, "Clock init\n"); + type_register_static(&exynos4210_clk_info); +} + +type_init(exynos4210_clk_register) diff --git a/hw/misc/exynos4210_pmu.c b/hw/misc/exynos4210_pmu.c new file mode 100644 index 000000000..e24139c63 --- /dev/null +++ b/hw/misc/exynos4210_pmu.c @@ -0,0 +1,522 @@ +/* + * Exynos4210 Power Management Unit (PMU) Emulation + * + * Copyright (C) 2011 Samsung Electronics Co Ltd. + * Maksim Kozlov + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . + */ + +/* + * This model implements PMU registers just as a bulk of memory. Currently, + * the only reason this device exists is that secondary CPU boot loader + * uses PMU INFORM5 register as a holding pen. + */ + +#include "qemu/osdep.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "qemu/module.h" +#include "sysemu/runstate.h" +#include "qom/object.h" + +#ifndef DEBUG_PMU +#define DEBUG_PMU 0 +#endif + +#ifndef DEBUG_PMU_EXTEND +#define DEBUG_PMU_EXTEND 0 +#endif + +#if DEBUG_PMU +#define PRINT_DEBUG(fmt, args...) \ + do { \ + fprintf(stderr, " [%s:%d] "fmt, __func__, __LINE__, ##args); \ + } while (0) + +#if DEBUG_PMU_EXTEND +#define PRINT_DEBUG_EXTEND(fmt, args...) \ + do { \ + fprintf(stderr, " [%s:%d] "fmt, __func__, __LINE__, ##args); \ + } while (0) +#else +#define PRINT_DEBUG_EXTEND(fmt, args...) do {} while (0) +#endif /* EXTEND */ + +#else +#define PRINT_DEBUG(fmt, args...) do {} while (0) +#define PRINT_DEBUG_EXTEND(fmt, args...) do {} while (0) +#endif + +/* + * Offsets for PMU registers + */ +#define OM_STAT 0x0000 /* OM status register */ +#define RTC_CLKO_SEL 0x000C /* Controls RTCCLKOUT */ +#define GNSS_RTC_OUT_CTRL 0x0010 /* Controls GNSS_RTC_OUT */ +/* Decides whether system-level low-power mode is used. 
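+ * (Presumably by configuring CENTRAL_SEQ, whose control options are set via
+ * SYSTEM_POWER_DOWN_OPTION below; this reading follows the Exynos4210
+ * manual's register descriptions and is not verified against hardware.)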
*/ +#define SYSTEM_POWER_DOWN_CTRL 0x0200 +/* Sets control options for CENTRAL_SEQ */ +#define SYSTEM_POWER_DOWN_OPTION 0x0208 +#define SWRESET 0x0400 /* Generate software reset */ +#define RST_STAT 0x0404 /* Reset status register */ +#define WAKEUP_STAT 0x0600 /* Wakeup status register */ +#define EINT_WAKEUP_MASK 0x0604 /* Configure External INTerrupt mask */ +#define WAKEUP_MASK 0x0608 /* Configure wakeup source mask */ +#define HDMI_PHY_CONTROL 0x0700 /* HDMI PHY control register */ +#define USBDEVICE_PHY_CONTROL 0x0704 /* USB Device PHY control register */ +#define USBHOST_PHY_CONTROL 0x0708 /* USB HOST PHY control register */ +#define DAC_PHY_CONTROL 0x070C /* DAC control register */ +#define MIPI_PHY0_CONTROL 0x0710 /* MIPI PHY control register */ +#define MIPI_PHY1_CONTROL 0x0714 /* MIPI PHY control register */ +#define ADC_PHY_CONTROL 0x0718 /* TS-ADC control register */ +#define PCIe_PHY_CONTROL 0x071C /* TS-PCIe control register */ +#define SATA_PHY_CONTROL 0x0720 /* TS-SATA control register */ +#define INFORM0 0x0800 /* Information register 0 */ +#define INFORM1 0x0804 /* Information register 1 */ +#define INFORM2 0x0808 /* Information register 2 */ +#define INFORM3 0x080C /* Information register 3 */ +#define INFORM4 0x0810 /* Information register 4 */ +#define INFORM5 0x0814 /* Information register 5 */ +#define INFORM6 0x0818 /* Information register 6 */ +#define INFORM7 0x081C /* Information register 7 */ +#define PMU_DEBUG 0x0A00 /* PMU debug register */ +/* Registers to set system-level low-power option */ +#define ARM_CORE0_SYS_PWR_REG 0x1000 +#define ARM_CORE1_SYS_PWR_REG 0x1010 +#define ARM_COMMON_SYS_PWR_REG 0x1080 +#define ARM_CPU_L2_0_SYS_PWR_REG 0x10C0 +#define ARM_CPU_L2_1_SYS_PWR_REG 0x10C4 +#define CMU_ACLKSTOP_SYS_PWR_REG 0x1100 +#define CMU_SCLKSTOP_SYS_PWR_REG 0x1104 +#define CMU_RESET_SYS_PWR_REG 0x110C +#define APLL_SYSCLK_SYS_PWR_REG 0x1120 +#define MPLL_SYSCLK_SYS_PWR_REG 0x1124 +#define VPLL_SYSCLK_SYS_PWR_REG 0x1128 +#define EPLL_SYSCLK_SYS_PWR_REG 0x112C +#define CMU_CLKSTOP_GPS_ALIVE_SYS_PWR_REG 0x1138 +#define CMU_RESET_GPS_ALIVE_SYS_PWR_REG 0x113C +#define CMU_CLKSTOP_CAM_SYS_PWR_REG 0x1140 +#define CMU_CLKSTOP_TV_SYS_PWR_REG 0x1144 +#define CMU_CLKSTOP_MFC_SYS_PWR_REG 0x1148 +#define CMU_CLKSTOP_G3D_SYS_PWR_REG 0x114C +#define CMU_CLKSTOP_LCD0_SYS_PWR_REG 0x1150 +#define CMU_CLKSTOP_LCD1_SYS_PWR_REG 0x1154 +#define CMU_CLKSTOP_MAUDIO_SYS_PWR_REG 0x1158 +#define CMU_CLKSTOP_GPS_SYS_PWR_REG 0x115C +#define CMU_RESET_CAM_SYS_PWR_REG 0x1160 +#define CMU_RESET_TV_SYS_PWR_REG 0x1164 +#define CMU_RESET_MFC_SYS_PWR_REG 0x1168 +#define CMU_RESET_G3D_SYS_PWR_REG 0x116C +#define CMU_RESET_LCD0_SYS_PWR_REG 0x1170 +#define CMU_RESET_LCD1_SYS_PWR_REG 0x1174 +#define CMU_RESET_MAUDIO_SYS_PWR_REG 0x1178 +#define CMU_RESET_GPS_SYS_PWR_REG 0x117C +#define TOP_BUS_SYS_PWR_REG 0x1180 +#define TOP_RETENTION_SYS_PWR_REG 0x1184 +#define TOP_PWR_SYS_PWR_REG 0x1188 +#define LOGIC_RESET_SYS_PWR_REG 0x11A0 +#define OneNANDXL_MEM_SYS_PWR_REG 0x11C0 +#define MODEMIF_MEM_SYS_PWR_REG 0x11C4 +#define USBDEVICE_MEM_SYS_PWR_REG 0x11CC +#define SDMMC_MEM_SYS_PWR_REG 0x11D0 +#define CSSYS_MEM_SYS_PWR_REG 0x11D4 +#define SECSS_MEM_SYS_PWR_REG 0x11D8 +#define PCIe_MEM_SYS_PWR_REG 0x11E0 +#define SATA_MEM_SYS_PWR_REG 0x11E4 +#define PAD_RETENTION_DRAM_SYS_PWR_REG 0x1200 +#define PAD_RETENTION_MAUDIO_SYS_PWR_REG 0x1204 +#define PAD_RETENTION_GPIO_SYS_PWR_REG 0x1220 +#define PAD_RETENTION_UART_SYS_PWR_REG 0x1224 +#define PAD_RETENTION_MMCA_SYS_PWR_REG 0x1228 +#define 
PAD_RETENTION_MMCB_SYS_PWR_REG 0x122C +#define PAD_RETENTION_EBIA_SYS_PWR_REG 0x1230 +#define PAD_RETENTION_EBIB_SYS_PWR_REG 0x1234 +#define PAD_ISOLATION_SYS_PWR_REG 0x1240 +#define PAD_ALV_SEL_SYS_PWR_REG 0x1260 +#define XUSBXTI_SYS_PWR_REG 0x1280 +#define XXTI_SYS_PWR_REG 0x1284 +#define EXT_REGULATOR_SYS_PWR_REG 0x12C0 +#define GPIO_MODE_SYS_PWR_REG 0x1300 +#define GPIO_MODE_MAUDIO_SYS_PWR_REG 0x1340 +#define CAM_SYS_PWR_REG 0x1380 +#define TV_SYS_PWR_REG 0x1384 +#define MFC_SYS_PWR_REG 0x1388 +#define G3D_SYS_PWR_REG 0x138C +#define LCD0_SYS_PWR_REG 0x1390 +#define LCD1_SYS_PWR_REG 0x1394 +#define MAUDIO_SYS_PWR_REG 0x1398 +#define GPS_SYS_PWR_REG 0x139C +#define GPS_ALIVE_SYS_PWR_REG 0x13A0 +#define ARM_CORE0_CONFIGURATION 0x2000 /* Configure power mode of ARM_CORE0 */ +#define ARM_CORE0_STATUS 0x2004 /* Check power mode of ARM_CORE0 */ +#define ARM_CORE0_OPTION 0x2008 /* Sets control options for ARM_CORE0 */ +#define ARM_CORE1_CONFIGURATION 0x2080 /* Configure power mode of ARM_CORE1 */ +#define ARM_CORE1_STATUS 0x2084 /* Check power mode of ARM_CORE1 */ +#define ARM_CORE1_OPTION 0x2088 /* Sets control options for ARM_CORE0 */ +#define ARM_COMMON_OPTION 0x2408 /* Sets control options for ARM_COMMON */ +/* Configure power mode of ARM_CPU_L2_0 */ +#define ARM_CPU_L2_0_CONFIGURATION 0x2600 +#define ARM_CPU_L2_0_STATUS 0x2604 /* Check power mode of ARM_CPU_L2_0 */ +/* Configure power mode of ARM_CPU_L2_1 */ +#define ARM_CPU_L2_1_CONFIGURATION 0x2620 +#define ARM_CPU_L2_1_STATUS 0x2624 /* Check power mode of ARM_CPU_L2_1 */ +/* Sets control options for PAD_RETENTION_MAUDIO */ +#define PAD_RETENTION_MAUDIO_OPTION 0x3028 +/* Sets control options for PAD_RETENTION_GPIO */ +#define PAD_RETENTION_GPIO_OPTION 0x3108 +/* Sets control options for PAD_RETENTION_UART */ +#define PAD_RETENTION_UART_OPTION 0x3128 +/* Sets control options for PAD_RETENTION_MMCA */ +#define PAD_RETENTION_MMCA_OPTION 0x3148 +/* Sets control options for PAD_RETENTION_MMCB */ +#define PAD_RETENTION_MMCB_OPTION 0x3168 +/* Sets control options for PAD_RETENTION_EBIA */ +#define PAD_RETENTION_EBIA_OPTION 0x3188 +/* Sets control options for PAD_RETENTION_EBIB */ +#define PAD_RETENTION_EBIB_OPTION 0x31A8 +#define PS_HOLD_CONTROL 0x330C /* PS_HOLD control register */ +#define XUSBXTI_CONFIGURATION 0x3400 /* Configure the pad of XUSBXTI */ +#define XUSBXTI_STATUS 0x3404 /* Check the pad of XUSBXTI */ +/* Sets time required for XUSBXTI to be stabilized */ +#define XUSBXTI_DURATION 0x341C +#define XXTI_CONFIGURATION 0x3420 /* Configure the pad of XXTI */ +#define XXTI_STATUS 0x3424 /* Check the pad of XXTI */ +/* Sets time required for XXTI to be stabilized */ +#define XXTI_DURATION 0x343C +/* Sets time required for EXT_REGULATOR to be stabilized */ +#define EXT_REGULATOR_DURATION 0x361C +#define CAM_CONFIGURATION 0x3C00 /* Configure power mode of CAM */ +#define CAM_STATUS 0x3C04 /* Check power mode of CAM */ +#define CAM_OPTION 0x3C08 /* Sets control options for CAM */ +#define TV_CONFIGURATION 0x3C20 /* Configure power mode of TV */ +#define TV_STATUS 0x3C24 /* Check power mode of TV */ +#define TV_OPTION 0x3C28 /* Sets control options for TV */ +#define MFC_CONFIGURATION 0x3C40 /* Configure power mode of MFC */ +#define MFC_STATUS 0x3C44 /* Check power mode of MFC */ +#define MFC_OPTION 0x3C48 /* Sets control options for MFC */ +#define G3D_CONFIGURATION 0x3C60 /* Configure power mode of G3D */ +#define G3D_STATUS 0x3C64 /* Check power mode of G3D */ +#define G3D_OPTION 0x3C68 /* Sets control options for G3D */ +#define 
LCD0_CONFIGURATION 0x3C80 /* Configure power mode of LCD0 */ +#define LCD0_STATUS 0x3C84 /* Check power mode of LCD0 */ +#define LCD0_OPTION 0x3C88 /* Sets control options for LCD0 */ +#define LCD1_CONFIGURATION 0x3CA0 /* Configure power mode of LCD1 */ +#define LCD1_STATUS 0x3CA4 /* Check power mode of LCD1 */ +#define LCD1_OPTION 0x3CA8 /* Sets control options for LCD1 */ +#define GPS_CONFIGURATION 0x3CE0 /* Configure power mode of GPS */ +#define GPS_STATUS 0x3CE4 /* Check power mode of GPS */ +#define GPS_OPTION 0x3CE8 /* Sets control options for GPS */ +#define GPS_ALIVE_CONFIGURATION 0x3D00 /* Configure power mode of GPS */ +#define GPS_ALIVE_STATUS 0x3D04 /* Check power mode of GPS */ +#define GPS_ALIVE_OPTION 0x3D08 /* Sets control options for GPS */ + +#define EXYNOS4210_PMU_REGS_MEM_SIZE 0x3d0c + +typedef struct Exynos4210PmuReg { + const char *name; /* for debug only */ + uint32_t offset; + uint32_t reset_value; +} Exynos4210PmuReg; + +static const Exynos4210PmuReg exynos4210_pmu_regs[] = { + {"OM_STAT", OM_STAT, 0x00000000}, + {"RTC_CLKO_SEL", RTC_CLKO_SEL, 0x00000000}, + {"GNSS_RTC_OUT_CTRL", GNSS_RTC_OUT_CTRL, 0x00000001}, + {"SYSTEM_POWER_DOWN_CTRL", SYSTEM_POWER_DOWN_CTRL, 0x00010000}, + {"SYSTEM_POWER_DOWN_OPTION", SYSTEM_POWER_DOWN_OPTION, 0x03030000}, + {"SWRESET", SWRESET, 0x00000000}, + {"RST_STAT", RST_STAT, 0x00000000}, + {"WAKEUP_STAT", WAKEUP_STAT, 0x00000000}, + {"EINT_WAKEUP_MASK", EINT_WAKEUP_MASK, 0x00000000}, + {"WAKEUP_MASK", WAKEUP_MASK, 0x00000000}, + {"HDMI_PHY_CONTROL", HDMI_PHY_CONTROL, 0x00960000}, + {"USBDEVICE_PHY_CONTROL", USBDEVICE_PHY_CONTROL, 0x00000000}, + {"USBHOST_PHY_CONTROL", USBHOST_PHY_CONTROL, 0x00000000}, + {"DAC_PHY_CONTROL", DAC_PHY_CONTROL, 0x00000000}, + {"MIPI_PHY0_CONTROL", MIPI_PHY0_CONTROL, 0x00000000}, + {"MIPI_PHY1_CONTROL", MIPI_PHY1_CONTROL, 0x00000000}, + {"ADC_PHY_CONTROL", ADC_PHY_CONTROL, 0x00000001}, + {"PCIe_PHY_CONTROL", PCIe_PHY_CONTROL, 0x00000000}, + {"SATA_PHY_CONTROL", SATA_PHY_CONTROL, 0x00000000}, + {"INFORM0", INFORM0, 0x00000000}, + {"INFORM1", INFORM1, 0x00000000}, + {"INFORM2", INFORM2, 0x00000000}, + {"INFORM3", INFORM3, 0x00000000}, + {"INFORM4", INFORM4, 0x00000000}, + {"INFORM5", INFORM5, 0x00000000}, + {"INFORM6", INFORM6, 0x00000000}, + {"INFORM7", INFORM7, 0x00000000}, + {"PMU_DEBUG", PMU_DEBUG, 0x00000000}, + {"ARM_CORE0_SYS_PWR_REG", ARM_CORE0_SYS_PWR_REG, 0xFFFFFFFF}, + {"ARM_CORE1_SYS_PWR_REG", ARM_CORE1_SYS_PWR_REG, 0xFFFFFFFF}, + {"ARM_COMMON_SYS_PWR_REG", ARM_COMMON_SYS_PWR_REG, 0xFFFFFFFF}, + {"ARM_CPU_L2_0_SYS_PWR_REG", ARM_CPU_L2_0_SYS_PWR_REG, 0xFFFFFFFF}, + {"ARM_CPU_L2_1_SYS_PWR_REG", ARM_CPU_L2_1_SYS_PWR_REG, 0xFFFFFFFF}, + {"CMU_ACLKSTOP_SYS_PWR_REG", CMU_ACLKSTOP_SYS_PWR_REG, 0xFFFFFFFF}, + {"CMU_SCLKSTOP_SYS_PWR_REG", CMU_SCLKSTOP_SYS_PWR_REG, 0xFFFFFFFF}, + {"CMU_RESET_SYS_PWR_REG", CMU_RESET_SYS_PWR_REG, 0xFFFFFFFF}, + {"APLL_SYSCLK_SYS_PWR_REG", APLL_SYSCLK_SYS_PWR_REG, 0xFFFFFFFF}, + {"MPLL_SYSCLK_SYS_PWR_REG", MPLL_SYSCLK_SYS_PWR_REG, 0xFFFFFFFF}, + {"VPLL_SYSCLK_SYS_PWR_REG", VPLL_SYSCLK_SYS_PWR_REG, 0xFFFFFFFF}, + {"EPLL_SYSCLK_SYS_PWR_REG", EPLL_SYSCLK_SYS_PWR_REG, 0xFFFFFFFF}, + {"CMU_CLKSTOP_GPS_ALIVE_SYS_PWR_REG", CMU_CLKSTOP_GPS_ALIVE_SYS_PWR_REG, + 0xFFFFFFFF}, + {"CMU_RESET_GPS_ALIVE_SYS_PWR_REG", CMU_RESET_GPS_ALIVE_SYS_PWR_REG, + 0xFFFFFFFF}, + {"CMU_CLKSTOP_CAM_SYS_PWR_REG", CMU_CLKSTOP_CAM_SYS_PWR_REG, 0xFFFFFFFF}, + {"CMU_CLKSTOP_TV_SYS_PWR_REG", CMU_CLKSTOP_TV_SYS_PWR_REG, 0xFFFFFFFF}, + {"CMU_CLKSTOP_MFC_SYS_PWR_REG", CMU_CLKSTOP_MFC_SYS_PWR_REG, 0xFFFFFFFF}, + 
{"CMU_CLKSTOP_G3D_SYS_PWR_REG", CMU_CLKSTOP_G3D_SYS_PWR_REG, 0xFFFFFFFF}, + {"CMU_CLKSTOP_LCD0_SYS_PWR_REG", CMU_CLKSTOP_LCD0_SYS_PWR_REG, 0xFFFFFFFF}, + {"CMU_CLKSTOP_LCD1_SYS_PWR_REG", CMU_CLKSTOP_LCD1_SYS_PWR_REG, 0xFFFFFFFF}, + {"CMU_CLKSTOP_MAUDIO_SYS_PWR_REG", CMU_CLKSTOP_MAUDIO_SYS_PWR_REG, + 0xFFFFFFFF}, + {"CMU_CLKSTOP_GPS_SYS_PWR_REG", CMU_CLKSTOP_GPS_SYS_PWR_REG, 0xFFFFFFFF}, + {"CMU_RESET_CAM_SYS_PWR_REG", CMU_RESET_CAM_SYS_PWR_REG, 0xFFFFFFFF}, + {"CMU_RESET_TV_SYS_PWR_REG", CMU_RESET_TV_SYS_PWR_REG, 0xFFFFFFFF}, + {"CMU_RESET_MFC_SYS_PWR_REG", CMU_RESET_MFC_SYS_PWR_REG, 0xFFFFFFFF}, + {"CMU_RESET_G3D_SYS_PWR_REG", CMU_RESET_G3D_SYS_PWR_REG, 0xFFFFFFFF}, + {"CMU_RESET_LCD0_SYS_PWR_REG", CMU_RESET_LCD0_SYS_PWR_REG, 0xFFFFFFFF}, + {"CMU_RESET_LCD1_SYS_PWR_REG", CMU_RESET_LCD1_SYS_PWR_REG, 0xFFFFFFFF}, + {"CMU_RESET_MAUDIO_SYS_PWR_REG", CMU_RESET_MAUDIO_SYS_PWR_REG, 0xFFFFFFFF}, + {"CMU_RESET_GPS_SYS_PWR_REG", CMU_RESET_GPS_SYS_PWR_REG, 0xFFFFFFFF}, + {"TOP_BUS_SYS_PWR_REG", TOP_BUS_SYS_PWR_REG, 0xFFFFFFFF}, + {"TOP_RETENTION_SYS_PWR_REG", TOP_RETENTION_SYS_PWR_REG, 0xFFFFFFFF}, + {"TOP_PWR_SYS_PWR_REG", TOP_PWR_SYS_PWR_REG, 0xFFFFFFFF}, + {"LOGIC_RESET_SYS_PWR_REG", LOGIC_RESET_SYS_PWR_REG, 0xFFFFFFFF}, + {"OneNANDXL_MEM_SYS_PWR_REG", OneNANDXL_MEM_SYS_PWR_REG, 0xFFFFFFFF}, + {"MODEMIF_MEM_SYS_PWR_REG", MODEMIF_MEM_SYS_PWR_REG, 0xFFFFFFFF}, + {"USBDEVICE_MEM_SYS_PWR_REG", USBDEVICE_MEM_SYS_PWR_REG, 0xFFFFFFFF}, + {"SDMMC_MEM_SYS_PWR_REG", SDMMC_MEM_SYS_PWR_REG, 0xFFFFFFFF}, + {"CSSYS_MEM_SYS_PWR_REG", CSSYS_MEM_SYS_PWR_REG, 0xFFFFFFFF}, + {"SECSS_MEM_SYS_PWR_REG", SECSS_MEM_SYS_PWR_REG, 0xFFFFFFFF}, + {"PCIe_MEM_SYS_PWR_REG", PCIe_MEM_SYS_PWR_REG, 0xFFFFFFFF}, + {"SATA_MEM_SYS_PWR_REG", SATA_MEM_SYS_PWR_REG, 0xFFFFFFFF}, + {"PAD_RETENTION_DRAM_SYS_PWR_REG", PAD_RETENTION_DRAM_SYS_PWR_REG, + 0xFFFFFFFF}, + {"PAD_RETENTION_MAUDIO_SYS_PWR_REG", PAD_RETENTION_MAUDIO_SYS_PWR_REG, + 0xFFFFFFFF}, + {"PAD_RETENTION_GPIO_SYS_PWR_REG", PAD_RETENTION_GPIO_SYS_PWR_REG, + 0xFFFFFFFF}, + {"PAD_RETENTION_UART_SYS_PWR_REG", PAD_RETENTION_UART_SYS_PWR_REG, + 0xFFFFFFFF}, + {"PAD_RETENTION_MMCA_SYS_PWR_REG", PAD_RETENTION_MMCA_SYS_PWR_REG, + 0xFFFFFFFF}, + {"PAD_RETENTION_MMCB_SYS_PWR_REG", PAD_RETENTION_MMCB_SYS_PWR_REG, + 0xFFFFFFFF}, + {"PAD_RETENTION_EBIA_SYS_PWR_REG", PAD_RETENTION_EBIA_SYS_PWR_REG, + 0xFFFFFFFF}, + {"PAD_RETENTION_EBIB_SYS_PWR_REG", PAD_RETENTION_EBIB_SYS_PWR_REG, + 0xFFFFFFFF}, + {"PAD_ISOLATION_SYS_PWR_REG", PAD_ISOLATION_SYS_PWR_REG, 0xFFFFFFFF}, + {"PAD_ALV_SEL_SYS_PWR_REG", PAD_ALV_SEL_SYS_PWR_REG, 0xFFFFFFFF}, + {"XUSBXTI_SYS_PWR_REG", XUSBXTI_SYS_PWR_REG, 0xFFFFFFFF}, + {"XXTI_SYS_PWR_REG", XXTI_SYS_PWR_REG, 0xFFFFFFFF}, + {"EXT_REGULATOR_SYS_PWR_REG", EXT_REGULATOR_SYS_PWR_REG, 0xFFFFFFFF}, + {"GPIO_MODE_SYS_PWR_REG", GPIO_MODE_SYS_PWR_REG, 0xFFFFFFFF}, + {"GPIO_MODE_MAUDIO_SYS_PWR_REG", GPIO_MODE_MAUDIO_SYS_PWR_REG, 0xFFFFFFFF}, + {"CAM_SYS_PWR_REG", CAM_SYS_PWR_REG, 0xFFFFFFFF}, + {"TV_SYS_PWR_REG", TV_SYS_PWR_REG, 0xFFFFFFFF}, + {"MFC_SYS_PWR_REG", MFC_SYS_PWR_REG, 0xFFFFFFFF}, + {"G3D_SYS_PWR_REG", G3D_SYS_PWR_REG, 0xFFFFFFFF}, + {"LCD0_SYS_PWR_REG", LCD0_SYS_PWR_REG, 0xFFFFFFFF}, + {"LCD1_SYS_PWR_REG", LCD1_SYS_PWR_REG, 0xFFFFFFFF}, + {"MAUDIO_SYS_PWR_REG", MAUDIO_SYS_PWR_REG, 0xFFFFFFFF}, + {"GPS_SYS_PWR_REG", GPS_SYS_PWR_REG, 0xFFFFFFFF}, + {"GPS_ALIVE_SYS_PWR_REG", GPS_ALIVE_SYS_PWR_REG, 0xFFFFFFFF}, + {"ARM_CORE0_CONFIGURATION", ARM_CORE0_CONFIGURATION, 0x00000003}, + {"ARM_CORE0_STATUS", ARM_CORE0_STATUS, 0x00030003}, + {"ARM_CORE0_OPTION", 
ARM_CORE0_OPTION, 0x01010001}, + {"ARM_CORE1_CONFIGURATION", ARM_CORE1_CONFIGURATION, 0x00000003}, + {"ARM_CORE1_STATUS", ARM_CORE1_STATUS, 0x00030003}, + {"ARM_CORE1_OPTION", ARM_CORE1_OPTION, 0x01010001}, + {"ARM_COMMON_OPTION", ARM_COMMON_OPTION, 0x00000001}, + {"ARM_CPU_L2_0_CONFIGURATION", ARM_CPU_L2_0_CONFIGURATION, 0x00000003}, + {"ARM_CPU_L2_0_STATUS", ARM_CPU_L2_0_STATUS, 0x00000003}, + {"ARM_CPU_L2_1_CONFIGURATION", ARM_CPU_L2_1_CONFIGURATION, 0x00000003}, + {"ARM_CPU_L2_1_STATUS", ARM_CPU_L2_1_STATUS, 0x00000003}, + {"PAD_RETENTION_MAUDIO_OPTION", PAD_RETENTION_MAUDIO_OPTION, 0x00000000}, + {"PAD_RETENTION_GPIO_OPTION", PAD_RETENTION_GPIO_OPTION, 0x00000000}, + {"PAD_RETENTION_UART_OPTION", PAD_RETENTION_UART_OPTION, 0x00000000}, + {"PAD_RETENTION_MMCA_OPTION", PAD_RETENTION_MMCA_OPTION, 0x00000000}, + {"PAD_RETENTION_MMCB_OPTION", PAD_RETENTION_MMCB_OPTION, 0x00000000}, + {"PAD_RETENTION_EBIA_OPTION", PAD_RETENTION_EBIA_OPTION, 0x00000000}, + {"PAD_RETENTION_EBIB_OPTION", PAD_RETENTION_EBIB_OPTION, 0x00000000}, + /* + * PS_HOLD_CONTROL: reset value and manually toggle high the DATA bit. + * DATA bit high, set usually by bootloader, keeps system on. + */ + {"PS_HOLD_CONTROL", PS_HOLD_CONTROL, 0x00005200 | BIT(8)}, + {"XUSBXTI_CONFIGURATION", XUSBXTI_CONFIGURATION, 0x00000001}, + {"XUSBXTI_STATUS", XUSBXTI_STATUS, 0x00000001}, + {"XUSBXTI_DURATION", XUSBXTI_DURATION, 0xFFF00000}, + {"XXTI_CONFIGURATION", XXTI_CONFIGURATION, 0x00000001}, + {"XXTI_STATUS", XXTI_STATUS, 0x00000001}, + {"XXTI_DURATION", XXTI_DURATION, 0xFFF00000}, + {"EXT_REGULATOR_DURATION", EXT_REGULATOR_DURATION, 0xFFF03FFF}, + {"CAM_CONFIGURATION", CAM_CONFIGURATION, 0x00000007}, + {"CAM_STATUS", CAM_STATUS, 0x00060007}, + {"CAM_OPTION", CAM_OPTION, 0x00000001}, + {"TV_CONFIGURATION", TV_CONFIGURATION, 0x00000007}, + {"TV_STATUS", TV_STATUS, 0x00060007}, + {"TV_OPTION", TV_OPTION, 0x00000001}, + {"MFC_CONFIGURATION", MFC_CONFIGURATION, 0x00000007}, + {"MFC_STATUS", MFC_STATUS, 0x00060007}, + {"MFC_OPTION", MFC_OPTION, 0x00000001}, + {"G3D_CONFIGURATION", G3D_CONFIGURATION, 0x00000007}, + {"G3D_STATUS", G3D_STATUS, 0x00060007}, + {"G3D_OPTION", G3D_OPTION, 0x00000001}, + {"LCD0_CONFIGURATION", LCD0_CONFIGURATION, 0x00000007}, + {"LCD0_STATUS", LCD0_STATUS, 0x00060007}, + {"LCD0_OPTION", LCD0_OPTION, 0x00000001}, + {"LCD1_CONFIGURATION", LCD1_CONFIGURATION, 0x00000007}, + {"LCD1_STATUS", LCD1_STATUS, 0x00060007}, + {"LCD1_OPTION", LCD1_OPTION, 0x00000001}, + {"GPS_CONFIGURATION", GPS_CONFIGURATION, 0x00000007}, + {"GPS_STATUS", GPS_STATUS, 0x00060007}, + {"GPS_OPTION", GPS_OPTION, 0x00000001}, + {"GPS_ALIVE_CONFIGURATION", GPS_ALIVE_CONFIGURATION, 0x00000007}, + {"GPS_ALIVE_STATUS", GPS_ALIVE_STATUS, 0x00060007}, + {"GPS_ALIVE_OPTION", GPS_ALIVE_OPTION, 0x00000001}, +}; + +#define PMU_NUM_OF_REGISTERS ARRAY_SIZE(exynos4210_pmu_regs) + +#define TYPE_EXYNOS4210_PMU "exynos4210.pmu" +OBJECT_DECLARE_SIMPLE_TYPE(Exynos4210PmuState, EXYNOS4210_PMU) + +struct Exynos4210PmuState { + SysBusDevice parent_obj; + + MemoryRegion iomem; + uint32_t reg[PMU_NUM_OF_REGISTERS]; +}; + +static void exynos4210_pmu_poweroff(void) +{ + PRINT_DEBUG("QEMU PMU: PS_HOLD bit down, powering off\n"); + qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN); +} + +static uint64_t exynos4210_pmu_read(void *opaque, hwaddr offset, + unsigned size) +{ + Exynos4210PmuState *s = (Exynos4210PmuState *)opaque; + const Exynos4210PmuReg *reg_p = exynos4210_pmu_regs; + unsigned int i; + + for (i = 0; i < PMU_NUM_OF_REGISTERS; i++) { + if 
(reg_p->offset == offset) { + PRINT_DEBUG_EXTEND("%s [0x%04x] -> 0x%04x\n", reg_p->name, + (uint32_t)offset, s->reg[i]); + return s->reg[i]; + } + reg_p++; + } + PRINT_DEBUG("QEMU PMU ERROR: bad read offset 0x%04x\n", (uint32_t)offset); + return 0; +} + +static void exynos4210_pmu_write(void *opaque, hwaddr offset, + uint64_t val, unsigned size) +{ + Exynos4210PmuState *s = (Exynos4210PmuState *)opaque; + const Exynos4210PmuReg *reg_p = exynos4210_pmu_regs; + unsigned int i; + + for (i = 0; i < PMU_NUM_OF_REGISTERS; i++) { + if (reg_p->offset == offset) { + PRINT_DEBUG_EXTEND("%s <0x%04x> <- 0x%04x\n", reg_p->name, + (uint32_t)offset, (uint32_t)val); + s->reg[i] = val; + if ((offset == PS_HOLD_CONTROL) && ((val & BIT(8)) == 0)) { + /* + * We are interested only in setting data bit + * of PS_HOLD_CONTROL register to indicate power off request. + */ + exynos4210_pmu_poweroff(); + } + return; + } + reg_p++; + } + PRINT_DEBUG("QEMU PMU ERROR: bad write offset 0x%04x\n", (uint32_t)offset); +} + +static const MemoryRegionOps exynos4210_pmu_ops = { + .read = exynos4210_pmu_read, + .write = exynos4210_pmu_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + .unaligned = false + } +}; + +static void exynos4210_pmu_reset(DeviceState *dev) +{ + Exynos4210PmuState *s = EXYNOS4210_PMU(dev); + unsigned i; + + /* Set default values for registers */ + for (i = 0; i < PMU_NUM_OF_REGISTERS; i++) { + s->reg[i] = exynos4210_pmu_regs[i].reset_value; + } +} + +static void exynos4210_pmu_init(Object *obj) +{ + Exynos4210PmuState *s = EXYNOS4210_PMU(obj); + SysBusDevice *dev = SYS_BUS_DEVICE(obj); + + /* memory mapping */ + memory_region_init_io(&s->iomem, obj, &exynos4210_pmu_ops, s, + "exynos4210.pmu", EXYNOS4210_PMU_REGS_MEM_SIZE); + sysbus_init_mmio(dev, &s->iomem); +} + +static const VMStateDescription exynos4210_pmu_vmstate = { + .name = "exynos4210.pmu", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32_ARRAY(reg, Exynos4210PmuState, PMU_NUM_OF_REGISTERS), + VMSTATE_END_OF_LIST() + } +}; + +static void exynos4210_pmu_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = exynos4210_pmu_reset; + dc->vmsd = &exynos4210_pmu_vmstate; +} + +static const TypeInfo exynos4210_pmu_info = { + .name = TYPE_EXYNOS4210_PMU, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(Exynos4210PmuState), + .instance_init = exynos4210_pmu_init, + .class_init = exynos4210_pmu_class_init, +}; + +static void exynos4210_pmu_register(void) +{ + type_register_static(&exynos4210_pmu_info); +} + +type_init(exynos4210_pmu_register) diff --git a/hw/misc/exynos4210_rng.c b/hw/misc/exynos4210_rng.c new file mode 100644 index 000000000..1b9e8347a --- /dev/null +++ b/hw/misc/exynos4210_rng.c @@ -0,0 +1,277 @@ +/* + * Exynos4210 Pseudo Random Nubmer Generator Emulation + * + * Copyright (c) 2017 Krzysztof Kozlowski + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . + */ + +#include "qemu/osdep.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "qapi/error.h" +#include "qemu/log.h" +#include "qemu/guest-random.h" +#include "qemu/module.h" +#include "qom/object.h" + +#define DEBUG_EXYNOS_RNG 0 + +#define DPRINTF(fmt, ...) \ + do { \ + if (DEBUG_EXYNOS_RNG) { \ + printf("exynos4210_rng: " fmt, ## __VA_ARGS__); \ + } \ + } while (0) + +#define TYPE_EXYNOS4210_RNG "exynos4210.rng" +OBJECT_DECLARE_SIMPLE_TYPE(Exynos4210RngState, EXYNOS4210_RNG) + +/* + * Exynos4220, PRNG, only polling mode is supported. + */ + +/* RNG_CONTROL_1 register bitfields, reset value: 0x0 */ +#define EXYNOS4210_RNG_CONTROL_1_PRNG 0x8 +#define EXYNOS4210_RNG_CONTROL_1_START_INIT BIT(4) +/* RNG_STATUS register bitfields, reset value: 0x1 */ +#define EXYNOS4210_RNG_STATUS_PRNG_ERROR BIT(7) +#define EXYNOS4210_RNG_STATUS_PRNG_DONE BIT(5) +#define EXYNOS4210_RNG_STATUS_MSG_DONE BIT(4) +#define EXYNOS4210_RNG_STATUS_PARTIAL_DONE BIT(3) +#define EXYNOS4210_RNG_STATUS_PRNG_BUSY BIT(2) +#define EXYNOS4210_RNG_STATUS_SEED_SETTING_DONE BIT(1) +#define EXYNOS4210_RNG_STATUS_BUFFER_READY BIT(0) +#define EXYNOS4210_RNG_STATUS_WRITE_MASK (EXYNOS4210_RNG_STATUS_PRNG_DONE \ + | EXYNOS4210_RNG_STATUS_MSG_DONE \ + | EXYNOS4210_RNG_STATUS_PARTIAL_DONE) + +#define EXYNOS4210_RNG_CONTROL_1 0x0 +#define EXYNOS4210_RNG_STATUS 0x10 +#define EXYNOS4210_RNG_SEED_IN 0x140 +#define EXYNOS4210_RNG_SEED_IN_OFFSET(n) (EXYNOS4210_RNG_SEED_IN + (n * 0x4)) +#define EXYNOS4210_RNG_PRNG 0x160 +#define EXYNOS4210_RNG_PRNG_OFFSET(n) (EXYNOS4210_RNG_PRNG + (n * 0x4)) + +#define EXYNOS4210_RNG_PRNG_NUM 5 + +#define EXYNOS4210_RNG_REGS_MEM_SIZE 0x200 + +struct Exynos4210RngState { + SysBusDevice parent_obj; + MemoryRegion iomem; + + int32_t randr_value[EXYNOS4210_RNG_PRNG_NUM]; + /* bits from 0 to EXYNOS4210_RNG_PRNG_NUM if given seed register was set */ + uint32_t seed_set; + + /* Register values */ + uint32_t reg_control; + uint32_t reg_status; +}; + +static bool exynos4210_rng_seed_ready(const Exynos4210RngState *s) +{ + uint32_t mask = MAKE_64BIT_MASK(0, EXYNOS4210_RNG_PRNG_NUM); + + /* Return true if all the seed-set bits are set. */ + return (s->seed_set & mask) == mask; +} + +static void exynos4210_rng_set_seed(Exynos4210RngState *s, unsigned int i, + uint64_t val) +{ + /* + * We actually ignore the seed and always generate true random numbers. + * Theoretically this should not match the device as Exynos has + * a Pseudo Random Number Generator but testing shown that it always + * generates random numbers regardless of the seed value. + */ + s->seed_set |= BIT(i); + + /* If all seeds were written, update the status to reflect it */ + if (exynos4210_rng_seed_ready(s)) { + s->reg_status |= EXYNOS4210_RNG_STATUS_SEED_SETTING_DONE; + } else { + s->reg_status &= ~EXYNOS4210_RNG_STATUS_SEED_SETTING_DONE; + } +} + +static void exynos4210_rng_run_engine(Exynos4210RngState *s) +{ + Error *err = NULL; + + /* Seed set? */ + if ((s->reg_status & EXYNOS4210_RNG_STATUS_SEED_SETTING_DONE) == 0) { + goto out; + } + + /* PRNG engine chosen? */ + if ((s->reg_control & EXYNOS4210_RNG_CONTROL_1_PRNG) == 0) { + goto out; + } + + /* PRNG engine started? 
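+     * (START_INIT behaves as a self-clearing trigger here: whether or not
+     * numbers are produced, it is cleared again at the "out" label below.)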
*/ + if ((s->reg_control & EXYNOS4210_RNG_CONTROL_1_START_INIT) == 0) { + goto out; + } + + /* Get randoms */ + if (qemu_guest_getrandom(s->randr_value, sizeof(s->randr_value), &err)) { + error_report_err(err); + } else { + /* Notify that PRNG is ready */ + s->reg_status |= EXYNOS4210_RNG_STATUS_PRNG_DONE; + } + +out: + /* Always clear start engine bit */ + s->reg_control &= ~EXYNOS4210_RNG_CONTROL_1_START_INIT; +} + +static uint64_t exynos4210_rng_read(void *opaque, hwaddr offset, + unsigned size) +{ + Exynos4210RngState *s = (Exynos4210RngState *)opaque; + uint32_t val = 0; + + assert(size == 4); + + switch (offset) { + case EXYNOS4210_RNG_CONTROL_1: + val = s->reg_control; + break; + + case EXYNOS4210_RNG_STATUS: + val = s->reg_status; + break; + + case EXYNOS4210_RNG_PRNG_OFFSET(0): + case EXYNOS4210_RNG_PRNG_OFFSET(1): + case EXYNOS4210_RNG_PRNG_OFFSET(2): + case EXYNOS4210_RNG_PRNG_OFFSET(3): + case EXYNOS4210_RNG_PRNG_OFFSET(4): + val = s->randr_value[(offset - EXYNOS4210_RNG_PRNG_OFFSET(0)) / 4]; + DPRINTF("returning random @0x%" HWADDR_PRIx ": 0x%" PRIx32 "\n", + offset, val); + break; + + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: bad read offset 0x%" HWADDR_PRIx "\n", + __func__, offset); + } + + return val; +} + +static void exynos4210_rng_write(void *opaque, hwaddr offset, + uint64_t val, unsigned size) +{ + Exynos4210RngState *s = (Exynos4210RngState *)opaque; + + assert(size == 4); + + switch (offset) { + case EXYNOS4210_RNG_CONTROL_1: + DPRINTF("RNG_CONTROL_1 = 0x%" PRIx64 "\n", val); + s->reg_control = val; + exynos4210_rng_run_engine(s); + break; + + case EXYNOS4210_RNG_STATUS: + /* For clearing status fields */ + s->reg_status &= ~EXYNOS4210_RNG_STATUS_WRITE_MASK; + s->reg_status |= val & EXYNOS4210_RNG_STATUS_WRITE_MASK; + break; + + case EXYNOS4210_RNG_SEED_IN_OFFSET(0): + case EXYNOS4210_RNG_SEED_IN_OFFSET(1): + case EXYNOS4210_RNG_SEED_IN_OFFSET(2): + case EXYNOS4210_RNG_SEED_IN_OFFSET(3): + case EXYNOS4210_RNG_SEED_IN_OFFSET(4): + exynos4210_rng_set_seed(s, + (offset - EXYNOS4210_RNG_SEED_IN_OFFSET(0)) / 4, + val); + break; + + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: bad write offset 0x%" HWADDR_PRIx "\n", + __func__, offset); + } +} + +static const MemoryRegionOps exynos4210_rng_ops = { + .read = exynos4210_rng_read, + .write = exynos4210_rng_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void exynos4210_rng_reset(DeviceState *dev) +{ + Exynos4210RngState *s = EXYNOS4210_RNG(dev); + + s->reg_control = 0; + s->reg_status = EXYNOS4210_RNG_STATUS_BUFFER_READY; + memset(s->randr_value, 0, sizeof(s->randr_value)); + s->seed_set = 0; +} + +static void exynos4210_rng_init(Object *obj) +{ + Exynos4210RngState *s = EXYNOS4210_RNG(obj); + SysBusDevice *dev = SYS_BUS_DEVICE(obj); + + memory_region_init_io(&s->iomem, obj, &exynos4210_rng_ops, s, + TYPE_EXYNOS4210_RNG, EXYNOS4210_RNG_REGS_MEM_SIZE); + sysbus_init_mmio(dev, &s->iomem); +} + +static const VMStateDescription exynos4210_rng_vmstate = { + .name = TYPE_EXYNOS4210_RNG, + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_INT32_ARRAY(randr_value, Exynos4210RngState, + EXYNOS4210_RNG_PRNG_NUM), + VMSTATE_UINT32(seed_set, Exynos4210RngState), + VMSTATE_UINT32(reg_status, Exynos4210RngState), + VMSTATE_UINT32(reg_control, Exynos4210RngState), + VMSTATE_END_OF_LIST() + } +}; + +static void exynos4210_rng_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = exynos4210_rng_reset; + dc->vmsd = 
&exynos4210_rng_vmstate; +} + +static const TypeInfo exynos4210_rng_info = { + .name = TYPE_EXYNOS4210_RNG, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(Exynos4210RngState), + .instance_init = exynos4210_rng_init, + .class_init = exynos4210_rng_class_init, +}; + +static void exynos4210_rng_register(void) +{ + type_register_static(&exynos4210_rng_info); +} + +type_init(exynos4210_rng_register) diff --git a/hw/misc/grlib_ahb_apb_pnp.c b/hw/misc/grlib_ahb_apb_pnp.c new file mode 100644 index 000000000..43e001c3c --- /dev/null +++ b/hw/misc/grlib_ahb_apb_pnp.c @@ -0,0 +1,301 @@ +/* + * GRLIB AHB APB PNP + * + * Copyright (C) 2019 AdaCore + * + * Developed by : + * Frederic Konrad + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . + * + */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "hw/sysbus.h" +#include "hw/misc/grlib_ahb_apb_pnp.h" +#include "trace.h" + +#define GRLIB_PNP_VENDOR_SHIFT (24) +#define GRLIB_PNP_VENDOR_SIZE (8) +#define GRLIB_PNP_DEV_SHIFT (12) +#define GRLIB_PNP_DEV_SIZE (12) +#define GRLIB_PNP_VER_SHIFT (5) +#define GRLIB_PNP_VER_SIZE (5) +#define GRLIB_PNP_IRQ_SHIFT (0) +#define GRLIB_PNP_IRQ_SIZE (5) +#define GRLIB_PNP_ADDR_SHIFT (20) +#define GRLIB_PNP_ADDR_SIZE (12) +#define GRLIB_PNP_MASK_SHIFT (4) +#define GRLIB_PNP_MASK_SIZE (12) + +#define GRLIB_AHB_DEV_ADDR_SHIFT (20) +#define GRLIB_AHB_DEV_ADDR_SIZE (12) +#define GRLIB_AHB_ENTRY_SIZE (0x20) +#define GRLIB_AHB_MAX_DEV (64) +#define GRLIB_AHB_SLAVE_OFFSET (0x800) + +#define GRLIB_APB_DEV_ADDR_SHIFT (8) +#define GRLIB_APB_DEV_ADDR_SIZE (12) +#define GRLIB_APB_ENTRY_SIZE (0x08) +#define GRLIB_APB_MAX_DEV (512) + +#define GRLIB_PNP_MAX_REGS (0x1000) + +typedef struct AHBPnp { + SysBusDevice parent_obj; + MemoryRegion iomem; + + uint32_t regs[GRLIB_PNP_MAX_REGS >> 2]; + uint8_t master_count; + uint8_t slave_count; +} AHBPnp; + +void grlib_ahb_pnp_add_entry(AHBPnp *dev, uint32_t address, uint32_t mask, + uint8_t vendor, uint16_t device, int slave, + int type) +{ + unsigned int reg_start; + + /* + * AHB entries look like this: + * + * 31 -------- 23 -------- 11 ----- 9 -------- 4 --- 0 + * | VENDOR ID | DEVICE ID | IRQ ? 
| VERSION | IRQ | + * -------------------------------------------------- + * | USER | + * -------------------------------------------------- + * | USER | + * -------------------------------------------------- + * | USER | + * -------------------------------------------------- + * | USER | + * -------------------------------------------------- + * 31 ----------- 20 --- 15 ----------------- 3 ---- 0 + * | ADDR[31..12] | 00PC | MASK | TYPE | + * -------------------------------------------------- + * 31 ----------- 20 --- 15 ----------------- 3 ---- 0 + * | ADDR[31..12] | 00PC | MASK | TYPE | + * -------------------------------------------------- + * 31 ----------- 20 --- 15 ----------------- 3 ---- 0 + * | ADDR[31..12] | 00PC | MASK | TYPE | + * -------------------------------------------------- + * 31 ----------- 20 --- 15 ----------------- 3 ---- 0 + * | ADDR[31..12] | 00PC | MASK | TYPE | + * -------------------------------------------------- + */ + + if (slave) { + assert(dev->slave_count < GRLIB_AHB_MAX_DEV); + reg_start = (GRLIB_AHB_SLAVE_OFFSET + + (dev->slave_count * GRLIB_AHB_ENTRY_SIZE)) >> 2; + dev->slave_count++; + } else { + assert(dev->master_count < GRLIB_AHB_MAX_DEV); + reg_start = (dev->master_count * GRLIB_AHB_ENTRY_SIZE) >> 2; + dev->master_count++; + } + + dev->regs[reg_start] = deposit32(dev->regs[reg_start], + GRLIB_PNP_VENDOR_SHIFT, + GRLIB_PNP_VENDOR_SIZE, + vendor); + dev->regs[reg_start] = deposit32(dev->regs[reg_start], + GRLIB_PNP_DEV_SHIFT, + GRLIB_PNP_DEV_SIZE, + device); + reg_start += 4; + /* AHB Memory Space */ + dev->regs[reg_start] = type; + dev->regs[reg_start] = deposit32(dev->regs[reg_start], + GRLIB_PNP_ADDR_SHIFT, + GRLIB_PNP_ADDR_SIZE, + extract32(address, + GRLIB_AHB_DEV_ADDR_SHIFT, + GRLIB_AHB_DEV_ADDR_SIZE)); + dev->regs[reg_start] = deposit32(dev->regs[reg_start], + GRLIB_PNP_MASK_SHIFT, + GRLIB_PNP_MASK_SIZE, + mask); +} + +static uint64_t grlib_ahb_pnp_read(void *opaque, hwaddr offset, unsigned size) +{ + AHBPnp *ahb_pnp = GRLIB_AHB_PNP(opaque); + uint32_t val; + + val = ahb_pnp->regs[offset >> 2]; + trace_grlib_ahb_pnp_read(offset, val); + + return val; +} + +static void grlib_ahb_pnp_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + qemu_log_mask(LOG_UNIMP, "%s not implemented\n", __func__); +} + +static const MemoryRegionOps grlib_ahb_pnp_ops = { + .read = grlib_ahb_pnp_read, + .write = grlib_ahb_pnp_write, + .endianness = DEVICE_BIG_ENDIAN, + .impl = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +static void grlib_ahb_pnp_realize(DeviceState *dev, Error **errp) +{ + AHBPnp *ahb_pnp = GRLIB_AHB_PNP(dev); + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + + memory_region_init_io(&ahb_pnp->iomem, OBJECT(dev), &grlib_ahb_pnp_ops, + ahb_pnp, TYPE_GRLIB_AHB_PNP, GRLIB_PNP_MAX_REGS); + sysbus_init_mmio(sbd, &ahb_pnp->iomem); +} + +static void grlib_ahb_pnp_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = grlib_ahb_pnp_realize; +} + +static const TypeInfo grlib_ahb_pnp_info = { + .name = TYPE_GRLIB_AHB_PNP, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(AHBPnp), + .class_init = grlib_ahb_pnp_class_init, +}; + +/* APBPnp */ + +typedef struct APBPnp { + SysBusDevice parent_obj; + MemoryRegion iomem; + + uint32_t regs[GRLIB_PNP_MAX_REGS >> 2]; + uint32_t entry_count; +} APBPnp; + +void grlib_apb_pnp_add_entry(APBPnp *dev, uint32_t address, uint32_t mask, + uint8_t vendor, uint16_t device, uint8_t version, + uint8_t irq, int type) +{ + unsigned int reg_start; 
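+
+    /*
+     * Illustrative use (hypothetical values, not taken from a real board):
+     * a Gaisler APBUART-style device (vendor 1, device 0x00C, version 1)
+     * decoded at 0x80000100 with a 256-byte window on IRQ 2 could be
+     * registered as
+     *     grlib_apb_pnp_add_entry(pnp, 0x80000100, 0xFFF,
+     *                             1, 0x00C, 1, 2, 1);
+     * where the final 1 selects the APB I/O area type.
+     */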
+ + /* + * APB entries look like this: + * + * 31 -------- 23 -------- 11 ----- 9 ------- 4 --- 0 + * | VENDOR ID | DEVICE ID | IRQ ? | VERSION | IRQ | + * + * 31 ---------- 20 --- 15 ----------------- 3 ---- 0 + * | ADDR[20..8] | 0000 | MASK | TYPE | + */ + + assert(dev->entry_count < GRLIB_APB_MAX_DEV); + reg_start = (dev->entry_count * GRLIB_APB_ENTRY_SIZE) >> 2; + dev->entry_count++; + + dev->regs[reg_start] = deposit32(dev->regs[reg_start], + GRLIB_PNP_VENDOR_SHIFT, + GRLIB_PNP_VENDOR_SIZE, + vendor); + dev->regs[reg_start] = deposit32(dev->regs[reg_start], + GRLIB_PNP_DEV_SHIFT, + GRLIB_PNP_DEV_SIZE, + device); + dev->regs[reg_start] = deposit32(dev->regs[reg_start], + GRLIB_PNP_VER_SHIFT, + GRLIB_PNP_VER_SIZE, + version); + dev->regs[reg_start] = deposit32(dev->regs[reg_start], + GRLIB_PNP_IRQ_SHIFT, + GRLIB_PNP_IRQ_SIZE, + irq); + reg_start += 1; + dev->regs[reg_start] = type; + dev->regs[reg_start] = deposit32(dev->regs[reg_start], + GRLIB_PNP_ADDR_SHIFT, + GRLIB_PNP_ADDR_SIZE, + extract32(address, + GRLIB_APB_DEV_ADDR_SHIFT, + GRLIB_APB_DEV_ADDR_SIZE)); + dev->regs[reg_start] = deposit32(dev->regs[reg_start], + GRLIB_PNP_MASK_SHIFT, + GRLIB_PNP_MASK_SIZE, + mask); +} + +static uint64_t grlib_apb_pnp_read(void *opaque, hwaddr offset, unsigned size) +{ + APBPnp *apb_pnp = GRLIB_APB_PNP(opaque); + uint32_t val; + + val = apb_pnp->regs[offset >> 2]; + trace_grlib_apb_pnp_read(offset, val); + + return val; +} + +static void grlib_apb_pnp_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + qemu_log_mask(LOG_UNIMP, "%s not implemented\n", __func__); +} + +static const MemoryRegionOps grlib_apb_pnp_ops = { + .read = grlib_apb_pnp_read, + .write = grlib_apb_pnp_write, + .endianness = DEVICE_BIG_ENDIAN, + .impl = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +static void grlib_apb_pnp_realize(DeviceState *dev, Error **errp) +{ + APBPnp *apb_pnp = GRLIB_APB_PNP(dev); + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + + memory_region_init_io(&apb_pnp->iomem, OBJECT(dev), &grlib_apb_pnp_ops, + apb_pnp, TYPE_GRLIB_APB_PNP, GRLIB_PNP_MAX_REGS); + sysbus_init_mmio(sbd, &apb_pnp->iomem); +} + +static void grlib_apb_pnp_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = grlib_apb_pnp_realize; +} + +static const TypeInfo grlib_apb_pnp_info = { + .name = TYPE_GRLIB_APB_PNP, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(APBPnp), + .class_init = grlib_apb_pnp_class_init, +}; + +static void grlib_ahb_apb_pnp_register_types(void) +{ + type_register_static(&grlib_ahb_pnp_info); + type_register_static(&grlib_apb_pnp_info); +} + +type_init(grlib_ahb_apb_pnp_register_types) diff --git a/hw/misc/imx25_ccm.c b/hw/misc/imx25_ccm.c new file mode 100644 index 000000000..ff996e2f2 --- /dev/null +++ b/hw/misc/imx25_ccm.c @@ -0,0 +1,320 @@ +/* + * IMX25 Clock Control Module + * + * Copyright (C) 2012 NICTA + * Updated by Jean-Christophe Dubois + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + * To get the timer frequencies right, we need to emulate at least part of + * the CCM. + */ + +#include "qemu/osdep.h" +#include "hw/misc/imx25_ccm.h" +#include "migration/vmstate.h" +#include "qemu/log.h" +#include "qemu/module.h" + +#ifndef DEBUG_IMX25_CCM +#define DEBUG_IMX25_CCM 0 +#endif + +#define DPRINTF(fmt, args...) 
\ + do { \ + if (DEBUG_IMX25_CCM) { \ + fprintf(stderr, "[%s]%s: " fmt , TYPE_IMX25_CCM, \ + __func__, ##args); \ + } \ + } while (0) + +static const char *imx25_ccm_reg_name(uint32_t reg) +{ + static char unknown[20]; + + switch (reg) { + case IMX25_CCM_MPCTL_REG: + return "mpctl"; + case IMX25_CCM_UPCTL_REG: + return "upctl"; + case IMX25_CCM_CCTL_REG: + return "cctl"; + case IMX25_CCM_CGCR0_REG: + return "cgcr0"; + case IMX25_CCM_CGCR1_REG: + return "cgcr1"; + case IMX25_CCM_CGCR2_REG: + return "cgcr2"; + case IMX25_CCM_PCDR0_REG: + return "pcdr0"; + case IMX25_CCM_PCDR1_REG: + return "pcdr1"; + case IMX25_CCM_PCDR2_REG: + return "pcdr2"; + case IMX25_CCM_PCDR3_REG: + return "pcdr3"; + case IMX25_CCM_RCSR_REG: + return "rcsr"; + case IMX25_CCM_CRDR_REG: + return "crdr"; + case IMX25_CCM_DCVR0_REG: + return "dcvr0"; + case IMX25_CCM_DCVR1_REG: + return "dcvr1"; + case IMX25_CCM_DCVR2_REG: + return "dcvr2"; + case IMX25_CCM_DCVR3_REG: + return "dcvr3"; + case IMX25_CCM_LTR0_REG: + return "ltr0"; + case IMX25_CCM_LTR1_REG: + return "ltr1"; + case IMX25_CCM_LTR2_REG: + return "ltr2"; + case IMX25_CCM_LTR3_REG: + return "ltr3"; + case IMX25_CCM_LTBR0_REG: + return "ltbr0"; + case IMX25_CCM_LTBR1_REG: + return "ltbr1"; + case IMX25_CCM_PMCR0_REG: + return "pmcr0"; + case IMX25_CCM_PMCR1_REG: + return "pmcr1"; + case IMX25_CCM_PMCR2_REG: + return "pmcr2"; + case IMX25_CCM_MCR_REG: + return "mcr"; + case IMX25_CCM_LPIMR0_REG: + return "lpimr0"; + case IMX25_CCM_LPIMR1_REG: + return "lpimr1"; + default: + sprintf(unknown, "[%u ?]", reg); + return unknown; + } +} +#define CKIH_FREQ 24000000 /* 24MHz crystal input */ + +static const VMStateDescription vmstate_imx25_ccm = { + .name = TYPE_IMX25_CCM, + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32_ARRAY(reg, IMX25CCMState, IMX25_CCM_MAX_REG), + VMSTATE_END_OF_LIST() + }, +}; + +static uint32_t imx25_ccm_get_mpll_clk(IMXCCMState *dev) +{ + uint32_t freq; + IMX25CCMState *s = IMX25_CCM(dev); + + if (EXTRACT(s->reg[IMX25_CCM_CCTL_REG], MPLL_BYPASS)) { + freq = CKIH_FREQ; + } else { + freq = imx_ccm_calc_pll(s->reg[IMX25_CCM_MPCTL_REG], CKIH_FREQ); + } + + DPRINTF("freq = %u\n", freq); + + return freq; +} + +static uint32_t imx25_ccm_get_mcu_clk(IMXCCMState *dev) +{ + uint32_t freq; + IMX25CCMState *s = IMX25_CCM(dev); + + freq = imx25_ccm_get_mpll_clk(dev); + + if (EXTRACT(s->reg[IMX25_CCM_CCTL_REG], ARM_SRC)) { + freq = (freq * 3 / 4); + } + + freq = freq / (1 + EXTRACT(s->reg[IMX25_CCM_CCTL_REG], ARM_CLK_DIV)); + + DPRINTF("freq = %u\n", freq); + + return freq; +} + +static uint32_t imx25_ccm_get_ahb_clk(IMXCCMState *dev) +{ + uint32_t freq; + IMX25CCMState *s = IMX25_CCM(dev); + + freq = imx25_ccm_get_mcu_clk(dev) + / (1 + EXTRACT(s->reg[IMX25_CCM_CCTL_REG], AHB_CLK_DIV)); + + DPRINTF("freq = %u\n", freq); + + return freq; +} + +static uint32_t imx25_ccm_get_ipg_clk(IMXCCMState *dev) +{ + uint32_t freq; + + freq = imx25_ccm_get_ahb_clk(dev) / 2; + + DPRINTF("freq = %u\n", freq); + + return freq; +} + +static uint32_t imx25_ccm_get_clock_frequency(IMXCCMState *dev, IMXClk clock) +{ + uint32_t freq = 0; + DPRINTF("Clock = %d)\n", clock); + + switch (clock) { + case CLK_NONE: + break; + case CLK_IPG: + case CLK_IPG_HIGH: + freq = imx25_ccm_get_ipg_clk(dev); + break; + case CLK_32k: + freq = CKIL_FREQ; + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: unsupported clock %d\n", + TYPE_IMX25_CCM, __func__, clock); + break; + } + + DPRINTF("Clock = %d) = %u\n", clock, freq); + + return freq; +} + 
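+
+/*
+ * Sketch of the arithmetic behind the reset values used below (bit-field
+ * positions as defined in imx25_ccm.h): MPCTL = 0x800b2c01 with the 24 MHz
+ * CKIH gives MPLL = 2 * 24 MHz * (11 * 12 + 1) / (12 * 1) = 532 MHz, and
+ * CCTL = 0xd0030000 (ARM_CLK_DIV = 3, AHB_CLK_DIV = 1, no bypass) then
+ * yields CPU = 532 / 4 = 133 MHz, AHB = 133 / 2 = 66.5 MHz and
+ * IPG = AHB / 2 = 33.25 MHz, consistent with the comment in the reset
+ * handler.
+ */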
+static void imx25_ccm_reset(DeviceState *dev) +{ + IMX25CCMState *s = IMX25_CCM(dev); + + DPRINTF("\n"); + + memset(s->reg, 0, IMX25_CCM_MAX_REG * sizeof(uint32_t)); + s->reg[IMX25_CCM_MPCTL_REG] = 0x800b2c01; + s->reg[IMX25_CCM_UPCTL_REG] = 0x84042800; + /* + * The value below gives: + * CPU = 133 MHz, AHB = 66,5 MHz, IPG = 33 MHz. + */ + s->reg[IMX25_CCM_CCTL_REG] = 0xd0030000; + s->reg[IMX25_CCM_CGCR0_REG] = 0x028A0100; + s->reg[IMX25_CCM_CGCR1_REG] = 0x04008100; + s->reg[IMX25_CCM_CGCR2_REG] = 0x00000438; + s->reg[IMX25_CCM_PCDR0_REG] = 0x01010101; + s->reg[IMX25_CCM_PCDR1_REG] = 0x01010101; + s->reg[IMX25_CCM_PCDR2_REG] = 0x01010101; + s->reg[IMX25_CCM_PCDR3_REG] = 0x01010101; + s->reg[IMX25_CCM_PMCR0_REG] = 0x00A00000; + s->reg[IMX25_CCM_PMCR1_REG] = 0x0000A030; + s->reg[IMX25_CCM_PMCR2_REG] = 0x0000A030; + s->reg[IMX25_CCM_MCR_REG] = 0x43000000; + + /* + * default boot will change the reset values to allow: + * CPU = 399 MHz, AHB = 133 MHz, IPG = 66,5 MHz. + * For some reason, this doesn't work. With the value below, linux + * detects a 88 MHz IPG CLK instead of 66,5 MHz. + s->reg[IMX25_CCM_CCTL_REG] = 0x20032000; + */ +} + +static uint64_t imx25_ccm_read(void *opaque, hwaddr offset, unsigned size) +{ + uint32_t value = 0; + IMX25CCMState *s = (IMX25CCMState *)opaque; + + if (offset < 0x70) { + value = s->reg[offset >> 2]; + } else { + qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Bad register at offset 0x%" + HWADDR_PRIx "\n", TYPE_IMX25_CCM, __func__, offset); + } + + DPRINTF("reg[%s] => 0x%" PRIx32 "\n", imx25_ccm_reg_name(offset >> 2), + value); + + return value; +} + +static void imx25_ccm_write(void *opaque, hwaddr offset, uint64_t value, + unsigned size) +{ + IMX25CCMState *s = (IMX25CCMState *)opaque; + + DPRINTF("reg[%s] <= 0x%" PRIx32 "\n", imx25_ccm_reg_name(offset >> 2), + (uint32_t)value); + + if (offset < 0x70) { + /* + * We will do a better implementation later. In particular some bits + * cannot be written to. + */ + s->reg[offset >> 2] = value; + } else { + qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Bad register at offset 0x%" + HWADDR_PRIx "\n", TYPE_IMX25_CCM, __func__, offset); + } +} + +static const struct MemoryRegionOps imx25_ccm_ops = { + .read = imx25_ccm_read, + .write = imx25_ccm_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + /* + * Our device would not work correctly if the guest was doing + * unaligned access. This might not be a limitation on the real + * device but in practice there is no reason for a guest to access + * this device unaligned. 
+ */ + .min_access_size = 4, + .max_access_size = 4, + .unaligned = false, + }, +}; + +static void imx25_ccm_init(Object *obj) +{ + DeviceState *dev = DEVICE(obj); + SysBusDevice *sd = SYS_BUS_DEVICE(obj); + IMX25CCMState *s = IMX25_CCM(obj); + + memory_region_init_io(&s->iomem, OBJECT(dev), &imx25_ccm_ops, s, + TYPE_IMX25_CCM, 0x1000); + sysbus_init_mmio(sd, &s->iomem); +} + +static void imx25_ccm_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + IMXCCMClass *ccm = IMX_CCM_CLASS(klass); + + dc->reset = imx25_ccm_reset; + dc->vmsd = &vmstate_imx25_ccm; + dc->desc = "i.MX25 Clock Control Module"; + + ccm->get_clock_frequency = imx25_ccm_get_clock_frequency; +} + +static const TypeInfo imx25_ccm_info = { + .name = TYPE_IMX25_CCM, + .parent = TYPE_IMX_CCM, + .instance_size = sizeof(IMX25CCMState), + .instance_init = imx25_ccm_init, + .class_init = imx25_ccm_class_init, +}; + +static void imx25_ccm_register_types(void) +{ + type_register_static(&imx25_ccm_info); +} + +type_init(imx25_ccm_register_types) diff --git a/hw/misc/imx31_ccm.c b/hw/misc/imx31_ccm.c new file mode 100644 index 000000000..ad30a4b2c --- /dev/null +++ b/hw/misc/imx31_ccm.c @@ -0,0 +1,347 @@ +/* + * IMX31 Clock Control Module + * + * Copyright (C) 2012 NICTA + * Updated by Jean-Christophe Dubois + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + * To get the timer frequencies right, we need to emulate at least part of + * the i.MX31 CCM. + */ + +#include "qemu/osdep.h" +#include "hw/misc/imx31_ccm.h" +#include "migration/vmstate.h" +#include "qemu/log.h" +#include "qemu/module.h" + +#define CKIH_FREQ 26000000 /* 26MHz crystal input */ + +#ifndef DEBUG_IMX31_CCM +#define DEBUG_IMX31_CCM 0 +#endif + +#define DPRINTF(fmt, args...) 
\ + do { \ + if (DEBUG_IMX31_CCM) { \ + fprintf(stderr, "[%s]%s: " fmt , TYPE_IMX31_CCM, \ + __func__, ##args); \ + } \ + } while (0) + +static const char *imx31_ccm_reg_name(uint32_t reg) +{ + static char unknown[20]; + + switch (reg) { + case IMX31_CCM_CCMR_REG: + return "CCMR"; + case IMX31_CCM_PDR0_REG: + return "PDR0"; + case IMX31_CCM_PDR1_REG: + return "PDR1"; + case IMX31_CCM_RCSR_REG: + return "RCSR"; + case IMX31_CCM_MPCTL_REG: + return "MPCTL"; + case IMX31_CCM_UPCTL_REG: + return "UPCTL"; + case IMX31_CCM_SPCTL_REG: + return "SPCTL"; + case IMX31_CCM_COSR_REG: + return "COSR"; + case IMX31_CCM_CGR0_REG: + return "CGR0"; + case IMX31_CCM_CGR1_REG: + return "CGR1"; + case IMX31_CCM_CGR2_REG: + return "CGR2"; + case IMX31_CCM_WIMR_REG: + return "WIMR"; + case IMX31_CCM_LDC_REG: + return "LDC"; + case IMX31_CCM_DCVR0_REG: + return "DCVR0"; + case IMX31_CCM_DCVR1_REG: + return "DCVR1"; + case IMX31_CCM_DCVR2_REG: + return "DCVR2"; + case IMX31_CCM_DCVR3_REG: + return "DCVR3"; + case IMX31_CCM_LTR0_REG: + return "LTR0"; + case IMX31_CCM_LTR1_REG: + return "LTR1"; + case IMX31_CCM_LTR2_REG: + return "LTR2"; + case IMX31_CCM_LTR3_REG: + return "LTR3"; + case IMX31_CCM_LTBR0_REG: + return "LTBR0"; + case IMX31_CCM_LTBR1_REG: + return "LTBR1"; + case IMX31_CCM_PMCR0_REG: + return "PMCR0"; + case IMX31_CCM_PMCR1_REG: + return "PMCR1"; + case IMX31_CCM_PDR2_REG: + return "PDR2"; + default: + sprintf(unknown, "[%u ?]", reg); + return unknown; + } +} + +static const VMStateDescription vmstate_imx31_ccm = { + .name = TYPE_IMX31_CCM, + .version_id = 2, + .minimum_version_id = 2, + .fields = (VMStateField[]) { + VMSTATE_UINT32_ARRAY(reg, IMX31CCMState, IMX31_CCM_MAX_REG), + VMSTATE_END_OF_LIST() + }, +}; + +static uint32_t imx31_ccm_get_pll_ref_clk(IMXCCMState *dev) +{ + uint32_t freq = 0; + IMX31CCMState *s = IMX31_CCM(dev); + + if ((s->reg[IMX31_CCM_CCMR_REG] & CCMR_PRCS) == 2) { + if (s->reg[IMX31_CCM_CCMR_REG] & CCMR_FPME) { + freq = CKIL_FREQ; + if (s->reg[IMX31_CCM_CCMR_REG] & CCMR_FPMF) { + freq *= 1024; + } + } + } else { + freq = CKIH_FREQ; + } + + DPRINTF("freq = %u\n", freq); + + return freq; +} + +static uint32_t imx31_ccm_get_mpll_clk(IMXCCMState *dev) +{ + uint32_t freq; + IMX31CCMState *s = IMX31_CCM(dev); + + freq = imx_ccm_calc_pll(s->reg[IMX31_CCM_MPCTL_REG], + imx31_ccm_get_pll_ref_clk(dev)); + + DPRINTF("freq = %u\n", freq); + + return freq; +} + +static uint32_t imx31_ccm_get_mcu_main_clk(IMXCCMState *dev) +{ + uint32_t freq; + IMX31CCMState *s = IMX31_CCM(dev); + + if ((s->reg[IMX31_CCM_CCMR_REG] & CCMR_MDS) || + !(s->reg[IMX31_CCM_CCMR_REG] & CCMR_MPE)) { + freq = imx31_ccm_get_pll_ref_clk(dev); + } else { + freq = imx31_ccm_get_mpll_clk(dev); + } + + DPRINTF("freq = %u\n", freq); + + return freq; +} + +static uint32_t imx31_ccm_get_hclk_clk(IMXCCMState *dev) +{ + uint32_t freq; + IMX31CCMState *s = IMX31_CCM(dev); + + freq = imx31_ccm_get_mcu_main_clk(dev) + / (1 + EXTRACT(s->reg[IMX31_CCM_PDR0_REG], MAX)); + + DPRINTF("freq = %u\n", freq); + + return freq; +} + +static uint32_t imx31_ccm_get_ipg_clk(IMXCCMState *dev) +{ + uint32_t freq; + IMX31CCMState *s = IMX31_CCM(dev); + + freq = imx31_ccm_get_hclk_clk(dev) + / (1 + EXTRACT(s->reg[IMX31_CCM_PDR0_REG], IPG)); + + DPRINTF("freq = %u\n", freq); + + return freq; +} + +static uint32_t imx31_ccm_get_clock_frequency(IMXCCMState *dev, IMXClk clock) +{ + uint32_t freq = 0; + + switch (clock) { + case CLK_NONE: + break; + case CLK_IPG: + case CLK_IPG_HIGH: + freq = imx31_ccm_get_ipg_clk(dev); + break; + case CLK_32k: + freq = 
CKIL_FREQ; + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: unsupported clock %d\n", + TYPE_IMX31_CCM, __func__, clock); + break; + } + + DPRINTF("Clock = %d) = %u\n", clock, freq); + + return freq; +} + +static void imx31_ccm_reset(DeviceState *dev) +{ + IMX31CCMState *s = IMX31_CCM(dev); + + DPRINTF("()\n"); + + memset(s->reg, 0, sizeof(uint32_t) * IMX31_CCM_MAX_REG); + + s->reg[IMX31_CCM_CCMR_REG] = 0x074b0b7d; + s->reg[IMX31_CCM_PDR0_REG] = 0xff870b48; + s->reg[IMX31_CCM_PDR1_REG] = 0x49fcfe7f; + s->reg[IMX31_CCM_RCSR_REG] = 0x007f0000; + s->reg[IMX31_CCM_MPCTL_REG] = 0x04001800; + s->reg[IMX31_CCM_UPCTL_REG] = 0x04051c03; + s->reg[IMX31_CCM_SPCTL_REG] = 0x04043001; + s->reg[IMX31_CCM_COSR_REG] = 0x00000280; + s->reg[IMX31_CCM_CGR0_REG] = 0xffffffff; + s->reg[IMX31_CCM_CGR1_REG] = 0xffffffff; + s->reg[IMX31_CCM_CGR2_REG] = 0xffffffff; + s->reg[IMX31_CCM_WIMR_REG] = 0xffffffff; + s->reg[IMX31_CCM_LTR1_REG] = 0x00004040; + s->reg[IMX31_CCM_PMCR0_REG] = 0x80209828; + s->reg[IMX31_CCM_PMCR1_REG] = 0x00aa0000; + s->reg[IMX31_CCM_PDR2_REG] = 0x00000285; +} + +static uint64_t imx31_ccm_read(void *opaque, hwaddr offset, unsigned size) +{ + uint32_t value = 0; + IMX31CCMState *s = (IMX31CCMState *)opaque; + + if ((offset >> 2) < IMX31_CCM_MAX_REG) { + value = s->reg[offset >> 2]; + } else { + qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Bad register at offset 0x%" + HWADDR_PRIx "\n", TYPE_IMX31_CCM, __func__, offset); + } + + DPRINTF("reg[%s] => 0x%" PRIx32 "\n", imx31_ccm_reg_name(offset >> 2), + value); + + return (uint64_t)value; +} + +static void imx31_ccm_write(void *opaque, hwaddr offset, uint64_t value, + unsigned size) +{ + IMX31CCMState *s = (IMX31CCMState *)opaque; + + DPRINTF("reg[%s] <= 0x%" PRIx32 "\n", imx31_ccm_reg_name(offset >> 2), + (uint32_t)value); + + switch (offset >> 2) { + case IMX31_CCM_CCMR_REG: + s->reg[IMX31_CCM_CCMR_REG] = CCMR_FPMF | (value & 0x3b6fdfff); + break; + case IMX31_CCM_PDR0_REG: + s->reg[IMX31_CCM_PDR0_REG] = value & 0xff9f3fff; + break; + case IMX31_CCM_PDR1_REG: + s->reg[IMX31_CCM_PDR1_REG] = value; + break; + case IMX31_CCM_MPCTL_REG: + s->reg[IMX31_CCM_MPCTL_REG] = value & 0xbfff3fff; + break; + case IMX31_CCM_SPCTL_REG: + s->reg[IMX31_CCM_SPCTL_REG] = value & 0xbfff3fff; + break; + case IMX31_CCM_CGR0_REG: + s->reg[IMX31_CCM_CGR0_REG] = value; + break; + case IMX31_CCM_CGR1_REG: + s->reg[IMX31_CCM_CGR1_REG] = value; + break; + case IMX31_CCM_CGR2_REG: + s->reg[IMX31_CCM_CGR2_REG] = value; + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Bad register at offset 0x%" + HWADDR_PRIx "\n", TYPE_IMX31_CCM, __func__, offset); + break; + } +} + +static const struct MemoryRegionOps imx31_ccm_ops = { + .read = imx31_ccm_read, + .write = imx31_ccm_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + /* + * Our device would not work correctly if the guest was doing + * unaligned access. This might not be a limitation on the real + * device but in practice there is no reason for a guest to access + * this device unaligned. 
+ */ + .min_access_size = 4, + .max_access_size = 4, + .unaligned = false, + }, + +}; + +static void imx31_ccm_init(Object *obj) +{ + DeviceState *dev = DEVICE(obj); + SysBusDevice *sd = SYS_BUS_DEVICE(obj); + IMX31CCMState *s = IMX31_CCM(obj); + + memory_region_init_io(&s->iomem, OBJECT(dev), &imx31_ccm_ops, s, + TYPE_IMX31_CCM, 0x1000); + sysbus_init_mmio(sd, &s->iomem); +} + +static void imx31_ccm_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + IMXCCMClass *ccm = IMX_CCM_CLASS(klass); + + dc->reset = imx31_ccm_reset; + dc->vmsd = &vmstate_imx31_ccm; + dc->desc = "i.MX31 Clock Control Module"; + + ccm->get_clock_frequency = imx31_ccm_get_clock_frequency; +} + +static const TypeInfo imx31_ccm_info = { + .name = TYPE_IMX31_CCM, + .parent = TYPE_IMX_CCM, + .instance_size = sizeof(IMX31CCMState), + .instance_init = imx31_ccm_init, + .class_init = imx31_ccm_class_init, +}; + +static void imx31_ccm_register_types(void) +{ + type_register_static(&imx31_ccm_info); +} + +type_init(imx31_ccm_register_types) diff --git a/hw/misc/imx6_ccm.c b/hw/misc/imx6_ccm.c new file mode 100644 index 000000000..4c830fd89 --- /dev/null +++ b/hw/misc/imx6_ccm.c @@ -0,0 +1,783 @@ +/* + * IMX6 Clock Control Module + * + * Copyright (c) 2015 Jean-Christophe Dubois + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + * To get the timer frequencies right, we need to emulate at least part of + * the CCM. + */ + +#include "qemu/osdep.h" +#include "hw/misc/imx6_ccm.h" +#include "migration/vmstate.h" +#include "qemu/log.h" +#include "qemu/module.h" + +#ifndef DEBUG_IMX6_CCM +#define DEBUG_IMX6_CCM 0 +#endif + +#define DPRINTF(fmt, args...) \ + do { \ + if (DEBUG_IMX6_CCM) { \ + fprintf(stderr, "[%s]%s: " fmt , TYPE_IMX6_CCM, \ + __func__, ##args); \ + } \ + } while (0) + +static const char *imx6_ccm_reg_name(uint32_t reg) +{ + static char unknown[20]; + + switch (reg) { + case CCM_CCR: + return "CCR"; + case CCM_CCDR: + return "CCDR"; + case CCM_CSR: + return "CSR"; + case CCM_CCSR: + return "CCSR"; + case CCM_CACRR: + return "CACRR"; + case CCM_CBCDR: + return "CBCDR"; + case CCM_CBCMR: + return "CBCMR"; + case CCM_CSCMR1: + return "CSCMR1"; + case CCM_CSCMR2: + return "CSCMR2"; + case CCM_CSCDR1: + return "CSCDR1"; + case CCM_CS1CDR: + return "CS1CDR"; + case CCM_CS2CDR: + return "CS2CDR"; + case CCM_CDCDR: + return "CDCDR"; + case CCM_CHSCCDR: + return "CHSCCDR"; + case CCM_CSCDR2: + return "CSCDR2"; + case CCM_CSCDR3: + return "CSCDR3"; + case CCM_CDHIPR: + return "CDHIPR"; + case CCM_CTOR: + return "CTOR"; + case CCM_CLPCR: + return "CLPCR"; + case CCM_CISR: + return "CISR"; + case CCM_CIMR: + return "CIMR"; + case CCM_CCOSR: + return "CCOSR"; + case CCM_CGPR: + return "CGPR"; + case CCM_CCGR0: + return "CCGR0"; + case CCM_CCGR1: + return "CCGR1"; + case CCM_CCGR2: + return "CCGR2"; + case CCM_CCGR3: + return "CCGR3"; + case CCM_CCGR4: + return "CCGR4"; + case CCM_CCGR5: + return "CCGR5"; + case CCM_CCGR6: + return "CCGR6"; + case CCM_CMEOR: + return "CMEOR"; + default: + sprintf(unknown, "%u ?", reg); + return unknown; + } +} + +static const char *imx6_analog_reg_name(uint32_t reg) +{ + static char unknown[20]; + + switch (reg) { + case CCM_ANALOG_PLL_ARM: + return "PLL_ARM"; + case CCM_ANALOG_PLL_ARM_SET: + return "PLL_ARM_SET"; + case CCM_ANALOG_PLL_ARM_CLR: + return "PLL_ARM_CLR"; + case CCM_ANALOG_PLL_ARM_TOG: + return "PLL_ARM_TOG"; + case CCM_ANALOG_PLL_USB1: + return "PLL_USB1"; + 
"PLL_USB1";
+    case CCM_ANALOG_PLL_USB1_SET:
+        return "PLL_USB1_SET";
+    case CCM_ANALOG_PLL_USB1_CLR:
+        return "PLL_USB1_CLR";
+    case CCM_ANALOG_PLL_USB1_TOG:
+        return "PLL_USB1_TOG";
+    case CCM_ANALOG_PLL_USB2:
+        return "PLL_USB2";
+    case CCM_ANALOG_PLL_USB2_SET:
+        return "PLL_USB2_SET";
+    case CCM_ANALOG_PLL_USB2_CLR:
+        return "PLL_USB2_CLR";
+    case CCM_ANALOG_PLL_USB2_TOG:
+        return "PLL_USB2_TOG";
+    case CCM_ANALOG_PLL_SYS:
+        return "PLL_SYS";
+    case CCM_ANALOG_PLL_SYS_SET:
+        return "PLL_SYS_SET";
+    case CCM_ANALOG_PLL_SYS_CLR:
+        return "PLL_SYS_CLR";
+    case CCM_ANALOG_PLL_SYS_TOG:
+        return "PLL_SYS_TOG";
+    case CCM_ANALOG_PLL_SYS_SS:
+        return "PLL_SYS_SS";
+    case CCM_ANALOG_PLL_SYS_NUM:
+        return "PLL_SYS_NUM";
+    case CCM_ANALOG_PLL_SYS_DENOM:
+        return "PLL_SYS_DENOM";
+    case CCM_ANALOG_PLL_AUDIO:
+        return "PLL_AUDIO";
+    case CCM_ANALOG_PLL_AUDIO_SET:
+        return "PLL_AUDIO_SET";
+    case CCM_ANALOG_PLL_AUDIO_CLR:
+        return "PLL_AUDIO_CLR";
+    case CCM_ANALOG_PLL_AUDIO_TOG:
+        return "PLL_AUDIO_TOG";
+    case CCM_ANALOG_PLL_AUDIO_NUM:
+        return "PLL_AUDIO_NUM";
+    case CCM_ANALOG_PLL_AUDIO_DENOM:
+        return "PLL_AUDIO_DENOM";
+    case CCM_ANALOG_PLL_VIDEO:
+        return "PLL_VIDEO";
+    case CCM_ANALOG_PLL_VIDEO_SET:
+        return "PLL_VIDEO_SET";
+    case CCM_ANALOG_PLL_VIDEO_CLR:
+        return "PLL_VIDEO_CLR";
+    case CCM_ANALOG_PLL_VIDEO_TOG:
+        return "PLL_VIDEO_TOG";
+    case CCM_ANALOG_PLL_VIDEO_NUM:
+        return "PLL_VIDEO_NUM";
+    case CCM_ANALOG_PLL_VIDEO_DENOM:
+        return "PLL_VIDEO_DENOM";
+    case CCM_ANALOG_PLL_MLB:
+        return "PLL_MLB";
+    case CCM_ANALOG_PLL_MLB_SET:
+        return "PLL_MLB_SET";
+    case CCM_ANALOG_PLL_MLB_CLR:
+        return "PLL_MLB_CLR";
+    case CCM_ANALOG_PLL_MLB_TOG:
+        return "PLL_MLB_TOG";
+    case CCM_ANALOG_PLL_ENET:
+        return "PLL_ENET";
+    case CCM_ANALOG_PLL_ENET_SET:
+        return "PLL_ENET_SET";
+    case CCM_ANALOG_PLL_ENET_CLR:
+        return "PLL_ENET_CLR";
+    case CCM_ANALOG_PLL_ENET_TOG:
+        return "PLL_ENET_TOG";
+    case CCM_ANALOG_PFD_480:
+        return "PFD_480";
+    case CCM_ANALOG_PFD_480_SET:
+        return "PFD_480_SET";
+    case CCM_ANALOG_PFD_480_CLR:
+        return "PFD_480_CLR";
+    case CCM_ANALOG_PFD_480_TOG:
+        return "PFD_480_TOG";
+    case CCM_ANALOG_PFD_528:
+        return "PFD_528";
+    case CCM_ANALOG_PFD_528_SET:
+        return "PFD_528_SET";
+    case CCM_ANALOG_PFD_528_CLR:
+        return "PFD_528_CLR";
+    case CCM_ANALOG_PFD_528_TOG:
+        return "PFD_528_TOG";
+    case CCM_ANALOG_MISC0:
+        return "MISC0";
+    case CCM_ANALOG_MISC0_SET:
+        return "MISC0_SET";
+    case CCM_ANALOG_MISC0_CLR:
+        return "MISC0_CLR";
+    case CCM_ANALOG_MISC0_TOG:
+        return "MISC0_TOG";
+    case CCM_ANALOG_MISC2:
+        return "MISC2";
+    case CCM_ANALOG_MISC2_SET:
+        return "MISC2_SET";
+    case CCM_ANALOG_MISC2_CLR:
+        return "MISC2_CLR";
+    case CCM_ANALOG_MISC2_TOG:
+        return "MISC2_TOG";
+    case PMU_REG_1P1:
+        return "PMU_REG_1P1";
+    case PMU_REG_3P0:
+        return "PMU_REG_3P0";
+    case PMU_REG_2P5:
+        return "PMU_REG_2P5";
+    case PMU_REG_CORE:
+        return "PMU_REG_CORE";
+    case PMU_MISC1:
+        return "PMU_MISC1";
+    case PMU_MISC1_SET:
+        return "PMU_MISC1_SET";
+    case PMU_MISC1_CLR:
+        return "PMU_MISC1_CLR";
+    case PMU_MISC1_TOG:
+        return "PMU_MISC1_TOG";
+    case USB_ANALOG_DIGPROG:
+        return "USB_ANALOG_DIGPROG";
+    default:
+        sprintf(unknown, "%u ?", reg);
+        return unknown;
+    }
+}
+
+#define CKIH_FREQ 24000000 /* 24MHz crystal input */
+
+static const VMStateDescription vmstate_imx6_ccm = {
+    .name = TYPE_IMX6_CCM,
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT32_ARRAY(ccm, IMX6CCMState, CCM_MAX),
+        VMSTATE_UINT32_ARRAY(analog, IMX6CCMState, CCM_ANALOG_MAX),
+
VMSTATE_END_OF_LIST() + }, +}; + +static uint64_t imx6_analog_get_pll2_clk(IMX6CCMState *dev) +{ + uint64_t freq = 24000000; + + if (EXTRACT(dev->analog[CCM_ANALOG_PLL_SYS], DIV_SELECT)) { + freq *= 22; + } else { + freq *= 20; + } + + DPRINTF("freq = %u\n", (uint32_t)freq); + + return freq; +} + +static uint64_t imx6_analog_get_pll2_pfd0_clk(IMX6CCMState *dev) +{ + uint64_t freq = 0; + + freq = imx6_analog_get_pll2_clk(dev) * 18 + / EXTRACT(dev->analog[CCM_ANALOG_PFD_528], PFD0_FRAC); + + DPRINTF("freq = %u\n", (uint32_t)freq); + + return freq; +} + +static uint64_t imx6_analog_get_pll2_pfd2_clk(IMX6CCMState *dev) +{ + uint64_t freq = 0; + + freq = imx6_analog_get_pll2_clk(dev) * 18 + / EXTRACT(dev->analog[CCM_ANALOG_PFD_528], PFD2_FRAC); + + DPRINTF("freq = %u\n", (uint32_t)freq); + + return freq; +} + +static uint64_t imx6_analog_get_periph_clk(IMX6CCMState *dev) +{ + uint64_t freq = 0; + + switch (EXTRACT(dev->ccm[CCM_CBCMR], PRE_PERIPH_CLK_SEL)) { + case 0: + freq = imx6_analog_get_pll2_clk(dev); + break; + case 1: + freq = imx6_analog_get_pll2_pfd2_clk(dev); + break; + case 2: + freq = imx6_analog_get_pll2_pfd0_clk(dev); + break; + case 3: + freq = imx6_analog_get_pll2_pfd2_clk(dev) / 2; + break; + default: + /* We should never get there */ + g_assert_not_reached(); + break; + } + + DPRINTF("freq = %u\n", (uint32_t)freq); + + return freq; +} + +static uint64_t imx6_ccm_get_ahb_clk(IMX6CCMState *dev) +{ + uint64_t freq = 0; + + freq = imx6_analog_get_periph_clk(dev) + / (1 + EXTRACT(dev->ccm[CCM_CBCDR], AHB_PODF)); + + DPRINTF("freq = %u\n", (uint32_t)freq); + + return freq; +} + +static uint64_t imx6_ccm_get_ipg_clk(IMX6CCMState *dev) +{ + uint64_t freq = 0; + + freq = imx6_ccm_get_ahb_clk(dev) + / (1 + EXTRACT(dev->ccm[CCM_CBCDR], IPG_PODF)); + + DPRINTF("freq = %u\n", (uint32_t)freq); + + return freq; +} + +static uint64_t imx6_ccm_get_per_clk(IMX6CCMState *dev) +{ + uint64_t freq = 0; + + freq = imx6_ccm_get_ipg_clk(dev) + / (1 + EXTRACT(dev->ccm[CCM_CSCMR1], PERCLK_PODF)); + + DPRINTF("freq = %u\n", (uint32_t)freq); + + return freq; +} + +static uint32_t imx6_ccm_get_clock_frequency(IMXCCMState *dev, IMXClk clock) +{ + uint32_t freq = 0; + IMX6CCMState *s = IMX6_CCM(dev); + + switch (clock) { + case CLK_NONE: + break; + case CLK_IPG: + freq = imx6_ccm_get_ipg_clk(s); + break; + case CLK_IPG_HIGH: + freq = imx6_ccm_get_per_clk(s); + break; + case CLK_32k: + freq = CKIL_FREQ; + break; + case CLK_HIGH: + freq = 24000000; + break; + case CLK_HIGH_DIV: + freq = 24000000 / 8; + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: unsupported clock %d\n", + TYPE_IMX6_CCM, __func__, clock); + break; + } + + DPRINTF("Clock = %d) = %u\n", clock, freq); + + return freq; +} + +static void imx6_ccm_reset(DeviceState *dev) +{ + IMX6CCMState *s = IMX6_CCM(dev); + + DPRINTF("\n"); + + s->ccm[CCM_CCR] = 0x040116FF; + s->ccm[CCM_CCDR] = 0x00000000; + s->ccm[CCM_CSR] = 0x00000010; + s->ccm[CCM_CCSR] = 0x00000100; + s->ccm[CCM_CACRR] = 0x00000000; + s->ccm[CCM_CBCDR] = 0x00018D40; + s->ccm[CCM_CBCMR] = 0x00022324; + s->ccm[CCM_CSCMR1] = 0x00F00000; + s->ccm[CCM_CSCMR2] = 0x02B92F06; + s->ccm[CCM_CSCDR1] = 0x00490B00; + s->ccm[CCM_CS1CDR] = 0x0EC102C1; + s->ccm[CCM_CS2CDR] = 0x000736C1; + s->ccm[CCM_CDCDR] = 0x33F71F92; + s->ccm[CCM_CHSCCDR] = 0x0002A150; + s->ccm[CCM_CSCDR2] = 0x0002A150; + s->ccm[CCM_CSCDR3] = 0x00014841; + s->ccm[CCM_CDHIPR] = 0x00000000; + s->ccm[CCM_CTOR] = 0x00000000; + s->ccm[CCM_CLPCR] = 0x00000079; + s->ccm[CCM_CISR] = 0x00000000; + s->ccm[CCM_CIMR] = 0xFFFFFFFF; + 
s->ccm[CCM_CCOSR] = 0x000A0001; + s->ccm[CCM_CGPR] = 0x0000FE62; + s->ccm[CCM_CCGR0] = 0xFFFFFFFF; + s->ccm[CCM_CCGR1] = 0xFFFFFFFF; + s->ccm[CCM_CCGR2] = 0xFC3FFFFF; + s->ccm[CCM_CCGR3] = 0xFFFFFFFF; + s->ccm[CCM_CCGR4] = 0xFFFFFFFF; + s->ccm[CCM_CCGR5] = 0xFFFFFFFF; + s->ccm[CCM_CCGR6] = 0xFFFFFFFF; + s->ccm[CCM_CMEOR] = 0xFFFFFFFF; + + s->analog[CCM_ANALOG_PLL_ARM] = 0x00013042; + s->analog[CCM_ANALOG_PLL_USB1] = 0x00012000; + s->analog[CCM_ANALOG_PLL_USB2] = 0x00012000; + s->analog[CCM_ANALOG_PLL_SYS] = 0x00013001; + s->analog[CCM_ANALOG_PLL_SYS_SS] = 0x00000000; + s->analog[CCM_ANALOG_PLL_SYS_NUM] = 0x00000000; + s->analog[CCM_ANALOG_PLL_SYS_DENOM] = 0x00000012; + s->analog[CCM_ANALOG_PLL_AUDIO] = 0x00011006; + s->analog[CCM_ANALOG_PLL_AUDIO_NUM] = 0x05F5E100; + s->analog[CCM_ANALOG_PLL_AUDIO_DENOM] = 0x2964619C; + s->analog[CCM_ANALOG_PLL_VIDEO] = 0x0001100C; + s->analog[CCM_ANALOG_PLL_VIDEO_NUM] = 0x05F5E100; + s->analog[CCM_ANALOG_PLL_VIDEO_DENOM] = 0x10A24447; + s->analog[CCM_ANALOG_PLL_MLB] = 0x00010000; + s->analog[CCM_ANALOG_PLL_ENET] = 0x00011001; + s->analog[CCM_ANALOG_PFD_480] = 0x1311100C; + s->analog[CCM_ANALOG_PFD_528] = 0x1018101B; + + s->analog[PMU_REG_1P1] = 0x00001073; + s->analog[PMU_REG_3P0] = 0x00000F74; + s->analog[PMU_REG_2P5] = 0x00005071; + s->analog[PMU_REG_CORE] = 0x00402010; + s->analog[PMU_MISC0] = 0x04000080; + s->analog[PMU_MISC1] = 0x00000000; + s->analog[PMU_MISC2] = 0x00272727; + + s->analog[USB_ANALOG_USB1_VBUS_DETECT] = 0x00000004; + s->analog[USB_ANALOG_USB1_CHRG_DETECT] = 0x00000000; + s->analog[USB_ANALOG_USB1_VBUS_DETECT_STAT] = 0x00000000; + s->analog[USB_ANALOG_USB1_CHRG_DETECT_STAT] = 0x00000000; + s->analog[USB_ANALOG_USB1_MISC] = 0x00000002; + s->analog[USB_ANALOG_USB2_VBUS_DETECT] = 0x00000004; + s->analog[USB_ANALOG_USB2_CHRG_DETECT] = 0x00000000; + s->analog[USB_ANALOG_USB2_MISC] = 0x00000002; + s->analog[USB_ANALOG_DIGPROG] = 0x00630000; + + /* all PLLs need to be locked */ + s->analog[CCM_ANALOG_PLL_ARM] |= CCM_ANALOG_PLL_LOCK; + s->analog[CCM_ANALOG_PLL_USB1] |= CCM_ANALOG_PLL_LOCK; + s->analog[CCM_ANALOG_PLL_USB2] |= CCM_ANALOG_PLL_LOCK; + s->analog[CCM_ANALOG_PLL_SYS] |= CCM_ANALOG_PLL_LOCK; + s->analog[CCM_ANALOG_PLL_AUDIO] |= CCM_ANALOG_PLL_LOCK; + s->analog[CCM_ANALOG_PLL_VIDEO] |= CCM_ANALOG_PLL_LOCK; + s->analog[CCM_ANALOG_PLL_MLB] |= CCM_ANALOG_PLL_LOCK; + s->analog[CCM_ANALOG_PLL_ENET] |= CCM_ANALOG_PLL_LOCK; +} + +static uint64_t imx6_ccm_read(void *opaque, hwaddr offset, unsigned size) +{ + uint32_t value = 0; + uint32_t index = offset >> 2; + IMX6CCMState *s = (IMX6CCMState *)opaque; + + value = s->ccm[index]; + + DPRINTF("reg[%s] => 0x%" PRIx32 "\n", imx6_ccm_reg_name(index), value); + + return (uint64_t)value; +} + +static void imx6_ccm_write(void *opaque, hwaddr offset, uint64_t value, + unsigned size) +{ + uint32_t index = offset >> 2; + IMX6CCMState *s = (IMX6CCMState *)opaque; + + DPRINTF("reg[%s] <= 0x%" PRIx32 "\n", imx6_ccm_reg_name(index), + (uint32_t)value); + + /* + * We will do a better implementation later. In particular some bits + * cannot be written to. 
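+     * A mask-based sketch of such an implementation (illustrative; the
+     * i.MX6UL model later in this patch uses this scheme with its
+     * ccm_mask[] table, and ro_mask[] here is a hypothetical table of
+     * read-only bits) could be:
+     *
+     *   s->ccm[index] = (s->ccm[index] & ro_mask[index])
+     *                   | ((uint32_t)value & ~ro_mask[index]);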
+ */ + s->ccm[index] = (uint32_t)value; +} + +static uint64_t imx6_analog_read(void *opaque, hwaddr offset, unsigned size) +{ + uint32_t value; + uint32_t index = offset >> 2; + IMX6CCMState *s = (IMX6CCMState *)opaque; + + switch (index) { + case CCM_ANALOG_PLL_ARM_SET: + case CCM_ANALOG_PLL_USB1_SET: + case CCM_ANALOG_PLL_USB2_SET: + case CCM_ANALOG_PLL_SYS_SET: + case CCM_ANALOG_PLL_AUDIO_SET: + case CCM_ANALOG_PLL_VIDEO_SET: + case CCM_ANALOG_PLL_MLB_SET: + case CCM_ANALOG_PLL_ENET_SET: + case CCM_ANALOG_PFD_480_SET: + case CCM_ANALOG_PFD_528_SET: + case CCM_ANALOG_MISC0_SET: + case PMU_MISC1_SET: + case CCM_ANALOG_MISC2_SET: + case USB_ANALOG_USB1_VBUS_DETECT_SET: + case USB_ANALOG_USB1_CHRG_DETECT_SET: + case USB_ANALOG_USB1_MISC_SET: + case USB_ANALOG_USB2_VBUS_DETECT_SET: + case USB_ANALOG_USB2_CHRG_DETECT_SET: + case USB_ANALOG_USB2_MISC_SET: + /* + * All REG_NAME_SET register access are in fact targeting the + * the REG_NAME register. + */ + value = s->analog[index - 1]; + break; + case CCM_ANALOG_PLL_ARM_CLR: + case CCM_ANALOG_PLL_USB1_CLR: + case CCM_ANALOG_PLL_USB2_CLR: + case CCM_ANALOG_PLL_SYS_CLR: + case CCM_ANALOG_PLL_AUDIO_CLR: + case CCM_ANALOG_PLL_VIDEO_CLR: + case CCM_ANALOG_PLL_MLB_CLR: + case CCM_ANALOG_PLL_ENET_CLR: + case CCM_ANALOG_PFD_480_CLR: + case CCM_ANALOG_PFD_528_CLR: + case CCM_ANALOG_MISC0_CLR: + case PMU_MISC1_CLR: + case CCM_ANALOG_MISC2_CLR: + case USB_ANALOG_USB1_VBUS_DETECT_CLR: + case USB_ANALOG_USB1_CHRG_DETECT_CLR: + case USB_ANALOG_USB1_MISC_CLR: + case USB_ANALOG_USB2_VBUS_DETECT_CLR: + case USB_ANALOG_USB2_CHRG_DETECT_CLR: + case USB_ANALOG_USB2_MISC_CLR: + /* + * All REG_NAME_CLR register access are in fact targeting the + * the REG_NAME register. + */ + value = s->analog[index - 2]; + break; + case CCM_ANALOG_PLL_ARM_TOG: + case CCM_ANALOG_PLL_USB1_TOG: + case CCM_ANALOG_PLL_USB2_TOG: + case CCM_ANALOG_PLL_SYS_TOG: + case CCM_ANALOG_PLL_AUDIO_TOG: + case CCM_ANALOG_PLL_VIDEO_TOG: + case CCM_ANALOG_PLL_MLB_TOG: + case CCM_ANALOG_PLL_ENET_TOG: + case CCM_ANALOG_PFD_480_TOG: + case CCM_ANALOG_PFD_528_TOG: + case CCM_ANALOG_MISC0_TOG: + case PMU_MISC1_TOG: + case CCM_ANALOG_MISC2_TOG: + case USB_ANALOG_USB1_VBUS_DETECT_TOG: + case USB_ANALOG_USB1_CHRG_DETECT_TOG: + case USB_ANALOG_USB1_MISC_TOG: + case USB_ANALOG_USB2_VBUS_DETECT_TOG: + case USB_ANALOG_USB2_CHRG_DETECT_TOG: + case USB_ANALOG_USB2_MISC_TOG: + /* + * All REG_NAME_TOG register access are in fact targeting the + * the REG_NAME register. 
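+         * The SET/CLR/TOG aliases sit at fixed offsets base + 0x4,
+         * base + 0x8 and base + 0xc from their base register, which is
+         * why index - 1, index - 2 and index - 3 recover the base; e.g.
+         * a read of CCM_ANALOG_PLL_ARM_TOG returns
+         * s->analog[CCM_ANALOG_PLL_ARM].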
+ */ + value = s->analog[index - 3]; + break; + default: + value = s->analog[index]; + break; + } + + DPRINTF("reg[%s] => 0x%" PRIx32 "\n", imx6_analog_reg_name(index), value); + + return (uint64_t)value; +} + +static void imx6_analog_write(void *opaque, hwaddr offset, uint64_t value, + unsigned size) +{ + uint32_t index = offset >> 2; + IMX6CCMState *s = (IMX6CCMState *)opaque; + + DPRINTF("reg[%s] <= 0x%" PRIx32 "\n", imx6_analog_reg_name(index), + (uint32_t)value); + + switch (index) { + case CCM_ANALOG_PLL_ARM_SET: + case CCM_ANALOG_PLL_USB1_SET: + case CCM_ANALOG_PLL_USB2_SET: + case CCM_ANALOG_PLL_SYS_SET: + case CCM_ANALOG_PLL_AUDIO_SET: + case CCM_ANALOG_PLL_VIDEO_SET: + case CCM_ANALOG_PLL_MLB_SET: + case CCM_ANALOG_PLL_ENET_SET: + case CCM_ANALOG_PFD_480_SET: + case CCM_ANALOG_PFD_528_SET: + case CCM_ANALOG_MISC0_SET: + case PMU_MISC1_SET: + case CCM_ANALOG_MISC2_SET: + case USB_ANALOG_USB1_VBUS_DETECT_SET: + case USB_ANALOG_USB1_CHRG_DETECT_SET: + case USB_ANALOG_USB1_MISC_SET: + case USB_ANALOG_USB2_VBUS_DETECT_SET: + case USB_ANALOG_USB2_CHRG_DETECT_SET: + case USB_ANALOG_USB2_MISC_SET: + /* + * All REG_NAME_SET register access are in fact targeting the + * the REG_NAME register. So we change the value of the + * REG_NAME register, setting bits passed in the value. + */ + s->analog[index - 1] |= value; + break; + case CCM_ANALOG_PLL_ARM_CLR: + case CCM_ANALOG_PLL_USB1_CLR: + case CCM_ANALOG_PLL_USB2_CLR: + case CCM_ANALOG_PLL_SYS_CLR: + case CCM_ANALOG_PLL_AUDIO_CLR: + case CCM_ANALOG_PLL_VIDEO_CLR: + case CCM_ANALOG_PLL_MLB_CLR: + case CCM_ANALOG_PLL_ENET_CLR: + case CCM_ANALOG_PFD_480_CLR: + case CCM_ANALOG_PFD_528_CLR: + case CCM_ANALOG_MISC0_CLR: + case PMU_MISC1_CLR: + case CCM_ANALOG_MISC2_CLR: + case USB_ANALOG_USB1_VBUS_DETECT_CLR: + case USB_ANALOG_USB1_CHRG_DETECT_CLR: + case USB_ANALOG_USB1_MISC_CLR: + case USB_ANALOG_USB2_VBUS_DETECT_CLR: + case USB_ANALOG_USB2_CHRG_DETECT_CLR: + case USB_ANALOG_USB2_MISC_CLR: + /* + * All REG_NAME_CLR register access are in fact targeting the + * the REG_NAME register. So we change the value of the + * REG_NAME register, unsetting bits passed in the value. + */ + s->analog[index - 2] &= ~value; + break; + case CCM_ANALOG_PLL_ARM_TOG: + case CCM_ANALOG_PLL_USB1_TOG: + case CCM_ANALOG_PLL_USB2_TOG: + case CCM_ANALOG_PLL_SYS_TOG: + case CCM_ANALOG_PLL_AUDIO_TOG: + case CCM_ANALOG_PLL_VIDEO_TOG: + case CCM_ANALOG_PLL_MLB_TOG: + case CCM_ANALOG_PLL_ENET_TOG: + case CCM_ANALOG_PFD_480_TOG: + case CCM_ANALOG_PFD_528_TOG: + case CCM_ANALOG_MISC0_TOG: + case PMU_MISC1_TOG: + case CCM_ANALOG_MISC2_TOG: + case USB_ANALOG_USB1_VBUS_DETECT_TOG: + case USB_ANALOG_USB1_CHRG_DETECT_TOG: + case USB_ANALOG_USB1_MISC_TOG: + case USB_ANALOG_USB2_VBUS_DETECT_TOG: + case USB_ANALOG_USB2_CHRG_DETECT_TOG: + case USB_ANALOG_USB2_MISC_TOG: + /* + * All REG_NAME_TOG register access are in fact targeting the + * the REG_NAME register. So we change the value of the + * REG_NAME register, toggling bits passed in the value. + */ + s->analog[index - 3] ^= value; + break; + default: + /* + * We will do a better implementation later. In particular some bits + * cannot be written to. + */ + s->analog[index] = value; + break; + } +} + +static const struct MemoryRegionOps imx6_ccm_ops = { + .read = imx6_ccm_read, + .write = imx6_ccm_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + /* + * Our device would not work correctly if the guest was doing + * unaligned access. 
This might not be a limitation on the real + * device but in practice there is no reason for a guest to access + * this device unaligned. + */ + .min_access_size = 4, + .max_access_size = 4, + .unaligned = false, + }, +}; + +static const struct MemoryRegionOps imx6_analog_ops = { + .read = imx6_analog_read, + .write = imx6_analog_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + /* + * Our device would not work correctly if the guest was doing + * unaligned access. This might not be a limitation on the real + * device but in practice there is no reason for a guest to access + * this device unaligned. + */ + .min_access_size = 4, + .max_access_size = 4, + .unaligned = false, + }, +}; + +static void imx6_ccm_init(Object *obj) +{ + DeviceState *dev = DEVICE(obj); + SysBusDevice *sd = SYS_BUS_DEVICE(obj); + IMX6CCMState *s = IMX6_CCM(obj); + + /* initialize a container for the all memory range */ + memory_region_init(&s->container, OBJECT(dev), TYPE_IMX6_CCM, 0x5000); + + /* We initialize an IO memory region for the CCM part */ + memory_region_init_io(&s->ioccm, OBJECT(dev), &imx6_ccm_ops, s, + TYPE_IMX6_CCM ".ccm", CCM_MAX * sizeof(uint32_t)); + + /* Add the CCM as a subregion at offset 0 */ + memory_region_add_subregion(&s->container, 0, &s->ioccm); + + /* We initialize an IO memory region for the ANALOG part */ + memory_region_init_io(&s->ioanalog, OBJECT(dev), &imx6_analog_ops, s, + TYPE_IMX6_CCM ".analog", + CCM_ANALOG_MAX * sizeof(uint32_t)); + + /* Add the ANALOG as a subregion at offset 0x4000 */ + memory_region_add_subregion(&s->container, 0x4000, &s->ioanalog); + + sysbus_init_mmio(sd, &s->container); +} + +static void imx6_ccm_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + IMXCCMClass *ccm = IMX_CCM_CLASS(klass); + + dc->reset = imx6_ccm_reset; + dc->vmsd = &vmstate_imx6_ccm; + dc->desc = "i.MX6 Clock Control Module"; + + ccm->get_clock_frequency = imx6_ccm_get_clock_frequency; +} + +static const TypeInfo imx6_ccm_info = { + .name = TYPE_IMX6_CCM, + .parent = TYPE_IMX_CCM, + .instance_size = sizeof(IMX6CCMState), + .instance_init = imx6_ccm_init, + .class_init = imx6_ccm_class_init, +}; + +static void imx6_ccm_register_types(void) +{ + type_register_static(&imx6_ccm_info); +} + +type_init(imx6_ccm_register_types) diff --git a/hw/misc/imx6_src.c b/hw/misc/imx6_src.c new file mode 100644 index 000000000..79f437591 --- /dev/null +++ b/hw/misc/imx6_src.c @@ -0,0 +1,311 @@ +/* + * IMX6 System Reset Controller + * + * Copyright (c) 2015 Jean-Christophe Dubois + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +#include "qemu/osdep.h" +#include "hw/misc/imx6_src.h" +#include "migration/vmstate.h" +#include "qemu/bitops.h" +#include "qemu/log.h" +#include "qemu/main-loop.h" +#include "qemu/module.h" +#include "arm-powerctl.h" +#include "hw/core/cpu.h" + +#ifndef DEBUG_IMX6_SRC +#define DEBUG_IMX6_SRC 0 +#endif + +#define DPRINTF(fmt, args...) 
\ + do { \ + if (DEBUG_IMX6_SRC) { \ + fprintf(stderr, "[%s]%s: " fmt , TYPE_IMX6_SRC, \ + __func__, ##args); \ + } \ + } while (0) + +static const char *imx6_src_reg_name(uint32_t reg) +{ + static char unknown[20]; + + switch (reg) { + case SRC_SCR: + return "SRC_SCR"; + case SRC_SBMR1: + return "SRC_SBMR1"; + case SRC_SRSR: + return "SRC_SRSR"; + case SRC_SISR: + return "SRC_SISR"; + case SRC_SIMR: + return "SRC_SIMR"; + case SRC_SBMR2: + return "SRC_SBMR2"; + case SRC_GPR1: + return "SRC_GPR1"; + case SRC_GPR2: + return "SRC_GPR2"; + case SRC_GPR3: + return "SRC_GPR3"; + case SRC_GPR4: + return "SRC_GPR4"; + case SRC_GPR5: + return "SRC_GPR5"; + case SRC_GPR6: + return "SRC_GPR6"; + case SRC_GPR7: + return "SRC_GPR7"; + case SRC_GPR8: + return "SRC_GPR8"; + case SRC_GPR9: + return "SRC_GPR9"; + case SRC_GPR10: + return "SRC_GPR10"; + default: + sprintf(unknown, "%u ?", reg); + return unknown; + } +} + +static const VMStateDescription vmstate_imx6_src = { + .name = TYPE_IMX6_SRC, + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32_ARRAY(regs, IMX6SRCState, SRC_MAX), + VMSTATE_END_OF_LIST() + }, +}; + +static void imx6_src_reset(DeviceState *dev) +{ + IMX6SRCState *s = IMX6_SRC(dev); + + DPRINTF("\n"); + + memset(s->regs, 0, sizeof(s->regs)); + + /* Set reset values */ + s->regs[SRC_SCR] = 0x521; + s->regs[SRC_SRSR] = 0x1; + s->regs[SRC_SIMR] = 0x1F; +} + +static uint64_t imx6_src_read(void *opaque, hwaddr offset, unsigned size) +{ + uint32_t value = 0; + IMX6SRCState *s = (IMX6SRCState *)opaque; + uint32_t index = offset >> 2; + + if (index < SRC_MAX) { + value = s->regs[index]; + } else { + qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Bad register at offset 0x%" + HWADDR_PRIx "\n", TYPE_IMX6_SRC, __func__, offset); + + } + + DPRINTF("reg[%s] => 0x%" PRIx32 "\n", imx6_src_reg_name(index), value); + + return value; +} + + +/* The reset is asynchronous so we need to defer clearing the reset + * bit until the work is completed. 
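+ * For example, a guest setting CORE1_RST in SRC_SCR triggers an
+ * immediate arm_reset_cpu(1) below, while the SCR bit itself is only
+ * cleared from imx6_clear_reset_bit() once CPU 1 has run the work item
+ * queued via async_run_on_cpu(), mimicking the self-clearing bit on
+ * real hardware.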
+ */ + +struct SRCSCRResetInfo { + IMX6SRCState *s; + int reset_bit; +}; + +static void imx6_clear_reset_bit(CPUState *cpu, run_on_cpu_data data) +{ + struct SRCSCRResetInfo *ri = data.host_ptr; + IMX6SRCState *s = ri->s; + + assert(qemu_mutex_iothread_locked()); + + s->regs[SRC_SCR] = deposit32(s->regs[SRC_SCR], ri->reset_bit, 1, 0); + DPRINTF("reg[%s] <= 0x%" PRIx32 "\n", + imx6_src_reg_name(SRC_SCR), s->regs[SRC_SCR]); + + g_free(ri); +} + +static void imx6_defer_clear_reset_bit(int cpuid, + IMX6SRCState *s, + unsigned long reset_shift) +{ + struct SRCSCRResetInfo *ri; + CPUState *cpu = arm_get_cpu_by_id(cpuid); + + if (!cpu) { + return; + } + + ri = g_malloc(sizeof(struct SRCSCRResetInfo)); + ri->s = s; + ri->reset_bit = reset_shift; + + async_run_on_cpu(cpu, imx6_clear_reset_bit, RUN_ON_CPU_HOST_PTR(ri)); +} + + +static void imx6_src_write(void *opaque, hwaddr offset, uint64_t value, + unsigned size) +{ + IMX6SRCState *s = (IMX6SRCState *)opaque; + uint32_t index = offset >> 2; + unsigned long change_mask; + unsigned long current_value = value; + + if (index >= SRC_MAX) { + qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Bad register at offset 0x%" + HWADDR_PRIx "\n", TYPE_IMX6_SRC, __func__, offset); + return; + } + + DPRINTF("reg[%s] <= 0x%" PRIx32 "\n", imx6_src_reg_name(index), + (uint32_t)current_value); + + change_mask = s->regs[index] ^ (uint32_t)current_value; + + switch (index) { + case SRC_SCR: + /* + * On real hardware when the system reset controller starts a + * secondary CPU it runs through some boot ROM code which reads + * the SRC_GPRX registers controlling the start address and branches + * to it. + * Here we are taking a short cut and branching directly to the + * requested address (we don't want to run the boot ROM code inside + * QEMU) + */ + if (EXTRACT(change_mask, CORE3_ENABLE)) { + if (EXTRACT(current_value, CORE3_ENABLE)) { + /* CORE 3 is brought up */ + arm_set_cpu_on(3, s->regs[SRC_GPR7], s->regs[SRC_GPR8], + 3, false); + } else { + /* CORE 3 is shut down */ + arm_set_cpu_off(3); + } + /* We clear the reset bits as the processor changed state */ + imx6_defer_clear_reset_bit(3, s, CORE3_RST_SHIFT); + clear_bit(CORE3_RST_SHIFT, &change_mask); + } + if (EXTRACT(change_mask, CORE2_ENABLE)) { + if (EXTRACT(current_value, CORE2_ENABLE)) { + /* CORE 2 is brought up */ + arm_set_cpu_on(2, s->regs[SRC_GPR5], s->regs[SRC_GPR6], + 3, false); + } else { + /* CORE 2 is shut down */ + arm_set_cpu_off(2); + } + /* We clear the reset bits as the processor changed state */ + imx6_defer_clear_reset_bit(2, s, CORE2_RST_SHIFT); + clear_bit(CORE2_RST_SHIFT, &change_mask); + } + if (EXTRACT(change_mask, CORE1_ENABLE)) { + if (EXTRACT(current_value, CORE1_ENABLE)) { + /* CORE 1 is brought up */ + arm_set_cpu_on(1, s->regs[SRC_GPR3], s->regs[SRC_GPR4], + 3, false); + } else { + /* CORE 1 is shut down */ + arm_set_cpu_off(1); + } + /* We clear the reset bits as the processor changed state */ + imx6_defer_clear_reset_bit(1, s, CORE1_RST_SHIFT); + clear_bit(CORE1_RST_SHIFT, &change_mask); + } + if (EXTRACT(change_mask, CORE0_RST)) { + arm_reset_cpu(0); + imx6_defer_clear_reset_bit(0, s, CORE0_RST_SHIFT); + } + if (EXTRACT(change_mask, CORE1_RST)) { + arm_reset_cpu(1); + imx6_defer_clear_reset_bit(1, s, CORE1_RST_SHIFT); + } + if (EXTRACT(change_mask, CORE2_RST)) { + arm_reset_cpu(2); + imx6_defer_clear_reset_bit(2, s, CORE2_RST_SHIFT); + } + if (EXTRACT(change_mask, CORE3_RST)) { + arm_reset_cpu(3); + imx6_defer_clear_reset_bit(3, s, CORE3_RST_SHIFT); + } + if (EXTRACT(change_mask, 
SW_IPU2_RST)) {
+            /* We pretend the IPU2 is reset */
+            clear_bit(SW_IPU2_RST_SHIFT, &current_value);
+        }
+        if (EXTRACT(change_mask, SW_IPU1_RST)) {
+            /* We pretend the IPU1 is reset */
+            clear_bit(SW_IPU1_RST_SHIFT, &current_value);
+        }
+        s->regs[index] = current_value;
+        break;
+    default:
+        s->regs[index] = current_value;
+        break;
+    }
+}
+
+static const struct MemoryRegionOps imx6_src_ops = {
+    .read = imx6_src_read,
+    .write = imx6_src_write,
+    .endianness = DEVICE_NATIVE_ENDIAN,
+    .valid = {
+        /*
+         * Our device would not work correctly if the guest was doing
+         * unaligned access. This might not be a limitation on the real
+         * device but in practice there is no reason for a guest to access
+         * this device unaligned.
+         */
+        .min_access_size = 4,
+        .max_access_size = 4,
+        .unaligned = false,
+    },
+};
+
+static void imx6_src_realize(DeviceState *dev, Error **errp)
+{
+    IMX6SRCState *s = IMX6_SRC(dev);
+
+    memory_region_init_io(&s->iomem, OBJECT(dev), &imx6_src_ops, s,
+                          TYPE_IMX6_SRC, 0x1000);
+    sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->iomem);
+}
+
+static void imx6_src_class_init(ObjectClass *klass, void *data)
+{
+    DeviceClass *dc = DEVICE_CLASS(klass);
+
+    dc->realize = imx6_src_realize;
+    dc->reset = imx6_src_reset;
+    dc->vmsd = &vmstate_imx6_src;
+    dc->desc = "i.MX6 System Reset Controller";
+}
+
+static const TypeInfo imx6_src_info = {
+    .name = TYPE_IMX6_SRC,
+    .parent = TYPE_SYS_BUS_DEVICE,
+    .instance_size = sizeof(IMX6SRCState),
+    .class_init = imx6_src_class_init,
+};
+
+static void imx6_src_register_types(void)
+{
+    type_register_static(&imx6_src_info);
+}
+
+type_init(imx6_src_register_types)
diff --git a/hw/misc/imx6ul_ccm.c b/hw/misc/imx6ul_ccm.c
new file mode 100644
index 000000000..a65d03145
--- /dev/null
+++ b/hw/misc/imx6ul_ccm.c
@@ -0,0 +1,938 @@
+/*
+ * IMX6UL Clock Control Module
+ *
+ * Copyright (c) 2018 Jean-Christophe Dubois
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ * To get the timer frequencies right, we need to emulate at least part of
+ * the CCM.
+ */ + +#include "qemu/osdep.h" +#include "hw/registerfields.h" +#include "migration/vmstate.h" +#include "hw/misc/imx6ul_ccm.h" +#include "qemu/log.h" +#include "qemu/module.h" + +#include "trace.h" + +static const uint32_t ccm_mask[CCM_MAX] = { + [CCM_CCR] = 0xf01fef80, + [CCM_CCDR] = 0xfffeffff, + [CCM_CSR] = 0xffffffff, + [CCM_CCSR] = 0xfffffef2, + [CCM_CACRR] = 0xfffffff8, + [CCM_CBCDR] = 0xc1f8e000, + [CCM_CBCMR] = 0xfc03cfff, + [CCM_CSCMR1] = 0x80700000, + [CCM_CSCMR2] = 0xe01ff003, + [CCM_CSCDR1] = 0xfe00c780, + [CCM_CS1CDR] = 0xfe00fe00, + [CCM_CS2CDR] = 0xf8007000, + [CCM_CDCDR] = 0xf00fffff, + [CCM_CHSCCDR] = 0xfffc01ff, + [CCM_CSCDR2] = 0xfe0001ff, + [CCM_CSCDR3] = 0xffffc1ff, + [CCM_CDHIPR] = 0xffffffff, + [CCM_CTOR] = 0x00000000, + [CCM_CLPCR] = 0xf39ff01c, + [CCM_CISR] = 0xfb85ffbe, + [CCM_CIMR] = 0xfb85ffbf, + [CCM_CCOSR] = 0xfe00fe00, + [CCM_CGPR] = 0xfffc3fea, + [CCM_CCGR0] = 0x00000000, + [CCM_CCGR1] = 0x00000000, + [CCM_CCGR2] = 0x00000000, + [CCM_CCGR3] = 0x00000000, + [CCM_CCGR4] = 0x00000000, + [CCM_CCGR5] = 0x00000000, + [CCM_CCGR6] = 0x00000000, + [CCM_CMEOR] = 0xafffff1f, +}; + +static const uint32_t analog_mask[CCM_ANALOG_MAX] = { + [CCM_ANALOG_PLL_ARM] = 0xfff60f80, + [CCM_ANALOG_PLL_USB1] = 0xfffe0fbc, + [CCM_ANALOG_PLL_USB2] = 0xfffe0fbc, + [CCM_ANALOG_PLL_SYS] = 0xfffa0ffe, + [CCM_ANALOG_PLL_SYS_SS] = 0x00000000, + [CCM_ANALOG_PLL_SYS_NUM] = 0xc0000000, + [CCM_ANALOG_PLL_SYS_DENOM] = 0xc0000000, + [CCM_ANALOG_PLL_AUDIO] = 0xffe20f80, + [CCM_ANALOG_PLL_AUDIO_NUM] = 0xc0000000, + [CCM_ANALOG_PLL_AUDIO_DENOM] = 0xc0000000, + [CCM_ANALOG_PLL_VIDEO] = 0xffe20f80, + [CCM_ANALOG_PLL_VIDEO_NUM] = 0xc0000000, + [CCM_ANALOG_PLL_VIDEO_DENOM] = 0xc0000000, + [CCM_ANALOG_PLL_ENET] = 0xffc20ff0, + [CCM_ANALOG_PFD_480] = 0x40404040, + [CCM_ANALOG_PFD_528] = 0x40404040, + [PMU_MISC0] = 0x01fe8306, + [PMU_MISC1] = 0x07fcede0, + [PMU_MISC2] = 0x005f5f5f, +}; + +static const char *imx6ul_ccm_reg_name(uint32_t reg) +{ + static char unknown[20]; + + switch (reg) { + case CCM_CCR: + return "CCR"; + case CCM_CCDR: + return "CCDR"; + case CCM_CSR: + return "CSR"; + case CCM_CCSR: + return "CCSR"; + case CCM_CACRR: + return "CACRR"; + case CCM_CBCDR: + return "CBCDR"; + case CCM_CBCMR: + return "CBCMR"; + case CCM_CSCMR1: + return "CSCMR1"; + case CCM_CSCMR2: + return "CSCMR2"; + case CCM_CSCDR1: + return "CSCDR1"; + case CCM_CS1CDR: + return "CS1CDR"; + case CCM_CS2CDR: + return "CS2CDR"; + case CCM_CDCDR: + return "CDCDR"; + case CCM_CHSCCDR: + return "CHSCCDR"; + case CCM_CSCDR2: + return "CSCDR2"; + case CCM_CSCDR3: + return "CSCDR3"; + case CCM_CDHIPR: + return "CDHIPR"; + case CCM_CTOR: + return "CTOR"; + case CCM_CLPCR: + return "CLPCR"; + case CCM_CISR: + return "CISR"; + case CCM_CIMR: + return "CIMR"; + case CCM_CCOSR: + return "CCOSR"; + case CCM_CGPR: + return "CGPR"; + case CCM_CCGR0: + return "CCGR0"; + case CCM_CCGR1: + return "CCGR1"; + case CCM_CCGR2: + return "CCGR2"; + case CCM_CCGR3: + return "CCGR3"; + case CCM_CCGR4: + return "CCGR4"; + case CCM_CCGR5: + return "CCGR5"; + case CCM_CCGR6: + return "CCGR6"; + case CCM_CMEOR: + return "CMEOR"; + default: + sprintf(unknown, "%u ?", reg); + return unknown; + } +} + +static const char *imx6ul_analog_reg_name(uint32_t reg) +{ + static char unknown[20]; + + switch (reg) { + case CCM_ANALOG_PLL_ARM: + return "PLL_ARM"; + case CCM_ANALOG_PLL_ARM_SET: + return "PLL_ARM_SET"; + case CCM_ANALOG_PLL_ARM_CLR: + return "PLL_ARM_CLR"; + case CCM_ANALOG_PLL_ARM_TOG: + return "PLL_ARM_TOG"; + case CCM_ANALOG_PLL_USB1: + return 
"PLL_USB1"; + case CCM_ANALOG_PLL_USB1_SET: + return "PLL_USB1_SET"; + case CCM_ANALOG_PLL_USB1_CLR: + return "PLL_USB1_CLR"; + case CCM_ANALOG_PLL_USB1_TOG: + return "PLL_USB1_TOG"; + case CCM_ANALOG_PLL_USB2: + return "PLL_USB2"; + case CCM_ANALOG_PLL_USB2_SET: + return "PLL_USB2_SET"; + case CCM_ANALOG_PLL_USB2_CLR: + return "PLL_USB2_CLR"; + case CCM_ANALOG_PLL_USB2_TOG: + return "PLL_USB2_TOG"; + case CCM_ANALOG_PLL_SYS: + return "PLL_SYS"; + case CCM_ANALOG_PLL_SYS_SET: + return "PLL_SYS_SET"; + case CCM_ANALOG_PLL_SYS_CLR: + return "PLL_SYS_CLR"; + case CCM_ANALOG_PLL_SYS_TOG: + return "PLL_SYS_TOG"; + case CCM_ANALOG_PLL_SYS_SS: + return "PLL_SYS_SS"; + case CCM_ANALOG_PLL_SYS_NUM: + return "PLL_SYS_NUM"; + case CCM_ANALOG_PLL_SYS_DENOM: + return "PLL_SYS_DENOM"; + case CCM_ANALOG_PLL_AUDIO: + return "PLL_AUDIO"; + case CCM_ANALOG_PLL_AUDIO_SET: + return "PLL_AUDIO_SET"; + case CCM_ANALOG_PLL_AUDIO_CLR: + return "PLL_AUDIO_CLR"; + case CCM_ANALOG_PLL_AUDIO_TOG: + return "PLL_AUDIO_TOG"; + case CCM_ANALOG_PLL_AUDIO_NUM: + return "PLL_AUDIO_NUM"; + case CCM_ANALOG_PLL_AUDIO_DENOM: + return "PLL_AUDIO_DENOM"; + case CCM_ANALOG_PLL_VIDEO: + return "PLL_VIDEO"; + case CCM_ANALOG_PLL_VIDEO_SET: + return "PLL_VIDEO_SET"; + case CCM_ANALOG_PLL_VIDEO_CLR: + return "PLL_VIDEO_CLR"; + case CCM_ANALOG_PLL_VIDEO_TOG: + return "PLL_VIDEO_TOG"; + case CCM_ANALOG_PLL_VIDEO_NUM: + return "PLL_VIDEO_NUM"; + case CCM_ANALOG_PLL_VIDEO_DENOM: + return "PLL_VIDEO_DENOM"; + case CCM_ANALOG_PLL_ENET: + return "PLL_ENET"; + case CCM_ANALOG_PLL_ENET_SET: + return "PLL_ENET_SET"; + case CCM_ANALOG_PLL_ENET_CLR: + return "PLL_ENET_CLR"; + case CCM_ANALOG_PLL_ENET_TOG: + return "PLL_ENET_TOG"; + case CCM_ANALOG_PFD_480: + return "PFD_480"; + case CCM_ANALOG_PFD_480_SET: + return "PFD_480_SET"; + case CCM_ANALOG_PFD_480_CLR: + return "PFD_480_CLR"; + case CCM_ANALOG_PFD_480_TOG: + return "PFD_480_TOG"; + case CCM_ANALOG_PFD_528: + return "PFD_528"; + case CCM_ANALOG_PFD_528_SET: + return "PFD_528_SET"; + case CCM_ANALOG_PFD_528_CLR: + return "PFD_528_CLR"; + case CCM_ANALOG_PFD_528_TOG: + return "PFD_528_TOG"; + case CCM_ANALOG_MISC0: + return "MISC0"; + case CCM_ANALOG_MISC0_SET: + return "MISC0_SET"; + case CCM_ANALOG_MISC0_CLR: + return "MISC0_CLR"; + case CCM_ANALOG_MISC0_TOG: + return "MISC0_TOG"; + case CCM_ANALOG_MISC2: + return "MISC2"; + case CCM_ANALOG_MISC2_SET: + return "MISC2_SET"; + case CCM_ANALOG_MISC2_CLR: + return "MISC2_CLR"; + case CCM_ANALOG_MISC2_TOG: + return "MISC2_TOG"; + case PMU_REG_1P1: + return "PMU_REG_1P1"; + case PMU_REG_3P0: + return "PMU_REG_3P0"; + case PMU_REG_2P5: + return "PMU_REG_2P5"; + case PMU_REG_CORE: + return "PMU_REG_CORE"; + case PMU_MISC1: + return "PMU_MISC1"; + case PMU_MISC1_SET: + return "PMU_MISC1_SET"; + case PMU_MISC1_CLR: + return "PMU_MISC1_CLR"; + case PMU_MISC1_TOG: + return "PMU_MISC1_TOG"; + case USB_ANALOG_DIGPROG: + return "USB_ANALOG_DIGPROG"; + default: + sprintf(unknown, "%u ?", reg); + return unknown; + } +} + +#define CKIH_FREQ 24000000 /* 24MHz crystal input */ + +static const VMStateDescription vmstate_imx6ul_ccm = { + .name = TYPE_IMX6UL_CCM, + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32_ARRAY(ccm, IMX6ULCCMState, CCM_MAX), + VMSTATE_UINT32_ARRAY(analog, IMX6ULCCMState, CCM_ANALOG_MAX), + VMSTATE_END_OF_LIST() + }, +}; + +static uint64_t imx6ul_analog_get_osc_clk(IMX6ULCCMState *dev) +{ + uint64_t freq = CKIH_FREQ; + + trace_ccm_freq((uint32_t)freq); + + return freq; +} + +static uint64_t 
imx6ul_analog_get_pll2_clk(IMX6ULCCMState *dev) +{ + uint64_t freq = imx6ul_analog_get_osc_clk(dev); + + if (FIELD_EX32(dev->analog[CCM_ANALOG_PLL_SYS], + ANALOG_PLL_SYS, DIV_SELECT)) { + freq *= 22; + } else { + freq *= 20; + } + + trace_ccm_freq((uint32_t)freq); + + return freq; +} + +static uint64_t imx6ul_analog_get_pll3_clk(IMX6ULCCMState *dev) +{ + uint64_t freq = imx6ul_analog_get_osc_clk(dev) * 20; + + trace_ccm_freq((uint32_t)freq); + + return freq; +} + +static uint64_t imx6ul_analog_get_pll2_pfd0_clk(IMX6ULCCMState *dev) +{ + uint64_t freq = 0; + + freq = imx6ul_analog_get_pll2_clk(dev) * 18 + / FIELD_EX32(dev->analog[CCM_ANALOG_PFD_528], + ANALOG_PFD_528, PFD0_FRAC); + + trace_ccm_freq((uint32_t)freq); + + return freq; +} + +static uint64_t imx6ul_analog_get_pll2_pfd2_clk(IMX6ULCCMState *dev) +{ + uint64_t freq = 0; + + freq = imx6ul_analog_get_pll2_clk(dev) * 18 + / FIELD_EX32(dev->analog[CCM_ANALOG_PFD_528], + ANALOG_PFD_528, PFD2_FRAC); + + trace_ccm_freq((uint32_t)freq); + + return freq; +} + +static uint64_t imx6ul_analog_pll2_bypass_clk(IMX6ULCCMState *dev) +{ + uint64_t freq = 0; + + trace_ccm_freq((uint32_t)freq); + + return freq; +} + +static uint64_t imx6ul_ccm_get_periph_clk2_sel_clk(IMX6ULCCMState *dev) +{ + uint64_t freq = 0; + + switch (FIELD_EX32(dev->ccm[CCM_CBCMR], CBCMR, PERIPH_CLK2_SEL)) { + case 0: + freq = imx6ul_analog_get_pll3_clk(dev); + break; + case 1: + freq = imx6ul_analog_get_osc_clk(dev); + break; + case 2: + freq = imx6ul_analog_pll2_bypass_clk(dev); + break; + case 3: + /* We should never get there as 3 is a reserved value */ + qemu_log_mask(LOG_GUEST_ERROR, + "[%s]%s: unsupported PERIPH_CLK2_SEL value 3\n", + TYPE_IMX6UL_CCM, __func__); + /* freq is set to 0 as we don't know what it should be */ + break; + default: + g_assert_not_reached(); + } + + trace_ccm_freq((uint32_t)freq); + + return freq; +} + +static uint64_t imx6ul_ccm_get_periph_clk_sel_clk(IMX6ULCCMState *dev) +{ + uint64_t freq = 0; + + switch (FIELD_EX32(dev->ccm[CCM_CBCMR], CBCMR, PRE_PERIPH_CLK_SEL)) { + case 0: + freq = imx6ul_analog_get_pll2_clk(dev); + break; + case 1: + freq = imx6ul_analog_get_pll2_pfd2_clk(dev); + break; + case 2: + freq = imx6ul_analog_get_pll2_pfd0_clk(dev); + break; + case 3: + freq = imx6ul_analog_get_pll2_pfd2_clk(dev) / 2; + break; + default: + g_assert_not_reached(); + } + + trace_ccm_freq((uint32_t)freq); + + return freq; +} + +static uint64_t imx6ul_ccm_get_periph_clk2_clk(IMX6ULCCMState *dev) +{ + uint64_t freq = 0; + + freq = imx6ul_ccm_get_periph_clk2_sel_clk(dev) + / (1 + FIELD_EX32(dev->ccm[CCM_CBCDR], CBCDR, PERIPH_CLK2_PODF)); + + trace_ccm_freq((uint32_t)freq); + + return freq; +} + +static uint64_t imx6ul_ccm_get_periph_sel_clk(IMX6ULCCMState *dev) +{ + uint64_t freq = 0; + + switch (FIELD_EX32(dev->ccm[CCM_CBCDR], CBCDR, PERIPH_CLK_SEL)) { + case 0: + freq = imx6ul_ccm_get_periph_clk_sel_clk(dev); + break; + case 1: + freq = imx6ul_ccm_get_periph_clk2_clk(dev); + break; + default: + g_assert_not_reached(); + } + + trace_ccm_freq((uint32_t)freq); + + return freq; +} + +static uint64_t imx6ul_ccm_get_ahb_clk(IMX6ULCCMState *dev) +{ + uint64_t freq = 0; + + freq = imx6ul_ccm_get_periph_sel_clk(dev) + / (1 + FIELD_EX32(dev->ccm[CCM_CBCDR], CBCDR, AHB_PODF)); + + trace_ccm_freq((uint32_t)freq); + + return freq; +} + +static uint64_t imx6ul_ccm_get_ipg_clk(IMX6ULCCMState *dev) +{ + uint64_t freq = 0; + + freq = imx6ul_ccm_get_ahb_clk(dev) + / (1 + FIELD_EX32(dev->ccm[CCM_CBCDR], CBCDR, IPG_PODF)); + + trace_ccm_freq((uint32_t)freq); + + return 
freq; +} + +static uint64_t imx6ul_ccm_get_per_sel_clk(IMX6ULCCMState *dev) +{ + uint64_t freq = 0; + + switch (FIELD_EX32(dev->ccm[CCM_CSCMR1], CSCMR1, PERCLK_CLK_SEL)) { + case 0: + freq = imx6ul_ccm_get_ipg_clk(dev); + break; + case 1: + freq = imx6ul_analog_get_osc_clk(dev); + break; + default: + g_assert_not_reached(); + } + + trace_ccm_freq((uint32_t)freq); + + return freq; +} + +static uint64_t imx6ul_ccm_get_per_clk(IMX6ULCCMState *dev) +{ + uint64_t freq = 0; + + freq = imx6ul_ccm_get_per_sel_clk(dev) + / (1 + FIELD_EX32(dev->ccm[CCM_CSCMR1], CSCMR1, PERCLK_PODF)); + + trace_ccm_freq((uint32_t)freq); + + return freq; +} + +static uint32_t imx6ul_ccm_get_clock_frequency(IMXCCMState *dev, IMXClk clock) +{ + uint32_t freq = 0; + IMX6ULCCMState *s = IMX6UL_CCM(dev); + + switch (clock) { + case CLK_NONE: + break; + case CLK_IPG: + freq = imx6ul_ccm_get_ipg_clk(s); + break; + case CLK_IPG_HIGH: + freq = imx6ul_ccm_get_per_clk(s); + break; + case CLK_32k: + freq = CKIL_FREQ; + break; + case CLK_HIGH: + freq = CKIH_FREQ; + break; + case CLK_HIGH_DIV: + freq = CKIH_FREQ / 8; + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: unsupported clock %d\n", + TYPE_IMX6UL_CCM, __func__, clock); + break; + } + + trace_ccm_clock_freq(clock, freq); + + return freq; +} + +static void imx6ul_ccm_reset(DeviceState *dev) +{ + IMX6ULCCMState *s = IMX6UL_CCM(dev); + + trace_ccm_entry(); + + s->ccm[CCM_CCR] = 0x0401167F; + s->ccm[CCM_CCDR] = 0x00000000; + s->ccm[CCM_CSR] = 0x00000010; + s->ccm[CCM_CCSR] = 0x00000100; + s->ccm[CCM_CACRR] = 0x00000000; + s->ccm[CCM_CBCDR] = 0x00018D00; + s->ccm[CCM_CBCMR] = 0x24860324; + s->ccm[CCM_CSCMR1] = 0x04900080; + s->ccm[CCM_CSCMR2] = 0x03192F06; + s->ccm[CCM_CSCDR1] = 0x00490B00; + s->ccm[CCM_CS1CDR] = 0x0EC102C1; + s->ccm[CCM_CS2CDR] = 0x000336C1; + s->ccm[CCM_CDCDR] = 0x33F71F92; + s->ccm[CCM_CHSCCDR] = 0x000248A4; + s->ccm[CCM_CSCDR2] = 0x00029B48; + s->ccm[CCM_CSCDR3] = 0x00014841; + s->ccm[CCM_CDHIPR] = 0x00000000; + s->ccm[CCM_CTOR] = 0x00000000; + s->ccm[CCM_CLPCR] = 0x00000079; + s->ccm[CCM_CISR] = 0x00000000; + s->ccm[CCM_CIMR] = 0xFFFFFFFF; + s->ccm[CCM_CCOSR] = 0x000A0001; + s->ccm[CCM_CGPR] = 0x0000FE62; + s->ccm[CCM_CCGR0] = 0xFFFFFFFF; + s->ccm[CCM_CCGR1] = 0xFFFFFFFF; + s->ccm[CCM_CCGR2] = 0xFC3FFFFF; + s->ccm[CCM_CCGR3] = 0xFFFFFFFF; + s->ccm[CCM_CCGR4] = 0xFFFFFFFF; + s->ccm[CCM_CCGR5] = 0xFFFFFFFF; + s->ccm[CCM_CCGR6] = 0xFFFFFFFF; + s->ccm[CCM_CMEOR] = 0xFFFFFFFF; + + s->analog[CCM_ANALOG_PLL_ARM] = 0x00013063; + s->analog[CCM_ANALOG_PLL_USB1] = 0x00012000; + s->analog[CCM_ANALOG_PLL_USB2] = 0x00012000; + s->analog[CCM_ANALOG_PLL_SYS] = 0x00013001; + s->analog[CCM_ANALOG_PLL_SYS_SS] = 0x00000000; + s->analog[CCM_ANALOG_PLL_SYS_NUM] = 0x00000000; + s->analog[CCM_ANALOG_PLL_SYS_DENOM] = 0x00000012; + s->analog[CCM_ANALOG_PLL_AUDIO] = 0x00011006; + s->analog[CCM_ANALOG_PLL_AUDIO_NUM] = 0x05F5E100; + s->analog[CCM_ANALOG_PLL_AUDIO_DENOM] = 0x2964619C; + s->analog[CCM_ANALOG_PLL_VIDEO] = 0x0001100C; + s->analog[CCM_ANALOG_PLL_VIDEO_NUM] = 0x05F5E100; + s->analog[CCM_ANALOG_PLL_VIDEO_DENOM] = 0x10A24447; + s->analog[CCM_ANALOG_PLL_ENET] = 0x00011001; + s->analog[CCM_ANALOG_PFD_480] = 0x1311100C; + s->analog[CCM_ANALOG_PFD_528] = 0x1018101B; + + s->analog[PMU_REG_1P1] = 0x00001073; + s->analog[PMU_REG_3P0] = 0x00000F74; + s->analog[PMU_REG_2P5] = 0x00001073; + s->analog[PMU_REG_CORE] = 0x00482012; + s->analog[PMU_MISC0] = 0x04000000; + s->analog[PMU_MISC1] = 0x00000000; + s->analog[PMU_MISC2] = 0x00272727; + s->analog[PMU_LOWPWR_CTRL] = 
0x00004009; + + s->analog[USB_ANALOG_USB1_VBUS_DETECT] = 0x01000004; + s->analog[USB_ANALOG_USB1_CHRG_DETECT] = 0x00000000; + s->analog[USB_ANALOG_USB1_VBUS_DETECT_STAT] = 0x00000000; + s->analog[USB_ANALOG_USB1_CHRG_DETECT_STAT] = 0x00000000; + s->analog[USB_ANALOG_USB1_MISC] = 0x00000002; + s->analog[USB_ANALOG_USB2_VBUS_DETECT] = 0x01000004; + s->analog[USB_ANALOG_USB2_CHRG_DETECT] = 0x00000000; + s->analog[USB_ANALOG_USB2_MISC] = 0x00000002; + s->analog[USB_ANALOG_DIGPROG] = 0x00640000; + + /* all PLLs need to be locked */ + s->analog[CCM_ANALOG_PLL_ARM] |= CCM_ANALOG_PLL_LOCK; + s->analog[CCM_ANALOG_PLL_USB1] |= CCM_ANALOG_PLL_LOCK; + s->analog[CCM_ANALOG_PLL_USB2] |= CCM_ANALOG_PLL_LOCK; + s->analog[CCM_ANALOG_PLL_SYS] |= CCM_ANALOG_PLL_LOCK; + s->analog[CCM_ANALOG_PLL_AUDIO] |= CCM_ANALOG_PLL_LOCK; + s->analog[CCM_ANALOG_PLL_VIDEO] |= CCM_ANALOG_PLL_LOCK; + s->analog[CCM_ANALOG_PLL_ENET] |= CCM_ANALOG_PLL_LOCK; + + s->analog[TEMPMON_TEMPSENSE0] = 0x00000001; + s->analog[TEMPMON_TEMPSENSE1] = 0x00000001; + s->analog[TEMPMON_TEMPSENSE2] = 0x00000000; +} + +static uint64_t imx6ul_ccm_read(void *opaque, hwaddr offset, unsigned size) +{ + uint32_t value = 0; + uint32_t index = offset >> 2; + IMX6ULCCMState *s = (IMX6ULCCMState *)opaque; + + assert(index < CCM_MAX); + + value = s->ccm[index]; + + trace_ccm_read_reg(imx6ul_ccm_reg_name(index), (uint32_t)value); + + return (uint64_t)value; +} + +static void imx6ul_ccm_write(void *opaque, hwaddr offset, uint64_t value, + unsigned size) +{ + uint32_t index = offset >> 2; + IMX6ULCCMState *s = (IMX6ULCCMState *)opaque; + + assert(index < CCM_MAX); + + trace_ccm_write_reg(imx6ul_ccm_reg_name(index), (uint32_t)value); + + s->ccm[index] = (s->ccm[index] & ccm_mask[index]) | + ((uint32_t)value & ~ccm_mask[index]); +} + +static uint64_t imx6ul_analog_read(void *opaque, hwaddr offset, unsigned size) +{ + uint32_t value; + uint32_t index = offset >> 2; + IMX6ULCCMState *s = (IMX6ULCCMState *)opaque; + + assert(index < CCM_ANALOG_MAX); + + switch (index) { + case CCM_ANALOG_PLL_ARM_SET: + case CCM_ANALOG_PLL_USB1_SET: + case CCM_ANALOG_PLL_USB2_SET: + case CCM_ANALOG_PLL_SYS_SET: + case CCM_ANALOG_PLL_AUDIO_SET: + case CCM_ANALOG_PLL_VIDEO_SET: + case CCM_ANALOG_PLL_ENET_SET: + case CCM_ANALOG_PFD_480_SET: + case CCM_ANALOG_PFD_528_SET: + case CCM_ANALOG_MISC0_SET: + case PMU_MISC1_SET: + case CCM_ANALOG_MISC2_SET: + case USB_ANALOG_USB1_VBUS_DETECT_SET: + case USB_ANALOG_USB1_CHRG_DETECT_SET: + case USB_ANALOG_USB1_MISC_SET: + case USB_ANALOG_USB2_VBUS_DETECT_SET: + case USB_ANALOG_USB2_CHRG_DETECT_SET: + case USB_ANALOG_USB2_MISC_SET: + case TEMPMON_TEMPSENSE0_SET: + case TEMPMON_TEMPSENSE1_SET: + case TEMPMON_TEMPSENSE2_SET: + /* + * All REG_NAME_SET register access are in fact targeting + * the REG_NAME register. 
+ */ + value = s->analog[index - 1]; + break; + case CCM_ANALOG_PLL_ARM_CLR: + case CCM_ANALOG_PLL_USB1_CLR: + case CCM_ANALOG_PLL_USB2_CLR: + case CCM_ANALOG_PLL_SYS_CLR: + case CCM_ANALOG_PLL_AUDIO_CLR: + case CCM_ANALOG_PLL_VIDEO_CLR: + case CCM_ANALOG_PLL_ENET_CLR: + case CCM_ANALOG_PFD_480_CLR: + case CCM_ANALOG_PFD_528_CLR: + case CCM_ANALOG_MISC0_CLR: + case PMU_MISC1_CLR: + case CCM_ANALOG_MISC2_CLR: + case USB_ANALOG_USB1_VBUS_DETECT_CLR: + case USB_ANALOG_USB1_CHRG_DETECT_CLR: + case USB_ANALOG_USB1_MISC_CLR: + case USB_ANALOG_USB2_VBUS_DETECT_CLR: + case USB_ANALOG_USB2_CHRG_DETECT_CLR: + case USB_ANALOG_USB2_MISC_CLR: + case TEMPMON_TEMPSENSE0_CLR: + case TEMPMON_TEMPSENSE1_CLR: + case TEMPMON_TEMPSENSE2_CLR: + /* + * All REG_NAME_CLR register access are in fact targeting + * the REG_NAME register. + */ + value = s->analog[index - 2]; + break; + case CCM_ANALOG_PLL_ARM_TOG: + case CCM_ANALOG_PLL_USB1_TOG: + case CCM_ANALOG_PLL_USB2_TOG: + case CCM_ANALOG_PLL_SYS_TOG: + case CCM_ANALOG_PLL_AUDIO_TOG: + case CCM_ANALOG_PLL_VIDEO_TOG: + case CCM_ANALOG_PLL_ENET_TOG: + case CCM_ANALOG_PFD_480_TOG: + case CCM_ANALOG_PFD_528_TOG: + case CCM_ANALOG_MISC0_TOG: + case PMU_MISC1_TOG: + case CCM_ANALOG_MISC2_TOG: + case USB_ANALOG_USB1_VBUS_DETECT_TOG: + case USB_ANALOG_USB1_CHRG_DETECT_TOG: + case USB_ANALOG_USB1_MISC_TOG: + case USB_ANALOG_USB2_VBUS_DETECT_TOG: + case USB_ANALOG_USB2_CHRG_DETECT_TOG: + case USB_ANALOG_USB2_MISC_TOG: + case TEMPMON_TEMPSENSE0_TOG: + case TEMPMON_TEMPSENSE1_TOG: + case TEMPMON_TEMPSENSE2_TOG: + /* + * All REG_NAME_TOG register access are in fact targeting + * the REG_NAME register. + */ + value = s->analog[index - 3]; + break; + default: + value = s->analog[index]; + break; + } + + trace_ccm_read_reg(imx6ul_analog_reg_name(index), (uint32_t)value); + + return (uint64_t)value; +} + +static void imx6ul_analog_write(void *opaque, hwaddr offset, uint64_t value, + unsigned size) +{ + uint32_t index = offset >> 2; + IMX6ULCCMState *s = (IMX6ULCCMState *)opaque; + + assert(index < CCM_ANALOG_MAX); + + trace_ccm_write_reg(imx6ul_analog_reg_name(index), (uint32_t)value); + + switch (index) { + case CCM_ANALOG_PLL_ARM_SET: + case CCM_ANALOG_PLL_USB1_SET: + case CCM_ANALOG_PLL_USB2_SET: + case CCM_ANALOG_PLL_SYS_SET: + case CCM_ANALOG_PLL_AUDIO_SET: + case CCM_ANALOG_PLL_VIDEO_SET: + case CCM_ANALOG_PLL_ENET_SET: + case CCM_ANALOG_PFD_480_SET: + case CCM_ANALOG_PFD_528_SET: + case CCM_ANALOG_MISC0_SET: + case PMU_MISC1_SET: + case CCM_ANALOG_MISC2_SET: + case USB_ANALOG_USB1_VBUS_DETECT_SET: + case USB_ANALOG_USB1_CHRG_DETECT_SET: + case USB_ANALOG_USB1_MISC_SET: + case USB_ANALOG_USB2_VBUS_DETECT_SET: + case USB_ANALOG_USB2_CHRG_DETECT_SET: + case USB_ANALOG_USB2_MISC_SET: + /* + * All REG_NAME_SET register access are in fact targeting + * the REG_NAME register. So we change the value of the + * REG_NAME register, setting bits passed in the value. 
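+ * Worked example (value chosen purely for illustration): writing
+ * 0x00002000 to CCM_ANALOG_PLL_ARM_SET ORs bit 13 into
+ * CCM_ANALOG_PLL_ARM, subject to the analog_mask filtering applied
+ * below; bits covered by the mask are read-only and stay unchanged.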
+ */ + s->analog[index - 1] |= (value & ~analog_mask[index - 1]); + break; + case CCM_ANALOG_PLL_ARM_CLR: + case CCM_ANALOG_PLL_USB1_CLR: + case CCM_ANALOG_PLL_USB2_CLR: + case CCM_ANALOG_PLL_SYS_CLR: + case CCM_ANALOG_PLL_AUDIO_CLR: + case CCM_ANALOG_PLL_VIDEO_CLR: + case CCM_ANALOG_PLL_ENET_CLR: + case CCM_ANALOG_PFD_480_CLR: + case CCM_ANALOG_PFD_528_CLR: + case CCM_ANALOG_MISC0_CLR: + case PMU_MISC1_CLR: + case CCM_ANALOG_MISC2_CLR: + case USB_ANALOG_USB1_VBUS_DETECT_CLR: + case USB_ANALOG_USB1_CHRG_DETECT_CLR: + case USB_ANALOG_USB1_MISC_CLR: + case USB_ANALOG_USB2_VBUS_DETECT_CLR: + case USB_ANALOG_USB2_CHRG_DETECT_CLR: + case USB_ANALOG_USB2_MISC_CLR: + /* + * All REG_NAME_CLR register access are in fact targeting + * the REG_NAME register. So we change the value of the + * REG_NAME register, unsetting bits passed in the value. + */ + s->analog[index - 2] &= ~(value & ~analog_mask[index - 2]); + break; + case CCM_ANALOG_PLL_ARM_TOG: + case CCM_ANALOG_PLL_USB1_TOG: + case CCM_ANALOG_PLL_USB2_TOG: + case CCM_ANALOG_PLL_SYS_TOG: + case CCM_ANALOG_PLL_AUDIO_TOG: + case CCM_ANALOG_PLL_VIDEO_TOG: + case CCM_ANALOG_PLL_ENET_TOG: + case CCM_ANALOG_PFD_480_TOG: + case CCM_ANALOG_PFD_528_TOG: + case CCM_ANALOG_MISC0_TOG: + case PMU_MISC1_TOG: + case CCM_ANALOG_MISC2_TOG: + case USB_ANALOG_USB1_VBUS_DETECT_TOG: + case USB_ANALOG_USB1_CHRG_DETECT_TOG: + case USB_ANALOG_USB1_MISC_TOG: + case USB_ANALOG_USB2_VBUS_DETECT_TOG: + case USB_ANALOG_USB2_CHRG_DETECT_TOG: + case USB_ANALOG_USB2_MISC_TOG: + /* + * All REG_NAME_TOG register access are in fact targeting + * the REG_NAME register. So we change the value of the + * REG_NAME register, toggling bits passed in the value. + */ + s->analog[index - 3] ^= (value & ~analog_mask[index - 3]); + break; + default: + s->analog[index] = (s->analog[index] & analog_mask[index]) | + (value & ~analog_mask[index]); + break; + } +} + +static const struct MemoryRegionOps imx6ul_ccm_ops = { + .read = imx6ul_ccm_read, + .write = imx6ul_ccm_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + /* + * Our device would not work correctly if the guest was doing + * unaligned access. This might not be a limitation on the real + * device but in practice there is no reason for a guest to access + * this device unaligned. + */ + .min_access_size = 4, + .max_access_size = 4, + .unaligned = false, + }, +}; + +static const struct MemoryRegionOps imx6ul_analog_ops = { + .read = imx6ul_analog_read, + .write = imx6ul_analog_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + /* + * Our device would not work correctly if the guest was doing + * unaligned access. This might not be a limitation on the real + * device but in practice there is no reason for a guest to access + * this device unaligned. 
+ */ + .min_access_size = 4, + .max_access_size = 4, + .unaligned = false, + }, +}; + +static void imx6ul_ccm_init(Object *obj) +{ + DeviceState *dev = DEVICE(obj); + SysBusDevice *sd = SYS_BUS_DEVICE(obj); + IMX6ULCCMState *s = IMX6UL_CCM(obj); + + /* initialize a container for the whole memory range */ + memory_region_init(&s->container, OBJECT(dev), TYPE_IMX6UL_CCM, 0x8000); + + /* We initialize an IO memory region for the CCM part */ + memory_region_init_io(&s->ioccm, OBJECT(dev), &imx6ul_ccm_ops, s, + TYPE_IMX6UL_CCM ".ccm", CCM_MAX * sizeof(uint32_t)); + + /* Add the CCM as a subregion at offset 0 */ + memory_region_add_subregion(&s->container, 0, &s->ioccm); + + /* We initialize an IO memory region for the ANALOG part */ + memory_region_init_io(&s->ioanalog, OBJECT(dev), &imx6ul_analog_ops, s, + TYPE_IMX6UL_CCM ".analog", + CCM_ANALOG_MAX * sizeof(uint32_t)); + + /* Add the ANALOG as a subregion at offset 0x4000 */ + memory_region_add_subregion(&s->container, 0x4000, &s->ioanalog); + + sysbus_init_mmio(sd, &s->container); +} + +static void imx6ul_ccm_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + IMXCCMClass *ccm = IMX_CCM_CLASS(klass); + + dc->reset = imx6ul_ccm_reset; + dc->vmsd = &vmstate_imx6ul_ccm; + dc->desc = "i.MX6UL Clock Control Module"; + + ccm->get_clock_frequency = imx6ul_ccm_get_clock_frequency; +} + +static const TypeInfo imx6ul_ccm_info = { + .name = TYPE_IMX6UL_CCM, + .parent = TYPE_IMX_CCM, + .instance_size = sizeof(IMX6ULCCMState), + .instance_init = imx6ul_ccm_init, + .class_init = imx6ul_ccm_class_init, +}; + +static void imx6ul_ccm_register_types(void) +{ + type_register_static(&imx6ul_ccm_info); +} + +type_init(imx6ul_ccm_register_types) diff --git a/hw/misc/imx7_ccm.c b/hw/misc/imx7_ccm.c new file mode 100644 index 000000000..075159e49 --- /dev/null +++ b/hw/misc/imx7_ccm.c @@ -0,0 +1,287 @@ +/* + * Copyright (c) 2018, Impinj, Inc. + * + * i.MX7 CCM, PMU and ANALOG IP blocks emulation code + * + * Author: Andrey Smirnov + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory.
+ */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "qemu/module.h" + +#include "hw/misc/imx7_ccm.h" +#include "migration/vmstate.h" + +static void imx7_analog_reset(DeviceState *dev) +{ + IMX7AnalogState *s = IMX7_ANALOG(dev); + + memset(s->pmu, 0, sizeof(s->pmu)); + memset(s->analog, 0, sizeof(s->analog)); + + s->analog[ANALOG_PLL_ARM] = 0x00002042; + s->analog[ANALOG_PLL_DDR] = 0x0060302c; + s->analog[ANALOG_PLL_DDR_SS] = 0x00000000; + s->analog[ANALOG_PLL_DDR_NUM] = 0x06aaac4d; + s->analog[ANALOG_PLL_DDR_DENOM] = 0x100003ec; + s->analog[ANALOG_PLL_480] = 0x00002000; + s->analog[ANALOG_PLL_480A] = 0x52605a56; + s->analog[ANALOG_PLL_480B] = 0x52525216; + s->analog[ANALOG_PLL_ENET] = 0x00001fc0; + s->analog[ANALOG_PLL_AUDIO] = 0x0001301b; + s->analog[ANALOG_PLL_AUDIO_SS] = 0x00000000; + s->analog[ANALOG_PLL_AUDIO_NUM] = 0x05f5e100; + s->analog[ANALOG_PLL_AUDIO_DENOM] = 0x2964619c; + s->analog[ANALOG_PLL_VIDEO] = 0x0008201b; + s->analog[ANALOG_PLL_VIDEO_SS] = 0x00000000; + s->analog[ANALOG_PLL_VIDEO_NUM] = 0x0000f699; + s->analog[ANALOG_PLL_VIDEO_DENOM] = 0x000f4240; + s->analog[ANALOG_PLL_MISC0] = 0x00000000; + + /* all PLLs need to be locked */ + s->analog[ANALOG_PLL_ARM] |= ANALOG_PLL_LOCK; + s->analog[ANALOG_PLL_DDR] |= ANALOG_PLL_LOCK; + s->analog[ANALOG_PLL_480] |= ANALOG_PLL_LOCK; + s->analog[ANALOG_PLL_480A] |= ANALOG_PLL_LOCK; + s->analog[ANALOG_PLL_480B] |= ANALOG_PLL_LOCK; + s->analog[ANALOG_PLL_ENET] |= ANALOG_PLL_LOCK; + s->analog[ANALOG_PLL_AUDIO] |= ANALOG_PLL_LOCK; + s->analog[ANALOG_PLL_VIDEO] |= ANALOG_PLL_LOCK; + s->analog[ANALOG_PLL_MISC0] |= ANALOG_PLL_LOCK; + + /* + * Since I couldn't find any info about this in the reference + * manual the value of this register is based strictly on matching + * what Linux kernel expects it to be. + */ + s->analog[ANALOG_DIGPROG] = 0x720000; + /* + * Set revision to be 1.0 (Arbitrary choice, no particular + * reason). + */ + s->analog[ANALOG_DIGPROG] |= 0x000010; +} + +static void imx7_ccm_reset(DeviceState *dev) +{ + IMX7CCMState *s = IMX7_CCM(dev); + + memset(s->ccm, 0, sizeof(s->ccm)); +} + +#define CCM_INDEX(offset) (((offset) & ~(hwaddr)0xF) / sizeof(uint32_t)) +#define CCM_BITOP(offset) ((offset) & (hwaddr)0xF) + +enum { + CCM_BITOP_NONE = 0x00, + CCM_BITOP_SET = 0x04, + CCM_BITOP_CLR = 0x08, + CCM_BITOP_TOG = 0x0C, +}; + +static uint64_t imx7_set_clr_tog_read(void *opaque, hwaddr offset, + unsigned size) +{ + const uint32_t *mmio = opaque; + + return mmio[CCM_INDEX(offset)]; +} + +static void imx7_set_clr_tog_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + const uint8_t bitop = CCM_BITOP(offset); + const uint32_t index = CCM_INDEX(offset); + uint32_t *mmio = opaque; + + switch (bitop) { + case CCM_BITOP_NONE: + mmio[index] = value; + break; + case CCM_BITOP_SET: + mmio[index] |= value; + break; + case CCM_BITOP_CLR: + mmio[index] &= ~value; + break; + case CCM_BITOP_TOG: + mmio[index] ^= value; + break; + }; +} + +static const struct MemoryRegionOps imx7_set_clr_tog_ops = { + .read = imx7_set_clr_tog_read, + .write = imx7_set_clr_tog_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .impl = { + /* + * Our device would not work correctly if the guest was doing + * unaligned access. This might not be a limitation on the real + * device but in practice there is no reason for a guest to access + * this device unaligned. 
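+ *
+ * As a worked example of the offset decode these ops rely on (the
+ * numbers are illustrative): an access at offset 0x4C yields
+ * CCM_INDEX(0x4C) = (0x4C & ~0xF) / 4 = 0x10 and CCM_BITOP(0x4C) =
+ * 0xC = CCM_BITOP_TOG, so a write toggles bits: mmio[0x10] ^= value.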
+ */ + .min_access_size = 4, + .max_access_size = 4, + .unaligned = false, + }, +}; + +static void imx7_digprog_write(void *opaque, hwaddr addr, + uint64_t data, unsigned size) +{ + qemu_log_mask(LOG_GUEST_ERROR, + "Guest write to read-only ANALOG_DIGPROG register\n"); +} + +static const struct MemoryRegionOps imx7_digprog_ops = { + .read = imx7_set_clr_tog_read, + .write = imx7_digprog_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .impl = { + .min_access_size = 4, + .max_access_size = 4, + .unaligned = false, + }, +}; + +static void imx7_ccm_init(Object *obj) +{ + SysBusDevice *sd = SYS_BUS_DEVICE(obj); + IMX7CCMState *s = IMX7_CCM(obj); + + memory_region_init_io(&s->iomem, + obj, + &imx7_set_clr_tog_ops, + s->ccm, + TYPE_IMX7_CCM ".ccm", + sizeof(s->ccm)); + + sysbus_init_mmio(sd, &s->iomem); +} + +static void imx7_analog_init(Object *obj) +{ + SysBusDevice *sd = SYS_BUS_DEVICE(obj); + IMX7AnalogState *s = IMX7_ANALOG(obj); + + memory_region_init(&s->mmio.container, obj, TYPE_IMX7_ANALOG, + 0x10000); + + memory_region_init_io(&s->mmio.analog, + obj, + &imx7_set_clr_tog_ops, + s->analog, + TYPE_IMX7_ANALOG, + sizeof(s->analog)); + + memory_region_add_subregion(&s->mmio.container, + 0x60, &s->mmio.analog); + + memory_region_init_io(&s->mmio.pmu, + obj, + &imx7_set_clr_tog_ops, + s->pmu, + TYPE_IMX7_ANALOG ".pmu", + sizeof(s->pmu)); + + memory_region_add_subregion(&s->mmio.container, + 0x200, &s->mmio.pmu); + + memory_region_init_io(&s->mmio.digprog, + obj, + &imx7_digprog_ops, + &s->analog[ANALOG_DIGPROG], + TYPE_IMX7_ANALOG ".digprog", + sizeof(uint32_t)); + + memory_region_add_subregion_overlap(&s->mmio.container, + 0x800, &s->mmio.digprog, 10); + + + sysbus_init_mmio(sd, &s->mmio.container); +} + +static const VMStateDescription vmstate_imx7_ccm = { + .name = TYPE_IMX7_CCM, + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32_ARRAY(ccm, IMX7CCMState, CCM_MAX), + VMSTATE_END_OF_LIST() + }, +}; + +static uint32_t imx7_ccm_get_clock_frequency(IMXCCMState *dev, IMXClk clock) +{ + /* + * This function is "consumed" by GPT emulation code, however on + * i.MX7 each GPT block can have their own clock root. 
This means + * that this function needs some way to know the requester's + * identity; how to pass that identity (via additional IMXClk + * constants or an extra argument to this method) has yet to be + * figured out + */ + qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Not implemented\n", + TYPE_IMX7_CCM, __func__); + return 0; +} + +static void imx7_ccm_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + IMXCCMClass *ccm = IMX_CCM_CLASS(klass); + + dc->reset = imx7_ccm_reset; + dc->vmsd = &vmstate_imx7_ccm; + dc->desc = "i.MX7 Clock Control Module"; + + ccm->get_clock_frequency = imx7_ccm_get_clock_frequency; +} + +static const TypeInfo imx7_ccm_info = { + .name = TYPE_IMX7_CCM, + .parent = TYPE_IMX_CCM, + .instance_size = sizeof(IMX7CCMState), + .instance_init = imx7_ccm_init, + .class_init = imx7_ccm_class_init, +}; + +static const VMStateDescription vmstate_imx7_analog = { + .name = TYPE_IMX7_ANALOG, + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32_ARRAY(analog, IMX7AnalogState, ANALOG_MAX), + VMSTATE_UINT32_ARRAY(pmu, IMX7AnalogState, PMU_MAX), + VMSTATE_END_OF_LIST() + }, +}; + +static void imx7_analog_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = imx7_analog_reset; + dc->vmsd = &vmstate_imx7_analog; + dc->desc = "i.MX7 Analog Module"; +} + +static const TypeInfo imx7_analog_info = { + .name = TYPE_IMX7_ANALOG, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(IMX7AnalogState), + .instance_init = imx7_analog_init, + .class_init = imx7_analog_class_init, +}; + +static void imx7_ccm_register_type(void) +{ + type_register_static(&imx7_ccm_info); + type_register_static(&imx7_analog_info); +} +type_init(imx7_ccm_register_type) diff --git a/hw/misc/imx7_gpr.c b/hw/misc/imx7_gpr.c new file mode 100644 index 000000000..b03341a2e --- /dev/null +++ b/hw/misc/imx7_gpr.c @@ -0,0 +1,124 @@ +/* + * Copyright (c) 2018, Impinj, Inc. + * + * i.MX7 GPR IP block emulation code + * + * Author: Andrey Smirnov + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + * Bare minimum emulation code needed to support being able to shut + * down a Linux guest gracefully.
+ */ + +#include "qemu/osdep.h" +#include "hw/misc/imx7_gpr.h" +#include "qemu/log.h" +#include "qemu/module.h" + +#include "trace.h" + +enum IMX7GPRRegisters { + IOMUXC_GPR0 = 0x00, + IOMUXC_GPR1 = 0x04, + IOMUXC_GPR2 = 0x08, + IOMUXC_GPR3 = 0x0c, + IOMUXC_GPR4 = 0x10, + IOMUXC_GPR5 = 0x14, + IOMUXC_GPR6 = 0x18, + IOMUXC_GPR7 = 0x1c, + IOMUXC_GPR8 = 0x20, + IOMUXC_GPR9 = 0x24, + IOMUXC_GPR10 = 0x28, + IOMUXC_GPR11 = 0x2c, + IOMUXC_GPR12 = 0x30, + IOMUXC_GPR13 = 0x34, + IOMUXC_GPR14 = 0x38, + IOMUXC_GPR15 = 0x3c, + IOMUXC_GPR16 = 0x40, + IOMUXC_GPR17 = 0x44, + IOMUXC_GPR18 = 0x48, + IOMUXC_GPR19 = 0x4c, + IOMUXC_GPR20 = 0x50, + IOMUXC_GPR21 = 0x54, + IOMUXC_GPR22 = 0x58, +}; + +#define IMX7D_GPR1_IRQ_MASK BIT(12) +#define IMX7D_GPR1_ENET1_TX_CLK_SEL_MASK BIT(13) +#define IMX7D_GPR1_ENET2_TX_CLK_SEL_MASK BIT(14) +#define IMX7D_GPR1_ENET_TX_CLK_SEL_MASK (0x3 << 13) +#define IMX7D_GPR1_ENET1_CLK_DIR_MASK BIT(17) +#define IMX7D_GPR1_ENET2_CLK_DIR_MASK BIT(18) +#define IMX7D_GPR1_ENET_CLK_DIR_MASK (0x3 << 17) + +#define IMX7D_GPR5_CSI_MUX_CONTROL_MIPI BIT(4) +#define IMX7D_GPR12_PCIE_PHY_REFCLK_SEL BIT(5) +#define IMX7D_GPR22_PCIE_PHY_PLL_LOCKED BIT(31) + + +static uint64_t imx7_gpr_read(void *opaque, hwaddr offset, unsigned size) +{ + trace_imx7_gpr_read(offset); + + if (offset == IOMUXC_GPR22) { + return IMX7D_GPR22_PCIE_PHY_PLL_LOCKED; + } + + return 0; +} + +static void imx7_gpr_write(void *opaque, hwaddr offset, + uint64_t v, unsigned size) +{ + trace_imx7_gpr_write(offset, v); +} + +static const struct MemoryRegionOps imx7_gpr_ops = { + .read = imx7_gpr_read, + .write = imx7_gpr_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .impl = { + /* + * Our device would not work correctly if the guest was doing + * unaligned access. This might not be a limitation on the + * real device but in practice there is no reason for a guest + * to access this device unaligned. + */ + .min_access_size = 4, + .max_access_size = 4, + .unaligned = false, + }, +}; + +static void imx7_gpr_init(Object *obj) +{ + SysBusDevice *sd = SYS_BUS_DEVICE(obj); + IMX7GPRState *s = IMX7_GPR(obj); + + memory_region_init_io(&s->mmio, obj, &imx7_gpr_ops, s, + TYPE_IMX7_GPR, 64 * 1024); + sysbus_init_mmio(sd, &s->mmio); +} + +static void imx7_gpr_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->desc = "i.MX7 General Purpose Registers Module"; +} + +static const TypeInfo imx7_gpr_info = { + .name = TYPE_IMX7_GPR, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(IMX7GPRState), + .instance_init = imx7_gpr_init, + .class_init = imx7_gpr_class_init, +}; + +static void imx7_gpr_register_type(void) +{ + type_register_static(&imx7_gpr_info); +} +type_init(imx7_gpr_register_type) diff --git a/hw/misc/imx7_snvs.c b/hw/misc/imx7_snvs.c new file mode 100644 index 000000000..ee7698bd9 --- /dev/null +++ b/hw/misc/imx7_snvs.c @@ -0,0 +1,83 @@ +/* + * IMX7 Secure Non-Volatile Storage + * + * Copyright (c) 2018, Impinj, Inc. + * + * Author: Andrey Smirnov + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + * Bare minimum emulation code needed to support being able to shut + * down linux guest gracefully. 
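+ * (Concretely: a guest write that sets both SNVS_LPCR_TOP and
+ * SNVS_LPCR_DP_EN in the LPCR register, which is what a Linux
+ * power-off is expected to do, makes the write handler below request
+ * a QEMU guest shutdown.)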
+ */ + +#include "qemu/osdep.h" +#include "hw/misc/imx7_snvs.h" +#include "qemu/module.h" +#include "sysemu/runstate.h" + +static uint64_t imx7_snvs_read(void *opaque, hwaddr offset, unsigned size) +{ + return 0; +} + +static void imx7_snvs_write(void *opaque, hwaddr offset, + uint64_t v, unsigned size) +{ + const uint32_t value = v; + const uint32_t mask = SNVS_LPCR_TOP | SNVS_LPCR_DP_EN; + + if (offset == SNVS_LPCR && ((value & mask) == mask)) { + qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN); + } +} + +static const struct MemoryRegionOps imx7_snvs_ops = { + .read = imx7_snvs_read, + .write = imx7_snvs_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .impl = { + /* + * Our device would not work correctly if the guest was doing + * unaligned access. This might not be a limitation on the real + * device but in practice there is no reason for a guest to access + * this device unaligned. + */ + .min_access_size = 4, + .max_access_size = 4, + .unaligned = false, + }, +}; + +static void imx7_snvs_init(Object *obj) +{ + SysBusDevice *sd = SYS_BUS_DEVICE(obj); + IMX7SNVSState *s = IMX7_SNVS(obj); + + memory_region_init_io(&s->mmio, obj, &imx7_snvs_ops, s, + TYPE_IMX7_SNVS, 0x1000); + + sysbus_init_mmio(sd, &s->mmio); +} + +static void imx7_snvs_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->desc = "i.MX7 Secure Non-Volatile Storage Module"; +} + +static const TypeInfo imx7_snvs_info = { + .name = TYPE_IMX7_SNVS, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(IMX7SNVSState), + .instance_init = imx7_snvs_init, + .class_init = imx7_snvs_class_init, +}; + +static void imx7_snvs_register_type(void) +{ + type_register_static(&imx7_snvs_info); +} +type_init(imx7_snvs_register_type) diff --git a/hw/misc/imx_ccm.c b/hw/misc/imx_ccm.c new file mode 100644 index 000000000..9403c5daa --- /dev/null +++ b/hw/misc/imx_ccm.c @@ -0,0 +1,86 @@ +/* + * IMX31 Clock Control Module + * + * Copyright (C) 2012 NICTA + * Updated by Jean-Christophe Dubois + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + * This is an abstract base class used to get a common interface to + * retrieve the CCM frequencies from the various i.MX SOC. + */ + +#include "qemu/osdep.h" +#include "hw/misc/imx_ccm.h" +#include "qemu/module.h" + +#ifndef DEBUG_IMX_CCM +#define DEBUG_IMX_CCM 0 +#endif + +#define DPRINTF(fmt, args...) 
\ + do { \ + if (DEBUG_IMX_CCM) { \ + fprintf(stderr, "[%s]%s: " fmt , TYPE_IMX_CCM, \ + __func__, ##args); \ + } \ + } while (0) + + +uint32_t imx_ccm_get_clock_frequency(IMXCCMState *dev, IMXClk clock) +{ + uint32_t freq = 0; + IMXCCMClass *klass = IMX_CCM_GET_CLASS(dev); + + if (klass->get_clock_frequency) { + freq = klass->get_clock_frequency(dev, clock); + } + + DPRINTF("(clock = %d) = %u\n", clock, freq); + + return freq; +} + +/* + * Calculate PLL output frequency + */ +uint32_t imx_ccm_calc_pll(uint32_t pllreg, uint32_t base_freq) +{ + int32_t freq; + int32_t mfn = MFN(pllreg); /* Numerator */ + uint32_t mfi = MFI(pllreg); /* Integer part */ + uint32_t mfd = 1 + MFD(pllreg); /* Denominator */ + uint32_t pd = 1 + PD(pllreg); /* Pre-divider */ + + if (mfi < 5) { + mfi = 5; + } + + /* mfn is 10-bit signed twos-complement */ + mfn <<= 32 - 10; + mfn >>= 32 - 10; + + freq = ((2 * (base_freq >> 10) * (mfi * mfd + mfn)) / + (mfd * pd)) << 10; + + DPRINTF("(pllreg = 0x%08x, base_freq = %u) = %d\n", pllreg, base_freq, + freq); + + return freq; +} + +static const TypeInfo imx_ccm_info = { + .name = TYPE_IMX_CCM, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(IMXCCMState), + .class_size = sizeof(IMXCCMClass), + .abstract = true, +}; + +static void imx_ccm_register_types(void) +{ + type_register_static(&imx_ccm_info); +} + +type_init(imx_ccm_register_types) diff --git a/hw/misc/imx_rngc.c b/hw/misc/imx_rngc.c new file mode 100644 index 000000000..632c03779 --- /dev/null +++ b/hw/misc/imx_rngc.c @@ -0,0 +1,277 @@ +/* + * Freescale i.MX RNGC emulation + * + * Copyright (C) 2020 Martin Kaiser + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + * This driver provides the minimum functionality to initialize and seed + * an rngc and to read random numbers. The rngb that is found in imx25 + * chipsets is also supported. + */ + +#include "qemu/osdep.h" +#include "qemu/main-loop.h" +#include "qemu/module.h" +#include "qemu/guest-random.h" +#include "hw/irq.h" +#include "hw/misc/imx_rngc.h" +#include "migration/vmstate.h" + +#define RNGC_NAME "i.MX RNGC" + +#define RNGC_VER_ID 0x00 +#define RNGC_COMMAND 0x04 +#define RNGC_CONTROL 0x08 +#define RNGC_STATUS 0x0C +#define RNGC_FIFO 0x14 + +/* These version info are reported by the rngb in an imx258 chip. */ +#define RNG_TYPE_RNGB 0x1 +#define V_MAJ 0x2 +#define V_MIN 0x40 + +#define RNGC_CMD_BIT_SW_RST 0x40 +#define RNGC_CMD_BIT_CLR_ERR 0x20 +#define RNGC_CMD_BIT_CLR_INT 0x10 +#define RNGC_CMD_BIT_SEED 0x02 +#define RNGC_CMD_BIT_SELF_TEST 0x01 + +#define RNGC_CTRL_BIT_MASK_ERR 0x40 +#define RNGC_CTRL_BIT_MASK_DONE 0x20 +#define RNGC_CTRL_BIT_AUTO_SEED 0x10 + +/* the current status for self-test and seed operations */ +#define OP_IDLE 0 +#define OP_RUN 1 +#define OP_DONE 2 + +static uint64_t imx_rngc_read(void *opaque, hwaddr offset, unsigned size) +{ + IMXRNGCState *s = IMX_RNGC(opaque); + uint64_t val = 0; + + switch (offset) { + case RNGC_VER_ID: + val |= RNG_TYPE_RNGB << 28 | V_MAJ << 8 | V_MIN; + break; + + case RNGC_COMMAND: + if (s->op_seed == OP_RUN) { + val |= RNGC_CMD_BIT_SEED; + } + if (s->op_self_test == OP_RUN) { + val |= RNGC_CMD_BIT_SELF_TEST; + } + break; + + case RNGC_CONTROL: + /* + * The CTL_ACC and VERIF_MODE bits are not supported yet. + * They read as 0. + */ + val |= s->mask; + if (s->auto_seed) { + val |= RNGC_CTRL_BIT_AUTO_SEED; + } + /* + * We don't have an internal fifo like the real hardware. 
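+ * Reads of RNGC_FIFO below are instead served on demand from
+ * qemu_guest_getrandom_nofail(), so the fifo can never run dry.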
+ * There's no need for strategy to handle fifo underflows. + * We return the FIFO_UFLOW_RESPONSE bits as 0. + */ + break; + + case RNGC_STATUS: + /* + * We never report any statistics test or self-test errors or any + * other errors. STAT_TEST_PF, ST_PF and ERROR are always 0. + */ + + /* + * We don't have an internal fifo, see above. Therefore, we + * report back the default fifo size (5 32-bit words) and + * indicate that our fifo is always full. + */ + val |= 5 << 12 | 5 << 8; + + /* We always have a new seed available. */ + val |= 1 << 6; + + if (s->op_seed == OP_DONE) { + val |= 1 << 5; + } + if (s->op_self_test == OP_DONE) { + val |= 1 << 4; + } + if (s->op_seed == OP_RUN || s->op_self_test == OP_RUN) { + /* + * We're busy if self-test is running or if we're + * seeding the prng. + */ + val |= 1 << 1; + } else { + /* + * We're ready to provide secure random numbers whenever + * we're not busy. + */ + val |= 1; + } + break; + + case RNGC_FIFO: + qemu_guest_getrandom_nofail(&val, sizeof(val)); + break; + } + + return val; +} + +static void imx_rngc_do_reset(IMXRNGCState *s) +{ + s->op_self_test = OP_IDLE; + s->op_seed = OP_IDLE; + s->mask = 0; + s->auto_seed = false; +} + +static void imx_rngc_write(void *opaque, hwaddr offset, uint64_t value, + unsigned size) +{ + IMXRNGCState *s = IMX_RNGC(opaque); + + switch (offset) { + case RNGC_COMMAND: + if (value & RNGC_CMD_BIT_SW_RST) { + imx_rngc_do_reset(s); + } + + /* + * For now, both CLR_ERR and CLR_INT clear the interrupt. We + * don't report any errors yet. + */ + if (value & (RNGC_CMD_BIT_CLR_ERR | RNGC_CMD_BIT_CLR_INT)) { + qemu_irq_lower(s->irq); + } + + if (value & RNGC_CMD_BIT_SEED) { + s->op_seed = OP_RUN; + qemu_bh_schedule(s->seed_bh); + } + + if (value & RNGC_CMD_BIT_SELF_TEST) { + s->op_self_test = OP_RUN; + qemu_bh_schedule(s->self_test_bh); + } + break; + + case RNGC_CONTROL: + /* + * The CTL_ACC and VERIF_MODE bits are not supported yet. + * We ignore them if they're set by the caller. 
+ */ + + if (value & RNGC_CTRL_BIT_MASK_ERR) { + s->mask |= RNGC_CTRL_BIT_MASK_ERR; + } else { + s->mask &= ~RNGC_CTRL_BIT_MASK_ERR; + } + + if (value & RNGC_CTRL_BIT_MASK_DONE) { + s->mask |= RNGC_CTRL_BIT_MASK_DONE; + } else { + s->mask &= ~RNGC_CTRL_BIT_MASK_DONE; + } + + if (value & RNGC_CTRL_BIT_AUTO_SEED) { + s->auto_seed = true; + } else { + s->auto_seed = false; + } + break; + } +} + +static const MemoryRegionOps imx_rngc_ops = { + .read = imx_rngc_read, + .write = imx_rngc_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void imx_rngc_self_test(void *opaque) +{ + IMXRNGCState *s = IMX_RNGC(opaque); + + s->op_self_test = OP_DONE; + if (!(s->mask & RNGC_CTRL_BIT_MASK_DONE)) { + qemu_irq_raise(s->irq); + } +} + +static void imx_rngc_seed(void *opaque) +{ + IMXRNGCState *s = IMX_RNGC(opaque); + + s->op_seed = OP_DONE; + if (!(s->mask & RNGC_CTRL_BIT_MASK_DONE)) { + qemu_irq_raise(s->irq); + } +} + +static void imx_rngc_realize(DeviceState *dev, Error **errp) +{ + IMXRNGCState *s = IMX_RNGC(dev); + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + + memory_region_init_io(&s->iomem, OBJECT(s), &imx_rngc_ops, s, + TYPE_IMX_RNGC, 0x1000); + sysbus_init_mmio(sbd, &s->iomem); + + sysbus_init_irq(sbd, &s->irq); + s->self_test_bh = qemu_bh_new(imx_rngc_self_test, s); + s->seed_bh = qemu_bh_new(imx_rngc_seed, s); +} + +static void imx_rngc_reset(DeviceState *dev) +{ + IMXRNGCState *s = IMX_RNGC(dev); + + imx_rngc_do_reset(s); +} + +static const VMStateDescription vmstate_imx_rngc = { + .name = RNGC_NAME, + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT8(op_self_test, IMXRNGCState), + VMSTATE_UINT8(op_seed, IMXRNGCState), + VMSTATE_UINT8(mask, IMXRNGCState), + VMSTATE_BOOL(auto_seed, IMXRNGCState), + VMSTATE_END_OF_LIST() + } +}; + +static void imx_rngc_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = imx_rngc_realize; + dc->reset = imx_rngc_reset; + dc->desc = RNGC_NAME; + dc->vmsd = &vmstate_imx_rngc; +} + +static const TypeInfo imx_rngc_info = { + .name = TYPE_IMX_RNGC, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(IMXRNGCState), + .class_init = imx_rngc_class_init, +}; + +static void imx_rngc_register_types(void) +{ + type_register_static(&imx_rngc_info); +} + +type_init(imx_rngc_register_types) diff --git a/hw/misc/iotkit-secctl.c b/hw/misc/iotkit-secctl.c new file mode 100644 index 000000000..7b41cfa8f --- /dev/null +++ b/hw/misc/iotkit-secctl.c @@ -0,0 +1,845 @@ +/* + * Arm IoT Kit security controller + * + * Copyright (c) 2018 Linaro Limited + * Written by Peter Maydell + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 or + * (at your option) any later version.
+ */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "qapi/error.h" +#include "trace.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "hw/registerfields.h" +#include "hw/irq.h" +#include "hw/misc/iotkit-secctl.h" +#include "hw/arm/armsse-version.h" +#include "hw/qdev-properties.h" + +/* Registers in the secure privilege control block */ +REG32(SECRESPCFG, 0x10) +REG32(NSCCFG, 0x14) +REG32(SECMPCINTSTATUS, 0x1c) +REG32(SECPPCINTSTAT, 0x20) +REG32(SECPPCINTCLR, 0x24) +REG32(SECPPCINTEN, 0x28) +REG32(SECMSCINTSTAT, 0x30) +REG32(SECMSCINTCLR, 0x34) +REG32(SECMSCINTEN, 0x38) +REG32(BRGINTSTAT, 0x40) +REG32(BRGINTCLR, 0x44) +REG32(BRGINTEN, 0x48) +REG32(AHBNSPPC0, 0x50) +REG32(AHBNSPPCEXP0, 0x60) +REG32(AHBNSPPCEXP1, 0x64) +REG32(AHBNSPPCEXP2, 0x68) +REG32(AHBNSPPCEXP3, 0x6c) +REG32(APBNSPPC0, 0x70) +REG32(APBNSPPC1, 0x74) +REG32(APBNSPPCEXP0, 0x80) +REG32(APBNSPPCEXP1, 0x84) +REG32(APBNSPPCEXP2, 0x88) +REG32(APBNSPPCEXP3, 0x8c) +REG32(AHBSPPPC0, 0x90) +REG32(AHBSPPPCEXP0, 0xa0) +REG32(AHBSPPPCEXP1, 0xa4) +REG32(AHBSPPPCEXP2, 0xa8) +REG32(AHBSPPPCEXP3, 0xac) +REG32(APBSPPPC0, 0xb0) +REG32(APBSPPPC1, 0xb4) +REG32(APBSPPPCEXP0, 0xc0) +REG32(APBSPPPCEXP1, 0xc4) +REG32(APBSPPPCEXP2, 0xc8) +REG32(APBSPPPCEXP3, 0xcc) +REG32(NSMSCEXP, 0xd0) +REG32(PID4, 0xfd0) +REG32(PID5, 0xfd4) +REG32(PID6, 0xfd8) +REG32(PID7, 0xfdc) +REG32(PID0, 0xfe0) +REG32(PID1, 0xfe4) +REG32(PID2, 0xfe8) +REG32(PID3, 0xfec) +REG32(CID0, 0xff0) +REG32(CID1, 0xff4) +REG32(CID2, 0xff8) +REG32(CID3, 0xffc) + +/* Registers in the non-secure privilege control block */ +REG32(AHBNSPPPC0, 0x90) +REG32(AHBNSPPPCEXP0, 0xa0) +REG32(AHBNSPPPCEXP1, 0xa4) +REG32(AHBNSPPPCEXP2, 0xa8) +REG32(AHBNSPPPCEXP3, 0xac) +REG32(APBNSPPPC0, 0xb0) +REG32(APBNSPPPC1, 0xb4) +REG32(APBNSPPPCEXP0, 0xc0) +REG32(APBNSPPPCEXP1, 0xc4) +REG32(APBNSPPPCEXP2, 0xc8) +REG32(APBNSPPPCEXP3, 0xcc) +/* PID and CID registers are also present in the NS block */ + +static const uint8_t iotkit_secctl_s_idregs[] = { + 0x04, 0x00, 0x00, 0x00, + 0x52, 0xb8, 0x0b, 0x00, + 0x0d, 0xf0, 0x05, 0xb1, +}; + +static const uint8_t iotkit_secctl_ns_idregs[] = { + 0x04, 0x00, 0x00, 0x00, + 0x53, 0xb8, 0x0b, 0x00, + 0x0d, 0xf0, 0x05, 0xb1, +}; + +static const uint8_t iotkit_secctl_s_sse300_idregs[] = { + 0x04, 0x00, 0x00, 0x00, + 0x52, 0xb8, 0x2b, 0x00, + 0x0d, 0xf0, 0x05, 0xb1, +}; + +static const uint8_t iotkit_secctl_ns_sse300_idregs[] = { + 0x04, 0x00, 0x00, 0x00, + 0x53, 0xb8, 0x2b, 0x00, + 0x0d, 0xf0, 0x05, 0xb1, +}; + + +/* The register sets for the various PPCs (AHB internal, APB internal, + * AHB expansion, APB expansion) are all set up so that they are + * in 16-aligned blocks so offsets 0xN0, 0xN4, 0xN8, 0xNC are PPCs + * 0, 1, 2, 3 of that type, so we can convert a register address offset + * into an index into a PPC array easily.
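+ * For example, offset 0x68 (A_AHBNSPPCEXP2) decodes as
+ * extract32(0x68, 2, 2) == 2, selecting PPC number 2 of the AHB
+ * expansion group.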
+ */ +static inline int offset_to_ppc_idx(uint32_t offset) +{ + return extract32(offset, 2, 2); +} + +typedef void PerPPCFunction(IoTKitSecCtlPPC *ppc); + +static void foreach_ppc(IoTKitSecCtl *s, PerPPCFunction *fn) +{ + int i; + + for (i = 0; i < IOTS_NUM_APB_PPC; i++) { + fn(&s->apb[i]); + } + for (i = 0; i < IOTS_NUM_APB_EXP_PPC; i++) { + fn(&s->apbexp[i]); + } + for (i = 0; i < IOTS_NUM_AHB_EXP_PPC; i++) { + fn(&s->ahbexp[i]); + } +} + +static MemTxResult iotkit_secctl_s_read(void *opaque, hwaddr addr, + uint64_t *pdata, + unsigned size, MemTxAttrs attrs) +{ + uint64_t r; + uint32_t offset = addr & ~0x3; + IoTKitSecCtl *s = IOTKIT_SECCTL(opaque); + + switch (offset) { + case A_AHBNSPPC0: + case A_AHBSPPPC0: + r = 0; + break; + case A_SECRESPCFG: + r = s->secrespcfg; + break; + case A_NSCCFG: + r = s->nsccfg; + break; + case A_SECMPCINTSTATUS: + r = s->mpcintstatus; + break; + case A_SECPPCINTSTAT: + r = s->secppcintstat; + break; + case A_SECPPCINTEN: + r = s->secppcinten; + break; + case A_BRGINTSTAT: + /* QEMU's bus fabric can never report errors as it doesn't buffer + * writes, so we never report bridge interrupts. + */ + r = 0; + break; + case A_BRGINTEN: + r = s->brginten; + break; + case A_AHBNSPPCEXP0: + case A_AHBNSPPCEXP1: + case A_AHBNSPPCEXP2: + case A_AHBNSPPCEXP3: + r = s->ahbexp[offset_to_ppc_idx(offset)].ns; + break; + case A_APBNSPPC0: + case A_APBNSPPC1: + r = s->apb[offset_to_ppc_idx(offset)].ns; + break; + case A_APBNSPPCEXP0: + case A_APBNSPPCEXP1: + case A_APBNSPPCEXP2: + case A_APBNSPPCEXP3: + r = s->apbexp[offset_to_ppc_idx(offset)].ns; + break; + case A_AHBSPPPCEXP0: + case A_AHBSPPPCEXP1: + case A_AHBSPPPCEXP2: + case A_AHBSPPPCEXP3: + r = s->ahbexp[offset_to_ppc_idx(offset)].sp; + break; + case A_APBSPPPC0: + case A_APBSPPPC1: + r = s->apb[offset_to_ppc_idx(offset)].sp; + break; + case A_APBSPPPCEXP0: + case A_APBSPPPCEXP1: + case A_APBSPPPCEXP2: + case A_APBSPPPCEXP3: + r = s->apbexp[offset_to_ppc_idx(offset)].sp; + break; + case A_SECMSCINTSTAT: + r = s->secmscintstat; + break; + case A_SECMSCINTEN: + r = s->secmscinten; + break; + case A_NSMSCEXP: + r = s->nsmscexp; + break; + case A_PID4: + case A_PID5: + case A_PID6: + case A_PID7: + case A_PID0: + case A_PID1: + case A_PID2: + case A_PID3: + case A_CID0: + case A_CID1: + case A_CID2: + case A_CID3: + switch (s->sse_version) { + case ARMSSE_SSE300: + r = iotkit_secctl_s_sse300_idregs[(offset - A_PID4) / 4]; + break; + default: + r = iotkit_secctl_s_idregs[(offset - A_PID4) / 4]; + break; + } + break; + case A_SECPPCINTCLR: + case A_SECMSCINTCLR: + case A_BRGINTCLR: + qemu_log_mask(LOG_GUEST_ERROR, + "IotKit SecCtl S block read: write-only offset 0x%x\n", + offset); + r = 0; + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "IotKit SecCtl S block read: bad offset 0x%x\n", offset); + r = 0; + break; + } + + if (size != 4) { + /* None of our registers are access-sensitive, so just pull the right + * byte out of the word read result.
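+ * For instance, a one-byte read at address 0x11 returns
+ * extract32(r, 8, 8), i.e. byte 1 of the 32-bit SECRESPCFG value.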
+ */ + r = extract32(r, (addr & 3) * 8, size * 8); + } + + trace_iotkit_secctl_s_read(offset, r, size); + *pdata = r; + return MEMTX_OK; +} + +static void iotkit_secctl_update_ppc_ap(IoTKitSecCtlPPC *ppc) +{ + int i; + + for (i = 0; i < ppc->numports; i++) { + bool v; + + if (extract32(ppc->ns, i, 1)) { + v = extract32(ppc->nsp, i, 1); + } else { + v = extract32(ppc->sp, i, 1); + } + qemu_set_irq(ppc->ap[i], v); + } +} + +static void iotkit_secctl_ppc_ns_write(IoTKitSecCtlPPC *ppc, uint32_t value) +{ + int i; + + ppc->ns = value & MAKE_64BIT_MASK(0, ppc->numports); + for (i = 0; i < ppc->numports; i++) { + qemu_set_irq(ppc->nonsec[i], extract32(ppc->ns, i, 1)); + } + iotkit_secctl_update_ppc_ap(ppc); +} + +static void iotkit_secctl_ppc_sp_write(IoTKitSecCtlPPC *ppc, uint32_t value) +{ + ppc->sp = value & MAKE_64BIT_MASK(0, ppc->numports); + iotkit_secctl_update_ppc_ap(ppc); +} + +static void iotkit_secctl_ppc_nsp_write(IoTKitSecCtlPPC *ppc, uint32_t value) +{ + ppc->nsp = value & MAKE_64BIT_MASK(0, ppc->numports); + iotkit_secctl_update_ppc_ap(ppc); +} + +static void iotkit_secctl_ppc_update_irq_clear(IoTKitSecCtlPPC *ppc) +{ + uint32_t value = ppc->parent->secppcintstat; + + qemu_set_irq(ppc->irq_clear, extract32(value, ppc->irq_bit_offset, 1)); +} + +static void iotkit_secctl_ppc_update_irq_enable(IoTKitSecCtlPPC *ppc) +{ + uint32_t value = ppc->parent->secppcinten; + + qemu_set_irq(ppc->irq_enable, extract32(value, ppc->irq_bit_offset, 1)); +} + +static void iotkit_secctl_update_mscexp_irqs(qemu_irq *msc_irqs, uint32_t value) +{ + int i; + + for (i = 0; i < IOTS_NUM_EXP_MSC; i++) { + qemu_set_irq(msc_irqs[i], extract32(value, i + 16, 1)); + } +} + +static void iotkit_secctl_update_msc_irq(IoTKitSecCtl *s) +{ + /* Update the combined MSC IRQ, based on S_MSCEXP_STATUS and S_MSCEXP_EN */ + bool level = s->secmscintstat & s->secmscinten; + + qemu_set_irq(s->msc_irq, level); +} + +static MemTxResult iotkit_secctl_s_write(void *opaque, hwaddr addr, + uint64_t value, + unsigned size, MemTxAttrs attrs) +{ + IoTKitSecCtl *s = IOTKIT_SECCTL(opaque); + uint32_t offset = addr; + IoTKitSecCtlPPC *ppc; + + trace_iotkit_secctl_s_write(offset, value, size); + + if (size != 4) { + /* Byte and halfword writes are ignored */ + qemu_log_mask(LOG_GUEST_ERROR, + "IotKit SecCtl S block write: bad size, ignored\n"); + return MEMTX_OK; + } + + switch (offset) { + case A_NSCCFG: + s->nsccfg = value & 3; + qemu_set_irq(s->nsc_cfg_irq, s->nsccfg); + break; + case A_SECRESPCFG: + value &= 1; + s->secrespcfg = value; + qemu_set_irq(s->sec_resp_cfg, s->secrespcfg); + break; + case A_SECPPCINTCLR: + s->secppcintstat &= ~(value & 0x00f000f3); + foreach_ppc(s, iotkit_secctl_ppc_update_irq_clear); + break; + case A_SECPPCINTEN: + s->secppcinten = value & 0x00f000f3; + foreach_ppc(s, iotkit_secctl_ppc_update_irq_enable); + break; + case A_BRGINTCLR: + break; + case A_BRGINTEN: + s->brginten = value & 0xffff0000; + break; + case A_AHBNSPPCEXP0: + case A_AHBNSPPCEXP1: + case A_AHBNSPPCEXP2: + case A_AHBNSPPCEXP3: + ppc = &s->ahbexp[offset_to_ppc_idx(offset)]; + iotkit_secctl_ppc_ns_write(ppc, value); + break; + case A_APBNSPPC0: + case A_APBNSPPC1: + ppc = &s->apb[offset_to_ppc_idx(offset)]; + iotkit_secctl_ppc_ns_write(ppc, value); + break; + case A_APBNSPPCEXP0: + case A_APBNSPPCEXP1: + case A_APBNSPPCEXP2: + case A_APBNSPPCEXP3: + ppc = &s->apbexp[offset_to_ppc_idx(offset)]; + iotkit_secctl_ppc_ns_write(ppc, value); + break; + case A_AHBSPPPCEXP0: + case A_AHBSPPPCEXP1: + case A_AHBSPPPCEXP2: + case A_AHBSPPPCEXP3: + ppc 
= &s->ahbexp[offset_to_ppc_idx(offset)]; + iotkit_secctl_ppc_sp_write(ppc, value); + break; + case A_APBSPPPC0: + case A_APBSPPPC1: + ppc = &s->apb[offset_to_ppc_idx(offset)]; + iotkit_secctl_ppc_sp_write(ppc, value); + break; + case A_APBSPPPCEXP0: + case A_APBSPPPCEXP1: + case A_APBSPPPCEXP2: + case A_APBSPPPCEXP3: + ppc = &s->apbexp[offset_to_ppc_idx(offset)]; + iotkit_secctl_ppc_sp_write(ppc, value); + break; + case A_SECMSCINTCLR: + iotkit_secctl_update_mscexp_irqs(s->mscexp_clear, value); + break; + case A_SECMSCINTEN: + s->secmscinten = value; + iotkit_secctl_update_msc_irq(s); + break; + case A_NSMSCEXP: + s->nsmscexp = value; + iotkit_secctl_update_mscexp_irqs(s->mscexp_ns, value); + break; + case A_SECMPCINTSTATUS: + case A_SECPPCINTSTAT: + case A_SECMSCINTSTAT: + case A_BRGINTSTAT: + case A_AHBNSPPC0: + case A_AHBSPPPC0: + case A_PID4: + case A_PID5: + case A_PID6: + case A_PID7: + case A_PID0: + case A_PID1: + case A_PID2: + case A_PID3: + case A_CID0: + case A_CID1: + case A_CID2: + case A_CID3: + qemu_log_mask(LOG_GUEST_ERROR, + "IoTKit SecCtl S block write: " + "read-only offset 0x%x\n", offset); + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "IotKit SecCtl S block write: bad offset 0x%x\n", + offset); + break; + } + + return MEMTX_OK; +} + +static MemTxResult iotkit_secctl_ns_read(void *opaque, hwaddr addr, + uint64_t *pdata, + unsigned size, MemTxAttrs attrs) +{ + IoTKitSecCtl *s = IOTKIT_SECCTL(opaque); + uint64_t r; + uint32_t offset = addr & ~0x3; + + switch (offset) { + case A_AHBNSPPPC0: + r = 0; + break; + case A_AHBNSPPPCEXP0: + case A_AHBNSPPPCEXP1: + case A_AHBNSPPPCEXP2: + case A_AHBNSPPPCEXP3: + r = s->ahbexp[offset_to_ppc_idx(offset)].nsp; + break; + case A_APBNSPPPC0: + case A_APBNSPPPC1: + r = s->apb[offset_to_ppc_idx(offset)].nsp; + break; + case A_APBNSPPPCEXP0: + case A_APBNSPPPCEXP1: + case A_APBNSPPPCEXP2: + case A_APBNSPPPCEXP3: + r = s->apbexp[offset_to_ppc_idx(offset)].nsp; + break; + case A_PID4: + case A_PID5: + case A_PID6: + case A_PID7: + case A_PID0: + case A_PID1: + case A_PID2: + case A_PID3: + case A_CID0: + case A_CID1: + case A_CID2: + case A_CID3: + switch (s->sse_version) { + case ARMSSE_SSE300: + r = iotkit_secctl_ns_sse300_idregs[(offset - A_PID4) / 4]; + break; + default: + r = iotkit_secctl_ns_idregs[(offset - A_PID4) / 4]; + break; + } + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "IotKit SecCtl NS block read: bad offset 0x%x\n", + offset); + r = 0; + break; + } + + if (size != 4) { + /* None of our registers are access-sensitive, so just pull the right + * byte out of the word read result.
+ */ + r = extract32(r, (addr & 3) * 8, size * 8); + } + + trace_iotkit_secctl_ns_read(offset, r, size); + *pdata = r; + return MEMTX_OK; +} + +static MemTxResult iotkit_secctl_ns_write(void *opaque, hwaddr addr, + uint64_t value, + unsigned size, MemTxAttrs attrs) +{ + IoTKitSecCtl *s = IOTKIT_SECCTL(opaque); + uint32_t offset = addr; + IoTKitSecCtlPPC *ppc; + + trace_iotkit_secctl_ns_write(offset, value, size); + + if (size != 4) { + /* Byte and halfword writes are ignored */ + qemu_log_mask(LOG_GUEST_ERROR, + "IotKit SecCtl NS block write: bad size, ignored\n"); + return MEMTX_OK; + } + + switch (offset) { + case A_AHBNSPPPCEXP0: + case A_AHBNSPPPCEXP1: + case A_AHBNSPPPCEXP2: + case A_AHBNSPPPCEXP3: + ppc = &s->ahbexp[offset_to_ppc_idx(offset)]; + iotkit_secctl_ppc_nsp_write(ppc, value); + break; + case A_APBNSPPPC0: + case A_APBNSPPPC1: + ppc = &s->apb[offset_to_ppc_idx(offset)]; + iotkit_secctl_ppc_nsp_write(ppc, value); + break; + case A_APBNSPPPCEXP0: + case A_APBNSPPPCEXP1: + case A_APBNSPPPCEXP2: + case A_APBNSPPPCEXP3: + ppc = &s->apbexp[offset_to_ppc_idx(offset)]; + iotkit_secctl_ppc_nsp_write(ppc, value); + break; + case A_AHBNSPPPC0: + case A_PID4: + case A_PID5: + case A_PID6: + case A_PID7: + case A_PID0: + case A_PID1: + case A_PID2: + case A_PID3: + case A_CID0: + case A_CID1: + case A_CID2: + case A_CID3: + qemu_log_mask(LOG_GUEST_ERROR, + "IoTKit SecCtl NS block write: " + "read-only offset 0x%x\n", offset); + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "IotKit SecCtl NS block write: bad offset 0x%x\n", + offset); + break; + } + + return MEMTX_OK; +} + +static const MemoryRegionOps iotkit_secctl_s_ops = { + .read_with_attrs = iotkit_secctl_s_read, + .write_with_attrs = iotkit_secctl_s_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid.min_access_size = 1, + .valid.max_access_size = 4, + .impl.min_access_size = 1, + .impl.max_access_size = 4, +}; + +static const MemoryRegionOps iotkit_secctl_ns_ops = { + .read_with_attrs = iotkit_secctl_ns_read, + .write_with_attrs = iotkit_secctl_ns_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid.min_access_size = 1, + .valid.max_access_size = 4, + .impl.min_access_size = 1, + .impl.max_access_size = 4, +}; + +static void iotkit_secctl_reset_ppc(IoTKitSecCtlPPC *ppc) +{ + ppc->ns = 0; + ppc->sp = 0; + ppc->nsp = 0; +} + +static void iotkit_secctl_reset(DeviceState *dev) +{ + IoTKitSecCtl *s = IOTKIT_SECCTL(dev); + + s->secppcintstat = 0; + s->secppcinten = 0; + s->secrespcfg = 0; + s->nsccfg = 0; + s->brginten = 0; + + foreach_ppc(s, iotkit_secctl_reset_ppc); +} + +static void iotkit_secctl_mpc_status(void *opaque, int n, int level) +{ + IoTKitSecCtl *s = IOTKIT_SECCTL(opaque); + + s->mpcintstatus = deposit32(s->mpcintstatus, n, 1, !!level); +} + +static void iotkit_secctl_mpcexp_status(void *opaque, int n, int level) +{ + IoTKitSecCtl *s = IOTKIT_SECCTL(opaque); + + s->mpcintstatus = deposit32(s->mpcintstatus, n + 16, 1, !!level); +} + +static void iotkit_secctl_mscexp_status(void *opaque, int n, int level) +{ + IoTKitSecCtl *s = IOTKIT_SECCTL(opaque); + + s->secmscintstat = deposit32(s->secmscintstat, n + 16, 1, !!level); + iotkit_secctl_update_msc_irq(s); +} + +static void iotkit_secctl_ppc_irqstatus(void *opaque, int n, int level) +{ + IoTKitSecCtlPPC *ppc = opaque; + IoTKitSecCtl *s = IOTKIT_SECCTL(ppc->parent); + int irqbit = ppc->irq_bit_offset + n; + + s->secppcintstat = deposit32(s->secppcintstat, irqbit, 1, level); +} + +static void iotkit_secctl_init_ppc(IoTKitSecCtl *s, + IoTKitSecCtlPPC *ppc, + const char 
*name, + int numports, + int irq_bit_offset) +{ + char *gpioname; + DeviceState *dev = DEVICE(s); + + ppc->numports = numports; + ppc->irq_bit_offset = irq_bit_offset; + ppc->parent = s; + + gpioname = g_strdup_printf("%s_nonsec", name); + qdev_init_gpio_out_named(dev, ppc->nonsec, gpioname, numports); + g_free(gpioname); + gpioname = g_strdup_printf("%s_ap", name); + qdev_init_gpio_out_named(dev, ppc->ap, gpioname, numports); + g_free(gpioname); + gpioname = g_strdup_printf("%s_irq_enable", name); + qdev_init_gpio_out_named(dev, &ppc->irq_enable, gpioname, 1); + g_free(gpioname); + gpioname = g_strdup_printf("%s_irq_clear", name); + qdev_init_gpio_out_named(dev, &ppc->irq_clear, gpioname, 1); + g_free(gpioname); + gpioname = g_strdup_printf("%s_irq_status", name); + qdev_init_gpio_in_named_with_opaque(dev, iotkit_secctl_ppc_irqstatus, + ppc, gpioname, 1); + g_free(gpioname); +} + +static void iotkit_secctl_init(Object *obj) +{ + IoTKitSecCtl *s = IOTKIT_SECCTL(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + DeviceState *dev = DEVICE(obj); + int i; + + iotkit_secctl_init_ppc(s, &s->apb[0], "apb_ppc0", + IOTS_APB_PPC0_NUM_PORTS, 0); + iotkit_secctl_init_ppc(s, &s->apb[1], "apb_ppc1", + IOTS_APB_PPC1_NUM_PORTS, 1); + + for (i = 0; i < IOTS_NUM_APB_EXP_PPC; i++) { + IoTKitSecCtlPPC *ppc = &s->apbexp[i]; + char *ppcname = g_strdup_printf("apb_ppcexp%d", i); + iotkit_secctl_init_ppc(s, ppc, ppcname, IOTS_PPC_NUM_PORTS, 4 + i); + g_free(ppcname); + } + for (i = 0; i < IOTS_NUM_AHB_EXP_PPC; i++) { + IoTKitSecCtlPPC *ppc = &s->ahbexp[i]; + char *ppcname = g_strdup_printf("ahb_ppcexp%d", i); + iotkit_secctl_init_ppc(s, ppc, ppcname, IOTS_PPC_NUM_PORTS, 20 + i); + g_free(ppcname); + } + + qdev_init_gpio_out_named(dev, &s->sec_resp_cfg, "sec_resp_cfg", 1); + qdev_init_gpio_out_named(dev, &s->nsc_cfg_irq, "nsc_cfg", 1); + + qdev_init_gpio_in_named(dev, iotkit_secctl_mpc_status, "mpc_status", + IOTS_NUM_MPC); + qdev_init_gpio_in_named(dev, iotkit_secctl_mpcexp_status, + "mpcexp_status", IOTS_NUM_EXP_MPC); + + qdev_init_gpio_in_named(dev, iotkit_secctl_mscexp_status, + "mscexp_status", IOTS_NUM_EXP_MSC); + qdev_init_gpio_out_named(dev, s->mscexp_clear, "mscexp_clear", + IOTS_NUM_EXP_MSC); + qdev_init_gpio_out_named(dev, s->mscexp_ns, "mscexp_ns", + IOTS_NUM_EXP_MSC); + qdev_init_gpio_out_named(dev, &s->msc_irq, "msc_irq", 1); + + memory_region_init_io(&s->s_regs, obj, &iotkit_secctl_s_ops, + s, "iotkit-secctl-s-regs", 0x1000); + memory_region_init_io(&s->ns_regs, obj, &iotkit_secctl_ns_ops, + s, "iotkit-secctl-ns-regs", 0x1000); + sysbus_init_mmio(sbd, &s->s_regs); + sysbus_init_mmio(sbd, &s->ns_regs); +} + +static void iotkit_secctl_realize(DeviceState *dev, Error **errp) +{ + IoTKitSecCtl *s = IOTKIT_SECCTL(dev); + + if (!armsse_version_valid(s->sse_version)) { + error_setg(errp, "invalid sse-version value %d", s->sse_version); + return; + } +} + +static const VMStateDescription iotkit_secctl_ppc_vmstate = { + .name = "iotkit-secctl-ppc", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(ns, IoTKitSecCtlPPC), + VMSTATE_UINT32(sp, IoTKitSecCtlPPC), + VMSTATE_UINT32(nsp, IoTKitSecCtlPPC), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription iotkit_secctl_mpcintstatus_vmstate = { + .name = "iotkit-secctl-mpcintstatus", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(mpcintstatus, IoTKitSecCtl), + VMSTATE_END_OF_LIST() + } +}; + +static bool needed_always(void *opaque) +{ + return true; +} + +static 
const VMStateDescription iotkit_secctl_msc_vmstate = { + .name = "iotkit-secctl/msc", + .version_id = 1, + .minimum_version_id = 1, + .needed = needed_always, + .fields = (VMStateField[]) { + VMSTATE_UINT32(secmscintstat, IoTKitSecCtl), + VMSTATE_UINT32(secmscinten, IoTKitSecCtl), + VMSTATE_UINT32(nsmscexp, IoTKitSecCtl), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription iotkit_secctl_vmstate = { + .name = "iotkit-secctl", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(secppcintstat, IoTKitSecCtl), + VMSTATE_UINT32(secppcinten, IoTKitSecCtl), + VMSTATE_UINT32(secrespcfg, IoTKitSecCtl), + VMSTATE_UINT32(nsccfg, IoTKitSecCtl), + VMSTATE_UINT32(brginten, IoTKitSecCtl), + VMSTATE_STRUCT_ARRAY(apb, IoTKitSecCtl, IOTS_NUM_APB_PPC, 1, + iotkit_secctl_ppc_vmstate, IoTKitSecCtlPPC), + VMSTATE_STRUCT_ARRAY(apbexp, IoTKitSecCtl, IOTS_NUM_APB_EXP_PPC, 1, + iotkit_secctl_ppc_vmstate, IoTKitSecCtlPPC), + VMSTATE_STRUCT_ARRAY(ahbexp, IoTKitSecCtl, IOTS_NUM_AHB_EXP_PPC, 1, + iotkit_secctl_ppc_vmstate, IoTKitSecCtlPPC), + VMSTATE_END_OF_LIST() + }, + .subsections = (const VMStateDescription*[]) { + &iotkit_secctl_mpcintstatus_vmstate, + &iotkit_secctl_msc_vmstate, + NULL + }, +}; + +static Property iotkit_secctl_props[] = { + DEFINE_PROP_UINT32("sse-version", IoTKitSecCtl, sse_version, 0), + DEFINE_PROP_END_OF_LIST() +}; + +static void iotkit_secctl_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->vmsd = &iotkit_secctl_vmstate; + dc->reset = iotkit_secctl_reset; + dc->realize = iotkit_secctl_realize; + device_class_set_props(dc, iotkit_secctl_props); +} + +static const TypeInfo iotkit_secctl_info = { + .name = TYPE_IOTKIT_SECCTL, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(IoTKitSecCtl), + .instance_init = iotkit_secctl_init, + .class_init = iotkit_secctl_class_init, +}; + +static void iotkit_secctl_register_types(void) +{ + type_register_static(&iotkit_secctl_info); +} + +type_init(iotkit_secctl_register_types); diff --git a/hw/misc/iotkit-sysctl.c b/hw/misc/iotkit-sysctl.c new file mode 100644 index 000000000..9ee8fe849 --- /dev/null +++ b/hw/misc/iotkit-sysctl.c @@ -0,0 +1,872 @@ +/* + * ARM IoTKit system control element + * + * Copyright (c) 2018 Linaro Limited + * Written by Peter Maydell + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 or + * (at your option) any later version. + */ + +/* + * This is a model of the "system control element" which is part of the + * Arm IoTKit and documented in + * https://developer.arm.com/documentation/ecm0601256/latest + * Specifically, it implements the "system control register" blocks. 
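+ * Note that the register map is version dependent: the same offset can
+ * back different registers on different SSE variants (offset 0x120,
+ * for instance, is WICCTRL on IoTKit and SSE200 but CPUWAIT on
+ * SSE300), which is why the read and write handlers below switch on
+ * sse_version in almost every case.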
+ */ + +#include "qemu/osdep.h" +#include "qemu/bitops.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "sysemu/runstate.h" +#include "trace.h" +#include "qapi/error.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "hw/registerfields.h" +#include "hw/misc/iotkit-sysctl.h" +#include "hw/qdev-properties.h" +#include "hw/arm/armsse-version.h" +#include "target/arm/arm-powerctl.h" +#include "target/arm/cpu.h" + +REG32(SECDBGSTAT, 0x0) +REG32(SECDBGSET, 0x4) +REG32(SECDBGCLR, 0x8) +REG32(SCSECCTRL, 0xc) +REG32(FCLK_DIV, 0x10) +REG32(SYSCLK_DIV, 0x14) +REG32(CLOCK_FORCE, 0x18) +REG32(RESET_SYNDROME, 0x100) +REG32(RESET_MASK, 0x104) +REG32(SWRESET, 0x108) + FIELD(SWRESET, SWRESETREQ, 9, 1) +REG32(GRETREG, 0x10c) +REG32(INITSVTOR0, 0x110) + FIELD(INITSVTOR0, LOCK, 0, 1) + FIELD(INITSVTOR0, VTOR, 7, 25) +REG32(INITSVTOR1, 0x114) +REG32(CPUWAIT, 0x118) +REG32(NMI_ENABLE, 0x11c) /* BUSWAIT in IoTKit */ +REG32(WICCTRL, 0x120) +REG32(EWCTRL, 0x124) +REG32(PWRCTRL, 0x1fc) + FIELD(PWRCTRL, PPU_ACCESS_UNLOCK, 0, 1) + FIELD(PWRCTRL, PPU_ACCESS_FILTER, 1, 1) +REG32(PDCM_PD_SYS_SENSE, 0x200) +REG32(PDCM_PD_CPU0_SENSE, 0x204) +REG32(PDCM_PD_SRAM0_SENSE, 0x20c) +REG32(PDCM_PD_SRAM1_SENSE, 0x210) +REG32(PDCM_PD_SRAM2_SENSE, 0x214) /* PDCM_PD_VMR0_SENSE on SSE300 */ +REG32(PDCM_PD_SRAM3_SENSE, 0x218) /* PDCM_PD_VMR1_SENSE on SSE300 */ +REG32(PID4, 0xfd0) +REG32(PID5, 0xfd4) +REG32(PID6, 0xfd8) +REG32(PID7, 0xfdc) +REG32(PID0, 0xfe0) +REG32(PID1, 0xfe4) +REG32(PID2, 0xfe8) +REG32(PID3, 0xfec) +REG32(CID0, 0xff0) +REG32(CID1, 0xff4) +REG32(CID2, 0xff8) +REG32(CID3, 0xffc) + +/* PID/CID values */ +static const int iotkit_sysctl_id[] = { + 0x04, 0x00, 0x00, 0x00, /* PID4..PID7 */ + 0x54, 0xb8, 0x0b, 0x00, /* PID0..PID3 */ + 0x0d, 0xf0, 0x05, 0xb1, /* CID0..CID3 */ +}; + +/* Also used by the SSE300 */ +static const int sse200_sysctl_id[] = { + 0x04, 0x00, 0x00, 0x00, /* PID4..PID7 */ + 0x54, 0xb8, 0x1b, 0x00, /* PID0..PID3 */ + 0x0d, 0xf0, 0x05, 0xb1, /* CID0..CID3 */ +}; + +/* + * Set the initial secure vector table offset address for the core. + * This will take effect when the CPU next resets. 
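+ * As an example of the SSE300 flow (the value is illustrative only):
+ * a guest write of 0x10000001 to INITSVTOR0 stores the whole value,
+ * passes 0x10000000 (the R_INITSVTOR0_VTOR field, bits [31:7]) to
+ * this function, and leaves the LOCK bit set so that subsequent
+ * writes are refused.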
+ */ +static void set_init_vtor(uint64_t cpuid, uint32_t vtor) +{ + Object *cpuobj = OBJECT(arm_get_cpu_by_id(cpuid)); + + if (cpuobj) { + if (object_property_find(cpuobj, "init-svtor")) { + object_property_set_uint(cpuobj, "init-svtor", vtor, &error_abort); + } + } +} + +static uint64_t iotkit_sysctl_read(void *opaque, hwaddr offset, + unsigned size) +{ + IoTKitSysCtl *s = IOTKIT_SYSCTL(opaque); + uint64_t r; + + switch (offset) { + case A_SECDBGSTAT: + r = s->secure_debug; + break; + case A_SCSECCTRL: + switch (s->sse_version) { + case ARMSSE_IOTKIT: + goto bad_offset; + case ARMSSE_SSE200: + case ARMSSE_SSE300: + r = s->scsecctrl; + break; + default: + g_assert_not_reached(); + } + break; + case A_FCLK_DIV: + switch (s->sse_version) { + case ARMSSE_IOTKIT: + goto bad_offset; + case ARMSSE_SSE200: + case ARMSSE_SSE300: + r = s->fclk_div; + break; + default: + g_assert_not_reached(); + } + break; + case A_SYSCLK_DIV: + switch (s->sse_version) { + case ARMSSE_IOTKIT: + goto bad_offset; + case ARMSSE_SSE200: + case ARMSSE_SSE300: + r = s->sysclk_div; + break; + default: + g_assert_not_reached(); + } + break; + case A_CLOCK_FORCE: + switch (s->sse_version) { + case ARMSSE_IOTKIT: + goto bad_offset; + case ARMSSE_SSE200: + case ARMSSE_SSE300: + r = s->clock_force; + break; + default: + g_assert_not_reached(); + } + break; + case A_RESET_SYNDROME: + r = s->reset_syndrome; + break; + case A_RESET_MASK: + r = s->reset_mask; + break; + case A_GRETREG: + r = s->gretreg; + break; + case A_INITSVTOR0: + r = s->initsvtor0; + break; + case A_INITSVTOR1: + switch (s->sse_version) { + case ARMSSE_IOTKIT: + goto bad_offset; + case ARMSSE_SSE200: + r = s->initsvtor1; + break; + case ARMSSE_SSE300: + goto bad_offset; + default: + g_assert_not_reached(); + } + break; + case A_CPUWAIT: + switch (s->sse_version) { + case ARMSSE_IOTKIT: + case ARMSSE_SSE200: + r = s->cpuwait; + break; + case ARMSSE_SSE300: + /* In SSE300 this is reserved (for INITSVTOR2) */ + goto bad_offset; + default: + g_assert_not_reached(); + } + break; + case A_NMI_ENABLE: + switch (s->sse_version) { + case ARMSSE_IOTKIT: + /* In IoTKit this is named BUSWAIT but marked reserved, R/O, zero */ + r = 0; + break; + case ARMSSE_SSE200: + r = s->nmi_enable; + break; + case ARMSSE_SSE300: + /* In SSE300 this is reserved (for INITSVTOR3) */ + goto bad_offset; + default: + g_assert_not_reached(); + } + break; + case A_WICCTRL: + switch (s->sse_version) { + case ARMSSE_IOTKIT: + case ARMSSE_SSE200: + r = s->wicctrl; + break; + case ARMSSE_SSE300: + /* In SSE300 this offset is CPUWAIT */ + r = s->cpuwait; + break; + default: + g_assert_not_reached(); + } + break; + case A_EWCTRL: + switch (s->sse_version) { + case ARMSSE_IOTKIT: + goto bad_offset; + case ARMSSE_SSE200: + r = s->ewctrl; + break; + case ARMSSE_SSE300: + /* In SSE300 this offset is NMI_ENABLE */ + r = s->nmi_enable; + break; + default: + g_assert_not_reached(); + } + break; + case A_PWRCTRL: + switch (s->sse_version) { + case ARMSSE_IOTKIT: + case ARMSSE_SSE200: + goto bad_offset; + case ARMSSE_SSE300: + r = s->pwrctrl; + break; + default: + g_assert_not_reached(); + } + break; + case A_PDCM_PD_SYS_SENSE: + switch (s->sse_version) { + case ARMSSE_IOTKIT: + goto bad_offset; + case ARMSSE_SSE200: + case ARMSSE_SSE300: + r = s->pdcm_pd_sys_sense; + break; + default: + g_assert_not_reached(); + } + break; + case A_PDCM_PD_CPU0_SENSE: + switch (s->sse_version) { + case ARMSSE_IOTKIT: + case ARMSSE_SSE200: + goto bad_offset; + case ARMSSE_SSE300: + r = s->pdcm_pd_cpu0_sense; + break; +
+        default:
+            g_assert_not_reached();
+        }
+        break;
+    case A_PDCM_PD_SRAM0_SENSE:
+        switch (s->sse_version) {
+        case ARMSSE_IOTKIT:
+            goto bad_offset;
+        case ARMSSE_SSE200:
+            r = s->pdcm_pd_sram0_sense;
+            break;
+        case ARMSSE_SSE300:
+            goto bad_offset;
+        default:
+            g_assert_not_reached();
+        }
+        break;
+    case A_PDCM_PD_SRAM1_SENSE:
+        switch (s->sse_version) {
+        case ARMSSE_IOTKIT:
+            goto bad_offset;
+        case ARMSSE_SSE200:
+            r = s->pdcm_pd_sram1_sense;
+            break;
+        case ARMSSE_SSE300:
+            goto bad_offset;
+        default:
+            g_assert_not_reached();
+        }
+        break;
+    case A_PDCM_PD_SRAM2_SENSE:
+        switch (s->sse_version) {
+        case ARMSSE_IOTKIT:
+            goto bad_offset;
+        case ARMSSE_SSE200:
+            r = s->pdcm_pd_sram2_sense;
+            break;
+        case ARMSSE_SSE300:
+            r = s->pdcm_pd_vmr0_sense;
+            break;
+        default:
+            g_assert_not_reached();
+        }
+        break;
+    case A_PDCM_PD_SRAM3_SENSE:
+        switch (s->sse_version) {
+        case ARMSSE_IOTKIT:
+            goto bad_offset;
+        case ARMSSE_SSE200:
+            r = s->pdcm_pd_sram3_sense;
+            break;
+        case ARMSSE_SSE300:
+            r = s->pdcm_pd_vmr1_sense;
+            break;
+        default:
+            g_assert_not_reached();
+        }
+        break;
+    case A_PID4 ... A_CID3:
+        switch (s->sse_version) {
+        case ARMSSE_IOTKIT:
+            r = iotkit_sysctl_id[(offset - A_PID4) / 4];
+            break;
+        case ARMSSE_SSE200:
+        case ARMSSE_SSE300:
+            r = sse200_sysctl_id[(offset - A_PID4) / 4];
+            break;
+        default:
+            g_assert_not_reached();
+        }
+        break;
+    case A_SECDBGSET:
+    case A_SECDBGCLR:
+    case A_SWRESET:
+        qemu_log_mask(LOG_GUEST_ERROR,
+                      "IoTKit SysCtl read: read of WO offset %x\n",
+                      (int)offset);
+        r = 0;
+        break;
+    default:
+    bad_offset:
+        qemu_log_mask(LOG_GUEST_ERROR,
+                      "IoTKit SysCtl read: bad offset %x\n", (int)offset);
+        r = 0;
+        break;
+    }
+    trace_iotkit_sysctl_read(offset, r, size);
+    return r;
+}
+
+static void cpuwait_write(IoTKitSysCtl *s, uint32_t value)
+{
+    int num_cpus = (s->sse_version == ARMSSE_SSE300) ? 1 : 2;
+    int i;
+
+    for (i = 0; i < num_cpus; i++) {
+        uint32_t mask = 1 << i;
+        if ((s->cpuwait & mask) && !(value & mask)) {
+            /* Powering up CPU i */
+            arm_set_cpu_on_and_reset(i);
+        }
+    }
+    s->cpuwait = value;
+}
+
+static void iotkit_sysctl_write(void *opaque, hwaddr offset,
+                                uint64_t value, unsigned size)
+{
+    IoTKitSysCtl *s = IOTKIT_SYSCTL(opaque);
+
+    trace_iotkit_sysctl_write(offset, value, size);
+
+    /*
+     * Most of the state here has to do with control of reset and
+     * similar kinds of power up -- for instance the guest can ask
+     * what the reason for the last reset was, or forbid reset for
+     * some causes (like the non-secure watchdog). Most of this is
+     * not relevant to QEMU, which doesn't really model anything other
+     * than a full power-on reset.
+     * We just model the registers as reads-as-written.
+     */
+
+    switch (offset) {
+    case A_RESET_SYNDROME:
+        qemu_log_mask(LOG_UNIMP,
+                      "IoTKit SysCtl RESET_SYNDROME unimplemented\n");
+        s->reset_syndrome = value;
+        break;
+    case A_RESET_MASK:
+        qemu_log_mask(LOG_UNIMP, "IoTKit SysCtl RESET_MASK unimplemented\n");
+        s->reset_mask = value;
+        break;
+    case A_GRETREG:
+        /*
+         * General retention register, which is only reset by a power-on
+         * reset. Technically this implementation is complete, since
+         * QEMU only supports power-on resets...
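+         * A guest-requested SWRESET is also modelled as a full power-on
+         * reset, so in this implementation GRETREG does not survive it.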
+         */
+        s->gretreg = value;
+        break;
+    case A_INITSVTOR0:
+        switch (s->sse_version) {
+        case ARMSSE_SSE300:
+            /* SSE300 has a LOCK bit which prevents further writes when set */
+            if (s->initsvtor0 & R_INITSVTOR0_LOCK_MASK) {
+                qemu_log_mask(LOG_GUEST_ERROR,
+                              "IoTKit INITSVTOR0 write when register locked\n");
+                break;
+            }
+            s->initsvtor0 = value;
+            set_init_vtor(0, s->initsvtor0 & R_INITSVTOR0_VTOR_MASK);
+            break;
+        case ARMSSE_IOTKIT:
+        case ARMSSE_SSE200:
+            s->initsvtor0 = value;
+            set_init_vtor(0, s->initsvtor0);
+            break;
+        default:
+            g_assert_not_reached();
+        }
+        break;
+    case A_CPUWAIT:
+        switch (s->sse_version) {
+        case ARMSSE_IOTKIT:
+        case ARMSSE_SSE200:
+            cpuwait_write(s, value);
+            break;
+        case ARMSSE_SSE300:
+            /* In SSE300 this is reserved (for INITSVTOR2) */
+            goto bad_offset;
+        default:
+            g_assert_not_reached();
+        }
+        break;
+    case A_WICCTRL:
+        switch (s->sse_version) {
+        case ARMSSE_IOTKIT:
+        case ARMSSE_SSE200:
+            qemu_log_mask(LOG_UNIMP, "IoTKit SysCtl WICCTRL unimplemented\n");
+            s->wicctrl = value;
+            break;
+        case ARMSSE_SSE300:
+            /* In SSE300 this offset is CPUWAIT */
+            cpuwait_write(s, value);
+            break;
+        default:
+            g_assert_not_reached();
+        }
+        break;
+    case A_SECDBGSET:
+        /* write-1-to-set */
+        qemu_log_mask(LOG_UNIMP, "IoTKit SysCtl SECDBGSET unimplemented\n");
+        s->secure_debug |= value;
+        break;
+    case A_SECDBGCLR:
+        /* write-1-to-clear */
+        s->secure_debug &= ~value;
+        break;
+    case A_SWRESET:
+        /* One w/o bit to request a reset; all other bits reserved */
+        if (value & R_SWRESET_SWRESETREQ_MASK) {
+            qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
+        }
+        break;
+    case A_SCSECCTRL:
+        switch (s->sse_version) {
+        case ARMSSE_IOTKIT:
+            goto bad_offset;
+        case ARMSSE_SSE200:
+        case ARMSSE_SSE300:
+            qemu_log_mask(LOG_UNIMP, "IoTKit SysCtl SCSECCTRL unimplemented\n");
+            s->scsecctrl = value;
+            break;
+        default:
+            g_assert_not_reached();
+        }
+        break;
+    case A_FCLK_DIV:
+        switch (s->sse_version) {
+        case ARMSSE_IOTKIT:
+            goto bad_offset;
+        case ARMSSE_SSE200:
+        case ARMSSE_SSE300:
+            qemu_log_mask(LOG_UNIMP, "IoTKit SysCtl FCLK_DIV unimplemented\n");
+            s->fclk_div = value;
+            break;
+        default:
+            g_assert_not_reached();
+        }
+        break;
+    case A_SYSCLK_DIV:
+        switch (s->sse_version) {
+        case ARMSSE_IOTKIT:
+            goto bad_offset;
+        case ARMSSE_SSE200:
+        case ARMSSE_SSE300:
+            qemu_log_mask(LOG_UNIMP, "IoTKit SysCtl SYSCLK_DIV unimplemented\n");
+            s->sysclk_div = value;
+            break;
+        default:
+            g_assert_not_reached();
+        }
+        break;
+    case A_CLOCK_FORCE:
+        switch (s->sse_version) {
+        case ARMSSE_IOTKIT:
+            goto bad_offset;
+        case ARMSSE_SSE200:
+        case ARMSSE_SSE300:
+            qemu_log_mask(LOG_UNIMP, "IoTKit SysCtl CLOCK_FORCE unimplemented\n");
+            s->clock_force = value;
+            break;
+        default:
+            g_assert_not_reached();
+        }
+        break;
+    case A_INITSVTOR1:
+        switch (s->sse_version) {
+        case ARMSSE_IOTKIT:
+            goto bad_offset;
+        case ARMSSE_SSE200:
+            s->initsvtor1 = value;
+            set_init_vtor(1, s->initsvtor1);
+            break;
+        case ARMSSE_SSE300:
+            goto bad_offset;
+        default:
+            g_assert_not_reached();
+        }
+        break;
+    case A_EWCTRL:
+        switch (s->sse_version) {
+        case ARMSSE_IOTKIT:
+            goto bad_offset;
+        case ARMSSE_SSE200:
+            qemu_log_mask(LOG_UNIMP, "IoTKit SysCtl EWCTRL unimplemented\n");
+            s->ewctrl = value;
+            break;
+        case ARMSSE_SSE300:
+            /* In SSE300 this offset is NMI_ENABLE */
+            qemu_log_mask(LOG_UNIMP, "IoTKit SysCtl NMI_ENABLE unimplemented\n");
+            s->nmi_enable = value;
+            break;
+        default:
+            g_assert_not_reached();
+        }
+        break;
+    case A_PWRCTRL:
+        switch (s->sse_version) {
+        case ARMSSE_IOTKIT:
+        case
ARMSSE_SSE200: + goto bad_offset; + case ARMSSE_SSE300: + if (!(s->pwrctrl & R_PWRCTRL_PPU_ACCESS_UNLOCK_MASK)) { + qemu_log_mask(LOG_GUEST_ERROR, + "IoTKit PWRCTRL write when register locked\n"); + break; + } + s->pwrctrl = value; + break; + default: + g_assert_not_reached(); + } + break; + case A_PDCM_PD_SYS_SENSE: + switch (s->sse_version) { + case ARMSSE_IOTKIT: + goto bad_offset; + case ARMSSE_SSE200: + case ARMSSE_SSE300: + qemu_log_mask(LOG_UNIMP, + "IoTKit SysCtl PDCM_PD_SYS_SENSE unimplemented\n"); + s->pdcm_pd_sys_sense = value; + break; + default: + g_assert_not_reached(); + } + break; + case A_PDCM_PD_CPU0_SENSE: + switch (s->sse_version) { + case ARMSSE_IOTKIT: + case ARMSSE_SSE200: + goto bad_offset; + case ARMSSE_SSE300: + qemu_log_mask(LOG_UNIMP, + "IoTKit SysCtl PDCM_PD_CPU0_SENSE unimplemented\n"); + s->pdcm_pd_cpu0_sense = value; + break; + default: + g_assert_not_reached(); + } + break; + case A_PDCM_PD_SRAM0_SENSE: + switch (s->sse_version) { + case ARMSSE_IOTKIT: + goto bad_offset; + case ARMSSE_SSE200: + qemu_log_mask(LOG_UNIMP, + "IoTKit SysCtl PDCM_PD_SRAM0_SENSE unimplemented\n"); + s->pdcm_pd_sram0_sense = value; + break; + case ARMSSE_SSE300: + goto bad_offset; + default: + g_assert_not_reached(); + } + break; + case A_PDCM_PD_SRAM1_SENSE: + switch (s->sse_version) { + case ARMSSE_IOTKIT: + goto bad_offset; + case ARMSSE_SSE200: + qemu_log_mask(LOG_UNIMP, + "IoTKit SysCtl PDCM_PD_SRAM1_SENSE unimplemented\n"); + s->pdcm_pd_sram1_sense = value; + break; + case ARMSSE_SSE300: + goto bad_offset; + default: + g_assert_not_reached(); + } + break; + case A_PDCM_PD_SRAM2_SENSE: + switch (s->sse_version) { + case ARMSSE_IOTKIT: + goto bad_offset; + case ARMSSE_SSE200: + qemu_log_mask(LOG_UNIMP, + "IoTKit SysCtl PDCM_PD_SRAM2_SENSE unimplemented\n"); + s->pdcm_pd_sram2_sense = value; + break; + case ARMSSE_SSE300: + qemu_log_mask(LOG_UNIMP, + "IoTKit SysCtl PDCM_PD_VMR0_SENSE unimplemented\n"); + s->pdcm_pd_vmr0_sense = value; + break; + default: + g_assert_not_reached(); + } + break; + case A_PDCM_PD_SRAM3_SENSE: + switch (s->sse_version) { + case ARMSSE_IOTKIT: + goto bad_offset; + case ARMSSE_SSE200: + qemu_log_mask(LOG_UNIMP, + "IoTKit SysCtl PDCM_PD_SRAM3_SENSE unimplemented\n"); + s->pdcm_pd_sram3_sense = value; + break; + case ARMSSE_SSE300: + qemu_log_mask(LOG_UNIMP, + "IoTKit SysCtl PDCM_PD_VMR1_SENSE unimplemented\n"); + s->pdcm_pd_vmr1_sense = value; + break; + default: + g_assert_not_reached(); + } + break; + case A_NMI_ENABLE: + /* In IoTKit this is BUSWAIT: reserved, R/O, zero */ + switch (s->sse_version) { + case ARMSSE_IOTKIT: + goto ro_offset; + case ARMSSE_SSE200: + qemu_log_mask(LOG_UNIMP, "IoTKit SysCtl NMI_ENABLE unimplemented\n"); + s->nmi_enable = value; + break; + case ARMSSE_SSE300: + /* In SSE300 this is reserved (for INITSVTOR3) */ + goto bad_offset; + default: + g_assert_not_reached(); + } + break; + case A_SECDBGSTAT: + case A_PID4 ... 
A_CID3: + ro_offset: + qemu_log_mask(LOG_GUEST_ERROR, + "IoTKit SysCtl write: write of RO offset %x\n", + (int)offset); + break; + default: + bad_offset: + qemu_log_mask(LOG_GUEST_ERROR, + "IoTKit SysCtl write: bad offset %x\n", (int)offset); + break; + } +} + +static const MemoryRegionOps iotkit_sysctl_ops = { + .read = iotkit_sysctl_read, + .write = iotkit_sysctl_write, + .endianness = DEVICE_LITTLE_ENDIAN, + /* byte/halfword accesses are just zero-padded on reads and writes */ + .impl.min_access_size = 4, + .impl.max_access_size = 4, + .valid.min_access_size = 1, + .valid.max_access_size = 4, +}; + +static void iotkit_sysctl_reset(DeviceState *dev) +{ + IoTKitSysCtl *s = IOTKIT_SYSCTL(dev); + + trace_iotkit_sysctl_reset(); + s->secure_debug = 0; + s->reset_syndrome = 1; + s->reset_mask = 0; + s->gretreg = 0; + s->initsvtor0 = s->initsvtor0_rst; + s->initsvtor1 = s->initsvtor1_rst; + s->cpuwait = s->cpuwait_rst; + s->wicctrl = 0; + s->scsecctrl = 0; + s->fclk_div = 0; + s->sysclk_div = 0; + s->clock_force = 0; + s->nmi_enable = 0; + s->ewctrl = 0; + s->pwrctrl = 0x3; + s->pdcm_pd_sys_sense = 0x7f; + s->pdcm_pd_sram0_sense = 0; + s->pdcm_pd_sram1_sense = 0; + s->pdcm_pd_sram2_sense = 0; + s->pdcm_pd_sram3_sense = 0; + s->pdcm_pd_cpu0_sense = 0; + s->pdcm_pd_vmr0_sense = 0; + s->pdcm_pd_vmr1_sense = 0; +} + +static void iotkit_sysctl_init(Object *obj) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + IoTKitSysCtl *s = IOTKIT_SYSCTL(obj); + + memory_region_init_io(&s->iomem, obj, &iotkit_sysctl_ops, + s, "iotkit-sysctl", 0x1000); + sysbus_init_mmio(sbd, &s->iomem); +} + +static void iotkit_sysctl_realize(DeviceState *dev, Error **errp) +{ + IoTKitSysCtl *s = IOTKIT_SYSCTL(dev); + + if (!armsse_version_valid(s->sse_version)) { + error_setg(errp, "invalid sse-version value %d", s->sse_version); + return; + } +} + +static bool sse300_needed(void *opaque) +{ + IoTKitSysCtl *s = IOTKIT_SYSCTL(opaque); + + return s->sse_version == ARMSSE_SSE300; +} + +static const VMStateDescription iotkit_sysctl_sse300_vmstate = { + .name = "iotkit-sysctl/sse-300", + .version_id = 1, + .minimum_version_id = 1, + .needed = sse300_needed, + .fields = (VMStateField[]) { + VMSTATE_UINT32(pwrctrl, IoTKitSysCtl), + VMSTATE_UINT32(pdcm_pd_cpu0_sense, IoTKitSysCtl), + VMSTATE_UINT32(pdcm_pd_vmr0_sense, IoTKitSysCtl), + VMSTATE_UINT32(pdcm_pd_vmr1_sense, IoTKitSysCtl), + VMSTATE_END_OF_LIST() + } +}; + +static bool sse200_needed(void *opaque) +{ + IoTKitSysCtl *s = IOTKIT_SYSCTL(opaque); + + return s->sse_version != ARMSSE_IOTKIT; +} + +static const VMStateDescription iotkit_sysctl_sse200_vmstate = { + .name = "iotkit-sysctl/sse-200", + .version_id = 1, + .minimum_version_id = 1, + .needed = sse200_needed, + .fields = (VMStateField[]) { + VMSTATE_UINT32(scsecctrl, IoTKitSysCtl), + VMSTATE_UINT32(fclk_div, IoTKitSysCtl), + VMSTATE_UINT32(sysclk_div, IoTKitSysCtl), + VMSTATE_UINT32(clock_force, IoTKitSysCtl), + VMSTATE_UINT32(initsvtor1, IoTKitSysCtl), + VMSTATE_UINT32(nmi_enable, IoTKitSysCtl), + VMSTATE_UINT32(pdcm_pd_sys_sense, IoTKitSysCtl), + VMSTATE_UINT32(pdcm_pd_sram0_sense, IoTKitSysCtl), + VMSTATE_UINT32(pdcm_pd_sram1_sense, IoTKitSysCtl), + VMSTATE_UINT32(pdcm_pd_sram2_sense, IoTKitSysCtl), + VMSTATE_UINT32(pdcm_pd_sram3_sense, IoTKitSysCtl), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription iotkit_sysctl_vmstate = { + .name = "iotkit-sysctl", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(secure_debug, IoTKitSysCtl), + VMSTATE_UINT32(reset_syndrome, 
IoTKitSysCtl), + VMSTATE_UINT32(reset_mask, IoTKitSysCtl), + VMSTATE_UINT32(gretreg, IoTKitSysCtl), + VMSTATE_UINT32(initsvtor0, IoTKitSysCtl), + VMSTATE_UINT32(cpuwait, IoTKitSysCtl), + VMSTATE_UINT32(wicctrl, IoTKitSysCtl), + VMSTATE_END_OF_LIST() + }, + .subsections = (const VMStateDescription*[]) { + &iotkit_sysctl_sse200_vmstate, + &iotkit_sysctl_sse300_vmstate, + NULL + } +}; + +static Property iotkit_sysctl_props[] = { + DEFINE_PROP_UINT32("sse-version", IoTKitSysCtl, sse_version, 0), + DEFINE_PROP_UINT32("CPUWAIT_RST", IoTKitSysCtl, cpuwait_rst, 0), + DEFINE_PROP_UINT32("INITSVTOR0_RST", IoTKitSysCtl, initsvtor0_rst, + 0x10000000), + DEFINE_PROP_UINT32("INITSVTOR1_RST", IoTKitSysCtl, initsvtor1_rst, + 0x10000000), + DEFINE_PROP_END_OF_LIST() +}; + +static void iotkit_sysctl_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->vmsd = &iotkit_sysctl_vmstate; + dc->reset = iotkit_sysctl_reset; + device_class_set_props(dc, iotkit_sysctl_props); + dc->realize = iotkit_sysctl_realize; +} + +static const TypeInfo iotkit_sysctl_info = { + .name = TYPE_IOTKIT_SYSCTL, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(IoTKitSysCtl), + .instance_init = iotkit_sysctl_init, + .class_init = iotkit_sysctl_class_init, +}; + +static void iotkit_sysctl_register_types(void) +{ + type_register_static(&iotkit_sysctl_info); +} + +type_init(iotkit_sysctl_register_types); diff --git a/hw/misc/iotkit-sysinfo.c b/hw/misc/iotkit-sysinfo.c new file mode 100644 index 000000000..aaa9305b2 --- /dev/null +++ b/hw/misc/iotkit-sysinfo.c @@ -0,0 +1,187 @@ +/* + * ARM IoTKit system information block + * + * Copyright (c) 2018 Linaro Limited + * Written by Peter Maydell + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 or + * (at your option) any later version. + */ + +/* + * This is a model of the "system information block" which is part of the + * Arm IoTKit and documented in + * https://developer.arm.com/documentation/ecm0601256/latest + * It consists of 2 read-only version/config registers, plus the + * usual ID registers. 
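+ * The values reported are not hardcoded here: the SoC that creates the
+ * device sets them via the SYS_VERSION, SYS_CONFIG and (on SSE300) IIDR
+ * properties defined below.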
+ */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "trace.h" +#include "qapi/error.h" +#include "hw/sysbus.h" +#include "hw/registerfields.h" +#include "hw/misc/iotkit-sysinfo.h" +#include "hw/qdev-properties.h" +#include "hw/arm/armsse-version.h" + +REG32(SYS_VERSION, 0x0) +REG32(SYS_CONFIG, 0x4) +REG32(SYS_CONFIG1, 0x8) +REG32(IIDR, 0xfc8) +REG32(PID4, 0xfd0) +REG32(PID5, 0xfd4) +REG32(PID6, 0xfd8) +REG32(PID7, 0xfdc) +REG32(PID0, 0xfe0) +REG32(PID1, 0xfe4) +REG32(PID2, 0xfe8) +REG32(PID3, 0xfec) +REG32(CID0, 0xff0) +REG32(CID1, 0xff4) +REG32(CID2, 0xff8) +REG32(CID3, 0xffc) + +/* PID/CID values */ +static const int sysinfo_id[] = { + 0x04, 0x00, 0x00, 0x00, /* PID4..PID7 */ + 0x58, 0xb8, 0x0b, 0x00, /* PID0..PID3 */ + 0x0d, 0xf0, 0x05, 0xb1, /* CID0..CID3 */ +}; + +static const int sysinfo_sse300_id[] = { + 0x04, 0x00, 0x00, 0x00, /* PID4..PID7 */ + 0x58, 0xb8, 0x1b, 0x00, /* PID0..PID3 */ + 0x0d, 0xf0, 0x05, 0xb1, /* CID0..CID3 */ +}; + +static uint64_t iotkit_sysinfo_read(void *opaque, hwaddr offset, + unsigned size) +{ + IoTKitSysInfo *s = IOTKIT_SYSINFO(opaque); + uint64_t r; + + switch (offset) { + case A_SYS_VERSION: + r = s->sys_version; + break; + + case A_SYS_CONFIG: + r = s->sys_config; + break; + case A_SYS_CONFIG1: + switch (s->sse_version) { + case ARMSSE_SSE300: + return 0; + break; + default: + goto bad_read; + } + break; + case A_IIDR: + switch (s->sse_version) { + case ARMSSE_SSE300: + return s->iidr; + break; + default: + goto bad_read; + } + break; + case A_PID4 ... A_CID3: + switch (s->sse_version) { + case ARMSSE_SSE300: + r = sysinfo_sse300_id[(offset - A_PID4) / 4]; + break; + default: + r = sysinfo_id[(offset - A_PID4) / 4]; + break; + } + break; + default: + bad_read: + qemu_log_mask(LOG_GUEST_ERROR, + "IoTKit SysInfo read: bad offset %x\n", (int)offset); + r = 0; + break; + } + trace_iotkit_sysinfo_read(offset, r, size); + return r; +} + +static void iotkit_sysinfo_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + trace_iotkit_sysinfo_write(offset, value, size); + + qemu_log_mask(LOG_GUEST_ERROR, + "IoTKit SysInfo: write to RO offset 0x%x\n", (int)offset); +} + +static const MemoryRegionOps iotkit_sysinfo_ops = { + .read = iotkit_sysinfo_read, + .write = iotkit_sysinfo_write, + .endianness = DEVICE_LITTLE_ENDIAN, + /* byte/halfword accesses are just zero-padded on reads and writes */ + .impl.min_access_size = 4, + .impl.max_access_size = 4, + .valid.min_access_size = 1, + .valid.max_access_size = 4, +}; + +static Property iotkit_sysinfo_props[] = { + DEFINE_PROP_UINT32("SYS_VERSION", IoTKitSysInfo, sys_version, 0), + DEFINE_PROP_UINT32("SYS_CONFIG", IoTKitSysInfo, sys_config, 0), + DEFINE_PROP_UINT32("sse-version", IoTKitSysInfo, sse_version, 0), + DEFINE_PROP_UINT32("IIDR", IoTKitSysInfo, iidr, 0), + DEFINE_PROP_END_OF_LIST() +}; + +static void iotkit_sysinfo_init(Object *obj) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + IoTKitSysInfo *s = IOTKIT_SYSINFO(obj); + + memory_region_init_io(&s->iomem, obj, &iotkit_sysinfo_ops, + s, "iotkit-sysinfo", 0x1000); + sysbus_init_mmio(sbd, &s->iomem); +} + +static void iotkit_sysinfo_realize(DeviceState *dev, Error **errp) +{ + IoTKitSysInfo *s = IOTKIT_SYSINFO(dev); + + if (!armsse_version_valid(s->sse_version)) { + error_setg(errp, "invalid sse-version value %d", s->sse_version); + return; + } +} + +static void iotkit_sysinfo_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + /* + * This device has no 
guest-modifiable state and so it + * does not need a reset function or VMState. + */ + dc->realize = iotkit_sysinfo_realize; + device_class_set_props(dc, iotkit_sysinfo_props); +} + +static const TypeInfo iotkit_sysinfo_info = { + .name = TYPE_IOTKIT_SYSINFO, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(IoTKitSysInfo), + .instance_init = iotkit_sysinfo_init, + .class_init = iotkit_sysinfo_class_init, +}; + +static void iotkit_sysinfo_register_types(void) +{ + type_register_static(&iotkit_sysinfo_info); +} + +type_init(iotkit_sysinfo_register_types); diff --git a/hw/misc/ivshmem.c b/hw/misc/ivshmem.c new file mode 100644 index 000000000..1ba4a9837 --- /dev/null +++ b/hw/misc/ivshmem.c @@ -0,0 +1,1135 @@ +/* + * Inter-VM Shared Memory PCI device. + * + * Author: + * Cam Macdonell + * + * Based On: cirrus_vga.c + * Copyright (c) 2004 Fabrice Bellard + * Copyright (c) 2004 Makoto Suzuki (suzu) + * + * and rtl8139.c + * Copyright (c) 2006 Igor Kovalenko + * + * This code is licensed under the GNU GPL v2. + * + * Contributions after 2012-01-13 are licensed under the terms of the + * GNU GPL, version 2 or (at your option) any later version. + */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "qapi/error.h" +#include "qemu/cutils.h" +#include "hw/pci/pci.h" +#include "hw/qdev-properties.h" +#include "hw/qdev-properties-system.h" +#include "hw/pci/msi.h" +#include "hw/pci/msix.h" +#include "sysemu/kvm.h" +#include "migration/blocker.h" +#include "migration/vmstate.h" +#include "qemu/error-report.h" +#include "qemu/event_notifier.h" +#include "qemu/module.h" +#include "qom/object_interfaces.h" +#include "chardev/char-fe.h" +#include "sysemu/hostmem.h" +#include "qapi/visitor.h" + +#include "hw/misc/ivshmem.h" +#include "qom/object.h" + +#define PCI_VENDOR_ID_IVSHMEM PCI_VENDOR_ID_REDHAT_QUMRANET +#define PCI_DEVICE_ID_IVSHMEM 0x1110 + +#define IVSHMEM_MAX_PEERS UINT16_MAX +#define IVSHMEM_IOEVENTFD 0 +#define IVSHMEM_MSI 1 + +#define IVSHMEM_REG_BAR_SIZE 0x100 + +#define IVSHMEM_DEBUG 0 +#define IVSHMEM_DPRINTF(fmt, ...) 
\ + do { \ + if (IVSHMEM_DEBUG) { \ + printf("IVSHMEM: " fmt, ## __VA_ARGS__); \ + } \ + } while (0) + +#define TYPE_IVSHMEM_COMMON "ivshmem-common" +typedef struct IVShmemState IVShmemState; +DECLARE_INSTANCE_CHECKER(IVShmemState, IVSHMEM_COMMON, + TYPE_IVSHMEM_COMMON) + +#define TYPE_IVSHMEM_PLAIN "ivshmem-plain" +DECLARE_INSTANCE_CHECKER(IVShmemState, IVSHMEM_PLAIN, + TYPE_IVSHMEM_PLAIN) + +#define TYPE_IVSHMEM_DOORBELL "ivshmem-doorbell" +DECLARE_INSTANCE_CHECKER(IVShmemState, IVSHMEM_DOORBELL, + TYPE_IVSHMEM_DOORBELL) + +#define TYPE_IVSHMEM "ivshmem" +DECLARE_INSTANCE_CHECKER(IVShmemState, IVSHMEM, + TYPE_IVSHMEM) + +typedef struct Peer { + int nb_eventfds; + EventNotifier *eventfds; +} Peer; + +typedef struct MSIVector { + PCIDevice *pdev; + int virq; + bool unmasked; +} MSIVector; + +struct IVShmemState { + /*< private >*/ + PCIDevice parent_obj; + /*< public >*/ + + uint32_t features; + + /* exactly one of these two may be set */ + HostMemoryBackend *hostmem; /* with interrupts */ + CharBackend server_chr; /* without interrupts */ + + /* registers */ + uint32_t intrmask; + uint32_t intrstatus; + int vm_id; + + /* BARs */ + MemoryRegion ivshmem_mmio; /* BAR 0 (registers) */ + MemoryRegion *ivshmem_bar2; /* BAR 2 (shared memory) */ + MemoryRegion server_bar2; /* used with server_chr */ + + /* interrupt support */ + Peer *peers; + int nb_peers; /* space in @peers[] */ + uint32_t vectors; + MSIVector *msi_vectors; + uint64_t msg_buf; /* buffer for receiving server messages */ + int msg_buffered_bytes; /* #bytes in @msg_buf */ + + /* migration stuff */ + OnOffAuto master; + Error *migration_blocker; +}; + +/* registers for the Inter-VM shared memory device */ +enum ivshmem_registers { + INTRMASK = 0, + INTRSTATUS = 4, + IVPOSITION = 8, + DOORBELL = 12, +}; + +static inline uint32_t ivshmem_has_feature(IVShmemState *ivs, + unsigned int feature) { + return (ivs->features & (1 << feature)); +} + +static inline bool ivshmem_is_master(IVShmemState *s) +{ + assert(s->master != ON_OFF_AUTO_AUTO); + return s->master == ON_OFF_AUTO_ON; +} + +static void ivshmem_IntrMask_write(IVShmemState *s, uint32_t val) +{ + IVSHMEM_DPRINTF("IntrMask write(w) val = 0x%04x\n", val); + + s->intrmask = val; +} + +static uint32_t ivshmem_IntrMask_read(IVShmemState *s) +{ + uint32_t ret = s->intrmask; + + IVSHMEM_DPRINTF("intrmask read(w) val = 0x%04x\n", ret); + return ret; +} + +static void ivshmem_IntrStatus_write(IVShmemState *s, uint32_t val) +{ + IVSHMEM_DPRINTF("IntrStatus write(w) val = 0x%04x\n", val); + + s->intrstatus = val; +} + +static uint32_t ivshmem_IntrStatus_read(IVShmemState *s) +{ + uint32_t ret = s->intrstatus; + + /* reading ISR clears all interrupts */ + s->intrstatus = 0; + return ret; +} + +static void ivshmem_io_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + IVShmemState *s = opaque; + + uint16_t dest = val >> 16; + uint16_t vector = val & 0xff; + + addr &= 0xfc; + + IVSHMEM_DPRINTF("writing to addr " TARGET_FMT_plx "\n", addr); + switch (addr) + { + case INTRMASK: + ivshmem_IntrMask_write(s, val); + break; + + case INTRSTATUS: + ivshmem_IntrStatus_write(s, val); + break; + + case DOORBELL: + /* check that dest VM ID is reasonable */ + if (dest >= s->nb_peers) { + IVSHMEM_DPRINTF("Invalid destination VM ID (%d)\n", dest); + break; + } + + /* check doorbell range */ + if (vector < s->peers[dest].nb_eventfds) { + IVSHMEM_DPRINTF("Notifying VM %d on vector %d\n", dest, vector); + event_notifier_set(&s->peers[dest].eventfds[vector]); + } else { + 
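+            /*
+             * The doorbell value encodes its target: bits [31:16] select
+             * the peer and bits [7:0] the vector, so e.g. a write of
+             * 0x00030002 would ring vector 2 of peer 3. A vector the peer
+             * never registered an eventfd for is quietly dropped.
+             */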
IVSHMEM_DPRINTF("Invalid destination vector %d on VM %d\n", + vector, dest); + } + break; + default: + IVSHMEM_DPRINTF("Unhandled write " TARGET_FMT_plx "\n", addr); + } +} + +static uint64_t ivshmem_io_read(void *opaque, hwaddr addr, + unsigned size) +{ + + IVShmemState *s = opaque; + uint32_t ret; + + switch (addr) + { + case INTRMASK: + ret = ivshmem_IntrMask_read(s); + break; + + case INTRSTATUS: + ret = ivshmem_IntrStatus_read(s); + break; + + case IVPOSITION: + ret = s->vm_id; + break; + + default: + IVSHMEM_DPRINTF("why are we reading " TARGET_FMT_plx "\n", addr); + ret = 0; + } + + return ret; +} + +static const MemoryRegionOps ivshmem_mmio_ops = { + .read = ivshmem_io_read, + .write = ivshmem_io_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .impl = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +static void ivshmem_vector_notify(void *opaque) +{ + MSIVector *entry = opaque; + PCIDevice *pdev = entry->pdev; + IVShmemState *s = IVSHMEM_COMMON(pdev); + int vector = entry - s->msi_vectors; + EventNotifier *n = &s->peers[s->vm_id].eventfds[vector]; + + if (!event_notifier_test_and_clear(n)) { + return; + } + + IVSHMEM_DPRINTF("interrupt on vector %p %d\n", pdev, vector); + if (ivshmem_has_feature(s, IVSHMEM_MSI)) { + if (msix_enabled(pdev)) { + msix_notify(pdev, vector); + } + } else { + ivshmem_IntrStatus_write(s, 1); + } +} + +static int ivshmem_vector_unmask(PCIDevice *dev, unsigned vector, + MSIMessage msg) +{ + IVShmemState *s = IVSHMEM_COMMON(dev); + EventNotifier *n = &s->peers[s->vm_id].eventfds[vector]; + MSIVector *v = &s->msi_vectors[vector]; + int ret; + + IVSHMEM_DPRINTF("vector unmask %p %d\n", dev, vector); + if (!v->pdev) { + error_report("ivshmem: vector %d route does not exist", vector); + return -EINVAL; + } + assert(!v->unmasked); + + ret = kvm_irqchip_update_msi_route(kvm_state, v->virq, msg, dev); + if (ret < 0) { + return ret; + } + kvm_irqchip_commit_routes(kvm_state); + + ret = kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, n, NULL, v->virq); + if (ret < 0) { + return ret; + } + v->unmasked = true; + + return 0; +} + +static void ivshmem_vector_mask(PCIDevice *dev, unsigned vector) +{ + IVShmemState *s = IVSHMEM_COMMON(dev); + EventNotifier *n = &s->peers[s->vm_id].eventfds[vector]; + MSIVector *v = &s->msi_vectors[vector]; + int ret; + + IVSHMEM_DPRINTF("vector mask %p %d\n", dev, vector); + if (!v->pdev) { + error_report("ivshmem: vector %d route does not exist", vector); + return; + } + assert(v->unmasked); + + ret = kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, n, v->virq); + if (ret < 0) { + error_report("remove_irqfd_notifier_gsi failed"); + return; + } + v->unmasked = false; +} + +static void ivshmem_vector_poll(PCIDevice *dev, + unsigned int vector_start, + unsigned int vector_end) +{ + IVShmemState *s = IVSHMEM_COMMON(dev); + unsigned int vector; + + IVSHMEM_DPRINTF("vector poll %p %d-%d\n", dev, vector_start, vector_end); + + vector_end = MIN(vector_end, s->vectors); + + for (vector = vector_start; vector < vector_end; vector++) { + EventNotifier *notifier = &s->peers[s->vm_id].eventfds[vector]; + + if (!msix_is_masked(dev, vector)) { + continue; + } + + if (event_notifier_test_and_clear(notifier)) { + msix_set_pending(dev, vector); + } + } +} + +static void watch_vector_notifier(IVShmemState *s, EventNotifier *n, + int vector) +{ + int eventfd = event_notifier_get_fd(n); + + assert(!s->msi_vectors[vector].pdev); + s->msi_vectors[vector].pdev = PCI_DEVICE(s); + + qemu_set_fd_handler(eventfd, ivshmem_vector_notify, + NULL, 
&s->msi_vectors[vector]); +} + +static void ivshmem_add_eventfd(IVShmemState *s, int posn, int i) +{ + memory_region_add_eventfd(&s->ivshmem_mmio, + DOORBELL, + 4, + true, + (posn << 16) | i, + &s->peers[posn].eventfds[i]); +} + +static void ivshmem_del_eventfd(IVShmemState *s, int posn, int i) +{ + memory_region_del_eventfd(&s->ivshmem_mmio, + DOORBELL, + 4, + true, + (posn << 16) | i, + &s->peers[posn].eventfds[i]); +} + +static void close_peer_eventfds(IVShmemState *s, int posn) +{ + int i, n; + + assert(posn >= 0 && posn < s->nb_peers); + n = s->peers[posn].nb_eventfds; + + if (ivshmem_has_feature(s, IVSHMEM_IOEVENTFD)) { + memory_region_transaction_begin(); + for (i = 0; i < n; i++) { + ivshmem_del_eventfd(s, posn, i); + } + memory_region_transaction_commit(); + } + + for (i = 0; i < n; i++) { + event_notifier_cleanup(&s->peers[posn].eventfds[i]); + } + + g_free(s->peers[posn].eventfds); + s->peers[posn].nb_eventfds = 0; +} + +static void resize_peers(IVShmemState *s, int nb_peers) +{ + int old_nb_peers = s->nb_peers; + int i; + + assert(nb_peers > old_nb_peers); + IVSHMEM_DPRINTF("bumping storage to %d peers\n", nb_peers); + + s->peers = g_realloc(s->peers, nb_peers * sizeof(Peer)); + s->nb_peers = nb_peers; + + for (i = old_nb_peers; i < nb_peers; i++) { + s->peers[i].eventfds = g_new0(EventNotifier, s->vectors); + s->peers[i].nb_eventfds = 0; + } +} + +static void ivshmem_add_kvm_msi_virq(IVShmemState *s, int vector, + Error **errp) +{ + PCIDevice *pdev = PCI_DEVICE(s); + int ret; + + IVSHMEM_DPRINTF("ivshmem_add_kvm_msi_virq vector:%d\n", vector); + assert(!s->msi_vectors[vector].pdev); + + ret = kvm_irqchip_add_msi_route(kvm_state, vector, pdev); + if (ret < 0) { + error_setg(errp, "kvm_irqchip_add_msi_route failed"); + return; + } + + s->msi_vectors[vector].virq = ret; + s->msi_vectors[vector].pdev = pdev; +} + +static void setup_interrupt(IVShmemState *s, int vector, Error **errp) +{ + EventNotifier *n = &s->peers[s->vm_id].eventfds[vector]; + bool with_irqfd = kvm_msi_via_irqfd_enabled() && + ivshmem_has_feature(s, IVSHMEM_MSI); + PCIDevice *pdev = PCI_DEVICE(s); + Error *err = NULL; + + IVSHMEM_DPRINTF("setting up interrupt for vector: %d\n", vector); + + if (!with_irqfd) { + IVSHMEM_DPRINTF("with eventfd\n"); + watch_vector_notifier(s, n, vector); + } else if (msix_enabled(pdev)) { + IVSHMEM_DPRINTF("with irqfd\n"); + ivshmem_add_kvm_msi_virq(s, vector, &err); + if (err) { + error_propagate(errp, err); + return; + } + + if (!msix_is_masked(pdev, vector)) { + kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, n, NULL, + s->msi_vectors[vector].virq); + /* TODO handle error */ + } + } else { + /* it will be delayed until msix is enabled, in write_config */ + IVSHMEM_DPRINTF("with irqfd, delayed until msix enabled\n"); + } +} + +static void process_msg_shmem(IVShmemState *s, int fd, Error **errp) +{ + Error *local_err = NULL; + struct stat buf; + size_t size; + + if (s->ivshmem_bar2) { + error_setg(errp, "server sent unexpected shared memory message"); + close(fd); + return; + } + + if (fstat(fd, &buf) < 0) { + error_setg_errno(errp, errno, + "can't determine size of shared memory sent by server"); + close(fd); + return; + } + + size = buf.st_size; + + /* mmap the region and map into the BAR2 */ + memory_region_init_ram_from_fd(&s->server_bar2, OBJECT(s), "ivshmem.bar2", + size, RAM_SHARED, fd, 0, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + + s->ivshmem_bar2 = &s->server_bar2; +} + +static void process_msg_disconnect(IVShmemState *s, uint16_t 
posn, + Error **errp) +{ + IVSHMEM_DPRINTF("posn %d has gone away\n", posn); + if (posn >= s->nb_peers || posn == s->vm_id) { + error_setg(errp, "invalid peer %d", posn); + return; + } + close_peer_eventfds(s, posn); +} + +static void process_msg_connect(IVShmemState *s, uint16_t posn, int fd, + Error **errp) +{ + Peer *peer = &s->peers[posn]; + int vector; + + /* + * The N-th connect message for this peer comes with the file + * descriptor for vector N-1. Count messages to find the vector. + */ + if (peer->nb_eventfds >= s->vectors) { + error_setg(errp, "Too many eventfd received, device has %d vectors", + s->vectors); + close(fd); + return; + } + vector = peer->nb_eventfds++; + + IVSHMEM_DPRINTF("eventfds[%d][%d] = %d\n", posn, vector, fd); + event_notifier_init_fd(&peer->eventfds[vector], fd); + fcntl_setfl(fd, O_NONBLOCK); /* msix/irqfd poll non block */ + + if (posn == s->vm_id) { + setup_interrupt(s, vector, errp); + /* TODO do we need to handle the error? */ + } + + if (ivshmem_has_feature(s, IVSHMEM_IOEVENTFD)) { + ivshmem_add_eventfd(s, posn, vector); + } +} + +static void process_msg(IVShmemState *s, int64_t msg, int fd, Error **errp) +{ + IVSHMEM_DPRINTF("posn is %" PRId64 ", fd is %d\n", msg, fd); + + if (msg < -1 || msg > IVSHMEM_MAX_PEERS) { + error_setg(errp, "server sent invalid message %" PRId64, msg); + close(fd); + return; + } + + if (msg == -1) { + process_msg_shmem(s, fd, errp); + return; + } + + if (msg >= s->nb_peers) { + resize_peers(s, msg + 1); + } + + if (fd >= 0) { + process_msg_connect(s, msg, fd, errp); + } else { + process_msg_disconnect(s, msg, errp); + } +} + +static int ivshmem_can_receive(void *opaque) +{ + IVShmemState *s = opaque; + + assert(s->msg_buffered_bytes < sizeof(s->msg_buf)); + return sizeof(s->msg_buf) - s->msg_buffered_bytes; +} + +static void ivshmem_read(void *opaque, const uint8_t *buf, int size) +{ + IVShmemState *s = opaque; + Error *err = NULL; + int fd; + int64_t msg; + + assert(size >= 0 && s->msg_buffered_bytes + size <= sizeof(s->msg_buf)); + memcpy((unsigned char *)&s->msg_buf + s->msg_buffered_bytes, buf, size); + s->msg_buffered_bytes += size; + if (s->msg_buffered_bytes < sizeof(s->msg_buf)) { + return; + } + msg = le64_to_cpu(s->msg_buf); + s->msg_buffered_bytes = 0; + + fd = qemu_chr_fe_get_msgfd(&s->server_chr); + + process_msg(s, msg, fd, &err); + if (err) { + error_report_err(err); + } +} + +static int64_t ivshmem_recv_msg(IVShmemState *s, int *pfd, Error **errp) +{ + int64_t msg; + int n, ret; + + n = 0; + do { + ret = qemu_chr_fe_read_all(&s->server_chr, (uint8_t *)&msg + n, + sizeof(msg) - n); + if (ret < 0) { + if (ret == -EINTR) { + continue; + } + error_setg_errno(errp, -ret, "read from server failed"); + return INT64_MIN; + } + n += ret; + } while (n < sizeof(msg)); + + *pfd = qemu_chr_fe_get_msgfd(&s->server_chr); + return le64_to_cpu(msg); +} + +static void ivshmem_recv_setup(IVShmemState *s, Error **errp) +{ + Error *err = NULL; + int64_t msg; + int fd; + + msg = ivshmem_recv_msg(s, &fd, &err); + if (err) { + error_propagate(errp, err); + return; + } + if (msg != IVSHMEM_PROTOCOL_VERSION) { + error_setg(errp, "server sent version %" PRId64 ", expecting %d", + msg, IVSHMEM_PROTOCOL_VERSION); + return; + } + if (fd != -1) { + error_setg(errp, "server sent invalid version message"); + return; + } + + /* + * ivshmem-server sends the remaining initial messages in a fixed + * order, but the device has always accepted them in any order. 
+ * Stay as compatible as practical, just in case people use + * servers that behave differently. + */ + + /* + * ivshmem_device_spec.txt has always required the ID message + * right here, and ivshmem-server has always complied. However, + * older versions of the device accepted it out of order, but + * broke when an interrupt setup message arrived before it. + */ + msg = ivshmem_recv_msg(s, &fd, &err); + if (err) { + error_propagate(errp, err); + return; + } + if (fd != -1 || msg < 0 || msg > IVSHMEM_MAX_PEERS) { + error_setg(errp, "server sent invalid ID message"); + return; + } + s->vm_id = msg; + + /* + * Receive more messages until we got shared memory. + */ + do { + msg = ivshmem_recv_msg(s, &fd, &err); + if (err) { + error_propagate(errp, err); + return; + } + process_msg(s, msg, fd, &err); + if (err) { + error_propagate(errp, err); + return; + } + } while (msg != -1); + + /* + * This function must either map the shared memory or fail. The + * loop above ensures that: it terminates normally only after it + * successfully processed the server's shared memory message. + * Assert that actually mapped the shared memory: + */ + assert(s->ivshmem_bar2); +} + +/* Select the MSI-X vectors used by device. + * ivshmem maps events to vectors statically, so + * we just enable all vectors on init and after reset. */ +static void ivshmem_msix_vector_use(IVShmemState *s) +{ + PCIDevice *d = PCI_DEVICE(s); + int i; + + for (i = 0; i < s->vectors; i++) { + msix_vector_use(d, i); + } +} + +static void ivshmem_disable_irqfd(IVShmemState *s); + +static void ivshmem_reset(DeviceState *d) +{ + IVShmemState *s = IVSHMEM_COMMON(d); + + ivshmem_disable_irqfd(s); + + s->intrstatus = 0; + s->intrmask = 0; + if (ivshmem_has_feature(s, IVSHMEM_MSI)) { + ivshmem_msix_vector_use(s); + } +} + +static int ivshmem_setup_interrupts(IVShmemState *s, Error **errp) +{ + /* allocate QEMU callback data for receiving interrupts */ + s->msi_vectors = g_malloc0(s->vectors * sizeof(MSIVector)); + + if (ivshmem_has_feature(s, IVSHMEM_MSI)) { + if (msix_init_exclusive_bar(PCI_DEVICE(s), s->vectors, 1, errp)) { + return -1; + } + + IVSHMEM_DPRINTF("msix initialized (%d vectors)\n", s->vectors); + ivshmem_msix_vector_use(s); + } + + return 0; +} + +static void ivshmem_remove_kvm_msi_virq(IVShmemState *s, int vector) +{ + IVSHMEM_DPRINTF("ivshmem_remove_kvm_msi_virq vector:%d\n", vector); + + if (s->msi_vectors[vector].pdev == NULL) { + return; + } + + /* it was cleaned when masked in the frontend. */ + kvm_irqchip_release_virq(kvm_state, s->msi_vectors[vector].virq); + + s->msi_vectors[vector].pdev = NULL; +} + +static void ivshmem_enable_irqfd(IVShmemState *s) +{ + PCIDevice *pdev = PCI_DEVICE(s); + int i; + + for (i = 0; i < s->peers[s->vm_id].nb_eventfds; i++) { + Error *err = NULL; + + ivshmem_add_kvm_msi_virq(s, i, &err); + if (err) { + error_report_err(err); + goto undo; + } + } + + if (msix_set_vector_notifiers(pdev, + ivshmem_vector_unmask, + ivshmem_vector_mask, + ivshmem_vector_poll)) { + error_report("ivshmem: msix_set_vector_notifiers failed"); + goto undo; + } + return; + +undo: + while (--i >= 0) { + ivshmem_remove_kvm_msi_virq(s, i); + } +} + +static void ivshmem_disable_irqfd(IVShmemState *s) +{ + PCIDevice *pdev = PCI_DEVICE(s); + int i; + + if (!pdev->msix_vector_use_notifier) { + return; + } + + msix_unset_vector_notifiers(pdev); + + for (i = 0; i < s->peers[s->vm_id].nb_eventfds; i++) { + /* + * MSI-X is already disabled here so msix_unset_vector_notifiers() + * didn't call our release notifier. 
Do it now to keep our masks and + * unmasks balanced. + */ + if (s->msi_vectors[i].unmasked) { + ivshmem_vector_mask(pdev, i); + } + ivshmem_remove_kvm_msi_virq(s, i); + } + +} + +static void ivshmem_write_config(PCIDevice *pdev, uint32_t address, + uint32_t val, int len) +{ + IVShmemState *s = IVSHMEM_COMMON(pdev); + int is_enabled, was_enabled = msix_enabled(pdev); + + pci_default_write_config(pdev, address, val, len); + is_enabled = msix_enabled(pdev); + + if (kvm_msi_via_irqfd_enabled()) { + if (!was_enabled && is_enabled) { + ivshmem_enable_irqfd(s); + } else if (was_enabled && !is_enabled) { + ivshmem_disable_irqfd(s); + } + } +} + +static void ivshmem_common_realize(PCIDevice *dev, Error **errp) +{ + IVShmemState *s = IVSHMEM_COMMON(dev); + Error *err = NULL; + uint8_t *pci_conf; + + /* IRQFD requires MSI */ + if (ivshmem_has_feature(s, IVSHMEM_IOEVENTFD) && + !ivshmem_has_feature(s, IVSHMEM_MSI)) { + error_setg(errp, "ioeventfd/irqfd requires MSI"); + return; + } + + pci_conf = dev->config; + pci_conf[PCI_COMMAND] = PCI_COMMAND_IO | PCI_COMMAND_MEMORY; + + memory_region_init_io(&s->ivshmem_mmio, OBJECT(s), &ivshmem_mmio_ops, s, + "ivshmem-mmio", IVSHMEM_REG_BAR_SIZE); + + /* region for registers*/ + pci_register_bar(dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, + &s->ivshmem_mmio); + + if (s->hostmem != NULL) { + IVSHMEM_DPRINTF("using hostmem\n"); + + s->ivshmem_bar2 = host_memory_backend_get_memory(s->hostmem); + host_memory_backend_set_mapped(s->hostmem, true); + } else { + Chardev *chr = qemu_chr_fe_get_driver(&s->server_chr); + assert(chr); + + IVSHMEM_DPRINTF("using shared memory server (socket = %s)\n", + chr->filename); + + /* we allocate enough space for 16 peers and grow as needed */ + resize_peers(s, 16); + + /* + * Receive setup messages from server synchronously. + * Older versions did it asynchronously, but that creates a + * number of entertaining race conditions. + */ + ivshmem_recv_setup(s, &err); + if (err) { + error_propagate(errp, err); + return; + } + + if (s->master == ON_OFF_AUTO_ON && s->vm_id != 0) { + error_setg(errp, + "master must connect to the server before any peers"); + return; + } + + qemu_chr_fe_set_handlers(&s->server_chr, ivshmem_can_receive, + ivshmem_read, NULL, NULL, s, NULL, true); + + if (ivshmem_setup_interrupts(s, errp) < 0) { + error_prepend(errp, "Failed to initialize interrupts: "); + return; + } + } + + if (s->master == ON_OFF_AUTO_AUTO) { + s->master = s->vm_id == 0 ? 
ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF;
+    }
+
+    if (!ivshmem_is_master(s)) {
+        error_setg(&s->migration_blocker,
+                   "Migration is disabled when using feature 'peer mode' in device 'ivshmem'");
+        if (migrate_add_blocker(s->migration_blocker, errp) < 0) {
+            error_free(s->migration_blocker);
+            return;
+        }
+    }
+
+    vmstate_register_ram(s->ivshmem_bar2, DEVICE(s));
+    pci_register_bar(PCI_DEVICE(s), 2,
+                     PCI_BASE_ADDRESS_SPACE_MEMORY |
+                     PCI_BASE_ADDRESS_MEM_PREFETCH |
+                     PCI_BASE_ADDRESS_MEM_TYPE_64,
+                     s->ivshmem_bar2);
+}
+
+static void ivshmem_exit(PCIDevice *dev)
+{
+    IVShmemState *s = IVSHMEM_COMMON(dev);
+    int i;
+
+    if (s->migration_blocker) {
+        migrate_del_blocker(s->migration_blocker);
+        error_free(s->migration_blocker);
+    }
+
+    if (memory_region_is_mapped(s->ivshmem_bar2)) {
+        if (!s->hostmem) {
+            void *addr = memory_region_get_ram_ptr(s->ivshmem_bar2);
+            int fd;
+
+            if (munmap(addr, memory_region_size(s->ivshmem_bar2)) == -1) {
+                error_report("Failed to munmap shared memory %s",
+                             strerror(errno));
+            }
+
+            fd = memory_region_get_fd(s->ivshmem_bar2);
+            close(fd);
+        }
+
+        vmstate_unregister_ram(s->ivshmem_bar2, DEVICE(dev));
+    }
+
+    if (s->hostmem) {
+        host_memory_backend_set_mapped(s->hostmem, false);
+    }
+
+    if (s->peers) {
+        for (i = 0; i < s->nb_peers; i++) {
+            close_peer_eventfds(s, i);
+        }
+        g_free(s->peers);
+    }
+
+    if (ivshmem_has_feature(s, IVSHMEM_MSI)) {
+        msix_uninit_exclusive_bar(dev);
+    }
+
+    g_free(s->msi_vectors);
+}
+
+static int ivshmem_pre_load(void *opaque)
+{
+    IVShmemState *s = opaque;
+
+    if (!ivshmem_is_master(s)) {
+        error_report("'peer' devices are not migratable");
+        return -EINVAL;
+    }
+
+    return 0;
+}
+
+static int ivshmem_post_load(void *opaque, int version_id)
+{
+    IVShmemState *s = opaque;
+
+    if (ivshmem_has_feature(s, IVSHMEM_MSI)) {
+        ivshmem_msix_vector_use(s);
+    }
+    return 0;
+}
+
+static void ivshmem_common_class_init(ObjectClass *klass, void *data)
+{
+    DeviceClass *dc = DEVICE_CLASS(klass);
+    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
+
+    k->realize = ivshmem_common_realize;
+    k->exit = ivshmem_exit;
+    k->config_write = ivshmem_write_config;
+    k->vendor_id = PCI_VENDOR_ID_IVSHMEM;
+    k->device_id = PCI_DEVICE_ID_IVSHMEM;
+    k->class_id = PCI_CLASS_MEMORY_RAM;
+    k->revision = 1;
+    dc->reset = ivshmem_reset;
+    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
+    dc->desc = "Inter-VM shared memory";
+}
+
+static const TypeInfo ivshmem_common_info = {
+    .name = TYPE_IVSHMEM_COMMON,
+    .parent = TYPE_PCI_DEVICE,
+    .instance_size = sizeof(IVShmemState),
+    .abstract = true,
+    .class_init = ivshmem_common_class_init,
+    .interfaces = (InterfaceInfo[]) {
+        { INTERFACE_CONVENTIONAL_PCI_DEVICE },
+        { },
+    },
+};
+
+static const VMStateDescription ivshmem_plain_vmsd = {
+    .name = TYPE_IVSHMEM_PLAIN,
+    .version_id = 0,
+    .minimum_version_id = 0,
+    .pre_load = ivshmem_pre_load,
+    .post_load = ivshmem_post_load,
+    .fields = (VMStateField[]) {
+        VMSTATE_PCI_DEVICE(parent_obj, IVShmemState),
+        VMSTATE_UINT32(intrstatus, IVShmemState),
+        VMSTATE_UINT32(intrmask, IVShmemState),
+        VMSTATE_END_OF_LIST()
+    },
+};
+
+static Property ivshmem_plain_properties[] = {
+    DEFINE_PROP_ON_OFF_AUTO("master", IVShmemState, master, ON_OFF_AUTO_OFF),
+    DEFINE_PROP_LINK("memdev", IVShmemState, hostmem, TYPE_MEMORY_BACKEND,
+                     HostMemoryBackend *),
+    DEFINE_PROP_END_OF_LIST(),
+};
+
+static void ivshmem_plain_realize(PCIDevice *dev, Error **errp)
+{
+    IVShmemState *s = IVSHMEM_COMMON(dev);
+
+    if (!s->hostmem) {
+        error_setg(errp, "You must specify a 'memdev'");
+        return;
+    } else if
(host_memory_backend_is_mapped(s->hostmem)) { + error_setg(errp, "can't use already busy memdev: %s", + object_get_canonical_path_component(OBJECT(s->hostmem))); + return; + } + + ivshmem_common_realize(dev, errp); +} + +static void ivshmem_plain_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + + k->realize = ivshmem_plain_realize; + device_class_set_props(dc, ivshmem_plain_properties); + dc->vmsd = &ivshmem_plain_vmsd; +} + +static const TypeInfo ivshmem_plain_info = { + .name = TYPE_IVSHMEM_PLAIN, + .parent = TYPE_IVSHMEM_COMMON, + .instance_size = sizeof(IVShmemState), + .class_init = ivshmem_plain_class_init, +}; + +static const VMStateDescription ivshmem_doorbell_vmsd = { + .name = TYPE_IVSHMEM_DOORBELL, + .version_id = 0, + .minimum_version_id = 0, + .pre_load = ivshmem_pre_load, + .post_load = ivshmem_post_load, + .fields = (VMStateField[]) { + VMSTATE_PCI_DEVICE(parent_obj, IVShmemState), + VMSTATE_MSIX(parent_obj, IVShmemState), + VMSTATE_UINT32(intrstatus, IVShmemState), + VMSTATE_UINT32(intrmask, IVShmemState), + VMSTATE_END_OF_LIST() + }, +}; + +static Property ivshmem_doorbell_properties[] = { + DEFINE_PROP_CHR("chardev", IVShmemState, server_chr), + DEFINE_PROP_UINT32("vectors", IVShmemState, vectors, 1), + DEFINE_PROP_BIT("ioeventfd", IVShmemState, features, IVSHMEM_IOEVENTFD, + true), + DEFINE_PROP_ON_OFF_AUTO("master", IVShmemState, master, ON_OFF_AUTO_OFF), + DEFINE_PROP_END_OF_LIST(), +}; + +static void ivshmem_doorbell_init(Object *obj) +{ + IVShmemState *s = IVSHMEM_DOORBELL(obj); + + s->features |= (1 << IVSHMEM_MSI); +} + +static void ivshmem_doorbell_realize(PCIDevice *dev, Error **errp) +{ + IVShmemState *s = IVSHMEM_COMMON(dev); + + if (!qemu_chr_fe_backend_connected(&s->server_chr)) { + error_setg(errp, "You must specify a 'chardev'"); + return; + } + + ivshmem_common_realize(dev, errp); +} + +static void ivshmem_doorbell_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + + k->realize = ivshmem_doorbell_realize; + device_class_set_props(dc, ivshmem_doorbell_properties); + dc->vmsd = &ivshmem_doorbell_vmsd; +} + +static const TypeInfo ivshmem_doorbell_info = { + .name = TYPE_IVSHMEM_DOORBELL, + .parent = TYPE_IVSHMEM_COMMON, + .instance_size = sizeof(IVShmemState), + .instance_init = ivshmem_doorbell_init, + .class_init = ivshmem_doorbell_class_init, +}; + +static void ivshmem_register_types(void) +{ + type_register_static(&ivshmem_common_info); + type_register_static(&ivshmem_plain_info); + type_register_static(&ivshmem_doorbell_info); +} + +type_init(ivshmem_register_types) diff --git a/hw/misc/led.c b/hw/misc/led.c new file mode 100644 index 000000000..f6d6d68bc --- /dev/null +++ b/hw/misc/led.c @@ -0,0 +1,161 @@ +/* + * QEMU single LED device + * + * Copyright (C) 2020 Philippe Mathieu-Daudé + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "migration/vmstate.h" +#include "hw/qdev-properties.h" +#include "hw/misc/led.h" +#include "trace.h" + +#define LED_INTENSITY_PERCENT_MAX 100 + +static const char * const led_color_name[] = { + [LED_COLOR_VIOLET] = "violet", + [LED_COLOR_BLUE] = "blue", + [LED_COLOR_CYAN] = "cyan", + [LED_COLOR_GREEN] = "green", + [LED_COLOR_YELLOW] = "yellow", + [LED_COLOR_AMBER] = "amber", + [LED_COLOR_ORANGE] = "orange", + [LED_COLOR_RED] = "red", +}; + +static bool led_color_name_is_valid(const char 
*color_name) +{ + for (size_t i = 0; i < ARRAY_SIZE(led_color_name); i++) { + if (strcmp(color_name, led_color_name[i]) == 0) { + return true; + } + } + return false; +} + +void led_set_intensity(LEDState *s, unsigned intensity_percent) +{ + if (intensity_percent > LED_INTENSITY_PERCENT_MAX) { + intensity_percent = LED_INTENSITY_PERCENT_MAX; + } + trace_led_set_intensity(s->description, s->color, intensity_percent); + if (intensity_percent != s->intensity_percent) { + trace_led_change_intensity(s->description, s->color, + s->intensity_percent, intensity_percent); + } + s->intensity_percent = intensity_percent; +} + +unsigned led_get_intensity(LEDState *s) +{ + return s->intensity_percent; +} + +void led_set_state(LEDState *s, bool is_emitting) +{ + led_set_intensity(s, is_emitting ? LED_INTENSITY_PERCENT_MAX : 0); +} + +static void led_set_state_gpio_handler(void *opaque, int line, int new_state) +{ + LEDState *s = LED(opaque); + + assert(line == 0); + led_set_state(s, !!new_state != s->gpio_active_high); +} + +static void led_reset(DeviceState *dev) +{ + LEDState *s = LED(dev); + + led_set_state(s, s->gpio_active_high); +} + +static const VMStateDescription vmstate_led = { + .name = TYPE_LED, + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT8(intensity_percent, LEDState), + VMSTATE_END_OF_LIST() + } +}; + +static void led_realize(DeviceState *dev, Error **errp) +{ + LEDState *s = LED(dev); + + if (s->color == NULL) { + error_setg(errp, "property 'color' not specified"); + return; + } else if (!led_color_name_is_valid(s->color)) { + error_setg(errp, "property 'color' invalid or not supported"); + return; + } + if (s->description == NULL) { + s->description = g_strdup("n/a"); + } + + qdev_init_gpio_in(DEVICE(s), led_set_state_gpio_handler, 1); +} + +static Property led_properties[] = { + DEFINE_PROP_STRING("color", LEDState, color), + DEFINE_PROP_STRING("description", LEDState, description), + DEFINE_PROP_BOOL("gpio-active-high", LEDState, gpio_active_high, true), + DEFINE_PROP_END_OF_LIST(), +}; + +static void led_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->desc = "LED"; + dc->vmsd = &vmstate_led; + dc->reset = led_reset; + dc->realize = led_realize; + set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories); + device_class_set_props(dc, led_properties); +} + +static const TypeInfo led_info = { + .name = TYPE_LED, + .parent = TYPE_DEVICE, + .instance_size = sizeof(LEDState), + .class_init = led_class_init +}; + +static void led_register_types(void) +{ + type_register_static(&led_info); +} + +type_init(led_register_types) + +LEDState *led_create_simple(Object *parentobj, + GpioPolarity gpio_polarity, + LEDColor color, + const char *description) +{ + g_autofree char *name = NULL; + DeviceState *dev; + + dev = qdev_new(TYPE_LED); + qdev_prop_set_bit(dev, "gpio-active-high", + gpio_polarity == GPIO_POLARITY_ACTIVE_HIGH); + qdev_prop_set_string(dev, "color", led_color_name[color]); + if (!description) { + static unsigned undescribed_led_id; + name = g_strdup_printf("undescribed-led-#%u", undescribed_led_id++); + } else { + qdev_prop_set_string(dev, "description", description); + name = g_ascii_strdown(description, -1); + name = g_strdelimit(name, " #", '-'); + } + object_property_add_child(parentobj, name, OBJECT(dev)); + qdev_realize_and_unref(dev, NULL, &error_fatal); + + return LED(dev); +} diff --git a/hw/misc/mac_via.c b/hw/misc/mac_via.c new file mode 100644 index 000000000..b378e6b30 --- /dev/null +++ 
b/hw/misc/mac_via.c @@ -0,0 +1,1221 @@ +/* + * QEMU m68k Macintosh VIA device support + * + * Copyright (c) 2011-2018 Laurent Vivier + * Copyright (c) 2018 Mark Cave-Ayland + * + * Some parts from hw/misc/macio/cuda.c + * + * Copyright (c) 2004-2007 Fabrice Bellard + * Copyright (c) 2007 Jocelyn Mayer + * + * some parts from linux-2.6.29, arch/m68k/include/asm/mac_via.h + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "qemu-common.h" +#include "migration/vmstate.h" +#include "hw/sysbus.h" +#include "hw/irq.h" +#include "qemu/timer.h" +#include "hw/misc/mac_via.h" +#include "hw/misc/mos6522.h" +#include "hw/input/adb.h" +#include "sysemu/runstate.h" +#include "qapi/error.h" +#include "qemu/cutils.h" +#include "hw/qdev-properties.h" +#include "hw/qdev-properties-system.h" +#include "sysemu/block-backend.h" +#include "trace.h" +#include "qemu/log.h" + +/* + * VIAs: There are two in every machine + */ + +/* + * Not all of these are true post MacII I think. + * CSA: probably the ones CHRP marks as 'unused' change purposes + * when the IWM becomes the SWIM. + * http://www.rs6000.ibm.com/resource/technology/chrpio/via5.mak.html + * ftp://ftp.austin.ibm.com/pub/technology/spec/chrp/inwork/CHRP_IORef_1.0.pdf + * + * also, http://developer.apple.com/technotes/hw/hw_09.html claims the + * following changes for IIfx: + * VIA1A_vSccWrReq not available and that VIA1A_vSync has moved to an IOP. + * Also, "All of the functionality of VIA2 has been moved to other chips". + */ + +#define VIA1A_vSccWrReq 0x80 /* + * SCC write. (input) + * [CHRP] SCC WREQ: Reflects the state of the + * Wait/Request pins from the SCC. + * [Macintosh Family Hardware] + * as CHRP on SE/30,II,IIx,IIcx,IIci. + * on IIfx, "0 means an active request" + */ +#define VIA1A_vRev8 0x40 /* + * Revision 8 board ??? + * [CHRP] En WaitReqB: Lets the WaitReq_L + * signal from port B of the SCC appear on + * the PA7 input pin. Output. + * [Macintosh Family] On the SE/30, this + * is the bit to flip screen buffers. + * 0=alternate, 1=main. + * on II,IIx,IIcx,IIci,IIfx this is a bit + * for Rev ID. 0=II,IIx, 1=IIcx,IIci,IIfx + */ +#define VIA1A_vHeadSel 0x20 /* + * Head select for IWM. + * [CHRP] unused. + * [Macintosh Family] "Floppy disk + * state-control line SEL" on all but IIfx + */ +#define VIA1A_vOverlay 0x10 /* + * [Macintosh Family] On SE/30,II,IIx,IIcx + * this bit enables the "Overlay" address + * map in the address decoders as it is on + * reset for mapping the ROM over the reset + * vector. 1=use overlay map. + * On the IIci,IIfx it is another bit of the + * CPU ID: 0=normal IIci, 1=IIci with parity + * feature or IIfx. + * [CHRP] En WaitReqA: Lets the WaitReq_L + * signal from port A of the SCC appear + * on the PA7 input pin (CHRP). Output. + * [MkLinux] "Drive Select" + * (with 0x20 being 'disk head select') + */ +#define VIA1A_vSync 0x08 /* + * [CHRP] Sync Modem: modem clock select: + * 1: select the external serial clock to + * drive the SCC's /RTxCA pin. + * 0: Select the 3.6864MHz clock to drive + * the SCC cell. + * [Macintosh Family] Correct on all but IIfx + */ + +/* + * Macintosh Family Hardware sez: bits 0-2 of VIA1A are volume control + * on Macs which had the PWM sound hardware. Reserved on newer models. + * On IIci,IIfx, bits 1-2 are the rest of the CPU ID: + * bit 2: 1=IIci, 0=IIfx + * bit 1: 1 on both IIci and IIfx. + * MkLinux sez bit 0 is 'burnin flag' in this case. 
+ * CHRP sez: VIA1A bits 0-2 and 5 are 'unused': if programmed as + * inputs, these bits will read 0. + */ +#define VIA1A_vVolume 0x07 /* Audio volume mask for PWM */ +#define VIA1A_CPUID0 0x02 /* CPU id bit 0 on RBV, others */ +#define VIA1A_CPUID1 0x04 /* CPU id bit 0 on RBV, others */ +#define VIA1A_CPUID2 0x10 /* CPU id bit 0 on RBV, others */ +#define VIA1A_CPUID3 0x40 /* CPU id bit 0 on RBV, others */ + +/* + * Info on VIA1B is from Macintosh Family Hardware & MkLinux. + * CHRP offers no info. + */ +#define VIA1B_vSound 0x80 /* + * Sound enable (for compatibility with + * PWM hardware) 0=enabled. + * Also, on IIci w/parity, shows parity error + * 0=error, 1=OK. + */ +#define VIA1B_vMystery 0x40 /* + * On IIci, parity enable. 0=enabled,1=disabled + * On SE/30, vertical sync interrupt enable. + * 0=enabled. This vSync interrupt shows up + * as a slot $E interrupt. + * On Quadra 800 this bit toggles A/UX mode which + * configures the glue logic to deliver some IRQs + * at different levels compared to a classic + * Mac. + */ +#define VIA1B_vADBS2 0x20 /* ADB state input bit 1 (unused on IIfx) */ +#define VIA1B_vADBS1 0x10 /* ADB state input bit 0 (unused on IIfx) */ +#define VIA1B_vADBInt 0x08 /* ADB interrupt 0=interrupt (unused on IIfx)*/ +#define VIA1B_vRTCEnb 0x04 /* Enable Real time clock. 0=enabled. */ +#define VIA1B_vRTCClk 0x02 /* Real time clock serial-clock line. */ +#define VIA1B_vRTCData 0x01 /* Real time clock serial-data line. */ + +/* + * VIA2 A register is the interrupt lines raised off the nubus + * slots. + * The below info is from 'Macintosh Family Hardware.' + * MkLinux calls the 'IIci internal video IRQ' below the 'RBV slot 0 irq.' + * It also notes that the slot $9 IRQ is the 'Ethernet IRQ' and + * defines the 'Video IRQ' as 0x40 for the 'EVR' VIA work-alike. + * Perhaps OSS uses vRAM1 and vRAM2 for ADB. + */ + +#define VIA2A_vRAM1 0x80 /* RAM size bit 1 (IIci: reserved) */ +#define VIA2A_vRAM0 0x40 /* RAM size bit 0 (IIci: internal video IRQ) */ +#define VIA2A_vIRQE 0x20 /* IRQ from slot $E */ +#define VIA2A_vIRQD 0x10 /* IRQ from slot $D */ +#define VIA2A_vIRQC 0x08 /* IRQ from slot $C */ +#define VIA2A_vIRQB 0x04 /* IRQ from slot $B */ +#define VIA2A_vIRQA 0x02 /* IRQ from slot $A */ +#define VIA2A_vIRQ9 0x01 /* IRQ from slot $9 */ + +/* + * RAM size bits decoded as follows: + * bit1 bit0 size of ICs in bank A + * 0 0 256 kbit + * 0 1 1 Mbit + * 1 0 4 Mbit + * 1 1 16 Mbit + */ + +/* + * Register B has the fun stuff in it + */ + +#define VIA2B_vVBL 0x80 /* + * VBL output to VIA1 (60.15Hz) driven by + * timer T1. + * on IIci, parity test: 0=test mode. + * [MkLinux] RBV_PARODD: 1=odd,0=even. + */ +#define VIA2B_vSndJck 0x40 /* + * External sound jack status. + * 0=plug is inserted. On SE/30, always 0 + */ +#define VIA2B_vTfr0 0x20 /* Transfer mode bit 0 ack from NuBus */ +#define VIA2B_vTfr1 0x10 /* Transfer mode bit 1 ack from NuBus */ +#define VIA2B_vMode32 0x08 /* + * 24/32bit switch - doubles as cache flush + * on II, AMU/PMMU control. + * if AMU, 0=24bit to 32bit translation + * if PMMU, 1=PMMU is accessing page table. + * on SE/30 tied low. + * on IIx,IIcx,IIfx, unused. + * on IIci/RBV, cache control. 0=flush cache. + */ +#define VIA2B_vPower 0x04 /* + * Power off, 0=shut off power. + * on SE/30 this signal sent to PDS card. + */ +#define VIA2B_vBusLk 0x02 /* + * Lock NuBus transactions, 0=locked. + * on SE/30 sent to PDS card. + */ +#define VIA2B_vCDis 0x01 /* + * Cache control. 
+                            * On IIci, 1=disable cache card
+                            * on others, 0=disable processor's instruction
+                            * and data caches.
+                            */
+
+/* interrupt flags */
+
+#define IRQ_SET 0x80
+
+/* common */
+
+#define VIA_IRQ_TIMER1 0x40
+#define VIA_IRQ_TIMER2 0x20
+
+/*
+ * Apple sez: http://developer.apple.com/technotes/ov/ov_04.html
+ * Another example of a valid function that has no ROM support is the use
+ * of the alternate video page for page-flipping animation. Since there
+ * is no ROM call to flip pages, it is necessary to go play with the
+ * right bit in the VIA chip (6522 Versatile Interface Adapter).
+ * [CSA: don't know which one this is, but it's one of 'em!]
+ */
+
+/*
+ * 6522 registers - see databook.
+ * CSA: Assignments for VIA1 confirmed from CHRP spec.
+ */
+
+/* partial address decode.  0xYYXX : XX part for RBV, YY part for VIA */
+/* Note: 15 VIA regs, 8 RBV regs */
+
+#define vBufB  0x0000 /* [VIA/RBV]  Register B */
+#define vBufAH 0x0200 /* [VIA only] Buffer A, with handshake. DON'T USE! */
+#define vDirB  0x0400 /* [VIA only] Data Direction Register B. */
+#define vDirA  0x0600 /* [VIA only] Data Direction Register A. */
+#define vT1CL  0x0800 /* [VIA only] Timer one counter low. */
+#define vT1CH  0x0a00 /* [VIA only] Timer one counter high. */
+#define vT1LL  0x0c00 /* [VIA only] Timer one latches low. */
+#define vT1LH  0x0e00 /* [VIA only] Timer one latches high. */
+#define vT2CL  0x1000 /* [VIA only] Timer two counter low. */
+#define vT2CH  0x1200 /* [VIA only] Timer two counter high. */
+#define vSR    0x1400 /* [VIA only] Shift register. */
+#define vACR   0x1600 /* [VIA only] Auxiliary control register. */
+#define vPCR   0x1800 /* [VIA only] Peripheral control register. */
+                      /*
+                       * CHRP sez never ever to *write* this.
+                       * Mac family says never to *change* this.
+                       * In fact we need to initialize it once at start.
+                       */
+#define vIFR   0x1a00 /* [VIA/RBV]  Interrupt flag register. */
+#define vIER   0x1c00 /* [VIA/RBV]  Interrupt enable register. */
+#define vBufA  0x1e00 /* [VIA/RBV]  register A (no handshake) */
+
+/* from linux 2.6 drivers/macintosh/via-macii.c */
+
+/* Bits in ACR */
+
+#define VIA1ACR_vShiftCtrl   0x1c /* Shift register control bits */
+#define VIA1ACR_vShiftExtClk 0x0c /* Shift on external clock */
+#define VIA1ACR_vShiftOut    0x10 /* Shift out if 1 */
+
+/*
+ * Apple Macintosh Family Hardware Reference
+ * Table 19-10 ADB transaction states
+ */
+
+#define ADB_STATE_NEW  0
+#define ADB_STATE_EVEN 1
+#define ADB_STATE_ODD  2
+#define ADB_STATE_IDLE 3
+
+#define VIA1B_vADB_StateMask  (VIA1B_vADBS1 | VIA1B_vADBS2)
+#define VIA1B_vADB_StateShift 4
+
+#define VIA_TIMER_FREQ (783360)
+#define VIA_ADB_POLL_FREQ 50 /* XXX: not real */
+
+/*
+ * Guide to the Macintosh Family Hardware ch. 12 "Displays" p.
401 gives the
+ * precise 60Hz interrupt frequency as ~60.15Hz with a period of 16625.8 us
+ */
+#define VIA_60HZ_TIMER_PERIOD_NS 16625800
+
+/* VIA returns time offset from Jan 1, 1904, not 1970 */
+#define RTC_OFFSET 2082844800
+
+enum {
+    REG_0,
+    REG_1,
+    REG_2,
+    REG_3,
+    REG_TEST,
+    REG_WPROTECT,
+    REG_PRAM_ADDR,
+    REG_PRAM_ADDR_LAST = REG_PRAM_ADDR + 19,
+    REG_PRAM_SECT,
+    REG_PRAM_SECT_LAST = REG_PRAM_SECT + 7,
+    REG_INVALID,
+    REG_EMPTY = 0xff,
+};
+
+static void via1_sixty_hz_update(MOS6522Q800VIA1State *v1s)
+{
+    /* 60 Hz irq */
+    v1s->next_sixty_hz = (qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
+                          VIA_60HZ_TIMER_PERIOD_NS) /
+                          VIA_60HZ_TIMER_PERIOD_NS * VIA_60HZ_TIMER_PERIOD_NS;
+    timer_mod(v1s->sixty_hz_timer, v1s->next_sixty_hz);
+}
+
+static void via1_one_second_update(MOS6522Q800VIA1State *v1s)
+{
+    v1s->next_second = (qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 1000) /
+                       1000 * 1000;
+    timer_mod(v1s->one_second_timer, v1s->next_second);
+}
+
+static void via1_sixty_hz(void *opaque)
+{
+    MOS6522Q800VIA1State *v1s = opaque;
+    MOS6522State *s = MOS6522(v1s);
+    MOS6522DeviceClass *mdc = MOS6522_GET_CLASS(s);
+
+    s->ifr |= VIA1_IRQ_60HZ;
+    mdc->update_irq(s);
+
+    via1_sixty_hz_update(v1s);
+}
+
+static void via1_one_second(void *opaque)
+{
+    MOS6522Q800VIA1State *v1s = opaque;
+    MOS6522State *s = MOS6522(v1s);
+    MOS6522DeviceClass *mdc = MOS6522_GET_CLASS(s);
+
+    s->ifr |= VIA1_IRQ_ONE_SECOND;
+    mdc->update_irq(s);
+
+    via1_one_second_update(v1s);
+}
+
+static void via1_irq_request(void *opaque, int irq, int level)
+{
+    MOS6522Q800VIA1State *v1s = opaque;
+    MOS6522State *s = MOS6522(v1s);
+    MOS6522DeviceClass *mdc = MOS6522_GET_CLASS(s);
+
+    if (level) {
+        s->ifr |= 1 << irq;
+    } else {
+        s->ifr &= ~(1 << irq);
+    }
+
+    mdc->update_irq(s);
+}
+
+static void via2_irq_request(void *opaque, int irq, int level)
+{
+    MOS6522Q800VIA2State *v2s = opaque;
+    MOS6522State *s = MOS6522(v2s);
+    MOS6522DeviceClass *mdc = MOS6522_GET_CLASS(s);
+
+    if (level) {
+        s->ifr |= 1 << irq;
+    } else {
+        s->ifr &= ~(1 << irq);
+    }
+
+    mdc->update_irq(s);
+}
+
+
+static void pram_update(MOS6522Q800VIA1State *v1s)
+{
+    if (v1s->blk) {
+        if (blk_pwrite(v1s->blk, 0, v1s->PRAM, sizeof(v1s->PRAM), 0) < 0) {
+            qemu_log("pram_update: cannot write to file\n");
+        }
+    }
+}
+
+/*
+ * RTC Commands
+ *
+ * Command byte    Register addressed by the command
+ *
+ * z0000001        Seconds register 0 (lowest-order byte)
+ * z0000101        Seconds register 1
+ * z0001001        Seconds register 2
+ * z0001101        Seconds register 3 (highest-order byte)
+ * 00110001        Test register (write-only)
+ * 00110101        Write-Protect Register (write-only)
+ * z010aa01        RAM address 100aa ($10-$13) (first 20 bytes only)
+ * z1aaaa01        RAM address 0aaaa ($00-$0F) (first 20 bytes only)
+ * z0111aaa        Extended memory designator and sector number
+ *
+ * For a read request, z=1; for a write, z=0.
+ * The letter a indicates bits whose values depend on which parameter
+ * RAM byte you want to address
+ */
+static int via1_rtc_compact_cmd(uint8_t value)
+{
+    uint8_t read = value & 0x80;
+
+    value &= 0x7f;
+
+    /* the last 2 bits of a command byte must always be 0b01 ...
*/
+    if ((value & 0x78) == 0x38) {
+        /* except for the extended memory designator */
+        return read | (REG_PRAM_SECT + (value & 0x07));
+    }
+    if ((value & 0x03) == 0x01) {
+        value >>= 2;
+        if ((value & 0x1c) == 0) {
+            /* seconds registers */
+            return read | (REG_0 + (value & 0x03));
+        } else if ((value == 0x0c) && !read) {
+            return REG_TEST;
+        } else if ((value == 0x0d) && !read) {
+            return REG_WPROTECT;
+        } else if ((value & 0x1c) == 0x08) {
+            /* RAM address 0x10 to 0x13 */
+            return read | (REG_PRAM_ADDR + 0x10 + (value & 0x03));
+        } else if ((value & 0x10) == 0x10) {
+            /* RAM address 0x00 to 0x0f */
+            return read | (REG_PRAM_ADDR + (value & 0x0f));
+        }
+    }
+    return REG_INVALID;
+}
+
+static void via1_rtc_update(MOS6522Q800VIA1State *v1s)
+{
+    MOS6522State *s = MOS6522(v1s);
+    int cmd, sector, addr;
+    uint32_t time;
+
+    if (s->b & VIA1B_vRTCEnb) {
+        return;
+    }
+
+    if (s->dirb & VIA1B_vRTCData) {
+        /* send bits to the RTC */
+        if (!(v1s->last_b & VIA1B_vRTCClk) && (s->b & VIA1B_vRTCClk)) {
+            v1s->data_out <<= 1;
+            v1s->data_out |= s->b & VIA1B_vRTCData;
+            v1s->data_out_cnt++;
+        }
+        trace_via1_rtc_update_data_out(v1s->data_out_cnt, v1s->data_out);
+    } else {
+        trace_via1_rtc_update_data_in(v1s->data_in_cnt, v1s->data_in);
+        /* receive bits from the RTC */
+        if ((v1s->last_b & VIA1B_vRTCClk) &&
+            !(s->b & VIA1B_vRTCClk) &&
+            v1s->data_in_cnt) {
+            s->b = (s->b & ~VIA1B_vRTCData) |
+                   ((v1s->data_in >> 7) & VIA1B_vRTCData);
+            v1s->data_in <<= 1;
+            v1s->data_in_cnt--;
+        }
+        return;
+    }
+
+    if (v1s->data_out_cnt != 8) {
+        return;
+    }
+
+    v1s->data_out_cnt = 0;
+
+    trace_via1_rtc_internal_status(v1s->cmd, v1s->alt, v1s->data_out);
+    /* first byte: it's a command */
+    if (v1s->cmd == REG_EMPTY) {
+
+        cmd = via1_rtc_compact_cmd(v1s->data_out);
+        trace_via1_rtc_internal_cmd(cmd);
+
+        if (cmd == REG_INVALID) {
+            trace_via1_rtc_cmd_invalid(v1s->data_out);
+            return;
+        }
+
+        if (cmd & 0x80) { /* this is a read command */
+            switch (cmd & 0x7f) {
+            case REG_0...REG_3: /* seconds registers */
+                /*
+                 * register 0 is lowest-order byte
+                 * register 3 is highest-order byte
+                 */
+
+                time = v1s->tick_offset + (qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL)
+                       / NANOSECONDS_PER_SECOND);
+                trace_via1_rtc_internal_time(time);
+                v1s->data_in = (time >> ((cmd & 0x03) << 3)) & 0xff;
+                v1s->data_in_cnt = 8;
+                trace_via1_rtc_cmd_seconds_read((cmd & 0x7f) - REG_0,
+                                                v1s->data_in);
+                break;
+            case REG_PRAM_ADDR...REG_PRAM_ADDR_LAST:
+                /* PRAM address 0x00 -> 0x13 */
+                v1s->data_in = v1s->PRAM[(cmd & 0x7f) - REG_PRAM_ADDR];
+                v1s->data_in_cnt = 8;
+                trace_via1_rtc_cmd_pram_read((cmd & 0x7f) - REG_PRAM_ADDR,
+                                             v1s->data_in);
+                break;
+            case REG_PRAM_SECT...REG_PRAM_SECT_LAST:
+                /*
+                 * extended memory designator and sector number
+                 * the only two-byte read command
+                 */
+                trace_via1_rtc_internal_set_cmd(cmd);
+                v1s->cmd = cmd;
+                break;
+            default:
+                g_assert_not_reached();
+                break;
+            }
+            return;
+        }
+
+        /* this is a write command, needs a parameter */
+        if (cmd == REG_WPROTECT || !v1s->wprotect) {
+            trace_via1_rtc_internal_set_cmd(cmd);
+            v1s->cmd = cmd;
+        } else {
+            trace_via1_rtc_internal_ignore_cmd(cmd);
+        }
+        return;
+    }
+
+    /* second byte: it's a parameter */
+    if (v1s->alt == REG_EMPTY) {
+        switch (v1s->cmd & 0x7f) {
+        case REG_0...REG_3: /* seconds register */
+            /* FIXME */
+            trace_via1_rtc_cmd_seconds_write(v1s->cmd - REG_0, v1s->data_out);
+            v1s->cmd = REG_EMPTY;
+            break;
+        case REG_TEST:
+            /* device control: nothing to do */
+            trace_via1_rtc_cmd_test_write(v1s->data_out);
+            v1s->cmd = REG_EMPTY;
+            break;
+        case REG_WPROTECT:
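+            /*
+             * Worked example (per the command table above): the
+             * write-protect command byte is 0x35 (00110101).
+             * via1_rtc_compact_cmd() sees (0x35 & 0x03) == 0x01,
+             * shifts the value down to 0x0d and, since z=0 (a write),
+             * returns REG_WPROTECT; the parameter byte handled here
+             * then carries the protect flag in bit 7.
+             */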
+            /* Write Protect register */
+            trace_via1_rtc_cmd_wprotect_write(v1s->data_out);
+            v1s->wprotect = !!(v1s->data_out & 0x80);
+            v1s->cmd = REG_EMPTY;
+            break;
+        case REG_PRAM_ADDR...REG_PRAM_ADDR_LAST:
+            /* PRAM address 0x00 -> 0x13 */
+            trace_via1_rtc_cmd_pram_write(v1s->cmd - REG_PRAM_ADDR,
+                                          v1s->data_out);
+            v1s->PRAM[v1s->cmd - REG_PRAM_ADDR] = v1s->data_out;
+            pram_update(v1s);
+            v1s->cmd = REG_EMPTY;
+            break;
+        case REG_PRAM_SECT...REG_PRAM_SECT_LAST:
+            addr = (v1s->data_out >> 2) & 0x1f;
+            sector = (v1s->cmd & 0x7f) - REG_PRAM_SECT;
+            if (v1s->cmd & 0x80) {
+                /* it's a read */
+                v1s->data_in = v1s->PRAM[sector * 32 + addr];
+                v1s->data_in_cnt = 8;
+                trace_via1_rtc_cmd_pram_sect_read(sector, addr,
+                                                  sector * 32 + addr,
+                                                  v1s->data_in);
+                v1s->cmd = REG_EMPTY;
+            } else {
+                /* it's a write, we need one more parameter */
+                trace_via1_rtc_internal_set_alt(addr, sector, addr);
+                v1s->alt = addr;
+            }
+            break;
+        default:
+            g_assert_not_reached();
+            break;
+        }
+        return;
+    }
+
+    /* third byte: it's the data of a REG_PRAM_SECT write */
+    g_assert(REG_PRAM_SECT <= v1s->cmd && v1s->cmd <= REG_PRAM_SECT_LAST);
+    sector = v1s->cmd - REG_PRAM_SECT;
+    v1s->PRAM[sector * 32 + v1s->alt] = v1s->data_out;
+    pram_update(v1s);
+    trace_via1_rtc_cmd_pram_sect_write(sector, v1s->alt, sector * 32 + v1s->alt,
+                                       v1s->data_out);
+    v1s->alt = REG_EMPTY;
+    v1s->cmd = REG_EMPTY;
+}
+
+static void adb_via_poll(void *opaque)
+{
+    MOS6522Q800VIA1State *v1s = MOS6522_Q800_VIA1(opaque);
+    MOS6522State *s = MOS6522(v1s);
+    ADBBusState *adb_bus = &v1s->adb_bus;
+    uint8_t obuf[9];
+    uint8_t *data = &s->sr;
+    int olen;
+
+    /*
+     * Setting vADBInt below indicates that an autopoll reply has been
+     * received; however, we must block autopoll until the point where
+     * the entire reply has been read back to the host
+     */
+    adb_autopoll_block(adb_bus);
+
+    if (v1s->adb_data_in_size > 0 && v1s->adb_data_in_index == 0) {
+        /*
+         * For older Linux kernels that switch to IDLE mode after sending the
+         * ADB command, detect if there is an existing response and return that
+         * as a "fake" autopoll reply or bus timeout accordingly
+         */
+        *data = v1s->adb_data_out[0];
+        olen = v1s->adb_data_in_size;
+
+        s->b &= ~VIA1B_vADBInt;
+        qemu_irq_raise(v1s->adb_data_ready);
+    } else {
+        /*
+         * Otherwise poll as normal
+         */
+        v1s->adb_data_in_index = 0;
+        v1s->adb_data_out_index = 0;
+        olen = adb_poll(adb_bus, obuf, adb_bus->autopoll_mask);
+
+        if (olen > 0) {
+            /* Autopoll response */
+            *data = obuf[0];
+            olen--;
+            memcpy(v1s->adb_data_in, &obuf[1], olen);
+            v1s->adb_data_in_size = olen;
+
+            s->b &= ~VIA1B_vADBInt;
+            qemu_irq_raise(v1s->adb_data_ready);
+        } else {
+            *data = v1s->adb_autopoll_cmd;
+            obuf[0] = 0xff;
+            obuf[1] = 0xff;
+            olen = 2;
+
+            memcpy(v1s->adb_data_in, obuf, olen);
+            v1s->adb_data_in_size = olen;
+
+            s->b &= ~VIA1B_vADBInt;
+            qemu_irq_raise(v1s->adb_data_ready);
+        }
+    }
+
+    trace_via1_adb_poll(*data, (s->b & VIA1B_vADBInt) ?
"+" : "-", + adb_bus->status, v1s->adb_data_in_index, olen); +} + +static int adb_via_send_len(uint8_t data) +{ + /* Determine the send length from the given ADB command */ + uint8_t cmd = data & 0xc; + uint8_t reg = data & 0x3; + + switch (cmd) { + case 0x8: + /* Listen command */ + switch (reg) { + case 2: + /* Register 2 is only used for the keyboard */ + return 3; + case 3: + /* + * Fortunately our devices only implement writes + * to register 3 which is fixed at 2 bytes + */ + return 3; + default: + qemu_log_mask(LOG_UNIMP, "ADB unknown length for register %d\n", + reg); + return 1; + } + default: + /* Talk, BusReset */ + return 1; + } +} + +static void adb_via_send(MOS6522Q800VIA1State *v1s, int state, uint8_t data) +{ + MOS6522State *ms = MOS6522(v1s); + ADBBusState *adb_bus = &v1s->adb_bus; + uint16_t autopoll_mask; + + switch (state) { + case ADB_STATE_NEW: + /* + * Command byte: vADBInt tells host autopoll data already present + * in VIA shift register and ADB transceiver + */ + adb_autopoll_block(adb_bus); + + if (adb_bus->status & ADB_STATUS_POLLREPLY) { + /* Tell the host the existing data is from autopoll */ + ms->b &= ~VIA1B_vADBInt; + } else { + ms->b |= VIA1B_vADBInt; + v1s->adb_data_out_index = 0; + v1s->adb_data_out[v1s->adb_data_out_index++] = data; + } + + trace_via1_adb_send(" NEW", data, (ms->b & VIA1B_vADBInt) ? "+" : "-"); + qemu_irq_raise(v1s->adb_data_ready); + break; + + case ADB_STATE_EVEN: + case ADB_STATE_ODD: + ms->b |= VIA1B_vADBInt; + v1s->adb_data_out[v1s->adb_data_out_index++] = data; + + trace_via1_adb_send(state == ADB_STATE_EVEN ? "EVEN" : " ODD", + data, (ms->b & VIA1B_vADBInt) ? "+" : "-"); + qemu_irq_raise(v1s->adb_data_ready); + break; + + case ADB_STATE_IDLE: + return; + } + + /* If the command is complete, execute it */ + if (v1s->adb_data_out_index == adb_via_send_len(v1s->adb_data_out[0])) { + v1s->adb_data_in_size = adb_request(adb_bus, v1s->adb_data_in, + v1s->adb_data_out, + v1s->adb_data_out_index); + v1s->adb_data_in_index = 0; + + if (adb_bus->status & ADB_STATUS_BUSTIMEOUT) { + /* + * Bus timeout (but allow first EVEN and ODD byte to indicate + * timeout via vADBInt and SRQ status) + */ + v1s->adb_data_in[0] = 0xff; + v1s->adb_data_in[1] = 0xff; + v1s->adb_data_in_size = 2; + } + + /* + * If last command is TALK, store it for use by autopoll and adjust + * the autopoll mask accordingly + */ + if ((v1s->adb_data_out[0] & 0xc) == 0xc) { + v1s->adb_autopoll_cmd = v1s->adb_data_out[0]; + + autopoll_mask = 1 << (v1s->adb_autopoll_cmd >> 4); + adb_set_autopoll_mask(adb_bus, autopoll_mask); + } + } +} + +static void adb_via_receive(MOS6522Q800VIA1State *v1s, int state, uint8_t *data) +{ + MOS6522State *ms = MOS6522(v1s); + ADBBusState *adb_bus = &v1s->adb_bus; + uint16_t pending; + + switch (state) { + case ADB_STATE_NEW: + ms->b |= VIA1B_vADBInt; + return; + + case ADB_STATE_IDLE: + ms->b |= VIA1B_vADBInt; + adb_autopoll_unblock(adb_bus); + + trace_via1_adb_receive("IDLE", *data, + (ms->b & VIA1B_vADBInt) ? "+" : "-", adb_bus->status, + v1s->adb_data_in_index, v1s->adb_data_in_size); + + break; + + case ADB_STATE_EVEN: + case ADB_STATE_ODD: + switch (v1s->adb_data_in_index) { + case 0: + /* First EVEN byte: vADBInt indicates bus timeout */ + *data = v1s->adb_data_in[v1s->adb_data_in_index]; + if (adb_bus->status & ADB_STATUS_BUSTIMEOUT) { + ms->b &= ~VIA1B_vADBInt; + } else { + ms->b |= VIA1B_vADBInt; + } + + trace_via1_adb_receive(state == ADB_STATE_EVEN ? "EVEN" : " ODD", + *data, (ms->b & VIA1B_vADBInt) ? 
"+" : "-", + adb_bus->status, v1s->adb_data_in_index, + v1s->adb_data_in_size); + + v1s->adb_data_in_index++; + break; + + case 1: + /* First ODD byte: vADBInt indicates SRQ */ + *data = v1s->adb_data_in[v1s->adb_data_in_index]; + pending = adb_bus->pending & ~(1 << (v1s->adb_autopoll_cmd >> 4)); + if (pending) { + ms->b &= ~VIA1B_vADBInt; + } else { + ms->b |= VIA1B_vADBInt; + } + + trace_via1_adb_receive(state == ADB_STATE_EVEN ? "EVEN" : " ODD", + *data, (ms->b & VIA1B_vADBInt) ? "+" : "-", + adb_bus->status, v1s->adb_data_in_index, + v1s->adb_data_in_size); + + v1s->adb_data_in_index++; + break; + + default: + /* + * Otherwise vADBInt indicates end of data. Note that Linux + * specifically checks for the sequence 0x0 0xff to confirm the + * end of the poll reply, so provide these extra bytes below to + * keep it happy + */ + if (v1s->adb_data_in_index < v1s->adb_data_in_size) { + /* Next data byte */ + *data = v1s->adb_data_in[v1s->adb_data_in_index]; + ms->b |= VIA1B_vADBInt; + } else if (v1s->adb_data_in_index == v1s->adb_data_in_size) { + if (adb_bus->status & ADB_STATUS_BUSTIMEOUT) { + /* Bus timeout (no more data) */ + *data = 0xff; + } else { + /* Return 0x0 after reply */ + *data = 0; + } + ms->b &= ~VIA1B_vADBInt; + } else { + /* Bus timeout (no more data) */ + *data = 0xff; + ms->b &= ~VIA1B_vADBInt; + adb_bus->status = 0; + adb_autopoll_unblock(adb_bus); + } + + trace_via1_adb_receive(state == ADB_STATE_EVEN ? "EVEN" : " ODD", + *data, (ms->b & VIA1B_vADBInt) ? "+" : "-", + adb_bus->status, v1s->adb_data_in_index, + v1s->adb_data_in_size); + + if (v1s->adb_data_in_index <= v1s->adb_data_in_size) { + v1s->adb_data_in_index++; + } + break; + } + + qemu_irq_raise(v1s->adb_data_ready); + break; + } +} + +static void via1_adb_update(MOS6522Q800VIA1State *v1s) +{ + MOS6522State *s = MOS6522(v1s); + int oldstate, state; + + oldstate = (v1s->last_b & VIA1B_vADB_StateMask) >> VIA1B_vADB_StateShift; + state = (s->b & VIA1B_vADB_StateMask) >> VIA1B_vADB_StateShift; + + if (state != oldstate) { + if (s->acr & VIA1ACR_vShiftOut) { + /* output mode */ + adb_via_send(v1s, state, s->sr); + } else { + /* input mode */ + adb_via_receive(v1s, state, &s->sr); + } + } +} + +static void via1_auxmode_update(MOS6522Q800VIA1State *v1s) +{ + MOS6522State *s = MOS6522(v1s); + int oldirq, irq; + + oldirq = (v1s->last_b & VIA1B_vMystery) ? 1 : 0; + irq = (s->b & VIA1B_vMystery) ? 
1 : 0; + + /* Check to see if the A/UX mode bit has changed */ + if (irq != oldirq) { + trace_via1_auxmode(irq); + qemu_set_irq(v1s->auxmode_irq, irq); + } +} + +static uint64_t mos6522_q800_via1_read(void *opaque, hwaddr addr, unsigned size) +{ + MOS6522Q800VIA1State *s = MOS6522_Q800_VIA1(opaque); + MOS6522State *ms = MOS6522(s); + + addr = (addr >> 9) & 0xf; + return mos6522_read(ms, addr, size); +} + +static void mos6522_q800_via1_write(void *opaque, hwaddr addr, uint64_t val, + unsigned size) +{ + MOS6522Q800VIA1State *v1s = MOS6522_Q800_VIA1(opaque); + MOS6522State *ms = MOS6522(v1s); + + addr = (addr >> 9) & 0xf; + mos6522_write(ms, addr, val, size); + + switch (addr) { + case VIA_REG_B: + via1_rtc_update(v1s); + via1_adb_update(v1s); + via1_auxmode_update(v1s); + + v1s->last_b = ms->b; + break; + } +} + +static const MemoryRegionOps mos6522_q800_via1_ops = { + .read = mos6522_q800_via1_read, + .write = mos6522_q800_via1_write, + .endianness = DEVICE_BIG_ENDIAN, + .valid = { + .min_access_size = 1, + .max_access_size = 4, + }, +}; + +static uint64_t mos6522_q800_via2_read(void *opaque, hwaddr addr, unsigned size) +{ + MOS6522Q800VIA2State *s = MOS6522_Q800_VIA2(opaque); + MOS6522State *ms = MOS6522(s); + + addr = (addr >> 9) & 0xf; + return mos6522_read(ms, addr, size); +} + +static void mos6522_q800_via2_write(void *opaque, hwaddr addr, uint64_t val, + unsigned size) +{ + MOS6522Q800VIA2State *s = MOS6522_Q800_VIA2(opaque); + MOS6522State *ms = MOS6522(s); + + addr = (addr >> 9) & 0xf; + mos6522_write(ms, addr, val, size); +} + +static const MemoryRegionOps mos6522_q800_via2_ops = { + .read = mos6522_q800_via2_read, + .write = mos6522_q800_via2_write, + .endianness = DEVICE_BIG_ENDIAN, + .valid = { + .min_access_size = 1, + .max_access_size = 4, + }, +}; + +static void via1_postload_update_cb(void *opaque, bool running, RunState state) +{ + MOS6522Q800VIA1State *v1s = MOS6522_Q800_VIA1(opaque); + + qemu_del_vm_change_state_handler(v1s->vmstate); + v1s->vmstate = NULL; + + pram_update(v1s); +} + +static int via1_post_load(void *opaque, int version_id) +{ + MOS6522Q800VIA1State *v1s = MOS6522_Q800_VIA1(opaque); + + if (v1s->blk) { + v1s->vmstate = qemu_add_vm_change_state_handler( + via1_postload_update_cb, v1s); + } + + return 0; +} + +/* VIA 1 */ +static void mos6522_q800_via1_reset(DeviceState *dev) +{ + MOS6522Q800VIA1State *v1s = MOS6522_Q800_VIA1(dev); + MOS6522State *ms = MOS6522(v1s); + MOS6522DeviceClass *mdc = MOS6522_GET_CLASS(ms); + ADBBusState *adb_bus = &v1s->adb_bus; + + mdc->parent_reset(dev); + + ms->timers[0].frequency = VIA_TIMER_FREQ; + ms->timers[1].frequency = VIA_TIMER_FREQ; + + ms->b = VIA1B_vADB_StateMask | VIA1B_vADBInt | VIA1B_vRTCEnb; + + /* ADB/RTC */ + adb_set_autopoll_enabled(adb_bus, true); + v1s->cmd = REG_EMPTY; + v1s->alt = REG_EMPTY; +} + +static void mos6522_q800_via1_realize(DeviceState *dev, Error **errp) +{ + MOS6522Q800VIA1State *v1s = MOS6522_Q800_VIA1(dev); + ADBBusState *adb_bus = &v1s->adb_bus; + struct tm tm; + int ret; + + v1s->one_second_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL, via1_one_second, + v1s); + via1_one_second_update(v1s); + v1s->sixty_hz_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, via1_sixty_hz, + v1s); + via1_sixty_hz_update(v1s); + + qemu_get_timedate(&tm, 0); + v1s->tick_offset = (uint32_t)mktimegm(&tm) + RTC_OFFSET; + + adb_register_autopoll_callback(adb_bus, adb_via_poll, v1s); + v1s->adb_data_ready = qdev_get_gpio_in(dev, VIA1_IRQ_ADB_READY_BIT); + + if (v1s->blk) { + int64_t len = blk_getlength(v1s->blk); + if (len < 0) { 
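+            /*
+             * blk_getlength() returns a negative errno on failure, so
+             * -len below is the positive error code reported to the user.
+             */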
+ error_setg_errno(errp, -len, + "could not get length of backing image"); + return; + } + ret = blk_set_perm(v1s->blk, + BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE, + BLK_PERM_ALL, errp); + if (ret < 0) { + return; + } + + len = blk_pread(v1s->blk, 0, v1s->PRAM, sizeof(v1s->PRAM)); + if (len != sizeof(v1s->PRAM)) { + error_setg(errp, "can't read PRAM contents"); + return; + } + } +} + +static void mos6522_q800_via1_init(Object *obj) +{ + MOS6522Q800VIA1State *v1s = MOS6522_Q800_VIA1(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(v1s); + + memory_region_init_io(&v1s->via_mem, obj, &mos6522_q800_via1_ops, v1s, + "via1", VIA_SIZE); + sysbus_init_mmio(sbd, &v1s->via_mem); + + /* ADB */ + qbus_init((BusState *)&v1s->adb_bus, sizeof(v1s->adb_bus), + TYPE_ADB_BUS, DEVICE(v1s), "adb.0"); + + qdev_init_gpio_in(DEVICE(obj), via1_irq_request, VIA1_IRQ_NB); + + /* A/UX mode */ + qdev_init_gpio_out(DEVICE(obj), &v1s->auxmode_irq, 1); +} + +static const VMStateDescription vmstate_q800_via1 = { + .name = "q800-via1", + .version_id = 0, + .minimum_version_id = 0, + .post_load = via1_post_load, + .fields = (VMStateField[]) { + VMSTATE_STRUCT(parent_obj, MOS6522Q800VIA1State, 0, vmstate_mos6522, + MOS6522State), + VMSTATE_UINT8(last_b, MOS6522Q800VIA1State), + /* RTC */ + VMSTATE_BUFFER(PRAM, MOS6522Q800VIA1State), + VMSTATE_UINT32(tick_offset, MOS6522Q800VIA1State), + VMSTATE_UINT8(data_out, MOS6522Q800VIA1State), + VMSTATE_INT32(data_out_cnt, MOS6522Q800VIA1State), + VMSTATE_UINT8(data_in, MOS6522Q800VIA1State), + VMSTATE_UINT8(data_in_cnt, MOS6522Q800VIA1State), + VMSTATE_UINT8(cmd, MOS6522Q800VIA1State), + VMSTATE_INT32(wprotect, MOS6522Q800VIA1State), + VMSTATE_INT32(alt, MOS6522Q800VIA1State), + /* ADB */ + VMSTATE_INT32(adb_data_in_size, MOS6522Q800VIA1State), + VMSTATE_INT32(adb_data_in_index, MOS6522Q800VIA1State), + VMSTATE_INT32(adb_data_out_index, MOS6522Q800VIA1State), + VMSTATE_BUFFER(adb_data_in, MOS6522Q800VIA1State), + VMSTATE_BUFFER(adb_data_out, MOS6522Q800VIA1State), + VMSTATE_UINT8(adb_autopoll_cmd, MOS6522Q800VIA1State), + /* Timers */ + VMSTATE_TIMER_PTR(one_second_timer, MOS6522Q800VIA1State), + VMSTATE_INT64(next_second, MOS6522Q800VIA1State), + VMSTATE_TIMER_PTR(sixty_hz_timer, MOS6522Q800VIA1State), + VMSTATE_INT64(next_sixty_hz, MOS6522Q800VIA1State), + VMSTATE_END_OF_LIST() + } +}; + +static Property mos6522_q800_via1_properties[] = { + DEFINE_PROP_DRIVE("drive", MOS6522Q800VIA1State, blk), + DEFINE_PROP_END_OF_LIST(), +}; + +static void mos6522_q800_via1_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + + dc->realize = mos6522_q800_via1_realize; + dc->reset = mos6522_q800_via1_reset; + dc->vmsd = &vmstate_q800_via1; + device_class_set_props(dc, mos6522_q800_via1_properties); +} + +static const TypeInfo mos6522_q800_via1_type_info = { + .name = TYPE_MOS6522_Q800_VIA1, + .parent = TYPE_MOS6522, + .instance_size = sizeof(MOS6522Q800VIA1State), + .instance_init = mos6522_q800_via1_init, + .class_init = mos6522_q800_via1_class_init, +}; + +/* VIA 2 */ +static void mos6522_q800_via2_portB_write(MOS6522State *s) +{ + if (s->dirb & VIA2B_vPower && (s->b & VIA2B_vPower) == 0) { + /* shutdown */ + qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN); + } +} + +static void mos6522_q800_via2_reset(DeviceState *dev) +{ + MOS6522State *ms = MOS6522(dev); + MOS6522DeviceClass *mdc = MOS6522_GET_CLASS(ms); + + mdc->parent_reset(dev); + + ms->timers[0].frequency = VIA_TIMER_FREQ; + ms->timers[1].frequency = VIA_TIMER_FREQ; + + ms->dirb = 0; + ms->b = 0; + 
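+    /*
+     * Port A carries the active-low Nubus slot IRQ inputs (see
+     * via2_nubus_irq_request() below), so reset drives them all
+     * high, i.e. deasserted.
+     */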
ms->dira = 0; + ms->a = 0x7f; +} + +static void via2_nubus_irq_request(void *opaque, int irq, int level) +{ + MOS6522Q800VIA2State *v2s = opaque; + MOS6522State *s = MOS6522(v2s); + MOS6522DeviceClass *mdc = MOS6522_GET_CLASS(s); + + if (level) { + /* Port A nubus IRQ inputs are active LOW */ + s->a &= ~(1 << irq); + s->ifr |= 1 << VIA2_IRQ_NUBUS_BIT; + } else { + s->a |= (1 << irq); + s->ifr &= ~(1 << VIA2_IRQ_NUBUS_BIT); + } + + mdc->update_irq(s); +} + +static void mos6522_q800_via2_init(Object *obj) +{ + MOS6522Q800VIA2State *v2s = MOS6522_Q800_VIA2(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(v2s); + + memory_region_init_io(&v2s->via_mem, obj, &mos6522_q800_via2_ops, v2s, + "via2", VIA_SIZE); + sysbus_init_mmio(sbd, &v2s->via_mem); + + qdev_init_gpio_in(DEVICE(obj), via2_irq_request, VIA2_IRQ_NB); + + qdev_init_gpio_in_named(DEVICE(obj), via2_nubus_irq_request, "nubus-irq", + VIA2_NUBUS_IRQ_NB); +} + +static const VMStateDescription vmstate_q800_via2 = { + .name = "q800-via2", + .version_id = 0, + .minimum_version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_STRUCT(parent_obj, MOS6522Q800VIA2State, 0, vmstate_mos6522, + MOS6522State), + VMSTATE_END_OF_LIST() + } +}; + +static void mos6522_q800_via2_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + MOS6522DeviceClass *mdc = MOS6522_CLASS(oc); + + dc->reset = mos6522_q800_via2_reset; + dc->vmsd = &vmstate_q800_via2; + mdc->portB_write = mos6522_q800_via2_portB_write; +} + +static const TypeInfo mos6522_q800_via2_type_info = { + .name = TYPE_MOS6522_Q800_VIA2, + .parent = TYPE_MOS6522, + .instance_size = sizeof(MOS6522Q800VIA2State), + .instance_init = mos6522_q800_via2_init, + .class_init = mos6522_q800_via2_class_init, +}; + +static void mac_via_register_types(void) +{ + type_register_static(&mos6522_q800_via1_type_info); + type_register_static(&mos6522_q800_via2_type_info); +} + +type_init(mac_via_register_types); diff --git a/hw/misc/macio/Kconfig b/hw/misc/macio/Kconfig new file mode 100644 index 000000000..c6caeb672 --- /dev/null +++ b/hw/misc/macio/Kconfig @@ -0,0 +1,11 @@ +config CUDA + bool + +config MAC_PMU + bool + +config MAC_DBDMA + bool + +config MACIO_GPIO + bool diff --git a/hw/misc/macio/cuda.c b/hw/misc/macio/cuda.c new file mode 100644 index 000000000..e917a6a09 --- /dev/null +++ b/hw/misc/macio/cuda.c @@ -0,0 +1,629 @@ +/* + * QEMU PowerMac CUDA device support + * + * Copyright (c) 2004-2007 Fabrice Bellard + * Copyright (c) 2007 Jocelyn Mayer + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu-common.h"
+#include "hw/ppc/mac.h"
+#include "hw/qdev-properties.h"
+#include "migration/vmstate.h"
+#include "hw/input/adb.h"
+#include "hw/misc/mos6522.h"
+#include "hw/misc/macio/cuda.h"
+#include "qapi/error.h"
+#include "qemu/timer.h"
+#include "sysemu/runstate.h"
+#include "qemu/cutils.h"
+#include "qemu/log.h"
+#include "qemu/module.h"
+#include "trace.h"
+
+/* Bits in B data register: all active low */
+#define TREQ 0x08 /* Transfer request (input) */
+#define TACK 0x10 /* Transfer acknowledge (output) */
+#define TIP  0x20 /* Transfer in progress (output) */
+
+/* commands (1st byte) */
+#define ADB_PACKET    0
+#define CUDA_PACKET   1
+#define ERROR_PACKET  2
+#define TIMER_PACKET  3
+#define POWER_PACKET  4
+#define MACIIC_PACKET 5
+#define PMU_PACKET    6
+
+#define CUDA_TIMER_FREQ (4700000 / 6)
+
+/* CUDA returns time_t's offset from Jan 1, 1904, not 1970 */
+#define RTC_OFFSET 2082844800
+
+static void cuda_receive_packet_from_host(CUDAState *s,
+                                          const uint8_t *data, int len);
+
+/* MacOS uses timer 1 for calibration on startup, so we use
+ * the timebase frequency and cuda_get_counter_value() with
+ * cuda_get_load_time() to steer MacOS into calibrating its timers
+ * correctly for both TCG and KVM (see commit b981289c49 "PPC: Cuda: Use cuda
+ * timer to expose tbfreq to guest" for more information) */
+
+static uint64_t cuda_get_counter_value(MOS6522State *s, MOS6522Timer *ti)
+{
+    MOS6522CUDAState *mcs = container_of(s, MOS6522CUDAState, parent_obj);
+    CUDAState *cs = container_of(mcs, CUDAState, mos6522_cuda);
+
+    /* Reverse of the tb calculation algorithm that Mac OS X uses on bootup */
+    uint64_t tb_diff = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
+                                cs->tb_frequency, NANOSECONDS_PER_SECOND) -
+                       ti->load_time;
+
+    return (tb_diff * 0xBF401675E5DULL) / (cs->tb_frequency << 24);
+}
+
+static uint64_t cuda_get_load_time(MOS6522State *s, MOS6522Timer *ti)
+{
+    MOS6522CUDAState *mcs = container_of(s, MOS6522CUDAState, parent_obj);
+    CUDAState *cs = container_of(mcs, CUDAState, mos6522_cuda);
+
+    uint64_t load_time = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
+                                  cs->tb_frequency, NANOSECONDS_PER_SECOND);
+    return load_time;
+}
+
+static void cuda_set_sr_int(void *opaque)
+{
+    CUDAState *s = opaque;
+    MOS6522CUDAState *mcs = &s->mos6522_cuda;
+    MOS6522State *ms = MOS6522(mcs);
+    MOS6522DeviceClass *mdc = MOS6522_GET_CLASS(ms);
+
+    mdc->set_sr_int(ms);
+}
+
+static void cuda_delay_set_sr_int(CUDAState *s)
+{
+    int64_t expire;
+
+    trace_cuda_delay_set_sr_int();
+
+    expire = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + s->sr_delay_ns;
+    timer_mod(s->sr_delay_timer, expire);
+}
+
+/* NOTE: TIP and TREQ are negated */
+static void cuda_update(CUDAState *s)
+{
+    MOS6522CUDAState *mcs = &s->mos6522_cuda;
+    MOS6522State *ms = MOS6522(mcs);
+    ADBBusState *adb_bus = &s->adb_bus;
+    int packet_received, len;
+
+    packet_received = 0;
+    if (!(ms->b & TIP)) {
+        /* transfer requested from host */
+
+        if (ms->acr & SR_OUT) {
+            /* data output */
+            if ((ms->b & (TACK | TIP)) != (s->last_b & (TACK | TIP))) {
+                if (s->data_out_index < sizeof(s->data_out)) {
+                    if (s->data_out_index == 0) {
+                        adb_autopoll_block(adb_bus);
+                    }
+                    trace_cuda_data_send(ms->sr);
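+                    /*
+                     * One byte moves through the 6522 shift register per
+                     * TACK/TIP transition (the handshake lines are active
+                     * low), and each byte is acknowledged with a delayed
+                     * shift-register interrupt below.
+                     */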
s->data_out[s->data_out_index++] = ms->sr; + cuda_delay_set_sr_int(s); + } + } + } else { + if (s->data_in_index < s->data_in_size) { + /* data input */ + if ((ms->b & (TACK | TIP)) != (s->last_b & (TACK | TIP))) { + ms->sr = s->data_in[s->data_in_index++]; + trace_cuda_data_recv(ms->sr); + /* indicate end of transfer */ + if (s->data_in_index >= s->data_in_size) { + ms->b = (ms->b | TREQ); + adb_autopoll_unblock(adb_bus); + } + cuda_delay_set_sr_int(s); + } + } + } + } else { + /* no transfer requested: handle sync case */ + if ((s->last_b & TIP) && (ms->b & TACK) != (s->last_b & TACK)) { + /* update TREQ state each time TACK change state */ + if (ms->b & TACK) { + ms->b = (ms->b | TREQ); + } else { + ms->b = (ms->b & ~TREQ); + } + cuda_delay_set_sr_int(s); + } else { + if (!(s->last_b & TIP)) { + /* handle end of host to cuda transfer */ + packet_received = (s->data_out_index > 0); + /* always an IRQ at the end of transfer */ + cuda_delay_set_sr_int(s); + } + /* signal if there is data to read */ + if (s->data_in_index < s->data_in_size) { + ms->b = (ms->b & ~TREQ); + } + } + } + + s->last_acr = ms->acr; + s->last_b = ms->b; + + /* NOTE: cuda_receive_packet_from_host() can call cuda_update() + recursively */ + if (packet_received) { + len = s->data_out_index; + s->data_out_index = 0; + cuda_receive_packet_from_host(s, s->data_out, len); + } +} + +static void cuda_send_packet_to_host(CUDAState *s, + const uint8_t *data, int len) +{ + int i; + + trace_cuda_packet_send(len); + for (i = 0; i < len; i++) { + trace_cuda_packet_send_data(i, data[i]); + } + + memcpy(s->data_in, data, len); + s->data_in_size = len; + s->data_in_index = 0; + cuda_update(s); + cuda_delay_set_sr_int(s); +} + +static void cuda_adb_poll(void *opaque) +{ + CUDAState *s = opaque; + ADBBusState *adb_bus = &s->adb_bus; + uint8_t obuf[ADB_MAX_OUT_LEN + 2]; + int olen; + + olen = adb_poll(adb_bus, obuf + 2, adb_bus->autopoll_mask); + if (olen > 0) { + obuf[0] = ADB_PACKET; + obuf[1] = 0x40; /* polled data */ + cuda_send_packet_to_host(s, obuf, olen + 2); + } +} + +/* description of commands */ +typedef struct CudaCommand { + uint8_t command; + const char *name; + bool (*handler)(CUDAState *s, + const uint8_t *in_args, int in_len, + uint8_t *out_args, int *out_len); +} CudaCommand; + +static bool cuda_cmd_autopoll(CUDAState *s, + const uint8_t *in_data, int in_len, + uint8_t *out_data, int *out_len) +{ + ADBBusState *adb_bus = &s->adb_bus; + bool autopoll; + + if (in_len != 1) { + return false; + } + + autopoll = (in_data[0] != 0) ? 
true : false; + + adb_set_autopoll_enabled(adb_bus, autopoll); + return true; +} + +static bool cuda_cmd_set_autorate(CUDAState *s, + const uint8_t *in_data, int in_len, + uint8_t *out_data, int *out_len) +{ + ADBBusState *adb_bus = &s->adb_bus; + + if (in_len != 1) { + return false; + } + + /* we don't want a period of 0 ms */ + /* FIXME: check what real hardware does */ + if (in_data[0] == 0) { + return false; + } + + adb_set_autopoll_rate_ms(adb_bus, in_data[0]); + return true; +} + +static bool cuda_cmd_set_device_list(CUDAState *s, + const uint8_t *in_data, int in_len, + uint8_t *out_data, int *out_len) +{ + ADBBusState *adb_bus = &s->adb_bus; + uint16_t mask; + + if (in_len != 2) { + return false; + } + + mask = (((uint16_t)in_data[0]) << 8) | in_data[1]; + + adb_set_autopoll_mask(adb_bus, mask); + return true; +} + +static bool cuda_cmd_powerdown(CUDAState *s, + const uint8_t *in_data, int in_len, + uint8_t *out_data, int *out_len) +{ + if (in_len != 0) { + return false; + } + + qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN); + return true; +} + +static bool cuda_cmd_reset_system(CUDAState *s, + const uint8_t *in_data, int in_len, + uint8_t *out_data, int *out_len) +{ + if (in_len != 0) { + return false; + } + + qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); + return true; +} + +static bool cuda_cmd_set_file_server_flag(CUDAState *s, + const uint8_t *in_data, int in_len, + uint8_t *out_data, int *out_len) +{ + if (in_len != 1) { + return false; + } + + qemu_log_mask(LOG_UNIMP, + "CUDA: unimplemented command FILE_SERVER_FLAG %d\n", + in_data[0]); + return true; +} + +static bool cuda_cmd_set_power_message(CUDAState *s, + const uint8_t *in_data, int in_len, + uint8_t *out_data, int *out_len) +{ + if (in_len != 1) { + return false; + } + + qemu_log_mask(LOG_UNIMP, + "CUDA: unimplemented command SET_POWER_MESSAGE %d\n", + in_data[0]); + return true; +} + +static bool cuda_cmd_get_time(CUDAState *s, + const uint8_t *in_data, int in_len, + uint8_t *out_data, int *out_len) +{ + uint32_t ti; + + if (in_len != 0) { + return false; + } + + ti = s->tick_offset + (qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + / NANOSECONDS_PER_SECOND); + out_data[0] = ti >> 24; + out_data[1] = ti >> 16; + out_data[2] = ti >> 8; + out_data[3] = ti; + *out_len = 4; + return true; +} + +static bool cuda_cmd_set_time(CUDAState *s, + const uint8_t *in_data, int in_len, + uint8_t *out_data, int *out_len) +{ + uint32_t ti; + + if (in_len != 4) { + return false; + } + + ti = (((uint32_t)in_data[0]) << 24) + (((uint32_t)in_data[1]) << 16) + + (((uint32_t)in_data[2]) << 8) + in_data[3]; + s->tick_offset = ti - (qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + / NANOSECONDS_PER_SECOND); + return true; +} + +static const CudaCommand handlers[] = { + { CUDA_AUTOPOLL, "AUTOPOLL", cuda_cmd_autopoll }, + { CUDA_SET_AUTO_RATE, "SET_AUTO_RATE", cuda_cmd_set_autorate }, + { CUDA_SET_DEVICE_LIST, "SET_DEVICE_LIST", cuda_cmd_set_device_list }, + { CUDA_POWERDOWN, "POWERDOWN", cuda_cmd_powerdown }, + { CUDA_RESET_SYSTEM, "RESET_SYSTEM", cuda_cmd_reset_system }, + { CUDA_FILE_SERVER_FLAG, "FILE_SERVER_FLAG", + cuda_cmd_set_file_server_flag }, + { CUDA_SET_POWER_MESSAGES, "SET_POWER_MESSAGES", + cuda_cmd_set_power_message }, + { CUDA_GET_TIME, "GET_TIME", cuda_cmd_get_time }, + { CUDA_SET_TIME, "SET_TIME", cuda_cmd_set_time }, +}; + +static void cuda_receive_packet(CUDAState *s, + const uint8_t *data, int len) +{ + uint8_t obuf[16] = { CUDA_PACKET, 0, data[0] }; + int i, out_len = 0; + + for (i = 0; i < ARRAY_SIZE(handlers); 
i++) { + const CudaCommand *desc = &handlers[i]; + if (desc->command == data[0]) { + trace_cuda_receive_packet_cmd(desc->name); + out_len = 0; + if (desc->handler(s, data + 1, len - 1, obuf + 3, &out_len)) { + cuda_send_packet_to_host(s, obuf, 3 + out_len); + } else { + qemu_log_mask(LOG_GUEST_ERROR, + "CUDA: %s: wrong parameters %d\n", + desc->name, len); + obuf[0] = ERROR_PACKET; + obuf[1] = 0x5; /* bad parameters */ + obuf[2] = CUDA_PACKET; + obuf[3] = data[0]; + cuda_send_packet_to_host(s, obuf, 4); + } + return; + } + } + + qemu_log_mask(LOG_GUEST_ERROR, "CUDA: unknown command 0x%02x\n", data[0]); + obuf[0] = ERROR_PACKET; + obuf[1] = 0x2; /* unknown command */ + obuf[2] = CUDA_PACKET; + obuf[3] = data[0]; + cuda_send_packet_to_host(s, obuf, 4); +} + +static void cuda_receive_packet_from_host(CUDAState *s, + const uint8_t *data, int len) +{ + int i; + + trace_cuda_packet_receive(len); + for (i = 0; i < len; i++) { + trace_cuda_packet_receive_data(i, data[i]); + } + + switch(data[0]) { + case ADB_PACKET: + { + uint8_t obuf[ADB_MAX_OUT_LEN + 3]; + int olen; + olen = adb_request(&s->adb_bus, obuf + 2, data + 1, len - 1); + if (olen > 0) { + obuf[0] = ADB_PACKET; + obuf[1] = 0x00; + cuda_send_packet_to_host(s, obuf, olen + 2); + } else { + /* error */ + obuf[0] = ADB_PACKET; + obuf[1] = -olen; + obuf[2] = data[1]; + olen = 0; + cuda_send_packet_to_host(s, obuf, olen + 3); + } + } + break; + case CUDA_PACKET: + cuda_receive_packet(s, data + 1, len - 1); + break; + } +} + +static uint64_t mos6522_cuda_read(void *opaque, hwaddr addr, unsigned size) +{ + CUDAState *s = opaque; + MOS6522CUDAState *mcs = &s->mos6522_cuda; + MOS6522State *ms = MOS6522(mcs); + + addr = (addr >> 9) & 0xf; + return mos6522_read(ms, addr, size); +} + +static void mos6522_cuda_write(void *opaque, hwaddr addr, uint64_t val, + unsigned size) +{ + CUDAState *s = opaque; + MOS6522CUDAState *mcs = &s->mos6522_cuda; + MOS6522State *ms = MOS6522(mcs); + + addr = (addr >> 9) & 0xf; + mos6522_write(ms, addr, val, size); +} + +static const MemoryRegionOps mos6522_cuda_ops = { + .read = mos6522_cuda_read, + .write = mos6522_cuda_write, + .endianness = DEVICE_BIG_ENDIAN, + .valid = { + .min_access_size = 1, + .max_access_size = 1, + }, +}; + +static const VMStateDescription vmstate_cuda = { + .name = "cuda", + .version_id = 6, + .minimum_version_id = 6, + .fields = (VMStateField[]) { + VMSTATE_STRUCT(mos6522_cuda.parent_obj, CUDAState, 0, vmstate_mos6522, + MOS6522State), + VMSTATE_UINT8(last_b, CUDAState), + VMSTATE_UINT8(last_acr, CUDAState), + VMSTATE_INT32(data_in_size, CUDAState), + VMSTATE_INT32(data_in_index, CUDAState), + VMSTATE_INT32(data_out_index, CUDAState), + VMSTATE_BUFFER(data_in, CUDAState), + VMSTATE_BUFFER(data_out, CUDAState), + VMSTATE_UINT32(tick_offset, CUDAState), + VMSTATE_TIMER_PTR(sr_delay_timer, CUDAState), + VMSTATE_END_OF_LIST() + } +}; + +static void cuda_reset(DeviceState *dev) +{ + CUDAState *s = CUDA(dev); + ADBBusState *adb_bus = &s->adb_bus; + + s->data_in_size = 0; + s->data_in_index = 0; + s->data_out_index = 0; + + adb_set_autopoll_enabled(adb_bus, false); +} + +static void cuda_realize(DeviceState *dev, Error **errp) +{ + CUDAState *s = CUDA(dev); + SysBusDevice *sbd; + ADBBusState *adb_bus = &s->adb_bus; + struct tm tm; + + if (!sysbus_realize(SYS_BUS_DEVICE(&s->mos6522_cuda), errp)) { + return; + } + + /* Pass IRQ from 6522 */ + sbd = SYS_BUS_DEVICE(s); + sysbus_pass_irq(sbd, SYS_BUS_DEVICE(&s->mos6522_cuda)); + + qemu_get_timedate(&tm, 0); + s->tick_offset = (uint32_t)mktimegm(&tm) + 
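+                     /*
+                      * RTC_OFFSET rebases the 1970 Unix epoch to the Mac
+                      * 1904 epoch: 66 years including 17 leap days is
+                      * 24107 days, and 24107 * 86400 = 2082844800 seconds.
+                      */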
RTC_OFFSET; + + s->sr_delay_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, cuda_set_sr_int, s); + s->sr_delay_ns = 20 * SCALE_US; + + adb_register_autopoll_callback(adb_bus, cuda_adb_poll, s); +} + +static void cuda_init(Object *obj) +{ + CUDAState *s = CUDA(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + + object_initialize_child(obj, "mos6522-cuda", &s->mos6522_cuda, + TYPE_MOS6522_CUDA); + + memory_region_init_io(&s->mem, obj, &mos6522_cuda_ops, s, "cuda", 0x2000); + sysbus_init_mmio(sbd, &s->mem); + + qbus_init(&s->adb_bus, sizeof(s->adb_bus), TYPE_ADB_BUS, + DEVICE(obj), "adb.0"); +} + +static Property cuda_properties[] = { + DEFINE_PROP_UINT64("timebase-frequency", CUDAState, tb_frequency, 0), + DEFINE_PROP_END_OF_LIST() +}; + +static void cuda_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + + dc->realize = cuda_realize; + dc->reset = cuda_reset; + dc->vmsd = &vmstate_cuda; + device_class_set_props(dc, cuda_properties); + set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); +} + +static const TypeInfo cuda_type_info = { + .name = TYPE_CUDA, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(CUDAState), + .instance_init = cuda_init, + .class_init = cuda_class_init, +}; + +static void mos6522_cuda_portB_write(MOS6522State *s) +{ + MOS6522CUDAState *mcs = container_of(s, MOS6522CUDAState, parent_obj); + CUDAState *cs = container_of(mcs, CUDAState, mos6522_cuda); + + cuda_update(cs); +} + +static void mos6522_cuda_reset(DeviceState *dev) +{ + MOS6522State *ms = MOS6522(dev); + MOS6522DeviceClass *mdc = MOS6522_GET_CLASS(ms); + + mdc->parent_reset(dev); + + ms->timers[0].frequency = CUDA_TIMER_FREQ; + ms->timers[1].frequency = (SCALE_US * 6000) / 4700; +} + +static void mos6522_cuda_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + MOS6522DeviceClass *mdc = MOS6522_CLASS(oc); + + dc->reset = mos6522_cuda_reset; + mdc->portB_write = mos6522_cuda_portB_write; + mdc->get_timer1_counter_value = cuda_get_counter_value; + mdc->get_timer2_counter_value = cuda_get_counter_value; + mdc->get_timer1_load_time = cuda_get_load_time; + mdc->get_timer2_load_time = cuda_get_load_time; +} + +static const TypeInfo mos6522_cuda_type_info = { + .name = TYPE_MOS6522_CUDA, + .parent = TYPE_MOS6522, + .instance_size = sizeof(MOS6522CUDAState), + .class_init = mos6522_cuda_class_init, +}; + +static void cuda_register_types(void) +{ + type_register_static(&mos6522_cuda_type_info); + type_register_static(&cuda_type_info); +} + +type_init(cuda_register_types) diff --git a/hw/misc/macio/gpio.c b/hw/misc/macio/gpio.c new file mode 100644 index 000000000..b1bcf830c --- /dev/null +++ b/hw/misc/macio/gpio.c @@ -0,0 +1,219 @@ +/* + * PowerMac NewWorld MacIO GPIO emulation + * + * Copyright (c) 2016 Benjamin Herrenschmidt + * Copyright (c) 2018 Mark Cave-Ayland + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/ppc/mac.h"
+#include "hw/qdev-properties.h"
+#include "migration/vmstate.h"
+#include "hw/misc/macio/macio.h"
+#include "hw/misc/macio/gpio.h"
+#include "hw/nmi.h"
+#include "qemu/log.h"
+#include "qemu/module.h"
+#include "trace.h"
+
+
+void macio_set_gpio(MacIOGPIOState *s, uint32_t gpio, bool state)
+{
+    uint8_t new_reg;
+
+    trace_macio_set_gpio(gpio, state);
+
+    if (s->gpio_regs[gpio] & 4) {
+        qemu_log_mask(LOG_GUEST_ERROR,
+                      "GPIO: Setting GPIO %d while it's an output\n", gpio);
+    }
+
+    new_reg = s->gpio_regs[gpio] & ~2;
+    if (state) {
+        new_reg |= 2;
+    }
+
+    if (new_reg == s->gpio_regs[gpio]) {
+        return;
+    }
+
+    s->gpio_regs[gpio] = new_reg;
+
+    /*
+     * Note that we probably need to get access to the MPIC config to
+     * decode polarity since qemu always uses "raise" regardless.
+     *
+     * For now, we hard wire known GPIOs
+     */
+
+    switch (gpio) {
+    case 1:
+        /* Level low */
+        if (!state) {
+            trace_macio_gpio_irq_assert(gpio);
+            qemu_irq_raise(s->gpio_extirqs[gpio]);
+        } else {
+            trace_macio_gpio_irq_deassert(gpio);
+            qemu_irq_lower(s->gpio_extirqs[gpio]);
+        }
+        break;
+
+    case 9:
+        /* Edge, triggered by NMI below */
+        if (state) {
+            trace_macio_gpio_irq_assert(gpio);
+            qemu_irq_raise(s->gpio_extirqs[gpio]);
+        } else {
+            trace_macio_gpio_irq_deassert(gpio);
+            qemu_irq_lower(s->gpio_extirqs[gpio]);
+        }
+        break;
+
+    default:
+        qemu_log_mask(LOG_UNIMP, "GPIO: setting unimplemented GPIO %d\n", gpio);
+    }
+}
+
+static void macio_gpio_write(void *opaque, hwaddr addr, uint64_t value,
+                             unsigned size)
+{
+    MacIOGPIOState *s = opaque;
+    uint8_t ibit;
+
+    trace_macio_gpio_write(addr, value);
+
+    /* Levels regs are read-only */
+    if (addr < 8) {
+        return;
+    }
+
+    addr -= 8;
+    if (addr < 36) {
+        value &= ~2;
+
+        if (value & 4) {
+            ibit = (value & 1) << 1;
+        } else {
+            ibit = s->gpio_regs[addr] & 2;
+        }
+
+        s->gpio_regs[addr] = value | ibit;
+    }
+}
+
+static uint64_t macio_gpio_read(void *opaque, hwaddr addr, unsigned size)
+{
+    MacIOGPIOState *s = opaque;
+    uint64_t val = 0;
+
+    /* Levels regs */
+    if (addr < 8) {
+        val = s->gpio_levels[addr];
+    } else {
+        addr -= 8;
+
+        if (addr < 36) {
+            val = s->gpio_regs[addr];
+        }
+    }
+
+    trace_macio_gpio_write(addr, val);
+    return val;
+}
+
+static const MemoryRegionOps macio_gpio_ops = {
+    .read = macio_gpio_read,
+    .write = macio_gpio_write,
+    .endianness = DEVICE_LITTLE_ENDIAN,
+    .impl = {
+        .min_access_size = 1,
+        .max_access_size = 1,
+    },
+};
+
+static void macio_gpio_init(Object *obj)
+{
+    SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
+    MacIOGPIOState *s = MACIO_GPIO(obj);
+    int i;
+
+    for (i = 0; i < 10; i++) {
+        sysbus_init_irq(sbd, &s->gpio_extirqs[i]);
+    }
+
+    memory_region_init_io(&s->gpiomem, OBJECT(s), &macio_gpio_ops, obj,
+                          "gpio", 0x30);
+    sysbus_init_mmio(sbd, &s->gpiomem);
+}
+
+static const VMStateDescription vmstate_macio_gpio = {
+    .name = "macio_gpio",
+    .version_id = 0,
+    .minimum_version_id = 0,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT8_ARRAY(gpio_levels, MacIOGPIOState, 8),
+        VMSTATE_UINT8_ARRAY(gpio_regs,
MacIOGPIOState, 36), + VMSTATE_END_OF_LIST() + } +}; + +static void macio_gpio_reset(DeviceState *dev) +{ + MacIOGPIOState *s = MACIO_GPIO(dev); + + /* GPIO 1 is up by default */ + macio_set_gpio(s, 1, true); +} + +static void macio_gpio_nmi(NMIState *n, int cpu_index, Error **errp) +{ + macio_set_gpio(MACIO_GPIO(n), 9, true); + macio_set_gpio(MACIO_GPIO(n), 9, false); +} + +static void macio_gpio_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + NMIClass *nc = NMI_CLASS(oc); + + dc->reset = macio_gpio_reset; + dc->vmsd = &vmstate_macio_gpio; + nc->nmi_monitor_handler = macio_gpio_nmi; +} + +static const TypeInfo macio_gpio_init_info = { + .name = TYPE_MACIO_GPIO, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(MacIOGPIOState), + .instance_init = macio_gpio_init, + .class_init = macio_gpio_class_init, + .interfaces = (InterfaceInfo[]) { + { TYPE_NMI }, + { } + }, +}; + +static void macio_gpio_register_types(void) +{ + type_register_static(&macio_gpio_init_info); +} + +type_init(macio_gpio_register_types) diff --git a/hw/misc/macio/mac_dbdma.c b/hw/misc/macio/mac_dbdma.c new file mode 100644 index 000000000..e220f1a92 --- /dev/null +++ b/hw/misc/macio/mac_dbdma.c @@ -0,0 +1,940 @@ +/* + * PowerMac descriptor-based DMA emulation + * + * Copyright (c) 2005-2007 Fabrice Bellard + * Copyright (c) 2007 Jocelyn Mayer + * Copyright (c) 2009 Laurent Vivier + * + * some parts from linux-2.6.28, arch/powerpc/include/asm/dbdma.h + * + * Definitions for using the Apple Descriptor-Based DMA controller + * in Power Macintosh computers. + * + * Copyright (C) 1996 Paul Mackerras. + * + * some parts from mol 0.9.71 + * + * Descriptor based DMA emulation + * + * Copyright (C) 1998-2004 Samuel Rydh (samuel@ibrium.se) + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "hw/irq.h" +#include "hw/ppc/mac_dbdma.h" +#include "migration/vmstate.h" +#include "qemu/main-loop.h" +#include "qemu/module.h" +#include "qemu/log.h" +#include "sysemu/dma.h" + +/* debug DBDMA */ +#define DEBUG_DBDMA 0 +#define DEBUG_DBDMA_CHANMASK ((1ull << DBDMA_CHANNELS) - 1) + +#define DBDMA_DPRINTF(fmt, ...) do { \ + if (DEBUG_DBDMA) { \ + printf("DBDMA: " fmt , ## __VA_ARGS__); \ + } \ +} while (0) + +#define DBDMA_DPRINTFCH(ch, fmt, ...) 
do { \ + if (DEBUG_DBDMA) { \ + if ((1ul << (ch)->channel) & DEBUG_DBDMA_CHANMASK) { \ + printf("DBDMA[%02x]: " fmt , (ch)->channel, ## __VA_ARGS__); \ + } \ + } \ +} while (0) + +/* + */ + +static DBDMAState *dbdma_from_ch(DBDMA_channel *ch) +{ + return container_of(ch, DBDMAState, channels[ch->channel]); +} + +#if DEBUG_DBDMA +static void dump_dbdma_cmd(DBDMA_channel *ch, dbdma_cmd *cmd) +{ + DBDMA_DPRINTFCH(ch, "dbdma_cmd %p\n", cmd); + DBDMA_DPRINTFCH(ch, " req_count 0x%04x\n", le16_to_cpu(cmd->req_count)); + DBDMA_DPRINTFCH(ch, " command 0x%04x\n", le16_to_cpu(cmd->command)); + DBDMA_DPRINTFCH(ch, " phy_addr 0x%08x\n", le32_to_cpu(cmd->phy_addr)); + DBDMA_DPRINTFCH(ch, " cmd_dep 0x%08x\n", le32_to_cpu(cmd->cmd_dep)); + DBDMA_DPRINTFCH(ch, " res_count 0x%04x\n", le16_to_cpu(cmd->res_count)); + DBDMA_DPRINTFCH(ch, " xfer_status 0x%04x\n", + le16_to_cpu(cmd->xfer_status)); +} +#else +static void dump_dbdma_cmd(DBDMA_channel *ch, dbdma_cmd *cmd) +{ +} +#endif +static void dbdma_cmdptr_load(DBDMA_channel *ch) +{ + DBDMA_DPRINTFCH(ch, "dbdma_cmdptr_load 0x%08x\n", + ch->regs[DBDMA_CMDPTR_LO]); + dma_memory_read(&address_space_memory, ch->regs[DBDMA_CMDPTR_LO], + &ch->current, sizeof(dbdma_cmd)); +} + +static void dbdma_cmdptr_save(DBDMA_channel *ch) +{ + DBDMA_DPRINTFCH(ch, "-> update 0x%08x stat=0x%08x, res=0x%04x\n", + ch->regs[DBDMA_CMDPTR_LO], + le16_to_cpu(ch->current.xfer_status), + le16_to_cpu(ch->current.res_count)); + dma_memory_write(&address_space_memory, ch->regs[DBDMA_CMDPTR_LO], + &ch->current, sizeof(dbdma_cmd)); +} + +static void kill_channel(DBDMA_channel *ch) +{ + DBDMA_DPRINTFCH(ch, "kill_channel\n"); + + ch->regs[DBDMA_STATUS] |= DEAD; + ch->regs[DBDMA_STATUS] &= ~ACTIVE; + + qemu_irq_raise(ch->irq); +} + +static void conditional_interrupt(DBDMA_channel *ch) +{ + dbdma_cmd *current = &ch->current; + uint16_t intr; + uint16_t sel_mask, sel_value; + uint32_t status; + int cond; + + DBDMA_DPRINTFCH(ch, "%s\n", __func__); + + intr = le16_to_cpu(current->command) & INTR_MASK; + + switch(intr) { + case INTR_NEVER: /* don't interrupt */ + return; + case INTR_ALWAYS: /* always interrupt */ + qemu_irq_raise(ch->irq); + DBDMA_DPRINTFCH(ch, "%s: raise\n", __func__); + return; + } + + status = ch->regs[DBDMA_STATUS] & DEVSTAT; + + sel_mask = (ch->regs[DBDMA_INTR_SEL] >> 16) & 0x0f; + sel_value = ch->regs[DBDMA_INTR_SEL] & 0x0f; + + cond = (status & sel_mask) == (sel_value & sel_mask); + + switch(intr) { + case INTR_IFSET: /* intr if condition bit is 1 */ + if (cond) { + qemu_irq_raise(ch->irq); + DBDMA_DPRINTFCH(ch, "%s: raise\n", __func__); + } + return; + case INTR_IFCLR: /* intr if condition bit is 0 */ + if (!cond) { + qemu_irq_raise(ch->irq); + DBDMA_DPRINTFCH(ch, "%s: raise\n", __func__); + } + return; + } +} + +static int conditional_wait(DBDMA_channel *ch) +{ + dbdma_cmd *current = &ch->current; + uint16_t wait; + uint16_t sel_mask, sel_value; + uint32_t status; + int cond; + int res = 0; + + wait = le16_to_cpu(current->command) & WAIT_MASK; + switch(wait) { + case WAIT_NEVER: /* don't wait */ + return 0; + case WAIT_ALWAYS: /* always wait */ + DBDMA_DPRINTFCH(ch, " [WAIT_ALWAYS]\n"); + return 1; + } + + status = ch->regs[DBDMA_STATUS] & DEVSTAT; + + sel_mask = (ch->regs[DBDMA_WAIT_SEL] >> 16) & 0x0f; + sel_value = ch->regs[DBDMA_WAIT_SEL] & 0x0f; + + cond = (status & sel_mask) == (sel_value & sel_mask); + + switch(wait) { + case WAIT_IFSET: /* wait if condition bit is 1 */ + if (cond) { + res = 1; + } + DBDMA_DPRINTFCH(ch, " [WAIT_IFSET=%d]\n", res); + break; + case 
WAIT_IFCLR: /* wait if condition bit is 0 */ + if (!cond) { + res = 1; + } + DBDMA_DPRINTFCH(ch, " [WAIT_IFCLR=%d]\n", res); + break; + } + return res; +} + +static void next(DBDMA_channel *ch) +{ + uint32_t cp; + + ch->regs[DBDMA_STATUS] &= ~BT; + + cp = ch->regs[DBDMA_CMDPTR_LO]; + ch->regs[DBDMA_CMDPTR_LO] = cp + sizeof(dbdma_cmd); + dbdma_cmdptr_load(ch); +} + +static void branch(DBDMA_channel *ch) +{ + dbdma_cmd *current = &ch->current; + + ch->regs[DBDMA_CMDPTR_LO] = le32_to_cpu(current->cmd_dep); + ch->regs[DBDMA_STATUS] |= BT; + dbdma_cmdptr_load(ch); +} + +static void conditional_branch(DBDMA_channel *ch) +{ + dbdma_cmd *current = &ch->current; + uint16_t br; + uint16_t sel_mask, sel_value; + uint32_t status; + int cond; + + /* check if we must branch */ + + br = le16_to_cpu(current->command) & BR_MASK; + + switch(br) { + case BR_NEVER: /* don't branch */ + next(ch); + return; + case BR_ALWAYS: /* always branch */ + DBDMA_DPRINTFCH(ch, " [BR_ALWAYS]\n"); + branch(ch); + return; + } + + status = ch->regs[DBDMA_STATUS] & DEVSTAT; + + sel_mask = (ch->regs[DBDMA_BRANCH_SEL] >> 16) & 0x0f; + sel_value = ch->regs[DBDMA_BRANCH_SEL] & 0x0f; + + cond = (status & sel_mask) == (sel_value & sel_mask); + + switch(br) { + case BR_IFSET: /* branch if condition bit is 1 */ + if (cond) { + DBDMA_DPRINTFCH(ch, " [BR_IFSET = 1]\n"); + branch(ch); + } else { + DBDMA_DPRINTFCH(ch, " [BR_IFSET = 0]\n"); + next(ch); + } + return; + case BR_IFCLR: /* branch if condition bit is 0 */ + if (!cond) { + DBDMA_DPRINTFCH(ch, " [BR_IFCLR = 1]\n"); + branch(ch); + } else { + DBDMA_DPRINTFCH(ch, " [BR_IFCLR = 0]\n"); + next(ch); + } + return; + } +} + +static void channel_run(DBDMA_channel *ch); + +static void dbdma_end(DBDMA_io *io) +{ + DBDMA_channel *ch = io->channel; + dbdma_cmd *current = &ch->current; + + DBDMA_DPRINTFCH(ch, "%s\n", __func__); + + if (conditional_wait(ch)) + goto wait; + + current->xfer_status = cpu_to_le16(ch->regs[DBDMA_STATUS]); + current->res_count = cpu_to_le16(io->len); + dbdma_cmdptr_save(ch); + if (io->is_last) + ch->regs[DBDMA_STATUS] &= ~FLUSH; + + conditional_interrupt(ch); + conditional_branch(ch); + +wait: + /* Indicate that we're ready for a new DMA round */ + ch->io.processing = false; + + if ((ch->regs[DBDMA_STATUS] & RUN) && + (ch->regs[DBDMA_STATUS] & ACTIVE)) + channel_run(ch); +} + +static void start_output(DBDMA_channel *ch, int key, uint32_t addr, + uint16_t req_count, int is_last) +{ + DBDMA_DPRINTFCH(ch, "start_output\n"); + + /* KEY_REGS, KEY_DEVICE and KEY_STREAM + * are not implemented in the mac-io chip + */ + + DBDMA_DPRINTFCH(ch, "addr 0x%x key 0x%x\n", addr, key); + if (!addr || key > KEY_STREAM3) { + kill_channel(ch); + return; + } + + ch->io.addr = addr; + ch->io.len = req_count; + ch->io.is_last = is_last; + ch->io.dma_end = dbdma_end; + ch->io.is_dma_out = 1; + ch->io.processing = true; + if (ch->rw) { + ch->rw(&ch->io); + } +} + +static void start_input(DBDMA_channel *ch, int key, uint32_t addr, + uint16_t req_count, int is_last) +{ + DBDMA_DPRINTFCH(ch, "start_input\n"); + + /* KEY_REGS, KEY_DEVICE and KEY_STREAM + * are not implemented in the mac-io chip + */ + + DBDMA_DPRINTFCH(ch, "addr 0x%x key 0x%x\n", addr, key); + if (!addr || key > KEY_STREAM3) { + kill_channel(ch); + return; + } + + ch->io.addr = addr; + ch->io.len = req_count; + ch->io.is_last = is_last; + ch->io.dma_end = dbdma_end; + ch->io.is_dma_out = 0; + ch->io.processing = true; + if (ch->rw) { + ch->rw(&ch->io); + } +} + +static void load_word(DBDMA_channel *ch, int key, uint32_t addr, + 
uint16_t len) +{ + dbdma_cmd *current = &ch->current; + + DBDMA_DPRINTFCH(ch, "load_word %d bytes, addr=%08x\n", len, addr); + + /* only implements KEY_SYSTEM */ + + if (key != KEY_SYSTEM) { + printf("DBDMA: LOAD_WORD, unimplemented key %x\n", key); + kill_channel(ch); + return; + } + + dma_memory_read(&address_space_memory, addr, &current->cmd_dep, len); + + if (conditional_wait(ch)) + goto wait; + + current->xfer_status = cpu_to_le16(ch->regs[DBDMA_STATUS]); + dbdma_cmdptr_save(ch); + ch->regs[DBDMA_STATUS] &= ~FLUSH; + + conditional_interrupt(ch); + next(ch); + +wait: + DBDMA_kick(dbdma_from_ch(ch)); +} + +static void store_word(DBDMA_channel *ch, int key, uint32_t addr, + uint16_t len) +{ + dbdma_cmd *current = &ch->current; + + DBDMA_DPRINTFCH(ch, "store_word %d bytes, addr=%08x pa=%x\n", + len, addr, le32_to_cpu(current->cmd_dep)); + + /* only implements KEY_SYSTEM */ + + if (key != KEY_SYSTEM) { + printf("DBDMA: STORE_WORD, unimplemented key %x\n", key); + kill_channel(ch); + return; + } + + dma_memory_write(&address_space_memory, addr, &current->cmd_dep, len); + + if (conditional_wait(ch)) + goto wait; + + current->xfer_status = cpu_to_le16(ch->regs[DBDMA_STATUS]); + dbdma_cmdptr_save(ch); + ch->regs[DBDMA_STATUS] &= ~FLUSH; + + conditional_interrupt(ch); + next(ch); + +wait: + DBDMA_kick(dbdma_from_ch(ch)); +} + +static void nop(DBDMA_channel *ch) +{ + dbdma_cmd *current = &ch->current; + + if (conditional_wait(ch)) + goto wait; + + current->xfer_status = cpu_to_le16(ch->regs[DBDMA_STATUS]); + dbdma_cmdptr_save(ch); + + conditional_interrupt(ch); + conditional_branch(ch); + +wait: + DBDMA_kick(dbdma_from_ch(ch)); +} + +static void stop(DBDMA_channel *ch) +{ + ch->regs[DBDMA_STATUS] &= ~(ACTIVE); + + /* the stop command does not increment command pointer */ +} + +static void channel_run(DBDMA_channel *ch) +{ + dbdma_cmd *current = &ch->current; + uint16_t cmd, key; + uint16_t req_count; + uint32_t phy_addr; + + DBDMA_DPRINTFCH(ch, "channel_run\n"); + dump_dbdma_cmd(ch, current); + + /* clear WAKE flag at command fetch */ + + ch->regs[DBDMA_STATUS] &= ~WAKE; + + cmd = le16_to_cpu(current->command) & COMMAND_MASK; + + switch (cmd) { + case DBDMA_NOP: + nop(ch); + return; + + case DBDMA_STOP: + stop(ch); + return; + } + + key = le16_to_cpu(current->command) & 0x0700; + req_count = le16_to_cpu(current->req_count); + phy_addr = le32_to_cpu(current->phy_addr); + + if (key == KEY_STREAM4) { + printf("command %x, invalid key 4\n", cmd); + kill_channel(ch); + return; + } + + switch (cmd) { + case OUTPUT_MORE: + DBDMA_DPRINTFCH(ch, "* OUTPUT_MORE *\n"); + start_output(ch, key, phy_addr, req_count, 0); + return; + + case OUTPUT_LAST: + DBDMA_DPRINTFCH(ch, "* OUTPUT_LAST *\n"); + start_output(ch, key, phy_addr, req_count, 1); + return; + + case INPUT_MORE: + DBDMA_DPRINTFCH(ch, "* INPUT_MORE *\n"); + start_input(ch, key, phy_addr, req_count, 0); + return; + + case INPUT_LAST: + DBDMA_DPRINTFCH(ch, "* INPUT_LAST *\n"); + start_input(ch, key, phy_addr, req_count, 1); + return; + } + + if (key < KEY_REGS) { + printf("command %x, invalid key %x\n", cmd, key); + key = KEY_SYSTEM; + } + + /* for LOAD_WORD and STORE_WORD, req_count is only 3 bits wide + * and BRANCH is invalid + */ + + req_count = req_count & 0x0007; + if (req_count & 0x4) { + req_count = 4; + phy_addr &= ~3; + } else if (req_count & 0x2) { + req_count = 2; + phy_addr &= ~1; + } else + req_count = 1; + + switch (cmd) { + case LOAD_WORD: + DBDMA_DPRINTFCH(ch, "* LOAD_WORD *\n"); + load_word(ch, key, phy_addr, req_count); + return; + + case STORE_WORD: +
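+        /* + * Editor's note (illustrative example, not part of the original + * patch): with the 3-bit req_count decode above, req_count = 0b110 + * becomes a 4-byte access with phy_addr rounded down to a 4-byte + * boundary, 0b010 becomes a 2-byte access with phy_addr rounded + * down to a 2-byte boundary, and anything else a single byte. + */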
DBDMA_DPRINTFCH(ch, "* STORE_WORD *\n"); + store_word(ch, key, phy_addr, req_count); + return; + } +} + +static void DBDMA_run(DBDMAState *s) +{ + int channel; + + for (channel = 0; channel < DBDMA_CHANNELS; channel++) { + DBDMA_channel *ch = &s->channels[channel]; + uint32_t status = ch->regs[DBDMA_STATUS]; + if (!ch->io.processing && (status & RUN) && (status & ACTIVE)) { + channel_run(ch); + } + } +} + +static void DBDMA_run_bh(void *opaque) +{ + DBDMAState *s = opaque; + + DBDMA_DPRINTF("-> DBDMA_run_bh\n"); + DBDMA_run(s); + DBDMA_DPRINTF("<- DBDMA_run_bh\n"); +} + +void DBDMA_kick(DBDMAState *dbdma) +{ + qemu_bh_schedule(dbdma->bh); +} + +void DBDMA_register_channel(void *dbdma, int nchan, qemu_irq irq, + DBDMA_rw rw, DBDMA_flush flush, + void *opaque) +{ + DBDMAState *s = dbdma; + DBDMA_channel *ch = &s->channels[nchan]; + + DBDMA_DPRINTFCH(ch, "DBDMA_register_channel 0x%x\n", nchan); + + assert(rw); + assert(flush); + + ch->irq = irq; + ch->rw = rw; + ch->flush = flush; + ch->io.opaque = opaque; +} + +static void dbdma_control_write(DBDMA_channel *ch) +{ + uint16_t mask, value; + uint32_t status; + bool do_flush = false; + + mask = (ch->regs[DBDMA_CONTROL] >> 16) & 0xffff; + value = ch->regs[DBDMA_CONTROL] & 0xffff; + + /* This is the status register which we'll update + * appropriately and store back + */ + status = ch->regs[DBDMA_STATUS]; + + /* RUN and PAUSE are bits under SW control only + * FLUSH and WAKE are set by SW and cleared by HW + * DEAD, ACTIVE and BT are only under HW control + * + * We handle ACTIVE separately at the end of the + * logic to ensure all cases are covered. + */ + + /* Setting RUN will tentatively activate the channel + */ + if ((mask & RUN) && (value & RUN)) { + status |= RUN; + DBDMA_DPRINTFCH(ch, " Setting RUN !\n"); + } + + /* Clearing RUN 1->0 will stop the channel */ + if ((mask & RUN) && !(value & RUN)) { + /* This has the side effect of clearing the DEAD bit */ + status &= ~(DEAD | RUN); + DBDMA_DPRINTFCH(ch, " Clearing RUN !\n"); + } + + /* Setting WAKE wakes up an idle channel if it's running + * + * Note: The doc doesn't say so but assume that only works + * on a channel whose RUN bit is set. + * + * We set WAKE in status, it's not terribly useful as it will + * be cleared on the next command fetch but it seems to mimmic + * the HW behaviour and is useful for the way we handle + * ACTIVE further down. + */ + if ((mask & WAKE) && (value & WAKE) && (status & RUN)) { + status |= WAKE; + DBDMA_DPRINTFCH(ch, " Setting WAKE !\n"); + } + + /* PAUSE being set will deactivate (or prevent activation) + * of the channel. We just copy it over for now, ACTIVE will + * be re-evaluated later. + */ + if (mask & PAUSE) { + status = (status & ~PAUSE) | (value & PAUSE); + DBDMA_DPRINTFCH(ch, " %sing PAUSE !\n", + (value & PAUSE) ? "sett" : "clear"); + } + + /* FLUSH is its own thing */ + if ((mask & FLUSH) && (value & FLUSH)) { + DBDMA_DPRINTFCH(ch, " Setting FLUSH !\n"); + /* We set flush directly in the status register, we do *NOT* + * set it in "status" so that it gets naturally cleared when + * we update the status register further down. That way it + * will be set only during the HW flush operation so it is + * visible to any completions happening during that time. + */ + ch->regs[DBDMA_STATUS] |= FLUSH; + do_flush = true; + } + + /* If either RUN or PAUSE is clear, so should ACTIVE be, + * otherwise, ACTIVE will be set if we modified RUN, PAUSE or + * set WAKE. That means that PAUSE was just cleared, RUN was + * just set or WAKE was just set. 
+ */ + if ((status & PAUSE) || !(status & RUN)) { + status &= ~ACTIVE; + DBDMA_DPRINTFCH(ch, " -> ACTIVE down !\n"); + + /* We stopped processing, we want the underlying HW command + * to complete *before* we clear the ACTIVE bit. Otherwise + * we can get into a situation where the command status will + * have RUN or ACTIVE not set which is going to confuse the + * MacOS driver. + */ + do_flush = true; + } else if (mask & (RUN | PAUSE)) { + status |= ACTIVE; + DBDMA_DPRINTFCH(ch, " -> ACTIVE up !\n"); + } else if ((mask & WAKE) && (value & WAKE)) { + status |= ACTIVE; + DBDMA_DPRINTFCH(ch, " -> ACTIVE up !\n"); + } + + DBDMA_DPRINTFCH(ch, " new status=0x%08x\n", status); + + /* If we need to flush the underlying HW, do it now, this happens + * both on FLUSH commands and when stopping the channel for safety. + */ + if (do_flush && ch->flush) { + ch->flush(&ch->io); + } + + /* Finally update the status register image */ + ch->regs[DBDMA_STATUS] = status; + + /* If active, make sure the BH gets to run */ + if (status & ACTIVE) { + DBDMA_kick(dbdma_from_ch(ch)); + } +} + +static void dbdma_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + int channel = addr >> DBDMA_CHANNEL_SHIFT; + DBDMAState *s = opaque; + DBDMA_channel *ch = &s->channels[channel]; + int reg = (addr - (channel << DBDMA_CHANNEL_SHIFT)) >> 2; + + DBDMA_DPRINTFCH(ch, "writel 0x" TARGET_FMT_plx " <= 0x%08"PRIx64"\n", + addr, value); + DBDMA_DPRINTFCH(ch, "channel 0x%x reg 0x%x\n", + (uint32_t)addr >> DBDMA_CHANNEL_SHIFT, reg); + + /* cmdptr cannot be modified if channel is ACTIVE */ + + if (reg == DBDMA_CMDPTR_LO && (ch->regs[DBDMA_STATUS] & ACTIVE)) { + return; + } + + ch->regs[reg] = value; + + switch(reg) { + case DBDMA_CONTROL: + dbdma_control_write(ch); + break; + case DBDMA_CMDPTR_LO: + /* 16-byte aligned */ + ch->regs[DBDMA_CMDPTR_LO] &= ~0xf; + dbdma_cmdptr_load(ch); + break; + case DBDMA_STATUS: + case DBDMA_INTR_SEL: + case DBDMA_BRANCH_SEL: + case DBDMA_WAIT_SEL: + /* nothing to do */ + break; + case DBDMA_XFER_MODE: + case DBDMA_CMDPTR_HI: + case DBDMA_DATA2PTR_HI: + case DBDMA_DATA2PTR_LO: + case DBDMA_ADDRESS_HI: + case DBDMA_BRANCH_ADDR_HI: + case DBDMA_RES1: + case DBDMA_RES2: + case DBDMA_RES3: + case DBDMA_RES4: + /* unused */ + break; + } +} + +static uint64_t dbdma_read(void *opaque, hwaddr addr, + unsigned size) +{ + uint32_t value; + int channel = addr >> DBDMA_CHANNEL_SHIFT; + DBDMAState *s = opaque; + DBDMA_channel *ch = &s->channels[channel]; + int reg = (addr - (channel << DBDMA_CHANNEL_SHIFT)) >> 2; + + value = ch->regs[reg]; + + switch(reg) { + case DBDMA_CONTROL: + value = ch->regs[DBDMA_STATUS]; + break; + case DBDMA_STATUS: + case DBDMA_CMDPTR_LO: + case DBDMA_INTR_SEL: + case DBDMA_BRANCH_SEL: + case DBDMA_WAIT_SEL: + /* nothing to do */ + break; + case DBDMA_XFER_MODE: + case DBDMA_CMDPTR_HI: + case DBDMA_DATA2PTR_HI: + case DBDMA_DATA2PTR_LO: + case DBDMA_ADDRESS_HI: + case DBDMA_BRANCH_ADDR_HI: + /* unused */ + value = 0; + break; + case DBDMA_RES1: + case DBDMA_RES2: + case DBDMA_RES3: + case DBDMA_RES4: + /* reserved */ + break; + } + + DBDMA_DPRINTFCH(ch, "readl 0x" TARGET_FMT_plx " => 0x%08x\n", addr, value); + DBDMA_DPRINTFCH(ch, "channel 0x%x reg 0x%x\n", + (uint32_t)addr >> DBDMA_CHANNEL_SHIFT, reg); + + return value; +} + +static const MemoryRegionOps dbdma_ops = { + .read = dbdma_read, + .write = dbdma_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +static const VMStateDescription 
vmstate_dbdma_io = { + .name = "dbdma_io", + .version_id = 0, + .minimum_version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_UINT64(addr, struct DBDMA_io), + VMSTATE_INT32(len, struct DBDMA_io), + VMSTATE_INT32(is_last, struct DBDMA_io), + VMSTATE_INT32(is_dma_out, struct DBDMA_io), + VMSTATE_BOOL(processing, struct DBDMA_io), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_dbdma_cmd = { + .name = "dbdma_cmd", + .version_id = 0, + .minimum_version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_UINT16(req_count, dbdma_cmd), + VMSTATE_UINT16(command, dbdma_cmd), + VMSTATE_UINT32(phy_addr, dbdma_cmd), + VMSTATE_UINT32(cmd_dep, dbdma_cmd), + VMSTATE_UINT16(res_count, dbdma_cmd), + VMSTATE_UINT16(xfer_status, dbdma_cmd), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_dbdma_channel = { + .name = "dbdma_channel", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32_ARRAY(regs, struct DBDMA_channel, DBDMA_REGS), + VMSTATE_STRUCT(io, struct DBDMA_channel, 0, vmstate_dbdma_io, DBDMA_io), + VMSTATE_STRUCT(current, struct DBDMA_channel, 0, vmstate_dbdma_cmd, + dbdma_cmd), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_dbdma = { + .name = "dbdma", + .version_id = 3, + .minimum_version_id = 3, + .fields = (VMStateField[]) { + VMSTATE_STRUCT_ARRAY(channels, DBDMAState, DBDMA_CHANNELS, 1, + vmstate_dbdma_channel, DBDMA_channel), + VMSTATE_END_OF_LIST() + } +}; + +static void mac_dbdma_reset(DeviceState *d) +{ + DBDMAState *s = MAC_DBDMA(d); + int i; + + for (i = 0; i < DBDMA_CHANNELS; i++) { + memset(s->channels[i].regs, 0, DBDMA_SIZE); + } +} + +static void dbdma_unassigned_rw(DBDMA_io *io) +{ + DBDMA_channel *ch = io->channel; + dbdma_cmd *current = &ch->current; + uint16_t cmd; + qemu_log_mask(LOG_GUEST_ERROR, "%s: use of unassigned channel %d\n", + __func__, ch->channel); + ch->io.processing = false; + + cmd = le16_to_cpu(current->command) & COMMAND_MASK; + if (cmd == OUTPUT_MORE || cmd == OUTPUT_LAST || + cmd == INPUT_MORE || cmd == INPUT_LAST) { + current->xfer_status = cpu_to_le16(ch->regs[DBDMA_STATUS]); + current->res_count = cpu_to_le16(io->len); + dbdma_cmdptr_save(ch); + } +} + +static void dbdma_unassigned_flush(DBDMA_io *io) +{ + DBDMA_channel *ch = io->channel; + qemu_log_mask(LOG_GUEST_ERROR, "%s: use of unassigned channel %d\n", + __func__, ch->channel); +} + +static void mac_dbdma_init(Object *obj) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + DBDMAState *s = MAC_DBDMA(obj); + int i; + + for (i = 0; i < DBDMA_CHANNELS; i++) { + DBDMA_channel *ch = &s->channels[i]; + + ch->rw = dbdma_unassigned_rw; + ch->flush = dbdma_unassigned_flush; + ch->channel = i; + ch->io.channel = ch; + } + + memory_region_init_io(&s->mem, obj, &dbdma_ops, s, "dbdma", 0x1000); + sysbus_init_mmio(sbd, &s->mem); +} + +static void mac_dbdma_realize(DeviceState *dev, Error **errp) +{ + DBDMAState *s = MAC_DBDMA(dev); + + s->bh = qemu_bh_new(DBDMA_run_bh, s); +} + +static void mac_dbdma_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + + dc->realize = mac_dbdma_realize; + dc->reset = mac_dbdma_reset; + dc->vmsd = &vmstate_dbdma; +} + +static const TypeInfo mac_dbdma_type_info = { + .name = TYPE_MAC_DBDMA, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(DBDMAState), + .instance_init = mac_dbdma_init, + .class_init = mac_dbdma_class_init +}; + +static void mac_dbdma_register_types(void) +{ + type_register_static(&mac_dbdma_type_info); +} + 
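+/* + * Editor's note: a hedged usage sketch, not part of the original patch. + * A device model attaches to a channel by registering its callbacks and + * later kicking the bottom half. The callback names here are illustrative + * assumptions; only DBDMA_register_channel() and DBDMA_kick() are the + * real entry points defined above: + * + *     static void my_rw(DBDMA_io *io) + *     { + *         transfer io->len bytes at io->addr, honouring io->is_dma_out, + *         then complete the command with: io->dma_end(io); + *     } + * + *     static void my_flush(DBDMA_io *io) { } + * + *     DBDMA_register_channel(dbdma, nchan, irq, my_rw, my_flush, opaque); + *     DBDMA_kick(dbdma); + */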
+type_init(mac_dbdma_register_types) diff --git a/hw/misc/macio/macio.c b/hw/misc/macio/macio.c new file mode 100644 index 000000000..c1fad43f6 --- /dev/null +++ b/hw/misc/macio/macio.c @@ -0,0 +1,504 @@ +/* + * PowerMac MacIO device emulation + * + * Copyright (c) 2005-2007 Fabrice Bellard + * Copyright (c) 2007 Jocelyn Mayer + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/module.h" +#include "hw/ppc/mac.h" +#include "hw/misc/macio/cuda.h" +#include "hw/pci/pci.h" +#include "hw/ppc/mac_dbdma.h" +#include "hw/qdev-properties.h" +#include "migration/vmstate.h" +#include "hw/char/escc.h" +#include "hw/misc/macio/macio.h" +#include "hw/intc/heathrow_pic.h" +#include "trace.h" + +/* Note: this code is strongly inspired by the corresponding code + * in PearPC */ + +/* + * The mac-io has two interfaces to the ESCC. One is called "escc-legacy", + * while the other one is the normal, current ESCC interface. + * + * The magic below creates memory aliases to spawn the escc-legacy device + * purely by rerouting the respective registers to our escc region. This + * works because the only difference between the two memory regions is the + * register layout, not their semantics.
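+ * For example, a guest access to escc-legacy offset 0x02 ("Command A") + * is serviced by offset 0x20 of the ESCC region, per the maps[] table + * below (editor's note).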
+ * + * Reference: ftp://ftp.software.ibm.com/rs6000/technology/spec/chrp/inwork/CHRP_IORef_1.0.pdf + */ +static void macio_escc_legacy_setup(MacIOState *s) +{ + ESCCState *escc = ESCC(&s->escc); + SysBusDevice *sbd = SYS_BUS_DEVICE(escc); + MemoryRegion *escc_legacy = g_new(MemoryRegion, 1); + MemoryRegion *bar = &s->bar; + int i; + static const int maps[] = { + 0x00, 0x00, /* Command B */ + 0x02, 0x20, /* Command A */ + 0x04, 0x10, /* Data B */ + 0x06, 0x30, /* Data A */ + 0x08, 0x40, /* Enhancement B */ + 0x0A, 0x50, /* Enhancement A */ + 0x80, 0x80, /* Recovery count */ + 0x90, 0x90, /* Start A */ + 0xa0, 0xa0, /* Start B */ + 0xb0, 0xb0, /* Detect AB */ + }; + + memory_region_init(escc_legacy, OBJECT(s), "escc-legacy", 256); + for (i = 0; i < ARRAY_SIZE(maps); i += 2) { + MemoryRegion *port = g_new(MemoryRegion, 1); + memory_region_init_alias(port, OBJECT(s), "escc-legacy-port", + sysbus_mmio_get_region(sbd, 0), + maps[i + 1], 0x2); + memory_region_add_subregion(escc_legacy, maps[i], port); + } + + memory_region_add_subregion(bar, 0x12000, escc_legacy); +} + +static void macio_bar_setup(MacIOState *s) +{ + ESCCState *escc = ESCC(&s->escc); + SysBusDevice *sbd = SYS_BUS_DEVICE(escc); + MemoryRegion *bar = &s->bar; + + memory_region_add_subregion(bar, 0x13000, sysbus_mmio_get_region(sbd, 0)); + macio_escc_legacy_setup(s); +} + +static void macio_common_realize(PCIDevice *d, Error **errp) +{ + MacIOState *s = MACIO(d); + SysBusDevice *sysbus_dev; + + if (!qdev_realize(DEVICE(&s->dbdma), BUS(&s->macio_bus), errp)) { + return; + } + sysbus_dev = SYS_BUS_DEVICE(&s->dbdma); + memory_region_add_subregion(&s->bar, 0x08000, + sysbus_mmio_get_region(sysbus_dev, 0)); + + qdev_prop_set_uint32(DEVICE(&s->escc), "disabled", 0); + qdev_prop_set_uint32(DEVICE(&s->escc), "frequency", ESCC_CLOCK); + qdev_prop_set_uint32(DEVICE(&s->escc), "it_shift", 4); + qdev_prop_set_uint32(DEVICE(&s->escc), "chnBtype", escc_serial); + qdev_prop_set_uint32(DEVICE(&s->escc), "chnAtype", escc_serial); + if (!qdev_realize(DEVICE(&s->escc), BUS(&s->macio_bus), errp)) { + return; + } + + macio_bar_setup(s); + pci_register_bar(d, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &s->bar); +} + +static void macio_realize_ide(MacIOState *s, MACIOIDEState *ide, + qemu_irq irq0, qemu_irq irq1, int dmaid, + Error **errp) +{ + SysBusDevice *sysbus_dev; + + sysbus_dev = SYS_BUS_DEVICE(ide); + sysbus_connect_irq(sysbus_dev, 0, irq0); + sysbus_connect_irq(sysbus_dev, 1, irq1); + qdev_prop_set_uint32(DEVICE(ide), "channel", dmaid); + object_property_set_link(OBJECT(ide), "dbdma", OBJECT(&s->dbdma), + &error_abort); + macio_ide_register_dma(ide); + + qdev_realize(DEVICE(ide), BUS(&s->macio_bus), errp); +} + +static void macio_oldworld_realize(PCIDevice *d, Error **errp) +{ + MacIOState *s = MACIO(d); + OldWorldMacIOState *os = OLDWORLD_MACIO(d); + DeviceState *pic_dev = DEVICE(&os->pic); + Error *err = NULL; + SysBusDevice *sysbus_dev; + + macio_common_realize(d, &err); + if (err) { + error_propagate(errp, err); + return; + } + + /* Heathrow PIC */ + if (!qdev_realize(DEVICE(&os->pic), BUS(&s->macio_bus), errp)) { + return; + } + sysbus_dev = SYS_BUS_DEVICE(&os->pic); + memory_region_add_subregion(&s->bar, 0x0, + sysbus_mmio_get_region(sysbus_dev, 0)); + + qdev_prop_set_uint64(DEVICE(&s->cuda), "timebase-frequency", + s->frequency); + if (!qdev_realize(DEVICE(&s->cuda), BUS(&s->macio_bus), errp)) { + return; + } + sysbus_dev = SYS_BUS_DEVICE(&s->cuda); + memory_region_add_subregion(&s->bar, 0x16000, + sysbus_mmio_get_region(sysbus_dev, 0)); + 
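+    /* Editor's note: the CUDA interrupt below, like the ESCC and IDE + * interrupts that follow, is routed through a Heathrow PIC GPIO input. */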
sysbus_connect_irq(sysbus_dev, 0, qdev_get_gpio_in(pic_dev, + OLDWORLD_CUDA_IRQ)); + + sysbus_dev = SYS_BUS_DEVICE(&s->escc); + sysbus_connect_irq(sysbus_dev, 0, qdev_get_gpio_in(pic_dev, + OLDWORLD_ESCCB_IRQ)); + sysbus_connect_irq(sysbus_dev, 1, qdev_get_gpio_in(pic_dev, + OLDWORLD_ESCCA_IRQ)); + + if (!qdev_realize(DEVICE(&os->nvram), BUS(&s->macio_bus), errp)) { + return; + } + sysbus_dev = SYS_BUS_DEVICE(&os->nvram); + memory_region_add_subregion(&s->bar, 0x60000, + sysbus_mmio_get_region(sysbus_dev, 0)); + pmac_format_nvram_partition(&os->nvram, os->nvram.size); + + /* IDE buses */ + macio_realize_ide(s, &os->ide[0], + qdev_get_gpio_in(pic_dev, OLDWORLD_IDE0_IRQ), + qdev_get_gpio_in(pic_dev, OLDWORLD_IDE0_DMA_IRQ), + 0x16, &err); + if (err) { + error_propagate(errp, err); + return; + } + + macio_realize_ide(s, &os->ide[1], + qdev_get_gpio_in(pic_dev, OLDWORLD_IDE1_IRQ), + qdev_get_gpio_in(pic_dev, OLDWORLD_IDE1_DMA_IRQ), + 0x1a, &err); + if (err) { + error_propagate(errp, err); + return; + } +} + +static void macio_init_ide(MacIOState *s, MACIOIDEState *ide, int index) +{ + gchar *name = g_strdup_printf("ide[%i]", index); + uint32_t addr = 0x1f000 + ((index + 1) * 0x1000); + + object_initialize_child(OBJECT(s), name, ide, TYPE_MACIO_IDE); + qdev_prop_set_uint32(DEVICE(ide), "addr", addr); + memory_region_add_subregion(&s->bar, addr, &ide->mem); + g_free(name); +} + +static void macio_oldworld_init(Object *obj) +{ + MacIOState *s = MACIO(obj); + OldWorldMacIOState *os = OLDWORLD_MACIO(obj); + DeviceState *dev; + int i; + + object_initialize_child(OBJECT(s), "pic", &os->pic, TYPE_HEATHROW); + + object_initialize_child(OBJECT(s), "cuda", &s->cuda, TYPE_CUDA); + + object_initialize_child(OBJECT(s), "nvram", &os->nvram, TYPE_MACIO_NVRAM); + dev = DEVICE(&os->nvram); + qdev_prop_set_uint32(dev, "size", 0x2000); + qdev_prop_set_uint32(dev, "it_shift", 4); + + for (i = 0; i < 2; i++) { + macio_init_ide(s, &os->ide[i], i); + } +} + +static void timer_write(void *opaque, hwaddr addr, uint64_t value, + unsigned size) +{ + trace_macio_timer_write(addr, size, value); +} + +static uint64_t timer_read(void *opaque, hwaddr addr, unsigned size) +{ + uint32_t value = 0; + uint64_t systime = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + uint64_t kltime; + + kltime = muldiv64(systime, 4194300, NANOSECONDS_PER_SECOND * 4); + kltime = muldiv64(kltime, 18432000, 1048575); + + switch (addr) { + case 0x38: + value = kltime; + break; + case 0x3c: + value = kltime >> 32; + break; + } + + trace_macio_timer_read(addr, size, value); + return value; +} + +static const MemoryRegionOps timer_ops = { + .read = timer_read, + .write = timer_write, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static void macio_newworld_realize(PCIDevice *d, Error **errp) +{ + MacIOState *s = MACIO(d); + NewWorldMacIOState *ns = NEWWORLD_MACIO(d); + DeviceState *pic_dev = DEVICE(&ns->pic); + Error *err = NULL; + SysBusDevice *sysbus_dev; + MemoryRegion *timer_memory = NULL; + + macio_common_realize(d, &err); + if (err) { + error_propagate(errp, err); + return; + } + + /* OpenPIC */ + qdev_prop_set_uint32(pic_dev, "model", OPENPIC_MODEL_KEYLARGO); + sysbus_dev = SYS_BUS_DEVICE(&ns->pic); + sysbus_realize_and_unref(sysbus_dev, &error_fatal); + memory_region_add_subregion(&s->bar, 0x40000, + sysbus_mmio_get_region(sysbus_dev, 0)); + + sysbus_dev = SYS_BUS_DEVICE(&s->escc); + sysbus_connect_irq(sysbus_dev, 0, qdev_get_gpio_in(pic_dev, + NEWWORLD_ESCCB_IRQ)); + sysbus_connect_irq(sysbus_dev, 1, qdev_get_gpio_in(pic_dev, + NEWWORLD_ESCCA_IRQ)); + + 
/* IDE buses */ + macio_realize_ide(s, &ns->ide[0], + qdev_get_gpio_in(pic_dev, NEWWORLD_IDE0_IRQ), + qdev_get_gpio_in(pic_dev, NEWWORLD_IDE0_DMA_IRQ), + 0x16, &err); + if (err) { + error_propagate(errp, err); + return; + } + + macio_realize_ide(s, &ns->ide[1], + qdev_get_gpio_in(pic_dev, NEWWORLD_IDE1_IRQ), + qdev_get_gpio_in(pic_dev, NEWWORLD_IDE1_DMA_IRQ), + 0x1a, &err); + if (err) { + error_propagate(errp, err); + return; + } + + /* Timer */ + timer_memory = g_new(MemoryRegion, 1); + memory_region_init_io(timer_memory, OBJECT(s), &timer_ops, NULL, "timer", + 0x1000); + memory_region_add_subregion(&s->bar, 0x15000, timer_memory); + + if (ns->has_pmu) { + /* GPIOs */ + if (!qdev_realize(DEVICE(&ns->gpio), BUS(&s->macio_bus), errp)) { + return; + } + sysbus_dev = SYS_BUS_DEVICE(&ns->gpio); + sysbus_connect_irq(sysbus_dev, 1, qdev_get_gpio_in(pic_dev, + NEWWORLD_EXTING_GPIO1)); + sysbus_connect_irq(sysbus_dev, 9, qdev_get_gpio_in(pic_dev, + NEWWORLD_EXTING_GPIO9)); + memory_region_add_subregion(&s->bar, 0x50, + sysbus_mmio_get_region(sysbus_dev, 0)); + + /* PMU */ + object_initialize_child(OBJECT(s), "pmu", &s->pmu, TYPE_VIA_PMU); + object_property_set_link(OBJECT(&s->pmu), "gpio", OBJECT(sysbus_dev), + &error_abort); + qdev_prop_set_bit(DEVICE(&s->pmu), "has-adb", ns->has_adb); + if (!qdev_realize(DEVICE(&s->pmu), BUS(&s->macio_bus), errp)) { + return; + } + sysbus_dev = SYS_BUS_DEVICE(&s->pmu); + sysbus_connect_irq(sysbus_dev, 0, qdev_get_gpio_in(pic_dev, + NEWWORLD_PMU_IRQ)); + memory_region_add_subregion(&s->bar, 0x16000, + sysbus_mmio_get_region(sysbus_dev, 0)); + } else { + object_unparent(OBJECT(&ns->gpio)); + + /* CUDA */ + object_initialize_child(OBJECT(s), "cuda", &s->cuda, TYPE_CUDA); + qdev_prop_set_uint64(DEVICE(&s->cuda), "timebase-frequency", + s->frequency); + + if (!qdev_realize(DEVICE(&s->cuda), BUS(&s->macio_bus), errp)) { + return; + } + sysbus_dev = SYS_BUS_DEVICE(&s->cuda); + sysbus_connect_irq(sysbus_dev, 0, qdev_get_gpio_in(pic_dev, + NEWWORLD_CUDA_IRQ)); + memory_region_add_subregion(&s->bar, 0x16000, + sysbus_mmio_get_region(sysbus_dev, 0)); + } +} + +static void macio_newworld_init(Object *obj) +{ + MacIOState *s = MACIO(obj); + NewWorldMacIOState *ns = NEWWORLD_MACIO(obj); + int i; + + object_initialize_child(OBJECT(s), "pic", &ns->pic, TYPE_OPENPIC); + + object_initialize_child(OBJECT(s), "gpio", &ns->gpio, TYPE_MACIO_GPIO); + + for (i = 0; i < 2; i++) { + macio_init_ide(s, &ns->ide[i], i); + } +} + +static void macio_instance_init(Object *obj) +{ + MacIOState *s = MACIO(obj); + + memory_region_init(&s->bar, obj, "macio", 0x80000); + + qbus_init(&s->macio_bus, sizeof(s->macio_bus), TYPE_MACIO_BUS, + DEVICE(obj), "macio.0"); + + object_initialize_child(OBJECT(s), "dbdma", &s->dbdma, TYPE_MAC_DBDMA); + + object_initialize_child(OBJECT(s), "escc", &s->escc, TYPE_ESCC); +} + +static const VMStateDescription vmstate_macio_oldworld = { + .name = "macio-oldworld", + .version_id = 0, + .minimum_version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_PCI_DEVICE(parent_obj.parent, OldWorldMacIOState), + VMSTATE_END_OF_LIST() + } +}; + +static void macio_oldworld_class_init(ObjectClass *oc, void *data) +{ + PCIDeviceClass *pdc = PCI_DEVICE_CLASS(oc); + DeviceClass *dc = DEVICE_CLASS(oc); + + pdc->realize = macio_oldworld_realize; + pdc->device_id = PCI_DEVICE_ID_APPLE_343S1201; + dc->vmsd = &vmstate_macio_oldworld; +} + +static const VMStateDescription vmstate_macio_newworld = { + .name = "macio-newworld", + .version_id = 0, + .minimum_version_id = 0, + .fields = 
(VMStateField[]) { + VMSTATE_PCI_DEVICE(parent_obj.parent, NewWorldMacIOState), + VMSTATE_END_OF_LIST() + } +}; + +static Property macio_newworld_properties[] = { + DEFINE_PROP_BOOL("has-pmu", NewWorldMacIOState, has_pmu, false), + DEFINE_PROP_BOOL("has-adb", NewWorldMacIOState, has_adb, false), + DEFINE_PROP_END_OF_LIST() +}; + +static void macio_newworld_class_init(ObjectClass *oc, void *data) +{ + PCIDeviceClass *pdc = PCI_DEVICE_CLASS(oc); + DeviceClass *dc = DEVICE_CLASS(oc); + + pdc->realize = macio_newworld_realize; + pdc->device_id = PCI_DEVICE_ID_APPLE_UNI_N_KEYL; + dc->vmsd = &vmstate_macio_newworld; + device_class_set_props(dc, macio_newworld_properties); +} + +static Property macio_properties[] = { + DEFINE_PROP_UINT64("frequency", MacIOState, frequency, 0), + DEFINE_PROP_END_OF_LIST() +}; + +static void macio_class_init(ObjectClass *klass, void *data) +{ + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + DeviceClass *dc = DEVICE_CLASS(klass); + + k->vendor_id = PCI_VENDOR_ID_APPLE; + k->class_id = PCI_CLASS_OTHERS << 8; + device_class_set_props(dc, macio_properties); + set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); +} + +static const TypeInfo macio_bus_info = { + .name = TYPE_MACIO_BUS, + .parent = TYPE_SYSTEM_BUS, + .instance_size = sizeof(MacIOBusState), +}; + +static const TypeInfo macio_oldworld_type_info = { + .name = TYPE_OLDWORLD_MACIO, + .parent = TYPE_MACIO, + .instance_size = sizeof(OldWorldMacIOState), + .instance_init = macio_oldworld_init, + .class_init = macio_oldworld_class_init, +}; + +static const TypeInfo macio_newworld_type_info = { + .name = TYPE_NEWWORLD_MACIO, + .parent = TYPE_MACIO, + .instance_size = sizeof(NewWorldMacIOState), + .instance_init = macio_newworld_init, + .class_init = macio_newworld_class_init, +}; + +static const TypeInfo macio_type_info = { + .name = TYPE_MACIO, + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(MacIOState), + .instance_init = macio_instance_init, + .abstract = true, + .class_init = macio_class_init, + .interfaces = (InterfaceInfo[]) { + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { }, + }, +}; + +static void macio_register_types(void) +{ + type_register_static(&macio_bus_info); + type_register_static(&macio_type_info); + type_register_static(&macio_oldworld_type_info); + type_register_static(&macio_newworld_type_info); +} + +type_init(macio_register_types) diff --git a/hw/misc/macio/meson.build b/hw/misc/macio/meson.build new file mode 100644 index 000000000..17282da20 --- /dev/null +++ b/hw/misc/macio/meson.build @@ -0,0 +1,8 @@ +macio_ss = ss.source_set() +macio_ss.add(files('macio.c')) +macio_ss.add(when: 'CONFIG_CUDA', if_true: files('cuda.c')) +macio_ss.add(when: 'CONFIG_MACIO_GPIO', if_true: files('gpio.c')) +macio_ss.add(when: 'CONFIG_MAC_DBDMA', if_true: files('mac_dbdma.c')) +macio_ss.add(when: 'CONFIG_MAC_PMU', if_true: files('pmu.c')) + +softmmu_ss.add_all(when: 'CONFIG_MACIO', if_true: macio_ss) diff --git a/hw/misc/macio/pmu.c b/hw/misc/macio/pmu.c new file mode 100644 index 000000000..eb39c6469 --- /dev/null +++ b/hw/misc/macio/pmu.c @@ -0,0 +1,871 @@ +/* + * QEMU PowerMac PMU device support + * + * Copyright (c) 2016 Benjamin Herrenschmidt, IBM Corp. 
+ * Copyright (c) 2018 Mark Cave-Ayland + * + * Based on the CUDA device by: + * + * Copyright (c) 2004-2007 Fabrice Bellard + * Copyright (c) 2007 Jocelyn Mayer + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "qemu-common.h" +#include "hw/ppc/mac.h" +#include "hw/qdev-properties.h" +#include "migration/vmstate.h" +#include "hw/input/adb.h" +#include "hw/irq.h" +#include "hw/misc/mos6522.h" +#include "hw/misc/macio/gpio.h" +#include "hw/misc/macio/pmu.h" +#include "qapi/error.h" +#include "qemu/timer.h" +#include "sysemu/runstate.h" +#include "qapi/error.h" +#include "qemu/cutils.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "trace.h" + + +/* Bits in B data register: all active low */ +#define TACK 0x08 /* Transfer request (input) */ +#define TREQ 0x10 /* Transfer acknowledge (output) */ + +/* PMU returns time_t's offset from Jan 1, 1904, not 1970 */ +#define RTC_OFFSET 2082844800 + +#define VIA_TIMER_FREQ (4700000 / 6) + +static void via_update_irq(PMUState *s) +{ + MOS6522PMUState *mps = MOS6522_PMU(&s->mos6522_pmu); + MOS6522State *ms = MOS6522(mps); + + bool new_state = !!(ms->ifr & ms->ier & (SR_INT | T1_INT | T2_INT)); + + if (new_state != s->via_irq_state) { + s->via_irq_state = new_state; + qemu_set_irq(s->via_irq, new_state); + } +} + +static void via_set_sr_int(void *opaque) +{ + PMUState *s = opaque; + MOS6522PMUState *mps = MOS6522_PMU(&s->mos6522_pmu); + MOS6522State *ms = MOS6522(mps); + MOS6522DeviceClass *mdc = MOS6522_GET_CLASS(ms); + + mdc->set_sr_int(ms); +} + +static void pmu_update_extirq(PMUState *s) +{ + if ((s->intbits & s->intmask) != 0) { + macio_set_gpio(s->gpio, 1, false); + } else { + macio_set_gpio(s->gpio, 1, true); + } +} + +static void pmu_adb_poll(void *opaque) +{ + PMUState *s = opaque; + ADBBusState *adb_bus = &s->adb_bus; + int olen; + + if (!(s->intbits & PMU_INT_ADB)) { + olen = adb_poll(adb_bus, s->adb_reply, adb_bus->autopoll_mask); + trace_pmu_adb_poll(olen); + + if (olen > 0) { + s->adb_reply_size = olen; + s->intbits |= PMU_INT_ADB | PMU_INT_ADB_AUTO; + pmu_update_extirq(s); + } + } +} + +static void pmu_one_sec_timer(void *opaque) +{ + PMUState *s = opaque; + + trace_pmu_one_sec_timer(); + + s->intbits |= PMU_INT_TICK; + pmu_update_extirq(s); + s->one_sec_target += 1000; + + timer_mod(s->one_sec_timer, s->one_sec_target); +} + +static void pmu_cmd_int_ack(PMUState *s, + const uint8_t *in_data, uint8_t in_len, + uint8_t *out_data, uint8_t *out_len) +{ + if (in_len 
!= 0) { + qemu_log_mask(LOG_GUEST_ERROR, + "PMU: INT_ACK command, invalid len: %d want: 0\n", + in_len); + return; + } + + /* Make appropriate reply packet */ + if (s->intbits & PMU_INT_ADB) { + if (!s->adb_reply_size) { + qemu_log_mask(LOG_GUEST_ERROR, + "Odd, PMU_INT_ADB set with no reply in buffer\n"); + } + + memcpy(out_data + 1, s->adb_reply, s->adb_reply_size); + out_data[0] = s->intbits & (PMU_INT_ADB | PMU_INT_ADB_AUTO); + *out_len = s->adb_reply_size + 1; + s->intbits &= ~(PMU_INT_ADB | PMU_INT_ADB_AUTO); + s->adb_reply_size = 0; + } else { + out_data[0] = s->intbits; + s->intbits = 0; + *out_len = 1; + } + + pmu_update_extirq(s); +} + +static void pmu_cmd_set_int_mask(PMUState *s, + const uint8_t *in_data, uint8_t in_len, + uint8_t *out_data, uint8_t *out_len) +{ + if (in_len != 1) { + qemu_log_mask(LOG_GUEST_ERROR, + "PMU: SET_INT_MASK command, invalid len: %d want: 1\n", + in_len); + return; + } + + trace_pmu_cmd_set_int_mask(s->intmask); + s->intmask = in_data[0]; + + pmu_update_extirq(s); +} + +static void pmu_cmd_set_adb_autopoll(PMUState *s, uint16_t mask) +{ + ADBBusState *adb_bus = &s->adb_bus; + + trace_pmu_cmd_set_adb_autopoll(mask); + + if (mask) { + adb_set_autopoll_mask(adb_bus, mask); + adb_set_autopoll_enabled(adb_bus, true); + } else { + adb_set_autopoll_enabled(adb_bus, false); + } +} + +static void pmu_cmd_adb(PMUState *s, + const uint8_t *in_data, uint8_t in_len, + uint8_t *out_data, uint8_t *out_len) +{ + int len, adblen; + uint8_t adb_cmd[255]; + + if (in_len < 2) { + qemu_log_mask(LOG_GUEST_ERROR, + "PMU: ADB PACKET, invalid len: %d want at least 2\n", + in_len); + return; + } + + *out_len = 0; + + if (!s->has_adb) { + trace_pmu_cmd_adb_nobus(); + return; + } + + /* Set autopoll is a special form of the command */ + if (in_data[0] == 0 && in_data[1] == 0x86) { + uint16_t mask = in_data[2]; + mask = (mask << 8) | in_data[3]; + if (in_len != 4) { + qemu_log_mask(LOG_GUEST_ERROR, + "PMU: ADB Autopoll requires 4 bytes, got %d\n", + in_len); + return; + } + + pmu_cmd_set_adb_autopoll(s, mask); + return; + } + + trace_pmu_cmd_adb_request(in_len, in_data[0], in_data[1], in_data[2], + in_data[3], in_data[4]); + + *out_len = 0; + + /* Check ADB len */ + adblen = in_data[2]; + if (adblen > (in_len - 3)) { + qemu_log_mask(LOG_GUEST_ERROR, + "PMU: ADB len is %d > %d (in_len -3)...erroring\n", + adblen, in_len - 3); + len = -1; + } else if (adblen > 252) { + qemu_log_mask(LOG_GUEST_ERROR, "PMU: ADB command too big!\n"); + len = -1; + } else { + /* Format command */ + adb_cmd[0] = in_data[0]; + memcpy(&adb_cmd[1], &in_data[3], in_len - 3); + len = adb_request(&s->adb_bus, s->adb_reply + 2, adb_cmd, in_len - 2); + + trace_pmu_cmd_adb_reply(len); + } + + if (len > 0) { + /* XXX Check this */ + s->adb_reply_size = len + 2; + s->adb_reply[0] = 0x01; + s->adb_reply[1] = len; + } else { + /* XXX Check this */ + s->adb_reply_size = 1; + s->adb_reply[0] = 0x00; + } + + s->intbits |= PMU_INT_ADB; + pmu_update_extirq(s); +} + +static void pmu_cmd_adb_poll_off(PMUState *s, + const uint8_t *in_data, uint8_t in_len, + uint8_t *out_data, uint8_t *out_len) +{ + ADBBusState *adb_bus = &s->adb_bus; + + if (in_len != 0) { + qemu_log_mask(LOG_GUEST_ERROR, + "PMU: ADB POLL OFF command, invalid len: %d want: 0\n", + in_len); + return; + } + + if (s->has_adb) { + adb_set_autopoll_enabled(adb_bus, false); + } +} + +static void pmu_cmd_shutdown(PMUState *s, + const uint8_t *in_data, uint8_t in_len, + uint8_t *out_data, uint8_t *out_len) +{ + if (in_len != 4) { + qemu_log_mask(LOG_GUEST_ERROR, + 
"PMU: SHUTDOWN command, invalid len: %d want: 4\n", + in_len); + return; + } + + *out_len = 1; + out_data[0] = 0; + + if (in_data[0] != 'M' || in_data[1] != 'A' || in_data[2] != 'T' || + in_data[3] != 'T') { + + qemu_log_mask(LOG_GUEST_ERROR, + "PMU: SHUTDOWN command, Bad MATT signature\n"); + return; + } + + qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN); +} + +static void pmu_cmd_reset(PMUState *s, + const uint8_t *in_data, uint8_t in_len, + uint8_t *out_data, uint8_t *out_len) +{ + if (in_len != 0) { + qemu_log_mask(LOG_GUEST_ERROR, + "PMU: RESET command, invalid len: %d want: 0\n", + in_len); + return; + } + + qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); +} + +static void pmu_cmd_get_rtc(PMUState *s, + const uint8_t *in_data, uint8_t in_len, + uint8_t *out_data, uint8_t *out_len) +{ + uint32_t ti; + + if (in_len != 0) { + qemu_log_mask(LOG_GUEST_ERROR, + "PMU: GET_RTC command, invalid len: %d want: 0\n", + in_len); + return; + } + + ti = s->tick_offset + (qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + / NANOSECONDS_PER_SECOND); + out_data[0] = ti >> 24; + out_data[1] = ti >> 16; + out_data[2] = ti >> 8; + out_data[3] = ti; + *out_len = 4; +} + +static void pmu_cmd_set_rtc(PMUState *s, + const uint8_t *in_data, uint8_t in_len, + uint8_t *out_data, uint8_t *out_len) +{ + uint32_t ti; + + if (in_len != 4) { + qemu_log_mask(LOG_GUEST_ERROR, + "PMU: SET_RTC command, invalid len: %d want: 4\n", + in_len); + return; + } + + ti = (((uint32_t)in_data[0]) << 24) + (((uint32_t)in_data[1]) << 16) + + (((uint32_t)in_data[2]) << 8) + in_data[3]; + + s->tick_offset = ti - (qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + / NANOSECONDS_PER_SECOND); +} + +static void pmu_cmd_system_ready(PMUState *s, + const uint8_t *in_data, uint8_t in_len, + uint8_t *out_data, uint8_t *out_len) +{ + /* Do nothing */ +} + +static void pmu_cmd_get_version(PMUState *s, + const uint8_t *in_data, uint8_t in_len, + uint8_t *out_data, uint8_t *out_len) +{ + *out_len = 1; + *out_data = 1; /* ??? Check what Apple does */ +} + +static void pmu_cmd_power_events(PMUState *s, + const uint8_t *in_data, uint8_t in_len, + uint8_t *out_data, uint8_t *out_len) +{ + if (in_len < 1) { + qemu_log_mask(LOG_GUEST_ERROR, + "PMU: POWER EVENTS command, invalid len %d, want at least 1\n", + in_len); + return; + } + + switch (in_data[0]) { + /* Dummies for now */ + case PMU_PWR_GET_POWERUP_EVENTS: + *out_len = 2; + out_data[0] = 0; + out_data[1] = 0; + break; + case PMU_PWR_SET_POWERUP_EVENTS: + case PMU_PWR_CLR_POWERUP_EVENTS: + break; + case PMU_PWR_GET_WAKEUP_EVENTS: + *out_len = 2; + out_data[0] = 0; + out_data[1] = 0; + break; + case PMU_PWR_SET_WAKEUP_EVENTS: + case PMU_PWR_CLR_WAKEUP_EVENTS: + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "PMU: POWER EVENTS unknown subcommand 0x%02x\n", + in_data[0]); + } +} + +static void pmu_cmd_get_cover(PMUState *s, + const uint8_t *in_data, uint8_t in_len, + uint8_t *out_data, uint8_t *out_len) +{ + /* Not 100% sure here, will have to check what a real Mac + * returns other than byte 0 bit 0 is LID closed on laptops + */ + *out_len = 1; + *out_data = 0x00; +} + +static void pmu_cmd_download_status(PMUState *s, + const uint8_t *in_data, uint8_t in_len, + uint8_t *out_data, uint8_t *out_len) +{ + /* This has to do with PMU firmware updates as far as I can tell. 
+ * + * We return 0x62 which is what OpenPMU expects + */ + *out_len = 1; + *out_data = 0x62; +} + +static void pmu_cmd_read_pmu_ram(PMUState *s, + const uint8_t *in_data, uint8_t in_len, + uint8_t *out_data, uint8_t *out_len) +{ + if (in_len < 3) { + qemu_log_mask(LOG_GUEST_ERROR, + "PMU: READ_PMU_RAM command, invalid len %d, expected 3\n", + in_len); + return; + } + + qemu_log_mask(LOG_GUEST_ERROR, + "PMU: Unsupported READ_PMU_RAM, args: %02x %02x %02x\n", + in_data[0], in_data[1], in_data[2]); + + *out_len = 0; +} + +/* description of commands */ +typedef struct PMUCmdHandler { + uint8_t command; + const char *name; + void (*handler)(PMUState *s, + const uint8_t *in_args, uint8_t in_len, + uint8_t *out_args, uint8_t *out_len); +} PMUCmdHandler; + +static const PMUCmdHandler PMUCmdHandlers[] = { + { PMU_INT_ACK, "INT ACK", pmu_cmd_int_ack }, + { PMU_SET_INTR_MASK, "SET INT MASK", pmu_cmd_set_int_mask }, + { PMU_ADB_CMD, "ADB COMMAND", pmu_cmd_adb }, + { PMU_ADB_POLL_OFF, "ADB POLL OFF", pmu_cmd_adb_poll_off }, + { PMU_RESET, "REBOOT", pmu_cmd_reset }, + { PMU_SHUTDOWN, "SHUTDOWN", pmu_cmd_shutdown }, + { PMU_READ_RTC, "GET RTC", pmu_cmd_get_rtc }, + { PMU_SET_RTC, "SET RTC", pmu_cmd_set_rtc }, + { PMU_SYSTEM_READY, "SYSTEM READY", pmu_cmd_system_ready }, + { PMU_GET_VERSION, "GET VERSION", pmu_cmd_get_version }, + { PMU_POWER_EVENTS, "POWER EVENTS", pmu_cmd_power_events }, + { PMU_GET_COVER, "GET_COVER", pmu_cmd_get_cover }, + { PMU_DOWNLOAD_STATUS, "DOWNLOAD STATUS", pmu_cmd_download_status }, + { PMU_READ_PMU_RAM, "READ PMGR RAM", pmu_cmd_read_pmu_ram }, +}; + +static void pmu_dispatch_cmd(PMUState *s) +{ + unsigned int i; + + /* No response by default */ + s->cmd_rsp_sz = 0; + + for (i = 0; i < ARRAY_SIZE(PMUCmdHandlers); i++) { + const PMUCmdHandler *desc = &PMUCmdHandlers[i]; + + if (desc->command != s->cmd) { + continue; + } + + trace_pmu_dispatch_cmd(desc->name); + desc->handler(s, s->cmd_buf, s->cmd_buf_pos, + s->cmd_rsp, &s->cmd_rsp_sz); + + if (s->rsplen != -1 && s->rsplen != s->cmd_rsp_sz) { + trace_pmu_debug_protocol_string("QEMU internal cmd resp mismatch!"); + } else { + trace_pmu_debug_protocol_resp_size(s->cmd_rsp_sz); + } + + return; + } + + trace_pmu_dispatch_unknown_cmd(s->cmd); + + /* Manufacture fake response with 0's */ + if (s->rsplen == -1) { + s->cmd_rsp_sz = 0; + } else { + s->cmd_rsp_sz = s->rsplen; + memset(s->cmd_rsp, 0, s->rsplen); + } +} + +static void pmu_update(PMUState *s) +{ + MOS6522PMUState *mps = &s->mos6522_pmu; + MOS6522State *ms = MOS6522(mps); + ADBBusState *adb_bus = &s->adb_bus; + + /* Only react to changes in reg B */ + if (ms->b == s->last_b) { + return; + } + s->last_b = ms->b; + + /* Check the TREQ / TACK state */ + switch (ms->b & (TREQ | TACK)) { + case TREQ: + /* This is an ack release, handle it and bail out */ + ms->b |= TACK; + s->last_b = ms->b; + + trace_pmu_debug_protocol_string("handshake: TREQ high, setting TACK"); + return; + case TACK: + /* This is a valid request, handle below */ + break; + case TREQ | TACK: + /* This is an idle state */ + return; + default: + /* Invalid state, log and ignore */ + trace_pmu_debug_protocol_error(ms->b); + return; + } + + /* If we wanted to handle commands asynchronously, this is where + * we would delay the clearing of TACK until we are ready to send + * the response + */ + + /* We have a request, handshake TACK so we don't stay in + * an invalid state. If we were concurrent with the OS we + * should only do this after we grabbed the SR but that isn't + * a problem here. 
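+ * + * (Editor's recap of the port B decode above: TREQ alone means the + * host released its ack, TACK alone is a live request, and TREQ|TACK + * is the idle state; the bits are active low.)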
+ */ + + trace_pmu_debug_protocol_clear_treq(s->cmd_state); + + ms->b &= ~TACK; + s->last_b = ms->b; + + /* Act according to state */ + switch (s->cmd_state) { + case pmu_state_idle: + if (!(ms->acr & SR_OUT)) { + trace_pmu_debug_protocol_string("protocol error! " + "state idle, ACR reading"); + break; + } + + s->cmd = ms->sr; + via_set_sr_int(s); + s->cmdlen = pmu_data_len[s->cmd][0]; + s->rsplen = pmu_data_len[s->cmd][1]; + s->cmd_buf_pos = 0; + s->cmd_rsp_pos = 0; + s->cmd_state = pmu_state_cmd; + + adb_autopoll_block(adb_bus); + trace_pmu_debug_protocol_cmd(s->cmd, s->cmdlen, s->rsplen); + break; + + case pmu_state_cmd: + if (!(ms->acr & SR_OUT)) { + trace_pmu_debug_protocol_string("protocol error! " + "state cmd, ACR reading"); + break; + } + + if (s->cmdlen == -1) { + trace_pmu_debug_protocol_cmdlen(ms->sr); + + s->cmdlen = ms->sr; + if (s->cmdlen > sizeof(s->cmd_buf)) { + trace_pmu_debug_protocol_cmd_toobig(s->cmdlen); + } + } else if (s->cmd_buf_pos < sizeof(s->cmd_buf)) { + s->cmd_buf[s->cmd_buf_pos++] = ms->sr; + } + + via_set_sr_int(s); + break; + + case pmu_state_rsp: + if (ms->acr & SR_OUT) { + trace_pmu_debug_protocol_string("protocol error! " + "state resp, ACR writing"); + break; + } + + if (s->rsplen == -1) { + trace_pmu_debug_protocol_cmd_send_resp_size(s->cmd_rsp_sz); + + ms->sr = s->cmd_rsp_sz; + s->rsplen = s->cmd_rsp_sz; + } else if (s->cmd_rsp_pos < s->cmd_rsp_sz) { + trace_pmu_debug_protocol_cmd_send_resp(s->cmd_rsp_pos, s->rsplen); + + ms->sr = s->cmd_rsp[s->cmd_rsp_pos++]; + } + + via_set_sr_int(s); + break; + } + + /* Check for state completion */ + if (s->cmd_state == pmu_state_cmd && s->cmdlen == s->cmd_buf_pos) { + trace_pmu_debug_protocol_string("Command reception complete, " + "dispatching..."); + + pmu_dispatch_cmd(s); + s->cmd_state = pmu_state_rsp; + } + + if (s->cmd_state == pmu_state_rsp && s->rsplen == s->cmd_rsp_pos) { + trace_pmu_debug_protocol_cmd_resp_complete(ms->ier); + + adb_autopoll_unblock(adb_bus); + s->cmd_state = pmu_state_idle; + } +} + +static uint64_t mos6522_pmu_read(void *opaque, hwaddr addr, unsigned size) +{ + PMUState *s = opaque; + MOS6522PMUState *mps = &s->mos6522_pmu; + MOS6522State *ms = MOS6522(mps); + + addr = (addr >> 9) & 0xf; + return mos6522_read(ms, addr, size); +} + +static void mos6522_pmu_write(void *opaque, hwaddr addr, uint64_t val, + unsigned size) +{ + PMUState *s = opaque; + MOS6522PMUState *mps = &s->mos6522_pmu; + MOS6522State *ms = MOS6522(mps); + + addr = (addr >> 9) & 0xf; + mos6522_write(ms, addr, val, size); +} + +static const MemoryRegionOps mos6522_pmu_ops = { + .read = mos6522_pmu_read, + .write = mos6522_pmu_write, + .endianness = DEVICE_BIG_ENDIAN, + .impl = { + .min_access_size = 1, + .max_access_size = 1, + }, +}; + +static bool pmu_adb_state_needed(void *opaque) +{ + PMUState *s = opaque; + + return s->has_adb; +} + +static const VMStateDescription vmstate_pmu_adb = { + .name = "pmu/adb", + .version_id = 1, + .minimum_version_id = 1, + .needed = pmu_adb_state_needed, + .fields = (VMStateField[]) { + VMSTATE_UINT8(adb_reply_size, PMUState), + VMSTATE_BUFFER(adb_reply, PMUState), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_pmu = { + .name = "pmu", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_STRUCT(mos6522_pmu.parent_obj, PMUState, 0, vmstate_mos6522, + MOS6522State), + VMSTATE_UINT8(last_b, PMUState), + VMSTATE_UINT8(cmd, PMUState), + VMSTATE_UINT32(cmdlen, PMUState), + VMSTATE_UINT32(rsplen, PMUState), + 
VMSTATE_UINT8(cmd_buf_pos, PMUState), + VMSTATE_BUFFER(cmd_buf, PMUState), + VMSTATE_UINT8(cmd_rsp_pos, PMUState), + VMSTATE_UINT8(cmd_rsp_sz, PMUState), + VMSTATE_BUFFER(cmd_rsp, PMUState), + VMSTATE_UINT8(intbits, PMUState), + VMSTATE_UINT8(intmask, PMUState), + VMSTATE_UINT32(tick_offset, PMUState), + VMSTATE_TIMER_PTR(one_sec_timer, PMUState), + VMSTATE_INT64(one_sec_target, PMUState), + VMSTATE_END_OF_LIST() + }, + .subsections = (const VMStateDescription * []) { + &vmstate_pmu_adb, + NULL + } +}; + +static void pmu_reset(DeviceState *dev) +{ + PMUState *s = VIA_PMU(dev); + + /* OpenBIOS needs to do this? MacOS 9 needs it */ + s->intmask = PMU_INT_ADB | PMU_INT_TICK; + s->intbits = 0; + + s->cmd_state = pmu_state_idle; +} + +static void pmu_realize(DeviceState *dev, Error **errp) +{ + PMUState *s = VIA_PMU(dev); + SysBusDevice *sbd; + ADBBusState *adb_bus = &s->adb_bus; + struct tm tm; + + if (!sysbus_realize(SYS_BUS_DEVICE(&s->mos6522_pmu), errp)) { + return; + } + + /* Pass IRQ from 6522 */ + sbd = SYS_BUS_DEVICE(s); + sysbus_pass_irq(sbd, SYS_BUS_DEVICE(&s->mos6522_pmu)); + + qemu_get_timedate(&tm, 0); + s->tick_offset = (uint32_t)mktimegm(&tm) + RTC_OFFSET; + s->one_sec_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL, pmu_one_sec_timer, s); + s->one_sec_target = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 1000; + timer_mod(s->one_sec_timer, s->one_sec_target); + + if (s->has_adb) { + qbus_init(&s->adb_bus, sizeof(s->adb_bus), TYPE_ADB_BUS, + dev, "adb.0"); + adb_register_autopoll_callback(adb_bus, pmu_adb_poll, s); + } +} + +static void pmu_init(Object *obj) +{ + SysBusDevice *d = SYS_BUS_DEVICE(obj); + PMUState *s = VIA_PMU(obj); + + object_property_add_link(obj, "gpio", TYPE_MACIO_GPIO, + (Object **) &s->gpio, + qdev_prop_allow_set_link_before_realize, + 0); + + object_initialize_child(obj, "mos6522-pmu", &s->mos6522_pmu, + TYPE_MOS6522_PMU); + + memory_region_init_io(&s->mem, obj, &mos6522_pmu_ops, s, "via-pmu", + 0x2000); + sysbus_init_mmio(d, &s->mem); +} + +static Property pmu_properties[] = { + DEFINE_PROP_BOOL("has-adb", PMUState, has_adb, true), + DEFINE_PROP_END_OF_LIST() +}; + +static void pmu_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + + dc->realize = pmu_realize; + dc->reset = pmu_reset; + dc->vmsd = &vmstate_pmu; + device_class_set_props(dc, pmu_properties); + set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); +} + +static const TypeInfo pmu_type_info = { + .name = TYPE_VIA_PMU, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(PMUState), + .instance_init = pmu_init, + .class_init = pmu_class_init, +}; + +static void mos6522_pmu_portB_write(MOS6522State *s) +{ + MOS6522PMUState *mps = container_of(s, MOS6522PMUState, parent_obj); + PMUState *ps = container_of(mps, PMUState, mos6522_pmu); + + if ((s->pcr & 0xe0) == 0x20 || (s->pcr & 0xe0) == 0x60) { + s->ifr &= ~CB2_INT; + } + s->ifr &= ~CB1_INT; + + via_update_irq(ps); + pmu_update(ps); +} + +static void mos6522_pmu_portA_write(MOS6522State *s) +{ + MOS6522PMUState *mps = container_of(s, MOS6522PMUState, parent_obj); + PMUState *ps = container_of(mps, PMUState, mos6522_pmu); + + if ((s->pcr & 0x0e) == 0x02 || (s->pcr & 0x0e) == 0x06) { + s->ifr &= ~CA2_INT; + } + s->ifr &= ~CA1_INT; + + via_update_irq(ps); +} + +static void mos6522_pmu_reset(DeviceState *dev) +{ + MOS6522State *ms = MOS6522(dev); + MOS6522PMUState *mps = container_of(ms, MOS6522PMUState, parent_obj); + PMUState *s = container_of(mps, PMUState, mos6522_pmu); + MOS6522DeviceClass *mdc = MOS6522_GET_CLASS(ms); + + 
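+    /* Editor's note: run the generic 6522 reset first, then override the + * timer frequencies and the port B idle state (TACK | TREQ) below. */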
mdc->parent_reset(dev); + + ms->timers[0].frequency = VIA_TIMER_FREQ; + ms->timers[1].frequency = (SCALE_US * 6000) / 4700; + + s->last_b = ms->b = TACK | TREQ; +} + +static void mos6522_pmu_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + MOS6522DeviceClass *mdc = MOS6522_CLASS(oc); + + dc->reset = mos6522_pmu_reset; + mdc->portB_write = mos6522_pmu_portB_write; + mdc->portA_write = mos6522_pmu_portA_write; +} + +static const TypeInfo mos6522_pmu_type_info = { + .name = TYPE_MOS6522_PMU, + .parent = TYPE_MOS6522, + .instance_size = sizeof(MOS6522PMUState), + .class_init = mos6522_pmu_class_init, +}; + +static void pmu_register_types(void) +{ + type_register_static(&pmu_type_info); + type_register_static(&mos6522_pmu_type_info); +} + +type_init(pmu_register_types) diff --git a/hw/misc/macio/trace-events b/hw/misc/macio/trace-events new file mode 100644 index 000000000..ad4b9d1c0 --- /dev/null +++ b/hw/misc/macio/trace-events @@ -0,0 +1,42 @@ +# See docs/devel/tracing.rst for syntax documentation. + +# cuda.c +cuda_delay_set_sr_int(void) "" +cuda_data_send(uint8_t data) "send: 0x%02x" +cuda_data_recv(uint8_t data) "recv: 0x%02x" +cuda_receive_packet_cmd(const char *cmd) "handling command %s" +cuda_packet_receive(int len) "length %d" +cuda_packet_receive_data(int i, const uint8_t data) "[%d] 0x%02x" +cuda_packet_send(int len) "length %d" +cuda_packet_send_data(int i, const uint8_t data) "[%d] 0x%02x" + +# macio.c +macio_timer_write(uint64_t addr, unsigned len, uint64_t val) "write addr 0x%"PRIx64 " len %d val 0x%"PRIx64 +macio_timer_read(uint64_t addr, unsigned len, uint32_t val) "read addr 0x%"PRIx64 " len %d val 0x%"PRIx32 + +# gpio.c +macio_set_gpio(int gpio, bool state) "setting GPIO %d to %d" +macio_gpio_irq_assert(int gpio) "asserting GPIO %d" +macio_gpio_irq_deassert(int gpio) "deasserting GPIO %d" +macio_gpio_write(uint64_t addr, uint64_t val) "addr: 0x%"PRIx64" value: 0x%"PRIx64 + +# pmu.c +pmu_adb_poll(int olen) "ADB autopoll, olen=%d" +pmu_one_sec_timer(void) "PMU one sec..." +pmu_cmd_set_int_mask(int intmask) "Setting PMU int mask to 0x%02x" +pmu_cmd_set_adb_autopoll(int mask) "ADB set autopoll, mask=0x%04x" +pmu_cmd_adb_nobus(void) "ADB PACKET with no ADB bus!" +pmu_cmd_adb_request(int inlen, int indata0, int indata1, int indata2, int indata3, int indata4) "ADB request: len=%d, cmd=0x%02x, pflags=0x%02x, adblen=%d: 0x%02x 0x%02x..." +pmu_cmd_adb_reply(int len) "ADB reply is %d bytes" +pmu_dispatch_cmd(const char *name) "handling command %s" +pmu_dispatch_unknown_cmd(int cmd) "Unknown PMU command 0x%02x" +pmu_debug_protocol_string(const char *str) "%s" +pmu_debug_protocol_resp_size(int size) "sending %d resp bytes" +pmu_debug_protocol_error(int portB) "protocol error! portB=0x%02x" +pmu_debug_protocol_clear_treq(int state) "TREQ cleared, clearing TACK, state: %d" +pmu_debug_protocol_cmd(int cmd, int cmdlen, int rsplen) "Got command byte 0x%02x, clen=%d, rlen=%d" +pmu_debug_protocol_cmdlen(int len) "got cmd length byte: %d" +pmu_debug_protocol_cmd_toobig(int len) "command too big (%d bytes)" +pmu_debug_protocol_cmd_send_resp_size(int len) "sending length byte: %d" +pmu_debug_protocol_cmd_send_resp(int pos, int len) "sending byte: %d/%d" +pmu_debug_protocol_cmd_resp_complete(int ier) "Response send complete. 
IER=0x%02x" diff --git a/hw/misc/macio/trace.h b/hw/misc/macio/trace.h new file mode 100644 index 000000000..34a3cf1b4 --- /dev/null +++ b/hw/misc/macio/trace.h @@ -0,0 +1 @@ +#include "trace/trace-hw_misc_macio.h" diff --git a/hw/misc/mchp_pfsoc_dmc.c b/hw/misc/mchp_pfsoc_dmc.c new file mode 100644 index 000000000..43d8e970a --- /dev/null +++ b/hw/misc/mchp_pfsoc_dmc.c @@ -0,0 +1,215 @@ +/* + * Microchip PolarFire SoC DDR Memory Controller module emulation + * + * Copyright (c) 2020 Wind River Systems, Inc. + * + * Author: + * Bin Meng + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 or + * (at your option) version 3 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . + */ + +#include "qemu/osdep.h" +#include "qemu/bitops.h" +#include "qemu/log.h" +#include "qapi/error.h" +#include "hw/sysbus.h" +#include "hw/misc/mchp_pfsoc_dmc.h" + +/* DDR SGMII PHY module */ + +#define SGMII_PHY_IOC_REG1 0x208 +#define SGMII_PHY_TRAINING_STATUS 0x814 +#define SGMII_PHY_DQ_DQS_ERR_DONE 0x834 +#define SGMII_PHY_DQDQS_STATUS1 0x84c +#define SGMII_PHY_PVT_STAT 0xc20 + +static uint64_t mchp_pfsoc_ddr_sgmii_phy_read(void *opaque, hwaddr offset, + unsigned size) +{ + uint32_t val = 0; + static int training_status_bit; + + switch (offset) { + case SGMII_PHY_IOC_REG1: + /* See ddr_pvt_calibration() in HSS */ + val = BIT(4) | BIT(2); + break; + case SGMII_PHY_TRAINING_STATUS: + /* + * The codes logic emulates the training status change from + * DDR_TRAINING_IP_SM_BCLKSCLK to DDR_TRAINING_IP_SM_DQ_DQS. + * + * See ddr_setup() in mss_ddr.c in the HSS source codes. + */ + val = 1 << training_status_bit; + training_status_bit = (training_status_bit + 1) % 5; + break; + case SGMII_PHY_DQ_DQS_ERR_DONE: + /* + * DDR_TRAINING_IP_SM_VERIFY state in ddr_setup(), + * check that DQ/DQS training passed without error. + */ + val = 8; + break; + case SGMII_PHY_DQDQS_STATUS1: + /* + * DDR_TRAINING_IP_SM_VERIFY state in ddr_setup(), + * check that DQ/DQS calculated window is above 5 taps. 
+ */ + val = 0xff; + break; + case SGMII_PHY_PVT_STAT: + /* See sgmii_channel_setup() in HSS */ + val = BIT(14) | BIT(6); + break; + default: + qemu_log_mask(LOG_UNIMP, "%s: unimplemented device read " + "(size %d, offset 0x%" HWADDR_PRIx ")\n", + __func__, size, offset); + break; + } + + return val; +} + +static void mchp_pfsoc_ddr_sgmii_phy_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + qemu_log_mask(LOG_UNIMP, "%s: unimplemented device write " + "(size %d, value 0x%" PRIx64 + ", offset 0x%" HWADDR_PRIx ")\n", + __func__, size, value, offset); +} + +static const MemoryRegionOps mchp_pfsoc_ddr_sgmii_phy_ops = { + .read = mchp_pfsoc_ddr_sgmii_phy_read, + .write = mchp_pfsoc_ddr_sgmii_phy_write, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static void mchp_pfsoc_ddr_sgmii_phy_realize(DeviceState *dev, Error **errp) +{ + MchpPfSoCDdrSgmiiPhyState *s = MCHP_PFSOC_DDR_SGMII_PHY(dev); + + memory_region_init_io(&s->sgmii_phy, OBJECT(dev), + &mchp_pfsoc_ddr_sgmii_phy_ops, s, + "mchp.pfsoc.ddr_sgmii_phy", + MCHP_PFSOC_DDR_SGMII_PHY_REG_SIZE); + sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->sgmii_phy); +} + +static void mchp_pfsoc_ddr_sgmii_phy_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->desc = "Microchip PolarFire SoC DDR SGMII PHY module"; + dc->realize = mchp_pfsoc_ddr_sgmii_phy_realize; +} + +static const TypeInfo mchp_pfsoc_ddr_sgmii_phy_info = { + .name = TYPE_MCHP_PFSOC_DDR_SGMII_PHY, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(MchpPfSoCDdrSgmiiPhyState), + .class_init = mchp_pfsoc_ddr_sgmii_phy_class_init, +}; + +static void mchp_pfsoc_ddr_sgmii_phy_register_types(void) +{ + type_register_static(&mchp_pfsoc_ddr_sgmii_phy_info); +} + +type_init(mchp_pfsoc_ddr_sgmii_phy_register_types) + +/* DDR CFG module */ + +#define CFG_MT_DONE_ACK 0x4428 +#define CFG_STAT_DFI_INIT_COMPLETE 0x10034 +#define CFG_STAT_DFI_TRAINING_COMPLETE 0x10038 + +static uint64_t mchp_pfsoc_ddr_cfg_read(void *opaque, hwaddr offset, + unsigned size) +{ + uint32_t val = 0; + + switch (offset) { + case CFG_MT_DONE_ACK: + /* memory test in MTC_test() */ + val = BIT(0); + break; + case CFG_STAT_DFI_INIT_COMPLETE: + /* DDR_TRAINING_IP_SM_START_CHECK state in ddr_setup() */ + val = BIT(0); + break; + case CFG_STAT_DFI_TRAINING_COMPLETE: + /* DDR_TRAINING_IP_SM_VERIFY state in ddr_setup() */ + val = BIT(0); + break; + default: + qemu_log_mask(LOG_UNIMP, "%s: unimplemented device read " + "(size %d, offset 0x%" HWADDR_PRIx ")\n", + __func__, size, offset); + break; + } + + return val; +} + +static void mchp_pfsoc_ddr_cfg_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + qemu_log_mask(LOG_UNIMP, "%s: unimplemented device write " + "(size %d, value 0x%" PRIx64 + ", offset 0x%" HWADDR_PRIx ")\n", + __func__, size, value, offset); +} + +static const MemoryRegionOps mchp_pfsoc_ddr_cfg_ops = { + .read = mchp_pfsoc_ddr_cfg_read, + .write = mchp_pfsoc_ddr_cfg_write, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static void mchp_pfsoc_ddr_cfg_realize(DeviceState *dev, Error **errp) +{ + MchpPfSoCDdrCfgState *s = MCHP_PFSOC_DDR_CFG(dev); + + memory_region_init_io(&s->cfg, OBJECT(dev), + &mchp_pfsoc_ddr_cfg_ops, s, + "mchp.pfsoc.ddr_cfg", + MCHP_PFSOC_DDR_CFG_REG_SIZE); + sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->cfg); +} + +static void mchp_pfsoc_ddr_cfg_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->desc = "Microchip PolarFire SoC DDR CFG module"; + dc->realize = 
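For context, a board or SoC model would instantiate and map one of these register stubs with the standard sysbus helpers inside its realize function. A hedged sketch (the 0x20080000 base address is a placeholder for illustration, not the real PolarFire SoC memory map):

    /* Sketch: board-side wiring for the DDR CFG stub. */
    DeviceState *dev = qdev_new(TYPE_MCHP_PFSOC_DDR_CFG);

    sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
    sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, 0x20080000 /* placeholder */);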
mchp_pfsoc_ddr_cfg_realize; +} + +static const TypeInfo mchp_pfsoc_ddr_cfg_info = { + .name = TYPE_MCHP_PFSOC_DDR_CFG, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(MchpPfSoCDdrCfgState), + .class_init = mchp_pfsoc_ddr_cfg_class_init, +}; + +static void mchp_pfsoc_ddr_cfg_register_types(void) +{ + type_register_static(&mchp_pfsoc_ddr_cfg_info); +} + +type_init(mchp_pfsoc_ddr_cfg_register_types) diff --git a/hw/misc/mchp_pfsoc_ioscb.c b/hw/misc/mchp_pfsoc_ioscb.c new file mode 100644 index 000000000..f4fd55a0e --- /dev/null +++ b/hw/misc/mchp_pfsoc_ioscb.c @@ -0,0 +1,241 @@ +/* + * Microchip PolarFire SoC IOSCB module emulation + * + * Copyright (c) 2020 Wind River Systems, Inc. + * + * Author: + * Bin Meng + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 or + * (at your option) version 3 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . + */ + +#include "qemu/osdep.h" +#include "qemu/bitops.h" +#include "qemu/log.h" +#include "qapi/error.h" +#include "hw/sysbus.h" +#include "hw/misc/mchp_pfsoc_ioscb.h" + +/* + * The whole IOSCB module registers map into the system address at 0x3000_0000, + * named as "System Port 0 (AXI-D0)". + */ +#define IOSCB_WHOLE_REG_SIZE 0x10000000 +#define IOSCB_SUBMOD_REG_SIZE 0x1000 + +/* + * There are many sub-modules in the IOSCB module. + * See Microchip PolarFire SoC documentation (Register_Map.zip), + * Register Map/PF_SoC_RegMap_V1_1/MPFS250T/mpfs250t_ioscb_memmap_dri.htm + * + * The following are sub-modules offsets that are of concern. 
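Since the whole container is mapped at system address 0x3000_0000, each sub-module's guest-physical address is simply that base plus the offset listed below; for example the MSS PLL at offset 0x0E001000 appears to the guest at 0x3E001000. A one-line helper makes the arithmetic explicit (a sketch; the 0x30000000 mapping itself is done by the SoC code, not by this file):

    #define IOSCB_SYSTEM_BASE 0x30000000ULL

    /* Guest-physical address of an IOSCB sub-module register. */
    static inline uint64_t ioscb_phys(uint64_t submod_base, uint64_t reg)
    {
        return IOSCB_SYSTEM_BASE + submod_base + reg;  /* e.g. 0x3E001004 */
    }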
+ */ +#define IOSCB_LANE01_BASE 0x06500000 +#define IOSCB_LANE23_BASE 0x06510000 +#define IOSCB_CTRL_BASE 0x07020000 +#define IOSCB_CFG_BASE 0x07080000 +#define IOSCB_PLL_MSS_BASE 0x0E001000 +#define IOSCB_CFM_MSS_BASE 0x0E002000 +#define IOSCB_PLL_DDR_BASE 0x0E010000 +#define IOSCB_BC_DDR_BASE 0x0E020000 +#define IOSCB_IO_CALIB_DDR_BASE 0x0E040000 +#define IOSCB_PLL_SGMII_BASE 0x0E080000 +#define IOSCB_DLL_SGMII_BASE 0x0E100000 +#define IOSCB_CFM_SGMII_BASE 0x0E200000 +#define IOSCB_BC_SGMII_BASE 0x0E400000 +#define IOSCB_IO_CALIB_SGMII_BASE 0x0E800000 + +static uint64_t mchp_pfsoc_dummy_read(void *opaque, hwaddr offset, + unsigned size) +{ + qemu_log_mask(LOG_UNIMP, "%s: unimplemented device read " + "(size %d, offset 0x%" HWADDR_PRIx ")\n", + __func__, size, offset); + + return 0; +} + +static void mchp_pfsoc_dummy_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + qemu_log_mask(LOG_UNIMP, "%s: unimplemented device write " + "(size %d, value 0x%" PRIx64 + ", offset 0x%" HWADDR_PRIx ")\n", + __func__, size, value, offset); +} + +static const MemoryRegionOps mchp_pfsoc_dummy_ops = { + .read = mchp_pfsoc_dummy_read, + .write = mchp_pfsoc_dummy_write, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +/* All PLL modules in IOSCB have the same register layout */ + +#define PLL_CTRL 0x04 + +static uint64_t mchp_pfsoc_pll_read(void *opaque, hwaddr offset, + unsigned size) +{ + uint32_t val = 0; + + switch (offset) { + case PLL_CTRL: + /* PLL is locked */ + val = BIT(25); + break; + default: + qemu_log_mask(LOG_UNIMP, "%s: unimplemented device read " + "(size %d, offset 0x%" HWADDR_PRIx ")\n", + __func__, size, offset); + break; + } + + return val; +} + +static const MemoryRegionOps mchp_pfsoc_pll_ops = { + .read = mchp_pfsoc_pll_read, + .write = mchp_pfsoc_dummy_write, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +/* IO_CALIB_DDR submodule */ + +#define IO_CALIB_DDR_IOC_REG1 0x08 + +static uint64_t mchp_pfsoc_io_calib_ddr_read(void *opaque, hwaddr offset, + unsigned size) +{ + uint32_t val = 0; + + switch (offset) { + case IO_CALIB_DDR_IOC_REG1: + /* calibration completed */ + val = BIT(2); + break; + default: + qemu_log_mask(LOG_UNIMP, "%s: unimplemented device read " + "(size %d, offset 0x%" HWADDR_PRIx ")\n", + __func__, size, offset); + break; + } + + return val; +} + +static const MemoryRegionOps mchp_pfsoc_io_calib_ddr_ops = { + .read = mchp_pfsoc_io_calib_ddr_read, + .write = mchp_pfsoc_dummy_write, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static void mchp_pfsoc_ioscb_realize(DeviceState *dev, Error **errp) +{ + MchpPfSoCIoscbState *s = MCHP_PFSOC_IOSCB(dev); + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + + memory_region_init(&s->container, OBJECT(s), + "mchp.pfsoc.ioscb", IOSCB_WHOLE_REG_SIZE); + sysbus_init_mmio(sbd, &s->container); + + /* add subregions for all sub-modules in IOSCB */ + + memory_region_init_io(&s->lane01, OBJECT(s), &mchp_pfsoc_dummy_ops, s, + "mchp.pfsoc.ioscb.lane01", IOSCB_SUBMOD_REG_SIZE); + memory_region_add_subregion(&s->container, IOSCB_LANE01_BASE, &s->lane01); + + memory_region_init_io(&s->lane23, OBJECT(s), &mchp_pfsoc_dummy_ops, s, + "mchp.pfsoc.ioscb.lane23", IOSCB_SUBMOD_REG_SIZE); + memory_region_add_subregion(&s->container, IOSCB_LANE23_BASE, &s->lane23); + + memory_region_init_io(&s->ctrl, OBJECT(s), &mchp_pfsoc_dummy_ops, s, + "mchp.pfsoc.ioscb.ctrl", IOSCB_SUBMOD_REG_SIZE); + memory_region_add_subregion(&s->container, IOSCB_CTRL_BASE, &s->ctrl); + + memory_region_init_io(&s->cfg, OBJECT(s), &mchp_pfsoc_dummy_ops, s, + 
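These hardwired status bits exist so that bare-metal firmware poll loops terminate immediately. Roughly what the HSS is assumed to do against this model (a sketch, not the actual HSS code; the address is derived from the defines above, 0x30000000 + IOSCB_PLL_DDR_BASE + PLL_CTRL):

    /* Firmware-side view (sketch): spin until the DDR PLL reports lock.
     * On this model the first read already returns BIT(25), so the
     * loop exits immediately. */
    volatile uint32_t *pll_ctrl = (uint32_t *)(uintptr_t)0x3E010004;

    while (!(*pll_ctrl & (1u << 25))) {
        /* wait for PLL lock */
    }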
"mchp.pfsoc.ioscb.cfg", IOSCB_SUBMOD_REG_SIZE); + memory_region_add_subregion(&s->container, IOSCB_CFG_BASE, &s->cfg); + + memory_region_init_io(&s->pll_mss, OBJECT(s), &mchp_pfsoc_pll_ops, s, + "mchp.pfsoc.ioscb.pll_mss", IOSCB_SUBMOD_REG_SIZE); + memory_region_add_subregion(&s->container, IOSCB_PLL_MSS_BASE, &s->pll_mss); + + memory_region_init_io(&s->cfm_mss, OBJECT(s), &mchp_pfsoc_dummy_ops, s, + "mchp.pfsoc.ioscb.cfm_mss", IOSCB_SUBMOD_REG_SIZE); + memory_region_add_subregion(&s->container, IOSCB_CFM_MSS_BASE, &s->cfm_mss); + + memory_region_init_io(&s->pll_ddr, OBJECT(s), &mchp_pfsoc_pll_ops, s, + "mchp.pfsoc.ioscb.pll_ddr", IOSCB_SUBMOD_REG_SIZE); + memory_region_add_subregion(&s->container, IOSCB_PLL_DDR_BASE, &s->pll_ddr); + + memory_region_init_io(&s->bc_ddr, OBJECT(s), &mchp_pfsoc_dummy_ops, s, + "mchp.pfsoc.ioscb.bc_ddr", IOSCB_SUBMOD_REG_SIZE); + memory_region_add_subregion(&s->container, IOSCB_BC_DDR_BASE, &s->bc_ddr); + + memory_region_init_io(&s->io_calib_ddr, OBJECT(s), + &mchp_pfsoc_io_calib_ddr_ops, s, + "mchp.pfsoc.ioscb.io_calib_ddr", + IOSCB_SUBMOD_REG_SIZE); + memory_region_add_subregion(&s->container, IOSCB_IO_CALIB_DDR_BASE, + &s->io_calib_ddr); + + memory_region_init_io(&s->pll_sgmii, OBJECT(s), &mchp_pfsoc_pll_ops, s, + "mchp.pfsoc.ioscb.pll_sgmii", IOSCB_SUBMOD_REG_SIZE); + memory_region_add_subregion(&s->container, IOSCB_PLL_SGMII_BASE, + &s->pll_sgmii); + + memory_region_init_io(&s->dll_sgmii, OBJECT(s), &mchp_pfsoc_dummy_ops, s, + "mchp.pfsoc.ioscb.dll_sgmii", IOSCB_SUBMOD_REG_SIZE); + memory_region_add_subregion(&s->container, IOSCB_DLL_SGMII_BASE, + &s->dll_sgmii); + + memory_region_init_io(&s->cfm_sgmii, OBJECT(s), &mchp_pfsoc_dummy_ops, s, + "mchp.pfsoc.ioscb.cfm_sgmii", IOSCB_SUBMOD_REG_SIZE); + memory_region_add_subregion(&s->container, IOSCB_CFM_SGMII_BASE, + &s->cfm_sgmii); + + memory_region_init_io(&s->bc_sgmii, OBJECT(s), &mchp_pfsoc_dummy_ops, s, + "mchp.pfsoc.ioscb.bc_sgmii", IOSCB_SUBMOD_REG_SIZE); + memory_region_add_subregion(&s->container, IOSCB_BC_SGMII_BASE, + &s->bc_sgmii); + + memory_region_init_io(&s->io_calib_sgmii, OBJECT(s), &mchp_pfsoc_dummy_ops, + s, "mchp.pfsoc.ioscb.io_calib_sgmii", + IOSCB_SUBMOD_REG_SIZE); + memory_region_add_subregion(&s->container, IOSCB_IO_CALIB_SGMII_BASE, + &s->io_calib_sgmii); +} + +static void mchp_pfsoc_ioscb_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->desc = "Microchip PolarFire SoC IOSCB modules"; + dc->realize = mchp_pfsoc_ioscb_realize; +} + +static const TypeInfo mchp_pfsoc_ioscb_info = { + .name = TYPE_MCHP_PFSOC_IOSCB, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(MchpPfSoCIoscbState), + .class_init = mchp_pfsoc_ioscb_class_init, +}; + +static void mchp_pfsoc_ioscb_register_types(void) +{ + type_register_static(&mchp_pfsoc_ioscb_info); +} + +type_init(mchp_pfsoc_ioscb_register_types) diff --git a/hw/misc/mchp_pfsoc_sysreg.c b/hw/misc/mchp_pfsoc_sysreg.c new file mode 100644 index 000000000..89571eded --- /dev/null +++ b/hw/misc/mchp_pfsoc_sysreg.c @@ -0,0 +1,98 @@ +/* + * Microchip PolarFire SoC SYSREG module emulation + * + * Copyright (c) 2020 Wind River Systems, Inc. + * + * Author: + * Bin Meng + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 or + * (at your option) version 3 of the License. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . + */ + +#include "qemu/osdep.h" +#include "qemu/bitops.h" +#include "qemu/log.h" +#include "qapi/error.h" +#include "hw/sysbus.h" +#include "hw/misc/mchp_pfsoc_sysreg.h" + +#define ENVM_CR 0xb8 + +static uint64_t mchp_pfsoc_sysreg_read(void *opaque, hwaddr offset, + unsigned size) +{ + uint32_t val = 0; + + switch (offset) { + case ENVM_CR: + /* Indicate the eNVM is running at the configured divider rate */ + val = BIT(6); + break; + default: + qemu_log_mask(LOG_UNIMP, "%s: unimplemented device read " + "(size %d, offset 0x%" HWADDR_PRIx ")\n", + __func__, size, offset); + break; + } + + return val; +} + +static void mchp_pfsoc_sysreg_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + qemu_log_mask(LOG_UNIMP, "%s: unimplemented device write " + "(size %d, value 0x%" PRIx64 + ", offset 0x%" HWADDR_PRIx ")\n", + __func__, size, value, offset); +} + +static const MemoryRegionOps mchp_pfsoc_sysreg_ops = { + .read = mchp_pfsoc_sysreg_read, + .write = mchp_pfsoc_sysreg_write, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static void mchp_pfsoc_sysreg_realize(DeviceState *dev, Error **errp) +{ + MchpPfSoCSysregState *s = MCHP_PFSOC_SYSREG(dev); + + memory_region_init_io(&s->sysreg, OBJECT(dev), + &mchp_pfsoc_sysreg_ops, s, + "mchp.pfsoc.sysreg", + MCHP_PFSOC_SYSREG_REG_SIZE); + sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->sysreg); +} + +static void mchp_pfsoc_sysreg_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->desc = "Microchip PolarFire SoC SYSREG module"; + dc->realize = mchp_pfsoc_sysreg_realize; +} + +static const TypeInfo mchp_pfsoc_sysreg_info = { + .name = TYPE_MCHP_PFSOC_SYSREG, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(MchpPfSoCSysregState), + .class_init = mchp_pfsoc_sysreg_class_init, +}; + +static void mchp_pfsoc_sysreg_register_types(void) +{ + type_register_static(&mchp_pfsoc_sysreg_info); +} + +type_init(mchp_pfsoc_sysreg_register_types) diff --git a/hw/misc/meson.build b/hw/misc/meson.build new file mode 100644 index 000000000..3f41a3a5b --- /dev/null +++ b/hw/misc/meson.build @@ -0,0 +1,128 @@ +softmmu_ss.add(when: 'CONFIG_APPLESMC', if_true: files('applesmc.c')) +softmmu_ss.add(when: 'CONFIG_EDU', if_true: files('edu.c')) +softmmu_ss.add(when: 'CONFIG_FW_CFG_DMA', if_true: files('vmcoreinfo.c')) +softmmu_ss.add(when: 'CONFIG_ISA_DEBUG', if_true: files('debugexit.c')) +softmmu_ss.add(when: 'CONFIG_ISA_TESTDEV', if_true: files('pc-testdev.c')) +softmmu_ss.add(when: 'CONFIG_PCA9552', if_true: files('pca9552.c')) +softmmu_ss.add(when: 'CONFIG_PCI_TESTDEV', if_true: files('pci-testdev.c')) +softmmu_ss.add(when: 'CONFIG_SGA', if_true: files('sga.c')) +softmmu_ss.add(when: 'CONFIG_UNIMP', if_true: files('unimp.c')) +softmmu_ss.add(when: 'CONFIG_EMPTY_SLOT', if_true: files('empty_slot.c')) +softmmu_ss.add(when: 'CONFIG_LED', if_true: files('led.c')) +softmmu_ss.add(when: 'CONFIG_PVPANIC_COMMON', if_true: files('pvpanic.c')) + +# ARM devices +softmmu_ss.add(when: 'CONFIG_PL310', if_true: files('arm_l2x0.c')) +softmmu_ss.add(when: 'CONFIG_INTEGRATOR_DEBUG', if_true: files('arm_integrator_debug.c')) +softmmu_ss.add(when: 'CONFIG_A9SCU', 
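A quick way to sanity-check register stubs such as the SYSREG model above is a qtest that reads the register and verifies the ready bit. A hedged sketch (the machine name and the 0x20002000 base address are assumptions standing in for wherever the SoC maps SYSREG):

    #include "qemu/osdep.h"
    #include "libqtest.h"

    static void test_envm_cr_ready(void)
    {
        QTestState *qts = qtest_init("-machine microchip-icicle-kit");
        /* ENVM_CR is at offset 0xb8; the base address is a placeholder. */
        uint32_t val = qtest_readl(qts, 0x20002000 + 0xb8);

        g_assert(val & (1u << 6));  /* eNVM divider reports ready */
        qtest_quit(qts);
    }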
if_true: files('a9scu.c')) +softmmu_ss.add(when: 'CONFIG_ARM11SCU', if_true: files('arm11scu.c')) + +softmmu_ss.add(when: 'CONFIG_ARM_V7M', if_true: files('armv7m_ras.c')) + +# Mac devices +softmmu_ss.add(when: 'CONFIG_MOS6522', if_true: files('mos6522.c')) + +# virt devices +softmmu_ss.add(when: 'CONFIG_VIRT_CTRL', if_true: files('virt_ctrl.c')) + +# RISC-V devices +softmmu_ss.add(when: 'CONFIG_MCHP_PFSOC_DMC', if_true: files('mchp_pfsoc_dmc.c')) +softmmu_ss.add(when: 'CONFIG_MCHP_PFSOC_IOSCB', if_true: files('mchp_pfsoc_ioscb.c')) +softmmu_ss.add(when: 'CONFIG_MCHP_PFSOC_SYSREG', if_true: files('mchp_pfsoc_sysreg.c')) +softmmu_ss.add(when: 'CONFIG_SIFIVE_TEST', if_true: files('sifive_test.c')) +softmmu_ss.add(when: 'CONFIG_SIFIVE_E_PRCI', if_true: files('sifive_e_prci.c')) +softmmu_ss.add(when: 'CONFIG_SIFIVE_U_OTP', if_true: files('sifive_u_otp.c')) +softmmu_ss.add(when: 'CONFIG_SIFIVE_U_PRCI', if_true: files('sifive_u_prci.c')) + +subdir('macio') + +softmmu_ss.add(when: 'CONFIG_IVSHMEM_DEVICE', if_true: files('ivshmem.c')) + +softmmu_ss.add(when: 'CONFIG_ALLWINNER_H3', if_true: files('allwinner-h3-ccu.c')) +specific_ss.add(when: 'CONFIG_ALLWINNER_H3', if_true: files('allwinner-cpucfg.c')) +softmmu_ss.add(when: 'CONFIG_ALLWINNER_H3', if_true: files('allwinner-h3-dramc.c')) +softmmu_ss.add(when: 'CONFIG_ALLWINNER_H3', if_true: files('allwinner-h3-sysctrl.c')) +softmmu_ss.add(when: 'CONFIG_ALLWINNER_H3', if_true: files('allwinner-sid.c')) +softmmu_ss.add(when: 'CONFIG_REALVIEW', if_true: files('arm_sysctl.c')) +softmmu_ss.add(when: 'CONFIG_NSERIES', if_true: files('cbus.c')) +softmmu_ss.add(when: 'CONFIG_ECCMEMCTL', if_true: files('eccmemctl.c')) +softmmu_ss.add(when: 'CONFIG_EXYNOS4', if_true: files('exynos4210_pmu.c', 'exynos4210_clk.c', 'exynos4210_rng.c')) +softmmu_ss.add(when: 'CONFIG_IMX', if_true: files( + 'imx25_ccm.c', + 'imx31_ccm.c', + 'imx6_ccm.c', + 'imx6ul_ccm.c', + 'imx7_ccm.c', + 'imx7_gpr.c', + 'imx7_snvs.c', + 'imx_ccm.c', + 'imx_rngc.c', +)) +softmmu_ss.add(when: 'CONFIG_MAINSTONE', if_true: files('mst_fpga.c')) +softmmu_ss.add(when: 'CONFIG_NPCM7XX', if_true: files( + 'npcm7xx_clk.c', + 'npcm7xx_gcr.c', + 'npcm7xx_mft.c', + 'npcm7xx_pwm.c', + 'npcm7xx_rng.c', +)) +softmmu_ss.add(when: 'CONFIG_OMAP', if_true: files( + 'omap_clk.c', + 'omap_gpmc.c', + 'omap_l4.c', + 'omap_sdrc.c', + 'omap_tap.c', +)) +softmmu_ss.add(when: 'CONFIG_RASPI', if_true: files( + 'bcm2835_mbox.c', + 'bcm2835_mphi.c', + 'bcm2835_property.c', + 'bcm2835_rng.c', + 'bcm2835_thermal.c', + 'bcm2835_cprman.c', + 'bcm2835_powermgt.c', +)) +softmmu_ss.add(when: 'CONFIG_SLAVIO', if_true: files('slavio_misc.c')) +softmmu_ss.add(when: 'CONFIG_ZYNQ', if_true: files('zynq_slcr.c')) +softmmu_ss.add(when: 'CONFIG_XLNX_VERSAL', if_true: files('xlnx-versal-xramc.c')) +softmmu_ss.add(when: 'CONFIG_STM32F2XX_SYSCFG', if_true: files('stm32f2xx_syscfg.c')) +softmmu_ss.add(when: 'CONFIG_STM32F4XX_SYSCFG', if_true: files('stm32f4xx_syscfg.c')) +softmmu_ss.add(when: 'CONFIG_STM32F4XX_EXTI', if_true: files('stm32f4xx_exti.c')) +softmmu_ss.add(when: 'CONFIG_MPS2_FPGAIO', if_true: files('mps2-fpgaio.c')) +softmmu_ss.add(when: 'CONFIG_MPS2_SCC', if_true: files('mps2-scc.c')) + +softmmu_ss.add(when: 'CONFIG_TZ_MPC', if_true: files('tz-mpc.c')) +softmmu_ss.add(when: 'CONFIG_TZ_MSC', if_true: files('tz-msc.c')) +softmmu_ss.add(when: 'CONFIG_TZ_PPC', if_true: files('tz-ppc.c')) +softmmu_ss.add(when: 'CONFIG_IOTKIT_SECCTL', if_true: files('iotkit-secctl.c')) +softmmu_ss.add(when: 'CONFIG_IOTKIT_SYSINFO', if_true: 
files('iotkit-sysinfo.c')) +softmmu_ss.add(when: 'CONFIG_ARMSSE_CPU_PWRCTRL', if_true: files('armsse-cpu-pwrctrl.c')) +softmmu_ss.add(when: 'CONFIG_ARMSSE_CPUID', if_true: files('armsse-cpuid.c')) +softmmu_ss.add(when: 'CONFIG_ARMSSE_MHU', if_true: files('armsse-mhu.c')) + +softmmu_ss.add(when: 'CONFIG_PVPANIC_ISA', if_true: files('pvpanic-isa.c')) +softmmu_ss.add(when: 'CONFIG_PVPANIC_PCI', if_true: files('pvpanic-pci.c')) +softmmu_ss.add(when: 'CONFIG_AUX', if_true: files('auxbus.c')) +softmmu_ss.add(when: 'CONFIG_ASPEED_SOC', if_true: files( + 'aspeed_hace.c', + 'aspeed_lpc.c', + 'aspeed_scu.c', + 'aspeed_sdmc.c', + 'aspeed_xdma.c')) + +softmmu_ss.add(when: 'CONFIG_MSF2', if_true: files('msf2-sysreg.c')) +softmmu_ss.add(when: 'CONFIG_NRF51_SOC', if_true: files('nrf51_rng.c')) + +softmmu_ss.add(when: 'CONFIG_GRLIB', if_true: files('grlib_ahb_apb_pnp.c')) + +specific_ss.add(when: 'CONFIG_AVR_POWER', if_true: files('avr_power.c')) + +specific_ss.add(when: 'CONFIG_IMX', if_true: files('imx6_src.c')) +specific_ss.add(when: 'CONFIG_IOTKIT_SYSCTL', if_true: files('iotkit-sysctl.c')) + +specific_ss.add(when: 'CONFIG_MAC_VIA', if_true: files('mac_via.c')) + +specific_ss.add(when: 'CONFIG_MIPS_CPS', if_true: files('mips_cmgcr.c', 'mips_cpc.c')) +specific_ss.add(when: 'CONFIG_MIPS_ITU', if_true: files('mips_itu.c')) + +specific_ss.add(when: 'CONFIG_SBSA_REF', if_true: files('sbsa_ec.c')) diff --git a/hw/misc/mips_cmgcr.c b/hw/misc/mips_cmgcr.c new file mode 100644 index 000000000..3c8b37f70 --- /dev/null +++ b/hw/misc/mips_cmgcr.c @@ -0,0 +1,255 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. 
+ * Authors: Sanjay Lal + * + * Copyright (C) 2015 Imagination Technologies + */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "hw/misc/mips_cmgcr.h" +#include "hw/misc/mips_cpc.h" +#include "hw/qdev-properties.h" +#include "hw/intc/mips_gic.h" + +static inline bool is_cpc_connected(MIPSGCRState *s) +{ + return s->cpc_mr != NULL; +} + +static inline bool is_gic_connected(MIPSGCRState *s) +{ + return s->gic_mr != NULL; +} + +static inline void update_gcr_base(MIPSGCRState *gcr, uint64_t val) +{ + CPUState *cpu; + MIPSCPU *mips_cpu; + + gcr->gcr_base = val & GCR_BASE_GCRBASE_MSK; + memory_region_set_address(&gcr->iomem, gcr->gcr_base); + + CPU_FOREACH(cpu) { + mips_cpu = MIPS_CPU(cpu); + mips_cpu->env.CP0_CMGCRBase = gcr->gcr_base >> 4; + } +} + +static inline void update_cpc_base(MIPSGCRState *gcr, uint64_t val) +{ + if (is_cpc_connected(gcr)) { + gcr->cpc_base = val & GCR_CPC_BASE_MSK; + memory_region_transaction_begin(); + memory_region_set_address(gcr->cpc_mr, + gcr->cpc_base & GCR_CPC_BASE_CPCBASE_MSK); + memory_region_set_enabled(gcr->cpc_mr, + gcr->cpc_base & GCR_CPC_BASE_CPCEN_MSK); + memory_region_transaction_commit(); + } +} + +static inline void update_gic_base(MIPSGCRState *gcr, uint64_t val) +{ + if (is_gic_connected(gcr)) { + gcr->gic_base = val & GCR_GIC_BASE_MSK; + memory_region_transaction_begin(); + memory_region_set_address(gcr->gic_mr, + gcr->gic_base & GCR_GIC_BASE_GICBASE_MSK); + memory_region_set_enabled(gcr->gic_mr, + gcr->gic_base & GCR_GIC_BASE_GICEN_MSK); + memory_region_transaction_commit(); + } +} + +/* Read GCR registers */ +static uint64_t gcr_read(void *opaque, hwaddr addr, unsigned size) +{ + MIPSGCRState *gcr = (MIPSGCRState *) opaque; + MIPSGCRVPState *current_vps = &gcr->vps[current_cpu->cpu_index]; + MIPSGCRVPState *other_vps = &gcr->vps[current_vps->other]; + + switch (addr) { + /* Global Control Block Register */ + case GCR_CONFIG_OFS: + /* Set PCORES to 0 */ + return 0; + case GCR_BASE_OFS: + return gcr->gcr_base; + case GCR_REV_OFS: + return gcr->gcr_rev; + case GCR_GIC_BASE_OFS: + return gcr->gic_base; + case GCR_CPC_BASE_OFS: + return gcr->cpc_base; + case GCR_GIC_STATUS_OFS: + return is_gic_connected(gcr); + case GCR_CPC_STATUS_OFS: + return is_cpc_connected(gcr); + case GCR_L2_CONFIG_OFS: + /* L2 BYPASS */ + return GCR_L2_CONFIG_BYPASS_MSK; + /* Core-Local and Core-Other Control Blocks */ + case MIPS_CLCB_OFS + GCR_CL_CONFIG_OFS: + case MIPS_COCB_OFS + GCR_CL_CONFIG_OFS: + /* Set PVP to # of VPs - 1 */ + return gcr->num_vps - 1; + case MIPS_CLCB_OFS + GCR_CL_RESETBASE_OFS: + return current_vps->reset_base; + case MIPS_COCB_OFS + GCR_CL_RESETBASE_OFS: + return other_vps->reset_base; + case MIPS_CLCB_OFS + GCR_CL_OTHER_OFS: + return current_vps->other; + case MIPS_COCB_OFS + GCR_CL_OTHER_OFS: + return other_vps->other; + default: + qemu_log_mask(LOG_UNIMP, "Read %d bytes at GCR offset 0x%" HWADDR_PRIx + "\n", size, addr); + return 0; + } + return 0; +} + +static inline target_ulong get_exception_base(MIPSGCRVPState *vps) +{ + /* TODO: BEV_BASE and SELECT_BEV */ + return (int32_t)(vps->reset_base & GCR_CL_RESET_BASE_RESETBASE_MSK); +} + +/* Write GCR registers */ +static void gcr_write(void *opaque, hwaddr addr, uint64_t data, unsigned size) +{ + MIPSGCRState *gcr = (MIPSGCRState *)opaque; + MIPSGCRVPState *current_vps = &gcr->vps[current_cpu->cpu_index]; + MIPSGCRVPState *other_vps = &gcr->vps[current_vps->other]; + + switch (addr) { + case GCR_BASE_OFS: 
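The update helpers above rely on memory_region_transaction_begin()/commit() so that the address and enable changes of a live region are published atomically: a concurrently-running CPU never observes the region enabled at a half-updated address. The same idiom in isolation (a sketch):

    /* Atomically relocate and (re)enable a MemoryRegion that may be
     * in active use by vCPUs. */
    static void move_region(MemoryRegion *mr, hwaddr new_base, bool enable)
    {
        memory_region_transaction_begin();
        memory_region_set_address(mr, new_base);
        memory_region_set_enabled(mr, enable);
        memory_region_transaction_commit();
    }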
+ update_gcr_base(gcr, data); + break; + case GCR_GIC_BASE_OFS: + update_gic_base(gcr, data); + break; + case GCR_CPC_BASE_OFS: + update_cpc_base(gcr, data); + break; + case MIPS_CLCB_OFS + GCR_CL_RESETBASE_OFS: + current_vps->reset_base = data & GCR_CL_RESET_BASE_MSK; + cpu_set_exception_base(current_cpu->cpu_index, + get_exception_base(current_vps)); + break; + case MIPS_COCB_OFS + GCR_CL_RESETBASE_OFS: + other_vps->reset_base = data & GCR_CL_RESET_BASE_MSK; + cpu_set_exception_base(current_vps->other, + get_exception_base(other_vps)); + break; + case MIPS_CLCB_OFS + GCR_CL_OTHER_OFS: + if ((data & GCR_CL_OTHER_MSK) < gcr->num_vps) { + current_vps->other = data & GCR_CL_OTHER_MSK; + } + break; + case MIPS_COCB_OFS + GCR_CL_OTHER_OFS: + if ((data & GCR_CL_OTHER_MSK) < gcr->num_vps) { + other_vps->other = data & GCR_CL_OTHER_MSK; + } + break; + default: + qemu_log_mask(LOG_UNIMP, "Write %d bytes at GCR offset 0x%" HWADDR_PRIx + " 0x%" PRIx64 "\n", size, addr, data); + break; + } +} + +static const MemoryRegionOps gcr_ops = { + .read = gcr_read, + .write = gcr_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .impl = { + .max_access_size = 8, + }, +}; + +static void mips_gcr_init(Object *obj) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + MIPSGCRState *s = MIPS_GCR(obj); + + memory_region_init_io(&s->iomem, OBJECT(s), &gcr_ops, s, + "mips-gcr", GCR_ADDRSPACE_SZ); + sysbus_init_mmio(sbd, &s->iomem); +} + +static void mips_gcr_reset(DeviceState *dev) +{ + MIPSGCRState *s = MIPS_GCR(dev); + int i; + + update_gic_base(s, 0); + update_cpc_base(s, 0); + + for (i = 0; i < s->num_vps; i++) { + s->vps[i].other = 0; + s->vps[i].reset_base = 0xBFC00000 & GCR_CL_RESET_BASE_MSK; + cpu_set_exception_base(i, get_exception_base(&s->vps[i])); + } +} + +static const VMStateDescription vmstate_mips_gcr = { + .name = "mips-gcr", + .version_id = 0, + .minimum_version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_UINT64(cpc_base, MIPSGCRState), + VMSTATE_END_OF_LIST() + }, +}; + +static Property mips_gcr_properties[] = { + DEFINE_PROP_INT32("num-vp", MIPSGCRState, num_vps, 1), + DEFINE_PROP_INT32("gcr-rev", MIPSGCRState, gcr_rev, 0x800), + DEFINE_PROP_UINT64("gcr-base", MIPSGCRState, gcr_base, GCR_BASE_ADDR), + DEFINE_PROP_LINK("gic", MIPSGCRState, gic_mr, TYPE_MEMORY_REGION, + MemoryRegion *), + DEFINE_PROP_LINK("cpc", MIPSGCRState, cpc_mr, TYPE_MEMORY_REGION, + MemoryRegion *), + DEFINE_PROP_END_OF_LIST(), +}; + +static void mips_gcr_realize(DeviceState *dev, Error **errp) +{ + MIPSGCRState *s = MIPS_GCR(dev); + + /* Create local set of registers for each VP */ + s->vps = g_new(MIPSGCRVPState, s->num_vps); +} + +static void mips_gcr_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + device_class_set_props(dc, mips_gcr_properties); + dc->vmsd = &vmstate_mips_gcr; + dc->reset = mips_gcr_reset; + dc->realize = mips_gcr_realize; +} + +static const TypeInfo mips_gcr_info = { + .name = TYPE_MIPS_GCR, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(MIPSGCRState), + .instance_init = mips_gcr_init, + .class_init = mips_gcr_class_init, +}; + +static void mips_gcr_register_types(void) +{ + type_register_static(&mips_gcr_info); +} + +type_init(mips_gcr_register_types) diff --git a/hw/misc/mips_cpc.c b/hw/misc/mips_cpc.c new file mode 100644 index 000000000..4a94c8705 --- /dev/null +++ b/hw/misc/mips_cpc.c @@ -0,0 +1,195 @@ +/* + * Cluster Power Controller emulation + * + * Copyright (c) 2016 Imagination Technologies + * + * This library is free software; you can 
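Note the (int32_t) cast in get_exception_base(): KSEG1 addresses such as the 0xBFC00000 reset base used here must be sign-extended when the target is 64-bit. A worked check of that cast in isolation (illustrative; the out-of-range conversion behaves as two's-complement wrap-around on the platforms QEMU supports):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t base = 0xBFC00000;      /* MIPS reset vector, KSEG1 */
        int64_t  ext  = (int32_t)base;   /* sign-extend as the model does */

        assert((uint64_t)ext == 0xFFFFFFFFBFC00000ULL);
        return 0;
    }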
redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "cpu.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" + +#include "hw/misc/mips_cpc.h" +#include "hw/qdev-properties.h" + +static inline uint64_t cpc_vp_run_mask(MIPSCPCState *cpc) +{ + return (1ULL << cpc->num_vp) - 1; +} + +static void mips_cpu_reset_async_work(CPUState *cs, run_on_cpu_data data) +{ + MIPSCPCState *cpc = (MIPSCPCState *) data.host_ptr; + + cpu_reset(cs); + cs->halted = 0; + cpc->vp_running |= 1ULL << cs->cpu_index; +} + +static void cpc_run_vp(MIPSCPCState *cpc, uint64_t vp_run) +{ + CPUState *cs = first_cpu; + + CPU_FOREACH(cs) { + uint64_t i = 1ULL << cs->cpu_index; + if (i & vp_run & ~cpc->vp_running) { + /* + * To avoid racing with a CPU we are just kicking off. + * We do the final bit of preparation for the work in + * the target CPUs context. + */ + async_safe_run_on_cpu(cs, mips_cpu_reset_async_work, + RUN_ON_CPU_HOST_PTR(cpc)); + } + } +} + +static void cpc_stop_vp(MIPSCPCState *cpc, uint64_t vp_stop) +{ + CPUState *cs = first_cpu; + + CPU_FOREACH(cs) { + uint64_t i = 1ULL << cs->cpu_index; + if (i & vp_stop & cpc->vp_running) { + cpu_interrupt(cs, CPU_INTERRUPT_HALT); + cpc->vp_running &= ~i; + } + } +} + +static void cpc_write(void *opaque, hwaddr offset, uint64_t data, + unsigned size) +{ + MIPSCPCState *s = opaque; + + switch (offset) { + case CPC_CL_BASE_OFS + CPC_VP_RUN_OFS: + case CPC_CO_BASE_OFS + CPC_VP_RUN_OFS: + cpc_run_vp(s, data & cpc_vp_run_mask(s)); + break; + case CPC_CL_BASE_OFS + CPC_VP_STOP_OFS: + case CPC_CO_BASE_OFS + CPC_VP_STOP_OFS: + cpc_stop_vp(s, data & cpc_vp_run_mask(s)); + break; + default: + qemu_log_mask(LOG_UNIMP, + "%s: Bad offset 0x%x\n", __func__, (int)offset); + break; + } + + return; +} + +static uint64_t cpc_read(void *opaque, hwaddr offset, unsigned size) +{ + MIPSCPCState *s = opaque; + + switch (offset) { + case CPC_CL_BASE_OFS + CPC_VP_RUNNING_OFS: + case CPC_CO_BASE_OFS + CPC_VP_RUNNING_OFS: + return s->vp_running; + default: + qemu_log_mask(LOG_UNIMP, + "%s: Bad offset 0x%x\n", __func__, (int)offset); + return 0; + } +} + +static const MemoryRegionOps cpc_ops = { + .read = cpc_read, + .write = cpc_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .impl = { + .max_access_size = 8, + }, +}; + +static void mips_cpc_init(Object *obj) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + MIPSCPCState *s = MIPS_CPC(obj); + + memory_region_init_io(&s->mr, OBJECT(s), &cpc_ops, s, "mips-cpc", + CPC_ADDRSPACE_SZ); + sysbus_init_mmio(sbd, &s->mr); +} + +static void mips_cpc_realize(DeviceState *dev, Error **errp) +{ + MIPSCPCState *s = MIPS_CPC(dev); + + if (s->vp_start_running > cpc_vp_run_mask(s)) { + error_setg(errp, + "incorrect vp_start_running 0x%" PRIx64 " for num_vp = %d", + s->vp_running, s->num_vp); + return; + } +} + +static void mips_cpc_reset(DeviceState *dev) +{ + MIPSCPCState *s = MIPS_CPC(dev); + + /* 
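cpc_vp_run_mask() is the all-VPs bitmask: for num_vp VPs it evaluates to (1 << num_vp) - 1, so guest writes to VP_RUN/VP_STOP are clipped to existing VPs before any CPU is kicked or halted. A worked check (illustrative values):

    #include <assert.h>
    #include <stdint.h>

    static uint64_t vp_run_mask(unsigned num_vp)
    {
        return (1ULL << num_vp) - 1;    /* one bit per virtual processor */
    }

    int main(void)
    {
        assert(vp_run_mask(1) == 0x1);
        assert(vp_run_mask(4) == 0xF);
        /* a guest write of 0xFF with 4 VPs is masked down to 0xF */
        assert((0xFFULL & vp_run_mask(4)) == 0xF);
        return 0;
    }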
Reflect the fact that all VPs are halted on reset */ + s->vp_running = 0; + + /* Put selected VPs into run state */ + cpc_run_vp(s, s->vp_start_running); +} + +static const VMStateDescription vmstate_mips_cpc = { + .name = "mips-cpc", + .version_id = 0, + .minimum_version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_UINT64(vp_running, MIPSCPCState), + VMSTATE_END_OF_LIST() + }, +}; + +static Property mips_cpc_properties[] = { + DEFINE_PROP_UINT32("num-vp", MIPSCPCState, num_vp, 0x1), + DEFINE_PROP_UINT64("vp-start-running", MIPSCPCState, vp_start_running, 0x1), + DEFINE_PROP_END_OF_LIST(), +}; + +static void mips_cpc_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = mips_cpc_realize; + dc->reset = mips_cpc_reset; + dc->vmsd = &vmstate_mips_cpc; + device_class_set_props(dc, mips_cpc_properties); +} + +static const TypeInfo mips_cpc_info = { + .name = TYPE_MIPS_CPC, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(MIPSCPCState), + .instance_init = mips_cpc_init, + .class_init = mips_cpc_class_init, +}; + +static void mips_cpc_register_types(void) +{ + type_register_static(&mips_cpc_info); +} + +type_init(mips_cpc_register_types) diff --git a/hw/misc/mips_itu.c b/hw/misc/mips_itu.c new file mode 100644 index 000000000..80683fed3 --- /dev/null +++ b/hw/misc/mips_itu.c @@ -0,0 +1,581 @@ +/* + * Inter-Thread Communication Unit emulation. + * + * Copyright (c) 2016 Imagination Technologies + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "qapi/error.h" +#include "exec/exec-all.h" +#include "hw/misc/mips_itu.h" +#include "hw/qdev-properties.h" + +#define ITC_TAG_ADDRSPACE_SZ (ITC_ADDRESSMAP_NUM * 8) +/* Initialize as 4kB area to fit all 32 cells with default 128B grain. + Storage may be resized by the software. 
*/ +#define ITC_STORAGE_ADDRSPACE_SZ 0x1000 + +#define ITC_FIFO_NUM_MAX 16 +#define ITC_SEMAPH_NUM_MAX 16 +#define ITC_AM1_NUMENTRIES_OFS 20 + +#define ITC_CELL_PV_MAX_VAL 0xFFFF + +#define ITC_CELL_TAG_FIFO_DEPTH 28 +#define ITC_CELL_TAG_FIFO_PTR 18 +#define ITC_CELL_TAG_FIFO 17 +#define ITC_CELL_TAG_T 16 +#define ITC_CELL_TAG_F 1 +#define ITC_CELL_TAG_E 0 + +#define ITC_AM0_BASE_ADDRESS_MASK 0xFFFFFC00ULL +#define ITC_AM0_EN_MASK 0x1 + +#define ITC_AM1_ADDR_MASK_MASK 0x1FC00 +#define ITC_AM1_ENTRY_GRAIN_MASK 0x7 + +typedef enum ITCView { + ITCVIEW_BYPASS = 0, + ITCVIEW_CONTROL = 1, + ITCVIEW_EF_SYNC = 2, + ITCVIEW_EF_TRY = 3, + ITCVIEW_PV_SYNC = 4, + ITCVIEW_PV_TRY = 5, + ITCVIEW_PV_ICR0 = 15, +} ITCView; + +#define ITC_ICR0_CELL_NUM 16 +#define ITC_ICR0_BLK_GRAIN 8 +#define ITC_ICR0_BLK_GRAIN_MASK 0x7 +#define ITC_ICR0_ERR_AXI 2 +#define ITC_ICR0_ERR_PARITY 1 +#define ITC_ICR0_ERR_EXEC 0 + +MemoryRegion *mips_itu_get_tag_region(MIPSITUState *itu) +{ + return &itu->tag_io; +} + +static uint64_t itc_tag_read(void *opaque, hwaddr addr, unsigned size) +{ + MIPSITUState *tag = (MIPSITUState *)opaque; + uint64_t index = addr >> 3; + + if (index >= ITC_ADDRESSMAP_NUM) { + qemu_log_mask(LOG_GUEST_ERROR, "Read 0x%" PRIx64 "\n", addr); + return 0; + } + + return tag->ITCAddressMap[index]; +} + +void itc_reconfigure(MIPSITUState *tag) +{ + uint64_t *am = &tag->ITCAddressMap[0]; + MemoryRegion *mr = &tag->storage_io; + hwaddr address = am[0] & ITC_AM0_BASE_ADDRESS_MASK; + uint64_t size = (1 * KiB) + (am[1] & ITC_AM1_ADDR_MASK_MASK); + bool is_enabled = (am[0] & ITC_AM0_EN_MASK) != 0; + + if (tag->saar_present) { + address = ((*(uint64_t *) tag->saar) & 0xFFFFFFFFE000ULL) << 4; + size = 1ULL << ((*(uint64_t *) tag->saar >> 1) & 0x1f); + is_enabled = *(uint64_t *) tag->saar & 1; + } + + memory_region_transaction_begin(); + if (!(size & (size - 1))) { + memory_region_set_size(mr, size); + } + memory_region_set_address(mr, address); + memory_region_set_enabled(mr, is_enabled); + memory_region_transaction_commit(); +} + +static void itc_tag_write(void *opaque, hwaddr addr, + uint64_t data, unsigned size) +{ + MIPSITUState *tag = (MIPSITUState *)opaque; + uint64_t *am = &tag->ITCAddressMap[0]; + uint64_t am_old, mask; + uint64_t index = addr >> 3; + + switch (index) { + case 0: + mask = ITC_AM0_BASE_ADDRESS_MASK | ITC_AM0_EN_MASK; + break; + case 1: + mask = ITC_AM1_ADDR_MASK_MASK | ITC_AM1_ENTRY_GRAIN_MASK; + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, "Bad write 0x%" PRIx64 "\n", addr); + return; + } + + am_old = am[index]; + am[index] = (data & mask) | (am_old & ~mask); + if (am_old != am[index]) { + itc_reconfigure(tag); + } +} + +static const MemoryRegionOps itc_tag_ops = { + .read = itc_tag_read, + .write = itc_tag_write, + .impl = { + .max_access_size = 8, + }, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static inline uint32_t get_num_cells(MIPSITUState *s) +{ + return s->num_fifo + s->num_semaphores; +} + +static inline ITCView get_itc_view(hwaddr addr) +{ + return (addr >> 3) & 0xf; +} + +static inline int get_cell_stride_shift(const MIPSITUState *s) +{ + /* Minimum interval (for EntryGain = 0) is 128 B */ + if (s->saar_present) { + return 7 + ((s->icr0 >> ITC_ICR0_BLK_GRAIN) & + ITC_ICR0_BLK_GRAIN_MASK); + } else { + return 7 + (s->ITCAddressMap[1] & ITC_AM1_ENTRY_GRAIN_MASK); + } +} + +static inline ITCStorageCell *get_cell(MIPSITUState *s, + hwaddr addr) +{ + uint32_t cell_idx = addr >> get_cell_stride_shift(s); + uint32_t num_cells = get_num_cells(s); + + if (cell_idx >= num_cells) { 
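Cell decoding is pure address arithmetic: with the default grain of 128 bytes the stride shift is 7, so cell_idx = addr >> 7, and each EntryGrain step doubles the spacing. A worked example (illustrative values only):

    #include <assert.h>

    int main(void)
    {
        unsigned grain = 0;              /* AM1 EntryGrain field */
        unsigned shift = 7 + grain;      /* 128 B per cell by default */

        assert((0x000u >> shift) == 0);  /* first cell */
        assert((0x080u >> shift) == 1);  /* next cell starts at 128 B */
        assert((0x100u >> shift) == 2);

        grain = 1;                       /* 256 B cells */
        shift = 7 + grain;
        assert((0x100u >> shift) == 1);
        return 0;
    }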
+ cell_idx = num_cells - 1; + } + + return &s->cell[cell_idx]; +} + +static void wake_blocked_threads(ITCStorageCell *c) +{ + CPUState *cs; + CPU_FOREACH(cs) { + if (cs->halted && (c->blocked_threads & (1ULL << cs->cpu_index))) { + cpu_interrupt(cs, CPU_INTERRUPT_WAKE); + } + } + c->blocked_threads = 0; +} + +static void QEMU_NORETURN block_thread_and_exit(ITCStorageCell *c) +{ + c->blocked_threads |= 1ULL << current_cpu->cpu_index; + current_cpu->halted = 1; + current_cpu->exception_index = EXCP_HLT; + cpu_loop_exit_restore(current_cpu, current_cpu->mem_io_pc); +} + +/* ITC Bypass View */ + +static inline uint64_t view_bypass_read(ITCStorageCell *c) +{ + if (c->tag.FIFO) { + return c->data[c->fifo_out]; + } else { + return c->data[0]; + } +} + +static inline void view_bypass_write(ITCStorageCell *c, uint64_t val) +{ + if (c->tag.FIFO && (c->tag.FIFOPtr > 0)) { + int idx = (c->fifo_out + c->tag.FIFOPtr - 1) % ITC_CELL_DEPTH; + c->data[idx] = val; + } + + /* ignore a write to the semaphore cell */ +} + +/* ITC Control View */ + +static inline uint64_t view_control_read(ITCStorageCell *c) +{ + return ((uint64_t)c->tag.FIFODepth << ITC_CELL_TAG_FIFO_DEPTH) | + (c->tag.FIFOPtr << ITC_CELL_TAG_FIFO_PTR) | + (c->tag.FIFO << ITC_CELL_TAG_FIFO) | + (c->tag.T << ITC_CELL_TAG_T) | + (c->tag.E << ITC_CELL_TAG_E) | + (c->tag.F << ITC_CELL_TAG_F); +} + +static inline void view_control_write(ITCStorageCell *c, uint64_t val) +{ + c->tag.T = (val >> ITC_CELL_TAG_T) & 1; + c->tag.E = (val >> ITC_CELL_TAG_E) & 1; + c->tag.F = (val >> ITC_CELL_TAG_F) & 1; + + if (c->tag.E) { + c->tag.FIFOPtr = 0; + } +} + +/* ITC Empty/Full View */ + +static uint64_t view_ef_common_read(ITCStorageCell *c, bool blocking) +{ + uint64_t ret = 0; + + if (!c->tag.FIFO) { + return 0; + } + + c->tag.F = 0; + + if (blocking && c->tag.E) { + block_thread_and_exit(c); + } + + if (c->blocked_threads) { + wake_blocked_threads(c); + } + + if (c->tag.FIFOPtr > 0) { + ret = c->data[c->fifo_out]; + c->fifo_out = (c->fifo_out + 1) % ITC_CELL_DEPTH; + c->tag.FIFOPtr--; + } + + if (c->tag.FIFOPtr == 0) { + c->tag.E = 1; + } + + return ret; +} + +static uint64_t view_ef_sync_read(ITCStorageCell *c) +{ + return view_ef_common_read(c, true); +} + +static uint64_t view_ef_try_read(ITCStorageCell *c) +{ + return view_ef_common_read(c, false); +} + +static inline void view_ef_common_write(ITCStorageCell *c, uint64_t val, + bool blocking) +{ + if (!c->tag.FIFO) { + return; + } + + c->tag.E = 0; + + if (blocking && c->tag.F) { + block_thread_and_exit(c); + } + + if (c->blocked_threads) { + wake_blocked_threads(c); + } + + if (c->tag.FIFOPtr < ITC_CELL_DEPTH) { + int idx = (c->fifo_out + c->tag.FIFOPtr) % ITC_CELL_DEPTH; + c->data[idx] = val; + c->tag.FIFOPtr++; + } + + if (c->tag.FIFOPtr == ITC_CELL_DEPTH) { + c->tag.F = 1; + } +} + +static void view_ef_sync_write(ITCStorageCell *c, uint64_t val) +{ + view_ef_common_write(c, val, true); +} + +static void view_ef_try_write(ITCStorageCell *c, uint64_t val) +{ + view_ef_common_write(c, val, false); +} + +/* ITC P/V View */ + +static uint64_t view_pv_common_read(ITCStorageCell *c, bool blocking) +{ + uint64_t ret = c->data[0]; + + if (c->tag.FIFO) { + return 0; + } + + if (c->data[0] > 0) { + c->data[0]--; + } else if (blocking) { + block_thread_and_exit(c); + } + + return ret; +} + +static uint64_t view_pv_sync_read(ITCStorageCell *c) +{ + return view_pv_common_read(c, true); +} + +static uint64_t view_pv_try_read(ITCStorageCell *c) +{ + return view_pv_common_read(c, false); +} + +static inline void 
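The Empty/Full views above implement a classic ring buffer: fifo_out is the read index, FIFOPtr is the fill count, and the next write slot is (fifo_out + FIFOPtr) % depth. The same invariant in a self-contained sketch (depth 4 for brevity; the model uses ITC_CELL_DEPTH):

    #include <assert.h>
    #include <stdint.h>

    #define DEPTH 4

    int main(void)
    {
        uint64_t data[DEPTH];
        unsigned out = 0, fill = 0;

        /* push three values: write slot is (out + fill) % DEPTH */
        for (uint64_t v = 1; v <= 3; v++) {
            data[(out + fill++) % DEPTH] = v;
        }
        /* pop one: read at out, then advance out and shrink fill */
        assert(data[out] == 1);
        out = (out + 1) % DEPTH;
        fill--;

        assert(fill == 2 && data[out] == 2);  /* FIFO order preserved */
        return 0;
    }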
view_pv_common_write(ITCStorageCell *c) +{ + if (c->tag.FIFO) { + return; + } + + if (c->data[0] < ITC_CELL_PV_MAX_VAL) { + c->data[0]++; + } + + if (c->blocked_threads) { + wake_blocked_threads(c); + } +} + +static void view_pv_sync_write(ITCStorageCell *c) +{ + view_pv_common_write(c); +} + +static void view_pv_try_write(ITCStorageCell *c) +{ + view_pv_common_write(c); +} + +static void raise_exception(int excp) +{ + current_cpu->exception_index = excp; + cpu_loop_exit(current_cpu); +} + +static uint64_t itc_storage_read(void *opaque, hwaddr addr, unsigned size) +{ + MIPSITUState *s = (MIPSITUState *)opaque; + ITCStorageCell *cell = get_cell(s, addr); + ITCView view = get_itc_view(addr); + uint64_t ret = -1; + + switch (size) { + case 1: + case 2: + s->icr0 |= 1 << ITC_ICR0_ERR_AXI; + raise_exception(EXCP_DBE); + return 0; + } + + switch (view) { + case ITCVIEW_BYPASS: + ret = view_bypass_read(cell); + break; + case ITCVIEW_CONTROL: + ret = view_control_read(cell); + break; + case ITCVIEW_EF_SYNC: + ret = view_ef_sync_read(cell); + break; + case ITCVIEW_EF_TRY: + ret = view_ef_try_read(cell); + break; + case ITCVIEW_PV_SYNC: + ret = view_pv_sync_read(cell); + break; + case ITCVIEW_PV_TRY: + ret = view_pv_try_read(cell); + break; + case ITCVIEW_PV_ICR0: + ret = s->icr0; + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "itc_storage_read: Bad ITC View %d\n", (int)view); + break; + } + + return ret; +} + +static void itc_storage_write(void *opaque, hwaddr addr, uint64_t data, + unsigned size) +{ + MIPSITUState *s = (MIPSITUState *)opaque; + ITCStorageCell *cell = get_cell(s, addr); + ITCView view = get_itc_view(addr); + + switch (size) { + case 1: + case 2: + s->icr0 |= 1 << ITC_ICR0_ERR_AXI; + raise_exception(EXCP_DBE); + return; + } + + switch (view) { + case ITCVIEW_BYPASS: + view_bypass_write(cell, data); + break; + case ITCVIEW_CONTROL: + view_control_write(cell, data); + break; + case ITCVIEW_EF_SYNC: + view_ef_sync_write(cell, data); + break; + case ITCVIEW_EF_TRY: + view_ef_try_write(cell, data); + break; + case ITCVIEW_PV_SYNC: + view_pv_sync_write(cell); + break; + case ITCVIEW_PV_TRY: + view_pv_try_write(cell); + break; + case ITCVIEW_PV_ICR0: + if (data & 0x7) { + /* clear ERROR bits */ + s->icr0 &= ~(data & 0x7); + } + /* set BLK_GRAIN */ + s->icr0 &= ~0x700; + s->icr0 |= data & 0x700; + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "itc_storage_write: Bad ITC View %d\n", (int)view); + break; + } + +} + +static const MemoryRegionOps itc_storage_ops = { + .read = itc_storage_read, + .write = itc_storage_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void itc_reset_cells(MIPSITUState *s) +{ + int i; + + memset(s->cell, 0, get_num_cells(s) * sizeof(s->cell[0])); + + for (i = 0; i < s->num_fifo; i++) { + s->cell[i].tag.E = 1; + s->cell[i].tag.FIFO = 1; + s->cell[i].tag.FIFODepth = ITC_CELL_DEPTH_SHIFT; + } +} + +static void mips_itu_init(Object *obj) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + MIPSITUState *s = MIPS_ITU(obj); + + memory_region_init_io(&s->storage_io, OBJECT(s), &itc_storage_ops, s, + "mips-itc-storage", ITC_STORAGE_ADDRSPACE_SZ); + sysbus_init_mmio(sbd, &s->storage_io); + + memory_region_init_io(&s->tag_io, OBJECT(s), &itc_tag_ops, s, + "mips-itc-tag", ITC_TAG_ADDRSPACE_SZ); +} + +static void mips_itu_realize(DeviceState *dev, Error **errp) +{ + MIPSITUState *s = MIPS_ITU(dev); + + if (s->num_fifo > ITC_FIFO_NUM_MAX) { + error_setg(errp, "Exceed maximum number of FIFO cells: %d", + s->num_fifo); + return; + } + if (s->num_semaphores > 
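Two guest-visible conventions are worth noting in itc_storage_read()/itc_storage_write() above: 8- and 16-bit accesses latch the AXI error flag and raise a bus error (EXCP_DBE), and the low three ICR0 error bits are write-one-to-clear. A sketch of the W1C idiom on its own (bit values taken from the ITC_ICR0_ERR_* defines above):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t icr0 = 0x5;      /* AXI (bit 2) + EXEC (bit 0) latched */

        /* write-one-to-clear: only the written 1s are cleared */
        uint32_t data = 0x4;      /* clear the AXI error only */
        icr0 &= ~(data & 0x7);

        assert(icr0 == 0x1);      /* EXEC error still pending */
        return 0;
    }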
ITC_SEMAPH_NUM_MAX) { + error_setg(errp, "Exceed maximum number of Semaphore cells: %d", + s->num_semaphores); + return; + } + + s->cell = g_new(ITCStorageCell, get_num_cells(s)); +} + +static void mips_itu_reset(DeviceState *dev) +{ + MIPSITUState *s = MIPS_ITU(dev); + + if (s->saar_present) { + *(uint64_t *) s->saar = 0x11 << 1; + s->icr0 = get_num_cells(s) << ITC_ICR0_CELL_NUM; + } else { + s->ITCAddressMap[0] = 0; + s->ITCAddressMap[1] = + ((ITC_STORAGE_ADDRSPACE_SZ - 1) & ITC_AM1_ADDR_MASK_MASK) | + (get_num_cells(s) << ITC_AM1_NUMENTRIES_OFS); + } + itc_reconfigure(s); + + itc_reset_cells(s); +} + +static Property mips_itu_properties[] = { + DEFINE_PROP_INT32("num-fifo", MIPSITUState, num_fifo, + ITC_FIFO_NUM_MAX), + DEFINE_PROP_INT32("num-semaphores", MIPSITUState, num_semaphores, + ITC_SEMAPH_NUM_MAX), + DEFINE_PROP_BOOL("saar-present", MIPSITUState, saar_present, false), + DEFINE_PROP_END_OF_LIST(), +}; + +static void mips_itu_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + device_class_set_props(dc, mips_itu_properties); + dc->realize = mips_itu_realize; + dc->reset = mips_itu_reset; +} + +static const TypeInfo mips_itu_info = { + .name = TYPE_MIPS_ITU, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(MIPSITUState), + .instance_init = mips_itu_init, + .class_init = mips_itu_class_init, +}; + +static void mips_itu_register_types(void) +{ + type_register_static(&mips_itu_info); +} + +type_init(mips_itu_register_types) diff --git a/hw/misc/mos6522.c b/hw/misc/mos6522.c new file mode 100644 index 000000000..1c57332b4 --- /dev/null +++ b/hw/misc/mos6522.c @@ -0,0 +1,541 @@ +/* + * QEMU MOS6522 VIA emulation + * + * Copyright (c) 2004-2007 Fabrice Bellard + * Copyright (c) 2007 Jocelyn Mayer + * Copyright (c) 2018 Mark Cave-Ayland + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include "hw/input/adb.h" +#include "hw/irq.h" +#include "hw/misc/mos6522.h" +#include "hw/qdev-properties.h" +#include "migration/vmstate.h" +#include "qemu/timer.h" +#include "qemu/cutils.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "trace.h" + +/* XXX: implement all timer modes */ + +static void mos6522_timer1_update(MOS6522State *s, MOS6522Timer *ti, + int64_t current_time); +static void mos6522_timer2_update(MOS6522State *s, MOS6522Timer *ti, + int64_t current_time); + +static void mos6522_update_irq(MOS6522State *s) +{ + if (s->ifr & s->ier) { + qemu_irq_raise(s->irq); + } else { + qemu_irq_lower(s->irq); + } +} + +static uint64_t get_counter_value(MOS6522State *s, MOS6522Timer *ti) +{ + MOS6522DeviceClass *mdc = MOS6522_GET_CLASS(s); + + if (ti->index == 0) { + return mdc->get_timer1_counter_value(s, ti); + } else { + return mdc->get_timer2_counter_value(s, ti); + } +} + +static uint64_t get_load_time(MOS6522State *s, MOS6522Timer *ti) +{ + MOS6522DeviceClass *mdc = MOS6522_GET_CLASS(s); + + if (ti->index == 0) { + return mdc->get_timer1_load_time(s, ti); + } else { + return mdc->get_timer2_load_time(s, ti); + } +} + +static unsigned int get_counter(MOS6522State *s, MOS6522Timer *ti) +{ + int64_t d; + unsigned int counter; + + d = get_counter_value(s, ti); + + if (ti->index == 0) { + /* the timer goes down from latch to -1 (period of latch + 2) */ + if (d <= (ti->counter_value + 1)) { + counter = (ti->counter_value - d) & 0xffff; + } else { + counter = (d - (ti->counter_value + 1)) % (ti->latch + 2); + counter = (ti->latch - counter) & 0xffff; + } + } else { + counter = (ti->counter_value - d) & 0xffff; + } + return counter; +} + +static void set_counter(MOS6522State *s, MOS6522Timer *ti, unsigned int val) +{ + trace_mos6522_set_counter(1 + ti->index, val); + ti->load_time = get_load_time(s, ti); + ti->counter_value = val; + if (ti->index == 0) { + mos6522_timer1_update(s, ti, ti->load_time); + } else { + mos6522_timer2_update(s, ti, ti->load_time); + } +} + +static int64_t get_next_irq_time(MOS6522State *s, MOS6522Timer *ti, + int64_t current_time) +{ + int64_t d, next_time; + unsigned int counter; + + if (ti->frequency == 0) { + return INT64_MAX; + } + + /* current counter value */ + d = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - ti->load_time, + ti->frequency, NANOSECONDS_PER_SECOND); + + /* the timer goes down from latch to -1 (period of latch + 2) */ + if (d <= (ti->counter_value + 1)) { + counter = (ti->counter_value - d) & 0xffff; + } else { + counter = (d - (ti->counter_value + 1)) % (ti->latch + 2); + counter = (ti->latch - counter) & 0xffff; + } + + /* Note: we consider the irq is raised on 0 */ + if (counter == 0xffff) { + next_time = d + ti->latch + 1; + } else if (counter == 0) { + next_time = d + ti->latch + 2; + } else { + next_time = d + counter; + } + trace_mos6522_get_next_irq_time(ti->latch, d, next_time - d); + next_time = muldiv64(next_time, NANOSECONDS_PER_SECOND, ti->frequency) + + ti->load_time; + + if (next_time <= current_time) { + next_time = current_time + 1; + } + return next_time; +} + +static void mos6522_timer1_update(MOS6522State *s, MOS6522Timer *ti, + int64_t current_time) +{ + if (!ti->timer) { + return; + } + ti->next_irq_time = get_next_irq_time(s, ti, current_time); + if ((s->ier & T1_INT) == 0 || (s->acr & T1MODE) != T1MODE_CONT) { + timer_del(ti->timer); + } else { + timer_mod(ti->timer, ti->next_irq_time); + } +} + +static void mos6522_timer2_update(MOS6522State *s, MOS6522Timer *ti, + 
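The T1 arithmetic models the real 6522: the counter free-runs from the latch down through 0 to -1 and then reloads, giving a period of latch + 2 ticks. Replaying get_counter()'s two branches with small numbers (illustrative values):

    #include <assert.h>

    int main(void)
    {
        unsigned latch = 10, counter_value = 10, d, counter;

        /* branch 1: still within the first count-down */
        d = 4;
        counter = (counter_value - d) & 0xffff;
        assert(counter == 6);

        /* branch 2: past the wrap; the period is latch + 2 = 12 ticks.
         * d = 15 is 4 ticks after the reload at d = counter_value + 1. */
        d = 15;
        counter = (d - (counter_value + 1)) % (latch + 2);
        counter = (latch - counter) & 0xffff;
        assert(counter == 6);
        return 0;
    }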
int64_t current_time) +{ + if (!ti->timer) { + return; + } + ti->next_irq_time = get_next_irq_time(s, ti, current_time); + if ((s->ier & T2_INT) == 0) { + timer_del(ti->timer); + } else { + timer_mod(ti->timer, ti->next_irq_time); + } +} + +static void mos6522_timer1(void *opaque) +{ + MOS6522State *s = opaque; + MOS6522Timer *ti = &s->timers[0]; + + mos6522_timer1_update(s, ti, ti->next_irq_time); + s->ifr |= T1_INT; + mos6522_update_irq(s); +} + +static void mos6522_timer2(void *opaque) +{ + MOS6522State *s = opaque; + MOS6522Timer *ti = &s->timers[1]; + + mos6522_timer2_update(s, ti, ti->next_irq_time); + s->ifr |= T2_INT; + mos6522_update_irq(s); +} + +static void mos6522_set_sr_int(MOS6522State *s) +{ + trace_mos6522_set_sr_int(); + s->ifr |= SR_INT; + mos6522_update_irq(s); +} + +static uint64_t mos6522_get_counter_value(MOS6522State *s, MOS6522Timer *ti) +{ + return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - ti->load_time, + ti->frequency, NANOSECONDS_PER_SECOND); +} + +static uint64_t mos6522_get_load_time(MOS6522State *s, MOS6522Timer *ti) +{ + uint64_t load_time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + + return load_time; +} + +static void mos6522_portA_write(MOS6522State *s) +{ + qemu_log_mask(LOG_UNIMP, "portA_write unimplemented\n"); +} + +static void mos6522_portB_write(MOS6522State *s) +{ + qemu_log_mask(LOG_UNIMP, "portB_write unimplemented\n"); +} + +uint64_t mos6522_read(void *opaque, hwaddr addr, unsigned size) +{ + MOS6522State *s = opaque; + uint32_t val; + int64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + + if (now >= s->timers[0].next_irq_time) { + mos6522_timer1_update(s, &s->timers[0], now); + s->ifr |= T1_INT; + } + if (now >= s->timers[1].next_irq_time) { + mos6522_timer2_update(s, &s->timers[1], now); + s->ifr |= T2_INT; + } + switch (addr) { + case VIA_REG_B: + val = s->b; + break; + case VIA_REG_A: + qemu_log_mask(LOG_UNIMP, "Read access to register A with handshake"); + /* fall through */ + case VIA_REG_ANH: + val = s->a; + break; + case VIA_REG_DIRB: + val = s->dirb; + break; + case VIA_REG_DIRA: + val = s->dira; + break; + case VIA_REG_T1CL: + val = get_counter(s, &s->timers[0]) & 0xff; + s->ifr &= ~T1_INT; + mos6522_update_irq(s); + break; + case VIA_REG_T1CH: + val = get_counter(s, &s->timers[0]) >> 8; + mos6522_update_irq(s); + break; + case VIA_REG_T1LL: + val = s->timers[0].latch & 0xff; + break; + case VIA_REG_T1LH: + /* XXX: check this */ + val = (s->timers[0].latch >> 8) & 0xff; + break; + case VIA_REG_T2CL: + val = get_counter(s, &s->timers[1]) & 0xff; + s->ifr &= ~T2_INT; + mos6522_update_irq(s); + break; + case VIA_REG_T2CH: + val = get_counter(s, &s->timers[1]) >> 8; + break; + case VIA_REG_SR: + val = s->sr; + s->ifr &= ~SR_INT; + mos6522_update_irq(s); + break; + case VIA_REG_ACR: + val = s->acr; + break; + case VIA_REG_PCR: + val = s->pcr; + break; + case VIA_REG_IFR: + val = s->ifr; + if (s->ifr & s->ier) { + val |= 0x80; + } + break; + case VIA_REG_IER: + val = s->ier | 0x80; + break; + default: + g_assert_not_reached(); + } + + if (addr != VIA_REG_IFR || val != 0) { + trace_mos6522_read(addr, val); + } + + return val; +} + +void mos6522_write(void *opaque, hwaddr addr, uint64_t val, unsigned size) +{ + MOS6522State *s = opaque; + MOS6522DeviceClass *mdc = MOS6522_GET_CLASS(s); + + trace_mos6522_write(addr, val); + + switch (addr) { + case VIA_REG_B: + s->b = (s->b & ~s->dirb) | (val & s->dirb); + mdc->portB_write(s); + break; + case VIA_REG_A: + qemu_log_mask(LOG_UNIMP, "Write access to register A with handshake"); + /* fall 
through */ + case VIA_REG_ANH: + s->a = (s->a & ~s->dira) | (val & s->dira); + mdc->portA_write(s); + break; + case VIA_REG_DIRB: + s->dirb = val; + break; + case VIA_REG_DIRA: + s->dira = val; + break; + case VIA_REG_T1CL: + s->timers[0].latch = (s->timers[0].latch & 0xff00) | val; + mos6522_timer1_update(s, &s->timers[0], + qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL)); + break; + case VIA_REG_T1CH: + s->timers[0].latch = (s->timers[0].latch & 0xff) | (val << 8); + s->ifr &= ~T1_INT; + set_counter(s, &s->timers[0], s->timers[0].latch); + break; + case VIA_REG_T1LL: + s->timers[0].latch = (s->timers[0].latch & 0xff00) | val; + mos6522_timer1_update(s, &s->timers[0], + qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL)); + break; + case VIA_REG_T1LH: + s->timers[0].latch = (s->timers[0].latch & 0xff) | (val << 8); + s->ifr &= ~T1_INT; + mos6522_timer1_update(s, &s->timers[0], + qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL)); + break; + case VIA_REG_T2CL: + s->timers[1].latch = (s->timers[1].latch & 0xff00) | val; + break; + case VIA_REG_T2CH: + /* To ensure T2 generates an interrupt on zero crossing with the + common timer code, write the value directly from the latch to + the counter */ + s->timers[1].latch = (s->timers[1].latch & 0xff) | (val << 8); + s->ifr &= ~T2_INT; + set_counter(s, &s->timers[1], s->timers[1].latch); + break; + case VIA_REG_SR: + s->sr = val; + break; + case VIA_REG_ACR: + s->acr = val; + mos6522_timer1_update(s, &s->timers[0], + qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL)); + break; + case VIA_REG_PCR: + s->pcr = val; + break; + case VIA_REG_IFR: + /* reset bits */ + s->ifr &= ~val; + mos6522_update_irq(s); + break; + case VIA_REG_IER: + if (val & IER_SET) { + /* set bits */ + s->ier |= val & 0x7f; + } else { + /* reset bits */ + s->ier &= ~val; + } + mos6522_update_irq(s); + /* if IER is modified starts needed timers */ + mos6522_timer1_update(s, &s->timers[0], + qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL)); + mos6522_timer2_update(s, &s->timers[1], + qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL)); + break; + default: + g_assert_not_reached(); + } +} + +static const MemoryRegionOps mos6522_ops = { + .read = mos6522_read, + .write = mos6522_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + .min_access_size = 1, + .max_access_size = 1, + }, +}; + +static const VMStateDescription vmstate_mos6522_timer = { + .name = "mos6522_timer", + .version_id = 0, + .minimum_version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_UINT16(latch, MOS6522Timer), + VMSTATE_UINT16(counter_value, MOS6522Timer), + VMSTATE_INT64(load_time, MOS6522Timer), + VMSTATE_INT64(next_irq_time, MOS6522Timer), + VMSTATE_TIMER_PTR(timer, MOS6522Timer), + VMSTATE_END_OF_LIST() + } +}; + +const VMStateDescription vmstate_mos6522 = { + .name = "mos6522", + .version_id = 0, + .minimum_version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_UINT8(a, MOS6522State), + VMSTATE_UINT8(b, MOS6522State), + VMSTATE_UINT8(dira, MOS6522State), + VMSTATE_UINT8(dirb, MOS6522State), + VMSTATE_UINT8(sr, MOS6522State), + VMSTATE_UINT8(acr, MOS6522State), + VMSTATE_UINT8(pcr, MOS6522State), + VMSTATE_UINT8(ifr, MOS6522State), + VMSTATE_UINT8(ier, MOS6522State), + VMSTATE_STRUCT_ARRAY(timers, MOS6522State, 2, 0, + vmstate_mos6522_timer, MOS6522Timer), + VMSTATE_END_OF_LIST() + } +}; + +static void mos6522_reset(DeviceState *dev) +{ + MOS6522State *s = MOS6522(dev); + + s->b = 0; + s->a = 0; + s->dirb = 0xff; + s->dira = 0; + s->sr = 0; + s->acr = 0; + s->pcr = 0; + s->ifr = 0; + s->ier = 0; + /* s->ier = T1_INT | SR_INT; */ + + s->timers[0].frequency = 
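The IER write handling above follows the 6522 datasheet protocol: bit 7 selects set (1) or clear (0) and bits 0-6 name the interrupts to change, so a driver can enable or disable sources without a read-modify-write. A worked example (T1_INT is bit 6, value 0x40, per the register layout used here):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint8_t ier = 0;

        /* write 0xC0: bit 7 set => enable the named bits (T1, 0x40) */
        ier |= 0xC0 & 0x7f;
        assert(ier == 0x40);

        /* write 0x40: bit 7 clear => disable T1 again */
        ier &= (uint8_t)~0x40;
        assert(ier == 0x00);
        return 0;
    }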
s->frequency; + s->timers[0].latch = 0xffff; + set_counter(s, &s->timers[0], 0xffff); + timer_del(s->timers[0].timer); + + s->timers[1].frequency = s->frequency; + s->timers[1].latch = 0xffff; + timer_del(s->timers[1].timer); +} + +static void mos6522_init(Object *obj) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + MOS6522State *s = MOS6522(obj); + int i; + + memory_region_init_io(&s->mem, obj, &mos6522_ops, s, "mos6522", 0x10); + sysbus_init_mmio(sbd, &s->mem); + sysbus_init_irq(sbd, &s->irq); + + for (i = 0; i < ARRAY_SIZE(s->timers); i++) { + s->timers[i].index = i; + } + + s->timers[0].timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, mos6522_timer1, s); + s->timers[1].timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, mos6522_timer2, s); +} + +static void mos6522_finalize(Object *obj) +{ + MOS6522State *s = MOS6522(obj); + + timer_free(s->timers[0].timer); + timer_free(s->timers[1].timer); +} + +static Property mos6522_properties[] = { + DEFINE_PROP_UINT64("frequency", MOS6522State, frequency, 0), + DEFINE_PROP_END_OF_LIST() +}; + +static void mos6522_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + MOS6522DeviceClass *mdc = MOS6522_CLASS(oc); + + dc->reset = mos6522_reset; + dc->vmsd = &vmstate_mos6522; + device_class_set_props(dc, mos6522_properties); + mdc->parent_reset = dc->reset; + mdc->set_sr_int = mos6522_set_sr_int; + mdc->portB_write = mos6522_portB_write; + mdc->portA_write = mos6522_portA_write; + mdc->update_irq = mos6522_update_irq; + mdc->get_timer1_counter_value = mos6522_get_counter_value; + mdc->get_timer2_counter_value = mos6522_get_counter_value; + mdc->get_timer1_load_time = mos6522_get_load_time; + mdc->get_timer2_load_time = mos6522_get_load_time; +} + +static const TypeInfo mos6522_type_info = { + .name = TYPE_MOS6522, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(MOS6522State), + .instance_init = mos6522_init, + .instance_finalize = mos6522_finalize, + .abstract = true, + .class_size = sizeof(MOS6522DeviceClass), + .class_init = mos6522_class_init, +}; + +static void mos6522_register_types(void) +{ + type_register_static(&mos6522_type_info); +} + +type_init(mos6522_register_types) diff --git a/hw/misc/mps2-fpgaio.c b/hw/misc/mps2-fpgaio.c new file mode 100644 index 000000000..07b8cbdad --- /dev/null +++ b/hw/misc/mps2-fpgaio.c @@ -0,0 +1,355 @@ +/* + * ARM MPS2 AN505 FPGAIO emulation + * + * Copyright (c) 2018 Linaro Limited + * Written by Peter Maydell + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 or + * (at your option) any later version. + */ + +/* This is a model of the "FPGA system control and I/O" block found + * in the AN505 FPGA image for the MPS2 devboard. 
+ * It is documented in AN505: + * https://developer.arm.com/documentation/dai0505/latest/ + */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "qapi/error.h" +#include "trace.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "hw/registerfields.h" +#include "hw/misc/mps2-fpgaio.h" +#include "hw/misc/led.h" +#include "hw/qdev-properties.h" +#include "qemu/timer.h" + +REG32(LED0, 0) +REG32(DBGCTRL, 4) +REG32(BUTTON, 8) +REG32(CLK1HZ, 0x10) +REG32(CLK100HZ, 0x14) +REG32(COUNTER, 0x18) +REG32(PRESCALE, 0x1c) +REG32(PSCNTR, 0x20) +REG32(SWITCH, 0x28) +REG32(MISC, 0x4c) + +static uint32_t counter_from_tickoff(int64_t now, int64_t tick_offset, int frq) +{ + return muldiv64(now - tick_offset, frq, NANOSECONDS_PER_SECOND); +} + +static int64_t tickoff_from_counter(int64_t now, uint32_t count, int frq) +{ + return now - muldiv64(count, NANOSECONDS_PER_SECOND, frq); +} + +static void resync_counter(MPS2FPGAIO *s) +{ + /* + * Update s->counter and s->pscntr to their true current values + * by calculating how many times PSCNTR has ticked since the + * last time we did a resync. + */ + int64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + int64_t elapsed = now - s->pscntr_sync_ticks; + + /* + * Round elapsed down to a whole number of PSCNTR ticks, so we don't + * lose time if we do multiple resyncs in a single tick. + */ + uint64_t ticks = muldiv64(elapsed, s->prescale_clk, NANOSECONDS_PER_SECOND); + + /* + * Work out what PSCNTR and COUNTER have moved to. We assume that + * PSCNTR reloads from PRESCALE one tick-period after it hits zero, + * and that COUNTER increments at the same moment. + */ + if (ticks == 0) { + /* We haven't ticked since the last time we were asked */ + return; + } else if (ticks < s->pscntr) { + /* We haven't yet reached zero, just reduce the PSCNTR */ + s->pscntr -= ticks; + } else { + if (s->prescale == 0) { + /* + * If the reload value is zero then the PSCNTR will stick + * at zero once it reaches it, and so we will increment + * COUNTER every tick after that. + */ + s->counter += ticks - s->pscntr; + s->pscntr = 0; + } else { + /* + * This is the complicated bit. This ASCII art diagram gives an + * example with PRESCALE==5 PSCNTR==7: + * + * ticks 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 + * PSCNTR 7 6 5 4 3 2 1 0 5 4 3 2 1 0 5 + * cinc 1 2 + * y 0 1 2 3 4 5 6 7 8 9 10 11 12 + * x 0 1 2 3 4 5 0 1 2 3 4 5 0 + * + * where x = y % (s->prescale + 1) + * and so PSCNTR = s->prescale - x + * and COUNTER is incremented by y / (s->prescale + 1) + * + * The case where PSCNTR < PRESCALE works out the same, + * though we must be careful to calculate y as 64-bit unsigned + * for all parts of the expression. + * y < 0 is not possible because that implies ticks < s->pscntr. + */ + uint64_t y = ticks - s->pscntr + s->prescale; + s->pscntr = s->prescale - (y % (s->prescale + 1)); + s->counter += y / (s->prescale + 1); + } + } + + /* + * Only advance the sync time to the timestamp of the last PSCNTR tick, + * not all the way to 'now', so we don't lose time if we do multiple + * resyncs in a single tick. 
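+ *
+ * As a worked check against the diagram above (same numbers, shown
+ * purely for illustration): with PRESCALE == 5 and PSCNTR == 7, a
+ * resync after ticks == 14 computes y = 14 - 7 + 5 = 12, giving
+ * PSCNTR = 5 - (12 % 6) = 5 and COUNTER += 12 / 6 = 2, which matches
+ * the PSCNTR and cinc rows at tick 14.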
+ */ + s->pscntr_sync_ticks += muldiv64(ticks, NANOSECONDS_PER_SECOND, + s->prescale_clk); +} + +static uint64_t mps2_fpgaio_read(void *opaque, hwaddr offset, unsigned size) +{ + MPS2FPGAIO *s = MPS2_FPGAIO(opaque); + uint64_t r; + int64_t now; + + switch (offset) { + case A_LED0: + r = s->led0; + break; + case A_DBGCTRL: + if (!s->has_dbgctrl) { + goto bad_offset; + } + r = s->dbgctrl; + break; + case A_BUTTON: + /* User-pressable board buttons. We don't model that, so just return + * zeroes. + */ + r = 0; + break; + case A_PRESCALE: + r = s->prescale; + break; + case A_MISC: + r = s->misc; + break; + case A_CLK1HZ: + now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + r = counter_from_tickoff(now, s->clk1hz_tick_offset, 1); + break; + case A_CLK100HZ: + now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + r = counter_from_tickoff(now, s->clk100hz_tick_offset, 100); + break; + case A_COUNTER: + resync_counter(s); + r = s->counter; + break; + case A_PSCNTR: + resync_counter(s); + r = s->pscntr; + break; + case A_SWITCH: + if (!s->has_switches) { + goto bad_offset; + } + /* User-togglable board switches. We don't model that, so report 0. */ + r = 0; + break; + default: + bad_offset: + qemu_log_mask(LOG_GUEST_ERROR, + "MPS2 FPGAIO read: bad offset %x\n", (int) offset); + r = 0; + break; + } + + trace_mps2_fpgaio_read(offset, r, size); + return r; +} + +static void mps2_fpgaio_write(void *opaque, hwaddr offset, uint64_t value, + unsigned size) +{ + MPS2FPGAIO *s = MPS2_FPGAIO(opaque); + int64_t now; + + trace_mps2_fpgaio_write(offset, value, size); + + switch (offset) { + case A_LED0: + if (s->num_leds != 0) { + uint32_t i; + + s->led0 = value & MAKE_64BIT_MASK(0, s->num_leds); + for (i = 0; i < s->num_leds; i++) { + led_set_state(s->led[i], value & (1 << i)); + } + } + break; + case A_DBGCTRL: + if (!s->has_dbgctrl) { + goto bad_offset; + } + qemu_log_mask(LOG_UNIMP, + "MPS2 FPGAIO: DBGCTRL unimplemented\n"); + s->dbgctrl = value; + break; + case A_PRESCALE: + resync_counter(s); + s->prescale = value; + break; + case A_MISC: + /* These are control bits for some of the other devices on the + * board (SPI, CLCD, etc). We don't implement that yet, so just + * make the bits read as written. 
+ */ + qemu_log_mask(LOG_UNIMP, + "MPS2 FPGAIO: MISC control bits unimplemented\n"); + s->misc = value; + break; + case A_CLK1HZ: + now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + s->clk1hz_tick_offset = tickoff_from_counter(now, value, 1); + break; + case A_CLK100HZ: + now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + s->clk100hz_tick_offset = tickoff_from_counter(now, value, 100); + break; + case A_COUNTER: + resync_counter(s); + s->counter = value; + break; + case A_PSCNTR: + resync_counter(s); + s->pscntr = value; + break; + default: + bad_offset: + qemu_log_mask(LOG_GUEST_ERROR, + "MPS2 FPGAIO write: bad offset 0x%x\n", (int) offset); + break; + } +} + +static const MemoryRegionOps mps2_fpgaio_ops = { + .read = mps2_fpgaio_read, + .write = mps2_fpgaio_write, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static void mps2_fpgaio_reset(DeviceState *dev) +{ + MPS2FPGAIO *s = MPS2_FPGAIO(dev); + int64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + + trace_mps2_fpgaio_reset(); + s->led0 = 0; + s->prescale = 0; + s->misc = 0; + s->clk1hz_tick_offset = tickoff_from_counter(now, 0, 1); + s->clk100hz_tick_offset = tickoff_from_counter(now, 0, 100); + s->counter = 0; + s->pscntr = 0; + s->pscntr_sync_ticks = now; + + for (size_t i = 0; i < s->num_leds; i++) { + device_cold_reset(DEVICE(s->led[i])); + } +} + +static void mps2_fpgaio_init(Object *obj) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + MPS2FPGAIO *s = MPS2_FPGAIO(obj); + + memory_region_init_io(&s->iomem, obj, &mps2_fpgaio_ops, s, + "mps2-fpgaio", 0x1000); + sysbus_init_mmio(sbd, &s->iomem); +} + +static void mps2_fpgaio_realize(DeviceState *dev, Error **errp) +{ + MPS2FPGAIO *s = MPS2_FPGAIO(dev); + uint32_t i; + + if (s->num_leds > MPS2FPGAIO_MAX_LEDS) { + error_setg(errp, "num-leds cannot be greater than %d", + MPS2FPGAIO_MAX_LEDS); + return; + } + + for (i = 0; i < s->num_leds; i++) { + g_autofree char *ledname = g_strdup_printf("USERLED%d", i); + s->led[i] = led_create_simple(OBJECT(dev), GPIO_POLARITY_ACTIVE_HIGH, + LED_COLOR_GREEN, ledname); + } +} + +static const VMStateDescription mps2_fpgaio_vmstate = { + .name = "mps2-fpgaio", + .version_id = 3, + .minimum_version_id = 3, + .fields = (VMStateField[]) { + VMSTATE_UINT32(led0, MPS2FPGAIO), + VMSTATE_UINT32(prescale, MPS2FPGAIO), + VMSTATE_UINT32(misc, MPS2FPGAIO), + VMSTATE_UINT32(dbgctrl, MPS2FPGAIO), + VMSTATE_INT64(clk1hz_tick_offset, MPS2FPGAIO), + VMSTATE_INT64(clk100hz_tick_offset, MPS2FPGAIO), + VMSTATE_UINT32(counter, MPS2FPGAIO), + VMSTATE_UINT32(pscntr, MPS2FPGAIO), + VMSTATE_INT64(pscntr_sync_ticks, MPS2FPGAIO), + VMSTATE_END_OF_LIST() + }, +}; + +static Property mps2_fpgaio_properties[] = { + /* Frequency of the prescale counter */ + DEFINE_PROP_UINT32("prescale-clk", MPS2FPGAIO, prescale_clk, 20000000), + /* Number of LEDs controlled by LED0 register */ + DEFINE_PROP_UINT32("num-leds", MPS2FPGAIO, num_leds, 2), + DEFINE_PROP_BOOL("has-switches", MPS2FPGAIO, has_switches, false), + DEFINE_PROP_BOOL("has-dbgctrl", MPS2FPGAIO, has_dbgctrl, false), + DEFINE_PROP_END_OF_LIST(), +}; + +static void mps2_fpgaio_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->vmsd = &mps2_fpgaio_vmstate; + dc->realize = mps2_fpgaio_realize; + dc->reset = mps2_fpgaio_reset; + device_class_set_props(dc, mps2_fpgaio_properties); +} + +static const TypeInfo mps2_fpgaio_info = { + .name = TYPE_MPS2_FPGAIO, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(MPS2FPGAIO), + .instance_init = mps2_fpgaio_init, + .class_init = 
mps2_fpgaio_class_init, +}; + +static void mps2_fpgaio_register_types(void) +{ + type_register_static(&mps2_fpgaio_info); +} + +type_init(mps2_fpgaio_register_types); diff --git a/hw/misc/mps2-scc.c b/hw/misc/mps2-scc.c new file mode 100644 index 000000000..b3b42a792 --- /dev/null +++ b/hw/misc/mps2-scc.c @@ -0,0 +1,396 @@ +/* + * ARM MPS2 SCC emulation + * + * Copyright (c) 2017 Linaro Limited + * Written by Peter Maydell + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 or + * (at your option) any later version. + */ + +/* This is a model of the SCC (Serial Communication Controller) + * found in the FPGA images of MPS2 development boards. + * + * Documentation of it can be found in the MPS2 TRM: + * https://developer.arm.com/documentation/100112/latest/ + * and also in the Application Notes documenting individual FPGA images. + */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "qemu/bitops.h" +#include "trace.h" +#include "hw/sysbus.h" +#include "hw/irq.h" +#include "migration/vmstate.h" +#include "hw/registerfields.h" +#include "hw/misc/mps2-scc.h" +#include "hw/misc/led.h" +#include "hw/qdev-properties.h" + +REG32(CFG0, 0) +REG32(CFG1, 4) +REG32(CFG2, 8) +REG32(CFG3, 0xc) +REG32(CFG4, 0x10) +REG32(CFG5, 0x14) +REG32(CFG6, 0x18) +REG32(CFGDATA_RTN, 0xa0) +REG32(CFGDATA_OUT, 0xa4) +REG32(CFGCTRL, 0xa8) + FIELD(CFGCTRL, DEVICE, 0, 12) + FIELD(CFGCTRL, RES1, 12, 8) + FIELD(CFGCTRL, FUNCTION, 20, 6) + FIELD(CFGCTRL, RES2, 26, 4) + FIELD(CFGCTRL, WRITE, 30, 1) + FIELD(CFGCTRL, START, 31, 1) +REG32(CFGSTAT, 0xac) + FIELD(CFGSTAT, DONE, 0, 1) + FIELD(CFGSTAT, ERROR, 1, 1) +REG32(DLL, 0x100) +REG32(AID, 0xFF8) +REG32(ID, 0xFFC) + +static int scc_partno(MPS2SCC *s) +{ + /* Return the partno field of the SCC_ID (0x524, 0x511, etc) */ + return extract32(s->id, 4, 8); +} + +/* Handle a write via the SYS_CFG channel to the specified function/device. + * Return false on error (reported to guest via SYS_CFGCTRL ERROR bit). + */ +static bool scc_cfg_write(MPS2SCC *s, unsigned function, + unsigned device, uint32_t value) +{ + trace_mps2_scc_cfg_write(function, device, value); + + if (function != 1 || device >= s->num_oscclk) { + qemu_log_mask(LOG_GUEST_ERROR, + "MPS2 SCC config write: bad function %d device %d\n", + function, device); + return false; + } + + s->oscclk[device] = value; + return true; +} + +/* Handle a read via the SYS_CFG channel to the specified function/device. + * Return false on error (reported to guest via SYS_CFGCTRL ERROR bit), + * or set *value on success. 
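+ *
+ * Sketch of the guest-visible sequence (as implemented by the CFGCTRL
+ * handling in mps2_scc_write() below): the guest writes SYS_CFGCTRL
+ * with FUNCTION == 1, DEVICE == oscillator index, WRITE clear and
+ * START set; the model completes instantly, setting SYS_CFGSTAT.DONE
+ * (plus .ERROR on a bad function/device); on success the guest then
+ * reads the result back from SYS_CFGDATA_RTN.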
+ */ +static bool scc_cfg_read(MPS2SCC *s, unsigned function, + unsigned device, uint32_t *value) +{ + if (function != 1 || device >= s->num_oscclk) { + qemu_log_mask(LOG_GUEST_ERROR, + "MPS2 SCC config read: bad function %d device %d\n", + function, device); + return false; + } + + *value = s->oscclk[device]; + + trace_mps2_scc_cfg_read(function, device, *value); + return true; +} + +static uint64_t mps2_scc_read(void *opaque, hwaddr offset, unsigned size) +{ + MPS2SCC *s = MPS2_SCC(opaque); + uint64_t r; + + switch (offset) { + case A_CFG0: + r = s->cfg0; + break; + case A_CFG1: + r = s->cfg1; + break; + case A_CFG2: + if (scc_partno(s) != 0x524 && scc_partno(s) != 0x547) { + /* CFG2 reserved on other boards */ + goto bad_offset; + } + r = s->cfg2; + break; + case A_CFG3: + if (scc_partno(s) == 0x524 || scc_partno(s) == 0x547) { + /* CFG3 reserved on AN524 and AN547 */ + goto bad_offset; + } + /* These are user-settable DIP switches on the board. We don't + * model that, so just return zeroes. + */ + r = 0; + break; + case A_CFG4: + r = s->cfg4; + break; + case A_CFG5: + if (scc_partno(s) != 0x524 && scc_partno(s) != 0x547) { + /* CFG5 reserved on other boards */ + goto bad_offset; + } + r = s->cfg5; + break; + case A_CFG6: + if (scc_partno(s) != 0x524) { + /* CFG6 reserved on other boards */ + goto bad_offset; + } + r = s->cfg6; + break; + case A_CFGDATA_RTN: + r = s->cfgdata_rtn; + break; + case A_CFGDATA_OUT: + r = s->cfgdata_out; + break; + case A_CFGCTRL: + r = s->cfgctrl; + break; + case A_CFGSTAT: + r = s->cfgstat; + break; + case A_DLL: + r = s->dll; + break; + case A_AID: + r = s->aid; + break; + case A_ID: + r = s->id; + break; + default: + bad_offset: + qemu_log_mask(LOG_GUEST_ERROR, + "MPS2 SCC read: bad offset %x\n", (int) offset); + r = 0; + break; + } + + trace_mps2_scc_read(offset, r, size); + return r; +} + +static void mps2_scc_write(void *opaque, hwaddr offset, uint64_t value, + unsigned size) +{ + MPS2SCC *s = MPS2_SCC(opaque); + + trace_mps2_scc_write(offset, value, size); + + switch (offset) { + case A_CFG0: + /* + * On some boards bit 0 controls board-specific remapping; + * we always reflect bit 0 in the 'remap' GPIO output line, + * and let the board wire it up or not as it chooses. + * TODO on some boards bit 1 is CPU_WAIT.
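+ *
+ * (A board model that supports remapping is assumed to connect a
+ * handler to the 'remap' GPIO output created in mps2_scc_init() and
+ * switch its memory map when the line toggles.)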
+ */ + s->cfg0 = value; + qemu_set_irq(s->remap, s->cfg0 & 1); + break; + case A_CFG1: + s->cfg1 = value; + for (size_t i = 0; i < ARRAY_SIZE(s->led); i++) { + led_set_state(s->led[i], extract32(value, i, 1)); + } + break; + case A_CFG2: + if (scc_partno(s) != 0x524 && scc_partno(s) != 0x547) { + /* CFG2 reserved on other boards */ + goto bad_offset; + } + /* AN524: QSPI Select signal */ + s->cfg2 = value; + break; + case A_CFG5: + if (scc_partno(s) != 0x524 && scc_partno(s) != 0x547) { + /* CFG5 reserved on other boards */ + goto bad_offset; + } + /* AN524: ACLK frequency in Hz */ + s->cfg5 = value; + break; + case A_CFG6: + if (scc_partno(s) != 0x524) { + /* CFG6 reserved on other boards */ + goto bad_offset; + } + /* AN524: Clock divider for BRAM */ + s->cfg6 = value; + break; + case A_CFGDATA_OUT: + s->cfgdata_out = value; + break; + case A_CFGCTRL: + /* Writing to CFGCTRL clears SYS_CFGSTAT */ + s->cfgstat = 0; + s->cfgctrl = value & ~(R_CFGCTRL_RES1_MASK | + R_CFGCTRL_RES2_MASK | + R_CFGCTRL_START_MASK); + + if (value & R_CFGCTRL_START_MASK) { + /* Start bit set -- do a read or write (instantaneously) */ + int device = extract32(s->cfgctrl, R_CFGCTRL_DEVICE_SHIFT, + R_CFGCTRL_DEVICE_LENGTH); + int function = extract32(s->cfgctrl, R_CFGCTRL_FUNCTION_SHIFT, + R_CFGCTRL_FUNCTION_LENGTH); + + s->cfgstat = R_CFGSTAT_DONE_MASK; + if (s->cfgctrl & R_CFGCTRL_WRITE_MASK) { + if (!scc_cfg_write(s, function, device, s->cfgdata_out)) { + s->cfgstat |= R_CFGSTAT_ERROR_MASK; + } + } else { + uint32_t result; + if (!scc_cfg_read(s, function, device, &result)) { + s->cfgstat |= R_CFGSTAT_ERROR_MASK; + } else { + s->cfgdata_rtn = result; + } + } + } + break; + case A_DLL: + /* DLL stands for Digital Locked Loop. + * Bits [31:24] (DLL_LOCK_MASK) are writable, and indicate a + * mask of which of the DLL_LOCKED bits [16:23] should be ORed + * together to determine the ALL_UNMASKED_DLLS_LOCKED bit [0]. + * For QEMU, our DLLs are always locked, so we can leave bit 0 + * as 1 always and don't need to recalculate it. 
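+ *
+ * For example, the reset value 0xffff0001 set in mps2_scc_reset()
+ * below decodes as: DLL_LOCK_MASK [31:24] = 0xff, DLL_LOCKED flags
+ * [23:16] = 0xff (all locked), ALL_UNMASKED_DLLS_LOCKED (bit 0) = 1.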
+ */ + s->dll = deposit32(s->dll, 24, 8, extract32(value, 24, 8)); + break; + default: + bad_offset: + qemu_log_mask(LOG_GUEST_ERROR, + "MPS2 SCC write: bad offset 0x%x\n", (int) offset); + break; + } +} + +static const MemoryRegionOps mps2_scc_ops = { + .read = mps2_scc_read, + .write = mps2_scc_write, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static void mps2_scc_reset(DeviceState *dev) +{ + MPS2SCC *s = MPS2_SCC(dev); + int i; + + trace_mps2_scc_reset(); + s->cfg0 = s->cfg0_reset; + s->cfg1 = 0; + s->cfg2 = 0; + s->cfg5 = 0; + s->cfg6 = 0; + s->cfgdata_rtn = 0; + s->cfgdata_out = 0; + s->cfgctrl = 0x100000; + s->cfgstat = 0; + s->dll = 0xffff0001; + for (i = 0; i < s->num_oscclk; i++) { + s->oscclk[i] = s->oscclk_reset[i]; + } + for (i = 0; i < ARRAY_SIZE(s->led); i++) { + device_cold_reset(DEVICE(s->led[i])); + } +} + +static void mps2_scc_init(Object *obj) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + MPS2SCC *s = MPS2_SCC(obj); + + memory_region_init_io(&s->iomem, obj, &mps2_scc_ops, s, "mps2-scc", 0x1000); + sysbus_init_mmio(sbd, &s->iomem); + qdev_init_gpio_out_named(DEVICE(obj), &s->remap, "remap", 1); +} + +static void mps2_scc_realize(DeviceState *dev, Error **errp) +{ + MPS2SCC *s = MPS2_SCC(dev); + + for (size_t i = 0; i < ARRAY_SIZE(s->led); i++) { + char *name = g_strdup_printf("SCC LED%zu", i); + s->led[i] = led_create_simple(OBJECT(dev), GPIO_POLARITY_ACTIVE_HIGH, + LED_COLOR_GREEN, name); + g_free(name); + } + + s->oscclk = g_new0(uint32_t, s->num_oscclk); +} + +static const VMStateDescription mps2_scc_vmstate = { + .name = "mps2-scc", + .version_id = 3, + .minimum_version_id = 3, + .fields = (VMStateField[]) { + VMSTATE_UINT32(cfg0, MPS2SCC), + VMSTATE_UINT32(cfg1, MPS2SCC), + VMSTATE_UINT32(cfg2, MPS2SCC), + /* cfg3, cfg4 are read-only so need not be migrated */ + VMSTATE_UINT32(cfg5, MPS2SCC), + VMSTATE_UINT32(cfg6, MPS2SCC), + VMSTATE_UINT32(cfgdata_rtn, MPS2SCC), + VMSTATE_UINT32(cfgdata_out, MPS2SCC), + VMSTATE_UINT32(cfgctrl, MPS2SCC), + VMSTATE_UINT32(cfgstat, MPS2SCC), + VMSTATE_UINT32(dll, MPS2SCC), + VMSTATE_VARRAY_UINT32(oscclk, MPS2SCC, num_oscclk, + 0, vmstate_info_uint32, uint32_t), + VMSTATE_END_OF_LIST() + } +}; + +static Property mps2_scc_properties[] = { + /* Values for various read-only ID registers (which are specific + * to the board model or FPGA image) + */ + DEFINE_PROP_UINT32("scc-cfg4", MPS2SCC, cfg4, 0), + DEFINE_PROP_UINT32("scc-aid", MPS2SCC, aid, 0), + DEFINE_PROP_UINT32("scc-id", MPS2SCC, id, 0), + /* Reset value for CFG0 register */ + DEFINE_PROP_UINT32("scc-cfg0", MPS2SCC, cfg0_reset, 0), + /* + * These are the initial settings for the source clocks on the board. + * In hardware they can be configured via a config file read by the + * motherboard configuration controller to suit the FPGA image. 
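+ *
+ * DEFINE_PROP_ARRAY means board code sets the element count before
+ * the elements, e.g. (variable name and values purely illustrative):
+ *   qdev_prop_set_uint32(sccdev, "len-oscclk", 3);
+ *   qdev_prop_set_uint32(sccdev, "oscclk[0]", 50000000);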
+ */ + DEFINE_PROP_ARRAY("oscclk", MPS2SCC, num_oscclk, oscclk_reset, + qdev_prop_uint32, uint32_t), + DEFINE_PROP_END_OF_LIST(), +}; + +static void mps2_scc_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = mps2_scc_realize; + dc->vmsd = &mps2_scc_vmstate; + dc->reset = mps2_scc_reset; + device_class_set_props(dc, mps2_scc_properties); +} + +static const TypeInfo mps2_scc_info = { + .name = TYPE_MPS2_SCC, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(MPS2SCC), + .instance_init = mps2_scc_init, + .class_init = mps2_scc_class_init, +}; + +static void mps2_scc_register_types(void) +{ + type_register_static(&mps2_scc_info); +} + +type_init(mps2_scc_register_types); diff --git a/hw/misc/msf2-sysreg.c b/hw/misc/msf2-sysreg.c new file mode 100644 index 000000000..2dce55c36 --- /dev/null +++ b/hw/misc/msf2-sysreg.c @@ -0,0 +1,163 @@ +/* + * System Register block model of Microsemi SmartFusion2. + * + * Copyright (c) 2017 Subbaraya Sundeep + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "hw/misc/msf2-sysreg.h" +#include "hw/qdev-properties.h" +#include "migration/vmstate.h" +#include "qemu/error-report.h" +#include "trace.h" + +static inline int msf2_divbits(uint32_t div) +{ + int r = ctz32(div); + + return (div < 8) ? r : r + 1; +} + +static void msf2_sysreg_reset(DeviceState *d) +{ + MSF2SysregState *s = MSF2_SYSREG(d); + + s->regs[MSSDDR_PLL_STATUS_LOW_CR] = 0x021A2358; + s->regs[MSSDDR_PLL_STATUS] = 0x3; + s->regs[MSSDDR_FACC1_CR] = msf2_divbits(s->apb0div) << 5 | + msf2_divbits(s->apb1div) << 2; +} + +static uint64_t msf2_sysreg_read(void *opaque, hwaddr offset, + unsigned size) +{ + MSF2SysregState *s = opaque; + uint32_t ret = 0; + + offset >>= 2; + if (offset < ARRAY_SIZE(s->regs)) { + ret = s->regs[offset]; + trace_msf2_sysreg_read(offset << 2, ret); + } else { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad offset 0x%08" HWADDR_PRIx "\n", __func__, + offset << 2); + } + + return ret; +} + +static void msf2_sysreg_write(void *opaque, hwaddr offset, + uint64_t val, unsigned size) +{ + MSF2SysregState *s = opaque; + uint32_t newval = val; + + offset >>= 2; + + switch (offset) { + case MSSDDR_PLL_STATUS: + trace_msf2_sysreg_write_pll_status(); + break; + + case ESRAM_CR: + case DDR_CR: + case ENVM_REMAP_BASE_CR: + if (newval != s->regs[offset]) { + qemu_log_mask(LOG_UNIMP, + TYPE_MSF2_SYSREG": remapping not supported\n"); + } + break; + + default: + if (offset < ARRAY_SIZE(s->regs)) { + trace_msf2_sysreg_write(offset << 2, newval, s->regs[offset]); + s->regs[offset] = newval; + } else { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad offset 0x%08" HWADDR_PRIx "\n", __func__, + offset << 2); + } + break; + } +} + +static const MemoryRegionOps sysreg_ops = { + .read = msf2_sysreg_read, + .write = msf2_sysreg_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void msf2_sysreg_init(Object *obj) +{ + MSF2SysregState *s = MSF2_SYSREG(obj); + + memory_region_init_io(&s->iomem, obj, &sysreg_ops, s, TYPE_MSF2_SYSREG, + MSF2_SYSREG_MMIO_SIZE); + sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->iomem); +} 
+ +static const VMStateDescription vmstate_msf2_sysreg = { + .name = TYPE_MSF2_SYSREG, + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32_ARRAY(regs, MSF2SysregState, MSF2_SYSREG_MMIO_SIZE / 4), + VMSTATE_END_OF_LIST() + } +}; + +static Property msf2_sysreg_properties[] = { + /* default divisors in Libero GUI */ + DEFINE_PROP_UINT8("apb0divisor", MSF2SysregState, apb0div, 2), + DEFINE_PROP_UINT8("apb1divisor", MSF2SysregState, apb1div, 2), + DEFINE_PROP_END_OF_LIST(), +}; + +static void msf2_sysreg_realize(DeviceState *dev, Error **errp) +{ + MSF2SysregState *s = MSF2_SYSREG(dev); + + if ((s->apb0div > 32 || !is_power_of_2(s->apb0div)) + || (s->apb1div > 32 || !is_power_of_2(s->apb1div))) { + error_setg(errp, "Invalid apb divisor value"); + error_append_hint(errp, "apb divisor must be a power of 2" + " and maximum value is 32\n"); + } +} + +static void msf2_sysreg_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->vmsd = &vmstate_msf2_sysreg; + dc->reset = msf2_sysreg_reset; + device_class_set_props(dc, msf2_sysreg_properties); + dc->realize = msf2_sysreg_realize; +} + +static const TypeInfo msf2_sysreg_info = { + .name = TYPE_MSF2_SYSREG, + .parent = TYPE_SYS_BUS_DEVICE, + .class_init = msf2_sysreg_class_init, + .instance_size = sizeof(MSF2SysregState), + .instance_init = msf2_sysreg_init, +}; + +static void msf2_sysreg_register_types(void) +{ + type_register_static(&msf2_sysreg_info); +} + +type_init(msf2_sysreg_register_types) diff --git a/hw/misc/mst_fpga.c b/hw/misc/mst_fpga.c new file mode 100644 index 000000000..2aaadfa96 --- /dev/null +++ b/hw/misc/mst_fpga.c @@ -0,0 +1,269 @@ +/* + * PXA270-based Intel Mainstone platforms. + * FPGA driver + * + * Copyright (c) 2007 by Armin Kuster or + * + * + * This code is licensed under the GNU GPL v2. + * + * Contributions after 2012-01-13 are licensed under the terms of the + * GNU GPL, version 2 or (at your option) any later version. 
+ */ + +#include "qemu/osdep.h" +#include "hw/irq.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "qemu/module.h" +#include "qom/object.h" + +/* Mainstone FPGA for extern irqs */ +#define FPGA_GPIO_PIN 0 +#define MST_NUM_IRQS 16 +#define MST_LEDDAT1 0x10 +#define MST_LEDDAT2 0x14 +#define MST_LEDCTRL 0x40 +#define MST_GPSWR 0x60 +#define MST_MSCWR1 0x80 +#define MST_MSCWR2 0x84 +#define MST_MSCWR3 0x88 +#define MST_MSCRD 0x90 +#define MST_INTMSKENA 0xc0 +#define MST_INTSETCLR 0xd0 +#define MST_PCMCIA0 0xe0 +#define MST_PCMCIA1 0xe4 + +#define MST_PCMCIAx_READY (1 << 10) +#define MST_PCMCIAx_nCD (1 << 5) + +#define MST_PCMCIA_CD0_IRQ 9 +#define MST_PCMCIA_CD1_IRQ 13 + +#define TYPE_MAINSTONE_FPGA "mainstone-fpga" +OBJECT_DECLARE_SIMPLE_TYPE(mst_irq_state, MAINSTONE_FPGA) + +struct mst_irq_state { + SysBusDevice parent_obj; + + MemoryRegion iomem; + + qemu_irq parent; + + uint32_t prev_level; + uint32_t leddat1; + uint32_t leddat2; + uint32_t ledctrl; + uint32_t gpswr; + uint32_t mscwr1; + uint32_t mscwr2; + uint32_t mscwr3; + uint32_t mscrd; + uint32_t intmskena; + uint32_t intsetclr; + uint32_t pcmcia0; + uint32_t pcmcia1; +}; + +static void +mst_fpga_set_irq(void *opaque, int irq, int level) +{ + mst_irq_state *s = (mst_irq_state *)opaque; + uint32_t oldint = s->intsetclr & s->intmskena; + + if (level) + s->prev_level |= 1u << irq; + else + s->prev_level &= ~(1u << irq); + + switch(irq) { + case MST_PCMCIA_CD0_IRQ: + if (level) + s->pcmcia0 &= ~MST_PCMCIAx_nCD; + else + s->pcmcia0 |= MST_PCMCIAx_nCD; + break; + case MST_PCMCIA_CD1_IRQ: + if (level) + s->pcmcia1 &= ~MST_PCMCIAx_nCD; + else + s->pcmcia1 |= MST_PCMCIAx_nCD; + break; + } + + if ((s->intmskena & (1u << irq)) && level) + s->intsetclr |= 1u << irq; + + if (oldint != (s->intsetclr & s->intmskena)) + qemu_set_irq(s->parent, s->intsetclr & s->intmskena); +} + + +static uint64_t +mst_fpga_readb(void *opaque, hwaddr addr, unsigned size) +{ + mst_irq_state *s = (mst_irq_state *) opaque; + + switch (addr) { + case MST_LEDDAT1: + return s->leddat1; + case MST_LEDDAT2: + return s->leddat2; + case MST_LEDCTRL: + return s->ledctrl; + case MST_GPSWR: + return s->gpswr; + case MST_MSCWR1: + return s->mscwr1; + case MST_MSCWR2: + return s->mscwr2; + case MST_MSCWR3: + return s->mscwr3; + case MST_MSCRD: + return s->mscrd; + case MST_INTMSKENA: + return s->intmskena; + case MST_INTSETCLR: + return s->intsetclr; + case MST_PCMCIA0: + return s->pcmcia0; + case MST_PCMCIA1: + return s->pcmcia1; + default: + printf("Mainstone - mst_fpga_readb: Bad register offset " + "0x" TARGET_FMT_plx "\n", addr); + } + return 0; +} + +static void +mst_fpga_writeb(void *opaque, hwaddr addr, uint64_t value, + unsigned size) +{ + mst_irq_state *s = (mst_irq_state *) opaque; + value &= 0xffffffff; + + switch (addr) { + case MST_LEDDAT1: + s->leddat1 = value; + break; + case MST_LEDDAT2: + s->leddat2 = value; + break; + case MST_LEDCTRL: + s->ledctrl = value; + break; + case MST_GPSWR: + s->gpswr = value; + break; + case MST_MSCWR1: + s->mscwr1 = value; + break; + case MST_MSCWR2: + s->mscwr2 = value; + break; + case MST_MSCWR3: + s->mscwr3 = value; + break; + case MST_MSCRD: + s->mscrd = value; + break; + case MST_INTMSKENA: /* Mask interrupt */ + s->intmskena = (value & 0xFEEFF); + qemu_set_irq(s->parent, s->intsetclr & s->intmskena); + break; + case MST_INTSETCLR: /* clear or set interrupt */ + s->intsetclr = (value & 0xFEEFF); + qemu_set_irq(s->parent, s->intsetclr & s->intmskena); + break; + /* For PCMCIAx allow the to change only power and 
reset */ + case MST_PCMCIA0: + s->pcmcia0 = (value & 0x1f) | (s->pcmcia0 & ~0x1f); + break; + case MST_PCMCIA1: + s->pcmcia1 = (value & 0x1f) | (s->pcmcia1 & ~0x1f); + break; + default: + printf("Mainstone - mst_fpga_writeb: Bad register offset " + "0x" TARGET_FMT_plx "\n", addr); + } +} + +static const MemoryRegionOps mst_fpga_ops = { + .read = mst_fpga_readb, + .write = mst_fpga_writeb, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static int mst_fpga_post_load(void *opaque, int version_id) +{ + mst_irq_state *s = (mst_irq_state *) opaque; + + qemu_set_irq(s->parent, s->intsetclr & s->intmskena); + return 0; +} + +static void mst_fpga_init(Object *obj) +{ + DeviceState *dev = DEVICE(obj); + mst_irq_state *s = MAINSTONE_FPGA(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + + s->pcmcia0 = MST_PCMCIAx_READY | MST_PCMCIAx_nCD; + s->pcmcia1 = MST_PCMCIAx_READY | MST_PCMCIAx_nCD; + + sysbus_init_irq(sbd, &s->parent); + + /* alloc the external 16 irqs */ + qdev_init_gpio_in(dev, mst_fpga_set_irq, MST_NUM_IRQS); + + memory_region_init_io(&s->iomem, obj, &mst_fpga_ops, s, + "fpga", 0x00100000); + sysbus_init_mmio(sbd, &s->iomem); +} + +static const VMStateDescription vmstate_mst_fpga_regs = { + .name = "mainstone_fpga", + .version_id = 0, + .minimum_version_id = 0, + .post_load = mst_fpga_post_load, + .fields = (VMStateField[]) { + VMSTATE_UINT32(prev_level, mst_irq_state), + VMSTATE_UINT32(leddat1, mst_irq_state), + VMSTATE_UINT32(leddat2, mst_irq_state), + VMSTATE_UINT32(ledctrl, mst_irq_state), + VMSTATE_UINT32(gpswr, mst_irq_state), + VMSTATE_UINT32(mscwr1, mst_irq_state), + VMSTATE_UINT32(mscwr2, mst_irq_state), + VMSTATE_UINT32(mscwr3, mst_irq_state), + VMSTATE_UINT32(mscrd, mst_irq_state), + VMSTATE_UINT32(intmskena, mst_irq_state), + VMSTATE_UINT32(intsetclr, mst_irq_state), + VMSTATE_UINT32(pcmcia0, mst_irq_state), + VMSTATE_UINT32(pcmcia1, mst_irq_state), + VMSTATE_END_OF_LIST(), + }, +}; + +static void mst_fpga_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->desc = "Mainstone II FPGA"; + dc->vmsd = &vmstate_mst_fpga_regs; +} + +static const TypeInfo mst_fpga_info = { + .name = TYPE_MAINSTONE_FPGA, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(mst_irq_state), + .instance_init = mst_fpga_init, + .class_init = mst_fpga_class_init, +}; + +static void mst_fpga_register_types(void) +{ + type_register_static(&mst_fpga_info); +} + +type_init(mst_fpga_register_types) diff --git a/hw/misc/npcm7xx_clk.c b/hw/misc/npcm7xx_clk.c new file mode 100644 index 000000000..0b61070c5 --- /dev/null +++ b/hw/misc/npcm7xx_clk.c @@ -0,0 +1,1095 @@ +/* + * Nuvoton NPCM7xx Clock Control Registers. + * + * Copyright 2020 Google LLC + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ */ + +#include "qemu/osdep.h" + +#include "hw/misc/npcm7xx_clk.h" +#include "hw/timer/npcm7xx_timer.h" +#include "hw/qdev-clock.h" +#include "migration/vmstate.h" +#include "qemu/error-report.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "qemu/timer.h" +#include "qemu/units.h" +#include "trace.h" +#include "sysemu/watchdog.h" + +/* + * The reference clock hz, and the SECCNT and CNTR25M registers in this module, + * is always 25 MHz. + */ +#define NPCM7XX_CLOCK_REF_HZ (25000000) + +/* Register Field Definitions */ +#define NPCM7XX_CLK_WDRCR_CA9C BIT(0) /* Cortex-A9 Cores */ + +#define PLLCON_LOKI BIT(31) +#define PLLCON_LOKS BIT(30) +#define PLLCON_PWDEN BIT(12) +#define PLLCON_FBDV(con) extract32((con), 16, 12) +#define PLLCON_OTDV2(con) extract32((con), 13, 3) +#define PLLCON_OTDV1(con) extract32((con), 8, 3) +#define PLLCON_INDV(con) extract32((con), 0, 6) + +enum NPCM7xxCLKRegisters { + NPCM7XX_CLK_CLKEN1, + NPCM7XX_CLK_CLKSEL, + NPCM7XX_CLK_CLKDIV1, + NPCM7XX_CLK_PLLCON0, + NPCM7XX_CLK_PLLCON1, + NPCM7XX_CLK_SWRSTR, + NPCM7XX_CLK_IPSRST1 = 0x20 / sizeof(uint32_t), + NPCM7XX_CLK_IPSRST2, + NPCM7XX_CLK_CLKEN2, + NPCM7XX_CLK_CLKDIV2, + NPCM7XX_CLK_CLKEN3, + NPCM7XX_CLK_IPSRST3, + NPCM7XX_CLK_WD0RCR, + NPCM7XX_CLK_WD1RCR, + NPCM7XX_CLK_WD2RCR, + NPCM7XX_CLK_SWRSTC1, + NPCM7XX_CLK_SWRSTC2, + NPCM7XX_CLK_SWRSTC3, + NPCM7XX_CLK_SWRSTC4, + NPCM7XX_CLK_PLLCON2, + NPCM7XX_CLK_CLKDIV3, + NPCM7XX_CLK_CORSTC, + NPCM7XX_CLK_PLLCONG, + NPCM7XX_CLK_AHBCKFI, + NPCM7XX_CLK_SECCNT, + NPCM7XX_CLK_CNTR25M, + NPCM7XX_CLK_REGS_END, +}; + +/* + * These reset values were taken from version 0.91 of the NPCM750R data sheet. + * + * All are loaded on power-up reset. CLKENx and SWRSTR should also be loaded on + * core domain reset, but this reset type is not yet supported by QEMU. + */ +static const uint32_t cold_reset_values[NPCM7XX_CLK_NR_REGS] = { + [NPCM7XX_CLK_CLKEN1] = 0xffffffff, + [NPCM7XX_CLK_CLKSEL] = 0x004aaaaa, + [NPCM7XX_CLK_CLKDIV1] = 0x5413f855, + [NPCM7XX_CLK_PLLCON0] = 0x00222101 | PLLCON_LOKI, + [NPCM7XX_CLK_PLLCON1] = 0x00202101 | PLLCON_LOKI, + [NPCM7XX_CLK_IPSRST1] = 0x00001000, + [NPCM7XX_CLK_IPSRST2] = 0x80000000, + [NPCM7XX_CLK_CLKEN2] = 0xffffffff, + [NPCM7XX_CLK_CLKDIV2] = 0xaa4f8f9f, + [NPCM7XX_CLK_CLKEN3] = 0xffffffff, + [NPCM7XX_CLK_IPSRST3] = 0x03000000, + [NPCM7XX_CLK_WD0RCR] = 0xffffffff, + [NPCM7XX_CLK_WD1RCR] = 0xffffffff, + [NPCM7XX_CLK_WD2RCR] = 0xffffffff, + [NPCM7XX_CLK_SWRSTC1] = 0x00000003, + [NPCM7XX_CLK_PLLCON2] = 0x00c02105 | PLLCON_LOKI, + [NPCM7XX_CLK_CORSTC] = 0x04000003, + [NPCM7XX_CLK_PLLCONG] = 0x01228606 | PLLCON_LOKI, + [NPCM7XX_CLK_AHBCKFI] = 0x000000c8, +}; + +/* The number of watchdogs that can trigger a reset. */ +#define NPCM7XX_NR_WATCHDOGS (3) + +/* Clock converter functions */ + +#define TYPE_NPCM7XX_CLOCK_PLL "npcm7xx-clock-pll" +#define NPCM7XX_CLOCK_PLL(obj) OBJECT_CHECK(NPCM7xxClockPLLState, \ + (obj), TYPE_NPCM7XX_CLOCK_PLL) +#define TYPE_NPCM7XX_CLOCK_SEL "npcm7xx-clock-sel" +#define NPCM7XX_CLOCK_SEL(obj) OBJECT_CHECK(NPCM7xxClockSELState, \ + (obj), TYPE_NPCM7XX_CLOCK_SEL) +#define TYPE_NPCM7XX_CLOCK_DIVIDER "npcm7xx-clock-divider" +#define NPCM7XX_CLOCK_DIVIDER(obj) OBJECT_CHECK(NPCM7xxClockDividerState, \ + (obj), TYPE_NPCM7XX_CLOCK_DIVIDER) + +static void npcm7xx_clk_update_pll(void *opaque) +{ + NPCM7xxClockPLLState *s = opaque; + uint32_t con = s->clk->regs[s->reg]; + uint64_t freq; + + /* The PLL is grounded if it is not locked yet. 
*/ + if (con & PLLCON_LOKI) { + freq = clock_get_hz(s->clock_in); + freq *= PLLCON_FBDV(con); + freq /= PLLCON_INDV(con) * PLLCON_OTDV1(con) * PLLCON_OTDV2(con); + } else { + freq = 0; + } + + clock_update_hz(s->clock_out, freq); +} + +static void npcm7xx_clk_update_sel(void *opaque) +{ + NPCM7xxClockSELState *s = opaque; + uint32_t index = extract32(s->clk->regs[NPCM7XX_CLK_CLKSEL], s->offset, + s->len); + + if (index >= s->input_size) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: SEL index: %u out of range\n", + __func__, index); + index = 0; + } + clock_update_hz(s->clock_out, clock_get_hz(s->clock_in[index])); +} + +static void npcm7xx_clk_update_divider(void *opaque) +{ + NPCM7xxClockDividerState *s = opaque; + uint32_t freq; + + freq = s->divide(s); + clock_update_hz(s->clock_out, freq); +} + +static uint32_t divide_by_constant(NPCM7xxClockDividerState *s) +{ + return clock_get_hz(s->clock_in) / s->divisor; +} + +static uint32_t divide_by_reg_divisor(NPCM7xxClockDividerState *s) +{ + return clock_get_hz(s->clock_in) / + (extract32(s->clk->regs[s->reg], s->offset, s->len) + 1); +} + +static uint32_t divide_by_reg_divisor_times_2(NPCM7xxClockDividerState *s) +{ + return divide_by_reg_divisor(s) / 2; +} + +static uint32_t shift_by_reg_divisor(NPCM7xxClockDividerState *s) +{ + return clock_get_hz(s->clock_in) >> + extract32(s->clk->regs[s->reg], s->offset, s->len); +} + +static NPCM7xxClockPLL find_pll_by_reg(enum NPCM7xxCLKRegisters reg) +{ + switch (reg) { + case NPCM7XX_CLK_PLLCON0: + return NPCM7XX_CLOCK_PLL0; + case NPCM7XX_CLK_PLLCON1: + return NPCM7XX_CLOCK_PLL1; + case NPCM7XX_CLK_PLLCON2: + return NPCM7XX_CLOCK_PLL2; + case NPCM7XX_CLK_PLLCONG: + return NPCM7XX_CLOCK_PLLG; + default: + g_assert_not_reached(); + } +} + +static void npcm7xx_clk_update_all_plls(NPCM7xxCLKState *clk) +{ + int i; + + for (i = 0; i < NPCM7XX_CLOCK_NR_PLLS; ++i) { + npcm7xx_clk_update_pll(&clk->plls[i]); + } +} + +static void npcm7xx_clk_update_all_sels(NPCM7xxCLKState *clk) +{ + int i; + + for (i = 0; i < NPCM7XX_CLOCK_NR_SELS; ++i) { + npcm7xx_clk_update_sel(&clk->sels[i]); + } +} + +static void npcm7xx_clk_update_all_dividers(NPCM7xxCLKState *clk) +{ + int i; + + for (i = 0; i < NPCM7XX_CLOCK_NR_DIVIDERS; ++i) { + npcm7xx_clk_update_divider(&clk->dividers[i]); + } +} + +static void npcm7xx_clk_update_all_clocks(NPCM7xxCLKState *clk) +{ + clock_update_hz(clk->clkref, NPCM7XX_CLOCK_REF_HZ); + npcm7xx_clk_update_all_plls(clk); + npcm7xx_clk_update_all_sels(clk); + npcm7xx_clk_update_all_dividers(clk); +} + +/* Types of clock sources. 
*/ +typedef enum ClockSrcType { + CLKSRC_REF, + CLKSRC_PLL, + CLKSRC_SEL, + CLKSRC_DIV, +} ClockSrcType; + +typedef struct PLLInitInfo { + const char *name; + ClockSrcType src_type; + int src_index; + int reg; + const char *public_name; +} PLLInitInfo; + +typedef struct SELInitInfo { + const char *name; + uint8_t input_size; + ClockSrcType src_type[NPCM7XX_CLK_SEL_MAX_INPUT]; + int src_index[NPCM7XX_CLK_SEL_MAX_INPUT]; + int offset; + int len; + const char *public_name; +} SELInitInfo; + +typedef struct DividerInitInfo { + const char *name; + ClockSrcType src_type; + int src_index; + uint32_t (*divide)(NPCM7xxClockDividerState *s); + int reg; /* not used when type == CONSTANT */ + int offset; /* not used when type == CONSTANT */ + int len; /* not used when type == CONSTANT */ + int divisor; /* used only when type == CONSTANT */ + const char *public_name; +} DividerInitInfo; + +static const PLLInitInfo pll_init_info_list[] = { + [NPCM7XX_CLOCK_PLL0] = { + .name = "pll0", + .src_type = CLKSRC_REF, + .reg = NPCM7XX_CLK_PLLCON0, + }, + [NPCM7XX_CLOCK_PLL1] = { + .name = "pll1", + .src_type = CLKSRC_REF, + .reg = NPCM7XX_CLK_PLLCON1, + }, + [NPCM7XX_CLOCK_PLL2] = { + .name = "pll2", + .src_type = CLKSRC_REF, + .reg = NPCM7XX_CLK_PLLCON2, + }, + [NPCM7XX_CLOCK_PLLG] = { + .name = "pllg", + .src_type = CLKSRC_REF, + .reg = NPCM7XX_CLK_PLLCONG, + }, +}; + +static const SELInitInfo sel_init_info_list[] = { + [NPCM7XX_CLOCK_PIXCKSEL] = { + .name = "pixcksel", + .input_size = 2, + .src_type = {CLKSRC_PLL, CLKSRC_REF}, + .src_index = {NPCM7XX_CLOCK_PLLG, 0}, + .offset = 5, + .len = 1, + .public_name = "pixel-clock", + }, + [NPCM7XX_CLOCK_MCCKSEL] = { + .name = "mccksel", + .input_size = 4, + .src_type = {CLKSRC_DIV, CLKSRC_REF, CLKSRC_REF, + /*MCBPCK, shouldn't be used in normal operation*/ + CLKSRC_REF}, + .src_index = {NPCM7XX_CLOCK_PLL1D2, 0, 0, 0}, + .offset = 12, + .len = 2, + .public_name = "mc-phy-clock", + }, + [NPCM7XX_CLOCK_CPUCKSEL] = { + .name = "cpucksel", + .input_size = 4, + .src_type = {CLKSRC_PLL, CLKSRC_DIV, CLKSRC_REF, + /*SYSBPCK, shouldn't be used in normal operation*/ + CLKSRC_REF}, + .src_index = {NPCM7XX_CLOCK_PLL0, NPCM7XX_CLOCK_PLL1D2, 0, 0}, + .offset = 0, + .len = 2, + .public_name = "system-clock", + }, + [NPCM7XX_CLOCK_CLKOUTSEL] = { + .name = "clkoutsel", + .input_size = 5, + .src_type = {CLKSRC_PLL, CLKSRC_DIV, CLKSRC_REF, + CLKSRC_PLL, CLKSRC_DIV}, + .src_index = {NPCM7XX_CLOCK_PLL0, NPCM7XX_CLOCK_PLL1D2, 0, + NPCM7XX_CLOCK_PLLG, NPCM7XX_CLOCK_PLL2D2}, + .offset = 18, + .len = 3, + .public_name = "tock", + }, + [NPCM7XX_CLOCK_UARTCKSEL] = { + .name = "uartcksel", + .input_size = 4, + .src_type = {CLKSRC_PLL, CLKSRC_DIV, CLKSRC_REF, CLKSRC_DIV}, + .src_index = {NPCM7XX_CLOCK_PLL0, NPCM7XX_CLOCK_PLL1D2, 0, + NPCM7XX_CLOCK_PLL2D2}, + .offset = 8, + .len = 2, + }, + [NPCM7XX_CLOCK_TIMCKSEL] = { + .name = "timcksel", + .input_size = 4, + .src_type = {CLKSRC_PLL, CLKSRC_DIV, CLKSRC_REF, CLKSRC_DIV}, + .src_index = {NPCM7XX_CLOCK_PLL0, NPCM7XX_CLOCK_PLL1D2, 0, + NPCM7XX_CLOCK_PLL2D2}, + .offset = 14, + .len = 2, + }, + [NPCM7XX_CLOCK_SDCKSEL] = { + .name = "sdcksel", + .input_size = 4, + .src_type = {CLKSRC_PLL, CLKSRC_DIV, CLKSRC_REF, CLKSRC_DIV}, + .src_index = {NPCM7XX_CLOCK_PLL0, NPCM7XX_CLOCK_PLL1D2, 0, + NPCM7XX_CLOCK_PLL2D2}, + .offset = 6, + .len = 2, + }, + [NPCM7XX_CLOCK_GFXMSEL] = { + .name = "gfxmksel", + .input_size = 2, + .src_type = {CLKSRC_REF, CLKSRC_PLL}, + .src_index = {0, NPCM7XX_CLOCK_PLL2}, + .offset = 21, + .len = 1, + }, + [NPCM7XX_CLOCK_SUCKSEL] = { + 
.name = "sucksel", + .input_size = 4, + .src_type = {CLKSRC_PLL, CLKSRC_DIV, CLKSRC_REF, CLKSRC_DIV}, + .src_index = {NPCM7XX_CLOCK_PLL0, NPCM7XX_CLOCK_PLL1D2, 0, + NPCM7XX_CLOCK_PLL2D2}, + .offset = 10, + .len = 2, + }, +}; + +static const DividerInitInfo divider_init_info_list[] = { + [NPCM7XX_CLOCK_PLL1D2] = { + .name = "pll1d2", + .src_type = CLKSRC_PLL, + .src_index = NPCM7XX_CLOCK_PLL1, + .divide = divide_by_constant, + .divisor = 2, + }, + [NPCM7XX_CLOCK_PLL2D2] = { + .name = "pll2d2", + .src_type = CLKSRC_PLL, + .src_index = NPCM7XX_CLOCK_PLL2, + .divide = divide_by_constant, + .divisor = 2, + }, + [NPCM7XX_CLOCK_MC_DIVIDER] = { + .name = "mc-divider", + .src_type = CLKSRC_SEL, + .src_index = NPCM7XX_CLOCK_MCCKSEL, + .divide = divide_by_constant, + .divisor = 2, + .public_name = "mc-clock" + }, + [NPCM7XX_CLOCK_AXI_DIVIDER] = { + .name = "axi-divider", + .src_type = CLKSRC_SEL, + .src_index = NPCM7XX_CLOCK_CPUCKSEL, + .divide = shift_by_reg_divisor, + .reg = NPCM7XX_CLK_CLKDIV1, + .offset = 0, + .len = 1, + .public_name = "clk2" + }, + [NPCM7XX_CLOCK_AHB_DIVIDER] = { + .name = "ahb-divider", + .src_type = CLKSRC_DIV, + .src_index = NPCM7XX_CLOCK_AXI_DIVIDER, + .divide = divide_by_reg_divisor, + .reg = NPCM7XX_CLK_CLKDIV1, + .offset = 26, + .len = 2, + .public_name = "clk4" + }, + [NPCM7XX_CLOCK_AHB3_DIVIDER] = { + .name = "ahb3-divider", + .src_type = CLKSRC_DIV, + .src_index = NPCM7XX_CLOCK_AHB_DIVIDER, + .divide = divide_by_reg_divisor, + .reg = NPCM7XX_CLK_CLKDIV1, + .offset = 6, + .len = 5, + .public_name = "ahb3-spi3-clock" + }, + [NPCM7XX_CLOCK_SPI0_DIVIDER] = { + .name = "spi0-divider", + .src_type = CLKSRC_DIV, + .src_index = NPCM7XX_CLOCK_AHB_DIVIDER, + .divide = divide_by_reg_divisor, + .reg = NPCM7XX_CLK_CLKDIV3, + .offset = 6, + .len = 5, + .public_name = "spi0-clock", + }, + [NPCM7XX_CLOCK_SPIX_DIVIDER] = { + .name = "spix-divider", + .src_type = CLKSRC_DIV, + .src_index = NPCM7XX_CLOCK_AHB_DIVIDER, + .divide = divide_by_reg_divisor, + .reg = NPCM7XX_CLK_CLKDIV3, + .offset = 1, + .len = 5, + .public_name = "spix-clock", + }, + [NPCM7XX_CLOCK_APB1_DIVIDER] = { + .name = "apb1-divider", + .src_type = CLKSRC_DIV, + .src_index = NPCM7XX_CLOCK_AHB_DIVIDER, + .divide = shift_by_reg_divisor, + .reg = NPCM7XX_CLK_CLKDIV2, + .offset = 24, + .len = 2, + .public_name = "apb1-clock", + }, + [NPCM7XX_CLOCK_APB2_DIVIDER] = { + .name = "apb2-divider", + .src_type = CLKSRC_DIV, + .src_index = NPCM7XX_CLOCK_AHB_DIVIDER, + .divide = shift_by_reg_divisor, + .reg = NPCM7XX_CLK_CLKDIV2, + .offset = 26, + .len = 2, + .public_name = "apb2-clock", + }, + [NPCM7XX_CLOCK_APB3_DIVIDER] = { + .name = "apb3-divider", + .src_type = CLKSRC_DIV, + .src_index = NPCM7XX_CLOCK_AHB_DIVIDER, + .divide = shift_by_reg_divisor, + .reg = NPCM7XX_CLK_CLKDIV2, + .offset = 28, + .len = 2, + .public_name = "apb3-clock", + }, + [NPCM7XX_CLOCK_APB4_DIVIDER] = { + .name = "apb4-divider", + .src_type = CLKSRC_DIV, + .src_index = NPCM7XX_CLOCK_AHB_DIVIDER, + .divide = shift_by_reg_divisor, + .reg = NPCM7XX_CLK_CLKDIV2, + .offset = 30, + .len = 2, + .public_name = "apb4-clock", + }, + [NPCM7XX_CLOCK_APB5_DIVIDER] = { + .name = "apb5-divider", + .src_type = CLKSRC_DIV, + .src_index = NPCM7XX_CLOCK_AHB_DIVIDER, + .divide = shift_by_reg_divisor, + .reg = NPCM7XX_CLK_CLKDIV2, + .offset = 22, + .len = 2, + .public_name = "apb5-clock", + }, + [NPCM7XX_CLOCK_CLKOUT_DIVIDER] = { + .name = "clkout-divider", + .src_type = CLKSRC_SEL, + .src_index = NPCM7XX_CLOCK_CLKOUTSEL, + .divide = divide_by_reg_divisor, + .reg = 
NPCM7XX_CLK_CLKDIV2, + .offset = 16, + .len = 5, + .public_name = "clkout", + }, + [NPCM7XX_CLOCK_UART_DIVIDER] = { + .name = "uart-divider", + .src_type = CLKSRC_SEL, + .src_index = NPCM7XX_CLOCK_UARTCKSEL, + .divide = divide_by_reg_divisor, + .reg = NPCM7XX_CLK_CLKDIV1, + .offset = 16, + .len = 5, + .public_name = "uart-clock", + }, + [NPCM7XX_CLOCK_TIMER_DIVIDER] = { + .name = "timer-divider", + .src_type = CLKSRC_SEL, + .src_index = NPCM7XX_CLOCK_TIMCKSEL, + .divide = divide_by_reg_divisor, + .reg = NPCM7XX_CLK_CLKDIV1, + .offset = 21, + .len = 5, + .public_name = "timer-clock", + }, + [NPCM7XX_CLOCK_ADC_DIVIDER] = { + .name = "adc-divider", + .src_type = CLKSRC_DIV, + .src_index = NPCM7XX_CLOCK_TIMER_DIVIDER, + .divide = shift_by_reg_divisor, + .reg = NPCM7XX_CLK_CLKDIV1, + .offset = 28, + .len = 3, + .public_name = "adc-clock", + }, + [NPCM7XX_CLOCK_MMC_DIVIDER] = { + .name = "mmc-divider", + .src_type = CLKSRC_SEL, + .src_index = NPCM7XX_CLOCK_SDCKSEL, + .divide = divide_by_reg_divisor, + .reg = NPCM7XX_CLK_CLKDIV1, + .offset = 11, + .len = 5, + .public_name = "mmc-clock", + }, + [NPCM7XX_CLOCK_SDHC_DIVIDER] = { + .name = "sdhc-divider", + .src_type = CLKSRC_SEL, + .src_index = NPCM7XX_CLOCK_SDCKSEL, + .divide = divide_by_reg_divisor_times_2, + .reg = NPCM7XX_CLK_CLKDIV2, + .offset = 0, + .len = 4, + .public_name = "sdhc-clock", + }, + [NPCM7XX_CLOCK_GFXM_DIVIDER] = { + .name = "gfxm-divider", + .src_type = CLKSRC_SEL, + .src_index = NPCM7XX_CLOCK_GFXMSEL, + .divide = divide_by_constant, + .divisor = 3, + .public_name = "gfxm-clock", + }, + [NPCM7XX_CLOCK_UTMI_DIVIDER] = { + .name = "utmi-divider", + .src_type = CLKSRC_SEL, + .src_index = NPCM7XX_CLOCK_SUCKSEL, + .divide = divide_by_reg_divisor, + .reg = NPCM7XX_CLK_CLKDIV2, + .offset = 8, + .len = 5, + .public_name = "utmi-clock", + }, +}; + +static void npcm7xx_clk_update_pll_cb(void *opaque, ClockEvent event) +{ + npcm7xx_clk_update_pll(opaque); +} + +static void npcm7xx_clk_pll_init(Object *obj) +{ + NPCM7xxClockPLLState *pll = NPCM7XX_CLOCK_PLL(obj); + + pll->clock_in = qdev_init_clock_in(DEVICE(pll), "clock-in", + npcm7xx_clk_update_pll_cb, pll, + ClockUpdate); + pll->clock_out = qdev_init_clock_out(DEVICE(pll), "clock-out"); +} + +static void npcm7xx_clk_update_sel_cb(void *opaque, ClockEvent event) +{ + npcm7xx_clk_update_sel(opaque); +} + +static void npcm7xx_clk_sel_init(Object *obj) +{ + int i; + NPCM7xxClockSELState *sel = NPCM7XX_CLOCK_SEL(obj); + + for (i = 0; i < NPCM7XX_CLK_SEL_MAX_INPUT; ++i) { + sel->clock_in[i] = qdev_init_clock_in(DEVICE(sel), + g_strdup_printf("clock-in[%d]", i), + npcm7xx_clk_update_sel_cb, sel, ClockUpdate); + } + sel->clock_out = qdev_init_clock_out(DEVICE(sel), "clock-out"); +} + +static void npcm7xx_clk_update_divider_cb(void *opaque, ClockEvent event) +{ + npcm7xx_clk_update_divider(opaque); +} + +static void npcm7xx_clk_divider_init(Object *obj) +{ + NPCM7xxClockDividerState *div = NPCM7XX_CLOCK_DIVIDER(obj); + + div->clock_in = qdev_init_clock_in(DEVICE(div), "clock-in", + npcm7xx_clk_update_divider_cb, + div, ClockUpdate); + div->clock_out = qdev_init_clock_out(DEVICE(div), "clock-out"); +} + +static void npcm7xx_init_clock_pll(NPCM7xxClockPLLState *pll, + NPCM7xxCLKState *clk, const PLLInitInfo *init_info) +{ + pll->name = init_info->name; + pll->clk = clk; + pll->reg = init_info->reg; + if (init_info->public_name != NULL) { + qdev_alias_clock(DEVICE(pll), "clock-out", DEVICE(clk), + init_info->public_name); + } +} + +static void npcm7xx_init_clock_sel(NPCM7xxClockSELState *sel, + 
NPCM7xxCLKState *clk, const SELInitInfo *init_info) +{ + int input_size = init_info->input_size; + + sel->name = init_info->name; + sel->clk = clk; + sel->input_size = init_info->input_size; + g_assert(input_size <= NPCM7XX_CLK_SEL_MAX_INPUT); + sel->offset = init_info->offset; + sel->len = init_info->len; + if (init_info->public_name != NULL) { + qdev_alias_clock(DEVICE(sel), "clock-out", DEVICE(clk), + init_info->public_name); + } +} + +static void npcm7xx_init_clock_divider(NPCM7xxClockDividerState *div, + NPCM7xxCLKState *clk, const DividerInitInfo *init_info) +{ + div->name = init_info->name; + div->clk = clk; + + div->divide = init_info->divide; + if (div->divide == divide_by_constant) { + div->divisor = init_info->divisor; + } else { + div->reg = init_info->reg; + div->offset = init_info->offset; + div->len = init_info->len; + } + if (init_info->public_name != NULL) { + qdev_alias_clock(DEVICE(div), "clock-out", DEVICE(clk), + init_info->public_name); + } +} + +static Clock *npcm7xx_get_clock(NPCM7xxCLKState *clk, ClockSrcType type, + int index) +{ + switch (type) { + case CLKSRC_REF: + return clk->clkref; + case CLKSRC_PLL: + return clk->plls[index].clock_out; + case CLKSRC_SEL: + return clk->sels[index].clock_out; + case CLKSRC_DIV: + return clk->dividers[index].clock_out; + default: + g_assert_not_reached(); + } +} + +static void npcm7xx_connect_clocks(NPCM7xxCLKState *clk) +{ + int i, j; + Clock *src; + + for (i = 0; i < NPCM7XX_CLOCK_NR_PLLS; ++i) { + src = npcm7xx_get_clock(clk, pll_init_info_list[i].src_type, + pll_init_info_list[i].src_index); + clock_set_source(clk->plls[i].clock_in, src); + } + for (i = 0; i < NPCM7XX_CLOCK_NR_SELS; ++i) { + for (j = 0; j < sel_init_info_list[i].input_size; ++j) { + src = npcm7xx_get_clock(clk, sel_init_info_list[i].src_type[j], + sel_init_info_list[i].src_index[j]); + clock_set_source(clk->sels[i].clock_in[j], src); + } + } + for (i = 0; i < NPCM7XX_CLOCK_NR_DIVIDERS; ++i) { + src = npcm7xx_get_clock(clk, divider_init_info_list[i].src_type, + divider_init_info_list[i].src_index); + clock_set_source(clk->dividers[i].clock_in, src); + } +} + +static uint64_t npcm7xx_clk_read(void *opaque, hwaddr offset, unsigned size) +{ + uint32_t reg = offset / sizeof(uint32_t); + NPCM7xxCLKState *s = opaque; + int64_t now_ns; + uint32_t value = 0; + + if (reg >= NPCM7XX_CLK_NR_REGS) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: offset 0x%04" HWADDR_PRIx " out of range\n", + __func__, offset); + return 0; + } + + switch (reg) { + case NPCM7XX_CLK_SWRSTR: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: register @ 0x%04" HWADDR_PRIx " is write-only\n", + __func__, offset); + break; + + case NPCM7XX_CLK_SECCNT: + now_ns = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + value = (now_ns - s->ref_ns) / NANOSECONDS_PER_SECOND; + break; + + case NPCM7XX_CLK_CNTR25M: + now_ns = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + /* + * This register counts 25 MHz cycles, updating every 640 ns. It rolls + * over to zero every second. + * + * The 4 LSBs are always zero: (1e9 / 640) << 4 = 25000000. 
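+ *
+ * For example, 640 ns after reset the register reads (1 << 4) = 16,
+ * and after exactly one second it wraps back to 0.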
+ */ + value = (((now_ns - s->ref_ns) / 640) << 4) % NPCM7XX_CLOCK_REF_HZ; + break; + + default: + value = s->regs[reg]; + break; + } + + trace_npcm7xx_clk_read(offset, value); + + return value; +} + +static void npcm7xx_clk_write(void *opaque, hwaddr offset, + uint64_t v, unsigned size) +{ + uint32_t reg = offset / sizeof(uint32_t); + NPCM7xxCLKState *s = opaque; + uint32_t value = v; + + trace_npcm7xx_clk_write(offset, value); + + if (reg >= NPCM7XX_CLK_NR_REGS) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: offset 0x%04" HWADDR_PRIx " out of range\n", + __func__, offset); + return; + } + + switch (reg) { + case NPCM7XX_CLK_SWRSTR: + qemu_log_mask(LOG_UNIMP, "%s: SW reset not implemented: 0x%02x\n", + __func__, value); + value = 0; + break; + + case NPCM7XX_CLK_PLLCON0: + case NPCM7XX_CLK_PLLCON1: + case NPCM7XX_CLK_PLLCON2: + case NPCM7XX_CLK_PLLCONG: + if (value & PLLCON_PWDEN) { + /* Power down -- clear lock and indicate loss of lock */ + value &= ~PLLCON_LOKI; + value |= PLLCON_LOKS; + } else { + /* Normal mode -- assume always locked */ + value |= PLLCON_LOKI; + /* Keep LOKS unchanged unless cleared by writing 1 */ + if (value & PLLCON_LOKS) { + value &= ~PLLCON_LOKS; + } else { + value |= (s->regs[reg] & PLLCON_LOKS); + } + } + /* Only update PLL when it is locked. */ + if (value & PLLCON_LOKI) { + npcm7xx_clk_update_pll(&s->plls[find_pll_by_reg(reg)]); + } + break; + + case NPCM7XX_CLK_CLKSEL: + npcm7xx_clk_update_all_sels(s); + break; + + case NPCM7XX_CLK_CLKDIV1: + case NPCM7XX_CLK_CLKDIV2: + case NPCM7XX_CLK_CLKDIV3: + npcm7xx_clk_update_all_dividers(s); + break; + + case NPCM7XX_CLK_CNTR25M: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: register @ 0x%04" HWADDR_PRIx " is read-only\n", + __func__, offset); + return; + } + + s->regs[reg] = value; +} + +/* Perform reset action triggered by a watchdog */ +static void npcm7xx_clk_perform_watchdog_reset(void *opaque, int n, + int level) +{ + NPCM7xxCLKState *clk = NPCM7XX_CLK(opaque); + uint32_t rcr; + + g_assert(n >= 0 && n < NPCM7XX_NR_WATCHDOGS); + rcr = clk->regs[NPCM7XX_CLK_WD0RCR + n]; + if (rcr & NPCM7XX_CLK_WDRCR_CA9C) { + watchdog_perform_action(); + } else { + qemu_log_mask(LOG_UNIMP, + "%s: only CPU reset is implemented. (requested 0x%" PRIx32")\n", + __func__, rcr); + } +} + +static const struct MemoryRegionOps npcm7xx_clk_ops = { + .read = npcm7xx_clk_read, + .write = npcm7xx_clk_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + .unaligned = false, + }, +}; + +static void npcm7xx_clk_enter_reset(Object *obj, ResetType type) +{ + NPCM7xxCLKState *s = NPCM7XX_CLK(obj); + + QEMU_BUILD_BUG_ON(sizeof(s->regs) != sizeof(cold_reset_values)); + + switch (type) { + case RESET_TYPE_COLD: + memcpy(s->regs, cold_reset_values, sizeof(cold_reset_values)); + s->ref_ns = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + npcm7xx_clk_update_all_clocks(s); + return; + } + + /* + * A small number of registers need to be reset on a core domain reset, + * but no such reset type exists yet.
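+ * (Per the comment on cold_reset_values above, CLKENx and SWRSTR are
+ * the registers that would be affected.)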
+ */ + qemu_log_mask(LOG_UNIMP, "%s: reset type %d not implemented.\n", + __func__, type); +} + +static void npcm7xx_clk_init_clock_hierarchy(NPCM7xxCLKState *s) +{ + int i; + + s->clkref = qdev_init_clock_in(DEVICE(s), "clkref", NULL, NULL, 0); + + /* First pass: init all converter modules */ + QEMU_BUILD_BUG_ON(ARRAY_SIZE(pll_init_info_list) != NPCM7XX_CLOCK_NR_PLLS); + QEMU_BUILD_BUG_ON(ARRAY_SIZE(sel_init_info_list) != NPCM7XX_CLOCK_NR_SELS); + QEMU_BUILD_BUG_ON(ARRAY_SIZE(divider_init_info_list) + != NPCM7XX_CLOCK_NR_DIVIDERS); + for (i = 0; i < NPCM7XX_CLOCK_NR_PLLS; ++i) { + object_initialize_child(OBJECT(s), pll_init_info_list[i].name, + &s->plls[i], TYPE_NPCM7XX_CLOCK_PLL); + npcm7xx_init_clock_pll(&s->plls[i], s, + &pll_init_info_list[i]); + } + for (i = 0; i < NPCM7XX_CLOCK_NR_SELS; ++i) { + object_initialize_child(OBJECT(s), sel_init_info_list[i].name, + &s->sels[i], TYPE_NPCM7XX_CLOCK_SEL); + npcm7xx_init_clock_sel(&s->sels[i], s, + &sel_init_info_list[i]); + } + for (i = 0; i < NPCM7XX_CLOCK_NR_DIVIDERS; ++i) { + object_initialize_child(OBJECT(s), divider_init_info_list[i].name, + &s->dividers[i], TYPE_NPCM7XX_CLOCK_DIVIDER); + npcm7xx_init_clock_divider(&s->dividers[i], s, + &divider_init_info_list[i]); + } + + /* Second pass: connect converter modules */ + npcm7xx_connect_clocks(s); + + clock_update_hz(s->clkref, NPCM7XX_CLOCK_REF_HZ); +} + +static void npcm7xx_clk_init(Object *obj) +{ + NPCM7xxCLKState *s = NPCM7XX_CLK(obj); + + memory_region_init_io(&s->iomem, obj, &npcm7xx_clk_ops, s, + TYPE_NPCM7XX_CLK, 4 * KiB); + sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->iomem); +} + +static int npcm7xx_clk_post_load(void *opaque, int version_id) +{ + if (version_id >= 1) { + NPCM7xxCLKState *clk = opaque; + + npcm7xx_clk_update_all_clocks(clk); + } + + return 0; +} + +static void npcm7xx_clk_realize(DeviceState *dev, Error **errp) +{ + int i; + NPCM7xxCLKState *s = NPCM7XX_CLK(dev); + + qdev_init_gpio_in_named(DEVICE(s), npcm7xx_clk_perform_watchdog_reset, + NPCM7XX_WATCHDOG_RESET_GPIO_IN, NPCM7XX_NR_WATCHDOGS); + npcm7xx_clk_init_clock_hierarchy(s); + + /* Realize child devices */ + for (i = 0; i < NPCM7XX_CLOCK_NR_PLLS; ++i) { + if (!qdev_realize(DEVICE(&s->plls[i]), NULL, errp)) { + return; + } + } + for (i = 0; i < NPCM7XX_CLOCK_NR_SELS; ++i) { + if (!qdev_realize(DEVICE(&s->sels[i]), NULL, errp)) { + return; + } + } + for (i = 0; i < NPCM7XX_CLOCK_NR_DIVIDERS; ++i) { + if (!qdev_realize(DEVICE(&s->dividers[i]), NULL, errp)) { + return; + } + } +} + +static const VMStateDescription vmstate_npcm7xx_clk_pll = { + .name = "npcm7xx-clock-pll", + .version_id = 0, + .minimum_version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_CLOCK(clock_in, NPCM7xxClockPLLState), + VMSTATE_END_OF_LIST(), + }, +}; + +static const VMStateDescription vmstate_npcm7xx_clk_sel = { + .name = "npcm7xx-clock-sel", + .version_id = 0, + .minimum_version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_ARRAY_OF_POINTER_TO_STRUCT(clock_in, NPCM7xxClockSELState, + NPCM7XX_CLK_SEL_MAX_INPUT, 0, vmstate_clock, Clock), + VMSTATE_END_OF_LIST(), + }, +}; + +static const VMStateDescription vmstate_npcm7xx_clk_divider = { + .name = "npcm7xx-clock-divider", + .version_id = 0, + .minimum_version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_CLOCK(clock_in, NPCM7xxClockDividerState), + VMSTATE_END_OF_LIST(), + }, +}; + +static const VMStateDescription vmstate_npcm7xx_clk = { + .name = "npcm7xx-clk", + .version_id = 1, + .minimum_version_id = 1, + .post_load = npcm7xx_clk_post_load, + .fields = (VMStateField[]) { +
VMSTATE_UINT32_ARRAY(regs, NPCM7xxCLKState, NPCM7XX_CLK_NR_REGS), + VMSTATE_INT64(ref_ns, NPCM7xxCLKState), + VMSTATE_CLOCK(clkref, NPCM7xxCLKState), + VMSTATE_END_OF_LIST(), + }, +}; + +static void npcm7xx_clk_pll_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->desc = "NPCM7xx Clock PLL Module"; + dc->vmsd = &vmstate_npcm7xx_clk_pll; +} + +static void npcm7xx_clk_sel_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->desc = "NPCM7xx Clock SEL Module"; + dc->vmsd = &vmstate_npcm7xx_clk_sel; +} + +static void npcm7xx_clk_divider_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->desc = "NPCM7xx Clock Divider Module"; + dc->vmsd = &vmstate_npcm7xx_clk_divider; +} + +static void npcm7xx_clk_class_init(ObjectClass *klass, void *data) +{ + ResettableClass *rc = RESETTABLE_CLASS(klass); + DeviceClass *dc = DEVICE_CLASS(klass); + + QEMU_BUILD_BUG_ON(NPCM7XX_CLK_REGS_END > NPCM7XX_CLK_NR_REGS); + + dc->desc = "NPCM7xx Clock Control Registers"; + dc->vmsd = &vmstate_npcm7xx_clk; + dc->realize = npcm7xx_clk_realize; + rc->phases.enter = npcm7xx_clk_enter_reset; +} + +static const TypeInfo npcm7xx_clk_pll_info = { + .name = TYPE_NPCM7XX_CLOCK_PLL, + .parent = TYPE_DEVICE, + .instance_size = sizeof(NPCM7xxClockPLLState), + .instance_init = npcm7xx_clk_pll_init, + .class_init = npcm7xx_clk_pll_class_init, +}; + +static const TypeInfo npcm7xx_clk_sel_info = { + .name = TYPE_NPCM7XX_CLOCK_SEL, + .parent = TYPE_DEVICE, + .instance_size = sizeof(NPCM7xxClockSELState), + .instance_init = npcm7xx_clk_sel_init, + .class_init = npcm7xx_clk_sel_class_init, +}; + +static const TypeInfo npcm7xx_clk_divider_info = { + .name = TYPE_NPCM7XX_CLOCK_DIVIDER, + .parent = TYPE_DEVICE, + .instance_size = sizeof(NPCM7xxClockDividerState), + .instance_init = npcm7xx_clk_divider_init, + .class_init = npcm7xx_clk_divider_class_init, +}; + +static const TypeInfo npcm7xx_clk_info = { + .name = TYPE_NPCM7XX_CLK, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(NPCM7xxCLKState), + .instance_init = npcm7xx_clk_init, + .class_init = npcm7xx_clk_class_init, +}; + +static void npcm7xx_clk_register_type(void) +{ + type_register_static(&npcm7xx_clk_pll_info); + type_register_static(&npcm7xx_clk_sel_info); + type_register_static(&npcm7xx_clk_divider_info); + type_register_static(&npcm7xx_clk_info); +} +type_init(npcm7xx_clk_register_type); diff --git a/hw/misc/npcm7xx_gcr.c b/hw/misc/npcm7xx_gcr.c new file mode 100644 index 000000000..eace9e196 --- /dev/null +++ b/hw/misc/npcm7xx_gcr.c @@ -0,0 +1,269 @@ +/* + * Nuvoton NPCM7xx System Global Control Registers. + * + * Copyright 2020 Google LLC + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ */ + +#include "qemu/osdep.h" + +#include "hw/misc/npcm7xx_gcr.h" +#include "hw/qdev-properties.h" +#include "migration/vmstate.h" +#include "qapi/error.h" +#include "qemu/cutils.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "qemu/units.h" + +#include "trace.h" + +#define NPCM7XX_GCR_MIN_DRAM_SIZE (128 * MiB) +#define NPCM7XX_GCR_MAX_DRAM_SIZE (2 * GiB) + +enum NPCM7xxGCRRegisters { + NPCM7XX_GCR_PDID, + NPCM7XX_GCR_PWRON, + NPCM7XX_GCR_MFSEL1 = 0x0c / sizeof(uint32_t), + NPCM7XX_GCR_MFSEL2, + NPCM7XX_GCR_MISCPE, + NPCM7XX_GCR_SPSWC = 0x038 / sizeof(uint32_t), + NPCM7XX_GCR_INTCR, + NPCM7XX_GCR_INTSR, + NPCM7XX_GCR_HIFCR = 0x050 / sizeof(uint32_t), + NPCM7XX_GCR_INTCR2 = 0x060 / sizeof(uint32_t), + NPCM7XX_GCR_MFSEL3, + NPCM7XX_GCR_SRCNT, + NPCM7XX_GCR_RESSR, + NPCM7XX_GCR_RLOCKR1, + NPCM7XX_GCR_FLOCKR1, + NPCM7XX_GCR_DSCNT, + NPCM7XX_GCR_MDLR, + NPCM7XX_GCR_SCRPAD3, + NPCM7XX_GCR_SCRPAD2, + NPCM7XX_GCR_DAVCLVLR = 0x098 / sizeof(uint32_t), + NPCM7XX_GCR_INTCR3, + NPCM7XX_GCR_VSINTR = 0x0ac / sizeof(uint32_t), + NPCM7XX_GCR_MFSEL4, + NPCM7XX_GCR_CPBPNTR = 0x0c4 / sizeof(uint32_t), + NPCM7XX_GCR_CPCTL = 0x0d0 / sizeof(uint32_t), + NPCM7XX_GCR_CP2BST, + NPCM7XX_GCR_B2CPNT, + NPCM7XX_GCR_CPPCTL, + NPCM7XX_GCR_I2CSEGSEL, + NPCM7XX_GCR_I2CSEGCTL, + NPCM7XX_GCR_VSRCR, + NPCM7XX_GCR_MLOCKR, + NPCM7XX_GCR_SCRPAD = 0x013c / sizeof(uint32_t), + NPCM7XX_GCR_USB1PHYCTL, + NPCM7XX_GCR_USB2PHYCTL, + NPCM7XX_GCR_REGS_END, +}; + +static const uint32_t cold_reset_values[NPCM7XX_GCR_NR_REGS] = { + [NPCM7XX_GCR_PDID] = 0x04a92750, /* Poleg A1 */ + [NPCM7XX_GCR_MISCPE] = 0x0000ffff, + [NPCM7XX_GCR_SPSWC] = 0x00000003, + [NPCM7XX_GCR_INTCR] = 0x0000035e, + [NPCM7XX_GCR_HIFCR] = 0x0000004e, + [NPCM7XX_GCR_INTCR2] = (1U << 19), /* DDR initialized */ + [NPCM7XX_GCR_RESSR] = 0x80000000, + [NPCM7XX_GCR_DSCNT] = 0x000000c0, + [NPCM7XX_GCR_DAVCLVLR] = 0x5a00f3cf, + [NPCM7XX_GCR_SCRPAD] = 0x00000008, + [NPCM7XX_GCR_USB1PHYCTL] = 0x034730e4, + [NPCM7XX_GCR_USB2PHYCTL] = 0x034730e4, +}; + +static uint64_t npcm7xx_gcr_read(void *opaque, hwaddr offset, unsigned size) +{ + uint32_t reg = offset / sizeof(uint32_t); + NPCM7xxGCRState *s = opaque; + + if (reg >= NPCM7XX_GCR_NR_REGS) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: offset 0x%04" HWADDR_PRIx " out of range\n", + __func__, offset); + return 0; + } + + trace_npcm7xx_gcr_read(offset, s->regs[reg]); + + return s->regs[reg]; +} + +static void npcm7xx_gcr_write(void *opaque, hwaddr offset, + uint64_t v, unsigned size) +{ + uint32_t reg = offset / sizeof(uint32_t); + NPCM7xxGCRState *s = opaque; + uint32_t value = v; + + trace_npcm7xx_gcr_write(offset, value); + + if (reg >= NPCM7XX_GCR_NR_REGS) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: offset 0x%04" HWADDR_PRIx " out of range\n", + __func__, offset); + return; + } + + switch (reg) { + case NPCM7XX_GCR_PDID: + case NPCM7XX_GCR_PWRON: + case NPCM7XX_GCR_INTSR: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: register @ 0x%04" HWADDR_PRIx " is read-only\n", + __func__, offset); + return; + + case NPCM7XX_GCR_RESSR: + case NPCM7XX_GCR_CP2BST: + /* Write 1 to clear */ + value = s->regs[reg] & ~value; + break; + + case NPCM7XX_GCR_RLOCKR1: + case NPCM7XX_GCR_MDLR: + /* Write 1 to set */ + value |= s->regs[reg]; + break; + }; + + s->regs[reg] = value; +} + +static const struct MemoryRegionOps npcm7xx_gcr_ops = { + .read = npcm7xx_gcr_read, + .write = npcm7xx_gcr_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + .unaligned = false, + }, +}; + +static void 
npcm7xx_gcr_enter_reset(Object *obj, ResetType type) +{ + NPCM7xxGCRState *s = NPCM7XX_GCR(obj); + + QEMU_BUILD_BUG_ON(sizeof(s->regs) != sizeof(cold_reset_values)); + + switch (type) { + case RESET_TYPE_COLD: + memcpy(s->regs, cold_reset_values, sizeof(s->regs)); + s->regs[NPCM7XX_GCR_PWRON] = s->reset_pwron; + s->regs[NPCM7XX_GCR_MDLR] = s->reset_mdlr; + s->regs[NPCM7XX_GCR_INTCR3] = s->reset_intcr3; + break; + } +} + +static void npcm7xx_gcr_realize(DeviceState *dev, Error **errp) +{ + ERRP_GUARD(); + NPCM7xxGCRState *s = NPCM7XX_GCR(dev); + uint64_t dram_size; + Object *obj; + + obj = object_property_get_link(OBJECT(dev), "dram-mr", errp); + if (!obj) { + error_prepend(errp, "%s: required dram-mr link not found: ", __func__); + return; + } + dram_size = memory_region_size(MEMORY_REGION(obj)); + if (!is_power_of_2(dram_size) || + dram_size < NPCM7XX_GCR_MIN_DRAM_SIZE || + dram_size > NPCM7XX_GCR_MAX_DRAM_SIZE) { + g_autofree char *sz = size_to_str(dram_size); + g_autofree char *min_sz = size_to_str(NPCM7XX_GCR_MIN_DRAM_SIZE); + g_autofree char *max_sz = size_to_str(NPCM7XX_GCR_MAX_DRAM_SIZE); + error_setg(errp, "%s: unsupported DRAM size %s", __func__, sz); + error_append_hint(errp, + "DRAM size must be a power of two between %s and %s," + " inclusive.\n", min_sz, max_sz); + return; + } + + /* Power-on reset value */ + s->reset_intcr3 = 0x00001002; + + /* + * The GMMAP (Graphics Memory Map) field is used by u-boot to detect the + * DRAM size, and is normally initialized by the boot block as part of DRAM + * training. However, since we don't have a complete emulation of the + * memory controller and try to make it look like it has already been + * initialized, the boot block will skip this initialization, and we need + * to make sure this field is set correctly up front. + * + * WARNING: some versions of u-boot only look at bits 8 and 9, so 2 GiB of + * DRAM will be interpreted as 128 MiB.
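+ * + * For example, 512 MiB of DRAM yields ctz64(512 MiB / 128 MiB) = ctz64(4) = 2, + * so the GMMAP field starting at bit 8 of INTCR3 is set to 2 below.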
+ * + * https://github.com/Nuvoton-Israel/u-boot/blob/2aef993bd2aafeb5408dbaad0f3ce099ee40c4aa/board/nuvoton/poleg/poleg.c#L244 + */ + s->reset_intcr3 |= ctz64(dram_size / NPCM7XX_GCR_MIN_DRAM_SIZE) << 8; +} + +static void npcm7xx_gcr_init(Object *obj) +{ + NPCM7xxGCRState *s = NPCM7XX_GCR(obj); + + memory_region_init_io(&s->iomem, obj, &npcm7xx_gcr_ops, s, + TYPE_NPCM7XX_GCR, 4 * KiB); + sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->iomem); +} + +static const VMStateDescription vmstate_npcm7xx_gcr = { + .name = "npcm7xx-gcr", + .version_id = 0, + .minimum_version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_UINT32_ARRAY(regs, NPCM7xxGCRState, NPCM7XX_GCR_NR_REGS), + VMSTATE_END_OF_LIST(), + }, +}; + +static Property npcm7xx_gcr_properties[] = { + DEFINE_PROP_UINT32("disabled-modules", NPCM7xxGCRState, reset_mdlr, 0), + DEFINE_PROP_UINT32("power-on-straps", NPCM7xxGCRState, reset_pwron, 0), + DEFINE_PROP_END_OF_LIST(), +}; + +static void npcm7xx_gcr_class_init(ObjectClass *klass, void *data) +{ + ResettableClass *rc = RESETTABLE_CLASS(klass); + DeviceClass *dc = DEVICE_CLASS(klass); + + QEMU_BUILD_BUG_ON(NPCM7XX_GCR_REGS_END > NPCM7XX_GCR_NR_REGS); + + dc->desc = "NPCM7xx System Global Control Registers"; + dc->realize = npcm7xx_gcr_realize; + dc->vmsd = &vmstate_npcm7xx_gcr; + rc->phases.enter = npcm7xx_gcr_enter_reset; + + device_class_set_props(dc, npcm7xx_gcr_properties); +} + +static const TypeInfo npcm7xx_gcr_info = { + .name = TYPE_NPCM7XX_GCR, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(NPCM7xxGCRState), + .instance_init = npcm7xx_gcr_init, + .class_init = npcm7xx_gcr_class_init, +}; + +static void npcm7xx_gcr_register_type(void) +{ + type_register_static(&npcm7xx_gcr_info); +} +type_init(npcm7xx_gcr_register_type); diff --git a/hw/misc/npcm7xx_mft.c b/hw/misc/npcm7xx_mft.c new file mode 100644 index 000000000..a30583a1b --- /dev/null +++ b/hw/misc/npcm7xx_mft.c @@ -0,0 +1,540 @@ +/* + * Nuvoton NPCM7xx MFT Module + * + * Copyright 2021 Google LLC + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + */ + +#include "qemu/osdep.h" +#include "hw/irq.h" +#include "hw/qdev-clock.h" +#include "hw/qdev-properties.h" +#include "hw/misc/npcm7xx_mft.h" +#include "hw/misc/npcm7xx_pwm.h" +#include "hw/registerfields.h" +#include "migration/vmstate.h" +#include "qapi/error.h" +#include "qapi/visitor.h" +#include "qemu/bitops.h" +#include "qemu/error-report.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "qemu/timer.h" +#include "qemu/units.h" +#include "trace.h" + +/* + * Some of the registers can only be accessed via 16-bit ops and some can only + * be accessed via 8-bit ops. However, we mark all of them using REG16 to + * simplify the implementation. npcm7xx_mft_check_mem_op checks the access + * length of memory operations.
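+ * + * For example, CNT1 (offset 0x00) is 16 bits wide and must be accessed with + * a 16-bit op, while a 16-bit access to the 8-bit PRSC register (offset 0x08) + * is rejected by npcm7xx_mft_check_mem_op.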
+ */ +REG16(NPCM7XX_MFT_CNT1, 0x00); +REG16(NPCM7XX_MFT_CRA, 0x02); +REG16(NPCM7XX_MFT_CRB, 0x04); +REG16(NPCM7XX_MFT_CNT2, 0x06); +REG16(NPCM7XX_MFT_PRSC, 0x08); +REG16(NPCM7XX_MFT_CKC, 0x0a); +REG16(NPCM7XX_MFT_MCTRL, 0x0c); +REG16(NPCM7XX_MFT_ICTRL, 0x0e); +REG16(NPCM7XX_MFT_ICLR, 0x10); +REG16(NPCM7XX_MFT_IEN, 0x12); +REG16(NPCM7XX_MFT_CPA, 0x14); +REG16(NPCM7XX_MFT_CPB, 0x16); +REG16(NPCM7XX_MFT_CPCFG, 0x18); +REG16(NPCM7XX_MFT_INASEL, 0x1a); +REG16(NPCM7XX_MFT_INBSEL, 0x1c); + +/* Register Fields */ +#define NPCM7XX_MFT_CKC_C2CSEL BIT(3) +#define NPCM7XX_MFT_CKC_C1CSEL BIT(0) + +#define NPCM7XX_MFT_MCTRL_TBEN BIT(6) +#define NPCM7XX_MFT_MCTRL_TAEN BIT(5) +#define NPCM7XX_MFT_MCTRL_TBEDG BIT(4) +#define NPCM7XX_MFT_MCTRL_TAEDG BIT(3) +#define NPCM7XX_MFT_MCTRL_MODE5 BIT(2) + +#define NPCM7XX_MFT_ICTRL_TFPND BIT(5) +#define NPCM7XX_MFT_ICTRL_TEPND BIT(4) +#define NPCM7XX_MFT_ICTRL_TDPND BIT(3) +#define NPCM7XX_MFT_ICTRL_TCPND BIT(2) +#define NPCM7XX_MFT_ICTRL_TBPND BIT(1) +#define NPCM7XX_MFT_ICTRL_TAPND BIT(0) + +#define NPCM7XX_MFT_ICLR_TFCLR BIT(5) +#define NPCM7XX_MFT_ICLR_TECLR BIT(4) +#define NPCM7XX_MFT_ICLR_TDCLR BIT(3) +#define NPCM7XX_MFT_ICLR_TCCLR BIT(2) +#define NPCM7XX_MFT_ICLR_TBCLR BIT(1) +#define NPCM7XX_MFT_ICLR_TACLR BIT(0) + +#define NPCM7XX_MFT_IEN_TFIEN BIT(5) +#define NPCM7XX_MFT_IEN_TEIEN BIT(4) +#define NPCM7XX_MFT_IEN_TDIEN BIT(3) +#define NPCM7XX_MFT_IEN_TCIEN BIT(2) +#define NPCM7XX_MFT_IEN_TBIEN BIT(1) +#define NPCM7XX_MFT_IEN_TAIEN BIT(0) + +#define NPCM7XX_MFT_CPCFG_GET_B(rv) extract8((rv), 4, 4) +#define NPCM7XX_MFT_CPCFG_GET_A(rv) extract8((rv), 0, 4) +#define NPCM7XX_MFT_CPCFG_HIEN BIT(3) +#define NPCM7XX_MFT_CPCFG_EQEN BIT(2) +#define NPCM7XX_MFT_CPCFG_LOEN BIT(1) +#define NPCM7XX_MFT_CPCFG_CPSEL BIT(0) + +#define NPCM7XX_MFT_INASEL_SELA BIT(0) +#define NPCM7XX_MFT_INBSEL_SELB BIT(0) + +/* Max CNT value of the module. The CNT value counts down from it. */ +#define NPCM7XX_MFT_MAX_CNT 0xFFFF + +/* Each fan revolution should generate 2 pulses */ +#define NPCM7XX_MFT_PULSE_PER_REVOLUTION 2 + +typedef enum NPCM7xxMFTCaptureState { + /* capture succeeds with a valid CNT value. */ + NPCM7XX_CAPTURE_SUCCEED, + /* capture stops prematurely after reaching the CPCFG condition. */ + NPCM7XX_CAPTURE_COMPARE_HIT, + /* capture fails because CNT reaches the underflow condition. */ + NPCM7XX_CAPTURE_UNDERFLOW, +} NPCM7xxMFTCaptureState; + +static void npcm7xx_mft_reset(NPCM7xxMFTState *s) +{ + int i; + + /* Only registers PRSC ~ INBSEL need to be reset. */ + for (i = R_NPCM7XX_MFT_PRSC; i <= R_NPCM7XX_MFT_INBSEL; ++i) { + s->regs[i] = 0; + } +} + +static void npcm7xx_mft_clear_interrupt(NPCM7xxMFTState *s, uint8_t iclr) +{ + /* + * Clear bits in ICTRL where the corresponding bits in iclr are 1. + * Both iclr and ictrl are 8-bit regs. (See npcm7xx_mft_check_mem_op) + */ + s->regs[R_NPCM7XX_MFT_ICTRL] &= ~iclr; +} + +/* + * If the CPCFG condition would be triggered during the count down from + * NPCM7XX_MFT_MAX_CNT to src, compared against tgt, return the count at which + * the condition is triggered. + * Otherwise return -1. + * Since tgt is uint16_t it must always be <= NPCM7XX_MFT_MAX_CNT.
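+ * + * For example, with only LOEN set in cpcfg and tgt = 100, the count-down + * first drops below the target at 99, so 99 (tgt - 1) is returned.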
+ */ +static int npcm7xx_mft_compare(int32_t src, uint16_t tgt, uint8_t cpcfg) +{ + if (cpcfg & NPCM7XX_MFT_CPCFG_HIEN) { + return NPCM7XX_MFT_MAX_CNT; + } + if ((cpcfg & NPCM7XX_MFT_CPCFG_EQEN) && (src <= tgt)) { + return tgt; + } + if ((cpcfg & NPCM7XX_MFT_CPCFG_LOEN) && (tgt > 0) && (src < tgt)) { + return tgt - 1; + } + + return -1; +} + +/* Compute CNT according to the corresponding fan's RPM. */ +static NPCM7xxMFTCaptureState npcm7xx_mft_compute_cnt( + Clock *clock, uint32_t max_rpm, uint32_t duty, uint16_t tgt, + uint8_t cpcfg, uint16_t *cnt) +{ + uint32_t rpm = (uint64_t)max_rpm * (uint64_t)duty / NPCM7XX_PWM_MAX_DUTY; + int32_t count; + int stopped; + NPCM7xxMFTCaptureState state; + + if (rpm == 0) { + /* + * If RPM = 0, capture won't happen. CNT will continue counting down. + * So it's effectively equivalent to having a cnt > NPCM7XX_MFT_MAX_CNT. + */ + count = NPCM7XX_MFT_MAX_CNT + 1; + } else { + /* + * RPM = revolutions/minute. The time for one revolution (in ns) is + * 60 * NANOSECONDS_PER_SECOND / RPM. + */ + count = clock_ns_to_ticks(clock, (60 * NANOSECONDS_PER_SECOND) / + (rpm * NPCM7XX_MFT_PULSE_PER_REVOLUTION)); + } + + if (count > NPCM7XX_MFT_MAX_CNT) { + count = -1; + } else { + /* The CNT is a countdown value from NPCM7XX_MFT_MAX_CNT. */ + count = NPCM7XX_MFT_MAX_CNT - count; + } + stopped = npcm7xx_mft_compare(count, tgt, cpcfg); + if (stopped == -1) { + if (count == -1) { + /* Underflow */ + state = NPCM7XX_CAPTURE_UNDERFLOW; + } else { + state = NPCM7XX_CAPTURE_SUCCEED; + } + } else { + count = stopped; + state = NPCM7XX_CAPTURE_COMPARE_HIT; + } + + if (count != -1) { + *cnt = count; + } + trace_npcm7xx_mft_rpm(clock->canonical_path, clock_get_hz(clock), + state, count, rpm, duty); + return state; +} + +/* + * Capture fan RPM and update the CNT and CR registers accordingly. + * Raise the IRQ if certain conditions are met in IEN. + */ +static void npcm7xx_mft_capture(NPCM7xxMFTState *s) +{ + int irq_level = 0; + NPCM7xxMFTCaptureState state; + int sel; + uint8_t cpcfg; + + /* + * If not mode 5, the behavior is undefined. We just do nothing in this + * case. + */ + if (!(s->regs[R_NPCM7XX_MFT_MCTRL] & NPCM7XX_MFT_MCTRL_MODE5)) { + return; + } + + /* Capture input A. */ + if (s->regs[R_NPCM7XX_MFT_MCTRL] & NPCM7XX_MFT_MCTRL_TAEN && + s->regs[R_NPCM7XX_MFT_CKC] & NPCM7XX_MFT_CKC_C1CSEL) { + sel = s->regs[R_NPCM7XX_MFT_INASEL] & NPCM7XX_MFT_INASEL_SELA; + cpcfg = NPCM7XX_MFT_CPCFG_GET_A(s->regs[R_NPCM7XX_MFT_CPCFG]); + state = npcm7xx_mft_compute_cnt(s->clock_1, + sel ? s->max_rpm[2] : s->max_rpm[0], + sel ? s->duty[2] : s->duty[0], + s->regs[R_NPCM7XX_MFT_CPA], + cpcfg, + &s->regs[R_NPCM7XX_MFT_CNT1]); + switch (state) { + case NPCM7XX_CAPTURE_SUCCEED: + /* Interrupt on input capture on TAn transition - TAPND */ + s->regs[R_NPCM7XX_MFT_CRA] = s->regs[R_NPCM7XX_MFT_CNT1]; + s->regs[R_NPCM7XX_MFT_ICTRL] |= NPCM7XX_MFT_ICTRL_TAPND; + if (s->regs[R_NPCM7XX_MFT_IEN] & NPCM7XX_MFT_IEN_TAIEN) { + irq_level = 1; + } + break; + + case NPCM7XX_CAPTURE_COMPARE_HIT: + /* Compare Hit - TEPND */ + s->regs[R_NPCM7XX_MFT_ICTRL] |= NPCM7XX_MFT_ICTRL_TEPND; + if (s->regs[R_NPCM7XX_MFT_IEN] & NPCM7XX_MFT_IEN_TEIEN) { + irq_level = 1; + } + break; + + case NPCM7XX_CAPTURE_UNDERFLOW: + /* Underflow - TCPND */ + s->regs[R_NPCM7XX_MFT_ICTRL] |= NPCM7XX_MFT_ICTRL_TCPND; + if (s->regs[R_NPCM7XX_MFT_IEN] & NPCM7XX_MFT_IEN_TCIEN) { + irq_level = 1; + } + break; + + default: + g_assert_not_reached(); + } + } + + /* Capture input B.
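This mirrors input A, but uses clock_2, CPB, INBSEL.SELB and the high nibble of CPCFG, and raises the TB/TF/TD pending bits instead.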
*/ + if (s->regs[R_NPCM7XX_MFT_MCTRL] & NPCM7XX_MFT_MCTRL_TBEN && + s->regs[R_NPCM7XX_MFT_CKC] & NPCM7XX_MFT_CKC_C2CSEL) { + sel = s->regs[R_NPCM7XX_MFT_INBSEL] & NPCM7XX_MFT_INBSEL_SELB; + cpcfg = NPCM7XX_MFT_CPCFG_GET_B(s->regs[R_NPCM7XX_MFT_CPCFG]); + state = npcm7xx_mft_compute_cnt(s->clock_2, + sel ? s->max_rpm[3] : s->max_rpm[1], + sel ? s->duty[3] : s->duty[1], + s->regs[R_NPCM7XX_MFT_CPB], + cpcfg, + &s->regs[R_NPCM7XX_MFT_CNT2]); + switch (state) { + case NPCM7XX_CAPTURE_SUCCEED: + /* Interrupt on input capture on TBn transition - TBPND */ + s->regs[R_NPCM7XX_MFT_CRB] = s->regs[R_NPCM7XX_MFT_CNT2]; + s->regs[R_NPCM7XX_MFT_ICTRL] |= NPCM7XX_MFT_ICTRL_TBPND; + if (s->regs[R_NPCM7XX_MFT_IEN] & NPCM7XX_MFT_IEN_TBIEN) { + irq_level = 1; + } + break; + + case NPCM7XX_CAPTURE_COMPARE_HIT: + /* Compare Hit - TFPND */ + s->regs[R_NPCM7XX_MFT_ICTRL] |= NPCM7XX_MFT_ICTRL_TFPND; + if (s->regs[R_NPCM7XX_MFT_IEN] & NPCM7XX_MFT_IEN_TFIEN) { + irq_level = 1; + } + break; + + case NPCM7XX_CAPTURE_UNDERFLOW: + /* Underflow - TDPND */ + s->regs[R_NPCM7XX_MFT_ICTRL] |= NPCM7XX_MFT_ICTRL_TDPND; + if (s->regs[R_NPCM7XX_MFT_IEN] & NPCM7XX_MFT_IEN_TDIEN) { + irq_level = 1; + } + break; + + default: + g_assert_not_reached(); + } + } + + trace_npcm7xx_mft_capture(DEVICE(s)->canonical_path, irq_level); + qemu_set_irq(s->irq, irq_level); +} + +/* Update clock for counters. */ +static void npcm7xx_mft_update_clock(void *opaque, ClockEvent event) +{ + NPCM7xxMFTState *s = NPCM7XX_MFT(opaque); + uint64_t prescaled_clock_period; + + prescaled_clock_period = clock_get(s->clock_in) * + (s->regs[R_NPCM7XX_MFT_PRSC] + 1ULL); + trace_npcm7xx_mft_update_clock(s->clock_in->canonical_path, + s->regs[R_NPCM7XX_MFT_CKC], + clock_get(s->clock_in), + prescaled_clock_period); + /* Update clock 1 */ + if (s->regs[R_NPCM7XX_MFT_CKC] & NPCM7XX_MFT_CKC_C1CSEL) { + /* Clock is prescaled. */ + clock_update(s->clock_1, prescaled_clock_period); + } else { + /* Clock stopped. */ + clock_update(s->clock_1, 0); + } + /* Update clock 2 */ + if (s->regs[R_NPCM7XX_MFT_CKC] & NPCM7XX_MFT_CKC_C2CSEL) { + /* Clock is prescaled. */ + clock_update(s->clock_2, prescaled_clock_period); + } else { + /* Clock stopped. */ + clock_update(s->clock_2, 0); + } + + npcm7xx_mft_capture(s); +} + +static uint64_t npcm7xx_mft_read(void *opaque, hwaddr offset, unsigned size) +{ + NPCM7xxMFTState *s = NPCM7XX_MFT(opaque); + uint16_t value = 0; + + switch (offset) { + case A_NPCM7XX_MFT_ICLR: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: register @ 0x%04" HWADDR_PRIx " is write-only\n", + __func__, offset); + break; + + default: + value = s->regs[offset / 2]; + } + + trace_npcm7xx_mft_read(DEVICE(s)->canonical_path, offset, value); + return value; +} + +static void npcm7xx_mft_write(void *opaque, hwaddr offset, + uint64_t v, unsigned size) +{ + NPCM7xxMFTState *s = NPCM7XX_MFT(opaque); + + trace_npcm7xx_mft_write(DEVICE(s)->canonical_path, offset, v); + switch (offset) { + case A_NPCM7XX_MFT_ICLR: + npcm7xx_mft_clear_interrupt(s, v); + break; + + case A_NPCM7XX_MFT_CKC: + case A_NPCM7XX_MFT_PRSC: + s->regs[offset / 2] = v; + npcm7xx_mft_update_clock(s, ClockUpdate); + break; + + default: + s->regs[offset / 2] = v; + npcm7xx_mft_capture(s); + break; + } +} + +static bool npcm7xx_mft_check_mem_op(void *opaque, hwaddr offset, + unsigned size, bool is_write, + MemTxAttrs attrs) +{ + switch (offset) { + /* 16-bit registers. 
Must be accessed with 16-bit read/write.*/ + case A_NPCM7XX_MFT_CNT1: + case A_NPCM7XX_MFT_CRA: + case A_NPCM7XX_MFT_CRB: + case A_NPCM7XX_MFT_CNT2: + case A_NPCM7XX_MFT_CPA: + case A_NPCM7XX_MFT_CPB: + return size == 2; + + /* 8-bit registers. Must be accessed with 8-bit read/write.*/ + case A_NPCM7XX_MFT_PRSC: + case A_NPCM7XX_MFT_CKC: + case A_NPCM7XX_MFT_MCTRL: + case A_NPCM7XX_MFT_ICTRL: + case A_NPCM7XX_MFT_ICLR: + case A_NPCM7XX_MFT_IEN: + case A_NPCM7XX_MFT_CPCFG: + case A_NPCM7XX_MFT_INASEL: + case A_NPCM7XX_MFT_INBSEL: + return size == 1; + + default: + /* Invalid registers. */ + return false; + } +} + +static void npcm7xx_mft_get_max_rpm(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + visit_type_uint32(v, name, (uint32_t *)opaque, errp); +} + +static void npcm7xx_mft_set_max_rpm(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + NPCM7xxMFTState *s = NPCM7XX_MFT(obj); + uint32_t *max_rpm = opaque; + uint32_t value; + + if (!visit_type_uint32(v, name, &value, errp)) { + return; + } + + *max_rpm = value; + npcm7xx_mft_capture(s); +} + +static void npcm7xx_mft_duty_handler(void *opaque, int n, int value) +{ + NPCM7xxMFTState *s = NPCM7XX_MFT(opaque); + + trace_npcm7xx_mft_set_duty(DEVICE(s)->canonical_path, n, value); + s->duty[n] = value; + npcm7xx_mft_capture(s); +} + +static const struct MemoryRegionOps npcm7xx_mft_ops = { + .read = npcm7xx_mft_read, + .write = npcm7xx_mft_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 1, + .max_access_size = 2, + .unaligned = false, + .accepts = npcm7xx_mft_check_mem_op, + }, +}; + +static void npcm7xx_mft_enter_reset(Object *obj, ResetType type) +{ + NPCM7xxMFTState *s = NPCM7XX_MFT(obj); + + npcm7xx_mft_reset(s); +} + +static void npcm7xx_mft_hold_reset(Object *obj) +{ + NPCM7xxMFTState *s = NPCM7XX_MFT(obj); + + qemu_irq_lower(s->irq); +} + +static void npcm7xx_mft_init(Object *obj) +{ + NPCM7xxMFTState *s = NPCM7XX_MFT(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + DeviceState *dev = DEVICE(obj); + + memory_region_init_io(&s->iomem, obj, &npcm7xx_mft_ops, s, + TYPE_NPCM7XX_MFT, 4 * KiB); + sysbus_init_mmio(sbd, &s->iomem); + sysbus_init_irq(sbd, &s->irq); + s->clock_in = qdev_init_clock_in(dev, "clock-in", npcm7xx_mft_update_clock, + s, ClockUpdate); + s->clock_1 = qdev_init_clock_out(dev, "clock1"); + s->clock_2 = qdev_init_clock_out(dev, "clock2"); + + for (int i = 0; i < NPCM7XX_PWM_PER_MODULE; ++i) { + object_property_add(obj, "max_rpm[*]", "uint32", + npcm7xx_mft_get_max_rpm, + npcm7xx_mft_set_max_rpm, + NULL, &s->max_rpm[i]); + } + qdev_init_gpio_in_named(dev, npcm7xx_mft_duty_handler, "duty", + NPCM7XX_MFT_FANIN_COUNT); +} + +static const VMStateDescription vmstate_npcm7xx_mft = { + .name = "npcm7xx-mft-module", + .version_id = 0, + .minimum_version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_CLOCK(clock_in, NPCM7xxMFTState), + VMSTATE_CLOCK(clock_1, NPCM7xxMFTState), + VMSTATE_CLOCK(clock_2, NPCM7xxMFTState), + VMSTATE_UINT16_ARRAY(regs, NPCM7xxMFTState, NPCM7XX_MFT_NR_REGS), + VMSTATE_UINT32_ARRAY(max_rpm, NPCM7xxMFTState, NPCM7XX_MFT_FANIN_COUNT), + VMSTATE_UINT32_ARRAY(duty, NPCM7xxMFTState, NPCM7XX_MFT_FANIN_COUNT), + VMSTATE_END_OF_LIST(), + }, +}; + +static void npcm7xx_mft_class_init(ObjectClass *klass, void *data) +{ + ResettableClass *rc = RESETTABLE_CLASS(klass); + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->desc = "NPCM7xx MFT Controller"; + dc->vmsd = &vmstate_npcm7xx_mft; + rc->phases.enter = npcm7xx_mft_enter_reset; + 
rc->phases.hold = npcm7xx_mft_hold_reset; +} + +static const TypeInfo npcm7xx_mft_info = { + .name = TYPE_NPCM7XX_MFT, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(NPCM7xxMFTState), + .class_init = npcm7xx_mft_class_init, + .instance_init = npcm7xx_mft_init, +}; + +static void npcm7xx_mft_register_type(void) +{ + type_register_static(&npcm7xx_mft_info); +} +type_init(npcm7xx_mft_register_type); diff --git a/hw/misc/npcm7xx_pwm.c b/hw/misc/npcm7xx_pwm.c new file mode 100644 index 000000000..2be5bd25c --- /dev/null +++ b/hw/misc/npcm7xx_pwm.c @@ -0,0 +1,569 @@ +/* + * Nuvoton NPCM7xx PWM Module + * + * Copyright 2020 Google LLC + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + */ + +#include "qemu/osdep.h" +#include "hw/irq.h" +#include "hw/qdev-clock.h" +#include "hw/qdev-properties.h" +#include "hw/misc/npcm7xx_pwm.h" +#include "hw/registerfields.h" +#include "migration/vmstate.h" +#include "qemu/bitops.h" +#include "qemu/error-report.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "qemu/units.h" +#include "trace.h" + +REG32(NPCM7XX_PWM_PPR, 0x00); +REG32(NPCM7XX_PWM_CSR, 0x04); +REG32(NPCM7XX_PWM_PCR, 0x08); +REG32(NPCM7XX_PWM_CNR0, 0x0c); +REG32(NPCM7XX_PWM_CMR0, 0x10); +REG32(NPCM7XX_PWM_PDR0, 0x14); +REG32(NPCM7XX_PWM_CNR1, 0x18); +REG32(NPCM7XX_PWM_CMR1, 0x1c); +REG32(NPCM7XX_PWM_PDR1, 0x20); +REG32(NPCM7XX_PWM_CNR2, 0x24); +REG32(NPCM7XX_PWM_CMR2, 0x28); +REG32(NPCM7XX_PWM_PDR2, 0x2c); +REG32(NPCM7XX_PWM_CNR3, 0x30); +REG32(NPCM7XX_PWM_CMR3, 0x34); +REG32(NPCM7XX_PWM_PDR3, 0x38); +REG32(NPCM7XX_PWM_PIER, 0x3c); +REG32(NPCM7XX_PWM_PIIR, 0x40); +REG32(NPCM7XX_PWM_PWDR0, 0x44); +REG32(NPCM7XX_PWM_PWDR1, 0x48); +REG32(NPCM7XX_PWM_PWDR2, 0x4c); +REG32(NPCM7XX_PWM_PWDR3, 0x50); + +/* Register field definitions. */ +#define NPCM7XX_PPR(rv, index) extract32((rv), npcm7xx_ppr_base[index], 8) +#define NPCM7XX_CSR(rv, index) extract32((rv), npcm7xx_csr_base[index], 3) +#define NPCM7XX_CH(rv, index) extract32((rv), npcm7xx_ch_base[index], 4) +#define NPCM7XX_CH_EN BIT(0) +#define NPCM7XX_CH_INV BIT(2) +#define NPCM7XX_CH_MOD BIT(3) + +#define NPCM7XX_MAX_CMR 65535 +#define NPCM7XX_MAX_CNR 65535 + +/* Offset of each PWM channel's prescaler in the PPR register. */ +static const int npcm7xx_ppr_base[] = { 0, 0, 8, 8 }; +/* Offset of each PWM channel's clock selector in the CSR register. */ +static const int npcm7xx_csr_base[] = { 0, 4, 8, 12 }; +/* Offset of each PWM channel's control variable in the PCR register. */ +static const int npcm7xx_ch_base[] = { 0, 8, 12, 16 }; + +static uint32_t npcm7xx_pwm_calculate_freq(NPCM7xxPWM *p) +{ + uint32_t ppr; + uint32_t csr; + uint32_t freq; + + if (!p->running) { + return 0; + } + + csr = NPCM7XX_CSR(p->module->csr, p->index); + ppr = NPCM7XX_PPR(p->module->ppr, p->index); + freq = clock_get_hz(p->module->clock); + freq /= ppr + 1; + /* csr can only be 0~4 */ + if (csr > 4) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: invalid csr value %u\n", + __func__, csr); + csr = 4; + } + /* freq won't be changed if csr == 4. 
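For csr values 0..3 the prescaled clock is divided by a further 2^(csr + 1); csr == 4 passes the prescaled clock through unchanged.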
*/ + if (csr < 4) { + freq >>= csr + 1; + } + + return freq / (p->cnr + 1); +} + +static uint32_t npcm7xx_pwm_calculate_duty(NPCM7xxPWM *p) +{ + uint32_t duty; + + if (p->running) { + if (p->cnr == 0) { + duty = 0; + } else if (p->cmr >= p->cnr) { + duty = NPCM7XX_PWM_MAX_DUTY; + } else { + duty = (uint64_t)NPCM7XX_PWM_MAX_DUTY * (p->cmr + 1) / (p->cnr + 1); + } + } else { + duty = 0; + } + + if (p->inverted) { + duty = NPCM7XX_PWM_MAX_DUTY - duty; + } + + return duty; +} + +static void npcm7xx_pwm_update_freq(NPCM7xxPWM *p) +{ + uint32_t freq = npcm7xx_pwm_calculate_freq(p); + + if (freq != p->freq) { + trace_npcm7xx_pwm_update_freq(DEVICE(p->module)->canonical_path, + p->index, p->freq, freq); + p->freq = freq; + } +} + +static void npcm7xx_pwm_update_duty(NPCM7xxPWM *p) +{ + uint32_t duty = npcm7xx_pwm_calculate_duty(p); + + if (duty != p->duty) { + trace_npcm7xx_pwm_update_duty(DEVICE(p->module)->canonical_path, + p->index, p->duty, duty); + p->duty = duty; + qemu_set_irq(p->module->duty_gpio_out[p->index], p->duty); + } +} + +static void npcm7xx_pwm_update_output(NPCM7xxPWM *p) +{ + npcm7xx_pwm_update_freq(p); + npcm7xx_pwm_update_duty(p); +} + +static void npcm7xx_pwm_write_ppr(NPCM7xxPWMState *s, uint32_t new_ppr) +{ + int i; + uint32_t old_ppr = s->ppr; + + QEMU_BUILD_BUG_ON(ARRAY_SIZE(npcm7xx_ppr_base) != NPCM7XX_PWM_PER_MODULE); + s->ppr = new_ppr; + for (i = 0; i < NPCM7XX_PWM_PER_MODULE; ++i) { + if (NPCM7XX_PPR(old_ppr, i) != NPCM7XX_PPR(new_ppr, i)) { + npcm7xx_pwm_update_freq(&s->pwm[i]); + } + } +} + +static void npcm7xx_pwm_write_csr(NPCM7xxPWMState *s, uint32_t new_csr) +{ + int i; + uint32_t old_csr = s->csr; + + QEMU_BUILD_BUG_ON(ARRAY_SIZE(npcm7xx_csr_base) != NPCM7XX_PWM_PER_MODULE); + s->csr = new_csr; + for (i = 0; i < NPCM7XX_PWM_PER_MODULE; ++i) { + if (NPCM7XX_CSR(old_csr, i) != NPCM7XX_CSR(new_csr, i)) { + npcm7xx_pwm_update_freq(&s->pwm[i]); + } + } +} + +static void npcm7xx_pwm_write_pcr(NPCM7xxPWMState *s, uint32_t new_pcr) +{ + int i; + bool inverted; + uint32_t pcr; + NPCM7xxPWM *p; + + s->pcr = new_pcr; + QEMU_BUILD_BUG_ON(ARRAY_SIZE(npcm7xx_ch_base) != NPCM7XX_PWM_PER_MODULE); + for (i = 0; i < NPCM7XX_PWM_PER_MODULE; ++i) { + p = &s->pwm[i]; + pcr = NPCM7XX_CH(new_pcr, i); + inverted = pcr & NPCM7XX_CH_INV; + + /* + * We only run a PWM channel with toggle mode. Single-shot mode does not + * generate frequency and duty-cycle values. + */ + if ((pcr & NPCM7XX_CH_EN) && (pcr & NPCM7XX_CH_MOD)) { + if (p->running) { + /* Re-run this PWM channel if inverted changed. */ + if (p->inverted ^ inverted) { + p->inverted = inverted; + npcm7xx_pwm_update_duty(p); + } + } else { + /* Run this PWM channel. */ + p->running = true; + p->inverted = inverted; + npcm7xx_pwm_update_output(p); + } + } else { + /* Clear this PWM channel. 
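A stopped channel reports frequency 0 and duty 0 (or NPCM7XX_PWM_MAX_DUTY when the output is inverted).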
*/ + p->running = false; + p->inverted = inverted; + npcm7xx_pwm_update_output(p); + } + } + +} + +static hwaddr npcm7xx_cnr_index(hwaddr offset) +{ + switch (offset) { + case A_NPCM7XX_PWM_CNR0: + return 0; + case A_NPCM7XX_PWM_CNR1: + return 1; + case A_NPCM7XX_PWM_CNR2: + return 2; + case A_NPCM7XX_PWM_CNR3: + return 3; + default: + g_assert_not_reached(); + } +} + +static hwaddr npcm7xx_cmr_index(hwaddr offset) +{ + switch (offset) { + case A_NPCM7XX_PWM_CMR0: + return 0; + case A_NPCM7XX_PWM_CMR1: + return 1; + case A_NPCM7XX_PWM_CMR2: + return 2; + case A_NPCM7XX_PWM_CMR3: + return 3; + default: + g_assert_not_reached(); + } +} + +static hwaddr npcm7xx_pdr_index(hwaddr offset) +{ + switch (offset) { + case A_NPCM7XX_PWM_PDR0: + return 0; + case A_NPCM7XX_PWM_PDR1: + return 1; + case A_NPCM7XX_PWM_PDR2: + return 2; + case A_NPCM7XX_PWM_PDR3: + return 3; + default: + g_assert_not_reached(); + } +} + +static hwaddr npcm7xx_pwdr_index(hwaddr offset) +{ + switch (offset) { + case A_NPCM7XX_PWM_PWDR0: + return 0; + case A_NPCM7XX_PWM_PWDR1: + return 1; + case A_NPCM7XX_PWM_PWDR2: + return 2; + case A_NPCM7XX_PWM_PWDR3: + return 3; + default: + g_assert_not_reached(); + } +} + +static uint64_t npcm7xx_pwm_read(void *opaque, hwaddr offset, unsigned size) +{ + NPCM7xxPWMState *s = opaque; + uint64_t value = 0; + + switch (offset) { + case A_NPCM7XX_PWM_CNR0: + case A_NPCM7XX_PWM_CNR1: + case A_NPCM7XX_PWM_CNR2: + case A_NPCM7XX_PWM_CNR3: + value = s->pwm[npcm7xx_cnr_index(offset)].cnr; + break; + + case A_NPCM7XX_PWM_CMR0: + case A_NPCM7XX_PWM_CMR1: + case A_NPCM7XX_PWM_CMR2: + case A_NPCM7XX_PWM_CMR3: + value = s->pwm[npcm7xx_cmr_index(offset)].cmr; + break; + + case A_NPCM7XX_PWM_PDR0: + case A_NPCM7XX_PWM_PDR1: + case A_NPCM7XX_PWM_PDR2: + case A_NPCM7XX_PWM_PDR3: + value = s->pwm[npcm7xx_pdr_index(offset)].pdr; + break; + + case A_NPCM7XX_PWM_PWDR0: + case A_NPCM7XX_PWM_PWDR1: + case A_NPCM7XX_PWM_PWDR2: + case A_NPCM7XX_PWM_PWDR3: + value = s->pwm[npcm7xx_pwdr_index(offset)].pwdr; + break; + + case A_NPCM7XX_PWM_PPR: + value = s->ppr; + break; + + case A_NPCM7XX_PWM_CSR: + value = s->csr; + break; + + case A_NPCM7XX_PWM_PCR: + value = s->pcr; + break; + + case A_NPCM7XX_PWM_PIER: + value = s->pier; + break; + + case A_NPCM7XX_PWM_PIIR: + value = s->piir; + break; + + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: invalid offset 0x%04" HWADDR_PRIx "\n", + __func__, offset); + break; + } + + trace_npcm7xx_pwm_read(DEVICE(s)->canonical_path, offset, value); + return value; +} + +static void npcm7xx_pwm_write(void *opaque, hwaddr offset, + uint64_t v, unsigned size) +{ + NPCM7xxPWMState *s = opaque; + NPCM7xxPWM *p; + uint32_t value = v; + + trace_npcm7xx_pwm_write(DEVICE(s)->canonical_path, offset, value); + switch (offset) { + case A_NPCM7XX_PWM_CNR0: + case A_NPCM7XX_PWM_CNR1: + case A_NPCM7XX_PWM_CNR2: + case A_NPCM7XX_PWM_CNR3: + p = &s->pwm[npcm7xx_cnr_index(offset)]; + if (value > NPCM7XX_MAX_CNR) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: invalid cnr value: %u", __func__, value); + p->cnr = NPCM7XX_MAX_CNR; + } else { + p->cnr = value; + } + npcm7xx_pwm_update_output(p); + break; + + case A_NPCM7XX_PWM_CMR0: + case A_NPCM7XX_PWM_CMR1: + case A_NPCM7XX_PWM_CMR2: + case A_NPCM7XX_PWM_CMR3: + p = &s->pwm[npcm7xx_cmr_index(offset)]; + if (value > NPCM7XX_MAX_CMR) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: invalid cmr value: %u", __func__, value); + p->cmr = NPCM7XX_MAX_CMR; + } else { + p->cmr = value; + } + npcm7xx_pwm_update_output(p); + break; + + case A_NPCM7XX_PWM_PDR0: + case 
A_NPCM7XX_PWM_PDR1: + case A_NPCM7XX_PWM_PDR2: + case A_NPCM7XX_PWM_PDR3: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: register @ 0x%04" HWADDR_PRIx " is read-only\n", + __func__, offset); + break; + + case A_NPCM7XX_PWM_PWDR0: + case A_NPCM7XX_PWM_PWDR1: + case A_NPCM7XX_PWM_PWDR2: + case A_NPCM7XX_PWM_PWDR3: + qemu_log_mask(LOG_UNIMP, + "%s: register @ 0x%04" HWADDR_PRIx " is not implemented\n", + __func__, offset); + break; + + case A_NPCM7XX_PWM_PPR: + npcm7xx_pwm_write_ppr(s, value); + break; + + case A_NPCM7XX_PWM_CSR: + npcm7xx_pwm_write_csr(s, value); + break; + + case A_NPCM7XX_PWM_PCR: + npcm7xx_pwm_write_pcr(s, value); + break; + + case A_NPCM7XX_PWM_PIER: + qemu_log_mask(LOG_UNIMP, + "%s: register @ 0x%04" HWADDR_PRIx " is not implemented\n", + __func__, offset); + break; + + case A_NPCM7XX_PWM_PIIR: + qemu_log_mask(LOG_UNIMP, + "%s: register @ 0x%04" HWADDR_PRIx " is not implemented\n", + __func__, offset); + break; + + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: invalid offset 0x%04" HWADDR_PRIx "\n", + __func__, offset); + break; + } +} + +static const struct MemoryRegionOps npcm7xx_pwm_ops = { + .read = npcm7xx_pwm_read, + .write = npcm7xx_pwm_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + .unaligned = false, + }, +}; + +static void npcm7xx_pwm_enter_reset(Object *obj, ResetType type) +{ + NPCM7xxPWMState *s = NPCM7XX_PWM(obj); + int i; + + for (i = 0; i < NPCM7XX_PWM_PER_MODULE; i++) { + NPCM7xxPWM *p = &s->pwm[i]; + + p->cnr = 0x00000000; + p->cmr = 0x00000000; + p->pdr = 0x00000000; + p->pwdr = 0x00000000; + } + + s->ppr = 0x00000000; + s->csr = 0x00000000; + s->pcr = 0x00000000; + s->pier = 0x00000000; + s->piir = 0x00000000; +} + +static void npcm7xx_pwm_hold_reset(Object *obj) +{ + NPCM7xxPWMState *s = NPCM7XX_PWM(obj); + int i; + + for (i = 0; i < NPCM7XX_PWM_PER_MODULE; i++) { + qemu_irq_lower(s->pwm[i].irq); + } +} + +static void npcm7xx_pwm_init(Object *obj) +{ + NPCM7xxPWMState *s = NPCM7XX_PWM(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + int i; + + QEMU_BUILD_BUG_ON(ARRAY_SIZE(s->pwm) != NPCM7XX_PWM_PER_MODULE); + for (i = 0; i < NPCM7XX_PWM_PER_MODULE; i++) { + NPCM7xxPWM *p = &s->pwm[i]; + p->module = s; + p->index = i; + sysbus_init_irq(sbd, &p->irq); + } + + memory_region_init_io(&s->iomem, obj, &npcm7xx_pwm_ops, s, + TYPE_NPCM7XX_PWM, 4 * KiB); + sysbus_init_mmio(sbd, &s->iomem); + s->clock = qdev_init_clock_in(DEVICE(s), "clock", NULL, NULL, 0); + + for (i = 0; i < NPCM7XX_PWM_PER_MODULE; ++i) { + object_property_add_uint32_ptr(obj, "freq[*]", + &s->pwm[i].freq, OBJ_PROP_FLAG_READ); + object_property_add_uint32_ptr(obj, "duty[*]", + &s->pwm[i].duty, OBJ_PROP_FLAG_READ); + } + qdev_init_gpio_out_named(DEVICE(s), s->duty_gpio_out, + "duty-gpio-out", NPCM7XX_PWM_PER_MODULE); +} + +static const VMStateDescription vmstate_npcm7xx_pwm = { + .name = "npcm7xx-pwm", + .version_id = 0, + .minimum_version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_BOOL(running, NPCM7xxPWM), + VMSTATE_BOOL(inverted, NPCM7xxPWM), + VMSTATE_UINT8(index, NPCM7xxPWM), + VMSTATE_UINT32(cnr, NPCM7xxPWM), + VMSTATE_UINT32(cmr, NPCM7xxPWM), + VMSTATE_UINT32(pdr, NPCM7xxPWM), + VMSTATE_UINT32(pwdr, NPCM7xxPWM), + VMSTATE_UINT32(freq, NPCM7xxPWM), + VMSTATE_UINT32(duty, NPCM7xxPWM), + VMSTATE_END_OF_LIST(), + }, +}; + +static const VMStateDescription vmstate_npcm7xx_pwm_module = { + .name = "npcm7xx-pwm-module", + .version_id = 0, + .minimum_version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_CLOCK(clock, 
NPCM7xxPWMState), + VMSTATE_STRUCT_ARRAY(pwm, NPCM7xxPWMState, + NPCM7XX_PWM_PER_MODULE, 0, vmstate_npcm7xx_pwm, + NPCM7xxPWM), + VMSTATE_UINT32(ppr, NPCM7xxPWMState), + VMSTATE_UINT32(csr, NPCM7xxPWMState), + VMSTATE_UINT32(pcr, NPCM7xxPWMState), + VMSTATE_UINT32(pier, NPCM7xxPWMState), + VMSTATE_UINT32(piir, NPCM7xxPWMState), + VMSTATE_END_OF_LIST(), + }, +}; + +static void npcm7xx_pwm_class_init(ObjectClass *klass, void *data) +{ + ResettableClass *rc = RESETTABLE_CLASS(klass); + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->desc = "NPCM7xx PWM Controller"; + dc->vmsd = &vmstate_npcm7xx_pwm_module; + rc->phases.enter = npcm7xx_pwm_enter_reset; + rc->phases.hold = npcm7xx_pwm_hold_reset; +} + +static const TypeInfo npcm7xx_pwm_info = { + .name = TYPE_NPCM7XX_PWM, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(NPCM7xxPWMState), + .class_init = npcm7xx_pwm_class_init, + .instance_init = npcm7xx_pwm_init, +}; + +static void npcm7xx_pwm_register_type(void) +{ + type_register_static(&npcm7xx_pwm_info); +} +type_init(npcm7xx_pwm_register_type); diff --git a/hw/misc/npcm7xx_rng.c b/hw/misc/npcm7xx_rng.c new file mode 100644 index 000000000..b01df7cdb --- /dev/null +++ b/hw/misc/npcm7xx_rng.c @@ -0,0 +1,180 @@ +/* + * Nuvoton NPCM7xx Random Number Generator. + * + * Copyright 2020 Google LLC + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + */ + +#include "qemu/osdep.h" + +#include "hw/misc/npcm7xx_rng.h" +#include "migration/vmstate.h" +#include "qemu/bitops.h" +#include "qemu/guest-random.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "qemu/units.h" + +#include "trace.h" + +#define NPCM7XX_RNG_REGS_SIZE (4 * KiB) + +#define NPCM7XX_RNGCS (0x00) +#define NPCM7XX_RNGCS_CLKP(rv) extract32(rv, 2, 4) +#define NPCM7XX_RNGCS_DVALID BIT(1) +#define NPCM7XX_RNGCS_RNGE BIT(0) + +#define NPCM7XX_RNGD (0x04) +#define NPCM7XX_RNGMODE (0x08) +#define NPCM7XX_RNGMODE_NORMAL (0x02) + +static bool npcm7xx_rng_is_enabled(NPCM7xxRNGState *s) +{ + return (s->rngcs & NPCM7XX_RNGCS_RNGE) && + (s->rngmode == NPCM7XX_RNGMODE_NORMAL); +} + +static uint64_t npcm7xx_rng_read(void *opaque, hwaddr offset, unsigned size) +{ + NPCM7xxRNGState *s = opaque; + uint64_t value = 0; + + switch (offset) { + case NPCM7XX_RNGCS: + /* + * If the RNG is enabled, but we don't have any valid random data, try + * obtaining some and update the DVALID bit accordingly. 
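+ * + * A guest typically polls RNGCS until DVALID reads 1, then fetches the byte + * from RNGD; reading RNGD consumes the byte and clears DVALID again.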
+ */ + if (!npcm7xx_rng_is_enabled(s)) { + s->rngcs &= ~NPCM7XX_RNGCS_DVALID; + } else if (!(s->rngcs & NPCM7XX_RNGCS_DVALID)) { + uint8_t byte = 0; + + if (qemu_guest_getrandom(&byte, sizeof(byte), NULL) == 0) { + s->rngd = byte; + s->rngcs |= NPCM7XX_RNGCS_DVALID; + } + } + value = s->rngcs; + break; + case NPCM7XX_RNGD: + if (npcm7xx_rng_is_enabled(s) && s->rngcs & NPCM7XX_RNGCS_DVALID) { + s->rngcs &= ~NPCM7XX_RNGCS_DVALID; + value = s->rngd; + s->rngd = 0; + } + break; + case NPCM7XX_RNGMODE: + value = s->rngmode; + break; + + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: read from invalid offset 0x%" HWADDR_PRIx "\n", + DEVICE(s)->canonical_path, offset); + break; + } + + trace_npcm7xx_rng_read(offset, value, size); + + return value; +} + +static void npcm7xx_rng_write(void *opaque, hwaddr offset, uint64_t value, + unsigned size) +{ + NPCM7xxRNGState *s = opaque; + + trace_npcm7xx_rng_write(offset, value, size); + + switch (offset) { + case NPCM7XX_RNGCS: + s->rngcs &= NPCM7XX_RNGCS_DVALID; + s->rngcs |= value & ~NPCM7XX_RNGCS_DVALID; + break; + case NPCM7XX_RNGD: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: write to read-only register @ 0x%" HWADDR_PRIx "\n", + DEVICE(s)->canonical_path, offset); + break; + case NPCM7XX_RNGMODE: + s->rngmode = value; + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: write to invalid offset 0x%" HWADDR_PRIx "\n", + DEVICE(s)->canonical_path, offset); + break; + } +} + +static const MemoryRegionOps npcm7xx_rng_ops = { + .read = npcm7xx_rng_read, + .write = npcm7xx_rng_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 1, + .max_access_size = 4, + .unaligned = false, + }, +}; + +static void npcm7xx_rng_enter_reset(Object *obj, ResetType type) +{ + NPCM7xxRNGState *s = NPCM7XX_RNG(obj); + + s->rngcs = 0; + s->rngd = 0; + s->rngmode = 0; +} + +static void npcm7xx_rng_init(Object *obj) +{ + NPCM7xxRNGState *s = NPCM7XX_RNG(obj); + + memory_region_init_io(&s->iomem, obj, &npcm7xx_rng_ops, s, "regs", + NPCM7XX_RNG_REGS_SIZE); + sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->iomem); +} + +static const VMStateDescription vmstate_npcm7xx_rng = { + .name = "npcm7xx-rng", + .version_id = 0, + .minimum_version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_UINT8(rngcs, NPCM7xxRNGState), + VMSTATE_UINT8(rngd, NPCM7xxRNGState), + VMSTATE_UINT8(rngmode, NPCM7xxRNGState), + VMSTATE_END_OF_LIST(), + }, +}; + +static void npcm7xx_rng_class_init(ObjectClass *klass, void *data) +{ + ResettableClass *rc = RESETTABLE_CLASS(klass); + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->desc = "NPCM7xx Random Number Generator"; + dc->vmsd = &vmstate_npcm7xx_rng; + rc->phases.enter = npcm7xx_rng_enter_reset; +} + +static const TypeInfo npcm7xx_rng_types[] = { + { + .name = TYPE_NPCM7XX_RNG, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(NPCM7xxRNGState), + .class_init = npcm7xx_rng_class_init, + .instance_init = npcm7xx_rng_init, + }, +}; +DEFINE_TYPES(npcm7xx_rng_types); diff --git a/hw/misc/nrf51_rng.c b/hw/misc/nrf51_rng.c new file mode 100644 index 000000000..fc86e1b69 --- /dev/null +++ b/hw/misc/nrf51_rng.c @@ -0,0 +1,266 @@ +/* + * nRF51 Random Number Generator + * + * Reference Manual: http://infocenter.nordicsemi.com/pdf/nRF51_RM_v3.0.1.pdf + * + * Copyright 2018 Steffen Görtz + * + * This code is licensed under the GPL version 2 or later. See + * the COPYING file in the top-level directory. 
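+ * + * Note: the model produces one random byte per timer period + * (period_unfiltered_us, or period_filtered_us when the CONFIG.DECEN filter + * bit is set) and then raises the VALRDY event.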
+ */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "qapi/error.h" +#include "hw/arm/nrf51.h" +#include "hw/irq.h" +#include "hw/misc/nrf51_rng.h" +#include "hw/qdev-properties.h" +#include "migration/vmstate.h" +#include "qemu/guest-random.h" + +static void update_irq(NRF51RNGState *s) +{ + bool irq = s->interrupt_enabled && s->event_valrdy; + qemu_set_irq(s->irq, irq); +} + +static uint64_t rng_read(void *opaque, hwaddr offset, unsigned int size) +{ + NRF51RNGState *s = NRF51_RNG(opaque); + uint64_t r = 0; + + switch (offset) { + case NRF51_RNG_EVENT_VALRDY: + r = s->event_valrdy; + break; + case NRF51_RNG_REG_SHORTS: + r = s->shortcut_stop_on_valrdy; + break; + case NRF51_RNG_REG_INTEN: + case NRF51_RNG_REG_INTENSET: + case NRF51_RNG_REG_INTENCLR: + r = s->interrupt_enabled; + break; + case NRF51_RNG_REG_CONFIG: + r = s->filter_enabled; + break; + case NRF51_RNG_REG_VALUE: + r = s->value; + break; + + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: bad read offset 0x%" HWADDR_PRIx "\n", + __func__, offset); + } + + return r; +} + +static int64_t calc_next_timeout(NRF51RNGState *s) +{ + int64_t timeout = qemu_clock_get_us(QEMU_CLOCK_VIRTUAL); + if (s->filter_enabled) { + timeout += s->period_filtered_us; + } else { + timeout += s->period_unfiltered_us; + } + + return timeout; +} + + +static void rng_update_timer(NRF51RNGState *s) +{ + if (s->active) { + timer_mod(&s->timer, calc_next_timeout(s)); + } else { + timer_del(&s->timer); + } +} + + +static void rng_write(void *opaque, hwaddr offset, + uint64_t value, unsigned int size) +{ + NRF51RNGState *s = NRF51_RNG(opaque); + + switch (offset) { + case NRF51_RNG_TASK_START: + if (value == NRF51_TRIGGER_TASK) { + s->active = 1; + rng_update_timer(s); + } + break; + case NRF51_RNG_TASK_STOP: + if (value == NRF51_TRIGGER_TASK) { + s->active = 0; + rng_update_timer(s); + } + break; + case NRF51_RNG_EVENT_VALRDY: + if (value == NRF51_EVENT_CLEAR) { + s->event_valrdy = 0; + } + break; + case NRF51_RNG_REG_SHORTS: + s->shortcut_stop_on_valrdy = + (value & BIT_MASK(NRF51_RNG_REG_SHORTS_VALRDY_STOP)) ? 1 : 0; + break; + case NRF51_RNG_REG_INTEN: + s->interrupt_enabled = + (value & BIT_MASK(NRF51_RNG_REG_INTEN_VALRDY)) ? 1 : 0; + break; + case NRF51_RNG_REG_INTENSET: + if (value & BIT_MASK(NRF51_RNG_REG_INTEN_VALRDY)) { + s->interrupt_enabled = 1; + } + break; + case NRF51_RNG_REG_INTENCLR: + if (value & BIT_MASK(NRF51_RNG_REG_INTEN_VALRDY)) { + s->interrupt_enabled = 0; + } + break; + case NRF51_RNG_REG_CONFIG: + s->filter_enabled = + (value & BIT_MASK(NRF51_RNG_REG_CONFIG_DECEN)) ? 
1 : 0; + break; + + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: bad write offset 0x%" HWADDR_PRIx "\n", + __func__, offset); + } + + update_irq(s); +} + +static const MemoryRegionOps rng_ops = { + .read = rng_read, + .write = rng_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .impl.min_access_size = 4, + .impl.max_access_size = 4 +}; + +static void nrf51_rng_timer_expire(void *opaque) +{ + NRF51RNGState *s = NRF51_RNG(opaque); + + qemu_guest_getrandom_nofail(&s->value, 1); + + s->event_valrdy = 1; + qemu_set_irq(s->eep_valrdy, 1); + + if (s->shortcut_stop_on_valrdy) { + s->active = 0; + } + + rng_update_timer(s); + update_irq(s); +} + +static void nrf51_rng_tep_start(void *opaque, int n, int level) +{ + NRF51RNGState *s = NRF51_RNG(opaque); + + if (level) { + s->active = 1; + rng_update_timer(s); + } +} + +static void nrf51_rng_tep_stop(void *opaque, int n, int level) +{ + NRF51RNGState *s = NRF51_RNG(opaque); + + if (level) { + s->active = 0; + rng_update_timer(s); + } +} + + +static void nrf51_rng_init(Object *obj) +{ + NRF51RNGState *s = NRF51_RNG(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + + memory_region_init_io(&s->mmio, obj, &rng_ops, s, + TYPE_NRF51_RNG, NRF51_RNG_SIZE); + sysbus_init_mmio(sbd, &s->mmio); + + timer_init_us(&s->timer, QEMU_CLOCK_VIRTUAL, nrf51_rng_timer_expire, s); + + sysbus_init_irq(sbd, &s->irq); + + /* Tasks */ + qdev_init_gpio_in_named(DEVICE(s), nrf51_rng_tep_start, "tep_start", 1); + qdev_init_gpio_in_named(DEVICE(s), nrf51_rng_tep_stop, "tep_stop", 1); + + /* Events */ + qdev_init_gpio_out_named(DEVICE(s), &s->eep_valrdy, "eep_valrdy", 1); +} + +static void nrf51_rng_reset(DeviceState *dev) +{ + NRF51RNGState *s = NRF51_RNG(dev); + + s->value = 0; + s->active = 0; + s->event_valrdy = 0; + s->shortcut_stop_on_valrdy = 0; + s->interrupt_enabled = 0; + s->filter_enabled = 0; + + rng_update_timer(s); +} + + +static Property nrf51_rng_properties[] = { + DEFINE_PROP_UINT16("period_unfiltered_us", NRF51RNGState, + period_unfiltered_us, 167), + DEFINE_PROP_UINT16("period_filtered_us", NRF51RNGState, + period_filtered_us, 660), + DEFINE_PROP_END_OF_LIST(), +}; + +static const VMStateDescription vmstate_rng = { + .name = "nrf51_soc.rng", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(active, NRF51RNGState), + VMSTATE_UINT32(event_valrdy, NRF51RNGState), + VMSTATE_UINT32(shortcut_stop_on_valrdy, NRF51RNGState), + VMSTATE_UINT32(interrupt_enabled, NRF51RNGState), + VMSTATE_UINT32(filter_enabled, NRF51RNGState), + VMSTATE_END_OF_LIST() + } +}; + +static void nrf51_rng_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + device_class_set_props(dc, nrf51_rng_properties); + dc->vmsd = &vmstate_rng; + dc->reset = nrf51_rng_reset; +} + +static const TypeInfo nrf51_rng_info = { + .name = TYPE_NRF51_RNG, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(NRF51RNGState), + .instance_init = nrf51_rng_init, + .class_init = nrf51_rng_class_init +}; + +static void nrf51_rng_register_types(void) +{ + type_register_static(&nrf51_rng_info); +} + +type_init(nrf51_rng_register_types) diff --git a/hw/misc/omap_clk.c b/hw/misc/omap_clk.c new file mode 100644 index 000000000..c77ca2fc7 --- /dev/null +++ b/hw/misc/omap_clk.c @@ -0,0 +1,1267 @@ +/* + * OMAP clocks. + * + * Copyright (C) 2006-2008 Andrzej Zaborowski + * + * Clocks data comes in part from arch/arm/mach-omap1/clock.h in Linux. 
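+ * + * Each clock below is a node in a static tree linked by its parent, child1 + * and sibling pointers; a node's rate is derived from its parent's rate + * through the multiplier and divisor fields.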
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see <http://www.gnu.org/licenses/>. + */ + +#include "qemu/osdep.h" +#include "hw/hw.h" +#include "hw/irq.h" +#include "hw/arm/omap.h" + +struct clk { + const char *name; + const char *alias; + struct clk *parent; + struct clk *child1; + struct clk *sibling; +#define ALWAYS_ENABLED (1 << 0) +#define CLOCK_IN_OMAP310 (1 << 10) +#define CLOCK_IN_OMAP730 (1 << 11) +#define CLOCK_IN_OMAP1510 (1 << 12) +#define CLOCK_IN_OMAP16XX (1 << 13) +#define CLOCK_IN_OMAP242X (1 << 14) +#define CLOCK_IN_OMAP243X (1 << 15) +#define CLOCK_IN_OMAP343X (1 << 16) + uint32_t flags; + int id; + + int running; /* Is currently ticking */ + int enabled; /* Is enabled, regardless of its input clk */ + unsigned long rate; /* Current rate (if .running) */ + unsigned int divisor; /* Rate relative to input (if .enabled) */ + unsigned int multiplier; /* Rate relative to input (if .enabled) */ + qemu_irq users[16]; /* Who to notify on change */ + int usecount; /* Automatically idle when unused */ +}; + +static struct clk xtal_osc12m = { + .name = "xtal_osc_12m", + .rate = 12000000, + .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310, +}; + +static struct clk xtal_osc32k = { + .name = "xtal_osc_32k", + .rate = 32768, + .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310 | + CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, +}; + +static struct clk ck_ref = { + .name = "ck_ref", + .alias = "clkin", + .parent = &xtal_osc12m, + .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310 | + ALWAYS_ENABLED, +}; + +/* If a dpll is disabled it becomes a bypass, child clocks don't stop */ +static struct clk dpll1 = { + .name = "dpll1", + .parent = &ck_ref, + .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310 | + ALWAYS_ENABLED, +}; + +static struct clk dpll2 = { + .name = "dpll2", + .parent = &ck_ref, + .flags = CLOCK_IN_OMAP310 | ALWAYS_ENABLED, +}; + +static struct clk dpll3 = { + .name = "dpll3", + .parent = &ck_ref, + .flags = CLOCK_IN_OMAP310 | ALWAYS_ENABLED, +}; + +static struct clk dpll4 = { + .name = "dpll4", + .parent = &ck_ref, + .multiplier = 4, + .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310, +}; + +static struct clk apll = { + .name = "apll", + .parent = &ck_ref, + .multiplier = 48, + .divisor = 12, + .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310, +}; + +static struct clk ck_48m = { + .name = "ck_48m", + .parent = &dpll4, /* either dpll4 or apll */ + .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310, +}; + +static struct clk ck_dpll1out = { + .name = "ck_dpll1out", + .parent = &dpll1, + .flags = CLOCK_IN_OMAP16XX, +}; + +static struct clk sossi_ck = { + .name = "ck_sossi", + .parent = &ck_dpll1out, + .flags = CLOCK_IN_OMAP16XX, +}; + +static struct clk clkm1 = { + .name = "clkm1", + .alias = "ck_gen1", + .parent = &dpll1, + .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310 | + ALWAYS_ENABLED, +}; + +static struct clk clkm2
= { + .name = "clkm2", + .alias = "ck_gen2", + .parent = &dpll1, + .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310 | + ALWAYS_ENABLED, +}; + +static struct clk clkm3 = { + .name = "clkm3", + .alias = "ck_gen3", + .parent = &dpll1, /* either dpll1 or ck_ref */ + .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310 | + ALWAYS_ENABLED, +}; + +static struct clk arm_ck = { + .name = "arm_ck", + .alias = "mpu_ck", + .parent = &clkm1, + .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310 | + ALWAYS_ENABLED, +}; + +static struct clk armper_ck = { + .name = "armper_ck", + .alias = "mpuper_ck", + .parent = &clkm1, + .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310, +}; + +static struct clk arm_gpio_ck = { + .name = "arm_gpio_ck", + .alias = "mpu_gpio_ck", + .parent = &clkm1, + .divisor = 1, + .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP310, +}; + +static struct clk armxor_ck = { + .name = "armxor_ck", + .alias = "mpuxor_ck", + .parent = &ck_ref, + .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310, +}; + +static struct clk armtim_ck = { + .name = "armtim_ck", + .alias = "mputim_ck", + .parent = &ck_ref, /* either CLKIN or DPLL1 */ + .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310, +}; + +static struct clk armwdt_ck = { + .name = "armwdt_ck", + .alias = "mpuwd_ck", + .parent = &clkm1, + .divisor = 14, + .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310 | + ALWAYS_ENABLED, +}; + +static struct clk arminth_ck16xx = { + .name = "arminth_ck", + .parent = &arm_ck, + .flags = CLOCK_IN_OMAP16XX | ALWAYS_ENABLED, + /* Note: On 16xx the frequency can be divided by 2 by programming + * ARM_CKCTL:ARM_INTHCK_SEL(14) to 1 + * + * 1510 version is in TC clocks. + */ +}; + +static struct clk dsp_ck = { + .name = "dsp_ck", + .parent = &clkm2, + .flags = CLOCK_IN_OMAP310 | CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX, +}; + +static struct clk dspmmu_ck = { + .name = "dspmmu_ck", + .parent = &clkm2, + .flags = CLOCK_IN_OMAP310 | CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | + ALWAYS_ENABLED, +}; + +static struct clk dspper_ck = { + .name = "dspper_ck", + .parent = &clkm2, + .flags = CLOCK_IN_OMAP310 | CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX, +}; + +static struct clk dspxor_ck = { + .name = "dspxor_ck", + .parent = &ck_ref, + .flags = CLOCK_IN_OMAP310 | CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX, +}; + +static struct clk dsptim_ck = { + .name = "dsptim_ck", + .parent = &ck_ref, + .flags = CLOCK_IN_OMAP310 | CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX, +}; + +static struct clk tc_ck = { + .name = "tc_ck", + .parent = &clkm3, + .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | + CLOCK_IN_OMAP730 | CLOCK_IN_OMAP310 | + ALWAYS_ENABLED, +}; + +static struct clk arminth_ck15xx = { + .name = "arminth_ck", + .parent = &tc_ck, + .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP310 | ALWAYS_ENABLED, + /* Note: On 1510 the frequency follows TC_CK + * + * 16xx version is in MPU clocks. 
+ */ +}; + +static struct clk tipb_ck = { + /* No-idle controlled by "tc_ck" */ + .name = "tipb_ck", + .parent = &tc_ck, + .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP310 | ALWAYS_ENABLED, +}; + +static struct clk l3_ocpi_ck = { + /* No-idle controlled by "tc_ck" */ + .name = "l3_ocpi_ck", + .parent = &tc_ck, + .flags = CLOCK_IN_OMAP16XX, +}; + +static struct clk tc1_ck = { + .name = "tc1_ck", + .parent = &tc_ck, + .flags = CLOCK_IN_OMAP16XX, +}; + +static struct clk tc2_ck = { + .name = "tc2_ck", + .parent = &tc_ck, + .flags = CLOCK_IN_OMAP16XX, +}; + +static struct clk dma_ck = { + /* No-idle controlled by "tc_ck" */ + .name = "dma_ck", + .parent = &tc_ck, + .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310 | + ALWAYS_ENABLED, +}; + +static struct clk dma_lcdfree_ck = { + .name = "dma_lcdfree_ck", + .parent = &tc_ck, + .flags = CLOCK_IN_OMAP16XX | ALWAYS_ENABLED, +}; + +static struct clk api_ck = { + .name = "api_ck", + .alias = "mpui_ck", + .parent = &tc_ck, + .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310, +}; + +static struct clk lb_ck = { + .name = "lb_ck", + .parent = &tc_ck, + .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP310, +}; + +static struct clk lbfree_ck = { + .name = "lbfree_ck", + .parent = &tc_ck, + .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP310, +}; + +static struct clk hsab_ck = { + .name = "hsab_ck", + .parent = &tc_ck, + .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP310, +}; + +static struct clk rhea1_ck = { + .name = "rhea1_ck", + .parent = &tc_ck, + .flags = CLOCK_IN_OMAP16XX | ALWAYS_ENABLED, +}; + +static struct clk rhea2_ck = { + .name = "rhea2_ck", + .parent = &tc_ck, + .flags = CLOCK_IN_OMAP16XX | ALWAYS_ENABLED, +}; + +static struct clk lcd_ck_16xx = { + .name = "lcd_ck", + .parent = &clkm3, + .flags = CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP730, +}; + +static struct clk lcd_ck_1510 = { + .name = "lcd_ck", + .parent = &clkm3, + .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP310, +}; + +static struct clk uart1_1510 = { + .name = "uart1_ck", + /* Direct from ULPD, no real parent */ + .parent = &armper_ck, /* either armper_ck or dpll4 */ + .rate = 12000000, + .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP310 | ALWAYS_ENABLED, +}; + +static struct clk uart1_16xx = { + .name = "uart1_ck", + /* Direct from ULPD, no real parent */ + .parent = &armper_ck, + .rate = 48000000, + .flags = CLOCK_IN_OMAP16XX, +}; + +static struct clk uart2_ck = { + .name = "uart2_ck", + /* Direct from ULPD, no real parent */ + .parent = &armper_ck, /* either armper_ck or dpll4 */ + .rate = 12000000, + .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310 | + ALWAYS_ENABLED, +}; + +static struct clk uart3_1510 = { + .name = "uart3_ck", + /* Direct from ULPD, no real parent */ + .parent = &armper_ck, /* either armper_ck or dpll4 */ + .rate = 12000000, + .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP310 | ALWAYS_ENABLED, +}; + +static struct clk uart3_16xx = { + .name = "uart3_ck", + /* Direct from ULPD, no real parent */ + .parent = &armper_ck, + .rate = 48000000, + .flags = CLOCK_IN_OMAP16XX, +}; + +static struct clk usb_clk0 = { /* 6 MHz output on W4_USB_CLK0 */ + .name = "usb_clk0", + .alias = "usb.clko", + /* Direct from ULPD, no parent */ + .rate = 6000000, + .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310, +}; + +static struct clk usb_hhc_ck1510 = { + .name = "usb_hhc_ck", + /* Direct from ULPD, no parent */ + .rate = 48000000, /* Actually 2 clocks, 12MHz and 48MHz */ + .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP310, +}; + +static struct clk 
usb_hhc_ck16xx = { + .name = "usb_hhc_ck", + /* Direct from ULPD, no parent */ + .rate = 48000000, + /* OTG_SYSCON_2.OTG_PADEN == 0 (not 1510-compatible) */ + .flags = CLOCK_IN_OMAP16XX, +}; + +static struct clk usb_w2fc_mclk = { + .name = "usb_w2fc_mclk", + .alias = "usb_w2fc_ck", + .parent = &ck_48m, + .rate = 48000000, + .flags = CLOCK_IN_OMAP310 | CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX, +}; + +static struct clk mclk_1510 = { + .name = "mclk", + /* Direct from ULPD, no parent. May be enabled by ext hardware. */ + .rate = 12000000, + .flags = CLOCK_IN_OMAP1510, +}; + +static struct clk bclk_310 = { + .name = "bt_mclk_out", /* Alias midi_mclk_out? */ + .parent = &armper_ck, + .flags = CLOCK_IN_OMAP310, +}; + +static struct clk mclk_310 = { + .name = "com_mclk_out", + .parent = &armper_ck, + .flags = CLOCK_IN_OMAP310, +}; + +static struct clk mclk_16xx = { + .name = "mclk", + /* Direct from ULPD, no parent. May be enabled by ext hardware. */ + .flags = CLOCK_IN_OMAP16XX, +}; + +static struct clk bclk_1510 = { + .name = "bclk", + /* Direct from ULPD, no parent. May be enabled by ext hardware. */ + .rate = 12000000, + .flags = CLOCK_IN_OMAP1510, +}; + +static struct clk bclk_16xx = { + .name = "bclk", + /* Direct from ULPD, no parent. May be enabled by ext hardware. */ + .flags = CLOCK_IN_OMAP16XX, +}; + +static struct clk mmc1_ck = { + .name = "mmc_ck", + .id = 1, + /* Functional clock is direct from ULPD, interface clock is ARMPER */ + .parent = &armper_ck, /* either armper_ck or dpll4 */ + .rate = 48000000, + .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310, +}; + +static struct clk mmc2_ck = { + .name = "mmc_ck", + .id = 2, + /* Functional clock is direct from ULPD, interface clock is ARMPER */ + .parent = &armper_ck, + .rate = 48000000, + .flags = CLOCK_IN_OMAP16XX, +}; + +static struct clk cam_mclk = { + .name = "cam.mclk", + .flags = CLOCK_IN_OMAP310 | CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX, + .rate = 12000000, +}; + +static struct clk cam_exclk = { + .name = "cam.exclk", + .flags = CLOCK_IN_OMAP310 | CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX, + /* Either 12M from cam.mclk or 48M from dpll4 */ + .parent = &cam_mclk, +}; + +static struct clk cam_lclk = { + .name = "cam.lclk", + .flags = CLOCK_IN_OMAP310 | CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX, +}; + +static struct clk i2c_fck = { + .name = "i2c_fck", + .id = 1, + .flags = CLOCK_IN_OMAP310 | CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | + ALWAYS_ENABLED, + .parent = &armxor_ck, +}; + +static struct clk i2c_ick = { + .name = "i2c_ick", + .id = 1, + .flags = CLOCK_IN_OMAP16XX | ALWAYS_ENABLED, + .parent = &armper_ck, +}; + +static struct clk clk32k = { + .name = "clk32-kHz", + .flags = CLOCK_IN_OMAP310 | CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | + CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X | ALWAYS_ENABLED, + .parent = &xtal_osc32k, +}; + +static struct clk ref_clk = { + .name = "ref_clk", + .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X | ALWAYS_ENABLED, + .rate = 12000000, /* 12 MHz or 13 MHz or 19.2 MHz */ + /*.parent = sys.xtalin */ +}; + +static struct clk apll_96m = { + .name = "apll_96m", + .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X | ALWAYS_ENABLED, + .rate = 96000000, + /*.parent = ref_clk */ +}; + +static struct clk apll_54m = { + .name = "apll_54m", + .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X | ALWAYS_ENABLED, + .rate = 54000000, + /*.parent = ref_clk */ +}; + +static struct clk sys_clk = { + .name = "sys_clk", + .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X | ALWAYS_ENABLED, + .rate = 32768, + /*.parent = sys.xtalin */ 
+}; + +static struct clk sleep_clk = { + .name = "sleep_clk", + .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X | ALWAYS_ENABLED, + .rate = 32768, + /*.parent = sys.xtalin */ +}; + +static struct clk dpll_ck = { + .name = "dpll", + .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X | ALWAYS_ENABLED, + .parent = &ref_clk, +}; + +static struct clk dpll_x2_ck = { + .name = "dpll_x2", + .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X | ALWAYS_ENABLED, + .parent = &ref_clk, +}; + +static struct clk wdt1_sys_clk = { + .name = "wdt1_sys_clk", + .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X | ALWAYS_ENABLED, + .rate = 32768, + /*.parent = sys.xtalin */ +}; + +static struct clk func_96m_clk = { + .name = "func_96m_clk", + .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, + .divisor = 1, + .parent = &apll_96m, +}; + +static struct clk func_48m_clk = { + .name = "func_48m_clk", + .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, + .divisor = 2, + .parent = &apll_96m, +}; + +static struct clk func_12m_clk = { + .name = "func_12m_clk", + .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, + .divisor = 8, + .parent = &apll_96m, +}; + +static struct clk func_54m_clk = { + .name = "func_54m_clk", + .flags = CLOCK_IN_OMAP242X, + .divisor = 1, + .parent = &apll_54m, +}; + +static struct clk sys_clkout = { + .name = "clkout", + .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, + .parent = &sys_clk, +}; + +static struct clk sys_clkout2 = { + .name = "clkout2", + .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, + .parent = &sys_clk, +}; + +static struct clk core_clk = { + .name = "core_clk", + .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, + .parent = &dpll_x2_ck, /* Switchable between dpll_ck and clk32k */ +}; + +static struct clk l3_clk = { + .name = "l3_clk", + .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, + .parent = &core_clk, +}; + +static struct clk core_l4_iclk = { + .name = "core_l4_iclk", + .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, + .parent = &l3_clk, +}; + +static struct clk wu_l4_iclk = { + .name = "wu_l4_iclk", + .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, + .parent = &l3_clk, +}; + +static struct clk core_l3_iclk = { + .name = "core_l3_iclk", + .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, + .parent = &core_clk, +}; + +static struct clk core_l4_usb_clk = { + .name = "core_l4_usb_clk", + .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, + .parent = &l3_clk, +}; + +static struct clk wu_gpt1_clk = { + .name = "wu_gpt1_clk", + .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, + .parent = &sys_clk, +}; + +static struct clk wu_32k_clk = { + .name = "wu_32k_clk", + .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, + .parent = &sys_clk, +}; + +static struct clk uart1_fclk = { + .name = "uart1_fclk", + .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, + .parent = &func_48m_clk, +}; + +static struct clk uart1_iclk = { + .name = "uart1_iclk", + .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, + .parent = &core_l4_iclk, +}; + +static struct clk uart2_fclk = { + .name = "uart2_fclk", + .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, + .parent = &func_48m_clk, +}; + +static struct clk uart2_iclk = { + .name = "uart2_iclk", + .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, + .parent = &core_l4_iclk, +}; + +static struct clk uart3_fclk = { + .name = "uart3_fclk", + .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, + .parent = &func_48m_clk, +}; + +static struct clk uart3_iclk = { + .name = "uart3_iclk", + .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, + .parent = &core_l4_iclk, +}; + +static struct clk mpu_fclk = { + 
.name = "mpu_fclk", + .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, + .parent = &core_clk, +}; + +static struct clk mpu_iclk = { + .name = "mpu_iclk", + .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, + .parent = &core_clk, +}; + +static struct clk int_m_fclk = { + .name = "int_m_fclk", + .alias = "mpu_intc_fclk", + .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, + .parent = &core_clk, +}; + +static struct clk int_m_iclk = { + .name = "int_m_iclk", + .alias = "mpu_intc_iclk", + .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, + .parent = &core_clk, +}; + +static struct clk core_gpt2_clk = { + .name = "core_gpt2_clk", + .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, + .parent = &sys_clk, +}; + +static struct clk core_gpt3_clk = { + .name = "core_gpt3_clk", + .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, + .parent = &sys_clk, +}; + +static struct clk core_gpt4_clk = { + .name = "core_gpt4_clk", + .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, + .parent = &sys_clk, +}; + +static struct clk core_gpt5_clk = { + .name = "core_gpt5_clk", + .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, + .parent = &sys_clk, +}; + +static struct clk core_gpt6_clk = { + .name = "core_gpt6_clk", + .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, + .parent = &sys_clk, +}; + +static struct clk core_gpt7_clk = { + .name = "core_gpt7_clk", + .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, + .parent = &sys_clk, +}; + +static struct clk core_gpt8_clk = { + .name = "core_gpt8_clk", + .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, + .parent = &sys_clk, +}; + +static struct clk core_gpt9_clk = { + .name = "core_gpt9_clk", + .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, + .parent = &sys_clk, +}; + +static struct clk core_gpt10_clk = { + .name = "core_gpt10_clk", + .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, + .parent = &sys_clk, +}; + +static struct clk core_gpt11_clk = { + .name = "core_gpt11_clk", + .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, + .parent = &sys_clk, +}; + +static struct clk core_gpt12_clk = { + .name = "core_gpt12_clk", + .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, + .parent = &sys_clk, +}; + +static struct clk mcbsp1_clk = { + .name = "mcbsp1_cg", + .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, + .divisor = 2, + .parent = &func_96m_clk, +}; + +static struct clk mcbsp2_clk = { + .name = "mcbsp2_cg", + .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, + .divisor = 2, + .parent = &func_96m_clk, +}; + +static struct clk emul_clk = { + .name = "emul_ck", + .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, + .parent = &func_54m_clk, +}; + +static struct clk sdma_fclk = { + .name = "sdma_fclk", + .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, + .parent = &l3_clk, +}; + +static struct clk sdma_iclk = { + .name = "sdma_iclk", + .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, + .parent = &core_l3_iclk, /* core_l4_iclk for the configuration port */ +}; + +static struct clk i2c1_fclk = { + .name = "i2c1.fclk", + .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, + .parent = &func_12m_clk, + .divisor = 1, +}; + +static struct clk i2c1_iclk = { + .name = "i2c1.iclk", + .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, + .parent = &core_l4_iclk, +}; + +static struct clk i2c2_fclk = { + .name = "i2c2.fclk", + .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, + .parent = &func_12m_clk, + .divisor = 1, +}; + +static struct clk i2c2_iclk = { + .name = "i2c2.iclk", + .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, + .parent = &core_l4_iclk, +}; + +static struct clk gpio_dbclk[5] = { + { + .name = "gpio1_dbclk", + 
.flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, + .parent = &wu_32k_clk, + }, { + .name = "gpio2_dbclk", + .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, + .parent = &wu_32k_clk, + }, { + .name = "gpio3_dbclk", + .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, + .parent = &wu_32k_clk, + }, { + .name = "gpio4_dbclk", + .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, + .parent = &wu_32k_clk, + }, { + .name = "gpio5_dbclk", + .flags = CLOCK_IN_OMAP243X, + .parent = &wu_32k_clk, + }, +}; + +static struct clk gpio_iclk = { + .name = "gpio_iclk", + .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, + .parent = &wu_l4_iclk, +}; + +static struct clk mmc_fck = { + .name = "mmc_fclk", + .flags = CLOCK_IN_OMAP242X, + .parent = &func_96m_clk, +}; + +static struct clk mmc_ick = { + .name = "mmc_iclk", + .flags = CLOCK_IN_OMAP242X, + .parent = &core_l4_iclk, +}; + +static struct clk spi_fclk[3] = { + { + .name = "spi1_fclk", + .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, + .parent = &func_48m_clk, + }, { + .name = "spi2_fclk", + .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, + .parent = &func_48m_clk, + }, { + .name = "spi3_fclk", + .flags = CLOCK_IN_OMAP243X, + .parent = &func_48m_clk, + }, +}; + +static struct clk dss_clk[2] = { + { + .name = "dss_clk1", + .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, + .parent = &core_clk, + }, { + .name = "dss_clk2", + .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, + .parent = &sys_clk, + }, +}; + +static struct clk dss_54m_clk = { + .name = "dss_54m_clk", + .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, + .parent = &func_54m_clk, +}; + +static struct clk dss_l3_iclk = { + .name = "dss_l3_iclk", + .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, + .parent = &core_l3_iclk, +}; + +static struct clk dss_l4_iclk = { + .name = "dss_l4_iclk", + .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, + .parent = &core_l4_iclk, +}; + +static struct clk spi_iclk[3] = { + { + .name = "spi1_iclk", + .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, + .parent = &core_l4_iclk, + }, { + .name = "spi2_iclk", + .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, + .parent = &core_l4_iclk, + }, { + .name = "spi3_iclk", + .flags = CLOCK_IN_OMAP243X, + .parent = &core_l4_iclk, + }, +}; + +static struct clk omapctrl_clk = { + .name = "omapctrl_iclk", + .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X, + /* XXX Should be in WKUP domain */ + .parent = &core_l4_iclk, +}; + +static struct clk *onchip_clks[] = { + /* OMAP 1 */ + + /* non-ULPD clocks */ + &xtal_osc12m, + &xtal_osc32k, + &ck_ref, + &dpll1, + &dpll2, + &dpll3, + &dpll4, + &apll, + &ck_48m, + /* CK_GEN1 clocks */ + &clkm1, + &ck_dpll1out, + &sossi_ck, + &arm_ck, + &armper_ck, + &arm_gpio_ck, + &armxor_ck, + &armtim_ck, + &armwdt_ck, + &arminth_ck15xx, &arminth_ck16xx, + /* CK_GEN2 clocks */ + &clkm2, + &dsp_ck, + &dspmmu_ck, + &dspper_ck, + &dspxor_ck, + &dsptim_ck, + /* CK_GEN3 clocks */ + &clkm3, + &tc_ck, + &tipb_ck, + &l3_ocpi_ck, + &tc1_ck, + &tc2_ck, + &dma_ck, + &dma_lcdfree_ck, + &api_ck, + &lb_ck, + &lbfree_ck, + &hsab_ck, + &rhea1_ck, + &rhea2_ck, + &lcd_ck_16xx, + &lcd_ck_1510, + /* ULPD clocks */ + &uart1_1510, + &uart1_16xx, + &uart2_ck, + &uart3_1510, + &uart3_16xx, + &usb_clk0, + &usb_hhc_ck1510, &usb_hhc_ck16xx, + &mclk_1510, &mclk_16xx, &mclk_310, + &bclk_1510, &bclk_16xx, &bclk_310, + &mmc1_ck, + &mmc2_ck, + &cam_mclk, + &cam_exclk, + &cam_lclk, + &clk32k, + &usb_w2fc_mclk, + /* Virtual clocks */ + &i2c_fck, + &i2c_ick, + + /* OMAP 2 */ + + &ref_clk, + &apll_96m, + &apll_54m, + &sys_clk, + &sleep_clk, + &dpll_ck, + 
&dpll_x2_ck, + &wdt1_sys_clk, + &func_96m_clk, + &func_48m_clk, + &func_12m_clk, + &func_54m_clk, + &sys_clkout, + &sys_clkout2, + &core_clk, + &l3_clk, + &core_l4_iclk, + &wu_l4_iclk, + &core_l3_iclk, + &core_l4_usb_clk, + &wu_gpt1_clk, + &wu_32k_clk, + &uart1_fclk, + &uart1_iclk, + &uart2_fclk, + &uart2_iclk, + &uart3_fclk, + &uart3_iclk, + &mpu_fclk, + &mpu_iclk, + &int_m_fclk, + &int_m_iclk, + &core_gpt2_clk, + &core_gpt3_clk, + &core_gpt4_clk, + &core_gpt5_clk, + &core_gpt6_clk, + &core_gpt7_clk, + &core_gpt8_clk, + &core_gpt9_clk, + &core_gpt10_clk, + &core_gpt11_clk, + &core_gpt12_clk, + &mcbsp1_clk, + &mcbsp2_clk, + &emul_clk, + &sdma_fclk, + &sdma_iclk, + &i2c1_fclk, + &i2c1_iclk, + &i2c2_fclk, + &i2c2_iclk, + &gpio_dbclk[0], + &gpio_dbclk[1], + &gpio_dbclk[2], + &gpio_dbclk[3], + &gpio_iclk, + &mmc_fck, + &mmc_ick, + &spi_fclk[0], + &spi_iclk[0], + &spi_fclk[1], + &spi_iclk[1], + &spi_fclk[2], + &spi_iclk[2], + &dss_clk[0], + &dss_clk[1], + &dss_54m_clk, + &dss_l3_iclk, + &dss_l4_iclk, + &omapctrl_clk, + + NULL +}; + +void omap_clk_adduser(struct clk *clk, qemu_irq user) +{ + qemu_irq *i; + + for (i = clk->users; *i; i ++); + *i = user; +} + +struct clk *omap_findclk(struct omap_mpu_state_s *mpu, const char *name) +{ + struct clk *i; + + for (i = mpu->clks; i->name; i ++) + if (!strcmp(i->name, name) || (i->alias && !strcmp(i->alias, name))) + return i; + hw_error("%s: %s not found\n", __func__, name); +} + +void omap_clk_get(struct clk *clk) +{ + clk->usecount ++; +} + +void omap_clk_put(struct clk *clk) +{ + if (!(clk->usecount --)) + hw_error("%s: %s is not in use\n", __func__, clk->name); +} + +static void omap_clk_update(struct clk *clk) +{ + int parent, running; + qemu_irq *user; + struct clk *i; + + if (clk->parent) + parent = clk->parent->running; + else + parent = 1; + + running = parent && (clk->enabled || + ((clk->flags & ALWAYS_ENABLED) && clk->usecount)); + if (clk->running != running) { + clk->running = running; + for (user = clk->users; *user; user ++) + qemu_set_irq(*user, running); + for (i = clk->child1; i; i = i->sibling) + omap_clk_update(i); + } +} + +static void omap_clk_rate_update_full(struct clk *clk, unsigned long int rate, + unsigned long int div, unsigned long int mult) +{ + struct clk *i; + qemu_irq *user; + + clk->rate = muldiv64(rate, mult, div); + if (clk->running) + for (user = clk->users; *user; user ++) + qemu_irq_raise(*user); + for (i = clk->child1; i; i = i->sibling) + omap_clk_rate_update_full(i, rate, + div * i->divisor, mult * i->multiplier); +} + +static void omap_clk_rate_update(struct clk *clk) +{ + struct clk *i; + unsigned long int div, mult = div = 1; + + for (i = clk; i->parent; i = i->parent) { + div *= i->divisor; + mult *= i->multiplier; + } + + omap_clk_rate_update_full(clk, i->rate, div, mult); +} + +void omap_clk_reparent(struct clk *clk, struct clk *parent) +{ + struct clk **p; + + if (clk->parent) { + for (p = &clk->parent->child1; *p != clk; p = &(*p)->sibling); + *p = clk->sibling; + } + + clk->parent = parent; + if (parent) { + clk->sibling = parent->child1; + parent->child1 = clk; + omap_clk_update(clk); + omap_clk_rate_update(clk); + } else + clk->sibling = NULL; +} + +void omap_clk_onoff(struct clk *clk, int on) +{ + clk->enabled = on; + omap_clk_update(clk); +} + +void omap_clk_canidle(struct clk *clk, int can) +{ + if (can) + omap_clk_put(clk); + else + omap_clk_get(clk); +} + +void omap_clk_setrate(struct clk *clk, int divide, int multiply) +{ + clk->divisor = divide; + clk->multiplier = multiply; + 
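/* Recompute the rate and propagate the change down the clock tree. */
+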
omap_clk_rate_update(clk); +} + +int64_t omap_clk_getrate(omap_clk clk) +{ + return clk->rate; +} + +void omap_clk_init(struct omap_mpu_state_s *mpu) +{ + struct clk **i, *j, *k; + int count; + int flag; + + if (cpu_is_omap310(mpu)) + flag = CLOCK_IN_OMAP310; + else if (cpu_is_omap1510(mpu)) + flag = CLOCK_IN_OMAP1510; + else if (cpu_is_omap2410(mpu) || cpu_is_omap2420(mpu)) + flag = CLOCK_IN_OMAP242X; + else if (cpu_is_omap2430(mpu)) + flag = CLOCK_IN_OMAP243X; + else if (cpu_is_omap3430(mpu)) + flag = CLOCK_IN_OMAP243X; + else + return; + + for (i = onchip_clks, count = 0; *i; i ++) + if ((*i)->flags & flag) + count ++; + mpu->clks = g_new0(struct clk, count + 1); + for (i = onchip_clks, j = mpu->clks; *i; i ++) + if ((*i)->flags & flag) { + memcpy(j, *i, sizeof(struct clk)); + for (k = mpu->clks; k < j; k ++) + if (j->parent && !strcmp(j->parent->name, k->name)) { + j->parent = k; + j->sibling = k->child1; + k->child1 = j; + } else if (k->parent && !strcmp(k->parent->name, j->name)) { + k->parent = j; + k->sibling = j->child1; + j->child1 = k; + } + j->divisor = j->divisor ?: 1; + j->multiplier = j->multiplier ?: 1; + j ++; + } + for (j = mpu->clks; count --; j ++) { + omap_clk_update(j); + omap_clk_rate_update(j); + } +} diff --git a/hw/misc/omap_gpmc.c b/hw/misc/omap_gpmc.c new file mode 100644 index 000000000..10de7a552 --- /dev/null +++ b/hw/misc/omap_gpmc.c @@ -0,0 +1,898 @@ +/* + * TI OMAP general purpose memory controller emulation. + * + * Copyright (C) 2007-2009 Nokia Corporation + * Original code written by Andrzej Zaborowski + * Enhancements for OMAP3 and NAND support written by Juha Riihimäki + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 or + * (at your option) any later version of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . 
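+ * <http://www.gnu.org/licenses/>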
+ */ + +#include "qemu/osdep.h" +#include "hw/irq.h" +#include "hw/block/flash.h" +#include "hw/arm/omap.h" +#include "exec/memory.h" +#include "exec/address-spaces.h" + +/* General-Purpose Memory Controller */ +struct omap_gpmc_s { + qemu_irq irq; + qemu_irq drq; + MemoryRegion iomem; + int accept_256; + + uint8_t revision; + uint8_t sysconfig; + uint16_t irqst; + uint16_t irqen; + uint16_t lastirq; + uint16_t timeout; + uint16_t config; + struct omap_gpmc_cs_file_s { + uint32_t config[7]; + MemoryRegion *iomem; + MemoryRegion container; + MemoryRegion nandiomem; + DeviceState *dev; + } cs_file[8]; + int ecc_cs; + int ecc_ptr; + uint32_t ecc_cfg; + ECCState ecc[9]; + struct prefetch { + uint32_t config1; /* GPMC_PREFETCH_CONFIG1 */ + uint32_t transfercount; /* GPMC_PREFETCH_CONFIG2:TRANSFERCOUNT */ + int startengine; /* GPMC_PREFETCH_CONTROL:STARTENGINE */ + int fifopointer; /* GPMC_PREFETCH_STATUS:FIFOPOINTER */ + int count; /* GPMC_PREFETCH_STATUS:COUNTVALUE */ + MemoryRegion iomem; + uint8_t fifo[64]; + } prefetch; +}; + +#define OMAP_GPMC_8BIT 0 +#define OMAP_GPMC_16BIT 1 +#define OMAP_GPMC_NOR 0 +#define OMAP_GPMC_NAND 2 + +static int omap_gpmc_devtype(struct omap_gpmc_cs_file_s *f) +{ + return (f->config[0] >> 10) & 3; +} + +static int omap_gpmc_devsize(struct omap_gpmc_cs_file_s *f) +{ + /* devsize field is really 2 bits but we ignore the high + * bit to ensure consistent behaviour if the guest sets + * it (values 2 and 3 are reserved in the TRM) + */ + return (f->config[0] >> 12) & 1; +} + +/* Extract the chip-select value from the prefetch config1 register */ +static int prefetch_cs(uint32_t config1) +{ + return (config1 >> 24) & 7; +} + +static int prefetch_threshold(uint32_t config1) +{ + return (config1 >> 8) & 0x7f; +} + +static void omap_gpmc_int_update(struct omap_gpmc_s *s) +{ + /* The TRM is a bit unclear, but it seems to say that + * the TERMINALCOUNTSTATUS bit is set only on the + * transition when the prefetch engine goes from + * active to inactive, whereas the FIFOEVENTSTATUS + * bit is held high as long as the fifo has at + * least THRESHOLD bytes available. + * So we do the latter here, but TERMINALCOUNTSTATUS + * is set elsewhere. 
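+ * (TERMINALCOUNTSTATUS, bit 1 of GPMC_IRQSTATUS, is raised from
+ * fill_prefetch_fifo() and omap_gpmc_prefetch_write() when the
+ * transfer count reaches zero.)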
+ */ + if (s->prefetch.fifopointer >= prefetch_threshold(s->prefetch.config1)) { + s->irqst |= 1; + } + if ((s->irqen & s->irqst) != s->lastirq) { + s->lastirq = s->irqen & s->irqst; + qemu_set_irq(s->irq, s->lastirq); + } +} + +static void omap_gpmc_dma_update(struct omap_gpmc_s *s, int value) +{ + if (s->prefetch.config1 & 4) { + qemu_set_irq(s->drq, value); + } +} + +/* Access functions for when a NAND-like device is mapped into memory: + * all addresses in the region behave like accesses to the relevant + * GPMC_NAND_DATA_i register (which is actually implemented to call these) + */ +static uint64_t omap_nand_read(void *opaque, hwaddr addr, + unsigned size) +{ + struct omap_gpmc_cs_file_s *f = (struct omap_gpmc_cs_file_s *)opaque; + uint64_t v; + nand_setpins(f->dev, 0, 0, 0, 1, 0); + switch (omap_gpmc_devsize(f)) { + case OMAP_GPMC_8BIT: + v = nand_getio(f->dev); + if (size == 1) { + return v; + } + v |= (nand_getio(f->dev) << 8); + if (size == 2) { + return v; + } + v |= (nand_getio(f->dev) << 16); + v |= (nand_getio(f->dev) << 24); + return v; + case OMAP_GPMC_16BIT: + v = nand_getio(f->dev); + if (size == 1) { + /* 8 bit read from 16 bit device : probably a guest bug */ + return v & 0xff; + } + if (size == 2) { + return v; + } + v |= (nand_getio(f->dev) << 16); + return v; + default: + abort(); + } +} + +static void omap_nand_setio(DeviceState *dev, uint64_t value, + int nandsize, int size) +{ + /* Write the specified value to the NAND device, respecting + * both size of the NAND device and size of the write access. + */ + switch (nandsize) { + case OMAP_GPMC_8BIT: + switch (size) { + case 1: + nand_setio(dev, value & 0xff); + break; + case 2: + nand_setio(dev, value & 0xff); + nand_setio(dev, (value >> 8) & 0xff); + break; + case 4: + default: + nand_setio(dev, value & 0xff); + nand_setio(dev, (value >> 8) & 0xff); + nand_setio(dev, (value >> 16) & 0xff); + nand_setio(dev, (value >> 24) & 0xff); + break; + } + break; + case OMAP_GPMC_16BIT: + switch (size) { + case 1: + /* writing to a 16bit device with 8bit access is probably a guest + * bug; pass the value through anyway. + */ + case 2: + nand_setio(dev, value & 0xffff); + break; + case 4: + default: + nand_setio(dev, value & 0xffff); + nand_setio(dev, (value >> 16) & 0xffff); + break; + } + break; + } +} + +static void omap_nand_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + struct omap_gpmc_cs_file_s *f = (struct omap_gpmc_cs_file_s *)opaque; + nand_setpins(f->dev, 0, 0, 0, 1, 0); + omap_nand_setio(f->dev, value, omap_gpmc_devsize(f), size); +} + +static const MemoryRegionOps omap_nand_ops = { + .read = omap_nand_read, + .write = omap_nand_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void fill_prefetch_fifo(struct omap_gpmc_s *s) +{ + /* Fill the prefetch FIFO by reading data from NAND. + * We do this synchronously, unlike the hardware which + * will do this asynchronously. We refill when the + * FIFO has THRESHOLD bytes free, and we always refill + * as much data as possible starting at the top end + * of the FIFO. + * (We have to refill at THRESHOLD rather than waiting + * for the FIFO to empty to allow for the case where + * the FIFO size isn't an exact multiple of THRESHOLD + * and we're doing DMA transfers.) + * This means we never need to handle wrap-around in + * the fifo-reading code, and the next byte of data + * to read is always fifo[63 - fifopointer]. 
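+ * Worked example: with fifopointer == 3 the three unread bytes sit
+ * in fifo[61..63], and successive reads return fifo[61], fifo[62]
+ * and fifo[63].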
+ */ + int fptr; + int cs = prefetch_cs(s->prefetch.config1); + int is16bit = (((s->cs_file[cs].config[0] >> 12) & 3) != 0); + int bytes; + /* Don't believe the bit of the OMAP TRM that says that COUNTVALUE + * and TRANSFERCOUNT are in units of 16 bit words for 16 bit NAND. + * Instead believe the bit that says it is always a byte count. + */ + bytes = 64 - s->prefetch.fifopointer; + if (bytes > s->prefetch.count) { + bytes = s->prefetch.count; + } + if (is16bit) { + bytes &= ~1; + } + + s->prefetch.count -= bytes; + s->prefetch.fifopointer += bytes; + fptr = 64 - s->prefetch.fifopointer; + /* Move the existing data in the FIFO so it sits just + * before what we're about to read in + */ + while (fptr < (64 - bytes)) { + s->prefetch.fifo[fptr] = s->prefetch.fifo[fptr + bytes]; + fptr++; + } + while (fptr < 64) { + if (is16bit) { + uint32_t v = omap_nand_read(&s->cs_file[cs], 0, 2); + s->prefetch.fifo[fptr++] = v & 0xff; + s->prefetch.fifo[fptr++] = (v >> 8) & 0xff; + } else { + s->prefetch.fifo[fptr++] = omap_nand_read(&s->cs_file[cs], 0, 1); + } + } + if (s->prefetch.startengine && (s->prefetch.count == 0)) { + /* This was the final transfer: raise TERMINALCOUNTSTATUS */ + s->irqst |= 2; + s->prefetch.startengine = 0; + } + /* If there are any bytes in the FIFO at this point then + * we must raise a DMA request (either this is a final part + * transfer, or we filled the FIFO in which case we certainly + * have THRESHOLD bytes available) + */ + if (s->prefetch.fifopointer != 0) { + omap_gpmc_dma_update(s, 1); + } + omap_gpmc_int_update(s); +} + +/* Access functions for a NAND-like device when the prefetch/postwrite + * engine is enabled -- all addresses in the region behave alike: + * data is read or written to the FIFO. + */ +static uint64_t omap_gpmc_prefetch_read(void *opaque, hwaddr addr, + unsigned size) +{ + struct omap_gpmc_s *s = (struct omap_gpmc_s *) opaque; + uint32_t data; + if (s->prefetch.config1 & 1) { + /* The TRM doesn't define the behaviour if you read from the + * FIFO when the prefetch engine is in write mode. We choose + * to always return zero. + */ + return 0; + } + /* Note that trying to read an empty fifo repeats the last byte */ + if (s->prefetch.fifopointer) { + s->prefetch.fifopointer--; + } + data = s->prefetch.fifo[63 - s->prefetch.fifopointer]; + if (s->prefetch.fifopointer == + (64 - prefetch_threshold(s->prefetch.config1))) { + /* We've drained THRESHOLD bytes now. So deassert the + * DMA request, then refill the FIFO (which will probably + * assert it again.) + */ + omap_gpmc_dma_update(s, 0); + fill_prefetch_fifo(s); + } + omap_gpmc_int_update(s); + return data; +} + +static void omap_gpmc_prefetch_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + struct omap_gpmc_s *s = (struct omap_gpmc_s *) opaque; + int cs = prefetch_cs(s->prefetch.config1); + if ((s->prefetch.config1 & 1) == 0) { + /* The TRM doesn't define the behaviour of writing to the + * FIFO when the prefetch engine is in read mode. We + * choose to ignore the write. + */ + return; + } + if (s->prefetch.count == 0) { + /* The TRM doesn't define the behaviour of writing to the + * FIFO if the transfer is complete. We choose to ignore. + */ + return; + } + /* The only reason we do any data buffering in postwrite + * mode is if we are talking to a 16 bit NAND device, in + * which case we need to buffer the first byte of the + * 16 bit word until the other byte arrives. 
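+ * The first byte written becomes the low half of the word: a guest
+ * writing 0x34 and then 0x12 makes us issue 0x1234 to the device.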
+ */ + int is16bit = (((s->cs_file[cs].config[0] >> 12) & 3) != 0); + if (is16bit) { + /* fifopointer alternates between 64 (waiting for first + * byte of word) and 63 (waiting for second byte) + */ + if (s->prefetch.fifopointer == 64) { + s->prefetch.fifo[0] = value; + s->prefetch.fifopointer--; + } else { + value = (value << 8) | s->prefetch.fifo[0]; + omap_nand_write(&s->cs_file[cs], 0, value, 2); + s->prefetch.count--; + s->prefetch.fifopointer = 64; + } + } else { + /* Just write the byte : fifopointer remains 64 at all times */ + omap_nand_write(&s->cs_file[cs], 0, value, 1); + s->prefetch.count--; + } + if (s->prefetch.count == 0) { + /* Final transfer: raise TERMINALCOUNTSTATUS */ + s->irqst |= 2; + s->prefetch.startengine = 0; + } + omap_gpmc_int_update(s); +} + +static const MemoryRegionOps omap_prefetch_ops = { + .read = omap_gpmc_prefetch_read, + .write = omap_gpmc_prefetch_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .impl.min_access_size = 1, + .impl.max_access_size = 1, +}; + +static MemoryRegion *omap_gpmc_cs_memregion(struct omap_gpmc_s *s, int cs) +{ + /* Return the MemoryRegion* to map/unmap for this chipselect */ + struct omap_gpmc_cs_file_s *f = &s->cs_file[cs]; + if (omap_gpmc_devtype(f) == OMAP_GPMC_NOR) { + return f->iomem; + } + if ((s->prefetch.config1 & 0x80) && + (prefetch_cs(s->prefetch.config1) == cs)) { + /* The prefetch engine is enabled for this CS: map the FIFO */ + return &s->prefetch.iomem; + } + return &f->nandiomem; +} + +static void omap_gpmc_cs_map(struct omap_gpmc_s *s, int cs) +{ + struct omap_gpmc_cs_file_s *f = &s->cs_file[cs]; + uint32_t mask = (f->config[6] >> 8) & 0xf; + uint32_t base = f->config[6] & 0x3f; + uint32_t size; + + if (!f->iomem && !f->dev) { + return; + } + + if (!(f->config[6] & (1 << 6))) { + /* Do nothing unless CSVALID */ + return; + } + + /* TODO: check for overlapping regions and report access errors */ + if (mask != 0x8 && mask != 0xc && mask != 0xe && mask != 0xf + && !(s->accept_256 && !mask)) { + fprintf(stderr, "%s: invalid chip-select mask address (0x%x)\n", + __func__, mask); + } + + base <<= 24; + size = (0x0fffffff & ~(mask << 24)) + 1; + /* TODO: rather than setting the size of the mapping (which should be + * constant), the mask should cause wrapping of the address space, so + * that the same memory becomes accessible at every size bytes + * starting from base. */ + memory_region_init(&f->container, NULL, "omap-gpmc-file", size); + memory_region_add_subregion(&f->container, 0, + omap_gpmc_cs_memregion(s, cs)); + memory_region_add_subregion(get_system_memory(), base, + &f->container); +} + +static void omap_gpmc_cs_unmap(struct omap_gpmc_s *s, int cs) +{ + struct omap_gpmc_cs_file_s *f = &s->cs_file[cs]; + if (!(f->config[6] & (1 << 6))) { + /* Do nothing unless CSVALID */ + return; + } + if (!f->iomem && !f->dev) { + return; + } + memory_region_del_subregion(get_system_memory(), &f->container); + memory_region_del_subregion(&f->container, omap_gpmc_cs_memregion(s, cs)); + object_unparent(OBJECT(&f->container)); +} + +void omap_gpmc_reset(struct omap_gpmc_s *s) +{ + int i; + + s->sysconfig = 0; + s->irqst = 0; + s->irqen = 0; + omap_gpmc_int_update(s); + for (i = 0; i < 8; i++) { + /* This has to happen before we change any of the config + * used to determine which memory regions are mapped or unmapped. 
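+ * (omap_gpmc_cs_unmap() is a no-op for a chipselect with CSVALID
+ * clear or with nothing attached, so unconditionally unmapping all
+ * eight chipselects here is safe.)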
+ */ + omap_gpmc_cs_unmap(s, i); + } + s->timeout = 0; + s->config = 0xa00; + s->prefetch.config1 = 0x00004000; + s->prefetch.transfercount = 0x00000000; + s->prefetch.startengine = 0; + s->prefetch.fifopointer = 0; + s->prefetch.count = 0; + for (i = 0; i < 8; i ++) { + s->cs_file[i].config[1] = 0x101001; + s->cs_file[i].config[2] = 0x020201; + s->cs_file[i].config[3] = 0x10031003; + s->cs_file[i].config[4] = 0x10f1111; + s->cs_file[i].config[5] = 0; + s->cs_file[i].config[6] = 0xf00; + /* In theory we could probe attached devices for some CFG1 + * bits here, but we just retain them across resets as they + * were set initially by omap_gpmc_attach(). + */ + if (i == 0) { + s->cs_file[i].config[0] &= 0x00433e00; + s->cs_file[i].config[6] |= 1 << 6; /* CSVALID */ + omap_gpmc_cs_map(s, i); + } else { + s->cs_file[i].config[0] &= 0x00403c00; + } + } + s->ecc_cs = 0; + s->ecc_ptr = 0; + s->ecc_cfg = 0x3fcff000; + for (i = 0; i < 9; i ++) + ecc_reset(&s->ecc[i]); +} + +static int gpmc_wordaccess_only(hwaddr addr) +{ + /* Return true if the register offset is to a register that + * only permits word width accesses. + * Non-word accesses are only OK for GPMC_NAND_DATA/ADDRESS/COMMAND + * for any chipselect. + */ + if (addr >= 0x60 && addr <= 0x1d4) { + int cs = (addr - 0x60) / 0x30; + addr -= cs * 0x30; + if (addr >= 0x7c && addr < 0x88) { + /* GPMC_NAND_COMMAND, GPMC_NAND_ADDRESS, GPMC_NAND_DATA */ + return 0; + } + } + return 1; +} + +static uint64_t omap_gpmc_read(void *opaque, hwaddr addr, + unsigned size) +{ + struct omap_gpmc_s *s = (struct omap_gpmc_s *) opaque; + int cs; + struct omap_gpmc_cs_file_s *f; + + if (size != 4 && gpmc_wordaccess_only(addr)) { + return omap_badwidth_read32(opaque, addr); + } + + switch (addr) { + case 0x000: /* GPMC_REVISION */ + return s->revision; + + case 0x010: /* GPMC_SYSCONFIG */ + return s->sysconfig; + + case 0x014: /* GPMC_SYSSTATUS */ + return 1; /* RESETDONE */ + + case 0x018: /* GPMC_IRQSTATUS */ + return s->irqst; + + case 0x01c: /* GPMC_IRQENABLE */ + return s->irqen; + + case 0x040: /* GPMC_TIMEOUT_CONTROL */ + return s->timeout; + + case 0x044: /* GPMC_ERR_ADDRESS */ + case 0x048: /* GPMC_ERR_TYPE */ + return 0; + + case 0x050: /* GPMC_CONFIG */ + return s->config; + + case 0x054: /* GPMC_STATUS */ + return 0x001; + + case 0x060 ... 0x1d4: + cs = (addr - 0x060) / 0x30; + addr -= cs * 0x30; + f = s->cs_file + cs; + switch (addr) { + case 0x60: /* GPMC_CONFIG1 */ + return f->config[0]; + case 0x64: /* GPMC_CONFIG2 */ + return f->config[1]; + case 0x68: /* GPMC_CONFIG3 */ + return f->config[2]; + case 0x6c: /* GPMC_CONFIG4 */ + return f->config[3]; + case 0x70: /* GPMC_CONFIG5 */ + return f->config[4]; + case 0x74: /* GPMC_CONFIG6 */ + return f->config[5]; + case 0x78: /* GPMC_CONFIG7 */ + return f->config[6]; + case 0x84 ... 0x87: /* GPMC_NAND_DATA */ + if (omap_gpmc_devtype(f) == OMAP_GPMC_NAND) { + return omap_nand_read(f, 0, size); + } + return 0; + } + break; + + case 0x1e0: /* GPMC_PREFETCH_CONFIG1 */ + return s->prefetch.config1; + case 0x1e4: /* GPMC_PREFETCH_CONFIG2 */ + return s->prefetch.transfercount; + case 0x1ec: /* GPMC_PREFETCH_CONTROL */ + return s->prefetch.startengine; + case 0x1f0: /* GPMC_PREFETCH_STATUS */ + /* NB: The OMAP3 TRM is inconsistent about whether the GPMC + * FIFOTHRESHOLDSTATUS bit should be set when + * FIFOPOINTER > FIFOTHRESHOLD or when it is >= FIFOTHRESHOLD. 
+ * Apparently the underlying functional spec from which the TRM was + * created states that the behaviour is ">=", and this also + * makes more conceptual sense. + */ + return (s->prefetch.fifopointer << 24) | + ((s->prefetch.fifopointer >= + ((s->prefetch.config1 >> 8) & 0x7f) ? 1 : 0) << 16) | + s->prefetch.count; + + case 0x1f4: /* GPMC_ECC_CONFIG */ + return s->ecc_cs; + case 0x1f8: /* GPMC_ECC_CONTROL */ + return s->ecc_ptr; + case 0x1fc: /* GPMC_ECC_SIZE_CONFIG */ + return s->ecc_cfg; + case 0x200 ... 0x220: /* GPMC_ECC_RESULT */ + cs = (addr & 0x1f) >> 2; + /* TODO: check correctness */ + return + ((s->ecc[cs].cp & 0x07) << 0) | + ((s->ecc[cs].cp & 0x38) << 13) | + ((s->ecc[cs].lp[0] & 0x1ff) << 3) | + ((s->ecc[cs].lp[1] & 0x1ff) << 19); + + case 0x230: /* GPMC_TESTMODE_CTRL */ + return 0; + case 0x234: /* GPMC_PSA_LSB */ + case 0x238: /* GPMC_PSA_MSB */ + return 0x00000000; + } + + OMAP_BAD_REG(addr); + return 0; +} + +static void omap_gpmc_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + struct omap_gpmc_s *s = (struct omap_gpmc_s *) opaque; + int cs; + struct omap_gpmc_cs_file_s *f; + + if (size != 4 && gpmc_wordaccess_only(addr)) { + omap_badwidth_write32(opaque, addr, value); + return; + } + + switch (addr) { + case 0x000: /* GPMC_REVISION */ + case 0x014: /* GPMC_SYSSTATUS */ + case 0x054: /* GPMC_STATUS */ + case 0x1f0: /* GPMC_PREFETCH_STATUS */ + case 0x200 ... 0x220: /* GPMC_ECC_RESULT */ + case 0x234: /* GPMC_PSA_LSB */ + case 0x238: /* GPMC_PSA_MSB */ + OMAP_RO_REG(addr); + break; + + case 0x010: /* GPMC_SYSCONFIG */ + if ((value >> 3) == 0x3) + fprintf(stderr, "%s: bad SDRAM idle mode %"PRIi64"\n", + __func__, value >> 3); + if (value & 2) + omap_gpmc_reset(s); + s->sysconfig = value & 0x19; + break; + + case 0x018: /* GPMC_IRQSTATUS */ + s->irqst &= ~value; + omap_gpmc_int_update(s); + break; + + case 0x01c: /* GPMC_IRQENABLE */ + s->irqen = value & 0xf03; + omap_gpmc_int_update(s); + break; + + case 0x040: /* GPMC_TIMEOUT_CONTROL */ + s->timeout = value & 0x1ff1; + break; + + case 0x044: /* GPMC_ERR_ADDRESS */ + case 0x048: /* GPMC_ERR_TYPE */ + break; + + case 0x050: /* GPMC_CONFIG */ + s->config = value & 0xf13; + break; + + case 0x060 ... 0x1d4: + cs = (addr - 0x060) / 0x30; + addr -= cs * 0x30; + f = s->cs_file + cs; + switch (addr) { + case 0x60: /* GPMC_CONFIG1 */ + f->config[0] = value & 0xffef3e13; + break; + case 0x64: /* GPMC_CONFIG2 */ + f->config[1] = value & 0x001f1f8f; + break; + case 0x68: /* GPMC_CONFIG3 */ + f->config[2] = value & 0x001f1f8f; + break; + case 0x6c: /* GPMC_CONFIG4 */ + f->config[3] = value & 0x1f8f1f8f; + break; + case 0x70: /* GPMC_CONFIG5 */ + f->config[4] = value & 0x0f1f1f1f; + break; + case 0x74: /* GPMC_CONFIG6 */ + f->config[5] = value & 0x00000fcf; + break; + case 0x78: /* GPMC_CONFIG7 */ + if ((f->config[6] ^ value) & 0xf7f) { + omap_gpmc_cs_unmap(s, cs); + f->config[6] = value & 0x00000f7f; + omap_gpmc_cs_map(s, cs); + } + break; + case 0x7c ... 0x7f: /* GPMC_NAND_COMMAND */ + if (omap_gpmc_devtype(f) == OMAP_GPMC_NAND) { + nand_setpins(f->dev, 1, 0, 0, 1, 0); /* CLE */ + omap_nand_setio(f->dev, value, omap_gpmc_devsize(f), size); + } + break; + case 0x80 ... 0x83: /* GPMC_NAND_ADDRESS */ + if (omap_gpmc_devtype(f) == OMAP_GPMC_NAND) { + nand_setpins(f->dev, 0, 1, 0, 1, 0); /* ALE */ + omap_nand_setio(f->dev, value, omap_gpmc_devsize(f), size); + } + break; + case 0x84 ... 
0x87: /* GPMC_NAND_DATA */ + if (omap_gpmc_devtype(f) == OMAP_GPMC_NAND) { + omap_nand_write(f, 0, value, size); + } + break; + default: + goto bad_reg; + } + break; + + case 0x1e0: /* GPMC_PREFETCH_CONFIG1 */ + if (!s->prefetch.startengine) { + uint32_t newconfig1 = value & 0x7f8f7fbf; + uint32_t changed; + changed = newconfig1 ^ s->prefetch.config1; + if (changed & (0x80 | 0x7000000)) { + /* Turning the engine on or off, or mapping it somewhere else. + * cs_map() and cs_unmap() check the prefetch config and + * overall CSVALID bits, so it is sufficient to unmap-and-map + * both the old cs and the new one. Note that we adhere to + * the "unmap/change config/map" order (and not unmap twice + * if newcs == oldcs), otherwise we'll try to delete the wrong + * memory region. + */ + int oldcs = prefetch_cs(s->prefetch.config1); + int newcs = prefetch_cs(newconfig1); + omap_gpmc_cs_unmap(s, oldcs); + if (oldcs != newcs) { + omap_gpmc_cs_unmap(s, newcs); + } + s->prefetch.config1 = newconfig1; + omap_gpmc_cs_map(s, oldcs); + if (oldcs != newcs) { + omap_gpmc_cs_map(s, newcs); + } + } else { + s->prefetch.config1 = newconfig1; + } + } + break; + + case 0x1e4: /* GPMC_PREFETCH_CONFIG2 */ + if (!s->prefetch.startengine) { + s->prefetch.transfercount = value & 0x3fff; + } + break; + + case 0x1ec: /* GPMC_PREFETCH_CONTROL */ + if (s->prefetch.startengine != (value & 1)) { + s->prefetch.startengine = value & 1; + if (s->prefetch.startengine) { + /* Prefetch engine start */ + s->prefetch.count = s->prefetch.transfercount; + if (s->prefetch.config1 & 1) { + /* Write */ + s->prefetch.fifopointer = 64; + } else { + /* Read */ + s->prefetch.fifopointer = 0; + fill_prefetch_fifo(s); + } + } else { + /* Prefetch engine forcibly stopped. The TRM + * doesn't define the behaviour if you do this. + * We clear the prefetch count, which means that + * we permit no more writes, and don't read any + * more data from NAND. The CPU can still drain + * the FIFO of unread data. + */ + s->prefetch.count = 0; + } + omap_gpmc_int_update(s); + } + break; + + case 0x1f4: /* GPMC_ECC_CONFIG */ + s->ecc_cs = 0x8f; + break; + case 0x1f8: /* GPMC_ECC_CONTROL */ + if (value & (1 << 8)) + for (cs = 0; cs < 9; cs ++) + ecc_reset(&s->ecc[cs]); + s->ecc_ptr = value & 0xf; + if (s->ecc_ptr == 0 || s->ecc_ptr > 9) { + s->ecc_ptr = 0; + s->ecc_cs &= ~1; + } + break; + case 0x1fc: /* GPMC_ECC_SIZE_CONFIG */ + s->ecc_cfg = value & 0x3fcff1ff; + break; + case 0x230: /* GPMC_TESTMODE_CTRL */ + if (value & 7) + fprintf(stderr, "%s: test mode enable attempt\n", __func__); + break; + + default: + bad_reg: + OMAP_BAD_REG(addr); + return; + } +} + +static const MemoryRegionOps omap_gpmc_ops = { + .read = omap_gpmc_read, + .write = omap_gpmc_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +struct omap_gpmc_s *omap_gpmc_init(struct omap_mpu_state_s *mpu, + hwaddr base, + qemu_irq irq, qemu_irq drq) +{ + int cs; + struct omap_gpmc_s *s = g_new0(struct omap_gpmc_s, 1); + + memory_region_init_io(&s->iomem, NULL, &omap_gpmc_ops, s, "omap-gpmc", 0x1000); + memory_region_add_subregion(get_system_memory(), base, &s->iomem); + + s->irq = irq; + s->drq = drq; + s->accept_256 = cpu_is_omap3630(mpu); + s->revision = cpu_class_omap3(mpu) ? 0x50 : 0x20; + s->lastirq = 0; + omap_gpmc_reset(s); + + /* We have to register a different IO memory handler for each + * chip select region in case a NAND device is mapped there. 
We + * make the region the worst-case size of 256MB and rely on the + * container memory region in cs_map to chop it down to the actual + * guest-requested size. + */ + for (cs = 0; cs < 8; cs++) { + memory_region_init_io(&s->cs_file[cs].nandiomem, NULL, + &omap_nand_ops, + &s->cs_file[cs], + "omap-nand", + 256 * 1024 * 1024); + } + + memory_region_init_io(&s->prefetch.iomem, NULL, &omap_prefetch_ops, s, + "omap-gpmc-prefetch", 256 * 1024 * 1024); + return s; +} + +void omap_gpmc_attach(struct omap_gpmc_s *s, int cs, MemoryRegion *iomem) +{ + struct omap_gpmc_cs_file_s *f; + assert(iomem); + + if (cs < 0 || cs >= 8) { + fprintf(stderr, "%s: bad chip-select %i\n", __func__, cs); + exit(-1); + } + f = &s->cs_file[cs]; + + omap_gpmc_cs_unmap(s, cs); + f->config[0] &= ~(0xf << 10); + f->iomem = iomem; + omap_gpmc_cs_map(s, cs); +} + +void omap_gpmc_attach_nand(struct omap_gpmc_s *s, int cs, DeviceState *nand) +{ + struct omap_gpmc_cs_file_s *f; + assert(nand); + + if (cs < 0 || cs >= 8) { + fprintf(stderr, "%s: bad chip-select %i\n", __func__, cs); + exit(-1); + } + f = &s->cs_file[cs]; + + omap_gpmc_cs_unmap(s, cs); + f->config[0] &= ~(0xf << 10); + f->config[0] |= (OMAP_GPMC_NAND << 10); + f->dev = nand; + if (nand_getbuswidth(f->dev) == 16) { + f->config[0] |= OMAP_GPMC_16BIT << 12; + } + omap_gpmc_cs_map(s, cs); +} diff --git a/hw/misc/omap_l4.c b/hw/misc/omap_l4.c new file mode 100644 index 000000000..54aeaecd6 --- /dev/null +++ b/hw/misc/omap_l4.c @@ -0,0 +1,163 @@ +/* + * TI OMAP L4 interconnect emulation. + * + * Copyright (C) 2007-2009 Nokia Corporation + * Written by Andrzej Zaborowski + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 or + * (at your option) any later version of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . 
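+ * <http://www.gnu.org/licenses/>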
+ */ +#include "qemu/osdep.h" +#include "hw/arm/omap.h" + +struct omap_l4_s { + MemoryRegion *address_space; + hwaddr base; + int ta_num; + struct omap_target_agent_s ta[]; +}; + +struct omap_l4_s *omap_l4_init(MemoryRegion *address_space, + hwaddr base, int ta_num) +{ + struct omap_l4_s *bus = g_malloc0( + sizeof(*bus) + ta_num * sizeof(*bus->ta)); + + bus->address_space = address_space; + bus->ta_num = ta_num; + bus->base = base; + + return bus; +} + +hwaddr omap_l4_region_base(struct omap_target_agent_s *ta, + int region) +{ + return ta->bus->base + ta->start[region].offset; +} + +hwaddr omap_l4_region_size(struct omap_target_agent_s *ta, + int region) +{ + return ta->start[region].size; +} + +static uint64_t omap_l4ta_read(void *opaque, hwaddr addr, + unsigned size) +{ + struct omap_target_agent_s *s = (struct omap_target_agent_s *) opaque; + + if (size != 2) { + return omap_badwidth_read16(opaque, addr); + } + + switch (addr) { + case 0x00: /* COMPONENT */ + return s->component; + + case 0x20: /* AGENT_CONTROL */ + return s->control; + + case 0x28: /* AGENT_STATUS */ + return s->status; + } + + OMAP_BAD_REG(addr); + return 0; +} + +static void omap_l4ta_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + struct omap_target_agent_s *s = (struct omap_target_agent_s *) opaque; + + if (size != 4) { + omap_badwidth_write32(opaque, addr, value); + return; + } + + switch (addr) { + case 0x00: /* COMPONENT */ + case 0x28: /* AGENT_STATUS */ + OMAP_RO_REG(addr); + break; + + case 0x20: /* AGENT_CONTROL */ + s->control = value & 0x01000700; + if (value & 1) /* OCP_RESET */ + s->status &= ~1; /* REQ_TIMEOUT */ + break; + + default: + OMAP_BAD_REG(addr); + } +} + +static const MemoryRegionOps omap_l4ta_ops = { + .read = omap_l4ta_read, + .write = omap_l4ta_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +struct omap_target_agent_s *omap_l4ta_get(struct omap_l4_s *bus, + const struct omap_l4_region_s *regions, + const struct omap_l4_agent_info_s *agents, + int cs) +{ + int i; + struct omap_target_agent_s *ta = NULL; + const struct omap_l4_agent_info_s *info = NULL; + + for (i = 0; i < bus->ta_num; i ++) + if (agents[i].ta == cs) { + ta = &bus->ta[i]; + info = &agents[i]; + break; + } + if (!ta) { + fprintf(stderr, "%s: bad target agent (%i)\n", __func__, cs); + exit(-1); + } + + ta->bus = bus; + ta->start = ®ions[info->region]; + ta->regions = info->regions; + + ta->component = ('Q' << 24) | ('E' << 16) | ('M' << 8) | ('U' << 0); + ta->status = 0x00000000; + ta->control = 0x00000200; /* XXX 01000200 for L4TAO */ + + memory_region_init_io(&ta->iomem, NULL, &omap_l4ta_ops, ta, "omap.l4ta", + omap_l4_region_size(ta, info->ta_region)); + omap_l4_attach(ta, info->ta_region, &ta->iomem); + + return ta; +} + +hwaddr omap_l4_attach(struct omap_target_agent_s *ta, + int region, MemoryRegion *mr) +{ + hwaddr base; + + if (region < 0 || region >= ta->regions) { + fprintf(stderr, "%s: bad io region (%i)\n", __func__, region); + exit(-1); + } + + base = ta->bus->base + ta->start[region].offset; + if (mr) { + memory_region_add_subregion(ta->bus->address_space, base, mr); + } + + return base; +} diff --git a/hw/misc/omap_sdrc.c b/hw/misc/omap_sdrc.c new file mode 100644 index 000000000..f2f72f681 --- /dev/null +++ b/hw/misc/omap_sdrc.c @@ -0,0 +1,168 @@ +/* + * TI OMAP SDRAM controller emulation. 
+ * + * Copyright (C) 2007-2008 Nokia Corporation + * Written by Andrzej Zaborowski + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 or + * (at your option) any later version of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . + */ +#include "qemu/osdep.h" +#include "hw/arm/omap.h" + +/* SDRAM Controller Subsystem */ +struct omap_sdrc_s { + MemoryRegion iomem; + uint8_t config; +}; + +void omap_sdrc_reset(struct omap_sdrc_s *s) +{ + s->config = 0x10; +} + +static uint64_t omap_sdrc_read(void *opaque, hwaddr addr, + unsigned size) +{ + struct omap_sdrc_s *s = (struct omap_sdrc_s *) opaque; + + if (size != 4) { + return omap_badwidth_read32(opaque, addr); + } + + switch (addr) { + case 0x00: /* SDRC_REVISION */ + return 0x20; + + case 0x10: /* SDRC_SYSCONFIG */ + return s->config; + + case 0x14: /* SDRC_SYSSTATUS */ + return 1; /* RESETDONE */ + + case 0x40: /* SDRC_CS_CFG */ + case 0x44: /* SDRC_SHARING */ + case 0x48: /* SDRC_ERR_ADDR */ + case 0x4c: /* SDRC_ERR_TYPE */ + case 0x60: /* SDRC_DLLA_SCTRL */ + case 0x64: /* SDRC_DLLA_STATUS */ + case 0x68: /* SDRC_DLLB_CTRL */ + case 0x6c: /* SDRC_DLLB_STATUS */ + case 0x70: /* SDRC_POWER */ + case 0x80: /* SDRC_MCFG_0 */ + case 0x84: /* SDRC_MR_0 */ + case 0x88: /* SDRC_EMR1_0 */ + case 0x8c: /* SDRC_EMR2_0 */ + case 0x90: /* SDRC_EMR3_0 */ + case 0x94: /* SDRC_DCDL1_CTRL */ + case 0x98: /* SDRC_DCDL2_CTRL */ + case 0x9c: /* SDRC_ACTIM_CTRLA_0 */ + case 0xa0: /* SDRC_ACTIM_CTRLB_0 */ + case 0xa4: /* SDRC_RFR_CTRL_0 */ + case 0xa8: /* SDRC_MANUAL_0 */ + case 0xb0: /* SDRC_MCFG_1 */ + case 0xb4: /* SDRC_MR_1 */ + case 0xb8: /* SDRC_EMR1_1 */ + case 0xbc: /* SDRC_EMR2_1 */ + case 0xc0: /* SDRC_EMR3_1 */ + case 0xc4: /* SDRC_ACTIM_CTRLA_1 */ + case 0xc8: /* SDRC_ACTIM_CTRLB_1 */ + case 0xd4: /* SDRC_RFR_CTRL_1 */ + case 0xd8: /* SDRC_MANUAL_1 */ + return 0x00; + } + + OMAP_BAD_REG(addr); + return 0; +} + +static void omap_sdrc_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + struct omap_sdrc_s *s = (struct omap_sdrc_s *) opaque; + + if (size != 4) { + omap_badwidth_write32(opaque, addr, value); + return; + } + + switch (addr) { + case 0x00: /* SDRC_REVISION */ + case 0x14: /* SDRC_SYSSTATUS */ + case 0x48: /* SDRC_ERR_ADDR */ + case 0x64: /* SDRC_DLLA_STATUS */ + case 0x6c: /* SDRC_DLLB_STATUS */ + OMAP_RO_REG(addr); + return; + + case 0x10: /* SDRC_SYSCONFIG */ + if ((value >> 3) != 0x2) + fprintf(stderr, "%s: bad SDRAM idle mode %i\n", + __func__, (unsigned)value >> 3); + if (value & 2) + omap_sdrc_reset(s); + s->config = value & 0x18; + break; + + case 0x40: /* SDRC_CS_CFG */ + case 0x44: /* SDRC_SHARING */ + case 0x4c: /* SDRC_ERR_TYPE */ + case 0x60: /* SDRC_DLLA_SCTRL */ + case 0x68: /* SDRC_DLLB_CTRL */ + case 0x70: /* SDRC_POWER */ + case 0x80: /* SDRC_MCFG_0 */ + case 0x84: /* SDRC_MR_0 */ + case 0x88: /* SDRC_EMR1_0 */ + case 0x8c: /* SDRC_EMR2_0 */ + case 0x90: /* SDRC_EMR3_0 */ + case 0x94: /* SDRC_DCDL1_CTRL */ + case 0x98: /* SDRC_DCDL2_CTRL */ + case 0x9c: /* SDRC_ACTIM_CTRLA_0 */ + case 0xa0: /* SDRC_ACTIM_CTRLB_0 */ + case 0xa4: /* 
SDRC_RFR_CTRL_0 */ + case 0xa8: /* SDRC_MANUAL_0 */ + case 0xb0: /* SDRC_MCFG_1 */ + case 0xb4: /* SDRC_MR_1 */ + case 0xb8: /* SDRC_EMR1_1 */ + case 0xbc: /* SDRC_EMR2_1 */ + case 0xc0: /* SDRC_EMR3_1 */ + case 0xc4: /* SDRC_ACTIM_CTRLA_1 */ + case 0xc8: /* SDRC_ACTIM_CTRLB_1 */ + case 0xd4: /* SDRC_RFR_CTRL_1 */ + case 0xd8: /* SDRC_MANUAL_1 */ + break; + + default: + OMAP_BAD_REG(addr); + return; + } +} + +static const MemoryRegionOps omap_sdrc_ops = { + .read = omap_sdrc_read, + .write = omap_sdrc_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +struct omap_sdrc_s *omap_sdrc_init(MemoryRegion *sysmem, + hwaddr base) +{ + struct omap_sdrc_s *s = g_new0(struct omap_sdrc_s, 1); + + omap_sdrc_reset(s); + + memory_region_init_io(&s->iomem, NULL, &omap_sdrc_ops, s, "omap.sdrc", 0x1000); + memory_region_add_subregion(sysmem, base, &s->iomem); + + return s; +} diff --git a/hw/misc/omap_tap.c b/hw/misc/omap_tap.c new file mode 100644 index 000000000..3f595e8df --- /dev/null +++ b/hw/misc/omap_tap.c @@ -0,0 +1,118 @@ +/* + * TI OMAP TEST-Chip-level TAP emulation. + * + * Copyright (C) 2007-2008 Nokia Corporation + * Written by Andrzej Zaborowski + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 or + * (at your option) any later version of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . 
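+ * <http://www.gnu.org/licenses/>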
+ */ + +#include "qemu/osdep.h" +#include "hw/hw.h" +#include "hw/arm/omap.h" + +/* TEST-Chip-level TAP */ +static uint64_t omap_tap_read(void *opaque, hwaddr addr, + unsigned size) +{ + struct omap_mpu_state_s *s = (struct omap_mpu_state_s *) opaque; + + if (size != 4) { + return omap_badwidth_read32(opaque, addr); + } + + switch (addr) { + case 0x204: /* IDCODE_reg */ + switch (s->mpu_model) { + case omap2420: + case omap2422: + case omap2423: + return 0x5b5d902f; /* ES 2.2 */ + case omap2430: + return 0x5b68a02f; /* ES 2.2 */ + case omap3430: + return 0x1b7ae02f; /* ES 2 */ + default: + hw_error("%s: Bad mpu model\n", __func__); + } + + case 0x208: /* PRODUCTION_ID_reg for OMAP2 */ + case 0x210: /* PRODUCTION_ID_reg for OMAP3 */ + switch (s->mpu_model) { + case omap2420: + return 0x000254f0; /* POP ESHS2.1.1 in N91/93/95, ES2 in N800 */ + case omap2422: + return 0x000400f0; + case omap2423: + return 0x000800f0; + case omap2430: + return 0x000000f0; + case omap3430: + return 0x000000f0; + default: + hw_error("%s: Bad mpu model\n", __func__); + } + + case 0x20c: + switch (s->mpu_model) { + case omap2420: + case omap2422: + case omap2423: + return 0xcafeb5d9; /* ES 2.2 */ + case omap2430: + return 0xcafeb68a; /* ES 2.2 */ + case omap3430: + return 0xcafeb7ae; /* ES 2 */ + default: + hw_error("%s: Bad mpu model\n", __func__); + } + + case 0x218: /* DIE_ID_reg */ + return ('Q' << 24) | ('E' << 16) | ('M' << 8) | ('U' << 0); + case 0x21c: /* DIE_ID_reg */ + return 0x54 << 24; + case 0x220: /* DIE_ID_reg */ + return ('Q' << 24) | ('E' << 16) | ('M' << 8) | ('U' << 0); + case 0x224: /* DIE_ID_reg */ + return ('Q' << 24) | ('E' << 16) | ('M' << 8) | ('U' << 0); + } + + OMAP_BAD_REG(addr); + return 0; +} + +static void omap_tap_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + if (size != 4) { + omap_badwidth_write32(opaque, addr, value); + return; + } + + OMAP_BAD_REG(addr); +} + +static const MemoryRegionOps omap_tap_ops = { + .read = omap_tap_read, + .write = omap_tap_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +void omap_tap_init(struct omap_target_agent_s *ta, + struct omap_mpu_state_s *mpu) +{ + memory_region_init_io(&mpu->tap_iomem, NULL, &omap_tap_ops, mpu, "omap.tap", + omap_l4_region_size(ta, 0)); + omap_l4_attach(ta, 0, &mpu->tap_iomem); +} diff --git a/hw/misc/pc-testdev.c b/hw/misc/pc-testdev.c new file mode 100644 index 000000000..e38965186 --- /dev/null +++ b/hw/misc/pc-testdev.c @@ -0,0 +1,216 @@ +/* + * QEMU x86 ISA testdev + * + * Copyright (c) 2012 Avi Kivity, Gerd Hoffmann, Marcelo Tosatti + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +/* + * This device is used to test KVM features specific to the x86 port, such + * as emulation, power management, interrupt routing, among others. It's meant + * to be used like: + * + * qemu-system-x86_64 -device pc-testdev -serial stdio \ + * -device isa-debug-exit,iobase=0xf4,iosize=0x4 \ + * -kernel /home/lmr/Code/virt-test.git/kvm/unittests/msr.flat + * + * Where msr.flat is one of the KVM unittests, present on a separate repo, + * https://git.kernel.org/pub/scm/virt/kvm/kvm-unit-tests.git +*/ + +#include "qemu/osdep.h" +#include "qemu/module.h" +#include "hw/irq.h" +#include "hw/isa/isa.h" +#include "qom/object.h" + +#define IOMEM_LEN 0x10000 + +struct PCTestdev { + ISADevice parent_obj; + + MemoryRegion ioport; + MemoryRegion ioport_byte; + MemoryRegion flush; + MemoryRegion irq; + MemoryRegion iomem; + uint32_t ioport_data; + char iomem_buf[IOMEM_LEN]; +}; + +#define TYPE_TESTDEV "pc-testdev" +OBJECT_DECLARE_SIMPLE_TYPE(PCTestdev, TESTDEV) + +static uint64_t test_irq_line_read(void *opaque, hwaddr addr, unsigned size) +{ + return 0; +} + +static void test_irq_line_write(void *opaque, hwaddr addr, uint64_t data, + unsigned len) +{ + PCTestdev *dev = opaque; + ISADevice *isa = ISA_DEVICE(dev); + + qemu_set_irq(isa_get_irq(isa, addr), !!data); +} + +static const MemoryRegionOps test_irq_ops = { + .read = test_irq_line_read, + .write = test_irq_line_write, + .valid.min_access_size = 1, + .valid.max_access_size = 1, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static void test_ioport_write(void *opaque, hwaddr addr, uint64_t data, + unsigned len) +{ + PCTestdev *dev = opaque; + int bits = len * 8; + int start_bit = (addr & 3) * 8; + uint32_t mask = ((uint32_t)-1 >> (32 - bits)) << start_bit; + dev->ioport_data &= ~mask; + dev->ioport_data |= data << start_bit; +} + +static uint64_t test_ioport_read(void *opaque, hwaddr addr, unsigned len) +{ + PCTestdev *dev = opaque; + int bits = len * 8; + int start_bit = (addr & 3) * 8; + uint32_t mask = ((uint32_t)-1 >> (32 - bits)) << start_bit; + return (dev->ioport_data & mask) >> start_bit; +} + +static const MemoryRegionOps test_ioport_ops = { + .read = test_ioport_read, + .write = test_ioport_write, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static const MemoryRegionOps test_ioport_byte_ops = { + .read = test_ioport_read, + .write = test_ioport_write, + .valid.min_access_size = 1, + .valid.max_access_size = 4, + .impl.min_access_size = 1, + .impl.max_access_size = 1, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static uint64_t test_flush_page_read(void *opaque, hwaddr addr, unsigned size) +{ + return 0; +} + +static void test_flush_page_write(void *opaque, hwaddr addr, uint64_t data, + unsigned len) +{ + hwaddr page = 4096; + void *a = cpu_physical_memory_map(data & ~0xffful, &page, false); + + /* We might not be able to get the full page, only mprotect what we actually + have mapped */ +#if defined(CONFIG_POSIX) + mprotect(a, page, PROT_NONE); + mprotect(a, page, PROT_READ|PROT_WRITE); +#endif + cpu_physical_memory_unmap(a, page, 0, 0); +} + +static const MemoryRegionOps test_flush_ops = { + .read = test_flush_page_read, + .write = test_flush_page_write, + .valid.min_access_size = 4, + .valid.max_access_size = 4, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + 
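+/*
+ * Worked example of the byte-lane arithmetic in test_ioport_read()/
+ * test_ioport_write() above (illustrative values only): a 2-byte
+ * write of 0xabcd at offset 2 gives
+ *
+ *     bits      = 2 * 8       = 16
+ *     start_bit = (2 & 3) * 8 = 16
+ *     mask      = (0xffffffff >> (32 - 16)) << 16 = 0xffff0000
+ *     ioport_data = (ioport_data & ~0xffff0000) | (0xabcd << 16)
+ *
+ * so only the upper half-word changes; bytes written earlier through
+ * the one-byte window are preserved.
+ */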
+static uint64_t test_iomem_read(void *opaque, hwaddr addr, unsigned len) +{ + PCTestdev *dev = opaque; + uint64_t ret = 0; + memcpy(&ret, &dev->iomem_buf[addr], len); + + return ret; +} + +static void test_iomem_write(void *opaque, hwaddr addr, uint64_t val, + unsigned len) +{ + PCTestdev *dev = opaque; + memcpy(&dev->iomem_buf[addr], &val, len); + dev->iomem_buf[addr] = val; +} + +static const MemoryRegionOps test_iomem_ops = { + .read = test_iomem_read, + .write = test_iomem_write, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static void testdev_realizefn(DeviceState *d, Error **errp) +{ + ISADevice *isa = ISA_DEVICE(d); + PCTestdev *dev = TESTDEV(d); + MemoryRegion *mem = isa_address_space(isa); + MemoryRegion *io = isa_address_space_io(isa); + + memory_region_init_io(&dev->ioport, OBJECT(dev), &test_ioport_ops, dev, + "pc-testdev-ioport", 4); + memory_region_init_io(&dev->ioport_byte, OBJECT(dev), + &test_ioport_byte_ops, dev, + "pc-testdev-ioport-byte", 4); + memory_region_init_io(&dev->flush, OBJECT(dev), &test_flush_ops, dev, + "pc-testdev-flush-page", 4); + memory_region_init_io(&dev->irq, OBJECT(dev), &test_irq_ops, dev, + "pc-testdev-irq-line", 24); + memory_region_init_io(&dev->iomem, OBJECT(dev), &test_iomem_ops, dev, + "pc-testdev-iomem", IOMEM_LEN); + + memory_region_add_subregion(io, 0xe0, &dev->ioport); + memory_region_add_subregion(io, 0xe4, &dev->flush); + memory_region_add_subregion(io, 0xe8, &dev->ioport_byte); + memory_region_add_subregion(io, 0x2000, &dev->irq); + memory_region_add_subregion(mem, 0xff000000, &dev->iomem); +} + +static void testdev_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + set_bit(DEVICE_CATEGORY_MISC, dc->categories); + dc->realize = testdev_realizefn; +} + +static const TypeInfo testdev_info = { + .name = TYPE_TESTDEV, + .parent = TYPE_ISA_DEVICE, + .instance_size = sizeof(PCTestdev), + .class_init = testdev_class_init, +}; + +static void testdev_register_types(void) +{ + type_register_static(&testdev_info); +} + +type_init(testdev_register_types) diff --git a/hw/misc/pca9552.c b/hw/misc/pca9552.c new file mode 100644 index 000000000..fff19e369 --- /dev/null +++ b/hw/misc/pca9552.c @@ -0,0 +1,437 @@ +/* + * PCA9552 I2C LED blinker + * + * https://www.nxp.com/docs/en/application-note/AN264.pdf + * + * Copyright (c) 2017-2018, IBM Corporation. + * Copyright (c) 2020 Philippe Mathieu-Daudé + * + * This work is licensed under the terms of the GNU GPL, version 2 or + * later. See the COPYING file in the top-level directory. 
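+ *
+ * The concrete model below is the 16-pin PCA9552; the abstract
+ * PCA955xClass (pin_count, max_reg) leaves room for other PCA955x
+ * family members to be modelled on top of the same code.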
+ */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "qemu/bitops.h" +#include "hw/qdev-properties.h" +#include "hw/misc/pca9552.h" +#include "hw/misc/pca9552_regs.h" +#include "hw/irq.h" +#include "migration/vmstate.h" +#include "qapi/error.h" +#include "qapi/visitor.h" +#include "trace.h" +#include "qom/object.h" + +struct PCA955xClass { + /*< private >*/ + I2CSlaveClass parent_class; + /*< public >*/ + + uint8_t pin_count; + uint8_t max_reg; +}; +typedef struct PCA955xClass PCA955xClass; + +DECLARE_CLASS_CHECKERS(PCA955xClass, PCA955X, + TYPE_PCA955X) + +#define PCA9552_LED_ON 0x0 +#define PCA9552_LED_OFF 0x1 +#define PCA9552_LED_PWM0 0x2 +#define PCA9552_LED_PWM1 0x3 + +static const char *led_state[] = {"on", "off", "pwm0", "pwm1"}; + +static uint8_t pca955x_pin_get_config(PCA955xState *s, int pin) +{ + uint8_t reg = PCA9552_LS0 + (pin / 4); + uint8_t shift = (pin % 4) << 1; + + return extract32(s->regs[reg], shift, 2); +} + +/* Return INPUT status (bit #N belongs to GPIO #N) */ +static uint16_t pca955x_pins_get_status(PCA955xState *s) +{ + return (s->regs[PCA9552_INPUT1] << 8) | s->regs[PCA9552_INPUT0]; +} + +static void pca955x_display_pins_status(PCA955xState *s, + uint16_t previous_pins_status) +{ + PCA955xClass *k = PCA955X_GET_CLASS(s); + uint16_t pins_status, pins_changed; + int i; + + pins_status = pca955x_pins_get_status(s); + pins_changed = previous_pins_status ^ pins_status; + if (!pins_changed) { + return; + } + if (trace_event_get_state_backends(TRACE_PCA955X_GPIO_STATUS)) { + char *buf = g_newa(char, k->pin_count + 1); + + for (i = 0; i < k->pin_count; i++) { + if (extract32(pins_status, i, 1)) { + buf[i] = '*'; + } else { + buf[i] = '.'; + } + } + buf[i] = '\0'; + trace_pca955x_gpio_status(s->description, buf); + } + if (trace_event_get_state_backends(TRACE_PCA955X_GPIO_CHANGE)) { + for (i = 0; i < k->pin_count; i++) { + if (extract32(pins_changed, i, 1)) { + unsigned new_state = extract32(pins_status, i, 1); + + /* + * We display the state using the PCA logic ("active-high"). + * This is not the state of the LED, which signal might be + * wired "active-low" on the board. 
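+ * E.g. a pin whose INPUT bit flips to 1 is traced as going "0 -> 1"
+ * even if the LED it drives is wired active-low and has just gone
+ * dark.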
+ */ + trace_pca955x_gpio_change(s->description, i, + !new_state, new_state); + } + } + } +} + +static void pca955x_update_pin_input(PCA955xState *s) +{ + PCA955xClass *k = PCA955X_GET_CLASS(s); + int i; + + for (i = 0; i < k->pin_count; i++) { + uint8_t input_reg = PCA9552_INPUT0 + (i / 8); + uint8_t input_shift = (i % 8); + uint8_t config = pca955x_pin_get_config(s, i); + + switch (config) { + case PCA9552_LED_ON: + qemu_set_irq(s->gpio[i], 1); + s->regs[input_reg] |= 1 << input_shift; + break; + case PCA9552_LED_OFF: + qemu_set_irq(s->gpio[i], 0); + s->regs[input_reg] &= ~(1 << input_shift); + break; + case PCA9552_LED_PWM0: + case PCA9552_LED_PWM1: + /* TODO */ + default: + break; + } + } +} + +static uint8_t pca955x_read(PCA955xState *s, uint8_t reg) +{ + switch (reg) { + case PCA9552_INPUT0: + case PCA9552_INPUT1: + case PCA9552_PSC0: + case PCA9552_PWM0: + case PCA9552_PSC1: + case PCA9552_PWM1: + case PCA9552_LS0: + case PCA9552_LS1: + case PCA9552_LS2: + case PCA9552_LS3: + return s->regs[reg]; + default: + qemu_log_mask(LOG_GUEST_ERROR, "%s: unexpected read to register %d\n", + __func__, reg); + return 0xFF; + } +} + +static void pca955x_write(PCA955xState *s, uint8_t reg, uint8_t data) +{ + uint16_t pins_status; + + switch (reg) { + case PCA9552_PSC0: + case PCA9552_PWM0: + case PCA9552_PSC1: + case PCA9552_PWM1: + s->regs[reg] = data; + break; + + case PCA9552_LS0: + case PCA9552_LS1: + case PCA9552_LS2: + case PCA9552_LS3: + pins_status = pca955x_pins_get_status(s); + s->regs[reg] = data; + pca955x_update_pin_input(s); + pca955x_display_pins_status(s, pins_status); + break; + + case PCA9552_INPUT0: + case PCA9552_INPUT1: + default: + qemu_log_mask(LOG_GUEST_ERROR, "%s: unexpected write to register %d\n", + __func__, reg); + } +} + +/* + * When Auto-Increment is on, the register address is incremented + * after each byte is sent to or received by the device. The index + * rollovers to 0 when the maximum register address is reached. + */ +static void pca955x_autoinc(PCA955xState *s) +{ + PCA955xClass *k = PCA955X_GET_CLASS(s); + + if (s->pointer != 0xFF && s->pointer & PCA9552_AUTOINC) { + uint8_t reg = s->pointer & 0xf; + + reg = (reg + 1) % (k->max_reg + 1); + s->pointer = reg | PCA9552_AUTOINC; + } +} + +static uint8_t pca955x_recv(I2CSlave *i2c) +{ + PCA955xState *s = PCA955X(i2c); + uint8_t ret; + + ret = pca955x_read(s, s->pointer & 0xf); + + /* + * From the Specs: + * + * Important Note: When a Read sequence is initiated and the + * AI bit is set to Logic Level 1, the Read Sequence MUST + * start by a register different from 0. + * + * I don't know what should be done in this case, so throw an + * error. 
+ */ + if (s->pointer == PCA9552_AUTOINC) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Autoincrement read starting with register 0\n", + __func__); + } + + pca955x_autoinc(s); + + return ret; +} + +static int pca955x_send(I2CSlave *i2c, uint8_t data) +{ + PCA955xState *s = PCA955X(i2c); + + /* First byte sent by is the register address */ + if (s->len == 0) { + s->pointer = data; + s->len++; + } else { + pca955x_write(s, s->pointer & 0xf, data); + + pca955x_autoinc(s); + } + + return 0; +} + +static int pca955x_event(I2CSlave *i2c, enum i2c_event event) +{ + PCA955xState *s = PCA955X(i2c); + + s->len = 0; + return 0; +} + +static void pca955x_get_led(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + PCA955xClass *k = PCA955X_GET_CLASS(obj); + PCA955xState *s = PCA955X(obj); + int led, rc, reg; + uint8_t state; + + rc = sscanf(name, "led%2d", &led); + if (rc != 1) { + error_setg(errp, "%s: error reading %s", __func__, name); + return; + } + if (led < 0 || led > k->pin_count) { + error_setg(errp, "%s invalid led %s", __func__, name); + return; + } + /* + * Get the LSx register as the qom interface should expose the device + * state, not the modeled 'input line' behaviour which would come from + * reading the INPUTx reg + */ + reg = PCA9552_LS0 + led / 4; + state = (pca955x_read(s, reg) >> ((led % 4) * 2)) & 0x3; + visit_type_str(v, name, (char **)&led_state[state], errp); +} + +/* + * Return an LED selector register value based on an existing one, with + * the appropriate 2-bit state value set for the given LED number (0-3). + */ +static inline uint8_t pca955x_ledsel(uint8_t oldval, int led_num, int state) +{ + return (oldval & (~(0x3 << (led_num << 1)))) | + ((state & 0x3) << (led_num << 1)); +} + +static void pca955x_set_led(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + PCA955xClass *k = PCA955X_GET_CLASS(obj); + PCA955xState *s = PCA955X(obj); + int led, rc, reg, val; + uint8_t state; + char *state_str; + + if (!visit_type_str(v, name, &state_str, errp)) { + return; + } + rc = sscanf(name, "led%2d", &led); + if (rc != 1) { + error_setg(errp, "%s: error reading %s", __func__, name); + return; + } + if (led < 0 || led > k->pin_count) { + error_setg(errp, "%s invalid led %s", __func__, name); + return; + } + + for (state = 0; state < ARRAY_SIZE(led_state); state++) { + if (!strcmp(state_str, led_state[state])) { + break; + } + } + if (state >= ARRAY_SIZE(led_state)) { + error_setg(errp, "%s invalid led state %s", __func__, state_str); + return; + } + + reg = PCA9552_LS0 + led / 4; + val = pca955x_read(s, reg); + val = pca955x_ledsel(val, led % 4, state); + pca955x_write(s, reg, val); +} + +static const VMStateDescription pca9552_vmstate = { + .name = "PCA9552", + .version_id = 0, + .minimum_version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_UINT8(len, PCA955xState), + VMSTATE_UINT8(pointer, PCA955xState), + VMSTATE_UINT8_ARRAY(regs, PCA955xState, PCA955X_NR_REGS), + VMSTATE_I2C_SLAVE(i2c, PCA955xState), + VMSTATE_END_OF_LIST() + } +}; + +static void pca9552_reset(DeviceState *dev) +{ + PCA955xState *s = PCA955X(dev); + + s->regs[PCA9552_PSC0] = 0xFF; + s->regs[PCA9552_PWM0] = 0x80; + s->regs[PCA9552_PSC1] = 0xFF; + s->regs[PCA9552_PWM1] = 0x80; + s->regs[PCA9552_LS0] = 0x55; /* all OFF */ + s->regs[PCA9552_LS1] = 0x55; + s->regs[PCA9552_LS2] = 0x55; + s->regs[PCA9552_LS3] = 0x55; + + pca955x_update_pin_input(s); + + s->pointer = 0xFF; + s->len = 0; +} + +static void pca955x_initfn(Object *obj) +{ + PCA955xClass *k = 
PCA955X_GET_CLASS(obj); + int led; + + assert(k->pin_count <= PCA955X_PIN_COUNT_MAX); + for (led = 0; led < k->pin_count; led++) { + char *name; + + name = g_strdup_printf("led%d", led); + object_property_add(obj, name, "bool", pca955x_get_led, pca955x_set_led, + NULL, NULL); + g_free(name); + } +} + +static void pca955x_realize(DeviceState *dev, Error **errp) +{ + PCA955xClass *k = PCA955X_GET_CLASS(dev); + PCA955xState *s = PCA955X(dev); + + if (!s->description) { + s->description = g_strdup("pca-unspecified"); + } + + qdev_init_gpio_out(dev, s->gpio, k->pin_count); +} + +static Property pca955x_properties[] = { + DEFINE_PROP_STRING("description", PCA955xState, description), + DEFINE_PROP_END_OF_LIST(), +}; + +static void pca955x_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + I2CSlaveClass *k = I2C_SLAVE_CLASS(klass); + + k->event = pca955x_event; + k->recv = pca955x_recv; + k->send = pca955x_send; + dc->realize = pca955x_realize; + device_class_set_props(dc, pca955x_properties); +} + +static const TypeInfo pca955x_info = { + .name = TYPE_PCA955X, + .parent = TYPE_I2C_SLAVE, + .instance_init = pca955x_initfn, + .instance_size = sizeof(PCA955xState), + .class_init = pca955x_class_init, + .class_size = sizeof(PCA955xClass), + .abstract = true, +}; + +static void pca9552_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + PCA955xClass *pc = PCA955X_CLASS(oc); + + dc->reset = pca9552_reset; + dc->vmsd = &pca9552_vmstate; + pc->max_reg = PCA9552_LS3; + pc->pin_count = 16; +} + +static const TypeInfo pca9552_info = { + .name = TYPE_PCA9552, + .parent = TYPE_PCA955X, + .class_init = pca9552_class_init, +}; + +static void pca955x_register_types(void) +{ + type_register_static(&pca955x_info); + type_register_static(&pca9552_info); +} + +type_init(pca955x_register_types) diff --git a/hw/misc/pci-testdev.c b/hw/misc/pci-testdev.c new file mode 100644 index 000000000..03845c8de --- /dev/null +++ b/hw/misc/pci-testdev.c @@ -0,0 +1,361 @@ +/* + * QEMU PCI test device + * + * Copyright (c) 2012 Red Hat Inc. + * Author: Michael S. Tsirkin + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . 
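+ *
+ * Usage sketch: the device exposes one MMIO BAR and one port-I/O BAR;
+ * writing a test number to the 'test' byte of the header arms one of
+ * the (test, region-type) combinations built in realize, each
+ * optionally backed by an ioeventfd.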
+ */ + +#include "qemu/osdep.h" +#include "hw/pci/pci.h" +#include "hw/qdev-properties.h" +#include "qemu/event_notifier.h" +#include "qemu/module.h" +#include "sysemu/kvm.h" +#include "qom/object.h" + +typedef struct PCITestDevHdr { + uint8_t test; + uint8_t width; + uint8_t pad0[2]; + uint32_t offset; + uint8_t data; + uint8_t pad1[3]; + uint32_t count; + uint8_t name[]; +} PCITestDevHdr; + +typedef struct IOTest { + MemoryRegion *mr; + EventNotifier notifier; + bool hasnotifier; + unsigned size; + bool match_data; + PCITestDevHdr *hdr; + unsigned bufsize; +} IOTest; + +#define IOTEST_DATAMATCH 0xFA +#define IOTEST_NOMATCH 0xCE + +#define IOTEST_IOSIZE 128 +#define IOTEST_MEMSIZE 2048 + +static const char *iotest_test[] = { + "no-eventfd", + "wildcard-eventfd", + "datamatch-eventfd" +}; + +static const char *iotest_type[] = { + "mmio", + "portio" +}; + +#define IOTEST_TEST(i) (iotest_test[((i) % ARRAY_SIZE(iotest_test))]) +#define IOTEST_TYPE(i) (iotest_type[((i) / ARRAY_SIZE(iotest_test))]) +#define IOTEST_MAX_TEST (ARRAY_SIZE(iotest_test)) +#define IOTEST_MAX_TYPE (ARRAY_SIZE(iotest_type)) +#define IOTEST_MAX (IOTEST_MAX_TEST * IOTEST_MAX_TYPE) + +enum { + IOTEST_ACCESS_NAME, + IOTEST_ACCESS_DATA, + IOTEST_ACCESS_MAX, +}; + +#define IOTEST_ACCESS_TYPE uint8_t +#define IOTEST_ACCESS_WIDTH (sizeof(uint8_t)) + +struct PCITestDevState { + /*< private >*/ + PCIDevice parent_obj; + /*< public >*/ + + MemoryRegion mmio; + MemoryRegion portio; + IOTest *tests; + int current; + + uint64_t membar_size; + MemoryRegion membar; +}; + +#define TYPE_PCI_TEST_DEV "pci-testdev" + +OBJECT_DECLARE_SIMPLE_TYPE(PCITestDevState, PCI_TEST_DEV) + +#define IOTEST_IS_MEM(i) (strcmp(IOTEST_TYPE(i), "portio")) +#define IOTEST_REGION(d, i) (IOTEST_IS_MEM(i) ? &(d)->mmio : &(d)->portio) +#define IOTEST_SIZE(i) (IOTEST_IS_MEM(i) ? IOTEST_MEMSIZE : IOTEST_IOSIZE) +#define IOTEST_PCI_BAR(i) (IOTEST_IS_MEM(i) ? 
PCI_BASE_ADDRESS_SPACE_MEMORY : \ + PCI_BASE_ADDRESS_SPACE_IO) + +static int pci_testdev_start(IOTest *test) +{ + test->hdr->count = 0; + if (!test->hasnotifier) { + return 0; + } + event_notifier_test_and_clear(&test->notifier); + memory_region_add_eventfd(test->mr, + le32_to_cpu(test->hdr->offset), + test->size, + test->match_data, + test->hdr->data, + &test->notifier); + return 0; +} + +static void pci_testdev_stop(IOTest *test) +{ + if (!test->hasnotifier) { + return; + } + memory_region_del_eventfd(test->mr, + le32_to_cpu(test->hdr->offset), + test->size, + test->match_data, + test->hdr->data, + &test->notifier); +} + +static void +pci_testdev_reset(PCITestDevState *d) +{ + if (d->current == -1) { + return; + } + pci_testdev_stop(&d->tests[d->current]); + d->current = -1; +} + +static void pci_testdev_inc(IOTest *test, unsigned inc) +{ + uint32_t c = le32_to_cpu(test->hdr->count); + test->hdr->count = cpu_to_le32(c + inc); +} + +static void +pci_testdev_write(void *opaque, hwaddr addr, uint64_t val, + unsigned size, int type) +{ + PCITestDevState *d = opaque; + IOTest *test; + int t, r; + + if (addr == offsetof(PCITestDevHdr, test)) { + pci_testdev_reset(d); + if (val >= IOTEST_MAX_TEST) { + return; + } + t = type * IOTEST_MAX_TEST + val; + r = pci_testdev_start(&d->tests[t]); + if (r < 0) { + return; + } + d->current = t; + return; + } + if (d->current < 0) { + return; + } + test = &d->tests[d->current]; + if (addr != le32_to_cpu(test->hdr->offset)) { + return; + } + if (test->match_data && test->size != size) { + return; + } + if (test->match_data && val != test->hdr->data) { + return; + } + pci_testdev_inc(test, 1); +} + +static uint64_t +pci_testdev_read(void *opaque, hwaddr addr, unsigned size) +{ + PCITestDevState *d = opaque; + const char *buf; + IOTest *test; + if (d->current < 0) { + return 0; + } + test = &d->tests[d->current]; + buf = (const char *)test->hdr; + if (addr + size >= test->bufsize) { + return 0; + } + if (test->hasnotifier) { + event_notifier_test_and_clear(&test->notifier); + } + return buf[addr]; +} + +static void +pci_testdev_mmio_write(void *opaque, hwaddr addr, uint64_t val, + unsigned size) +{ + pci_testdev_write(opaque, addr, val, size, 0); +} + +static void +pci_testdev_pio_write(void *opaque, hwaddr addr, uint64_t val, + unsigned size) +{ + pci_testdev_write(opaque, addr, val, size, 1); +} + +static const MemoryRegionOps pci_testdev_mmio_ops = { + .read = pci_testdev_read, + .write = pci_testdev_mmio_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .impl = { + .min_access_size = 1, + .max_access_size = 1, + }, +}; + +static const MemoryRegionOps pci_testdev_pio_ops = { + .read = pci_testdev_read, + .write = pci_testdev_pio_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .impl = { + .min_access_size = 1, + .max_access_size = 1, + }, +}; + +static void pci_testdev_realize(PCIDevice *pci_dev, Error **errp) +{ + PCITestDevState *d = PCI_TEST_DEV(pci_dev); + uint8_t *pci_conf; + char *name; + int r, i; + bool fastmmio = kvm_ioeventfd_any_length_enabled(); + + pci_conf = pci_dev->config; + + pci_conf[PCI_INTERRUPT_PIN] = 0; /* no interrupt pin */ + + memory_region_init_io(&d->mmio, OBJECT(d), &pci_testdev_mmio_ops, d, + "pci-testdev-mmio", IOTEST_MEMSIZE * 2); + memory_region_init_io(&d->portio, OBJECT(d), &pci_testdev_pio_ops, d, + "pci-testdev-portio", IOTEST_IOSIZE * 2); + pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &d->mmio); + pci_register_bar(pci_dev, 1, PCI_BASE_ADDRESS_SPACE_IO, &d->portio); + + if (d->membar_size) { + 
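+        /*
+         * The optional "membar" property adds an empty 64-bit
+         * prefetchable memory BAR of the requested size, presumably so
+         * tests can exercise large-BAR enumeration without committing
+         * host RAM (the region is a pure container with no backing).
+         */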
memory_region_init(&d->membar, OBJECT(d), "pci-testdev-membar", + d->membar_size); + pci_register_bar(pci_dev, 2, + PCI_BASE_ADDRESS_SPACE_MEMORY | + PCI_BASE_ADDRESS_MEM_PREFETCH | + PCI_BASE_ADDRESS_MEM_TYPE_64, + &d->membar); + } + + d->current = -1; + d->tests = g_malloc0(IOTEST_MAX * sizeof *d->tests); + for (i = 0; i < IOTEST_MAX; ++i) { + IOTest *test = &d->tests[i]; + name = g_strdup_printf("%s-%s", IOTEST_TYPE(i), IOTEST_TEST(i)); + test->bufsize = sizeof(PCITestDevHdr) + strlen(name) + 1; + test->hdr = g_malloc0(test->bufsize); + memcpy(test->hdr->name, name, strlen(name) + 1); + g_free(name); + test->hdr->offset = cpu_to_le32(IOTEST_SIZE(i) + i * IOTEST_ACCESS_WIDTH); + test->match_data = strcmp(IOTEST_TEST(i), "wildcard-eventfd"); + if (fastmmio && IOTEST_IS_MEM(i) && !test->match_data) { + test->size = 0; + } else { + test->size = IOTEST_ACCESS_WIDTH; + } + test->hdr->test = i; + test->hdr->data = test->match_data ? IOTEST_DATAMATCH : IOTEST_NOMATCH; + test->hdr->width = IOTEST_ACCESS_WIDTH; + test->mr = IOTEST_REGION(d, i); + if (!strcmp(IOTEST_TEST(i), "no-eventfd")) { + test->hasnotifier = false; + continue; + } + r = event_notifier_init(&test->notifier, 0); + assert(r >= 0); + test->hasnotifier = true; + } +} + +static void +pci_testdev_uninit(PCIDevice *dev) +{ + PCITestDevState *d = PCI_TEST_DEV(dev); + int i; + + pci_testdev_reset(d); + for (i = 0; i < IOTEST_MAX; ++i) { + if (d->tests[i].hasnotifier) { + event_notifier_cleanup(&d->tests[i].notifier); + } + g_free(d->tests[i].hdr); + } + g_free(d->tests); +} + +static void qdev_pci_testdev_reset(DeviceState *dev) +{ + PCITestDevState *d = PCI_TEST_DEV(dev); + pci_testdev_reset(d); +} + +static Property pci_testdev_properties[] = { + DEFINE_PROP_SIZE("membar", PCITestDevState, membar_size, 0), + DEFINE_PROP_END_OF_LIST(), +}; + +static void pci_testdev_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + + k->realize = pci_testdev_realize; + k->exit = pci_testdev_uninit; + k->vendor_id = PCI_VENDOR_ID_REDHAT; + k->device_id = PCI_DEVICE_ID_REDHAT_TEST; + k->revision = 0x00; + k->class_id = PCI_CLASS_OTHERS; + dc->desc = "PCI Test Device"; + set_bit(DEVICE_CATEGORY_MISC, dc->categories); + dc->reset = qdev_pci_testdev_reset; + device_class_set_props(dc, pci_testdev_properties); +} + +static const TypeInfo pci_testdev_info = { + .name = TYPE_PCI_TEST_DEV, + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(PCITestDevState), + .class_init = pci_testdev_class_init, + .interfaces = (InterfaceInfo[]) { + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { }, + }, +}; + +static void pci_testdev_register_types(void) +{ + type_register_static(&pci_testdev_info); +} + +type_init(pci_testdev_register_types) diff --git a/hw/misc/pvpanic-isa.c b/hw/misc/pvpanic-isa.c new file mode 100644 index 000000000..7b66d58ac --- /dev/null +++ b/hw/misc/pvpanic-isa.c @@ -0,0 +1,93 @@ +/* + * QEMU simulated pvpanic device. + * + * Copyright Fujitsu, Corp. 2013 + * + * Authors: + * Wen Congyang + * Hu Tao + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
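+ *
+ * The ISA variant defaults to I/O port 0x505 and advertises the port
+ * to firmware through the fw_cfg file "etc/pvpanic-port" (see the
+ * realize function below).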
+ * + */ + +#include "qemu/osdep.h" +#include "qemu/module.h" +#include "sysemu/runstate.h" + +#include "hw/nvram/fw_cfg.h" +#include "hw/qdev-properties.h" +#include "hw/misc/pvpanic.h" +#include "qom/object.h" +#include "hw/isa/isa.h" + +OBJECT_DECLARE_SIMPLE_TYPE(PVPanicISAState, PVPANIC_ISA_DEVICE) + +/* + * PVPanicISAState for ISA device and + * use ioport. + */ +struct PVPanicISAState { + ISADevice parent_obj; + + uint16_t ioport; + PVPanicState pvpanic; +}; + +static void pvpanic_isa_initfn(Object *obj) +{ + PVPanicISAState *s = PVPANIC_ISA_DEVICE(obj); + + pvpanic_setup_io(&s->pvpanic, DEVICE(s), 1); +} + +static void pvpanic_isa_realizefn(DeviceState *dev, Error **errp) +{ + ISADevice *d = ISA_DEVICE(dev); + PVPanicISAState *s = PVPANIC_ISA_DEVICE(dev); + PVPanicState *ps = &s->pvpanic; + FWCfgState *fw_cfg = fw_cfg_find(); + uint16_t *pvpanic_port; + + if (!fw_cfg) { + return; + } + + pvpanic_port = g_malloc(sizeof(*pvpanic_port)); + *pvpanic_port = cpu_to_le16(s->ioport); + fw_cfg_add_file(fw_cfg, "etc/pvpanic-port", pvpanic_port, + sizeof(*pvpanic_port)); + + isa_register_ioport(d, &ps->mr, s->ioport); +} + +static Property pvpanic_isa_properties[] = { + DEFINE_PROP_UINT16(PVPANIC_IOPORT_PROP, PVPanicISAState, ioport, 0x505), + DEFINE_PROP_UINT8("events", PVPanicISAState, pvpanic.events, PVPANIC_PANICKED | PVPANIC_CRASHLOADED), + DEFINE_PROP_END_OF_LIST(), +}; + +static void pvpanic_isa_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = pvpanic_isa_realizefn; + device_class_set_props(dc, pvpanic_isa_properties); + set_bit(DEVICE_CATEGORY_MISC, dc->categories); +} + +static TypeInfo pvpanic_isa_info = { + .name = TYPE_PVPANIC_ISA_DEVICE, + .parent = TYPE_ISA_DEVICE, + .instance_size = sizeof(PVPanicISAState), + .instance_init = pvpanic_isa_initfn, + .class_init = pvpanic_isa_class_init, +}; + +static void pvpanic_register_types(void) +{ + type_register_static(&pvpanic_isa_info); +} + +type_init(pvpanic_register_types) diff --git a/hw/misc/pvpanic-pci.c b/hw/misc/pvpanic-pci.c new file mode 100644 index 000000000..af8cbe283 --- /dev/null +++ b/hw/misc/pvpanic-pci.c @@ -0,0 +1,93 @@ +/* + * QEMU simulated PCI pvpanic device. + * + * Copyright (C) 2020 Oracle + * + * Authors: + * Mihai Carabas + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
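+ *
+ * The PCI variant exposes the same event register through BAR 0 (a
+ * two-byte MMIO region) instead of an ISA I/O port.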
+ * + */ + +#include "qemu/osdep.h" +#include "qemu/module.h" +#include "sysemu/runstate.h" + +#include "hw/nvram/fw_cfg.h" +#include "hw/qdev-properties.h" +#include "migration/vmstate.h" +#include "hw/misc/pvpanic.h" +#include "qom/object.h" +#include "hw/pci/pci.h" + +OBJECT_DECLARE_SIMPLE_TYPE(PVPanicPCIState, PVPANIC_PCI_DEVICE) + +/* + * PVPanicPCIState for PCI device + */ +typedef struct PVPanicPCIState { + PCIDevice dev; + PVPanicState pvpanic; +} PVPanicPCIState; + +static const VMStateDescription vmstate_pvpanic_pci = { + .name = "pvpanic-pci", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_PCI_DEVICE(dev, PVPanicPCIState), + VMSTATE_END_OF_LIST() + } +}; + +static void pvpanic_pci_realizefn(PCIDevice *dev, Error **errp) +{ + PVPanicPCIState *s = PVPANIC_PCI_DEVICE(dev); + PVPanicState *ps = &s->pvpanic; + + pvpanic_setup_io(&s->pvpanic, DEVICE(s), 2); + + pci_register_bar(dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &ps->mr); +} + +static Property pvpanic_pci_properties[] = { + DEFINE_PROP_UINT8("events", PVPanicPCIState, pvpanic.events, PVPANIC_PANICKED | PVPANIC_CRASHLOADED), + DEFINE_PROP_END_OF_LIST(), +}; + +static void pvpanic_pci_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *pc = PCI_DEVICE_CLASS(klass); + + device_class_set_props(dc, pvpanic_pci_properties); + + pc->realize = pvpanic_pci_realizefn; + pc->vendor_id = PCI_VENDOR_ID_REDHAT; + pc->device_id = PCI_DEVICE_ID_REDHAT_PVPANIC; + pc->revision = 1; + pc->class_id = PCI_CLASS_SYSTEM_OTHER; + dc->vmsd = &vmstate_pvpanic_pci; + + set_bit(DEVICE_CATEGORY_MISC, dc->categories); +} + +static TypeInfo pvpanic_pci_info = { + .name = TYPE_PVPANIC_PCI_DEVICE, + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(PVPanicPCIState), + .class_init = pvpanic_pci_class_init, + .interfaces = (InterfaceInfo[]) { + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { } + } +}; + +static void pvpanic_register_types(void) +{ + type_register_static(&pvpanic_pci_info); +} + +type_init(pvpanic_register_types); diff --git a/hw/misc/pvpanic.c b/hw/misc/pvpanic.c new file mode 100644 index 000000000..e2cb4a5d2 --- /dev/null +++ b/hw/misc/pvpanic.c @@ -0,0 +1,70 @@ +/* + * QEMU simulated pvpanic device. + * + * Copyright Fujitsu, Corp. 2013 + * + * Authors: + * Wen Congyang + * Hu Tao + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
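+ *
+ * Guest protocol: reading the register returns the set of supported
+ * events; the guest reports a crash by writing one of those bits back
+ * (PVPANIC_PANICKED or PVPANIC_CRASHLOADED).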
+ * + */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "sysemu/runstate.h" + +#include "hw/nvram/fw_cfg.h" +#include "hw/qdev-properties.h" +#include "hw/misc/pvpanic.h" +#include "qom/object.h" + +static void handle_event(int event) +{ + static bool logged; + + if (event & ~(PVPANIC_PANICKED | PVPANIC_CRASHLOADED) && !logged) { + qemu_log_mask(LOG_GUEST_ERROR, "pvpanic: unknown event %#x.\n", event); + logged = true; + } + + if (event & PVPANIC_PANICKED) { + qemu_system_guest_panicked(NULL); + return; + } + + if (event & PVPANIC_CRASHLOADED) { + qemu_system_guest_crashloaded(NULL); + return; + } +} + +/* return supported events on read */ +static uint64_t pvpanic_read(void *opaque, hwaddr addr, unsigned size) +{ + PVPanicState *pvp = opaque; + return pvp->events; +} + +static void pvpanic_write(void *opaque, hwaddr addr, uint64_t val, + unsigned size) +{ + handle_event(val); +} + +static const MemoryRegionOps pvpanic_ops = { + .read = pvpanic_read, + .write = pvpanic_write, + .impl = { + .min_access_size = 1, + .max_access_size = 1, + }, +}; + +void pvpanic_setup_io(PVPanicState *s, DeviceState *dev, unsigned size) +{ + memory_region_init_io(&s->mr, OBJECT(dev), &pvpanic_ops, s, "pvpanic", size); +} diff --git a/hw/misc/sbsa_ec.c b/hw/misc/sbsa_ec.c new file mode 100644 index 000000000..83020fe9a --- /dev/null +++ b/hw/misc/sbsa_ec.c @@ -0,0 +1,98 @@ +/* + * ARM SBSA Reference Platform Embedded Controller + * + * A device to allow PSCI running in the secure side of sbsa-ref machine + * to communicate platform power states to qemu. + * + * Copyright (c) 2020 Nuvia Inc + * Written by Graeme Gregory + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "qemu-common.h" +#include "qemu/log.h" +#include "hw/sysbus.h" +#include "sysemu/runstate.h" + +typedef struct { + SysBusDevice parent_obj; + MemoryRegion iomem; +} SECUREECState; + +#define TYPE_SBSA_EC "sbsa-ec" +#define SECURE_EC(obj) OBJECT_CHECK(SECUREECState, (obj), TYPE_SBSA_EC) + +enum sbsa_ec_powerstates { + SBSA_EC_CMD_POWEROFF = 0x01, + SBSA_EC_CMD_REBOOT = 0x02, +}; + +static uint64_t sbsa_ec_read(void *opaque, hwaddr offset, unsigned size) +{ + /* No use for this currently */ + qemu_log_mask(LOG_GUEST_ERROR, "sbsa-ec: no readable registers"); + return 0; +} + +static void sbsa_ec_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + if (offset == 0) { /* PSCI machine power command register */ + switch (value) { + case SBSA_EC_CMD_POWEROFF: + qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN); + break; + case SBSA_EC_CMD_REBOOT: + qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "sbsa-ec: unknown power command"); + } + } else { + qemu_log_mask(LOG_GUEST_ERROR, "sbsa-ec: unknown EC register"); + } +} + +static const MemoryRegionOps sbsa_ec_ops = { + .read = sbsa_ec_read, + .write = sbsa_ec_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid.min_access_size = 4, + .valid.max_access_size = 4, +}; + +static void sbsa_ec_init(Object *obj) +{ + SECUREECState *s = SECURE_EC(obj); + SysBusDevice *dev = SYS_BUS_DEVICE(obj); + + memory_region_init_io(&s->iomem, obj, &sbsa_ec_ops, s, "sbsa-ec", + 0x1000); + sysbus_init_mmio(dev, &s->iomem); +} + +static void sbsa_ec_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + /* No vmstate or reset required: device has no internal state */ + dc->user_creatable = false; +} + +static const TypeInfo 
sbsa_ec_info = { + .name = TYPE_SBSA_EC, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(SECUREECState), + .instance_init = sbsa_ec_init, + .class_init = sbsa_ec_class_init, +}; + +static void sbsa_ec_register_type(void) +{ + type_register_static(&sbsa_ec_info); +} + +type_init(sbsa_ec_register_type); diff --git a/hw/misc/sga.c b/hw/misc/sga.c new file mode 100644 index 000000000..1d04672b0 --- /dev/null +++ b/hw/misc/sga.c @@ -0,0 +1,71 @@ +/* + * QEMU dummy ISA device for loading sgabios option rom. + * + * Copyright (c) 2011 Glauber Costa, Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + * + * sgabios code originally available at code.google.com/p/sgabios + * + */ + +#include "qemu/osdep.h" +#include "hw/isa/isa.h" +#include "hw/loader.h" +#include "qemu/module.h" +#include "qom/object.h" +#include "qemu/error-report.h" + +#define SGABIOS_FILENAME "sgabios.bin" + +#define TYPE_SGA "sga" +OBJECT_DECLARE_SIMPLE_TYPE(ISASGAState, SGA) + +struct ISASGAState { + ISADevice parent_obj; +}; + +static void sga_realizefn(DeviceState *dev, Error **errp) +{ + warn_report("-device sga is deprecated, use -machine graphics=off"); + rom_add_vga(SGABIOS_FILENAME); +} + +static void sga_class_initfn(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories); + dc->realize = sga_realizefn; + dc->desc = "Serial Graphics Adapter"; +} + +static const TypeInfo sga_info = { + .name = TYPE_SGA, + .parent = TYPE_ISA_DEVICE, + .instance_size = sizeof(ISASGAState), + .class_init = sga_class_initfn, +}; + +static void sga_register_types(void) +{ + type_register_static(&sga_info); +} + +type_init(sga_register_types) diff --git a/hw/misc/sifive_e_prci.c b/hw/misc/sifive_e_prci.c new file mode 100644 index 000000000..a8702c6a5 --- /dev/null +++ b/hw/misc/sifive_e_prci.c @@ -0,0 +1,124 @@ +/* + * QEMU SiFive E PRCI (Power, Reset, Clock, Interrupt) + * + * Copyright (c) 2017 SiFive, Inc. + * + * Simple model of the PRCI to emulate register reads made by the SDK BSP + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#include "qemu/osdep.h" +#include "hw/sysbus.h" +#include "qapi/error.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "hw/misc/sifive_e_prci.h" + +static uint64_t sifive_e_prci_read(void *opaque, hwaddr addr, unsigned int size) +{ + SiFiveEPRCIState *s = opaque; + switch (addr) { + case SIFIVE_E_PRCI_HFROSCCFG: + return s->hfrosccfg; + case SIFIVE_E_PRCI_HFXOSCCFG: + return s->hfxosccfg; + case SIFIVE_E_PRCI_PLLCFG: + return s->pllcfg; + case SIFIVE_E_PRCI_PLLOUTDIV: + return s->plloutdiv; + } + qemu_log_mask(LOG_GUEST_ERROR, "%s: read: addr=0x%x\n", + __func__, (int)addr); + return 0; +} + +static void sifive_e_prci_write(void *opaque, hwaddr addr, + uint64_t val64, unsigned int size) +{ + SiFiveEPRCIState *s = opaque; + switch (addr) { + case SIFIVE_E_PRCI_HFROSCCFG: + s->hfrosccfg = (uint32_t) val64; + /* OSC stays ready */ + s->hfrosccfg |= SIFIVE_E_PRCI_HFROSCCFG_RDY; + break; + case SIFIVE_E_PRCI_HFXOSCCFG: + s->hfxosccfg = (uint32_t) val64; + /* OSC stays ready */ + s->hfxosccfg |= SIFIVE_E_PRCI_HFXOSCCFG_RDY; + break; + case SIFIVE_E_PRCI_PLLCFG: + s->pllcfg = (uint32_t) val64; + /* PLL stays locked */ + s->pllcfg |= SIFIVE_E_PRCI_PLLCFG_LOCK; + break; + case SIFIVE_E_PRCI_PLLOUTDIV: + s->plloutdiv = (uint32_t) val64; + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, "%s: bad write: addr=0x%x v=0x%x\n", + __func__, (int)addr, (int)val64); + } +} + +static const MemoryRegionOps sifive_e_prci_ops = { + .read = sifive_e_prci_read, + .write = sifive_e_prci_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4 + } +}; + +static void sifive_e_prci_init(Object *obj) +{ + SiFiveEPRCIState *s = SIFIVE_E_PRCI(obj); + + memory_region_init_io(&s->mmio, obj, &sifive_e_prci_ops, s, + TYPE_SIFIVE_E_PRCI, SIFIVE_E_PRCI_REG_SIZE); + sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->mmio); + + s->hfrosccfg = (SIFIVE_E_PRCI_HFROSCCFG_RDY | SIFIVE_E_PRCI_HFROSCCFG_EN); + s->hfxosccfg = (SIFIVE_E_PRCI_HFXOSCCFG_RDY | SIFIVE_E_PRCI_HFXOSCCFG_EN); + s->pllcfg = (SIFIVE_E_PRCI_PLLCFG_REFSEL | SIFIVE_E_PRCI_PLLCFG_BYPASS | + SIFIVE_E_PRCI_PLLCFG_LOCK); + s->plloutdiv = SIFIVE_E_PRCI_PLLOUTDIV_DIV1; +} + +static const TypeInfo sifive_e_prci_info = { + .name = TYPE_SIFIVE_E_PRCI, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(SiFiveEPRCIState), + .instance_init = sifive_e_prci_init, +}; + +static void sifive_e_prci_register_types(void) +{ + type_register_static(&sifive_e_prci_info); +} + +type_init(sifive_e_prci_register_types) + + +/* + * Create PRCI device. + */ +DeviceState *sifive_e_prci_create(hwaddr addr) +{ + DeviceState *dev = qdev_new(TYPE_SIFIVE_E_PRCI); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, addr); + return dev; +} diff --git a/hw/misc/sifive_test.c b/hw/misc/sifive_test.c new file mode 100644 index 000000000..56df45bfe --- /dev/null +++ b/hw/misc/sifive_test.c @@ -0,0 +1,99 @@ +/* + * QEMU SiFive Test Finisher + * + * Copyright (c) 2018 SiFive, Inc. + * + * Test finisher memory mapped device used to exit simulation + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. 
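+ *
+ * Exit protocol (per the write handler below): a write of
+ * ((code << 16) | status) to offset 0 makes QEMU exit(code) on
+ * FINISHER_FAIL, exit(0) on FINISHER_PASS, or request a guest reset
+ * on FINISHER_RESET.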
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#include "qemu/osdep.h" +#include "hw/sysbus.h" +#include "qapi/error.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "sysemu/runstate.h" +#include "hw/misc/sifive_test.h" + +static uint64_t sifive_test_read(void *opaque, hwaddr addr, unsigned int size) +{ + return 0; +} + +static void sifive_test_write(void *opaque, hwaddr addr, + uint64_t val64, unsigned int size) +{ + if (addr == 0) { + int status = val64 & 0xffff; + int code = (val64 >> 16) & 0xffff; + switch (status) { + case FINISHER_FAIL: + exit(code); + case FINISHER_PASS: + exit(0); + case FINISHER_RESET: + qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); + return; + default: + break; + } + } + qemu_log_mask(LOG_GUEST_ERROR, "%s: write: addr=0x%x val=0x%016" PRIx64 "\n", + __func__, (int)addr, val64); +} + +static const MemoryRegionOps sifive_test_ops = { + .read = sifive_test_read, + .write = sifive_test_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + .min_access_size = 2, + .max_access_size = 4 + } +}; + +static void sifive_test_init(Object *obj) +{ + SiFiveTestState *s = SIFIVE_TEST(obj); + + memory_region_init_io(&s->mmio, obj, &sifive_test_ops, s, + TYPE_SIFIVE_TEST, 0x1000); + sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->mmio); +} + +static const TypeInfo sifive_test_info = { + .name = TYPE_SIFIVE_TEST, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(SiFiveTestState), + .instance_init = sifive_test_init, +}; + +static void sifive_test_register_types(void) +{ + type_register_static(&sifive_test_info); +} + +type_init(sifive_test_register_types) + + +/* + * Create Test device. + */ +DeviceState *sifive_test_create(hwaddr addr) +{ + DeviceState *dev = qdev_new(TYPE_SIFIVE_TEST); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, addr); + return dev; +} diff --git a/hw/misc/sifive_u_otp.c b/hw/misc/sifive_u_otp.c new file mode 100644 index 000000000..52fdb750c --- /dev/null +++ b/hw/misc/sifive_u_otp.c @@ -0,0 +1,301 @@ +/* + * QEMU SiFive U OTP (One-Time Programmable) Memory interface + * + * Copyright (c) 2019 Bin Meng + * + * Simple model of the OTP to emulate register reads made by the SDK BSP + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . 
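+ *
+ * The OTP contents live in a 16 KiB in-memory fuse array; when a block
+ * backend is attached ("-drive if=pflash"), fuse reads and programming
+ * go through the backend as well, so programmed bits persist across
+ * runs.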
+ */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "hw/qdev-properties.h" +#include "hw/qdev-properties-system.h" +#include "hw/sysbus.h" +#include "qemu/error-report.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "hw/misc/sifive_u_otp.h" +#include "sysemu/blockdev.h" +#include "sysemu/block-backend.h" + +#define WRITTEN_BIT_ON 0x1 + +#define SET_FUSEARRAY_BIT(map, i, off, bit) \ + map[i] = bit ? (map[i] | bit << off) : (map[i] & ~(0x1 << off)) + +#define GET_FUSEARRAY_BIT(map, i, off) \ + ((map[i] >> off) & 0x1) + +static uint64_t sifive_u_otp_read(void *opaque, hwaddr addr, unsigned int size) +{ + SiFiveUOTPState *s = opaque; + + switch (addr) { + case SIFIVE_U_OTP_PA: + return s->pa; + case SIFIVE_U_OTP_PAIO: + return s->paio; + case SIFIVE_U_OTP_PAS: + return s->pas; + case SIFIVE_U_OTP_PCE: + return s->pce; + case SIFIVE_U_OTP_PCLK: + return s->pclk; + case SIFIVE_U_OTP_PDIN: + return s->pdin; + case SIFIVE_U_OTP_PDOUT: + if ((s->pce & SIFIVE_U_OTP_PCE_EN) && + (s->pdstb & SIFIVE_U_OTP_PDSTB_EN) && + (s->ptrim & SIFIVE_U_OTP_PTRIM_EN)) { + + /* read from backend */ + if (s->blk) { + int32_t buf; + + if (blk_pread(s->blk, s->pa * SIFIVE_U_OTP_FUSE_WORD, &buf, + SIFIVE_U_OTP_FUSE_WORD) < 0) { + error_report("read error index<%d>", s->pa); + return 0xff; + } + + return buf; + } + + return s->fuse[s->pa & SIFIVE_U_OTP_PA_MASK]; + } else { + return 0xff; + } + case SIFIVE_U_OTP_PDSTB: + return s->pdstb; + case SIFIVE_U_OTP_PPROG: + return s->pprog; + case SIFIVE_U_OTP_PTC: + return s->ptc; + case SIFIVE_U_OTP_PTM: + return s->ptm; + case SIFIVE_U_OTP_PTM_REP: + return s->ptm_rep; + case SIFIVE_U_OTP_PTR: + return s->ptr; + case SIFIVE_U_OTP_PTRIM: + return s->ptrim; + case SIFIVE_U_OTP_PWE: + return s->pwe; + } + + qemu_log_mask(LOG_GUEST_ERROR, "%s: read: addr=0x%" HWADDR_PRIx "\n", + __func__, addr); + return 0; +} + +static void sifive_u_otp_write(void *opaque, hwaddr addr, + uint64_t val64, unsigned int size) +{ + SiFiveUOTPState *s = opaque; + uint32_t val32 = (uint32_t)val64; + + switch (addr) { + case SIFIVE_U_OTP_PA: + s->pa = val32 & SIFIVE_U_OTP_PA_MASK; + break; + case SIFIVE_U_OTP_PAIO: + s->paio = val32; + break; + case SIFIVE_U_OTP_PAS: + s->pas = val32; + break; + case SIFIVE_U_OTP_PCE: + s->pce = val32; + break; + case SIFIVE_U_OTP_PCLK: + s->pclk = val32; + break; + case SIFIVE_U_OTP_PDIN: + s->pdin = val32; + break; + case SIFIVE_U_OTP_PDOUT: + /* read-only */ + break; + case SIFIVE_U_OTP_PDSTB: + s->pdstb = val32; + break; + case SIFIVE_U_OTP_PPROG: + s->pprog = val32; + break; + case SIFIVE_U_OTP_PTC: + s->ptc = val32; + break; + case SIFIVE_U_OTP_PTM: + s->ptm = val32; + break; + case SIFIVE_U_OTP_PTM_REP: + s->ptm_rep = val32; + break; + case SIFIVE_U_OTP_PTR: + s->ptr = val32; + break; + case SIFIVE_U_OTP_PTRIM: + s->ptrim = val32; + break; + case SIFIVE_U_OTP_PWE: + s->pwe = val32 & SIFIVE_U_OTP_PWE_EN; + + /* PWE is enabled. 
Ignore PAS=1 (no redundancy cell) */ + if (s->pwe && !s->pas) { + if (GET_FUSEARRAY_BIT(s->fuse_wo, s->pa, s->paio)) { + qemu_log_mask(LOG_GUEST_ERROR, + "write once error: idx<%u>, bit<%u>\n", + s->pa, s->paio); + break; + } + + /* write bit data */ + SET_FUSEARRAY_BIT(s->fuse, s->pa, s->paio, s->pdin); + + /* write to backend */ + if (s->blk) { + if (blk_pwrite(s->blk, s->pa * SIFIVE_U_OTP_FUSE_WORD, + &s->fuse[s->pa], SIFIVE_U_OTP_FUSE_WORD, + 0) < 0) { + error_report("write error index<%d>", s->pa); + } + } + + /* update written bit */ + SET_FUSEARRAY_BIT(s->fuse_wo, s->pa, s->paio, WRITTEN_BIT_ON); + } + + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, "%s: bad write: addr=0x%" HWADDR_PRIx + " v=0x%x\n", __func__, addr, val32); + } +} + +static const MemoryRegionOps sifive_u_otp_ops = { + .read = sifive_u_otp_read, + .write = sifive_u_otp_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4 + } +}; + +static Property sifive_u_otp_properties[] = { + DEFINE_PROP_UINT32("serial", SiFiveUOTPState, serial, 0), + DEFINE_PROP_DRIVE("drive", SiFiveUOTPState, blk), + DEFINE_PROP_END_OF_LIST(), +}; + +static void sifive_u_otp_realize(DeviceState *dev, Error **errp) +{ + SiFiveUOTPState *s = SIFIVE_U_OTP(dev); + DriveInfo *dinfo; + + memory_region_init_io(&s->mmio, OBJECT(dev), &sifive_u_otp_ops, s, + TYPE_SIFIVE_U_OTP, SIFIVE_U_OTP_REG_SIZE); + sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->mmio); + + dinfo = drive_get_next(IF_PFLASH); + if (!dinfo) { + dinfo = drive_get_next(IF_NONE); + if (dinfo) { + warn_report("using \"-drive if=none\" for the OTP is deprecated, " + "use \"-drive if=pflash\" instead."); + } + } + if (dinfo) { + int ret; + uint64_t perm; + int filesize; + BlockBackend *blk; + + blk = blk_by_legacy_dinfo(dinfo); + filesize = SIFIVE_U_OTP_NUM_FUSES * SIFIVE_U_OTP_FUSE_WORD; + if (blk_getlength(blk) < filesize) { + error_setg(errp, "OTP drive size < 16K"); + return; + } + + qdev_prop_set_drive_err(dev, "drive", blk, errp); + + if (s->blk) { + perm = BLK_PERM_CONSISTENT_READ | + (blk_supports_write_perm(s->blk) ? 
BLK_PERM_WRITE : 0); + ret = blk_set_perm(s->blk, perm, BLK_PERM_ALL, errp); + if (ret < 0) { + return; + } + + if (blk_pread(s->blk, 0, s->fuse, filesize) != filesize) { + error_setg(errp, "failed to read the initial flash content"); + return; + } + } + } + + /* Initialize all fuses' initial value to 0xFFs */ + memset(s->fuse, 0xff, sizeof(s->fuse)); + + /* Make a valid content of serial number */ + s->fuse[SIFIVE_U_OTP_SERIAL_ADDR] = s->serial; + s->fuse[SIFIVE_U_OTP_SERIAL_ADDR + 1] = ~(s->serial); + + if (s->blk) { + /* Put serial number to backend as well*/ + uint32_t serial_data; + int index = SIFIVE_U_OTP_SERIAL_ADDR; + + serial_data = s->serial; + if (blk_pwrite(s->blk, index * SIFIVE_U_OTP_FUSE_WORD, + &serial_data, SIFIVE_U_OTP_FUSE_WORD, 0) < 0) { + error_setg(errp, "failed to write index<%d>", index); + return; + } + + serial_data = ~(s->serial); + if (blk_pwrite(s->blk, (index + 1) * SIFIVE_U_OTP_FUSE_WORD, + &serial_data, SIFIVE_U_OTP_FUSE_WORD, 0) < 0) { + error_setg(errp, "failed to write index<%d>", index + 1); + return; + } + } + + /* Initialize write-once map */ + memset(s->fuse_wo, 0x00, sizeof(s->fuse_wo)); +} + +static void sifive_u_otp_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + device_class_set_props(dc, sifive_u_otp_properties); + dc->realize = sifive_u_otp_realize; +} + +static const TypeInfo sifive_u_otp_info = { + .name = TYPE_SIFIVE_U_OTP, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(SiFiveUOTPState), + .class_init = sifive_u_otp_class_init, +}; + +static void sifive_u_otp_register_types(void) +{ + type_register_static(&sifive_u_otp_info); +} + +type_init(sifive_u_otp_register_types) diff --git a/hw/misc/sifive_u_prci.c b/hw/misc/sifive_u_prci.c new file mode 100644 index 000000000..5d9d446ee --- /dev/null +++ b/hw/misc/sifive_u_prci.c @@ -0,0 +1,169 @@ +/* + * QEMU SiFive U PRCI (Power, Reset, Clock, Interrupt) + * + * Copyright (c) 2019 Bin Meng + * + * Simple model of the PRCI to emulate register reads made by the SDK BSP + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . 
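+ *
+ * Writes that enable an oscillator or PLL get the corresponding
+ * ready/lock status bit forced on immediately in the write handler
+ * below, which is enough to satisfy the BSP's status polling.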
+ */ + +#include "qemu/osdep.h" +#include "hw/sysbus.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "hw/misc/sifive_u_prci.h" + +static uint64_t sifive_u_prci_read(void *opaque, hwaddr addr, unsigned int size) +{ + SiFiveUPRCIState *s = opaque; + + switch (addr) { + case SIFIVE_U_PRCI_HFXOSCCFG: + return s->hfxosccfg; + case SIFIVE_U_PRCI_COREPLLCFG0: + return s->corepllcfg0; + case SIFIVE_U_PRCI_DDRPLLCFG0: + return s->ddrpllcfg0; + case SIFIVE_U_PRCI_DDRPLLCFG1: + return s->ddrpllcfg1; + case SIFIVE_U_PRCI_GEMGXLPLLCFG0: + return s->gemgxlpllcfg0; + case SIFIVE_U_PRCI_GEMGXLPLLCFG1: + return s->gemgxlpllcfg1; + case SIFIVE_U_PRCI_CORECLKSEL: + return s->coreclksel; + case SIFIVE_U_PRCI_DEVICESRESET: + return s->devicesreset; + case SIFIVE_U_PRCI_CLKMUXSTATUS: + return s->clkmuxstatus; + } + + qemu_log_mask(LOG_GUEST_ERROR, "%s: read: addr=0x%" HWADDR_PRIx "\n", + __func__, addr); + + return 0; +} + +static void sifive_u_prci_write(void *opaque, hwaddr addr, + uint64_t val64, unsigned int size) +{ + SiFiveUPRCIState *s = opaque; + uint32_t val32 = (uint32_t)val64; + + switch (addr) { + case SIFIVE_U_PRCI_HFXOSCCFG: + s->hfxosccfg = val32; + /* OSC stays ready */ + s->hfxosccfg |= SIFIVE_U_PRCI_HFXOSCCFG_RDY; + break; + case SIFIVE_U_PRCI_COREPLLCFG0: + s->corepllcfg0 = val32; + /* internal feedback */ + s->corepllcfg0 |= SIFIVE_U_PRCI_PLLCFG0_FSE; + /* PLL stays locked */ + s->corepllcfg0 |= SIFIVE_U_PRCI_PLLCFG0_LOCK; + break; + case SIFIVE_U_PRCI_DDRPLLCFG0: + s->ddrpllcfg0 = val32; + /* internal feedback */ + s->ddrpllcfg0 |= SIFIVE_U_PRCI_PLLCFG0_FSE; + /* PLL stays locked */ + s->ddrpllcfg0 |= SIFIVE_U_PRCI_PLLCFG0_LOCK; + break; + case SIFIVE_U_PRCI_DDRPLLCFG1: + s->ddrpllcfg1 = val32; + break; + case SIFIVE_U_PRCI_GEMGXLPLLCFG0: + s->gemgxlpllcfg0 = val32; + /* internal feedback */ + s->gemgxlpllcfg0 |= SIFIVE_U_PRCI_PLLCFG0_FSE; + /* PLL stays locked */ + s->gemgxlpllcfg0 |= SIFIVE_U_PRCI_PLLCFG0_LOCK; + break; + case SIFIVE_U_PRCI_GEMGXLPLLCFG1: + s->gemgxlpllcfg1 = val32; + break; + case SIFIVE_U_PRCI_CORECLKSEL: + s->coreclksel = val32; + break; + case SIFIVE_U_PRCI_DEVICESRESET: + s->devicesreset = val32; + break; + case SIFIVE_U_PRCI_CLKMUXSTATUS: + s->clkmuxstatus = val32; + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, "%s: bad write: addr=0x%" HWADDR_PRIx + " v=0x%x\n", __func__, addr, val32); + } +} + +static const MemoryRegionOps sifive_u_prci_ops = { + .read = sifive_u_prci_read, + .write = sifive_u_prci_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4 + } +}; + +static void sifive_u_prci_realize(DeviceState *dev, Error **errp) +{ + SiFiveUPRCIState *s = SIFIVE_U_PRCI(dev); + + memory_region_init_io(&s->mmio, OBJECT(dev), &sifive_u_prci_ops, s, + TYPE_SIFIVE_U_PRCI, SIFIVE_U_PRCI_REG_SIZE); + sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->mmio); +} + +static void sifive_u_prci_reset(DeviceState *dev) +{ + SiFiveUPRCIState *s = SIFIVE_U_PRCI(dev); + + /* Initialize register to power-on-reset values */ + s->hfxosccfg = SIFIVE_U_PRCI_HFXOSCCFG_RDY | SIFIVE_U_PRCI_HFXOSCCFG_EN; + s->corepllcfg0 = SIFIVE_U_PRCI_PLLCFG0_DIVR | SIFIVE_U_PRCI_PLLCFG0_DIVF | + SIFIVE_U_PRCI_PLLCFG0_DIVQ | SIFIVE_U_PRCI_PLLCFG0_FSE | + SIFIVE_U_PRCI_PLLCFG0_LOCK; + s->ddrpllcfg0 = SIFIVE_U_PRCI_PLLCFG0_DIVR | SIFIVE_U_PRCI_PLLCFG0_DIVF | + SIFIVE_U_PRCI_PLLCFG0_DIVQ | SIFIVE_U_PRCI_PLLCFG0_FSE | + SIFIVE_U_PRCI_PLLCFG0_LOCK; + s->gemgxlpllcfg0 = SIFIVE_U_PRCI_PLLCFG0_DIVR | SIFIVE_U_PRCI_PLLCFG0_DIVF | + 
SIFIVE_U_PRCI_PLLCFG0_DIVQ | SIFIVE_U_PRCI_PLLCFG0_FSE | + SIFIVE_U_PRCI_PLLCFG0_LOCK; + s->coreclksel = SIFIVE_U_PRCI_CORECLKSEL_HFCLK; +} + +static void sifive_u_prci_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = sifive_u_prci_realize; + dc->reset = sifive_u_prci_reset; +} + +static const TypeInfo sifive_u_prci_info = { + .name = TYPE_SIFIVE_U_PRCI, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(SiFiveUPRCIState), + .class_init = sifive_u_prci_class_init, +}; + +static void sifive_u_prci_register_types(void) +{ + type_register_static(&sifive_u_prci_info); +} + +type_init(sifive_u_prci_register_types) diff --git a/hw/misc/slavio_misc.c b/hw/misc/slavio_misc.c new file mode 100644 index 000000000..e8eb71570 --- /dev/null +++ b/hw/misc/slavio_misc.c @@ -0,0 +1,515 @@ +/* + * QEMU Sparc SLAVIO aux io port emulation + * + * Copyright (c) 2005 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "hw/irq.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "qemu/module.h" +#include "sysemu/runstate.h" +#include "trace.h" +#include "qom/object.h" + +/* + * This is the auxio port, chip control and system control part of + * chip STP2001 (Slave I/O), also produced as NCR89C105. See + * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR89C105.txt + * + * This also includes the PMC CPU idle controller. 
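As the handlers below implement, the SLAVIO power-fail interrupt is the AND of a status bit and an enable bit: AUX2_PWRFAIL latches the power-fail input and CFG_PWRINTEN gates whether it reaches the IRQ line. A standalone restatement of that gating, reusing the constants the file defines:

    #include <stdbool.h>
    #include <stdint.h>

    #define AUX2_PWRFAIL 0x20    /* values as defined below */
    #define CFG_PWRINTEN 0x08

    /* Mirrors slavio_misc_update_irq(): the IRQ is asserted only while
     * both the power-fail status and the interrupt enable are set. */
    static bool slavio_power_irq_pending(uint8_t aux2, uint8_t config)
    {
        return (aux2 & AUX2_PWRFAIL) && (config & CFG_PWRINTEN);
    }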
+ */ + +#define TYPE_SLAVIO_MISC "slavio_misc" +OBJECT_DECLARE_SIMPLE_TYPE(MiscState, SLAVIO_MISC) + +struct MiscState { + SysBusDevice parent_obj; + + MemoryRegion cfg_iomem; + MemoryRegion diag_iomem; + MemoryRegion mdm_iomem; + MemoryRegion led_iomem; + MemoryRegion sysctrl_iomem; + MemoryRegion aux1_iomem; + MemoryRegion aux2_iomem; + qemu_irq irq; + qemu_irq fdc_tc; + uint32_t dummy; + uint8_t config; + uint8_t aux1, aux2; + uint8_t diag, mctrl; + uint8_t sysctrl; + uint16_t leds; +}; + +#define TYPE_APC "apc" +typedef struct APCState APCState; +DECLARE_INSTANCE_CHECKER(APCState, APC, + TYPE_APC) + +struct APCState { + SysBusDevice parent_obj; + + MemoryRegion iomem; + qemu_irq cpu_halt; +}; + +#define MISC_SIZE 1 +#define LED_SIZE 2 +#define SYSCTRL_SIZE 4 + +#define AUX1_TC 0x02 + +#define AUX2_PWROFF 0x01 +#define AUX2_PWRINTCLR 0x02 +#define AUX2_PWRFAIL 0x20 + +#define CFG_PWRINTEN 0x08 + +#define SYS_RESET 0x01 +#define SYS_RESETSTAT 0x02 + +static void slavio_misc_update_irq(void *opaque) +{ + MiscState *s = opaque; + + if ((s->aux2 & AUX2_PWRFAIL) && (s->config & CFG_PWRINTEN)) { + trace_slavio_misc_update_irq_raise(); + qemu_irq_raise(s->irq); + } else { + trace_slavio_misc_update_irq_lower(); + qemu_irq_lower(s->irq); + } +} + +static void slavio_misc_reset(DeviceState *d) +{ + MiscState *s = SLAVIO_MISC(d); + + // Diagnostic and system control registers not cleared in reset + s->config = s->aux1 = s->aux2 = s->mctrl = 0; +} + +static void slavio_set_power_fail(void *opaque, int irq, int power_failing) +{ + MiscState *s = opaque; + + trace_slavio_set_power_fail(power_failing, s->config); + if (power_failing && (s->config & CFG_PWRINTEN)) { + s->aux2 |= AUX2_PWRFAIL; + } else { + s->aux2 &= ~AUX2_PWRFAIL; + } + slavio_misc_update_irq(s); +} + +static void slavio_cfg_mem_writeb(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + MiscState *s = opaque; + + trace_slavio_cfg_mem_writeb(val & 0xff); + s->config = val & 0xff; + slavio_misc_update_irq(s); +} + +static uint64_t slavio_cfg_mem_readb(void *opaque, hwaddr addr, + unsigned size) +{ + MiscState *s = opaque; + uint32_t ret = 0; + + ret = s->config; + trace_slavio_cfg_mem_readb(ret); + return ret; +} + +static const MemoryRegionOps slavio_cfg_mem_ops = { + .read = slavio_cfg_mem_readb, + .write = slavio_cfg_mem_writeb, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + .min_access_size = 1, + .max_access_size = 1, + }, +}; + +static void slavio_diag_mem_writeb(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + MiscState *s = opaque; + + trace_slavio_diag_mem_writeb(val & 0xff); + s->diag = val & 0xff; +} + +static uint64_t slavio_diag_mem_readb(void *opaque, hwaddr addr, + unsigned size) +{ + MiscState *s = opaque; + uint32_t ret = 0; + + ret = s->diag; + trace_slavio_diag_mem_readb(ret); + return ret; +} + +static const MemoryRegionOps slavio_diag_mem_ops = { + .read = slavio_diag_mem_readb, + .write = slavio_diag_mem_writeb, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + .min_access_size = 1, + .max_access_size = 1, + }, +}; + +static void slavio_mdm_mem_writeb(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + MiscState *s = opaque; + + trace_slavio_mdm_mem_writeb(val & 0xff); + s->mctrl = val & 0xff; +} + +static uint64_t slavio_mdm_mem_readb(void *opaque, hwaddr addr, + unsigned size) +{ + MiscState *s = opaque; + uint32_t ret = 0; + + ret = s->mctrl; + trace_slavio_mdm_mem_readb(ret); + return ret; +} + +static const MemoryRegionOps slavio_mdm_mem_ops = { + .read = 
slavio_mdm_mem_readb, + .write = slavio_mdm_mem_writeb, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + .min_access_size = 1, + .max_access_size = 1, + }, +}; + +static void slavio_aux1_mem_writeb(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + MiscState *s = opaque; + + trace_slavio_aux1_mem_writeb(val & 0xff); + if (val & AUX1_TC) { + // Send a pulse to floppy terminal count line + if (s->fdc_tc) { + qemu_irq_raise(s->fdc_tc); + qemu_irq_lower(s->fdc_tc); + } + val &= ~AUX1_TC; + } + s->aux1 = val & 0xff; +} + +static uint64_t slavio_aux1_mem_readb(void *opaque, hwaddr addr, + unsigned size) +{ + MiscState *s = opaque; + uint32_t ret = 0; + + ret = s->aux1; + trace_slavio_aux1_mem_readb(ret); + return ret; +} + +static const MemoryRegionOps slavio_aux1_mem_ops = { + .read = slavio_aux1_mem_readb, + .write = slavio_aux1_mem_writeb, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + .min_access_size = 1, + .max_access_size = 1, + }, +}; + +static void slavio_aux2_mem_writeb(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + MiscState *s = opaque; + + val &= AUX2_PWRINTCLR | AUX2_PWROFF; + trace_slavio_aux2_mem_writeb(val & 0xff); + val |= s->aux2 & AUX2_PWRFAIL; + if (val & AUX2_PWRINTCLR) // Clear Power Fail int + val &= AUX2_PWROFF; + s->aux2 = val; + if (val & AUX2_PWROFF) + qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN); + slavio_misc_update_irq(s); +} + +static uint64_t slavio_aux2_mem_readb(void *opaque, hwaddr addr, + unsigned size) +{ + MiscState *s = opaque; + uint32_t ret = 0; + + ret = s->aux2; + trace_slavio_aux2_mem_readb(ret); + return ret; +} + +static const MemoryRegionOps slavio_aux2_mem_ops = { + .read = slavio_aux2_mem_readb, + .write = slavio_aux2_mem_writeb, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + .min_access_size = 1, + .max_access_size = 1, + }, +}; + +static void apc_mem_writeb(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + APCState *s = opaque; + + trace_apc_mem_writeb(val & 0xff); + qemu_irq_raise(s->cpu_halt); +} + +static uint64_t apc_mem_readb(void *opaque, hwaddr addr, + unsigned size) +{ + uint32_t ret = 0; + + trace_apc_mem_readb(ret); + return ret; +} + +static const MemoryRegionOps apc_mem_ops = { + .read = apc_mem_readb, + .write = apc_mem_writeb, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + .min_access_size = 1, + .max_access_size = 1, + } +}; + +static uint64_t slavio_sysctrl_mem_readl(void *opaque, hwaddr addr, + unsigned size) +{ + MiscState *s = opaque; + uint32_t ret = 0; + + switch (addr) { + case 0: + ret = s->sysctrl; + break; + default: + break; + } + trace_slavio_sysctrl_mem_readl(ret); + return ret; +} + +static void slavio_sysctrl_mem_writel(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + MiscState *s = opaque; + + trace_slavio_sysctrl_mem_writel(val); + switch (addr) { + case 0: + if (val & SYS_RESET) { + s->sysctrl = SYS_RESETSTAT; + qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); + } + break; + default: + break; + } +} + +static const MemoryRegionOps slavio_sysctrl_mem_ops = { + .read = slavio_sysctrl_mem_readl, + .write = slavio_sysctrl_mem_writel, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +static uint64_t slavio_led_mem_readw(void *opaque, hwaddr addr, + unsigned size) +{ + MiscState *s = opaque; + uint32_t ret = 0; + + switch (addr) { + case 0: + ret = s->leds; + break; + default: + break; + } + trace_slavio_led_mem_readw(ret); + return ret; +} + +static void 
slavio_led_mem_writew(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + MiscState *s = opaque; + + trace_slavio_led_mem_writew(val & 0xffff); + switch (addr) { + case 0: + s->leds = val; + break; + default: + break; + } +} + +static const MemoryRegionOps slavio_led_mem_ops = { + .read = slavio_led_mem_readw, + .write = slavio_led_mem_writew, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + .min_access_size = 2, + .max_access_size = 2, + }, +}; + +static const VMStateDescription vmstate_misc = { + .name = "slavio_misc", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(dummy, MiscState), + VMSTATE_UINT8(config, MiscState), + VMSTATE_UINT8(aux1, MiscState), + VMSTATE_UINT8(aux2, MiscState), + VMSTATE_UINT8(diag, MiscState), + VMSTATE_UINT8(mctrl, MiscState), + VMSTATE_UINT8(sysctrl, MiscState), + VMSTATE_END_OF_LIST() + } +}; + +static void apc_init(Object *obj) +{ + APCState *s = APC(obj); + SysBusDevice *dev = SYS_BUS_DEVICE(obj); + + sysbus_init_irq(dev, &s->cpu_halt); + + /* Power management (APC) XXX: not a Slavio device */ + memory_region_init_io(&s->iomem, obj, &apc_mem_ops, s, + "apc", MISC_SIZE); + sysbus_init_mmio(dev, &s->iomem); +} + +static void slavio_misc_init(Object *obj) +{ + DeviceState *dev = DEVICE(obj); + MiscState *s = SLAVIO_MISC(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + + sysbus_init_irq(sbd, &s->irq); + sysbus_init_irq(sbd, &s->fdc_tc); + + /* 8 bit registers */ + /* Slavio control */ + memory_region_init_io(&s->cfg_iomem, obj, &slavio_cfg_mem_ops, s, + "configuration", MISC_SIZE); + sysbus_init_mmio(sbd, &s->cfg_iomem); + + /* Diagnostics */ + memory_region_init_io(&s->diag_iomem, obj, &slavio_diag_mem_ops, s, + "diagnostic", MISC_SIZE); + sysbus_init_mmio(sbd, &s->diag_iomem); + + /* Modem control */ + memory_region_init_io(&s->mdm_iomem, obj, &slavio_mdm_mem_ops, s, + "modem", MISC_SIZE); + sysbus_init_mmio(sbd, &s->mdm_iomem); + + /* 16 bit registers */ + /* ss600mp diag LEDs */ + memory_region_init_io(&s->led_iomem, obj, &slavio_led_mem_ops, s, + "leds", LED_SIZE); + sysbus_init_mmio(sbd, &s->led_iomem); + + /* 32 bit registers */ + /* System control */ + memory_region_init_io(&s->sysctrl_iomem, obj, &slavio_sysctrl_mem_ops, s, + "system-control", SYSCTRL_SIZE); + sysbus_init_mmio(sbd, &s->sysctrl_iomem); + + /* AUX 1 (Misc System Functions) */ + memory_region_init_io(&s->aux1_iomem, obj, &slavio_aux1_mem_ops, s, + "misc-system-functions", MISC_SIZE); + sysbus_init_mmio(sbd, &s->aux1_iomem); + + /* AUX 2 (Software Powerdown Control) */ + memory_region_init_io(&s->aux2_iomem, obj, &slavio_aux2_mem_ops, s, + "software-powerdown-control", MISC_SIZE); + sysbus_init_mmio(sbd, &s->aux2_iomem); + + qdev_init_gpio_in(dev, slavio_set_power_fail, 1); +} + +static void slavio_misc_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = slavio_misc_reset; + dc->vmsd = &vmstate_misc; +} + +static const TypeInfo slavio_misc_info = { + .name = TYPE_SLAVIO_MISC, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(MiscState), + .instance_init = slavio_misc_init, + .class_init = slavio_misc_class_init, +}; + +static const TypeInfo apc_info = { + .name = TYPE_APC, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(APCState), + .instance_init = apc_init, +}; + +static void slavio_misc_register_types(void) +{ + type_register_static(&slavio_misc_info); + type_register_static(&apc_info); +} + +type_init(slavio_misc_register_types) diff --git
a/hw/misc/stm32f2xx_syscfg.c b/hw/misc/stm32f2xx_syscfg.c new file mode 100644 index 000000000..04c22c285 --- /dev/null +++ b/hw/misc/stm32f2xx_syscfg.c @@ -0,0 +1,161 @@ +/* + * STM32F2XX SYSCFG + * + * Copyright (c) 2014 Alistair Francis + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "hw/misc/stm32f2xx_syscfg.h" +#include "qemu/log.h" +#include "qemu/module.h" + +#ifndef STM_SYSCFG_ERR_DEBUG +#define STM_SYSCFG_ERR_DEBUG 0 +#endif + +#define DB_PRINT_L(lvl, fmt, args...) do { \ + if (STM_SYSCFG_ERR_DEBUG >= lvl) { \ + qemu_log("%s: " fmt, __func__, ## args); \ + } \ +} while (0) + +#define DB_PRINT(fmt, args...) DB_PRINT_L(1, fmt, ## args) + +static void stm32f2xx_syscfg_reset(DeviceState *dev) +{ + STM32F2XXSyscfgState *s = STM32F2XX_SYSCFG(dev); + + s->syscfg_memrmp = 0x00000000; + s->syscfg_pmc = 0x00000000; + s->syscfg_exticr1 = 0x00000000; + s->syscfg_exticr2 = 0x00000000; + s->syscfg_exticr3 = 0x00000000; + s->syscfg_exticr4 = 0x00000000; + s->syscfg_cmpcr = 0x00000000; +} + +static uint64_t stm32f2xx_syscfg_read(void *opaque, hwaddr addr, + unsigned int size) +{ + STM32F2XXSyscfgState *s = opaque; + + DB_PRINT("0x%"HWADDR_PRIx"\n", addr); + + switch (addr) { + case SYSCFG_MEMRMP: + return s->syscfg_memrmp; + case SYSCFG_PMC: + return s->syscfg_pmc; + case SYSCFG_EXTICR1: + return s->syscfg_exticr1; + case SYSCFG_EXTICR2: + return s->syscfg_exticr2; + case SYSCFG_EXTICR3: + return s->syscfg_exticr3; + case SYSCFG_EXTICR4: + return s->syscfg_exticr4; + case SYSCFG_CMPCR: + return s->syscfg_cmpcr; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad offset 0x%"HWADDR_PRIx"\n", __func__, addr); + return 0; + } + + return 0; +} + +static void stm32f2xx_syscfg_write(void *opaque, hwaddr addr, + uint64_t val64, unsigned int size) +{ + STM32F2XXSyscfgState *s = opaque; + uint32_t value = val64; + + DB_PRINT("0x%x, 0x%"HWADDR_PRIx"\n", value, addr); + + switch (addr) { + case SYSCFG_MEMRMP: + qemu_log_mask(LOG_UNIMP, + "%s: Changing the memory mapping isn't supported " \ + "in QEMU\n", __func__); + return; + case SYSCFG_PMC: + qemu_log_mask(LOG_UNIMP, + "%s: Changing the peripheral mode configuration isn't " \ + "supported in QEMU\n", __func__); + return; + case SYSCFG_EXTICR1: + s->syscfg_exticr1 = (value & 0xFFFF); + return; + case SYSCFG_EXTICR2: + s->syscfg_exticr2 = (value & 0xFFFF); + return; + case SYSCFG_EXTICR3: + s->syscfg_exticr3 = (value & 0xFFFF); + return; + case SYSCFG_EXTICR4: + s->syscfg_exticr4 = (value & 0xFFFF); +
return; + case SYSCFG_CMPCR: + s->syscfg_cmpcr = value; + return; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad offset 0x%"HWADDR_PRIx"\n", __func__, addr); + } +} + +static const MemoryRegionOps stm32f2xx_syscfg_ops = { + .read = stm32f2xx_syscfg_read, + .write = stm32f2xx_syscfg_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void stm32f2xx_syscfg_init(Object *obj) +{ + STM32F2XXSyscfgState *s = STM32F2XX_SYSCFG(obj); + + memory_region_init_io(&s->mmio, obj, &stm32f2xx_syscfg_ops, s, + TYPE_STM32F2XX_SYSCFG, 0x400); + sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->mmio); +} + +static void stm32f2xx_syscfg_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = stm32f2xx_syscfg_reset; +} + +static const TypeInfo stm32f2xx_syscfg_info = { + .name = TYPE_STM32F2XX_SYSCFG, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(STM32F2XXSyscfgState), + .instance_init = stm32f2xx_syscfg_init, + .class_init = stm32f2xx_syscfg_class_init, +}; + +static void stm32f2xx_syscfg_register_types(void) +{ + type_register_static(&stm32f2xx_syscfg_info); +} + +type_init(stm32f2xx_syscfg_register_types) diff --git a/hw/misc/stm32f4xx_exti.c b/hw/misc/stm32f4xx_exti.c new file mode 100644 index 000000000..02e781004 --- /dev/null +++ b/hw/misc/stm32f4xx_exti.c @@ -0,0 +1,188 @@ +/* + * STM32F4XX EXTI + * + * Copyright (c) 2014 Alistair Francis + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
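The EXTICR registers modelled above (and again in the F4 SYSCFG later in this patch) follow the STM32 reference-manual layout: four EXTI lines per register, with a 4-bit field per line selecting the GPIO port that drives it. A standalone sketch of that addressing:

    #include <stdio.h>

    /* Map an EXTI line to its EXTICRn register and bit field: four lines
     * per register, four configuration bits per line. */
    static void exticr_locate(unsigned line, unsigned *reg, unsigned *shift)
    {
        *reg = line / 4;          /* 0 -> EXTICR1 ... 3 -> EXTICR4 */
        *shift = (line % 4) * 4;  /* low bit of the 4-bit field */
    }

    int main(void)
    {
        unsigned reg, shift;

        exticr_locate(10, &reg, &shift);
        printf("EXTI line 10: EXTICR%u, bits [%u:%u]\n",
               reg + 1, shift + 3, shift);
        return 0;
    }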
+ */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "trace.h" +#include "hw/irq.h" +#include "migration/vmstate.h" +#include "hw/misc/stm32f4xx_exti.h" + +static void stm32f4xx_exti_reset(DeviceState *dev) +{ + STM32F4xxExtiState *s = STM32F4XX_EXTI(dev); + + s->exti_imr = 0x00000000; + s->exti_emr = 0x00000000; + s->exti_rtsr = 0x00000000; + s->exti_ftsr = 0x00000000; + s->exti_swier = 0x00000000; + s->exti_pr = 0x00000000; +} + +static void stm32f4xx_exti_set_irq(void *opaque, int irq, int level) +{ + STM32F4xxExtiState *s = opaque; + + trace_stm32f4xx_exti_set_irq(irq, level); + + if (((1 << irq) & s->exti_rtsr) && level) { + /* Rising Edge */ + s->exti_pr |= 1 << irq; + } + + if (((1 << irq) & s->exti_ftsr) && !level) { + /* Falling Edge */ + s->exti_pr |= 1 << irq; + } + + if (!((1 << irq) & s->exti_imr)) { + /* Interrupt is masked */ + return; + } + qemu_irq_pulse(s->irq[irq]); +} + +static uint64_t stm32f4xx_exti_read(void *opaque, hwaddr addr, + unsigned int size) +{ + STM32F4xxExtiState *s = opaque; + + trace_stm32f4xx_exti_read(addr); + + switch (addr) { + case EXTI_IMR: + return s->exti_imr; + case EXTI_EMR: + return s->exti_emr; + case EXTI_RTSR: + return s->exti_rtsr; + case EXTI_FTSR: + return s->exti_ftsr; + case EXTI_SWIER: + return s->exti_swier; + case EXTI_PR: + return s->exti_pr; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "STM32F4XX_exti_read: Bad offset %x\n", (int)addr); + return 0; + } + return 0; +} + +static void stm32f4xx_exti_write(void *opaque, hwaddr addr, + uint64_t val64, unsigned int size) +{ + STM32F4xxExtiState *s = opaque; + uint32_t value = (uint32_t) val64; + + trace_stm32f4xx_exti_write(addr, value); + + switch (addr) { + case EXTI_IMR: + s->exti_imr = value; + return; + case EXTI_EMR: + s->exti_emr = value; + return; + case EXTI_RTSR: + s->exti_rtsr = value; + return; + case EXTI_FTSR: + s->exti_ftsr = value; + return; + case EXTI_SWIER: + s->exti_swier = value; + return; + case EXTI_PR: + /* This bit is cleared by writing a 1 to it */ + s->exti_pr &= ~value; + return; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "STM32F4XX_exti_write: Bad offset %x\n", (int)addr); + } +} + +static const MemoryRegionOps stm32f4xx_exti_ops = { + .read = stm32f4xx_exti_read, + .write = stm32f4xx_exti_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void stm32f4xx_exti_init(Object *obj) +{ + STM32F4xxExtiState *s = STM32F4XX_EXTI(obj); + int i; + + for (i = 0; i < NUM_INTERRUPT_OUT_LINES; i++) { + sysbus_init_irq(SYS_BUS_DEVICE(obj), &s->irq[i]); + } + + memory_region_init_io(&s->mmio, obj, &stm32f4xx_exti_ops, s, + TYPE_STM32F4XX_EXTI, 0x400); + sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->mmio); + + qdev_init_gpio_in(DEVICE(obj), stm32f4xx_exti_set_irq, + NUM_GPIO_EVENT_IN_LINES); +} + +static const VMStateDescription vmstate_stm32f4xx_exti = { + .name = TYPE_STM32F4XX_EXTI, + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(exti_imr, STM32F4xxExtiState), + VMSTATE_UINT32(exti_emr, STM32F4xxExtiState), + VMSTATE_UINT32(exti_rtsr, STM32F4xxExtiState), + VMSTATE_UINT32(exti_ftsr, STM32F4xxExtiState), + VMSTATE_UINT32(exti_swier, STM32F4xxExtiState), + VMSTATE_UINT32(exti_pr, STM32F4xxExtiState), + VMSTATE_END_OF_LIST() + } +}; + +static void stm32f4xx_exti_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = stm32f4xx_exti_reset; + dc->vmsd = &vmstate_stm32f4xx_exti; +} + +static const TypeInfo stm32f4xx_exti_info = { + .name = TYPE_STM32F4XX_EXTI, + 
.parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(STM32F4xxExtiState), + .instance_init = stm32f4xx_exti_init, + .class_init = stm32f4xx_exti_class_init, +}; + +static void stm32f4xx_exti_register_types(void) +{ + type_register_static(&stm32f4xx_exti_info); +} + +type_init(stm32f4xx_exti_register_types) diff --git a/hw/misc/stm32f4xx_syscfg.c b/hw/misc/stm32f4xx_syscfg.c new file mode 100644 index 000000000..f960e4ea1 --- /dev/null +++ b/hw/misc/stm32f4xx_syscfg.c @@ -0,0 +1,171 @@ +/* + * STM32F4xx SYSCFG + * + * Copyright (c) 2014 Alistair Francis + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "trace.h" +#include "hw/irq.h" +#include "migration/vmstate.h" +#include "hw/misc/stm32f4xx_syscfg.h" + +static void stm32f4xx_syscfg_reset(DeviceState *dev) +{ + STM32F4xxSyscfgState *s = STM32F4XX_SYSCFG(dev); + + s->syscfg_memrmp = 0x00000000; + s->syscfg_pmc = 0x00000000; + s->syscfg_exticr[0] = 0x00000000; + s->syscfg_exticr[1] = 0x00000000; + s->syscfg_exticr[2] = 0x00000000; + s->syscfg_exticr[3] = 0x00000000; + s->syscfg_cmpcr = 0x00000000; +} + +static void stm32f4xx_syscfg_set_irq(void *opaque, int irq, int level) +{ + STM32F4xxSyscfgState *s = opaque; + int icrreg = irq / 4; + int startbit = (irq & 3) * 4; + uint8_t config = irq / 16; + + trace_stm32f4xx_syscfg_set_irq(irq / 16, irq % 16, level); + + g_assert(icrreg < SYSCFG_NUM_EXTICR); + + if (extract32(s->syscfg_exticr[icrreg], startbit, 4) == config) { + qemu_set_irq(s->gpio_out[irq], level); + trace_stm32f4xx_pulse_exti(irq); + } +} + +static uint64_t stm32f4xx_syscfg_read(void *opaque, hwaddr addr, + unsigned int size) +{ + STM32F4xxSyscfgState *s = opaque; + + trace_stm32f4xx_syscfg_read(addr); + + switch (addr) { + case SYSCFG_MEMRMP: + return s->syscfg_memrmp; + case SYSCFG_PMC: + return s->syscfg_pmc; + case SYSCFG_EXTICR1...SYSCFG_EXTICR4: + return s->syscfg_exticr[addr / 4 - SYSCFG_EXTICR1 / 4]; + case SYSCFG_CMPCR: + return s->syscfg_cmpcr; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad offset 0x%"HWADDR_PRIx"\n", __func__, addr); + return 0; + } +} + +static void stm32f4xx_syscfg_write(void *opaque, hwaddr addr, + uint64_t val64, unsigned int size) +{ + STM32F4xxSyscfgState *s = opaque; + uint32_t value = val64; + + trace_stm32f4xx_syscfg_write(value, addr); + + switch (addr) { + case SYSCFG_MEMRMP: + qemu_log_mask(LOG_UNIMP, + "%s: Changing the memory mapping isn't supported " \ + "in QEMU\n", __func__); + return; + 
case SYSCFG_PMC: + qemu_log_mask(LOG_UNIMP, + "%s: Changing the peripheral mode configuration isn't " \ + "supported in QEMU\n", __func__); + return; + case SYSCFG_EXTICR1...SYSCFG_EXTICR4: + s->syscfg_exticr[addr / 4 - SYSCFG_EXTICR1 / 4] = (value & 0xFFFF); + return; + case SYSCFG_CMPCR: + s->syscfg_cmpcr = value; + return; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad offset 0x%"HWADDR_PRIx"\n", __func__, addr); + } +} + +static const MemoryRegionOps stm32f4xx_syscfg_ops = { + .read = stm32f4xx_syscfg_read, + .write = stm32f4xx_syscfg_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void stm32f4xx_syscfg_init(Object *obj) +{ + STM32F4xxSyscfgState *s = STM32F4XX_SYSCFG(obj); + + sysbus_init_irq(SYS_BUS_DEVICE(obj), &s->irq); + + memory_region_init_io(&s->mmio, obj, &stm32f4xx_syscfg_ops, s, + TYPE_STM32F4XX_SYSCFG, 0x400); + sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->mmio); + + qdev_init_gpio_in(DEVICE(obj), stm32f4xx_syscfg_set_irq, 16 * 9); + qdev_init_gpio_out(DEVICE(obj), s->gpio_out, 16); +} + +static const VMStateDescription vmstate_stm32f4xx_syscfg = { + .name = TYPE_STM32F4XX_SYSCFG, + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(syscfg_memrmp, STM32F4xxSyscfgState), + VMSTATE_UINT32(syscfg_pmc, STM32F4xxSyscfgState), + VMSTATE_UINT32_ARRAY(syscfg_exticr, STM32F4xxSyscfgState, + SYSCFG_NUM_EXTICR), + VMSTATE_UINT32(syscfg_cmpcr, STM32F4xxSyscfgState), + VMSTATE_END_OF_LIST() + } +}; + +static void stm32f4xx_syscfg_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = stm32f4xx_syscfg_reset; + dc->vmsd = &vmstate_stm32f4xx_syscfg; +} + +static const TypeInfo stm32f4xx_syscfg_info = { + .name = TYPE_STM32F4XX_SYSCFG, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(STM32F4xxSyscfgState), + .instance_init = stm32f4xx_syscfg_init, + .class_init = stm32f4xx_syscfg_class_init, +}; + +static void stm32f4xx_syscfg_register_types(void) +{ + type_register_static(&stm32f4xx_syscfg_info); +} + +type_init(stm32f4xx_syscfg_register_types) diff --git a/hw/misc/trace-events b/hw/misc/trace-events new file mode 100644 index 000000000..2da96d167 --- /dev/null +++ b/hw/misc/trace-events @@ -0,0 +1,255 @@ +# See docs/devel/tracing.rst for syntax documentation.
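Each entry in this file declares one trace point: tracetool turns the name and typed argument list into a trace_<name>() helper, which is what the device models earlier in this patch call, and the trailing string is an ordinary printf-style format. The formats rely on C's adjacent-literal concatenation with the <inttypes.h> macros; a standalone demonstration using the stm32f4xx_exti_write format defined further down:

    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t addr = 0x0c, val = 0x8;

        /* Same string as the stm32f4xx_exti_write event below; the
         * PRIx64 macros splice into the adjacent literals. */
        printf("reg write: addr: 0x%" PRIx64 " val: 0x%" PRIx64 "\n",
               addr, val);
        return 0;
    }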
+ +# allwinner-cpucfg.c +allwinner_cpucfg_cpu_reset(uint8_t cpu_id, uint32_t reset_addr) "id %u, reset_addr 0x%" PRIx32 +allwinner_cpucfg_read(uint64_t offset, uint64_t data, unsigned size) "offset 0x%" PRIx64 " data 0x%" PRIx64 " size %" PRIu32 +allwinner_cpucfg_write(uint64_t offset, uint64_t data, unsigned size) "offset 0x%" PRIx64 " data 0x%" PRIx64 " size %" PRIu32 + +# allwinner-h3-dramc.c +allwinner_h3_dramc_rowmirror_disable(void) "Disable row mirror" +allwinner_h3_dramc_rowmirror_enable(uint64_t addr) "Enable row mirror: addr 0x%" PRIx64 +allwinner_h3_dramcom_read(uint64_t offset, uint64_t data, unsigned size) "Read: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %" PRIu32 +allwinner_h3_dramcom_write(uint64_t offset, uint64_t data, unsigned size) "Write: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %" PRIu32 +allwinner_h3_dramctl_read(uint64_t offset, uint64_t data, unsigned size) "Read: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %" PRIu32 +allwinner_h3_dramctl_write(uint64_t offset, uint64_t data, unsigned size) "Write: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %" PRIu32 +allwinner_h3_dramphy_read(uint64_t offset, uint64_t data, unsigned size) "Read: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %" PRIu32 +allwinner_h3_dramphy_write(uint64_t offset, uint64_t data, unsigned size) "write: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %" PRIu32 + +# allwinner-sid.c +allwinner_sid_read(uint64_t offset, uint64_t data, unsigned size) "offset 0x%" PRIx64 " data 0x%" PRIx64 " size %" PRIu32 +allwinner_sid_write(uint64_t offset, uint64_t data, unsigned size) "offset 0x%" PRIx64 " data 0x%" PRIx64 " size %" PRIu32 + +# avr_power.c +avr_power_read(uint8_t value) "power_reduc read value:%u" +avr_power_write(uint8_t value) "power_reduc write value:%u" + +# eccmemctl.c +ecc_mem_writel_mer(uint32_t val) "Write memory enable 0x%08x" +ecc_mem_writel_mdr(uint32_t val) "Write memory delay 0x%08x" +ecc_mem_writel_mfsr(uint32_t val) "Write memory fault status 0x%08x" +ecc_mem_writel_vcr(uint32_t val) "Write slot configuration 0x%08x" +ecc_mem_writel_dr(uint32_t val) "Write diagnostic 0x%08x" +ecc_mem_writel_ecr0(uint32_t val) "Write event count 1 0x%08x" +ecc_mem_writel_ecr1(uint32_t val) "Write event count 2 0x%08x" +ecc_mem_readl_mer(uint32_t ret) "Read memory enable 0x%08x" +ecc_mem_readl_mdr(uint32_t ret) "Read memory delay 0x%08x" +ecc_mem_readl_mfsr(uint32_t ret) "Read memory fault status 0x%08x" +ecc_mem_readl_vcr(uint32_t ret) "Read slot configuration 0x%08x" +ecc_mem_readl_mfar0(uint32_t ret) "Read memory fault address 0 0x%08x" +ecc_mem_readl_mfar1(uint32_t ret) "Read memory fault address 1 0x%08x" +ecc_mem_readl_dr(uint32_t ret) "Read diagnostic 0x%08x" +ecc_mem_readl_ecr0(uint32_t ret) "Read event count 1 0x%08x" +ecc_mem_readl_ecr1(uint32_t ret) "Read event count 2 0x%08x" +ecc_diag_mem_writeb(uint64_t addr, uint32_t val) "Write diagnostic %"PRId64" = 0x%02x" +ecc_diag_mem_readb(uint64_t addr, uint32_t ret) "Read diagnostic %"PRId64" = 0x%02x" + +# empty_slot.c +empty_slot_write(uint64_t addr, unsigned width, uint64_t value, unsigned size, const char *name) "wr addr:0x%04"PRIx64" data:0x%0*"PRIx64" size %u [%s]" + +# slavio_misc.c +slavio_misc_update_irq_raise(void) "Raise IRQ" +slavio_misc_update_irq_lower(void) "Lower IRQ" +slavio_set_power_fail(int power_failing, uint8_t config) "Power fail: %d, config: %d" +slavio_cfg_mem_writeb(uint32_t val) "Write config 0x%02x" +slavio_cfg_mem_readb(uint32_t ret) "Read config 0x%02x" +slavio_diag_mem_writeb(uint32_t val) "Write diag 0x%02x"
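A less common printf feature appears in the empty_slot_write entry above: a runtime field width, where '*' consumes the event's width argument before printing the data value. A standalone illustration:

    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t addr = 0x100, value = 0xab;
        int width = 4;    /* digits to print, supplied through the '*' */

        printf("wr addr:0x%04" PRIx64 " data:0x%0*" PRIx64 " size %u [%s]\n",
               addr, width, value, 2u, "test");
        return 0;
    }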
+slavio_diag_mem_readb(uint32_t ret) "Read diag 0x%02x" +slavio_mdm_mem_writeb(uint32_t val) "Write modem control 0x%02x" +slavio_mdm_mem_readb(uint32_t ret) "Read modem control 0x%02x" +slavio_aux1_mem_writeb(uint32_t val) "Write aux1 0x%02x" +slavio_aux1_mem_readb(uint32_t ret) "Read aux1 0x%02x" +slavio_aux2_mem_writeb(uint32_t val) "Write aux2 0x%02x" +slavio_aux2_mem_readb(uint32_t ret) "Read aux2 0x%02x" +apc_mem_writeb(uint32_t val) "Write power management 0x%02x" +apc_mem_readb(uint32_t ret) "Read power management 0x%02x" +slavio_sysctrl_mem_writel(uint32_t val) "Write system control 0x%08x" +slavio_sysctrl_mem_readl(uint32_t ret) "Read system control 0x%08x" +slavio_led_mem_writew(uint32_t val) "Write diagnostic LED 0x%04x" +slavio_led_mem_readw(uint32_t ret) "Read diagnostic LED 0x%04x" + +# aspeed_scu.c +aspeed_scu_write(uint64_t offset, unsigned size, uint32_t data) "To 0x%" PRIx64 " of size %u: 0x%" PRIx32 + +# mps2-scc.c +mps2_scc_read(uint64_t offset, uint64_t data, unsigned size) "MPS2 SCC read: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u" +mps2_scc_write(uint64_t offset, uint64_t data, unsigned size) "MPS2 SCC write: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u" +mps2_scc_reset(void) "MPS2 SCC: reset" +mps2_scc_cfg_write(unsigned function, unsigned device, uint32_t value) "MPS2 SCC config write: function %d device %d data 0x%" PRIx32 +mps2_scc_cfg_read(unsigned function, unsigned device, uint32_t value) "MPS2 SCC config read: function %d device %d data 0x%" PRIx32 + +# mps2-fpgaio.c +mps2_fpgaio_read(uint64_t offset, uint64_t data, unsigned size) "MPS2 FPGAIO read: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u" +mps2_fpgaio_write(uint64_t offset, uint64_t data, unsigned size) "MPS2 FPGAIO write: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u" +mps2_fpgaio_reset(void) "MPS2 FPGAIO: reset" + +# msf2-sysreg.c +msf2_sysreg_write(uint64_t offset, uint32_t val, uint32_t prev) "msf2-sysreg write: addr 0x%08" PRIx64 " data 0x%" PRIx32 " prev 0x%" PRIx32 +msf2_sysreg_read(uint64_t offset, uint32_t val) "msf2-sysreg read: addr 0x%08" PRIx64 " data 0x%08" PRIx32 +msf2_sysreg_write_pll_status(void) "Invalid write to read only PLL status register" + +# imx7_gpr.c +imx7_gpr_read(uint64_t offset) "addr 0x%08" PRIx64 +imx7_gpr_write(uint64_t offset, uint64_t value) "addr 0x%08" PRIx64 " value 0x%08" PRIx64 + +# mos6522.c +mos6522_set_counter(int index, unsigned int val) "T%d.counter=%d" +mos6522_get_next_irq_time(uint16_t latch, int64_t d, int64_t delta) "latch=%d counter=0x%"PRIx64 " delta_next=0x%"PRIx64 +mos6522_set_sr_int(void) "set sr_int" +mos6522_write(uint64_t addr, uint64_t val) "reg=0x%"PRIx64 " val=0x%"PRIx64 +mos6522_read(uint64_t addr, unsigned val) "reg=0x%"PRIx64 " val=0x%x" + +# npcm7xx_clk.c +npcm7xx_clk_read(uint64_t offset, uint32_t value) " offset: 0x%04" PRIx64 " value: 0x%08" PRIx32 +npcm7xx_clk_write(uint64_t offset, uint32_t value) "offset: 0x%04" PRIx64 " value: 0x%08" PRIx32 + +# npcm7xx_gcr.c +npcm7xx_gcr_read(uint64_t offset, uint32_t value) " offset: 0x%04" PRIx64 " value: 0x%08" PRIx32 +npcm7xx_gcr_write(uint64_t offset, uint32_t value) "offset: 0x%04" PRIx64 " value: 0x%08" PRIx32 + +# npcm7xx_mft.c +npcm7xx_mft_read(const char *name, uint64_t offset, uint16_t value) "%s: offset: 0x%04" PRIx64 " value: 0x%04" PRIx16 +npcm7xx_mft_write(const char *name, uint64_t offset, uint16_t value) "%s: offset: 0x%04" PRIx64 " value: 0x%04" PRIx16 +npcm7xx_mft_rpm(const char *clock, uint32_t clock_hz, int state, int32_t cnt, uint32_t rpm, uint32_t duty) " fan
clk: %s clock_hz: %" PRIu32 ", state: %d, cnt: %" PRIi32 ", rpm: %" PRIu32 ", duty: %" PRIu32 +npcm7xx_mft_capture(const char *name, int irq_level) "%s: level: %d" +npcm7xx_mft_update_clock(const char *name, uint16_t sel, uint64_t clock_period, uint64_t prescaled_clock_period) "%s: sel: 0x%02" PRIx16 ", period: %" PRIu64 ", prescaled: %" PRIu64 +npcm7xx_mft_set_duty(const char *name, int n, int value) "%s[%d]: %d" + +# npcm7xx_rng.c +npcm7xx_rng_read(uint64_t offset, uint64_t value, unsigned size) "offset: 0x%04" PRIx64 " value: 0x%02" PRIx64 " size: %u" +npcm7xx_rng_write(uint64_t offset, uint64_t value, unsigned size) "offset: 0x%04" PRIx64 " value: 0x%02" PRIx64 " size: %u" + +# npcm7xx_pwm.c +npcm7xx_pwm_read(const char *id, uint64_t offset, uint32_t value) "%s offset: 0x%04" PRIx64 " value: 0x%08" PRIx32 +npcm7xx_pwm_write(const char *id, uint64_t offset, uint32_t value) "%s offset: 0x%04" PRIx64 " value: 0x%08" PRIx32 +npcm7xx_pwm_update_freq(const char *id, uint8_t index, uint32_t old_value, uint32_t new_value) "%s pwm[%u] Update Freq: old_freq: %u, new_freq: %u" +npcm7xx_pwm_update_duty(const char *id, uint8_t index, uint32_t old_value, uint32_t new_value) "%s pwm[%u] Update Duty: old_duty: %u, new_duty: %u" + +# stm32f4xx_syscfg.c +stm32f4xx_syscfg_set_irq(int gpio, int line, int level) "Interrupt: GPIO: %d, Line: %d; Level: %d" +stm32f4xx_pulse_exti(int irq) "Pulse EXTI: %d" +stm32f4xx_syscfg_read(uint64_t addr) "reg read: addr: 0x%" PRIx64 " " +stm32f4xx_syscfg_write(uint64_t addr, uint64_t data) "reg write: addr: 0x%" PRIx64 " val: 0x%" PRIx64 "" + +# stm32f4xx_exti.c +stm32f4xx_exti_set_irq(int irq, int level) "Set EXTI: %d to %d" +stm32f4xx_exti_read(uint64_t addr) "reg read: addr: 0x%" PRIx64 " " +stm32f4xx_exti_write(uint64_t addr, uint64_t data) "reg write: addr: 0x%" PRIx64 " val: 0x%" PRIx64 "" + +# tz-mpc.c +tz_mpc_reg_read(uint32_t offset, uint64_t data, unsigned size) "TZ MPC regs read: offset 0x%x data 0x%" PRIx64 " size %u" +tz_mpc_reg_write(uint32_t offset, uint64_t data, unsigned size) "TZ MPC regs write: offset 0x%x data 0x%" PRIx64 " size %u" +tz_mpc_mem_blocked_read(uint64_t addr, unsigned size, bool secure) "TZ MPC blocked read: offset 0x%" PRIx64 " size %u secure %d" +tz_mpc_mem_blocked_write(uint64_t addr, uint64_t data, unsigned size, bool secure) "TZ MPC blocked write: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u secure %d" +tz_mpc_translate(uint64_t addr, int flags, const char *idx, const char *res) "TZ MPC translate: addr 0x%" PRIx64 " flags 0x%x iommu_idx %s: %s" +tz_mpc_iommu_notify(uint64_t addr) "TZ MPC iommu: notifying UNMAP/MAP for 0x%" PRIx64 + +# tz-msc.c +tz_msc_reset(void) "TZ MSC: reset" +tz_msc_cfg_nonsec(int level) "TZ MSC: cfg_nonsec = %d" +tz_msc_cfg_sec_resp(int level) "TZ MSC: cfg_sec_resp = %d" +tz_msc_irq_clear(int level) "TZ MSC: int_clear = %d" +tz_msc_update_irq(int level) "TZ MSC: setting irq line to %d" +tz_msc_access_blocked(uint64_t offset) "TZ MSC: offset 0x%" PRIx64 " access blocked" + +# tz-ppc.c +tz_ppc_reset(void) "TZ PPC: reset" +tz_ppc_cfg_nonsec(int n, int level) "TZ PPC: cfg_nonsec[%d] = %d" +tz_ppc_cfg_ap(int n, int level) "TZ PPC: cfg_ap[%d] = %d" +tz_ppc_cfg_sec_resp(int level) "TZ PPC: cfg_sec_resp = %d" +tz_ppc_irq_enable(int level) "TZ PPC: int_enable = %d" +tz_ppc_irq_clear(int level) "TZ PPC: int_clear = %d" +tz_ppc_update_irq(int level) "TZ PPC: setting irq line to %d" +tz_ppc_read_blocked(int n, uint64_t offset, bool secure, bool user) "TZ PPC: port %d offset 0x%" PRIx64 " read (secure %d user %d) blocked"
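The stm32f4xx_exti entries above trace the edge logic shown earlier in this patch: RTSR arms rising edges, FTSR arms falling edges, a matching edge latches the line's bit in PR, and IMR gates whether the pending line pulses the output IRQ. A standalone restatement of the pending-bit update:

    #include <stdint.h>
    #include <stdio.h>

    /* Mirrors stm32f4xx_exti_set_irq(): returns the new pending mask. */
    static uint32_t exti_pending(uint32_t pr, uint32_t rtsr, uint32_t ftsr,
                                 int irq, int level)
    {
        if ((rtsr & (1u << irq)) && level) {
            pr |= 1u << irq;    /* rising edge armed and seen */
        }
        if ((ftsr & (1u << irq)) && !level) {
            pr |= 1u << irq;    /* falling edge armed and seen */
        }
        return pr;
    }

    int main(void)
    {
        /* A rising edge on line 3 with RTSR bit 3 armed latches PR bit 3. */
        printf("PR: 0x%x\n", exti_pending(0, 1u << 3, 0, 3, 1));
        return 0;
    }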
+tz_ppc_write_blocked(int n, uint64_t offset, bool secure, bool user) "TZ PPC: port %d offset 0x%" PRIx64 " write (secure %d user %d) blocked" + +# iotkit-secctl.c +iotkit_secctl_s_read(uint32_t offset, uint64_t data, unsigned size) "IoTKit SecCtl S regs read: offset 0x%x data 0x%" PRIx64 " size %u" +iotkit_secctl_s_write(uint32_t offset, uint64_t data, unsigned size) "IoTKit SecCtl S regs write: offset 0x%x data 0x%" PRIx64 " size %u" +iotkit_secctl_ns_read(uint32_t offset, uint64_t data, unsigned size) "IoTKit SecCtl NS regs read: offset 0x%x data 0x%" PRIx64 " size %u" +iotkit_secctl_ns_write(uint32_t offset, uint64_t data, unsigned size) "IoTKit SecCtl NS regs write: offset 0x%x data 0x%" PRIx64 " size %u" + +# imx6ul_ccm.c +ccm_entry(void) "" +ccm_freq(uint32_t freq) "freq = %d" +ccm_clock_freq(uint32_t clock, uint32_t freq) "(Clock = %d) = %d" +ccm_read_reg(const char *reg_name, uint32_t value) "reg[%s] <= 0x%" PRIx32 +ccm_write_reg(const char *reg_name, uint32_t value) "reg[%s] => 0x%" PRIx32 + +# iotkit-sysinfo.c +iotkit_sysinfo_read(uint64_t offset, uint64_t data, unsigned size) "IoTKit SysInfo read: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u" +iotkit_sysinfo_write(uint64_t offset, uint64_t data, unsigned size) "IoTKit SysInfo write: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u" + +# iotkit-sysctl.c +iotkit_sysctl_read(uint64_t offset, uint64_t data, unsigned size) "IoTKit SysCtl read: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u" +iotkit_sysctl_write(uint64_t offset, uint64_t data, unsigned size) "IoTKit SysCtl write: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u" +iotkit_sysctl_reset(void) "IoTKit SysCtl: reset" + +# armsse-cpu-pwrctrl.c +armsse_cpu_pwrctrl_read(uint64_t offset, uint64_t data, unsigned size) "SSE-300 CPU_PWRCTRL read: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u" +armsse_cpu_pwrctrl_write(uint64_t offset, uint64_t data, unsigned size) "SSE-300 CPU_PWRCTRL write: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u" + +# armsse-cpuid.c +armsse_cpuid_read(uint64_t offset, uint64_t data, unsigned size) "SSE-200 CPU_IDENTITY read: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u" +armsse_cpuid_write(uint64_t offset, uint64_t data, unsigned size) "SSE-200 CPU_IDENTITY write: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u" + +# armsse-mhu.c +armsse_mhu_read(uint64_t offset, uint64_t data, unsigned size) "SSE-200 MHU read: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u" +armsse_mhu_write(uint64_t offset, uint64_t data, unsigned size) "SSE-200 MHU write: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u" + +# aspeed_xdma.c +aspeed_xdma_write(uint64_t offset, uint64_t data) "XDMA write: offset 0x%" PRIx64 " data 0x%" PRIx64 + +# bcm2835_property.c +bcm2835_mbox_property(uint32_t tag, uint32_t bufsize, size_t resplen) "mbox property tag:0x%08x in_sz:%u out_sz:%zu" + +# bcm2835_mbox.c +bcm2835_mbox_write(unsigned int size, uint64_t addr, uint64_t value) "mbox write sz:%u addr:0x%"PRIx64" data:0x%"PRIx64 +bcm2835_mbox_read(unsigned int size, uint64_t addr, uint64_t value) "mbox read sz:%u addr:0x%"PRIx64" data:0x%"PRIx64 +bcm2835_mbox_irq(unsigned level) "mbox irq:ARM level:%u" + +# mac_via.c +via1_rtc_update_data_out(int count, int value) "count=%d value=0x%02x" +via1_rtc_update_data_in(int count, int value) "count=%d value=0x%02x" +via1_rtc_internal_status(int cmd, int alt, int value) "cmd=0x%02x alt=0x%02x value=0x%02x" +via1_rtc_internal_cmd(int cmd) "cmd=0x%02x" +via1_rtc_cmd_invalid(int value) "value=0x%02x" +via1_rtc_internal_time(uint32_t time) 
"time=0x%08x" +via1_rtc_internal_set_cmd(int cmd) "cmd=0x%02x" +via1_rtc_internal_ignore_cmd(int cmd) "cmd=0x%02x" +via1_rtc_internal_set_alt(int alt, int sector, int offset) "alt=0x%02x sector=%u offset=%u" +via1_rtc_cmd_seconds_read(int reg, int value) "reg=%d value=0x%02x" +via1_rtc_cmd_seconds_write(int reg, int value) "reg=%d value=0x%02x" +via1_rtc_cmd_test_write(int value) "value=0x%02x" +via1_rtc_cmd_wprotect_write(int value) "value=0x%02x" +via1_rtc_cmd_pram_read(int addr, int value) "addr=%u value=0x%02x" +via1_rtc_cmd_pram_write(int addr, int value) "addr=%u value=0x%02x" +via1_rtc_cmd_pram_sect_read(int sector, int offset, int addr, int value) "sector=%u offset=%u addr=0x%x value=0x%02x" +via1_rtc_cmd_pram_sect_write(int sector, int offset, int addr, int value) "sector=%u offset=%u addr=0x%x value=0x%02x" +via1_adb_send(const char *state, uint8_t data, const char *vadbint) "state %s data=0x%02x vADBInt=%s" +via1_adb_receive(const char *state, uint8_t data, const char *vadbint, int status, int index, int size) "state %s data=0x%02x vADBInt=%s status=0x%x index=%d size=%d" +via1_adb_poll(uint8_t data, const char *vadbint, int status, int index, int size) "data=0x%02x vADBInt=%s status=0x%x index=%d size=%d" +via1_auxmode(int mode) "setting auxmode to %d" + +# grlib_ahb_apb_pnp.c +grlib_ahb_pnp_read(uint64_t addr, uint32_t value) "AHB PnP read addr:0x%03"PRIx64" data:0x%08x" +grlib_apb_pnp_read(uint64_t addr, uint32_t value) "APB PnP read addr:0x%03"PRIx64" data:0x%08x" + +# led.c +led_set_intensity(const char *color, const char *desc, uint8_t intensity_percent) "LED desc:'%s' color:%s intensity: %u%%" +led_change_intensity(const char *color, const char *desc, uint8_t old_intensity_percent, uint8_t new_intensity_percent) "LED desc:'%s' color:%s intensity %u%% -> %u%%" + +# pca9552.c +pca955x_gpio_status(const char *description, const char *buf) "%s GPIOs 0-15 [%s]" +pca955x_gpio_change(const char *description, unsigned id, unsigned prev_state, unsigned current_state) "%s GPIO id:%u status: %u -> %u" + +# bcm2835_cprman.c +bcm2835_cprman_read(uint64_t offset, uint64_t value) "offset:0x%" PRIx64 " value:0x%" PRIx64 +bcm2835_cprman_write(uint64_t offset, uint64_t value) "offset:0x%" PRIx64 " value:0x%" PRIx64 +bcm2835_cprman_write_invalid_magic(uint64_t offset, uint64_t value) "offset:0x%" PRIx64 " value:0x%" PRIx64 + +# virt_ctrl.c +virt_ctrl_read(void *dev, unsigned int addr, unsigned int size, uint64_t value) "ctrl: %p reg: 0x%02x size: %d value: 0x%"PRIx64 +virt_ctrl_write(void *dev, unsigned int addr, unsigned int size, uint64_t value) "ctrl: %p reg: 0x%02x size: %d value: 0x%"PRIx64 +virt_ctrl_reset(void *dev) "ctrl: %p" +virt_ctrl_realize(void *dev) "ctrl: %p" +virt_ctrl_instance_init(void *dev) "ctrl: %p" diff --git a/hw/misc/trace.h b/hw/misc/trace.h new file mode 100644 index 000000000..1ab6923d1 --- /dev/null +++ b/hw/misc/trace.h @@ -0,0 +1 @@ +#include "trace/trace-hw_misc.h" diff --git a/hw/misc/tz-mpc.c b/hw/misc/tz-mpc.c new file mode 100644 index 000000000..30481e1c9 --- /dev/null +++ b/hw/misc/tz-mpc.c @@ -0,0 +1,636 @@ +/* + * ARM AHB5 TrustZone Memory Protection Controller emulation + * + * Copyright (c) 2018 Linaro Limited + * Written by Peter Maydell + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 or + * (at your option) any later version. 
+ */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "qapi/error.h" +#include "trace.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "hw/registerfields.h" +#include "hw/irq.h" +#include "hw/misc/tz-mpc.h" +#include "hw/qdev-properties.h" + +/* Our IOMMU has two IOMMU indexes, one for secure transactions and one for + * non-secure transactions. + */ +enum { + IOMMU_IDX_S, + IOMMU_IDX_NS, + IOMMU_NUM_INDEXES, +}; + +/* Config registers */ +REG32(CTRL, 0x00) + FIELD(CTRL, SEC_RESP, 4, 1) + FIELD(CTRL, AUTOINC, 8, 1) + FIELD(CTRL, LOCKDOWN, 31, 1) +REG32(BLK_MAX, 0x10) +REG32(BLK_CFG, 0x14) +REG32(BLK_IDX, 0x18) +REG32(BLK_LUT, 0x1c) +REG32(INT_STAT, 0x20) + FIELD(INT_STAT, IRQ, 0, 1) +REG32(INT_CLEAR, 0x24) + FIELD(INT_CLEAR, IRQ, 0, 1) +REG32(INT_EN, 0x28) + FIELD(INT_EN, IRQ, 0, 1) +REG32(INT_INFO1, 0x2c) +REG32(INT_INFO2, 0x30) + FIELD(INT_INFO2, HMASTER, 0, 16) + FIELD(INT_INFO2, HNONSEC, 16, 1) + FIELD(INT_INFO2, CFG_NS, 17, 1) +REG32(INT_SET, 0x34) + FIELD(INT_SET, IRQ, 0, 1) +REG32(PIDR4, 0xfd0) +REG32(PIDR5, 0xfd4) +REG32(PIDR6, 0xfd8) +REG32(PIDR7, 0xfdc) +REG32(PIDR0, 0xfe0) +REG32(PIDR1, 0xfe4) +REG32(PIDR2, 0xfe8) +REG32(PIDR3, 0xfec) +REG32(CIDR0, 0xff0) +REG32(CIDR1, 0xff4) +REG32(CIDR2, 0xff8) +REG32(CIDR3, 0xffc) + +static const uint8_t tz_mpc_idregs[] = { + 0x04, 0x00, 0x00, 0x00, + 0x60, 0xb8, 0x1b, 0x00, + 0x0d, 0xf0, 0x05, 0xb1, +}; + +static void tz_mpc_irq_update(TZMPC *s) +{ + qemu_set_irq(s->irq, s->int_stat && s->int_en); +} + +static void tz_mpc_iommu_notify(TZMPC *s, uint32_t lutidx, + uint32_t oldlut, uint32_t newlut) +{ + /* Called when the LUT word at lutidx has changed from oldlut to newlut; + * must call the IOMMU notifiers for the changed blocks. + */ + IOMMUTLBEvent event = { + .entry = { + .addr_mask = s->blocksize - 1, + } + }; + hwaddr addr = lutidx * s->blocksize * 32; + int i; + + for (i = 0; i < 32; i++, addr += s->blocksize) { + bool block_is_ns; + + if (!((oldlut ^ newlut) & (1 << i))) { + continue; + } + /* This changes the mappings for both the S and the NS space, + * so we need to do four notifies: an UNMAP then a MAP for each. 
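+ * For example, with a 4KB block size (the typical target page size + * picked at realize time), flipping bit 5 of LUT word 2 re-routes the + * single block at (2 * 32 + 5) * 0x1000 = 0x45000: that block is + * unmapped and then remapped for the S and the NS index alike.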
+ */ + block_is_ns = newlut & (1 << i); + + trace_tz_mpc_iommu_notify(addr); + event.entry.iova = addr; + event.entry.translated_addr = addr; + + event.type = IOMMU_NOTIFIER_UNMAP; + event.entry.perm = IOMMU_NONE; + memory_region_notify_iommu(&s->upstream, IOMMU_IDX_S, event); + memory_region_notify_iommu(&s->upstream, IOMMU_IDX_NS, event); + + event.type = IOMMU_NOTIFIER_MAP; + event.entry.perm = IOMMU_RW; + if (block_is_ns) { + event.entry.target_as = &s->blocked_io_as; + } else { + event.entry.target_as = &s->downstream_as; + } + memory_region_notify_iommu(&s->upstream, IOMMU_IDX_S, event); + if (block_is_ns) { + event.entry.target_as = &s->downstream_as; + } else { + event.entry.target_as = &s->blocked_io_as; + } + memory_region_notify_iommu(&s->upstream, IOMMU_IDX_NS, event); + } +} + +static void tz_mpc_autoinc_idx(TZMPC *s, unsigned access_size) +{ + /* Auto-increment BLK_IDX if necessary */ + if (access_size == 4 && (s->ctrl & R_CTRL_AUTOINC_MASK)) { + s->blk_idx++; + s->blk_idx %= s->blk_max; + } +} + +static MemTxResult tz_mpc_reg_read(void *opaque, hwaddr addr, + uint64_t *pdata, + unsigned size, MemTxAttrs attrs) +{ + TZMPC *s = TZ_MPC(opaque); + uint64_t r; + uint32_t offset = addr & ~0x3; + + if (!attrs.secure && offset < A_PIDR4) { + /* NS accesses can only see the ID registers */ + qemu_log_mask(LOG_GUEST_ERROR, + "TZ MPC register read: NS access to offset 0x%x\n", + offset); + r = 0; + goto read_out; + } + + switch (offset) { + case A_CTRL: + r = s->ctrl; + break; + case A_BLK_MAX: + r = s->blk_max - 1; + break; + case A_BLK_CFG: + /* We are never in "init in progress state", so this just indicates + * the block size. s->blocksize == (1 << BLK_CFG + 5), so + * BLK_CFG == ctz32(s->blocksize) - 5 + */ + r = ctz32(s->blocksize) - 5; + break; + case A_BLK_IDX: + r = s->blk_idx; + break; + case A_BLK_LUT: + r = s->blk_lut[s->blk_idx]; + tz_mpc_autoinc_idx(s, size); + break; + case A_INT_STAT: + r = s->int_stat; + break; + case A_INT_EN: + r = s->int_en; + break; + case A_INT_INFO1: + r = s->int_info1; + break; + case A_INT_INFO2: + r = s->int_info2; + break; + case A_PIDR4: + case A_PIDR5: + case A_PIDR6: + case A_PIDR7: + case A_PIDR0: + case A_PIDR1: + case A_PIDR2: + case A_PIDR3: + case A_CIDR0: + case A_CIDR1: + case A_CIDR2: + case A_CIDR3: + r = tz_mpc_idregs[(offset - A_PIDR4) / 4]; + break; + case A_INT_CLEAR: + case A_INT_SET: + qemu_log_mask(LOG_GUEST_ERROR, + "TZ MPC register read: write-only offset 0x%x\n", + offset); + r = 0; + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "TZ MPC register read: bad offset 0x%x\n", offset); + r = 0; + break; + } + + if (size != 4) { + /* None of our registers are read-sensitive (except BLK_LUT, + * which can special case the "size not 4" case), so just + * pull the right bytes out of the word read result. + */ + r = extract32(r, (addr & 3) * 8, size * 8); + } + +read_out: + trace_tz_mpc_reg_read(addr, r, size); + *pdata = r; + return MEMTX_OK; +} + +static MemTxResult tz_mpc_reg_write(void *opaque, hwaddr addr, + uint64_t value, + unsigned size, MemTxAttrs attrs) +{ + TZMPC *s = TZ_MPC(opaque); + uint32_t offset = addr & ~0x3; + + trace_tz_mpc_reg_write(addr, value, size); + + if (!attrs.secure && offset < A_PIDR4) { + /* NS accesses can only see the ID registers */ + qemu_log_mask(LOG_GUEST_ERROR, + "TZ MPC register write: NS access to offset 0x%x\n", + offset); + return MEMTX_OK; + } + + if (size != 4) { + /* Expand the byte or halfword write to a full word size. 
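+ * (deposit32() below merges the incoming bytes into the old register + * value at the matching byte lane: a one-byte write of 0xab at byte + * offset 1, for instance, turns oldval 0x11223344 into 0x1122ab44.)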
+ * In most cases we can do this with zeroes; the exceptions + * are CTRL, BLK_IDX and BLK_LUT. + */ + uint32_t oldval; + + switch (offset) { + case A_CTRL: + oldval = s->ctrl; + break; + case A_BLK_IDX: + oldval = s->blk_idx; + break; + case A_BLK_LUT: + oldval = s->blk_lut[s->blk_idx]; + break; + default: + oldval = 0; + break; + } + value = deposit32(oldval, (addr & 3) * 8, size * 8, value); + } + + if ((s->ctrl & R_CTRL_LOCKDOWN_MASK) && + (offset == A_CTRL || offset == A_BLK_LUT || offset == A_INT_EN)) { + /* Lockdown mode makes these three registers read-only, and + * the only way out of it is to reset the device. + */ + qemu_log_mask(LOG_GUEST_ERROR, "TZ MPC register write to offset 0x%x " + "while MPC is in lockdown mode\n", offset); + return MEMTX_OK; + } + + switch (offset) { + case A_CTRL: + /* We don't implement the 'data gating' feature so all other bits + * are reserved and we make them RAZ/WI. + */ + s->ctrl = value & (R_CTRL_SEC_RESP_MASK | + R_CTRL_AUTOINC_MASK | + R_CTRL_LOCKDOWN_MASK); + break; + case A_BLK_IDX: + s->blk_idx = value % s->blk_max; + break; + case A_BLK_LUT: + tz_mpc_iommu_notify(s, s->blk_idx, s->blk_lut[s->blk_idx], value); + s->blk_lut[s->blk_idx] = value; + tz_mpc_autoinc_idx(s, size); + break; + case A_INT_CLEAR: + if (value & R_INT_CLEAR_IRQ_MASK) { + s->int_stat = 0; + tz_mpc_irq_update(s); + } + break; + case A_INT_EN: + s->int_en = value & R_INT_EN_IRQ_MASK; + tz_mpc_irq_update(s); + break; + case A_INT_SET: + if (value & R_INT_SET_IRQ_MASK) { + s->int_stat = R_INT_STAT_IRQ_MASK; + tz_mpc_irq_update(s); + } + break; + case A_PIDR4: + case A_PIDR5: + case A_PIDR6: + case A_PIDR7: + case A_PIDR0: + case A_PIDR1: + case A_PIDR2: + case A_PIDR3: + case A_CIDR0: + case A_CIDR1: + case A_CIDR2: + case A_CIDR3: + qemu_log_mask(LOG_GUEST_ERROR, + "TZ MPC register write: read-only offset 0x%x\n", offset); + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "TZ MPC register write: bad offset 0x%x\n", offset); + break; + } + + return MEMTX_OK; +} + +static const MemoryRegionOps tz_mpc_reg_ops = { + .read_with_attrs = tz_mpc_reg_read, + .write_with_attrs = tz_mpc_reg_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid.min_access_size = 1, + .valid.max_access_size = 4, + .impl.min_access_size = 1, + .impl.max_access_size = 4, +}; + +static inline bool tz_mpc_cfg_ns(TZMPC *s, hwaddr addr) +{ + /* Return the cfg_ns bit from the LUT for the specified address */ + hwaddr blknum = addr / s->blocksize; + hwaddr blkword = blknum / 32; + uint32_t blkbit = 1U << (blknum % 32); + + /* This would imply the address was larger than the size we + * defined this memory region to be, so it can't happen. + */ + assert(blkword < s->blk_max); + return s->blk_lut[blkword] & blkbit; +} + +static MemTxResult tz_mpc_handle_block(TZMPC *s, hwaddr addr, MemTxAttrs attrs) +{ + /* Handle a blocked transaction: raise IRQ, capture info, etc */ + if (!s->int_stat) { + /* First blocked transfer: capture information into INT_INFO1 and + * INT_INFO2. Subsequent transfers are still blocked but don't + * capture information until the guest clears the interrupt. 
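+ * A guest handler typically reads INT_INFO1/INT_INFO2 for the faulting + * address and attributes and then writes INT_CLEAR, which clears + * int_stat and so re-arms this capture for the next blocked access.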
+ */ + + s->int_info1 = addr; + s->int_info2 = 0; + s->int_info2 = FIELD_DP32(s->int_info2, INT_INFO2, HMASTER, + attrs.requester_id & 0xffff); + s->int_info2 = FIELD_DP32(s->int_info2, INT_INFO2, HNONSEC, + ~attrs.secure); + s->int_info2 = FIELD_DP32(s->int_info2, INT_INFO2, CFG_NS, + tz_mpc_cfg_ns(s, addr)); + s->int_stat |= R_INT_STAT_IRQ_MASK; + tz_mpc_irq_update(s); + } + + /* Generate bus error if desired; otherwise RAZ/WI */ + return (s->ctrl & R_CTRL_SEC_RESP_MASK) ? MEMTX_ERROR : MEMTX_OK; +} + +/* Accesses only reach these read and write functions if the MPC is + * blocking them; non-blocked accesses go directly to the downstream + * memory region without passing through this code. + */ +static MemTxResult tz_mpc_mem_blocked_read(void *opaque, hwaddr addr, + uint64_t *pdata, + unsigned size, MemTxAttrs attrs) +{ + TZMPC *s = TZ_MPC(opaque); + + trace_tz_mpc_mem_blocked_read(addr, size, attrs.secure); + + *pdata = 0; + return tz_mpc_handle_block(s, addr, attrs); +} + +static MemTxResult tz_mpc_mem_blocked_write(void *opaque, hwaddr addr, + uint64_t value, + unsigned size, MemTxAttrs attrs) +{ + TZMPC *s = TZ_MPC(opaque); + + trace_tz_mpc_mem_blocked_write(addr, value, size, attrs.secure); + + return tz_mpc_handle_block(s, addr, attrs); +} + +static const MemoryRegionOps tz_mpc_mem_blocked_ops = { + .read_with_attrs = tz_mpc_mem_blocked_read, + .write_with_attrs = tz_mpc_mem_blocked_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid.min_access_size = 1, + .valid.max_access_size = 8, + .impl.min_access_size = 1, + .impl.max_access_size = 8, +}; + +static IOMMUTLBEntry tz_mpc_translate(IOMMUMemoryRegion *iommu, + hwaddr addr, IOMMUAccessFlags flags, + int iommu_idx) +{ + TZMPC *s = TZ_MPC(container_of(iommu, TZMPC, upstream)); + bool ok; + + IOMMUTLBEntry ret = { + .iova = addr & ~(s->blocksize - 1), + .translated_addr = addr & ~(s->blocksize - 1), + .addr_mask = s->blocksize - 1, + .perm = IOMMU_RW, + }; + + /* Look at the per-block configuration for this address, and + * return a TLB entry directing the transaction at either + * downstream_as or blocked_io_as, as appropriate. + * If the LUT cfg_ns bit is 1, only non-secure transactions + * may pass. If the bit is 0, only secure transactions may pass. + */ + ok = tz_mpc_cfg_ns(s, addr) == (iommu_idx == IOMMU_IDX_NS); + + trace_tz_mpc_translate(addr, flags, + iommu_idx == IOMMU_IDX_S ? "S" : "NS", + ok ? "pass" : "block"); + + ret.target_as = ok ? &s->downstream_as : &s->blocked_io_as; + return ret; +} + +static int tz_mpc_attrs_to_index(IOMMUMemoryRegion *iommu, MemTxAttrs attrs) +{ + /* We treat unspecified attributes like secure. Transactions with + * unspecified attributes come from places like + * rom_reset() for initial image load, and we want + * those to pass through the from-reset "everything is secure" config. + * All the real during-emulation transactions from the CPU will + * specify attributes. + */ + return (attrs.unspecified || attrs.secure) ? 
IOMMU_IDX_S : IOMMU_IDX_NS; +} + +static int tz_mpc_num_indexes(IOMMUMemoryRegion *iommu) +{ + return IOMMU_NUM_INDEXES; +} + +static void tz_mpc_reset(DeviceState *dev) +{ + TZMPC *s = TZ_MPC(dev); + + s->ctrl = 0x00000100; + s->blk_idx = 0; + s->int_stat = 0; + s->int_en = 1; + s->int_info1 = 0; + s->int_info2 = 0; + + memset(s->blk_lut, 0, s->blk_max * sizeof(uint32_t)); +} + +static void tz_mpc_init(Object *obj) +{ + DeviceState *dev = DEVICE(obj); + TZMPC *s = TZ_MPC(obj); + + qdev_init_gpio_out_named(dev, &s->irq, "irq", 1); +} + +static void tz_mpc_realize(DeviceState *dev, Error **errp) +{ + Object *obj = OBJECT(dev); + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + TZMPC *s = TZ_MPC(dev); + uint64_t size; + + /* We can't create the upstream end of the port until realize, + * as we don't know the size of the MR used as the downstream until then. + * We insist on having a downstream, to avoid complicating the code + * with handling the "don't know how big this is" case. It's easy + * enough for the user to create an unimplemented_device as downstream + * if they have nothing else to plug into this. + */ + if (!s->downstream) { + error_setg(errp, "MPC 'downstream' link not set"); + return; + } + + size = memory_region_size(s->downstream); + + memory_region_init_iommu(&s->upstream, sizeof(s->upstream), + TYPE_TZ_MPC_IOMMU_MEMORY_REGION, + obj, "tz-mpc-upstream", size); + + /* In real hardware the block size is configurable. In QEMU we could + * make it configurable but will need it to be at least as big as the + * target page size so we can execute out of the resulting MRs. Guest + * software is supposed to check the block size using the BLK_CFG + * register, so make it fixed at the page size. + */ + s->blocksize = memory_region_iommu_get_min_page_size(&s->upstream); + if (size % s->blocksize != 0) { + error_setg(errp, + "MPC 'downstream' size %" PRId64 + " is not a multiple of %" HWADDR_PRIx " bytes", + size, s->blocksize); + object_unref(OBJECT(&s->upstream)); + return; + } + + /* BLK_MAX is the max value of BLK_IDX, which indexes an array of 32-bit + * words, each bit of which indicates one block. + */ + s->blk_max = DIV_ROUND_UP(size / s->blocksize, 32); + + memory_region_init_io(&s->regmr, obj, &tz_mpc_reg_ops, + s, "tz-mpc-regs", 0x1000); + sysbus_init_mmio(sbd, &s->regmr); + + sysbus_init_mmio(sbd, MEMORY_REGION(&s->upstream)); + + /* This memory region is not exposed to users of this device as a + * sysbus MMIO region, but is instead used internally as something + * that our IOMMU translate function might direct accesses to. + */ + memory_region_init_io(&s->blocked_io, obj, &tz_mpc_mem_blocked_ops, + s, "tz-mpc-blocked-io", size); + + address_space_init(&s->downstream_as, s->downstream, + "tz-mpc-downstream"); + address_space_init(&s->blocked_io_as, &s->blocked_io, + "tz-mpc-blocked-io"); + + s->blk_lut = g_new0(uint32_t, s->blk_max); +} + +static int tz_mpc_post_load(void *opaque, int version_id) +{ + TZMPC *s = TZ_MPC(opaque); + + /* Check the incoming data doesn't point blk_idx off the end of blk_lut. 
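+ * Otherwise a corrupted or malicious migration stream could make the + * subsequent BLK_LUT register accesses index past the blk_lut[] + * allocation.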
*/ + if (s->blk_idx >= s->blk_max) { + return -1; + } + return 0; +} + +static const VMStateDescription tz_mpc_vmstate = { + .name = "tz-mpc", + .version_id = 1, + .minimum_version_id = 1, + .post_load = tz_mpc_post_load, + .fields = (VMStateField[]) { + VMSTATE_UINT32(ctrl, TZMPC), + VMSTATE_UINT32(blk_idx, TZMPC), + VMSTATE_UINT32(int_stat, TZMPC), + VMSTATE_UINT32(int_en, TZMPC), + VMSTATE_UINT32(int_info1, TZMPC), + VMSTATE_UINT32(int_info2, TZMPC), + VMSTATE_VARRAY_UINT32(blk_lut, TZMPC, blk_max, + 0, vmstate_info_uint32, uint32_t), + VMSTATE_END_OF_LIST() + } +}; + +static Property tz_mpc_properties[] = { + DEFINE_PROP_LINK("downstream", TZMPC, downstream, + TYPE_MEMORY_REGION, MemoryRegion *), + DEFINE_PROP_END_OF_LIST(), +}; + +static void tz_mpc_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = tz_mpc_realize; + dc->vmsd = &tz_mpc_vmstate; + dc->reset = tz_mpc_reset; + device_class_set_props(dc, tz_mpc_properties); +} + +static const TypeInfo tz_mpc_info = { + .name = TYPE_TZ_MPC, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(TZMPC), + .instance_init = tz_mpc_init, + .class_init = tz_mpc_class_init, +}; + +static void tz_mpc_iommu_memory_region_class_init(ObjectClass *klass, + void *data) +{ + IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass); + + imrc->translate = tz_mpc_translate; + imrc->attrs_to_index = tz_mpc_attrs_to_index; + imrc->num_indexes = tz_mpc_num_indexes; +} + +static const TypeInfo tz_mpc_iommu_memory_region_info = { + .name = TYPE_TZ_MPC_IOMMU_MEMORY_REGION, + .parent = TYPE_IOMMU_MEMORY_REGION, + .class_init = tz_mpc_iommu_memory_region_class_init, +}; + +static void tz_mpc_register_types(void) +{ + type_register_static(&tz_mpc_info); + type_register_static(&tz_mpc_iommu_memory_region_info); +} + +type_init(tz_mpc_register_types); diff --git a/hw/misc/tz-msc.c b/hw/misc/tz-msc.c new file mode 100644 index 000000000..acbe94400 --- /dev/null +++ b/hw/misc/tz-msc.c @@ -0,0 +1,312 @@ +/* + * ARM TrustZone master security controller emulation + * + * Copyright (c) 2018 Linaro Limited + * Written by Peter Maydell + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 or + * (at your option) any later version. 
+ */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "qapi/error.h" +#include "trace.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "hw/registerfields.h" +#include "hw/irq.h" +#include "hw/misc/tz-msc.h" +#include "hw/qdev-properties.h" + +static void tz_msc_update_irq(TZMSC *s) +{ + bool level = s->irq_status; + + trace_tz_msc_update_irq(level); + qemu_set_irq(s->irq, level); +} + +static void tz_msc_cfg_nonsec(void *opaque, int n, int level) +{ + TZMSC *s = TZ_MSC(opaque); + + trace_tz_msc_cfg_nonsec(level); + s->cfg_nonsec = level; +} + +static void tz_msc_cfg_sec_resp(void *opaque, int n, int level) +{ + TZMSC *s = TZ_MSC(opaque); + + trace_tz_msc_cfg_sec_resp(level); + s->cfg_sec_resp = level; +} + +static void tz_msc_irq_clear(void *opaque, int n, int level) +{ + TZMSC *s = TZ_MSC(opaque); + + trace_tz_msc_irq_clear(level); + + s->irq_clear = level; + if (level) { + s->irq_status = false; + tz_msc_update_irq(s); + } +} + +/* The MSC may either block a transaction by aborting it, block a + * transaction by making it RAZ/WI, allow it through with + * MemTxAttrs indicating a secure transaction, or allow it with + * MemTxAttrs indicating a non-secure transaction. + */ +typedef enum MSCAction { + MSCBlockAbort, + MSCBlockRAZWI, + MSCAllowSecure, + MSCAllowNonSecure, +} MSCAction; + +static MSCAction tz_msc_check(TZMSC *s, hwaddr addr) +{ + /* + * Check whether to allow an access from the bus master, returning + * an MSCAction indicating the required behaviour. If the transaction + * is blocked, the caller must check cfg_sec_resp to determine + * whether to abort or RAZ/WI the transaction. + */ + IDAUInterfaceClass *iic = IDAU_INTERFACE_GET_CLASS(s->idau); + IDAUInterface *ii = IDAU_INTERFACE(s->idau); + bool idau_exempt = false, idau_ns = true, idau_nsc = true; + int idau_region = IREGION_NOTVALID; + + iic->check(ii, addr, &idau_region, &idau_exempt, &idau_ns, &idau_nsc); + + if (idau_exempt) { + /* + * Uncheck region -- OK, transaction type depends on + * whether bus master is configured as Secure or NonSecure + */ + return s->cfg_nonsec ? MSCAllowNonSecure : MSCAllowSecure; + } + + if (idau_ns) { + /* NonSecure region -- always forward as NS transaction */ + return MSCAllowNonSecure; + } + + if (!s->cfg_nonsec) { + /* Access to Secure region by Secure bus master: OK */ + return MSCAllowSecure; + } + + /* Attempted access to Secure region by NS bus master: block */ + trace_tz_msc_access_blocked(addr); + if (!s->cfg_sec_resp) { + return MSCBlockRAZWI; + } + + /* + * The TRM isn't clear on behaviour if irq_clear is high when a + * transaction is blocked. We assume that the MSC behaves like the + * PPC, where holding irq_clear high suppresses the interrupt. 
+ */ + if (!s->irq_clear) { + s->irq_status = true; + tz_msc_update_irq(s); + } + return MSCBlockAbort; +} + +static MemTxResult tz_msc_read(void *opaque, hwaddr addr, uint64_t *pdata, + unsigned size, MemTxAttrs attrs) +{ + TZMSC *s = opaque; + AddressSpace *as = &s->downstream_as; + uint64_t data; + MemTxResult res; + + switch (tz_msc_check(s, addr)) { + case MSCBlockAbort: + return MEMTX_ERROR; + case MSCBlockRAZWI: + *pdata = 0; + return MEMTX_OK; + case MSCAllowSecure: + attrs.secure = 1; + attrs.unspecified = 0; + break; + case MSCAllowNonSecure: + attrs.secure = 0; + attrs.unspecified = 0; + break; + } + + switch (size) { + case 1: + data = address_space_ldub(as, addr, attrs, &res); + break; + case 2: + data = address_space_lduw_le(as, addr, attrs, &res); + break; + case 4: + data = address_space_ldl_le(as, addr, attrs, &res); + break; + case 8: + data = address_space_ldq_le(as, addr, attrs, &res); + break; + default: + g_assert_not_reached(); + } + *pdata = data; + return res; +} + +static MemTxResult tz_msc_write(void *opaque, hwaddr addr, uint64_t val, + unsigned size, MemTxAttrs attrs) +{ + TZMSC *s = opaque; + AddressSpace *as = &s->downstream_as; + MemTxResult res; + + switch (tz_msc_check(s, addr)) { + case MSCBlockAbort: + return MEMTX_ERROR; + case MSCBlockRAZWI: + return MEMTX_OK; + case MSCAllowSecure: + attrs.secure = 1; + attrs.unspecified = 0; + break; + case MSCAllowNonSecure: + attrs.secure = 0; + attrs.unspecified = 0; + break; + } + + switch (size) { + case 1: + address_space_stb(as, addr, val, attrs, &res); + break; + case 2: + address_space_stw_le(as, addr, val, attrs, &res); + break; + case 4: + address_space_stl_le(as, addr, val, attrs, &res); + break; + case 8: + address_space_stq_le(as, addr, val, attrs, &res); + break; + default: + g_assert_not_reached(); + } + return res; +} + +static const MemoryRegionOps tz_msc_ops = { + .read_with_attrs = tz_msc_read, + .write_with_attrs = tz_msc_write, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static void tz_msc_reset(DeviceState *dev) +{ + TZMSC *s = TZ_MSC(dev); + + trace_tz_msc_reset(); + s->cfg_sec_resp = false; + s->cfg_nonsec = false; + s->irq_clear = 0; + s->irq_status = 0; +} + +static void tz_msc_init(Object *obj) +{ + DeviceState *dev = DEVICE(obj); + TZMSC *s = TZ_MSC(obj); + + qdev_init_gpio_in_named(dev, tz_msc_cfg_nonsec, "cfg_nonsec", 1); + qdev_init_gpio_in_named(dev, tz_msc_cfg_sec_resp, "cfg_sec_resp", 1); + qdev_init_gpio_in_named(dev, tz_msc_irq_clear, "irq_clear", 1); + qdev_init_gpio_out_named(dev, &s->irq, "irq", 1); +} + +static void tz_msc_realize(DeviceState *dev, Error **errp) +{ + Object *obj = OBJECT(dev); + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + TZMSC *s = TZ_MSC(dev); + const char *name = "tz-msc-downstream"; + uint64_t size; + + /* + * We can't create the upstream end of the port until realize, + * as we don't know the size of the MR used as the downstream until then. + * We insist on having a downstream, to avoid complicating the + * code with handling the "don't know how big this is" case. It's easy + * enough for the user to create an unimplemented_device as downstream + * if they have nothing else to plug into this. 
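+ * A board would typically wire it up along these lines (sketch only; + * "msc", "mr" and "idau_dev" are hypothetical names, and the idau + * object must implement TYPE_IDAU_INTERFACE): + * object_property_set_link(OBJECT(msc), "downstream", OBJECT(mr), + * &error_fatal); + * object_property_set_link(OBJECT(msc), "idau", OBJECT(idau_dev), + * &error_fatal); + * sysbus_realize(SYS_BUS_DEVICE(msc), &error_fatal);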
+ */ + if (!s->downstream) { + error_setg(errp, "MSC 'downstream' link not set"); + return; + } + if (!s->idau) { + error_setg(errp, "MSC 'idau' link not set"); + return; + } + + size = memory_region_size(s->downstream); + address_space_init(&s->downstream_as, s->downstream, name); + memory_region_init_io(&s->upstream, obj, &tz_msc_ops, s, name, size); + sysbus_init_mmio(sbd, &s->upstream); +} + +static const VMStateDescription tz_msc_vmstate = { + .name = "tz-msc", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_BOOL(cfg_nonsec, TZMSC), + VMSTATE_BOOL(cfg_sec_resp, TZMSC), + VMSTATE_BOOL(irq_clear, TZMSC), + VMSTATE_BOOL(irq_status, TZMSC), + VMSTATE_END_OF_LIST() + } +}; + +static Property tz_msc_properties[] = { + DEFINE_PROP_LINK("downstream", TZMSC, downstream, + TYPE_MEMORY_REGION, MemoryRegion *), + DEFINE_PROP_LINK("idau", TZMSC, idau, + TYPE_IDAU_INTERFACE, IDAUInterface *), + DEFINE_PROP_END_OF_LIST(), +}; + +static void tz_msc_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = tz_msc_realize; + dc->vmsd = &tz_msc_vmstate; + dc->reset = tz_msc_reset; + device_class_set_props(dc, tz_msc_properties); +} + +static const TypeInfo tz_msc_info = { + .name = TYPE_TZ_MSC, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(TZMSC), + .instance_init = tz_msc_init, + .class_init = tz_msc_class_init, +}; + +static void tz_msc_register_types(void) +{ + type_register_static(&tz_msc_info); +} + +type_init(tz_msc_register_types); diff --git a/hw/misc/tz-ppc.c b/hw/misc/tz-ppc.c new file mode 100644 index 000000000..36495c68e --- /dev/null +++ b/hw/misc/tz-ppc.c @@ -0,0 +1,352 @@ +/* + * ARM TrustZone peripheral protection controller emulation + * + * Copyright (c) 2018 Linaro Limited + * Written by Peter Maydell + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 or + * (at your option) any later version. 
+ */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "qapi/error.h" +#include "trace.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "hw/registerfields.h" +#include "hw/irq.h" +#include "hw/misc/tz-ppc.h" +#include "hw/qdev-properties.h" + +static void tz_ppc_update_irq(TZPPC *s) +{ + bool level = s->irq_status && s->irq_enable; + + trace_tz_ppc_update_irq(level); + qemu_set_irq(s->irq, level); +} + +static void tz_ppc_cfg_nonsec(void *opaque, int n, int level) +{ + TZPPC *s = TZ_PPC(opaque); + + assert(n < TZ_NUM_PORTS); + trace_tz_ppc_cfg_nonsec(n, level); + s->cfg_nonsec[n] = level; +} + +static void tz_ppc_cfg_ap(void *opaque, int n, int level) +{ + TZPPC *s = TZ_PPC(opaque); + + assert(n < TZ_NUM_PORTS); + trace_tz_ppc_cfg_ap(n, level); + s->cfg_ap[n] = level; +} + +static void tz_ppc_cfg_sec_resp(void *opaque, int n, int level) +{ + TZPPC *s = TZ_PPC(opaque); + + trace_tz_ppc_cfg_sec_resp(level); + s->cfg_sec_resp = level; +} + +static void tz_ppc_irq_enable(void *opaque, int n, int level) +{ + TZPPC *s = TZ_PPC(opaque); + + trace_tz_ppc_irq_enable(level); + s->irq_enable = level; + tz_ppc_update_irq(s); +} + +static void tz_ppc_irq_clear(void *opaque, int n, int level) +{ + TZPPC *s = TZ_PPC(opaque); + + trace_tz_ppc_irq_clear(level); + + s->irq_clear = level; + if (level) { + s->irq_status = false; + tz_ppc_update_irq(s); + } +} + +static bool tz_ppc_check(TZPPC *s, int n, MemTxAttrs attrs) +{ + /* Check whether to allow an access to port n; return true if + * the check passes, and false if the transaction must be blocked. + * If the latter, the caller must check cfg_sec_resp to determine + * whether to abort or RAZ/WI the transaction. + * The checks are: + * + nonsec_mask suppresses any check of the secure attribute + * + otherwise, block if cfg_nonsec is 1 and transaction is secure, + * or if cfg_nonsec is 0 and transaction is non-secure + * + block if transaction is usermode and cfg_ap is 0 + */ + if ((attrs.secure == s->cfg_nonsec[n] && !(s->nonsec_mask & (1 << n))) || + (attrs.user && !s->cfg_ap[n])) { + /* Block the transaction. 
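+ * (Concretely: with nonsec_mask bit n clear, a non-secure access to a + * port with cfg_nonsec[n] = 0, or a secure access to a port with + * cfg_nonsec[n] = 1, fails the first test; a user-mode access with + * cfg_ap[n] = 0 fails the second.)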
*/ + if (!s->irq_clear) { + /* Note that holding irq_clear high suppresses interrupts */ + s->irq_status = true; + tz_ppc_update_irq(s); + } + return false; + } + return true; +} + +static MemTxResult tz_ppc_read(void *opaque, hwaddr addr, uint64_t *pdata, + unsigned size, MemTxAttrs attrs) +{ + TZPPCPort *p = opaque; + TZPPC *s = p->ppc; + int n = p - s->port; + AddressSpace *as = &p->downstream_as; + uint64_t data; + MemTxResult res; + + if (!tz_ppc_check(s, n, attrs)) { + trace_tz_ppc_read_blocked(n, addr, attrs.secure, attrs.user); + if (s->cfg_sec_resp) { + return MEMTX_ERROR; + } else { + *pdata = 0; + return MEMTX_OK; + } + } + + switch (size) { + case 1: + data = address_space_ldub(as, addr, attrs, &res); + break; + case 2: + data = address_space_lduw_le(as, addr, attrs, &res); + break; + case 4: + data = address_space_ldl_le(as, addr, attrs, &res); + break; + case 8: + data = address_space_ldq_le(as, addr, attrs, &res); + break; + default: + g_assert_not_reached(); + } + *pdata = data; + return res; +} + +static MemTxResult tz_ppc_write(void *opaque, hwaddr addr, uint64_t val, + unsigned size, MemTxAttrs attrs) +{ + TZPPCPort *p = opaque; + TZPPC *s = p->ppc; + AddressSpace *as = &p->downstream_as; + int n = p - s->port; + MemTxResult res; + + if (!tz_ppc_check(s, n, attrs)) { + trace_tz_ppc_write_blocked(n, addr, attrs.secure, attrs.user); + if (s->cfg_sec_resp) { + return MEMTX_ERROR; + } else { + return MEMTX_OK; + } + } + + switch (size) { + case 1: + address_space_stb(as, addr, val, attrs, &res); + break; + case 2: + address_space_stw_le(as, addr, val, attrs, &res); + break; + case 4: + address_space_stl_le(as, addr, val, attrs, &res); + break; + case 8: + address_space_stq_le(as, addr, val, attrs, &res); + break; + default: + g_assert_not_reached(); + } + return res; +} + +static const MemoryRegionOps tz_ppc_ops = { + .read_with_attrs = tz_ppc_read, + .write_with_attrs = tz_ppc_write, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static bool tz_ppc_dummy_accepts(void *opaque, hwaddr addr, + unsigned size, bool is_write, + MemTxAttrs attrs) +{ + /* + * Board code should never map the upstream end of an unused port, + * so we should never try to make a memory access to it. 
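+ * Reaching any of these callbacks would therefore indicate a board + * wiring bug, hence the hard asserts below.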
+ */ + g_assert_not_reached(); +} + +static uint64_t tz_ppc_dummy_read(void *opaque, hwaddr addr, unsigned size) +{ + g_assert_not_reached(); +} + +static void tz_ppc_dummy_write(void *opaque, hwaddr addr, + uint64_t data, unsigned size) +{ + g_assert_not_reached(); +} + +static const MemoryRegionOps tz_ppc_dummy_ops = { + /* define r/w methods to avoid assert failure in memory_region_init_io */ + .read = tz_ppc_dummy_read, + .write = tz_ppc_dummy_write, + .valid.accepts = tz_ppc_dummy_accepts, +}; + +static void tz_ppc_reset(DeviceState *dev) +{ + TZPPC *s = TZ_PPC(dev); + + trace_tz_ppc_reset(); + s->cfg_sec_resp = false; + memset(s->cfg_nonsec, 0, sizeof(s->cfg_nonsec)); + memset(s->cfg_ap, 0, sizeof(s->cfg_ap)); +} + +static void tz_ppc_init(Object *obj) +{ + DeviceState *dev = DEVICE(obj); + TZPPC *s = TZ_PPC(obj); + + qdev_init_gpio_in_named(dev, tz_ppc_cfg_nonsec, "cfg_nonsec", TZ_NUM_PORTS); + qdev_init_gpio_in_named(dev, tz_ppc_cfg_ap, "cfg_ap", TZ_NUM_PORTS); + qdev_init_gpio_in_named(dev, tz_ppc_cfg_sec_resp, "cfg_sec_resp", 1); + qdev_init_gpio_in_named(dev, tz_ppc_irq_enable, "irq_enable", 1); + qdev_init_gpio_in_named(dev, tz_ppc_irq_clear, "irq_clear", 1); + qdev_init_gpio_out_named(dev, &s->irq, "irq", 1); +} + +static void tz_ppc_realize(DeviceState *dev, Error **errp) +{ + Object *obj = OBJECT(dev); + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + TZPPC *s = TZ_PPC(dev); + int i; + int max_port = 0; + + /* We can't create the upstream end of the port until realize, + * as we don't know the size of the MR used as the downstream until then. + */ + for (i = 0; i < TZ_NUM_PORTS; i++) { + if (s->port[i].downstream) { + max_port = i; + } + } + + for (i = 0; i <= max_port; i++) { + TZPPCPort *port = &s->port[i]; + char *name; + uint64_t size; + + if (!port->downstream) { + /* + * Create dummy sysbus MMIO region so the sysbus region + * numbering doesn't get out of sync with the port numbers. + * The size is entirely arbitrary. 
+ */ + name = g_strdup_printf("tz-ppc-dummy-port[%d]", i); + memory_region_init_io(&port->upstream, obj, &tz_ppc_dummy_ops, + port, name, 0x10000); + sysbus_init_mmio(sbd, &port->upstream); + g_free(name); + continue; + } + + name = g_strdup_printf("tz-ppc-port[%d]", i); + + port->ppc = s; + address_space_init(&port->downstream_as, port->downstream, name); + + size = memory_region_size(port->downstream); + memory_region_init_io(&port->upstream, obj, &tz_ppc_ops, + port, name, size); + sysbus_init_mmio(sbd, &port->upstream); + g_free(name); + } +} + +static const VMStateDescription tz_ppc_vmstate = { + .name = "tz-ppc", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_BOOL_ARRAY(cfg_nonsec, TZPPC, 16), + VMSTATE_BOOL_ARRAY(cfg_ap, TZPPC, 16), + VMSTATE_BOOL(cfg_sec_resp, TZPPC), + VMSTATE_BOOL(irq_enable, TZPPC), + VMSTATE_BOOL(irq_clear, TZPPC), + VMSTATE_BOOL(irq_status, TZPPC), + VMSTATE_END_OF_LIST() + } +}; + +#define DEFINE_PORT(N) \ + DEFINE_PROP_LINK("port[" #N "]", TZPPC, port[N].downstream, \ + TYPE_MEMORY_REGION, MemoryRegion *) + +static Property tz_ppc_properties[] = { + DEFINE_PROP_UINT32("NONSEC_MASK", TZPPC, nonsec_mask, 0), + DEFINE_PORT(0), + DEFINE_PORT(1), + DEFINE_PORT(2), + DEFINE_PORT(3), + DEFINE_PORT(4), + DEFINE_PORT(5), + DEFINE_PORT(6), + DEFINE_PORT(7), + DEFINE_PORT(8), + DEFINE_PORT(9), + DEFINE_PORT(10), + DEFINE_PORT(11), + DEFINE_PORT(12), + DEFINE_PORT(13), + DEFINE_PORT(14), + DEFINE_PORT(15), + DEFINE_PROP_END_OF_LIST(), +}; + +static void tz_ppc_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = tz_ppc_realize; + dc->vmsd = &tz_ppc_vmstate; + dc->reset = tz_ppc_reset; + device_class_set_props(dc, tz_ppc_properties); +} + +static const TypeInfo tz_ppc_info = { + .name = TYPE_TZ_PPC, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(TZPPC), + .instance_init = tz_ppc_init, + .class_init = tz_ppc_class_init, +}; + +static void tz_ppc_register_types(void) +{ + type_register_static(&tz_ppc_info); +} + +type_init(tz_ppc_register_types); diff --git a/hw/misc/unimp.c b/hw/misc/unimp.c new file mode 100644 index 000000000..6cfc5727f --- /dev/null +++ b/hw/misc/unimp.c @@ -0,0 +1,99 @@ +/* "Unimplemented" device + * + * This is a dummy device which accepts and logs all accesses. + * It's useful for stubbing out regions of an SoC or board + * map which correspond to devices that have not yet been + * implemented. This is often sufficient to placate initial + * guest device driver probing such that the system will + * come up. 
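+ * + * Board code usually instantiates it via the convenience wrapper in + * include/hw/misc/unimp.h, for example (base and size are purely + * illustrative here): + * create_unimplemented_device("soc.unknown", 0x40000000, 0x1000);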
+ * + * Copyright Linaro Limited, 2017 + * Written by Peter Maydell + */ + +#include "qemu/osdep.h" +#include "hw/sysbus.h" +#include "hw/misc/unimp.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "qapi/error.h" + +static uint64_t unimp_read(void *opaque, hwaddr offset, unsigned size) +{ + UnimplementedDeviceState *s = UNIMPLEMENTED_DEVICE(opaque); + + qemu_log_mask(LOG_UNIMP, "%s: unimplemented device read " + "(size %d, offset 0x%0*" HWADDR_PRIx ")\n", + s->name, size, s->offset_fmt_width, offset); + return 0; +} + +static void unimp_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + UnimplementedDeviceState *s = UNIMPLEMENTED_DEVICE(opaque); + + qemu_log_mask(LOG_UNIMP, "%s: unimplemented device write " + "(size %d, offset 0x%0*" HWADDR_PRIx + ", value 0x%0*" PRIx64 ")\n", + s->name, size, s->offset_fmt_width, offset, size << 1, value); +} + +static const MemoryRegionOps unimp_ops = { + .read = unimp_read, + .write = unimp_write, + .impl.min_access_size = 1, + .impl.max_access_size = 8, + .valid.min_access_size = 1, + .valid.max_access_size = 8, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void unimp_realize(DeviceState *dev, Error **errp) +{ + UnimplementedDeviceState *s = UNIMPLEMENTED_DEVICE(dev); + + if (s->size == 0) { + error_setg(errp, "property 'size' not specified or zero"); + return; + } + + if (s->name == NULL) { + error_setg(errp, "property 'name' not specified"); + return; + } + + s->offset_fmt_width = DIV_ROUND_UP(64 - clz64(s->size - 1), 4); + + memory_region_init_io(&s->iomem, OBJECT(s), &unimp_ops, s, + s->name, s->size); + sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->iomem); +} + +static Property unimp_properties[] = { + DEFINE_PROP_UINT64("size", UnimplementedDeviceState, size, 0), + DEFINE_PROP_STRING("name", UnimplementedDeviceState, name), + DEFINE_PROP_END_OF_LIST(), +}; + +static void unimp_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = unimp_realize; + device_class_set_props(dc, unimp_properties); +} + +static const TypeInfo unimp_info = { + .name = TYPE_UNIMPLEMENTED_DEVICE, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(UnimplementedDeviceState), + .class_init = unimp_class_init, +}; + +static void unimp_register_types(void) +{ + type_register_static(&unimp_info); +} + +type_init(unimp_register_types) diff --git a/hw/misc/virt_ctrl.c b/hw/misc/virt_ctrl.c new file mode 100644 index 000000000..e75d1e7e1 --- /dev/null +++ b/hw/misc/virt_ctrl.c @@ -0,0 +1,150 @@ +/* + * SPDX-License-Identifier: GPL-2.0-or-later + * + * Virt system Controller + */ + +#include "qemu/osdep.h" +#include "hw/qdev-properties.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "qemu/log.h" +#include "trace.h" +#include "sysemu/runstate.h" +#include "hw/misc/virt_ctrl.h" + +enum { + REG_FEATURES = 0x00, + REG_CMD = 0x04, +}; + +#define FEAT_POWER_CTRL 0x00000001 + +enum { + CMD_NOOP, + CMD_RESET, + CMD_HALT, + CMD_PANIC, +}; + +static uint64_t virt_ctrl_read(void *opaque, hwaddr addr, unsigned size) +{ + VirtCtrlState *s = opaque; + uint64_t value = 0; + + switch (addr) { + case REG_FEATURES: + value = FEAT_POWER_CTRL; + break; + default: + qemu_log_mask(LOG_UNIMP, + "%s: unimplemented register read 0x%02"HWADDR_PRIx"\n", + __func__, addr); + break; + } + + trace_virt_ctrl_read(s, addr, size, value); + + return value; +} + +static void virt_ctrl_write(void *opaque, hwaddr addr, uint64_t value, + unsigned size) +{ + VirtCtrlState *s = opaque; + +
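+ /* + * Trace the access up front, so writes to unimplemented registers + * still show up next to the LOG_UNIMP message below. + */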
trace_virt_ctrl_write(s, addr, size, value); + + switch (addr) { + case REG_CMD: + switch (value) { + case CMD_NOOP: + break; + case CMD_RESET: + qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); + break; + case CMD_HALT: + qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN); + break; + case CMD_PANIC: + qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_PANIC); + break; + } + break; + default: + qemu_log_mask(LOG_UNIMP, + "%s: unimplemented register write 0x%02"HWADDR_PRIx"\n", + __func__, addr); + break; + } +} + +static const MemoryRegionOps virt_ctrl_ops = { + .read = virt_ctrl_read, + .write = virt_ctrl_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid.max_access_size = 4, + .impl.max_access_size = 4, +}; + +static void virt_ctrl_reset(DeviceState *dev) +{ + VirtCtrlState *s = VIRT_CTRL(dev); + + trace_virt_ctrl_reset(s); +} + +static void virt_ctrl_realize(DeviceState *dev, Error **errp) +{ + VirtCtrlState *s = VIRT_CTRL(dev); + + trace_virt_ctrl_realize(s); + + memory_region_init_io(&s->iomem, OBJECT(s), &virt_ctrl_ops, s, + "virt-ctrl", 0x100); +} + +static const VMStateDescription vmstate_virt_ctrl = { + .name = "virt-ctrl", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(irq_enabled, VirtCtrlState), + VMSTATE_END_OF_LIST() + } +}; + +static void virt_ctrl_instance_init(Object *obj) +{ + SysBusDevice *dev = SYS_BUS_DEVICE(obj); + VirtCtrlState *s = VIRT_CTRL(obj); + + trace_virt_ctrl_instance_init(s); + + sysbus_init_mmio(dev, &s->iomem); + sysbus_init_irq(dev, &s->irq); +} + +static void virt_ctrl_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + + dc->reset = virt_ctrl_reset; + dc->realize = virt_ctrl_realize; + dc->vmsd = &vmstate_virt_ctrl; +} + +static const TypeInfo virt_ctrl_info = { + .name = TYPE_VIRT_CTRL, + .parent = TYPE_SYS_BUS_DEVICE, + .class_init = virt_ctrl_class_init, + .instance_init = virt_ctrl_instance_init, + .instance_size = sizeof(VirtCtrlState), +}; + +static void virt_ctrl_register_types(void) +{ + type_register_static(&virt_ctrl_info); +} + +type_init(virt_ctrl_register_types) diff --git a/hw/misc/vmcoreinfo.c b/hw/misc/vmcoreinfo.c new file mode 100644 index 000000000..a9d718fc2 --- /dev/null +++ b/hw/misc/vmcoreinfo.c @@ -0,0 +1,108 @@ +/* + * Virtual Machine coreinfo device + * + * Copyright (C) 2017 Red Hat, Inc. + * + * Authors: Marc-André Lureau + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory.
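+ * + * The guest stores the format, size and physical address of its + * vmcoreinfo ELF note through the FW_CFG_VMCOREINFO_FILENAME fw_cfg + * file; dump tools (such as the dump-guest-memory.py gdb script + * mentioned below) can then locate the note in a memory dump.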
+ * + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/module.h" +#include "sysemu/reset.h" +#include "hw/nvram/fw_cfg.h" +#include "migration/vmstate.h" +#include "hw/misc/vmcoreinfo.h" + +static void fw_cfg_vmci_write(void *dev, off_t offset, size_t len) +{ + VMCoreInfoState *s = VMCOREINFO(dev); + + s->has_vmcoreinfo = offset == 0 && len == sizeof(s->vmcoreinfo) + && s->vmcoreinfo.guest_format != FW_CFG_VMCOREINFO_FORMAT_NONE; +} + +static void vmcoreinfo_reset(void *dev) +{ + VMCoreInfoState *s = VMCOREINFO(dev); + + s->has_vmcoreinfo = false; + memset(&s->vmcoreinfo, 0, sizeof(s->vmcoreinfo)); + s->vmcoreinfo.host_format = cpu_to_le16(FW_CFG_VMCOREINFO_FORMAT_ELF); +} + +static void vmcoreinfo_realize(DeviceState *dev, Error **errp) +{ + VMCoreInfoState *s = VMCOREINFO(dev); + FWCfgState *fw_cfg = fw_cfg_find(); + /* for gdb script dump-guest-memory.py */ + static VMCoreInfoState * volatile vmcoreinfo_state G_GNUC_UNUSED; + + /* Given that this function is executing, there is at least one VMCOREINFO + * device. Check if there are several. + */ + if (!vmcoreinfo_find()) { + error_setg(errp, "at most one %s device is permitted", + VMCOREINFO_DEVICE); + return; + } + + if (!fw_cfg || !fw_cfg->dma_enabled) { + error_setg(errp, "%s device requires fw_cfg with DMA", + VMCOREINFO_DEVICE); + return; + } + + fw_cfg_add_file_callback(fw_cfg, FW_CFG_VMCOREINFO_FILENAME, + NULL, fw_cfg_vmci_write, s, + &s->vmcoreinfo, sizeof(s->vmcoreinfo), false); + + /* + * This device needs to register a global reset handler because it + * is not plugged into a bus (which, as its QOM parent, would reset it). + */ + qemu_register_reset(vmcoreinfo_reset, dev); + vmcoreinfo_state = s; +} + +static const VMStateDescription vmstate_vmcoreinfo = { + .name = "vmcoreinfo", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_BOOL(has_vmcoreinfo, VMCoreInfoState), + VMSTATE_UINT16(vmcoreinfo.host_format, VMCoreInfoState), + VMSTATE_UINT16(vmcoreinfo.guest_format, VMCoreInfoState), + VMSTATE_UINT32(vmcoreinfo.size, VMCoreInfoState), + VMSTATE_UINT64(vmcoreinfo.paddr, VMCoreInfoState), + VMSTATE_END_OF_LIST() + }, +}; + +static void vmcoreinfo_device_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->vmsd = &vmstate_vmcoreinfo; + dc->realize = vmcoreinfo_realize; + dc->hotpluggable = false; + set_bit(DEVICE_CATEGORY_MISC, dc->categories); +} + +static const TypeInfo vmcoreinfo_device_info = { + .name = VMCOREINFO_DEVICE, + .parent = TYPE_DEVICE, + .instance_size = sizeof(VMCoreInfoState), + .class_init = vmcoreinfo_device_class_init, +}; + +static void vmcoreinfo_register_types(void) +{ + type_register_static(&vmcoreinfo_device_info); +} + +type_init(vmcoreinfo_register_types) diff --git a/hw/misc/xlnx-versal-xramc.c b/hw/misc/xlnx-versal-xramc.c new file mode 100644 index 000000000..e5b719a0e --- /dev/null +++ b/hw/misc/xlnx-versal-xramc.c @@ -0,0 +1,253 @@ +/* + * QEMU model of the Xilinx XRAM Controller. + * + * Copyright (c) 2021 Xilinx Inc. + * SPDX-License-Identifier: GPL-2.0-or-later + * Written by Edgar E.
Iglesias + */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "qapi/error.h" +#include "migration/vmstate.h" +#include "hw/sysbus.h" +#include "hw/register.h" +#include "hw/qdev-properties.h" +#include "hw/irq.h" +#include "hw/misc/xlnx-versal-xramc.h" + +#ifndef XLNX_XRAM_CTRL_ERR_DEBUG +#define XLNX_XRAM_CTRL_ERR_DEBUG 0 +#endif + +static void xram_update_irq(XlnxXramCtrl *s) +{ + bool pending = s->regs[R_XRAM_ISR] & ~s->regs[R_XRAM_IMR]; + qemu_set_irq(s->irq, pending); +} + +static void xram_isr_postw(RegisterInfo *reg, uint64_t val64) +{ + XlnxXramCtrl *s = XLNX_XRAM_CTRL(reg->opaque); + xram_update_irq(s); +} + +static uint64_t xram_ien_prew(RegisterInfo *reg, uint64_t val64) +{ + XlnxXramCtrl *s = XLNX_XRAM_CTRL(reg->opaque); + uint32_t val = val64; + + s->regs[R_XRAM_IMR] &= ~val; + xram_update_irq(s); + return 0; +} + +static uint64_t xram_ids_prew(RegisterInfo *reg, uint64_t val64) +{ + XlnxXramCtrl *s = XLNX_XRAM_CTRL(reg->opaque); + uint32_t val = val64; + + s->regs[R_XRAM_IMR] |= val; + xram_update_irq(s); + return 0; +} + +static const RegisterAccessInfo xram_ctrl_regs_info[] = { + { .name = "XRAM_ERR_CTRL", .addr = A_XRAM_ERR_CTRL, + .reset = 0xf, + .rsvd = 0xfffffff0, + },{ .name = "XRAM_ISR", .addr = A_XRAM_ISR, + .rsvd = 0xfffff800, + .w1c = 0x7ff, + .post_write = xram_isr_postw, + },{ .name = "XRAM_IMR", .addr = A_XRAM_IMR, + .reset = 0x7ff, + .rsvd = 0xfffff800, + .ro = 0x7ff, + },{ .name = "XRAM_IEN", .addr = A_XRAM_IEN, + .rsvd = 0xfffff800, + .pre_write = xram_ien_prew, + },{ .name = "XRAM_IDS", .addr = A_XRAM_IDS, + .rsvd = 0xfffff800, + .pre_write = xram_ids_prew, + },{ .name = "XRAM_ECC_CNTL", .addr = A_XRAM_ECC_CNTL, + .rsvd = 0xfffffff8, + },{ .name = "XRAM_CLR_EXE", .addr = A_XRAM_CLR_EXE, + .rsvd = 0xffffff00, + },{ .name = "XRAM_CE_FFA", .addr = A_XRAM_CE_FFA, + .rsvd = 0xfff00000, + .ro = 0xfffff, + },{ .name = "XRAM_CE_FFD0", .addr = A_XRAM_CE_FFD0, + .ro = 0xffffffff, + },{ .name = "XRAM_CE_FFD1", .addr = A_XRAM_CE_FFD1, + .ro = 0xffffffff, + },{ .name = "XRAM_CE_FFD2", .addr = A_XRAM_CE_FFD2, + .ro = 0xffffffff, + },{ .name = "XRAM_CE_FFD3", .addr = A_XRAM_CE_FFD3, + .ro = 0xffffffff, + },{ .name = "XRAM_CE_FFE", .addr = A_XRAM_CE_FFE, + .rsvd = 0xffff0000, + .ro = 0xffff, + },{ .name = "XRAM_UE_FFA", .addr = A_XRAM_UE_FFA, + .rsvd = 0xfff00000, + .ro = 0xfffff, + },{ .name = "XRAM_UE_FFD0", .addr = A_XRAM_UE_FFD0, + .ro = 0xffffffff, + },{ .name = "XRAM_UE_FFD1", .addr = A_XRAM_UE_FFD1, + .ro = 0xffffffff, + },{ .name = "XRAM_UE_FFD2", .addr = A_XRAM_UE_FFD2, + .ro = 0xffffffff, + },{ .name = "XRAM_UE_FFD3", .addr = A_XRAM_UE_FFD3, + .ro = 0xffffffff, + },{ .name = "XRAM_UE_FFE", .addr = A_XRAM_UE_FFE, + .rsvd = 0xffff0000, + .ro = 0xffff, + },{ .name = "XRAM_FI_D0", .addr = A_XRAM_FI_D0, + },{ .name = "XRAM_FI_D1", .addr = A_XRAM_FI_D1, + },{ .name = "XRAM_FI_D2", .addr = A_XRAM_FI_D2, + },{ .name = "XRAM_FI_D3", .addr = A_XRAM_FI_D3, + },{ .name = "XRAM_FI_SY", .addr = A_XRAM_FI_SY, + .rsvd = 0xffff0000, + },{ .name = "XRAM_RMW_UE_FFA", .addr = A_XRAM_RMW_UE_FFA, + .rsvd = 0xfff00000, + .ro = 0xfffff, + },{ .name = "XRAM_FI_CNTR", .addr = A_XRAM_FI_CNTR, + .rsvd = 0xff000000, + },{ .name = "XRAM_IMP", .addr = A_XRAM_IMP, + .reset = 0x4, + .rsvd = 0xfffffff0, + .ro = 0xf, + },{ .name = "XRAM_PRDY_DBG", .addr = A_XRAM_PRDY_DBG, + .reset = 0xffff, + .rsvd = 0xffff0000, + .ro = 0xffff, + },{ .name = "XRAM_SAFETY_CHK", .addr = A_XRAM_SAFETY_CHK, + } +}; + +static void xram_ctrl_reset_enter(Object *obj, ResetType type) +{ + XlnxXramCtrl *s = 
XLNX_XRAM_CTRL(obj); + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(s->regs_info); ++i) { + register_reset(&s->regs_info[i]); + } + + ARRAY_FIELD_DP32(s->regs, XRAM_IMP, SIZE, s->cfg.encoded_size); +} + +static void xram_ctrl_reset_hold(Object *obj) +{ + XlnxXramCtrl *s = XLNX_XRAM_CTRL(obj); + + xram_update_irq(s); +} + +static const MemoryRegionOps xram_ctrl_ops = { + .read = register_read_memory, + .write = register_write_memory, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +static void xram_ctrl_realize(DeviceState *dev, Error **errp) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + XlnxXramCtrl *s = XLNX_XRAM_CTRL(dev); + + switch (s->cfg.size) { + case 64 * KiB: + s->cfg.encoded_size = 0; + break; + case 128 * KiB: + s->cfg.encoded_size = 1; + break; + case 256 * KiB: + s->cfg.encoded_size = 2; + break; + case 512 * KiB: + s->cfg.encoded_size = 3; + break; + case 1 * MiB: + s->cfg.encoded_size = 4; + break; + default: + error_setg(errp, "Unsupported XRAM size %" PRId64, s->cfg.size); + return; + } + + memory_region_init_ram(&s->ram, OBJECT(s), + object_get_canonical_path_component(OBJECT(s)), + s->cfg.size, &error_fatal); + sysbus_init_mmio(sbd, &s->ram); +} + +static void xram_ctrl_init(Object *obj) +{ + XlnxXramCtrl *s = XLNX_XRAM_CTRL(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + + s->reg_array = + register_init_block32(DEVICE(obj), xram_ctrl_regs_info, + ARRAY_SIZE(xram_ctrl_regs_info), + s->regs_info, s->regs, + &xram_ctrl_ops, + XLNX_XRAM_CTRL_ERR_DEBUG, + XRAM_CTRL_R_MAX * 4); + sysbus_init_mmio(sbd, &s->reg_array->mem); + sysbus_init_irq(sbd, &s->irq); +} + +static void xram_ctrl_finalize(Object *obj) +{ + XlnxXramCtrl *s = XLNX_XRAM_CTRL(obj); + register_finalize_block(s->reg_array); +} + +static const VMStateDescription vmstate_xram_ctrl = { + .name = TYPE_XLNX_XRAM_CTRL, + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32_ARRAY(regs, XlnxXramCtrl, XRAM_CTRL_R_MAX), + VMSTATE_END_OF_LIST(), + } +}; + +static Property xram_ctrl_properties[] = { + DEFINE_PROP_UINT64("size", XlnxXramCtrl, cfg.size, 1 * MiB), + DEFINE_PROP_END_OF_LIST(), +}; + +static void xram_ctrl_class_init(ObjectClass *klass, void *data) +{ + ResettableClass *rc = RESETTABLE_CLASS(klass); + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = xram_ctrl_realize; + dc->vmsd = &vmstate_xram_ctrl; + device_class_set_props(dc, xram_ctrl_properties); + + rc->phases.enter = xram_ctrl_reset_enter; + rc->phases.hold = xram_ctrl_reset_hold; +} + +static const TypeInfo xram_ctrl_info = { + .name = TYPE_XLNX_XRAM_CTRL, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(XlnxXramCtrl), + .class_init = xram_ctrl_class_init, + .instance_init = xram_ctrl_init, + .instance_finalize = xram_ctrl_finalize, +}; + +static void xram_ctrl_register_types(void) +{ + type_register_static(&xram_ctrl_info); +} + +type_init(xram_ctrl_register_types) diff --git a/hw/misc/zynq_slcr.c b/hw/misc/zynq_slcr.c new file mode 100644 index 000000000..8b7028596 --- /dev/null +++ b/hw/misc/zynq_slcr.c @@ -0,0 +1,637 @@ +/* + * Status and system control registers for Xilinx Zynq Platform + * + * Copyright (c) 2011 Michal Simek + * Copyright (c) 2012 PetaLogix Pty Ltd. 
+ * Based on hw/arm_sysctl.c, written by Paul Brook + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . + */ + +#include "qemu/osdep.h" +#include "qemu/timer.h" +#include "sysemu/runstate.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "hw/registerfields.h" +#include "hw/qdev-clock.h" +#include "qom/object.h" + +#ifndef ZYNQ_SLCR_ERR_DEBUG +#define ZYNQ_SLCR_ERR_DEBUG 0 +#endif + +#define DB_PRINT(...) do { \ + if (ZYNQ_SLCR_ERR_DEBUG) { \ + fprintf(stderr, ": %s: ", __func__); \ + fprintf(stderr, ## __VA_ARGS__); \ + } \ + } while (0) + +#define XILINX_LOCK_KEY 0x767b +#define XILINX_UNLOCK_KEY 0xdf0d + +REG32(SCL, 0x000) +REG32(LOCK, 0x004) +REG32(UNLOCK, 0x008) +REG32(LOCKSTA, 0x00c) + +REG32(ARM_PLL_CTRL, 0x100) +REG32(DDR_PLL_CTRL, 0x104) +REG32(IO_PLL_CTRL, 0x108) +/* fields for [ARM|DDR|IO]_PLL_CTRL registers */ + FIELD(xxx_PLL_CTRL, PLL_RESET, 0, 1) + FIELD(xxx_PLL_CTRL, PLL_PWRDWN, 1, 1) + FIELD(xxx_PLL_CTRL, PLL_BYPASS_QUAL, 3, 1) + FIELD(xxx_PLL_CTRL, PLL_BYPASS_FORCE, 4, 1) + FIELD(xxx_PLL_CTRL, PLL_FPDIV, 12, 7) +REG32(PLL_STATUS, 0x10c) +REG32(ARM_PLL_CFG, 0x110) +REG32(DDR_PLL_CFG, 0x114) +REG32(IO_PLL_CFG, 0x118) + +REG32(ARM_CLK_CTRL, 0x120) +REG32(DDR_CLK_CTRL, 0x124) +REG32(DCI_CLK_CTRL, 0x128) +REG32(APER_CLK_CTRL, 0x12c) +REG32(USB0_CLK_CTRL, 0x130) +REG32(USB1_CLK_CTRL, 0x134) +REG32(GEM0_RCLK_CTRL, 0x138) +REG32(GEM1_RCLK_CTRL, 0x13c) +REG32(GEM0_CLK_CTRL, 0x140) +REG32(GEM1_CLK_CTRL, 0x144) +REG32(SMC_CLK_CTRL, 0x148) +REG32(LQSPI_CLK_CTRL, 0x14c) +REG32(SDIO_CLK_CTRL, 0x150) +REG32(UART_CLK_CTRL, 0x154) + FIELD(UART_CLK_CTRL, CLKACT0, 0, 1) + FIELD(UART_CLK_CTRL, CLKACT1, 1, 1) + FIELD(UART_CLK_CTRL, SRCSEL, 4, 2) + FIELD(UART_CLK_CTRL, DIVISOR, 8, 6) +REG32(SPI_CLK_CTRL, 0x158) +REG32(CAN_CLK_CTRL, 0x15c) +REG32(CAN_MIOCLK_CTRL, 0x160) +REG32(DBG_CLK_CTRL, 0x164) +REG32(PCAP_CLK_CTRL, 0x168) +REG32(TOPSW_CLK_CTRL, 0x16c) + +#define FPGA_CTRL_REGS(n, start) \ + REG32(FPGA ## n ## _CLK_CTRL, (start)) \ + REG32(FPGA ## n ## _THR_CTRL, (start) + 0x4)\ + REG32(FPGA ## n ## _THR_CNT, (start) + 0x8)\ + REG32(FPGA ## n ## _THR_STA, (start) + 0xc) +FPGA_CTRL_REGS(0, 0x170) +FPGA_CTRL_REGS(1, 0x180) +FPGA_CTRL_REGS(2, 0x190) +FPGA_CTRL_REGS(3, 0x1a0) + +REG32(BANDGAP_TRIP, 0x1b8) +REG32(PLL_PREDIVISOR, 0x1c0) +REG32(CLK_621_TRUE, 0x1c4) + +REG32(PSS_RST_CTRL, 0x200) + FIELD(PSS_RST_CTRL, SOFT_RST, 0, 1) +REG32(DDR_RST_CTRL, 0x204) +REG32(TOPSW_RESET_CTRL, 0x208) +REG32(DMAC_RST_CTRL, 0x20c) +REG32(USB_RST_CTRL, 0x210) +REG32(GEM_RST_CTRL, 0x214) +REG32(SDIO_RST_CTRL, 0x218) +REG32(SPI_RST_CTRL, 0x21c) +REG32(CAN_RST_CTRL, 0x220) +REG32(I2C_RST_CTRL, 0x224) +REG32(UART_RST_CTRL, 0x228) +REG32(GPIO_RST_CTRL, 0x22c) +REG32(LQSPI_RST_CTRL, 0x230) +REG32(SMC_RST_CTRL, 0x234) +REG32(OCM_RST_CTRL, 0x238) +REG32(FPGA_RST_CTRL, 0x240) +REG32(A9_CPU_RST_CTRL, 0x244) + +REG32(RS_AWDT_CTRL, 0x24c) +REG32(RST_REASON, 0x250) + +REG32(REBOOT_STATUS, 0x258) +REG32(BOOT_MODE, 0x25c) + +REG32(APU_CTRL, 0x300) +REG32(WDT_CLK_SEL, 0x304) + +REG32(TZ_DMA_NS, 0x440) +REG32(TZ_DMA_IRQ_NS, 0x444) +REG32(TZ_DMA_PERIPH_NS, 0x448) + +REG32(PSS_IDCODE, 0x530) + +REG32(DDR_URGENT, 0x600) +REG32(DDR_CAL_START, 
0x60c) +REG32(DDR_REF_START, 0x614) +REG32(DDR_CMD_STA, 0x618) +REG32(DDR_URGENT_SEL, 0x61c) +REG32(DDR_DFI_STATUS, 0x620) + +REG32(MIO, 0x700) +#define MIO_LENGTH 54 + +REG32(MIO_LOOPBACK, 0x804) +REG32(MIO_MST_TRI0, 0x808) +REG32(MIO_MST_TRI1, 0x80c) + +REG32(SD0_WP_CD_SEL, 0x830) +REG32(SD1_WP_CD_SEL, 0x834) + +REG32(LVL_SHFTR_EN, 0x900) +REG32(OCM_CFG, 0x910) + +REG32(CPU_RAM, 0xa00) + +REG32(IOU, 0xa30) + +REG32(DMAC_RAM, 0xa50) + +REG32(AFI0, 0xa60) +REG32(AFI1, 0xa6c) +REG32(AFI2, 0xa78) +REG32(AFI3, 0xa84) +#define AFI_LENGTH 3 + +REG32(OCM, 0xa90) + +REG32(DEVCI_RAM, 0xaa0) + +REG32(CSG_RAM, 0xab0) + +REG32(GPIOB_CTRL, 0xb00) +REG32(GPIOB_CFG_CMOS18, 0xb04) +REG32(GPIOB_CFG_CMOS25, 0xb08) +REG32(GPIOB_CFG_CMOS33, 0xb0c) +REG32(GPIOB_CFG_HSTL, 0xb14) +REG32(GPIOB_DRVR_BIAS_CTRL, 0xb18) + +REG32(DDRIOB, 0xb40) +#define DDRIOB_LENGTH 14 + +#define ZYNQ_SLCR_MMIO_SIZE 0x1000 +#define ZYNQ_SLCR_NUM_REGS (ZYNQ_SLCR_MMIO_SIZE / 4) + +#define TYPE_ZYNQ_SLCR "xilinx-zynq_slcr" +OBJECT_DECLARE_SIMPLE_TYPE(ZynqSLCRState, ZYNQ_SLCR) + +struct ZynqSLCRState { + SysBusDevice parent_obj; + + MemoryRegion iomem; + + uint32_t regs[ZYNQ_SLCR_NUM_REGS]; + + Clock *ps_clk; + Clock *uart0_ref_clk; + Clock *uart1_ref_clk; +}; + +/* + * Return the output period of the ARM/DDR/IO PLL, given the input + * (ps_clk) period and the PLL_CTRL register value. The QEMU clock API + * works in periods, so multiplying the frequency corresponds to + * dividing the period here. + */ +static uint64_t zynq_slcr_compute_pll(uint64_t input, uint32_t ctrl_reg) +{ + uint32_t mult = ((ctrl_reg & R_xxx_PLL_CTRL_PLL_FPDIV_MASK) >> + R_xxx_PLL_CTRL_PLL_FPDIV_SHIFT); + + /* first, check if the PLL is bypassed */ + if (ctrl_reg & R_xxx_PLL_CTRL_PLL_BYPASS_FORCE_MASK) { + return input; + } + + /* is the PLL disabled? */ + if (ctrl_reg & (R_xxx_PLL_CTRL_PLL_RESET_MASK | + R_xxx_PLL_CTRL_PLL_PWRDWN_MASK)) { + return 0; + } + + /* Consider zero feedback as maximum divide ratio possible */ + if (!mult) { + mult = 1 << R_xxx_PLL_CTRL_PLL_FPDIV_LENGTH; + } + + /* frequency multiplier -> period division */ + return input / mult; +} + +/* + * return the output period of a clock given: + * + the periods in an array corresponding to input mux selector + * + the register xxx_CLK_CTRL value + * + enable bit index in ctrl register + * + * This function makes the assumption that the ctrl_reg value is organized as + * follows: + * + bits[13:8] clock frequency divisor + * + bits[5:4] clock mux selector (index in array) + * + bits[index] clock enable + */ +static uint64_t zynq_slcr_compute_clock(const uint64_t periods[], + uint32_t ctrl_reg, + unsigned index) +{ + uint32_t srcsel = extract32(ctrl_reg, 4, 2); /* bits [5:4] */ + uint32_t divisor = extract32(ctrl_reg, 8, 6); /* bits [13:8] */ + + /* first, check if clock is disabled */ + if (((ctrl_reg >> index) & 1u) == 0) { + return 0; + } + + /* + * according to the Zynq technical ref. manual UG585 v1.12.2 in + * Clocks chapter, section 25.10.1 page 705: + * "The 6-bit divider provides a divide range of 1 to 63" + * We follow here what is implemented in linux kernel and consider + * the 0 value as a bypass (no division). + */ + /* frequency divisor -> period multiplication */ + return periods[srcsel] * (divisor ? divisor : 1u); +} + +/* + * macro helper around zynq_slcr_compute_clock to avoid repeating + * the register name.
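+ * For instance, ZYNQ_COMPUTE_CLK(s, uart_mux, R_UART_CLK_CTRL, CLKACT0) + * expands to zynq_slcr_compute_clock(uart_mux, s->regs[R_UART_CLK_CTRL], + * R_UART_CLK_CTRL_CLKACT0_SHIFT).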
+ */ +#define ZYNQ_COMPUTE_CLK(state, plls, reg, enable_field) \ + zynq_slcr_compute_clock((plls), (state)->regs[reg], \ + reg ## _ ## enable_field ## _SHIFT) + +static void zynq_slcr_compute_clocks_internal(ZynqSLCRState *s, uint64_t ps_clk) +{ + uint64_t io_pll = zynq_slcr_compute_pll(ps_clk, s->regs[R_IO_PLL_CTRL]); + uint64_t arm_pll = zynq_slcr_compute_pll(ps_clk, s->regs[R_ARM_PLL_CTRL]); + uint64_t ddr_pll = zynq_slcr_compute_pll(ps_clk, s->regs[R_DDR_PLL_CTRL]); + + uint64_t uart_mux[4] = {io_pll, io_pll, arm_pll, ddr_pll}; + + /* compute uartX reference clocks */ + clock_set(s->uart0_ref_clk, + ZYNQ_COMPUTE_CLK(s, uart_mux, R_UART_CLK_CTRL, CLKACT0)); + clock_set(s->uart1_ref_clk, + ZYNQ_COMPUTE_CLK(s, uart_mux, R_UART_CLK_CTRL, CLKACT1)); +} + +/** + * Compute and set the output clock periods, but do not propagate + * them further: connected clocks will not receive any updates + * (see zynq_slcr_propagate_clocks()). + */ +static void zynq_slcr_compute_clocks(ZynqSLCRState *s) +{ + uint64_t ps_clk = clock_get(s->ps_clk); + + /* consider the output clocks disabled while in reset */ + if (device_is_in_reset(DEVICE(s))) { + ps_clk = 0; + } + + zynq_slcr_compute_clocks_internal(s, ps_clk); +} + +/** + * Propagate the output clocks. + * zynq_slcr_compute_clocks() should have been called before + * to configure them. + */ +static void zynq_slcr_propagate_clocks(ZynqSLCRState *s) +{ + clock_propagate(s->uart0_ref_clk); + clock_propagate(s->uart1_ref_clk); +} + +static void zynq_slcr_ps_clk_callback(void *opaque, ClockEvent event) +{ + ZynqSLCRState *s = (ZynqSLCRState *) opaque; + + zynq_slcr_compute_clocks(s); + zynq_slcr_propagate_clocks(s); +} + +static void zynq_slcr_reset_init(Object *obj, ResetType type) +{ + ZynqSLCRState *s = ZYNQ_SLCR(obj); + int i; + + DB_PRINT("RESET\n"); + + s->regs[R_LOCKSTA] = 1; + /* 0x100 - 0x11C */ + s->regs[R_ARM_PLL_CTRL] = 0x0001A008; + s->regs[R_DDR_PLL_CTRL] = 0x0001A008; + s->regs[R_IO_PLL_CTRL] = 0x0001A008; + s->regs[R_PLL_STATUS] = 0x0000003F; + s->regs[R_ARM_PLL_CFG] = 0x00014000; + s->regs[R_DDR_PLL_CFG] = 0x00014000; + s->regs[R_IO_PLL_CFG] = 0x00014000; + + /* 0x120 - 0x16C */ + s->regs[R_ARM_CLK_CTRL] = 0x1F000400; + s->regs[R_DDR_CLK_CTRL] = 0x18400003; + s->regs[R_DCI_CLK_CTRL] = 0x01E03201; + s->regs[R_APER_CLK_CTRL] = 0x01FFCCCD; + s->regs[R_USB0_CLK_CTRL] = s->regs[R_USB1_CLK_CTRL] = 0x00101941; + s->regs[R_GEM0_RCLK_CTRL] = s->regs[R_GEM1_RCLK_CTRL] = 0x00000001; + s->regs[R_GEM0_CLK_CTRL] = s->regs[R_GEM1_CLK_CTRL] = 0x00003C01; + s->regs[R_SMC_CLK_CTRL] = 0x00003C01; + s->regs[R_LQSPI_CLK_CTRL] = 0x00002821; + s->regs[R_SDIO_CLK_CTRL] = 0x00001E03; + s->regs[R_UART_CLK_CTRL] = 0x00003F03; + s->regs[R_SPI_CLK_CTRL] = 0x00003F03; + s->regs[R_CAN_CLK_CTRL] = 0x00501903; + s->regs[R_DBG_CLK_CTRL] = 0x00000F03; + s->regs[R_PCAP_CLK_CTRL] = 0x00000F01; + + /* 0x170 - 0x1AC */ + s->regs[R_FPGA0_CLK_CTRL] = s->regs[R_FPGA1_CLK_CTRL] + = s->regs[R_FPGA2_CLK_CTRL] + = s->regs[R_FPGA3_CLK_CTRL] = 0x00101800; + s->regs[R_FPGA0_THR_STA] = s->regs[R_FPGA1_THR_STA] + = s->regs[R_FPGA2_THR_STA] + = s->regs[R_FPGA3_THR_STA] = 0x00010000; + + /* 0x1B0 - 0x1D8 */ + s->regs[R_BANDGAP_TRIP] = 0x0000001F; + s->regs[R_PLL_PREDIVISOR] = 0x00000001; + s->regs[R_CLK_621_TRUE] = 0x00000001; + + /* 0x200 - 0x25C */ + s->regs[R_FPGA_RST_CTRL] = 0x01F33F0F; + s->regs[R_RST_REASON] = 0x00000040; + + s->regs[R_BOOT_MODE] = 0x00000001; + + /* 0x700 - 0x7D4 */ + for (i = 0; i < 54; i++) { + s->regs[R_MIO + i] = 0x00001601; + } + for (i = 2; i <= 8; i++) { +
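/* MIO pins 2..8 are then overridden with a different reset default */ +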
s->regs[R_MIO + i] = 0x00000601; + } + + s->regs[R_MIO_MST_TRI0] = s->regs[R_MIO_MST_TRI1] = 0xFFFFFFFF; + + s->regs[R_CPU_RAM + 0] = s->regs[R_CPU_RAM + 1] = s->regs[R_CPU_RAM + 3] + = s->regs[R_CPU_RAM + 4] = s->regs[R_CPU_RAM + 7] + = 0x00010101; + s->regs[R_CPU_RAM + 2] = s->regs[R_CPU_RAM + 5] = 0x01010101; + s->regs[R_CPU_RAM + 6] = 0x00000001; + + s->regs[R_IOU + 0] = s->regs[R_IOU + 1] = s->regs[R_IOU + 2] + = s->regs[R_IOU + 3] = 0x09090909; + s->regs[R_IOU + 4] = s->regs[R_IOU + 5] = 0x00090909; + s->regs[R_IOU + 6] = 0x00000909; + + s->regs[R_DMAC_RAM] = 0x00000009; + + s->regs[R_AFI0 + 0] = s->regs[R_AFI0 + 1] = 0x09090909; + s->regs[R_AFI1 + 0] = s->regs[R_AFI1 + 1] = 0x09090909; + s->regs[R_AFI2 + 0] = s->regs[R_AFI2 + 1] = 0x09090909; + s->regs[R_AFI3 + 0] = s->regs[R_AFI3 + 1] = 0x09090909; + s->regs[R_AFI0 + 2] = s->regs[R_AFI1 + 2] = s->regs[R_AFI2 + 2] + = s->regs[R_AFI3 + 2] = 0x00000909; + + s->regs[R_OCM + 0] = 0x01010101; + s->regs[R_OCM + 1] = s->regs[R_OCM + 2] = 0x09090909; + + s->regs[R_DEVCI_RAM] = 0x00000909; + s->regs[R_CSG_RAM] = 0x00000001; + + s->regs[R_DDRIOB + 0] = s->regs[R_DDRIOB + 1] = s->regs[R_DDRIOB + 2] + = s->regs[R_DDRIOB + 3] = 0x00000e00; + s->regs[R_DDRIOB + 4] = s->regs[R_DDRIOB + 5] = s->regs[R_DDRIOB + 6] + = 0x00000e00; + s->regs[R_DDRIOB + 12] = 0x00000021; +} + +static void zynq_slcr_reset_hold(Object *obj) +{ + ZynqSLCRState *s = ZYNQ_SLCR(obj); + + /* will disable all output clocks */ + zynq_slcr_compute_clocks_internal(s, 0); + zynq_slcr_propagate_clocks(s); +} + +static void zynq_slcr_reset_exit(Object *obj) +{ + ZynqSLCRState *s = ZYNQ_SLCR(obj); + + /* will compute output clocks according to ps_clk and registers */ + zynq_slcr_compute_clocks_internal(s, clock_get(s->ps_clk)); + zynq_slcr_propagate_clocks(s); +} + +static bool zynq_slcr_check_offset(hwaddr offset, bool rnw) +{ + switch (offset) { + case R_LOCK: + case R_UNLOCK: + case R_DDR_CAL_START: + case R_DDR_REF_START: + return !rnw; /* Write only */ + case R_LOCKSTA: + case R_FPGA0_THR_STA: + case R_FPGA1_THR_STA: + case R_FPGA2_THR_STA: + case R_FPGA3_THR_STA: + case R_BOOT_MODE: + case R_PSS_IDCODE: + case R_DDR_CMD_STA: + case R_DDR_DFI_STATUS: + case R_PLL_STATUS: + return rnw;/* read only */ + case R_SCL: + case R_ARM_PLL_CTRL ... R_IO_PLL_CTRL: + case R_ARM_PLL_CFG ... R_IO_PLL_CFG: + case R_ARM_CLK_CTRL ... R_TOPSW_CLK_CTRL: + case R_FPGA0_CLK_CTRL ... R_FPGA0_THR_CNT: + case R_FPGA1_CLK_CTRL ... R_FPGA1_THR_CNT: + case R_FPGA2_CLK_CTRL ... R_FPGA2_THR_CNT: + case R_FPGA3_CLK_CTRL ... R_FPGA3_THR_CNT: + case R_BANDGAP_TRIP: + case R_PLL_PREDIVISOR: + case R_CLK_621_TRUE: + case R_PSS_RST_CTRL ... R_A9_CPU_RST_CTRL: + case R_RS_AWDT_CTRL: + case R_RST_REASON: + case R_REBOOT_STATUS: + case R_APU_CTRL: + case R_WDT_CLK_SEL: + case R_TZ_DMA_NS ... R_TZ_DMA_PERIPH_NS: + case R_DDR_URGENT: + case R_DDR_URGENT_SEL: + case R_MIO ... R_MIO + MIO_LENGTH - 1: + case R_MIO_LOOPBACK ... R_MIO_MST_TRI1: + case R_SD0_WP_CD_SEL: + case R_SD1_WP_CD_SEL: + case R_LVL_SHFTR_EN: + case R_OCM_CFG: + case R_CPU_RAM: + case R_IOU: + case R_DMAC_RAM: + case R_AFI0 ... R_AFI3 + AFI_LENGTH - 1: + case R_OCM: + case R_DEVCI_RAM: + case R_CSG_RAM: + case R_GPIOB_CTRL ... R_GPIOB_CFG_CMOS33: + case R_GPIOB_CFG_HSTL: + case R_GPIOB_DRVR_BIAS_CTRL: + case R_DDRIOB ... 
R_DDRIOB + DDRIOB_LENGTH - 1: + return true; + default: + return false; + } +} + +static uint64_t zynq_slcr_read(void *opaque, hwaddr offset, + unsigned size) +{ + ZynqSLCRState *s = opaque; + offset /= 4; + uint32_t ret = s->regs[offset]; + + if (!zynq_slcr_check_offset(offset, true)) { + qemu_log_mask(LOG_GUEST_ERROR, "zynq_slcr: Invalid read access to " + "addr %" HWADDR_PRIx "\n", offset * 4); + } + + DB_PRINT("addr: %08" HWADDR_PRIx " data: %08" PRIx32 "\n", offset * 4, ret); + return ret; +} + +static void zynq_slcr_write(void *opaque, hwaddr offset, + uint64_t val, unsigned size) +{ + ZynqSLCRState *s = (ZynqSLCRState *)opaque; + offset /= 4; + + DB_PRINT("addr: %08" HWADDR_PRIx " data: %08" PRIx64 "\n", offset * 4, val); + + if (!zynq_slcr_check_offset(offset, false)) { + qemu_log_mask(LOG_GUEST_ERROR, "zynq_slcr: Invalid write access to " + "addr %" HWADDR_PRIx "\n", offset * 4); + return; + } + + switch (offset) { + case R_SCL: + s->regs[R_SCL] = val & 0x1; + return; + case R_LOCK: + if ((val & 0xFFFF) == XILINX_LOCK_KEY) { + DB_PRINT("XILINX LOCK 0xF8000000 + 0x%x <= 0x%x\n", (int)offset, + (unsigned)val & 0xFFFF); + s->regs[R_LOCKSTA] = 1; + } else { + DB_PRINT("WRONG XILINX LOCK KEY 0xF8000000 + 0x%x <= 0x%x\n", + (int)offset, (unsigned)val & 0xFFFF); + } + return; + case R_UNLOCK: + if ((val & 0xFFFF) == XILINX_UNLOCK_KEY) { + DB_PRINT("XILINX UNLOCK 0xF8000000 + 0x%x <= 0x%x\n", (int)offset, + (unsigned)val & 0xFFFF); + s->regs[R_LOCKSTA] = 0; + } else { + DB_PRINT("WRONG XILINX UNLOCK KEY 0xF8000000 + 0x%x <= 0x%x\n", + (int)offset, (unsigned)val & 0xFFFF); + } + return; + } + + if (s->regs[R_LOCKSTA]) { + qemu_log_mask(LOG_GUEST_ERROR, + "SLCR registers are locked. Unlock them first\n"); + return; + } + s->regs[offset] = val; + + switch (offset) { + case R_PSS_RST_CTRL: + if (FIELD_EX32(val, PSS_RST_CTRL, SOFT_RST)) { + qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); + } + break; + case R_IO_PLL_CTRL: + case R_ARM_PLL_CTRL: + case R_DDR_PLL_CTRL: + case R_UART_CLK_CTRL: + zynq_slcr_compute_clocks(s); + zynq_slcr_propagate_clocks(s); + break; + } +} + +static const MemoryRegionOps slcr_ops = { + .read = zynq_slcr_read, + .write = zynq_slcr_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static const ClockPortInitArray zynq_slcr_clocks = { + QDEV_CLOCK_IN(ZynqSLCRState, ps_clk, zynq_slcr_ps_clk_callback, ClockUpdate), + QDEV_CLOCK_OUT(ZynqSLCRState, uart0_ref_clk), + QDEV_CLOCK_OUT(ZynqSLCRState, uart1_ref_clk), + QDEV_CLOCK_END +}; + +static void zynq_slcr_init(Object *obj) +{ + ZynqSLCRState *s = ZYNQ_SLCR(obj); + + memory_region_init_io(&s->iomem, obj, &slcr_ops, s, "slcr", + ZYNQ_SLCR_MMIO_SIZE); + sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->iomem); + + qdev_init_clocks(DEVICE(obj), zynq_slcr_clocks); +} + +static const VMStateDescription vmstate_zynq_slcr = { + .name = "zynq_slcr", + .version_id = 3, + .minimum_version_id = 2, + .fields = (VMStateField[]) { + VMSTATE_UINT32_ARRAY(regs, ZynqSLCRState, ZYNQ_SLCR_NUM_REGS), + VMSTATE_CLOCK_V(ps_clk, ZynqSLCRState, 3), + VMSTATE_END_OF_LIST() + } +}; + +static void zynq_slcr_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + ResettableClass *rc = RESETTABLE_CLASS(klass); + + dc->vmsd = &vmstate_zynq_slcr; + rc->phases.enter = zynq_slcr_reset_init; + rc->phases.hold = zynq_slcr_reset_hold; + rc->phases.exit = zynq_slcr_reset_exit; +} + +static const TypeInfo zynq_slcr_info = { + .class_init = zynq_slcr_class_init, + .name = TYPE_ZYNQ_SLCR, + .parent = TYPE_SYS_BUS_DEVICE, +
.instance_size = sizeof(ZynqSLCRState), + .instance_init = zynq_slcr_init, +}; + +static void zynq_slcr_register_types(void) +{ + type_register_static(&zynq_slcr_info); +} + +type_init(zynq_slcr_register_types) diff --git a/hw/net/Kconfig b/hw/net/Kconfig new file mode 100644 index 000000000..6d795ec75 --- /dev/null +++ b/hw/net/Kconfig @@ -0,0 +1,155 @@ +config DP8393X + bool + +config NE2000_COMMON + bool + +config NE2000_PCI + bool + default y if PCI_DEVICES + depends on PCI + select NE2000_COMMON + +config EEPRO100_PCI + bool + default y if PCI_DEVICES + depends on PCI + select NMC93XX_EEPROM + +config PCNET_PCI + bool + default y if PCI_DEVICES + depends on PCI + select PCNET_COMMON + +config PCNET_COMMON + bool + +config TULIP + bool + default y if PCI_DEVICES + depends on PCI + select NMC93XX_EEPROM + +config I82596_COMMON + bool + +config E1000_PCI + bool + default y if PCI_DEVICES + depends on PCI + +config E1000E_PCI_EXPRESS + bool + default y if PCI_DEVICES + depends on PCI_EXPRESS && MSI_NONBROKEN + +config RTL8139_PCI + bool + default y if PCI_DEVICES + depends on PCI + +config VMXNET3_PCI + bool + default y if PCI_DEVICES + depends on PCI + +config SMC91C111 + bool + +config LAN9118 + bool + select PTIMER + +config NE2000_ISA + bool + default y + depends on ISA_BUS + select NE2000_COMMON + +config OPENCORES_ETH + bool + +config XGMAC + bool + +config MIPSNET + bool + +config ALLWINNER_EMAC + bool + +config ALLWINNER_SUN8I_EMAC + bool + +config IMX_FEC + bool + +config CADENCE + bool + +config STELLARIS_ENET + bool + +config LANCE + bool + select PCNET_COMMON + +config LASI_82596 + bool + select I82596_COMMON + +config SUNHME + bool + +config FTGMAC100 + bool + +config SUNGEM + bool + depends on PCI + +config COLDFIRE + bool + +config XILINX_ETHLITE + bool + +config VIRTIO_NET + bool + default y + depends on VIRTIO + +config ETSEC + bool + select PTIMER + +config ROCKER + bool + default y if PCI_DEVICES + depends on PCI && MSI_NONBROKEN + +config CAN_BUS + bool + +config CAN_SJA1000 + bool + default y if PCI_DEVICES + select CAN_BUS + +config CAN_PCI + bool + default y if PCI_DEVICES + depends on PCI && CAN_SJA1000 + select CAN_BUS + +config CAN_CTUCANFD + bool + default y if PCI_DEVICES + select CAN_BUS + +config CAN_CTUCANFD_PCI + bool + default y if PCI_DEVICES + depends on PCI && CAN_CTUCANFD + select CAN_BUS diff --git a/hw/net/allwinner-sun8i-emac.c b/hw/net/allwinner-sun8i-emac.c new file mode 100644 index 000000000..ff611f18f --- /dev/null +++ b/hw/net/allwinner-sun8i-emac.c @@ -0,0 +1,887 @@ +/* + * Allwinner Sun8i Ethernet MAC emulation + * + * Copyright (C) 2019 Niek Linnenbank + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "qapi/error.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "net/net.h" +#include "hw/irq.h" +#include "hw/qdev-properties.h" +#include "qemu/log.h" +#include "trace.h" +#include "net/checksum.h" +#include "qemu/module.h" +#include "exec/cpu-common.h" +#include "sysemu/dma.h" +#include "hw/net/allwinner-sun8i-emac.h" + +/* EMAC register offsets */ +enum { + REG_BASIC_CTL_0 = 0x0000, /* Basic Control 0 */ + REG_BASIC_CTL_1 = 0x0004, /* Basic Control 1 */ + REG_INT_STA = 0x0008, /* Interrupt Status */ + REG_INT_EN = 0x000C, /* Interrupt Enable */ + REG_TX_CTL_0 = 0x0010, /* Transmit Control 0 */ + REG_TX_CTL_1 = 0x0014, /* Transmit Control 1 */ + REG_TX_FLOW_CTL = 0x001C, /* Transmit Flow Control */ + REG_TX_DMA_DESC_LIST = 0x0020, /* Transmit Descriptor List Address */ + REG_RX_CTL_0 = 0x0024, /* Receive Control 0 */ + REG_RX_CTL_1 = 0x0028, /* Receive Control 1 */ + REG_RX_DMA_DESC_LIST = 0x0034, /* Receive Descriptor List Address */ + REG_FRM_FLT = 0x0038, /* Receive Frame Filter */ + REG_RX_HASH_0 = 0x0040, /* Receive Hash Table 0 */ + REG_RX_HASH_1 = 0x0044, /* Receive Hash Table 1 */ + REG_MII_CMD = 0x0048, /* Management Interface Command */ + REG_MII_DATA = 0x004C, /* Management Interface Data */ + REG_ADDR_HIGH = 0x0050, /* MAC Address High */ + REG_ADDR_LOW = 0x0054, /* MAC Address Low */ + REG_TX_DMA_STA = 0x00B0, /* Transmit DMA Status */ + REG_TX_CUR_DESC = 0x00B4, /* Transmit Current Descriptor */ + REG_TX_CUR_BUF = 0x00B8, /* Transmit Current Buffer */ + REG_RX_DMA_STA = 0x00C0, /* Receive DMA Status */ + REG_RX_CUR_DESC = 0x00C4, /* Receive Current Descriptor */ + REG_RX_CUR_BUF = 0x00C8, /* Receive Current Buffer */ + REG_RGMII_STA = 0x00D0, /* RGMII Status */ +}; + +/* EMAC register flags */ +enum { + BASIC_CTL0_100Mbps = (0b11 << 2), + BASIC_CTL0_FD = (1 << 0), + BASIC_CTL1_SOFTRST = (1 << 0), +}; + +enum { + INT_STA_RGMII_LINK = (1 << 16), + INT_STA_RX_EARLY = (1 << 13), + INT_STA_RX_OVERFLOW = (1 << 12), + INT_STA_RX_TIMEOUT = (1 << 11), + INT_STA_RX_DMA_STOP = (1 << 10), + INT_STA_RX_BUF_UA = (1 << 9), + INT_STA_RX = (1 << 8), + INT_STA_TX_EARLY = (1 << 5), + INT_STA_TX_UNDERFLOW = (1 << 4), + INT_STA_TX_TIMEOUT = (1 << 3), + INT_STA_TX_BUF_UA = (1 << 2), + INT_STA_TX_DMA_STOP = (1 << 1), + INT_STA_TX = (1 << 0), +}; + +enum { + INT_EN_RX_EARLY = (1 << 13), + INT_EN_RX_OVERFLOW = (1 << 12), + INT_EN_RX_TIMEOUT = (1 << 11), + INT_EN_RX_DMA_STOP = (1 << 10), + INT_EN_RX_BUF_UA = (1 << 9), + INT_EN_RX = (1 << 8), + INT_EN_TX_EARLY = (1 << 5), + INT_EN_TX_UNDERFLOW = (1 << 4), + INT_EN_TX_TIMEOUT = (1 << 3), + INT_EN_TX_BUF_UA = (1 << 2), + INT_EN_TX_DMA_STOP = (1 << 1), + INT_EN_TX = (1 << 0), +}; + +enum { + TX_CTL0_TX_EN = (1 << 31), + TX_CTL1_TX_DMA_START = (1 << 31), + TX_CTL1_TX_DMA_EN = (1 << 30), + TX_CTL1_TX_FLUSH = (1 << 0), +}; + +enum { + RX_CTL0_RX_EN = (1 << 31), + RX_CTL0_STRIP_FCS = (1 << 28), + RX_CTL0_CRC_IPV4 = (1 << 27), +}; + +enum { + RX_CTL1_RX_DMA_START = (1 << 31), + RX_CTL1_RX_DMA_EN = (1 << 30), + RX_CTL1_RX_MD = (1 << 1), +}; + +enum { + RX_FRM_FLT_DIS_ADDR = (1 << 31), +}; + +enum { + MII_CMD_PHY_ADDR_SHIFT = (12), + MII_CMD_PHY_ADDR_MASK = (0xf000), + MII_CMD_PHY_REG_SHIFT = (4), + MII_CMD_PHY_REG_MASK = (0xf0), + MII_CMD_PHY_RW = (1 << 1), + MII_CMD_PHY_BUSY = (1 << 0), +}; + +enum { + TX_DMA_STA_STOP = (0b000), + TX_DMA_STA_RUN_FETCH = (0b001), + TX_DMA_STA_WAIT_STA = (0b010), +}; + +enum { + RX_DMA_STA_STOP = (0b000), + RX_DMA_STA_RUN_FETCH = (0b001), + 
RX_DMA_STA_WAIT_FRM = (0b011), +}; + +/* EMAC register reset values */ +enum { + REG_BASIC_CTL_1_RST = 0x08000000, +}; + +/* EMAC constants */ +enum { + AW_SUN8I_EMAC_MIN_PKT_SZ = 64 +}; + +/* Transmit/receive frame descriptor */ +typedef struct FrameDescriptor { + uint32_t status; + uint32_t status2; + uint32_t addr; + uint32_t next; +} FrameDescriptor; + +/* Frame descriptor flags */ +enum { + DESC_STATUS_CTL = (1 << 31), + DESC_STATUS2_BUF_SIZE_MASK = (0x7ff), +}; + +/* Transmit frame descriptor flags */ +enum { + TX_DESC_STATUS_LENGTH_ERR = (1 << 14), + TX_DESC_STATUS2_FIRST_DESC = (1 << 29), + TX_DESC_STATUS2_LAST_DESC = (1 << 30), + TX_DESC_STATUS2_CHECKSUM_MASK = (0x3 << 27), +}; + +/* Receive frame descriptor flags */ +enum { + RX_DESC_STATUS_FIRST_DESC = (1 << 9), + RX_DESC_STATUS_LAST_DESC = (1 << 8), + RX_DESC_STATUS_FRM_LEN_MASK = (0x3fff0000), + RX_DESC_STATUS_FRM_LEN_SHIFT = (16), + RX_DESC_STATUS_NO_BUF = (1 << 14), + RX_DESC_STATUS_HEADER_ERR = (1 << 7), + RX_DESC_STATUS_LENGTH_ERR = (1 << 4), + RX_DESC_STATUS_CRC_ERR = (1 << 1), + RX_DESC_STATUS_PAYLOAD_ERR = (1 << 0), + RX_DESC_STATUS2_RX_INT_CTL = (1 << 31), +}; + +/* MII register offsets */ +enum { + MII_REG_CR = (0x0), /* Control */ + MII_REG_ST = (0x1), /* Status */ + MII_REG_ID_HIGH = (0x2), /* Identifier High */ + MII_REG_ID_LOW = (0x3), /* Identifier Low */ + MII_REG_ADV = (0x4), /* Advertised abilities */ + MII_REG_LPA = (0x5), /* Link partner abilities */ +}; + +/* MII register flags */ +enum { + MII_REG_CR_RESET = (1 << 15), + MII_REG_CR_POWERDOWN = (1 << 11), + MII_REG_CR_10Mbit = (0), + MII_REG_CR_100Mbit = (1 << 13), + MII_REG_CR_1000Mbit = (1 << 6), + MII_REG_CR_AUTO_NEG = (1 << 12), + MII_REG_CR_AUTO_NEG_RESTART = (1 << 9), + MII_REG_CR_FULLDUPLEX = (1 << 8), +}; + +enum { + MII_REG_ST_100BASE_T4 = (1 << 15), + MII_REG_ST_100BASE_X_FD = (1 << 14), + MII_REG_ST_100BASE_X_HD = (1 << 13), + MII_REG_ST_10_FD = (1 << 12), + MII_REG_ST_10_HD = (1 << 11), + MII_REG_ST_100BASE_T2_FD = (1 << 10), + MII_REG_ST_100BASE_T2_HD = (1 << 9), + MII_REG_ST_AUTONEG_COMPLETE = (1 << 5), + MII_REG_ST_AUTONEG_AVAIL = (1 << 3), + MII_REG_ST_LINK_UP = (1 << 2), +}; + +enum { + MII_REG_LPA_10_HD = (1 << 5), + MII_REG_LPA_10_FD = (1 << 6), + MII_REG_LPA_100_HD = (1 << 7), + MII_REG_LPA_100_FD = (1 << 8), + MII_REG_LPA_PAUSE = (1 << 10), + MII_REG_LPA_ASYMPAUSE = (1 << 11), +}; + +/* MII constants */ +enum { + MII_PHY_ID_HIGH = 0x0044, + MII_PHY_ID_LOW = 0x1400, +}; + +static void allwinner_sun8i_emac_mii_set_link(AwSun8iEmacState *s, + bool link_active) +{ + if (link_active) { + s->mii_st |= MII_REG_ST_LINK_UP; + } else { + s->mii_st &= ~MII_REG_ST_LINK_UP; + } +} + +static void allwinner_sun8i_emac_mii_reset(AwSun8iEmacState *s, + bool link_active) +{ + s->mii_cr = MII_REG_CR_100Mbit | MII_REG_CR_AUTO_NEG | + MII_REG_CR_FULLDUPLEX; + s->mii_st = MII_REG_ST_100BASE_T4 | MII_REG_ST_100BASE_X_FD | + MII_REG_ST_100BASE_X_HD | MII_REG_ST_10_FD | MII_REG_ST_10_HD | + MII_REG_ST_100BASE_T2_FD | MII_REG_ST_100BASE_T2_HD | + MII_REG_ST_AUTONEG_COMPLETE | MII_REG_ST_AUTONEG_AVAIL; + s->mii_adv = 0; + + allwinner_sun8i_emac_mii_set_link(s, link_active); +} + +static void allwinner_sun8i_emac_mii_cmd(AwSun8iEmacState *s) +{ + uint8_t addr, reg; + + addr = (s->mii_cmd & MII_CMD_PHY_ADDR_MASK) >> MII_CMD_PHY_ADDR_SHIFT; + reg = (s->mii_cmd & MII_CMD_PHY_REG_MASK) >> MII_CMD_PHY_REG_SHIFT; + + if (addr != s->mii_phy_addr) { + return; + } + + /* Read or write a PHY register? 
*/ + if (s->mii_cmd & MII_CMD_PHY_RW) { + trace_allwinner_sun8i_emac_mii_write_reg(reg, s->mii_data); + + switch (reg) { + case MII_REG_CR: + if (s->mii_data & MII_REG_CR_RESET) { + allwinner_sun8i_emac_mii_reset(s, s->mii_st & + MII_REG_ST_LINK_UP); + } else { + s->mii_cr = s->mii_data & ~(MII_REG_CR_RESET | + MII_REG_CR_AUTO_NEG_RESTART); + } + break; + case MII_REG_ADV: + s->mii_adv = s->mii_data; + break; + case MII_REG_ID_HIGH: + case MII_REG_ID_LOW: + case MII_REG_LPA: + break; + default: + qemu_log_mask(LOG_UNIMP, "allwinner-h3-emac: write access to " + "unknown MII register 0x%x\n", reg); + break; + } + } else { + switch (reg) { + case MII_REG_CR: + s->mii_data = s->mii_cr; + break; + case MII_REG_ST: + s->mii_data = s->mii_st; + break; + case MII_REG_ID_HIGH: + s->mii_data = MII_PHY_ID_HIGH; + break; + case MII_REG_ID_LOW: + s->mii_data = MII_PHY_ID_LOW; + break; + case MII_REG_ADV: + s->mii_data = s->mii_adv; + break; + case MII_REG_LPA: + s->mii_data = MII_REG_LPA_10_HD | MII_REG_LPA_10_FD | + MII_REG_LPA_100_HD | MII_REG_LPA_100_FD | + MII_REG_LPA_PAUSE | MII_REG_LPA_ASYMPAUSE; + break; + default: + qemu_log_mask(LOG_UNIMP, "allwinner-h3-emac: read access to " + "unknown MII register 0x%x\n", reg); + s->mii_data = 0; + break; + } + + trace_allwinner_sun8i_emac_mii_read_reg(reg, s->mii_data); + } +} + +static void allwinner_sun8i_emac_update_irq(AwSun8iEmacState *s) +{ + qemu_set_irq(s->irq, (s->int_sta & s->int_en) != 0); +} + +static bool allwinner_sun8i_emac_desc_owned(FrameDescriptor *desc, + size_t min_buf_size) +{ + return (desc->status & DESC_STATUS_CTL) && (min_buf_size == 0 || + (desc->status2 & DESC_STATUS2_BUF_SIZE_MASK) >= min_buf_size); +} + +static void allwinner_sun8i_emac_get_desc(AwSun8iEmacState *s, + FrameDescriptor *desc, + uint32_t phys_addr) +{ + dma_memory_read(&s->dma_as, phys_addr, desc, sizeof(*desc)); +} + +static uint32_t allwinner_sun8i_emac_next_desc(AwSun8iEmacState *s, + FrameDescriptor *desc) +{ + const uint32_t nxt = desc->next; + allwinner_sun8i_emac_get_desc(s, desc, nxt); + return nxt; +} + +static uint32_t allwinner_sun8i_emac_find_desc(AwSun8iEmacState *s, + FrameDescriptor *desc, + uint32_t start_addr, + size_t min_size) +{ + uint32_t desc_addr = start_addr; + + /* Note that the list is a cycle. Last entry points back to the head. 
*/ + while (desc_addr != 0) { + allwinner_sun8i_emac_get_desc(s, desc, desc_addr); + + if (allwinner_sun8i_emac_desc_owned(desc, min_size)) { + return desc_addr; + } else if (desc->next == start_addr) { + break; + } else { + desc_addr = desc->next; + } + } + + return 0; +} + +static uint32_t allwinner_sun8i_emac_rx_desc(AwSun8iEmacState *s, + FrameDescriptor *desc, + size_t min_size) +{ + return allwinner_sun8i_emac_find_desc(s, desc, s->rx_desc_curr, min_size); +} + +static uint32_t allwinner_sun8i_emac_tx_desc(AwSun8iEmacState *s, + FrameDescriptor *desc) +{ + allwinner_sun8i_emac_get_desc(s, desc, s->tx_desc_curr); + return s->tx_desc_curr; +} + +static void allwinner_sun8i_emac_flush_desc(AwSun8iEmacState *s, + FrameDescriptor *desc, + uint32_t phys_addr) +{ + dma_memory_write(&s->dma_as, phys_addr, desc, sizeof(*desc)); +} + +static bool allwinner_sun8i_emac_can_receive(NetClientState *nc) +{ + AwSun8iEmacState *s = qemu_get_nic_opaque(nc); + FrameDescriptor desc; + + return (s->rx_ctl0 & RX_CTL0_RX_EN) && + (allwinner_sun8i_emac_rx_desc(s, &desc, 0) != 0); +} + +static ssize_t allwinner_sun8i_emac_receive(NetClientState *nc, + const uint8_t *buf, + size_t size) +{ + AwSun8iEmacState *s = qemu_get_nic_opaque(nc); + FrameDescriptor desc; + size_t bytes_left = size; + size_t desc_bytes = 0; + size_t pad_fcs_size = 4; + size_t padding = 0; + + if (!(s->rx_ctl0 & RX_CTL0_RX_EN)) { + return -1; + } + + s->rx_desc_curr = allwinner_sun8i_emac_rx_desc(s, &desc, + AW_SUN8I_EMAC_MIN_PKT_SZ); + if (!s->rx_desc_curr) { + s->int_sta |= INT_STA_RX_BUF_UA; + } + + /* Keep filling RX descriptors until the whole frame is written */ + while (s->rx_desc_curr && bytes_left > 0) { + desc.status &= ~DESC_STATUS_CTL; + desc.status &= ~RX_DESC_STATUS_FRM_LEN_MASK; + + if (bytes_left == size) { + desc.status |= RX_DESC_STATUS_FIRST_DESC; + } + + if ((desc.status2 & DESC_STATUS2_BUF_SIZE_MASK) < + (bytes_left + pad_fcs_size)) { + desc_bytes = desc.status2 & DESC_STATUS2_BUF_SIZE_MASK; + desc.status |= desc_bytes << RX_DESC_STATUS_FRM_LEN_SHIFT; + } else { + padding = pad_fcs_size; + if (bytes_left < AW_SUN8I_EMAC_MIN_PKT_SZ) { + padding += (AW_SUN8I_EMAC_MIN_PKT_SZ - bytes_left); + } + + desc_bytes = (bytes_left); + desc.status |= RX_DESC_STATUS_LAST_DESC; + desc.status |= (bytes_left + padding) + << RX_DESC_STATUS_FRM_LEN_SHIFT; + } + + dma_memory_write(&s->dma_as, desc.addr, buf, desc_bytes); + allwinner_sun8i_emac_flush_desc(s, &desc, s->rx_desc_curr); + trace_allwinner_sun8i_emac_receive(s->rx_desc_curr, desc.addr, + desc_bytes); + + /* Check if frame needs to raise the receive interrupt */ + if (!(desc.status2 & RX_DESC_STATUS2_RX_INT_CTL)) { + s->int_sta |= INT_STA_RX; + } + + /* Increment variables */ + buf += desc_bytes; + bytes_left -= desc_bytes; + + /* Move to the next descriptor */ + s->rx_desc_curr = allwinner_sun8i_emac_find_desc(s, &desc, desc.next, + AW_SUN8I_EMAC_MIN_PKT_SZ); + if (!s->rx_desc_curr) { + /* Not enough buffer space available */ + s->int_sta |= INT_STA_RX_BUF_UA; + s->rx_desc_curr = s->rx_desc_head; + break; + } + } + + /* Report receive DMA is finished */ + s->rx_ctl1 &= ~RX_CTL1_RX_DMA_START; + allwinner_sun8i_emac_update_irq(s); + + return size; +} + +static void allwinner_sun8i_emac_transmit(AwSun8iEmacState *s) +{ + NetClientState *nc = qemu_get_queue(s->nic); + FrameDescriptor desc; + size_t bytes = 0; + size_t packet_bytes = 0; + size_t transmitted = 0; + static uint8_t packet_buf[2048]; + + s->tx_desc_curr = allwinner_sun8i_emac_tx_desc(s, &desc); + + /* Read all transmit 
descriptors */ + while (allwinner_sun8i_emac_desc_owned(&desc, 0)) { + + /* Read from physical memory into packet buffer */ + bytes = desc.status2 & DESC_STATUS2_BUF_SIZE_MASK; + if (bytes + packet_bytes > sizeof(packet_buf)) { + desc.status |= TX_DESC_STATUS_LENGTH_ERR; + break; + } + dma_memory_read(&s->dma_as, desc.addr, packet_buf + packet_bytes, bytes); + packet_bytes += bytes; + desc.status &= ~DESC_STATUS_CTL; + allwinner_sun8i_emac_flush_desc(s, &desc, s->tx_desc_curr); + + /* After the last descriptor, send the packet */ + if (desc.status2 & TX_DESC_STATUS2_LAST_DESC) { + if (desc.status2 & TX_DESC_STATUS2_CHECKSUM_MASK) { + net_checksum_calculate(packet_buf, packet_bytes, CSUM_ALL); + } + + qemu_send_packet(nc, packet_buf, packet_bytes); + trace_allwinner_sun8i_emac_transmit(s->tx_desc_curr, desc.addr, + bytes); + + packet_bytes = 0; + transmitted++; + } + s->tx_desc_curr = allwinner_sun8i_emac_next_desc(s, &desc); + } + + /* Raise transmit completed interrupt */ + if (transmitted > 0) { + s->int_sta |= INT_STA_TX; + s->tx_ctl1 &= ~TX_CTL1_TX_DMA_START; + allwinner_sun8i_emac_update_irq(s); + } +} + +static void allwinner_sun8i_emac_reset(DeviceState *dev) +{ + AwSun8iEmacState *s = AW_SUN8I_EMAC(dev); + NetClientState *nc = qemu_get_queue(s->nic); + + trace_allwinner_sun8i_emac_reset(); + + s->mii_cmd = 0; + s->mii_data = 0; + s->basic_ctl0 = 0; + s->basic_ctl1 = REG_BASIC_CTL_1_RST; + s->int_en = 0; + s->int_sta = 0; + s->frm_flt = 0; + s->rx_ctl0 = 0; + s->rx_ctl1 = RX_CTL1_RX_MD; + s->rx_desc_head = 0; + s->rx_desc_curr = 0; + s->tx_ctl0 = 0; + s->tx_ctl1 = 0; + s->tx_desc_head = 0; + s->tx_desc_curr = 0; + s->tx_flowctl = 0; + + allwinner_sun8i_emac_mii_reset(s, !nc->link_down); +} + +static uint64_t allwinner_sun8i_emac_read(void *opaque, hwaddr offset, + unsigned size) +{ + AwSun8iEmacState *s = AW_SUN8I_EMAC(opaque); + uint64_t value = 0; + FrameDescriptor desc; + + switch (offset) { + case REG_BASIC_CTL_0: /* Basic Control 0 */ + value = s->basic_ctl0; + break; + case REG_BASIC_CTL_1: /* Basic Control 1 */ + value = s->basic_ctl1; + break; + case REG_INT_STA: /* Interrupt Status */ + value = s->int_sta; + break; + case REG_INT_EN: /* Interrupt Enable */ + value = s->int_en; + break; + case REG_TX_CTL_0: /* Transmit Control 0 */ + value = s->tx_ctl0; + break; + case REG_TX_CTL_1: /* Transmit Control 1 */ + value = s->tx_ctl1; + break; + case REG_TX_FLOW_CTL: /* Transmit Flow Control */ + value = s->tx_flowctl; + break; + case REG_TX_DMA_DESC_LIST: /* Transmit Descriptor List Address */ + value = s->tx_desc_head; + break; + case REG_RX_CTL_0: /* Receive Control 0 */ + value = s->rx_ctl0; + break; + case REG_RX_CTL_1: /* Receive Control 1 */ + value = s->rx_ctl1; + break; + case REG_RX_DMA_DESC_LIST: /* Receive Descriptor List Address */ + value = s->rx_desc_head; + break; + case REG_FRM_FLT: /* Receive Frame Filter */ + value = s->frm_flt; + break; + case REG_RX_HASH_0: /* Receive Hash Table 0 */ + case REG_RX_HASH_1: /* Receive Hash Table 1 */ + break; + case REG_MII_CMD: /* Management Interface Command */ + value = s->mii_cmd; + break; + case REG_MII_DATA: /* Management Interface Data */ + value = s->mii_data; + break; + case REG_ADDR_HIGH: /* MAC Address High */ + value = lduw_le_p(s->conf.macaddr.a + 4); + break; + case REG_ADDR_LOW: /* MAC Address Low */ + value = ldl_le_p(s->conf.macaddr.a); + break; + case REG_TX_DMA_STA: /* Transmit DMA Status */ + break; + case REG_TX_CUR_DESC: /* Transmit Current Descriptor */ + value = s->tx_desc_curr; + break; + case 
REG_TX_CUR_BUF: /* Transmit Current Buffer */ + if (s->tx_desc_curr != 0) { + dma_memory_read(&s->dma_as, s->tx_desc_curr, &desc, sizeof(desc)); + value = desc.addr; + } else { + value = 0; + } + break; + case REG_RX_DMA_STA: /* Receive DMA Status */ + break; + case REG_RX_CUR_DESC: /* Receive Current Descriptor */ + value = s->rx_desc_curr; + break; + case REG_RX_CUR_BUF: /* Receive Current Buffer */ + if (s->rx_desc_curr != 0) { + dma_memory_read(&s->dma_as, s->rx_desc_curr, &desc, sizeof(desc)); + value = desc.addr; + } else { + value = 0; + } + break; + case REG_RGMII_STA: /* RGMII Status */ + break; + default: + qemu_log_mask(LOG_UNIMP, "allwinner-h3-emac: read access to unknown " + "EMAC register 0x" TARGET_FMT_plx "\n", + offset); + } + + trace_allwinner_sun8i_emac_read(offset, value); + return value; +} + +static void allwinner_sun8i_emac_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + AwSun8iEmacState *s = AW_SUN8I_EMAC(opaque); + NetClientState *nc = qemu_get_queue(s->nic); + + trace_allwinner_sun8i_emac_write(offset, value); + + switch (offset) { + case REG_BASIC_CTL_0: /* Basic Control 0 */ + s->basic_ctl0 = value; + break; + case REG_BASIC_CTL_1: /* Basic Control 1 */ + if (value & BASIC_CTL1_SOFTRST) { + allwinner_sun8i_emac_reset(DEVICE(s)); + value &= ~BASIC_CTL1_SOFTRST; + } + s->basic_ctl1 = value; + if (allwinner_sun8i_emac_can_receive(nc)) { + qemu_flush_queued_packets(nc); + } + break; + case REG_INT_STA: /* Interrupt Status */ + s->int_sta &= ~value; + allwinner_sun8i_emac_update_irq(s); + break; + case REG_INT_EN: /* Interrupt Enable */ + s->int_en = value; + allwinner_sun8i_emac_update_irq(s); + break; + case REG_TX_CTL_0: /* Transmit Control 0 */ + s->tx_ctl0 = value; + break; + case REG_TX_CTL_1: /* Transmit Control 1 */ + s->tx_ctl1 = value; + if (value & TX_CTL1_TX_DMA_EN) { + allwinner_sun8i_emac_transmit(s); + } + break; + case REG_TX_FLOW_CTL: /* Transmit Flow Control */ + s->tx_flowctl = value; + break; + case REG_TX_DMA_DESC_LIST: /* Transmit Descriptor List Address */ + s->tx_desc_head = value; + s->tx_desc_curr = value; + break; + case REG_RX_CTL_0: /* Receive Control 0 */ + s->rx_ctl0 = value; + break; + case REG_RX_CTL_1: /* Receive Control 1 */ + s->rx_ctl1 = value | RX_CTL1_RX_MD; + if ((value & RX_CTL1_RX_DMA_EN) && + allwinner_sun8i_emac_can_receive(nc)) { + qemu_flush_queued_packets(nc); + } + break; + case REG_RX_DMA_DESC_LIST: /* Receive Descriptor List Address */ + s->rx_desc_head = value; + s->rx_desc_curr = value; + break; + case REG_FRM_FLT: /* Receive Frame Filter */ + s->frm_flt = value; + break; + case REG_RX_HASH_0: /* Receive Hash Table 0 */ + case REG_RX_HASH_1: /* Receive Hash Table 1 */ + break; + case REG_MII_CMD: /* Management Interface Command */ + s->mii_cmd = value & ~MII_CMD_PHY_BUSY; + allwinner_sun8i_emac_mii_cmd(s); + break; + case REG_MII_DATA: /* Management Interface Data */ + s->mii_data = value; + break; + case REG_ADDR_HIGH: /* MAC Address High */ + stw_le_p(s->conf.macaddr.a + 4, value); + break; + case REG_ADDR_LOW: /* MAC Address Low */ + stl_le_p(s->conf.macaddr.a, value); + break; + case REG_TX_DMA_STA: /* Transmit DMA Status */ + case REG_TX_CUR_DESC: /* Transmit Current Descriptor */ + case REG_TX_CUR_BUF: /* Transmit Current Buffer */ + case REG_RX_DMA_STA: /* Receive DMA Status */ + case REG_RX_CUR_DESC: /* Receive Current Descriptor */ + case REG_RX_CUR_BUF: /* Receive Current Buffer */ + case REG_RGMII_STA: /* RGMII Status */ + break; + default: + qemu_log_mask(LOG_UNIMP, 
"allwinner-h3-emac: write access to unknown " + "EMAC register 0x" TARGET_FMT_plx "\n", + offset); + } +} + +static void allwinner_sun8i_emac_set_link(NetClientState *nc) +{ + AwSun8iEmacState *s = qemu_get_nic_opaque(nc); + + trace_allwinner_sun8i_emac_set_link(!nc->link_down); + allwinner_sun8i_emac_mii_set_link(s, !nc->link_down); +} + +static const MemoryRegionOps allwinner_sun8i_emac_mem_ops = { + .read = allwinner_sun8i_emac_read, + .write = allwinner_sun8i_emac_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + }, + .impl.min_access_size = 4, +}; + +static NetClientInfo net_allwinner_sun8i_emac_info = { + .type = NET_CLIENT_DRIVER_NIC, + .size = sizeof(NICState), + .can_receive = allwinner_sun8i_emac_can_receive, + .receive = allwinner_sun8i_emac_receive, + .link_status_changed = allwinner_sun8i_emac_set_link, +}; + +static void allwinner_sun8i_emac_init(Object *obj) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + AwSun8iEmacState *s = AW_SUN8I_EMAC(obj); + + memory_region_init_io(&s->iomem, OBJECT(s), &allwinner_sun8i_emac_mem_ops, + s, TYPE_AW_SUN8I_EMAC, 64 * KiB); + sysbus_init_mmio(sbd, &s->iomem); + sysbus_init_irq(sbd, &s->irq); +} + +static void allwinner_sun8i_emac_realize(DeviceState *dev, Error **errp) +{ + AwSun8iEmacState *s = AW_SUN8I_EMAC(dev); + + if (!s->dma_mr) { + error_setg(errp, TYPE_AW_SUN8I_EMAC " 'dma-memory' link not set"); + return; + } + + address_space_init(&s->dma_as, s->dma_mr, "emac-dma"); + + qemu_macaddr_default_if_unset(&s->conf.macaddr); + s->nic = qemu_new_nic(&net_allwinner_sun8i_emac_info, &s->conf, + object_get_typename(OBJECT(dev)), dev->id, s); + qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a); +} + +static Property allwinner_sun8i_emac_properties[] = { + DEFINE_NIC_PROPERTIES(AwSun8iEmacState, conf), + DEFINE_PROP_UINT8("phy-addr", AwSun8iEmacState, mii_phy_addr, 0), + DEFINE_PROP_LINK("dma-memory", AwSun8iEmacState, dma_mr, + TYPE_MEMORY_REGION, MemoryRegion *), + DEFINE_PROP_END_OF_LIST(), +}; + +static int allwinner_sun8i_emac_post_load(void *opaque, int version_id) +{ + AwSun8iEmacState *s = opaque; + + allwinner_sun8i_emac_set_link(qemu_get_queue(s->nic)); + + return 0; +} + +static const VMStateDescription vmstate_aw_emac = { + .name = "allwinner-sun8i-emac", + .version_id = 1, + .minimum_version_id = 1, + .post_load = allwinner_sun8i_emac_post_load, + .fields = (VMStateField[]) { + VMSTATE_UINT8(mii_phy_addr, AwSun8iEmacState), + VMSTATE_UINT32(mii_cmd, AwSun8iEmacState), + VMSTATE_UINT32(mii_data, AwSun8iEmacState), + VMSTATE_UINT32(mii_cr, AwSun8iEmacState), + VMSTATE_UINT32(mii_st, AwSun8iEmacState), + VMSTATE_UINT32(mii_adv, AwSun8iEmacState), + VMSTATE_UINT32(basic_ctl0, AwSun8iEmacState), + VMSTATE_UINT32(basic_ctl1, AwSun8iEmacState), + VMSTATE_UINT32(int_en, AwSun8iEmacState), + VMSTATE_UINT32(int_sta, AwSun8iEmacState), + VMSTATE_UINT32(frm_flt, AwSun8iEmacState), + VMSTATE_UINT32(rx_ctl0, AwSun8iEmacState), + VMSTATE_UINT32(rx_ctl1, AwSun8iEmacState), + VMSTATE_UINT32(rx_desc_head, AwSun8iEmacState), + VMSTATE_UINT32(rx_desc_curr, AwSun8iEmacState), + VMSTATE_UINT32(tx_ctl0, AwSun8iEmacState), + VMSTATE_UINT32(tx_ctl1, AwSun8iEmacState), + VMSTATE_UINT32(tx_desc_head, AwSun8iEmacState), + VMSTATE_UINT32(tx_desc_curr, AwSun8iEmacState), + VMSTATE_UINT32(tx_flowctl, AwSun8iEmacState), + VMSTATE_END_OF_LIST() + } +}; + +static void allwinner_sun8i_emac_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + 
dc->realize = allwinner_sun8i_emac_realize; + dc->reset = allwinner_sun8i_emac_reset; + dc->vmsd = &vmstate_aw_emac; + device_class_set_props(dc, allwinner_sun8i_emac_properties); +} + +static const TypeInfo allwinner_sun8i_emac_info = { + .name = TYPE_AW_SUN8I_EMAC, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(AwSun8iEmacState), + .instance_init = allwinner_sun8i_emac_init, + .class_init = allwinner_sun8i_emac_class_init, +}; + +static void allwinner_sun8i_emac_register_types(void) +{ + type_register_static(&allwinner_sun8i_emac_info); +} + +type_init(allwinner_sun8i_emac_register_types) diff --git a/hw/net/allwinner_emac.c b/hw/net/allwinner_emac.c new file mode 100644 index 000000000..ddddf35c4 --- /dev/null +++ b/hw/net/allwinner_emac.c @@ -0,0 +1,540 @@ +/* + * Emulation of Allwinner EMAC Fast Ethernet controller and + * Realtek RTL8201CP PHY + * + * Copyright (C) 2014 Beniamino Galvani <b.galvani@gmail.com> + * + * This model is based on reverse-engineering of Linux kernel driver. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include "qemu/osdep.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "net/net.h" +#include "qemu/fifo8.h" +#include "hw/irq.h" +#include "hw/net/allwinner_emac.h" +#include "hw/qdev-properties.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include <zlib.h> + +static uint8_t padding[60]; + +static void mii_set_link(RTL8201CPState *mii, bool link_ok) +{ + if (link_ok) { + mii->bmsr |= MII_BMSR_LINK_ST | MII_BMSR_AN_COMP; + mii->anlpar |= MII_ANAR_TXFD | MII_ANAR_10FD | MII_ANAR_10 | + MII_ANAR_CSMACD; + } else { + mii->bmsr &= ~(MII_BMSR_LINK_ST | MII_BMSR_AN_COMP); + mii->anlpar = MII_ANAR_TX; + } +} + +static void mii_reset(RTL8201CPState *mii, bool link_ok) +{ + mii->bmcr = MII_BMCR_FD | MII_BMCR_AUTOEN | MII_BMCR_SPEED; + mii->bmsr = MII_BMSR_100TX_FD | MII_BMSR_100TX_HD | MII_BMSR_10T_FD | + MII_BMSR_10T_HD | MII_BMSR_MFPS | MII_BMSR_AUTONEG; + mii->anar = MII_ANAR_TXFD | MII_ANAR_TX | MII_ANAR_10FD | MII_ANAR_10 | + MII_ANAR_CSMACD; + mii->anlpar = MII_ANAR_TX; + + mii_set_link(mii, link_ok); +} + +static uint16_t RTL8201CP_mdio_read(AwEmacState *s, uint8_t addr, uint8_t reg) +{ + RTL8201CPState *mii = &s->mii; + uint16_t ret = 0xffff; + + if (addr == s->phy_addr) { + switch (reg) { + case MII_BMCR: + return mii->bmcr; + case MII_BMSR: + return mii->bmsr; + case MII_PHYID1: + return RTL8201CP_PHYID1; + case MII_PHYID2: + return RTL8201CP_PHYID2; + case MII_ANAR: + return mii->anar; + case MII_ANLPAR: + return mii->anlpar; + case MII_ANER: + case MII_NSR: + case MII_LBREMR: + case MII_REC: + case MII_SNRDR: + case MII_TEST: + qemu_log_mask(LOG_UNIMP, + "allwinner_emac: read from unimpl. 
mii reg 0x%x\n", + reg); + return 0; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "allwinner_emac: read from invalid mii reg 0x%x\n", + reg); + return 0; + } + } + return ret; +} + +static void RTL8201CP_mdio_write(AwEmacState *s, uint8_t addr, uint8_t reg, + uint16_t value) +{ + RTL8201CPState *mii = &s->mii; + NetClientState *nc; + + if (addr == s->phy_addr) { + switch (reg) { + case MII_BMCR: + if (value & MII_BMCR_RESET) { + nc = qemu_get_queue(s->nic); + mii_reset(mii, !nc->link_down); + } else { + mii->bmcr = value; + } + break; + case MII_ANAR: + mii->anar = value; + break; + case MII_BMSR: + case MII_PHYID1: + case MII_PHYID2: + case MII_ANLPAR: + case MII_ANER: + qemu_log_mask(LOG_GUEST_ERROR, + "allwinner_emac: write to read-only mii reg 0x%x\n", + reg); + break; + case MII_NSR: + case MII_LBREMR: + case MII_REC: + case MII_SNRDR: + case MII_TEST: + qemu_log_mask(LOG_UNIMP, + "allwinner_emac: write to unimpl. mii reg 0x%x\n", + reg); + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "allwinner_emac: write to invalid mii reg 0x%x\n", + reg); + } + } +} + +static void aw_emac_update_irq(AwEmacState *s) +{ + qemu_set_irq(s->irq, (s->int_sta & s->int_ctl) != 0); +} + +static void aw_emac_tx_reset(AwEmacState *s, int chan) +{ + fifo8_reset(&s->tx_fifo[chan]); + s->tx_length[chan] = 0; +} + +static void aw_emac_rx_reset(AwEmacState *s) +{ + fifo8_reset(&s->rx_fifo); + s->rx_num_packets = 0; + s->rx_packet_size = 0; + s->rx_packet_pos = 0; +} + +static void fifo8_push_word(Fifo8 *fifo, uint32_t val) +{ + fifo8_push(fifo, val); + fifo8_push(fifo, val >> 8); + fifo8_push(fifo, val >> 16); + fifo8_push(fifo, val >> 24); +} + +static uint32_t fifo8_pop_word(Fifo8 *fifo) +{ + uint32_t ret; + + ret = fifo8_pop(fifo); + ret |= fifo8_pop(fifo) << 8; + ret |= fifo8_pop(fifo) << 16; + ret |= fifo8_pop(fifo) << 24; + + return ret; +} + +static bool aw_emac_can_receive(NetClientState *nc) +{ + AwEmacState *s = qemu_get_nic_opaque(nc); + + /* + * To avoid packet drops, allow reception only when there is space + * for a full frame: 1522 + 8 (rx headers) + 2 (padding). + */ + return (s->ctl & EMAC_CTL_RX_EN) && (fifo8_num_free(&s->rx_fifo) >= 1532); +} + +static ssize_t aw_emac_receive(NetClientState *nc, const uint8_t *buf, + size_t size) +{ + AwEmacState *s = qemu_get_nic_opaque(nc); + Fifo8 *fifo = &s->rx_fifo; + size_t padded_size, total_size; + uint32_t crc; + + padded_size = size > 60 ? 
size : 60; + total_size = QEMU_ALIGN_UP(RX_HDR_SIZE + padded_size + CRC_SIZE, 4); + + if (!(s->ctl & EMAC_CTL_RX_EN) || (fifo8_num_free(fifo) < total_size)) { + return -1; + } + + fifo8_push_word(fifo, EMAC_UNDOCUMENTED_MAGIC); + fifo8_push_word(fifo, EMAC_RX_HEADER(padded_size + CRC_SIZE, + EMAC_RX_IO_DATA_STATUS_OK)); + fifo8_push_all(fifo, buf, size); + crc = crc32(~0, buf, size); + + if (padded_size != size) { + fifo8_push_all(fifo, padding, padded_size - size); + crc = crc32(crc, padding, padded_size - size); + } + + fifo8_push_word(fifo, crc); + fifo8_push_all(fifo, padding, QEMU_ALIGN_UP(padded_size, 4) - padded_size); + s->rx_num_packets++; + + s->int_sta |= EMAC_INT_RX; + aw_emac_update_irq(s); + + return size; +} + +static void aw_emac_reset(DeviceState *dev) +{ + AwEmacState *s = AW_EMAC(dev); + NetClientState *nc = qemu_get_queue(s->nic); + + s->ctl = 0; + s->tx_mode = 0; + s->int_ctl = 0; + s->int_sta = 0; + s->tx_channel = 0; + s->phy_target = 0; + + aw_emac_tx_reset(s, 0); + aw_emac_tx_reset(s, 1); + aw_emac_rx_reset(s); + + mii_reset(&s->mii, !nc->link_down); +} + +static uint64_t aw_emac_read(void *opaque, hwaddr offset, unsigned size) +{ + AwEmacState *s = opaque; + Fifo8 *fifo = &s->rx_fifo; + NetClientState *nc; + uint64_t ret; + + switch (offset) { + case EMAC_CTL_REG: + return s->ctl; + case EMAC_TX_MODE_REG: + return s->tx_mode; + case EMAC_TX_INS_REG: + return s->tx_channel; + case EMAC_RX_CTL_REG: + return s->rx_ctl; + case EMAC_RX_IO_DATA_REG: + if (!s->rx_num_packets) { + qemu_log_mask(LOG_GUEST_ERROR, + "Read IO data register when no packet available"); + return 0; + } + + ret = fifo8_pop_word(fifo); + + switch (s->rx_packet_pos) { + case 0: /* Word is magic header */ + s->rx_packet_pos += 4; + break; + case 4: /* Word is rx info header */ + s->rx_packet_pos += 4; + s->rx_packet_size = QEMU_ALIGN_UP(extract32(ret, 0, 16), 4); + break; + default: /* Word is packet data */ + s->rx_packet_pos += 4; + s->rx_packet_size -= 4; + + if (!s->rx_packet_size) { + s->rx_packet_pos = 0; + s->rx_num_packets--; + nc = qemu_get_queue(s->nic); + if (aw_emac_can_receive(nc)) { + qemu_flush_queued_packets(nc); + } + } + } + return ret; + case EMAC_RX_FBC_REG: + return s->rx_num_packets; + case EMAC_INT_CTL_REG: + return s->int_ctl; + case EMAC_INT_STA_REG: + return s->int_sta; + case EMAC_MAC_MRDD_REG: + return RTL8201CP_mdio_read(s, + extract32(s->phy_target, PHY_ADDR_SHIFT, 8), + extract32(s->phy_target, PHY_REG_SHIFT, 8)); + default: + qemu_log_mask(LOG_UNIMP, + "allwinner_emac: read access to unknown register 0x" + TARGET_FMT_plx "\n", offset); + ret = 0; + } + + return ret; +} + +static void aw_emac_write(void *opaque, hwaddr offset, uint64_t value, + unsigned size) +{ + AwEmacState *s = opaque; + Fifo8 *fifo; + NetClientState *nc = qemu_get_queue(s->nic); + int chan; + + switch (offset) { + case EMAC_CTL_REG: + if (value & EMAC_CTL_RESET) { + aw_emac_reset(DEVICE(s)); + value &= ~EMAC_CTL_RESET; + } + s->ctl = value; + if (aw_emac_can_receive(nc)) { + qemu_flush_queued_packets(nc); + } + break; + case EMAC_TX_MODE_REG: + s->tx_mode = value; + break; + case EMAC_TX_CTL0_REG: + case EMAC_TX_CTL1_REG: + chan = (offset == EMAC_TX_CTL0_REG ? 
0 : 1); + if ((value & 1) && (s->ctl & EMAC_CTL_TX_EN)) { + uint32_t len, ret; + const uint8_t *data; + + fifo = &s->tx_fifo[chan]; + len = s->tx_length[chan]; + + if (len > fifo8_num_used(fifo)) { + len = fifo8_num_used(fifo); + qemu_log_mask(LOG_GUEST_ERROR, + "allwinner_emac: TX length > fifo data length\n"); + } + if (len > 0) { + data = fifo8_pop_buf(fifo, len, &ret); + qemu_send_packet(nc, data, ret); + aw_emac_tx_reset(s, chan); + /* Raise TX interrupt */ + s->int_sta |= EMAC_INT_TX_CHAN(chan); + aw_emac_update_irq(s); + } + } + break; + case EMAC_TX_INS_REG: + s->tx_channel = value < NUM_TX_FIFOS ? value : 0; + break; + case EMAC_TX_PL0_REG: + case EMAC_TX_PL1_REG: + chan = (offset == EMAC_TX_PL0_REG ? 0 : 1); + if (value > TX_FIFO_SIZE) { + qemu_log_mask(LOG_GUEST_ERROR, + "allwinner_emac: invalid TX frame length %d\n", + (int)value); + value = TX_FIFO_SIZE; + } + s->tx_length[chan] = value; + break; + case EMAC_TX_IO_DATA_REG: + fifo = &s->tx_fifo[s->tx_channel]; + if (fifo8_num_free(fifo) < 4) { + qemu_log_mask(LOG_GUEST_ERROR, + "allwinner_emac: TX data overruns fifo\n"); + break; + } + fifo8_push_word(fifo, value); + break; + case EMAC_RX_CTL_REG: + s->rx_ctl = value; + break; + case EMAC_RX_FBC_REG: + if (value == 0) { + aw_emac_rx_reset(s); + } + break; + case EMAC_INT_CTL_REG: + s->int_ctl = value; + aw_emac_update_irq(s); + break; + case EMAC_INT_STA_REG: + s->int_sta &= ~value; + aw_emac_update_irq(s); + break; + case EMAC_MAC_MADR_REG: + s->phy_target = value; + break; + case EMAC_MAC_MWTD_REG: + RTL8201CP_mdio_write(s, extract32(s->phy_target, PHY_ADDR_SHIFT, 8), + extract32(s->phy_target, PHY_REG_SHIFT, 8), value); + break; + default: + qemu_log_mask(LOG_UNIMP, + "allwinner_emac: write access to unknown register 0x" + TARGET_FMT_plx "\n", offset); + } +} + +static void aw_emac_set_link(NetClientState *nc) +{ + AwEmacState *s = qemu_get_nic_opaque(nc); + + mii_set_link(&s->mii, !nc->link_down); +} + +static const MemoryRegionOps aw_emac_mem_ops = { + .read = aw_emac_read, + .write = aw_emac_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +static NetClientInfo net_aw_emac_info = { + .type = NET_CLIENT_DRIVER_NIC, + .size = sizeof(NICState), + .can_receive = aw_emac_can_receive, + .receive = aw_emac_receive, + .link_status_changed = aw_emac_set_link, +}; + +static void aw_emac_init(Object *obj) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + AwEmacState *s = AW_EMAC(obj); + + memory_region_init_io(&s->iomem, OBJECT(s), &aw_emac_mem_ops, s, + "aw_emac", 0x1000); + sysbus_init_mmio(sbd, &s->iomem); + sysbus_init_irq(sbd, &s->irq); +} + +static void aw_emac_realize(DeviceState *dev, Error **errp) +{ + AwEmacState *s = AW_EMAC(dev); + + qemu_macaddr_default_if_unset(&s->conf.macaddr); + s->nic = qemu_new_nic(&net_aw_emac_info, &s->conf, + object_get_typename(OBJECT(dev)), dev->id, s); + qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a); + + fifo8_create(&s->rx_fifo, RX_FIFO_SIZE); + fifo8_create(&s->tx_fifo[0], TX_FIFO_SIZE); + fifo8_create(&s->tx_fifo[1], TX_FIFO_SIZE); +} + +static Property aw_emac_properties[] = { + DEFINE_NIC_PROPERTIES(AwEmacState, conf), + DEFINE_PROP_UINT8("phy-addr", AwEmacState, phy_addr, 0), + DEFINE_PROP_END_OF_LIST(), +}; + +static const VMStateDescription vmstate_mii = { + .name = "rtl8201cp", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT16(bmcr, RTL8201CPState), + VMSTATE_UINT16(bmsr, RTL8201CPState), + 
VMSTATE_UINT16(anar, RTL8201CPState), + VMSTATE_UINT16(anlpar, RTL8201CPState), + VMSTATE_END_OF_LIST() + } +}; + +static int aw_emac_post_load(void *opaque, int version_id) +{ + AwEmacState *s = opaque; + + aw_emac_set_link(qemu_get_queue(s->nic)); + + return 0; +} + +static const VMStateDescription vmstate_aw_emac = { + .name = "allwinner_emac", + .version_id = 1, + .minimum_version_id = 1, + .post_load = aw_emac_post_load, + .fields = (VMStateField[]) { + VMSTATE_STRUCT(mii, AwEmacState, 1, vmstate_mii, RTL8201CPState), + VMSTATE_UINT32(ctl, AwEmacState), + VMSTATE_UINT32(tx_mode, AwEmacState), + VMSTATE_UINT32(rx_ctl, AwEmacState), + VMSTATE_UINT32(int_ctl, AwEmacState), + VMSTATE_UINT32(int_sta, AwEmacState), + VMSTATE_UINT32(phy_target, AwEmacState), + VMSTATE_FIFO8(rx_fifo, AwEmacState), + VMSTATE_UINT32(rx_num_packets, AwEmacState), + VMSTATE_UINT32(rx_packet_size, AwEmacState), + VMSTATE_UINT32(rx_packet_pos, AwEmacState), + VMSTATE_STRUCT_ARRAY(tx_fifo, AwEmacState, NUM_TX_FIFOS, 1, + vmstate_fifo8, Fifo8), + VMSTATE_UINT32_ARRAY(tx_length, AwEmacState, NUM_TX_FIFOS), + VMSTATE_UINT32(tx_channel, AwEmacState), + VMSTATE_END_OF_LIST() + } +}; + +static void aw_emac_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = aw_emac_realize; + device_class_set_props(dc, aw_emac_properties); + dc->reset = aw_emac_reset; + dc->vmsd = &vmstate_aw_emac; +} + +static const TypeInfo aw_emac_info = { + .name = TYPE_AW_EMAC, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(AwEmacState), + .instance_init = aw_emac_init, + .class_init = aw_emac_class_init, +}; + +static void aw_emac_register_types(void) +{ + type_register_static(&aw_emac_info); +} + +type_init(aw_emac_register_types) diff --git a/hw/net/cadence_gem.c b/hw/net/cadence_gem.c new file mode 100644 index 000000000..24b3a0ff6 --- /dev/null +++ b/hw/net/cadence_gem.c @@ -0,0 +1,1720 @@ +/* + * QEMU Cadence GEM emulation + * + * Copyright (c) 2011 Xilinx, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include <zlib.h> /* For crc32 */ + +#include "hw/irq.h" +#include "hw/net/cadence_gem.h" +#include "hw/qdev-properties.h" +#include "migration/vmstate.h" +#include "qapi/error.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "sysemu/dma.h" +#include "net/checksum.h" +#include "net/eth.h" + +#define CADENCE_GEM_ERR_DEBUG 0 +#define DB_PRINT(...) 
do {\ + if (CADENCE_GEM_ERR_DEBUG) { \ + qemu_log(": %s: ", __func__); \ + qemu_log(__VA_ARGS__); \ + } \ +} while (0) + +#define GEM_NWCTRL (0x00000000 / 4) /* Network Control reg */ +#define GEM_NWCFG (0x00000004 / 4) /* Network Config reg */ +#define GEM_NWSTATUS (0x00000008 / 4) /* Network Status reg */ +#define GEM_USERIO (0x0000000C / 4) /* User IO reg */ +#define GEM_DMACFG (0x00000010 / 4) /* DMA Control reg */ +#define GEM_TXSTATUS (0x00000014 / 4) /* TX Status reg */ +#define GEM_RXQBASE (0x00000018 / 4) /* RX Q Base address reg */ +#define GEM_TXQBASE (0x0000001C / 4) /* TX Q Base address reg */ +#define GEM_RXSTATUS (0x00000020 / 4) /* RX Status reg */ +#define GEM_ISR (0x00000024 / 4) /* Interrupt Status reg */ +#define GEM_IER (0x00000028 / 4) /* Interrupt Enable reg */ +#define GEM_IDR (0x0000002C / 4) /* Interrupt Disable reg */ +#define GEM_IMR (0x00000030 / 4) /* Interrupt Mask reg */ +#define GEM_PHYMNTNC (0x00000034 / 4) /* Phy Maintenance reg */ +#define GEM_RXPAUSE (0x00000038 / 4) /* RX Pause Time reg */ +#define GEM_TXPAUSE (0x0000003C / 4) /* TX Pause Time reg */ +#define GEM_TXPARTIALSF (0x00000040 / 4) /* TX Partial Store and Forward */ +#define GEM_RXPARTIALSF (0x00000044 / 4) /* RX Partial Store and Forward */ +#define GEM_JUMBO_MAX_LEN (0x00000048 / 4) /* Max Jumbo Frame Size */ +#define GEM_HASHLO (0x00000080 / 4) /* Hash Low address reg */ +#define GEM_HASHHI (0x00000084 / 4) /* Hash High address reg */ +#define GEM_SPADDR1LO (0x00000088 / 4) /* Specific addr 1 low reg */ +#define GEM_SPADDR1HI (0x0000008C / 4) /* Specific addr 1 high reg */ +#define GEM_SPADDR2LO (0x00000090 / 4) /* Specific addr 2 low reg */ +#define GEM_SPADDR2HI (0x00000094 / 4) /* Specific addr 2 high reg */ +#define GEM_SPADDR3LO (0x00000098 / 4) /* Specific addr 3 low reg */ +#define GEM_SPADDR3HI (0x0000009C / 4) /* Specific addr 3 high reg */ +#define GEM_SPADDR4LO (0x000000A0 / 4) /* Specific addr 4 low reg */ +#define GEM_SPADDR4HI (0x000000A4 / 4) /* Specific addr 4 high reg */ +#define GEM_TIDMATCH1 (0x000000A8 / 4) /* Type ID1 Match reg */ +#define GEM_TIDMATCH2 (0x000000AC / 4) /* Type ID2 Match reg */ +#define GEM_TIDMATCH3 (0x000000B0 / 4) /* Type ID3 Match reg */ +#define GEM_TIDMATCH4 (0x000000B4 / 4) /* Type ID4 Match reg */ +#define GEM_WOLAN (0x000000B8 / 4) /* Wake on LAN reg */ +#define GEM_IPGSTRETCH (0x000000BC / 4) /* IPG Stretch reg */ +#define GEM_SVLAN (0x000000C0 / 4) /* Stacked VLAN reg */ +#define GEM_MODID (0x000000FC / 4) /* Module ID reg */ +#define GEM_OCTTXLO (0x00000100 / 4) /* Octects transmitted Low reg */ +#define GEM_OCTTXHI (0x00000104 / 4) /* Octects transmitted High reg */ +#define GEM_TXCNT (0x00000108 / 4) /* Error-free Frames transmitted */ +#define GEM_TXBCNT (0x0000010C / 4) /* Error-free Broadcast Frames */ +#define GEM_TXMCNT (0x00000110 / 4) /* Error-free Multicast Frame */ +#define GEM_TXPAUSECNT (0x00000114 / 4) /* Pause Frames Transmitted */ +#define GEM_TX64CNT (0x00000118 / 4) /* Error-free 64 TX */ +#define GEM_TX65CNT (0x0000011C / 4) /* Error-free 65-127 TX */ +#define GEM_TX128CNT (0x00000120 / 4) /* Error-free 128-255 TX */ +#define GEM_TX256CNT (0x00000124 / 4) /* Error-free 256-511 */ +#define GEM_TX512CNT (0x00000128 / 4) /* Error-free 512-1023 TX */ +#define GEM_TX1024CNT (0x0000012C / 4) /* Error-free 1024-1518 TX */ +#define GEM_TX1519CNT (0x00000130 / 4) /* Error-free larger than 1519 TX */ +#define GEM_TXURUNCNT (0x00000134 / 4) /* TX under run error counter */ +#define GEM_SINGLECOLLCNT (0x00000138 / 4) /* Single 
Collision Frames */ +#define GEM_MULTCOLLCNT (0x0000013C / 4) /* Multiple Collision Frames */ +#define GEM_EXCESSCOLLCNT (0x00000140 / 4) /* Excessive Collision Frames */ +#define GEM_LATECOLLCNT (0x00000144 / 4) /* Late Collision Frames */ +#define GEM_DEFERTXCNT (0x00000148 / 4) /* Deferred Transmission Frames */ +#define GEM_CSENSECNT (0x0000014C / 4) /* Carrier Sense Error Counter */ +#define GEM_OCTRXLO (0x00000150 / 4) /* Octects Received register Low */ +#define GEM_OCTRXHI (0x00000154 / 4) /* Octects Received register High */ +#define GEM_RXCNT (0x00000158 / 4) /* Error-free Frames Received */ +#define GEM_RXBROADCNT (0x0000015C / 4) /* Error-free Broadcast Frames RX */ +#define GEM_RXMULTICNT (0x00000160 / 4) /* Error-free Multicast Frames RX */ +#define GEM_RXPAUSECNT (0x00000164 / 4) /* Pause Frames Received Counter */ +#define GEM_RX64CNT (0x00000168 / 4) /* Error-free 64 byte Frames RX */ +#define GEM_RX65CNT (0x0000016C / 4) /* Error-free 65-127B Frames RX */ +#define GEM_RX128CNT (0x00000170 / 4) /* Error-free 128-255B Frames RX */ +#define GEM_RX256CNT (0x00000174 / 4) /* Error-free 256-512B Frames RX */ +#define GEM_RX512CNT (0x00000178 / 4) /* Error-free 512-1023B Frames RX */ +#define GEM_RX1024CNT (0x0000017C / 4) /* Error-free 1024-1518B Frames RX */ +#define GEM_RX1519CNT (0x00000180 / 4) /* Error-free 1519-max Frames RX */ +#define GEM_RXUNDERCNT (0x00000184 / 4) /* Undersize Frames Received */ +#define GEM_RXOVERCNT (0x00000188 / 4) /* Oversize Frames Received */ +#define GEM_RXJABCNT (0x0000018C / 4) /* Jabbers Received Counter */ +#define GEM_RXFCSCNT (0x00000190 / 4) /* Frame Check seq. Error Counter */ +#define GEM_RXLENERRCNT (0x00000194 / 4) /* Length Field Error Counter */ +#define GEM_RXSYMERRCNT (0x00000198 / 4) /* Symbol Error Counter */ +#define GEM_RXALIGNERRCNT (0x0000019C / 4) /* Alignment Error Counter */ +#define GEM_RXRSCERRCNT (0x000001A0 / 4) /* Receive Resource Error Counter */ +#define GEM_RXORUNCNT (0x000001A4 / 4) /* Receive Overrun Counter */ +#define GEM_RXIPCSERRCNT (0x000001A8 / 4) /* IP header Checksum Err Counter */ +#define GEM_RXTCPCCNT (0x000001AC / 4) /* TCP Checksum Error Counter */ +#define GEM_RXUDPCCNT (0x000001B0 / 4) /* UDP Checksum Error Counter */ + +#define GEM_1588S (0x000001D0 / 4) /* 1588 Timer Seconds */ +#define GEM_1588NS (0x000001D4 / 4) /* 1588 Timer Nanoseconds */ +#define GEM_1588ADJ (0x000001D8 / 4) /* 1588 Timer Adjust */ +#define GEM_1588INC (0x000001DC / 4) /* 1588 Timer Increment */ +#define GEM_PTPETXS (0x000001E0 / 4) /* PTP Event Frame Transmitted (s) */ +#define GEM_PTPETXNS (0x000001E4 / 4) /* + * PTP Event Frame Transmitted (ns) + */ +#define GEM_PTPERXS (0x000001E8 / 4) /* PTP Event Frame Received (s) */ +#define GEM_PTPERXNS (0x000001EC / 4) /* PTP Event Frame Received (ns) */ +#define GEM_PTPPTXS (0x000001E0 / 4) /* PTP Peer Frame Transmitted (s) */ +#define GEM_PTPPTXNS (0x000001E4 / 4) /* PTP Peer Frame Transmitted (ns) */ +#define GEM_PTPPRXS (0x000001E8 / 4) /* PTP Peer Frame Received (s) */ +#define GEM_PTPPRXNS (0x000001EC / 4) /* PTP Peer Frame Received (ns) */ + +/* Design Configuration Registers */ +#define GEM_DESCONF (0x00000280 / 4) +#define GEM_DESCONF2 (0x00000284 / 4) +#define GEM_DESCONF3 (0x00000288 / 4) +#define GEM_DESCONF4 (0x0000028C / 4) +#define GEM_DESCONF5 (0x00000290 / 4) +#define GEM_DESCONF6 (0x00000294 / 4) +#define GEM_DESCONF6_64B_MASK (1U << 23) +#define GEM_DESCONF7 (0x00000298 / 4) + +#define GEM_INT_Q1_STATUS (0x00000400 / 4) +#define GEM_INT_Q1_MASK (0x00000640 / 4) 
+ +#define GEM_TRANSMIT_Q1_PTR (0x00000440 / 4) +#define GEM_TRANSMIT_Q7_PTR (GEM_TRANSMIT_Q1_PTR + 6) + +#define GEM_RECEIVE_Q1_PTR (0x00000480 / 4) +#define GEM_RECEIVE_Q7_PTR (GEM_RECEIVE_Q1_PTR + 6) + +#define GEM_TBQPH (0x000004C8 / 4) +#define GEM_RBQPH (0x000004D4 / 4) + +#define GEM_INT_Q1_ENABLE (0x00000600 / 4) +#define GEM_INT_Q7_ENABLE (GEM_INT_Q1_ENABLE + 6) + +#define GEM_INT_Q1_DISABLE (0x00000620 / 4) +#define GEM_INT_Q7_DISABLE (GEM_INT_Q1_DISABLE + 6) + +#define GEM_INT_Q1_MASK (0x00000640 / 4) +#define GEM_INT_Q7_MASK (GEM_INT_Q1_MASK + 6) + +#define GEM_SCREENING_TYPE1_REGISTER_0 (0x00000500 / 4) + +#define GEM_ST1R_UDP_PORT_MATCH_ENABLE (1 << 29) +#define GEM_ST1R_DSTC_ENABLE (1 << 28) +#define GEM_ST1R_UDP_PORT_MATCH_SHIFT (12) +#define GEM_ST1R_UDP_PORT_MATCH_WIDTH (27 - GEM_ST1R_UDP_PORT_MATCH_SHIFT + 1) +#define GEM_ST1R_DSTC_MATCH_SHIFT (4) +#define GEM_ST1R_DSTC_MATCH_WIDTH (11 - GEM_ST1R_DSTC_MATCH_SHIFT + 1) +#define GEM_ST1R_QUEUE_SHIFT (0) +#define GEM_ST1R_QUEUE_WIDTH (3 - GEM_ST1R_QUEUE_SHIFT + 1) + +#define GEM_SCREENING_TYPE2_REGISTER_0 (0x00000540 / 4) + +#define GEM_ST2R_COMPARE_A_ENABLE (1 << 18) +#define GEM_ST2R_COMPARE_A_SHIFT (13) +#define GEM_ST2R_COMPARE_WIDTH (17 - GEM_ST2R_COMPARE_A_SHIFT + 1) +#define GEM_ST2R_ETHERTYPE_ENABLE (1 << 12) +#define GEM_ST2R_ETHERTYPE_INDEX_SHIFT (9) +#define GEM_ST2R_ETHERTYPE_INDEX_WIDTH (11 - GEM_ST2R_ETHERTYPE_INDEX_SHIFT \ + + 1) +#define GEM_ST2R_QUEUE_SHIFT (0) +#define GEM_ST2R_QUEUE_WIDTH (3 - GEM_ST2R_QUEUE_SHIFT + 1) + +#define GEM_SCREENING_TYPE2_ETHERTYPE_REG_0 (0x000006e0 / 4) +#define GEM_TYPE2_COMPARE_0_WORD_0 (0x00000700 / 4) + +#define GEM_T2CW1_COMPARE_OFFSET_SHIFT (7) +#define GEM_T2CW1_COMPARE_OFFSET_WIDTH (8 - GEM_T2CW1_COMPARE_OFFSET_SHIFT + 1) +#define GEM_T2CW1_OFFSET_VALUE_SHIFT (0) +#define GEM_T2CW1_OFFSET_VALUE_WIDTH (6 - GEM_T2CW1_OFFSET_VALUE_SHIFT + 1) + +/*****************************************/ +#define GEM_NWCTRL_TXSTART 0x00000200 /* Transmit Enable */ +#define GEM_NWCTRL_TXENA 0x00000008 /* Transmit Enable */ +#define GEM_NWCTRL_RXENA 0x00000004 /* Receive Enable */ +#define GEM_NWCTRL_LOCALLOOP 0x00000002 /* Local Loopback */ + +#define GEM_NWCFG_STRIP_FCS 0x00020000 /* Strip FCS field */ +#define GEM_NWCFG_LERR_DISC 0x00010000 /* Discard RX frames with len err */ +#define GEM_NWCFG_BUFF_OFST_M 0x0000C000 /* Receive buffer offset mask */ +#define GEM_NWCFG_BUFF_OFST_S 14 /* Receive buffer offset shift */ +#define GEM_NWCFG_RCV_1538 0x00000100 /* Receive 1538 bytes frame */ +#define GEM_NWCFG_UCAST_HASH 0x00000080 /* accept unicast if hash match */ +#define GEM_NWCFG_MCAST_HASH 0x00000040 /* accept multicast if hash match */ +#define GEM_NWCFG_BCAST_REJ 0x00000020 /* Reject broadcast packets */ +#define GEM_NWCFG_PROMISC 0x00000010 /* Accept all packets */ +#define GEM_NWCFG_JUMBO_FRAME 0x00000008 /* Jumbo Frames enable */ + +#define GEM_DMACFG_ADDR_64B (1U << 30) +#define GEM_DMACFG_TX_BD_EXT (1U << 29) +#define GEM_DMACFG_RX_BD_EXT (1U << 28) +#define GEM_DMACFG_RBUFSZ_M 0x00FF0000 /* DMA RX Buffer Size mask */ +#define GEM_DMACFG_RBUFSZ_S 16 /* DMA RX Buffer Size shift */ +#define GEM_DMACFG_RBUFSZ_MUL 64 /* DMA RX Buffer Size multiplier */ +#define GEM_DMACFG_TXCSUM_OFFL 0x00000800 /* Transmit checksum offload */ + +#define GEM_TXSTATUS_TXCMPL 0x00000020 /* Transmit Complete */ +#define GEM_TXSTATUS_USED 0x00000001 /* sw owned descriptor encountered */ + +#define GEM_RXSTATUS_FRMRCVD 0x00000002 /* Frame received */ +#define GEM_RXSTATUS_NOBUF 0x00000001 /* Buffer 
unavailable */ + +/* GEM_ISR GEM_IER GEM_IDR GEM_IMR */ +#define GEM_INT_TXCMPL 0x00000080 /* Transmit Complete */ +#define GEM_INT_AMBA_ERR 0x00000040 +#define GEM_INT_TXUSED 0x00000008 +#define GEM_INT_RXUSED 0x00000004 +#define GEM_INT_RXCMPL 0x00000002 + +#define GEM_PHYMNTNC_OP_R 0x20000000 /* read operation */ +#define GEM_PHYMNTNC_OP_W 0x10000000 /* write operation */ +#define GEM_PHYMNTNC_ADDR 0x0F800000 /* Address bits */ +#define GEM_PHYMNTNC_ADDR_SHFT 23 +#define GEM_PHYMNTNC_REG 0x007C0000 /* register bits */ +#define GEM_PHYMNTNC_REG_SHIFT 18 + +/* Marvell PHY definitions */ +#define BOARD_PHY_ADDRESS 0 /* PHY address we will emulate a device at */ + +#define PHY_REG_CONTROL 0 +#define PHY_REG_STATUS 1 +#define PHY_REG_PHYID1 2 +#define PHY_REG_PHYID2 3 +#define PHY_REG_ANEGADV 4 +#define PHY_REG_LINKPABIL 5 +#define PHY_REG_ANEGEXP 6 +#define PHY_REG_NEXTP 7 +#define PHY_REG_LINKPNEXTP 8 +#define PHY_REG_100BTCTRL 9 +#define PHY_REG_1000BTSTAT 10 +#define PHY_REG_EXTSTAT 15 +#define PHY_REG_PHYSPCFC_CTL 16 +#define PHY_REG_PHYSPCFC_ST 17 +#define PHY_REG_INT_EN 18 +#define PHY_REG_INT_ST 19 +#define PHY_REG_EXT_PHYSPCFC_CTL 20 +#define PHY_REG_RXERR 21 +#define PHY_REG_EACD 22 +#define PHY_REG_LED 24 +#define PHY_REG_LED_OVRD 25 +#define PHY_REG_EXT_PHYSPCFC_CTL2 26 +#define PHY_REG_EXT_PHYSPCFC_ST 27 +#define PHY_REG_CABLE_DIAG 28 + +#define PHY_REG_CONTROL_RST 0x8000 +#define PHY_REG_CONTROL_LOOP 0x4000 +#define PHY_REG_CONTROL_ANEG 0x1000 +#define PHY_REG_CONTROL_ANRESTART 0x0200 + +#define PHY_REG_STATUS_LINK 0x0004 +#define PHY_REG_STATUS_ANEGCMPL 0x0020 + +#define PHY_REG_INT_ST_ANEGCMPL 0x0800 +#define PHY_REG_INT_ST_LINKC 0x0400 +#define PHY_REG_INT_ST_ENERGY 0x0010 + +/***********************************************************************/ +#define GEM_RX_REJECT (-1) +#define GEM_RX_PROMISCUOUS_ACCEPT (-2) +#define GEM_RX_BROADCAST_ACCEPT (-3) +#define GEM_RX_MULTICAST_HASH_ACCEPT (-4) +#define GEM_RX_UNICAST_HASH_ACCEPT (-5) + +#define GEM_RX_SAR_ACCEPT 0 + +/***********************************************************************/ + +#define DESC_1_USED 0x80000000 +#define DESC_1_LENGTH 0x00001FFF + +#define DESC_1_TX_WRAP 0x40000000 +#define DESC_1_TX_LAST 0x00008000 + +#define DESC_0_RX_WRAP 0x00000002 +#define DESC_0_RX_OWNERSHIP 0x00000001 + +#define R_DESC_1_RX_SAR_SHIFT 25 +#define R_DESC_1_RX_SAR_LENGTH 2 +#define R_DESC_1_RX_SAR_MATCH (1 << 27) +#define R_DESC_1_RX_UNICAST_HASH (1 << 29) +#define R_DESC_1_RX_MULTICAST_HASH (1 << 30) +#define R_DESC_1_RX_BROADCAST (1 << 31) + +#define DESC_1_RX_SOF 0x00004000 +#define DESC_1_RX_EOF 0x00008000 + +#define GEM_MODID_VALUE 0x00020118 + +static inline uint64_t tx_desc_get_buffer(CadenceGEMState *s, uint32_t *desc) +{ + uint64_t ret = desc[0]; + + if (s->regs[GEM_DMACFG] & GEM_DMACFG_ADDR_64B) { + ret |= (uint64_t)desc[2] << 32; + } + return ret; +} + +static inline unsigned tx_desc_get_used(uint32_t *desc) +{ + return (desc[1] & DESC_1_USED) ? 1 : 0; +} + +static inline void tx_desc_set_used(uint32_t *desc) +{ + desc[1] |= DESC_1_USED; +} + +static inline unsigned tx_desc_get_wrap(uint32_t *desc) +{ + return (desc[1] & DESC_1_TX_WRAP) ? 1 : 0; +} + +static inline unsigned tx_desc_get_last(uint32_t *desc) +{ + return (desc[1] & DESC_1_TX_LAST) ? 
1 : 0; +} + +static inline unsigned tx_desc_get_length(uint32_t *desc) +{ + return desc[1] & DESC_1_LENGTH; +} + +static inline void print_gem_tx_desc(uint32_t *desc, uint8_t queue) +{ + DB_PRINT("TXDESC (queue %" PRId8 "):\n", queue); + DB_PRINT("bufaddr: 0x%08x\n", *desc); + DB_PRINT("used_hw: %d\n", tx_desc_get_used(desc)); + DB_PRINT("wrap: %d\n", tx_desc_get_wrap(desc)); + DB_PRINT("last: %d\n", tx_desc_get_last(desc)); + DB_PRINT("length: %d\n", tx_desc_get_length(desc)); +} + +static inline uint64_t rx_desc_get_buffer(CadenceGEMState *s, uint32_t *desc) +{ + uint64_t ret = desc[0] & ~0x3UL; + + if (s->regs[GEM_DMACFG] & GEM_DMACFG_ADDR_64B) { + ret |= (uint64_t)desc[2] << 32; + } + return ret; +} + +static inline int gem_get_desc_len(CadenceGEMState *s, bool rx_n_tx) +{ + int ret = 2; + + if (s->regs[GEM_DMACFG] & GEM_DMACFG_ADDR_64B) { + ret += 2; + } + if (s->regs[GEM_DMACFG] & (rx_n_tx ? GEM_DMACFG_RX_BD_EXT + : GEM_DMACFG_TX_BD_EXT)) { + ret += 2; + } + + assert(ret <= DESC_MAX_NUM_WORDS); + return ret; +} + +static inline unsigned rx_desc_get_wrap(uint32_t *desc) +{ + return desc[0] & DESC_0_RX_WRAP ? 1 : 0; +} + +static inline unsigned rx_desc_get_ownership(uint32_t *desc) +{ + return desc[0] & DESC_0_RX_OWNERSHIP ? 1 : 0; +} + +static inline void rx_desc_set_ownership(uint32_t *desc) +{ + desc[0] |= DESC_0_RX_OWNERSHIP; +} + +static inline void rx_desc_set_sof(uint32_t *desc) +{ + desc[1] |= DESC_1_RX_SOF; +} + +static inline void rx_desc_clear_control(uint32_t *desc) +{ + desc[1] = 0; +} + +static inline void rx_desc_set_eof(uint32_t *desc) +{ + desc[1] |= DESC_1_RX_EOF; +} + +static inline void rx_desc_set_length(uint32_t *desc, unsigned len) +{ + desc[1] &= ~DESC_1_LENGTH; + desc[1] |= len; +} + +static inline void rx_desc_set_broadcast(uint32_t *desc) +{ + desc[1] |= R_DESC_1_RX_BROADCAST; +} + +static inline void rx_desc_set_unicast_hash(uint32_t *desc) +{ + desc[1] |= R_DESC_1_RX_UNICAST_HASH; +} + +static inline void rx_desc_set_multicast_hash(uint32_t *desc) +{ + desc[1] |= R_DESC_1_RX_MULTICAST_HASH; +} + +static inline void rx_desc_set_sar(uint32_t *desc, int sar_idx) +{ + desc[1] = deposit32(desc[1], R_DESC_1_RX_SAR_SHIFT, R_DESC_1_RX_SAR_LENGTH, + sar_idx); + desc[1] |= R_DESC_1_RX_SAR_MATCH; +} + +/* The broadcast MAC address: 0xFFFFFFFFFFFF */ +static const uint8_t broadcast_addr[] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; + +static uint32_t gem_get_max_buf_len(CadenceGEMState *s, bool tx) +{ + uint32_t size; + if (s->regs[GEM_NWCFG] & GEM_NWCFG_JUMBO_FRAME) { + size = s->regs[GEM_JUMBO_MAX_LEN]; + if (size > s->jumbo_max_len) { + size = s->jumbo_max_len; + qemu_log_mask(LOG_GUEST_ERROR, "GEM_JUMBO_MAX_LEN reg cannot be" + " greater than 0x%" PRIx32 "\n", s->jumbo_max_len); + } + } else if (tx) { + size = 1518; + } else { + size = s->regs[GEM_NWCFG] & GEM_NWCFG_RCV_1538 ? 1538 : 1518; + } + return size; +} + +static void gem_set_isr(CadenceGEMState *s, int q, uint32_t flag) +{ + if (q == 0) { + s->regs[GEM_ISR] |= flag & ~(s->regs[GEM_IMR]); + } else { + s->regs[GEM_INT_Q1_STATUS + q - 1] |= flag & + ~(s->regs[GEM_INT_Q1_MASK + q - 1]); + } +} + +/* + * gem_init_register_masks: + * One time initialization. 
+ * Set masks to identify which register bits have magical clear properties + */ +static void gem_init_register_masks(CadenceGEMState *s) +{ + unsigned int i; + /* Mask of register bits which are read only */ + memset(&s->regs_ro[0], 0, sizeof(s->regs_ro)); + s->regs_ro[GEM_NWCTRL] = 0xFFF80000; + s->regs_ro[GEM_NWSTATUS] = 0xFFFFFFFF; + s->regs_ro[GEM_DMACFG] = 0x8E00F000; + s->regs_ro[GEM_TXSTATUS] = 0xFFFFFE08; + s->regs_ro[GEM_RXQBASE] = 0x00000003; + s->regs_ro[GEM_TXQBASE] = 0x00000003; + s->regs_ro[GEM_RXSTATUS] = 0xFFFFFFF0; + s->regs_ro[GEM_ISR] = 0xFFFFFFFF; + s->regs_ro[GEM_IMR] = 0xFFFFFFFF; + s->regs_ro[GEM_MODID] = 0xFFFFFFFF; + for (i = 0; i < s->num_priority_queues; i++) { + s->regs_ro[GEM_INT_Q1_STATUS + i] = 0xFFFFFFFF; + s->regs_ro[GEM_INT_Q1_ENABLE + i] = 0xFFFFF319; + s->regs_ro[GEM_INT_Q1_DISABLE + i] = 0xFFFFF319; + s->regs_ro[GEM_INT_Q1_MASK + i] = 0xFFFFFFFF; + } + + /* Mask of register bits which are clear on read */ + memset(&s->regs_rtc[0], 0, sizeof(s->regs_rtc)); + s->regs_rtc[GEM_ISR] = 0xFFFFFFFF; + for (i = 0; i < s->num_priority_queues; i++) { + s->regs_rtc[GEM_INT_Q1_STATUS + i] = 0x00000CE6; + } + + /* Mask of register bits which are write 1 to clear */ + memset(&s->regs_w1c[0], 0, sizeof(s->regs_w1c)); + s->regs_w1c[GEM_TXSTATUS] = 0x000001F7; + s->regs_w1c[GEM_RXSTATUS] = 0x0000000F; + + /* Mask of register bits which are write only */ + memset(&s->regs_wo[0], 0, sizeof(s->regs_wo)); + s->regs_wo[GEM_NWCTRL] = 0x00073E60; + s->regs_wo[GEM_IER] = 0x07FFFFFF; + s->regs_wo[GEM_IDR] = 0x07FFFFFF; + for (i = 0; i < s->num_priority_queues; i++) { + s->regs_wo[GEM_INT_Q1_ENABLE + i] = 0x00000CE6; + s->regs_wo[GEM_INT_Q1_DISABLE + i] = 0x00000CE6; + } +} + +/* + * phy_update_link: + * Make the emulated PHY link state match the QEMU "interface" state. + */ +static void phy_update_link(CadenceGEMState *s) +{ + DB_PRINT("down %d\n", qemu_get_queue(s->nic)->link_down); + + /* Autonegotiation status mirrors link status. */ + if (qemu_get_queue(s->nic)->link_down) { + s->phy_regs[PHY_REG_STATUS] &= ~(PHY_REG_STATUS_ANEGCMPL | + PHY_REG_STATUS_LINK); + s->phy_regs[PHY_REG_INT_ST] |= PHY_REG_INT_ST_LINKC; + } else { + s->phy_regs[PHY_REG_STATUS] |= (PHY_REG_STATUS_ANEGCMPL | + PHY_REG_STATUS_LINK); + s->phy_regs[PHY_REG_INT_ST] |= (PHY_REG_INT_ST_LINKC | + PHY_REG_INT_ST_ANEGCMPL | + PHY_REG_INT_ST_ENERGY); + } +} + +static bool gem_can_receive(NetClientState *nc) +{ + CadenceGEMState *s; + int i; + + s = qemu_get_nic_opaque(nc); + + /* Do nothing if receive is not enabled. */ + if (!(s->regs[GEM_NWCTRL] & GEM_NWCTRL_RXENA)) { + if (s->can_rx_state != 1) { + s->can_rx_state = 1; + DB_PRINT("can't receive - no enable\n"); + } + return false; + } + + for (i = 0; i < s->num_priority_queues; i++) { + if (rx_desc_get_ownership(s->rx_desc[i]) != 1) { + break; + } + }; + + if (i == s->num_priority_queues) { + if (s->can_rx_state != 2) { + s->can_rx_state = 2; + DB_PRINT("can't receive - all the buffer descriptors are busy\n"); + } + return false; + } + + if (s->can_rx_state != 0) { + s->can_rx_state = 0; + DB_PRINT("can receive\n"); + } + return true; +} + +/* + * gem_update_int_status: + * Raise or lower interrupt based on current status. + */ +static void gem_update_int_status(CadenceGEMState *s) +{ + int i; + + qemu_set_irq(s->irq[0], !!s->regs[GEM_ISR]); + + for (i = 1; i < s->num_priority_queues; ++i) { + qemu_set_irq(s->irq[i], !!s->regs[GEM_INT_Q1_STATUS + i - 1]); + } +} + +/* + * gem_receive_updatestats: + * Increment receive statistics. 
+ */ +static void gem_receive_updatestats(CadenceGEMState *s, const uint8_t *packet, + unsigned bytes) +{ + uint64_t octets; + + /* Total octets (bytes) received */ + octets = ((uint64_t)(s->regs[GEM_OCTRXLO]) << 32) | + s->regs[GEM_OCTRXHI]; + octets += bytes; + s->regs[GEM_OCTRXLO] = octets >> 32; + s->regs[GEM_OCTRXHI] = octets; + + /* Error-free Frames received */ + s->regs[GEM_RXCNT]++; + + /* Error-free Broadcast Frames counter */ + if (!memcmp(packet, broadcast_addr, 6)) { + s->regs[GEM_RXBROADCNT]++; + } + + /* Error-free Multicast Frames counter */ + if (packet[0] == 0x01) { + s->regs[GEM_RXMULTICNT]++; + } + + if (bytes <= 64) { + s->regs[GEM_RX64CNT]++; + } else if (bytes <= 127) { + s->regs[GEM_RX65CNT]++; + } else if (bytes <= 255) { + s->regs[GEM_RX128CNT]++; + } else if (bytes <= 511) { + s->regs[GEM_RX256CNT]++; + } else if (bytes <= 1023) { + s->regs[GEM_RX512CNT]++; + } else if (bytes <= 1518) { + s->regs[GEM_RX1024CNT]++; + } else { + s->regs[GEM_RX1519CNT]++; + } +} + +/* + * Get the MAC Address bit from the specified position + */ +static unsigned get_bit(const uint8_t *mac, unsigned bit) +{ + unsigned byte; + + byte = mac[bit / 8]; + byte >>= (bit & 0x7); + byte &= 1; + + return byte; +} + +/* + * Calculate a GEM MAC Address hash index + */ +static unsigned calc_mac_hash(const uint8_t *mac) +{ + int index_bit, mac_bit; + unsigned hash_index; + + hash_index = 0; + mac_bit = 5; + for (index_bit = 5; index_bit >= 0; index_bit--) { + hash_index |= (get_bit(mac, mac_bit) ^ + get_bit(mac, mac_bit + 6) ^ + get_bit(mac, mac_bit + 12) ^ + get_bit(mac, mac_bit + 18) ^ + get_bit(mac, mac_bit + 24) ^ + get_bit(mac, mac_bit + 30) ^ + get_bit(mac, mac_bit + 36) ^ + get_bit(mac, mac_bit + 42)) << index_bit; + mac_bit--; + } + + return hash_index; +} + +/* + * gem_mac_address_filter: + * Accept or reject this destination address? + * Returns: + * GEM_RX_REJECT: reject + * >= 0: Specific address accept (which matched SAR is returned) + * others for various other modes of accept: + * GEM_RX_PROMISCUOUS_ACCEPT, GEM_RX_BROADCAST_ACCEPT, + * GEM_RX_MULTICAST_HASH_ACCEPT or GEM_RX_UNICAST_HASH_ACCEPT + */ +static int gem_mac_address_filter(CadenceGEMState *s, const uint8_t *packet) +{ + uint8_t *gem_spaddr; + int i, is_mc; + + /* Promiscuous mode? */ + if (s->regs[GEM_NWCFG] & GEM_NWCFG_PROMISC) { + return GEM_RX_PROMISCUOUS_ACCEPT; + } + + if (!memcmp(packet, broadcast_addr, 6)) { + /* Reject broadcast packets? */ + if (s->regs[GEM_NWCFG] & GEM_NWCFG_BCAST_REJ) { + return GEM_RX_REJECT; + } + return GEM_RX_BROADCAST_ACCEPT; + } + + /* Accept packets with hash match? */ + is_mc = is_multicast_ether_addr(packet); + if ((is_mc && (s->regs[GEM_NWCFG] & GEM_NWCFG_MCAST_HASH)) || + (!is_mc && (s->regs[GEM_NWCFG] & GEM_NWCFG_UCAST_HASH))) { + uint64_t buckets; + unsigned hash_index; + + hash_index = calc_mac_hash(packet); + buckets = ((uint64_t)s->regs[GEM_HASHHI] << 32) | s->regs[GEM_HASHLO]; + if ((buckets >> hash_index) & 1) { + return is_mc ?
GEM_RX_MULTICAST_HASH_ACCEPT + : GEM_RX_UNICAST_HASH_ACCEPT; + } + } + + /* Check all 4 specific addresses */ + gem_spaddr = (uint8_t *)&(s->regs[GEM_SPADDR1LO]); + for (i = 3; i >= 0; i--) { + if (s->sar_active[i] && !memcmp(packet, gem_spaddr + 8 * i, 6)) { + return GEM_RX_SAR_ACCEPT + i; + } + } + + /* No address match; reject the packet */ + return GEM_RX_REJECT; +} + +/* Figure out which queue the received data should be sent to */ +static int get_queue_from_screen(CadenceGEMState *s, uint8_t *rxbuf_ptr, + unsigned rxbufsize) +{ + uint32_t reg; + bool matched, mismatched; + int i, j; + + for (i = 0; i < s->num_type1_screeners; i++) { + reg = s->regs[GEM_SCREENING_TYPE1_REGISTER_0 + i]; + matched = false; + mismatched = false; + + /* Screening is based on UDP Port */ + if (reg & GEM_ST1R_UDP_PORT_MATCH_ENABLE) { + uint16_t udp_port = rxbuf_ptr[14 + 22] << 8 | rxbuf_ptr[14 + 23]; + if (udp_port == extract32(reg, GEM_ST1R_UDP_PORT_MATCH_SHIFT, + GEM_ST1R_UDP_PORT_MATCH_WIDTH)) { + matched = true; + } else { + mismatched = true; + } + } + + /* Screening is based on DS/TC */ + if (reg & GEM_ST1R_DSTC_ENABLE) { + uint8_t dscp = rxbuf_ptr[14 + 1]; + if (dscp == extract32(reg, GEM_ST1R_DSTC_MATCH_SHIFT, + GEM_ST1R_DSTC_MATCH_WIDTH)) { + matched = true; + } else { + mismatched = true; + } + } + + if (matched && !mismatched) { + return extract32(reg, GEM_ST1R_QUEUE_SHIFT, GEM_ST1R_QUEUE_WIDTH); + } + } + + for (i = 0; i < s->num_type2_screeners; i++) { + reg = s->regs[GEM_SCREENING_TYPE2_REGISTER_0 + i]; + matched = false; + mismatched = false; + + if (reg & GEM_ST2R_ETHERTYPE_ENABLE) { + uint16_t type = rxbuf_ptr[12] << 8 | rxbuf_ptr[13]; + int et_idx = extract32(reg, GEM_ST2R_ETHERTYPE_INDEX_SHIFT, + GEM_ST2R_ETHERTYPE_INDEX_WIDTH); + + if (et_idx > s->num_type2_screeners) { + qemu_log_mask(LOG_GUEST_ERROR, "Out of range ethertype " + "register index: %d\n", et_idx); + } + if (type == s->regs[GEM_SCREENING_TYPE2_ETHERTYPE_REG_0 + + et_idx]) { + matched = true; + } else { + mismatched = true; + } + } + + /* Compare A, B, C */ + for (j = 0; j < 3; j++) { + uint32_t cr0, cr1, mask; + uint16_t rx_cmp; + int offset; + int cr_idx = extract32(reg, GEM_ST2R_COMPARE_A_SHIFT + j * 6, + GEM_ST2R_COMPARE_WIDTH); + + if (!(reg & (GEM_ST2R_COMPARE_A_ENABLE << (j * 6)))) { + continue; + } + if (cr_idx > s->num_type2_screeners) { + qemu_log_mask(LOG_GUEST_ERROR, "Out of range compare " + "register index: %d\n", cr_idx); + } + + cr0 = s->regs[GEM_TYPE2_COMPARE_0_WORD_0 + cr_idx * 2]; + cr1 = s->regs[GEM_TYPE2_COMPARE_0_WORD_0 + cr_idx * 2 + 1]; + offset = extract32(cr1, GEM_T2CW1_OFFSET_VALUE_SHIFT, + GEM_T2CW1_OFFSET_VALUE_WIDTH); + + switch (extract32(cr1, GEM_T2CW1_COMPARE_OFFSET_SHIFT, + GEM_T2CW1_COMPARE_OFFSET_WIDTH)) { + case 3: /* Skip UDP header */ + qemu_log_mask(LOG_UNIMP, "TCP compare offsets " + "unimplemented - assuming UDP\n"); + offset += 8; + /* Fallthrough */ + case 2: /* skip the IP header */ + offset += 20; + /* Fallthrough */ + case 1: /* Count from after the ethertype */ + offset += 14; + break; + case 0: + /* Offset from start of frame */ + break; + } + + rx_cmp = rxbuf_ptr[offset] << 8 | rxbuf_ptr[offset + 1]; + mask = extract32(cr0, 0, 16); + + if ((rx_cmp & mask) == (extract32(cr0, 16, 16) & mask)) { + matched = true; + } else { + mismatched = true; + } + } + + if (matched && !mismatched) { + return extract32(reg, GEM_ST2R_QUEUE_SHIFT, GEM_ST2R_QUEUE_WIDTH); + } + } + + /* We made it here, assume it's queue 0 */ + return 0; +} + +static uint32_t gem_get_queue_base_addr(CadenceGEMState *s,
bool tx, int q) +{ + uint32_t base_addr = 0; + + switch (q) { + case 0: + base_addr = s->regs[tx ? GEM_TXQBASE : GEM_RXQBASE]; + break; + case 1 ... (MAX_PRIORITY_QUEUES - 1): + base_addr = s->regs[(tx ? GEM_TRANSMIT_Q1_PTR : + GEM_RECEIVE_Q1_PTR) + q - 1]; + break; + default: + g_assert_not_reached(); + } + + return base_addr; +} + +static inline uint32_t gem_get_tx_queue_base_addr(CadenceGEMState *s, int q) +{ + return gem_get_queue_base_addr(s, true, q); +} + +static inline uint32_t gem_get_rx_queue_base_addr(CadenceGEMState *s, int q) +{ + return gem_get_queue_base_addr(s, false, q); +} + +static hwaddr gem_get_desc_addr(CadenceGEMState *s, bool tx, int q) +{ + hwaddr desc_addr = 0; + + if (s->regs[GEM_DMACFG] & GEM_DMACFG_ADDR_64B) { + desc_addr = s->regs[tx ? GEM_TBQPH : GEM_RBQPH]; + } + desc_addr <<= 32; + desc_addr |= tx ? s->tx_desc_addr[q] : s->rx_desc_addr[q]; + return desc_addr; +} + +static hwaddr gem_get_tx_desc_addr(CadenceGEMState *s, int q) +{ + return gem_get_desc_addr(s, true, q); +} + +static hwaddr gem_get_rx_desc_addr(CadenceGEMState *s, int q) +{ + return gem_get_desc_addr(s, false, q); +} + +static void gem_get_rx_desc(CadenceGEMState *s, int q) +{ + hwaddr desc_addr = gem_get_rx_desc_addr(s, q); + + DB_PRINT("read descriptor 0x%" HWADDR_PRIx "\n", desc_addr); + + /* read current descriptor */ + address_space_read(&s->dma_as, desc_addr, MEMTXATTRS_UNSPECIFIED, + s->rx_desc[q], + sizeof(uint32_t) * gem_get_desc_len(s, true)); + + /* Descriptor owned by software? */ + if (rx_desc_get_ownership(s->rx_desc[q]) == 1) { + DB_PRINT("descriptor 0x%" HWADDR_PRIx " owned by sw.\n", desc_addr); + s->regs[GEM_RXSTATUS] |= GEM_RXSTATUS_NOBUF; + gem_set_isr(s, q, GEM_INT_RXUSED); + /* Handle interrupt consequences */ + gem_update_int_status(s); + } +} + +/* + * gem_receive: + * Fit a packet handed to us by QEMU into the receive descriptor ring. + */ +static ssize_t gem_receive(NetClientState *nc, const uint8_t *buf, size_t size) +{ + CadenceGEMState *s = qemu_get_nic_opaque(nc); + unsigned rxbufsize, bytes_to_copy; + unsigned rxbuf_offset; + uint8_t *rxbuf_ptr; + bool first_desc = true; + int maf; + int q = 0; + + /* Is this destination MAC address "for us"? */ + maf = gem_mac_address_filter(s, buf); + if (maf == GEM_RX_REJECT) { + return size; /* no, drop silently because it's not an error */ + } + + /* Discard packets with receive length error enabled? */ + if (s->regs[GEM_NWCFG] & GEM_NWCFG_LERR_DISC) { + unsigned type_len; + + /* Fish the ethertype / length field out of the RX packet */ + type_len = buf[12] << 8 | buf[13]; + /* It is a length field, not an ethertype */ + if (type_len < 0x600) { + if (size < type_len) { + /* discard */ + return -1; + } + } + } + + /* + * Determine configured receive buffer offset (probably 0) + */ + rxbuf_offset = (s->regs[GEM_NWCFG] & GEM_NWCFG_BUFF_OFST_M) >> + GEM_NWCFG_BUFF_OFST_S; + + /* The configured size of each receive buffer determines how many + * buffers are needed to hold this packet. + */ + rxbufsize = ((s->regs[GEM_DMACFG] & GEM_DMACFG_RBUFSZ_M) >> + GEM_DMACFG_RBUFSZ_S) * GEM_DMACFG_RBUFSZ_MUL; + bytes_to_copy = size; + + /* Hardware allows a zero value here but warns against it. To avoid QEMU + * indefinite loops we enforce a minimum value here. + */ + if (rxbufsize < GEM_DMACFG_RBUFSZ_MUL) { + rxbufsize = GEM_DMACFG_RBUFSZ_MUL; + } + + /* Pad to minimum length.
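+ * A minimal Ethernet frame is 64 bytes including the 4-byte FCS, + * i.e. 60 bytes without it.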
Assume the FCS field is stripped; the logic + * below will increment it to the real minimum of 64 when + * the FCS is not stripped. + */ + if (size < 60) { + size = 60; + } + + /* Strip off the FCS field? (usually yes) */ + if (s->regs[GEM_NWCFG] & GEM_NWCFG_STRIP_FCS) { + rxbuf_ptr = (void *)buf; + } else { + unsigned crc_val; + + if (size > MAX_FRAME_SIZE - sizeof(crc_val)) { + size = MAX_FRAME_SIZE - sizeof(crc_val); + } + bytes_to_copy = size; + /* The application wants the FCS field, which QEMU does not provide. + * We must try to calculate one. + */ + + memcpy(s->rx_packet, buf, size); + memset(s->rx_packet + size, 0, MAX_FRAME_SIZE - size); + rxbuf_ptr = s->rx_packet; + crc_val = cpu_to_le32(crc32(0, s->rx_packet, MAX(size, 60))); + memcpy(s->rx_packet + size, &crc_val, sizeof(crc_val)); + + bytes_to_copy += 4; + size += 4; + } + + DB_PRINT("config bufsize: %u packet size: %zu\n", rxbufsize, size); + + /* Find which queue we are targeting */ + q = get_queue_from_screen(s, rxbuf_ptr, rxbufsize); + + if (size > gem_get_max_buf_len(s, false)) { + qemu_log_mask(LOG_GUEST_ERROR, "rx frame too long\n"); + gem_set_isr(s, q, GEM_INT_AMBA_ERR); + return -1; + } + + while (bytes_to_copy) { + hwaddr desc_addr; + + /* Do nothing if receive is not enabled. */ + if (!gem_can_receive(nc)) { + return -1; + } + + DB_PRINT("copy %" PRIu32 " bytes to 0x%" PRIx64 "\n", + MIN(bytes_to_copy, rxbufsize), + rx_desc_get_buffer(s, s->rx_desc[q])); + + /* Copy packet data to emulated DMA buffer */ + address_space_write(&s->dma_as, rx_desc_get_buffer(s, s->rx_desc[q]) + + rxbuf_offset, + MEMTXATTRS_UNSPECIFIED, rxbuf_ptr, + MIN(bytes_to_copy, rxbufsize)); + rxbuf_ptr += MIN(bytes_to_copy, rxbufsize); + bytes_to_copy -= MIN(bytes_to_copy, rxbufsize); + + rx_desc_clear_control(s->rx_desc[q]); + + /* Update the descriptor. */ + if (first_desc) { + rx_desc_set_sof(s->rx_desc[q]); + first_desc = false; + } + if (bytes_to_copy == 0) { + rx_desc_set_eof(s->rx_desc[q]); + rx_desc_set_length(s->rx_desc[q], size); + } + rx_desc_set_ownership(s->rx_desc[q]); + + switch (maf) { + case GEM_RX_PROMISCUOUS_ACCEPT: + break; + case GEM_RX_BROADCAST_ACCEPT: + rx_desc_set_broadcast(s->rx_desc[q]); + break; + case GEM_RX_UNICAST_HASH_ACCEPT: + rx_desc_set_unicast_hash(s->rx_desc[q]); + break; + case GEM_RX_MULTICAST_HASH_ACCEPT: + rx_desc_set_multicast_hash(s->rx_desc[q]); + break; + case GEM_RX_REJECT: + abort(); + default: /* SAR */ + rx_desc_set_sar(s->rx_desc[q], maf); + } + + /* Descriptor write-back. */ + desc_addr = gem_get_rx_desc_addr(s, q); + address_space_write(&s->dma_as, desc_addr, MEMTXATTRS_UNSPECIFIED, + s->rx_desc[q], + sizeof(uint32_t) * gem_get_desc_len(s, true)); + + /* Next descriptor */ + if (rx_desc_get_wrap(s->rx_desc[q])) { + DB_PRINT("wrapping RX descriptor list\n"); + s->rx_desc_addr[q] = gem_get_rx_queue_base_addr(s, q); + } else { + DB_PRINT("incrementing RX descriptor list\n"); + s->rx_desc_addr[q] += 4 * gem_get_desc_len(s, true); + } + + gem_get_rx_desc(s, q); + } + + /* Count it */ + gem_receive_updatestats(s, buf, size); + + s->regs[GEM_RXSTATUS] |= GEM_RXSTATUS_FRMRCVD; + gem_set_isr(s, q, GEM_INT_RXCMPL); + + /* Handle interrupt consequences */ + gem_update_int_status(s); + + return size; +} + +/* + * gem_transmit_updatestats: + * Increment transmit statistics.
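+ * As on the receive side, the multicast counter only fires when the + * first address byte is exactly 0x01 rather than testing the + * multicast bit alone.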
+ */ +static void gem_transmit_updatestats(CadenceGEMState *s, const uint8_t *packet, + unsigned bytes) +{ + uint64_t octets; + + /* Total octets (bytes) transmitted */ + octets = ((uint64_t)(s->regs[GEM_OCTTXLO]) << 32) | + s->regs[GEM_OCTTXHI]; + octets += bytes; + s->regs[GEM_OCTTXLO] = octets >> 32; + s->regs[GEM_OCTTXHI] = octets; + + /* Error-free Frames transmitted */ + s->regs[GEM_TXCNT]++; + + /* Error-free Broadcast Frames counter */ + if (!memcmp(packet, broadcast_addr, 6)) { + s->regs[GEM_TXBCNT]++; + } + + /* Error-free Multicast Frames counter */ + if (packet[0] == 0x01) { + s->regs[GEM_TXMCNT]++; + } + + if (bytes <= 64) { + s->regs[GEM_TX64CNT]++; + } else if (bytes <= 127) { + s->regs[GEM_TX65CNT]++; + } else if (bytes <= 255) { + s->regs[GEM_TX128CNT]++; + } else if (bytes <= 511) { + s->regs[GEM_TX256CNT]++; + } else if (bytes <= 1023) { + s->regs[GEM_TX512CNT]++; + } else if (bytes <= 1518) { + s->regs[GEM_TX1024CNT]++; + } else { + s->regs[GEM_TX1519CNT]++; + } +} + +/* + * gem_transmit: + * Fish packets out of the descriptor ring and feed them to QEMU + */ +static void gem_transmit(CadenceGEMState *s) +{ + uint32_t desc[DESC_MAX_NUM_WORDS]; + hwaddr packet_desc_addr; + uint8_t *p; + unsigned total_bytes; + int q = 0; + + /* Do nothing if transmit is not enabled. */ + if (!(s->regs[GEM_NWCTRL] & GEM_NWCTRL_TXENA)) { + return; + } + + DB_PRINT("\n"); + + /* The packet we will hand off to QEMU. + * Packets scattered across multiple descriptors are gathered to this + * one contiguous buffer first. + */ + p = s->tx_packet; + total_bytes = 0; + + for (q = s->num_priority_queues - 1; q >= 0; q--) { + /* read current descriptor */ + packet_desc_addr = gem_get_tx_desc_addr(s, q); + + DB_PRINT("read descriptor 0x%" HWADDR_PRIx "\n", packet_desc_addr); + address_space_read(&s->dma_as, packet_desc_addr, + MEMTXATTRS_UNSPECIFIED, desc, + sizeof(uint32_t) * gem_get_desc_len(s, false)); + /* Handle all descriptors owned by hardware */ + while (tx_desc_get_used(desc) == 0) { + + /* Do nothing if transmit is not enabled. */ + if (!(s->regs[GEM_NWCTRL] & GEM_NWCTRL_TXENA)) { + return; + } + print_gem_tx_desc(desc, q); + + /* The real hardware would eat this (and possibly crash). + * For QEMU let's lend a helping hand. + */ + if ((tx_desc_get_buffer(s, desc) == 0) || + (tx_desc_get_length(desc) == 0)) { + DB_PRINT("Invalid TX descriptor @ 0x%" HWADDR_PRIx "\n", + packet_desc_addr); + break; + } + + if (tx_desc_get_length(desc) > gem_get_max_buf_len(s, true) - + (p - s->tx_packet)) { + qemu_log_mask(LOG_GUEST_ERROR, "TX descriptor @ 0x%" \ + HWADDR_PRIx " too large: size 0x%x space 0x%zx\n", + packet_desc_addr, tx_desc_get_length(desc), + gem_get_max_buf_len(s, true) - (p - s->tx_packet)); + gem_set_isr(s, q, GEM_INT_AMBA_ERR); + break; + } + + /* Gather this fragment of the packet from "dma memory" to our + * contig buffer. + */ + address_space_read(&s->dma_as, tx_desc_get_buffer(s, desc), + MEMTXATTRS_UNSPECIFIED, + p, tx_desc_get_length(desc)); + p += tx_desc_get_length(desc); + total_bytes += tx_desc_get_length(desc); + + /* Last descriptor for this packet; hand the whole thing off */ + if (tx_desc_get_last(desc)) { + uint32_t desc_first[DESC_MAX_NUM_WORDS]; + hwaddr desc_addr = gem_get_tx_desc_addr(s, q); + + /* Modify the 1st descriptor of this packet to be owned by + * the processor. 
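+ * Only the first descriptor of a frame gets its used bit set here; + * the frame's remaining descriptors are left untouched.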
+ */ + address_space_read(&s->dma_as, desc_addr, + MEMTXATTRS_UNSPECIFIED, desc_first, + sizeof(desc_first)); + tx_desc_set_used(desc_first); + address_space_write(&s->dma_as, desc_addr, + MEMTXATTRS_UNSPECIFIED, desc_first, + sizeof(desc_first)); + /* Advance the hardware current descriptor past this packet */ + if (tx_desc_get_wrap(desc)) { + s->tx_desc_addr[q] = gem_get_tx_queue_base_addr(s, q); + } else { + s->tx_desc_addr[q] = packet_desc_addr + + 4 * gem_get_desc_len(s, false); + } + DB_PRINT("TX descriptor next: 0x%08x\n", s->tx_desc_addr[q]); + + s->regs[GEM_TXSTATUS] |= GEM_TXSTATUS_TXCMPL; + gem_set_isr(s, q, GEM_INT_TXCMPL); + + /* Handle interrupt consequences */ + gem_update_int_status(s); + + /* Is checksum offload enabled? */ + if (s->regs[GEM_DMACFG] & GEM_DMACFG_TXCSUM_OFFL) { + net_checksum_calculate(s->tx_packet, total_bytes, CSUM_ALL); + } + + /* Update MAC statistics */ + gem_transmit_updatestats(s, s->tx_packet, total_bytes); + + /* Send the packet somewhere */ + if (s->phy_loop || (s->regs[GEM_NWCTRL] & + GEM_NWCTRL_LOCALLOOP)) { + qemu_receive_packet(qemu_get_queue(s->nic), s->tx_packet, + total_bytes); + } else { + qemu_send_packet(qemu_get_queue(s->nic), s->tx_packet, + total_bytes); + } + + /* Prepare for next packet */ + p = s->tx_packet; + total_bytes = 0; + } + + /* read next descriptor */ + if (tx_desc_get_wrap(desc)) { + + if (s->regs[GEM_DMACFG] & GEM_DMACFG_ADDR_64B) { + packet_desc_addr = s->regs[GEM_TBQPH]; + packet_desc_addr <<= 32; + } else { + packet_desc_addr = 0; + } + packet_desc_addr |= gem_get_tx_queue_base_addr(s, q); + } else { + packet_desc_addr += 4 * gem_get_desc_len(s, false); + } + DB_PRINT("read descriptor 0x%" HWADDR_PRIx "\n", packet_desc_addr); + address_space_read(&s->dma_as, packet_desc_addr, + MEMTXATTRS_UNSPECIFIED, desc, + sizeof(uint32_t) * gem_get_desc_len(s, false)); + } + + if (tx_desc_get_used(desc)) { + s->regs[GEM_TXSTATUS] |= GEM_TXSTATUS_USED; + /* IRQ TXUSED is defined only for queue 0 */ + if (q == 0) { + gem_set_isr(s, 0, GEM_INT_TXUSED); + } + gem_update_int_status(s); + } + } +} + +static void gem_phy_reset(CadenceGEMState *s) +{ + memset(&s->phy_regs[0], 0, sizeof(s->phy_regs)); + s->phy_regs[PHY_REG_CONTROL] = 0x1140; + s->phy_regs[PHY_REG_STATUS] = 0x7969; + s->phy_regs[PHY_REG_PHYID1] = 0x0141; + s->phy_regs[PHY_REG_PHYID2] = 0x0CC2; + s->phy_regs[PHY_REG_ANEGADV] = 0x01E1; + s->phy_regs[PHY_REG_LINKPABIL] = 0xCDE1; + s->phy_regs[PHY_REG_ANEGEXP] = 0x000F; + s->phy_regs[PHY_REG_NEXTP] = 0x2001; + s->phy_regs[PHY_REG_LINKPNEXTP] = 0x40E6; + s->phy_regs[PHY_REG_100BTCTRL] = 0x0300; + s->phy_regs[PHY_REG_1000BTSTAT] = 0x7C00; + s->phy_regs[PHY_REG_EXTSTAT] = 0x3000; + s->phy_regs[PHY_REG_PHYSPCFC_CTL] = 0x0078; + s->phy_regs[PHY_REG_PHYSPCFC_ST] = 0x7C00; + s->phy_regs[PHY_REG_EXT_PHYSPCFC_CTL] = 0x0C60; + s->phy_regs[PHY_REG_LED] = 0x4100; + s->phy_regs[PHY_REG_EXT_PHYSPCFC_CTL2] = 0x000A; + s->phy_regs[PHY_REG_EXT_PHYSPCFC_ST] = 0x848B; + + phy_update_link(s); +} + +static void gem_reset(DeviceState *d) +{ + int i; + CadenceGEMState *s = CADENCE_GEM(d); + const uint8_t *a; + uint32_t queues_mask = 0; + + DB_PRINT("\n"); + + /* Set post reset register values */ + memset(&s->regs[0], 0, sizeof(s->regs)); + s->regs[GEM_NWCFG] = 0x00080000; + s->regs[GEM_NWSTATUS] = 0x00000006; + s->regs[GEM_DMACFG] = 0x00020784; + s->regs[GEM_IMR] = 0x07ffffff; + s->regs[GEM_TXPAUSE] = 0x0000ffff; + s->regs[GEM_TXPARTIALSF] = 0x000003ff; + s->regs[GEM_RXPARTIALSF] = 0x000003ff; + s->regs[GEM_MODID] = s->revision; + 
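+ /* The design configuration registers below advertise this model's + * capabilities, e.g. 64-bit descriptor addressing (GEM_DESCONF6) and + * the configured jumbo frame limit (GEM_DESCONF2). + */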
s->regs[GEM_DESCONF] = 0x02D00111; + s->regs[GEM_DESCONF2] = 0x2ab10000 | s->jumbo_max_len; + s->regs[GEM_DESCONF5] = 0x002f2045; + s->regs[GEM_DESCONF6] = GEM_DESCONF6_64B_MASK; + s->regs[GEM_INT_Q1_MASK] = 0x00000CE6; + s->regs[GEM_JUMBO_MAX_LEN] = s->jumbo_max_len; + + if (s->num_priority_queues > 1) { + queues_mask = MAKE_64BIT_MASK(1, s->num_priority_queues - 1); + s->regs[GEM_DESCONF6] |= queues_mask; + } + + /* Set MAC address */ + a = &s->conf.macaddr.a[0]; + s->regs[GEM_SPADDR1LO] = a[0] | (a[1] << 8) | (a[2] << 16) | (a[3] << 24); + s->regs[GEM_SPADDR1HI] = a[4] | (a[5] << 8); + + for (i = 0; i < 4; i++) { + s->sar_active[i] = false; + } + + gem_phy_reset(s); + + gem_update_int_status(s); +} + +static uint16_t gem_phy_read(CadenceGEMState *s, unsigned reg_num) +{ + DB_PRINT("reg: %d value: 0x%04x\n", reg_num, s->phy_regs[reg_num]); + return s->phy_regs[reg_num]; +} + +static void gem_phy_write(CadenceGEMState *s, unsigned reg_num, uint16_t val) +{ + DB_PRINT("reg: %d value: 0x%04x\n", reg_num, val); + + switch (reg_num) { + case PHY_REG_CONTROL: + if (val & PHY_REG_CONTROL_RST) { + /* Phy reset */ + gem_phy_reset(s); + val &= ~(PHY_REG_CONTROL_RST | PHY_REG_CONTROL_LOOP); + s->phy_loop = 0; + } + if (val & PHY_REG_CONTROL_ANEG) { + /* Complete autonegotiation immediately */ + val &= ~(PHY_REG_CONTROL_ANEG | PHY_REG_CONTROL_ANRESTART); + s->phy_regs[PHY_REG_STATUS] |= PHY_REG_STATUS_ANEGCMPL; + } + if (val & PHY_REG_CONTROL_LOOP) { + DB_PRINT("PHY placed in loopback\n"); + s->phy_loop = 1; + } else { + s->phy_loop = 0; + } + break; + } + s->phy_regs[reg_num] = val; +} + +/* + * gem_read32: + * Read a GEM register. + */ +static uint64_t gem_read(void *opaque, hwaddr offset, unsigned size) +{ + CadenceGEMState *s; + uint32_t retval; + s = (CadenceGEMState *)opaque; + + offset >>= 2; + retval = s->regs[offset]; + + DB_PRINT("offset: 0x%04x read: 0x%08x\n", (unsigned)offset*4, retval); + + switch (offset) { + case GEM_ISR: + DB_PRINT("lowering irqs on ISR read\n"); + /* The interrupts get updated at the end of the function. */ + break; + case GEM_PHYMNTNC: + if (retval & GEM_PHYMNTNC_OP_R) { + uint32_t phy_addr, reg_num; + + phy_addr = (retval & GEM_PHYMNTNC_ADDR) >> GEM_PHYMNTNC_ADDR_SHFT; + if (phy_addr == s->phy_addr) { + reg_num = (retval & GEM_PHYMNTNC_REG) >> GEM_PHYMNTNC_REG_SHIFT; + retval &= 0xFFFF0000; + retval |= gem_phy_read(s, reg_num); + } else { + retval |= 0xFFFF; /* No device at this address */ + } + } + break; + } + + /* Squash read to clear bits */ + s->regs[offset] &= ~(s->regs_rtc[offset]); + + /* Do not provide write only bits */ + retval &= ~(s->regs_wo[offset]); + + DB_PRINT("0x%08x\n", retval); + gem_update_int_status(s); + return retval; +} + +/* + * gem_write32: + * Write a GEM register. 
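+ * Read-only bits are masked out of the incoming value and + * write-1-to-clear bits are applied before the write is committed; + * per-register side effects are handled afterwards.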
+ */ +static void gem_write(void *opaque, hwaddr offset, uint64_t val, + unsigned size) +{ + CadenceGEMState *s = (CadenceGEMState *)opaque; + uint32_t readonly; + int i; + + DB_PRINT("offset: 0x%04x write: 0x%08x ", (unsigned)offset, (unsigned)val); + offset >>= 2; + + /* Squash bits which are read only in write value */ + val &= ~(s->regs_ro[offset]); + /* Preserve (only) bits which are read only and wtc in register */ + readonly = s->regs[offset] & (s->regs_ro[offset] | s->regs_w1c[offset]); + + /* Copy register write to backing store */ + s->regs[offset] = (val & ~s->regs_w1c[offset]) | readonly; + + /* do w1c */ + s->regs[offset] &= ~(s->regs_w1c[offset] & val); + + /* Handle register write side effects */ + switch (offset) { + case GEM_NWCTRL: + if (val & GEM_NWCTRL_RXENA) { + for (i = 0; i < s->num_priority_queues; ++i) { + gem_get_rx_desc(s, i); + } + } + if (val & GEM_NWCTRL_TXSTART) { + gem_transmit(s); + } + if (!(val & GEM_NWCTRL_TXENA)) { + /* Reset to start of Q when transmit disabled. */ + for (i = 0; i < s->num_priority_queues; i++) { + s->tx_desc_addr[i] = gem_get_tx_queue_base_addr(s, i); + } + } + if (gem_can_receive(qemu_get_queue(s->nic))) { + qemu_flush_queued_packets(qemu_get_queue(s->nic)); + } + break; + + case GEM_TXSTATUS: + gem_update_int_status(s); + break; + case GEM_RXQBASE: + s->rx_desc_addr[0] = val; + break; + case GEM_RECEIVE_Q1_PTR ... GEM_RECEIVE_Q7_PTR: + s->rx_desc_addr[offset - GEM_RECEIVE_Q1_PTR + 1] = val; + break; + case GEM_TXQBASE: + s->tx_desc_addr[0] = val; + break; + case GEM_TRANSMIT_Q1_PTR ... GEM_TRANSMIT_Q7_PTR: + s->tx_desc_addr[offset - GEM_TRANSMIT_Q1_PTR + 1] = val; + break; + case GEM_RXSTATUS: + gem_update_int_status(s); + break; + case GEM_IER: + s->regs[GEM_IMR] &= ~val; + gem_update_int_status(s); + break; + case GEM_JUMBO_MAX_LEN: + s->regs[GEM_JUMBO_MAX_LEN] = val & MAX_JUMBO_FRAME_SIZE_MASK; + break; + case GEM_INT_Q1_ENABLE ... GEM_INT_Q7_ENABLE: + s->regs[GEM_INT_Q1_MASK + offset - GEM_INT_Q1_ENABLE] &= ~val; + gem_update_int_status(s); + break; + case GEM_IDR: + s->regs[GEM_IMR] |= val; + gem_update_int_status(s); + break; + case GEM_INT_Q1_DISABLE ... 
GEM_INT_Q7_DISABLE: + s->regs[GEM_INT_Q1_MASK + offset - GEM_INT_Q1_DISABLE] |= val; + gem_update_int_status(s); + break; + case GEM_SPADDR1LO: + case GEM_SPADDR2LO: + case GEM_SPADDR3LO: + case GEM_SPADDR4LO: + s->sar_active[(offset - GEM_SPADDR1LO) / 2] = false; + break; + case GEM_SPADDR1HI: + case GEM_SPADDR2HI: + case GEM_SPADDR3HI: + case GEM_SPADDR4HI: + s->sar_active[(offset - GEM_SPADDR1HI) / 2] = true; + break; + case GEM_PHYMNTNC: + if (val & GEM_PHYMNTNC_OP_W) { + uint32_t phy_addr, reg_num; + + phy_addr = (val & GEM_PHYMNTNC_ADDR) >> GEM_PHYMNTNC_ADDR_SHFT; + if (phy_addr == s->phy_addr) { + reg_num = (val & GEM_PHYMNTNC_REG) >> GEM_PHYMNTNC_REG_SHIFT; + gem_phy_write(s, reg_num, val); + } + } + break; + } + + DB_PRINT("newval: 0x%08x\n", s->regs[offset]); +} + +static const MemoryRegionOps gem_ops = { + .read = gem_read, + .write = gem_write, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static void gem_set_link(NetClientState *nc) +{ + CadenceGEMState *s = qemu_get_nic_opaque(nc); + + DB_PRINT("\n"); + phy_update_link(s); + gem_update_int_status(s); +} + +static NetClientInfo net_gem_info = { + .type = NET_CLIENT_DRIVER_NIC, + .size = sizeof(NICState), + .can_receive = gem_can_receive, + .receive = gem_receive, + .link_status_changed = gem_set_link, +}; + +static void gem_realize(DeviceState *dev, Error **errp) +{ + CadenceGEMState *s = CADENCE_GEM(dev); + int i; + + address_space_init(&s->dma_as, + s->dma_mr ? s->dma_mr : get_system_memory(), "dma"); + + if (s->num_priority_queues == 0 || + s->num_priority_queues > MAX_PRIORITY_QUEUES) { + error_setg(errp, "Invalid num-priority-queues value: %" PRIx8, + s->num_priority_queues); + return; + } else if (s->num_type1_screeners > MAX_TYPE1_SCREENERS) { + error_setg(errp, "Invalid num-type1-screeners value: %" PRIx8, + s->num_type1_screeners); + return; + } else if (s->num_type2_screeners > MAX_TYPE2_SCREENERS) { + error_setg(errp, "Invalid num-type2-screeners value: %" PRIx8, + s->num_type2_screeners); + return; + } + + for (i = 0; i < s->num_priority_queues; ++i) { + sysbus_init_irq(SYS_BUS_DEVICE(dev), &s->irq[i]); + } + + qemu_macaddr_default_if_unset(&s->conf.macaddr); + + s->nic = qemu_new_nic(&net_gem_info, &s->conf, + object_get_typename(OBJECT(dev)), dev->id, s); + + if (s->jumbo_max_len > MAX_FRAME_SIZE) { + error_setg(errp, "jumbo-max-len is greater than %d", + MAX_FRAME_SIZE); + return; + } +} + +static void gem_init(Object *obj) +{ + CadenceGEMState *s = CADENCE_GEM(obj); + DeviceState *dev = DEVICE(obj); + + DB_PRINT("\n"); + + gem_init_register_masks(s); + memory_region_init_io(&s->iomem, OBJECT(s), &gem_ops, s, + "enet", sizeof(s->regs)); + + sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->iomem); + + object_property_add_link(obj, "dma", TYPE_MEMORY_REGION, + (Object **)&s->dma_mr, + qdev_prop_allow_set_link_before_realize, + OBJ_PROP_LINK_STRONG); +} + +static const VMStateDescription vmstate_cadence_gem = { + .name = "cadence_gem", + .version_id = 4, + .minimum_version_id = 4, + .fields = (VMStateField[]) { + VMSTATE_UINT32_ARRAY(regs, CadenceGEMState, CADENCE_GEM_MAXREG), + VMSTATE_UINT16_ARRAY(phy_regs, CadenceGEMState, 32), + VMSTATE_UINT8(phy_loop, CadenceGEMState), + VMSTATE_UINT32_ARRAY(rx_desc_addr, CadenceGEMState, + MAX_PRIORITY_QUEUES), + VMSTATE_UINT32_ARRAY(tx_desc_addr, CadenceGEMState, + MAX_PRIORITY_QUEUES), + VMSTATE_BOOL_ARRAY(sar_active, CadenceGEMState, 4), + VMSTATE_END_OF_LIST(), + } +}; + +static Property gem_properties[] = { + DEFINE_NIC_PROPERTIES(CadenceGEMState, conf), + 
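+ /* Reset value reported in GEM_MODID; boards may override this to + * match their silicon revision. + */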
DEFINE_PROP_UINT32("revision", CadenceGEMState, revision, + GEM_MODID_VALUE), + DEFINE_PROP_UINT8("phy-addr", CadenceGEMState, phy_addr, BOARD_PHY_ADDRESS), + DEFINE_PROP_UINT8("num-priority-queues", CadenceGEMState, + num_priority_queues, 1), + DEFINE_PROP_UINT8("num-type1-screeners", CadenceGEMState, + num_type1_screeners, 4), + DEFINE_PROP_UINT8("num-type2-screeners", CadenceGEMState, + num_type2_screeners, 4), + DEFINE_PROP_UINT16("jumbo-max-len", CadenceGEMState, + jumbo_max_len, 10240), + DEFINE_PROP_END_OF_LIST(), +}; + +static void gem_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = gem_realize; + device_class_set_props(dc, gem_properties); + dc->vmsd = &vmstate_cadence_gem; + dc->reset = gem_reset; +} + +static const TypeInfo gem_info = { + .name = TYPE_CADENCE_GEM, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(CadenceGEMState), + .instance_init = gem_init, + .class_init = gem_class_init, +}; + +static void gem_register_types(void) +{ + type_register_static(&gem_info); +} + +type_init(gem_register_types) diff --git a/hw/net/can/can_kvaser_pci.c b/hw/net/can/can_kvaser_pci.c new file mode 100644 index 000000000..168b3a620 --- /dev/null +++ b/hw/net/can/can_kvaser_pci.c @@ -0,0 +1,324 @@ +/* + * Kvaser PCI CAN device (SJA1000 based) emulation + * + * Copyright (c) 2013-2014 Jin Yang + * Copyright (c) 2014-2018 Pavel Pisa + * + * Partially based on educational PCIexpress APOHW hardware + * emulator used fro class A0B36APO at CTU FEE course by + * Rostislav Lisovy and Pavel Pisa + * + * Initial development supported by Google GSoC 2013 from RTEMS project slot + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include "qemu/event_notifier.h" +#include "qemu/module.h" +#include "qemu/thread.h" +#include "qemu/sockets.h" +#include "qapi/error.h" +#include "chardev/char.h" +#include "hw/irq.h" +#include "hw/pci/pci.h" +#include "hw/qdev-properties.h" +#include "migration/vmstate.h" +#include "net/can_emu.h" + +#include "can_sja1000.h" +#include "qom/object.h" + +#define TYPE_CAN_PCI_DEV "kvaser_pci" + +typedef struct KvaserPCIState KvaserPCIState; +DECLARE_INSTANCE_CHECKER(KvaserPCIState, KVASER_PCI_DEV, + TYPE_CAN_PCI_DEV) + +#ifndef KVASER_PCI_VENDOR_ID1 +#define KVASER_PCI_VENDOR_ID1 0x10e8 /* the PCI device and vendor IDs */ +#endif + +#ifndef KVASER_PCI_DEVICE_ID1 +#define KVASER_PCI_DEVICE_ID1 0x8406 +#endif + +#define KVASER_PCI_S5920_RANGE 0x80 +#define KVASER_PCI_SJA_RANGE 0x80 +#define KVASER_PCI_XILINX_RANGE 0x8 + +#define KVASER_PCI_BYTES_PER_SJA 0x20 + +#define S5920_OMB 0x0C +#define S5920_IMB 0x1C +#define S5920_MBEF 0x34 +#define S5920_INTCSR 0x38 +#define S5920_RCR 0x3C +#define S5920_PTCR 0x60 + +#define S5920_INTCSR_ADDON_INTENABLE_M 0x2000 +#define S5920_INTCSR_INTERRUPT_ASSERTED_M 0x800000 + +#define KVASER_PCI_XILINX_VERINT 7 /* Lower nibble simulate interrupts, + high nibble version number. */ + +#define KVASER_PCI_XILINX_VERSION_NUMBER 13 + +struct KvaserPCIState { + /*< private >*/ + PCIDevice dev; + /*< public >*/ + MemoryRegion s5920_io; + MemoryRegion sja_io; + MemoryRegion xilinx_io; + + CanSJA1000State sja_state; + qemu_irq irq; + + uint32_t s5920_intcsr; + uint32_t s5920_irqstate; + + CanBusState *canbus; +}; + +static void kvaser_pci_irq_handler(void *opaque, int irq_num, int level) +{ + KvaserPCIState *d = (KvaserPCIState *)opaque; + + d->s5920_irqstate = level; + if (d->s5920_intcsr & S5920_INTCSR_ADDON_INTENABLE_M) { + pci_set_irq(&d->dev, level); + } +} + +static void kvaser_pci_reset(DeviceState *dev) +{ + KvaserPCIState *d = KVASER_PCI_DEV(dev); + CanSJA1000State *s = &d->sja_state; + + can_sja_hardware_reset(s); +} + +static uint64_t kvaser_pci_s5920_io_read(void *opaque, hwaddr addr, + unsigned size) +{ + KvaserPCIState *d = opaque; + uint64_t val; + + switch (addr) { + case S5920_INTCSR: + val = d->s5920_intcsr; + val &= ~S5920_INTCSR_INTERRUPT_ASSERTED_M; + if (d->s5920_irqstate) { + val |= S5920_INTCSR_INTERRUPT_ASSERTED_M; + } + return val; + } + return 0; +} + +static void kvaser_pci_s5920_io_write(void *opaque, hwaddr addr, uint64_t data, + unsigned size) +{ + KvaserPCIState *d = opaque; + + switch (addr) { + case S5920_INTCSR: + if (d->s5920_irqstate && + ((d->s5920_intcsr ^ data) & S5920_INTCSR_ADDON_INTENABLE_M)) { + pci_set_irq(&d->dev, !!(data & S5920_INTCSR_ADDON_INTENABLE_M)); + } + d->s5920_intcsr = data; + break; + } +} + +static uint64_t kvaser_pci_sja_io_read(void *opaque, hwaddr addr, unsigned size) +{ + KvaserPCIState *d = opaque; + CanSJA1000State *s = &d->sja_state; + + if (addr >= KVASER_PCI_BYTES_PER_SJA) { + return 0; + } + + return can_sja_mem_read(s, addr, size); +} + +static void kvaser_pci_sja_io_write(void *opaque, hwaddr addr, uint64_t data, + unsigned size) +{ + KvaserPCIState *d = opaque; + CanSJA1000State *s = &d->sja_state; + + if (addr >= KVASER_PCI_BYTES_PER_SJA) { + return; + } + + can_sja_mem_write(s, addr, data, size); +} + +static uint64_t kvaser_pci_xilinx_io_read(void *opaque, hwaddr addr, + unsigned size) +{ + switch (addr) { + case KVASER_PCI_XILINX_VERINT: + return (KVASER_PCI_XILINX_VERSION_NUMBER << 4) | 0; + } + + return 0; +} + +static void kvaser_pci_xilinx_io_write(void 
*opaque, hwaddr addr, uint64_t data, + unsigned size) +{ + +} + +static const MemoryRegionOps kvaser_pci_s5920_io_ops = { + .read = kvaser_pci_s5920_io_read, + .write = kvaser_pci_s5920_io_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .impl = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +static const MemoryRegionOps kvaser_pci_sja_io_ops = { + .read = kvaser_pci_sja_io_read, + .write = kvaser_pci_sja_io_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .impl = { + .max_access_size = 1, + }, +}; + +static const MemoryRegionOps kvaser_pci_xilinx_io_ops = { + .read = kvaser_pci_xilinx_io_read, + .write = kvaser_pci_xilinx_io_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .impl = { + .max_access_size = 1, + }, +}; + +static void kvaser_pci_realize(PCIDevice *pci_dev, Error **errp) +{ + KvaserPCIState *d = KVASER_PCI_DEV(pci_dev); + CanSJA1000State *s = &d->sja_state; + uint8_t *pci_conf; + + pci_conf = pci_dev->config; + pci_conf[PCI_INTERRUPT_PIN] = 0x01; /* interrupt pin A */ + + d->irq = qemu_allocate_irq(kvaser_pci_irq_handler, d, 0); + + can_sja_init(s, d->irq); + + if (can_sja_connect_to_bus(s, d->canbus) < 0) { + error_setg(errp, "can_sja_connect_to_bus failed"); + return; + } + + memory_region_init_io(&d->s5920_io, OBJECT(d), &kvaser_pci_s5920_io_ops, + d, "kvaser_pci-s5920", KVASER_PCI_S5920_RANGE); + memory_region_init_io(&d->sja_io, OBJECT(d), &kvaser_pci_sja_io_ops, + d, "kvaser_pci-sja", KVASER_PCI_SJA_RANGE); + memory_region_init_io(&d->xilinx_io, OBJECT(d), &kvaser_pci_xilinx_io_ops, + d, "kvaser_pci-xilinx", KVASER_PCI_XILINX_RANGE); + + pci_register_bar(&d->dev, /*BAR*/ 0, PCI_BASE_ADDRESS_SPACE_IO, + &d->s5920_io); + pci_register_bar(&d->dev, /*BAR*/ 1, PCI_BASE_ADDRESS_SPACE_IO, + &d->sja_io); + pci_register_bar(&d->dev, /*BAR*/ 2, PCI_BASE_ADDRESS_SPACE_IO, + &d->xilinx_io); +} + +static void kvaser_pci_exit(PCIDevice *pci_dev) +{ + KvaserPCIState *d = KVASER_PCI_DEV(pci_dev); + CanSJA1000State *s = &d->sja_state; + + can_sja_disconnect(s); + + qemu_free_irq(d->irq); +} + +static const VMStateDescription vmstate_kvaser_pci = { + .name = "kvaser_pci", + .version_id = 1, + .minimum_version_id = 1, + .minimum_version_id_old = 1, + .fields = (VMStateField[]) { + VMSTATE_PCI_DEVICE(dev, KvaserPCIState), + /* Load this before sja_state. 
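+ * (presumably so the S5920 interrupt-enable state is already + * restored when the SJA1000 state re-drives its IRQ level)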
*/ + VMSTATE_UINT32(s5920_intcsr, KvaserPCIState), + VMSTATE_STRUCT(sja_state, KvaserPCIState, 0, vmstate_can_sja, + CanSJA1000State), + VMSTATE_END_OF_LIST() + } +}; + +static void kvaser_pci_instance_init(Object *obj) +{ + KvaserPCIState *d = KVASER_PCI_DEV(obj); + + object_property_add_link(obj, "canbus", TYPE_CAN_BUS, + (Object **)&d->canbus, + qdev_prop_allow_set_link_before_realize, + 0); +} + +static void kvaser_pci_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + + k->realize = kvaser_pci_realize; + k->exit = kvaser_pci_exit; + k->vendor_id = KVASER_PCI_VENDOR_ID1; + k->device_id = KVASER_PCI_DEVICE_ID1; + k->revision = 0x00; + k->class_id = 0x00ff00; + dc->desc = "Kvaser PCICANx"; + dc->vmsd = &vmstate_kvaser_pci; + dc->reset = kvaser_pci_reset; + set_bit(DEVICE_CATEGORY_MISC, dc->categories); +} + +static const TypeInfo kvaser_pci_info = { + .name = TYPE_CAN_PCI_DEV, + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(KvaserPCIState), + .class_init = kvaser_pci_class_init, + .instance_init = kvaser_pci_instance_init, + .interfaces = (InterfaceInfo[]) { + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { }, + }, +}; + +static void kvaser_pci_register_types(void) +{ + type_register_static(&kvaser_pci_info); +} + +type_init(kvaser_pci_register_types) diff --git a/hw/net/can/can_mioe3680_pci.c b/hw/net/can/can_mioe3680_pci.c new file mode 100644 index 000000000..7a79e2605 --- /dev/null +++ b/hw/net/can/can_mioe3680_pci.c @@ -0,0 +1,267 @@ +/* + * MIOe-3680 PCI CAN device (SJA1000 based) emulation + * + * Copyright (c) 2016 Deniz Eren (deniz.eren@icloud.com) + * + * Based on Kvaser PCI CAN device (SJA1000 based) emulation implemented by + * Jin Yang and Pavel Pisa + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include "qemu/event_notifier.h" +#include "qemu/module.h" +#include "qemu/thread.h" +#include "qemu/sockets.h" +#include "qapi/error.h" +#include "chardev/char.h" +#include "hw/irq.h" +#include "hw/pci/pci.h" +#include "hw/qdev-properties.h" +#include "migration/vmstate.h" +#include "net/can_emu.h" + +#include "can_sja1000.h" +#include "qom/object.h" + +#define TYPE_CAN_PCI_DEV "mioe3680_pci" + +typedef struct Mioe3680PCIState Mioe3680PCIState; +DECLARE_INSTANCE_CHECKER(Mioe3680PCIState, MIOe3680_PCI_DEV, + TYPE_CAN_PCI_DEV) + +/* the PCI device and vendor IDs */ +#ifndef MIOe3680_PCI_VENDOR_ID1 +#define MIOe3680_PCI_VENDOR_ID1 0x13fe +#endif + +#ifndef MIOe3680_PCI_DEVICE_ID1 +#define MIOe3680_PCI_DEVICE_ID1 0xc302 +#endif + +#define MIOe3680_PCI_SJA_COUNT 2 +#define MIOe3680_PCI_SJA_RANGE 0x400 + +#define MIOe3680_PCI_BYTES_PER_SJA 0x80 + +struct Mioe3680PCIState { + /*< private >*/ + PCIDevice dev; + /*< public >*/ + MemoryRegion sja_io[MIOe3680_PCI_SJA_COUNT]; + + CanSJA1000State sja_state[MIOe3680_PCI_SJA_COUNT]; + qemu_irq irq; + + char *model; /* The model that support, only SJA1000 now. */ + CanBusState *canbus[MIOe3680_PCI_SJA_COUNT]; +}; + +static void mioe3680_pci_reset(DeviceState *dev) +{ + Mioe3680PCIState *d = MIOe3680_PCI_DEV(dev); + int i; + + for (i = 0 ; i < MIOe3680_PCI_SJA_COUNT; i++) { + can_sja_hardware_reset(&d->sja_state[i]); + } +} + +static uint64_t mioe3680_pci_sja1_io_read(void *opaque, hwaddr addr, + unsigned size) +{ + Mioe3680PCIState *d = opaque; + CanSJA1000State *s = &d->sja_state[0]; + + if (addr >= MIOe3680_PCI_BYTES_PER_SJA) { + return 0; + } + + return can_sja_mem_read(s, addr >> 2, size); +} + +static void mioe3680_pci_sja1_io_write(void *opaque, hwaddr addr, uint64_t data, + unsigned size) +{ + Mioe3680PCIState *d = opaque; + CanSJA1000State *s = &d->sja_state[0]; + + if (addr >= MIOe3680_PCI_BYTES_PER_SJA) { + return; + } + + can_sja_mem_write(s, addr >> 2, data, size); +} + +static uint64_t mioe3680_pci_sja2_io_read(void *opaque, hwaddr addr, + unsigned size) +{ + Mioe3680PCIState *d = opaque; + CanSJA1000State *s = &d->sja_state[1]; + + if (addr >= MIOe3680_PCI_BYTES_PER_SJA) { + return 0; + } + + return can_sja_mem_read(s, addr >> 2, size); +} + +static void mioe3680_pci_sja2_io_write(void *opaque, hwaddr addr, uint64_t data, + unsigned size) +{ + Mioe3680PCIState *d = opaque; + CanSJA1000State *s = &d->sja_state[1]; + + if (addr >= MIOe3680_PCI_BYTES_PER_SJA) { + return; + } + + can_sja_mem_write(s, addr >> 2, data, size); +} + +static const MemoryRegionOps mioe3680_pci_sja1_io_ops = { + .read = mioe3680_pci_sja1_io_read, + .write = mioe3680_pci_sja1_io_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .impl = { + .max_access_size = 1, + }, +}; + +static const MemoryRegionOps mioe3680_pci_sja2_io_ops = { + .read = mioe3680_pci_sja2_io_read, + .write = mioe3680_pci_sja2_io_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .impl = { + .max_access_size = 1, + }, +}; + +static void mioe3680_pci_realize(PCIDevice *pci_dev, Error **errp) +{ + Mioe3680PCIState *d = MIOe3680_PCI_DEV(pci_dev); + uint8_t *pci_conf; + int i; + + pci_conf = pci_dev->config; + pci_conf[PCI_INTERRUPT_PIN] = 0x01; /* interrupt pin A */ + + d->irq = pci_allocate_irq(&d->dev); + + for (i = 0 ; i < MIOe3680_PCI_SJA_COUNT; i++) { + can_sja_init(&d->sja_state[i], d->irq); + } + + for (i = 0 ; i < MIOe3680_PCI_SJA_COUNT; i++) { + if (can_sja_connect_to_bus(&d->sja_state[i], d->canbus[i]) < 0) { + error_setg(errp, "can_sja_connect_to_bus failed"); + 
return; + } + } + + memory_region_init_io(&d->sja_io[0], OBJECT(d), &mioe3680_pci_sja1_io_ops, + d, "mioe3680_pci-sja1", MIOe3680_PCI_SJA_RANGE); + memory_region_init_io(&d->sja_io[1], OBJECT(d), &mioe3680_pci_sja2_io_ops, + d, "mioe3680_pci-sja2", MIOe3680_PCI_SJA_RANGE); + + for (i = 0 ; i < MIOe3680_PCI_SJA_COUNT; i++) { + pci_register_bar(&d->dev, /*BAR*/ i, PCI_BASE_ADDRESS_SPACE_IO, + &d->sja_io[i]); + } +} + +static void mioe3680_pci_exit(PCIDevice *pci_dev) +{ + Mioe3680PCIState *d = MIOe3680_PCI_DEV(pci_dev); + int i; + + for (i = 0 ; i < MIOe3680_PCI_SJA_COUNT; i++) { + can_sja_disconnect(&d->sja_state[i]); + } + + qemu_free_irq(d->irq); +} + +static const VMStateDescription vmstate_mioe3680_pci = { + .name = "mioe3680_pci", + .version_id = 1, + .minimum_version_id = 1, + .minimum_version_id_old = 1, + .fields = (VMStateField[]) { + VMSTATE_PCI_DEVICE(dev, Mioe3680PCIState), + VMSTATE_STRUCT(sja_state[0], Mioe3680PCIState, 0, vmstate_can_sja, + CanSJA1000State), + VMSTATE_STRUCT(sja_state[1], Mioe3680PCIState, 0, vmstate_can_sja, + CanSJA1000State), + VMSTATE_END_OF_LIST() + } +}; + +static void mioe3680_pci_instance_init(Object *obj) +{ + Mioe3680PCIState *d = MIOe3680_PCI_DEV(obj); + + object_property_add_link(obj, "canbus0", TYPE_CAN_BUS, + (Object **)&d->canbus[0], + qdev_prop_allow_set_link_before_realize, + 0); + object_property_add_link(obj, "canbus1", TYPE_CAN_BUS, + (Object **)&d->canbus[1], + qdev_prop_allow_set_link_before_realize, + 0); +} + +static void mioe3680_pci_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + + k->realize = mioe3680_pci_realize; + k->exit = mioe3680_pci_exit; + k->vendor_id = MIOe3680_PCI_VENDOR_ID1; + k->device_id = MIOe3680_PCI_DEVICE_ID1; + k->revision = 0x00; + k->class_id = 0x000c09; + k->subsystem_vendor_id = MIOe3680_PCI_VENDOR_ID1; + k->subsystem_id = MIOe3680_PCI_DEVICE_ID1; + dc->desc = "Mioe3680 PCICANx"; + dc->vmsd = &vmstate_mioe3680_pci; + set_bit(DEVICE_CATEGORY_MISC, dc->categories); + dc->reset = mioe3680_pci_reset; +} + +static const TypeInfo mioe3680_pci_info = { + .name = TYPE_CAN_PCI_DEV, + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(Mioe3680PCIState), + .class_init = mioe3680_pci_class_init, + .instance_init = mioe3680_pci_instance_init, + .interfaces = (InterfaceInfo[]) { + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { }, + }, +}; + +static void mioe3680_pci_register_types(void) +{ + type_register_static(&mioe3680_pci_info); +} + +type_init(mioe3680_pci_register_types) diff --git a/hw/net/can/can_pcm3680_pci.c b/hw/net/can/can_pcm3680_pci.c new file mode 100644 index 000000000..8ef4e74af --- /dev/null +++ b/hw/net/can/can_pcm3680_pci.c @@ -0,0 +1,268 @@ +/* + * PCM-3680i PCI CAN device (SJA1000 based) emulation + * + * Copyright (c) 2016 Deniz Eren (deniz.eren@icloud.com) + * + * Based on Kvaser PCI CAN device (SJA1000 based) emulation implemented by + * Jin Yang and Pavel Pisa + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or 
substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "qemu/event_notifier.h" +#include "qemu/module.h" +#include "qemu/thread.h" +#include "qemu/sockets.h" +#include "qapi/error.h" +#include "chardev/char.h" +#include "hw/irq.h" +#include "hw/pci/pci.h" +#include "hw/qdev-properties.h" +#include "migration/vmstate.h" +#include "net/can_emu.h" + +#include "can_sja1000.h" +#include "qom/object.h" + +#define TYPE_CAN_PCI_DEV "pcm3680_pci" + +typedef struct Pcm3680iPCIState Pcm3680iPCIState; +DECLARE_INSTANCE_CHECKER(Pcm3680iPCIState, PCM3680i_PCI_DEV, + TYPE_CAN_PCI_DEV) + +/* the PCI device and vendor IDs */ +#ifndef PCM3680i_PCI_VENDOR_ID1 +#define PCM3680i_PCI_VENDOR_ID1 0x13fe +#endif + +#ifndef PCM3680i_PCI_DEVICE_ID1 +#define PCM3680i_PCI_DEVICE_ID1 0xc002 +#endif + +#define PCM3680i_PCI_SJA_COUNT 2 +#define PCM3680i_PCI_SJA_RANGE 0x100 + +#define PCM3680i_PCI_BYTES_PER_SJA 0x20 + +struct Pcm3680iPCIState { + /*< private >*/ + PCIDevice dev; + /*< public >*/ + MemoryRegion sja_io[PCM3680i_PCI_SJA_COUNT]; + + CanSJA1000State sja_state[PCM3680i_PCI_SJA_COUNT]; + qemu_irq irq; + + char *model; /* The model that support, only SJA1000 now. */ + CanBusState *canbus[PCM3680i_PCI_SJA_COUNT]; +}; + +static void pcm3680i_pci_reset(DeviceState *dev) +{ + Pcm3680iPCIState *d = PCM3680i_PCI_DEV(dev); + int i; + + for (i = 0; i < PCM3680i_PCI_SJA_COUNT; i++) { + can_sja_hardware_reset(&d->sja_state[i]); + } +} + +static uint64_t pcm3680i_pci_sja1_io_read(void *opaque, hwaddr addr, + unsigned size) +{ + Pcm3680iPCIState *d = opaque; + CanSJA1000State *s = &d->sja_state[0]; + + if (addr >= PCM3680i_PCI_BYTES_PER_SJA) { + return 0; + } + + return can_sja_mem_read(s, addr, size); +} + +static void pcm3680i_pci_sja1_io_write(void *opaque, hwaddr addr, + uint64_t data, unsigned size) +{ + Pcm3680iPCIState *d = opaque; + CanSJA1000State *s = &d->sja_state[0]; + + if (addr >= PCM3680i_PCI_BYTES_PER_SJA) { + return; + } + + can_sja_mem_write(s, addr, data, size); +} + +static uint64_t pcm3680i_pci_sja2_io_read(void *opaque, hwaddr addr, + unsigned size) +{ + Pcm3680iPCIState *d = opaque; + CanSJA1000State *s = &d->sja_state[1]; + + if (addr >= PCM3680i_PCI_BYTES_PER_SJA) { + return 0; + } + + return can_sja_mem_read(s, addr, size); +} + +static void pcm3680i_pci_sja2_io_write(void *opaque, hwaddr addr, uint64_t data, + unsigned size) +{ + Pcm3680iPCIState *d = opaque; + CanSJA1000State *s = &d->sja_state[1]; + + if (addr >= PCM3680i_PCI_BYTES_PER_SJA) { + return; + } + + can_sja_mem_write(s, addr, data, size); +} + +static const MemoryRegionOps pcm3680i_pci_sja1_io_ops = { + .read = pcm3680i_pci_sja1_io_read, + .write = pcm3680i_pci_sja1_io_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .impl = { + .max_access_size = 1, + }, +}; + +static const MemoryRegionOps pcm3680i_pci_sja2_io_ops = { + .read = pcm3680i_pci_sja2_io_read, + .write = pcm3680i_pci_sja2_io_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .impl = { + .max_access_size = 1, + }, +}; + +static void pcm3680i_pci_realize(PCIDevice *pci_dev, 
Error **errp) +{ + Pcm3680iPCIState *d = PCM3680i_PCI_DEV(pci_dev); + uint8_t *pci_conf; + int i; + + pci_conf = pci_dev->config; + pci_conf[PCI_INTERRUPT_PIN] = 0x01; /* interrupt pin A */ + + d->irq = pci_allocate_irq(&d->dev); + + for (i = 0; i < PCM3680i_PCI_SJA_COUNT; i++) { + can_sja_init(&d->sja_state[i], d->irq); + } + + for (i = 0; i < PCM3680i_PCI_SJA_COUNT; i++) { + if (can_sja_connect_to_bus(&d->sja_state[i], d->canbus[i]) < 0) { + error_setg(errp, "can_sja_connect_to_bus failed"); + return; + } + } + + memory_region_init_io(&d->sja_io[0], OBJECT(d), &pcm3680i_pci_sja1_io_ops, + d, "pcm3680i_pci-sja1", PCM3680i_PCI_SJA_RANGE); + + memory_region_init_io(&d->sja_io[1], OBJECT(d), &pcm3680i_pci_sja2_io_ops, + d, "pcm3680i_pci-sja2", PCM3680i_PCI_SJA_RANGE); + + for (i = 0; i < PCM3680i_PCI_SJA_COUNT; i++) { + pci_register_bar(&d->dev, /*BAR*/ i, PCI_BASE_ADDRESS_SPACE_IO, + &d->sja_io[i]); + } +} + +static void pcm3680i_pci_exit(PCIDevice *pci_dev) +{ + Pcm3680iPCIState *d = PCM3680i_PCI_DEV(pci_dev); + int i; + + for (i = 0; i < PCM3680i_PCI_SJA_COUNT; i++) { + can_sja_disconnect(&d->sja_state[i]); + } + + qemu_free_irq(d->irq); +} + +static const VMStateDescription vmstate_pcm3680i_pci = { + .name = "pcm3680i_pci", + .version_id = 1, + .minimum_version_id = 1, + .minimum_version_id_old = 1, + .fields = (VMStateField[]) { + VMSTATE_PCI_DEVICE(dev, Pcm3680iPCIState), + VMSTATE_STRUCT(sja_state[0], Pcm3680iPCIState, 0, + vmstate_can_sja, CanSJA1000State), + VMSTATE_STRUCT(sja_state[1], Pcm3680iPCIState, 0, + vmstate_can_sja, CanSJA1000State), + VMSTATE_END_OF_LIST() + } +}; + +static void pcm3680i_pci_instance_init(Object *obj) +{ + Pcm3680iPCIState *d = PCM3680i_PCI_DEV(obj); + + object_property_add_link(obj, "canbus0", TYPE_CAN_BUS, + (Object **)&d->canbus[0], + qdev_prop_allow_set_link_before_realize, + 0); + object_property_add_link(obj, "canbus1", TYPE_CAN_BUS, + (Object **)&d->canbus[1], + qdev_prop_allow_set_link_before_realize, + 0); +} + +static void pcm3680i_pci_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + + k->realize = pcm3680i_pci_realize; + k->exit = pcm3680i_pci_exit; + k->vendor_id = PCM3680i_PCI_VENDOR_ID1; + k->device_id = PCM3680i_PCI_DEVICE_ID1; + k->revision = 0x00; + k->class_id = 0x000c09; + k->subsystem_vendor_id = PCM3680i_PCI_VENDOR_ID1; + k->subsystem_id = PCM3680i_PCI_DEVICE_ID1; + dc->desc = "Pcm3680i PCICANx"; + dc->vmsd = &vmstate_pcm3680i_pci; + set_bit(DEVICE_CATEGORY_MISC, dc->categories); + dc->reset = pcm3680i_pci_reset; +} + +static const TypeInfo pcm3680i_pci_info = { + .name = TYPE_CAN_PCI_DEV, + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(Pcm3680iPCIState), + .class_init = pcm3680i_pci_class_init, + .instance_init = pcm3680i_pci_instance_init, + .interfaces = (InterfaceInfo[]) { + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { }, + }, +}; + +static void pcm3680i_pci_register_types(void) +{ + type_register_static(&pcm3680i_pci_info); +} + +type_init(pcm3680i_pci_register_types) diff --git a/hw/net/can/can_sja1000.c b/hw/net/can/can_sja1000.c new file mode 100644 index 000000000..34eea684c --- /dev/null +++ b/hw/net/can/can_sja1000.c @@ -0,0 +1,988 @@ +/* + * CAN device - SJA1000 chip emulation for QEMU + * + * Copyright (c) 2013-2014 Jin Yang + * Copyright (c) 2014-2018 Pavel Pisa + * + * Initial development supported by Google GSoC 2013 from RTEMS project slot + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * 
of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "chardev/char.h" +#include "hw/irq.h" +#include "migration/vmstate.h" +#include "net/can_emu.h" + +#include "can_sja1000.h" + +#ifndef DEBUG_FILTER +#define DEBUG_FILTER 0 +#endif /*DEBUG_FILTER*/ + +#ifndef DEBUG_CAN +#define DEBUG_CAN 0 +#endif /*DEBUG_CAN*/ + +#define DPRINTF(fmt, ...) \ + do { \ + if (DEBUG_CAN) { \ + qemu_log("[cansja]: " fmt , ## __VA_ARGS__); \ + } \ + } while (0) + +static void can_sja_software_reset(CanSJA1000State *s) +{ + s->mode &= ~0x31; + s->mode |= 0x01; + s->status_pel &= ~0x37; + s->status_pel |= 0x34; + + s->rxbuf_start = 0x00; + s->rxmsg_cnt = 0x00; + s->rx_cnt = 0x00; +} + +void can_sja_hardware_reset(CanSJA1000State *s) +{ + /* Reset by hardware, p10 */ + s->mode = 0x01; + s->status_pel = 0x3c; + s->interrupt_pel = 0x00; + s->clock = 0x00; + s->rxbuf_start = 0x00; + s->rxmsg_cnt = 0x00; + s->rx_cnt = 0x00; + + s->control = 0x01; + s->status_bas = 0x0c; + s->interrupt_bas = 0x00; + + qemu_irq_lower(s->irq); +} + +static +void can_sja_single_filter(struct qemu_can_filter *filter, + const uint8_t *acr, const uint8_t *amr, int extended) +{ + if (extended) { + filter->can_id = (uint32_t)acr[0] << 21; + filter->can_id |= (uint32_t)acr[1] << 13; + filter->can_id |= (uint32_t)acr[2] << 5; + filter->can_id |= (uint32_t)acr[3] >> 3; + if (acr[3] & 4) { + filter->can_id |= QEMU_CAN_RTR_FLAG; + } + + filter->can_mask = (uint32_t)amr[0] << 21; + filter->can_mask |= (uint32_t)amr[1] << 13; + filter->can_mask |= (uint32_t)amr[2] << 5; + filter->can_mask |= (uint32_t)amr[3] >> 3; + filter->can_mask = ~filter->can_mask & QEMU_CAN_EFF_MASK; + if (!(amr[3] & 4)) { + filter->can_mask |= QEMU_CAN_RTR_FLAG; + } + } else { + filter->can_id = (uint32_t)acr[0] << 3; + filter->can_id |= (uint32_t)acr[1] >> 5; + if (acr[1] & 0x10) { + filter->can_id |= QEMU_CAN_RTR_FLAG; + } + + filter->can_mask = (uint32_t)amr[0] << 3; + filter->can_mask |= (uint32_t)amr[1] << 5; + filter->can_mask = ~filter->can_mask & QEMU_CAN_SFF_MASK; + if (!(amr[1] & 0x10)) { + filter->can_mask |= QEMU_CAN_RTR_FLAG; + } + } +} + +static +void can_sja_dual_filter(struct qemu_can_filter *filter, + const uint8_t *acr, const uint8_t *amr, int extended) +{ + if (extended) { + filter->can_id = (uint32_t)acr[0] << 21; + filter->can_id |= (uint32_t)acr[1] << 13; + + filter->can_mask = (uint32_t)amr[0] << 21; + filter->can_mask |= (uint32_t)amr[1] << 13; + filter->can_mask = ~filter->can_mask & QEMU_CAN_EFF_MASK & ~0x1fff; + } else { + filter->can_id = 
(uint32_t)acr[0] << 3; + filter->can_id |= (uint32_t)acr[1] >> 5; + if (acr[1] & 0x10) { + filter->can_id |= QEMU_CAN_RTR_FLAG; + } + + filter->can_mask = (uint32_t)amr[0] << 3; + filter->can_mask |= (uint32_t)amr[1] >> 5; + filter->can_mask = ~filter->can_mask & QEMU_CAN_SFF_MASK; + if (!(amr[1] & 0x10)) { + filter->can_mask |= QEMU_CAN_RTR_FLAG; + } + } +} + +/* Details in DS-p22, what we need to do here is to test the data. */ +static +int can_sja_accept_filter(CanSJA1000State *s, + const qemu_can_frame *frame) +{ + + struct qemu_can_filter filter; + + if (s->clock & 0x80) { /* PeliCAN Mode */ + if (s->mode & (1 << 3)) { /* Single mode. */ + if (frame->can_id & QEMU_CAN_EFF_FLAG) { /* EFF */ + can_sja_single_filter(&filter, + s->code_mask + 0, s->code_mask + 4, 1); + + if (!can_bus_filter_match(&filter, frame->can_id)) { + return 0; + } + } else { /* SFF */ + can_sja_single_filter(&filter, + s->code_mask + 0, s->code_mask + 4, 0); + + if (!can_bus_filter_match(&filter, frame->can_id)) { + return 0; + } + + if (frame->can_id & QEMU_CAN_RTR_FLAG) { /* RTR */ + return 1; + } + + if (frame->can_dlc == 0) { + return 1; + } + + if ((frame->data[0] & ~(s->code_mask[6])) != + (s->code_mask[2] & ~(s->code_mask[6]))) { + return 0; + } + + if (frame->can_dlc < 2) { + return 1; + } + + if ((frame->data[1] & ~(s->code_mask[7])) == + (s->code_mask[3] & ~(s->code_mask[7]))) { + return 1; + } + + return 0; + } + } else { /* Dual mode */ + if (frame->can_id & QEMU_CAN_EFF_FLAG) { /* EFF */ + can_sja_dual_filter(&filter, + s->code_mask + 0, s->code_mask + 4, 1); + + if (can_bus_filter_match(&filter, frame->can_id)) { + return 1; + } + + can_sja_dual_filter(&filter, + s->code_mask + 2, s->code_mask + 6, 1); + + if (can_bus_filter_match(&filter, frame->can_id)) { + return 1; + } + + return 0; + } else { + can_sja_dual_filter(&filter, + s->code_mask + 0, s->code_mask + 4, 0); + + if (can_bus_filter_match(&filter, frame->can_id)) { + uint8_t expect; + uint8_t mask; + expect = s->code_mask[1] << 4; + expect |= s->code_mask[3] & 0x0f; + + mask = s->code_mask[5] << 4; + mask |= s->code_mask[7] & 0x0f; + mask = ~mask & 0xff; + + if ((frame->data[0] & mask) == + (expect & mask)) { + return 1; + } + } + + can_sja_dual_filter(&filter, + s->code_mask + 2, s->code_mask + 6, 0); + + if (can_bus_filter_match(&filter, frame->can_id)) { + return 1; + } + + return 0; + } + } + } + + return 1; +} + +static void can_display_msg(const char *prefix, const qemu_can_frame *msg) +{ + int i; + FILE *logfile = qemu_log_lock(); + + qemu_log("%s%03X [%01d] %s %s", + prefix, + msg->can_id & QEMU_CAN_EFF_MASK, + msg->can_dlc, + msg->can_id & QEMU_CAN_EFF_FLAG ? "EFF" : "SFF", + msg->can_id & QEMU_CAN_RTR_FLAG ? 
"RTR" : "DAT"); + + for (i = 0; i < msg->can_dlc; i++) { + qemu_log(" %02X", msg->data[i]); + } + qemu_log("\n"); + qemu_log_flush(); + qemu_log_unlock(logfile); +} + +static void buff2frame_pel(const uint8_t *buff, qemu_can_frame *frame) +{ + uint8_t i; + + frame->flags = 0; + frame->can_id = 0; + if (buff[0] & 0x40) { /* RTR */ + frame->can_id = QEMU_CAN_RTR_FLAG; + } + frame->can_dlc = buff[0] & 0x0f; + + if (frame->can_dlc > 8) { + frame->can_dlc = 8; + } + + if (buff[0] & 0x80) { /* Extended */ + frame->can_id |= QEMU_CAN_EFF_FLAG; + frame->can_id |= buff[1] << 21; /* ID.28~ID.21 */ + frame->can_id |= buff[2] << 13; /* ID.20~ID.13 */ + frame->can_id |= buff[3] << 5; + frame->can_id |= buff[4] >> 3; + for (i = 0; i < frame->can_dlc; i++) { + frame->data[i] = buff[5 + i]; + } + for (; i < 8; i++) { + frame->data[i] = 0; + } + } else { + frame->can_id |= buff[1] << 3; + frame->can_id |= buff[2] >> 5; + for (i = 0; i < frame->can_dlc; i++) { + frame->data[i] = buff[3 + i]; + } + for (; i < 8; i++) { + frame->data[i] = 0; + } + } +} + + +static void buff2frame_bas(const uint8_t *buff, qemu_can_frame *frame) +{ + uint8_t i; + + frame->flags = 0; + frame->can_id = ((buff[0] << 3) & (0xff << 3)) + ((buff[1] >> 5) & 0x07); + if (buff[1] & 0x10) { /* RTR */ + frame->can_id = QEMU_CAN_RTR_FLAG; + } + frame->can_dlc = buff[1] & 0x0f; + + if (frame->can_dlc > 8) { + frame->can_dlc = 8; + } + + for (i = 0; i < frame->can_dlc; i++) { + frame->data[i] = buff[2 + i]; + } + for (; i < 8; i++) { + frame->data[i] = 0; + } +} + + +static int frame2buff_pel(const qemu_can_frame *frame, uint8_t *buff) +{ + int i; + int dlen = frame->can_dlc; + + if (frame->can_id & QEMU_CAN_ERR_FLAG) { /* error frame, NOT support now. */ + return -1; + } + + if (dlen > 8) { + return -1; + } + + buff[0] = 0x0f & frame->can_dlc; /* DLC */ + if (frame->can_id & QEMU_CAN_RTR_FLAG) { /* RTR */ + buff[0] |= (1 << 6); + } + if (frame->can_id & QEMU_CAN_EFF_FLAG) { /* EFF */ + buff[0] |= (1 << 7); + buff[1] = extract32(frame->can_id, 21, 8); /* ID.28~ID.21 */ + buff[2] = extract32(frame->can_id, 13, 8); /* ID.20~ID.13 */ + buff[3] = extract32(frame->can_id, 5, 8); /* ID.12~ID.05 */ + buff[4] = extract32(frame->can_id, 0, 5) << 3; /* ID.04~ID.00,xxx */ + for (i = 0; i < dlen; i++) { + buff[5 + i] = frame->data[i]; + } + return dlen + 5; + } else { /* SFF */ + buff[1] = extract32(frame->can_id, 3, 8); /* ID.10~ID.03 */ + buff[2] = extract32(frame->can_id, 0, 3) << 5; /* ID.02~ID.00,xxxxx */ + for (i = 0; i < dlen; i++) { + buff[3 + i] = frame->data[i]; + } + + return dlen + 3; + } + + return -1; +} + +static int frame2buff_bas(const qemu_can_frame *frame, uint8_t *buff) +{ + int i; + int dlen = frame->can_dlc; + + /* + * EFF, no support for BasicMode + * No use for Error frames now, + * they could be used in future to update SJA1000 error state + */ + if ((frame->can_id & QEMU_CAN_EFF_FLAG) || + (frame->can_id & QEMU_CAN_ERR_FLAG)) { + return -1; + } + + if (dlen > 8) { + return -1; + } + + buff[0] = extract32(frame->can_id, 3, 8); /* ID.10~ID.03 */ + buff[1] = extract32(frame->can_id, 0, 3) << 5; /* ID.02~ID.00,xxxxx */ + if (frame->can_id & QEMU_CAN_RTR_FLAG) { /* RTR */ + buff[1] |= (1 << 4); + } + buff[1] |= frame->can_dlc & 0x0f; + for (i = 0; i < dlen; i++) { + buff[2 + i] = frame->data[i]; + } + + return dlen + 2; +} + +static void can_sja_update_pel_irq(CanSJA1000State *s) +{ + if (s->interrupt_en & s->interrupt_pel) { + qemu_irq_raise(s->irq); + } else { + qemu_irq_lower(s->irq); + } +} + +static void 
+{
+    if ((s->control >> 1) & s->interrupt_bas) {
+        qemu_irq_raise(s->irq);
+    } else {
+        qemu_irq_lower(s->irq);
+    }
+}
+
+void can_sja_mem_write(CanSJA1000State *s, hwaddr addr, uint64_t val,
+                       unsigned size)
+{
+    qemu_can_frame frame;
+    uint32_t tmp;
+    uint8_t tmp8, count;
+
+
+    DPRINTF("write 0x%02llx addr 0x%02x\n",
+            (unsigned long long)val, (unsigned int)addr);
+
+    if (addr > CAN_SJA_MEM_SIZE) {
+        return;
+    }
+
+    if (s->clock & 0x80) { /* PeliCAN Mode */
+        switch (addr) {
+        case SJA_MOD: /* Mode register */
+            tmp = s->mode; /* preserve the old mode to detect leaving reset */
+            s->mode = 0x1f & val;
+            if ((tmp & 0x01) && ((val & 0x01) == 0)) {
+                /* Go to operation mode from reset mode. */
+                if (s->mode & (1 << 3)) { /* Single mode. */
+                    /* For EFF */
+                    can_sja_single_filter(&s->filter[0],
+                            s->code_mask + 0, s->code_mask + 4, 1);
+
+                    /* For SFF */
+                    can_sja_single_filter(&s->filter[1],
+                            s->code_mask + 0, s->code_mask + 4, 0);
+
+                    can_bus_client_set_filters(&s->bus_client, s->filter, 2);
+                } else { /* Dual mode */
+                    /* For EFF */
+                    can_sja_dual_filter(&s->filter[0],
+                            s->code_mask + 0, s->code_mask + 4, 1);
+
+                    can_sja_dual_filter(&s->filter[1],
+                            s->code_mask + 2, s->code_mask + 6, 1);
+
+                    /* For SFF */
+                    can_sja_dual_filter(&s->filter[2],
+                            s->code_mask + 0, s->code_mask + 4, 0);
+
+                    can_sja_dual_filter(&s->filter[3],
+                            s->code_mask + 2, s->code_mask + 6, 0);
+
+                    can_bus_client_set_filters(&s->bus_client, s->filter, 4);
+                }
+
+                s->rxmsg_cnt = 0;
+                s->rx_cnt = 0;
+            }
+            break;
+
+        case SJA_CMR: /* Command register. */
+            if (0x01 & val) { /* Send transmission request. */
+                buff2frame_pel(s->tx_buff, &frame);
+                if (DEBUG_FILTER) {
+                    can_display_msg("[cansja]: Tx request ", &frame);
+                }
+
+                /*
+                 * Clear transmission complete status
+                 * and Transmit Buffer Status.
+                 */
+                s->status_pel &= ~(3 << 2);
+
+                /* write to the backends. */
+                can_bus_client_send(&s->bus_client, &frame, 1);
+
+                /*
+                 * Set transmission complete status
+                 * and Transmit Buffer Status.
+                 */
+                s->status_pel |= (3 << 2);
+
+                /* Clear transmit status. */
+                s->status_pel &= ~(1 << 5);
+                s->interrupt_pel |= 0x02;
+                can_sja_update_pel_irq(s);
+            }
+            if (0x04 & val) { /* Release Receive Buffer */
+                if (s->rxmsg_cnt <= 0) {
+                    break;
+                }
+
+                tmp8 = s->rx_buff[s->rxbuf_start];
+                count = 0;
+                if (tmp8 & (1 << 7)) { /* EFF */
+                    count += 2;
+                }
+                count += 3;
+                if (!(tmp8 & (1 << 6))) { /* DATA */
+                    count += (tmp8 & 0x0f);
+                }
+
+                if (DEBUG_FILTER) {
+                    qemu_log("[cansja]: message released from "
+                             "Rx FIFO cnt=%d, count=%d\n", s->rx_cnt, count);
+                }
+
+                s->rxbuf_start += count;
+                s->rxbuf_start %= SJA_RCV_BUF_LEN;
+
+                s->rx_cnt -= count;
+                s->rxmsg_cnt--;
+                if (s->rxmsg_cnt == 0) {
+                    s->status_pel &= ~(1 << 0);
+                    s->interrupt_pel &= ~(1 << 0);
+                    can_sja_update_pel_irq(s);
+                }
+            }
+            if (0x08 & val) { /* Clear data overrun */
+                s->status_pel &= ~(1 << 1);
+                s->interrupt_pel &= ~(1 << 3);
+                can_sja_update_pel_irq(s);
+            }
+            break;
+        case SJA_SR: /* Status register */
+        case SJA_IR: /* Interrupt register */
+            break; /* Do nothing */
+        case SJA_IER: /* Interrupt enable register */
+            s->interrupt_en = val;
+            break;
+        case 16: /* RX frame information, addr 16~28. */
+            s->status_pel |= (1 << 5); /* Set transmit status. */
+            /* fallthrough */
+        case 17 ... 28:
+            if (s->mode & 0x01) { /* Reset mode */
+                if (addr < 24) {
+                    s->code_mask[addr - 16] = val;
+                }
+            } else { /* Operation mode */
+                s->tx_buff[addr - 16] = val; /* Store to TX buffer directly.
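+                 * In reset mode the same window (addresses 16..23) overlays
+                 * the acceptance code and mask bytes handled above, so a
+                 * guest is expected to leave reset mode before it fills
+                 * the TX buffer.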
+                 */
+            }
+            break;
+        case SJA_CDR:
+            s->clock = val;
+            break;
+        }
+    } else { /* Basic Mode */
+        switch (addr) {
+        case SJA_BCAN_CTR: /* Control register, addr 0 */
+            if ((s->control & 0x01) && ((val & 0x01) == 0)) {
+                /* Go to operation mode from reset mode. */
+                s->filter[0].can_id = (s->code << 3) & (0xff << 3);
+                tmp = (~(s->mask << 3)) & (0xff << 3);
+                tmp |= QEMU_CAN_EFF_FLAG; /* accept standard (SFF) frames only */
+                s->filter[0].can_mask = tmp;
+                can_bus_client_set_filters(&s->bus_client, s->filter, 1);
+
+                s->rxmsg_cnt = 0;
+                s->rx_cnt = 0;
+            } else if (!(s->control & 0x01) && !(val & 0x01)) {
+                can_sja_software_reset(s);
+            }
+
+            s->control = 0x1f & val;
+            break;
+        case SJA_BCAN_CMR: /* Command register, addr 1 */
+            if (0x01 & val) { /* Send transmission request. */
+                buff2frame_bas(s->tx_buff, &frame);
+                if (DEBUG_FILTER) {
+                    can_display_msg("[cansja]: Tx request ", &frame);
+                }
+
+                /*
+                 * Clear transmission complete status
+                 * and Transmit Buffer Status.
+                 */
+                s->status_bas &= ~(3 << 2);
+
+                /* write to the backends. */
+                can_bus_client_send(&s->bus_client, &frame, 1);
+
+                /*
+                 * Set transmission complete status
+                 * and Transmit Buffer Status.
+                 */
+                s->status_bas |= (3 << 2);
+
+                /* Clear transmit status. */
+                s->status_bas &= ~(1 << 5);
+                s->interrupt_bas |= 0x02;
+                can_sja_update_bas_irq(s);
+            }
+            if (0x04 & val) { /* Release Receive Buffer */
+                if (s->rxmsg_cnt <= 0) {
+                    break;
+                }
+
+                tmp8 = s->rx_buff[(s->rxbuf_start + 1) % SJA_RCV_BUF_LEN];
+                count = 2 + (tmp8 & 0x0f);
+
+                if (DEBUG_FILTER) {
+                    qemu_log("[cansja]: message released from "
+                             "Rx FIFO cnt=%d, count=%d\n", s->rx_cnt, count);
+                }
+
+                s->rxbuf_start += count;
+                s->rxbuf_start %= SJA_RCV_BUF_LEN;
+                s->rx_cnt -= count;
+                s->rxmsg_cnt--;
+
+                if (s->rxmsg_cnt == 0) {
+                    s->status_bas &= ~(1 << 0);
+                    s->interrupt_bas &= ~(1 << 0);
+                    can_sja_update_bas_irq(s);
+                }
+            }
+            if (0x08 & val) { /* Clear data overrun */
+                s->status_bas &= ~(1 << 1);
+                s->interrupt_bas &= ~(1 << 3);
+                can_sja_update_bas_irq(s);
+            }
+            break;
+        case 4:
+            s->code = val;
+            break;
+        case 5:
+            s->mask = val;
+            break;
+        case 10:
+            s->status_bas |= (1 << 5); /* Set transmit status. */
+            /* fallthrough */
+        case 11 ... 19:
+            if ((s->control & 0x01) == 0) { /* Operation mode */
+                s->tx_buff[addr - 10] = val; /* Store to TX buffer directly. */
+            }
+            break;
+        case SJA_CDR:
+            s->clock = val;
+            break;
+        }
+    }
+}
+
+uint64_t can_sja_mem_read(CanSJA1000State *s, hwaddr addr, unsigned size)
+{
+    uint64_t temp = 0;
+
+    DPRINTF("read addr 0x%02x ...\n", (unsigned int)addr);
+
+    if (addr > CAN_SJA_MEM_SIZE) {
+        return 0;
+    }
+
+    if (s->clock & 0x80) { /* PeliCAN Mode */
+        switch (addr) {
+        case SJA_MOD: /* Mode register, addr 0 */
+            temp = s->mode;
+            break;
+        case SJA_CMR: /* Command register, addr 1 */
+            temp = 0x00; /* Command register cannot be read. */
+            break;
+        case SJA_SR: /* Status register, addr 2 */
+            temp = s->status_pel;
+            break;
+        case SJA_IR: /* Interrupt register, addr 3 */
+            temp = s->interrupt_pel;
+            s->interrupt_pel = 0;
+            if (s->rxmsg_cnt) {
+                s->interrupt_pel |= (1 << 0); /* Receive interrupt. */
+            }
+            can_sja_update_pel_irq(s);
+            break;
+        case SJA_IER: /* Interrupt enable register, addr 4 */
+            temp = s->interrupt_en;
+            break;
+        case 5: /* Reserved */
+        case 6: /* Bus timing 0, hardware related, not supported yet. */
+        case 7: /* Bus timing 1, hardware related, not supported yet. */
+        case 8: /*
+                 * Output control register, hardware related,
+                 * not supported yet.
+                 */
+        case 9: /* Test. */
+        case 10 ... 15: /* Reserved */
+            temp = 0x00;
+            break;
+
+        case 16 ... 28:
+            if (s->mode & 0x01) { /* Reset mode */
+                if (addr < 24) {
+                    temp = s->code_mask[addr - 16];
+                } else {
+                    temp = 0x00;
+                }
+            } else { /* Operation mode */
+                temp = s->rx_buff[(s->rxbuf_start + addr - 16) %
+                       SJA_RCV_BUF_LEN];
+            }
+            break;
+        case SJA_CDR:
+            temp = s->clock;
+            break;
+        default:
+            temp = 0xff;
+        }
+    } else { /* Basic Mode */
+        switch (addr) {
+        case SJA_BCAN_CTR: /* Control register, addr 0 */
+            temp = s->control;
+            break;
+        case SJA_BCAN_SR: /* Status register, addr 2 */
+            temp = s->status_bas;
+            break;
+        case SJA_BCAN_IR: /* Interrupt register, addr 3 */
+            temp = s->interrupt_bas;
+            s->interrupt_bas = 0;
+            if (s->rxmsg_cnt) {
+                s->interrupt_bas |= (1 << 0); /* Receive interrupt. */
+            }
+            can_sja_update_bas_irq(s);
+            break;
+        case 4:
+            temp = s->code;
+            break;
+        case 5:
+            temp = s->mask;
+            break;
+        case 20 ... 29:
+            temp = s->rx_buff[(s->rxbuf_start + addr - 20) % SJA_RCV_BUF_LEN];
+            break;
+        case 31:
+            temp = s->clock;
+            break;
+        default:
+            temp = 0xff;
+            break;
+        }
+    }
+    DPRINTF("read addr 0x%02x, %d bytes, content 0x%02lx\n",
+            (int)addr, size, (long unsigned int)temp);
+
+    return temp;
+}
+
+bool can_sja_can_receive(CanBusClientState *client)
+{
+    CanSJA1000State *s = container_of(client, CanSJA1000State, bus_client);
+
+    if (s->clock & 0x80) { /* PeliCAN Mode */
+        if (s->mode & 0x01) { /* reset mode. */
+            return false;
+        }
+    } else { /* BasicCAN mode */
+        if (s->control & 0x01) {
+            return false;
+        }
+    }
+
+    return true; /* reception is always enabled in operation mode */
+}
+
+ssize_t can_sja_receive(CanBusClientState *client, const qemu_can_frame *frames,
+                        size_t frames_cnt)
+{
+    CanSJA1000State *s = container_of(client, CanSJA1000State, bus_client);
+    static uint8_t rcv[SJA_MSG_MAX_LEN];
+    int i;
+    int ret = -1;
+    const qemu_can_frame *frame = frames;
+
+    if (frames_cnt <= 0) {
+        return 0;
+    }
+    if (frame->flags & QEMU_CAN_FRMF_TYPE_FD) {
+        if (DEBUG_FILTER) {
+            can_display_msg("[cansja]: ignoring FD frame ", frame);
+        }
+        return 1;
+    }
+
+    if (DEBUG_FILTER) {
+        can_display_msg("[cansja]: receive ", frame);
+    }
+
+    if (s->clock & 0x80) { /* PeliCAN Mode */
+
+        /* the CAN controller is receiving a message */
+        s->status_pel |= (1 << 4);
+
+        if (can_sja_accept_filter(s, frame) == 0) {
+            s->status_pel &= ~(1 << 4);
+            if (DEBUG_FILTER) {
+                qemu_log("[cansja]: filter rejects message\n");
+            }
+            return ret;
+        }
+
+        ret = frame2buff_pel(frame, rcv);
+        if (ret < 0) {
+            s->status_pel &= ~(1 << 4);
+            if (DEBUG_FILTER) {
+                qemu_log("[cansja]: message store failed\n");
+            }
+            return ret; /* frame type not supported yet. */
+        }
+
+        if (s->rx_cnt + ret > SJA_RCV_BUF_LEN) { /* Data overrun. */
+            s->status_pel |= (1 << 1); /* Overrun status */
+            s->interrupt_pel |= (1 << 3);
+            s->status_pel &= ~(1 << 4);
+            if (DEBUG_FILTER) {
+                qemu_log("[cansja]: receive FIFO overrun\n");
+            }
+            can_sja_update_pel_irq(s);
+            return ret;
+        }
+        s->rx_cnt += ret;
+        s->rxmsg_cnt++;
+        if (DEBUG_FILTER) {
+            qemu_log("[cansja]: message stored in receive FIFO\n");
+        }
+
+        for (i = 0; i < ret; i++) {
+            s->rx_buff[(s->rx_ptr++) % SJA_RCV_BUF_LEN] = rcv[i];
+        }
+        s->rx_ptr %= SJA_RCV_BUF_LEN; /* update the pointer. */
+
+        s->status_pel |= 0x01; /* Set the Receive Buffer Status.
DS-p23 */ + s->interrupt_pel |= 0x01; + s->status_pel &= ~(1 << 4); + s->status_pel |= (1 << 0); + can_sja_update_pel_irq(s); + } else { /* BasicCAN mode */ + + /* the CAN controller is receiving a message */ + s->status_bas |= (1 << 4); + + ret = frame2buff_bas(frame, rcv); + if (ret < 0) { + s->status_bas &= ~(1 << 4); + if (DEBUG_FILTER) { + qemu_log("[cansja]: message store failed\n"); + } + return ret; /* maybe not support now. */ + } + + if (s->rx_cnt + ret > SJA_RCV_BUF_LEN) { /* Data overrun. */ + s->status_bas |= (1 << 1); /* Overrun status */ + s->status_bas &= ~(1 << 4); + s->interrupt_bas |= (1 << 3); + can_sja_update_bas_irq(s); + if (DEBUG_FILTER) { + qemu_log("[cansja]: receive FIFO overrun\n"); + } + return ret; + } + s->rx_cnt += ret; + s->rxmsg_cnt++; + + if (DEBUG_FILTER) { + qemu_log("[cansja]: message stored\n"); + } + + for (i = 0; i < ret; i++) { + s->rx_buff[(s->rx_ptr++) % SJA_RCV_BUF_LEN] = rcv[i]; + } + s->rx_ptr %= SJA_RCV_BUF_LEN; /* update the pointer. */ + + s->status_bas |= 0x01; /* Set the Receive Buffer Status. DS-p15 */ + s->status_bas &= ~(1 << 4); + s->interrupt_bas |= (1 << 0); + can_sja_update_bas_irq(s); + } + return 1; +} + +static CanBusClientInfo can_sja_bus_client_info = { + .can_receive = can_sja_can_receive, + .receive = can_sja_receive, +}; + + +int can_sja_connect_to_bus(CanSJA1000State *s, CanBusState *bus) +{ + s->bus_client.info = &can_sja_bus_client_info; + + if (!bus) { + return -EINVAL; + } + + if (can_bus_insert_client(bus, &s->bus_client) < 0) { + return -1; + } + + return 0; +} + +void can_sja_disconnect(CanSJA1000State *s) +{ + can_bus_remove_client(&s->bus_client); +} + +int can_sja_init(CanSJA1000State *s, qemu_irq irq) +{ + s->irq = irq; + + qemu_irq_lower(s->irq); + + can_sja_hardware_reset(s); + + return 0; +} + +const VMStateDescription vmstate_qemu_can_filter = { + .name = "qemu_can_filter", + .version_id = 1, + .minimum_version_id = 1, + .minimum_version_id_old = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(can_id, qemu_can_filter), + VMSTATE_UINT32(can_mask, qemu_can_filter), + VMSTATE_END_OF_LIST() + } +}; + +static int can_sja_post_load(void *opaque, int version_id) +{ + CanSJA1000State *s = opaque; + if (s->clock & 0x80) { /* PeliCAN Mode */ + can_sja_update_pel_irq(s); + } else { + can_sja_update_bas_irq(s); + } + return 0; +} + +/* VMState is needed for live migration of QEMU images */ +const VMStateDescription vmstate_can_sja = { + .name = "can_sja", + .version_id = 1, + .minimum_version_id = 1, + .minimum_version_id_old = 1, + .post_load = can_sja_post_load, + .fields = (VMStateField[]) { + VMSTATE_UINT8(mode, CanSJA1000State), + + VMSTATE_UINT8(status_pel, CanSJA1000State), + VMSTATE_UINT8(interrupt_pel, CanSJA1000State), + VMSTATE_UINT8(interrupt_en, CanSJA1000State), + VMSTATE_UINT8(rxmsg_cnt, CanSJA1000State), + VMSTATE_UINT8(rxbuf_start, CanSJA1000State), + VMSTATE_UINT8(clock, CanSJA1000State), + + VMSTATE_BUFFER(code_mask, CanSJA1000State), + VMSTATE_BUFFER(tx_buff, CanSJA1000State), + + VMSTATE_BUFFER(rx_buff, CanSJA1000State), + + VMSTATE_UINT32(rx_ptr, CanSJA1000State), + VMSTATE_UINT32(rx_cnt, CanSJA1000State), + + VMSTATE_UINT8(control, CanSJA1000State), + + VMSTATE_UINT8(status_bas, CanSJA1000State), + VMSTATE_UINT8(interrupt_bas, CanSJA1000State), + VMSTATE_UINT8(code, CanSJA1000State), + VMSTATE_UINT8(mask, CanSJA1000State), + + VMSTATE_STRUCT_ARRAY(filter, CanSJA1000State, 4, 0, + vmstate_qemu_can_filter, qemu_can_filter), + + + VMSTATE_END_OF_LIST() + } +}; diff --git a/hw/net/can/can_sja1000.h 
b/hw/net/can/can_sja1000.h new file mode 100644 index 000000000..7ca9cd681 --- /dev/null +++ b/hw/net/can/can_sja1000.h @@ -0,0 +1,147 @@ +/* + * CAN device - SJA1000 chip emulation for QEMU + * + * Copyright (c) 2013-2014 Jin Yang + * Copyright (c) 2014-2018 Pavel Pisa + * + * Initial development supported by Google GSoC 2013 from RTEMS project slot + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ +#ifndef HW_CAN_SJA1000_H +#define HW_CAN_SJA1000_H + +#include "exec/hwaddr.h" +#include "net/can_emu.h" + +#define CAN_SJA_MEM_SIZE 128 + +/* The max size for a message buffer, EFF and DLC=8, DS-p39 */ +#define SJA_MSG_MAX_LEN 13 +/* The receive buffer size. */ +#define SJA_RCV_BUF_LEN 64 + +typedef struct CanSJA1000State { + /* PeliCAN state and registers sorted by address */ + uint8_t mode; /* 0 .. Mode register, DS-p26 */ + /* 1 .. Command register */ + uint8_t status_pel; /* 2 .. Status register, p15 */ + uint8_t interrupt_pel; /* 3 .. Interrupt register */ + uint8_t interrupt_en; /* 4 .. Interrupt Enable register */ + uint8_t rxmsg_cnt; /* 29 .. RX message counter. DS-p49 */ + uint8_t rxbuf_start; /* 30 .. RX buffer start address, DS-p49 */ + uint8_t clock; /* 31 .. Clock Divider register, DS-p55 */ + + uint8_t code_mask[8]; /* 16~23 */ + uint8_t tx_buff[13]; /* 96~108 .. transmit buffer */ + /* 10~19 .. transmit buffer for BasicCAN */ + + uint8_t rx_buff[SJA_RCV_BUF_LEN]; /* 32~95 .. 64bytes Rx FIFO */ + uint32_t rx_ptr; /* Count by bytes. */ + uint32_t rx_cnt; /* Count by bytes. */ + + /* PeliCAN state and registers sorted by address */ + uint8_t control; /* 0 .. Control register */ + /* 1 .. Command register */ + uint8_t status_bas; /* 2 .. Status register */ + uint8_t interrupt_bas; /* 3 .. Interrupt register */ + uint8_t code; /* 4 .. Acceptance code register */ + uint8_t mask; /* 5 .. 
Acceptance mask register */ + + qemu_can_filter filter[4]; + + qemu_irq irq; + CanBusClientState bus_client; +} CanSJA1000State; + +/* PeliCAN mode */ +enum SJA1000_PeliCAN_regs { + SJA_MOD = 0x00, /* Mode control register */ + SJA_CMR = 0x01, /* Command register */ + SJA_SR = 0x02, /* Status register */ + SJA_IR = 0x03, /* Interrupt register */ + SJA_IER = 0x04, /* Interrupt Enable */ + SJA_BTR0 = 0x06, /* Bus Timing register 0 */ + SJA_BTR1 = 0x07, /* Bus Timing register 1 */ + SJA_OCR = 0x08, /* Output Control register */ + SJA_ALC = 0x0b, /* Arbitration Lost Capture */ + SJA_ECC = 0x0c, /* Error Code Capture */ + SJA_EWLR = 0x0d, /* Error Warning Limit */ + SJA_RXERR = 0x0e, /* RX Error Counter */ + SJA_TXERR0 = 0x0e, /* TX Error Counter */ + SJA_TXERR1 = 0x0f, + SJA_RMC = 0x1d, /* Rx Message Counter + * number of messages in RX FIFO + */ + SJA_RBSA = 0x1e, /* Rx Buffer Start Addr + * address of current message + */ + SJA_FRM = 0x10, /* Transmit Buffer + * write: Receive Buffer + * read: Frame Information + */ +/* + * ID bytes (11 bits in 0 and 1 for standard message or + * 16 bits in 0,1 and 13 bits in 2,3 for extended message) + * The most significant bit of ID is placed in MSB + * position of ID0 register. + */ + SJA_ID0 = 0x11, /* ID for standard and extended frames */ + SJA_ID1 = 0x12, + SJA_ID2 = 0x13, /* ID cont. for extended frames */ + SJA_ID3 = 0x14, + + SJA_DATS = 0x13, /* Data start standard frame */ + SJA_DATE = 0x15, /* Data start extended frame */ + SJA_ACR0 = 0x10, /* Acceptance Code (4 bytes) in RESET mode */ + SJA_AMR0 = 0x14, /* Acceptance Mask (4 bytes) in RESET mode */ + SJA_PeliCAN_AC_LEN = 4, /* 4 bytes */ + SJA_CDR = 0x1f /* Clock Divider */ +}; + + +/* BasicCAN mode */ +enum SJA1000_BasicCAN_regs { + SJA_BCAN_CTR = 0x00, /* Control register */ + SJA_BCAN_CMR = 0x01, /* Command register */ + SJA_BCAN_SR = 0x02, /* Status register */ + SJA_BCAN_IR = 0x03 /* Interrupt register */ +}; + +void can_sja_hardware_reset(CanSJA1000State *s); + +void can_sja_mem_write(CanSJA1000State *s, hwaddr addr, uint64_t val, + unsigned size); + +uint64_t can_sja_mem_read(CanSJA1000State *s, hwaddr addr, unsigned size); + +int can_sja_connect_to_bus(CanSJA1000State *s, CanBusState *bus); + +void can_sja_disconnect(CanSJA1000State *s); + +int can_sja_init(CanSJA1000State *s, qemu_irq irq); + +bool can_sja_can_receive(CanBusClientState *client); + +ssize_t can_sja_receive(CanBusClientState *client, + const qemu_can_frame *frames, size_t frames_cnt); + +extern const VMStateDescription vmstate_can_sja; + +#endif diff --git a/hw/net/can/ctu_can_fd_frame.h b/hw/net/can/ctu_can_fd_frame.h new file mode 100644 index 000000000..04d956c84 --- /dev/null +++ b/hw/net/can/ctu_can_fd_frame.h @@ -0,0 +1,189 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/******************************************************************************* + * + * CTU CAN FD IP Core + * + * Copyright (C) 2015-2018 Ondrej Ille FEE CTU + * Copyright (C) 2018-2020 Ondrej Ille self-funded + * Copyright (C) 2018-2019 Martin Jerabek FEE CTU + * Copyright (C) 2018-2020 Pavel Pisa FEE CTU/self-funded + * + * Project advisors: + * Jiri Novak + * Pavel Pisa + * + * Department of Measurement (http://meas.fel.cvut.cz/) + * Faculty of Electrical Engineering (http://www.fel.cvut.cz) + * Czech Technical University (http://www.cvut.cz/) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 
2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + ******************************************************************************/ + +/* This file is autogenerated, DO NOT EDIT! */ + +#ifndef __CTU_CAN_FD_CAN_FD_FRAME_FORMAT__ +#define __CTU_CAN_FD_CAN_FD_FRAME_FORMAT__ + +/* CAN_Frame_format memory map */ +enum ctu_can_fd_can_frame_format { + CTU_CAN_FD_FRAME_FORM_W = 0x0, + CTU_CAN_FD_IDENTIFIER_W = 0x4, + CTU_CAN_FD_TIMESTAMP_L_W = 0x8, + CTU_CAN_FD_TIMESTAMP_U_W = 0xc, + CTU_CAN_FD_DATA_1_4_W = 0x10, + CTU_CAN_FD_DATA_5_8_W = 0x14, + CTU_CAN_FD_DATA_61_64_W = 0x4c, +}; + + +/* Register descriptions: */ +union ctu_can_fd_frame_form_w { + uint32_t u32; + struct ctu_can_fd_frame_form_w_s { +#ifdef __LITTLE_ENDIAN_BITFIELD + /* FRAME_FORM_W */ + uint32_t dlc : 4; + uint32_t reserved_4 : 1; + uint32_t rtr : 1; + uint32_t ide : 1; + uint32_t fdf : 1; + uint32_t reserved_8 : 1; + uint32_t brs : 1; + uint32_t esi_rsv : 1; + uint32_t rwcnt : 5; + uint32_t reserved_31_16 : 16; +#else + uint32_t reserved_31_16 : 16; + uint32_t rwcnt : 5; + uint32_t esi_rsv : 1; + uint32_t brs : 1; + uint32_t reserved_8 : 1; + uint32_t fdf : 1; + uint32_t ide : 1; + uint32_t rtr : 1; + uint32_t reserved_4 : 1; + uint32_t dlc : 4; +#endif + } s; +}; + +enum ctu_can_fd_frame_form_w_rtr { + NO_RTR_FRAME = 0x0, + RTR_FRAME = 0x1, +}; + +enum ctu_can_fd_frame_form_w_ide { + BASE = 0x0, + EXTENDED = 0x1, +}; + +enum ctu_can_fd_frame_form_w_fdf { + NORMAL_CAN = 0x0, + FD_CAN = 0x1, +}; + +enum ctu_can_fd_frame_form_w_brs { + BR_NO_SHIFT = 0x0, + BR_SHIFT = 0x1, +}; + +enum ctu_can_fd_frame_form_w_esi_rsv { + ESI_ERR_ACTIVE = 0x0, + ESI_ERR_PASIVE = 0x1, +}; + +union ctu_can_fd_identifier_w { + uint32_t u32; + struct ctu_can_fd_identifier_w_s { +#ifdef __LITTLE_ENDIAN_BITFIELD + /* IDENTIFIER_W */ + uint32_t identifier_ext : 18; + uint32_t identifier_base : 11; + uint32_t reserved_31_29 : 3; +#else + uint32_t reserved_31_29 : 3; + uint32_t identifier_base : 11; + uint32_t identifier_ext : 18; +#endif + } s; +}; + +union ctu_can_fd_timestamp_l_w { + uint32_t u32; + struct ctu_can_fd_timestamp_l_w_s { + /* TIMESTAMP_L_W */ + uint32_t time_stamp_31_0 : 32; + } s; +}; + +union ctu_can_fd_timestamp_u_w { + uint32_t u32; + struct ctu_can_fd_timestamp_u_w_s { + /* TIMESTAMP_U_W */ + uint32_t timestamp_l_w : 32; + } s; +}; + +union ctu_can_fd_data_1_4_w { + uint32_t u32; + struct ctu_can_fd_data_1_4_w_s { +#ifdef __LITTLE_ENDIAN_BITFIELD + /* DATA_1_4_W */ + uint32_t data_1 : 8; + uint32_t data_2 : 8; + uint32_t data_3 : 8; + uint32_t data_4 : 8; +#else + uint32_t data_4 : 8; + uint32_t data_3 : 8; + uint32_t data_2 : 8; + uint32_t data_1 : 8; +#endif + } s; +}; + +union ctu_can_fd_data_5_8_w { + uint32_t u32; + struct ctu_can_fd_data_5_8_w_s { +#ifdef __LITTLE_ENDIAN_BITFIELD + /* DATA_5_8_W */ + uint32_t data_5 : 8; + uint32_t data_6 : 8; + uint32_t data_7 : 8; + uint32_t data_8 : 8; +#else + uint32_t data_8 : 8; + uint32_t data_7 : 8; + uint32_t data_6 : 8; + uint32_t data_5 : 8; +#endif + } s; +}; + +union ctu_can_fd_data_61_64_w { + uint32_t u32; + struct ctu_can_fd_data_61_64_w_s { +#ifdef __LITTLE_ENDIAN_BITFIELD + /* DATA_61_64_W */ + uint32_t data_61 : 8; + uint32_t data_62 : 8; + uint32_t data_63 : 8; + uint32_t data_64 : 8; +#else + uint32_t data_64 : 8; + 
uint32_t data_63 : 8; + uint32_t data_62 : 8; + uint32_t data_61 : 8; +#endif + } s; +}; + +#endif diff --git a/hw/net/can/ctu_can_fd_regs.h b/hw/net/can/ctu_can_fd_regs.h new file mode 100644 index 000000000..450f4b9fb --- /dev/null +++ b/hw/net/can/ctu_can_fd_regs.h @@ -0,0 +1,971 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/******************************************************************************* + * + * CTU CAN FD IP Core + * + * Copyright (C) 2015-2018 Ondrej Ille FEE CTU + * Copyright (C) 2018-2020 Ondrej Ille self-funded + * Copyright (C) 2018-2019 Martin Jerabek FEE CTU + * Copyright (C) 2018-2020 Pavel Pisa FEE CTU/self-funded + * + * Project advisors: + * Jiri Novak + * Pavel Pisa + * + * Department of Measurement (http://meas.fel.cvut.cz/) + * Faculty of Electrical Engineering (http://www.fel.cvut.cz) + * Czech Technical University (http://www.cvut.cz/) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + ******************************************************************************/ + +/* This file is autogenerated, DO NOT EDIT! */ + +#ifndef __CTU_CAN_FD_CAN_FD_REGISTER_MAP__ +#define __CTU_CAN_FD_CAN_FD_REGISTER_MAP__ + +/* CAN_Registers memory map */ +enum ctu_can_fd_can_registers { + CTU_CAN_FD_DEVICE_ID = 0x0, + CTU_CAN_FD_VERSION = 0x2, + CTU_CAN_FD_MODE = 0x4, + CTU_CAN_FD_SETTINGS = 0x6, + CTU_CAN_FD_STATUS = 0x8, + CTU_CAN_FD_COMMAND = 0xc, + CTU_CAN_FD_INT_STAT = 0x10, + CTU_CAN_FD_INT_ENA_SET = 0x14, + CTU_CAN_FD_INT_ENA_CLR = 0x18, + CTU_CAN_FD_INT_MASK_SET = 0x1c, + CTU_CAN_FD_INT_MASK_CLR = 0x20, + CTU_CAN_FD_BTR = 0x24, + CTU_CAN_FD_BTR_FD = 0x28, + CTU_CAN_FD_EWL = 0x2c, + CTU_CAN_FD_ERP = 0x2d, + CTU_CAN_FD_FAULT_STATE = 0x2e, + CTU_CAN_FD_REC = 0x30, + CTU_CAN_FD_TEC = 0x32, + CTU_CAN_FD_ERR_NORM = 0x34, + CTU_CAN_FD_ERR_FD = 0x36, + CTU_CAN_FD_CTR_PRES = 0x38, + CTU_CAN_FD_FILTER_A_MASK = 0x3c, + CTU_CAN_FD_FILTER_A_VAL = 0x40, + CTU_CAN_FD_FILTER_B_MASK = 0x44, + CTU_CAN_FD_FILTER_B_VAL = 0x48, + CTU_CAN_FD_FILTER_C_MASK = 0x4c, + CTU_CAN_FD_FILTER_C_VAL = 0x50, + CTU_CAN_FD_FILTER_RAN_LOW = 0x54, + CTU_CAN_FD_FILTER_RAN_HIGH = 0x58, + CTU_CAN_FD_FILTER_CONTROL = 0x5c, + CTU_CAN_FD_FILTER_STATUS = 0x5e, + CTU_CAN_FD_RX_MEM_INFO = 0x60, + CTU_CAN_FD_RX_POINTERS = 0x64, + CTU_CAN_FD_RX_STATUS = 0x68, + CTU_CAN_FD_RX_SETTINGS = 0x6a, + CTU_CAN_FD_RX_DATA = 0x6c, + CTU_CAN_FD_TX_STATUS = 0x70, + CTU_CAN_FD_TX_COMMAND = 0x74, + CTU_CAN_FD_TX_PRIORITY = 0x78, + CTU_CAN_FD_ERR_CAPT = 0x7c, + CTU_CAN_FD_ALC = 0x7e, + CTU_CAN_FD_TRV_DELAY = 0x80, + CTU_CAN_FD_SSP_CFG = 0x82, + CTU_CAN_FD_RX_FR_CTR = 0x84, + CTU_CAN_FD_TX_FR_CTR = 0x88, + CTU_CAN_FD_DEBUG_REGISTER = 0x8c, + CTU_CAN_FD_YOLO_REG = 0x90, + CTU_CAN_FD_TIMESTAMP_LOW = 0x94, + CTU_CAN_FD_TIMESTAMP_HIGH = 0x98, + CTU_CAN_FD_TXTB1_DATA_1 = 0x100, + CTU_CAN_FD_TXTB1_DATA_2 = 0x104, + CTU_CAN_FD_TXTB1_DATA_20 = 0x14c, + CTU_CAN_FD_TXTB2_DATA_1 = 0x200, + CTU_CAN_FD_TXTB2_DATA_2 = 0x204, + CTU_CAN_FD_TXTB2_DATA_20 = 0x24c, + CTU_CAN_FD_TXTB3_DATA_1 = 0x300, + CTU_CAN_FD_TXTB3_DATA_2 = 0x304, + CTU_CAN_FD_TXTB3_DATA_20 = 0x34c, + 
CTU_CAN_FD_TXTB4_DATA_1 = 0x400, + CTU_CAN_FD_TXTB4_DATA_2 = 0x404, + CTU_CAN_FD_TXTB4_DATA_20 = 0x44c, +}; + + +/* Register descriptions: */ +union ctu_can_fd_device_id_version { + uint32_t u32; + struct ctu_can_fd_device_id_version_s { +#ifdef __LITTLE_ENDIAN_BITFIELD + /* DEVICE_ID */ + uint32_t device_id : 16; + /* VERSION */ + uint32_t ver_minor : 8; + uint32_t ver_major : 8; +#else + uint32_t ver_major : 8; + uint32_t ver_minor : 8; + uint32_t device_id : 16; +#endif + } s; +}; + +enum ctu_can_fd_device_id_device_id { + CTU_CAN_FD_ID = 0xcafd, +}; + +union ctu_can_fd_mode_settings { + uint32_t u32; + struct ctu_can_fd_mode_settings_s { +#ifdef __LITTLE_ENDIAN_BITFIELD + /* MODE */ + uint32_t rst : 1; + uint32_t lom : 1; + uint32_t stm : 1; + uint32_t afm : 1; + uint32_t fde : 1; + uint32_t reserved_6_5 : 2; + uint32_t acf : 1; + uint32_t tstm : 1; + uint32_t reserved_15_9 : 7; + /* SETTINGS */ + uint32_t rtrle : 1; + uint32_t rtrth : 4; + uint32_t ilbp : 1; + uint32_t ena : 1; + uint32_t nisofd : 1; + uint32_t pex : 1; + uint32_t reserved_31_25 : 7; +#else + uint32_t reserved_31_25 : 7; + uint32_t pex : 1; + uint32_t nisofd : 1; + uint32_t ena : 1; + uint32_t ilbp : 1; + uint32_t rtrth : 4; + uint32_t rtrle : 1; + uint32_t reserved_15_9 : 7; + uint32_t tstm : 1; + uint32_t acf : 1; + uint32_t reserved_6_5 : 2; + uint32_t fde : 1; + uint32_t afm : 1; + uint32_t stm : 1; + uint32_t lom : 1; + uint32_t rst : 1; +#endif + } s; +}; + +enum ctu_can_fd_mode_lom { + LOM_DISABLED = 0x0, + LOM_ENABLED = 0x1, +}; + +enum ctu_can_fd_mode_stm { + STM_DISABLED = 0x0, + STM_ENABLED = 0x1, +}; + +enum ctu_can_fd_mode_afm { + AFM_DISABLED = 0x0, + AFM_ENABLED = 0x1, +}; + +enum ctu_can_fd_mode_fde { + FDE_DISABLE = 0x0, + FDE_ENABLE = 0x1, +}; + +enum ctu_can_fd_mode_acf { + ACF_DISABLED = 0x0, + ACF_ENABLED = 0x1, +}; + +enum ctu_can_fd_settings_rtrle { + RTRLE_DISABLED = 0x0, + RTRLE_ENABLED = 0x1, +}; + +enum ctu_can_fd_settings_ilbp { + INT_LOOP_DISABLED = 0x0, + INT_LOOP_ENABLED = 0x1, +}; + +enum ctu_can_fd_settings_ena { + CTU_CAN_DISABLED = 0x0, + CTU_CAN_ENABLED = 0x1, +}; + +enum ctu_can_fd_settings_nisofd { + ISO_FD = 0x0, + NON_ISO_FD = 0x1, +}; + +enum ctu_can_fd_settings_pex { + PROTOCOL_EXCEPTION_DISABLED = 0x0, + PROTOCOL_EXCEPTION_ENABLED = 0x1, +}; + +union ctu_can_fd_status { + uint32_t u32; + struct ctu_can_fd_status_s { +#ifdef __LITTLE_ENDIAN_BITFIELD + /* STATUS */ + uint32_t rxne : 1; + uint32_t dor : 1; + uint32_t txnf : 1; + uint32_t eft : 1; + uint32_t rxs : 1; + uint32_t txs : 1; + uint32_t ewl : 1; + uint32_t idle : 1; + uint32_t reserved_31_8 : 24; +#else + uint32_t reserved_31_8 : 24; + uint32_t idle : 1; + uint32_t ewl : 1; + uint32_t txs : 1; + uint32_t rxs : 1; + uint32_t eft : 1; + uint32_t txnf : 1; + uint32_t dor : 1; + uint32_t rxne : 1; +#endif + } s; +}; + +union ctu_can_fd_command { + uint32_t u32; + struct ctu_can_fd_command_s { +#ifdef __LITTLE_ENDIAN_BITFIELD + uint32_t reserved_1_0 : 2; + /* COMMAND */ + uint32_t rrb : 1; + uint32_t cdo : 1; + uint32_t ercrst : 1; + uint32_t rxfcrst : 1; + uint32_t txfcrst : 1; + uint32_t reserved_31_7 : 25; +#else + uint32_t reserved_31_7 : 25; + uint32_t txfcrst : 1; + uint32_t rxfcrst : 1; + uint32_t ercrst : 1; + uint32_t cdo : 1; + uint32_t rrb : 1; + uint32_t reserved_1_0 : 2; +#endif + } s; +}; + +union ctu_can_fd_int_stat { + uint32_t u32; + struct ctu_can_fd_int_stat_s { +#ifdef __LITTLE_ENDIAN_BITFIELD + /* INT_STAT */ + uint32_t rxi : 1; + uint32_t txi : 1; + uint32_t ewli : 1; + uint32_t doi : 1; + uint32_t 
fcsi : 1; + uint32_t ali : 1; + uint32_t bei : 1; + uint32_t ofi : 1; + uint32_t rxfi : 1; + uint32_t bsi : 1; + uint32_t rbnei : 1; + uint32_t txbhci : 1; + uint32_t reserved_31_12 : 20; +#else + uint32_t reserved_31_12 : 20; + uint32_t txbhci : 1; + uint32_t rbnei : 1; + uint32_t bsi : 1; + uint32_t rxfi : 1; + uint32_t ofi : 1; + uint32_t bei : 1; + uint32_t ali : 1; + uint32_t fcsi : 1; + uint32_t doi : 1; + uint32_t ewli : 1; + uint32_t txi : 1; + uint32_t rxi : 1; +#endif + } s; +}; + +union ctu_can_fd_int_ena_set { + uint32_t u32; + struct ctu_can_fd_int_ena_set_s { +#ifdef __LITTLE_ENDIAN_BITFIELD + /* INT_ENA_SET */ + uint32_t int_ena_set : 12; + uint32_t reserved_31_12 : 20; +#else + uint32_t reserved_31_12 : 20; + uint32_t int_ena_set : 12; +#endif + } s; +}; + +union ctu_can_fd_int_ena_clr { + uint32_t u32; + struct ctu_can_fd_int_ena_clr_s { +#ifdef __LITTLE_ENDIAN_BITFIELD + /* INT_ENA_CLR */ + uint32_t int_ena_clr : 12; + uint32_t reserved_31_12 : 20; +#else + uint32_t reserved_31_12 : 20; + uint32_t int_ena_clr : 12; +#endif + } s; +}; + +union ctu_can_fd_int_mask_set { + uint32_t u32; + struct ctu_can_fd_int_mask_set_s { +#ifdef __LITTLE_ENDIAN_BITFIELD + /* INT_MASK_SET */ + uint32_t int_mask_set : 12; + uint32_t reserved_31_12 : 20; +#else + uint32_t reserved_31_12 : 20; + uint32_t int_mask_set : 12; +#endif + } s; +}; + +union ctu_can_fd_int_mask_clr { + uint32_t u32; + struct ctu_can_fd_int_mask_clr_s { +#ifdef __LITTLE_ENDIAN_BITFIELD + /* INT_MASK_CLR */ + uint32_t int_mask_clr : 12; + uint32_t reserved_31_12 : 20; +#else + uint32_t reserved_31_12 : 20; + uint32_t int_mask_clr : 12; +#endif + } s; +}; + +union ctu_can_fd_btr { + uint32_t u32; + struct ctu_can_fd_btr_s { +#ifdef __LITTLE_ENDIAN_BITFIELD + /* BTR */ + uint32_t prop : 7; + uint32_t ph1 : 6; + uint32_t ph2 : 6; + uint32_t brp : 8; + uint32_t sjw : 5; +#else + uint32_t sjw : 5; + uint32_t brp : 8; + uint32_t ph2 : 6; + uint32_t ph1 : 6; + uint32_t prop : 7; +#endif + } s; +}; + +union ctu_can_fd_btr_fd { + uint32_t u32; + struct ctu_can_fd_btr_fd_s { +#ifdef __LITTLE_ENDIAN_BITFIELD + /* BTR_FD */ + uint32_t prop_fd : 6; + uint32_t reserved_6 : 1; + uint32_t ph1_fd : 5; + uint32_t reserved_12 : 1; + uint32_t ph2_fd : 5; + uint32_t reserved_18 : 1; + uint32_t brp_fd : 8; + uint32_t sjw_fd : 5; +#else + uint32_t sjw_fd : 5; + uint32_t brp_fd : 8; + uint32_t reserved_18 : 1; + uint32_t ph2_fd : 5; + uint32_t reserved_12 : 1; + uint32_t ph1_fd : 5; + uint32_t reserved_6 : 1; + uint32_t prop_fd : 6; +#endif + } s; +}; + +union ctu_can_fd_ewl_erp_fault_state { + uint32_t u32; + struct ctu_can_fd_ewl_erp_fault_state_s { +#ifdef __LITTLE_ENDIAN_BITFIELD + /* EWL */ + uint32_t ew_limit : 8; + /* ERP */ + uint32_t erp_limit : 8; + /* FAULT_STATE */ + uint32_t era : 1; + uint32_t erp : 1; + uint32_t bof : 1; + uint32_t reserved_31_19 : 13; +#else + uint32_t reserved_31_19 : 13; + uint32_t bof : 1; + uint32_t erp : 1; + uint32_t era : 1; + uint32_t erp_limit : 8; + uint32_t ew_limit : 8; +#endif + } s; +}; + +union ctu_can_fd_rec_tec { + uint32_t u32; + struct ctu_can_fd_rec_tec_s { +#ifdef __LITTLE_ENDIAN_BITFIELD + /* REC */ + uint32_t rec_val : 9; + uint32_t reserved_15_9 : 7; + /* TEC */ + uint32_t tec_val : 9; + uint32_t reserved_31_25 : 7; +#else + uint32_t reserved_31_25 : 7; + uint32_t tec_val : 9; + uint32_t reserved_15_9 : 7; + uint32_t rec_val : 9; +#endif + } s; +}; + +union ctu_can_fd_err_norm_err_fd { + uint32_t u32; + struct ctu_can_fd_err_norm_err_fd_s { +#ifdef __LITTLE_ENDIAN_BITFIELD + /* ERR_NORM 
*/ + uint32_t err_norm_val : 16; + /* ERR_FD */ + uint32_t err_fd_val : 16; +#else + uint32_t err_fd_val : 16; + uint32_t err_norm_val : 16; +#endif + } s; +}; + +union ctu_can_fd_ctr_pres { + uint32_t u32; + struct ctu_can_fd_ctr_pres_s { +#ifdef __LITTLE_ENDIAN_BITFIELD + /* CTR_PRES */ + uint32_t ctpv : 9; + uint32_t ptx : 1; + uint32_t prx : 1; + uint32_t enorm : 1; + uint32_t efd : 1; + uint32_t reserved_31_13 : 19; +#else + uint32_t reserved_31_13 : 19; + uint32_t efd : 1; + uint32_t enorm : 1; + uint32_t prx : 1; + uint32_t ptx : 1; + uint32_t ctpv : 9; +#endif + } s; +}; + +union ctu_can_fd_filter_a_mask { + uint32_t u32; + struct ctu_can_fd_filter_a_mask_s { +#ifdef __LITTLE_ENDIAN_BITFIELD + /* FILTER_A_MASK */ + uint32_t bit_mask_a_val : 29; + uint32_t reserved_31_29 : 3; +#else + uint32_t reserved_31_29 : 3; + uint32_t bit_mask_a_val : 29; +#endif + } s; +}; + +union ctu_can_fd_filter_a_val { + uint32_t u32; + struct ctu_can_fd_filter_a_val_s { +#ifdef __LITTLE_ENDIAN_BITFIELD + /* FILTER_A_VAL */ + uint32_t bit_val_a_val : 29; + uint32_t reserved_31_29 : 3; +#else + uint32_t reserved_31_29 : 3; + uint32_t bit_val_a_val : 29; +#endif + } s; +}; + +union ctu_can_fd_filter_b_mask { + uint32_t u32; + struct ctu_can_fd_filter_b_mask_s { +#ifdef __LITTLE_ENDIAN_BITFIELD + /* FILTER_B_MASK */ + uint32_t bit_mask_b_val : 29; + uint32_t reserved_31_29 : 3; +#else + uint32_t reserved_31_29 : 3; + uint32_t bit_mask_b_val : 29; +#endif + } s; +}; + +union ctu_can_fd_filter_b_val { + uint32_t u32; + struct ctu_can_fd_filter_b_val_s { +#ifdef __LITTLE_ENDIAN_BITFIELD + /* FILTER_B_VAL */ + uint32_t bit_val_b_val : 29; + uint32_t reserved_31_29 : 3; +#else + uint32_t reserved_31_29 : 3; + uint32_t bit_val_b_val : 29; +#endif + } s; +}; + +union ctu_can_fd_filter_c_mask { + uint32_t u32; + struct ctu_can_fd_filter_c_mask_s { +#ifdef __LITTLE_ENDIAN_BITFIELD + /* FILTER_C_MASK */ + uint32_t bit_mask_c_val : 29; + uint32_t reserved_31_29 : 3; +#else + uint32_t reserved_31_29 : 3; + uint32_t bit_mask_c_val : 29; +#endif + } s; +}; + +union ctu_can_fd_filter_c_val { + uint32_t u32; + struct ctu_can_fd_filter_c_val_s { +#ifdef __LITTLE_ENDIAN_BITFIELD + /* FILTER_C_VAL */ + uint32_t bit_val_c_val : 29; + uint32_t reserved_31_29 : 3; +#else + uint32_t reserved_31_29 : 3; + uint32_t bit_val_c_val : 29; +#endif + } s; +}; + +union ctu_can_fd_filter_ran_low { + uint32_t u32; + struct ctu_can_fd_filter_ran_low_s { +#ifdef __LITTLE_ENDIAN_BITFIELD + /* FILTER_RAN_LOW */ + uint32_t bit_ran_low_val : 29; + uint32_t reserved_31_29 : 3; +#else + uint32_t reserved_31_29 : 3; + uint32_t bit_ran_low_val : 29; +#endif + } s; +}; + +union ctu_can_fd_filter_ran_high { + uint32_t u32; + struct ctu_can_fd_filter_ran_high_s { +#ifdef __LITTLE_ENDIAN_BITFIELD + /* FILTER_RAN_HIGH */ + uint32_t bit_ran_high_val : 29; + uint32_t reserved_31_29 : 3; +#else + uint32_t reserved_31_29 : 3; + uint32_t bit_ran_high_val : 29; +#endif + } s; +}; + +union ctu_can_fd_filter_control_filter_status { + uint32_t u32; + struct ctu_can_fd_filter_control_filter_status_s { +#ifdef __LITTLE_ENDIAN_BITFIELD + /* FILTER_CONTROL */ + uint32_t fanb : 1; + uint32_t fane : 1; + uint32_t fafb : 1; + uint32_t fafe : 1; + uint32_t fbnb : 1; + uint32_t fbne : 1; + uint32_t fbfb : 1; + uint32_t fbfe : 1; + uint32_t fcnb : 1; + uint32_t fcne : 1; + uint32_t fcfb : 1; + uint32_t fcfe : 1; + uint32_t frnb : 1; + uint32_t frne : 1; + uint32_t frfb : 1; + uint32_t frfe : 1; + /* FILTER_STATUS */ + uint32_t sfa : 1; + uint32_t sfb : 1; + uint32_t sfc : 
1; + uint32_t sfr : 1; + uint32_t reserved_31_20 : 12; +#else + uint32_t reserved_31_20 : 12; + uint32_t sfr : 1; + uint32_t sfc : 1; + uint32_t sfb : 1; + uint32_t sfa : 1; + uint32_t frfe : 1; + uint32_t frfb : 1; + uint32_t frne : 1; + uint32_t frnb : 1; + uint32_t fcfe : 1; + uint32_t fcfb : 1; + uint32_t fcne : 1; + uint32_t fcnb : 1; + uint32_t fbfe : 1; + uint32_t fbfb : 1; + uint32_t fbne : 1; + uint32_t fbnb : 1; + uint32_t fafe : 1; + uint32_t fafb : 1; + uint32_t fane : 1; + uint32_t fanb : 1; +#endif + } s; +}; + +union ctu_can_fd_rx_mem_info { + uint32_t u32; + struct ctu_can_fd_rx_mem_info_s { +#ifdef __LITTLE_ENDIAN_BITFIELD + /* RX_MEM_INFO */ + uint32_t rx_buff_size : 13; + uint32_t reserved_15_13 : 3; + uint32_t rx_mem_free : 13; + uint32_t reserved_31_29 : 3; +#else + uint32_t reserved_31_29 : 3; + uint32_t rx_mem_free : 13; + uint32_t reserved_15_13 : 3; + uint32_t rx_buff_size : 13; +#endif + } s; +}; + +union ctu_can_fd_rx_pointers { + uint32_t u32; + struct ctu_can_fd_rx_pointers_s { +#ifdef __LITTLE_ENDIAN_BITFIELD + /* RX_POINTERS */ + uint32_t rx_wpp : 12; + uint32_t reserved_15_12 : 4; + uint32_t rx_rpp : 12; + uint32_t reserved_31_28 : 4; +#else + uint32_t reserved_31_28 : 4; + uint32_t rx_rpp : 12; + uint32_t reserved_15_12 : 4; + uint32_t rx_wpp : 12; +#endif + } s; +}; + +union ctu_can_fd_rx_status_rx_settings { + uint32_t u32; + struct ctu_can_fd_rx_status_rx_settings_s { +#ifdef __LITTLE_ENDIAN_BITFIELD + /* RX_STATUS */ + uint32_t rxe : 1; + uint32_t rxf : 1; + uint32_t reserved_3_2 : 2; + uint32_t rxfrc : 11; + uint32_t reserved_15 : 1; + /* RX_SETTINGS */ + uint32_t rtsop : 1; + uint32_t reserved_31_17 : 15; +#else + uint32_t reserved_31_17 : 15; + uint32_t rtsop : 1; + uint32_t reserved_15 : 1; + uint32_t rxfrc : 11; + uint32_t reserved_3_2 : 2; + uint32_t rxf : 1; + uint32_t rxe : 1; +#endif + } s; +}; + +enum ctu_can_fd_rx_settings_rtsop { + RTS_END = 0x0, + RTS_BEG = 0x1, +}; + +union ctu_can_fd_rx_data { + uint32_t u32; + struct ctu_can_fd_rx_data_s { + /* RX_DATA */ + uint32_t rx_data : 32; + } s; +}; + +union ctu_can_fd_tx_status { + uint32_t u32; + struct ctu_can_fd_tx_status_s { +#ifdef __LITTLE_ENDIAN_BITFIELD + /* TX_STATUS */ + uint32_t tx1s : 4; + uint32_t tx2s : 4; + uint32_t tx3s : 4; + uint32_t tx4s : 4; + uint32_t reserved_31_16 : 16; +#else + uint32_t reserved_31_16 : 16; + uint32_t tx4s : 4; + uint32_t tx3s : 4; + uint32_t tx2s : 4; + uint32_t tx1s : 4; +#endif + } s; +}; + +enum ctu_can_fd_tx_status_tx1s { + TXT_RDY = 0x1, + TXT_TRAN = 0x2, + TXT_ABTP = 0x3, + TXT_TOK = 0x4, + TXT_ERR = 0x6, + TXT_ABT = 0x7, + TXT_ETY = 0x8, +}; + +union ctu_can_fd_tx_command { + uint32_t u32; + struct ctu_can_fd_tx_command_s { +#ifdef __LITTLE_ENDIAN_BITFIELD + /* TX_COMMAND */ + uint32_t txce : 1; + uint32_t txcr : 1; + uint32_t txca : 1; + uint32_t reserved_7_3 : 5; + uint32_t txb1 : 1; + uint32_t txb2 : 1; + uint32_t txb3 : 1; + uint32_t txb4 : 1; + uint32_t reserved_31_12 : 20; +#else + uint32_t reserved_31_12 : 20; + uint32_t txb4 : 1; + uint32_t txb3 : 1; + uint32_t txb2 : 1; + uint32_t txb1 : 1; + uint32_t reserved_7_3 : 5; + uint32_t txca : 1; + uint32_t txcr : 1; + uint32_t txce : 1; +#endif + } s; +}; + +union ctu_can_fd_tx_priority { + uint32_t u32; + struct ctu_can_fd_tx_priority_s { +#ifdef __LITTLE_ENDIAN_BITFIELD + /* TX_PRIORITY */ + uint32_t txt1p : 3; + uint32_t reserved_3 : 1; + uint32_t txt2p : 3; + uint32_t reserved_7 : 1; + uint32_t txt3p : 3; + uint32_t reserved_11 : 1; + uint32_t txt4p : 3; + uint32_t reserved_31_15 : 17; 
+#else + uint32_t reserved_31_15 : 17; + uint32_t txt4p : 3; + uint32_t reserved_11 : 1; + uint32_t txt3p : 3; + uint32_t reserved_7 : 1; + uint32_t txt2p : 3; + uint32_t reserved_3 : 1; + uint32_t txt1p : 3; +#endif + } s; +}; + +union ctu_can_fd_err_capt_alc { + uint32_t u32; + struct ctu_can_fd_err_capt_alc_s { +#ifdef __LITTLE_ENDIAN_BITFIELD + /* ERR_CAPT */ + uint32_t err_pos : 5; + uint32_t err_type : 3; + uint32_t reserved_15_8 : 8; + /* ALC */ + uint32_t alc_bit : 5; + uint32_t alc_id_field : 3; + uint32_t reserved_31_24 : 8; +#else + uint32_t reserved_31_24 : 8; + uint32_t alc_id_field : 3; + uint32_t alc_bit : 5; + uint32_t reserved_15_8 : 8; + uint32_t err_type : 3; + uint32_t err_pos : 5; +#endif + } s; +}; + +enum ctu_can_fd_err_capt_err_pos { + ERC_POS_SOF = 0x0, + ERC_POS_ARB = 0x1, + ERC_POS_CTRL = 0x2, + ERC_POS_DATA = 0x3, + ERC_POS_CRC = 0x4, + ERC_POS_ACK = 0x5, + ERC_POS_EOF = 0x6, + ERC_POS_ERR = 0x7, + ERC_POS_OVRL = 0x8, + ERC_POS_OTHER = 0x1f, +}; + +enum ctu_can_fd_err_capt_err_type { + ERC_BIT_ERR = 0x0, + ERC_CRC_ERR = 0x1, + ERC_FRM_ERR = 0x2, + ERC_ACK_ERR = 0x3, + ERC_STUF_ERR = 0x4, +}; + +enum ctu_can_fd_alc_alc_id_field { + ALC_RSVD = 0x0, + ALC_BASE_ID = 0x1, + ALC_SRR_RTR = 0x2, + ALC_IDE = 0x3, + ALC_EXTENSION = 0x4, + ALC_RTR = 0x5, +}; + +union ctu_can_fd_trv_delay_ssp_cfg { + uint32_t u32; + struct ctu_can_fd_trv_delay_ssp_cfg_s { +#ifdef __LITTLE_ENDIAN_BITFIELD + /* TRV_DELAY */ + uint32_t trv_delay_value : 7; + uint32_t reserved_15_7 : 9; + /* SSP_CFG */ + uint32_t ssp_offset : 8; + uint32_t ssp_src : 2; + uint32_t reserved_31_26 : 6; +#else + uint32_t reserved_31_26 : 6; + uint32_t ssp_src : 2; + uint32_t ssp_offset : 8; + uint32_t reserved_15_7 : 9; + uint32_t trv_delay_value : 7; +#endif + } s; +}; + +enum ctu_can_fd_ssp_cfg_ssp_src { + SSP_SRC_MEAS_N_OFFSET = 0x0, + SSP_SRC_NO_SSP = 0x1, + SSP_SRC_OFFSET = 0x2, +}; + +union ctu_can_fd_rx_fr_ctr { + uint32_t u32; + struct ctu_can_fd_rx_fr_ctr_s { + /* RX_FR_CTR */ + uint32_t rx_fr_ctr_val : 32; + } s; +}; + +union ctu_can_fd_tx_fr_ctr { + uint32_t u32; + struct ctu_can_fd_tx_fr_ctr_s { + /* TX_FR_CTR */ + uint32_t tx_fr_ctr_val : 32; + } s; +}; + +union ctu_can_fd_debug_register { + uint32_t u32; + struct ctu_can_fd_debug_register_s { +#ifdef __LITTLE_ENDIAN_BITFIELD + /* DEBUG_REGISTER */ + uint32_t stuff_count : 3; + uint32_t destuff_count : 3; + uint32_t pc_arb : 1; + uint32_t pc_con : 1; + uint32_t pc_dat : 1; + uint32_t pc_stc : 1; + uint32_t pc_crc : 1; + uint32_t pc_crcd : 1; + uint32_t pc_ack : 1; + uint32_t pc_ackd : 1; + uint32_t pc_eof : 1; + uint32_t pc_int : 1; + uint32_t pc_susp : 1; + uint32_t pc_ovr : 1; + uint32_t pc_sof : 1; + uint32_t reserved_31_19 : 13; +#else + uint32_t reserved_31_19 : 13; + uint32_t pc_sof : 1; + uint32_t pc_ovr : 1; + uint32_t pc_susp : 1; + uint32_t pc_int : 1; + uint32_t pc_eof : 1; + uint32_t pc_ackd : 1; + uint32_t pc_ack : 1; + uint32_t pc_crcd : 1; + uint32_t pc_crc : 1; + uint32_t pc_stc : 1; + uint32_t pc_dat : 1; + uint32_t pc_con : 1; + uint32_t pc_arb : 1; + uint32_t destuff_count : 3; + uint32_t stuff_count : 3; +#endif + } s; +}; + +union ctu_can_fd_yolo_reg { + uint32_t u32; + struct ctu_can_fd_yolo_reg_s { + /* YOLO_REG */ + uint32_t yolo_val : 32; + } s; +}; + +union ctu_can_fd_timestamp_low { + uint32_t u32; + struct ctu_can_fd_timestamp_low_s { + /* TIMESTAMP_LOW */ + uint32_t timestamp_low : 32; + } s; +}; + +union ctu_can_fd_timestamp_high { + uint32_t u32; + struct ctu_can_fd_timestamp_high_s { + /* TIMESTAMP_HIGH */ + uint32_t 
timestamp_high : 32; + } s; +}; + +#endif diff --git a/hw/net/can/ctucan_core.c b/hw/net/can/ctucan_core.c new file mode 100644 index 000000000..d171c372e --- /dev/null +++ b/hw/net/can/ctucan_core.c @@ -0,0 +1,687 @@ +/* + * CTU CAN FD PCI device emulation + * http://canbus.pages.fel.cvut.cz/ + * + * Copyright (c) 2019 Jan Charvat (jancharvat.charvat@gmail.com) + * + * Based on Kvaser PCI CAN device (SJA1000 based) emulation implemented by + * Jin Yang and Pavel Pisa + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "chardev/char.h" +#include "hw/irq.h" +#include "migration/vmstate.h" +#include "net/can_emu.h" + +#include "ctucan_core.h" + +#ifndef DEBUG_CAN +#define DEBUG_CAN 0 +#endif /*DEBUG_CAN*/ + +#define DPRINTF(fmt, ...) 
\ + do { \ + if (DEBUG_CAN) { \ + qemu_log("[ctucan]: " fmt , ## __VA_ARGS__); \ + } \ + } while (0) + +static void ctucan_buff2frame(const uint8_t *buff, qemu_can_frame *frame) +{ + frame->can_id = 0; + frame->can_dlc = 0; + frame->flags = 0; + + if (buff == NULL) { + return; + } + { + union ctu_can_fd_frame_form_w frame_form_w; + union ctu_can_fd_identifier_w identifier_w; + unsigned int ide; + uint32_t w; + + w = le32_to_cpu(*(uint32_t *)buff); + frame_form_w = (union ctu_can_fd_frame_form_w)w; + frame->can_dlc = can_dlc2len(frame_form_w.s.dlc); + + w = le32_to_cpu(*(uint32_t *)(buff + 4)); + identifier_w = (union ctu_can_fd_identifier_w)w; + + ide = frame_form_w.s.ide; + if (ide) { + frame->can_id = (identifier_w.s.identifier_base << 18) | + identifier_w.s.identifier_ext; + frame->can_id |= QEMU_CAN_EFF_FLAG; + } else { + frame->can_id = identifier_w.s.identifier_base; + } + + if (frame_form_w.s.esi_rsv) { + frame->flags |= QEMU_CAN_FRMF_ESI; + } + + if (frame_form_w.s.rtr) { + frame->can_id |= QEMU_CAN_RTR_FLAG; + } + + if (frame_form_w.s.fdf) { /*CAN FD*/ + frame->flags |= QEMU_CAN_FRMF_TYPE_FD; + if (frame_form_w.s.brs) { + frame->flags |= QEMU_CAN_FRMF_BRS; + } + } + } + + memcpy(frame->data, buff + 0x10, 0x40); +} + + +static int ctucan_frame2buff(const qemu_can_frame *frame, uint8_t *buff) +{ + unsigned int bytes_cnt = -1; + memset(buff, 0, CTUCAN_MSG_MAX_LEN * sizeof(*buff)); + + if (frame == NULL) { + return bytes_cnt; + } + { + union ctu_can_fd_frame_form_w frame_form_w; + union ctu_can_fd_identifier_w identifier_w; + + frame_form_w.u32 = 0; + identifier_w.u32 = 0; + + bytes_cnt = frame->can_dlc; + bytes_cnt = (bytes_cnt + 3) & ~3; + bytes_cnt += 16; + frame_form_w.s.rwcnt = (bytes_cnt >> 2) - 1; + + frame_form_w.s.dlc = can_len2dlc(frame->can_dlc); + + if (frame->can_id & QEMU_CAN_EFF_FLAG) { + frame_form_w.s.ide = 1; + identifier_w.s.identifier_base = + (frame->can_id & 0x1FFC0000) >> 18; + identifier_w.s.identifier_ext = frame->can_id & 0x3FFFF; + } else { + identifier_w.s.identifier_base = frame->can_id & 0x7FF; + } + + if (frame->flags & QEMU_CAN_FRMF_ESI) { + frame_form_w.s.esi_rsv = 1; + } + + if (frame->can_id & QEMU_CAN_RTR_FLAG) { + frame_form_w.s.rtr = 1; + } + + if (frame->flags & QEMU_CAN_FRMF_TYPE_FD) { /*CAN FD*/ + frame_form_w.s.fdf = 1; + if (frame->flags & QEMU_CAN_FRMF_BRS) { + frame_form_w.s.brs = 1; + } + } + *(uint32_t *)buff = cpu_to_le32(frame_form_w.u32); + *(uint32_t *)(buff + 4) = cpu_to_le32(identifier_w.u32); + } + + memcpy(buff + 0x10, frame->data, 0x40); + + return bytes_cnt; +} + +static void ctucan_update_irq(CtuCanCoreState *s) +{ + union ctu_can_fd_int_stat int_rq; + + int_rq.u32 = 0; + + if (s->rx_status_rx_settings.s.rxfrc) { + int_rq.s.rbnei = 1; + } + + int_rq.u32 &= ~s->int_mask.u32; + s->int_stat.u32 |= int_rq.u32; + if (s->int_stat.u32 & s->int_ena.u32) { + qemu_irq_raise(s->irq); + } else { + qemu_irq_lower(s->irq); + } +} + +static void ctucan_update_txnf(CtuCanCoreState *s) +{ + int i; + int txnf; + unsigned int buff_st; + + txnf = 0; + + for (i = 0; i < CTUCAN_CORE_TXBUF_NUM; i++) { + buff_st = (s->tx_status.u32 >> (i * 4)) & 0xf; + if (buff_st == TXT_ETY) { + txnf = 1; + } + } + s->status.s.txnf = txnf; +} + +void ctucan_hardware_reset(CtuCanCoreState *s) +{ + DPRINTF("Hardware reset in progress!!!\n"); + int i; + unsigned int buff_st; + uint32_t buff_st_mask; + + s->tx_status.u32 = 0; + for (i = 0; i < CTUCAN_CORE_TXBUF_NUM; i++) { + buff_st_mask = 0xf << (i * 4); + buff_st = TXT_ETY; + s->tx_status.u32 = (s->tx_status.u32 & 
~buff_st_mask) | + (buff_st << (i * 4)); + } + s->status.s.idle = 1; + + ctucan_update_txnf(s); + + s->rx_status_rx_settings.u32 = 0; + s->rx_tail_pos = 0; + s->rx_cnt = 0; + s->rx_frame_rem = 0; + + /* Flush RX buffer */ + s->rx_tail_pos = 0; + s->rx_cnt = 0; + s->rx_frame_rem = 0; + + /* Set on progdokum reset value */ + s->mode_settings.u32 = 0; + s->mode_settings.s.fde = 1; + + s->int_stat.u32 = 0; + s->int_ena.u32 = 0; + s->int_mask.u32 = 0; + + s->rx_status_rx_settings.u32 = 0; + s->rx_status_rx_settings.s.rxe = 0; + + s->rx_fr_ctr.u32 = 0; + s->tx_fr_ctr.u32 = 0; + + s->yolo_reg.s.yolo_val = 3735928559; + + qemu_irq_lower(s->irq); +} + +static void ctucan_send_ready_buffers(CtuCanCoreState *s) +{ + qemu_can_frame frame; + uint8_t *pf; + int buff2tx_idx; + uint32_t tx_prio_max; + + if (!s->mode_settings.s.ena) { + return; + } + + do { + union ctu_can_fd_int_stat int_stat; + int i; + buff2tx_idx = -1; + tx_prio_max = 0; + + for (i = 0; i < CTUCAN_CORE_TXBUF_NUM; i++) { + uint32_t prio; + + if (extract32(s->tx_status.u32, i * 4, 4) != TXT_RDY) { + continue; + } + prio = (s->tx_priority.u32 >> (i * 4)) & 0x7; + if (tx_prio_max < prio) { + tx_prio_max = prio; + buff2tx_idx = i; + } + } + if (buff2tx_idx == -1) { + break; + } + int_stat.u32 = 0; + pf = s->tx_buffer[buff2tx_idx].data; + ctucan_buff2frame(pf, &frame); + s->status.s.idle = 0; + s->status.s.txs = 1; + can_bus_client_send(&s->bus_client, &frame, 1); + s->status.s.idle = 1; + s->status.s.txs = 0; + s->tx_fr_ctr.s.tx_fr_ctr_val++; + int_stat.s.txi = 1; + int_stat.s.txbhci = 1; + s->int_stat.u32 |= int_stat.u32 & ~s->int_mask.u32; + s->tx_status.u32 = deposit32(s->tx_status.u32, + buff2tx_idx * 4, 4, TXT_TOK); + } while (1); +} + +#define CTUCAN_CORE_TXBUFF_SPAN \ + (CTU_CAN_FD_TXTB2_DATA_1 - CTU_CAN_FD_TXTB1_DATA_1) + +void ctucan_mem_write(CtuCanCoreState *s, hwaddr addr, uint64_t val, + unsigned size) +{ + int i; + + DPRINTF("write 0x%02llx addr 0x%02x\n", + (unsigned long long)val, (unsigned int)addr); + + if (addr >= CTUCAN_CORE_MEM_SIZE) { + return; + } + + if (addr >= CTU_CAN_FD_TXTB1_DATA_1) { + int buff_num; + addr -= CTU_CAN_FD_TXTB1_DATA_1; + buff_num = addr / CTUCAN_CORE_TXBUFF_SPAN; + addr %= CTUCAN_CORE_TXBUFF_SPAN; + if ((buff_num < CTUCAN_CORE_TXBUF_NUM) && + ((addr + size) <= sizeof(s->tx_buffer[buff_num].data))) { + stn_le_p(s->tx_buffer[buff_num].data + addr, size, val); + } + } else { + switch (addr & ~3) { + case CTU_CAN_FD_MODE: + s->mode_settings.u32 = (uint32_t)val; + if (s->mode_settings.s.rst) { + ctucan_hardware_reset(s); + s->mode_settings.s.rst = 0; + } + break; + case CTU_CAN_FD_COMMAND: + { + union ctu_can_fd_command command; + command.u32 = (uint32_t)val; + if (command.s.cdo) { + s->status.s.dor = 0; + } + if (command.s.rrb) { + s->rx_tail_pos = 0; + s->rx_cnt = 0; + s->rx_frame_rem = 0; + s->rx_status_rx_settings.s.rxfrc = 0; + } + if (command.s.txfcrst) { + s->tx_fr_ctr.s.tx_fr_ctr_val = 0; + } + if (command.s.rxfcrst) { + s->rx_fr_ctr.s.rx_fr_ctr_val = 0; + } + break; + } + case CTU_CAN_FD_INT_STAT: + s->int_stat.u32 &= ~(uint32_t)val; + break; + case CTU_CAN_FD_INT_ENA_SET: + s->int_ena.u32 |= (uint32_t)val; + break; + case CTU_CAN_FD_INT_ENA_CLR: + s->int_ena.u32 &= ~(uint32_t)val; + break; + case CTU_CAN_FD_INT_MASK_SET: + s->int_mask.u32 |= (uint32_t)val; + break; + case CTU_CAN_FD_INT_MASK_CLR: + s->int_mask.u32 &= ~(uint32_t)val; + break; + case CTU_CAN_FD_TX_COMMAND: + if (s->mode_settings.s.ena) { + union ctu_can_fd_tx_command tx_command; + union ctu_can_fd_tx_command mask; + unsigned 
int buff_st; + uint32_t buff_st_mask; + + tx_command.u32 = (uint32_t)val; + mask.u32 = 0; + mask.s.txb1 = 1; + + for (i = 0; i < CTUCAN_CORE_TXBUF_NUM; i++) { + if (!(tx_command.u32 & (mask.u32 << i))) { + continue; + } + buff_st_mask = 0xf << (i * 4); + buff_st = (s->tx_status.u32 >> (i * 4)) & 0xf; + if (tx_command.s.txca) { + if (buff_st == TXT_RDY) { + buff_st = TXT_ABT; + } + } + if (tx_command.s.txcr) { + if ((buff_st == TXT_TOK) || (buff_st == TXT_ERR) || + (buff_st == TXT_ABT) || (buff_st == TXT_ETY)) + buff_st = TXT_RDY; + } + if (tx_command.s.txce) { + if ((buff_st == TXT_TOK) || (buff_st == TXT_ERR) || + (buff_st == TXT_ABT)) + buff_st = TXT_ETY; + } + s->tx_status.u32 = (s->tx_status.u32 & ~buff_st_mask) | + (buff_st << (i * 4)); + } + + ctucan_send_ready_buffers(s); + ctucan_update_txnf(s); + } + break; + case CTU_CAN_FD_TX_PRIORITY: + s->tx_priority.u32 = (uint32_t)val; + break; + } + + ctucan_update_irq(s); + } + + return; +} + +uint64_t ctucan_mem_read(CtuCanCoreState *s, hwaddr addr, unsigned size) +{ + uint32_t val = 0; + + DPRINTF("read addr 0x%02x ...\n", (unsigned int)addr); + + if (addr > CTUCAN_CORE_MEM_SIZE) { + return 0; + } + + switch (addr & ~3) { + case CTU_CAN_FD_DEVICE_ID: + { + union ctu_can_fd_device_id_version idver; + idver.u32 = 0; + idver.s.device_id = CTU_CAN_FD_ID; + idver.s.ver_major = 2; + idver.s.ver_minor = 2; + val = idver.u32; + } + break; + case CTU_CAN_FD_MODE: + val = s->mode_settings.u32; + break; + case CTU_CAN_FD_STATUS: + val = s->status.u32; + break; + case CTU_CAN_FD_INT_STAT: + val = s->int_stat.u32; + break; + case CTU_CAN_FD_INT_ENA_SET: + case CTU_CAN_FD_INT_ENA_CLR: + val = s->int_ena.u32; + break; + case CTU_CAN_FD_INT_MASK_SET: + case CTU_CAN_FD_INT_MASK_CLR: + val = s->int_mask.u32; + break; + case CTU_CAN_FD_RX_MEM_INFO: + s->rx_mem_info.u32 = 0; + s->rx_mem_info.s.rx_buff_size = CTUCAN_RCV_BUF_LEN >> 2; + s->rx_mem_info.s.rx_mem_free = (CTUCAN_RCV_BUF_LEN - + s->rx_cnt) >> 2; + val = s->rx_mem_info.u32; + break; + case CTU_CAN_FD_RX_POINTERS: + { + uint32_t rx_head_pos = s->rx_tail_pos + s->rx_cnt; + rx_head_pos %= CTUCAN_RCV_BUF_LEN; + s->rx_pointers.s.rx_wpp = rx_head_pos; + s->rx_pointers.s.rx_rpp = s->rx_tail_pos; + val = s->rx_pointers.u32; + break; + } + case CTU_CAN_FD_RX_STATUS: + case CTU_CAN_FD_RX_SETTINGS: + if (!s->rx_status_rx_settings.s.rxfrc) { + s->rx_status_rx_settings.s.rxe = 1; + } else { + s->rx_status_rx_settings.s.rxe = 0; + } + if (((s->rx_cnt + 3) & ~3) == CTUCAN_RCV_BUF_LEN) { + s->rx_status_rx_settings.s.rxf = 1; + } else { + s->rx_status_rx_settings.s.rxf = 0; + } + val = s->rx_status_rx_settings.u32; + break; + case CTU_CAN_FD_RX_DATA: + if (s->rx_cnt) { + memcpy(&val, s->rx_buff + s->rx_tail_pos, 4); + val = le32_to_cpu(val); + if (!s->rx_frame_rem) { + union ctu_can_fd_frame_form_w frame_form_w; + frame_form_w.u32 = val; + s->rx_frame_rem = frame_form_w.s.rwcnt * 4 + 4; + } + s->rx_cnt -= 4; + s->rx_frame_rem -= 4; + if (!s->rx_frame_rem) { + s->rx_status_rx_settings.s.rxfrc--; + if (!s->rx_status_rx_settings.s.rxfrc) { + s->status.s.rxne = 0; + s->status.s.idle = 1; + s->status.s.rxs = 0; + } + } + s->rx_tail_pos = (s->rx_tail_pos + 4) % CTUCAN_RCV_BUF_LEN; + } else { + val = 0; + } + break; + case CTU_CAN_FD_TX_STATUS: + val = s->tx_status.u32; + break; + case CTU_CAN_FD_TX_PRIORITY: + val = s->tx_priority.u32; + break; + case CTU_CAN_FD_RX_FR_CTR: + val = s->rx_fr_ctr.s.rx_fr_ctr_val; + break; + case CTU_CAN_FD_TX_FR_CTR: + val = s->tx_fr_ctr.s.tx_fr_ctr_val; + break; + case CTU_CAN_FD_YOLO_REG: 
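+ /*
+  * Reads back the 0xdeadbeef (3735928559) test pattern written at reset.
+  * The common tail below then picks the byte lane for sub-word accesses;
+  * e.g. a 2-byte read at offset reg + 2 does
+  *
+  *     val >>= ((2 & 3) << 3);                // val >>= 16
+  *     val &= ((uint64_t)1 << (2 << 3)) - 1;  // val &= 0xffff
+  *
+  * so 0xdeadbeef reads back as 0xdead.
+  */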
+ val = s->yolo_reg.s.yolo_val; + break; + } + + val >>= ((addr & 3) << 3); + if (size < 8) { + val &= ((uint64_t)1 << (size << 3)) - 1; + } + + return val; +} + +bool ctucan_can_receive(CanBusClientState *client) +{ + CtuCanCoreState *s = container_of(client, CtuCanCoreState, bus_client); + + if (!s->mode_settings.s.ena) { + return false; + } + + return true; /* always return true, when operation mode */ +} + +ssize_t ctucan_receive(CanBusClientState *client, const qemu_can_frame *frames, + size_t frames_cnt) +{ + CtuCanCoreState *s = container_of(client, CtuCanCoreState, bus_client); + static uint8_t rcv[CTUCAN_MSG_MAX_LEN]; + int i; + int ret = -1; + const qemu_can_frame *frame = frames; + union ctu_can_fd_int_stat int_stat; + int_stat.u32 = 0; + + if (frames_cnt <= 0) { + return 0; + } + + ret = ctucan_frame2buff(frame, rcv); + + if (s->rx_cnt + ret > CTUCAN_RCV_BUF_LEN) { /* Data overrun. */ + s->status.s.dor = 1; + int_stat.s.doi = 1; + s->int_stat.u32 |= int_stat.u32 & ~s->int_mask.u32; + ctucan_update_irq(s); + DPRINTF("Receive FIFO overrun\n"); + return ret; + } + s->status.s.idle = 0; + s->status.s.rxs = 1; + int_stat.s.rxi = 1; + if (((s->rx_cnt + 3) & ~3) == CTUCAN_RCV_BUF_LEN) { + int_stat.s.rxfi = 1; + } + s->int_stat.u32 |= int_stat.u32 & ~s->int_mask.u32; + s->rx_fr_ctr.s.rx_fr_ctr_val++; + s->rx_status_rx_settings.s.rxfrc++; + for (i = 0; i < ret; i++) { + s->rx_buff[(s->rx_tail_pos + s->rx_cnt) % CTUCAN_RCV_BUF_LEN] = rcv[i]; + s->rx_cnt++; + } + s->status.s.rxne = 1; + + ctucan_update_irq(s); + + return 1; +} + +static CanBusClientInfo ctucan_bus_client_info = { + .can_receive = ctucan_can_receive, + .receive = ctucan_receive, +}; + + +int ctucan_connect_to_bus(CtuCanCoreState *s, CanBusState *bus) +{ + s->bus_client.info = &ctucan_bus_client_info; + + if (!bus) { + return -EINVAL; + } + + if (can_bus_insert_client(bus, &s->bus_client) < 0) { + return -1; + } + + return 0; +} + +void ctucan_disconnect(CtuCanCoreState *s) +{ + can_bus_remove_client(&s->bus_client); +} + +int ctucan_init(CtuCanCoreState *s, qemu_irq irq) +{ + s->irq = irq; + + qemu_irq_lower(s->irq); + + ctucan_hardware_reset(s); + + return 0; +} + +const VMStateDescription vmstate_qemu_ctucan_tx_buffer = { + .name = "qemu_ctucan_tx_buffer", + .version_id = 1, + .minimum_version_id = 1, + .minimum_version_id_old = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT8_ARRAY(data, CtuCanCoreMsgBuffer, CTUCAN_CORE_MSG_MAX_LEN), + VMSTATE_END_OF_LIST() + } +}; + +static int ctucan_post_load(void *opaque, int version_id) +{ + CtuCanCoreState *s = opaque; + ctucan_update_irq(s); + return 0; +} + +/* VMState is needed for live migration of QEMU images */ +const VMStateDescription vmstate_ctucan = { + .name = "ctucan", + .version_id = 1, + .minimum_version_id = 1, + .minimum_version_id_old = 1, + .post_load = ctucan_post_load, + .fields = (VMStateField[]) { + VMSTATE_UINT32(mode_settings.u32, CtuCanCoreState), + VMSTATE_UINT32(status.u32, CtuCanCoreState), + VMSTATE_UINT32(int_stat.u32, CtuCanCoreState), + VMSTATE_UINT32(int_ena.u32, CtuCanCoreState), + VMSTATE_UINT32(int_mask.u32, CtuCanCoreState), + VMSTATE_UINT32(brt.u32, CtuCanCoreState), + VMSTATE_UINT32(brt_fd.u32, CtuCanCoreState), + VMSTATE_UINT32(ewl_erp_fault_state.u32, CtuCanCoreState), + VMSTATE_UINT32(rec_tec.u32, CtuCanCoreState), + VMSTATE_UINT32(err_norm_err_fd.u32, CtuCanCoreState), + VMSTATE_UINT32(ctr_pres.u32, CtuCanCoreState), + VMSTATE_UINT32(filter_a_mask.u32, CtuCanCoreState), + VMSTATE_UINT32(filter_a_val.u32, CtuCanCoreState), + 
VMSTATE_UINT32(filter_b_mask.u32, CtuCanCoreState), + VMSTATE_UINT32(filter_b_val.u32, CtuCanCoreState), + VMSTATE_UINT32(filter_c_mask.u32, CtuCanCoreState), + VMSTATE_UINT32(filter_c_val.u32, CtuCanCoreState), + VMSTATE_UINT32(filter_ran_low.u32, CtuCanCoreState), + VMSTATE_UINT32(filter_ran_high.u32, CtuCanCoreState), + VMSTATE_UINT32(filter_control_filter_status.u32, CtuCanCoreState), + VMSTATE_UINT32(rx_mem_info.u32, CtuCanCoreState), + VMSTATE_UINT32(rx_pointers.u32, CtuCanCoreState), + VMSTATE_UINT32(rx_status_rx_settings.u32, CtuCanCoreState), + VMSTATE_UINT32(tx_status.u32, CtuCanCoreState), + VMSTATE_UINT32(tx_priority.u32, CtuCanCoreState), + VMSTATE_UINT32(err_capt_alc.u32, CtuCanCoreState), + VMSTATE_UINT32(trv_delay_ssp_cfg.u32, CtuCanCoreState), + VMSTATE_UINT32(rx_fr_ctr.u32, CtuCanCoreState), + VMSTATE_UINT32(tx_fr_ctr.u32, CtuCanCoreState), + VMSTATE_UINT32(debug_register.u32, CtuCanCoreState), + VMSTATE_UINT32(yolo_reg.u32, CtuCanCoreState), + VMSTATE_UINT32(timestamp_low.u32, CtuCanCoreState), + VMSTATE_UINT32(timestamp_high.u32, CtuCanCoreState), + + VMSTATE_STRUCT_ARRAY(tx_buffer, CtuCanCoreState, + CTUCAN_CORE_TXBUF_NUM, 0, vmstate_qemu_ctucan_tx_buffer, + CtuCanCoreMsgBuffer), + + VMSTATE_BUFFER(rx_buff, CtuCanCoreState), + VMSTATE_UINT32(rx_tail_pos, CtuCanCoreState), + VMSTATE_UINT32(rx_cnt, CtuCanCoreState), + VMSTATE_UINT32(rx_frame_rem, CtuCanCoreState), + + VMSTATE_END_OF_LIST() + } +}; diff --git a/hw/net/can/ctucan_core.h b/hw/net/can/ctucan_core.h new file mode 100644 index 000000000..bbc09ae06 --- /dev/null +++ b/hw/net/can/ctucan_core.h @@ -0,0 +1,126 @@ +/* + * CTU CAN FD device emulation + * http://canbus.pages.fel.cvut.cz/ + * + * Copyright (c) 2019 Jan Charvat (jancharvat.charvat@gmail.com) + * + * Based on Kvaser PCI CAN device (SJA1000 based) emulation implemented by + * Jin Yang and Pavel Pisa + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ +#ifndef HW_CAN_CTUCAN_CORE_H +#define HW_CAN_CTUCAN_CORE_H + +#include "exec/hwaddr.h" +#include "net/can_emu.h" + +#ifndef HOST_WORDS_BIGENDIAN +#define __LITTLE_ENDIAN_BITFIELD 1 +#endif + +#include "ctu_can_fd_frame.h" +#include "ctu_can_fd_regs.h" + +#define CTUCAN_CORE_MEM_SIZE 0x500 + +/* The max size for a message in FIFO */ +#define CTUCAN_MSG_MAX_LEN (CTU_CAN_FD_DATA_1_4_W + 64) +/* The receive buffer size. */ +#define CTUCAN_RCV_BUF_LEN (1024 * 8) + + +/* The max size for a message buffer */ +#define CTUCAN_CORE_MSG_MAX_LEN 0x50 +/* The receive buffer size. 
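+   (0x1000 bytes; the RX FIFO storage itself is sized by the
+   CTUCAN_RCV_BUF_LEN definition above.)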
*/ +#define CTUCAN_CORE_RCV_BUF_LEN 0x1000 + +#define CTUCAN_CORE_TXBUF_NUM 4 + +typedef struct CtuCanCoreMsgBuffer { + uint8_t data[CTUCAN_CORE_MSG_MAX_LEN]; +} CtuCanCoreMsgBuffer; + +typedef struct CtuCanCoreState { + union ctu_can_fd_mode_settings mode_settings; + union ctu_can_fd_status status; + union ctu_can_fd_int_stat int_stat; + union ctu_can_fd_int_ena_set int_ena; + union ctu_can_fd_int_mask_set int_mask; + union ctu_can_fd_btr brt; + union ctu_can_fd_btr_fd brt_fd; + union ctu_can_fd_ewl_erp_fault_state ewl_erp_fault_state; + union ctu_can_fd_rec_tec rec_tec; + union ctu_can_fd_err_norm_err_fd err_norm_err_fd; + union ctu_can_fd_ctr_pres ctr_pres; + union ctu_can_fd_filter_a_mask filter_a_mask; + union ctu_can_fd_filter_a_val filter_a_val; + union ctu_can_fd_filter_b_mask filter_b_mask; + union ctu_can_fd_filter_b_val filter_b_val; + union ctu_can_fd_filter_c_mask filter_c_mask; + union ctu_can_fd_filter_c_val filter_c_val; + union ctu_can_fd_filter_ran_low filter_ran_low; + union ctu_can_fd_filter_ran_high filter_ran_high; + union ctu_can_fd_filter_control_filter_status filter_control_filter_status; + union ctu_can_fd_rx_mem_info rx_mem_info; + union ctu_can_fd_rx_pointers rx_pointers; + union ctu_can_fd_rx_status_rx_settings rx_status_rx_settings; + union ctu_can_fd_tx_status tx_status; + union ctu_can_fd_tx_priority tx_priority; + union ctu_can_fd_err_capt_alc err_capt_alc; + union ctu_can_fd_trv_delay_ssp_cfg trv_delay_ssp_cfg; + union ctu_can_fd_rx_fr_ctr rx_fr_ctr; + union ctu_can_fd_tx_fr_ctr tx_fr_ctr; + union ctu_can_fd_debug_register debug_register; + union ctu_can_fd_yolo_reg yolo_reg; + union ctu_can_fd_timestamp_low timestamp_low; + union ctu_can_fd_timestamp_high timestamp_high; + + CtuCanCoreMsgBuffer tx_buffer[CTUCAN_CORE_TXBUF_NUM]; + + uint8_t rx_buff[CTUCAN_RCV_BUF_LEN]; /* 32~95 .. 64bytes Rx FIFO */ + uint32_t rx_tail_pos; /* Count by bytes. */ + uint32_t rx_cnt; /* Count by bytes. 
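+   The head position is derived rather than stored:
+   head = (rx_tail_pos + rx_cnt) % CTUCAN_RCV_BUF_LEN.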
*/ + uint32_t rx_frame_rem; + + qemu_irq irq; + CanBusClientState bus_client; +} CtuCanCoreState; + +void ctucan_hardware_reset(CtuCanCoreState *s); + +void ctucan_mem_write(CtuCanCoreState *s, hwaddr addr, uint64_t val, + unsigned size); + +uint64_t ctucan_mem_read(CtuCanCoreState *s, hwaddr addr, unsigned size); + +int ctucan_connect_to_bus(CtuCanCoreState *s, CanBusState *bus); + +void ctucan_disconnect(CtuCanCoreState *s); + +int ctucan_init(CtuCanCoreState *s, qemu_irq irq); + +bool ctucan_can_receive(CanBusClientState *client); + +ssize_t ctucan_receive(CanBusClientState *client, + const qemu_can_frame *frames, size_t frames_cnt); + +extern const VMStateDescription vmstate_ctucan; + +#endif diff --git a/hw/net/can/ctucan_pci.c b/hw/net/can/ctucan_pci.c new file mode 100644 index 000000000..f1c86cd06 --- /dev/null +++ b/hw/net/can/ctucan_pci.c @@ -0,0 +1,281 @@ +/* + * CTU CAN FD PCI device emulation + * http://canbus.pages.fel.cvut.cz/ + * + * Copyright (c) 2019 Jan Charvat (jancharvat.charvat@gmail.com) + * + * Based on Kvaser PCI CAN device (SJA1000 based) emulation implemented by + * Jin Yang and Pavel Pisa + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "qemu/event_notifier.h" +#include "qemu/module.h" +#include "qemu/thread.h" +#include "qemu/sockets.h" +#include "qapi/error.h" +#include "chardev/char.h" +#include "hw/irq.h" +#include "hw/pci/pci.h" +#include "hw/qdev-properties.h" +#include "migration/vmstate.h" +#include "net/can_emu.h" + +#include "ctucan_core.h" + +#define TYPE_CTUCAN_PCI_DEV "ctucan_pci" + +typedef struct CtuCanPCIState CtuCanPCIState; +DECLARE_INSTANCE_CHECKER(CtuCanPCIState, CTUCAN_PCI_DEV, + TYPE_CTUCAN_PCI_DEV) + +#define CTUCAN_PCI_CORE_COUNT 2 +#define CTUCAN_PCI_CORE_RANGE 0x10000 + +#define CTUCAN_PCI_BAR_COUNT 2 + +#define CTUCAN_PCI_BYTES_PER_CORE 0x4000 + +#ifndef PCI_VENDOR_ID_TEDIA +#define PCI_VENDOR_ID_TEDIA 0x1760 +#endif + +#define PCI_DEVICE_ID_TEDIA_CTUCAN_VER21 0xff00 + +#define CTUCAN_BAR0_RANGE 0x8000 +#define CTUCAN_BAR0_CTUCAN_ID 0x0000 +#define CTUCAN_BAR0_CRA_BASE 0x4000 +#define CYCLONE_IV_CRA_A2P_IE (0x0050) + +#define CTUCAN_WITHOUT_CTUCAN_ID 0 +#define CTUCAN_WITH_CTUCAN_ID 1 + +struct CtuCanPCIState { + /*< private >*/ + PCIDevice dev; + /*< public >*/ + MemoryRegion ctucan_io[CTUCAN_PCI_BAR_COUNT]; + + CtuCanCoreState ctucan_state[CTUCAN_PCI_CORE_COUNT]; + qemu_irq irq; + + char *model; /* The model that support, only SJA1000 now. 
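+   (Appears unused in this file; likely inherited from the Kvaser
+   SJA1000 PCI model this device is based on.)
+   Illustrative wiring from the QEMU command line, assuming the usual
+   can-bus backend object:
+     -object can-bus,id=canbus0
+     -device ctucan_pci,canbus0=canbus0,canbus1=canbus0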
*/ + CanBusState *canbus[CTUCAN_PCI_CORE_COUNT]; +}; + +static void ctucan_pci_reset(DeviceState *dev) +{ + CtuCanPCIState *d = CTUCAN_PCI_DEV(dev); + int i; + + for (i = 0 ; i < CTUCAN_PCI_CORE_COUNT; i++) { + ctucan_hardware_reset(&d->ctucan_state[i]); + } +} + +static uint64_t ctucan_pci_id_cra_io_read(void *opaque, hwaddr addr, + unsigned size) +{ + if (addr >= 4) { + return 0; + } + + uint64_t tmp = 0xC0000000 + CTUCAN_PCI_CORE_COUNT; + tmp >>= ((addr & 3) << 3); + if (size < 8) { + tmp &= ((uint64_t)1 << (size << 3)) - 1; + } + return tmp; +} + +static void ctucan_pci_id_cra_io_write(void *opaque, hwaddr addr, uint64_t data, + unsigned size) +{ + +} + +static uint64_t ctucan_pci_cores_io_read(void *opaque, hwaddr addr, + unsigned size) +{ + CtuCanPCIState *d = opaque; + CtuCanCoreState *s; + hwaddr core_num = addr / CTUCAN_PCI_BYTES_PER_CORE; + + if (core_num >= CTUCAN_PCI_CORE_COUNT) { + return 0; + } + + s = &d->ctucan_state[core_num]; + + return ctucan_mem_read(s, addr % CTUCAN_PCI_BYTES_PER_CORE, size); +} + +static void ctucan_pci_cores_io_write(void *opaque, hwaddr addr, uint64_t data, + unsigned size) +{ + CtuCanPCIState *d = opaque; + CtuCanCoreState *s; + hwaddr core_num = addr / CTUCAN_PCI_BYTES_PER_CORE; + + if (core_num >= CTUCAN_PCI_CORE_COUNT) { + return; + } + + s = &d->ctucan_state[core_num]; + + return ctucan_mem_write(s, addr % CTUCAN_PCI_BYTES_PER_CORE, data, size); +} + +static const MemoryRegionOps ctucan_pci_id_cra_io_ops = { + .read = ctucan_pci_id_cra_io_read, + .write = ctucan_pci_id_cra_io_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .impl.min_access_size = 1, + .impl.max_access_size = 4, + .valid.min_access_size = 1, + .valid.max_access_size = 4, +}; + +static const MemoryRegionOps ctucan_pci_cores_io_ops = { + .read = ctucan_pci_cores_io_read, + .write = ctucan_pci_cores_io_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .impl.min_access_size = 1, + .impl.max_access_size = 4, + .valid.min_access_size = 1, + .valid.max_access_size = 4, +}; + +static void ctucan_pci_realize(PCIDevice *pci_dev, Error **errp) +{ + CtuCanPCIState *d = CTUCAN_PCI_DEV(pci_dev); + uint8_t *pci_conf; + int i; + + pci_conf = pci_dev->config; + pci_conf[PCI_INTERRUPT_PIN] = 0x01; /* interrupt pin A */ + + d->irq = pci_allocate_irq(&d->dev); + + for (i = 0 ; i < CTUCAN_PCI_CORE_COUNT; i++) { + ctucan_init(&d->ctucan_state[i], d->irq); + } + + for (i = 0 ; i < CTUCAN_PCI_CORE_COUNT; i++) { + if (ctucan_connect_to_bus(&d->ctucan_state[i], d->canbus[i]) < 0) { + error_setg(errp, "ctucan_connect_to_bus failed"); + return; + } + } + + memory_region_init_io(&d->ctucan_io[0], OBJECT(d), + &ctucan_pci_id_cra_io_ops, d, + "ctucan_pci-core0", CTUCAN_BAR0_RANGE); + memory_region_init_io(&d->ctucan_io[1], OBJECT(d), + &ctucan_pci_cores_io_ops, d, + "ctucan_pci-core1", CTUCAN_PCI_CORE_RANGE); + + for (i = 0 ; i < CTUCAN_PCI_BAR_COUNT; i++) { + pci_register_bar(&d->dev, i, PCI_BASE_ADDRESS_MEM_MASK & 0, + &d->ctucan_io[i]); + } +} + +static void ctucan_pci_exit(PCIDevice *pci_dev) +{ + CtuCanPCIState *d = CTUCAN_PCI_DEV(pci_dev); + int i; + + for (i = 0 ; i < CTUCAN_PCI_CORE_COUNT; i++) { + ctucan_disconnect(&d->ctucan_state[i]); + } + + qemu_free_irq(d->irq); +} + +static const VMStateDescription vmstate_ctucan_pci = { + .name = "ctucan_pci", + .version_id = 1, + .minimum_version_id = 1, + .minimum_version_id_old = 1, + .fields = (VMStateField[]) { + VMSTATE_PCI_DEVICE(dev, CtuCanPCIState), + VMSTATE_STRUCT(ctucan_state[0], CtuCanPCIState, 0, vmstate_ctucan, + CtuCanCoreState), +#if 
CTUCAN_PCI_CORE_COUNT >= 2 + VMSTATE_STRUCT(ctucan_state[1], CtuCanPCIState, 0, vmstate_ctucan, + CtuCanCoreState), +#endif + VMSTATE_END_OF_LIST() + } +}; + +static void ctucan_pci_instance_init(Object *obj) +{ + CtuCanPCIState *d = CTUCAN_PCI_DEV(obj); + + object_property_add_link(obj, "canbus0", TYPE_CAN_BUS, + (Object **)&d->canbus[0], + qdev_prop_allow_set_link_before_realize, 0); +#if CTUCAN_PCI_CORE_COUNT >= 2 + object_property_add_link(obj, "canbus1", TYPE_CAN_BUS, + (Object **)&d->canbus[1], + qdev_prop_allow_set_link_before_realize, 0); +#endif +} + +static void ctucan_pci_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + + k->realize = ctucan_pci_realize; + k->exit = ctucan_pci_exit; + k->vendor_id = PCI_VENDOR_ID_TEDIA; + k->device_id = PCI_DEVICE_ID_TEDIA_CTUCAN_VER21; + k->revision = 0x00; + k->class_id = 0x000c09; + k->subsystem_vendor_id = PCI_VENDOR_ID_TEDIA; + k->subsystem_id = PCI_DEVICE_ID_TEDIA_CTUCAN_VER21; + dc->desc = "CTU CAN PCI"; + dc->vmsd = &vmstate_ctucan_pci; + set_bit(DEVICE_CATEGORY_MISC, dc->categories); + dc->reset = ctucan_pci_reset; +} + +static const TypeInfo ctucan_pci_info = { + .name = TYPE_CTUCAN_PCI_DEV, + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(CtuCanPCIState), + .class_init = ctucan_pci_class_init, + .instance_init = ctucan_pci_instance_init, + .interfaces = (InterfaceInfo[]) { + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { }, + }, +}; + +static void ctucan_pci_register_types(void) +{ + type_register_static(&ctucan_pci_info); +} + +type_init(ctucan_pci_register_types) diff --git a/hw/net/can/meson.build b/hw/net/can/meson.build new file mode 100644 index 000000000..8fabbd9ee --- /dev/null +++ b/hw/net/can/meson.build @@ -0,0 +1,7 @@ +softmmu_ss.add(when: 'CONFIG_CAN_SJA1000', if_true: files('can_sja1000.c')) +softmmu_ss.add(when: 'CONFIG_CAN_PCI', if_true: files('can_kvaser_pci.c')) +softmmu_ss.add(when: 'CONFIG_CAN_PCI', if_true: files('can_pcm3680_pci.c')) +softmmu_ss.add(when: 'CONFIG_CAN_PCI', if_true: files('can_mioe3680_pci.c')) +softmmu_ss.add(when: 'CONFIG_CAN_CTUCANFD', if_true: files('ctucan_core.c')) +softmmu_ss.add(when: 'CONFIG_CAN_CTUCANFD_PCI', if_true: files('ctucan_pci.c')) +softmmu_ss.add(when: 'CONFIG_XLNX_ZYNQMP', if_true: files('xlnx-zynqmp-can.c')) diff --git a/hw/net/can/trace-events b/hw/net/can/trace-events new file mode 100644 index 000000000..8346a98ab --- /dev/null +++ b/hw/net/can/trace-events @@ -0,0 +1,9 @@ +# xlnx-zynqmp-can.c +xlnx_can_update_irq(uint32_t isr, uint32_t ier, uint32_t irq) "ISR: 0x%08x IER: 0x%08x IRQ: 0x%08x" +xlnx_can_reset(uint32_t val) "Resetting controller with value = 0x%08x" +xlnx_can_rx_fifo_filter_reject(uint32_t id, uint8_t dlc) "Frame: ID: 0x%08x DLC: 0x%02x" +xlnx_can_filter_id_pre_write(uint8_t filter_num, uint32_t value) "Filter%d ID: 0x%08x" +xlnx_can_filter_mask_pre_write(uint8_t filter_num, uint32_t value) "Filter%d MASK: 0x%08x" +xlnx_can_tx_data(uint32_t id, uint8_t dlc, uint8_t db0, uint8_t db1, uint8_t db2, uint8_t db3, uint8_t db4, uint8_t db5, uint8_t db6, uint8_t db7) "Frame: ID: 0x%08x DLC: 0x%02x DATA: 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x" +xlnx_can_rx_data(uint32_t id, uint32_t dlc, uint8_t db0, uint8_t db1, uint8_t db2, uint8_t db3, uint8_t db4, uint8_t db5, uint8_t db6, uint8_t db7) "Frame: ID: 0x%08x DLC: 0x%02x DATA: 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x" +xlnx_can_rx_discard(uint32_t status) "Controller is not enabled for bus 
communication. Status Register: 0x%08x" diff --git a/hw/net/can/trace.h b/hw/net/can/trace.h new file mode 100644 index 000000000..d391c6490 --- /dev/null +++ b/hw/net/can/trace.h @@ -0,0 +1 @@ +#include "trace/trace-hw_net_can.h" diff --git a/hw/net/can/xlnx-zynqmp-can.c b/hw/net/can/xlnx-zynqmp-can.c new file mode 100644 index 000000000..22bb8910f --- /dev/null +++ b/hw/net/can/xlnx-zynqmp-can.c @@ -0,0 +1,1160 @@ +/* + * QEMU model of the Xilinx ZynqMP CAN controller. + * This implementation is based on the following datasheet: + * https://www.xilinx.com/support/documentation/user_guides/ug1085-zynq-ultrascale-trm.pdf + * + * Copyright (c) 2020 Xilinx Inc. + * + * Written-by: Vikram Garhwal + * + * Based on QEMU CAN Device emulation implemented by Jin Yang, Deniz Eren and + * Pavel Pisa + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include "hw/sysbus.h" +#include "hw/register.h" +#include "hw/irq.h" +#include "qapi/error.h" +#include "qemu/bitops.h" +#include "qemu/log.h" +#include "qemu/cutils.h" +#include "migration/vmstate.h" +#include "hw/qdev-properties.h" +#include "net/can_emu.h" +#include "net/can_host.h" +#include "qemu/event_notifier.h" +#include "qom/object_interfaces.h" +#include "hw/net/xlnx-zynqmp-can.h" +#include "trace.h" + +#ifndef XLNX_ZYNQMP_CAN_ERR_DEBUG +#define XLNX_ZYNQMP_CAN_ERR_DEBUG 0 +#endif + +#define MAX_DLC 8 +#undef ERROR + +REG32(SOFTWARE_RESET_REGISTER, 0x0) + FIELD(SOFTWARE_RESET_REGISTER, CEN, 1, 1) + FIELD(SOFTWARE_RESET_REGISTER, SRST, 0, 1) +REG32(MODE_SELECT_REGISTER, 0x4) + FIELD(MODE_SELECT_REGISTER, SNOOP, 2, 1) + FIELD(MODE_SELECT_REGISTER, LBACK, 1, 1) + FIELD(MODE_SELECT_REGISTER, SLEEP, 0, 1) +REG32(ARBITRATION_PHASE_BAUD_RATE_PRESCALER_REGISTER, 0x8) + FIELD(ARBITRATION_PHASE_BAUD_RATE_PRESCALER_REGISTER, BRP, 0, 8) +REG32(ARBITRATION_PHASE_BIT_TIMING_REGISTER, 0xc) + FIELD(ARBITRATION_PHASE_BIT_TIMING_REGISTER, SJW, 7, 2) + FIELD(ARBITRATION_PHASE_BIT_TIMING_REGISTER, TS2, 4, 3) + FIELD(ARBITRATION_PHASE_BIT_TIMING_REGISTER, TS1, 0, 4) +REG32(ERROR_COUNTER_REGISTER, 0x10) + FIELD(ERROR_COUNTER_REGISTER, REC, 8, 8) + FIELD(ERROR_COUNTER_REGISTER, TEC, 0, 8) +REG32(ERROR_STATUS_REGISTER, 0x14) + FIELD(ERROR_STATUS_REGISTER, ACKER, 4, 1) + FIELD(ERROR_STATUS_REGISTER, BERR, 3, 1) + FIELD(ERROR_STATUS_REGISTER, STER, 2, 1) + FIELD(ERROR_STATUS_REGISTER, FMER, 1, 1) + FIELD(ERROR_STATUS_REGISTER, CRCER, 0, 1) +REG32(STATUS_REGISTER, 0x18) + FIELD(STATUS_REGISTER, SNOOP, 12, 1) + FIELD(STATUS_REGISTER, ACFBSY, 11, 1) + FIELD(STATUS_REGISTER, TXFLL, 10, 1) + FIELD(STATUS_REGISTER, TXBFLL, 9, 1) + FIELD(STATUS_REGISTER, ESTAT, 7, 2) + FIELD(STATUS_REGISTER, ERRWRN, 6, 1) + FIELD(STATUS_REGISTER, BBSY, 5, 1) + FIELD(STATUS_REGISTER, BIDLE, 4, 1) + FIELD(STATUS_REGISTER, NORMAL, 3, 1) + FIELD(STATUS_REGISTER, SLEEP, 2, 1) + FIELD(STATUS_REGISTER, LBACK, 1, 1) + FIELD(STATUS_REGISTER, CONFIG, 0, 1) +REG32(INTERRUPT_STATUS_REGISTER, 0x1c) + FIELD(INTERRUPT_STATUS_REGISTER, TXFEMP, 14, 1) + FIELD(INTERRUPT_STATUS_REGISTER, TXFWMEMP, 13, 1) + FIELD(INTERRUPT_STATUS_REGISTER, RXFWMFLL, 12, 1) + FIELD(INTERRUPT_STATUS_REGISTER, WKUP, 11, 1) + FIELD(INTERRUPT_STATUS_REGISTER, SLP, 10, 1) + FIELD(INTERRUPT_STATUS_REGISTER, BSOFF, 9, 1) + FIELD(INTERRUPT_STATUS_REGISTER, ERROR, 8, 1) + FIELD(INTERRUPT_STATUS_REGISTER, RXNEMP, 7, 1) + FIELD(INTERRUPT_STATUS_REGISTER, RXOFLW, 6, 1) + FIELD(INTERRUPT_STATUS_REGISTER, RXUFLW, 5, 1) + FIELD(INTERRUPT_STATUS_REGISTER, RXOK, 4, 1) + FIELD(INTERRUPT_STATUS_REGISTER, TXBFLL, 3, 1) + FIELD(INTERRUPT_STATUS_REGISTER, TXFLL, 2, 1) + FIELD(INTERRUPT_STATUS_REGISTER, TXOK, 1, 1) + FIELD(INTERRUPT_STATUS_REGISTER, ARBLST, 0, 1) +REG32(INTERRUPT_ENABLE_REGISTER, 0x20) + FIELD(INTERRUPT_ENABLE_REGISTER, ETXFEMP, 14, 1) + FIELD(INTERRUPT_ENABLE_REGISTER, ETXFWMEMP, 13, 1) + FIELD(INTERRUPT_ENABLE_REGISTER, ERXFWMFLL, 12, 1) + FIELD(INTERRUPT_ENABLE_REGISTER, EWKUP, 11, 1) + FIELD(INTERRUPT_ENABLE_REGISTER, ESLP, 10, 1) + FIELD(INTERRUPT_ENABLE_REGISTER, EBSOFF, 9, 1) + FIELD(INTERRUPT_ENABLE_REGISTER, EERROR, 8, 1) + FIELD(INTERRUPT_ENABLE_REGISTER, ERXNEMP, 7, 1) + FIELD(INTERRUPT_ENABLE_REGISTER, ERXOFLW, 6, 1) + FIELD(INTERRUPT_ENABLE_REGISTER, ERXUFLW, 5, 1) + FIELD(INTERRUPT_ENABLE_REGISTER, ERXOK, 4, 1) + FIELD(INTERRUPT_ENABLE_REGISTER, ETXBFLL, 3, 1) + FIELD(INTERRUPT_ENABLE_REGISTER, ETXFLL, 2, 1) + 
FIELD(INTERRUPT_ENABLE_REGISTER, ETXOK, 1, 1) + FIELD(INTERRUPT_ENABLE_REGISTER, EARBLST, 0, 1) +REG32(INTERRUPT_CLEAR_REGISTER, 0x24) + FIELD(INTERRUPT_CLEAR_REGISTER, CTXFEMP, 14, 1) + FIELD(INTERRUPT_CLEAR_REGISTER, CTXFWMEMP, 13, 1) + FIELD(INTERRUPT_CLEAR_REGISTER, CRXFWMFLL, 12, 1) + FIELD(INTERRUPT_CLEAR_REGISTER, CWKUP, 11, 1) + FIELD(INTERRUPT_CLEAR_REGISTER, CSLP, 10, 1) + FIELD(INTERRUPT_CLEAR_REGISTER, CBSOFF, 9, 1) + FIELD(INTERRUPT_CLEAR_REGISTER, CERROR, 8, 1) + FIELD(INTERRUPT_CLEAR_REGISTER, CRXNEMP, 7, 1) + FIELD(INTERRUPT_CLEAR_REGISTER, CRXOFLW, 6, 1) + FIELD(INTERRUPT_CLEAR_REGISTER, CRXUFLW, 5, 1) + FIELD(INTERRUPT_CLEAR_REGISTER, CRXOK, 4, 1) + FIELD(INTERRUPT_CLEAR_REGISTER, CTXBFLL, 3, 1) + FIELD(INTERRUPT_CLEAR_REGISTER, CTXFLL, 2, 1) + FIELD(INTERRUPT_CLEAR_REGISTER, CTXOK, 1, 1) + FIELD(INTERRUPT_CLEAR_REGISTER, CARBLST, 0, 1) +REG32(TIMESTAMP_REGISTER, 0x28) + FIELD(TIMESTAMP_REGISTER, CTS, 0, 1) +REG32(WIR, 0x2c) + FIELD(WIR, EW, 8, 8) + FIELD(WIR, FW, 0, 8) +REG32(TXFIFO_ID, 0x30) + FIELD(TXFIFO_ID, IDH, 21, 11) + FIELD(TXFIFO_ID, SRRRTR, 20, 1) + FIELD(TXFIFO_ID, IDE, 19, 1) + FIELD(TXFIFO_ID, IDL, 1, 18) + FIELD(TXFIFO_ID, RTR, 0, 1) +REG32(TXFIFO_DLC, 0x34) + FIELD(TXFIFO_DLC, DLC, 28, 4) +REG32(TXFIFO_DATA1, 0x38) + FIELD(TXFIFO_DATA1, DB0, 24, 8) + FIELD(TXFIFO_DATA1, DB1, 16, 8) + FIELD(TXFIFO_DATA1, DB2, 8, 8) + FIELD(TXFIFO_DATA1, DB3, 0, 8) +REG32(TXFIFO_DATA2, 0x3c) + FIELD(TXFIFO_DATA2, DB4, 24, 8) + FIELD(TXFIFO_DATA2, DB5, 16, 8) + FIELD(TXFIFO_DATA2, DB6, 8, 8) + FIELD(TXFIFO_DATA2, DB7, 0, 8) +REG32(TXHPB_ID, 0x40) + FIELD(TXHPB_ID, IDH, 21, 11) + FIELD(TXHPB_ID, SRRRTR, 20, 1) + FIELD(TXHPB_ID, IDE, 19, 1) + FIELD(TXHPB_ID, IDL, 1, 18) + FIELD(TXHPB_ID, RTR, 0, 1) +REG32(TXHPB_DLC, 0x44) + FIELD(TXHPB_DLC, DLC, 28, 4) +REG32(TXHPB_DATA1, 0x48) + FIELD(TXHPB_DATA1, DB0, 24, 8) + FIELD(TXHPB_DATA1, DB1, 16, 8) + FIELD(TXHPB_DATA1, DB2, 8, 8) + FIELD(TXHPB_DATA1, DB3, 0, 8) +REG32(TXHPB_DATA2, 0x4c) + FIELD(TXHPB_DATA2, DB4, 24, 8) + FIELD(TXHPB_DATA2, DB5, 16, 8) + FIELD(TXHPB_DATA2, DB6, 8, 8) + FIELD(TXHPB_DATA2, DB7, 0, 8) +REG32(RXFIFO_ID, 0x50) + FIELD(RXFIFO_ID, IDH, 21, 11) + FIELD(RXFIFO_ID, SRRRTR, 20, 1) + FIELD(RXFIFO_ID, IDE, 19, 1) + FIELD(RXFIFO_ID, IDL, 1, 18) + FIELD(RXFIFO_ID, RTR, 0, 1) +REG32(RXFIFO_DLC, 0x54) + FIELD(RXFIFO_DLC, DLC, 28, 4) + FIELD(RXFIFO_DLC, RXT, 0, 16) +REG32(RXFIFO_DATA1, 0x58) + FIELD(RXFIFO_DATA1, DB0, 24, 8) + FIELD(RXFIFO_DATA1, DB1, 16, 8) + FIELD(RXFIFO_DATA1, DB2, 8, 8) + FIELD(RXFIFO_DATA1, DB3, 0, 8) +REG32(RXFIFO_DATA2, 0x5c) + FIELD(RXFIFO_DATA2, DB4, 24, 8) + FIELD(RXFIFO_DATA2, DB5, 16, 8) + FIELD(RXFIFO_DATA2, DB6, 8, 8) + FIELD(RXFIFO_DATA2, DB7, 0, 8) +REG32(AFR, 0x60) + FIELD(AFR, UAF4, 3, 1) + FIELD(AFR, UAF3, 2, 1) + FIELD(AFR, UAF2, 1, 1) + FIELD(AFR, UAF1, 0, 1) +REG32(AFMR1, 0x64) + FIELD(AFMR1, AMIDH, 21, 11) + FIELD(AFMR1, AMSRR, 20, 1) + FIELD(AFMR1, AMIDE, 19, 1) + FIELD(AFMR1, AMIDL, 1, 18) + FIELD(AFMR1, AMRTR, 0, 1) +REG32(AFIR1, 0x68) + FIELD(AFIR1, AIIDH, 21, 11) + FIELD(AFIR1, AISRR, 20, 1) + FIELD(AFIR1, AIIDE, 19, 1) + FIELD(AFIR1, AIIDL, 1, 18) + FIELD(AFIR1, AIRTR, 0, 1) +REG32(AFMR2, 0x6c) + FIELD(AFMR2, AMIDH, 21, 11) + FIELD(AFMR2, AMSRR, 20, 1) + FIELD(AFMR2, AMIDE, 19, 1) + FIELD(AFMR2, AMIDL, 1, 18) + FIELD(AFMR2, AMRTR, 0, 1) +REG32(AFIR2, 0x70) + FIELD(AFIR2, AIIDH, 21, 11) + FIELD(AFIR2, AISRR, 20, 1) + FIELD(AFIR2, AIIDE, 19, 1) + FIELD(AFIR2, AIIDL, 1, 18) + FIELD(AFIR2, AIRTR, 0, 1) +REG32(AFMR3, 0x74) + FIELD(AFMR3, AMIDH, 21, 11) + FIELD(AFMR3, AMSRR, 20, 
1) + FIELD(AFMR3, AMIDE, 19, 1) + FIELD(AFMR3, AMIDL, 1, 18) + FIELD(AFMR3, AMRTR, 0, 1) +REG32(AFIR3, 0x78) + FIELD(AFIR3, AIIDH, 21, 11) + FIELD(AFIR3, AISRR, 20, 1) + FIELD(AFIR3, AIIDE, 19, 1) + FIELD(AFIR3, AIIDL, 1, 18) + FIELD(AFIR3, AIRTR, 0, 1) +REG32(AFMR4, 0x7c) + FIELD(AFMR4, AMIDH, 21, 11) + FIELD(AFMR4, AMSRR, 20, 1) + FIELD(AFMR4, AMIDE, 19, 1) + FIELD(AFMR4, AMIDL, 1, 18) + FIELD(AFMR4, AMRTR, 0, 1) +REG32(AFIR4, 0x80) + FIELD(AFIR4, AIIDH, 21, 11) + FIELD(AFIR4, AISRR, 20, 1) + FIELD(AFIR4, AIIDE, 19, 1) + FIELD(AFIR4, AIIDL, 1, 18) + FIELD(AFIR4, AIRTR, 0, 1) + +static void can_update_irq(XlnxZynqMPCANState *s) +{ + uint32_t irq; + + /* Watermark register interrupts. */ + if ((fifo32_num_free(&s->tx_fifo) / CAN_FRAME_SIZE) > + ARRAY_FIELD_EX32(s->regs, WIR, EW)) { + ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, TXFWMEMP, 1); + } + + if ((fifo32_num_used(&s->rx_fifo) / CAN_FRAME_SIZE) > + ARRAY_FIELD_EX32(s->regs, WIR, FW)) { + ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, RXFWMFLL, 1); + } + + /* RX Interrupts. */ + if (fifo32_num_used(&s->rx_fifo) >= CAN_FRAME_SIZE) { + ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, RXNEMP, 1); + } + + /* TX interrupts. */ + if (fifo32_is_empty(&s->tx_fifo)) { + ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, TXFEMP, 1); + } + + if (fifo32_is_full(&s->tx_fifo)) { + ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, TXFLL, 1); + } + + if (fifo32_is_full(&s->txhpb_fifo)) { + ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, TXBFLL, 1); + } + + irq = s->regs[R_INTERRUPT_STATUS_REGISTER]; + irq &= s->regs[R_INTERRUPT_ENABLE_REGISTER]; + + trace_xlnx_can_update_irq(s->regs[R_INTERRUPT_STATUS_REGISTER], + s->regs[R_INTERRUPT_ENABLE_REGISTER], irq); + qemu_set_irq(s->irq, irq); +} + +static void can_ier_post_write(RegisterInfo *reg, uint64_t val) +{ + XlnxZynqMPCANState *s = XLNX_ZYNQMP_CAN(reg->opaque); + + can_update_irq(s); +} + +static uint64_t can_icr_pre_write(RegisterInfo *reg, uint64_t val) +{ + XlnxZynqMPCANState *s = XLNX_ZYNQMP_CAN(reg->opaque); + + s->regs[R_INTERRUPT_STATUS_REGISTER] &= ~val; + can_update_irq(s); + + return 0; +} + +static void can_config_reset(XlnxZynqMPCANState *s) +{ + /* Reset all the configuration registers. */ + register_reset(&s->reg_info[R_SOFTWARE_RESET_REGISTER]); + register_reset(&s->reg_info[R_MODE_SELECT_REGISTER]); + register_reset( + &s->reg_info[R_ARBITRATION_PHASE_BAUD_RATE_PRESCALER_REGISTER]); + register_reset(&s->reg_info[R_ARBITRATION_PHASE_BIT_TIMING_REGISTER]); + register_reset(&s->reg_info[R_STATUS_REGISTER]); + register_reset(&s->reg_info[R_INTERRUPT_STATUS_REGISTER]); + register_reset(&s->reg_info[R_INTERRUPT_ENABLE_REGISTER]); + register_reset(&s->reg_info[R_INTERRUPT_CLEAR_REGISTER]); + register_reset(&s->reg_info[R_WIR]); +} + +static void can_config_mode(XlnxZynqMPCANState *s) +{ + register_reset(&s->reg_info[R_ERROR_COUNTER_REGISTER]); + register_reset(&s->reg_info[R_ERROR_STATUS_REGISTER]); + + /* Put XlnxZynqMPCAN in configuration mode. 
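+   The core stays in configuration mode until the guest sets
+   SOFTWARE_RESET_REGISTER.CEN back to 1 (see can_srr_pre_write).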
*/ + ARRAY_FIELD_DP32(s->regs, STATUS_REGISTER, CONFIG, 1); + ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, WKUP, 0); + ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, SLP, 0); + ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, BSOFF, 0); + ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, ERROR, 0); + ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, RXOFLW, 0); + ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, RXOK, 0); + ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, TXOK, 0); + ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, ARBLST, 0); + + can_update_irq(s); +} + +static void update_status_register_mode_bits(XlnxZynqMPCANState *s) +{ + bool sleep_status = ARRAY_FIELD_EX32(s->regs, STATUS_REGISTER, SLEEP); + bool sleep_mode = ARRAY_FIELD_EX32(s->regs, MODE_SELECT_REGISTER, SLEEP); + /* Wake up interrupt bit. */ + bool wakeup_irq_val = sleep_status && (sleep_mode == 0); + /* Sleep interrupt bit. */ + bool sleep_irq_val = sleep_mode && (sleep_status == 0); + + /* Clear previous core mode status bits. */ + ARRAY_FIELD_DP32(s->regs, STATUS_REGISTER, LBACK, 0); + ARRAY_FIELD_DP32(s->regs, STATUS_REGISTER, SLEEP, 0); + ARRAY_FIELD_DP32(s->regs, STATUS_REGISTER, SNOOP, 0); + ARRAY_FIELD_DP32(s->regs, STATUS_REGISTER, NORMAL, 0); + + /* set current mode bit and generate irqs accordingly. */ + if (ARRAY_FIELD_EX32(s->regs, MODE_SELECT_REGISTER, LBACK)) { + ARRAY_FIELD_DP32(s->regs, STATUS_REGISTER, LBACK, 1); + } else if (ARRAY_FIELD_EX32(s->regs, MODE_SELECT_REGISTER, SLEEP)) { + ARRAY_FIELD_DP32(s->regs, STATUS_REGISTER, SLEEP, 1); + ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, SLP, + sleep_irq_val); + } else if (ARRAY_FIELD_EX32(s->regs, MODE_SELECT_REGISTER, SNOOP)) { + ARRAY_FIELD_DP32(s->regs, STATUS_REGISTER, SNOOP, 1); + } else { + /* + * If all bits are zero then XlnxZynqMPCAN is set in normal mode. + */ + ARRAY_FIELD_DP32(s->regs, STATUS_REGISTER, NORMAL, 1); + /* Set wakeup interrupt bit. */ + ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, WKUP, + wakeup_irq_val); + } + + can_update_irq(s); +} + +static void can_exit_sleep_mode(XlnxZynqMPCANState *s) +{ + ARRAY_FIELD_DP32(s->regs, MODE_SELECT_REGISTER, SLEEP, 0); + update_status_register_mode_bits(s); +} + +static void generate_frame(qemu_can_frame *frame, uint32_t *data) +{ + frame->can_id = data[0]; + frame->can_dlc = FIELD_EX32(data[1], TXFIFO_DLC, DLC); + + frame->data[0] = FIELD_EX32(data[2], TXFIFO_DATA1, DB3); + frame->data[1] = FIELD_EX32(data[2], TXFIFO_DATA1, DB2); + frame->data[2] = FIELD_EX32(data[2], TXFIFO_DATA1, DB1); + frame->data[3] = FIELD_EX32(data[2], TXFIFO_DATA1, DB0); + + frame->data[4] = FIELD_EX32(data[3], TXFIFO_DATA2, DB7); + frame->data[5] = FIELD_EX32(data[3], TXFIFO_DATA2, DB6); + frame->data[6] = FIELD_EX32(data[3], TXFIFO_DATA2, DB5); + frame->data[7] = FIELD_EX32(data[3], TXFIFO_DATA2, DB4); +} + +static bool tx_ready_check(XlnxZynqMPCANState *s) +{ + if (ARRAY_FIELD_EX32(s->regs, SOFTWARE_RESET_REGISTER, SRST)) { + g_autofree char *path = object_get_canonical_path(OBJECT(s)); + + qemu_log_mask(LOG_GUEST_ERROR, "%s: Attempting to transfer data while" + " data while controller is in reset mode.\n", + path); + return false; + } + + if (ARRAY_FIELD_EX32(s->regs, SOFTWARE_RESET_REGISTER, CEN) == 0) { + g_autofree char *path = object_get_canonical_path(OBJECT(s)); + + qemu_log_mask(LOG_GUEST_ERROR, "%s: Attempting to transfer" + " data while controller is in configuration mode. 
Reset" + " the core so operations can start fresh.\n", + path); + return false; + } + + if (ARRAY_FIELD_EX32(s->regs, STATUS_REGISTER, SNOOP)) { + g_autofree char *path = object_get_canonical_path(OBJECT(s)); + + qemu_log_mask(LOG_GUEST_ERROR, "%s: Attempting to transfer" + " data while controller is in SNOOP MODE.\n", + path); + return false; + } + + return true; +} + +static void transfer_fifo(XlnxZynqMPCANState *s, Fifo32 *fifo) +{ + qemu_can_frame frame; + uint32_t data[CAN_FRAME_SIZE]; + int i; + bool can_tx = tx_ready_check(s); + + if (!can_tx) { + g_autofree char *path = object_get_canonical_path(OBJECT(s)); + + qemu_log_mask(LOG_GUEST_ERROR, "%s: Controller is not enabled for data" + " transfer.\n", path); + can_update_irq(s); + return; + } + + while (!fifo32_is_empty(fifo)) { + for (i = 0; i < CAN_FRAME_SIZE; i++) { + data[i] = fifo32_pop(fifo); + } + + if (ARRAY_FIELD_EX32(s->regs, STATUS_REGISTER, LBACK)) { + /* + * Controller is in loopback. In Loopback mode, the CAN core + * transmits a recessive bitstream on to the XlnxZynqMPCAN Bus. + * Any message transmitted is looped back to the RX line and + * acknowledged. The XlnxZynqMPCAN core receives any message + * that it transmits. + */ + if (fifo32_is_full(&s->rx_fifo)) { + ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, RXOFLW, 1); + } else { + for (i = 0; i < CAN_FRAME_SIZE; i++) { + fifo32_push(&s->rx_fifo, data[i]); + } + + ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, RXOK, 1); + } + } else { + /* Normal mode Tx. */ + generate_frame(&frame, data); + + trace_xlnx_can_tx_data(frame.can_id, frame.can_dlc, + frame.data[0], frame.data[1], + frame.data[2], frame.data[3], + frame.data[4], frame.data[5], + frame.data[6], frame.data[7]); + can_bus_client_send(&s->bus_client, &frame, 1); + } + } + + ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, TXOK, 1); + ARRAY_FIELD_DP32(s->regs, STATUS_REGISTER, TXBFLL, 0); + + if (ARRAY_FIELD_EX32(s->regs, STATUS_REGISTER, SLEEP)) { + can_exit_sleep_mode(s); + } + + can_update_irq(s); +} + +static uint64_t can_srr_pre_write(RegisterInfo *reg, uint64_t val) +{ + XlnxZynqMPCANState *s = XLNX_ZYNQMP_CAN(reg->opaque); + + ARRAY_FIELD_DP32(s->regs, SOFTWARE_RESET_REGISTER, CEN, + FIELD_EX32(val, SOFTWARE_RESET_REGISTER, CEN)); + + if (FIELD_EX32(val, SOFTWARE_RESET_REGISTER, SRST)) { + trace_xlnx_can_reset(val); + + /* First, core will do software reset then will enter in config mode. */ + can_config_reset(s); + } + + if (ARRAY_FIELD_EX32(s->regs, SOFTWARE_RESET_REGISTER, CEN) == 0) { + can_config_mode(s); + } else { + /* + * Leave config mode. Now XlnxZynqMPCAN core will enter normal, + * sleep, snoop or loopback mode depending upon LBACK, SLEEP, SNOOP + * register states. + */ + ARRAY_FIELD_DP32(s->regs, STATUS_REGISTER, CONFIG, 0); + + ptimer_transaction_begin(s->can_timer); + ptimer_set_count(s->can_timer, 0); + ptimer_transaction_commit(s->can_timer); + + /* XlnxZynqMPCAN is out of config mode. It will send pending data. */ + transfer_fifo(s, &s->txhpb_fifo); + transfer_fifo(s, &s->tx_fifo); + } + + update_status_register_mode_bits(s); + + return s->regs[R_SOFTWARE_RESET_REGISTER]; +} + +static uint64_t can_msr_pre_write(RegisterInfo *reg, uint64_t val) +{ + XlnxZynqMPCANState *s = XLNX_ZYNQMP_CAN(reg->opaque); + uint8_t multi_mode; + + /* + * Multiple mode set check. This is done to make sure user doesn't set + * multiple modes. 
+ */ + multi_mode = FIELD_EX32(val, MODE_SELECT_REGISTER, LBACK) + + FIELD_EX32(val, MODE_SELECT_REGISTER, SLEEP) + + FIELD_EX32(val, MODE_SELECT_REGISTER, SNOOP); + + if (multi_mode > 1) { + g_autofree char *path = object_get_canonical_path(OBJECT(s)); + + qemu_log_mask(LOG_GUEST_ERROR, "%s: Attempting to config" + " several modes simultaneously. One mode will be selected" + " according to their priority: LBACK > SLEEP > SNOOP.\n", + path); + } + + if (ARRAY_FIELD_EX32(s->regs, SOFTWARE_RESET_REGISTER, CEN) == 0) { + /* We are in configuration mode, any mode can be selected. */ + s->regs[R_MODE_SELECT_REGISTER] = val; + } else { + bool sleep_mode_bit = FIELD_EX32(val, MODE_SELECT_REGISTER, SLEEP); + + ARRAY_FIELD_DP32(s->regs, MODE_SELECT_REGISTER, SLEEP, sleep_mode_bit); + + if (FIELD_EX32(val, MODE_SELECT_REGISTER, LBACK)) { + g_autofree char *path = object_get_canonical_path(OBJECT(s)); + + qemu_log_mask(LOG_GUEST_ERROR, "%s: Attempting to set" + " LBACK mode without setting CEN bit as 0.\n", + path); + } else if (FIELD_EX32(val, MODE_SELECT_REGISTER, SNOOP)) { + g_autofree char *path = object_get_canonical_path(OBJECT(s)); + + qemu_log_mask(LOG_GUEST_ERROR, "%s: Attempting to set" + " SNOOP mode without setting CEN bit as 0.\n", + path); + } + + update_status_register_mode_bits(s); + } + + return s->regs[R_MODE_SELECT_REGISTER]; +} + +static uint64_t can_brpr_pre_write(RegisterInfo *reg, uint64_t val) +{ + XlnxZynqMPCANState *s = XLNX_ZYNQMP_CAN(reg->opaque); + + /* Only allow writes when in config mode. */ + if (ARRAY_FIELD_EX32(s->regs, SOFTWARE_RESET_REGISTER, CEN)) { + return s->regs[R_ARBITRATION_PHASE_BAUD_RATE_PRESCALER_REGISTER]; + } + + return val; +} + +static uint64_t can_btr_pre_write(RegisterInfo *reg, uint64_t val) +{ + XlnxZynqMPCANState *s = XLNX_ZYNQMP_CAN(reg->opaque); + + /* Only allow writes when in config mode. */ + if (ARRAY_FIELD_EX32(s->regs, SOFTWARE_RESET_REGISTER, CEN)) { + return s->regs[R_ARBITRATION_PHASE_BIT_TIMING_REGISTER]; + } + + return val; +} + +static uint64_t can_tcr_pre_write(RegisterInfo *reg, uint64_t val) +{ + XlnxZynqMPCANState *s = XLNX_ZYNQMP_CAN(reg->opaque); + + if (FIELD_EX32(val, TIMESTAMP_REGISTER, CTS)) { + ptimer_transaction_begin(s->can_timer); + ptimer_set_count(s->can_timer, 0); + ptimer_transaction_commit(s->can_timer); + } + + return 0; +} + +static void update_rx_fifo(XlnxZynqMPCANState *s, const qemu_can_frame *frame) +{ + bool filter_pass = false; + uint16_t timestamp = 0; + + /* If no filter is enabled. Message will be stored in FIFO. */ + if (!((ARRAY_FIELD_EX32(s->regs, AFR, UAF1)) | + (ARRAY_FIELD_EX32(s->regs, AFR, UAF2)) | + (ARRAY_FIELD_EX32(s->regs, AFR, UAF3)) | + (ARRAY_FIELD_EX32(s->regs, AFR, UAF4)))) { + filter_pass = true; + } + + /* + * Messages that pass any of the acceptance filters will be stored in + * the RX FIFO. 
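+ * A frame passes filter n iff (AFMRn & can_id) == (AFMRn & AFIRn),
+ * i.e. only the ID bits selected by the mask take part in the compare.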
+ */ + if (ARRAY_FIELD_EX32(s->regs, AFR, UAF1)) { + uint32_t id_masked = s->regs[R_AFMR1] & frame->can_id; + uint32_t filter_id_masked = s->regs[R_AFMR1] & s->regs[R_AFIR1]; + + if (filter_id_masked == id_masked) { + filter_pass = true; + } + } + + if (ARRAY_FIELD_EX32(s->regs, AFR, UAF2)) { + uint32_t id_masked = s->regs[R_AFMR2] & frame->can_id; + uint32_t filter_id_masked = s->regs[R_AFMR2] & s->regs[R_AFIR2]; + + if (filter_id_masked == id_masked) { + filter_pass = true; + } + } + + if (ARRAY_FIELD_EX32(s->regs, AFR, UAF3)) { + uint32_t id_masked = s->regs[R_AFMR3] & frame->can_id; + uint32_t filter_id_masked = s->regs[R_AFMR3] & s->regs[R_AFIR3]; + + if (filter_id_masked == id_masked) { + filter_pass = true; + } + } + + if (ARRAY_FIELD_EX32(s->regs, AFR, UAF4)) { + uint32_t id_masked = s->regs[R_AFMR4] & frame->can_id; + uint32_t filter_id_masked = s->regs[R_AFMR4] & s->regs[R_AFIR4]; + + if (filter_id_masked == id_masked) { + filter_pass = true; + } + } + + if (!filter_pass) { + trace_xlnx_can_rx_fifo_filter_reject(frame->can_id, frame->can_dlc); + return; + } + + /* Store the message in fifo if it passed through any of the filters. */ + if (filter_pass && frame->can_dlc <= MAX_DLC) { + + if (fifo32_is_full(&s->rx_fifo)) { + ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, RXOFLW, 1); + } else { + timestamp = CAN_TIMER_MAX - ptimer_get_count(s->can_timer); + + fifo32_push(&s->rx_fifo, frame->can_id); + + fifo32_push(&s->rx_fifo, deposit32(0, R_RXFIFO_DLC_DLC_SHIFT, + R_RXFIFO_DLC_DLC_LENGTH, + frame->can_dlc) | + deposit32(0, R_RXFIFO_DLC_RXT_SHIFT, + R_RXFIFO_DLC_RXT_LENGTH, + timestamp)); + + /* First 32 bit of the data. */ + fifo32_push(&s->rx_fifo, deposit32(0, R_TXFIFO_DATA1_DB3_SHIFT, + R_TXFIFO_DATA1_DB3_LENGTH, + frame->data[0]) | + deposit32(0, R_TXFIFO_DATA1_DB2_SHIFT, + R_TXFIFO_DATA1_DB2_LENGTH, + frame->data[1]) | + deposit32(0, R_TXFIFO_DATA1_DB1_SHIFT, + R_TXFIFO_DATA1_DB1_LENGTH, + frame->data[2]) | + deposit32(0, R_TXFIFO_DATA1_DB0_SHIFT, + R_TXFIFO_DATA1_DB0_LENGTH, + frame->data[3])); + /* Last 32 bit of the data. 
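+   Byte order mirrors generate_frame(): frame->data[4] lands in DB7
+   (bits 0-7) and frame->data[7] in DB4 (bits 24-31).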
*/ + fifo32_push(&s->rx_fifo, deposit32(0, R_TXFIFO_DATA2_DB7_SHIFT, + R_TXFIFO_DATA2_DB7_LENGTH, + frame->data[4]) | + deposit32(0, R_TXFIFO_DATA2_DB6_SHIFT, + R_TXFIFO_DATA2_DB6_LENGTH, + frame->data[5]) | + deposit32(0, R_TXFIFO_DATA2_DB5_SHIFT, + R_TXFIFO_DATA2_DB5_LENGTH, + frame->data[6]) | + deposit32(0, R_TXFIFO_DATA2_DB4_SHIFT, + R_TXFIFO_DATA2_DB4_LENGTH, + frame->data[7])); + + ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, RXOK, 1); + trace_xlnx_can_rx_data(frame->can_id, frame->can_dlc, + frame->data[0], frame->data[1], + frame->data[2], frame->data[3], + frame->data[4], frame->data[5], + frame->data[6], frame->data[7]); + } + + can_update_irq(s); + } +} + +static uint64_t can_rxfifo_pre_read(RegisterInfo *reg, uint64_t val) +{ + XlnxZynqMPCANState *s = XLNX_ZYNQMP_CAN(reg->opaque); + + if (!fifo32_is_empty(&s->rx_fifo)) { + val = fifo32_pop(&s->rx_fifo); + } else { + ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, RXUFLW, 1); + } + + can_update_irq(s); + return val; +} + +static void can_filter_enable_post_write(RegisterInfo *reg, uint64_t val) +{ + XlnxZynqMPCANState *s = XLNX_ZYNQMP_CAN(reg->opaque); + + if (ARRAY_FIELD_EX32(s->regs, AFR, UAF1) && + ARRAY_FIELD_EX32(s->regs, AFR, UAF2) && + ARRAY_FIELD_EX32(s->regs, AFR, UAF3) && + ARRAY_FIELD_EX32(s->regs, AFR, UAF4)) { + ARRAY_FIELD_DP32(s->regs, STATUS_REGISTER, ACFBSY, 1); + } else { + ARRAY_FIELD_DP32(s->regs, STATUS_REGISTER, ACFBSY, 0); + } +} + +static uint64_t can_filter_mask_pre_write(RegisterInfo *reg, uint64_t val) +{ + XlnxZynqMPCANState *s = XLNX_ZYNQMP_CAN(reg->opaque); + uint32_t reg_idx = (reg->access->addr) / 4; + uint32_t filter_number = (reg_idx - R_AFMR1) / 2; + + /* modify an acceptance filter, the corresponding UAF bit should be '0'. */ + if (!(s->regs[R_AFR] & (1 << filter_number))) { + s->regs[reg_idx] = val; + + trace_xlnx_can_filter_mask_pre_write(filter_number, s->regs[reg_idx]); + } else { + g_autofree char *path = object_get_canonical_path(OBJECT(s)); + + qemu_log_mask(LOG_GUEST_ERROR, "%s: Acceptance filter %d" + " mask is not set as corresponding UAF bit is not 0.\n", + path, filter_number + 1); + } + + return s->regs[reg_idx]; +} + +static uint64_t can_filter_id_pre_write(RegisterInfo *reg, uint64_t val) +{ + XlnxZynqMPCANState *s = XLNX_ZYNQMP_CAN(reg->opaque); + uint32_t reg_idx = (reg->access->addr) / 4; + uint32_t filter_number = (reg_idx - R_AFIR1) / 2; + + if (!(s->regs[R_AFR] & (1 << filter_number))) { + s->regs[reg_idx] = val; + + trace_xlnx_can_filter_id_pre_write(filter_number, s->regs[reg_idx]); + } else { + g_autofree char *path = object_get_canonical_path(OBJECT(s)); + + qemu_log_mask(LOG_GUEST_ERROR, "%s: Acceptance filter %d" + " id is not set as corresponding UAF bit is not 0.\n", + path, filter_number + 1); + } + + return s->regs[reg_idx]; +} + +static void can_tx_post_write(RegisterInfo *reg, uint64_t val) +{ + XlnxZynqMPCANState *s = XLNX_ZYNQMP_CAN(reg->opaque); + + bool is_txhpb = reg->access->addr > A_TXFIFO_DATA2; + + bool initiate_transfer = (reg->access->addr == A_TXFIFO_DATA2) || + (reg->access->addr == A_TXHPB_DATA2); + + Fifo32 *f = is_txhpb ? &s->txhpb_fifo : &s->tx_fifo; + + if (!fifo32_is_full(f)) { + fifo32_push(f, val); + } else { + g_autofree char *path = object_get_canonical_path(OBJECT(s)); + + qemu_log_mask(LOG_GUEST_ERROR, "%s: TX FIFO is full.\n", path); + } + + /* Initiate the message send if TX register is written. 
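+   Only the write to the last data word (TXFIFO_DATA2 / TXHPB_DATA2)
+   starts the transfer, and only while SOFTWARE_RESET_REGISTER.CEN is set.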
*/ + if (initiate_transfer && + ARRAY_FIELD_EX32(s->regs, SOFTWARE_RESET_REGISTER, CEN)) { + transfer_fifo(s, f); + } + + can_update_irq(s); +} + +static const RegisterAccessInfo can_regs_info[] = { + { .name = "SOFTWARE_RESET_REGISTER", + .addr = A_SOFTWARE_RESET_REGISTER, + .rsvd = 0xfffffffc, + .pre_write = can_srr_pre_write, + },{ .name = "MODE_SELECT_REGISTER", + .addr = A_MODE_SELECT_REGISTER, + .rsvd = 0xfffffff8, + .pre_write = can_msr_pre_write, + },{ .name = "ARBITRATION_PHASE_BAUD_RATE_PRESCALER_REGISTER", + .addr = A_ARBITRATION_PHASE_BAUD_RATE_PRESCALER_REGISTER, + .rsvd = 0xffffff00, + .pre_write = can_brpr_pre_write, + },{ .name = "ARBITRATION_PHASE_BIT_TIMING_REGISTER", + .addr = A_ARBITRATION_PHASE_BIT_TIMING_REGISTER, + .rsvd = 0xfffffe00, + .pre_write = can_btr_pre_write, + },{ .name = "ERROR_COUNTER_REGISTER", + .addr = A_ERROR_COUNTER_REGISTER, + .rsvd = 0xffff0000, + .ro = 0xffffffff, + },{ .name = "ERROR_STATUS_REGISTER", + .addr = A_ERROR_STATUS_REGISTER, + .rsvd = 0xffffffe0, + .w1c = 0x1f, + },{ .name = "STATUS_REGISTER", .addr = A_STATUS_REGISTER, + .reset = 0x1, + .rsvd = 0xffffe000, + .ro = 0x1fff, + },{ .name = "INTERRUPT_STATUS_REGISTER", + .addr = A_INTERRUPT_STATUS_REGISTER, + .reset = 0x6000, + .rsvd = 0xffff8000, + .ro = 0x7fff, + },{ .name = "INTERRUPT_ENABLE_REGISTER", + .addr = A_INTERRUPT_ENABLE_REGISTER, + .rsvd = 0xffff8000, + .post_write = can_ier_post_write, + },{ .name = "INTERRUPT_CLEAR_REGISTER", + .addr = A_INTERRUPT_CLEAR_REGISTER, + .rsvd = 0xffff8000, + .pre_write = can_icr_pre_write, + },{ .name = "TIMESTAMP_REGISTER", + .addr = A_TIMESTAMP_REGISTER, + .rsvd = 0xfffffffe, + .pre_write = can_tcr_pre_write, + },{ .name = "WIR", .addr = A_WIR, + .reset = 0x3f3f, + .rsvd = 0xffff0000, + },{ .name = "TXFIFO_ID", .addr = A_TXFIFO_ID, + .post_write = can_tx_post_write, + },{ .name = "TXFIFO_DLC", .addr = A_TXFIFO_DLC, + .rsvd = 0xfffffff, + .post_write = can_tx_post_write, + },{ .name = "TXFIFO_DATA1", .addr = A_TXFIFO_DATA1, + .post_write = can_tx_post_write, + },{ .name = "TXFIFO_DATA2", .addr = A_TXFIFO_DATA2, + .post_write = can_tx_post_write, + },{ .name = "TXHPB_ID", .addr = A_TXHPB_ID, + .post_write = can_tx_post_write, + },{ .name = "TXHPB_DLC", .addr = A_TXHPB_DLC, + .rsvd = 0xfffffff, + .post_write = can_tx_post_write, + },{ .name = "TXHPB_DATA1", .addr = A_TXHPB_DATA1, + .post_write = can_tx_post_write, + },{ .name = "TXHPB_DATA2", .addr = A_TXHPB_DATA2, + .post_write = can_tx_post_write, + },{ .name = "RXFIFO_ID", .addr = A_RXFIFO_ID, + .ro = 0xffffffff, + .post_read = can_rxfifo_pre_read, + },{ .name = "RXFIFO_DLC", .addr = A_RXFIFO_DLC, + .rsvd = 0xfff0000, + .post_read = can_rxfifo_pre_read, + },{ .name = "RXFIFO_DATA1", .addr = A_RXFIFO_DATA1, + .post_read = can_rxfifo_pre_read, + },{ .name = "RXFIFO_DATA2", .addr = A_RXFIFO_DATA2, + .post_read = can_rxfifo_pre_read, + },{ .name = "AFR", .addr = A_AFR, + .rsvd = 0xfffffff0, + .post_write = can_filter_enable_post_write, + },{ .name = "AFMR1", .addr = A_AFMR1, + .pre_write = can_filter_mask_pre_write, + },{ .name = "AFIR1", .addr = A_AFIR1, + .pre_write = can_filter_id_pre_write, + },{ .name = "AFMR2", .addr = A_AFMR2, + .pre_write = can_filter_mask_pre_write, + },{ .name = "AFIR2", .addr = A_AFIR2, + .pre_write = can_filter_id_pre_write, + },{ .name = "AFMR3", .addr = A_AFMR3, + .pre_write = can_filter_mask_pre_write, + },{ .name = "AFIR3", .addr = A_AFIR3, + .pre_write = can_filter_id_pre_write, + },{ .name = "AFMR4", .addr = A_AFMR4, + .pre_write = can_filter_mask_pre_write, + 
},{ .name = "AFIR4", .addr = A_AFIR4, + .pre_write = can_filter_id_pre_write, + } +}; + +static void xlnx_zynqmp_can_ptimer_cb(void *opaque) +{ + /* No action required on the timer rollover. */ +} + +static const MemoryRegionOps can_ops = { + .read = register_read_memory, + .write = register_write_memory, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +static void xlnx_zynqmp_can_reset_init(Object *obj, ResetType type) +{ + XlnxZynqMPCANState *s = XLNX_ZYNQMP_CAN(obj); + unsigned int i; + + for (i = R_RXFIFO_ID; i < ARRAY_SIZE(s->reg_info); ++i) { + register_reset(&s->reg_info[i]); + } + + ptimer_transaction_begin(s->can_timer); + ptimer_set_count(s->can_timer, 0); + ptimer_transaction_commit(s->can_timer); +} + +static void xlnx_zynqmp_can_reset_hold(Object *obj) +{ + XlnxZynqMPCANState *s = XLNX_ZYNQMP_CAN(obj); + unsigned int i; + + for (i = 0; i < R_RXFIFO_ID; ++i) { + register_reset(&s->reg_info[i]); + } + + /* + * Reset FIFOs when CAN model is reset. This will clear the fifo writes + * done by post_write which gets called from register_reset function, + * post_write handle will not be able to trigger tx because CAN will be + * disabled when software_reset_register is cleared first. + */ + fifo32_reset(&s->rx_fifo); + fifo32_reset(&s->tx_fifo); + fifo32_reset(&s->txhpb_fifo); +} + +static bool xlnx_zynqmp_can_can_receive(CanBusClientState *client) +{ + XlnxZynqMPCANState *s = container_of(client, XlnxZynqMPCANState, + bus_client); + + if (ARRAY_FIELD_EX32(s->regs, SOFTWARE_RESET_REGISTER, SRST)) { + g_autofree char *path = object_get_canonical_path(OBJECT(s)); + + qemu_log_mask(LOG_GUEST_ERROR, "%s: Controller is in reset state.\n", + path); + return false; + } + + if ((ARRAY_FIELD_EX32(s->regs, SOFTWARE_RESET_REGISTER, CEN)) == 0) { + g_autofree char *path = object_get_canonical_path(OBJECT(s)); + + qemu_log_mask(LOG_GUEST_ERROR, "%s: Controller is disabled. Incoming" + " messages will be discarded.\n", path); + return false; + } + + return true; +} + +static ssize_t xlnx_zynqmp_can_receive(CanBusClientState *client, + const qemu_can_frame *buf, size_t buf_size) { + XlnxZynqMPCANState *s = container_of(client, XlnxZynqMPCANState, + bus_client); + const qemu_can_frame *frame = buf; + + if (buf_size <= 0) { + g_autofree char *path = object_get_canonical_path(OBJECT(s)); + + qemu_log_mask(LOG_GUEST_ERROR, "%s: Error in the data received.\n", + path); + return 0; + } + + if (ARRAY_FIELD_EX32(s->regs, STATUS_REGISTER, SNOOP)) { + /* Snoop Mode: Just keep the data. no response back. */ + update_rx_fifo(s, frame); + } else if ((ARRAY_FIELD_EX32(s->regs, STATUS_REGISTER, SLEEP))) { + /* + * XlnxZynqMPCAN is in sleep mode. Any data on bus will bring it to wake + * up state. + */ + can_exit_sleep_mode(s); + update_rx_fifo(s, frame); + } else if ((ARRAY_FIELD_EX32(s->regs, STATUS_REGISTER, SLEEP)) == 0) { + update_rx_fifo(s, frame); + } else { + /* + * XlnxZynqMPCAN will not participate in normal bus communication + * and will not receive any messages transmitted by other CAN nodes. 
+ */ + trace_xlnx_can_rx_discard(s->regs[R_STATUS_REGISTER]); + } + + return 1; +} + +static CanBusClientInfo can_xilinx_bus_client_info = { + .can_receive = xlnx_zynqmp_can_can_receive, + .receive = xlnx_zynqmp_can_receive, +}; + +static int xlnx_zynqmp_can_connect_to_bus(XlnxZynqMPCANState *s, + CanBusState *bus) +{ + s->bus_client.info = &can_xilinx_bus_client_info; + + if (can_bus_insert_client(bus, &s->bus_client) < 0) { + return -1; + } + return 0; +} + +static void xlnx_zynqmp_can_realize(DeviceState *dev, Error **errp) +{ + XlnxZynqMPCANState *s = XLNX_ZYNQMP_CAN(dev); + + if (s->canbus) { + if (xlnx_zynqmp_can_connect_to_bus(s, s->canbus) < 0) { + g_autofree char *path = object_get_canonical_path(OBJECT(s)); + + error_setg(errp, "%s: xlnx_zynqmp_can_connect_to_bus" + " failed.", path); + return; + } + } + + /* Create RX FIFO, TXFIFO, TXHPB storage. */ + fifo32_create(&s->rx_fifo, RXFIFO_SIZE); + fifo32_create(&s->tx_fifo, RXFIFO_SIZE); + fifo32_create(&s->txhpb_fifo, CAN_FRAME_SIZE); + + /* Allocate a new timer. */ + s->can_timer = ptimer_init(xlnx_zynqmp_can_ptimer_cb, s, + PTIMER_POLICY_DEFAULT); + + ptimer_transaction_begin(s->can_timer); + + ptimer_set_freq(s->can_timer, s->cfg.ext_clk_freq); + ptimer_set_limit(s->can_timer, CAN_TIMER_MAX, 1); + ptimer_run(s->can_timer, 0); + ptimer_transaction_commit(s->can_timer); +} + +static void xlnx_zynqmp_can_init(Object *obj) +{ + XlnxZynqMPCANState *s = XLNX_ZYNQMP_CAN(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + + RegisterInfoArray *reg_array; + + memory_region_init(&s->iomem, obj, TYPE_XLNX_ZYNQMP_CAN, + XLNX_ZYNQMP_CAN_R_MAX * 4); + reg_array = register_init_block32(DEVICE(obj), can_regs_info, + ARRAY_SIZE(can_regs_info), + s->reg_info, s->regs, + &can_ops, + XLNX_ZYNQMP_CAN_ERR_DEBUG, + XLNX_ZYNQMP_CAN_R_MAX * 4); + + memory_region_add_subregion(&s->iomem, 0x00, &reg_array->mem); + sysbus_init_mmio(sbd, &s->iomem); + sysbus_init_irq(SYS_BUS_DEVICE(obj), &s->irq); +} + +static const VMStateDescription vmstate_can = { + .name = TYPE_XLNX_ZYNQMP_CAN, + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_FIFO32(rx_fifo, XlnxZynqMPCANState), + VMSTATE_FIFO32(tx_fifo, XlnxZynqMPCANState), + VMSTATE_FIFO32(txhpb_fifo, XlnxZynqMPCANState), + VMSTATE_UINT32_ARRAY(regs, XlnxZynqMPCANState, XLNX_ZYNQMP_CAN_R_MAX), + VMSTATE_PTIMER(can_timer, XlnxZynqMPCANState), + VMSTATE_END_OF_LIST(), + } +}; + +static Property xlnx_zynqmp_can_properties[] = { + DEFINE_PROP_UINT32("ext_clk_freq", XlnxZynqMPCANState, cfg.ext_clk_freq, + CAN_DEFAULT_CLOCK), + DEFINE_PROP_LINK("canbus", XlnxZynqMPCANState, canbus, TYPE_CAN_BUS, + CanBusState *), + DEFINE_PROP_END_OF_LIST(), +}; + +static void xlnx_zynqmp_can_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + ResettableClass *rc = RESETTABLE_CLASS(klass); + + rc->phases.enter = xlnx_zynqmp_can_reset_init; + rc->phases.hold = xlnx_zynqmp_can_reset_hold; + dc->realize = xlnx_zynqmp_can_realize; + device_class_set_props(dc, xlnx_zynqmp_can_properties); + dc->vmsd = &vmstate_can; +} + +static const TypeInfo can_info = { + .name = TYPE_XLNX_ZYNQMP_CAN, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(XlnxZynqMPCANState), + .class_init = xlnx_zynqmp_can_class_init, + .instance_init = xlnx_zynqmp_can_init, +}; + +static void can_register_types(void) +{ + type_register_static(&can_info); +} + +type_init(can_register_types) diff --git a/hw/net/dp8393x.c b/hw/net/dp8393x.c new file mode 100644 index 000000000..45b954e46 --- /dev/null +++
b/hw/net/dp8393x.c @@ -0,0 +1,996 @@ +/* + * QEMU NS SONIC DP8393x netcard + * + * Copyright (c) 2008-2009 Herve Poussineau + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see <http://www.gnu.org/licenses/>. + */ + +#include "qemu/osdep.h" +#include "hw/irq.h" +#include "hw/qdev-properties.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "net/net.h" +#include "qapi/error.h" +#include "qemu/module.h" +#include "qemu/timer.h" +#include <zlib.h> +#include "qom/object.h" +#include "trace.h" + +static const char *reg_names[] = { + "CR", "DCR", "RCR", "TCR", "IMR", "ISR", "UTDA", "CTDA", + "TPS", "TFC", "TSA0", "TSA1", "TFS", "URDA", "CRDA", "CRBA0", + "CRBA1", "RBWC0", "RBWC1", "EOBC", "URRA", "RSA", "REA", "RRP", + "RWP", "TRBA0", "TRBA1", "0x1b", "0x1c", "0x1d", "0x1e", "LLFA", + "TTDA", "CEP", "CAP2", "CAP1", "CAP0", "CE", "CDP", "CDC", + "SR", "WT0", "WT1", "RSC", "CRCT", "FAET", "MPT", "MDT", + "0x30", "0x31", "0x32", "0x33", "0x34", "0x35", "0x36", "0x37", + "0x38", "0x39", "0x3a", "0x3b", "0x3c", "0x3d", "0x3e", "DCR2" }; + +#define SONIC_CR 0x00 +#define SONIC_DCR 0x01 +#define SONIC_RCR 0x02 +#define SONIC_TCR 0x03 +#define SONIC_IMR 0x04 +#define SONIC_ISR 0x05 +#define SONIC_UTDA 0x06 +#define SONIC_CTDA 0x07 +#define SONIC_TPS 0x08 +#define SONIC_TFC 0x09 +#define SONIC_TSA0 0x0a +#define SONIC_TSA1 0x0b +#define SONIC_TFS 0x0c +#define SONIC_URDA 0x0d +#define SONIC_CRDA 0x0e +#define SONIC_CRBA0 0x0f +#define SONIC_CRBA1 0x10 +#define SONIC_RBWC0 0x11 +#define SONIC_RBWC1 0x12 +#define SONIC_EOBC 0x13 +#define SONIC_URRA 0x14 +#define SONIC_RSA 0x15 +#define SONIC_REA 0x16 +#define SONIC_RRP 0x17 +#define SONIC_RWP 0x18 +#define SONIC_TRBA0 0x19 +#define SONIC_TRBA1 0x1a +#define SONIC_LLFA 0x1f +#define SONIC_TTDA 0x20 +#define SONIC_CEP 0x21 +#define SONIC_CAP2 0x22 +#define SONIC_CAP1 0x23 +#define SONIC_CAP0 0x24 +#define SONIC_CE 0x25 +#define SONIC_CDP 0x26 +#define SONIC_CDC 0x27 +#define SONIC_SR 0x28 +#define SONIC_WT0 0x29 +#define SONIC_WT1 0x2a +#define SONIC_RSC 0x2b +#define SONIC_CRCT 0x2c +#define SONIC_FAET 0x2d +#define SONIC_MPT 0x2e +#define SONIC_MDT 0x2f +#define SONIC_DCR2 0x3f +#define SONIC_REG_COUNT 0x40 + +#define SONIC_CR_HTX 0x0001 +#define SONIC_CR_TXP 0x0002 +#define SONIC_CR_RXDIS 0x0004 +#define SONIC_CR_RXEN 0x0008 +#define SONIC_CR_STP 0x0010 +#define SONIC_CR_ST 0x0020 +#define SONIC_CR_RST 0x0080 +#define SONIC_CR_RRRA 0x0100 +#define SONIC_CR_LCAM 0x0200 +#define SONIC_CR_MASK 0x03bf + +#define SONIC_DCR_DW 0x0020 +#define SONIC_DCR_LBR 0x2000 +#define SONIC_DCR_EXBUS 0x8000 + +#define SONIC_RCR_PRX 0x0001 +#define SONIC_RCR_LBK 0x0002 +#define SONIC_RCR_FAER 0x0004 +#define SONIC_RCR_CRCR 0x0008 +#define SONIC_RCR_CRS 0x0020 +#define SONIC_RCR_LPKT 0x0040 +#define SONIC_RCR_BC 0x0080 +#define SONIC_RCR_MC 0x0100 +#define SONIC_RCR_LB0 0x0200 +#define SONIC_RCR_LB1 0x0400 +#define SONIC_RCR_AMC 0x0800 +#define SONIC_RCR_PRO 0x1000 +#define SONIC_RCR_BRD 0x2000 +#define SONIC_RCR_RNT 0x4000 +
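+/* Transmit control register (TCR) bit masks */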
+#define SONIC_TCR_PTX 0x0001 +#define SONIC_TCR_BCM 0x0002 +#define SONIC_TCR_FU 0x0004 +#define SONIC_TCR_EXC 0x0040 +#define SONIC_TCR_CRSL 0x0080 +#define SONIC_TCR_NCRS 0x0100 +#define SONIC_TCR_EXD 0x0400 +#define SONIC_TCR_CRCI 0x2000 +#define SONIC_TCR_PINT 0x8000 + +#define SONIC_ISR_RBAE 0x0010 +#define SONIC_ISR_RBE 0x0020 +#define SONIC_ISR_RDE 0x0040 +#define SONIC_ISR_TC 0x0080 +#define SONIC_ISR_TXDN 0x0200 +#define SONIC_ISR_PKTRX 0x0400 +#define SONIC_ISR_PINT 0x0800 +#define SONIC_ISR_LCD 0x1000 + +#define SONIC_DESC_EOL 0x0001 +#define SONIC_DESC_ADDR 0xFFFE + +#define TYPE_DP8393X "dp8393x" +OBJECT_DECLARE_SIMPLE_TYPE(dp8393xState, DP8393X) + +struct dp8393xState { + SysBusDevice parent_obj; + + /* Hardware */ + uint8_t it_shift; + bool big_endian; + bool last_rba_is_full; + qemu_irq irq; + int irq_level; + QEMUTimer *watchdog; + int64_t wt_last_update; + NICConf conf; + NICState *nic; + MemoryRegion mmio; + + /* Registers */ + uint16_t cam[16][3]; + uint16_t regs[SONIC_REG_COUNT]; + + /* Temporaries */ + uint8_t tx_buffer[0x10000]; + int loopback_packet; + + /* Memory access */ + MemoryRegion *dma_mr; + AddressSpace as; +}; + +/* + * Accessor functions for values which are formed by + * concatenating two 16 bit device registers. By putting these + * in their own functions with a uint32_t return type we avoid the + * pitfall of implicit sign extension where ((x << 16) | y) is a + * signed 32 bit integer that might get sign-extended to a 64 bit integer. + */ +static uint32_t dp8393x_cdp(dp8393xState *s) +{ + return (s->regs[SONIC_URRA] << 16) | s->regs[SONIC_CDP]; +} + +static uint32_t dp8393x_crba(dp8393xState *s) +{ + return (s->regs[SONIC_CRBA1] << 16) | s->regs[SONIC_CRBA0]; +} + +static uint32_t dp8393x_crda(dp8393xState *s) +{ + return (s->regs[SONIC_URDA] << 16) | + (s->regs[SONIC_CRDA] & SONIC_DESC_ADDR); +} + +static uint32_t dp8393x_rbwc(dp8393xState *s) +{ + return (s->regs[SONIC_RBWC1] << 16) | s->regs[SONIC_RBWC0]; +} + +static uint32_t dp8393x_rrp(dp8393xState *s) +{ + return (s->regs[SONIC_URRA] << 16) | s->regs[SONIC_RRP]; +} + +static uint32_t dp8393x_tsa(dp8393xState *s) +{ + return (s->regs[SONIC_TSA1] << 16) | s->regs[SONIC_TSA0]; +} + +static uint32_t dp8393x_ttda(dp8393xState *s) +{ + return (s->regs[SONIC_UTDA] << 16) | + (s->regs[SONIC_TTDA] & SONIC_DESC_ADDR); +} + +static uint32_t dp8393x_wt(dp8393xState *s) +{ + return s->regs[SONIC_WT1] << 16 | s->regs[SONIC_WT0]; +} + +static uint16_t dp8393x_get(dp8393xState *s, hwaddr addr, int offset) +{ + const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED; + uint16_t val; + + if (s->regs[SONIC_DCR] & SONIC_DCR_DW) { + addr += offset << 2; + if (s->big_endian) { + val = address_space_ldl_be(&s->as, addr, attrs, NULL); + } else { + val = address_space_ldl_le(&s->as, addr, attrs, NULL); + } + } else { + addr += offset << 1; + if (s->big_endian) { + val = address_space_lduw_be(&s->as, addr, attrs, NULL); + } else { + val = address_space_lduw_le(&s->as, addr, attrs, NULL); + } + } + + return val; +} + +static void dp8393x_put(dp8393xState *s, + hwaddr addr, int offset, uint16_t val) +{ + const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED; + + if (s->regs[SONIC_DCR] & SONIC_DCR_DW) { + addr += offset << 2; + if (s->big_endian) { + address_space_stl_be(&s->as, addr, val, attrs, NULL); + } else { + address_space_stl_le(&s->as, addr, val, attrs, NULL); + } + } else { + addr += offset << 1; + if (s->big_endian) { + address_space_stw_be(&s->as, addr, val, attrs, NULL); + } else { + address_space_stw_le(&s->as, addr, val, 
attrs, NULL); + } + } +} + +static void dp8393x_update_irq(dp8393xState *s) +{ + int level = (s->regs[SONIC_IMR] & s->regs[SONIC_ISR]) ? 1 : 0; + + if (level != s->irq_level) { + s->irq_level = level; + if (level) { + trace_dp8393x_raise_irq(s->regs[SONIC_ISR]); + } else { + trace_dp8393x_lower_irq(); + } + } + + qemu_set_irq(s->irq, level); +} + +static void dp8393x_do_load_cam(dp8393xState *s) +{ + int width, size; + uint16_t index; + + width = (s->regs[SONIC_DCR] & SONIC_DCR_DW) ? 2 : 1; + size = sizeof(uint16_t) * 4 * width; + + while (s->regs[SONIC_CDC] & 0x1f) { + /* Fill current entry */ + index = dp8393x_get(s, dp8393x_cdp(s), 0) & 0xf; + s->cam[index][0] = dp8393x_get(s, dp8393x_cdp(s), 1); + s->cam[index][1] = dp8393x_get(s, dp8393x_cdp(s), 2); + s->cam[index][2] = dp8393x_get(s, dp8393x_cdp(s), 3); + trace_dp8393x_load_cam(index, + s->cam[index][0] >> 8, s->cam[index][0] & 0xff, + s->cam[index][1] >> 8, s->cam[index][1] & 0xff, + s->cam[index][2] >> 8, s->cam[index][2] & 0xff); + /* Move to next entry */ + s->regs[SONIC_CDC]--; + s->regs[SONIC_CDP] += size; + } + + /* Read CAM enable */ + s->regs[SONIC_CE] = dp8393x_get(s, dp8393x_cdp(s), 0); + trace_dp8393x_load_cam_done(s->regs[SONIC_CE]); + + /* Done */ + s->regs[SONIC_CR] &= ~SONIC_CR_LCAM; + s->regs[SONIC_ISR] |= SONIC_ISR_LCD; + dp8393x_update_irq(s); +} + +static void dp8393x_do_read_rra(dp8393xState *s) +{ + int width, size; + + /* Read memory */ + width = (s->regs[SONIC_DCR] & SONIC_DCR_DW) ? 2 : 1; + size = sizeof(uint16_t) * 4 * width; + + /* Update SONIC registers */ + s->regs[SONIC_CRBA0] = dp8393x_get(s, dp8393x_rrp(s), 0); + s->regs[SONIC_CRBA1] = dp8393x_get(s, dp8393x_rrp(s), 1); + s->regs[SONIC_RBWC0] = dp8393x_get(s, dp8393x_rrp(s), 2); + s->regs[SONIC_RBWC1] = dp8393x_get(s, dp8393x_rrp(s), 3); + trace_dp8393x_read_rra_regs(s->regs[SONIC_CRBA0], s->regs[SONIC_CRBA1], + s->regs[SONIC_RBWC0], s->regs[SONIC_RBWC1]); + + /* Go to next entry */ + s->regs[SONIC_RRP] += size; + + /* Handle wrap */ + if (s->regs[SONIC_RRP] == s->regs[SONIC_REA]) { + s->regs[SONIC_RRP] = s->regs[SONIC_RSA]; + } + + /* Warn the host if CRBA now has the last available resource */ + if (s->regs[SONIC_RRP] == s->regs[SONIC_RWP]) { + s->regs[SONIC_ISR] |= SONIC_ISR_RBE; + dp8393x_update_irq(s); + } + + /* Allow packet reception */ + s->last_rba_is_full = false; +} + +static void dp8393x_do_software_reset(dp8393xState *s) +{ + timer_del(s->watchdog); + + s->regs[SONIC_CR] &= ~(SONIC_CR_LCAM | SONIC_CR_RRRA | SONIC_CR_TXP | + SONIC_CR_HTX); + s->regs[SONIC_CR] |= SONIC_CR_RST | SONIC_CR_RXDIS; +} + +static void dp8393x_set_next_tick(dp8393xState *s) +{ + uint32_t ticks; + int64_t delay; + + if (s->regs[SONIC_CR] & SONIC_CR_STP) { + timer_del(s->watchdog); + return; + } + + ticks = dp8393x_wt(s); + s->wt_last_update = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + delay = NANOSECONDS_PER_SECOND * ticks / 5000000; + timer_mod(s->watchdog, s->wt_last_update + delay); +} + +static void dp8393x_update_wt_regs(dp8393xState *s) +{ + int64_t elapsed; + uint32_t val; + + if (s->regs[SONIC_CR] & SONIC_CR_STP) { + timer_del(s->watchdog); + return; + } + + elapsed = s->wt_last_update - qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + val = dp8393x_wt(s); + val -= elapsed / 5000000; + s->regs[SONIC_WT1] = (val >> 16) & 0xffff; + s->regs[SONIC_WT0] = (val >> 0) & 0xffff; + dp8393x_set_next_tick(s); + +} + +static void dp8393x_do_start_timer(dp8393xState *s) +{ + s->regs[SONIC_CR] &= ~SONIC_CR_STP; + dp8393x_set_next_tick(s); +} + +static void 
dp8393x_do_stop_timer(dp8393xState *s) +{ + s->regs[SONIC_CR] &= ~SONIC_CR_ST; + dp8393x_update_wt_regs(s); +} + +static bool dp8393x_can_receive(NetClientState *nc); + +static void dp8393x_do_receiver_enable(dp8393xState *s) +{ + s->regs[SONIC_CR] &= ~SONIC_CR_RXDIS; + if (dp8393x_can_receive(s->nic->ncs)) { + qemu_flush_queued_packets(qemu_get_queue(s->nic)); + } +} + +static void dp8393x_do_receiver_disable(dp8393xState *s) +{ + s->regs[SONIC_CR] &= ~SONIC_CR_RXEN; +} + +static void dp8393x_do_transmit_packets(dp8393xState *s) +{ + NetClientState *nc = qemu_get_queue(s->nic); + int tx_len, len; + uint16_t i; + + while (1) { + /* Read memory */ + s->regs[SONIC_TTDA] = s->regs[SONIC_CTDA]; + trace_dp8393x_transmit_packet(dp8393x_ttda(s)); + tx_len = 0; + + /* Update registers */ + s->regs[SONIC_TCR] = dp8393x_get(s, dp8393x_ttda(s), 1) & 0xf000; + s->regs[SONIC_TPS] = dp8393x_get(s, dp8393x_ttda(s), 2); + s->regs[SONIC_TFC] = dp8393x_get(s, dp8393x_ttda(s), 3); + s->regs[SONIC_TSA0] = dp8393x_get(s, dp8393x_ttda(s), 4); + s->regs[SONIC_TSA1] = dp8393x_get(s, dp8393x_ttda(s), 5); + s->regs[SONIC_TFS] = dp8393x_get(s, dp8393x_ttda(s), 6); + + /* Handle programmable interrupt */ + if (s->regs[SONIC_TCR] & SONIC_TCR_PINT) { + s->regs[SONIC_ISR] |= SONIC_ISR_PINT; + } else { + s->regs[SONIC_ISR] &= ~SONIC_ISR_PINT; + } + + for (i = 0; i < s->regs[SONIC_TFC]; ) { + /* Append fragment */ + len = s->regs[SONIC_TFS]; + if (tx_len + len > sizeof(s->tx_buffer)) { + len = sizeof(s->tx_buffer) - tx_len; + } + address_space_read(&s->as, dp8393x_tsa(s), MEMTXATTRS_UNSPECIFIED, + &s->tx_buffer[tx_len], len); + tx_len += len; + + i++; + if (i != s->regs[SONIC_TFC]) { + /* Read next fragment details */ + s->regs[SONIC_TSA0] = dp8393x_get(s, dp8393x_ttda(s), + 4 + 3 * i); + s->regs[SONIC_TSA1] = dp8393x_get(s, dp8393x_ttda(s), + 5 + 3 * i); + s->regs[SONIC_TFS] = dp8393x_get(s, dp8393x_ttda(s), + 6 + 3 * i); + } + } + + /* Handle Ethernet checksum */ + if (!(s->regs[SONIC_TCR] & SONIC_TCR_CRCI)) { + /* + * Don't append FCS there, to look like slirp packets + * which don't have one + */ + } else { + /* Remove existing FCS */ + tx_len -= 4; + if (tx_len < 0) { + trace_dp8393x_transmit_txlen_error(tx_len); + break; + } + } + + if (s->regs[SONIC_RCR] & (SONIC_RCR_LB1 | SONIC_RCR_LB0)) { + /* Loopback */ + s->regs[SONIC_TCR] |= SONIC_TCR_CRSL; + if (nc->info->can_receive(nc)) { + s->loopback_packet = 1; + qemu_receive_packet(nc, s->tx_buffer, tx_len); + } + } else { + /* Transmit packet */ + qemu_send_packet(nc, s->tx_buffer, tx_len); + } + s->regs[SONIC_TCR] |= SONIC_TCR_PTX; + + /* Write status */ + dp8393x_put(s, dp8393x_ttda(s), 0, s->regs[SONIC_TCR] & 0x0fff); + + if (!(s->regs[SONIC_CR] & SONIC_CR_HTX)) { + /* Read footer of packet */ + s->regs[SONIC_CTDA] = dp8393x_get(s, dp8393x_ttda(s), + 4 + 3 * s->regs[SONIC_TFC]); + if (s->regs[SONIC_CTDA] & SONIC_DESC_EOL) { + /* EOL detected */ + break; + } + } + } + + /* Done */ + s->regs[SONIC_CR] &= ~SONIC_CR_TXP; + s->regs[SONIC_ISR] |= SONIC_ISR_TXDN; + dp8393x_update_irq(s); +} + +static void dp8393x_do_halt_transmission(dp8393xState *s) +{ + /* Nothing to do */ +} + +static void dp8393x_do_command(dp8393xState *s, uint16_t command) +{ + if ((s->regs[SONIC_CR] & SONIC_CR_RST) && !(command & SONIC_CR_RST)) { + s->regs[SONIC_CR] &= ~SONIC_CR_RST; + return; + } + + s->regs[SONIC_CR] |= (command & SONIC_CR_MASK); + + if (command & SONIC_CR_HTX) { + dp8393x_do_halt_transmission(s); + } + if (command & SONIC_CR_TXP) { + dp8393x_do_transmit_packets(s); + } + if 
(command & SONIC_CR_RXDIS) { + dp8393x_do_receiver_disable(s); + } + if (command & SONIC_CR_RXEN) { + dp8393x_do_receiver_enable(s); + } + if (command & SONIC_CR_STP) { + dp8393x_do_stop_timer(s); + } + if (command & SONIC_CR_ST) { + dp8393x_do_start_timer(s); + } + if (command & SONIC_CR_RST) { + dp8393x_do_software_reset(s); + } + if (command & SONIC_CR_RRRA) { + dp8393x_do_read_rra(s); + s->regs[SONIC_CR] &= ~SONIC_CR_RRRA; + } + if (command & SONIC_CR_LCAM) { + dp8393x_do_load_cam(s); + } +} + +static uint64_t dp8393x_read(void *opaque, hwaddr addr, unsigned int size) +{ + dp8393xState *s = opaque; + int reg = addr >> s->it_shift; + uint16_t val = 0; + + switch (reg) { + /* Update data before reading it */ + case SONIC_WT0: + case SONIC_WT1: + dp8393x_update_wt_regs(s); + val = s->regs[reg]; + break; + /* Accept read to some registers only when in reset mode */ + case SONIC_CAP2: + case SONIC_CAP1: + case SONIC_CAP0: + if (s->regs[SONIC_CR] & SONIC_CR_RST) { + val = s->cam[s->regs[SONIC_CEP] & 0xf][SONIC_CAP0 - reg]; + } + break; + /* All other registers have no special contraints */ + default: + val = s->regs[reg]; + } + + trace_dp8393x_read(reg, reg_names[reg], val, size); + + return val; +} + +static void dp8393x_write(void *opaque, hwaddr addr, uint64_t val, + unsigned int size) +{ + dp8393xState *s = opaque; + int reg = addr >> s->it_shift; + + trace_dp8393x_write(reg, reg_names[reg], val, size); + + switch (reg) { + /* Command register */ + case SONIC_CR: + dp8393x_do_command(s, val); + break; + /* Prevent write to read-only registers */ + case SONIC_CAP2: + case SONIC_CAP1: + case SONIC_CAP0: + case SONIC_SR: + case SONIC_MDT: + trace_dp8393x_write_invalid(reg); + break; + /* Accept write to some registers only when in reset mode */ + case SONIC_DCR: + if (s->regs[SONIC_CR] & SONIC_CR_RST) { + s->regs[reg] = val & 0xbfff; + } else { + trace_dp8393x_write_invalid_dcr("DCR"); + } + break; + case SONIC_DCR2: + if (s->regs[SONIC_CR] & SONIC_CR_RST) { + s->regs[reg] = val & 0xf017; + } else { + trace_dp8393x_write_invalid_dcr("DCR2"); + } + break; + /* 12 lower bytes are Read Only */ + case SONIC_TCR: + s->regs[reg] = val & 0xf000; + break; + /* 9 lower bytes are Read Only */ + case SONIC_RCR: + s->regs[reg] = val & 0xffe0; + break; + /* Ignore most significant bit */ + case SONIC_IMR: + s->regs[reg] = val & 0x7fff; + dp8393x_update_irq(s); + break; + /* Clear bits by writing 1 to them */ + case SONIC_ISR: + val &= s->regs[reg]; + s->regs[reg] &= ~val; + if (val & SONIC_ISR_RBE) { + dp8393x_do_read_rra(s); + } + dp8393x_update_irq(s); + break; + /* The guest is required to store aligned pointers here */ + case SONIC_RSA: + case SONIC_REA: + case SONIC_RRP: + case SONIC_RWP: + if (s->regs[SONIC_DCR] & SONIC_DCR_DW) { + s->regs[reg] = val & 0xfffc; + } else { + s->regs[reg] = val & 0xfffe; + } + break; + /* Invert written value for some registers */ + case SONIC_CRCT: + case SONIC_FAET: + case SONIC_MPT: + s->regs[reg] = val ^ 0xffff; + break; + /* All other registers have no special contrainst */ + default: + s->regs[reg] = val; + } + + if (reg == SONIC_WT0 || reg == SONIC_WT1) { + dp8393x_set_next_tick(s); + } +} + +/* + * Since .impl.max_access_size is effectively controlled by the it_shift + * property, leave it unspecified for now to allow the memory API to + * correctly zero extend the 16-bit register values to the access size up to and + * including it_shift. 
+ */ +static const MemoryRegionOps dp8393x_ops = { + .read = dp8393x_read, + .write = dp8393x_write, + .impl.min_access_size = 2, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void dp8393x_watchdog(void *opaque) +{ + dp8393xState *s = opaque; + + if (s->regs[SONIC_CR] & SONIC_CR_STP) { + return; + } + + s->regs[SONIC_WT1] = 0xffff; + s->regs[SONIC_WT0] = 0xffff; + dp8393x_set_next_tick(s); + + /* Signal underflow */ + s->regs[SONIC_ISR] |= SONIC_ISR_TC; + dp8393x_update_irq(s); +} + +static bool dp8393x_can_receive(NetClientState *nc) +{ + dp8393xState *s = qemu_get_nic_opaque(nc); + + return !!(s->regs[SONIC_CR] & SONIC_CR_RXEN); +} + +static int dp8393x_receive_filter(dp8393xState *s, const uint8_t * buf, + int size) +{ + static const uint8_t bcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; + int i; + + /* Check promiscuous mode */ + if ((s->regs[SONIC_RCR] & SONIC_RCR_PRO) && (buf[0] & 1) == 0) { + return 0; + } + + /* Check multicast packets */ + if ((s->regs[SONIC_RCR] & SONIC_RCR_AMC) && (buf[0] & 1) == 1) { + return SONIC_RCR_MC; + } + + /* Check broadcast */ + if ((s->regs[SONIC_RCR] & SONIC_RCR_BRD) && + !memcmp(buf, bcast, sizeof(bcast))) { + return SONIC_RCR_BC; + } + + /* Check CAM */ + for (i = 0; i < 16; i++) { + if (s->regs[SONIC_CE] & (1 << i)) { + /* Entry enabled */ + if (!memcmp(buf, s->cam[i], sizeof(s->cam[i]))) { + return 0; + } + } + } + + return -1; +} + +static ssize_t dp8393x_receive(NetClientState *nc, const uint8_t * buf, + size_t pkt_size) +{ + dp8393xState *s = qemu_get_nic_opaque(nc); + int packet_type; + uint32_t available, address; + int rx_len, padded_len; + uint32_t checksum; + int size; + + s->regs[SONIC_RCR] &= ~(SONIC_RCR_PRX | SONIC_RCR_LBK | SONIC_RCR_FAER | + SONIC_RCR_CRCR | SONIC_RCR_LPKT | SONIC_RCR_BC | SONIC_RCR_MC); + + if (s->last_rba_is_full) { + return pkt_size; + } + + rx_len = pkt_size + sizeof(checksum); + if (s->regs[SONIC_DCR] & SONIC_DCR_DW) { + padded_len = ((rx_len - 1) | 3) + 1; + } else { + padded_len = ((rx_len - 1) | 1) + 1; + } + + if (padded_len > dp8393x_rbwc(s) * 2) { + trace_dp8393x_receive_oversize(pkt_size); + s->regs[SONIC_ISR] |= SONIC_ISR_RBAE; + dp8393x_update_irq(s); + s->regs[SONIC_RCR] |= SONIC_RCR_LPKT; + goto done; + } + + packet_type = dp8393x_receive_filter(s, buf, pkt_size); + if (packet_type < 0) { + trace_dp8393x_receive_not_netcard(); + return -1; + } + + /* Check for EOL */ + if (s->regs[SONIC_LLFA] & SONIC_DESC_EOL) { + /* Are we still in resource exhaustion? 
*/ + s->regs[SONIC_LLFA] = dp8393x_get(s, dp8393x_crda(s), 5); + if (s->regs[SONIC_LLFA] & SONIC_DESC_EOL) { + /* Still EOL ; stop reception */ + return -1; + } + /* Link has been updated by host */ + + /* Clear in_use */ + dp8393x_put(s, dp8393x_crda(s), 6, 0x0000); + + /* Move to next descriptor */ + s->regs[SONIC_CRDA] = s->regs[SONIC_LLFA]; + s->regs[SONIC_ISR] |= SONIC_ISR_PKTRX; + } + + /* Save current position */ + s->regs[SONIC_TRBA1] = s->regs[SONIC_CRBA1]; + s->regs[SONIC_TRBA0] = s->regs[SONIC_CRBA0]; + + /* Calculate the ethernet checksum */ + checksum = cpu_to_le32(crc32(0, buf, pkt_size)); + + /* Put packet into RBA */ + trace_dp8393x_receive_packet(dp8393x_crba(s)); + address = dp8393x_crba(s); + address_space_write(&s->as, address, MEMTXATTRS_UNSPECIFIED, + buf, pkt_size); + address += pkt_size; + + /* Put frame checksum into RBA */ + address_space_write(&s->as, address, MEMTXATTRS_UNSPECIFIED, + &checksum, sizeof(checksum)); + address += sizeof(checksum); + + /* Pad short packets to keep pointers aligned */ + if (rx_len < padded_len) { + size = padded_len - rx_len; + address_space_write(&s->as, address, MEMTXATTRS_UNSPECIFIED, + "\xFF\xFF\xFF", size); + address += size; + } + + s->regs[SONIC_CRBA1] = address >> 16; + s->regs[SONIC_CRBA0] = address & 0xffff; + available = dp8393x_rbwc(s); + available -= padded_len >> 1; + s->regs[SONIC_RBWC1] = available >> 16; + s->regs[SONIC_RBWC0] = available & 0xffff; + + /* Update status */ + if (dp8393x_rbwc(s) < s->regs[SONIC_EOBC]) { + s->regs[SONIC_RCR] |= SONIC_RCR_LPKT; + } + s->regs[SONIC_RCR] |= packet_type; + s->regs[SONIC_RCR] |= SONIC_RCR_PRX; + if (s->loopback_packet) { + s->regs[SONIC_RCR] |= SONIC_RCR_LBK; + s->loopback_packet = 0; + } + + /* Write status to memory */ + trace_dp8393x_receive_write_status(dp8393x_crda(s)); + dp8393x_put(s, dp8393x_crda(s), 0, s->regs[SONIC_RCR]); /* status */ + dp8393x_put(s, dp8393x_crda(s), 1, rx_len); /* byte count */ + dp8393x_put(s, dp8393x_crda(s), 2, s->regs[SONIC_TRBA0]); /* pkt_ptr0 */ + dp8393x_put(s, dp8393x_crda(s), 3, s->regs[SONIC_TRBA1]); /* pkt_ptr1 */ + dp8393x_put(s, dp8393x_crda(s), 4, s->regs[SONIC_RSC]); /* seq_no */ + + /* Check link field */ + s->regs[SONIC_LLFA] = dp8393x_get(s, dp8393x_crda(s), 5); + if (s->regs[SONIC_LLFA] & SONIC_DESC_EOL) { + /* EOL detected */ + s->regs[SONIC_ISR] |= SONIC_ISR_RDE; + } else { + /* Clear in_use */ + dp8393x_put(s, dp8393x_crda(s), 6, 0x0000); + + /* Move to next descriptor */ + s->regs[SONIC_CRDA] = s->regs[SONIC_LLFA]; + s->regs[SONIC_ISR] |= SONIC_ISR_PKTRX; + } + + dp8393x_update_irq(s); + + s->regs[SONIC_RSC] = (s->regs[SONIC_RSC] & 0xff00) | + ((s->regs[SONIC_RSC] + 1) & 0x00ff); + +done: + + if (s->regs[SONIC_RCR] & SONIC_RCR_LPKT) { + if (s->regs[SONIC_RRP] == s->regs[SONIC_RWP]) { + /* Stop packet reception */ + s->last_rba_is_full = true; + } else { + /* Read next resource */ + dp8393x_do_read_rra(s); + } + } + + return pkt_size; +} + +static void dp8393x_reset(DeviceState *dev) +{ + dp8393xState *s = DP8393X(dev); + timer_del(s->watchdog); + + memset(s->regs, 0, sizeof(s->regs)); + s->regs[SONIC_SR] = 0x0004; /* only revision recognized by Linux/mips */ + s->regs[SONIC_CR] = SONIC_CR_RST | SONIC_CR_STP | SONIC_CR_RXDIS; + s->regs[SONIC_DCR] &= ~(SONIC_DCR_EXBUS | SONIC_DCR_LBR); + s->regs[SONIC_RCR] &= ~(SONIC_RCR_LB0 | SONIC_RCR_LB1 | SONIC_RCR_BRD | + SONIC_RCR_RNT); + s->regs[SONIC_TCR] |= SONIC_TCR_NCRS | SONIC_TCR_PTX; + s->regs[SONIC_TCR] &= ~SONIC_TCR_BCM; + s->regs[SONIC_IMR] = 0; + s->regs[SONIC_ISR] = 0; + 
s->regs[SONIC_DCR2] = 0; + s->regs[SONIC_EOBC] = 0x02F8; + s->regs[SONIC_RSC] = 0; + s->regs[SONIC_CE] = 0; + s->regs[SONIC_RSC] = 0; + + /* Network cable is connected */ + s->regs[SONIC_RCR] |= SONIC_RCR_CRS; + + dp8393x_update_irq(s); +} + +static NetClientInfo net_dp83932_info = { + .type = NET_CLIENT_DRIVER_NIC, + .size = sizeof(NICState), + .can_receive = dp8393x_can_receive, + .receive = dp8393x_receive, +}; + +static void dp8393x_instance_init(Object *obj) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + dp8393xState *s = DP8393X(obj); + + sysbus_init_mmio(sbd, &s->mmio); + sysbus_init_irq(sbd, &s->irq); +} + +static void dp8393x_realize(DeviceState *dev, Error **errp) +{ + dp8393xState *s = DP8393X(dev); + + address_space_init(&s->as, s->dma_mr, "dp8393x"); + memory_region_init_io(&s->mmio, OBJECT(dev), &dp8393x_ops, s, + "dp8393x-regs", SONIC_REG_COUNT << s->it_shift); + + s->nic = qemu_new_nic(&net_dp83932_info, &s->conf, + object_get_typename(OBJECT(dev)), dev->id, s); + qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a); + + s->watchdog = timer_new_ns(QEMU_CLOCK_VIRTUAL, dp8393x_watchdog, s); +} + +static const VMStateDescription vmstate_dp8393x = { + .name = "dp8393x", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField []) { + VMSTATE_UINT16_2DARRAY(cam, dp8393xState, 16, 3), + VMSTATE_UINT16_ARRAY(regs, dp8393xState, SONIC_REG_COUNT), + VMSTATE_END_OF_LIST() + } +}; + +static Property dp8393x_properties[] = { + DEFINE_NIC_PROPERTIES(dp8393xState, conf), + DEFINE_PROP_LINK("dma_mr", dp8393xState, dma_mr, + TYPE_MEMORY_REGION, MemoryRegion *), + DEFINE_PROP_UINT8("it_shift", dp8393xState, it_shift, 0), + DEFINE_PROP_BOOL("big_endian", dp8393xState, big_endian, false), + DEFINE_PROP_END_OF_LIST(), +}; + +static void dp8393x_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + set_bit(DEVICE_CATEGORY_NETWORK, dc->categories); + dc->realize = dp8393x_realize; + dc->reset = dp8393x_reset; + dc->vmsd = &vmstate_dp8393x; + device_class_set_props(dc, dp8393x_properties); +} + +static const TypeInfo dp8393x_info = { + .name = TYPE_DP8393X, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(dp8393xState), + .instance_init = dp8393x_instance_init, + .class_init = dp8393x_class_init, +}; + +static void dp8393x_register_types(void) +{ + type_register_static(&dp8393x_info); +} + +type_init(dp8393x_register_types) diff --git a/hw/net/e1000.c b/hw/net/e1000.c new file mode 100644 index 000000000..f5bc81296 --- /dev/null +++ b/hw/net/e1000.c @@ -0,0 +1,1856 @@ +/* + * QEMU e1000 emulation + * + * Software developer's manual: + * http://download.intel.com/design/network/manuals/8254x_GBe_SDM.pdf + * + * Nir Peleg, Tutis Systems Ltd. for Qumranet Inc. + * Copyright (c) 2008 Qumranet + * Based on work done by: + * Copyright (c) 2007 Dan Aloni + * Copyright (c) 2004 Antony T Curtis + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ + + +#include "qemu/osdep.h" +#include "hw/pci/pci.h" +#include "hw/qdev-properties.h" +#include "migration/vmstate.h" +#include "net/eth.h" +#include "net/net.h" +#include "net/checksum.h" +#include "sysemu/sysemu.h" +#include "sysemu/dma.h" +#include "qemu/iov.h" +#include "qemu/module.h" +#include "qemu/range.h" + +#include "e1000x_common.h" +#include "trace.h" +#include "qom/object.h" + +static const uint8_t bcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; + +/* #define E1000_DEBUG */ + +#ifdef E1000_DEBUG +enum { + DEBUG_GENERAL, DEBUG_IO, DEBUG_MMIO, DEBUG_INTERRUPT, + DEBUG_RX, DEBUG_TX, DEBUG_MDIC, DEBUG_EEPROM, + DEBUG_UNKNOWN, DEBUG_TXSUM, DEBUG_TXERR, DEBUG_RXERR, + DEBUG_RXFILTER, DEBUG_PHY, DEBUG_NOTYET, +}; +#define DBGBIT(x) (1<<DEBUG_##x) +static int debugflags = DBGBIT(TXERR) | DBGBIT(GENERAL); + +#define DBGOUT(what, fmt, ...) do { \ + if (debugflags & DBGBIT(what)) \ + fprintf(stderr, "e1000: " fmt, ## __VA_ARGS__); \ + } while (0) +#else +#define DBGOUT(what, fmt, ...) do {} while (0) +#endif + +#define IOPORT_SIZE 0x40 +#define PNPMMIO_SIZE 0x20000 +#define MIN_BUF_SIZE 60 /* Min. octets in an ethernet frame sans FCS */ + +#define MAXIMUM_ETHERNET_HDR_LEN (14+4) + +/* + * HW models: + * E1000_DEV_ID_82540EM works with Windows, Linux, and OS X <= 10.6 + * E1000_DEV_ID_82544GC_COPPER appears to work; not well tested + * E1000_DEV_ID_82545EM_COPPER works with Linux and OS X >= 10.6 + * Others never tested + */ + +struct E1000State_st { + /*< private >*/ + PCIDevice parent_obj; + /*< public >*/ + + NICState *nic; + NICConf conf; + MemoryRegion mmio; + MemoryRegion io; + + uint32_t mac_reg[0x8000]; + uint16_t phy_reg[0x20]; + uint16_t eeprom_data[64]; + + uint32_t rxbuf_size; + uint32_t rxbuf_min_shift; + struct e1000_tx { + unsigned char header[256]; + unsigned char vlan_header[4]; + /* Fields vlan and data must not be reordered or separated. */ + unsigned char vlan[4]; + unsigned char data[0x10000]; + uint16_t size; + unsigned char vlan_needed; + unsigned char sum_needed; + bool cptse; + e1000x_txd_props props; + e1000x_txd_props tso_props; + uint16_t tso_frames; + bool busy; + } tx; + + struct { + uint32_t val_in; /* shifted in from guest driver */ + uint16_t bitnum_in; + uint16_t bitnum_out; + uint16_t reading; + uint32_t old_eecd; + } eecd_state; + + QEMUTimer *autoneg_timer; + + QEMUTimer *mit_timer; /* Mitigation timer. */ + bool mit_timer_on; /* Mitigation timer is running. */ + bool mit_irq_level; /* Tracks interrupt pin level. */ + uint32_t mit_ide; /* Tracks E1000_TXD_CMD_IDE bit.
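+ (Interrupt Delay Enable; gates the TADV-based mitigation delay.)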
*/ + + QEMUTimer *flush_queue_timer; + +/* Compatibility flags for migration to/from qemu 1.3.0 and older */ +#define E1000_FLAG_AUTONEG_BIT 0 +#define E1000_FLAG_MIT_BIT 1 +#define E1000_FLAG_MAC_BIT 2 +#define E1000_FLAG_TSO_BIT 3 +#define E1000_FLAG_VET_BIT 4 +#define E1000_FLAG_AUTONEG (1 << E1000_FLAG_AUTONEG_BIT) +#define E1000_FLAG_MIT (1 << E1000_FLAG_MIT_BIT) +#define E1000_FLAG_MAC (1 << E1000_FLAG_MAC_BIT) +#define E1000_FLAG_TSO (1 << E1000_FLAG_TSO_BIT) +#define E1000_FLAG_VET (1 << E1000_FLAG_VET_BIT) + + uint32_t compat_flags; + bool received_tx_tso; + bool use_tso_for_migration; + e1000x_txd_props mig_props; +}; +typedef struct E1000State_st E1000State; + +#define chkflag(x) (s->compat_flags & E1000_FLAG_##x) + +struct E1000BaseClass { + PCIDeviceClass parent_class; + uint16_t phy_id2; +}; +typedef struct E1000BaseClass E1000BaseClass; + +#define TYPE_E1000_BASE "e1000-base" + +DECLARE_OBJ_CHECKERS(E1000State, E1000BaseClass, + E1000, TYPE_E1000_BASE) + + +static void +e1000_link_up(E1000State *s) +{ + e1000x_update_regs_on_link_up(s->mac_reg, s->phy_reg); + + /* E1000_STATUS_LU is tested by e1000_can_receive() */ + qemu_flush_queued_packets(qemu_get_queue(s->nic)); +} + +static void +e1000_autoneg_done(E1000State *s) +{ + e1000x_update_regs_on_autoneg_done(s->mac_reg, s->phy_reg); + + /* E1000_STATUS_LU is tested by e1000_can_receive() */ + qemu_flush_queued_packets(qemu_get_queue(s->nic)); +} + +static bool +have_autoneg(E1000State *s) +{ + return chkflag(AUTONEG) && (s->phy_reg[PHY_CTRL] & MII_CR_AUTO_NEG_EN); +} + +static void +set_phy_ctrl(E1000State *s, int index, uint16_t val) +{ + /* bits 0-5 reserved; MII_CR_[RESTART_AUTO_NEG,RESET] are self clearing */ + s->phy_reg[PHY_CTRL] = val & ~(0x3f | + MII_CR_RESET | + MII_CR_RESTART_AUTO_NEG); + + /* + * QEMU 1.3 does not support link auto-negotiation emulation, so if we + * migrate during auto negotiation, after migration the link will be + * down. + */ + if (have_autoneg(s) && (val & MII_CR_RESTART_AUTO_NEG)) { + e1000x_restart_autoneg(s->mac_reg, s->phy_reg, s->autoneg_timer); + } +} + +static void (*phyreg_writeops[])(E1000State *, int, uint16_t) = { + [PHY_CTRL] = set_phy_ctrl, +}; + +enum { NPHYWRITEOPS = ARRAY_SIZE(phyreg_writeops) }; + +enum { PHY_R = 1, PHY_W = 2, PHY_RW = PHY_R | PHY_W }; +static const char phy_regcap[0x20] = { + [PHY_STATUS] = PHY_R, [M88E1000_EXT_PHY_SPEC_CTRL] = PHY_RW, + [PHY_ID1] = PHY_R, [M88E1000_PHY_SPEC_CTRL] = PHY_RW, + [PHY_CTRL] = PHY_RW, [PHY_1000T_CTRL] = PHY_RW, + [PHY_LP_ABILITY] = PHY_R, [PHY_1000T_STATUS] = PHY_R, + [PHY_AUTONEG_ADV] = PHY_RW, [M88E1000_RX_ERR_CNTR] = PHY_R, + [PHY_ID2] = PHY_R, [M88E1000_PHY_SPEC_STATUS] = PHY_R, + [PHY_AUTONEG_EXP] = PHY_R, +}; + +/* PHY_ID2 documented in 8254x_GBe_SDM.pdf, pp. 
250 */ +static const uint16_t phy_reg_init[] = { + [PHY_CTRL] = MII_CR_SPEED_SELECT_MSB | + MII_CR_FULL_DUPLEX | + MII_CR_AUTO_NEG_EN, + + [PHY_STATUS] = MII_SR_EXTENDED_CAPS | + MII_SR_LINK_STATUS | /* link initially up */ + MII_SR_AUTONEG_CAPS | + /* MII_SR_AUTONEG_COMPLETE: initially NOT completed */ + MII_SR_PREAMBLE_SUPPRESS | + MII_SR_EXTENDED_STATUS | + MII_SR_10T_HD_CAPS | + MII_SR_10T_FD_CAPS | + MII_SR_100X_HD_CAPS | + MII_SR_100X_FD_CAPS, + + [PHY_ID1] = 0x141, + /* [PHY_ID2] configured per DevId, from e1000_reset() */ + [PHY_AUTONEG_ADV] = 0xde1, + [PHY_LP_ABILITY] = 0x1e0, + [PHY_1000T_CTRL] = 0x0e00, + [PHY_1000T_STATUS] = 0x3c00, + [M88E1000_PHY_SPEC_CTRL] = 0x360, + [M88E1000_PHY_SPEC_STATUS] = 0xac00, + [M88E1000_EXT_PHY_SPEC_CTRL] = 0x0d60, +}; + +static const uint32_t mac_reg_init[] = { + [PBA] = 0x00100030, + [LEDCTL] = 0x602, + [CTRL] = E1000_CTRL_SWDPIN2 | E1000_CTRL_SWDPIN0 | + E1000_CTRL_SPD_1000 | E1000_CTRL_SLU, + [STATUS] = 0x80000000 | E1000_STATUS_GIO_MASTER_ENABLE | + E1000_STATUS_ASDV | E1000_STATUS_MTXCKOK | + E1000_STATUS_SPEED_1000 | E1000_STATUS_FD | + E1000_STATUS_LU, + [MANC] = E1000_MANC_EN_MNG2HOST | E1000_MANC_RCV_TCO_EN | + E1000_MANC_ARP_EN | E1000_MANC_0298_EN | + E1000_MANC_RMCP_EN, +}; + +/* Helper function, *curr == 0 means the value is not set */ +static inline void +mit_update_delay(uint32_t *curr, uint32_t value) +{ + if (value && (*curr == 0 || value < *curr)) { + *curr = value; + } +} + +static void +set_interrupt_cause(E1000State *s, int index, uint32_t val) +{ + PCIDevice *d = PCI_DEVICE(s); + uint32_t pending_ints; + uint32_t mit_delay; + + s->mac_reg[ICR] = val; + + /* + * Make sure ICR and ICS registers have the same value. + * The spec says that the ICS register is write-only. However in practice, + * on real hardware ICS is readable, and for reads it has the same value as + * ICR (except that ICS does not have the clear on read behaviour of ICR). + * + * The VxWorks PRO/1000 driver uses this behaviour. + */ + s->mac_reg[ICS] = val; + + pending_ints = (s->mac_reg[IMS] & s->mac_reg[ICR]); + if (!s->mit_irq_level && pending_ints) { + /* + * Here we detect a potential raising edge. We postpone raising the + * interrupt line if we are inside the mitigation delay window + * (s->mit_timer_on == 1). + * We provide a partial implementation of interrupt mitigation, + * emulating only RADV, TADV and ITR (lower 16 bits, 1024ns units for + * RADV and TADV, 256ns units for ITR). RDTR is only used to enable + * RADV; relative timers based on TIDV and RDTR are not implemented. + */ + if (s->mit_timer_on) { + return; + } + if (chkflag(MIT)) { + /* Compute the next mitigation delay according to pending + * interrupts and the current values of RADV (provided + * RDTR!=0), TADV and ITR. + * Then rearm the timer. + */ + mit_delay = 0; + if (s->mit_ide && + (pending_ints & (E1000_ICR_TXQE | E1000_ICR_TXDW))) { + mit_update_delay(&mit_delay, s->mac_reg[TADV] * 4); + } + if (s->mac_reg[RDTR] && (pending_ints & E1000_ICS_RXT0)) { + mit_update_delay(&mit_delay, s->mac_reg[RADV] * 4); + } + mit_update_delay(&mit_delay, s->mac_reg[ITR]); + + /* + * According to e1000 SPEC, the Ethernet controller guarantees + * a maximum observable interrupt rate of 7813 interrupts/sec. + * Thus if mit_delay < 500 then the delay should be set to the + * minimum delay possible which is 500. + */ + mit_delay = (mit_delay < 500) ? 
500 : mit_delay; + + s->mit_timer_on = 1; + timer_mod(s->mit_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + + mit_delay * 256); + s->mit_ide = 0; + } + } + + s->mit_irq_level = (pending_ints != 0); + pci_set_irq(d, s->mit_irq_level); +} + +static void +e1000_mit_timer(void *opaque) +{ + E1000State *s = opaque; + + s->mit_timer_on = 0; + /* Call set_interrupt_cause to update the irq level (if necessary). */ + set_interrupt_cause(s, 0, s->mac_reg[ICR]); +} + +static void +set_ics(E1000State *s, int index, uint32_t val) +{ + DBGOUT(INTERRUPT, "set_ics %x, ICR %x, IMR %x\n", val, s->mac_reg[ICR], + s->mac_reg[IMS]); + set_interrupt_cause(s, 0, val | s->mac_reg[ICR]); +} + +static void +e1000_autoneg_timer(void *opaque) +{ + E1000State *s = opaque; + if (!qemu_get_queue(s->nic)->link_down) { + e1000_autoneg_done(s); + set_ics(s, 0, E1000_ICS_LSC); /* signal link status change to guest */ + } +} + +static bool e1000_vet_init_need(void *opaque) +{ + E1000State *s = opaque; + + return chkflag(VET); +} + +static void e1000_reset(void *opaque) +{ + E1000State *d = opaque; + E1000BaseClass *edc = E1000_GET_CLASS(d); + uint8_t *macaddr = d->conf.macaddr.a; + + timer_del(d->autoneg_timer); + timer_del(d->mit_timer); + timer_del(d->flush_queue_timer); + d->mit_timer_on = 0; + d->mit_irq_level = 0; + d->mit_ide = 0; + memset(d->phy_reg, 0, sizeof d->phy_reg); + memmove(d->phy_reg, phy_reg_init, sizeof phy_reg_init); + d->phy_reg[PHY_ID2] = edc->phy_id2; + memset(d->mac_reg, 0, sizeof d->mac_reg); + memmove(d->mac_reg, mac_reg_init, sizeof mac_reg_init); + d->rxbuf_min_shift = 1; + memset(&d->tx, 0, sizeof d->tx); + + if (qemu_get_queue(d->nic)->link_down) { + e1000x_update_regs_on_link_down(d->mac_reg, d->phy_reg); + } + + e1000x_reset_mac_addr(d->nic, d->mac_reg, macaddr); + + if (e1000_vet_init_need(d)) { + d->mac_reg[VET] = ETH_P_VLAN; + } +} + +static void +set_ctrl(E1000State *s, int index, uint32_t val) +{ + /* RST is self clearing */ + s->mac_reg[CTRL] = val & ~E1000_CTRL_RST; +} + +static void +e1000_flush_queue_timer(void *opaque) +{ + E1000State *s = opaque; + + qemu_flush_queued_packets(qemu_get_queue(s->nic)); +} + +static void +set_rx_control(E1000State *s, int index, uint32_t val) +{ + s->mac_reg[RCTL] = val; + s->rxbuf_size = e1000x_rxbufsize(val); + s->rxbuf_min_shift = ((val / E1000_RCTL_RDMTS_QUAT) & 3) + 1; + DBGOUT(RX, "RCTL: %d, mac_reg[RCTL] = 0x%x\n", s->mac_reg[RDT], + s->mac_reg[RCTL]); + timer_mod(s->flush_queue_timer, + qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 1000); +} + +static void +set_mdic(E1000State *s, int index, uint32_t val) +{ + uint32_t data = val & E1000_MDIC_DATA_MASK; + uint32_t addr = ((val & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT); + + if ((val & E1000_MDIC_PHY_MASK) >> E1000_MDIC_PHY_SHIFT != 1) // phy # + val = s->mac_reg[MDIC] | E1000_MDIC_ERROR; + else if (val & E1000_MDIC_OP_READ) { + DBGOUT(MDIC, "MDIC read reg 0x%x\n", addr); + if (!(phy_regcap[addr] & PHY_R)) { + DBGOUT(MDIC, "MDIC read reg %x unhandled\n", addr); + val |= E1000_MDIC_ERROR; + } else + val = (val ^ data) | s->phy_reg[addr]; + } else if (val & E1000_MDIC_OP_WRITE) { + DBGOUT(MDIC, "MDIC write reg 0x%x, value 0x%x\n", addr, data); + if (!(phy_regcap[addr] & PHY_W)) { + DBGOUT(MDIC, "MDIC write reg %x unhandled\n", addr); + val |= E1000_MDIC_ERROR; + } else { + if (addr < NPHYWRITEOPS && phyreg_writeops[addr]) { + phyreg_writeops[addr](s, index, data); + } else { + s->phy_reg[addr] = data; + } + } + } + s->mac_reg[MDIC] = val | E1000_MDIC_READY; + + if (val & E1000_MDIC_INT_EN) { + 
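+ /* The guest enabled the MDI interrupt: raise MDI Access Complete. */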
set_ics(s, 0, E1000_ICR_MDAC); + } +} + +static uint32_t +get_eecd(E1000State *s, int index) +{ + uint32_t ret = E1000_EECD_PRES|E1000_EECD_GNT | s->eecd_state.old_eecd; + + DBGOUT(EEPROM, "reading eeprom bit %d (reading %d)\n", + s->eecd_state.bitnum_out, s->eecd_state.reading); + if (!s->eecd_state.reading || + ((s->eeprom_data[(s->eecd_state.bitnum_out >> 4) & 0x3f] >> + ((s->eecd_state.bitnum_out & 0xf) ^ 0xf))) & 1) + ret |= E1000_EECD_DO; + return ret; +} + +static void +set_eecd(E1000State *s, int index, uint32_t val) +{ + uint32_t oldval = s->eecd_state.old_eecd; + + s->eecd_state.old_eecd = val & (E1000_EECD_SK | E1000_EECD_CS | + E1000_EECD_DI|E1000_EECD_FWE_MASK|E1000_EECD_REQ); + if (!(E1000_EECD_CS & val)) { /* CS inactive; nothing to do */ + return; + } + if (E1000_EECD_CS & (val ^ oldval)) { /* CS rise edge; reset state */ + s->eecd_state.val_in = 0; + s->eecd_state.bitnum_in = 0; + s->eecd_state.bitnum_out = 0; + s->eecd_state.reading = 0; + } + if (!(E1000_EECD_SK & (val ^ oldval))) { /* no clock edge */ + return; + } + if (!(E1000_EECD_SK & val)) { /* falling edge */ + s->eecd_state.bitnum_out++; + return; + } + s->eecd_state.val_in <<= 1; + if (val & E1000_EECD_DI) + s->eecd_state.val_in |= 1; + if (++s->eecd_state.bitnum_in == 9 && !s->eecd_state.reading) { + s->eecd_state.bitnum_out = ((s->eecd_state.val_in & 0x3f)<<4)-1; + s->eecd_state.reading = (((s->eecd_state.val_in >> 6) & 7) == + EEPROM_READ_OPCODE_MICROWIRE); + } + DBGOUT(EEPROM, "eeprom bitnum in %d out %d, reading %d\n", + s->eecd_state.bitnum_in, s->eecd_state.bitnum_out, + s->eecd_state.reading); +} + +static uint32_t +flash_eerd_read(E1000State *s, int x) +{ + unsigned int index, r = s->mac_reg[EERD] & ~E1000_EEPROM_RW_REG_START; + + if ((s->mac_reg[EERD] & E1000_EEPROM_RW_REG_START) == 0) + return (s->mac_reg[EERD]); + + if ((index = r >> E1000_EEPROM_RW_ADDR_SHIFT) > EEPROM_CHECKSUM_REG) + return (E1000_EEPROM_RW_REG_DONE | r); + + return ((s->eeprom_data[index] << E1000_EEPROM_RW_REG_DATA) | + E1000_EEPROM_RW_REG_DONE | r); +} + +static void +putsum(uint8_t *data, uint32_t n, uint32_t sloc, uint32_t css, uint32_t cse) +{ + uint32_t sum; + + if (cse && cse < n) + n = cse + 1; + if (sloc < n-1) { + sum = net_checksum_add(n-css, data+css); + stw_be_p(data + sloc, net_checksum_finish_nozero(sum)); + } +} + +static inline void +inc_tx_bcast_or_mcast_count(E1000State *s, const unsigned char *arr) +{ + if (!memcmp(arr, bcast, sizeof bcast)) { + e1000x_inc_reg_if_not_full(s->mac_reg, BPTC); + } else if (arr[0] & 1) { + e1000x_inc_reg_if_not_full(s->mac_reg, MPTC); + } +} + +static void +e1000_send_packet(E1000State *s, const uint8_t *buf, int size) +{ + static const int PTCregs[6] = { PTC64, PTC127, PTC255, PTC511, + PTC1023, PTC1522 }; + + NetClientState *nc = qemu_get_queue(s->nic); + if (s->phy_reg[PHY_CTRL] & MII_CR_LOOPBACK) { + qemu_receive_packet(nc, buf, size); + } else { + qemu_send_packet(nc, buf, size); + } + inc_tx_bcast_or_mcast_count(s, buf); + e1000x_increase_size_stats(s->mac_reg, PTCregs, size); +} + +static void +xmit_seg(E1000State *s) +{ + uint16_t len; + unsigned int frames = s->tx.tso_frames, css, sofar; + struct e1000_tx *tp = &s->tx; + struct e1000x_txd_props *props = tp->cptse ? 
&tp->tso_props : &tp->props; + + if (tp->cptse) { + css = props->ipcss; + DBGOUT(TXSUM, "frames %d size %d ipcss %d\n", + frames, tp->size, css); + if (props->ip) { /* IPv4 */ + stw_be_p(tp->data+css+2, tp->size - css); + stw_be_p(tp->data+css+4, + lduw_be_p(tp->data + css + 4) + frames); + } else { /* IPv6 */ + stw_be_p(tp->data+css+4, tp->size - css); + } + css = props->tucss; + len = tp->size - css; + DBGOUT(TXSUM, "tcp %d tucss %d len %d\n", props->tcp, css, len); + if (props->tcp) { + sofar = frames * props->mss; + stl_be_p(tp->data+css+4, ldl_be_p(tp->data+css+4)+sofar); /* seq */ + if (props->paylen - sofar > props->mss) { + tp->data[css + 13] &= ~9; /* PSH, FIN */ + } else if (frames) { + e1000x_inc_reg_if_not_full(s->mac_reg, TSCTC); + } + } else { /* UDP */ + stw_be_p(tp->data+css+4, len); + } + if (tp->sum_needed & E1000_TXD_POPTS_TXSM) { + unsigned int phsum; + // add pseudo-header length before checksum calculation + void *sp = tp->data + props->tucso; + + phsum = lduw_be_p(sp) + len; + phsum = (phsum >> 16) + (phsum & 0xffff); + stw_be_p(sp, phsum); + } + tp->tso_frames++; + } + + if (tp->sum_needed & E1000_TXD_POPTS_TXSM) { + putsum(tp->data, tp->size, props->tucso, props->tucss, props->tucse); + } + if (tp->sum_needed & E1000_TXD_POPTS_IXSM) { + putsum(tp->data, tp->size, props->ipcso, props->ipcss, props->ipcse); + } + if (tp->vlan_needed) { + memmove(tp->vlan, tp->data, 4); + memmove(tp->data, tp->data + 4, 8); + memcpy(tp->data + 8, tp->vlan_header, 4); + e1000_send_packet(s, tp->vlan, tp->size + 4); + } else { + e1000_send_packet(s, tp->data, tp->size); + } + + e1000x_inc_reg_if_not_full(s->mac_reg, TPT); + e1000x_grow_8reg_if_not_full(s->mac_reg, TOTL, s->tx.size); + s->mac_reg[GPTC] = s->mac_reg[TPT]; + s->mac_reg[GOTCL] = s->mac_reg[TOTL]; + s->mac_reg[GOTCH] = s->mac_reg[TOTH]; +} + +static void +process_tx_desc(E1000State *s, struct e1000_tx_desc *dp) +{ + PCIDevice *d = PCI_DEVICE(s); + uint32_t txd_lower = le32_to_cpu(dp->lower.data); + uint32_t dtype = txd_lower & (E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D); + unsigned int split_size = txd_lower & 0xffff, bytes, sz; + unsigned int msh = 0xfffff; + uint64_t addr; + struct e1000_context_desc *xp = (struct e1000_context_desc *)dp; + struct e1000_tx *tp = &s->tx; + + s->mit_ide |= (txd_lower & E1000_TXD_CMD_IDE); + if (dtype == E1000_TXD_CMD_DEXT) { /* context descriptor */ + if (le32_to_cpu(xp->cmd_and_length) & E1000_TXD_CMD_TSE) { + e1000x_read_tx_ctx_descr(xp, &tp->tso_props); + s->use_tso_for_migration = 1; + tp->tso_frames = 0; + } else { + e1000x_read_tx_ctx_descr(xp, &tp->props); + s->use_tso_for_migration = 0; + } + return; + } else if (dtype == (E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D)) { + // data descriptor + if (tp->size == 0) { + tp->sum_needed = le32_to_cpu(dp->upper.data) >> 8; + } + tp->cptse = (txd_lower & E1000_TXD_CMD_TSE) ? 
1 : 0; + } else { + // legacy descriptor + tp->cptse = 0; + } + + if (e1000x_vlan_enabled(s->mac_reg) && + e1000x_is_vlan_txd(txd_lower) && + (tp->cptse || txd_lower & E1000_TXD_CMD_EOP)) { + tp->vlan_needed = 1; + stw_be_p(tp->vlan_header, + le16_to_cpu(s->mac_reg[VET])); + stw_be_p(tp->vlan_header + 2, + le16_to_cpu(dp->upper.fields.special)); + } + + addr = le64_to_cpu(dp->buffer_addr); + if (tp->cptse) { + msh = tp->tso_props.hdr_len + tp->tso_props.mss; + do { + bytes = split_size; + if (tp->size >= msh) { + goto eop; + } + if (tp->size + bytes > msh) + bytes = msh - tp->size; + + bytes = MIN(sizeof(tp->data) - tp->size, bytes); + pci_dma_read(d, addr, tp->data + tp->size, bytes); + sz = tp->size + bytes; + if (sz >= tp->tso_props.hdr_len + && tp->size < tp->tso_props.hdr_len) { + memmove(tp->header, tp->data, tp->tso_props.hdr_len); + } + tp->size = sz; + addr += bytes; + if (sz == msh) { + xmit_seg(s); + memmove(tp->data, tp->header, tp->tso_props.hdr_len); + tp->size = tp->tso_props.hdr_len; + } + split_size -= bytes; + } while (bytes && split_size); + } else { + split_size = MIN(sizeof(tp->data) - tp->size, split_size); + pci_dma_read(d, addr, tp->data + tp->size, split_size); + tp->size += split_size; + } + +eop: + if (!(txd_lower & E1000_TXD_CMD_EOP)) + return; + if (!(tp->cptse && tp->size < tp->tso_props.hdr_len)) { + xmit_seg(s); + } + tp->tso_frames = 0; + tp->sum_needed = 0; + tp->vlan_needed = 0; + tp->size = 0; + tp->cptse = 0; +} + +static uint32_t +txdesc_writeback(E1000State *s, dma_addr_t base, struct e1000_tx_desc *dp) +{ + PCIDevice *d = PCI_DEVICE(s); + uint32_t txd_upper, txd_lower = le32_to_cpu(dp->lower.data); + + if (!(txd_lower & (E1000_TXD_CMD_RS|E1000_TXD_CMD_RPS))) + return 0; + txd_upper = (le32_to_cpu(dp->upper.data) | E1000_TXD_STAT_DD) & + ~(E1000_TXD_STAT_EC | E1000_TXD_STAT_LC | E1000_TXD_STAT_TU); + dp->upper.data = cpu_to_le32(txd_upper); + pci_dma_write(d, base + ((char *)&dp->upper - (char *)dp), + &dp->upper, sizeof(dp->upper)); + return E1000_ICR_TXDW; +} + +static uint64_t tx_desc_base(E1000State *s) +{ + uint64_t bah = s->mac_reg[TDBAH]; + uint64_t bal = s->mac_reg[TDBAL] & ~0xf; + + return (bah << 32) + bal; +} + +static void +start_xmit(E1000State *s) +{ + PCIDevice *d = PCI_DEVICE(s); + dma_addr_t base; + struct e1000_tx_desc desc; + uint32_t tdh_start = s->mac_reg[TDH], cause = E1000_ICS_TXQE; + + if (!(s->mac_reg[TCTL] & E1000_TCTL_EN)) { + DBGOUT(TX, "tx disabled\n"); + return; + } + + if (s->tx.busy) { + return; + } + s->tx.busy = true; + + while (s->mac_reg[TDH] != s->mac_reg[TDT]) { + base = tx_desc_base(s) + + sizeof(struct e1000_tx_desc) * s->mac_reg[TDH]; + pci_dma_read(d, base, &desc, sizeof(desc)); + + DBGOUT(TX, "index %d: %p : %x %x\n", s->mac_reg[TDH], + (void *)(intptr_t)desc.buffer_addr, desc.lower.data, + desc.upper.data); + + process_tx_desc(s, &desc); + cause |= txdesc_writeback(s, base, &desc); + + if (++s->mac_reg[TDH] * sizeof(desc) >= s->mac_reg[TDLEN]) + s->mac_reg[TDH] = 0; + /* + * the following could happen only if guest sw assigns + * bogus values to TDT/TDLEN. + * there's nothing too intelligent we could do about this. 
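+ * (the check below just logs the wraparound and stops the transmit loop).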
+ */ + if (s->mac_reg[TDH] == tdh_start || + tdh_start >= s->mac_reg[TDLEN] / sizeof(desc)) { + DBGOUT(TXERR, "TDH wraparound @%x, TDT %x, TDLEN %x\n", + tdh_start, s->mac_reg[TDT], s->mac_reg[TDLEN]); + break; + } + } + s->tx.busy = false; + set_ics(s, 0, cause); +} + +static int +receive_filter(E1000State *s, const uint8_t *buf, int size) +{ + uint32_t rctl = s->mac_reg[RCTL]; + int isbcast = !memcmp(buf, bcast, sizeof bcast), ismcast = (buf[0] & 1); + + if (e1000x_is_vlan_packet(buf, le16_to_cpu(s->mac_reg[VET])) && + e1000x_vlan_rx_filter_enabled(s->mac_reg)) { + uint16_t vid = lduw_be_p(buf + 14); + uint32_t vfta = ldl_le_p((uint32_t*)(s->mac_reg + VFTA) + + ((vid >> 5) & 0x7f)); + if ((vfta & (1 << (vid & 0x1f))) == 0) + return 0; + } + + if (!isbcast && !ismcast && (rctl & E1000_RCTL_UPE)) { /* promiscuous ucast */ + return 1; + } + + if (ismcast && (rctl & E1000_RCTL_MPE)) { /* promiscuous mcast */ + e1000x_inc_reg_if_not_full(s->mac_reg, MPRC); + return 1; + } + + if (isbcast && (rctl & E1000_RCTL_BAM)) { /* broadcast enabled */ + e1000x_inc_reg_if_not_full(s->mac_reg, BPRC); + return 1; + } + + return e1000x_rx_group_filter(s->mac_reg, buf); +} + +static void +e1000_set_link_status(NetClientState *nc) +{ + E1000State *s = qemu_get_nic_opaque(nc); + uint32_t old_status = s->mac_reg[STATUS]; + + if (nc->link_down) { + e1000x_update_regs_on_link_down(s->mac_reg, s->phy_reg); + } else { + if (have_autoneg(s) && + !(s->phy_reg[PHY_STATUS] & MII_SR_AUTONEG_COMPLETE)) { + e1000x_restart_autoneg(s->mac_reg, s->phy_reg, s->autoneg_timer); + } else { + e1000_link_up(s); + } + } + + if (s->mac_reg[STATUS] != old_status) + set_ics(s, 0, E1000_ICR_LSC); +} + +static bool e1000_has_rxbufs(E1000State *s, size_t total_size) +{ + int bufs; + /* Fast-path short packets */ + if (total_size <= s->rxbuf_size) { + return s->mac_reg[RDH] != s->mac_reg[RDT]; + } + if (s->mac_reg[RDH] < s->mac_reg[RDT]) { + bufs = s->mac_reg[RDT] - s->mac_reg[RDH]; + } else if (s->mac_reg[RDH] > s->mac_reg[RDT]) { + bufs = s->mac_reg[RDLEN] / sizeof(struct e1000_rx_desc) + + s->mac_reg[RDT] - s->mac_reg[RDH]; + } else { + return false; + } + return total_size <= bufs * s->rxbuf_size; +} + +static bool +e1000_can_receive(NetClientState *nc) +{ + E1000State *s = qemu_get_nic_opaque(nc); + + return e1000x_rx_ready(&s->parent_obj, s->mac_reg) && + e1000_has_rxbufs(s, 1) && !timer_pending(s->flush_queue_timer); +} + +static uint64_t rx_desc_base(E1000State *s) +{ + uint64_t bah = s->mac_reg[RDBAH]; + uint64_t bal = s->mac_reg[RDBAL] & ~0xf; + + return (bah << 32) + bal; +} + +static void +e1000_receiver_overrun(E1000State *s, size_t size) +{ + trace_e1000_receiver_overrun(size, s->mac_reg[RDH], s->mac_reg[RDT]); + e1000x_inc_reg_if_not_full(s->mac_reg, RNBC); + e1000x_inc_reg_if_not_full(s->mac_reg, MPC); + set_ics(s, 0, E1000_ICS_RXO); +} + +static ssize_t +e1000_receive_iov(NetClientState *nc, const struct iovec *iov, int iovcnt) +{ + E1000State *s = qemu_get_nic_opaque(nc); + PCIDevice *d = PCI_DEVICE(s); + struct e1000_rx_desc desc; + dma_addr_t base; + unsigned int n, rdt; + uint32_t rdh_start; + uint16_t vlan_special = 0; + uint8_t vlan_status = 0; + uint8_t min_buf[MIN_BUF_SIZE]; + struct iovec min_iov; + uint8_t *filter_buf = iov->iov_base; + size_t size = iov_size(iov, iovcnt); + size_t iov_ofs = 0; + size_t desc_offset; + size_t desc_size; + size_t total_size; + + if (!e1000x_hw_rx_enabled(s->mac_reg)) { + return -1; + } + + if (timer_pending(s->flush_queue_timer)) { + return 0; + } + + /* Pad to minimum Ethernet 
frame length */ + if (size < sizeof(min_buf)) { + iov_to_buf(iov, iovcnt, 0, min_buf, size); + memset(&min_buf[size], 0, sizeof(min_buf) - size); + min_iov.iov_base = filter_buf = min_buf; + min_iov.iov_len = size = sizeof(min_buf); + iovcnt = 1; + iov = &min_iov; + } else if (iov->iov_len < MAXIMUM_ETHERNET_HDR_LEN) { + /* This is very unlikely, but may happen. */ + iov_to_buf(iov, iovcnt, 0, min_buf, MAXIMUM_ETHERNET_HDR_LEN); + filter_buf = min_buf; + } + + /* Discard oversized packets if !LPE and !SBP. */ + if (e1000x_is_oversized(s->mac_reg, size)) { + return size; + } + + if (!receive_filter(s, filter_buf, size)) { + return size; + } + + if (e1000x_vlan_enabled(s->mac_reg) && + e1000x_is_vlan_packet(filter_buf, le16_to_cpu(s->mac_reg[VET]))) { + vlan_special = cpu_to_le16(lduw_be_p(filter_buf + 14)); + iov_ofs = 4; + if (filter_buf == iov->iov_base) { + memmove(filter_buf + 4, filter_buf, 12); + } else { + iov_from_buf(iov, iovcnt, 4, filter_buf, 12); + while (iov->iov_len <= iov_ofs) { + iov_ofs -= iov->iov_len; + iov++; + } + } + vlan_status = E1000_RXD_STAT_VP; + size -= 4; + } + + rdh_start = s->mac_reg[RDH]; + desc_offset = 0; + total_size = size + e1000x_fcs_len(s->mac_reg); + if (!e1000_has_rxbufs(s, total_size)) { + e1000_receiver_overrun(s, total_size); + return -1; + } + do { + desc_size = total_size - desc_offset; + if (desc_size > s->rxbuf_size) { + desc_size = s->rxbuf_size; + } + base = rx_desc_base(s) + sizeof(desc) * s->mac_reg[RDH]; + pci_dma_read(d, base, &desc, sizeof(desc)); + desc.special = vlan_special; + desc.status |= (vlan_status | E1000_RXD_STAT_DD); + if (desc.buffer_addr) { + if (desc_offset < size) { + size_t iov_copy; + hwaddr ba = le64_to_cpu(desc.buffer_addr); + size_t copy_size = size - desc_offset; + if (copy_size > s->rxbuf_size) { + copy_size = s->rxbuf_size; + } + do { + iov_copy = MIN(copy_size, iov->iov_len - iov_ofs); + pci_dma_write(d, ba, iov->iov_base + iov_ofs, iov_copy); + copy_size -= iov_copy; + ba += iov_copy; + iov_ofs += iov_copy; + if (iov_ofs == iov->iov_len) { + iov++; + iov_ofs = 0; + } + } while (copy_size); + } + desc_offset += desc_size; + desc.length = cpu_to_le16(desc_size); + if (desc_offset >= total_size) { + desc.status |= E1000_RXD_STAT_EOP | E1000_RXD_STAT_IXSM; + } else { + /* Guest zeroing out status is not a hardware requirement. + Clear EOP in case guest didn't do it. 
*/ + desc.status &= ~E1000_RXD_STAT_EOP; + } + } else { // as per intel docs; skip descriptors with null buf addr + DBGOUT(RX, "Null RX descriptor!!\n"); + } + pci_dma_write(d, base, &desc, sizeof(desc)); + + if (++s->mac_reg[RDH] * sizeof(desc) >= s->mac_reg[RDLEN]) + s->mac_reg[RDH] = 0; + /* see comment in start_xmit; same here */ + if (s->mac_reg[RDH] == rdh_start || + rdh_start >= s->mac_reg[RDLEN] / sizeof(desc)) { + DBGOUT(RXERR, "RDH wraparound @%x, RDT %x, RDLEN %x\n", + rdh_start, s->mac_reg[RDT], s->mac_reg[RDLEN]); + e1000_receiver_overrun(s, total_size); + return -1; + } + } while (desc_offset < total_size); + + e1000x_update_rx_total_stats(s->mac_reg, size, total_size); + + n = E1000_ICS_RXT0; + if ((rdt = s->mac_reg[RDT]) < s->mac_reg[RDH]) + rdt += s->mac_reg[RDLEN] / sizeof(desc); + if (((rdt - s->mac_reg[RDH]) * sizeof(desc)) <= s->mac_reg[RDLEN] >> + s->rxbuf_min_shift) + n |= E1000_ICS_RXDMT0; + + set_ics(s, 0, n); + + return size; +} + +static ssize_t +e1000_receive(NetClientState *nc, const uint8_t *buf, size_t size) +{ + const struct iovec iov = { + .iov_base = (uint8_t *)buf, + .iov_len = size + }; + + return e1000_receive_iov(nc, &iov, 1); +} + +static uint32_t +mac_readreg(E1000State *s, int index) +{ + return s->mac_reg[index]; +} + +static uint32_t +mac_low4_read(E1000State *s, int index) +{ + return s->mac_reg[index] & 0xf; +} + +static uint32_t +mac_low11_read(E1000State *s, int index) +{ + return s->mac_reg[index] & 0x7ff; +} + +static uint32_t +mac_low13_read(E1000State *s, int index) +{ + return s->mac_reg[index] & 0x1fff; +} + +static uint32_t +mac_low16_read(E1000State *s, int index) +{ + return s->mac_reg[index] & 0xffff; +} + +static uint32_t +mac_icr_read(E1000State *s, int index) +{ + uint32_t ret = s->mac_reg[ICR]; + + DBGOUT(INTERRUPT, "ICR read: %x\n", ret); + set_interrupt_cause(s, 0, 0); + return ret; +} + +static uint32_t +mac_read_clr4(E1000State *s, int index) +{ + uint32_t ret = s->mac_reg[index]; + + s->mac_reg[index] = 0; + return ret; +} + +static uint32_t +mac_read_clr8(E1000State *s, int index) +{ + uint32_t ret = s->mac_reg[index]; + + s->mac_reg[index] = 0; + s->mac_reg[index-1] = 0; + return ret; +} + +static void +mac_writereg(E1000State *s, int index, uint32_t val) +{ + uint32_t macaddr[2]; + + s->mac_reg[index] = val; + + if (index == RA + 1) { + macaddr[0] = cpu_to_le32(s->mac_reg[RA]); + macaddr[1] = cpu_to_le32(s->mac_reg[RA + 1]); + qemu_format_nic_info_str(qemu_get_queue(s->nic), (uint8_t *)macaddr); + } +} + +static void +set_rdt(E1000State *s, int index, uint32_t val) +{ + s->mac_reg[index] = val & 0xffff; + if (e1000_has_rxbufs(s, 1)) { + qemu_flush_queued_packets(qemu_get_queue(s->nic)); + } +} + +static void +set_16bit(E1000State *s, int index, uint32_t val) +{ + s->mac_reg[index] = val & 0xffff; +} + +static void +set_dlen(E1000State *s, int index, uint32_t val) +{ + s->mac_reg[index] = val & 0xfff80; +} + +static void +set_tctl(E1000State *s, int index, uint32_t val) +{ + s->mac_reg[index] = val; + s->mac_reg[TDT] &= 0xffff; + start_xmit(s); +} + +static void +set_icr(E1000State *s, int index, uint32_t val) +{ + DBGOUT(INTERRUPT, "set_icr %x\n", val); + set_interrupt_cause(s, 0, s->mac_reg[ICR] & ~val); +} + +static void +set_imc(E1000State *s, int index, uint32_t val) +{ + s->mac_reg[IMS] &= ~val; + set_ics(s, 0, 0); +} + +static void +set_ims(E1000State *s, int index, uint32_t val) +{ + s->mac_reg[IMS] |= val; + set_ics(s, 0, 0); +} + +#define getreg(x) [x] = mac_readreg +typedef uint32_t (*readops)(E1000State *, 
int); +static const readops macreg_readops[] = { + getreg(PBA), getreg(RCTL), getreg(TDH), getreg(TXDCTL), + getreg(WUFC), getreg(TDT), getreg(CTRL), getreg(LEDCTL), + getreg(MANC), getreg(MDIC), getreg(SWSM), getreg(STATUS), + getreg(TORL), getreg(TOTL), getreg(IMS), getreg(TCTL), + getreg(RDH), getreg(RDT), getreg(VET), getreg(ICS), + getreg(TDBAL), getreg(TDBAH), getreg(RDBAH), getreg(RDBAL), + getreg(TDLEN), getreg(RDLEN), getreg(RDTR), getreg(RADV), + getreg(TADV), getreg(ITR), getreg(FCRUC), getreg(IPAV), + getreg(WUC), getreg(WUS), getreg(SCC), getreg(ECOL), + getreg(MCC), getreg(LATECOL), getreg(COLC), getreg(DC), + getreg(TNCRS), getreg(SEQEC), getreg(CEXTERR), getreg(RLEC), + getreg(XONRXC), getreg(XONTXC), getreg(XOFFRXC), getreg(XOFFTXC), + getreg(RFC), getreg(RJC), getreg(RNBC), getreg(TSCTFC), + getreg(MGTPRC), getreg(MGTPDC), getreg(MGTPTC), getreg(GORCL), + getreg(GOTCL), + + [TOTH] = mac_read_clr8, [TORH] = mac_read_clr8, + [GOTCH] = mac_read_clr8, [GORCH] = mac_read_clr8, + [PRC64] = mac_read_clr4, [PRC127] = mac_read_clr4, + [PRC255] = mac_read_clr4, [PRC511] = mac_read_clr4, + [PRC1023] = mac_read_clr4, [PRC1522] = mac_read_clr4, + [PTC64] = mac_read_clr4, [PTC127] = mac_read_clr4, + [PTC255] = mac_read_clr4, [PTC511] = mac_read_clr4, + [PTC1023] = mac_read_clr4, [PTC1522] = mac_read_clr4, + [GPRC] = mac_read_clr4, [GPTC] = mac_read_clr4, + [TPT] = mac_read_clr4, [TPR] = mac_read_clr4, + [RUC] = mac_read_clr4, [ROC] = mac_read_clr4, + [BPRC] = mac_read_clr4, [MPRC] = mac_read_clr4, + [TSCTC] = mac_read_clr4, [BPTC] = mac_read_clr4, + [MPTC] = mac_read_clr4, + [ICR] = mac_icr_read, [EECD] = get_eecd, + [EERD] = flash_eerd_read, + [RDFH] = mac_low13_read, [RDFT] = mac_low13_read, + [RDFHS] = mac_low13_read, [RDFTS] = mac_low13_read, + [RDFPC] = mac_low13_read, + [TDFH] = mac_low11_read, [TDFT] = mac_low11_read, + [TDFHS] = mac_low13_read, [TDFTS] = mac_low13_read, + [TDFPC] = mac_low13_read, + [AIT] = mac_low16_read, + + [CRCERRS ... MPC] = &mac_readreg, + [IP6AT ... IP6AT+3] = &mac_readreg, [IP4AT ... IP4AT+6] = &mac_readreg, + [FFLT ... FFLT+6] = &mac_low11_read, + [RA ... RA+31] = &mac_readreg, + [WUPM ... WUPM+31] = &mac_readreg, + [MTA ... MTA+127] = &mac_readreg, + [VFTA ... VFTA+127] = &mac_readreg, + [FFMT ... FFMT+254] = &mac_low4_read, + [FFVT ... FFVT+254] = &mac_readreg, + [PBM ... PBM+16383] = &mac_readreg, +}; +enum { NREADOPS = ARRAY_SIZE(macreg_readops) }; + +#define putreg(x) [x] = mac_writereg +typedef void (*writeops)(E1000State *, int, uint32_t); +static const writeops macreg_writeops[] = { + putreg(PBA), putreg(EERD), putreg(SWSM), putreg(WUFC), + putreg(TDBAL), putreg(TDBAH), putreg(TXDCTL), putreg(RDBAH), + putreg(RDBAL), putreg(LEDCTL), putreg(VET), putreg(FCRUC), + putreg(TDFH), putreg(TDFT), putreg(TDFHS), putreg(TDFTS), + putreg(TDFPC), putreg(RDFH), putreg(RDFT), putreg(RDFHS), + putreg(RDFTS), putreg(RDFPC), putreg(IPAV), putreg(WUC), + putreg(WUS), putreg(AIT), + + [TDLEN] = set_dlen, [RDLEN] = set_dlen, [TCTL] = set_tctl, + [TDT] = set_tctl, [MDIC] = set_mdic, [ICS] = set_ics, + [TDH] = set_16bit, [RDH] = set_16bit, [RDT] = set_rdt, + [IMC] = set_imc, [IMS] = set_ims, [ICR] = set_icr, + [EECD] = set_eecd, [RCTL] = set_rx_control, [CTRL] = set_ctrl, + [RDTR] = set_16bit, [RADV] = set_16bit, [TADV] = set_16bit, + [ITR] = set_16bit, + + [IP6AT ... IP6AT+3] = &mac_writereg, [IP4AT ... IP4AT+6] = &mac_writereg, + [FFLT ... FFLT+6] = &mac_writereg, + [RA ... RA+31] = &mac_writereg, + [WUPM ... WUPM+31] = &mac_writereg, + [MTA ... 
MTA+127] = &mac_writereg, + [VFTA ... VFTA+127] = &mac_writereg, + [FFMT ... FFMT+254] = &mac_writereg, [FFVT ... FFVT+254] = &mac_writereg, + [PBM ... PBM+16383] = &mac_writereg, +}; + +enum { NWRITEOPS = ARRAY_SIZE(macreg_writeops) }; + +enum { MAC_ACCESS_PARTIAL = 1, MAC_ACCESS_FLAG_NEEDED = 2 }; + +#define markflag(x) ((E1000_FLAG_##x << 2) | MAC_ACCESS_FLAG_NEEDED) +/* In the array below the meaning of the bits is: [f|f|f|f|f|f|n|p] + * f - flag bits (up to 6 possible flags) + * n - flag needed + * p - partially implemented */ +static const uint8_t mac_reg_access[0x8000] = { + [RDTR] = markflag(MIT), [TADV] = markflag(MIT), + [RADV] = markflag(MIT), [ITR] = markflag(MIT), + + [IPAV] = markflag(MAC), [WUC] = markflag(MAC), + [IP6AT] = markflag(MAC), [IP4AT] = markflag(MAC), + [FFVT] = markflag(MAC), [WUPM] = markflag(MAC), + [ECOL] = markflag(MAC), [MCC] = markflag(MAC), + [DC] = markflag(MAC), [TNCRS] = markflag(MAC), + [RLEC] = markflag(MAC), [XONRXC] = markflag(MAC), + [XOFFTXC] = markflag(MAC), [RFC] = markflag(MAC), + [TSCTFC] = markflag(MAC), [MGTPRC] = markflag(MAC), + [WUS] = markflag(MAC), [AIT] = markflag(MAC), + [FFLT] = markflag(MAC), [FFMT] = markflag(MAC), + [SCC] = markflag(MAC), [FCRUC] = markflag(MAC), + [LATECOL] = markflag(MAC), [COLC] = markflag(MAC), + [SEQEC] = markflag(MAC), [CEXTERR] = markflag(MAC), + [XONTXC] = markflag(MAC), [XOFFRXC] = markflag(MAC), + [RJC] = markflag(MAC), [RNBC] = markflag(MAC), + [MGTPDC] = markflag(MAC), [MGTPTC] = markflag(MAC), + [RUC] = markflag(MAC), [ROC] = markflag(MAC), + [GORCL] = markflag(MAC), [GORCH] = markflag(MAC), + [GOTCL] = markflag(MAC), [GOTCH] = markflag(MAC), + [BPRC] = markflag(MAC), [MPRC] = markflag(MAC), + [TSCTC] = markflag(MAC), [PRC64] = markflag(MAC), + [PRC127] = markflag(MAC), [PRC255] = markflag(MAC), + [PRC511] = markflag(MAC), [PRC1023] = markflag(MAC), + [PRC1522] = markflag(MAC), [PTC64] = markflag(MAC), + [PTC127] = markflag(MAC), [PTC255] = markflag(MAC), + [PTC511] = markflag(MAC), [PTC1023] = markflag(MAC), + [PTC1522] = markflag(MAC), [MPTC] = markflag(MAC), + [BPTC] = markflag(MAC), + + [TDFH] = markflag(MAC) | MAC_ACCESS_PARTIAL, + [TDFT] = markflag(MAC) | MAC_ACCESS_PARTIAL, + [TDFHS] = markflag(MAC) | MAC_ACCESS_PARTIAL, + [TDFTS] = markflag(MAC) | MAC_ACCESS_PARTIAL, + [TDFPC] = markflag(MAC) | MAC_ACCESS_PARTIAL, + [RDFH] = markflag(MAC) | MAC_ACCESS_PARTIAL, + [RDFT] = markflag(MAC) | MAC_ACCESS_PARTIAL, + [RDFHS] = markflag(MAC) | MAC_ACCESS_PARTIAL, + [RDFTS] = markflag(MAC) | MAC_ACCESS_PARTIAL, + [RDFPC] = markflag(MAC) | MAC_ACCESS_PARTIAL, + [PBM] = markflag(MAC) | MAC_ACCESS_PARTIAL, +}; + +static void +e1000_mmio_write(void *opaque, hwaddr addr, uint64_t val, + unsigned size) +{ + E1000State *s = opaque; + unsigned int index = (addr & 0x1ffff) >> 2; + + if (index < NWRITEOPS && macreg_writeops[index]) { + if (!(mac_reg_access[index] & MAC_ACCESS_FLAG_NEEDED) + || (s->compat_flags & (mac_reg_access[index] >> 2))) { + if (mac_reg_access[index] & MAC_ACCESS_PARTIAL) { + DBGOUT(GENERAL, "Writing to register at offset: 0x%08x. " + "It is not fully implemented.\n", index<<2); + } + macreg_writeops[index](s, index, val); + } else { /* "flag needed" bit is set, but the flag is not active */ + DBGOUT(MMIO, "MMIO write attempt to disabled reg. 
addr=0x%08x\n", + index<<2); + } + } else if (index < NREADOPS && macreg_readops[index]) { + DBGOUT(MMIO, "e1000_mmio_writel RO %x: 0x%04"PRIx64"\n", + index<<2, val); + } else { + DBGOUT(UNKNOWN, "MMIO unknown write addr=0x%08x,val=0x%08"PRIx64"\n", + index<<2, val); + } +} + +static uint64_t +e1000_mmio_read(void *opaque, hwaddr addr, unsigned size) +{ + E1000State *s = opaque; + unsigned int index = (addr & 0x1ffff) >> 2; + + if (index < NREADOPS && macreg_readops[index]) { + if (!(mac_reg_access[index] & MAC_ACCESS_FLAG_NEEDED) + || (s->compat_flags & (mac_reg_access[index] >> 2))) { + if (mac_reg_access[index] & MAC_ACCESS_PARTIAL) { + DBGOUT(GENERAL, "Reading register at offset: 0x%08x. " + "It is not fully implemented.\n", index<<2); + } + return macreg_readops[index](s, index); + } else { /* "flag needed" bit is set, but the flag is not active */ + DBGOUT(MMIO, "MMIO read attempt of disabled reg. addr=0x%08x\n", + index<<2); + } + } else { + DBGOUT(UNKNOWN, "MMIO unknown read addr=0x%08x\n", index<<2); + } + return 0; +} + +static const MemoryRegionOps e1000_mmio_ops = { + .read = e1000_mmio_read, + .write = e1000_mmio_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .impl = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +static uint64_t e1000_io_read(void *opaque, hwaddr addr, + unsigned size) +{ + E1000State *s = opaque; + + (void)s; + return 0; +} + +static void e1000_io_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + E1000State *s = opaque; + + (void)s; +} + +static const MemoryRegionOps e1000_io_ops = { + .read = e1000_io_read, + .write = e1000_io_write, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static bool is_version_1(void *opaque, int version_id) +{ + return version_id == 1; +} + +static int e1000_pre_save(void *opaque) +{ + E1000State *s = opaque; + NetClientState *nc = qemu_get_queue(s->nic); + + /* + * If link is down and auto-negotiation is supported and ongoing, + * complete auto-negotiation immediately. This allows us to look + * at MII_SR_AUTONEG_COMPLETE to infer link status on load. + */ + if (nc->link_down && have_autoneg(s)) { + s->phy_reg[PHY_STATUS] |= MII_SR_AUTONEG_COMPLETE; + } + + /* Decide which set of props to migrate in the main structure */ + if (chkflag(TSO) || !s->use_tso_for_migration) { + /* Either we're migrating with the extra subsection, in which + * case the mig_props is always 'props' OR + * we've not got the subsection, but 'props' was the last + * updated. + */ + s->mig_props = s->tx.props; + } else { + /* We're not using the subsection, and 'tso_props' was + * the last updated. + */ + s->mig_props = s->tx.tso_props; + } + return 0; +} + +static int e1000_post_load(void *opaque, int version_id) +{ + E1000State *s = opaque; + NetClientState *nc = qemu_get_queue(s->nic); + + if (!chkflag(MIT)) { + s->mac_reg[ITR] = s->mac_reg[RDTR] = s->mac_reg[RADV] = + s->mac_reg[TADV] = 0; + s->mit_irq_level = false; + } + s->mit_ide = 0; + s->mit_timer_on = true; + timer_mod(s->mit_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 1); + + /* nc.link_down can't be migrated, so infer link_down according + * to link status bit in mac_reg[STATUS]. + * Alternatively, restart link negotiation if it was in progress. 
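To make the [f|f|f|f|f|f|n|p] encoding documented above concrete: markflag() shifts a compat-flag mask left by two and sets the 'n' bit, and the MMIO handlers recover the mask with mac_reg_access[index] >> 2 before testing it against s->compat_flags. A minimal decode sketch follows; the flag value used here is illustrative.

#include <stdbool.h>
#include <stdint.h>

#define MAC_ACCESS_PARTIAL      1
#define MAC_ACCESS_FLAG_NEEDED  2
#define FLAG_MIT                (1 << 1)  /* illustrative compat flag */
#define markflag(f)             (((f) << 2) | MAC_ACCESS_FLAG_NEEDED)

/* Is a register whose access byte is 'acc' usable under compat_flags? */
static bool reg_enabled(uint8_t acc, uint32_t compat_flags)
{
    return !(acc & MAC_ACCESS_FLAG_NEEDED) || (compat_flags & (acc >> 2));
}

/* markflag(FLAG_MIT) == 0x0a: flag mask 0x2 in the 'f' field, 'n' set.
 * reg_enabled(0x0a, FLAG_MIT) is true; with MIT disabled it is false,
 * and the handler logs the "disabled reg" message instead. */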
*/ + nc->link_down = (s->mac_reg[STATUS] & E1000_STATUS_LU) == 0; + + if (have_autoneg(s) && + !(s->phy_reg[PHY_STATUS] & MII_SR_AUTONEG_COMPLETE)) { + nc->link_down = false; + timer_mod(s->autoneg_timer, + qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 500); + } + + s->tx.props = s->mig_props; + if (!s->received_tx_tso) { + /* We received only one set of offload data (tx.props) + * and haven't got tx.tso_props. The best we can do + * is dupe the data. + */ + s->tx.tso_props = s->mig_props; + } + return 0; +} + +static int e1000_tx_tso_post_load(void *opaque, int version_id) +{ + E1000State *s = opaque; + s->received_tx_tso = true; + return 0; +} + +static bool e1000_mit_state_needed(void *opaque) +{ + E1000State *s = opaque; + + return chkflag(MIT); +} + +static bool e1000_full_mac_needed(void *opaque) +{ + E1000State *s = opaque; + + return chkflag(MAC); +} + +static bool e1000_tso_state_needed(void *opaque) +{ + E1000State *s = opaque; + + return chkflag(TSO); +} + +static const VMStateDescription vmstate_e1000_mit_state = { + .name = "e1000/mit_state", + .version_id = 1, + .minimum_version_id = 1, + .needed = e1000_mit_state_needed, + .fields = (VMStateField[]) { + VMSTATE_UINT32(mac_reg[RDTR], E1000State), + VMSTATE_UINT32(mac_reg[RADV], E1000State), + VMSTATE_UINT32(mac_reg[TADV], E1000State), + VMSTATE_UINT32(mac_reg[ITR], E1000State), + VMSTATE_BOOL(mit_irq_level, E1000State), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_e1000_full_mac_state = { + .name = "e1000/full_mac_state", + .version_id = 1, + .minimum_version_id = 1, + .needed = e1000_full_mac_needed, + .fields = (VMStateField[]) { + VMSTATE_UINT32_ARRAY(mac_reg, E1000State, 0x8000), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_e1000_tx_tso_state = { + .name = "e1000/tx_tso_state", + .version_id = 1, + .minimum_version_id = 1, + .needed = e1000_tso_state_needed, + .post_load = e1000_tx_tso_post_load, + .fields = (VMStateField[]) { + VMSTATE_UINT8(tx.tso_props.ipcss, E1000State), + VMSTATE_UINT8(tx.tso_props.ipcso, E1000State), + VMSTATE_UINT16(tx.tso_props.ipcse, E1000State), + VMSTATE_UINT8(tx.tso_props.tucss, E1000State), + VMSTATE_UINT8(tx.tso_props.tucso, E1000State), + VMSTATE_UINT16(tx.tso_props.tucse, E1000State), + VMSTATE_UINT32(tx.tso_props.paylen, E1000State), + VMSTATE_UINT8(tx.tso_props.hdr_len, E1000State), + VMSTATE_UINT16(tx.tso_props.mss, E1000State), + VMSTATE_INT8(tx.tso_props.ip, E1000State), + VMSTATE_INT8(tx.tso_props.tcp, E1000State), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_e1000 = { + .name = "e1000", + .version_id = 2, + .minimum_version_id = 1, + .pre_save = e1000_pre_save, + .post_load = e1000_post_load, + .fields = (VMStateField[]) { + VMSTATE_PCI_DEVICE(parent_obj, E1000State), + VMSTATE_UNUSED_TEST(is_version_1, 4), /* was instance id */ + VMSTATE_UNUSED(4), /* Was mmio_base. 
*/ + VMSTATE_UINT32(rxbuf_size, E1000State), + VMSTATE_UINT32(rxbuf_min_shift, E1000State), + VMSTATE_UINT32(eecd_state.val_in, E1000State), + VMSTATE_UINT16(eecd_state.bitnum_in, E1000State), + VMSTATE_UINT16(eecd_state.bitnum_out, E1000State), + VMSTATE_UINT16(eecd_state.reading, E1000State), + VMSTATE_UINT32(eecd_state.old_eecd, E1000State), + VMSTATE_UINT8(mig_props.ipcss, E1000State), + VMSTATE_UINT8(mig_props.ipcso, E1000State), + VMSTATE_UINT16(mig_props.ipcse, E1000State), + VMSTATE_UINT8(mig_props.tucss, E1000State), + VMSTATE_UINT8(mig_props.tucso, E1000State), + VMSTATE_UINT16(mig_props.tucse, E1000State), + VMSTATE_UINT32(mig_props.paylen, E1000State), + VMSTATE_UINT8(mig_props.hdr_len, E1000State), + VMSTATE_UINT16(mig_props.mss, E1000State), + VMSTATE_UINT16(tx.size, E1000State), + VMSTATE_UINT16(tx.tso_frames, E1000State), + VMSTATE_UINT8(tx.sum_needed, E1000State), + VMSTATE_INT8(mig_props.ip, E1000State), + VMSTATE_INT8(mig_props.tcp, E1000State), + VMSTATE_BUFFER(tx.header, E1000State), + VMSTATE_BUFFER(tx.data, E1000State), + VMSTATE_UINT16_ARRAY(eeprom_data, E1000State, 64), + VMSTATE_UINT16_ARRAY(phy_reg, E1000State, 0x20), + VMSTATE_UINT32(mac_reg[CTRL], E1000State), + VMSTATE_UINT32(mac_reg[EECD], E1000State), + VMSTATE_UINT32(mac_reg[EERD], E1000State), + VMSTATE_UINT32(mac_reg[GPRC], E1000State), + VMSTATE_UINT32(mac_reg[GPTC], E1000State), + VMSTATE_UINT32(mac_reg[ICR], E1000State), + VMSTATE_UINT32(mac_reg[ICS], E1000State), + VMSTATE_UINT32(mac_reg[IMC], E1000State), + VMSTATE_UINT32(mac_reg[IMS], E1000State), + VMSTATE_UINT32(mac_reg[LEDCTL], E1000State), + VMSTATE_UINT32(mac_reg[MANC], E1000State), + VMSTATE_UINT32(mac_reg[MDIC], E1000State), + VMSTATE_UINT32(mac_reg[MPC], E1000State), + VMSTATE_UINT32(mac_reg[PBA], E1000State), + VMSTATE_UINT32(mac_reg[RCTL], E1000State), + VMSTATE_UINT32(mac_reg[RDBAH], E1000State), + VMSTATE_UINT32(mac_reg[RDBAL], E1000State), + VMSTATE_UINT32(mac_reg[RDH], E1000State), + VMSTATE_UINT32(mac_reg[RDLEN], E1000State), + VMSTATE_UINT32(mac_reg[RDT], E1000State), + VMSTATE_UINT32(mac_reg[STATUS], E1000State), + VMSTATE_UINT32(mac_reg[SWSM], E1000State), + VMSTATE_UINT32(mac_reg[TCTL], E1000State), + VMSTATE_UINT32(mac_reg[TDBAH], E1000State), + VMSTATE_UINT32(mac_reg[TDBAL], E1000State), + VMSTATE_UINT32(mac_reg[TDH], E1000State), + VMSTATE_UINT32(mac_reg[TDLEN], E1000State), + VMSTATE_UINT32(mac_reg[TDT], E1000State), + VMSTATE_UINT32(mac_reg[TORH], E1000State), + VMSTATE_UINT32(mac_reg[TORL], E1000State), + VMSTATE_UINT32(mac_reg[TOTH], E1000State), + VMSTATE_UINT32(mac_reg[TOTL], E1000State), + VMSTATE_UINT32(mac_reg[TPR], E1000State), + VMSTATE_UINT32(mac_reg[TPT], E1000State), + VMSTATE_UINT32(mac_reg[TXDCTL], E1000State), + VMSTATE_UINT32(mac_reg[WUFC], E1000State), + VMSTATE_UINT32(mac_reg[VET], E1000State), + VMSTATE_UINT32_SUB_ARRAY(mac_reg, E1000State, RA, 32), + VMSTATE_UINT32_SUB_ARRAY(mac_reg, E1000State, MTA, 128), + VMSTATE_UINT32_SUB_ARRAY(mac_reg, E1000State, VFTA, 128), + VMSTATE_END_OF_LIST() + }, + .subsections = (const VMStateDescription*[]) { + &vmstate_e1000_mit_state, + &vmstate_e1000_full_mac_state, + &vmstate_e1000_tx_tso_state, + NULL + } +}; + +/* + * EEPROM contents documented in Tables 5-2 and 5-3, pp. 98-102. + * Note: A valid DevId will be inserted during pci_e1000_realize(). 
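e1000x_core_prepare_eeprom(), called from pci_e1000_realize() further down, patches the device ID into the template below and then fixes up the checksum word. Assuming the conventional e1000 rule that the first 64 EEPROM words must sum to 0xBABA, the fix-up amounts to:

#include <stdint.h>

#define EEPROM_CHECKSUM_REG 0x3f
#define EEPROM_SUM          0xbaba  /* assumed e1000 convention */

/* Rewrite word 0x3f so the 64-word image sums (mod 2^16) to EEPROM_SUM. */
static void eeprom_fix_checksum(uint16_t eeprom[64])
{
    uint16_t sum = 0;
    int i;

    for (i = 0; i < EEPROM_CHECKSUM_REG; i++) {
        sum += eeprom[i];
    }
    eeprom[EEPROM_CHECKSUM_REG] = (uint16_t)(EEPROM_SUM - sum);
}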
+ */ +static const uint16_t e1000_eeprom_template[64] = { + 0x0000, 0x0000, 0x0000, 0x0000, 0xffff, 0x0000, 0x0000, 0x0000, + 0x3000, 0x1000, 0x6403, 0 /*DevId*/, 0x8086, 0 /*DevId*/, 0x8086, 0x3040, + 0x0008, 0x2000, 0x7e14, 0x0048, 0x1000, 0x00d8, 0x0000, 0x2700, + 0x6cc9, 0x3150, 0x0722, 0x040b, 0x0984, 0x0000, 0xc000, 0x0706, + 0x1008, 0x0000, 0x0f04, 0x7fff, 0x4d01, 0xffff, 0xffff, 0xffff, + 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, + 0x0100, 0x4000, 0x121c, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, + 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0x0000, +}; + +/* PCI interface */ + +static void +e1000_mmio_setup(E1000State *d) +{ + int i; + const uint32_t excluded_regs[] = { + E1000_MDIC, E1000_ICR, E1000_ICS, E1000_IMS, + E1000_IMC, E1000_TCTL, E1000_TDT, PNPMMIO_SIZE + }; + + memory_region_init_io(&d->mmio, OBJECT(d), &e1000_mmio_ops, d, + "e1000-mmio", PNPMMIO_SIZE); + memory_region_add_coalescing(&d->mmio, 0, excluded_regs[0]); + for (i = 0; excluded_regs[i] != PNPMMIO_SIZE; i++) + memory_region_add_coalescing(&d->mmio, excluded_regs[i] + 4, + excluded_regs[i+1] - excluded_regs[i] - 4); + memory_region_init_io(&d->io, OBJECT(d), &e1000_io_ops, d, "e1000-io", IOPORT_SIZE); +} + +static void +pci_e1000_uninit(PCIDevice *dev) +{ + E1000State *d = E1000(dev); + + timer_free(d->autoneg_timer); + timer_free(d->mit_timer); + timer_free(d->flush_queue_timer); + qemu_del_nic(d->nic); +} + +static NetClientInfo net_e1000_info = { + .type = NET_CLIENT_DRIVER_NIC, + .size = sizeof(NICState), + .can_receive = e1000_can_receive, + .receive = e1000_receive, + .receive_iov = e1000_receive_iov, + .link_status_changed = e1000_set_link_status, +}; + +static void e1000_write_config(PCIDevice *pci_dev, uint32_t address, + uint32_t val, int len) +{ + E1000State *s = E1000(pci_dev); + + pci_default_write_config(pci_dev, address, val, len); + + if (range_covers_byte(address, len, PCI_COMMAND) && + (pci_dev->config[PCI_COMMAND] & PCI_COMMAND_MASTER)) { + qemu_flush_queued_packets(qemu_get_queue(s->nic)); + } +} + +static void pci_e1000_realize(PCIDevice *pci_dev, Error **errp) +{ + DeviceState *dev = DEVICE(pci_dev); + E1000State *d = E1000(pci_dev); + uint8_t *pci_conf; + uint8_t *macaddr; + + pci_dev->config_write = e1000_write_config; + + pci_conf = pci_dev->config; + + /* TODO: RST# value should be 0, PCI spec 6.2.4 */ + pci_conf[PCI_CACHE_LINE_SIZE] = 0x10; + + pci_conf[PCI_INTERRUPT_PIN] = 1; /* interrupt pin A */ + + e1000_mmio_setup(d); + + pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &d->mmio); + + pci_register_bar(pci_dev, 1, PCI_BASE_ADDRESS_SPACE_IO, &d->io); + + qemu_macaddr_default_if_unset(&d->conf.macaddr); + macaddr = d->conf.macaddr.a; + + e1000x_core_prepare_eeprom(d->eeprom_data, + e1000_eeprom_template, + sizeof(e1000_eeprom_template), + PCI_DEVICE_GET_CLASS(pci_dev)->device_id, + macaddr); + + d->nic = qemu_new_nic(&net_e1000_info, &d->conf, + object_get_typename(OBJECT(d)), dev->id, d); + + qemu_format_nic_info_str(qemu_get_queue(d->nic), macaddr); + + d->autoneg_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL, e1000_autoneg_timer, d); + d->mit_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, e1000_mit_timer, d); + d->flush_queue_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL, + e1000_flush_queue_timer, d); +} + +static void qdev_e1000_reset(DeviceState *dev) +{ + E1000State *d = E1000(dev); + e1000_reset(d); +} + +static Property e1000_properties[] = { + DEFINE_NIC_PROPERTIES(E1000State, conf), + DEFINE_PROP_BIT("autonegotiation", E1000State, + compat_flags, 
E1000_FLAG_AUTONEG_BIT, true), + DEFINE_PROP_BIT("mitigation", E1000State, + compat_flags, E1000_FLAG_MIT_BIT, true), + DEFINE_PROP_BIT("extra_mac_registers", E1000State, + compat_flags, E1000_FLAG_MAC_BIT, true), + DEFINE_PROP_BIT("migrate_tso_props", E1000State, + compat_flags, E1000_FLAG_TSO_BIT, true), + DEFINE_PROP_BIT("init-vet", E1000State, + compat_flags, E1000_FLAG_VET_BIT, true), + DEFINE_PROP_END_OF_LIST(), +}; + +typedef struct E1000Info { + const char *name; + uint16_t device_id; + uint8_t revision; + uint16_t phy_id2; +} E1000Info; + +static void e1000_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + E1000BaseClass *e = E1000_CLASS(klass); + const E1000Info *info = data; + + k->realize = pci_e1000_realize; + k->exit = pci_e1000_uninit; + k->romfile = "efi-e1000.rom"; + k->vendor_id = PCI_VENDOR_ID_INTEL; + k->device_id = info->device_id; + k->revision = info->revision; + e->phy_id2 = info->phy_id2; + k->class_id = PCI_CLASS_NETWORK_ETHERNET; + set_bit(DEVICE_CATEGORY_NETWORK, dc->categories); + dc->desc = "Intel Gigabit Ethernet"; + dc->reset = qdev_e1000_reset; + dc->vmsd = &vmstate_e1000; + device_class_set_props(dc, e1000_properties); +} + +static void e1000_instance_init(Object *obj) +{ + E1000State *n = E1000(obj); + device_add_bootindex_property(obj, &n->conf.bootindex, + "bootindex", "/ethernet-phy@0", + DEVICE(n)); +} + +static const TypeInfo e1000_base_info = { + .name = TYPE_E1000_BASE, + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(E1000State), + .instance_init = e1000_instance_init, + .class_size = sizeof(E1000BaseClass), + .abstract = true, + .interfaces = (InterfaceInfo[]) { + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { }, + }, +}; + +static const E1000Info e1000_devices[] = { + { + .name = "e1000", + .device_id = E1000_DEV_ID_82540EM, + .revision = 0x03, + .phy_id2 = E1000_PHY_ID2_8254xx_DEFAULT, + }, + { + .name = "e1000-82544gc", + .device_id = E1000_DEV_ID_82544GC_COPPER, + .revision = 0x03, + .phy_id2 = E1000_PHY_ID2_82544x, + }, + { + .name = "e1000-82545em", + .device_id = E1000_DEV_ID_82545EM_COPPER, + .revision = 0x03, + .phy_id2 = E1000_PHY_ID2_8254xx_DEFAULT, + }, +}; + +static void e1000_register_types(void) +{ + int i; + + type_register_static(&e1000_base_info); + for (i = 0; i < ARRAY_SIZE(e1000_devices); i++) { + const E1000Info *info = &e1000_devices[i]; + TypeInfo type_info = {}; + + type_info.name = info->name; + type_info.parent = TYPE_E1000_BASE; + type_info.class_data = (void *)info; + type_info.class_init = e1000_class_init; + + type_register(&type_info); + } +} + +type_init(e1000_register_types) diff --git a/hw/net/e1000_regs.h b/hw/net/e1000_regs.h new file mode 100644 index 000000000..ae99f58ba --- /dev/null +++ b/hw/net/e1000_regs.h @@ -0,0 +1,1250 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2006 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. 
+ + You should have received a copy of the GNU General Public License along with + this program; if not, see <http://www.gnu.org/licenses/>. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS <linux.nics@intel.com> + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +/* e1000_hw.h + * Structures, enums, and macros for the MAC + */ + +#ifndef HW_E1000_REGS_H +#define HW_E1000_REGS_H + +/* PCI Device IDs */ +#define E1000_DEV_ID_82542 0x1000 +#define E1000_DEV_ID_82543GC_FIBER 0x1001 +#define E1000_DEV_ID_82543GC_COPPER 0x1004 +#define E1000_DEV_ID_82544EI_COPPER 0x1008 +#define E1000_DEV_ID_82544EI_FIBER 0x1009 +#define E1000_DEV_ID_82544GC_COPPER 0x100C +#define E1000_DEV_ID_82544GC_LOM 0x100D +#define E1000_DEV_ID_82540EM 0x100E +#define E1000_DEV_ID_82540EM_LOM 0x1015 +#define E1000_DEV_ID_82540EP_LOM 0x1016 +#define E1000_DEV_ID_82540EP 0x1017 +#define E1000_DEV_ID_82540EP_LP 0x101E +#define E1000_DEV_ID_82545EM_COPPER 0x100F +#define E1000_DEV_ID_82545EM_FIBER 0x1011 +#define E1000_DEV_ID_82545GM_COPPER 0x1026 +#define E1000_DEV_ID_82545GM_FIBER 0x1027 +#define E1000_DEV_ID_82545GM_SERDES 0x1028 +#define E1000_DEV_ID_82546EB_COPPER 0x1010 +#define E1000_DEV_ID_82546EB_FIBER 0x1012 +#define E1000_DEV_ID_82546EB_QUAD_COPPER 0x101D +#define E1000_DEV_ID_82541EI 0x1013 +#define E1000_DEV_ID_82541EI_MOBILE 0x1018 +#define E1000_DEV_ID_82541ER_LOM 0x1014 +#define E1000_DEV_ID_82541ER 0x1078 +#define E1000_DEV_ID_82547GI 0x1075 +#define E1000_DEV_ID_82541GI 0x1076 +#define E1000_DEV_ID_82541GI_MOBILE 0x1077 +#define E1000_DEV_ID_82541GI_LF 0x107C +#define E1000_DEV_ID_82546GB_COPPER 0x1079 +#define E1000_DEV_ID_82546GB_FIBER 0x107A +#define E1000_DEV_ID_82546GB_SERDES 0x107B +#define E1000_DEV_ID_82546GB_PCIE 0x108A +#define E1000_DEV_ID_82546GB_QUAD_COPPER 0x1099 +#define E1000_DEV_ID_82547EI 0x1019 +#define E1000_DEV_ID_82547EI_MOBILE 0x101A +#define E1000_DEV_ID_82571EB_COPPER 0x105E +#define E1000_DEV_ID_82571EB_FIBER 0x105F +#define E1000_DEV_ID_82571EB_SERDES 0x1060 +#define E1000_DEV_ID_82571EB_QUAD_COPPER 0x10A4 +#define E1000_DEV_ID_82571PT_QUAD_COPPER 0x10D5 +#define E1000_DEV_ID_82571EB_QUAD_FIBER 0x10A5 +#define E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE 0x10BC +#define E1000_DEV_ID_82571EB_SERDES_DUAL 0x10D9 +#define E1000_DEV_ID_82571EB_SERDES_QUAD 0x10DA +#define E1000_DEV_ID_82572EI_COPPER 0x107D +#define E1000_DEV_ID_82572EI_FIBER 0x107E +#define E1000_DEV_ID_82572EI_SERDES 0x107F +#define E1000_DEV_ID_82572EI 0x10B9 +#define E1000_DEV_ID_82573E 0x108B +#define E1000_DEV_ID_82573E_IAMT 0x108C +#define E1000_DEV_ID_82573L 0x109A +#define E1000_DEV_ID_82574L 0x10D3 +#define E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 0x10B5 +#define E1000_DEV_ID_80003ES2LAN_COPPER_DPT 0x1096 +#define E1000_DEV_ID_80003ES2LAN_SERDES_DPT 0x1098 +#define E1000_DEV_ID_80003ES2LAN_COPPER_SPT 0x10BA +#define E1000_DEV_ID_80003ES2LAN_SERDES_SPT 0x10BB + +#define E1000_DEV_ID_ICH8_IGP_M_AMT 0x1049 +#define E1000_DEV_ID_ICH8_IGP_AMT 0x104A +#define E1000_DEV_ID_ICH8_IGP_C 0x104B +#define E1000_DEV_ID_ICH8_IFE 0x104C +#define E1000_DEV_ID_ICH8_IFE_GT 0x10C4 +#define E1000_DEV_ID_ICH8_IFE_G 0x10C5 +#define E1000_DEV_ID_ICH8_IGP_M 0x104D + +/* Device Specific Register Defaults */ +#define E1000_PHY_ID2_82541x 0x380 +#define E1000_PHY_ID2_82544x 0xC30 +#define E1000_PHY_ID2_8254xx_DEFAULT 0xC20 /* 82540x, 82545x, and 82546x */ +#define
E1000_PHY_ID2_82573x 0xCC0 +#define E1000_PHY_ID2_82574x 0xCB1 + +/* Register Set. (82543, 82544) + * + * Registers are defined to be 32 bits and should be accessed as 32 bit values. + * These registers are physically located on the NIC, but are mapped into the + * host memory address space. + * + * RW - register is both readable and writable + * RO - register is read only + * WO - register is write only + * R/clr - register is read only and is cleared when read + * A - register array + */ +#define E1000_CTRL 0x00000 /* Device Control - RW */ +#define E1000_CTRL_DUP 0x00004 /* Device Control Duplicate (Shadow) - RW */ +#define E1000_STATUS 0x00008 /* Device Status - RO */ +#define E1000_EECD 0x00010 /* EEPROM/Flash Control - RW */ +#define E1000_EERD 0x00014 /* EEPROM Read - RW */ +#define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */ +#define E1000_FLA 0x0001C /* Flash Access - RW */ +#define E1000_MDIC 0x00020 /* MDI Control - RW */ +#define E1000_SCTL 0x00024 /* SerDes Control - RW */ +#define E1000_FEXTNVM 0x00028 /* Future Extended NVM register */ +#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */ +#define E1000_FCAH 0x0002C /* Flow Control Address High -RW */ +#define E1000_FCT 0x00030 /* Flow Control Type - RW */ +#define E1000_VET 0x00038 /* VLAN Ether Type - RW */ +#define E1000_ICR 0x000C0 /* Interrupt Cause Read - R/clr */ +#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */ +#define E1000_ICS 0x000C8 /* Interrupt Cause Set - WO */ +#define E1000_IMS 0x000D0 /* Interrupt Mask Set - RW */ +#define E1000_EIAC 0x000DC /* Ext. Interrupt Auto Clear - RW */ +#define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */ +#define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */ +#define E1000_IVAR 0x000E4 /* Interrupt Vector Allocation Register - RW */ +#define E1000_EITR 0x000E8 /* Extended Interrupt Throttling Rate - RW */ +#define E1000_RCTL 0x00100 /* RX Control - RW */ +#define E1000_RDTR1 0x02820 /* RX Delay Timer (1) - RW */ +#define E1000_RDBAL1 0x02900 /* RX Descriptor Base Address Low (1) - RW */ +#define E1000_RDBAH1 0x02904 /* RX Descriptor Base Address High (1) - RW */ +#define E1000_RDLEN1 0x02908 /* RX Descriptor Length (1) - RW */ +#define E1000_RDH1 0x02910 /* RX Descriptor Head (1) - RW */ +#define E1000_RDT1 0x02918 /* RX Descriptor Tail (1) - RW */ +#define E1000_FCTTV 0x00170 /* Flow Control Transmit Timer Value - RW */ +#define E1000_FCRTV 0x05F40 /* Flow Control Refresh Timer Value - RW */ +#define E1000_TXCW 0x00178 /* TX Configuration Word - RW */ +#define E1000_RXCW 0x00180 /* RX Configuration Word - RO */ +#define E1000_TCTL 0x00400 /* TX Control - RW */ +#define E1000_TCTL_EXT 0x00404 /* Extended TX Control - RW */ +#define E1000_TIPG 0x00410 /* TX Inter-packet gap -RW */ +#define E1000_TBT 0x00448 /* TX Burst Timer - RW */ +#define E1000_AIT 0x00458 /* Adaptive Interframe Spacing Throttle - RW */ +#define E1000_LEDCTL 0x00E00 /* LED Control - RW */ +#define E1000_EXTCNF_CTRL 0x00F00 /* Extended Configuration Control */ +#define E1000_EXTCNF_SIZE 0x00F08 /* Extended Configuration Size */ +#define E1000_PHY_CTRL 0x00F10 /* PHY Control Register in CSR */ +#define FEXTNVM_SW_CONFIG 0x0001 +#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */ +#define E1000_PBM 0x10000 /* Packet Buffer Memory - RW */ +#define E1000_PBS 0x01008 /* Packet Buffer Size - RW */ +#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */ +#define E1000_EEMNGDATA 0x01014 /* MNG EEPROM Read/Write data */ +#define E1000_FLMNGCTL 0x01018 /* MNG 
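All offsets in this map are byte addresses into the 128 KiB register window; the device model above turns them into mac_reg[] word indices with (addr & 0x1ffff) >> 2. A tiny sketch of the conversion:

#include <stdint.h>

/* Byte offset from the register map to a mac_reg[] word index, as in
 * e1000_mmio_read()/e1000_mmio_write(). */
static inline unsigned reg_index(uint32_t byte_off)
{
    return (byte_off & 0x1ffff) >> 2;
}

/* reg_index(0x00008) == 0x2    (E1000_STATUS)
 * reg_index(0x02818) == 0xa06  (E1000_RDT) */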
Flash Control */ +#define E1000_FLMNGDATA 0x0101C /* MNG FLASH Read data */ +#define E1000_FLMNGCNT 0x01020 /* MNG FLASH Read Counter */ +#define E1000_FLASH_UPDATES 1000 +#define E1000_EEARBC 0x01024 /* EEPROM Auto Read Bus Control */ +#define E1000_FLASHT 0x01028 /* FLASH Timer Register */ +#define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */ +#define E1000_FLSWCTL 0x01030 /* FLASH control register */ +#define E1000_FLSWDATA 0x01034 /* FLASH data register */ +#define E1000_FLSWCNT 0x01038 /* FLASH Access Counter */ +#define E1000_FLOP 0x0103C /* FLASH Opcode Register */ +#define E1000_FLOL 0x01050 /* FEEP Auto Load */ +#define E1000_ERT 0x02008 /* Early Rx Threshold - RW */ +#define E1000_FCRTL 0x02160 /* Flow Control Receive Threshold Low - RW */ +#define E1000_FCRTL_A 0x00168 /* Alias to FCRTL */ +#define E1000_FCRTH 0x02168 /* Flow Control Receive Threshold High - RW */ +#define E1000_FCRTH_A 0x00160 /* Alias to FCRTH */ +#define E1000_PSRCTL 0x02170 /* Packet Split Receive Control - RW */ +#define E1000_RDBAL 0x02800 /* RX Descriptor Base Address Low - RW */ +#define E1000_RDBAH 0x02804 /* RX Descriptor Base Address High - RW */ +#define E1000_RDLEN 0x02808 /* RX Descriptor Length - RW */ +#define E1000_RDH 0x02810 /* RX Descriptor Head - RW */ +#define E1000_RDT 0x02818 /* RX Descriptor Tail - RW */ +#define E1000_RDTR 0x02820 /* RX Delay Timer - RW */ +#define E1000_RDTR_A 0x00108 /* Alias to RDTR */ +#define E1000_RDBAL0 E1000_RDBAL /* RX Desc Base Address Low (0) - RW */ +#define E1000_RDBAL0_A 0x00110 /* Alias to RDBAL0 */ +#define E1000_RDBAH0 E1000_RDBAH /* RX Desc Base Address High (0) - RW */ +#define E1000_RDBAH0_A 0x00114 /* Alias to RDBAH0 */ +#define E1000_RDLEN0 E1000_RDLEN /* RX Desc Length (0) - RW */ +#define E1000_RDLEN0_A 0x00118 /* Alias to RDLEN0 */ +#define E1000_RDH0 E1000_RDH /* RX Desc Head (0) - RW */ +#define E1000_RDH0_A 0x00120 /* Alias to RDH0 */ +#define E1000_RDT0 E1000_RDT /* RX Desc Tail (0) - RW */ +#define E1000_RDT0_A 0x00128 /* Alias to RDT0 */ +#define E1000_RDTR0 E1000_RDTR /* RX Delay Timer (0) - RW */ +#define E1000_RXDCTL 0x02828 /* RX Descriptor Control queue 0 - RW */ +#define E1000_RXDCTL1 0x02928 /* RX Descriptor Control queue 1 - RW */ +#define E1000_RADV 0x0282C /* RX Interrupt Absolute Delay Timer - RW */ +#define E1000_RSRPD 0x02C00 /* RX Small Packet Detect - RW */ +#define E1000_RAID 0x02C08 /* Receive Ack Interrupt Delay - RW */ +#define E1000_TXDMAC 0x03000 /* TX DMA Control - RW */ +#define E1000_KABGTXD 0x03004 /* AFE Band Gap Transmit Ref Data */ +#define E1000_POEMB 0x00F10 /* PHY OEM Bits Register - RW */ +#define E1000_RDFH 0x02410 /* Receive Data FIFO Head Register - RW */ +#define E1000_RDFH_A 0x08000 /* Alias to RDFH */ +#define E1000_RDFT 0x02418 /* Receive Data FIFO Tail Register - RW */ +#define E1000_RDFT_A 0x08008 /* Alias to RDFT */ +#define E1000_RDFHS 0x02420 /* Receive Data FIFO Head Saved Register - RW */ +#define E1000_RDFTS 0x02428 /* Receive Data FIFO Tail Saved Register - RW */ +#define E1000_RDFPC 0x02430 /* Receive Data FIFO Packet Count - RW */ +#define E1000_TDFH 0x03410 /* TX Data FIFO Head - RW */ +#define E1000_TDFH_A 0x08010 /* Alias to TDFH */ +#define E1000_TDFT 0x03418 /* TX Data FIFO Tail - RW */ +#define E1000_TDFT_A 0x08018 /* Alias to TDFT */ +#define E1000_TDFHS 0x03420 /* TX Data FIFO Head Saved - RW */ +#define E1000_TDFTS 0x03428 /* TX Data FIFO Tail Saved - RW */ +#define E1000_TDFPC 0x03430 /* TX Data FIFO Packet Count - RW */ +#define E1000_TDBAL 0x03800 /* TX Descriptor Base 
Address Low - RW */ +#define E1000_TDBAL_A 0x00420 /* Alias to TDBAL */ +#define E1000_TDBAH 0x03804 /* TX Descriptor Base Address High - RW */ +#define E1000_TDBAH_A 0x00424 /* Alias to TDBAH */ +#define E1000_TDLEN 0x03808 /* TX Descriptor Length - RW */ +#define E1000_TDLEN_A 0x00428 /* Alias to TDLEN */ +#define E1000_TDH 0x03810 /* TX Descriptor Head - RW */ +#define E1000_TDH_A 0x00430 /* Alias to TDH */ +#define E1000_TDT 0x03818 /* TX Descriptor Tail - RW */ +#define E1000_TDT_A 0x00438 /* Alias to TDT */ +#define E1000_TIDV 0x03820 /* TX Interrupt Delay Value - RW */ +#define E1000_TIDV_A 0x00440 /* Alias to TIDV */ +#define E1000_TXDCTL 0x03828 /* TX Descriptor Control - RW */ +#define E1000_TADV 0x0382C /* TX Interrupt Absolute Delay Val - RW */ +#define E1000_TSPMT 0x03830 /* TCP Segmentation PAD & Min Threshold - RW */ +#define E1000_TARC0 0x03840 /* TX Arbitration Count (0) */ +#define E1000_TDBAL1 0x03900 /* TX Desc Base Address Low (1) - RW */ +#define E1000_TDBAH1 0x03904 /* TX Desc Base Address High (1) - RW */ +#define E1000_TDLEN1 0x03908 /* TX Desc Length (1) - RW */ +#define E1000_TDH1 0x03910 /* TX Desc Head (1) - RW */ +#define E1000_TDT1 0x03918 /* TX Desc Tail (1) - RW */ +#define E1000_TXDCTL1 0x03928 /* TX Descriptor Control (1) - RW */ +#define E1000_TARC1 0x03940 /* TX Arbitration Count (1) */ +#define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */ +#define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */ +#define E1000_SYMERRS 0x04008 /* Symbol Error Count - R/clr */ +#define E1000_RXERRC 0x0400C /* Receive Error Count - R/clr */ +#define E1000_MPC 0x04010 /* Missed Packet Count - R/clr */ +#define E1000_SCC 0x04014 /* Single Collision Count - R/clr */ +#define E1000_ECOL 0x04018 /* Excessive Collision Count - R/clr */ +#define E1000_MCC 0x0401C /* Multiple Collision Count - R/clr */ +#define E1000_LATECOL 0x04020 /* Late Collision Count - R/clr */ +#define E1000_COLC 0x04028 /* Collision Count - R/clr */ +#define E1000_DC 0x04030 /* Defer Count - R/clr */ +#define E1000_TNCRS 0x04034 /* TX-No CRS - R/clr */ +#define E1000_SEQEC 0x04038 /* Sequence Error Count - R/clr */ +#define E1000_CEXTERR 0x0403C /* Carrier Extension Error Count - R/clr */ +#define E1000_RLEC 0x04040 /* Receive Length Error Count - R/clr */ +#define E1000_XONRXC 0x04048 /* XON RX Count - R/clr */ +#define E1000_XONTXC 0x0404C /* XON TX Count - R/clr */ +#define E1000_XOFFRXC 0x04050 /* XOFF RX Count - R/clr */ +#define E1000_XOFFTXC 0x04054 /* XOFF TX Count - R/clr */ +#define E1000_FCRUC 0x04058 /* Flow Control RX Unsupported Count- R/clr */ +#define E1000_PRC64 0x0405C /* Packets RX (64 bytes) - R/clr */ +#define E1000_PRC127 0x04060 /* Packets RX (65-127 bytes) - R/clr */ +#define E1000_PRC255 0x04064 /* Packets RX (128-255 bytes) - R/clr */ +#define E1000_PRC511 0x04068 /* Packets RX (256-511 bytes) - R/clr */ +#define E1000_PRC1023 0x0406C /* Packets RX (512-1023 bytes) - R/clr */ +#define E1000_PRC1522 0x04070 /* Packets RX (1024-1522 bytes) - R/clr */ +#define E1000_GPRC 0x04074 /* Good Packets RX Count - R/clr */ +#define E1000_BPRC 0x04078 /* Broadcast Packets RX Count - R/clr */ +#define E1000_MPRC 0x0407C /* Multicast Packets RX Count - R/clr */ +#define E1000_GPTC 0x04080 /* Good Packets TX Count - R/clr */ +#define E1000_GORCL 0x04088 /* Good Octets RX Count Low - R/clr */ +#define E1000_GORCH 0x0408C /* Good Octets RX Count High - R/clr */ +#define E1000_GOTCL 0x04090 /* Good Octets TX Count Low - R/clr */ +#define E1000_GOTCH 0x04094 /* Good Octets TX Count
High - R/clr */ +#define E1000_RNBC 0x040A0 /* RX No Buffers Count - R/clr */ +#define E1000_RUC 0x040A4 /* RX Undersize Count - R/clr */ +#define E1000_RFC 0x040A8 /* RX Fragment Count - R/clr */ +#define E1000_ROC 0x040AC /* RX Oversize Count - R/clr */ +#define E1000_RJC 0x040B0 /* RX Jabber Count - R/clr */ +#define E1000_MGTPRC 0x040B4 /* Management Packets RX Count - R/clr */ +#define E1000_MGTPDC 0x040B8 /* Management Packets Dropped Count - R/clr */ +#define E1000_MGTPTC 0x040BC /* Management Packets TX Count - R/clr */ +#define E1000_TORL 0x040C0 /* Total Octets RX Low - R/clr */ +#define E1000_TORH 0x040C4 /* Total Octets RX High - R/clr */ +#define E1000_TOTL 0x040C8 /* Total Octets TX Low - R/clr */ +#define E1000_TOTH 0x040CC /* Total Octets TX High - R/clr */ +#define E1000_TPR 0x040D0 /* Total Packets RX - R/clr */ +#define E1000_TPT 0x040D4 /* Total Packets TX - R/clr */ +#define E1000_PTC64 0x040D8 /* Packets TX (64 bytes) - R/clr */ +#define E1000_PTC127 0x040DC /* Packets TX (65-127 bytes) - R/clr */ +#define E1000_PTC255 0x040E0 /* Packets TX (128-255 bytes) - R/clr */ +#define E1000_PTC511 0x040E4 /* Packets TX (256-511 bytes) - R/clr */ +#define E1000_PTC1023 0x040E8 /* Packets TX (512-1023 bytes) - R/clr */ +#define E1000_PTC1522 0x040EC /* Packets TX (1024-1522 Bytes) - R/clr */ +#define E1000_MPTC 0x040F0 /* Multicast Packets TX Count - R/clr */ +#define E1000_BPTC 0x040F4 /* Broadcast Packets TX Count - R/clr */ +#define E1000_TSCTC 0x040F8 /* TCP Segmentation Context TX - R/clr */ +#define E1000_TSCTFC 0x040FC /* TCP Segmentation Context TX Fail - R/clr */ +#define E1000_IAC 0x04100 /* Interrupt Assertion Count */ +#define E1000_ICRXPTC 0x04104 /* Interrupt Cause Rx Packet Timer Expire Count */ +#define E1000_ICRXATC 0x04108 /* Interrupt Cause Rx Absolute Timer Expire Count */ +#define E1000_ICTXPTC 0x0410C /* Interrupt Cause Tx Packet Timer Expire Count */ +#define E1000_ICTXATC 0x04110 /* Interrupt Cause Tx Absolute Timer Expire Count */ +#define E1000_ICTXQEC 0x04118 /* Interrupt Cause Tx Queue Empty Count */ +#define E1000_ICTXQMTC 0x0411C /* Interrupt Cause Tx Queue Minimum Threshold Count */ +#define E1000_ICRXDMTC 0x04120 /* Interrupt Cause Rx Descriptor Minimum Threshold Count */ +#define E1000_ICRXOC 0x04124 /* Interrupt Cause Receiver Overrun Count */ +#define E1000_RXCSUM 0x05000 /* RX Checksum Control - RW */ +#define E1000_RFCTL 0x05008 /* Receive Filter Control*/ +#define E1000_MAVTV0 0x05010 /* Management VLAN TAG Value 0 */ +#define E1000_MAVTV1 0x05014 /* Management VLAN TAG Value 1 */ +#define E1000_MAVTV2 0x05018 /* Management VLAN TAG Value 2 */ +#define E1000_MAVTV3 0x0501c /* Management VLAN TAG Value 3 */ +#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */ +#define E1000_RA 0x05400 /* Receive Address - RW Array */ +#define E1000_RA_A 0x00040 /* Alias to RA */ +#define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */ +#define E1000_VFTA_A 0x00600 /* Alias to VFTA */ +#define E1000_WUC 0x05800 /* Wakeup Control - RW */ +#define E1000_WUFC 0x05808 /* Wakeup Filter Control - RW */ +#define E1000_WUS 0x05810 /* Wakeup Status - RO */ +#define E1000_MANC 0x05820 /* Management Control - RW */ +#define E1000_IPAV 0x05838 /* IP Address Valid - RW */ +#define E1000_IP4AT 0x05840 /* IPv4 Address Table - RW Array */ +#define E1000_IP6AT 0x05880 /* IPv6 Address Table - RW Array */ +#define E1000_WUPL 0x05900 /* Wakeup Packet Length - RW */ +#define E1000_WUPM 0x05A00 /* Wakeup Packet Memory - RO A */ +#define E1000_MFUTP01 
0x05828 /* Management Flex UDP/TCP Ports 0/1 - RW */ +#define E1000_MFUTP23 0x05830 /* Management Flex UDP/TCP Ports 2/3 - RW */ +#define E1000_MFVAL 0x05824 /* Manageability Filters Valid - RW */ +#define E1000_MDEF 0x05890 /* Manageability Decision Filters - RW Array */ +#define E1000_FFLT 0x05F00 /* Flexible Filter Length Table - RW Array */ +#define E1000_HOST_IF 0x08800 /* Host Interface */ +#define E1000_FFMT 0x09000 /* Flexible Filter Mask Table - RW Array */ +#define E1000_FTFT 0x09400 /* Flexible TCO Filter Table - RW Array */ +#define E1000_FFVT 0x09800 /* Flexible Filter Value Table - RW Array */ + +#define E1000_KUMCTRLSTA 0x00034 /* MAC-PHY interface - RW */ +#define E1000_MDPHYA 0x0003C /* PHY address - RW */ +#define E1000_MANC2H 0x05860 /* Management Control To Host - RW */ +#define E1000_SW_FW_SYNC 0x05B5C /* Software-Firmware Synchronization - RW */ + +#define E1000_GCR 0x05B00 /* PCI-Ex Control */ +#define E1000_FUNCTAG 0x05B08 /* Function-Tag Register */ +#define E1000_GSCL_1 0x05B10 /* PCI-Ex Statistic Control #1 */ +#define E1000_GSCL_2 0x05B14 /* PCI-Ex Statistic Control #2 */ +#define E1000_GSCL_3 0x05B18 /* PCI-Ex Statistic Control #3 */ +#define E1000_GSCL_4 0x05B1C /* PCI-Ex Statistic Control #4 */ +#define E1000_GSCN_0 0x05B20 /* 3GIO Statistic Counter Register #0 */ +#define E1000_GSCN_1 0x05B24 /* 3GIO Statistic Counter Register #1 */ +#define E1000_GSCN_2 0x05B28 /* 3GIO Statistic Counter Register #2 */ +#define E1000_GSCN_3 0x05B2C /* 3GIO Statistic Counter Register #3 */ +#define E1000_FACTPS 0x05B30 /* Function Active and Power State to MNG */ +#define E1000_SWSM 0x05B50 /* SW Semaphore */ +#define E1000_GCR2 0x05B64 /* 3GIO Control Register 2 */ +#define E1000_FWSM 0x05B54 /* FW Semaphore */ +#define E1000_PBACLR 0x05B68 /* MSI-X PBA Clear */ +#define E1000_FFLT_DBG 0x05F04 /* Debug Register */ +#define E1000_HICR 0x08F00 /* Host Interface Control */ + +#define E1000_TSYNCRXCTL 0x0B620 /* Rx Time Sync Control register - RW */ +#define E1000_TSYNCTXCTL 0x0B614 /* Tx Time Sync Control register - RW */ +#define E1000_TIMINCA 0x0B608 /* Increment attributes register - RW */ +#define E1000_RXSTMPL 0x0B624 /* Rx timestamp Low - RO */ +#define E1000_RXSTMPH 0x0B628 /* Rx timestamp High - RO */ +#define E1000_TXSTMPL 0x0B618 /* Tx timestamp value Low - RO */ +#define E1000_TXSTMPH 0x0B61C /* Tx timestamp value High - RO */ +#define E1000_SYSTIML 0x0B600 /* System time register Low - RO */ +#define E1000_SYSTIMH 0x0B604 /* System time register High - RO */ +#define E1000_TIMINCA 0x0B608 /* Increment attributes register - RW */ +#define E1000_RXMTRL 0x0B634 /* Time sync Rx EtherType and Msg Type - RW */ +#define E1000_RXUDP 0x0B638 /* Time Sync Rx UDP Port - RW */ +#define E1000_RXSATRL 0x0B62C /* Rx timestamp attribute low - RO */ +#define E1000_RXSATRH 0x0B630 /* Rx timestamp attribute high - RO */ +#define E1000_TIMADJL 0x0B60C /* Time Adjustment Offset register Low - RW */ +#define E1000_TIMADJH 0x0B610 /* Time Adjustment Offset register High - RW */ +#define E1000_RXCFGL 0x0B634 /* RX Ethertype and Message Type - RW*/ + +/* RSS registers */ +#define E1000_CPUVEC 0x02C10 /* CPU Vector Register - RW */ +#define E1000_MRQC 0x05818 /* Multiple Receive Control - RW */ +#define E1000_RETA 0x05C00 /* Redirection Table - RW Array */ +#define E1000_RSSRK 0x05C80 /* RSS Random Key - RW Array */ +#define E1000_RSSIM 0x05864 /* RSS Interrupt Mask */ +#define E1000_RSSIR 0x05868 /* RSS Interrupt Request */ + +#define E1000_MRQC_ENABLED(mrqc) (((mrqc) & (BIT(0) | BIT(1))) ==
BIT(0)) + +#define E1000_RETA_IDX(hash) ((hash) & (BIT(7) - 1)) +#define E1000_RETA_VAL(reta, hash) (((uint8_t *)(reta))[E1000_RETA_IDX(hash)]) +#define E1000_RSS_QUEUE(reta, hash) ((E1000_RETA_VAL(reta, hash) & BIT(7)) >> 7) + +#define E1000_MRQC_EN_TCPIPV4(mrqc) ((mrqc) & BIT(16)) +#define E1000_MRQC_EN_IPV4(mrqc) ((mrqc) & BIT(17)) +#define E1000_MRQC_EN_TCPIPV6(mrqc) ((mrqc) & BIT(18)) +#define E1000_MRQC_EN_IPV6EX(mrqc) ((mrqc) & BIT(19)) +#define E1000_MRQC_EN_IPV6(mrqc) ((mrqc) & BIT(20)) + +#define E1000_MRQ_RSS_TYPE_NONE (0) +#define E1000_MRQ_RSS_TYPE_IPV4TCP (1) +#define E1000_MRQ_RSS_TYPE_IPV4 (2) +#define E1000_MRQ_RSS_TYPE_IPV6TCP (3) +#define E1000_MRQ_RSS_TYPE_IPV6EX (4) +#define E1000_MRQ_RSS_TYPE_IPV6 (5) + +#define E1000_ICR_ASSERTED BIT(31) +#define E1000_EIAC_MASK 0x01F00000 + +/* [TR]DBAL and [TR]DLEN masks */ +#define E1000_XDBAL_MASK (~(BIT(4) - 1)) +#define E1000_XDLEN_MASK ((BIT(20) - 1) & (~(BIT(7) - 1))) + +/* IVAR register parsing helpers */ +#define E1000_IVAR_INT_ALLOC_VALID (0x8) + +#define E1000_IVAR_RXQ0_SHIFT (0) +#define E1000_IVAR_RXQ1_SHIFT (4) +#define E1000_IVAR_TXQ0_SHIFT (8) +#define E1000_IVAR_TXQ1_SHIFT (12) +#define E1000_IVAR_OTHER_SHIFT (16) + +#define E1000_IVAR_ENTRY_MASK (0xF) +#define E1000_IVAR_ENTRY_VALID_MASK E1000_IVAR_INT_ALLOC_VALID +#define E1000_IVAR_ENTRY_VEC_MASK (0x7) + +#define E1000_IVAR_RXQ0(x) ((x) >> E1000_IVAR_RXQ0_SHIFT) +#define E1000_IVAR_RXQ1(x) ((x) >> E1000_IVAR_RXQ1_SHIFT) +#define E1000_IVAR_TXQ0(x) ((x) >> E1000_IVAR_TXQ0_SHIFT) +#define E1000_IVAR_TXQ1(x) ((x) >> E1000_IVAR_TXQ1_SHIFT) +#define E1000_IVAR_OTHER(x) ((x) >> E1000_IVAR_OTHER_SHIFT) + +#define E1000_IVAR_ENTRY_VALID(x) ((x) & E1000_IVAR_ENTRY_VALID_MASK) +#define E1000_IVAR_ENTRY_VEC(x) ((x) & E1000_IVAR_ENTRY_VEC_MASK) + +#define E1000_IVAR_TX_INT_EVERY_WB BIT(31) + +/* RFCTL register bits */ +#define E1000_RFCTL_ISCSI_DIS 0x00000001 +#define E1000_RFCTL_NFSW_DIS 0x00000040 +#define E1000_RFCTL_NFSR_DIS 0x00000080 +#define E1000_RFCTL_IPV6_DIS 0x00000400 +#define E1000_RFCTL_IPV6_XSUM_DIS 0x00000800 +#define E1000_RFCTL_ACK_DIS 0x00001000 +#define E1000_RFCTL_ACK_DATA_DIS 0x00002000 +#define E1000_RFCTL_IPFRSP_DIS 0x00004000 +#define E1000_RFCTL_EXTEN 0x00008000 +#define E1000_RFCTL_IPV6_EX_DIS 0x00010000 +#define E1000_RFCTL_NEW_IPV6_EXT_DIS 0x00020000 + +/* PSRCTL parsing */ +#define E1000_PSRCTL_BSIZE0_MASK 0x0000007F +#define E1000_PSRCTL_BSIZE1_MASK 0x00003F00 +#define E1000_PSRCTL_BSIZE2_MASK 0x003F0000 +#define E1000_PSRCTL_BSIZE3_MASK 0x3F000000 + +#define E1000_PSRCTL_BSIZE0_SHIFT 0 +#define E1000_PSRCTL_BSIZE1_SHIFT 8 +#define E1000_PSRCTL_BSIZE2_SHIFT 16 +#define E1000_PSRCTL_BSIZE3_SHIFT 24 + +#define E1000_PSRCTL_BUFFS_PER_DESC 4 + +/* TARC* parsing */ +#define E1000_TARC_ENABLE BIT(10) + +/* PHY 1000 MII Register/Bit Definitions */ +/* PHY Registers defined by IEEE */ +#define PHY_CTRL 0x00 /* Control Register */ +#define PHY_STATUS 0x01 /* Status Register */ +#define PHY_ID1 0x02 /* Phy Id Reg (word 1) */ +#define PHY_ID2 0x03 /* Phy Id Reg (word 2) */ +#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */ +#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */ +#define PHY_AUTONEG_EXP 0x06 /* Autoneg Expansion Reg */ +#define PHY_NEXT_PAGE_TX 0x07 /* Next Page TX */ +#define PHY_LP_NEXT_PAGE 0x08 /* Link Partner Next Page */ +#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */ +#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */ +#define PHY_EXT_STATUS 0x0F /* Extended Status Reg */ + +/* 82574-specific
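The RETA macros above encode a two-queue RSS spread: the low seven bits of the RSS hash select one of 128 redirection-table bytes, and bit 7 of that byte picks RX queue 0 or 1. A minimal sketch (the hash value is illustrative):

#include <stdint.h>

/* Equivalent of E1000_RSS_QUEUE(reta, hash) for a two-queue device. */
static unsigned rss_queue(const uint8_t reta[128], uint32_t hash)
{
    return (reta[hash & 0x7f] >> 7) & 1;
}

/* hash = 0x9e3779b9 selects reta[0x39]; bit 7 of that byte is the queue. */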
registers */ +#define PHY_COPPER_CTRL1 0x10 /* Copper Specific Control Register 1 */ +#define PHY_COPPER_STAT1 0x11 /* Copper Specific Status Register 1 */ +#define PHY_COPPER_INT_ENABLE 0x12 /* Interrupt Enable Register */ +#define PHY_COPPER_STAT2 0x13 /* Copper Specific Status Register 2 */ +#define PHY_COPPER_CTRL3 0x14 /* Copper Specific Control Register 3 */ +#define PHY_COPPER_CTRL2 0x1A /* Copper Specific Control Register 2 */ +#define PHY_RX_ERR_CNTR 0x15 /* Receive Error Counter */ +#define PHY_PAGE 0x16 /* Page Address (Any page) */ +#define PHY_OEM_BITS 0x19 /* OEM Bits (Page 0) */ +#define PHY_BIAS_1 0x1d /* Bias Setting Register */ +#define PHY_BIAS_2 0x1e /* Bias Setting Register */ + +/* 82574-specific registers - page 2 */ +#define PHY_MAC_CTRL1 0x10 /* MAC Specific Control Register 1 */ +#define PHY_MAC_INT_ENABLE 0x12 /* MAC Interrupt Enable Register */ +#define PHY_MAC_STAT 0x13 /* MAC Specific Status Register */ +#define PHY_MAC_CTRL2 0x15 /* MAC Specific Control Register 2 */ + +/* 82574-specific registers - page 3 */ +#define PHY_LED_03_FUNC_CTRL1 0x10 /* LED[3:0] Function Control */ +#define PHY_LED_03_POL_CTRL 0x11 /* LED[3:0] Polarity Control */ +#define PHY_LED_TIMER_CTRL 0x12 /* LED Timer Control */ +#define PHY_LED_45_CTRL 0x13 /* LED[5:4] Function Control and Polarity */ + +/* 82574-specific registers - page 5 */ +#define PHY_1000T_SKEW 0x14 /* 1000 BASE - T Pair Skew Register */ +#define PHY_1000T_SWAP 0x15 /* 1000 BASE - T Pair Swap and Polarity */ + +/* 82574-specific registers - page 6 */ +#define PHY_CRC_COUNTERS 0x11 /* CRC Counters */ + +#define PHY_PAGE_RW_MASK 0x7F /* R/W part of page address register */ + +#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */ +#define MAX_PHY_MULTI_PAGE_REG 0xF /* Registers equal on all pages */ + +/* M88E1000 Specific Registers */ +#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */ +#define M88E1000_PHY_SPEC_STATUS 0x11 /* PHY Specific Status Register */ +#define M88E1000_INT_ENABLE 0x12 /* Interrupt Enable Register */ +#define M88E1000_INT_STATUS 0x13 /* Interrupt Status Register */ +#define M88E1000_EXT_PHY_SPEC_CTRL 0x14 /* Extended PHY Specific Control */ +#define M88E1000_RX_ERR_CNTR 0x15 /* Receive Error Counter */ + +#define M88E1000_PHY_EXT_CTRL 0x1A /* PHY extend control register */ +#define M88E1000_PHY_PAGE_SELECT 0x1D /* Reg 29 for page number setting */ +#define M88E1000_PHY_GEN_CONTROL 0x1E /* Its meaning depends on reg 29 */ +#define M88E1000_PHY_VCO_REG_BIT8 0x100 /* Bits 8 & 11 are adjusted for */ +#define M88E1000_PHY_VCO_REG_BIT11 0x800 /* improved BER performance */ + +/* PHY Control Register */ +#define MII_CR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, 00=10 */ +#define MII_CR_COLL_TEST_ENABLE 0x0080 /* Collision test enable */ +#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */ +#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */ +#define MII_CR_ISOLATE 0x0400 /* Isolate PHY from MII */ +#define MII_CR_POWER_DOWN 0x0800 /* Power down */ +#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */ +#define MII_CR_SPEED_SELECT_LSB 0x2000 /* bits 6,13: 10=1000, 01=100, 00=10 */ +#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */ +#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */ + +/* PHY Status Register */ +#define MII_SR_EXTENDED_CAPS 0x0001 /* Extended register capabilities */ +#define MII_SR_JABBER_DETECT 0x0002 /* Jabber Detected */ +#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */ +#define 
MII_SR_AUTONEG_CAPS 0x0008 /* Auto Neg Capable */ +#define MII_SR_REMOTE_FAULT 0x0010 /* Remote Fault Detect */ +#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */ +#define MII_SR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */ +#define MII_SR_EXTENDED_STATUS 0x0100 /* Ext. status info in Reg 0x0F */ +#define MII_SR_100T2_HD_CAPS 0x0200 /* 100T2 Half Duplex Capable */ +#define MII_SR_100T2_FD_CAPS 0x0400 /* 100T2 Full Duplex Capable */ +#define MII_SR_10T_HD_CAPS 0x0800 /* 10T Half Duplex Capable */ +#define MII_SR_10T_FD_CAPS 0x1000 /* 10T Full Duplex Capable */ +#define MII_SR_100X_HD_CAPS 0x2000 /* 100X Half Duplex Capable */ +#define MII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */ +#define MII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */ + +/* PHY Link Partner Ability Register */ +#define MII_LPAR_LPACK 0x4000 /* Acked by link partner */ + +/* Interrupt Cause Read */ +#define E1000_ICR_TXDW 0x00000001 /* Transmit desc written back */ +#define E1000_ICR_TXQE 0x00000002 /* Transmit Queue empty */ +#define E1000_ICR_LSC 0x00000004 /* Link Status Change */ +#define E1000_ICR_RXSEQ 0x00000008 /* rx sequence error */ +#define E1000_ICR_RXDMT0 0x00000010 /* rx desc min. threshold (0) */ +#define E1000_ICR_RXO 0x00000040 /* rx overrun */ +#define E1000_ICR_RXT0 0x00000080 /* rx timer intr (ring 0) */ +#define E1000_ICR_MDAC 0x00000200 /* MDIO access complete */ +#define E1000_ICR_RXCFG 0x00000400 /* RX /c/ ordered set */ +#define E1000_ICR_GPI_EN0 0x00000800 /* GP Int 0 */ +#define E1000_ICR_GPI_EN1 0x00001000 /* GP Int 1 */ +#define E1000_ICR_GPI_EN2 0x00002000 /* GP Int 2 */ +#define E1000_ICR_GPI_EN3 0x00004000 /* GP Int 3 */ +#define E1000_ICR_TXD_LOW 0x00008000 +#define E1000_ICR_SRPD 0x00010000 +#define E1000_ICR_ACK 0x00020000 /* Receive Ack frame */ +#define E1000_ICR_MNG 0x00040000 /* Manageability event */ +#define E1000_ICR_DOCK 0x00080000 /* Dock/Undock */ +#define E1000_ICR_INT_ASSERTED 0x80000000 /* If this bit is asserted, the driver should claim the interrupt */ +#define E1000_ICR_RXD_FIFO_PAR0 0x00100000 /* queue 0 Rx descriptor FIFO parity error */ +#define E1000_ICR_TXD_FIFO_PAR0 0x00200000 /* queue 0 Tx descriptor FIFO parity error */ +#define E1000_ICR_HOST_ARB_PAR 0x00400000 /* host arb read buffer parity error */ +#define E1000_ICR_PB_PAR 0x00800000 /* packet buffer parity error */ +#define E1000_ICR_RXD_FIFO_PAR1 0x01000000 /* queue 1 Rx descriptor FIFO parity error */ +#define E1000_ICR_TXD_FIFO_PAR1 0x02000000 /* queue 1 Tx descriptor FIFO parity error */ +#define E1000_ICR_ALL_PARITY 0x03F00000 /* all parity error bits */ +#define E1000_ICR_DSW 0x00000020 /* FW changed the status of DISSW bit in the FWSM */ +#define E1000_ICR_PHYINT 0x00001000 /* LAN connected device generates an interrupt */ +#define E1000_ICR_EPRST 0x00100000 /* ME hardware reset occurs */ +#define E1000_ICR_RXQ0 0x00100000 /* Rx Queue 0 Interrupt */ +#define E1000_ICR_RXQ1 0x00200000 /* Rx Queue 1 Interrupt */ +#define E1000_ICR_TXQ0 0x00400000 /* Tx Queue 0 Interrupt */ +#define E1000_ICR_TXQ1 0x00800000 /* Tx Queue 1 Interrupt */ +#define E1000_ICR_OTHER 0x01000000 /* Other Interrupts */ + +#define E1000_ICR_OTHER_CAUSES (E1000_ICR_LSC | \ + E1000_ICR_RXO | \ + E1000_ICR_MDAC | \ + E1000_ICR_SRPD | \ + E1000_ICR_ACK | \ + E1000_ICR_MNG) + +/* Interrupt Cause Set */ +#define E1000_ICS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ +#define E1000_ICS_TXQE E1000_ICR_TXQE /* Transmit Queue empty */ +#define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */ +#define
E1000_ICS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */ +#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */ +#define E1000_ICS_RXO E1000_ICR_RXO /* rx overrun */ +#define E1000_ICS_RXT0 E1000_ICR_RXT0 /* rx timer intr */ +#define E1000_ICS_MDAC E1000_ICR_MDAC /* MDIO access complete */ +#define E1000_ICS_RXCFG E1000_ICR_RXCFG /* RX /c/ ordered set */ +#define E1000_ICS_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */ +#define E1000_ICS_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */ +#define E1000_ICS_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */ +#define E1000_ICS_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */ +#define E1000_ICS_TXD_LOW E1000_ICR_TXD_LOW +#define E1000_ICS_SRPD E1000_ICR_SRPD +#define E1000_ICS_ACK E1000_ICR_ACK /* Receive Ack frame */ +#define E1000_ICS_MNG E1000_ICR_MNG /* Manageability event */ +#define E1000_ICS_DOCK E1000_ICR_DOCK /* Dock/Undock */ +#define E1000_ICS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */ +#define E1000_ICS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */ +#define E1000_ICS_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer parity error */ +#define E1000_ICS_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */ +#define E1000_ICS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */ +#define E1000_ICS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */ +#define E1000_ICS_DSW E1000_ICR_DSW +#define E1000_ICS_PHYINT E1000_ICR_PHYINT +#define E1000_ICS_EPRST E1000_ICR_EPRST + +/* Interrupt Mask Set */ +#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ +#define E1000_IMS_TXQE E1000_ICR_TXQE /* Transmit Queue empty */ +#define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */ +#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */ +#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. 
threshold */ +#define E1000_IMS_RXO E1000_ICR_RXO /* rx overrun */ +#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* rx timer intr */ +#define E1000_IMS_MDAC E1000_ICR_MDAC /* MDIO access complete */ +#define E1000_IMS_RXCFG E1000_ICR_RXCFG /* RX /c/ ordered set */ +#define E1000_IMS_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */ +#define E1000_IMS_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */ +#define E1000_IMS_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */ +#define E1000_IMS_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */ +#define E1000_IMS_TXD_LOW E1000_ICR_TXD_LOW +#define E1000_IMS_SRPD E1000_ICR_SRPD +#define E1000_IMS_ACK E1000_ICR_ACK /* Receive Ack frame */ +#define E1000_IMS_MNG E1000_ICR_MNG /* Manageability event */ +#define E1000_IMS_RXQ0 E1000_ICR_RXQ0 +#define E1000_IMS_RXQ1 E1000_ICR_RXQ1 +#define E1000_IMS_TXQ0 E1000_ICR_TXQ0 +#define E1000_IMS_TXQ1 E1000_ICR_TXQ1 +#define E1000_IMS_OTHER E1000_ICR_OTHER +#define E1000_IMS_DOCK E1000_ICR_DOCK /* Dock/Undock */ +#define E1000_IMS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */ +#define E1000_IMS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */ +#define E1000_IMS_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer parity error */ +#define E1000_IMS_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */ +#define E1000_IMS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */ +#define E1000_IMS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */ +#define E1000_IMS_DSW E1000_ICR_DSW +#define E1000_IMS_PHYINT E1000_ICR_PHYINT +#define E1000_IMS_EPRST E1000_ICR_EPRST + +/* Interrupt Mask Clear */ +#define E1000_IMC_TXDW E1000_ICR_TXDW /* Transmit desc written back */ +#define E1000_IMC_TXQE E1000_ICR_TXQE /* Transmit Queue empty */ +#define E1000_IMC_LSC E1000_ICR_LSC /* Link Status Change */ +#define E1000_IMC_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */ +#define E1000_IMC_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. 
threshold */ +#define E1000_IMC_RXO E1000_ICR_RXO /* rx overrun */ +#define E1000_IMC_RXT0 E1000_ICR_RXT0 /* rx timer intr */ +#define E1000_IMC_MDAC E1000_ICR_MDAC /* MDIO access complete */ +#define E1000_IMC_RXCFG E1000_ICR_RXCFG /* RX /c/ ordered set */ +#define E1000_IMC_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */ +#define E1000_IMC_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */ +#define E1000_IMC_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */ +#define E1000_IMC_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */ +#define E1000_IMC_TXD_LOW E1000_ICR_TXD_LOW +#define E1000_IMC_SRPD E1000_ICR_SRPD +#define E1000_IMC_ACK E1000_ICR_ACK /* Receive Ack frame */ +#define E1000_IMC_MNG E1000_ICR_MNG /* Manageability event */ +#define E1000_IMC_DOCK E1000_ICR_DOCK /* Dock/Undock */ +#define E1000_IMC_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */ +#define E1000_IMC_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */ +#define E1000_IMC_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer parity error */ +#define E1000_IMC_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */ +#define E1000_IMC_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */ +#define E1000_IMC_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */ +#define E1000_IMC_DSW E1000_ICR_DSW +#define E1000_IMC_PHYINT E1000_ICR_PHYINT +#define E1000_IMC_EPRST E1000_ICR_EPRST + +/* Receive Control */ +#define E1000_RCTL_RST 0x00000001 /* Software reset */ +#define E1000_RCTL_EN 0x00000002 /* enable */ +#define E1000_RCTL_SBP 0x00000004 /* store bad packet */ +#define E1000_RCTL_UPE 0x00000008 /* unicast promiscuous enable */ +#define E1000_RCTL_MPE 0x00000010 /* multicast promiscuous enab */ +#define E1000_RCTL_LPE 0x00000020 /* long packet enable */ +#define E1000_RCTL_LBM_NO 0x00000000 /* no loopback mode */ +#define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */ +#define E1000_RCTL_LBM_SLP 0x00000080 /* serial link loopback mode */ +#define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */ +#define E1000_RCTL_DTYP_MASK 0x00000C00 /* Descriptor type mask */ +#define E1000_RCTL_DTYP_PS 0x00000400 /* Packet Split descriptor */ +#define E1000_RCTL_RDMTS_HALF 0x00000000 /* rx desc min threshold size */ +#define E1000_RCTL_RDMTS_QUAT 0x00000100 /* rx desc min threshold size */ +#define E1000_RCTL_RDMTS_EIGTH 0x00000200 /* rx desc min threshold size */ +#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */ +#define E1000_RCTL_MO_0 0x00000000 /* multicast offset 11:0 */ +#define E1000_RCTL_MO_1 0x00001000 /* multicast offset 12:1 */ +#define E1000_RCTL_MO_2 0x00002000 /* multicast offset 13:2 */ +#define E1000_RCTL_MO_3 0x00003000 /* multicast offset 15:4 */ +#define E1000_RCTL_MDR 0x00004000 /* multicast desc ring 0 */ +#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */ +/* these buffer sizes are valid if E1000_RCTL_BSEX is 0 */ +#define E1000_RCTL_SZ_2048 0x00000000 /* rx buffer size 2048 */ +#define E1000_RCTL_SZ_1024 0x00010000 /* rx buffer size 1024 */ +#define E1000_RCTL_SZ_512 0x00020000 /* rx buffer size 512 */ +#define E1000_RCTL_SZ_256 0x00030000 /* rx buffer size 256 */ +/* these buffer sizes are valid if E1000_RCTL_BSEX is 1 */ +#define E1000_RCTL_SZ_16384 0x00010000 /* rx buffer size 16384 */ +#define E1000_RCTL_SZ_8192 0x00020000 /* rx buffer size 8192 */ +#define E1000_RCTL_SZ_4096 0x00030000 /* rx buffer size 4096 */ +#define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */ +#define 
E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */ +#define E1000_RCTL_CFI 0x00100000 /* canonical form indicator */ +#define E1000_RCTL_DPF 0x00400000 /* discard pause frames */ +#define E1000_RCTL_PMCF 0x00800000 /* pass MAC control frames */ +#define E1000_RCTL_BSEX 0x02000000 /* Buffer size extension */ +#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */ +#define E1000_RCTL_FLXBUF_MASK 0x78000000 /* Flexible buffer size */ +#define E1000_RCTL_FLXBUF_SHIFT 27 /* Flexible buffer shift */ + + +#define E1000_EEPROM_SWDPIN0 0x0001 /* SWDPIN 0 EEPROM Value */ +#define E1000_EEPROM_LED_LOGIC 0x0020 /* Led Logic Word */ +#define E1000_EEPROM_RW_REG_DATA 16 /* Offset to data in EEPROM read/write registers */ +#define E1000_EEPROM_RW_REG_DONE 0x10 /* Offset to READ/WRITE done bit */ +#define E1000_EEPROM_RW_REG_START 1 /* First bit for telling part to start operation */ +#define E1000_EEPROM_RW_ADDR_SHIFT 8 /* Shift to the address bits */ +#define E1000_EEPROM_POLL_WRITE 1 /* Flag for polling for write complete */ +#define E1000_EEPROM_POLL_READ 0 /* Flag for polling for read complete */ + +/* 82574 EERD/EEWR registers layout */ +#define E1000_EERW_START BIT(0) +#define E1000_EERW_DONE BIT(1) +#define E1000_EERW_ADDR_SHIFT 2 +#define E1000_EERW_ADDR_MASK ((1L << 14) - 1) +#define E1000_EERW_DATA_SHIFT 16 +#define E1000_EERW_DATA_MASK ((1L << 16) - 1) + +/* Register Bit Masks */ +/* Device Control */ +#define E1000_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */ +#define E1000_CTRL_BEM 0x00000002 /* Endian Mode.0=little,1=big */ +#define E1000_CTRL_PRIOR 0x00000004 /* Priority on PCI. 0=rx,1=fair */ +#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /*Blocks new Master requests */ +#define E1000_CTRL_LRST 0x00000008 /* Link reset. 0=normal,1=reset */ +#define E1000_CTRL_TME 0x00000010 /* Test mode. 
0=normal,1=test */ +#define E1000_CTRL_SLE 0x00000020 /* Serial Link on 0=dis,1=en */ +#define E1000_CTRL_ASDE 0x00000020 /* Auto-speed detect enable */ +#define E1000_CTRL_SLU 0x00000040 /* Set link up (Force Link) */ +#define E1000_CTRL_ILOS 0x00000080 /* Invert Loss-Of Signal */ +#define E1000_CTRL_SPD_SEL 0x00000300 /* Speed Select Mask */ +#define E1000_CTRL_SPD_10 0x00000000 /* Force 10Mb */ +#define E1000_CTRL_SPD_100 0x00000100 /* Force 100Mb */ +#define E1000_CTRL_SPD_1000 0x00000200 /* Force 1Gb */ +#define E1000_CTRL_BEM32 0x00000400 /* Big Endian 32 mode */ +#define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */ +#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */ +#define E1000_CTRL_D_UD_EN 0x00002000 /* Dock/Undock enable */ +#define E1000_CTRL_D_UD_POLARITY 0x00004000 /* Defined polarity of Dock/Undock indication in SDP[0] */ +#define E1000_CTRL_FORCE_PHY_RESET 0x00008000 /* Reset both PHY ports, through PHYRST_N pin */ +#define E1000_CTRL_SPD_SHIFT 8 /* Speed Select Shift */ + +#define E1000_CTRL_EXT_ASDCHK 0x00001000 /* auto speed detection check */ +#define E1000_CTRL_EXT_EE_RST 0x00002000 /* EEPROM reset */ +#define E1000_CTRL_EXT_LINK_EN 0x00010000 /* enable link status from external LINK_0 and LINK_1 pins */ +#define E1000_CTRL_EXT_EIAME 0x01000000 +#define E1000_CTRL_EXT_IAME 0x08000000 /* Int ACK Auto-mask */ +#define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */ +#define E1000_CTRL_EXT_INT_TIMERS_CLEAR_ENA 0x20000000 +#define E1000_CTRL_EXT_SPD_BYPS 0x00008000 /* Speed Select Bypass */ + +#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */ +#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */ +#define E1000_CTRL_SWDPIN2 0x00100000 /* SWDPIN 2 value */ +#define E1000_CTRL_SWDPIN3 0x00200000 /* SWDPIN 3 value */ +#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */ +#define E1000_CTRL_SWDPIO1 0x00800000 /* SWDPIN 1 input or output */ +#define E1000_CTRL_SWDPIO2 0x01000000 /* SWDPIN 2 input or output */ +#define E1000_CTRL_SWDPIO3 0x02000000 /* SWDPIN 3 input or output */ +#define E1000_CTRL_ADVD3WUC 0x00100000 /* D3 WUC */ +#define E1000_CTRL_RST 0x04000000 /* Global reset */ +#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */ +#define E1000_CTRL_TFCE 0x10000000 /* Transmit flow control enable */ +#define E1000_CTRL_RTE 0x20000000 /* Routing tag enable */ +#define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */ +#define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */ +#define E1000_CTRL_SW2FW_INT 0x02000000 /* Initiate an interrupt to manageability engine */ + +/* Device Status */ +#define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */ +#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */ +#define E1000_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */ +#define E1000_STATUS_FUNC_SHIFT 2 +#define E1000_STATUS_FUNC_0 0x00000000 /* Function 0 */ +#define E1000_STATUS_FUNC_1 0x00000004 /* Function 1 */ +#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */ +#define E1000_STATUS_TBIMODE 0x00000020 /* TBI mode */ +#define E1000_STATUS_SPEED_MASK 0x000000C0 +#define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */ +#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */ +#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ +#define E1000_STATUS_LAN_INIT_DONE 0x00000200 /* Lan Init Completion + by EEPROM/Flash */ +#define E1000_STATUS_ASDV 0x00000300 /* Auto speed detect value */ +#define E1000_STATUS_ASDV_10 0x00000000 /* ASDV 10Mb */ +#define E1000_STATUS_ASDV_100 
0x00000100 /* ASDV 100Mb */ +#define E1000_STATUS_ASDV_1000 0x00000200 /* ASDV 1Gb */ +#define E1000_STATUS_DOCK_CI 0x00000800 /* Change in Dock/Undock state. Clear on write '0'. */ +#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Status of Master requests. */ +#define E1000_STATUS_MTXCKOK 0x00000400 /* MTX clock running OK */ +#define E1000_STATUS_PHYRA 0x00000400 /* PHY Reset Asserted */ +#define E1000_STATUS_PCI66 0x00000800 /* In 66Mhz slot */ +#define E1000_STATUS_BUS64 0x00001000 /* In 64 bit slot */ +#define E1000_STATUS_PCIX_MODE 0x00002000 /* PCI-X mode */ +#define E1000_STATUS_PCIX_SPEED 0x0000C000 /* PCI-X bus speed */ +#define E1000_STATUS_BMC_SKU_0 0x00100000 /* BMC USB redirect disabled */ +#define E1000_STATUS_BMC_SKU_1 0x00200000 /* BMC SRAM disabled */ +#define E1000_STATUS_BMC_SKU_2 0x00400000 /* BMC SDRAM disabled */ +#define E1000_STATUS_BMC_CRYPTO 0x00800000 /* BMC crypto disabled */ +#define E1000_STATUS_BMC_LITE 0x01000000 /* BMC external code execution disabled */ +#define E1000_STATUS_RGMII_ENABLE 0x02000000 /* RGMII disabled */ +#define E1000_STATUS_FUSE_8 0x04000000 +#define E1000_STATUS_FUSE_9 0x08000000 +#define E1000_STATUS_SERDES0_DIS 0x10000000 /* SERDES disabled on port 0 */ +#define E1000_STATUS_SERDES1_DIS 0x20000000 /* SERDES disabled on port 1 */ +#define E1000_STATUS_SPEED_SHIFT 6 +#define E1000_STATUS_ASDV_SHIFT 8 + +/* EEPROM/Flash Control */ +#define E1000_EECD_SK 0x00000001 /* EEPROM Clock */ +#define E1000_EECD_CS 0x00000002 /* EEPROM Chip Select */ +#define E1000_EECD_DI 0x00000004 /* EEPROM Data In */ +#define E1000_EECD_DO 0x00000008 /* EEPROM Data Out */ +#define E1000_EECD_FWE_MASK 0x00000030 +#define E1000_EECD_FWE_DIS 0x00000010 /* Disable FLASH writes */ +#define E1000_EECD_FWE_EN 0x00000020 /* Enable FLASH writes */ +#define E1000_EECD_FWE_SHIFT 4 +#define E1000_EECD_REQ 0x00000040 /* EEPROM Access Request */ +#define E1000_EECD_GNT 0x00000080 /* EEPROM Access Grant */ +#define E1000_EECD_PRES 0x00000100 /* EEPROM Present */ +#define E1000_EECD_SIZE 0x00000200 /* EEPROM Size (0=64 word 1=256 word) */ +#define E1000_EECD_ADDR_BITS 0x00000400 /* EEPROM Addressing bits based on type + * (0-small, 1-large) */ +#define E1000_EECD_TYPE 0x00002000 /* EEPROM Type (1-SPI, 0-Microwire) */ +#ifndef E1000_EEPROM_GRANT_ATTEMPTS +#define E1000_EEPROM_GRANT_ATTEMPTS 1000 /* EEPROM # attempts to gain grant */ +#endif +#define E1000_EECD_AUTO_RD 0x00000200 /* EEPROM Auto Read done */ +#define E1000_EECD_SIZE_EX_MASK 0x00007800 /* EEprom Size */ +#define E1000_EECD_SIZE_EX_SHIFT 11 +#define E1000_EECD_NVADDS 0x00018000 /* NVM Address Size */ +#define E1000_EECD_SELSHAD 0x00020000 /* Select Shadow RAM */ +#define E1000_EECD_INITSRAM 0x00040000 /* Initialize Shadow RAM */ +#define E1000_EECD_FLUPD 0x00080000 /* Update FLASH */ +#define E1000_EECD_AUPDEN 0x00100000 /* Enable Autonomous FLASH update */ +#define E1000_EECD_SHADV 0x00200000 /* Shadow RAM Data Valid */ +#define E1000_EECD_SEC1VAL 0x00400000 /* Sector One Valid */ + + +#define E1000_EECD_SECVAL_SHIFT 22 +#define E1000_STM_OPCODE 0xDB00 +#define E1000_HICR_FW_RESET 0xC0 + +#define E1000_SHADOW_RAM_WORDS 2048 +#define E1000_ICH_NVM_SIG_WORD 0x13 +#define E1000_ICH_NVM_SIG_MASK 0xC0 + +/* MDI Control */ +#define E1000_MDIC_DATA_MASK 0x0000FFFF +#define E1000_MDIC_REG_MASK 0x001F0000 +#define E1000_MDIC_REG_SHIFT 16 +#define E1000_MDIC_PHY_MASK 0x03E00000 +#define E1000_MDIC_PHY_SHIFT 21 +#define E1000_MDIC_OP_WRITE 0x04000000 +#define E1000_MDIC_OP_READ 0x08000000 +#define E1000_MDIC_READY 
0x10000000 +#define E1000_MDIC_INT_EN 0x20000000 +#define E1000_MDIC_ERROR 0x40000000 + +/* Rx Interrupt Delay Timer */ +#define E1000_RDTR_FPD BIT(31) + +/* Tx Interrupt Delay Timer */ +#define E1000_TIDV_FPD BIT(31) + +/* Delay increments in nanoseconds for delayed interrupts registers */ +#define E1000_INTR_DELAY_NS_RES (1024) + +/* Delay increments in nanoseconds for interrupt throttling registers */ +#define E1000_INTR_THROTTLING_NS_RES (256) + +/* EEPROM Commands - Microwire */ +#define EEPROM_READ_OPCODE_MICROWIRE 0x6 /* EEPROM read opcode */ +#define EEPROM_WRITE_OPCODE_MICROWIRE 0x5 /* EEPROM write opcode */ +#define EEPROM_ERASE_OPCODE_MICROWIRE 0x7 /* EEPROM erase opcode */ +#define EEPROM_EWEN_OPCODE_MICROWIRE 0x13 /* EEPROM erase/write enable */ +#define EEPROM_EWDS_OPCODE_MICROWIRE 0x10 /* EEPROM erase/write disable */ + +/* EEPROM Word Offsets */ +#define EEPROM_COMPAT 0x0003 +#define EEPROM_ID_LED_SETTINGS 0x0004 +#define EEPROM_VERSION 0x0005 +#define EEPROM_SERDES_AMPLITUDE 0x0006 /* For SERDES output amplitude adjustment. */ +#define EEPROM_PHY_CLASS_WORD 0x0007 +#define EEPROM_INIT_CONTROL1_REG 0x000A +#define EEPROM_INIT_CONTROL2_REG 0x000F +#define EEPROM_SWDEF_PINS_CTRL_PORT_1 0x0010 +#define EEPROM_INIT_CONTROL3_PORT_B 0x0014 +#define EEPROM_INIT_3GIO_3 0x001A +#define EEPROM_SWDEF_PINS_CTRL_PORT_0 0x0020 +#define EEPROM_INIT_CONTROL3_PORT_A 0x0024 +#define EEPROM_CFG 0x0012 +#define EEPROM_FLASH_VERSION 0x0032 +#define EEPROM_CHECKSUM_REG 0x003F + +#define E1000_EEPROM_CFG_DONE 0x00040000 /* MNG config cycle done */ +#define E1000_EEPROM_CFG_DONE_PORT_1 0x00080000 /* ...for second port */ + +/* PCI Express Control */ +/* 3GIO Control Register - GCR (0x05B00; RW) */ +#define E1000_L0S_ADJUST (1 << 9) +#define E1000_L1_ENTRY_LATENCY_MSB (1 << 23) +#define E1000_L1_ENTRY_LATENCY_LSB (1 << 25 | 1 << 26) + +#define E1000_GCR_RO_BITS (1 << 23 | 1 << 25 | 1 << 26) + +/* MSI-X PBA Clear register */ +#define E1000_PBACLR_VALID_MASK (BIT(5) - 1) + +/* Transmit Descriptor */ +struct e1000_tx_desc { + uint64_t buffer_addr; /* Address of the descriptor's data buffer */ + union { + uint32_t data; + struct { + uint16_t length; /* Data buffer length */ + uint8_t cso; /* Checksum offset */ + uint8_t cmd; /* Descriptor control */ + } flags; + } lower; + union { + uint32_t data; + struct { + uint8_t status; /* Descriptor status */ + uint8_t css; /* Checksum start */ + uint16_t special; + } fields; + } upper; +}; + +/* Transmit Descriptor bit definitions */ +#define E1000_TXD_DTYP_D 0x00100000 /* Data Descriptor */ +#define E1000_TXD_DTYP_C 0x00000000 /* Context Descriptor */ +#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ +#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ +#define E1000_TXD_CMD_EOP 0x01000000 /* End of Packet */ +#define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +#define E1000_TXD_CMD_IC 0x04000000 /* Insert Checksum */ +#define E1000_TXD_CMD_RS 0x08000000 /* Report Status */ +#define E1000_TXD_CMD_RPS 0x10000000 /* Report Packet Sent */ +#define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */ +#define E1000_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */ +#define E1000_TXD_CMD_IDE 0x80000000 /* Enable Tidv register */ +#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */ +#define E1000_TXD_STAT_EC 0x00000002 /* Excess Collisions */ +#define E1000_TXD_STAT_LC
0x00000004 /* Late Collisions */ +#define E1000_TXD_STAT_TU 0x00000008 /* Transmit underrun */ +#define E1000_TXD_CMD_TCP 0x01000000 /* TCP packet */ +#define E1000_TXD_CMD_IP 0x02000000 /* IP packet */ +#define E1000_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */ +#define E1000_TXD_CMD_SNAP 0x40000000 /* Update SNAP header */ +#define E1000_TXD_STAT_TC 0x00000004 /* Tx Underrun */ +#define E1000_TXD_EXTCMD_TSTAMP 0x00000010 /* IEEE1588 Timestamp packet */ + +/* Transmit Control */ +#define E1000_TCTL_RST 0x00000001 /* software reset */ +#define E1000_TCTL_EN 0x00000002 /* enable tx */ +#define E1000_TCTL_BCE 0x00000004 /* busy check enable */ +#define E1000_TCTL_PSP 0x00000008 /* pad short packets */ +#define E1000_TCTL_CT 0x00000ff0 /* collision threshold */ +#define E1000_TCTL_COLD 0x003ff000 /* collision distance */ +#define E1000_TCTL_SWXOFF 0x00400000 /* SW Xoff transmission */ +#define E1000_TCTL_PBE 0x00800000 /* Packet Burst Enable */ +#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */ +#define E1000_TCTL_NRTU 0x02000000 /* No Re-transmit on underrun */ +#define E1000_TCTL_MULR 0x10000000 /* Multiple request support */ + +/* Legacy Receive Descriptor */ +struct e1000_rx_desc { + uint64_t buffer_addr; /* Address of the descriptor's data buffer */ + uint16_t length; /* Length of data DMAed into data buffer */ + uint16_t csum; /* Packet checksum */ + uint8_t status; /* Descriptor status */ + uint8_t errors; /* Descriptor Errors */ + uint16_t special; +}; + +/* Extended Receive Descriptor */ +union e1000_rx_desc_extended { + struct { + uint64_t buffer_addr; + uint64_t reserved; + } read; + struct { + struct { + uint32_t mrq; /* Multiple Rx Queues */ + union { + uint32_t rss; /* RSS Hash */ + struct { + uint16_t ip_id; /* IP id */ + uint16_t csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + uint32_t status_error; /* ext status/error */ + uint16_t length; + uint16_t vlan; /* VLAN tag */ + } upper; + } wb; /* writeback */ +}; + +#define MAX_PS_BUFFERS 4 + +/* Number of packet split data buffers (not including the header buffer) */ +#define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1) + +/* Receive Descriptor - Packet Split */ +union e1000_rx_desc_packet_split { + struct { + /* one buffer for protocol header(s), three data buffers */ + uint64_t buffer_addr[MAX_PS_BUFFERS]; + } read; + struct { + struct { + uint32_t mrq; /* Multiple Rx Queues */ + union { + uint32_t rss; /* RSS Hash */ + struct { + uint16_t ip_id; /* IP id */ + uint16_t csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + uint32_t status_error; /* ext status/error */ + uint16_t length0; /* length of buffer 0 */ + uint16_t vlan; /* VLAN tag */ + } middle; + struct { + uint16_t header_status; + /* length of buffers 1-3 */ + uint16_t length[PS_PAGE_BUFFERS]; + } upper; + uint64_t reserved; + } wb; /* writeback */ +}; + +/* Receive Checksum Control bits */ +#define E1000_RXCSUM_IPOFLD 0x100 /* IP Checksum Offload Enable */ +#define E1000_RXCSUM_TUOFLD 0x200 /* TCP/UDP Checksum Offload Enable */ +#define E1000_RXCSUM_PCSD 0x2000 /* Packet Checksum Disable */ + +#define E1000_RING_DESC_LEN (16) +#define E1000_RING_DESC_LEN_SHIFT (4) + +#define E1000_MIN_RX_DESC_LEN E1000_RING_DESC_LEN +#define E1000_MAX_RX_DESC_LEN (sizeof(union e1000_rx_desc_packet_split)) + +/* Receive Descriptor bit definitions */ +#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */ +#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */ +#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */ 
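+/*
+ * Usage sketch (illustrative only, not part of the device model): when
+ * reaping a legacy RX descriptor, a guest driver typically waits for DD
+ * and treats EOP as the frame boundary, roughly:
+ *
+ *     uint8_t st = desc->status;
+ *     if ((st & E1000_RXD_STAT_DD) && (st & E1000_RXD_STAT_EOP)) {
+ *         deliver(buf, le16_to_cpu(desc->length));
+ *     }
+ *
+ * where desc, buf and deliver() are hypothetical guest-side names.
+ */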
+#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ +#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ +#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */ +#define E1000_RXD_STAT_IPCS 0x40 /* IP xsum calculated */ +#define E1000_RXD_STAT_PIF 0x80 /* passed in-exact filter */ +#define E1000_RXD_STAT_IPIDV 0x200 /* IP identification valid */ +#define E1000_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */ +#define E1000_RXD_STAT_ACK 0x8000 /* ACK Packet indication */ +#define E1000_RXD_ERR_CE 0x01 /* CRC Error */ +#define E1000_RXD_ERR_SE 0x02 /* Symbol Error */ +#define E1000_RXD_ERR_SEQ 0x04 /* Sequence Error */ +#define E1000_RXD_ERR_CXE 0x10 /* Carrier Extension Error */ +#define E1000_RXD_ERR_TCPE 0x20 /* TCP/UDP Checksum Error */ +#define E1000_RXD_ERR_IPE 0x40 /* IP Checksum Error */ +#define E1000_RXD_ERR_RXE 0x80 /* Rx Data Error */ +#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ +#define E1000_RXD_SPC_PRI_MASK 0xE000 /* Priority is in upper 3 bits */ +#define E1000_RXD_SPC_PRI_SHIFT 13 +#define E1000_RXD_SPC_CFI_MASK 0x1000 /* CFI is bit 12 */ +#define E1000_RXD_SPC_CFI_SHIFT 12 + +/* RX packet types */ +#define E1000_RXD_PKT_MAC (0) +#define E1000_RXD_PKT_IP4 (1) +#define E1000_RXD_PKT_IP4_XDP (2) +#define E1000_RXD_PKT_IP6 (5) +#define E1000_RXD_PKT_IP6_XDP (6) + +#define E1000_RXD_PKT_TYPE(t) ((t) << 16) + +#define E1000_RXDEXT_STATERR_CE 0x01000000 +#define E1000_RXDEXT_STATERR_SE 0x02000000 +#define E1000_RXDEXT_STATERR_SEQ 0x04000000 +#define E1000_RXDEXT_STATERR_CXE 0x10000000 +#define E1000_RXDEXT_STATERR_TCPE 0x20000000 +#define E1000_RXDEXT_STATERR_IPE 0x40000000 +#define E1000_RXDEXT_STATERR_RXE 0x80000000 + +#define E1000_RXDPS_HDRSTAT_HDRSP 0x00008000 +#define E1000_RXDPS_HDRSTAT_HDRLEN_MASK 0x000003FF + +/* Receive Address */ +#define E1000_RAH_AV 0x80000000 /* Receive descriptor valid */ + +/* Offload Context Descriptor */ +struct e1000_context_desc { + union { + uint32_t ip_config; + struct { + uint8_t ipcss; /* IP checksum start */ + uint8_t ipcso; /* IP checksum offset */ + uint16_t ipcse; /* IP checksum end */ + } ip_fields; + } lower_setup; + union { + uint32_t tcp_config; + struct { + uint8_t tucss; /* TCP checksum start */ + uint8_t tucso; /* TCP checksum offset */ + uint16_t tucse; /* TCP checksum end */ + } tcp_fields; + } upper_setup; + uint32_t cmd_and_length; /* */ + union { + uint32_t data; + struct { + uint8_t status; /* Descriptor status */ + uint8_t hdr_len; /* Header length */ + uint16_t mss; /* Maximum segment size */ + } fields; + } tcp_seg_setup; +}; + +/* Offload data descriptor */ +struct e1000_data_desc { + uint64_t buffer_addr; /* Address of the descriptor's buffer address */ + union { + uint32_t data; + struct { + uint16_t length; /* Data buffer length */ + uint8_t typ_len_ext; /* */ + uint8_t cmd; /* */ + } flags; + } lower; + union { + uint32_t data; + struct { + uint8_t status; /* Descriptor status */ + uint8_t popts; /* Packet Options */ + uint16_t special; /* */ + } fields; + } upper; +}; + +/* Management Control */ +#define E1000_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */ +#define E1000_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */ +#define E1000_MANC_R_ON_FORCE 0x00000004 /* Reset on Force TCO - RO */ +#define E1000_MANC_RMCP_EN 0x00000100 /* Enable RCMP 026Fh Filtering */ +#define E1000_MANC_0298_EN 0x00000200 /* Enable RCMP 0298h Filtering */ +#define E1000_MANC_IPV4_EN 0x00000400 /* Enable IPv4 */ +#define E1000_MANC_IPV6_EN 0x00000800 /* Enable IPv6 */ +#define E1000_MANC_SNAP_EN
0x00001000 /* Accept LLC/SNAP */ +#define E1000_MANC_ARP_EN 0x00002000 /* Enable ARP Request Filtering */ +#define E1000_MANC_NEIGHBOR_EN 0x00004000 /* Enable Neighbor Discovery + * Filtering */ +#define E1000_MANC_ARP_RES_EN 0x00008000 /* Enable ARP response Filtering */ +#define E1000_MANC_DIS_IP_CHK_ARP 0x10000000 /* Disable IP address checking */ + /* for ARP packets - in 82574 */ +#define E1000_MANC_TCO_RESET 0x00010000 /* TCO Reset Occurred */ +#define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */ +#define E1000_MANC_REPORT_STATUS 0x00040000 /* Status Reporting Enabled */ +#define E1000_MANC_RCV_ALL 0x00080000 /* Receive All Enabled */ +#define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */ +#define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000 /* Enable MAC address + * filtering */ +#define E1000_MANC_EN_MNG2HOST 0x00200000 /* Enable MNG packets to host + * memory */ +#define E1000_MANC_EN_IP_ADDR_FILTER 0x00400000 /* Enable IP address + * filtering */ +#define E1000_MANC_EN_XSUM_FILTER 0x00800000 /* Enable checksum filtering */ +#define E1000_MANC_BR_EN 0x01000000 /* Enable broadcast filtering */ +#define E1000_MANC_SMB_REQ 0x01000000 /* SMBus Request */ +#define E1000_MANC_SMB_GNT 0x02000000 /* SMBus Grant */ +#define E1000_MANC_SMB_CLK_IN 0x04000000 /* SMBus Clock In */ +#define E1000_MANC_SMB_DATA_IN 0x08000000 /* SMBus Data In */ +#define E1000_MANC_SMB_DATA_OUT 0x10000000 /* SMBus Data Out */ +#define E1000_MANC_SMB_CLK_OUT 0x20000000 /* SMBus Clock Out */ + +#define E1000_MANC_SMB_DATA_OUT_SHIFT 28 /* SMBus Data Out Shift */ +#define E1000_MANC_SMB_CLK_OUT_SHIFT 29 /* SMBus Clock Out Shift */ + +/* FACTPS Control */ +#define E1000_FACTPS_LAN0_ON 0x00000004 /* Lan 0 enable */ + +/* For checksumming, the sum of all words in the EEPROM should equal 0xBABA. */ +#define EEPROM_SUM 0xBABA + +/* I/O-Mapped Access to Internal Registers, Memories, and Flash */ +#define E1000_IOADDR 0x00 +#define E1000_IODATA 0x04 + +#endif /* HW_E1000_REGS_H */ diff --git a/hw/net/e1000e.c b/hw/net/e1000e.c new file mode 100644 index 000000000..ac96f7665 --- /dev/null +++ b/hw/net/e1000e.c @@ -0,0 +1,735 @@ +/* +* QEMU INTEL 82574 GbE NIC emulation +* +* Software developer's manuals: +* http://www.intel.com/content/dam/doc/datasheet/82574l-gbe-controller-datasheet.pdf +* +* Copyright (c) 2015 Ravello Systems LTD (http://ravellosystems.com) +* Developed by Daynix Computing LTD (http://www.daynix.com) +* +* Authors: +* Dmitry Fleytman +* Leonid Bloch +* Yan Vugenfirer +* +* Based on work done by: +* Nir Peleg, Tutis Systems Ltd. for Qumranet Inc. +* Copyright (c) 2008 Qumranet +* Based on work done by: +* Copyright (c) 2007 Dan Aloni +* Copyright (c) 2004 Antony T Curtis +* +* This library is free software; you can redistribute it and/or +* modify it under the terms of the GNU Lesser General Public +* License as published by the Free Software Foundation; either +* version 2.1 of the License, or (at your option) any later version. +* +* This library is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +* Lesser General Public License for more details. +* +* You should have received a copy of the GNU Lesser General Public +* License along with this library; if not, see <http://www.gnu.org/licenses/>.
+*/ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "net/eth.h" +#include "net/net.h" +#include "net/tap.h" +#include "qemu/module.h" +#include "qemu/range.h" +#include "sysemu/sysemu.h" +#include "hw/hw.h" +#include "hw/pci/msi.h" +#include "hw/pci/msix.h" +#include "hw/qdev-properties.h" +#include "migration/vmstate.h" + +#include "e1000_regs.h" + +#include "e1000x_common.h" +#include "e1000e_core.h" + +#include "trace.h" +#include "qapi/error.h" +#include "qom/object.h" + +#define TYPE_E1000E "e1000e" +OBJECT_DECLARE_SIMPLE_TYPE(E1000EState, E1000E) + +struct E1000EState { + PCIDevice parent_obj; + NICState *nic; + NICConf conf; + + MemoryRegion mmio; + MemoryRegion flash; + MemoryRegion io; + MemoryRegion msix; + + uint32_t ioaddr; + + uint16_t subsys_ven; + uint16_t subsys; + + uint16_t subsys_ven_used; + uint16_t subsys_used; + + bool disable_vnet; + + E1000ECore core; + bool init_vet; +}; + +#define E1000E_MMIO_IDX 0 +#define E1000E_FLASH_IDX 1 +#define E1000E_IO_IDX 2 +#define E1000E_MSIX_IDX 3 + +#define E1000E_MMIO_SIZE (128 * KiB) +#define E1000E_FLASH_SIZE (128 * KiB) +#define E1000E_IO_SIZE (32) +#define E1000E_MSIX_SIZE (16 * KiB) + +#define E1000E_MSIX_TABLE (0x0000) +#define E1000E_MSIX_PBA (0x2000) + +static uint64_t +e1000e_mmio_read(void *opaque, hwaddr addr, unsigned size) +{ + E1000EState *s = opaque; + return e1000e_core_read(&s->core, addr, size); +} + +static void +e1000e_mmio_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + E1000EState *s = opaque; + e1000e_core_write(&s->core, addr, val, size); +} + +static bool +e1000e_io_get_reg_index(E1000EState *s, uint32_t *idx) +{ + if (s->ioaddr < 0x1FFFF) { + *idx = s->ioaddr; + return true; + } + + if (s->ioaddr < 0x7FFFF) { + trace_e1000e_wrn_io_addr_undefined(s->ioaddr); + return false; + } + + if (s->ioaddr < 0xFFFFF) { + trace_e1000e_wrn_io_addr_flash(s->ioaddr); + return false; + } + + trace_e1000e_wrn_io_addr_unknown(s->ioaddr); + return false; +} + +static uint64_t +e1000e_io_read(void *opaque, hwaddr addr, unsigned size) +{ + E1000EState *s = opaque; + uint32_t idx = 0; + uint64_t val; + + switch (addr) { + case E1000_IOADDR: + trace_e1000e_io_read_addr(s->ioaddr); + return s->ioaddr; + case E1000_IODATA: + if (e1000e_io_get_reg_index(s, &idx)) { + val = e1000e_core_read(&s->core, idx, sizeof(val)); + trace_e1000e_io_read_data(idx, val); + return val; + } + return 0; + default: + trace_e1000e_wrn_io_read_unknown(addr); + return 0; + } +} + +static void +e1000e_io_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + E1000EState *s = opaque; + uint32_t idx = 0; + + switch (addr) { + case E1000_IOADDR: + trace_e1000e_io_write_addr(val); + s->ioaddr = (uint32_t) val; + return; + case E1000_IODATA: + if (e1000e_io_get_reg_index(s, &idx)) { + trace_e1000e_io_write_data(idx, val); + e1000e_core_write(&s->core, idx, val, sizeof(val)); + } + return; + default: + trace_e1000e_wrn_io_write_unknown(addr); + return; + } +} + +static const MemoryRegionOps mmio_ops = { + .read = e1000e_mmio_read, + .write = e1000e_mmio_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .impl = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +static const MemoryRegionOps io_ops = { + .read = e1000e_io_read, + .write = e1000e_io_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .impl = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +static bool +e1000e_nc_can_receive(NetClientState *nc) +{ + E1000EState *s = qemu_get_nic_opaque(nc); + return e1000e_can_receive(&s->core); +} + 
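+/*
+ * Note on the I/O window above (illustrative): a guest reaches any device
+ * register through the two 32-bit I/O ports by writing the register offset
+ * to E1000_IOADDR and then accessing E1000_IODATA, e.g. with a hypothetical
+ * io_base and reg_offset:
+ *
+ *     outl(io_base + E1000_IOADDR, reg_offset);
+ *     val = inl(io_base + E1000_IODATA);
+ *
+ * e1000e_io_get_reg_index() validates the offset before the access is
+ * forwarded to e1000e_core_read()/e1000e_core_write().
+ */
+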
+static ssize_t +e1000e_nc_receive_iov(NetClientState *nc, const struct iovec *iov, int iovcnt) +{ + E1000EState *s = qemu_get_nic_opaque(nc); + return e1000e_receive_iov(&s->core, iov, iovcnt); +} + +static ssize_t +e1000e_nc_receive(NetClientState *nc, const uint8_t *buf, size_t size) +{ + E1000EState *s = qemu_get_nic_opaque(nc); + return e1000e_receive(&s->core, buf, size); +} + +static void +e1000e_set_link_status(NetClientState *nc) +{ + E1000EState *s = qemu_get_nic_opaque(nc); + e1000e_core_set_link_status(&s->core); +} + +static NetClientInfo net_e1000e_info = { + .type = NET_CLIENT_DRIVER_NIC, + .size = sizeof(NICState), + .can_receive = e1000e_nc_can_receive, + .receive = e1000e_nc_receive, + .receive_iov = e1000e_nc_receive_iov, + .link_status_changed = e1000e_set_link_status, +}; + +/* +* EEPROM (NVM) contents documented in Table 36, section 6.1 +* and generally 6.1.2 Software accessed words. +*/ +static const uint16_t e1000e_eeprom_template[64] = { + /* Address | Compat. | ImVer | Compat. */ + 0x0000, 0x0000, 0x0000, 0x0420, 0xf746, 0x2010, 0xffff, 0xffff, + /* PBA |ICtrl1 | SSID | SVID | DevID |-------|ICtrl2 */ + 0x0000, 0x0000, 0x026b, 0x0000, 0x8086, 0x0000, 0x0000, 0x8058, + /* NVM words 1,2,3 |-------------------------------|PCI-EID*/ + 0x0000, 0x2001, 0x7e7c, 0xffff, 0x1000, 0x00c8, 0x0000, 0x2704, + /* PCIe Init. Conf 1,2,3 |PCICtrl|PHY|LD1|-------| RevID | LD0,2 */ + 0x6cc9, 0x3150, 0x070e, 0x460b, 0x2d84, 0x0100, 0xf000, 0x0706, + /* FLPAR |FLANADD|LAN-PWR|FlVndr |ICtrl3 |APTSMBA|APTRxEP|APTSMBC*/ + 0x6000, 0x0080, 0x0f04, 0x7fff, 0x4f01, 0xc600, 0x0000, 0x20ff, + /* APTIF | APTMC |APTuCP |LSWFWID|MSWFWID|NC-SIMC|NC-SIC | VPDP */ + 0x0028, 0x0003, 0x0000, 0x0000, 0x0000, 0x0003, 0x0000, 0xffff, + /* SW Section */ + 0x0100, 0xc000, 0x121c, 0xc007, 0xffff, 0xffff, 0xffff, 0xffff, + /* SW Section |CHKSUM */ + 0xffff, 0xffff, 0xffff, 0xffff, 0x0000, 0x0120, 0xffff, 0x0000, +}; + +static void e1000e_core_realize(E1000EState *s) +{ + s->core.owner = &s->parent_obj; + s->core.owner_nic = s->nic; +} + +static void +e1000e_unuse_msix_vectors(E1000EState *s, int num_vectors) +{ + int i; + for (i = 0; i < num_vectors; i++) { + msix_vector_unuse(PCI_DEVICE(s), i); + } +} + +static bool +e1000e_use_msix_vectors(E1000EState *s, int num_vectors) +{ + int i; + for (i = 0; i < num_vectors; i++) { + int res = msix_vector_use(PCI_DEVICE(s), i); + if (res < 0) { + trace_e1000e_msix_use_vector_fail(i, res); + e1000e_unuse_msix_vectors(s, i); + return false; + } + } + return true; +} + +static void +e1000e_init_msix(E1000EState *s) +{ + PCIDevice *d = PCI_DEVICE(s); + int res = msix_init(PCI_DEVICE(s), E1000E_MSIX_VEC_NUM, + &s->msix, + E1000E_MSIX_IDX, E1000E_MSIX_TABLE, + &s->msix, + E1000E_MSIX_IDX, E1000E_MSIX_PBA, + 0xA0, NULL); + + if (res < 0) { + trace_e1000e_msix_init_fail(res); + } else { + if (!e1000e_use_msix_vectors(s, E1000E_MSIX_VEC_NUM)) { + msix_uninit(d, &s->msix, &s->msix); + } + } +} + +static void +e1000e_cleanup_msix(E1000EState *s) +{ + if (msix_present(PCI_DEVICE(s))) { + e1000e_unuse_msix_vectors(s, E1000E_MSIX_VEC_NUM); + msix_uninit(PCI_DEVICE(s), &s->msix, &s->msix); + } +} + +static void +e1000e_init_net_peer(E1000EState *s, PCIDevice *pci_dev, uint8_t *macaddr) +{ + DeviceState *dev = DEVICE(pci_dev); + NetClientState *nc; + int i; + + s->nic = qemu_new_nic(&net_e1000e_info, &s->conf, + object_get_typename(OBJECT(s)), dev->id, s); + + s->core.max_queue_num = s->conf.peers.queues ? 
s->conf.peers.queues - 1 : 0; + + trace_e1000e_mac_set_permanent(MAC_ARG(macaddr)); + memcpy(s->core.permanent_mac, macaddr, sizeof(s->core.permanent_mac)); + + qemu_format_nic_info_str(qemu_get_queue(s->nic), macaddr); + + /* Setup virtio headers */ + if (s->disable_vnet) { + s->core.has_vnet = false; + trace_e1000e_cfg_support_virtio(false); + return; + } else { + s->core.has_vnet = true; + } + + for (i = 0; i < s->conf.peers.queues; i++) { + nc = qemu_get_subqueue(s->nic, i); + if (!nc->peer || !qemu_has_vnet_hdr(nc->peer)) { + s->core.has_vnet = false; + trace_e1000e_cfg_support_virtio(false); + return; + } + } + + trace_e1000e_cfg_support_virtio(true); + + for (i = 0; i < s->conf.peers.queues; i++) { + nc = qemu_get_subqueue(s->nic, i); + qemu_set_vnet_hdr_len(nc->peer, sizeof(struct virtio_net_hdr)); + qemu_using_vnet_hdr(nc->peer, true); + } +} + +static inline uint64_t +e1000e_gen_dsn(uint8_t *mac) +{ + return (uint64_t)(mac[5]) | + (uint64_t)(mac[4]) << 8 | + (uint64_t)(mac[3]) << 16 | + (uint64_t)(0x00FF) << 24 | + (uint64_t)(0x00FF) << 32 | + (uint64_t)(mac[2]) << 40 | + (uint64_t)(mac[1]) << 48 | + (uint64_t)(mac[0]) << 56; +} + +static int +e1000e_add_pm_capability(PCIDevice *pdev, uint8_t offset, uint16_t pmc) +{ + Error *local_err = NULL; + int ret = pci_add_capability(pdev, PCI_CAP_ID_PM, offset, + PCI_PM_SIZEOF, &local_err); + + if (local_err) { + error_report_err(local_err); + return ret; + } + + pci_set_word(pdev->config + offset + PCI_PM_PMC, + PCI_PM_CAP_VER_1_1 | + pmc); + + pci_set_word(pdev->wmask + offset + PCI_PM_CTRL, + PCI_PM_CTRL_STATE_MASK | + PCI_PM_CTRL_PME_ENABLE | + PCI_PM_CTRL_DATA_SEL_MASK); + + pci_set_word(pdev->w1cmask + offset + PCI_PM_CTRL, + PCI_PM_CTRL_PME_STATUS); + + return ret; +} + +static void e1000e_write_config(PCIDevice *pci_dev, uint32_t address, + uint32_t val, int len) +{ + E1000EState *s = E1000E(pci_dev); + + pci_default_write_config(pci_dev, address, val, len); + + if (range_covers_byte(address, len, PCI_COMMAND) && + (pci_dev->config[PCI_COMMAND] & PCI_COMMAND_MASTER)) { + e1000e_start_recv(&s->core); + } +} + +static void e1000e_pci_realize(PCIDevice *pci_dev, Error **errp) +{ + static const uint16_t e1000e_pmrb_offset = 0x0C8; + static const uint16_t e1000e_pcie_offset = 0x0E0; + static const uint16_t e1000e_aer_offset = 0x100; + static const uint16_t e1000e_dsn_offset = 0x140; + E1000EState *s = E1000E(pci_dev); + uint8_t *macaddr; + int ret; + + trace_e1000e_cb_pci_realize(); + + pci_dev->config_write = e1000e_write_config; + + pci_dev->config[PCI_CACHE_LINE_SIZE] = 0x10; + pci_dev->config[PCI_INTERRUPT_PIN] = 1; + + pci_set_word(pci_dev->config + PCI_SUBSYSTEM_VENDOR_ID, s->subsys_ven); + pci_set_word(pci_dev->config + PCI_SUBSYSTEM_ID, s->subsys); + + s->subsys_ven_used = s->subsys_ven; + s->subsys_used = s->subsys; + + /* Define IO/MMIO regions */ + memory_region_init_io(&s->mmio, OBJECT(s), &mmio_ops, s, + "e1000e-mmio", E1000E_MMIO_SIZE); + pci_register_bar(pci_dev, E1000E_MMIO_IDX, + PCI_BASE_ADDRESS_SPACE_MEMORY, &s->mmio); + + /* + * We provide a dummy implementation for the flash BAR + * for drivers that may theoretically probe for its presence. 
+ */ + memory_region_init(&s->flash, OBJECT(s), + "e1000e-flash", E1000E_FLASH_SIZE); + pci_register_bar(pci_dev, E1000E_FLASH_IDX, + PCI_BASE_ADDRESS_SPACE_MEMORY, &s->flash); + + memory_region_init_io(&s->io, OBJECT(s), &io_ops, s, + "e1000e-io", E1000E_IO_SIZE); + pci_register_bar(pci_dev, E1000E_IO_IDX, + PCI_BASE_ADDRESS_SPACE_IO, &s->io); + + memory_region_init(&s->msix, OBJECT(s), "e1000e-msix", + E1000E_MSIX_SIZE); + pci_register_bar(pci_dev, E1000E_MSIX_IDX, + PCI_BASE_ADDRESS_SPACE_MEMORY, &s->msix); + + /* Create networking backend */ + qemu_macaddr_default_if_unset(&s->conf.macaddr); + macaddr = s->conf.macaddr.a; + + e1000e_init_msix(s); + + if (pcie_endpoint_cap_v1_init(pci_dev, e1000e_pcie_offset) < 0) { + hw_error("Failed to initialize PCIe capability"); + } + + ret = msi_init(PCI_DEVICE(s), 0xD0, 1, true, false, NULL); + if (ret) { + trace_e1000e_msi_init_fail(ret); + } + + if (e1000e_add_pm_capability(pci_dev, e1000e_pmrb_offset, + PCI_PM_CAP_DSI) < 0) { + hw_error("Failed to initialize PM capability"); + } + + if (pcie_aer_init(pci_dev, PCI_ERR_VER, e1000e_aer_offset, + PCI_ERR_SIZEOF, NULL) < 0) { + hw_error("Failed to initialize AER capability"); + } + + pcie_dev_ser_num_init(pci_dev, e1000e_dsn_offset, + e1000e_gen_dsn(macaddr)); + + e1000e_init_net_peer(s, pci_dev, macaddr); + + /* Initialize core */ + e1000e_core_realize(s); + + e1000e_core_pci_realize(&s->core, + e1000e_eeprom_template, + sizeof(e1000e_eeprom_template), + macaddr); +} + +static void e1000e_pci_uninit(PCIDevice *pci_dev) +{ + E1000EState *s = E1000E(pci_dev); + + trace_e1000e_cb_pci_uninit(); + + e1000e_core_pci_uninit(&s->core); + + pcie_aer_exit(pci_dev); + pcie_cap_exit(pci_dev); + + qemu_del_nic(s->nic); + + e1000e_cleanup_msix(s); + msi_uninit(pci_dev); +} + +static void e1000e_qdev_reset(DeviceState *dev) +{ + E1000EState *s = E1000E(dev); + + trace_e1000e_cb_qdev_reset(); + + e1000e_core_reset(&s->core); + + if (s->init_vet) { + s->core.mac[VET] = ETH_P_VLAN; + } +} + +static int e1000e_pre_save(void *opaque) +{ + E1000EState *s = opaque; + + trace_e1000e_cb_pre_save(); + + e1000e_core_pre_save(&s->core); + + return 0; +} + +static int e1000e_post_load(void *opaque, int version_id) +{ + E1000EState *s = opaque; + + trace_e1000e_cb_post_load(); + + if ((s->subsys != s->subsys_used) || + (s->subsys_ven != s->subsys_ven_used)) { + fprintf(stderr, + "ERROR: Cannot migrate while device properties " + "(subsys/subsys_ven) differ"); + return -1; + } + + return e1000e_core_post_load(&s->core); +} + +static const VMStateDescription e1000e_vmstate_tx = { + .name = "e1000e-tx", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT8(sum_needed, struct e1000e_tx), + VMSTATE_UINT8(props.ipcss, struct e1000e_tx), + VMSTATE_UINT8(props.ipcso, struct e1000e_tx), + VMSTATE_UINT16(props.ipcse, struct e1000e_tx), + VMSTATE_UINT8(props.tucss, struct e1000e_tx), + VMSTATE_UINT8(props.tucso, struct e1000e_tx), + VMSTATE_UINT16(props.tucse, struct e1000e_tx), + VMSTATE_UINT8(props.hdr_len, struct e1000e_tx), + VMSTATE_UINT16(props.mss, struct e1000e_tx), + VMSTATE_UINT32(props.paylen, struct e1000e_tx), + VMSTATE_INT8(props.ip, struct e1000e_tx), + VMSTATE_INT8(props.tcp, struct e1000e_tx), + VMSTATE_BOOL(props.tse, struct e1000e_tx), + VMSTATE_BOOL(cptse, struct e1000e_tx), + VMSTATE_BOOL(skip_cp, struct e1000e_tx), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription e1000e_vmstate_intr_timer = { + .name = "e1000e-intr-timer", + .version_id = 1, + 
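/*
+ * Only the QEMUTimer and its running flag migrate; delay_reg and
+ * delay_resolution_ns are fixed values re-established by
+ * e1000e_intrmgr_initialize_all_timers() when the device is realized
+ * on the destination.
+ */
+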
.minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_TIMER_PTR(timer, E1000IntrDelayTimer), + VMSTATE_BOOL(running, E1000IntrDelayTimer), + VMSTATE_END_OF_LIST() + } +}; + +#define VMSTATE_E1000E_INTR_DELAY_TIMER(_f, _s) \ + VMSTATE_STRUCT(_f, _s, 0, \ + e1000e_vmstate_intr_timer, E1000IntrDelayTimer) + +#define VMSTATE_E1000E_INTR_DELAY_TIMER_ARRAY(_f, _s, _num) \ + VMSTATE_STRUCT_ARRAY(_f, _s, _num, 0, \ + e1000e_vmstate_intr_timer, E1000IntrDelayTimer) + +static const VMStateDescription e1000e_vmstate = { + .name = "e1000e", + .version_id = 1, + .minimum_version_id = 1, + .pre_save = e1000e_pre_save, + .post_load = e1000e_post_load, + .fields = (VMStateField[]) { + VMSTATE_PCI_DEVICE(parent_obj, E1000EState), + VMSTATE_MSIX(parent_obj, E1000EState), + + VMSTATE_UINT32(ioaddr, E1000EState), + VMSTATE_UINT32(core.rxbuf_min_shift, E1000EState), + VMSTATE_UINT8(core.rx_desc_len, E1000EState), + VMSTATE_UINT32_ARRAY(core.rxbuf_sizes, E1000EState, + E1000_PSRCTL_BUFFS_PER_DESC), + VMSTATE_UINT32(core.rx_desc_buf_size, E1000EState), + VMSTATE_UINT16_ARRAY(core.eeprom, E1000EState, E1000E_EEPROM_SIZE), + VMSTATE_UINT16_2DARRAY(core.phy, E1000EState, + E1000E_PHY_PAGES, E1000E_PHY_PAGE_SIZE), + VMSTATE_UINT32_ARRAY(core.mac, E1000EState, E1000E_MAC_SIZE), + VMSTATE_UINT8_ARRAY(core.permanent_mac, E1000EState, ETH_ALEN), + + VMSTATE_UINT32(core.delayed_causes, E1000EState), + + VMSTATE_UINT16(subsys, E1000EState), + VMSTATE_UINT16(subsys_ven, E1000EState), + + VMSTATE_E1000E_INTR_DELAY_TIMER(core.rdtr, E1000EState), + VMSTATE_E1000E_INTR_DELAY_TIMER(core.radv, E1000EState), + VMSTATE_E1000E_INTR_DELAY_TIMER(core.raid, E1000EState), + VMSTATE_E1000E_INTR_DELAY_TIMER(core.tadv, E1000EState), + VMSTATE_E1000E_INTR_DELAY_TIMER(core.tidv, E1000EState), + + VMSTATE_E1000E_INTR_DELAY_TIMER(core.itr, E1000EState), + VMSTATE_BOOL(core.itr_intr_pending, E1000EState), + + VMSTATE_E1000E_INTR_DELAY_TIMER_ARRAY(core.eitr, E1000EState, + E1000E_MSIX_VEC_NUM), + VMSTATE_BOOL_ARRAY(core.eitr_intr_pending, E1000EState, + E1000E_MSIX_VEC_NUM), + + VMSTATE_UINT32(core.itr_guest_value, E1000EState), + VMSTATE_UINT32_ARRAY(core.eitr_guest_value, E1000EState, + E1000E_MSIX_VEC_NUM), + + VMSTATE_UINT16(core.vet, E1000EState), + + VMSTATE_STRUCT_ARRAY(core.tx, E1000EState, E1000E_NUM_QUEUES, 0, + e1000e_vmstate_tx, struct e1000e_tx), + VMSTATE_END_OF_LIST() + } +}; + +static PropertyInfo e1000e_prop_disable_vnet, + e1000e_prop_subsys_ven, + e1000e_prop_subsys; + +static Property e1000e_properties[] = { + DEFINE_NIC_PROPERTIES(E1000EState, conf), + DEFINE_PROP_SIGNED("disable_vnet_hdr", E1000EState, disable_vnet, false, + e1000e_prop_disable_vnet, bool), + DEFINE_PROP_SIGNED("subsys_ven", E1000EState, subsys_ven, + PCI_VENDOR_ID_INTEL, + e1000e_prop_subsys_ven, uint16_t), + DEFINE_PROP_SIGNED("subsys", E1000EState, subsys, 0, + e1000e_prop_subsys, uint16_t), + DEFINE_PROP_BOOL("init-vet", E1000EState, init_vet, true), + DEFINE_PROP_END_OF_LIST(), +}; + +static void e1000e_class_init(ObjectClass *class, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(class); + PCIDeviceClass *c = PCI_DEVICE_CLASS(class); + + c->realize = e1000e_pci_realize; + c->exit = e1000e_pci_uninit; + c->vendor_id = PCI_VENDOR_ID_INTEL; + c->device_id = E1000_DEV_ID_82574L; + c->revision = 0; + c->romfile = "efi-e1000e.rom"; + c->class_id = PCI_CLASS_NETWORK_ETHERNET; + + dc->desc = "Intel 82574L GbE Controller"; + dc->reset = e1000e_qdev_reset; + dc->vmsd = &e1000e_vmstate; + + e1000e_prop_disable_vnet = qdev_prop_uint8; + 
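/*
+ * The stock uint8/uint16 PropertyInfo structs are copied so that each
+ * property can carry its own description string; only .description is
+ * overridden here. On the command line these properties surface as, e.g.
+ * (hypothetical netdev id): -device e1000e,netdev=net0,disable_vnet_hdr=1
+ */
+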
e1000e_prop_disable_vnet.description = "Do not use virtio headers, " + "perform SW offloads emulation " + "instead"; + + e1000e_prop_subsys_ven = qdev_prop_uint16; + e1000e_prop_subsys_ven.description = "PCI device Subsystem Vendor ID"; + + e1000e_prop_subsys = qdev_prop_uint16; + e1000e_prop_subsys.description = "PCI device Subsystem ID"; + + device_class_set_props(dc, e1000e_properties); + set_bit(DEVICE_CATEGORY_NETWORK, dc->categories); +} + +static void e1000e_instance_init(Object *obj) +{ + E1000EState *s = E1000E(obj); + device_add_bootindex_property(obj, &s->conf.bootindex, + "bootindex", "/ethernet-phy@0", + DEVICE(obj)); +} + +static const TypeInfo e1000e_info = { + .name = TYPE_E1000E, + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(E1000EState), + .class_init = e1000e_class_init, + .instance_init = e1000e_instance_init, + .interfaces = (InterfaceInfo[]) { + { INTERFACE_PCIE_DEVICE }, + { } + }, +}; + +static void e1000e_register_types(void) +{ + type_register_static(&e1000e_info); +} + +type_init(e1000e_register_types) diff --git a/hw/net/e1000e_core.c b/hw/net/e1000e_core.c new file mode 100644 index 000000000..8ae6fb7e1 --- /dev/null +++ b/hw/net/e1000e_core.c @@ -0,0 +1,3507 @@ +/* +* Core code for QEMU e1000e emulation +* +* Software developer's manuals: +* http://www.intel.com/content/dam/doc/datasheet/82574l-gbe-controller-datasheet.pdf +* +* Copyright (c) 2015 Ravello Systems LTD (http://ravellosystems.com) +* Developed by Daynix Computing LTD (http://www.daynix.com) +* +* Authors: +* Dmitry Fleytman +* Leonid Bloch +* Yan Vugenfirer +* +* Based on work done by: +* Nir Peleg, Tutis Systems Ltd. for Qumranet Inc. +* Copyright (c) 2008 Qumranet +* Based on work done by: +* Copyright (c) 2007 Dan Aloni +* Copyright (c) 2004 Antony T Curtis +* +* This library is free software; you can redistribute it and/or +* modify it under the terms of the GNU Lesser General Public +* License as published by the Free Software Foundation; either +* version 2.1 of the License, or (at your option) any later version. +* +* This library is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +* Lesser General Public License for more details. +* +* You should have received a copy of the GNU Lesser General Public +* License along with this library; if not, see <http://www.gnu.org/licenses/>.
+*/ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "net/net.h" +#include "net/tap.h" +#include "hw/pci/msi.h" +#include "hw/pci/msix.h" +#include "sysemu/runstate.h" + +#include "net_tx_pkt.h" +#include "net_rx_pkt.h" + +#include "e1000x_common.h" +#include "e1000e_core.h" + +#include "trace.h" + +#define E1000E_MIN_XITR (500) /* No more than 7813 interrupts per + second according to spec 10.2.4.2 */ +#define E1000E_MAX_TX_FRAGS (64) + +static inline void +e1000e_set_interrupt_cause(E1000ECore *core, uint32_t val); + +static inline void +e1000e_process_ts_option(E1000ECore *core, struct e1000_tx_desc *dp) +{ + if (le32_to_cpu(dp->upper.data) & E1000_TXD_EXTCMD_TSTAMP) { + trace_e1000e_wrn_no_ts_support(); + } +} + +static inline void +e1000e_process_snap_option(E1000ECore *core, uint32_t cmd_and_length) +{ + if (cmd_and_length & E1000_TXD_CMD_SNAP) { + trace_e1000e_wrn_no_snap_support(); + } +} + +static inline void +e1000e_raise_legacy_irq(E1000ECore *core) +{ + trace_e1000e_irq_legacy_notify(true); + e1000x_inc_reg_if_not_full(core->mac, IAC); + pci_set_irq(core->owner, 1); +} + +static inline void +e1000e_lower_legacy_irq(E1000ECore *core) +{ + trace_e1000e_irq_legacy_notify(false); + pci_set_irq(core->owner, 0); +} + +static inline void +e1000e_intrmgr_rearm_timer(E1000IntrDelayTimer *timer) +{ + int64_t delay_ns = (int64_t) timer->core->mac[timer->delay_reg] * + timer->delay_resolution_ns; + + trace_e1000e_irq_rearm_timer(timer->delay_reg << 2, delay_ns); + + timer_mod(timer->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + delay_ns); + + timer->running = true; +} + +static void +e1000e_intmgr_timer_resume(E1000IntrDelayTimer *timer) +{ + if (timer->running) { + e1000e_intrmgr_rearm_timer(timer); + } +} + +static void +e1000e_intmgr_timer_pause(E1000IntrDelayTimer *timer) +{ + if (timer->running) { + timer_del(timer->timer); + } +} + +static inline void +e1000e_intrmgr_stop_timer(E1000IntrDelayTimer *timer) +{ + if (timer->running) { + timer_del(timer->timer); + timer->running = false; + } +} + +static inline void +e1000e_intrmgr_fire_delayed_interrupts(E1000ECore *core) +{ + trace_e1000e_irq_fire_delayed_interrupts(); + e1000e_set_interrupt_cause(core, 0); +} + +static void +e1000e_intrmgr_on_timer(void *opaque) +{ + E1000IntrDelayTimer *timer = opaque; + + trace_e1000e_irq_throttling_timer(timer->delay_reg << 2); + + timer->running = false; + e1000e_intrmgr_fire_delayed_interrupts(timer->core); +} + +static void +e1000e_intrmgr_on_throttling_timer(void *opaque) +{ + E1000IntrDelayTimer *timer = opaque; + + assert(!msix_enabled(timer->core->owner)); + + timer->running = false; + + if (!timer->core->itr_intr_pending) { + trace_e1000e_irq_throttling_no_pending_interrupts(); + return; + } + + if (msi_enabled(timer->core->owner)) { + trace_e1000e_irq_msi_notify_postponed(); + e1000e_set_interrupt_cause(timer->core, 0); + } else { + trace_e1000e_irq_legacy_notify_postponed(); + e1000e_set_interrupt_cause(timer->core, 0); + } +} + +static void +e1000e_intrmgr_on_msix_throttling_timer(void *opaque) +{ + E1000IntrDelayTimer *timer = opaque; + int idx = timer - &timer->core->eitr[0]; + + assert(msix_enabled(timer->core->owner)); + + timer->running = false; + + if (!timer->core->eitr_intr_pending[idx]) { + trace_e1000e_irq_throttling_no_pending_vec(idx); + return; + } + + trace_e1000e_irq_msix_notify_postponed_vec(idx); + msix_notify(timer->core->owner, idx); +} + +static void +e1000e_intrmgr_initialize_all_timers(E1000ECore *core, bool create) +{ + int i; + + core->radv.delay_reg =
RADV; + core->rdtr.delay_reg = RDTR; + core->raid.delay_reg = RAID; + core->tadv.delay_reg = TADV; + core->tidv.delay_reg = TIDV; + + core->radv.delay_resolution_ns = E1000_INTR_DELAY_NS_RES; + core->rdtr.delay_resolution_ns = E1000_INTR_DELAY_NS_RES; + core->raid.delay_resolution_ns = E1000_INTR_DELAY_NS_RES; + core->tadv.delay_resolution_ns = E1000_INTR_DELAY_NS_RES; + core->tidv.delay_resolution_ns = E1000_INTR_DELAY_NS_RES; + + core->radv.core = core; + core->rdtr.core = core; + core->raid.core = core; + core->tadv.core = core; + core->tidv.core = core; + + core->itr.core = core; + core->itr.delay_reg = ITR; + core->itr.delay_resolution_ns = E1000_INTR_THROTTLING_NS_RES; + + for (i = 0; i < E1000E_MSIX_VEC_NUM; i++) { + core->eitr[i].core = core; + core->eitr[i].delay_reg = EITR + i; + core->eitr[i].delay_resolution_ns = E1000_INTR_THROTTLING_NS_RES; + } + + if (!create) { + return; + } + + core->radv.timer = + timer_new_ns(QEMU_CLOCK_VIRTUAL, e1000e_intrmgr_on_timer, &core->radv); + core->rdtr.timer = + timer_new_ns(QEMU_CLOCK_VIRTUAL, e1000e_intrmgr_on_timer, &core->rdtr); + core->raid.timer = + timer_new_ns(QEMU_CLOCK_VIRTUAL, e1000e_intrmgr_on_timer, &core->raid); + + core->tadv.timer = + timer_new_ns(QEMU_CLOCK_VIRTUAL, e1000e_intrmgr_on_timer, &core->tadv); + core->tidv.timer = + timer_new_ns(QEMU_CLOCK_VIRTUAL, e1000e_intrmgr_on_timer, &core->tidv); + + core->itr.timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, + e1000e_intrmgr_on_throttling_timer, + &core->itr); + + for (i = 0; i < E1000E_MSIX_VEC_NUM; i++) { + core->eitr[i].timer = + timer_new_ns(QEMU_CLOCK_VIRTUAL, + e1000e_intrmgr_on_msix_throttling_timer, + &core->eitr[i]); + } +} + +static inline void +e1000e_intrmgr_stop_delay_timers(E1000ECore *core) +{ + e1000e_intrmgr_stop_timer(&core->radv); + e1000e_intrmgr_stop_timer(&core->rdtr); + e1000e_intrmgr_stop_timer(&core->raid); + e1000e_intrmgr_stop_timer(&core->tidv); + e1000e_intrmgr_stop_timer(&core->tadv); +} + +static bool +e1000e_intrmgr_delay_rx_causes(E1000ECore *core, uint32_t *causes) +{ + uint32_t delayable_causes; + uint32_t rdtr = core->mac[RDTR]; + uint32_t radv = core->mac[RADV]; + uint32_t raid = core->mac[RAID]; + + if (msix_enabled(core->owner)) { + return false; + } + + delayable_causes = E1000_ICR_RXQ0 | + E1000_ICR_RXQ1 | + E1000_ICR_RXT0; + + if (!(core->mac[RFCTL] & E1000_RFCTL_ACK_DIS)) { + delayable_causes |= E1000_ICR_ACK; + } + + /* Clean up all causes that may be delayed */ + core->delayed_causes |= *causes & delayable_causes; + *causes &= ~delayable_causes; + + /* Check if delayed RX interrupts disabled by client + or if there are causes that cannot be delayed */ + if ((rdtr == 0) || (*causes != 0)) { + return false; + } + + /* Check if delayed RX ACK interrupts disabled by client + and there is an ACK packet received */ + if ((raid == 0) && (core->delayed_causes & E1000_ICR_ACK)) { + return false; + } + + /* All causes delayed */ + e1000e_intrmgr_rearm_timer(&core->rdtr); + + if (!core->radv.running && (radv != 0)) { + e1000e_intrmgr_rearm_timer(&core->radv); + } + + if (!core->raid.running && (core->delayed_causes & E1000_ICR_ACK)) { + e1000e_intrmgr_rearm_timer(&core->raid); + } + + return true; +} + +static bool +e1000e_intrmgr_delay_tx_causes(E1000ECore *core, uint32_t *causes) +{ + static const uint32_t delayable_causes = E1000_ICR_TXQ0 | + E1000_ICR_TXQ1 | + E1000_ICR_TXQE | + E1000_ICR_TXDW; + + if (msix_enabled(core->owner)) { + return false; + } + + /* Clean up all causes that may be delayed */ + core->delayed_causes |= *causes & 
delayable_causes; + *causes &= ~delayable_causes; + + /* If there are causes that cannot be delayed */ + if (*causes != 0) { + return false; + } + + /* All causes delayed */ + e1000e_intrmgr_rearm_timer(&core->tidv); + + if (!core->tadv.running && (core->mac[TADV] != 0)) { + e1000e_intrmgr_rearm_timer(&core->tadv); + } + + return true; +} + +static uint32_t +e1000e_intmgr_collect_delayed_causes(E1000ECore *core) +{ + uint32_t res; + + if (msix_enabled(core->owner)) { + assert(core->delayed_causes == 0); + return 0; + } + + res = core->delayed_causes; + core->delayed_causes = 0; + + e1000e_intrmgr_stop_delay_timers(core); + + return res; +} + +static void +e1000e_intrmgr_fire_all_timers(E1000ECore *core) +{ + int i; + uint32_t val = e1000e_intmgr_collect_delayed_causes(core); + + trace_e1000e_irq_adding_delayed_causes(val, core->mac[ICR]); + core->mac[ICR] |= val; + + if (core->itr.running) { + timer_del(core->itr.timer); + e1000e_intrmgr_on_throttling_timer(&core->itr); + } + + for (i = 0; i < E1000E_MSIX_VEC_NUM; i++) { + if (core->eitr[i].running) { + timer_del(core->eitr[i].timer); + e1000e_intrmgr_on_msix_throttling_timer(&core->eitr[i]); + } + } +} + +static void +e1000e_intrmgr_resume(E1000ECore *core) +{ + int i; + + e1000e_intmgr_timer_resume(&core->radv); + e1000e_intmgr_timer_resume(&core->rdtr); + e1000e_intmgr_timer_resume(&core->raid); + e1000e_intmgr_timer_resume(&core->tidv); + e1000e_intmgr_timer_resume(&core->tadv); + + e1000e_intmgr_timer_resume(&core->itr); + + for (i = 0; i < E1000E_MSIX_VEC_NUM; i++) { + e1000e_intmgr_timer_resume(&core->eitr[i]); + } +} + +static void +e1000e_intrmgr_pause(E1000ECore *core) +{ + int i; + + e1000e_intmgr_timer_pause(&core->radv); + e1000e_intmgr_timer_pause(&core->rdtr); + e1000e_intmgr_timer_pause(&core->raid); + e1000e_intmgr_timer_pause(&core->tidv); + e1000e_intmgr_timer_pause(&core->tadv); + + e1000e_intmgr_timer_pause(&core->itr); + + for (i = 0; i < E1000E_MSIX_VEC_NUM; i++) { + e1000e_intmgr_timer_pause(&core->eitr[i]); + } +} + +static void +e1000e_intrmgr_reset(E1000ECore *core) +{ + int i; + + core->delayed_causes = 0; + + e1000e_intrmgr_stop_delay_timers(core); + + e1000e_intrmgr_stop_timer(&core->itr); + + for (i = 0; i < E1000E_MSIX_VEC_NUM; i++) { + e1000e_intrmgr_stop_timer(&core->eitr[i]); + } +} + +static void +e1000e_intrmgr_pci_unint(E1000ECore *core) +{ + int i; + + timer_free(core->radv.timer); + timer_free(core->rdtr.timer); + timer_free(core->raid.timer); + + timer_free(core->tadv.timer); + timer_free(core->tidv.timer); + + timer_free(core->itr.timer); + + for (i = 0; i < E1000E_MSIX_VEC_NUM; i++) { + timer_free(core->eitr[i].timer); + } +} + +static void +e1000e_intrmgr_pci_realize(E1000ECore *core) +{ + e1000e_intrmgr_initialize_all_timers(core, true); +} + +static inline bool +e1000e_rx_csum_enabled(E1000ECore *core) +{ + return (core->mac[RXCSUM] & E1000_RXCSUM_PCSD) ? false : true; +} + +static inline bool +e1000e_rx_use_legacy_descriptor(E1000ECore *core) +{ + return (core->mac[RFCTL] & E1000_RFCTL_EXTEN) ? 
false : true;
+}
+
+static inline bool
+e1000e_rx_use_ps_descriptor(E1000ECore *core)
+{
+    return !e1000e_rx_use_legacy_descriptor(core) &&
+           (core->mac[RCTL] & E1000_RCTL_DTYP_PS);
+}
+
+static inline bool
+e1000e_rss_enabled(E1000ECore *core)
+{
+    return E1000_MRQC_ENABLED(core->mac[MRQC]) &&
+           !e1000e_rx_csum_enabled(core) &&
+           !e1000e_rx_use_legacy_descriptor(core);
+}
+
+typedef struct E1000E_RSSInfo_st {
+    bool enabled;
+    uint32_t hash;
+    uint32_t queue;
+    uint32_t type;
+} E1000E_RSSInfo;
+
+static uint32_t
+e1000e_rss_get_hash_type(E1000ECore *core, struct NetRxPkt *pkt)
+{
+    bool isip4, isip6, isudp, istcp;
+
+    assert(e1000e_rss_enabled(core));
+
+    net_rx_pkt_get_protocols(pkt, &isip4, &isip6, &isudp, &istcp);
+
+    if (isip4) {
+        bool fragment = net_rx_pkt_get_ip4_info(pkt)->fragment;
+
+        trace_e1000e_rx_rss_ip4(fragment, istcp, core->mac[MRQC],
+                                E1000_MRQC_EN_TCPIPV4(core->mac[MRQC]),
+                                E1000_MRQC_EN_IPV4(core->mac[MRQC]));
+
+        if (!fragment && istcp && E1000_MRQC_EN_TCPIPV4(core->mac[MRQC])) {
+            return E1000_MRQ_RSS_TYPE_IPV4TCP;
+        }
+
+        if (E1000_MRQC_EN_IPV4(core->mac[MRQC])) {
+            return E1000_MRQ_RSS_TYPE_IPV4;
+        }
+    } else if (isip6) {
+        eth_ip6_hdr_info *ip6info = net_rx_pkt_get_ip6_info(pkt);
+
+        bool ex_dis = core->mac[RFCTL] & E1000_RFCTL_IPV6_EX_DIS;
+        bool new_ex_dis = core->mac[RFCTL] & E1000_RFCTL_NEW_IPV6_EXT_DIS;
+
+        /*
+         * The following two traces must not be combined because the
+         * resulting event would have 11 arguments in total, and some
+         * trace backends (at least "ust") are limited to a maximum of
+         * 10 arguments per event. Events with more arguments fail to
+         * compile for such backends.
+         */
+        trace_e1000e_rx_rss_ip6_rfctl(core->mac[RFCTL]);
+        trace_e1000e_rx_rss_ip6(ex_dis, new_ex_dis, istcp,
+                                ip6info->has_ext_hdrs,
+                                ip6info->rss_ex_dst_valid,
+                                ip6info->rss_ex_src_valid,
+                                core->mac[MRQC],
+                                E1000_MRQC_EN_TCPIPV6(core->mac[MRQC]),
+                                E1000_MRQC_EN_IPV6EX(core->mac[MRQC]),
+                                E1000_MRQC_EN_IPV6(core->mac[MRQC]));
+
+        if ((!ex_dis || !ip6info->has_ext_hdrs) &&
+            (!new_ex_dis || !(ip6info->rss_ex_dst_valid ||
+                              ip6info->rss_ex_src_valid))) {
+
+            if (istcp && !ip6info->fragment &&
+                E1000_MRQC_EN_TCPIPV6(core->mac[MRQC])) {
+                return E1000_MRQ_RSS_TYPE_IPV6TCP;
+            }
+
+            if (E1000_MRQC_EN_IPV6EX(core->mac[MRQC])) {
+                return E1000_MRQ_RSS_TYPE_IPV6EX;
+            }
+
+        }
+
+        if (E1000_MRQC_EN_IPV6(core->mac[MRQC])) {
+            return E1000_MRQ_RSS_TYPE_IPV6;
+        }
+
+    }
+
+    return E1000_MRQ_RSS_TYPE_NONE;
+}
+
+static uint32_t
+e1000e_rss_calc_hash(E1000ECore *core,
+                     struct NetRxPkt *pkt,
+                     E1000E_RSSInfo *info)
+{
+    NetRxPktRssType type;
+
+    assert(e1000e_rss_enabled(core));
+
+    switch (info->type) {
+    case E1000_MRQ_RSS_TYPE_IPV4:
+        type = NetPktRssIpV4;
+        break;
+    case E1000_MRQ_RSS_TYPE_IPV4TCP:
+        type = NetPktRssIpV4Tcp;
+        break;
+    case E1000_MRQ_RSS_TYPE_IPV6TCP:
+        type = NetPktRssIpV6TcpEx;
+        break;
+    case E1000_MRQ_RSS_TYPE_IPV6:
+        type = NetPktRssIpV6;
+        break;
+    case E1000_MRQ_RSS_TYPE_IPV6EX:
+        type = NetPktRssIpV6Ex;
+        break;
+    default:
+        assert(false);
+        return 0;
+    }
+
+    return net_rx_pkt_calc_rss_hash(pkt, type, (uint8_t *) &core->mac[RSSRK]);
+}
+
+static void
+e1000e_rss_parse_packet(E1000ECore *core,
+                        struct NetRxPkt *pkt,
+                        E1000E_RSSInfo *info)
+{
+    trace_e1000e_rx_rss_started();
+
+    if (!e1000e_rss_enabled(core)) {
+        info->enabled = false;
+        info->hash = 0;
+        info->queue = 0;
+        info->type = 0;
+        trace_e1000e_rx_rss_disabled();
+        return;
+    }
+
+    info->enabled = true;
+
+    info->type = e1000e_rss_get_hash_type(core, pkt);
+
+    trace_e1000e_rx_rss_type(info->type);
+
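+    /*
+     * If no enabled RSS hash type matched the packet, fall back below
+     * to hash 0 and queue 0, i.e. the same dispatch as the RSS-disabled
+     * case above.
+     */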
if (info->type == E1000_MRQ_RSS_TYPE_NONE) { + info->hash = 0; + info->queue = 0; + return; + } + + info->hash = e1000e_rss_calc_hash(core, pkt, info); + info->queue = E1000_RSS_QUEUE(&core->mac[RETA], info->hash); +} + +static void +e1000e_setup_tx_offloads(E1000ECore *core, struct e1000e_tx *tx) +{ + if (tx->props.tse && tx->cptse) { + net_tx_pkt_build_vheader(tx->tx_pkt, true, true, tx->props.mss); + net_tx_pkt_update_ip_checksums(tx->tx_pkt); + e1000x_inc_reg_if_not_full(core->mac, TSCTC); + return; + } + + if (tx->sum_needed & E1000_TXD_POPTS_TXSM) { + net_tx_pkt_build_vheader(tx->tx_pkt, false, true, 0); + } + + if (tx->sum_needed & E1000_TXD_POPTS_IXSM) { + net_tx_pkt_update_ip_hdr_checksum(tx->tx_pkt); + } +} + +static bool +e1000e_tx_pkt_send(E1000ECore *core, struct e1000e_tx *tx, int queue_index) +{ + int target_queue = MIN(core->max_queue_num, queue_index); + NetClientState *queue = qemu_get_subqueue(core->owner_nic, target_queue); + + e1000e_setup_tx_offloads(core, tx); + + net_tx_pkt_dump(tx->tx_pkt); + + if ((core->phy[0][PHY_CTRL] & MII_CR_LOOPBACK) || + ((core->mac[RCTL] & E1000_RCTL_LBM_MAC) == E1000_RCTL_LBM_MAC)) { + return net_tx_pkt_send_loopback(tx->tx_pkt, queue); + } else { + return net_tx_pkt_send(tx->tx_pkt, queue); + } +} + +static void +e1000e_on_tx_done_update_stats(E1000ECore *core, struct NetTxPkt *tx_pkt) +{ + static const int PTCregs[6] = { PTC64, PTC127, PTC255, PTC511, + PTC1023, PTC1522 }; + + size_t tot_len = net_tx_pkt_get_total_len(tx_pkt); + + e1000x_increase_size_stats(core->mac, PTCregs, tot_len); + e1000x_inc_reg_if_not_full(core->mac, TPT); + e1000x_grow_8reg_if_not_full(core->mac, TOTL, tot_len); + + switch (net_tx_pkt_get_packet_type(tx_pkt)) { + case ETH_PKT_BCAST: + e1000x_inc_reg_if_not_full(core->mac, BPTC); + break; + case ETH_PKT_MCAST: + e1000x_inc_reg_if_not_full(core->mac, MPTC); + break; + case ETH_PKT_UCAST: + break; + default: + g_assert_not_reached(); + } + + core->mac[GPTC] = core->mac[TPT]; + core->mac[GOTCL] = core->mac[TOTL]; + core->mac[GOTCH] = core->mac[TOTH]; +} + +static void +e1000e_process_tx_desc(E1000ECore *core, + struct e1000e_tx *tx, + struct e1000_tx_desc *dp, + int queue_index) +{ + uint32_t txd_lower = le32_to_cpu(dp->lower.data); + uint32_t dtype = txd_lower & (E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D); + unsigned int split_size = txd_lower & 0xffff; + uint64_t addr; + struct e1000_context_desc *xp = (struct e1000_context_desc *)dp; + bool eop = txd_lower & E1000_TXD_CMD_EOP; + + if (dtype == E1000_TXD_CMD_DEXT) { /* context descriptor */ + e1000x_read_tx_ctx_descr(xp, &tx->props); + e1000e_process_snap_option(core, le32_to_cpu(xp->cmd_and_length)); + return; + } else if (dtype == (E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D)) { + /* data descriptor */ + tx->sum_needed = le32_to_cpu(dp->upper.data) >> 8; + tx->cptse = (txd_lower & E1000_TXD_CMD_TSE) ? 
1 : 0; + e1000e_process_ts_option(core, dp); + } else { + /* legacy descriptor */ + e1000e_process_ts_option(core, dp); + tx->cptse = 0; + } + + addr = le64_to_cpu(dp->buffer_addr); + + if (!tx->skip_cp) { + if (!net_tx_pkt_add_raw_fragment(tx->tx_pkt, addr, split_size)) { + tx->skip_cp = true; + } + } + + if (eop) { + if (!tx->skip_cp && net_tx_pkt_parse(tx->tx_pkt)) { + if (e1000x_vlan_enabled(core->mac) && + e1000x_is_vlan_txd(txd_lower)) { + net_tx_pkt_setup_vlan_header_ex(tx->tx_pkt, + le16_to_cpu(dp->upper.fields.special), core->mac[VET]); + } + if (e1000e_tx_pkt_send(core, tx, queue_index)) { + e1000e_on_tx_done_update_stats(core, tx->tx_pkt); + } + } + + tx->skip_cp = false; + net_tx_pkt_reset(tx->tx_pkt); + + tx->sum_needed = 0; + tx->cptse = 0; + } +} + +static inline uint32_t +e1000e_tx_wb_interrupt_cause(E1000ECore *core, int queue_idx) +{ + if (!msix_enabled(core->owner)) { + return E1000_ICR_TXDW; + } + + return (queue_idx == 0) ? E1000_ICR_TXQ0 : E1000_ICR_TXQ1; +} + +static inline uint32_t +e1000e_rx_wb_interrupt_cause(E1000ECore *core, int queue_idx, + bool min_threshold_hit) +{ + if (!msix_enabled(core->owner)) { + return E1000_ICS_RXT0 | (min_threshold_hit ? E1000_ICS_RXDMT0 : 0); + } + + return (queue_idx == 0) ? E1000_ICR_RXQ0 : E1000_ICR_RXQ1; +} + +static uint32_t +e1000e_txdesc_writeback(E1000ECore *core, dma_addr_t base, + struct e1000_tx_desc *dp, bool *ide, int queue_idx) +{ + uint32_t txd_upper, txd_lower = le32_to_cpu(dp->lower.data); + + if (!(txd_lower & E1000_TXD_CMD_RS) && + !(core->mac[IVAR] & E1000_IVAR_TX_INT_EVERY_WB)) { + return 0; + } + + *ide = (txd_lower & E1000_TXD_CMD_IDE) ? true : false; + + txd_upper = le32_to_cpu(dp->upper.data) | E1000_TXD_STAT_DD; + + dp->upper.data = cpu_to_le32(txd_upper); + pci_dma_write(core->owner, base + ((char *)&dp->upper - (char *)dp), + &dp->upper, sizeof(dp->upper)); + return e1000e_tx_wb_interrupt_cause(core, queue_idx); +} + +typedef struct E1000E_RingInfo_st { + int dbah; + int dbal; + int dlen; + int dh; + int dt; + int idx; +} E1000E_RingInfo; + +static inline bool +e1000e_ring_empty(E1000ECore *core, const E1000E_RingInfo *r) +{ + return core->mac[r->dh] == core->mac[r->dt] || + core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN; +} + +static inline uint64_t +e1000e_ring_base(E1000ECore *core, const E1000E_RingInfo *r) +{ + uint64_t bah = core->mac[r->dbah]; + uint64_t bal = core->mac[r->dbal]; + + return (bah << 32) + bal; +} + +static inline uint64_t +e1000e_ring_head_descr(E1000ECore *core, const E1000E_RingInfo *r) +{ + return e1000e_ring_base(core, r) + E1000_RING_DESC_LEN * core->mac[r->dh]; +} + +static inline void +e1000e_ring_advance(E1000ECore *core, const E1000E_RingInfo *r, uint32_t count) +{ + core->mac[r->dh] += count; + + if (core->mac[r->dh] * E1000_RING_DESC_LEN >= core->mac[r->dlen]) { + core->mac[r->dh] = 0; + } +} + +static inline uint32_t +e1000e_ring_free_descr_num(E1000ECore *core, const E1000E_RingInfo *r) +{ + trace_e1000e_ring_free_space(r->idx, core->mac[r->dlen], + core->mac[r->dh], core->mac[r->dt]); + + if (core->mac[r->dh] <= core->mac[r->dt]) { + return core->mac[r->dt] - core->mac[r->dh]; + } + + if (core->mac[r->dh] > core->mac[r->dt]) { + return core->mac[r->dlen] / E1000_RING_DESC_LEN + + core->mac[r->dt] - core->mac[r->dh]; + } + + g_assert_not_reached(); + return 0; +} + +static inline bool +e1000e_ring_enabled(E1000ECore *core, const E1000E_RingInfo *r) +{ + return core->mac[r->dlen] > 0; +} + +static inline uint32_t +e1000e_ring_len(E1000ECore *core, const 
E1000E_RingInfo *r) +{ + return core->mac[r->dlen]; +} + +typedef struct E1000E_TxRing_st { + const E1000E_RingInfo *i; + struct e1000e_tx *tx; +} E1000E_TxRing; + +static inline int +e1000e_mq_queue_idx(int base_reg_idx, int reg_idx) +{ + return (reg_idx - base_reg_idx) / (0x100 >> 2); +} + +static inline void +e1000e_tx_ring_init(E1000ECore *core, E1000E_TxRing *txr, int idx) +{ + static const E1000E_RingInfo i[E1000E_NUM_QUEUES] = { + { TDBAH, TDBAL, TDLEN, TDH, TDT, 0 }, + { TDBAH1, TDBAL1, TDLEN1, TDH1, TDT1, 1 } + }; + + assert(idx < ARRAY_SIZE(i)); + + txr->i = &i[idx]; + txr->tx = &core->tx[idx]; +} + +typedef struct E1000E_RxRing_st { + const E1000E_RingInfo *i; +} E1000E_RxRing; + +static inline void +e1000e_rx_ring_init(E1000ECore *core, E1000E_RxRing *rxr, int idx) +{ + static const E1000E_RingInfo i[E1000E_NUM_QUEUES] = { + { RDBAH0, RDBAL0, RDLEN0, RDH0, RDT0, 0 }, + { RDBAH1, RDBAL1, RDLEN1, RDH1, RDT1, 1 } + }; + + assert(idx < ARRAY_SIZE(i)); + + rxr->i = &i[idx]; +} + +static void +e1000e_start_xmit(E1000ECore *core, const E1000E_TxRing *txr) +{ + dma_addr_t base; + struct e1000_tx_desc desc; + bool ide = false; + const E1000E_RingInfo *txi = txr->i; + uint32_t cause = E1000_ICS_TXQE; + + if (!(core->mac[TCTL] & E1000_TCTL_EN)) { + trace_e1000e_tx_disabled(); + return; + } + + while (!e1000e_ring_empty(core, txi)) { + base = e1000e_ring_head_descr(core, txi); + + pci_dma_read(core->owner, base, &desc, sizeof(desc)); + + trace_e1000e_tx_descr((void *)(intptr_t)desc.buffer_addr, + desc.lower.data, desc.upper.data); + + e1000e_process_tx_desc(core, txr->tx, &desc, txi->idx); + cause |= e1000e_txdesc_writeback(core, base, &desc, &ide, txi->idx); + + e1000e_ring_advance(core, txi, 1); + } + + if (!ide || !e1000e_intrmgr_delay_tx_causes(core, &cause)) { + e1000e_set_interrupt_cause(core, cause); + } +} + +static bool +e1000e_has_rxbufs(E1000ECore *core, const E1000E_RingInfo *r, + size_t total_size) +{ + uint32_t bufs = e1000e_ring_free_descr_num(core, r); + + trace_e1000e_rx_has_buffers(r->idx, bufs, total_size, + core->rx_desc_buf_size); + + return total_size <= bufs / (core->rx_desc_len / E1000_MIN_RX_DESC_LEN) * + core->rx_desc_buf_size; +} + +void +e1000e_start_recv(E1000ECore *core) +{ + int i; + + trace_e1000e_rx_start_recv(); + + for (i = 0; i <= core->max_queue_num; i++) { + qemu_flush_queued_packets(qemu_get_subqueue(core->owner_nic, i)); + } +} + +bool +e1000e_can_receive(E1000ECore *core) +{ + int i; + + if (!e1000x_rx_ready(core->owner, core->mac)) { + return false; + } + + for (i = 0; i < E1000E_NUM_QUEUES; i++) { + E1000E_RxRing rxr; + + e1000e_rx_ring_init(core, &rxr, i); + if (e1000e_ring_enabled(core, rxr.i) && + e1000e_has_rxbufs(core, rxr.i, 1)) { + trace_e1000e_rx_can_recv(); + return true; + } + } + + trace_e1000e_rx_can_recv_rings_full(); + return false; +} + +ssize_t +e1000e_receive(E1000ECore *core, const uint8_t *buf, size_t size) +{ + const struct iovec iov = { + .iov_base = (uint8_t *)buf, + .iov_len = size + }; + + return e1000e_receive_iov(core, &iov, 1); +} + +static inline bool +e1000e_rx_l3_cso_enabled(E1000ECore *core) +{ + return !!(core->mac[RXCSUM] & E1000_RXCSUM_IPOFLD); +} + +static inline bool +e1000e_rx_l4_cso_enabled(E1000ECore *core) +{ + return !!(core->mac[RXCSUM] & E1000_RXCSUM_TUOFLD); +} + +static bool +e1000e_receive_filter(E1000ECore *core, const uint8_t *buf, int size) +{ + uint32_t rctl = core->mac[RCTL]; + + if (e1000x_is_vlan_packet(buf, core->mac[VET]) && + e1000x_vlan_rx_filter_enabled(core->mac)) { + uint16_t vid = 
lduw_be_p(buf + 14); + uint32_t vfta = ldl_le_p((uint32_t *)(core->mac + VFTA) + + ((vid >> 5) & 0x7f)); + if ((vfta & (1 << (vid & 0x1f))) == 0) { + trace_e1000e_rx_flt_vlan_mismatch(vid); + return false; + } else { + trace_e1000e_rx_flt_vlan_match(vid); + } + } + + switch (net_rx_pkt_get_packet_type(core->rx_pkt)) { + case ETH_PKT_UCAST: + if (rctl & E1000_RCTL_UPE) { + return true; /* promiscuous ucast */ + } + break; + + case ETH_PKT_BCAST: + if (rctl & E1000_RCTL_BAM) { + return true; /* broadcast enabled */ + } + break; + + case ETH_PKT_MCAST: + if (rctl & E1000_RCTL_MPE) { + return true; /* promiscuous mcast */ + } + break; + + default: + g_assert_not_reached(); + } + + return e1000x_rx_group_filter(core->mac, buf); +} + +static inline void +e1000e_read_lgcy_rx_descr(E1000ECore *core, uint8_t *desc, hwaddr *buff_addr) +{ + struct e1000_rx_desc *d = (struct e1000_rx_desc *) desc; + *buff_addr = le64_to_cpu(d->buffer_addr); +} + +static inline void +e1000e_read_ext_rx_descr(E1000ECore *core, uint8_t *desc, hwaddr *buff_addr) +{ + union e1000_rx_desc_extended *d = (union e1000_rx_desc_extended *) desc; + *buff_addr = le64_to_cpu(d->read.buffer_addr); +} + +static inline void +e1000e_read_ps_rx_descr(E1000ECore *core, uint8_t *desc, + hwaddr (*buff_addr)[MAX_PS_BUFFERS]) +{ + int i; + union e1000_rx_desc_packet_split *d = + (union e1000_rx_desc_packet_split *) desc; + + for (i = 0; i < MAX_PS_BUFFERS; i++) { + (*buff_addr)[i] = le64_to_cpu(d->read.buffer_addr[i]); + } + + trace_e1000e_rx_desc_ps_read((*buff_addr)[0], (*buff_addr)[1], + (*buff_addr)[2], (*buff_addr)[3]); +} + +static inline void +e1000e_read_rx_descr(E1000ECore *core, uint8_t *desc, + hwaddr (*buff_addr)[MAX_PS_BUFFERS]) +{ + if (e1000e_rx_use_legacy_descriptor(core)) { + e1000e_read_lgcy_rx_descr(core, desc, &(*buff_addr)[0]); + (*buff_addr)[1] = (*buff_addr)[2] = (*buff_addr)[3] = 0; + } else { + if (core->mac[RCTL] & E1000_RCTL_DTYP_PS) { + e1000e_read_ps_rx_descr(core, desc, buff_addr); + } else { + e1000e_read_ext_rx_descr(core, desc, &(*buff_addr)[0]); + (*buff_addr)[1] = (*buff_addr)[2] = (*buff_addr)[3] = 0; + } + } +} + +static void +e1000e_verify_csum_in_sw(E1000ECore *core, + struct NetRxPkt *pkt, + uint32_t *status_flags, + bool istcp, bool isudp) +{ + bool csum_valid; + uint32_t csum_error; + + if (e1000e_rx_l3_cso_enabled(core)) { + if (!net_rx_pkt_validate_l3_csum(pkt, &csum_valid)) { + trace_e1000e_rx_metadata_l3_csum_validation_failed(); + } else { + csum_error = csum_valid ? 0 : E1000_RXDEXT_STATERR_IPE; + *status_flags |= E1000_RXD_STAT_IPCS | csum_error; + } + } else { + trace_e1000e_rx_metadata_l3_cso_disabled(); + } + + if (!e1000e_rx_l4_cso_enabled(core)) { + trace_e1000e_rx_metadata_l4_cso_disabled(); + return; + } + + if (!net_rx_pkt_validate_l4_csum(pkt, &csum_valid)) { + trace_e1000e_rx_metadata_l4_csum_validation_failed(); + return; + } + + csum_error = csum_valid ? 
0 : E1000_RXDEXT_STATERR_TCPE; + + if (istcp) { + *status_flags |= E1000_RXD_STAT_TCPCS | + csum_error; + } else if (isudp) { + *status_flags |= E1000_RXD_STAT_TCPCS | + E1000_RXD_STAT_UDPCS | + csum_error; + } +} + +static inline bool +e1000e_is_tcp_ack(E1000ECore *core, struct NetRxPkt *rx_pkt) +{ + if (!net_rx_pkt_is_tcp_ack(rx_pkt)) { + return false; + } + + if (core->mac[RFCTL] & E1000_RFCTL_ACK_DATA_DIS) { + return !net_rx_pkt_has_tcp_data(rx_pkt); + } + + return true; +} + +static void +e1000e_build_rx_metadata(E1000ECore *core, + struct NetRxPkt *pkt, + bool is_eop, + const E1000E_RSSInfo *rss_info, + uint32_t *rss, uint32_t *mrq, + uint32_t *status_flags, + uint16_t *ip_id, + uint16_t *vlan_tag) +{ + struct virtio_net_hdr *vhdr; + bool isip4, isip6, istcp, isudp; + uint32_t pkt_type; + + *status_flags = E1000_RXD_STAT_DD; + + /* No additional metadata needed for non-EOP descriptors */ + if (!is_eop) { + goto func_exit; + } + + *status_flags |= E1000_RXD_STAT_EOP; + + net_rx_pkt_get_protocols(pkt, &isip4, &isip6, &isudp, &istcp); + trace_e1000e_rx_metadata_protocols(isip4, isip6, isudp, istcp); + + /* VLAN state */ + if (net_rx_pkt_is_vlan_stripped(pkt)) { + *status_flags |= E1000_RXD_STAT_VP; + *vlan_tag = cpu_to_le16(net_rx_pkt_get_vlan_tag(pkt)); + trace_e1000e_rx_metadata_vlan(*vlan_tag); + } + + /* Packet parsing results */ + if ((core->mac[RXCSUM] & E1000_RXCSUM_PCSD) != 0) { + if (rss_info->enabled) { + *rss = cpu_to_le32(rss_info->hash); + *mrq = cpu_to_le32(rss_info->type | (rss_info->queue << 8)); + trace_e1000e_rx_metadata_rss(*rss, *mrq); + } + } else if (isip4) { + *status_flags |= E1000_RXD_STAT_IPIDV; + *ip_id = cpu_to_le16(net_rx_pkt_get_ip_id(pkt)); + trace_e1000e_rx_metadata_ip_id(*ip_id); + } + + if (istcp && e1000e_is_tcp_ack(core, pkt)) { + *status_flags |= E1000_RXD_STAT_ACK; + trace_e1000e_rx_metadata_ack(); + } + + if (isip6 && (core->mac[RFCTL] & E1000_RFCTL_IPV6_DIS)) { + trace_e1000e_rx_metadata_ipv6_filtering_disabled(); + pkt_type = E1000_RXD_PKT_MAC; + } else if (istcp || isudp) { + pkt_type = isip4 ? E1000_RXD_PKT_IP4_XDP : E1000_RXD_PKT_IP6_XDP; + } else if (isip4 || isip6) { + pkt_type = isip4 ? E1000_RXD_PKT_IP4 : E1000_RXD_PKT_IP6; + } else { + pkt_type = E1000_RXD_PKT_MAC; + } + + *status_flags |= E1000_RXD_PKT_TYPE(pkt_type); + trace_e1000e_rx_metadata_pkt_type(pkt_type); + + /* RX CSO information */ + if (isip6 && (core->mac[RFCTL] & E1000_RFCTL_IPV6_XSUM_DIS)) { + trace_e1000e_rx_metadata_ipv6_sum_disabled(); + goto func_exit; + } + + if (!net_rx_pkt_has_virt_hdr(pkt)) { + trace_e1000e_rx_metadata_no_virthdr(); + e1000e_verify_csum_in_sw(core, pkt, status_flags, istcp, isudp); + goto func_exit; + } + + vhdr = net_rx_pkt_get_vhdr(pkt); + + if (!(vhdr->flags & VIRTIO_NET_HDR_F_DATA_VALID) && + !(vhdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)) { + trace_e1000e_rx_metadata_virthdr_no_csum_info(); + e1000e_verify_csum_in_sw(core, pkt, status_flags, istcp, isudp); + goto func_exit; + } + + if (e1000e_rx_l3_cso_enabled(core)) { + *status_flags |= isip4 ? 
E1000_RXD_STAT_IPCS : 0; + } else { + trace_e1000e_rx_metadata_l3_cso_disabled(); + } + + if (e1000e_rx_l4_cso_enabled(core)) { + if (istcp) { + *status_flags |= E1000_RXD_STAT_TCPCS; + } else if (isudp) { + *status_flags |= E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS; + } + } else { + trace_e1000e_rx_metadata_l4_cso_disabled(); + } + + trace_e1000e_rx_metadata_status_flags(*status_flags); + +func_exit: + *status_flags = cpu_to_le32(*status_flags); +} + +static inline void +e1000e_write_lgcy_rx_descr(E1000ECore *core, uint8_t *desc, + struct NetRxPkt *pkt, + const E1000E_RSSInfo *rss_info, + uint16_t length) +{ + uint32_t status_flags, rss, mrq; + uint16_t ip_id; + + struct e1000_rx_desc *d = (struct e1000_rx_desc *) desc; + + assert(!rss_info->enabled); + + d->length = cpu_to_le16(length); + d->csum = 0; + + e1000e_build_rx_metadata(core, pkt, pkt != NULL, + rss_info, + &rss, &mrq, + &status_flags, &ip_id, + &d->special); + d->errors = (uint8_t) (le32_to_cpu(status_flags) >> 24); + d->status = (uint8_t) le32_to_cpu(status_flags); +} + +static inline void +e1000e_write_ext_rx_descr(E1000ECore *core, uint8_t *desc, + struct NetRxPkt *pkt, + const E1000E_RSSInfo *rss_info, + uint16_t length) +{ + union e1000_rx_desc_extended *d = (union e1000_rx_desc_extended *) desc; + + memset(&d->wb, 0, sizeof(d->wb)); + + d->wb.upper.length = cpu_to_le16(length); + + e1000e_build_rx_metadata(core, pkt, pkt != NULL, + rss_info, + &d->wb.lower.hi_dword.rss, + &d->wb.lower.mrq, + &d->wb.upper.status_error, + &d->wb.lower.hi_dword.csum_ip.ip_id, + &d->wb.upper.vlan); +} + +static inline void +e1000e_write_ps_rx_descr(E1000ECore *core, uint8_t *desc, + struct NetRxPkt *pkt, + const E1000E_RSSInfo *rss_info, + size_t ps_hdr_len, + uint16_t(*written)[MAX_PS_BUFFERS]) +{ + int i; + union e1000_rx_desc_packet_split *d = + (union e1000_rx_desc_packet_split *) desc; + + memset(&d->wb, 0, sizeof(d->wb)); + + d->wb.middle.length0 = cpu_to_le16((*written)[0]); + + for (i = 0; i < PS_PAGE_BUFFERS; i++) { + d->wb.upper.length[i] = cpu_to_le16((*written)[i + 1]); + } + + e1000e_build_rx_metadata(core, pkt, pkt != NULL, + rss_info, + &d->wb.lower.hi_dword.rss, + &d->wb.lower.mrq, + &d->wb.middle.status_error, + &d->wb.lower.hi_dword.csum_ip.ip_id, + &d->wb.middle.vlan); + + d->wb.upper.header_status = + cpu_to_le16(ps_hdr_len | (ps_hdr_len ? 
E1000_RXDPS_HDRSTAT_HDRSP : 0)); + + trace_e1000e_rx_desc_ps_write((*written)[0], (*written)[1], + (*written)[2], (*written)[3]); +} + +static inline void +e1000e_write_rx_descr(E1000ECore *core, uint8_t *desc, +struct NetRxPkt *pkt, const E1000E_RSSInfo *rss_info, + size_t ps_hdr_len, uint16_t(*written)[MAX_PS_BUFFERS]) +{ + if (e1000e_rx_use_legacy_descriptor(core)) { + assert(ps_hdr_len == 0); + e1000e_write_lgcy_rx_descr(core, desc, pkt, rss_info, (*written)[0]); + } else { + if (core->mac[RCTL] & E1000_RCTL_DTYP_PS) { + e1000e_write_ps_rx_descr(core, desc, pkt, rss_info, + ps_hdr_len, written); + } else { + assert(ps_hdr_len == 0); + e1000e_write_ext_rx_descr(core, desc, pkt, rss_info, + (*written)[0]); + } + } +} + +typedef struct e1000e_ba_state_st { + uint16_t written[MAX_PS_BUFFERS]; + uint8_t cur_idx; +} e1000e_ba_state; + +static inline void +e1000e_write_hdr_to_rx_buffers(E1000ECore *core, + hwaddr (*ba)[MAX_PS_BUFFERS], + e1000e_ba_state *bastate, + const char *data, + dma_addr_t data_len) +{ + assert(data_len <= core->rxbuf_sizes[0] - bastate->written[0]); + + pci_dma_write(core->owner, (*ba)[0] + bastate->written[0], data, data_len); + bastate->written[0] += data_len; + + bastate->cur_idx = 1; +} + +static void +e1000e_write_to_rx_buffers(E1000ECore *core, + hwaddr (*ba)[MAX_PS_BUFFERS], + e1000e_ba_state *bastate, + const char *data, + dma_addr_t data_len) +{ + while (data_len > 0) { + uint32_t cur_buf_len = core->rxbuf_sizes[bastate->cur_idx]; + uint32_t cur_buf_bytes_left = cur_buf_len - + bastate->written[bastate->cur_idx]; + uint32_t bytes_to_write = MIN(data_len, cur_buf_bytes_left); + + trace_e1000e_rx_desc_buff_write(bastate->cur_idx, + (*ba)[bastate->cur_idx], + bastate->written[bastate->cur_idx], + data, + bytes_to_write); + + pci_dma_write(core->owner, + (*ba)[bastate->cur_idx] + bastate->written[bastate->cur_idx], + data, bytes_to_write); + + bastate->written[bastate->cur_idx] += bytes_to_write; + data += bytes_to_write; + data_len -= bytes_to_write; + + if (bastate->written[bastate->cur_idx] == cur_buf_len) { + bastate->cur_idx++; + } + + assert(bastate->cur_idx < MAX_PS_BUFFERS); + } +} + +static void +e1000e_update_rx_stats(E1000ECore *core, + size_t data_size, + size_t data_fcs_size) +{ + e1000x_update_rx_total_stats(core->mac, data_size, data_fcs_size); + + switch (net_rx_pkt_get_packet_type(core->rx_pkt)) { + case ETH_PKT_BCAST: + e1000x_inc_reg_if_not_full(core->mac, BPRC); + break; + + case ETH_PKT_MCAST: + e1000x_inc_reg_if_not_full(core->mac, MPRC); + break; + + default: + break; + } +} + +static inline bool +e1000e_rx_descr_threshold_hit(E1000ECore *core, const E1000E_RingInfo *rxi) +{ + return e1000e_ring_free_descr_num(core, rxi) == + e1000e_ring_len(core, rxi) >> core->rxbuf_min_shift; +} + +static bool +e1000e_do_ps(E1000ECore *core, struct NetRxPkt *pkt, size_t *hdr_len) +{ + bool isip4, isip6, isudp, istcp; + bool fragment; + + if (!e1000e_rx_use_ps_descriptor(core)) { + return false; + } + + net_rx_pkt_get_protocols(pkt, &isip4, &isip6, &isudp, &istcp); + + if (isip4) { + fragment = net_rx_pkt_get_ip4_info(pkt)->fragment; + } else if (isip6) { + fragment = net_rx_pkt_get_ip6_info(pkt)->fragment; + } else { + return false; + } + + if (fragment && (core->mac[RFCTL] & E1000_RFCTL_IPFRSP_DIS)) { + return false; + } + + if (!fragment && (isudp || istcp)) { + *hdr_len = net_rx_pkt_get_l5_hdr_offset(pkt); + } else { + *hdr_len = net_rx_pkt_get_l4_hdr_offset(pkt); + } + + if ((*hdr_len > core->rxbuf_sizes[0]) || + (*hdr_len > 
net_rx_pkt_get_total_len(pkt))) { + return false; + } + + return true; +} + +static void +e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt, + const E1000E_RxRing *rxr, + const E1000E_RSSInfo *rss_info) +{ + PCIDevice *d = core->owner; + dma_addr_t base; + uint8_t desc[E1000_MAX_RX_DESC_LEN]; + size_t desc_size; + size_t desc_offset = 0; + size_t iov_ofs = 0; + + struct iovec *iov = net_rx_pkt_get_iovec(pkt); + size_t size = net_rx_pkt_get_total_len(pkt); + size_t total_size = size + e1000x_fcs_len(core->mac); + const E1000E_RingInfo *rxi; + size_t ps_hdr_len = 0; + bool do_ps = e1000e_do_ps(core, pkt, &ps_hdr_len); + bool is_first = true; + + rxi = rxr->i; + + do { + hwaddr ba[MAX_PS_BUFFERS]; + e1000e_ba_state bastate = { { 0 } }; + bool is_last = false; + + desc_size = total_size - desc_offset; + + if (desc_size > core->rx_desc_buf_size) { + desc_size = core->rx_desc_buf_size; + } + + if (e1000e_ring_empty(core, rxi)) { + return; + } + + base = e1000e_ring_head_descr(core, rxi); + + pci_dma_read(d, base, &desc, core->rx_desc_len); + + trace_e1000e_rx_descr(rxi->idx, base, core->rx_desc_len); + + e1000e_read_rx_descr(core, desc, &ba); + + if (ba[0]) { + if (desc_offset < size) { + static const uint32_t fcs_pad; + size_t iov_copy; + size_t copy_size = size - desc_offset; + if (copy_size > core->rx_desc_buf_size) { + copy_size = core->rx_desc_buf_size; + } + + /* For PS mode copy the packet header first */ + if (do_ps) { + if (is_first) { + size_t ps_hdr_copied = 0; + do { + iov_copy = MIN(ps_hdr_len - ps_hdr_copied, + iov->iov_len - iov_ofs); + + e1000e_write_hdr_to_rx_buffers(core, &ba, &bastate, + iov->iov_base, iov_copy); + + copy_size -= iov_copy; + ps_hdr_copied += iov_copy; + + iov_ofs += iov_copy; + if (iov_ofs == iov->iov_len) { + iov++; + iov_ofs = 0; + } + } while (ps_hdr_copied < ps_hdr_len); + + is_first = false; + } else { + /* Leave buffer 0 of each descriptor except first */ + /* empty as per spec 7.1.5.1 */ + e1000e_write_hdr_to_rx_buffers(core, &ba, &bastate, + NULL, 0); + } + } + + /* Copy packet payload */ + while (copy_size) { + iov_copy = MIN(copy_size, iov->iov_len - iov_ofs); + + e1000e_write_to_rx_buffers(core, &ba, &bastate, + iov->iov_base + iov_ofs, iov_copy); + + copy_size -= iov_copy; + iov_ofs += iov_copy; + if (iov_ofs == iov->iov_len) { + iov++; + iov_ofs = 0; + } + } + + if (desc_offset + desc_size >= total_size) { + /* Simulate FCS checksum presence in the last descriptor */ + e1000e_write_to_rx_buffers(core, &ba, &bastate, + (const char *) &fcs_pad, e1000x_fcs_len(core->mac)); + } + } + } else { /* as per intel docs; skip descriptors with null buf addr */ + trace_e1000e_rx_null_descriptor(); + } + desc_offset += desc_size; + if (desc_offset >= total_size) { + is_last = true; + } + + e1000e_write_rx_descr(core, desc, is_last ? core->rx_pkt : NULL, + rss_info, do_ps ? 
ps_hdr_len : 0, &bastate.written); + pci_dma_write(d, base, &desc, core->rx_desc_len); + + e1000e_ring_advance(core, rxi, + core->rx_desc_len / E1000_MIN_RX_DESC_LEN); + + } while (desc_offset < total_size); + + e1000e_update_rx_stats(core, size, total_size); +} + +static inline void +e1000e_rx_fix_l4_csum(E1000ECore *core, struct NetRxPkt *pkt) +{ + if (net_rx_pkt_has_virt_hdr(pkt)) { + struct virtio_net_hdr *vhdr = net_rx_pkt_get_vhdr(pkt); + + if (vhdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) { + net_rx_pkt_fix_l4_csum(pkt); + } + } +} + +ssize_t +e1000e_receive_iov(E1000ECore *core, const struct iovec *iov, int iovcnt) +{ + static const int maximum_ethernet_hdr_len = (14 + 4); + /* Min. octets in an ethernet frame sans FCS */ + static const int min_buf_size = 60; + + uint32_t n = 0; + uint8_t min_buf[min_buf_size]; + struct iovec min_iov; + uint8_t *filter_buf; + size_t size, orig_size; + size_t iov_ofs = 0; + E1000E_RxRing rxr; + E1000E_RSSInfo rss_info; + size_t total_size; + ssize_t retval; + bool rdmts_hit; + + trace_e1000e_rx_receive_iov(iovcnt); + + if (!e1000x_hw_rx_enabled(core->mac)) { + return -1; + } + + /* Pull virtio header in */ + if (core->has_vnet) { + net_rx_pkt_set_vhdr_iovec(core->rx_pkt, iov, iovcnt); + iov_ofs = sizeof(struct virtio_net_hdr); + } + + filter_buf = iov->iov_base + iov_ofs; + orig_size = iov_size(iov, iovcnt); + size = orig_size - iov_ofs; + + /* Pad to minimum Ethernet frame length */ + if (size < sizeof(min_buf)) { + iov_to_buf(iov, iovcnt, iov_ofs, min_buf, size); + memset(&min_buf[size], 0, sizeof(min_buf) - size); + e1000x_inc_reg_if_not_full(core->mac, RUC); + min_iov.iov_base = filter_buf = min_buf; + min_iov.iov_len = size = sizeof(min_buf); + iovcnt = 1; + iov = &min_iov; + iov_ofs = 0; + } else if (iov->iov_len < maximum_ethernet_hdr_len) { + /* This is very unlikely, but may happen. */ + iov_to_buf(iov, iovcnt, iov_ofs, min_buf, maximum_ethernet_hdr_len); + filter_buf = min_buf; + } + + /* Discard oversized packets if !LPE and !SBP. 
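+     * (LPE and SBP are the RCTL "long packet enable" and "store bad
+     * packets" bits; the check itself lives in e1000x_is_oversized().)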
*/ + if (e1000x_is_oversized(core->mac, size)) { + return orig_size; + } + + net_rx_pkt_set_packet_type(core->rx_pkt, + get_eth_packet_type(PKT_GET_ETH_HDR(filter_buf))); + + if (!e1000e_receive_filter(core, filter_buf, size)) { + trace_e1000e_rx_flt_dropped(); + return orig_size; + } + + net_rx_pkt_attach_iovec_ex(core->rx_pkt, iov, iovcnt, iov_ofs, + e1000x_vlan_enabled(core->mac), core->mac[VET]); + + e1000e_rss_parse_packet(core, core->rx_pkt, &rss_info); + e1000e_rx_ring_init(core, &rxr, rss_info.queue); + + trace_e1000e_rx_rss_dispatched_to_queue(rxr.i->idx); + + total_size = net_rx_pkt_get_total_len(core->rx_pkt) + + e1000x_fcs_len(core->mac); + + if (e1000e_has_rxbufs(core, rxr.i, total_size)) { + e1000e_rx_fix_l4_csum(core, core->rx_pkt); + + e1000e_write_packet_to_guest(core, core->rx_pkt, &rxr, &rss_info); + + retval = orig_size; + + /* Perform small receive detection (RSRPD) */ + if (total_size < core->mac[RSRPD]) { + n |= E1000_ICS_SRPD; + } + + /* Perform ACK receive detection */ + if (!(core->mac[RFCTL] & E1000_RFCTL_ACK_DIS) && + (e1000e_is_tcp_ack(core, core->rx_pkt))) { + n |= E1000_ICS_ACK; + } + + /* Check if receive descriptor minimum threshold hit */ + rdmts_hit = e1000e_rx_descr_threshold_hit(core, rxr.i); + n |= e1000e_rx_wb_interrupt_cause(core, rxr.i->idx, rdmts_hit); + + trace_e1000e_rx_written_to_guest(n); + } else { + n |= E1000_ICS_RXO; + retval = 0; + + trace_e1000e_rx_not_written_to_guest(n); + } + + if (!e1000e_intrmgr_delay_rx_causes(core, &n)) { + trace_e1000e_rx_interrupt_set(n); + e1000e_set_interrupt_cause(core, n); + } else { + trace_e1000e_rx_interrupt_delayed(n); + } + + return retval; +} + +static inline bool +e1000e_have_autoneg(E1000ECore *core) +{ + return core->phy[0][PHY_CTRL] & MII_CR_AUTO_NEG_EN; +} + +static void e1000e_update_flowctl_status(E1000ECore *core) +{ + if (e1000e_have_autoneg(core) && + core->phy[0][PHY_STATUS] & MII_SR_AUTONEG_COMPLETE) { + trace_e1000e_link_autoneg_flowctl(true); + core->mac[CTRL] |= E1000_CTRL_TFCE | E1000_CTRL_RFCE; + } else { + trace_e1000e_link_autoneg_flowctl(false); + } +} + +static inline void +e1000e_link_down(E1000ECore *core) +{ + e1000x_update_regs_on_link_down(core->mac, core->phy[0]); + e1000e_update_flowctl_status(core); +} + +static inline void +e1000e_set_phy_ctrl(E1000ECore *core, int index, uint16_t val) +{ + /* bits 0-5 reserved; MII_CR_[RESTART_AUTO_NEG,RESET] are self clearing */ + core->phy[0][PHY_CTRL] = val & ~(0x3f | + MII_CR_RESET | + MII_CR_RESTART_AUTO_NEG); + + if ((val & MII_CR_RESTART_AUTO_NEG) && + e1000e_have_autoneg(core)) { + e1000x_restart_autoneg(core->mac, core->phy[0], core->autoneg_timer); + } +} + +static void +e1000e_set_phy_oem_bits(E1000ECore *core, int index, uint16_t val) +{ + core->phy[0][PHY_OEM_BITS] = val & ~BIT(10); + + if (val & BIT(10)) { + e1000x_restart_autoneg(core->mac, core->phy[0], core->autoneg_timer); + } +} + +static void +e1000e_set_phy_page(E1000ECore *core, int index, uint16_t val) +{ + core->phy[0][PHY_PAGE] = val & PHY_PAGE_RW_MASK; +} + +void +e1000e_core_set_link_status(E1000ECore *core) +{ + NetClientState *nc = qemu_get_queue(core->owner_nic); + uint32_t old_status = core->mac[STATUS]; + + trace_e1000e_link_status_changed(nc->link_down ? 
false : true); + + if (nc->link_down) { + e1000x_update_regs_on_link_down(core->mac, core->phy[0]); + } else { + if (e1000e_have_autoneg(core) && + !(core->phy[0][PHY_STATUS] & MII_SR_AUTONEG_COMPLETE)) { + e1000x_restart_autoneg(core->mac, core->phy[0], + core->autoneg_timer); + } else { + e1000x_update_regs_on_link_up(core->mac, core->phy[0]); + e1000e_start_recv(core); + } + } + + if (core->mac[STATUS] != old_status) { + e1000e_set_interrupt_cause(core, E1000_ICR_LSC); + } +} + +static void +e1000e_set_ctrl(E1000ECore *core, int index, uint32_t val) +{ + trace_e1000e_core_ctrl_write(index, val); + + /* RST is self clearing */ + core->mac[CTRL] = val & ~E1000_CTRL_RST; + core->mac[CTRL_DUP] = core->mac[CTRL]; + + trace_e1000e_link_set_params( + !!(val & E1000_CTRL_ASDE), + (val & E1000_CTRL_SPD_SEL) >> E1000_CTRL_SPD_SHIFT, + !!(val & E1000_CTRL_FRCSPD), + !!(val & E1000_CTRL_FRCDPX), + !!(val & E1000_CTRL_RFCE), + !!(val & E1000_CTRL_TFCE)); + + if (val & E1000_CTRL_RST) { + trace_e1000e_core_ctrl_sw_reset(); + e1000x_reset_mac_addr(core->owner_nic, core->mac, core->permanent_mac); + } + + if (val & E1000_CTRL_PHY_RST) { + trace_e1000e_core_ctrl_phy_reset(); + core->mac[STATUS] |= E1000_STATUS_PHYRA; + } +} + +static void +e1000e_set_rfctl(E1000ECore *core, int index, uint32_t val) +{ + trace_e1000e_rx_set_rfctl(val); + + if (!(val & E1000_RFCTL_ISCSI_DIS)) { + trace_e1000e_wrn_iscsi_filtering_not_supported(); + } + + if (!(val & E1000_RFCTL_NFSW_DIS)) { + trace_e1000e_wrn_nfsw_filtering_not_supported(); + } + + if (!(val & E1000_RFCTL_NFSR_DIS)) { + trace_e1000e_wrn_nfsr_filtering_not_supported(); + } + + core->mac[RFCTL] = val; +} + +static void +e1000e_calc_per_desc_buf_size(E1000ECore *core) +{ + int i; + core->rx_desc_buf_size = 0; + + for (i = 0; i < ARRAY_SIZE(core->rxbuf_sizes); i++) { + core->rx_desc_buf_size += core->rxbuf_sizes[i]; + } +} + +static void +e1000e_parse_rxbufsize(E1000ECore *core) +{ + uint32_t rctl = core->mac[RCTL]; + + memset(core->rxbuf_sizes, 0, sizeof(core->rxbuf_sizes)); + + if (rctl & E1000_RCTL_DTYP_MASK) { + uint32_t bsize; + + bsize = core->mac[PSRCTL] & E1000_PSRCTL_BSIZE0_MASK; + core->rxbuf_sizes[0] = (bsize >> E1000_PSRCTL_BSIZE0_SHIFT) * 128; + + bsize = core->mac[PSRCTL] & E1000_PSRCTL_BSIZE1_MASK; + core->rxbuf_sizes[1] = (bsize >> E1000_PSRCTL_BSIZE1_SHIFT) * 1024; + + bsize = core->mac[PSRCTL] & E1000_PSRCTL_BSIZE2_MASK; + core->rxbuf_sizes[2] = (bsize >> E1000_PSRCTL_BSIZE2_SHIFT) * 1024; + + bsize = core->mac[PSRCTL] & E1000_PSRCTL_BSIZE3_MASK; + core->rxbuf_sizes[3] = (bsize >> E1000_PSRCTL_BSIZE3_SHIFT) * 1024; + } else if (rctl & E1000_RCTL_FLXBUF_MASK) { + int flxbuf = rctl & E1000_RCTL_FLXBUF_MASK; + core->rxbuf_sizes[0] = (flxbuf >> E1000_RCTL_FLXBUF_SHIFT) * 1024; + } else { + core->rxbuf_sizes[0] = e1000x_rxbufsize(rctl); + } + + trace_e1000e_rx_desc_buff_sizes(core->rxbuf_sizes[0], core->rxbuf_sizes[1], + core->rxbuf_sizes[2], core->rxbuf_sizes[3]); + + e1000e_calc_per_desc_buf_size(core); +} + +static void +e1000e_calc_rxdesclen(E1000ECore *core) +{ + if (e1000e_rx_use_legacy_descriptor(core)) { + core->rx_desc_len = sizeof(struct e1000_rx_desc); + } else { + if (core->mac[RCTL] & E1000_RCTL_DTYP_PS) { + core->rx_desc_len = sizeof(union e1000_rx_desc_packet_split); + } else { + core->rx_desc_len = sizeof(union e1000_rx_desc_extended); + } + } + trace_e1000e_rx_desc_len(core->rx_desc_len); +} + +static void +e1000e_set_rx_control(E1000ECore *core, int index, uint32_t val) +{ + core->mac[RCTL] = val; + 
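+    /*
+     * When the guest enables reception (RCTL.EN), re-derive the cached
+     * per-descriptor buffer sizes and descriptor length, and compute
+     * rxbuf_min_shift: e1000e_rx_descr_threshold_hit() compares against
+     * it so that the receive path can raise the RXDMT0 "descriptors low"
+     * cause (legacy/MSI mode) once only 1/2, 1/4, 1/8 or 1/16 of the
+     * ring remains free.
+     */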
trace_e1000e_rx_set_rctl(core->mac[RCTL]); + + if (val & E1000_RCTL_EN) { + e1000e_parse_rxbufsize(core); + e1000e_calc_rxdesclen(core); + core->rxbuf_min_shift = ((val / E1000_RCTL_RDMTS_QUAT) & 3) + 1 + + E1000_RING_DESC_LEN_SHIFT; + + e1000e_start_recv(core); + } +} + +static +void(*e1000e_phyreg_writeops[E1000E_PHY_PAGES][E1000E_PHY_PAGE_SIZE]) +(E1000ECore *, int, uint16_t) = { + [0] = { + [PHY_CTRL] = e1000e_set_phy_ctrl, + [PHY_PAGE] = e1000e_set_phy_page, + [PHY_OEM_BITS] = e1000e_set_phy_oem_bits + } +}; + +static inline void +e1000e_clear_ims_bits(E1000ECore *core, uint32_t bits) +{ + trace_e1000e_irq_clear_ims(bits, core->mac[IMS], core->mac[IMS] & ~bits); + core->mac[IMS] &= ~bits; +} + +static inline bool +e1000e_postpone_interrupt(bool *interrupt_pending, + E1000IntrDelayTimer *timer) +{ + if (timer->running) { + trace_e1000e_irq_postponed_by_xitr(timer->delay_reg << 2); + + *interrupt_pending = true; + return true; + } + + if (timer->core->mac[timer->delay_reg] != 0) { + e1000e_intrmgr_rearm_timer(timer); + } + + return false; +} + +static inline bool +e1000e_itr_should_postpone(E1000ECore *core) +{ + return e1000e_postpone_interrupt(&core->itr_intr_pending, &core->itr); +} + +static inline bool +e1000e_eitr_should_postpone(E1000ECore *core, int idx) +{ + return e1000e_postpone_interrupt(&core->eitr_intr_pending[idx], + &core->eitr[idx]); +} + +static void +e1000e_msix_notify_one(E1000ECore *core, uint32_t cause, uint32_t int_cfg) +{ + uint32_t effective_eiac; + + if (E1000_IVAR_ENTRY_VALID(int_cfg)) { + uint32_t vec = E1000_IVAR_ENTRY_VEC(int_cfg); + if (vec < E1000E_MSIX_VEC_NUM) { + if (!e1000e_eitr_should_postpone(core, vec)) { + trace_e1000e_irq_msix_notify_vec(vec); + msix_notify(core->owner, vec); + } + } else { + trace_e1000e_wrn_msix_vec_wrong(cause, int_cfg); + } + } else { + trace_e1000e_wrn_msix_invalid(cause, int_cfg); + } + + if (core->mac[CTRL_EXT] & E1000_CTRL_EXT_EIAME) { + trace_e1000e_irq_iam_clear_eiame(core->mac[IAM], cause); + core->mac[IAM] &= ~cause; + } + + trace_e1000e_irq_icr_clear_eiac(core->mac[ICR], core->mac[EIAC]); + + effective_eiac = core->mac[EIAC] & cause; + + core->mac[ICR] &= ~effective_eiac; + core->msi_causes_pending &= ~effective_eiac; + + if (!(core->mac[CTRL_EXT] & E1000_CTRL_EXT_IAME)) { + core->mac[IMS] &= ~effective_eiac; + } +} + +static void +e1000e_msix_notify(E1000ECore *core, uint32_t causes) +{ + if (causes & E1000_ICR_RXQ0) { + e1000e_msix_notify_one(core, E1000_ICR_RXQ0, + E1000_IVAR_RXQ0(core->mac[IVAR])); + } + + if (causes & E1000_ICR_RXQ1) { + e1000e_msix_notify_one(core, E1000_ICR_RXQ1, + E1000_IVAR_RXQ1(core->mac[IVAR])); + } + + if (causes & E1000_ICR_TXQ0) { + e1000e_msix_notify_one(core, E1000_ICR_TXQ0, + E1000_IVAR_TXQ0(core->mac[IVAR])); + } + + if (causes & E1000_ICR_TXQ1) { + e1000e_msix_notify_one(core, E1000_ICR_TXQ1, + E1000_IVAR_TXQ1(core->mac[IVAR])); + } + + if (causes & E1000_ICR_OTHER) { + e1000e_msix_notify_one(core, E1000_ICR_OTHER, + E1000_IVAR_OTHER(core->mac[IVAR])); + } +} + +static void +e1000e_msix_clear_one(E1000ECore *core, uint32_t cause, uint32_t int_cfg) +{ + if (E1000_IVAR_ENTRY_VALID(int_cfg)) { + uint32_t vec = E1000_IVAR_ENTRY_VEC(int_cfg); + if (vec < E1000E_MSIX_VEC_NUM) { + trace_e1000e_irq_msix_pending_clearing(cause, int_cfg, vec); + msix_clr_pending(core->owner, vec); + } else { + trace_e1000e_wrn_msix_vec_wrong(cause, int_cfg); + } + } else { + trace_e1000e_wrn_msix_invalid(cause, int_cfg); + } +} + +static void +e1000e_msix_clear(E1000ECore *core, uint32_t causes) +{ + if 
(causes & E1000_ICR_RXQ0) { + e1000e_msix_clear_one(core, E1000_ICR_RXQ0, + E1000_IVAR_RXQ0(core->mac[IVAR])); + } + + if (causes & E1000_ICR_RXQ1) { + e1000e_msix_clear_one(core, E1000_ICR_RXQ1, + E1000_IVAR_RXQ1(core->mac[IVAR])); + } + + if (causes & E1000_ICR_TXQ0) { + e1000e_msix_clear_one(core, E1000_ICR_TXQ0, + E1000_IVAR_TXQ0(core->mac[IVAR])); + } + + if (causes & E1000_ICR_TXQ1) { + e1000e_msix_clear_one(core, E1000_ICR_TXQ1, + E1000_IVAR_TXQ1(core->mac[IVAR])); + } + + if (causes & E1000_ICR_OTHER) { + e1000e_msix_clear_one(core, E1000_ICR_OTHER, + E1000_IVAR_OTHER(core->mac[IVAR])); + } +} + +static inline void +e1000e_fix_icr_asserted(E1000ECore *core) +{ + core->mac[ICR] &= ~E1000_ICR_ASSERTED; + if (core->mac[ICR]) { + core->mac[ICR] |= E1000_ICR_ASSERTED; + } + + trace_e1000e_irq_fix_icr_asserted(core->mac[ICR]); +} + +static void +e1000e_send_msi(E1000ECore *core, bool msix) +{ + uint32_t causes = core->mac[ICR] & core->mac[IMS] & ~E1000_ICR_ASSERTED; + + core->msi_causes_pending &= causes; + causes ^= core->msi_causes_pending; + if (causes == 0) { + return; + } + core->msi_causes_pending |= causes; + + if (msix) { + e1000e_msix_notify(core, causes); + } else { + if (!e1000e_itr_should_postpone(core)) { + trace_e1000e_irq_msi_notify(causes); + msi_notify(core->owner, 0); + } + } +} + +static void +e1000e_update_interrupt_state(E1000ECore *core) +{ + bool interrupts_pending; + bool is_msix = msix_enabled(core->owner); + + /* Set ICR[OTHER] for MSI-X */ + if (is_msix) { + if (core->mac[ICR] & E1000_ICR_OTHER_CAUSES) { + core->mac[ICR] |= E1000_ICR_OTHER; + trace_e1000e_irq_add_msi_other(core->mac[ICR]); + } + } + + e1000e_fix_icr_asserted(core); + + /* + * Make sure ICR and ICS registers have the same value. + * The spec says that the ICS register is write-only. However in practice, + * on real hardware ICS is readable, and for reads it has the same value as + * ICR (except that ICS does not have the clear on read behaviour of ICR). + * + * The VxWorks PRO/1000 driver uses this behaviour. + */ + core->mac[ICS] = core->mac[ICR]; + + interrupts_pending = (core->mac[IMS] & core->mac[ICR]) ? 
true : false; + if (!interrupts_pending) { + core->msi_causes_pending = 0; + } + + trace_e1000e_irq_pending_interrupts(core->mac[ICR] & core->mac[IMS], + core->mac[ICR], core->mac[IMS]); + + if (is_msix || msi_enabled(core->owner)) { + if (interrupts_pending) { + e1000e_send_msi(core, is_msix); + } + } else { + if (interrupts_pending) { + if (!e1000e_itr_should_postpone(core)) { + e1000e_raise_legacy_irq(core); + } + } else { + e1000e_lower_legacy_irq(core); + } + } +} + +static void +e1000e_set_interrupt_cause(E1000ECore *core, uint32_t val) +{ + trace_e1000e_irq_set_cause_entry(val, core->mac[ICR]); + + val |= e1000e_intmgr_collect_delayed_causes(core); + core->mac[ICR] |= val; + + trace_e1000e_irq_set_cause_exit(val, core->mac[ICR]); + + e1000e_update_interrupt_state(core); +} + +static inline void +e1000e_autoneg_timer(void *opaque) +{ + E1000ECore *core = opaque; + if (!qemu_get_queue(core->owner_nic)->link_down) { + e1000x_update_regs_on_autoneg_done(core->mac, core->phy[0]); + e1000e_start_recv(core); + + e1000e_update_flowctl_status(core); + /* signal link status change to the guest */ + e1000e_set_interrupt_cause(core, E1000_ICR_LSC); + } +} + +static inline uint16_t +e1000e_get_reg_index_with_offset(const uint16_t *mac_reg_access, hwaddr addr) +{ + uint16_t index = (addr & 0x1ffff) >> 2; + return index + (mac_reg_access[index] & 0xfffe); +} + +static const char e1000e_phy_regcap[E1000E_PHY_PAGES][0x20] = { + [0] = { + [PHY_CTRL] = PHY_ANYPAGE | PHY_RW, + [PHY_STATUS] = PHY_ANYPAGE | PHY_R, + [PHY_ID1] = PHY_ANYPAGE | PHY_R, + [PHY_ID2] = PHY_ANYPAGE | PHY_R, + [PHY_AUTONEG_ADV] = PHY_ANYPAGE | PHY_RW, + [PHY_LP_ABILITY] = PHY_ANYPAGE | PHY_R, + [PHY_AUTONEG_EXP] = PHY_ANYPAGE | PHY_R, + [PHY_NEXT_PAGE_TX] = PHY_ANYPAGE | PHY_RW, + [PHY_LP_NEXT_PAGE] = PHY_ANYPAGE | PHY_R, + [PHY_1000T_CTRL] = PHY_ANYPAGE | PHY_RW, + [PHY_1000T_STATUS] = PHY_ANYPAGE | PHY_R, + [PHY_EXT_STATUS] = PHY_ANYPAGE | PHY_R, + [PHY_PAGE] = PHY_ANYPAGE | PHY_RW, + + [PHY_COPPER_CTRL1] = PHY_RW, + [PHY_COPPER_STAT1] = PHY_R, + [PHY_COPPER_CTRL3] = PHY_RW, + [PHY_RX_ERR_CNTR] = PHY_R, + [PHY_OEM_BITS] = PHY_RW, + [PHY_BIAS_1] = PHY_RW, + [PHY_BIAS_2] = PHY_RW, + [PHY_COPPER_INT_ENABLE] = PHY_RW, + [PHY_COPPER_STAT2] = PHY_R, + [PHY_COPPER_CTRL2] = PHY_RW + }, + [2] = { + [PHY_MAC_CTRL1] = PHY_RW, + [PHY_MAC_INT_ENABLE] = PHY_RW, + [PHY_MAC_STAT] = PHY_R, + [PHY_MAC_CTRL2] = PHY_RW + }, + [3] = { + [PHY_LED_03_FUNC_CTRL1] = PHY_RW, + [PHY_LED_03_POL_CTRL] = PHY_RW, + [PHY_LED_TIMER_CTRL] = PHY_RW, + [PHY_LED_45_CTRL] = PHY_RW + }, + [5] = { + [PHY_1000T_SKEW] = PHY_R, + [PHY_1000T_SWAP] = PHY_R + }, + [6] = { + [PHY_CRC_COUNTERS] = PHY_R + } +}; + +static bool +e1000e_phy_reg_check_cap(E1000ECore *core, uint32_t addr, + char cap, uint8_t *page) +{ + *page = + (e1000e_phy_regcap[0][addr] & PHY_ANYPAGE) ? 
0 + : core->phy[0][PHY_PAGE]; + + if (*page >= E1000E_PHY_PAGES) { + return false; + } + + return e1000e_phy_regcap[*page][addr] & cap; +} + +static void +e1000e_phy_reg_write(E1000ECore *core, uint8_t page, + uint32_t addr, uint16_t data) +{ + assert(page < E1000E_PHY_PAGES); + assert(addr < E1000E_PHY_PAGE_SIZE); + + if (e1000e_phyreg_writeops[page][addr]) { + e1000e_phyreg_writeops[page][addr](core, addr, data); + } else { + core->phy[page][addr] = data; + } +} + +static void +e1000e_set_mdic(E1000ECore *core, int index, uint32_t val) +{ + uint32_t data = val & E1000_MDIC_DATA_MASK; + uint32_t addr = ((val & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT); + uint8_t page; + + if ((val & E1000_MDIC_PHY_MASK) >> E1000_MDIC_PHY_SHIFT != 1) { /* phy # */ + val = core->mac[MDIC] | E1000_MDIC_ERROR; + } else if (val & E1000_MDIC_OP_READ) { + if (!e1000e_phy_reg_check_cap(core, addr, PHY_R, &page)) { + trace_e1000e_core_mdic_read_unhandled(page, addr); + val |= E1000_MDIC_ERROR; + } else { + val = (val ^ data) | core->phy[page][addr]; + trace_e1000e_core_mdic_read(page, addr, val); + } + } else if (val & E1000_MDIC_OP_WRITE) { + if (!e1000e_phy_reg_check_cap(core, addr, PHY_W, &page)) { + trace_e1000e_core_mdic_write_unhandled(page, addr); + val |= E1000_MDIC_ERROR; + } else { + trace_e1000e_core_mdic_write(page, addr, data); + e1000e_phy_reg_write(core, page, addr, data); + } + } + core->mac[MDIC] = val | E1000_MDIC_READY; + + if (val & E1000_MDIC_INT_EN) { + e1000e_set_interrupt_cause(core, E1000_ICR_MDAC); + } +} + +static void +e1000e_set_rdt(E1000ECore *core, int index, uint32_t val) +{ + core->mac[index] = val & 0xffff; + trace_e1000e_rx_set_rdt(e1000e_mq_queue_idx(RDT0, index), val); + e1000e_start_recv(core); +} + +static void +e1000e_set_status(E1000ECore *core, int index, uint32_t val) +{ + if ((val & E1000_STATUS_PHYRA) == 0) { + core->mac[index] &= ~E1000_STATUS_PHYRA; + } +} + +static void +e1000e_set_ctrlext(E1000ECore *core, int index, uint32_t val) +{ + trace_e1000e_link_set_ext_params(!!(val & E1000_CTRL_EXT_ASDCHK), + !!(val & E1000_CTRL_EXT_SPD_BYPS)); + + /* Zero self-clearing bits */ + val &= ~(E1000_CTRL_EXT_ASDCHK | E1000_CTRL_EXT_EE_RST); + core->mac[CTRL_EXT] = val; +} + +static void +e1000e_set_pbaclr(E1000ECore *core, int index, uint32_t val) +{ + int i; + + core->mac[PBACLR] = val & E1000_PBACLR_VALID_MASK; + + if (!msix_enabled(core->owner)) { + return; + } + + for (i = 0; i < E1000E_MSIX_VEC_NUM; i++) { + if (core->mac[PBACLR] & BIT(i)) { + msix_clr_pending(core->owner, i); + } + } +} + +static void +e1000e_set_fcrth(E1000ECore *core, int index, uint32_t val) +{ + core->mac[FCRTH] = val & 0xFFF8; +} + +static void +e1000e_set_fcrtl(E1000ECore *core, int index, uint32_t val) +{ + core->mac[FCRTL] = val & 0x8000FFF8; +} + +static inline void +e1000e_set_16bit(E1000ECore *core, int index, uint32_t val) +{ + core->mac[index] = val & 0xffff; +} + +static void +e1000e_set_12bit(E1000ECore *core, int index, uint32_t val) +{ + core->mac[index] = val & 0xfff; +} + +static void +e1000e_set_vet(E1000ECore *core, int index, uint32_t val) +{ + core->mac[VET] = val & 0xffff; + trace_e1000e_vlan_vet(core->mac[VET]); +} + +static void +e1000e_set_dlen(E1000ECore *core, int index, uint32_t val) +{ + core->mac[index] = val & E1000_XDLEN_MASK; +} + +static void +e1000e_set_dbal(E1000ECore *core, int index, uint32_t val) +{ + core->mac[index] = val & E1000_XDBAL_MASK; +} + +static void +e1000e_set_tctl(E1000ECore *core, int index, uint32_t val) +{ + E1000E_TxRing txr; + 
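+    /*
+     * A TCTL write is one of the transmit kick points: after the value
+     * is stored, both TX rings are polled and pending descriptors are
+     * sent on any ring whose TARC enable bit is set (e1000e_set_tdt()
+     * does the same for a single ring).
+     */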
core->mac[index] = val;
+
+    if (core->mac[TARC0] & E1000_TARC_ENABLE) {
+        e1000e_tx_ring_init(core, &txr, 0);
+        e1000e_start_xmit(core, &txr);
+    }
+
+    if (core->mac[TARC1] & E1000_TARC_ENABLE) {
+        e1000e_tx_ring_init(core, &txr, 1);
+        e1000e_start_xmit(core, &txr);
+    }
+}
+
+static void
+e1000e_set_tdt(E1000ECore *core, int index, uint32_t val)
+{
+    E1000E_TxRing txr;
+    int qidx = e1000e_mq_queue_idx(TDT, index);
+    uint32_t tarc_reg = (qidx == 0) ? TARC0 : TARC1;
+
+    core->mac[index] = val & 0xffff;
+
+    if (core->mac[tarc_reg] & E1000_TARC_ENABLE) {
+        e1000e_tx_ring_init(core, &txr, qidx);
+        e1000e_start_xmit(core, &txr);
+    }
+}
+
+static void
+e1000e_set_ics(E1000ECore *core, int index, uint32_t val)
+{
+    trace_e1000e_irq_write_ics(val);
+    e1000e_set_interrupt_cause(core, val);
+}
+
+static void
+e1000e_set_icr(E1000ECore *core, int index, uint32_t val)
+{
+    uint32_t icr = 0;
+    if ((core->mac[ICR] & E1000_ICR_ASSERTED) &&
+        (core->mac[CTRL_EXT] & E1000_CTRL_EXT_IAME)) {
+        trace_e1000e_irq_icr_process_iame();
+        e1000e_clear_ims_bits(core, core->mac[IAM]);
+    }
+
+    icr = core->mac[ICR] & ~val;
+    /* The Windows driver expects the "receive overrun" bit and other
+     * ones to be cleared when the "Other" bit (#24) is cleared.
+     */
+    icr = (val & E1000_ICR_OTHER) ? (icr & ~E1000_ICR_OTHER_CAUSES) : icr;
+    trace_e1000e_irq_icr_write(val, core->mac[ICR], icr);
+    core->mac[ICR] = icr;
+    e1000e_update_interrupt_state(core);
+}
+
+static void
+e1000e_set_imc(E1000ECore *core, int index, uint32_t val)
+{
+    trace_e1000e_irq_ims_clear_set_imc(val);
+    e1000e_clear_ims_bits(core, val);
+    e1000e_update_interrupt_state(core);
+}
+
+static void
+e1000e_set_ims(E1000ECore *core, int index, uint32_t val)
+{
+    static const uint32_t ims_ext_mask =
+        E1000_IMS_RXQ0 | E1000_IMS_RXQ1 |
+        E1000_IMS_TXQ0 | E1000_IMS_TXQ1 |
+        E1000_IMS_OTHER;
+
+    static const uint32_t ims_valid_mask =
+        E1000_IMS_TXDW | E1000_IMS_TXQE | E1000_IMS_LSC |
+        E1000_IMS_RXDMT0 | E1000_IMS_RXO | E1000_IMS_RXT0 |
+        E1000_IMS_MDAC | E1000_IMS_TXD_LOW | E1000_IMS_SRPD |
+        E1000_IMS_ACK | E1000_IMS_MNG | E1000_IMS_RXQ0 |
+        E1000_IMS_RXQ1 | E1000_IMS_TXQ0 | E1000_IMS_TXQ1 |
+        E1000_IMS_OTHER;
+
+    uint32_t valid_val = val & ims_valid_mask;
+
+    trace_e1000e_irq_set_ims(val, core->mac[IMS], core->mac[IMS] | valid_val);
+    core->mac[IMS] |= valid_val;
+
+    if ((valid_val & ims_ext_mask) &&
+        (core->mac[CTRL_EXT] & E1000_CTRL_EXT_PBA_CLR) &&
+        msix_enabled(core->owner)) {
+        e1000e_msix_clear(core, valid_val);
+    }
+
+    if ((valid_val == ims_valid_mask) &&
+        (core->mac[CTRL_EXT] & E1000_CTRL_EXT_INT_TIMERS_CLEAR_ENA)) {
+        trace_e1000e_irq_fire_all_timers(val);
+        e1000e_intrmgr_fire_all_timers(core);
+    }
+
+    e1000e_update_interrupt_state(core);
+}
+
+static void
+e1000e_set_rdtr(E1000ECore *core, int index, uint32_t val)
+{
+    e1000e_set_16bit(core, index, val);
+
+    if ((val & E1000_RDTR_FPD) && (core->rdtr.running)) {
+        trace_e1000e_irq_rdtr_fpd_running();
+        e1000e_intrmgr_fire_delayed_interrupts(core);
+    } else {
+        trace_e1000e_irq_rdtr_fpd_not_running();
+    }
+}
+
+static void
+e1000e_set_tidv(E1000ECore *core, int index, uint32_t val)
+{
+    e1000e_set_16bit(core, index, val);
+
+    if ((val & E1000_TIDV_FPD) && (core->tidv.running)) {
+        trace_e1000e_irq_tidv_fpd_running();
+        e1000e_intrmgr_fire_delayed_interrupts(core);
+    } else {
+        trace_e1000e_irq_tidv_fpd_not_running();
+    }
+}
+
+static uint32_t
+e1000e_mac_readreg(E1000ECore *core, int index)
+{
+    return core->mac[index];
+}
+
+static uint32_t
+e1000e_mac_ics_read(E1000ECore *core, int index)
+{
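+    /*
+     * ICS is documented as write-only, but reads are serviced here and
+     * return the ICR mirror kept up to date by
+     * e1000e_update_interrupt_state() (see the comment there about the
+     * VxWorks PRO/1000 driver).
+     */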
trace_e1000e_irq_read_ics(core->mac[ICS]); + return core->mac[ICS]; +} + +static uint32_t +e1000e_mac_ims_read(E1000ECore *core, int index) +{ + trace_e1000e_irq_read_ims(core->mac[IMS]); + return core->mac[IMS]; +} + +#define E1000E_LOW_BITS_READ_FUNC(num) \ + static uint32_t \ + e1000e_mac_low##num##_read(E1000ECore *core, int index) \ + { \ + return core->mac[index] & (BIT(num) - 1); \ + } \ + +#define E1000E_LOW_BITS_READ(num) \ + e1000e_mac_low##num##_read + +E1000E_LOW_BITS_READ_FUNC(4); +E1000E_LOW_BITS_READ_FUNC(6); +E1000E_LOW_BITS_READ_FUNC(11); +E1000E_LOW_BITS_READ_FUNC(13); +E1000E_LOW_BITS_READ_FUNC(16); + +static uint32_t +e1000e_mac_swsm_read(E1000ECore *core, int index) +{ + uint32_t val = core->mac[SWSM]; + core->mac[SWSM] = val | 1; + return val; +} + +static uint32_t +e1000e_mac_itr_read(E1000ECore *core, int index) +{ + return core->itr_guest_value; +} + +static uint32_t +e1000e_mac_eitr_read(E1000ECore *core, int index) +{ + return core->eitr_guest_value[index - EITR]; +} + +static uint32_t +e1000e_mac_icr_read(E1000ECore *core, int index) +{ + uint32_t ret = core->mac[ICR]; + trace_e1000e_irq_icr_read_entry(ret); + + if (core->mac[IMS] == 0) { + trace_e1000e_irq_icr_clear_zero_ims(); + core->mac[ICR] = 0; + } + + if ((core->mac[ICR] & E1000_ICR_ASSERTED) && + (core->mac[CTRL_EXT] & E1000_CTRL_EXT_IAME)) { + trace_e1000e_irq_icr_clear_iame(); + core->mac[ICR] = 0; + trace_e1000e_irq_icr_process_iame(); + e1000e_clear_ims_bits(core, core->mac[IAM]); + } + + trace_e1000e_irq_icr_read_exit(core->mac[ICR]); + e1000e_update_interrupt_state(core); + return ret; +} + +static uint32_t +e1000e_mac_read_clr4(E1000ECore *core, int index) +{ + uint32_t ret = core->mac[index]; + + core->mac[index] = 0; + return ret; +} + +static uint32_t +e1000e_mac_read_clr8(E1000ECore *core, int index) +{ + uint32_t ret = core->mac[index]; + + core->mac[index] = 0; + core->mac[index - 1] = 0; + return ret; +} + +static uint32_t +e1000e_get_ctrl(E1000ECore *core, int index) +{ + uint32_t val = core->mac[CTRL]; + + trace_e1000e_link_read_params( + !!(val & E1000_CTRL_ASDE), + (val & E1000_CTRL_SPD_SEL) >> E1000_CTRL_SPD_SHIFT, + !!(val & E1000_CTRL_FRCSPD), + !!(val & E1000_CTRL_FRCDPX), + !!(val & E1000_CTRL_RFCE), + !!(val & E1000_CTRL_TFCE)); + + return val; +} + +static uint32_t +e1000e_get_status(E1000ECore *core, int index) +{ + uint32_t res = core->mac[STATUS]; + + if (!(core->mac[CTRL] & E1000_CTRL_GIO_MASTER_DISABLE)) { + res |= E1000_STATUS_GIO_MASTER_ENABLE; + } + + if (core->mac[CTRL] & E1000_CTRL_FRCDPX) { + res |= (core->mac[CTRL] & E1000_CTRL_FD) ? 
E1000_STATUS_FD : 0; + } else { + res |= E1000_STATUS_FD; + } + + if ((core->mac[CTRL] & E1000_CTRL_FRCSPD) || + (core->mac[CTRL_EXT] & E1000_CTRL_EXT_SPD_BYPS)) { + switch (core->mac[CTRL] & E1000_CTRL_SPD_SEL) { + case E1000_CTRL_SPD_10: + res |= E1000_STATUS_SPEED_10; + break; + case E1000_CTRL_SPD_100: + res |= E1000_STATUS_SPEED_100; + break; + case E1000_CTRL_SPD_1000: + default: + res |= E1000_STATUS_SPEED_1000; + break; + } + } else { + res |= E1000_STATUS_SPEED_1000; + } + + trace_e1000e_link_status( + !!(res & E1000_STATUS_LU), + !!(res & E1000_STATUS_FD), + (res & E1000_STATUS_SPEED_MASK) >> E1000_STATUS_SPEED_SHIFT, + (res & E1000_STATUS_ASDV) >> E1000_STATUS_ASDV_SHIFT); + + return res; +} + +static uint32_t +e1000e_get_tarc(E1000ECore *core, int index) +{ + return core->mac[index] & ((BIT(11) - 1) | + BIT(27) | + BIT(28) | + BIT(29) | + BIT(30)); +} + +static void +e1000e_mac_writereg(E1000ECore *core, int index, uint32_t val) +{ + core->mac[index] = val; +} + +static void +e1000e_mac_setmacaddr(E1000ECore *core, int index, uint32_t val) +{ + uint32_t macaddr[2]; + + core->mac[index] = val; + + macaddr[0] = cpu_to_le32(core->mac[RA]); + macaddr[1] = cpu_to_le32(core->mac[RA + 1]); + qemu_format_nic_info_str(qemu_get_queue(core->owner_nic), + (uint8_t *) macaddr); + + trace_e1000e_mac_set_sw(MAC_ARG(macaddr)); +} + +static void +e1000e_set_eecd(E1000ECore *core, int index, uint32_t val) +{ + static const uint32_t ro_bits = E1000_EECD_PRES | + E1000_EECD_AUTO_RD | + E1000_EECD_SIZE_EX_MASK; + + core->mac[EECD] = (core->mac[EECD] & ro_bits) | (val & ~ro_bits); +} + +static void +e1000e_set_eerd(E1000ECore *core, int index, uint32_t val) +{ + uint32_t addr = (val >> E1000_EERW_ADDR_SHIFT) & E1000_EERW_ADDR_MASK; + uint32_t flags = 0; + uint32_t data = 0; + + if ((addr < E1000E_EEPROM_SIZE) && (val & E1000_EERW_START)) { + data = core->eeprom[addr]; + flags = E1000_EERW_DONE; + } + + core->mac[EERD] = flags | + (addr << E1000_EERW_ADDR_SHIFT) | + (data << E1000_EERW_DATA_SHIFT); +} + +static void +e1000e_set_eewr(E1000ECore *core, int index, uint32_t val) +{ + uint32_t addr = (val >> E1000_EERW_ADDR_SHIFT) & E1000_EERW_ADDR_MASK; + uint32_t data = (val >> E1000_EERW_DATA_SHIFT) & E1000_EERW_DATA_MASK; + uint32_t flags = 0; + + if ((addr < E1000E_EEPROM_SIZE) && (val & E1000_EERW_START)) { + core->eeprom[addr] = data; + flags = E1000_EERW_DONE; + } + + core->mac[EERD] = flags | + (addr << E1000_EERW_ADDR_SHIFT) | + (data << E1000_EERW_DATA_SHIFT); +} + +static void +e1000e_set_rxdctl(E1000ECore *core, int index, uint32_t val) +{ + core->mac[RXDCTL] = core->mac[RXDCTL1] = val; +} + +static void +e1000e_set_itr(E1000ECore *core, int index, uint32_t val) +{ + uint32_t interval = val & 0xffff; + + trace_e1000e_irq_itr_set(val); + + core->itr_guest_value = interval; + core->mac[index] = MAX(interval, E1000E_MIN_XITR); +} + +static void +e1000e_set_eitr(E1000ECore *core, int index, uint32_t val) +{ + uint32_t interval = val & 0xffff; + uint32_t eitr_num = index - EITR; + + trace_e1000e_irq_eitr_set(eitr_num, val); + + core->eitr_guest_value[eitr_num] = interval; + core->mac[index] = MAX(interval, E1000E_MIN_XITR); +} + +static void +e1000e_set_psrctl(E1000ECore *core, int index, uint32_t val) +{ + if (core->mac[RCTL] & E1000_RCTL_DTYP_MASK) { + + if ((val & E1000_PSRCTL_BSIZE0_MASK) == 0) { + qemu_log_mask(LOG_GUEST_ERROR, + "e1000e: PSRCTL.BSIZE0 cannot be zero"); + return; + } + + if ((val & E1000_PSRCTL_BSIZE1_MASK) == 0) { + qemu_log_mask(LOG_GUEST_ERROR, + "e1000e: PSRCTL.BSIZE1 
cannot be zero"); + return; + } + } + + core->mac[PSRCTL] = val; +} + +static void +e1000e_update_rx_offloads(E1000ECore *core) +{ + int cso_state = e1000e_rx_l4_cso_enabled(core); + + trace_e1000e_rx_set_cso(cso_state); + + if (core->has_vnet) { + qemu_set_offload(qemu_get_queue(core->owner_nic)->peer, + cso_state, 0, 0, 0, 0); + } +} + +static void +e1000e_set_rxcsum(E1000ECore *core, int index, uint32_t val) +{ + core->mac[RXCSUM] = val; + e1000e_update_rx_offloads(core); +} + +static void +e1000e_set_gcr(E1000ECore *core, int index, uint32_t val) +{ + uint32_t ro_bits = core->mac[GCR] & E1000_GCR_RO_BITS; + core->mac[GCR] = (val & ~E1000_GCR_RO_BITS) | ro_bits; +} + +#define e1000e_getreg(x) [x] = e1000e_mac_readreg +typedef uint32_t (*readops)(E1000ECore *, int); +static const readops e1000e_macreg_readops[] = { + e1000e_getreg(PBA), + e1000e_getreg(WUFC), + e1000e_getreg(MANC), + e1000e_getreg(TOTL), + e1000e_getreg(RDT0), + e1000e_getreg(RDBAH0), + e1000e_getreg(TDBAL1), + e1000e_getreg(RDLEN0), + e1000e_getreg(RDH1), + e1000e_getreg(LATECOL), + e1000e_getreg(SEQEC), + e1000e_getreg(XONTXC), + e1000e_getreg(WUS), + e1000e_getreg(GORCL), + e1000e_getreg(MGTPRC), + e1000e_getreg(EERD), + e1000e_getreg(EIAC), + e1000e_getreg(PSRCTL), + e1000e_getreg(MANC2H), + e1000e_getreg(RXCSUM), + e1000e_getreg(GSCL_3), + e1000e_getreg(GSCN_2), + e1000e_getreg(RSRPD), + e1000e_getreg(RDBAL1), + e1000e_getreg(FCAH), + e1000e_getreg(FCRTH), + e1000e_getreg(FLOP), + e1000e_getreg(FLASHT), + e1000e_getreg(RXSTMPH), + e1000e_getreg(TXSTMPL), + e1000e_getreg(TIMADJL), + e1000e_getreg(TXDCTL), + e1000e_getreg(RDH0), + e1000e_getreg(TDT1), + e1000e_getreg(TNCRS), + e1000e_getreg(RJC), + e1000e_getreg(IAM), + e1000e_getreg(GSCL_2), + e1000e_getreg(RDBAH1), + e1000e_getreg(FLSWDATA), + e1000e_getreg(RXSATRH), + e1000e_getreg(TIPG), + e1000e_getreg(FLMNGCTL), + e1000e_getreg(FLMNGCNT), + e1000e_getreg(TSYNCTXCTL), + e1000e_getreg(EXTCNF_SIZE), + e1000e_getreg(EXTCNF_CTRL), + e1000e_getreg(EEMNGDATA), + e1000e_getreg(CTRL_EXT), + e1000e_getreg(SYSTIMH), + e1000e_getreg(EEMNGCTL), + e1000e_getreg(FLMNGDATA), + e1000e_getreg(TSYNCRXCTL), + e1000e_getreg(TDH), + e1000e_getreg(LEDCTL), + e1000e_getreg(TCTL), + e1000e_getreg(TDBAL), + e1000e_getreg(TDLEN), + e1000e_getreg(TDH1), + e1000e_getreg(RADV), + e1000e_getreg(ECOL), + e1000e_getreg(DC), + e1000e_getreg(RLEC), + e1000e_getreg(XOFFTXC), + e1000e_getreg(RFC), + e1000e_getreg(RNBC), + e1000e_getreg(MGTPTC), + e1000e_getreg(TIMINCA), + e1000e_getreg(RXCFGL), + e1000e_getreg(MFUTP01), + e1000e_getreg(FACTPS), + e1000e_getreg(GSCL_1), + e1000e_getreg(GSCN_0), + e1000e_getreg(GCR2), + e1000e_getreg(RDT1), + e1000e_getreg(PBACLR), + e1000e_getreg(FCTTV), + e1000e_getreg(EEWR), + e1000e_getreg(FLSWCTL), + e1000e_getreg(RXDCTL1), + e1000e_getreg(RXSATRL), + e1000e_getreg(SYSTIML), + e1000e_getreg(RXUDP), + e1000e_getreg(TORL), + e1000e_getreg(TDLEN1), + e1000e_getreg(MCC), + e1000e_getreg(WUC), + e1000e_getreg(EECD), + e1000e_getreg(MFUTP23), + e1000e_getreg(RAID), + e1000e_getreg(FCRTV), + e1000e_getreg(TXDCTL1), + e1000e_getreg(RCTL), + e1000e_getreg(TDT), + e1000e_getreg(MDIC), + e1000e_getreg(FCRUC), + e1000e_getreg(VET), + e1000e_getreg(RDBAL0), + e1000e_getreg(TDBAH1), + e1000e_getreg(RDTR), + e1000e_getreg(SCC), + e1000e_getreg(COLC), + e1000e_getreg(CEXTERR), + e1000e_getreg(XOFFRXC), + e1000e_getreg(IPAV), + e1000e_getreg(GOTCL), + e1000e_getreg(MGTPDC), + e1000e_getreg(GCR), + e1000e_getreg(IVAR), + e1000e_getreg(POEMB), + e1000e_getreg(MFVAL), + 
e1000e_getreg(FUNCTAG), + e1000e_getreg(GSCL_4), + e1000e_getreg(GSCN_3), + e1000e_getreg(MRQC), + e1000e_getreg(RDLEN1), + e1000e_getreg(FCT), + e1000e_getreg(FLA), + e1000e_getreg(FLOL), + e1000e_getreg(RXDCTL), + e1000e_getreg(RXSTMPL), + e1000e_getreg(TXSTMPH), + e1000e_getreg(TIMADJH), + e1000e_getreg(FCRTL), + e1000e_getreg(TDBAH), + e1000e_getreg(TADV), + e1000e_getreg(XONRXC), + e1000e_getreg(TSCTFC), + e1000e_getreg(RFCTL), + e1000e_getreg(GSCN_1), + e1000e_getreg(FCAL), + e1000e_getreg(FLSWCNT), + + [TOTH] = e1000e_mac_read_clr8, + [GOTCH] = e1000e_mac_read_clr8, + [PRC64] = e1000e_mac_read_clr4, + [PRC255] = e1000e_mac_read_clr4, + [PRC1023] = e1000e_mac_read_clr4, + [PTC64] = e1000e_mac_read_clr4, + [PTC255] = e1000e_mac_read_clr4, + [PTC1023] = e1000e_mac_read_clr4, + [GPRC] = e1000e_mac_read_clr4, + [TPT] = e1000e_mac_read_clr4, + [RUC] = e1000e_mac_read_clr4, + [BPRC] = e1000e_mac_read_clr4, + [MPTC] = e1000e_mac_read_clr4, + [IAC] = e1000e_mac_read_clr4, + [ICR] = e1000e_mac_icr_read, + [RDFH] = E1000E_LOW_BITS_READ(13), + [RDFHS] = E1000E_LOW_BITS_READ(13), + [RDFPC] = E1000E_LOW_BITS_READ(13), + [TDFH] = E1000E_LOW_BITS_READ(13), + [TDFHS] = E1000E_LOW_BITS_READ(13), + [STATUS] = e1000e_get_status, + [TARC0] = e1000e_get_tarc, + [PBS] = E1000E_LOW_BITS_READ(6), + [ICS] = e1000e_mac_ics_read, + [AIT] = E1000E_LOW_BITS_READ(16), + [TORH] = e1000e_mac_read_clr8, + [GORCH] = e1000e_mac_read_clr8, + [PRC127] = e1000e_mac_read_clr4, + [PRC511] = e1000e_mac_read_clr4, + [PRC1522] = e1000e_mac_read_clr4, + [PTC127] = e1000e_mac_read_clr4, + [PTC511] = e1000e_mac_read_clr4, + [PTC1522] = e1000e_mac_read_clr4, + [GPTC] = e1000e_mac_read_clr4, + [TPR] = e1000e_mac_read_clr4, + [ROC] = e1000e_mac_read_clr4, + [MPRC] = e1000e_mac_read_clr4, + [BPTC] = e1000e_mac_read_clr4, + [TSCTC] = e1000e_mac_read_clr4, + [ITR] = e1000e_mac_itr_read, + [RDFT] = E1000E_LOW_BITS_READ(13), + [RDFTS] = E1000E_LOW_BITS_READ(13), + [TDFPC] = E1000E_LOW_BITS_READ(13), + [TDFT] = E1000E_LOW_BITS_READ(13), + [TDFTS] = E1000E_LOW_BITS_READ(13), + [CTRL] = e1000e_get_ctrl, + [TARC1] = e1000e_get_tarc, + [SWSM] = e1000e_mac_swsm_read, + [IMS] = e1000e_mac_ims_read, + + [CRCERRS ... MPC] = e1000e_mac_readreg, + [IP6AT ... IP6AT + 3] = e1000e_mac_readreg, + [IP4AT ... IP4AT + 6] = e1000e_mac_readreg, + [RA ... RA + 31] = e1000e_mac_readreg, + [WUPM ... WUPM + 31] = e1000e_mac_readreg, + [MTA ... MTA + 127] = e1000e_mac_readreg, + [VFTA ... VFTA + 127] = e1000e_mac_readreg, + [FFMT ... FFMT + 254] = E1000E_LOW_BITS_READ(4), + [FFVT ... FFVT + 254] = e1000e_mac_readreg, + [MDEF ... MDEF + 7] = e1000e_mac_readreg, + [FFLT ... FFLT + 10] = E1000E_LOW_BITS_READ(11), + [FTFT ... FTFT + 254] = e1000e_mac_readreg, + [PBM ... PBM + 10239] = e1000e_mac_readreg, + [RETA ... RETA + 31] = e1000e_mac_readreg, + [RSSRK ... RSSRK + 31] = e1000e_mac_readreg, + [MAVTV0 ... 
MAVTV3] = e1000e_mac_readreg, + [EITR...EITR + E1000E_MSIX_VEC_NUM - 1] = e1000e_mac_eitr_read +}; +enum { E1000E_NREADOPS = ARRAY_SIZE(e1000e_macreg_readops) }; + +#define e1000e_putreg(x) [x] = e1000e_mac_writereg +typedef void (*writeops)(E1000ECore *, int, uint32_t); +static const writeops e1000e_macreg_writeops[] = { + e1000e_putreg(PBA), + e1000e_putreg(SWSM), + e1000e_putreg(WUFC), + e1000e_putreg(RDBAH1), + e1000e_putreg(TDBAH), + e1000e_putreg(TXDCTL), + e1000e_putreg(RDBAH0), + e1000e_putreg(LEDCTL), + e1000e_putreg(FCAL), + e1000e_putreg(FCRUC), + e1000e_putreg(AIT), + e1000e_putreg(TDFH), + e1000e_putreg(TDFT), + e1000e_putreg(TDFHS), + e1000e_putreg(TDFTS), + e1000e_putreg(TDFPC), + e1000e_putreg(WUC), + e1000e_putreg(WUS), + e1000e_putreg(RDFH), + e1000e_putreg(RDFT), + e1000e_putreg(RDFHS), + e1000e_putreg(RDFTS), + e1000e_putreg(RDFPC), + e1000e_putreg(IPAV), + e1000e_putreg(TDBAH1), + e1000e_putreg(TIMINCA), + e1000e_putreg(IAM), + e1000e_putreg(EIAC), + e1000e_putreg(IVAR), + e1000e_putreg(TARC0), + e1000e_putreg(TARC1), + e1000e_putreg(FLSWDATA), + e1000e_putreg(POEMB), + e1000e_putreg(PBS), + e1000e_putreg(MFUTP01), + e1000e_putreg(MFUTP23), + e1000e_putreg(MANC), + e1000e_putreg(MANC2H), + e1000e_putreg(MFVAL), + e1000e_putreg(EXTCNF_CTRL), + e1000e_putreg(FACTPS), + e1000e_putreg(FUNCTAG), + e1000e_putreg(GSCL_1), + e1000e_putreg(GSCL_2), + e1000e_putreg(GSCL_3), + e1000e_putreg(GSCL_4), + e1000e_putreg(GSCN_0), + e1000e_putreg(GSCN_1), + e1000e_putreg(GSCN_2), + e1000e_putreg(GSCN_3), + e1000e_putreg(GCR2), + e1000e_putreg(MRQC), + e1000e_putreg(FLOP), + e1000e_putreg(FLOL), + e1000e_putreg(FLSWCTL), + e1000e_putreg(FLSWCNT), + e1000e_putreg(FLA), + e1000e_putreg(RXDCTL1), + e1000e_putreg(TXDCTL1), + e1000e_putreg(TIPG), + e1000e_putreg(RXSTMPH), + e1000e_putreg(RXSTMPL), + e1000e_putreg(RXSATRL), + e1000e_putreg(RXSATRH), + e1000e_putreg(TXSTMPL), + e1000e_putreg(TXSTMPH), + e1000e_putreg(SYSTIML), + e1000e_putreg(SYSTIMH), + e1000e_putreg(TIMADJL), + e1000e_putreg(TIMADJH), + e1000e_putreg(RXUDP), + e1000e_putreg(RXCFGL), + e1000e_putreg(TSYNCRXCTL), + e1000e_putreg(TSYNCTXCTL), + e1000e_putreg(EXTCNF_SIZE), + e1000e_putreg(EEMNGCTL), + e1000e_putreg(RA), + + [TDH1] = e1000e_set_16bit, + [TDT1] = e1000e_set_tdt, + [TCTL] = e1000e_set_tctl, + [TDT] = e1000e_set_tdt, + [MDIC] = e1000e_set_mdic, + [ICS] = e1000e_set_ics, + [TDH] = e1000e_set_16bit, + [RDH0] = e1000e_set_16bit, + [RDT0] = e1000e_set_rdt, + [IMC] = e1000e_set_imc, + [IMS] = e1000e_set_ims, + [ICR] = e1000e_set_icr, + [EECD] = e1000e_set_eecd, + [RCTL] = e1000e_set_rx_control, + [CTRL] = e1000e_set_ctrl, + [RDTR] = e1000e_set_rdtr, + [RADV] = e1000e_set_16bit, + [TADV] = e1000e_set_16bit, + [ITR] = e1000e_set_itr, + [EERD] = e1000e_set_eerd, + [GCR] = e1000e_set_gcr, + [PSRCTL] = e1000e_set_psrctl, + [RXCSUM] = e1000e_set_rxcsum, + [RAID] = e1000e_set_16bit, + [RSRPD] = e1000e_set_12bit, + [TIDV] = e1000e_set_tidv, + [TDLEN1] = e1000e_set_dlen, + [TDLEN] = e1000e_set_dlen, + [RDLEN0] = e1000e_set_dlen, + [RDLEN1] = e1000e_set_dlen, + [TDBAL] = e1000e_set_dbal, + [TDBAL1] = e1000e_set_dbal, + [RDBAL0] = e1000e_set_dbal, + [RDBAL1] = e1000e_set_dbal, + [RDH1] = e1000e_set_16bit, + [RDT1] = e1000e_set_rdt, + [STATUS] = e1000e_set_status, + [PBACLR] = e1000e_set_pbaclr, + [CTRL_EXT] = e1000e_set_ctrlext, + [FCAH] = e1000e_set_16bit, + [FCT] = e1000e_set_16bit, + [FCTTV] = e1000e_set_16bit, + [FCRTV] = e1000e_set_16bit, + [FCRTH] = e1000e_set_fcrth, + [FCRTL] = e1000e_set_fcrtl, + [VET] = e1000e_set_vet, + 
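/* Writes with side effects (ring kicks, EEPROM access, interrupt and flow-control state) are routed to the dedicated e1000e_set_* handlers; plain state registers go through e1000e_mac_writereg via the e1000e_putreg entries above. */ + 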
[RXDCTL] = e1000e_set_rxdctl, + [FLASHT] = e1000e_set_16bit, + [EEWR] = e1000e_set_eewr, + [CTRL_DUP] = e1000e_set_ctrl, + [RFCTL] = e1000e_set_rfctl, + [RA + 1] = e1000e_mac_setmacaddr, + + [IP6AT ... IP6AT + 3] = e1000e_mac_writereg, + [IP4AT ... IP4AT + 6] = e1000e_mac_writereg, + [RA + 2 ... RA + 31] = e1000e_mac_writereg, + [WUPM ... WUPM + 31] = e1000e_mac_writereg, + [MTA ... MTA + 127] = e1000e_mac_writereg, + [VFTA ... VFTA + 127] = e1000e_mac_writereg, + [FFMT ... FFMT + 254] = e1000e_mac_writereg, + [FFVT ... FFVT + 254] = e1000e_mac_writereg, + [PBM ... PBM + 10239] = e1000e_mac_writereg, + [MDEF ... MDEF + 7] = e1000e_mac_writereg, + [FFLT ... FFLT + 10] = e1000e_mac_writereg, + [FTFT ... FTFT + 254] = e1000e_mac_writereg, + [RETA ... RETA + 31] = e1000e_mac_writereg, + [RSSRK ... RSSRK + 31] = e1000e_mac_writereg, + [MAVTV0 ... MAVTV3] = e1000e_mac_writereg, + [EITR...EITR + E1000E_MSIX_VEC_NUM - 1] = e1000e_set_eitr +}; +enum { E1000E_NWRITEOPS = ARRAY_SIZE(e1000e_macreg_writeops) }; + +enum { MAC_ACCESS_PARTIAL = 1 }; + +/* The array below combines alias offsets of the index values for the + * MAC registers that have aliases, with the indication of not fully + * implemented registers (lowest bit). This combination is possible + * because all of the offsets are even. */ +static const uint16_t mac_reg_access[E1000E_MAC_SIZE] = { + /* Alias index offsets */ + [FCRTL_A] = 0x07fe, [FCRTH_A] = 0x0802, + [RDH0_A] = 0x09bc, [RDT0_A] = 0x09bc, [RDTR_A] = 0x09c6, + [RDFH_A] = 0xe904, [RDFT_A] = 0xe904, + [TDH_A] = 0x0cf8, [TDT_A] = 0x0cf8, [TIDV_A] = 0x0cf8, + [TDFH_A] = 0xed00, [TDFT_A] = 0xed00, + [RA_A ... RA_A + 31] = 0x14f0, + [VFTA_A ... VFTA_A + 127] = 0x1400, + [RDBAL0_A ... RDLEN0_A] = 0x09bc, + [TDBAL_A ... TDLEN_A] = 0x0cf8, + /* Access options */ + [RDFH] = MAC_ACCESS_PARTIAL, [RDFT] = MAC_ACCESS_PARTIAL, + [RDFHS] = MAC_ACCESS_PARTIAL, [RDFTS] = MAC_ACCESS_PARTIAL, + [RDFPC] = MAC_ACCESS_PARTIAL, + [TDFH] = MAC_ACCESS_PARTIAL, [TDFT] = MAC_ACCESS_PARTIAL, + [TDFHS] = MAC_ACCESS_PARTIAL, [TDFTS] = MAC_ACCESS_PARTIAL, + [TDFPC] = MAC_ACCESS_PARTIAL, [EECD] = MAC_ACCESS_PARTIAL, + [PBM] = MAC_ACCESS_PARTIAL, [FLA] = MAC_ACCESS_PARTIAL, + [FCAL] = MAC_ACCESS_PARTIAL, [FCAH] = MAC_ACCESS_PARTIAL, + [FCT] = MAC_ACCESS_PARTIAL, [FCTTV] = MAC_ACCESS_PARTIAL, + [FCRTV] = MAC_ACCESS_PARTIAL, [FCRTL] = MAC_ACCESS_PARTIAL, + [FCRTH] = MAC_ACCESS_PARTIAL, [TXDCTL] = MAC_ACCESS_PARTIAL, + [TXDCTL1] = MAC_ACCESS_PARTIAL, + [MAVTV0 ... 
MAVTV3] = MAC_ACCESS_PARTIAL +}; + +void +e1000e_core_write(E1000ECore *core, hwaddr addr, uint64_t val, unsigned size) +{ + uint16_t index = e1000e_get_reg_index_with_offset(mac_reg_access, addr); + + if (index < E1000E_NWRITEOPS && e1000e_macreg_writeops[index]) { + if (mac_reg_access[index] & MAC_ACCESS_PARTIAL) { + trace_e1000e_wrn_regs_write_trivial(index << 2); + } + trace_e1000e_core_write(index << 2, size, val); + e1000e_macreg_writeops[index](core, index, val); + } else if (index < E1000E_NREADOPS && e1000e_macreg_readops[index]) { + trace_e1000e_wrn_regs_write_ro(index << 2, size, val); + } else { + trace_e1000e_wrn_regs_write_unknown(index << 2, size, val); + } +} + +uint64_t +e1000e_core_read(E1000ECore *core, hwaddr addr, unsigned size) +{ + uint64_t val; + uint16_t index = e1000e_get_reg_index_with_offset(mac_reg_access, addr); + + if (index < E1000E_NREADOPS && e1000e_macreg_readops[index]) { + if (mac_reg_access[index] & MAC_ACCESS_PARTIAL) { + trace_e1000e_wrn_regs_read_trivial(index << 2); + } + val = e1000e_macreg_readops[index](core, index); + trace_e1000e_core_read(index << 2, size, val); + return val; + } else { + trace_e1000e_wrn_regs_read_unknown(index << 2, size); + } + return 0; +} + +static inline void +e1000e_autoneg_pause(E1000ECore *core) +{ + timer_del(core->autoneg_timer); +} + +static void +e1000e_autoneg_resume(E1000ECore *core) +{ + if (e1000e_have_autoneg(core) && + !(core->phy[0][PHY_STATUS] & MII_SR_AUTONEG_COMPLETE)) { + qemu_get_queue(core->owner_nic)->link_down = false; + timer_mod(core->autoneg_timer, + qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 500); + } +} + +static void +e1000e_vm_state_change(void *opaque, bool running, RunState state) +{ + E1000ECore *core = opaque; + + if (running) { + trace_e1000e_vm_state_running(); + e1000e_intrmgr_resume(core); + e1000e_autoneg_resume(core); + } else { + trace_e1000e_vm_state_stopped(); + e1000e_autoneg_pause(core); + e1000e_intrmgr_pause(core); + } +} + +void +e1000e_core_pci_realize(E1000ECore *core, + const uint16_t *eeprom_templ, + uint32_t eeprom_size, + const uint8_t *macaddr) +{ + int i; + + core->autoneg_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL, + e1000e_autoneg_timer, core); + e1000e_intrmgr_pci_realize(core); + + core->vmstate = + qemu_add_vm_change_state_handler(e1000e_vm_state_change, core); + + for (i = 0; i < E1000E_NUM_QUEUES; i++) { + net_tx_pkt_init(&core->tx[i].tx_pkt, core->owner, + E1000E_MAX_TX_FRAGS, core->has_vnet); + } + + net_rx_pkt_init(&core->rx_pkt, core->has_vnet); + + e1000x_core_prepare_eeprom(core->eeprom, + eeprom_templ, + eeprom_size, + PCI_DEVICE_GET_CLASS(core->owner)->device_id, + macaddr); + e1000e_update_rx_offloads(core); +} + +void +e1000e_core_pci_uninit(E1000ECore *core) +{ + int i; + + timer_free(core->autoneg_timer); + + e1000e_intrmgr_pci_unint(core); + + qemu_del_vm_change_state_handler(core->vmstate); + + for (i = 0; i < E1000E_NUM_QUEUES; i++) { + net_tx_pkt_reset(core->tx[i].tx_pkt); + net_tx_pkt_uninit(core->tx[i].tx_pkt); + } + + net_rx_pkt_uninit(core->rx_pkt); +} + +static const uint16_t +e1000e_phy_reg_init[E1000E_PHY_PAGES][E1000E_PHY_PAGE_SIZE] = { + [0] = { + [PHY_CTRL] = MII_CR_SPEED_SELECT_MSB | + MII_CR_FULL_DUPLEX | + MII_CR_AUTO_NEG_EN, + + [PHY_STATUS] = MII_SR_EXTENDED_CAPS | + MII_SR_LINK_STATUS | + MII_SR_AUTONEG_CAPS | + MII_SR_PREAMBLE_SUPPRESS | + MII_SR_EXTENDED_STATUS | + MII_SR_10T_HD_CAPS | + MII_SR_10T_FD_CAPS | + MII_SR_100X_HD_CAPS | + MII_SR_100X_FD_CAPS, + + [PHY_ID1] = 0x141, + [PHY_ID2] = E1000_PHY_ID2_82574x, + [PHY_AUTONEG_ADV] 
= 0xde1, + [PHY_LP_ABILITY] = 0x7e0, + [PHY_AUTONEG_EXP] = BIT(2), + [PHY_NEXT_PAGE_TX] = BIT(0) | BIT(13), + [PHY_1000T_CTRL] = BIT(8) | BIT(9) | BIT(10) | BIT(11), + [PHY_1000T_STATUS] = 0x3c00, + [PHY_EXT_STATUS] = BIT(12) | BIT(13), + + [PHY_COPPER_CTRL1] = BIT(5) | BIT(6) | BIT(8) | BIT(9) | + BIT(12) | BIT(13), + [PHY_COPPER_STAT1] = BIT(3) | BIT(10) | BIT(11) | BIT(13) | BIT(15) + }, + [2] = { + [PHY_MAC_CTRL1] = BIT(3) | BIT(7), + [PHY_MAC_CTRL2] = BIT(1) | BIT(2) | BIT(6) | BIT(12) + }, + [3] = { + [PHY_LED_TIMER_CTRL] = BIT(0) | BIT(2) | BIT(14) + } +}; + +static const uint32_t e1000e_mac_reg_init[] = { + [PBA] = 0x00140014, + [LEDCTL] = BIT(1) | BIT(8) | BIT(9) | BIT(15) | BIT(17) | BIT(18), + [EXTCNF_CTRL] = BIT(3), + [EEMNGCTL] = BIT(31), + [FLASHT] = 0x2, + [FLSWCTL] = BIT(30) | BIT(31), + [FLOL] = BIT(0), + [RXDCTL] = BIT(16), + [RXDCTL1] = BIT(16), + [TIPG] = 0x8 | (0x8 << 10) | (0x6 << 20), + [RXCFGL] = 0x88F7, + [RXUDP] = 0x319, + [CTRL] = E1000_CTRL_FD | E1000_CTRL_SWDPIN2 | E1000_CTRL_SWDPIN0 | + E1000_CTRL_SPD_1000 | E1000_CTRL_SLU | + E1000_CTRL_ADVD3WUC, + [STATUS] = E1000_STATUS_ASDV_1000 | E1000_STATUS_LU, + [PSRCTL] = (2 << E1000_PSRCTL_BSIZE0_SHIFT) | + (4 << E1000_PSRCTL_BSIZE1_SHIFT) | + (4 << E1000_PSRCTL_BSIZE2_SHIFT), + [TARC0] = 0x3 | E1000_TARC_ENABLE, + [TARC1] = 0x3 | E1000_TARC_ENABLE, + [EECD] = E1000_EECD_AUTO_RD | E1000_EECD_PRES, + [EERD] = E1000_EERW_DONE, + [EEWR] = E1000_EERW_DONE, + [GCR] = E1000_L0S_ADJUST | + E1000_L1_ENTRY_LATENCY_MSB | + E1000_L1_ENTRY_LATENCY_LSB, + [TDFH] = 0x600, + [TDFT] = 0x600, + [TDFHS] = 0x600, + [TDFTS] = 0x600, + [POEMB] = 0x30D, + [PBS] = 0x028, + [MANC] = E1000_MANC_DIS_IP_CHK_ARP, + [FACTPS] = E1000_FACTPS_LAN0_ON | 0x20000000, + [SWSM] = 1, + [RXCSUM] = E1000_RXCSUM_IPOFLD | E1000_RXCSUM_TUOFLD, + [ITR] = E1000E_MIN_XITR, + [EITR...EITR + E1000E_MSIX_VEC_NUM - 1] = E1000E_MIN_XITR, +}; + +void +e1000e_core_reset(E1000ECore *core) +{ + int i; + + timer_del(core->autoneg_timer); + + e1000e_intrmgr_reset(core); + + memset(core->phy, 0, sizeof core->phy); + memmove(core->phy, e1000e_phy_reg_init, sizeof e1000e_phy_reg_init); + memset(core->mac, 0, sizeof core->mac); + memmove(core->mac, e1000e_mac_reg_init, sizeof e1000e_mac_reg_init); + + core->rxbuf_min_shift = 1 + E1000_RING_DESC_LEN_SHIFT; + + if (qemu_get_queue(core->owner_nic)->link_down) { + e1000e_link_down(core); + } + + e1000x_reset_mac_addr(core->owner_nic, core->mac, core->permanent_mac); + + for (i = 0; i < ARRAY_SIZE(core->tx); i++) { + net_tx_pkt_reset(core->tx[i].tx_pkt); + memset(&core->tx[i].props, 0, sizeof(core->tx[i].props)); + core->tx[i].skip_cp = false; + } +} + +void e1000e_core_pre_save(E1000ECore *core) +{ + int i; + NetClientState *nc = qemu_get_queue(core->owner_nic); + + /* + * If link is down and auto-negotiation is supported and ongoing, + * complete auto-negotiation immediately. This allows us to look + * at MII_SR_AUTONEG_COMPLETE to infer link status on load. + */ + if (nc->link_down && e1000e_have_autoneg(core)) { + core->phy[0][PHY_STATUS] |= MII_SR_AUTONEG_COMPLETE; + e1000e_update_flowctl_status(core); + } + + for (i = 0; i < ARRAY_SIZE(core->tx); i++) { + if (net_tx_pkt_has_fragments(core->tx[i].tx_pkt)) { + core->tx[i].skip_cp = true; + } + } +} + +int +e1000e_core_post_load(E1000ECore *core) +{ + NetClientState *nc = qemu_get_queue(core->owner_nic); + + /* nc.link_down can't be migrated, so infer link_down according + * to link status bit in core.mac[STATUS]. 
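+ * For example, a guest saved with E1000_STATUS_LU set resumes with its + * network peer marked link-up.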
+ */ + nc->link_down = (core->mac[STATUS] & E1000_STATUS_LU) == 0; + + return 0; +} diff --git a/hw/net/e1000e_core.h b/hw/net/e1000e_core.h new file mode 100644 index 000000000..4ddb4d2c3 --- /dev/null +++ b/hw/net/e1000e_core.h @@ -0,0 +1,158 @@ +/* +* Core code for QEMU e1000e emulation +* +* Software developer's manuals: +* http://www.intel.com/content/dam/doc/datasheet/82574l-gbe-controller-datasheet.pdf +* +* Copyright (c) 2015 Ravello Systems LTD (http://ravellosystems.com) +* Developed by Daynix Computing LTD (http://www.daynix.com) +* +* Authors: +* Dmitry Fleytman +* Leonid Bloch +* Yan Vugenfirer +* +* Based on work done by: +* Nir Peleg, Tutis Systems Ltd. for Qumranet Inc. +* Copyright (c) 2008 Qumranet +* Based on work done by: +* Copyright (c) 2007 Dan Aloni +* Copyright (c) 2004 Antony T Curtis +* +* This library is free software; you can redistribute it and/or +* modify it under the terms of the GNU Lesser General Public +* License as published by the Free Software Foundation; either +* version 2.1 of the License, or (at your option) any later version. +* +* This library is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +* Lesser General Public License for more details. +* +* You should have received a copy of the GNU Lesser General Public +* License along with this library; if not, see <http://www.gnu.org/licenses/>. +*/ + +#ifndef HW_NET_E1000E_CORE_H +#define HW_NET_E1000E_CORE_H + +#define E1000E_PHY_PAGE_SIZE (0x20) +#define E1000E_PHY_PAGES (0x07) +#define E1000E_MAC_SIZE (0x8000) +#define E1000E_EEPROM_SIZE (64) +#define E1000E_MSIX_VEC_NUM (5) +#define E1000E_NUM_QUEUES (2) + +typedef struct E1000Core E1000ECore; + +enum { PHY_R = BIT(0), + PHY_W = BIT(1), + PHY_RW = PHY_R | PHY_W, + PHY_ANYPAGE = BIT(2) }; + +typedef struct E1000IntrDelayTimer_st { + QEMUTimer *timer; + bool running; + uint32_t delay_reg; + uint32_t delay_resolution_ns; + E1000ECore *core; +} E1000IntrDelayTimer; + +struct E1000Core { + uint32_t mac[E1000E_MAC_SIZE]; + uint16_t phy[E1000E_PHY_PAGES][E1000E_PHY_PAGE_SIZE]; + uint16_t eeprom[E1000E_EEPROM_SIZE]; + + uint32_t rxbuf_sizes[E1000_PSRCTL_BUFFS_PER_DESC]; + uint32_t rx_desc_buf_size; + uint32_t rxbuf_min_shift; + uint8_t rx_desc_len; + + QEMUTimer *autoneg_timer; + + struct e1000e_tx { + e1000x_txd_props props; + + bool skip_cp; + unsigned char sum_needed; + bool cptse; + struct NetTxPkt *tx_pkt; + } tx[E1000E_NUM_QUEUES]; + + struct NetRxPkt *rx_pkt; + + bool has_vnet; + int max_queue_num; + + /* Interrupt moderation management */ + uint32_t delayed_causes; + + E1000IntrDelayTimer radv; + E1000IntrDelayTimer rdtr; + E1000IntrDelayTimer raid; + + E1000IntrDelayTimer tadv; + E1000IntrDelayTimer tidv; + + E1000IntrDelayTimer itr; + bool itr_intr_pending; + + E1000IntrDelayTimer eitr[E1000E_MSIX_VEC_NUM]; + bool eitr_intr_pending[E1000E_MSIX_VEC_NUM]; + + VMChangeStateEntry *vmstate; + + uint32_t itr_guest_value; + uint32_t eitr_guest_value[E1000E_MSIX_VEC_NUM]; + + uint16_t vet; + + uint8_t permanent_mac[ETH_ALEN]; + + NICState *owner_nic; + PCIDevice *owner; + void (*owner_start_recv)(PCIDevice *d); + + uint32_t msi_causes_pending; +}; + +void +e1000e_core_write(E1000ECore *core, hwaddr addr, uint64_t val, unsigned size); + +uint64_t +e1000e_core_read(E1000ECore *core, hwaddr addr, unsigned size); + +void +e1000e_core_pci_realize(E1000ECore *regs, + const uint16_t *eeprom_templ, + uint32_t eeprom_size, + const uint8_t *macaddr); + +void 
+e1000e_core_reset(E1000ECore *core); + +void +e1000e_core_pre_save(E1000ECore *core); + +int +e1000e_core_post_load(E1000ECore *core); + +void +e1000e_core_set_link_status(E1000ECore *core); + +void +e1000e_core_pci_uninit(E1000ECore *core); + +bool +e1000e_can_receive(E1000ECore *core); + +ssize_t +e1000e_receive(E1000ECore *core, const uint8_t *buf, size_t size); + +ssize_t +e1000e_receive_iov(E1000ECore *core, const struct iovec *iov, int iovcnt); + +void +e1000e_start_recv(E1000ECore *core); + +#endif diff --git a/hw/net/e1000x_common.c b/hw/net/e1000x_common.c new file mode 100644 index 000000000..a8d93870b --- /dev/null +++ b/hw/net/e1000x_common.c @@ -0,0 +1,267 @@ +/* +* QEMU e1000(e) emulation - shared code +* +* Copyright (c) 2008 Qumranet +* +* Based on work done by: +* Nir Peleg, Tutis Systems Ltd. for Qumranet Inc. +* Copyright (c) 2007 Dan Aloni +* Copyright (c) 2004 Antony T Curtis +* +* This library is free software; you can redistribute it and/or +* modify it under the terms of the GNU Lesser General Public +* License as published by the Free Software Foundation; either +* version 2.1 of the License, or (at your option) any later version. +* +* This library is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +* Lesser General Public License for more details. +* +* You should have received a copy of the GNU Lesser General Public +* License along with this library; if not, see <http://www.gnu.org/licenses/>. +*/ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "hw/pci/pci.h" +#include "net/net.h" + +#include "e1000x_common.h" + +#include "trace.h" + +bool e1000x_rx_ready(PCIDevice *d, uint32_t *mac) +{ + bool link_up = mac[STATUS] & E1000_STATUS_LU; + bool rx_enabled = mac[RCTL] & E1000_RCTL_EN; + bool pci_master = d->config[PCI_COMMAND] & PCI_COMMAND_MASTER; + + if (!link_up || !rx_enabled || !pci_master) { + trace_e1000x_rx_can_recv_disabled(link_up, rx_enabled, pci_master); + return false; + } + + return true; +} + +bool e1000x_is_vlan_packet(const uint8_t *buf, uint16_t vet) +{ + uint16_t eth_proto = lduw_be_p(buf + 12); + bool res = (eth_proto == vet); + + trace_e1000x_vlan_is_vlan_pkt(res, eth_proto, vet); + + return res; +} + +bool e1000x_rx_group_filter(uint32_t *mac, const uint8_t *buf) +{ + static const int mta_shift[] = { 4, 3, 2, 0 }; + uint32_t f, ra[2], *rp, rctl = mac[RCTL]; + + for (rp = mac + RA; rp < mac + RA + 32; rp += 2) { + if (!(rp[1] & E1000_RAH_AV)) { + continue; + } + ra[0] = cpu_to_le32(rp[0]); + ra[1] = cpu_to_le32(rp[1]); + if (!memcmp(buf, (uint8_t *)ra, 6)) { + trace_e1000x_rx_flt_ucast_match((int)(rp - mac - RA) / 2, + MAC_ARG(buf)); + return true; + } + } + trace_e1000x_rx_flt_ucast_mismatch(MAC_ARG(buf)); + + f = mta_shift[(rctl >> E1000_RCTL_MO_SHIFT) & 3]; + f = (((buf[5] << 8) | buf[4]) >> f) & 0xfff; + if (mac[MTA + (f >> 5)] & (1 << (f & 0x1f))) { + e1000x_inc_reg_if_not_full(mac, MPRC); + return true; + } + + trace_e1000x_rx_flt_inexact_mismatch(MAC_ARG(buf), + (rctl >> E1000_RCTL_MO_SHIFT) & 3, + f >> 5, + mac[MTA + (f >> 5)]); + + return false; +} + +bool e1000x_hw_rx_enabled(uint32_t *mac) +{ + if (!(mac[STATUS] & E1000_STATUS_LU)) { + trace_e1000x_rx_link_down(mac[STATUS]); + return false; + } + + if (!(mac[RCTL] & E1000_RCTL_EN)) { + trace_e1000x_rx_disabled(mac[RCTL]); + return false; + } + + return true; +} + +bool e1000x_is_oversized(uint32_t *mac, size_t size) +{ + /* this is the size past which hardware will + 
drop packets when setting LPE=0 */ + static const int maximum_ethernet_vlan_size = 1522; + /* this is the size past which hardware will + drop packets when setting LPE=1 */ + static const int maximum_ethernet_lpe_size = 16 * KiB; + + if ((size > maximum_ethernet_lpe_size || + (size > maximum_ethernet_vlan_size + && !(mac[RCTL] & E1000_RCTL_LPE))) + && !(mac[RCTL] & E1000_RCTL_SBP)) { + e1000x_inc_reg_if_not_full(mac, ROC); + trace_e1000x_rx_oversized(size); + return true; + } + + return false; +} + +void e1000x_restart_autoneg(uint32_t *mac, uint16_t *phy, QEMUTimer *timer) +{ + e1000x_update_regs_on_link_down(mac, phy); + trace_e1000x_link_negotiation_start(); + timer_mod(timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 500); +} + +void e1000x_reset_mac_addr(NICState *nic, uint32_t *mac_regs, + uint8_t *mac_addr) +{ + int i; + + mac_regs[RA] = 0; + mac_regs[RA + 1] = E1000_RAH_AV; + for (i = 0; i < 4; i++) { + mac_regs[RA] |= mac_addr[i] << (8 * i); + mac_regs[RA + 1] |= + (i < 2) ? mac_addr[i + 4] << (8 * i) : 0; + } + + qemu_format_nic_info_str(qemu_get_queue(nic), mac_addr); + trace_e1000x_mac_indicate(MAC_ARG(mac_addr)); +} + +void e1000x_update_regs_on_autoneg_done(uint32_t *mac, uint16_t *phy) +{ + e1000x_update_regs_on_link_up(mac, phy); + phy[PHY_LP_ABILITY] |= MII_LPAR_LPACK; + phy[PHY_STATUS] |= MII_SR_AUTONEG_COMPLETE; + trace_e1000x_link_negotiation_done(); +} + +void +e1000x_core_prepare_eeprom(uint16_t *eeprom, + const uint16_t *templ, + uint32_t templ_size, + uint16_t dev_id, + const uint8_t *macaddr) +{ + uint16_t checksum = 0; + int i; + + memmove(eeprom, templ, templ_size); + + for (i = 0; i < 3; i++) { + eeprom[i] = (macaddr[2 * i + 1] << 8) | macaddr[2 * i]; + } + + eeprom[11] = eeprom[13] = dev_id; + + for (i = 0; i < EEPROM_CHECKSUM_REG; i++) { + checksum += eeprom[i]; + } + + checksum = (uint16_t) EEPROM_SUM - checksum; + + eeprom[EEPROM_CHECKSUM_REG] = checksum; +} + +uint32_t +e1000x_rxbufsize(uint32_t rctl) +{ + rctl &= E1000_RCTL_BSEX | E1000_RCTL_SZ_16384 | E1000_RCTL_SZ_8192 | + E1000_RCTL_SZ_4096 | E1000_RCTL_SZ_2048 | E1000_RCTL_SZ_1024 | + E1000_RCTL_SZ_512 | E1000_RCTL_SZ_256; + switch (rctl) { + case E1000_RCTL_BSEX | E1000_RCTL_SZ_16384: + return 16384; + case E1000_RCTL_BSEX | E1000_RCTL_SZ_8192: + return 8192; + case E1000_RCTL_BSEX | E1000_RCTL_SZ_4096: + return 4096; + case E1000_RCTL_SZ_1024: + return 1024; + case E1000_RCTL_SZ_512: + return 512; + case E1000_RCTL_SZ_256: + return 256; + } + return 2048; +} + +void +e1000x_update_rx_total_stats(uint32_t *mac, + size_t data_size, + size_t data_fcs_size) +{ + static const int PRCregs[6] = { PRC64, PRC127, PRC255, PRC511, + PRC1023, PRC1522 }; + + e1000x_increase_size_stats(mac, PRCregs, data_fcs_size); + e1000x_inc_reg_if_not_full(mac, TPR); + mac[GPRC] = mac[TPR]; + /* TOR - Total Octets Received: + * This register includes bytes received in a packet from the <Destination Address> field through the <CRC> field, inclusively. + * Always include FCS length (4) in size. 
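+ * For example, a 60-byte frame (FCS already stripped) grows TORL/TORH by 64.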
+ */ + e1000x_grow_8reg_if_not_full(mac, TORL, data_size + 4); + mac[GORCL] = mac[TORL]; + mac[GORCH] = mac[TORH]; +} + +void +e1000x_increase_size_stats(uint32_t *mac, const int *size_regs, int size) +{ + if (size > 1023) { + e1000x_inc_reg_if_not_full(mac, size_regs[5]); + } else if (size > 511) { + e1000x_inc_reg_if_not_full(mac, size_regs[4]); + } else if (size > 255) { + e1000x_inc_reg_if_not_full(mac, size_regs[3]); + } else if (size > 127) { + e1000x_inc_reg_if_not_full(mac, size_regs[2]); + } else if (size > 64) { + e1000x_inc_reg_if_not_full(mac, size_regs[1]); + } else if (size == 64) { + e1000x_inc_reg_if_not_full(mac, size_regs[0]); + } +} + +void +e1000x_read_tx_ctx_descr(struct e1000_context_desc *d, + e1000x_txd_props *props) +{ + uint32_t op = le32_to_cpu(d->cmd_and_length); + + props->ipcss = d->lower_setup.ip_fields.ipcss; + props->ipcso = d->lower_setup.ip_fields.ipcso; + props->ipcse = le16_to_cpu(d->lower_setup.ip_fields.ipcse); + props->tucss = d->upper_setup.tcp_fields.tucss; + props->tucso = d->upper_setup.tcp_fields.tucso; + props->tucse = le16_to_cpu(d->upper_setup.tcp_fields.tucse); + props->paylen = op & 0xfffff; + props->hdr_len = d->tcp_seg_setup.fields.hdr_len; + props->mss = le16_to_cpu(d->tcp_seg_setup.fields.mss); + props->ip = (op & E1000_TXD_CMD_IP) ? 1 : 0; + props->tcp = (op & E1000_TXD_CMD_TCP) ? 1 : 0; + props->tse = (op & E1000_TXD_CMD_TSE) ? 1 : 0; +} diff --git a/hw/net/e1000x_common.h b/hw/net/e1000x_common.h new file mode 100644 index 000000000..b7742775c --- /dev/null +++ b/hw/net/e1000x_common.h @@ -0,0 +1,216 @@ +/* +* QEMU e1000(e) emulation - shared code +* +* Copyright (c) 2008 Qumranet +* +* Based on work done by: +* Nir Peleg, Tutis Systems Ltd. for Qumranet Inc. +* Copyright (c) 2007 Dan Aloni +* Copyright (c) 2004 Antony T Curtis +* +* This library is free software; you can redistribute it and/or +* modify it under the terms of the GNU Lesser General Public +* License as published by the Free Software Foundation; either +* version 2.1 of the License, or (at your option) any later version. +* +* This library is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +* Lesser General Public License for more details. +* +* You should have received a copy of the GNU Lesser General Public +* License along with this library; if not, see <http://www.gnu.org/licenses/>. 
+*/ + +#ifndef HW_NET_E1000X_COMMON_H +#define HW_NET_E1000X_COMMON_H + +#include "e1000_regs.h" + +#define defreg(x) x = (E1000_##x >> 2) +enum { + defreg(CTRL), defreg(EECD), defreg(EERD), defreg(GPRC), + defreg(GPTC), defreg(ICR), defreg(ICS), defreg(IMC), + defreg(IMS), defreg(LEDCTL), defreg(MANC), defreg(MDIC), + defreg(MPC), defreg(PBA), defreg(RCTL), defreg(RDBAH0), + defreg(RDBAL0), defreg(RDH0), defreg(RDLEN0), defreg(RDT0), + defreg(STATUS), defreg(SWSM), defreg(TCTL), defreg(TDBAH), + defreg(TDBAL), defreg(TDH), defreg(TDLEN), defreg(TDT), + defreg(TDLEN1), defreg(TDBAL1), defreg(TDBAH1), defreg(TDH1), + defreg(TDT1), defreg(TORH), defreg(TORL), defreg(TOTH), + defreg(TOTL), defreg(TPR), defreg(TPT), defreg(TXDCTL), + defreg(WUFC), defreg(RA), defreg(MTA), defreg(CRCERRS), + defreg(VFTA), defreg(VET), defreg(RDTR), defreg(RADV), + defreg(TADV), defreg(ITR), defreg(SCC), defreg(ECOL), + defreg(MCC), defreg(LATECOL), defreg(COLC), defreg(DC), + defreg(TNCRS), defreg(SEQEC), defreg(CEXTERR), defreg(RLEC), + defreg(XONRXC), defreg(XONTXC), defreg(XOFFRXC), defreg(XOFFTXC), + defreg(FCRUC), defreg(AIT), defreg(TDFH), defreg(TDFT), + defreg(TDFHS), defreg(TDFTS), defreg(TDFPC), defreg(WUC), + defreg(WUS), defreg(POEMB), defreg(PBS), defreg(RDFH), + defreg(RDFT), defreg(RDFHS), defreg(RDFTS), defreg(RDFPC), + defreg(PBM), defreg(IPAV), defreg(IP4AT), defreg(IP6AT), + defreg(WUPM), defreg(FFLT), defreg(FFMT), defreg(FFVT), + defreg(TARC0), defreg(TARC1), defreg(IAM), defreg(EXTCNF_CTRL), + defreg(GCR), defreg(TIMINCA), defreg(EIAC), defreg(CTRL_EXT), + defreg(IVAR), defreg(MFUTP01), defreg(MFUTP23), defreg(MANC2H), + defreg(MFVAL), defreg(MDEF), defreg(FACTPS), defreg(FTFT), + defreg(RUC), defreg(ROC), defreg(RFC), defreg(RJC), + defreg(PRC64), defreg(PRC127), defreg(PRC255), defreg(PRC511), + defreg(PRC1023), defreg(PRC1522), defreg(PTC64), defreg(PTC127), + defreg(PTC255), defreg(PTC511), defreg(PTC1023), defreg(PTC1522), + defreg(GORCL), defreg(GORCH), defreg(GOTCL), defreg(GOTCH), + defreg(RNBC), defreg(BPRC), defreg(MPRC), defreg(RFCTL), + defreg(PSRCTL), defreg(MPTC), defreg(BPTC), defreg(TSCTFC), + defreg(IAC), defreg(MGTPRC), defreg(MGTPDC), defreg(MGTPTC), + defreg(TSCTC), defreg(RXCSUM), defreg(FUNCTAG), defreg(GSCL_1), + defreg(GSCL_2), defreg(GSCL_3), defreg(GSCL_4), defreg(GSCN_0), + defreg(GSCN_1), defreg(GSCN_2), defreg(GSCN_3), defreg(GCR2), + defreg(RAID), defreg(RSRPD), defreg(TIDV), defreg(EITR), + defreg(MRQC), defreg(RETA), defreg(RSSRK), defreg(RDBAH1), + defreg(RDBAL1), defreg(RDLEN1), defreg(RDH1), defreg(RDT1), + defreg(PBACLR), defreg(FCAL), defreg(FCAH), defreg(FCT), + defreg(FCRTH), defreg(FCRTL), defreg(FCTTV), defreg(FCRTV), + defreg(FLA), defreg(EEWR), defreg(FLOP), defreg(FLOL), + defreg(FLSWCTL), defreg(FLSWCNT), defreg(RXDCTL), defreg(RXDCTL1), + defreg(MAVTV0), defreg(MAVTV1), defreg(MAVTV2), defreg(MAVTV3), + defreg(TXSTMPL), defreg(TXSTMPH), defreg(SYSTIML), defreg(SYSTIMH), + defreg(RXCFGL), defreg(RXUDP), defreg(TIMADJL), defreg(TIMADJH), + defreg(RXSTMPH), defreg(RXSTMPL), defreg(RXSATRL), defreg(RXSATRH), + defreg(FLASHT), defreg(TIPG), defreg(RDH), defreg(RDT), + defreg(RDLEN), defreg(RDBAH), defreg(RDBAL), + defreg(TXDCTL1), + defreg(FLSWDATA), + defreg(CTRL_DUP), + defreg(EXTCNF_SIZE), + defreg(EEMNGCTL), + defreg(EEMNGDATA), + defreg(FLMNGCTL), + defreg(FLMNGDATA), + defreg(FLMNGCNT), + defreg(TSYNCRXCTL), + defreg(TSYNCTXCTL), + + /* Aliases */ + defreg(RDH0_A), defreg(RDT0_A), defreg(RDTR_A), defreg(RDFH_A), + defreg(RDFT_A), 
defreg(TDH_A), defreg(TDT_A), defreg(TIDV_A), + defreg(TDFH_A), defreg(TDFT_A), defreg(RA_A), defreg(RDBAL0_A), + defreg(TDBAL_A), defreg(TDLEN_A), defreg(VFTA_A), defreg(RDLEN0_A), + defreg(FCRTL_A), defreg(FCRTH_A) +}; + +static inline void +e1000x_inc_reg_if_not_full(uint32_t *mac, int index) +{ + if (mac[index] != 0xffffffff) { + mac[index]++; + } +} + +static inline void +e1000x_grow_8reg_if_not_full(uint32_t *mac, int index, int size) +{ + uint64_t sum = mac[index] | (uint64_t)mac[index + 1] << 32; + + if (sum + size < sum) { + sum = ~0ULL; + } else { + sum += size; + } + mac[index] = sum; + mac[index + 1] = sum >> 32; +} + +static inline int +e1000x_vlan_enabled(uint32_t *mac) +{ + return ((mac[CTRL] & E1000_CTRL_VME) != 0); +} + +static inline int +e1000x_is_vlan_txd(uint32_t txd_lower) +{ + return ((txd_lower & E1000_TXD_CMD_VLE) != 0); +} + +static inline int +e1000x_vlan_rx_filter_enabled(uint32_t *mac) +{ + return ((mac[RCTL] & E1000_RCTL_VFE) != 0); +} + +static inline int +e1000x_fcs_len(uint32_t *mac) +{ + /* FCS aka Ethernet CRC-32. We don't get it from backends and can't + * fill it in, just pad descriptor length by 4 bytes unless guest + * told us to strip it off the packet. */ + return (mac[RCTL] & E1000_RCTL_SECRC) ? 0 : 4; +} + +static inline void +e1000x_update_regs_on_link_down(uint32_t *mac, uint16_t *phy) +{ + mac[STATUS] &= ~E1000_STATUS_LU; + phy[PHY_STATUS] &= ~MII_SR_LINK_STATUS; + phy[PHY_STATUS] &= ~MII_SR_AUTONEG_COMPLETE; + phy[PHY_LP_ABILITY] &= ~MII_LPAR_LPACK; +} + +static inline void +e1000x_update_regs_on_link_up(uint32_t *mac, uint16_t *phy) +{ + mac[STATUS] |= E1000_STATUS_LU; + phy[PHY_STATUS] |= MII_SR_LINK_STATUS; +} + +void e1000x_update_rx_total_stats(uint32_t *mac, + size_t data_size, + size_t data_fcs_size); + +void e1000x_core_prepare_eeprom(uint16_t *eeprom, + const uint16_t *templ, + uint32_t templ_size, + uint16_t dev_id, + const uint8_t *macaddr); + +uint32_t e1000x_rxbufsize(uint32_t rctl); + +bool e1000x_rx_ready(PCIDevice *d, uint32_t *mac); + +bool e1000x_is_vlan_packet(const uint8_t *buf, uint16_t vet); + +bool e1000x_rx_group_filter(uint32_t *mac, const uint8_t *buf); + +bool e1000x_hw_rx_enabled(uint32_t *mac); + +bool e1000x_is_oversized(uint32_t *mac, size_t size); + +void e1000x_restart_autoneg(uint32_t *mac, uint16_t *phy, QEMUTimer *timer); + +void e1000x_reset_mac_addr(NICState *nic, uint32_t *mac_regs, + uint8_t *mac_addr); + +void e1000x_update_regs_on_autoneg_done(uint32_t *mac, uint16_t *phy); + +void e1000x_increase_size_stats(uint32_t *mac, const int *size_regs, int size); + +typedef struct e1000x_txd_props { + uint8_t ipcss; + uint8_t ipcso; + uint16_t ipcse; + uint8_t tucss; + uint8_t tucso; + uint16_t tucse; + uint32_t paylen; + uint8_t hdr_len; + uint16_t mss; + int8_t ip; + int8_t tcp; + bool tse; +} e1000x_txd_props; + +void e1000x_read_tx_ctx_descr(struct e1000_context_desc *d, + e1000x_txd_props *props); + +#endif diff --git a/hw/net/eepro100.c b/hw/net/eepro100.c new file mode 100644 index 000000000..16e95ef9c --- /dev/null +++ b/hw/net/eepro100.c @@ -0,0 +1,2097 @@ +/* + * QEMU i8255x (PRO100) emulation + * + * Copyright (C) 2006-2011 Stefan Weil + * + * Portions of the code are copies from grub / etherboot eepro100.c + * and linux e100.c. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) version 3 or any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + * + * Tested features (i82559): + * PXE boot (i386 guest, i386 / mips / mipsel / ppc host) ok + * Linux networking (i386) ok + * + * Untested: + * Windows networking + * + * References: + * + * Intel 8255x 10/100 Mbps Ethernet Controller Family + * Open Source Software Developer Manual + * + * TODO: + * * PHY emulation should be separated from nic emulation. + * Most nic emulations could share the same phy code. + * * i82550 is untested. It is programmed like the i82559. + * * i82562 is untested. It is programmed like the i82559. + * * Power management (i82558 and later) is not implemented. + * * Wake-on-LAN is not implemented. + */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "hw/pci/pci.h" +#include "hw/qdev-properties.h" +#include "migration/vmstate.h" +#include "net/net.h" +#include "net/eth.h" +#include "hw/nvram/eeprom93xx.h" +#include "sysemu/sysemu.h" +#include "sysemu/dma.h" +#include "sysemu/reset.h" +#include "qemu/bitops.h" +#include "qemu/module.h" +#include "qapi/error.h" + +/* QEMU sends frames smaller than 60 bytes to ethernet nics. + * Such frames are rejected by real nics and their emulations. + * To avoid this behaviour, other nic emulations pad received + * frames. The following definition enables this padding for + * eepro100, too. We keep the define around in case it might + * become useful in the future if the core networking is ever + * changed to pad short packets itself. */ +#define CONFIG_PAD_RECEIVED_FRAMES + +/* Debug EEPRO100 card. */ +#if 0 +# define DEBUG_EEPRO100 +#endif + +#ifdef DEBUG_EEPRO100 +#define logout(fmt, ...) fprintf(stderr, "EE100\t%-24s" fmt, __func__, ## __VA_ARGS__) +#else +#define logout(fmt, ...) ((void)0) +#endif + +/* Set flags to 0 to disable debug output. */ +#define INT 1 /* interrupt related actions */ +#define MDI 1 /* mdi related actions */ +#define OTHER 1 +#define RXTX 1 +#define EEPROM 1 /* eeprom related actions */ + +#define TRACE(flag, command) ((flag) ? (command) : (void)0) + +#define missing(text) fprintf(stderr, "eepro100: feature is missing in this emulation: " text "\n") + +#define MAX_ETH_FRAME_SIZE 1514 + +/* This driver supports several different devices which are declared here. */ +#define i82550 0x82550 +#define i82551 0x82551 +#define i82557A 0x82557a +#define i82557B 0x82557b +#define i82557C 0x82557c +#define i82558A 0x82558a +#define i82558B 0x82558b +#define i82559A 0x82559a +#define i82559B 0x82559b +#define i82559C 0x82559c +#define i82559ER 0x82559e +#define i82562 0x82562 +#define i82801 0x82801 + +/* Use 64 word EEPROM. TODO: could be a runtime option. */ +#define EEPROM_SIZE 64 + +#define PCI_MEM_SIZE (4 * KiB) +#define PCI_IO_SIZE 64 +#define PCI_FLASH_SIZE (128 * KiB) + +#define BITS(n, m) (((0xffffffffU << (31 - n)) >> (31 - n + m)) << m) + +/* The SCB accepts the following controls for the Tx and Rx units: */ +#define CU_NOP 0x0000 /* No operation. */ +#define CU_START 0x0010 /* CU start. */ +#define CU_RESUME 0x0020 /* CU resume. */ +#define CU_STATSADDR 0x0040 /* Load dump counters address. */ +#define CU_SHOWSTATS 0x0050 /* Dump statistical counters. */ +#define CU_CMD_BASE 0x0060 /* Load CU base address. 
*/ +#define CU_DUMPSTATS 0x0070 /* Dump and reset statistical counters. */ +#define CU_SRESUME 0x00a0 /* CU static resume. */ + +#define RU_NOP 0x0000 +#define RX_START 0x0001 +#define RX_RESUME 0x0002 +#define RU_ABORT 0x0004 +#define RX_ADDR_LOAD 0x0006 +#define RX_RESUMENR 0x0007 +#define INT_MASK 0x0100 +#define DRVR_INT 0x0200 /* Driver generated interrupt. */ + +typedef struct { + const char *name; + const char *desc; + uint16_t device_id; + uint8_t revision; + uint16_t subsystem_vendor_id; + uint16_t subsystem_id; + + uint32_t device; + uint8_t stats_size; + bool has_extended_tcb_support; + bool power_management; +} E100PCIDeviceInfo; + +/* Offsets to the various registers. + All accesses need not be longword aligned. */ +typedef enum { + SCBStatus = 0, /* Status Word. */ + SCBAck = 1, + SCBCmd = 2, /* Rx/Command Unit command and status. */ + SCBIntmask = 3, + SCBPointer = 4, /* General purpose pointer. */ + SCBPort = 8, /* Misc. commands and operands. */ + SCBflash = 12, /* Flash memory control. */ + SCBeeprom = 14, /* EEPROM control. */ + SCBCtrlMDI = 16, /* MDI interface control. */ + SCBEarlyRx = 20, /* Early receive byte count. */ + SCBFlow = 24, /* Flow Control. */ + SCBpmdr = 27, /* Power Management Driver. */ + SCBgctrl = 28, /* General Control. */ + SCBgstat = 29, /* General Status. */ +} E100RegisterOffset; + +/* A speedo3 transmit buffer descriptor with two buffers... */ +typedef struct { + uint16_t status; + uint16_t command; + uint32_t link; /* void * */ + uint32_t tbd_array_addr; /* transmit buffer descriptor array address. */ + uint16_t tcb_bytes; /* transmit command block byte count (in lower 14 bits) */ + uint8_t tx_threshold; /* transmit threshold */ + uint8_t tbd_count; /* TBD number */ +#if 0 + /* This constitutes two "TBD" entries: hdr and data */ + uint32_t tx_buf_addr0; /* void *, header of frame to be transmitted. */ + int32_t tx_buf_size0; /* Length of Tx hdr. */ + uint32_t tx_buf_addr1; /* void *, data to be transmitted. */ + int32_t tx_buf_size1; /* Length of Tx data. */ +#endif +} eepro100_tx_t; + +/* Receive frame descriptor. */ +typedef struct { + int16_t status; + uint16_t command; + uint32_t link; /* struct RxFD * */ + uint32_t rx_buf_addr; /* void * */ + uint16_t count; + uint16_t size; + /* Ethernet frame data follows. */ +} eepro100_rx_t; + +typedef enum { + COMMAND_EL = BIT(15), + COMMAND_S = BIT(14), + COMMAND_I = BIT(13), + COMMAND_NC = BIT(4), + COMMAND_SF = BIT(3), + COMMAND_CMD = BITS(2, 0), +} scb_command_bit; + +typedef enum { + STATUS_C = BIT(15), + STATUS_OK = BIT(13), +} scb_status_bit; + +typedef struct { + uint32_t tx_good_frames, tx_max_collisions, tx_late_collisions, + tx_underruns, tx_lost_crs, tx_deferred, tx_single_collisions, + tx_multiple_collisions, tx_total_collisions; + uint32_t rx_good_frames, rx_crc_errors, rx_alignment_errors, + rx_resource_errors, rx_overrun_errors, rx_cdt_errors, + rx_short_frame_errors; + uint32_t fc_xmt_pause, fc_rcv_pause, fc_rcv_unsupported; + uint16_t xmt_tco_frames, rcv_tco_frames; + /* TODO: i82559 has six reserved statistics but a total of 24 dwords. */ + uint32_t reserved[4]; +} eepro100_stats_t; + +typedef enum { + cu_idle = 0, + cu_suspended = 1, + cu_active = 2, + cu_lpq_active = 2, + cu_hqp_active = 3 +} cu_state_t; + +typedef enum { + ru_idle = 0, + ru_suspended = 1, + ru_no_resources = 2, + ru_ready = 4 +} ru_state_t; + +typedef struct { + PCIDevice dev; + /* Hash register (multicast mask array, multiple individual addresses). 
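+ * The 8 bytes form a 64-bit hash; each bit enables one multicast bucket.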
*/ + uint8_t mult[8]; + MemoryRegion mmio_bar; + MemoryRegion io_bar; + MemoryRegion flash_bar; + NICState *nic; + NICConf conf; + uint8_t scb_stat; /* SCB stat/ack byte */ + uint8_t int_stat; /* PCI interrupt status */ + /* region must not be saved by nic_save. */ + uint16_t mdimem[32]; + eeprom_t *eeprom; + uint32_t device; /* device variant */ + /* (cu_base + cu_offset) address the next command block in the command block list. */ + uint32_t cu_base; /* CU base address */ + uint32_t cu_offset; /* CU address offset */ + /* (ru_base + ru_offset) address the RFD in the Receive Frame Area. */ + uint32_t ru_base; /* RU base address */ + uint32_t ru_offset; /* RU address offset */ + uint32_t statsaddr; /* pointer to eepro100_stats_t */ + + /* Temporary status information (no need to save these values), + * used while processing CU commands. */ + eepro100_tx_t tx; /* transmit buffer descriptor */ + uint32_t cb_address; /* = cu_base + cu_offset */ + + /* Statistical counters. Also used for wake-up packet (i82559). */ + eepro100_stats_t statistics; + + /* Data in mem is always in the byte order of the controller (le). + * It must be dword aligned to allow direct access to 32 bit values. */ + uint8_t mem[PCI_MEM_SIZE] __attribute__((aligned(8))); + + /* Configuration bytes. */ + uint8_t configuration[22]; + + /* vmstate for each particular nic */ + VMStateDescription *vmstate; + + /* Quasi static device properties (no need to save them). */ + uint16_t stats_size; + bool has_extended_tcb_support; +} EEPRO100State; + +/* Word indices in EEPROM. */ +typedef enum { + EEPROM_CNFG_MDIX = 0x03, + EEPROM_ID = 0x05, + EEPROM_PHY_ID = 0x06, + EEPROM_VENDOR_ID = 0x0c, + EEPROM_CONFIG_ASF = 0x0d, + EEPROM_DEVICE_ID = 0x23, + EEPROM_SMBUS_ADDR = 0x90, +} EEPROMOffset; + +/* Bit values for EEPROM ID word. */ +typedef enum { + EEPROM_ID_MDM = BIT(0), /* Modem */ + EEPROM_ID_STB = BIT(1), /* Standby Enable */ + EEPROM_ID_WMR = BIT(2), /* ??? */ + EEPROM_ID_WOL = BIT(5), /* Wake on LAN */ + EEPROM_ID_DPD = BIT(6), /* Deep Power Down */ + EEPROM_ID_ALT = BIT(7), /* */ + /* BITS(10, 8) device revision */ + EEPROM_ID_BD = BIT(11), /* boot disable */ + EEPROM_ID_ID = BIT(13), /* id bit */ + /* BITS(15, 14) signature */ + EEPROM_ID_VALID = BIT(14), /* signature for valid eeprom */ +} eeprom_id_bit; + +/* Default values for MDI (PHY) registers */ +static const uint16_t eepro100_mdi_default[] = { + /* MDI Registers 0 - 6, 7 */ + 0x3000, 0x780d, 0x02a8, 0x0154, 0x05e1, 0x0000, 0x0000, 0x0000, + /* MDI Registers 8 - 15 */ + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + /* MDI Registers 16 - 31 */ + 0x0003, 0x0000, 0x0001, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, +}; + +/* Readonly mask for MDI (PHY) registers */ +static const uint16_t eepro100_mdi_mask[] = { + 0x0000, 0xffff, 0xffff, 0xffff, 0xc01f, 0xffff, 0xffff, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0fff, 0x0000, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, + 0xffff, 0xffff, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, +}; + +static E100PCIDeviceInfo *eepro100_get_class(EEPRO100State *s); + +/* Read a 16 bit control/status (CSR) register. */ +static uint16_t e100_read_reg2(EEPRO100State *s, E100RegisterOffset addr) +{ + assert(!((uintptr_t)&s->mem[addr] & 1)); + return lduw_le_p(&s->mem[addr]); +} + +/* Read a 32 bit control/status (CSR) register. 
*/ +static uint32_t e100_read_reg4(EEPRO100State *s, E100RegisterOffset addr) +{ + assert(!((uintptr_t)&s->mem[addr] & 3)); + return ldl_le_p(&s->mem[addr]); +} + +/* Write a 16 bit control/status (CSR) register. */ +static void e100_write_reg2(EEPRO100State *s, E100RegisterOffset addr, + uint16_t val) +{ + assert(!((uintptr_t)&s->mem[addr] & 1)); + stw_le_p(&s->mem[addr], val); +} + +/* Write a 32 bit control/status (CSR) register. */ +static void e100_write_reg4(EEPRO100State *s, E100RegisterOffset addr, + uint32_t val) +{ + assert(!((uintptr_t)&s->mem[addr] & 3)); + stl_le_p(&s->mem[addr], val); +} + +#if defined(DEBUG_EEPRO100) +static const char *nic_dump(const uint8_t * buf, unsigned size) +{ + static char dump[3 * 16 + 1]; + char *p = &dump[0]; + if (size > 16) { + size = 16; + } + while (size-- > 0) { + p += sprintf(p, " %02x", *buf++); + } + return dump; +} +#endif /* DEBUG_EEPRO100 */ + +enum scb_stat_ack { + stat_ack_not_ours = 0x00, + stat_ack_sw_gen = 0x04, + stat_ack_rnr = 0x10, + stat_ack_cu_idle = 0x20, + stat_ack_frame_rx = 0x40, + stat_ack_cu_cmd_done = 0x80, + stat_ack_not_present = 0xFF, + stat_ack_rx = (stat_ack_sw_gen | stat_ack_rnr | stat_ack_frame_rx), + stat_ack_tx = (stat_ack_cu_idle | stat_ack_cu_cmd_done), +}; + +static void disable_interrupt(EEPRO100State * s) +{ + if (s->int_stat) { + TRACE(INT, logout("interrupt disabled\n")); + pci_irq_deassert(&s->dev); + s->int_stat = 0; + } +} + +static void enable_interrupt(EEPRO100State * s) +{ + if (!s->int_stat) { + TRACE(INT, logout("interrupt enabled\n")); + pci_irq_assert(&s->dev); + s->int_stat = 1; + } +} + +static void eepro100_acknowledge(EEPRO100State * s) +{ + s->scb_stat &= ~s->mem[SCBAck]; + s->mem[SCBAck] = s->scb_stat; + if (s->scb_stat == 0) { + disable_interrupt(s); + } +} + +static void eepro100_interrupt(EEPRO100State * s, uint8_t status) +{ + uint8_t mask = ~s->mem[SCBIntmask]; + s->mem[SCBAck] |= status; + status = s->scb_stat = s->mem[SCBAck]; + status &= (mask | 0x0f); +#if 0 + status &= (~s->mem[SCBIntmask] | 0x0f); +#endif + if (status && (mask & 0x01)) { + /* SCB mask and SCB Bit M do not disable interrupt. */ + enable_interrupt(s); + } else if (s->int_stat) { + disable_interrupt(s); + } +} + +static void eepro100_cx_interrupt(EEPRO100State * s) +{ + /* CU completed action command. */ + /* Transmit not ok (82557 only, not in emulation). */ + eepro100_interrupt(s, 0x80); +} + +static void eepro100_cna_interrupt(EEPRO100State * s) +{ + /* CU left the active state. */ + eepro100_interrupt(s, 0x20); +} + +static void eepro100_fr_interrupt(EEPRO100State * s) +{ + /* RU received a complete frame. */ + eepro100_interrupt(s, 0x40); +} + +static void eepro100_rnr_interrupt(EEPRO100State * s) +{ + /* RU is not ready. */ + eepro100_interrupt(s, 0x10); +} + +static void eepro100_mdi_interrupt(EEPRO100State * s) +{ + /* MDI completed read or write cycle. */ + eepro100_interrupt(s, 0x08); +} + +static void eepro100_swi_interrupt(EEPRO100State * s) +{ + /* Software has requested an interrupt. */ + eepro100_interrupt(s, 0x04); +} + +#if 0 +static void eepro100_fcp_interrupt(EEPRO100State * s) +{ + /* Flow control pause interrupt (82558 and later). 
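+ * Compiled out ('#if 0' above): flow-control pause events are not raised + * by this emulation.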
*/ + eepro100_interrupt(s, 0x01); +} +#endif + +static void e100_pci_reset(EEPRO100State *s, Error **errp) +{ + E100PCIDeviceInfo *info = eepro100_get_class(s); + uint32_t device = s->device; + uint8_t *pci_conf = s->dev.config; + + TRACE(OTHER, logout("%p\n", s)); + + /* PCI Status */ + pci_set_word(pci_conf + PCI_STATUS, PCI_STATUS_DEVSEL_MEDIUM | + PCI_STATUS_FAST_BACK); + /* PCI Latency Timer */ + pci_set_byte(pci_conf + PCI_LATENCY_TIMER, 0x20); /* latency timer = 32 clocks */ + /* Capability Pointer is set by PCI framework. */ + /* Interrupt Line */ + /* Interrupt Pin */ + pci_set_byte(pci_conf + PCI_INTERRUPT_PIN, 1); /* interrupt pin A */ + /* Minimum Grant */ + pci_set_byte(pci_conf + PCI_MIN_GNT, 0x08); + /* Maximum Latency */ + pci_set_byte(pci_conf + PCI_MAX_LAT, 0x18); + + s->stats_size = info->stats_size; + s->has_extended_tcb_support = info->has_extended_tcb_support; + + switch (device) { + case i82550: + case i82551: + case i82557A: + case i82557B: + case i82557C: + case i82558A: + case i82558B: + case i82559A: + case i82559B: + case i82559ER: + case i82562: + case i82801: + case i82559C: + break; + default: + logout("Device %X is undefined!\n", device); + } + + /* Standard TxCB. */ + s->configuration[6] |= BIT(4); + + /* Standard statistical counters. */ + s->configuration[6] |= BIT(5); + + if (s->stats_size == 80) { + /* TODO: check TCO Statistical Counters bit. Documentation not clear. */ + if (s->configuration[6] & BIT(2)) { + /* TCO statistical counters. */ + assert(s->configuration[6] & BIT(5)); + } else { + if (s->configuration[6] & BIT(5)) { + /* No extended statistical counters, i82557 compatible. */ + s->stats_size = 64; + } else { + /* i82558 compatible. */ + s->stats_size = 76; + } + } + } else { + if (s->configuration[6] & BIT(5)) { + /* No extended statistical counters. */ + s->stats_size = 64; + } + } + assert(s->stats_size > 0 && s->stats_size <= sizeof(s->statistics)); + + if (info->power_management) { + /* Power Management Capabilities */ + int cfg_offset = 0xdc; + int r = pci_add_capability(&s->dev, PCI_CAP_ID_PM, + cfg_offset, PCI_PM_SIZEOF, + errp); + if (r < 0) { + return; + } + + pci_set_word(pci_conf + cfg_offset + PCI_PM_PMC, 0x7e21); +#if 0 /* TODO: replace dummy code for power management emulation. */ + /* TODO: Power Management Control / Status. */ + pci_set_word(pci_conf + cfg_offset + PCI_PM_CTRL, 0x0000); + /* TODO: Ethernet Power Consumption Registers (i82559 and later). */ + pci_set_byte(pci_conf + cfg_offset + PCI_PM_PPB_EXTENSIONS, 0x0000); +#endif + } + +#if EEPROM_SIZE > 0 + if (device == i82557C || device == i82558B || device == i82559C) { + /* + TODO: get vendor id from EEPROM for i82557C or later. + TODO: get device id from EEPROM for i82557C or later. + TODO: status bit 4 can be disabled by EEPROM for i82558, i82559. + TODO: header type is determined by EEPROM for i82559. + TODO: get subsystem id from EEPROM for i82557C or later. + TODO: get subsystem vendor id from EEPROM for i82557C or later. + TODO: exp. rom baddr depends on a bit in EEPROM for i82558 or later. + TODO: capability pointer depends on EEPROM for i82558. 
+ */ + logout("Get device id and revision from EEPROM!!!\n"); + } +#endif /* EEPROM_SIZE > 0 */ +} + +static void nic_selective_reset(EEPRO100State * s) +{ + size_t i; + uint16_t *eeprom_contents = eeprom93xx_data(s->eeprom); +#if 0 + eeprom93xx_reset(s->eeprom); +#endif + memcpy(eeprom_contents, s->conf.macaddr.a, 6); + eeprom_contents[EEPROM_ID] = EEPROM_ID_VALID; + if (s->device == i82557B || s->device == i82557C) + eeprom_contents[5] = 0x0100; + eeprom_contents[EEPROM_PHY_ID] = 1; + uint16_t sum = 0; + for (i = 0; i < EEPROM_SIZE - 1; i++) { + sum += eeprom_contents[i]; + } + eeprom_contents[EEPROM_SIZE - 1] = 0xbaba - sum; + TRACE(EEPROM, logout("checksum=0x%04x\n", eeprom_contents[EEPROM_SIZE - 1])); + + memset(s->mem, 0, sizeof(s->mem)); + e100_write_reg4(s, SCBCtrlMDI, BIT(21)); + + assert(sizeof(s->mdimem) == sizeof(eepro100_mdi_default)); + memcpy(&s->mdimem[0], &eepro100_mdi_default[0], sizeof(s->mdimem)); +} + +static void nic_reset(void *opaque) +{ + EEPRO100State *s = opaque; + TRACE(OTHER, logout("%p\n", s)); + /* TODO: Clearing of hash register for selective reset, too? */ + memset(&s->mult[0], 0, sizeof(s->mult)); + nic_selective_reset(s); +} + +#if defined(DEBUG_EEPRO100) +static const char * const e100_reg[PCI_IO_SIZE / 4] = { + "Command/Status", + "General Pointer", + "Port", + "EEPROM/Flash Control", + "MDI Control", + "Receive DMA Byte Count", + "Flow Control", + "General Status/Control" +}; + +static char *regname(uint32_t addr) +{ + static char buf[32]; + if (addr < PCI_IO_SIZE) { + const char *r = e100_reg[addr / 4]; + if (r != 0) { + snprintf(buf, sizeof(buf), "%s+%u", r, addr % 4); + } else { + snprintf(buf, sizeof(buf), "0x%02x", addr); + } + } else { + snprintf(buf, sizeof(buf), "??? 0x%08x", addr); + } + return buf; +} +#endif /* DEBUG_EEPRO100 */ + +/***************************************************************************** + * + * Command emulation. + * + ****************************************************************************/ + +#if 0 +static uint16_t eepro100_read_command(EEPRO100State * s) +{ + uint16_t val = 0xffff; + TRACE(OTHER, logout("val=0x%04x\n", val)); + return val; +} +#endif + +/* Commands that can be put in a command list entry. */ +enum commands { + CmdNOp = 0, + CmdIASetup = 1, + CmdConfigure = 2, + CmdMulticastList = 3, + CmdTx = 4, + CmdTDR = 5, /* load microcode */ + CmdDump = 6, + CmdDiagnose = 7, + + /* And some extra flags: */ + CmdSuspend = 0x4000, /* Suspend after completion. */ + CmdIntr = 0x2000, /* Interrupt after completion. */ + CmdTxFlex = 0x0008, /* Use "Flexible mode" for CmdTx command. */ +}; + +static cu_state_t get_cu_state(EEPRO100State * s) +{ + return ((s->mem[SCBStatus] & BITS(7, 6)) >> 6); +} + +static void set_cu_state(EEPRO100State * s, cu_state_t state) +{ + s->mem[SCBStatus] = (s->mem[SCBStatus] & ~BITS(7, 6)) + (state << 6); +} + +static ru_state_t get_ru_state(EEPRO100State * s) +{ + return ((s->mem[SCBStatus] & BITS(5, 2)) >> 2); +} + +static void set_ru_state(EEPRO100State * s, ru_state_t state) +{ + s->mem[SCBStatus] = (s->mem[SCBStatus] & ~BITS(5, 2)) + (state << 2); +} + +static void dump_statistics(EEPRO100State * s) +{ + /* Dump statistical data. Most data is never changed by the emulation + * and always 0, so we first just copy the whole block and then those + * values which really matter. + * Number of data should check configuration!!! 
+ */ + pci_dma_write(&s->dev, s->statsaddr, &s->statistics, s->stats_size); + stl_le_pci_dma(&s->dev, s->statsaddr + 0, + s->statistics.tx_good_frames); + stl_le_pci_dma(&s->dev, s->statsaddr + 36, + s->statistics.rx_good_frames); + stl_le_pci_dma(&s->dev, s->statsaddr + 48, + s->statistics.rx_resource_errors); + stl_le_pci_dma(&s->dev, s->statsaddr + 60, + s->statistics.rx_short_frame_errors); +#if 0 + stw_le_pci_dma(&s->dev, s->statsaddr + 76, s->statistics.xmt_tco_frames); + stw_le_pci_dma(&s->dev, s->statsaddr + 78, s->statistics.rcv_tco_frames); + missing("CU dump statistical counters"); +#endif +} + +static void read_cb(EEPRO100State *s) +{ + pci_dma_read(&s->dev, s->cb_address, &s->tx, sizeof(s->tx)); + s->tx.status = le16_to_cpu(s->tx.status); + s->tx.command = le16_to_cpu(s->tx.command); + s->tx.link = le32_to_cpu(s->tx.link); + s->tx.tbd_array_addr = le32_to_cpu(s->tx.tbd_array_addr); + s->tx.tcb_bytes = le16_to_cpu(s->tx.tcb_bytes); +} + +static void tx_command(EEPRO100State *s) +{ + uint32_t tbd_array = s->tx.tbd_array_addr; + uint16_t tcb_bytes = s->tx.tcb_bytes & 0x3fff; + /* Sends larger than MAX_ETH_FRAME_SIZE are allowed, up to 2600 bytes. */ + uint8_t buf[2600]; + uint16_t size = 0; + uint32_t tbd_address = s->cb_address + 0x10; + TRACE(RXTX, logout + ("transmit, TBD array address 0x%08x, TCB byte count 0x%04x, TBD count %u\n", + tbd_array, tcb_bytes, s->tx.tbd_count)); + + if (tcb_bytes > 2600) { + logout("TCB byte count too large, using 2600\n"); + tcb_bytes = 2600; + } + if (!((tcb_bytes > 0) || (tbd_array != 0xffffffff))) { + logout + ("illegal values of TBD array address and TCB byte count!\n"); + } + assert(tcb_bytes <= sizeof(buf)); + while (size < tcb_bytes) { + TRACE(RXTX, logout + ("TBD (simplified mode): buffer address 0x%08x, size 0x%04x\n", + tbd_address, tcb_bytes)); + pci_dma_read(&s->dev, tbd_address, &buf[size], tcb_bytes); + size += tcb_bytes; + } + if (tbd_array == 0xffffffff) { + /* Simplified mode. Was already handled by code above. */ + } else { + /* Flexible mode. */ + uint8_t tbd_count = 0; + if (s->has_extended_tcb_support && !(s->configuration[6] & BIT(4))) { + /* Extended Flexible TCB. 
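+             *
+             * Editor's sketch of the TBD entry layout assumed by both
+             * loops below, inferred from the ldl/lduw offsets rather than
+             * quoted from the datasheet:
+             *
+             *     struct tbd {
+             *         uint32_t tx_buffer_address;  // offset 0
+             *         uint16_t tx_buffer_size;     // offset 4
+             *         uint16_t tx_buffer_el;       // offset 6, bit 0 = EL,
+             *                                      //   ends the list
+             *     };                               // stride 8 bytes
+             *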
*/ + for (; tbd_count < 2; tbd_count++) { + uint32_t tx_buffer_address = ldl_le_pci_dma(&s->dev, + tbd_address); + uint16_t tx_buffer_size = lduw_le_pci_dma(&s->dev, + tbd_address + 4); + uint16_t tx_buffer_el = lduw_le_pci_dma(&s->dev, + tbd_address + 6); + tbd_address += 8; + TRACE(RXTX, logout + ("TBD (extended flexible mode): buffer address 0x%08x, size 0x%04x\n", + tx_buffer_address, tx_buffer_size)); + tx_buffer_size = MIN(tx_buffer_size, sizeof(buf) - size); + pci_dma_read(&s->dev, tx_buffer_address, + &buf[size], tx_buffer_size); + size += tx_buffer_size; + if (tx_buffer_el & 1) { + break; + } + } + } + tbd_address = tbd_array; + for (; tbd_count < s->tx.tbd_count; tbd_count++) { + uint32_t tx_buffer_address = ldl_le_pci_dma(&s->dev, tbd_address); + uint16_t tx_buffer_size = lduw_le_pci_dma(&s->dev, tbd_address + 4); + uint16_t tx_buffer_el = lduw_le_pci_dma(&s->dev, tbd_address + 6); + tbd_address += 8; + TRACE(RXTX, logout + ("TBD (flexible mode): buffer address 0x%08x, size 0x%04x\n", + tx_buffer_address, tx_buffer_size)); + tx_buffer_size = MIN(tx_buffer_size, sizeof(buf) - size); + pci_dma_read(&s->dev, tx_buffer_address, + &buf[size], tx_buffer_size); + size += tx_buffer_size; + if (tx_buffer_el & 1) { + break; + } + } + } + TRACE(RXTX, logout("%p sending frame, len=%d,%s\n", s, size, nic_dump(buf, size))); + qemu_send_packet(qemu_get_queue(s->nic), buf, size); + s->statistics.tx_good_frames++; + /* Transmit with bad status would raise an CX/TNO interrupt. + * (82557 only). Emulation never has bad status. */ +#if 0 + eepro100_cx_interrupt(s); +#endif +} + +static void set_multicast_list(EEPRO100State *s) +{ + uint16_t multicast_count = s->tx.tbd_array_addr & BITS(13, 0); + uint16_t i; + memset(&s->mult[0], 0, sizeof(s->mult)); + TRACE(OTHER, logout("multicast list, multicast count = %u\n", multicast_count)); + for (i = 0; i < multicast_count; i += 6) { + uint8_t multicast_addr[6]; + pci_dma_read(&s->dev, s->cb_address + 10 + i, multicast_addr, 6); + TRACE(OTHER, logout("multicast entry %s\n", nic_dump(multicast_addr, 6))); + unsigned mcast_idx = (net_crc32(multicast_addr, ETH_ALEN) & + BITS(7, 2)) >> 2; + assert(mcast_idx < 64); + s->mult[mcast_idx >> 3] |= (1 << (mcast_idx & 7)); + } +} + +static void action_command(EEPRO100State *s) +{ + /* The loop below won't stop if it gets special handcrafted data. + Therefore we limit the number of iterations. */ + unsigned max_loop_count = 16; + + for (;;) { + bool bit_el; + bool bit_s; + bool bit_i; + bool bit_nc; + uint16_t ok_status = STATUS_OK; + s->cb_address = s->cu_base + s->cu_offset; + read_cb(s); + bit_el = ((s->tx.command & COMMAND_EL) != 0); + bit_s = ((s->tx.command & COMMAND_S) != 0); + bit_i = ((s->tx.command & COMMAND_I) != 0); + bit_nc = ((s->tx.command & COMMAND_NC) != 0); +#if 0 + bool bit_sf = ((s->tx.command & COMMAND_SF) != 0); +#endif + + if (max_loop_count-- == 0) { + /* Prevent an endless loop. */ + logout("loop in %s:%u\n", __FILE__, __LINE__); + break; + } + + s->cu_offset = s->tx.link; + TRACE(OTHER, + logout("val=(cu start), status=0x%04x, command=0x%04x, link=0x%08x\n", + s->tx.status, s->tx.command, s->tx.link)); + switch (s->tx.command & COMMAND_CMD) { + case CmdNOp: + /* Do nothing. 
*/ + break; + case CmdIASetup: + pci_dma_read(&s->dev, s->cb_address + 8, &s->conf.macaddr.a[0], 6); + TRACE(OTHER, logout("macaddr: %s\n", nic_dump(&s->conf.macaddr.a[0], 6))); + break; + case CmdConfigure: + pci_dma_read(&s->dev, s->cb_address + 8, + &s->configuration[0], sizeof(s->configuration)); + TRACE(OTHER, logout("configuration: %s\n", + nic_dump(&s->configuration[0], 16))); + TRACE(OTHER, logout("configuration: %s\n", + nic_dump(&s->configuration[16], + ARRAY_SIZE(s->configuration) - 16))); + if (s->configuration[20] & BIT(6)) { + TRACE(OTHER, logout("Multiple IA bit\n")); + } + break; + case CmdMulticastList: + set_multicast_list(s); + break; + case CmdTx: + if (bit_nc) { + missing("CmdTx: NC = 0"); + ok_status = 0; + break; + } + tx_command(s); + break; + case CmdTDR: + TRACE(OTHER, logout("load microcode\n")); + /* Starting with offset 8, the command contains + * 64 dwords microcode which we just ignore here. */ + break; + case CmdDiagnose: + TRACE(OTHER, logout("diagnose\n")); + /* Make sure error flag is not set. */ + s->tx.status = 0; + break; + default: + missing("undefined command"); + ok_status = 0; + break; + } + /* Write new status. */ + stw_le_pci_dma(&s->dev, s->cb_address, + s->tx.status | ok_status | STATUS_C); + if (bit_i) { + /* CU completed action. */ + eepro100_cx_interrupt(s); + } + if (bit_el) { + /* CU becomes idle. Terminate command loop. */ + set_cu_state(s, cu_idle); + eepro100_cna_interrupt(s); + break; + } else if (bit_s) { + /* CU becomes suspended. Terminate command loop. */ + set_cu_state(s, cu_suspended); + eepro100_cna_interrupt(s); + break; + } else { + /* More entries in list. */ + TRACE(OTHER, logout("CU list with at least one more entry\n")); + } + } + TRACE(OTHER, logout("CU list empty\n")); + /* List is empty. Now CU is idle or suspended. */ +} + +static void eepro100_cu_command(EEPRO100State * s, uint8_t val) +{ + cu_state_t cu_state; + switch (val) { + case CU_NOP: + /* No operation. */ + break; + case CU_START: + cu_state = get_cu_state(s); + if (cu_state != cu_idle && cu_state != cu_suspended) { + /* Intel documentation says that CU must be idle or suspended + * for the CU start command. */ + logout("unexpected CU state is %u\n", cu_state); + } + set_cu_state(s, cu_active); + s->cu_offset = e100_read_reg4(s, SCBPointer); + action_command(s); + break; + case CU_RESUME: + if (get_cu_state(s) != cu_suspended) { + logout("bad CU resume from CU state %u\n", get_cu_state(s)); + /* Workaround for bad Linux eepro100 driver which resumes + * from idle state. */ +#if 0 + missing("cu resume"); +#endif + set_cu_state(s, cu_suspended); + } + if (get_cu_state(s) == cu_suspended) { + TRACE(OTHER, logout("CU resuming\n")); + set_cu_state(s, cu_active); + action_command(s); + } + break; + case CU_STATSADDR: + /* Load dump counters address. */ + s->statsaddr = e100_read_reg4(s, SCBPointer); + TRACE(OTHER, logout("val=0x%02x (dump counters address)\n", val)); + if (s->statsaddr & 3) { + /* Memory must be Dword aligned. */ + logout("unaligned dump counters address\n"); + /* Handling of misaligned addresses is undefined. + * Here we align the address by ignoring the lower bits. */ + /* TODO: Test unaligned dump counter address on real hardware. */ + s->statsaddr &= ~3; + } + break; + case CU_SHOWSTATS: + /* Dump statistical counters. */ + TRACE(OTHER, logout("val=0x%02x (dump stats)\n", val)); + dump_statistics(s); + stl_le_pci_dma(&s->dev, s->statsaddr + s->stats_size, 0xa005); + break; + case CU_CMD_BASE: + /* Load CU base. 
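+         *
+         * Editor's note: command blocks are always fetched from
+         * cu_base + cu_offset (see action_command() above), so e.g. a
+         * CU_CMD_BASE of 0x10000 followed by a CU_START with SCBPointer
+         * set to 0x40 reads the first CB at guest address 0x10040.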
*/ + TRACE(OTHER, logout("val=0x%02x (CU base address)\n", val)); + s->cu_base = e100_read_reg4(s, SCBPointer); + break; + case CU_DUMPSTATS: + /* Dump and reset statistical counters. */ + TRACE(OTHER, logout("val=0x%02x (dump stats and reset)\n", val)); + dump_statistics(s); + stl_le_pci_dma(&s->dev, s->statsaddr + s->stats_size, 0xa007); + memset(&s->statistics, 0, sizeof(s->statistics)); + break; + case CU_SRESUME: + /* CU static resume. */ + missing("CU static resume"); + break; + default: + missing("Undefined CU command"); + } +} + +static void eepro100_ru_command(EEPRO100State * s, uint8_t val) +{ + switch (val) { + case RU_NOP: + /* No operation. */ + break; + case RX_START: + /* RU start. */ + if (get_ru_state(s) != ru_idle) { + logout("RU state is %u, should be %u\n", get_ru_state(s), ru_idle); +#if 0 + assert(!"wrong RU state"); +#endif + } + set_ru_state(s, ru_ready); + s->ru_offset = e100_read_reg4(s, SCBPointer); + qemu_flush_queued_packets(qemu_get_queue(s->nic)); + TRACE(OTHER, logout("val=0x%02x (rx start)\n", val)); + break; + case RX_RESUME: + /* Restart RU. */ + if (get_ru_state(s) != ru_suspended) { + logout("RU state is %u, should be %u\n", get_ru_state(s), + ru_suspended); +#if 0 + assert(!"wrong RU state"); +#endif + } + set_ru_state(s, ru_ready); + break; + case RU_ABORT: + /* RU abort. */ + if (get_ru_state(s) == ru_ready) { + eepro100_rnr_interrupt(s); + } + set_ru_state(s, ru_idle); + break; + case RX_ADDR_LOAD: + /* Load RU base. */ + TRACE(OTHER, logout("val=0x%02x (RU base address)\n", val)); + s->ru_base = e100_read_reg4(s, SCBPointer); + break; + default: + logout("val=0x%02x (undefined RU command)\n", val); + missing("Undefined SU command"); + } +} + +static void eepro100_write_command(EEPRO100State * s, uint8_t val) +{ + eepro100_ru_command(s, val & 0x0f); + eepro100_cu_command(s, val & 0xf0); + if ((val) == 0) { + TRACE(OTHER, logout("val=0x%02x\n", val)); + } + /* Clear command byte after command was accepted. */ + s->mem[SCBCmd] = 0; +} + +/***************************************************************************** + * + * EEPROM emulation. + * + ****************************************************************************/ + +#define EEPROM_CS 0x02 +#define EEPROM_SK 0x01 +#define EEPROM_DI 0x04 +#define EEPROM_DO 0x08 + +static uint16_t eepro100_read_eeprom(EEPRO100State * s) +{ + uint16_t val = e100_read_reg2(s, SCBeeprom); + if (eeprom93xx_read(s->eeprom)) { + val |= EEPROM_DO; + } else { + val &= ~EEPROM_DO; + } + TRACE(EEPROM, logout("val=0x%04x\n", val)); + return val; +} + +static void eepro100_write_eeprom(eeprom_t * eeprom, uint8_t val) +{ + TRACE(EEPROM, logout("val=0x%02x\n", val)); + + /* mask unwritable bits */ +#if 0 + val = SET_MASKED(val, 0x31, eeprom->value); +#endif + + int eecs = ((val & EEPROM_CS) != 0); + int eesk = ((val & EEPROM_SK) != 0); + int eedi = ((val & EEPROM_DI) != 0); + eeprom93xx_write(eeprom, eecs, eesk, eedi); +} + +/***************************************************************************** + * + * MDI emulation. 
+ * + ****************************************************************************/ + +#if defined(DEBUG_EEPRO100) +static const char * const mdi_op_name[] = { + "opcode 0", + "write", + "read", + "opcode 3" +}; + +static const char * const mdi_reg_name[] = { + "Control", + "Status", + "PHY Identification (Word 1)", + "PHY Identification (Word 2)", + "Auto-Negotiation Advertisement", + "Auto-Negotiation Link Partner Ability", + "Auto-Negotiation Expansion" +}; + +static const char *reg2name(uint8_t reg) +{ + static char buffer[10]; + const char *p = buffer; + if (reg < ARRAY_SIZE(mdi_reg_name)) { + p = mdi_reg_name[reg]; + } else { + snprintf(buffer, sizeof(buffer), "reg=0x%02x", reg); + } + return p; +} +#endif /* DEBUG_EEPRO100 */ + +static uint32_t eepro100_read_mdi(EEPRO100State * s) +{ + uint32_t val = e100_read_reg4(s, SCBCtrlMDI); + +#ifdef DEBUG_EEPRO100 + uint8_t raiseint = (val & BIT(29)) >> 29; + uint8_t opcode = (val & BITS(27, 26)) >> 26; + uint8_t phy = (val & BITS(25, 21)) >> 21; + uint8_t reg = (val & BITS(20, 16)) >> 16; + uint16_t data = (val & BITS(15, 0)); +#endif + /* Emulation takes no time to finish MDI transaction. */ + val |= BIT(28); + TRACE(MDI, logout("val=0x%08x (int=%u, %s, phy=%u, %s, data=0x%04x\n", + val, raiseint, mdi_op_name[opcode], phy, + reg2name(reg), data)); + return val; +} + +static void eepro100_write_mdi(EEPRO100State *s) +{ + uint32_t val = e100_read_reg4(s, SCBCtrlMDI); + uint8_t raiseint = (val & BIT(29)) >> 29; + uint8_t opcode = (val & BITS(27, 26)) >> 26; + uint8_t phy = (val & BITS(25, 21)) >> 21; + uint8_t reg = (val & BITS(20, 16)) >> 16; + uint16_t data = (val & BITS(15, 0)); + TRACE(MDI, logout("val=0x%08x (int=%u, %s, phy=%u, %s, data=0x%04x\n", + val, raiseint, mdi_op_name[opcode], phy, reg2name(reg), data)); + if (phy != 1) { + /* Unsupported PHY address. */ +#if 0 + logout("phy must be 1 but is %u\n", phy); +#endif + data = 0; + } else if (opcode != 1 && opcode != 2) { + /* Unsupported opcode. */ + logout("opcode must be 1 or 2 but is %u\n", opcode); + data = 0; + } else if (reg > 6) { + /* Unsupported register. */ + logout("register must be 0...6 but is %u\n", reg); + data = 0; + } else { + TRACE(MDI, logout("val=0x%08x (int=%u, %s, phy=%u, %s, data=0x%04x\n", + val, raiseint, mdi_op_name[opcode], phy, + reg2name(reg), data)); + if (opcode == 1) { + /* MDI write */ + switch (reg) { + case 0: /* Control Register */ + if (data & 0x8000) { + /* Reset status and control registers to default. */ + s->mdimem[0] = eepro100_mdi_default[0]; + s->mdimem[1] = eepro100_mdi_default[1]; + data = s->mdimem[reg]; + } else { + /* Restart Auto Configuration = Normal Operation */ + data &= ~0x0200; + } + break; + case 1: /* Status Register */ + missing("not writable"); + break; + case 2: /* PHY Identification Register (Word 1) */ + case 3: /* PHY Identification Register (Word 2) */ + missing("not implemented"); + break; + case 4: /* Auto-Negotiation Advertisement Register */ + case 5: /* Auto-Negotiation Link Partner Ability Register */ + break; + case 6: /* Auto-Negotiation Expansion Register */ + default: + missing("not implemented"); + } + s->mdimem[reg] &= eepro100_mdi_mask[reg]; + s->mdimem[reg] |= data & ~eepro100_mdi_mask[reg]; + } else if (opcode == 2) { + /* MDI read */ + switch (reg) { + case 0: /* Control Register */ + if (data & 0x8000) { + /* Reset status and control registers to default. 
*/ + s->mdimem[0] = eepro100_mdi_default[0]; + s->mdimem[1] = eepro100_mdi_default[1]; + } + break; + case 1: /* Status Register */ + s->mdimem[reg] |= 0x0020; + break; + case 2: /* PHY Identification Register (Word 1) */ + case 3: /* PHY Identification Register (Word 2) */ + case 4: /* Auto-Negotiation Advertisement Register */ + break; + case 5: /* Auto-Negotiation Link Partner Ability Register */ + s->mdimem[reg] = 0x41fe; + break; + case 6: /* Auto-Negotiation Expansion Register */ + s->mdimem[reg] = 0x0001; + break; + } + data = s->mdimem[reg]; + } + /* Emulation takes no time to finish MDI transaction. + * Set MDI bit in SCB status register. */ + s->mem[SCBAck] |= 0x08; + val |= BIT(28); + if (raiseint) { + eepro100_mdi_interrupt(s); + } + } + val = (val & 0xffff0000) + data; + e100_write_reg4(s, SCBCtrlMDI, val); +} + +/***************************************************************************** + * + * Port emulation. + * + ****************************************************************************/ + +#define PORT_SOFTWARE_RESET 0 +#define PORT_SELFTEST 1 +#define PORT_SELECTIVE_RESET 2 +#define PORT_DUMP 3 +#define PORT_SELECTION_MASK 3 + +typedef struct { + uint32_t st_sign; /* Self Test Signature */ + uint32_t st_result; /* Self Test Results */ +} eepro100_selftest_t; + +static uint32_t eepro100_read_port(EEPRO100State * s) +{ + return 0; +} + +static void eepro100_write_port(EEPRO100State *s) +{ + uint32_t val = e100_read_reg4(s, SCBPort); + uint32_t address = (val & ~PORT_SELECTION_MASK); + uint8_t selection = (val & PORT_SELECTION_MASK); + switch (selection) { + case PORT_SOFTWARE_RESET: + nic_reset(s); + break; + case PORT_SELFTEST: + TRACE(OTHER, logout("selftest address=0x%08x\n", address)); + eepro100_selftest_t data; + pci_dma_read(&s->dev, address, (uint8_t *) &data, sizeof(data)); + data.st_sign = 0xffffffff; + data.st_result = 0; + pci_dma_write(&s->dev, address, (uint8_t *) &data, sizeof(data)); + break; + case PORT_SELECTIVE_RESET: + TRACE(OTHER, logout("selective reset, selftest address=0x%08x\n", address)); + nic_selective_reset(s); + break; + default: + logout("val=0x%08x\n", val); + missing("unknown port selection"); + } +} + +/***************************************************************************** + * + * General hardware emulation. 
+ * + ****************************************************************************/ + +static uint8_t eepro100_read1(EEPRO100State * s, uint32_t addr) +{ + uint8_t val = 0; + if (addr <= sizeof(s->mem) - sizeof(val)) { + val = s->mem[addr]; + } + + switch (addr) { + case SCBStatus: + case SCBAck: + TRACE(OTHER, logout("addr=%s val=0x%02x\n", regname(addr), val)); + break; + case SCBCmd: + TRACE(OTHER, logout("addr=%s val=0x%02x\n", regname(addr), val)); +#if 0 + val = eepro100_read_command(s); +#endif + break; + case SCBIntmask: + TRACE(OTHER, logout("addr=%s val=0x%02x\n", regname(addr), val)); + break; + case SCBPort + 3: + TRACE(OTHER, logout("addr=%s val=0x%02x\n", regname(addr), val)); + break; + case SCBeeprom: + val = eepro100_read_eeprom(s); + break; + case SCBCtrlMDI: + case SCBCtrlMDI + 1: + case SCBCtrlMDI + 2: + case SCBCtrlMDI + 3: + val = (uint8_t)(eepro100_read_mdi(s) >> (8 * (addr & 3))); + TRACE(OTHER, logout("addr=%s val=0x%02x\n", regname(addr), val)); + break; + case SCBpmdr: /* Power Management Driver Register */ + val = 0; + TRACE(OTHER, logout("addr=%s val=0x%02x\n", regname(addr), val)); + break; + case SCBgctrl: /* General Control Register */ + TRACE(OTHER, logout("addr=%s val=0x%02x\n", regname(addr), val)); + break; + case SCBgstat: /* General Status Register */ + /* 100 Mbps full duplex, valid link */ + val = 0x07; + TRACE(OTHER, logout("addr=General Status val=%02x\n", val)); + break; + default: + logout("addr=%s val=0x%02x\n", regname(addr), val); + missing("unknown byte read"); + } + return val; +} + +static uint16_t eepro100_read2(EEPRO100State * s, uint32_t addr) +{ + uint16_t val = 0; + if (addr <= sizeof(s->mem) - sizeof(val)) { + val = e100_read_reg2(s, addr); + } + + switch (addr) { + case SCBStatus: + case SCBCmd: + TRACE(OTHER, logout("addr=%s val=0x%04x\n", regname(addr), val)); + break; + case SCBeeprom: + val = eepro100_read_eeprom(s); + TRACE(OTHER, logout("addr=%s val=0x%04x\n", regname(addr), val)); + break; + case SCBCtrlMDI: + case SCBCtrlMDI + 2: + val = (uint16_t)(eepro100_read_mdi(s) >> (8 * (addr & 3))); + TRACE(OTHER, logout("addr=%s val=0x%04x\n", regname(addr), val)); + break; + default: + logout("addr=%s val=0x%04x\n", regname(addr), val); + missing("unknown word read"); + } + return val; +} + +static uint32_t eepro100_read4(EEPRO100State * s, uint32_t addr) +{ + uint32_t val = 0; + if (addr <= sizeof(s->mem) - sizeof(val)) { + val = e100_read_reg4(s, addr); + } + + switch (addr) { + case SCBStatus: + TRACE(OTHER, logout("addr=%s val=0x%08x\n", regname(addr), val)); + break; + case SCBPointer: + TRACE(OTHER, logout("addr=%s val=0x%08x\n", regname(addr), val)); + break; + case SCBPort: + val = eepro100_read_port(s); + TRACE(OTHER, logout("addr=%s val=0x%08x\n", regname(addr), val)); + break; + case SCBflash: + val = eepro100_read_eeprom(s); + TRACE(OTHER, logout("addr=%s val=0x%08x\n", regname(addr), val)); + break; + case SCBCtrlMDI: + val = eepro100_read_mdi(s); + break; + default: + logout("addr=%s val=0x%08x\n", regname(addr), val); + missing("unknown longword read"); + } + return val; +} + +static void eepro100_write1(EEPRO100State * s, uint32_t addr, uint8_t val) +{ + /* SCBStatus is readonly. 
*/ + if (addr > SCBStatus && addr <= sizeof(s->mem) - sizeof(val)) { + s->mem[addr] = val; + } + + switch (addr) { + case SCBStatus: + TRACE(OTHER, logout("addr=%s val=0x%02x\n", regname(addr), val)); + break; + case SCBAck: + TRACE(OTHER, logout("addr=%s val=0x%02x\n", regname(addr), val)); + eepro100_acknowledge(s); + break; + case SCBCmd: + TRACE(OTHER, logout("addr=%s val=0x%02x\n", regname(addr), val)); + eepro100_write_command(s, val); + break; + case SCBIntmask: + TRACE(OTHER, logout("addr=%s val=0x%02x\n", regname(addr), val)); + if (val & BIT(1)) { + eepro100_swi_interrupt(s); + } + eepro100_interrupt(s, 0); + break; + case SCBPointer: + case SCBPointer + 1: + case SCBPointer + 2: + case SCBPointer + 3: + TRACE(OTHER, logout("addr=%s val=0x%02x\n", regname(addr), val)); + break; + case SCBPort: + case SCBPort + 1: + case SCBPort + 2: + TRACE(OTHER, logout("addr=%s val=0x%02x\n", regname(addr), val)); + break; + case SCBPort + 3: + TRACE(OTHER, logout("addr=%s val=0x%02x\n", regname(addr), val)); + eepro100_write_port(s); + break; + case SCBFlow: /* does not exist on 82557 */ + case SCBFlow + 1: + case SCBFlow + 2: + case SCBpmdr: /* does not exist on 82557 */ + TRACE(OTHER, logout("addr=%s val=0x%02x\n", regname(addr), val)); + break; + case SCBeeprom: + TRACE(OTHER, logout("addr=%s val=0x%02x\n", regname(addr), val)); + eepro100_write_eeprom(s->eeprom, val); + break; + case SCBCtrlMDI: + case SCBCtrlMDI + 1: + case SCBCtrlMDI + 2: + TRACE(OTHER, logout("addr=%s val=0x%02x\n", regname(addr), val)); + break; + case SCBCtrlMDI + 3: + TRACE(OTHER, logout("addr=%s val=0x%02x\n", regname(addr), val)); + eepro100_write_mdi(s); + break; + default: + logout("addr=%s val=0x%02x\n", regname(addr), val); + missing("unknown byte write"); + } +} + +static void eepro100_write2(EEPRO100State * s, uint32_t addr, uint16_t val) +{ + /* SCBStatus is readonly. 
*/ + if (addr > SCBStatus && addr <= sizeof(s->mem) - sizeof(val)) { + e100_write_reg2(s, addr, val); + } + + switch (addr) { + case SCBStatus: + TRACE(OTHER, logout("addr=%s val=0x%04x\n", regname(addr), val)); + s->mem[SCBAck] = (val >> 8); + eepro100_acknowledge(s); + break; + case SCBCmd: + TRACE(OTHER, logout("addr=%s val=0x%04x\n", regname(addr), val)); + eepro100_write_command(s, val); + eepro100_write1(s, SCBIntmask, val >> 8); + break; + case SCBPointer: + case SCBPointer + 2: + TRACE(OTHER, logout("addr=%s val=0x%04x\n", regname(addr), val)); + break; + case SCBPort: + TRACE(OTHER, logout("addr=%s val=0x%04x\n", regname(addr), val)); + break; + case SCBPort + 2: + TRACE(OTHER, logout("addr=%s val=0x%04x\n", regname(addr), val)); + eepro100_write_port(s); + break; + case SCBeeprom: + TRACE(OTHER, logout("addr=%s val=0x%04x\n", regname(addr), val)); + eepro100_write_eeprom(s->eeprom, val); + break; + case SCBCtrlMDI: + TRACE(OTHER, logout("addr=%s val=0x%04x\n", regname(addr), val)); + break; + case SCBCtrlMDI + 2: + TRACE(OTHER, logout("addr=%s val=0x%04x\n", regname(addr), val)); + eepro100_write_mdi(s); + break; + default: + logout("addr=%s val=0x%04x\n", regname(addr), val); + missing("unknown word write"); + } +} + +static void eepro100_write4(EEPRO100State * s, uint32_t addr, uint32_t val) +{ + if (addr <= sizeof(s->mem) - sizeof(val)) { + e100_write_reg4(s, addr, val); + } + + switch (addr) { + case SCBPointer: + TRACE(OTHER, logout("addr=%s val=0x%08x\n", regname(addr), val)); + break; + case SCBPort: + TRACE(OTHER, logout("addr=%s val=0x%08x\n", regname(addr), val)); + eepro100_write_port(s); + break; + case SCBflash: + TRACE(OTHER, logout("addr=%s val=0x%08x\n", regname(addr), val)); + val = val >> 16; + eepro100_write_eeprom(s->eeprom, val); + break; + case SCBCtrlMDI: + TRACE(OTHER, logout("addr=%s val=0x%08x\n", regname(addr), val)); + eepro100_write_mdi(s); + break; + default: + logout("addr=%s val=0x%08x\n", regname(addr), val); + missing("unknown longword write"); + } +} + +static uint64_t eepro100_read(void *opaque, hwaddr addr, + unsigned size) +{ + EEPRO100State *s = opaque; + + switch (size) { + case 1: return eepro100_read1(s, addr); + case 2: return eepro100_read2(s, addr); + case 4: return eepro100_read4(s, addr); + default: abort(); + } +} + +static void eepro100_write(void *opaque, hwaddr addr, + uint64_t data, unsigned size) +{ + EEPRO100State *s = opaque; + + switch (size) { + case 1: + eepro100_write1(s, addr, data); + break; + case 2: + eepro100_write2(s, addr, data); + break; + case 4: + eepro100_write4(s, addr, data); + break; + default: + abort(); + } +} + +static const MemoryRegionOps eepro100_ops = { + .read = eepro100_read, + .write = eepro100_write, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static ssize_t nic_receive(NetClientState *nc, const uint8_t * buf, size_t size) +{ + /* TODO: + * - Magic packets should set bit 30 in power management driver register. + * - Interesting packets should set bit 29 in power management driver register. 
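+     *
+     * Editor's outline of the filter chain implemented below (derived
+     * from the if/else ladder, not from the datasheet): drop when CSMA
+     * is disabled, on short frames (config byte 7 bit 0) and on long
+     * frames (unless config byte 18 bit 3); then accept, in order:
+     * unicast match, broadcast, multicast hash or "multicast all"
+     * (config byte 21 bit 3), promiscuous (config byte 15 bit 0),
+     * multiple-IA hash (config byte 20 bit 6); anything else is ignored.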
+ */ + EEPRO100State *s = qemu_get_nic_opaque(nc); + uint16_t rfd_status = 0xa000; +#if defined(CONFIG_PAD_RECEIVED_FRAMES) + uint8_t min_buf[60]; +#endif + static const uint8_t broadcast_macaddr[6] = + { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; + +#if defined(CONFIG_PAD_RECEIVED_FRAMES) + /* Pad to minimum Ethernet frame length */ + if (size < sizeof(min_buf)) { + memcpy(min_buf, buf, size); + memset(&min_buf[size], 0, sizeof(min_buf) - size); + buf = min_buf; + size = sizeof(min_buf); + } +#endif + + if (s->configuration[8] & 0x80) { + /* CSMA is disabled. */ + logout("%p received while CSMA is disabled\n", s); + return -1; +#if !defined(CONFIG_PAD_RECEIVED_FRAMES) + } else if (size < 64 && (s->configuration[7] & BIT(0))) { + /* Short frame and configuration byte 7/0 (discard short receive) set: + * Short frame is discarded */ + logout("%p received short frame (%zu byte)\n", s, size); + s->statistics.rx_short_frame_errors++; + return -1; +#endif + } else if ((size > MAX_ETH_FRAME_SIZE + 4) && !(s->configuration[18] & BIT(3))) { + /* Long frame and configuration byte 18/3 (long receive ok) not set: + * Long frames are discarded. */ + logout("%p received long frame (%zu byte), ignored\n", s, size); + return -1; + } else if (memcmp(buf, s->conf.macaddr.a, 6) == 0) { /* !!! */ + /* Frame matches individual address. */ + /* TODO: check configuration byte 15/4 (ignore U/L). */ + TRACE(RXTX, logout("%p received frame for me, len=%zu\n", s, size)); + } else if (memcmp(buf, broadcast_macaddr, 6) == 0) { + /* Broadcast frame. */ + TRACE(RXTX, logout("%p received broadcast, len=%zu\n", s, size)); + rfd_status |= 0x0002; + } else if (buf[0] & 0x01) { + /* Multicast frame. */ + TRACE(RXTX, logout("%p received multicast, len=%zu,%s\n", s, size, nic_dump(buf, size))); + if (s->configuration[21] & BIT(3)) { + /* Multicast all bit is set, receive all multicast frames. */ + } else { + unsigned mcast_idx = (net_crc32(buf, ETH_ALEN) & BITS(7, 2)) >> 2; + assert(mcast_idx < 64); + if (s->mult[mcast_idx >> 3] & (1 << (mcast_idx & 7))) { + /* Multicast frame is allowed in hash table. */ + } else if (s->configuration[15] & BIT(0)) { + /* Promiscuous: receive all. */ + rfd_status |= 0x0004; + } else { + TRACE(RXTX, logout("%p multicast ignored\n", s)); + return -1; + } + } + /* TODO: Next not for promiscuous mode? */ + rfd_status |= 0x0002; + } else if (s->configuration[15] & BIT(0)) { + /* Promiscuous: receive all. */ + TRACE(RXTX, logout("%p received frame in promiscuous mode, len=%zu\n", s, size)); + rfd_status |= 0x0004; + } else if (s->configuration[20] & BIT(6)) { + /* Multiple IA bit set. */ + unsigned mcast_idx = net_crc32(buf, ETH_ALEN) >> 26; + assert(mcast_idx < 64); + if (s->mult[mcast_idx >> 3] & (1 << (mcast_idx & 7))) { + TRACE(RXTX, logout("%p accepted, multiple IA bit set\n", s)); + } else { + TRACE(RXTX, logout("%p frame ignored, multiple IA bit set\n", s)); + return -1; + } + } else { + TRACE(RXTX, logout("%p received frame, ignored, len=%zu,%s\n", s, size, + nic_dump(buf, size))); + return size; + } + + if (get_ru_state(s) != ru_ready) { + /* No resources available. */ + logout("no resources, state=%u\n", get_ru_state(s)); + /* TODO: RNR interrupt only at first failed frame? */ + eepro100_rnr_interrupt(s); + s->statistics.rx_resource_errors++; +#if 0 + assert(!"no resources"); +#endif + return -1; + } + /* !!! 
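+     *
+     * Editor's sketch of the receive frame descriptor consumed below,
+     * inferred from the offsetof()/sizeof() uses on eepro100_rx_t:
+     * status and command words, a 32-bit link to the next RFD, the
+     * (unused here) rx_buf_addr, and the count/size pair; the frame
+     * payload is DMAed to the guest directly after the descriptor,
+     * and rfd_status plus the actual byte count are written back.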
*/ + eepro100_rx_t rx; + pci_dma_read(&s->dev, s->ru_base + s->ru_offset, + &rx, sizeof(eepro100_rx_t)); + uint16_t rfd_command = le16_to_cpu(rx.command); + uint16_t rfd_size = le16_to_cpu(rx.size); + + if (size > rfd_size) { + logout("Receive buffer (%" PRId16 " bytes) too small for data " + "(%zu bytes); data truncated\n", rfd_size, size); + size = rfd_size; + } +#if !defined(CONFIG_PAD_RECEIVED_FRAMES) + if (size < 64) { + rfd_status |= 0x0080; + } +#endif + TRACE(OTHER, logout("command 0x%04x, link 0x%08x, addr 0x%08x, size %u\n", + rfd_command, rx.link, rx.rx_buf_addr, rfd_size)); + stw_le_pci_dma(&s->dev, s->ru_base + s->ru_offset + + offsetof(eepro100_rx_t, status), rfd_status); + stw_le_pci_dma(&s->dev, s->ru_base + s->ru_offset + + offsetof(eepro100_rx_t, count), size); + /* Early receive interrupt not supported. */ +#if 0 + eepro100_er_interrupt(s); +#endif + /* Receive CRC Transfer not supported. */ + if (s->configuration[18] & BIT(2)) { + missing("Receive CRC Transfer"); + return -1; + } + /* TODO: check stripping enable bit. */ +#if 0 + assert(!(s->configuration[17] & BIT(0))); +#endif + pci_dma_write(&s->dev, s->ru_base + s->ru_offset + + sizeof(eepro100_rx_t), buf, size); + s->statistics.rx_good_frames++; + eepro100_fr_interrupt(s); + s->ru_offset = le32_to_cpu(rx.link); + if (rfd_command & COMMAND_EL) { + /* EL bit is set, so this was the last frame. */ + logout("receive: Running out of frames\n"); + set_ru_state(s, ru_no_resources); + eepro100_rnr_interrupt(s); + } + if (rfd_command & COMMAND_S) { + /* S bit is set. */ + set_ru_state(s, ru_suspended); + } + return size; +} + +static const VMStateDescription vmstate_eepro100 = { + .version_id = 3, + .minimum_version_id = 2, + .fields = (VMStateField[]) { + VMSTATE_PCI_DEVICE(dev, EEPRO100State), + VMSTATE_UNUSED(32), + VMSTATE_BUFFER(mult, EEPRO100State), + VMSTATE_BUFFER(mem, EEPRO100State), + /* Save all members of struct between scb_stat and mem. */ + VMSTATE_UINT8(scb_stat, EEPRO100State), + VMSTATE_UINT8(int_stat, EEPRO100State), + VMSTATE_UNUSED(3*4), + VMSTATE_MACADDR(conf.macaddr, EEPRO100State), + VMSTATE_UNUSED(19*4), + VMSTATE_UINT16_ARRAY(mdimem, EEPRO100State, 32), + /* The eeprom should be saved and restored by its own routines. */ + VMSTATE_UINT32(device, EEPRO100State), + /* TODO check device. */ + VMSTATE_UINT32(cu_base, EEPRO100State), + VMSTATE_UINT32(cu_offset, EEPRO100State), + VMSTATE_UINT32(ru_base, EEPRO100State), + VMSTATE_UINT32(ru_offset, EEPRO100State), + VMSTATE_UINT32(statsaddr, EEPRO100State), + /* Save eepro100_stats_t statistics. 
*/ + VMSTATE_UINT32(statistics.tx_good_frames, EEPRO100State), + VMSTATE_UINT32(statistics.tx_max_collisions, EEPRO100State), + VMSTATE_UINT32(statistics.tx_late_collisions, EEPRO100State), + VMSTATE_UINT32(statistics.tx_underruns, EEPRO100State), + VMSTATE_UINT32(statistics.tx_lost_crs, EEPRO100State), + VMSTATE_UINT32(statistics.tx_deferred, EEPRO100State), + VMSTATE_UINT32(statistics.tx_single_collisions, EEPRO100State), + VMSTATE_UINT32(statistics.tx_multiple_collisions, EEPRO100State), + VMSTATE_UINT32(statistics.tx_total_collisions, EEPRO100State), + VMSTATE_UINT32(statistics.rx_good_frames, EEPRO100State), + VMSTATE_UINT32(statistics.rx_crc_errors, EEPRO100State), + VMSTATE_UINT32(statistics.rx_alignment_errors, EEPRO100State), + VMSTATE_UINT32(statistics.rx_resource_errors, EEPRO100State), + VMSTATE_UINT32(statistics.rx_overrun_errors, EEPRO100State), + VMSTATE_UINT32(statistics.rx_cdt_errors, EEPRO100State), + VMSTATE_UINT32(statistics.rx_short_frame_errors, EEPRO100State), + VMSTATE_UINT32(statistics.fc_xmt_pause, EEPRO100State), + VMSTATE_UINT32(statistics.fc_rcv_pause, EEPRO100State), + VMSTATE_UINT32(statistics.fc_rcv_unsupported, EEPRO100State), + VMSTATE_UINT16(statistics.xmt_tco_frames, EEPRO100State), + VMSTATE_UINT16(statistics.rcv_tco_frames, EEPRO100State), + /* Configuration bytes. */ + VMSTATE_BUFFER(configuration, EEPRO100State), + VMSTATE_END_OF_LIST() + } +}; + +static void pci_nic_uninit(PCIDevice *pci_dev) +{ + EEPRO100State *s = DO_UPCAST(EEPRO100State, dev, pci_dev); + + vmstate_unregister(VMSTATE_IF(&pci_dev->qdev), s->vmstate, s); + g_free(s->vmstate); + eeprom93xx_free(&pci_dev->qdev, s->eeprom); + qemu_del_nic(s->nic); +} + +static NetClientInfo net_eepro100_info = { + .type = NET_CLIENT_DRIVER_NIC, + .size = sizeof(NICState), + .receive = nic_receive, +}; + +static void e100_nic_realize(PCIDevice *pci_dev, Error **errp) +{ + EEPRO100State *s = DO_UPCAST(EEPRO100State, dev, pci_dev); + E100PCIDeviceInfo *info = eepro100_get_class(s); + Error *local_err = NULL; + + TRACE(OTHER, logout("\n")); + + s->device = info->device; + + e100_pci_reset(s, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + + /* Add 64 * 2 EEPROM. i82557 and i82558 support a 64 word EEPROM, + * i82559 and later support 64 or 256 word EEPROM. */ + s->eeprom = eeprom93xx_new(&pci_dev->qdev, EEPROM_SIZE); + + /* Handler for memory-mapped I/O */ + memory_region_init_io(&s->mmio_bar, OBJECT(s), &eepro100_ops, s, + "eepro100-mmio", PCI_MEM_SIZE); + pci_register_bar(&s->dev, 0, PCI_BASE_ADDRESS_MEM_PREFETCH, &s->mmio_bar); + memory_region_init_io(&s->io_bar, OBJECT(s), &eepro100_ops, s, + "eepro100-io", PCI_IO_SIZE); + pci_register_bar(&s->dev, 1, PCI_BASE_ADDRESS_SPACE_IO, &s->io_bar); + /* FIXME: flash aliases to mmio?! 
*/ + memory_region_init_io(&s->flash_bar, OBJECT(s), &eepro100_ops, s, + "eepro100-flash", PCI_FLASH_SIZE); + pci_register_bar(&s->dev, 2, 0, &s->flash_bar); + + qemu_macaddr_default_if_unset(&s->conf.macaddr); + logout("macaddr: %s\n", nic_dump(&s->conf.macaddr.a[0], 6)); + + nic_reset(s); + + s->nic = qemu_new_nic(&net_eepro100_info, &s->conf, + object_get_typename(OBJECT(pci_dev)), pci_dev->qdev.id, s); + + qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a); + TRACE(OTHER, logout("%s\n", qemu_get_queue(s->nic)->info_str)); + + qemu_register_reset(nic_reset, s); + + s->vmstate = g_memdup(&vmstate_eepro100, sizeof(vmstate_eepro100)); + s->vmstate->name = qemu_get_queue(s->nic)->model; + vmstate_register(VMSTATE_IF(&pci_dev->qdev), VMSTATE_INSTANCE_ID_ANY, + s->vmstate, s); +} + +static void eepro100_instance_init(Object *obj) +{ + EEPRO100State *s = DO_UPCAST(EEPRO100State, dev, PCI_DEVICE(obj)); + device_add_bootindex_property(obj, &s->conf.bootindex, + "bootindex", "/ethernet-phy@0", + DEVICE(s)); +} + +static E100PCIDeviceInfo e100_devices[] = { + { + .name = "i82550", + .desc = "Intel i82550 Ethernet", + .device = i82550, + /* TODO: check device id. */ + .device_id = PCI_DEVICE_ID_INTEL_82551IT, + /* Revision ID: 0x0c, 0x0d, 0x0e. */ + .revision = 0x0e, + /* TODO: check size of statistical counters. */ + .stats_size = 80, + /* TODO: check extended tcb support. */ + .has_extended_tcb_support = true, + .power_management = true, + },{ + .name = "i82551", + .desc = "Intel i82551 Ethernet", + .device = i82551, + .device_id = PCI_DEVICE_ID_INTEL_82551IT, + /* Revision ID: 0x0f, 0x10. */ + .revision = 0x0f, + /* TODO: check size of statistical counters. */ + .stats_size = 80, + .has_extended_tcb_support = true, + .power_management = true, + },{ + .name = "i82557a", + .desc = "Intel i82557A Ethernet", + .device = i82557A, + .device_id = PCI_DEVICE_ID_INTEL_82557, + .revision = 0x01, + .power_management = false, + },{ + .name = "i82557b", + .desc = "Intel i82557B Ethernet", + .device = i82557B, + .device_id = PCI_DEVICE_ID_INTEL_82557, + .revision = 0x02, + .power_management = false, + },{ + .name = "i82557c", + .desc = "Intel i82557C Ethernet", + .device = i82557C, + .device_id = PCI_DEVICE_ID_INTEL_82557, + .revision = 0x03, + .power_management = false, + },{ + .name = "i82558a", + .desc = "Intel i82558A Ethernet", + .device = i82558A, + .device_id = PCI_DEVICE_ID_INTEL_82557, + .revision = 0x04, + .stats_size = 76, + .has_extended_tcb_support = true, + .power_management = true, + },{ + .name = "i82558b", + .desc = "Intel i82558B Ethernet", + .device = i82558B, + .device_id = PCI_DEVICE_ID_INTEL_82557, + .revision = 0x05, + .stats_size = 76, + .has_extended_tcb_support = true, + .power_management = true, + },{ + .name = "i82559a", + .desc = "Intel i82559A Ethernet", + .device = i82559A, + .device_id = PCI_DEVICE_ID_INTEL_82557, + .revision = 0x06, + .stats_size = 80, + .has_extended_tcb_support = true, + .power_management = true, + },{ + .name = "i82559b", + .desc = "Intel i82559B Ethernet", + .device = i82559B, + .device_id = PCI_DEVICE_ID_INTEL_82557, + .revision = 0x07, + .stats_size = 80, + .has_extended_tcb_support = true, + .power_management = true, + },{ + .name = "i82559c", + .desc = "Intel i82559C Ethernet", + .device = i82559C, + .device_id = PCI_DEVICE_ID_INTEL_82557, +#if 0 + .revision = 0x08, +#endif + /* TODO: Windows wants revision id 0x0c. 
*/ + .revision = 0x0c, +#if EEPROM_SIZE > 0 + .subsystem_vendor_id = PCI_VENDOR_ID_INTEL, + .subsystem_id = 0x0040, +#endif + .stats_size = 80, + .has_extended_tcb_support = true, + .power_management = true, + },{ + .name = "i82559er", + .desc = "Intel i82559ER Ethernet", + .device = i82559ER, + .device_id = PCI_DEVICE_ID_INTEL_82551IT, + .revision = 0x09, + .stats_size = 80, + .has_extended_tcb_support = true, + .power_management = true, + },{ + .name = "i82562", + .desc = "Intel i82562 Ethernet", + .device = i82562, + /* TODO: check device id. */ + .device_id = PCI_DEVICE_ID_INTEL_82551IT, + /* TODO: wrong revision id. */ + .revision = 0x0e, + .stats_size = 80, + .has_extended_tcb_support = true, + .power_management = true, + },{ + /* Toshiba Tecra 8200. */ + .name = "i82801", + .desc = "Intel i82801 Ethernet", + .device = i82801, + .device_id = 0x2449, + .revision = 0x03, + .stats_size = 80, + .has_extended_tcb_support = true, + .power_management = true, + } +}; + +static E100PCIDeviceInfo *eepro100_get_class_by_name(const char *typename) +{ + E100PCIDeviceInfo *info = NULL; + int i; + + /* This is admittedly awkward but also temporary. QOM allows for + * parameterized typing and for subclassing both of which would suitable + * handle what's going on here. But class_data is already being used as + * a stop-gap hack to allow incremental qdev conversion so we cannot use it + * right now. Once we merge the final QOM series, we can come back here and + * do this in a much more elegant fashion. + */ + for (i = 0; i < ARRAY_SIZE(e100_devices); i++) { + if (strcmp(e100_devices[i].name, typename) == 0) { + info = &e100_devices[i]; + break; + } + } + assert(info != NULL); + + return info; +} + +static E100PCIDeviceInfo *eepro100_get_class(EEPRO100State *s) +{ + return eepro100_get_class_by_name(object_get_typename(OBJECT(s))); +} + +static Property e100_properties[] = { + DEFINE_NIC_PROPERTIES(EEPRO100State, conf), + DEFINE_PROP_END_OF_LIST(), +}; + +static void eepro100_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + E100PCIDeviceInfo *info; + + info = eepro100_get_class_by_name(object_class_get_name(klass)); + + set_bit(DEVICE_CATEGORY_NETWORK, dc->categories); + device_class_set_props(dc, e100_properties); + dc->desc = info->desc; + k->vendor_id = PCI_VENDOR_ID_INTEL; + k->class_id = PCI_CLASS_NETWORK_ETHERNET; + k->romfile = "pxe-eepro100.rom"; + k->realize = e100_nic_realize; + k->exit = pci_nic_uninit; + k->device_id = info->device_id; + k->revision = info->revision; + k->subsystem_vendor_id = info->subsystem_vendor_id; + k->subsystem_id = info->subsystem_id; +} + +static void eepro100_register_types(void) +{ + size_t i; + for (i = 0; i < ARRAY_SIZE(e100_devices); i++) { + TypeInfo type_info = {}; + E100PCIDeviceInfo *info = &e100_devices[i]; + + type_info.name = info->name; + type_info.parent = TYPE_PCI_DEVICE; + type_info.class_init = eepro100_class_init; + type_info.instance_size = sizeof(EEPRO100State); + type_info.instance_init = eepro100_instance_init; + type_info.interfaces = (InterfaceInfo[]) { + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { }, + }; + + type_register(&type_info); + } +} + +type_init(eepro100_register_types) diff --git a/hw/net/etraxfs_eth.c b/hw/net/etraxfs_eth.c new file mode 100644 index 000000000..1b82aec79 --- /dev/null +++ b/hw/net/etraxfs_eth.c @@ -0,0 +1,688 @@ +/* + * QEMU ETRAX Ethernet Controller. + * + * Copyright (c) 2008 Edgar E. Iglesias, Axis Communications AB. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "hw/sysbus.h" +#include "net/net.h" +#include "hw/cris/etraxfs.h" +#include "qemu/error-report.h" +#include "qemu/module.h" +#include "trace.h" +#include "qom/object.h" + +#define D(x) + +/* Advertisement control register. */ +#define ADVERTISE_10HALF 0x0020 /* Try for 10mbps half-duplex */ +#define ADVERTISE_10FULL 0x0040 /* Try for 10mbps full-duplex */ +#define ADVERTISE_100HALF 0x0080 /* Try for 100mbps half-duplex */ +#define ADVERTISE_100FULL 0x0100 /* Try for 100mbps full-duplex */ + +/* + * The MDIO extensions in the TDK PHY model were reversed engineered from the + * linux driver (PHYID and Diagnostics reg). + * TODO: Add friendly names for the register nums. + */ +struct qemu_phy +{ + uint32_t regs[32]; + + int link; + + unsigned int (*read)(struct qemu_phy *phy, unsigned int req); + void (*write)(struct qemu_phy *phy, unsigned int req, unsigned int data); +}; + +static unsigned int tdk_read(struct qemu_phy *phy, unsigned int req) +{ + int regnum; + unsigned r = 0; + + regnum = req & 0x1f; + + switch (regnum) { + case 1: + if (!phy->link) { + break; + } + /* MR1. */ + /* Speeds and modes. */ + r |= (1 << 13) | (1 << 14); + r |= (1 << 11) | (1 << 12); + r |= (1 << 5); /* Autoneg complete. */ + r |= (1 << 3); /* Autoneg able. */ + r |= (1 << 2); /* link. */ + break; + case 5: + /* Link partner ability. + We are kind; always agree with whatever best mode + the guest advertises. */ + r = 1 << 14; /* Success. */ + /* Copy advertised modes. */ + r |= phy->regs[4] & (15 << 5); + /* Autoneg support. */ + r |= 1; + break; + case 18: + { + /* Diagnostics reg. */ + int duplex = 0; + int speed_100 = 0; + + if (!phy->link) { + break; + } + + /* Are we advertising 100 half or 100 duplex ? */ + speed_100 = !!(phy->regs[4] & ADVERTISE_100HALF); + speed_100 |= !!(phy->regs[4] & ADVERTISE_100FULL); + + /* Are we advertising 10 duplex or 100 duplex ? 
*/ + duplex = !!(phy->regs[4] & ADVERTISE_100FULL); + duplex |= !!(phy->regs[4] & ADVERTISE_10FULL); + r = (speed_100 << 10) | (duplex << 11); + } + break; + + default: + r = phy->regs[regnum]; + break; + } + trace_mdio_phy_read(regnum, r); + return r; +} + +static void +tdk_write(struct qemu_phy *phy, unsigned int req, unsigned int data) +{ + int regnum; + + regnum = req & 0x1f; + trace_mdio_phy_write(regnum, data); + switch (regnum) { + default: + phy->regs[regnum] = data; + break; + } +} + +static void +tdk_reset(struct qemu_phy *phy) +{ + phy->regs[0] = 0x3100; + /* PHY Id. */ + phy->regs[2] = 0x0300; + phy->regs[3] = 0xe400; + /* Autonegotiation advertisement reg. */ + phy->regs[4] = 0x01E1; + phy->link = 1; +} + +struct qemu_mdio +{ + /* bus. */ + int mdc; + int mdio; + + /* decoder. */ + enum { + PREAMBLE, + SOF, + OPC, + ADDR, + REQ, + TURNAROUND, + DATA + } state; + unsigned int drive; + + unsigned int cnt; + unsigned int addr; + unsigned int opc; + unsigned int req; + unsigned int data; + + struct qemu_phy *devs[32]; +}; + +static void +mdio_attach(struct qemu_mdio *bus, struct qemu_phy *phy, unsigned int addr) +{ + bus->devs[addr & 0x1f] = phy; +} + +#ifdef USE_THIS_DEAD_CODE +static void +mdio_detach(struct qemu_mdio *bus, struct qemu_phy *phy, unsigned int addr) +{ + bus->devs[addr & 0x1f] = NULL; +} +#endif + +static void mdio_read_req(struct qemu_mdio *bus) +{ + struct qemu_phy *phy; + + phy = bus->devs[bus->addr]; + if (phy && phy->read) { + bus->data = phy->read(phy, bus->req); + } else { + bus->data = 0xffff; + } +} + +static void mdio_write_req(struct qemu_mdio *bus) +{ + struct qemu_phy *phy; + + phy = bus->devs[bus->addr]; + if (phy && phy->write) { + phy->write(phy, bus->req, bus->data); + } +} + +static void mdio_cycle(struct qemu_mdio *bus) +{ + bus->cnt++; + + trace_mdio_bitbang(bus->mdc, bus->mdio, bus->state, bus->cnt, bus->drive); +#if 0 + if (bus->mdc) { + printf("%d", bus->mdio); + } +#endif + switch (bus->state) { + case PREAMBLE: + if (bus->mdc) { + if (bus->cnt >= (32 * 2) && !bus->mdio) { + bus->cnt = 0; + bus->state = SOF; + bus->data = 0; + } + } + break; + case SOF: + if (bus->mdc) { + if (bus->mdio != 1) { + printf("WARNING: no SOF\n"); + } + if (bus->cnt == 1*2) { + bus->cnt = 0; + bus->opc = 0; + bus->state = OPC; + } + } + break; + case OPC: + if (bus->mdc) { + bus->opc <<= 1; + bus->opc |= bus->mdio & 1; + if (bus->cnt == 2*2) { + bus->cnt = 0; + bus->addr = 0; + bus->state = ADDR; + } + } + break; + case ADDR: + if (bus->mdc) { + bus->addr <<= 1; + bus->addr |= bus->mdio & 1; + + if (bus->cnt == 5*2) { + bus->cnt = 0; + bus->req = 0; + bus->state = REQ; + } + } + break; + case REQ: + if (bus->mdc) { + bus->req <<= 1; + bus->req |= bus->mdio & 1; + if (bus->cnt == 5*2) { + bus->cnt = 0; + bus->state = TURNAROUND; + } + } + break; + case TURNAROUND: + if (bus->mdc && bus->cnt == 2*2) { + bus->mdio = 0; + bus->cnt = 0; + + if (bus->opc == 2) { + bus->drive = 1; + mdio_read_req(bus); + bus->mdio = bus->data & 1; + } + bus->state = DATA; + } + break; + case DATA: + if (!bus->mdc) { + if (bus->drive) { + bus->mdio = !!(bus->data & (1 << 15)); + bus->data <<= 1; + } + } else { + if (!bus->drive) { + bus->data <<= 1; + bus->data |= bus->mdio; + } + if (bus->cnt == 16 * 2) { + bus->cnt = 0; + bus->state = PREAMBLE; + if (!bus->drive) { + mdio_write_req(bus); + } + bus->drive = 0; + } + } + break; + default: + break; + } +} + +/* ETRAX-FS Ethernet MAC block starts here. 
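+
+   Editor's note on the bit-banged MDIO decoder above: the cnt thresholds
+   in mdio_cycle() (counted in mdc half-cycles) correspond to a standard
+   clause-22 frame - 32-bit preamble, 2-bit start, 2-bit opcode (2 = read),
+   5-bit PHY address, 5-bit register, 2-bit turnaround, then 16 data bits,
+   MSB first.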
*/
+
+#define RW_MA0_LO 0x00
+#define RW_MA0_HI 0x01
+#define RW_MA1_LO 0x02
+#define RW_MA1_HI 0x03
+#define RW_GA_LO 0x04
+#define RW_GA_HI 0x05
+#define RW_GEN_CTRL 0x06
+#define RW_REC_CTRL 0x07
+#define RW_TR_CTRL 0x08
+#define RW_CLR_ERR 0x09
+#define RW_MGM_CTRL 0x0a
+#define R_STAT 0x0b
+#define FS_ETH_MAX_REGS 0x17
+
+#define TYPE_ETRAX_FS_ETH "etraxfs-eth"
+OBJECT_DECLARE_SIMPLE_TYPE(ETRAXFSEthState, ETRAX_FS_ETH)
+
+struct ETRAXFSEthState {
+    SysBusDevice parent_obj;
+
+    MemoryRegion mmio;
+    NICState *nic;
+    NICConf conf;
+
+    /* Two addrs in the filter. */
+    uint8_t macaddr[2][6];
+    uint32_t regs[FS_ETH_MAX_REGS];
+
+    struct etraxfs_dma_client *dma_out;
+    struct etraxfs_dma_client *dma_in;
+
+    /* MDIO bus. */
+    struct qemu_mdio mdio_bus;
+    unsigned int phyaddr;
+    int duplex_mismatch;
+
+    /* PHY. */
+    struct qemu_phy phy;
+};
+
+static void eth_validate_duplex(ETRAXFSEthState *eth)
+{
+    struct qemu_phy *phy;
+    unsigned int phy_duplex;
+    unsigned int mac_duplex;
+    int new_mm = 0;
+
+    phy = eth->mdio_bus.devs[eth->phyaddr];
+    phy_duplex = !!(phy->read(phy, 18) & (1 << 11));
+    mac_duplex = !!(eth->regs[RW_REC_CTRL] & 128);
+
+    if (mac_duplex != phy_duplex) {
+        new_mm = 1;
+    }
+
+    if (eth->regs[RW_GEN_CTRL] & 1) {
+        if (new_mm != eth->duplex_mismatch) {
+            if (new_mm) {
+                printf("HW: WARNING ETH duplex mismatch MAC=%d PHY=%d\n",
+                       mac_duplex, phy_duplex);
+            } else {
+                printf("HW: ETH duplex ok.\n");
+            }
+        }
+        eth->duplex_mismatch = new_mm;
+    }
+}
+
+static uint64_t
+eth_read(void *opaque, hwaddr addr, unsigned int size)
+{
+    ETRAXFSEthState *eth = opaque;
+    uint32_t r = 0;
+
+    addr >>= 2;
+
+    switch (addr) {
+    case R_STAT:
+        r = eth->mdio_bus.mdio & 1;
+        break;
+    default:
+        r = eth->regs[addr];
+        D(printf("%s %x\n", __func__, addr * 4));
+        break;
+    }
+    return r;
+}
+
+static void eth_update_ma(ETRAXFSEthState *eth, int ma)
+{
+    int reg;
+    int i = 0;
+
+    ma &= 1;
+
+    reg = RW_MA0_LO;
+    if (ma) {
+        reg = RW_MA1_LO;
+    }
+
+    eth->macaddr[ma][i++] = eth->regs[reg];
+    eth->macaddr[ma][i++] = eth->regs[reg] >> 8;
+    eth->macaddr[ma][i++] = eth->regs[reg] >> 16;
+    eth->macaddr[ma][i++] = eth->regs[reg] >> 24;
+    eth->macaddr[ma][i++] = eth->regs[reg + 1];
+    eth->macaddr[ma][i] = eth->regs[reg + 1] >> 8;
+
+    D(printf("set mac%d=%x.%x.%x.%x.%x.%x\n", ma,
+             eth->macaddr[ma][0], eth->macaddr[ma][1],
+             eth->macaddr[ma][2], eth->macaddr[ma][3],
+             eth->macaddr[ma][4], eth->macaddr[ma][5]));
+}
+
+static void
+eth_write(void *opaque, hwaddr addr,
+          uint64_t val64, unsigned int size)
+{
+    ETRAXFSEthState *eth = opaque;
+    uint32_t value = val64;
+
+    addr >>= 2;
+    switch (addr) {
+    case RW_MA0_LO:
+    case RW_MA0_HI:
+        eth->regs[addr] = value;
+        eth_update_ma(eth, 0);
+        break;
+    case RW_MA1_LO:
+    case RW_MA1_HI:
+        eth->regs[addr] = value;
+        eth_update_ma(eth, 1);
+        break;
+
+    case RW_MGM_CTRL:
+        /* Attach an MDIO/PHY abstraction. */
+        if (value & 2) {
+            eth->mdio_bus.mdio = value & 1;
+        }
+        if (eth->mdio_bus.mdc != (value & 4)) {
+            mdio_cycle(&eth->mdio_bus);
+            eth_validate_duplex(eth);
+        }
+        eth->mdio_bus.mdc = !!(value & 4);
+        eth->regs[addr] = value;
+        break;
+
+    case RW_REC_CTRL:
+        eth->regs[addr] = value;
+        eth_validate_duplex(eth);
+        break;
+
+    default:
+        eth->regs[addr] = value;
+        D(printf("%s %x %x\n", __func__, addr, value));
+        break;
+    }
+}
+
+/* The ETRAX FS has a group address table (GAT) which works like a k=1 Bloom
+   filter, dropping group addresses we have not joined.  The filter has 64
+   bits (m).  The hash function is a simple nibble XOR of the group address.
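+
+   Editor's worked example, computed from the XOR folding in
+   eth_match_groupaddr() below: the six address bytes fold into a 6-bit
+   index, e.g. 01:00:5e:00:00:01 (the all-hosts IPv4 multicast group)
+   hashes to 0x26 = 38, i.e. bit 6 of RW_GA_HI must be set for such
+   frames to pass the filter.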
*/ +static int eth_match_groupaddr(ETRAXFSEthState *eth, const unsigned char *sa) +{ + unsigned int hsh; + int m_individual = eth->regs[RW_REC_CTRL] & 4; + int match; + + /* First bit on the wire of a MAC address signals multicast or + physical address. */ + if (!m_individual && !(sa[0] & 1)) { + return 0; + } + + /* Calculate the hash index for the GA registers. */ + hsh = 0; + hsh ^= (*sa) & 0x3f; + hsh ^= ((*sa) >> 6) & 0x03; + ++sa; + hsh ^= ((*sa) << 2) & 0x03c; + hsh ^= ((*sa) >> 4) & 0xf; + ++sa; + hsh ^= ((*sa) << 4) & 0x30; + hsh ^= ((*sa) >> 2) & 0x3f; + ++sa; + hsh ^= (*sa) & 0x3f; + hsh ^= ((*sa) >> 6) & 0x03; + ++sa; + hsh ^= ((*sa) << 2) & 0x03c; + hsh ^= ((*sa) >> 4) & 0xf; + ++sa; + hsh ^= ((*sa) << 4) & 0x30; + hsh ^= ((*sa) >> 2) & 0x3f; + + hsh &= 63; + if (hsh > 31) { + match = eth->regs[RW_GA_HI] & (1 << (hsh - 32)); + } else { + match = eth->regs[RW_GA_LO] & (1 << hsh); + } + D(printf("hsh=%x ga=%x.%x mtch=%d\n", hsh, + eth->regs[RW_GA_HI], eth->regs[RW_GA_LO], match)); + return match; +} + +static ssize_t eth_receive(NetClientState *nc, const uint8_t *buf, size_t size) +{ + unsigned char sa_bcast[6] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; + ETRAXFSEthState *eth = qemu_get_nic_opaque(nc); + int use_ma0 = eth->regs[RW_REC_CTRL] & 1; + int use_ma1 = eth->regs[RW_REC_CTRL] & 2; + int r_bcast = eth->regs[RW_REC_CTRL] & 8; + + if (size < 12) { + return -1; + } + + D(printf("%x.%x.%x.%x.%x.%x ma=%d %d bc=%d\n", + buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], + use_ma0, use_ma1, r_bcast)); + + /* Does the frame get through the address filters? */ + if ((!use_ma0 || memcmp(buf, eth->macaddr[0], 6)) + && (!use_ma1 || memcmp(buf, eth->macaddr[1], 6)) + && (!r_bcast || memcmp(buf, sa_bcast, 6)) + && !eth_match_groupaddr(eth, buf)) { + return size; + } + + /* FIXME: Find another way to pass on the fake csum. 
*/
+    etraxfs_dmac_input(eth->dma_in, (void *)buf, size + 4, 1);
+
+    return size;
+}
+
+static int eth_tx_push(void *opaque, unsigned char *buf, int len, bool eop)
+{
+    ETRAXFSEthState *eth = opaque;
+
+    D(printf("%s buf=%p len=%d\n", __func__, buf, len));
+    qemu_send_packet(qemu_get_queue(eth->nic), buf, len);
+    return len;
+}
+
+static void eth_set_link(NetClientState *nc)
+{
+    ETRAXFSEthState *eth = qemu_get_nic_opaque(nc);
+    D(printf("%s %d\n", __func__, nc->link_down));
+    eth->phy.link = !nc->link_down;
+}
+
+static const MemoryRegionOps eth_ops = {
+    .read = eth_read,
+    .write = eth_write,
+    .endianness = DEVICE_LITTLE_ENDIAN,
+    .valid = {
+        .min_access_size = 4,
+        .max_access_size = 4
+    }
+};
+
+static NetClientInfo net_etraxfs_info = {
+    .type = NET_CLIENT_DRIVER_NIC,
+    .size = sizeof(NICState),
+    .receive = eth_receive,
+    .link_status_changed = eth_set_link,
+};
+
+static void etraxfs_eth_reset(DeviceState *dev)
+{
+    ETRAXFSEthState *s = ETRAX_FS_ETH(dev);
+
+    memset(s->regs, 0, sizeof(s->regs));
+    memset(s->macaddr, 0, sizeof(s->macaddr));
+    s->duplex_mismatch = 0;
+
+    s->mdio_bus.mdc = 0;
+    s->mdio_bus.mdio = 0;
+    s->mdio_bus.state = 0;
+    s->mdio_bus.drive = 0;
+    s->mdio_bus.cnt = 0;
+    s->mdio_bus.addr = 0;
+    s->mdio_bus.opc = 0;
+    s->mdio_bus.req = 0;
+    s->mdio_bus.data = 0;
+
+    tdk_reset(&s->phy);
+}
+
+static void etraxfs_eth_realize(DeviceState *dev, Error **errp)
+{
+    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
+    ETRAXFSEthState *s = ETRAX_FS_ETH(dev);
+
+    if (!s->dma_out || !s->dma_in) {
+        error_setg(errp, "Unconnected ETRAX-FS Ethernet MAC");
+        return;
+    }
+
+    s->dma_out->client.push = eth_tx_push;
+    s->dma_out->client.opaque = s;
+    s->dma_in->client.opaque = s;
+    s->dma_in->client.pull = NULL;
+
+    memory_region_init_io(&s->mmio, OBJECT(dev), &eth_ops, s,
+                          "etraxfs-eth", 0x5c);
+    sysbus_init_mmio(sbd, &s->mmio);
+
+    qemu_macaddr_default_if_unset(&s->conf.macaddr);
+    s->nic = qemu_new_nic(&net_etraxfs_info, &s->conf,
+                          object_get_typename(OBJECT(s)), dev->id, s);
+    qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a);
+
+    s->phy.read = tdk_read;
+    s->phy.write = tdk_write;
+    mdio_attach(&s->mdio_bus, &s->phy, s->phyaddr);
+}
+
+static Property etraxfs_eth_properties[] = {
+    DEFINE_PROP_UINT32("phyaddr", ETRAXFSEthState, phyaddr, 1),
+    DEFINE_NIC_PROPERTIES(ETRAXFSEthState, conf),
+    DEFINE_PROP_END_OF_LIST(),
+};
+
+static void etraxfs_eth_class_init(ObjectClass *klass, void *data)
+{
+    DeviceClass *dc = DEVICE_CLASS(klass);
+
+    dc->realize = etraxfs_eth_realize;
+    dc->reset = etraxfs_eth_reset;
+    device_class_set_props(dc, etraxfs_eth_properties);
+    /* Reason: dma_out, dma_in are not user settable */
+    dc->user_creatable = false;
+}
+
+
+/* Instantiate an ETRAXFS Ethernet MAC. */
+DeviceState *
+etraxfs_eth_init(NICInfo *nd, hwaddr base, int phyaddr,
+                 struct etraxfs_dma_client *dma_out,
+                 struct etraxfs_dma_client *dma_in)
+{
+    DeviceState *dev;
+    qemu_check_nic_model(nd, "fseth");
+
+    dev = qdev_new("etraxfs-eth");
+    qdev_set_nic_properties(dev, nd);
+    qdev_prop_set_uint32(dev, "phyaddr", phyaddr);
+
+    /*
+     * TODO: QOM design, define a QOM interface for "I am an etraxfs
+     * DMA client" (which replaces the current 'struct
+     * etraxfs_dma_client' ad-hoc interface), implement it on the
+     * ethernet device, and then have QOM link properties on the DMA
+     * controller device so that you can pass the interface
+     * implementations to it.
+ */ + ETRAX_FS_ETH(dev)->dma_out = dma_out; + ETRAX_FS_ETH(dev)->dma_in = dma_in; + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base); + + return dev; +} + +static const TypeInfo etraxfs_eth_info = { + .name = TYPE_ETRAX_FS_ETH, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(ETRAXFSEthState), + .class_init = etraxfs_eth_class_init, +}; + +static void etraxfs_eth_register_types(void) +{ + type_register_static(&etraxfs_eth_info); +} + +type_init(etraxfs_eth_register_types) diff --git a/hw/net/fsl_etsec/etsec.c b/hw/net/fsl_etsec/etsec.c new file mode 100644 index 000000000..bd9d62b55 --- /dev/null +++ b/hw/net/fsl_etsec/etsec.c @@ -0,0 +1,469 @@ +/* + * QEMU Freescale eTSEC Emulator + * + * Copyright (c) 2011-2013 AdaCore + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +/* + * This implementation doesn't include ring priority, TCP/IP Off-Load, QoS. + */ + +#include "qemu/osdep.h" +#include "qemu-common.h" +#include "hw/sysbus.h" +#include "hw/irq.h" +#include "hw/ptimer.h" +#include "hw/qdev-properties.h" +#include "etsec.h" +#include "registers.h" +#include "qapi/error.h" +#include "qemu/log.h" +#include "qemu/module.h" + +/* #define HEX_DUMP */ +/* #define DEBUG_REGISTER */ + +#ifdef DEBUG_REGISTER +static const int debug_etsec = 1; +#else +static const int debug_etsec; +#endif + +#define DPRINTF(fmt, ...) do { \ + if (debug_etsec) { \ + qemu_log(fmt , ## __VA_ARGS__); \ + } \ + } while (0) + +/* call after any change to IEVENT or IMASK */ +void etsec_update_irq(eTSEC *etsec) +{ + uint32_t ievent = etsec->regs[IEVENT].value; + uint32_t imask = etsec->regs[IMASK].value; + uint32_t active = ievent & imask; + + int tx = !!(active & IEVENT_TX_MASK); + int rx = !!(active & IEVENT_RX_MASK); + int err = !!(active & IEVENT_ERR_MASK); + + DPRINTF("%s IRQ ievent=%"PRIx32" imask=%"PRIx32" %c%c%c", + __func__, ievent, imask, + tx ? 'T' : '_', + rx ? 'R' : '_', + err ? 
'E' : '_'); + + qemu_set_irq(etsec->tx_irq, tx); + qemu_set_irq(etsec->rx_irq, rx); + qemu_set_irq(etsec->err_irq, err); +} + +static uint64_t etsec_read(void *opaque, hwaddr addr, unsigned size) +{ + eTSEC *etsec = opaque; + uint32_t reg_index = addr / 4; + eTSEC_Register *reg = NULL; + uint32_t ret = 0x0; + + assert(reg_index < ETSEC_REG_NUMBER); + + reg = &etsec->regs[reg_index]; + + + switch (reg->access) { + case ACC_WO: + ret = 0x00000000; + break; + + case ACC_RW: + case ACC_W1C: + case ACC_RO: + default: + ret = reg->value; + break; + } + + DPRINTF("Read 0x%08x @ 0x" TARGET_FMT_plx + " : %s (%s)\n", + ret, addr, reg->name, reg->desc); + + return ret; +} + +static void write_tstat(eTSEC *etsec, + eTSEC_Register *reg, + uint32_t reg_index, + uint32_t value) +{ + int i = 0; + + for (i = 0; i < 8; i++) { + /* Check THLTi flag in TSTAT */ + if (value & (1 << (31 - i))) { + etsec_walk_tx_ring(etsec, i); + } + } + + /* Write 1 to clear */ + reg->value &= ~value; +} + +static void write_rstat(eTSEC *etsec, + eTSEC_Register *reg, + uint32_t reg_index, + uint32_t value) +{ + int i = 0; + + for (i = 0; i < 8; i++) { + /* Check QHLTi flag in RSTAT */ + if (value & (1 << (23 - i)) && !(reg->value & (1 << (23 - i)))) { + etsec_walk_rx_ring(etsec, i); + } + } + + /* Write 1 to clear */ + reg->value &= ~value; +} + +static void write_tbasex(eTSEC *etsec, + eTSEC_Register *reg, + uint32_t reg_index, + uint32_t value) +{ + reg->value = value & ~0x7; + + /* Copy this value in the ring's TxBD pointer */ + etsec->regs[TBPTR0 + (reg_index - TBASE0)].value = value & ~0x7; +} + +static void write_rbasex(eTSEC *etsec, + eTSEC_Register *reg, + uint32_t reg_index, + uint32_t value) +{ + reg->value = value & ~0x7; + + /* Copy this value in the ring's RxBD pointer */ + etsec->regs[RBPTR0 + (reg_index - RBASE0)].value = value & ~0x7; +} + +static void write_dmactrl(eTSEC *etsec, + eTSEC_Register *reg, + uint32_t reg_index, + uint32_t value) +{ + reg->value = value; + + if (value & DMACTRL_GRS) { + + if (etsec->rx_buffer_len != 0) { + /* Graceful receive stop delayed until end of frame */ + } else { + /* Graceful receive stop now */ + etsec->regs[IEVENT].value |= IEVENT_GRSC; + etsec_update_irq(etsec); + } + } + + if (value & DMACTRL_GTS) { + + if (etsec->tx_buffer_len != 0) { + /* Graceful transmit stop delayed until end of frame */ + } else { + /* Graceful transmit stop now */ + etsec->regs[IEVENT].value |= IEVENT_GTSC; + etsec_update_irq(etsec); + } + } + + if (!(value & DMACTRL_WOP)) { + /* Start polling */ + ptimer_transaction_begin(etsec->ptimer); + ptimer_stop(etsec->ptimer); + ptimer_set_count(etsec->ptimer, 1); + ptimer_run(etsec->ptimer, 1); + ptimer_transaction_commit(etsec->ptimer); + } +} + +static void etsec_write(void *opaque, + hwaddr addr, + uint64_t value, + unsigned size) +{ + eTSEC *etsec = opaque; + uint32_t reg_index = addr / 4; + eTSEC_Register *reg = NULL; + uint32_t before = 0x0; + + assert(reg_index < ETSEC_REG_NUMBER); + + reg = &etsec->regs[reg_index]; + before = reg->value; + + switch (reg_index) { + case IEVENT: + /* Write 1 to clear */ + reg->value &= ~value; + + etsec_update_irq(etsec); + break; + + case IMASK: + reg->value = value; + + etsec_update_irq(etsec); + break; + + case DMACTRL: + write_dmactrl(etsec, reg, reg_index, value); + break; + + case TSTAT: + write_tstat(etsec, reg, reg_index, value); + break; + + case RSTAT: + write_rstat(etsec, reg, reg_index, value); + break; + + case TBASE0 ... TBASE7: + write_tbasex(etsec, reg, reg_index, value); + break; + + case RBASE0 ... 
RBASE7:
+        write_rbasex(etsec, reg, reg_index, value);
+        break;
+
+    case MIIMCFG ... MIIMIND:
+        etsec_write_miim(etsec, reg, reg_index, value);
+        break;
+
+    default:
+        /* Default handling */
+        switch (reg->access) {
+
+        case ACC_RW:
+        case ACC_WO:
+            reg->value = value;
+            break;
+
+        case ACC_W1C:
+            reg->value &= ~value;
+            break;
+
+        case ACC_RO:
+        default:
+            /* Read Only or Unknown register */
+            break;
+        }
+    }
+
+    DPRINTF("Write 0x%08x @ 0x" TARGET_FMT_plx
+            " val:0x%08x->0x%08x : %s (%s)\n",
+            (unsigned int)value, addr, before, reg->value,
+            reg->name, reg->desc);
+}
+
+static const MemoryRegionOps etsec_ops = {
+    .read = etsec_read,
+    .write = etsec_write,
+    .endianness = DEVICE_NATIVE_ENDIAN,
+    .impl = {
+        .min_access_size = 4,
+        .max_access_size = 4,
+    },
+};
+
+static void etsec_timer_hit(void *opaque)
+{
+    eTSEC *etsec = opaque;
+
+    ptimer_stop(etsec->ptimer);
+
+    if (!(etsec->regs[DMACTRL].value & DMACTRL_WOP)) {
+
+        if (!(etsec->regs[DMACTRL].value & DMACTRL_GTS)) {
+            etsec_walk_tx_ring(etsec, 0);
+        }
+        ptimer_set_count(etsec->ptimer, 1);
+        ptimer_run(etsec->ptimer, 1);
+    }
+}
+
+static void etsec_reset(DeviceState *d)
+{
+    eTSEC *etsec = ETSEC_COMMON(d);
+    int i = 0;
+    int reg_index = 0;
+
+    /* Default value for all registers */
+    for (i = 0; i < ETSEC_REG_NUMBER; i++) {
+        etsec->regs[i].name = "Reserved";
+        etsec->regs[i].desc = "";
+        etsec->regs[i].access = ACC_UNKNOWN;
+        etsec->regs[i].value = 0x00000000;
+    }
+
+    /* Set-up known registers */
+    for (i = 0; eTSEC_registers_def[i].name != NULL; i++) {
+
+        reg_index = eTSEC_registers_def[i].offset / 4;
+
+        etsec->regs[reg_index].name = eTSEC_registers_def[i].name;
+        etsec->regs[reg_index].desc = eTSEC_registers_def[i].desc;
+        etsec->regs[reg_index].access = eTSEC_registers_def[i].access;
+        etsec->regs[reg_index].value = eTSEC_registers_def[i].reset;
+    }
+
+    etsec->tx_buffer = NULL;
+    etsec->tx_buffer_len = 0;
+    etsec->rx_buffer = NULL;
+    etsec->rx_buffer_len = 0;
+
+    etsec->phy_status =
+        MII_SR_EXTENDED_CAPS | MII_SR_LINK_STATUS | MII_SR_AUTONEG_CAPS |
+        MII_SR_AUTONEG_COMPLETE | MII_SR_PREAMBLE_SUPPRESS |
+        MII_SR_EXTENDED_STATUS | MII_SR_100T2_HD_CAPS | MII_SR_100T2_FD_CAPS |
+        MII_SR_10T_HD_CAPS | MII_SR_10T_FD_CAPS | MII_SR_100X_HD_CAPS |
+        MII_SR_100X_FD_CAPS | MII_SR_100T4_CAPS;
+
+    etsec_update_irq(etsec);
+}
+
+static ssize_t etsec_receive(NetClientState *nc,
+                             const uint8_t *buf,
+                             size_t size)
+{
+    ssize_t ret;
+    eTSEC *etsec = qemu_get_nic_opaque(nc);
+
+#if defined(HEX_DUMP)
+    fprintf(stderr, "%s receive size:%zd\n", nc->name, size);
+    qemu_hexdump(stderr, "", buf, size);
+#endif
+    /* Flush is unnecessary as we are already in the receiving path */
+    etsec->need_flush = false;
+    ret = etsec_rx_ring_write(etsec, buf, size);
+    if (ret == 0) {
+        /* The packet will be queued, let's flush it when buffer is available
+         * again.
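+         * (Returning 0 from a NetClientInfo .receive callback makes the
+         * QEMU net core queue the packet and stop delivering further
+         * packets until the device calls qemu_flush_queued_packets();
+         * need_flush records that this has to happen once receive
+         * buffers are free again.)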
*/ + etsec->need_flush = true; + } + return ret; +} + + +static void etsec_set_link_status(NetClientState *nc) +{ + eTSEC *etsec = qemu_get_nic_opaque(nc); + + etsec_miim_link_status(etsec, nc); +} + +static NetClientInfo net_etsec_info = { + .type = NET_CLIENT_DRIVER_NIC, + .size = sizeof(NICState), + .receive = etsec_receive, + .link_status_changed = etsec_set_link_status, +}; + +static void etsec_realize(DeviceState *dev, Error **errp) +{ + eTSEC *etsec = ETSEC_COMMON(dev); + + etsec->nic = qemu_new_nic(&net_etsec_info, &etsec->conf, + object_get_typename(OBJECT(dev)), dev->id, etsec); + qemu_format_nic_info_str(qemu_get_queue(etsec->nic), etsec->conf.macaddr.a); + + etsec->ptimer = ptimer_init(etsec_timer_hit, etsec, PTIMER_POLICY_DEFAULT); + ptimer_transaction_begin(etsec->ptimer); + ptimer_set_freq(etsec->ptimer, 100); + ptimer_transaction_commit(etsec->ptimer); +} + +static void etsec_instance_init(Object *obj) +{ + eTSEC *etsec = ETSEC_COMMON(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + + memory_region_init_io(&etsec->io_area, OBJECT(etsec), &etsec_ops, etsec, + "eTSEC", 0x1000); + sysbus_init_mmio(sbd, &etsec->io_area); + + sysbus_init_irq(sbd, &etsec->tx_irq); + sysbus_init_irq(sbd, &etsec->rx_irq); + sysbus_init_irq(sbd, &etsec->err_irq); +} + +static Property etsec_properties[] = { + DEFINE_NIC_PROPERTIES(eTSEC, conf), + DEFINE_PROP_END_OF_LIST(), +}; + +static void etsec_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = etsec_realize; + dc->reset = etsec_reset; + device_class_set_props(dc, etsec_properties); + /* Supported by ppce500 machine */ + dc->user_creatable = true; +} + +static TypeInfo etsec_info = { + .name = TYPE_ETSEC_COMMON, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(eTSEC), + .class_init = etsec_class_init, + .instance_init = etsec_instance_init, +}; + +static void etsec_register_types(void) +{ + type_register_static(&etsec_info); +} + +type_init(etsec_register_types) + +DeviceState *etsec_create(hwaddr base, + MemoryRegion * mr, + NICInfo * nd, + qemu_irq tx_irq, + qemu_irq rx_irq, + qemu_irq err_irq) +{ + DeviceState *dev; + + dev = qdev_new("eTSEC"); + qdev_set_nic_properties(dev, nd); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + + sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, tx_irq); + sysbus_connect_irq(SYS_BUS_DEVICE(dev), 1, rx_irq); + sysbus_connect_irq(SYS_BUS_DEVICE(dev), 2, err_irq); + + memory_region_add_subregion(mr, base, + SYS_BUS_DEVICE(dev)->mmio[0].memory); + + return dev; +} diff --git a/hw/net/fsl_etsec/etsec.h b/hw/net/fsl_etsec/etsec.h new file mode 100644 index 000000000..fddf55154 --- /dev/null +++ b/hw/net/fsl_etsec/etsec.h @@ -0,0 +1,178 @@ +/* + * QEMU Freescale eTSEC Emulator + * + * Copyright (c) 2011-2013 AdaCore + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#ifndef ETSEC_H +#define ETSEC_H + +#include "hw/sysbus.h" +#include "net/net.h" +#include "hw/ptimer.h" +#include "qom/object.h" + +/* Buffer Descriptors */ + +typedef struct eTSEC_rxtx_bd { + uint16_t flags; + uint16_t length; + uint32_t bufptr; +} eTSEC_rxtx_bd; + +#define BD_WRAP (1 << 13) +#define BD_INTERRUPT (1 << 12) +#define BD_LAST (1 << 11) + +#define BD_TX_READY (1 << 15) +#define BD_TX_PADCRC (1 << 14) +#define BD_TX_TC (1 << 10) +#define BD_TX_PREDEF (1 << 9) +#define BD_TX_HFELC (1 << 7) +#define BD_TX_CFRL (1 << 6) +#define BD_TX_RC_MASK 0xF +#define BD_TX_RC_OFFSET 0x2 +#define BD_TX_TOEUN (1 << 1) +#define BD_TX_TR (1 << 0) + +#define BD_RX_EMPTY (1 << 15) +#define BD_RX_RO1 (1 << 14) +#define BD_RX_FIRST (1 << 10) +#define BD_RX_MISS (1 << 8) +#define BD_RX_BROADCAST (1 << 7) +#define BD_RX_MULTICAST (1 << 6) +#define BD_RX_LG (1 << 5) +#define BD_RX_NO (1 << 4) +#define BD_RX_SH (1 << 3) +#define BD_RX_CR (1 << 2) +#define BD_RX_OV (1 << 1) +#define BD_RX_TR (1 << 0) + +/* Tx FCB flags */ +#define FCB_TX_VLN (1 << 7) +#define FCB_TX_IP (1 << 6) +#define FCB_TX_IP6 (1 << 5) +#define FCB_TX_TUP (1 << 4) +#define FCB_TX_UDP (1 << 3) +#define FCB_TX_CIP (1 << 2) +#define FCB_TX_CTU (1 << 1) +#define FCB_TX_NPH (1 << 0) + +/* PHY Status Register */ +#define MII_SR_EXTENDED_CAPS 0x0001 /* Extended register capabilities */ +#define MII_SR_JABBER_DETECT 0x0002 /* Jabber Detected */ +#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */ +#define MII_SR_AUTONEG_CAPS 0x0008 /* Auto Neg Capable */ +#define MII_SR_REMOTE_FAULT 0x0010 /* Remote Fault Detect */ +#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */ +#define MII_SR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */ +#define MII_SR_EXTENDED_STATUS 0x0100 /* Ext. 
status info in Reg 0x0F */
+#define MII_SR_100T2_HD_CAPS 0x0200 /* 100T2 Half Duplex Capable */
+#define MII_SR_100T2_FD_CAPS 0x0400 /* 100T2 Full Duplex Capable */
+#define MII_SR_10T_HD_CAPS 0x0800 /* 10T Half Duplex Capable */
+#define MII_SR_10T_FD_CAPS 0x1000 /* 10T Full Duplex Capable */
+#define MII_SR_100X_HD_CAPS 0x2000 /* 100X Half Duplex Capable */
+#define MII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */
+#define MII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */
+
+/* eTSEC */
+
+/* Number of registers in the device */
+#define ETSEC_REG_NUMBER 1024
+
+typedef struct eTSEC_Register {
+    const char *name;
+    const char *desc;
+    uint32_t access;
+    uint32_t value;
+} eTSEC_Register;
+
+struct eTSEC {
+    SysBusDevice busdev;
+
+    MemoryRegion io_area;
+
+    eTSEC_Register regs[ETSEC_REG_NUMBER];
+
+    NICState *nic;
+    NICConf conf;
+
+    /* Tx */
+
+    uint8_t *tx_buffer;
+    uint32_t tx_buffer_len;
+    eTSEC_rxtx_bd first_bd;
+
+    /* Rx */
+
+    uint8_t *rx_buffer;
+    uint32_t rx_buffer_len;
+    uint32_t rx_remaining_data;
+    uint8_t rx_first_in_frame;
+    uint8_t rx_fcb_size;
+    eTSEC_rxtx_bd rx_first_bd;
+    uint8_t rx_fcb[10];
+    uint32_t rx_padding;
+
+    /* IRQs */
+    qemu_irq tx_irq;
+    qemu_irq rx_irq;
+    qemu_irq err_irq;
+
+
+    uint16_t phy_status;
+    uint16_t phy_control;
+
+    /* Polling */
+    struct ptimer_state *ptimer;
+
+    /* Whether we should flush the rx queue when a buffer becomes available. */
+    bool need_flush;
+};
+typedef struct eTSEC eTSEC;
+
+#define TYPE_ETSEC_COMMON "eTSEC"
+OBJECT_DECLARE_SIMPLE_TYPE(eTSEC, ETSEC_COMMON)
+
+#define eTSEC_TRANSMIT 1
+#define eTSEC_RECEIVE 2
+
+DeviceState *etsec_create(hwaddr base,
+                          MemoryRegion *mr,
+                          NICInfo *nd,
+                          qemu_irq tx_irq,
+                          qemu_irq rx_irq,
+                          qemu_irq err_irq);
+
+void etsec_update_irq(eTSEC *etsec);
+
+void etsec_walk_tx_ring(eTSEC *etsec, int ring_nbr);
+void etsec_walk_rx_ring(eTSEC *etsec, int ring_nbr);
+ssize_t etsec_rx_ring_write(eTSEC *etsec, const uint8_t *buf, size_t size);
+
+void etsec_write_miim(eTSEC *etsec,
+                      eTSEC_Register *reg,
+                      uint32_t reg_index,
+                      uint32_t value);
+
+void etsec_miim_link_status(eTSEC *etsec, NetClientState *nc);
+
+#endif /* ETSEC_H */
diff --git a/hw/net/fsl_etsec/miim.c b/hw/net/fsl_etsec/miim.c
new file mode 100644
index 000000000..6bba01c82
--- /dev/null
+++ b/hw/net/fsl_etsec/miim.c
@@ -0,0 +1,147 @@
+/*
+ * QEMU Freescale eTSEC Emulator
+ *
+ * Copyright (c) 2011-2013 AdaCore
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */ + +#include "qemu/osdep.h" +#include "etsec.h" +#include "registers.h" + +/* #define DEBUG_MIIM */ + +#define MIIM_CONTROL 0 +#define MIIM_STATUS 1 +#define MIIM_PHY_ID_1 2 +#define MIIM_PHY_ID_2 3 +#define MIIM_T2_STATUS 10 +#define MIIM_EXT_STATUS 15 + +static void miim_read_cycle(eTSEC *etsec) +{ + uint8_t phy; + uint8_t addr; + uint16_t value; + + phy = (etsec->regs[MIIMADD].value >> 8) & 0x1F; + (void)phy; /* Unreferenced */ + addr = etsec->regs[MIIMADD].value & 0x1F; + + switch (addr) { + case MIIM_CONTROL: + value = etsec->phy_control; + break; + case MIIM_STATUS: + value = etsec->phy_status; + break; + case MIIM_T2_STATUS: + value = 0x1800; /* Local and remote receivers OK */ + break; + default: + value = 0x0; + break; + }; + +#ifdef DEBUG_MIIM + qemu_log("%s phy:%d addr:0x%x value:0x%x\n", __func__, phy, addr, value); +#endif + + etsec->regs[MIIMSTAT].value = value; +} + +static void miim_write_cycle(eTSEC *etsec) +{ + uint8_t phy; + uint8_t addr; + uint16_t value; + + phy = (etsec->regs[MIIMADD].value >> 8) & 0x1F; + (void)phy; /* Unreferenced */ + addr = etsec->regs[MIIMADD].value & 0x1F; + value = etsec->regs[MIIMCON].value & 0xffff; + +#ifdef DEBUG_MIIM + qemu_log("%s phy:%d addr:0x%x value:0x%x\n", __func__, phy, addr, value); +#endif + + switch (addr) { + case MIIM_CONTROL: + etsec->phy_control = value & ~(0x8100); + break; + default: + break; + }; +} + +void etsec_write_miim(eTSEC *etsec, + eTSEC_Register *reg, + uint32_t reg_index, + uint32_t value) +{ + + switch (reg_index) { + + case MIIMCOM: + /* Read and scan cycle */ + + if ((!(reg->value & MIIMCOM_READ)) && (value & MIIMCOM_READ)) { + /* Read */ + miim_read_cycle(etsec); + } + reg->value = value; + break; + + case MIIMCON: + reg->value = value & 0xffff; + miim_write_cycle(etsec); + break; + + default: + /* Default handling */ + switch (reg->access) { + + case ACC_RW: + case ACC_WO: + reg->value = value; + break; + + case ACC_W1C: + reg->value &= ~value; + break; + + case ACC_RO: + default: + /* Read Only or Unknown register */ + break; + } + } + +} + +void etsec_miim_link_status(eTSEC *etsec, NetClientState *nc) +{ + /* Set link status */ + if (nc->link_down) { + etsec->phy_status &= ~MII_SR_LINK_STATUS; + } else { + etsec->phy_status |= MII_SR_LINK_STATUS; + } +} diff --git a/hw/net/fsl_etsec/registers.c b/hw/net/fsl_etsec/registers.c new file mode 100644 index 000000000..46ce7a84b --- /dev/null +++ b/hw/net/fsl_etsec/registers.c @@ -0,0 +1,296 @@ +/* + * QEMU Freescale eTSEC Emulator + * + * Copyright (c) 2011-2013 AdaCore + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ +#include "qemu/osdep.h" +#include "registers.h" + +const eTSEC_Register_Definition eTSEC_registers_def[] = { +{0x000, "TSEC_ID", "Controller ID register", ACC_RO, 0x01240000}, +{0x004, "TSEC_ID2", "Controller ID register 2", ACC_RO, 0x003000F0}, +{0x010, "IEVENT", "Interrupt event register", ACC_W1C, 0x00000000}, +{0x014, "IMASK", "Interrupt mask register", ACC_RW, 0x00000000}, +{0x018, "EDIS", "Error disabled register", ACC_RW, 0x00000000}, +{0x020, "ECNTRL", "Ethernet control register", ACC_RW, 0x00000040}, +{0x028, "PTV", "Pause time value register", ACC_RW, 0x00000000}, +{0x02C, "DMACTRL", "DMA control register", ACC_RW, 0x00000000}, +{0x030, "TBIPA", "TBI PHY address register", ACC_RW, 0x00000000}, + +/* eTSEC FIFO Control and Status Registers */ + +{0x058, "FIFO_RX_ALARM", "FIFO receive alarm start threshold register", ACC_RW, 0x00000040}, +{0x05C, "FIFO_RX_ALARM_SHUTOFF", "FIFO receive alarm shut-off threshold register", ACC_RW, 0x00000080}, +{0x08C, "FIFO_TX_THR", "FIFO transmit threshold register", ACC_RW, 0x00000080}, +{0x098, "FIFO_TX_STARVE", "FIFO transmit starve register", ACC_RW, 0x00000040}, +{0x09C, "FIFO_TX_STARVE_SHUTOFF", "FIFO transmit starve shut-off register", ACC_RW, 0x00000080}, + +/* eTSEC Transmit Control and Status Registers */ + +{0x100, "TCTRL", "Transmit control register", ACC_RW, 0x00000000}, +{0x104, "TSTAT", "Transmit status register", ACC_W1C, 0x00000000}, +{0x108, "DFVLAN", "Default VLAN control word", ACC_RW, 0x81000000}, +{0x110, "TXIC", "Transmit interrupt coalescing register", ACC_RW, 0x00000000}, +{0x114, "TQUEUE", "Transmit queue control register", ACC_RW, 0x00008000}, +{0x140, "TR03WT", "TxBD Rings 0-3 round-robin weightings", ACC_RW, 0x00000000}, +{0x144, "TR47WT", "TxBD Rings 4-7 round-robin weightings", ACC_RW, 0x00000000}, +{0x180, "TBDBPH", "Tx data buffer pointer high bits", ACC_RW, 0x00000000}, +{0x184, "TBPTR0", "TxBD pointer for ring 0", ACC_RW, 0x00000000}, +{0x18C, "TBPTR1", "TxBD pointer for ring 1", ACC_RW, 0x00000000}, +{0x194, "TBPTR2", "TxBD pointer for ring 2", ACC_RW, 0x00000000}, +{0x19C, "TBPTR3", "TxBD pointer for ring 3", ACC_RW, 0x00000000}, +{0x1A4, "TBPTR4", "TxBD pointer for ring 4", ACC_RW, 0x00000000}, +{0x1AC, "TBPTR5", "TxBD pointer for ring 5", ACC_RW, 0x00000000}, +{0x1B4, "TBPTR6", "TxBD pointer for ring 6", ACC_RW, 0x00000000}, +{0x1BC, "TBPTR7", "TxBD pointer for ring 7", ACC_RW, 0x00000000}, +{0x200, "TBASEH", "TxBD base address high bits", ACC_RW, 0x00000000}, +{0x204, "TBASE0", "TxBD base address of ring 0", ACC_RW, 0x00000000}, +{0x20C, "TBASE1", "TxBD base address of ring 1", ACC_RW, 0x00000000}, +{0x214, "TBASE2", "TxBD base address of ring 2", ACC_RW, 0x00000000}, +{0x21C, "TBASE3", "TxBD base address of ring 3", ACC_RW, 0x00000000}, +{0x224, "TBASE4", "TxBD base address of ring 4", ACC_RW, 0x00000000}, +{0x22C, "TBASE5", "TxBD base address of ring 5", ACC_RW, 0x00000000}, +{0x234, "TBASE6", "TxBD base address of ring 6", ACC_RW, 0x00000000}, +{0x23C, "TBASE7", "TxBD base address of ring 7", ACC_RW, 0x00000000}, +{0x280, "TMR_TXTS1_ID", "Tx time stamp identification tag (set 1)", ACC_RO, 0x00000000}, +{0x284, "TMR_TXTS2_ID", "Tx time stamp identification tag (set 2)", ACC_RO, 0x00000000}, +{0x2C0, "TMR_TXTS1_H", "Tx time stamp 
high (set 1)", ACC_RO, 0x00000000}, +{0x2C4, "TMR_TXTS1_L", "Tx time stamp high (set 1)", ACC_RO, 0x00000000}, +{0x2C8, "TMR_TXTS2_H", "Tx time stamp high (set 2)", ACC_RO, 0x00000000}, +{0x2CC, "TMR_TXTS2_L", "Tx time stamp high (set 2)", ACC_RO, 0x00000000}, + +/* eTSEC Receive Control and Status Registers */ + +{0x300, "RCTRL", "Receive control register", ACC_RW, 0x00000000}, +{0x304, "RSTAT", "Receive status register", ACC_W1C, 0x00000000}, +{0x310, "RXIC", "Receive interrupt coalescing register", ACC_RW, 0x00000000}, +{0x314, "RQUEUE", "Receive queue control register.", ACC_RW, 0x00800080}, +{0x330, "RBIFX", "Receive bit field extract control register", ACC_RW, 0x00000000}, +{0x334, "RQFAR", "Receive queue filing table address register", ACC_RW, 0x00000000}, +{0x338, "RQFCR", "Receive queue filing table control register", ACC_RW, 0x00000000}, +{0x33C, "RQFPR", "Receive queue filing table property register", ACC_RW, 0x00000000}, +{0x340, "MRBLR", "Maximum receive buffer length register", ACC_RW, 0x00000000}, +{0x380, "RBDBPH", "Rx data buffer pointer high bits", ACC_RW, 0x00000000}, +{0x384, "RBPTR0", "RxBD pointer for ring 0", ACC_RW, 0x00000000}, +{0x38C, "RBPTR1", "RxBD pointer for ring 1", ACC_RW, 0x00000000}, +{0x394, "RBPTR2", "RxBD pointer for ring 2", ACC_RW, 0x00000000}, +{0x39C, "RBPTR3", "RxBD pointer for ring 3", ACC_RW, 0x00000000}, +{0x3A4, "RBPTR4", "RxBD pointer for ring 4", ACC_RW, 0x00000000}, +{0x3AC, "RBPTR5", "RxBD pointer for ring 5", ACC_RW, 0x00000000}, +{0x3B4, "RBPTR6", "RxBD pointer for ring 6", ACC_RW, 0x00000000}, +{0x3BC, "RBPTR7", "RxBD pointer for ring 7", ACC_RW, 0x00000000}, +{0x400, "RBASEH", "RxBD base address high bits", ACC_RW, 0x00000000}, +{0x404, "RBASE0", "RxBD base address of ring 0", ACC_RW, 0x00000000}, +{0x40C, "RBASE1", "RxBD base address of ring 1", ACC_RW, 0x00000000}, +{0x414, "RBASE2", "RxBD base address of ring 2", ACC_RW, 0x00000000}, +{0x41C, "RBASE3", "RxBD base address of ring 3", ACC_RW, 0x00000000}, +{0x424, "RBASE4", "RxBD base address of ring 4", ACC_RW, 0x00000000}, +{0x42C, "RBASE5", "RxBD base address of ring 5", ACC_RW, 0x00000000}, +{0x434, "RBASE6", "RxBD base address of ring 6", ACC_RW, 0x00000000}, +{0x43C, "RBASE7", "RxBD base address of ring 7", ACC_RW, 0x00000000}, +{0x4C0, "TMR_RXTS_H", "Rx timer time stamp register high", ACC_RW, 0x00000000}, +{0x4C4, "TMR_RXTS_L", "Rx timer time stamp register low", ACC_RW, 0x00000000}, + +/* eTSEC MAC Registers */ + +{0x500, "MACCFG1", "MAC configuration register 1", ACC_RW, 0x00000000}, +{0x504, "MACCFG2", "MAC configuration register 2", ACC_RW, 0x00007000}, +{0x508, "IPGIFG", "Inter-packet/inter-frame gap register", ACC_RW, 0x40605060}, +{0x50C, "HAFDUP", "Half-duplex control", ACC_RW, 0x00A1F037}, +{0x510, "MAXFRM", "Maximum frame length", ACC_RW, 0x00000600}, +{0x520, "MIIMCFG", "MII management configuration", ACC_RW, 0x00000007}, +{0x524, "MIIMCOM", "MII management command", ACC_RW, 0x00000000}, +{0x528, "MIIMADD", "MII management address", ACC_RW, 0x00000000}, +{0x52C, "MIIMCON", "MII management control", ACC_WO, 0x00000000}, +{0x530, "MIIMSTAT", "MII management status", ACC_RO, 0x00000000}, +{0x534, "MIIMIND", "MII management indicator", ACC_RO, 0x00000000}, +{0x53C, "IFSTAT", "Interface status", ACC_RO, 0x00000000}, +{0x540, "MACSTNADDR1", "MAC station address register 1", ACC_RW, 0x00000000}, +{0x544, "MACSTNADDR2", "MAC station address register 2", ACC_RW, 0x00000000}, +{0x548, "MAC01ADDR1", "MAC exact match address 1, part 1", ACC_RW, 0x00000000}, +{0x54C, 
"MAC01ADDR2", "MAC exact match address 1, part 2", ACC_RW, 0x00000000}, +{0x550, "MAC02ADDR1", "MAC exact match address 2, part 1", ACC_RW, 0x00000000}, +{0x554, "MAC02ADDR2", "MAC exact match address 2, part 2", ACC_RW, 0x00000000}, +{0x558, "MAC03ADDR1", "MAC exact match address 3, part 1", ACC_RW, 0x00000000}, +{0x55C, "MAC03ADDR2", "MAC exact match address 3, part 2", ACC_RW, 0x00000000}, +{0x560, "MAC04ADDR1", "MAC exact match address 4, part 1", ACC_RW, 0x00000000}, +{0x564, "MAC04ADDR2", "MAC exact match address 4, part 2", ACC_RW, 0x00000000}, +{0x568, "MAC05ADDR1", "MAC exact match address 5, part 1", ACC_RW, 0x00000000}, +{0x56C, "MAC05ADDR2", "MAC exact match address 5, part 2", ACC_RW, 0x00000000}, +{0x570, "MAC06ADDR1", "MAC exact match address 6, part 1", ACC_RW, 0x00000000}, +{0x574, "MAC06ADDR2", "MAC exact match address 6, part 2", ACC_RW, 0x00000000}, +{0x578, "MAC07ADDR1", "MAC exact match address 7, part 1", ACC_RW, 0x00000000}, +{0x57C, "MAC07ADDR2", "MAC exact match address 7, part 2", ACC_RW, 0x00000000}, +{0x580, "MAC08ADDR1", "MAC exact match address 8, part 1", ACC_RW, 0x00000000}, +{0x584, "MAC08ADDR2", "MAC exact match address 8, part 2", ACC_RW, 0x00000000}, +{0x588, "MAC09ADDR1", "MAC exact match address 9, part 1", ACC_RW, 0x00000000}, +{0x58C, "MAC09ADDR2", "MAC exact match address 9, part 2", ACC_RW, 0x00000000}, +{0x590, "MAC10ADDR1", "MAC exact match address 10, part 1", ACC_RW, 0x00000000}, +{0x594, "MAC10ADDR2", "MAC exact match address 10, part 2", ACC_RW, 0x00000000}, +{0x598, "MAC11ADDR1", "MAC exact match address 11, part 1", ACC_RW, 0x00000000}, +{0x59C, "MAC11ADDR2", "MAC exact match address 11, part 2", ACC_RW, 0x00000000}, +{0x5A0, "MAC12ADDR1", "MAC exact match address 12, part 1", ACC_RW, 0x00000000}, +{0x5A4, "MAC12ADDR2", "MAC exact match address 12, part 2", ACC_RW, 0x00000000}, +{0x5A8, "MAC13ADDR1", "MAC exact match address 13, part 1", ACC_RW, 0x00000000}, +{0x5AC, "MAC13ADDR2", "MAC exact match address 13, part 2", ACC_RW, 0x00000000}, +{0x5B0, "MAC14ADDR1", "MAC exact match address 14, part 1", ACC_RW, 0x00000000}, +{0x5B4, "MAC14ADDR2", "MAC exact match address 14, part 2", ACC_RW, 0x00000000}, +{0x5B8, "MAC15ADDR1", "MAC exact match address 15, part 1", ACC_RW, 0x00000000}, +{0x5BC, "MAC15ADDR2", "MAC exact match address 15, part 2", ACC_RW, 0x00000000}, + +/* eTSEC, "Transmit", "and", Receive, Counters */ + +{0x680, "TR64", "Transmit and receive 64-byte frame counter ", ACC_RW, 0x00000000}, +{0x684, "TR127", "Transmit and receive 65- to 127-byte frame counter", ACC_RW, 0x00000000}, +{0x688, "TR255", "Transmit and receive 128- to 255-byte frame counter", ACC_RW, 0x00000000}, +{0x68C, "TR511", "Transmit and receive 256- to 511-byte frame counter", ACC_RW, 0x00000000}, +{0x690, "TR1K", "Transmit and receive 512- to 1023-byte frame counter", ACC_RW, 0x00000000}, +{0x694, "TRMAX", "Transmit and receive 1024- to 1518-byte frame counter", ACC_RW, 0x00000000}, +{0x698, "TRMGV", "Transmit and receive 1519- to 1522-byte good VLAN frame count", ACC_RW, 0x00000000}, + +/* eTSEC Receive Counters */ + +{0x69C, "RBYT", "Receive byte counter", ACC_RW, 0x00000000}, +{0x6A0, "RPKT", "Receive packet counter", ACC_RW, 0x00000000}, +{0x6A4, "RFCS", "Receive FCS error counter", ACC_RW, 0x00000000}, +{0x6A8, "RMCA", "Receive multicast packet counter", ACC_RW, 0x00000000}, +{0x6AC, "RBCA", "Receive broadcast packet counter", ACC_RW, 0x00000000}, +{0x6B0, "RXCF", "Receive control frame packet counter ", ACC_RW, 0x00000000}, +{0x6B4, "RXPF", "Receive PAUSE 
frame packet counter", ACC_RW, 0x00000000}, +{0x6B8, "RXUO", "Receive unknown OP code counter ", ACC_RW, 0x00000000}, +{0x6BC, "RALN", "Receive alignment error counter ", ACC_RW, 0x00000000}, +{0x6C0, "RFLR", "Receive frame length error counter ", ACC_RW, 0x00000000}, +{0x6C4, "RCDE", "Receive code error counter ", ACC_RW, 0x00000000}, +{0x6C8, "RCSE", "Receive carrier sense error counter", ACC_RW, 0x00000000}, +{0x6CC, "RUND", "Receive undersize packet counter", ACC_RW, 0x00000000}, +{0x6D0, "ROVR", "Receive oversize packet counter ", ACC_RW, 0x00000000}, +{0x6D4, "RFRG", "Receive fragments counter", ACC_RW, 0x00000000}, +{0x6D8, "RJBR", "Receive jabber counter ", ACC_RW, 0x00000000}, +{0x6DC, "RDRP", "Receive drop counter", ACC_RW, 0x00000000}, + +/* eTSEC Transmit Counters */ + +{0x6E0, "TBYT", "Transmit byte counter", ACC_RW, 0x00000000}, +{0x6E4, "TPKT", "Transmit packet counter", ACC_RW, 0x00000000}, +{0x6E8, "TMCA", "Transmit multicast packet counter ", ACC_RW, 0x00000000}, +{0x6EC, "TBCA", "Transmit broadcast packet counter ", ACC_RW, 0x00000000}, +{0x6F0, "TXPF", "Transmit PAUSE control frame counter ", ACC_RW, 0x00000000}, +{0x6F4, "TDFR", "Transmit deferral packet counter ", ACC_RW, 0x00000000}, +{0x6F8, "TEDF", "Transmit excessive deferral packet counter ", ACC_RW, 0x00000000}, +{0x6FC, "TSCL", "Transmit single collision packet counter", ACC_RW, 0x00000000}, +{0x700, "TMCL", "Transmit multiple collision packet counter", ACC_RW, 0x00000000}, +{0x704, "TLCL", "Transmit late collision packet counter", ACC_RW, 0x00000000}, +{0x708, "TXCL", "Transmit excessive collision packet counter", ACC_RW, 0x00000000}, +{0x70C, "TNCL", "Transmit total collision counter ", ACC_RW, 0x00000000}, +{0x714, "TDRP", "Transmit drop frame counter", ACC_RW, 0x00000000}, +{0x718, "TJBR", "Transmit jabber frame counter ", ACC_RW, 0x00000000}, +{0x71C, "TFCS", "Transmit FCS error counter", ACC_RW, 0x00000000}, +{0x720, "TXCF", "Transmit control frame counter ", ACC_RW, 0x00000000}, +{0x724, "TOVR", "Transmit oversize frame counter", ACC_RW, 0x00000000}, +{0x728, "TUND", "Transmit undersize frame counter ", ACC_RW, 0x00000000}, +{0x72C, "TFRG", "Transmit fragments frame counter ", ACC_RW, 0x00000000}, + +/* eTSEC Counter Control and TOE Statistics Registers */ + +{0x730, "CAR1", "Carry register one register", ACC_W1C, 0x00000000}, +{0x734, "CAR2", "Carry register two register ", ACC_W1C, 0x00000000}, +{0x738, "CAM1", "Carry register one mask register ", ACC_RW, 0xFE03FFFF}, +{0x73C, "CAM2", "Carry register two mask register ", ACC_RW, 0x000FFFFD}, +{0x740, "RREJ", "Receive filer rejected packet counter", ACC_RW, 0x00000000}, + +/* Hash Function Registers */ + +{0x800, "IGADDR0", "Individual/group address register 0", ACC_RW, 0x00000000}, +{0x804, "IGADDR1", "Individual/group address register 1", ACC_RW, 0x00000000}, +{0x808, "IGADDR2", "Individual/group address register 2", ACC_RW, 0x00000000}, +{0x80C, "IGADDR3", "Individual/group address register 3", ACC_RW, 0x00000000}, +{0x810, "IGADDR4", "Individual/group address register 4", ACC_RW, 0x00000000}, +{0x814, "IGADDR5", "Individual/group address register 5", ACC_RW, 0x00000000}, +{0x818, "IGADDR6", "Individual/group address register 6", ACC_RW, 0x00000000}, +{0x81C, "IGADDR7", "Individual/group address register 7", ACC_RW, 0x00000000}, +{0x880, "GADDR0", "Group address register 0", ACC_RW, 0x00000000}, +{0x884, "GADDR1", "Group address register 1", ACC_RW, 0x00000000}, +{0x888, "GADDR2", "Group address register 2", ACC_RW, 0x00000000}, +{0x88C, "GADDR3", 
"Group address register 3", ACC_RW, 0x00000000}, +{0x890, "GADDR4", "Group address register 4", ACC_RW, 0x00000000}, +{0x894, "GADDR5", "Group address register 5", ACC_RW, 0x00000000}, +{0x898, "GADDR6", "Group address register 6", ACC_RW, 0x00000000}, +{0x89C, "GADDR7", "Group address register 7", ACC_RW, 0x00000000}, + +/* eTSEC DMA Attribute Registers */ + +{0xBF8, "ATTR", "Attribute register", ACC_RW, 0x00000000}, +{0xBFC, "ATTRELI", "Attribute extract length and extract index register", ACC_RW, 0x00000000}, + + +/* eTSEC Lossless Flow Control Registers */ + +{0xC00, "RQPRM0", "Receive Queue Parameters register 0 ", ACC_RW, 0x00000000}, +{0xC04, "RQPRM1", "Receive Queue Parameters register 1 ", ACC_RW, 0x00000000}, +{0xC08, "RQPRM2", "Receive Queue Parameters register 2 ", ACC_RW, 0x00000000}, +{0xC0C, "RQPRM3", "Receive Queue Parameters register 3 ", ACC_RW, 0x00000000}, +{0xC10, "RQPRM4", "Receive Queue Parameters register 4 ", ACC_RW, 0x00000000}, +{0xC14, "RQPRM5", "Receive Queue Parameters register 5 ", ACC_RW, 0x00000000}, +{0xC18, "RQPRM6", "Receive Queue Parameters register 6 ", ACC_RW, 0x00000000}, +{0xC1C, "RQPRM7", "Receive Queue Parameters register 7 ", ACC_RW, 0x00000000}, +{0xC44, "RFBPTR0", "Last Free RxBD pointer for ring 0", ACC_RW, 0x00000000}, +{0xC4C, "RFBPTR1", "Last Free RxBD pointer for ring 1", ACC_RW, 0x00000000}, +{0xC54, "RFBPTR2", "Last Free RxBD pointer for ring 2", ACC_RW, 0x00000000}, +{0xC5C, "RFBPTR3", "Last Free RxBD pointer for ring 3", ACC_RW, 0x00000000}, +{0xC64, "RFBPTR4", "Last Free RxBD pointer for ring 4", ACC_RW, 0x00000000}, +{0xC6C, "RFBPTR5", "Last Free RxBD pointer for ring 5", ACC_RW, 0x00000000}, +{0xC74, "RFBPTR6", "Last Free RxBD pointer for ring 6", ACC_RW, 0x00000000}, +{0xC7C, "RFBPTR7", "Last Free RxBD pointer for ring 7", ACC_RW, 0x00000000}, + +/* eTSEC Future Expansion Space */ + +/* Reserved*/ + +/* eTSEC IEEE 1588 Registers */ + +{0xE00, "TMR_CTRL", "Timer control register", ACC_RW, 0x00010001}, +{0xE04, "TMR_TEVENT", "time stamp event register", ACC_W1C, 0x00000000}, +{0xE08, "TMR_TEMASK", "Timer event mask register", ACC_RW, 0x00000000}, +{0xE0C, "TMR_PEVENT", "time stamp event register", ACC_RW, 0x00000000}, +{0xE10, "TMR_PEMASK", "Timer event mask register", ACC_RW, 0x00000000}, +{0xE14, "TMR_STAT", "time stamp status register", ACC_RW, 0x00000000}, +{0xE18, "TMR_CNT_H", "timer counter high register", ACC_RW, 0x00000000}, +{0xE1C, "TMR_CNT_L", "timer counter low register", ACC_RW, 0x00000000}, +{0xE20, "TMR_ADD", "Timer drift compensation addend register", ACC_RW, 0x00000000}, +{0xE24, "TMR_ACC", "Timer accumulator register", ACC_RW, 0x00000000}, +{0xE28, "TMR_PRSC", "Timer prescale", ACC_RW, 0x00000002}, +{0xE30, "TMROFF_H", "Timer offset high", ACC_RW, 0x00000000}, +{0xE34, "TMROFF_L", "Timer offset low", ACC_RW, 0x00000000}, +{0xE40, "TMR_ALARM1_H", "Timer alarm 1 high register", ACC_RW, 0xFFFFFFFF}, +{0xE44, "TMR_ALARM1_L", "Timer alarm 1 high register", ACC_RW, 0xFFFFFFFF}, +{0xE48, "TMR_ALARM2_H", "Timer alarm 2 high register", ACC_RW, 0xFFFFFFFF}, +{0xE4C, "TMR_ALARM2_L", "Timer alarm 2 high register", ACC_RW, 0xFFFFFFFF}, +{0xE80, "TMR_FIPER1", "Timer fixed period interval", ACC_RW, 0xFFFFFFFF}, +{0xE84, "TMR_FIPER2", "Timer fixed period interval", ACC_RW, 0xFFFFFFFF}, +{0xE88, "TMR_FIPER3", "Timer fixed period interval", ACC_RW, 0xFFFFFFFF}, +{0xEA0, "TMR_ETTS1_H", "Time stamp of general purpose external trigger ", ACC_RW, 0x00000000}, +{0xEA4, "TMR_ETTS1_L", "Time stamp of general purpose external trigger", 
ACC_RW, 0x00000000}, +{0xEA8, "TMR_ETTS2_H", "Time stamp of general purpose external trigger ", ACC_RW, 0x00000000}, +{0xEAC, "TMR_ETTS2_L", "Time stamp of general purpose external trigger", ACC_RW, 0x00000000}, + +/* End Of Table */ +{0x0, 0x0, 0x0, 0x0, 0x0} +}; diff --git a/hw/net/fsl_etsec/registers.h b/hw/net/fsl_etsec/registers.h new file mode 100644 index 000000000..f085537ec --- /dev/null +++ b/hw/net/fsl_etsec/registers.h @@ -0,0 +1,329 @@ +/* + * QEMU Freescale eTSEC Emulator + * + * Copyright (c) 2011-2013 AdaCore + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#ifndef ETSEC_REGISTERS_H +#define ETSEC_REGISTERS_H + +enum eTSEC_Register_Access_Type { + ACC_RW = 1, /* Read/Write */ + ACC_RO = 2, /* Read Only */ + ACC_WO = 3, /* Write Only */ + ACC_W1C = 4, /* Write 1 to clear */ + ACC_UNKNOWN = 5 /* Unknown register*/ +}; + +typedef struct eTSEC_Register_Definition { + uint32_t offset; + const char *name; + const char *desc; + enum eTSEC_Register_Access_Type access; + uint32_t reset; +} eTSEC_Register_Definition; + +extern const eTSEC_Register_Definition eTSEC_registers_def[]; + +#define DMACTRL_LE (1 << 15) +#define DMACTRL_GRS (1 << 4) +#define DMACTRL_GTS (1 << 3) +#define DMACTRL_WOP (1 << 0) + +#define IEVENT_PERR (1 << 0) +#define IEVENT_DPE (1 << 1) +#define IEVENT_FIQ (1 << 2) +#define IEVENT_FIR (1 << 3) +#define IEVENT_FGPI (1 << 4) +#define IEVENT_RXF (1 << 7) +#define IEVENT_GRSC (1 << 8) +#define IEVENT_MMRW (1 << 9) +#define IEVENT_MMRD (1 << 10) +#define IEVENT_MAG (1 << 11) +#define IEVENT_RXB (1 << 15) +#define IEVENT_XFUN (1 << 16) +#define IEVENT_CRL (1 << 17) +#define IEVENT_LC (1 << 18) +#define IEVENT_TXF (1 << 20) +#define IEVENT_TXB (1 << 21) +#define IEVENT_TXE (1 << 22) +#define IEVENT_TXC (1 << 23) +#define IEVENT_BABT (1 << 24) +#define IEVENT_GTSC (1 << 25) +#define IEVENT_MSRO (1 << 26) +#define IEVENT_EBERR (1 << 28) +#define IEVENT_BSY (1 << 29) +#define IEVENT_RXC (1 << 30) +#define IEVENT_BABR (1 << 31) + +/* Mapping between interrupt pin and interrupt flags */ +#define IEVENT_RX_MASK (IEVENT_RXF | IEVENT_RXB) +#define IEVENT_TX_MASK (IEVENT_TXF | IEVENT_TXB) +#define IEVENT_ERR_MASK (IEVENT_MAG | IEVENT_GTSC | IEVENT_GRSC | IEVENT_TXC | \ + IEVENT_RXC | IEVENT_BABR | IEVENT_BABT | IEVENT_LC | \ + IEVENT_CRL | IEVENT_FGPI | IEVENT_FIR | IEVENT_FIQ | \ + IEVENT_DPE | IEVENT_PERR | IEVENT_EBERR | IEVENT_TXE | \ + IEVENT_XFUN | IEVENT_BSY | IEVENT_MSRO | IEVENT_MMRD | \ + IEVENT_MMRW) + +#define IMASK_RXFEN (1 << 7) 
+#define IMASK_GRSCEN (1 << 8) +#define IMASK_RXBEN (1 << 15) +#define IMASK_TXFEN (1 << 20) +#define IMASK_TXBEN (1 << 21) +#define IMASK_GTSCEN (1 << 25) + +#define MACCFG1_TX_EN (1 << 0) +#define MACCFG1_RX_EN (1 << 2) + +#define MACCFG2_CRC_EN (1 << 1) +#define MACCFG2_PADCRC (1 << 2) + +#define MIIMCOM_READ (1 << 0) +#define MIIMCOM_SCAN (1 << 1) + +#define RCTRL_PRSDEP_MASK (0x3) +#define RCTRL_PRSDEP_OFFSET (6) +#define RCTRL_RSF (1 << 2) + +/* Index of each register */ + +#define TSEC_ID (0x000 / 4) +#define TSEC_ID2 (0x004 / 4) +#define IEVENT (0x010 / 4) +#define IMASK (0x014 / 4) +#define EDIS (0x018 / 4) +#define ECNTRL (0x020 / 4) +#define PTV (0x028 / 4) +#define DMACTRL (0x02C / 4) +#define TBIPA (0x030 / 4) +#define TCTRL (0x100 / 4) +#define TSTAT (0x104 / 4) +#define DFVLAN (0x108 / 4) +#define TXIC (0x110 / 4) +#define TQUEUE (0x114 / 4) +#define TR03WT (0x140 / 4) +#define TR47WT (0x144 / 4) +#define TBDBPH (0x180 / 4) +#define TBPTR0 (0x184 / 4) +#define TBPTR1 (0x18C / 4) +#define TBPTR2 (0x194 / 4) +#define TBPTR3 (0x19C / 4) +#define TBPTR4 (0x1A4 / 4) +#define TBPTR5 (0x1AC / 4) +#define TBPTR6 (0x1B4 / 4) +#define TBPTR7 (0x1BC / 4) +#define TBASEH (0x200 / 4) +#define TBASE0 (0x204 / 4) +#define TBASE1 (0x20C / 4) +#define TBASE2 (0x214 / 4) +#define TBASE3 (0x21C / 4) +#define TBASE4 (0x224 / 4) +#define TBASE5 (0x22C / 4) +#define TBASE6 (0x234 / 4) +#define TBASE7 (0x23C / 4) +#define TMR_TXTS1_ID (0x280 / 4) +#define TMR_TXTS2_ID (0x284 / 4) +#define TMR_TXTS1_H (0x2C0 / 4) +#define TMR_TXTS1_L (0x2C4 / 4) +#define TMR_TXTS2_H (0x2C8 / 4) +#define TMR_TXTS2_L (0x2CC / 4) +#define RCTRL (0x300 / 4) +#define RSTAT (0x304 / 4) +#define RXIC (0x310 / 4) +#define RQUEUE (0x314 / 4) +#define RBIFX (0x330 / 4) +#define RQFAR (0x334 / 4) +#define RQFCR (0x338 / 4) +#define RQFPR (0x33C / 4) +#define MRBLR (0x340 / 4) +#define RBDBPH (0x380 / 4) +#define RBPTR0 (0x384 / 4) +#define RBPTR1 (0x38C / 4) +#define RBPTR2 (0x394 / 4) +#define RBPTR3 (0x39C / 4) +#define RBPTR4 (0x3A4 / 4) +#define RBPTR5 (0x3AC / 4) +#define RBPTR6 (0x3B4 / 4) +#define RBPTR7 (0x3BC / 4) +#define RBASEH (0x400 / 4) +#define RBASE0 (0x404 / 4) +#define RBASE1 (0x40C / 4) +#define RBASE2 (0x414 / 4) +#define RBASE3 (0x41C / 4) +#define RBASE4 (0x424 / 4) +#define RBASE5 (0x42C / 4) +#define RBASE6 (0x434 / 4) +#define RBASE7 (0x43C / 4) +#define TMR_RXTS_H (0x4C0 / 4) +#define TMR_RXTS_L (0x4C4 / 4) +#define MACCFG1 (0x500 / 4) +#define MACCFG2 (0x504 / 4) +#define IPGIFG (0x508 / 4) +#define HAFDUP (0x50C / 4) +#define MAXFRM (0x510 / 4) +#define MIIMCFG (0x520 / 4) +#define MIIMCOM (0x524 / 4) +#define MIIMADD (0x528 / 4) +#define MIIMCON (0x52C / 4) +#define MIIMSTAT (0x530 / 4) +#define MIIMIND (0x534 / 4) +#define IFSTAT (0x53C / 4) +#define MACSTNADDR1 (0x540 / 4) +#define MACSTNADDR2 (0x544 / 4) +#define MAC01ADDR1 (0x548 / 4) +#define MAC01ADDR2 (0x54C / 4) +#define MAC02ADDR1 (0x550 / 4) +#define MAC02ADDR2 (0x554 / 4) +#define MAC03ADDR1 (0x558 / 4) +#define MAC03ADDR2 (0x55C / 4) +#define MAC04ADDR1 (0x560 / 4) +#define MAC04ADDR2 (0x564 / 4) +#define MAC05ADDR1 (0x568 / 4) +#define MAC05ADDR2 (0x56C / 4) +#define MAC06ADDR1 (0x570 / 4) +#define MAC06ADDR2 (0x574 / 4) +#define MAC07ADDR1 (0x578 / 4) +#define MAC07ADDR2 (0x57C / 4) +#define MAC08ADDR1 (0x580 / 4) +#define MAC08ADDR2 (0x584 / 4) +#define MAC09ADDR1 (0x588 / 4) +#define MAC09ADDR2 (0x58C / 4) +#define MAC10ADDR1 (0x590 / 4) +#define MAC10ADDR2 (0x594 / 4) +#define MAC11ADDR1 (0x598 / 4) +#define MAC11ADDR2 (0x59C / 4) 
+#define MAC12ADDR1 (0x5A0 / 4) +#define MAC12ADDR2 (0x5A4 / 4) +#define MAC13ADDR1 (0x5A8 / 4) +#define MAC13ADDR2 (0x5AC / 4) +#define MAC14ADDR1 (0x5B0 / 4) +#define MAC14ADDR2 (0x5B4 / 4) +#define MAC15ADDR1 (0x5B8 / 4) +#define MAC15ADDR2 (0x5BC / 4) +#define TR64 (0x680 / 4) +#define TR127 (0x684 / 4) +#define TR255 (0x688 / 4) +#define TR511 (0x68C / 4) +#define TR1K (0x690 / 4) +#define TRMAX (0x694 / 4) +#define TRMGV (0x698 / 4) +#define RBYT (0x69C / 4) +#define RPKT (0x6A0 / 4) +#define RFCS (0x6A4 / 4) +#define RMCA (0x6A8 / 4) +#define RBCA (0x6AC / 4) +#define RXCF (0x6B0 / 4) +#define RXPF (0x6B4 / 4) +#define RXUO (0x6B8 / 4) +#define RALN (0x6BC / 4) +#define RFLR (0x6C0 / 4) +#define RCDE (0x6C4 / 4) +#define RCSE (0x6C8 / 4) +#define RUND (0x6CC / 4) +#define ROVR (0x6D0 / 4) +#define RFRG (0x6D4 / 4) +#define RJBR (0x6D8 / 4) +#define RDRP (0x6DC / 4) +#define TBYT (0x6E0 / 4) +#define TPKT (0x6E4 / 4) +#define TMCA (0x6E8 / 4) +#define TBCA (0x6EC / 4) +#define TXPF (0x6F0 / 4) +#define TDFR (0x6F4 / 4) +#define TEDF (0x6F8 / 4) +#define TSCL (0x6FC / 4) +#define TMCL (0x700 / 4) +#define TLCL (0x704 / 4) +#define TXCL (0x708 / 4) +#define TNCL (0x70C / 4) +#define TDRP (0x714 / 4) +#define TJBR (0x718 / 4) +#define TFCS (0x71C / 4) +#define TXCF (0x720 / 4) +#define TOVR (0x724 / 4) +#define TUND (0x728 / 4) +#define TFRG (0x72C / 4) +#define CAR1 (0x730 / 4) +#define CAR2 (0x734 / 4) +#define CAM1 (0x738 / 4) +#define CAM2 (0x73C / 4) +#define RREJ (0x740 / 4) +#define IGADDR0 (0x800 / 4) +#define IGADDR1 (0x804 / 4) +#define IGADDR2 (0x808 / 4) +#define IGADDR3 (0x80C / 4) +#define IGADDR4 (0x810 / 4) +#define IGADDR5 (0x814 / 4) +#define IGADDR6 (0x818 / 4) +#define IGADDR7 (0x81C / 4) +#define GADDR0 (0x880 / 4) +#define GADDR1 (0x884 / 4) +#define GADDR2 (0x888 / 4) +#define GADDR3 (0x88C / 4) +#define GADDR4 (0x890 / 4) +#define GADDR5 (0x894 / 4) +#define GADDR6 (0x898 / 4) +#define GADDR7 (0x89C / 4) +#define ATTR (0xBF8 / 4) +#define ATTRELI (0xBFC / 4) +#define RQPRM0 (0xC00 / 4) +#define RQPRM1 (0xC04 / 4) +#define RQPRM2 (0xC08 / 4) +#define RQPRM3 (0xC0C / 4) +#define RQPRM4 (0xC10 / 4) +#define RQPRM5 (0xC14 / 4) +#define RQPRM6 (0xC18 / 4) +#define RQPRM7 (0xC1C / 4) +#define RFBPTR0 (0xC44 / 4) +#define RFBPTR1 (0xC4C / 4) +#define RFBPTR2 (0xC54 / 4) +#define RFBPTR3 (0xC5C / 4) +#define RFBPTR4 (0xC64 / 4) +#define RFBPTR5 (0xC6C / 4) +#define RFBPTR6 (0xC74 / 4) +#define RFBPTR7 (0xC7C / 4) +#define TMR_CTRL (0xE00 / 4) +#define TMR_TEVENT (0xE04 / 4) +#define TMR_TEMASK (0xE08 / 4) +#define TMR_PEVENT (0xE0C / 4) +#define TMR_PEMASK (0xE10 / 4) +#define TMR_STAT (0xE14 / 4) +#define TMR_CNT_H (0xE18 / 4) +#define TMR_CNT_L (0xE1C / 4) +#define TMR_ADD (0xE20 / 4) +#define TMR_ACC (0xE24 / 4) +#define TMR_PRSC (0xE28 / 4) +#define TMROFF_H (0xE30 / 4) +#define TMROFF_L (0xE34 / 4) +#define TMR_ALARM1_H (0xE40 / 4) +#define TMR_ALARM1_L (0xE44 / 4) +#define TMR_ALARM2_H (0xE48 / 4) +#define TMR_ALARM2_L (0xE4C / 4) +#define TMR_FIPER1 (0xE80 / 4) +#define TMR_FIPER2 (0xE84 / 4) +#define TMR_FIPER3 (0xE88 / 4) +#define TMR_ETTS1_H (0xEA0 / 4) +#define TMR_ETTS1_L (0xEA4 / 4) +#define TMR_ETTS2_H (0xEA8 / 4) +#define TMR_ETTS2_L (0xEAC / 4) + +#endif /* ETSEC_REGISTERS_H */ diff --git a/hw/net/fsl_etsec/rings.c b/hw/net/fsl_etsec/rings.c new file mode 100644 index 000000000..8f0844641 --- /dev/null +++ b/hw/net/fsl_etsec/rings.c @@ -0,0 +1,652 @@ +/* + * QEMU Freescale eTSEC Emulator + * + * Copyright (c) 2011-2013 AdaCore + * + * Permission is hereby 
granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ +#include "qemu/osdep.h" +#include "qemu-common.h" +#include "net/checksum.h" +#include "qemu/log.h" +#include "etsec.h" +#include "registers.h" + +/* #define ETSEC_RING_DEBUG */ +/* #define HEX_DUMP */ +/* #define DEBUG_BD */ + +#ifdef ETSEC_RING_DEBUG +static const int debug_etsec = 1; +#else +static const int debug_etsec; +#endif + +#define RING_DEBUG(fmt, ...) do { \ + if (debug_etsec) { \ + qemu_log(fmt , ## __VA_ARGS__); \ + } \ + } while (0) + +#ifdef DEBUG_BD + +static void print_tx_bd_flags(uint16_t flags) +{ + qemu_log(" Ready: %d\n", !!(flags & BD_TX_READY)); + qemu_log(" PAD/CRC: %d\n", !!(flags & BD_TX_PADCRC)); + qemu_log(" Wrap: %d\n", !!(flags & BD_WRAP)); + qemu_log(" Interrupt: %d\n", !!(flags & BD_INTERRUPT)); + qemu_log(" Last in frame: %d\n", !!(flags & BD_LAST)); + qemu_log(" Tx CRC: %d\n", !!(flags & BD_TX_TC)); + qemu_log(" User-defined preamble / defer: %d\n", + !!(flags & BD_TX_PREDEF)); + qemu_log(" Huge frame enable / Late collision: %d\n", + !!(flags & BD_TX_HFELC)); + qemu_log(" Control frame / Retransmission Limit: %d\n", + !!(flags & BD_TX_CFRL)); + qemu_log(" Retry count: %d\n", + (flags >> BD_TX_RC_OFFSET) & BD_TX_RC_MASK); + qemu_log(" Underrun / TCP/IP off-load enable: %d\n", + !!(flags & BD_TX_TOEUN)); + qemu_log(" Truncation: %d\n", !!(flags & BD_TX_TR)); +} + +static void print_rx_bd_flags(uint16_t flags) +{ + qemu_log(" Empty: %d\n", !!(flags & BD_RX_EMPTY)); + qemu_log(" Receive software ownership: %d\n", !!(flags & BD_RX_RO1)); + qemu_log(" Wrap: %d\n", !!(flags & BD_WRAP)); + qemu_log(" Interrupt: %d\n", !!(flags & BD_INTERRUPT)); + qemu_log(" Last in frame: %d\n", !!(flags & BD_LAST)); + qemu_log(" First in frame: %d\n", !!(flags & BD_RX_FIRST)); + qemu_log(" Miss: %d\n", !!(flags & BD_RX_MISS)); + qemu_log(" Broadcast: %d\n", !!(flags & BD_RX_BROADCAST)); + qemu_log(" Multicast: %d\n", !!(flags & BD_RX_MULTICAST)); + qemu_log(" Rx frame length violation: %d\n", !!(flags & BD_RX_LG)); + qemu_log(" Rx non-octet aligned frame: %d\n", !!(flags & BD_RX_NO)); + qemu_log(" Short frame: %d\n", !!(flags & BD_RX_SH)); + qemu_log(" Rx CRC Error: %d\n", !!(flags & BD_RX_CR)); + qemu_log(" Overrun: %d\n", !!(flags & BD_RX_OV)); + qemu_log(" Truncation: %d\n", !!(flags & BD_RX_TR)); +} + + +static void print_bd(eTSEC_rxtx_bd bd, int mode, uint32_t index) +{ + qemu_log("eTSEC %s Data Buffer Descriptor (%u)\n", + mode == eTSEC_TRANSMIT ? 
"Transmit" : "Receive", + index); + qemu_log(" Flags : 0x%04x\n", bd.flags); + if (mode == eTSEC_TRANSMIT) { + print_tx_bd_flags(bd.flags); + } else { + print_rx_bd_flags(bd.flags); + } + qemu_log(" Length : 0x%04x\n", bd.length); + qemu_log(" Pointer : 0x%08x\n", bd.bufptr); +} + +#endif /* DEBUG_BD */ + +static void read_buffer_descriptor(eTSEC *etsec, + hwaddr addr, + eTSEC_rxtx_bd *bd) +{ + assert(bd != NULL); + + RING_DEBUG("READ Buffer Descriptor @ 0x" TARGET_FMT_plx"\n", addr); + cpu_physical_memory_read(addr, + bd, + sizeof(eTSEC_rxtx_bd)); + + if (etsec->regs[DMACTRL].value & DMACTRL_LE) { + bd->flags = lduw_le_p(&bd->flags); + bd->length = lduw_le_p(&bd->length); + bd->bufptr = ldl_le_p(&bd->bufptr); + } else { + bd->flags = lduw_be_p(&bd->flags); + bd->length = lduw_be_p(&bd->length); + bd->bufptr = ldl_be_p(&bd->bufptr); + } +} + +static void write_buffer_descriptor(eTSEC *etsec, + hwaddr addr, + eTSEC_rxtx_bd *bd) +{ + assert(bd != NULL); + + if (etsec->regs[DMACTRL].value & DMACTRL_LE) { + stw_le_p(&bd->flags, bd->flags); + stw_le_p(&bd->length, bd->length); + stl_le_p(&bd->bufptr, bd->bufptr); + } else { + stw_be_p(&bd->flags, bd->flags); + stw_be_p(&bd->length, bd->length); + stl_be_p(&bd->bufptr, bd->bufptr); + } + + RING_DEBUG("Write Buffer Descriptor @ 0x" TARGET_FMT_plx"\n", addr); + cpu_physical_memory_write(addr, + bd, + sizeof(eTSEC_rxtx_bd)); +} + +static void ievent_set(eTSEC *etsec, + uint32_t flags) +{ + etsec->regs[IEVENT].value |= flags; + + etsec_update_irq(etsec); +} + +static void tx_padding_and_crc(eTSEC *etsec, uint32_t min_frame_len) +{ + int add = min_frame_len - etsec->tx_buffer_len; + + /* Padding */ + if (add > 0) { + RING_DEBUG("pad:%u\n", add); + etsec->tx_buffer = g_realloc(etsec->tx_buffer, + etsec->tx_buffer_len + add); + + memset(etsec->tx_buffer + etsec->tx_buffer_len, 0x0, add); + etsec->tx_buffer_len += add; + } + + /* Never add CRC in QEMU */ +} + +static void process_tx_fcb(eTSEC *etsec) +{ + uint8_t flags = (uint8_t)(*etsec->tx_buffer); + /* L3 header offset from start of frame */ + uint8_t l3_header_offset = (uint8_t)*(etsec->tx_buffer + 3); + /* L4 header offset from start of L3 header */ + uint8_t l4_header_offset = (uint8_t)*(etsec->tx_buffer + 2); + /* L3 header */ + uint8_t *l3_header = etsec->tx_buffer + 8 + l3_header_offset; + /* L4 header */ + uint8_t *l4_header = l3_header + l4_header_offset; + int csum = 0; + + /* if packet is IP4 and IP checksum is requested */ + if (flags & FCB_TX_IP && flags & FCB_TX_CIP) { + csum |= CSUM_IP; + } + /* TODO Check the correct usage of the PHCS field of the FCB in case the NPH + * flag is on */ + + /* if packet is IP4 and TCP or UDP */ + if (flags & FCB_TX_IP && flags & FCB_TX_TUP) { + /* if UDP */ + if (flags & FCB_TX_UDP) { + /* if checksum is requested */ + if (flags & FCB_TX_CTU) { + /* do UDP checksum */ + csum |= CSUM_UDP; + } else { + /* set checksum field to 0 */ + l4_header[6] = 0; + l4_header[7] = 0; + } + } else if (flags & FCB_TX_CTU) { /* if TCP and checksum is requested */ + /* do TCP checksum */ + csum |= CSUM_TCP; + } + } + + if (csum) { + net_checksum_calculate(etsec->tx_buffer + 8, + etsec->tx_buffer_len - 8, csum); + } +} + +static void process_tx_bd(eTSEC *etsec, + eTSEC_rxtx_bd *bd) +{ + uint8_t *tmp_buff = NULL; + hwaddr tbdbth = (hwaddr)(etsec->regs[TBDBPH].value & 0xF) << 32; + + if (bd->length == 0) { + /* ERROR */ + return; + } + + if (etsec->tx_buffer_len == 0) { + /* It's the first BD */ + etsec->first_bd = *bd; + } + + /* TODO: if TxBD[TOE/UN] skip the Tx Frame 
Control Block*/ + + /* Load this Data Buffer */ + etsec->tx_buffer = g_realloc(etsec->tx_buffer, + etsec->tx_buffer_len + bd->length); + tmp_buff = etsec->tx_buffer + etsec->tx_buffer_len; + cpu_physical_memory_read(bd->bufptr + tbdbth, tmp_buff, bd->length); + + /* Update buffer length */ + etsec->tx_buffer_len += bd->length; + + + if (etsec->tx_buffer_len != 0 && (bd->flags & BD_LAST)) { + if (etsec->regs[MACCFG1].value & MACCFG1_TX_EN) { + /* MAC Transmit enabled */ + + /* Process offload Tx FCB */ + if (etsec->first_bd.flags & BD_TX_TOEUN) { + process_tx_fcb(etsec); + } + + if (etsec->first_bd.flags & BD_TX_PADCRC + || etsec->regs[MACCFG2].value & MACCFG2_PADCRC) { + + /* Padding and CRC (Padding implies CRC) */ + tx_padding_and_crc(etsec, 60); + + } else if (etsec->first_bd.flags & BD_TX_TC + || etsec->regs[MACCFG2].value & MACCFG2_CRC_EN) { + + /* Only CRC */ + /* Never add CRC in QEMU */ + } + +#if defined(HEX_DUMP) + qemu_log("eTSEC Send packet size:%d\n", etsec->tx_buffer_len); + qemu_hexdump(stderr, "", etsec->tx_buffer, etsec->tx_buffer_len); +#endif /* ETSEC_RING_DEBUG */ + + if (etsec->first_bd.flags & BD_TX_TOEUN) { + qemu_send_packet(qemu_get_queue(etsec->nic), + etsec->tx_buffer + 8, + etsec->tx_buffer_len - 8); + } else { + qemu_send_packet(qemu_get_queue(etsec->nic), + etsec->tx_buffer, + etsec->tx_buffer_len); + } + + } + + etsec->tx_buffer_len = 0; + + if (bd->flags & BD_INTERRUPT) { + ievent_set(etsec, IEVENT_TXF); + } + } else { + if (bd->flags & BD_INTERRUPT) { + ievent_set(etsec, IEVENT_TXB); + } + } + + /* Update DB flags */ + + /* Clear Ready */ + bd->flags &= ~BD_TX_READY; + + /* Clear Defer */ + bd->flags &= ~BD_TX_PREDEF; + + /* Clear Late Collision */ + bd->flags &= ~BD_TX_HFELC; + + /* Clear Retransmission Limit */ + bd->flags &= ~BD_TX_CFRL; + + /* Clear Retry Count */ + bd->flags &= ~(BD_TX_RC_MASK << BD_TX_RC_OFFSET); + + /* Clear Underrun */ + bd->flags &= ~BD_TX_TOEUN; + + /* Clear Truncation */ + bd->flags &= ~BD_TX_TR; +} + +void etsec_walk_tx_ring(eTSEC *etsec, int ring_nbr) +{ + hwaddr ring_base = 0; + hwaddr bd_addr = 0; + eTSEC_rxtx_bd bd; + uint16_t bd_flags; + + if (!(etsec->regs[MACCFG1].value & MACCFG1_TX_EN)) { + RING_DEBUG("%s: MAC Transmit not enabled\n", __func__); + return; + } + + ring_base = (hwaddr)(etsec->regs[TBASEH].value & 0xF) << 32; + ring_base += etsec->regs[TBASE0 + ring_nbr].value & ~0x7; + bd_addr = etsec->regs[TBPTR0 + ring_nbr].value & ~0x7; + + do { + read_buffer_descriptor(etsec, bd_addr, &bd); + +#ifdef DEBUG_BD + print_bd(bd, + eTSEC_TRANSMIT, + (bd_addr - ring_base) / sizeof(eTSEC_rxtx_bd)); + +#endif /* DEBUG_BD */ + + /* Save flags before BD update */ + bd_flags = bd.flags; + + if (!(bd_flags & BD_TX_READY)) { + break; + } + + process_tx_bd(etsec, &bd); + /* Write back BD after update */ + write_buffer_descriptor(etsec, bd_addr, &bd); + + /* Wrap or next BD */ + if (bd_flags & BD_WRAP) { + bd_addr = ring_base; + } else { + bd_addr += sizeof(eTSEC_rxtx_bd); + } + } while (TRUE); + + /* Save the Buffer Descriptor Pointers to last bd that was not + * succesfully closed */ + etsec->regs[TBPTR0 + ring_nbr].value = bd_addr; + + /* Set transmit halt THLTx */ + etsec->regs[TSTAT].value |= 1 << (31 - ring_nbr); +} + +static void fill_rx_bd(eTSEC *etsec, + eTSEC_rxtx_bd *bd, + const uint8_t **buf, + size_t *size) +{ + uint16_t to_write; + hwaddr bufptr = bd->bufptr + + ((hwaddr)(etsec->regs[TBDBPH].value & 0xF) << 32); + uint8_t padd[etsec->rx_padding]; + uint8_t rem; + + RING_DEBUG("eTSEC fill Rx buffer @ 0x%016" HWADDR_PRIx + 
" size:%zu(padding + crc:%u) + fcb:%u\n", + bufptr, *size, etsec->rx_padding, etsec->rx_fcb_size); + + bd->length = 0; + + /* This operation will only write FCB */ + if (etsec->rx_fcb_size != 0) { + + cpu_physical_memory_write(bufptr, etsec->rx_fcb, etsec->rx_fcb_size); + + bufptr += etsec->rx_fcb_size; + bd->length += etsec->rx_fcb_size; + etsec->rx_fcb_size = 0; + + } + + /* We remove padding from the computation of to_write because it is not + * allocated in the buffer. + */ + to_write = MIN(*size - etsec->rx_padding, + etsec->regs[MRBLR].value - etsec->rx_fcb_size); + + /* This operation can only write packet data and no padding */ + if (to_write > 0) { + cpu_physical_memory_write(bufptr, *buf, to_write); + + *buf += to_write; + bufptr += to_write; + *size -= to_write; + + bd->flags &= ~BD_RX_EMPTY; + bd->length += to_write; + } + + if (*size == etsec->rx_padding) { + /* The remaining bytes are only for padding which is not actually + * allocated in the data buffer. + */ + + rem = MIN(etsec->regs[MRBLR].value - bd->length, etsec->rx_padding); + + if (rem > 0) { + memset(padd, 0x0, sizeof(padd)); + etsec->rx_padding -= rem; + *size -= rem; + bd->length += rem; + cpu_physical_memory_write(bufptr, padd, rem); + } + } +} + +static void rx_init_frame(eTSEC *etsec, const uint8_t *buf, size_t size) +{ + uint32_t fcb_size = 0; + uint8_t prsdep = (etsec->regs[RCTRL].value >> RCTRL_PRSDEP_OFFSET) + & RCTRL_PRSDEP_MASK; + + if (prsdep != 0) { + /* Prepend FCB (FCB size + RCTRL[PAL]) */ + fcb_size = 8 + ((etsec->regs[RCTRL].value >> 16) & 0x1F); + + etsec->rx_fcb_size = fcb_size; + + /* TODO: fill_FCB(etsec); */ + memset(etsec->rx_fcb, 0x0, sizeof(etsec->rx_fcb)); + + } else { + etsec->rx_fcb_size = 0; + } + + g_free(etsec->rx_buffer); + + /* Do not copy the frame for now */ + etsec->rx_buffer = (uint8_t *)buf; + etsec->rx_buffer_len = size; + + /* CRC padding (We don't have to compute the CRC) */ + etsec->rx_padding = 4; + + /* + * Ensure that payload length + CRC length is at least 802.3 + * minimum MTU size bytes long (64) + */ + if (etsec->rx_buffer_len < 60) { + etsec->rx_padding += 60 - etsec->rx_buffer_len; + } + + etsec->rx_first_in_frame = 1; + etsec->rx_remaining_data = etsec->rx_buffer_len; + RING_DEBUG("%s: rx_buffer_len:%u rx_padding+crc:%u\n", __func__, + etsec->rx_buffer_len, etsec->rx_padding); +} + +ssize_t etsec_rx_ring_write(eTSEC *etsec, const uint8_t *buf, size_t size) +{ + int ring_nbr = 0; /* Always use ring0 (no filer) */ + + if (etsec->rx_buffer_len != 0) { + RING_DEBUG("%s: We can't receive now," + " a buffer is already in the pipe\n", __func__); + return 0; + } + + if (etsec->regs[RSTAT].value & 1 << (23 - ring_nbr)) { + RING_DEBUG("%s: The ring is halted\n", __func__); + return -1; + } + + if (etsec->regs[DMACTRL].value & DMACTRL_GRS) { + RING_DEBUG("%s: Graceful receive stop\n", __func__); + return -1; + } + + if (!(etsec->regs[MACCFG1].value & MACCFG1_RX_EN)) { + RING_DEBUG("%s: MAC Receive not enabled\n", __func__); + return -1; + } + + if (!(etsec->regs[RCTRL].value & RCTRL_RSF) && (size < 60)) { + /* CRC is not in the packet yet, so short frame is below 60 bytes */ + RING_DEBUG("%s: Drop short frame\n", __func__); + return -1; + } + + rx_init_frame(etsec, buf, size); + + etsec_walk_rx_ring(etsec, ring_nbr); + + return size; +} + +void etsec_walk_rx_ring(eTSEC *etsec, int ring_nbr) +{ + hwaddr ring_base = 0; + hwaddr bd_addr = 0; + hwaddr start_bd_addr = 0; + eTSEC_rxtx_bd bd; + uint16_t bd_flags; + size_t remaining_data; + const uint8_t *buf; + uint8_t *tmp_buf; + 
size_t size; + + if (etsec->rx_buffer_len == 0) { + /* No frame to send */ + RING_DEBUG("No frame to send\n"); + return; + } + + remaining_data = etsec->rx_remaining_data + etsec->rx_padding; + buf = etsec->rx_buffer + + (etsec->rx_buffer_len - etsec->rx_remaining_data); + size = etsec->rx_buffer_len + etsec->rx_padding; + + ring_base = (hwaddr)(etsec->regs[RBASEH].value & 0xF) << 32; + ring_base += etsec->regs[RBASE0 + ring_nbr].value & ~0x7; + start_bd_addr = bd_addr = etsec->regs[RBPTR0 + ring_nbr].value & ~0x7; + + do { + read_buffer_descriptor(etsec, bd_addr, &bd); + +#ifdef DEBUG_BD + print_bd(bd, + eTSEC_RECEIVE, + (bd_addr - ring_base) / sizeof(eTSEC_rxtx_bd)); + +#endif /* DEBUG_BD */ + + /* Save flags before BD update */ + bd_flags = bd.flags; + + if (bd_flags & BD_RX_EMPTY) { + fill_rx_bd(etsec, &bd, &buf, &remaining_data); + + if (etsec->rx_first_in_frame) { + bd.flags |= BD_RX_FIRST; + etsec->rx_first_in_frame = 0; + etsec->rx_first_bd = bd; + } + + /* Last in frame */ + if (remaining_data == 0) { + + /* Clear flags */ + + bd.flags &= ~0x7ff; + + bd.flags |= BD_LAST; + + /* NOTE: non-octet aligned frame is impossible in qemu */ + + if (size >= etsec->regs[MAXFRM].value) { + /* frame length violation */ + qemu_log("%s frame length violation: size:%zu MAXFRM:%d\n", + __func__, size, etsec->regs[MAXFRM].value); + + bd.flags |= BD_RX_LG; + } + + if (size < 64) { + /* Short frame */ + bd.flags |= BD_RX_SH; + } + + /* TODO: Broadcast and Multicast */ + + if (bd.flags & BD_INTERRUPT) { + /* Set RXFx */ + etsec->regs[RSTAT].value |= 1 << (7 - ring_nbr); + + /* Set IEVENT */ + ievent_set(etsec, IEVENT_RXF); + } + + } else { + if (bd.flags & BD_INTERRUPT) { + /* Set IEVENT */ + ievent_set(etsec, IEVENT_RXB); + } + } + + /* Write back BD after update */ + write_buffer_descriptor(etsec, bd_addr, &bd); + } + + /* Wrap or next BD */ + if (bd_flags & BD_WRAP) { + bd_addr = ring_base; + } else { + bd_addr += sizeof(eTSEC_rxtx_bd); + } + } while (remaining_data != 0 + && (bd_flags & BD_RX_EMPTY) + && bd_addr != start_bd_addr); + + /* Reset ring ptr */ + etsec->regs[RBPTR0 + ring_nbr].value = bd_addr; + + /* The frame is too large to fit in the Rx ring */ + if (remaining_data > 0) { + + /* Set RSTAT[QHLTx] */ + etsec->regs[RSTAT].value |= 1 << (23 - ring_nbr); + + /* Save remaining data to send the end of the frame when the ring will + * be restarted + */ + etsec->rx_remaining_data = remaining_data; + + /* Copy the frame */ + tmp_buf = g_malloc(size); + memcpy(tmp_buf, etsec->rx_buffer, size); + etsec->rx_buffer = tmp_buf; + + RING_DEBUG("no empty RxBD available any more\n"); + } else { + etsec->rx_buffer_len = 0; + etsec->rx_buffer = NULL; + if (etsec->need_flush) { + qemu_flush_queued_packets(qemu_get_queue(etsec->nic)); + } + } + + RING_DEBUG("eTSEC End of ring_write: remaining_data:%zu\n", remaining_data); +} diff --git a/hw/net/ftgmac100.c b/hw/net/ftgmac100.c new file mode 100644 index 000000000..25685ba3a --- /dev/null +++ b/hw/net/ftgmac100.c @@ -0,0 +1,1344 @@ +/* + * Faraday FTGMAC100 Gigabit Ethernet + * + * Copyright (C) 2016-2017, IBM Corporation. + * + * Based on Coldfire Fast Ethernet Controller emulation. + * + * Copyright (c) 2007 CodeSourcery. + * + * This code is licensed under the GPL version 2 or later. See the + * COPYING file in the top-level directory. 
+ */
+
+#include "qemu/osdep.h"
+#include "hw/irq.h"
+#include "hw/net/ftgmac100.h"
+#include "sysemu/dma.h"
+#include "qapi/error.h"
+#include "qemu/log.h"
+#include "qemu/module.h"
+#include "net/checksum.h"
+#include "net/eth.h"
+#include "hw/net/mii.h"
+#include "hw/qdev-properties.h"
+#include "migration/vmstate.h"
+
+/* For crc32 */
+#include <zlib.h>
+
+/*
+ * FTGMAC100 registers
+ */
+#define FTGMAC100_ISR             0x00
+#define FTGMAC100_IER             0x04
+#define FTGMAC100_MAC_MADR        0x08
+#define FTGMAC100_MAC_LADR        0x0c
+#define FTGMAC100_MATH0           0x10
+#define FTGMAC100_MATH1           0x14
+#define FTGMAC100_NPTXPD          0x18
+#define FTGMAC100_RXPD            0x1C
+#define FTGMAC100_NPTXR_BADR      0x20
+#define FTGMAC100_RXR_BADR        0x24
+#define FTGMAC100_HPTXPD          0x28
+#define FTGMAC100_HPTXR_BADR      0x2c
+#define FTGMAC100_ITC             0x30
+#define FTGMAC100_APTC            0x34
+#define FTGMAC100_DBLAC           0x38
+#define FTGMAC100_REVR            0x40
+#define FTGMAC100_FEAR1           0x44
+#define FTGMAC100_RBSR            0x4c
+#define FTGMAC100_TPAFCR          0x48
+
+#define FTGMAC100_MACCR           0x50
+#define FTGMAC100_MACSR           0x54
+#define FTGMAC100_PHYCR           0x60
+#define FTGMAC100_PHYDATA         0x64
+#define FTGMAC100_FCR             0x68
+
+/*
+ * Interrupt status register & interrupt enable register
+ */
+#define FTGMAC100_INT_RPKT_BUF    (1 << 0)
+#define FTGMAC100_INT_RPKT_FIFO   (1 << 1)
+#define FTGMAC100_INT_NO_RXBUF    (1 << 2)
+#define FTGMAC100_INT_RPKT_LOST   (1 << 3)
+#define FTGMAC100_INT_XPKT_ETH    (1 << 4)
+#define FTGMAC100_INT_XPKT_FIFO   (1 << 5)
+#define FTGMAC100_INT_NO_NPTXBUF  (1 << 6)
+#define FTGMAC100_INT_XPKT_LOST   (1 << 7)
+#define FTGMAC100_INT_AHB_ERR     (1 << 8)
+#define FTGMAC100_INT_PHYSTS_CHG  (1 << 9)
+#define FTGMAC100_INT_NO_HPTXBUF  (1 << 10)
+
+/*
+ * Automatic polling timer control register
+ */
+#define FTGMAC100_APTC_RXPOLL_CNT(x)      ((x) & 0xf)
+#define FTGMAC100_APTC_RXPOLL_TIME_SEL    (1 << 4)
+#define FTGMAC100_APTC_TXPOLL_CNT(x)      (((x) >> 8) & 0xf)
+#define FTGMAC100_APTC_TXPOLL_TIME_SEL    (1 << 12)
+
+/*
+ * DMA burst length and arbitration control register
+ */
+#define FTGMAC100_DBLAC_RXBURST_SIZE(x)   (((x) >> 8) & 0x3)
+#define FTGMAC100_DBLAC_TXBURST_SIZE(x)   (((x) >> 10) & 0x3)
+#define FTGMAC100_DBLAC_RXDES_SIZE(x)     ((((x) >> 12) & 0xf) * 8)
+#define FTGMAC100_DBLAC_TXDES_SIZE(x)     ((((x) >> 16) & 0xf) * 8)
+#define FTGMAC100_DBLAC_IFG_CNT(x)        (((x) >> 20) & 0x7)
+#define FTGMAC100_DBLAC_IFG_INC           (1 << 23)
+
+/*
+ * PHY control register
+ */
+#define FTGMAC100_PHYCR_MIIRD             (1 << 26)
+#define FTGMAC100_PHYCR_MIIWR             (1 << 27)
+
+#define FTGMAC100_PHYCR_DEV(x)            (((x) >> 16) & 0x1f)
+#define FTGMAC100_PHYCR_REG(x)            (((x) >> 21) & 0x1f)
+
+/*
+ * PHY data register
+ */
+#define FTGMAC100_PHYDATA_MIIWDATA(x)     ((x) & 0xffff)
+#define FTGMAC100_PHYDATA_MIIRDATA(x)     (((x) >> 16) & 0xffff)
+
+/*
+ * PHY control register - New MDC/MDIO interface
+ */
+#define FTGMAC100_PHYCR_NEW_DATA(x)   (((x) >> 16) & 0xffff)
+#define FTGMAC100_PHYCR_NEW_FIRE      (1 << 15)
+#define FTGMAC100_PHYCR_NEW_ST_22     (1 << 12)
+#define FTGMAC100_PHYCR_NEW_OP(x)     (((x) >> 10) & 3)
+#define FTGMAC100_PHYCR_NEW_OP_WRITE  0x1
+#define FTGMAC100_PHYCR_NEW_OP_READ   0x2
+#define FTGMAC100_PHYCR_NEW_DEV(x)    (((x) >> 5) & 0x1f)
+#define FTGMAC100_PHYCR_NEW_REG(x)    ((x) & 0x1f)
+
+/*
+ * Feature Register
+ */
+#define FTGMAC100_REVR_NEW_MDIO_INTERFACE (1 << 31)
+
+/*
+ * MAC control register
+ */
+#define FTGMAC100_MACCR_TXDMA_EN          (1 << 0)
+#define FTGMAC100_MACCR_RXDMA_EN          (1 << 1)
+#define FTGMAC100_MACCR_TXMAC_EN          (1 << 2)
+#define FTGMAC100_MACCR_RXMAC_EN          (1 << 3)
+#define FTGMAC100_MACCR_RM_VLAN           (1 << 4)
+#define FTGMAC100_MACCR_HPTXR_EN          (1 << 5)
+#define 
FTGMAC100_MACCR_LOOP_EN (1 << 6) +#define FTGMAC100_MACCR_ENRX_IN_HALFTX (1 << 7) +#define FTGMAC100_MACCR_FULLDUP (1 << 8) +#define FTGMAC100_MACCR_GIGA_MODE (1 << 9) +#define FTGMAC100_MACCR_CRC_APD (1 << 10) /* not needed */ +#define FTGMAC100_MACCR_RX_RUNT (1 << 12) +#define FTGMAC100_MACCR_JUMBO_LF (1 << 13) +#define FTGMAC100_MACCR_RX_ALL (1 << 14) +#define FTGMAC100_MACCR_HT_MULTI_EN (1 << 15) +#define FTGMAC100_MACCR_RX_MULTIPKT (1 << 16) +#define FTGMAC100_MACCR_RX_BROADPKT (1 << 17) +#define FTGMAC100_MACCR_DISCARD_CRCERR (1 << 18) +#define FTGMAC100_MACCR_FAST_MODE (1 << 19) +#define FTGMAC100_MACCR_SW_RST (1 << 31) + +/* + * Transmit descriptor + */ +#define FTGMAC100_TXDES0_TXBUF_SIZE(x) ((x) & 0x3fff) +#define FTGMAC100_TXDES0_EDOTR (1 << 15) +#define FTGMAC100_TXDES0_CRC_ERR (1 << 19) +#define FTGMAC100_TXDES0_LTS (1 << 28) +#define FTGMAC100_TXDES0_FTS (1 << 29) +#define FTGMAC100_TXDES0_EDOTR_ASPEED (1 << 30) +#define FTGMAC100_TXDES0_TXDMA_OWN (1 << 31) + +#define FTGMAC100_TXDES1_VLANTAG_CI(x) ((x) & 0xffff) +#define FTGMAC100_TXDES1_INS_VLANTAG (1 << 16) +#define FTGMAC100_TXDES1_TCP_CHKSUM (1 << 17) +#define FTGMAC100_TXDES1_UDP_CHKSUM (1 << 18) +#define FTGMAC100_TXDES1_IP_CHKSUM (1 << 19) +#define FTGMAC100_TXDES1_LLC (1 << 22) +#define FTGMAC100_TXDES1_TX2FIC (1 << 30) +#define FTGMAC100_TXDES1_TXIC (1 << 31) + +/* + * Receive descriptor + */ +#define FTGMAC100_RXDES0_VDBC 0x3fff +#define FTGMAC100_RXDES0_EDORR (1 << 15) +#define FTGMAC100_RXDES0_MULTICAST (1 << 16) +#define FTGMAC100_RXDES0_BROADCAST (1 << 17) +#define FTGMAC100_RXDES0_RX_ERR (1 << 18) +#define FTGMAC100_RXDES0_CRC_ERR (1 << 19) +#define FTGMAC100_RXDES0_FTL (1 << 20) +#define FTGMAC100_RXDES0_RUNT (1 << 21) +#define FTGMAC100_RXDES0_RX_ODD_NB (1 << 22) +#define FTGMAC100_RXDES0_FIFO_FULL (1 << 23) +#define FTGMAC100_RXDES0_PAUSE_OPCODE (1 << 24) +#define FTGMAC100_RXDES0_PAUSE_FRAME (1 << 25) +#define FTGMAC100_RXDES0_LRS (1 << 28) +#define FTGMAC100_RXDES0_FRS (1 << 29) +#define FTGMAC100_RXDES0_EDORR_ASPEED (1 << 30) +#define FTGMAC100_RXDES0_RXPKT_RDY (1 << 31) + +#define FTGMAC100_RXDES1_VLANTAG_CI 0xffff +#define FTGMAC100_RXDES1_PROT_MASK (0x3 << 20) +#define FTGMAC100_RXDES1_PROT_NONIP (0x0 << 20) +#define FTGMAC100_RXDES1_PROT_IP (0x1 << 20) +#define FTGMAC100_RXDES1_PROT_TCPIP (0x2 << 20) +#define FTGMAC100_RXDES1_PROT_UDPIP (0x3 << 20) +#define FTGMAC100_RXDES1_LLC (1 << 22) +#define FTGMAC100_RXDES1_DF (1 << 23) +#define FTGMAC100_RXDES1_VLANTAG_AVAIL (1 << 24) +#define FTGMAC100_RXDES1_TCP_CHKSUM_ERR (1 << 25) +#define FTGMAC100_RXDES1_UDP_CHKSUM_ERR (1 << 26) +#define FTGMAC100_RXDES1_IP_CHKSUM_ERR (1 << 27) + +/* + * Receive and transmit Buffer Descriptor + */ +typedef struct { + uint32_t des0; + uint32_t des1; + uint32_t des2; /* not used by HW */ + uint32_t des3; +} FTGMAC100Desc; + +#define FTGMAC100_DESC_ALIGNMENT 16 + +/* + * Specific RTL8211E MII Registers + */ +#define RTL8211E_MII_PHYCR 16 /* PHY Specific Control */ +#define RTL8211E_MII_PHYSR 17 /* PHY Specific Status */ +#define RTL8211E_MII_INER 18 /* Interrupt Enable */ +#define RTL8211E_MII_INSR 19 /* Interrupt Status */ +#define RTL8211E_MII_RXERC 24 /* Receive Error Counter */ +#define RTL8211E_MII_LDPSR 27 /* Link Down Power Saving */ +#define RTL8211E_MII_EPAGSR 30 /* Extension Page Select */ +#define RTL8211E_MII_PAGSEL 31 /* Page Select */ + +/* + * RTL8211E Interrupt Status + */ +#define PHY_INT_AUTONEG_ERROR (1 << 15) +#define PHY_INT_PAGE_RECV (1 << 12) +#define PHY_INT_AUTONEG_COMPLETE (1 << 11) +#define 
PHY_INT_LINK_STATUS (1 << 10) +#define PHY_INT_ERROR (1 << 9) +#define PHY_INT_DOWN (1 << 8) +#define PHY_INT_JABBER (1 << 0) + +/* + * Max frame size for the receiving buffer + */ +#define FTGMAC100_MAX_FRAME_SIZE 9220 + +/* Limits depending on the type of the frame + * + * 9216 for Jumbo frames (+ 4 for VLAN) + * 1518 for other frames (+ 4 for VLAN) + */ +static int ftgmac100_max_frame_size(FTGMAC100State *s, uint16_t proto) +{ + int max = (s->maccr & FTGMAC100_MACCR_JUMBO_LF ? 9216 : 1518); + + return max + (proto == ETH_P_VLAN ? 4 : 0); +} + +static void ftgmac100_update_irq(FTGMAC100State *s) +{ + qemu_set_irq(s->irq, s->isr & s->ier); +} + +/* + * The MII phy could raise a GPIO to the processor which in turn + * could be handled as an interrpt by the OS. + * For now we don't handle any GPIO/interrupt line, so the OS will + * have to poll for the PHY status. + */ +static void phy_update_irq(FTGMAC100State *s) +{ + ftgmac100_update_irq(s); +} + +static void phy_update_link(FTGMAC100State *s) +{ + /* Autonegotiation status mirrors link status. */ + if (qemu_get_queue(s->nic)->link_down) { + s->phy_status &= ~(MII_BMSR_LINK_ST | MII_BMSR_AN_COMP); + s->phy_int |= PHY_INT_DOWN; + } else { + s->phy_status |= (MII_BMSR_LINK_ST | MII_BMSR_AN_COMP); + s->phy_int |= PHY_INT_AUTONEG_COMPLETE; + } + phy_update_irq(s); +} + +static void ftgmac100_set_link(NetClientState *nc) +{ + phy_update_link(FTGMAC100(qemu_get_nic_opaque(nc))); +} + +static void phy_reset(FTGMAC100State *s) +{ + s->phy_status = (MII_BMSR_100TX_FD | MII_BMSR_100TX_HD | MII_BMSR_10T_FD | + MII_BMSR_10T_HD | MII_BMSR_EXTSTAT | MII_BMSR_MFPS | + MII_BMSR_AN_COMP | MII_BMSR_AUTONEG | MII_BMSR_LINK_ST | + MII_BMSR_EXTCAP); + s->phy_control = (MII_BMCR_AUTOEN | MII_BMCR_FD | MII_BMCR_SPEED1000); + s->phy_advertise = (MII_ANAR_PAUSE_ASYM | MII_ANAR_PAUSE | MII_ANAR_TXFD | + MII_ANAR_TX | MII_ANAR_10FD | MII_ANAR_10 | + MII_ANAR_CSMACD); + s->phy_int_mask = 0; + s->phy_int = 0; +} + +static uint16_t do_phy_read(FTGMAC100State *s, uint8_t reg) +{ + uint16_t val; + + switch (reg) { + case MII_BMCR: /* Basic Control */ + val = s->phy_control; + break; + case MII_BMSR: /* Basic Status */ + val = s->phy_status; + break; + case MII_PHYID1: /* ID1 */ + val = RTL8211E_PHYID1; + break; + case MII_PHYID2: /* ID2 */ + val = RTL8211E_PHYID2; + break; + case MII_ANAR: /* Auto-neg advertisement */ + val = s->phy_advertise; + break; + case MII_ANLPAR: /* Auto-neg Link Partner Ability */ + val = (MII_ANLPAR_ACK | MII_ANLPAR_PAUSE | MII_ANLPAR_TXFD | + MII_ANLPAR_TX | MII_ANLPAR_10FD | MII_ANLPAR_10 | + MII_ANLPAR_CSMACD); + break; + case MII_ANER: /* Auto-neg Expansion */ + val = MII_ANER_NWAY; + break; + case MII_CTRL1000: /* 1000BASE-T control */ + val = (MII_CTRL1000_HALF | MII_CTRL1000_FULL); + break; + case MII_STAT1000: /* 1000BASE-T status */ + val = MII_STAT1000_FULL; + break; + case RTL8211E_MII_INSR: /* Interrupt status. 
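This register is read-to-clear: the latched PHY events are returned, then cleared. 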
*/ + val = s->phy_int; + s->phy_int = 0; + phy_update_irq(s); + break; + case RTL8211E_MII_INER: /* Interrupt enable */ + val = s->phy_int_mask; + break; + case RTL8211E_MII_PHYCR: + case RTL8211E_MII_PHYSR: + case RTL8211E_MII_RXERC: + case RTL8211E_MII_LDPSR: + case RTL8211E_MII_EPAGSR: + case RTL8211E_MII_PAGSEL: + qemu_log_mask(LOG_UNIMP, "%s: reg %d not implemented\n", + __func__, reg); + val = 0; + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad address at offset %d\n", + __func__, reg); + val = 0; + break; + } + + return val; +} + +#define MII_BMCR_MASK (MII_BMCR_LOOPBACK | MII_BMCR_SPEED100 | \ + MII_BMCR_SPEED | MII_BMCR_AUTOEN | MII_BMCR_PDOWN | \ + MII_BMCR_FD | MII_BMCR_CTST) +#define MII_ANAR_MASK 0x2d7f + +static void do_phy_write(FTGMAC100State *s, uint8_t reg, uint16_t val) +{ + switch (reg) { + case MII_BMCR: /* Basic Control */ + if (val & MII_BMCR_RESET) { + phy_reset(s); + } else { + s->phy_control = val & MII_BMCR_MASK; + /* Complete autonegotiation immediately. */ + if (val & MII_BMCR_AUTOEN) { + s->phy_status |= MII_BMSR_AN_COMP; + } + } + break; + case MII_ANAR: /* Auto-neg advertisement */ + s->phy_advertise = (val & MII_ANAR_MASK) | MII_ANAR_TX; + break; + case RTL8211E_MII_INER: /* Interrupt enable */ + s->phy_int_mask = val & 0xff; + phy_update_irq(s); + break; + case RTL8211E_MII_PHYCR: + case RTL8211E_MII_PHYSR: + case RTL8211E_MII_RXERC: + case RTL8211E_MII_LDPSR: + case RTL8211E_MII_EPAGSR: + case RTL8211E_MII_PAGSEL: + qemu_log_mask(LOG_UNIMP, "%s: reg %d not implemented\n", + __func__, reg); + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad address at offset %d\n", + __func__, reg); + break; + } +} + +static void do_phy_new_ctl(FTGMAC100State *s) +{ + uint8_t reg; + uint16_t data; + + if (!(s->phycr & FTGMAC100_PHYCR_NEW_ST_22)) { + qemu_log_mask(LOG_UNIMP, "%s: unsupported ST code\n", __func__); + return; + } + + /* Nothing to do */ + if (!(s->phycr & FTGMAC100_PHYCR_NEW_FIRE)) { + return; + } + + reg = FTGMAC100_PHYCR_NEW_REG(s->phycr); + data = FTGMAC100_PHYCR_NEW_DATA(s->phycr); + + switch (FTGMAC100_PHYCR_NEW_OP(s->phycr)) { + case FTGMAC100_PHYCR_NEW_OP_WRITE: + do_phy_write(s, reg, data); + break; + case FTGMAC100_PHYCR_NEW_OP_READ: + s->phydata = do_phy_read(s, reg) & 0xffff; + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid OP code %08x\n", + __func__, s->phycr); + } + + s->phycr &= ~FTGMAC100_PHYCR_NEW_FIRE; +} + +static void do_phy_ctl(FTGMAC100State *s) +{ + uint8_t reg = FTGMAC100_PHYCR_REG(s->phycr); + + if (s->phycr & FTGMAC100_PHYCR_MIIWR) { + do_phy_write(s, reg, s->phydata & 0xffff); + s->phycr &= ~FTGMAC100_PHYCR_MIIWR; + } else if (s->phycr & FTGMAC100_PHYCR_MIIRD) { + s->phydata = do_phy_read(s, reg) << 16; + s->phycr &= ~FTGMAC100_PHYCR_MIIRD; + } else { + qemu_log_mask(LOG_GUEST_ERROR, "%s: no OP code %08x\n", + __func__, s->phycr); + } +} + +static int ftgmac100_read_bd(FTGMAC100Desc *bd, dma_addr_t addr) +{ + if (dma_memory_read(&address_space_memory, addr, bd, sizeof(*bd))) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to read descriptor @ 0x%" + HWADDR_PRIx "\n", __func__, addr); + return -1; + } + bd->des0 = le32_to_cpu(bd->des0); + bd->des1 = le32_to_cpu(bd->des1); + bd->des2 = le32_to_cpu(bd->des2); + bd->des3 = le32_to_cpu(bd->des3); + return 0; +} + +static int ftgmac100_write_bd(FTGMAC100Desc *bd, dma_addr_t addr) +{ + FTGMAC100Desc lebd; + + lebd.des0 = cpu_to_le32(bd->des0); + lebd.des1 = cpu_to_le32(bd->des1); + lebd.des2 = cpu_to_le32(bd->des2); + lebd.des3 = 
cpu_to_le32(bd->des3); + if (dma_memory_write(&address_space_memory, addr, &lebd, sizeof(lebd))) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to write descriptor @ 0x%" + HWADDR_PRIx "\n", __func__, addr); + return -1; + } + return 0; +} + +static int ftgmac100_insert_vlan(FTGMAC100State *s, int frame_size, + uint8_t vlan_tci) +{ + uint8_t *vlan_hdr = s->frame + (ETH_ALEN * 2); + uint8_t *payload = vlan_hdr + sizeof(struct vlan_header); + + if (frame_size < sizeof(struct eth_header)) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: frame too small for VLAN insertion : %d bytes\n", + __func__, frame_size); + s->isr |= FTGMAC100_INT_XPKT_LOST; + goto out; + } + + if (frame_size + sizeof(struct vlan_header) > sizeof(s->frame)) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: frame too big : %d bytes\n", + __func__, frame_size); + s->isr |= FTGMAC100_INT_XPKT_LOST; + frame_size -= sizeof(struct vlan_header); + } + + memmove(payload, vlan_hdr, frame_size - (ETH_ALEN * 2)); + stw_be_p(vlan_hdr, ETH_P_VLAN); + stw_be_p(vlan_hdr + 2, vlan_tci); + frame_size += sizeof(struct vlan_header); + +out: + return frame_size; +} + +static void ftgmac100_do_tx(FTGMAC100State *s, uint32_t tx_ring, + uint32_t tx_descriptor) +{ + int frame_size = 0; + uint8_t *ptr = s->frame; + uint32_t addr = tx_descriptor; + uint32_t flags = 0; + + while (1) { + FTGMAC100Desc bd; + int len; + + if (ftgmac100_read_bd(&bd, addr) || + ((bd.des0 & FTGMAC100_TXDES0_TXDMA_OWN) == 0)) { + /* Run out of descriptors to transmit. */ + s->isr |= FTGMAC100_INT_NO_NPTXBUF; + break; + } + + /* record transmit flags as they are valid only on the first + * segment */ + if (bd.des0 & FTGMAC100_TXDES0_FTS) { + flags = bd.des1; + } + + len = FTGMAC100_TXDES0_TXBUF_SIZE(bd.des0); + if (!len) { + /* + * 0 is an invalid size, however the HW does not raise any + * interrupt. Flag an error because the guest is buggy. + */ + qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid segment size\n", + __func__); + } + + if (frame_size + len > sizeof(s->frame)) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: frame too big : %d bytes\n", + __func__, len); + s->isr |= FTGMAC100_INT_XPKT_LOST; + len = sizeof(s->frame) - frame_size; + } + + if (dma_memory_read(&address_space_memory, bd.des3, ptr, len)) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to read packet @ 0x%x\n", + __func__, bd.des3); + s->isr |= FTGMAC100_INT_AHB_ERR; + break; + } + + ptr += len; + frame_size += len; + if (bd.des0 & FTGMAC100_TXDES0_LTS) { + int csum = 0; + + /* Check for VLAN */ + if (flags & FTGMAC100_TXDES1_INS_VLANTAG && + be16_to_cpu(PKT_GET_ETH_HDR(s->frame)->h_proto) != ETH_P_VLAN) { + frame_size = ftgmac100_insert_vlan(s, frame_size, + FTGMAC100_TXDES1_VLANTAG_CI(flags)); + } + + if (flags & FTGMAC100_TXDES1_IP_CHKSUM) { + csum |= CSUM_IP; + } + if (flags & FTGMAC100_TXDES1_TCP_CHKSUM) { + csum |= CSUM_TCP; + } + if (flags & FTGMAC100_TXDES1_UDP_CHKSUM) { + csum |= CSUM_UDP; + } + if (csum) { + net_checksum_calculate(s->frame, frame_size, csum); + } + + /* Last buffer in frame. */ + qemu_send_packet(qemu_get_queue(s->nic), s->frame, frame_size); + ptr = s->frame; + frame_size = 0; + s->isr |= FTGMAC100_INT_XPKT_ETH; + } + + if (flags & FTGMAC100_TXDES1_TX2FIC) { + s->isr |= FTGMAC100_INT_XPKT_FIFO; + } + bd.des0 &= ~FTGMAC100_TXDES0_TXDMA_OWN; + + /* Write back the modified descriptor. */ + ftgmac100_write_bd(&bd, addr); + /* Advance to the next descriptor. 
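Wrap back to the ring base when the end-of-ring (EDOTR) bit is set, otherwise step by the transmit descriptor size programmed in DBLAC. 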
*/ + if (bd.des0 & s->txdes0_edotr) { + addr = tx_ring; + } else { + addr += FTGMAC100_DBLAC_TXDES_SIZE(s->dblac); + } + } + + s->tx_descriptor = addr; + + ftgmac100_update_irq(s); +} + +static bool ftgmac100_can_receive(NetClientState *nc) +{ + FTGMAC100State *s = FTGMAC100(qemu_get_nic_opaque(nc)); + FTGMAC100Desc bd; + + if ((s->maccr & (FTGMAC100_MACCR_RXDMA_EN | FTGMAC100_MACCR_RXMAC_EN)) + != (FTGMAC100_MACCR_RXDMA_EN | FTGMAC100_MACCR_RXMAC_EN)) { + return false; + } + + if (ftgmac100_read_bd(&bd, s->rx_descriptor)) { + return false; + } + return !(bd.des0 & FTGMAC100_RXDES0_RXPKT_RDY); +} + +/* + * This is purely informative. The HW can poll the RW (and RX) ring + * buffers for available descriptors but we don't need to trigger a + * timer for that in qemu. + */ +static uint32_t ftgmac100_rxpoll(FTGMAC100State *s) +{ + /* Polling times : + * + * Speed TIME_SEL=0 TIME_SEL=1 + * + * 10 51.2 ms 819.2 ms + * 100 5.12 ms 81.92 ms + * 1000 1.024 ms 16.384 ms + */ + static const int div[] = { 20, 200, 1000 }; + + uint32_t cnt = 1024 * FTGMAC100_APTC_RXPOLL_CNT(s->aptcr); + uint32_t speed = (s->maccr & FTGMAC100_MACCR_FAST_MODE) ? 1 : 0; + + if (s->aptcr & FTGMAC100_APTC_RXPOLL_TIME_SEL) { + cnt <<= 4; + } + + if (s->maccr & FTGMAC100_MACCR_GIGA_MODE) { + speed = 2; + } + + return cnt / div[speed]; +} + +static void ftgmac100_do_reset(FTGMAC100State *s, bool sw_reset) +{ + /* Reset the FTGMAC100 */ + s->isr = 0; + s->ier = 0; + s->rx_enabled = 0; + s->rx_ring = 0; + s->rbsr = 0x640; + s->rx_descriptor = 0; + s->tx_ring = 0; + s->tx_descriptor = 0; + s->math[0] = 0; + s->math[1] = 0; + s->itc = 0; + s->aptcr = 1; + s->dblac = 0x00022f00; + s->revr = 0; + s->fear1 = 0; + s->tpafcr = 0xf1; + + if (sw_reset) { + s->maccr &= FTGMAC100_MACCR_GIGA_MODE | FTGMAC100_MACCR_FAST_MODE; + } else { + s->maccr = 0; + } + + s->phycr = 0; + s->phydata = 0; + s->fcr = 0x400; + + /* and the PHY */ + phy_reset(s); +} + +static void ftgmac100_reset(DeviceState *d) +{ + ftgmac100_do_reset(FTGMAC100(d), false); +} + +static uint64_t ftgmac100_read(void *opaque, hwaddr addr, unsigned size) +{ + FTGMAC100State *s = FTGMAC100(opaque); + + switch (addr & 0xff) { + case FTGMAC100_ISR: + return s->isr; + case FTGMAC100_IER: + return s->ier; + case FTGMAC100_MAC_MADR: + return (s->conf.macaddr.a[0] << 8) | s->conf.macaddr.a[1]; + case FTGMAC100_MAC_LADR: + return ((uint32_t) s->conf.macaddr.a[2] << 24) | + (s->conf.macaddr.a[3] << 16) | (s->conf.macaddr.a[4] << 8) | + s->conf.macaddr.a[5]; + case FTGMAC100_MATH0: + return s->math[0]; + case FTGMAC100_MATH1: + return s->math[1]; + case FTGMAC100_RXR_BADR: + return s->rx_ring; + case FTGMAC100_NPTXR_BADR: + return s->tx_ring; + case FTGMAC100_ITC: + return s->itc; + case FTGMAC100_DBLAC: + return s->dblac; + case FTGMAC100_REVR: + return s->revr; + case FTGMAC100_FEAR1: + return s->fear1; + case FTGMAC100_TPAFCR: + return s->tpafcr; + case FTGMAC100_FCR: + return s->fcr; + case FTGMAC100_MACCR: + return s->maccr; + case FTGMAC100_PHYCR: + return s->phycr; + case FTGMAC100_PHYDATA: + return s->phydata; + + /* We might want to support these one day */ + case FTGMAC100_HPTXPD: /* High Priority Transmit Poll Demand */ + case FTGMAC100_HPTXR_BADR: /* High Priority Transmit Ring Base Address */ + case FTGMAC100_MACSR: /* MAC Status Register (MACSR) */ + qemu_log_mask(LOG_UNIMP, "%s: read to unimplemented register 0x%" + HWADDR_PRIx "\n", __func__, addr); + return 0; + default: + qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad address at offset 0x%" + HWADDR_PRIx "\n", __func__, addr); 
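+ /* Unknown registers read as zero after the guest error is logged. */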
+ return 0; + } +} + +static void ftgmac100_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + FTGMAC100State *s = FTGMAC100(opaque); + + switch (addr & 0xff) { + case FTGMAC100_ISR: /* Interrupt status */ + s->isr &= ~value; + break; + case FTGMAC100_IER: /* Interrupt control */ + s->ier = value; + break; + case FTGMAC100_MAC_MADR: /* MAC */ + s->conf.macaddr.a[0] = value >> 8; + s->conf.macaddr.a[1] = value; + break; + case FTGMAC100_MAC_LADR: + s->conf.macaddr.a[2] = value >> 24; + s->conf.macaddr.a[3] = value >> 16; + s->conf.macaddr.a[4] = value >> 8; + s->conf.macaddr.a[5] = value; + break; + case FTGMAC100_MATH0: /* Multicast Address Hash Table 0 */ + s->math[0] = value; + break; + case FTGMAC100_MATH1: /* Multicast Address Hash Table 1 */ + s->math[1] = value; + break; + case FTGMAC100_ITC: /* TODO: Interrupt Timer Control */ + s->itc = value; + break; + case FTGMAC100_RXR_BADR: /* Ring buffer address */ + if (!QEMU_IS_ALIGNED(value, FTGMAC100_DESC_ALIGNMENT)) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad RX buffer alignment 0x%" + HWADDR_PRIx "\n", __func__, value); + return; + } + + s->rx_ring = value; + s->rx_descriptor = s->rx_ring; + break; + + case FTGMAC100_RBSR: /* DMA buffer size */ + s->rbsr = value; + break; + + case FTGMAC100_NPTXR_BADR: /* Transmit buffer address */ + if (!QEMU_IS_ALIGNED(value, FTGMAC100_DESC_ALIGNMENT)) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad TX buffer alignment 0x%" + HWADDR_PRIx "\n", __func__, value); + return; + } + s->tx_ring = value; + s->tx_descriptor = s->tx_ring; + break; + + case FTGMAC100_NPTXPD: /* Trigger transmit */ + if ((s->maccr & (FTGMAC100_MACCR_TXDMA_EN | FTGMAC100_MACCR_TXMAC_EN)) + == (FTGMAC100_MACCR_TXDMA_EN | FTGMAC100_MACCR_TXMAC_EN)) { + /* TODO: high priority tx ring */ + ftgmac100_do_tx(s, s->tx_ring, s->tx_descriptor); + } + if (ftgmac100_can_receive(qemu_get_queue(s->nic))) { + qemu_flush_queued_packets(qemu_get_queue(s->nic)); + } + break; + + case FTGMAC100_RXPD: /* Receive Poll Demand Register */ + if (ftgmac100_can_receive(qemu_get_queue(s->nic))) { + qemu_flush_queued_packets(qemu_get_queue(s->nic)); + } + break; + + case FTGMAC100_APTC: /* Automatic polling */ + s->aptcr = value; + + if (FTGMAC100_APTC_RXPOLL_CNT(s->aptcr)) { + ftgmac100_rxpoll(s); + } + + if (FTGMAC100_APTC_TXPOLL_CNT(s->aptcr)) { + qemu_log_mask(LOG_UNIMP, "%s: no transmit polling\n", __func__); + } + break; + + case FTGMAC100_MACCR: /* MAC Device control */ + s->maccr = value; + if (value & FTGMAC100_MACCR_SW_RST) { + ftgmac100_do_reset(s, true); + } + + if (ftgmac100_can_receive(qemu_get_queue(s->nic))) { + qemu_flush_queued_packets(qemu_get_queue(s->nic)); + } + break; + + case FTGMAC100_PHYCR: /* PHY Device control */ + s->phycr = value; + if (s->revr & FTGMAC100_REVR_NEW_MDIO_INTERFACE) { + do_phy_new_ctl(s); + } else { + do_phy_ctl(s); + } + break; + case FTGMAC100_PHYDATA: + s->phydata = value & 0xffff; + break; + case FTGMAC100_DBLAC: /* DMA Burst Length and Arbitration Control */ + if (FTGMAC100_DBLAC_TXDES_SIZE(value) < sizeof(FTGMAC100Desc)) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: transmit descriptor too small: %" PRIx64 + " bytes\n", __func__, + FTGMAC100_DBLAC_TXDES_SIZE(value)); + break; + } + if (FTGMAC100_DBLAC_RXDES_SIZE(value) < sizeof(FTGMAC100Desc)) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: receive descriptor too small : %" PRIx64 + " bytes\n", __func__, + FTGMAC100_DBLAC_RXDES_SIZE(value)); + break; + } + s->dblac = value; + break; + case FTGMAC100_REVR: /* Feature Register */ + s->revr = value; + 
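/* bit 31 (FTGMAC100_REVR_NEW_MDIO_INTERFACE) selects the new MDC/MDIO interface used on PHYCR writes */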
break; + case FTGMAC100_FEAR1: /* Feature Register 1 */ + s->fear1 = value; + break; + case FTGMAC100_TPAFCR: /* Transmit Priority Arbitration and FIFO Control */ + s->tpafcr = value; + break; + case FTGMAC100_FCR: /* Flow Control */ + s->fcr = value; + break; + + case FTGMAC100_HPTXPD: /* High Priority Transmit Poll Demand */ + case FTGMAC100_HPTXR_BADR: /* High Priority Transmit Ring Base Address */ + case FTGMAC100_MACSR: /* MAC Status Register (MACSR) */ + qemu_log_mask(LOG_UNIMP, "%s: write to unimplemented register 0x%" + HWADDR_PRIx "\n", __func__, addr); + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad address at offset 0x%" + HWADDR_PRIx "\n", __func__, addr); + break; + } + + ftgmac100_update_irq(s); +} + +static int ftgmac100_filter(FTGMAC100State *s, const uint8_t *buf, size_t len) +{ + unsigned mcast_idx; + + if (s->maccr & FTGMAC100_MACCR_RX_ALL) { + return 1; + } + + switch (get_eth_packet_type(PKT_GET_ETH_HDR(buf))) { + case ETH_PKT_BCAST: + if (!(s->maccr & FTGMAC100_MACCR_RX_BROADPKT)) { + return 0; + } + break; + case ETH_PKT_MCAST: + if (!(s->maccr & FTGMAC100_MACCR_RX_MULTIPKT)) { + if (!(s->maccr & FTGMAC100_MACCR_HT_MULTI_EN)) { + return 0; + } + + mcast_idx = net_crc32_le(buf, ETH_ALEN); + mcast_idx = (~(mcast_idx >> 2)) & 0x3f; + if (!(s->math[mcast_idx / 32] & (1 << (mcast_idx % 32)))) { + return 0; + } + } + break; + case ETH_PKT_UCAST: + if (memcmp(s->conf.macaddr.a, buf, 6)) { + return 0; + } + break; + } + + return 1; +} + +static ssize_t ftgmac100_receive(NetClientState *nc, const uint8_t *buf, + size_t len) +{ + FTGMAC100State *s = FTGMAC100(qemu_get_nic_opaque(nc)); + FTGMAC100Desc bd; + uint32_t flags = 0; + uint32_t addr; + uint32_t crc; + uint32_t buf_addr; + uint8_t *crc_ptr; + uint32_t buf_len; + size_t size = len; + uint32_t first = FTGMAC100_RXDES0_FRS; + uint16_t proto = be16_to_cpu(PKT_GET_ETH_HDR(buf)->h_proto); + int max_frame_size = ftgmac100_max_frame_size(s, proto); + + if ((s->maccr & (FTGMAC100_MACCR_RXDMA_EN | FTGMAC100_MACCR_RXMAC_EN)) + != (FTGMAC100_MACCR_RXDMA_EN | FTGMAC100_MACCR_RXMAC_EN)) { + return -1; + } + + /* TODO : Pad to minimum Ethernet frame length */ + /* handle small packets. */ + if (size < 10) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: dropped frame of %zd bytes\n", + __func__, size); + return size; + } + + if (!ftgmac100_filter(s, buf, size)) { + return size; + } + + /* 4 bytes for the CRC. */ + size += 4; + crc = cpu_to_be32(crc32(~0, buf, size)); + crc_ptr = (uint8_t *) &crc; + + /* Huge frames are truncated. */ + if (size > max_frame_size) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: frame too big : %zd bytes\n", + __func__, size); + size = max_frame_size; + flags |= FTGMAC100_RXDES0_FTL; + } + + switch (get_eth_packet_type(PKT_GET_ETH_HDR(buf))) { + case ETH_PKT_BCAST: + flags |= FTGMAC100_RXDES0_BROADCAST; + break; + case ETH_PKT_MCAST: + flags |= FTGMAC100_RXDES0_MULTICAST; + break; + case ETH_PKT_UCAST: + break; + } + + s->isr |= FTGMAC100_INT_RPKT_FIFO; + addr = s->rx_descriptor; + while (size > 0) { + if (!ftgmac100_can_receive(nc)) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: Unexpected packet\n", __func__); + return -1; + } + + if (ftgmac100_read_bd(&bd, addr) || + (bd.des0 & FTGMAC100_RXDES0_RXPKT_RDY)) { + /* No descriptors available. Bail out. */ + qemu_log_mask(LOG_GUEST_ERROR, "%s: Lost end of frame\n", + __func__); + s->isr |= FTGMAC100_INT_NO_RXBUF; + break; + } + buf_len = (size <= s->rbsr) ? size : s->rbsr; + bd.des0 |= buf_len & 0x3fff; + size -= buf_len; + + /* The last 4 bytes are the CRC. 
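When fewer than 4 bytes remain, they all belong to the FCS, so the data copy below is trimmed and the tail is filled from the computed crc value. 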
*/ + if (size < 4) { + buf_len += size - 4; + } + buf_addr = bd.des3; + if (first && proto == ETH_P_VLAN && buf_len >= 18) { + bd.des1 = lduw_be_p(buf + 14) | FTGMAC100_RXDES1_VLANTAG_AVAIL; + + if (s->maccr & FTGMAC100_MACCR_RM_VLAN) { + dma_memory_write(&address_space_memory, buf_addr, buf, 12); + dma_memory_write(&address_space_memory, buf_addr + 12, buf + 16, + buf_len - 16); + } else { + dma_memory_write(&address_space_memory, buf_addr, buf, buf_len); + } + } else { + bd.des1 = 0; + dma_memory_write(&address_space_memory, buf_addr, buf, buf_len); + } + buf += buf_len; + if (size < 4) { + dma_memory_write(&address_space_memory, buf_addr + buf_len, + crc_ptr, 4 - size); + crc_ptr += 4 - size; + } + + bd.des0 |= first | FTGMAC100_RXDES0_RXPKT_RDY; + first = 0; + if (size == 0) { + /* Last buffer in frame. */ + bd.des0 |= flags | FTGMAC100_RXDES0_LRS; + s->isr |= FTGMAC100_INT_RPKT_BUF; + } + ftgmac100_write_bd(&bd, addr); + if (bd.des0 & s->rxdes0_edorr) { + addr = s->rx_ring; + } else { + addr += FTGMAC100_DBLAC_RXDES_SIZE(s->dblac); + } + } + s->rx_descriptor = addr; + + ftgmac100_update_irq(s); + return len; +} + +static const MemoryRegionOps ftgmac100_ops = { + .read = ftgmac100_read, + .write = ftgmac100_write, + .valid.min_access_size = 4, + .valid.max_access_size = 4, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static void ftgmac100_cleanup(NetClientState *nc) +{ + FTGMAC100State *s = FTGMAC100(qemu_get_nic_opaque(nc)); + + s->nic = NULL; +} + +static NetClientInfo net_ftgmac100_info = { + .type = NET_CLIENT_DRIVER_NIC, + .size = sizeof(NICState), + .can_receive = ftgmac100_can_receive, + .receive = ftgmac100_receive, + .cleanup = ftgmac100_cleanup, + .link_status_changed = ftgmac100_set_link, +}; + +static void ftgmac100_realize(DeviceState *dev, Error **errp) +{ + FTGMAC100State *s = FTGMAC100(dev); + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + + if (s->aspeed) { + s->txdes0_edotr = FTGMAC100_TXDES0_EDOTR_ASPEED; + s->rxdes0_edorr = FTGMAC100_RXDES0_EDORR_ASPEED; + } else { + s->txdes0_edotr = FTGMAC100_TXDES0_EDOTR; + s->rxdes0_edorr = FTGMAC100_RXDES0_EDORR; + } + + memory_region_init_io(&s->iomem, OBJECT(dev), &ftgmac100_ops, s, + TYPE_FTGMAC100, 0x2000); + sysbus_init_mmio(sbd, &s->iomem); + sysbus_init_irq(sbd, &s->irq); + qemu_macaddr_default_if_unset(&s->conf.macaddr); + + s->nic = qemu_new_nic(&net_ftgmac100_info, &s->conf, + object_get_typename(OBJECT(dev)), dev->id, s); + qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a); +} + +static const VMStateDescription vmstate_ftgmac100 = { + .name = TYPE_FTGMAC100, + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(irq_state, FTGMAC100State), + VMSTATE_UINT32(isr, FTGMAC100State), + VMSTATE_UINT32(ier, FTGMAC100State), + VMSTATE_UINT32(rx_enabled, FTGMAC100State), + VMSTATE_UINT32(rx_ring, FTGMAC100State), + VMSTATE_UINT32(rbsr, FTGMAC100State), + VMSTATE_UINT32(tx_ring, FTGMAC100State), + VMSTATE_UINT32(rx_descriptor, FTGMAC100State), + VMSTATE_UINT32(tx_descriptor, FTGMAC100State), + VMSTATE_UINT32_ARRAY(math, FTGMAC100State, 2), + VMSTATE_UINT32(itc, FTGMAC100State), + VMSTATE_UINT32(aptcr, FTGMAC100State), + VMSTATE_UINT32(dblac, FTGMAC100State), + VMSTATE_UINT32(revr, FTGMAC100State), + VMSTATE_UINT32(fear1, FTGMAC100State), + VMSTATE_UINT32(tpafcr, FTGMAC100State), + VMSTATE_UINT32(maccr, FTGMAC100State), + VMSTATE_UINT32(phycr, FTGMAC100State), + VMSTATE_UINT32(phydata, FTGMAC100State), + VMSTATE_UINT32(fcr, FTGMAC100State), + VMSTATE_UINT32(phy_status, 
FTGMAC100State), + VMSTATE_UINT32(phy_control, FTGMAC100State), + VMSTATE_UINT32(phy_advertise, FTGMAC100State), + VMSTATE_UINT32(phy_int, FTGMAC100State), + VMSTATE_UINT32(phy_int_mask, FTGMAC100State), + VMSTATE_UINT32(txdes0_edotr, FTGMAC100State), + VMSTATE_UINT32(rxdes0_edorr, FTGMAC100State), + VMSTATE_END_OF_LIST() + } +}; + +static Property ftgmac100_properties[] = { + DEFINE_PROP_BOOL("aspeed", FTGMAC100State, aspeed, false), + DEFINE_NIC_PROPERTIES(FTGMAC100State, conf), + DEFINE_PROP_END_OF_LIST(), +}; + +static void ftgmac100_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->vmsd = &vmstate_ftgmac100; + dc->reset = ftgmac100_reset; + device_class_set_props(dc, ftgmac100_properties); + set_bit(DEVICE_CATEGORY_NETWORK, dc->categories); + dc->realize = ftgmac100_realize; + dc->desc = "Faraday FTGMAC100 Gigabit Ethernet emulation"; +} + +static const TypeInfo ftgmac100_info = { + .name = TYPE_FTGMAC100, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(FTGMAC100State), + .class_init = ftgmac100_class_init, +}; + +/* + * AST2600 MII controller + */ +#define ASPEED_MII_PHYCR_FIRE BIT(31) +#define ASPEED_MII_PHYCR_ST_22 BIT(28) +#define ASPEED_MII_PHYCR_OP(x) ((x) & (ASPEED_MII_PHYCR_OP_WRITE | \ + ASPEED_MII_PHYCR_OP_READ)) +#define ASPEED_MII_PHYCR_OP_WRITE BIT(26) +#define ASPEED_MII_PHYCR_OP_READ BIT(27) +#define ASPEED_MII_PHYCR_DATA(x) (x & 0xffff) +#define ASPEED_MII_PHYCR_PHY(x) (((x) >> 21) & 0x1f) +#define ASPEED_MII_PHYCR_REG(x) (((x) >> 16) & 0x1f) + +#define ASPEED_MII_PHYDATA_IDLE BIT(16) + +static void aspeed_mii_transition(AspeedMiiState *s, bool fire) +{ + if (fire) { + s->phycr |= ASPEED_MII_PHYCR_FIRE; + s->phydata &= ~ASPEED_MII_PHYDATA_IDLE; + } else { + s->phycr &= ~ASPEED_MII_PHYCR_FIRE; + s->phydata |= ASPEED_MII_PHYDATA_IDLE; + } +} + +static void aspeed_mii_do_phy_ctl(AspeedMiiState *s) +{ + uint8_t reg; + uint16_t data; + + if (!(s->phycr & ASPEED_MII_PHYCR_ST_22)) { + aspeed_mii_transition(s, !ASPEED_MII_PHYCR_FIRE); + qemu_log_mask(LOG_UNIMP, "%s: unsupported ST code\n", __func__); + return; + } + + /* Nothing to do */ + if (!(s->phycr & ASPEED_MII_PHYCR_FIRE)) { + return; + } + + reg = ASPEED_MII_PHYCR_REG(s->phycr); + data = ASPEED_MII_PHYCR_DATA(s->phycr); + + switch (ASPEED_MII_PHYCR_OP(s->phycr)) { + case ASPEED_MII_PHYCR_OP_WRITE: + do_phy_write(s->nic, reg, data); + break; + case ASPEED_MII_PHYCR_OP_READ: + s->phydata = (s->phydata & ~0xffff) | do_phy_read(s->nic, reg); + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid OP code %08x\n", + __func__, s->phycr); + } + + aspeed_mii_transition(s, !ASPEED_MII_PHYCR_FIRE); +} + +static uint64_t aspeed_mii_read(void *opaque, hwaddr addr, unsigned size) +{ + AspeedMiiState *s = ASPEED_MII(opaque); + + switch (addr) { + case 0x0: + return s->phycr; + case 0x4: + return s->phydata; + default: + g_assert_not_reached(); + } +} + +static void aspeed_mii_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + AspeedMiiState *s = ASPEED_MII(opaque); + + switch (addr) { + case 0x0: + s->phycr = value & ~(s->phycr & ASPEED_MII_PHYCR_FIRE); + break; + case 0x4: + s->phydata = value & ~(0xffff | ASPEED_MII_PHYDATA_IDLE); + break; + default: + g_assert_not_reached(); + } + + aspeed_mii_transition(s, !!(s->phycr & ASPEED_MII_PHYCR_FIRE)); + aspeed_mii_do_phy_ctl(s); +} + +static const MemoryRegionOps aspeed_mii_ops = { + .read = aspeed_mii_read, + .write = aspeed_mii_write, + .valid.min_access_size = 4, + .valid.max_access_size = 4, + 
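/* the MII register window only accepts 32-bit accesses */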
.endianness = DEVICE_LITTLE_ENDIAN,
+};
+
+static void aspeed_mii_reset(DeviceState *dev)
+{
+    AspeedMiiState *s = ASPEED_MII(dev);
+
+    s->phycr = 0;
+    s->phydata = 0;
+
+    aspeed_mii_transition(s, !!(s->phycr & ASPEED_MII_PHYCR_FIRE));
+};
+
+static void aspeed_mii_realize(DeviceState *dev, Error **errp)
+{
+    AspeedMiiState *s = ASPEED_MII(dev);
+    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
+
+    assert(s->nic);
+
+    memory_region_init_io(&s->iomem, OBJECT(dev), &aspeed_mii_ops, s,
+                          TYPE_ASPEED_MII, 0x8);
+    sysbus_init_mmio(sbd, &s->iomem);
+}
+
+static const VMStateDescription vmstate_aspeed_mii = {
+    .name = TYPE_ASPEED_MII,
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT32(phycr, FTGMAC100State),
+        VMSTATE_UINT32(phydata, FTGMAC100State),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+static Property aspeed_mii_properties[] = {
+    DEFINE_PROP_LINK("nic", AspeedMiiState, nic, TYPE_FTGMAC100,
+                     FTGMAC100State *),
+    DEFINE_PROP_END_OF_LIST(),
+};
+
+static void aspeed_mii_class_init(ObjectClass *klass, void *data)
+{
+    DeviceClass *dc = DEVICE_CLASS(klass);
+
+    dc->vmsd = &vmstate_aspeed_mii;
+    dc->reset = aspeed_mii_reset;
+    dc->realize = aspeed_mii_realize;
+    dc->desc = "Aspeed MII controller";
+    device_class_set_props(dc, aspeed_mii_properties);
+}
+
+static const TypeInfo aspeed_mii_info = {
+    .name = TYPE_ASPEED_MII,
+    .parent = TYPE_SYS_BUS_DEVICE,
+    .instance_size = sizeof(AspeedMiiState),
+    .class_init = aspeed_mii_class_init,
+};
+
+static void ftgmac100_register_types(void)
+{
+    type_register_static(&ftgmac100_info);
+    type_register_static(&aspeed_mii_info);
+}
+
+type_init(ftgmac100_register_types)
diff --git a/hw/net/i82596.c b/hw/net/i82596.c
new file mode 100644
index 000000000..ec21e2699
--- /dev/null
+++ b/hw/net/i82596.c
@@ -0,0 +1,754 @@
+/*
+ * QEMU Intel i82596 (Apricot) emulation
+ *
+ * Copyright (c) 2019 Helge Deller
+ * This work is licensed under the GNU GPL license version 2 or later.
+ *
+ * This software was written to be compatible with the specification:
+ * https://www.intel.com/assets/pdf/general/82596ca.pdf
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/timer.h"
+#include "net/net.h"
+#include "net/eth.h"
+#include "hw/irq.h"
+#include "hw/qdev-properties.h"
+#include "migration/vmstate.h"
+#include "qemu/module.h"
+#include "trace.h"
+#include "i82596.h"
+#include <zlib.h> /* For crc32 */
+
+#if defined(ENABLE_DEBUG)
+#define DBG(x)          x
+#else
+#define DBG(x)          do { } while (0)
+#endif
+
+#define USE_TIMER       0
+
+#define BITS(n, m) (((0xffffffffU << (31 - n)) >> (31 - n + m)) << m)
+
+#define PKT_BUF_SZ      1536
+#define MAX_MC_CNT      64
+
+#define ISCP_BUSY       0x0001
+
+#define I596_NULL       ((uint32_t)0xffffffff)
+
+#define SCB_STATUS_CX   0x8000 /* CU finished command with I bit */
+#define SCB_STATUS_FR   0x4000 /* RU finished receiving a frame */
+#define SCB_STATUS_CNA  0x2000 /* CU left active state */
+#define SCB_STATUS_RNR  0x1000 /* RU left active state */
+
+#define SCB_COMMAND_ACK_MASK \
+        (SCB_STATUS_CX | SCB_STATUS_FR | SCB_STATUS_CNA | SCB_STATUS_RNR)
+
+#define CU_IDLE         0
+#define CU_SUSPENDED    1
+#define CU_ACTIVE       2
+
+#define RX_IDLE         0
+#define RX_SUSPENDED    1
+#define RX_READY        4
+
+#define CMD_EOL         0x8000 /* The last command of the list, stop. */
+#define CMD_SUSP        0x4000 /* Suspend after doing cmd. */
+#define CMD_INTR        0x2000 /* Interrupt after doing cmd. 
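Completion then sets SCB_STATUS_CX and requests an IRQ in command_loop(). 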
*/ + +#define CMD_FLEX 0x0008 /* Enable flexible memory model */ + +enum commands { + CmdNOp = 0, CmdSASetup = 1, CmdConfigure = 2, CmdMulticastList = 3, + CmdTx = 4, CmdTDR = 5, CmdDump = 6, CmdDiagnose = 7 +}; + +#define STAT_C 0x8000 /* Set to 0 after execution */ +#define STAT_B 0x4000 /* Command being executed */ +#define STAT_OK 0x2000 /* Command executed ok */ +#define STAT_A 0x1000 /* Command aborted */ + +#define I596_EOF 0x8000 +#define SIZE_MASK 0x3fff + +#define ETHER_TYPE_LEN 2 +#define VLAN_TCI_LEN 2 +#define VLAN_HLEN (ETHER_TYPE_LEN + VLAN_TCI_LEN) + +/* various flags in the chip config registers */ +#define I596_PREFETCH (s->config[0] & 0x80) +#define I596_PROMISC (s->config[8] & 0x01) +#define I596_BC_DISABLE (s->config[8] & 0x02) /* broadcast disable */ +#define I596_NOCRC_INS (s->config[8] & 0x08) +#define I596_CRCINM (s->config[11] & 0x04) /* CRC appended */ +#define I596_MC_ALL (s->config[11] & 0x20) +#define I596_MULTIIA (s->config[13] & 0x40) + + +static uint8_t get_byte(uint32_t addr) +{ + return ldub_phys(&address_space_memory, addr); +} + +static void set_byte(uint32_t addr, uint8_t c) +{ + return stb_phys(&address_space_memory, addr, c); +} + +static uint16_t get_uint16(uint32_t addr) +{ + return lduw_be_phys(&address_space_memory, addr); +} + +static void set_uint16(uint32_t addr, uint16_t w) +{ + return stw_be_phys(&address_space_memory, addr, w); +} + +static uint32_t get_uint32(uint32_t addr) +{ + uint32_t lo = lduw_be_phys(&address_space_memory, addr); + uint32_t hi = lduw_be_phys(&address_space_memory, addr + 2); + return (hi << 16) | lo; +} + +static void set_uint32(uint32_t addr, uint32_t val) +{ + set_uint16(addr, (uint16_t) val); + set_uint16(addr + 2, val >> 16); +} + + +struct qemu_ether_header { + uint8_t ether_dhost[6]; + uint8_t ether_shost[6]; + uint16_t ether_type; +}; + +#define PRINT_PKTHDR(txt, BUF) do { \ + struct qemu_ether_header *hdr = (void *)(BUF); \ + printf(txt ": packet dhost=" MAC_FMT ", shost=" MAC_FMT ", type=0x%04x\n",\ + MAC_ARG(hdr->ether_dhost), MAC_ARG(hdr->ether_shost), \ + be16_to_cpu(hdr->ether_type)); \ +} while (0) + +static void i82596_transmit(I82596State *s, uint32_t addr) +{ + uint32_t tdb_p; /* Transmit Buffer Descriptor */ + + /* TODO: Check flexible mode */ + tdb_p = get_uint32(addr + 8); + while (tdb_p != I596_NULL) { + uint16_t size, len; + uint32_t tba; + + size = get_uint16(tdb_p); + len = size & SIZE_MASK; + tba = get_uint32(tdb_p + 8); + trace_i82596_transmit(len, tba); + + if (s->nic && len) { + assert(len <= sizeof(s->tx_buffer)); + address_space_read(&address_space_memory, tba, + MEMTXATTRS_UNSPECIFIED, s->tx_buffer, len); + DBG(PRINT_PKTHDR("Send", &s->tx_buffer)); + DBG(printf("Sending %d bytes\n", len)); + qemu_send_packet(qemu_get_queue(s->nic), s->tx_buffer, len); + } + + /* was this the last package? 
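I596_EOF in the size word marks the final transmit buffer of the frame. 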
*/ + if (size & I596_EOF) { + break; + } + + /* get next buffer pointer */ + tdb_p = get_uint32(tdb_p + 4); + } +} + +static void set_individual_address(I82596State *s, uint32_t addr) +{ + NetClientState *nc; + uint8_t *m; + + nc = qemu_get_queue(s->nic); + m = s->conf.macaddr.a; + address_space_read(&address_space_memory, addr + 8, + MEMTXATTRS_UNSPECIFIED, m, ETH_ALEN); + qemu_format_nic_info_str(nc, m); + trace_i82596_new_mac(nc->info_str); +} + +static void set_multicast_list(I82596State *s, uint32_t addr) +{ + uint16_t mc_count, i; + + memset(&s->mult[0], 0, sizeof(s->mult)); + mc_count = get_uint16(addr + 8) / ETH_ALEN; + addr += 10; + if (mc_count > MAX_MC_CNT) { + mc_count = MAX_MC_CNT; + } + for (i = 0; i < mc_count; i++) { + uint8_t multicast_addr[ETH_ALEN]; + address_space_read(&address_space_memory, addr + i * ETH_ALEN, + MEMTXATTRS_UNSPECIFIED, multicast_addr, ETH_ALEN); + DBG(printf("Add multicast entry " MAC_FMT "\n", + MAC_ARG(multicast_addr))); + unsigned mcast_idx = (net_crc32(multicast_addr, ETH_ALEN) & + BITS(7, 2)) >> 2; + assert(mcast_idx < 8 * sizeof(s->mult)); + s->mult[mcast_idx >> 3] |= (1 << (mcast_idx & 7)); + } + trace_i82596_set_multicast(mc_count); +} + +void i82596_set_link_status(NetClientState *nc) +{ + I82596State *d = qemu_get_nic_opaque(nc); + + d->lnkst = nc->link_down ? 0 : 0x8000; +} + +static void update_scb_status(I82596State *s) +{ + s->scb_status = (s->scb_status & 0xf000) + | (s->cu_status << 8) | (s->rx_status << 4); + set_uint16(s->scb, s->scb_status); +} + + +static void i82596_s_reset(I82596State *s) +{ + trace_i82596_s_reset(s); + s->scp = 0; + s->scb_status = 0; + s->cu_status = CU_IDLE; + s->rx_status = RX_SUSPENDED; + s->cmd_p = I596_NULL; + s->lnkst = 0x8000; /* initial link state: up */ + s->ca = s->ca_active = 0; + s->send_irq = 0; +} + + +static void command_loop(I82596State *s) +{ + uint16_t cmd; + uint16_t status; + uint8_t byte_cnt; + + DBG(printf("STARTING COMMAND LOOP cmd_p=%08x\n", s->cmd_p)); + + while (s->cmd_p != I596_NULL) { + /* set status */ + status = STAT_B; + set_uint16(s->cmd_p, status); + status = STAT_C | STAT_OK; /* update, but write later */ + + cmd = get_uint16(s->cmd_p + 2); + DBG(printf("Running command %04x at %08x\n", cmd, s->cmd_p)); + + switch (cmd & 0x07) { + case CmdNOp: + break; + case CmdSASetup: + set_individual_address(s, s->cmd_p); + break; + case CmdConfigure: + byte_cnt = get_byte(s->cmd_p + 8) & 0x0f; + byte_cnt = MAX(byte_cnt, 4); + byte_cnt = MIN(byte_cnt, sizeof(s->config)); + /* copy byte_cnt max. 
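The CONFIGURE command carries at most sizeof(s->config) parameter bytes; byte_cnt was clamped to the valid range just above. 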
*/ + address_space_read(&address_space_memory, s->cmd_p + 8, + MEMTXATTRS_UNSPECIFIED, s->config, byte_cnt); + /* config byte according to page 35ff */ + s->config[2] &= 0x82; /* mask valid bits */ + s->config[2] |= 0x40; + s->config[7] &= 0xf7; /* clear zero bit */ + assert(I596_NOCRC_INS == 0); /* do CRC insertion */ + s->config[10] = MAX(s->config[10], 5); /* min frame length */ + s->config[12] &= 0x40; /* only full duplex field valid */ + s->config[13] |= 0x3f; /* set ones in byte 13 */ + break; + case CmdTDR: + /* get signal LINK */ + set_uint32(s->cmd_p + 8, s->lnkst); + break; + case CmdTx: + i82596_transmit(s, s->cmd_p); + break; + case CmdMulticastList: + set_multicast_list(s, s->cmd_p); + break; + case CmdDump: + case CmdDiagnose: + printf("FIXME Command %d !!\n", cmd & 7); + assert(0); + } + + /* update status */ + set_uint16(s->cmd_p, status); + + s->cmd_p = get_uint32(s->cmd_p + 4); /* get link address */ + DBG(printf("NEXT addr would be %08x\n", s->cmd_p)); + if (s->cmd_p == 0) { + s->cmd_p = I596_NULL; + } + + /* Stop when last command of the list. */ + if (cmd & CMD_EOL) { + s->cmd_p = I596_NULL; + } + /* Suspend after doing cmd? */ + if (cmd & CMD_SUSP) { + s->cu_status = CU_SUSPENDED; + printf("FIXME SUSPEND !!\n"); + } + /* Interrupt after doing cmd? */ + if (cmd & CMD_INTR) { + s->scb_status |= SCB_STATUS_CX; + } else { + s->scb_status &= ~SCB_STATUS_CX; + } + update_scb_status(s); + + /* Interrupt after doing cmd? */ + if (cmd & CMD_INTR) { + s->send_irq = 1; + } + + if (s->cu_status != CU_ACTIVE) { + break; + } + } + DBG(printf("FINISHED COMMAND LOOP\n")); + qemu_flush_queued_packets(qemu_get_queue(s->nic)); +} + +static void i82596_flush_queue_timer(void *opaque) +{ + I82596State *s = opaque; + if (0) { + timer_del(s->flush_queue_timer); + qemu_flush_queued_packets(qemu_get_queue(s->nic)); + timer_mod(s->flush_queue_timer, + qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 1000); + } +} + +static void examine_scb(I82596State *s) +{ + uint16_t command, cuc, ruc; + + /* get the scb command word */ + command = get_uint16(s->scb + 2); + cuc = (command >> 8) & 0x7; + ruc = (command >> 4) & 0x7; + DBG(printf("MAIN COMMAND %04x cuc %02x ruc %02x\n", command, cuc, ruc)); + /* and clear the scb command word */ + set_uint16(s->scb + 2, 0); + + s->scb_status &= ~(command & SCB_COMMAND_ACK_MASK); + + switch (cuc) { + case 0: /* no change */ + break; + case 1: /* CUC_START */ + s->cu_status = CU_ACTIVE; + break; + case 4: /* CUC_ABORT */ + s->cu_status = CU_SUSPENDED; + s->scb_status |= SCB_STATUS_CNA; /* CU left active state */ + break; + default: + printf("WARNING: Unknown CUC %d!\n", cuc); + } + + switch (ruc) { + case 0: /* no change */ + break; + case 1: /* RX_START */ + case 2: /* RX_RESUME */ + s->rx_status = RX_IDLE; + if (USE_TIMER) { + timer_mod(s->flush_queue_timer, qemu_clock_get_ms( + QEMU_CLOCK_VIRTUAL) + 1000); + } + break; + case 3: /* RX_SUSPEND */ + case 4: /* RX_ABORT */ + s->rx_status = RX_SUSPENDED; + s->scb_status |= SCB_STATUS_RNR; /* RU left active state */ + break; + default: + printf("WARNING: Unknown RUC %d!\n", ruc); + } + + if (command & 0x80) { /* reset bit set? 
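Bit 7 of the SCB command word requests a chip software reset. 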
*/ + i82596_s_reset(s); + } + + /* execute commands from SCBL */ + if (s->cu_status != CU_SUSPENDED) { + if (s->cmd_p == I596_NULL) { + s->cmd_p = get_uint32(s->scb + 4); + } + } + + /* update scb status */ + update_scb_status(s); + + command_loop(s); +} + +static void signal_ca(I82596State *s) +{ + uint32_t iscp = 0; + + /* trace_i82596_channel_attention(s); */ + if (s->scp) { + /* CA after reset -> do init with new scp. */ + s->sysbus = get_byte(s->scp + 3); /* big endian */ + DBG(printf("SYSBUS = %08x\n", s->sysbus)); + if (((s->sysbus >> 1) & 0x03) != 2) { + printf("WARNING: NO LINEAR MODE !!\n"); + } + if ((s->sysbus >> 7)) { + printf("WARNING: 32BIT LINMODE IN B-STEPPING NOT SUPPORTED !!\n"); + } + iscp = get_uint32(s->scp + 8); + s->scb = get_uint32(iscp + 4); + set_byte(iscp + 1, 0); /* clear BUSY flag in iscp */ + s->scp = 0; + } + + s->ca++; /* count ca() */ + if (!s->ca_active) { + s->ca_active = 1; + while (s->ca) { + examine_scb(s); + s->ca--; + } + s->ca_active = 0; + } + + if (s->send_irq) { + s->send_irq = 0; + qemu_set_irq(s->irq, 1); + } +} + +void i82596_ioport_writew(void *opaque, uint32_t addr, uint32_t val) +{ + I82596State *s = opaque; + /* printf("i82596_ioport_writew addr=0x%08x val=0x%04x\n", addr, val); */ + switch (addr) { + case PORT_RESET: /* Reset */ + i82596_s_reset(s); + break; + case PORT_ALTSCP: + s->scp = val; + break; + case PORT_CA: + signal_ca(s); + break; + } +} + +uint32_t i82596_ioport_readw(void *opaque, uint32_t addr) +{ + return -1; +} + +void i82596_h_reset(void *opaque) +{ + I82596State *s = opaque; + + i82596_s_reset(s); +} + +bool i82596_can_receive(NetClientState *nc) +{ + I82596State *s = qemu_get_nic_opaque(nc); + + if (s->rx_status == RX_SUSPENDED) { + return false; + } + + if (!s->lnkst) { + return false; + } + + if (USE_TIMER && !timer_pending(s->flush_queue_timer)) { + return true; + } + + return true; +} + +#define MIN_BUF_SIZE 60 + +ssize_t i82596_receive(NetClientState *nc, const uint8_t *buf, size_t sz) +{ + I82596State *s = qemu_get_nic_opaque(nc); + uint32_t rfd_p; + uint32_t rbd; + uint16_t is_broadcast = 0; + size_t len = sz; /* length of data for guest (including CRC) */ + size_t bufsz = sz; /* length of data in buf */ + uint32_t crc; + uint8_t *crc_ptr; + uint8_t buf1[MIN_BUF_SIZE + VLAN_HLEN]; + static const uint8_t broadcast_macaddr[6] = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; + + DBG(printf("i82596_receive() start\n")); + + if (USE_TIMER && timer_pending(s->flush_queue_timer)) { + return 0; + } + + /* first check if receiver is enabled */ + if (s->rx_status == RX_SUSPENDED) { + trace_i82596_receive_analysis(">>> Receiving suspended"); + return -1; + } + + if (!s->lnkst) { + trace_i82596_receive_analysis(">>> Link down"); + return -1; + } + + /* Received frame smaller than configured "min frame len"? */ + if (sz < s->config[10]) { + printf("Received frame too small, %zu vs. 
%u bytes\n", + sz, s->config[10]); + return -1; + } + + DBG(printf("Received %lu bytes\n", sz)); + + if (I596_PROMISC) { + + /* promiscuous: receive all */ + trace_i82596_receive_analysis( + ">>> packet received in promiscuous mode"); + + } else { + + if (!memcmp(buf, broadcast_macaddr, 6)) { + /* broadcast address */ + if (I596_BC_DISABLE) { + trace_i82596_receive_analysis(">>> broadcast packet rejected"); + + return len; + } + + trace_i82596_receive_analysis(">>> broadcast packet received"); + is_broadcast = 1; + + } else if (buf[0] & 0x01) { + /* multicast */ + if (!I596_MC_ALL) { + trace_i82596_receive_analysis(">>> multicast packet rejected"); + + return len; + } + + int mcast_idx = (net_crc32(buf, ETH_ALEN) & BITS(7, 2)) >> 2; + assert(mcast_idx < 8 * sizeof(s->mult)); + + if (!(s->mult[mcast_idx >> 3] & (1 << (mcast_idx & 7)))) { + trace_i82596_receive_analysis(">>> multicast address mismatch"); + + return len; + } + + trace_i82596_receive_analysis(">>> multicast packet received"); + is_broadcast = 1; + + } else if (!memcmp(s->conf.macaddr.a, buf, 6)) { + + /* match */ + trace_i82596_receive_analysis( + ">>> physical address matching packet received"); + + } else { + + trace_i82596_receive_analysis(">>> unknown packet"); + + return len; + } + } + + /* if too small buffer, then expand it */ + if (len < MIN_BUF_SIZE + VLAN_HLEN) { + memcpy(buf1, buf, len); + memset(buf1 + len, 0, MIN_BUF_SIZE + VLAN_HLEN - len); + buf = buf1; + if (len < MIN_BUF_SIZE) { + len = MIN_BUF_SIZE; + } + bufsz = len; + } + + /* Calculate the ethernet checksum (4 bytes) */ + len += 4; + crc = cpu_to_be32(crc32(~0, buf, sz)); + crc_ptr = (uint8_t *) &crc; + + rfd_p = get_uint32(s->scb + 8); /* get Receive Frame Descriptor */ + assert(rfd_p && rfd_p != I596_NULL); + + /* get first Receive Buffer Descriptor Address */ + rbd = get_uint32(rfd_p + 8); + assert(rbd && rbd != I596_NULL); + + trace_i82596_receive_packet(len); + /* PRINT_PKTHDR("Receive", buf); */ + + while (len) { + uint16_t command, status; + uint32_t next_rfd; + + command = get_uint16(rfd_p + 2); + assert(command & CMD_FLEX); /* assert Flex Mode */ + /* get first Receive Buffer Descriptor Address */ + rbd = get_uint32(rfd_p + 8); + assert(get_uint16(rfd_p + 14) == 0); + + /* printf("Receive: rfd is %08x\n", rfd_p); */ + + while (len) { + uint16_t buffer_size, num; + uint32_t rba; + size_t bufcount, crccount; + + /* printf("Receive: rbd is %08x\n", rbd); */ + buffer_size = get_uint16(rbd + 12); + /* printf("buffer_size is 0x%x\n", buffer_size); */ + assert(buffer_size != 0); + + num = buffer_size & SIZE_MASK; + if (num > len) { + num = len; + } + rba = get_uint32(rbd + 8); + /* printf("rba is 0x%x\n", rba); */ + /* + * Calculate how many bytes we want from buf[] and how many + * from the CRC. 
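+ * Worked example: if len == 6 bytes are still owed to the guest and + * this buffer takes num == 4 of them, then len - num == 2 < 4, so + * bufcount = len - 4 = 2 data bytes are copied and crccount = 2 CRC + * bytes follow; the remaining two CRC bytes spill into the next RBD.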
+ */ + if ((len - num) >= 4) { + /* The whole guest buffer, we haven't hit the CRC yet */ + bufcount = num; + } else { + /* All that's left of buf[] */ + bufcount = len - 4; + } + crccount = num - bufcount; + + if (bufcount > 0) { + /* Still some of the actual data buffer to transfer */ + assert(bufsz >= bufcount); + bufsz -= bufcount; + address_space_write(&address_space_memory, rba, + MEMTXATTRS_UNSPECIFIED, buf, bufcount); + rba += bufcount; + buf += bufcount; + len -= bufcount; + } + + /* Write as much of the CRC as fits */ + if (crccount > 0) { + address_space_write(&address_space_memory, rba, + MEMTXATTRS_UNSPECIFIED, crc_ptr, crccount); + rba += crccount; + crc_ptr += crccount; + len -= crccount; + } + + num |= 0x4000; /* set F BIT */ + if (len == 0) { + num |= I596_EOF; /* set EOF BIT */ + } + set_uint16(rbd + 0, num); /* write actual count with flags */ + + /* get next rbd */ + rbd = get_uint32(rbd + 4); + /* printf("Next Receive: rbd is %08x\n", rbd); */ + + if (buffer_size & I596_EOF) /* last entry */ + break; + } + + /* Housekeeping, see pg. 18 */ + next_rfd = get_uint32(rfd_p + 4); + set_uint32(next_rfd + 8, rbd); + + status = STAT_C | STAT_OK | is_broadcast; + set_uint16(rfd_p, status); + + if (command & CMD_SUSP) { /* suspend after command? */ + s->rx_status = RX_SUSPENDED; + s->scb_status |= SCB_STATUS_RNR; /* RU left active state */ + break; + } + if (command & CMD_EOL) /* was it last Frame Descriptor? */ + break; + + assert(len == 0); + } + + assert(len == 0); + + s->scb_status |= SCB_STATUS_FR; /* set "RU finished receiving frame" bit. */ + update_scb_status(s); + + /* send IRQ that we received data */ + qemu_set_irq(s->irq, 1); + /* s->send_irq = 1; */ + + if (0) { + DBG(printf("Checking:\n")); + rfd_p = get_uint32(s->scb + 8); /* get Receive Frame Descriptor */ + DBG(printf("Next Receive: rfd is %08x\n", rfd_p)); + rfd_p = get_uint32(rfd_p + 4); /* get Next Receive Frame Descriptor */ + DBG(printf("Next Receive: rfd is %08x\n", rfd_p)); + /* get first Receive Buffer Descriptor Address */ + rbd = get_uint32(rfd_p + 8); + DBG(printf("Next Receive: rbd is %08x\n", rbd)); + } + + return sz; +} + + +const VMStateDescription vmstate_i82596 = { + .name = "i82596", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT16(lnkst, I82596State), + VMSTATE_TIMER_PTR(flush_queue_timer, I82596State), + VMSTATE_END_OF_LIST() + } +}; + +void i82596_common_init(DeviceState *dev, I82596State *s, NetClientInfo *info) +{ + if (s->conf.macaddr.a[0] == 0) { + qemu_macaddr_default_if_unset(&s->conf.macaddr); + } + s->nic = qemu_new_nic(info, &s->conf, object_get_typename(OBJECT(dev)), + dev->id, s); + qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a); + + if (USE_TIMER) { + s->flush_queue_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, + i82596_flush_queue_timer, s); + } + s->lnkst = 0x8000; /* initial link state: up */ +} diff --git a/hw/net/i82596.h b/hw/net/i82596.h new file mode 100644 index 000000000..f0bbe810e --- /dev/null +++ b/hw/net/i82596.h @@ -0,0 +1,55 @@ +#ifndef HW_I82596_H +#define HW_I82596_H + +#define I82596_IOPORT_SIZE 0x20 + +#include "exec/memory.h" +#include "exec/address-spaces.h" + +#define PORT_RESET 0x00 /* reset 82596 */ +#define PORT_SELFTEST 0x01 /* selftest */ +#define PORT_ALTSCP 0x02 /* alternate SCB address */ +#define PORT_ALTDUMP 0x03 /* Alternate DUMP address */ +#define PORT_CA 0x10 /* QEMU-internal CA signal */ + +typedef struct I82596State_st I82596State; + +struct I82596State_st { + MemoryRegion mmio; + 
MemoryRegion *as; + qemu_irq irq; + NICState *nic; + NICConf conf; + QEMUTimer *flush_queue_timer; + + hwaddr scp; /* pointer to SCP */ + uint8_t sysbus; + uint32_t scb; /* SCB */ + uint16_t scb_status; + uint8_t cu_status, rx_status; + uint16_t lnkst; + + uint32_t cmd_p; /* addr of current command */ + int ca; + int ca_active; + int send_irq; + + /* Hash register (multicast mask array, multiple individual addresses). */ + uint8_t mult[8]; + uint8_t config[14]; /* config bytes from CONFIGURE command */ + + uint8_t tx_buffer[0x4000]; +}; + +void i82596_h_reset(void *opaque); +void i82596_ioport_writew(void *opaque, uint32_t addr, uint32_t val); +uint32_t i82596_ioport_readw(void *opaque, uint32_t addr); +void i82596_ioport_writel(void *opaque, uint32_t addr, uint32_t val); +uint32_t i82596_ioport_readl(void *opaque, uint32_t addr); +uint32_t i82596_bcr_readw(I82596State *s, uint32_t rap); +ssize_t i82596_receive(NetClientState *nc, const uint8_t *buf, size_t size_); +bool i82596_can_receive(NetClientState *nc); +void i82596_set_link_status(NetClientState *nc); +void i82596_common_init(DeviceState *dev, I82596State *s, NetClientInfo *info); +extern const VMStateDescription vmstate_i82596; +#endif diff --git a/hw/net/imx_fec.c b/hw/net/imx_fec.c new file mode 100644 index 000000000..9c7035bc9 --- /dev/null +++ b/hw/net/imx_fec.c @@ -0,0 +1,1370 @@ +/* + * i.MX Fast Ethernet Controller emulation. + * + * Copyright (c) 2013 Jean-Christophe Dubois. + * + * Based on Coldfire Fast Ethernet Controller emulation. + * + * Copyright (c) 2007 CodeSourcery. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see <http://www.gnu.org/licenses/>.
*/ + +#include "qemu/osdep.h" +#include "hw/irq.h" +#include "hw/net/imx_fec.h" +#include "hw/qdev-properties.h" +#include "migration/vmstate.h" +#include "sysemu/dma.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "net/checksum.h" +#include "net/eth.h" +#include "trace.h" + +/* For crc32 */ +#include <zlib.h> + +#define IMX_MAX_DESC 1024 + +static const char *imx_default_reg_name(IMXFECState *s, uint32_t index) +{ + static char tmp[20]; + sprintf(tmp, "index %d", index); + return tmp; +} + +static const char *imx_fec_reg_name(IMXFECState *s, uint32_t index) +{ + switch (index) { + case ENET_FRBR: + return "FRBR"; + case ENET_FRSR: + return "FRSR"; + case ENET_MIIGSK_CFGR: + return "MIIGSK_CFGR"; + case ENET_MIIGSK_ENR: + return "MIIGSK_ENR"; + default: + return imx_default_reg_name(s, index); + } +} + +static const char *imx_enet_reg_name(IMXFECState *s, uint32_t index) +{ + switch (index) { + case ENET_RSFL: + return "RSFL"; + case ENET_RSEM: + return "RSEM"; + case ENET_RAEM: + return "RAEM"; + case ENET_RAFL: + return "RAFL"; + case ENET_TSEM: + return "TSEM"; + case ENET_TAEM: + return "TAEM"; + case ENET_TAFL: + return "TAFL"; + case ENET_TIPG: + return "TIPG"; + case ENET_FTRL: + return "FTRL"; + case ENET_TACC: + return "TACC"; + case ENET_RACC: + return "RACC"; + case ENET_ATCR: + return "ATCR"; + case ENET_ATVR: + return "ATVR"; + case ENET_ATOFF: + return "ATOFF"; + case ENET_ATPER: + return "ATPER"; + case ENET_ATCOR: + return "ATCOR"; + case ENET_ATINC: + return "ATINC"; + case ENET_ATSTMP: + return "ATSTMP"; + case ENET_TGSR: + return "TGSR"; + case ENET_TCSR0: + return "TCSR0"; + case ENET_TCCR0: + return "TCCR0"; + case ENET_TCSR1: + return "TCSR1"; + case ENET_TCCR1: + return "TCCR1"; + case ENET_TCSR2: + return "TCSR2"; + case ENET_TCCR2: + return "TCCR2"; + case ENET_TCSR3: + return "TCSR3"; + case ENET_TCCR3: + return "TCCR3"; + default: + return imx_default_reg_name(s, index); + } +} + +static const char *imx_eth_reg_name(IMXFECState *s, uint32_t index) +{ + switch (index) { + case ENET_EIR: + return "EIR"; + case ENET_EIMR: + return "EIMR"; + case ENET_RDAR: + return "RDAR"; + case ENET_TDAR: + return "TDAR"; + case ENET_ECR: + return "ECR"; + case ENET_MMFR: + return "MMFR"; + case ENET_MSCR: + return "MSCR"; + case ENET_MIBC: + return "MIBC"; + case ENET_RCR: + return "RCR"; + case ENET_TCR: + return "TCR"; + case ENET_PALR: + return "PALR"; + case ENET_PAUR: + return "PAUR"; + case ENET_OPD: + return "OPD"; + case ENET_IAUR: + return "IAUR"; + case ENET_IALR: + return "IALR"; + case ENET_GAUR: + return "GAUR"; + case ENET_GALR: + return "GALR"; + case ENET_TFWR: + return "TFWR"; + case ENET_RDSR: + return "RDSR"; + case ENET_TDSR: + return "TDSR"; + case ENET_MRBR: + return "MRBR"; + default: + if (s->is_fec) { + return imx_fec_reg_name(s, index); + } else { + return imx_enet_reg_name(s, index); + } + } +} + +/* + * Versions of this device with more than one TX descriptor save the + * 2nd and 3rd descriptors in a subsection, to maintain migration + * compatibility with previous versions of the device that only + * supported a single descriptor.
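+ * (The .needed callback keys on the tx-ring-num property: an instance + * created with tx-ring-num > 1 migrates tx_descriptor[1] and [2] via + * the subsection, while a single-ring instance omits it entirely.)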
*/ +static bool imx_eth_is_multi_tx_ring(void *opaque) +{ + IMXFECState *s = IMX_FEC(opaque); + + return s->tx_ring_num > 1; +} + +static const VMStateDescription vmstate_imx_eth_txdescs = { + .name = "imx.fec/txdescs", + .version_id = 1, + .minimum_version_id = 1, + .needed = imx_eth_is_multi_tx_ring, + .fields = (VMStateField[]) { + VMSTATE_UINT32(tx_descriptor[1], IMXFECState), + VMSTATE_UINT32(tx_descriptor[2], IMXFECState), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_imx_eth = { + .name = TYPE_IMX_FEC, + .version_id = 2, + .minimum_version_id = 2, + .fields = (VMStateField[]) { + VMSTATE_UINT32_ARRAY(regs, IMXFECState, ENET_MAX), + VMSTATE_UINT32(rx_descriptor, IMXFECState), + VMSTATE_UINT32(tx_descriptor[0], IMXFECState), + VMSTATE_UINT32(phy_status, IMXFECState), + VMSTATE_UINT32(phy_control, IMXFECState), + VMSTATE_UINT32(phy_advertise, IMXFECState), + VMSTATE_UINT32(phy_int, IMXFECState), + VMSTATE_UINT32(phy_int_mask, IMXFECState), + VMSTATE_END_OF_LIST() + }, + .subsections = (const VMStateDescription * []) { + &vmstate_imx_eth_txdescs, + NULL + }, +}; + +#define PHY_INT_ENERGYON (1 << 7) +#define PHY_INT_AUTONEG_COMPLETE (1 << 6) +#define PHY_INT_FAULT (1 << 5) +#define PHY_INT_DOWN (1 << 4) +#define PHY_INT_AUTONEG_LP (1 << 3) +#define PHY_INT_PARFAULT (1 << 2) +#define PHY_INT_AUTONEG_PAGE (1 << 1) + +static void imx_eth_update(IMXFECState *s); + +/* + * The MII phy could raise a GPIO to the processor which in turn + * could be handled as an interrupt by the OS. + * For now we don't handle any GPIO/interrupt line, so the OS will + * have to poll for the PHY status. + */ +static void imx_phy_update_irq(IMXFECState *s) +{ + imx_eth_update(s); +} + +static void imx_phy_update_link(IMXFECState *s) +{ + /* Autonegotiation status mirrors link status. */ + if (qemu_get_queue(s->nic)->link_down) { + trace_imx_phy_update_link("down"); + s->phy_status &= ~0x0024; + s->phy_int |= PHY_INT_DOWN; + } else { + trace_imx_phy_update_link("up"); + s->phy_status |= 0x0024; + s->phy_int |= PHY_INT_ENERGYON; + s->phy_int |= PHY_INT_AUTONEG_COMPLETE; + } + imx_phy_update_irq(s); +} + +static void imx_eth_set_link(NetClientState *nc) +{ + imx_phy_update_link(IMX_FEC(qemu_get_nic_opaque(nc))); +} + +static void imx_phy_reset(IMXFECState *s) +{ + trace_imx_phy_reset(); + + s->phy_status = 0x7809; + s->phy_control = 0x3000; + s->phy_advertise = 0x01e1; + s->phy_int_mask = 0; + s->phy_int = 0; + imx_phy_update_link(s); +} + +static uint32_t imx_phy_read(IMXFECState *s, int reg) +{ + uint32_t val; + uint32_t phy = reg / 32; + + if (phy != s->phy_num) { + trace_imx_phy_read_num(phy, s->phy_num); + return 0xffff; + } + + reg %= 32; + + switch (reg) { + case 0: /* Basic Control */ + val = s->phy_control; + break; + case 1: /* Basic Status */ + val = s->phy_status; + break; + case 2: /* ID1 */ + val = 0x0007; + break; + case 3: /* ID2 */ + val = 0xc0d1; + break; + case 4: /* Auto-neg advertisement */ + val = s->phy_advertise; + break; + case 5: /* Auto-neg Link Partner Ability */ + val = 0x0f71; + break; + case 6: /* Auto-neg Expansion */ + val = 1; + break; + case 29: /* Interrupt source.
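+ * Returning the pending bits and then zeroing s->phy_int below models + * the usual read-to-clear behaviour of a PHY interrupt source register.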
*/ + val = s->phy_int; + s->phy_int = 0; + imx_phy_update_irq(s); + break; + case 30: /* Interrupt mask */ + val = s->phy_int_mask; + break; + case 17: + case 18: + case 27: + case 31: + qemu_log_mask(LOG_UNIMP, "[%s.phy]%s: reg %d not implemented\n", + TYPE_IMX_FEC, __func__, reg); + val = 0; + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, "[%s.phy]%s: Bad address at offset %d\n", + TYPE_IMX_FEC, __func__, reg); + val = 0; + break; + } + + trace_imx_phy_read(val, phy, reg); + + return val; +} + +static void imx_phy_write(IMXFECState *s, int reg, uint32_t val) +{ + uint32_t phy = reg / 32; + + if (phy != s->phy_num) { + trace_imx_phy_write_num(phy, s->phy_num); + return; + } + + reg %= 32; + + trace_imx_phy_write(val, phy, reg); + + switch (reg) { + case 0: /* Basic Control */ + if (val & 0x8000) { + imx_phy_reset(s); + } else { + s->phy_control = val & 0x7980; + /* Complete autonegotiation immediately. */ + if (val & 0x1000) { + s->phy_status |= 0x0020; + } + } + break; + case 4: /* Auto-neg advertisement */ + s->phy_advertise = (val & 0x2d7f) | 0x80; + break; + case 30: /* Interrupt mask */ + s->phy_int_mask = val & 0xff; + imx_phy_update_irq(s); + break; + case 17: + case 18: + case 27: + case 31: + qemu_log_mask(LOG_UNIMP, "[%s.phy]%s: reg %d not implemented\n", + TYPE_IMX_FEC, __func__, reg); + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, "[%s.phy]%s: Bad address at offset %d\n", + TYPE_IMX_FEC, __func__, reg); + break; + } +} + +static void imx_fec_read_bd(IMXFECBufDesc *bd, dma_addr_t addr) +{ + dma_memory_read(&address_space_memory, addr, bd, sizeof(*bd)); + + trace_imx_fec_read_bd(addr, bd->flags, bd->length, bd->data); +} + +static void imx_fec_write_bd(IMXFECBufDesc *bd, dma_addr_t addr) +{ + dma_memory_write(&address_space_memory, addr, bd, sizeof(*bd)); +} + +static void imx_enet_read_bd(IMXENETBufDesc *bd, dma_addr_t addr) +{ + dma_memory_read(&address_space_memory, addr, bd, sizeof(*bd)); + + trace_imx_enet_read_bd(addr, bd->flags, bd->length, bd->data, + bd->option, bd->status); +} + +static void imx_enet_write_bd(IMXENETBufDesc *bd, dma_addr_t addr) +{ + dma_memory_write(&address_space_memory, addr, bd, sizeof(*bd)); +} + +static void imx_eth_update(IMXFECState *s) +{ + /* + * Previous versions of qemu had the ENET_INT_MAC and ENET_INT_TS_TIMER + * interrupts swapped. This worked with older versions of Linux (4.14 + * and older) since Linux associated both interrupt lines with Ethernet + * MAC interrupts. Specifically, + * - Linux 4.15 and later have separate interrupt handlers for the MAC and + * timer interrupts. Those versions of Linux fail with versions of QEMU + * with swapped interrupt assignments. + * - In Linux 4.14, both interrupt lines were registered with the Ethernet + * MAC interrupt handler. As a result, all versions of qemu happen to + * work, though that is accidental. + * - In Linux 4.9 and older, the timer interrupt was registered directly + * with the Ethernet MAC interrupt handler. The MAC interrupt was + * redirected to a GPIO interrupt to work around erratum ERR006687. + * This was implemented using the SOC's IOMUX block. In qemu, this GPIO + * interrupt never fired since IOMUX is currently not supported in qemu. + * Linux instead received MAC interrupts on the timer interrupt. + * As a result, qemu versions with the swapped interrupt assignment work, + * albeit accidentally, but qemu versions with the correct interrupt + * assignment fail.
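+ * (Net effect of the code below: irq[1] is raised for either MAC or + * timer events, while irq[0] is raised for MAC events only.)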
+ * + * To ensure that all versions of Linux work, generate ENET_INT_MAC + * interrupts on both interrupt lines. This should be changed if and when + * qemu supports IOMUX. + */ + if (s->regs[ENET_EIR] & s->regs[ENET_EIMR] & + (ENET_INT_MAC | ENET_INT_TS_TIMER)) { + qemu_set_irq(s->irq[1], 1); + } else { + qemu_set_irq(s->irq[1], 0); + } + + if (s->regs[ENET_EIR] & s->regs[ENET_EIMR] & ENET_INT_MAC) { + qemu_set_irq(s->irq[0], 1); + } else { + qemu_set_irq(s->irq[0], 0); + } +} + +static void imx_fec_do_tx(IMXFECState *s) +{ + int frame_size = 0, descnt = 0; + uint8_t *ptr = s->frame; + uint32_t addr = s->tx_descriptor[0]; + + while (descnt++ < IMX_MAX_DESC) { + IMXFECBufDesc bd; + int len; + + imx_fec_read_bd(&bd, addr); + if ((bd.flags & ENET_BD_R) == 0) { + + /* Ran out of descriptors to transmit. */ + trace_imx_eth_tx_bd_busy(); + + break; + } + len = bd.length; + if (frame_size + len > ENET_MAX_FRAME_SIZE) { + len = ENET_MAX_FRAME_SIZE - frame_size; + s->regs[ENET_EIR] |= ENET_INT_BABT; + } + dma_memory_read(&address_space_memory, bd.data, ptr, len); + ptr += len; + frame_size += len; + if (bd.flags & ENET_BD_L) { + /* Last buffer in frame. */ + qemu_send_packet(qemu_get_queue(s->nic), s->frame, frame_size); + ptr = s->frame; + frame_size = 0; + s->regs[ENET_EIR] |= ENET_INT_TXF; + } + s->regs[ENET_EIR] |= ENET_INT_TXB; + bd.flags &= ~ENET_BD_R; + /* Write back the modified descriptor. */ + imx_fec_write_bd(&bd, addr); + /* Advance to the next descriptor. */ + if ((bd.flags & ENET_BD_W) != 0) { + addr = s->regs[ENET_TDSR]; + } else { + addr += sizeof(bd); + } + } + + s->tx_descriptor[0] = addr; + + imx_eth_update(s); +} + +static void imx_enet_do_tx(IMXFECState *s, uint32_t index) +{ + int frame_size = 0, descnt = 0; + + uint8_t *ptr = s->frame; + uint32_t addr, int_txb, int_txf, tdsr; + size_t ring; + + switch (index) { + case ENET_TDAR: + ring = 0; + int_txb = ENET_INT_TXB; + int_txf = ENET_INT_TXF; + tdsr = ENET_TDSR; + break; + case ENET_TDAR1: + ring = 1; + int_txb = ENET_INT_TXB1; + int_txf = ENET_INT_TXF1; + tdsr = ENET_TDSR1; + break; + case ENET_TDAR2: + ring = 2; + int_txb = ENET_INT_TXB2; + int_txf = ENET_INT_TXF2; + tdsr = ENET_TDSR2; + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: bogus value for index %x\n", + __func__, index); + abort(); + break; + } + + addr = s->tx_descriptor[ring]; + + while (descnt++ < IMX_MAX_DESC) { + IMXENETBufDesc bd; + int len; + + imx_enet_read_bd(&bd, addr); + if ((bd.flags & ENET_BD_R) == 0) { + /* Ran out of descriptors to transmit. */ + + trace_imx_eth_tx_bd_busy(); + + break; + } + len = bd.length; + if (frame_size + len > ENET_MAX_FRAME_SIZE) { + len = ENET_MAX_FRAME_SIZE - frame_size; + s->regs[ENET_EIR] |= ENET_INT_BABT; + } + dma_memory_read(&address_space_memory, bd.data, ptr, len); + ptr += len; + frame_size += len; + if (bd.flags & ENET_BD_L) { + int csum = 0; + + if (bd.option & ENET_BD_PINS) { + csum |= (CSUM_TCP | CSUM_UDP); + } + if (bd.option & ENET_BD_IINS) { + csum |= CSUM_IP; + } + if (csum) { + net_checksum_calculate(s->frame, frame_size, csum); + } + + /* Last buffer in frame. */ + + qemu_send_packet(qemu_get_queue(s->nic), s->frame, frame_size); + ptr = s->frame; + + frame_size = 0; + if (bd.option & ENET_BD_TX_INT) { + s->regs[ENET_EIR] |= int_txf; + } + /* Indicate that we've updated the last buffer descriptor. */ + bd.last_buffer = ENET_BD_BDU; + } + if (bd.option & ENET_BD_TX_INT) { + s->regs[ENET_EIR] |= int_txb; + } + bd.flags &= ~ENET_BD_R; + /* Write back the modified descriptor.
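+ * Clearing ENET_BD_R above hands the descriptor back to the guest; + * the wrap bit checked below decides whether the walk continues + * contiguously or jumps back to the ring base held in ENET_TDSRn.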
*/ + imx_enet_write_bd(&bd, addr); + /* Advance to the next descriptor. */ + if ((bd.flags & ENET_BD_W) != 0) { + addr = s->regs[tdsr]; + } else { + addr += sizeof(bd); + } + } + + s->tx_descriptor[ring] = addr; + + imx_eth_update(s); +} + +static void imx_eth_do_tx(IMXFECState *s, uint32_t index) +{ + if (!s->is_fec && (s->regs[ENET_ECR] & ENET_ECR_EN1588)) { + imx_enet_do_tx(s, index); + } else { + imx_fec_do_tx(s); + } +} + +static void imx_eth_enable_rx(IMXFECState *s, bool flush) +{ + IMXFECBufDesc bd; + + imx_fec_read_bd(&bd, s->rx_descriptor); + + s->regs[ENET_RDAR] = (bd.flags & ENET_BD_E) ? ENET_RDAR_RDAR : 0; + + if (!s->regs[ENET_RDAR]) { + trace_imx_eth_rx_bd_full(); + } else if (flush) { + qemu_flush_queued_packets(qemu_get_queue(s->nic)); + } +} + +static void imx_eth_reset(DeviceState *d) +{ + IMXFECState *s = IMX_FEC(d); + + /* Reset the Device */ + memset(s->regs, 0, sizeof(s->regs)); + s->regs[ENET_ECR] = 0xf0000000; + s->regs[ENET_MIBC] = 0xc0000000; + s->regs[ENET_RCR] = 0x05ee0001; + s->regs[ENET_OPD] = 0x00010000; + + s->regs[ENET_PALR] = (s->conf.macaddr.a[0] << 24) + | (s->conf.macaddr.a[1] << 16) + | (s->conf.macaddr.a[2] << 8) + | s->conf.macaddr.a[3]; + s->regs[ENET_PAUR] = (s->conf.macaddr.a[4] << 24) + | (s->conf.macaddr.a[5] << 16) + | 0x8808; + + if (s->is_fec) { + s->regs[ENET_FRBR] = 0x00000600; + s->regs[ENET_FRSR] = 0x00000500; + s->regs[ENET_MIIGSK_ENR] = 0x00000006; + } else { + s->regs[ENET_RAEM] = 0x00000004; + s->regs[ENET_RAFL] = 0x00000004; + s->regs[ENET_TAEM] = 0x00000004; + s->regs[ENET_TAFL] = 0x00000008; + s->regs[ENET_TIPG] = 0x0000000c; + s->regs[ENET_FTRL] = 0x000007ff; + s->regs[ENET_ATPER] = 0x3b9aca00; + } + + s->rx_descriptor = 0; + memset(s->tx_descriptor, 0, sizeof(s->tx_descriptor)); + + /* We also reset the PHY */ + imx_phy_reset(s); +} + +static uint32_t imx_default_read(IMXFECState *s, uint32_t index) +{ + qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Bad register at offset 0x%" + PRIx32 "\n", TYPE_IMX_FEC, __func__, index * 4); + return 0; +} + +static uint32_t imx_fec_read(IMXFECState *s, uint32_t index) +{ + switch (index) { + case ENET_FRBR: + case ENET_FRSR: + case ENET_MIIGSK_CFGR: + case ENET_MIIGSK_ENR: + return s->regs[index]; + default: + return imx_default_read(s, index); + } +} + +static uint32_t imx_enet_read(IMXFECState *s, uint32_t index) +{ + switch (index) { + case ENET_RSFL: + case ENET_RSEM: + case ENET_RAEM: + case ENET_RAFL: + case ENET_TSEM: + case ENET_TAEM: + case ENET_TAFL: + case ENET_TIPG: + case ENET_FTRL: + case ENET_TACC: + case ENET_RACC: + case ENET_ATCR: + case ENET_ATVR: + case ENET_ATOFF: + case ENET_ATPER: + case ENET_ATCOR: + case ENET_ATINC: + case ENET_ATSTMP: + case ENET_TGSR: + case ENET_TCSR0: + case ENET_TCCR0: + case ENET_TCSR1: + case ENET_TCCR1: + case ENET_TCSR2: + case ENET_TCCR2: + case ENET_TCSR3: + case ENET_TCCR3: + return s->regs[index]; + default: + return imx_default_read(s, index); + } +} + +static uint64_t imx_eth_read(void *opaque, hwaddr offset, unsigned size) +{ + uint32_t value = 0; + IMXFECState *s = IMX_FEC(opaque); + uint32_t index = offset >> 2; + + switch (index) { + case ENET_EIR: + case ENET_EIMR: + case ENET_RDAR: + case ENET_TDAR: + case ENET_ECR: + case ENET_MMFR: + case ENET_MSCR: + case ENET_MIBC: + case ENET_RCR: + case ENET_TCR: + case ENET_PALR: + case ENET_PAUR: + case ENET_OPD: + case ENET_IAUR: + case ENET_IALR: + case ENET_GAUR: + case ENET_GALR: + case ENET_TFWR: + case ENET_RDSR: + case ENET_TDSR: + case ENET_MRBR: + value = s->regs[index]; + break; + 
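+ /* + * Registers outside the common block above differ between the FEC + * and ENET variants; dispatch on s->is_fec below. + */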
default: + if (s->is_fec) { + value = imx_fec_read(s, index); + } else { + value = imx_enet_read(s, index); + } + break; + } + + trace_imx_eth_read(index, imx_eth_reg_name(s, index), value); + + return value; +} + +static void imx_default_write(IMXFECState *s, uint32_t index, uint32_t value) +{ + qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Bad address at offset 0x%" + PRIx32 "\n", TYPE_IMX_FEC, __func__, index * 4); + return; +} + +static void imx_fec_write(IMXFECState *s, uint32_t index, uint32_t value) +{ + switch (index) { + case ENET_FRBR: + /* FRBR is read only */ + qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Register FRBR is read only\n", + TYPE_IMX_FEC, __func__); + break; + case ENET_FRSR: + s->regs[index] = (value & 0x000003fc) | 0x00000400; + break; + case ENET_MIIGSK_CFGR: + s->regs[index] = value & 0x00000053; + break; + case ENET_MIIGSK_ENR: + s->regs[index] = (value & 0x00000002) ? 0x00000006 : 0; + break; + default: + imx_default_write(s, index, value); + break; + } +} + +static void imx_enet_write(IMXFECState *s, uint32_t index, uint32_t value) +{ + switch (index) { + case ENET_RSFL: + case ENET_RSEM: + case ENET_RAEM: + case ENET_RAFL: + case ENET_TSEM: + case ENET_TAEM: + case ENET_TAFL: + s->regs[index] = value & 0x000001ff; + break; + case ENET_TIPG: + s->regs[index] = value & 0x0000001f; + break; + case ENET_FTRL: + s->regs[index] = value & 0x00003fff; + break; + case ENET_TACC: + s->regs[index] = value & 0x00000019; + break; + case ENET_RACC: + s->regs[index] = value & 0x000000C7; + break; + case ENET_ATCR: + s->regs[index] = value & 0x00002a9d; + break; + case ENET_ATVR: + case ENET_ATOFF: + case ENET_ATPER: + s->regs[index] = value; + break; + case ENET_ATSTMP: + /* ATSTMP is read only */ + qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Register ATSTMP is read only\n", + TYPE_IMX_FEC, __func__); + break; + case ENET_ATCOR: + s->regs[index] = value & 0x7fffffff; + break; + case ENET_ATINC: + s->regs[index] = value & 0x00007f7f; + break; + case ENET_TGSR: + /* implement clear timer flag */ + s->regs[index] &= ~(value & 0x0000000f); /* all bits W1C */ + break; + case ENET_TCSR0: + case ENET_TCSR1: + case ENET_TCSR2: + case ENET_TCSR3: + s->regs[index] &= ~(value & 0x00000080); /* W1C bits */ + s->regs[index] &= ~0x0000007d; /* writable fields */ + s->regs[index] |= (value & 0x0000007d); + break; + case ENET_TCCR0: + case ENET_TCCR1: + case ENET_TCCR2: + case ENET_TCCR3: + s->regs[index] = value; + break; + default: + imx_default_write(s, index, value); + break; + } +} + +static void imx_eth_write(void *opaque, hwaddr offset, uint64_t value, + unsigned size) +{ + IMXFECState *s = IMX_FEC(opaque); + const bool single_tx_ring = !imx_eth_is_multi_tx_ring(s); + uint32_t index = offset >> 2; + + trace_imx_eth_write(index, imx_eth_reg_name(s, index), value); + + switch (index) { + case ENET_EIR: + s->regs[index] &= ~value; + break; + case ENET_EIMR: + s->regs[index] = value; + break; + case ENET_RDAR: + if (s->regs[ENET_ECR] & ENET_ECR_ETHEREN) { + if (!s->regs[index]) { + imx_eth_enable_rx(s, true); + } + } else { + s->regs[index] = 0; + } + break; + case ENET_TDAR1: + case ENET_TDAR2: + if (unlikely(single_tx_ring)) { + qemu_log_mask(LOG_GUEST_ERROR, + "[%s]%s: trying to access TDAR2 or TDAR1\n", + TYPE_IMX_FEC, __func__); + return; + } + /* fall through */ + case ENET_TDAR: + if (s->regs[ENET_ECR] & ENET_ECR_ETHEREN) { + s->regs[index] = ENET_TDAR_TDAR; + imx_eth_do_tx(s, index); + } + s->regs[index] = 0; + break; + case ENET_ECR: + if (value & ENET_ECR_RESET) { + return 
imx_eth_reset(DEVICE(s)); + } + s->regs[index] = value; + if ((s->regs[index] & ENET_ECR_ETHEREN) == 0) { + s->regs[ENET_RDAR] = 0; + s->rx_descriptor = s->regs[ENET_RDSR]; + s->regs[ENET_TDAR] = 0; + s->regs[ENET_TDAR1] = 0; + s->regs[ENET_TDAR2] = 0; + s->tx_descriptor[0] = s->regs[ENET_TDSR]; + s->tx_descriptor[1] = s->regs[ENET_TDSR1]; + s->tx_descriptor[2] = s->regs[ENET_TDSR2]; + } + break; + case ENET_MMFR: + s->regs[index] = value; + if (extract32(value, 29, 1)) { + /* This is a read operation */ + s->regs[ENET_MMFR] = deposit32(s->regs[ENET_MMFR], 0, 16, + imx_phy_read(s, + extract32(value, + 18, 10))); + } else { + /* This is a write operation */ + imx_phy_write(s, extract32(value, 18, 10), extract32(value, 0, 16)); + } + /* raise the interrupt as the PHY operation is done */ + s->regs[ENET_EIR] |= ENET_INT_MII; + break; + case ENET_MSCR: + s->regs[index] = value & 0xfe; + break; + case ENET_MIBC: + /* TODO: Implement MIB. */ + s->regs[index] = (value & 0x80000000) ? 0xc0000000 : 0; + break; + case ENET_RCR: + s->regs[index] = value & 0x07ff003f; + /* TODO: Implement LOOP mode. */ + break; + case ENET_TCR: + /* We transmit immediately, so raise GRA immediately. */ + s->regs[index] = value; + if (value & 1) { + s->regs[ENET_EIR] |= ENET_INT_GRA; + } + break; + case ENET_PALR: + s->regs[index] = value; + s->conf.macaddr.a[0] = value >> 24; + s->conf.macaddr.a[1] = value >> 16; + s->conf.macaddr.a[2] = value >> 8; + s->conf.macaddr.a[3] = value; + break; + case ENET_PAUR: + s->regs[index] = (value | 0x0000ffff) & 0xffff8808; + s->conf.macaddr.a[4] = value >> 24; + s->conf.macaddr.a[5] = value >> 16; + break; + case ENET_OPD: + s->regs[index] = (value & 0x0000ffff) | 0x00010000; + break; + case ENET_IAUR: + case ENET_IALR: + case ENET_GAUR: + case ENET_GALR: + /* TODO: implement MAC hash filtering. 
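+ * A hypothetical sketch, not modelled here, mirroring the hashing the + * Linux fec driver expects: take the top six bits of + * net_crc32(mac, ETH_ALEN); bit 5 would select the upper register pair + * (GAUR/IAUR) versus the lower (GALR/IALR), and bits 0..4 the bit + * position within the selected register.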
*/ + break; + case ENET_TFWR: + if (s->is_fec) { + s->regs[index] = value & 0x3; + } else { + s->regs[index] = value & 0x13f; + } + break; + case ENET_RDSR: + if (s->is_fec) { + s->regs[index] = value & ~3; + } else { + s->regs[index] = value & ~7; + } + s->rx_descriptor = s->regs[index]; + break; + case ENET_TDSR: + if (s->is_fec) { + s->regs[index] = value & ~3; + } else { + s->regs[index] = value & ~7; + } + s->tx_descriptor[0] = s->regs[index]; + break; + case ENET_TDSR1: + if (unlikely(single_tx_ring)) { + qemu_log_mask(LOG_GUEST_ERROR, + "[%s]%s: trying to access TDSR1\n", + TYPE_IMX_FEC, __func__); + return; + } + + s->regs[index] = value & ~7; + s->tx_descriptor[1] = s->regs[index]; + break; + case ENET_TDSR2: + if (unlikely(single_tx_ring)) { + qemu_log_mask(LOG_GUEST_ERROR, + "[%s]%s: trying to access TDSR2\n", + TYPE_IMX_FEC, __func__); + return; + } + + s->regs[index] = value & ~7; + s->tx_descriptor[2] = s->regs[index]; + break; + case ENET_MRBR: + s->regs[index] = value & 0x00003ff0; + break; + default: + if (s->is_fec) { + imx_fec_write(s, index, value); + } else { + imx_enet_write(s, index, value); + } + return; + } + + imx_eth_update(s); +} + +static bool imx_eth_can_receive(NetClientState *nc) +{ + IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc)); + + return !!s->regs[ENET_RDAR]; +} + +static ssize_t imx_fec_receive(NetClientState *nc, const uint8_t *buf, + size_t len) +{ + IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc)); + IMXFECBufDesc bd; + uint32_t flags = 0; + uint32_t addr; + uint32_t crc; + uint32_t buf_addr; + uint8_t *crc_ptr; + unsigned int buf_len; + size_t size = len; + + trace_imx_fec_receive(size); + + if (!s->regs[ENET_RDAR]) { + qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Unexpected packet\n", + TYPE_IMX_FEC, __func__); + return 0; + } + + /* 4 bytes for the CRC. */ + size += 4; + crc = cpu_to_be32(crc32(~0, buf, size)); + crc_ptr = (uint8_t *) &crc; + + /* Huge frames are truncated. */ + if (size > ENET_MAX_FRAME_SIZE) { + size = ENET_MAX_FRAME_SIZE; + flags |= ENET_BD_TR | ENET_BD_LG; + } + + /* Frames larger than the user limit just set error flags. */ + if (size > (s->regs[ENET_RCR] >> 16)) { + flags |= ENET_BD_LG; + } + + addr = s->rx_descriptor; + while (size > 0) { + imx_fec_read_bd(&bd, addr); + if ((bd.flags & ENET_BD_E) == 0) { + /* No descriptors available. Bail out. */ + /* + * FIXME: This is wrong. We should probably either + * save the remainder for when more RX buffers are + * available, or flag an error. + */ + qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Lost end of frame\n", + TYPE_IMX_FEC, __func__); + break; + } + buf_len = (size <= s->regs[ENET_MRBR]) ? size : s->regs[ENET_MRBR]; + bd.length = buf_len; + size -= buf_len; + + trace_imx_fec_receive_len(addr, bd.length); + + /* The last 4 bytes are the CRC. */ + if (size < 4) { + buf_len += size - 4; + } + buf_addr = bd.data; + dma_memory_write(&address_space_memory, buf_addr, buf, buf_len); + buf += buf_len; + if (size < 4) { + dma_memory_write(&address_space_memory, buf_addr + buf_len, + crc_ptr, 4 - size); + crc_ptr += 4 - size; + } + bd.flags &= ~ENET_BD_E; + if (size == 0) { + /* Last buffer in frame. */ + bd.flags |= flags | ENET_BD_L; + + trace_imx_fec_receive_last(bd.flags); + + s->regs[ENET_EIR] |= ENET_INT_RXF; + } else { + s->regs[ENET_EIR] |= ENET_INT_RXB; + } + imx_fec_write_bd(&bd, addr); + /* Advance to the next descriptor. 
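+ * The W (wrap) bit sends the walk back to the ring base in ENET_RDSR; + * otherwise the next descriptor follows contiguously in memory.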
*/ + if ((bd.flags & ENET_BD_W) != 0) { + addr = s->regs[ENET_RDSR]; + } else { + addr += sizeof(bd); + } + } + s->rx_descriptor = addr; + imx_eth_enable_rx(s, false); + imx_eth_update(s); + return len; +} + +static ssize_t imx_enet_receive(NetClientState *nc, const uint8_t *buf, + size_t len) +{ + IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc)); + IMXENETBufDesc bd; + uint32_t flags = 0; + uint32_t addr; + uint32_t crc; + uint32_t buf_addr; + uint8_t *crc_ptr; + unsigned int buf_len; + size_t size = len; + bool shift16 = s->regs[ENET_RACC] & ENET_RACC_SHIFT16; + + trace_imx_enet_receive(size); + + if (!s->regs[ENET_RDAR]) { + qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Unexpected packet\n", + TYPE_IMX_FEC, __func__); + return 0; + } + + /* 4 bytes for the CRC. */ + size += 4; + crc = cpu_to_be32(crc32(~0, buf, size)); + crc_ptr = (uint8_t *) &crc; + + if (shift16) { + size += 2; + } + + /* Huge frames are truncated. */ + if (size > s->regs[ENET_FTRL]) { + size = s->regs[ENET_FTRL]; + flags |= ENET_BD_TR | ENET_BD_LG; + } + + /* Frames larger than the user limit just set error flags. */ + if (size > (s->regs[ENET_RCR] >> 16)) { + flags |= ENET_BD_LG; + } + + addr = s->rx_descriptor; + while (size > 0) { + imx_enet_read_bd(&bd, addr); + if ((bd.flags & ENET_BD_E) == 0) { + /* No descriptors available. Bail out. */ + /* + * FIXME: This is wrong. We should probably either + * save the remainder for when more RX buffers are + * available, or flag an error. + */ + qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Lost end of frame\n", + TYPE_IMX_FEC, __func__); + break; + } + buf_len = MIN(size, s->regs[ENET_MRBR]); + bd.length = buf_len; + size -= buf_len; + + trace_imx_enet_receive_len(addr, bd.length); + + /* The last 4 bytes are the CRC. */ + if (size < 4) { + buf_len += size - 4; + } + buf_addr = bd.data; + + if (shift16) { + /* + * If SHIFT16 bit of ENETx_RACC register is set we need to + * align the payload to 4-byte boundary. + */ + const uint8_t zeros[2] = { 0 }; + + dma_memory_write(&address_space_memory, buf_addr, + zeros, sizeof(zeros)); + + buf_addr += sizeof(zeros); + buf_len -= sizeof(zeros); + + /* We only do this once per Ethernet frame */ + shift16 = false; + } + + dma_memory_write(&address_space_memory, buf_addr, buf, buf_len); + buf += buf_len; + if (size < 4) { + dma_memory_write(&address_space_memory, buf_addr + buf_len, + crc_ptr, 4 - size); + crc_ptr += 4 - size; + } + bd.flags &= ~ENET_BD_E; + if (size == 0) { + /* Last buffer in frame. */ + bd.flags |= flags | ENET_BD_L; + + trace_imx_enet_receive_last(bd.flags); + + /* Indicate that we've updated the last buffer descriptor. */ + bd.last_buffer = ENET_BD_BDU; + if (bd.option & ENET_BD_RX_INT) { + s->regs[ENET_EIR] |= ENET_INT_RXF; + } + } else { + if (bd.option & ENET_BD_RX_INT) { + s->regs[ENET_EIR] |= ENET_INT_RXB; + } + } + imx_enet_write_bd(&bd, addr); + /* Advance to the next descriptor. 
*/ + if ((bd.flags & ENET_BD_W) != 0) { + addr = s->regs[ENET_RDSR]; + } else { + addr += sizeof(bd); + } + } + s->rx_descriptor = addr; + imx_eth_enable_rx(s, false); + imx_eth_update(s); + return len; +} + +static ssize_t imx_eth_receive(NetClientState *nc, const uint8_t *buf, + size_t len) +{ + IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc)); + + if (!s->is_fec && (s->regs[ENET_ECR] & ENET_ECR_EN1588)) { + return imx_enet_receive(nc, buf, len); + } else { + return imx_fec_receive(nc, buf, len); + } +} + +static const MemoryRegionOps imx_eth_ops = { + .read = imx_eth_read, + .write = imx_eth_write, + .valid.min_access_size = 4, + .valid.max_access_size = 4, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void imx_eth_cleanup(NetClientState *nc) +{ + IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc)); + + s->nic = NULL; +} + +static NetClientInfo imx_eth_net_info = { + .type = NET_CLIENT_DRIVER_NIC, + .size = sizeof(NICState), + .can_receive = imx_eth_can_receive, + .receive = imx_eth_receive, + .cleanup = imx_eth_cleanup, + .link_status_changed = imx_eth_set_link, +}; + + +static void imx_eth_realize(DeviceState *dev, Error **errp) +{ + IMXFECState *s = IMX_FEC(dev); + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + + memory_region_init_io(&s->iomem, OBJECT(dev), &imx_eth_ops, s, + TYPE_IMX_FEC, FSL_IMX25_FEC_SIZE); + sysbus_init_mmio(sbd, &s->iomem); + sysbus_init_irq(sbd, &s->irq[0]); + sysbus_init_irq(sbd, &s->irq[1]); + + qemu_macaddr_default_if_unset(&s->conf.macaddr); + + s->nic = qemu_new_nic(&imx_eth_net_info, &s->conf, + object_get_typename(OBJECT(dev)), + dev->id, s); + + qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a); +} + +static Property imx_eth_properties[] = { + DEFINE_NIC_PROPERTIES(IMXFECState, conf), + DEFINE_PROP_UINT32("tx-ring-num", IMXFECState, tx_ring_num, 1), + DEFINE_PROP_UINT32("phy-num", IMXFECState, phy_num, 0), + DEFINE_PROP_END_OF_LIST(), +}; + +static void imx_eth_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->vmsd = &vmstate_imx_eth; + dc->reset = imx_eth_reset; + device_class_set_props(dc, imx_eth_properties); + dc->realize = imx_eth_realize; + dc->desc = "i.MX FEC/ENET Ethernet Controller"; +} + +static void imx_fec_init(Object *obj) +{ + IMXFECState *s = IMX_FEC(obj); + + s->is_fec = true; +} + +static void imx_enet_init(Object *obj) +{ + IMXFECState *s = IMX_FEC(obj); + + s->is_fec = false; +} + +static const TypeInfo imx_fec_info = { + .name = TYPE_IMX_FEC, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(IMXFECState), + .instance_init = imx_fec_init, + .class_init = imx_eth_class_init, +}; + +static const TypeInfo imx_enet_info = { + .name = TYPE_IMX_ENET, + .parent = TYPE_IMX_FEC, + .instance_init = imx_enet_init, +}; + +static void imx_eth_register_types(void) +{ + type_register_static(&imx_fec_info); + type_register_static(&imx_enet_info); +} + +type_init(imx_eth_register_types) diff --git a/hw/net/lan9118.c b/hw/net/lan9118.c new file mode 100644 index 000000000..6aff424cb --- /dev/null +++ b/hw/net/lan9118.c @@ -0,0 +1,1417 @@ +/* + * SMSC LAN9118 Ethernet interface emulation + * + * Copyright (c) 2009 CodeSourcery, LLC. + * Written by Paul Brook + * + * This code is licensed under the GNU GPL v2 + * + * Contributions after 2012-01-13 are licensed under the terms of the + * GNU GPL, version 2 or (at your option) any later version. 
*/ + +#include "qemu/osdep.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "net/net.h" +#include "net/eth.h" +#include "hw/hw.h" +#include "hw/irq.h" +#include "hw/net/lan9118.h" +#include "hw/ptimer.h" +#include "hw/qdev-properties.h" +#include "qapi/error.h" +#include "qemu/log.h" +#include "qemu/module.h" +/* For crc32 */ +#include <zlib.h> +#include "qom/object.h" + +//#define DEBUG_LAN9118 + +#ifdef DEBUG_LAN9118 +#define DPRINTF(fmt, ...) \ +do { printf("lan9118: " fmt , ## __VA_ARGS__); } while (0) +#define BADF(fmt, ...) \ +do { hw_error("lan9118: error: " fmt , ## __VA_ARGS__);} while (0) +#else +#define DPRINTF(fmt, ...) do {} while(0) +#define BADF(fmt, ...) \ +do { fprintf(stderr, "lan9118: error: " fmt , ## __VA_ARGS__);} while (0) +#endif + +/* The tx and rx fifo ports are a range of aliased 32-bit registers */ +#define RX_DATA_FIFO_PORT_FIRST 0x00 +#define RX_DATA_FIFO_PORT_LAST 0x1f +#define TX_DATA_FIFO_PORT_FIRST 0x20 +#define TX_DATA_FIFO_PORT_LAST 0x3f + +#define RX_STATUS_FIFO_PORT 0x40 +#define RX_STATUS_FIFO_PEEK 0x44 +#define TX_STATUS_FIFO_PORT 0x48 +#define TX_STATUS_FIFO_PEEK 0x4c + +#define CSR_ID_REV 0x50 +#define CSR_IRQ_CFG 0x54 +#define CSR_INT_STS 0x58 +#define CSR_INT_EN 0x5c +#define CSR_BYTE_TEST 0x64 +#define CSR_FIFO_INT 0x68 +#define CSR_RX_CFG 0x6c +#define CSR_TX_CFG 0x70 +#define CSR_HW_CFG 0x74 +#define CSR_RX_DP_CTRL 0x78 +#define CSR_RX_FIFO_INF 0x7c +#define CSR_TX_FIFO_INF 0x80 +#define CSR_PMT_CTRL 0x84 +#define CSR_GPIO_CFG 0x88 +#define CSR_GPT_CFG 0x8c +#define CSR_GPT_CNT 0x90 +#define CSR_WORD_SWAP 0x98 +#define CSR_FREE_RUN 0x9c +#define CSR_RX_DROP 0xa0 +#define CSR_MAC_CSR_CMD 0xa4 +#define CSR_MAC_CSR_DATA 0xa8 +#define CSR_AFC_CFG 0xac +#define CSR_E2P_CMD 0xb0 +#define CSR_E2P_DATA 0xb4 + +#define E2P_CMD_MAC_ADDR_LOADED 0x100 + +/* IRQ_CFG */ +#define IRQ_INT 0x00001000 +#define IRQ_EN 0x00000100 +#define IRQ_POL 0x00000010 +#define IRQ_TYPE 0x00000001 + +/* INT_STS/INT_EN */ +#define SW_INT 0x80000000 +#define TXSTOP_INT 0x02000000 +#define RXSTOP_INT 0x01000000 +#define RXDFH_INT 0x00800000 +#define TX_IOC_INT 0x00200000 +#define RXD_INT 0x00100000 +#define GPT_INT 0x00080000 +#define PHY_INT 0x00040000 +#define PME_INT 0x00020000 +#define TXSO_INT 0x00010000 +#define RWT_INT 0x00008000 +#define RXE_INT 0x00004000 +#define TXE_INT 0x00002000 +#define TDFU_INT 0x00000800 +#define TDFO_INT 0x00000400 +#define TDFA_INT 0x00000200 +#define TSFF_INT 0x00000100 +#define TSFL_INT 0x00000080 +#define RXDF_INT 0x00000040 +#define RDFL_INT 0x00000020 +#define RSFF_INT 0x00000010 +#define RSFL_INT 0x00000008 +#define GPIO2_INT 0x00000004 +#define GPIO1_INT 0x00000002 +#define GPIO0_INT 0x00000001 +#define RESERVED_INT 0x7c001000 + +#define MAC_CR 1 +#define MAC_ADDRH 2 +#define MAC_ADDRL 3 +#define MAC_HASHH 4 +#define MAC_HASHL 5 +#define MAC_MII_ACC 6 +#define MAC_MII_DATA 7 +#define MAC_FLOW 8 +#define MAC_VLAN1 9 /* TODO */ +#define MAC_VLAN2 10 /* TODO */ +#define MAC_WUFF 11 /* TODO */ +#define MAC_WUCSR 12 /* TODO */ + +#define MAC_CR_RXALL 0x80000000 +#define MAC_CR_RCVOWN 0x00800000 +#define MAC_CR_LOOPBK 0x00200000 +#define MAC_CR_FDPX 0x00100000 +#define MAC_CR_MCPAS 0x00080000 +#define MAC_CR_PRMS 0x00040000 +#define MAC_CR_INVFILT 0x00020000 +#define MAC_CR_PASSBAD 0x00010000 +#define MAC_CR_HO 0x00008000 +#define MAC_CR_HPFILT 0x00002000 +#define MAC_CR_LCOLL 0x00001000 +#define MAC_CR_BCAST 0x00000800 +#define MAC_CR_DISRTY 0x00000400 +#define MAC_CR_PADSTR 0x00000100 +#define MAC_CR_BOLMT 0x000000c0 +#define
MAC_CR_DFCHK 0x00000020 +#define MAC_CR_TXEN 0x00000008 +#define MAC_CR_RXEN 0x00000004 +#define MAC_CR_RESERVED 0x7f404213 + +#define PHY_INT_ENERGYON 0x80 +#define PHY_INT_AUTONEG_COMPLETE 0x40 +#define PHY_INT_FAULT 0x20 +#define PHY_INT_DOWN 0x10 +#define PHY_INT_AUTONEG_LP 0x08 +#define PHY_INT_PARFAULT 0x04 +#define PHY_INT_AUTONEG_PAGE 0x02 + +#define GPT_TIMER_EN 0x20000000 + +enum tx_state { + TX_IDLE, + TX_B, + TX_DATA +}; + +typedef struct { + /* state is a tx_state but we can't put enums in VMStateDescriptions. */ + uint32_t state; + uint32_t cmd_a; + uint32_t cmd_b; + int32_t buffer_size; + int32_t offset; + int32_t pad; + int32_t fifo_used; + int32_t len; + uint8_t data[2048]; +} LAN9118Packet; + +static const VMStateDescription vmstate_lan9118_packet = { + .name = "lan9118_packet", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(state, LAN9118Packet), + VMSTATE_UINT32(cmd_a, LAN9118Packet), + VMSTATE_UINT32(cmd_b, LAN9118Packet), + VMSTATE_INT32(buffer_size, LAN9118Packet), + VMSTATE_INT32(offset, LAN9118Packet), + VMSTATE_INT32(pad, LAN9118Packet), + VMSTATE_INT32(fifo_used, LAN9118Packet), + VMSTATE_INT32(len, LAN9118Packet), + VMSTATE_UINT8_ARRAY(data, LAN9118Packet, 2048), + VMSTATE_END_OF_LIST() + } +}; + +OBJECT_DECLARE_SIMPLE_TYPE(lan9118_state, LAN9118) + +struct lan9118_state { + SysBusDevice parent_obj; + + NICState *nic; + NICConf conf; + qemu_irq irq; + MemoryRegion mmio; + ptimer_state *timer; + + uint32_t irq_cfg; + uint32_t int_sts; + uint32_t int_en; + uint32_t fifo_int; + uint32_t rx_cfg; + uint32_t tx_cfg; + uint32_t hw_cfg; + uint32_t pmt_ctrl; + uint32_t gpio_cfg; + uint32_t gpt_cfg; + uint32_t word_swap; + uint32_t free_timer_start; + uint32_t mac_cmd; + uint32_t mac_data; + uint32_t afc_cfg; + uint32_t e2p_cmd; + uint32_t e2p_data; + + uint32_t mac_cr; + uint32_t mac_hashh; + uint32_t mac_hashl; + uint32_t mac_mii_acc; + uint32_t mac_mii_data; + uint32_t mac_flow; + + uint32_t phy_status; + uint32_t phy_control; + uint32_t phy_advertise; + uint32_t phy_int; + uint32_t phy_int_mask; + + int32_t eeprom_writable; + uint8_t eeprom[128]; + + int32_t tx_fifo_size; + LAN9118Packet *txp; + LAN9118Packet tx_packet; + + int32_t tx_status_fifo_used; + int32_t tx_status_fifo_head; + uint32_t tx_status_fifo[512]; + + int32_t rx_status_fifo_size; + int32_t rx_status_fifo_used; + int32_t rx_status_fifo_head; + uint32_t rx_status_fifo[896]; + int32_t rx_fifo_size; + int32_t rx_fifo_used; + int32_t rx_fifo_head; + uint32_t rx_fifo[3360]; + int32_t rx_packet_size_head; + int32_t rx_packet_size_tail; + int32_t rx_packet_size[1024]; + + int32_t rxp_offset; + int32_t rxp_size; + int32_t rxp_pad; + + uint32_t write_word_prev_offset; + uint32_t write_word_n; + uint16_t write_word_l; + uint16_t write_word_h; + uint32_t read_word_prev_offset; + uint32_t read_word_n; + uint32_t read_long; + + uint32_t mode_16bit; +}; + +static const VMStateDescription vmstate_lan9118 = { + .name = "lan9118", + .version_id = 2, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_PTIMER(timer, lan9118_state), + VMSTATE_UINT32(irq_cfg, lan9118_state), + VMSTATE_UINT32(int_sts, lan9118_state), + VMSTATE_UINT32(int_en, lan9118_state), + VMSTATE_UINT32(fifo_int, lan9118_state), + VMSTATE_UINT32(rx_cfg, lan9118_state), + VMSTATE_UINT32(tx_cfg, lan9118_state), + VMSTATE_UINT32(hw_cfg, lan9118_state), + VMSTATE_UINT32(pmt_ctrl, lan9118_state), + VMSTATE_UINT32(gpio_cfg, lan9118_state), + VMSTATE_UINT32(gpt_cfg, lan9118_state), + 
VMSTATE_UINT32(word_swap, lan9118_state), + VMSTATE_UINT32(free_timer_start, lan9118_state), + VMSTATE_UINT32(mac_cmd, lan9118_state), + VMSTATE_UINT32(mac_data, lan9118_state), + VMSTATE_UINT32(afc_cfg, lan9118_state), + VMSTATE_UINT32(e2p_cmd, lan9118_state), + VMSTATE_UINT32(e2p_data, lan9118_state), + VMSTATE_UINT32(mac_cr, lan9118_state), + VMSTATE_UINT32(mac_hashh, lan9118_state), + VMSTATE_UINT32(mac_hashl, lan9118_state), + VMSTATE_UINT32(mac_mii_acc, lan9118_state), + VMSTATE_UINT32(mac_mii_data, lan9118_state), + VMSTATE_UINT32(mac_flow, lan9118_state), + VMSTATE_UINT32(phy_status, lan9118_state), + VMSTATE_UINT32(phy_control, lan9118_state), + VMSTATE_UINT32(phy_advertise, lan9118_state), + VMSTATE_UINT32(phy_int, lan9118_state), + VMSTATE_UINT32(phy_int_mask, lan9118_state), + VMSTATE_INT32(eeprom_writable, lan9118_state), + VMSTATE_UINT8_ARRAY(eeprom, lan9118_state, 128), + VMSTATE_INT32(tx_fifo_size, lan9118_state), + /* txp always points at tx_packet so need not be saved */ + VMSTATE_STRUCT(tx_packet, lan9118_state, 0, + vmstate_lan9118_packet, LAN9118Packet), + VMSTATE_INT32(tx_status_fifo_used, lan9118_state), + VMSTATE_INT32(tx_status_fifo_head, lan9118_state), + VMSTATE_UINT32_ARRAY(tx_status_fifo, lan9118_state, 512), + VMSTATE_INT32(rx_status_fifo_size, lan9118_state), + VMSTATE_INT32(rx_status_fifo_used, lan9118_state), + VMSTATE_INT32(rx_status_fifo_head, lan9118_state), + VMSTATE_UINT32_ARRAY(rx_status_fifo, lan9118_state, 896), + VMSTATE_INT32(rx_fifo_size, lan9118_state), + VMSTATE_INT32(rx_fifo_used, lan9118_state), + VMSTATE_INT32(rx_fifo_head, lan9118_state), + VMSTATE_UINT32_ARRAY(rx_fifo, lan9118_state, 3360), + VMSTATE_INT32(rx_packet_size_head, lan9118_state), + VMSTATE_INT32(rx_packet_size_tail, lan9118_state), + VMSTATE_INT32_ARRAY(rx_packet_size, lan9118_state, 1024), + VMSTATE_INT32(rxp_offset, lan9118_state), + VMSTATE_INT32(rxp_size, lan9118_state), + VMSTATE_INT32(rxp_pad, lan9118_state), + VMSTATE_UINT32_V(write_word_prev_offset, lan9118_state, 2), + VMSTATE_UINT32_V(write_word_n, lan9118_state, 2), + VMSTATE_UINT16_V(write_word_l, lan9118_state, 2), + VMSTATE_UINT16_V(write_word_h, lan9118_state, 2), + VMSTATE_UINT32_V(read_word_prev_offset, lan9118_state, 2), + VMSTATE_UINT32_V(read_word_n, lan9118_state, 2), + VMSTATE_UINT32_V(read_long, lan9118_state, 2), + VMSTATE_UINT32_V(mode_16bit, lan9118_state, 2), + VMSTATE_END_OF_LIST() + } +}; + +static void lan9118_update(lan9118_state *s) +{ + int level; + + /* TODO: Implement FIFO level IRQs. */ + level = (s->int_sts & s->int_en) != 0; + if (level) { + s->irq_cfg |= IRQ_INT; + } else { + s->irq_cfg &= ~IRQ_INT; + } + if ((s->irq_cfg & IRQ_EN) == 0) { + level = 0; + } + if ((s->irq_cfg & (IRQ_TYPE | IRQ_POL)) != (IRQ_TYPE | IRQ_POL)) { + /* Interrupt is active low unless we're configured as + * active-high polarity, push-pull type. 
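+ * i.e. only the push-pull, active-high combination (IRQ_TYPE and + * IRQ_POL both set) skips the inversion; every other combination + * presents the level inverted on the pin.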
+ */ + level = !level; + } + qemu_set_irq(s->irq, level); +} + +static void lan9118_mac_changed(lan9118_state *s) +{ + qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a); +} + +static void lan9118_reload_eeprom(lan9118_state *s) +{ + int i; + if (s->eeprom[0] != 0xa5) { + s->e2p_cmd &= ~E2P_CMD_MAC_ADDR_LOADED; + DPRINTF("MACADDR load failed\n"); + return; + } + for (i = 0; i < 6; i++) { + s->conf.macaddr.a[i] = s->eeprom[i + 1]; + } + s->e2p_cmd |= E2P_CMD_MAC_ADDR_LOADED; + DPRINTF("MACADDR loaded from eeprom\n"); + lan9118_mac_changed(s); +} + +static void phy_update_irq(lan9118_state *s) +{ + if (s->phy_int & s->phy_int_mask) { + s->int_sts |= PHY_INT; + } else { + s->int_sts &= ~PHY_INT; + } + lan9118_update(s); +} + +static void phy_update_link(lan9118_state *s) +{ + /* Autonegotiation status mirrors link status. */ + if (qemu_get_queue(s->nic)->link_down) { + s->phy_status &= ~0x0024; + s->phy_int |= PHY_INT_DOWN; + } else { + s->phy_status |= 0x0024; + s->phy_int |= PHY_INT_ENERGYON; + s->phy_int |= PHY_INT_AUTONEG_COMPLETE; + } + phy_update_irq(s); +} + +static void lan9118_set_link(NetClientState *nc) +{ + phy_update_link(qemu_get_nic_opaque(nc)); +} + +static void phy_reset(lan9118_state *s) +{ + s->phy_status = 0x7809; + s->phy_control = 0x3000; + s->phy_advertise = 0x01e1; + s->phy_int_mask = 0; + s->phy_int = 0; + phy_update_link(s); +} + +static void lan9118_reset(DeviceState *d) +{ + lan9118_state *s = LAN9118(d); + + s->irq_cfg &= (IRQ_TYPE | IRQ_POL); + s->int_sts = 0; + s->int_en = 0; + s->fifo_int = 0x48000000; + s->rx_cfg = 0; + s->tx_cfg = 0; + s->hw_cfg = s->mode_16bit ? 0x00050000 : 0x00050004; + s->pmt_ctrl &= 0x45; + s->gpio_cfg = 0; + s->txp->fifo_used = 0; + s->txp->state = TX_IDLE; + s->txp->cmd_a = 0xffffffffu; + s->txp->cmd_b = 0xffffffffu; + s->txp->len = 0; + s->txp->fifo_used = 0; + s->tx_fifo_size = 4608; + s->tx_status_fifo_used = 0; + s->rx_status_fifo_size = 704; + s->rx_fifo_size = 2640; + s->rx_fifo_used = 0; + s->rx_status_fifo_size = 176; + s->rx_status_fifo_used = 0; + s->rxp_offset = 0; + s->rxp_size = 0; + s->rxp_pad = 0; + s->rx_packet_size_tail = s->rx_packet_size_head; + s->rx_packet_size[s->rx_packet_size_head] = 0; + s->mac_cmd = 0; + s->mac_data = 0; + s->afc_cfg = 0; + s->e2p_cmd = 0; + s->e2p_data = 0; + s->free_timer_start = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / 40; + + ptimer_transaction_begin(s->timer); + ptimer_stop(s->timer); + ptimer_set_count(s->timer, 0xffff); + ptimer_transaction_commit(s->timer); + s->gpt_cfg = 0xffff; + + s->mac_cr = MAC_CR_PRMS; + s->mac_hashh = 0; + s->mac_hashl = 0; + s->mac_mii_acc = 0; + s->mac_mii_data = 0; + s->mac_flow = 0; + + s->read_word_n = 0; + s->write_word_n = 0; + + phy_reset(s); + + s->eeprom_writable = 0; + lan9118_reload_eeprom(s); +} + +static void rx_fifo_push(lan9118_state *s, uint32_t val) +{ + int fifo_pos; + fifo_pos = s->rx_fifo_head + s->rx_fifo_used; + if (fifo_pos >= s->rx_fifo_size) + fifo_pos -= s->rx_fifo_size; + s->rx_fifo[fifo_pos] = val; + s->rx_fifo_used++; +} + +/* Return nonzero if the packet is accepted by the filter. */ +static int lan9118_filter(lan9118_state *s, const uint8_t *addr) +{ + int multicast; + uint32_t hash; + + if (s->mac_cr & MAC_CR_PRMS) { + return 1; + } + if (addr[0] == 0xff && addr[1] == 0xff && addr[2] == 0xff && + addr[3] == 0xff && addr[4] == 0xff && addr[5] == 0xff) { + return (s->mac_cr & MAC_CR_BCAST) == 0; + } + + multicast = addr[0] & 1; + if (multicast &&s->mac_cr & MAC_CR_MCPAS) { + return 1; + } + if (multicast ? 
(s->mac_cr & MAC_CR_HPFILT) == 0 + : (s->mac_cr & MAC_CR_HO) == 0) { + /* Exact matching. */ + hash = memcmp(addr, s->conf.macaddr.a, 6); + if (s->mac_cr & MAC_CR_INVFILT) { + return hash != 0; + } else { + return hash == 0; + } + } else { + /* Hash matching */ + hash = net_crc32(addr, ETH_ALEN) >> 26; + if (hash & 0x20) { + return (s->mac_hashh >> (hash & 0x1f)) & 1; + } else { + return (s->mac_hashl >> (hash & 0x1f)) & 1; + } + } +} + +static ssize_t lan9118_receive(NetClientState *nc, const uint8_t *buf, + size_t size) +{ + lan9118_state *s = qemu_get_nic_opaque(nc); + int fifo_len; + int offset; + int src_pos; + int n; + int filter; + uint32_t val; + uint32_t crc; + uint32_t status; + + if ((s->mac_cr & MAC_CR_RXEN) == 0) { + return -1; + } + + if (size >= 2048 || size < 14) { + return -1; + } + + /* TODO: Implement FIFO overflow notification. */ + if (s->rx_status_fifo_used == s->rx_status_fifo_size) { + return -1; + } + + filter = lan9118_filter(s, buf); + if (!filter && (s->mac_cr & MAC_CR_RXALL) == 0) { + return size; + } + + offset = (s->rx_cfg >> 8) & 0x1f; + n = offset & 3; + fifo_len = (size + n + 3) >> 2; + /* Add a word for the CRC. */ + fifo_len++; + if (s->rx_fifo_size - s->rx_fifo_used < fifo_len) { + return -1; + } + + DPRINTF("Got packet len:%d fifo:%d filter:%s\n", + (int)size, fifo_len, filter ? "pass" : "fail"); + val = 0; + crc = bswap32(crc32(~0, buf, size)); + for (src_pos = 0; src_pos < size; src_pos++) { + val = (val >> 8) | ((uint32_t)buf[src_pos] << 24); + n++; + if (n == 4) { + n = 0; + rx_fifo_push(s, val); + val = 0; + } + } + if (n) { + val >>= ((4 - n) * 8); + val |= crc << (n * 8); + rx_fifo_push(s, val); + val = crc >> ((4 - n) * 8); + rx_fifo_push(s, val); + } else { + rx_fifo_push(s, crc); + } + n = s->rx_status_fifo_head + s->rx_status_fifo_used; + if (n >= s->rx_status_fifo_size) { + n -= s->rx_status_fifo_size; + } + s->rx_packet_size[s->rx_packet_size_tail] = fifo_len; + s->rx_packet_size_tail = (s->rx_packet_size_tail + 1023) & 1023; + s->rx_status_fifo_used++; + + status = (size + 4) << 16; + if (buf[0] == 0xff && buf[1] == 0xff && buf[2] == 0xff && + buf[3] == 0xff && buf[4] == 0xff && buf[5] == 0xff) { + status |= 0x00002000; + } else if (buf[0] & 1) { + status |= 0x00000400; + } + if (!filter) { + status |= 0x40000000; + } + s->rx_status_fifo[n] = status; + + if (s->rx_status_fifo_used > (s->fifo_int & 0xff)) { + s->int_sts |= RSFL_INT; + } + lan9118_update(s); + + return size; +} + +static uint32_t rx_fifo_pop(lan9118_state *s) +{ + int n; + uint32_t val; + + if (s->rxp_size == 0 && s->rxp_pad == 0) { + s->rxp_size = s->rx_packet_size[s->rx_packet_size_head]; + s->rx_packet_size[s->rx_packet_size_head] = 0; + if (s->rxp_size != 0) { + s->rx_packet_size_head = (s->rx_packet_size_head + 1023) & 1023; + s->rxp_offset = (s->rx_cfg >> 10) & 7; + n = s->rxp_offset + s->rxp_size; + switch (s->rx_cfg >> 30) { + case 1: + n = (-n) & 3; + break; + case 2: + n = (-n) & 7; + break; + default: + n = 0; + break; + } + s->rxp_pad = n; + DPRINTF("Pop packet size:%d offset:%d pad: %d\n", + s->rxp_size, s->rxp_offset, s->rxp_pad); + } + } + if (s->rxp_offset > 0) { + s->rxp_offset--; + val = 0; + } else if (s->rxp_size > 0) { + s->rxp_size--; + val = s->rx_fifo[s->rx_fifo_head++]; + if (s->rx_fifo_head >= s->rx_fifo_size) { + s->rx_fifo_head -= s->rx_fifo_size; + } + s->rx_fifo_used--; + } else if (s->rxp_pad > 0) { + s->rxp_pad--; + val = 0; + } else { + DPRINTF("RX underflow\n"); + s->int_sts |= RXE_INT; + val = 0; + } + lan9118_update(s); + return val; +} + 
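+/* + * Example of the framing rx_fifo_pop() above produces, with illustrative + * numbers: RX_CFG programming a two-word data offset and 16-byte end + * alignment (RX_CFG[31:30] == 1) makes a 16-word packet pop as two zero + * words, sixteen data words (CRC included) and two pad words, since + * (-(2 + 16)) & 3 == 2 brings the total to a 4-word boundary. + */ +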
+static void do_tx_packet(lan9118_state *s) +{ + int n; + uint32_t status; + + /* FIXME: Honor TX disable, and allow queueing of packets. */ + if (s->phy_control & 0x4000) { + /* This assumes the receive routine doesn't touch the VLANClient. */ + qemu_receive_packet(qemu_get_queue(s->nic), s->txp->data, s->txp->len); + } else { + qemu_send_packet(qemu_get_queue(s->nic), s->txp->data, s->txp->len); + } + s->txp->fifo_used = 0; + + if (s->tx_status_fifo_used == 512) { + /* Status FIFO full */ + return; + } + /* Add entry to status FIFO. */ + status = s->txp->cmd_b & 0xffff0000u; + DPRINTF("Sent packet tag:%04x len %d\n", status >> 16, s->txp->len); + n = (s->tx_status_fifo_head + s->tx_status_fifo_used) & 511; + s->tx_status_fifo[n] = status; + s->tx_status_fifo_used++; + if (s->tx_status_fifo_used == 512) { + s->int_sts |= TSFF_INT; + /* TODO: Stop transmission. */ + } +} + +static uint32_t rx_status_fifo_pop(lan9118_state *s) +{ + uint32_t val; + + val = s->rx_status_fifo[s->rx_status_fifo_head]; + if (s->rx_status_fifo_used != 0) { + s->rx_status_fifo_used--; + s->rx_status_fifo_head++; + if (s->rx_status_fifo_head >= s->rx_status_fifo_size) { + s->rx_status_fifo_head -= s->rx_status_fifo_size; + } + /* ??? What value should be returned when the FIFO is empty? */ + DPRINTF("RX status pop 0x%08x\n", val); + } + return val; +} + +static uint32_t tx_status_fifo_pop(lan9118_state *s) +{ + uint32_t val; + + val = s->tx_status_fifo[s->tx_status_fifo_head]; + if (s->tx_status_fifo_used != 0) { + s->tx_status_fifo_used--; + s->tx_status_fifo_head = (s->tx_status_fifo_head + 1) & 511; + /* ??? What value should be returned when the FIFO is empty? */ + } + return val; +} + +static void tx_fifo_push(lan9118_state *s, uint32_t val) +{ + int n; + + if (s->txp->fifo_used == s->tx_fifo_size) { + s->int_sts |= TDFO_INT; + return; + } + switch (s->txp->state) { + case TX_IDLE: + s->txp->cmd_a = val & 0x831f37ff; + s->txp->fifo_used++; + s->txp->state = TX_B; + s->txp->buffer_size = extract32(s->txp->cmd_a, 0, 11); + s->txp->offset = extract32(s->txp->cmd_a, 16, 5); + break; + case TX_B: + if (s->txp->cmd_a & 0x2000) { + /* First segment */ + s->txp->cmd_b = val; + s->txp->fifo_used++; + /* End alignment does not include command words. */ + n = (s->txp->buffer_size + s->txp->offset + 3) >> 2; + switch ((n >> 24) & 3) { + case 1: + n = (-n) & 3; + break; + case 2: + n = (-n) & 7; + break; + default: + n = 0; + } + s->txp->pad = n; + s->txp->len = 0; + } + DPRINTF("Block len:%d offset:%d pad:%d cmd %08x\n", + s->txp->buffer_size, s->txp->offset, s->txp->pad, + s->txp->cmd_a); + s->txp->state = TX_DATA; + break; + case TX_DATA: + if (s->txp->offset >= 4) { + s->txp->offset -= 4; + break; + } + if (s->txp->buffer_size <= 0 && s->txp->pad != 0) { + s->txp->pad--; + } else { + n = MIN(4, s->txp->buffer_size + s->txp->offset); + while (s->txp->offset) { + val >>= 8; + n--; + s->txp->offset--; + } + /* Documentation is somewhat unclear on the ordering of bytes + in FIFO words. Empirical results show it to be little-endian. + */ + /* TODO: FIFO overflow checking. 
*/ + while (n--) { + s->txp->data[s->txp->len] = val & 0xff; + s->txp->len++; + val >>= 8; + s->txp->buffer_size--; + } + s->txp->fifo_used++; + } + if (s->txp->buffer_size <= 0 && s->txp->pad == 0) { + if (s->txp->cmd_a & 0x1000) { + do_tx_packet(s); + } + if (s->txp->cmd_a & 0x80000000) { + s->int_sts |= TX_IOC_INT; + } + s->txp->state = TX_IDLE; + } + break; + } +} + +static uint32_t do_phy_read(lan9118_state *s, int reg) +{ + uint32_t val; + + switch (reg) { + case 0: /* Basic Control */ + return s->phy_control; + case 1: /* Basic Status */ + return s->phy_status; + case 2: /* ID1 */ + return 0x0007; + case 3: /* ID2 */ + return 0xc0d1; + case 4: /* Auto-neg advertisement */ + return s->phy_advertise; + case 5: /* Auto-neg Link Partner Ability */ + return 0x0f71; + case 6: /* Auto-neg Expansion */ + return 1; + /* TODO 17, 18, 27, 29, 30, 31 */ + case 29: /* Interrupt source. */ + val = s->phy_int; + s->phy_int = 0; + phy_update_irq(s); + return val; + case 30: /* Interrupt mask */ + return s->phy_int_mask; + default: + BADF("PHY read reg %d\n", reg); + return 0; + } +} + +static void do_phy_write(lan9118_state *s, int reg, uint32_t val) +{ + switch (reg) { + case 0: /* Basic Control */ + if (val & 0x8000) { + phy_reset(s); + break; + } + s->phy_control = val & 0x7980; + /* Complete autonegotiation immediately. */ + if (val & 0x1000) { + s->phy_status |= 0x0020; + } + break; + case 4: /* Auto-neg advertisement */ + s->phy_advertise = (val & 0x2d7f) | 0x80; + break; + /* TODO 17, 18, 27, 31 */ + case 30: /* Interrupt mask */ + s->phy_int_mask = val & 0xff; + phy_update_irq(s); + break; + default: + BADF("PHY write reg %d = 0x%04x\n", reg, val); + } +} + +static void do_mac_write(lan9118_state *s, int reg, uint32_t val) +{ + switch (reg) { + case MAC_CR: + if ((s->mac_cr & MAC_CR_RXEN) != 0 && (val & MAC_CR_RXEN) == 0) { + s->int_sts |= RXSTOP_INT; + } + s->mac_cr = val & ~MAC_CR_RESERVED; + DPRINTF("MAC_CR: %08x\n", val); + break; + case MAC_ADDRH: + s->conf.macaddr.a[4] = val & 0xff; + s->conf.macaddr.a[5] = (val >> 8) & 0xff; + lan9118_mac_changed(s); + break; + case MAC_ADDRL: + s->conf.macaddr.a[0] = val & 0xff; + s->conf.macaddr.a[1] = (val >> 8) & 0xff; + s->conf.macaddr.a[2] = (val >> 16) & 0xff; + s->conf.macaddr.a[3] = (val >> 24) & 0xff; + lan9118_mac_changed(s); + break; + case MAC_HASHH: + s->mac_hashh = val; + break; + case MAC_HASHL: + s->mac_hashl = val; + break; + case MAC_MII_ACC: + s->mac_mii_acc = val & 0xffc2; + if (val & 2) { + DPRINTF("PHY write %d = 0x%04x\n", + (val >> 6) & 0x1f, s->mac_mii_data); + do_phy_write(s, (val >> 6) & 0x1f, s->mac_mii_data); + } else { + s->mac_mii_data = do_phy_read(s, (val >> 6) & 0x1f); + DPRINTF("PHY read %d = 0x%04x\n", + (val >> 6) & 0x1f, s->mac_mii_data); + } + break; + case MAC_MII_DATA: + s->mac_mii_data = val & 0xffff; + break; + case MAC_FLOW: + s->mac_flow = val & 0xffff0000; + break; + case MAC_VLAN1: + /* Writing to this register changes a condition for + * FrameTooLong bit in rx_status. Since we do not set + * FrameTooLong anyway, just ignore write to this. 
+ */ + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "lan9118: Unimplemented MAC register write: %d = 0x%x\n", + s->mac_cmd & 0xf, val); + } +} + +static uint32_t do_mac_read(lan9118_state *s, int reg) +{ + switch (reg) { + case MAC_CR: + return s->mac_cr; + case MAC_ADDRH: + return s->conf.macaddr.a[4] | (s->conf.macaddr.a[5] << 8); + case MAC_ADDRL: + return s->conf.macaddr.a[0] | (s->conf.macaddr.a[1] << 8) + | (s->conf.macaddr.a[2] << 16) | (s->conf.macaddr.a[3] << 24); + case MAC_HASHH: + return s->mac_hashh; + case MAC_HASHL: + return s->mac_hashl; + case MAC_MII_ACC: + return s->mac_mii_acc; + case MAC_MII_DATA: + return s->mac_mii_data; + case MAC_FLOW: + return s->mac_flow; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "lan9118: Unimplemented MAC register read: %d\n", + s->mac_cmd & 0xf); + return 0; + } +} + +static void lan9118_eeprom_cmd(lan9118_state *s, int cmd, int addr) +{ + s->e2p_cmd = (s->e2p_cmd & E2P_CMD_MAC_ADDR_LOADED) | (cmd << 28) | addr; + switch (cmd) { + case 0: + s->e2p_data = s->eeprom[addr]; + DPRINTF("EEPROM Read %d = 0x%02x\n", addr, s->e2p_data); + break; + case 1: + s->eeprom_writable = 0; + DPRINTF("EEPROM Write Disable\n"); + break; + case 2: /* EWEN */ + s->eeprom_writable = 1; + DPRINTF("EEPROM Write Enable\n"); + break; + case 3: /* WRITE */ + if (s->eeprom_writable) { + s->eeprom[addr] &= s->e2p_data; + DPRINTF("EEPROM Write %d = 0x%02x\n", addr, s->e2p_data); + } else { + DPRINTF("EEPROM Write %d (ignored)\n", addr); + } + break; + case 4: /* WRAL */ + if (s->eeprom_writable) { + for (addr = 0; addr < 128; addr++) { + s->eeprom[addr] &= s->e2p_data; + } + DPRINTF("EEPROM Write All 0x%02x\n", s->e2p_data); + } else { + DPRINTF("EEPROM Write All (ignored)\n"); + } + break; + case 5: /* ERASE */ + if (s->eeprom_writable) { + s->eeprom[addr] = 0xff; + DPRINTF("EEPROM Erase %d\n", addr); + } else { + DPRINTF("EEPROM Erase %d (ignored)\n", addr); + } + break; + case 6: /* ERAL */ + if (s->eeprom_writable) { + memset(s->eeprom, 0xff, 128); + DPRINTF("EEPROM Erase All\n"); + } else { + DPRINTF("EEPROM Erase All (ignored)\n"); + } + break; + case 7: /* RELOAD */ + lan9118_reload_eeprom(s); + break; + } +} + +static void lan9118_tick(void *opaque) +{ + lan9118_state *s = (lan9118_state *)opaque; + if (s->int_en & GPT_INT) { + s->int_sts |= GPT_INT; + } + lan9118_update(s); +} + +static void lan9118_writel(void *opaque, hwaddr offset, + uint64_t val, unsigned size) +{ + lan9118_state *s = (lan9118_state *)opaque; + offset &= 0xff; + + //DPRINTF("Write reg 0x%02x = 0x%08x\n", (int)offset, val); + if (offset >= TX_DATA_FIFO_PORT_FIRST && + offset <= TX_DATA_FIFO_PORT_LAST) { + /* TX FIFO */ + tx_fifo_push(s, val); + return; + } + switch (offset) { + case CSR_IRQ_CFG: + /* TODO: Implement interrupt deassertion intervals. 
*/ + val &= (IRQ_EN | IRQ_POL | IRQ_TYPE); + s->irq_cfg = (s->irq_cfg & IRQ_INT) | val; + break; + case CSR_INT_STS: + s->int_sts &= ~val; + break; + case CSR_INT_EN: + s->int_en = val & ~RESERVED_INT; + s->int_sts |= val & SW_INT; + break; + case CSR_FIFO_INT: + DPRINTF("FIFO INT levels %08x\n", val); + s->fifo_int = val; + break; + case CSR_RX_CFG: + if (val & 0x8000) { + /* RX_DUMP */ + s->rx_fifo_used = 0; + s->rx_status_fifo_used = 0; + s->rx_packet_size_tail = s->rx_packet_size_head; + s->rx_packet_size[s->rx_packet_size_head] = 0; + } + s->rx_cfg = val & 0xcfff1ff0; + break; + case CSR_TX_CFG: + if (val & 0x8000) { + s->tx_status_fifo_used = 0; + } + if (val & 0x4000) { + s->txp->state = TX_IDLE; + s->txp->fifo_used = 0; + s->txp->cmd_a = 0xffffffff; + } + s->tx_cfg = val & 6; + break; + case CSR_HW_CFG: + if (val & 1) { + /* SRST */ + lan9118_reset(DEVICE(s)); + } else { + s->hw_cfg = (val & 0x003f300) | (s->hw_cfg & 0x4); + } + break; + case CSR_RX_DP_CTRL: + if (val & 0x80000000) { + /* Skip forward to next packet. */ + s->rxp_pad = 0; + s->rxp_offset = 0; + if (s->rxp_size == 0) { + /* Pop a word to start the next packet. */ + rx_fifo_pop(s); + s->rxp_pad = 0; + s->rxp_offset = 0; + } + s->rx_fifo_head += s->rxp_size; + if (s->rx_fifo_head >= s->rx_fifo_size) { + s->rx_fifo_head -= s->rx_fifo_size; + } + } + break; + case CSR_PMT_CTRL: + if (val & 0x400) { + phy_reset(s); + } + s->pmt_ctrl &= ~0x34e; + s->pmt_ctrl |= (val & 0x34e); + break; + case CSR_GPIO_CFG: + /* Probably just enabling LEDs. */ + s->gpio_cfg = val & 0x7777071f; + break; + case CSR_GPT_CFG: + if ((s->gpt_cfg ^ val) & GPT_TIMER_EN) { + ptimer_transaction_begin(s->timer); + if (val & GPT_TIMER_EN) { + ptimer_set_count(s->timer, val & 0xffff); + ptimer_run(s->timer, 0); + } else { + ptimer_stop(s->timer); + ptimer_set_count(s->timer, 0xffff); + } + ptimer_transaction_commit(s->timer); + } + s->gpt_cfg = val & (GPT_TIMER_EN | 0xffff); + break; + case CSR_WORD_SWAP: + /* Ignored because we're in 32-bit mode. 
*/ + s->word_swap = val; + break; + case CSR_MAC_CSR_CMD: + s->mac_cmd = val & 0x4000000f; + if (val & 0x80000000) { + if (val & 0x40000000) { + s->mac_data = do_mac_read(s, val & 0xf); + DPRINTF("MAC read %d = 0x%08x\n", val & 0xf, s->mac_data); + } else { + DPRINTF("MAC write %d = 0x%08x\n", val & 0xf, s->mac_data); + do_mac_write(s, val & 0xf, s->mac_data); + } + } + break; + case CSR_MAC_CSR_DATA: + s->mac_data = val; + break; + case CSR_AFC_CFG: + s->afc_cfg = val & 0x00ffffff; + break; + case CSR_E2P_CMD: + lan9118_eeprom_cmd(s, (val >> 28) & 7, val & 0x7f); + break; + case CSR_E2P_DATA: + s->e2p_data = val & 0xff; + break; + + default: + qemu_log_mask(LOG_GUEST_ERROR, "lan9118_write: Bad reg 0x%x = %x\n", + (int)offset, (int)val); + break; + } + lan9118_update(s); +} + +static void lan9118_writew(void *opaque, hwaddr offset, + uint32_t val) +{ + lan9118_state *s = (lan9118_state *)opaque; + offset &= 0xff; + + if (s->write_word_prev_offset != (offset & ~0x3)) { + /* New offset, reset word counter */ + s->write_word_n = 0; + s->write_word_prev_offset = offset & ~0x3; + } + + if (offset & 0x2) { + s->write_word_h = val; + } else { + s->write_word_l = val; + } + + //DPRINTF("Writew reg 0x%02x = 0x%08x\n", (int)offset, val); + s->write_word_n++; + if (s->write_word_n == 2) { + s->write_word_n = 0; + lan9118_writel(s, offset & ~3, s->write_word_l + + (s->write_word_h << 16), 4); + } +} + +static void lan9118_16bit_mode_write(void *opaque, hwaddr offset, + uint64_t val, unsigned size) +{ + switch (size) { + case 2: + lan9118_writew(opaque, offset, (uint32_t)val); + return; + case 4: + lan9118_writel(opaque, offset, val, size); + return; + } + + hw_error("lan9118_write: Bad size 0x%x\n", size); +} + +static uint64_t lan9118_readl(void *opaque, hwaddr offset, + unsigned size) +{ + lan9118_state *s = (lan9118_state *)opaque; + + //DPRINTF("Read reg 0x%02x\n", (int)offset); + if (offset <= RX_DATA_FIFO_PORT_LAST) { + /* RX FIFO */ + return rx_fifo_pop(s); + } + switch (offset) { + case RX_STATUS_FIFO_PORT: + return rx_status_fifo_pop(s); + case RX_STATUS_FIFO_PEEK: + return s->rx_status_fifo[s->rx_status_fifo_head]; + case TX_STATUS_FIFO_PORT: + return tx_status_fifo_pop(s); + case TX_STATUS_FIFO_PEEK: + return s->tx_status_fifo[s->tx_status_fifo_head]; + case CSR_ID_REV: + return 0x01180001; + case CSR_IRQ_CFG: + return s->irq_cfg; + case CSR_INT_STS: + return s->int_sts; + case CSR_INT_EN: + return s->int_en; + case CSR_BYTE_TEST: + return 0x87654321; + case CSR_FIFO_INT: + return s->fifo_int; + case CSR_RX_CFG: + return s->rx_cfg; + case CSR_TX_CFG: + return s->tx_cfg; + case CSR_HW_CFG: + return s->hw_cfg; + case CSR_RX_DP_CTRL: + return 0; + case CSR_RX_FIFO_INF: + return (s->rx_status_fifo_used << 16) | (s->rx_fifo_used << 2); + case CSR_TX_FIFO_INF: + return (s->tx_status_fifo_used << 16) + | (s->tx_fifo_size - s->txp->fifo_used); + case CSR_PMT_CTRL: + return s->pmt_ctrl; + case CSR_GPIO_CFG: + return s->gpio_cfg; + case CSR_GPT_CFG: + return s->gpt_cfg; + case CSR_GPT_CNT: + return ptimer_get_count(s->timer); + case CSR_WORD_SWAP: + return s->word_swap; + case CSR_FREE_RUN: + return (qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / 40) - s->free_timer_start; + case CSR_RX_DROP: + /* TODO: Implement dropped frames counter. 
*/ + return 0; + case CSR_MAC_CSR_CMD: + return s->mac_cmd; + case CSR_MAC_CSR_DATA: + return s->mac_data; + case CSR_AFC_CFG: + return s->afc_cfg; + case CSR_E2P_CMD: + return s->e2p_cmd; + case CSR_E2P_DATA: + return s->e2p_data; + } + qemu_log_mask(LOG_GUEST_ERROR, "lan9118_read: Bad reg 0x%x\n", (int)offset); + return 0; +} + +static uint32_t lan9118_readw(void *opaque, hwaddr offset) +{ + lan9118_state *s = (lan9118_state *)opaque; + uint32_t val; + + if (s->read_word_prev_offset != (offset & ~0x3)) { + /* New offset, reset word counter */ + s->read_word_n = 0; + s->read_word_prev_offset = offset & ~0x3; + } + + s->read_word_n++; + if (s->read_word_n == 1) { + s->read_long = lan9118_readl(s, offset & ~3, 4); + } else { + s->read_word_n = 0; + } + + if (offset & 2) { + val = s->read_long >> 16; + } else { + val = s->read_long & 0xFFFF; + } + + //DPRINTF("Readw reg 0x%02x, val 0x%x\n", (int)offset, val); + return val; +} + +static uint64_t lan9118_16bit_mode_read(void *opaque, hwaddr offset, + unsigned size) +{ + switch (size) { + case 2: + return lan9118_readw(opaque, offset); + case 4: + return lan9118_readl(opaque, offset, size); + } + + hw_error("lan9118_read: Bad size 0x%x\n", size); + return 0; +} + +static const MemoryRegionOps lan9118_mem_ops = { + .read = lan9118_readl, + .write = lan9118_writel, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static const MemoryRegionOps lan9118_16bit_mem_ops = { + .read = lan9118_16bit_mode_read, + .write = lan9118_16bit_mode_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static NetClientInfo net_lan9118_info = { + .type = NET_CLIENT_DRIVER_NIC, + .size = sizeof(NICState), + .receive = lan9118_receive, + .link_status_changed = lan9118_set_link, +}; + +static void lan9118_realize(DeviceState *dev, Error **errp) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + lan9118_state *s = LAN9118(dev); + int i; + const MemoryRegionOps *mem_ops = + s->mode_16bit ? &lan9118_16bit_mem_ops : &lan9118_mem_ops; + + memory_region_init_io(&s->mmio, OBJECT(dev), mem_ops, s, + "lan9118-mmio", 0x100); + sysbus_init_mmio(sbd, &s->mmio); + sysbus_init_irq(sbd, &s->irq); + qemu_macaddr_default_if_unset(&s->conf.macaddr); + + s->nic = qemu_new_nic(&net_lan9118_info, &s->conf, + object_get_typename(OBJECT(dev)), dev->id, s); + qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a); + s->eeprom[0] = 0xa5; + for (i = 0; i < 6; i++) { + s->eeprom[i + 1] = s->conf.macaddr.a[i]; + } + s->pmt_ctrl = 1; + s->txp = &s->tx_packet; + + s->timer = ptimer_init(lan9118_tick, s, PTIMER_POLICY_DEFAULT); + ptimer_transaction_begin(s->timer); + ptimer_set_freq(s->timer, 10000); + ptimer_set_limit(s->timer, 0xffff, 1); + ptimer_transaction_commit(s->timer); +} + +static Property lan9118_properties[] = { + DEFINE_NIC_PROPERTIES(lan9118_state, conf), + DEFINE_PROP_UINT32("mode_16bit", lan9118_state, mode_16bit, 0), + DEFINE_PROP_END_OF_LIST(), +}; + +static void lan9118_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = lan9118_reset; + device_class_set_props(dc, lan9118_properties); + dc->vmsd = &vmstate_lan9118; + dc->realize = lan9118_realize; +} + +static const TypeInfo lan9118_info = { + .name = TYPE_LAN9118, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(lan9118_state), + .class_init = lan9118_class_init, +}; + +static void lan9118_register_types(void) +{ + type_register_static(&lan9118_info); +} + +/* Legacy helper function. Should go away when machine config files are + implemented. 
*/ +void lan9118_init(NICInfo *nd, uint32_t base, qemu_irq irq) +{ + DeviceState *dev; + SysBusDevice *s; + + qemu_check_nic_model(nd, "lan9118"); + dev = qdev_new(TYPE_LAN9118); + qdev_set_nic_properties(dev, nd); + s = SYS_BUS_DEVICE(dev); + sysbus_realize_and_unref(s, &error_fatal); + sysbus_mmio_map(s, 0, base); + sysbus_connect_irq(s, 0, irq); +} + +type_init(lan9118_register_types) diff --git a/hw/net/lance.c b/hw/net/lance.c new file mode 100644 index 000000000..4c5f01baa --- /dev/null +++ b/hw/net/lance.c @@ -0,0 +1,172 @@ +/* + * QEMU AMD PC-Net II (Am79C970A) emulation + * + * Copyright (c) 2004 Antony T Curtis + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +/* This software was written to be compatible with the specification: + * AMD Am79C970A PCnet-PCI II Ethernet Controller Data-Sheet + * AMD Publication# 19436 Rev:E Amendment/0 Issue Date: June 2000 + */ + +/* + * On Sparc32, this is the Lance (Am7990) part of chip STP2000 (Master I/O), also + * produced as NCR89C100. 
See + * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR89C100.txt + * and + * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR92C990.txt + */ + +#include "qemu/osdep.h" +#include "qemu/module.h" +#include "qemu/timer.h" +#include "hw/sparc/sparc32_dma.h" +#include "migration/vmstate.h" +#include "hw/net/lance.h" +#include "hw/qdev-properties.h" +#include "trace.h" +#include "sysemu/sysemu.h" + + +static void parent_lance_reset(void *opaque, int irq, int level) +{ + SysBusPCNetState *d = opaque; + if (level) + pcnet_h_reset(&d->state); +} + +static void lance_mem_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + SysBusPCNetState *d = opaque; + + trace_lance_mem_writew(addr, val & 0xffff); + pcnet_ioport_writew(&d->state, addr, val & 0xffff); +} + +static uint64_t lance_mem_read(void *opaque, hwaddr addr, + unsigned size) +{ + SysBusPCNetState *d = opaque; + uint32_t val; + + val = pcnet_ioport_readw(&d->state, addr); + trace_lance_mem_readw(addr, val & 0xffff); + return val & 0xffff; +} + +static const MemoryRegionOps lance_mem_ops = { + .read = lance_mem_read, + .write = lance_mem_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + .min_access_size = 2, + .max_access_size = 2, + }, +}; + +static NetClientInfo net_lance_info = { + .type = NET_CLIENT_DRIVER_NIC, + .size = sizeof(NICState), + .receive = pcnet_receive, + .link_status_changed = pcnet_set_link_status, +}; + +static const VMStateDescription vmstate_lance = { + .name = "pcnet", + .version_id = 3, + .minimum_version_id = 2, + .fields = (VMStateField[]) { + VMSTATE_STRUCT(state, SysBusPCNetState, 0, vmstate_pcnet, PCNetState), + VMSTATE_END_OF_LIST() + } +}; + +static void lance_realize(DeviceState *dev, Error **errp) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + SysBusPCNetState *d = SYSBUS_PCNET(dev); + PCNetState *s = &d->state; + + memory_region_init_io(&s->mmio, OBJECT(d), &lance_mem_ops, d, + "lance-mmio", 4); + + qdev_init_gpio_in(dev, parent_lance_reset, 1); + + sysbus_init_mmio(sbd, &s->mmio); + + sysbus_init_irq(sbd, &s->irq); + + s->phys_mem_read = ledma_memory_read; + s->phys_mem_write = ledma_memory_write; + pcnet_common_init(dev, s, &net_lance_info); +} + +static void lance_reset(DeviceState *dev) +{ + SysBusPCNetState *d = SYSBUS_PCNET(dev); + + pcnet_h_reset(&d->state); +} + +static void lance_instance_init(Object *obj) +{ + SysBusPCNetState *d = SYSBUS_PCNET(obj); + PCNetState *s = &d->state; + + device_add_bootindex_property(obj, &s->conf.bootindex, + "bootindex", "/ethernet-phy@0", + DEVICE(obj)); +} + +static Property lance_properties[] = { + DEFINE_PROP_LINK("dma", SysBusPCNetState, state.dma_opaque, + TYPE_DEVICE, DeviceState *), + DEFINE_NIC_PROPERTIES(SysBusPCNetState, state.conf), + DEFINE_PROP_END_OF_LIST(), +}; + +static void lance_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = lance_realize; + set_bit(DEVICE_CATEGORY_NETWORK, dc->categories); + dc->fw_name = "ethernet"; + dc->reset = lance_reset; + dc->vmsd = &vmstate_lance; + device_class_set_props(dc, lance_properties); +} + +static const TypeInfo lance_info = { + .name = TYPE_LANCE, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(SysBusPCNetState), + .class_init = lance_class_init, + .instance_init = lance_instance_init, +}; + +static void lance_register_types(void) +{ + type_register_static(&lance_info); +} + +type_init(lance_register_types) diff --git a/hw/net/lasi_i82596.c b/hw/net/lasi_i82596.c new file mode 
100644 index 000000000..e37f7fabe --- /dev/null +++ b/hw/net/lasi_i82596.c @@ -0,0 +1,189 @@ +/* + * QEMU LASI NIC i82596 emulation + * + * Copyright (c) 2019 Helge Deller + * This work is licensed under the GNU GPL license version 2 or later. + * + * + * On PA-RISC, this is the Network part of LASI chip. + * See: + * https://parisc.wiki.kernel.org/images-parisc/7/79/Lasi_ers.pdf + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/timer.h" +#include "hw/sysbus.h" +#include "net/eth.h" +#include "hw/net/lasi_82596.h" +#include "hw/net/i82596.h" +#include "trace.h" +#include "hw/qdev-properties.h" +#include "migration/vmstate.h" + +#define PA_I82596_RESET 0 /* Offsets relative to LASI-LAN-Addr.*/ +#define PA_CPU_PORT_L_ACCESS 4 +#define PA_CHANNEL_ATTENTION 8 +#define PA_GET_MACADDR 12 + +#define SWAP32(x) (((uint32_t)(x) << 16) | ((((uint32_t)(x))) >> 16)) + +static void lasi_82596_mem_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + SysBusI82596State *d = opaque; + + trace_lasi_82596_mem_writew(addr, val); + switch (addr) { + case PA_I82596_RESET: + i82596_h_reset(&d->state); + break; + case PA_CPU_PORT_L_ACCESS: + d->val_index++; + if (d->val_index == 0) { + uint32_t v = d->last_val | (val << 16); + v = v & ~0xff; + i82596_ioport_writew(&d->state, d->last_val & 0xff, v); + } + d->last_val = val; + break; + case PA_CHANNEL_ATTENTION: + i82596_ioport_writew(&d->state, PORT_CA, val); + break; + case PA_GET_MACADDR: + /* + * Provided for SeaBIOS only. Write MAC of Network card to addr @val. + * Needed for the PDC_LAN_STATION_ID_READ PDC call. + */ + address_space_write(&address_space_memory, val, + MEMTXATTRS_UNSPECIFIED, d->state.conf.macaddr.a, + ETH_ALEN); + break; + } +} + +static uint64_t lasi_82596_mem_read(void *opaque, hwaddr addr, + unsigned size) +{ + SysBusI82596State *d = opaque; + uint32_t val; + + if (addr == PA_GET_MACADDR) { + val = 0xBEEFBABE; + } else { + val = i82596_ioport_readw(&d->state, addr); + } + trace_lasi_82596_mem_readw(addr, val); + return val; +} + +static const MemoryRegionOps lasi_82596_mem_ops = { + .read = lasi_82596_mem_read, + .write = lasi_82596_mem_write, + .endianness = DEVICE_BIG_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +static NetClientInfo net_lasi_82596_info = { + .type = NET_CLIENT_DRIVER_NIC, + .size = sizeof(NICState), + .can_receive = i82596_can_receive, + .receive = i82596_receive, + .link_status_changed = i82596_set_link_status, +}; + +static const VMStateDescription vmstate_lasi_82596 = { + .name = "i82596", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_STRUCT(state, SysBusI82596State, 0, vmstate_i82596, + I82596State), + VMSTATE_END_OF_LIST() + } +}; + +static void lasi_82596_realize(DeviceState *dev, Error **errp) +{ + SysBusI82596State *d = SYSBUS_I82596(dev); + I82596State *s = &d->state; + + memory_region_init_io(&s->mmio, OBJECT(d), &lasi_82596_mem_ops, d, + "lasi_82596-mmio", PA_GET_MACADDR + 4); + + i82596_common_init(dev, s, &net_lasi_82596_info); +} + +SysBusI82596State *lasi_82596_init(MemoryRegion *addr_space, + hwaddr hpa, qemu_irq lan_irq) +{ + DeviceState *dev; + SysBusI82596State *s; + static const MACAddr HP_MAC = { + .a = { 0x08, 0x00, 0x09, 0xef, 0x34, 0xf6 } }; + + qemu_check_nic_model(&nd_table[0], TYPE_LASI_82596); + dev = qdev_new(TYPE_LASI_82596); + s = SYSBUS_I82596(dev); + s->state.irq = lan_irq; + qdev_set_nic_properties(dev, &nd_table[0]); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), 
&error_fatal);
+    s->state.conf.macaddr = HP_MAC; /* set HP MAC prefix */
+
+    /* LASI 82596 ports in main memory. */
+    memory_region_add_subregion(addr_space, hpa, &s->state.mmio);
+    return s;
+}
+
+static void lasi_82596_reset(DeviceState *dev)
+{
+    SysBusI82596State *d = SYSBUS_I82596(dev);
+
+    i82596_h_reset(&d->state);
+}
+
+static void lasi_82596_instance_init(Object *obj)
+{
+    SysBusI82596State *d = SYSBUS_I82596(obj);
+    I82596State *s = &d->state;
+
+    device_add_bootindex_property(obj, &s->conf.bootindex,
+                                  "bootindex", "/ethernet-phy@0",
+                                  DEVICE(obj));
+}
+
+static Property lasi_82596_properties[] = {
+    DEFINE_NIC_PROPERTIES(SysBusI82596State, state.conf),
+    DEFINE_PROP_END_OF_LIST(),
+};
+
+static void lasi_82596_class_init(ObjectClass *klass, void *data)
+{
+    DeviceClass *dc = DEVICE_CLASS(klass);
+
+    dc->realize = lasi_82596_realize;
+    set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
+    dc->fw_name = "ethernet";
+    dc->reset = lasi_82596_reset;
+    dc->vmsd = &vmstate_lasi_82596;
+    dc->user_creatable = false;
+    device_class_set_props(dc, lasi_82596_properties);
+}
+
+static const TypeInfo lasi_82596_info = {
+    .name = TYPE_LASI_82596,
+    .parent = TYPE_SYS_BUS_DEVICE,
+    .instance_size = sizeof(SysBusI82596State),
+    .class_init = lasi_82596_class_init,
+    .instance_init = lasi_82596_instance_init,
+};
+
+static void lasi_82596_register_types(void)
+{
+    type_register_static(&lasi_82596_info);
+}
+
+type_init(lasi_82596_register_types)
diff --git a/hw/net/mcf_fec.c b/hw/net/mcf_fec.c
new file mode 100644
index 000000000..25e3e453a
--- /dev/null
+++ b/hw/net/mcf_fec.c
@@ -0,0 +1,692 @@
+/*
+ * ColdFire Fast Ethernet Controller emulation.
+ *
+ * Copyright (c) 2007 CodeSourcery.
+ *
+ * This code is licensed under the GPL
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/log.h"
+#include "hw/irq.h"
+#include "net/net.h"
+#include "qemu/module.h"
+#include "hw/m68k/mcf.h"
+#include "hw/m68k/mcf_fec.h"
+#include "hw/net/mii.h"
+#include "hw/qdev-properties.h"
+#include "hw/sysbus.h"
+/* For crc32 */
+#include <zlib.h>
+
+//#define DEBUG_FEC 1
+
+#ifdef DEBUG_FEC
+#define DPRINTF(fmt, ...) \
+do { printf("mcf_fec: " fmt , ## __VA_ARGS__); } while (0)
+#else
+#define DPRINTF(fmt, ...) do {} while(0)
+#endif
+
+#define FEC_MAX_DESC 1024
+#define FEC_MAX_FRAME_SIZE 2032
+#define FEC_MIB_SIZE 64
+
+struct mcf_fec_state {
+    SysBusDevice parent_obj;
+
+    MemoryRegion iomem;
+    qemu_irq irq[FEC_NUM_IRQ];
+    NICState *nic;
+    NICConf conf;
+    uint32_t irq_state;
+    uint32_t eir;
+    uint32_t eimr;
+    int rx_enabled;
+    uint32_t rx_descriptor;
+    uint32_t tx_descriptor;
+    uint32_t ecr;
+    uint32_t mmfr;
+    uint32_t mscr;
+    uint32_t rcr;
+    uint32_t tcr;
+    uint32_t tfwr;
+    uint32_t rfsr;
+    uint32_t erdsr;
+    uint32_t etdsr;
+    uint32_t emrbr;
+    uint32_t mib[FEC_MIB_SIZE];
+};
+
+#define FEC_INT_HB   0x80000000
+#define FEC_INT_BABR 0x40000000
+#define FEC_INT_BABT 0x20000000
+#define FEC_INT_GRA  0x10000000
+#define FEC_INT_TXF  0x08000000
+#define FEC_INT_TXB  0x04000000
+#define FEC_INT_RXF  0x02000000
+#define FEC_INT_RXB  0x01000000
+#define FEC_INT_MII  0x00800000
+#define FEC_INT_EB   0x00400000
+#define FEC_INT_LC   0x00200000
+#define FEC_INT_RL   0x00100000
+#define FEC_INT_UN   0x00080000
+
+#define FEC_EN 2
+#define FEC_RESET 1
+
+/* Map interrupt flags onto IRQ lines.
*/ +static const uint32_t mcf_fec_irq_map[FEC_NUM_IRQ] = { + FEC_INT_TXF, + FEC_INT_TXB, + FEC_INT_UN, + FEC_INT_RL, + FEC_INT_RXF, + FEC_INT_RXB, + FEC_INT_MII, + FEC_INT_LC, + FEC_INT_HB, + FEC_INT_GRA, + FEC_INT_EB, + FEC_INT_BABT, + FEC_INT_BABR +}; + +/* Buffer Descriptor. */ +typedef struct { + uint16_t flags; + uint16_t length; + uint32_t data; +} mcf_fec_bd; + +#define FEC_BD_R 0x8000 +#define FEC_BD_E 0x8000 +#define FEC_BD_O1 0x4000 +#define FEC_BD_W 0x2000 +#define FEC_BD_O2 0x1000 +#define FEC_BD_L 0x0800 +#define FEC_BD_TC 0x0400 +#define FEC_BD_ABC 0x0200 +#define FEC_BD_M 0x0100 +#define FEC_BD_BC 0x0080 +#define FEC_BD_MC 0x0040 +#define FEC_BD_LG 0x0020 +#define FEC_BD_NO 0x0010 +#define FEC_BD_CR 0x0004 +#define FEC_BD_OV 0x0002 +#define FEC_BD_TR 0x0001 + +#define MIB_RMON_T_DROP 0 +#define MIB_RMON_T_PACKETS 1 +#define MIB_RMON_T_BC_PKT 2 +#define MIB_RMON_T_MC_PKT 3 +#define MIB_RMON_T_CRC_ALIGN 4 +#define MIB_RMON_T_UNDERSIZE 5 +#define MIB_RMON_T_OVERSIZE 6 +#define MIB_RMON_T_FRAG 7 +#define MIB_RMON_T_JAB 8 +#define MIB_RMON_T_COL 9 +#define MIB_RMON_T_P64 10 +#define MIB_RMON_T_P65TO127 11 +#define MIB_RMON_T_P128TO255 12 +#define MIB_RMON_T_P256TO511 13 +#define MIB_RMON_T_P512TO1023 14 +#define MIB_RMON_T_P1024TO2047 15 +#define MIB_RMON_T_P_GTE2048 16 +#define MIB_RMON_T_OCTETS 17 +#define MIB_IEEE_T_DROP 18 +#define MIB_IEEE_T_FRAME_OK 19 +#define MIB_IEEE_T_1COL 20 +#define MIB_IEEE_T_MCOL 21 +#define MIB_IEEE_T_DEF 22 +#define MIB_IEEE_T_LCOL 23 +#define MIB_IEEE_T_EXCOL 24 +#define MIB_IEEE_T_MACERR 25 +#define MIB_IEEE_T_CSERR 26 +#define MIB_IEEE_T_SQE 27 +#define MIB_IEEE_T_FDXFC 28 +#define MIB_IEEE_T_OCTETS_OK 29 + +#define MIB_RMON_R_DROP 32 +#define MIB_RMON_R_PACKETS 33 +#define MIB_RMON_R_BC_PKT 34 +#define MIB_RMON_R_MC_PKT 35 +#define MIB_RMON_R_CRC_ALIGN 36 +#define MIB_RMON_R_UNDERSIZE 37 +#define MIB_RMON_R_OVERSIZE 38 +#define MIB_RMON_R_FRAG 39 +#define MIB_RMON_R_JAB 40 +#define MIB_RMON_R_RESVD_0 41 +#define MIB_RMON_R_P64 42 +#define MIB_RMON_R_P65TO127 43 +#define MIB_RMON_R_P128TO255 44 +#define MIB_RMON_R_P256TO511 45 +#define MIB_RMON_R_P512TO1023 46 +#define MIB_RMON_R_P1024TO2047 47 +#define MIB_RMON_R_P_GTE2048 48 +#define MIB_RMON_R_OCTETS 49 +#define MIB_IEEE_R_DROP 50 +#define MIB_IEEE_R_FRAME_OK 51 +#define MIB_IEEE_R_CRC 52 +#define MIB_IEEE_R_ALIGN 53 +#define MIB_IEEE_R_MACERR 54 +#define MIB_IEEE_R_FDXFC 55 +#define MIB_IEEE_R_OCTETS_OK 56 + +static void mcf_fec_read_bd(mcf_fec_bd *bd, uint32_t addr) +{ + cpu_physical_memory_read(addr, bd, sizeof(*bd)); + be16_to_cpus(&bd->flags); + be16_to_cpus(&bd->length); + be32_to_cpus(&bd->data); +} + +static void mcf_fec_write_bd(mcf_fec_bd *bd, uint32_t addr) +{ + mcf_fec_bd tmp; + tmp.flags = cpu_to_be16(bd->flags); + tmp.length = cpu_to_be16(bd->length); + tmp.data = cpu_to_be32(bd->data); + cpu_physical_memory_write(addr, &tmp, sizeof(tmp)); +} + +static void mcf_fec_update(mcf_fec_state *s) +{ + uint32_t active; + uint32_t changed; + uint32_t mask; + int i; + + active = s->eir & s->eimr; + changed = active ^s->irq_state; + for (i = 0; i < FEC_NUM_IRQ; i++) { + mask = mcf_fec_irq_map[i]; + if (changed & mask) { + DPRINTF("IRQ %d = %d\n", i, (active & mask) != 0); + qemu_set_irq(s->irq[i], (active & mask) != 0); + } + } + s->irq_state = active; +} + +static void mcf_fec_tx_stats(mcf_fec_state *s, int size) +{ + s->mib[MIB_RMON_T_PACKETS]++; + s->mib[MIB_RMON_T_OCTETS] += size; + if (size < 64) { + s->mib[MIB_RMON_T_FRAG]++; + } else if (size == 64) { + s->mib[MIB_RMON_T_P64]++; + 
} else if (size < 128) { + s->mib[MIB_RMON_T_P65TO127]++; + } else if (size < 256) { + s->mib[MIB_RMON_T_P128TO255]++; + } else if (size < 512) { + s->mib[MIB_RMON_T_P256TO511]++; + } else if (size < 1024) { + s->mib[MIB_RMON_T_P512TO1023]++; + } else if (size < 2048) { + s->mib[MIB_RMON_T_P1024TO2047]++; + } else { + s->mib[MIB_RMON_T_P_GTE2048]++; + } + s->mib[MIB_IEEE_T_FRAME_OK]++; + s->mib[MIB_IEEE_T_OCTETS_OK] += size; +} + +static void mcf_fec_do_tx(mcf_fec_state *s) +{ + uint32_t addr; + mcf_fec_bd bd; + int frame_size; + int len, descnt = 0; + uint8_t frame[FEC_MAX_FRAME_SIZE]; + uint8_t *ptr; + + DPRINTF("do_tx\n"); + ptr = frame; + frame_size = 0; + addr = s->tx_descriptor; + while (descnt++ < FEC_MAX_DESC) { + mcf_fec_read_bd(&bd, addr); + DPRINTF("tx_bd %x flags %04x len %d data %08x\n", + addr, bd.flags, bd.length, bd.data); + if ((bd.flags & FEC_BD_R) == 0) { + /* Run out of descriptors to transmit. */ + break; + } + len = bd.length; + if (frame_size + len > FEC_MAX_FRAME_SIZE) { + len = FEC_MAX_FRAME_SIZE - frame_size; + s->eir |= FEC_INT_BABT; + } + cpu_physical_memory_read(bd.data, ptr, len); + ptr += len; + frame_size += len; + if (bd.flags & FEC_BD_L) { + /* Last buffer in frame. */ + DPRINTF("Sending packet\n"); + qemu_send_packet(qemu_get_queue(s->nic), frame, frame_size); + mcf_fec_tx_stats(s, frame_size); + ptr = frame; + frame_size = 0; + s->eir |= FEC_INT_TXF; + } + s->eir |= FEC_INT_TXB; + bd.flags &= ~FEC_BD_R; + /* Write back the modified descriptor. */ + mcf_fec_write_bd(&bd, addr); + /* Advance to the next descriptor. */ + if ((bd.flags & FEC_BD_W) != 0) { + addr = s->etdsr; + } else { + addr += 8; + } + } + s->tx_descriptor = addr; +} + +static void mcf_fec_enable_rx(mcf_fec_state *s) +{ + NetClientState *nc = qemu_get_queue(s->nic); + mcf_fec_bd bd; + + mcf_fec_read_bd(&bd, s->rx_descriptor); + s->rx_enabled = ((bd.flags & FEC_BD_E) != 0); + if (s->rx_enabled) { + qemu_flush_queued_packets(nc); + } +} + +static void mcf_fec_reset(DeviceState *dev) +{ + mcf_fec_state *s = MCF_FEC_NET(dev); + + s->eir = 0; + s->eimr = 0; + s->rx_enabled = 0; + s->ecr = 0; + s->mscr = 0; + s->rcr = 0x05ee0001; + s->tcr = 0; + s->tfwr = 0; + s->rfsr = 0x500; +} + +#define MMFR_WRITE_OP (1 << 28) +#define MMFR_READ_OP (2 << 28) +#define MMFR_PHYADDR(v) (((v) >> 23) & 0x1f) +#define MMFR_REGNUM(v) (((v) >> 18) & 0x1f) + +static uint64_t mcf_fec_read_mdio(mcf_fec_state *s) +{ + uint64_t v; + + if (s->mmfr & MMFR_WRITE_OP) + return s->mmfr; + if (MMFR_PHYADDR(s->mmfr) != 1) + return s->mmfr |= 0xffff; + + switch (MMFR_REGNUM(s->mmfr)) { + case MII_BMCR: + v = MII_BMCR_SPEED | MII_BMCR_AUTOEN | MII_BMCR_FD; + break; + case MII_BMSR: + v = MII_BMSR_100TX_FD | MII_BMSR_100TX_HD | MII_BMSR_10T_FD | + MII_BMSR_10T_HD | MII_BMSR_MFPS | MII_BMSR_AN_COMP | + MII_BMSR_AUTONEG | MII_BMSR_LINK_ST; + break; + case MII_PHYID1: + v = DP83848_PHYID1; + break; + case MII_PHYID2: + v = DP83848_PHYID2; + break; + case MII_ANAR: + v = MII_ANAR_TXFD | MII_ANAR_TX | MII_ANAR_10FD | + MII_ANAR_10 | MII_ANAR_CSMACD; + break; + case MII_ANLPAR: + v = MII_ANLPAR_ACK | MII_ANLPAR_TXFD | MII_ANLPAR_TX | + MII_ANLPAR_10FD | MII_ANLPAR_10 | MII_ANLPAR_CSMACD; + break; + default: + v = 0xffff; + break; + } + s->mmfr = (s->mmfr & ~0xffff) | v; + return s->mmfr; +} + +static uint64_t mcf_fec_read(void *opaque, hwaddr addr, + unsigned size) +{ + mcf_fec_state *s = (mcf_fec_state *)opaque; + switch (addr & 0x3ff) { + case 0x004: return s->eir; + case 0x008: return s->eimr; + case 0x010: return s->rx_enabled ? 
(1 << 24) : 0; /* RDAR */ + case 0x014: return 0; /* TDAR */ + case 0x024: return s->ecr; + case 0x040: return mcf_fec_read_mdio(s); + case 0x044: return s->mscr; + case 0x064: return 0; /* MIBC */ + case 0x084: return s->rcr; + case 0x0c4: return s->tcr; + case 0x0e4: /* PALR */ + return (s->conf.macaddr.a[0] << 24) | (s->conf.macaddr.a[1] << 16) + | (s->conf.macaddr.a[2] << 8) | s->conf.macaddr.a[3]; + break; + case 0x0e8: /* PAUR */ + return (s->conf.macaddr.a[4] << 24) | (s->conf.macaddr.a[5] << 16) | 0x8808; + case 0x0ec: return 0x10000; /* OPD */ + case 0x118: return 0; + case 0x11c: return 0; + case 0x120: return 0; + case 0x124: return 0; + case 0x144: return s->tfwr; + case 0x14c: return 0x600; + case 0x150: return s->rfsr; + case 0x180: return s->erdsr; + case 0x184: return s->etdsr; + case 0x188: return s->emrbr; + case 0x200 ... 0x2e0: return s->mib[(addr & 0x1ff) / 4]; + default: + qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad address 0x%" HWADDR_PRIX "\n", + __func__, addr); + return 0; + } +} + +static void mcf_fec_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + mcf_fec_state *s = (mcf_fec_state *)opaque; + switch (addr & 0x3ff) { + case 0x004: + s->eir &= ~value; + break; + case 0x008: + s->eimr = value; + break; + case 0x010: /* RDAR */ + if ((s->ecr & FEC_EN) && !s->rx_enabled) { + DPRINTF("RX enable\n"); + mcf_fec_enable_rx(s); + } + break; + case 0x014: /* TDAR */ + if (s->ecr & FEC_EN) { + mcf_fec_do_tx(s); + } + break; + case 0x024: + s->ecr = value; + if (value & FEC_RESET) { + DPRINTF("Reset\n"); + mcf_fec_reset(opaque); + } + if ((s->ecr & FEC_EN) == 0) { + s->rx_enabled = 0; + } + break; + case 0x040: + s->mmfr = value; + s->eir |= FEC_INT_MII; + break; + case 0x044: + s->mscr = value & 0xfe; + break; + case 0x064: + /* TODO: Implement MIB. */ + break; + case 0x084: + s->rcr = value & 0x07ff003f; + /* TODO: Implement LOOP mode. */ + break; + case 0x0c4: /* TCR */ + /* We transmit immediately, so raise GRA immediately. */ + s->tcr = value; + if (value & 1) + s->eir |= FEC_INT_GRA; + break; + case 0x0e4: /* PALR */ + s->conf.macaddr.a[0] = value >> 24; + s->conf.macaddr.a[1] = value >> 16; + s->conf.macaddr.a[2] = value >> 8; + s->conf.macaddr.a[3] = value; + break; + case 0x0e8: /* PAUR */ + s->conf.macaddr.a[4] = value >> 24; + s->conf.macaddr.a[5] = value >> 16; + break; + case 0x0ec: + /* OPD */ + break; + case 0x118: + case 0x11c: + case 0x120: + case 0x124: + /* TODO: implement MAC hash filtering. */ + break; + case 0x144: + s->tfwr = value & 3; + break; + case 0x14c: + /* FRBR writes ignored. */ + break; + case 0x150: + s->rfsr = (value & 0x3fc) | 0x400; + break; + case 0x180: + s->erdsr = value & ~3; + s->rx_descriptor = s->erdsr; + break; + case 0x184: + s->etdsr = value & ~3; + s->tx_descriptor = s->etdsr; + break; + case 0x188: + s->emrbr = value > 0 ? value & 0x7F0 : 0x7F0; + break; + case 0x200 ... 
0x2e0:
+        s->mib[(addr & 0x1ff) / 4] = value;
+        break;
+    default:
+        qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad address 0x%" HWADDR_PRIX "\n",
+                      __func__, addr);
+        return;
+    }
+    mcf_fec_update(s);
+}
+
+static void mcf_fec_rx_stats(mcf_fec_state *s, int size)
+{
+    s->mib[MIB_RMON_R_PACKETS]++;
+    s->mib[MIB_RMON_R_OCTETS] += size;
+    if (size < 64) {
+        s->mib[MIB_RMON_R_FRAG]++;
+    } else if (size == 64) {
+        s->mib[MIB_RMON_R_P64]++;
+    } else if (size < 128) {
+        s->mib[MIB_RMON_R_P65TO127]++;
+    } else if (size < 256) {
+        s->mib[MIB_RMON_R_P128TO255]++;
+    } else if (size < 512) {
+        s->mib[MIB_RMON_R_P256TO511]++;
+    } else if (size < 1024) {
+        s->mib[MIB_RMON_R_P512TO1023]++;
+    } else if (size < 2048) {
+        s->mib[MIB_RMON_R_P1024TO2047]++;
+    } else {
+        s->mib[MIB_RMON_R_P_GTE2048]++;
+    }
+    s->mib[MIB_IEEE_R_FRAME_OK]++;
+    s->mib[MIB_IEEE_R_OCTETS_OK] += size;
+}
+
+static int mcf_fec_have_receive_space(mcf_fec_state *s, size_t want)
+{
+    mcf_fec_bd bd;
+    uint32_t addr;
+
+    /* Walk descriptor list to determine if we have enough buffer */
+    addr = s->rx_descriptor;
+    while (want > 0) {
+        mcf_fec_read_bd(&bd, addr);
+        if ((bd.flags & FEC_BD_E) == 0) {
+            return 0;
+        }
+        if (want < s->emrbr) {
+            return 1;
+        }
+        want -= s->emrbr;
+        /* Advance to the next descriptor. */
+        if ((bd.flags & FEC_BD_W) != 0) {
+            addr = s->erdsr;
+        } else {
+            addr += 8;
+        }
+    }
+    return 0;
+}
+
+static ssize_t mcf_fec_receive(NetClientState *nc, const uint8_t *buf, size_t size)
+{
+    mcf_fec_state *s = qemu_get_nic_opaque(nc);
+    mcf_fec_bd bd;
+    uint32_t flags = 0;
+    uint32_t addr;
+    uint32_t crc;
+    uint32_t buf_addr;
+    uint8_t *crc_ptr;
+    unsigned int buf_len;
+    size_t retsize;
+
+    DPRINTF("do_rx len %d\n", (int)size);
+    if (!s->rx_enabled) {
+        return -1;
+    }
+    /* 4 bytes for the CRC. */
+    size += 4;
+    crc = cpu_to_be32(crc32(~0, buf, size));
+    crc_ptr = (uint8_t *)&crc;
+    /* Huge frames are truncated. */
+    if (size > FEC_MAX_FRAME_SIZE) {
+        size = FEC_MAX_FRAME_SIZE;
+        flags |= FEC_BD_TR | FEC_BD_LG;
+    }
+    /* Frames larger than the user limit just set error flags. */
+    if (size > (s->rcr >> 16)) {
+        flags |= FEC_BD_LG;
+    }
+    /* Check if we have enough space in current descriptors */
+    if (!mcf_fec_have_receive_space(s, size)) {
+        return 0;
+    }
+    addr = s->rx_descriptor;
+    retsize = size;
+    while (size > 0) {
+        mcf_fec_read_bd(&bd, addr);
+        buf_len = (size <= s->emrbr) ? size : s->emrbr;
+        bd.length = buf_len;
+        size -= buf_len;
+        DPRINTF("rx_bd %x length %d\n", addr, bd.length);
+        /* The last 4 bytes are the CRC. */
+        if (size < 4) {
+            buf_len += size - 4;
+        }
+        buf_addr = bd.data;
+        cpu_physical_memory_write(buf_addr, buf, buf_len);
+        buf += buf_len;
+        if (size < 4) {
+            cpu_physical_memory_write(buf_addr + buf_len, crc_ptr, 4 - size);
+            crc_ptr += 4 - size;
+        }
+        bd.flags &= ~FEC_BD_E;
+        if (size == 0) {
+            /* Last buffer in frame. */
+            bd.flags |= flags | FEC_BD_L;
+            DPRINTF("rx frame flags %04x\n", bd.flags);
+            s->eir |= FEC_INT_RXF;
+        } else {
+            s->eir |= FEC_INT_RXB;
+        }
+        mcf_fec_write_bd(&bd, addr);
+        /* Advance to the next descriptor.
*/ + if ((bd.flags & FEC_BD_W) != 0) { + addr = s->erdsr; + } else { + addr += 8; + } + } + s->rx_descriptor = addr; + mcf_fec_rx_stats(s, retsize); + mcf_fec_enable_rx(s); + mcf_fec_update(s); + return retsize; +} + +static const MemoryRegionOps mcf_fec_ops = { + .read = mcf_fec_read, + .write = mcf_fec_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static NetClientInfo net_mcf_fec_info = { + .type = NET_CLIENT_DRIVER_NIC, + .size = sizeof(NICState), + .receive = mcf_fec_receive, +}; + +static void mcf_fec_realize(DeviceState *dev, Error **errp) +{ + mcf_fec_state *s = MCF_FEC_NET(dev); + + s->nic = qemu_new_nic(&net_mcf_fec_info, &s->conf, + object_get_typename(OBJECT(dev)), dev->id, s); + qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a); +} + +static void mcf_fec_instance_init(Object *obj) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + mcf_fec_state *s = MCF_FEC_NET(obj); + int i; + + memory_region_init_io(&s->iomem, obj, &mcf_fec_ops, s, "fec", 0x400); + sysbus_init_mmio(sbd, &s->iomem); + for (i = 0; i < FEC_NUM_IRQ; i++) { + sysbus_init_irq(sbd, &s->irq[i]); + } +} + +static Property mcf_fec_properties[] = { + DEFINE_NIC_PROPERTIES(mcf_fec_state, conf), + DEFINE_PROP_END_OF_LIST(), +}; + +static void mcf_fec_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + + set_bit(DEVICE_CATEGORY_NETWORK, dc->categories); + dc->realize = mcf_fec_realize; + dc->desc = "MCF Fast Ethernet Controller network device"; + dc->reset = mcf_fec_reset; + device_class_set_props(dc, mcf_fec_properties); +} + +static const TypeInfo mcf_fec_info = { + .name = TYPE_MCF_FEC_NET, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(mcf_fec_state), + .instance_init = mcf_fec_instance_init, + .class_init = mcf_fec_class_init, +}; + +static void mcf_fec_register_types(void) +{ + type_register_static(&mcf_fec_info); +} + +type_init(mcf_fec_register_types) diff --git a/hw/net/meson.build b/hw/net/meson.build new file mode 100644 index 000000000..bdf71f1f4 --- /dev/null +++ b/hw/net/meson.build @@ -0,0 +1,67 @@ +softmmu_ss.add(when: 'CONFIG_DP8393X', if_true: files('dp8393x.c')) +softmmu_ss.add(when: 'CONFIG_XEN', if_true: files('xen_nic.c')) +softmmu_ss.add(when: 'CONFIG_NE2000_COMMON', if_true: files('ne2000.c')) + +# PCI network cards +softmmu_ss.add(when: 'CONFIG_NE2000_PCI', if_true: files('ne2000-pci.c')) +softmmu_ss.add(when: 'CONFIG_EEPRO100_PCI', if_true: files('eepro100.c')) +softmmu_ss.add(when: 'CONFIG_PCNET_PCI', if_true: files('pcnet-pci.c')) +softmmu_ss.add(when: 'CONFIG_PCNET_COMMON', if_true: files('pcnet.c')) +softmmu_ss.add(when: 'CONFIG_E1000_PCI', if_true: files('e1000.c', 'e1000x_common.c')) +softmmu_ss.add(when: 'CONFIG_E1000E_PCI_EXPRESS', if_true: files('net_tx_pkt.c', 'net_rx_pkt.c')) +softmmu_ss.add(when: 'CONFIG_E1000E_PCI_EXPRESS', if_true: files('e1000e.c', 'e1000e_core.c', 'e1000x_common.c')) +softmmu_ss.add(when: 'CONFIG_RTL8139_PCI', if_true: files('rtl8139.c')) +softmmu_ss.add(when: 'CONFIG_TULIP', if_true: files('tulip.c')) +softmmu_ss.add(when: 'CONFIG_VMXNET3_PCI', if_true: files('net_tx_pkt.c', 'net_rx_pkt.c')) +softmmu_ss.add(when: 'CONFIG_VMXNET3_PCI', if_true: files('vmxnet3.c')) + +softmmu_ss.add(when: 'CONFIG_SMC91C111', if_true: files('smc91c111.c')) +softmmu_ss.add(when: 'CONFIG_LAN9118', if_true: files('lan9118.c')) +softmmu_ss.add(when: 'CONFIG_NE2000_ISA', if_true: files('ne2000-isa.c')) +softmmu_ss.add(when: 'CONFIG_OPENCORES_ETH', if_true: files('opencores_eth.c')) +softmmu_ss.add(when: 'CONFIG_XGMAC', 
if_true: files('xgmac.c')) +softmmu_ss.add(when: 'CONFIG_MIPSNET', if_true: files('mipsnet.c')) +softmmu_ss.add(when: 'CONFIG_XILINX_AXI', if_true: files('xilinx_axienet.c')) +softmmu_ss.add(when: 'CONFIG_ALLWINNER_EMAC', if_true: files('allwinner_emac.c')) +softmmu_ss.add(when: 'CONFIG_ALLWINNER_SUN8I_EMAC', if_true: files('allwinner-sun8i-emac.c')) +softmmu_ss.add(when: 'CONFIG_IMX_FEC', if_true: files('imx_fec.c')) +softmmu_ss.add(when: 'CONFIG_MSF2', if_true: files('msf2-emac.c')) + +softmmu_ss.add(when: 'CONFIG_CADENCE', if_true: files('cadence_gem.c')) +softmmu_ss.add(when: 'CONFIG_STELLARIS_ENET', if_true: files('stellaris_enet.c')) +softmmu_ss.add(when: 'CONFIG_LANCE', if_true: files('lance.c')) +softmmu_ss.add(when: 'CONFIG_LASI_I82596', if_true: files('lasi_i82596.c')) +softmmu_ss.add(when: 'CONFIG_I82596_COMMON', if_true: files('i82596.c')) +softmmu_ss.add(when: 'CONFIG_SUNHME', if_true: files('sunhme.c')) +softmmu_ss.add(when: 'CONFIG_FTGMAC100', if_true: files('ftgmac100.c')) +softmmu_ss.add(when: 'CONFIG_SUNGEM', if_true: files('sungem.c')) +softmmu_ss.add(when: 'CONFIG_NPCM7XX', if_true: files('npcm7xx_emc.c')) + +softmmu_ss.add(when: 'CONFIG_ETRAXFS', if_true: files('etraxfs_eth.c')) +softmmu_ss.add(when: 'CONFIG_COLDFIRE', if_true: files('mcf_fec.c')) +specific_ss.add(when: 'CONFIG_PSERIES', if_true: files('spapr_llan.c')) +specific_ss.add(when: 'CONFIG_XILINX_ETHLITE', if_true: files('xilinx_ethlite.c')) + +softmmu_ss.add(when: 'CONFIG_VIRTIO_NET', if_true: files('net_rx_pkt.c')) +specific_ss.add(when: 'CONFIG_VIRTIO_NET', if_true: files('virtio-net.c')) + +softmmu_ss.add(when: ['CONFIG_VIRTIO_NET', 'CONFIG_VHOST_NET'], if_true: files('vhost_net.c'), if_false: files('vhost_net-stub.c')) +softmmu_ss.add(when: 'CONFIG_ALL', if_true: files('vhost_net-stub.c')) + +softmmu_ss.add(when: 'CONFIG_ETSEC', if_true: files( + 'fsl_etsec/etsec.c', + 'fsl_etsec/miim.c', + 'fsl_etsec/registers.c', + 'fsl_etsec/rings.c', +)) + +softmmu_ss.add(when: 'CONFIG_ROCKER', if_true: files( + 'rocker/rocker.c', + 'rocker/rocker_desc.c', + 'rocker/rocker_fp.c', + 'rocker/rocker_of_dpa.c', + 'rocker/rocker_world.c', +), if_false: files('rocker/qmp-norocker.c')) +softmmu_ss.add(when: 'CONFIG_ALL', if_true: files('rocker/qmp-norocker.c')) + +subdir('can') diff --git a/hw/net/mipsnet.c b/hw/net/mipsnet.c new file mode 100644 index 000000000..2ade72dea --- /dev/null +++ b/hw/net/mipsnet.c @@ -0,0 +1,297 @@ +#include "qemu/osdep.h" +#include "hw/irq.h" +#include "hw/qdev-properties.h" +#include "net/net.h" +#include "qemu/module.h" +#include "trace.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "qom/object.h" + +/* MIPSnet register offsets */ + +#define MIPSNET_DEV_ID 0x00 +#define MIPSNET_BUSY 0x08 +#define MIPSNET_RX_DATA_COUNT 0x0c +#define MIPSNET_TX_DATA_COUNT 0x10 +#define MIPSNET_INT_CTL 0x14 +# define MIPSNET_INTCTL_TXDONE 0x00000001 +# define MIPSNET_INTCTL_RXDONE 0x00000002 +# define MIPSNET_INTCTL_TESTBIT 0x80000000 +#define MIPSNET_INTERRUPT_INFO 0x18 +#define MIPSNET_RX_DATA_BUFFER 0x1c +#define MIPSNET_TX_DATA_BUFFER 0x20 + +#define MAX_ETH_FRAME_SIZE 1514 + +#define TYPE_MIPS_NET "mipsnet" +OBJECT_DECLARE_SIMPLE_TYPE(MIPSnetState, MIPS_NET) + +struct MIPSnetState { + SysBusDevice parent_obj; + + uint32_t busy; + uint32_t rx_count; + uint32_t rx_read; + uint32_t tx_count; + uint32_t tx_written; + uint32_t intctl; + uint8_t rx_buffer[MAX_ETH_FRAME_SIZE]; + uint8_t tx_buffer[MAX_ETH_FRAME_SIZE]; + MemoryRegion io; + qemu_irq irq; + NICState *nic; + NICConf conf; +}; + 
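
Note: the MIPSnet register map above is simple enough that the guest side of a transmit can be sketched in a few lines. This is an illustrative sketch only, not part of this patch; mmio_read32()/mmio_write32() are hypothetical stand-ins for whatever MMIO accessors the guest platform provides, and the flow simply mirrors the mipsnet_ioport_read()/mipsnet_ioport_write() handlers that follow:

    #include <stdint.h>
    #include <stddef.h>

    /* Hypothetical guest-side MMIO accessors (platform-specific). */
    extern void mmio_write32(uint32_t off, uint32_t val);
    extern uint32_t mmio_read32(uint32_t off);

    /* Polled transmit against the MIPSnet register map above. */
    static void mipsnet_guest_send(const uint8_t *pkt, size_t len)
    {
        size_t i;

        /* Arm the transmitter with the frame length... */
        mmio_write32(0x10 /* MIPSNET_TX_DATA_COUNT */, (uint32_t)len);
        /* ...then feed one byte per write into the TX data buffer. */
        for (i = 0; i < len; i++) {
            mmio_write32(0x20 /* MIPSNET_TX_DATA_BUFFER */, pkt[i]);
        }
        /* The model sets MIPSNET_INTCTL_TXDONE once len bytes arrive. */
        while (!(mmio_read32(0x14 /* MIPSNET_INT_CTL */) & 0x1)) {
            /* poll */
        }
        /* Acknowledge TXDONE by writing the flag back to INT_CTL. */
        mmio_write32(0x14, 0x1);
    }

On this model TXDONE is raised as soon as the final byte is written, so the poll loop exits immediately; a real driver would typically take the interrupt instead of spinning.
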
+static void mipsnet_reset(MIPSnetState *s) +{ + s->busy = 1; + s->rx_count = 0; + s->rx_read = 0; + s->tx_count = 0; + s->tx_written = 0; + s->intctl = 0; + memset(s->rx_buffer, 0, MAX_ETH_FRAME_SIZE); + memset(s->tx_buffer, 0, MAX_ETH_FRAME_SIZE); +} + +static void mipsnet_update_irq(MIPSnetState *s) +{ + int isr = !!s->intctl; + trace_mipsnet_irq(isr, s->intctl); + qemu_set_irq(s->irq, isr); +} + +static int mipsnet_buffer_full(MIPSnetState *s) +{ + if (s->rx_count >= MAX_ETH_FRAME_SIZE) { + return 1; + } + return 0; +} + +static int mipsnet_can_receive(NetClientState *nc) +{ + MIPSnetState *s = qemu_get_nic_opaque(nc); + + if (s->busy) { + return 0; + } + return !mipsnet_buffer_full(s); +} + +static ssize_t mipsnet_receive(NetClientState *nc, + const uint8_t *buf, size_t size) +{ + MIPSnetState *s = qemu_get_nic_opaque(nc); + + trace_mipsnet_receive(size); + if (!mipsnet_can_receive(nc)) { + return 0; + } + + if (size >= sizeof(s->rx_buffer)) { + return 0; + } + s->busy = 1; + + /* Just accept everything. */ + + /* Write packet data. */ + memcpy(s->rx_buffer, buf, size); + + s->rx_count = size; + s->rx_read = 0; + + /* Now we can signal we have received something. */ + s->intctl |= MIPSNET_INTCTL_RXDONE; + mipsnet_update_irq(s); + + return size; +} + +static uint64_t mipsnet_ioport_read(void *opaque, hwaddr addr, + unsigned int size) +{ + MIPSnetState *s = opaque; + int ret = 0; + + addr &= 0x3f; + switch (addr) { + case MIPSNET_DEV_ID: + ret = be32_to_cpu(0x4d495053); /* MIPS */ + break; + case MIPSNET_DEV_ID + 4: + ret = be32_to_cpu(0x4e455430); /* NET0 */ + break; + case MIPSNET_BUSY: + ret = s->busy; + break; + case MIPSNET_RX_DATA_COUNT: + ret = s->rx_count; + break; + case MIPSNET_TX_DATA_COUNT: + ret = s->tx_count; + break; + case MIPSNET_INT_CTL: + ret = s->intctl; + s->intctl &= ~MIPSNET_INTCTL_TESTBIT; + break; + case MIPSNET_INTERRUPT_INFO: + /* XXX: This seems to be a per-VPE interrupt number. */ + ret = 0; + break; + case MIPSNET_RX_DATA_BUFFER: + if (s->rx_count) { + s->rx_count--; + ret = s->rx_buffer[s->rx_read++]; + if (mipsnet_can_receive(s->nic->ncs)) { + qemu_flush_queued_packets(qemu_get_queue(s->nic)); + } + } + break; + /* Reads as zero. */ + case MIPSNET_TX_DATA_BUFFER: + default: + break; + } + trace_mipsnet_read(addr, ret); + return ret; +} + +static void mipsnet_ioport_write(void *opaque, hwaddr addr, + uint64_t val, unsigned int size) +{ + MIPSnetState *s = opaque; + + addr &= 0x3f; + trace_mipsnet_write(addr, val); + switch (addr) { + case MIPSNET_TX_DATA_COUNT: + s->tx_count = (val <= MAX_ETH_FRAME_SIZE) ? val : 0; + s->tx_written = 0; + break; + case MIPSNET_INT_CTL: + if (val & MIPSNET_INTCTL_TXDONE) { + s->intctl &= ~MIPSNET_INTCTL_TXDONE; + } else if (val & MIPSNET_INTCTL_RXDONE) { + s->intctl &= ~MIPSNET_INTCTL_RXDONE; + } else if (val & MIPSNET_INTCTL_TESTBIT) { + mipsnet_reset(s); + s->intctl |= MIPSNET_INTCTL_TESTBIT; + } else if (!val) { + /* ACK testbit interrupt, flag was cleared on read. */ + } + s->busy = !!s->intctl; + mipsnet_update_irq(s); + if (mipsnet_can_receive(s->nic->ncs)) { + qemu_flush_queued_packets(qemu_get_queue(s->nic)); + } + break; + case MIPSNET_TX_DATA_BUFFER: + s->tx_buffer[s->tx_written++] = val; + if ((s->tx_written >= MAX_ETH_FRAME_SIZE) + || (s->tx_written == s->tx_count)) { + /* Send buffer. 
*/ + trace_mipsnet_send(s->tx_written); + qemu_send_packet(qemu_get_queue(s->nic), + s->tx_buffer, s->tx_written); + s->tx_count = s->tx_written = 0; + s->intctl |= MIPSNET_INTCTL_TXDONE; + s->busy = 1; + mipsnet_update_irq(s); + } + break; + /* Read-only registers */ + case MIPSNET_DEV_ID: + case MIPSNET_BUSY: + case MIPSNET_RX_DATA_COUNT: + case MIPSNET_INTERRUPT_INFO: + case MIPSNET_RX_DATA_BUFFER: + default: + break; + } +} + +static const VMStateDescription vmstate_mipsnet = { + .name = "mipsnet", + .version_id = 0, + .minimum_version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_UINT32(busy, MIPSnetState), + VMSTATE_UINT32(rx_count, MIPSnetState), + VMSTATE_UINT32(rx_read, MIPSnetState), + VMSTATE_UINT32(tx_count, MIPSnetState), + VMSTATE_UINT32(tx_written, MIPSnetState), + VMSTATE_UINT32(intctl, MIPSnetState), + VMSTATE_BUFFER(rx_buffer, MIPSnetState), + VMSTATE_BUFFER(tx_buffer, MIPSnetState), + VMSTATE_END_OF_LIST() + } +}; + +static NetClientInfo net_mipsnet_info = { + .type = NET_CLIENT_DRIVER_NIC, + .size = sizeof(NICState), + .receive = mipsnet_receive, +}; + +static const MemoryRegionOps mipsnet_ioport_ops = { + .read = mipsnet_ioport_read, + .write = mipsnet_ioport_write, + .impl.min_access_size = 1, + .impl.max_access_size = 4, +}; + +static void mipsnet_realize(DeviceState *dev, Error **errp) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + MIPSnetState *s = MIPS_NET(dev); + + memory_region_init_io(&s->io, OBJECT(dev), &mipsnet_ioport_ops, s, + "mipsnet-io", 36); + sysbus_init_mmio(sbd, &s->io); + sysbus_init_irq(sbd, &s->irq); + + s->nic = qemu_new_nic(&net_mipsnet_info, &s->conf, + object_get_typename(OBJECT(dev)), dev->id, s); + qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a); +} + +static void mipsnet_sysbus_reset(DeviceState *dev) +{ + MIPSnetState *s = MIPS_NET(dev); + mipsnet_reset(s); +} + +static Property mipsnet_properties[] = { + DEFINE_NIC_PROPERTIES(MIPSnetState, conf), + DEFINE_PROP_END_OF_LIST(), +}; + +static void mipsnet_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = mipsnet_realize; + set_bit(DEVICE_CATEGORY_NETWORK, dc->categories); + dc->desc = "MIPS Simulator network device"; + dc->reset = mipsnet_sysbus_reset; + dc->vmsd = &vmstate_mipsnet; + device_class_set_props(dc, mipsnet_properties); +} + +static const TypeInfo mipsnet_info = { + .name = TYPE_MIPS_NET, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(MIPSnetState), + .class_init = mipsnet_class_init, +}; + +static void mipsnet_register_types(void) +{ + type_register_static(&mipsnet_info); +} + +type_init(mipsnet_register_types) diff --git a/hw/net/msf2-emac.c b/hw/net/msf2-emac.c new file mode 100644 index 000000000..9278fdce0 --- /dev/null +++ b/hw/net/msf2-emac.c @@ -0,0 +1,588 @@ +/* + * QEMU model of the Smartfusion2 Ethernet MAC. + * + * Copyright (c) 2020 Subbaraya Sundeep . + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + * + * Refer to section Ethernet MAC in the document: + * UG0331: SmartFusion2 Microcontroller Subsystem User Guide + * Datasheet URL: + * https://www.microsemi.com/document-portal/cat_view/56661-internal-documents/ + * 56758-soc?lang=en&limit=20&limitstart=220 + */ + +#include "qemu/osdep.h" +#include "qemu-common.h" +#include "qemu/log.h" +#include "qapi/error.h" +#include "hw/registerfields.h" +#include "hw/net/msf2-emac.h" +#include "hw/net/mii.h" +#include "hw/irq.h" +#include "hw/qdev-properties.h" +#include "migration/vmstate.h" + +REG32(CFG1, 0x0) + FIELD(CFG1, RESET, 31, 1) + FIELD(CFG1, RX_EN, 2, 1) + FIELD(CFG1, TX_EN, 0, 1) + FIELD(CFG1, LB_EN, 8, 1) +REG32(CFG2, 0x4) +REG32(IFG, 0x8) +REG32(HALF_DUPLEX, 0xc) +REG32(MAX_FRAME_LENGTH, 0x10) +REG32(MII_CMD, 0x24) + FIELD(MII_CMD, READ, 0, 1) +REG32(MII_ADDR, 0x28) + FIELD(MII_ADDR, REGADDR, 0, 5) + FIELD(MII_ADDR, PHYADDR, 8, 5) +REG32(MII_CTL, 0x2c) +REG32(MII_STS, 0x30) +REG32(STA1, 0x40) +REG32(STA2, 0x44) +REG32(FIFO_CFG0, 0x48) +REG32(FIFO_CFG4, 0x58) + FIELD(FIFO_CFG4, BCAST, 9, 1) + FIELD(FIFO_CFG4, MCAST, 8, 1) +REG32(FIFO_CFG5, 0x5C) + FIELD(FIFO_CFG5, BCAST, 9, 1) + FIELD(FIFO_CFG5, MCAST, 8, 1) +REG32(DMA_TX_CTL, 0x180) + FIELD(DMA_TX_CTL, EN, 0, 1) +REG32(DMA_TX_DESC, 0x184) +REG32(DMA_TX_STATUS, 0x188) + FIELD(DMA_TX_STATUS, PKTCNT, 16, 8) + FIELD(DMA_TX_STATUS, UNDERRUN, 1, 1) + FIELD(DMA_TX_STATUS, PKT_SENT, 0, 1) +REG32(DMA_RX_CTL, 0x18c) + FIELD(DMA_RX_CTL, EN, 0, 1) +REG32(DMA_RX_DESC, 0x190) +REG32(DMA_RX_STATUS, 0x194) + FIELD(DMA_RX_STATUS, PKTCNT, 16, 8) + FIELD(DMA_RX_STATUS, OVERFLOW, 2, 1) + FIELD(DMA_RX_STATUS, PKT_RCVD, 0, 1) +REG32(DMA_IRQ_MASK, 0x198) +REG32(DMA_IRQ, 0x19c) + +#define EMPTY_MASK (1 << 31) +#define PKT_SIZE 0x7FF +#define PHYADDR 0x1 +#define MAX_PKT_SIZE 2048 + +typedef struct { + uint32_t pktaddr; + uint32_t pktsize; + uint32_t next; +} EmacDesc; + +static uint32_t emac_get_isr(MSF2EmacState *s) +{ + uint32_t ier = s->regs[R_DMA_IRQ_MASK]; + uint32_t tx = s->regs[R_DMA_TX_STATUS] & 0xF; + uint32_t rx = s->regs[R_DMA_RX_STATUS] & 0xF; + uint32_t isr = (rx << 4) | tx; + + s->regs[R_DMA_IRQ] = ier & isr; + return s->regs[R_DMA_IRQ]; +} + +static void emac_update_irq(MSF2EmacState *s) +{ + bool intr = emac_get_isr(s); + + qemu_set_irq(s->irq, intr); +} + +static void emac_load_desc(MSF2EmacState *s, EmacDesc *d, hwaddr desc) +{ + address_space_read(&s->dma_as, desc, MEMTXATTRS_UNSPECIFIED, d, sizeof *d); + /* Convert from LE into host endianness. */ + d->pktaddr = le32_to_cpu(d->pktaddr); + d->pktsize = le32_to_cpu(d->pktsize); + d->next = le32_to_cpu(d->next); +} + +static void emac_store_desc(MSF2EmacState *s, EmacDesc *d, hwaddr desc) +{ + /* Convert from host endianness into LE. 
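Guest descriptors are always little-endian, so this is a no-op on LE hosts and a byte swap on BE hosts (the inverse of emac_load_desc() above).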
*/ + d->pktaddr = cpu_to_le32(d->pktaddr); + d->pktsize = cpu_to_le32(d->pktsize); + d->next = cpu_to_le32(d->next); + + address_space_write(&s->dma_as, desc, MEMTXATTRS_UNSPECIFIED, d, sizeof *d); +} + +static void msf2_dma_tx(MSF2EmacState *s) +{ + NetClientState *nc = qemu_get_queue(s->nic); + hwaddr desc = s->regs[R_DMA_TX_DESC]; + uint8_t buf[MAX_PKT_SIZE]; + EmacDesc d; + int size; + uint8_t pktcnt; + uint32_t status; + + if (!(s->regs[R_CFG1] & R_CFG1_TX_EN_MASK)) { + return; + } + + while (1) { + emac_load_desc(s, &d, desc); + if (d.pktsize & EMPTY_MASK) { + break; + } + size = d.pktsize & PKT_SIZE; + address_space_read(&s->dma_as, d.pktaddr, MEMTXATTRS_UNSPECIFIED, + buf, size); + /* + * This is very basic way to send packets. Ideally there should be + * a FIFO and packets should be sent out from FIFO only when + * R_CFG1 bit 0 is set. + */ + if (s->regs[R_CFG1] & R_CFG1_LB_EN_MASK) { + qemu_receive_packet(nc, buf, size); + } else { + qemu_send_packet(nc, buf, size); + } + d.pktsize |= EMPTY_MASK; + emac_store_desc(s, &d, desc); + /* update sent packets count */ + status = s->regs[R_DMA_TX_STATUS]; + pktcnt = FIELD_EX32(status, DMA_TX_STATUS, PKTCNT); + pktcnt++; + s->regs[R_DMA_TX_STATUS] = FIELD_DP32(status, DMA_TX_STATUS, + PKTCNT, pktcnt); + s->regs[R_DMA_TX_STATUS] |= R_DMA_TX_STATUS_PKT_SENT_MASK; + desc = d.next; + } + s->regs[R_DMA_TX_STATUS] |= R_DMA_TX_STATUS_UNDERRUN_MASK; + s->regs[R_DMA_TX_CTL] &= ~R_DMA_TX_CTL_EN_MASK; +} + +static void msf2_phy_update_link(MSF2EmacState *s) +{ + /* Autonegotiation status mirrors link status. */ + if (qemu_get_queue(s->nic)->link_down) { + s->phy_regs[MII_BMSR] &= ~(MII_BMSR_AN_COMP | + MII_BMSR_LINK_ST); + } else { + s->phy_regs[MII_BMSR] |= (MII_BMSR_AN_COMP | + MII_BMSR_LINK_ST); + } +} + +static void msf2_phy_reset(MSF2EmacState *s) +{ + memset(&s->phy_regs[0], 0, sizeof(s->phy_regs)); + s->phy_regs[MII_BMCR] = 0x1140; + s->phy_regs[MII_BMSR] = 0x7968; + s->phy_regs[MII_PHYID1] = 0x0022; + s->phy_regs[MII_PHYID2] = 0x1550; + s->phy_regs[MII_ANAR] = 0x01E1; + s->phy_regs[MII_ANLPAR] = 0xCDE1; + + msf2_phy_update_link(s); +} + +static void write_to_phy(MSF2EmacState *s) +{ + uint8_t reg_addr = s->regs[R_MII_ADDR] & R_MII_ADDR_REGADDR_MASK; + uint8_t phy_addr = (s->regs[R_MII_ADDR] >> R_MII_ADDR_PHYADDR_SHIFT) & + R_MII_ADDR_REGADDR_MASK; + uint16_t data = s->regs[R_MII_CTL] & 0xFFFF; + + if (phy_addr != PHYADDR) { + return; + } + + switch (reg_addr) { + case MII_BMCR: + if (data & MII_BMCR_RESET) { + /* Phy reset */ + msf2_phy_reset(s); + data &= ~MII_BMCR_RESET; + } + if (data & MII_BMCR_AUTOEN) { + /* Complete autonegotiation immediately */ + data &= ~MII_BMCR_AUTOEN; + s->phy_regs[MII_BMSR] |= MII_BMSR_AN_COMP; + } + break; + } + + s->phy_regs[reg_addr] = data; +} + +static uint16_t read_from_phy(MSF2EmacState *s) +{ + uint8_t reg_addr = s->regs[R_MII_ADDR] & R_MII_ADDR_REGADDR_MASK; + uint8_t phy_addr = (s->regs[R_MII_ADDR] >> R_MII_ADDR_PHYADDR_SHIFT) & + R_MII_ADDR_REGADDR_MASK; + + if (phy_addr == PHYADDR) { + return s->phy_regs[reg_addr]; + } else { + return 0xFFFF; + } +} + +static void msf2_emac_do_reset(MSF2EmacState *s) +{ + memset(&s->regs[0], 0, sizeof(s->regs)); + s->regs[R_CFG1] = 0x80000000; + s->regs[R_CFG2] = 0x00007000; + s->regs[R_IFG] = 0x40605060; + s->regs[R_HALF_DUPLEX] = 0x00A1F037; + s->regs[R_MAX_FRAME_LENGTH] = 0x00000600; + s->regs[R_FIFO_CFG5] = 0X3FFFF; + + msf2_phy_reset(s); +} + +static uint64_t emac_read(void *opaque, hwaddr addr, unsigned int size) +{ + MSF2EmacState *s = opaque; + uint32_t r = 
0; + + addr >>= 2; + + switch (addr) { + case R_DMA_IRQ: + r = emac_get_isr(s); + break; + default: + if (addr >= ARRAY_SIZE(s->regs)) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad offset 0x%" HWADDR_PRIx "\n", __func__, + addr * 4); + return r; + } + r = s->regs[addr]; + break; + } + return r; +} + +static void emac_write(void *opaque, hwaddr addr, uint64_t val64, + unsigned int size) +{ + MSF2EmacState *s = opaque; + uint32_t value = val64; + uint32_t enreqbits; + uint8_t pktcnt; + + addr >>= 2; + switch (addr) { + case R_DMA_TX_CTL: + s->regs[addr] = value; + if (value & R_DMA_TX_CTL_EN_MASK) { + msf2_dma_tx(s); + } + break; + case R_DMA_RX_CTL: + s->regs[addr] = value; + if (value & R_DMA_RX_CTL_EN_MASK) { + s->rx_desc = s->regs[R_DMA_RX_DESC]; + qemu_flush_queued_packets(qemu_get_queue(s->nic)); + } + break; + case R_CFG1: + s->regs[addr] = value; + if (value & R_CFG1_RESET_MASK) { + msf2_emac_do_reset(s); + } + break; + case R_FIFO_CFG0: + /* + * For our implementation, turning on modules is instantaneous, + * so the states requested via the *ENREQ bits appear in the + * *ENRPLY bits immediately. Also the reset bits to reset PE-MCXMAC + * module are not emulated here since it deals with start of frames, + * inter-packet gap and control frames. + */ + enreqbits = extract32(value, 8, 5); + s->regs[addr] = deposit32(value, 16, 5, enreqbits); + break; + case R_DMA_TX_DESC: + if (value & 0x3) { + qemu_log_mask(LOG_GUEST_ERROR, "Tx Descriptor address should be" + " 32 bit aligned\n"); + } + /* Ignore [1:0] bits */ + s->regs[addr] = value & ~3; + break; + case R_DMA_RX_DESC: + if (value & 0x3) { + qemu_log_mask(LOG_GUEST_ERROR, "Rx Descriptor address should be" + " 32 bit aligned\n"); + } + /* Ignore [1:0] bits */ + s->regs[addr] = value & ~3; + break; + case R_DMA_TX_STATUS: + if (value & R_DMA_TX_STATUS_UNDERRUN_MASK) { + s->regs[addr] &= ~R_DMA_TX_STATUS_UNDERRUN_MASK; + } + if (value & R_DMA_TX_STATUS_PKT_SENT_MASK) { + pktcnt = FIELD_EX32(s->regs[addr], DMA_TX_STATUS, PKTCNT); + pktcnt--; + s->regs[addr] = FIELD_DP32(s->regs[addr], DMA_TX_STATUS, + PKTCNT, pktcnt); + if (pktcnt == 0) { + s->regs[addr] &= ~R_DMA_TX_STATUS_PKT_SENT_MASK; + } + } + break; + case R_DMA_RX_STATUS: + if (value & R_DMA_RX_STATUS_OVERFLOW_MASK) { + s->regs[addr] &= ~R_DMA_RX_STATUS_OVERFLOW_MASK; + } + if (value & R_DMA_RX_STATUS_PKT_RCVD_MASK) { + pktcnt = FIELD_EX32(s->regs[addr], DMA_RX_STATUS, PKTCNT); + pktcnt--; + s->regs[addr] = FIELD_DP32(s->regs[addr], DMA_RX_STATUS, + PKTCNT, pktcnt); + if (pktcnt == 0) { + s->regs[addr] &= ~R_DMA_RX_STATUS_PKT_RCVD_MASK; + } + } + break; + case R_DMA_IRQ: + break; + case R_MII_CMD: + if (value & R_MII_CMD_READ_MASK) { + s->regs[R_MII_STS] = read_from_phy(s); + } + break; + case R_MII_CTL: + s->regs[addr] = value; + write_to_phy(s); + break; + case R_STA1: + s->regs[addr] = value; + /* + * R_STA1 [31:24] : octet 1 of mac address + * R_STA1 [23:16] : octet 2 of mac address + * R_STA1 [15:8] : octet 3 of mac address + * R_STA1 [7:0] : octet 4 of mac address + */ + stl_be_p(s->mac_addr, value); + break; + case R_STA2: + s->regs[addr] = value; + /* + * R_STA2 [31:24] : octet 5 of mac address + * R_STA2 [23:16] : octet 6 of mac address + */ + stw_be_p(s->mac_addr + 4, value >> 16); + break; + default: + if (addr >= ARRAY_SIZE(s->regs)) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad offset 0x%" HWADDR_PRIx "\n", __func__, + addr * 4); + return; + } + s->regs[addr] = value; + break; + } + emac_update_irq(s); +} + +static const MemoryRegionOps emac_ops = { + .read = 
emac_read, + .write = emac_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .impl = { + .min_access_size = 4, + .max_access_size = 4 + } +}; + +static bool emac_can_rx(NetClientState *nc) +{ + MSF2EmacState *s = qemu_get_nic_opaque(nc); + + return (s->regs[R_CFG1] & R_CFG1_RX_EN_MASK) && + (s->regs[R_DMA_RX_CTL] & R_DMA_RX_CTL_EN_MASK); +} + +static bool addr_filter_ok(MSF2EmacState *s, const uint8_t *buf) +{ + /* The broadcast MAC address: FF:FF:FF:FF:FF:FF */ + const uint8_t broadcast_addr[] = { 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF }; + bool bcast_en = true; + bool mcast_en = true; + + if (s->regs[R_FIFO_CFG5] & R_FIFO_CFG5_BCAST_MASK) { + bcast_en = true; /* Broadcast dont care for drop circuitry */ + } else if (s->regs[R_FIFO_CFG4] & R_FIFO_CFG4_BCAST_MASK) { + bcast_en = false; + } + + if (s->regs[R_FIFO_CFG5] & R_FIFO_CFG5_MCAST_MASK) { + mcast_en = true; /* Multicast dont care for drop circuitry */ + } else if (s->regs[R_FIFO_CFG4] & R_FIFO_CFG4_MCAST_MASK) { + mcast_en = false; + } + + if (!memcmp(buf, broadcast_addr, sizeof(broadcast_addr))) { + return bcast_en; + } + + if (buf[0] & 1) { + return mcast_en; + } + + return !memcmp(buf, s->mac_addr, sizeof(s->mac_addr)); +} + +static ssize_t emac_rx(NetClientState *nc, const uint8_t *buf, size_t size) +{ + MSF2EmacState *s = qemu_get_nic_opaque(nc); + EmacDesc d; + uint8_t pktcnt; + uint32_t status; + + if (size > (s->regs[R_MAX_FRAME_LENGTH] & 0xFFFF)) { + return size; + } + if (!addr_filter_ok(s, buf)) { + return size; + } + + emac_load_desc(s, &d, s->rx_desc); + + if (d.pktsize & EMPTY_MASK) { + address_space_write(&s->dma_as, d.pktaddr, MEMTXATTRS_UNSPECIFIED, + buf, size & PKT_SIZE); + d.pktsize = size & PKT_SIZE; + emac_store_desc(s, &d, s->rx_desc); + /* update received packets count */ + status = s->regs[R_DMA_RX_STATUS]; + pktcnt = FIELD_EX32(status, DMA_RX_STATUS, PKTCNT); + pktcnt++; + s->regs[R_DMA_RX_STATUS] = FIELD_DP32(status, DMA_RX_STATUS, + PKTCNT, pktcnt); + s->regs[R_DMA_RX_STATUS] |= R_DMA_RX_STATUS_PKT_RCVD_MASK; + s->rx_desc = d.next; + } else { + s->regs[R_DMA_RX_CTL] &= ~R_DMA_RX_CTL_EN_MASK; + s->regs[R_DMA_RX_STATUS] |= R_DMA_RX_STATUS_OVERFLOW_MASK; + } + emac_update_irq(s); + return size; +} + +static void msf2_emac_reset(DeviceState *dev) +{ + MSF2EmacState *s = MSS_EMAC(dev); + + msf2_emac_do_reset(s); +} + +static void emac_set_link(NetClientState *nc) +{ + MSF2EmacState *s = qemu_get_nic_opaque(nc); + + msf2_phy_update_link(s); +} + +static NetClientInfo net_msf2_emac_info = { + .type = NET_CLIENT_DRIVER_NIC, + .size = sizeof(NICState), + .can_receive = emac_can_rx, + .receive = emac_rx, + .link_status_changed = emac_set_link, +}; + +static void msf2_emac_realize(DeviceState *dev, Error **errp) +{ + MSF2EmacState *s = MSS_EMAC(dev); + + if (!s->dma_mr) { + error_setg(errp, "MSS_EMAC 'ahb-bus' link not set"); + return; + } + + address_space_init(&s->dma_as, s->dma_mr, "emac-ahb"); + + qemu_macaddr_default_if_unset(&s->conf.macaddr); + s->nic = qemu_new_nic(&net_msf2_emac_info, &s->conf, + object_get_typename(OBJECT(dev)), dev->id, s); + qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a); +} + +static void msf2_emac_init(Object *obj) +{ + MSF2EmacState *s = MSS_EMAC(obj); + + sysbus_init_irq(SYS_BUS_DEVICE(obj), &s->irq); + + memory_region_init_io(&s->mmio, obj, &emac_ops, s, + "msf2-emac", R_MAX * 4); + sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->mmio); +} + +static Property msf2_emac_properties[] = { + DEFINE_PROP_LINK("ahb-bus", MSF2EmacState, dma_mr, + TYPE_MEMORY_REGION, MemoryRegion 
*), + DEFINE_NIC_PROPERTIES(MSF2EmacState, conf), + DEFINE_PROP_END_OF_LIST(), +}; + +static const VMStateDescription vmstate_msf2_emac = { + .name = TYPE_MSS_EMAC, + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT8_ARRAY(mac_addr, MSF2EmacState, ETH_ALEN), + VMSTATE_UINT32(rx_desc, MSF2EmacState), + VMSTATE_UINT16_ARRAY(phy_regs, MSF2EmacState, PHY_MAX_REGS), + VMSTATE_UINT32_ARRAY(regs, MSF2EmacState, R_MAX), + VMSTATE_END_OF_LIST() + } +}; + +static void msf2_emac_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = msf2_emac_realize; + dc->reset = msf2_emac_reset; + dc->vmsd = &vmstate_msf2_emac; + device_class_set_props(dc, msf2_emac_properties); +} + +static const TypeInfo msf2_emac_info = { + .name = TYPE_MSS_EMAC, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(MSF2EmacState), + .instance_init = msf2_emac_init, + .class_init = msf2_emac_class_init, +}; + +static void msf2_emac_register_types(void) +{ + type_register_static(&msf2_emac_info); +} + +type_init(msf2_emac_register_types) diff --git a/hw/net/ne2000-isa.c b/hw/net/ne2000-isa.c new file mode 100644 index 000000000..dd6f6e34d --- /dev/null +++ b/hw/net/ne2000-isa.c @@ -0,0 +1,152 @@ +/* + * QEMU NE2000 emulation -- isa bus windup + * + * Copyright (c) 2003-2004 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include "hw/isa/isa.h" +#include "hw/net/ne2000-isa.h" +#include "migration/vmstate.h" +#include "ne2000.h" +#include "sysemu/sysemu.h" +#include "qapi/error.h" +#include "qapi/visitor.h" +#include "qemu/module.h" +#include "qom/object.h" + +OBJECT_DECLARE_SIMPLE_TYPE(ISANE2000State, ISA_NE2000) + +struct ISANE2000State { + ISADevice parent_obj; + + uint32_t iobase; + uint32_t isairq; + NE2000State ne2000; +}; + +static NetClientInfo net_ne2000_isa_info = { + .type = NET_CLIENT_DRIVER_NIC, + .size = sizeof(NICState), + .receive = ne2000_receive, +}; + +static const VMStateDescription vmstate_isa_ne2000 = { + .name = "ne2000", + .version_id = 2, + .minimum_version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_STRUCT(ne2000, ISANE2000State, 0, vmstate_ne2000, NE2000State), + VMSTATE_END_OF_LIST() + } +}; + +static void isa_ne2000_realizefn(DeviceState *dev, Error **errp) +{ + ISADevice *isadev = ISA_DEVICE(dev); + ISANE2000State *isa = ISA_NE2000(dev); + NE2000State *s = &isa->ne2000; + + ne2000_setup_io(s, DEVICE(isadev), 0x20); + isa_register_ioport(isadev, &s->io, isa->iobase); + + isa_init_irq(isadev, &s->irq, isa->isairq); + + qemu_macaddr_default_if_unset(&s->c.macaddr); + ne2000_reset(s); + + s->nic = qemu_new_nic(&net_ne2000_isa_info, &s->c, + object_get_typename(OBJECT(dev)), dev->id, s); + qemu_format_nic_info_str(qemu_get_queue(s->nic), s->c.macaddr.a); +} + +static Property ne2000_isa_properties[] = { + DEFINE_PROP_UINT32("iobase", ISANE2000State, iobase, 0x300), + DEFINE_PROP_UINT32("irq", ISANE2000State, isairq, 9), + DEFINE_NIC_PROPERTIES(ISANE2000State, ne2000.c), + DEFINE_PROP_END_OF_LIST(), +}; + +static void isa_ne2000_class_initfn(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = isa_ne2000_realizefn; + device_class_set_props(dc, ne2000_isa_properties); + dc->vmsd = &vmstate_isa_ne2000; + set_bit(DEVICE_CATEGORY_NETWORK, dc->categories); +} + +static void isa_ne2000_get_bootindex(Object *obj, Visitor *v, + const char *name, void *opaque, + Error **errp) +{ + ISANE2000State *isa = ISA_NE2000(obj); + NE2000State *s = &isa->ne2000; + + visit_type_int32(v, name, &s->c.bootindex, errp); +} + +static void isa_ne2000_set_bootindex(Object *obj, Visitor *v, + const char *name, void *opaque, + Error **errp) +{ + ISANE2000State *isa = ISA_NE2000(obj); + NE2000State *s = &isa->ne2000; + int32_t boot_index; + Error *local_err = NULL; + + if (!visit_type_int32(v, name, &boot_index, errp)) { + return; + } + /* check whether bootindex is present in fw_boot_order list */ + check_boot_index(boot_index, &local_err); + if (local_err) { + goto out; + } + /* change bootindex to a new one */ + s->c.bootindex = boot_index; + +out: + error_propagate(errp, local_err); +} + +static void isa_ne2000_instance_init(Object *obj) +{ + object_property_add(obj, "bootindex", "int32", + isa_ne2000_get_bootindex, + isa_ne2000_set_bootindex, NULL, NULL); + object_property_set_int(obj, "bootindex", -1, NULL); +} +static const TypeInfo ne2000_isa_info = { + .name = TYPE_ISA_NE2000, + .parent = TYPE_ISA_DEVICE, + .instance_size = sizeof(ISANE2000State), + .class_init = isa_ne2000_class_initfn, + .instance_init = isa_ne2000_instance_init, +}; + +static void ne2000_isa_register_types(void) +{ + type_register_static(&ne2000_isa_info); +} + +type_init(ne2000_isa_register_types) diff --git a/hw/net/ne2000-pci.c b/hw/net/ne2000-pci.c new file mode 100644 index 000000000..9e5d10859 --- /dev/null +++ b/hw/net/ne2000-pci.c @@ -0,0 +1,136 @@ 
+/*
+ * QEMU NE2000 emulation (PCI bus)
+ *
+ * Copyright (c) 2003-2004 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/irq.h"
+#include "hw/pci/pci.h"
+#include "hw/qdev-properties.h"
+#include "migration/vmstate.h"
+#include "ne2000.h"
+#include "sysemu/sysemu.h"
+
+typedef struct PCINE2000State {
+    PCIDevice dev;
+    NE2000State ne2000;
+} PCINE2000State;
+
+static const VMStateDescription vmstate_pci_ne2000 = {
+    .name = "ne2000",
+    .version_id = 3,
+    .minimum_version_id = 3,
+    .fields = (VMStateField[]) {
+        VMSTATE_PCI_DEVICE(dev, PCINE2000State),
+        VMSTATE_STRUCT(ne2000, PCINE2000State, 0, vmstate_ne2000, NE2000State),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+static NetClientInfo net_ne2000_info = {
+    .type = NET_CLIENT_DRIVER_NIC,
+    .size = sizeof(NICState),
+    .receive = ne2000_receive,
+};
+
+static void pci_ne2000_realize(PCIDevice *pci_dev, Error **errp)
+{
+    PCINE2000State *d = DO_UPCAST(PCINE2000State, dev, pci_dev);
+    NE2000State *s;
+    uint8_t *pci_conf;
+
+    pci_conf = d->dev.config;
+    pci_conf[PCI_INTERRUPT_PIN] = 1; /* interrupt pin A */
+
+    s = &d->ne2000;
+    ne2000_setup_io(s, DEVICE(pci_dev), 0x100);
+    pci_register_bar(&d->dev, 0, PCI_BASE_ADDRESS_SPACE_IO, &s->io);
+    s->irq = pci_allocate_irq(&d->dev);
+
+    qemu_macaddr_default_if_unset(&s->c.macaddr);
+    ne2000_reset(s);
+
+    s->nic = qemu_new_nic(&net_ne2000_info, &s->c,
+                          object_get_typename(OBJECT(pci_dev)),
+                          pci_dev->qdev.id, s);
+    qemu_format_nic_info_str(qemu_get_queue(s->nic), s->c.macaddr.a);
+}
+
+static void pci_ne2000_exit(PCIDevice *pci_dev)
+{
+    PCINE2000State *d = DO_UPCAST(PCINE2000State, dev, pci_dev);
+    NE2000State *s = &d->ne2000;
+
+    qemu_del_nic(s->nic);
+    qemu_free_irq(s->irq);
+}
+
+static void ne2000_instance_init(Object *obj)
+{
+    PCIDevice *pci_dev = PCI_DEVICE(obj);
+    PCINE2000State *d = DO_UPCAST(PCINE2000State, dev, pci_dev);
+    NE2000State *s = &d->ne2000;
+
+    device_add_bootindex_property(obj, &s->c.bootindex,
+                                  "bootindex", "/ethernet-phy@0",
+                                  &pci_dev->qdev);
+}
+
+static Property ne2000_properties[] = {
+    DEFINE_NIC_PROPERTIES(PCINE2000State, ne2000.c),
+    DEFINE_PROP_END_OF_LIST(),
+};
+
+static void ne2000_class_init(ObjectClass *klass, void *data)
+{
+    DeviceClass *dc = DEVICE_CLASS(klass);
+    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
+
+    k->realize = pci_ne2000_realize;
+    k->exit = pci_ne2000_exit;
+    k->romfile = "efi-ne2k_pci.rom";
+    k->vendor_id = PCI_VENDOR_ID_REALTEK;
+    k->device_id = 
PCI_DEVICE_ID_REALTEK_8029; + k->class_id = PCI_CLASS_NETWORK_ETHERNET; + dc->vmsd = &vmstate_pci_ne2000; + device_class_set_props(dc, ne2000_properties); + set_bit(DEVICE_CATEGORY_NETWORK, dc->categories); +} + +static const TypeInfo ne2000_info = { + .name = "ne2k_pci", + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(PCINE2000State), + .class_init = ne2000_class_init, + .instance_init = ne2000_instance_init, + .interfaces = (InterfaceInfo[]) { + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { }, + }, +}; + +static void ne2000_register_types(void) +{ + type_register_static(&ne2000_info); +} + +type_init(ne2000_register_types) diff --git a/hw/net/ne2000.c b/hw/net/ne2000.c new file mode 100644 index 000000000..6c17ee1ae --- /dev/null +++ b/hw/net/ne2000.c @@ -0,0 +1,700 @@ +/* + * QEMU NE2000 emulation + * + * Copyright (c) 2003-2004 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "net/eth.h" +#include "qemu/module.h" +#include "exec/memory.h" +#include "hw/irq.h" +#include "migration/vmstate.h" +#include "ne2000.h" +#include "trace.h" + +/* debug NE2000 card */ +//#define DEBUG_NE2000 + +#define MAX_ETH_FRAME_SIZE 1514 + +#define E8390_CMD 0x00 /* The command register (for all pages) */ +/* Page 0 register offsets. 
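Many offsets decode differently on read and write, e.g. 0x01 is CLDALO for reads but STARTPG for writes, per the RD/WR notes below.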
*/ +#define EN0_CLDALO 0x01 /* Low byte of current local dma addr RD */ +#define EN0_STARTPG 0x01 /* Starting page of ring bfr WR */ +#define EN0_CLDAHI 0x02 /* High byte of current local dma addr RD */ +#define EN0_STOPPG 0x02 /* Ending page +1 of ring bfr WR */ +#define EN0_BOUNDARY 0x03 /* Boundary page of ring bfr RD WR */ +#define EN0_TSR 0x04 /* Transmit status reg RD */ +#define EN0_TPSR 0x04 /* Transmit starting page WR */ +#define EN0_NCR 0x05 /* Number of collision reg RD */ +#define EN0_TCNTLO 0x05 /* Low byte of tx byte count WR */ +#define EN0_FIFO 0x06 /* FIFO RD */ +#define EN0_TCNTHI 0x06 /* High byte of tx byte count WR */ +#define EN0_ISR 0x07 /* Interrupt status reg RD WR */ +#define EN0_CRDALO 0x08 /* low byte of current remote dma address RD */ +#define EN0_RSARLO 0x08 /* Remote start address reg 0 */ +#define EN0_CRDAHI 0x09 /* high byte, current remote dma address RD */ +#define EN0_RSARHI 0x09 /* Remote start address reg 1 */ +#define EN0_RCNTLO 0x0a /* Remote byte count reg WR */ +#define EN0_RTL8029ID0 0x0a /* Realtek ID byte #1 RD */ +#define EN0_RCNTHI 0x0b /* Remote byte count reg WR */ +#define EN0_RTL8029ID1 0x0b /* Realtek ID byte #2 RD */ +#define EN0_RSR 0x0c /* rx status reg RD */ +#define EN0_RXCR 0x0c /* RX configuration reg WR */ +#define EN0_TXCR 0x0d /* TX configuration reg WR */ +#define EN0_COUNTER0 0x0d /* Rcv alignment error counter RD */ +#define EN0_DCFG 0x0e /* Data configuration reg WR */ +#define EN0_COUNTER1 0x0e /* Rcv CRC error counter RD */ +#define EN0_IMR 0x0f /* Interrupt mask reg WR */ +#define EN0_COUNTER2 0x0f /* Rcv missed frame error counter RD */ + +#define EN1_PHYS 0x11 +#define EN1_CURPAG 0x17 +#define EN1_MULT 0x18 + +#define EN2_STARTPG 0x21 /* Starting page of ring bfr RD */ +#define EN2_STOPPG 0x22 /* Ending page +1 of ring bfr RD */ + +#define EN3_CONFIG0 0x33 +#define EN3_CONFIG1 0x34 +#define EN3_CONFIG2 0x35 +#define EN3_CONFIG3 0x36 + +/* Register accessed at EN_CMD, the 8390 base addr. */ +#define E8390_STOP 0x01 /* Stop and reset the chip */ +#define E8390_START 0x02 /* Start the chip, clear reset */ +#define E8390_TRANS 0x04 /* Transmit a frame */ +#define E8390_RREAD 0x08 /* Remote read */ +#define E8390_RWRITE 0x10 /* Remote write */ +#define E8390_NODMA 0x20 /* Remote DMA */ +#define E8390_PAGE0 0x00 /* Select page chip registers */ +#define E8390_PAGE1 0x40 /* using the two high-order bits */ +#define E8390_PAGE2 0x80 /* Page 3 is invalid. */ + +/* Bits in EN0_ISR - Interrupt status register */ +#define ENISR_RX 0x01 /* Receiver, no error */ +#define ENISR_TX 0x02 /* Transmitter, no error */ +#define ENISR_RX_ERR 0x04 /* Receiver, with error */ +#define ENISR_TX_ERR 0x08 /* Transmitter, with error */ +#define ENISR_OVER 0x10 /* Receiver overwrote the ring */ +#define ENISR_COUNTERS 0x20 /* Counters need emptying */ +#define ENISR_RDC 0x40 /* remote dma complete */ +#define ENISR_RESET 0x80 /* Reset completed */ +#define ENISR_ALL 0x3f /* Interrupts we will enable */ + +/* Bits in received packet status byte and EN0_RSR*/ +#define ENRSR_RXOK 0x01 /* Received a good packet */ +#define ENRSR_CRC 0x02 /* CRC error */ +#define ENRSR_FAE 0x04 /* frame alignment error */ +#define ENRSR_FO 0x08 /* FIFO overrun */ +#define ENRSR_MPA 0x10 /* missed pkt */ +#define ENRSR_PHY 0x20 /* physical/multicast address */ +#define ENRSR_DIS 0x40 /* receiver disable. set in monitor mode */ +#define ENRSR_DEF 0x80 /* deferring */ + +/* Transmitted packet status, EN0_TSR. 
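This model only ever reports ENTSR_PTX, since an emulated transmit always completes without collisions or underruns.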
*/ +#define ENTSR_PTX 0x01 /* Packet transmitted without error */ +#define ENTSR_ND 0x02 /* The transmit wasn't deferred. */ +#define ENTSR_COL 0x04 /* The transmit collided at least once. */ +#define ENTSR_ABT 0x08 /* The transmit collided 16 times, and was deferred. */ +#define ENTSR_CRS 0x10 /* The carrier sense was lost. */ +#define ENTSR_FU 0x20 /* A "FIFO underrun" occurred during transmit. */ +#define ENTSR_CDH 0x40 /* The collision detect "heartbeat" signal was lost. */ +#define ENTSR_OWC 0x80 /* There was an out-of-window collision. */ + +void ne2000_reset(NE2000State *s) +{ + int i; + + s->isr = ENISR_RESET; + memcpy(s->mem, &s->c.macaddr, 6); + s->mem[14] = 0x57; + s->mem[15] = 0x57; + + /* duplicate prom data */ + for(i = 15;i >= 0; i--) { + s->mem[2 * i] = s->mem[i]; + s->mem[2 * i + 1] = s->mem[i]; + } +} + +static void ne2000_update_irq(NE2000State *s) +{ + int isr; + isr = (s->isr & s->imr) & 0x7f; +#if defined(DEBUG_NE2000) + printf("NE2000: Set IRQ to %d (%02x %02x)\n", + isr ? 1 : 0, s->isr, s->imr); +#endif + qemu_set_irq(s->irq, (isr != 0)); +} + +static int ne2000_buffer_full(NE2000State *s) +{ + int avail, index, boundary; + + if (s->stop <= s->start) { + return 1; + } + + index = s->curpag << 8; + boundary = s->boundary << 8; + if (index < boundary) + avail = boundary - index; + else + avail = (s->stop - s->start) - (index - boundary); + if (avail < (MAX_ETH_FRAME_SIZE + 4)) + return 1; + return 0; +} + +#define MIN_BUF_SIZE 60 + +ssize_t ne2000_receive(NetClientState *nc, const uint8_t *buf, size_t size_) +{ + NE2000State *s = qemu_get_nic_opaque(nc); + size_t size = size_; + uint8_t *p; + unsigned int total_len, next, avail, len, index, mcast_idx; + uint8_t buf1[60]; + static const uint8_t broadcast_macaddr[6] = + { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; + +#if defined(DEBUG_NE2000) + printf("NE2000: received len=%zu\n", size); +#endif + + if (s->cmd & E8390_STOP || ne2000_buffer_full(s)) + return -1; + + /* XXX: check this */ + if (s->rxcr & 0x10) { + /* promiscuous: receive all */ + } else { + if (!memcmp(buf, broadcast_macaddr, 6)) { + /* broadcast address */ + if (!(s->rxcr & 0x04)) + return size; + } else if (buf[0] & 0x01) { + /* multicast */ + if (!(s->rxcr & 0x08)) + return size; + mcast_idx = net_crc32(buf, ETH_ALEN) >> 26; + if (!(s->mult[mcast_idx >> 3] & (1 << (mcast_idx & 7)))) + return size; + } else if (s->mem[0] == buf[0] && + s->mem[2] == buf[1] && + s->mem[4] == buf[2] && + s->mem[6] == buf[3] && + s->mem[8] == buf[4] && + s->mem[10] == buf[5]) { + /* match */ + } else { + return size; + } + } + + + /* if too small buffer, then expand it */ + if (size < MIN_BUF_SIZE) { + memcpy(buf1, buf, size); + memset(buf1 + size, 0, MIN_BUF_SIZE - size); + buf = buf1; + size = MIN_BUF_SIZE; + } + + index = s->curpag << 8; + if (index >= NE2000_PMEM_END) { + index = s->start; + } + /* 4 bytes for header */ + total_len = size + 4; + /* address for next packet (4 bytes for CRC) */ + next = index + ((total_len + 4 + 255) & ~0xff); + if (next >= s->stop) + next -= (s->stop - s->start); + /* prepare packet header */ + p = s->mem + index; + s->rsr = ENRSR_RXOK; /* receive status */ + /* XXX: check this */ + if (buf[0] & 0x01) + s->rsr |= ENRSR_PHY; + p[0] = s->rsr; + p[1] = next >> 8; + p[2] = total_len; + p[3] = total_len >> 8; + index += 4; + + /* write packet data */ + while (size > 0) { + if (index <= s->stop) + avail = s->stop - index; + else + break; + len = size; + if (len > avail) + len = avail; + memcpy(s->mem + index, buf, len); + buf += len; + index += 
len; + if (index == s->stop) + index = s->start; + size -= len; + } + s->curpag = next >> 8; + + /* now we can signal we have received something */ + s->isr |= ENISR_RX; + ne2000_update_irq(s); + + return size_; +} + +static void ne2000_ioport_write(void *opaque, uint32_t addr, uint32_t val) +{ + NE2000State *s = opaque; + int offset, page, index; + + addr &= 0xf; + trace_ne2000_ioport_write(addr, val); + if (addr == E8390_CMD) { + /* control register */ + s->cmd = val; + if (!(val & E8390_STOP)) { /* START bit makes no sense on RTL8029... */ + s->isr &= ~ENISR_RESET; + /* test specific case: zero length transfer */ + if ((val & (E8390_RREAD | E8390_RWRITE)) && + s->rcnt == 0) { + s->isr |= ENISR_RDC; + ne2000_update_irq(s); + } + if (val & E8390_TRANS) { + index = (s->tpsr << 8); + /* XXX: next 2 lines are a hack to make netware 3.11 work */ + if (index >= NE2000_PMEM_END) + index -= NE2000_PMEM_SIZE; + /* fail safe: check range on the transmitted length */ + if (index + s->tcnt <= NE2000_PMEM_END) { + qemu_send_packet(qemu_get_queue(s->nic), s->mem + index, + s->tcnt); + } + /* signal end of transfer */ + s->tsr = ENTSR_PTX; + s->isr |= ENISR_TX; + s->cmd &= ~E8390_TRANS; + ne2000_update_irq(s); + } + } + } else { + page = s->cmd >> 6; + offset = addr | (page << 4); + switch(offset) { + case EN0_STARTPG: + if (val << 8 <= NE2000_PMEM_END) { + s->start = val << 8; + } + break; + case EN0_STOPPG: + if (val << 8 <= NE2000_PMEM_END) { + s->stop = val << 8; + } + break; + case EN0_BOUNDARY: + if (val << 8 < NE2000_PMEM_END) { + s->boundary = val; + } + break; + case EN0_IMR: + s->imr = val; + ne2000_update_irq(s); + break; + case EN0_TPSR: + s->tpsr = val; + break; + case EN0_TCNTLO: + s->tcnt = (s->tcnt & 0xff00) | val; + break; + case EN0_TCNTHI: + s->tcnt = (s->tcnt & 0x00ff) | (val << 8); + break; + case EN0_RSARLO: + s->rsar = (s->rsar & 0xff00) | val; + break; + case EN0_RSARHI: + s->rsar = (s->rsar & 0x00ff) | (val << 8); + break; + case EN0_RCNTLO: + s->rcnt = (s->rcnt & 0xff00) | val; + break; + case EN0_RCNTHI: + s->rcnt = (s->rcnt & 0x00ff) | (val << 8); + break; + case EN0_RXCR: + s->rxcr = val; + break; + case EN0_DCFG: + s->dcfg = val; + break; + case EN0_ISR: + s->isr &= ~(val & 0x7f); + ne2000_update_irq(s); + break; + case EN1_PHYS ... EN1_PHYS + 5: + s->phys[offset - EN1_PHYS] = val; + break; + case EN1_CURPAG: + if (val << 8 < NE2000_PMEM_END) { + s->curpag = val; + } + break; + case EN1_MULT ... EN1_MULT + 7: + s->mult[offset - EN1_MULT] = val; + break; + } + } +} + +static uint32_t ne2000_ioport_read(void *opaque, uint32_t addr) +{ + NE2000State *s = opaque; + int offset, page, ret; + + addr &= 0xf; + if (addr == E8390_CMD) { + ret = s->cmd; + } else { + page = s->cmd >> 6; + offset = addr | (page << 4); + switch(offset) { + case EN0_TSR: + ret = s->tsr; + break; + case EN0_BOUNDARY: + ret = s->boundary; + break; + case EN0_ISR: + ret = s->isr; + break; + case EN0_RSARLO: + ret = s->rsar & 0x00ff; + break; + case EN0_RSARHI: + ret = s->rsar >> 8; + break; + case EN1_PHYS ... EN1_PHYS + 5: + ret = s->phys[offset - EN1_PHYS]; + break; + case EN1_CURPAG: + ret = s->curpag; + break; + case EN1_MULT ... 
EN1_MULT + 7: + ret = s->mult[offset - EN1_MULT]; + break; + case EN0_RSR: + ret = s->rsr; + break; + case EN2_STARTPG: + ret = s->start >> 8; + break; + case EN2_STOPPG: + ret = s->stop >> 8; + break; + case EN0_RTL8029ID0: + ret = 0x50; + break; + case EN0_RTL8029ID1: + ret = 0x43; + break; + case EN3_CONFIG0: + ret = 0; /* 10baseT media */ + break; + case EN3_CONFIG2: + ret = 0x40; /* 10baseT active */ + break; + case EN3_CONFIG3: + ret = 0x40; /* Full duplex */ + break; + default: + ret = 0x00; + break; + } + } + trace_ne2000_ioport_read(addr, ret); + return ret; +} + +static inline void ne2000_mem_writeb(NE2000State *s, uint32_t addr, + uint32_t val) +{ + if (addr < 32 || + (addr >= NE2000_PMEM_START && addr < NE2000_MEM_SIZE)) { + s->mem[addr] = val; + } +} + +static inline void ne2000_mem_writew(NE2000State *s, uint32_t addr, + uint32_t val) +{ + addr &= ~1; /* XXX: check exact behaviour if not even */ + if (addr < 32 || + (addr >= NE2000_PMEM_START && addr < NE2000_MEM_SIZE)) { + *(uint16_t *)(s->mem + addr) = cpu_to_le16(val); + } +} + +static inline void ne2000_mem_writel(NE2000State *s, uint32_t addr, + uint32_t val) +{ + addr &= ~1; /* XXX: check exact behaviour if not even */ + if (addr < 32 + || (addr >= NE2000_PMEM_START + && addr + sizeof(uint32_t) <= NE2000_MEM_SIZE)) { + stl_le_p(s->mem + addr, val); + } +} + +static inline uint32_t ne2000_mem_readb(NE2000State *s, uint32_t addr) +{ + if (addr < 32 || + (addr >= NE2000_PMEM_START && addr < NE2000_MEM_SIZE)) { + return s->mem[addr]; + } else { + return 0xff; + } +} + +static inline uint32_t ne2000_mem_readw(NE2000State *s, uint32_t addr) +{ + addr &= ~1; /* XXX: check exact behaviour if not even */ + if (addr < 32 || + (addr >= NE2000_PMEM_START && addr < NE2000_MEM_SIZE)) { + return le16_to_cpu(*(uint16_t *)(s->mem + addr)); + } else { + return 0xffff; + } +} + +static inline uint32_t ne2000_mem_readl(NE2000State *s, uint32_t addr) +{ + addr &= ~1; /* XXX: check exact behaviour if not even */ + if (addr < 32 + || (addr >= NE2000_PMEM_START + && addr + sizeof(uint32_t) <= NE2000_MEM_SIZE)) { + return ldl_le_p(s->mem + addr); + } else { + return 0xffffffff; + } +} + +static inline void ne2000_dma_update(NE2000State *s, int len) +{ + s->rsar += len; + /* wrap */ + /* XXX: check what to do if rsar > stop */ + if (s->rsar == s->stop) + s->rsar = s->start; + + if (s->rcnt <= len) { + s->rcnt = 0; + /* signal end of transfer */ + s->isr |= ENISR_RDC; + ne2000_update_irq(s); + } else { + s->rcnt -= len; + } +} + +static void ne2000_asic_ioport_write(void *opaque, uint32_t addr, uint32_t val) +{ + NE2000State *s = opaque; + +#ifdef DEBUG_NE2000 + printf("NE2000: asic write val=0x%04x\n", val); +#endif + if (s->rcnt == 0) + return; + if (s->dcfg & 0x01) { + /* 16 bit access */ + ne2000_mem_writew(s, s->rsar, val); + ne2000_dma_update(s, 2); + } else { + /* 8 bit access */ + ne2000_mem_writeb(s, s->rsar, val); + ne2000_dma_update(s, 1); + } +} + +static uint32_t ne2000_asic_ioport_read(void *opaque, uint32_t addr) +{ + NE2000State *s = opaque; + int ret; + + if (s->dcfg & 0x01) { + /* 16 bit access */ + ret = ne2000_mem_readw(s, s->rsar); + ne2000_dma_update(s, 2); + } else { + /* 8 bit access */ + ret = ne2000_mem_readb(s, s->rsar); + ne2000_dma_update(s, 1); + } +#ifdef DEBUG_NE2000 + printf("NE2000: asic read val=0x%04x\n", ret); +#endif + return ret; +} + +static void ne2000_asic_ioport_writel(void *opaque, uint32_t addr, uint32_t val) +{ + NE2000State *s = opaque; + +#ifdef DEBUG_NE2000 + printf("NE2000: asic writel 
val=0x%04x\n", val); +#endif + if (s->rcnt == 0) + return; + /* 32 bit access */ + ne2000_mem_writel(s, s->rsar, val); + ne2000_dma_update(s, 4); +} + +static uint32_t ne2000_asic_ioport_readl(void *opaque, uint32_t addr) +{ + NE2000State *s = opaque; + int ret; + + /* 32 bit access */ + ret = ne2000_mem_readl(s, s->rsar); + ne2000_dma_update(s, 4); +#ifdef DEBUG_NE2000 + printf("NE2000: asic readl val=0x%04x\n", ret); +#endif + return ret; +} + +static void ne2000_reset_ioport_write(void *opaque, uint32_t addr, uint32_t val) +{ + /* nothing to do (end of reset pulse) */ +} + +static uint32_t ne2000_reset_ioport_read(void *opaque, uint32_t addr) +{ + NE2000State *s = opaque; + ne2000_reset(s); + return 0; +} + +static int ne2000_post_load(void* opaque, int version_id) +{ + NE2000State* s = opaque; + + if (version_id < 2) { + s->rxcr = 0x0c; + } + return 0; +} + +const VMStateDescription vmstate_ne2000 = { + .name = "ne2000", + .version_id = 2, + .minimum_version_id = 0, + .post_load = ne2000_post_load, + .fields = (VMStateField[]) { + VMSTATE_UINT8_V(rxcr, NE2000State, 2), + VMSTATE_UINT8(cmd, NE2000State), + VMSTATE_UINT32(start, NE2000State), + VMSTATE_UINT32(stop, NE2000State), + VMSTATE_UINT8(boundary, NE2000State), + VMSTATE_UINT8(tsr, NE2000State), + VMSTATE_UINT8(tpsr, NE2000State), + VMSTATE_UINT16(tcnt, NE2000State), + VMSTATE_UINT16(rcnt, NE2000State), + VMSTATE_UINT32(rsar, NE2000State), + VMSTATE_UINT8(rsr, NE2000State), + VMSTATE_UINT8(isr, NE2000State), + VMSTATE_UINT8(dcfg, NE2000State), + VMSTATE_UINT8(imr, NE2000State), + VMSTATE_BUFFER(phys, NE2000State), + VMSTATE_UINT8(curpag, NE2000State), + VMSTATE_BUFFER(mult, NE2000State), + VMSTATE_UNUSED(4), /* was irq */ + VMSTATE_BUFFER(mem, NE2000State), + VMSTATE_END_OF_LIST() + } +}; + +static uint64_t ne2000_read(void *opaque, hwaddr addr, + unsigned size) +{ + NE2000State *s = opaque; + uint64_t val; + + if (addr < 0x10 && size == 1) { + val = ne2000_ioport_read(s, addr); + } else if (addr == 0x10) { + if (size <= 2) { + val = ne2000_asic_ioport_read(s, addr); + } else { + val = ne2000_asic_ioport_readl(s, addr); + } + } else if (addr == 0x1f && size == 1) { + val = ne2000_reset_ioport_read(s, addr); + } else { + val = ((uint64_t)1 << (size * 8)) - 1; + } + trace_ne2000_read(addr, val); + + return val; +} + +static void ne2000_write(void *opaque, hwaddr addr, + uint64_t data, unsigned size) +{ + NE2000State *s = opaque; + + trace_ne2000_write(addr, data); + if (addr < 0x10 && size == 1) { + ne2000_ioport_write(s, addr, data); + } else if (addr == 0x10) { + if (size <= 2) { + ne2000_asic_ioport_write(s, addr, data); + } else { + ne2000_asic_ioport_writel(s, addr, data); + } + } else if (addr == 0x1f && size == 1) { + ne2000_reset_ioport_write(s, addr, data); + } +} + +static const MemoryRegionOps ne2000_ops = { + .read = ne2000_read, + .write = ne2000_write, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +/***********************************************************/ +/* PCI NE2000 definitions */ + +void ne2000_setup_io(NE2000State *s, DeviceState *dev, unsigned size) +{ + memory_region_init_io(&s->io, OBJECT(dev), &ne2000_ops, s, "ne2000", size); +} diff --git a/hw/net/ne2000.h b/hw/net/ne2000.h new file mode 100644 index 000000000..fc7584370 --- /dev/null +++ b/hw/net/ne2000.h @@ -0,0 +1,42 @@ +#ifndef HW_NE2000_H +#define HW_NE2000_H + +#include "qemu/units.h" +#include "net/net.h" + +#define NE2000_PMEM_SIZE (32 * KiB) +#define NE2000_PMEM_START (16 * KiB) +#define NE2000_PMEM_END (NE2000_PMEM_SIZE+NE2000_PMEM_START) 
+#define NE2000_MEM_SIZE NE2000_PMEM_END + +typedef struct NE2000State { + MemoryRegion io; + uint8_t cmd; + uint32_t start; + uint32_t stop; + uint8_t boundary; + uint8_t tsr; + uint8_t tpsr; + uint16_t tcnt; + uint16_t rcnt; + uint32_t rsar; + uint8_t rsr; + uint8_t rxcr; + uint8_t isr; + uint8_t dcfg; + uint8_t imr; + uint8_t phys[6]; /* mac address */ + uint8_t curpag; + uint8_t mult[8]; /* multicast mask array */ + qemu_irq irq; + NICState *nic; + NICConf c; + uint8_t mem[NE2000_MEM_SIZE]; +} NE2000State; + +void ne2000_setup_io(NE2000State *s, DeviceState *dev, unsigned size); +extern const VMStateDescription vmstate_ne2000; +void ne2000_reset(NE2000State *s); +ssize_t ne2000_receive(NetClientState *nc, const uint8_t *buf, size_t size_); + +#endif diff --git a/hw/net/net_rx_pkt.c b/hw/net/net_rx_pkt.c new file mode 100644 index 000000000..1e1c504e4 --- /dev/null +++ b/hw/net/net_rx_pkt.c @@ -0,0 +1,643 @@ +/* + * QEMU RX packets abstractions + * + * Copyright (c) 2012 Ravello Systems LTD (http://ravellosystems.com) + * + * Developed by Daynix Computing LTD (http://www.daynix.com) + * + * Authors: + * Dmitry Fleytman + * Tamir Shomer + * Yan Vugenfirer + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +#include "qemu/osdep.h" +#include "trace.h" +#include "net_rx_pkt.h" +#include "net/checksum.h" +#include "net/tap.h" + +struct NetRxPkt { + struct virtio_net_hdr virt_hdr; + uint8_t ehdr_buf[sizeof(struct eth_header) + sizeof(struct vlan_header)]; + struct iovec *vec; + uint16_t vec_len_total; + uint16_t vec_len; + uint32_t tot_len; + uint16_t tci; + size_t ehdr_buf_len; + bool has_virt_hdr; + eth_pkt_types_e packet_type; + + /* Analysis results */ + bool isip4; + bool isip6; + bool isudp; + bool istcp; + + size_t l3hdr_off; + size_t l4hdr_off; + size_t l5hdr_off; + + eth_ip6_hdr_info ip6hdr_info; + eth_ip4_hdr_info ip4hdr_info; + eth_l4_hdr_info l4hdr_info; +}; + +void net_rx_pkt_init(struct NetRxPkt **pkt, bool has_virt_hdr) +{ + struct NetRxPkt *p = g_malloc0(sizeof *p); + p->has_virt_hdr = has_virt_hdr; + p->vec = NULL; + p->vec_len_total = 0; + *pkt = p; +} + +void net_rx_pkt_uninit(struct NetRxPkt *pkt) +{ + if (pkt->vec_len_total != 0) { + g_free(pkt->vec); + } + + g_free(pkt); +} + +struct virtio_net_hdr *net_rx_pkt_get_vhdr(struct NetRxPkt *pkt) +{ + assert(pkt); + return &pkt->virt_hdr; +} + +static inline void +net_rx_pkt_iovec_realloc(struct NetRxPkt *pkt, + int new_iov_len) +{ + if (pkt->vec_len_total < new_iov_len) { + g_free(pkt->vec); + pkt->vec = g_malloc(sizeof(*pkt->vec) * new_iov_len); + pkt->vec_len_total = new_iov_len; + } +} + +static void +net_rx_pkt_pull_data(struct NetRxPkt *pkt, + const struct iovec *iov, int iovcnt, + size_t ploff) +{ + uint32_t pllen = iov_size(iov, iovcnt) - ploff; + + if (pkt->ehdr_buf_len) { + net_rx_pkt_iovec_realloc(pkt, iovcnt + 1); + + pkt->vec[0].iov_base = pkt->ehdr_buf; + pkt->vec[0].iov_len = pkt->ehdr_buf_len; + + pkt->tot_len = pllen + pkt->ehdr_buf_len; + pkt->vec_len = iov_copy(pkt->vec + 1, pkt->vec_len_total - 1, + iov, iovcnt, ploff, pllen) + 1; + } else { + net_rx_pkt_iovec_realloc(pkt, iovcnt); + + pkt->tot_len = pllen; + pkt->vec_len = iov_copy(pkt->vec, pkt->vec_len_total, + iov, iovcnt, ploff, pkt->tot_len); + } + + eth_get_protocols(pkt->vec, pkt->vec_len, &pkt->isip4, &pkt->isip6, + &pkt->isudp, &pkt->istcp, + &pkt->l3hdr_off, &pkt->l4hdr_off, &pkt->l5hdr_off, + &pkt->ip6hdr_info, &pkt->ip4hdr_info, &pkt->l4hdr_info); + + 
trace_net_rx_pkt_parsed(pkt->isip4, pkt->isip6, pkt->isudp, pkt->istcp, + pkt->l3hdr_off, pkt->l4hdr_off, pkt->l5hdr_off); +} + +void net_rx_pkt_attach_iovec(struct NetRxPkt *pkt, + const struct iovec *iov, int iovcnt, + size_t iovoff, bool strip_vlan) +{ + uint16_t tci = 0; + uint16_t ploff = iovoff; + assert(pkt); + + if (strip_vlan) { + pkt->ehdr_buf_len = eth_strip_vlan(iov, iovcnt, iovoff, pkt->ehdr_buf, + &ploff, &tci); + } else { + pkt->ehdr_buf_len = 0; + } + + pkt->tci = tci; + + net_rx_pkt_pull_data(pkt, iov, iovcnt, ploff); +} + +void net_rx_pkt_attach_iovec_ex(struct NetRxPkt *pkt, + const struct iovec *iov, int iovcnt, + size_t iovoff, bool strip_vlan, + uint16_t vet) +{ + uint16_t tci = 0; + uint16_t ploff = iovoff; + assert(pkt); + + if (strip_vlan) { + pkt->ehdr_buf_len = eth_strip_vlan_ex(iov, iovcnt, iovoff, vet, + pkt->ehdr_buf, + &ploff, &tci); + } else { + pkt->ehdr_buf_len = 0; + } + + pkt->tci = tci; + + net_rx_pkt_pull_data(pkt, iov, iovcnt, ploff); +} + +void net_rx_pkt_dump(struct NetRxPkt *pkt) +{ +#ifdef NET_RX_PKT_DEBUG + assert(pkt); + + printf("RX PKT: tot_len: %d, ehdr_buf_len: %lu, vlan_tag: %d\n", + pkt->tot_len, pkt->ehdr_buf_len, pkt->tci); +#endif +} + +void net_rx_pkt_set_packet_type(struct NetRxPkt *pkt, + eth_pkt_types_e packet_type) +{ + assert(pkt); + + pkt->packet_type = packet_type; + +} + +eth_pkt_types_e net_rx_pkt_get_packet_type(struct NetRxPkt *pkt) +{ + assert(pkt); + + return pkt->packet_type; +} + +size_t net_rx_pkt_get_total_len(struct NetRxPkt *pkt) +{ + assert(pkt); + + return pkt->tot_len; +} + +void net_rx_pkt_set_protocols(struct NetRxPkt *pkt, const void *data, + size_t len) +{ + const struct iovec iov = { + .iov_base = (void *)data, + .iov_len = len + }; + + assert(pkt); + + eth_get_protocols(&iov, 1, &pkt->isip4, &pkt->isip6, + &pkt->isudp, &pkt->istcp, + &pkt->l3hdr_off, &pkt->l4hdr_off, &pkt->l5hdr_off, + &pkt->ip6hdr_info, &pkt->ip4hdr_info, &pkt->l4hdr_info); +} + +void net_rx_pkt_get_protocols(struct NetRxPkt *pkt, + bool *isip4, bool *isip6, + bool *isudp, bool *istcp) +{ + assert(pkt); + + *isip4 = pkt->isip4; + *isip6 = pkt->isip6; + *isudp = pkt->isudp; + *istcp = pkt->istcp; +} + +size_t net_rx_pkt_get_l3_hdr_offset(struct NetRxPkt *pkt) +{ + assert(pkt); + return pkt->l3hdr_off; +} + +size_t net_rx_pkt_get_l4_hdr_offset(struct NetRxPkt *pkt) +{ + assert(pkt); + return pkt->l4hdr_off; +} + +size_t net_rx_pkt_get_l5_hdr_offset(struct NetRxPkt *pkt) +{ + assert(pkt); + return pkt->l5hdr_off; +} + +eth_ip6_hdr_info *net_rx_pkt_get_ip6_info(struct NetRxPkt *pkt) +{ + return &pkt->ip6hdr_info; +} + +eth_ip4_hdr_info *net_rx_pkt_get_ip4_info(struct NetRxPkt *pkt) +{ + return &pkt->ip4hdr_info; +} + +eth_l4_hdr_info *net_rx_pkt_get_l4_info(struct NetRxPkt *pkt) +{ + return &pkt->l4hdr_info; +} + +static inline void +_net_rx_rss_add_chunk(uint8_t *rss_input, size_t *bytes_written, + void *ptr, size_t size) +{ + memcpy(&rss_input[*bytes_written], ptr, size); + trace_net_rx_pkt_rss_add_chunk(ptr, size, *bytes_written); + *bytes_written += size; +} + +static inline void +_net_rx_rss_prepare_ip4(uint8_t *rss_input, + struct NetRxPkt *pkt, + size_t *bytes_written) +{ + struct ip_header *ip4_hdr = &pkt->ip4hdr_info.ip4_hdr; + + _net_rx_rss_add_chunk(rss_input, bytes_written, + &ip4_hdr->ip_src, sizeof(uint32_t)); + + _net_rx_rss_add_chunk(rss_input, bytes_written, + &ip4_hdr->ip_dst, sizeof(uint32_t)); +} + +static inline void +_net_rx_rss_prepare_ip6(uint8_t *rss_input, + struct NetRxPkt *pkt, + bool ipv6ex, size_t *bytes_written) 
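+/* With ipv6ex set, prefer the parsed RSS_EX source/destination
+ * addresses when the parser marked them valid; otherwise fall back
+ * to the addresses in the base IPv6 header. */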
+{ + eth_ip6_hdr_info *ip6info = &pkt->ip6hdr_info; + + _net_rx_rss_add_chunk(rss_input, bytes_written, + (ipv6ex && ip6info->rss_ex_src_valid) ? &ip6info->rss_ex_src + : &ip6info->ip6_hdr.ip6_src, + sizeof(struct in6_address)); + + _net_rx_rss_add_chunk(rss_input, bytes_written, + (ipv6ex && ip6info->rss_ex_dst_valid) ? &ip6info->rss_ex_dst + : &ip6info->ip6_hdr.ip6_dst, + sizeof(struct in6_address)); +} + +static inline void +_net_rx_rss_prepare_tcp(uint8_t *rss_input, + struct NetRxPkt *pkt, + size_t *bytes_written) +{ + struct tcp_header *tcphdr = &pkt->l4hdr_info.hdr.tcp; + + _net_rx_rss_add_chunk(rss_input, bytes_written, + &tcphdr->th_sport, sizeof(uint16_t)); + + _net_rx_rss_add_chunk(rss_input, bytes_written, + &tcphdr->th_dport, sizeof(uint16_t)); +} + +static inline void +_net_rx_rss_prepare_udp(uint8_t *rss_input, + struct NetRxPkt *pkt, + size_t *bytes_written) +{ + struct udp_header *udphdr = &pkt->l4hdr_info.hdr.udp; + + _net_rx_rss_add_chunk(rss_input, bytes_written, + &udphdr->uh_sport, sizeof(uint16_t)); + + _net_rx_rss_add_chunk(rss_input, bytes_written, + &udphdr->uh_dport, sizeof(uint16_t)); +} + +uint32_t +net_rx_pkt_calc_rss_hash(struct NetRxPkt *pkt, + NetRxPktRssType type, + uint8_t *key) +{ + uint8_t rss_input[36]; + size_t rss_length = 0; + uint32_t rss_hash = 0; + net_toeplitz_key key_data; + + switch (type) { + case NetPktRssIpV4: + assert(pkt->isip4); + trace_net_rx_pkt_rss_ip4(); + _net_rx_rss_prepare_ip4(&rss_input[0], pkt, &rss_length); + break; + case NetPktRssIpV4Tcp: + assert(pkt->isip4); + assert(pkt->istcp); + trace_net_rx_pkt_rss_ip4_tcp(); + _net_rx_rss_prepare_ip4(&rss_input[0], pkt, &rss_length); + _net_rx_rss_prepare_tcp(&rss_input[0], pkt, &rss_length); + break; + case NetPktRssIpV6Tcp: + assert(pkt->isip6); + assert(pkt->istcp); + trace_net_rx_pkt_rss_ip6_tcp(); + _net_rx_rss_prepare_ip6(&rss_input[0], pkt, false, &rss_length); + _net_rx_rss_prepare_tcp(&rss_input[0], pkt, &rss_length); + break; + case NetPktRssIpV6: + assert(pkt->isip6); + trace_net_rx_pkt_rss_ip6(); + _net_rx_rss_prepare_ip6(&rss_input[0], pkt, false, &rss_length); + break; + case NetPktRssIpV6Ex: + assert(pkt->isip6); + trace_net_rx_pkt_rss_ip6_ex(); + _net_rx_rss_prepare_ip6(&rss_input[0], pkt, true, &rss_length); + break; + case NetPktRssIpV6TcpEx: + assert(pkt->isip6); + assert(pkt->istcp); + trace_net_rx_pkt_rss_ip6_ex_tcp(); + _net_rx_rss_prepare_ip6(&rss_input[0], pkt, true, &rss_length); + _net_rx_rss_prepare_tcp(&rss_input[0], pkt, &rss_length); + break; + case NetPktRssIpV4Udp: + assert(pkt->isip4); + assert(pkt->isudp); + trace_net_rx_pkt_rss_ip4_udp(); + _net_rx_rss_prepare_ip4(&rss_input[0], pkt, &rss_length); + _net_rx_rss_prepare_udp(&rss_input[0], pkt, &rss_length); + break; + case NetPktRssIpV6Udp: + assert(pkt->isip6); + assert(pkt->isudp); + trace_net_rx_pkt_rss_ip6_udp(); + _net_rx_rss_prepare_ip6(&rss_input[0], pkt, false, &rss_length); + _net_rx_rss_prepare_udp(&rss_input[0], pkt, &rss_length); + break; + case NetPktRssIpV6UdpEx: + assert(pkt->isip6); + assert(pkt->isudp); + trace_net_rx_pkt_rss_ip6_ex_udp(); + _net_rx_rss_prepare_ip6(&rss_input[0], pkt, true, &rss_length); + _net_rx_rss_prepare_udp(&rss_input[0], pkt, &rss_length); + break; + default: + assert(false); + break; + } + + net_toeplitz_key_init(&key_data, key); + net_toeplitz_add(&rss_hash, rss_input, rss_length, &key_data); + + trace_net_rx_pkt_rss_hash(rss_length, rss_hash); + + return rss_hash; +} + +uint16_t net_rx_pkt_get_ip_id(struct NetRxPkt *pkt) +{ + assert(pkt); + + if (pkt->isip4) 
{ + return be16_to_cpu(pkt->ip4hdr_info.ip4_hdr.ip_id); + } + + return 0; +} + +bool net_rx_pkt_is_tcp_ack(struct NetRxPkt *pkt) +{ + assert(pkt); + + if (pkt->istcp) { + return TCP_HEADER_FLAGS(&pkt->l4hdr_info.hdr.tcp) & TCP_FLAG_ACK; + } + + return false; +} + +bool net_rx_pkt_has_tcp_data(struct NetRxPkt *pkt) +{ + assert(pkt); + + if (pkt->istcp) { + return pkt->l4hdr_info.has_tcp_data; + } + + return false; +} + +struct iovec *net_rx_pkt_get_iovec(struct NetRxPkt *pkt) +{ + assert(pkt); + + return pkt->vec; +} + +uint16_t net_rx_pkt_get_iovec_len(struct NetRxPkt *pkt) +{ + assert(pkt); + + return pkt->vec_len; +} + +void net_rx_pkt_set_vhdr(struct NetRxPkt *pkt, + struct virtio_net_hdr *vhdr) +{ + assert(pkt); + + memcpy(&pkt->virt_hdr, vhdr, sizeof pkt->virt_hdr); +} + +void net_rx_pkt_set_vhdr_iovec(struct NetRxPkt *pkt, + const struct iovec *iov, int iovcnt) +{ + assert(pkt); + + iov_to_buf(iov, iovcnt, 0, &pkt->virt_hdr, sizeof pkt->virt_hdr); +} + +bool net_rx_pkt_is_vlan_stripped(struct NetRxPkt *pkt) +{ + assert(pkt); + + return pkt->ehdr_buf_len ? true : false; +} + +bool net_rx_pkt_has_virt_hdr(struct NetRxPkt *pkt) +{ + assert(pkt); + + return pkt->has_virt_hdr; +} + +uint16_t net_rx_pkt_get_vlan_tag(struct NetRxPkt *pkt) +{ + assert(pkt); + + return pkt->tci; +} + +bool net_rx_pkt_validate_l3_csum(struct NetRxPkt *pkt, bool *csum_valid) +{ + uint32_t cntr; + uint16_t csum; + uint32_t csl; + + trace_net_rx_pkt_l3_csum_validate_entry(); + + if (!pkt->isip4) { + trace_net_rx_pkt_l3_csum_validate_not_ip4(); + return false; + } + + csl = pkt->l4hdr_off - pkt->l3hdr_off; + + cntr = net_checksum_add_iov(pkt->vec, pkt->vec_len, + pkt->l3hdr_off, + csl, 0); + + csum = net_checksum_finish(cntr); + + *csum_valid = (csum == 0); + + trace_net_rx_pkt_l3_csum_validate_csum(pkt->l3hdr_off, csl, + cntr, csum, *csum_valid); + + return true; +} + +static uint16_t +_net_rx_pkt_calc_l4_csum(struct NetRxPkt *pkt) +{ + uint32_t cntr; + uint16_t csum; + uint16_t csl; + uint32_t cso; + + trace_net_rx_pkt_l4_csum_calc_entry(); + + if (pkt->isip4) { + if (pkt->isudp) { + csl = be16_to_cpu(pkt->l4hdr_info.hdr.udp.uh_ulen); + trace_net_rx_pkt_l4_csum_calc_ip4_udp(); + } else { + csl = be16_to_cpu(pkt->ip4hdr_info.ip4_hdr.ip_len) - + IP_HDR_GET_LEN(&pkt->ip4hdr_info.ip4_hdr); + trace_net_rx_pkt_l4_csum_calc_ip4_tcp(); + } + + cntr = eth_calc_ip4_pseudo_hdr_csum(&pkt->ip4hdr_info.ip4_hdr, + csl, &cso); + trace_net_rx_pkt_l4_csum_calc_ph_csum(cntr, csl); + } else { + if (pkt->isudp) { + csl = be16_to_cpu(pkt->l4hdr_info.hdr.udp.uh_ulen); + trace_net_rx_pkt_l4_csum_calc_ip6_udp(); + } else { + struct ip6_header *ip6hdr = &pkt->ip6hdr_info.ip6_hdr; + size_t full_ip6hdr_len = pkt->l4hdr_off - pkt->l3hdr_off; + size_t ip6opts_len = full_ip6hdr_len - sizeof(struct ip6_header); + + csl = be16_to_cpu(ip6hdr->ip6_ctlun.ip6_un1.ip6_un1_plen) - + ip6opts_len; + trace_net_rx_pkt_l4_csum_calc_ip6_tcp(); + } + + cntr = eth_calc_ip6_pseudo_hdr_csum(&pkt->ip6hdr_info.ip6_hdr, csl, + pkt->ip6hdr_info.l4proto, &cso); + trace_net_rx_pkt_l4_csum_calc_ph_csum(cntr, csl); + } + + cntr += net_checksum_add_iov(pkt->vec, pkt->vec_len, + pkt->l4hdr_off, csl, cso); + + csum = net_checksum_finish_nozero(cntr); + + trace_net_rx_pkt_l4_csum_calc_csum(pkt->l4hdr_off, csl, cntr, csum); + + return csum; +} + +bool net_rx_pkt_validate_l4_csum(struct NetRxPkt *pkt, bool *csum_valid) +{ + uint16_t csum; + + trace_net_rx_pkt_l4_csum_validate_entry(); + + if (!pkt->istcp && !pkt->isudp) { + trace_net_rx_pkt_l4_csum_validate_not_xxp(); + 
return false; + } + + if (pkt->isudp && (pkt->l4hdr_info.hdr.udp.uh_sum == 0)) { + trace_net_rx_pkt_l4_csum_validate_udp_with_no_checksum(); + return false; + } + + if (pkt->isip4 && pkt->ip4hdr_info.fragment) { + trace_net_rx_pkt_l4_csum_validate_ip4_fragment(); + return false; + } + + csum = _net_rx_pkt_calc_l4_csum(pkt); + + *csum_valid = ((csum == 0) || (csum == 0xFFFF)); + + trace_net_rx_pkt_l4_csum_validate_csum(*csum_valid); + + return true; +} + +bool net_rx_pkt_fix_l4_csum(struct NetRxPkt *pkt) +{ + uint16_t csum = 0; + uint32_t l4_cso; + + trace_net_rx_pkt_l4_csum_fix_entry(); + + if (pkt->istcp) { + l4_cso = offsetof(struct tcp_header, th_sum); + trace_net_rx_pkt_l4_csum_fix_tcp(l4_cso); + } else if (pkt->isudp) { + if (pkt->l4hdr_info.hdr.udp.uh_sum == 0) { + trace_net_rx_pkt_l4_csum_fix_udp_with_no_checksum(); + return false; + } + l4_cso = offsetof(struct udp_header, uh_sum); + trace_net_rx_pkt_l4_csum_fix_udp(l4_cso); + } else { + trace_net_rx_pkt_l4_csum_fix_not_xxp(); + return false; + } + + if (pkt->isip4 && pkt->ip4hdr_info.fragment) { + trace_net_rx_pkt_l4_csum_fix_ip4_fragment(); + return false; + } + + /* Set zero to checksum word */ + iov_from_buf(pkt->vec, pkt->vec_len, + pkt->l4hdr_off + l4_cso, + &csum, sizeof(csum)); + + /* Calculate L4 checksum */ + csum = cpu_to_be16(_net_rx_pkt_calc_l4_csum(pkt)); + + /* Set calculated checksum to checksum word */ + iov_from_buf(pkt->vec, pkt->vec_len, + pkt->l4hdr_off + l4_cso, + &csum, sizeof(csum)); + + trace_net_rx_pkt_l4_csum_fix_csum(pkt->l4hdr_off + l4_cso, csum); + + return true; +} diff --git a/hw/net/net_rx_pkt.h b/hw/net/net_rx_pkt.h new file mode 100644 index 000000000..048e3461f --- /dev/null +++ b/hw/net/net_rx_pkt.h @@ -0,0 +1,367 @@ +/* + * QEMU RX packets abstraction + * + * Copyright (c) 2012 Ravello Systems LTD (http://ravellosystems.com) + * + * Developed by Daynix Computing LTD (http://www.daynix.com) + * + * Authors: + * Dmitry Fleytman + * Tamir Shomer + * Yan Vugenfirer + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ * + */ + +#ifndef NET_RX_PKT_H +#define NET_RX_PKT_H + +#include "net/eth.h" + +/* defines to enable packet dump functions */ +/*#define NET_RX_PKT_DEBUG*/ + +struct NetRxPkt; + +/** + * Clean all rx packet resources + * + * @pkt: packet + * + */ +void net_rx_pkt_uninit(struct NetRxPkt *pkt); + +/** + * Init function for rx packet functionality + * + * @pkt: packet pointer + * @has_virt_hdr: device uses virtio header + * + */ +void net_rx_pkt_init(struct NetRxPkt **pkt, bool has_virt_hdr); + +/** + * returns total length of data attached to rx context + * + * @pkt: packet + * + * Return: nothing + * + */ +size_t net_rx_pkt_get_total_len(struct NetRxPkt *pkt); + +/** + * parse and set packet analysis results + * + * @pkt: packet + * @data: pointer to the data buffer to be parsed + * @len: data length + * + */ +void net_rx_pkt_set_protocols(struct NetRxPkt *pkt, const void *data, + size_t len); + +/** + * fetches packet analysis results + * + * @pkt: packet + * @isip4: whether the packet given is IPv4 + * @isip6: whether the packet given is IPv6 + * @isudp: whether the packet given is UDP + * @istcp: whether the packet given is TCP + * + */ +void net_rx_pkt_get_protocols(struct NetRxPkt *pkt, + bool *isip4, bool *isip6, + bool *isudp, bool *istcp); + +/** +* fetches L3 header offset +* +* @pkt: packet +* +*/ +size_t net_rx_pkt_get_l3_hdr_offset(struct NetRxPkt *pkt); + +/** +* fetches L4 header offset +* +* @pkt: packet +* +*/ +size_t net_rx_pkt_get_l4_hdr_offset(struct NetRxPkt *pkt); + +/** +* fetches L5 header offset +* +* @pkt: packet +* +*/ +size_t net_rx_pkt_get_l5_hdr_offset(struct NetRxPkt *pkt); + +/** + * fetches IP6 header analysis results + * + * Return: pointer to analysis results structure which is stored in internal + * packet area. + * + */ +eth_ip6_hdr_info *net_rx_pkt_get_ip6_info(struct NetRxPkt *pkt); + +/** + * fetches IP4 header analysis results + * + * Return: pointer to analysis results structure which is stored in internal + * packet area. + * + */ +eth_ip4_hdr_info *net_rx_pkt_get_ip4_info(struct NetRxPkt *pkt); + +/** + * fetches L4 header analysis results + * + * Return: pointer to analysis results structure which is stored in internal + * packet area. + * + */ +eth_l4_hdr_info *net_rx_pkt_get_l4_info(struct NetRxPkt *pkt); + +typedef enum { + NetPktRssIpV4, + NetPktRssIpV4Tcp, + NetPktRssIpV6Tcp, + NetPktRssIpV6, + NetPktRssIpV6Ex, + NetPktRssIpV6TcpEx, + NetPktRssIpV4Udp, + NetPktRssIpV6Udp, + NetPktRssIpV6UdpEx, +} NetRxPktRssType; + +/** +* calculates RSS hash for packet +* +* @pkt: packet +* @type: RSS hash type +* +* Return: Toeplitz RSS hash. 
+* +*/ +uint32_t +net_rx_pkt_calc_rss_hash(struct NetRxPkt *pkt, + NetRxPktRssType type, + uint8_t *key); + +/** +* fetches IP identification for the packet +* +* @pkt: packet +* +*/ +uint16_t net_rx_pkt_get_ip_id(struct NetRxPkt *pkt); + +/** +* check if given packet is a TCP ACK packet +* +* @pkt: packet +* +*/ +bool net_rx_pkt_is_tcp_ack(struct NetRxPkt *pkt); + +/** +* check if given packet contains TCP data +* +* @pkt: packet +* +*/ +bool net_rx_pkt_has_tcp_data(struct NetRxPkt *pkt); + +/** + * returns virtio header stored in rx context + * + * @pkt: packet + * @ret: virtio header + * + */ +struct virtio_net_hdr *net_rx_pkt_get_vhdr(struct NetRxPkt *pkt); + +/** + * returns packet type + * + * @pkt: packet + * @ret: packet type + * + */ +eth_pkt_types_e net_rx_pkt_get_packet_type(struct NetRxPkt *pkt); + +/** + * returns vlan tag + * + * @pkt: packet + * @ret: VLAN tag + * + */ +uint16_t net_rx_pkt_get_vlan_tag(struct NetRxPkt *pkt); + +/** + * tells whether vlan was stripped from the packet + * + * @pkt: packet + * @ret: VLAN stripped sign + * + */ +bool net_rx_pkt_is_vlan_stripped(struct NetRxPkt *pkt); + +/** + * notifies caller if the packet has virtio header + * + * @pkt: packet + * @ret: true if packet has virtio header, false otherwize + * + */ +bool net_rx_pkt_has_virt_hdr(struct NetRxPkt *pkt); + +/** +* attach scatter-gather data to rx packet +* +* @pkt: packet +* @iov: received data scatter-gather list +* @iovcnt number of elements in iov +* @iovoff data start offset in the iov +* @strip_vlan: should the module strip vlan from data +* +*/ +void net_rx_pkt_attach_iovec(struct NetRxPkt *pkt, + const struct iovec *iov, + int iovcnt, size_t iovoff, + bool strip_vlan); + +/** +* attach scatter-gather data to rx packet +* +* @pkt: packet +* @iov: received data scatter-gather list +* @iovcnt number of elements in iov +* @iovoff data start offset in the iov +* @strip_vlan: should the module strip vlan from data +* @vet: VLAN tag Ethernet type +* +*/ +void net_rx_pkt_attach_iovec_ex(struct NetRxPkt *pkt, + const struct iovec *iov, int iovcnt, + size_t iovoff, bool strip_vlan, + uint16_t vet); + +/** + * attach data to rx packet + * + * @pkt: packet + * @data: pointer to the data buffer + * @len: data length + * @strip_vlan: should the module strip vlan from data + * + */ +static inline void +net_rx_pkt_attach_data(struct NetRxPkt *pkt, const void *data, + size_t len, bool strip_vlan) +{ + const struct iovec iov = { + .iov_base = (void *) data, + .iov_len = len + }; + + net_rx_pkt_attach_iovec(pkt, &iov, 1, 0, strip_vlan); +} + +/** + * returns io vector that holds the attached data + * + * @pkt: packet + * @ret: pointer to IOVec + * + */ +struct iovec *net_rx_pkt_get_iovec(struct NetRxPkt *pkt); + +/** +* returns io vector length that holds the attached data +* +* @pkt: packet +* @ret: IOVec length +* +*/ +uint16_t net_rx_pkt_get_iovec_len(struct NetRxPkt *pkt); + +/** + * prints rx packet data if debug is enabled + * + * @pkt: packet + * + */ +void net_rx_pkt_dump(struct NetRxPkt *pkt); + +/** + * copy passed vhdr data to packet context + * + * @pkt: packet + * @vhdr: VHDR buffer + * + */ +void net_rx_pkt_set_vhdr(struct NetRxPkt *pkt, + struct virtio_net_hdr *vhdr); + +/** +* copy passed vhdr data to packet context +* +* @pkt: packet +* @iov: VHDR iov +* @iovcnt: VHDR iov array size +* +*/ +void net_rx_pkt_set_vhdr_iovec(struct NetRxPkt *pkt, + const struct iovec *iov, int iovcnt); + +/** + * save packet type in packet context + * + * @pkt: packet + * @packet_type: the packet 
type + * + */ +void net_rx_pkt_set_packet_type(struct NetRxPkt *pkt, + eth_pkt_types_e packet_type); + +/** +* validate TCP/UDP checksum of the packet +* +* @pkt: packet +* @csum_valid: checksum validation result +* @ret: true if validation was performed, false in case packet is +* not TCP/UDP or checksum validation is not possible +* +*/ +bool net_rx_pkt_validate_l4_csum(struct NetRxPkt *pkt, bool *csum_valid); + +/** +* validate IPv4 checksum of the packet +* +* @pkt: packet +* @csum_valid: checksum validation result +* @ret: true if validation was performed, false in case packet is +* not TCP/UDP or checksum validation is not possible +* +*/ +bool net_rx_pkt_validate_l3_csum(struct NetRxPkt *pkt, bool *csum_valid); + +/** +* fix IPv4 checksum of the packet +* +* @pkt: packet +* @ret: true if checksum was fixed, false in case packet is +* not TCP/UDP or checksum correction is not possible +* +*/ +bool net_rx_pkt_fix_l4_csum(struct NetRxPkt *pkt); + +#endif diff --git a/hw/net/net_tx_pkt.c b/hw/net/net_tx_pkt.c new file mode 100644 index 000000000..1cb1125d9 --- /dev/null +++ b/hw/net/net_tx_pkt.c @@ -0,0 +1,674 @@ +/* + * QEMU TX packets abstractions + * + * Copyright (c) 2012 Ravello Systems LTD (http://ravellosystems.com) + * + * Developed by Daynix Computing LTD (http://www.daynix.com) + * + * Authors: + * Dmitry Fleytman + * Tamir Shomer + * Yan Vugenfirer + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +#include "qemu/osdep.h" +#include "net_tx_pkt.h" +#include "net/eth.h" +#include "net/checksum.h" +#include "net/tap.h" +#include "net/net.h" +#include "hw/pci/pci.h" + +enum { + NET_TX_PKT_VHDR_FRAG = 0, + NET_TX_PKT_L2HDR_FRAG, + NET_TX_PKT_L3HDR_FRAG, + NET_TX_PKT_PL_START_FRAG +}; + +/* TX packet private context */ +struct NetTxPkt { + PCIDevice *pci_dev; + + struct virtio_net_hdr virt_hdr; + bool has_virt_hdr; + + struct iovec *raw; + uint32_t raw_frags; + uint32_t max_raw_frags; + + struct iovec *vec; + + uint8_t l2_hdr[ETH_MAX_L2_HDR_LEN]; + uint8_t l3_hdr[ETH_MAX_IP_DGRAM_LEN]; + + uint32_t payload_len; + + uint32_t payload_frags; + uint32_t max_payload_frags; + + uint16_t hdr_len; + eth_pkt_types_e packet_type; + uint8_t l4proto; + + bool is_loopback; +}; + +void net_tx_pkt_init(struct NetTxPkt **pkt, PCIDevice *pci_dev, + uint32_t max_frags, bool has_virt_hdr) +{ + struct NetTxPkt *p = g_malloc0(sizeof *p); + + p->pci_dev = pci_dev; + + p->vec = g_new(struct iovec, max_frags + NET_TX_PKT_PL_START_FRAG); + + p->raw = g_new(struct iovec, max_frags); + + p->max_payload_frags = max_frags; + p->max_raw_frags = max_frags; + p->has_virt_hdr = has_virt_hdr; + p->vec[NET_TX_PKT_VHDR_FRAG].iov_base = &p->virt_hdr; + p->vec[NET_TX_PKT_VHDR_FRAG].iov_len = + p->has_virt_hdr ? 
sizeof p->virt_hdr : 0; + p->vec[NET_TX_PKT_L2HDR_FRAG].iov_base = &p->l2_hdr; + p->vec[NET_TX_PKT_L3HDR_FRAG].iov_base = &p->l3_hdr; + + *pkt = p; +} + +void net_tx_pkt_uninit(struct NetTxPkt *pkt) +{ + if (pkt) { + g_free(pkt->vec); + g_free(pkt->raw); + g_free(pkt); + } +} + +void net_tx_pkt_update_ip_hdr_checksum(struct NetTxPkt *pkt) +{ + uint16_t csum; + assert(pkt); + struct ip_header *ip_hdr; + ip_hdr = pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_base; + + ip_hdr->ip_len = cpu_to_be16(pkt->payload_len + + pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_len); + + ip_hdr->ip_sum = 0; + csum = net_raw_checksum((uint8_t *)ip_hdr, + pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_len); + ip_hdr->ip_sum = cpu_to_be16(csum); +} + +void net_tx_pkt_update_ip_checksums(struct NetTxPkt *pkt) +{ + uint16_t csum; + uint32_t cntr, cso; + assert(pkt); + uint8_t gso_type = pkt->virt_hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN; + void *ip_hdr = pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_base; + + if (pkt->payload_len + pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_len > + ETH_MAX_IP_DGRAM_LEN) { + return; + } + + if (gso_type == VIRTIO_NET_HDR_GSO_TCPV4 || + gso_type == VIRTIO_NET_HDR_GSO_UDP) { + /* Calculate IP header checksum */ + net_tx_pkt_update_ip_hdr_checksum(pkt); + + /* Calculate IP pseudo header checksum */ + cntr = eth_calc_ip4_pseudo_hdr_csum(ip_hdr, pkt->payload_len, &cso); + csum = cpu_to_be16(~net_checksum_finish(cntr)); + } else if (gso_type == VIRTIO_NET_HDR_GSO_TCPV6) { + /* Calculate IP pseudo header checksum */ + cntr = eth_calc_ip6_pseudo_hdr_csum(ip_hdr, pkt->payload_len, + IP_PROTO_TCP, &cso); + csum = cpu_to_be16(~net_checksum_finish(cntr)); + } else { + return; + } + + iov_from_buf(&pkt->vec[NET_TX_PKT_PL_START_FRAG], pkt->payload_frags, + pkt->virt_hdr.csum_offset, &csum, sizeof(csum)); +} + +static void net_tx_pkt_calculate_hdr_len(struct NetTxPkt *pkt) +{ + pkt->hdr_len = pkt->vec[NET_TX_PKT_L2HDR_FRAG].iov_len + + pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_len; +} + +static bool net_tx_pkt_parse_headers(struct NetTxPkt *pkt) +{ + struct iovec *l2_hdr, *l3_hdr; + size_t bytes_read; + size_t full_ip6hdr_len; + uint16_t l3_proto; + + assert(pkt); + + l2_hdr = &pkt->vec[NET_TX_PKT_L2HDR_FRAG]; + l3_hdr = &pkt->vec[NET_TX_PKT_L3HDR_FRAG]; + + bytes_read = iov_to_buf(pkt->raw, pkt->raw_frags, 0, l2_hdr->iov_base, + ETH_MAX_L2_HDR_LEN); + if (bytes_read < sizeof(struct eth_header)) { + l2_hdr->iov_len = 0; + return false; + } + + l2_hdr->iov_len = sizeof(struct eth_header); + switch (be16_to_cpu(PKT_GET_ETH_HDR(l2_hdr->iov_base)->h_proto)) { + case ETH_P_VLAN: + l2_hdr->iov_len += sizeof(struct vlan_header); + break; + case ETH_P_DVLAN: + l2_hdr->iov_len += 2 * sizeof(struct vlan_header); + break; + } + + if (bytes_read < l2_hdr->iov_len) { + l2_hdr->iov_len = 0; + l3_hdr->iov_len = 0; + pkt->packet_type = ETH_PKT_UCAST; + return false; + } else { + l2_hdr->iov_len = ETH_MAX_L2_HDR_LEN; + l2_hdr->iov_len = eth_get_l2_hdr_length(l2_hdr->iov_base); + pkt->packet_type = get_eth_packet_type(l2_hdr->iov_base); + } + + l3_proto = eth_get_l3_proto(l2_hdr, 1, l2_hdr->iov_len); + + switch (l3_proto) { + case ETH_P_IP: + bytes_read = iov_to_buf(pkt->raw, pkt->raw_frags, l2_hdr->iov_len, + l3_hdr->iov_base, sizeof(struct ip_header)); + + if (bytes_read < sizeof(struct ip_header)) { + l3_hdr->iov_len = 0; + return false; + } + + l3_hdr->iov_len = IP_HDR_GET_LEN(l3_hdr->iov_base); + + if (l3_hdr->iov_len < sizeof(struct ip_header)) { + l3_hdr->iov_len = 0; + return false; + } + + pkt->l4proto = IP_HDR_GET_P(l3_hdr->iov_base); + + if 
(IP_HDR_GET_LEN(l3_hdr->iov_base) != sizeof(struct ip_header)) { + /* copy optional IPv4 header data if any*/ + bytes_read = iov_to_buf(pkt->raw, pkt->raw_frags, + l2_hdr->iov_len + sizeof(struct ip_header), + l3_hdr->iov_base + sizeof(struct ip_header), + l3_hdr->iov_len - sizeof(struct ip_header)); + if (bytes_read < l3_hdr->iov_len - sizeof(struct ip_header)) { + l3_hdr->iov_len = 0; + return false; + } + } + + break; + + case ETH_P_IPV6: + { + eth_ip6_hdr_info hdrinfo; + + if (!eth_parse_ipv6_hdr(pkt->raw, pkt->raw_frags, l2_hdr->iov_len, + &hdrinfo)) { + l3_hdr->iov_len = 0; + return false; + } + + pkt->l4proto = hdrinfo.l4proto; + full_ip6hdr_len = hdrinfo.full_hdr_len; + + if (full_ip6hdr_len > ETH_MAX_IP_DGRAM_LEN) { + l3_hdr->iov_len = 0; + return false; + } + + bytes_read = iov_to_buf(pkt->raw, pkt->raw_frags, l2_hdr->iov_len, + l3_hdr->iov_base, full_ip6hdr_len); + + if (bytes_read < full_ip6hdr_len) { + l3_hdr->iov_len = 0; + return false; + } else { + l3_hdr->iov_len = full_ip6hdr_len; + } + break; + } + default: + l3_hdr->iov_len = 0; + break; + } + + net_tx_pkt_calculate_hdr_len(pkt); + return true; +} + +static void net_tx_pkt_rebuild_payload(struct NetTxPkt *pkt) +{ + pkt->payload_len = iov_size(pkt->raw, pkt->raw_frags) - pkt->hdr_len; + pkt->payload_frags = iov_copy(&pkt->vec[NET_TX_PKT_PL_START_FRAG], + pkt->max_payload_frags, + pkt->raw, pkt->raw_frags, + pkt->hdr_len, pkt->payload_len); +} + +bool net_tx_pkt_parse(struct NetTxPkt *pkt) +{ + if (net_tx_pkt_parse_headers(pkt)) { + net_tx_pkt_rebuild_payload(pkt); + return true; + } else { + return false; + } +} + +struct virtio_net_hdr *net_tx_pkt_get_vhdr(struct NetTxPkt *pkt) +{ + assert(pkt); + return &pkt->virt_hdr; +} + +static uint8_t net_tx_pkt_get_gso_type(struct NetTxPkt *pkt, + bool tso_enable) +{ + uint8_t rc = VIRTIO_NET_HDR_GSO_NONE; + uint16_t l3_proto; + + l3_proto = eth_get_l3_proto(&pkt->vec[NET_TX_PKT_L2HDR_FRAG], 1, + pkt->vec[NET_TX_PKT_L2HDR_FRAG].iov_len); + + if (!tso_enable) { + goto func_exit; + } + + rc = eth_get_gso_type(l3_proto, pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_base, + pkt->l4proto); + +func_exit: + return rc; +} + +void net_tx_pkt_build_vheader(struct NetTxPkt *pkt, bool tso_enable, + bool csum_enable, uint32_t gso_size) +{ + struct tcp_hdr l4hdr; + assert(pkt); + + /* csum has to be enabled if tso is. 
*/ + assert(csum_enable || !tso_enable); + + pkt->virt_hdr.gso_type = net_tx_pkt_get_gso_type(pkt, tso_enable); + + switch (pkt->virt_hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) { + case VIRTIO_NET_HDR_GSO_NONE: + pkt->virt_hdr.hdr_len = 0; + pkt->virt_hdr.gso_size = 0; + break; + + case VIRTIO_NET_HDR_GSO_UDP: + pkt->virt_hdr.gso_size = gso_size; + pkt->virt_hdr.hdr_len = pkt->hdr_len + sizeof(struct udp_header); + break; + + case VIRTIO_NET_HDR_GSO_TCPV4: + case VIRTIO_NET_HDR_GSO_TCPV6: + iov_to_buf(&pkt->vec[NET_TX_PKT_PL_START_FRAG], pkt->payload_frags, + 0, &l4hdr, sizeof(l4hdr)); + pkt->virt_hdr.hdr_len = pkt->hdr_len + l4hdr.th_off * sizeof(uint32_t); + pkt->virt_hdr.gso_size = gso_size; + break; + + default: + g_assert_not_reached(); + } + + if (csum_enable) { + switch (pkt->l4proto) { + case IP_PROTO_TCP: + pkt->virt_hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM; + pkt->virt_hdr.csum_start = pkt->hdr_len; + pkt->virt_hdr.csum_offset = offsetof(struct tcp_hdr, th_sum); + break; + case IP_PROTO_UDP: + pkt->virt_hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM; + pkt->virt_hdr.csum_start = pkt->hdr_len; + pkt->virt_hdr.csum_offset = offsetof(struct udp_hdr, uh_sum); + break; + default: + break; + } + } +} + +void net_tx_pkt_setup_vlan_header_ex(struct NetTxPkt *pkt, + uint16_t vlan, uint16_t vlan_ethtype) +{ + bool is_new; + assert(pkt); + + eth_setup_vlan_headers_ex(pkt->vec[NET_TX_PKT_L2HDR_FRAG].iov_base, + vlan, vlan_ethtype, &is_new); + + /* update l2hdrlen */ + if (is_new) { + pkt->hdr_len += sizeof(struct vlan_header); + pkt->vec[NET_TX_PKT_L2HDR_FRAG].iov_len += + sizeof(struct vlan_header); + } +} + +bool net_tx_pkt_add_raw_fragment(struct NetTxPkt *pkt, hwaddr pa, + size_t len) +{ + hwaddr mapped_len = 0; + struct iovec *ventry; + assert(pkt); + + if (pkt->raw_frags >= pkt->max_raw_frags) { + return false; + } + + if (!len) { + return true; + } + + ventry = &pkt->raw[pkt->raw_frags]; + mapped_len = len; + + ventry->iov_base = pci_dma_map(pkt->pci_dev, pa, + &mapped_len, DMA_DIRECTION_TO_DEVICE); + + if ((ventry->iov_base != NULL) && (len == mapped_len)) { + ventry->iov_len = mapped_len; + pkt->raw_frags++; + return true; + } else { + return false; + } +} + +bool net_tx_pkt_has_fragments(struct NetTxPkt *pkt) +{ + return pkt->raw_frags > 0; +} + +eth_pkt_types_e net_tx_pkt_get_packet_type(struct NetTxPkt *pkt) +{ + assert(pkt); + + return pkt->packet_type; +} + +size_t net_tx_pkt_get_total_len(struct NetTxPkt *pkt) +{ + assert(pkt); + + return pkt->hdr_len + pkt->payload_len; +} + +void net_tx_pkt_dump(struct NetTxPkt *pkt) +{ +#ifdef NET_TX_PKT_DEBUG + assert(pkt); + + printf("TX PKT: hdr_len: %d, pkt_type: 0x%X, l2hdr_len: %lu, " + "l3hdr_len: %lu, payload_len: %u\n", pkt->hdr_len, pkt->packet_type, + pkt->vec[NET_TX_PKT_L2HDR_FRAG].iov_len, + pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_len, pkt->payload_len); +#endif +} + +void net_tx_pkt_reset(struct NetTxPkt *pkt) +{ + int i; + + /* no assert, as reset can be called before tx_pkt_init */ + if (!pkt) { + return; + } + + memset(&pkt->virt_hdr, 0, sizeof(pkt->virt_hdr)); + + assert(pkt->vec); + + pkt->payload_len = 0; + pkt->payload_frags = 0; + + if (pkt->max_raw_frags > 0) { + assert(pkt->raw); + for (i = 0; i < pkt->raw_frags; i++) { + assert(pkt->raw[i].iov_base); + pci_dma_unmap(pkt->pci_dev, pkt->raw[i].iov_base, + pkt->raw[i].iov_len, DMA_DIRECTION_TO_DEVICE, 0); + } + } + pkt->raw_frags = 0; + + pkt->hdr_len = 0; + pkt->l4proto = 0; +} + +static void net_tx_pkt_do_sw_csum(struct NetTxPkt *pkt) +{ + struct iovec *iov = 
&pkt->vec[NET_TX_PKT_L2HDR_FRAG]; + uint32_t csum_cntr; + uint16_t csum = 0; + uint32_t cso; + /* num of iovec without vhdr */ + uint32_t iov_len = pkt->payload_frags + NET_TX_PKT_PL_START_FRAG - 1; + uint16_t csl; + size_t csum_offset = pkt->virt_hdr.csum_start + pkt->virt_hdr.csum_offset; + uint16_t l3_proto = eth_get_l3_proto(iov, 1, iov->iov_len); + + /* Put zero to checksum field */ + iov_from_buf(iov, iov_len, csum_offset, &csum, sizeof csum); + + /* Calculate L4 TCP/UDP checksum */ + csl = pkt->payload_len; + + csum_cntr = 0; + cso = 0; + /* add pseudo header to csum */ + if (l3_proto == ETH_P_IP) { + csum_cntr = eth_calc_ip4_pseudo_hdr_csum( + pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_base, + csl, &cso); + } else if (l3_proto == ETH_P_IPV6) { + csum_cntr = eth_calc_ip6_pseudo_hdr_csum( + pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_base, + csl, pkt->l4proto, &cso); + } + + /* data checksum */ + csum_cntr += + net_checksum_add_iov(iov, iov_len, pkt->virt_hdr.csum_start, csl, cso); + + /* Put the checksum obtained into the packet */ + csum = cpu_to_be16(net_checksum_finish_nozero(csum_cntr)); + iov_from_buf(iov, iov_len, csum_offset, &csum, sizeof csum); +} + +enum { + NET_TX_PKT_FRAGMENT_L2_HDR_POS = 0, + NET_TX_PKT_FRAGMENT_L3_HDR_POS, + NET_TX_PKT_FRAGMENT_HEADER_NUM +}; + +#define NET_MAX_FRAG_SG_LIST (64) + +static size_t net_tx_pkt_fetch_fragment(struct NetTxPkt *pkt, + int *src_idx, size_t *src_offset, struct iovec *dst, int *dst_idx) +{ + size_t fetched = 0; + struct iovec *src = pkt->vec; + + *dst_idx = NET_TX_PKT_FRAGMENT_HEADER_NUM; + + while (fetched < IP_FRAG_ALIGN_SIZE(pkt->virt_hdr.gso_size)) { + + /* no more place in fragment iov */ + if (*dst_idx == NET_MAX_FRAG_SG_LIST) { + break; + } + + /* no more data in iovec */ + if (*src_idx == (pkt->payload_frags + NET_TX_PKT_PL_START_FRAG)) { + break; + } + + + dst[*dst_idx].iov_base = src[*src_idx].iov_base + *src_offset; + dst[*dst_idx].iov_len = MIN(src[*src_idx].iov_len - *src_offset, + IP_FRAG_ALIGN_SIZE(pkt->virt_hdr.gso_size) - fetched); + + *src_offset += dst[*dst_idx].iov_len; + fetched += dst[*dst_idx].iov_len; + + if (*src_offset == src[*src_idx].iov_len) { + *src_offset = 0; + (*src_idx)++; + } + + (*dst_idx)++; + } + + return fetched; +} + +static inline void net_tx_pkt_sendv(struct NetTxPkt *pkt, + NetClientState *nc, const struct iovec *iov, int iov_cnt) +{ + if (pkt->is_loopback) { + qemu_receive_packet_iov(nc, iov, iov_cnt); + } else { + qemu_sendv_packet(nc, iov, iov_cnt); + } +} + +static bool net_tx_pkt_do_sw_fragmentation(struct NetTxPkt *pkt, + NetClientState *nc) +{ + struct iovec fragment[NET_MAX_FRAG_SG_LIST]; + size_t fragment_len = 0; + bool more_frags = false; + + /* some pointers for shorter code */ + void *l2_iov_base, *l3_iov_base; + size_t l2_iov_len, l3_iov_len; + int src_idx = NET_TX_PKT_PL_START_FRAG, dst_idx; + size_t src_offset = 0; + size_t fragment_offset = 0; + + l2_iov_base = pkt->vec[NET_TX_PKT_L2HDR_FRAG].iov_base; + l2_iov_len = pkt->vec[NET_TX_PKT_L2HDR_FRAG].iov_len; + l3_iov_base = pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_base; + l3_iov_len = pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_len; + + /* Copy headers */ + fragment[NET_TX_PKT_FRAGMENT_L2_HDR_POS].iov_base = l2_iov_base; + fragment[NET_TX_PKT_FRAGMENT_L2_HDR_POS].iov_len = l2_iov_len; + fragment[NET_TX_PKT_FRAGMENT_L3_HDR_POS].iov_base = l3_iov_base; + fragment[NET_TX_PKT_FRAGMENT_L3_HDR_POS].iov_len = l3_iov_len; + + + /* Put as much data as possible and send */ + do { + fragment_len = net_tx_pkt_fetch_fragment(pkt, &src_idx, &src_offset, + 
fragment, &dst_idx); + + more_frags = (fragment_offset + fragment_len < pkt->payload_len); + + eth_setup_ip4_fragmentation(l2_iov_base, l2_iov_len, l3_iov_base, + l3_iov_len, fragment_len, fragment_offset, more_frags); + + eth_fix_ip4_checksum(l3_iov_base, l3_iov_len); + + net_tx_pkt_sendv(pkt, nc, fragment, dst_idx); + + fragment_offset += fragment_len; + + } while (fragment_len && more_frags); + + return true; +} + +bool net_tx_pkt_send(struct NetTxPkt *pkt, NetClientState *nc) +{ + assert(pkt); + + if (!pkt->has_virt_hdr && + pkt->virt_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) { + net_tx_pkt_do_sw_csum(pkt); + } + + /* + * Since underlying infrastructure does not support IP datagrams longer + * than 64K we should drop such packets and don't even try to send + */ + if (VIRTIO_NET_HDR_GSO_NONE != pkt->virt_hdr.gso_type) { + if (pkt->payload_len > + ETH_MAX_IP_DGRAM_LEN - + pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_len) { + return false; + } + } + + if (pkt->has_virt_hdr || + pkt->virt_hdr.gso_type == VIRTIO_NET_HDR_GSO_NONE) { + net_tx_pkt_fix_ip6_payload_len(pkt); + net_tx_pkt_sendv(pkt, nc, pkt->vec, + pkt->payload_frags + NET_TX_PKT_PL_START_FRAG); + return true; + } + + return net_tx_pkt_do_sw_fragmentation(pkt, nc); +} + +bool net_tx_pkt_send_loopback(struct NetTxPkt *pkt, NetClientState *nc) +{ + bool res; + + pkt->is_loopback = true; + res = net_tx_pkt_send(pkt, nc); + pkt->is_loopback = false; + + return res; +} + +void net_tx_pkt_fix_ip6_payload_len(struct NetTxPkt *pkt) +{ + struct iovec *l2 = &pkt->vec[NET_TX_PKT_L2HDR_FRAG]; + if (eth_get_l3_proto(l2, 1, l2->iov_len) == ETH_P_IPV6) { + struct ip6_header *ip6 = (struct ip6_header *) pkt->l3_hdr; + /* + * TODO: if qemu would support >64K packets - add jumbo option check + * something like that: + * 'if (ip6->ip6_plen == 0 && !has_jumbo_option(ip6)) {' + */ + if (ip6->ip6_plen == 0) { + if (pkt->payload_len <= ETH_MAX_IP_DGRAM_LEN) { + ip6->ip6_plen = htons(pkt->payload_len); + } + /* + * TODO: if qemu would support >64K packets + * add jumbo option for packets greater then 65,535 bytes + */ + } + } +} diff --git a/hw/net/net_tx_pkt.h b/hw/net/net_tx_pkt.h new file mode 100644 index 000000000..4ec8bbe9b --- /dev/null +++ b/hw/net/net_tx_pkt.h @@ -0,0 +1,204 @@ +/* + * QEMU TX packets abstraction + * + * Copyright (c) 2012 Ravello Systems LTD (http://ravellosystems.com) + * + * Developed by Daynix Computing LTD (http://www.daynix.com) + * + * Authors: + * Dmitry Fleytman + * Tamir Shomer + * Yan Vugenfirer + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +#ifndef NET_TX_PKT_H +#define NET_TX_PKT_H + +#include "net/eth.h" +#include "exec/hwaddr.h" + +/* define to enable packet dump functions */ +/*#define NET_TX_PKT_DEBUG*/ + +struct NetTxPkt; + +/** + * Init function for tx packet functionality + * + * @pkt: packet pointer + * @pci_dev: PCI device processing this packet + * @max_frags: max tx ip fragments + * @has_virt_hdr: device uses virtio header. + */ +void net_tx_pkt_init(struct NetTxPkt **pkt, PCIDevice *pci_dev, + uint32_t max_frags, bool has_virt_hdr); + +/** + * Clean all tx packet resources. + * + * @pkt: packet. 
+ */ +void net_tx_pkt_uninit(struct NetTxPkt *pkt); + +/** + * get virtio header + * + * @pkt: packet + * @ret: virtio header + */ +struct virtio_net_hdr *net_tx_pkt_get_vhdr(struct NetTxPkt *pkt); + +/** + * build virtio header (will be stored in module context) + * + * @pkt: packet + * @tso_enable: TSO enabled + * @csum_enable: CSO enabled + * @gso_size: MSS size for TSO + * + */ +void net_tx_pkt_build_vheader(struct NetTxPkt *pkt, bool tso_enable, + bool csum_enable, uint32_t gso_size); + +/** +* updates vlan tag, and adds vlan header with custom ethernet type +* in case it is missing. +* +* @pkt: packet +* @vlan: VLAN tag +* @vlan_ethtype: VLAN header Ethernet type +* +*/ +void net_tx_pkt_setup_vlan_header_ex(struct NetTxPkt *pkt, + uint16_t vlan, uint16_t vlan_ethtype); + +/** +* updates vlan tag, and adds vlan header in case it is missing +* +* @pkt: packet +* @vlan: VLAN tag +* +*/ +static inline void +net_tx_pkt_setup_vlan_header(struct NetTxPkt *pkt, uint16_t vlan) +{ + net_tx_pkt_setup_vlan_header_ex(pkt, vlan, ETH_P_VLAN); +} + +/** + * populate data fragment into pkt context. + * + * @pkt: packet + * @pa: physical address of fragment + * @len: length of fragment + * + */ +bool net_tx_pkt_add_raw_fragment(struct NetTxPkt *pkt, hwaddr pa, + size_t len); + +/** + * Fix ip header fields and calculate IP header and pseudo header checksums. + * + * @pkt: packet + * + */ +void net_tx_pkt_update_ip_checksums(struct NetTxPkt *pkt); + +/** + * Calculate the IP header checksum. + * + * @pkt: packet + * + */ +void net_tx_pkt_update_ip_hdr_checksum(struct NetTxPkt *pkt); + +/** + * get length of all populated data. + * + * @pkt: packet + * @ret: total data length + * + */ +size_t net_tx_pkt_get_total_len(struct NetTxPkt *pkt); + +/** + * get packet type + * + * @pkt: packet + * @ret: packet type + * + */ +eth_pkt_types_e net_tx_pkt_get_packet_type(struct NetTxPkt *pkt); + +/** + * prints packet data if debug is enabled + * + * @pkt: packet + * + */ +void net_tx_pkt_dump(struct NetTxPkt *pkt); + +/** + * reset tx packet private context (needed to be called between packets) + * + * @pkt: packet + * + */ +void net_tx_pkt_reset(struct NetTxPkt *pkt); + +/** + * Send packet to qemu. handles sw offloads if vhdr is not supported. + * + * @pkt: packet + * @nc: NetClientState + * @ret: operation result + * + */ +bool net_tx_pkt_send(struct NetTxPkt *pkt, NetClientState *nc); + +/** +* Redirect packet directly to receive path (emulate loopback phy). +* Handles sw offloads if vhdr is not supported. +* +* @pkt: packet +* @nc: NetClientState +* @ret: operation result +* +*/ +bool net_tx_pkt_send_loopback(struct NetTxPkt *pkt, NetClientState *nc); + +/** + * parse raw packet data and analyze offload requirements. + * + * @pkt: packet + * + */ +bool net_tx_pkt_parse(struct NetTxPkt *pkt); + +/** +* indicates if there are data fragments held by this packet object. +* +* @pkt: packet +* +*/ +bool net_tx_pkt_has_fragments(struct NetTxPkt *pkt); + +/** + * Fix IPv6 'plen' field. + * If ipv6 payload length field is 0 - then there should be Hop-by-Hop + * option for packets greater than 65,535. + * For packets with a payload less than 65,535: fix 'plen' field. + * For backends with vheader, we need just one packet with proper + * payload size. 
For now, qemu drops every packet with size greater 64K + * (see net_tx_pkt_send()) so, there is no reason to add jumbo option to ip6 + * hop-by-hop extension if it's missed + * + * @pkt packet + */ +void net_tx_pkt_fix_ip6_payload_len(struct NetTxPkt *pkt); + +#endif diff --git a/hw/net/npcm7xx_emc.c b/hw/net/npcm7xx_emc.c new file mode 100644 index 000000000..7c892f820 --- /dev/null +++ b/hw/net/npcm7xx_emc.c @@ -0,0 +1,859 @@ +/* + * Nuvoton NPCM7xx EMC Module + * + * Copyright 2020 Google LLC + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * Unsupported/unimplemented features: + * - MCMDR.FDUP (full duplex) is ignored, half duplex is not supported + * - Only CAM0 is supported, CAM[1-15] are not + * - writes to CAMEN.[1-15] are ignored, these bits always read as zeroes + * - MII is not implemented, MIIDA.BUSY and MIID always return zero + * - MCMDR.LBK is not implemented + * - MCMDR.{OPMOD,ENSQE,AEP,ARP} are not supported + * - H/W FIFOs are not supported, MCMDR.FFTCR is ignored + * - MGSTA.SQE is not supported + * - pause and control frames are not implemented + * - MGSTA.CCNT is not supported + * - MPCNT, DMARFS are not implemented + */ + +#include "qemu/osdep.h" + +/* For crc32 */ +#include + +#include "qemu-common.h" +#include "hw/irq.h" +#include "hw/qdev-clock.h" +#include "hw/qdev-properties.h" +#include "hw/net/npcm7xx_emc.h" +#include "net/eth.h" +#include "migration/vmstate.h" +#include "qemu/bitops.h" +#include "qemu/error-report.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "qemu/units.h" +#include "sysemu/dma.h" +#include "trace.h" + +#define CRC_LENGTH 4 + +/* + * The maximum size of a (layer 2) ethernet frame as defined by 802.3. + * 1518 = 6(dest macaddr) + 6(src macaddr) + 2(proto) + 4(crc) + 1500(payload) + * This does not include an additional 4 for the vlan field (802.1q). + */ +#define MAX_ETH_FRAME_SIZE 1518 + +static const char *emc_reg_name(int regno) +{ +#define REG(name) case REG_ ## name: return #name; + switch (regno) { + REG(CAMCMR) + REG(CAMEN) + REG(TXDLSA) + REG(RXDLSA) + REG(MCMDR) + REG(MIID) + REG(MIIDA) + REG(FFTCR) + REG(TSDR) + REG(RSDR) + REG(DMARFC) + REG(MIEN) + REG(MISTA) + REG(MGSTA) + REG(MPCNT) + REG(MRPC) + REG(MRPCC) + REG(MREPC) + REG(DMARFS) + REG(CTXDSA) + REG(CTXBSA) + REG(CRXDSA) + REG(CRXBSA) + case REG_CAMM_BASE + 0: return "CAM0M"; + case REG_CAML_BASE + 0: return "CAM0L"; + case REG_CAMM_BASE + 2 ... REG_CAMML_LAST: + /* Only CAM0 is supported, fold the others into something simple. */ + if (regno & 1) { + return "CAML"; + } else { + return "CAMM"; + } + default: return "UNKNOWN"; + } +#undef REG +} + +static void emc_reset(NPCM7xxEMCState *emc) +{ + trace_npcm7xx_emc_reset(emc->emc_num); + + memset(&emc->regs[0], 0, sizeof(emc->regs)); + + /* These regs have non-zero reset values. 
*/ + emc->regs[REG_TXDLSA] = 0xfffffffc; + emc->regs[REG_RXDLSA] = 0xfffffffc; + emc->regs[REG_MIIDA] = 0x00900000; + emc->regs[REG_FFTCR] = 0x0101; + emc->regs[REG_DMARFC] = 0x0800; + emc->regs[REG_MPCNT] = 0x7fff; + + emc->tx_active = false; + emc->rx_active = false; +} + +static void npcm7xx_emc_reset(DeviceState *dev) +{ + NPCM7xxEMCState *emc = NPCM7XX_EMC(dev); + emc_reset(emc); +} + +static void emc_soft_reset(NPCM7xxEMCState *emc) +{ + /* + * The docs say at least MCMDR.{LBK,OPMOD} bits are not changed during a + * soft reset, but does not go into further detail. For now, KISS. + */ + uint32_t mcmdr = emc->regs[REG_MCMDR]; + emc_reset(emc); + emc->regs[REG_MCMDR] = mcmdr & (REG_MCMDR_LBK | REG_MCMDR_OPMOD); + + qemu_set_irq(emc->tx_irq, 0); + qemu_set_irq(emc->rx_irq, 0); +} + +static void emc_set_link(NetClientState *nc) +{ + /* Nothing to do yet. */ +} + +/* MISTA.TXINTR is the union of the individual bits with their enables. */ +static void emc_update_mista_txintr(NPCM7xxEMCState *emc) +{ + /* Only look at the bits we support. */ + uint32_t mask = (REG_MISTA_TXBERR | + REG_MISTA_TDU | + REG_MISTA_TXCP); + if (emc->regs[REG_MISTA] & emc->regs[REG_MIEN] & mask) { + emc->regs[REG_MISTA] |= REG_MISTA_TXINTR; + } else { + emc->regs[REG_MISTA] &= ~REG_MISTA_TXINTR; + } +} + +/* MISTA.RXINTR is the union of the individual bits with their enables. */ +static void emc_update_mista_rxintr(NPCM7xxEMCState *emc) +{ + /* Only look at the bits we support. */ + uint32_t mask = (REG_MISTA_RXBERR | + REG_MISTA_RDU | + REG_MISTA_RXGD); + if (emc->regs[REG_MISTA] & emc->regs[REG_MIEN] & mask) { + emc->regs[REG_MISTA] |= REG_MISTA_RXINTR; + } else { + emc->regs[REG_MISTA] &= ~REG_MISTA_RXINTR; + } +} + +/* N.B. emc_update_mista_txintr must have already been called. */ +static void emc_update_tx_irq(NPCM7xxEMCState *emc) +{ + int level = !!(emc->regs[REG_MISTA] & + emc->regs[REG_MIEN] & + REG_MISTA_TXINTR); + trace_npcm7xx_emc_update_tx_irq(level); + qemu_set_irq(emc->tx_irq, level); +} + +/* N.B. emc_update_mista_rxintr must have already been called. */ +static void emc_update_rx_irq(NPCM7xxEMCState *emc) +{ + int level = !!(emc->regs[REG_MISTA] & + emc->regs[REG_MIEN] & + REG_MISTA_RXINTR); + trace_npcm7xx_emc_update_rx_irq(level); + qemu_set_irq(emc->rx_irq, level); +} + +/* Update IRQ states due to changes in MIEN,MISTA. 
*/ +static void emc_update_irq_from_reg_change(NPCM7xxEMCState *emc) +{ + emc_update_mista_txintr(emc); + emc_update_tx_irq(emc); + + emc_update_mista_rxintr(emc); + emc_update_rx_irq(emc); +} + +static int emc_read_tx_desc(dma_addr_t addr, NPCM7xxEMCTxDesc *desc) +{ + if (dma_memory_read(&address_space_memory, addr, desc, sizeof(*desc))) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: Failed to read descriptor @ 0x%" + HWADDR_PRIx "\n", __func__, addr); + return -1; + } + desc->flags = le32_to_cpu(desc->flags); + desc->txbsa = le32_to_cpu(desc->txbsa); + desc->status_and_length = le32_to_cpu(desc->status_and_length); + desc->ntxdsa = le32_to_cpu(desc->ntxdsa); + return 0; +} + +static int emc_write_tx_desc(const NPCM7xxEMCTxDesc *desc, dma_addr_t addr) +{ + NPCM7xxEMCTxDesc le_desc; + + le_desc.flags = cpu_to_le32(desc->flags); + le_desc.txbsa = cpu_to_le32(desc->txbsa); + le_desc.status_and_length = cpu_to_le32(desc->status_and_length); + le_desc.ntxdsa = cpu_to_le32(desc->ntxdsa); + if (dma_memory_write(&address_space_memory, addr, &le_desc, + sizeof(le_desc))) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: Failed to write descriptor @ 0x%" + HWADDR_PRIx "\n", __func__, addr); + return -1; + } + return 0; +} + +static int emc_read_rx_desc(dma_addr_t addr, NPCM7xxEMCRxDesc *desc) +{ + if (dma_memory_read(&address_space_memory, addr, desc, sizeof(*desc))) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: Failed to read descriptor @ 0x%" + HWADDR_PRIx "\n", __func__, addr); + return -1; + } + desc->status_and_length = le32_to_cpu(desc->status_and_length); + desc->rxbsa = le32_to_cpu(desc->rxbsa); + desc->reserved = le32_to_cpu(desc->reserved); + desc->nrxdsa = le32_to_cpu(desc->nrxdsa); + return 0; +} + +static int emc_write_rx_desc(const NPCM7xxEMCRxDesc *desc, dma_addr_t addr) +{ + NPCM7xxEMCRxDesc le_desc; + + le_desc.status_and_length = cpu_to_le32(desc->status_and_length); + le_desc.rxbsa = cpu_to_le32(desc->rxbsa); + le_desc.reserved = cpu_to_le32(desc->reserved); + le_desc.nrxdsa = cpu_to_le32(desc->nrxdsa); + if (dma_memory_write(&address_space_memory, addr, &le_desc, + sizeof(le_desc))) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: Failed to write descriptor @ 0x%" + HWADDR_PRIx "\n", __func__, addr); + return -1; + } + return 0; +} + +static void emc_set_mista(NPCM7xxEMCState *emc, uint32_t flags) +{ + trace_npcm7xx_emc_set_mista(flags); + emc->regs[REG_MISTA] |= flags; + if (extract32(flags, 16, 16)) { + emc_update_mista_txintr(emc); + } + if (extract32(flags, 0, 16)) { + emc_update_mista_rxintr(emc); + } +} + +static void emc_halt_tx(NPCM7xxEMCState *emc, uint32_t mista_flag) +{ + emc->tx_active = false; + emc_set_mista(emc, mista_flag); +} + +static void emc_halt_rx(NPCM7xxEMCState *emc, uint32_t mista_flag) +{ + emc->rx_active = false; + emc_set_mista(emc, mista_flag); +} + +static void emc_set_next_tx_descriptor(NPCM7xxEMCState *emc, + const NPCM7xxEMCTxDesc *tx_desc, + uint32_t desc_addr) +{ + /* Update the current descriptor, if only to reset the owner flag. */ + if (emc_write_tx_desc(tx_desc, desc_addr)) { + /* + * We just read it so this shouldn't generally happen. + * Error already reported. + */ + emc_set_mista(emc, REG_MISTA_TXBERR); + } + emc->regs[REG_CTXDSA] = TX_DESC_NTXDSA(tx_desc->ntxdsa); +} + +static void emc_set_next_rx_descriptor(NPCM7xxEMCState *emc, + const NPCM7xxEMCRxDesc *rx_desc, + uint32_t desc_addr) +{ + /* Update the current descriptor, if only to reset the owner flag. */ + if (emc_write_rx_desc(rx_desc, desc_addr)) { + /* + * We just read it so this shouldn't generally happen. 
+ * Error already reported. + */ + emc_set_mista(emc, REG_MISTA_RXBERR); + } + emc->regs[REG_CRXDSA] = RX_DESC_NRXDSA(rx_desc->nrxdsa); +} + +static void emc_try_send_next_packet(NPCM7xxEMCState *emc) +{ + /* Working buffer for sending out packets. Most packets fit in this. */ +#define TX_BUFFER_SIZE 2048 + uint8_t tx_send_buffer[TX_BUFFER_SIZE]; + uint32_t desc_addr = TX_DESC_NTXDSA(emc->regs[REG_CTXDSA]); + NPCM7xxEMCTxDesc tx_desc; + uint32_t next_buf_addr, length; + uint8_t *buf; + g_autofree uint8_t *malloced_buf = NULL; + + if (emc_read_tx_desc(desc_addr, &tx_desc)) { + /* Error reading descriptor, already reported. */ + emc_halt_tx(emc, REG_MISTA_TXBERR); + emc_update_tx_irq(emc); + return; + } + + /* Nothing we can do if we don't own the descriptor. */ + if (!(tx_desc.flags & TX_DESC_FLAG_OWNER_MASK)) { + trace_npcm7xx_emc_cpu_owned_desc(desc_addr); + emc_halt_tx(emc, REG_MISTA_TDU); + emc_update_tx_irq(emc); + return; + } + + /* Give the descriptor back regardless of what happens. */ + tx_desc.flags &= ~TX_DESC_FLAG_OWNER_MASK; + tx_desc.status_and_length &= 0xffff; + + /* + * Despite the h/w documentation saying the tx buffer is word aligned, + * the linux driver does not word align the buffer. There is value in not + * aligning the buffer: See the description of NET_IP_ALIGN in linux + * kernel sources. + */ + next_buf_addr = tx_desc.txbsa; + emc->regs[REG_CTXBSA] = next_buf_addr; + length = TX_DESC_PKT_LEN(tx_desc.status_and_length); + buf = &tx_send_buffer[0]; + + if (length > sizeof(tx_send_buffer)) { + malloced_buf = g_malloc(length); + buf = malloced_buf; + } + + if (dma_memory_read(&address_space_memory, next_buf_addr, buf, length)) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: Failed to read packet @ 0x%x\n", + __func__, next_buf_addr); + emc_set_mista(emc, REG_MISTA_TXBERR); + emc_set_next_tx_descriptor(emc, &tx_desc, desc_addr); + emc_update_tx_irq(emc); + trace_npcm7xx_emc_tx_done(emc->regs[REG_CTXDSA]); + return; + } + + if ((tx_desc.flags & TX_DESC_FLAG_PADEN) && (length < MIN_PACKET_LENGTH)) { + memset(buf + length, 0, MIN_PACKET_LENGTH - length); + length = MIN_PACKET_LENGTH; + } + + /* N.B. emc_receive can get called here. */ + qemu_send_packet(qemu_get_queue(emc->nic), buf, length); + trace_npcm7xx_emc_sent_packet(length); + + tx_desc.status_and_length |= TX_DESC_STATUS_TXCP; + if (tx_desc.flags & TX_DESC_FLAG_INTEN) { + emc_set_mista(emc, REG_MISTA_TXCP); + } + if (emc->regs[REG_MISTA] & emc->regs[REG_MIEN] & REG_MISTA_TXINTR) { + tx_desc.status_and_length |= TX_DESC_STATUS_TXINTR; + } + + emc_set_next_tx_descriptor(emc, &tx_desc, desc_addr); + emc_update_tx_irq(emc); + trace_npcm7xx_emc_tx_done(emc->regs[REG_CTXDSA]); +} + +static bool emc_can_receive(NetClientState *nc) +{ + NPCM7xxEMCState *emc = NPCM7XX_EMC(qemu_get_nic_opaque(nc)); + + bool can_receive = emc->rx_active; + trace_npcm7xx_emc_can_receive(can_receive); + return can_receive; +} + +/* If result is false then *fail_reason contains the reason. 
*/ +static bool emc_receive_filter1(NPCM7xxEMCState *emc, const uint8_t *buf, + size_t len, const char **fail_reason) +{ + eth_pkt_types_e pkt_type = get_eth_packet_type(PKT_GET_ETH_HDR(buf)); + + switch (pkt_type) { + case ETH_PKT_BCAST: + if (emc->regs[REG_CAMCMR] & REG_CAMCMR_CCAM) { + return true; + } else { + *fail_reason = "Broadcast packet disabled"; + return !!(emc->regs[REG_CAMCMR] & REG_CAMCMR_ABP); + } + case ETH_PKT_MCAST: + if (emc->regs[REG_CAMCMR] & REG_CAMCMR_CCAM) { + return true; + } else { + *fail_reason = "Multicast packet disabled"; + return !!(emc->regs[REG_CAMCMR] & REG_CAMCMR_AMP); + } + case ETH_PKT_UCAST: { + bool matches; + if (emc->regs[REG_CAMCMR] & REG_CAMCMR_AUP) { + return true; + } + matches = ((emc->regs[REG_CAMCMR] & REG_CAMCMR_ECMP) && + /* We only support one CAM register, CAM0. */ + (emc->regs[REG_CAMEN] & (1 << 0)) && + memcmp(buf, emc->conf.macaddr.a, ETH_ALEN) == 0); + if (emc->regs[REG_CAMCMR] & REG_CAMCMR_CCAM) { + *fail_reason = "MACADDR matched, comparison complemented"; + return !matches; + } else { + *fail_reason = "MACADDR didn't match"; + return matches; + } + } + default: + g_assert_not_reached(); + } +} + +static bool emc_receive_filter(NPCM7xxEMCState *emc, const uint8_t *buf, + size_t len) +{ + const char *fail_reason = NULL; + bool ok = emc_receive_filter1(emc, buf, len, &fail_reason); + if (!ok) { + trace_npcm7xx_emc_packet_filtered_out(fail_reason); + } + return ok; +} + +static ssize_t emc_receive(NetClientState *nc, const uint8_t *buf, size_t len1) +{ + NPCM7xxEMCState *emc = NPCM7XX_EMC(qemu_get_nic_opaque(nc)); + const uint32_t len = len1; + size_t max_frame_len; + bool long_frame; + uint32_t desc_addr; + NPCM7xxEMCRxDesc rx_desc; + uint32_t crc; + uint8_t *crc_ptr; + uint32_t buf_addr; + + trace_npcm7xx_emc_receiving_packet(len); + + if (!emc_can_receive(nc)) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: Unexpected packet\n", __func__); + return -1; + } + + if (len < ETH_HLEN || + /* Defensive programming: drop unsupportable large packets. */ + len > 0xffff - CRC_LENGTH) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: Dropped frame of %u bytes\n", + __func__, len); + return len; + } + + /* + * DENI is set if EMC received the Length/Type field of the incoming + * packet, so it will be set regardless of what happens next. + */ + emc_set_mista(emc, REG_MISTA_DENI); + + if (!emc_receive_filter(emc, buf, len)) { + emc_update_rx_irq(emc); + return len; + } + + /* Huge frames (> DMARFC) are dropped. */ + max_frame_len = REG_DMARFC_RXMS(emc->regs[REG_DMARFC]); + if (len + CRC_LENGTH > max_frame_len) { + trace_npcm7xx_emc_packet_dropped(len); + emc_set_mista(emc, REG_MISTA_DFOI); + emc_update_rx_irq(emc); + return len; + } + + /* + * Long Frames (> MAX_ETH_FRAME_SIZE) are also dropped, unless MCMDR.ALP + * is set. + */ + long_frame = false; + if (len + CRC_LENGTH > MAX_ETH_FRAME_SIZE) { + if (emc->regs[REG_MCMDR] & REG_MCMDR_ALP) { + long_frame = true; + } else { + trace_npcm7xx_emc_packet_dropped(len); + emc_set_mista(emc, REG_MISTA_PTLE); + emc_update_rx_irq(emc); + return len; + } + } + + desc_addr = RX_DESC_NRXDSA(emc->regs[REG_CRXDSA]); + if (emc_read_rx_desc(desc_addr, &rx_desc)) { + /* Error reading descriptor, already reported. */ + emc_halt_rx(emc, REG_MISTA_RXBERR); + emc_update_rx_irq(emc); + return len; + } + + /* Nothing we can do if we don't own the descriptor. 
*/ + if (!(rx_desc.status_and_length & RX_DESC_STATUS_OWNER_MASK)) { + trace_npcm7xx_emc_cpu_owned_desc(desc_addr); + emc_halt_rx(emc, REG_MISTA_RDU); + emc_update_rx_irq(emc); + return len; + } + + crc = 0; + crc_ptr = (uint8_t *) &crc; + if (!(emc->regs[REG_MCMDR] & REG_MCMDR_SPCRC)) { + crc = cpu_to_be32(crc32(~0, buf, len)); + } + + /* Give the descriptor back regardless of what happens. */ + rx_desc.status_and_length &= ~RX_DESC_STATUS_OWNER_MASK; + + buf_addr = rx_desc.rxbsa; + emc->regs[REG_CRXBSA] = buf_addr; + if (dma_memory_write(&address_space_memory, buf_addr, buf, len) || + (!(emc->regs[REG_MCMDR] & REG_MCMDR_SPCRC) && + dma_memory_write(&address_space_memory, buf_addr + len, crc_ptr, + 4))) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: Bus error writing packet\n", + __func__); + emc_set_mista(emc, REG_MISTA_RXBERR); + emc_set_next_rx_descriptor(emc, &rx_desc, desc_addr); + emc_update_rx_irq(emc); + trace_npcm7xx_emc_rx_done(emc->regs[REG_CRXDSA]); + return len; + } + + trace_npcm7xx_emc_received_packet(len); + + /* Note: We've already verified len+4 <= 0xffff. */ + rx_desc.status_and_length = len; + if (!(emc->regs[REG_MCMDR] & REG_MCMDR_SPCRC)) { + rx_desc.status_and_length += 4; + } + rx_desc.status_and_length |= RX_DESC_STATUS_RXGD; + emc_set_mista(emc, REG_MISTA_RXGD); + + if (emc->regs[REG_MISTA] & emc->regs[REG_MIEN] & REG_MISTA_RXINTR) { + rx_desc.status_and_length |= RX_DESC_STATUS_RXINTR; + } + if (long_frame) { + rx_desc.status_and_length |= RX_DESC_STATUS_PTLE; + } + + emc_set_next_rx_descriptor(emc, &rx_desc, desc_addr); + emc_update_rx_irq(emc); + trace_npcm7xx_emc_rx_done(emc->regs[REG_CRXDSA]); + return len; +} + +static void emc_try_receive_next_packet(NPCM7xxEMCState *emc) +{ + if (emc_can_receive(qemu_get_queue(emc->nic))) { + qemu_flush_queued_packets(qemu_get_queue(emc->nic)); + } +} + +static uint64_t npcm7xx_emc_read(void *opaque, hwaddr offset, unsigned size) +{ + NPCM7xxEMCState *emc = opaque; + uint32_t reg = offset / sizeof(uint32_t); + uint32_t result; + + if (reg >= NPCM7XX_NUM_EMC_REGS) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Invalid offset 0x%04" HWADDR_PRIx "\n", + __func__, offset); + return 0; + } + + switch (reg) { + case REG_MIID: + /* + * We don't implement MII. For determinism, always return zero as + * writes record the last value written for debugging purposes. + */ + qemu_log_mask(LOG_UNIMP, "%s: Read of MIID, returning 0\n", __func__); + result = 0; + break; + case REG_TSDR: + case REG_RSDR: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Read of write-only reg, %s/%d\n", + __func__, emc_reg_name(reg), reg); + return 0; + default: + result = emc->regs[reg]; + break; + } + + trace_npcm7xx_emc_reg_read(emc->emc_num, result, emc_reg_name(reg), reg); + return result; +} + +static void npcm7xx_emc_write(void *opaque, hwaddr offset, + uint64_t v, unsigned size) +{ + NPCM7xxEMCState *emc = opaque; + uint32_t reg = offset / sizeof(uint32_t); + uint32_t value = v; + + g_assert(size == sizeof(uint32_t)); + + if (reg >= NPCM7XX_NUM_EMC_REGS) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Invalid offset 0x%04" HWADDR_PRIx "\n", + __func__, offset); + return; + } + + trace_npcm7xx_emc_reg_write(emc->emc_num, emc_reg_name(reg), reg, value); + + switch (reg) { + case REG_CAMCMR: + emc->regs[reg] = value; + break; + case REG_CAMEN: + /* Only CAM0 is supported, don't pretend otherwise. 
*/ + if (value & ~1) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Only CAM0 is supported, cannot enable others" + ": 0x%x\n", + __func__, value); + } + emc->regs[reg] = value & 1; + break; + case REG_CAMM_BASE + 0: + emc->regs[reg] = value; + emc->conf.macaddr.a[0] = value >> 24; + emc->conf.macaddr.a[1] = value >> 16; + emc->conf.macaddr.a[2] = value >> 8; + emc->conf.macaddr.a[3] = value >> 0; + break; + case REG_CAML_BASE + 0: + emc->regs[reg] = value; + emc->conf.macaddr.a[4] = value >> 24; + emc->conf.macaddr.a[5] = value >> 16; + break; + case REG_MCMDR: { + uint32_t prev; + if (value & REG_MCMDR_SWR) { + emc_soft_reset(emc); + /* On h/w the reset happens over multiple cycles. For now KISS. */ + break; + } + prev = emc->regs[reg]; + emc->regs[reg] = value; + /* Update tx state. */ + if (!(prev & REG_MCMDR_TXON) && + (value & REG_MCMDR_TXON)) { + emc->regs[REG_CTXDSA] = emc->regs[REG_TXDLSA]; + /* + * Linux kernel turns TX on with CPU still holding descriptor, + * which suggests we should wait for a write to TSDR before trying + * to send a packet: so we don't send one here. + */ + } else if ((prev & REG_MCMDR_TXON) && + !(value & REG_MCMDR_TXON)) { + emc->regs[REG_MGSTA] |= REG_MGSTA_TXHA; + } + if (!(value & REG_MCMDR_TXON)) { + emc_halt_tx(emc, 0); + } + /* Update rx state. */ + if (!(prev & REG_MCMDR_RXON) && + (value & REG_MCMDR_RXON)) { + emc->regs[REG_CRXDSA] = emc->regs[REG_RXDLSA]; + } else if ((prev & REG_MCMDR_RXON) && + !(value & REG_MCMDR_RXON)) { + emc->regs[REG_MGSTA] |= REG_MGSTA_RXHA; + } + if (value & REG_MCMDR_RXON) { + emc->rx_active = true; + } else { + emc_halt_rx(emc, 0); + } + break; + } + case REG_TXDLSA: + case REG_RXDLSA: + case REG_DMARFC: + case REG_MIID: + emc->regs[reg] = value; + break; + case REG_MIEN: + emc->regs[reg] = value; + emc_update_irq_from_reg_change(emc); + break; + case REG_MISTA: + /* Clear the bits that have 1 in "value". */ + emc->regs[reg] &= ~value; + emc_update_irq_from_reg_change(emc); + break; + case REG_MGSTA: + /* Clear the bits that have 1 in "value". */ + emc->regs[reg] &= ~value; + break; + case REG_TSDR: + if (emc->regs[REG_MCMDR] & REG_MCMDR_TXON) { + emc->tx_active = true; + /* Keep trying to send packets until we run out. */ + while (emc->tx_active) { + emc_try_send_next_packet(emc); + } + } + break; + case REG_RSDR: + if (emc->regs[REG_MCMDR] & REG_MCMDR_RXON) { + emc->rx_active = true; + emc_try_receive_next_packet(emc); + } + break; + case REG_MIIDA: + emc->regs[reg] = value & ~REG_MIIDA_BUSY; + break; + case REG_MRPC: + case REG_MRPCC: + case REG_MREPC: + case REG_CTXDSA: + case REG_CTXBSA: + case REG_CRXDSA: + case REG_CRXBSA: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Write to read-only reg %s/%d\n", + __func__, emc_reg_name(reg), reg); + break; + default: + qemu_log_mask(LOG_UNIMP, "%s: Write to unimplemented reg %s/%d\n", + __func__, emc_reg_name(reg), reg); + break; + } +} + +static const struct MemoryRegionOps npcm7xx_emc_ops = { + .read = npcm7xx_emc_read, + .write = npcm7xx_emc_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + .unaligned = false, + }, +}; + +static void emc_cleanup(NetClientState *nc) +{ + /* Nothing to do yet. 
*/ +} + +static NetClientInfo net_npcm7xx_emc_info = { + .type = NET_CLIENT_DRIVER_NIC, + .size = sizeof(NICState), + .can_receive = emc_can_receive, + .receive = emc_receive, + .cleanup = emc_cleanup, + .link_status_changed = emc_set_link, +}; + +static void npcm7xx_emc_realize(DeviceState *dev, Error **errp) +{ + NPCM7xxEMCState *emc = NPCM7XX_EMC(dev); + SysBusDevice *sbd = SYS_BUS_DEVICE(emc); + + memory_region_init_io(&emc->iomem, OBJECT(emc), &npcm7xx_emc_ops, emc, + TYPE_NPCM7XX_EMC, 4 * KiB); + sysbus_init_mmio(sbd, &emc->iomem); + sysbus_init_irq(sbd, &emc->tx_irq); + sysbus_init_irq(sbd, &emc->rx_irq); + + qemu_macaddr_default_if_unset(&emc->conf.macaddr); + emc->nic = qemu_new_nic(&net_npcm7xx_emc_info, &emc->conf, + object_get_typename(OBJECT(dev)), dev->id, emc); + qemu_format_nic_info_str(qemu_get_queue(emc->nic), emc->conf.macaddr.a); +} + +static void npcm7xx_emc_unrealize(DeviceState *dev) +{ + NPCM7xxEMCState *emc = NPCM7XX_EMC(dev); + + qemu_del_nic(emc->nic); +} + +static const VMStateDescription vmstate_npcm7xx_emc = { + .name = TYPE_NPCM7XX_EMC, + .version_id = 0, + .minimum_version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_UINT8(emc_num, NPCM7xxEMCState), + VMSTATE_UINT32_ARRAY(regs, NPCM7xxEMCState, NPCM7XX_NUM_EMC_REGS), + VMSTATE_BOOL(tx_active, NPCM7xxEMCState), + VMSTATE_BOOL(rx_active, NPCM7xxEMCState), + VMSTATE_END_OF_LIST(), + }, +}; + +static Property npcm7xx_emc_properties[] = { + DEFINE_NIC_PROPERTIES(NPCM7xxEMCState, conf), + DEFINE_PROP_END_OF_LIST(), +}; + +static void npcm7xx_emc_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + set_bit(DEVICE_CATEGORY_NETWORK, dc->categories); + dc->desc = "NPCM7xx EMC Controller"; + dc->realize = npcm7xx_emc_realize; + dc->unrealize = npcm7xx_emc_unrealize; + dc->reset = npcm7xx_emc_reset; + dc->vmsd = &vmstate_npcm7xx_emc; + device_class_set_props(dc, npcm7xx_emc_properties); +} + +static const TypeInfo npcm7xx_emc_info = { + .name = TYPE_NPCM7XX_EMC, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(NPCM7xxEMCState), + .class_init = npcm7xx_emc_class_init, +}; + +static void npcm7xx_emc_register_type(void) +{ + type_register_static(&npcm7xx_emc_info); +} + +type_init(npcm7xx_emc_register_type) diff --git a/hw/net/opencores_eth.c b/hw/net/opencores_eth.c new file mode 100644 index 000000000..0b3dc3146 --- /dev/null +++ b/hw/net/opencores_eth.c @@ -0,0 +1,773 @@ +/* + * OpenCores Ethernet MAC 10/100 + subset of + * National Semiconductors DP83848C 10/100 PHY + * + * http://opencores.org/svnget,ethmac?file=%2Ftrunk%2F%2Fdoc%2Feth_speci.pdf + * http://cache.national.com/ds/DP/DP83848C.pdf + * + * Copyright (c) 2011, Max Filippov, Open Source and Linux Lab. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Open Source and Linux Lab nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "qemu/osdep.h" +#include "hw/irq.h" +#include "hw/net/mii.h" +#include "hw/qdev-properties.h" +#include "hw/sysbus.h" +#include "net/net.h" +#include "qemu/module.h" +#include "net/eth.h" +#include "trace.h" +#include "qom/object.h" + +/* RECSMALL is not used because it breaks tap networking in linux: + * incoming ARP responses are too short + */ +#undef USE_RECSMALL + +#define GET_FIELD(v, field) (((v) & (field)) >> (field ## _LBN)) +#define GET_REGBIT(s, reg, field) ((s)->regs[reg] & (reg ## _ ## field)) +#define GET_REGFIELD(s, reg, field) \ + GET_FIELD((s)->regs[reg], reg ## _ ## field) + +#define SET_FIELD(v, field, data) \ + ((v) = (((v) & ~(field)) | (((data) << (field ## _LBN)) & (field)))) +#define SET_REGFIELD(s, reg, field, data) \ + SET_FIELD((s)->regs[reg], reg ## _ ## field, data) + +/* PHY MII registers */ +enum { + MII_REG_MAX = 16, +}; + +typedef struct Mii { + uint16_t regs[MII_REG_MAX]; + bool link_ok; +} Mii; + +static void mii_set_link(Mii *s, bool link_ok) +{ + if (link_ok) { + s->regs[MII_BMSR] |= MII_BMSR_LINK_ST; + s->regs[MII_ANLPAR] |= MII_ANLPAR_TXFD | MII_ANLPAR_TX | + MII_ANLPAR_10FD | MII_ANLPAR_10 | MII_ANLPAR_CSMACD; + } else { + s->regs[MII_BMSR] &= ~MII_BMSR_LINK_ST; + s->regs[MII_ANLPAR] &= 0x01ff; + } + s->link_ok = link_ok; +} + +static void mii_reset(Mii *s) +{ + memset(s->regs, 0, sizeof(s->regs)); + s->regs[MII_BMCR] = MII_BMCR_AUTOEN; + s->regs[MII_BMSR] = MII_BMSR_100TX_FD | MII_BMSR_100TX_HD | + MII_BMSR_10T_FD | MII_BMSR_10T_HD | MII_BMSR_MFPS | + MII_BMSR_AN_COMP | MII_BMSR_AUTONEG; + s->regs[MII_PHYID1] = 0x2000; + s->regs[MII_PHYID2] = 0x5c90; + s->regs[MII_ANAR] = MII_ANAR_TXFD | MII_ANAR_TX | + MII_ANAR_10FD | MII_ANAR_10 | MII_ANAR_CSMACD; + mii_set_link(s, s->link_ok); +} + +static void mii_ro(Mii *s, uint16_t v) +{ +} + +static void mii_write_bmcr(Mii *s, uint16_t v) +{ + if (v & MII_BMCR_RESET) { + mii_reset(s); + } else { + s->regs[MII_BMCR] = v; + } +} + +static void mii_write_host(Mii *s, unsigned idx, uint16_t v) +{ + static void (*reg_write[MII_REG_MAX])(Mii *s, uint16_t v) = { + [MII_BMCR] = mii_write_bmcr, + [MII_BMSR] = mii_ro, + [MII_PHYID1] = mii_ro, + [MII_PHYID2] = mii_ro, + }; + + if (idx < MII_REG_MAX) { + trace_open_eth_mii_write(idx, v); + if (reg_write[idx]) { + reg_write[idx](s, v); + } else { + s->regs[idx] = v; + } + } +} + +static uint16_t mii_read_host(Mii *s, unsigned idx) +{ + trace_open_eth_mii_read(idx, s->regs[idx]); + return s->regs[idx]; +} + +/* OpenCores Ethernet registers */ +enum { + MODER, + INT_SOURCE, + INT_MASK, + IPGT, + IPGR1, + IPGR2, + PACKETLEN, + COLLCONF, + TX_BD_NUM, + CTRLMODER, + MIIMODER, + MIICOMMAND, + MIIADDRESS, + MIITX_DATA, + MIIRX_DATA, + MIISTATUS, + MAC_ADDR0, + MAC_ADDR1, + HASH0, + HASH1, + TXCTRL, + 
REG_MAX, +}; + +enum { + MODER_RECSMALL = 0x10000, + MODER_PAD = 0x8000, + MODER_HUGEN = 0x4000, + MODER_RST = 0x800, + MODER_LOOPBCK = 0x80, + MODER_PRO = 0x20, + MODER_IAM = 0x10, + MODER_BRO = 0x8, + MODER_TXEN = 0x2, + MODER_RXEN = 0x1, +}; + +enum { + INT_SOURCE_BUSY = 0x10, + INT_SOURCE_RXB = 0x4, + INT_SOURCE_TXB = 0x1, +}; + +enum { + PACKETLEN_MINFL = 0xffff0000, + PACKETLEN_MINFL_LBN = 16, + PACKETLEN_MAXFL = 0xffff, + PACKETLEN_MAXFL_LBN = 0, +}; + +enum { + MIICOMMAND_WCTRLDATA = 0x4, + MIICOMMAND_RSTAT = 0x2, + MIICOMMAND_SCANSTAT = 0x1, +}; + +enum { + MIIADDRESS_RGAD = 0x1f00, + MIIADDRESS_RGAD_LBN = 8, + MIIADDRESS_FIAD = 0x1f, + MIIADDRESS_FIAD_LBN = 0, +}; + +enum { + MIITX_DATA_CTRLDATA = 0xffff, + MIITX_DATA_CTRLDATA_LBN = 0, +}; + +enum { + MIIRX_DATA_PRSD = 0xffff, + MIIRX_DATA_PRSD_LBN = 0, +}; + +enum { + MIISTATUS_LINKFAIL = 0x1, + MIISTATUS_LINKFAIL_LBN = 0, +}; + +enum { + MAC_ADDR0_BYTE2 = 0xff000000, + MAC_ADDR0_BYTE2_LBN = 24, + MAC_ADDR0_BYTE3 = 0xff0000, + MAC_ADDR0_BYTE3_LBN = 16, + MAC_ADDR0_BYTE4 = 0xff00, + MAC_ADDR0_BYTE4_LBN = 8, + MAC_ADDR0_BYTE5 = 0xff, + MAC_ADDR0_BYTE5_LBN = 0, +}; + +enum { + MAC_ADDR1_BYTE0 = 0xff00, + MAC_ADDR1_BYTE0_LBN = 8, + MAC_ADDR1_BYTE1 = 0xff, + MAC_ADDR1_BYTE1_LBN = 0, +}; + +enum { + TXD_LEN = 0xffff0000, + TXD_LEN_LBN = 16, + TXD_RD = 0x8000, + TXD_IRQ = 0x4000, + TXD_WR = 0x2000, + TXD_PAD = 0x1000, + TXD_CRC = 0x800, + TXD_UR = 0x100, + TXD_RTRY = 0xf0, + TXD_RTRY_LBN = 4, + TXD_RL = 0x8, + TXD_LC = 0x4, + TXD_DF = 0x2, + TXD_CS = 0x1, +}; + +enum { + RXD_LEN = 0xffff0000, + RXD_LEN_LBN = 16, + RXD_E = 0x8000, + RXD_IRQ = 0x4000, + RXD_WRAP = 0x2000, + RXD_CF = 0x100, + RXD_M = 0x80, + RXD_OR = 0x40, + RXD_IS = 0x20, + RXD_DN = 0x10, + RXD_TL = 0x8, + RXD_SF = 0x4, + RXD_CRC = 0x2, + RXD_LC = 0x1, +}; + +typedef struct desc { + uint32_t len_flags; + uint32_t buf_ptr; +} desc; + +#define DEFAULT_PHY 1 + +#define TYPE_OPEN_ETH "open_eth" +OBJECT_DECLARE_SIMPLE_TYPE(OpenEthState, OPEN_ETH) + +struct OpenEthState { + SysBusDevice parent_obj; + + NICState *nic; + NICConf conf; + MemoryRegion reg_io; + MemoryRegion desc_io; + qemu_irq irq; + + Mii mii; + uint32_t regs[REG_MAX]; + unsigned tx_desc; + unsigned rx_desc; + desc desc[128]; +}; + +static desc *rx_desc(OpenEthState *s) +{ + return s->desc + s->rx_desc; +} + +static desc *tx_desc(OpenEthState *s) +{ + return s->desc + s->tx_desc; +} + +static void open_eth_update_irq(OpenEthState *s, + uint32_t old, uint32_t new) +{ + if (!old != !new) { + trace_open_eth_update_irq(new); + qemu_set_irq(s->irq, new); + } +} + +static void open_eth_int_source_write(OpenEthState *s, + uint32_t val) +{ + uint32_t old_val = s->regs[INT_SOURCE]; + + s->regs[INT_SOURCE] = val; + open_eth_update_irq(s, old_val & s->regs[INT_MASK], + s->regs[INT_SOURCE] & s->regs[INT_MASK]); +} + +static void open_eth_set_link_status(NetClientState *nc) +{ + OpenEthState *s = qemu_get_nic_opaque(nc); + + if (GET_REGBIT(s, MIICOMMAND, SCANSTAT)) { + SET_REGFIELD(s, MIISTATUS, LINKFAIL, nc->link_down); + } + mii_set_link(&s->mii, !nc->link_down); +} + +static void open_eth_reset(void *opaque) +{ + OpenEthState *s = opaque; + + memset(s->regs, 0, sizeof(s->regs)); + s->regs[MODER] = 0xa000; + s->regs[IPGT] = 0x12; + s->regs[IPGR1] = 0xc; + s->regs[IPGR2] = 0x12; + s->regs[PACKETLEN] = 0x400600; + s->regs[COLLCONF] = 0xf003f; + s->regs[TX_BD_NUM] = 0x40; + s->regs[MIIMODER] = 0x64; + + s->tx_desc = 0; + s->rx_desc = 0x40; + + mii_reset(&s->mii); + open_eth_set_link_status(qemu_get_queue(s->nic)); +} + +static 
bool open_eth_can_receive(NetClientState *nc)
+{
+    OpenEthState *s = qemu_get_nic_opaque(nc);
+
+    return GET_REGBIT(s, MODER, RXEN) && (s->regs[TX_BD_NUM] < 0x80);
+}
+
+static ssize_t open_eth_receive(NetClientState *nc,
+                                const uint8_t *buf, size_t size)
+{
+    OpenEthState *s = qemu_get_nic_opaque(nc);
+    size_t maxfl = GET_REGFIELD(s, PACKETLEN, MAXFL);
+    size_t minfl = GET_REGFIELD(s, PACKETLEN, MINFL);
+    size_t fcsl = 4;
+    bool miss = true;
+
+    trace_open_eth_receive((unsigned)size);
+
+    if (size >= 6) {
+        static const uint8_t bcast_addr[] = {
+            0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+        };
+        if (memcmp(buf, bcast_addr, sizeof(bcast_addr)) == 0) {
+            miss = GET_REGBIT(s, MODER, BRO);
+        } else if ((buf[0] & 0x1) || GET_REGBIT(s, MODER, IAM)) {
+            unsigned mcast_idx = net_crc32(buf, ETH_ALEN) >> 26;
+            miss = !(s->regs[HASH0 + mcast_idx / 32] &
+                    (1 << (mcast_idx % 32)));
+            trace_open_eth_receive_mcast(
+                    mcast_idx, s->regs[HASH0], s->regs[HASH1]);
+        } else {
+            miss = GET_REGFIELD(s, MAC_ADDR1, BYTE0) != buf[0] ||
+                   GET_REGFIELD(s, MAC_ADDR1, BYTE1) != buf[1] ||
+                   GET_REGFIELD(s, MAC_ADDR0, BYTE2) != buf[2] ||
+                   GET_REGFIELD(s, MAC_ADDR0, BYTE3) != buf[3] ||
+                   GET_REGFIELD(s, MAC_ADDR0, BYTE4) != buf[4] ||
+                   GET_REGFIELD(s, MAC_ADDR0, BYTE5) != buf[5];
+        }
+    }
+
+    if (miss && !GET_REGBIT(s, MODER, PRO)) {
+        trace_open_eth_receive_reject();
+        return size;
+    }
+
+#ifdef USE_RECSMALL
+    if (GET_REGBIT(s, MODER, RECSMALL) || size >= minfl) {
+#else
+    {
+#endif
+        static const uint8_t zero[64] = {0};
+        desc *desc = rx_desc(s);
+        size_t copy_size = GET_REGBIT(s, MODER, HUGEN) ? 65536 : maxfl;
+
+        if (!(desc->len_flags & RXD_E)) {
+            open_eth_int_source_write(s,
+                    s->regs[INT_SOURCE] | INT_SOURCE_BUSY);
+            return size;
+        }
+
+        desc->len_flags &= ~(RXD_CF | RXD_M | RXD_OR |
+                RXD_IS | RXD_DN | RXD_TL | RXD_SF | RXD_CRC | RXD_LC);
+
+        if (copy_size > size) {
+            copy_size = size;
+        } else {
+            fcsl = 0;
+        }
+        if (miss) {
+            desc->len_flags |= RXD_M;
+        }
+        if (GET_REGBIT(s, MODER, HUGEN) && size > maxfl) {
+            desc->len_flags |= RXD_TL;
+        }
+#ifdef USE_RECSMALL
+        if (size < minfl) {
+            desc->len_flags |= RXD_SF;
+        }
+#endif
+
+        cpu_physical_memory_write(desc->buf_ptr, buf, copy_size);
+
+        if (GET_REGBIT(s, MODER, PAD) && copy_size < minfl) {
+            if (minfl - copy_size > fcsl) {
+                fcsl = 0;
+            } else {
+                fcsl -= minfl - copy_size;
+            }
+            while (copy_size < minfl) {
+                size_t zero_sz = minfl - copy_size < sizeof(zero) ?
+                    minfl - copy_size : sizeof(zero);
+
+                cpu_physical_memory_write(desc->buf_ptr + copy_size,
+                        zero, zero_sz);
+                copy_size += zero_sz;
+            }
+        }
+
+        /* There's no FCS in the frames handed to us by QEMU; zero-fill it.
+         * Don't do this if the frame was cut at MAXFL or padded with 4 or
+         * more bytes up to MINFL.
+ */ + cpu_physical_memory_write(desc->buf_ptr + copy_size, zero, fcsl); + copy_size += fcsl; + + SET_FIELD(desc->len_flags, RXD_LEN, copy_size); + + if ((desc->len_flags & RXD_WRAP) || s->rx_desc == 0x7f) { + s->rx_desc = s->regs[TX_BD_NUM]; + } else { + ++s->rx_desc; + } + desc->len_flags &= ~RXD_E; + + trace_open_eth_receive_desc(desc->buf_ptr, desc->len_flags); + + if (desc->len_flags & RXD_IRQ) { + open_eth_int_source_write(s, + s->regs[INT_SOURCE] | INT_SOURCE_RXB); + } + } + return size; +} + +static NetClientInfo net_open_eth_info = { + .type = NET_CLIENT_DRIVER_NIC, + .size = sizeof(NICState), + .can_receive = open_eth_can_receive, + .receive = open_eth_receive, + .link_status_changed = open_eth_set_link_status, +}; + +static void open_eth_start_xmit(OpenEthState *s, desc *tx) +{ + uint8_t *buf = NULL; + uint8_t buffer[0x600]; + unsigned len = GET_FIELD(tx->len_flags, TXD_LEN); + unsigned tx_len = len; + + if ((tx->len_flags & TXD_PAD) && + tx_len < GET_REGFIELD(s, PACKETLEN, MINFL)) { + tx_len = GET_REGFIELD(s, PACKETLEN, MINFL); + } + if (!GET_REGBIT(s, MODER, HUGEN) && + tx_len > GET_REGFIELD(s, PACKETLEN, MAXFL)) { + tx_len = GET_REGFIELD(s, PACKETLEN, MAXFL); + } + + trace_open_eth_start_xmit(tx->buf_ptr, len, tx_len); + + if (tx_len > sizeof(buffer)) { + buf = g_new(uint8_t, tx_len); + } else { + buf = buffer; + } + if (len > tx_len) { + len = tx_len; + } + cpu_physical_memory_read(tx->buf_ptr, buf, len); + if (tx_len > len) { + memset(buf + len, 0, tx_len - len); + } + qemu_send_packet(qemu_get_queue(s->nic), buf, tx_len); + if (tx_len > sizeof(buffer)) { + g_free(buf); + } + + if (tx->len_flags & TXD_WR) { + s->tx_desc = 0; + } else { + ++s->tx_desc; + if (s->tx_desc >= s->regs[TX_BD_NUM]) { + s->tx_desc = 0; + } + } + tx->len_flags &= ~(TXD_RD | TXD_UR | + TXD_RTRY | TXD_RL | TXD_LC | TXD_DF | TXD_CS); + if (tx->len_flags & TXD_IRQ) { + open_eth_int_source_write(s, s->regs[INT_SOURCE] | INT_SOURCE_TXB); + } + +} + +static void open_eth_check_start_xmit(OpenEthState *s) +{ + desc *tx = tx_desc(s); + if (GET_REGBIT(s, MODER, TXEN) && s->regs[TX_BD_NUM] > 0 && + (tx->len_flags & TXD_RD) && + GET_FIELD(tx->len_flags, TXD_LEN) > 4) { + open_eth_start_xmit(s, tx); + } +} + +static uint64_t open_eth_reg_read(void *opaque, + hwaddr addr, unsigned int size) +{ + static uint32_t (*reg_read[REG_MAX])(OpenEthState *s) = { + }; + OpenEthState *s = opaque; + unsigned idx = addr / 4; + uint64_t v = 0; + + if (idx < REG_MAX) { + if (reg_read[idx]) { + v = reg_read[idx](s); + } else { + v = s->regs[idx]; + } + } + trace_open_eth_reg_read((uint32_t)addr, (uint32_t)v); + return v; +} + +static void open_eth_notify_can_receive(OpenEthState *s) +{ + NetClientState *nc = qemu_get_queue(s->nic); + + if (open_eth_can_receive(nc)) { + qemu_flush_queued_packets(nc); + } +} + +static void open_eth_ro(OpenEthState *s, uint32_t val) +{ +} + +static void open_eth_moder_host_write(OpenEthState *s, uint32_t val) +{ + uint32_t set = val & ~s->regs[MODER]; + + if (set & MODER_RST) { + open_eth_reset(s); + } + + s->regs[MODER] = val; + + if (set & MODER_RXEN) { + s->rx_desc = s->regs[TX_BD_NUM]; + open_eth_notify_can_receive(s); + } + if (set & MODER_TXEN) { + s->tx_desc = 0; + open_eth_check_start_xmit(s); + } +} + +static void open_eth_int_source_host_write(OpenEthState *s, uint32_t val) +{ + uint32_t old = s->regs[INT_SOURCE]; + + s->regs[INT_SOURCE] &= ~val; + open_eth_update_irq(s, old & s->regs[INT_MASK], + s->regs[INT_SOURCE] & s->regs[INT_MASK]); +} + +static void 
open_eth_int_mask_host_write(OpenEthState *s, uint32_t val) +{ + uint32_t old = s->regs[INT_MASK]; + + s->regs[INT_MASK] = val; + open_eth_update_irq(s, s->regs[INT_SOURCE] & old, + s->regs[INT_SOURCE] & s->regs[INT_MASK]); +} + +static void open_eth_tx_bd_num_host_write(OpenEthState *s, uint32_t val) +{ + if (val < 0x80) { + bool enable = s->regs[TX_BD_NUM] == 0x80; + + s->regs[TX_BD_NUM] = val; + if (enable) { + open_eth_notify_can_receive(s); + } + } +} + +static void open_eth_mii_command_host_write(OpenEthState *s, uint32_t val) +{ + unsigned fiad = GET_REGFIELD(s, MIIADDRESS, FIAD); + unsigned rgad = GET_REGFIELD(s, MIIADDRESS, RGAD); + + if (val & MIICOMMAND_WCTRLDATA) { + if (fiad == DEFAULT_PHY) { + mii_write_host(&s->mii, rgad, + GET_REGFIELD(s, MIITX_DATA, CTRLDATA)); + } + } + if (val & MIICOMMAND_RSTAT) { + if (fiad == DEFAULT_PHY) { + SET_REGFIELD(s, MIIRX_DATA, PRSD, + mii_read_host(&s->mii, rgad)); + } else { + s->regs[MIIRX_DATA] = 0xffff; + } + SET_REGFIELD(s, MIISTATUS, LINKFAIL, qemu_get_queue(s->nic)->link_down); + } +} + +static void open_eth_mii_tx_host_write(OpenEthState *s, uint32_t val) +{ + SET_REGFIELD(s, MIITX_DATA, CTRLDATA, val); + if (GET_REGFIELD(s, MIIADDRESS, FIAD) == DEFAULT_PHY) { + mii_write_host(&s->mii, GET_REGFIELD(s, MIIADDRESS, RGAD), + GET_REGFIELD(s, MIITX_DATA, CTRLDATA)); + } +} + +static void open_eth_reg_write(void *opaque, + hwaddr addr, uint64_t val, unsigned int size) +{ + static void (*reg_write[REG_MAX])(OpenEthState *s, uint32_t val) = { + [MODER] = open_eth_moder_host_write, + [INT_SOURCE] = open_eth_int_source_host_write, + [INT_MASK] = open_eth_int_mask_host_write, + [TX_BD_NUM] = open_eth_tx_bd_num_host_write, + [MIICOMMAND] = open_eth_mii_command_host_write, + [MIITX_DATA] = open_eth_mii_tx_host_write, + [MIISTATUS] = open_eth_ro, + }; + OpenEthState *s = opaque; + unsigned idx = addr / 4; + + if (idx < REG_MAX) { + trace_open_eth_reg_write((uint32_t)addr, (uint32_t)val); + if (reg_write[idx]) { + reg_write[idx](s, val); + } else { + s->regs[idx] = val; + } + } +} + +static uint64_t open_eth_desc_read(void *opaque, + hwaddr addr, unsigned int size) +{ + OpenEthState *s = opaque; + uint64_t v = 0; + + addr &= 0x3ff; + memcpy(&v, (uint8_t *)s->desc + addr, size); + trace_open_eth_desc_read((uint32_t)addr, (uint32_t)v); + return v; +} + +static void open_eth_desc_write(void *opaque, + hwaddr addr, uint64_t val, unsigned int size) +{ + OpenEthState *s = opaque; + + addr &= 0x3ff; + trace_open_eth_desc_write((uint32_t)addr, (uint32_t)val); + memcpy((uint8_t *)s->desc + addr, &val, size); + open_eth_check_start_xmit(s); +} + + +static const MemoryRegionOps open_eth_reg_ops = { + .read = open_eth_reg_read, + .write = open_eth_reg_write, +}; + +static const MemoryRegionOps open_eth_desc_ops = { + .read = open_eth_desc_read, + .write = open_eth_desc_write, +}; + +static void sysbus_open_eth_realize(DeviceState *dev, Error **errp) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + OpenEthState *s = OPEN_ETH(dev); + + memory_region_init_io(&s->reg_io, OBJECT(dev), &open_eth_reg_ops, s, + "open_eth.regs", 0x54); + sysbus_init_mmio(sbd, &s->reg_io); + + memory_region_init_io(&s->desc_io, OBJECT(dev), &open_eth_desc_ops, s, + "open_eth.desc", 0x400); + sysbus_init_mmio(sbd, &s->desc_io); + + sysbus_init_irq(sbd, &s->irq); + + s->nic = qemu_new_nic(&net_open_eth_info, &s->conf, + object_get_typename(OBJECT(s)), dev->id, s); +} + +static void qdev_open_eth_reset(DeviceState *dev) +{ + OpenEthState *d = OPEN_ETH(dev); + + open_eth_reset(d); +} + 
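+/*
+ * A minimal usage sketch of the register interface modelled above (the
+ * write32()/base helpers are hypothetical, shown only for illustration);
+ * a register's offset is its enum index times 4, as decoded in
+ * open_eth_reg_write():
+ *
+ *     write32(base + TX_BD_NUM * 4, 0x40);   // split ring: 64 TX, 64 RX BDs
+ *     write32(base + INT_MASK * 4, INT_SOURCE_TXB | INT_SOURCE_RXB);
+ *     write32(base + MODER * 4, MODER_PAD | MODER_TXEN | MODER_RXEN);
+ *
+ * The reg_write[] dispatch table routes these to
+ * open_eth_tx_bd_num_host_write(), open_eth_int_mask_host_write() and
+ * open_eth_moder_host_write() respectively.
+ */
+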
+static Property open_eth_properties[] = { + DEFINE_NIC_PROPERTIES(OpenEthState, conf), + DEFINE_PROP_END_OF_LIST(), +}; + +static void open_eth_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = sysbus_open_eth_realize; + set_bit(DEVICE_CATEGORY_NETWORK, dc->categories); + dc->desc = "Opencores 10/100 Mbit Ethernet"; + dc->reset = qdev_open_eth_reset; + device_class_set_props(dc, open_eth_properties); +} + +static const TypeInfo open_eth_info = { + .name = TYPE_OPEN_ETH, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(OpenEthState), + .class_init = open_eth_class_init, +}; + +static void open_eth_register_types(void) +{ + type_register_static(&open_eth_info); +} + +type_init(open_eth_register_types) diff --git a/hw/net/pcnet-pci.c b/hw/net/pcnet-pci.c new file mode 100644 index 000000000..95d27102a --- /dev/null +++ b/hw/net/pcnet-pci.c @@ -0,0 +1,295 @@ +/* + * QEMU AMD PC-Net II (Am79C970A) PCI emulation + * + * Copyright (c) 2004 Antony T Curtis + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +/* This software was written to be compatible with the specification: + * AMD Am79C970A PCnet-PCI II Ethernet Controller Data-Sheet + * AMD Publication# 19436 Rev:E Amendment/0 Issue Date: June 2000 + */ + +#include "qemu/osdep.h" +#include "hw/irq.h" +#include "hw/pci/pci.h" +#include "hw/qdev-properties.h" +#include "migration/vmstate.h" +#include "net/net.h" +#include "qemu/module.h" +#include "qemu/timer.h" +#include "sysemu/dma.h" +#include "sysemu/sysemu.h" +#include "trace.h" + +#include "pcnet.h" +#include "qom/object.h" + +//#define PCNET_DEBUG +//#define PCNET_DEBUG_IO +//#define PCNET_DEBUG_BCR +//#define PCNET_DEBUG_CSR +//#define PCNET_DEBUG_RMD +//#define PCNET_DEBUG_TMD +//#define PCNET_DEBUG_MATCH + +#define TYPE_PCI_PCNET "pcnet" + +OBJECT_DECLARE_SIMPLE_TYPE(PCIPCNetState, PCI_PCNET) + +struct PCIPCNetState { + /*< private >*/ + PCIDevice parent_obj; + /*< public >*/ + + PCNetState state; + MemoryRegion io_bar; +}; + +static void pcnet_aprom_writeb(void *opaque, uint32_t addr, uint32_t val) +{ + PCNetState *s = opaque; + + trace_pcnet_aprom_writeb(opaque, addr, val); + if (BCR_APROMWE(s)) { + s->prom[addr & 15] = val; + } +} + +static uint32_t pcnet_aprom_readb(void *opaque, uint32_t addr) +{ + PCNetState *s = opaque; + uint32_t val = s->prom[addr & 15]; + + trace_pcnet_aprom_readb(opaque, addr, val); + return val; +} + +static uint64_t pcnet_ioport_read(void *opaque, hwaddr addr, + unsigned size) +{ + PCNetState *d = opaque; + + trace_pcnet_ioport_read(opaque, addr, size); + if (addr < 0x10) { + if (!BCR_DWIO(d) && size == 1) { + return pcnet_aprom_readb(d, addr); + } else if (!BCR_DWIO(d) && (addr & 1) == 0 && size == 2) { + return pcnet_aprom_readb(d, addr) | + (pcnet_aprom_readb(d, addr + 1) << 8); + } else if (BCR_DWIO(d) && (addr & 3) == 0 && size == 4) { + return pcnet_aprom_readb(d, addr) | + (pcnet_aprom_readb(d, addr + 1) << 8) | + (pcnet_aprom_readb(d, addr + 2) << 16) | + (pcnet_aprom_readb(d, addr + 3) << 24); + } + } else { + if (size == 2) { + return pcnet_ioport_readw(d, addr); + } else if (size == 4) { + return pcnet_ioport_readl(d, addr); + } + } + return ((uint64_t)1 << (size * 8)) - 1; +} + +static void pcnet_ioport_write(void *opaque, hwaddr addr, + uint64_t data, unsigned size) +{ + PCNetState *d = opaque; + + trace_pcnet_ioport_write(opaque, addr, data, size); + if (addr < 0x10) { + if (!BCR_DWIO(d) && size == 1) { + pcnet_aprom_writeb(d, addr, data); + } else if (!BCR_DWIO(d) && (addr & 1) == 0 && size == 2) { + pcnet_aprom_writeb(d, addr, data & 0xff); + pcnet_aprom_writeb(d, addr + 1, data >> 8); + } else if (BCR_DWIO(d) && (addr & 3) == 0 && size == 4) { + pcnet_aprom_writeb(d, addr, data & 0xff); + pcnet_aprom_writeb(d, addr + 1, (data >> 8) & 0xff); + pcnet_aprom_writeb(d, addr + 2, (data >> 16) & 0xff); + pcnet_aprom_writeb(d, addr + 3, data >> 24); + } + } else { + if (size == 2) { + pcnet_ioport_writew(d, addr, data); + } else if (size == 4) { + pcnet_ioport_writel(d, addr, data); + } + } +} + +static const MemoryRegionOps pcnet_io_ops = { + .read = pcnet_ioport_read, + .write = pcnet_ioport_write, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static const VMStateDescription vmstate_pci_pcnet = { + .name = "pcnet", + .version_id = 3, + .minimum_version_id = 2, + .fields = (VMStateField[]) { + VMSTATE_PCI_DEVICE(parent_obj, PCIPCNetState), + VMSTATE_STRUCT(state, PCIPCNetState, 0, vmstate_pcnet, PCNetState), + VMSTATE_END_OF_LIST() + } +}; + +/* PCI interface */ + +static const MemoryRegionOps pcnet_mmio_ops = { + .read = 
pcnet_ioport_read,
+    .write = pcnet_ioport_write,
+    .valid.min_access_size = 1,
+    .valid.max_access_size = 4,
+    .impl.min_access_size = 1,
+    .impl.max_access_size = 4,
+    .endianness = DEVICE_LITTLE_ENDIAN,
+};
+
+static void pci_physical_memory_write(void *dma_opaque, hwaddr addr,
+                                      uint8_t *buf, int len, int do_bswap)
+{
+    pci_dma_write(dma_opaque, addr, buf, len);
+}
+
+static void pci_physical_memory_read(void *dma_opaque, hwaddr addr,
+                                     uint8_t *buf, int len, int do_bswap)
+{
+    pci_dma_read(dma_opaque, addr, buf, len);
+}
+
+static void pci_pcnet_uninit(PCIDevice *dev)
+{
+    PCIPCNetState *d = PCI_PCNET(dev);
+
+    qemu_free_irq(d->state.irq);
+    timer_free(d->state.poll_timer);
+    qemu_del_nic(d->state.nic);
+}
+
+static NetClientInfo net_pci_pcnet_info = {
+    .type = NET_CLIENT_DRIVER_NIC,
+    .size = sizeof(NICState),
+    .receive = pcnet_receive,
+    .link_status_changed = pcnet_set_link_status,
+};
+
+static void pci_pcnet_realize(PCIDevice *pci_dev, Error **errp)
+{
+    PCIPCNetState *d = PCI_PCNET(pci_dev);
+    PCNetState *s = &d->state;
+    uint8_t *pci_conf;
+
+#if 0
+    printf("sizeof(RMD)=%d, sizeof(TMD)=%d\n",
+           sizeof(struct pcnet_RMD), sizeof(struct pcnet_TMD));
+#endif
+
+    pci_conf = pci_dev->config;
+
+    pci_set_word(pci_conf + PCI_STATUS,
+                 PCI_STATUS_FAST_BACK | PCI_STATUS_DEVSEL_MEDIUM);
+
+    pci_set_word(pci_conf + PCI_SUBSYSTEM_VENDOR_ID, 0x0);
+    pci_set_word(pci_conf + PCI_SUBSYSTEM_ID, 0x0);
+
+    pci_conf[PCI_INTERRUPT_PIN] = 1; /* interrupt pin A */
+    pci_conf[PCI_MIN_GNT] = 0x06;
+    pci_conf[PCI_MAX_LAT] = 0xff;
+
+    /* Handler for memory-mapped I/O */
+    memory_region_init_io(&d->state.mmio, OBJECT(d), &pcnet_mmio_ops, s,
+                          "pcnet-mmio", PCNET_PNPMMIO_SIZE);
+
+    memory_region_init_io(&d->io_bar, OBJECT(d), &pcnet_io_ops, s, "pcnet-io",
+                          PCNET_IOPORT_SIZE);
+    pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_IO, &d->io_bar);
+
+    pci_register_bar(pci_dev, 1, 0, &s->mmio);
+
+    s->irq = pci_allocate_irq(pci_dev);
+    s->phys_mem_read = pci_physical_memory_read;
+    s->phys_mem_write = pci_physical_memory_write;
+    s->dma_opaque = DEVICE(pci_dev);
+
+    pcnet_common_init(DEVICE(pci_dev), s, &net_pci_pcnet_info);
+}
+
+static void pci_reset(DeviceState *dev)
+{
+    PCIPCNetState *d = PCI_PCNET(dev);
+
+    pcnet_h_reset(&d->state);
+}
+
+static void pcnet_instance_init(Object *obj)
+{
+    PCIPCNetState *d = PCI_PCNET(obj);
+    PCNetState *s = &d->state;
+
+    device_add_bootindex_property(obj, &s->conf.bootindex,
+                                  "bootindex", "/ethernet-phy@0",
+                                  DEVICE(obj));
+}
+
+static Property pcnet_properties[] = {
+    DEFINE_NIC_PROPERTIES(PCIPCNetState, state.conf),
+    DEFINE_PROP_END_OF_LIST(),
+};
+
+static void pcnet_class_init(ObjectClass *klass, void *data)
+{
+    DeviceClass *dc = DEVICE_CLASS(klass);
+    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
+
+    k->realize = pci_pcnet_realize;
+    k->exit = pci_pcnet_uninit;
+    k->romfile = "efi-pcnet.rom";
+    k->vendor_id = PCI_VENDOR_ID_AMD;
+    k->device_id = PCI_DEVICE_ID_AMD_LANCE;
+    k->revision = 0x10;
+    k->class_id = PCI_CLASS_NETWORK_ETHERNET;
+    dc->reset = pci_reset;
+    dc->vmsd = &vmstate_pci_pcnet;
+    device_class_set_props(dc, pcnet_properties);
+    set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
+}
+
+static const TypeInfo pcnet_info = {
+    .name = TYPE_PCI_PCNET,
+    .parent = TYPE_PCI_DEVICE,
+    .instance_size = sizeof(PCIPCNetState),
+    .class_init = pcnet_class_init,
+    .instance_init = pcnet_instance_init,
+    .interfaces = (InterfaceInfo[]) {
+        { INTERFACE_CONVENTIONAL_PCI_DEVICE },
+        { },
+    },
+};
+
+static void pci_pcnet_register_types(void)
+{
+
type_register_static(&pcnet_info); +} + +type_init(pci_pcnet_register_types) diff --git a/hw/net/pcnet.c b/hw/net/pcnet.c new file mode 100644 index 000000000..dcd3fc494 --- /dev/null +++ b/hw/net/pcnet.c @@ -0,0 +1,1752 @@ +/* + * QEMU AMD PC-Net II (Am79C970A) emulation + * + * Copyright (c) 2004 Antony T Curtis + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +/* This software was written to be compatible with the specification: + * AMD Am79C970A PCnet-PCI II Ethernet Controller Data-Sheet + * AMD Publication# 19436 Rev:E Amendment/0 Issue Date: June 2000 + */ + +/* + * On Sparc32, this is the Lance (Am7990) part of chip STP2000 (Master I/O), also + * produced as NCR89C100. See + * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR89C100.txt + * and + * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR92C990.txt + */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "hw/irq.h" +#include "hw/qdev-properties.h" +#include "migration/vmstate.h" +#include "net/net.h" +#include "net/eth.h" +#include "qemu/timer.h" +#include "trace.h" + +#include "pcnet.h" + +//#define PCNET_DEBUG +//#define PCNET_DEBUG_IO +//#define PCNET_DEBUG_BCR +//#define PCNET_DEBUG_CSR +//#define PCNET_DEBUG_RMD +//#define PCNET_DEBUG_TMD +//#define PCNET_DEBUG_MATCH + + +struct qemu_ether_header { + uint8_t ether_dhost[6]; + uint8_t ether_shost[6]; + uint16_t ether_type; +}; + +#define CSR_INIT(S) !!(((S)->csr[0])&0x0001) +#define CSR_STRT(S) !!(((S)->csr[0])&0x0002) +#define CSR_STOP(S) !!(((S)->csr[0])&0x0004) +#define CSR_TDMD(S) !!(((S)->csr[0])&0x0008) +#define CSR_TXON(S) !!(((S)->csr[0])&0x0010) +#define CSR_RXON(S) !!(((S)->csr[0])&0x0020) +#define CSR_INEA(S) !!(((S)->csr[0])&0x0040) +#define CSR_BSWP(S) !!(((S)->csr[3])&0x0004) +#define CSR_LAPPEN(S) !!(((S)->csr[3])&0x0020) +#define CSR_DXSUFLO(S) !!(((S)->csr[3])&0x0040) +#define CSR_ASTRP_RCV(S) !!(((S)->csr[4])&0x0800) +#define CSR_DPOLL(S) !!(((S)->csr[4])&0x1000) +#define CSR_SPND(S) !!(((S)->csr[5])&0x0001) +#define CSR_LTINTEN(S) !!(((S)->csr[5])&0x4000) +#define CSR_TOKINTD(S) !!(((S)->csr[5])&0x8000) +#define CSR_DRX(S) !!(((S)->csr[15])&0x0001) +#define CSR_DTX(S) !!(((S)->csr[15])&0x0002) +#define CSR_LOOP(S) !!(((S)->csr[15])&0x0004) +#define CSR_DXMTFCS(S) !!(((S)->csr[15])&0x0008) +#define CSR_INTL(S) !!(((S)->csr[15])&0x0040) +#define CSR_DRCVPA(S) !!(((S)->csr[15])&0x2000) +#define CSR_DRCVBC(S) !!(((S)->csr[15])&0x4000) +#define CSR_PROM(S) !!(((S)->csr[15])&0x8000) 
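+/*
+ * Worked example for the bit accessors above: with s->csr[0] = 0x0073
+ * (binary 0111 0011), CSR_INIT and CSR_STRT read as 1 (bits 0-1),
+ * CSR_STOP and CSR_TDMD as 0 (bits 2-3), and CSR_TXON, CSR_RXON and
+ * CSR_INEA as 1 (bits 4-6); the !! collapses each masked bit to 0 or 1.
+ */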
+ +#define CSR_CRBC(S) ((S)->csr[40]) +#define CSR_CRST(S) ((S)->csr[41]) +#define CSR_CXBC(S) ((S)->csr[42]) +#define CSR_CXST(S) ((S)->csr[43]) +#define CSR_NRBC(S) ((S)->csr[44]) +#define CSR_NRST(S) ((S)->csr[45]) +#define CSR_POLL(S) ((S)->csr[46]) +#define CSR_PINT(S) ((S)->csr[47]) +#define CSR_RCVRC(S) ((S)->csr[72]) +#define CSR_XMTRC(S) ((S)->csr[74]) +#define CSR_RCVRL(S) ((S)->csr[76]) +#define CSR_XMTRL(S) ((S)->csr[78]) +#define CSR_MISSC(S) ((S)->csr[112]) + +#define CSR_IADR(S) ((S)->csr[ 1] | ((uint32_t)(S)->csr[ 2] << 16)) +#define CSR_CRBA(S) ((S)->csr[18] | ((uint32_t)(S)->csr[19] << 16)) +#define CSR_CXBA(S) ((S)->csr[20] | ((uint32_t)(S)->csr[21] << 16)) +#define CSR_NRBA(S) ((S)->csr[22] | ((uint32_t)(S)->csr[23] << 16)) +#define CSR_BADR(S) ((S)->csr[24] | ((uint32_t)(S)->csr[25] << 16)) +#define CSR_NRDA(S) ((S)->csr[26] | ((uint32_t)(S)->csr[27] << 16)) +#define CSR_CRDA(S) ((S)->csr[28] | ((uint32_t)(S)->csr[29] << 16)) +#define CSR_BADX(S) ((S)->csr[30] | ((uint32_t)(S)->csr[31] << 16)) +#define CSR_NXDA(S) ((S)->csr[32] | ((uint32_t)(S)->csr[33] << 16)) +#define CSR_CXDA(S) ((S)->csr[34] | ((uint32_t)(S)->csr[35] << 16)) +#define CSR_NNRD(S) ((S)->csr[36] | ((uint32_t)(S)->csr[37] << 16)) +#define CSR_NNXD(S) ((S)->csr[38] | ((uint32_t)(S)->csr[39] << 16)) +#define CSR_PXDA(S) ((S)->csr[60] | ((uint32_t)(S)->csr[61] << 16)) +#define CSR_NXBA(S) ((S)->csr[64] | ((uint32_t)(S)->csr[65] << 16)) + +#define PHYSADDR(S,A) \ + (BCR_SSIZE32(S) ? (A) : (A) | ((0xff00 & (uint32_t)(S)->csr[2])<<16)) + +struct pcnet_initblk16 { + uint16_t mode; + uint16_t padr[3]; + uint16_t ladrf[4]; + uint32_t rdra; + uint32_t tdra; +}; + +struct pcnet_initblk32 { + uint16_t mode; + uint8_t rlen; + uint8_t tlen; + uint16_t padr[3]; + uint16_t _res; + uint16_t ladrf[4]; + uint32_t rdra; + uint32_t tdra; +}; + +struct pcnet_TMD { + uint32_t tbadr; + int16_t length; + int16_t status; + uint32_t misc; + uint32_t res; +}; + +#define TMDL_BCNT_MASK 0x0fff +#define TMDL_BCNT_SH 0 +#define TMDL_ONES_MASK 0xf000 +#define TMDL_ONES_SH 12 + +#define TMDS_BPE_MASK 0x0080 +#define TMDS_BPE_SH 7 +#define TMDS_ENP_MASK 0x0100 +#define TMDS_ENP_SH 8 +#define TMDS_STP_MASK 0x0200 +#define TMDS_STP_SH 9 +#define TMDS_DEF_MASK 0x0400 +#define TMDS_DEF_SH 10 +#define TMDS_ONE_MASK 0x0800 +#define TMDS_ONE_SH 11 +#define TMDS_LTINT_MASK 0x1000 +#define TMDS_LTINT_SH 12 +#define TMDS_NOFCS_MASK 0x2000 +#define TMDS_NOFCS_SH 13 +#define TMDS_ADDFCS_MASK TMDS_NOFCS_MASK +#define TMDS_ADDFCS_SH TMDS_NOFCS_SH +#define TMDS_ERR_MASK 0x4000 +#define TMDS_ERR_SH 14 +#define TMDS_OWN_MASK 0x8000 +#define TMDS_OWN_SH 15 + +#define TMDM_TRC_MASK 0x0000000f +#define TMDM_TRC_SH 0 +#define TMDM_TDR_MASK 0x03ff0000 +#define TMDM_TDR_SH 16 +#define TMDM_RTRY_MASK 0x04000000 +#define TMDM_RTRY_SH 26 +#define TMDM_LCAR_MASK 0x08000000 +#define TMDM_LCAR_SH 27 +#define TMDM_LCOL_MASK 0x10000000 +#define TMDM_LCOL_SH 28 +#define TMDM_EXDEF_MASK 0x20000000 +#define TMDM_EXDEF_SH 29 +#define TMDM_UFLO_MASK 0x40000000 +#define TMDM_UFLO_SH 30 +#define TMDM_BUFF_MASK 0x80000000 +#define TMDM_BUFF_SH 31 + +struct pcnet_RMD { + uint32_t rbadr; + int16_t buf_length; + int16_t status; + uint32_t msg_length; + uint32_t res; +}; + +#define RMDL_BCNT_MASK 0x0fff +#define RMDL_BCNT_SH 0 +#define RMDL_ONES_MASK 0xf000 +#define RMDL_ONES_SH 12 + +#define RMDS_BAM_MASK 0x0010 +#define RMDS_BAM_SH 4 +#define RMDS_LFAM_MASK 0x0020 +#define RMDS_LFAM_SH 5 +#define RMDS_PAM_MASK 0x0040 +#define RMDS_PAM_SH 6 +#define RMDS_BPE_MASK 0x0080 
+#define RMDS_BPE_SH 7 +#define RMDS_ENP_MASK 0x0100 +#define RMDS_ENP_SH 8 +#define RMDS_STP_MASK 0x0200 +#define RMDS_STP_SH 9 +#define RMDS_BUFF_MASK 0x0400 +#define RMDS_BUFF_SH 10 +#define RMDS_CRC_MASK 0x0800 +#define RMDS_CRC_SH 11 +#define RMDS_OFLO_MASK 0x1000 +#define RMDS_OFLO_SH 12 +#define RMDS_FRAM_MASK 0x2000 +#define RMDS_FRAM_SH 13 +#define RMDS_ERR_MASK 0x4000 +#define RMDS_ERR_SH 14 +#define RMDS_OWN_MASK 0x8000 +#define RMDS_OWN_SH 15 + +#define RMDM_MCNT_MASK 0x00000fff +#define RMDM_MCNT_SH 0 +#define RMDM_ZEROS_MASK 0x0000f000 +#define RMDM_ZEROS_SH 12 +#define RMDM_RPC_MASK 0x00ff0000 +#define RMDM_RPC_SH 16 +#define RMDM_RCC_MASK 0xff000000 +#define RMDM_RCC_SH 24 + +#define SET_FIELD(regp, name, field, value) \ + (*(regp) = (*(regp) & ~(name ## _ ## field ## _MASK)) \ + | ((value) << name ## _ ## field ## _SH)) + +#define GET_FIELD(reg, name, field) \ + (((reg) & name ## _ ## field ## _MASK) >> name ## _ ## field ## _SH) + +#define PRINT_TMD(T) printf( \ + "TMD0 : TBADR=0x%08x\n" \ + "TMD1 : OWN=%d, ERR=%d, FCS=%d, LTI=%d, " \ + "ONE=%d, DEF=%d, STP=%d, ENP=%d,\n" \ + " BPE=%d, BCNT=%d\n" \ + "TMD2 : BUF=%d, UFL=%d, EXD=%d, LCO=%d, " \ + "LCA=%d, RTR=%d,\n" \ + " TDR=%d, TRC=%d\n", \ + (T)->tbadr, \ + GET_FIELD((T)->status, TMDS, OWN), \ + GET_FIELD((T)->status, TMDS, ERR), \ + GET_FIELD((T)->status, TMDS, NOFCS), \ + GET_FIELD((T)->status, TMDS, LTINT), \ + GET_FIELD((T)->status, TMDS, ONE), \ + GET_FIELD((T)->status, TMDS, DEF), \ + GET_FIELD((T)->status, TMDS, STP), \ + GET_FIELD((T)->status, TMDS, ENP), \ + GET_FIELD((T)->status, TMDS, BPE), \ + 4096-GET_FIELD((T)->length, TMDL, BCNT), \ + GET_FIELD((T)->misc, TMDM, BUFF), \ + GET_FIELD((T)->misc, TMDM, UFLO), \ + GET_FIELD((T)->misc, TMDM, EXDEF), \ + GET_FIELD((T)->misc, TMDM, LCOL), \ + GET_FIELD((T)->misc, TMDM, LCAR), \ + GET_FIELD((T)->misc, TMDM, RTRY), \ + GET_FIELD((T)->misc, TMDM, TDR), \ + GET_FIELD((T)->misc, TMDM, TRC)) + +#define PRINT_RMD(R) printf( \ + "RMD0 : RBADR=0x%08x\n" \ + "RMD1 : OWN=%d, ERR=%d, FRAM=%d, OFLO=%d, " \ + "CRC=%d, BUFF=%d, STP=%d, ENP=%d,\n " \ + "BPE=%d, PAM=%d, LAFM=%d, BAM=%d, ONES=%d, BCNT=%d\n" \ + "RMD2 : RCC=%d, RPC=%d, MCNT=%d, ZEROS=%d\n", \ + (R)->rbadr, \ + GET_FIELD((R)->status, RMDS, OWN), \ + GET_FIELD((R)->status, RMDS, ERR), \ + GET_FIELD((R)->status, RMDS, FRAM), \ + GET_FIELD((R)->status, RMDS, OFLO), \ + GET_FIELD((R)->status, RMDS, CRC), \ + GET_FIELD((R)->status, RMDS, BUFF), \ + GET_FIELD((R)->status, RMDS, STP), \ + GET_FIELD((R)->status, RMDS, ENP), \ + GET_FIELD((R)->status, RMDS, BPE), \ + GET_FIELD((R)->status, RMDS, PAM), \ + GET_FIELD((R)->status, RMDS, LFAM), \ + GET_FIELD((R)->status, RMDS, BAM), \ + GET_FIELD((R)->buf_length, RMDL, ONES), \ + 4096-GET_FIELD((R)->buf_length, RMDL, BCNT), \ + GET_FIELD((R)->msg_length, RMDM, RCC), \ + GET_FIELD((R)->msg_length, RMDM, RPC), \ + GET_FIELD((R)->msg_length, RMDM, MCNT), \ + GET_FIELD((R)->msg_length, RMDM, ZEROS)) + +static inline void pcnet_tmd_load(PCNetState *s, struct pcnet_TMD *tmd, + hwaddr addr) +{ + if (!BCR_SSIZE32(s)) { + struct { + uint32_t tbadr; + int16_t length; + int16_t status; + } xda; + s->phys_mem_read(s->dma_opaque, addr, (void *)&xda, sizeof(xda), 0); + tmd->tbadr = le32_to_cpu(xda.tbadr) & 0xffffff; + tmd->length = le16_to_cpu(xda.length); + tmd->status = (le32_to_cpu(xda.tbadr) >> 16) & 0xff00; + tmd->misc = le16_to_cpu(xda.status) << 16; + tmd->res = 0; + } else { + s->phys_mem_read(s->dma_opaque, addr, (void *)tmd, sizeof(*tmd), 0); + le32_to_cpus(&tmd->tbadr); + 
le16_to_cpus((uint16_t *)&tmd->length); + le16_to_cpus((uint16_t *)&tmd->status); + le32_to_cpus(&tmd->misc); + le32_to_cpus(&tmd->res); + if (BCR_SWSTYLE(s) == 3) { + uint32_t tmp = tmd->tbadr; + tmd->tbadr = tmd->misc; + tmd->misc = tmp; + } + } +} + +static inline void pcnet_tmd_store(PCNetState *s, const struct pcnet_TMD *tmd, + hwaddr addr) +{ + if (!BCR_SSIZE32(s)) { + struct { + uint32_t tbadr; + int16_t length; + int16_t status; + } xda; + xda.tbadr = cpu_to_le32((tmd->tbadr & 0xffffff) | + ((tmd->status & 0xff00) << 16)); + xda.length = cpu_to_le16(tmd->length); + xda.status = cpu_to_le16(tmd->misc >> 16); + s->phys_mem_write(s->dma_opaque, addr, (void *)&xda, sizeof(xda), 0); + } else { + struct { + uint32_t tbadr; + int16_t length; + int16_t status; + uint32_t misc; + uint32_t res; + } xda; + xda.tbadr = cpu_to_le32(tmd->tbadr); + xda.length = cpu_to_le16(tmd->length); + xda.status = cpu_to_le16(tmd->status); + xda.misc = cpu_to_le32(tmd->misc); + xda.res = cpu_to_le32(tmd->res); + if (BCR_SWSTYLE(s) == 3) { + uint32_t tmp = xda.tbadr; + xda.tbadr = xda.misc; + xda.misc = tmp; + } + s->phys_mem_write(s->dma_opaque, addr, (void *)&xda, sizeof(xda), 0); + } +} + +static inline void pcnet_rmd_load(PCNetState *s, struct pcnet_RMD *rmd, + hwaddr addr) +{ + if (!BCR_SSIZE32(s)) { + struct { + uint32_t rbadr; + int16_t buf_length; + int16_t msg_length; + } rda; + s->phys_mem_read(s->dma_opaque, addr, (void *)&rda, sizeof(rda), 0); + rmd->rbadr = le32_to_cpu(rda.rbadr) & 0xffffff; + rmd->buf_length = le16_to_cpu(rda.buf_length); + rmd->status = (le32_to_cpu(rda.rbadr) >> 16) & 0xff00; + rmd->msg_length = le16_to_cpu(rda.msg_length); + rmd->res = 0; + } else { + s->phys_mem_read(s->dma_opaque, addr, (void *)rmd, sizeof(*rmd), 0); + le32_to_cpus(&rmd->rbadr); + le16_to_cpus((uint16_t *)&rmd->buf_length); + le16_to_cpus((uint16_t *)&rmd->status); + le32_to_cpus(&rmd->msg_length); + le32_to_cpus(&rmd->res); + if (BCR_SWSTYLE(s) == 3) { + uint32_t tmp = rmd->rbadr; + rmd->rbadr = rmd->msg_length; + rmd->msg_length = tmp; + } + } +} + +static inline void pcnet_rmd_store(PCNetState *s, struct pcnet_RMD *rmd, + hwaddr addr) +{ + if (!BCR_SSIZE32(s)) { + struct { + uint32_t rbadr; + int16_t buf_length; + int16_t msg_length; + } rda; + rda.rbadr = cpu_to_le32((rmd->rbadr & 0xffffff) | + ((rmd->status & 0xff00) << 16)); + rda.buf_length = cpu_to_le16(rmd->buf_length); + rda.msg_length = cpu_to_le16(rmd->msg_length); + s->phys_mem_write(s->dma_opaque, addr, (void *)&rda, sizeof(rda), 0); + } else { + struct { + uint32_t rbadr; + int16_t buf_length; + int16_t status; + uint32_t msg_length; + uint32_t res; + } rda; + rda.rbadr = cpu_to_le32(rmd->rbadr); + rda.buf_length = cpu_to_le16(rmd->buf_length); + rda.status = cpu_to_le16(rmd->status); + rda.msg_length = cpu_to_le32(rmd->msg_length); + rda.res = cpu_to_le32(rmd->res); + if (BCR_SWSTYLE(s) == 3) { + uint32_t tmp = rda.rbadr; + rda.rbadr = rda.msg_length; + rda.msg_length = tmp; + } + s->phys_mem_write(s->dma_opaque, addr, (void *)&rda, sizeof(rda), 0); + } +} + + +#define TMDLOAD(TMD,ADDR) pcnet_tmd_load(s,TMD,ADDR) + +#define TMDSTORE(TMD,ADDR) pcnet_tmd_store(s,TMD,ADDR) + +#define RMDLOAD(RMD,ADDR) pcnet_rmd_load(s,RMD,ADDR) + +#define RMDSTORE(RMD,ADDR) pcnet_rmd_store(s,RMD,ADDR) + +#if 1 + +#define CHECK_RMD(ADDR,RES) do { \ + struct pcnet_RMD rmd; \ + RMDLOAD(&rmd,(ADDR)); \ + (RES) |= (GET_FIELD(rmd.buf_length, RMDL, ONES) != 15) \ + || (GET_FIELD(rmd.msg_length, RMDM, ZEROS) != 0); \ +} while (0) + +#define CHECK_TMD(ADDR,RES) do { \ + 
struct pcnet_TMD tmd; \ + TMDLOAD(&tmd,(ADDR)); \ + (RES) |= (GET_FIELD(tmd.length, TMDL, ONES) != 15); \ +} while (0) + +#else + +#define CHECK_RMD(ADDR,RES) do { \ + switch (BCR_SWSTYLE(s)) { \ + case 0x00: \ + { \ + uint16_t rda[4]; \ + s->phys_mem_read(s->dma_opaque, (ADDR), \ + (void *)&rda[0], sizeof(rda), 0); \ + (RES) |= (rda[2] & 0xf000)!=0xf000; \ + (RES) |= (rda[3] & 0xf000)!=0x0000; \ + } \ + break; \ + case 0x01: \ + case 0x02: \ + { \ + uint32_t rda[4]; \ + s->phys_mem_read(s->dma_opaque, (ADDR), \ + (void *)&rda[0], sizeof(rda), 0); \ + (RES) |= (rda[1] & 0x0000f000L)!=0x0000f000L; \ + (RES) |= (rda[2] & 0x0000f000L)!=0x00000000L; \ + } \ + break; \ + case 0x03: \ + { \ + uint32_t rda[4]; \ + s->phys_mem_read(s->dma_opaque, (ADDR), \ + (void *)&rda[0], sizeof(rda), 0); \ + (RES) |= (rda[0] & 0x0000f000L)!=0x00000000L; \ + (RES) |= (rda[1] & 0x0000f000L)!=0x0000f000L; \ + } \ + break; \ + } \ +} while (0) + +#define CHECK_TMD(ADDR,RES) do { \ + switch (BCR_SWSTYLE(s)) { \ + case 0x00: \ + { \ + uint16_t xda[4]; \ + s->phys_mem_read(s->dma_opaque, (ADDR), \ + (void *)&xda[0], sizeof(xda), 0); \ + (RES) |= (xda[2] & 0xf000)!=0xf000; \ + } \ + break; \ + case 0x01: \ + case 0x02: \ + case 0x03: \ + { \ + uint32_t xda[4]; \ + s->phys_mem_read(s->dma_opaque, (ADDR), \ + (void *)&xda[0], sizeof(xda), 0); \ + (RES) |= (xda[1] & 0x0000f000L)!=0x0000f000L; \ + } \ + break; \ + } \ +} while (0) + +#endif + +#define PRINT_PKTHDR(BUF) do { \ + struct qemu_ether_header *hdr = (void *)(BUF); \ + printf("packet dhost=%02x:%02x:%02x:%02x:%02x:%02x, " \ + "shost=%02x:%02x:%02x:%02x:%02x:%02x, " \ + "type=0x%04x\n", \ + hdr->ether_dhost[0],hdr->ether_dhost[1],hdr->ether_dhost[2], \ + hdr->ether_dhost[3],hdr->ether_dhost[4],hdr->ether_dhost[5], \ + hdr->ether_shost[0],hdr->ether_shost[1],hdr->ether_shost[2], \ + hdr->ether_shost[3],hdr->ether_shost[4],hdr->ether_shost[5], \ + be16_to_cpu(hdr->ether_type)); \ +} while (0) + +#define CRC(crc, ch) (crc = (crc >> 8) ^ crctab[(crc ^ (ch)) & 0xff]) + +/* generated using the AUTODIN II polynomial + * x^32 + x^26 + x^23 + x^22 + x^16 + + * x^12 + x^11 + x^10 + x^8 + x^7 + x^5 + x^4 + x^2 + x^1 + 1 + */ +static const uint32_t crctab[256] = { + 0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, + 0x076dc419, 0x706af48f, 0xe963a535, 0x9e6495a3, + 0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988, + 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91, + 0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de, + 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7, + 0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, + 0x14015c4f, 0x63066cd9, 0xfa0f3d63, 0x8d080df5, + 0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172, + 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b, + 0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940, + 0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59, + 0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116, + 0x21b4f4b5, 0x56b3c423, 0xcfba9599, 0xb8bda50f, + 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924, + 0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d, + 0x76dc4190, 0x01db7106, 0x98d220bc, 0xefd5102a, + 0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433, + 0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818, + 0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01, + 0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e, + 0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457, + 0x65b0d9c6, 0x12b7e950, 0x8bbeb8ea, 0xfcb9887c, + 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65, + 0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2, + 0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb, + 0x4369e96a, 0x346ed9fc, 0xad678846, 
0xda60b8d0, + 0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9, + 0x5005713c, 0x270241aa, 0xbe0b1010, 0xc90c2086, + 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f, + 0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, + 0x59b33d17, 0x2eb40d81, 0xb7bd5c3b, 0xc0ba6cad, + 0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a, + 0xead54739, 0x9dd277af, 0x04db2615, 0x73dc1683, + 0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8, + 0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1, + 0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe, + 0xf762575d, 0x806567cb, 0x196c3671, 0x6e6b06e7, + 0xfed41b76, 0x89d32be0, 0x10da7a5a, 0x67dd4acc, + 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5, + 0xd6d6a3e8, 0xa1d1937e, 0x38d8c2c4, 0x4fdff252, + 0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b, + 0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60, + 0xdf60efc3, 0xa867df55, 0x316e8eef, 0x4669be79, + 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236, + 0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f, + 0xc5ba3bbe, 0xb2bd0b28, 0x2bb45a92, 0x5cb36a04, + 0xc2d7ffa7, 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d, + 0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a, + 0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713, + 0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38, + 0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21, + 0x86d3d2d4, 0xf1d4e242, 0x68ddb3f8, 0x1fda836e, + 0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777, + 0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c, + 0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45, + 0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2, + 0xa7672661, 0xd06016f7, 0x4969474d, 0x3e6e77db, + 0xaed16a4a, 0xd9d65adc, 0x40df0b66, 0x37d83bf0, + 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9, + 0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, + 0xbad03605, 0xcdd70693, 0x54de5729, 0x23d967bf, + 0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94, + 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d, +}; + +static inline int padr_match(PCNetState *s, const uint8_t *buf, int size) +{ + struct qemu_ether_header *hdr = (void *)buf; + uint8_t padr[6] = { + s->csr[12] & 0xff, s->csr[12] >> 8, + s->csr[13] & 0xff, s->csr[13] >> 8, + s->csr[14] & 0xff, s->csr[14] >> 8 + }; + int result = (!CSR_DRCVPA(s)) && !memcmp(hdr->ether_dhost, padr, 6); +#ifdef PCNET_DEBUG_MATCH + printf("packet dhost=%02x:%02x:%02x:%02x:%02x:%02x, " + "padr=%02x:%02x:%02x:%02x:%02x:%02x\n", + hdr->ether_dhost[0],hdr->ether_dhost[1],hdr->ether_dhost[2], + hdr->ether_dhost[3],hdr->ether_dhost[4],hdr->ether_dhost[5], + padr[0],padr[1],padr[2],padr[3],padr[4],padr[5]); + printf("padr_match result=%d\n", result); +#endif + return result; +} + +static inline int padr_bcast(PCNetState *s, const uint8_t *buf, int size) +{ + static const uint8_t BCAST[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; + struct qemu_ether_header *hdr = (void *)buf; + int result = !CSR_DRCVBC(s) && !memcmp(hdr->ether_dhost, BCAST, 6); +#ifdef PCNET_DEBUG_MATCH + printf("padr_bcast result=%d\n", result); +#endif + return result; +} + +static inline int ladr_match(PCNetState *s, const uint8_t *buf, int size) +{ + struct qemu_ether_header *hdr = (void *)buf; + if ((*(hdr->ether_dhost)&0x01) && + ((uint64_t *)&s->csr[8])[0] != 0LL) { + uint8_t ladr[8] = { + s->csr[8] & 0xff, s->csr[8] >> 8, + s->csr[9] & 0xff, s->csr[9] >> 8, + s->csr[10] & 0xff, s->csr[10] >> 8, + s->csr[11] & 0xff, s->csr[11] >> 8 + }; + int index = net_crc32_le(hdr->ether_dhost, ETH_ALEN) >> 26; + return !!(ladr[index >> 3] & (1 << (index & 7))); + } + return 0; +} + +static inline hwaddr pcnet_rdra_addr(PCNetState *s, int idx) +{ + while (idx < 1) { + idx += 
CSR_RCVRL(s); + } + return s->rdra + ((CSR_RCVRL(s) - idx) * (BCR_SWSTYLE(s) ? 16 : 8)); +} + +static inline int64_t pcnet_get_next_poll_time(PCNetState *s, int64_t current_time) +{ + int64_t next_time = current_time + + (65536 - (CSR_SPND(s) ? 0 : CSR_POLL(s))) * 30; + + if (next_time <= current_time) { + next_time = current_time + 1; + } + return next_time; +} + +static void pcnet_poll(PCNetState *s); +static void pcnet_poll_timer(void *opaque); + +static uint32_t pcnet_csr_readw(PCNetState *s, uint32_t rap); +static void pcnet_csr_writew(PCNetState *s, uint32_t rap, uint32_t new_value); +static void pcnet_bcr_writew(PCNetState *s, uint32_t rap, uint32_t val); + +static void pcnet_s_reset(PCNetState *s) +{ + trace_pcnet_s_reset(s); + + s->rdra = 0; + s->tdra = 0; + s->rap = 0; + + s->bcr[BCR_BSBC] &= ~0x0080; + + s->csr[0] = 0x0004; + s->csr[3] = 0x0000; + s->csr[4] = 0x0115; + s->csr[5] = 0x0000; + s->csr[6] = 0x0000; + s->csr[8] = 0; + s->csr[9] = 0; + s->csr[10] = 0; + s->csr[11] = 0; + s->csr[12] = le16_to_cpu(((uint16_t *)&s->prom[0])[0]); + s->csr[13] = le16_to_cpu(((uint16_t *)&s->prom[0])[1]); + s->csr[14] = le16_to_cpu(((uint16_t *)&s->prom[0])[2]); + s->csr[15] &= 0x21c4; + s->csr[72] = 1; + s->csr[74] = 1; + s->csr[76] = 1; + s->csr[78] = 1; + s->csr[80] = 0x1410; + s->csr[88] = 0x1003; + s->csr[89] = 0x0262; + s->csr[94] = 0x0000; + s->csr[100] = 0x0200; + s->csr[103] = 0x0105; + s->csr[112] = 0x0000; + s->csr[114] = 0x0000; + s->csr[122] = 0x0000; + s->csr[124] = 0x0000; + + s->tx_busy = 0; +} + +static void pcnet_update_irq(PCNetState *s) +{ + int isr = 0; + s->csr[0] &= ~0x0080; + +#if 1 + if (((s->csr[0] & ~s->csr[3]) & 0x5f00) || + (((s->csr[4]>>1) & ~s->csr[4]) & 0x0115) || + (((s->csr[5]>>1) & s->csr[5]) & 0x0048)) +#else + if ((!(s->csr[3] & 0x4000) && !!(s->csr[0] & 0x4000)) /* BABL */ || + (!(s->csr[3] & 0x1000) && !!(s->csr[0] & 0x1000)) /* MISS */ || + (!(s->csr[3] & 0x0100) && !!(s->csr[0] & 0x0100)) /* IDON */ || + (!(s->csr[3] & 0x0200) && !!(s->csr[0] & 0x0200)) /* TINT */ || + (!(s->csr[3] & 0x0400) && !!(s->csr[0] & 0x0400)) /* RINT */ || + (!(s->csr[3] & 0x0800) && !!(s->csr[0] & 0x0800)) /* MERR */ || + (!(s->csr[4] & 0x0001) && !!(s->csr[4] & 0x0002)) /* JAB */ || + (!(s->csr[4] & 0x0004) && !!(s->csr[4] & 0x0008)) /* TXSTRT */ || + (!(s->csr[4] & 0x0010) && !!(s->csr[4] & 0x0020)) /* RCVO */ || + (!(s->csr[4] & 0x0100) && !!(s->csr[4] & 0x0200)) /* MFCO */ || + (!!(s->csr[5] & 0x0040) && !!(s->csr[5] & 0x0080)) /* EXDINT */ || + (!!(s->csr[5] & 0x0008) && !!(s->csr[5] & 0x0010)) /* MPINT */) +#endif + { + + isr = CSR_INEA(s); + s->csr[0] |= 0x0080; + } + + if (!!(s->csr[4] & 0x0080) && CSR_INEA(s)) { /* UINT */ + s->csr[4] &= ~0x0080; + s->csr[4] |= 0x0040; + s->csr[0] |= 0x0080; + isr = 1; + trace_pcnet_user_int(s); + } + +#if 1 + if (((s->csr[5]>>1) & s->csr[5]) & 0x0500) +#else + if ((!!(s->csr[5] & 0x0400) && !!(s->csr[5] & 0x0800)) /* SINT */ || + (!!(s->csr[5] & 0x0100) && !!(s->csr[5] & 0x0200)) /* SLPINT */ ) +#endif + { + isr = 1; + s->csr[0] |= 0x0080; + } + + if (isr != s->isr) { + trace_pcnet_isr_change(s, isr, s->isr); + } + qemu_set_irq(s->irq, isr); + s->isr = isr; +} + +static void pcnet_init(PCNetState *s) +{ + int rlen, tlen; + uint16_t padr[3], ladrf[4], mode; + uint32_t rdra, tdra; + + trace_pcnet_init(s, PHYSADDR(s, CSR_IADR(s))); + + if (BCR_SSIZE32(s)) { + struct pcnet_initblk32 initblk; + s->phys_mem_read(s->dma_opaque, PHYSADDR(s,CSR_IADR(s)), + (uint8_t *)&initblk, sizeof(initblk), 0); + mode = le16_to_cpu(initblk.mode); + rlen 
= initblk.rlen >> 4; + tlen = initblk.tlen >> 4; + ladrf[0] = le16_to_cpu(initblk.ladrf[0]); + ladrf[1] = le16_to_cpu(initblk.ladrf[1]); + ladrf[2] = le16_to_cpu(initblk.ladrf[2]); + ladrf[3] = le16_to_cpu(initblk.ladrf[3]); + padr[0] = le16_to_cpu(initblk.padr[0]); + padr[1] = le16_to_cpu(initblk.padr[1]); + padr[2] = le16_to_cpu(initblk.padr[2]); + rdra = le32_to_cpu(initblk.rdra); + tdra = le32_to_cpu(initblk.tdra); + } else { + struct pcnet_initblk16 initblk; + s->phys_mem_read(s->dma_opaque, PHYSADDR(s,CSR_IADR(s)), + (uint8_t *)&initblk, sizeof(initblk), 0); + mode = le16_to_cpu(initblk.mode); + ladrf[0] = le16_to_cpu(initblk.ladrf[0]); + ladrf[1] = le16_to_cpu(initblk.ladrf[1]); + ladrf[2] = le16_to_cpu(initblk.ladrf[2]); + ladrf[3] = le16_to_cpu(initblk.ladrf[3]); + padr[0] = le16_to_cpu(initblk.padr[0]); + padr[1] = le16_to_cpu(initblk.padr[1]); + padr[2] = le16_to_cpu(initblk.padr[2]); + rdra = le32_to_cpu(initblk.rdra); + tdra = le32_to_cpu(initblk.tdra); + rlen = rdra >> 29; + tlen = tdra >> 29; + rdra &= 0x00ffffff; + tdra &= 0x00ffffff; + } + + trace_pcnet_rlen_tlen(s, rlen, tlen); + + CSR_RCVRL(s) = (rlen < 9) ? (1 << rlen) : 512; + CSR_XMTRL(s) = (tlen < 9) ? (1 << tlen) : 512; + s->csr[ 6] = (tlen << 12) | (rlen << 8); + s->csr[15] = mode; + s->csr[ 8] = ladrf[0]; + s->csr[ 9] = ladrf[1]; + s->csr[10] = ladrf[2]; + s->csr[11] = ladrf[3]; + s->csr[12] = padr[0]; + s->csr[13] = padr[1]; + s->csr[14] = padr[2]; + s->rdra = PHYSADDR(s, rdra); + s->tdra = PHYSADDR(s, tdra); + + CSR_RCVRC(s) = CSR_RCVRL(s); + CSR_XMTRC(s) = CSR_XMTRL(s); + + trace_pcnet_ss32_rdra_tdra(s, BCR_SSIZE32(s), + s->rdra, CSR_RCVRL(s), s->tdra, CSR_XMTRL(s)); + + s->csr[0] |= 0x0101; + s->csr[0] &= ~0x0004; /* clear STOP bit */ + + qemu_flush_queued_packets(qemu_get_queue(s->nic)); +} + +static void pcnet_start(PCNetState *s) +{ +#ifdef PCNET_DEBUG + printf("pcnet_start\n"); +#endif + + if (!CSR_DTX(s)) { + s->csr[0] |= 0x0010; /* set TXON */ + } + if (!CSR_DRX(s)) { + s->csr[0] |= 0x0020; /* set RXON */ + } + s->csr[0] &= ~0x0004; /* clear STOP bit */ + s->csr[0] |= 0x0002; + pcnet_poll_timer(s); + + qemu_flush_queued_packets(qemu_get_queue(s->nic)); +} + +static void pcnet_stop(PCNetState *s) +{ +#ifdef PCNET_DEBUG + printf("pcnet_stop\n"); +#endif + s->csr[0] &= ~0xffeb; + s->csr[0] |= 0x0014; + s->csr[4] &= ~0x02c2; + s->csr[5] &= ~0x0011; + pcnet_poll_timer(s); +} + +static void pcnet_rdte_poll(PCNetState *s) +{ + s->csr[28] = s->csr[29] = 0; + if (s->rdra) { + int bad = 0; +#if 1 + hwaddr crda = pcnet_rdra_addr(s, CSR_RCVRC(s)); + hwaddr nrda = pcnet_rdra_addr(s, -1 + CSR_RCVRC(s)); + hwaddr nnrd = pcnet_rdra_addr(s, -2 + CSR_RCVRC(s)); +#else + hwaddr crda = s->rdra + + (CSR_RCVRL(s) - CSR_RCVRC(s)) * + (BCR_SWSTYLE(s) ? 16 : 8 ); + int nrdc = CSR_RCVRC(s)<=1 ? CSR_RCVRL(s) : CSR_RCVRC(s)-1; + hwaddr nrda = s->rdra + + (CSR_RCVRL(s) - nrdc) * + (BCR_SWSTYLE(s) ? 16 : 8 ); + int nnrc = nrdc<=1 ? CSR_RCVRL(s) : nrdc-1; + hwaddr nnrd = s->rdra + + (CSR_RCVRL(s) - nnrc) * + (BCR_SWSTYLE(s) ? 
16 : 8 ); +#endif + + CHECK_RMD(crda, bad); + if (!bad) { + CHECK_RMD(nrda, bad); + if (bad || (nrda == crda)) nrda = 0; + CHECK_RMD(nnrd, bad); + if (bad || (nnrd == crda)) nnrd = 0; + + s->csr[28] = crda & 0xffff; + s->csr[29] = crda >> 16; + s->csr[26] = nrda & 0xffff; + s->csr[27] = nrda >> 16; + s->csr[36] = nnrd & 0xffff; + s->csr[37] = nnrd >> 16; +#ifdef PCNET_DEBUG + if (bad) { + printf("pcnet: BAD RMD RECORDS AFTER 0x" TARGET_FMT_plx "\n", + crda); + } + } else { + printf("pcnet: BAD RMD RDA=0x" TARGET_FMT_plx "\n", crda); +#endif + } + } + + if (CSR_CRDA(s)) { + struct pcnet_RMD rmd; + RMDLOAD(&rmd, PHYSADDR(s,CSR_CRDA(s))); + CSR_CRBC(s) = GET_FIELD(rmd.buf_length, RMDL, BCNT); + CSR_CRST(s) = rmd.status; +#ifdef PCNET_DEBUG_RMD_X + printf("CRDA=0x%08x CRST=0x%04x RCVRC=%d RMDL=0x%04x RMDS=0x%04x RMDM=0x%08x\n", + PHYSADDR(s,CSR_CRDA(s)), CSR_CRST(s), CSR_RCVRC(s), + rmd.buf_length, rmd.status, rmd.msg_length); + PRINT_RMD(&rmd); +#endif + } else { + CSR_CRBC(s) = CSR_CRST(s) = 0; + } + + if (CSR_NRDA(s)) { + struct pcnet_RMD rmd; + RMDLOAD(&rmd, PHYSADDR(s,CSR_NRDA(s))); + CSR_NRBC(s) = GET_FIELD(rmd.buf_length, RMDL, BCNT); + CSR_NRST(s) = rmd.status; + } else { + CSR_NRBC(s) = CSR_NRST(s) = 0; + } + +} + +static int pcnet_tdte_poll(PCNetState *s) +{ + s->csr[34] = s->csr[35] = 0; + if (s->tdra) { + hwaddr cxda = s->tdra + + (CSR_XMTRL(s) - CSR_XMTRC(s)) * + (BCR_SWSTYLE(s) ? 16 : 8); + int bad = 0; + CHECK_TMD(cxda, bad); + if (!bad) { + if (CSR_CXDA(s) != cxda) { + s->csr[60] = s->csr[34]; + s->csr[61] = s->csr[35]; + s->csr[62] = CSR_CXBC(s); + s->csr[63] = CSR_CXST(s); + } + s->csr[34] = cxda & 0xffff; + s->csr[35] = cxda >> 16; +#ifdef PCNET_DEBUG_X + printf("pcnet: BAD TMD XDA=0x%08x\n", cxda); +#endif + } + } + + if (CSR_CXDA(s)) { + struct pcnet_TMD tmd; + + TMDLOAD(&tmd, PHYSADDR(s,CSR_CXDA(s))); + + CSR_CXBC(s) = GET_FIELD(tmd.length, TMDL, BCNT); + CSR_CXST(s) = tmd.status; + } else { + CSR_CXBC(s) = CSR_CXST(s) = 0; + } + + return !!(CSR_CXST(s) & 0x8000); +} + +#define MIN_BUF_SIZE 60 + +ssize_t pcnet_receive(NetClientState *nc, const uint8_t *buf, size_t size_) +{ + PCNetState *s = qemu_get_nic_opaque(nc); + int is_padr = 0, is_bcast = 0, is_ladr = 0; + uint8_t buf1[60]; + int remaining; + int crc_err = 0; + size_t size = size_; + + if (CSR_DRX(s) || CSR_STOP(s) || CSR_SPND(s) || !size || + (CSR_LOOP(s) && !s->looptest)) { + return -1; + } +#ifdef PCNET_DEBUG + printf("pcnet_receive size=%zu\n", size); +#endif + + /* if too small buffer, then expand it */ + if (size < MIN_BUF_SIZE) { + memcpy(buf1, buf, size); + memset(buf1 + size, 0, MIN_BUF_SIZE - size); + buf = buf1; + size = MIN_BUF_SIZE; + } + + if (CSR_PROM(s) + || (is_padr=padr_match(s, buf, size)) + || (is_bcast=padr_bcast(s, buf, size)) + || (is_ladr=ladr_match(s, buf, size))) { + + pcnet_rdte_poll(s); + + if (!(CSR_CRST(s) & 0x8000) && s->rdra) { + struct pcnet_RMD rmd; + int rcvrc = CSR_RCVRC(s)-1,i; + hwaddr nrda; + for (i = CSR_RCVRL(s)-1; i > 0; i--, rcvrc--) { + if (rcvrc <= 1) + rcvrc = CSR_RCVRL(s); + nrda = s->rdra + + (CSR_RCVRL(s) - rcvrc) * + (BCR_SWSTYLE(s) ? 
16 : 8 ); + RMDLOAD(&rmd, nrda); + if (GET_FIELD(rmd.status, RMDS, OWN)) { +#ifdef PCNET_DEBUG_RMD + printf("pcnet - scan buffer: RCVRC=%d PREV_RCVRC=%d\n", + rcvrc, CSR_RCVRC(s)); +#endif + CSR_RCVRC(s) = rcvrc; + pcnet_rdte_poll(s); + break; + } + } + } + + if (!(CSR_CRST(s) & 0x8000)) { +#ifdef PCNET_DEBUG_RMD + printf("pcnet - no buffer: RCVRC=%d\n", CSR_RCVRC(s)); +#endif + s->csr[0] |= 0x1000; /* Set MISS flag */ + CSR_MISSC(s)++; + } else { + uint8_t *src = s->buffer; + hwaddr crda = CSR_CRDA(s); + struct pcnet_RMD rmd; + int pktcount = 0; + + if (!s->looptest) { + if (size > 4092) { +#ifdef PCNET_DEBUG_RMD + fprintf(stderr, "pcnet: truncates rx packet.\n"); +#endif + size = 4092; + } + memcpy(src, buf, size); + /* no need to compute the CRC */ + src[size] = 0; + src[size + 1] = 0; + src[size + 2] = 0; + src[size + 3] = 0; + size += 4; + } else if (s->looptest == PCNET_LOOPTEST_CRC || + !CSR_DXMTFCS(s) || size < MIN_BUF_SIZE+4) { + uint32_t fcs = ~0; + uint8_t *p = src; + + while (p != &src[size]) + CRC(fcs, *p++); + *(uint32_t *)p = htonl(fcs); + size += 4; + } else { + uint32_t fcs = ~0; + uint8_t *p = src; + + while (p != &src[size]) + CRC(fcs, *p++); + crc_err = (*(uint32_t *)p != htonl(fcs)); + } + +#ifdef PCNET_DEBUG_MATCH + PRINT_PKTHDR(buf); +#endif + + RMDLOAD(&rmd, PHYSADDR(s,crda)); + /*if (!CSR_LAPPEN(s))*/ + SET_FIELD(&rmd.status, RMDS, STP, 1); + +#define PCNET_RECV_STORE() do { \ + int count = MIN(4096 - GET_FIELD(rmd.buf_length, RMDL, BCNT),remaining); \ + hwaddr rbadr = PHYSADDR(s, rmd.rbadr); \ + s->phys_mem_write(s->dma_opaque, rbadr, src, count, CSR_BSWP(s)); \ + src += count; remaining -= count; \ + SET_FIELD(&rmd.status, RMDS, OWN, 0); \ + RMDSTORE(&rmd, PHYSADDR(s,crda)); \ + pktcount++; \ +} while (0) + + remaining = size; + PCNET_RECV_STORE(); + if ((remaining > 0) && CSR_NRDA(s)) { + hwaddr nrda = CSR_NRDA(s); +#ifdef PCNET_DEBUG_RMD + PRINT_RMD(&rmd); +#endif + RMDLOAD(&rmd, PHYSADDR(s,nrda)); + if (GET_FIELD(rmd.status, RMDS, OWN)) { + crda = nrda; + PCNET_RECV_STORE(); +#ifdef PCNET_DEBUG_RMD + PRINT_RMD(&rmd); +#endif + if ((remaining > 0) && (nrda=CSR_NNRD(s))) { + RMDLOAD(&rmd, PHYSADDR(s,nrda)); + if (GET_FIELD(rmd.status, RMDS, OWN)) { + crda = nrda; + PCNET_RECV_STORE(); + } + } + } + } + +#undef PCNET_RECV_STORE + + RMDLOAD(&rmd, PHYSADDR(s,crda)); + if (remaining == 0) { + SET_FIELD(&rmd.msg_length, RMDM, MCNT, size); + SET_FIELD(&rmd.status, RMDS, ENP, 1); + SET_FIELD(&rmd.status, RMDS, PAM, !CSR_PROM(s) && is_padr); + SET_FIELD(&rmd.status, RMDS, LFAM, !CSR_PROM(s) && is_ladr); + SET_FIELD(&rmd.status, RMDS, BAM, !CSR_PROM(s) && is_bcast); + if (crc_err) { + SET_FIELD(&rmd.status, RMDS, CRC, 1); + SET_FIELD(&rmd.status, RMDS, ERR, 1); + } + } else { + SET_FIELD(&rmd.status, RMDS, OFLO, 1); + SET_FIELD(&rmd.status, RMDS, BUFF, 1); + SET_FIELD(&rmd.status, RMDS, ERR, 1); + } + RMDSTORE(&rmd, PHYSADDR(s,crda)); + s->csr[0] |= 0x0400; + +#ifdef PCNET_DEBUG + printf("RCVRC=%d CRDA=0x%08x BLKS=%d\n", + CSR_RCVRC(s), PHYSADDR(s,CSR_CRDA(s)), pktcount); +#endif +#ifdef PCNET_DEBUG_RMD + PRINT_RMD(&rmd); +#endif + + while (pktcount--) { + if (CSR_RCVRC(s) <= 1) { + CSR_RCVRC(s) = CSR_RCVRL(s); + } else { + CSR_RCVRC(s)--; + } + } + + pcnet_rdte_poll(s); + + } + } + + pcnet_poll(s); + pcnet_update_irq(s); + + return size_; +} + +void pcnet_set_link_status(NetClientState *nc) +{ + PCNetState *d = qemu_get_nic_opaque(nc); + + d->lnkst = nc->link_down ? 
0 : 0x40;
+}
+
+static void pcnet_transmit(PCNetState *s)
+{
+    hwaddr xmit_cxda = 0;
+    int count = CSR_XMTRL(s)-1;
+    int add_crc = 0;
+    int bcnt;
+    s->xmit_pos = -1;
+
+    if (!CSR_TXON(s)) {
+        s->csr[0] &= ~0x0008;
+        return;
+    }
+
+    s->tx_busy = 1;
+
+txagain:
+    if (pcnet_tdte_poll(s)) {
+        struct pcnet_TMD tmd;
+
+        TMDLOAD(&tmd, PHYSADDR(s,CSR_CXDA(s)));
+
+#ifdef PCNET_DEBUG_TMD
+        printf(" TMDLOAD 0x%08x\n", PHYSADDR(s,CSR_CXDA(s)));
+        PRINT_TMD(&tmd);
+#endif
+        if (GET_FIELD(tmd.status, TMDS, STP)) {
+            s->xmit_pos = 0;
+            xmit_cxda = PHYSADDR(s,CSR_CXDA(s));
+            if (BCR_SWSTYLE(s) != 1)
+                add_crc = GET_FIELD(tmd.status, TMDS, ADDFCS);
+        }
+        if (s->lnkst == 0 &&
+            (!CSR_LOOP(s) || (!CSR_INTL(s) && !BCR_TMAULOOP(s)))) {
+            SET_FIELD(&tmd.misc, TMDM, LCAR, 1);
+            SET_FIELD(&tmd.status, TMDS, ERR, 1);
+            SET_FIELD(&tmd.status, TMDS, OWN, 0);
+            s->csr[0] |= 0xa000; /* ERR | CERR */
+            s->xmit_pos = -1;
+            goto txdone;
+        }
+
+        if (s->xmit_pos < 0) {
+            goto txdone;
+        }
+
+        bcnt = 4096 - GET_FIELD(tmd.length, TMDL, BCNT);
+
+        /* If a multi-TMD packet is larger than s->buffer, drop it silently.
+         * Note: this is not what real hw does.
+         * The last four bytes of s->buffer are used to store the CRC FCS code.
+         */
+        if (s->xmit_pos + bcnt > sizeof(s->buffer) - 4) {
+            s->xmit_pos = -1;
+            goto txdone;
+        }
+
+        s->phys_mem_read(s->dma_opaque, PHYSADDR(s, tmd.tbadr),
+                         s->buffer + s->xmit_pos, bcnt, CSR_BSWP(s));
+        s->xmit_pos += bcnt;
+
+        if (!GET_FIELD(tmd.status, TMDS, ENP)) {
+            goto txdone;
+        }
+
+#ifdef PCNET_DEBUG
+        printf("pcnet_transmit size=%d\n", s->xmit_pos);
+#endif
+        if (CSR_LOOP(s)) {
+            if (BCR_SWSTYLE(s) == 1)
+                add_crc = !GET_FIELD(tmd.status, TMDS, NOFCS);
+            s->looptest = add_crc ? PCNET_LOOPTEST_CRC : PCNET_LOOPTEST_NOCRC;
+            qemu_receive_packet(qemu_get_queue(s->nic), s->buffer, s->xmit_pos);
+            s->looptest = 0;
+        } else {
+            if (s->nic) {
+                qemu_send_packet(qemu_get_queue(s->nic), s->buffer,
+                                 s->xmit_pos);
+            }
+        }
+
+        s->csr[0] &= ~0x0008;   /* clear TDMD */
+        s->csr[4] |= 0x0004;    /* set TXSTRT */
+        s->xmit_pos = -1;
+
+txdone:
+        SET_FIELD(&tmd.status, TMDS, OWN, 0);
+        TMDSTORE(&tmd, PHYSADDR(s,CSR_CXDA(s)));
+        if (!CSR_TOKINTD(s)
+            || (CSR_LTINTEN(s) && GET_FIELD(tmd.status, TMDS, LTINT))) {
+            s->csr[0] |= 0x0200; /* set TINT */
+        }
+        if (CSR_XMTRC(s) <= 1) {
+            CSR_XMTRC(s) = CSR_XMTRL(s);
+        } else {
+            CSR_XMTRC(s)--;
+        }
+        if (count--) {
+            goto txagain;
+        }
+    } else if (s->xmit_pos >= 0) {
+        struct pcnet_TMD tmd;
+        TMDLOAD(&tmd, xmit_cxda);
+        SET_FIELD(&tmd.misc, TMDM, BUFF, 1);
+        SET_FIELD(&tmd.misc, TMDM, UFLO, 1);
+        SET_FIELD(&tmd.status, TMDS, ERR, 1);
+        SET_FIELD(&tmd.status, TMDS, OWN, 0);
+        TMDSTORE(&tmd, xmit_cxda);
+        s->csr[0] |= 0x0200; /* set TINT */
+        if (!CSR_DXSUFLO(s)) {
+            s->csr[0] &= ~0x0010;
+        } else if (count--) {
+            goto txagain;
+        }
+    }
+
+    s->tx_busy = 0;
+}
+
+static void pcnet_poll(PCNetState *s)
+{
+    if (CSR_RXON(s)) {
+        pcnet_rdte_poll(s);
+    }
+
+    if (CSR_TDMD(s) || (CSR_TXON(s) && !CSR_DPOLL(s) && pcnet_tdte_poll(s))) {
+        /* prevent recursion */
+        if (s->tx_busy) {
+            return;
+        }
+        pcnet_transmit(s);
+    }
+}
+
+static void pcnet_poll_timer(void *opaque)
+{
+    PCNetState *s = opaque;
+
+    timer_del(s->poll_timer);
+
+    if (CSR_TDMD(s)) {
+        pcnet_transmit(s);
+    }
+
+    pcnet_update_irq(s);
+
+    if (!CSR_STOP(s) && !CSR_SPND(s) && !CSR_DPOLL(s)) {
+        uint64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) * 33;
+        if (!s->timer || !now) {
+            s->timer = now;
+        } else {
+            uint64_t t = now - s->timer + CSR_POLL(s);
+            if (t > 0xffffLL) {
+                pcnet_poll(s);
+                CSR_POLL(s) = CSR_PINT(s);
+            } else {
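+                /* poll interval not yet elapsed: remember accumulated time */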
CSR_POLL(s) = t; + } + } + timer_mod(s->poll_timer, + pcnet_get_next_poll_time(s,qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL))); + } +} + + +static void pcnet_csr_writew(PCNetState *s, uint32_t rap, uint32_t new_value) +{ + uint16_t val = new_value; +#ifdef PCNET_DEBUG_CSR + printf("pcnet_csr_writew rap=%d val=0x%04x\n", rap, val); +#endif + switch (rap) { + case 0: + s->csr[0] &= ~(val & 0x7f00); /* Clear any interrupt flags */ + + s->csr[0] = (s->csr[0] & ~0x0040) | (val & 0x0048); + + val = (val & 0x007f) | (s->csr[0] & 0x7f00); + + /* IFF STOP, STRT and INIT are set, clear STRT and INIT */ + if ((val & 7) == 7) { + val &= ~3; + } + if (!CSR_STOP(s) && (val & 4)) { + pcnet_stop(s); + } + if (!CSR_INIT(s) && (val & 1)) { + pcnet_init(s); + } + if (!CSR_STRT(s) && (val & 2)) { + pcnet_start(s); + } + if (CSR_TDMD(s)) { + pcnet_transmit(s); + } + return; + case 1: + case 2: + case 8: + case 9: + case 10: + case 11: + case 12: + case 13: + case 14: + case 15: + case 18: /* CRBAL */ + case 19: /* CRBAU */ + case 20: /* CXBAL */ + case 21: /* CXBAU */ + case 22: /* NRBAU */ + case 23: /* NRBAU */ + case 24: + case 25: + case 26: + case 27: + case 28: + case 29: + case 30: + case 31: + case 32: + case 33: + case 34: + case 35: + case 36: + case 37: + case 38: + case 39: + case 40: /* CRBC */ + case 41: + case 42: /* CXBC */ + case 43: + case 44: + case 45: + case 46: /* POLL */ + case 47: /* POLLINT */ + case 72: + case 74: + break; + case 76: /* RCVRL */ + case 78: /* XMTRL */ + val = (val > 0) ? val : 512; + break; + case 112: + if (CSR_STOP(s) || CSR_SPND(s)) { + break; + } + return; + case 3: + break; + case 4: + s->csr[4] &= ~(val & 0x026a); + val &= ~0x026a; val |= s->csr[4] & 0x026a; + break; + case 5: + s->csr[5] &= ~(val & 0x0a90); + val &= ~0x0a90; val |= s->csr[5] & 0x0a90; + break; + case 16: + pcnet_csr_writew(s,1,val); + return; + case 17: + pcnet_csr_writew(s,2,val); + return; + case 58: + pcnet_bcr_writew(s,BCR_SWS,val); + break; + default: + return; + } + s->csr[rap] = val; +} + +static uint32_t pcnet_csr_readw(PCNetState *s, uint32_t rap) +{ + uint32_t val; + switch (rap) { + case 0: + pcnet_update_irq(s); + val = s->csr[0]; + val |= (val & 0x7800) ? 
0x8000 : 0; + break; + case 16: + return pcnet_csr_readw(s,1); + case 17: + return pcnet_csr_readw(s,2); + case 58: + return pcnet_bcr_readw(s,BCR_SWS); + case 88: + val = s->csr[89]; + val <<= 16; + val |= s->csr[88]; + break; + default: + val = s->csr[rap]; + } +#ifdef PCNET_DEBUG_CSR + printf("pcnet_csr_readw rap=%d val=0x%04x\n", rap, val); +#endif + return val; +} + +static void pcnet_bcr_writew(PCNetState *s, uint32_t rap, uint32_t val) +{ + rap &= 127; +#ifdef PCNET_DEBUG_BCR + printf("pcnet_bcr_writew rap=%d val=0x%04x\n", rap, val); +#endif + switch (rap) { + case BCR_SWS: + if (!(CSR_STOP(s) || CSR_SPND(s))) + return; + val &= ~0x0300; + switch (val & 0x00ff) { + case 0: + val |= 0x0200; + break; + case 1: + val |= 0x0100; + break; + case 2: + case 3: + val |= 0x0300; + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, "pcnet: Bad SWSTYLE=0x%02x\n", + val & 0xff); + val = 0x0200; + break; + } +#ifdef PCNET_DEBUG + printf("BCR_SWS=0x%04x\n", val); +#endif + /* fall through */ + case BCR_LNKST: + case BCR_LED1: + case BCR_LED2: + case BCR_LED3: + case BCR_MC: + case BCR_FDC: + case BCR_BSBC: + case BCR_EECAS: + case BCR_PLAT: + s->bcr[rap] = val; + break; + default: + break; + } +} + +uint32_t pcnet_bcr_readw(PCNetState *s, uint32_t rap) +{ + uint32_t val; + rap &= 127; + switch (rap) { + case BCR_LNKST: + case BCR_LED1: + case BCR_LED2: + case BCR_LED3: + val = s->bcr[rap] & ~0x8000; + val |= (val & 0x017f & s->lnkst) ? 0x8000 : 0; + break; + default: + val = rap < 32 ? s->bcr[rap] : 0; + break; + } +#ifdef PCNET_DEBUG_BCR + printf("pcnet_bcr_readw rap=%d val=0x%04x\n", rap, val); +#endif + return val; +} + +void pcnet_h_reset(void *opaque) +{ + PCNetState *s = opaque; + + s->bcr[BCR_MSRDA] = 0x0005; + s->bcr[BCR_MSWRA] = 0x0005; + s->bcr[BCR_MC ] = 0x0002; + s->bcr[BCR_LNKST] = 0x00c0; + s->bcr[BCR_LED1 ] = 0x0084; + s->bcr[BCR_LED2 ] = 0x0088; + s->bcr[BCR_LED3 ] = 0x0090; + s->bcr[BCR_FDC ] = 0x0000; + s->bcr[BCR_BSBC ] = 0x9001; + s->bcr[BCR_EECAS] = 0x0002; + s->bcr[BCR_SWS ] = 0x0200; + s->bcr[BCR_PLAT ] = 0xff06; + + pcnet_s_reset(s); + pcnet_update_irq(s); + pcnet_poll_timer(s); +} + +void pcnet_ioport_writew(void *opaque, uint32_t addr, uint32_t val) +{ + PCNetState *s = opaque; + pcnet_poll_timer(s); +#ifdef PCNET_DEBUG_IO + printf("pcnet_ioport_writew addr=0x%08x val=0x%04x\n", addr, val); +#endif + if (!BCR_DWIO(s)) { + switch (addr & 0x0f) { + case 0x00: /* RDP */ + pcnet_csr_writew(s, s->rap, val); + break; + case 0x02: + s->rap = val & 0x7f; + break; + case 0x06: + pcnet_bcr_writew(s, s->rap, val); + break; + } + } + pcnet_update_irq(s); +} + +uint32_t pcnet_ioport_readw(void *opaque, uint32_t addr) +{ + PCNetState *s = opaque; + uint32_t val = -1; + pcnet_poll_timer(s); + if (!BCR_DWIO(s)) { + switch (addr & 0x0f) { + case 0x00: /* RDP */ + val = pcnet_csr_readw(s, s->rap); + break; + case 0x02: + val = s->rap; + break; + case 0x04: + pcnet_s_reset(s); + val = 0; + break; + case 0x06: + val = pcnet_bcr_readw(s, s->rap); + break; + } + } + pcnet_update_irq(s); +#ifdef PCNET_DEBUG_IO + printf("pcnet_ioport_readw addr=0x%08x val=0x%04x\n", addr, val & 0xffff); +#endif + return val; +} + +void pcnet_ioport_writel(void *opaque, uint32_t addr, uint32_t val) +{ + PCNetState *s = opaque; + pcnet_poll_timer(s); +#ifdef PCNET_DEBUG_IO + printf("pcnet_ioport_writel addr=0x%08x val=0x%08x\n", addr, val); +#endif + if (BCR_DWIO(s)) { + switch (addr & 0x0f) { + case 0x00: /* RDP */ + pcnet_csr_writew(s, s->rap, val & 0xffff); + break; + case 0x04: + s->rap = val & 0x7f; + 
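+            /* RAP is only 7 bits wide, selecting CSR/BCR 0-127 */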
break; + case 0x0c: + pcnet_bcr_writew(s, s->rap, val & 0xffff); + break; + } + } else if ((addr & 0x0f) == 0) { + /* switch device to dword i/o mode */ + pcnet_bcr_writew(s, BCR_BSBC, pcnet_bcr_readw(s, BCR_BSBC) | 0x0080); +#ifdef PCNET_DEBUG_IO + printf("device switched into dword i/o mode\n"); +#endif + } + pcnet_update_irq(s); +} + +uint32_t pcnet_ioport_readl(void *opaque, uint32_t addr) +{ + PCNetState *s = opaque; + uint32_t val = -1; + pcnet_poll_timer(s); + if (BCR_DWIO(s)) { + switch (addr & 0x0f) { + case 0x00: /* RDP */ + val = pcnet_csr_readw(s, s->rap); + break; + case 0x04: + val = s->rap; + break; + case 0x08: + pcnet_s_reset(s); + val = 0; + break; + case 0x0c: + val = pcnet_bcr_readw(s, s->rap); + break; + } + } + pcnet_update_irq(s); +#ifdef PCNET_DEBUG_IO + printf("pcnet_ioport_readl addr=0x%08x val=0x%08x\n", addr, val); +#endif + return val; +} + +static bool is_version_2(void *opaque, int version_id) +{ + return version_id == 2; +} + +const VMStateDescription vmstate_pcnet = { + .name = "pcnet", + .version_id = 3, + .minimum_version_id = 2, + .fields = (VMStateField[]) { + VMSTATE_INT32(rap, PCNetState), + VMSTATE_INT32(isr, PCNetState), + VMSTATE_INT32(lnkst, PCNetState), + VMSTATE_UINT32(rdra, PCNetState), + VMSTATE_UINT32(tdra, PCNetState), + VMSTATE_BUFFER(prom, PCNetState), + VMSTATE_UINT16_ARRAY(csr, PCNetState, 128), + VMSTATE_UINT16_ARRAY(bcr, PCNetState, 32), + VMSTATE_UINT64(timer, PCNetState), + VMSTATE_INT32(xmit_pos, PCNetState), + VMSTATE_BUFFER(buffer, PCNetState), + VMSTATE_UNUSED_TEST(is_version_2, 4), + VMSTATE_INT32(tx_busy, PCNetState), + VMSTATE_TIMER_PTR(poll_timer, PCNetState), + VMSTATE_END_OF_LIST() + } +}; + +void pcnet_common_init(DeviceState *dev, PCNetState *s, NetClientInfo *info) +{ + int i; + uint16_t checksum; + + s->poll_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, pcnet_poll_timer, s); + + qemu_macaddr_default_if_unset(&s->conf.macaddr); + s->nic = qemu_new_nic(info, &s->conf, object_get_typename(OBJECT(dev)), dev->id, s); + qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a); + + /* Initialize the PROM */ + + /* + Datasheet: http://pdfdata.datasheetsite.com/web/24528/AM79C970A.pdf + page 95 + */ + memcpy(s->prom, s->conf.macaddr.a, 6); + /* Reserved Location: must be 00h */ + s->prom[6] = s->prom[7] = 0x00; + /* Reserved Location: must be 00h */ + s->prom[8] = 0x00; + /* Hardware ID: must be 11h if compatibility to AMD drivers is desired */ + s->prom[9] = 0x11; + /* User programmable space, init with 0 */ + s->prom[10] = s->prom[11] = 0x00; + /* LSByte of two-byte checksum, which is the sum of bytes 00h-0Bh + and bytes 0Eh and 0Fh, must therefore be initialized with 0! 
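+       (The loop below sums all 16 PROM bytes, which matches this rule
+       because bytes 0Ch-0Dh are still zero at that point; the 16-bit sum
+       is then stored little-endian at bytes 0Ch-0Dh.)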
*/ + s->prom[12] = s->prom[13] = 0x00; + /* Must be ASCII W (57h) if compatibility to AMD + driver software is desired */ + s->prom[14] = s->prom[15] = 0x57; + + for (i = 0, checksum = 0; i < 16; i++) { + checksum += s->prom[i]; + } + *(uint16_t *)&s->prom[12] = cpu_to_le16(checksum); + + s->lnkst = 0x40; /* initial link state: up */ +} diff --git a/hw/net/pcnet.h b/hw/net/pcnet.h new file mode 100644 index 000000000..f49b213c5 --- /dev/null +++ b/hw/net/pcnet.h @@ -0,0 +1,68 @@ +#ifndef HW_PCNET_H +#define HW_PCNET_H + +#define PCNET_IOPORT_SIZE 0x20 +#define PCNET_PNPMMIO_SIZE 0x20 + +#define PCNET_LOOPTEST_CRC 1 +#define PCNET_LOOPTEST_NOCRC 2 + +#include "exec/memory.h" +#include "hw/irq.h" + +/* BUS CONFIGURATION REGISTERS */ +#define BCR_MSRDA 0 +#define BCR_MSWRA 1 +#define BCR_MC 2 +#define BCR_LNKST 4 +#define BCR_LED1 5 +#define BCR_LED2 6 +#define BCR_LED3 7 +#define BCR_FDC 9 +#define BCR_BSBC 18 +#define BCR_EECAS 19 +#define BCR_SWS 20 +#define BCR_PLAT 22 + +#define BCR_TMAULOOP(S) !!((S)->bcr[BCR_MC ] & 0x4000) +#define BCR_APROMWE(S) !!((S)->bcr[BCR_MC ] & 0x0100) +#define BCR_DWIO(S) !!((S)->bcr[BCR_BSBC] & 0x0080) +#define BCR_SSIZE32(S) !!((S)->bcr[BCR_SWS ] & 0x0100) +#define BCR_SWSTYLE(S) ((S)->bcr[BCR_SWS ] & 0x00FF) + +typedef struct PCNetState_st PCNetState; + +struct PCNetState_st { + NICState *nic; + NICConf conf; + QEMUTimer *poll_timer; + int rap, isr, lnkst; + uint32_t rdra, tdra; + uint8_t prom[16]; + uint16_t csr[128]; + uint16_t bcr[32]; + int xmit_pos; + uint64_t timer; + MemoryRegion mmio; + uint8_t buffer[4096]; + qemu_irq irq; + void (*phys_mem_read)(void *dma_opaque, hwaddr addr, + uint8_t *buf, int len, int do_bswap); + void (*phys_mem_write)(void *dma_opaque, hwaddr addr, + uint8_t *buf, int len, int do_bswap); + DeviceState *dma_opaque; + int tx_busy; + int looptest; +}; + +void pcnet_h_reset(void *opaque); +void pcnet_ioport_writew(void *opaque, uint32_t addr, uint32_t val); +uint32_t pcnet_ioport_readw(void *opaque, uint32_t addr); +void pcnet_ioport_writel(void *opaque, uint32_t addr, uint32_t val); +uint32_t pcnet_ioport_readl(void *opaque, uint32_t addr); +uint32_t pcnet_bcr_readw(PCNetState *s, uint32_t rap); +ssize_t pcnet_receive(NetClientState *nc, const uint8_t *buf, size_t size_); +void pcnet_set_link_status(NetClientState *nc); +void pcnet_common_init(DeviceState *dev, PCNetState *s, NetClientInfo *info); +extern const VMStateDescription vmstate_pcnet; +#endif diff --git a/hw/net/rocker/qmp-norocker.c b/hw/net/rocker/qmp-norocker.c new file mode 100644 index 000000000..5ef4f9324 --- /dev/null +++ b/hw/net/rocker/qmp-norocker.c @@ -0,0 +1,51 @@ +/* + * QMP Target options - Commands handled based on a target config + * versus a host config + * + * Copyright (c) 2015 David Ahern + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qapi/qapi-commands-rocker.h" +#include "qapi/qmp/qerror.h" + +RockerSwitch *qmp_query_rocker(const char *name, Error **errp) +{ + error_setg(errp, QERR_FEATURE_DISABLED, "rocker"); + return NULL; +}; + +RockerPortList *qmp_query_rocker_ports(const char *name, Error **errp) +{ + error_setg(errp, QERR_FEATURE_DISABLED, "rocker"); + return NULL; +}; + +RockerOfDpaFlowList *qmp_query_rocker_of_dpa_flows(const char *name, + bool has_tbl_id, + uint32_t tbl_id, + Error **errp) +{ + error_setg(errp, QERR_FEATURE_DISABLED, "rocker"); + return NULL; +}; + +RockerOfDpaGroupList *qmp_query_rocker_of_dpa_groups(const char *name, + bool has_type, + uint8_t type, + Error **errp) +{ + error_setg(errp, QERR_FEATURE_DISABLED, "rocker"); + return NULL; +}; diff --git a/hw/net/rocker/rocker.c b/hw/net/rocker/rocker.c new file mode 100644 index 000000000..31f2340fb --- /dev/null +++ b/hw/net/rocker/rocker.c @@ -0,0 +1,1535 @@ +/* + * QEMU rocker switch emulation - PCI device + * + * Copyright (c) 2014 Scott Feldman + * Copyright (c) 2014 Jiri Pirko + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include "qemu/osdep.h" +#include "hw/pci/pci.h" +#include "hw/qdev-properties.h" +#include "hw/qdev-properties-system.h" +#include "migration/vmstate.h" +#include "hw/pci/msix.h" +#include "net/net.h" +#include "net/eth.h" +#include "qapi/error.h" +#include "qapi/qapi-commands-rocker.h" +#include "qemu/iov.h" +#include "qemu/module.h" +#include "qemu/bitops.h" +#include "qemu/log.h" + +#include "rocker.h" +#include "rocker_hw.h" +#include "rocker_fp.h" +#include "rocker_desc.h" +#include "rocker_tlv.h" +#include "rocker_world.h" +#include "rocker_of_dpa.h" + +struct rocker { + /* private */ + PCIDevice parent_obj; + /* public */ + + MemoryRegion mmio; + MemoryRegion msix_bar; + + /* switch configuration */ + char *name; /* switch name */ + char *world_name; /* world name */ + uint32_t fp_ports; /* front-panel port count */ + NICPeers *fp_ports_peers; + MACAddr fp_start_macaddr; /* front-panel port 0 mac addr */ + uint64_t switch_id; /* switch id */ + + /* front-panel ports */ + FpPort *fp_port[ROCKER_FP_PORTS_MAX]; + + /* register backings */ + uint32_t test_reg; + uint64_t test_reg64; + dma_addr_t test_dma_addr; + uint32_t test_dma_size; + uint64_t lower32; /* lower 32-bit val in 2-part 64-bit access */ + + /* desc rings */ + DescRing **rings; + + /* switch worlds */ + World *worlds[ROCKER_WORLD_TYPE_MAX]; + World *world_dflt; + + QLIST_ENTRY(rocker) next; +}; + +static QLIST_HEAD(, rocker) rockers; + +Rocker *rocker_find(const char *name) +{ + Rocker *r; + + QLIST_FOREACH(r, &rockers, next) + if (strcmp(r->name, name) == 0) { + return r; + } + + return NULL; +} + +World *rocker_get_world(Rocker *r, enum rocker_world_type type) +{ + if (type < ROCKER_WORLD_TYPE_MAX) { + return r->worlds[type]; + } + return NULL; +} + +RockerSwitch *qmp_query_rocker(const char *name, Error **errp) +{ + RockerSwitch *rocker; + Rocker *r; + + r = rocker_find(name); + if (!r) { + error_setg(errp, "rocker %s 
not found", name); + return NULL; + } + + rocker = g_new0(RockerSwitch, 1); + rocker->name = g_strdup(r->name); + rocker->id = r->switch_id; + rocker->ports = r->fp_ports; + + return rocker; +} + +RockerPortList *qmp_query_rocker_ports(const char *name, Error **errp) +{ + RockerPortList *list = NULL; + Rocker *r; + int i; + + r = rocker_find(name); + if (!r) { + error_setg(errp, "rocker %s not found", name); + return NULL; + } + + for (i = r->fp_ports - 1; i >= 0; i--) { + QAPI_LIST_PREPEND(list, fp_port_get_info(r->fp_port[i])); + } + + return list; +} + +uint32_t rocker_fp_ports(Rocker *r) +{ + return r->fp_ports; +} + +static uint32_t rocker_get_pport_by_tx_ring(Rocker *r, + DescRing *ring) +{ + return (desc_ring_index(ring) - 2) / 2 + 1; +} + +static int tx_consume(Rocker *r, DescInfo *info) +{ + PCIDevice *dev = PCI_DEVICE(r); + char *buf = desc_get_buf(info, true); + RockerTlv *tlv_frag; + RockerTlv *tlvs[ROCKER_TLV_TX_MAX + 1]; + struct iovec iov[ROCKER_TX_FRAGS_MAX] = { { 0, }, }; + uint32_t pport; + uint32_t port; + uint16_t tx_offload = ROCKER_TX_OFFLOAD_NONE; + uint16_t tx_l3_csum_off = 0; + uint16_t tx_tso_mss = 0; + uint16_t tx_tso_hdr_len = 0; + int iovcnt = 0; + int err = ROCKER_OK; + int rem; + int i; + + if (!buf) { + return -ROCKER_ENXIO; + } + + rocker_tlv_parse(tlvs, ROCKER_TLV_TX_MAX, buf, desc_tlv_size(info)); + + if (!tlvs[ROCKER_TLV_TX_FRAGS]) { + return -ROCKER_EINVAL; + } + + pport = rocker_get_pport_by_tx_ring(r, desc_get_ring(info)); + if (!fp_port_from_pport(pport, &port)) { + return -ROCKER_EINVAL; + } + + if (tlvs[ROCKER_TLV_TX_OFFLOAD]) { + tx_offload = rocker_tlv_get_u8(tlvs[ROCKER_TLV_TX_OFFLOAD]); + } + + switch (tx_offload) { + case ROCKER_TX_OFFLOAD_L3_CSUM: + if (!tlvs[ROCKER_TLV_TX_L3_CSUM_OFF]) { + return -ROCKER_EINVAL; + } + break; + case ROCKER_TX_OFFLOAD_TSO: + if (!tlvs[ROCKER_TLV_TX_TSO_MSS] || + !tlvs[ROCKER_TLV_TX_TSO_HDR_LEN]) { + return -ROCKER_EINVAL; + } + break; + } + + if (tlvs[ROCKER_TLV_TX_L3_CSUM_OFF]) { + tx_l3_csum_off = rocker_tlv_get_le16(tlvs[ROCKER_TLV_TX_L3_CSUM_OFF]); + qemu_log_mask(LOG_UNIMP, "rocker %s: L3 not implemented" + " (cksum off: %u)\n", + __func__, tx_l3_csum_off); + } + + if (tlvs[ROCKER_TLV_TX_TSO_MSS]) { + tx_tso_mss = rocker_tlv_get_le16(tlvs[ROCKER_TLV_TX_TSO_MSS]); + qemu_log_mask(LOG_UNIMP, "rocker %s: TSO not implemented (MSS: %u)\n", + __func__, tx_tso_mss); + } + + if (tlvs[ROCKER_TLV_TX_TSO_HDR_LEN]) { + tx_tso_hdr_len = rocker_tlv_get_le16(tlvs[ROCKER_TLV_TX_TSO_HDR_LEN]); + qemu_log_mask(LOG_UNIMP, "rocker %s: TSO not implemented" + " (hdr length: %u)\n", + __func__, tx_tso_hdr_len); + } + + rocker_tlv_for_each_nested(tlv_frag, tlvs[ROCKER_TLV_TX_FRAGS], rem) { + hwaddr frag_addr; + uint16_t frag_len; + + if (rocker_tlv_type(tlv_frag) != ROCKER_TLV_TX_FRAG) { + err = -ROCKER_EINVAL; + goto err_bad_attr; + } + + rocker_tlv_parse_nested(tlvs, ROCKER_TLV_TX_FRAG_ATTR_MAX, tlv_frag); + + if (!tlvs[ROCKER_TLV_TX_FRAG_ATTR_ADDR] || + !tlvs[ROCKER_TLV_TX_FRAG_ATTR_LEN]) { + err = -ROCKER_EINVAL; + goto err_bad_attr; + } + + frag_addr = rocker_tlv_get_le64(tlvs[ROCKER_TLV_TX_FRAG_ATTR_ADDR]); + frag_len = rocker_tlv_get_le16(tlvs[ROCKER_TLV_TX_FRAG_ATTR_LEN]); + + if (iovcnt >= ROCKER_TX_FRAGS_MAX) { + goto err_too_many_frags; + } + iov[iovcnt].iov_len = frag_len; + iov[iovcnt].iov_base = g_malloc(frag_len); + + pci_dma_read(dev, frag_addr, iov[iovcnt].iov_base, + iov[iovcnt].iov_len); + + iovcnt++; + } + + err = fp_port_eg(r->fp_port[port], iov, iovcnt); + +err_too_many_frags: +err_bad_attr: + for (i = 0; i 
< ROCKER_TX_FRAGS_MAX; i++) { + g_free(iov[i].iov_base); + } + + return err; +} + +static int cmd_get_port_settings(Rocker *r, + DescInfo *info, char *buf, + RockerTlv *cmd_info_tlv) +{ + RockerTlv *tlvs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1]; + RockerTlv *nest; + FpPort *fp_port; + uint32_t pport; + uint32_t port; + uint32_t speed; + uint8_t duplex; + uint8_t autoneg; + uint8_t learning; + char *phys_name; + MACAddr macaddr; + enum rocker_world_type mode; + size_t tlv_size; + int pos; + int err; + + rocker_tlv_parse_nested(tlvs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX, + cmd_info_tlv); + + if (!tlvs[ROCKER_TLV_CMD_PORT_SETTINGS_PPORT]) { + return -ROCKER_EINVAL; + } + + pport = rocker_tlv_get_le32(tlvs[ROCKER_TLV_CMD_PORT_SETTINGS_PPORT]); + if (!fp_port_from_pport(pport, &port)) { + return -ROCKER_EINVAL; + } + fp_port = r->fp_port[port]; + + err = fp_port_get_settings(fp_port, &speed, &duplex, &autoneg); + if (err) { + return err; + } + + fp_port_get_macaddr(fp_port, &macaddr); + mode = world_type(fp_port_get_world(fp_port)); + learning = fp_port_get_learning(fp_port); + phys_name = fp_port_get_name(fp_port); + + tlv_size = rocker_tlv_total_size(0) + /* nest */ + rocker_tlv_total_size(sizeof(uint32_t)) + /* pport */ + rocker_tlv_total_size(sizeof(uint32_t)) + /* speed */ + rocker_tlv_total_size(sizeof(uint8_t)) + /* duplex */ + rocker_tlv_total_size(sizeof(uint8_t)) + /* autoneg */ + rocker_tlv_total_size(sizeof(macaddr.a)) + /* macaddr */ + rocker_tlv_total_size(sizeof(uint8_t)) + /* mode */ + rocker_tlv_total_size(sizeof(uint8_t)) + /* learning */ + rocker_tlv_total_size(strlen(phys_name)); + + if (tlv_size > desc_buf_size(info)) { + return -ROCKER_EMSGSIZE; + } + + pos = 0; + nest = rocker_tlv_nest_start(buf, &pos, ROCKER_TLV_CMD_INFO); + rocker_tlv_put_le32(buf, &pos, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT, pport); + rocker_tlv_put_le32(buf, &pos, ROCKER_TLV_CMD_PORT_SETTINGS_SPEED, speed); + rocker_tlv_put_u8(buf, &pos, ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX, duplex); + rocker_tlv_put_u8(buf, &pos, ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG, autoneg); + rocker_tlv_put(buf, &pos, ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR, + sizeof(macaddr.a), macaddr.a); + rocker_tlv_put_u8(buf, &pos, ROCKER_TLV_CMD_PORT_SETTINGS_MODE, mode); + rocker_tlv_put_u8(buf, &pos, ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING, + learning); + rocker_tlv_put(buf, &pos, ROCKER_TLV_CMD_PORT_SETTINGS_PHYS_NAME, + strlen(phys_name), phys_name); + rocker_tlv_nest_end(buf, &pos, nest); + + return desc_set_buf(info, tlv_size); +} + +static int cmd_set_port_settings(Rocker *r, + RockerTlv *cmd_info_tlv) +{ + RockerTlv *tlvs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1]; + FpPort *fp_port; + uint32_t pport; + uint32_t port; + uint32_t speed; + uint8_t duplex; + uint8_t autoneg; + uint8_t learning; + MACAddr macaddr; + enum rocker_world_type mode; + int err; + + rocker_tlv_parse_nested(tlvs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX, + cmd_info_tlv); + + if (!tlvs[ROCKER_TLV_CMD_PORT_SETTINGS_PPORT]) { + return -ROCKER_EINVAL; + } + + pport = rocker_tlv_get_le32(tlvs[ROCKER_TLV_CMD_PORT_SETTINGS_PPORT]); + if (!fp_port_from_pport(pport, &port)) { + return -ROCKER_EINVAL; + } + fp_port = r->fp_port[port]; + + if (tlvs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED] && + tlvs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX] && + tlvs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG]) { + + speed = rocker_tlv_get_le32(tlvs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED]); + duplex = rocker_tlv_get_u8(tlvs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX]); + autoneg = rocker_tlv_get_u8(tlvs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG]); + 
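+        /* speed, duplex and autoneg can only be changed as a set */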
+        err = fp_port_set_settings(fp_port, speed, duplex, autoneg);
+        if (err) {
+            return err;
+        }
+    }
+
+    if (tlvs[ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR]) {
+        if (rocker_tlv_len(tlvs[ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR]) !=
+            sizeof(macaddr.a)) {
+            return -ROCKER_EINVAL;
+        }
+        memcpy(macaddr.a,
+               rocker_tlv_data(tlvs[ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR]),
+               sizeof(macaddr.a));
+        fp_port_set_macaddr(fp_port, &macaddr);
+    }
+
+    if (tlvs[ROCKER_TLV_CMD_PORT_SETTINGS_MODE]) {
+        mode = rocker_tlv_get_u8(tlvs[ROCKER_TLV_CMD_PORT_SETTINGS_MODE]);
+        if (mode >= ROCKER_WORLD_TYPE_MAX) {
+            return -ROCKER_EINVAL;
+        }
+        /* We don't support world change. */
+        if (!fp_port_check_world(fp_port, r->worlds[mode])) {
+            return -ROCKER_EINVAL;
+        }
+    }
+
+    if (tlvs[ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING]) {
+        learning =
+            rocker_tlv_get_u8(tlvs[ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING]);
+        fp_port_set_learning(fp_port, learning);
+    }
+
+    return ROCKER_OK;
+}
+
+static int cmd_consume(Rocker *r, DescInfo *info)
+{
+    char *buf = desc_get_buf(info, false);
+    RockerTlv *tlvs[ROCKER_TLV_CMD_MAX + 1];
+    RockerTlv *info_tlv;
+    World *world;
+    uint16_t cmd;
+    int err;
+
+    if (!buf) {
+        return -ROCKER_ENXIO;
+    }
+
+    rocker_tlv_parse(tlvs, ROCKER_TLV_CMD_MAX, buf, desc_tlv_size(info));
+
+    if (!tlvs[ROCKER_TLV_CMD_TYPE] || !tlvs[ROCKER_TLV_CMD_INFO]) {
+        return -ROCKER_EINVAL;
+    }
+
+    cmd = rocker_tlv_get_le16(tlvs[ROCKER_TLV_CMD_TYPE]);
+    info_tlv = tlvs[ROCKER_TLV_CMD_INFO];
+
+    /* This might be reworked to something like this:
+     * Every world will have an array of command handlers from
+     * ROCKER_TLV_CMD_TYPE_UNSPEC to ROCKER_TLV_CMD_TYPE_MAX. It is
+     * up to each world to implement whatever commands it wants.
+     * A world can reference "generic" commands such as
+     * cmd_set_port_settings or cmd_get_port_settings.
+     */
+
+    switch (cmd) {
+    case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD:
+    case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD:
+    case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL:
+    case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_GET_STATS:
+    case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD:
+    case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD:
+    case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL:
+    case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_GET_STATS:
+        world = r->worlds[ROCKER_WORLD_TYPE_OF_DPA];
+        err = world_do_cmd(world, info, buf, cmd, info_tlv);
+        break;
+    case ROCKER_TLV_CMD_TYPE_GET_PORT_SETTINGS:
+        err = cmd_get_port_settings(r, info, buf, info_tlv);
+        break;
+    case ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS:
+        err = cmd_set_port_settings(r, info_tlv);
+        break;
+    default:
+        err = -ROCKER_EINVAL;
+        break;
+    }
+
+    return err;
+}
+
+static void rocker_msix_irq(Rocker *r, unsigned vector)
+{
+    PCIDevice *dev = PCI_DEVICE(r);
+
+    DPRINTF("MSI-X notify request for vector %d\n", vector);
+    if (vector >= ROCKER_MSIX_VEC_COUNT(r->fp_ports)) {
+        DPRINTF("incorrect vector %d\n", vector);
+        return;
+    }
+    msix_notify(dev, vector);
+}
+
+int rocker_event_link_changed(Rocker *r, uint32_t pport, bool link_up)
+{
+    DescRing *ring = r->rings[ROCKER_RING_EVENT];
+    DescInfo *info = desc_ring_fetch_desc(ring);
+    RockerTlv *nest;
+    char *buf;
+    size_t tlv_size;
+    int pos;
+    int err;
+
+    if (!info) {
+        return -ROCKER_ENOBUFS;
+    }
+
+    tlv_size = rocker_tlv_total_size(sizeof(uint16_t)) +  /* event type */
+               rocker_tlv_total_size(0) +                 /* nest */
+               rocker_tlv_total_size(sizeof(uint32_t)) +  /* pport */
+               rocker_tlv_total_size(sizeof(uint8_t));    /* link up */
+
+    if (tlv_size > desc_buf_size(info)) {
+        err = -ROCKER_EMSGSIZE;
+        goto err_too_big;
+    }
+
+    buf = desc_get_buf(info, false);
+    if (!buf) {
+        err =
-ROCKER_ENOMEM; + goto err_no_mem; + } + + pos = 0; + rocker_tlv_put_le32(buf, &pos, ROCKER_TLV_EVENT_TYPE, + ROCKER_TLV_EVENT_TYPE_LINK_CHANGED); + nest = rocker_tlv_nest_start(buf, &pos, ROCKER_TLV_EVENT_INFO); + rocker_tlv_put_le32(buf, &pos, ROCKER_TLV_EVENT_LINK_CHANGED_PPORT, pport); + rocker_tlv_put_u8(buf, &pos, ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP, + link_up ? 1 : 0); + rocker_tlv_nest_end(buf, &pos, nest); + + err = desc_set_buf(info, tlv_size); + +err_too_big: +err_no_mem: + if (desc_ring_post_desc(ring, err)) { + rocker_msix_irq(r, ROCKER_MSIX_VEC_EVENT); + } + + return err; +} + +int rocker_event_mac_vlan_seen(Rocker *r, uint32_t pport, uint8_t *addr, + uint16_t vlan_id) +{ + DescRing *ring = r->rings[ROCKER_RING_EVENT]; + DescInfo *info; + FpPort *fp_port; + uint32_t port; + RockerTlv *nest; + char *buf; + size_t tlv_size; + int pos; + int err; + + if (!fp_port_from_pport(pport, &port)) { + return -ROCKER_EINVAL; + } + fp_port = r->fp_port[port]; + if (!fp_port_get_learning(fp_port)) { + return ROCKER_OK; + } + + info = desc_ring_fetch_desc(ring); + if (!info) { + return -ROCKER_ENOBUFS; + } + + tlv_size = rocker_tlv_total_size(sizeof(uint16_t)) + /* event type */ + rocker_tlv_total_size(0) + /* nest */ + rocker_tlv_total_size(sizeof(uint32_t)) + /* pport */ + rocker_tlv_total_size(ETH_ALEN) + /* mac addr */ + rocker_tlv_total_size(sizeof(uint16_t)); /* vlan_id */ + + if (tlv_size > desc_buf_size(info)) { + err = -ROCKER_EMSGSIZE; + goto err_too_big; + } + + buf = desc_get_buf(info, false); + if (!buf) { + err = -ROCKER_ENOMEM; + goto err_no_mem; + } + + pos = 0; + rocker_tlv_put_le32(buf, &pos, ROCKER_TLV_EVENT_TYPE, + ROCKER_TLV_EVENT_TYPE_MAC_VLAN_SEEN); + nest = rocker_tlv_nest_start(buf, &pos, ROCKER_TLV_EVENT_INFO); + rocker_tlv_put_le32(buf, &pos, ROCKER_TLV_EVENT_MAC_VLAN_PPORT, pport); + rocker_tlv_put(buf, &pos, ROCKER_TLV_EVENT_MAC_VLAN_MAC, ETH_ALEN, addr); + rocker_tlv_put_u16(buf, &pos, ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID, vlan_id); + rocker_tlv_nest_end(buf, &pos, nest); + + err = desc_set_buf(info, tlv_size); + +err_too_big: +err_no_mem: + if (desc_ring_post_desc(ring, err)) { + rocker_msix_irq(r, ROCKER_MSIX_VEC_EVENT); + } + + return err; +} + +static DescRing *rocker_get_rx_ring_by_pport(Rocker *r, + uint32_t pport) +{ + return r->rings[(pport - 1) * 2 + 3]; +} + +int rx_produce(World *world, uint32_t pport, + const struct iovec *iov, int iovcnt, uint8_t copy_to_cpu) +{ + Rocker *r = world_rocker(world); + PCIDevice *dev = (PCIDevice *)r; + DescRing *ring = rocker_get_rx_ring_by_pport(r, pport); + DescInfo *info = desc_ring_fetch_desc(ring); + char *data; + size_t data_size = iov_size(iov, iovcnt); + char *buf; + uint16_t rx_flags = 0; + uint16_t rx_csum = 0; + size_t tlv_size; + RockerTlv *tlvs[ROCKER_TLV_RX_MAX + 1]; + hwaddr frag_addr; + uint16_t frag_max_len; + int pos; + int err; + + if (!info) { + return -ROCKER_ENOBUFS; + } + + buf = desc_get_buf(info, false); + if (!buf) { + err = -ROCKER_ENXIO; + goto out; + } + rocker_tlv_parse(tlvs, ROCKER_TLV_RX_MAX, buf, desc_tlv_size(info)); + + if (!tlvs[ROCKER_TLV_RX_FRAG_ADDR] || + !tlvs[ROCKER_TLV_RX_FRAG_MAX_LEN]) { + err = -ROCKER_EINVAL; + goto out; + } + + frag_addr = rocker_tlv_get_le64(tlvs[ROCKER_TLV_RX_FRAG_ADDR]); + frag_max_len = rocker_tlv_get_le16(tlvs[ROCKER_TLV_RX_FRAG_MAX_LEN]); + + if (data_size > frag_max_len) { + err = -ROCKER_EMSGSIZE; + goto out; + } + + if (copy_to_cpu) { + rx_flags |= ROCKER_RX_FLAGS_FWD_OFFLOAD; + } + + /* XXX calc rx flags/csum */ + + tlv_size = 
rocker_tlv_total_size(sizeof(uint16_t)) +  /* flags */
+               rocker_tlv_total_size(sizeof(uint16_t)) +  /* csum */
+               rocker_tlv_total_size(sizeof(uint64_t)) +  /* frag addr */
+               rocker_tlv_total_size(sizeof(uint16_t)) +  /* frag max len */
+               rocker_tlv_total_size(sizeof(uint16_t));   /* frag len */
+
+    if (tlv_size > desc_buf_size(info)) {
+        err = -ROCKER_EMSGSIZE;
+        goto out;
+    }
+
+    /* TODO:
+     * The iov dma write can be optimized in a similar way to what
+     * e1000 does in e1000_receive_iov. But maybe it would make sense
+     * to introduce a generic helper iov_dma_write.
+     */
+
+    data = g_malloc(data_size);
+
+    iov_to_buf(iov, iovcnt, 0, data, data_size);
+    pci_dma_write(dev, frag_addr, data, data_size);
+    g_free(data);
+
+    pos = 0;
+    rocker_tlv_put_le16(buf, &pos, ROCKER_TLV_RX_FLAGS, rx_flags);
+    rocker_tlv_put_le16(buf, &pos, ROCKER_TLV_RX_CSUM, rx_csum);
+    rocker_tlv_put_le64(buf, &pos, ROCKER_TLV_RX_FRAG_ADDR, frag_addr);
+    rocker_tlv_put_le16(buf, &pos, ROCKER_TLV_RX_FRAG_MAX_LEN, frag_max_len);
+    rocker_tlv_put_le16(buf, &pos, ROCKER_TLV_RX_FRAG_LEN, data_size);
+
+    err = desc_set_buf(info, tlv_size);
+
+out:
+    if (desc_ring_post_desc(ring, err)) {
+        rocker_msix_irq(r, ROCKER_MSIX_VEC_RX(pport - 1));
+    }
+
+    return err;
+}
+
+int rocker_port_eg(Rocker *r, uint32_t pport,
+                   const struct iovec *iov, int iovcnt)
+{
+    FpPort *fp_port;
+    uint32_t port;
+
+    if (!fp_port_from_pport(pport, &port)) {
+        return -ROCKER_EINVAL;
+    }
+
+    fp_port = r->fp_port[port];
+
+    return fp_port_eg(fp_port, iov, iovcnt);
+}
+
+static void rocker_test_dma_ctrl(Rocker *r, uint32_t val)
+{
+    PCIDevice *dev = PCI_DEVICE(r);
+    char *buf;
+    int i;
+
+    buf = g_malloc(r->test_dma_size);
+
+    switch (val) {
+    case ROCKER_TEST_DMA_CTRL_CLEAR:
+        memset(buf, 0, r->test_dma_size);
+        break;
+    case ROCKER_TEST_DMA_CTRL_FILL:
+        memset(buf, 0x96, r->test_dma_size);
+        break;
+    case ROCKER_TEST_DMA_CTRL_INVERT:
+        pci_dma_read(dev, r->test_dma_addr, buf, r->test_dma_size);
+        for (i = 0; i < r->test_dma_size; i++) {
+            buf[i] = ~buf[i];
+        }
+        break;
+    default:
+        DPRINTF("unknown test dma control val=0x%08x\n", val);
+        goto err_out;
+    }
+    pci_dma_write(dev, r->test_dma_addr, buf, r->test_dma_size);
+
+    rocker_msix_irq(r, ROCKER_MSIX_VEC_TEST);
+
+err_out:
+    g_free(buf);
+}
+
+static void rocker_reset(DeviceState *dev);
+
+static void rocker_control(Rocker *r, uint32_t val)
+{
+    if (val & ROCKER_CONTROL_RESET) {
+        rocker_reset(DEVICE(r));
+    }
+}
+
+static int rocker_pci_ring_count(Rocker *r)
+{
+    /* There are:
+     * - command ring
+     * - event ring
+     * - one tx and one rx ring per port
+     */
+    return 2 + (2 * r->fp_ports);
+}
+
+static bool rocker_addr_is_desc_reg(Rocker *r, hwaddr addr)
+{
+    hwaddr start = ROCKER_DMA_DESC_BASE;
+    hwaddr end = start + (ROCKER_DMA_DESC_SIZE * rocker_pci_ring_count(r));
+
+    return addr >= start && addr < end;
+}
+
+static void rocker_port_phys_enable_write(Rocker *r, uint64_t new)
+{
+    int i;
+    bool old_enabled;
+    bool new_enabled;
+    FpPort *fp_port;
+
+    for (i = 0; i < r->fp_ports; i++) {
+        fp_port = r->fp_port[i];
+        old_enabled = fp_port_enabled(fp_port);
+        new_enabled = (new >> (i + 1)) & 0x1;
+        if (new_enabled == old_enabled) {
+            continue;
+        }
+        if (new_enabled) {
+            fp_port_enable(r->fp_port[i]);
+        } else {
+            fp_port_disable(r->fp_port[i]);
+        }
+    }
+}
+
+static void rocker_io_writel(void *opaque, hwaddr addr, uint32_t val)
+{
+    Rocker *r = opaque;
+
+    if (rocker_addr_is_desc_reg(r, addr)) {
+        unsigned index = ROCKER_RING_INDEX(addr);
+        unsigned offset = addr & ROCKER_DMA_DESC_MASK;
+
+        switch (offset) {
+        case
ROCKER_DMA_DESC_ADDR_OFFSET: + r->lower32 = (uint64_t)val; + break; + case ROCKER_DMA_DESC_ADDR_OFFSET + 4: + desc_ring_set_base_addr(r->rings[index], + ((uint64_t)val) << 32 | r->lower32); + r->lower32 = 0; + break; + case ROCKER_DMA_DESC_SIZE_OFFSET: + desc_ring_set_size(r->rings[index], val); + break; + case ROCKER_DMA_DESC_HEAD_OFFSET: + if (desc_ring_set_head(r->rings[index], val)) { + rocker_msix_irq(r, desc_ring_get_msix_vector(r->rings[index])); + } + break; + case ROCKER_DMA_DESC_CTRL_OFFSET: + desc_ring_set_ctrl(r->rings[index], val); + break; + case ROCKER_DMA_DESC_CREDITS_OFFSET: + if (desc_ring_ret_credits(r->rings[index], val)) { + rocker_msix_irq(r, desc_ring_get_msix_vector(r->rings[index])); + } + break; + default: + DPRINTF("not implemented dma reg write(l) addr=0x" TARGET_FMT_plx + " val=0x%08x (ring %d, addr=0x%02x)\n", + addr, val, index, offset); + break; + } + return; + } + + switch (addr) { + case ROCKER_TEST_REG: + r->test_reg = val; + break; + case ROCKER_TEST_REG64: + case ROCKER_TEST_DMA_ADDR: + case ROCKER_PORT_PHYS_ENABLE: + r->lower32 = (uint64_t)val; + break; + case ROCKER_TEST_REG64 + 4: + r->test_reg64 = ((uint64_t)val) << 32 | r->lower32; + r->lower32 = 0; + break; + case ROCKER_TEST_IRQ: + rocker_msix_irq(r, val); + break; + case ROCKER_TEST_DMA_SIZE: + r->test_dma_size = val & 0xFFFF; + break; + case ROCKER_TEST_DMA_ADDR + 4: + r->test_dma_addr = ((uint64_t)val) << 32 | r->lower32; + r->lower32 = 0; + break; + case ROCKER_TEST_DMA_CTRL: + rocker_test_dma_ctrl(r, val); + break; + case ROCKER_CONTROL: + rocker_control(r, val); + break; + case ROCKER_PORT_PHYS_ENABLE + 4: + rocker_port_phys_enable_write(r, ((uint64_t)val) << 32 | r->lower32); + r->lower32 = 0; + break; + default: + DPRINTF("not implemented write(l) addr=0x" TARGET_FMT_plx + " val=0x%08x\n", addr, val); + break; + } +} + +static void rocker_io_writeq(void *opaque, hwaddr addr, uint64_t val) +{ + Rocker *r = opaque; + + if (rocker_addr_is_desc_reg(r, addr)) { + unsigned index = ROCKER_RING_INDEX(addr); + unsigned offset = addr & ROCKER_DMA_DESC_MASK; + + switch (offset) { + case ROCKER_DMA_DESC_ADDR_OFFSET: + desc_ring_set_base_addr(r->rings[index], val); + break; + default: + DPRINTF("not implemented dma reg write(q) addr=0x" TARGET_FMT_plx + " val=0x" TARGET_FMT_plx " (ring %d, offset=0x%02x)\n", + addr, val, index, offset); + break; + } + return; + } + + switch (addr) { + case ROCKER_TEST_REG64: + r->test_reg64 = val; + break; + case ROCKER_TEST_DMA_ADDR: + r->test_dma_addr = val; + break; + case ROCKER_PORT_PHYS_ENABLE: + rocker_port_phys_enable_write(r, val); + break; + default: + DPRINTF("not implemented write(q) addr=0x" TARGET_FMT_plx + " val=0x" TARGET_FMT_plx "\n", addr, val); + break; + } +} + +#ifdef DEBUG_ROCKER +#define regname(reg) case (reg): return #reg +static const char *rocker_reg_name(void *opaque, hwaddr addr) +{ + Rocker *r = opaque; + + if (rocker_addr_is_desc_reg(r, addr)) { + unsigned index = ROCKER_RING_INDEX(addr); + unsigned offset = addr & ROCKER_DMA_DESC_MASK; + static char buf[100]; + char ring_name[10]; + + switch (index) { + case 0: + sprintf(ring_name, "cmd"); + break; + case 1: + sprintf(ring_name, "event"); + break; + default: + sprintf(ring_name, "%s-%d", index % 2 ? 
"rx" : "tx", + (index - 2) / 2); + } + + switch (offset) { + case ROCKER_DMA_DESC_ADDR_OFFSET: + sprintf(buf, "Ring[%s] ADDR", ring_name); + return buf; + case ROCKER_DMA_DESC_ADDR_OFFSET+4: + sprintf(buf, "Ring[%s] ADDR+4", ring_name); + return buf; + case ROCKER_DMA_DESC_SIZE_OFFSET: + sprintf(buf, "Ring[%s] SIZE", ring_name); + return buf; + case ROCKER_DMA_DESC_HEAD_OFFSET: + sprintf(buf, "Ring[%s] HEAD", ring_name); + return buf; + case ROCKER_DMA_DESC_TAIL_OFFSET: + sprintf(buf, "Ring[%s] TAIL", ring_name); + return buf; + case ROCKER_DMA_DESC_CTRL_OFFSET: + sprintf(buf, "Ring[%s] CTRL", ring_name); + return buf; + case ROCKER_DMA_DESC_CREDITS_OFFSET: + sprintf(buf, "Ring[%s] CREDITS", ring_name); + return buf; + default: + sprintf(buf, "Ring[%s] ???", ring_name); + return buf; + } + } else { + switch (addr) { + regname(ROCKER_BOGUS_REG0); + regname(ROCKER_BOGUS_REG1); + regname(ROCKER_BOGUS_REG2); + regname(ROCKER_BOGUS_REG3); + regname(ROCKER_TEST_REG); + regname(ROCKER_TEST_REG64); + regname(ROCKER_TEST_REG64+4); + regname(ROCKER_TEST_IRQ); + regname(ROCKER_TEST_DMA_ADDR); + regname(ROCKER_TEST_DMA_ADDR+4); + regname(ROCKER_TEST_DMA_SIZE); + regname(ROCKER_TEST_DMA_CTRL); + regname(ROCKER_CONTROL); + regname(ROCKER_PORT_PHYS_COUNT); + regname(ROCKER_PORT_PHYS_LINK_STATUS); + regname(ROCKER_PORT_PHYS_LINK_STATUS+4); + regname(ROCKER_PORT_PHYS_ENABLE); + regname(ROCKER_PORT_PHYS_ENABLE+4); + regname(ROCKER_SWITCH_ID); + regname(ROCKER_SWITCH_ID+4); + } + } + return "???"; +} +#else +static const char *rocker_reg_name(void *opaque, hwaddr addr) +{ + return NULL; +} +#endif + +static void rocker_mmio_write(void *opaque, hwaddr addr, uint64_t val, + unsigned size) +{ + DPRINTF("Write %s addr " TARGET_FMT_plx + ", size %u, val " TARGET_FMT_plx "\n", + rocker_reg_name(opaque, addr), addr, size, val); + + switch (size) { + case 4: + rocker_io_writel(opaque, addr, val); + break; + case 8: + rocker_io_writeq(opaque, addr, val); + break; + } +} + +static uint64_t rocker_port_phys_link_status(Rocker *r) +{ + int i; + uint64_t status = 0; + + for (i = 0; i < r->fp_ports; i++) { + FpPort *port = r->fp_port[i]; + + if (fp_port_get_link_up(port)) { + status |= 1 << (i + 1); + } + } + return status; +} + +static uint64_t rocker_port_phys_enable_read(Rocker *r) +{ + int i; + uint64_t ret = 0; + + for (i = 0; i < r->fp_ports; i++) { + FpPort *port = r->fp_port[i]; + + if (fp_port_enabled(port)) { + ret |= 1 << (i + 1); + } + } + return ret; +} + +static uint32_t rocker_io_readl(void *opaque, hwaddr addr) +{ + Rocker *r = opaque; + uint32_t ret; + + if (rocker_addr_is_desc_reg(r, addr)) { + unsigned index = ROCKER_RING_INDEX(addr); + unsigned offset = addr & ROCKER_DMA_DESC_MASK; + + switch (offset) { + case ROCKER_DMA_DESC_ADDR_OFFSET: + ret = (uint32_t)desc_ring_get_base_addr(r->rings[index]); + break; + case ROCKER_DMA_DESC_ADDR_OFFSET + 4: + ret = (uint32_t)(desc_ring_get_base_addr(r->rings[index]) >> 32); + break; + case ROCKER_DMA_DESC_SIZE_OFFSET: + ret = desc_ring_get_size(r->rings[index]); + break; + case ROCKER_DMA_DESC_HEAD_OFFSET: + ret = desc_ring_get_head(r->rings[index]); + break; + case ROCKER_DMA_DESC_TAIL_OFFSET: + ret = desc_ring_get_tail(r->rings[index]); + break; + case ROCKER_DMA_DESC_CREDITS_OFFSET: + ret = desc_ring_get_credits(r->rings[index]); + break; + default: + DPRINTF("not implemented dma reg read(l) addr=0x" TARGET_FMT_plx + " (ring %d, addr=0x%02x)\n", addr, index, offset); + ret = 0; + break; + } + return ret; + } + + switch (addr) { + case ROCKER_BOGUS_REG0: + case 
ROCKER_BOGUS_REG1: + case ROCKER_BOGUS_REG2: + case ROCKER_BOGUS_REG3: + ret = 0xDEADBABE; + break; + case ROCKER_TEST_REG: + ret = r->test_reg * 2; + break; + case ROCKER_TEST_REG64: + ret = (uint32_t)(r->test_reg64 * 2); + break; + case ROCKER_TEST_REG64 + 4: + ret = (uint32_t)((r->test_reg64 * 2) >> 32); + break; + case ROCKER_TEST_DMA_SIZE: + ret = r->test_dma_size; + break; + case ROCKER_TEST_DMA_ADDR: + ret = (uint32_t)r->test_dma_addr; + break; + case ROCKER_TEST_DMA_ADDR + 4: + ret = (uint32_t)(r->test_dma_addr >> 32); + break; + case ROCKER_PORT_PHYS_COUNT: + ret = r->fp_ports; + break; + case ROCKER_PORT_PHYS_LINK_STATUS: + ret = (uint32_t)rocker_port_phys_link_status(r); + break; + case ROCKER_PORT_PHYS_LINK_STATUS + 4: + ret = (uint32_t)(rocker_port_phys_link_status(r) >> 32); + break; + case ROCKER_PORT_PHYS_ENABLE: + ret = (uint32_t)rocker_port_phys_enable_read(r); + break; + case ROCKER_PORT_PHYS_ENABLE + 4: + ret = (uint32_t)(rocker_port_phys_enable_read(r) >> 32); + break; + case ROCKER_SWITCH_ID: + ret = (uint32_t)r->switch_id; + break; + case ROCKER_SWITCH_ID + 4: + ret = (uint32_t)(r->switch_id >> 32); + break; + default: + DPRINTF("not implemented read(l) addr=0x" TARGET_FMT_plx "\n", addr); + ret = 0; + break; + } + return ret; +} + +static uint64_t rocker_io_readq(void *opaque, hwaddr addr) +{ + Rocker *r = opaque; + uint64_t ret; + + if (rocker_addr_is_desc_reg(r, addr)) { + unsigned index = ROCKER_RING_INDEX(addr); + unsigned offset = addr & ROCKER_DMA_DESC_MASK; + + switch (addr & ROCKER_DMA_DESC_MASK) { + case ROCKER_DMA_DESC_ADDR_OFFSET: + ret = desc_ring_get_base_addr(r->rings[index]); + break; + default: + DPRINTF("not implemented dma reg read(q) addr=0x" TARGET_FMT_plx + " (ring %d, addr=0x%02x)\n", addr, index, offset); + ret = 0; + break; + } + return ret; + } + + switch (addr) { + case ROCKER_BOGUS_REG0: + case ROCKER_BOGUS_REG2: + ret = 0xDEADBABEDEADBABEULL; + break; + case ROCKER_TEST_REG64: + ret = r->test_reg64 * 2; + break; + case ROCKER_TEST_DMA_ADDR: + ret = r->test_dma_addr; + break; + case ROCKER_PORT_PHYS_LINK_STATUS: + ret = rocker_port_phys_link_status(r); + break; + case ROCKER_PORT_PHYS_ENABLE: + ret = rocker_port_phys_enable_read(r); + break; + case ROCKER_SWITCH_ID: + ret = r->switch_id; + break; + default: + DPRINTF("not implemented read(q) addr=0x" TARGET_FMT_plx "\n", addr); + ret = 0; + break; + } + return ret; +} + +static uint64_t rocker_mmio_read(void *opaque, hwaddr addr, unsigned size) +{ + DPRINTF("Read %s addr " TARGET_FMT_plx ", size %u\n", + rocker_reg_name(opaque, addr), addr, size); + + switch (size) { + case 4: + return rocker_io_readl(opaque, addr); + case 8: + return rocker_io_readq(opaque, addr); + } + + return -1; +} + +static const MemoryRegionOps rocker_mmio_ops = { + .read = rocker_mmio_read, + .write = rocker_mmio_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 8, + }, + .impl = { + .min_access_size = 4, + .max_access_size = 8, + }, +}; + +static void rocker_msix_vectors_unuse(Rocker *r, + unsigned int num_vectors) +{ + PCIDevice *dev = PCI_DEVICE(r); + int i; + + for (i = 0; i < num_vectors; i++) { + msix_vector_unuse(dev, i); + } +} + +static int rocker_msix_vectors_use(Rocker *r, + unsigned int num_vectors) +{ + PCIDevice *dev = PCI_DEVICE(r); + int err; + int i; + + for (i = 0; i < num_vectors; i++) { + err = msix_vector_use(dev, i); + if (err) { + goto rollback; + } + } + return 0; + +rollback: + rocker_msix_vectors_unuse(r, i); + return err; +} + +static 
int rocker_msix_init(Rocker *r, Error **errp) +{ + PCIDevice *dev = PCI_DEVICE(r); + int err; + + err = msix_init(dev, ROCKER_MSIX_VEC_COUNT(r->fp_ports), + &r->msix_bar, + ROCKER_PCI_MSIX_BAR_IDX, ROCKER_PCI_MSIX_TABLE_OFFSET, + &r->msix_bar, + ROCKER_PCI_MSIX_BAR_IDX, ROCKER_PCI_MSIX_PBA_OFFSET, + 0, errp); + if (err) { + return err; + } + + err = rocker_msix_vectors_use(r, ROCKER_MSIX_VEC_COUNT(r->fp_ports)); + if (err) { + goto err_msix_vectors_use; + } + + return 0; + +err_msix_vectors_use: + msix_uninit(dev, &r->msix_bar, &r->msix_bar); + return err; +} + +static void rocker_msix_uninit(Rocker *r) +{ + PCIDevice *dev = PCI_DEVICE(r); + + msix_uninit(dev, &r->msix_bar, &r->msix_bar); + rocker_msix_vectors_unuse(r, ROCKER_MSIX_VEC_COUNT(r->fp_ports)); +} + +static World *rocker_world_type_by_name(Rocker *r, const char *name) +{ + int i; + + for (i = 0; i < ROCKER_WORLD_TYPE_MAX; i++) { + if (strcmp(name, world_name(r->worlds[i])) == 0) { + return r->worlds[i]; + } + } + return NULL; +} + +static void pci_rocker_realize(PCIDevice *dev, Error **errp) +{ + Rocker *r = ROCKER(dev); + const MACAddr zero = { .a = { 0, 0, 0, 0, 0, 0 } }; + const MACAddr dflt = { .a = { 0x52, 0x54, 0x00, 0x12, 0x35, 0x01 } }; + static int sw_index; + int i, err = 0; + + /* allocate worlds */ + + r->worlds[ROCKER_WORLD_TYPE_OF_DPA] = of_dpa_world_alloc(r); + + if (!r->world_name) { + r->world_name = g_strdup(world_name(r->worlds[ROCKER_WORLD_TYPE_OF_DPA])); + } + + r->world_dflt = rocker_world_type_by_name(r, r->world_name); + if (!r->world_dflt) { + error_setg(errp, + "invalid argument requested world %s does not exist", + r->world_name); + goto err_world_type_by_name; + } + + /* set up memory-mapped region at BAR0 */ + + memory_region_init_io(&r->mmio, OBJECT(r), &rocker_mmio_ops, r, + "rocker-mmio", ROCKER_PCI_BAR0_SIZE); + pci_register_bar(dev, ROCKER_PCI_BAR0_IDX, + PCI_BASE_ADDRESS_SPACE_MEMORY, &r->mmio); + + /* set up memory-mapped region for MSI-X */ + + memory_region_init(&r->msix_bar, OBJECT(r), "rocker-msix-bar", + ROCKER_PCI_MSIX_BAR_SIZE); + pci_register_bar(dev, ROCKER_PCI_MSIX_BAR_IDX, + PCI_BASE_ADDRESS_SPACE_MEMORY, &r->msix_bar); + + /* MSI-X init */ + + err = rocker_msix_init(r, errp); + if (err) { + goto err_msix_init; + } + + /* validate switch properties */ + + if (!r->name) { + r->name = g_strdup(TYPE_ROCKER); + } + + if (rocker_find(r->name)) { + error_setg(errp, "%s already exists", r->name); + goto err_duplicate; + } + + /* Rocker name is passed in port name requests to OS with the intention + * that the name is used in interface names. Limit the length of the + * rocker name to avoid naming problems in the OS. 
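Linux, for example, caps interface names at IFNAMSIZ (16 bytes including the terminating NUL).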
Also, adding the + * port number as p# and unganged breakout b#, where # is at most 2 + * digits, so leave room for it too (-1 for string terminator, -3 for + * p# and -3 for b#) + */ +#define ROCKER_IFNAMSIZ 16 +#define MAX_ROCKER_NAME_LEN (ROCKER_IFNAMSIZ - 1 - 3 - 3) + if (strlen(r->name) > MAX_ROCKER_NAME_LEN) { + error_setg(errp, + "name too long; please shorten to at most %d chars", + MAX_ROCKER_NAME_LEN); + goto err_name_too_long; + } + + if (memcmp(&r->fp_start_macaddr, &zero, sizeof(zero)) == 0) { + memcpy(&r->fp_start_macaddr, &dflt, sizeof(dflt)); + r->fp_start_macaddr.a[4] += (sw_index++); + } + + if (!r->switch_id) { + memcpy(&r->switch_id, &r->fp_start_macaddr, + sizeof(r->fp_start_macaddr)); + } + + if (r->fp_ports > ROCKER_FP_PORTS_MAX) { + r->fp_ports = ROCKER_FP_PORTS_MAX; + } + + r->rings = g_new(DescRing *, rocker_pci_ring_count(r)); + + /* Rings are ordered like this: + * - command ring + * - event ring + * - port0 tx ring + * - port0 rx ring + * - port1 tx ring + * - port1 rx ring + * ..... + */ + + for (i = 0; i < rocker_pci_ring_count(r); i++) { + DescRing *ring = desc_ring_alloc(r, i); + + if (i == ROCKER_RING_CMD) { + desc_ring_set_consume(ring, cmd_consume, ROCKER_MSIX_VEC_CMD); + } else if (i == ROCKER_RING_EVENT) { + desc_ring_set_consume(ring, NULL, ROCKER_MSIX_VEC_EVENT); + } else if (i % 2 == 0) { + desc_ring_set_consume(ring, tx_consume, + ROCKER_MSIX_VEC_TX((i - 2) / 2)); + } else if (i % 2 == 1) { + desc_ring_set_consume(ring, NULL, ROCKER_MSIX_VEC_RX((i - 3) / 2)); + } + + r->rings[i] = ring; + } + + for (i = 0; i < r->fp_ports; i++) { + FpPort *port = + fp_port_alloc(r, r->name, &r->fp_start_macaddr, + i, &r->fp_ports_peers[i]); + + r->fp_port[i] = port; + fp_port_set_world(port, r->world_dflt); + } + + QLIST_INSERT_HEAD(&rockers, r, next); + + return; + +err_name_too_long: +err_duplicate: + rocker_msix_uninit(r); +err_msix_init: + object_unparent(OBJECT(&r->msix_bar)); + object_unparent(OBJECT(&r->mmio)); +err_world_type_by_name: + for (i = 0; i < ROCKER_WORLD_TYPE_MAX; i++) { + if (r->worlds[i]) { + world_free(r->worlds[i]); + } + } +} + +static void pci_rocker_uninit(PCIDevice *dev) +{ + Rocker *r = ROCKER(dev); + int i; + + QLIST_REMOVE(r, next); + + for (i = 0; i < r->fp_ports; i++) { + FpPort *port = r->fp_port[i]; + + fp_port_free(port); + r->fp_port[i] = NULL; + } + + for (i = 0; i < rocker_pci_ring_count(r); i++) { + if (r->rings[i]) { + desc_ring_free(r->rings[i]); + } + } + g_free(r->rings); + + rocker_msix_uninit(r); + object_unparent(OBJECT(&r->msix_bar)); + object_unparent(OBJECT(&r->mmio)); + + for (i = 0; i < ROCKER_WORLD_TYPE_MAX; i++) { + if (r->worlds[i]) { + world_free(r->worlds[i]); + } + } + g_free(r->fp_ports_peers); +} + +static void rocker_reset(DeviceState *dev) +{ + Rocker *r = ROCKER(dev); + int i; + + for (i = 0; i < ROCKER_WORLD_TYPE_MAX; i++) { + if (r->worlds[i]) { + world_reset(r->worlds[i]); + } + } + for (i = 0; i < r->fp_ports; i++) { + fp_port_reset(r->fp_port[i]); + fp_port_set_world(r->fp_port[i], r->world_dflt); + } + + r->test_reg = 0; + r->test_reg64 = 0; + r->test_dma_addr = 0; + r->test_dma_size = 0; + + for (i = 0; i < rocker_pci_ring_count(r); i++) { + desc_ring_reset(r->rings[i]); + } + + DPRINTF("Reset done\n"); +} + +static Property rocker_properties[] = { + DEFINE_PROP_STRING("name", Rocker, name), + DEFINE_PROP_STRING("world", Rocker, world_name), + DEFINE_PROP_MACADDR("fp_start_macaddr", Rocker, + fp_start_macaddr), + DEFINE_PROP_UINT64("switch_id", Rocker, + switch_id, 0), + DEFINE_PROP_ARRAY("ports", 
Rocker, fp_ports, + fp_ports_peers, qdev_prop_netdev, NICPeers), + DEFINE_PROP_END_OF_LIST(), +}; + +static const VMStateDescription rocker_vmsd = { + .name = TYPE_ROCKER, + .unmigratable = 1, +}; + +static void rocker_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + + k->realize = pci_rocker_realize; + k->exit = pci_rocker_uninit; + k->vendor_id = PCI_VENDOR_ID_REDHAT; + k->device_id = PCI_DEVICE_ID_REDHAT_ROCKER; + k->revision = ROCKER_PCI_REVISION; + k->class_id = PCI_CLASS_NETWORK_OTHER; + set_bit(DEVICE_CATEGORY_NETWORK, dc->categories); + dc->desc = "Rocker Switch"; + dc->reset = rocker_reset; + device_class_set_props(dc, rocker_properties); + dc->vmsd = &rocker_vmsd; +} + +static const TypeInfo rocker_info = { + .name = TYPE_ROCKER, + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(Rocker), + .class_init = rocker_class_init, + .interfaces = (InterfaceInfo[]) { + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { }, + }, +}; + +static void rocker_register_types(void) +{ + type_register_static(&rocker_info); +} + +type_init(rocker_register_types) diff --git a/hw/net/rocker/rocker.h b/hw/net/rocker/rocker.h new file mode 100644 index 000000000..412fa44d0 --- /dev/null +++ b/hw/net/rocker/rocker.h @@ -0,0 +1,84 @@ +/* + * QEMU rocker switch emulation + * + * Copyright (c) 2014 Scott Feldman + * Copyright (c) 2014 Jiri Pirko + * Copyright (c) 2014 Neil Horman + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef ROCKER_H +#define ROCKER_H + +#include "qemu/sockets.h" +#include "qom/object.h" + +#if defined(DEBUG_ROCKER) +# define DPRINTF(fmt, ...) \ + do { \ + g_autoptr(GDateTime) now = g_date_time_new_now_local(); \ + g_autofree char *nowstr = g_date_time_format(now, "%T.%f");\ + fprintf(stderr, "%s ROCKER: " fmt, nowstr, ## __VA_ARGS__);\ + } while (0) +#else +static inline GCC_FMT_ATTR(1, 2) int DPRINTF(const char *fmt, ...) 
+{ + return 0; +} +#endif + +#define __le16 uint16_t +#define __le32 uint32_t +#define __le64 uint64_t + +#define __be16 uint16_t +#define __be32 uint32_t +#define __be64 uint64_t + +static inline bool ipv4_addr_is_multicast(__be32 addr) +{ + return (addr & htonl(0xf0000000)) == htonl(0xe0000000); +} + +typedef struct ipv6_addr { + union { + uint8_t addr8[16]; + __be16 addr16[8]; + __be32 addr32[4]; + }; +} Ipv6Addr; + +static inline bool ipv6_addr_is_multicast(const Ipv6Addr *addr) +{ + return (addr->addr32[0] & htonl(0xFF000000)) == htonl(0xFF000000); +} + +typedef struct world World; +typedef struct desc_info DescInfo; +typedef struct desc_ring DescRing; + +#define TYPE_ROCKER "rocker" +typedef struct rocker Rocker; +DECLARE_INSTANCE_CHECKER(Rocker, ROCKER, + TYPE_ROCKER) + +Rocker *rocker_find(const char *name); +uint32_t rocker_fp_ports(Rocker *r); +int rocker_event_link_changed(Rocker *r, uint32_t pport, bool link_up); +int rocker_event_mac_vlan_seen(Rocker *r, uint32_t pport, uint8_t *addr, + uint16_t vlan_id); +int rx_produce(World *world, uint32_t pport, + const struct iovec *iov, int iovcnt, uint8_t copy_to_cpu); +int rocker_port_eg(Rocker *r, uint32_t pport, + const struct iovec *iov, int iovcnt); + +#endif /* ROCKER_H */ diff --git a/hw/net/rocker/rocker_desc.c b/hw/net/rocker/rocker_desc.c new file mode 100644 index 000000000..01845f115 --- /dev/null +++ b/hw/net/rocker/rocker_desc.c @@ -0,0 +1,361 @@ +/* + * QEMU rocker switch emulation - Descriptor ring support + * + * Copyright (c) 2014 Scott Feldman + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include "qemu/osdep.h" +#include "net/net.h" +#include "hw/pci/pci.h" + +#include "rocker.h" +#include "rocker_hw.h" +#include "rocker_desc.h" + +struct desc_ring { + hwaddr base_addr; + uint32_t size; + uint32_t head; + uint32_t tail; + uint32_t ctrl; + uint32_t credits; + Rocker *r; + DescInfo *info; + int index; + desc_ring_consume *consume; + unsigned msix_vector; +}; + +struct desc_info { + DescRing *ring; + RockerDesc desc; + char *buf; + size_t buf_size; +}; + +uint16_t desc_buf_size(DescInfo *info) +{ + return le16_to_cpu(info->desc.buf_size); +} + +uint16_t desc_tlv_size(DescInfo *info) +{ + return le16_to_cpu(info->desc.tlv_size); +} + +char *desc_get_buf(DescInfo *info, bool read_only) +{ + PCIDevice *dev = PCI_DEVICE(info->ring->r); + size_t size = read_only ? 
le16_to_cpu(info->desc.tlv_size) : + le16_to_cpu(info->desc.buf_size); + + if (size > info->buf_size) { + info->buf = g_realloc(info->buf, size); + info->buf_size = size; + } + + pci_dma_read(dev, le64_to_cpu(info->desc.buf_addr), info->buf, size); + + return info->buf; +} + +int desc_set_buf(DescInfo *info, size_t tlv_size) +{ + PCIDevice *dev = PCI_DEVICE(info->ring->r); + + if (tlv_size > info->buf_size) { + DPRINTF("ERROR: trying to write more to desc buf than it " + "can hold buf_size %zu tlv_size %zu\n", + info->buf_size, tlv_size); + return -ROCKER_EMSGSIZE; + } + + info->desc.tlv_size = cpu_to_le16(tlv_size); + pci_dma_write(dev, le64_to_cpu(info->desc.buf_addr), info->buf, tlv_size); + + return ROCKER_OK; +} + +DescRing *desc_get_ring(DescInfo *info) +{ + return info->ring; +} + +int desc_ring_index(DescRing *ring) +{ + return ring->index; +} + +static bool desc_ring_empty(DescRing *ring) +{ + return ring->head == ring->tail; +} + +bool desc_ring_set_base_addr(DescRing *ring, uint64_t base_addr) +{ + if (base_addr & 0x7) { + DPRINTF("ERROR: ring[%d] desc base addr (0x" TARGET_FMT_plx + ") not 8-byte aligned\n", ring->index, base_addr); + return false; + } + + ring->base_addr = base_addr; + + return true; +} + +uint64_t desc_ring_get_base_addr(DescRing *ring) +{ + return ring->base_addr; +} + +bool desc_ring_set_size(DescRing *ring, uint32_t size) +{ + int i; + + if (size < 2 || size > 0x10000 || (size & (size - 1))) { + DPRINTF("ERROR: ring[%d] size (%d) not a power of 2 " + "or in range [2, 64K]\n", ring->index, size); + return false; + } + + for (i = 0; i < ring->size; i++) { + g_free(ring->info[i].buf); + } + + ring->size = size; + ring->head = ring->tail = 0; + + ring->info = g_renew(DescInfo, ring->info, size); + + memset(ring->info, 0, size * sizeof(DescInfo)); + + for (i = 0; i < size; i++) { + ring->info[i].ring = ring; + } + + return true; +} + +uint32_t desc_ring_get_size(DescRing *ring) +{ + return ring->size; +} + +static DescInfo *desc_read(DescRing *ring, uint32_t index) +{ + PCIDevice *dev = PCI_DEVICE(ring->r); + DescInfo *info = &ring->info[index]; + hwaddr addr = ring->base_addr + (sizeof(RockerDesc) * index); + + pci_dma_read(dev, addr, &info->desc, sizeof(info->desc)); + + return info; +} + +static void desc_write(DescRing *ring, uint32_t index) +{ + PCIDevice *dev = PCI_DEVICE(ring->r); + DescInfo *info = &ring->info[index]; + hwaddr addr = ring->base_addr + (sizeof(RockerDesc) * index); + + pci_dma_write(dev, addr, &info->desc, sizeof(info->desc)); +} + +static bool desc_ring_base_addr_check(DescRing *ring) +{ + if (!ring->base_addr) { + DPRINTF("ERROR: ring[%d] not-initialized desc base address!\n", + ring->index); + return false; + } + return true; +} + +static DescInfo *__desc_ring_fetch_desc(DescRing *ring) +{ + return desc_read(ring, ring->tail); +} + +DescInfo *desc_ring_fetch_desc(DescRing *ring) +{ + if (desc_ring_empty(ring) || !desc_ring_base_addr_check(ring)) { + return NULL; + } + + return desc_read(ring, ring->tail); +} + +static bool __desc_ring_post_desc(DescRing *ring, int err) +{ + uint16_t comp_err = 0x8000 | (uint16_t)-err; + DescInfo *info = &ring->info[ring->tail]; + + info->desc.comp_err = cpu_to_le16(comp_err); + desc_write(ring, ring->tail); + ring->tail = (ring->tail + 1) % ring->size; + + /* return true if starting credit count */ + + return ring->credits++ == 0; +} + +bool desc_ring_post_desc(DescRing *ring, int err) +{ + if (desc_ring_empty(ring)) { + DPRINTF("ERROR: ring[%d] trying to post desc to empty ring\n", + ring->index); + 
return false; + } + + if (!desc_ring_base_addr_check(ring)) { + return false; + } + + return __desc_ring_post_desc(ring, err); +} + +static bool ring_pump(DescRing *ring) +{ + DescInfo *info; + bool primed = false; + int err; + + /* If the ring has a consumer, call consumer for each + * desc starting at tail and stopping when tail reaches + * head (the empty ring condition). + */ + + if (ring->consume) { + while (ring->head != ring->tail) { + info = __desc_ring_fetch_desc(ring); + err = ring->consume(ring->r, info); + if (__desc_ring_post_desc(ring, err)) { + primed = true; + } + } + } + + return primed; +} + +bool desc_ring_set_head(DescRing *ring, uint32_t new) +{ + uint32_t tail = ring->tail; + uint32_t head = ring->head; + + if (!desc_ring_base_addr_check(ring)) { + return false; + } + + if (new >= ring->size) { + DPRINTF("ERROR: trying to set head (%d) past ring[%d] size (%d)\n", + new, ring->index, ring->size); + return false; + } + + if (((head < tail) && ((new >= tail) || (new < head))) || + ((head > tail) && ((new >= tail) && (new < head)))) { + DPRINTF("ERROR: trying to wrap ring[%d] " + "(head %d, tail %d, new head %d)\n", + ring->index, head, tail, new); + return false; + } + + if (new == ring->head) { + DPRINTF("WARNING: setting head (%d) to current head position\n", new); + } + + ring->head = new; + + return ring_pump(ring); +} + +uint32_t desc_ring_get_head(DescRing *ring) +{ + return ring->head; +} + +uint32_t desc_ring_get_tail(DescRing *ring) +{ + return ring->tail; +} + +void desc_ring_set_ctrl(DescRing *ring, uint32_t val) +{ + if (val & ROCKER_DMA_DESC_CTRL_RESET) { + DPRINTF("ring[%d] resetting\n", ring->index); + desc_ring_reset(ring); + } +} + +bool desc_ring_ret_credits(DescRing *ring, uint32_t credits) +{ + if (credits > ring->credits) { + DPRINTF("ERROR: trying to return more credits (%d) " + "than are outstanding (%d)\n", credits, ring->credits); + ring->credits = 0; + return false; + } + + ring->credits -= credits; + + /* return true if credits are still outstanding */ + + return ring->credits > 0; +} + +uint32_t desc_ring_get_credits(DescRing *ring) +{ + return ring->credits; +} + +void desc_ring_set_consume(DescRing *ring, desc_ring_consume *consume, + unsigned vector) +{ + ring->consume = consume; + ring->msix_vector = vector; +} + +unsigned desc_ring_get_msix_vector(DescRing *ring) +{ + return ring->msix_vector; +} + +DescRing *desc_ring_alloc(Rocker *r, int index) +{ + DescRing *ring; + + ring = g_new0(DescRing, 1); + + ring->r = r; + ring->index = index; + + return ring; +} + +void desc_ring_free(DescRing *ring) +{ + g_free(ring->info); + g_free(ring); +} + +void desc_ring_reset(DescRing *ring) +{ + ring->base_addr = 0; + ring->size = 0; + ring->head = 0; + ring->tail = 0; + ring->ctrl = 0; + ring->credits = 0; +} diff --git a/hw/net/rocker/rocker_desc.h b/hw/net/rocker/rocker_desc.h new file mode 100644 index 000000000..1dec33561 --- /dev/null +++ b/hw/net/rocker/rocker_desc.h @@ -0,0 +1,52 @@ +/* + * QEMU rocker switch emulation - Descriptor ring support + * + * Copyright (c) 2014 Scott Feldman + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + */ + +#ifndef ROCKER_DESC_H +#define ROCKER_DESC_H + +#include "rocker_hw.h" + +typedef int (desc_ring_consume)(Rocker *r, DescInfo *info); + +uint16_t desc_buf_size(DescInfo *info); +uint16_t desc_tlv_size(DescInfo *info); +char *desc_get_buf(DescInfo *info, bool read_only); +int desc_set_buf(DescInfo *info, size_t tlv_size); +DescRing *desc_get_ring(DescInfo *info); + +int desc_ring_index(DescRing *ring); +bool desc_ring_set_base_addr(DescRing *ring, uint64_t base_addr); +uint64_t desc_ring_get_base_addr(DescRing *ring); +bool desc_ring_set_size(DescRing *ring, uint32_t size); +uint32_t desc_ring_get_size(DescRing *ring); +bool desc_ring_set_head(DescRing *ring, uint32_t new); +uint32_t desc_ring_get_head(DescRing *ring); +uint32_t desc_ring_get_tail(DescRing *ring); +void desc_ring_set_ctrl(DescRing *ring, uint32_t val); +bool desc_ring_ret_credits(DescRing *ring, uint32_t credits); +uint32_t desc_ring_get_credits(DescRing *ring); + +DescInfo *desc_ring_fetch_desc(DescRing *ring); +bool desc_ring_post_desc(DescRing *ring, int status); + +void desc_ring_set_consume(DescRing *ring, desc_ring_consume *consume, + unsigned vector); +unsigned desc_ring_get_msix_vector(DescRing *ring); +DescRing *desc_ring_alloc(Rocker *r, int index); +void desc_ring_free(DescRing *ring); +void desc_ring_reset(DescRing *ring); + +#endif diff --git a/hw/net/rocker/rocker_fp.c b/hw/net/rocker/rocker_fp.c new file mode 100644 index 000000000..cbeed65bd --- /dev/null +++ b/hw/net/rocker/rocker_fp.c @@ -0,0 +1,267 @@ +/* + * QEMU rocker switch emulation - front-panel ports + * + * Copyright (c) 2014 Scott Feldman + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include "qemu/osdep.h" +#include "qapi/qapi-types-rocker.h" +#include "rocker.h" +#include "rocker_hw.h" +#include "rocker_fp.h" +#include "rocker_world.h" + +enum duplex { + DUPLEX_HALF = 0, + DUPLEX_FULL +}; + +struct fp_port { + Rocker *r; + World *world; + unsigned int index; + char *name; + uint32_t pport; + bool enabled; + uint32_t speed; + uint8_t duplex; + uint8_t autoneg; + uint8_t learning; + NICState *nic; + NICConf conf; +}; + +char *fp_port_get_name(FpPort *port) +{ + return port->name; +} + +bool fp_port_get_link_up(FpPort *port) +{ + return !qemu_get_queue(port->nic)->link_down; +} + +RockerPort *fp_port_get_info(FpPort *port) +{ + RockerPort *value = g_malloc0(sizeof(*value)); + + value->name = g_strdup(port->name); + value->enabled = port->enabled; + value->link_up = fp_port_get_link_up(port); + value->speed = port->speed; + value->duplex = port->duplex; + value->autoneg = port->autoneg; + return value; +} + +void fp_port_get_macaddr(FpPort *port, MACAddr *macaddr) +{ + memcpy(macaddr->a, port->conf.macaddr.a, sizeof(macaddr->a)); +} + +void fp_port_set_macaddr(FpPort *port, MACAddr *macaddr) +{ +/* XXX TODO implement and test setting mac addr + * XXX memcpy(port->conf.macaddr.a, macaddr.a, sizeof(port->conf.macaddr.a)); + */ +} + +uint8_t fp_port_get_learning(FpPort *port) +{ + return port->learning; +} + +void fp_port_set_learning(FpPort *port, uint8_t learning) +{ + port->learning = learning; +} + +int fp_port_get_settings(FpPort *port, uint32_t *speed, + uint8_t *duplex, uint8_t *autoneg) +{ + *speed = port->speed; + *duplex = port->duplex; + *autoneg = port->autoneg; + + return ROCKER_OK; +} + +int fp_port_set_settings(FpPort *port, uint32_t speed, + uint8_t duplex, uint8_t autoneg) +{ + /* XXX validate inputs */ + + port->speed = speed; + port->duplex = duplex; + port->autoneg = autoneg; + + return ROCKER_OK; +} + +bool fp_port_from_pport(uint32_t pport, uint32_t *port) +{ + if (pport < 1 || pport > ROCKER_FP_PORTS_MAX) { + return false; + } + *port = pport - 1; + return true; +} + +int fp_port_eg(FpPort *port, const struct iovec *iov, int iovcnt) +{ + NetClientState *nc = qemu_get_queue(port->nic); + + if (port->enabled) { + qemu_sendv_packet(nc, iov, iovcnt); + } + + return ROCKER_OK; +} + +static ssize_t fp_port_receive_iov(NetClientState *nc, const struct iovec *iov, + int iovcnt) +{ + FpPort *port = qemu_get_nic_opaque(nc); + + /* If the port is disabled, we want to drop this pkt + * now rather than queing it for later. We don't want + * any stale pkts getting into the device when the port + * transitions to enabled. 
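+     * (Returning -1 below, rather than 0, tells QEMU's net queue
+     * the packet was consumed: a 0 return would make the net layer
+     * hold the packet and retry it later, which is exactly the
+     * stale-packet case this check is avoiding.)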
+ */ + + if (!port->enabled) { + return -1; + } + + return world_ingress(port->world, port->pport, iov, iovcnt); +} + +static ssize_t fp_port_receive(NetClientState *nc, const uint8_t *buf, + size_t size) +{ + const struct iovec iov = { + .iov_base = (uint8_t *)buf, + .iov_len = size + }; + + return fp_port_receive_iov(nc, &iov, 1); +} + +static void fp_port_cleanup(NetClientState *nc) +{ +} + +static void fp_port_set_link_status(NetClientState *nc) +{ + FpPort *port = qemu_get_nic_opaque(nc); + + rocker_event_link_changed(port->r, port->pport, !nc->link_down); +} + +static NetClientInfo fp_port_info = { + .type = NET_CLIENT_DRIVER_NIC, + .size = sizeof(NICState), + .receive = fp_port_receive, + .receive_iov = fp_port_receive_iov, + .cleanup = fp_port_cleanup, + .link_status_changed = fp_port_set_link_status, +}; + +World *fp_port_get_world(FpPort *port) +{ + return port->world; +} + +void fp_port_set_world(FpPort *port, World *world) +{ + DPRINTF("port %d setting world \"%s\"\n", port->index, world_name(world)); + port->world = world; +} + +bool fp_port_check_world(FpPort *port, World *world) +{ + return port->world == world; +} + +bool fp_port_enabled(FpPort *port) +{ + return port->enabled; +} + +static void fp_port_set_link(FpPort *port, bool up) +{ + NetClientState *nc = qemu_get_queue(port->nic); + + if (up == nc->link_down) { + nc->link_down = !up; + nc->info->link_status_changed(nc); + } +} + +void fp_port_enable(FpPort *port) +{ + fp_port_set_link(port, true); + port->enabled = true; + DPRINTF("port %d enabled\n", port->index); +} + +void fp_port_disable(FpPort *port) +{ + port->enabled = false; + fp_port_set_link(port, false); + DPRINTF("port %d disabled\n", port->index); +} + +FpPort *fp_port_alloc(Rocker *r, char *sw_name, + MACAddr *start_mac, unsigned int index, + NICPeers *peers) +{ + FpPort *port = g_new0(FpPort, 1); + + port->r = r; + port->index = index; + port->pport = index + 1; + + /* front-panel switch port names are 1-based */ + + port->name = g_strdup_printf("%sp%d", sw_name, port->pport); + + memcpy(port->conf.macaddr.a, start_mac, sizeof(port->conf.macaddr.a)); + port->conf.macaddr.a[5] += index; + port->conf.bootindex = -1; + port->conf.peers = *peers; + + port->nic = qemu_new_nic(&fp_port_info, &port->conf, + sw_name, NULL, port); + qemu_format_nic_info_str(qemu_get_queue(port->nic), + port->conf.macaddr.a); + + fp_port_reset(port); + + return port; +} + +void fp_port_free(FpPort *port) +{ + qemu_del_nic(port->nic); + g_free(port->name); + g_free(port); +} + +void fp_port_reset(FpPort *port) +{ + fp_port_disable(port); + port->speed = 10000; /* 10Gbps */ + port->duplex = DUPLEX_FULL; + port->autoneg = 0; +} diff --git a/hw/net/rocker/rocker_fp.h b/hw/net/rocker/rocker_fp.h new file mode 100644 index 000000000..7ff57aac0 --- /dev/null +++ b/hw/net/rocker/rocker_fp.h @@ -0,0 +1,54 @@ +/* + * QEMU rocker switch emulation - front-panel ports + * + * Copyright (c) 2014 Scott Feldman + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef ROCKER_FP_H +#define ROCKER_FP_H + +#include "net/net.h" +#include "qemu/iov.h" + +#define ROCKER_FP_PORTS_MAX 62 + +typedef struct fp_port FpPort; + +int fp_port_eg(FpPort *port, const struct iovec *iov, int iovcnt); + +char *fp_port_get_name(FpPort *port); +bool fp_port_get_link_up(FpPort *port); +RockerPort *fp_port_get_info(FpPort *port); +void fp_port_get_macaddr(FpPort *port, MACAddr *macaddr); +void fp_port_set_macaddr(FpPort *port, MACAddr *macaddr); +uint8_t fp_port_get_learning(FpPort *port); +void fp_port_set_learning(FpPort *port, uint8_t learning); +int fp_port_get_settings(FpPort *port, uint32_t *speed, + uint8_t *duplex, uint8_t *autoneg); +int fp_port_set_settings(FpPort *port, uint32_t speed, + uint8_t duplex, uint8_t autoneg); +bool fp_port_from_pport(uint32_t pport, uint32_t *port); +World *fp_port_get_world(FpPort *port); +void fp_port_set_world(FpPort *port, World *world); +bool fp_port_check_world(FpPort *port, World *world); +bool fp_port_enabled(FpPort *port); +void fp_port_enable(FpPort *port); +void fp_port_disable(FpPort *port); + +FpPort *fp_port_alloc(Rocker *r, char *sw_name, + MACAddr *start_mac, unsigned int index, + NICPeers *peers); +void fp_port_free(FpPort *port); +void fp_port_reset(FpPort *port); + +#endif /* ROCKER_FP_H */ diff --git a/hw/net/rocker/rocker_hw.h b/hw/net/rocker/rocker_hw.h new file mode 100644 index 000000000..1786323fa --- /dev/null +++ b/hw/net/rocker/rocker_hw.h @@ -0,0 +1,493 @@ +/* + * Rocker switch hardware register and descriptor definitions. + * + * Copyright (c) 2014 Scott Feldman + * Copyright (c) 2014 Jiri Pirko + * + */ + +#ifndef ROCKER_HW_H +#define ROCKER_HW_H + +#define __le16 uint16_t +#define __le32 uint32_t +#define __le64 uint64_t + +/* + * Return codes + */ + +enum { + ROCKER_OK = 0, + ROCKER_ENOENT = 2, + ROCKER_ENXIO = 6, + ROCKER_ENOMEM = 12, + ROCKER_EEXIST = 17, + ROCKER_EINVAL = 22, + ROCKER_EMSGSIZE = 90, + ROCKER_ENOTSUP = 95, + ROCKER_ENOBUFS = 105, +}; + +/* + * PCI configuration space + */ + +#define ROCKER_PCI_REVISION 0x1 +#define ROCKER_PCI_BAR0_IDX 0 +#define ROCKER_PCI_BAR0_SIZE 0x2000 +#define ROCKER_PCI_MSIX_BAR_IDX 1 +#define ROCKER_PCI_MSIX_BAR_SIZE 0x2000 +#define ROCKER_PCI_MSIX_TABLE_OFFSET 0x0000 +#define ROCKER_PCI_MSIX_PBA_OFFSET 0x1000 + +/* + * MSI-X vectors + */ + +enum { + ROCKER_MSIX_VEC_CMD, + ROCKER_MSIX_VEC_EVENT, + ROCKER_MSIX_VEC_TEST, + ROCKER_MSIX_VEC_RESERVED0, + __ROCKER_MSIX_VEC_TX, + __ROCKER_MSIX_VEC_RX, +#define ROCKER_MSIX_VEC_TX(port) \ + (__ROCKER_MSIX_VEC_TX + ((port) * 2)) +#define ROCKER_MSIX_VEC_RX(port) \ + (__ROCKER_MSIX_VEC_RX + ((port) * 2)) +#define ROCKER_MSIX_VEC_COUNT(portcnt) \ + (ROCKER_MSIX_VEC_RX((portcnt) - 1) + 1) +}; + +/* + * Rocker bogus registers + */ +#define ROCKER_BOGUS_REG0 0x0000 +#define ROCKER_BOGUS_REG1 0x0004 +#define ROCKER_BOGUS_REG2 0x0008 +#define ROCKER_BOGUS_REG3 0x000c + +/* + * Rocker test registers + */ +#define ROCKER_TEST_REG 0x0010 +#define ROCKER_TEST_REG64 0x0018 /* 8-byte */ +#define ROCKER_TEST_IRQ 0x0020 +#define ROCKER_TEST_DMA_ADDR 0x0028 /* 8-byte */ +#define ROCKER_TEST_DMA_SIZE 0x0030 +#define ROCKER_TEST_DMA_CTRL 0x0034 + +/* + * Rocker test register ctrl + */ +#define ROCKER_TEST_DMA_CTRL_CLEAR (1 << 0) +#define ROCKER_TEST_DMA_CTRL_FILL (1 << 1) +#define ROCKER_TEST_DMA_CTRL_INVERT (1 << 2) + +/* + * Rocker DMA ring register offsets + */ +#define ROCKER_DMA_DESC_BASE 0x1000 +#define ROCKER_DMA_DESC_SIZE 32 +#define ROCKER_DMA_DESC_MASK 0x1F +#define ROCKER_DMA_DESC_TOTAL_SIZE \ + 
(ROCKER_DMA_DESC_SIZE * 64) /* 62 ports + event + cmd */ +#define ROCKER_DMA_DESC_ADDR_OFFSET 0x00 /* 8-byte */ +#define ROCKER_DMA_DESC_SIZE_OFFSET 0x08 +#define ROCKER_DMA_DESC_HEAD_OFFSET 0x0c +#define ROCKER_DMA_DESC_TAIL_OFFSET 0x10 +#define ROCKER_DMA_DESC_CTRL_OFFSET 0x14 +#define ROCKER_DMA_DESC_CREDITS_OFFSET 0x18 +#define ROCKER_DMA_DESC_RSVD_OFFSET 0x1c + +/* + * Rocker dma ctrl register bits + */ +#define ROCKER_DMA_DESC_CTRL_RESET (1 << 0) + +/* + * Rocker ring indices + */ +#define ROCKER_RING_CMD 0 +#define ROCKER_RING_EVENT 1 + +/* + * Helper macro to do convert a dma ring register + * to its index. Based on the fact that the register + * group stride is 32 bytes. + */ +#define ROCKER_RING_INDEX(reg) ((reg >> 5) & 0x7F) + +/* + * Rocker DMA Descriptor + */ + +typedef struct rocker_desc { + __le64 buf_addr; + uint64_t cookie; + __le16 buf_size; + __le16 tlv_size; + __le16 rsvd[5]; /* pad to 32 bytes */ + __le16 comp_err; +} __attribute__((packed, aligned(8))) RockerDesc; + +/* + * Rocker TLV type fields + */ + +typedef struct rocker_tlv { + __le32 type; + __le16 len; + __le16 rsvd; +} __attribute__((packed, aligned(8))) RockerTlv; + +/* cmd msg */ +enum { + ROCKER_TLV_CMD_UNSPEC, + ROCKER_TLV_CMD_TYPE, /* u16 */ + ROCKER_TLV_CMD_INFO, /* nest */ + + __ROCKER_TLV_CMD_MAX, + ROCKER_TLV_CMD_MAX = __ROCKER_TLV_CMD_MAX - 1, +}; + +enum { + ROCKER_TLV_CMD_TYPE_UNSPEC, + ROCKER_TLV_CMD_TYPE_GET_PORT_SETTINGS, + ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS, + ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD, + ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD, + ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL, + ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_GET_STATS, + ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD, + ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD, + ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL, + ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_GET_STATS, + + __ROCKER_TLV_CMD_TYPE_MAX, + ROCKER_TLV_CMD_TYPE_MAX = __ROCKER_TLV_CMD_TYPE_MAX - 1, +}; + +/* cmd info nested for set/get port settings */ +enum { + ROCKER_TLV_CMD_PORT_SETTINGS_UNSPEC, + ROCKER_TLV_CMD_PORT_SETTINGS_PPORT, /* u32 */ + ROCKER_TLV_CMD_PORT_SETTINGS_SPEED, /* u32 */ + ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX, /* u8 */ + ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG, /* u8 */ + ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR, /* binary */ + ROCKER_TLV_CMD_PORT_SETTINGS_MODE, /* u8 */ + ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING, /* u8 */ + ROCKER_TLV_CMD_PORT_SETTINGS_PHYS_NAME, /* binary */ + + __ROCKER_TLV_CMD_PORT_SETTINGS_MAX, + ROCKER_TLV_CMD_PORT_SETTINGS_MAX = __ROCKER_TLV_CMD_PORT_SETTINGS_MAX - 1, +}; + +enum { + ROCKER_PORT_MODE_OF_DPA, +}; + +/* event msg */ +enum { + ROCKER_TLV_EVENT_UNSPEC, + ROCKER_TLV_EVENT_TYPE, /* u16 */ + ROCKER_TLV_EVENT_INFO, /* nest */ + + __ROCKER_TLV_EVENT_MAX, + ROCKER_TLV_EVENT_MAX = __ROCKER_TLV_EVENT_MAX - 1, +}; + +enum { + ROCKER_TLV_EVENT_TYPE_UNSPEC, + ROCKER_TLV_EVENT_TYPE_LINK_CHANGED, + ROCKER_TLV_EVENT_TYPE_MAC_VLAN_SEEN, + + __ROCKER_TLV_EVENT_TYPE_MAX, + ROCKER_TLV_EVENT_TYPE_MAX = __ROCKER_TLV_EVENT_TYPE_MAX - 1, +}; + +/* event info nested for link changed */ +enum { + ROCKER_TLV_EVENT_LINK_CHANGED_UNSPEC, + ROCKER_TLV_EVENT_LINK_CHANGED_PPORT, /* u32 */ + ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP, /* u8 */ + + __ROCKER_TLV_EVENT_LINK_CHANGED_MAX, + ROCKER_TLV_EVENT_LINK_CHANGED_MAX = __ROCKER_TLV_EVENT_LINK_CHANGED_MAX - 1, +}; + +/* event info nested for MAC/VLAN */ +enum { + ROCKER_TLV_EVENT_MAC_VLAN_UNSPEC, + ROCKER_TLV_EVENT_MAC_VLAN_PPORT, /* u32 */ + ROCKER_TLV_EVENT_MAC_VLAN_MAC, /* binary */ + ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID, /* __be16 */ + + 
__ROCKER_TLV_EVENT_MAC_VLAN_MAX, + ROCKER_TLV_EVENT_MAC_VLAN_MAX = __ROCKER_TLV_EVENT_MAC_VLAN_MAX - 1, +}; + +/* Rx msg */ +enum { + ROCKER_TLV_RX_UNSPEC, + ROCKER_TLV_RX_FLAGS, /* u16, see RX_FLAGS_ */ + ROCKER_TLV_RX_CSUM, /* u16 */ + ROCKER_TLV_RX_FRAG_ADDR, /* u64 */ + ROCKER_TLV_RX_FRAG_MAX_LEN, /* u16 */ + ROCKER_TLV_RX_FRAG_LEN, /* u16 */ + + __ROCKER_TLV_RX_MAX, + ROCKER_TLV_RX_MAX = __ROCKER_TLV_RX_MAX - 1, +}; + +#define ROCKER_RX_FLAGS_IPV4 (1 << 0) +#define ROCKER_RX_FLAGS_IPV6 (1 << 1) +#define ROCKER_RX_FLAGS_CSUM_CALC (1 << 2) +#define ROCKER_RX_FLAGS_IPV4_CSUM_GOOD (1 << 3) +#define ROCKER_RX_FLAGS_IP_FRAG (1 << 4) +#define ROCKER_RX_FLAGS_TCP (1 << 5) +#define ROCKER_RX_FLAGS_UDP (1 << 6) +#define ROCKER_RX_FLAGS_TCP_UDP_CSUM_GOOD (1 << 7) +#define ROCKER_RX_FLAGS_FWD_OFFLOAD (1 << 8) + +/* Tx msg */ +enum { + ROCKER_TLV_TX_UNSPEC, + ROCKER_TLV_TX_OFFLOAD, /* u8, see TX_OFFLOAD_ */ + ROCKER_TLV_TX_L3_CSUM_OFF, /* u16 */ + ROCKER_TLV_TX_TSO_MSS, /* u16 */ + ROCKER_TLV_TX_TSO_HDR_LEN, /* u16 */ + ROCKER_TLV_TX_FRAGS, /* array */ + + __ROCKER_TLV_TX_MAX, + ROCKER_TLV_TX_MAX = __ROCKER_TLV_TX_MAX - 1, +}; + +#define ROCKER_TX_OFFLOAD_NONE 0 +#define ROCKER_TX_OFFLOAD_IP_CSUM 1 +#define ROCKER_TX_OFFLOAD_TCP_UDP_CSUM 2 +#define ROCKER_TX_OFFLOAD_L3_CSUM 3 +#define ROCKER_TX_OFFLOAD_TSO 4 + +#define ROCKER_TX_FRAGS_MAX 16 + +enum { + ROCKER_TLV_TX_FRAG_UNSPEC, + ROCKER_TLV_TX_FRAG, /* nest */ + + __ROCKER_TLV_TX_FRAG_MAX, + ROCKER_TLV_TX_FRAG_MAX = __ROCKER_TLV_TX_FRAG_MAX - 1, +}; + +enum { + ROCKER_TLV_TX_FRAG_ATTR_UNSPEC, + ROCKER_TLV_TX_FRAG_ATTR_ADDR, /* u64 */ + ROCKER_TLV_TX_FRAG_ATTR_LEN, /* u16 */ + + __ROCKER_TLV_TX_FRAG_ATTR_MAX, + ROCKER_TLV_TX_FRAG_ATTR_MAX = __ROCKER_TLV_TX_FRAG_ATTR_MAX - 1, +}; + +/* + * cmd info nested for OF-DPA msgs + */ + +enum { + ROCKER_TLV_OF_DPA_UNSPEC, + ROCKER_TLV_OF_DPA_TABLE_ID, /* u16 */ + ROCKER_TLV_OF_DPA_PRIORITY, /* u32 */ + ROCKER_TLV_OF_DPA_HARDTIME, /* u32 */ + ROCKER_TLV_OF_DPA_IDLETIME, /* u32 */ + ROCKER_TLV_OF_DPA_COOKIE, /* u64 */ + ROCKER_TLV_OF_DPA_IN_PPORT, /* u32 */ + ROCKER_TLV_OF_DPA_IN_PPORT_MASK, /* u32 */ + ROCKER_TLV_OF_DPA_OUT_PPORT, /* u32 */ + ROCKER_TLV_OF_DPA_GOTO_TABLE_ID, /* u16 */ + ROCKER_TLV_OF_DPA_GROUP_ID, /* u32 */ + ROCKER_TLV_OF_DPA_GROUP_ID_LOWER, /* u32 */ + ROCKER_TLV_OF_DPA_GROUP_COUNT, /* u16 */ + ROCKER_TLV_OF_DPA_GROUP_IDS, /* u32 array */ + ROCKER_TLV_OF_DPA_VLAN_ID, /* __be16 */ + ROCKER_TLV_OF_DPA_VLAN_ID_MASK, /* __be16 */ + ROCKER_TLV_OF_DPA_VLAN_PCP, /* __be16 */ + ROCKER_TLV_OF_DPA_VLAN_PCP_MASK, /* __be16 */ + ROCKER_TLV_OF_DPA_VLAN_PCP_ACTION, /* u8 */ + ROCKER_TLV_OF_DPA_NEW_VLAN_ID, /* __be16 */ + ROCKER_TLV_OF_DPA_NEW_VLAN_PCP, /* u8 */ + ROCKER_TLV_OF_DPA_TUNNEL_ID, /* u32 */ + ROCKER_TLV_OF_DPA_TUNNEL_LPORT, /* u32 */ + ROCKER_TLV_OF_DPA_ETHERTYPE, /* __be16 */ + ROCKER_TLV_OF_DPA_DST_MAC, /* binary */ + ROCKER_TLV_OF_DPA_DST_MAC_MASK, /* binary */ + ROCKER_TLV_OF_DPA_SRC_MAC, /* binary */ + ROCKER_TLV_OF_DPA_SRC_MAC_MASK, /* binary */ + ROCKER_TLV_OF_DPA_IP_PROTO, /* u8 */ + ROCKER_TLV_OF_DPA_IP_PROTO_MASK, /* u8 */ + ROCKER_TLV_OF_DPA_IP_DSCP, /* u8 */ + ROCKER_TLV_OF_DPA_IP_DSCP_MASK, /* u8 */ + ROCKER_TLV_OF_DPA_IP_DSCP_ACTION, /* u8 */ + ROCKER_TLV_OF_DPA_NEW_IP_DSCP, /* u8 */ + ROCKER_TLV_OF_DPA_IP_ECN, /* u8 */ + ROCKER_TLV_OF_DPA_IP_ECN_MASK, /* u8 */ + ROCKER_TLV_OF_DPA_DST_IP, /* __be32 */ + ROCKER_TLV_OF_DPA_DST_IP_MASK, /* __be32 */ + ROCKER_TLV_OF_DPA_SRC_IP, /* __be32 */ + ROCKER_TLV_OF_DPA_SRC_IP_MASK, /* __be32 */ + ROCKER_TLV_OF_DPA_DST_IPV6, /* binary */ + 
ROCKER_TLV_OF_DPA_DST_IPV6_MASK, /* binary */ + ROCKER_TLV_OF_DPA_SRC_IPV6, /* binary */ + ROCKER_TLV_OF_DPA_SRC_IPV6_MASK, /* binary */ + ROCKER_TLV_OF_DPA_SRC_ARP_IP, /* __be32 */ + ROCKER_TLV_OF_DPA_SRC_ARP_IP_MASK, /* __be32 */ + ROCKER_TLV_OF_DPA_L4_DST_PORT, /* __be16 */ + ROCKER_TLV_OF_DPA_L4_DST_PORT_MASK, /* __be16 */ + ROCKER_TLV_OF_DPA_L4_SRC_PORT, /* __be16 */ + ROCKER_TLV_OF_DPA_L4_SRC_PORT_MASK, /* __be16 */ + ROCKER_TLV_OF_DPA_ICMP_TYPE, /* u8 */ + ROCKER_TLV_OF_DPA_ICMP_TYPE_MASK, /* u8 */ + ROCKER_TLV_OF_DPA_ICMP_CODE, /* u8 */ + ROCKER_TLV_OF_DPA_ICMP_CODE_MASK, /* u8 */ + ROCKER_TLV_OF_DPA_IPV6_LABEL, /* __be32 */ + ROCKER_TLV_OF_DPA_IPV6_LABEL_MASK, /* __be32 */ + ROCKER_TLV_OF_DPA_QUEUE_ID_ACTION, /* u8 */ + ROCKER_TLV_OF_DPA_NEW_QUEUE_ID, /* u8 */ + ROCKER_TLV_OF_DPA_CLEAR_ACTIONS, /* u32 */ + ROCKER_TLV_OF_DPA_POP_VLAN, /* u8 */ + ROCKER_TLV_OF_DPA_TTL_CHECK, /* u8 */ + ROCKER_TLV_OF_DPA_COPY_CPU_ACTION, /* u8 */ + + __ROCKER_TLV_OF_DPA_MAX, + ROCKER_TLV_OF_DPA_MAX = __ROCKER_TLV_OF_DPA_MAX - 1, +}; + +/* + * OF-DPA table IDs + */ + +enum rocker_of_dpa_table_id { + ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT = 0, + ROCKER_OF_DPA_TABLE_ID_VLAN = 10, + ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC = 20, + ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING = 30, + ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING = 40, + ROCKER_OF_DPA_TABLE_ID_BRIDGING = 50, + ROCKER_OF_DPA_TABLE_ID_ACL_POLICY = 60, +}; + +/* + * OF-DPA flow stats + */ + +enum { + ROCKER_TLV_OF_DPA_FLOW_STAT_UNSPEC, + ROCKER_TLV_OF_DPA_FLOW_STAT_DURATION, /* u32 */ + ROCKER_TLV_OF_DPA_FLOW_STAT_RX_PKTS, /* u64 */ + ROCKER_TLV_OF_DPA_FLOW_STAT_TX_PKTS, /* u64 */ + + __ROCKER_TLV_OF_DPA_FLOW_STAT_MAX, + ROCKER_TLV_OF_DPA_FLOW_STAT_MAX = __ROCKER_TLV_OF_DPA_FLOW_STAT_MAX - 1, +}; + +/* + * OF-DPA group types + */ + +enum rocker_of_dpa_group_type { + ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE = 0, + ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE, + ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST, + ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST, + ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD, + ROCKER_OF_DPA_GROUP_TYPE_L3_INTERFACE, + ROCKER_OF_DPA_GROUP_TYPE_L3_MCAST, + ROCKER_OF_DPA_GROUP_TYPE_L3_ECMP, + ROCKER_OF_DPA_GROUP_TYPE_L2_OVERLAY, +}; + +/* + * OF-DPA group L2 overlay types + */ + +enum rocker_of_dpa_overlay_type { + ROCKER_OF_DPA_OVERLAY_TYPE_FLOOD_UCAST = 0, + ROCKER_OF_DPA_OVERLAY_TYPE_FLOOD_MCAST, + ROCKER_OF_DPA_OVERLAY_TYPE_MCAST_UCAST, + ROCKER_OF_DPA_OVERLAY_TYPE_MCAST_MCAST, +}; + +/* + * OF-DPA group ID encoding + */ + +#define ROCKER_GROUP_TYPE_SHIFT 28 +#define ROCKER_GROUP_TYPE_MASK 0xf0000000 +#define ROCKER_GROUP_VLAN_ID_SHIFT 16 +#define ROCKER_GROUP_VLAN_ID_MASK 0x0fff0000 +#define ROCKER_GROUP_PORT_SHIFT 0 +#define ROCKER_GROUP_PORT_MASK 0x0000ffff +#define ROCKER_GROUP_TUNNEL_ID_SHIFT 12 +#define ROCKER_GROUP_TUNNEL_ID_MASK 0x0ffff000 +#define ROCKER_GROUP_SUBTYPE_SHIFT 10 +#define ROCKER_GROUP_SUBTYPE_MASK 0x00000c00 +#define ROCKER_GROUP_INDEX_SHIFT 0 +#define ROCKER_GROUP_INDEX_MASK 0x0000ffff +#define ROCKER_GROUP_INDEX_LONG_SHIFT 0 +#define ROCKER_GROUP_INDEX_LONG_MASK 0x0fffffff + +#define ROCKER_GROUP_TYPE_GET(group_id) \ + (((group_id) & ROCKER_GROUP_TYPE_MASK) >> ROCKER_GROUP_TYPE_SHIFT) +#define ROCKER_GROUP_TYPE_SET(type) \ + (((type) << ROCKER_GROUP_TYPE_SHIFT) & ROCKER_GROUP_TYPE_MASK) +#define ROCKER_GROUP_VLAN_GET(group_id) \ + (((group_id) & ROCKER_GROUP_VLAN_ID_MASK) >> ROCKER_GROUP_VLAN_ID_SHIFT) +#define ROCKER_GROUP_VLAN_SET(vlan_id) \ + (((vlan_id) << ROCKER_GROUP_VLAN_ID_SHIFT) & ROCKER_GROUP_VLAN_ID_MASK) +#define 
ROCKER_GROUP_PORT_GET(group_id) \ + (((group_id) & ROCKER_GROUP_PORT_MASK) >> ROCKER_GROUP_PORT_SHIFT) +#define ROCKER_GROUP_PORT_SET(port) \ + (((port) << ROCKER_GROUP_PORT_SHIFT) & ROCKER_GROUP_PORT_MASK) +#define ROCKER_GROUP_INDEX_GET(group_id) \ + (((group_id) & ROCKER_GROUP_INDEX_MASK) >> ROCKER_GROUP_INDEX_SHIFT) +#define ROCKER_GROUP_INDEX_SET(index) \ + (((index) << ROCKER_GROUP_INDEX_SHIFT) & ROCKER_GROUP_INDEX_MASK) +#define ROCKER_GROUP_INDEX_LONG_GET(group_id) \ + (((group_id) & ROCKER_GROUP_INDEX_LONG_MASK) >> \ + ROCKER_GROUP_INDEX_LONG_SHIFT) +#define ROCKER_GROUP_INDEX_LONG_SET(index) \ + (((index) << ROCKER_GROUP_INDEX_LONG_SHIFT) & \ + ROCKER_GROUP_INDEX_LONG_MASK) + +#define ROCKER_GROUP_NONE 0 +#define ROCKER_GROUP_L2_INTERFACE(vlan_id, port) \ + (ROCKER_GROUP_TYPE_SET(ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE) |\ + ROCKER_GROUP_VLAN_SET(ntohs(vlan_id)) | ROCKER_GROUP_PORT_SET(port)) +#define ROCKER_GROUP_L2_REWRITE(index) \ + (ROCKER_GROUP_TYPE_SET(ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE) |\ + ROCKER_GROUP_INDEX_LONG_SET(index)) +#define ROCKER_GROUP_L2_MCAST(vlan_id, index) \ + (ROCKER_GROUP_TYPE_SET(ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST) |\ + ROCKER_GROUP_VLAN_SET(ntohs(vlan_id)) | ROCKER_GROUP_INDEX_SET(index)) +#define ROCKER_GROUP_L2_FLOOD(vlan_id, index) \ + (ROCKER_GROUP_TYPE_SET(ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD) |\ + ROCKER_GROUP_VLAN_SET(ntohs(vlan_id)) | ROCKER_GROUP_INDEX_SET(index)) +#define ROCKER_GROUP_L3_UNICAST(index) \ + (ROCKER_GROUP_TYPE_SET(ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST) |\ + ROCKER_GROUP_INDEX_LONG_SET(index)) + +/* + * Rocker general purpose registers + */ +#define ROCKER_CONTROL 0x0300 +#define ROCKER_PORT_PHYS_COUNT 0x0304 +#define ROCKER_PORT_PHYS_LINK_STATUS 0x0310 /* 8-byte */ +#define ROCKER_PORT_PHYS_ENABLE 0x0318 /* 8-byte */ +#define ROCKER_SWITCH_ID 0x0320 /* 8-byte */ + +/* + * Rocker control bits + */ +#define ROCKER_CONTROL_RESET (1 << 0) + +#endif /* ROCKER_HW_H */ diff --git a/hw/net/rocker/rocker_of_dpa.c b/hw/net/rocker/rocker_of_dpa.c new file mode 100644 index 000000000..b3b8c5bb6 --- /dev/null +++ b/hw/net/rocker/rocker_of_dpa.c @@ -0,0 +1,2597 @@ +/* + * QEMU rocker switch emulation - OF-DPA flow processing support + * + * Copyright (c) 2014 Scott Feldman + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include "qemu/osdep.h" +#include "net/eth.h" +#include "qapi/error.h" +#include "qapi/qapi-commands-rocker.h" +#include "qemu/iov.h" +#include "qemu/timer.h" + +#include "rocker.h" +#include "rocker_hw.h" +#include "rocker_fp.h" +#include "rocker_tlv.h" +#include "rocker_world.h" +#include "rocker_desc.h" +#include "rocker_of_dpa.h" + +static const MACAddr zero_mac = { .a = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 } }; +static const MACAddr ff_mac = { .a = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff } }; + +typedef struct of_dpa { + World *world; + GHashTable *flow_tbl; + GHashTable *group_tbl; + unsigned int flow_tbl_max_size; + unsigned int group_tbl_max_size; +} OfDpa; + +/* flow_key stolen mostly from OVS + * + * Note: fields that compare with network packet header fields + * are stored in network order (BE) to avoid per-packet field + * byte-swaps. + */ + +typedef struct of_dpa_flow_key { + uint32_t in_pport; /* ingress port */ + uint32_t tunnel_id; /* overlay tunnel id */ + uint32_t tbl_id; /* table id */ + struct { + __be16 vlan_id; /* 0 if no VLAN */ + MACAddr src; /* ethernet source address */ + MACAddr dst; /* ethernet destination address */ + __be16 type; /* ethernet frame type */ + } eth; + struct { + uint8_t proto; /* IP protocol or ARP opcode */ + uint8_t tos; /* IP ToS */ + uint8_t ttl; /* IP TTL/hop limit */ + uint8_t frag; /* one of FRAG_TYPE_* */ + } ip; + union { + struct { + struct { + __be32 src; /* IP source address */ + __be32 dst; /* IP destination address */ + } addr; + union { + struct { + __be16 src; /* TCP/UDP/SCTP source port */ + __be16 dst; /* TCP/UDP/SCTP destination port */ + __be16 flags; /* TCP flags */ + } tp; + struct { + MACAddr sha; /* ARP source hardware address */ + MACAddr tha; /* ARP target hardware address */ + } arp; + }; + } ipv4; + struct { + struct { + Ipv6Addr src; /* IPv6 source address */ + Ipv6Addr dst; /* IPv6 destination address */ + } addr; + __be32 label; /* IPv6 flow label */ + struct { + __be16 src; /* TCP/UDP/SCTP source port */ + __be16 dst; /* TCP/UDP/SCTP destination port */ + __be16 flags; /* TCP flags */ + } tp; + struct { + Ipv6Addr target; /* ND target address */ + MACAddr sll; /* ND source link layer address */ + MACAddr tll; /* ND target link layer address */ + } nd; + } ipv6; + }; + int width; /* how many uint64_t's in key? 
*/ +} OfDpaFlowKey; + +/* Width of key which includes field 'f' in u64s, rounded up */ +#define FLOW_KEY_WIDTH(f) \ + DIV_ROUND_UP(offsetof(OfDpaFlowKey, f) + sizeof_field(OfDpaFlowKey, f), \ + sizeof(uint64_t)) + +typedef struct of_dpa_flow_action { + uint32_t goto_tbl; + struct { + uint32_t group_id; + uint32_t tun_log_lport; + __be16 vlan_id; + } write; + struct { + __be16 new_vlan_id; + uint32_t out_pport; + uint8_t copy_to_cpu; + __be16 vlan_id; + } apply; +} OfDpaFlowAction; + +typedef struct of_dpa_flow { + uint32_t lpm; + uint32_t priority; + uint32_t hardtime; + uint32_t idletime; + uint64_t cookie; + OfDpaFlowKey key; + OfDpaFlowKey mask; + OfDpaFlowAction action; + struct { + uint64_t hits; + int64_t install_time; + int64_t refresh_time; + uint64_t rx_pkts; + uint64_t tx_pkts; + } stats; +} OfDpaFlow; + +typedef struct of_dpa_flow_pkt_fields { + uint32_t tunnel_id; + struct eth_header *ethhdr; + __be16 *h_proto; + struct vlan_header *vlanhdr; + struct ip_header *ipv4hdr; + struct ip6_header *ipv6hdr; + Ipv6Addr *ipv6_src_addr; + Ipv6Addr *ipv6_dst_addr; +} OfDpaFlowPktFields; + +typedef struct of_dpa_flow_context { + uint32_t in_pport; + uint32_t tunnel_id; + struct iovec *iov; + int iovcnt; + struct eth_header ethhdr_rewrite; + struct vlan_header vlanhdr_rewrite; + struct vlan_header vlanhdr; + OfDpa *of_dpa; + OfDpaFlowPktFields fields; + OfDpaFlowAction action_set; +} OfDpaFlowContext; + +typedef struct of_dpa_flow_match { + OfDpaFlowKey value; + OfDpaFlow *best; +} OfDpaFlowMatch; + +typedef struct of_dpa_group { + uint32_t id; + union { + struct { + uint32_t out_pport; + uint8_t pop_vlan; + } l2_interface; + struct { + uint32_t group_id; + MACAddr src_mac; + MACAddr dst_mac; + __be16 vlan_id; + } l2_rewrite; + struct { + uint16_t group_count; + uint32_t *group_ids; + } l2_flood; + struct { + uint32_t group_id; + MACAddr src_mac; + MACAddr dst_mac; + __be16 vlan_id; + uint8_t ttl_check; + } l3_unicast; + }; +} OfDpaGroup; + +static int of_dpa_mask2prefix(__be32 mask) +{ + int i; + int count = 32; + + for (i = 0; i < 32; i++) { + if (!(ntohl(mask) & ((2 << i) - 1))) { + count--; + } + } + + return count; +} + +#if defined(DEBUG_ROCKER) +static void of_dpa_flow_key_dump(OfDpaFlowKey *key, OfDpaFlowKey *mask) +{ + char buf[512], *b = buf, *mac; + + b += sprintf(b, " tbl %2d", key->tbl_id); + + if (key->in_pport || (mask && mask->in_pport)) { + b += sprintf(b, " in_pport %2d", key->in_pport); + if (mask && mask->in_pport != 0xffffffff) { + b += sprintf(b, "/0x%08x", key->in_pport); + } + } + + if (key->tunnel_id || (mask && mask->tunnel_id)) { + b += sprintf(b, " tun %8d", key->tunnel_id); + if (mask && mask->tunnel_id != 0xffffffff) { + b += sprintf(b, "/0x%08x", key->tunnel_id); + } + } + + if (key->eth.vlan_id || (mask && mask->eth.vlan_id)) { + b += sprintf(b, " vlan %4d", ntohs(key->eth.vlan_id)); + if (mask && mask->eth.vlan_id != 0xffff) { + b += sprintf(b, "/0x%04x", ntohs(key->eth.vlan_id)); + } + } + + if (memcmp(key->eth.src.a, zero_mac.a, ETH_ALEN) || + (mask && memcmp(mask->eth.src.a, zero_mac.a, ETH_ALEN))) { + mac = qemu_mac_strdup_printf(key->eth.src.a); + b += sprintf(b, " src %s", mac); + g_free(mac); + if (mask && memcmp(mask->eth.src.a, ff_mac.a, ETH_ALEN)) { + mac = qemu_mac_strdup_printf(mask->eth.src.a); + b += sprintf(b, "/%s", mac); + g_free(mac); + } + } + + if (memcmp(key->eth.dst.a, zero_mac.a, ETH_ALEN) || + (mask && memcmp(mask->eth.dst.a, zero_mac.a, ETH_ALEN))) { + mac = qemu_mac_strdup_printf(key->eth.dst.a); + b += sprintf(b, " dst %s", mac); + 
g_free(mac); + if (mask && memcmp(mask->eth.dst.a, ff_mac.a, ETH_ALEN)) { + mac = qemu_mac_strdup_printf(mask->eth.dst.a); + b += sprintf(b, "/%s", mac); + g_free(mac); + } + } + + if (key->eth.type || (mask && mask->eth.type)) { + b += sprintf(b, " type 0x%04x", ntohs(key->eth.type)); + if (mask && mask->eth.type != 0xffff) { + b += sprintf(b, "/0x%04x", ntohs(mask->eth.type)); + } + switch (ntohs(key->eth.type)) { + case 0x0800: + case 0x86dd: + if (key->ip.proto || (mask && mask->ip.proto)) { + b += sprintf(b, " ip proto %2d", key->ip.proto); + if (mask && mask->ip.proto != 0xff) { + b += sprintf(b, "/0x%02x", mask->ip.proto); + } + } + if (key->ip.tos || (mask && mask->ip.tos)) { + b += sprintf(b, " ip tos %2d", key->ip.tos); + if (mask && mask->ip.tos != 0xff) { + b += sprintf(b, "/0x%02x", mask->ip.tos); + } + } + break; + } + switch (ntohs(key->eth.type)) { + case 0x0800: + if (key->ipv4.addr.dst || (mask && mask->ipv4.addr.dst)) { + b += sprintf(b, " dst %s", + inet_ntoa(*(struct in_addr *)&key->ipv4.addr.dst)); + if (mask) { + b += sprintf(b, "/%d", + of_dpa_mask2prefix(mask->ipv4.addr.dst)); + } + } + break; + } + } + + DPRINTF("%s\n", buf); +} +#else +#define of_dpa_flow_key_dump(k, m) +#endif + +static void _of_dpa_flow_match(void *key, void *value, void *user_data) +{ + OfDpaFlow *flow = value; + OfDpaFlowMatch *match = user_data; + uint64_t *k = (uint64_t *)&flow->key; + uint64_t *m = (uint64_t *)&flow->mask; + uint64_t *v = (uint64_t *)&match->value; + int i; + + if (flow->key.tbl_id == match->value.tbl_id) { + of_dpa_flow_key_dump(&flow->key, &flow->mask); + } + + if (flow->key.width > match->value.width) { + return; + } + + for (i = 0; i < flow->key.width; i++, k++, m++, v++) { + if ((~*k & *m & *v) | (*k & *m & ~*v)) { + return; + } + } + + DPRINTF("match\n"); + + if (!match->best || + flow->priority > match->best->priority || + flow->lpm > match->best->lpm) { + match->best = flow; + } +} + +static OfDpaFlow *of_dpa_flow_match(OfDpa *of_dpa, OfDpaFlowMatch *match) +{ + DPRINTF("\nnew search\n"); + of_dpa_flow_key_dump(&match->value, NULL); + + g_hash_table_foreach(of_dpa->flow_tbl, _of_dpa_flow_match, match); + + return match->best; +} + +static OfDpaFlow *of_dpa_flow_find(OfDpa *of_dpa, uint64_t cookie) +{ + return g_hash_table_lookup(of_dpa->flow_tbl, &cookie); +} + +static int of_dpa_flow_add(OfDpa *of_dpa, OfDpaFlow *flow) +{ + g_hash_table_insert(of_dpa->flow_tbl, &flow->cookie, flow); + + return ROCKER_OK; +} + +static void of_dpa_flow_del(OfDpa *of_dpa, OfDpaFlow *flow) +{ + g_hash_table_remove(of_dpa->flow_tbl, &flow->cookie); +} + +static OfDpaFlow *of_dpa_flow_alloc(uint64_t cookie) +{ + OfDpaFlow *flow; + int64_t now = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) / 1000; + + flow = g_new0(OfDpaFlow, 1); + + flow->cookie = cookie; + flow->mask.tbl_id = 0xffffffff; + + flow->stats.install_time = flow->stats.refresh_time = now; + + return flow; +} + +static void of_dpa_flow_pkt_hdr_reset(OfDpaFlowContext *fc) +{ + OfDpaFlowPktFields *fields = &fc->fields; + + fc->iov[0].iov_base = fields->ethhdr; + fc->iov[0].iov_len = sizeof(struct eth_header); + fc->iov[1].iov_base = fields->vlanhdr; + fc->iov[1].iov_len = fields->vlanhdr ? 
sizeof(struct vlan_header) : 0; +} + +static void of_dpa_flow_pkt_parse(OfDpaFlowContext *fc, + const struct iovec *iov, int iovcnt) +{ + OfDpaFlowPktFields *fields = &fc->fields; + size_t sofar = 0; + int i; + + sofar += sizeof(struct eth_header); + if (iov->iov_len < sofar) { + DPRINTF("flow_pkt_parse underrun on eth_header\n"); + return; + } + + fields->ethhdr = iov->iov_base; + fields->h_proto = &fields->ethhdr->h_proto; + + if (ntohs(*fields->h_proto) == ETH_P_VLAN) { + sofar += sizeof(struct vlan_header); + if (iov->iov_len < sofar) { + DPRINTF("flow_pkt_parse underrun on vlan_header\n"); + return; + } + fields->vlanhdr = (struct vlan_header *)(fields->ethhdr + 1); + fields->h_proto = &fields->vlanhdr->h_proto; + } + + switch (ntohs(*fields->h_proto)) { + case ETH_P_IP: + sofar += sizeof(struct ip_header); + if (iov->iov_len < sofar) { + DPRINTF("flow_pkt_parse underrun on ip_header\n"); + return; + } + fields->ipv4hdr = (struct ip_header *)(fields->h_proto + 1); + break; + case ETH_P_IPV6: + sofar += sizeof(struct ip6_header); + if (iov->iov_len < sofar) { + DPRINTF("flow_pkt_parse underrun on ip6_header\n"); + return; + } + fields->ipv6hdr = (struct ip6_header *)(fields->h_proto + 1); + break; + } + + /* To facilitate (potential) VLAN tag insertion, Make a + * copy of the iov and insert two new vectors at the + * beginning for eth hdr and vlan hdr. No data is copied, + * just the vectors. + */ + + of_dpa_flow_pkt_hdr_reset(fc); + + fc->iov[2].iov_base = fields->h_proto + 1; + fc->iov[2].iov_len = iov->iov_len - fc->iov[0].iov_len - fc->iov[1].iov_len; + + for (i = 1; i < iovcnt; i++) { + fc->iov[i+2] = iov[i]; + } + + fc->iovcnt = iovcnt + 2; +} + +static void of_dpa_flow_pkt_insert_vlan(OfDpaFlowContext *fc, __be16 vlan_id) +{ + OfDpaFlowPktFields *fields = &fc->fields; + uint16_t h_proto = fields->ethhdr->h_proto; + + if (fields->vlanhdr) { + DPRINTF("flow_pkt_insert_vlan packet already has vlan\n"); + return; + } + + fields->ethhdr->h_proto = htons(ETH_P_VLAN); + fields->vlanhdr = &fc->vlanhdr; + fields->vlanhdr->h_tci = vlan_id; + fields->vlanhdr->h_proto = h_proto; + fields->h_proto = &fields->vlanhdr->h_proto; + + fc->iov[1].iov_base = fields->vlanhdr; + fc->iov[1].iov_len = sizeof(struct vlan_header); +} + +static void of_dpa_flow_pkt_strip_vlan(OfDpaFlowContext *fc) +{ + OfDpaFlowPktFields *fields = &fc->fields; + + if (!fields->vlanhdr) { + return; + } + + fc->iov[0].iov_len -= sizeof(fields->ethhdr->h_proto); + fc->iov[1].iov_base = fields->h_proto; + fc->iov[1].iov_len = sizeof(fields->ethhdr->h_proto); +} + +static void of_dpa_flow_pkt_hdr_rewrite(OfDpaFlowContext *fc, + uint8_t *src_mac, uint8_t *dst_mac, + __be16 vlan_id) +{ + OfDpaFlowPktFields *fields = &fc->fields; + + if (src_mac || dst_mac) { + memcpy(&fc->ethhdr_rewrite, fields->ethhdr, sizeof(struct eth_header)); + if (src_mac && memcmp(src_mac, zero_mac.a, ETH_ALEN)) { + memcpy(fc->ethhdr_rewrite.h_source, src_mac, ETH_ALEN); + } + if (dst_mac && memcmp(dst_mac, zero_mac.a, ETH_ALEN)) { + memcpy(fc->ethhdr_rewrite.h_dest, dst_mac, ETH_ALEN); + } + fc->iov[0].iov_base = &fc->ethhdr_rewrite; + } + + if (vlan_id && fields->vlanhdr) { + fc->vlanhdr_rewrite = fc->vlanhdr; + fc->vlanhdr_rewrite.h_tci = vlan_id; + fc->iov[1].iov_base = &fc->vlanhdr_rewrite; + } +} + +static void of_dpa_flow_ig_tbl(OfDpaFlowContext *fc, uint32_t tbl_id); + +static void of_dpa_ig_port_build_match(OfDpaFlowContext *fc, + OfDpaFlowMatch *match) +{ + match->value.tbl_id = ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT; + match->value.in_pport = 
fc->in_pport; + match->value.width = FLOW_KEY_WIDTH(tbl_id); +} + +static void of_dpa_ig_port_miss(OfDpaFlowContext *fc) +{ + uint32_t port; + + /* The default on miss is for packets from physical ports + * to go to the VLAN Flow Table. There is no default rule + * for packets from logical ports, which are dropped on miss. + */ + + if (fp_port_from_pport(fc->in_pport, &port)) { + of_dpa_flow_ig_tbl(fc, ROCKER_OF_DPA_TABLE_ID_VLAN); + } +} + +static void of_dpa_vlan_build_match(OfDpaFlowContext *fc, + OfDpaFlowMatch *match) +{ + match->value.tbl_id = ROCKER_OF_DPA_TABLE_ID_VLAN; + match->value.in_pport = fc->in_pport; + if (fc->fields.vlanhdr) { + match->value.eth.vlan_id = fc->fields.vlanhdr->h_tci; + } + match->value.width = FLOW_KEY_WIDTH(eth.vlan_id); +} + +static void of_dpa_vlan_insert(OfDpaFlowContext *fc, + OfDpaFlow *flow) +{ + if (flow->action.apply.new_vlan_id) { + of_dpa_flow_pkt_insert_vlan(fc, flow->action.apply.new_vlan_id); + } +} + +static void of_dpa_term_mac_build_match(OfDpaFlowContext *fc, + OfDpaFlowMatch *match) +{ + match->value.tbl_id = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC; + match->value.in_pport = fc->in_pport; + match->value.eth.type = *fc->fields.h_proto; + match->value.eth.vlan_id = fc->fields.vlanhdr->h_tci; + memcpy(match->value.eth.dst.a, fc->fields.ethhdr->h_dest, + sizeof(match->value.eth.dst.a)); + match->value.width = FLOW_KEY_WIDTH(eth.type); +} + +static void of_dpa_term_mac_miss(OfDpaFlowContext *fc) +{ + of_dpa_flow_ig_tbl(fc, ROCKER_OF_DPA_TABLE_ID_BRIDGING); +} + +static void of_dpa_apply_actions(OfDpaFlowContext *fc, + OfDpaFlow *flow) +{ + fc->action_set.apply.copy_to_cpu = flow->action.apply.copy_to_cpu; + fc->action_set.apply.vlan_id = flow->key.eth.vlan_id; +} + +static void of_dpa_bridging_build_match(OfDpaFlowContext *fc, + OfDpaFlowMatch *match) +{ + match->value.tbl_id = ROCKER_OF_DPA_TABLE_ID_BRIDGING; + if (fc->fields.vlanhdr) { + match->value.eth.vlan_id = fc->fields.vlanhdr->h_tci; + } else if (fc->tunnel_id) { + match->value.tunnel_id = fc->tunnel_id; + } + memcpy(match->value.eth.dst.a, fc->fields.ethhdr->h_dest, + sizeof(match->value.eth.dst.a)); + match->value.width = FLOW_KEY_WIDTH(eth.dst); +} + +static void of_dpa_bridging_learn(OfDpaFlowContext *fc, + OfDpaFlow *dst_flow) +{ + OfDpaFlowMatch match = { { 0, }, }; + OfDpaFlow *flow; + uint8_t *addr; + uint16_t vlan_id; + int64_t now = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) / 1000; + int64_t refresh_delay = 1; + + /* Do a lookup in bridge table by src_mac/vlan */ + + addr = fc->fields.ethhdr->h_source; + vlan_id = fc->fields.vlanhdr->h_tci; + + match.value.tbl_id = ROCKER_OF_DPA_TABLE_ID_BRIDGING; + match.value.eth.vlan_id = vlan_id; + memcpy(match.value.eth.dst.a, addr, sizeof(match.value.eth.dst.a)); + match.value.width = FLOW_KEY_WIDTH(eth.dst); + + flow = of_dpa_flow_match(fc->of_dpa, &match); + if (flow) { + if (!memcmp(flow->mask.eth.dst.a, ff_mac.a, + sizeof(flow->mask.eth.dst.a))) { + /* src_mac/vlan already learned; if in_port and out_port + * don't match, the end station has moved and the port + * needs updating */ + /* XXX implement the in_port/out_port check */ + if (now - flow->stats.refresh_time < refresh_delay) { + return; + } + flow->stats.refresh_time = now; + } + } + + /* Let driver know about mac/vlan. This may be a new mac/vlan + * or a refresh of existing mac/vlan that's been hit after the + * refresh_delay. 
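+     * (now and refresh_time are in seconds, derived from
+     * QEMU_CLOCK_VIRTUAL, and refresh_delay is 1, so an
+     * already-learned mac/vlan is re-reported to the driver at
+     * most once per second.)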
+ */ + + rocker_event_mac_vlan_seen(world_rocker(fc->of_dpa->world), + fc->in_pport, addr, vlan_id); +} + +static void of_dpa_bridging_miss(OfDpaFlowContext *fc) +{ + of_dpa_bridging_learn(fc, NULL); + of_dpa_flow_ig_tbl(fc, ROCKER_OF_DPA_TABLE_ID_ACL_POLICY); +} + +static void of_dpa_bridging_action_write(OfDpaFlowContext *fc, + OfDpaFlow *flow) +{ + if (flow->action.write.group_id != ROCKER_GROUP_NONE) { + fc->action_set.write.group_id = flow->action.write.group_id; + } + fc->action_set.write.tun_log_lport = flow->action.write.tun_log_lport; +} + +static void of_dpa_unicast_routing_build_match(OfDpaFlowContext *fc, + OfDpaFlowMatch *match) +{ + match->value.tbl_id = ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING; + match->value.eth.type = *fc->fields.h_proto; + if (fc->fields.ipv4hdr) { + match->value.ipv4.addr.dst = fc->fields.ipv4hdr->ip_dst; + } + if (fc->fields.ipv6_dst_addr) { + memcpy(&match->value.ipv6.addr.dst, fc->fields.ipv6_dst_addr, + sizeof(match->value.ipv6.addr.dst)); + } + match->value.width = FLOW_KEY_WIDTH(ipv6.addr.dst); +} + +static void of_dpa_unicast_routing_miss(OfDpaFlowContext *fc) +{ + of_dpa_flow_ig_tbl(fc, ROCKER_OF_DPA_TABLE_ID_ACL_POLICY); +} + +static void of_dpa_unicast_routing_action_write(OfDpaFlowContext *fc, + OfDpaFlow *flow) +{ + if (flow->action.write.group_id != ROCKER_GROUP_NONE) { + fc->action_set.write.group_id = flow->action.write.group_id; + } +} + +static void +of_dpa_multicast_routing_build_match(OfDpaFlowContext *fc, + OfDpaFlowMatch *match) +{ + match->value.tbl_id = ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING; + match->value.eth.type = *fc->fields.h_proto; + match->value.eth.vlan_id = fc->fields.vlanhdr->h_tci; + if (fc->fields.ipv4hdr) { + match->value.ipv4.addr.src = fc->fields.ipv4hdr->ip_src; + match->value.ipv4.addr.dst = fc->fields.ipv4hdr->ip_dst; + } + if (fc->fields.ipv6_src_addr) { + memcpy(&match->value.ipv6.addr.src, fc->fields.ipv6_src_addr, + sizeof(match->value.ipv6.addr.src)); + } + if (fc->fields.ipv6_dst_addr) { + memcpy(&match->value.ipv6.addr.dst, fc->fields.ipv6_dst_addr, + sizeof(match->value.ipv6.addr.dst)); + } + match->value.width = FLOW_KEY_WIDTH(ipv6.addr.dst); +} + +static void of_dpa_multicast_routing_miss(OfDpaFlowContext *fc) +{ + of_dpa_flow_ig_tbl(fc, ROCKER_OF_DPA_TABLE_ID_ACL_POLICY); +} + +static void +of_dpa_multicast_routing_action_write(OfDpaFlowContext *fc, + OfDpaFlow *flow) +{ + if (flow->action.write.group_id != ROCKER_GROUP_NONE) { + fc->action_set.write.group_id = flow->action.write.group_id; + } + fc->action_set.write.vlan_id = flow->action.write.vlan_id; +} + +static void of_dpa_acl_build_match(OfDpaFlowContext *fc, + OfDpaFlowMatch *match) +{ + match->value.tbl_id = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY; + match->value.in_pport = fc->in_pport; + memcpy(match->value.eth.src.a, fc->fields.ethhdr->h_source, + sizeof(match->value.eth.src.a)); + memcpy(match->value.eth.dst.a, fc->fields.ethhdr->h_dest, + sizeof(match->value.eth.dst.a)); + match->value.eth.type = *fc->fields.h_proto; + match->value.eth.vlan_id = fc->fields.vlanhdr->h_tci; + match->value.width = FLOW_KEY_WIDTH(eth.type); + if (fc->fields.ipv4hdr) { + match->value.ip.proto = fc->fields.ipv4hdr->ip_p; + match->value.ip.tos = fc->fields.ipv4hdr->ip_tos; + match->value.width = FLOW_KEY_WIDTH(ip.tos); + } else if (fc->fields.ipv6hdr) { + match->value.ip.proto = + fc->fields.ipv6hdr->ip6_ctlun.ip6_un1.ip6_un1_nxt; + match->value.ip.tos = 0; /* XXX what goes here? 
*/ + match->value.width = FLOW_KEY_WIDTH(ip.tos); + } +} + +static void of_dpa_eg(OfDpaFlowContext *fc); +static void of_dpa_acl_hit(OfDpaFlowContext *fc, + OfDpaFlow *dst_flow) +{ + of_dpa_eg(fc); +} + +static void of_dpa_acl_action_write(OfDpaFlowContext *fc, + OfDpaFlow *flow) +{ + if (flow->action.write.group_id != ROCKER_GROUP_NONE) { + fc->action_set.write.group_id = flow->action.write.group_id; + } +} + +static void of_dpa_drop(OfDpaFlowContext *fc) +{ + /* drop packet */ +} + +static OfDpaGroup *of_dpa_group_find(OfDpa *of_dpa, + uint32_t group_id) +{ + return g_hash_table_lookup(of_dpa->group_tbl, &group_id); +} + +static int of_dpa_group_add(OfDpa *of_dpa, OfDpaGroup *group) +{ + g_hash_table_insert(of_dpa->group_tbl, &group->id, group); + + return 0; +} + +#if 0 +static int of_dpa_group_mod(OfDpa *of_dpa, OfDpaGroup *group) +{ + OfDpaGroup *old_group = of_dpa_group_find(of_dpa, group->id); + + if (!old_group) { + return -ENOENT; + } + + /* XXX */ + + return 0; +} +#endif + +static int of_dpa_group_del(OfDpa *of_dpa, OfDpaGroup *group) +{ + g_hash_table_remove(of_dpa->group_tbl, &group->id); + + return 0; +} + +#if 0 +static int of_dpa_group_get_stats(OfDpa *of_dpa, uint32_t id) +{ + OfDpaGroup *group = of_dpa_group_find(of_dpa, id); + + if (!group) { + return -ENOENT; + } + + /* XXX get/return stats */ + + return 0; +} +#endif + +static OfDpaGroup *of_dpa_group_alloc(uint32_t id) +{ + OfDpaGroup *group = g_new0(OfDpaGroup, 1); + + group->id = id; + + return group; +} + +static void of_dpa_output_l2_interface(OfDpaFlowContext *fc, + OfDpaGroup *group) +{ + uint8_t copy_to_cpu = fc->action_set.apply.copy_to_cpu; + + if (group->l2_interface.pop_vlan) { + of_dpa_flow_pkt_strip_vlan(fc); + } + + /* Note: By default, and as per the OpenFlow 1.3.1 + * specification, a packet cannot be forwarded back + * to the IN_PORT from which it came in. An action + * bucket that specifies the particular packet's + * egress port is not evaluated. 
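+     * Here an out_pport of 0 stands for the CPU port: the packet
+     * is handed back to the host via rx_produce() below instead of
+     * egressing a front-panel port.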
+ */ + + if (group->l2_interface.out_pport == 0) { + rx_produce(fc->of_dpa->world, fc->in_pport, fc->iov, fc->iovcnt, + copy_to_cpu); + } else if (group->l2_interface.out_pport != fc->in_pport) { + rocker_port_eg(world_rocker(fc->of_dpa->world), + group->l2_interface.out_pport, + fc->iov, fc->iovcnt); + } +} + +static void of_dpa_output_l2_rewrite(OfDpaFlowContext *fc, + OfDpaGroup *group) +{ + OfDpaGroup *l2_group = + of_dpa_group_find(fc->of_dpa, group->l2_rewrite.group_id); + + if (!l2_group) { + return; + } + + of_dpa_flow_pkt_hdr_rewrite(fc, group->l2_rewrite.src_mac.a, + group->l2_rewrite.dst_mac.a, + group->l2_rewrite.vlan_id); + of_dpa_output_l2_interface(fc, l2_group); +} + +static void of_dpa_output_l2_flood(OfDpaFlowContext *fc, + OfDpaGroup *group) +{ + OfDpaGroup *l2_group; + int i; + + for (i = 0; i < group->l2_flood.group_count; i++) { + of_dpa_flow_pkt_hdr_reset(fc); + l2_group = of_dpa_group_find(fc->of_dpa, group->l2_flood.group_ids[i]); + if (!l2_group) { + continue; + } + switch (ROCKER_GROUP_TYPE_GET(l2_group->id)) { + case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE: + of_dpa_output_l2_interface(fc, l2_group); + break; + case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE: + of_dpa_output_l2_rewrite(fc, l2_group); + break; + } + } +} + +static void of_dpa_output_l3_unicast(OfDpaFlowContext *fc, OfDpaGroup *group) +{ + OfDpaGroup *l2_group = + of_dpa_group_find(fc->of_dpa, group->l3_unicast.group_id); + + if (!l2_group) { + return; + } + + of_dpa_flow_pkt_hdr_rewrite(fc, group->l3_unicast.src_mac.a, + group->l3_unicast.dst_mac.a, + group->l3_unicast.vlan_id); + /* XXX need ttl_check */ + of_dpa_output_l2_interface(fc, l2_group); +} + +static void of_dpa_eg(OfDpaFlowContext *fc) +{ + OfDpaFlowAction *set = &fc->action_set; + OfDpaGroup *group; + uint32_t group_id; + + /* send a copy of pkt to CPU (controller)? 
*/ + + if (set->apply.copy_to_cpu) { + group_id = ROCKER_GROUP_L2_INTERFACE(set->apply.vlan_id, 0); + group = of_dpa_group_find(fc->of_dpa, group_id); + if (group) { + of_dpa_output_l2_interface(fc, group); + of_dpa_flow_pkt_hdr_reset(fc); + } + } + + /* process group write actions */ + + if (!set->write.group_id) { + return; + } + + group = of_dpa_group_find(fc->of_dpa, set->write.group_id); + if (!group) { + return; + } + + switch (ROCKER_GROUP_TYPE_GET(group->id)) { + case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE: + of_dpa_output_l2_interface(fc, group); + break; + case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE: + of_dpa_output_l2_rewrite(fc, group); + break; + case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD: + case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST: + of_dpa_output_l2_flood(fc, group); + break; + case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST: + of_dpa_output_l3_unicast(fc, group); + break; + } +} + +typedef struct of_dpa_flow_tbl_ops { + void (*build_match)(OfDpaFlowContext *fc, OfDpaFlowMatch *match); + void (*hit)(OfDpaFlowContext *fc, OfDpaFlow *flow); + void (*miss)(OfDpaFlowContext *fc); + void (*hit_no_goto)(OfDpaFlowContext *fc); + void (*action_apply)(OfDpaFlowContext *fc, OfDpaFlow *flow); + void (*action_write)(OfDpaFlowContext *fc, OfDpaFlow *flow); +} OfDpaFlowTblOps; + +static OfDpaFlowTblOps of_dpa_tbl_ops[] = { + [ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT] = { + .build_match = of_dpa_ig_port_build_match, + .miss = of_dpa_ig_port_miss, + .hit_no_goto = of_dpa_drop, + }, + [ROCKER_OF_DPA_TABLE_ID_VLAN] = { + .build_match = of_dpa_vlan_build_match, + .hit_no_goto = of_dpa_drop, + .action_apply = of_dpa_vlan_insert, + }, + [ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC] = { + .build_match = of_dpa_term_mac_build_match, + .miss = of_dpa_term_mac_miss, + .hit_no_goto = of_dpa_drop, + .action_apply = of_dpa_apply_actions, + }, + [ROCKER_OF_DPA_TABLE_ID_BRIDGING] = { + .build_match = of_dpa_bridging_build_match, + .hit = of_dpa_bridging_learn, + .miss = of_dpa_bridging_miss, + .hit_no_goto = of_dpa_drop, + .action_apply = of_dpa_apply_actions, + .action_write = of_dpa_bridging_action_write, + }, + [ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING] = { + .build_match = of_dpa_unicast_routing_build_match, + .miss = of_dpa_unicast_routing_miss, + .hit_no_goto = of_dpa_drop, + .action_write = of_dpa_unicast_routing_action_write, + }, + [ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING] = { + .build_match = of_dpa_multicast_routing_build_match, + .miss = of_dpa_multicast_routing_miss, + .hit_no_goto = of_dpa_drop, + .action_write = of_dpa_multicast_routing_action_write, + }, + [ROCKER_OF_DPA_TABLE_ID_ACL_POLICY] = { + .build_match = of_dpa_acl_build_match, + .hit = of_dpa_acl_hit, + .miss = of_dpa_eg, + .action_apply = of_dpa_apply_actions, + .action_write = of_dpa_acl_action_write, + }, +}; + +static void of_dpa_flow_ig_tbl(OfDpaFlowContext *fc, uint32_t tbl_id) +{ + OfDpaFlowTblOps *ops = &of_dpa_tbl_ops[tbl_id]; + OfDpaFlowMatch match = { { 0, }, }; + OfDpaFlow *flow; + + if (ops->build_match) { + ops->build_match(fc, &match); + } else { + return; + } + + flow = of_dpa_flow_match(fc->of_dpa, &match); + if (!flow) { + if (ops->miss) { + ops->miss(fc); + } + return; + } + + flow->stats.hits++; + + if (ops->action_apply) { + ops->action_apply(fc, flow); + } + + if (ops->action_write) { + ops->action_write(fc, flow); + } + + if (ops->hit) { + ops->hit(fc, flow); + } + + if (flow->action.goto_tbl) { + of_dpa_flow_ig_tbl(fc, flow->action.goto_tbl); + } else if (ops->hit_no_goto) { + ops->hit_no_goto(fc); + } + + /* drop packet */ +} + 
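+/*
+ * Reviewer's sketch (not part of the device model; the helper name
+ * below is ours): the per-word test used by _of_dpa_flow_match()
+ * above.  A packet value v matches a flow key k under mask m iff
+ * every bit selected by m agrees; the matcher rejects a flow as
+ * soon as (~k & m & v) | (k & m & ~v) is non-zero:
+ *
+ *     #include <stdbool.h>
+ *     #include <stdint.h>
+ *
+ *     static bool masked_match(uint64_t k, uint64_t m, uint64_t v)
+ *     {
+ *         // Equivalent to (v & m) == (k & m): a masked bit that
+ *         // differs in either direction makes the OR non-zero.
+ *         return ((~k & m & v) | (k & m & ~v)) == 0;
+ *     }
+ *
+ *     masked_match(0xff, 0xff, 0xff);   // true: exact match
+ *     masked_match(0xff, 0x00, 0x12);   // true: fully wildcarded
+ *     masked_match(0xff, 0xff, 0xfe);   // false: masked bit differs
+ *
+ * Exact-match fields carry an all-ones mask and wildcarded fields a
+ * zero mask, and only the first key.width 64-bit words are compared,
+ * so narrower keys ignore trailing fields entirely.
+ */
+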
+static ssize_t of_dpa_ig(World *world, uint32_t pport, + const struct iovec *iov, int iovcnt) +{ + struct iovec iov_copy[iovcnt + 2]; + OfDpaFlowContext fc = { + .of_dpa = world_private(world), + .in_pport = pport, + .iov = iov_copy, + .iovcnt = iovcnt + 2, + }; + + of_dpa_flow_pkt_parse(&fc, iov, iovcnt); + of_dpa_flow_ig_tbl(&fc, ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT); + + return iov_size(iov, iovcnt); +} + +#define ROCKER_TUNNEL_LPORT 0x00010000 + +static int of_dpa_cmd_add_ig_port(OfDpaFlow *flow, RockerTlv **flow_tlvs) +{ + OfDpaFlowKey *key = &flow->key; + OfDpaFlowKey *mask = &flow->mask; + OfDpaFlowAction *action = &flow->action; + bool overlay_tunnel; + + if (!flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT] || + !flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]) { + return -ROCKER_EINVAL; + } + + key->tbl_id = ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT; + key->width = FLOW_KEY_WIDTH(tbl_id); + + key->in_pport = rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT]); + if (flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT_MASK]) { + mask->in_pport = + rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT_MASK]); + } + + overlay_tunnel = !!(key->in_pport & ROCKER_TUNNEL_LPORT); + + action->goto_tbl = + rocker_tlv_get_le16(flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]); + + if (!overlay_tunnel && action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_VLAN) { + return -ROCKER_EINVAL; + } + + if (overlay_tunnel && action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_BRIDGING) { + return -ROCKER_EINVAL; + } + + return ROCKER_OK; +} + +static int of_dpa_cmd_add_vlan(OfDpaFlow *flow, RockerTlv **flow_tlvs) +{ + OfDpaFlowKey *key = &flow->key; + OfDpaFlowKey *mask = &flow->mask; + OfDpaFlowAction *action = &flow->action; + uint32_t port; + bool untagged; + + if (!flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT] || + !flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]) { + DPRINTF("Must give in_pport and vlan_id to install VLAN tbl entry\n"); + return -ROCKER_EINVAL; + } + + key->tbl_id = ROCKER_OF_DPA_TABLE_ID_VLAN; + key->width = FLOW_KEY_WIDTH(eth.vlan_id); + + key->in_pport = rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT]); + if (!fp_port_from_pport(key->in_pport, &port)) { + DPRINTF("in_pport (%d) not a front-panel port\n", key->in_pport); + return -ROCKER_EINVAL; + } + mask->in_pport = 0xffffffff; + + key->eth.vlan_id = rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]); + + if (flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID_MASK]) { + mask->eth.vlan_id = + rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID_MASK]); + } + + if (key->eth.vlan_id) { + untagged = false; /* filtering */ + } else { + untagged = true; + } + + if (flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]) { + action->goto_tbl = + rocker_tlv_get_le16(flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]); + if (action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC) { + DPRINTF("Goto tbl (%d) must be TERM_MAC\n", action->goto_tbl); + return -ROCKER_EINVAL; + } + } + + if (untagged) { + if (!flow_tlvs[ROCKER_TLV_OF_DPA_NEW_VLAN_ID]) { + DPRINTF("Must specify new vlan_id if untagged\n"); + return -ROCKER_EINVAL; + } + action->apply.new_vlan_id = + rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_NEW_VLAN_ID]); + if (1 > ntohs(action->apply.new_vlan_id) || + ntohs(action->apply.new_vlan_id) > 4095) { + DPRINTF("New vlan_id (%d) must be between 1 and 4095\n", + ntohs(action->apply.new_vlan_id)); + return -ROCKER_EINVAL; + } + } + + return ROCKER_OK; +} + +static int of_dpa_cmd_add_term_mac(OfDpaFlow *flow, RockerTlv **flow_tlvs) +{ + OfDpaFlowKey *key = &flow->key; + OfDpaFlowKey *mask = &flow->mask; + 
OfDpaFlowAction *action = &flow->action; + const MACAddr ipv4_mcast = { .a = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 } }; + const MACAddr ipv4_mask = { .a = { 0xff, 0xff, 0xff, 0x80, 0x00, 0x00 } }; + const MACAddr ipv6_mcast = { .a = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 } }; + const MACAddr ipv6_mask = { .a = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 } }; + uint32_t port; + bool unicast = false; + bool multicast = false; + + if (!flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT] || + !flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT_MASK] || + !flow_tlvs[ROCKER_TLV_OF_DPA_ETHERTYPE] || + !flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC] || + !flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC_MASK] || + !flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID] || + !flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID_MASK]) { + return -ROCKER_EINVAL; + } + + key->tbl_id = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC; + key->width = FLOW_KEY_WIDTH(eth.type); + + key->in_pport = rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT]); + if (!fp_port_from_pport(key->in_pport, &port)) { + return -ROCKER_EINVAL; + } + mask->in_pport = + rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT_MASK]); + + key->eth.type = rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_ETHERTYPE]); + if (key->eth.type != htons(0x0800) && key->eth.type != htons(0x86dd)) { + return -ROCKER_EINVAL; + } + mask->eth.type = htons(0xffff); + + memcpy(key->eth.dst.a, + rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]), + sizeof(key->eth.dst.a)); + memcpy(mask->eth.dst.a, + rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC_MASK]), + sizeof(mask->eth.dst.a)); + + if ((key->eth.dst.a[0] & 0x01) == 0x00) { + unicast = true; + } + + /* only two wildcard rules are acceptable for IPv4 and IPv6 multicast */ + if (memcmp(key->eth.dst.a, ipv4_mcast.a, sizeof(key->eth.dst.a)) == 0 && + memcmp(mask->eth.dst.a, ipv4_mask.a, sizeof(mask->eth.dst.a)) == 0) { + multicast = true; + } + if (memcmp(key->eth.dst.a, ipv6_mcast.a, sizeof(key->eth.dst.a)) == 0 && + memcmp(mask->eth.dst.a, ipv6_mask.a, sizeof(mask->eth.dst.a)) == 0) { + multicast = true; + } + + if (!unicast && !multicast) { + return -ROCKER_EINVAL; + } + + key->eth.vlan_id = rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]); + mask->eth.vlan_id = + rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID_MASK]); + + if (flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]) { + action->goto_tbl = + rocker_tlv_get_le16(flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]); + + if (action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING && + action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING) { + return -ROCKER_EINVAL; + } + + if (unicast && + action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING) { + return -ROCKER_EINVAL; + } + + if (multicast && + action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING) { + return -ROCKER_EINVAL; + } + } + + if (flow_tlvs[ROCKER_TLV_OF_DPA_COPY_CPU_ACTION]) { + action->apply.copy_to_cpu = + rocker_tlv_get_u8(flow_tlvs[ROCKER_TLV_OF_DPA_COPY_CPU_ACTION]); + } + + return ROCKER_OK; +} + +static int of_dpa_cmd_add_bridging(OfDpaFlow *flow, RockerTlv **flow_tlvs) +{ + OfDpaFlowKey *key = &flow->key; + OfDpaFlowKey *mask = &flow->mask; + OfDpaFlowAction *action = &flow->action; + bool unicast = false; + bool dst_mac = false; + bool dst_mac_mask = false; + enum { + BRIDGING_MODE_UNKNOWN, + BRIDGING_MODE_VLAN_UCAST, + BRIDGING_MODE_VLAN_MCAST, + BRIDGING_MODE_VLAN_DFLT, + BRIDGING_MODE_TUNNEL_UCAST, + BRIDGING_MODE_TUNNEL_MCAST, + BRIDGING_MODE_TUNNEL_DFLT, + } mode = BRIDGING_MODE_UNKNOWN; + + key->tbl_id = 
ROCKER_OF_DPA_TABLE_ID_BRIDGING; + + if (flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]) { + key->eth.vlan_id = + rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]); + mask->eth.vlan_id = 0xffff; + key->width = FLOW_KEY_WIDTH(eth.vlan_id); + } + + if (flow_tlvs[ROCKER_TLV_OF_DPA_TUNNEL_ID]) { + key->tunnel_id = + rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_TUNNEL_ID]); + mask->tunnel_id = 0xffffffff; + key->width = FLOW_KEY_WIDTH(tunnel_id); + } + + /* can't do VLAN bridging and tunnel bridging at same time */ + if (key->eth.vlan_id && key->tunnel_id) { + DPRINTF("can't do VLAN bridging and tunnel bridging at same time\n"); + return -ROCKER_EINVAL; + } + + if (flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]) { + memcpy(key->eth.dst.a, + rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]), + sizeof(key->eth.dst.a)); + key->width = FLOW_KEY_WIDTH(eth.dst); + dst_mac = true; + unicast = (key->eth.dst.a[0] & 0x01) == 0x00; + } + + if (flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC_MASK]) { + memcpy(mask->eth.dst.a, + rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC_MASK]), + sizeof(mask->eth.dst.a)); + key->width = FLOW_KEY_WIDTH(eth.dst); + dst_mac_mask = true; + } else if (flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]) { + memcpy(mask->eth.dst.a, ff_mac.a, sizeof(mask->eth.dst.a)); + } + + if (key->eth.vlan_id) { + if (dst_mac && !dst_mac_mask) { + mode = unicast ? BRIDGING_MODE_VLAN_UCAST : + BRIDGING_MODE_VLAN_MCAST; + } else if ((dst_mac && dst_mac_mask) || !dst_mac) { + mode = BRIDGING_MODE_VLAN_DFLT; + } + } else if (key->tunnel_id) { + if (dst_mac && !dst_mac_mask) { + mode = unicast ? BRIDGING_MODE_TUNNEL_UCAST : + BRIDGING_MODE_TUNNEL_MCAST; + } else if ((dst_mac && dst_mac_mask) || !dst_mac) { + mode = BRIDGING_MODE_TUNNEL_DFLT; + } + } + + if (mode == BRIDGING_MODE_UNKNOWN) { + DPRINTF("Unknown bridging mode\n"); + return -ROCKER_EINVAL; + } + + if (flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]) { + action->goto_tbl = + rocker_tlv_get_le16(flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]); + if (action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_ACL_POLICY) { + DPRINTF("Bridging goto tbl must be ACL policy\n"); + return -ROCKER_EINVAL; + } + } + + if (flow_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]) { + action->write.group_id = + rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]); + switch (mode) { + case BRIDGING_MODE_VLAN_UCAST: + if (ROCKER_GROUP_TYPE_GET(action->write.group_id) != + ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE) { + DPRINTF("Bridging mode vlan ucast needs L2 " + "interface group (0x%08x)\n", + action->write.group_id); + return -ROCKER_EINVAL; + } + break; + case BRIDGING_MODE_VLAN_MCAST: + if (ROCKER_GROUP_TYPE_GET(action->write.group_id) != + ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST) { + DPRINTF("Bridging mode vlan mcast needs L2 " + "mcast group (0x%08x)\n", + action->write.group_id); + return -ROCKER_EINVAL; + } + break; + case BRIDGING_MODE_VLAN_DFLT: + if (ROCKER_GROUP_TYPE_GET(action->write.group_id) != + ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD) { + DPRINTF("Bridging mode vlan dflt needs L2 " + "flood group (0x%08x)\n", + action->write.group_id); + return -ROCKER_EINVAL; + } + break; + case BRIDGING_MODE_TUNNEL_MCAST: + if (ROCKER_GROUP_TYPE_GET(action->write.group_id) != + ROCKER_OF_DPA_GROUP_TYPE_L2_OVERLAY) { + DPRINTF("Bridging mode tunnel mcast needs L2 " + "overlay group (0x%08x)\n", + action->write.group_id); + return -ROCKER_EINVAL; + } + break; + case BRIDGING_MODE_TUNNEL_DFLT: + if (ROCKER_GROUP_TYPE_GET(action->write.group_id) != + ROCKER_OF_DPA_GROUP_TYPE_L2_OVERLAY) { + DPRINTF("Bridging mode tunnel dflt 
needs L2 " + "overlay group (0x%08x)\n", + action->write.group_id); + return -ROCKER_EINVAL; + } + break; + default: + return -ROCKER_EINVAL; + } + } + + if (flow_tlvs[ROCKER_TLV_OF_DPA_TUNNEL_LPORT]) { + action->write.tun_log_lport = + rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_TUNNEL_LPORT]); + if (mode != BRIDGING_MODE_TUNNEL_UCAST) { + DPRINTF("Have tunnel logical port but not " + "in bridging tunnel mode\n"); + return -ROCKER_EINVAL; + } + } + + if (flow_tlvs[ROCKER_TLV_OF_DPA_COPY_CPU_ACTION]) { + action->apply.copy_to_cpu = + rocker_tlv_get_u8(flow_tlvs[ROCKER_TLV_OF_DPA_COPY_CPU_ACTION]); + } + + return ROCKER_OK; +} + +static int of_dpa_cmd_add_unicast_routing(OfDpaFlow *flow, + RockerTlv **flow_tlvs) +{ + OfDpaFlowKey *key = &flow->key; + OfDpaFlowKey *mask = &flow->mask; + OfDpaFlowAction *action = &flow->action; + enum { + UNICAST_ROUTING_MODE_UNKNOWN, + UNICAST_ROUTING_MODE_IPV4, + UNICAST_ROUTING_MODE_IPV6, + } mode = UNICAST_ROUTING_MODE_UNKNOWN; + uint8_t type; + + if (!flow_tlvs[ROCKER_TLV_OF_DPA_ETHERTYPE]) { + return -ROCKER_EINVAL; + } + + key->tbl_id = ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING; + key->width = FLOW_KEY_WIDTH(ipv6.addr.dst); + + key->eth.type = rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_ETHERTYPE]); + switch (ntohs(key->eth.type)) { + case 0x0800: + mode = UNICAST_ROUTING_MODE_IPV4; + break; + case 0x86dd: + mode = UNICAST_ROUTING_MODE_IPV6; + break; + default: + return -ROCKER_EINVAL; + } + mask->eth.type = htons(0xffff); + + switch (mode) { + case UNICAST_ROUTING_MODE_IPV4: + if (!flow_tlvs[ROCKER_TLV_OF_DPA_DST_IP]) { + return -ROCKER_EINVAL; + } + key->ipv4.addr.dst = + rocker_tlv_get_u32(flow_tlvs[ROCKER_TLV_OF_DPA_DST_IP]); + if (ipv4_addr_is_multicast(key->ipv4.addr.dst)) { + return -ROCKER_EINVAL; + } + flow->lpm = of_dpa_mask2prefix(htonl(0xffffffff)); + if (flow_tlvs[ROCKER_TLV_OF_DPA_DST_IP_MASK]) { + mask->ipv4.addr.dst = + rocker_tlv_get_u32(flow_tlvs[ROCKER_TLV_OF_DPA_DST_IP_MASK]); + flow->lpm = of_dpa_mask2prefix(mask->ipv4.addr.dst); + } + break; + case UNICAST_ROUTING_MODE_IPV6: + if (!flow_tlvs[ROCKER_TLV_OF_DPA_DST_IPV6]) { + return -ROCKER_EINVAL; + } + memcpy(&key->ipv6.addr.dst, + rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_DST_IPV6]), + sizeof(key->ipv6.addr.dst)); + if (ipv6_addr_is_multicast(&key->ipv6.addr.dst)) { + return -ROCKER_EINVAL; + } + if (flow_tlvs[ROCKER_TLV_OF_DPA_DST_IPV6_MASK]) { + memcpy(&mask->ipv6.addr.dst, + rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_DST_IPV6_MASK]), + sizeof(mask->ipv6.addr.dst)); + } + break; + default: + return -ROCKER_EINVAL; + } + + if (flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]) { + action->goto_tbl = + rocker_tlv_get_le16(flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]); + if (action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_ACL_POLICY) { + return -ROCKER_EINVAL; + } + } + + if (flow_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]) { + action->write.group_id = + rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]); + type = ROCKER_GROUP_TYPE_GET(action->write.group_id); + if (type != ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE && + type != ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST && + type != ROCKER_OF_DPA_GROUP_TYPE_L3_ECMP) { + return -ROCKER_EINVAL; + } + } + + return ROCKER_OK; +} + +static int of_dpa_cmd_add_multicast_routing(OfDpaFlow *flow, + RockerTlv **flow_tlvs) +{ + OfDpaFlowKey *key = &flow->key; + OfDpaFlowKey *mask = &flow->mask; + OfDpaFlowAction *action = &flow->action; + enum { + MULTICAST_ROUTING_MODE_UNKNOWN, + MULTICAST_ROUTING_MODE_IPV4, + MULTICAST_ROUTING_MODE_IPV6, + } mode = 
MULTICAST_ROUTING_MODE_UNKNOWN; + + if (!flow_tlvs[ROCKER_TLV_OF_DPA_ETHERTYPE] || + !flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]) { + return -ROCKER_EINVAL; + } + + key->tbl_id = ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING; + key->width = FLOW_KEY_WIDTH(ipv6.addr.dst); + + key->eth.type = rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_ETHERTYPE]); + switch (ntohs(key->eth.type)) { + case 0x0800: + mode = MULTICAST_ROUTING_MODE_IPV4; + break; + case 0x86dd: + mode = MULTICAST_ROUTING_MODE_IPV6; + break; + default: + return -ROCKER_EINVAL; + } + + key->eth.vlan_id = rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]); + + switch (mode) { + case MULTICAST_ROUTING_MODE_IPV4: + + if (flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IP]) { + key->ipv4.addr.src = + rocker_tlv_get_u32(flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IP]); + } + + if (flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IP_MASK]) { + mask->ipv4.addr.src = + rocker_tlv_get_u32(flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IP_MASK]); + } + + if (!flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IP]) { + if (mask->ipv4.addr.src != 0) { + return -ROCKER_EINVAL; + } + } + + if (!flow_tlvs[ROCKER_TLV_OF_DPA_DST_IP]) { + return -ROCKER_EINVAL; + } + + key->ipv4.addr.dst = + rocker_tlv_get_u32(flow_tlvs[ROCKER_TLV_OF_DPA_DST_IP]); + if (!ipv4_addr_is_multicast(key->ipv4.addr.dst)) { + return -ROCKER_EINVAL; + } + + break; + + case MULTICAST_ROUTING_MODE_IPV6: + + if (flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IPV6]) { + memcpy(&key->ipv6.addr.src, + rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IPV6]), + sizeof(key->ipv6.addr.src)); + } + + if (flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IPV6_MASK]) { + memcpy(&mask->ipv6.addr.src, + rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IPV6_MASK]), + sizeof(mask->ipv6.addr.src)); + } + + if (!flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IPV6]) { + if (mask->ipv6.addr.src.addr32[0] != 0 && + mask->ipv6.addr.src.addr32[1] != 0 && + mask->ipv6.addr.src.addr32[2] != 0 && + mask->ipv6.addr.src.addr32[3] != 0) { + return -ROCKER_EINVAL; + } + } + + if (!flow_tlvs[ROCKER_TLV_OF_DPA_DST_IPV6]) { + return -ROCKER_EINVAL; + } + + memcpy(&key->ipv6.addr.dst, + rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_DST_IPV6]), + sizeof(key->ipv6.addr.dst)); + if (!ipv6_addr_is_multicast(&key->ipv6.addr.dst)) { + return -ROCKER_EINVAL; + } + + break; + + default: + return -ROCKER_EINVAL; + } + + if (flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]) { + action->goto_tbl = + rocker_tlv_get_le16(flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]); + if (action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_ACL_POLICY) { + return -ROCKER_EINVAL; + } + } + + if (flow_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]) { + action->write.group_id = + rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]); + if (ROCKER_GROUP_TYPE_GET(action->write.group_id) != + ROCKER_OF_DPA_GROUP_TYPE_L3_MCAST) { + return -ROCKER_EINVAL; + } + action->write.vlan_id = key->eth.vlan_id; + } + + return ROCKER_OK; +} + +static int of_dpa_cmd_add_acl_ip(OfDpaFlowKey *key, OfDpaFlowKey *mask, + RockerTlv **flow_tlvs) +{ + key->width = FLOW_KEY_WIDTH(ip.tos); + + key->ip.proto = 0; + key->ip.tos = 0; + mask->ip.proto = 0; + mask->ip.tos = 0; + + if (flow_tlvs[ROCKER_TLV_OF_DPA_IP_PROTO]) { + key->ip.proto = + rocker_tlv_get_u8(flow_tlvs[ROCKER_TLV_OF_DPA_IP_PROTO]); + } + if (flow_tlvs[ROCKER_TLV_OF_DPA_IP_PROTO_MASK]) { + mask->ip.proto = + rocker_tlv_get_u8(flow_tlvs[ROCKER_TLV_OF_DPA_IP_PROTO_MASK]); + } + if (flow_tlvs[ROCKER_TLV_OF_DPA_IP_DSCP]) { + key->ip.tos = + rocker_tlv_get_u8(flow_tlvs[ROCKER_TLV_OF_DPA_IP_DSCP]); + } + if (flow_tlvs[ROCKER_TLV_OF_DPA_IP_DSCP_MASK]) { + 
mask->ip.tos = + rocker_tlv_get_u8(flow_tlvs[ROCKER_TLV_OF_DPA_IP_DSCP_MASK]); + } + if (flow_tlvs[ROCKER_TLV_OF_DPA_IP_ECN]) { + key->ip.tos |= + rocker_tlv_get_u8(flow_tlvs[ROCKER_TLV_OF_DPA_IP_ECN]) << 6; + } + if (flow_tlvs[ROCKER_TLV_OF_DPA_IP_ECN_MASK]) { + mask->ip.tos |= + rocker_tlv_get_u8(flow_tlvs[ROCKER_TLV_OF_DPA_IP_ECN_MASK]) << 6; + } + + return ROCKER_OK; +} + +static int of_dpa_cmd_add_acl(OfDpaFlow *flow, RockerTlv **flow_tlvs) +{ + OfDpaFlowKey *key = &flow->key; + OfDpaFlowKey *mask = &flow->mask; + OfDpaFlowAction *action = &flow->action; + enum { + ACL_MODE_UNKNOWN, + ACL_MODE_IPV4_VLAN, + ACL_MODE_IPV6_VLAN, + ACL_MODE_IPV4_TENANT, + ACL_MODE_IPV6_TENANT, + ACL_MODE_NON_IP_VLAN, + ACL_MODE_NON_IP_TENANT, + ACL_MODE_ANY_VLAN, + ACL_MODE_ANY_TENANT, + } mode = ACL_MODE_UNKNOWN; + int err = ROCKER_OK; + + if (!flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT] || + !flow_tlvs[ROCKER_TLV_OF_DPA_ETHERTYPE]) { + return -ROCKER_EINVAL; + } + + if (flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID] && + flow_tlvs[ROCKER_TLV_OF_DPA_TUNNEL_ID]) { + return -ROCKER_EINVAL; + } + + key->tbl_id = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY; + key->width = FLOW_KEY_WIDTH(eth.type); + + key->in_pport = rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT]); + if (flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT_MASK]) { + mask->in_pport = + rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT_MASK]); + } + + if (flow_tlvs[ROCKER_TLV_OF_DPA_SRC_MAC]) { + memcpy(key->eth.src.a, + rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_SRC_MAC]), + sizeof(key->eth.src.a)); + } + + if (flow_tlvs[ROCKER_TLV_OF_DPA_SRC_MAC_MASK]) { + memcpy(mask->eth.src.a, + rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_SRC_MAC_MASK]), + sizeof(mask->eth.src.a)); + } + + if (flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]) { + memcpy(key->eth.dst.a, + rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]), + sizeof(key->eth.dst.a)); + } + + if (flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC_MASK]) { + memcpy(mask->eth.dst.a, + rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC_MASK]), + sizeof(mask->eth.dst.a)); + } + + key->eth.type = rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_ETHERTYPE]); + if (key->eth.type) { + mask->eth.type = 0xffff; + } + + if (flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]) { + key->eth.vlan_id = + rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]); + } + + if (flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID_MASK]) { + mask->eth.vlan_id = + rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID_MASK]); + } + + switch (ntohs(key->eth.type)) { + case 0x0000: + mode = (key->eth.vlan_id) ? ACL_MODE_ANY_VLAN : ACL_MODE_ANY_TENANT; + break; + case 0x0800: + mode = (key->eth.vlan_id) ? ACL_MODE_IPV4_VLAN : ACL_MODE_IPV4_TENANT; + break; + case 0x86dd: + mode = (key->eth.vlan_id) ? ACL_MODE_IPV6_VLAN : ACL_MODE_IPV6_TENANT; + break; + default: + mode = (key->eth.vlan_id) ? 
ACL_MODE_NON_IP_VLAN : + ACL_MODE_NON_IP_TENANT; + break; + } + + /* XXX only supporting VLAN modes for now */ + if (mode != ACL_MODE_IPV4_VLAN && + mode != ACL_MODE_IPV6_VLAN && + mode != ACL_MODE_NON_IP_VLAN && + mode != ACL_MODE_ANY_VLAN) { + return -ROCKER_EINVAL; + } + + switch (ntohs(key->eth.type)) { + case 0x0800: + case 0x86dd: + err = of_dpa_cmd_add_acl_ip(key, mask, flow_tlvs); + break; + } + + if (err) { + return err; + } + + if (flow_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]) { + action->write.group_id = + rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]); + } + + if (flow_tlvs[ROCKER_TLV_OF_DPA_COPY_CPU_ACTION]) { + action->apply.copy_to_cpu = + rocker_tlv_get_u8(flow_tlvs[ROCKER_TLV_OF_DPA_COPY_CPU_ACTION]); + } + + return ROCKER_OK; +} + +static int of_dpa_cmd_flow_add_mod(OfDpa *of_dpa, OfDpaFlow *flow, + RockerTlv **flow_tlvs) +{ + enum rocker_of_dpa_table_id tbl; + int err = ROCKER_OK; + + if (!flow_tlvs[ROCKER_TLV_OF_DPA_TABLE_ID] || + !flow_tlvs[ROCKER_TLV_OF_DPA_PRIORITY] || + !flow_tlvs[ROCKER_TLV_OF_DPA_HARDTIME]) { + return -ROCKER_EINVAL; + } + + tbl = rocker_tlv_get_le16(flow_tlvs[ROCKER_TLV_OF_DPA_TABLE_ID]); + flow->priority = rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_PRIORITY]); + flow->hardtime = rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_HARDTIME]); + + if (flow_tlvs[ROCKER_TLV_OF_DPA_IDLETIME]) { + if (tbl == ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT || + tbl == ROCKER_OF_DPA_TABLE_ID_VLAN || + tbl == ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC) { + return -ROCKER_EINVAL; + } + flow->idletime = + rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_IDLETIME]); + } + + switch (tbl) { + case ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT: + err = of_dpa_cmd_add_ig_port(flow, flow_tlvs); + break; + case ROCKER_OF_DPA_TABLE_ID_VLAN: + err = of_dpa_cmd_add_vlan(flow, flow_tlvs); + break; + case ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC: + err = of_dpa_cmd_add_term_mac(flow, flow_tlvs); + break; + case ROCKER_OF_DPA_TABLE_ID_BRIDGING: + err = of_dpa_cmd_add_bridging(flow, flow_tlvs); + break; + case ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING: + err = of_dpa_cmd_add_unicast_routing(flow, flow_tlvs); + break; + case ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING: + err = of_dpa_cmd_add_multicast_routing(flow, flow_tlvs); + break; + case ROCKER_OF_DPA_TABLE_ID_ACL_POLICY: + err = of_dpa_cmd_add_acl(flow, flow_tlvs); + break; + } + + return err; +} + +static int of_dpa_cmd_flow_add(OfDpa *of_dpa, uint64_t cookie, + RockerTlv **flow_tlvs) +{ + OfDpaFlow *flow = of_dpa_flow_find(of_dpa, cookie); + int err = ROCKER_OK; + + if (flow) { + return -ROCKER_EEXIST; + } + + flow = of_dpa_flow_alloc(cookie); + + err = of_dpa_cmd_flow_add_mod(of_dpa, flow, flow_tlvs); + if (err) { + g_free(flow); + return err; + } + + return of_dpa_flow_add(of_dpa, flow); +} + +static int of_dpa_cmd_flow_mod(OfDpa *of_dpa, uint64_t cookie, + RockerTlv **flow_tlvs) +{ + OfDpaFlow *flow = of_dpa_flow_find(of_dpa, cookie); + + if (!flow) { + return -ROCKER_ENOENT; + } + + return of_dpa_cmd_flow_add_mod(of_dpa, flow, flow_tlvs); +} + +static int of_dpa_cmd_flow_del(OfDpa *of_dpa, uint64_t cookie) +{ + OfDpaFlow *flow = of_dpa_flow_find(of_dpa, cookie); + + if (!flow) { + return -ROCKER_ENOENT; + } + + of_dpa_flow_del(of_dpa, flow); + + return ROCKER_OK; +} + +static int of_dpa_cmd_flow_get_stats(OfDpa *of_dpa, uint64_t cookie, + struct desc_info *info, char *buf) +{ + OfDpaFlow *flow = of_dpa_flow_find(of_dpa, cookie); + size_t tlv_size; + int64_t now = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) / 1000; + int pos; + + if (!flow) { + 
return -ROCKER_ENOENT; + } + + tlv_size = rocker_tlv_total_size(sizeof(uint32_t)) + /* duration */ + rocker_tlv_total_size(sizeof(uint64_t)) + /* rx_pkts */ + rocker_tlv_total_size(sizeof(uint64_t)); /* tx_pkts */ + + if (tlv_size > desc_buf_size(info)) { + return -ROCKER_EMSGSIZE; + } + + pos = 0; + rocker_tlv_put_le32(buf, &pos, ROCKER_TLV_OF_DPA_FLOW_STAT_DURATION, + (int32_t)(now - flow->stats.install_time)); + rocker_tlv_put_le64(buf, &pos, ROCKER_TLV_OF_DPA_FLOW_STAT_RX_PKTS, + flow->stats.rx_pkts); + rocker_tlv_put_le64(buf, &pos, ROCKER_TLV_OF_DPA_FLOW_STAT_TX_PKTS, + flow->stats.tx_pkts); + + return desc_set_buf(info, tlv_size); +} + +static int of_dpa_flow_cmd(OfDpa *of_dpa, struct desc_info *info, + char *buf, uint16_t cmd, + RockerTlv **flow_tlvs) +{ + uint64_t cookie; + + if (!flow_tlvs[ROCKER_TLV_OF_DPA_COOKIE]) { + return -ROCKER_EINVAL; + } + + cookie = rocker_tlv_get_le64(flow_tlvs[ROCKER_TLV_OF_DPA_COOKIE]); + + switch (cmd) { + case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD: + return of_dpa_cmd_flow_add(of_dpa, cookie, flow_tlvs); + case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD: + return of_dpa_cmd_flow_mod(of_dpa, cookie, flow_tlvs); + case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL: + return of_dpa_cmd_flow_del(of_dpa, cookie); + case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_GET_STATS: + return of_dpa_cmd_flow_get_stats(of_dpa, cookie, info, buf); + } + + return -ROCKER_ENOTSUP; +} + +static int of_dpa_cmd_add_l2_interface(OfDpaGroup *group, + RockerTlv **group_tlvs) +{ + if (!group_tlvs[ROCKER_TLV_OF_DPA_OUT_PPORT] || + !group_tlvs[ROCKER_TLV_OF_DPA_POP_VLAN]) { + return -ROCKER_EINVAL; + } + + group->l2_interface.out_pport = + rocker_tlv_get_le32(group_tlvs[ROCKER_TLV_OF_DPA_OUT_PPORT]); + group->l2_interface.pop_vlan = + rocker_tlv_get_u8(group_tlvs[ROCKER_TLV_OF_DPA_POP_VLAN]); + + return ROCKER_OK; +} + +static int of_dpa_cmd_add_l2_rewrite(OfDpa *of_dpa, OfDpaGroup *group, + RockerTlv **group_tlvs) +{ + OfDpaGroup *l2_interface_group; + + if (!group_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID_LOWER]) { + return -ROCKER_EINVAL; + } + + group->l2_rewrite.group_id = + rocker_tlv_get_le32(group_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID_LOWER]); + + l2_interface_group = of_dpa_group_find(of_dpa, group->l2_rewrite.group_id); + if (!l2_interface_group || + ROCKER_GROUP_TYPE_GET(l2_interface_group->id) != + ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE) { + DPRINTF("l2 rewrite group needs a valid l2 interface group\n"); + return -ROCKER_EINVAL; + } + + if (group_tlvs[ROCKER_TLV_OF_DPA_SRC_MAC]) { + memcpy(group->l2_rewrite.src_mac.a, + rocker_tlv_data(group_tlvs[ROCKER_TLV_OF_DPA_SRC_MAC]), + sizeof(group->l2_rewrite.src_mac.a)); + } + + if (group_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]) { + memcpy(group->l2_rewrite.dst_mac.a, + rocker_tlv_data(group_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]), + sizeof(group->l2_rewrite.dst_mac.a)); + } + + if (group_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]) { + group->l2_rewrite.vlan_id = + rocker_tlv_get_u16(group_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]); + if (ROCKER_GROUP_VLAN_GET(l2_interface_group->id) != + (ntohs(group->l2_rewrite.vlan_id) & VLAN_VID_MASK)) { + DPRINTF("Set VLAN ID must be same as L2 interface group\n"); + return -ROCKER_EINVAL; + } + } + + return ROCKER_OK; +} + +static int of_dpa_cmd_add_l2_flood(OfDpa *of_dpa, OfDpaGroup *group, + RockerTlv **group_tlvs) +{ + OfDpaGroup *l2_group; + RockerTlv **tlvs; + int err; + int i; + + if (!group_tlvs[ROCKER_TLV_OF_DPA_GROUP_COUNT] || + !group_tlvs[ROCKER_TLV_OF_DPA_GROUP_IDS]) { + return -ROCKER_EINVAL; + } + + group->l2_flood.group_count = + 
rocker_tlv_get_le16(group_tlvs[ROCKER_TLV_OF_DPA_GROUP_COUNT]); + + tlvs = g_new0(RockerTlv *, group->l2_flood.group_count + 1); + + g_free(group->l2_flood.group_ids); + group->l2_flood.group_ids = + g_new0(uint32_t, group->l2_flood.group_count); + + rocker_tlv_parse_nested(tlvs, group->l2_flood.group_count, + group_tlvs[ROCKER_TLV_OF_DPA_GROUP_IDS]); + + for (i = 0; i < group->l2_flood.group_count; i++) { + group->l2_flood.group_ids[i] = rocker_tlv_get_le32(tlvs[i + 1]); + } + + /* All of the L2 interface groups referenced by the L2 flood + * must have the same VLAN + */ + + for (i = 0; i < group->l2_flood.group_count; i++) { + l2_group = of_dpa_group_find(of_dpa, group->l2_flood.group_ids[i]); + if (!l2_group) { + continue; + } + if ((ROCKER_GROUP_TYPE_GET(l2_group->id) == + ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE) && + (ROCKER_GROUP_VLAN_GET(l2_group->id) != + ROCKER_GROUP_VLAN_GET(group->id))) { + DPRINTF("l2 interface group 0x%08x VLAN doesn't match l2 " + "flood group 0x%08x\n", + group->l2_flood.group_ids[i], group->id); + err = -ROCKER_EINVAL; + goto err_out; + } + } + + g_free(tlvs); + return ROCKER_OK; + +err_out: + group->l2_flood.group_count = 0; + g_free(group->l2_flood.group_ids); + g_free(tlvs); + + return err; +} + +static int of_dpa_cmd_add_l3_unicast(OfDpaGroup *group, RockerTlv **group_tlvs) +{ + if (!group_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID_LOWER]) { + return -ROCKER_EINVAL; + } + + group->l3_unicast.group_id = + rocker_tlv_get_le32(group_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID_LOWER]); + + if (group_tlvs[ROCKER_TLV_OF_DPA_SRC_MAC]) { + memcpy(group->l3_unicast.src_mac.a, + rocker_tlv_data(group_tlvs[ROCKER_TLV_OF_DPA_SRC_MAC]), + sizeof(group->l3_unicast.src_mac.a)); + } + + if (group_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]) { + memcpy(group->l3_unicast.dst_mac.a, + rocker_tlv_data(group_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]), + sizeof(group->l3_unicast.dst_mac.a)); + } + + if (group_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]) { + group->l3_unicast.vlan_id = + rocker_tlv_get_u16(group_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]); + } + + if (group_tlvs[ROCKER_TLV_OF_DPA_TTL_CHECK]) { + group->l3_unicast.ttl_check = + rocker_tlv_get_u8(group_tlvs[ROCKER_TLV_OF_DPA_TTL_CHECK]); + } + + return ROCKER_OK; +} + +static int of_dpa_cmd_group_do(OfDpa *of_dpa, uint32_t group_id, + OfDpaGroup *group, RockerTlv **group_tlvs) +{ + uint8_t type = ROCKER_GROUP_TYPE_GET(group_id); + + switch (type) { + case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE: + return of_dpa_cmd_add_l2_interface(group, group_tlvs); + case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE: + return of_dpa_cmd_add_l2_rewrite(of_dpa, group, group_tlvs); + case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD: + /* Treat an L2 multicast group the same as an L2 flood group */ + case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST: + return of_dpa_cmd_add_l2_flood(of_dpa, group, group_tlvs); + case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST: + return of_dpa_cmd_add_l3_unicast(group, group_tlvs); + } + + return -ROCKER_ENOTSUP; +} + +static int of_dpa_cmd_group_add(OfDpa *of_dpa, uint32_t group_id, + RockerTlv **group_tlvs) +{ + OfDpaGroup *group = of_dpa_group_find(of_dpa, group_id); + int err; + + if (group) { + return -ROCKER_EEXIST; + } + + group = of_dpa_group_alloc(group_id); + + err = of_dpa_cmd_group_do(of_dpa, group_id, group, group_tlvs); + if (err) { + goto err_cmd_add; + } + + err = of_dpa_group_add(of_dpa, group); + if (err) { + goto err_cmd_add; + } + + return ROCKER_OK; + +err_cmd_add: + g_free(group); + return err; +} + +static int of_dpa_cmd_group_mod(OfDpa *of_dpa, uint32_t group_id, + RockerTlv **group_tlvs) +{ + 
OfDpaGroup *group = of_dpa_group_find(of_dpa, group_id); + + if (!group) { + return -ROCKER_ENOENT; + } + + return of_dpa_cmd_group_do(of_dpa, group_id, group, group_tlvs); +} + +static int of_dpa_cmd_group_del(OfDpa *of_dpa, uint32_t group_id) +{ + OfDpaGroup *group = of_dpa_group_find(of_dpa, group_id); + + if (!group) { + return -ROCKER_ENOENT; + } + + return of_dpa_group_del(of_dpa, group); +} + +static int of_dpa_cmd_group_get_stats(OfDpa *of_dpa, uint32_t group_id, + struct desc_info *info, char *buf) +{ + return -ROCKER_ENOTSUP; +} + +static int of_dpa_group_cmd(OfDpa *of_dpa, struct desc_info *info, + char *buf, uint16_t cmd, RockerTlv **group_tlvs) +{ + uint32_t group_id; + + if (!group_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]) { + return -ROCKER_EINVAL; + } + + group_id = rocker_tlv_get_le32(group_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]); + + switch (cmd) { + case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD: + return of_dpa_cmd_group_add(of_dpa, group_id, group_tlvs); + case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD: + return of_dpa_cmd_group_mod(of_dpa, group_id, group_tlvs); + case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL: + return of_dpa_cmd_group_del(of_dpa, group_id); + case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_GET_STATS: + return of_dpa_cmd_group_get_stats(of_dpa, group_id, info, buf); + } + + return -ROCKER_ENOTSUP; +} + +static int of_dpa_cmd(World *world, struct desc_info *info, + char *buf, uint16_t cmd, RockerTlv *cmd_info_tlv) +{ + OfDpa *of_dpa = world_private(world); + RockerTlv *tlvs[ROCKER_TLV_OF_DPA_MAX + 1]; + + rocker_tlv_parse_nested(tlvs, ROCKER_TLV_OF_DPA_MAX, cmd_info_tlv); + + switch (cmd) { + case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD: + case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD: + case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL: + case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_GET_STATS: + return of_dpa_flow_cmd(of_dpa, info, buf, cmd, tlvs); + case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD: + case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD: + case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL: + case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_GET_STATS: + return of_dpa_group_cmd(of_dpa, info, buf, cmd, tlvs); + } + + return -ROCKER_ENOTSUP; +} + +static gboolean rocker_int64_equal(gconstpointer v1, gconstpointer v2) +{ + return *((const uint64_t *)v1) == *((const uint64_t *)v2); +} + +static guint rocker_int64_hash(gconstpointer v) +{ + return (guint)*(const uint64_t *)v; +} + +static int of_dpa_init(World *world) +{ + OfDpa *of_dpa = world_private(world); + + of_dpa->world = world; + + of_dpa->flow_tbl = g_hash_table_new_full(rocker_int64_hash, + rocker_int64_equal, + NULL, g_free); + if (!of_dpa->flow_tbl) { + return -ENOMEM; + } + + of_dpa->group_tbl = g_hash_table_new_full(g_int_hash, g_int_equal, + NULL, g_free); + if (!of_dpa->group_tbl) { + goto err_group_tbl; + } + + /* XXX hardcode some artificial table max values */ + of_dpa->flow_tbl_max_size = 100; + of_dpa->group_tbl_max_size = 100; + + return 0; + +err_group_tbl: + g_hash_table_destroy(of_dpa->flow_tbl); + return -ENOMEM; +} + +static void of_dpa_uninit(World *world) +{ + OfDpa *of_dpa = world_private(world); + + g_hash_table_destroy(of_dpa->group_tbl); + g_hash_table_destroy(of_dpa->flow_tbl); +} + +struct of_dpa_flow_fill_context { + RockerOfDpaFlowList *list; + uint32_t tbl_id; +}; + +static void of_dpa_flow_fill(void *cookie, void *value, void *user_data) +{ + struct of_dpa_flow *flow = value; + struct of_dpa_flow_key *key = &flow->key; + struct of_dpa_flow_key *mask = &flow->mask; + struct of_dpa_flow_fill_context *flow_context = user_data; + RockerOfDpaFlow *nflow; + 
RockerOfDpaFlowKey *nkey; + RockerOfDpaFlowMask *nmask; + RockerOfDpaFlowAction *naction; + + if (flow_context->tbl_id != -1 && + flow_context->tbl_id != key->tbl_id) { + return; + } + + nflow = g_malloc0(sizeof(*nflow)); + nkey = nflow->key = g_malloc0(sizeof(*nkey)); + nmask = nflow->mask = g_malloc0(sizeof(*nmask)); + naction = nflow->action = g_malloc0(sizeof(*naction)); + + nflow->cookie = flow->cookie; + nflow->hits = flow->stats.hits; + nkey->priority = flow->priority; + nkey->tbl_id = key->tbl_id; + + if (key->in_pport || mask->in_pport) { + nkey->has_in_pport = true; + nkey->in_pport = key->in_pport; + } + + if (nkey->has_in_pport && mask->in_pport != 0xffffffff) { + nmask->has_in_pport = true; + nmask->in_pport = mask->in_pport; + } + + if (key->eth.vlan_id || mask->eth.vlan_id) { + nkey->has_vlan_id = true; + nkey->vlan_id = ntohs(key->eth.vlan_id); + } + + if (nkey->has_vlan_id && mask->eth.vlan_id != 0xffff) { + nmask->has_vlan_id = true; + nmask->vlan_id = ntohs(mask->eth.vlan_id); + } + + if (key->tunnel_id || mask->tunnel_id) { + nkey->has_tunnel_id = true; + nkey->tunnel_id = key->tunnel_id; + } + + if (nkey->has_tunnel_id && mask->tunnel_id != 0xffffffff) { + nmask->has_tunnel_id = true; + nmask->tunnel_id = mask->tunnel_id; + } + + if (memcmp(key->eth.src.a, zero_mac.a, ETH_ALEN) || + memcmp(mask->eth.src.a, zero_mac.a, ETH_ALEN)) { + nkey->has_eth_src = true; + nkey->eth_src = qemu_mac_strdup_printf(key->eth.src.a); + } + + if (nkey->has_eth_src && memcmp(mask->eth.src.a, ff_mac.a, ETH_ALEN)) { + nmask->has_eth_src = true; + nmask->eth_src = qemu_mac_strdup_printf(mask->eth.src.a); + } + + if (memcmp(key->eth.dst.a, zero_mac.a, ETH_ALEN) || + memcmp(mask->eth.dst.a, zero_mac.a, ETH_ALEN)) { + nkey->has_eth_dst = true; + nkey->eth_dst = qemu_mac_strdup_printf(key->eth.dst.a); + } + + if (nkey->has_eth_dst && memcmp(mask->eth.dst.a, ff_mac.a, ETH_ALEN)) { + nmask->has_eth_dst = true; + nmask->eth_dst = qemu_mac_strdup_printf(mask->eth.dst.a); + } + + if (key->eth.type) { + + nkey->has_eth_type = true; + nkey->eth_type = ntohs(key->eth.type); + + switch (ntohs(key->eth.type)) { + case 0x0800: + case 0x86dd: + if (key->ip.proto || mask->ip.proto) { + nkey->has_ip_proto = true; + nkey->ip_proto = key->ip.proto; + } + if (nkey->has_ip_proto && mask->ip.proto != 0xff) { + nmask->has_ip_proto = true; + nmask->ip_proto = mask->ip.proto; + } + if (key->ip.tos || mask->ip.tos) { + nkey->has_ip_tos = true; + nkey->ip_tos = key->ip.tos; + } + if (nkey->has_ip_tos && mask->ip.tos != 0xff) { + nmask->has_ip_tos = true; + nmask->ip_tos = mask->ip.tos; + } + break; + } + + switch (ntohs(key->eth.type)) { + case 0x0800: + if (key->ipv4.addr.dst || mask->ipv4.addr.dst) { + char *dst = inet_ntoa(*(struct in_addr *)&key->ipv4.addr.dst); + int dst_len = of_dpa_mask2prefix(mask->ipv4.addr.dst); + nkey->has_ip_dst = true; + nkey->ip_dst = g_strdup_printf("%s/%d", dst, dst_len); + } + break; + } + } + + if (flow->action.goto_tbl) { + naction->has_goto_tbl = true; + naction->goto_tbl = flow->action.goto_tbl; + } + + if (flow->action.write.group_id) { + naction->has_group_id = true; + naction->group_id = flow->action.write.group_id; + } + + if (flow->action.apply.new_vlan_id) { + naction->has_new_vlan_id = true; + naction->new_vlan_id = flow->action.apply.new_vlan_id; + } + + QAPI_LIST_PREPEND(flow_context->list, nflow); +} + +RockerOfDpaFlowList *qmp_query_rocker_of_dpa_flows(const char *name, + bool has_tbl_id, + uint32_t tbl_id, + Error **errp) +{ + struct rocker *r; + struct world *w; + 
struct of_dpa *of_dpa; + struct of_dpa_flow_fill_context fill_context = { + .list = NULL, + .tbl_id = tbl_id, + }; + + r = rocker_find(name); + if (!r) { + error_setg(errp, "rocker %s not found", name); + return NULL; + } + + w = rocker_get_world(r, ROCKER_WORLD_TYPE_OF_DPA); + if (!w) { + error_setg(errp, "rocker %s doesn't have OF-DPA world", name); + return NULL; + } + + of_dpa = world_private(w); + + g_hash_table_foreach(of_dpa->flow_tbl, of_dpa_flow_fill, &fill_context); + + return fill_context.list; +} + +struct of_dpa_group_fill_context { + RockerOfDpaGroupList *list; + uint8_t type; +}; + +static void of_dpa_group_fill(void *key, void *value, void *user_data) +{ + struct of_dpa_group *group = value; + struct of_dpa_group_fill_context *flow_context = user_data; + RockerOfDpaGroup *ngroup; + int i; + + if (flow_context->type != 9 && + flow_context->type != ROCKER_GROUP_TYPE_GET(group->id)) { + return; + } + + ngroup = g_malloc0(sizeof(*ngroup)); + + ngroup->id = group->id; + + ngroup->type = ROCKER_GROUP_TYPE_GET(group->id); + + switch (ngroup->type) { + case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE: + ngroup->has_vlan_id = true; + ngroup->vlan_id = ROCKER_GROUP_VLAN_GET(group->id); + ngroup->has_pport = true; + ngroup->pport = ROCKER_GROUP_PORT_GET(group->id); + ngroup->has_out_pport = true; + ngroup->out_pport = group->l2_interface.out_pport; + ngroup->has_pop_vlan = true; + ngroup->pop_vlan = group->l2_interface.pop_vlan; + break; + case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE: + ngroup->has_index = true; + ngroup->index = ROCKER_GROUP_INDEX_LONG_GET(group->id); + ngroup->has_group_id = true; + ngroup->group_id = group->l2_rewrite.group_id; + if (group->l2_rewrite.vlan_id) { + ngroup->has_set_vlan_id = true; + ngroup->set_vlan_id = ntohs(group->l2_rewrite.vlan_id); + } + if (memcmp(group->l2_rewrite.src_mac.a, zero_mac.a, ETH_ALEN)) { + ngroup->has_set_eth_src = true; + ngroup->set_eth_src = + qemu_mac_strdup_printf(group->l2_rewrite.src_mac.a); + } + if (memcmp(group->l2_rewrite.dst_mac.a, zero_mac.a, ETH_ALEN)) { + ngroup->has_set_eth_dst = true; + ngroup->set_eth_dst = + qemu_mac_strdup_printf(group->l2_rewrite.dst_mac.a); + } + break; + case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD: + case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST: + ngroup->has_vlan_id = true; + ngroup->vlan_id = ROCKER_GROUP_VLAN_GET(group->id); + ngroup->has_index = true; + ngroup->index = ROCKER_GROUP_INDEX_GET(group->id); + for (i = 0; i < group->l2_flood.group_count; i++) { + ngroup->has_group_ids = true; + QAPI_LIST_PREPEND(ngroup->group_ids, group->l2_flood.group_ids[i]); + } + break; + case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST: + ngroup->has_index = true; + ngroup->index = ROCKER_GROUP_INDEX_LONG_GET(group->id); + ngroup->has_group_id = true; + ngroup->group_id = group->l3_unicast.group_id; + if (group->l3_unicast.vlan_id) { + ngroup->has_set_vlan_id = true; + ngroup->set_vlan_id = ntohs(group->l3_unicast.vlan_id); + } + if (memcmp(group->l3_unicast.src_mac.a, zero_mac.a, ETH_ALEN)) { + ngroup->has_set_eth_src = true; + ngroup->set_eth_src = + qemu_mac_strdup_printf(group->l3_unicast.src_mac.a); + } + if (memcmp(group->l3_unicast.dst_mac.a, zero_mac.a, ETH_ALEN)) { + ngroup->has_set_eth_dst = true; + ngroup->set_eth_dst = + qemu_mac_strdup_printf(group->l3_unicast.dst_mac.a); + } + if (group->l3_unicast.ttl_check) { + ngroup->has_ttl_check = true; + ngroup->ttl_check = group->l3_unicast.ttl_check; + } + break; + } + + QAPI_LIST_PREPEND(flow_context->list, ngroup); +} + +RockerOfDpaGroupList 
*qmp_query_rocker_of_dpa_groups(const char *name, + bool has_type, + uint8_t type, + Error **errp) +{ + struct rocker *r; + struct world *w; + struct of_dpa *of_dpa; + struct of_dpa_group_fill_context fill_context = { + .list = NULL, + .type = type, + }; + + r = rocker_find(name); + if (!r) { + error_setg(errp, "rocker %s not found", name); + return NULL; + } + + w = rocker_get_world(r, ROCKER_WORLD_TYPE_OF_DPA); + if (!w) { + error_setg(errp, "rocker %s doesn't have OF-DPA world", name); + return NULL; + } + + of_dpa = world_private(w); + + g_hash_table_foreach(of_dpa->group_tbl, of_dpa_group_fill, &fill_context); + + return fill_context.list; +} + +static WorldOps of_dpa_ops = { + .name = "ofdpa", + .init = of_dpa_init, + .uninit = of_dpa_uninit, + .ig = of_dpa_ig, + .cmd = of_dpa_cmd, +}; + +World *of_dpa_world_alloc(Rocker *r) +{ + return world_alloc(r, sizeof(OfDpa), ROCKER_WORLD_TYPE_OF_DPA, &of_dpa_ops); +} diff --git a/hw/net/rocker/rocker_of_dpa.h b/hw/net/rocker/rocker_of_dpa.h new file mode 100644 index 000000000..01c7a97d0 --- /dev/null +++ b/hw/net/rocker/rocker_of_dpa.h @@ -0,0 +1,22 @@ +/* + * QEMU rocker switch emulation - OF-DPA flow processing support + * + * Copyright (c) 2014 Scott Feldman + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef ROCKER_OF_DPA_H +#define ROCKER_OF_DPA_H + +World *of_dpa_world_alloc(Rocker *r); + +#endif /* ROCKER_OF_DPA_H */ diff --git a/hw/net/rocker/rocker_tlv.h b/hw/net/rocker/rocker_tlv.h new file mode 100644 index 000000000..dd28d0844 --- /dev/null +++ b/hw/net/rocker/rocker_tlv.h @@ -0,0 +1,244 @@ +/* + * QEMU rocker switch emulation - TLV parsing and composing + * + * Copyright (c) 2014 Jiri Pirko + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef ROCKER_TLV_H +#define ROCKER_TLV_H + +#define ROCKER_TLV_ALIGNTO 8U +#define ROCKER_TLV_ALIGN(len) \ + (((len) + ROCKER_TLV_ALIGNTO - 1) & ~(ROCKER_TLV_ALIGNTO - 1)) +#define ROCKER_TLV_HDRLEN ROCKER_TLV_ALIGN(sizeof(RockerTlv)) + +/* + * <------- ROCKER_TLV_HDRLEN -------> <--- ROCKER_TLV_ALIGN(payload) ---> + * +-----------------------------+- - -+- - - - - - - - - - - - - - -+- - -+ + * | Header | Pad | Payload | Pad | + * | (RockerTlv) | ing | | ing | + * +-----------------------------+- - -+- - - - - - - - - - - - - - -+- - -+ + * <--------------------------- tlv->len --------------------------> + */ + +static inline RockerTlv *rocker_tlv_next(const RockerTlv *tlv, int *remaining) +{ + int totlen = ROCKER_TLV_ALIGN(le16_to_cpu(tlv->len)); + + *remaining -= totlen; + return (RockerTlv *) ((char *) tlv + totlen); +} + +static inline int rocker_tlv_ok(const RockerTlv *tlv, int remaining) +{ + return remaining >= (int) ROCKER_TLV_HDRLEN && + le16_to_cpu(tlv->len) >= ROCKER_TLV_HDRLEN && + le16_to_cpu(tlv->len) <= remaining; +} + +#define rocker_tlv_for_each(pos, head, len, rem) \ + for (pos = head, rem = len; \ + rocker_tlv_ok(pos, rem); \ + pos = rocker_tlv_next(pos, &(rem))) + +#define rocker_tlv_for_each_nested(pos, tlv, rem) \ + rocker_tlv_for_each(pos, rocker_tlv_data(tlv), rocker_tlv_len(tlv), rem) + +static inline int rocker_tlv_size(int payload) +{ + return ROCKER_TLV_HDRLEN + payload; +} + +static inline int rocker_tlv_total_size(int payload) +{ + return ROCKER_TLV_ALIGN(rocker_tlv_size(payload)); +} + +static inline int rocker_tlv_padlen(int payload) +{ + return rocker_tlv_total_size(payload) - rocker_tlv_size(payload); +} + +static inline int rocker_tlv_type(const RockerTlv *tlv) +{ + return le32_to_cpu(tlv->type); +} + +static inline void *rocker_tlv_data(const RockerTlv *tlv) +{ + return (char *) tlv + ROCKER_TLV_HDRLEN; +} + +static inline int rocker_tlv_len(const RockerTlv *tlv) +{ + return le16_to_cpu(tlv->len) - ROCKER_TLV_HDRLEN; +} + +static inline uint8_t rocker_tlv_get_u8(const RockerTlv *tlv) +{ + return *(uint8_t *) rocker_tlv_data(tlv); +} + +static inline uint16_t rocker_tlv_get_u16(const RockerTlv *tlv) +{ + return *(uint16_t *) rocker_tlv_data(tlv); +} + +static inline uint32_t rocker_tlv_get_u32(const RockerTlv *tlv) +{ + return *(uint32_t *) rocker_tlv_data(tlv); +} + +static inline uint64_t rocker_tlv_get_u64(const RockerTlv *tlv) +{ + return *(uint64_t *) rocker_tlv_data(tlv); +} + +static inline uint16_t rocker_tlv_get_le16(const RockerTlv *tlv) +{ + return lduw_le_p(rocker_tlv_data(tlv)); +} + +static inline uint32_t rocker_tlv_get_le32(const RockerTlv *tlv) +{ + return ldl_le_p(rocker_tlv_data(tlv)); +} + +static inline uint64_t rocker_tlv_get_le64(const RockerTlv *tlv) +{ + return ldq_le_p(rocker_tlv_data(tlv)); +} + +static inline void rocker_tlv_parse(RockerTlv **tb, int maxtype, + const char *buf, int buf_len) +{ + const RockerTlv *tlv; + const RockerTlv *head = (const RockerTlv *) buf; + int rem; + + memset(tb, 0, sizeof(RockerTlv *) * (maxtype + 1)); + + rocker_tlv_for_each(tlv, head, buf_len, rem) { + uint32_t type = rocker_tlv_type(tlv); + + if (type > 0 && type <= maxtype) { + tb[type] = (RockerTlv *) tlv; + } + } +} + +static inline void rocker_tlv_parse_nested(RockerTlv **tb, int maxtype, + const RockerTlv *tlv) +{ + rocker_tlv_parse(tb, maxtype, rocker_tlv_data(tlv), rocker_tlv_len(tlv)); +} + +static inline RockerTlv *rocker_tlv_start(char *buf, int buf_pos) +{ + return (RockerTlv *) (buf + buf_pos); +} + +static inline void 
rocker_tlv_put_iov(char *buf, int *buf_pos, + int type, const struct iovec *iov, + const unsigned int iovcnt) +{ + size_t len = iov_size(iov, iovcnt); + int total_size = rocker_tlv_total_size(len); + RockerTlv *tlv; + + tlv = rocker_tlv_start(buf, *buf_pos); + *buf_pos += total_size; + tlv->type = cpu_to_le32(type); + tlv->len = cpu_to_le16(rocker_tlv_size(len)); + iov_to_buf(iov, iovcnt, 0, rocker_tlv_data(tlv), len); + memset((char *) tlv + le16_to_cpu(tlv->len), 0, rocker_tlv_padlen(len)); +} + +static inline void rocker_tlv_put(char *buf, int *buf_pos, + int type, int len, void *data) +{ + struct iovec iov = { + .iov_base = data, + .iov_len = len, + }; + + rocker_tlv_put_iov(buf, buf_pos, type, &iov, 1); +} + +static inline void rocker_tlv_put_u8(char *buf, int *buf_pos, + int type, uint8_t value) +{ + rocker_tlv_put(buf, buf_pos, type, sizeof(uint8_t), &value); +} + +static inline void rocker_tlv_put_u16(char *buf, int *buf_pos, + int type, uint16_t value) +{ + rocker_tlv_put(buf, buf_pos, type, sizeof(uint16_t), &value); +} + +static inline void rocker_tlv_put_u32(char *buf, int *buf_pos, + int type, uint32_t value) +{ + rocker_tlv_put(buf, buf_pos, type, sizeof(uint32_t), &value); +} + +static inline void rocker_tlv_put_u64(char *buf, int *buf_pos, + int type, uint64_t value) +{ + rocker_tlv_put(buf, buf_pos, type, sizeof(uint64_t), &value); +} + +static inline void rocker_tlv_put_le16(char *buf, int *buf_pos, + int type, uint16_t value) +{ + value = cpu_to_le16(value); + rocker_tlv_put(buf, buf_pos, type, sizeof(uint16_t), &value); +} + +static inline void rocker_tlv_put_le32(char *buf, int *buf_pos, + int type, uint32_t value) +{ + value = cpu_to_le32(value); + rocker_tlv_put(buf, buf_pos, type, sizeof(uint32_t), &value); +} + +static inline void rocker_tlv_put_le64(char *buf, int *buf_pos, + int type, uint64_t value) +{ + value = cpu_to_le64(value); + rocker_tlv_put(buf, buf_pos, type, sizeof(uint64_t), &value); +} + +static inline RockerTlv *rocker_tlv_nest_start(char *buf, int *buf_pos, + int type) +{ + RockerTlv *start = rocker_tlv_start(buf, *buf_pos); + + rocker_tlv_put(buf, buf_pos, type, 0, NULL); + return start; +} + +static inline void rocker_tlv_nest_end(char *buf, int *buf_pos, + RockerTlv *start) +{ + start->len = (char *) rocker_tlv_start(buf, *buf_pos) - (char *) start; +} + +static inline void rocker_tlv_nest_cancel(char *buf, int *buf_pos, + RockerTlv *start) +{ + *buf_pos = (char *) start - buf; +} + +#endif diff --git a/hw/net/rocker/rocker_world.c b/hw/net/rocker/rocker_world.c new file mode 100644 index 000000000..f73c5340e --- /dev/null +++ b/hw/net/rocker/rocker_world.c @@ -0,0 +1,100 @@ +/* + * QEMU rocker switch emulation - switch worlds + * + * Copyright (c) 2014 Scott Feldman + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include "qemu/osdep.h" +#include "qemu/iov.h" + +#include "rocker.h" +#include "rocker_world.h" + +struct world { + Rocker *r; + enum rocker_world_type type; + WorldOps *ops; +}; + +ssize_t world_ingress(World *world, uint32_t pport, + const struct iovec *iov, int iovcnt) +{ + if (world->ops->ig) { + return world->ops->ig(world, pport, iov, iovcnt); + } + + return -1; +} + +int world_do_cmd(World *world, DescInfo *info, + char *buf, uint16_t cmd, RockerTlv *cmd_info_tlv) +{ + if (world->ops->cmd) { + return world->ops->cmd(world, info, buf, cmd, cmd_info_tlv); + } + + return -ROCKER_ENOTSUP; +} + +World *world_alloc(Rocker *r, size_t sizeof_private, + enum rocker_world_type type, WorldOps *ops) +{ + World *w = g_malloc0(sizeof(World) + sizeof_private); + + w->r = r; + w->type = type; + w->ops = ops; + if (w->ops->init) { + w->ops->init(w); + } + + return w; +} + +void world_free(World *world) +{ + if (world->ops->uninit) { + world->ops->uninit(world); + } + g_free(world); +} + +void world_reset(World *world) +{ + if (world->ops->uninit) { + world->ops->uninit(world); + } + if (world->ops->init) { + world->ops->init(world); + } +} + +void *world_private(World *world) +{ + return world + 1; +} + +Rocker *world_rocker(World *world) +{ + return world->r; +} + +enum rocker_world_type world_type(World *world) +{ + return world->type; +} + +const char *world_name(World *world) +{ + return world->ops->name; +} diff --git a/hw/net/rocker/rocker_world.h b/hw/net/rocker/rocker_world.h new file mode 100644 index 000000000..44f1fe3e1 --- /dev/null +++ b/hw/net/rocker/rocker_world.h @@ -0,0 +1,61 @@ +/* + * QEMU rocker switch emulation - switch worlds + * + * Copyright (c) 2014 Scott Feldman + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef ROCKER_WORLD_H +#define ROCKER_WORLD_H + +#include "rocker_hw.h" + +enum rocker_world_type { + ROCKER_WORLD_TYPE_OF_DPA = ROCKER_PORT_MODE_OF_DPA, + ROCKER_WORLD_TYPE_MAX, +}; + +typedef int (world_init)(World *world); +typedef void (world_uninit)(World *world); +typedef ssize_t (world_ig)(World *world, uint32_t pport, + const struct iovec *iov, int iovcnt); +typedef int (world_cmd)(World *world, DescInfo *info, + char *buf, uint16_t cmd, + RockerTlv *cmd_info_tlv); + +typedef struct world_ops { + const char *name; + world_init *init; + world_uninit *uninit; + world_ig *ig; + world_cmd *cmd; +} WorldOps; + +ssize_t world_ingress(World *world, uint32_t pport, + const struct iovec *iov, int iovcnt); +int world_do_cmd(World *world, DescInfo *info, + char *buf, uint16_t cmd, RockerTlv *cmd_info_tlv); + +World *world_alloc(Rocker *r, size_t sizeof_private, + enum rocker_world_type type, WorldOps *ops); +void world_free(World *world); +void world_reset(World *world); + +void *world_private(World *world); +Rocker *world_rocker(World *world); + +enum rocker_world_type world_type(World *world); +const char *world_name(World *world); + +World *rocker_get_world(Rocker *r, enum rocker_world_type type); + +#endif /* ROCKER_WORLD_H */ diff --git a/hw/net/rtl8139.c b/hw/net/rtl8139.c new file mode 100644 index 000000000..90b4fc63c --- /dev/null +++ b/hw/net/rtl8139.c @@ -0,0 +1,3460 @@ +/** + * QEMU RTL8139 emulation + * + * Copyright (c) 2006 Igor Kovalenko + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ + * Modifications: + * 2006-Jan-28 Mark Malakanov : TSAD and CSCR implementation (for Windows driver) + * + * 2006-Apr-28 Juergen Lock : EEPROM emulation changes for FreeBSD driver + * HW revision ID changes for FreeBSD driver + * + * 2006-Jul-01 Igor Kovalenko : Implemented loopback mode for FreeBSD driver + * Corrected packet transfer reassembly routine for 8139C+ mode + * Rearranged debugging print statements + * Implemented PCI timer interrupt (disabled by default) + * Implemented Tally Counters, increased VM load/save version + * Implemented IP/TCP/UDP checksum task offloading + * + * 2006-Jul-04 Igor Kovalenko : Implemented TCP segmentation offloading + * Fixed MTU=1500 for produced ethernet frames + * + * 2006-Jul-09 Igor Kovalenko : Fixed TCP header length calculation while processing + * segmentation offloading + * Removed slirp.h dependency + * Added rx/tx buffer reset when enabling rx/tx operation + * + * 2010-Feb-04 Frediano Ziglio: Rewrote timer support using QEMU timer only + * when strictly needed (required for + * Darwin) + * 2011-Mar-22 Benjamin Poirier: Implemented VLAN offloading + */ + +/* For crc32 */ + +#include "qemu/osdep.h" +#include <zlib.h> + +#include "hw/pci/pci.h" +#include "hw/qdev-properties.h" +#include "migration/vmstate.h" +#include "sysemu/dma.h" +#include "qemu/module.h" +#include "qemu/timer.h" +#include "net/net.h" +#include "net/eth.h" +#include "sysemu/sysemu.h" +#include "qom/object.h" + +/* debug RTL8139 card */ +//#define DEBUG_RTL8139 1 + +#define PCI_PERIOD 30 /* 30 ns period = 33.333333 MHz frequency */ + +#define SET_MASKED(input, mask, curr) \ + ( ( (input) & ~(mask) ) | ( (curr) & (mask) ) ) + +/* arg % size for size which is a power of 2 */ +#define MOD2(input, size) \ + ( ( input ) & ( size - 1 ) ) + +#define ETHER_TYPE_LEN 2 +#define ETH_MTU 1500 + +#define VLAN_TCI_LEN 2 +#define VLAN_HLEN (ETHER_TYPE_LEN + VLAN_TCI_LEN) + +#if defined (DEBUG_RTL8139) +# define DPRINTF(fmt, ...) \ + do { fprintf(stderr, "RTL8139: " fmt, ## __VA_ARGS__); } while (0) +#else +static inline GCC_FMT_ATTR(1, 2) int DPRINTF(const char *fmt, ...) +{ + return 0; +} +#endif + +#define TYPE_RTL8139 "rtl8139" + +OBJECT_DECLARE_SIMPLE_TYPE(RTL8139State, RTL8139) + +/* Symbolic offsets to registers. */ +enum RTL8139_registers { + MAC0 = 0, /* Ethernet hardware address. */ + MAR0 = 8, /* Multicast filter. */ + TxStatus0 = 0x10, /* Transmit status (Four 32bit registers). C mode only */ + /* Dump Tally Counter control register (64bit). C+ mode only */ + TxAddr0 = 0x20, /* Tx descriptors (also four 32bit). */ + RxBuf = 0x30, + ChipCmd = 0x37, + RxBufPtr = 0x38, + RxBufAddr = 0x3A, + IntrMask = 0x3C, + IntrStatus = 0x3E, + TxConfig = 0x40, + RxConfig = 0x44, + Timer = 0x48, /* A general-purpose counter. */ + RxMissed = 0x4C, /* 24 bits valid, write clears. */ + Cfg9346 = 0x50, + Config0 = 0x51, + Config1 = 0x52, + FlashReg = 0x54, + MediaStatus = 0x58, + Config3 = 0x59, + Config4 = 0x5A, /* absent on RTL-8139A */ + HltClk = 0x5B, + MultiIntr = 0x5C, + PCIRevisionID = 0x5E, + TxSummary = 0x60, /* TSAD register. Transmit Status of All Descriptors */ + BasicModeCtrl = 0x62, + BasicModeStatus = 0x64, + NWayAdvert = 0x66, + NWayLPAR = 0x68, + NWayExpansion = 0x6A, + /* Undocumented registers, but required for proper operation. */ + FIFOTMS = 0x70, /* FIFO Control and test. */ + CSCR = 0x74, /* Chip Status and Configuration Register. */ + PARA78 = 0x78, + PARA7c = 0x7c, /* Magic transceiver parameter register. 
*/ + Config5 = 0xD8, /* absent on RTL-8139A */ + /* C+ mode */ + TxPoll = 0xD9, /* Tell chip to check Tx descriptors for work */ + RxMaxSize = 0xDA, /* Max size of an Rx packet (8169 only) */ + CpCmd = 0xE0, /* C+ Command register (C+ mode only) */ + IntrMitigate = 0xE2, /* rx/tx interrupt mitigation control */ + RxRingAddrLO = 0xE4, /* 64-bit start addr of Rx ring */ + RxRingAddrHI = 0xE8, /* 64-bit start addr of Rx ring */ + TxThresh = 0xEC, /* Early Tx threshold */ +}; + +enum ClearBitMasks { + MultiIntrClear = 0xF000, + ChipCmdClear = 0xE2, + Config1Clear = (1<<7)|(1<<6)|(1<<3)|(1<<2)|(1<<1), +}; + +enum ChipCmdBits { + CmdReset = 0x10, + CmdRxEnb = 0x08, + CmdTxEnb = 0x04, + RxBufEmpty = 0x01, +}; + +/* C+ mode */ +enum CplusCmdBits { + CPlusRxVLAN = 0x0040, /* enable receive VLAN detagging */ + CPlusRxChkSum = 0x0020, /* enable receive checksum offloading */ + CPlusRxEnb = 0x0002, + CPlusTxEnb = 0x0001, +}; + +/* Interrupt register bits, using my own meaningful names. */ +enum IntrStatusBits { + PCIErr = 0x8000, + PCSTimeout = 0x4000, + RxFIFOOver = 0x40, + RxUnderrun = 0x20, /* Packet Underrun / Link Change */ + RxOverflow = 0x10, + TxErr = 0x08, + TxOK = 0x04, + RxErr = 0x02, + RxOK = 0x01, + + RxAckBits = RxFIFOOver | RxOverflow | RxOK, +}; + +enum TxStatusBits { + TxHostOwns = 0x2000, + TxUnderrun = 0x4000, + TxStatOK = 0x8000, + TxOutOfWindow = 0x20000000, + TxAborted = 0x40000000, + TxCarrierLost = 0x80000000, +}; +enum RxStatusBits { + RxMulticast = 0x8000, + RxPhysical = 0x4000, + RxBroadcast = 0x2000, + RxBadSymbol = 0x0020, + RxRunt = 0x0010, + RxTooLong = 0x0008, + RxCRCErr = 0x0004, + RxBadAlign = 0x0002, + RxStatusOK = 0x0001, +}; + +/* Bits in RxConfig. */ +enum rx_mode_bits { + AcceptErr = 0x20, + AcceptRunt = 0x10, + AcceptBroadcast = 0x08, + AcceptMulticast = 0x04, + AcceptMyPhys = 0x02, + AcceptAllPhys = 0x01, +}; + +/* Bits in TxConfig. */ +enum tx_config_bits { + + /* Interframe Gap Time. 
Only TxIFG96 doesn't violate IEEE 802.3 */ + TxIFGShift = 24, + TxIFG84 = (0 << TxIFGShift), /* 8.4us / 840ns (10 / 100Mbps) */ + TxIFG88 = (1 << TxIFGShift), /* 8.8us / 880ns (10 / 100Mbps) */ + TxIFG92 = (2 << TxIFGShift), /* 9.2us / 920ns (10 / 100Mbps) */ + TxIFG96 = (3 << TxIFGShift), /* 9.6us / 960ns (10 / 100Mbps) */ + + TxLoopBack = (1 << 18) | (1 << 17), /* enable loopback test mode */ + TxCRC = (1 << 16), /* DISABLE appending CRC to end of Tx packets */ + TxClearAbt = (1 << 0), /* Clear abort (WO) */ + TxDMAShift = 8, /* DMA burst value (0-7) is shifted this many bits */ + TxRetryShift = 4, /* TXRR value (0-15) is shifted this many bits */ + + TxVersionMask = 0x7C800000, /* mask out version bits 30-26, 23 */ +}; + + +/* Transmit Status of All Descriptors (TSAD) Register */ +enum TSAD_bits { + TSAD_TOK3 = 1<<15, // TOK bit of Descriptor 3 + TSAD_TOK2 = 1<<14, // TOK bit of Descriptor 2 + TSAD_TOK1 = 1<<13, // TOK bit of Descriptor 1 + TSAD_TOK0 = 1<<12, // TOK bit of Descriptor 0 + TSAD_TUN3 = 1<<11, // TUN bit of Descriptor 3 + TSAD_TUN2 = 1<<10, // TUN bit of Descriptor 2 + TSAD_TUN1 = 1<<9, // TUN bit of Descriptor 1 + TSAD_TUN0 = 1<<8, // TUN bit of Descriptor 0 + TSAD_TABT3 = 1<<07, // TABT bit of Descriptor 3 + TSAD_TABT2 = 1<<06, // TABT bit of Descriptor 2 + TSAD_TABT1 = 1<<05, // TABT bit of Descriptor 1 + TSAD_TABT0 = 1<<04, // TABT bit of Descriptor 0 + TSAD_OWN3 = 1<<03, // OWN bit of Descriptor 3 + TSAD_OWN2 = 1<<02, // OWN bit of Descriptor 2 + TSAD_OWN1 = 1<<01, // OWN bit of Descriptor 1 + TSAD_OWN0 = 1<<00, // OWN bit of Descriptor 0 +}; + + +/* Bits in Config1 */ +enum Config1Bits { + Cfg1_PM_Enable = 0x01, + Cfg1_VPD_Enable = 0x02, + Cfg1_PIO = 0x04, + Cfg1_MMIO = 0x08, + LWAKE = 0x10, /* not on 8139, 8139A */ + Cfg1_Driver_Load = 0x20, + Cfg1_LED0 = 0x40, + Cfg1_LED1 = 0x80, + SLEEP = (1 << 1), /* only on 8139, 8139A */ + PWRDN = (1 << 0), /* only on 8139, 8139A */ +}; + +/* Bits in Config3 */ +enum Config3Bits { + Cfg3_FBtBEn = (1 << 0), /* 1 = Fast Back to Back */ + Cfg3_FuncRegEn = (1 << 1), /* 1 = enable CardBus Function registers */ + Cfg3_CLKRUN_En = (1 << 2), /* 1 = enable CLKRUN */ + Cfg3_CardB_En = (1 << 3), /* 1 = enable CardBus registers */ + Cfg3_LinkUp = (1 << 4), /* 1 = wake up on link up */ + Cfg3_Magic = (1 << 5), /* 1 = wake up on Magic Packet (tm) */ + Cfg3_PARM_En = (1 << 6), /* 0 = software can set twister parameters */ + Cfg3_GNTSel = (1 << 7), /* 1 = delay 1 clock from PCI GNT signal */ +}; + +/* Bits in Config4 */ +enum Config4Bits { + LWPTN = (1 << 2), /* not on 8139, 8139A */ +}; + +/* Bits in Config5 */ +enum Config5Bits { + Cfg5_PME_STS = (1 << 0), /* 1 = PCI reset resets PME_Status */ + Cfg5_LANWake = (1 << 1), /* 1 = enable LANWake signal */ + Cfg5_LDPS = (1 << 2), /* 0 = save power when link is down */ + Cfg5_FIFOAddrPtr = (1 << 3), /* Realtek internal SRAM testing */ + Cfg5_UWF = (1 << 4), /* 1 = accept unicast wakeup frame */ + Cfg5_MWF = (1 << 5), /* 1 = accept multicast wakeup frame */ + Cfg5_BWF = (1 << 6), /* 1 = accept broadcast wakeup frame */ +}; + +enum RxConfigBits { + /* rx fifo threshold */ + RxCfgFIFOShift = 13, + RxCfgFIFONone = (7 << RxCfgFIFOShift), + + /* Max DMA burst */ + RxCfgDMAShift = 8, + RxCfgDMAUnlimited = (7 << RxCfgDMAShift), + + /* rx ring buffer length */ + RxCfgRcv8K = 0, + RxCfgRcv16K = (1 << 11), + RxCfgRcv32K = (1 << 12), + RxCfgRcv64K = (1 << 11) | (1 << 12), + + /* Disable packet wrap at end of Rx buffer. 
(not possible with 64k) */ + RxNoWrap = (1 << 7), +}; + +/* Twister tuning parameters from RealTek. + Completely undocumented, but required to tune bad links on some boards. */ +/* +enum CSCRBits { + CSCR_LinkOKBit = 0x0400, + CSCR_LinkChangeBit = 0x0800, + CSCR_LinkStatusBits = 0x0f000, + CSCR_LinkDownOffCmd = 0x003c0, + CSCR_LinkDownCmd = 0x0f3c0, +*/ +enum CSCRBits { + CSCR_Testfun = 1<<15, /* 1 = Auto-neg speeds up internal timer, WO, def 0 */ + CSCR_LD = 1<<9, /* Active low TPI link disable signal. When low, TPI still transmits link pulses and TPI stays in good link state. def 1*/ + CSCR_HEART_BIT = 1<<8, /* 1 = HEART BEAT enable, 0 = HEART BEAT disable. HEART BEAT function is only valid in 10Mbps mode. def 1*/ + CSCR_JBEN = 1<<7, /* 1 = enable jabber function. 0 = disable jabber function, def 1*/ + CSCR_F_LINK_100 = 1<<6, /* Used to login force good link in 100Mbps for diagnostic purposes. 1 = DISABLE, 0 = ENABLE. def 1*/ + CSCR_F_Connect = 1<<5, /* Assertion of this bit forces the disconnect function to be bypassed. def 0*/ + CSCR_Con_status = 1<<3, /* This bit indicates the status of the connection. 1 = valid connected link detected; 0 = disconnected link detected. RO def 0*/ + CSCR_Con_status_En = 1<<2, /* Assertion of this bit configures LED1 pin to indicate connection status. def 0*/ + CSCR_PASS_SCR = 1<<0, /* Bypass Scramble, def 0*/ +}; + +enum Cfg9346Bits { + Cfg9346_Normal = 0x00, + Cfg9346_Autoload = 0x40, + Cfg9346_Programming = 0x80, + Cfg9346_ConfigWrite = 0xC0, +}; + +typedef enum { + CH_8139 = 0, + CH_8139_K, + CH_8139A, + CH_8139A_G, + CH_8139B, + CH_8130, + CH_8139C, + CH_8100, + CH_8100B_8139D, + CH_8101, +} chip_t; + +enum chip_flags { + HasHltClk = (1 << 0), + HasLWake = (1 << 1), +}; + +#define HW_REVID(b30, b29, b28, b27, b26, b23, b22) \ + (b30<<30 | b29<<29 | b28<<28 | b27<<27 | b26<<26 | b23<<23 | b22<<22) +#define HW_REVID_MASK HW_REVID(1, 1, 1, 1, 1, 1, 1) + +#define RTL8139_PCI_REVID_8139 0x10 +#define RTL8139_PCI_REVID_8139CPLUS 0x20 + +#define RTL8139_PCI_REVID RTL8139_PCI_REVID_8139CPLUS + +/* Size is 64 * 16bit words */ +#define EEPROM_9346_ADDR_BITS 6 +#define EEPROM_9346_SIZE (1 << EEPROM_9346_ADDR_BITS) +#define EEPROM_9346_ADDR_MASK (EEPROM_9346_SIZE - 1) + +enum Chip9346Operation +{ + Chip9346_op_mask = 0xc0, /* 10 zzzzzz */ + Chip9346_op_read = 0x80, /* 10 AAAAAA */ + Chip9346_op_write = 0x40, /* 01 AAAAAA D(15)..D(0) */ + Chip9346_op_ext_mask = 0xf0, /* 11 zzzzzz */ + Chip9346_op_write_enable = 0x30, /* 00 11zzzz */ + Chip9346_op_write_all = 0x10, /* 00 01zzzz */ + Chip9346_op_write_disable = 0x00, /* 00 00zzzz */ +}; + +enum Chip9346Mode +{ + Chip9346_none = 0, + Chip9346_enter_command_mode, + Chip9346_read_command, + Chip9346_data_read, /* from output register */ + Chip9346_data_write, /* to input register, then to contents at specified address */ + Chip9346_data_write_all, /* to input register, then filling contents */ +}; + +typedef struct EEprom9346 +{ + uint16_t contents[EEPROM_9346_SIZE]; + int mode; + uint32_t tick; + uint8_t address; + uint16_t input; + uint16_t output; + + uint8_t eecs; + uint8_t eesk; + uint8_t eedi; + uint8_t eedo; +} EEprom9346; + +typedef struct RTL8139TallyCounters +{ + /* Tally counters */ + uint64_t TxOk; + uint64_t RxOk; + uint64_t TxERR; + uint32_t RxERR; + uint16_t MissPkt; + uint16_t FAE; + uint32_t Tx1Col; + uint32_t TxMCol; + uint64_t RxOkPhy; + uint64_t RxOkBrd; + uint32_t RxOkMul; + uint16_t TxAbt; + uint16_t TxUndrn; +} RTL8139TallyCounters; + +/* Clears all tally counters */ +static void 
RTL8139TallyCounters_clear(RTL8139TallyCounters* counters); + +struct RTL8139State { + /*< private >*/ + PCIDevice parent_obj; + /*< public >*/ + + uint8_t phys[8]; /* mac address */ + uint8_t mult[8]; /* multicast mask array */ + + uint32_t TxStatus[4]; /* TxStatus0 in C mode*/ /* also DTCCR[0] and DTCCR[1] in C+ mode */ + uint32_t TxAddr[4]; /* TxAddr0 */ + uint32_t RxBuf; /* Receive buffer */ + uint32_t RxBufferSize;/* internal variable, receive ring buffer size in C mode */ + uint32_t RxBufPtr; + uint32_t RxBufAddr; + + uint16_t IntrStatus; + uint16_t IntrMask; + + uint32_t TxConfig; + uint32_t RxConfig; + uint32_t RxMissed; + + uint16_t CSCR; + + uint8_t Cfg9346; + uint8_t Config0; + uint8_t Config1; + uint8_t Config3; + uint8_t Config4; + uint8_t Config5; + + uint8_t clock_enabled; + uint8_t bChipCmdState; + + uint16_t MultiIntr; + + uint16_t BasicModeCtrl; + uint16_t BasicModeStatus; + uint16_t NWayAdvert; + uint16_t NWayLPAR; + uint16_t NWayExpansion; + + uint16_t CpCmd; + uint8_t TxThresh; + + NICState *nic; + NICConf conf; + + /* C ring mode */ + uint32_t currTxDesc; + + /* C+ mode */ + uint32_t cplus_enabled; + + uint32_t currCPlusRxDesc; + uint32_t currCPlusTxDesc; + + uint32_t RxRingAddrLO; + uint32_t RxRingAddrHI; + + EEprom9346 eeprom; + + uint32_t TCTR; + uint32_t TimerInt; + int64_t TCTR_base; + + /* Tally counters */ + RTL8139TallyCounters tally_counters; + + /* Non-persistent data */ + uint8_t *cplus_txbuffer; + int cplus_txbuffer_len; + int cplus_txbuffer_offset; + + /* PCI interrupt timer */ + QEMUTimer *timer; + + MemoryRegion bar_io; + MemoryRegion bar_mem; + + /* Support migration to/from old versions */ + int rtl8139_mmio_io_addr_dummy; +}; + +/* Writes tally counters to memory via DMA */ +static void RTL8139TallyCounters_dma_write(RTL8139State *s, dma_addr_t tc_addr); + +static void rtl8139_set_next_tctr_time(RTL8139State *s); + +static void prom9346_decode_command(EEprom9346 *eeprom, uint8_t command) +{ + DPRINTF("eeprom command 0x%02x\n", command); + + switch (command & Chip9346_op_mask) + { + case Chip9346_op_read: + { + eeprom->address = command & EEPROM_9346_ADDR_MASK; + eeprom->output = eeprom->contents[eeprom->address]; + eeprom->eedo = 0; + eeprom->tick = 0; + eeprom->mode = Chip9346_data_read; + DPRINTF("eeprom read from address 0x%02x data=0x%04x\n", + eeprom->address, eeprom->output); + } + break; + + case Chip9346_op_write: + { + eeprom->address = command & EEPROM_9346_ADDR_MASK; + eeprom->input = 0; + eeprom->tick = 0; + eeprom->mode = Chip9346_none; /* Chip9346_data_write */ + DPRINTF("eeprom begin write to address 0x%02x\n", + eeprom->address); + } + break; + default: + eeprom->mode = Chip9346_none; + switch (command & Chip9346_op_ext_mask) + { + case Chip9346_op_write_enable: + DPRINTF("eeprom write enabled\n"); + break; + case Chip9346_op_write_all: + DPRINTF("eeprom begin write all\n"); + break; + case Chip9346_op_write_disable: + DPRINTF("eeprom write disabled\n"); + break; + } + break; + } +} + +static void prom9346_shift_clock(EEprom9346 *eeprom) +{ + int bit = eeprom->eedi?1:0; + + ++ eeprom->tick; + + DPRINTF("eeprom: tick %d eedi=%d eedo=%d\n", eeprom->tick, eeprom->eedi, + eeprom->eedo); + + switch (eeprom->mode) + { + case Chip9346_enter_command_mode: + if (bit) + { + eeprom->mode = Chip9346_read_command; + eeprom->tick = 0; + eeprom->input = 0; + DPRINTF("eeprom: +++ synchronized, begin command read\n"); + } + break; + + case Chip9346_read_command: + eeprom->input = (eeprom->input << 1) | (bit & 1); + if (eeprom->tick == 8) + { + 
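+ /* Worked example of the decode below: a READ of word 0x12 is
+ * clocked in MSB-first as the byte 0x92 (binary 10 010010: the
+ * 2-bit opcode, then the 6-bit address). Masked with
+ * Chip9346_op_mask (0xc0) it selects Chip9346_op_read (0x80);
+ * masked with EEPROM_9346_ADDR_MASK (0x3f) it yields address
+ * 0x12, whose word is then shifted out one bit per SK clock in
+ * the Chip9346_data_read state. */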
prom9346_decode_command(eeprom, eeprom->input & 0xff); + } + break; + + case Chip9346_data_read: + eeprom->eedo = (eeprom->output & 0x8000)?1:0; + eeprom->output <<= 1; + if (eeprom->tick == 16) + { +#if 1 + // the FreeBSD drivers (rl and re) don't explicitly toggle + // CS between reads (or does setting Cfg9346 to 0 count too?), + // so we need to enter wait-for-command state here + eeprom->mode = Chip9346_enter_command_mode; + eeprom->input = 0; + eeprom->tick = 0; + + DPRINTF("eeprom: +++ end of read, awaiting next command\n"); +#else + // original behaviour + ++eeprom->address; + eeprom->address &= EEPROM_9346_ADDR_MASK; + eeprom->output = eeprom->contents[eeprom->address]; + eeprom->tick = 0; + + DPRINTF("eeprom: +++ read next address 0x%02x data=0x%04x\n", + eeprom->address, eeprom->output); +#endif + } + break; + + case Chip9346_data_write: + eeprom->input = (eeprom->input << 1) | (bit & 1); + if (eeprom->tick == 16) + { + DPRINTF("eeprom write to address 0x%02x data=0x%04x\n", + eeprom->address, eeprom->input); + + eeprom->contents[eeprom->address] = eeprom->input; + eeprom->mode = Chip9346_none; /* waiting for next command after CS cycle */ + eeprom->tick = 0; + eeprom->input = 0; + } + break; + + case Chip9346_data_write_all: + eeprom->input = (eeprom->input << 1) | (bit & 1); + if (eeprom->tick == 16) + { + int i; + for (i = 0; i < EEPROM_9346_SIZE; i++) + { + eeprom->contents[i] = eeprom->input; + } + DPRINTF("eeprom filled with data=0x%04x\n", eeprom->input); + + eeprom->mode = Chip9346_enter_command_mode; + eeprom->tick = 0; + eeprom->input = 0; + } + break; + + default: + break; + } +} + +static int prom9346_get_wire(RTL8139State *s) +{ + EEprom9346 *eeprom = &s->eeprom; + if (!eeprom->eecs) + return 0; + + return eeprom->eedo; +} + +/* FIXME: This should be merged into/replaced by eeprom93xx.c. */ +static void prom9346_set_wire(RTL8139State *s, int eecs, int eesk, int eedi) +{ + EEprom9346 *eeprom = &s->eeprom; + uint8_t old_eecs = eeprom->eecs; + uint8_t old_eesk = eeprom->eesk; + + eeprom->eecs = eecs; + eeprom->eesk = eesk; + eeprom->eedi = eedi; + + DPRINTF("eeprom: +++ wires CS=%d SK=%d DI=%d DO=%d\n", eeprom->eecs, + eeprom->eesk, eeprom->eedi, eeprom->eedo); + + if (!old_eecs && eecs) + { + /* Synchronize start */ + eeprom->tick = 0; + eeprom->input = 0; + eeprom->output = 0; + eeprom->mode = Chip9346_enter_command_mode; + + DPRINTF("=== eeprom: begin access, enter command mode\n"); + } + + if (!eecs) + { + DPRINTF("=== eeprom: end access\n"); + return; + } + + if (!old_eesk && eesk) + { + /* SK front rules */ + prom9346_shift_clock(eeprom); + } +} + +static void rtl8139_update_irq(RTL8139State *s) +{ + PCIDevice *d = PCI_DEVICE(s); + int isr; + isr = (s->IntrStatus & s->IntrMask) & 0xffff; + + DPRINTF("Set IRQ to %d (%04x %04x)\n", isr ? 
1 : 0, s->IntrStatus, + s->IntrMask); + + pci_set_irq(d, (isr != 0)); +} + +static int rtl8139_RxWrap(RTL8139State *s) +{ + /* wrapping enabled; assume 1.5k more buffer space if size < 65536 */ + return (s->RxConfig & (1 << 7)); +} + +static int rtl8139_receiver_enabled(RTL8139State *s) +{ + return s->bChipCmdState & CmdRxEnb; +} + +static int rtl8139_transmitter_enabled(RTL8139State *s) +{ + return s->bChipCmdState & CmdTxEnb; +} + +static int rtl8139_cp_receiver_enabled(RTL8139State *s) +{ + return s->CpCmd & CPlusRxEnb; +} + +static int rtl8139_cp_transmitter_enabled(RTL8139State *s) +{ + return s->CpCmd & CPlusTxEnb; +} + +static void rtl8139_write_buffer(RTL8139State *s, const void *buf, int size) +{ + PCIDevice *d = PCI_DEVICE(s); + + if (s->RxBufAddr + size > s->RxBufferSize) + { + int wrapped = MOD2(s->RxBufAddr + size, s->RxBufferSize); + + /* write packet data */ + if (wrapped && !(s->RxBufferSize < 65536 && rtl8139_RxWrap(s))) + { + DPRINTF(">>> rx packet wrapped in buffer at %d\n", size - wrapped); + + if (size > wrapped) + { + pci_dma_write(d, s->RxBuf + s->RxBufAddr, + buf, size-wrapped); + } + + /* reset buffer pointer */ + s->RxBufAddr = 0; + + pci_dma_write(d, s->RxBuf + s->RxBufAddr, + buf + (size-wrapped), wrapped); + + s->RxBufAddr = wrapped; + + return; + } + } + + /* non-wrapping path or overwrapping enabled */ + pci_dma_write(d, s->RxBuf + s->RxBufAddr, buf, size); + + s->RxBufAddr += size; +} + +#define MIN_BUF_SIZE 60 +static inline dma_addr_t rtl8139_addr64(uint32_t low, uint32_t high) +{ + return low | ((uint64_t)high << 32); +} + +/* Workaround for buggy guest driver such as linux who allocates rx + * rings after the receiver were enabled. */ +static bool rtl8139_cp_rx_valid(RTL8139State *s) +{ + return !(s->RxRingAddrLO == 0 && s->RxRingAddrHI == 0); +} + +static bool rtl8139_can_receive(NetClientState *nc) +{ + RTL8139State *s = qemu_get_nic_opaque(nc); + int avail; + + /* Receive (drop) packets if card is disabled. */ + if (!s->clock_enabled) { + return true; + } + if (!rtl8139_receiver_enabled(s)) { + return true; + } + + if (rtl8139_cp_receiver_enabled(s) && rtl8139_cp_rx_valid(s)) { + /* ??? Flow control not implemented in c+ mode. + This is a hack to work around slirp deficiencies anyway. 
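For the C-mode ring below, admission depends on the free span between
the device write pointer (RxBufAddr) and the guest read pointer
(RxBufPtr): with the default 8 KiB ring, RxBufAddr == 7000 and
RxBufPtr == 1000 leave MOD2(8192 + 1000 - 7000, 8192) == 2192 bytes,
room for a maximal 1514-byte frame, so reception proceeds.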
*/ + return true; + } + + avail = MOD2(s->RxBufferSize + s->RxBufPtr - s->RxBufAddr, + s->RxBufferSize); + return avail == 0 || avail >= 1514 || (s->IntrMask & RxOverflow); +} + +static ssize_t rtl8139_do_receive(NetClientState *nc, const uint8_t *buf, size_t size_, int do_interrupt) +{ + RTL8139State *s = qemu_get_nic_opaque(nc); + PCIDevice *d = PCI_DEVICE(s); + /* size is the length of the buffer passed to the driver */ + size_t size = size_; + const uint8_t *dot1q_buf = NULL; + + uint32_t packet_header = 0; + + uint8_t buf1[MIN_BUF_SIZE + VLAN_HLEN]; + static const uint8_t broadcast_macaddr[6] = + { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; + + DPRINTF(">>> received len=%zu\n", size); + + /* test if board clock is stopped */ + if (!s->clock_enabled) + { + DPRINTF("stopped ==========================\n"); + return -1; + } + + /* first check if receiver is enabled */ + + if (!rtl8139_receiver_enabled(s)) + { + DPRINTF("receiver disabled ================\n"); + return -1; + } + + /* XXX: check this */ + if (s->RxConfig & AcceptAllPhys) { + /* promiscuous: receive all */ + DPRINTF(">>> packet received in promiscuous mode\n"); + + } else { + if (!memcmp(buf, broadcast_macaddr, 6)) { + /* broadcast address */ + if (!(s->RxConfig & AcceptBroadcast)) + { + DPRINTF(">>> broadcast packet rejected\n"); + + /* update tally counter */ + ++s->tally_counters.RxERR; + + return size; + } + + packet_header |= RxBroadcast; + + DPRINTF(">>> broadcast packet received\n"); + + /* update tally counter */ + ++s->tally_counters.RxOkBrd; + + } else if (buf[0] & 0x01) { + /* multicast */ + if (!(s->RxConfig & AcceptMulticast)) + { + DPRINTF(">>> multicast packet rejected\n"); + + /* update tally counter */ + ++s->tally_counters.RxERR; + + return size; + } + + int mcast_idx = net_crc32(buf, ETH_ALEN) >> 26; + + if (!(s->mult[mcast_idx >> 3] & (1 << (mcast_idx & 7)))) + { + DPRINTF(">>> multicast address mismatch\n"); + + /* update tally counter */ + ++s->tally_counters.RxERR; + + return size; + } + + packet_header |= RxMulticast; + + DPRINTF(">>> multicast packet received\n"); + + /* update tally counter */ + ++s->tally_counters.RxOkMul; + + } else if (s->phys[0] == buf[0] && + s->phys[1] == buf[1] && + s->phys[2] == buf[2] && + s->phys[3] == buf[3] && + s->phys[4] == buf[4] && + s->phys[5] == buf[5]) { + /* match */ + if (!(s->RxConfig & AcceptMyPhys)) + { + DPRINTF(">>> rejecting physical address matching packet\n"); + + /* update tally counter */ + ++s->tally_counters.RxERR; + + return size; + } + + packet_header |= RxPhysical; + + DPRINTF(">>> physical address matching packet received\n"); + + /* update tally counter */ + ++s->tally_counters.RxOkPhy; + + } else { + + DPRINTF(">>> unknown packet\n"); + + /* update tally counter */ + ++s->tally_counters.RxERR; + + return size; + } + } + + /* if too small buffer, then expand it + * Include some tailroom in case a vlan tag is later removed. 
*/ + if (size < MIN_BUF_SIZE + VLAN_HLEN) { + memcpy(buf1, buf, size); + memset(buf1 + size, 0, MIN_BUF_SIZE + VLAN_HLEN - size); + buf = buf1; + if (size < MIN_BUF_SIZE) { + size = MIN_BUF_SIZE; + } + } + + if (rtl8139_cp_receiver_enabled(s)) + { + if (!rtl8139_cp_rx_valid(s)) { + return size; + } + + DPRINTF("in C+ Rx mode ================\n"); + + /* begin C+ receiver mode */ + +/* w0 ownership flag */ +#define CP_RX_OWN (1<<31) +/* w0 end of ring flag */ +#define CP_RX_EOR (1<<30) +/* w0 bits 0...12 : buffer size */ +#define CP_RX_BUFFER_SIZE_MASK ((1<<13) - 1) +/* w1 tag available flag */ +#define CP_RX_TAVA (1<<16) +/* w1 bits 0...15 : VLAN tag */ +#define CP_RX_VLAN_TAG_MASK ((1<<16) - 1) +/* w2 low 32bit of Rx buffer ptr */ +/* w3 high 32bit of Rx buffer ptr */ + + int descriptor = s->currCPlusRxDesc; + dma_addr_t cplus_rx_ring_desc; + + cplus_rx_ring_desc = rtl8139_addr64(s->RxRingAddrLO, s->RxRingAddrHI); + cplus_rx_ring_desc += 16 * descriptor; + + DPRINTF("+++ C+ mode reading RX descriptor %d from host memory at " + "%08x %08x = "DMA_ADDR_FMT"\n", descriptor, s->RxRingAddrHI, + s->RxRingAddrLO, cplus_rx_ring_desc); + + uint32_t val, rxdw0,rxdw1,rxbufLO,rxbufHI; + + pci_dma_read(d, cplus_rx_ring_desc, &val, 4); + rxdw0 = le32_to_cpu(val); + pci_dma_read(d, cplus_rx_ring_desc+4, &val, 4); + rxdw1 = le32_to_cpu(val); + pci_dma_read(d, cplus_rx_ring_desc+8, &val, 4); + rxbufLO = le32_to_cpu(val); + pci_dma_read(d, cplus_rx_ring_desc+12, &val, 4); + rxbufHI = le32_to_cpu(val); + + DPRINTF("+++ C+ mode RX descriptor %d %08x %08x %08x %08x\n", + descriptor, rxdw0, rxdw1, rxbufLO, rxbufHI); + + if (!(rxdw0 & CP_RX_OWN)) + { + DPRINTF("C+ Rx mode : descriptor %d is owned by host\n", + descriptor); + + s->IntrStatus |= RxOverflow; + ++s->RxMissed; + + /* update tally counter */ + ++s->tally_counters.RxERR; + ++s->tally_counters.MissPkt; + + rtl8139_update_irq(s); + return size_; + } + + uint32_t rx_space = rxdw0 & CP_RX_BUFFER_SIZE_MASK; + + /* write VLAN info to descriptor variables. 
*/
+ if (s->CpCmd & CPlusRxVLAN &&
+ lduw_be_p(&buf[ETH_ALEN * 2]) == ETH_P_VLAN) {
+ dot1q_buf = &buf[ETH_ALEN * 2];
+ size -= VLAN_HLEN;
+ /* if too small buffer, use the tailroom added during expansion */
+ if (size < MIN_BUF_SIZE) {
+ size = MIN_BUF_SIZE;
+ }
+
+ rxdw1 &= ~CP_RX_VLAN_TAG_MASK;
+ /* BE + ~le_to_cpu()~ + cpu_to_le() = BE */
+ rxdw1 |= CP_RX_TAVA | lduw_le_p(&dot1q_buf[ETHER_TYPE_LEN]);
+
+ DPRINTF("C+ Rx mode : extracted vlan tag with tci: ""%u\n",
+ lduw_be_p(&dot1q_buf[ETHER_TYPE_LEN]));
+ } else {
+ /* reset VLAN tag flag */
+ rxdw1 &= ~CP_RX_TAVA;
+ }
+
+ /* TODO: scatter the packet over available receive ring descriptors space */
+
+ if (size+4 > rx_space)
+ {
+ DPRINTF("C+ Rx mode : descriptor %d size %d received %zu + 4\n",
+ descriptor, rx_space, size);
+
+ s->IntrStatus |= RxOverflow;
+ ++s->RxMissed;
+
+ /* update tally counter */
+ ++s->tally_counters.RxERR;
+ ++s->tally_counters.MissPkt;
+
+ rtl8139_update_irq(s);
+ return size_;
+ }
+
+ dma_addr_t rx_addr = rtl8139_addr64(rxbufLO, rxbufHI);
+
+ /* receive/copy to target memory */
+ if (dot1q_buf) {
+ pci_dma_write(d, rx_addr, buf, 2 * ETH_ALEN);
+ pci_dma_write(d, rx_addr + 2 * ETH_ALEN,
+ buf + 2 * ETH_ALEN + VLAN_HLEN,
+ size - 2 * ETH_ALEN);
+ } else {
+ pci_dma_write(d, rx_addr, buf, size);
+ }
+
+ if (s->CpCmd & CPlusRxChkSum)
+ {
+ /* do some packet checksumming */
+ }
+
+ /* write checksum */
+ val = cpu_to_le32(crc32(0, buf, size_));
+ pci_dma_write(d, rx_addr+size, (uint8_t *)&val, 4);
+
+/* first segment of received packet flag */
+#define CP_RX_STATUS_FS (1<<29)
+/* last segment of received packet flag */
+#define CP_RX_STATUS_LS (1<<28)
+/* multicast packet flag */
+#define CP_RX_STATUS_MAR (1<<26)
+/* physical-matching packet flag */
+#define CP_RX_STATUS_PAM (1<<25)
+/* broadcast packet flag */
+#define CP_RX_STATUS_BAR (1<<24)
+/* runt packet flag */
+#define CP_RX_STATUS_RUNT (1<<19)
+/* crc error flag */
+#define CP_RX_STATUS_CRC (1<<18)
+/* IP checksum error flag */
+#define CP_RX_STATUS_IPF (1<<15)
+/* UDP checksum error flag */
+#define CP_RX_STATUS_UDPF (1<<14)
+/* TCP checksum error flag */
+#define CP_RX_STATUS_TCPF (1<<13)
+
+ /* transfer ownership to target */
+ rxdw0 &= ~CP_RX_OWN;
+
+ /* set first segment bit */
+ rxdw0 |= CP_RX_STATUS_FS;
+
+ /* set last segment bit */
+ rxdw0 |= CP_RX_STATUS_LS;
+
+ /* set received packet type flags */
+ if (packet_header & RxBroadcast)
+ rxdw0 |= CP_RX_STATUS_BAR;
+ if (packet_header & RxMulticast)
+ rxdw0 |= CP_RX_STATUS_MAR;
+ if (packet_header & RxPhysical)
+ rxdw0 |= CP_RX_STATUS_PAM;
+
+ /* set received size */
+ rxdw0 &= ~CP_RX_BUFFER_SIZE_MASK;
+ rxdw0 |= (size+4);
+
+ /* update ring data */
+ val = cpu_to_le32(rxdw0);
+ pci_dma_write(d, cplus_rx_ring_desc, (uint8_t *)&val, 4);
+ val = cpu_to_le32(rxdw1);
+ pci_dma_write(d, cplus_rx_ring_desc+4, (uint8_t *)&val, 4);
+
+ /* update tally counter */
+ ++s->tally_counters.RxOk;
+
+ /* seek to next Rx descriptor */
+ if (rxdw0 & CP_RX_EOR)
+ {
+ s->currCPlusRxDesc = 0;
+ }
+ else
+ {
+ ++s->currCPlusRxDesc;
+ }
+
+ DPRINTF("done C+ Rx mode ----------------\n");
+
+ }
+ else
+ {
+ DPRINTF("in ring Rx mode ================\n");
+
+ /* begin ring receiver mode */
+ int avail = MOD2(s->RxBufferSize + s->RxBufPtr - s->RxBufAddr, s->RxBufferSize);
+
+ /* if receiver buffer is empty then avail == 0 */
+
+#define RX_ALIGN(x) (((x) + 3) & ~0x3)
+
+ if (avail != 0 && RX_ALIGN(size + 8) >= avail)
+ {
+ DPRINTF("rx overflow: rx buffer length %d head 0x%04x "
+ "read 0x%04x === available 0x%04x need
0x%04zx\n", + s->RxBufferSize, s->RxBufAddr, s->RxBufPtr, avail, size + 8); + + s->IntrStatus |= RxOverflow; + ++s->RxMissed; + rtl8139_update_irq(s); + return 0; + } + + packet_header |= RxStatusOK; + + packet_header |= (((size+4) << 16) & 0xffff0000); + + /* write header */ + uint32_t val = cpu_to_le32(packet_header); + + rtl8139_write_buffer(s, (uint8_t *)&val, 4); + + rtl8139_write_buffer(s, buf, size); + + /* write checksum */ + val = cpu_to_le32(crc32(0, buf, size)); + rtl8139_write_buffer(s, (uint8_t *)&val, 4); + + /* correct buffer write pointer */ + s->RxBufAddr = MOD2(RX_ALIGN(s->RxBufAddr), s->RxBufferSize); + + /* now we can signal we have received something */ + + DPRINTF("received: rx buffer length %d head 0x%04x read 0x%04x\n", + s->RxBufferSize, s->RxBufAddr, s->RxBufPtr); + } + + s->IntrStatus |= RxOK; + + if (do_interrupt) + { + rtl8139_update_irq(s); + } + + return size_; +} + +static ssize_t rtl8139_receive(NetClientState *nc, const uint8_t *buf, size_t size) +{ + return rtl8139_do_receive(nc, buf, size, 1); +} + +static void rtl8139_reset_rxring(RTL8139State *s, uint32_t bufferSize) +{ + s->RxBufferSize = bufferSize; + s->RxBufPtr = 0; + s->RxBufAddr = 0; +} + +static void rtl8139_reset_phy(RTL8139State *s) +{ + s->BasicModeStatus = 0x7809; + s->BasicModeStatus |= 0x0020; /* autonegotiation completed */ + /* preserve link state */ + s->BasicModeStatus |= qemu_get_queue(s->nic)->link_down ? 0 : 0x04; + + s->NWayAdvert = 0x05e1; /* all modes, full duplex */ + s->NWayLPAR = 0x05e1; /* all modes, full duplex */ + s->NWayExpansion = 0x0001; /* autonegotiation supported */ + + s->CSCR = CSCR_F_LINK_100 | CSCR_HEART_BIT | CSCR_LD; +} + +static void rtl8139_reset(DeviceState *d) +{ + RTL8139State *s = RTL8139(d); + int i; + + /* restore MAC address */ + memcpy(s->phys, s->conf.macaddr.a, 6); + qemu_format_nic_info_str(qemu_get_queue(s->nic), s->phys); + + /* reset interrupt mask */ + s->IntrStatus = 0; + s->IntrMask = 0; + + rtl8139_update_irq(s); + + /* mark all status registers as owned by host */ + for (i = 0; i < 4; ++i) + { + s->TxStatus[i] = TxHostOwns; + } + + s->currTxDesc = 0; + s->currCPlusRxDesc = 0; + s->currCPlusTxDesc = 0; + + s->RxRingAddrLO = 0; + s->RxRingAddrHI = 0; + + s->RxBuf = 0; + + rtl8139_reset_rxring(s, 8192); + + /* ACK the reset */ + s->TxConfig = 0; + +#if 0 +// s->TxConfig |= HW_REVID(1, 0, 0, 0, 0, 0, 0); // RTL-8139 HasHltClk + s->clock_enabled = 0; +#else + s->TxConfig |= HW_REVID(1, 1, 1, 0, 1, 1, 0); // RTL-8139C+ HasLWake + s->clock_enabled = 1; +#endif + + s->bChipCmdState = CmdReset; /* RxBufEmpty bit is calculated on read from ChipCmd */; + + /* set initial state data */ + s->Config0 = 0x0; /* No boot ROM */ + s->Config1 = 0xC; /* IO mapped and MEM mapped registers available */ + s->Config3 = 0x1; /* fast back-to-back compatible */ + s->Config5 = 0x0; + + s->CpCmd = 0x0; /* reset C+ mode */ + s->cplus_enabled = 0; + +// s->BasicModeCtrl = 0x3100; // 100Mbps, full duplex, autonegotiation +// s->BasicModeCtrl = 0x2100; // 100Mbps, full duplex + s->BasicModeCtrl = 0x1000; // autonegotiation + + rtl8139_reset_phy(s); + + /* also reset timer and disable timer interrupt */ + s->TCTR = 0; + s->TimerInt = 0; + s->TCTR_base = 0; + rtl8139_set_next_tctr_time(s); + + /* reset tally counters */ + RTL8139TallyCounters_clear(&s->tally_counters); +} + +static void RTL8139TallyCounters_clear(RTL8139TallyCounters* counters) +{ + counters->TxOk = 0; + counters->RxOk = 0; + counters->TxERR = 0; + counters->RxERR = 0; + counters->MissPkt = 0; + 
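+ /* These fields mirror the 64-byte dump block that
+ * RTL8139TallyCounters_dma_write() below lays out at offsets 0
+ * (TxOk, u64) through 62 (TxUndrn, u16) when the guest issues the
+ * DTCCR "Dump Tally Counters" command. */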
counters->FAE = 0; + counters->Tx1Col = 0; + counters->TxMCol = 0; + counters->RxOkPhy = 0; + counters->RxOkBrd = 0; + counters->RxOkMul = 0; + counters->TxAbt = 0; + counters->TxUndrn = 0; +} + +static void RTL8139TallyCounters_dma_write(RTL8139State *s, dma_addr_t tc_addr) +{ + PCIDevice *d = PCI_DEVICE(s); + RTL8139TallyCounters *tally_counters = &s->tally_counters; + uint16_t val16; + uint32_t val32; + uint64_t val64; + + val64 = cpu_to_le64(tally_counters->TxOk); + pci_dma_write(d, tc_addr + 0, (uint8_t *)&val64, 8); + + val64 = cpu_to_le64(tally_counters->RxOk); + pci_dma_write(d, tc_addr + 8, (uint8_t *)&val64, 8); + + val64 = cpu_to_le64(tally_counters->TxERR); + pci_dma_write(d, tc_addr + 16, (uint8_t *)&val64, 8); + + val32 = cpu_to_le32(tally_counters->RxERR); + pci_dma_write(d, tc_addr + 24, (uint8_t *)&val32, 4); + + val16 = cpu_to_le16(tally_counters->MissPkt); + pci_dma_write(d, tc_addr + 28, (uint8_t *)&val16, 2); + + val16 = cpu_to_le16(tally_counters->FAE); + pci_dma_write(d, tc_addr + 30, (uint8_t *)&val16, 2); + + val32 = cpu_to_le32(tally_counters->Tx1Col); + pci_dma_write(d, tc_addr + 32, (uint8_t *)&val32, 4); + + val32 = cpu_to_le32(tally_counters->TxMCol); + pci_dma_write(d, tc_addr + 36, (uint8_t *)&val32, 4); + + val64 = cpu_to_le64(tally_counters->RxOkPhy); + pci_dma_write(d, tc_addr + 40, (uint8_t *)&val64, 8); + + val64 = cpu_to_le64(tally_counters->RxOkBrd); + pci_dma_write(d, tc_addr + 48, (uint8_t *)&val64, 8); + + val32 = cpu_to_le32(tally_counters->RxOkMul); + pci_dma_write(d, tc_addr + 56, (uint8_t *)&val32, 4); + + val16 = cpu_to_le16(tally_counters->TxAbt); + pci_dma_write(d, tc_addr + 60, (uint8_t *)&val16, 2); + + val16 = cpu_to_le16(tally_counters->TxUndrn); + pci_dma_write(d, tc_addr + 62, (uint8_t *)&val16, 2); +} + +static void rtl8139_ChipCmd_write(RTL8139State *s, uint32_t val) +{ + DeviceState *d = DEVICE(s); + + val &= 0xff; + + DPRINTF("ChipCmd write val=0x%08x\n", val); + + if (val & CmdReset) + { + DPRINTF("ChipCmd reset\n"); + rtl8139_reset(d); + } + if (val & CmdRxEnb) + { + DPRINTF("ChipCmd enable receiver\n"); + + s->currCPlusRxDesc = 0; + } + if (val & CmdTxEnb) + { + DPRINTF("ChipCmd enable transmitter\n"); + + s->currCPlusTxDesc = 0; + } + + /* mask unwritable bits */ + val = SET_MASKED(val, 0xe3, s->bChipCmdState); + + /* Deassert reset pin before next read */ + val &= ~CmdReset; + + s->bChipCmdState = val; +} + +static int rtl8139_RxBufferEmpty(RTL8139State *s) +{ + int unread = MOD2(s->RxBufferSize + s->RxBufAddr - s->RxBufPtr, s->RxBufferSize); + + if (unread != 0) + { + DPRINTF("receiver buffer data available 0x%04x\n", unread); + return 0; + } + + DPRINTF("receiver buffer is empty\n"); + + return 1; +} + +static uint32_t rtl8139_ChipCmd_read(RTL8139State *s) +{ + uint32_t ret = s->bChipCmdState; + + if (rtl8139_RxBufferEmpty(s)) + ret |= RxBufEmpty; + + DPRINTF("ChipCmd read val=0x%04x\n", ret); + + return ret; +} + +static void rtl8139_CpCmd_write(RTL8139State *s, uint32_t val) +{ + val &= 0xffff; + + DPRINTF("C+ command register write(w) val=0x%04x\n", val); + + s->cplus_enabled = 1; + + /* mask unwritable bits */ + val = SET_MASKED(val, 0xff84, s->CpCmd); + + s->CpCmd = val; +} + +static uint32_t rtl8139_CpCmd_read(RTL8139State *s) +{ + uint32_t ret = s->CpCmd; + + DPRINTF("C+ command register read(w) val=0x%04x\n", ret); + + return ret; +} + +static void rtl8139_IntrMitigate_write(RTL8139State *s, uint32_t val) +{ + DPRINTF("C+ IntrMitigate register write(w) val=0x%04x\n", val); +} + +static uint32_t 
rtl8139_IntrMitigate_read(RTL8139State *s) +{ + uint32_t ret = 0; + + DPRINTF("C+ IntrMitigate register read(w) val=0x%04x\n", ret); + + return ret; +} + +static int rtl8139_config_writable(RTL8139State *s) +{ + if ((s->Cfg9346 & Chip9346_op_mask) == Cfg9346_ConfigWrite) + { + return 1; + } + + DPRINTF("Configuration registers are write-protected\n"); + + return 0; +} + +static void rtl8139_BasicModeCtrl_write(RTL8139State *s, uint32_t val) +{ + val &= 0xffff; + + DPRINTF("BasicModeCtrl register write(w) val=0x%04x\n", val); + + /* mask unwritable bits */ + uint32_t mask = 0xccff; + + if (1 || !rtl8139_config_writable(s)) + { + /* Speed setting and autonegotiation enable bits are read-only */ + mask |= 0x3000; + /* Duplex mode setting is read-only */ + mask |= 0x0100; + } + + if (val & 0x8000) { + /* Reset PHY */ + rtl8139_reset_phy(s); + } + + val = SET_MASKED(val, mask, s->BasicModeCtrl); + + s->BasicModeCtrl = val; +} + +static uint32_t rtl8139_BasicModeCtrl_read(RTL8139State *s) +{ + uint32_t ret = s->BasicModeCtrl; + + DPRINTF("BasicModeCtrl register read(w) val=0x%04x\n", ret); + + return ret; +} + +static void rtl8139_BasicModeStatus_write(RTL8139State *s, uint32_t val) +{ + val &= 0xffff; + + DPRINTF("BasicModeStatus register write(w) val=0x%04x\n", val); + + /* mask unwritable bits */ + val = SET_MASKED(val, 0xff3f, s->BasicModeStatus); + + s->BasicModeStatus = val; +} + +static uint32_t rtl8139_BasicModeStatus_read(RTL8139State *s) +{ + uint32_t ret = s->BasicModeStatus; + + DPRINTF("BasicModeStatus register read(w) val=0x%04x\n", ret); + + return ret; +} + +static void rtl8139_Cfg9346_write(RTL8139State *s, uint32_t val) +{ + DeviceState *d = DEVICE(s); + + val &= 0xff; + + DPRINTF("Cfg9346 write val=0x%02x\n", val); + + /* mask unwritable bits */ + val = SET_MASKED(val, 0x31, s->Cfg9346); + + uint32_t opmode = val & 0xc0; + uint32_t eeprom_val = val & 0xf; + + if (opmode == 0x80) { + /* eeprom access */ + int eecs = (eeprom_val & 0x08)?1:0; + int eesk = (eeprom_val & 0x04)?1:0; + int eedi = (eeprom_val & 0x02)?1:0; + prom9346_set_wire(s, eecs, eesk, eedi); + } else if (opmode == 0x40) { + /* Reset. 
*/ + val = 0; + rtl8139_reset(d); + } + + s->Cfg9346 = val; +} + +static uint32_t rtl8139_Cfg9346_read(RTL8139State *s) +{ + uint32_t ret = s->Cfg9346; + + uint32_t opmode = ret & 0xc0; + + if (opmode == 0x80) + { + /* eeprom access */ + int eedo = prom9346_get_wire(s); + if (eedo) + { + ret |= 0x01; + } + else + { + ret &= ~0x01; + } + } + + DPRINTF("Cfg9346 read val=0x%02x\n", ret); + + return ret; +} + +static void rtl8139_Config0_write(RTL8139State *s, uint32_t val) +{ + val &= 0xff; + + DPRINTF("Config0 write val=0x%02x\n", val); + + if (!rtl8139_config_writable(s)) { + return; + } + + /* mask unwritable bits */ + val = SET_MASKED(val, 0xf8, s->Config0); + + s->Config0 = val; +} + +static uint32_t rtl8139_Config0_read(RTL8139State *s) +{ + uint32_t ret = s->Config0; + + DPRINTF("Config0 read val=0x%02x\n", ret); + + return ret; +} + +static void rtl8139_Config1_write(RTL8139State *s, uint32_t val) +{ + val &= 0xff; + + DPRINTF("Config1 write val=0x%02x\n", val); + + if (!rtl8139_config_writable(s)) { + return; + } + + /* mask unwritable bits */ + val = SET_MASKED(val, 0xC, s->Config1); + + s->Config1 = val; +} + +static uint32_t rtl8139_Config1_read(RTL8139State *s) +{ + uint32_t ret = s->Config1; + + DPRINTF("Config1 read val=0x%02x\n", ret); + + return ret; +} + +static void rtl8139_Config3_write(RTL8139State *s, uint32_t val) +{ + val &= 0xff; + + DPRINTF("Config3 write val=0x%02x\n", val); + + if (!rtl8139_config_writable(s)) { + return; + } + + /* mask unwritable bits */ + val = SET_MASKED(val, 0x8F, s->Config3); + + s->Config3 = val; +} + +static uint32_t rtl8139_Config3_read(RTL8139State *s) +{ + uint32_t ret = s->Config3; + + DPRINTF("Config3 read val=0x%02x\n", ret); + + return ret; +} + +static void rtl8139_Config4_write(RTL8139State *s, uint32_t val) +{ + val &= 0xff; + + DPRINTF("Config4 write val=0x%02x\n", val); + + if (!rtl8139_config_writable(s)) { + return; + } + + /* mask unwritable bits */ + val = SET_MASKED(val, 0x0a, s->Config4); + + s->Config4 = val; +} + +static uint32_t rtl8139_Config4_read(RTL8139State *s) +{ + uint32_t ret = s->Config4; + + DPRINTF("Config4 read val=0x%02x\n", ret); + + return ret; +} + +static void rtl8139_Config5_write(RTL8139State *s, uint32_t val) +{ + val &= 0xff; + + DPRINTF("Config5 write val=0x%02x\n", val); + + /* mask unwritable bits */ + val = SET_MASKED(val, 0x80, s->Config5); + + s->Config5 = val; +} + +static uint32_t rtl8139_Config5_read(RTL8139State *s) +{ + uint32_t ret = s->Config5; + + DPRINTF("Config5 read val=0x%02x\n", ret); + + return ret; +} + +static void rtl8139_TxConfig_write(RTL8139State *s, uint32_t val) +{ + if (!rtl8139_transmitter_enabled(s)) + { + DPRINTF("transmitter disabled; no TxConfig write val=0x%08x\n", val); + return; + } + + DPRINTF("TxConfig write val=0x%08x\n", val); + + val = SET_MASKED(val, TxVersionMask | 0x8070f80f, s->TxConfig); + + s->TxConfig = val; +} + +static void rtl8139_TxConfig_writeb(RTL8139State *s, uint32_t val) +{ + DPRINTF("RTL8139C TxConfig via write(b) val=0x%02x\n", val); + + uint32_t tc = s->TxConfig; + tc &= 0xFFFFFF00; + tc |= (val & 0x000000FF); + rtl8139_TxConfig_write(s, tc); +} + +static uint32_t rtl8139_TxConfig_read(RTL8139State *s) +{ + uint32_t ret = s->TxConfig; + + DPRINTF("TxConfig read val=0x%04x\n", ret); + + return ret; +} + +static void rtl8139_RxConfig_write(RTL8139State *s, uint32_t val) +{ + DPRINTF("RxConfig write val=0x%08x\n", val); + + /* mask unwritable bits */ + val = SET_MASKED(val, 0xf0fc0040, s->RxConfig); + + s->RxConfig = val; + + /* reset buffer 
size and read/write pointers */ + rtl8139_reset_rxring(s, 8192 << ((s->RxConfig >> 11) & 0x3)); + + DPRINTF("RxConfig write reset buffer size to %d\n", s->RxBufferSize); +} + +static uint32_t rtl8139_RxConfig_read(RTL8139State *s) +{ + uint32_t ret = s->RxConfig; + + DPRINTF("RxConfig read val=0x%08x\n", ret); + + return ret; +} + +static void rtl8139_transfer_frame(RTL8139State *s, uint8_t *buf, int size, + int do_interrupt, const uint8_t *dot1q_buf) +{ + struct iovec *iov = NULL; + struct iovec vlan_iov[3]; + + if (!size) + { + DPRINTF("+++ empty ethernet frame\n"); + return; + } + + if (dot1q_buf && size >= ETH_ALEN * 2) { + iov = (struct iovec[3]) { + { .iov_base = buf, .iov_len = ETH_ALEN * 2 }, + { .iov_base = (void *) dot1q_buf, .iov_len = VLAN_HLEN }, + { .iov_base = buf + ETH_ALEN * 2, + .iov_len = size - ETH_ALEN * 2 }, + }; + + memcpy(vlan_iov, iov, sizeof(vlan_iov)); + iov = vlan_iov; + } + + if (TxLoopBack == (s->TxConfig & TxLoopBack)) + { + size_t buf2_size; + uint8_t *buf2; + + if (iov) { + buf2_size = iov_size(iov, 3); + buf2 = g_malloc(buf2_size); + iov_to_buf(iov, 3, 0, buf2, buf2_size); + buf = buf2; + } + + DPRINTF("+++ transmit loopback mode\n"); + qemu_receive_packet(qemu_get_queue(s->nic), buf, size); + + if (iov) { + g_free(buf2); + } + } + else + { + if (iov) { + qemu_sendv_packet(qemu_get_queue(s->nic), iov, 3); + } else { + qemu_send_packet(qemu_get_queue(s->nic), buf, size); + } + } +} + +static int rtl8139_transmit_one(RTL8139State *s, int descriptor) +{ + if (!rtl8139_transmitter_enabled(s)) + { + DPRINTF("+++ cannot transmit from descriptor %d: transmitter " + "disabled\n", descriptor); + return 0; + } + + if (s->TxStatus[descriptor] & TxHostOwns) + { + DPRINTF("+++ cannot transmit from descriptor %d: owned by host " + "(%08x)\n", descriptor, s->TxStatus[descriptor]); + return 0; + } + + DPRINTF("+++ transmitting from descriptor %d\n", descriptor); + + PCIDevice *d = PCI_DEVICE(s); + int txsize = s->TxStatus[descriptor] & 0x1fff; + uint8_t txbuffer[0x2000]; + + DPRINTF("+++ transmit reading %d bytes from host memory at 0x%08x\n", + txsize, s->TxAddr[descriptor]); + + pci_dma_read(d, s->TxAddr[descriptor], txbuffer, txsize); + + /* Mark descriptor as transferred */ + s->TxStatus[descriptor] |= TxHostOwns; + s->TxStatus[descriptor] |= TxStatOK; + + rtl8139_transfer_frame(s, txbuffer, txsize, 0, NULL); + + DPRINTF("+++ transmitted %d bytes from descriptor %d\n", txsize, + descriptor); + + /* update interrupt */ + s->IntrStatus |= TxOK; + rtl8139_update_irq(s); + + return 1; +} + +#define TCP_HEADER_CLEAR_FLAGS(tcp, off) ((tcp)->th_offset_flags &= cpu_to_be16(~TCP_FLAGS_ONLY(off))) + +/* produces ones' complement sum of data */ +static uint16_t ones_complement_sum(uint8_t *data, size_t len) +{ + uint32_t result = 0; + + for (; len > 1; data+=2, len-=2) + { + result += *(uint16_t*)data; + } + + /* add the remainder byte */ + if (len) + { + uint8_t odd[2] = {*data, 0}; + result += *(uint16_t*)odd; + } + + while (result>>16) + result = (result & 0xffff) + (result >> 16); + + return result; +} + +static uint16_t ip_checksum(void *data, size_t len) +{ + return ~ones_complement_sum((uint8_t*)data, len); +} + +static int rtl8139_cplus_transmit_one(RTL8139State *s) +{ + if (!rtl8139_transmitter_enabled(s)) + { + DPRINTF("+++ C+ mode: transmitter disabled\n"); + return 0; + } + + if (!rtl8139_cp_transmitter_enabled(s)) + { + DPRINTF("+++ C+ mode: C+ transmitter disabled\n"); + return 0 ; + } + + PCIDevice *d = PCI_DEVICE(s); + int descriptor = s->currCPlusTxDesc; + + 
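+ /* A C+ descriptor is four little-endian dwords (16 bytes): w0 holds
+ * the flags and buffer size, w1 the VLAN control word, w2/w3 the
+ * low/high halves of the buffer address. TxAddr[1]:TxAddr[0] is
+ * reused as the 64-bit ring base in C+ mode, so with a base of
+ * 0x10000 descriptor 3 is fetched from 0x10030. */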
dma_addr_t cplus_tx_ring_desc = rtl8139_addr64(s->TxAddr[0], s->TxAddr[1]); + + /* Normal priority ring */ + cplus_tx_ring_desc += 16 * descriptor; + + DPRINTF("+++ C+ mode reading TX descriptor %d from host memory at " + "%08x %08x = 0x"DMA_ADDR_FMT"\n", descriptor, s->TxAddr[1], + s->TxAddr[0], cplus_tx_ring_desc); + + uint32_t val, txdw0,txdw1,txbufLO,txbufHI; + + pci_dma_read(d, cplus_tx_ring_desc, (uint8_t *)&val, 4); + txdw0 = le32_to_cpu(val); + pci_dma_read(d, cplus_tx_ring_desc+4, (uint8_t *)&val, 4); + txdw1 = le32_to_cpu(val); + pci_dma_read(d, cplus_tx_ring_desc+8, (uint8_t *)&val, 4); + txbufLO = le32_to_cpu(val); + pci_dma_read(d, cplus_tx_ring_desc+12, (uint8_t *)&val, 4); + txbufHI = le32_to_cpu(val); + + DPRINTF("+++ C+ mode TX descriptor %d %08x %08x %08x %08x\n", descriptor, + txdw0, txdw1, txbufLO, txbufHI); + +/* w0 ownership flag */ +#define CP_TX_OWN (1<<31) +/* w0 end of ring flag */ +#define CP_TX_EOR (1<<30) +/* first segment of received packet flag */ +#define CP_TX_FS (1<<29) +/* last segment of received packet flag */ +#define CP_TX_LS (1<<28) +/* large send packet flag */ +#define CP_TX_LGSEN (1<<27) +/* large send MSS mask, bits 16...25 */ +#define CP_TC_LGSEN_MSS_MASK ((1 << 12) - 1) + +/* IP checksum offload flag */ +#define CP_TX_IPCS (1<<18) +/* UDP checksum offload flag */ +#define CP_TX_UDPCS (1<<17) +/* TCP checksum offload flag */ +#define CP_TX_TCPCS (1<<16) + +/* w0 bits 0...15 : buffer size */ +#define CP_TX_BUFFER_SIZE (1<<16) +#define CP_TX_BUFFER_SIZE_MASK (CP_TX_BUFFER_SIZE - 1) +/* w1 add tag flag */ +#define CP_TX_TAGC (1<<17) +/* w1 bits 0...15 : VLAN tag (big endian) */ +#define CP_TX_VLAN_TAG_MASK ((1<<16) - 1) +/* w2 low 32bit of Rx buffer ptr */ +/* w3 high 32bit of Rx buffer ptr */ + +/* set after transmission */ +/* FIFO underrun flag */ +#define CP_TX_STATUS_UNF (1<<25) +/* transmit error summary flag, valid if set any of three below */ +#define CP_TX_STATUS_TES (1<<23) +/* out-of-window collision flag */ +#define CP_TX_STATUS_OWC (1<<22) +/* link failure flag */ +#define CP_TX_STATUS_LNKF (1<<21) +/* excessive collisions flag */ +#define CP_TX_STATUS_EXC (1<<20) + + if (!(txdw0 & CP_TX_OWN)) + { + DPRINTF("C+ Tx mode : descriptor %d is owned by host\n", descriptor); + return 0 ; + } + + DPRINTF("+++ C+ Tx mode : transmitting from descriptor %d\n", descriptor); + + if (txdw0 & CP_TX_FS) + { + DPRINTF("+++ C+ Tx mode : descriptor %d is first segment " + "descriptor\n", descriptor); + + /* reset internal buffer offset */ + s->cplus_txbuffer_offset = 0; + } + + int txsize = txdw0 & CP_TX_BUFFER_SIZE_MASK; + dma_addr_t tx_addr = rtl8139_addr64(txbufLO, txbufHI); + + /* make sure we have enough space to assemble the packet */ + if (!s->cplus_txbuffer) + { + s->cplus_txbuffer_len = CP_TX_BUFFER_SIZE; + s->cplus_txbuffer = g_malloc(s->cplus_txbuffer_len); + s->cplus_txbuffer_offset = 0; + + DPRINTF("+++ C+ mode transmission buffer allocated space %d\n", + s->cplus_txbuffer_len); + } + + if (s->cplus_txbuffer_offset + txsize >= s->cplus_txbuffer_len) + { + /* The spec didn't tell the maximum size, stick to CP_TX_BUFFER_SIZE */ + txsize = s->cplus_txbuffer_len - s->cplus_txbuffer_offset; + DPRINTF("+++ C+ mode transmission buffer overrun, truncated descriptor" + "length to %d\n", txsize); + } + + /* append more data to the packet */ + + DPRINTF("+++ C+ mode transmit reading %d bytes from host memory at " + DMA_ADDR_FMT" to offset %d\n", txsize, tx_addr, + s->cplus_txbuffer_offset); + + pci_dma_read(d, tx_addr, + s->cplus_txbuffer + 
s->cplus_txbuffer_offset, txsize); + s->cplus_txbuffer_offset += txsize; + + /* seek to next Rx descriptor */ + if (txdw0 & CP_TX_EOR) + { + s->currCPlusTxDesc = 0; + } + else + { + ++s->currCPlusTxDesc; + if (s->currCPlusTxDesc >= 64) + s->currCPlusTxDesc = 0; + } + + /* transfer ownership to target */ + txdw0 &= ~CP_TX_OWN; + + /* reset error indicator bits */ + txdw0 &= ~CP_TX_STATUS_UNF; + txdw0 &= ~CP_TX_STATUS_TES; + txdw0 &= ~CP_TX_STATUS_OWC; + txdw0 &= ~CP_TX_STATUS_LNKF; + txdw0 &= ~CP_TX_STATUS_EXC; + + /* update ring data */ + val = cpu_to_le32(txdw0); + pci_dma_write(d, cplus_tx_ring_desc, (uint8_t *)&val, 4); + + /* Now decide if descriptor being processed is holding the last segment of packet */ + if (txdw0 & CP_TX_LS) + { + uint8_t dot1q_buffer_space[VLAN_HLEN]; + uint16_t *dot1q_buffer; + + DPRINTF("+++ C+ Tx mode : descriptor %d is last segment descriptor\n", + descriptor); + + /* can transfer fully assembled packet */ + + uint8_t *saved_buffer = s->cplus_txbuffer; + int saved_size = s->cplus_txbuffer_offset; + int saved_buffer_len = s->cplus_txbuffer_len; + + /* create vlan tag */ + if (txdw1 & CP_TX_TAGC) { + /* the vlan tag is in BE byte order in the descriptor + * BE + le_to_cpu() + ~swap()~ = cpu */ + DPRINTF("+++ C+ Tx mode : inserting vlan tag with ""tci: %u\n", + bswap16(txdw1 & CP_TX_VLAN_TAG_MASK)); + + dot1q_buffer = (uint16_t *) dot1q_buffer_space; + dot1q_buffer[0] = cpu_to_be16(ETH_P_VLAN); + /* BE + le_to_cpu() + ~cpu_to_le()~ = BE */ + dot1q_buffer[1] = cpu_to_le16(txdw1 & CP_TX_VLAN_TAG_MASK); + } else { + dot1q_buffer = NULL; + } + + /* reset the card space to protect from recursive call */ + s->cplus_txbuffer = NULL; + s->cplus_txbuffer_offset = 0; + s->cplus_txbuffer_len = 0; + + if (txdw0 & (CP_TX_IPCS | CP_TX_UDPCS | CP_TX_TCPCS | CP_TX_LGSEN)) + { + DPRINTF("+++ C+ mode offloaded task checksum\n"); + + /* Large enough for Ethernet and IP headers? */ + if (saved_size < ETH_HLEN + sizeof(struct ip_header)) { + goto skip_offload; + } + + /* ip packet header */ + struct ip_header *ip = NULL; + int hlen = 0; + uint8_t ip_protocol = 0; + uint16_t ip_data_len = 0; + + uint8_t *eth_payload_data = NULL; + size_t eth_payload_len = 0; + + int proto = be16_to_cpu(*(uint16_t *)(saved_buffer + 12)); + if (proto != ETH_P_IP) + { + goto skip_offload; + } + + DPRINTF("+++ C+ mode has IP packet\n"); + + /* Note on memory alignment: eth_payload_data is 16-bit aligned + * since saved_buffer is allocated with g_malloc() and ETH_HLEN is + * even. 32-bit accesses must use ldl/stl wrappers to avoid + * unaligned accesses. + */ + eth_payload_data = saved_buffer + ETH_HLEN; + eth_payload_len = saved_size - ETH_HLEN; + + ip = (struct ip_header*)eth_payload_data; + + if (IP_HEADER_VERSION(ip) != IP_HEADER_VERSION_4) { + DPRINTF("+++ C+ mode packet has bad IP version %d " + "expected %d\n", IP_HEADER_VERSION(ip), + IP_HEADER_VERSION_4); + goto skip_offload; + } + + hlen = IP_HDR_GET_LEN(ip); + if (hlen < sizeof(struct ip_header) || hlen > eth_payload_len) { + goto skip_offload; + } + + ip_protocol = ip->ip_p; + + ip_data_len = be16_to_cpu(ip->ip_len); + if (ip_data_len < hlen || ip_data_len > eth_payload_len) { + goto skip_offload; + } + ip_data_len -= hlen; + + if (txdw0 & CP_TX_IPCS) + { + DPRINTF("+++ C+ mode need IP checksum\n"); + + ip->ip_sum = 0; + ip->ip_sum = ip_checksum(ip, hlen); + DPRINTF("+++ C+ mode IP header len=%d checksum=%04x\n", + hlen, ip->ip_sum); + } + + if ((txdw0 & CP_TX_LGSEN) && ip_protocol == IP_PROTO_TCP) + { + /* Large enough for the TCP header? 
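From here the payload is resegmented so that every emitted frame fits
ETH_MTU: with a 20-byte IP header and a 20-byte TCP header,
tcp_chunk_size becomes 1500 - 20 - 20 = 1460, so a 4000-byte TCP
payload leaves as frames of 1460, 1460 and 1080 bytes, each with a
fresh IP id and length and recomputed IP and TCP checksums.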
*/ + if (ip_data_len < sizeof(tcp_header)) { + goto skip_offload; + } + + int large_send_mss = (txdw0 >> 16) & CP_TC_LGSEN_MSS_MASK; + + DPRINTF("+++ C+ mode offloaded task TSO MTU=%d IP data %d " + "frame data %d specified MSS=%d\n", ETH_MTU, + ip_data_len, saved_size - ETH_HLEN, large_send_mss); + + int tcp_send_offset = 0; + int send_count = 0; + + /* maximum IP header length is 60 bytes */ + uint8_t saved_ip_header[60]; + + /* save IP header template; data area is used in tcp checksum calculation */ + memcpy(saved_ip_header, eth_payload_data, hlen); + + /* a placeholder for checksum calculation routine in tcp case */ + uint8_t *data_to_checksum = eth_payload_data + hlen - 12; + // size_t data_to_checksum_len = eth_payload_len - hlen + 12; + + /* pointer to TCP header */ + tcp_header *p_tcp_hdr = (tcp_header*)(eth_payload_data + hlen); + + int tcp_hlen = TCP_HEADER_DATA_OFFSET(p_tcp_hdr); + + /* Invalid TCP data offset? */ + if (tcp_hlen < sizeof(tcp_header) || tcp_hlen > ip_data_len) { + goto skip_offload; + } + + /* ETH_MTU = ip header len + tcp header len + payload */ + int tcp_data_len = ip_data_len - tcp_hlen; + int tcp_chunk_size = ETH_MTU - hlen - tcp_hlen; + + DPRINTF("+++ C+ mode TSO IP data len %d TCP hlen %d TCP " + "data len %d TCP chunk size %d\n", ip_data_len, + tcp_hlen, tcp_data_len, tcp_chunk_size); + + /* note the cycle below overwrites IP header data, + but restores it from saved_ip_header before sending packet */ + + int is_last_frame = 0; + + for (tcp_send_offset = 0; tcp_send_offset < tcp_data_len; tcp_send_offset += tcp_chunk_size) + { + uint16_t chunk_size = tcp_chunk_size; + + /* check if this is the last frame */ + if (tcp_send_offset + tcp_chunk_size >= tcp_data_len) + { + is_last_frame = 1; + chunk_size = tcp_data_len - tcp_send_offset; + } + + DPRINTF("+++ C+ mode TSO TCP seqno %08x\n", + ldl_be_p(&p_tcp_hdr->th_seq)); + + /* add 4 TCP pseudoheader fields */ + /* copy IP source and destination fields */ + memcpy(data_to_checksum, saved_ip_header + 12, 8); + + DPRINTF("+++ C+ mode TSO calculating TCP checksum for " + "packet with %d bytes data\n", tcp_hlen + + chunk_size); + + if (tcp_send_offset) + { + memcpy((uint8_t*)p_tcp_hdr + tcp_hlen, (uint8_t*)p_tcp_hdr + tcp_hlen + tcp_send_offset, chunk_size); + } + + /* keep PUSH and FIN flags only for the last frame */ + if (!is_last_frame) + { + TCP_HEADER_CLEAR_FLAGS(p_tcp_hdr, TH_PUSH | TH_FIN); + } + + /* recalculate TCP checksum */ + ip_pseudo_header *p_tcpip_hdr = (ip_pseudo_header *)data_to_checksum; + p_tcpip_hdr->zeros = 0; + p_tcpip_hdr->ip_proto = IP_PROTO_TCP; + p_tcpip_hdr->ip_payload = cpu_to_be16(tcp_hlen + chunk_size); + + p_tcp_hdr->th_sum = 0; + + int tcp_checksum = ip_checksum(data_to_checksum, tcp_hlen + chunk_size + 12); + DPRINTF("+++ C+ mode TSO TCP checksum %04x\n", + tcp_checksum); + + p_tcp_hdr->th_sum = tcp_checksum; + + /* restore IP header */ + memcpy(eth_payload_data, saved_ip_header, hlen); + + /* set IP data length and recalculate IP checksum */ + ip->ip_len = cpu_to_be16(hlen + tcp_hlen + chunk_size); + + /* increment IP id for subsequent frames */ + ip->ip_id = cpu_to_be16(tcp_send_offset/tcp_chunk_size + be16_to_cpu(ip->ip_id)); + + ip->ip_sum = 0; + ip->ip_sum = ip_checksum(eth_payload_data, hlen); + DPRINTF("+++ C+ mode TSO IP header len=%d " + "checksum=%04x\n", hlen, ip->ip_sum); + + int tso_send_size = ETH_HLEN + hlen + tcp_hlen + chunk_size; + DPRINTF("+++ C+ mode TSO transferring packet size " + "%d\n", tso_send_size); + rtl8139_transfer_frame(s, saved_buffer, 
tso_send_size, + 0, (uint8_t *) dot1q_buffer); + + /* add transferred count to TCP sequence number */ + stl_be_p(&p_tcp_hdr->th_seq, + chunk_size + ldl_be_p(&p_tcp_hdr->th_seq)); + ++send_count; + } + + /* Stop sending this frame */ + saved_size = 0; + } + else if (txdw0 & (CP_TX_TCPCS|CP_TX_UDPCS)) + { + DPRINTF("+++ C+ mode need TCP or UDP checksum\n"); + + /* maximum IP header length is 60 bytes */ + uint8_t saved_ip_header[60]; + memcpy(saved_ip_header, eth_payload_data, hlen); + + uint8_t *data_to_checksum = eth_payload_data + hlen - 12; + // size_t data_to_checksum_len = eth_payload_len - hlen + 12; + + /* add 4 TCP pseudoheader fields */ + /* copy IP source and destination fields */ + memcpy(data_to_checksum, saved_ip_header + 12, 8); + + if ((txdw0 & CP_TX_TCPCS) && ip_protocol == IP_PROTO_TCP) + { + DPRINTF("+++ C+ mode calculating TCP checksum for " + "packet with %d bytes data\n", ip_data_len); + + ip_pseudo_header *p_tcpip_hdr = (ip_pseudo_header *)data_to_checksum; + p_tcpip_hdr->zeros = 0; + p_tcpip_hdr->ip_proto = IP_PROTO_TCP; + p_tcpip_hdr->ip_payload = cpu_to_be16(ip_data_len); + + tcp_header* p_tcp_hdr = (tcp_header *) (data_to_checksum+12); + + p_tcp_hdr->th_sum = 0; + + int tcp_checksum = ip_checksum(data_to_checksum, ip_data_len + 12); + DPRINTF("+++ C+ mode TCP checksum %04x\n", + tcp_checksum); + + p_tcp_hdr->th_sum = tcp_checksum; + } + else if ((txdw0 & CP_TX_UDPCS) && ip_protocol == IP_PROTO_UDP) + { + DPRINTF("+++ C+ mode calculating UDP checksum for " + "packet with %d bytes data\n", ip_data_len); + + ip_pseudo_header *p_udpip_hdr = (ip_pseudo_header *)data_to_checksum; + p_udpip_hdr->zeros = 0; + p_udpip_hdr->ip_proto = IP_PROTO_UDP; + p_udpip_hdr->ip_payload = cpu_to_be16(ip_data_len); + + udp_header *p_udp_hdr = (udp_header *) (data_to_checksum+12); + + p_udp_hdr->uh_sum = 0; + + int udp_checksum = ip_checksum(data_to_checksum, ip_data_len + 12); + DPRINTF("+++ C+ mode UDP checksum %04x\n", + udp_checksum); + + p_udp_hdr->uh_sum = udp_checksum; + } + + /* restore IP header */ + memcpy(eth_payload_data, saved_ip_header, hlen); + } + } + +skip_offload: + /* update tally counter */ + ++s->tally_counters.TxOk; + + DPRINTF("+++ C+ mode transmitting %d bytes packet\n", saved_size); + + rtl8139_transfer_frame(s, saved_buffer, saved_size, 1, + (uint8_t *) dot1q_buffer); + + /* restore card space if there was no recursion and reset offset */ + if (!s->cplus_txbuffer) + { + s->cplus_txbuffer = saved_buffer; + s->cplus_txbuffer_len = saved_buffer_len; + s->cplus_txbuffer_offset = 0; + } + else + { + g_free(saved_buffer); + } + } + else + { + DPRINTF("+++ C+ mode transmission continue to next descriptor\n"); + } + + return 1; +} + +static void rtl8139_cplus_transmit(RTL8139State *s) +{ + int txcount = 0; + + while (txcount < 64 && rtl8139_cplus_transmit_one(s)) + { + ++txcount; + } + + /* Mark transfer completed */ + if (!txcount) + { + DPRINTF("C+ mode : transmitter queue stalled, current TxDesc = %d\n", + s->currCPlusTxDesc); + } + else + { + /* update interrupt status */ + s->IntrStatus |= TxOK; + rtl8139_update_irq(s); + } +} + +static void rtl8139_transmit(RTL8139State *s) +{ + int descriptor = s->currTxDesc, txcount = 0; + + /*while*/ + if (rtl8139_transmit_one(s, descriptor)) + { + ++s->currTxDesc; + s->currTxDesc %= 4; + ++txcount; + } + + /* Mark transfer completed */ + if (!txcount) + { + DPRINTF("transmitter queue stalled, current TxDesc = %d\n", + s->currTxDesc); + } +} + +static void rtl8139_TxStatus_write(RTL8139State *s, uint32_t txRegOffset, uint32_t 
val) +{ + + int descriptor = txRegOffset/4; + + /* handle C+ transmit mode register configuration */ + + if (s->cplus_enabled) + { + DPRINTF("RTL8139C+ DTCCR write offset=0x%x val=0x%08x " + "descriptor=%d\n", txRegOffset, val, descriptor); + + /* handle Dump Tally Counters command */ + s->TxStatus[descriptor] = val; + + if (descriptor == 0 && (val & 0x8)) + { + hwaddr tc_addr = rtl8139_addr64(s->TxStatus[0] & ~0x3f, s->TxStatus[1]); + + /* dump tally counters to specified memory location */ + RTL8139TallyCounters_dma_write(s, tc_addr); + + /* mark dump completed */ + s->TxStatus[0] &= ~0x8; + } + + return; + } + + DPRINTF("TxStatus write offset=0x%x val=0x%08x descriptor=%d\n", + txRegOffset, val, descriptor); + + /* mask only reserved bits */ + val &= ~0xff00c000; /* these bits are reset on write */ + val = SET_MASKED(val, 0x00c00000, s->TxStatus[descriptor]); + + s->TxStatus[descriptor] = val; + + /* attempt to start transmission */ + rtl8139_transmit(s); +} + +static uint32_t rtl8139_TxStatus_TxAddr_read(RTL8139State *s, uint32_t regs[], + uint32_t base, uint8_t addr, + int size) +{ + uint32_t reg = (addr - base) / 4; + uint32_t offset = addr & 0x3; + uint32_t ret = 0; + + if (addr & (size - 1)) { + DPRINTF("not implemented read for TxStatus/TxAddr " + "addr=0x%x size=0x%x\n", addr, size); + return ret; + } + + switch (size) { + case 1: /* fall through */ + case 2: /* fall through */ + case 4: + ret = (regs[reg] >> offset * 8) & (((uint64_t)1 << (size * 8)) - 1); + DPRINTF("TxStatus/TxAddr[%d] read addr=0x%x size=0x%x val=0x%08x\n", + reg, addr, size, ret); + break; + default: + DPRINTF("unsupported size 0x%x of TxStatus/TxAddr reading\n", size); + break; + } + + return ret; +} + +static uint16_t rtl8139_TSAD_read(RTL8139State *s) +{ + uint16_t ret = 0; + + /* Simulate TSAD, it is read only anyway */ + + ret = ((s->TxStatus[3] & TxStatOK )?TSAD_TOK3:0) + |((s->TxStatus[2] & TxStatOK )?TSAD_TOK2:0) + |((s->TxStatus[1] & TxStatOK )?TSAD_TOK1:0) + |((s->TxStatus[0] & TxStatOK )?TSAD_TOK0:0) + + |((s->TxStatus[3] & TxUnderrun)?TSAD_TUN3:0) + |((s->TxStatus[2] & TxUnderrun)?TSAD_TUN2:0) + |((s->TxStatus[1] & TxUnderrun)?TSAD_TUN1:0) + |((s->TxStatus[0] & TxUnderrun)?TSAD_TUN0:0) + + |((s->TxStatus[3] & TxAborted )?TSAD_TABT3:0) + |((s->TxStatus[2] & TxAborted )?TSAD_TABT2:0) + |((s->TxStatus[1] & TxAborted )?TSAD_TABT1:0) + |((s->TxStatus[0] & TxAborted )?TSAD_TABT0:0) + + |((s->TxStatus[3] & TxHostOwns )?TSAD_OWN3:0) + |((s->TxStatus[2] & TxHostOwns )?TSAD_OWN2:0) + |((s->TxStatus[1] & TxHostOwns )?TSAD_OWN1:0) + |((s->TxStatus[0] & TxHostOwns )?TSAD_OWN0:0) ; + + + DPRINTF("TSAD read val=0x%04x\n", ret); + + return ret; +} + +static uint16_t rtl8139_CSCR_read(RTL8139State *s) +{ + uint16_t ret = s->CSCR; + + DPRINTF("CSCR read val=0x%04x\n", ret); + + return ret; +} + +static void rtl8139_TxAddr_write(RTL8139State *s, uint32_t txAddrOffset, uint32_t val) +{ + DPRINTF("TxAddr write offset=0x%x val=0x%08x\n", txAddrOffset, val); + + s->TxAddr[txAddrOffset/4] = val; +} + +static uint32_t rtl8139_TxAddr_read(RTL8139State *s, uint32_t txAddrOffset) +{ + uint32_t ret = s->TxAddr[txAddrOffset/4]; + + DPRINTF("TxAddr read offset=0x%x val=0x%08x\n", txAddrOffset, ret); + + return ret; +} + +static void rtl8139_RxBufPtr_write(RTL8139State *s, uint32_t val) +{ + DPRINTF("RxBufPtr write val=0x%04x\n", val); + + /* this value is off by 16 */ + s->RxBufPtr = MOD2(val + 0x10, s->RxBufferSize); + + /* more buffer space may be available so try to receive */ + 
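+ /* Example: a guest that has consumed data up to ring offset 0x100
+ * writes CAPR = 0x00f0; MOD2(val + 0x10, ...) above stores the real
+ * read offset 0x100, and rtl8139_RxBufPtr_read() re-applies the -16
+ * bias. The flush below retries packets the net layer queued while
+ * rtl8139_do_receive() was returning 0 for lack of ring space. */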
qemu_flush_queued_packets(qemu_get_queue(s->nic)); + + DPRINTF(" CAPR write: rx buffer length %d head 0x%04x read 0x%04x\n", + s->RxBufferSize, s->RxBufAddr, s->RxBufPtr); +} + +static uint32_t rtl8139_RxBufPtr_read(RTL8139State *s) +{ + /* this value is off by 16 */ + uint32_t ret = s->RxBufPtr - 0x10; + + DPRINTF("RxBufPtr read val=0x%04x\n", ret); + + return ret; +} + +static uint32_t rtl8139_RxBufAddr_read(RTL8139State *s) +{ + /* this value is NOT off by 16 */ + uint32_t ret = s->RxBufAddr; + + DPRINTF("RxBufAddr read val=0x%04x\n", ret); + + return ret; +} + +static void rtl8139_RxBuf_write(RTL8139State *s, uint32_t val) +{ + DPRINTF("RxBuf write val=0x%08x\n", val); + + s->RxBuf = val; + + /* may need to reset rxring here */ +} + +static uint32_t rtl8139_RxBuf_read(RTL8139State *s) +{ + uint32_t ret = s->RxBuf; + + DPRINTF("RxBuf read val=0x%08x\n", ret); + + return ret; +} + +static void rtl8139_IntrMask_write(RTL8139State *s, uint32_t val) +{ + DPRINTF("IntrMask write(w) val=0x%04x\n", val); + + /* mask unwritable bits */ + val = SET_MASKED(val, 0x1e00, s->IntrMask); + + s->IntrMask = val; + + rtl8139_update_irq(s); + +} + +static uint32_t rtl8139_IntrMask_read(RTL8139State *s) +{ + uint32_t ret = s->IntrMask; + + DPRINTF("IntrMask read(w) val=0x%04x\n", ret); + + return ret; +} + +static void rtl8139_IntrStatus_write(RTL8139State *s, uint32_t val) +{ + DPRINTF("IntrStatus write(w) val=0x%04x\n", val); + +#if 0 + + /* writing to ISR has no effect */ + + return; + +#else + uint16_t newStatus = s->IntrStatus & ~val; + + /* mask unwritable bits */ + newStatus = SET_MASKED(newStatus, 0x1e00, s->IntrStatus); + + /* writing 1 to interrupt status register bit clears it */ + s->IntrStatus = 0; + rtl8139_update_irq(s); + + s->IntrStatus = newStatus; + rtl8139_set_next_tctr_time(s); + rtl8139_update_irq(s); + +#endif +} + +static uint32_t rtl8139_IntrStatus_read(RTL8139State *s) +{ + uint32_t ret = s->IntrStatus; + + DPRINTF("IntrStatus read(w) val=0x%04x\n", ret); + +#if 0 + + /* reading ISR clears all interrupts */ + s->IntrStatus = 0; + + rtl8139_update_irq(s); + +#endif + + return ret; +} + +static void rtl8139_MultiIntr_write(RTL8139State *s, uint32_t val) +{ + DPRINTF("MultiIntr write(w) val=0x%04x\n", val); + + /* mask unwritable bits */ + val = SET_MASKED(val, 0xf000, s->MultiIntr); + + s->MultiIntr = val; +} + +static uint32_t rtl8139_MultiIntr_read(RTL8139State *s) +{ + uint32_t ret = s->MultiIntr; + + DPRINTF("MultiIntr read(w) val=0x%04x\n", ret); + + return ret; +} + +static void rtl8139_io_writeb(void *opaque, uint8_t addr, uint32_t val) +{ + RTL8139State *s = opaque; + + switch (addr) + { + case MAC0 ... MAC0+4: + s->phys[addr - MAC0] = val; + break; + case MAC0+5: + s->phys[addr - MAC0] = val; + qemu_format_nic_info_str(qemu_get_queue(s->nic), s->phys); + break; + case MAC0+6 ... MAC0+7: + /* reserved */ + break; + case MAR0 ... 
MAR0+7: + s->mult[addr - MAR0] = val; + break; + case ChipCmd: + rtl8139_ChipCmd_write(s, val); + break; + case Cfg9346: + rtl8139_Cfg9346_write(s, val); + break; + case TxConfig: /* Windows driver sometimes writes using byte-length call */ + rtl8139_TxConfig_writeb(s, val); + break; + case Config0: + rtl8139_Config0_write(s, val); + break; + case Config1: + rtl8139_Config1_write(s, val); + break; + case Config3: + rtl8139_Config3_write(s, val); + break; + case Config4: + rtl8139_Config4_write(s, val); + break; + case Config5: + rtl8139_Config5_write(s, val); + break; + case MediaStatus: + /* ignore */ + DPRINTF("not implemented write(b) to MediaStatus val=0x%02x\n", + val); + break; + + case HltClk: + DPRINTF("HltClk write val=0x%08x\n", val); + if (val == 'R') + { + s->clock_enabled = 1; + } + else if (val == 'H') + { + s->clock_enabled = 0; + } + break; + + case TxThresh: + DPRINTF("C+ TxThresh write(b) val=0x%02x\n", val); + s->TxThresh = val; + break; + + case TxPoll: + DPRINTF("C+ TxPoll write(b) val=0x%02x\n", val); + if (val & (1 << 7)) + { + DPRINTF("C+ TxPoll high priority transmission (not " + "implemented)\n"); + //rtl8139_cplus_transmit(s); + } + if (val & (1 << 6)) + { + DPRINTF("C+ TxPoll normal priority transmission\n"); + rtl8139_cplus_transmit(s); + } + + break; + + default: + DPRINTF("not implemented write(b) addr=0x%x val=0x%02x\n", addr, + val); + break; + } +} + +static void rtl8139_io_writew(void *opaque, uint8_t addr, uint32_t val) +{ + RTL8139State *s = opaque; + + switch (addr) + { + case IntrMask: + rtl8139_IntrMask_write(s, val); + break; + + case IntrStatus: + rtl8139_IntrStatus_write(s, val); + break; + + case MultiIntr: + rtl8139_MultiIntr_write(s, val); + break; + + case RxBufPtr: + rtl8139_RxBufPtr_write(s, val); + break; + + case BasicModeCtrl: + rtl8139_BasicModeCtrl_write(s, val); + break; + case BasicModeStatus: + rtl8139_BasicModeStatus_write(s, val); + break; + case NWayAdvert: + DPRINTF("NWayAdvert write(w) val=0x%04x\n", val); + s->NWayAdvert = val; + break; + case NWayLPAR: + DPRINTF("forbidden NWayLPAR write(w) val=0x%04x\n", val); + break; + case NWayExpansion: + DPRINTF("NWayExpansion write(w) val=0x%04x\n", val); + s->NWayExpansion = val; + break; + + case CpCmd: + rtl8139_CpCmd_write(s, val); + break; + + case IntrMitigate: + rtl8139_IntrMitigate_write(s, val); + break; + + default: + DPRINTF("ioport write(w) addr=0x%x val=0x%04x via write(b)\n", + addr, val); + + rtl8139_io_writeb(opaque, addr, val & 0xff); + rtl8139_io_writeb(opaque, addr + 1, (val >> 8) & 0xff); + break; + } +} + +static void rtl8139_set_next_tctr_time(RTL8139State *s) +{ + const uint64_t ns_per_period = (uint64_t)PCI_PERIOD << 32; + + DPRINTF("entered rtl8139_set_next_tctr_time\n"); + + /* This function is called at least once per period, so it is a good + * place to update the timer base. + * + * After one iteration of this loop the value in the Timer register does + * not change, but the device model is counting up by 2^32 ticks (approx. + * 130 seconds).
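+ * (PCI_PERIOD is the tick length here; assuming the 30 ns period of the + * 33 MHz PCI clock, 2^32 ticks come to roughly 128.8 seconds.)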
+ */ + while (s->TCTR_base + ns_per_period <= qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL)) { + s->TCTR_base += ns_per_period; + } + + if (!s->TimerInt) { + timer_del(s->timer); + } else { + uint64_t delta = (uint64_t)s->TimerInt * PCI_PERIOD; + if (s->TCTR_base + delta <= qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL)) { + delta += ns_per_period; + } + timer_mod(s->timer, s->TCTR_base + delta); + } +} + +static void rtl8139_io_writel(void *opaque, uint8_t addr, uint32_t val) +{ + RTL8139State *s = opaque; + + switch (addr) + { + case RxMissed: + DPRINTF("RxMissed clearing on write\n"); + s->RxMissed = 0; + break; + + case TxConfig: + rtl8139_TxConfig_write(s, val); + break; + + case RxConfig: + rtl8139_RxConfig_write(s, val); + break; + + case TxStatus0 ... TxStatus0+4*4-1: + rtl8139_TxStatus_write(s, addr-TxStatus0, val); + break; + + case TxAddr0 ... TxAddr0+4*4-1: + rtl8139_TxAddr_write(s, addr-TxAddr0, val); + break; + + case RxBuf: + rtl8139_RxBuf_write(s, val); + break; + + case RxRingAddrLO: + DPRINTF("C+ RxRing low bits write val=0x%08x\n", val); + s->RxRingAddrLO = val; + break; + + case RxRingAddrHI: + DPRINTF("C+ RxRing high bits write val=0x%08x\n", val); + s->RxRingAddrHI = val; + break; + + case Timer: + DPRINTF("TCTR Timer reset on write\n"); + s->TCTR_base = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + rtl8139_set_next_tctr_time(s); + break; + + case FlashReg: + DPRINTF("FlashReg TimerInt write val=0x%08x\n", val); + if (s->TimerInt != val) { + s->TimerInt = val; + rtl8139_set_next_tctr_time(s); + } + break; + + default: + DPRINTF("ioport write(l) addr=0x%x val=0x%08x via write(b)\n", + addr, val); + rtl8139_io_writeb(opaque, addr, val & 0xff); + rtl8139_io_writeb(opaque, addr + 1, (val >> 8) & 0xff); + rtl8139_io_writeb(opaque, addr + 2, (val >> 16) & 0xff); + rtl8139_io_writeb(opaque, addr + 3, (val >> 24) & 0xff); + break; + } +} + +static uint32_t rtl8139_io_readb(void *opaque, uint8_t addr) +{ + RTL8139State *s = opaque; + int ret; + + switch (addr) + { + case MAC0 ... MAC0+5: + ret = s->phys[addr - MAC0]; + break; + case MAC0+6 ... MAC0+7: + ret = 0; + break; + case MAR0 ... MAR0+7: + ret = s->mult[addr - MAR0]; + break; + case TxStatus0 ... TxStatus0+4*4-1: + ret = rtl8139_TxStatus_TxAddr_read(s, s->TxStatus, TxStatus0, + addr, 1); + break; + case ChipCmd: + ret = rtl8139_ChipCmd_read(s); + break; + case Cfg9346: + ret = rtl8139_Cfg9346_read(s); + break; + case Config0: + ret = rtl8139_Config0_read(s); + break; + case Config1: + ret = rtl8139_Config1_read(s); + break; + case Config3: + ret = rtl8139_Config3_read(s); + break; + case Config4: + ret = rtl8139_Config4_read(s); + break; + case Config5: + ret = rtl8139_Config5_read(s); + break; + + case MediaStatus: + /* The LinkDown bit of MediaStatus is inverse with link status */ + ret = 0xd0 | (~s->BasicModeStatus & 0x04); + DPRINTF("MediaStatus read 0x%x\n", ret); + break; + + case HltClk: + ret = s->clock_enabled; + DPRINTF("HltClk read 0x%x\n", ret); + break; + + case PCIRevisionID: + ret = RTL8139_PCI_REVID; + DPRINTF("PCI Revision ID read 0x%x\n", ret); + break; + + case TxThresh: + ret = s->TxThresh; + DPRINTF("C+ TxThresh read(b) val=0x%02x\n", ret); + break; + + case 0x43: /* Part of TxConfig register. 
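It lives in bits 31:24, hence the shift by 24 below.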
Windows driver tries to read it */ + ret = s->TxConfig >> 24; + DPRINTF("RTL8139C TxConfig at 0x43 read(b) val=0x%02x\n", ret); + break; + + default: + DPRINTF("not implemented read(b) addr=0x%x\n", addr); + ret = 0; + break; + } + + return ret; +} + +static uint32_t rtl8139_io_readw(void *opaque, uint8_t addr) +{ + RTL8139State *s = opaque; + uint32_t ret; + + switch (addr) + { + case TxAddr0 ... TxAddr0+4*4-1: + ret = rtl8139_TxStatus_TxAddr_read(s, s->TxAddr, TxAddr0, addr, 2); + break; + case IntrMask: + ret = rtl8139_IntrMask_read(s); + break; + + case IntrStatus: + ret = rtl8139_IntrStatus_read(s); + break; + + case MultiIntr: + ret = rtl8139_MultiIntr_read(s); + break; + + case RxBufPtr: + ret = rtl8139_RxBufPtr_read(s); + break; + + case RxBufAddr: + ret = rtl8139_RxBufAddr_read(s); + break; + + case BasicModeCtrl: + ret = rtl8139_BasicModeCtrl_read(s); + break; + case BasicModeStatus: + ret = rtl8139_BasicModeStatus_read(s); + break; + case NWayAdvert: + ret = s->NWayAdvert; + DPRINTF("NWayAdvert read(w) val=0x%04x\n", ret); + break; + case NWayLPAR: + ret = s->NWayLPAR; + DPRINTF("NWayLPAR read(w) val=0x%04x\n", ret); + break; + case NWayExpansion: + ret = s->NWayExpansion; + DPRINTF("NWayExpansion read(w) val=0x%04x\n", ret); + break; + + case CpCmd: + ret = rtl8139_CpCmd_read(s); + break; + + case IntrMitigate: + ret = rtl8139_IntrMitigate_read(s); + break; + + case TxSummary: + ret = rtl8139_TSAD_read(s); + break; + + case CSCR: + ret = rtl8139_CSCR_read(s); + break; + + default: + DPRINTF("ioport read(w) addr=0x%x via read(b)\n", addr); + + ret = rtl8139_io_readb(opaque, addr); + ret |= rtl8139_io_readb(opaque, addr + 1) << 8; + + DPRINTF("ioport read(w) addr=0x%x val=0x%04x\n", addr, ret); + break; + } + + return ret; +} + +static uint32_t rtl8139_io_readl(void *opaque, uint8_t addr) +{ + RTL8139State *s = opaque; + uint32_t ret; + + switch (addr) + { + case RxMissed: + ret = s->RxMissed; + + DPRINTF("RxMissed read val=0x%08x\n", ret); + break; + + case TxConfig: + ret = rtl8139_TxConfig_read(s); + break; + + case RxConfig: + ret = rtl8139_RxConfig_read(s); + break; + + case TxStatus0 ... TxStatus0+4*4-1: + ret = rtl8139_TxStatus_TxAddr_read(s, s->TxStatus, TxStatus0, + addr, 4); + break; + + case TxAddr0 ... 
TxAddr0+4*4-1: + ret = rtl8139_TxAddr_read(s, addr-TxAddr0); + break; + + case RxBuf: + ret = rtl8139_RxBuf_read(s); + break; + + case RxRingAddrLO: + ret = s->RxRingAddrLO; + DPRINTF("C+ RxRing low bits read val=0x%08x\n", ret); + break; + + case RxRingAddrHI: + ret = s->RxRingAddrHI; + DPRINTF("C+ RxRing high bits read val=0x%08x\n", ret); + break; + + case Timer: + ret = (qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - s->TCTR_base) / + PCI_PERIOD; + DPRINTF("TCTR Timer read val=0x%08x\n", ret); + break; + + case FlashReg: + ret = s->TimerInt; + DPRINTF("FlashReg TimerInt read val=0x%08x\n", ret); + break; + + default: + DPRINTF("ioport read(l) addr=0x%x via read(b)\n", addr); + + ret = rtl8139_io_readb(opaque, addr); + ret |= rtl8139_io_readb(opaque, addr + 1) << 8; + ret |= rtl8139_io_readb(opaque, addr + 2) << 16; + ret |= rtl8139_io_readb(opaque, addr + 3) << 24; + + DPRINTF("read(l) addr=0x%x val=%08x\n", addr, ret); + break; + } + + return ret; +} + +/* */ + +static int rtl8139_post_load(void *opaque, int version_id) +{ + RTL8139State* s = opaque; + rtl8139_set_next_tctr_time(s); + if (version_id < 4) { + s->cplus_enabled = s->CpCmd != 0; + } + + /* nc.link_down can't be migrated, so infer link_down according + * to link status bit in BasicModeStatus */ + qemu_get_queue(s->nic)->link_down = (s->BasicModeStatus & 0x04) == 0; + + return 0; +} + +static bool rtl8139_hotplug_ready_needed(void *opaque) +{ + return qdev_machine_modified(); +} + +static const VMStateDescription vmstate_rtl8139_hotplug_ready ={ + .name = "rtl8139/hotplug_ready", + .version_id = 1, + .minimum_version_id = 1, + .needed = rtl8139_hotplug_ready_needed, + .fields = (VMStateField[]) { + VMSTATE_END_OF_LIST() + } +}; + +static int rtl8139_pre_save(void *opaque) +{ + RTL8139State* s = opaque; + int64_t current_time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + + /* for migration to older versions */ + s->TCTR = (current_time - s->TCTR_base) / PCI_PERIOD; + s->rtl8139_mmio_io_addr_dummy = 0; + + return 0; +} + +static const VMStateDescription vmstate_rtl8139 = { + .name = "rtl8139", + .version_id = 5, + .minimum_version_id = 3, + .post_load = rtl8139_post_load, + .pre_save = rtl8139_pre_save, + .fields = (VMStateField[]) { + VMSTATE_PCI_DEVICE(parent_obj, RTL8139State), + VMSTATE_PARTIAL_BUFFER(phys, RTL8139State, 6), + VMSTATE_BUFFER(mult, RTL8139State), + VMSTATE_UINT32_ARRAY(TxStatus, RTL8139State, 4), + VMSTATE_UINT32_ARRAY(TxAddr, RTL8139State, 4), + + VMSTATE_UINT32(RxBuf, RTL8139State), + VMSTATE_UINT32(RxBufferSize, RTL8139State), + VMSTATE_UINT32(RxBufPtr, RTL8139State), + VMSTATE_UINT32(RxBufAddr, RTL8139State), + + VMSTATE_UINT16(IntrStatus, RTL8139State), + VMSTATE_UINT16(IntrMask, RTL8139State), + + VMSTATE_UINT32(TxConfig, RTL8139State), + VMSTATE_UINT32(RxConfig, RTL8139State), + VMSTATE_UINT32(RxMissed, RTL8139State), + VMSTATE_UINT16(CSCR, RTL8139State), + + VMSTATE_UINT8(Cfg9346, RTL8139State), + VMSTATE_UINT8(Config0, RTL8139State), + VMSTATE_UINT8(Config1, RTL8139State), + VMSTATE_UINT8(Config3, RTL8139State), + VMSTATE_UINT8(Config4, RTL8139State), + VMSTATE_UINT8(Config5, RTL8139State), + + VMSTATE_UINT8(clock_enabled, RTL8139State), + VMSTATE_UINT8(bChipCmdState, RTL8139State), + + VMSTATE_UINT16(MultiIntr, RTL8139State), + + VMSTATE_UINT16(BasicModeCtrl, RTL8139State), + VMSTATE_UINT16(BasicModeStatus, RTL8139State), + VMSTATE_UINT16(NWayAdvert, RTL8139State), + VMSTATE_UINT16(NWayLPAR, RTL8139State), + VMSTATE_UINT16(NWayExpansion, RTL8139State), + + VMSTATE_UINT16(CpCmd, RTL8139State), + 
VMSTATE_UINT8(TxThresh, RTL8139State), + + VMSTATE_UNUSED(4), + VMSTATE_MACADDR(conf.macaddr, RTL8139State), + VMSTATE_INT32(rtl8139_mmio_io_addr_dummy, RTL8139State), + + VMSTATE_UINT32(currTxDesc, RTL8139State), + VMSTATE_UINT32(currCPlusRxDesc, RTL8139State), + VMSTATE_UINT32(currCPlusTxDesc, RTL8139State), + VMSTATE_UINT32(RxRingAddrLO, RTL8139State), + VMSTATE_UINT32(RxRingAddrHI, RTL8139State), + + VMSTATE_UINT16_ARRAY(eeprom.contents, RTL8139State, EEPROM_9346_SIZE), + VMSTATE_INT32(eeprom.mode, RTL8139State), + VMSTATE_UINT32(eeprom.tick, RTL8139State), + VMSTATE_UINT8(eeprom.address, RTL8139State), + VMSTATE_UINT16(eeprom.input, RTL8139State), + VMSTATE_UINT16(eeprom.output, RTL8139State), + + VMSTATE_UINT8(eeprom.eecs, RTL8139State), + VMSTATE_UINT8(eeprom.eesk, RTL8139State), + VMSTATE_UINT8(eeprom.eedi, RTL8139State), + VMSTATE_UINT8(eeprom.eedo, RTL8139State), + + VMSTATE_UINT32(TCTR, RTL8139State), + VMSTATE_UINT32(TimerInt, RTL8139State), + VMSTATE_INT64(TCTR_base, RTL8139State), + + VMSTATE_UINT64(tally_counters.TxOk, RTL8139State), + VMSTATE_UINT64(tally_counters.RxOk, RTL8139State), + VMSTATE_UINT64(tally_counters.TxERR, RTL8139State), + VMSTATE_UINT32(tally_counters.RxERR, RTL8139State), + VMSTATE_UINT16(tally_counters.MissPkt, RTL8139State), + VMSTATE_UINT16(tally_counters.FAE, RTL8139State), + VMSTATE_UINT32(tally_counters.Tx1Col, RTL8139State), + VMSTATE_UINT32(tally_counters.TxMCol, RTL8139State), + VMSTATE_UINT64(tally_counters.RxOkPhy, RTL8139State), + VMSTATE_UINT64(tally_counters.RxOkBrd, RTL8139State), + VMSTATE_UINT32_V(tally_counters.RxOkMul, RTL8139State, 5), + VMSTATE_UINT16(tally_counters.TxAbt, RTL8139State), + VMSTATE_UINT16(tally_counters.TxUndrn, RTL8139State), + + VMSTATE_UINT32_V(cplus_enabled, RTL8139State, 4), + VMSTATE_END_OF_LIST() + }, + .subsections = (const VMStateDescription*[]) { + &vmstate_rtl8139_hotplug_ready, + NULL + } +}; + +/***********************************************************/ +/* PCI RTL8139 definitions */ + +static void rtl8139_ioport_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + switch (size) { + case 1: + rtl8139_io_writeb(opaque, addr, val); + break; + case 2: + rtl8139_io_writew(opaque, addr, val); + break; + case 4: + rtl8139_io_writel(opaque, addr, val); + break; + } +} + +static uint64_t rtl8139_ioport_read(void *opaque, hwaddr addr, + unsigned size) +{ + switch (size) { + case 1: + return rtl8139_io_readb(opaque, addr); + case 2: + return rtl8139_io_readw(opaque, addr); + case 4: + return rtl8139_io_readl(opaque, addr); + } + + return -1; +} + +static const MemoryRegionOps rtl8139_io_ops = { + .read = rtl8139_ioport_read, + .write = rtl8139_ioport_write, + .impl = { + .min_access_size = 1, + .max_access_size = 4, + }, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static void rtl8139_timer(void *opaque) +{ + RTL8139State *s = opaque; + + if (!s->clock_enabled) + { + DPRINTF(">>> timer: clock is not running\n"); + return; + } + + s->IntrStatus |= PCSTimeout; + rtl8139_update_irq(s); + rtl8139_set_next_tctr_time(s); +} + +static void pci_rtl8139_uninit(PCIDevice *dev) +{ + RTL8139State *s = RTL8139(dev); + + g_free(s->cplus_txbuffer); + s->cplus_txbuffer = NULL; + timer_free(s->timer); + qemu_del_nic(s->nic); +} + +static void rtl8139_set_link_status(NetClientState *nc) +{ + RTL8139State *s = qemu_get_nic_opaque(nc); + + if (nc->link_down) { + s->BasicModeStatus &= ~0x04; + } else { + s->BasicModeStatus |= 0x04; + } + + s->IntrStatus |= RxUnderrun; + rtl8139_update_irq(s); +} + +static NetClientInfo 
net_rtl8139_info = { + .type = NET_CLIENT_DRIVER_NIC, + .size = sizeof(NICState), + .can_receive = rtl8139_can_receive, + .receive = rtl8139_receive, + .link_status_changed = rtl8139_set_link_status, +}; + +static void pci_rtl8139_realize(PCIDevice *dev, Error **errp) +{ + RTL8139State *s = RTL8139(dev); + DeviceState *d = DEVICE(dev); + uint8_t *pci_conf; + + pci_conf = dev->config; + pci_conf[PCI_INTERRUPT_PIN] = 1; /* interrupt pin A */ + /* TODO: start of capability list, but no capability + * list bit in status register, and offset 0xdc seems unused. */ + pci_conf[PCI_CAPABILITY_LIST] = 0xdc; + + memory_region_init_io(&s->bar_io, OBJECT(s), &rtl8139_io_ops, s, + "rtl8139", 0x100); + memory_region_init_alias(&s->bar_mem, OBJECT(s), "rtl8139-mem", &s->bar_io, + 0, 0x100); + + pci_register_bar(dev, 0, PCI_BASE_ADDRESS_SPACE_IO, &s->bar_io); + pci_register_bar(dev, 1, PCI_BASE_ADDRESS_SPACE_MEMORY, &s->bar_mem); + + qemu_macaddr_default_if_unset(&s->conf.macaddr); + + /* prepare eeprom */ + s->eeprom.contents[0] = 0x8129; +#if 1 + /* PCI vendor and device ID should be mirrored here */ + s->eeprom.contents[1] = PCI_VENDOR_ID_REALTEK; + s->eeprom.contents[2] = PCI_DEVICE_ID_REALTEK_8139; +#endif + s->eeprom.contents[7] = s->conf.macaddr.a[0] | s->conf.macaddr.a[1] << 8; + s->eeprom.contents[8] = s->conf.macaddr.a[2] | s->conf.macaddr.a[3] << 8; + s->eeprom.contents[9] = s->conf.macaddr.a[4] | s->conf.macaddr.a[5] << 8; + + s->nic = qemu_new_nic(&net_rtl8139_info, &s->conf, + object_get_typename(OBJECT(dev)), d->id, s); + qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a); + + s->cplus_txbuffer = NULL; + s->cplus_txbuffer_len = 0; + s->cplus_txbuffer_offset = 0; + + s->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, rtl8139_timer, s); +} + +static void rtl8139_instance_init(Object *obj) +{ + RTL8139State *s = RTL8139(obj); + + device_add_bootindex_property(obj, &s->conf.bootindex, + "bootindex", "/ethernet-phy@0", + DEVICE(obj)); +} + +static Property rtl8139_properties[] = { + DEFINE_NIC_PROPERTIES(RTL8139State, conf), + DEFINE_PROP_END_OF_LIST(), +}; + +static void rtl8139_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + + k->realize = pci_rtl8139_realize; + k->exit = pci_rtl8139_uninit; + k->romfile = "efi-rtl8139.rom"; + k->vendor_id = PCI_VENDOR_ID_REALTEK; + k->device_id = PCI_DEVICE_ID_REALTEK_8139; + k->revision = RTL8139_PCI_REVID; /* >=0x20 is for 8139C+ */ + k->class_id = PCI_CLASS_NETWORK_ETHERNET; + dc->reset = rtl8139_reset; + dc->vmsd = &vmstate_rtl8139; + device_class_set_props(dc, rtl8139_properties); + set_bit(DEVICE_CATEGORY_NETWORK, dc->categories); +} + +static const TypeInfo rtl8139_info = { + .name = TYPE_RTL8139, + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(RTL8139State), + .class_init = rtl8139_class_init, + .instance_init = rtl8139_instance_init, + .interfaces = (InterfaceInfo[]) { + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { }, + }, +}; + +static void rtl8139_register_types(void) +{ + type_register_static(&rtl8139_info); +} + +type_init(rtl8139_register_types) diff --git a/hw/net/smc91c111.c b/hw/net/smc91c111.c new file mode 100644 index 000000000..ad778cd8f --- /dev/null +++ b/hw/net/smc91c111.c @@ -0,0 +1,834 @@ +/* + * SMSC 91C111 Ethernet interface emulation + * + * Copyright (c) 2005 CodeSourcery, LLC. 
+ * Written by Paul Brook + * + * This code is licensed under the GPL + */ + +#include "qemu/osdep.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "net/net.h" +#include "hw/irq.h" +#include "hw/net/smc91c111.h" +#include "hw/qdev-properties.h" +#include "qapi/error.h" +#include "qemu/log.h" +#include "qemu/module.h" +/* For crc32 */ +#include <zlib.h> +#include "qom/object.h" + +/* Number of 2k memory pages available. */ +#define NUM_PACKETS 4 + +#define TYPE_SMC91C111 "smc91c111" +OBJECT_DECLARE_SIMPLE_TYPE(smc91c111_state, SMC91C111) + +struct smc91c111_state { + SysBusDevice parent_obj; + + NICState *nic; + NICConf conf; + uint16_t tcr; + uint16_t rcr; + uint16_t cr; + uint16_t ctr; + uint16_t gpr; + uint16_t ptr; + uint16_t ercv; + qemu_irq irq; + int bank; + int packet_num; + int tx_alloc; + /* Bitmask of allocated packets. */ + int allocated; + int tx_fifo_len; + int tx_fifo[NUM_PACKETS]; + int rx_fifo_len; + int rx_fifo[NUM_PACKETS]; + int tx_fifo_done_len; + int tx_fifo_done[NUM_PACKETS]; + /* Packet buffer memory. */ + uint8_t data[NUM_PACKETS][2048]; + uint8_t int_level; + uint8_t int_mask; + MemoryRegion mmio; +}; + +static const VMStateDescription vmstate_smc91c111 = { + .name = "smc91c111", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT16(tcr, smc91c111_state), + VMSTATE_UINT16(rcr, smc91c111_state), + VMSTATE_UINT16(cr, smc91c111_state), + VMSTATE_UINT16(ctr, smc91c111_state), + VMSTATE_UINT16(gpr, smc91c111_state), + VMSTATE_UINT16(ptr, smc91c111_state), + VMSTATE_UINT16(ercv, smc91c111_state), + VMSTATE_INT32(bank, smc91c111_state), + VMSTATE_INT32(packet_num, smc91c111_state), + VMSTATE_INT32(tx_alloc, smc91c111_state), + VMSTATE_INT32(allocated, smc91c111_state), + VMSTATE_INT32(tx_fifo_len, smc91c111_state), + VMSTATE_INT32_ARRAY(tx_fifo, smc91c111_state, NUM_PACKETS), + VMSTATE_INT32(rx_fifo_len, smc91c111_state), + VMSTATE_INT32_ARRAY(rx_fifo, smc91c111_state, NUM_PACKETS), + VMSTATE_INT32(tx_fifo_done_len, smc91c111_state), + VMSTATE_INT32_ARRAY(tx_fifo_done, smc91c111_state, NUM_PACKETS), + VMSTATE_BUFFER_UNSAFE(data, smc91c111_state, 0, NUM_PACKETS * 2048), + VMSTATE_UINT8(int_level, smc91c111_state), + VMSTATE_UINT8(int_mask, smc91c111_state), + VMSTATE_END_OF_LIST() + } +}; + +#define RCR_SOFT_RST 0x8000 +#define RCR_STRIP_CRC 0x0200 +#define RCR_RXEN 0x0100 + +#define TCR_EPH_LOOP 0x2000 +#define TCR_NOCRC 0x0100 +#define TCR_PAD_EN 0x0080 +#define TCR_FORCOL 0x0004 +#define TCR_LOOP 0x0002 +#define TCR_TXEN 0x0001 + +#define INT_MD 0x80 +#define INT_ERCV 0x40 +#define INT_EPH 0x20 +#define INT_RX_OVRN 0x10 +#define INT_ALLOC 0x08 +#define INT_TX_EMPTY 0x04 +#define INT_TX 0x02 +#define INT_RCV 0x01 + +#define CTR_AUTO_RELEASE 0x0800 +#define CTR_RELOAD 0x0002 +#define CTR_STORE 0x0001 + +#define RS_ALGNERR 0x8000 +#define RS_BRODCAST 0x4000 +#define RS_BADCRC 0x2000 +#define RS_ODDFRAME 0x1000 +#define RS_TOOLONG 0x0800 +#define RS_TOOSHORT 0x0400 +#define RS_MULTICAST 0x0001 + +/* Update interrupt status.
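Also derives the INT_TX_EMPTY and INT_TX bits from the current FIFO state before applying the interrupt mask.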
*/ +static void smc91c111_update(smc91c111_state *s) +{ + int level; + + if (s->tx_fifo_len == 0) + s->int_level |= INT_TX_EMPTY; + if (s->tx_fifo_done_len != 0) + s->int_level |= INT_TX; + level = (s->int_level & s->int_mask) != 0; + qemu_set_irq(s->irq, level); +} + +static bool smc91c111_can_receive(smc91c111_state *s) +{ + if ((s->rcr & RCR_RXEN) == 0 || (s->rcr & RCR_SOFT_RST)) { + return true; + } + if (s->allocated == (1 << NUM_PACKETS) - 1 || + s->rx_fifo_len == NUM_PACKETS) { + return false; + } + return true; +} + +static inline void smc91c111_flush_queued_packets(smc91c111_state *s) +{ + if (smc91c111_can_receive(s)) { + qemu_flush_queued_packets(qemu_get_queue(s->nic)); + } +} + +/* Try to allocate a packet. Returns 0x80 on failure. */ +static int smc91c111_allocate_packet(smc91c111_state *s) +{ + int i; + if (s->allocated == (1 << NUM_PACKETS) - 1) { + return 0x80; + } + + for (i = 0; i < NUM_PACKETS; i++) { + if ((s->allocated & (1 << i)) == 0) + break; + } + s->allocated |= 1 << i; + return i; +} + + +/* Process a pending TX allocate. */ +static void smc91c111_tx_alloc(smc91c111_state *s) +{ + s->tx_alloc = smc91c111_allocate_packet(s); + if (s->tx_alloc == 0x80) + return; + s->int_level |= INT_ALLOC; + smc91c111_update(s); +} + +/* Remove an item from the RX FIFO. */ +static void smc91c111_pop_rx_fifo(smc91c111_state *s) +{ + int i; + + s->rx_fifo_len--; + if (s->rx_fifo_len) { + for (i = 0; i < s->rx_fifo_len; i++) + s->rx_fifo[i] = s->rx_fifo[i + 1]; + s->int_level |= INT_RCV; + } else { + s->int_level &= ~INT_RCV; + } + smc91c111_flush_queued_packets(s); + smc91c111_update(s); +} + +/* Remove an item from the TX completion FIFO. */ +static void smc91c111_pop_tx_fifo_done(smc91c111_state *s) +{ + int i; + + if (s->tx_fifo_done_len == 0) + return; + s->tx_fifo_done_len--; + for (i = 0; i < s->tx_fifo_done_len; i++) + s->tx_fifo_done[i] = s->tx_fifo_done[i + 1]; +} + +/* Release the memory allocated to a packet. */ +static void smc91c111_release_packet(smc91c111_state *s, int packet) +{ + s->allocated &= ~(1 << packet); + if (s->tx_alloc == 0x80) + smc91c111_tx_alloc(s); + smc91c111_flush_queued_packets(s); +} + +/* Flush the TX FIFO. */ +static void smc91c111_do_tx(smc91c111_state *s) +{ + int i; + int len; + int control; + int packetnum; + uint8_t *p; + + if ((s->tcr & TCR_TXEN) == 0) + return; + if (s->tx_fifo_len == 0) + return; + for (i = 0; i < s->tx_fifo_len; i++) { + packetnum = s->tx_fifo[i]; + p = &s->data[packetnum][0]; + /* Set status word. */ + *(p++) = 0x01; + *(p++) = 0x40; + len = *(p++); + len |= ((int)*(p++)) << 8; + len -= 6; + control = p[len + 1]; + if (control & 0x20) + len++; + /* ??? This overwrites the data following the buffer. + Don't know what real hardware does. */ + if (len < 64 && (s->tcr & TCR_PAD_EN)) { + memset(p + len, 0, 64 - len); + len = 64; + } +#if 0 + { + int add_crc; + + /* The card is supposed to append the CRC to the frame. + However none of the other network traffic has the CRC + appended. Suspect this is low level ethernet detail we + don't need to worry about. */ + add_crc = (control & 0x10) || (s->tcr & TCR_NOCRC) == 0; + if (add_crc) { + uint32_t crc; + + crc = crc32(~0, p, len); + memcpy(p + len, &crc, 4); + len += 4; + } + } +#endif + if (s->ctr & CTR_AUTO_RELEASE) + /* Race?
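Releasing before the send looks unsafe, but release only clears the allocation bitmask here, so the data survives until the qemu_send_packet() below.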
*/ + smc91c111_release_packet(s, packetnum); + else if (s->tx_fifo_done_len < NUM_PACKETS) + s->tx_fifo_done[s->tx_fifo_done_len++] = packetnum; + qemu_send_packet(qemu_get_queue(s->nic), p, len); + } + s->tx_fifo_len = 0; + smc91c111_update(s); +} + +/* Add a packet to the TX FIFO. */ +static void smc91c111_queue_tx(smc91c111_state *s, int packet) +{ + if (s->tx_fifo_len == NUM_PACKETS) + return; + s->tx_fifo[s->tx_fifo_len++] = packet; + smc91c111_do_tx(s); +} + +static void smc91c111_reset(DeviceState *dev) +{ + smc91c111_state *s = SMC91C111(dev); + + s->bank = 0; + s->tx_fifo_len = 0; + s->tx_fifo_done_len = 0; + s->rx_fifo_len = 0; + s->allocated = 0; + s->packet_num = 0; + s->tx_alloc = 0; + s->tcr = 0; + s->rcr = 0; + s->cr = 0xa0b1; + s->ctr = 0x1210; + s->ptr = 0; + s->ercv = 0x1f; + s->int_level = INT_TX_EMPTY; + s->int_mask = 0; + smc91c111_update(s); +} + +#define SET_LOW(name, val) s->name = (s->name & 0xff00) | val +#define SET_HIGH(name, val) s->name = (s->name & 0xff) | (val << 8) + +static void smc91c111_writeb(void *opaque, hwaddr offset, + uint32_t value) +{ + smc91c111_state *s = (smc91c111_state *)opaque; + + offset = offset & 0xf; + if (offset == 14) { + s->bank = value; + return; + } + if (offset == 15) + return; + switch (s->bank) { + case 0: + switch (offset) { + case 0: /* TCR */ + SET_LOW(tcr, value); + return; + case 1: + SET_HIGH(tcr, value); + return; + case 4: /* RCR */ + SET_LOW(rcr, value); + return; + case 5: + SET_HIGH(rcr, value); + if (s->rcr & RCR_SOFT_RST) { + smc91c111_reset(DEVICE(s)); + } + smc91c111_flush_queued_packets(s); + return; + case 10: case 11: /* RPCR */ + /* Ignored */ + return; + case 12: case 13: /* Reserved */ + return; + } + break; + + case 1: + switch (offset) { + case 0: /* CONFIG */ + SET_LOW(cr, value); + return; + case 1: + SET_HIGH(cr, value); + return; + case 2: case 3: /* BASE */ + case 4: case 5: case 6: case 7: case 8: case 9: /* IA */ + /* Not implemented. */ + return; + case 10: /* General Purpose */ + SET_LOW(gpr, value); + return; + case 11: + SET_HIGH(gpr, value); + return; + case 12: /* Control */ + if (value & 1) { + qemu_log_mask(LOG_UNIMP, + "smc91c111: EEPROM store not implemented\n"); + } + if (value & 2) { + qemu_log_mask(LOG_UNIMP, + "smc91c111: EEPROM reload not implemented\n"); + } + value &= ~3; + SET_LOW(ctr, value); + return; + case 13: + SET_HIGH(ctr, value); + return; + } + break; + + case 2: + switch (offset) { + case 0: /* MMU Command */ + switch (value >> 5) { + case 0: /* no-op */ + break; + case 1: /* Allocate for TX. */ + s->tx_alloc = 0x80; + s->int_level &= ~INT_ALLOC; + smc91c111_update(s); + smc91c111_tx_alloc(s); + break; + case 2: /* Reset MMU. */ + s->allocated = 0; + s->tx_fifo_len = 0; + s->tx_fifo_done_len = 0; + s->rx_fifo_len = 0; + s->tx_alloc = 0; + break; + case 3: /* Remove from RX FIFO. */ + smc91c111_pop_rx_fifo(s); + break; + case 4: /* Remove from RX FIFO and release. */ + if (s->rx_fifo_len > 0) { + smc91c111_release_packet(s, s->rx_fifo[0]); + } + smc91c111_pop_rx_fifo(s); + break; + case 5: /* Release. */ + smc91c111_release_packet(s, s->packet_num); + break; + case 6: /* Add to TX FIFO. */ + smc91c111_queue_tx(s, s->packet_num); + break; + case 7: /* Reset TX FIFO. */ + s->tx_fifo_len = 0; + s->tx_fifo_done_len = 0; + break; + } + return; + case 1: + /* Ignore. */ + return; + case 2: /* Packet Number Register */ + s->packet_num = value; + return; + case 3: case 4: case 5: + /* Should be read-only, but Linux writes to them anyway. Ignore.
*/ + return; + case 6: /* Pointer */ + SET_LOW(ptr, value); + return; + case 7: + SET_HIGH(ptr, value); + return; + case 8: case 9: case 10: case 11: /* Data */ + { + int p; + int n; + + if (s->ptr & 0x8000) + n = s->rx_fifo[0]; + else + n = s->packet_num; + p = s->ptr & 0x07ff; + if (s->ptr & 0x4000) { + s->ptr = (s->ptr & 0xf800) | ((s->ptr + 1) & 0x7ff); + } else { + p += (offset & 3); + } + s->data[n][p] = value; + } + return; + case 12: /* Interrupt ACK. */ + s->int_level &= ~(value & 0xd6); + if (value & INT_TX) + smc91c111_pop_tx_fifo_done(s); + smc91c111_update(s); + return; + case 13: /* Interrupt mask. */ + s->int_mask = value; + smc91c111_update(s); + return; + } + break; + + case 3: + switch (offset) { + case 0: case 1: case 2: case 3: case 4: case 5: case 6: case 7: + /* Multicast table. */ + /* Not implemented. */ + return; + case 8: case 9: /* Management Interface. */ + /* Not implemented. */ + return; + case 12: /* Early receive. */ + s->ercv = value & 0x1f; + return; + case 13: + /* Ignore. */ + return; + } + break; + } + qemu_log_mask(LOG_GUEST_ERROR, "smc91c111_write(bank:%d) Illegal register" + " 0x%" HWADDR_PRIx " = 0x%x\n", + s->bank, offset, value); +} + +static uint32_t smc91c111_readb(void *opaque, hwaddr offset) +{ + smc91c111_state *s = (smc91c111_state *)opaque; + + offset = offset & 0xf; + if (offset == 14) { + return s->bank; + } + if (offset == 15) + return 0x33; + switch (s->bank) { + case 0: + switch (offset) { + case 0: /* TCR */ + return s->tcr & 0xff; + case 1: + return s->tcr >> 8; + case 2: /* EPH Status */ + return 0; + case 3: + return 0x40; + case 4: /* RCR */ + return s->rcr & 0xff; + case 5: + return s->rcr >> 8; + case 6: /* Counter */ + case 7: + /* Not implemented. */ + return 0; + case 8: /* Memory size. */ + return NUM_PACKETS; + case 9: /* Free memory available. */ + { + int i; + int n; + n = 0; + for (i = 0; i < NUM_PACKETS; i++) { + if (s->allocated & (1 << i)) + n++; + } + return n; + } + case 10: case 11: /* RPCR */ + /* Not implemented. */ + return 0; + case 12: case 13: /* Reserved */ + return 0; + } + break; + + case 1: + switch (offset) { + case 0: /* CONFIG */ + return s->cr & 0xff; + case 1: + return s->cr >> 8; + case 2: case 3: /* BASE */ + /* Not implemented. */ + return 0; + case 4: case 5: case 6: case 7: case 8: case 9: /* IA */ + return s->conf.macaddr.a[offset - 4]; + case 10: /* General Purpose */ + return s->gpr & 0xff; + case 11: + return s->gpr >> 8; + case 12: /* Control */ + return s->ctr & 0xff; + case 13: + return s->ctr >> 8; + } + break; + + case 2: + switch (offset) { + case 0: case 1: /* MMUCR Busy bit. */ + return 0; + case 2: /* Packet Number. */ + return s->packet_num; + case 3: /* Allocation Result. */ + return s->tx_alloc; + case 4: /* TX FIFO */ + if (s->tx_fifo_done_len == 0) + return 0x80; + else + return s->tx_fifo_done[0]; + case 5: /* RX FIFO */ + if (s->rx_fifo_len == 0) + return 0x80; + else + return s->rx_fifo[0]; + case 6: /* Pointer */ + return s->ptr & 0xff; + case 7: + return (s->ptr >> 8) & 0xf7; + case 8: case 9: case 10: case 11: /* Data */ + { + int p; + int n; + + if (s->ptr & 0x8000) + n = s->rx_fifo[0]; + else + n = s->packet_num; + p = s->ptr & 0x07ff; + if (s->ptr & 0x4000) { + s->ptr = (s->ptr & 0xf800) | ((s->ptr + 1) & 0x07ff); + } else { + p += (offset & 3); + } + return s->data[n][p]; + } + case 12: /* Interrupt status. */ + return s->int_level; + case 13: /* Interrupt mask. 
*/ + return s->int_mask; + } + break; + + case 3: + switch (offset) { + case 0: case 1: case 2: case 3: case 4: case 5: case 6: case 7: + /* Multicast table. */ + /* Not implemented. */ + return 0; + case 8: /* Management Interface. */ + /* Not implemented. */ + return 0x30; + case 9: + return 0x33; + case 10: /* Revision. */ + return 0x91; + case 11: + return 0x33; + case 12: + return s->ercv; + case 13: + return 0; + } + break; + } + qemu_log_mask(LOG_GUEST_ERROR, "smc91c111_read(bank:%d) Illegal register" + " 0x%" HWADDR_PRIx "\n", + s->bank, offset); + return 0; +} + +static uint64_t smc91c111_readfn(void *opaque, hwaddr addr, unsigned size) +{ + int i; + uint32_t val = 0; + + for (i = 0; i < size; i++) { + val |= smc91c111_readb(opaque, addr + i) << (i * 8); + } + return val; +} + +static void smc91c111_writefn(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + int i = 0; + + /* 32-bit writes to offset 0xc only actually write to the bank select + * register (offset 0xe), so skip the first two bytes we would write. + */ + if (addr == 0xc && size == 4) { + i += 2; + } + + for (; i < size; i++) { + smc91c111_writeb(opaque, addr + i, + extract32(value, i * 8, 8)); + } +} + +static bool smc91c111_can_receive_nc(NetClientState *nc) +{ + smc91c111_state *s = qemu_get_nic_opaque(nc); + + return smc91c111_can_receive(s); +} + +static ssize_t smc91c111_receive(NetClientState *nc, const uint8_t *buf, size_t size) +{ + smc91c111_state *s = qemu_get_nic_opaque(nc); + int status; + int packetsize; + uint32_t crc; + int packetnum; + uint8_t *p; + + if ((s->rcr & RCR_RXEN) == 0 || (s->rcr & RCR_SOFT_RST)) + return -1; + /* Short packets are padded with zeros. Receiving a packet + < 64 bytes long is considered an error condition. */ + if (size < 64) + packetsize = 64; + else + packetsize = (size & ~1); + packetsize += 6; + crc = (s->rcr & RCR_STRIP_CRC) == 0; + if (crc) + packetsize += 4; + /* TODO: Flag overrun and receive errors. */ + if (packetsize > 2048) + return -1; + packetnum = smc91c111_allocate_packet(s); + if (packetnum == 0x80) + return -1; + s->rx_fifo[s->rx_fifo_len++] = packetnum; + + p = &s->data[packetnum][0]; + /* ??? Multicast packets? */ + status = 0; + if (size > 1518) + status |= RS_TOOLONG; + if (size & 1) + status |= RS_ODDFRAME; + *(p++) = status & 0xff; + *(p++) = status >> 8; + *(p++) = packetsize & 0xff; + *(p++) = packetsize >> 8; + memcpy(p, buf, size & ~1); + p += (size & ~1); + /* Pad short packets. */ + if (size < 64) { + int pad; + + if (size & 1) + *(p++) = buf[size - 1]; + pad = 64 - size; + memset(p, 0, pad); + p += pad; + size = 64; + } + /* It's not clear if the CRC should go before or after the last byte in + odd sized packets. Linux disables the CRC, so that's no help. + The pictures in the documentation show the CRC aligned on a 16-bit + boundary before the last odd byte, so that's what we do. */ + if (crc) { + crc = crc32(~0, buf, size); + *(p++) = crc & 0xff; crc >>= 8; + *(p++) = crc & 0xff; crc >>= 8; + *(p++) = crc & 0xff; crc >>= 8; + *(p++) = crc & 0xff; + } + if (size & 1) { + *(p++) = buf[size - 1]; + *p = 0x60; + } else { + *(p++) = 0; + *p = 0x40; + } + /* TODO: Raise early RX interrupt? 
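The threshold the guest programs for it is kept in s->ercv.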
*/ + s->int_level |= INT_RCV; + smc91c111_update(s); + + return size; +} + +static const MemoryRegionOps smc91c111_mem_ops = { + /* The special case for 32 bit writes to 0xc means we can't just + * set .impl.min/max_access_size to 1, unfortunately + */ + .read = smc91c111_readfn, + .write = smc91c111_writefn, + .valid.min_access_size = 1, + .valid.max_access_size = 4, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static NetClientInfo net_smc91c111_info = { + .type = NET_CLIENT_DRIVER_NIC, + .size = sizeof(NICState), + .can_receive = smc91c111_can_receive_nc, + .receive = smc91c111_receive, +}; + +static void smc91c111_realize(DeviceState *dev, Error **errp) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + smc91c111_state *s = SMC91C111(dev); + + memory_region_init_io(&s->mmio, OBJECT(s), &smc91c111_mem_ops, s, + "smc91c111-mmio", 16); + sysbus_init_mmio(sbd, &s->mmio); + sysbus_init_irq(sbd, &s->irq); + qemu_macaddr_default_if_unset(&s->conf.macaddr); + s->nic = qemu_new_nic(&net_smc91c111_info, &s->conf, + object_get_typename(OBJECT(dev)), dev->id, s); + qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a); + /* ??? Save/restore. */ +} + +static Property smc91c111_properties[] = { + DEFINE_NIC_PROPERTIES(smc91c111_state, conf), + DEFINE_PROP_END_OF_LIST(), +}; + +static void smc91c111_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = smc91c111_realize; + dc->reset = smc91c111_reset; + dc->vmsd = &vmstate_smc91c111; + device_class_set_props(dc, smc91c111_properties); +} + +static const TypeInfo smc91c111_info = { + .name = TYPE_SMC91C111, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(smc91c111_state), + .class_init = smc91c111_class_init, +}; + +static void smc91c111_register_types(void) +{ + type_register_static(&smc91c111_info); +} + +/* Legacy helper function. Should go away when machine config files are + implemented. */ +void smc91c111_init(NICInfo *nd, uint32_t base, qemu_irq irq) +{ + DeviceState *dev; + SysBusDevice *s; + + qemu_check_nic_model(nd, "smc91c111"); + dev = qdev_new(TYPE_SMC91C111); + qdev_set_nic_properties(dev, nd); + s = SYS_BUS_DEVICE(dev); + sysbus_realize_and_unref(s, &error_fatal); + sysbus_mmio_map(s, 0, base); + sysbus_connect_irq(s, 0, irq); +} + +type_init(smc91c111_register_types) diff --git a/hw/net/spapr_llan.c b/hw/net/spapr_llan.c new file mode 100644 index 000000000..a6876a936 --- /dev/null +++ b/hw/net/spapr_llan.c @@ -0,0 +1,885 @@ +/* + * QEMU PowerPC pSeries Logical Partition (aka sPAPR) hardware System Emulator + * + * PAPR Inter-VM Logical Lan, aka ibmveth + * + * Copyright (c) 2010,2011 David Gibson, IBM Corporation. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + * + */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "net/net.h" +#include "migration/vmstate.h" +#include "hw/ppc/spapr.h" +#include "hw/ppc/spapr_vio.h" +#include "hw/qdev-properties.h" +#include "sysemu/sysemu.h" +#include "trace.h" + +#include <libfdt.h> +#include "qom/object.h" + +#define ETH_ALEN 6 +#define MAX_PACKET_SIZE 65536 + +/* Compatibility flags for migration */ +#define SPAPRVLAN_FLAG_RX_BUF_POOLS_BIT 0 +#define SPAPRVLAN_FLAG_RX_BUF_POOLS (1 << SPAPRVLAN_FLAG_RX_BUF_POOLS_BIT) + +/* + * Virtual LAN device + */ + +typedef uint64_t vlan_bd_t; + +#define VLAN_BD_VALID 0x8000000000000000ULL +#define VLAN_BD_TOGGLE 0x4000000000000000ULL +#define VLAN_BD_NO_CSUM 0x0200000000000000ULL +#define VLAN_BD_CSUM_GOOD 0x0100000000000000ULL +#define VLAN_BD_LEN_MASK 0x00ffffff00000000ULL +#define VLAN_BD_LEN(bd) (((bd) & VLAN_BD_LEN_MASK) >> 32) +#define VLAN_BD_ADDR_MASK 0x00000000ffffffffULL +#define VLAN_BD_ADDR(bd) ((bd) & VLAN_BD_ADDR_MASK) + +#define VLAN_VALID_BD(addr, len) (VLAN_BD_VALID | \ + (((len) << 32) & VLAN_BD_LEN_MASK) | \ + (addr & VLAN_BD_ADDR_MASK)) + +#define VLAN_RXQC_TOGGLE 0x80 +#define VLAN_RXQC_VALID 0x40 +#define VLAN_RXQC_NO_CSUM 0x02 +#define VLAN_RXQC_CSUM_GOOD 0x01 + +#define VLAN_RQ_ALIGNMENT 16 +#define VLAN_RXQ_BD_OFF 0 +#define VLAN_FILTER_BD_OFF 8 +#define VLAN_RX_BDS_OFF 16 +/* + * The final 8 bytes of the buffer list is a counter of frames dropped + * because there was not a buffer in the buffer list capable of holding + * the frame. We must avoid it, or the operating system will report garbage + * for this statistic. + */ +#define VLAN_RX_BDS_LEN (SPAPR_TCE_PAGE_SIZE - VLAN_RX_BDS_OFF - 8) +#define VLAN_MAX_BUFS (VLAN_RX_BDS_LEN / 8) + +#define TYPE_VIO_SPAPR_VLAN_DEVICE "spapr-vlan" +OBJECT_DECLARE_SIMPLE_TYPE(SpaprVioVlan, VIO_SPAPR_VLAN_DEVICE) + +#define RX_POOL_MAX_BDS 4096 +#define RX_MAX_POOLS 5 + +typedef struct { + int32_t bufsize; + int32_t count; + vlan_bd_t bds[RX_POOL_MAX_BDS]; +} RxBufPool; + +struct SpaprVioVlan { + SpaprVioDevice sdev; + NICConf nicconf; + NICState *nic; + MACAddr perm_mac; + bool isopen; + hwaddr buf_list; + uint32_t add_buf_ptr, use_buf_ptr, rx_bufs; + hwaddr rxq_ptr; + QEMUTimer *rxp_timer; + uint32_t compat_flags; /* Compatibility flags for migration */ + RxBufPool *rx_pool[RX_MAX_POOLS]; /* Receive buffer descriptor pools */ +}; + +static bool spapr_vlan_can_receive(NetClientState *nc) +{ + SpaprVioVlan *dev = qemu_get_nic_opaque(nc); + + return dev->isopen && dev->rx_bufs > 0; +} + +/** + * The last 8 bytes of the receive buffer list page (that has been + * supplied by the guest with the H_REGISTER_LOGICAL_LAN call) contain + * a counter for frames that have been dropped because there was no + * suitable receive buffer available. This function is used to increase + * this counter by one.
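+ * (It is a 64-bit big-endian value, hence the vio_ldq/vio_stq accesses + * below.)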
+ */ +static void spapr_vlan_record_dropped_rx_frame(SpaprVioVlan *dev) +{ + uint64_t cnt; + + cnt = vio_ldq(&dev->sdev, dev->buf_list + 4096 - 8); + vio_stq(&dev->sdev, dev->buf_list + 4096 - 8, cnt + 1); +} + +/** + * Get buffer descriptor from one of our receive buffer pools + */ +static vlan_bd_t spapr_vlan_get_rx_bd_from_pool(SpaprVioVlan *dev, + size_t size) +{ + vlan_bd_t bd; + int pool; + + for (pool = 0; pool < RX_MAX_POOLS; pool++) { + if (dev->rx_pool[pool]->count > 0 && + dev->rx_pool[pool]->bufsize >= size + 8) { + break; + } + } + if (pool == RX_MAX_POOLS) { + /* Failed to find a suitable buffer */ + return 0; + } + + + trace_spapr_vlan_get_rx_bd_from_pool_found(pool, + dev->rx_pool[pool]->count, + dev->rx_bufs); + + /* Remove the buffer from the pool */ + dev->rx_pool[pool]->count--; + bd = dev->rx_pool[pool]->bds[dev->rx_pool[pool]->count]; + dev->rx_pool[pool]->bds[dev->rx_pool[pool]->count] = 0; + + return bd; +} + +/** + * Get buffer descriptor from the receive buffer list page that has been + * supplied by the guest with the H_REGISTER_LOGICAL_LAN call + */ +static vlan_bd_t spapr_vlan_get_rx_bd_from_page(SpaprVioVlan *dev, + size_t size) +{ + int buf_ptr = dev->use_buf_ptr; + vlan_bd_t bd; + + do { + buf_ptr += 8; + if (buf_ptr >= VLAN_RX_BDS_LEN + VLAN_RX_BDS_OFF) { + buf_ptr = VLAN_RX_BDS_OFF; + } + + bd = vio_ldq(&dev->sdev, dev->buf_list + buf_ptr); + + trace_spapr_vlan_get_rx_bd_from_page(buf_ptr, (uint64_t)bd); + } while ((!(bd & VLAN_BD_VALID) || VLAN_BD_LEN(bd) < size + 8) + && buf_ptr != dev->use_buf_ptr); + + if (!(bd & VLAN_BD_VALID) || VLAN_BD_LEN(bd) < size + 8) { + /* Failed to find a suitable buffer */ + return 0; + } + + /* Remove the buffer from the pool */ + dev->use_buf_ptr = buf_ptr; + vio_stq(&dev->sdev, dev->buf_list + dev->use_buf_ptr, 0); + + trace_spapr_vlan_get_rx_bd_from_page_found(dev->use_buf_ptr, dev->rx_bufs); + + return bd; +} + +static ssize_t spapr_vlan_receive(NetClientState *nc, const uint8_t *buf, + size_t size) +{ + SpaprVioVlan *dev = qemu_get_nic_opaque(nc); + SpaprVioDevice *sdev = VIO_SPAPR_DEVICE(dev); + vlan_bd_t rxq_bd = vio_ldq(sdev, dev->buf_list + VLAN_RXQ_BD_OFF); + vlan_bd_t bd; + uint64_t handle; + uint8_t control; + + trace_spapr_vlan_receive(sdev->qdev.id, dev->rx_bufs); + + if (!dev->isopen) { + return -1; + } + + if (!dev->rx_bufs) { + spapr_vlan_record_dropped_rx_frame(dev); + return 0; + } + + if (dev->compat_flags & SPAPRVLAN_FLAG_RX_BUF_POOLS) { + bd = spapr_vlan_get_rx_bd_from_pool(dev, size); + } else { + bd = spapr_vlan_get_rx_bd_from_page(dev, size); + } + if (!bd) { + spapr_vlan_record_dropped_rx_frame(dev); + return 0; + } + + dev->rx_bufs--; + + /* Transfer the packet data */ + if (spapr_vio_dma_write(sdev, VLAN_BD_ADDR(bd) + 8, buf, size) < 0) { + return -1; + } + + trace_spapr_vlan_receive_dma_completed(); + + /* Update the receive queue */ + control = VLAN_RXQC_TOGGLE | VLAN_RXQC_VALID; + if (rxq_bd & VLAN_BD_TOGGLE) { + control ^= VLAN_RXQC_TOGGLE; + } + + handle = vio_ldq(sdev, VLAN_BD_ADDR(bd)); + vio_stq(sdev, VLAN_BD_ADDR(rxq_bd) + dev->rxq_ptr + 8, handle); + vio_stl(sdev, VLAN_BD_ADDR(rxq_bd) + dev->rxq_ptr + 4, size); + vio_sth(sdev, VLAN_BD_ADDR(rxq_bd) + dev->rxq_ptr + 2, 8); + vio_stb(sdev, VLAN_BD_ADDR(rxq_bd) + dev->rxq_ptr, control); + + trace_spapr_vlan_receive_wrote(dev->rxq_ptr, + vio_ldq(sdev, VLAN_BD_ADDR(rxq_bd) + + dev->rxq_ptr), + vio_ldq(sdev, VLAN_BD_ADDR(rxq_bd) + + dev->rxq_ptr + 8)); + + dev->rxq_ptr += 16; + if (dev->rxq_ptr >= VLAN_BD_LEN(rxq_bd)) { + dev->rxq_ptr = 0; + 
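/* wrapped around: flip the queue toggle bit so the guest can tell this pass's entries from the previous one's */ +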
vio_stq(sdev, dev->buf_list + VLAN_RXQ_BD_OFF, rxq_bd ^ VLAN_BD_TOGGLE); + } + + if (sdev->signal_state & 1) { + spapr_vio_irq_pulse(sdev); + } + + return size; +} + +static NetClientInfo net_spapr_vlan_info = { + .type = NET_CLIENT_DRIVER_NIC, + .size = sizeof(NICState), + .can_receive = spapr_vlan_can_receive, + .receive = spapr_vlan_receive, +}; + +static void spapr_vlan_flush_rx_queue(void *opaque) +{ + SpaprVioVlan *dev = opaque; + + qemu_flush_queued_packets(qemu_get_queue(dev->nic)); +} + +static void spapr_vlan_reset_rx_pool(RxBufPool *rxp) +{ + /* + * Use INT_MAX as bufsize so that unused buffers are moved to the end + * of the list during the qsort in spapr_vlan_add_rxbuf_to_pool() later. + */ + rxp->bufsize = INT_MAX; + rxp->count = 0; + memset(rxp->bds, 0, sizeof(rxp->bds)); +} + +static void spapr_vlan_reset(SpaprVioDevice *sdev) +{ + SpaprVioVlan *dev = VIO_SPAPR_VLAN_DEVICE(sdev); + int i; + + dev->buf_list = 0; + dev->rx_bufs = 0; + dev->isopen = 0; + + if (dev->compat_flags & SPAPRVLAN_FLAG_RX_BUF_POOLS) { + for (i = 0; i < RX_MAX_POOLS; i++) { + spapr_vlan_reset_rx_pool(dev->rx_pool[i]); + } + } + + memcpy(&dev->nicconf.macaddr.a, &dev->perm_mac.a, + sizeof(dev->nicconf.macaddr.a)); + qemu_format_nic_info_str(qemu_get_queue(dev->nic), dev->nicconf.macaddr.a); +} + +static void spapr_vlan_realize(SpaprVioDevice *sdev, Error **errp) +{ + SpaprVioVlan *dev = VIO_SPAPR_VLAN_DEVICE(sdev); + + qemu_macaddr_default_if_unset(&dev->nicconf.macaddr); + + memcpy(&dev->perm_mac.a, &dev->nicconf.macaddr.a, sizeof(dev->perm_mac.a)); + + dev->nic = qemu_new_nic(&net_spapr_vlan_info, &dev->nicconf, + object_get_typename(OBJECT(sdev)), sdev->qdev.id, dev); + qemu_format_nic_info_str(qemu_get_queue(dev->nic), dev->nicconf.macaddr.a); + + dev->rxp_timer = timer_new_us(QEMU_CLOCK_VIRTUAL, spapr_vlan_flush_rx_queue, + dev); +} + +static void spapr_vlan_instance_init(Object *obj) +{ + SpaprVioVlan *dev = VIO_SPAPR_VLAN_DEVICE(obj); + int i; + + device_add_bootindex_property(obj, &dev->nicconf.bootindex, + "bootindex", "", + DEVICE(dev)); + + if (dev->compat_flags & SPAPRVLAN_FLAG_RX_BUF_POOLS) { + for (i = 0; i < RX_MAX_POOLS; i++) { + dev->rx_pool[i] = g_new(RxBufPool, 1); + spapr_vlan_reset_rx_pool(dev->rx_pool[i]); + } + } +} + +static void spapr_vlan_instance_finalize(Object *obj) +{ + SpaprVioVlan *dev = VIO_SPAPR_VLAN_DEVICE(obj); + int i; + + if (dev->compat_flags & SPAPRVLAN_FLAG_RX_BUF_POOLS) { + for (i = 0; i < RX_MAX_POOLS; i++) { + g_free(dev->rx_pool[i]); + dev->rx_pool[i] = NULL; + } + } + + if (dev->rxp_timer) { + timer_free(dev->rxp_timer); + } +} + +void spapr_vlan_create(SpaprVioBus *bus, NICInfo *nd) +{ + DeviceState *dev; + + dev = qdev_new("spapr-vlan"); + + qdev_set_nic_properties(dev, nd); + + qdev_realize_and_unref(dev, &bus->bus, &error_fatal); +} + +static int spapr_vlan_devnode(SpaprVioDevice *dev, void *fdt, int node_off) +{ + SpaprVioVlan *vdev = VIO_SPAPR_VLAN_DEVICE(dev); + uint8_t padded_mac[8] = {0, 0}; + int ret; + + /* Some old phyp versions give the mac address in an 8-byte + * property. The kernel driver (before 3.10) has an insane workaround; + * rather than doing the obvious thing and checking the property + * length, it checks whether the first byte has 0b10 in the low + * bits. If a correct 6-byte property has a different first byte + * the kernel will get the wrong mac address, overrunning its + * buffer in the process (read only, thank goodness). + * + * Here we return a 6-byte address unless that would break a pre-3.10 + * driver. 
In that case we return a padded 8-byte address to allow the old + * workaround to succeed. */ + if ((vdev->nicconf.macaddr.a[0] & 0x3) == 0x2) { + ret = fdt_setprop(fdt, node_off, "local-mac-address", + &vdev->nicconf.macaddr, ETH_ALEN); + } else { + memcpy(&padded_mac[2], &vdev->nicconf.macaddr, ETH_ALEN); + ret = fdt_setprop(fdt, node_off, "local-mac-address", + padded_mac, sizeof(padded_mac)); + } + if (ret < 0) { + return ret; + } + + ret = fdt_setprop_cell(fdt, node_off, "ibm,mac-address-filters", 0); + if (ret < 0) { + return ret; + } + + return 0; +} + +static int check_bd(SpaprVioVlan *dev, vlan_bd_t bd, + target_ulong alignment) +{ + if ((VLAN_BD_ADDR(bd) % alignment) + || (VLAN_BD_LEN(bd) % alignment)) { + return -1; + } + + if (!spapr_vio_dma_valid(&dev->sdev, VLAN_BD_ADDR(bd), + VLAN_BD_LEN(bd), DMA_DIRECTION_FROM_DEVICE) + || !spapr_vio_dma_valid(&dev->sdev, VLAN_BD_ADDR(bd), + VLAN_BD_LEN(bd), DMA_DIRECTION_TO_DEVICE)) { + return -1; + } + + return 0; +} + +static target_ulong h_register_logical_lan(PowerPCCPU *cpu, + SpaprMachineState *spapr, + target_ulong opcode, + target_ulong *args) +{ + target_ulong reg = args[0]; + target_ulong buf_list = args[1]; + target_ulong rec_queue = args[2]; + target_ulong filter_list = args[3]; + SpaprVioDevice *sdev = spapr_vio_find_by_reg(spapr->vio_bus, reg); + SpaprVioVlan *dev = VIO_SPAPR_VLAN_DEVICE(sdev); + vlan_bd_t filter_list_bd; + + if (!dev) { + return H_PARAMETER; + } + + if (dev->isopen) { + hcall_dprintf("H_REGISTER_LOGICAL_LAN called twice without " + "H_FREE_LOGICAL_LAN\n"); + return H_RESOURCE; + } + + if (check_bd(dev, VLAN_VALID_BD(buf_list, SPAPR_TCE_PAGE_SIZE), + SPAPR_TCE_PAGE_SIZE) < 0) { + hcall_dprintf("Bad buf_list 0x" TARGET_FMT_lx "\n", buf_list); + return H_PARAMETER; + } + + filter_list_bd = VLAN_VALID_BD(filter_list, SPAPR_TCE_PAGE_SIZE); + if (check_bd(dev, filter_list_bd, SPAPR_TCE_PAGE_SIZE) < 0) { + hcall_dprintf("Bad filter_list 0x" TARGET_FMT_lx "\n", filter_list); + return H_PARAMETER; + } + + if (!(rec_queue & VLAN_BD_VALID) + || (check_bd(dev, rec_queue, VLAN_RQ_ALIGNMENT) < 0)) { + hcall_dprintf("Bad receive queue\n"); + return H_PARAMETER; + } + + dev->buf_list = buf_list; + sdev->signal_state = 0; + + rec_queue &= ~VLAN_BD_TOGGLE; + + /* Initialize the buffer list */ + vio_stq(sdev, buf_list, rec_queue); + vio_stq(sdev, buf_list + 8, filter_list_bd); + spapr_vio_dma_set(sdev, buf_list + VLAN_RX_BDS_OFF, 0, + SPAPR_TCE_PAGE_SIZE - VLAN_RX_BDS_OFF); + dev->add_buf_ptr = VLAN_RX_BDS_OFF - 8; + dev->use_buf_ptr = VLAN_RX_BDS_OFF - 8; + dev->rx_bufs = 0; + dev->rxq_ptr = 0; + + /* Initialize the receive queue */ + spapr_vio_dma_set(sdev, VLAN_BD_ADDR(rec_queue), 0, VLAN_BD_LEN(rec_queue)); + + dev->isopen = 1; + qemu_flush_queued_packets(qemu_get_queue(dev->nic)); + + return H_SUCCESS; +} + + +static target_ulong h_free_logical_lan(PowerPCCPU *cpu, + SpaprMachineState *spapr, + target_ulong opcode, target_ulong *args) +{ + target_ulong reg = args[0]; + SpaprVioDevice *sdev = spapr_vio_find_by_reg(spapr->vio_bus, reg); + SpaprVioVlan *dev = VIO_SPAPR_VLAN_DEVICE(sdev); + + if (!dev) { + return H_PARAMETER; + } + + if (!dev->isopen) { + hcall_dprintf("H_FREE_LOGICAL_LAN called without " + "H_REGISTER_LOGICAL_LAN\n"); + return H_RESOURCE; + } + + spapr_vlan_reset(sdev); + return H_SUCCESS; +} + +/** + * Used for qsort, this function compares two RxBufPools by size. 
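+ * Unused pools keep bufsize INT_MAX (see spapr_vlan_reset_rx_pool), so they + * sort behind every pool that is in use.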
+ */ +static int rx_pool_size_compare(const void *p1, const void *p2) +{ + const RxBufPool *pool1 = *(RxBufPool **)p1; + const RxBufPool *pool2 = *(RxBufPool **)p2; + + if (pool1->bufsize < pool2->bufsize) { + return -1; + } + return pool1->bufsize > pool2->bufsize; +} + +/** + * Search for a matching buffer pool with exact matching size, + * or return -1 if no matching pool has been found. + */ +static int spapr_vlan_get_rx_pool_id(SpaprVioVlan *dev, int size) +{ + int pool; + + for (pool = 0; pool < RX_MAX_POOLS; pool++) { + if (dev->rx_pool[pool]->bufsize == size) { + return pool; + } + } + + return -1; +} + +/** + * Enqueuing receive buffer by adding it to one of our receive buffer pools + */ +static target_long spapr_vlan_add_rxbuf_to_pool(SpaprVioVlan *dev, + target_ulong buf) +{ + int size = VLAN_BD_LEN(buf); + int pool; + + pool = spapr_vlan_get_rx_pool_id(dev, size); + if (pool < 0) { + /* + * No matching pool found? Try to use a new one. If the guest used all + * pools before, but changed the size of one pool in the meantime, we might + * need to recycle that pool here (if it's empty already). Thus scan + * all buffer pools now, starting with the last (likely empty) one. + */ + for (pool = RX_MAX_POOLS - 1; pool >= 0 ; pool--) { + if (dev->rx_pool[pool]->count == 0) { + dev->rx_pool[pool]->bufsize = size; + /* + * Sort pools by size so that spapr_vlan_receive() + * can later find the smallest buffer pool easily. + */ + qsort(dev->rx_pool, RX_MAX_POOLS, sizeof(dev->rx_pool[0]), + rx_pool_size_compare); + pool = spapr_vlan_get_rx_pool_id(dev, size); + trace_spapr_vlan_add_rxbuf_to_pool_create(pool, + VLAN_BD_LEN(buf)); + break; + } + } + } + /* Still no usable pool? Give up */ + if (pool < 0 || dev->rx_pool[pool]->count >= RX_POOL_MAX_BDS) { + return H_RESOURCE; + } + + trace_spapr_vlan_add_rxbuf_to_pool(pool, VLAN_BD_LEN(buf), + dev->rx_pool[pool]->count); + + dev->rx_pool[pool]->bds[dev->rx_pool[pool]->count++] = buf; + + return 0; +} + +/** + * This is the old way of enqueuing receive buffers: Add it to the rx queue + * page that has been supplied by the guest (which is quite limited in size). 
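+ * The page holds at most VLAN_MAX_BUFS buffer descriptors.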
+ */ +static target_long spapr_vlan_add_rxbuf_to_page(SpaprVioVlan *dev, + target_ulong buf) +{ + vlan_bd_t bd; + + if (dev->rx_bufs >= VLAN_MAX_BUFS) { + return H_RESOURCE; + } + + do { + dev->add_buf_ptr += 8; + if (dev->add_buf_ptr >= VLAN_RX_BDS_LEN + VLAN_RX_BDS_OFF) { + dev->add_buf_ptr = VLAN_RX_BDS_OFF; + } + + bd = vio_ldq(&dev->sdev, dev->buf_list + dev->add_buf_ptr); + } while (bd & VLAN_BD_VALID); + + vio_stq(&dev->sdev, dev->buf_list + dev->add_buf_ptr, buf); + + trace_spapr_vlan_add_rxbuf_to_page(dev->add_buf_ptr, dev->rx_bufs, buf); + + return 0; +} + +static target_ulong h_add_logical_lan_buffer(PowerPCCPU *cpu, + SpaprMachineState *spapr, + target_ulong opcode, + target_ulong *args) +{ + target_ulong reg = args[0]; + target_ulong buf = args[1]; + SpaprVioDevice *sdev = spapr_vio_find_by_reg(spapr->vio_bus, reg); + SpaprVioVlan *dev = VIO_SPAPR_VLAN_DEVICE(sdev); + target_long ret; + + trace_spapr_vlan_h_add_logical_lan_buffer(reg, buf); + + if (!sdev) { + hcall_dprintf("Bad device\n"); + return H_PARAMETER; + } + + if ((check_bd(dev, buf, 4) < 0) + || (VLAN_BD_LEN(buf) < 16)) { + hcall_dprintf("Bad buffer enqueued\n"); + return H_PARAMETER; + } + + if (!dev->isopen) { + return H_RESOURCE; + } + + if (dev->compat_flags & SPAPRVLAN_FLAG_RX_BUF_POOLS) { + ret = spapr_vlan_add_rxbuf_to_pool(dev, buf); + } else { + ret = spapr_vlan_add_rxbuf_to_page(dev, buf); + } + if (ret) { + return ret; + } + + dev->rx_bufs++; + + /* + * Give guest some more time to add additional RX buffers before we + * flush the receive queue, so that e.g. fragmented IP packets can + * be passed to the guest in one go later (instead of passing single + * fragments if there is only one receive buffer available). + */ + timer_mod(dev->rxp_timer, qemu_clock_get_us(QEMU_CLOCK_VIRTUAL) + 500); + + return H_SUCCESS; +} + +static target_ulong h_send_logical_lan(PowerPCCPU *cpu, + SpaprMachineState *spapr, + target_ulong opcode, target_ulong *args) +{ + target_ulong reg = args[0]; + target_ulong *bufs = args + 1; + target_ulong continue_token = args[7]; + SpaprVioDevice *sdev = spapr_vio_find_by_reg(spapr->vio_bus, reg); + SpaprVioVlan *dev = VIO_SPAPR_VLAN_DEVICE(sdev); + unsigned total_len; + uint8_t *p; + g_autofree uint8_t *lbuf = NULL; + int i, nbufs; + int ret; + + trace_spapr_vlan_h_send_logical_lan(reg, continue_token); + + if (!sdev) { + return H_PARAMETER; + } + + trace_spapr_vlan_h_send_logical_lan_rxbufs(dev->rx_bufs); + + if (!dev->isopen) { + return H_DROPPED; + } + + if (continue_token) { + return H_HARDWARE; /* FIXME actually handle this */ + } + + total_len = 0; + for (i = 0; i < 6; i++) { + trace_spapr_vlan_h_send_logical_lan_buf_desc(bufs[i]); + if (!(bufs[i] & VLAN_BD_VALID)) { + break; + } + total_len += VLAN_BD_LEN(bufs[i]); + } + + nbufs = i; + trace_spapr_vlan_h_send_logical_lan_total(nbufs, total_len); + + if (total_len == 0) { + return H_SUCCESS; + } + + if (total_len > MAX_PACKET_SIZE) { + /* Don't let the guest force too large an allocation */ + return H_RESOURCE; + } + + lbuf = g_malloc(total_len); + p = lbuf; + for (i = 0; i < nbufs; i++) { + ret = spapr_vio_dma_read(sdev, VLAN_BD_ADDR(bufs[i]), + p, VLAN_BD_LEN(bufs[i])); + if (ret < 0) { + return ret; + } + + p += VLAN_BD_LEN(bufs[i]); + } + + qemu_send_packet(qemu_get_queue(dev->nic), lbuf, total_len); + + return H_SUCCESS; +} + +static target_ulong h_multicast_ctrl(PowerPCCPU *cpu, SpaprMachineState *spapr, + target_ulong opcode, target_ulong *args) +{ + target_ulong reg = args[0]; + SpaprVioDevice *dev = 
spapr_vio_find_by_reg(spapr->vio_bus, reg); + + if (!dev) { + return H_PARAMETER; + } + + return H_SUCCESS; +} + +static target_ulong h_change_logical_lan_mac(PowerPCCPU *cpu, + SpaprMachineState *spapr, + target_ulong opcode, + target_ulong *args) +{ + target_ulong reg = args[0]; + target_ulong macaddr = args[1]; + SpaprVioDevice *sdev = spapr_vio_find_by_reg(spapr->vio_bus, reg); + SpaprVioVlan *dev = VIO_SPAPR_VLAN_DEVICE(sdev); + int i; + + for (i = 0; i < ETH_ALEN; i++) { + dev->nicconf.macaddr.a[ETH_ALEN - i - 1] = macaddr & 0xff; + macaddr >>= 8; + } + + qemu_format_nic_info_str(qemu_get_queue(dev->nic), dev->nicconf.macaddr.a); + + return H_SUCCESS; +} + +static Property spapr_vlan_properties[] = { + DEFINE_SPAPR_PROPERTIES(SpaprVioVlan, sdev), + DEFINE_NIC_PROPERTIES(SpaprVioVlan, nicconf), + DEFINE_PROP_BIT("use-rx-buffer-pools", SpaprVioVlan, + compat_flags, SPAPRVLAN_FLAG_RX_BUF_POOLS_BIT, true), + DEFINE_PROP_END_OF_LIST(), +}; + +static bool spapr_vlan_rx_buffer_pools_needed(void *opaque) +{ + SpaprVioVlan *dev = opaque; + + return (dev->compat_flags & SPAPRVLAN_FLAG_RX_BUF_POOLS) != 0; +} + +static const VMStateDescription vmstate_rx_buffer_pool = { + .name = "spapr_llan/rx_buffer_pool", + .version_id = 1, + .minimum_version_id = 1, + .needed = spapr_vlan_rx_buffer_pools_needed, + .fields = (VMStateField[]) { + VMSTATE_INT32(bufsize, RxBufPool), + VMSTATE_INT32(count, RxBufPool), + VMSTATE_UINT64_ARRAY(bds, RxBufPool, RX_POOL_MAX_BDS), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_rx_pools = { + .name = "spapr_llan/rx_pools", + .version_id = 1, + .minimum_version_id = 1, + .needed = spapr_vlan_rx_buffer_pools_needed, + .fields = (VMStateField[]) { + VMSTATE_ARRAY_OF_POINTER_TO_STRUCT(rx_pool, SpaprVioVlan, + RX_MAX_POOLS, 1, + vmstate_rx_buffer_pool, RxBufPool), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_spapr_llan = { + .name = "spapr_llan", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_SPAPR_VIO(sdev, SpaprVioVlan), + /* LLAN state */ + VMSTATE_BOOL(isopen, SpaprVioVlan), + VMSTATE_UINT64(buf_list, SpaprVioVlan), + VMSTATE_UINT32(add_buf_ptr, SpaprVioVlan), + VMSTATE_UINT32(use_buf_ptr, SpaprVioVlan), + VMSTATE_UINT32(rx_bufs, SpaprVioVlan), + VMSTATE_UINT64(rxq_ptr, SpaprVioVlan), + + VMSTATE_END_OF_LIST() + }, + .subsections = (const VMStateDescription * []) { + &vmstate_rx_pools, + NULL + } +}; + +static void spapr_vlan_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + SpaprVioDeviceClass *k = VIO_SPAPR_DEVICE_CLASS(klass); + + k->realize = spapr_vlan_realize; + k->reset = spapr_vlan_reset; + k->devnode = spapr_vlan_devnode; + k->dt_name = "l-lan"; + k->dt_type = "network"; + k->dt_compatible = "IBM,l-lan"; + k->signal_mask = 0x1; + set_bit(DEVICE_CATEGORY_NETWORK, dc->categories); + device_class_set_props(dc, spapr_vlan_properties); + k->rtce_window_size = 0x10000000; + dc->vmsd = &vmstate_spapr_llan; +} + +static const TypeInfo spapr_vlan_info = { + .name = TYPE_VIO_SPAPR_VLAN_DEVICE, + .parent = TYPE_VIO_SPAPR_DEVICE, + .instance_size = sizeof(SpaprVioVlan), + .class_init = spapr_vlan_class_init, + .instance_init = spapr_vlan_instance_init, + .instance_finalize = spapr_vlan_instance_finalize, +}; + +static void spapr_vlan_register_types(void) +{ + spapr_register_hypercall(H_REGISTER_LOGICAL_LAN, h_register_logical_lan); + spapr_register_hypercall(H_FREE_LOGICAL_LAN, h_free_logical_lan); + 
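Each spapr_register_hypercall() call here records a handler in an opcode-indexed table that is consulted on every guest hypercall exit; the real table and its bounds checks live in hw/ppc/spapr_hcall.c. A minimal sketch of that registration/dispatch shape, with hypothetical names and a stand-in opcode bound (PAPR opcodes are multiples of 4, hence the division):

#include <stddef.h>
#include <stdint.h>

typedef uint64_t target_ulong;
typedef target_ulong (*hcall_fn)(target_ulong opcode, target_ulong *args);

#define MAX_OPCODE 0x450   /* stand-in bound, not the real MAX_HCALL_OPCODE */
static hcall_fn hcall_table[MAX_OPCODE / 4 + 1];

static void register_hypercall(target_ulong opcode, hcall_fn fn)
{
    hcall_table[opcode / 4] = fn;   /* opcode & 0x3 is 0 for PAPR hcalls */
}

static target_ulong do_hypercall(target_ulong opcode, target_ulong *args)
{
    hcall_fn fn = opcode <= MAX_OPCODE ? hcall_table[opcode / 4] : NULL;

    return fn ? fn(opcode, args) : (target_ulong)-2 /* H_FUNCTION */;
}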
spapr_register_hypercall(H_SEND_LOGICAL_LAN, h_send_logical_lan); + spapr_register_hypercall(H_ADD_LOGICAL_LAN_BUFFER, + h_add_logical_lan_buffer); + spapr_register_hypercall(H_MULTICAST_CTRL, h_multicast_ctrl); + spapr_register_hypercall(H_CHANGE_LOGICAL_LAN_MAC, + h_change_logical_lan_mac); + type_register_static(&spapr_vlan_info); +} + +type_init(spapr_vlan_register_types) diff --git a/hw/net/stellaris_enet.c b/hw/net/stellaris_enet.c new file mode 100644 index 000000000..8dd60783d --- /dev/null +++ b/hw/net/stellaris_enet.c @@ -0,0 +1,526 @@ +/* + * Luminary Micro Stellaris Ethernet Controller + * + * Copyright (c) 2007 CodeSourcery. + * Written by Paul Brook + * + * This code is licensed under the GPL. + */ + +#include "qemu/osdep.h" +#include "hw/irq.h" +#include "hw/qdev-properties.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "net/net.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include <zlib.h> /* for crc32 */ +#include "qom/object.h" + +//#define DEBUG_STELLARIS_ENET 1 + +#ifdef DEBUG_STELLARIS_ENET +#define DPRINTF(fmt, ...) \ +do { printf("stellaris_enet: " fmt , ## __VA_ARGS__); } while (0) +#define BADF(fmt, ...) \ +do { fprintf(stderr, "stellaris_enet: error: " fmt , ## __VA_ARGS__); exit(1);} while (0) +#else +#define DPRINTF(fmt, ...) do {} while(0) +#define BADF(fmt, ...) \ +do { fprintf(stderr, "stellaris_enet: error: " fmt , ## __VA_ARGS__);} while (0) +#endif + +#define SE_INT_RX 0x01 +#define SE_INT_TXER 0x02 +#define SE_INT_TXEMP 0x04 +#define SE_INT_FOV 0x08 +#define SE_INT_RXER 0x10 +#define SE_INT_MD 0x20 +#define SE_INT_PHY 0x40 + +#define SE_RCTL_RXEN 0x01 +#define SE_RCTL_AMUL 0x02 +#define SE_RCTL_PRMS 0x04 +#define SE_RCTL_BADCRC 0x08 +#define SE_RCTL_RSTFIFO 0x10 + +#define SE_TCTL_TXEN 0x01 +#define SE_TCTL_PADEN 0x02 +#define SE_TCTL_CRC 0x04 +#define SE_TCTL_DUPLEX 0x08 + +#define TYPE_STELLARIS_ENET "stellaris_enet" +OBJECT_DECLARE_SIMPLE_TYPE(stellaris_enet_state, STELLARIS_ENET) + +typedef struct { + uint8_t data[2048]; + uint32_t len; +} StellarisEnetRxFrame; + +struct stellaris_enet_state { + SysBusDevice parent_obj; + + uint32_t ris; + uint32_t im; + uint32_t rctl; + uint32_t tctl; + uint32_t thr; + uint32_t mctl; + uint32_t mdv; + uint32_t mtxd; + uint32_t mrxd; + uint32_t np; + uint32_t tx_fifo_len; + uint8_t tx_fifo[2048]; + /* Real hardware has a 2k fifo, which works out to be at most 31 packets. + We implement a full 31 packet fifo. */ + StellarisEnetRxFrame rx[31]; + uint32_t rx_fifo_offset; + uint32_t next_packet; + NICState *nic; + NICConf conf; + qemu_irq irq; + MemoryRegion mmio; +}; + +static const VMStateDescription vmstate_rx_frame = { + .name = "stellaris_enet/rx_frame", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT8_ARRAY(data, StellarisEnetRxFrame, 2048), + VMSTATE_UINT32(len, StellarisEnetRxFrame), + VMSTATE_END_OF_LIST() + } +}; + +static int stellaris_enet_post_load(void *opaque, int version_id) +{ + stellaris_enet_state *s = opaque; + int i; + + /* Sanitize inbound state. Note that next_packet is an index but + * np is a size; hence their valid upper bounds differ.
+ */ + if (s->next_packet >= ARRAY_SIZE(s->rx)) { + return -1; + } + + if (s->np > ARRAY_SIZE(s->rx)) { + return -1; + } + + for (i = 0; i < ARRAY_SIZE(s->rx); i++) { + if (s->rx[i].len > ARRAY_SIZE(s->rx[i].data)) { + return -1; + } + } + + if (s->rx_fifo_offset > ARRAY_SIZE(s->rx[0].data) - 4) { + return -1; + } + + if (s->tx_fifo_len > ARRAY_SIZE(s->tx_fifo)) { + return -1; + } + + return 0; +} + +static const VMStateDescription vmstate_stellaris_enet = { + .name = "stellaris_enet", + .version_id = 2, + .minimum_version_id = 2, + .post_load = stellaris_enet_post_load, + .fields = (VMStateField[]) { + VMSTATE_UINT32(ris, stellaris_enet_state), + VMSTATE_UINT32(im, stellaris_enet_state), + VMSTATE_UINT32(rctl, stellaris_enet_state), + VMSTATE_UINT32(tctl, stellaris_enet_state), + VMSTATE_UINT32(thr, stellaris_enet_state), + VMSTATE_UINT32(mctl, stellaris_enet_state), + VMSTATE_UINT32(mdv, stellaris_enet_state), + VMSTATE_UINT32(mtxd, stellaris_enet_state), + VMSTATE_UINT32(mrxd, stellaris_enet_state), + VMSTATE_UINT32(np, stellaris_enet_state), + VMSTATE_UINT32(tx_fifo_len, stellaris_enet_state), + VMSTATE_UINT8_ARRAY(tx_fifo, stellaris_enet_state, 2048), + VMSTATE_STRUCT_ARRAY(rx, stellaris_enet_state, 31, 1, + vmstate_rx_frame, StellarisEnetRxFrame), + VMSTATE_UINT32(rx_fifo_offset, stellaris_enet_state), + VMSTATE_UINT32(next_packet, stellaris_enet_state), + VMSTATE_END_OF_LIST() + } +}; + +static void stellaris_enet_update(stellaris_enet_state *s) +{ + qemu_set_irq(s->irq, (s->ris & s->im) != 0); +} + +/* Return the data length of the packet currently being assembled + * in the TX fifo. + */ +static inline int stellaris_txpacket_datalen(stellaris_enet_state *s) +{ + return s->tx_fifo[0] | (s->tx_fifo[1] << 8); +} + +/* Return true if the packet currently in the TX FIFO is complete, +* ie the FIFO holds enough bytes for the data length, ethernet header, +* payload and optionally CRC. +*/ +static inline bool stellaris_txpacket_complete(stellaris_enet_state *s) +{ + int framelen = stellaris_txpacket_datalen(s); + framelen += 16; + if (!(s->tctl & SE_TCTL_CRC)) { + framelen += 4; + } + /* Cover the corner case of a 2032 byte payload with auto-CRC disabled: + * this requires more bytes than will fit in the FIFO. It's not totally + * clear how the h/w handles this, but if using threshold-based TX + * it will definitely try to transmit something. + */ + framelen = MIN(framelen, ARRAY_SIZE(s->tx_fifo)); + return s->tx_fifo_len >= framelen; +} + +/* Return true if the TX FIFO threshold is enabled and the FIFO + * has filled enough to reach it. + */ +static inline bool stellaris_tx_thr_reached(stellaris_enet_state *s) +{ + return (s->thr < 0x3f && + (s->tx_fifo_len >= 4 * (s->thr * 8 + 1))); +} + +/* Send the packet currently in the TX FIFO */ +static void stellaris_enet_send(stellaris_enet_state *s) +{ + int framelen = stellaris_txpacket_datalen(s); + + /* Ethernet header is in the FIFO but not in the datacount. + * We don't implement explicit CRC, so just ignore any + * CRC value in the FIFO. + */ + framelen += 14; + if ((s->tctl & SE_TCTL_PADEN) && framelen < 60) { + memset(&s->tx_fifo[framelen + 2], 0, 60 - framelen); + framelen = 60; + } + /* This MIN will have no effect unless the FIFO data is corrupt + * (eg bad data from an incoming migration); otherwise the check + * on the datalen at the start of writing the data into the FIFO + * will have caught this. Silently write a corrupt half-packet, + * which is what the hardware does in FIFO underrun situations. 
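Two conditions gate an early transmit in the DATA write path further down: the FIFO must have reached the programmed threshold and must already contain a complete frame. Restated standalone, assuming the formulas in stellaris_tx_thr_reached() and stellaris_txpacket_complete() above:

#include <stdbool.h>
#include <stdint.h>

/* THR below 0x3f arms early transmit at 4 * (8 * THR + 1) bytes;
 * 0x3f (the reset value) disables the threshold. */
static bool tx_thr_reached(uint32_t thr, uint32_t fifo_len)
{
    return thr < 0x3f && fifo_len >= 4 * (thr * 8 + 1);
}

/* 'datalen' is the value of the FIFO's first 16-bit word. */
static bool tx_packet_complete(uint32_t fifo_len, uint32_t datalen,
                               bool auto_crc)
{
    uint32_t framelen = datalen + 16;      /* count word + 14-byte header */

    if (!auto_crc) {
        framelen += 4;                     /* guest supplies the CRC */
    }
    if (framelen > 2048) {
        framelen = 2048;                   /* clamp to the FIFO size */
    }
    return fifo_len >= framelen;
}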
+ */ + framelen = MIN(framelen, ARRAY_SIZE(s->tx_fifo) - 2); + qemu_send_packet(qemu_get_queue(s->nic), s->tx_fifo + 2, framelen); + s->tx_fifo_len = 0; + s->ris |= SE_INT_TXEMP; + stellaris_enet_update(s); + DPRINTF("Done TX\n"); +} + +/* TODO: Implement MAC address filtering. */ +static ssize_t stellaris_enet_receive(NetClientState *nc, const uint8_t *buf, size_t size) +{ + stellaris_enet_state *s = qemu_get_nic_opaque(nc); + int n; + uint8_t *p; + uint32_t crc; + + if ((s->rctl & SE_RCTL_RXEN) == 0) + return -1; + if (s->np >= 31) { + return 0; + } + + DPRINTF("Received packet len=%zu\n", size); + n = s->next_packet + s->np; + if (n >= 31) + n -= 31; + + if (size >= sizeof(s->rx[n].data) - 6) { + /* If the packet won't fit into the + * emulated 2K RAM, this is reported + * as a FIFO overrun error. + */ + s->ris |= SE_INT_FOV; + stellaris_enet_update(s); + return -1; + } + + s->np++; + s->rx[n].len = size + 6; + p = s->rx[n].data; + *(p++) = (size + 6); + *(p++) = (size + 6) >> 8; + memcpy (p, buf, size); + p += size; + crc = crc32(~0, buf, size); + *(p++) = crc; + *(p++) = crc >> 8; + *(p++) = crc >> 16; + *(p++) = crc >> 24; + /* Clear the remaining bytes in the last word. */ + if ((size & 3) != 2) { + memset(p, 0, (6 - size) & 3); + } + + s->ris |= SE_INT_RX; + stellaris_enet_update(s); + + return size; +} + +static int stellaris_enet_can_receive(stellaris_enet_state *s) +{ + return (s->np < 31); +} + +static uint64_t stellaris_enet_read(void *opaque, hwaddr offset, + unsigned size) +{ + stellaris_enet_state *s = (stellaris_enet_state *)opaque; + uint32_t val; + + switch (offset) { + case 0x00: /* RIS */ + DPRINTF("IRQ status %02x\n", s->ris); + return s->ris; + case 0x04: /* IM */ + return s->im; + case 0x08: /* RCTL */ + return s->rctl; + case 0x0c: /* TCTL */ + return s->tctl; + case 0x10: /* DATA */ + { + uint8_t *rx_fifo; + + if (s->np == 0) { + BADF("RX underflow\n"); + return 0; + } + + rx_fifo = s->rx[s->next_packet].data + s->rx_fifo_offset; + + val = rx_fifo[0] | (rx_fifo[1] << 8) | (rx_fifo[2] << 16) + | (rx_fifo[3] << 24); + s->rx_fifo_offset += 4; + if (s->rx_fifo_offset >= s->rx[s->next_packet].len) { + s->rx_fifo_offset = 0; + s->next_packet++; + if (s->next_packet >= 31) + s->next_packet = 0; + s->np--; + DPRINTF("RX done np=%d\n", s->np); + if (!s->np && stellaris_enet_can_receive(s)) { + qemu_flush_queued_packets(qemu_get_queue(s->nic)); + } + } + return val; + } + case 0x14: /* IA0 */ + return s->conf.macaddr.a[0] | (s->conf.macaddr.a[1] << 8) + | (s->conf.macaddr.a[2] << 16) + | ((uint32_t)s->conf.macaddr.a[3] << 24); + case 0x18: /* IA1 */ + return s->conf.macaddr.a[4] | (s->conf.macaddr.a[5] << 8); + case 0x1c: /* THR */ + return s->thr; + case 0x20: /* MCTL */ + return s->mctl; + case 0x24: /* MDV */ + return s->mdv; + case 0x28: /* MADD */ + return 0; + case 0x2c: /* MTXD */ + return s->mtxd; + case 0x30: /* MRXD */ + return s->mrxd; + case 0x34: /* NP */ + return s->np; + case 0x38: /* TR */ + return 0; + case 0x3c: /* Undocumented: Timestamp? */ + return 0; + default: + qemu_log_mask(LOG_GUEST_ERROR, "stellaris_enet_rd%d: Illegal register" + " 0x02%" HWADDR_PRIx "\n", + size * 8, offset); + return 0; + } +} + +static void stellaris_enet_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + stellaris_enet_state *s = (stellaris_enet_state *)opaque; + + switch (offset) { + case 0x00: /* IACK */ + s->ris &= ~value; + DPRINTF("IRQ ack %02" PRIx64 "/%02x\n", value, s->ris); + stellaris_enet_update(s); + /* Clearing TXER also resets the TX fifo. 
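For reference, the RX slot layout built by stellaris_enet_receive() above is: a 2-byte little-endian total length (payload plus 6 for the length word and CRC), the payload, then a little-endian CRC32 computed with zlib's crc32() seeded with ~0; the device additionally zeroes the tail of the last 32-bit word. A standalone sketch of that framing (the pad-to-word step is omitted):

#include <stdint.h>
#include <string.h>
#include <zlib.h>

static size_t stage_rx_frame(uint8_t *dst, size_t dstlen,
                             const uint8_t *buf, size_t size)
{
    uint32_t crc;
    size_t total = size + 6;              /* len word + payload + CRC */

    if (total > dstlen) {
        return 0;                         /* would overflow the 2K slot */
    }
    dst[0] = total & 0xff;                /* length, little-endian */
    dst[1] = (total >> 8) & 0xff;
    memcpy(dst + 2, buf, size);
    crc = crc32(~0, buf, size);           /* same seed as the model above */
    dst[2 + size + 0] = crc & 0xff;       /* CRC trailer, little-endian */
    dst[2 + size + 1] = (crc >> 8) & 0xff;
    dst[2 + size + 2] = (crc >> 16) & 0xff;
    dst[2 + size + 3] = (crc >> 24) & 0xff;
    return total;
}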
*/ + if (value & SE_INT_TXER) { + s->tx_fifo_len = 0; + } + break; + case 0x04: /* IM */ + DPRINTF("IRQ mask %02" PRIx64 "/%02x\n", value, s->ris); + s->im = value; + stellaris_enet_update(s); + break; + case 0x08: /* RCTL */ + s->rctl = value; + if (value & SE_RCTL_RSTFIFO) { + s->np = 0; + s->rx_fifo_offset = 0; + stellaris_enet_update(s); + } + break; + case 0x0c: /* TCTL */ + s->tctl = value; + break; + case 0x10: /* DATA */ + if (s->tx_fifo_len == 0) { + /* The first word is special, it contains the data length */ + int framelen = value & 0xffff; + if (framelen > 2032) { + DPRINTF("TX frame too long (%d)\n", framelen); + s->ris |= SE_INT_TXER; + stellaris_enet_update(s); + break; + } + } + + if (s->tx_fifo_len + 4 <= ARRAY_SIZE(s->tx_fifo)) { + s->tx_fifo[s->tx_fifo_len++] = value; + s->tx_fifo[s->tx_fifo_len++] = value >> 8; + s->tx_fifo[s->tx_fifo_len++] = value >> 16; + s->tx_fifo[s->tx_fifo_len++] = value >> 24; + } + + if (stellaris_tx_thr_reached(s) && stellaris_txpacket_complete(s)) { + stellaris_enet_send(s); + } + break; + case 0x14: /* IA0 */ + s->conf.macaddr.a[0] = value; + s->conf.macaddr.a[1] = value >> 8; + s->conf.macaddr.a[2] = value >> 16; + s->conf.macaddr.a[3] = value >> 24; + break; + case 0x18: /* IA1 */ + s->conf.macaddr.a[4] = value; + s->conf.macaddr.a[5] = value >> 8; + break; + case 0x1c: /* THR */ + s->thr = value; + break; + case 0x20: /* MCTL */ + /* TODO: MII registers aren't modelled. + * Clear START, indicating that the operation completes immediately. + */ + s->mctl = value & ~1; + break; + case 0x24: /* MDV */ + s->mdv = value; + break; + case 0x28: /* MADD */ + /* ignored. */ + break; + case 0x2c: /* MTXD */ + s->mtxd = value & 0xff; + break; + case 0x38: /* TR */ + if (value & 1) { + stellaris_enet_send(s); + } + break; + case 0x30: /* MRXD */ + case 0x34: /* NP */ + /* Ignored. */ + case 0x3c: /* Undocumented: Timestamp? */ + /* Ignored.
*/ + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, "stellaris_enet_wr%d: Illegal register " + "0x02%" HWADDR_PRIx " = 0x%" PRIx64 "\n", + size * 8, offset, value); + } +} + +static const MemoryRegionOps stellaris_enet_ops = { + .read = stellaris_enet_read, + .write = stellaris_enet_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void stellaris_enet_reset(DeviceState *dev) +{ + stellaris_enet_state *s = STELLARIS_ENET(dev); + + s->mdv = 0x80; + s->rctl = SE_RCTL_BADCRC; + s->im = SE_INT_PHY | SE_INT_MD | SE_INT_RXER | SE_INT_FOV | SE_INT_TXEMP + | SE_INT_TXER | SE_INT_RX; + s->thr = 0x3f; + s->tx_fifo_len = 0; +} + +static NetClientInfo net_stellaris_enet_info = { + .type = NET_CLIENT_DRIVER_NIC, + .size = sizeof(NICState), + .receive = stellaris_enet_receive, +}; + +static void stellaris_enet_realize(DeviceState *dev, Error **errp) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + stellaris_enet_state *s = STELLARIS_ENET(dev); + + memory_region_init_io(&s->mmio, OBJECT(s), &stellaris_enet_ops, s, + "stellaris_enet", 0x1000); + sysbus_init_mmio(sbd, &s->mmio); + sysbus_init_irq(sbd, &s->irq); + qemu_macaddr_default_if_unset(&s->conf.macaddr); + + s->nic = qemu_new_nic(&net_stellaris_enet_info, &s->conf, + object_get_typename(OBJECT(dev)), dev->id, s); + qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a); +} + +static Property stellaris_enet_properties[] = { + DEFINE_NIC_PROPERTIES(stellaris_enet_state, conf), + DEFINE_PROP_END_OF_LIST(), +}; + +static void stellaris_enet_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = stellaris_enet_realize; + dc->reset = stellaris_enet_reset; + device_class_set_props(dc, stellaris_enet_properties); + dc->vmsd = &vmstate_stellaris_enet; +} + +static const TypeInfo stellaris_enet_info = { + .name = TYPE_STELLARIS_ENET, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(stellaris_enet_state), + .class_init = stellaris_enet_class_init, +}; + +static void stellaris_enet_register_types(void) +{ + type_register_static(&stellaris_enet_info); +} + +type_init(stellaris_enet_register_types) diff --git a/hw/net/sungem.c b/hw/net/sungem.c new file mode 100644 index 000000000..3684a4d73 --- /dev/null +++ b/hw/net/sungem.c @@ -0,0 +1,1454 @@ +/* + * QEMU model of SUN GEM ethernet controller + * + * As found in Apple ASICs among others + * + * Copyright 2016 Ben Herrenschmidt + * Copyright 2017 Mark Cave-Ayland + */ + +#include "qemu/osdep.h" +#include "hw/pci/pci.h" +#include "hw/qdev-properties.h" +#include "migration/vmstate.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "net/net.h" +#include "net/eth.h" +#include "net/checksum.h" +#include "hw/net/mii.h" +#include "sysemu/sysemu.h" +#include "trace.h" +#include "qom/object.h" + +#define TYPE_SUNGEM "sungem" + +OBJECT_DECLARE_SIMPLE_TYPE(SunGEMState, SUNGEM) + +#define MAX_PACKET_SIZE 9016 + +#define SUNGEM_MMIO_SIZE 0x200000 + +/* Global registers */ +#define SUNGEM_MMIO_GREG_SIZE 0x2000 + +#define GREG_SEBSTATE 0x0000UL /* SEB State Register */ + +#define GREG_STAT 0x000CUL /* Status Register */ +#define GREG_STAT_TXINTME 0x00000001 /* TX INTME frame transferred */ +#define GREG_STAT_TXALL 0x00000002 /* All TX frames transferred */ +#define GREG_STAT_TXDONE 0x00000004 /* One TX frame transferred */ +#define GREG_STAT_RXDONE 0x00000010 /* One RX frame arrived */ +#define GREG_STAT_RXNOBUF 0x00000020 /* No free RX buffers available */ +#define GREG_STAT_RXTAGERR 0x00000040 /* RX tag framing is corrupt */ 
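The GREG_STAT_LATCH group defined just below behaves as edge latches: reading STAT clears them, and writing a set bit to IACK acknowledges it, as sungem_mmio_greg_read() and the GREG_IACK case implement further down. A standalone sketch of that read-to-clear pattern, with a stand-in mask value:

#include <stdint.h>

#define LATCH_MASK 0x77u   /* stand-in for GREG_STAT_LATCH */

struct dev { uint32_t stat; };

static uint32_t stat_read(struct dev *d)
{
    uint32_t val = d->stat;

    d->stat &= ~LATCH_MASK;          /* side effect: reading clears latches */
    return val;
}

static void iack_write(struct dev *d, uint32_t val)
{
    d->stat &= ~(val & LATCH_MASK);  /* ack only the latched bits written */
}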
+#define GREG_STAT_TXMAC 0x00004000 /* TX MAC signalled interrupt */ +#define GREG_STAT_RXMAC 0x00008000 /* RX MAC signalled interrupt */ +#define GREG_STAT_MAC 0x00010000 /* MAC Control signalled irq */ +#define GREG_STAT_TXNR 0xfff80000 /* == TXDMA_TXDONE reg val */ +#define GREG_STAT_TXNR_SHIFT 19 + +/* These interrupts are edge latches in the status register, + * reading it (or writing the corresponding bit in IACK) will + * clear them + */ +#define GREG_STAT_LATCH (GREG_STAT_TXALL | GREG_STAT_TXINTME | \ + GREG_STAT_RXDONE | GREG_STAT_RXDONE | \ + GREG_STAT_RXNOBUF | GREG_STAT_RXTAGERR) + +#define GREG_IMASK 0x0010UL /* Interrupt Mask Register */ +#define GREG_IACK 0x0014UL /* Interrupt ACK Register */ +#define GREG_STAT2 0x001CUL /* Alias of GREG_STAT */ +#define GREG_PCIESTAT 0x1000UL /* PCI Error Status Register */ +#define GREG_PCIEMASK 0x1004UL /* PCI Error Mask Register */ + +#define GREG_SWRST 0x1010UL /* Software Reset Register */ +#define GREG_SWRST_TXRST 0x00000001 /* TX Software Reset */ +#define GREG_SWRST_RXRST 0x00000002 /* RX Software Reset */ +#define GREG_SWRST_RSTOUT 0x00000004 /* Force RST# pin active */ + +/* TX DMA Registers */ +#define SUNGEM_MMIO_TXDMA_SIZE 0x1000 + +#define TXDMA_KICK 0x0000UL /* TX Kick Register */ + +#define TXDMA_CFG 0x0004UL /* TX Configuration Register */ +#define TXDMA_CFG_ENABLE 0x00000001 /* Enable TX DMA channel */ +#define TXDMA_CFG_RINGSZ 0x0000001e /* TX descriptor ring size */ + +#define TXDMA_DBLOW 0x0008UL /* TX Desc. Base Low */ +#define TXDMA_DBHI 0x000CUL /* TX Desc. Base High */ +#define TXDMA_PCNT 0x0024UL /* TX FIFO Packet Counter */ +#define TXDMA_SMACHINE 0x0028UL /* TX State Machine Register */ +#define TXDMA_DPLOW 0x0030UL /* TX Data Pointer Low */ +#define TXDMA_DPHI 0x0034UL /* TX Data Pointer High */ +#define TXDMA_TXDONE 0x0100UL /* TX Completion Register */ +#define TXDMA_FTAG 0x0108UL /* TX FIFO Tag */ +#define TXDMA_FSZ 0x0118UL /* TX FIFO Size */ + +/* Receive DMA Registers */ +#define SUNGEM_MMIO_RXDMA_SIZE 0x2000 + +#define RXDMA_CFG 0x0000UL /* RX Configuration Register */ +#define RXDMA_CFG_ENABLE 0x00000001 /* Enable RX DMA channel */ +#define RXDMA_CFG_RINGSZ 0x0000001e /* RX descriptor ring size */ +#define RXDMA_CFG_FBOFF 0x00001c00 /* Offset of first data byte */ +#define RXDMA_CFG_CSUMOFF 0x000fe000 /* Skip bytes before csum calc */ + +#define RXDMA_DBLOW 0x0004UL /* RX Descriptor Base Low */ +#define RXDMA_DBHI 0x0008UL /* RX Descriptor Base High */ +#define RXDMA_PCNT 0x0018UL /* RX FIFO Packet Counter */ +#define RXDMA_SMACHINE 0x001CUL /* RX State Machine Register */ +#define RXDMA_PTHRESH 0x0020UL /* Pause Thresholds */ +#define RXDMA_DPLOW 0x0024UL /* RX Data Pointer Low */ +#define RXDMA_DPHI 0x0028UL /* RX Data Pointer High */ +#define RXDMA_KICK 0x0100UL /* RX Kick Register */ +#define RXDMA_DONE 0x0104UL /* RX Completion Register */ +#define RXDMA_BLANK 0x0108UL /* RX Blanking Register */ +#define RXDMA_FTAG 0x0110UL /* RX FIFO Tag */ +#define RXDMA_FSZ 0x0120UL /* RX FIFO Size */ + +/* MAC Registers */ +#define SUNGEM_MMIO_MAC_SIZE 0x200 + +#define MAC_TXRST 0x0000UL /* TX MAC Software Reset Command */ +#define MAC_RXRST 0x0004UL /* RX MAC Software Reset Command */ +#define MAC_TXSTAT 0x0010UL /* TX MAC Status Register */ +#define MAC_RXSTAT 0x0014UL /* RX MAC Status Register */ + +#define MAC_CSTAT 0x0018UL /* MAC Control Status Register */ +#define MAC_CSTAT_PTR 0xffff0000 /* Pause Time Received */ + +#define MAC_TXMASK 0x0020UL /* TX MAC Mask Register */ +#define MAC_RXMASK 0x0024UL /* RX 
MAC Mask Register */ +#define MAC_MCMASK 0x0028UL /* MAC Control Mask Register */ + +#define MAC_TXCFG 0x0030UL /* TX MAC Configuration Register */ +#define MAC_TXCFG_ENAB 0x00000001 /* TX MAC Enable */ + +#define MAC_RXCFG 0x0034UL /* RX MAC Configuration Register */ +#define MAC_RXCFG_ENAB 0x00000001 /* RX MAC Enable */ +#define MAC_RXCFG_SFCS 0x00000004 /* Strip FCS */ +#define MAC_RXCFG_PROM 0x00000008 /* Promiscuous Mode */ +#define MAC_RXCFG_PGRP 0x00000010 /* Promiscuous Group */ +#define MAC_RXCFG_HFE 0x00000020 /* Hash Filter Enable */ + +#define MAC_XIFCFG 0x003CUL /* XIF Configuration Register */ +#define MAC_XIFCFG_LBCK 0x00000002 /* Loopback TX to RX */ + +#define MAC_MINFSZ 0x0050UL /* MinFrameSize Register */ +#define MAC_MAXFSZ 0x0054UL /* MaxFrameSize Register */ +#define MAC_ADDR0 0x0080UL /* MAC Address 0 Register */ +#define MAC_ADDR1 0x0084UL /* MAC Address 1 Register */ +#define MAC_ADDR2 0x0088UL /* MAC Address 2 Register */ +#define MAC_ADDR3 0x008CUL /* MAC Address 3 Register */ +#define MAC_ADDR4 0x0090UL /* MAC Address 4 Register */ +#define MAC_ADDR5 0x0094UL /* MAC Address 5 Register */ +#define MAC_HASH0 0x00C0UL /* Hash Table 0 Register */ +#define MAC_PATMPS 0x0114UL /* Peak Attempts Register */ +#define MAC_SMACHINE 0x0134UL /* State Machine Register */ + +/* MIF Registers */ +#define SUNGEM_MMIO_MIF_SIZE 0x20 + +#define MIF_FRAME 0x000CUL /* MIF Frame/Output Register */ +#define MIF_FRAME_OP 0x30000000 /* OPcode */ +#define MIF_FRAME_PHYAD 0x0f800000 /* PHY ADdress */ +#define MIF_FRAME_REGAD 0x007c0000 /* REGister ADdress */ +#define MIF_FRAME_TALSB 0x00010000 /* Turn Around LSB */ +#define MIF_FRAME_DATA 0x0000ffff /* Instruction Payload */ + +#define MIF_CFG 0x0010UL /* MIF Configuration Register */ +#define MIF_CFG_MDI0 0x00000100 /* MDIO_0 present or read-bit */ +#define MIF_CFG_MDI1 0x00000200 /* MDIO_1 present or read-bit */ + +#define MIF_STATUS 0x0018UL /* MIF Status Register */ +#define MIF_SMACHINE 0x001CUL /* MIF State Machine Register */ + +/* PCS/Serialink Registers */ +#define SUNGEM_MMIO_PCS_SIZE 0x60 +#define PCS_MIISTAT 0x0004UL /* PCS MII Status Register */ +#define PCS_ISTAT 0x0018UL /* PCS Interrupt Status Reg */ +#define PCS_SSTATE 0x005CUL /* Serialink State Register */ + +/* Descriptors */ +struct gem_txd { + uint64_t control_word; + uint64_t buffer; +}; + +#define TXDCTRL_BUFSZ 0x0000000000007fffULL /* Buffer Size */ +#define TXDCTRL_CSTART 0x00000000001f8000ULL /* CSUM Start Offset */ +#define TXDCTRL_COFF 0x000000001fe00000ULL /* CSUM Stuff Offset */ +#define TXDCTRL_CENAB 0x0000000020000000ULL /* CSUM Enable */ +#define TXDCTRL_EOF 0x0000000040000000ULL /* End of Frame */ +#define TXDCTRL_SOF 0x0000000080000000ULL /* Start of Frame */ +#define TXDCTRL_INTME 0x0000000100000000ULL /* "Interrupt Me" */ + +struct gem_rxd { + uint64_t status_word; + uint64_t buffer; +}; + +#define RXDCTRL_HPASS 0x1000000000000000ULL /* Passed Hash Filter */ +#define RXDCTRL_ALTMAC 0x2000000000000000ULL /* Matched ALT MAC */ + + +struct SunGEMState { + PCIDevice pdev; + + MemoryRegion sungem; + MemoryRegion greg; + MemoryRegion txdma; + MemoryRegion rxdma; + MemoryRegion mac; + MemoryRegion mif; + MemoryRegion pcs; + NICState *nic; + NICConf conf; + uint32_t phy_addr; + + uint32_t gregs[SUNGEM_MMIO_GREG_SIZE >> 2]; + uint32_t txdmaregs[SUNGEM_MMIO_TXDMA_SIZE >> 2]; + uint32_t rxdmaregs[SUNGEM_MMIO_RXDMA_SIZE >> 2]; + uint32_t macregs[SUNGEM_MMIO_MAC_SIZE >> 2]; + uint32_t mifregs[SUNGEM_MMIO_MIF_SIZE >> 2]; + uint32_t pcsregs[SUNGEM_MMIO_PCS_SIZE >> 
2]; + + /* Cache some useful things */ + uint32_t rx_mask; + uint32_t tx_mask; + + /* Current tx packet */ + uint8_t tx_data[MAX_PACKET_SIZE]; + uint32_t tx_size; + uint64_t tx_first_ctl; +}; + + +static void sungem_eval_irq(SunGEMState *s) +{ + uint32_t stat, mask; + + mask = s->gregs[GREG_IMASK >> 2]; + stat = s->gregs[GREG_STAT >> 2] & ~GREG_STAT_TXNR; + if (stat & ~mask) { + pci_set_irq(PCI_DEVICE(s), 1); + } else { + pci_set_irq(PCI_DEVICE(s), 0); + } +} + +static void sungem_update_status(SunGEMState *s, uint32_t bits, bool val) +{ + uint32_t stat; + + stat = s->gregs[GREG_STAT >> 2]; + if (val) { + stat |= bits; + } else { + stat &= ~bits; + } + s->gregs[GREG_STAT >> 2] = stat; + sungem_eval_irq(s); +} + +static void sungem_eval_cascade_irq(SunGEMState *s) +{ + uint32_t stat, mask; + + mask = s->macregs[MAC_TXSTAT >> 2]; + stat = s->macregs[MAC_TXMASK >> 2]; + if (stat & ~mask) { + sungem_update_status(s, GREG_STAT_TXMAC, true); + } else { + sungem_update_status(s, GREG_STAT_TXMAC, false); + } + + mask = s->macregs[MAC_RXSTAT >> 2]; + stat = s->macregs[MAC_RXMASK >> 2]; + if (stat & ~mask) { + sungem_update_status(s, GREG_STAT_RXMAC, true); + } else { + sungem_update_status(s, GREG_STAT_RXMAC, false); + } + + mask = s->macregs[MAC_CSTAT >> 2]; + stat = s->macregs[MAC_MCMASK >> 2] & ~MAC_CSTAT_PTR; + if (stat & ~mask) { + sungem_update_status(s, GREG_STAT_MAC, true); + } else { + sungem_update_status(s, GREG_STAT_MAC, false); + } +} + +static void sungem_do_tx_csum(SunGEMState *s) +{ + uint16_t start, off; + uint32_t csum; + + start = (s->tx_first_ctl & TXDCTRL_CSTART) >> 15; + off = (s->tx_first_ctl & TXDCTRL_COFF) >> 21; + + trace_sungem_tx_checksum(start, off); + + if (start > (s->tx_size - 2) || off > (s->tx_size - 2)) { + trace_sungem_tx_checksum_oob(); + return; + } + + csum = net_raw_checksum(s->tx_data + start, s->tx_size - start); + stw_be_p(s->tx_data + off, csum); +} + +static void sungem_send_packet(SunGEMState *s, const uint8_t *buf, + int size) +{ + NetClientState *nc = qemu_get_queue(s->nic); + + if (s->macregs[MAC_XIFCFG >> 2] & MAC_XIFCFG_LBCK) { + qemu_receive_packet(nc, buf, size); + } else { + qemu_send_packet(nc, buf, size); + } +} + +static void sungem_process_tx_desc(SunGEMState *s, struct gem_txd *desc) +{ + PCIDevice *d = PCI_DEVICE(s); + uint32_t len; + + /* If it's a start of frame, discard anything we had in the + * buffer and start again. This should be an error condition + * if we had something ... 
for now we ignore it + */ + if (desc->control_word & TXDCTRL_SOF) { + if (s->tx_first_ctl) { + trace_sungem_tx_unfinished(); + } + s->tx_size = 0; + s->tx_first_ctl = desc->control_word; + } + + /* Grab data size */ + len = desc->control_word & TXDCTRL_BUFSZ; + + /* Clamp it to our max size */ + if ((s->tx_size + len) > MAX_PACKET_SIZE) { + trace_sungem_tx_overflow(); + len = MAX_PACKET_SIZE - s->tx_size; + } + + /* Read the data */ + pci_dma_read(d, desc->buffer, &s->tx_data[s->tx_size], len); + s->tx_size += len; + + /* If end of frame, send packet */ + if (desc->control_word & TXDCTRL_EOF) { + trace_sungem_tx_finished(s->tx_size); + + /* Handle csum */ + if (s->tx_first_ctl & TXDCTRL_CENAB) { + sungem_do_tx_csum(s); + } + + /* Send it */ + sungem_send_packet(s, s->tx_data, s->tx_size); + + /* No more pending packet */ + s->tx_size = 0; + s->tx_first_ctl = 0; + } +} + +static void sungem_tx_kick(SunGEMState *s) +{ + PCIDevice *d = PCI_DEVICE(s); + uint32_t comp, kick; + uint32_t txdma_cfg, txmac_cfg, ints; + uint64_t dbase; + + trace_sungem_tx_kick(); + + /* Check that both TX MAC and TX DMA are enabled. We don't + * handle DMA-less direct FIFO operations (we don't emulate + * the FIFO at all). + * + * A write to TXDMA_KICK while DMA isn't enabled can happen + * when the driver is resetting the pointer. + */ + txdma_cfg = s->txdmaregs[TXDMA_CFG >> 2]; + txmac_cfg = s->macregs[MAC_TXCFG >> 2]; + if (!(txdma_cfg & TXDMA_CFG_ENABLE) || + !(txmac_cfg & MAC_TXCFG_ENAB)) { + trace_sungem_tx_disabled(); + return; + } + + /* XXX Test min frame size register ? */ + /* XXX Test max frame size register ? */ + + dbase = s->txdmaregs[TXDMA_DBHI >> 2]; + dbase = (dbase << 32) | s->txdmaregs[TXDMA_DBLOW >> 2]; + + comp = s->txdmaregs[TXDMA_TXDONE >> 2] & s->tx_mask; + kick = s->txdmaregs[TXDMA_KICK >> 2] & s->tx_mask; + + trace_sungem_tx_process(comp, kick, s->tx_mask + 1); + + /* This is rather primitive for now, we just send everything we + * can in one go, like e1000. Ideally we should do the sending + * from some kind of background task + */ + while (comp != kick) { + struct gem_txd desc; + + /* Read the next descriptor */ + pci_dma_read(d, dbase + comp * sizeof(desc), &desc, sizeof(desc)); + + /* Byteswap descriptor */ + desc.control_word = le64_to_cpu(desc.control_word); + desc.buffer = le64_to_cpu(desc.buffer); + trace_sungem_tx_desc(comp, desc.control_word, desc.buffer); + + /* Send it for processing */ + sungem_process_tx_desc(s, &desc); + + /* Interrupt */ + ints = GREG_STAT_TXDONE; + if (desc.control_word & TXDCTRL_INTME) { + ints |= GREG_STAT_TXINTME; + } + sungem_update_status(s, ints, true); + + /* Next ! 
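sungem_do_tx_csum() above delegates the arithmetic to QEMU's net_raw_checksum(); for reference, here is a minimal open-coded equivalent of the sum-and-stuff step under the same semantics (big-endian 16-bit one's-complement sum from the start offset to the end of the frame, result stored big-endian at the stuff offset):

#include <stddef.h>
#include <stdint.h>

static uint16_t raw_checksum(const uint8_t *data, size_t len)
{
    uint32_t sum = 0;
    size_t i;

    for (i = 0; i + 1 < len; i += 2) {
        sum += (data[i] << 8) | data[i + 1];   /* big-endian 16-bit words */
    }
    if (len & 1) {
        sum += data[len - 1] << 8;             /* pad the odd trailing byte */
    }
    while (sum >> 16) {
        sum = (sum & 0xffff) + (sum >> 16);    /* fold the carries back in */
    }
    return ~sum & 0xffff;                      /* one's complement */
}

static void stuff_csum(uint8_t *frame, size_t framelen,
                       uint16_t start, uint16_t off)
{
    uint16_t csum = raw_checksum(frame + start, framelen - start);

    frame[off] = csum >> 8;                    /* stw_be_p() equivalent */
    frame[off + 1] = csum & 0xff;
}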
*/ + comp = (comp + 1) & s->tx_mask; + s->txdmaregs[TXDMA_TXDONE >> 2] = comp; + } + + /* We sent everything, set status/irq bit */ + sungem_update_status(s, GREG_STAT_TXALL, true); +} + +static bool sungem_rx_full(SunGEMState *s, uint32_t kick, uint32_t done) +{ + return kick == ((done + 1) & s->rx_mask); +} + +static bool sungem_can_receive(NetClientState *nc) +{ + SunGEMState *s = qemu_get_nic_opaque(nc); + uint32_t kick, done, rxdma_cfg, rxmac_cfg; + bool full; + + rxmac_cfg = s->macregs[MAC_RXCFG >> 2]; + rxdma_cfg = s->rxdmaregs[RXDMA_CFG >> 2]; + + /* If MAC disabled, can't receive */ + if ((rxmac_cfg & MAC_RXCFG_ENAB) == 0) { + trace_sungem_rx_mac_disabled(); + return false; + } + if ((rxdma_cfg & RXDMA_CFG_ENABLE) == 0) { + trace_sungem_rx_txdma_disabled(); + return false; + } + + /* Check RX availability */ + kick = s->rxdmaregs[RXDMA_KICK >> 2]; + done = s->rxdmaregs[RXDMA_DONE >> 2]; + full = sungem_rx_full(s, kick, done); + + trace_sungem_rx_check(!full, kick, done); + + return !full; +} + +enum { + rx_no_match, + rx_match_promisc, + rx_match_bcast, + rx_match_allmcast, + rx_match_mcast, + rx_match_mac, + rx_match_altmac, +}; + +static int sungem_check_rx_mac(SunGEMState *s, const uint8_t *mac, uint32_t crc) +{ + uint32_t rxcfg = s->macregs[MAC_RXCFG >> 2]; + uint32_t mac0, mac1, mac2; + + /* Promisc enabled ? */ + if (rxcfg & MAC_RXCFG_PROM) { + return rx_match_promisc; + } + + /* Format MAC address into dwords */ + mac0 = (mac[4] << 8) | mac[5]; + mac1 = (mac[2] << 8) | mac[3]; + mac2 = (mac[0] << 8) | mac[1]; + + trace_sungem_rx_mac_check(mac0, mac1, mac2); + + /* Is this a broadcast frame ? */ + if (mac0 == 0xffff && mac1 == 0xffff && mac2 == 0xffff) { + return rx_match_bcast; + } + + /* TODO: Implement address filter registers (or we don't care ?) */ + + /* Is this a multicast frame ? */ + if (mac[0] & 1) { + trace_sungem_rx_mac_multicast(); + + /* Promisc group enabled ? */ + if (rxcfg & MAC_RXCFG_PGRP) { + return rx_match_allmcast; + } + + /* TODO: Check MAC control frames (or we don't care) ? */ + + /* Check hash filter (somebody check that's correct ?) 
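On the comment's question: the lookup that follows matches the conventional GEM multicast hash, selecting one of 256 filter bits (sixteen 16-bit MAC_HASH registers) by the top byte of the little-endian CRC32 of the destination MAC, MSB first within a register. The same computation, restated as a standalone sketch assuming that register layout:

#include <stdbool.h>
#include <stdint.h>

/* hashtab holds the 16 MAC_HASH registers, 16 valid bits each */
static bool hash_hit(const uint16_t hashtab[16], uint32_t crc)
{
    uint32_t bucket = crc >> 24;           /* top 8 CRC bits: 0..255 */
    uint32_t reg = bucket >> 4;            /* which of the 16 registers */
    uint32_t bit = 15 - (bucket & 0xf);    /* MSB-first within a register */

    return hashtab[reg] & (1u << bit);
}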
*/ + if (rxcfg & MAC_RXCFG_HFE) { + uint32_t hash, idx; + + crc >>= 24; + idx = (crc >> 2) & 0x3c; + hash = s->macregs[(MAC_HASH0 + idx) >> 2]; + if (hash & (1 << (15 - (crc & 0xf)))) { + return rx_match_mcast; + } + } + return rx_no_match; + } + + /* Main MAC check */ + trace_sungem_rx_mac_compare(s->macregs[MAC_ADDR0 >> 2], + s->macregs[MAC_ADDR1 >> 2], + s->macregs[MAC_ADDR2 >> 2]); + + if (mac0 == s->macregs[MAC_ADDR0 >> 2] && + mac1 == s->macregs[MAC_ADDR1 >> 2] && + mac2 == s->macregs[MAC_ADDR2 >> 2]) { + return rx_match_mac; + } + + /* Alt MAC check */ + if (mac0 == s->macregs[MAC_ADDR3 >> 2] && + mac1 == s->macregs[MAC_ADDR4 >> 2] && + mac2 == s->macregs[MAC_ADDR5 >> 2]) { + return rx_match_altmac; + } + + return rx_no_match; +} + +static ssize_t sungem_receive(NetClientState *nc, const uint8_t *buf, + size_t size) +{ + SunGEMState *s = qemu_get_nic_opaque(nc); + PCIDevice *d = PCI_DEVICE(s); + uint32_t mac_crc, done, kick, max_fsize; + uint32_t fcs_size, ints, rxdma_cfg, rxmac_cfg, csum, coff; + uint8_t smallbuf[60]; + struct gem_rxd desc; + uint64_t dbase, baddr; + unsigned int rx_cond; + + trace_sungem_rx_packet(size); + + rxmac_cfg = s->macregs[MAC_RXCFG >> 2]; + rxdma_cfg = s->rxdmaregs[RXDMA_CFG >> 2]; + max_fsize = s->macregs[MAC_MAXFSZ >> 2] & 0x7fff; + + /* If MAC or DMA disabled, can't receive */ + if (!(rxdma_cfg & RXDMA_CFG_ENABLE) || + !(rxmac_cfg & MAC_RXCFG_ENAB)) { + trace_sungem_rx_disabled(); + return 0; + } + + /* Size adjustment for FCS */ + if (rxmac_cfg & MAC_RXCFG_SFCS) { + fcs_size = 0; + } else { + fcs_size = 4; + } + + /* Discard frame smaller than a MAC or larger than max frame size + * (when accounting for FCS) + */ + if (size < 6 || (size + 4) > max_fsize) { + trace_sungem_rx_bad_frame_size(size); + /* XXX Increment error statistics ? */ + return size; + } + + /* We don't drop too small frames since we get them in qemu, we pad + * them instead. We should probably use the min frame size register + * but I don't want to use a variable size staging buffer and I + * know both MacOS and Linux use the default 64 anyway. We use 60 + * here to account for the non-existent FCS. + */ + if (size < 60) { + memcpy(smallbuf, buf, size); + memset(&smallbuf[size], 0, 60 - size); + buf = smallbuf; + size = 60; + } + + /* Get MAC crc */ + mac_crc = net_crc32_le(buf, ETH_ALEN); + + /* Packet isn't for me ? */ + rx_cond = sungem_check_rx_mac(s, buf, mac_crc); + if (rx_cond == rx_no_match) { + /* Just drop it */ + trace_sungem_rx_unmatched(); + return size; + } + + /* Get ring pointers */ + kick = s->rxdmaregs[RXDMA_KICK >> 2] & s->rx_mask; + done = s->rxdmaregs[RXDMA_DONE >> 2] & s->rx_mask; + + trace_sungem_rx_process(done, kick, s->rx_mask + 1); + + /* Ring full ? Can't receive */ + if (sungem_rx_full(s, kick, done)) { + trace_sungem_rx_ringfull(); + return 0; + } + + /* Note: The real GEM will fetch descriptors in blocks of 4, + * for now we handle them one at a time, I think the driver will + * cope + */ + + dbase = s->rxdmaregs[RXDMA_DBHI >> 2]; + dbase = (dbase << 32) | s->rxdmaregs[RXDMA_DBLOW >> 2]; + + /* Read the next descriptor */ + pci_dma_read(d, dbase + done * sizeof(desc), &desc, sizeof(desc)); + + trace_sungem_rx_desc(le64_to_cpu(desc.status_word), + le64_to_cpu(desc.buffer)); + + /* Effective buffer address */ + baddr = le64_to_cpu(desc.buffer) & ~7ull; + baddr |= (rxdma_cfg & RXDMA_CFG_FBOFF) >> 10; + + /* Write buffer out */ + pci_dma_write(d, baddr, buf, size); + + if (fcs_size) { + /* Should we add an FCS ? 
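The ring tests used on both DMA channels are plain power-of-two arithmetic: sungem_update_masks() below decodes the RINGSZ field into a ring of 32 << n entries, and sungem_rx_full() above treats the ring as full when the producer index is one slot behind the consumer write-back. Condensed into a standalone sketch:

#include <stdbool.h>
#include <stdint.h>

/* n is the already-extracted RINGSZ field (cfg >> 1, low bits) */
static uint32_t ring_mask(uint32_t n)
{
    return (1u << (n + 5)) - 1;    /* n = 0 -> 32 entries, n = 1 -> 64, ... */
}

/* Producer may not catch the consumer: full when one free slot remains */
static bool ring_full(uint32_t kick, uint32_t done, uint32_t mask)
{
    return kick == ((done + 1) & mask);
}

static uint32_t ring_next(uint32_t idx, uint32_t mask)
{
    return (idx + 1) & mask;       /* the "Next !" step in the TX loop */
}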
Linux doesn't ask us to strip it, + * however I believe nothing checks it... For now we just + * do nothing. It's faster this way. + */ + } + + /* Calculate the checksum */ + coff = (rxdma_cfg & RXDMA_CFG_CSUMOFF) >> 13; + csum = net_raw_checksum((uint8_t *)buf + coff, size - coff); + + /* Build the updated descriptor */ + desc.status_word = (size + fcs_size) << 16; + desc.status_word |= ((uint64_t)(mac_crc >> 16)) << 44; + desc.status_word |= csum; + if (rx_cond == rx_match_mcast) { + desc.status_word |= RXDCTRL_HPASS; + } + if (rx_cond == rx_match_altmac) { + desc.status_word |= RXDCTRL_ALTMAC; + } + desc.status_word = cpu_to_le64(desc.status_word); + + pci_dma_write(d, dbase + done * sizeof(desc), &desc, sizeof(desc)); + + done = (done + 1) & s->rx_mask; + s->rxdmaregs[RXDMA_DONE >> 2] = done; + + /* XXX Unconditionally set RX interrupt for now. The interrupt + * mitigation timer might well end up adding more overhead than + * helping here... + */ + ints = GREG_STAT_RXDONE; + if (sungem_rx_full(s, kick, done)) { + ints |= GREG_STAT_RXNOBUF; + } + sungem_update_status(s, ints, true); + + return size; +} + +static void sungem_set_link_status(NetClientState *nc) +{ + /* We don't do anything for now as I believe none of the OSes + * drivers use the MIF autopoll feature nor the PHY interrupt + */ +} + +static void sungem_update_masks(SunGEMState *s) +{ + uint32_t sz; + + sz = 1 << (((s->rxdmaregs[RXDMA_CFG >> 2] & RXDMA_CFG_RINGSZ) >> 1) + 5); + s->rx_mask = sz - 1; + + sz = 1 << (((s->txdmaregs[TXDMA_CFG >> 2] & TXDMA_CFG_RINGSZ) >> 1) + 5); + s->tx_mask = sz - 1; +} + +static void sungem_reset_rx(SunGEMState *s) +{ + trace_sungem_rx_reset(); + + /* XXX Do RXCFG */ + /* XXX Check value */ + s->rxdmaregs[RXDMA_FSZ >> 2] = 0x140; + s->rxdmaregs[RXDMA_DONE >> 2] = 0; + s->rxdmaregs[RXDMA_KICK >> 2] = 0; + s->rxdmaregs[RXDMA_CFG >> 2] = 0x1000010; + s->rxdmaregs[RXDMA_PTHRESH >> 2] = 0xf8; + s->rxdmaregs[RXDMA_BLANK >> 2] = 0; + + sungem_update_masks(s); +} + +static void sungem_reset_tx(SunGEMState *s) +{ + trace_sungem_tx_reset(); + + /* XXX Do TXCFG */ + /* XXX Check value */ + s->txdmaregs[TXDMA_FSZ >> 2] = 0x90; + s->txdmaregs[TXDMA_TXDONE >> 2] = 0; + s->txdmaregs[TXDMA_KICK >> 2] = 0; + s->txdmaregs[TXDMA_CFG >> 2] = 0x118010; + + sungem_update_masks(s); + + s->tx_size = 0; + s->tx_first_ctl = 0; +} + +static void sungem_reset_all(SunGEMState *s, bool pci_reset) +{ + trace_sungem_reset(pci_reset); + + sungem_reset_rx(s); + sungem_reset_tx(s); + + s->gregs[GREG_IMASK >> 2] = 0xFFFFFFF; + s->gregs[GREG_STAT >> 2] = 0; + if (pci_reset) { + uint8_t *ma = s->conf.macaddr.a; + + s->gregs[GREG_SWRST >> 2] = 0; + s->macregs[MAC_ADDR0 >> 2] = (ma[4] << 8) | ma[5]; + s->macregs[MAC_ADDR1 >> 2] = (ma[2] << 8) | ma[3]; + s->macregs[MAC_ADDR2 >> 2] = (ma[0] << 8) | ma[1]; + } else { + s->gregs[GREG_SWRST >> 2] &= GREG_SWRST_RSTOUT; + } + s->mifregs[MIF_CFG >> 2] = MIF_CFG_MDI0; +} + +static void sungem_mii_write(SunGEMState *s, uint8_t phy_addr, + uint8_t reg_addr, uint16_t val) +{ + trace_sungem_mii_write(phy_addr, reg_addr, val); + + /* XXX TODO */ +} + +static uint16_t __sungem_mii_read(SunGEMState *s, uint8_t phy_addr, + uint8_t reg_addr) +{ + if (phy_addr != s->phy_addr) { + return 0xffff; + } + /* Primitive emulation of a BCM5201 to please the driver, + * ID is 0x00406210. 
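sungem_mii_op() a little further down unpacks an IEEE 802.3 clause-22 management frame from the single MIF_FRAME register, using the masks defined earlier. The field extraction, spelled out as a standalone sketch:

#include <stdint.h>

struct mif_frame {
    uint8_t st, op, phy, reg;
    uint16_t data;
};

static struct mif_frame mif_decode(uint32_t val)
{
    struct mif_frame f = {
        .st   = (val >> 30) & 0x3,   /* must be 1 for a clause-22 frame */
        .op   = (val >> 28) & 0x3,   /* 1 = write, 2 = read */
        .phy  = (val >> 23) & 0x1f,  /* MIF_FRAME_PHYAD */
        .reg  = (val >> 18) & 0x1f,  /* MIF_FRAME_REGAD */
        .data = val & 0xffff,        /* MIF_FRAME_DATA */
    };

    return f;
}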
TODO: Do a gigabit PHY like BCM5400 + */ + switch (reg_addr) { + case MII_BMCR: + return 0; + case MII_PHYID1: + return 0x0040; + case MII_PHYID2: + return 0x6210; + case MII_BMSR: + if (qemu_get_queue(s->nic)->link_down) { + return MII_BMSR_100TX_FD | MII_BMSR_AUTONEG; + } else { + return MII_BMSR_100TX_FD | MII_BMSR_AN_COMP | + MII_BMSR_AUTONEG | MII_BMSR_LINK_ST; + } + case MII_ANLPAR: + case MII_ANAR: + return MII_ANLPAR_TXFD; + case 0x18: /* 5201 AUX status */ + return 3; /* 100FD */ + default: + return 0; + }; +} +static uint16_t sungem_mii_read(SunGEMState *s, uint8_t phy_addr, + uint8_t reg_addr) +{ + uint16_t val; + + val = __sungem_mii_read(s, phy_addr, reg_addr); + + trace_sungem_mii_read(phy_addr, reg_addr, val); + + return val; +} + +static uint32_t sungem_mii_op(SunGEMState *s, uint32_t val) +{ + uint8_t phy_addr, reg_addr, op; + + /* Ignore not start of frame */ + if ((val >> 30) != 1) { + trace_sungem_mii_invalid_sof(val >> 30); + return 0xffff; + } + phy_addr = (val & MIF_FRAME_PHYAD) >> 23; + reg_addr = (val & MIF_FRAME_REGAD) >> 18; + op = (val & MIF_FRAME_OP) >> 28; + switch (op) { + case 1: + sungem_mii_write(s, phy_addr, reg_addr, val & MIF_FRAME_DATA); + return val | MIF_FRAME_TALSB; + case 2: + return sungem_mii_read(s, phy_addr, reg_addr) | MIF_FRAME_TALSB; + default: + trace_sungem_mii_invalid_op(op); + } + return 0xffff | MIF_FRAME_TALSB; +} + +static void sungem_mmio_greg_write(void *opaque, hwaddr addr, uint64_t val, + unsigned size) +{ + SunGEMState *s = opaque; + + if (!(addr < 0x20) && !(addr >= 0x1000 && addr <= 0x1010)) { + qemu_log_mask(LOG_GUEST_ERROR, + "Write to unknown GREG register 0x%"HWADDR_PRIx"\n", + addr); + return; + } + + trace_sungem_mmio_greg_write(addr, val); + + /* Pre-write filter */ + switch (addr) { + /* Read only registers */ + case GREG_SEBSTATE: + case GREG_STAT: + case GREG_STAT2: + case GREG_PCIESTAT: + return; /* No actual write */ + case GREG_IACK: + val &= GREG_STAT_LATCH; + s->gregs[GREG_STAT >> 2] &= ~val; + sungem_eval_irq(s); + return; /* No actual write */ + case GREG_PCIEMASK: + val &= 0x7; + break; + } + + s->gregs[addr >> 2] = val; + + /* Post write action */ + switch (addr) { + case GREG_IMASK: + /* Re-evaluate interrupt */ + sungem_eval_irq(s); + break; + case GREG_SWRST: + switch (val & (GREG_SWRST_TXRST | GREG_SWRST_RXRST)) { + case GREG_SWRST_RXRST: + sungem_reset_rx(s); + break; + case GREG_SWRST_TXRST: + sungem_reset_tx(s); + break; + case GREG_SWRST_RXRST | GREG_SWRST_TXRST: + sungem_reset_all(s, false); + } + break; + } +} + +static uint64_t sungem_mmio_greg_read(void *opaque, hwaddr addr, unsigned size) +{ + SunGEMState *s = opaque; + uint32_t val; + + if (!(addr < 0x20) && !(addr >= 0x1000 && addr <= 0x1010)) { + qemu_log_mask(LOG_GUEST_ERROR, + "Read from unknown GREG register 0x%"HWADDR_PRIx"\n", + addr); + return 0; + } + + val = s->gregs[addr >> 2]; + + trace_sungem_mmio_greg_read(addr, val); + + switch (addr) { + case GREG_STAT: + /* Side effect, clear bottom 7 bits */ + s->gregs[GREG_STAT >> 2] &= ~GREG_STAT_LATCH; + sungem_eval_irq(s); + + /* Inject TX completion in returned value */ + val = (val & ~GREG_STAT_TXNR) | + (s->txdmaregs[TXDMA_TXDONE >> 2] << GREG_STAT_TXNR_SHIFT); + break; + case GREG_STAT2: + /* Return the status reg without side effect + * (and inject TX completion in returned value) + */ + val = (s->gregs[GREG_STAT >> 2] & ~GREG_STAT_TXNR) | + (s->txdmaregs[TXDMA_TXDONE >> 2] << GREG_STAT_TXNR_SHIFT); + break; + } + + return val; +} + +static const MemoryRegionOps sungem_mmio_greg_ops = 
{ + .read = sungem_mmio_greg_read, + .write = sungem_mmio_greg_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .impl = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +static void sungem_mmio_txdma_write(void *opaque, hwaddr addr, uint64_t val, + unsigned size) +{ + SunGEMState *s = opaque; + + if (!(addr < 0x38) && !(addr >= 0x100 && addr <= 0x118)) { + qemu_log_mask(LOG_GUEST_ERROR, + "Write to unknown TXDMA register 0x%"HWADDR_PRIx"\n", + addr); + return; + } + + trace_sungem_mmio_txdma_write(addr, val); + + /* Pre-write filter */ + switch (addr) { + /* Read only registers */ + case TXDMA_TXDONE: + case TXDMA_PCNT: + case TXDMA_SMACHINE: + case TXDMA_DPLOW: + case TXDMA_DPHI: + case TXDMA_FSZ: + case TXDMA_FTAG: + return; /* No actual write */ + } + + s->txdmaregs[addr >> 2] = val; + + /* Post write action */ + switch (addr) { + case TXDMA_KICK: + sungem_tx_kick(s); + break; + case TXDMA_CFG: + sungem_update_masks(s); + break; + } +} + +static uint64_t sungem_mmio_txdma_read(void *opaque, hwaddr addr, unsigned size) +{ + SunGEMState *s = opaque; + uint32_t val; + + if (!(addr < 0x38) && !(addr >= 0x100 && addr <= 0x118)) { + qemu_log_mask(LOG_GUEST_ERROR, + "Read from unknown TXDMA register 0x%"HWADDR_PRIx"\n", + addr); + return 0; + } + + val = s->txdmaregs[addr >> 2]; + + trace_sungem_mmio_txdma_read(addr, val); + + return val; +} + +static const MemoryRegionOps sungem_mmio_txdma_ops = { + .read = sungem_mmio_txdma_read, + .write = sungem_mmio_txdma_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .impl = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +static void sungem_mmio_rxdma_write(void *opaque, hwaddr addr, uint64_t val, + unsigned size) +{ + SunGEMState *s = opaque; + + if (!(addr <= 0x28) && !(addr >= 0x100 && addr <= 0x120)) { + qemu_log_mask(LOG_GUEST_ERROR, + "Write to unknown RXDMA register 0x%"HWADDR_PRIx"\n", + addr); + return; + } + + trace_sungem_mmio_rxdma_write(addr, val); + + /* Pre-write filter */ + switch (addr) { + /* Read only registers */ + case RXDMA_DONE: + case RXDMA_PCNT: + case RXDMA_SMACHINE: + case RXDMA_DPLOW: + case RXDMA_DPHI: + case RXDMA_FSZ: + case RXDMA_FTAG: + return; /* No actual write */ + } + + s->rxdmaregs[addr >> 2] = val; + + /* Post write action */ + switch (addr) { + case RXDMA_KICK: + trace_sungem_rx_kick(val); + break; + case RXDMA_CFG: + sungem_update_masks(s); + if ((s->macregs[MAC_RXCFG >> 2] & MAC_RXCFG_ENAB) != 0 && + (s->rxdmaregs[RXDMA_CFG >> 2] & RXDMA_CFG_ENABLE) != 0) { + qemu_flush_queued_packets(qemu_get_queue(s->nic)); + } + break; + } +} + +static uint64_t sungem_mmio_rxdma_read(void *opaque, hwaddr addr, unsigned size) +{ + SunGEMState *s = opaque; + uint32_t val; + + if (!(addr <= 0x28) && !(addr >= 0x100 && addr <= 0x120)) { + qemu_log_mask(LOG_GUEST_ERROR, + "Read from unknown RXDMA register 0x%"HWADDR_PRIx"\n", + addr); + return 0; + } + + val = s->rxdmaregs[addr >> 2]; + + trace_sungem_mmio_rxdma_read(addr, val); + + return val; +} + +static const MemoryRegionOps sungem_mmio_rxdma_ops = { + .read = sungem_mmio_rxdma_read, + .write = sungem_mmio_rxdma_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .impl = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +static void sungem_mmio_mac_write(void *opaque, hwaddr addr, uint64_t val, + unsigned size) +{ + SunGEMState *s = opaque; + + if (!(addr <= 0x134)) { + qemu_log_mask(LOG_GUEST_ERROR, + "Write to unknown MAC register 0x%"HWADDR_PRIx"\n", + addr); + return; + } + + trace_sungem_mmio_mac_write(addr, val); + + /* Pre-write filter */ + 
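Every bank in this model (GREG, TXDMA, RXDMA, MAC, MIF, PCS) handles writes with the same three steps visible above and below: a pre-write filter that discards writes to read-only registers and masks reserved bits, the store into the shadow array, then post-write side effects keyed on the same offset. A condensed sketch of the pattern, with hypothetical offsets:

#include <stdint.h>

struct bank { uint32_t regs[64]; };

static void bank_write(struct bank *b, uint32_t addr, uint32_t val)
{
    switch (addr) {
    case 0x00:                  /* read-only register: drop the write */
        return;
    case 0x04:                  /* reserved bits are masked before storing */
        val &= 0x7;
        break;
    }

    b->regs[addr >> 2] = val;   /* the store */

    switch (addr) {
    case 0x08:                  /* post-write action, e.g. re-evaluate IRQs */
        /* eval_irq(b); */
        break;
    }
}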
switch (addr) { + /* Read only registers */ + case MAC_TXRST: /* Not technically read-only but will do for now */ + case MAC_RXRST: /* Not technically read-only but will do for now */ + case MAC_TXSTAT: + case MAC_RXSTAT: + case MAC_CSTAT: + case MAC_PATMPS: + case MAC_SMACHINE: + return; /* No actual write */ + case MAC_MINFSZ: + /* 10-bits implemented */ + val &= 0x3ff; + break; + } + + s->macregs[addr >> 2] = val; + + /* Post write action */ + switch (addr) { + case MAC_TXMASK: + case MAC_RXMASK: + case MAC_MCMASK: + sungem_eval_cascade_irq(s); + break; + case MAC_RXCFG: + sungem_update_masks(s); + if ((s->macregs[MAC_RXCFG >> 2] & MAC_RXCFG_ENAB) != 0 && + (s->rxdmaregs[RXDMA_CFG >> 2] & RXDMA_CFG_ENABLE) != 0) { + qemu_flush_queued_packets(qemu_get_queue(s->nic)); + } + break; + } +} + +static uint64_t sungem_mmio_mac_read(void *opaque, hwaddr addr, unsigned size) +{ + SunGEMState *s = opaque; + uint32_t val; + + if (!(addr <= 0x134)) { + qemu_log_mask(LOG_GUEST_ERROR, + "Read from unknown MAC register 0x%"HWADDR_PRIx"\n", + addr); + return 0; + } + + val = s->macregs[addr >> 2]; + + trace_sungem_mmio_mac_read(addr, val); + + switch (addr) { + case MAC_TXSTAT: + /* Side effect, clear all */ + s->macregs[addr >> 2] = 0; + sungem_update_status(s, GREG_STAT_TXMAC, false); + break; + case MAC_RXSTAT: + /* Side effect, clear all */ + s->macregs[addr >> 2] = 0; + sungem_update_status(s, GREG_STAT_RXMAC, false); + break; + case MAC_CSTAT: + /* Side effect, interrupt bits */ + s->macregs[addr >> 2] &= MAC_CSTAT_PTR; + sungem_update_status(s, GREG_STAT_MAC, false); + break; + } + + return val; +} + +static const MemoryRegionOps sungem_mmio_mac_ops = { + .read = sungem_mmio_mac_read, + .write = sungem_mmio_mac_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .impl = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +static void sungem_mmio_mif_write(void *opaque, hwaddr addr, uint64_t val, + unsigned size) +{ + SunGEMState *s = opaque; + + if (!(addr <= 0x1c)) { + qemu_log_mask(LOG_GUEST_ERROR, + "Write to unknown MIF register 0x%"HWADDR_PRIx"\n", + addr); + return; + } + + trace_sungem_mmio_mif_write(addr, val); + + /* Pre-write filter */ + switch (addr) { + /* Read only registers */ + case MIF_STATUS: + case MIF_SMACHINE: + return; /* No actual write */ + case MIF_CFG: + /* Maintain the RO MDI bits to advertize an MDIO PHY on MDI0 */ + val &= ~MIF_CFG_MDI1; + val |= MIF_CFG_MDI0; + break; + } + + s->mifregs[addr >> 2] = val; + + /* Post write action */ + switch (addr) { + case MIF_FRAME: + s->mifregs[addr >> 2] = sungem_mii_op(s, val); + break; + } +} + +static uint64_t sungem_mmio_mif_read(void *opaque, hwaddr addr, unsigned size) +{ + SunGEMState *s = opaque; + uint32_t val; + + if (!(addr <= 0x1c)) { + qemu_log_mask(LOG_GUEST_ERROR, + "Read from unknown MIF register 0x%"HWADDR_PRIx"\n", + addr); + return 0; + } + + val = s->mifregs[addr >> 2]; + + trace_sungem_mmio_mif_read(addr, val); + + return val; +} + +static const MemoryRegionOps sungem_mmio_mif_ops = { + .read = sungem_mmio_mif_read, + .write = sungem_mmio_mif_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .impl = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +static void sungem_mmio_pcs_write(void *opaque, hwaddr addr, uint64_t val, + unsigned size) +{ + SunGEMState *s = opaque; + + if (!(addr <= 0x18) && !(addr >= 0x50 && addr <= 0x5c)) { + qemu_log_mask(LOG_GUEST_ERROR, + "Write to unknown PCS register 0x%"HWADDR_PRIx"\n", + addr); + return; + } + + trace_sungem_mmio_pcs_write(addr, val); + + /* Pre-write 
filter */ + switch (addr) { + /* Read only registers */ + case PCS_MIISTAT: + case PCS_ISTAT: + case PCS_SSTATE: + return; /* No actual write */ + } + + s->pcsregs[addr >> 2] = val; +} + +static uint64_t sungem_mmio_pcs_read(void *opaque, hwaddr addr, unsigned size) +{ + SunGEMState *s = opaque; + uint32_t val; + + if (!(addr <= 0x18) && !(addr >= 0x50 && addr <= 0x5c)) { + qemu_log_mask(LOG_GUEST_ERROR, + "Read from unknown PCS register 0x%"HWADDR_PRIx"\n", + addr); + return 0; + } + + val = s->pcsregs[addr >> 2]; + + trace_sungem_mmio_pcs_read(addr, val); + + return val; +} + +static const MemoryRegionOps sungem_mmio_pcs_ops = { + .read = sungem_mmio_pcs_read, + .write = sungem_mmio_pcs_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .impl = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +static void sungem_uninit(PCIDevice *dev) +{ + SunGEMState *s = SUNGEM(dev); + + qemu_del_nic(s->nic); +} + +static NetClientInfo net_sungem_info = { + .type = NET_CLIENT_DRIVER_NIC, + .size = sizeof(NICState), + .can_receive = sungem_can_receive, + .receive = sungem_receive, + .link_status_changed = sungem_set_link_status, +}; + +static void sungem_realize(PCIDevice *pci_dev, Error **errp) +{ + DeviceState *dev = DEVICE(pci_dev); + SunGEMState *s = SUNGEM(pci_dev); + uint8_t *pci_conf; + + pci_conf = pci_dev->config; + + pci_set_word(pci_conf + PCI_STATUS, + PCI_STATUS_FAST_BACK | + PCI_STATUS_DEVSEL_MEDIUM | + PCI_STATUS_66MHZ); + + pci_set_word(pci_conf + PCI_SUBSYSTEM_VENDOR_ID, 0x0); + pci_set_word(pci_conf + PCI_SUBSYSTEM_ID, 0x0); + + pci_conf[PCI_INTERRUPT_PIN] = 1; /* interrupt pin A */ + pci_conf[PCI_MIN_GNT] = 0x40; + pci_conf[PCI_MAX_LAT] = 0x40; + + sungem_reset_all(s, true); + memory_region_init(&s->sungem, OBJECT(s), "sungem", SUNGEM_MMIO_SIZE); + + memory_region_init_io(&s->greg, OBJECT(s), &sungem_mmio_greg_ops, s, + "sungem.greg", SUNGEM_MMIO_GREG_SIZE); + memory_region_add_subregion(&s->sungem, 0, &s->greg); + + memory_region_init_io(&s->txdma, OBJECT(s), &sungem_mmio_txdma_ops, s, + "sungem.txdma", SUNGEM_MMIO_TXDMA_SIZE); + memory_region_add_subregion(&s->sungem, 0x2000, &s->txdma); + + memory_region_init_io(&s->rxdma, OBJECT(s), &sungem_mmio_rxdma_ops, s, + "sungem.rxdma", SUNGEM_MMIO_RXDMA_SIZE); + memory_region_add_subregion(&s->sungem, 0x4000, &s->rxdma); + + memory_region_init_io(&s->mac, OBJECT(s), &sungem_mmio_mac_ops, s, + "sungem.mac", SUNGEM_MMIO_MAC_SIZE); + memory_region_add_subregion(&s->sungem, 0x6000, &s->mac); + + memory_region_init_io(&s->mif, OBJECT(s), &sungem_mmio_mif_ops, s, + "sungem.mif", SUNGEM_MMIO_MIF_SIZE); + memory_region_add_subregion(&s->sungem, 0x6200, &s->mif); + + memory_region_init_io(&s->pcs, OBJECT(s), &sungem_mmio_pcs_ops, s, + "sungem.pcs", SUNGEM_MMIO_PCS_SIZE); + memory_region_add_subregion(&s->sungem, 0x9000, &s->pcs); + + pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &s->sungem); + + qemu_macaddr_default_if_unset(&s->conf.macaddr); + s->nic = qemu_new_nic(&net_sungem_info, &s->conf, + object_get_typename(OBJECT(dev)), + dev->id, s); + qemu_format_nic_info_str(qemu_get_queue(s->nic), + s->conf.macaddr.a); +} + +static void sungem_reset(DeviceState *dev) +{ + SunGEMState *s = SUNGEM(dev); + + sungem_reset_all(s, true); +} + +static void sungem_instance_init(Object *obj) +{ + SunGEMState *s = SUNGEM(obj); + + device_add_bootindex_property(obj, &s->conf.bootindex, + "bootindex", "/ethernet-phy@0", + DEVICE(obj)); +} + +static Property sungem_properties[] = { + DEFINE_NIC_PROPERTIES(SunGEMState, conf), + /* Phy address 
should be 0 for most Apple machines except + * for K2 in which case it's 1. Will be set by a machine + * override. + */ + DEFINE_PROP_UINT32("phy_addr", SunGEMState, phy_addr, 0), + DEFINE_PROP_END_OF_LIST(), +}; + +static const VMStateDescription vmstate_sungem = { + .name = "sungem", + .version_id = 0, + .minimum_version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_PCI_DEVICE(pdev, SunGEMState), + VMSTATE_MACADDR(conf.macaddr, SunGEMState), + VMSTATE_UINT32(phy_addr, SunGEMState), + VMSTATE_UINT32_ARRAY(gregs, SunGEMState, (SUNGEM_MMIO_GREG_SIZE >> 2)), + VMSTATE_UINT32_ARRAY(txdmaregs, SunGEMState, + (SUNGEM_MMIO_TXDMA_SIZE >> 2)), + VMSTATE_UINT32_ARRAY(rxdmaregs, SunGEMState, + (SUNGEM_MMIO_RXDMA_SIZE >> 2)), + VMSTATE_UINT32_ARRAY(macregs, SunGEMState, (SUNGEM_MMIO_MAC_SIZE >> 2)), + VMSTATE_UINT32_ARRAY(mifregs, SunGEMState, (SUNGEM_MMIO_MIF_SIZE >> 2)), + VMSTATE_UINT32_ARRAY(pcsregs, SunGEMState, (SUNGEM_MMIO_PCS_SIZE >> 2)), + VMSTATE_UINT32(rx_mask, SunGEMState), + VMSTATE_UINT32(tx_mask, SunGEMState), + VMSTATE_UINT8_ARRAY(tx_data, SunGEMState, MAX_PACKET_SIZE), + VMSTATE_UINT32(tx_size, SunGEMState), + VMSTATE_UINT64(tx_first_ctl, SunGEMState), + VMSTATE_END_OF_LIST() + } +}; + +static void sungem_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + + k->realize = sungem_realize; + k->exit = sungem_uninit; + k->vendor_id = PCI_VENDOR_ID_APPLE; + k->device_id = PCI_DEVICE_ID_APPLE_UNI_N_GMAC; + k->revision = 0x01; + k->class_id = PCI_CLASS_NETWORK_ETHERNET; + dc->vmsd = &vmstate_sungem; + dc->reset = sungem_reset; + device_class_set_props(dc, sungem_properties); + set_bit(DEVICE_CATEGORY_NETWORK, dc->categories); +} + +static const TypeInfo sungem_info = { + .name = TYPE_SUNGEM, + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(SunGEMState), + .class_init = sungem_class_init, + .instance_init = sungem_instance_init, + .interfaces = (InterfaceInfo[]) { + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { } + } +}; + +static void sungem_register_types(void) +{ + type_register_static(&sungem_info); +} + +type_init(sungem_register_types) diff --git a/hw/net/sunhme.c b/hw/net/sunhme.c new file mode 100644 index 000000000..fc34905f8 --- /dev/null +++ b/hw/net/sunhme.c @@ -0,0 +1,983 @@ +/* + * QEMU Sun Happy Meal Ethernet emulation + * + * Copyright (c) 2017 Mark Cave-Ayland + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include "hw/pci/pci.h" +#include "hw/qdev-properties.h" +#include "migration/vmstate.h" +#include "hw/net/mii.h" +#include "net/net.h" +#include "qemu/module.h" +#include "net/checksum.h" +#include "net/eth.h" +#include "sysemu/sysemu.h" +#include "trace.h" +#include "qom/object.h" + +#define HME_REG_SIZE 0x8000 + +#define HME_SEB_REG_SIZE 0x2000 + +#define HME_SEBI_RESET 0x0 +#define HME_SEB_RESET_ETX 0x1 +#define HME_SEB_RESET_ERX 0x2 + +#define HME_SEBI_STAT 0x100 +#define HME_SEBI_STAT_LINUXBUG 0x108 +#define HME_SEB_STAT_RXTOHOST 0x10000 +#define HME_SEB_STAT_NORXD 0x20000 +#define HME_SEB_STAT_MIFIRQ 0x800000 +#define HME_SEB_STAT_HOSTTOTX 0x1000000 +#define HME_SEB_STAT_TXALL 0x2000000 + +#define HME_SEBI_IMASK 0x104 +#define HME_SEBI_IMASK_LINUXBUG 0x10c + +#define HME_ETX_REG_SIZE 0x2000 + +#define HME_ETXI_PENDING 0x0 + +#define HME_ETXI_RING 0x8 +#define HME_ETXI_RING_ADDR 0xffffff00 +#define HME_ETXI_RING_OFFSET 0xff + +#define HME_ETXI_RSIZE 0x2c + +#define HME_ERX_REG_SIZE 0x2000 + +#define HME_ERXI_CFG 0x0 +#define HME_ERX_CFG_RINGSIZE 0x600 +#define HME_ERX_CFG_RINGSIZE_SHIFT 9 +#define HME_ERX_CFG_BYTEOFFSET 0x38 +#define HME_ERX_CFG_BYTEOFFSET_SHIFT 3 +#define HME_ERX_CFG_CSUMSTART 0x7f0000 +#define HME_ERX_CFG_CSUMSHIFT 16 + +#define HME_ERXI_RING 0x4 +#define HME_ERXI_RING_ADDR 0xffffff00 +#define HME_ERXI_RING_OFFSET 0xff + +#define HME_MAC_REG_SIZE 0x1000 + +#define HME_MACI_TXCFG 0x20c +#define HME_MAC_TXCFG_ENABLE 0x1 + +#define HME_MACI_RXCFG 0x30c +#define HME_MAC_RXCFG_ENABLE 0x1 +#define HME_MAC_RXCFG_PMISC 0x40 +#define HME_MAC_RXCFG_HENABLE 0x800 + +#define HME_MACI_MACADDR2 0x318 +#define HME_MACI_MACADDR1 0x31c +#define HME_MACI_MACADDR0 0x320 + +#define HME_MACI_HASHTAB3 0x340 +#define HME_MACI_HASHTAB2 0x344 +#define HME_MACI_HASHTAB1 0x348 +#define HME_MACI_HASHTAB0 0x34c + +#define HME_MIF_REG_SIZE 0x20 + +#define HME_MIFI_FO 0xc +#define HME_MIF_FO_ST 0xc0000000 +#define HME_MIF_FO_ST_SHIFT 30 +#define HME_MIF_FO_OPC 0x30000000 +#define HME_MIF_FO_OPC_SHIFT 28 +#define HME_MIF_FO_PHYAD 0x0f800000 +#define HME_MIF_FO_PHYAD_SHIFT 23 +#define HME_MIF_FO_REGAD 0x007c0000 +#define HME_MIF_FO_REGAD_SHIFT 18 +#define HME_MIF_FO_TAMSB 0x20000 +#define HME_MIF_FO_TALSB 0x10000 +#define HME_MIF_FO_DATA 0xffff + +#define HME_MIFI_CFG 0x10 +#define HME_MIF_CFG_MDI0 0x100 +#define HME_MIF_CFG_MDI1 0x200 + +#define HME_MIFI_IMASK 0x14 + +#define HME_MIFI_STAT 0x18 + + +/* Wired HME PHY addresses */ +#define HME_PHYAD_INTERNAL 1 +#define HME_PHYAD_EXTERNAL 0 + +#define MII_COMMAND_START 0x1 +#define MII_COMMAND_READ 0x2 +#define MII_COMMAND_WRITE 0x1 + +#define TYPE_SUNHME "sunhme" +OBJECT_DECLARE_SIMPLE_TYPE(SunHMEState, SUNHME) + +/* Maximum size of buffer */ +#define HME_FIFO_SIZE 0x800 + +/* Size of TX/RX descriptor */ +#define HME_DESC_SIZE 0x8 + +#define HME_XD_OWN 0x80000000 +#define HME_XD_OFL 0x40000000 +#define HME_XD_SOP 0x40000000 +#define HME_XD_EOP 0x20000000 +#define HME_XD_RXLENMSK 0x3fff0000 +#define HME_XD_RXLENSHIFT 16 +#define HME_XD_RXCKSUM 0xffff +#define HME_XD_TXLENMSK 0x00001fff +#define HME_XD_TXCKSUM 0x10000000 +#define HME_XD_TXCSSTUFF 0xff00000 +#define HME_XD_TXCSSTUFFSHIFT 20 +#define HME_XD_TXCSSTART 0xfc000 +#define HME_XD_TXCSSTARTSHIFT 14 + +#define HME_MII_REGS_SIZE 0x20 + +struct SunHMEState { + /*< private >*/ + PCIDevice parent_obj; + + NICState *nic; + NICConf conf; + + MemoryRegion hme; + MemoryRegion sebreg; + MemoryRegion etxreg; + MemoryRegion erxreg; + MemoryRegion macreg; + MemoryRegion 
mifreg; + + uint32_t sebregs[HME_SEB_REG_SIZE >> 2]; + uint32_t etxregs[HME_ETX_REG_SIZE >> 2]; + uint32_t erxregs[HME_ERX_REG_SIZE >> 2]; + uint32_t macregs[HME_MAC_REG_SIZE >> 2]; + uint32_t mifregs[HME_MIF_REG_SIZE >> 2]; + + uint16_t miiregs[HME_MII_REGS_SIZE]; +}; + +static Property sunhme_properties[] = { + DEFINE_NIC_PROPERTIES(SunHMEState, conf), + DEFINE_PROP_END_OF_LIST(), +}; + +static void sunhme_reset_tx(SunHMEState *s) +{ + /* Indicate TX reset complete */ + s->sebregs[HME_SEBI_RESET] &= ~HME_SEB_RESET_ETX; +} + +static void sunhme_reset_rx(SunHMEState *s) +{ + /* Indicate RX reset complete */ + s->sebregs[HME_SEBI_RESET] &= ~HME_SEB_RESET_ERX; +} + +static void sunhme_update_irq(SunHMEState *s) +{ + PCIDevice *d = PCI_DEVICE(s); + int level; + + /* MIF interrupt mask (16-bit) */ + uint32_t mifmask = ~(s->mifregs[HME_MIFI_IMASK >> 2]) & 0xffff; + uint32_t mif = s->mifregs[HME_MIFI_STAT >> 2] & mifmask; + + /* Main SEB interrupt mask (includes MIF status from above) */ + uint32_t sebmask = ~(s->sebregs[HME_SEBI_IMASK >> 2]) & + ~HME_SEB_STAT_MIFIRQ; + uint32_t seb = s->sebregs[HME_SEBI_STAT >> 2] & sebmask; + if (mif) { + seb |= HME_SEB_STAT_MIFIRQ; + } + + level = (seb ? 1 : 0); + trace_sunhme_update_irq(mifmask, mif, sebmask, seb, level); + + pci_set_irq(d, level); +} + +static void sunhme_seb_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + SunHMEState *s = SUNHME(opaque); + + trace_sunhme_seb_write(addr, val); + + /* Handle buggy Linux drivers before 4.13 which have + the wrong offsets for HME_SEBI_STAT and HME_SEBI_IMASK */ + switch (addr) { + case HME_SEBI_STAT_LINUXBUG: + addr = HME_SEBI_STAT; + break; + case HME_SEBI_IMASK_LINUXBUG: + addr = HME_SEBI_IMASK; + break; + default: + break; + } + + switch (addr) { + case HME_SEBI_RESET: + if (val & HME_SEB_RESET_ETX) { + sunhme_reset_tx(s); + } + if (val & HME_SEB_RESET_ERX) { + sunhme_reset_rx(s); + } + val = s->sebregs[HME_SEBI_RESET >> 2]; + break; + } + + s->sebregs[addr >> 2] = val; +} + +static uint64_t sunhme_seb_read(void *opaque, hwaddr addr, + unsigned size) +{ + SunHMEState *s = SUNHME(opaque); + uint64_t val; + + /* Handle buggy Linux drivers before 4.13 which have + the wrong offsets for HME_SEBI_STAT and HME_SEBI_IMASK */ + switch (addr) { + case HME_SEBI_STAT_LINUXBUG: + addr = HME_SEBI_STAT; + break; + case HME_SEBI_IMASK_LINUXBUG: + addr = HME_SEBI_IMASK; + break; + default: + break; + } + + val = s->sebregs[addr >> 2]; + + switch (addr) { + case HME_SEBI_STAT: + /* Autoclear status (except MIF) */ + s->sebregs[HME_SEBI_STAT >> 2] &= HME_SEB_STAT_MIFIRQ; + sunhme_update_irq(s); + break; + } + + trace_sunhme_seb_read(addr, val); + + return val; +} + +static const MemoryRegionOps sunhme_seb_ops = { + .read = sunhme_seb_read, + .write = sunhme_seb_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +static void sunhme_transmit(SunHMEState *s); + +static void sunhme_etx_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + SunHMEState *s = SUNHME(opaque); + + trace_sunhme_etx_write(addr, val); + + switch (addr) { + case HME_ETXI_PENDING: + if (val) { + sunhme_transmit(s); + } + break; + } + + s->etxregs[addr >> 2] = val; +} + +static uint64_t sunhme_etx_read(void *opaque, hwaddr addr, + unsigned size) +{ + SunHMEState *s = SUNHME(opaque); + uint64_t val; + + val = s->etxregs[addr >> 2]; + + trace_sunhme_etx_read(addr, val); + + return val; +} + +static const MemoryRegionOps sunhme_etx_ops = { + .read = 
sunhme_etx_read, + .write = sunhme_etx_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +static void sunhme_erx_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + SunHMEState *s = SUNHME(opaque); + + trace_sunhme_erx_write(addr, val); + + s->erxregs[addr >> 2] = val; +} + +static uint64_t sunhme_erx_read(void *opaque, hwaddr addr, + unsigned size) +{ + SunHMEState *s = SUNHME(opaque); + uint64_t val; + + val = s->erxregs[addr >> 2]; + + trace_sunhme_erx_read(addr, val); + + return val; +} + +static const MemoryRegionOps sunhme_erx_ops = { + .read = sunhme_erx_read, + .write = sunhme_erx_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +static void sunhme_mac_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + SunHMEState *s = SUNHME(opaque); + uint64_t oldval = s->macregs[addr >> 2]; + + trace_sunhme_mac_write(addr, val); + + s->macregs[addr >> 2] = val; + + switch (addr) { + case HME_MACI_RXCFG: + if (!(oldval & HME_MAC_RXCFG_ENABLE) && + (val & HME_MAC_RXCFG_ENABLE)) { + qemu_flush_queued_packets(qemu_get_queue(s->nic)); + } + break; + } +} + +static uint64_t sunhme_mac_read(void *opaque, hwaddr addr, + unsigned size) +{ + SunHMEState *s = SUNHME(opaque); + uint64_t val; + + val = s->macregs[addr >> 2]; + + trace_sunhme_mac_read(addr, val); + + return val; +} + +static const MemoryRegionOps sunhme_mac_ops = { + .read = sunhme_mac_read, + .write = sunhme_mac_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +static void sunhme_mii_write(SunHMEState *s, uint8_t reg, uint16_t data) +{ + trace_sunhme_mii_write(reg, data); + + switch (reg) { + case MII_BMCR: + if (data & MII_BMCR_RESET) { + /* Autoclear reset bit, enable auto negotiation */ + data &= ~MII_BMCR_RESET; + data |= MII_BMCR_AUTOEN; + } + if (data & MII_BMCR_ANRESTART) { + /* Autoclear auto negotiation restart */ + data &= ~MII_BMCR_ANRESTART; + + /* Indicate negotiation complete */ + s->miiregs[MII_BMSR] |= MII_BMSR_AN_COMP; + + if (!qemu_get_queue(s->nic)->link_down) { + s->miiregs[MII_ANLPAR] |= MII_ANLPAR_TXFD; + s->miiregs[MII_BMSR] |= MII_BMSR_LINK_ST; + } + } + break; + } + + s->miiregs[reg] = data; +} + +static uint16_t sunhme_mii_read(SunHMEState *s, uint8_t reg) +{ + uint16_t data = s->miiregs[reg]; + + trace_sunhme_mii_read(reg, data); + + return data; +} + +static void sunhme_mif_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + SunHMEState *s = SUNHME(opaque); + uint8_t cmd, reg; + uint16_t data; + + trace_sunhme_mif_write(addr, val); + + switch (addr) { + case HME_MIFI_CFG: + /* Mask the read-only bits */ + val &= ~(HME_MIF_CFG_MDI0 | HME_MIF_CFG_MDI1); + val |= s->mifregs[HME_MIFI_CFG >> 2] & + (HME_MIF_CFG_MDI0 | HME_MIF_CFG_MDI1); + break; + case HME_MIFI_FO: + /* Detect start of MII command */ + if ((val & HME_MIF_FO_ST) >> HME_MIF_FO_ST_SHIFT + != MII_COMMAND_START) { + val |= HME_MIF_FO_TALSB; + break; + } + + /* Internal phy only */ + if ((val & HME_MIF_FO_PHYAD) >> HME_MIF_FO_PHYAD_SHIFT + != HME_PHYAD_INTERNAL) { + val |= HME_MIF_FO_TALSB; + break; + } + + cmd = (val & HME_MIF_FO_OPC) >> HME_MIF_FO_OPC_SHIFT; + reg = (val & HME_MIF_FO_REGAD) >> HME_MIF_FO_REGAD_SHIFT; + data = (val & HME_MIF_FO_DATA); + + switch (cmd) { + case MII_COMMAND_WRITE: + sunhme_mii_write(s, reg, data); + break; + + case MII_COMMAND_READ: + val &= ~HME_MIF_FO_DATA; + val |= 
sunhme_mii_read(s, reg); + break; + } + + val |= HME_MIF_FO_TALSB; + break; + } + + s->mifregs[addr >> 2] = val; +} + +static uint64_t sunhme_mif_read(void *opaque, hwaddr addr, + unsigned size) +{ + SunHMEState *s = SUNHME(opaque); + uint64_t val; + + val = s->mifregs[addr >> 2]; + + switch (addr) { + case HME_MIFI_STAT: + /* Autoclear MIF interrupt status */ + s->mifregs[HME_MIFI_STAT >> 2] = 0; + sunhme_update_irq(s); + break; + } + + trace_sunhme_mif_read(addr, val); + + return val; +} + +static const MemoryRegionOps sunhme_mif_ops = { + .read = sunhme_mif_read, + .write = sunhme_mif_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +static void sunhme_transmit_frame(SunHMEState *s, uint8_t *buf, int size) +{ + qemu_send_packet(qemu_get_queue(s->nic), buf, size); +} + +static inline int sunhme_get_tx_ring_count(SunHMEState *s) +{ + return (s->etxregs[HME_ETXI_RSIZE >> 2] + 1) << 4; +} + +static inline int sunhme_get_tx_ring_nr(SunHMEState *s) +{ + return s->etxregs[HME_ETXI_RING >> 2] & HME_ETXI_RING_OFFSET; +} + +static inline void sunhme_set_tx_ring_nr(SunHMEState *s, int i) +{ + uint32_t ring = s->etxregs[HME_ETXI_RING >> 2] & ~HME_ETXI_RING_OFFSET; + ring |= i & HME_ETXI_RING_OFFSET; + + s->etxregs[HME_ETXI_RING >> 2] = ring; +} + +static void sunhme_transmit(SunHMEState *s) +{ + PCIDevice *d = PCI_DEVICE(s); + dma_addr_t tb, addr; + uint32_t intstatus, status, buffer, sum = 0; + int cr, nr, len, xmit_pos, csum_offset = 0, csum_stuff_offset = 0; + uint16_t csum = 0; + uint8_t xmit_buffer[HME_FIFO_SIZE]; + + tb = s->etxregs[HME_ETXI_RING >> 2] & HME_ETXI_RING_ADDR; + nr = sunhme_get_tx_ring_count(s); + cr = sunhme_get_tx_ring_nr(s); + + pci_dma_read(d, tb + cr * HME_DESC_SIZE, &status, 4); + pci_dma_read(d, tb + cr * HME_DESC_SIZE + 4, &buffer, 4); + + xmit_pos = 0; + while (status & HME_XD_OWN) { + trace_sunhme_tx_desc(buffer, status, cr, nr); + + /* Copy data into transmit buffer */ + addr = buffer; + len = status & HME_XD_TXLENMSK; + + if (xmit_pos + len > HME_FIFO_SIZE) { + len = HME_FIFO_SIZE - xmit_pos; + } + + pci_dma_read(d, addr, &xmit_buffer[xmit_pos], len); + xmit_pos += len; + + /* Detect start of packet for TX checksum */ + if (status & HME_XD_SOP) { + sum = 0; + csum_offset = (status & HME_XD_TXCSSTART) >> HME_XD_TXCSSTARTSHIFT; + csum_stuff_offset = (status & HME_XD_TXCSSTUFF) >> + HME_XD_TXCSSTUFFSHIFT; + } + + if (status & HME_XD_TXCKSUM) { + /* Only start calculation from csum_offset */ + if (xmit_pos - len <= csum_offset && xmit_pos > csum_offset) { + sum += net_checksum_add(xmit_pos - csum_offset, + xmit_buffer + csum_offset); + trace_sunhme_tx_xsum_add(csum_offset, xmit_pos - csum_offset); + } else { + sum += net_checksum_add(len, xmit_buffer + xmit_pos - len); + trace_sunhme_tx_xsum_add(xmit_pos - len, len); + } + } + + /* Detect end of packet for TX checksum */ + if (status & HME_XD_EOP) { + /* Stuff the checksum if required */ + if (status & HME_XD_TXCKSUM) { + csum = net_checksum_finish(sum); + stw_be_p(xmit_buffer + csum_stuff_offset, csum); + trace_sunhme_tx_xsum_stuff(csum, csum_stuff_offset); + } + + if (s->macregs[HME_MACI_TXCFG >> 2] & HME_MAC_TXCFG_ENABLE) { + sunhme_transmit_frame(s, xmit_buffer, xmit_pos); + trace_sunhme_tx_done(xmit_pos); + } + } + + /* Update status */ + status &= ~HME_XD_OWN; + pci_dma_write(d, tb + cr * HME_DESC_SIZE, &status, 4); + + /* Move onto next descriptor */ + cr++; + if (cr >= nr) { + cr = 0; + } + sunhme_set_tx_ring_nr(s, cr); + + pci_dma_read(d, tb + 
cr * HME_DESC_SIZE, &status, 4); + pci_dma_read(d, tb + cr * HME_DESC_SIZE + 4, &buffer, 4); + + /* Indicate TX complete */ + intstatus = s->sebregs[HME_SEBI_STAT >> 2]; + intstatus |= HME_SEB_STAT_HOSTTOTX; + s->sebregs[HME_SEBI_STAT >> 2] = intstatus; + + /* Autoclear TX pending */ + s->etxregs[HME_ETXI_PENDING >> 2] = 0; + + sunhme_update_irq(s); + } + + /* TX FIFO now clear */ + intstatus = s->sebregs[HME_SEBI_STAT >> 2]; + intstatus |= HME_SEB_STAT_TXALL; + s->sebregs[HME_SEBI_STAT >> 2] = intstatus; + sunhme_update_irq(s); +} + +static bool sunhme_can_receive(NetClientState *nc) +{ + SunHMEState *s = qemu_get_nic_opaque(nc); + + return !!(s->macregs[HME_MACI_RXCFG >> 2] & HME_MAC_RXCFG_ENABLE); +} + +static void sunhme_link_status_changed(NetClientState *nc) +{ + SunHMEState *s = qemu_get_nic_opaque(nc); + + if (nc->link_down) { + s->miiregs[MII_ANLPAR] &= ~MII_ANLPAR_TXFD; + s->miiregs[MII_BMSR] &= ~MII_BMSR_LINK_ST; + } else { + s->miiregs[MII_ANLPAR] |= MII_ANLPAR_TXFD; + s->miiregs[MII_BMSR] |= MII_BMSR_LINK_ST; + } + + /* Exact bits unknown */ + s->mifregs[HME_MIFI_STAT >> 2] = 0xffff; + sunhme_update_irq(s); +} + +static inline int sunhme_get_rx_ring_count(SunHMEState *s) +{ + uint32_t rings = (s->erxregs[HME_ERXI_CFG >> 2] & HME_ERX_CFG_RINGSIZE) + >> HME_ERX_CFG_RINGSIZE_SHIFT; + + switch (rings) { + case 0: + return 32; + case 1: + return 64; + case 2: + return 128; + case 3: + return 256; + } + + return 0; +} + +static inline int sunhme_get_rx_ring_nr(SunHMEState *s) +{ + return s->erxregs[HME_ERXI_RING >> 2] & HME_ERXI_RING_OFFSET; +} + +static inline void sunhme_set_rx_ring_nr(SunHMEState *s, int i) +{ + uint32_t ring = s->erxregs[HME_ERXI_RING >> 2] & ~HME_ERXI_RING_OFFSET; + ring |= i & HME_ERXI_RING_OFFSET; + + s->erxregs[HME_ERXI_RING >> 2] = ring; +} + +#define MIN_BUF_SIZE 60 + +static ssize_t sunhme_receive(NetClientState *nc, const uint8_t *buf, + size_t size) +{ + SunHMEState *s = qemu_get_nic_opaque(nc); + PCIDevice *d = PCI_DEVICE(s); + dma_addr_t rb, addr; + uint32_t intstatus, status, buffer, buffersize, sum; + uint16_t csum; + uint8_t buf1[60]; + int nr, cr, len, rxoffset, csum_offset; + + trace_sunhme_rx_incoming(size); + + /* Do nothing if MAC RX disabled */ + if (!(s->macregs[HME_MACI_RXCFG >> 2] & HME_MAC_RXCFG_ENABLE)) { + return 0; + } + + trace_sunhme_rx_filter_destmac(buf[0], buf[1], buf[2], + buf[3], buf[4], buf[5]); + + /* Check destination MAC address */ + if (!(s->macregs[HME_MACI_RXCFG >> 2] & HME_MAC_RXCFG_PMISC)) { + /* Try and match local MAC address */ + if (((s->macregs[HME_MACI_MACADDR0 >> 2] & 0xff00) >> 8) == buf[0] && + (s->macregs[HME_MACI_MACADDR0 >> 2] & 0xff) == buf[1] && + ((s->macregs[HME_MACI_MACADDR1 >> 2] & 0xff00) >> 8) == buf[2] && + (s->macregs[HME_MACI_MACADDR1 >> 2] & 0xff) == buf[3] && + ((s->macregs[HME_MACI_MACADDR2 >> 2] & 0xff00) >> 8) == buf[4] && + (s->macregs[HME_MACI_MACADDR2 >> 2] & 0xff) == buf[5]) { + /* Matched local MAC address */ + trace_sunhme_rx_filter_local_match(); + } else if (buf[0] == 0xff && buf[1] == 0xff && buf[2] == 0xff && + buf[3] == 0xff && buf[4] == 0xff && buf[5] == 0xff) { + /* Matched broadcast address */ + trace_sunhme_rx_filter_bcast_match(); + } else if (s->macregs[HME_MACI_RXCFG >> 2] & HME_MAC_RXCFG_HENABLE) { + /* Didn't match local address, check hash filter */ + int mcast_idx = net_crc32_le(buf, ETH_ALEN) >> 26; + if (!(s->macregs[(HME_MACI_HASHTAB0 >> 2) - (mcast_idx >> 4)] & + (1 << (mcast_idx & 0xf)))) { + /* Didn't match hash filter */ + trace_sunhme_rx_filter_hash_nomatch(); + 
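+ /* The top six bits of the little-endian CRC32 of the destination + * MAC select one bit of the 64-bit hash table spread across the + * four HASHTAB registers; the bit for this frame is clear, so fall + * through and reject it just like a unicast mismatch. */ 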
trace_sunhme_rx_filter_reject(); + return -1; + } else { + trace_sunhme_rx_filter_hash_match(); + } + } else { + /* Not for us */ + trace_sunhme_rx_filter_reject(); + return -1; + } + } else { + trace_sunhme_rx_filter_promisc_match(); + } + + trace_sunhme_rx_filter_accept(); + + /* If the frame is too small, pad it out to the minimum size */ + if (size < MIN_BUF_SIZE) { + memcpy(buf1, buf, size); + memset(buf1 + size, 0, MIN_BUF_SIZE - size); + buf = buf1; + size = MIN_BUF_SIZE; + } + + rb = s->erxregs[HME_ERXI_RING >> 2] & HME_ERXI_RING_ADDR; + nr = sunhme_get_rx_ring_count(s); + cr = sunhme_get_rx_ring_nr(s); + + pci_dma_read(d, rb + cr * HME_DESC_SIZE, &status, 4); + pci_dma_read(d, rb + cr * HME_DESC_SIZE + 4, &buffer, 4); + + /* If we don't own the current descriptor, indicate an overflow error */ + if (!(status & HME_XD_OWN)) { + s->sebregs[HME_SEBI_STAT >> 2] |= HME_SEB_STAT_NORXD; + sunhme_update_irq(s); + trace_sunhme_rx_norxd(); + return -1; + } + + rxoffset = (s->erxregs[HME_ERXI_CFG >> 2] & HME_ERX_CFG_BYTEOFFSET) >> + HME_ERX_CFG_BYTEOFFSET_SHIFT; + + addr = buffer + rxoffset; + buffersize = (status & HME_XD_RXLENMSK) >> HME_XD_RXLENSHIFT; + + /* Detect receive overflow */ + len = size; + if (size > buffersize) { + status |= HME_XD_OFL; + len = buffersize; + } + + pci_dma_write(d, addr, buf, len); + + trace_sunhme_rx_desc(buffer, rxoffset, status, len, cr, nr); + + /* Calculate the receive checksum */ + csum_offset = (s->erxregs[HME_ERXI_CFG >> 2] & HME_ERX_CFG_CSUMSTART) >> + HME_ERX_CFG_CSUMSHIFT << 1; + sum = 0; + sum += net_checksum_add(len - csum_offset, (uint8_t *)buf + csum_offset); + csum = net_checksum_finish(sum); + + trace_sunhme_rx_xsum_calc(csum); + + /* Update status */ + status &= ~HME_XD_OWN; + status &= ~HME_XD_RXLENMSK; + status |= len << HME_XD_RXLENSHIFT; + status &= ~HME_XD_RXCKSUM; + status |= csum; + + pci_dma_write(d, rb + cr * HME_DESC_SIZE, &status, 4); + + cr++; + if (cr >= nr) { + cr = 0; + } + + sunhme_set_rx_ring_nr(s, cr); + + /* Indicate RX complete */ + intstatus = s->sebregs[HME_SEBI_STAT >> 2]; + intstatus |= HME_SEB_STAT_RXTOHOST; + s->sebregs[HME_SEBI_STAT >> 2] = intstatus; + + sunhme_update_irq(s); + + return len; +} + +static NetClientInfo net_sunhme_info = { + .type = NET_CLIENT_DRIVER_NIC, + .size = sizeof(NICState), + .can_receive = sunhme_can_receive, + .receive = sunhme_receive, + .link_status_changed = sunhme_link_status_changed, +}; + +static void sunhme_realize(PCIDevice *pci_dev, Error **errp) +{ + SunHMEState *s = SUNHME(pci_dev); + DeviceState *d = DEVICE(pci_dev); + uint8_t *pci_conf; + + pci_conf = pci_dev->config; + pci_conf[PCI_INTERRUPT_PIN] = 1; /* interrupt pin A */ + + memory_region_init(&s->hme, OBJECT(pci_dev), "sunhme", HME_REG_SIZE); + pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &s->hme); + + memory_region_init_io(&s->sebreg, OBJECT(pci_dev), &sunhme_seb_ops, s, + "sunhme.seb", HME_SEB_REG_SIZE); + memory_region_add_subregion(&s->hme, 0, &s->sebreg); + + memory_region_init_io(&s->etxreg, OBJECT(pci_dev), &sunhme_etx_ops, s, + "sunhme.etx", HME_ETX_REG_SIZE); + memory_region_add_subregion(&s->hme, 0x2000, &s->etxreg); + + memory_region_init_io(&s->erxreg, OBJECT(pci_dev), &sunhme_erx_ops, s, + "sunhme.erx", HME_ERX_REG_SIZE); + memory_region_add_subregion(&s->hme, 0x4000, &s->erxreg); + + memory_region_init_io(&s->macreg, OBJECT(pci_dev), &sunhme_mac_ops, s, + "sunhme.mac", HME_MAC_REG_SIZE); + memory_region_add_subregion(&s->hme, 0x6000, &s->macreg); + + memory_region_init_io(&s->mifreg, OBJECT(pci_dev), 
&sunhme_mif_ops, s, + "sunhme.mif", HME_MIF_REG_SIZE); + memory_region_add_subregion(&s->hme, 0x7000, &s->mifreg); + + qemu_macaddr_default_if_unset(&s->conf.macaddr); + s->nic = qemu_new_nic(&net_sunhme_info, &s->conf, + object_get_typename(OBJECT(d)), d->id, s); + qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a); +} + +static void sunhme_instance_init(Object *obj) +{ + SunHMEState *s = SUNHME(obj); + + device_add_bootindex_property(obj, &s->conf.bootindex, + "bootindex", "/ethernet-phy@0", + DEVICE(obj)); +} + +static void sunhme_reset(DeviceState *ds) +{ + SunHMEState *s = SUNHME(ds); + + /* Configure internal transceiver */ + s->mifregs[HME_MIFI_CFG >> 2] |= HME_MIF_CFG_MDI0; + + /* Advertise auto, 100Mbps FD */ + s->miiregs[MII_ANAR] = MII_ANAR_TXFD; + s->miiregs[MII_BMSR] = MII_BMSR_AUTONEG | MII_BMSR_100TX_FD | + MII_BMSR_AN_COMP; + + if (!qemu_get_queue(s->nic)->link_down) { + s->miiregs[MII_ANLPAR] |= MII_ANLPAR_TXFD; + s->miiregs[MII_BMSR] |= MII_BMSR_LINK_ST; + } + + /* Set manufacturer */ + s->miiregs[MII_PHYID1] = DP83840_PHYID1; + s->miiregs[MII_PHYID2] = DP83840_PHYID2; + + /* Configure default interrupt mask */ + s->mifregs[HME_MIFI_IMASK >> 2] = 0xffff; + s->sebregs[HME_SEBI_IMASK >> 2] = 0xff7fffff; +} + +static const VMStateDescription vmstate_hme = { + .name = "sunhme", + .version_id = 0, + .minimum_version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_PCI_DEVICE(parent_obj, SunHMEState), + VMSTATE_MACADDR(conf.macaddr, SunHMEState), + VMSTATE_UINT32_ARRAY(sebregs, SunHMEState, (HME_SEB_REG_SIZE >> 2)), + VMSTATE_UINT32_ARRAY(etxregs, SunHMEState, (HME_ETX_REG_SIZE >> 2)), + VMSTATE_UINT32_ARRAY(erxregs, SunHMEState, (HME_ERX_REG_SIZE >> 2)), + VMSTATE_UINT32_ARRAY(macregs, SunHMEState, (HME_MAC_REG_SIZE >> 2)), + VMSTATE_UINT32_ARRAY(mifregs, SunHMEState, (HME_MIF_REG_SIZE >> 2)), + VMSTATE_UINT16_ARRAY(miiregs, SunHMEState, HME_MII_REGS_SIZE), + VMSTATE_END_OF_LIST() + } +}; + +static void sunhme_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + + k->realize = sunhme_realize; + k->vendor_id = PCI_VENDOR_ID_SUN; + k->device_id = PCI_DEVICE_ID_SUN_HME; + k->class_id = PCI_CLASS_NETWORK_ETHERNET; + dc->vmsd = &vmstate_hme; + dc->reset = sunhme_reset; + device_class_set_props(dc, sunhme_properties); + set_bit(DEVICE_CATEGORY_NETWORK, dc->categories); +} + +static const TypeInfo sunhme_info = { + .name = TYPE_SUNHME, + .parent = TYPE_PCI_DEVICE, + .class_init = sunhme_class_init, + .instance_size = sizeof(SunHMEState), + .instance_init = sunhme_instance_init, + .interfaces = (InterfaceInfo[]) { + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { } + } +}; + +static void sunhme_register_types(void) +{ + type_register_static(&sunhme_info); +} + +type_init(sunhme_register_types) diff --git a/hw/net/trace-events b/hw/net/trace-events new file mode 100644 index 000000000..643338f61 --- /dev/null +++ b/hw/net/trace-events @@ -0,0 +1,455 @@ +# See docs/devel/tracing.rst for syntax documentation. 
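+# Each entry below is a C-like prototype followed by a printf-style format +# string, e.g. (a hypothetical event shown only to illustrate the syntax): +# mydev_reg_write(uint64_t addr, uint64_t val) "addr 0x%"PRIx64" val 0x%"PRIx64 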
+ +# allwinner-sun8i-emac.c +allwinner_sun8i_emac_mii_write_reg(uint32_t reg, uint32_t value) "MII write: reg=0x%" PRIx32 " value=0x%" PRIx32 +allwinner_sun8i_emac_mii_read_reg(uint32_t reg, uint32_t value) "MII read: reg=0x%" PRIx32 " value=0x%" PRIx32 +allwinner_sun8i_emac_receive(uint32_t desc, uint32_t paddr, uint32_t bytes) "RX packet: desc=0x%" PRIx32 " paddr=0x%" PRIx32 " bytes=%" PRIu32 +allwinner_sun8i_emac_transmit(uint32_t desc, uint32_t paddr, uint32_t bytes) "TX packet: desc=0x%" PRIx32 " paddr=0x%" PRIx32 " bytes=%" PRIu32 +allwinner_sun8i_emac_reset(void) "HW reset" +allwinner_sun8i_emac_set_link(bool active) "Set link: active=%u" +allwinner_sun8i_emac_read(uint64_t offset, uint64_t val) "MMIO read: offset=0x%" PRIx64 " value=0x%" PRIx64 +allwinner_sun8i_emac_write(uint64_t offset, uint64_t val) "MMIO write: offset=0x%" PRIx64 " value=0x%" PRIx64 + +# etraxfs_eth.c +mdio_phy_read(int regnum, uint16_t value) "read phy_reg:%d value:0x%04x" +mdio_phy_write(int regnum, uint16_t value) "write phy_reg:%d value:0x%04x" +mdio_bitbang(bool mdc, bool mdio, int state, uint16_t cnt, unsigned int drive) "bitbang mdc=%u mdio=%u state=%d cnt=%u drv=%d" + +# lance.c +lance_mem_readw(uint64_t addr, uint32_t ret) "addr=0x%"PRIx64" val=0x%04x" +lance_mem_writew(uint64_t addr, uint32_t val) "addr=0x%"PRIx64" val=0x%04x" + +# mipsnet.c +mipsnet_send(uint32_t size) "sending len=%u" +mipsnet_receive(uint32_t size) "receiving len=%u" +mipsnet_read(uint64_t addr, uint32_t val) "read addr=0x%" PRIx64 " val=0x%x" +mipsnet_write(uint64_t addr, uint64_t val) "write addr=0x%" PRIx64 " val=0x%" PRIx64 +mipsnet_irq(uint32_t isr, uint32_t intctl) "set irq to %d (0x%02x)" + +# ne2000.c +ne2000_read(uint64_t addr, uint64_t val) "read addr=0x%" PRIx64 " val=0x%" PRIx64 +ne2000_write(uint64_t addr, uint64_t val) "write addr=0x%" PRIx64 " val=0x%" PRIx64 +ne2000_ioport_read(uint64_t addr, uint64_t val) "io read addr=0x%02" PRIx64 " val=0x%02" PRIx64 +ne2000_ioport_write(uint64_t addr, uint64_t val) "io write addr=0x%02" PRIx64 " val=0x%02" PRIx64 + +# opencores_eth.c +open_eth_mii_write(unsigned idx, uint16_t v) "MII[0x%02x] <- 0x%04x" +open_eth_mii_read(unsigned idx, uint16_t v) "MII[0x%02x] -> 0x%04x" +open_eth_update_irq(uint32_t v) "IRQ <- 0x%x" +open_eth_receive(unsigned len) "RX: len: %u" +open_eth_receive_mcast(unsigned idx, uint32_t h0, uint32_t h1) "MCAST: idx = %u, hash: %08x:%08x" +open_eth_receive_reject(void) "RX: rejected" +open_eth_receive_desc(uint32_t addr, uint32_t len_flags) "RX: 0x%08x, len_flags: 0x%08x" +open_eth_start_xmit(uint32_t addr, unsigned len, unsigned tx_len) "TX: 0x%08x, len: %u, tx_len: %u" +open_eth_reg_read(uint32_t addr, uint32_t v) "MAC[0x%02x] -> 0x%08x" +open_eth_reg_write(uint32_t addr, uint32_t v) "MAC[0x%02x] <- 0x%08x" +open_eth_desc_read(uint32_t addr, uint32_t v) "DESC[0x%04x] -> 0x%08x" +open_eth_desc_write(uint32_t addr, uint32_t v) "DESC[0x%04x] <- 0x%08x" + +# pcnet.c +pcnet_s_reset(void *s) "s=%p" +pcnet_user_int(void *s) "s=%p" +pcnet_isr_change(void *s, uint32_t isr, uint32_t isr_old) "s=%p INTA=%d<=%d" +pcnet_init(void *s, uint64_t init_addr) "s=%p init_addr=0x%"PRIx64 +pcnet_rlen_tlen(void *s, uint32_t rlen, uint32_t tlen) "s=%p rlen=%d tlen=%d" +pcnet_ss32_rdra_tdra(void *s, uint32_t ss32, uint32_t rdra, uint32_t rcvrl, uint32_t tdra, uint32_t xmtrl) "s=%p ss32=%d rdra=0x%08x[%d] tdra=0x%08x[%d]" + +# pcnet-pci.c +pcnet_aprom_writeb(void *opaque, uint32_t addr, uint32_t val) "opaque=%p addr=0x%08x val=0x%02x" +pcnet_aprom_readb(void *opaque, uint32_t addr, 
uint32_t val) "opaque=%p addr=0x%08x val=0x%02x" +pcnet_ioport_read(void *opaque, uint64_t addr, unsigned size) "opaque=%p addr=0x%"PRIx64" size=%d" +pcnet_ioport_write(void *opaque, uint64_t addr, uint64_t data, unsigned size) "opaque=%p addr=0x%"PRIx64" data=0x%"PRIx64" size=%d" + +# net_rx_pkt.c +net_rx_pkt_parsed(bool ip4, bool ip6, bool udp, bool tcp, size_t l3o, size_t l4o, size_t l5o) "RX packet parsed: ip4: %d, ip6: %d, udp: %d, tcp: %d, l3 offset: %zu, l4 offset: %zu, l5 offset: %zu" +net_rx_pkt_l4_csum_validate_entry(void) "Starting L4 checksum validation" +net_rx_pkt_l4_csum_validate_not_xxp(void) "Not a TCP/UDP packet" +net_rx_pkt_l4_csum_validate_udp_with_no_checksum(void) "UDP packet without checksum" +net_rx_pkt_l4_csum_validate_ip4_fragment(void) "IP4 fragment" +net_rx_pkt_l4_csum_validate_csum(bool csum_valid) "Checksum valid: %d" + +net_rx_pkt_l4_csum_calc_entry(void) "Starting L4 checksum calculation" +net_rx_pkt_l4_csum_calc_ip4_udp(void) "IP4/UDP packet" +net_rx_pkt_l4_csum_calc_ip4_tcp(void) "IP4/TCP packet" +net_rx_pkt_l4_csum_calc_ip6_udp(void) "IP6/UDP packet" +net_rx_pkt_l4_csum_calc_ip6_tcp(void) "IP6/TCP packet" +net_rx_pkt_l4_csum_calc_ph_csum(uint32_t cntr, uint16_t csl) "Pseudo-header: checksum counter %u, length %u" +net_rx_pkt_l4_csum_calc_csum(size_t l4hdr_off, uint16_t csl, uint32_t cntr, uint16_t csum) "L4 Checksum: L4 header offset: %zu, length: %u, counter: 0x%X, final checksum: 0x%X" + +net_rx_pkt_l4_csum_fix_entry(void) "Starting L4 checksum correction" +net_rx_pkt_l4_csum_fix_tcp(uint32_t l4_cso) "TCP packet, L4 cso: %u" +net_rx_pkt_l4_csum_fix_udp(uint32_t l4_cso) "UDP packet, L4 cso: %u" +net_rx_pkt_l4_csum_fix_not_xxp(void) "Not an IP4 packet" +net_rx_pkt_l4_csum_fix_ip4_fragment(void) "IP4 fragment" +net_rx_pkt_l4_csum_fix_udp_with_no_checksum(void) "UDP packet without checksum" +net_rx_pkt_l4_csum_fix_csum(uint32_t cso, uint16_t csum) "L4 Checksum: Offset: %u, value 0x%X" + +net_rx_pkt_l3_csum_validate_entry(void) "Starting L3 checksum validation" +net_rx_pkt_l3_csum_validate_not_ip4(void) "Not an IP4 packet" +net_rx_pkt_l3_csum_validate_csum(size_t l3hdr_off, uint32_t csl, uint32_t cntr, uint16_t csum, bool csum_valid) "L3 Checksum: L3 header offset: %zu, length: %u, counter: 0x%X, final checksum: 0x%X, valid: %d" + +net_rx_pkt_rss_ip4(void) "Calculating IPv4 RSS hash" +net_rx_pkt_rss_ip4_tcp(void) "Calculating IPv4/TCP RSS hash" +net_rx_pkt_rss_ip4_udp(void) "Calculating IPv4/UDP RSS hash" +net_rx_pkt_rss_ip6_tcp(void) "Calculating IPv6/TCP RSS hash" +net_rx_pkt_rss_ip6_udp(void) "Calculating IPv6/UDP RSS hash" +net_rx_pkt_rss_ip6(void) "Calculating IPv6 RSS hash" +net_rx_pkt_rss_ip6_ex(void) "Calculating IPv6/EX RSS hash" +net_rx_pkt_rss_ip6_ex_tcp(void) "Calculating IPv6/EX/TCP RSS hash" +net_rx_pkt_rss_ip6_ex_udp(void) "Calculating IPv6/EX/UDP RSS hash" +net_rx_pkt_rss_hash(size_t rss_length, uint32_t rss_hash) "RSS hash for %zu bytes: 0x%X" +net_rx_pkt_rss_add_chunk(void* ptr, size_t size, size_t input_offset) "Add RSS chunk %p, %zu bytes, RSS input offset %zu bytes" + +# e1000.c +e1000_receiver_overrun(size_t s, uint32_t rdh, uint32_t rdt) "Receiver overrun: dropped packet of %zu bytes, RDH=%u, RDT=%u" + +# e1000x_common.c +e1000x_rx_can_recv_disabled(bool link_up, bool rx_enabled, bool pci_master) "link_up: %d, rx_enabled %d, pci_master %d" +e1000x_vlan_is_vlan_pkt(bool is_vlan_pkt, uint16_t eth_proto, uint16_t vet) "Is VLAN packet: %d, ETH proto: 0x%X, VET: 0x%X" +e1000x_rx_flt_ucast_match(uint32_t idx, uint8_t b0, uint8_t b1, uint8_t 
b2, uint8_t b3, uint8_t b4, uint8_t b5) "unicast match[%d]: %02x:%02x:%02x:%02x:%02x:%02x" +e1000x_rx_flt_ucast_mismatch(uint8_t b0, uint8_t b1, uint8_t b2, uint8_t b3, uint8_t b4, uint8_t b5) "unicast mismatch: %02x:%02x:%02x:%02x:%02x:%02x" +e1000x_rx_flt_inexact_mismatch(uint8_t b0, uint8_t b1, uint8_t b2, uint8_t b3, uint8_t b4, uint8_t b5, uint32_t mo, uint32_t mta, uint32_t mta_val) "inexact mismatch: %02x:%02x:%02x:%02x:%02x:%02x MO %d MTA[%d] 0x%x" +e1000x_rx_link_down(uint32_t status_reg) "Received packet dropped because the link is down STATUS = %u" +e1000x_rx_disabled(uint32_t rctl_reg) "Received packet dropped because receive is disabled RCTL = %u" +e1000x_rx_oversized(size_t size) "Received packet dropped because it was oversized (%zu bytes)" +e1000x_mac_indicate(uint8_t b0, uint8_t b1, uint8_t b2, uint8_t b3, uint8_t b4, uint8_t b5) "Indicating MAC to guest: %02x:%02x:%02x:%02x:%02x:%02x" +e1000x_link_negotiation_start(void) "Start link auto negotiation" +e1000x_link_negotiation_done(void) "Auto negotiation is completed" + +# e1000e_core.c +e1000e_core_write(uint64_t index, uint32_t size, uint64_t val) "Write to register 0x%"PRIx64", %d byte(s), value: 0x%"PRIx64 +e1000e_core_read(uint64_t index, uint32_t size, uint64_t val) "Read from register 0x%"PRIx64", %d byte(s), value: 0x%"PRIx64 +e1000e_core_mdic_read(uint8_t page, uint32_t addr, uint32_t data) "MDIC READ: PHY[%u][%u] = 0x%x" +e1000e_core_mdic_read_unhandled(uint8_t page, uint32_t addr) "MDIC READ: PHY[%u][%u] UNHANDLED" +e1000e_core_mdic_write(uint8_t page, uint32_t addr, uint32_t data) "MDIC WRITE: PHY[%u][%u] = 0x%x" +e1000e_core_mdic_write_unhandled(uint8_t page, uint32_t addr) "MDIC WRITE: PHY[%u][%u] UNHANDLED" +e1000e_core_ctrl_write(uint64_t index, uint32_t val) "Write CTRL register 0x%"PRIx64", value: 0x%X" +e1000e_core_ctrl_sw_reset(void) "Doing SW reset" +e1000e_core_ctrl_phy_reset(void) "Doing PHY reset" + +e1000e_link_autoneg_flowctl(bool enabled) "Auto-negotiated flow control state is %d" +e1000e_link_set_params(bool autodetect, uint32_t speed, bool force_spd, bool force_dplx, bool rx_fctl, bool tx_fctl) "Set link params: Autodetect: %d, Speed: %d, Force speed: %d, Force duplex: %d, RX flow control %d, TX flow control %d" +e1000e_link_read_params(bool autodetect, uint32_t speed, bool force_spd, bool force_dplx, bool rx_fctl, bool tx_fctl) "Get link params: Autodetect: %d, Speed: %d, Force speed: %d, Force duplex: %d, RX flow control %d, TX flow control %d" +e1000e_link_set_ext_params(bool asd_check, bool speed_select_bypass) "Set extended link params: ASD check: %d, Speed select bypass: %d" +e1000e_link_status(bool link_up, bool full_dplx, uint32_t speed, uint32_t asdv) "Link up: %d, Duplex: %d, Speed: %d, ASDV: %d" +e1000e_link_status_changed(bool status) "New link status: %d" + +e1000e_wrn_regs_write_ro(uint64_t index, uint32_t size, uint64_t val) "WARNING: Write to RO register 0x%"PRIx64", %d byte(s), value: 0x%"PRIx64 +e1000e_wrn_regs_write_unknown(uint64_t index, uint32_t size, uint64_t val) "WARNING: Write to unknown register 0x%"PRIx64", %d byte(s), value: 0x%"PRIx64 +e1000e_wrn_regs_read_unknown(uint64_t index, uint32_t size) "WARNING: Read from unknown register 0x%"PRIx64", %d byte(s)" +e1000e_wrn_regs_read_trivial(uint32_t index) "WARNING: Reading register at offset: 0x%05x. It is not fully implemented." +e1000e_wrn_regs_write_trivial(uint32_t index) "WARNING: Writing to register at offset: 0x%05x. It is not fully implemented." 
+e1000e_wrn_no_ts_support(void) "WARNING: Guest requested TX timestamping which is not supported" +e1000e_wrn_no_snap_support(void) "WARNING: Guest requested TX SNAP header update which is not supported" +e1000e_wrn_iscsi_filtering_not_supported(void) "WARNING: Guest requested iSCSI filtering which is not supported" +e1000e_wrn_nfsw_filtering_not_supported(void) "WARNING: Guest requested NFS write filtering which is not supported" +e1000e_wrn_nfsr_filtering_not_supported(void) "WARNING: Guest requested NFS read filtering which is not supported" + +e1000e_tx_disabled(void) "TX Disabled" +e1000e_tx_descr(void *addr, uint32_t lower, uint32_t upper) "%p : %x %x" + +e1000e_ring_free_space(int ridx, uint32_t rdlen, uint32_t rdh, uint32_t rdt) "ring #%d: LEN: %u, DH: %u, DT: %u" + +e1000e_rx_can_recv_rings_full(void) "Cannot receive: all rings are full" +e1000e_rx_can_recv(void) "Can receive" +e1000e_rx_has_buffers(int ridx, uint32_t free_desc, size_t total_size, uint32_t desc_buf_size) "ring #%d: free descr: %u, packet size %zu, descr buffer size %u" +e1000e_rx_null_descriptor(void) "Null RX descriptor!!" +e1000e_rx_flt_vlan_mismatch(uint16_t vid) "VID mismatch: 0x%X" +e1000e_rx_flt_vlan_match(uint16_t vid) "VID match: 0x%X" +e1000e_rx_desc_ps_read(uint64_t a0, uint64_t a1, uint64_t a2, uint64_t a3) "buffers: [0x%"PRIx64", 0x%"PRIx64", 0x%"PRIx64", 0x%"PRIx64"]" +e1000e_rx_desc_ps_write(uint16_t a0, uint16_t a1, uint16_t a2, uint16_t a3) "bytes written: [%u, %u, %u, %u]" +e1000e_rx_desc_buff_sizes(uint32_t b0, uint32_t b1, uint32_t b2, uint32_t b3) "buffer sizes: [%u, %u, %u, %u]" +e1000e_rx_desc_len(uint8_t rx_desc_len) "RX descriptor length: %u" +e1000e_rx_desc_buff_write(uint8_t idx, uint64_t addr, uint16_t offset, const void* source, uint32_t len) "buffer #%u, addr: 0x%"PRIx64", offset: %u, from: %p, length: %u" +e1000e_rx_descr(int ridx, uint64_t base, uint8_t len) "Next RX descriptor: ring #%d, PA: 0x%"PRIx64", length: %u" +e1000e_rx_set_rctl(uint32_t rctl) "RCTL = 0x%x" +e1000e_rx_receive_iov(int iovcnt) "Received vector of %d fragments" +e1000e_rx_flt_dropped(void) "Received packet dropped by RX filter" +e1000e_rx_written_to_guest(uint32_t causes) "Received packet written to guest (ICR causes %u)" +e1000e_rx_not_written_to_guest(uint32_t causes) "Received packet NOT written to guest (ICR causes %u)" +e1000e_rx_interrupt_set(uint32_t causes) "Receive interrupt set (ICR causes %u)" +e1000e_rx_interrupt_delayed(uint32_t causes) "Receive interrupt delayed (ICR causes %u)" +e1000e_rx_set_cso(int cso_state) "RX CSO state set to %d" +e1000e_rx_set_rdt(int queue_idx, uint32_t val) "Setting RDT[%d] = %u" +e1000e_rx_set_rfctl(uint32_t val) "Setting RFCTL = 0x%X" +e1000e_rx_start_recv(void) + +e1000e_rx_rss_started(void) "Starting RSS processing" +e1000e_rx_rss_disabled(void) "RSS is disabled" +e1000e_rx_rss_type(uint32_t type) "RSS type is %u" +e1000e_rx_rss_ip4(bool isfragment, bool istcp, uint32_t mrqc, bool tcpipv4_enabled, bool ipv4_enabled) "RSS IPv4: fragment %d, tcp %d, mrqc 0x%X, tcpipv4 enabled %d, ipv4 enabled %d" +e1000e_rx_rss_ip6_rfctl(uint32_t rfctl) "RSS IPv6: rfctl 0x%X" +e1000e_rx_rss_ip6(bool ex_dis, bool new_ex_dis, bool istcp, bool has_ext_headers, bool ex_dst_valid, bool ex_src_valid, uint32_t mrqc, bool tcpipv6_enabled, bool ipv6ex_enabled, bool ipv6_enabled) "RSS IPv6: ex_dis: %d, new_ex_dis: %d, tcp %d, has_ext_headers %d, ex_dst_valid %d, ex_src_valid %d, mrqc 0x%X, tcpipv6 enabled %d, ipv6ex enabled %d, ipv6 enabled %d" +e1000e_rx_rss_dispatched_to_queue(int queue_idx) 
"Packet being dispatched to queue %d" + +e1000e_rx_metadata_protocols(bool isip4, bool isip6, bool isudp, bool istcp) "protocols: ip4: %d, ip6: %d, udp: %d, tcp: %d" +e1000e_rx_metadata_vlan(uint16_t vlan_tag) "VLAN tag is 0x%X" +e1000e_rx_metadata_rss(uint32_t rss, uint32_t mrq) "RSS data: rss: 0x%X, mrq: 0x%X" +e1000e_rx_metadata_ip_id(uint16_t ip_id) "the IPv4 ID is 0x%X" +e1000e_rx_metadata_ack(void) "the packet is TCP ACK" +e1000e_rx_metadata_pkt_type(uint32_t pkt_type) "the packet type is %u" +e1000e_rx_metadata_no_virthdr(void) "the packet has no virt-header" +e1000e_rx_metadata_virthdr_no_csum_info(void) "virt-header does not contain checksum info" +e1000e_rx_metadata_l3_cso_disabled(void) "IP4 CSO is disabled" +e1000e_rx_metadata_l4_cso_disabled(void) "TCP/UDP CSO is disabled" +e1000e_rx_metadata_l3_csum_validation_failed(void) "Cannot validate L3 checksum" +e1000e_rx_metadata_l4_csum_validation_failed(void) "Cannot validate L4 checksum" +e1000e_rx_metadata_status_flags(uint32_t status_flags) "status_flags is 0x%X" +e1000e_rx_metadata_ipv6_sum_disabled(void) "IPv6 RX checksummimg disabled by RFCTL" +e1000e_rx_metadata_ipv6_filtering_disabled(void) "IPv6 RX filtering disabled by RFCTL" + +e1000e_vlan_vet(uint16_t vet) "Setting VLAN ethernet type 0x%X" + +e1000e_irq_msi_notify(uint32_t cause) "MSI notify 0x%x" +e1000e_irq_throttling_no_pending_interrupts(void) "No pending interrupts to notify" +e1000e_irq_msi_notify_postponed(void) "Sending MSI postponed by ITR" +e1000e_irq_legacy_notify_postponed(void) "Raising legacy IRQ postponed by ITR" +e1000e_irq_throttling_no_pending_vec(int idx) "No pending interrupts for vector %d" +e1000e_irq_msix_notify_postponed_vec(int idx) "Sending MSI-X postponed by EITR[%d]" +e1000e_irq_legacy_notify(bool level) "IRQ line state: %d" +e1000e_irq_msix_notify_vec(uint32_t vector) "MSI-X notify vector 0x%x" +e1000e_irq_postponed_by_xitr(uint32_t reg) "Interrupt postponed by [E]ITR register 0x%x" +e1000e_irq_clear_ims(uint32_t bits, uint32_t old_ims, uint32_t new_ims) "Clearing IMS bits 0x%x: 0x%x --> 0x%x" +e1000e_irq_set_ims(uint32_t bits, uint32_t old_ims, uint32_t new_ims) "Setting IMS bits 0x%x: 0x%x --> 0x%x" +e1000e_irq_fix_icr_asserted(uint32_t new_val) "ICR_ASSERTED bit fixed: 0x%x" +e1000e_irq_add_msi_other(uint32_t new_val) "ICR_OTHER bit added: 0x%x" +e1000e_irq_pending_interrupts(uint32_t pending, uint32_t icr, uint32_t ims) "ICR PENDING: 0x%x (ICR: 0x%x, IMS: 0x%x)" +e1000e_irq_set_cause_entry(uint32_t val, uint32_t icr) "Going to set IRQ cause 0x%x, ICR: 0x%x" +e1000e_irq_set_cause_exit(uint32_t val, uint32_t icr) "Set IRQ cause 0x%x, ICR: 0x%x" +e1000e_irq_icr_write(uint32_t bits, uint32_t old_icr, uint32_t new_icr) "Clearing ICR bits 0x%x: 0x%x --> 0x%x" +e1000e_irq_write_ics(uint32_t val) "Adding ICR bits 0x%x" +e1000e_irq_icr_process_iame(void) "Clearing IMS bits due to IAME" +e1000e_irq_read_ics(uint32_t ics) "Current ICS: 0x%x" +e1000e_irq_read_ims(uint32_t ims) "Current IMS: 0x%x" +e1000e_irq_icr_read_entry(uint32_t icr) "Starting ICR read. Current ICR: 0x%x" +e1000e_irq_icr_read_exit(uint32_t icr) "Ending ICR read. 
Current ICR: 0x%x" +e1000e_irq_icr_clear_zero_ims(void) "Clearing ICR on read due to zero IMS" +e1000e_irq_icr_clear_iame(void) "Clearing ICR on read due to IAME" +e1000e_irq_iam_clear_eiame(uint32_t iam, uint32_t cause) "Clearing IMS due to EIAME, IAM: 0x%X, cause: 0x%X" +e1000e_irq_icr_clear_eiac(uint32_t icr, uint32_t eiac) "Clearing ICR bits due to EIAC, ICR: 0x%X, EIAC: 0x%X" +e1000e_irq_ims_clear_set_imc(uint32_t val) "Clearing IMS bits due to IMC write 0x%x" +e1000e_irq_fire_delayed_interrupts(void) "Firing delayed interrupts" +e1000e_irq_rearm_timer(uint32_t reg, int64_t delay_ns) "Mitigation timer armed for register 0x%X, delay %"PRId64" ns" +e1000e_irq_throttling_timer(uint32_t reg) "Mitigation timer shot for register 0x%X" +e1000e_irq_rdtr_fpd_running(void) "FPD written while RDTR was running" +e1000e_irq_rdtr_fpd_not_running(void) "FPD written while RDTR was not running" +e1000e_irq_tidv_fpd_running(void) "FPD written while TIDV was running" +e1000e_irq_tidv_fpd_not_running(void) "FPD written while TIDV was not running" +e1000e_irq_eitr_set(uint32_t eitr_num, uint32_t val) "EITR[%u] = %u" +e1000e_irq_itr_set(uint32_t val) "ITR = %u" +e1000e_irq_fire_all_timers(uint32_t val) "Firing all delay/throttling timers on all interrupts enable (0x%X written to IMS)" +e1000e_irq_adding_delayed_causes(uint32_t val, uint32_t icr) "Merging delayed causes 0x%X to ICR 0x%X" +e1000e_irq_msix_pending_clearing(uint32_t cause, uint32_t int_cfg, uint32_t vec) "Clearing MSI-X pending bit for cause 0x%x, IVAR config 0x%x, vector %u" + +e1000e_wrn_msix_vec_wrong(uint32_t cause, uint32_t cfg) "Invalid configuration for cause 0x%x: 0x%x" +e1000e_wrn_msix_invalid(uint32_t cause, uint32_t cfg) "Invalid entry for cause 0x%x: 0x%x" + +e1000e_mac_set_sw(uint8_t b0, uint8_t b1, uint8_t b2, uint8_t b3, uint8_t b4, uint8_t b5) "Set SW MAC: %02x:%02x:%02x:%02x:%02x:%02x" + +e1000e_vm_state_running(void) "VM state is running" +e1000e_vm_state_stopped(void) "VM state is stopped" + +# e1000e.c +e1000e_cb_pci_realize(void) "E1000E PCI realize entry" +e1000e_cb_pci_uninit(void) "E1000E PCI uninit entry" +e1000e_cb_qdev_reset(void) "E1000E qdev reset entry" +e1000e_cb_pre_save(void) "E1000E pre save entry" +e1000e_cb_post_load(void) "E1000E post load entry" + +e1000e_io_write_addr(uint64_t addr) "IOADDR write 0x%"PRIx64 +e1000e_io_write_data(uint64_t addr, uint64_t val) "IODATA write 0x%"PRIx64", value: 0x%"PRIx64 +e1000e_io_read_addr(uint64_t addr) "IOADDR read 0x%"PRIx64 +e1000e_io_read_data(uint64_t addr, uint64_t val) "IODATA read 0x%"PRIx64", value: 0x%"PRIx64 +e1000e_wrn_io_write_unknown(uint64_t addr) "IO write unknown address 0x%"PRIx64 +e1000e_wrn_io_read_unknown(uint64_t addr) "IO read unknown address 0x%"PRIx64 +e1000e_wrn_io_addr_undefined(uint64_t addr) "IO undefined register 0x%"PRIx64 +e1000e_wrn_io_addr_flash(uint64_t addr) "IO flash access (0x%"PRIx64") not implemented" +e1000e_wrn_io_addr_unknown(uint64_t addr) "IO unknown register 0x%"PRIx64 + +e1000e_msi_init_fail(int32_t res) "Failed to initialize MSI, error %d" +e1000e_msix_init_fail(int32_t res) "Failed to initialize MSI-X, error %d" +e1000e_msix_use_vector_fail(uint32_t vec, int32_t res) "Failed to use MSI-X vector %d, error %d" + +e1000e_mac_set_permanent(uint8_t b0, uint8_t b1, uint8_t b2, uint8_t b3, uint8_t b4, uint8_t b5) "Set permanent MAC: %02x:%02x:%02x:%02x:%02x:%02x" +e1000e_cfg_support_virtio(bool support) "Virtio header supported: %d" + +# spapr_llan.c +spapr_vlan_get_rx_bd_from_pool_found(int pool, int32_t count, uint32_t rx_bufs) 
"pool=%d count=%"PRId32" rxbufs=%"PRIu32 +spapr_vlan_get_rx_bd_from_page(int buf_ptr, uint64_t bd) "use_buf_ptr=%d bd=0x%016"PRIx64 +spapr_vlan_get_rx_bd_from_page_found(uint32_t use_buf_ptr, uint32_t rx_bufs) "ptr=%"PRIu32" rxbufs=%"PRIu32 +spapr_vlan_receive(const char *id, uint32_t rx_bufs) "[%s] rx_bufs=%"PRIu32 +spapr_vlan_receive_dma_completed(void) "DMA write completed" +spapr_vlan_receive_wrote(uint64_t ptr, uint64_t hi, uint64_t lo) "rxq entry (ptr=0x%"PRIx64"): 0x%016"PRIx64" 0x%016"PRIx64 +spapr_vlan_add_rxbuf_to_pool_create(int pool, uint64_t len) "created RX pool %d for size %"PRIu64 +spapr_vlan_add_rxbuf_to_pool(int pool, uint64_t len, int32_t count) "add buf using pool %d (size %"PRIu64", count=%"PRId32")" +spapr_vlan_add_rxbuf_to_page(uint32_t ptr, uint32_t rx_bufs, uint64_t bd) "added buf ptr=%"PRIu32" rx_bufs=%"PRIu32" bd=0x%016"PRIx64 +spapr_vlan_h_add_logical_lan_buffer(uint64_t reg, uint64_t buf) "H_ADD_LOGICAL_LAN_BUFFER(0x%"PRIx64", 0x%"PRIx64")" +spapr_vlan_h_send_logical_lan(uint64_t reg, uint64_t continue_token) "H_SEND_LOGICAL_LAN(0x%"PRIx64", , 0x%"PRIx64")" +spapr_vlan_h_send_logical_lan_rxbufs(uint32_t rx_bufs) "rxbufs = %"PRIu32 +spapr_vlan_h_send_logical_lan_buf_desc(uint64_t buf) " buf desc: 0x%"PRIx64 +spapr_vlan_h_send_logical_lan_total(int nbufs, unsigned total_len) "%d buffers, total length 0x%x" + +# sungem.c +sungem_tx_checksum(uint16_t start, uint16_t off) "TX checksumming from byte %d, inserting at %d" +sungem_tx_checksum_oob(void) "TX checksum out of packet bounds" +sungem_tx_unfinished(void) "TX packet started without finishing the previous one" +sungem_tx_overflow(void) "TX packet queue overflow" +sungem_tx_finished(uint32_t size) "TX completing %"PRIu32 " bytes packet" +sungem_tx_kick(void) "TX Kick..." +sungem_tx_disabled(void) "TX not enabled" +sungem_tx_process(uint32_t comp, uint32_t kick, uint32_t size) "TX processing comp=%"PRIu32", kick=%"PRIu32" out of %"PRIu32 +sungem_tx_desc(uint32_t comp, uint64_t control, uint64_t buffer) "TX desc %"PRIu32 ": 0x%"PRIx64" 0x%"PRIx64 +sungem_tx_reset(void) "TX reset" +sungem_rx_mac_disabled(void) "Check RX MAC disabled" +sungem_rx_txdma_disabled(void) "Check RX TXDMA disabled" +sungem_rx_check(bool full, uint32_t kick, uint32_t done) "Check RX %d (kick=%"PRIu32", done=%"PRIu32")" +sungem_rx_mac_check(uint32_t mac0, uint32_t mac1, uint32_t mac2) "Word MAC: 0x%"PRIx32" 0x%"PRIx32" 0x%"PRIx32 +sungem_rx_mac_multicast(void) "Multicast" +sungem_rx_mac_compare(uint32_t mac0, uint32_t mac1, uint32_t mac2) "Compare MAC to 0x%"PRIx32" 0x%"PRIx32" 0x%"PRIx32".." 
+sungem_rx_packet(size_t size) "RX got %zu bytes packet" +sungem_rx_disabled(void) "RX not enabled" +sungem_rx_bad_frame_size(size_t size) "RX bad frame size %zu, dropped" +sungem_rx_unmatched(void) "No match, dropped" +sungem_rx_process(uint32_t done, uint32_t kick, uint32_t size) "RX processing done=%"PRIu32", kick=%"PRIu32" out of %"PRIu32 +sungem_rx_ringfull(void) "RX ring full" +sungem_rx_desc(uint64_t control, uint64_t buffer) "RX desc: 0x%"PRIx64" 0x%"PRIx64 +sungem_rx_reset(void) "RX reset" +sungem_rx_kick(uint64_t val) "RXDMA_KICK written to %"PRIu64 +sungem_reset(bool pci_reset) "Full reset (PCI:%d)" +sungem_mii_write(uint8_t phy_addr, uint8_t reg_addr, uint16_t val) "MII write addr 0x%x reg 0x%02x val 0x%04x" +sungem_mii_read(uint8_t phy_addr, uint8_t reg_addr, uint16_t val) "MII read addr 0x%x reg 0x%02x val 0x%04x" +sungem_mii_invalid_sof(uint32_t val) "MII op, invalid SOF field 0x%"PRIx32 +sungem_mii_invalid_op(uint8_t op) "MII op, invalid op field 0x%x" +sungem_mmio_greg_write(uint64_t addr, uint64_t val) "MMIO greg write to 0x%"PRIx64" val=0x%"PRIx64 +sungem_mmio_greg_read(uint64_t addr, uint64_t val) "MMIO greg read from 0x%"PRIx64" val=0x%"PRIx64 +sungem_mmio_txdma_write(uint64_t addr, uint64_t val) "MMIO txdma write to 0x%"PRIx64" val=0x%"PRIx64 +sungem_mmio_txdma_read(uint64_t addr, uint64_t val) "MMIO txdma read from 0x%"PRIx64" val=0x%"PRIx64 +sungem_mmio_rxdma_write(uint64_t addr, uint64_t val) "MMIO rxdma write to 0x%"PRIx64" val=0x%"PRIx64 +sungem_mmio_rxdma_read(uint64_t addr, uint64_t val) "MMIO rxdma read from 0x%"PRIx64" val=0x%"PRIx64 +sungem_mmio_mac_write(uint64_t addr, uint64_t val) "MMIO mac write to 0x%"PRIx64" val=0x%"PRIx64 +sungem_mmio_mac_read(uint64_t addr, uint64_t val) "MMIO mac read from 0x%"PRIx64" val=0x%"PRIx64 +sungem_mmio_mif_write(uint64_t addr, uint64_t val) "MMIO mif write to 0x%"PRIx64" val=0x%"PRIx64 +sungem_mmio_mif_read(uint64_t addr, uint64_t val) "MMIO mif read from 0x%"PRIx64" val=0x%"PRIx64 +sungem_mmio_pcs_write(uint64_t addr, uint64_t val) "MMIO pcs write to 0x%"PRIx64" val=0x%"PRIx64 +sungem_mmio_pcs_read(uint64_t addr, uint64_t val) "MMIO pcs read from 0x%"PRIx64" val=0x%"PRIx64 + +# sunhme.c +sunhme_seb_write(uint64_t addr, uint64_t value) "addr 0x%"PRIx64" value 0x%"PRIx64 +sunhme_seb_read(uint64_t addr, uint64_t value) "addr 0x%"PRIx64" value 0x%"PRIx64 +sunhme_etx_write(uint64_t addr, uint64_t value) "addr 0x%"PRIx64" value 0x%"PRIx64 +sunhme_etx_read(uint64_t addr, uint64_t value) "addr 0x%"PRIx64" value 0x%"PRIx64 +sunhme_erx_write(uint64_t addr, uint64_t value) "addr 0x%"PRIx64" value 0x%"PRIx64 +sunhme_erx_read(uint64_t addr, uint64_t value) "addr 0x%"PRIx64" value 0x%"PRIx64 +sunhme_mac_write(uint64_t addr, uint64_t value) "addr 0x%"PRIx64" value 0x%"PRIx64 +sunhme_mac_read(uint64_t addr, uint64_t value) "addr 0x%"PRIx64" value 0x%"PRIx64 +sunhme_mii_write(uint8_t addr, uint16_t value) "addr 0x%x value 0x%x" +sunhme_mii_read(uint8_t addr, uint16_t value) "addr 0x%x value 0x%x" +sunhme_mif_write(uint64_t addr, uint64_t value) "addr 0x%"PRIx64" value 0x%"PRIx64 +sunhme_mif_read(uint64_t addr, uint64_t value) "addr 0x%"PRIx64" value 0x%"PRIx64 +sunhme_tx_desc(uint64_t buffer, uint32_t status, int cr, int nr) "addr 0x%"PRIx64" status 0x%"PRIx32 " (ring %d/%d)" +sunhme_tx_xsum_add(int offset, int len) "adding xsum at offset %d, len %d" +sunhme_tx_xsum_stuff(uint16_t xsum, int offset) "stuffing xsum 0x%x at offset %d" +sunhme_tx_done(int len) "successfully transmitted frame with len %d" +sunhme_rx_incoming(size_t len) 
"received incoming frame with len %zu" +sunhme_rx_filter_destmac(uint8_t b0, uint8_t b1, uint8_t b2, uint8_t b3, uint8_t b4, uint8_t b5) "received frame for MAC: %02x:%02x:%02x:%02x:%02x:%02x" +sunhme_rx_filter_local_match(void) "incoming frame matches local MAC address" +sunhme_rx_filter_bcast_match(void) "incoming frame matches broadcast MAC address" +sunhme_rx_filter_hash_nomatch(void) "incoming MAC address not in hash table" +sunhme_rx_filter_hash_match(void) "incoming MAC address found in hash table" +sunhme_rx_filter_promisc_match(void) "incoming frame accepted due to promiscuous mode" +sunhme_rx_filter_reject(void) "rejecting incoming frame" +sunhme_rx_filter_accept(void) "accepting incoming frame" +sunhme_rx_desc(uint32_t addr, int offset, uint32_t status, int len, int cr, int nr) "addr 0x%"PRIx32"(+0x%x) status 0x%"PRIx32 " len %d (ring %d/%d)" +sunhme_rx_xsum_calc(uint16_t xsum) "calculated incoming xsum as 0x%x" +sunhme_rx_norxd(void) "no free rx descriptors available" +sunhme_update_irq(uint32_t mifmask, uint32_t mif, uint32_t sebmask, uint32_t seb, int level) "mifmask: 0x%x mif: 0x%x sebmask: 0x%x seb: 0x%x level: %d" + +# virtio-net.c +virtio_net_announce_notify(void) "" +virtio_net_announce_timer(int round) "%d" +virtio_net_handle_announce(int round) "%d" +virtio_net_post_load_device(void) +virtio_net_rss_disable(void) +virtio_net_rss_error(const char *msg, uint32_t value) "%s, value 0x%08x" +virtio_net_rss_enable(uint32_t p1, uint16_t p2, uint8_t p3) "hashes 0x%x, table of %d, key of %d" + +# tulip.c +tulip_reg_write(uint64_t addr, const char *name, int size, uint64_t val) "addr 0x%02"PRIx64" (%s) size %d value 0x%08"PRIx64 +tulip_reg_read(uint64_t addr, const char *name, int size, uint64_t val) "addr 0x%02"PRIx64" (%s) size %d value 0x%08"PRIx64 +tulip_receive(const uint8_t *buf, size_t len) "buf %p size %zu" +tulip_descriptor(const char *prefix, uint32_t addr, uint32_t status, uint32_t control, uint32_t len1, uint32_t len2, uint32_t buf1, uint32_t buf2) "%s 0x%08x: status 0x%08x control 0x%03x len1 %4d len2 %4d buf1 0x%08x buf2 0x%08x" +tulip_rx_state(const char *state) "RX %s" +tulip_tx_state(const char *state) "TX %s" +tulip_irq(uint32_t mask, uint32_t en, const char *state) "mask 0x%08x ie 0x%08x %s" +tulip_mii_write(int phy, int reg, uint16_t data) "phy 0x%x reg 0x%x data 0x%04x" +tulip_mii_read(int phy, int reg, uint16_t data) "phy 0x%x, reg 0x%x data 0x%04x" +tulip_reset(void) "" +tulip_setup_frame(void) "" +tulip_setup_filter(int n, uint8_t a, uint8_t b, uint8_t c, uint8_t d, uint8_t e, uint8_t f) "%d: %02x:%02x:%02x:%02x:%02x:%02x" + +# lasi_i82596.c +lasi_82596_mem_readw(uint64_t addr, uint32_t ret) "addr=0x%"PRIx64" val=0x%04x" +lasi_82596_mem_writew(uint64_t addr, uint32_t val) "addr=0x%"PRIx64" val=0x%04x" + +# i82596.c +i82596_s_reset(void *s) "%p Reset chip" +i82596_transmit(uint32_t size, uint32_t addr) "size %u from addr 0x%04x" +i82596_receive_analysis(const char *s) "%s" +i82596_receive_packet(size_t sz) "len=%zu" +i82596_new_mac(const char *id_with_mac) "New MAC for: %s" +i82596_set_multicast(uint16_t count) "Added %d multicast entries" +i82596_channel_attention(void *s) "%p: Received CHANNEL ATTENTION" + +# imx_fec.c +imx_phy_read(uint32_t val, int phy, int reg) "0x%04"PRIx32" <= phy[%d].reg[%d]" +imx_phy_read_num(int phy, int configured) "read request from unconfigured phy %d (configured %d)" +imx_phy_write(uint32_t val, int phy, int reg) "0x%04"PRIx32" => phy[%d].reg[%d]" +imx_phy_write_num(int phy, int configured) "write request to unconfigured phy 
%d (configured %d)" +imx_phy_update_link(const char *s) "%s" +imx_phy_reset(void) "" +imx_fec_read_bd(uint64_t addr, int flags, int len, int data) "tx_bd 0x%"PRIx64" flags 0x%04x len %d data 0x%08x" +imx_enet_read_bd(uint64_t addr, int flags, int len, int data, int options, int status) "tx_bd 0x%"PRIx64" flags 0x%04x len %d data 0x%08x option 0x%04x status 0x%04x" +imx_eth_tx_bd_busy(void) "tx_bd ran out of descriptors to transmit" +imx_eth_rx_bd_full(void) "RX buffer is full" +imx_eth_read(int reg, const char *reg_name, uint32_t value) "reg[%d:%s] => 0x%08"PRIx32 +imx_eth_write(int reg, const char *reg_name, uint64_t value) "reg[%d:%s] <= 0x%08"PRIx64 +imx_fec_receive(size_t size) "len %zu" +imx_fec_receive_len(uint64_t addr, int len) "rx_bd 0x%"PRIx64" length %d" +imx_fec_receive_last(int last) "rx frame flags 0x%04x" +imx_enet_receive(size_t size) "len %zu" +imx_enet_receive_len(uint64_t addr, int len) "rx_bd 0x%"PRIx64" length %d" +imx_enet_receive_last(int last) "rx frame flags 0x%04x" + +# npcm7xx_emc.c +npcm7xx_emc_reset(int emc_num) "Resetting emc%d" +npcm7xx_emc_update_tx_irq(int level) "Setting tx irq to %d" +npcm7xx_emc_update_rx_irq(int level) "Setting rx irq to %d" +npcm7xx_emc_set_mista(uint32_t flags) "ORing 0x%x into MISTA" +npcm7xx_emc_cpu_owned_desc(uint32_t addr) "Can't process cpu-owned descriptor @0x%x" +npcm7xx_emc_sent_packet(uint32_t len) "Sent %u byte packet" +npcm7xx_emc_tx_done(uint32_t ctxdsa) "TX done, CTXDSA=0x%x" +npcm7xx_emc_can_receive(int can_receive) "Can receive: %d" +npcm7xx_emc_packet_filtered_out(const char* fail_reason) "Packet filtered out: %s" +npcm7xx_emc_packet_dropped(uint32_t len) "%u byte packet dropped" +npcm7xx_emc_receiving_packet(uint32_t len) "Receiving %u byte packet" +npcm7xx_emc_received_packet(uint32_t len) "Received %u byte packet" +npcm7xx_emc_rx_done(uint32_t crxdsa) "RX done, CRXDSA=0x%x" +npcm7xx_emc_reg_read(int emc_num, uint32_t result, const char *name, int regno) "emc%d: 0x%x = reg[%s/%d]" +npcm7xx_emc_reg_write(int emc_num, const char *name, int regno, uint32_t value) "emc%d: reg[%s/%d] = 0x%x" + +# dp8398x.c +dp8393x_raise_irq(int isr) "raise irq, isr is 0x%04x" +dp8393x_lower_irq(void) "lower irq" +dp8393x_load_cam(int idx, int cam0, int cam1, int cam2, int cam3, int cam4, int cam5) "load cam[%d] with 0x%02x0x%02x0x%02x0x%02x0x%02x0x%02x" +dp8393x_load_cam_done(int cen) "load cam done. 
cam enable mask 0x%04x" +dp8393x_read_rra_regs(int crba0, int crba1, int rbwc0, int rbwc1) "CRBA0/1: 0x%04x/0x%04x, RBWC0/1: 0x%04x/0x%04x" +dp8393x_transmit_packet(int ttda) "Transmit packet at 0x%"PRIx32 +dp8393x_transmit_txlen_error(int len) "tx_len is %d" +dp8393x_read(int reg, const char *name, int val, int size) "reg=0x%x [%s] val=0x%04x size=%d" +dp8393x_write(int reg, const char *name, int val, int size) "reg=0x%x [%s] val=0x%04x size=%d" +dp8393x_write_invalid(int reg) "writing to reg %d invalid" +dp8393x_write_invalid_dcr(const char *name) "writing to %s invalid" +dp8393x_receive_oversize(int size) "oversize packet, pkt_size is %d" +dp8393x_receive_not_netcard(void) "packet not for netcard" +dp8393x_receive_packet(int crba) "Receive packet at 0x%"PRIx32 +dp8393x_receive_write_status(int crba) "Write status at 0x%"PRIx32 diff --git a/hw/net/trace.h b/hw/net/trace.h new file mode 100644 index 000000000..93249af0a --- /dev/null +++ b/hw/net/trace.h @@ -0,0 +1 @@ +#include "trace/trace-hw_net.h" diff --git a/hw/net/tulip.c b/hw/net/tulip.c new file mode 100644 index 000000000..ca69f7ea5 --- /dev/null +++ b/hw/net/tulip.c @@ -0,0 +1,1047 @@ +/* + * QEMU TULIP Emulation + * + * Copyright (c) 2019 Sven Schnelle + * + * This work is licensed under the GNU GPL license version 2 or later. + */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "hw/irq.h" +#include "hw/pci/pci.h" +#include "hw/qdev-properties.h" +#include "hw/nvram/eeprom93xx.h" +#include "migration/vmstate.h" +#include "sysemu/sysemu.h" +#include "tulip.h" +#include "trace.h" +#include "net/eth.h" + +struct TULIPState { + PCIDevice dev; + MemoryRegion io; + MemoryRegion memory; + NICConf c; + qemu_irq irq; + NICState *nic; + eeprom_t *eeprom; + uint32_t csr[16]; + + /* state for MII */ + uint32_t old_csr9; + uint32_t mii_word; + uint32_t mii_bitcnt; + + hwaddr current_rx_desc; + hwaddr current_tx_desc; + + uint8_t rx_frame[2048]; + uint8_t tx_frame[2048]; + uint16_t tx_frame_len; + uint16_t rx_frame_len; + uint16_t rx_frame_size; + + uint32_t rx_status; + uint8_t filter[16][6]; +}; + +static const VMStateDescription vmstate_pci_tulip = { + .name = "tulip", + .fields = (VMStateField[]) { + VMSTATE_PCI_DEVICE(dev, TULIPState), + VMSTATE_UINT32_ARRAY(csr, TULIPState, 16), + VMSTATE_UINT32(old_csr9, TULIPState), + VMSTATE_UINT32(mii_word, TULIPState), + VMSTATE_UINT32(mii_bitcnt, TULIPState), + VMSTATE_UINT64(current_rx_desc, TULIPState), + VMSTATE_UINT64(current_tx_desc, TULIPState), + VMSTATE_BUFFER(rx_frame, TULIPState), + VMSTATE_BUFFER(tx_frame, TULIPState), + VMSTATE_UINT16(rx_frame_len, TULIPState), + VMSTATE_UINT16(tx_frame_len, TULIPState), + VMSTATE_UINT16(rx_frame_size, TULIPState), + VMSTATE_UINT32(rx_status, TULIPState), + VMSTATE_UINT8_2DARRAY(filter, TULIPState, 16, 6), + VMSTATE_END_OF_LIST() + } +}; + +static void tulip_desc_read(TULIPState *s, hwaddr p, + struct tulip_descriptor *desc) +{ + if (s->csr[0] & CSR0_DBO) { + desc->status = ldl_be_pci_dma(&s->dev, p); + desc->control = ldl_be_pci_dma(&s->dev, p + 4); + desc->buf_addr1 = ldl_be_pci_dma(&s->dev, p + 8); + desc->buf_addr2 = ldl_be_pci_dma(&s->dev, p + 12); + } else { + desc->status = ldl_le_pci_dma(&s->dev, p); + desc->control = ldl_le_pci_dma(&s->dev, p + 4); + desc->buf_addr1 = ldl_le_pci_dma(&s->dev, p + 8); + desc->buf_addr2 = ldl_le_pci_dma(&s->dev, p + 12); + } +} + +static void tulip_desc_write(TULIPState *s, hwaddr p, + struct tulip_descriptor *desc) +{ + if (s->csr[0] & CSR0_DBO) { + stl_be_pci_dma(&s->dev, p, desc->status); + 
stl_be_pci_dma(&s->dev, p + 4, desc->control); + stl_be_pci_dma(&s->dev, p + 8, desc->buf_addr1); + stl_be_pci_dma(&s->dev, p + 12, desc->buf_addr2); + } else { + stl_le_pci_dma(&s->dev, p, desc->status); + stl_le_pci_dma(&s->dev, p + 4, desc->control); + stl_le_pci_dma(&s->dev, p + 8, desc->buf_addr1); + stl_le_pci_dma(&s->dev, p + 12, desc->buf_addr2); + } +} + +static void tulip_update_int(TULIPState *s) +{ + uint32_t ie = s->csr[5] & s->csr[7]; + bool assert = false; + + s->csr[5] &= ~(CSR5_AIS | CSR5_NIS); + + if (ie & (CSR5_TI | CSR5_TU | CSR5_RI | CSR5_GTE | CSR5_ERI)) { + s->csr[5] |= CSR5_NIS; + } + + if (ie & (CSR5_LC | CSR5_GPI | CSR5_FBE | CSR5_LNF | CSR5_ETI | CSR5_RWT | + CSR5_RPS | CSR5_RU | CSR5_UNF | CSR5_LNP_ANC | CSR5_TJT | + CSR5_TPS)) { + s->csr[5] |= CSR5_AIS; + } + + assert = s->csr[5] & s->csr[7] & (CSR5_AIS | CSR5_NIS); + trace_tulip_irq(s->csr[5], s->csr[7], assert ? "assert" : "deassert"); + qemu_set_irq(s->irq, assert); +} + +static bool tulip_rx_stopped(TULIPState *s) +{ + return ((s->csr[5] >> CSR5_RS_SHIFT) & CSR5_RS_MASK) == CSR5_RS_STOPPED; +} + +static void tulip_dump_tx_descriptor(TULIPState *s, + struct tulip_descriptor *desc) +{ + trace_tulip_descriptor("TX ", s->current_tx_desc, + desc->status, desc->control >> 22, + desc->control & 0x7ff, (desc->control >> 11) & 0x7ff, + desc->buf_addr1, desc->buf_addr2); +} + +static void tulip_dump_rx_descriptor(TULIPState *s, + struct tulip_descriptor *desc) +{ + trace_tulip_descriptor("RX ", s->current_rx_desc, + desc->status, desc->control >> 22, + desc->control & 0x7ff, (desc->control >> 11) & 0x7ff, + desc->buf_addr1, desc->buf_addr2); +} + +static void tulip_next_rx_descriptor(TULIPState *s, + struct tulip_descriptor *desc) +{ + if (desc->control & RDES1_RER) { + s->current_rx_desc = s->csr[3]; + } else if (desc->control & RDES1_RCH) { + s->current_rx_desc = desc->buf_addr2; + } else { + s->current_rx_desc += sizeof(struct tulip_descriptor) + + (((s->csr[0] >> CSR0_DSL_SHIFT) & CSR0_DSL_MASK) << 2); + } + s->current_rx_desc &= ~3ULL; +} + +static void tulip_copy_rx_bytes(TULIPState *s, struct tulip_descriptor *desc) +{ + int len1 = (desc->control >> RDES1_BUF1_SIZE_SHIFT) & RDES1_BUF1_SIZE_MASK; + int len2 = (desc->control >> RDES1_BUF2_SIZE_SHIFT) & RDES1_BUF2_SIZE_MASK; + int len; + + if (s->rx_frame_len && len1) { + if (s->rx_frame_len > len1) { + len = len1; + } else { + len = s->rx_frame_len; + } + + pci_dma_write(&s->dev, desc->buf_addr1, s->rx_frame + + (s->rx_frame_size - s->rx_frame_len), len); + s->rx_frame_len -= len; + } + + if (s->rx_frame_len && len2) { + if (s->rx_frame_len > len2) { + len = len2; + } else { + len = s->rx_frame_len; + } + + pci_dma_write(&s->dev, desc->buf_addr2, s->rx_frame + + (s->rx_frame_size - s->rx_frame_len), len); + s->rx_frame_len -= len; + } +} + +static bool tulip_filter_address(TULIPState *s, const uint8_t *addr) +{ + static const char broadcast[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; + bool ret = false; + int i; + + for (i = 0; i < 16 && ret == false; i++) { + if (!memcmp(&s->filter[i], addr, ETH_ALEN)) { + ret = true; + } + } + + if (!memcmp(addr, broadcast, ETH_ALEN)) { + return true; + } + + if (s->csr[6] & (CSR6_PR | CSR6_RA)) { + /* Promiscuous mode enabled */ + s->rx_status |= RDES0_FF; + return true; + } + + if ((s->csr[6] & CSR6_PM) && (addr[0] & 1)) { + /* Pass all Multicast enabled */ + s->rx_status |= RDES0_MF; + return true; + } + + if (s->csr[6] & CSR6_IF) { + ret ^= true; + } + return ret; +} + +static ssize_t tulip_receive(TULIPState *s, const 
uint8_t *buf, size_t size) +{ + struct tulip_descriptor desc; + + trace_tulip_receive(buf, size); + + if (size < 14 || size > sizeof(s->rx_frame) - 4 + || s->rx_frame_len || tulip_rx_stopped(s)) { + return 0; + } + + if (!tulip_filter_address(s, buf)) { + return size; + } + + do { + tulip_desc_read(s, s->current_rx_desc, &desc); + tulip_dump_rx_descriptor(s, &desc); + + if (!(desc.status & RDES0_OWN)) { + s->csr[5] |= CSR5_RU; + tulip_update_int(s); + return s->rx_frame_size - s->rx_frame_len; + } + desc.status = 0; + + if (!s->rx_frame_len) { + s->rx_frame_size = size + 4; + s->rx_status = RDES0_LS | + ((s->rx_frame_size & RDES0_FL_MASK) << RDES0_FL_SHIFT); + desc.status |= RDES0_FS; + memcpy(s->rx_frame, buf, size); + s->rx_frame_len = s->rx_frame_size; + } + + tulip_copy_rx_bytes(s, &desc); + + if (!s->rx_frame_len) { + desc.status |= s->rx_status; + s->csr[5] |= CSR5_RI; + tulip_update_int(s); + } + tulip_dump_rx_descriptor(s, &desc); + tulip_desc_write(s, s->current_rx_desc, &desc); + tulip_next_rx_descriptor(s, &desc); + } while (s->rx_frame_len); + return size; +} + +static ssize_t tulip_receive_nc(NetClientState *nc, + const uint8_t *buf, size_t size) +{ + return tulip_receive(qemu_get_nic_opaque(nc), buf, size); +} + +static NetClientInfo net_tulip_info = { + .type = NET_CLIENT_DRIVER_NIC, + .size = sizeof(NICState), + .receive = tulip_receive_nc, +}; + +static const char *tulip_reg_name(const hwaddr addr) +{ + switch (addr) { + case CSR(0): + return "CSR0"; + + case CSR(1): + return "CSR1"; + + case CSR(2): + return "CSR2"; + + case CSR(3): + return "CSR3"; + + case CSR(4): + return "CSR4"; + + case CSR(5): + return "CSR5"; + + case CSR(6): + return "CSR6"; + + case CSR(7): + return "CSR7"; + + case CSR(8): + return "CSR8"; + + case CSR(9): + return "CSR9"; + + case CSR(10): + return "CSR10"; + + case CSR(11): + return "CSR11"; + + case CSR(12): + return "CSR12"; + + case CSR(13): + return "CSR13"; + + case CSR(14): + return "CSR14"; + + case CSR(15): + return "CSR15"; + + default: + break; + } + return ""; +} + +static const char *tulip_rx_state_name(int state) +{ + switch (state) { + case CSR5_RS_STOPPED: + return "STOPPED"; + + case CSR5_RS_RUNNING_FETCH: + return "RUNNING/FETCH"; + + case CSR5_RS_RUNNING_CHECK_EOR: + return "RUNNING/CHECK EOR"; + + case CSR5_RS_RUNNING_WAIT_RECEIVE: + return "WAIT RECEIVE"; + + case CSR5_RS_SUSPENDED: + return "SUSPENDED"; + + case CSR5_RS_RUNNING_CLOSE: + return "RUNNING/CLOSE"; + + case CSR5_RS_RUNNING_FLUSH: + return "RUNNING/FLUSH"; + + case CSR5_RS_RUNNING_QUEUE: + return "RUNNING/QUEUE"; + + default: + break; + } + return ""; +} + +static const char *tulip_tx_state_name(int state) +{ + switch (state) { + case CSR5_TS_STOPPED: + return "STOPPED"; + + case CSR5_TS_RUNNING_FETCH: + return "RUNNING/FETCH"; + + case CSR5_TS_RUNNING_WAIT_EOT: + return "RUNNING/WAIT EOT"; + + case CSR5_TS_RUNNING_READ_BUF: + return "RUNNING/READ BUF"; + + case CSR5_TS_RUNNING_SETUP: + return "RUNNING/SETUP"; + + case CSR5_TS_SUSPENDED: + return "SUSPENDED"; + + case CSR5_TS_RUNNING_CLOSE: + return "RUNNING/CLOSE"; + + default: + break; + } + return ""; +} + +static void tulip_update_rs(TULIPState *s, int state) +{ + s->csr[5] &= ~(CSR5_RS_MASK << CSR5_RS_SHIFT); + s->csr[5] |= (state & CSR5_RS_MASK) << CSR5_RS_SHIFT; + trace_tulip_rx_state(tulip_rx_state_name(state)); +} + +static uint16_t tulip_mdi_default[] = { + /* MDI Registers 0 - 6, 7 */ + 0x3100, 0xf02c, 0x7810, 0x0000, 0x0501, 0x4181, 0x0000, 0x0000, + /* MDI Registers 8 - 15 */ + 0x0000, 0x0000, 
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + /* MDI Registers 16 - 31 */ + 0x0003, 0x0000, 0x0001, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, +}; + +/* Readonly mask for MDI (PHY) registers */ +static const uint16_t tulip_mdi_mask[] = { + 0x0000, 0xffff, 0xffff, 0xffff, 0xc01f, 0xffff, 0xffff, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0fff, 0x0000, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, + 0xffff, 0xffff, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, +}; + +static uint16_t tulip_mii_read(TULIPState *s, int phy, int reg) +{ + uint16_t ret = 0; + if (phy == 1) { + ret = tulip_mdi_default[reg]; + } + trace_tulip_mii_read(phy, reg, ret); + return ret; +} + +static void tulip_mii_write(TULIPState *s, int phy, int reg, uint16_t data) +{ + trace_tulip_mii_write(phy, reg, data); + + if (phy != 1) { + return; + } + + tulip_mdi_default[reg] &= ~tulip_mdi_mask[reg]; + tulip_mdi_default[reg] |= (data & tulip_mdi_mask[reg]); +} + +static void tulip_mii(TULIPState *s) +{ + uint32_t changed = s->old_csr9 ^ s->csr[9]; + uint16_t data; + int op, phy, reg; + + if (!(changed & CSR9_MDC)) { + return; + } + + if (!(s->csr[9] & CSR9_MDC)) { + return; + } + + s->mii_bitcnt++; + s->mii_word <<= 1; + + if (s->csr[9] & CSR9_MDO && (s->mii_bitcnt < 16 || + !(s->csr[9] & CSR9_MII))) { + /* write op or address bits */ + s->mii_word |= 1; + } + + if (s->mii_bitcnt >= 16 && (s->csr[9] & CSR9_MII)) { + if (s->mii_word & 0x8000) { + s->csr[9] |= CSR9_MDI; + } else { + s->csr[9] &= ~CSR9_MDI; + } + } + + if (s->mii_word == 0xffffffff) { + s->mii_bitcnt = 0; + } else if (s->mii_bitcnt == 16) { + op = (s->mii_word >> 12) & 0x0f; + phy = (s->mii_word >> 7) & 0x1f; + reg = (s->mii_word >> 2) & 0x1f; + + if (op == 6) { + s->mii_word = tulip_mii_read(s, phy, reg); + } + } else if (s->mii_bitcnt == 32) { + op = (s->mii_word >> 28) & 0x0f; + phy = (s->mii_word >> 23) & 0x1f; + reg = (s->mii_word >> 18) & 0x1f; + data = s->mii_word & 0xffff; + + if (op == 5) { + tulip_mii_write(s, phy, reg, data); + } + } +} + +static uint32_t tulip_csr9_read(TULIPState *s) +{ + if (s->csr[9] & CSR9_SR) { + if (eeprom93xx_read(s->eeprom)) { + s->csr[9] |= CSR9_SR_DO; + } else { + s->csr[9] &= ~CSR9_SR_DO; + } + } + + tulip_mii(s); + return s->csr[9]; +} + +static void tulip_update_ts(TULIPState *s, int state) +{ + s->csr[5] &= ~(CSR5_TS_MASK << CSR5_TS_SHIFT); + s->csr[5] |= (state & CSR5_TS_MASK) << CSR5_TS_SHIFT; + trace_tulip_tx_state(tulip_tx_state_name(state)); +} + +static uint64_t tulip_read(void *opaque, hwaddr addr, + unsigned size) +{ + TULIPState *s = opaque; + uint64_t data = 0; + + switch (addr) { + case CSR(9): + data = tulip_csr9_read(s); + break; + + case CSR(12): + /* Fake autocompletion complete until we have PHY emulation */ + data = 5 << CSR12_ANS_SHIFT; + break; + + default: + if (addr & 7) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: read access at unknown address" + " 0x%"PRIx64"\n", __func__, addr); + } else { + data = s->csr[addr >> 3]; + } + break; + } + trace_tulip_reg_read(addr, tulip_reg_name(addr), size, data); + return data; +} + +static void tulip_tx(TULIPState *s, struct tulip_descriptor *desc) +{ + if (s->tx_frame_len) { + if ((s->csr[6] >> CSR6_OM_SHIFT) & CSR6_OM_MASK) { + /* Internal or external Loopback */ + tulip_receive(s, s->tx_frame, s->tx_frame_len); + } else if (s->tx_frame_len <= sizeof(s->tx_frame)) { + qemu_send_packet(qemu_get_queue(s->nic), + s->tx_frame, s->tx_frame_len); + } + } + + if 
(desc->control & TDES1_IC) { + s->csr[5] |= CSR5_TI; + tulip_update_int(s); + } +} + +static int tulip_copy_tx_buffers(TULIPState *s, struct tulip_descriptor *desc) +{ + int len1 = (desc->control >> TDES1_BUF1_SIZE_SHIFT) & TDES1_BUF1_SIZE_MASK; + int len2 = (desc->control >> TDES1_BUF2_SIZE_SHIFT) & TDES1_BUF2_SIZE_MASK; + + if (s->tx_frame_len + len1 > sizeof(s->tx_frame)) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: descriptor overflow (ofs: %u, len:%d, size:%zu)\n", + __func__, s->tx_frame_len, len1, sizeof(s->tx_frame)); + return -1; + } + if (len1) { + pci_dma_read(&s->dev, desc->buf_addr1, + s->tx_frame + s->tx_frame_len, len1); + s->tx_frame_len += len1; + } + + if (s->tx_frame_len + len2 > sizeof(s->tx_frame)) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: descriptor overflow (ofs: %u, len:%d, size:%zu)\n", + __func__, s->tx_frame_len, len2, sizeof(s->tx_frame)); + return -1; + } + if (len2) { + pci_dma_read(&s->dev, desc->buf_addr2, + s->tx_frame + s->tx_frame_len, len2); + s->tx_frame_len += len2; + } + desc->status = (len1 + len2) ? 0 : 0x7fffffff; + + return 0; +} + +static void tulip_setup_filter_addr(TULIPState *s, uint8_t *buf, int n) +{ + int offset = n * 12; + + s->filter[n][0] = buf[offset]; + s->filter[n][1] = buf[offset + 1]; + + s->filter[n][2] = buf[offset + 4]; + s->filter[n][3] = buf[offset + 5]; + + s->filter[n][4] = buf[offset + 8]; + s->filter[n][5] = buf[offset + 9]; + + trace_tulip_setup_filter(n, s->filter[n][5], s->filter[n][4], + s->filter[n][3], s->filter[n][2], s->filter[n][1], s->filter[n][0]); +} + +static void tulip_setup_frame(TULIPState *s, + struct tulip_descriptor *desc) +{ + uint8_t buf[4096]; + int len = (desc->control >> TDES1_BUF1_SIZE_SHIFT) & TDES1_BUF1_SIZE_MASK; + int i; + + trace_tulip_setup_frame(); + + if (len == 192) { + pci_dma_read(&s->dev, desc->buf_addr1, buf, len); + for (i = 0; i < 16; i++) { + tulip_setup_filter_addr(s, buf, i); + } + } + + desc->status = 0x7fffffff; + + if (desc->control & TDES1_IC) { + s->csr[5] |= CSR5_TI; + tulip_update_int(s); + } +} + +static void tulip_next_tx_descriptor(TULIPState *s, + struct tulip_descriptor *desc) +{ + if (desc->control & TDES1_TER) { + s->current_tx_desc = s->csr[4]; + } else if (desc->control & TDES1_TCH) { + s->current_tx_desc = desc->buf_addr2; + } else { + s->current_tx_desc += sizeof(struct tulip_descriptor) + + (((s->csr[0] >> CSR0_DSL_SHIFT) & CSR0_DSL_MASK) << 2); + } + s->current_tx_desc &= ~3ULL; +} + +static uint32_t tulip_ts(TULIPState *s) +{ + return (s->csr[5] >> CSR5_TS_SHIFT) & CSR5_TS_MASK; +} + +static void tulip_xmit_list_update(TULIPState *s) +{ +#define TULIP_DESC_MAX 128 + uint8_t i = 0; + struct tulip_descriptor desc; + + if (tulip_ts(s) != CSR5_TS_SUSPENDED) { + return; + } + + for (i = 0; i < TULIP_DESC_MAX; i++) { + tulip_desc_read(s, s->current_tx_desc, &desc); + tulip_dump_tx_descriptor(s, &desc); + + if (!(desc.status & TDES0_OWN)) { + tulip_update_ts(s, CSR5_TS_SUSPENDED); + s->csr[5] |= CSR5_TU; + tulip_update_int(s); + return; + } + + if (desc.control & TDES1_SET) { + tulip_setup_frame(s, &desc); + } else { + if (desc.control & TDES1_FS) { + s->tx_frame_len = 0; + } + + if (!tulip_copy_tx_buffers(s, &desc)) { + if (desc.control & TDES1_LS) { + tulip_tx(s, &desc); + } + } + } + tulip_desc_write(s, s->current_tx_desc, &desc); + tulip_next_tx_descriptor(s, &desc); + } +} + +static void tulip_csr9_write(TULIPState *s, uint32_t old_val, + uint32_t new_val) +{ + if (new_val & CSR9_SR) { + eeprom93xx_write(s->eeprom, + !!(new_val & CSR9_SR_CS), + !!(new_val & 
CSR9_SR_SK), + !!(new_val & CSR9_SR_DI)); + } +} + +static void tulip_reset(TULIPState *s) +{ + trace_tulip_reset(); + + s->csr[0] = 0xfe000000; + s->csr[1] = 0xffffffff; + s->csr[2] = 0xffffffff; + s->csr[5] = 0xf0000000; + s->csr[6] = 0x32000040; + s->csr[7] = 0xf3fe0000; + s->csr[8] = 0xe0000000; + s->csr[9] = 0xfff483ff; + s->csr[11] = 0xfffe0000; + s->csr[12] = 0x000000c6; + s->csr[13] = 0xffff0000; + s->csr[14] = 0xffffffff; + s->csr[15] = 0x8ff00000; +} + +static void tulip_qdev_reset(DeviceState *dev) +{ + PCIDevice *d = PCI_DEVICE(dev); + TULIPState *s = TULIP(d); + + tulip_reset(s); +} + +static void tulip_write(void *opaque, hwaddr addr, + uint64_t data, unsigned size) +{ + TULIPState *s = opaque; + trace_tulip_reg_write(addr, tulip_reg_name(addr), size, data); + + switch (addr) { + case CSR(0): + s->csr[0] = data; + if (data & CSR0_SWR) { + tulip_reset(s); + tulip_update_int(s); + } + break; + + case CSR(1): + tulip_xmit_list_update(s); + break; + + case CSR(2): + qemu_flush_queued_packets(qemu_get_queue(s->nic)); + break; + + case CSR(3): + s->csr[3] = data & ~3ULL; + s->current_rx_desc = s->csr[3]; + qemu_flush_queued_packets(qemu_get_queue(s->nic)); + break; + + case CSR(4): + s->csr[4] = data & ~3ULL; + s->current_tx_desc = s->csr[4]; + tulip_xmit_list_update(s); + break; + + case CSR(5): + /* Status register, write clears bit */ + s->csr[5] &= ~(data & (CSR5_TI | CSR5_TPS | CSR5_TU | CSR5_TJT | + CSR5_LNP_ANC | CSR5_UNF | CSR5_RI | CSR5_RU | + CSR5_RPS | CSR5_RWT | CSR5_ETI | CSR5_GTE | + CSR5_LNF | CSR5_FBE | CSR5_ERI | CSR5_AIS | + CSR5_NIS | CSR5_GPI | CSR5_LC)); + tulip_update_int(s); + break; + + case CSR(6): + s->csr[6] = data; + if (s->csr[6] & CSR6_SR) { + tulip_update_rs(s, CSR5_RS_RUNNING_WAIT_RECEIVE); + qemu_flush_queued_packets(qemu_get_queue(s->nic)); + } else { + tulip_update_rs(s, CSR5_RS_STOPPED); + } + + if (s->csr[6] & CSR6_ST) { + tulip_update_ts(s, CSR5_TS_SUSPENDED); + tulip_xmit_list_update(s); + } else { + tulip_update_ts(s, CSR5_TS_STOPPED); + } + break; + + case CSR(7): + s->csr[7] = data; + tulip_update_int(s); + break; + + case CSR(8): + s->csr[9] = data; + break; + + case CSR(9): + tulip_csr9_write(s, s->csr[9], data); + /* don't clear MII read data */ + s->csr[9] &= CSR9_MDI; + s->csr[9] |= (data & ~CSR9_MDI); + tulip_mii(s); + s->old_csr9 = s->csr[9]; + break; + + case CSR(10): + s->csr[10] = data; + break; + + case CSR(11): + s->csr[11] = data; + break; + + case CSR(12): + /* SIA Status register, some bits are cleared by writing 1 */ + s->csr[12] &= ~(data & (CSR12_MRA | CSR12_TRA | CSR12_ARA)); + break; + + case CSR(13): + s->csr[13] = data; + break; + + case CSR(14): + s->csr[14] = data; + break; + + case CSR(15): + s->csr[15] = data; + break; + + default: + qemu_log_mask(LOG_GUEST_ERROR, "%s: write to CSR at unknown address " + "0x%"PRIx64"\n", __func__, addr); + break; + } +} + +static const MemoryRegionOps tulip_ops = { + .read = tulip_read, + .write = tulip_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .impl = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +static void tulip_idblock_crc(TULIPState *s, uint16_t *srom) +{ + int word, n; + int bit; + unsigned char bitval, crc; + const int len = 9; + n = 0; + crc = -1; + + for (word = 0; word < len; word++) { + for (bit = 15; bit >= 0; bit--) { + if ((word == (len - 1)) && (bit == 7)) { + /* + * Insert the correct CRC result into input data stream + * in place. 
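+ * (The bit loop below is a CRC-8 with polynomial 0x07, i.e.
+ * x^8 + x^2 + x + 1: after "crc = crc << 1" the low bit is zero,
+ * so "crc ^= 6; crc |= 0x01" amounts to "crc ^= 0x07".)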
+ */ + srom[len - 1] = (srom[len - 1] & 0xff00) | (unsigned short)crc; + break; + } + n++; + bitval = ((srom[word] >> bit) & 1) ^ ((crc >> 7) & 1); + crc = crc << 1; + if (bitval == 1) { + crc ^= 6; + crc |= 0x01; + } + } + } +} + +static uint16_t tulip_srom_crc(TULIPState *s, uint8_t *eeprom, size_t len) +{ + unsigned long crc = 0xffffffff; + unsigned long flippedcrc = 0; + unsigned char currentbyte; + unsigned int msb, bit, i; + + for (i = 0; i < len; i++) { + currentbyte = eeprom[i]; + for (bit = 0; bit < 8; bit++) { + msb = (crc >> 31) & 1; + crc <<= 1; + if (msb ^ (currentbyte & 1)) { + crc ^= 0x04c11db6; + crc |= 0x00000001; + } + currentbyte >>= 1; + } + } + + for (i = 0; i < 32; i++) { + flippedcrc <<= 1; + bit = crc & 1; + crc >>= 1; + flippedcrc += bit; + } + return (flippedcrc ^ 0xffffffff) & 0xffff; +} + +static const uint8_t eeprom_default[128] = { + 0x3c, 0x10, 0x4f, 0x10, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x56, 0x08, 0x04, 0x01, 0x00, 0x80, 0x48, 0xb3, + 0x0e, 0xa7, 0x00, 0x1e, 0x00, 0x00, 0x00, 0x08, + 0x01, 0x8d, 0x03, 0x00, 0x00, 0x00, 0x00, 0x78, + 0xe0, 0x01, 0x00, 0x50, 0x00, 0x18, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe8, 0x6b, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, + 0x48, 0xb3, 0x0e, 0xa7, 0x40, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +}; + +static void tulip_fill_eeprom(TULIPState *s) +{ + uint16_t *eeprom = eeprom93xx_data(s->eeprom); + memcpy(eeprom, eeprom_default, 128); + + /* patch in our mac address */ + eeprom[10] = cpu_to_le16(s->c.macaddr.a[0] | (s->c.macaddr.a[1] << 8)); + eeprom[11] = cpu_to_le16(s->c.macaddr.a[2] | (s->c.macaddr.a[3] << 8)); + eeprom[12] = cpu_to_le16(s->c.macaddr.a[4] | (s->c.macaddr.a[5] << 8)); + tulip_idblock_crc(s, eeprom); + eeprom[63] = cpu_to_le16(tulip_srom_crc(s, (uint8_t *)eeprom, 126)); +} + +static void pci_tulip_realize(PCIDevice *pci_dev, Error **errp) +{ + TULIPState *s = DO_UPCAST(TULIPState, dev, pci_dev); + uint8_t *pci_conf; + + pci_conf = s->dev.config; + pci_conf[PCI_INTERRUPT_PIN] = 1; /* interrupt pin A */ + + s->eeprom = eeprom93xx_new(&pci_dev->qdev, 64); + tulip_fill_eeprom(s); + + memory_region_init_io(&s->io, OBJECT(&s->dev), &tulip_ops, s, + "tulip-io", 128); + + memory_region_init_io(&s->memory, OBJECT(&s->dev), &tulip_ops, s, + "tulip-mem", 128); + + pci_register_bar(&s->dev, 0, PCI_BASE_ADDRESS_SPACE_IO, &s->io); + pci_register_bar(&s->dev, 1, PCI_BASE_ADDRESS_SPACE_MEMORY, &s->memory); + + s->irq = pci_allocate_irq(&s->dev); + + qemu_macaddr_default_if_unset(&s->c.macaddr); + + s->nic = qemu_new_nic(&net_tulip_info, &s->c, + object_get_typename(OBJECT(pci_dev)), + pci_dev->qdev.id, s); + qemu_format_nic_info_str(qemu_get_queue(s->nic), s->c.macaddr.a); +} + +static void pci_tulip_exit(PCIDevice *pci_dev) +{ + TULIPState *s = DO_UPCAST(TULIPState, dev, pci_dev); + + qemu_del_nic(s->nic); + qemu_free_irq(s->irq); + eeprom93xx_free(&pci_dev->qdev, s->eeprom); +} + +static void tulip_instance_init(Object *obj) +{ + PCIDevice *pci_dev = PCI_DEVICE(obj); + TULIPState *d = DO_UPCAST(TULIPState, dev, pci_dev); + + device_add_bootindex_property(obj, &d->c.bootindex, + "bootindex", "/ethernet-phy@0", + &pci_dev->qdev); +} + 
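For reference, the len == 192 check in tulip_setup_frame() above corresponds to 16 perfect-filter slots of 12 bytes each, and tulip_setup_filter_addr() reads the station address out of bytes 0/1, 4/5 and 8/9 of each slot. A minimal guest-side sketch of producing such a buffer; the helper name fill_setup_frame and its signature are illustrative, not part of this patch:

#include <stdint.h>
#include <string.h>

/* Build the 192-byte TULIP setup frame that tulip_setup_frame() parses.
 * Each of the 16 slots is 12 bytes; only bytes 0/1, 4/5 and 8/9 of a
 * slot carry the MAC, mirroring tulip_setup_filter_addr() above. */
static void fill_setup_frame(uint8_t buf[192],
                             const uint8_t (*macs)[6], int n_macs)
{
    memset(buf, 0xff, 192);            /* unused slots become broadcast */
    for (int n = 0; n < 16 && n < n_macs; n++) {
        uint8_t *slot = buf + n * 12;  /* mirrors "offset = n * 12" */
        slot[0] = macs[n][0];
        slot[1] = macs[n][1];
        slot[4] = macs[n][2];
        slot[5] = macs[n][3];
        slot[8] = macs[n][4];
        slot[9] = macs[n][5];
    }
}

Note that tulip_setup_frame() only parses the table when the descriptor's buffer-1 length is exactly 192, so a driver has to send the full 16-slot table even when it cares about fewer addresses.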
+static Property tulip_properties[] = { + DEFINE_NIC_PROPERTIES(TULIPState, c), + DEFINE_PROP_END_OF_LIST(), +}; + +static void tulip_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + + k->realize = pci_tulip_realize; + k->exit = pci_tulip_exit; + k->vendor_id = PCI_VENDOR_ID_DEC; + k->device_id = PCI_DEVICE_ID_DEC_21143; + k->subsystem_vendor_id = 0x103c; + k->subsystem_id = 0x104f; + k->class_id = PCI_CLASS_NETWORK_ETHERNET; + dc->vmsd = &vmstate_pci_tulip; + device_class_set_props(dc, tulip_properties); + dc->reset = tulip_qdev_reset; + set_bit(DEVICE_CATEGORY_NETWORK, dc->categories); +} + +static const TypeInfo tulip_info = { + .name = TYPE_TULIP, + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(TULIPState), + .class_init = tulip_class_init, + .instance_init = tulip_instance_init, + .interfaces = (InterfaceInfo[]) { + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { }, + }, +}; + +static void tulip_register_types(void) +{ + type_register_static(&tulip_info); +} + +type_init(tulip_register_types) diff --git a/hw/net/tulip.h b/hw/net/tulip.h new file mode 100644 index 000000000..ffd1f88d5 --- /dev/null +++ b/hw/net/tulip.h @@ -0,0 +1,268 @@ +#ifndef HW_TULIP_H +#define HW_TULIP_H + +#include "qemu/units.h" +#include "net/net.h" +#include "qom/object.h" + +#define TYPE_TULIP "tulip" +OBJECT_DECLARE_SIMPLE_TYPE(TULIPState, TULIP) + +#define CSR(_x) ((_x) << 3) + +#define CSR0_SWR BIT(0) +#define CSR0_BAR BIT(1) +#define CSR0_DSL_SHIFT 2 +#define CSR0_DSL_MASK 0x1f +#define CSR0_BLE BIT(7) +#define CSR0_PBL_SHIFT 8 +#define CSR0_PBL_MASK 0x3f +#define CSR0_CAC_SHIFT 14 +#define CSR0_CAC_MASK 0x3 +#define CSR0_DAS 0x10000 +#define CSR0_TAP_SHIFT 17 +#define CSR0_TAP_MASK 0x7 +#define CSR0_DBO 0x100000 +#define CSR1_TPD 0x01 +#define CSR0_RLE BIT(23) +#define CSR0_WIE BIT(24) + +#define CSR2_RPD 0x01 + +#define CSR5_TI BIT(0) +#define CSR5_TPS BIT(1) +#define CSR5_TU BIT(2) +#define CSR5_TJT BIT(3) +#define CSR5_LNP_ANC BIT(4) +#define CSR5_UNF BIT(5) +#define CSR5_RI BIT(6) +#define CSR5_RU BIT(7) +#define CSR5_RPS BIT(8) +#define CSR5_RWT BIT(9) +#define CSR5_ETI BIT(10) +#define CSR5_GTE BIT(11) +#define CSR5_LNF BIT(12) +#define CSR5_FBE BIT(13) +#define CSR5_ERI BIT(14) +#define CSR5_AIS BIT(15) +#define CSR5_NIS BIT(16) +#define CSR5_RS_SHIFT 17 +#define CSR5_RS_MASK 7 +#define CSR5_TS_SHIFT 20 +#define CSR5_TS_MASK 7 + +#define CSR5_TS_STOPPED 0 +#define CSR5_TS_RUNNING_FETCH 1 +#define CSR5_TS_RUNNING_WAIT_EOT 2 +#define CSR5_TS_RUNNING_READ_BUF 3 +#define CSR5_TS_RUNNING_SETUP 5 +#define CSR5_TS_SUSPENDED 6 +#define CSR5_TS_RUNNING_CLOSE 7 + +#define CSR5_RS_STOPPED 0 +#define CSR5_RS_RUNNING_FETCH 1 +#define CSR5_RS_RUNNING_CHECK_EOR 2 +#define CSR5_RS_RUNNING_WAIT_RECEIVE 3 +#define CSR5_RS_SUSPENDED 4 +#define CSR5_RS_RUNNING_CLOSE 5 +#define CSR5_RS_RUNNING_FLUSH 6 +#define CSR5_RS_RUNNING_QUEUE 7 + +#define CSR5_EB_SHIFT 23 +#define CSR5_EB_MASK 7 + +#define CSR5_GPI BIT(26) +#define CSR5_LC BIT(27) + +#define CSR6_HP BIT(0) +#define CSR6_SR BIT(1) +#define CSR6_HO BIT(2) +#define CSR6_PB BIT(3) +#define CSR6_IF BIT(4) +#define CSR6_SB BIT(5) +#define CSR6_PR BIT(6) +#define CSR6_PM BIT(7) +#define CSR6_FKD BIT(8) +#define CSR6_FD BIT(9) + +#define CSR6_OM_SHIFT 10 +#define CSR6_OM_MASK 3 +#define CSR6_OM_NORMAL 0 +#define CSR6_OM_INT_LOOPBACK 1 +#define CSR6_OM_EXT_LOOPBACK 2 + +#define CSR6_FC BIT(12) +#define CSR6_ST BIT(13) + + +#define CSR6_TR_SHIFT 14 +#define CSR6_TR_MASK 3 +#define 
CSR6_TR_72 0 +#define CSR6_TR_96 1 +#define CSR6_TR_128 2 +#define CSR6_TR_160 3 + +#define CSR6_CA BIT(17) +#define CSR6_RA BIT(30) +#define CSR6_SC BIT(31) + +#define CSR7_TIM BIT(0) +#define CSR7_TSM BIT(1) +#define CSR7_TUM BIT(2) +#define CSR7_TJM BIT(3) +#define CSR7_LPM BIT(4) +#define CSR7_UNM BIT(5) +#define CSR7_RIM BIT(6) +#define CSR7_RUM BIT(7) +#define CSR7_RSM BIT(8) +#define CSR7_RWM BIT(9) +#define CSR7_TMM BIT(11) +#define CSR7_LFM BIT(12) +#define CSR7_SEM BIT(13) +#define CSR7_ERM BIT(14) +#define CSR7_AIM BIT(15) +#define CSR7_NIM BIT(16) + +#define CSR8_MISSED_FRAME_OVL BIT(16) +#define CSR8_MISSED_FRAME_CNT_MASK 0xffff + +#define CSR9_DATA_MASK 0xff +#define CSR9_SR_CS BIT(0) +#define CSR9_SR_SK BIT(1) +#define CSR9_SR_DI BIT(2) +#define CSR9_SR_DO BIT(3) +#define CSR9_REG BIT(10) +#define CSR9_SR BIT(11) +#define CSR9_BR BIT(12) +#define CSR9_WR BIT(13) +#define CSR9_RD BIT(14) +#define CSR9_MOD BIT(15) +#define CSR9_MDC BIT(16) +#define CSR9_MDO BIT(17) +#define CSR9_MII BIT(18) +#define CSR9_MDI BIT(19) + +#define CSR11_CON BIT(16) +#define CSR11_TIMER_MASK 0xffff + +#define CSR12_MRA BIT(0) +#define CSR12_LS100 BIT(1) +#define CSR12_LS10 BIT(2) +#define CSR12_APS BIT(3) +#define CSR12_ARA BIT(8) +#define CSR12_TRA BIT(9) +#define CSR12_NSN BIT(10) +#define CSR12_TRF BIT(11) +#define CSR12_ANS_SHIFT 12 +#define CSR12_ANS_MASK 7 +#define CSR12_LPN BIT(15) +#define CSR12_LPC_SHIFT 16 +#define CSR12_LPC_MASK 0xffff + +#define CSR13_SRL BIT(0) +#define CSR13_CAC BIT(2) +#define CSR13_AUI BIT(3) +#define CSR13_SDM_SHIFT 4 +#define CSR13_SDM_MASK 0xfff + +#define CSR14_ECEN BIT(0) +#define CSR14_LBK BIT(1) +#define CSR14_DREN BIT(2) +#define CSR14_LSE BIT(3) +#define CSR14_CPEN_SHIFT 4 +#define CSR14_CPEN_MASK 3 +#define CSR14_MBO BIT(6) +#define CSR14_ANE BIT(7) +#define CSR14_RSQ BIT(8) +#define CSR14_CSQ BIT(9) +#define CSR14_CLD BIT(10) +#define CSR14_SQE BIT(11) +#define CSR14_LTE BIT(12) +#define CSR14_APE BIT(13) +#define CSR14_SPP BIT(14) +#define CSR14_TAS BIT(15) + +#define CSR15_JBD BIT(0) +#define CSR15_HUJ BIT(1) +#define CSR15_JCK BIT(2) +#define CSR15_ABM BIT(3) +#define CSR15_RWD BIT(4) +#define CSR15_RWR BIT(5) +#define CSR15_LE1 BIT(6) +#define CSR15_LV1 BIT(7) +#define CSR15_TSCK BIT(8) +#define CSR15_FUSQ BIT(9) +#define CSR15_FLF BIT(10) +#define CSR15_LSD BIT(11) +#define CSR15_DPST BIT(12) +#define CSR15_FRL BIT(13) +#define CSR15_LE2 BIT(14) +#define CSR15_LV2 BIT(15) + +#define RDES0_OF BIT(0) +#define RDES0_CE BIT(1) +#define RDES0_DB BIT(2) +#define RDES0_RJ BIT(4) +#define RDES0_FT BIT(5) +#define RDES0_CS BIT(6) +#define RDES0_TL BIT(7) +#define RDES0_LS BIT(8) +#define RDES0_FS BIT(9) +#define RDES0_MF BIT(10) +#define RDES0_RF BIT(11) +#define RDES0_DT_SHIFT 12 +#define RDES0_DT_MASK 3 +#define RDES0_DE BIT(14) +#define RDES0_ES BIT(15) +#define RDES0_FL_SHIFT 16 +#define RDES0_FL_MASK 0x3fff +#define RDES0_FF BIT(30) +#define RDES0_OWN BIT(31) + +#define RDES1_BUF1_SIZE_SHIFT 0 +#define RDES1_BUF1_SIZE_MASK 0x7ff + +#define RDES1_BUF2_SIZE_SHIFT 11 +#define RDES1_BUF2_SIZE_MASK 0x7ff +#define RDES1_RCH BIT(24) +#define RDES1_RER BIT(25) + +#define TDES0_DE BIT(0) +#define TDES0_UF BIT(1) +#define TDES0_LF BIT(2) +#define TDES0_CC_SHIFT 3 +#define TDES0_CC_MASK 0xf +#define TDES0_HF BIT(7) +#define TDES0_EC BIT(8) +#define TDES0_LC BIT(9) +#define TDES0_NC BIT(10) +#define TDES0_LO BIT(11) +#define TDES0_TO BIT(14) +#define TDES0_ES BIT(15) +#define TDES0_OWN BIT(31) + +#define TDES1_BUF1_SIZE_SHIFT 0 +#define TDES1_BUF1_SIZE_MASK 0x7ff + 
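As a worked example of how these shift/mask pairs are consumed (tulip_copy_tx_buffers() in tulip.c above does the same thing inline), a hypothetical helper:

/* Illustrative only, not part of the patch: decode the first buffer
 * length a guest encoded into a TDES1 control word. */
static inline int tdes1_buf1_len(uint32_t control)
{
    return (control >> TDES1_BUF1_SIZE_SHIFT) & TDES1_BUF1_SIZE_MASK;
}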
+#define TDES1_BUF2_SIZE_SHIFT 11 +#define TDES1_BUF2_SIZE_MASK 0x7ff + +#define TDES1_FT0 BIT(22) +#define TDES1_DPD BIT(23) +#define TDES1_TCH BIT(24) +#define TDES1_TER BIT(25) +#define TDES1_AC BIT(26) +#define TDES1_SET BIT(27) +#define TDES1_FT1 BIT(28) +#define TDES1_FS BIT(29) +#define TDES1_LS BIT(30) +#define TDES1_IC BIT(31) + +struct tulip_descriptor { + uint32_t status; + uint32_t control; + uint32_t buf_addr1; + uint32_t buf_addr2; +}; + +#endif diff --git a/hw/net/vhost_net-stub.c b/hw/net/vhost_net-stub.c new file mode 100644 index 000000000..89d71cfb8 --- /dev/null +++ b/hw/net/vhost_net-stub.c @@ -0,0 +1,103 @@ +/* + * vhost-net support + * + * Copyright Red Hat, Inc. 2010 + * + * Authors: + * Michael S. Tsirkin + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "net/net.h" +#include "net/tap.h" +#include "net/vhost-user.h" + +#include "hw/virtio/virtio-net.h" +#include "net/vhost_net.h" +#include "qemu/error-report.h" + + +uint64_t vhost_net_get_max_queues(VHostNetState *net) +{ + return 1; +} + +struct vhost_net *vhost_net_init(VhostNetOptions *options) +{ + error_report("vhost-net support is not compiled in"); + return NULL; +} + +int vhost_net_start(VirtIODevice *dev, + NetClientState *ncs, + int data_queue_pairs, int cvq) +{ + return -ENOSYS; +} +void vhost_net_stop(VirtIODevice *dev, + NetClientState *ncs, + int data_queue_pairs, int cvq) +{ +} + +void vhost_net_cleanup(struct vhost_net *net) +{ +} + +uint64_t vhost_net_get_features(struct vhost_net *net, uint64_t features) +{ + return features; +} + +int vhost_net_get_config(struct vhost_net *net, uint8_t *config, + uint32_t config_len) +{ + return 0; +} +int vhost_net_set_config(struct vhost_net *net, const uint8_t *data, + uint32_t offset, uint32_t size, uint32_t flags) +{ + return 0; +} + +void vhost_net_ack_features(struct vhost_net *net, uint64_t features) +{ +} + +uint64_t vhost_net_get_acked_features(VHostNetState *net) +{ + return 0; +} + +bool vhost_net_virtqueue_pending(VHostNetState *net, int idx) +{ + return false; +} + +void vhost_net_virtqueue_mask(VHostNetState *net, VirtIODevice *dev, + int idx, bool mask) +{ +} + +int vhost_net_notify_migration_done(struct vhost_net *net, char* mac_addr) +{ + return -1; +} + +VHostNetState *get_vhost_net(NetClientState *nc) +{ + return 0; +} + +int vhost_set_vring_enable(NetClientState *nc, int enable) +{ + return 0; +} + +int vhost_net_set_mtu(struct vhost_net *net, uint16_t mtu) +{ + return 0; +} diff --git a/hw/net/vhost_net.c b/hw/net/vhost_net.c new file mode 100644 index 000000000..30379d2ca --- /dev/null +++ b/hw/net/vhost_net.c @@ -0,0 +1,514 @@ +/* + * vhost-net support + * + * Copyright Red Hat, Inc. 2010 + * + * Authors: + * Michael S. Tsirkin + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + * + * Contributions after 2012-01-13 are licensed under the terms of the + * GNU GPL, version 2 or (at your option) any later version. 
+ */ + +#include "qemu/osdep.h" +#include "net/net.h" +#include "net/tap.h" +#include "net/vhost-user.h" +#include "net/vhost-vdpa.h" + +#include "standard-headers/linux/vhost_types.h" +#include "hw/virtio/virtio-net.h" +#include "net/vhost_net.h" +#include "qapi/error.h" +#include "qemu/error-report.h" +#include "qemu/main-loop.h" + +#include +#include +#include + + +#include "standard-headers/linux/virtio_ring.h" +#include "hw/virtio/vhost.h" +#include "hw/virtio/virtio-bus.h" + + +/* Features supported by host kernel. */ +static const int kernel_feature_bits[] = { + VIRTIO_F_NOTIFY_ON_EMPTY, + VIRTIO_RING_F_INDIRECT_DESC, + VIRTIO_RING_F_EVENT_IDX, + VIRTIO_NET_F_MRG_RXBUF, + VIRTIO_F_VERSION_1, + VIRTIO_NET_F_MTU, + VIRTIO_F_IOMMU_PLATFORM, + VIRTIO_F_RING_PACKED, + VIRTIO_NET_F_HASH_REPORT, + VHOST_INVALID_FEATURE_BIT +}; + +/* Features supported by others. */ +static const int user_feature_bits[] = { + VIRTIO_F_NOTIFY_ON_EMPTY, + VIRTIO_RING_F_INDIRECT_DESC, + VIRTIO_RING_F_EVENT_IDX, + + VIRTIO_F_ANY_LAYOUT, + VIRTIO_F_VERSION_1, + VIRTIO_NET_F_CSUM, + VIRTIO_NET_F_GUEST_CSUM, + VIRTIO_NET_F_GSO, + VIRTIO_NET_F_GUEST_TSO4, + VIRTIO_NET_F_GUEST_TSO6, + VIRTIO_NET_F_GUEST_ECN, + VIRTIO_NET_F_GUEST_UFO, + VIRTIO_NET_F_HOST_TSO4, + VIRTIO_NET_F_HOST_TSO6, + VIRTIO_NET_F_HOST_ECN, + VIRTIO_NET_F_HOST_UFO, + VIRTIO_NET_F_MRG_RXBUF, + VIRTIO_NET_F_MTU, + VIRTIO_F_IOMMU_PLATFORM, + VIRTIO_F_RING_PACKED, + VIRTIO_NET_F_RSS, + VIRTIO_NET_F_HASH_REPORT, + + /* This bit implies RARP isn't sent by QEMU out of band */ + VIRTIO_NET_F_GUEST_ANNOUNCE, + + VIRTIO_NET_F_MQ, + + VHOST_INVALID_FEATURE_BIT +}; + +static const int *vhost_net_get_feature_bits(struct vhost_net *net) +{ + const int *feature_bits = 0; + + switch (net->nc->info->type) { + case NET_CLIENT_DRIVER_TAP: + feature_bits = kernel_feature_bits; + break; + case NET_CLIENT_DRIVER_VHOST_USER: + feature_bits = user_feature_bits; + break; +#ifdef CONFIG_VHOST_NET_VDPA + case NET_CLIENT_DRIVER_VHOST_VDPA: + feature_bits = vdpa_feature_bits; + break; +#endif + default: + error_report("Feature bits not defined for this type: %d", + net->nc->info->type); + break; + } + + return feature_bits; +} + +uint64_t vhost_net_get_features(struct vhost_net *net, uint64_t features) +{ + return vhost_get_features(&net->dev, vhost_net_get_feature_bits(net), + features); +} +int vhost_net_get_config(struct vhost_net *net, uint8_t *config, + uint32_t config_len) +{ + return vhost_dev_get_config(&net->dev, config, config_len, NULL); +} +int vhost_net_set_config(struct vhost_net *net, const uint8_t *data, + uint32_t offset, uint32_t size, uint32_t flags) +{ + return vhost_dev_set_config(&net->dev, data, offset, size, flags); +} + +void vhost_net_ack_features(struct vhost_net *net, uint64_t features) +{ + net->dev.acked_features = net->dev.backend_features; + vhost_ack_features(&net->dev, vhost_net_get_feature_bits(net), features); +} + +uint64_t vhost_net_get_max_queues(VHostNetState *net) +{ + return net->dev.max_queues; +} + +uint64_t vhost_net_get_acked_features(VHostNetState *net) +{ + return net->dev.acked_features; +} + +static int vhost_net_get_fd(NetClientState *backend) +{ + switch (backend->info->type) { + case NET_CLIENT_DRIVER_TAP: + return tap_get_fd(backend); + default: + fprintf(stderr, "vhost-net requires tap backend\n"); + return -ENOSYS; + } +} + +struct vhost_net *vhost_net_init(VhostNetOptions *options) +{ + int r; + bool backend_kernel = options->backend_type == VHOST_BACKEND_TYPE_KERNEL; + struct vhost_net *net = g_new0(struct vhost_net, 
1); + uint64_t features = 0; + Error *local_err = NULL; + + if (!options->net_backend) { + fprintf(stderr, "vhost-net requires net backend to be setup\n"); + goto fail; + } + net->nc = options->net_backend; + net->dev.nvqs = options->nvqs; + + net->dev.max_queues = 1; + net->dev.vqs = net->vqs; + + if (backend_kernel) { + r = vhost_net_get_fd(options->net_backend); + if (r < 0) { + goto fail; + } + net->dev.backend_features = qemu_has_vnet_hdr(options->net_backend) + ? 0 : (1ULL << VHOST_NET_F_VIRTIO_NET_HDR); + net->backend = r; + net->dev.protocol_features = 0; + } else { + net->dev.backend_features = 0; + net->dev.protocol_features = 0; + net->backend = -1; + + /* vhost-user needs vq_index to initiate a specific queue pair */ + net->dev.vq_index = net->nc->queue_index * net->dev.nvqs; + } + + r = vhost_dev_init(&net->dev, options->opaque, + options->backend_type, options->busyloop_timeout, + &local_err); + if (r < 0) { + error_report_err(local_err); + goto fail; + } + if (backend_kernel) { + if (!qemu_has_vnet_hdr_len(options->net_backend, + sizeof(struct virtio_net_hdr_mrg_rxbuf))) { + net->dev.features &= ~(1ULL << VIRTIO_NET_F_MRG_RXBUF); + } + if (~net->dev.features & net->dev.backend_features) { + fprintf(stderr, "vhost lacks feature mask %" PRIu64 + " for backend\n", + (uint64_t)(~net->dev.features & net->dev.backend_features)); + goto fail; + } + } + + /* Set sane init value. Override when guest acks. */ +#ifdef CONFIG_VHOST_NET_USER + if (net->nc->info->type == NET_CLIENT_DRIVER_VHOST_USER) { + features = vhost_user_get_acked_features(net->nc); + if (~net->dev.features & features) { + fprintf(stderr, "vhost lacks feature mask %" PRIu64 + " for backend\n", + (uint64_t)(~net->dev.features & features)); + goto fail; + } + } +#endif + + vhost_net_ack_features(net, features); + + return net; + +fail: + vhost_dev_cleanup(&net->dev); + g_free(net); + return NULL; +} + +static void vhost_net_set_vq_index(struct vhost_net *net, int vq_index, + int vq_index_end) +{ + net->dev.vq_index = vq_index; + net->dev.vq_index_end = vq_index_end; +} + +static int vhost_net_start_one(struct vhost_net *net, + VirtIODevice *dev) +{ + struct vhost_vring_file file = { }; + int r; + + r = vhost_dev_enable_notifiers(&net->dev, dev); + if (r < 0) { + goto fail_notifiers; + } + + r = vhost_dev_start(&net->dev, dev); + if (r < 0) { + goto fail_start; + } + + if (net->nc->info->poll) { + net->nc->info->poll(net->nc, false); + } + + if (net->nc->info->type == NET_CLIENT_DRIVER_TAP) { + qemu_set_fd_handler(net->backend, NULL, NULL, NULL); + file.fd = net->backend; + for (file.index = 0; file.index < net->dev.nvqs; ++file.index) { + if (!virtio_queue_enabled(dev, net->dev.vq_index + + file.index)) { + /* Queue might not be ready for start */ + continue; + } + r = vhost_net_set_backend(&net->dev, &file); + if (r < 0) { + r = -errno; + goto fail; + } + } + } + return 0; +fail: + file.fd = -1; + if (net->nc->info->type == NET_CLIENT_DRIVER_TAP) { + while (file.index-- > 0) { + if (!virtio_queue_enabled(dev, net->dev.vq_index + + file.index)) { + /* Queue might not be ready for start */ + continue; + } + int r = vhost_net_set_backend(&net->dev, &file); + assert(r >= 0); + } + } + if (net->nc->info->poll) { + net->nc->info->poll(net->nc, true); + } + vhost_dev_stop(&net->dev, dev); +fail_start: + vhost_dev_disable_notifiers(&net->dev, dev); +fail_notifiers: + return r; +} + +static void vhost_net_stop_one(struct vhost_net *net, + VirtIODevice *dev) +{ + struct vhost_vring_file file = { .fd = -1 }; + + if 
(net->nc->info->type == NET_CLIENT_DRIVER_TAP) { + for (file.index = 0; file.index < net->dev.nvqs; ++file.index) { + int r = vhost_net_set_backend(&net->dev, &file); + assert(r >= 0); + } + } + if (net->nc->info->poll) { + net->nc->info->poll(net->nc, true); + } + vhost_dev_stop(&net->dev, dev); + vhost_dev_disable_notifiers(&net->dev, dev); +} + +int vhost_net_start(VirtIODevice *dev, NetClientState *ncs, + int data_queue_pairs, int cvq) +{ + BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(dev))); + VirtioBusState *vbus = VIRTIO_BUS(qbus); + VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus); + int total_notifiers = data_queue_pairs * 2 + cvq; + VirtIONet *n = VIRTIO_NET(dev); + int nvhosts = data_queue_pairs + cvq; + struct vhost_net *net; + int r, e, i, index_end = data_queue_pairs * 2; + NetClientState *peer; + + if (cvq) { + index_end += 1; + } + + if (!k->set_guest_notifiers) { + error_report("binding does not support guest notifiers"); + return -ENOSYS; + } + + for (i = 0; i < nvhosts; i++) { + + if (i < data_queue_pairs) { + peer = qemu_get_peer(ncs, i); + } else { /* Control Virtqueue */ + peer = qemu_get_peer(ncs, n->max_queue_pairs); + } + + net = get_vhost_net(peer); + vhost_net_set_vq_index(net, i * 2, index_end); + + /* Suppress the masking guest notifiers on vhost user + * because vhost user doesn't interrupt masking/unmasking + * properly. + */ + if (net->nc->info->type == NET_CLIENT_DRIVER_VHOST_USER) { + dev->use_guest_notifier_mask = false; + } + } + + r = k->set_guest_notifiers(qbus->parent, total_notifiers, true); + if (r < 0) { + error_report("Error binding guest notifier: %d", -r); + goto err; + } + + for (i = 0; i < nvhosts; i++) { + if (i < data_queue_pairs) { + peer = qemu_get_peer(ncs, i); + } else { + peer = qemu_get_peer(ncs, n->max_queue_pairs); + } + r = vhost_net_start_one(get_vhost_net(peer), dev); + + if (r < 0) { + goto err_start; + } + + if (peer->vring_enable) { + /* restore vring enable state */ + r = vhost_set_vring_enable(peer, peer->vring_enable); + + if (r < 0) { + goto err_start; + } + } + } + + return 0; + +err_start: + while (--i >= 0) { + peer = qemu_get_peer(ncs , i); + vhost_net_stop_one(get_vhost_net(peer), dev); + } + e = k->set_guest_notifiers(qbus->parent, total_notifiers, false); + if (e < 0) { + fprintf(stderr, "vhost guest notifier cleanup failed: %d\n", e); + fflush(stderr); + } +err: + return r; +} + +void vhost_net_stop(VirtIODevice *dev, NetClientState *ncs, + int data_queue_pairs, int cvq) +{ + BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(dev))); + VirtioBusState *vbus = VIRTIO_BUS(qbus); + VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus); + VirtIONet *n = VIRTIO_NET(dev); + NetClientState *peer; + int total_notifiers = data_queue_pairs * 2 + cvq; + int nvhosts = data_queue_pairs + cvq; + int i, r; + + for (i = 0; i < nvhosts; i++) { + if (i < data_queue_pairs) { + peer = qemu_get_peer(ncs, i); + } else { + peer = qemu_get_peer(ncs, n->max_queue_pairs); + } + vhost_net_stop_one(get_vhost_net(peer), dev); + } + + r = k->set_guest_notifiers(qbus->parent, total_notifiers, false); + if (r < 0) { + fprintf(stderr, "vhost guest notifier cleanup failed: %d\n", r); + fflush(stderr); + } + assert(r >= 0); +} + +void vhost_net_cleanup(struct vhost_net *net) +{ + vhost_dev_cleanup(&net->dev); +} + +int vhost_net_notify_migration_done(struct vhost_net *net, char* mac_addr) +{ + const VhostOps *vhost_ops = net->dev.vhost_ops; + + assert(vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER); + assert(vhost_ops->vhost_migration_done); + + return 
vhost_ops->vhost_migration_done(&net->dev, mac_addr); +} + +bool vhost_net_virtqueue_pending(VHostNetState *net, int idx) +{ + return vhost_virtqueue_pending(&net->dev, idx); +} + +void vhost_net_virtqueue_mask(VHostNetState *net, VirtIODevice *dev, + int idx, bool mask) +{ + vhost_virtqueue_mask(&net->dev, dev, idx, mask); +} + +VHostNetState *get_vhost_net(NetClientState *nc) +{ + VHostNetState *vhost_net = 0; + + if (!nc) { + return 0; + } + + switch (nc->info->type) { + case NET_CLIENT_DRIVER_TAP: + vhost_net = tap_get_vhost_net(nc); + break; +#ifdef CONFIG_VHOST_NET_USER + case NET_CLIENT_DRIVER_VHOST_USER: + vhost_net = vhost_user_get_vhost_net(nc); + assert(vhost_net); + break; +#endif +#ifdef CONFIG_VHOST_NET_VDPA + case NET_CLIENT_DRIVER_VHOST_VDPA: + vhost_net = vhost_vdpa_get_vhost_net(nc); + assert(vhost_net); + break; +#endif + default: + break; + } + + return vhost_net; +} + +int vhost_set_vring_enable(NetClientState *nc, int enable) +{ + VHostNetState *net = get_vhost_net(nc); + const VhostOps *vhost_ops = net->dev.vhost_ops; + + nc->vring_enable = enable; + + if (vhost_ops && vhost_ops->vhost_set_vring_enable) { + return vhost_ops->vhost_set_vring_enable(&net->dev, enable); + } + + return 0; +} + +int vhost_net_set_mtu(struct vhost_net *net, uint16_t mtu) +{ + const VhostOps *vhost_ops = net->dev.vhost_ops; + + if (!vhost_ops->vhost_net_set_mtu) { + return 0; + } + + return vhost_ops->vhost_net_set_mtu(&net->dev, mtu); +} diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c new file mode 100644 index 000000000..f2014d5ea --- /dev/null +++ b/hw/net/virtio-net.c @@ -0,0 +1,3730 @@ +/* + * Virtio Network Device + * + * Copyright IBM, Corp. 2007 + * + * Authors: + * Anthony Liguori + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. 
+ * + */ + +#include "qemu/osdep.h" +#include "qemu/atomic.h" +#include "qemu/iov.h" +#include "qemu/main-loop.h" +#include "qemu/module.h" +#include "hw/virtio/virtio.h" +#include "net/net.h" +#include "net/checksum.h" +#include "net/tap.h" +#include "qemu/error-report.h" +#include "qemu/timer.h" +#include "qemu/option.h" +#include "qemu/option_int.h" +#include "qemu/config-file.h" +#include "qapi/qmp/qdict.h" +#include "hw/virtio/virtio-net.h" +#include "net/vhost_net.h" +#include "net/announce.h" +#include "hw/virtio/virtio-bus.h" +#include "qapi/error.h" +#include "qapi/qapi-events-net.h" +#include "hw/qdev-properties.h" +#include "qapi/qapi-types-migration.h" +#include "qapi/qapi-events-migration.h" +#include "hw/virtio/virtio-access.h" +#include "migration/misc.h" +#include "standard-headers/linux/ethtool.h" +#include "sysemu/sysemu.h" +#include "trace.h" +#include "monitor/qdev.h" +#include "hw/pci/pci.h" +#include "net_rx_pkt.h" +#include "hw/virtio/vhost.h" + +#define VIRTIO_NET_VM_VERSION 11 + +#define MAC_TABLE_ENTRIES 64 +#define MAX_VLAN (1 << 12) /* Per 802.1Q definition */ + +/* previously fixed value */ +#define VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE 256 +#define VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE 256 + +/* for now, only allow larger queue_pairs; with virtio-1, guest can downsize */ +#define VIRTIO_NET_RX_QUEUE_MIN_SIZE VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE +#define VIRTIO_NET_TX_QUEUE_MIN_SIZE VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE + +#define VIRTIO_NET_IP4_ADDR_SIZE 8 /* ipv4 saddr + daddr */ + +#define VIRTIO_NET_TCP_FLAG 0x3F +#define VIRTIO_NET_TCP_HDR_LENGTH 0xF000 + +/* IPv4 max payload, 16 bits in the header */ +#define VIRTIO_NET_MAX_IP4_PAYLOAD (65535 - sizeof(struct ip_header)) +#define VIRTIO_NET_MAX_TCP_PAYLOAD 65535 + +/* header length value in ip header without option */ +#define VIRTIO_NET_IP4_HEADER_LENGTH 5 + +#define VIRTIO_NET_IP6_ADDR_SIZE 32 /* ipv6 saddr + daddr */ +#define VIRTIO_NET_MAX_IP6_PAYLOAD VIRTIO_NET_MAX_TCP_PAYLOAD + +/* Purge coalesced packets timer interval, This value affects the performance + a lot, and should be tuned carefully, '300000'(300us) is the recommended + value to pass the WHQL test, '50000' can gain 2x netperf throughput with + tso/gso/gro 'off'. 
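+ (The interval is in nanoseconds, as the purge timer runs on QEMU's
+ nanosecond-resolution virtual clock, so '300000' is the 300us
+ mentioned above.)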
*/ +#define VIRTIO_NET_RSC_DEFAULT_INTERVAL 300000 + +#define VIRTIO_NET_RSS_SUPPORTED_HASHES (VIRTIO_NET_RSS_HASH_TYPE_IPv4 | \ + VIRTIO_NET_RSS_HASH_TYPE_TCPv4 | \ + VIRTIO_NET_RSS_HASH_TYPE_UDPv4 | \ + VIRTIO_NET_RSS_HASH_TYPE_IPv6 | \ + VIRTIO_NET_RSS_HASH_TYPE_TCPv6 | \ + VIRTIO_NET_RSS_HASH_TYPE_UDPv6 | \ + VIRTIO_NET_RSS_HASH_TYPE_IP_EX | \ + VIRTIO_NET_RSS_HASH_TYPE_TCP_EX | \ + VIRTIO_NET_RSS_HASH_TYPE_UDP_EX) + +static const VirtIOFeature feature_sizes[] = { + {.flags = 1ULL << VIRTIO_NET_F_MAC, + .end = endof(struct virtio_net_config, mac)}, + {.flags = 1ULL << VIRTIO_NET_F_STATUS, + .end = endof(struct virtio_net_config, status)}, + {.flags = 1ULL << VIRTIO_NET_F_MQ, + .end = endof(struct virtio_net_config, max_virtqueue_pairs)}, + {.flags = 1ULL << VIRTIO_NET_F_MTU, + .end = endof(struct virtio_net_config, mtu)}, + {.flags = 1ULL << VIRTIO_NET_F_SPEED_DUPLEX, + .end = endof(struct virtio_net_config, duplex)}, + {.flags = (1ULL << VIRTIO_NET_F_RSS) | (1ULL << VIRTIO_NET_F_HASH_REPORT), + .end = endof(struct virtio_net_config, supported_hash_types)}, + {} +}; + +static VirtIONetQueue *virtio_net_get_subqueue(NetClientState *nc) +{ + VirtIONet *n = qemu_get_nic_opaque(nc); + + return &n->vqs[nc->queue_index]; +} + +static int vq2q(int queue_index) +{ + return queue_index / 2; +} + +/* TODO + * - we could suppress RX interrupt if we were so inclined. + */ + +static void virtio_net_get_config(VirtIODevice *vdev, uint8_t *config) +{ + VirtIONet *n = VIRTIO_NET(vdev); + struct virtio_net_config netcfg; + NetClientState *nc = qemu_get_queue(n->nic); + static const MACAddr zero = { .a = { 0, 0, 0, 0, 0, 0 } }; + + int ret = 0; + memset(&netcfg, 0 , sizeof(struct virtio_net_config)); + virtio_stw_p(vdev, &netcfg.status, n->status); + virtio_stw_p(vdev, &netcfg.max_virtqueue_pairs, n->max_queue_pairs); + virtio_stw_p(vdev, &netcfg.mtu, n->net_conf.mtu); + memcpy(netcfg.mac, n->mac, ETH_ALEN); + virtio_stl_p(vdev, &netcfg.speed, n->net_conf.speed); + netcfg.duplex = n->net_conf.duplex; + netcfg.rss_max_key_size = VIRTIO_NET_RSS_MAX_KEY_SIZE; + virtio_stw_p(vdev, &netcfg.rss_max_indirection_table_length, + virtio_host_has_feature(vdev, VIRTIO_NET_F_RSS) ? + VIRTIO_NET_RSS_MAX_TABLE_LEN : 1); + virtio_stl_p(vdev, &netcfg.supported_hash_types, + VIRTIO_NET_RSS_SUPPORTED_HASHES); + memcpy(config, &netcfg, n->config_size); + + /* + * Is this VDPA? No peer means not VDPA: there's no way to + * disconnect/reconnect a VDPA peer. + */ + if (nc->peer && nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_VDPA) { + ret = vhost_net_get_config(get_vhost_net(nc->peer), (uint8_t *)&netcfg, + n->config_size); + if (ret != -1) { + /* + * Some NIC/kernel combinations present 0 as the mac address. As + * that is not a legal address, try to proceed with the + * address from the QEMU command line in the hope that the + * address has been configured correctly elsewhere - just not + * reported by the device. + */ + if (memcmp(&netcfg.mac, &zero, sizeof(zero)) == 0) { + info_report("Zero hardware mac address detected. 
Ignoring."); + memcpy(netcfg.mac, n->mac, ETH_ALEN); + } + memcpy(config, &netcfg, n->config_size); + } + } +} + +static void virtio_net_set_config(VirtIODevice *vdev, const uint8_t *config) +{ + VirtIONet *n = VIRTIO_NET(vdev); + struct virtio_net_config netcfg = {}; + NetClientState *nc = qemu_get_queue(n->nic); + + memcpy(&netcfg, config, n->config_size); + + if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR) && + !virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1) && + memcmp(netcfg.mac, n->mac, ETH_ALEN)) { + memcpy(n->mac, netcfg.mac, ETH_ALEN); + qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac); + } + + /* + * Is this VDPA? No peer means not VDPA: there's no way to + * disconnect/reconnect a VDPA peer. + */ + if (nc->peer && nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_VDPA) { + vhost_net_set_config(get_vhost_net(nc->peer), + (uint8_t *)&netcfg, 0, n->config_size, + VHOST_SET_CONFIG_TYPE_MASTER); + } +} + +static bool virtio_net_started(VirtIONet *n, uint8_t status) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(n); + return (status & VIRTIO_CONFIG_S_DRIVER_OK) && + (n->status & VIRTIO_NET_S_LINK_UP) && vdev->vm_running; +} + +static void virtio_net_announce_notify(VirtIONet *net) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(net); + trace_virtio_net_announce_notify(); + + net->status |= VIRTIO_NET_S_ANNOUNCE; + virtio_notify_config(vdev); +} + +static void virtio_net_announce_timer(void *opaque) +{ + VirtIONet *n = opaque; + trace_virtio_net_announce_timer(n->announce_timer.round); + + n->announce_timer.round--; + virtio_net_announce_notify(n); +} + +static void virtio_net_announce(NetClientState *nc) +{ + VirtIONet *n = qemu_get_nic_opaque(nc); + VirtIODevice *vdev = VIRTIO_DEVICE(n); + + /* + * Make sure the virtio migration announcement timer isn't running + * If it is, let it trigger announcement so that we do not cause + * confusion. + */ + if (n->announce_timer.round) { + return; + } + + if (virtio_vdev_has_feature(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE) && + virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) { + virtio_net_announce_notify(n); + } +} + +static void virtio_net_vhost_status(VirtIONet *n, uint8_t status) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(n); + NetClientState *nc = qemu_get_queue(n->nic); + int queue_pairs = n->multiqueue ? n->max_queue_pairs : 1; + int cvq = n->max_ncs - n->max_queue_pairs; + + if (!get_vhost_net(nc->peer)) { + return; + } + + if ((virtio_net_started(n, status) && !nc->peer->link_down) == + !!n->vhost_started) { + return; + } + if (!n->vhost_started) { + int r, i; + + if (n->needs_vnet_hdr_swap) { + error_report("backend does not support %s vnet headers; " + "falling back on userspace virtio", + virtio_is_big_endian(vdev) ? "BE" : "LE"); + return; + } + + /* Any packets outstanding? Purge them to avoid touching rings + * when vhost is running. + */ + for (i = 0; i < queue_pairs; i++) { + NetClientState *qnc = qemu_get_subqueue(n->nic, i); + + /* Purge both directions: TX and RX. 
*/ + qemu_net_queue_purge(qnc->peer->incoming_queue, qnc); + qemu_net_queue_purge(qnc->incoming_queue, qnc->peer); + } + + if (virtio_has_feature(vdev->guest_features, VIRTIO_NET_F_MTU)) { + r = vhost_net_set_mtu(get_vhost_net(nc->peer), n->net_conf.mtu); + if (r < 0) { + error_report("%uBytes MTU not supported by the backend", + n->net_conf.mtu); + + return; + } + } + + n->vhost_started = 1; + r = vhost_net_start(vdev, n->nic->ncs, queue_pairs, cvq); + if (r < 0) { + error_report("unable to start vhost net: %d: " + "falling back on userspace virtio", -r); + n->vhost_started = 0; + } + } else { + vhost_net_stop(vdev, n->nic->ncs, queue_pairs, cvq); + n->vhost_started = 0; + } +} + +static int virtio_net_set_vnet_endian_one(VirtIODevice *vdev, + NetClientState *peer, + bool enable) +{ + if (virtio_is_big_endian(vdev)) { + return qemu_set_vnet_be(peer, enable); + } else { + return qemu_set_vnet_le(peer, enable); + } +} + +static bool virtio_net_set_vnet_endian(VirtIODevice *vdev, NetClientState *ncs, + int queue_pairs, bool enable) +{ + int i; + + for (i = 0; i < queue_pairs; i++) { + if (virtio_net_set_vnet_endian_one(vdev, ncs[i].peer, enable) < 0 && + enable) { + while (--i >= 0) { + virtio_net_set_vnet_endian_one(vdev, ncs[i].peer, false); + } + + return true; + } + } + + return false; +} + +static void virtio_net_vnet_endian_status(VirtIONet *n, uint8_t status) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(n); + int queue_pairs = n->multiqueue ? n->max_queue_pairs : 1; + + if (virtio_net_started(n, status)) { + /* Before using the device, we tell the network backend about the + * endianness to use when parsing vnet headers. If the backend + * can't do it, we fallback onto fixing the headers in the core + * virtio-net code. + */ + n->needs_vnet_hdr_swap = virtio_net_set_vnet_endian(vdev, n->nic->ncs, + queue_pairs, true); + } else if (virtio_net_started(n, vdev->status)) { + /* After using the device, we need to reset the network backend to + * the default (guest native endianness), otherwise the guest may + * lose network connectivity if it is rebooted into a different + * endianness. 
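
When the backend cannot be switched to the guest's endianness,
needs_vnet_hdr_swap is set and the multi-byte fields of every vnet header
are byte-swapped in software instead (see virtio_net_hdr_swap() further
down). A self-contained sketch of that fallback, with the header reduced
to its 16-bit fields and the swap shown unconditionally (the device only
does it on a cross-endian configuration):

#include <stdint.h>

/* shortened vnet header: just the fields that need swapping */
struct vnet_hdr {
    uint8_t  flags;
    uint8_t  gso_type;
    uint16_t hdr_len;
    uint16_t gso_size;
    uint16_t csum_start;
    uint16_t csum_offset;
};

static uint16_t bswap16(uint16_t v)
{
    return (uint16_t)((v >> 8) | (v << 8));
}

static void vnet_hdr_swap(struct vnet_hdr *h)
{
    h->hdr_len     = bswap16(h->hdr_len);
    h->gso_size    = bswap16(h->gso_size);
    h->csum_start  = bswap16(h->csum_start);
    h->csum_offset = bswap16(h->csum_offset);
}
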
+ */ + virtio_net_set_vnet_endian(vdev, n->nic->ncs, queue_pairs, false); + } +} + +static void virtio_net_drop_tx_queue_data(VirtIODevice *vdev, VirtQueue *vq) +{ + unsigned int dropped = virtqueue_drop_all(vq); + if (dropped) { + virtio_notify(vdev, vq); + } +} + +static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status) +{ + VirtIONet *n = VIRTIO_NET(vdev); + VirtIONetQueue *q; + int i; + uint8_t queue_status; + + virtio_net_vnet_endian_status(n, status); + virtio_net_vhost_status(n, status); + + for (i = 0; i < n->max_queue_pairs; i++) { + NetClientState *ncs = qemu_get_subqueue(n->nic, i); + bool queue_started; + q = &n->vqs[i]; + + if ((!n->multiqueue && i != 0) || i >= n->curr_queue_pairs) { + queue_status = 0; + } else { + queue_status = status; + } + queue_started = + virtio_net_started(n, queue_status) && !n->vhost_started; + + if (queue_started) { + qemu_flush_queued_packets(ncs); + } + + if (!q->tx_waiting) { + continue; + } + + if (queue_started) { + if (q->tx_timer) { + timer_mod(q->tx_timer, + qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout); + } else { + qemu_bh_schedule(q->tx_bh); + } + } else { + if (q->tx_timer) { + timer_del(q->tx_timer); + } else { + qemu_bh_cancel(q->tx_bh); + } + if ((n->status & VIRTIO_NET_S_LINK_UP) == 0 && + (queue_status & VIRTIO_CONFIG_S_DRIVER_OK) && + vdev->vm_running) { + /* if tx is waiting we are likely have some packets in tx queue + * and disabled notification */ + q->tx_waiting = 0; + virtio_queue_set_notification(q->tx_vq, 1); + virtio_net_drop_tx_queue_data(vdev, q->tx_vq); + } + } + } +} + +static void virtio_net_set_link_status(NetClientState *nc) +{ + VirtIONet *n = qemu_get_nic_opaque(nc); + VirtIODevice *vdev = VIRTIO_DEVICE(n); + uint16_t old_status = n->status; + + if (nc->link_down) + n->status &= ~VIRTIO_NET_S_LINK_UP; + else + n->status |= VIRTIO_NET_S_LINK_UP; + + if (n->status != old_status) + virtio_notify_config(vdev); + + virtio_net_set_status(vdev, vdev->status); +} + +static void rxfilter_notify(NetClientState *nc) +{ + VirtIONet *n = qemu_get_nic_opaque(nc); + + if (nc->rxfilter_notify_enabled) { + char *path = object_get_canonical_path(OBJECT(n->qdev)); + qapi_event_send_nic_rx_filter_changed(!!n->netclient_name, + n->netclient_name, path); + g_free(path); + + /* disable event notification to avoid events flooding */ + nc->rxfilter_notify_enabled = 0; + } +} + +static intList *get_vlan_table(VirtIONet *n) +{ + intList *list; + int i, j; + + list = NULL; + for (i = 0; i < MAX_VLAN >> 5; i++) { + for (j = 0; n->vlans[i] && j <= 0x1f; j++) { + if (n->vlans[i] & (1U << j)) { + QAPI_LIST_PREPEND(list, (i << 5) + j); + } + } + } + + return list; +} + +static RxFilterInfo *virtio_net_query_rxfilter(NetClientState *nc) +{ + VirtIONet *n = qemu_get_nic_opaque(nc); + VirtIODevice *vdev = VIRTIO_DEVICE(n); + RxFilterInfo *info; + strList *str_list; + int i; + + info = g_malloc0(sizeof(*info)); + info->name = g_strdup(nc->name); + info->promiscuous = n->promisc; + + if (n->nouni) { + info->unicast = RX_STATE_NONE; + } else if (n->alluni) { + info->unicast = RX_STATE_ALL; + } else { + info->unicast = RX_STATE_NORMAL; + } + + if (n->nomulti) { + info->multicast = RX_STATE_NONE; + } else if (n->allmulti) { + info->multicast = RX_STATE_ALL; + } else { + info->multicast = RX_STATE_NORMAL; + } + + info->broadcast_allowed = n->nobcast; + info->multicast_overflow = n->mac_table.multi_overflow; + info->unicast_overflow = n->mac_table.uni_overflow; + + info->main_mac = qemu_mac_strdup_printf(n->mac); + + str_list 
= NULL; + for (i = 0; i < n->mac_table.first_multi; i++) { + QAPI_LIST_PREPEND(str_list, + qemu_mac_strdup_printf(n->mac_table.macs + i * ETH_ALEN)); + } + info->unicast_table = str_list; + + str_list = NULL; + for (i = n->mac_table.first_multi; i < n->mac_table.in_use; i++) { + QAPI_LIST_PREPEND(str_list, + qemu_mac_strdup_printf(n->mac_table.macs + i * ETH_ALEN)); + } + info->multicast_table = str_list; + info->vlan_table = get_vlan_table(n); + + if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VLAN)) { + info->vlan = RX_STATE_ALL; + } else if (!info->vlan_table) { + info->vlan = RX_STATE_NONE; + } else { + info->vlan = RX_STATE_NORMAL; + } + + /* enable event notification after query */ + nc->rxfilter_notify_enabled = 1; + + return info; +} + +static void virtio_net_reset(VirtIODevice *vdev) +{ + VirtIONet *n = VIRTIO_NET(vdev); + int i; + + /* Reset back to compatibility mode */ + n->promisc = 1; + n->allmulti = 0; + n->alluni = 0; + n->nomulti = 0; + n->nouni = 0; + n->nobcast = 0; + /* multiqueue is disabled by default */ + n->curr_queue_pairs = 1; + timer_del(n->announce_timer.tm); + n->announce_timer.round = 0; + n->status &= ~VIRTIO_NET_S_ANNOUNCE; + + /* Flush any MAC and VLAN filter table state */ + n->mac_table.in_use = 0; + n->mac_table.first_multi = 0; + n->mac_table.multi_overflow = 0; + n->mac_table.uni_overflow = 0; + memset(n->mac_table.macs, 0, MAC_TABLE_ENTRIES * ETH_ALEN); + memcpy(&n->mac[0], &n->nic->conf->macaddr, sizeof(n->mac)); + qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac); + memset(n->vlans, 0, MAX_VLAN >> 3); + + /* Flush any async TX */ + for (i = 0; i < n->max_queue_pairs; i++) { + NetClientState *nc = qemu_get_subqueue(n->nic, i); + + if (nc->peer) { + qemu_flush_or_purge_queued_packets(nc->peer, true); + assert(!virtio_net_get_subqueue(nc)->async_tx.elem); + } + } +} + +static void peer_test_vnet_hdr(VirtIONet *n) +{ + NetClientState *nc = qemu_get_queue(n->nic); + if (!nc->peer) { + return; + } + + n->has_vnet_hdr = qemu_has_vnet_hdr(nc->peer); +} + +static int peer_has_vnet_hdr(VirtIONet *n) +{ + return n->has_vnet_hdr; +} + +static int peer_has_ufo(VirtIONet *n) +{ + if (!peer_has_vnet_hdr(n)) + return 0; + + n->has_ufo = qemu_has_ufo(qemu_get_queue(n->nic)->peer); + + return n->has_ufo; +} + +static void virtio_net_set_mrg_rx_bufs(VirtIONet *n, int mergeable_rx_bufs, + int version_1, int hash_report) +{ + int i; + NetClientState *nc; + + n->mergeable_rx_bufs = mergeable_rx_bufs; + + if (version_1) { + n->guest_hdr_len = hash_report ? + sizeof(struct virtio_net_hdr_v1_hash) : + sizeof(struct virtio_net_hdr_mrg_rxbuf); + n->rss_data.populate_hash = !!hash_report; + } else { + n->guest_hdr_len = n->mergeable_rx_bufs ? + sizeof(struct virtio_net_hdr_mrg_rxbuf) : + sizeof(struct virtio_net_hdr); + } + + for (i = 0; i < n->max_queue_pairs; i++) { + nc = qemu_get_subqueue(n->nic, i); + + if (peer_has_vnet_hdr(n) && + qemu_has_vnet_hdr_len(nc->peer, n->guest_hdr_len)) { + qemu_set_vnet_hdr_len(nc->peer, n->guest_hdr_len); + n->host_hdr_len = n->guest_hdr_len; + } + } +} + +static int virtio_net_max_tx_queue_size(VirtIONet *n) +{ + NetClientState *peer = n->nic_conf.peers.ncs[0]; + + /* + * Backends other than vhost-user don't support max queue size. 
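
Ring sizing is what the peer check here is about: only a vhost-user
backend negotiates TX rings larger than the default, so every other peer
type is capped. An illustrative reduction (256 and 1024 are assumptions
standing in for VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE and VIRTQUEUE_MAX_SIZE):

enum peer_type { PEER_NONE, PEER_TAP, PEER_VHOST_USER };

#define TX_QUEUE_DEFAULT 256   /* assumed VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE */
#define VQ_MAX           1024  /* assumed VIRTQUEUE_MAX_SIZE */

/* no peer, or a non-vhost-user peer: stay at the default ring size */
static int max_tx_queue_size(enum peer_type peer)
{
    return peer == PEER_VHOST_USER ? VQ_MAX : TX_QUEUE_DEFAULT;
}
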
+ */ + if (!peer) { + return VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE; + } + + if (peer->info->type != NET_CLIENT_DRIVER_VHOST_USER) { + return VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE; + } + + return VIRTQUEUE_MAX_SIZE; +} + +static int peer_attach(VirtIONet *n, int index) +{ + NetClientState *nc = qemu_get_subqueue(n->nic, index); + + if (!nc->peer) { + return 0; + } + + if (nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_USER) { + vhost_set_vring_enable(nc->peer, 1); + } + + if (nc->peer->info->type != NET_CLIENT_DRIVER_TAP) { + return 0; + } + + if (n->max_queue_pairs == 1) { + return 0; + } + + return tap_enable(nc->peer); +} + +static int peer_detach(VirtIONet *n, int index) +{ + NetClientState *nc = qemu_get_subqueue(n->nic, index); + + if (!nc->peer) { + return 0; + } + + if (nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_USER) { + vhost_set_vring_enable(nc->peer, 0); + } + + if (nc->peer->info->type != NET_CLIENT_DRIVER_TAP) { + return 0; + } + + return tap_disable(nc->peer); +} + +static void virtio_net_set_queue_pairs(VirtIONet *n) +{ + int i; + int r; + + if (n->nic->peer_deleted) { + return; + } + + for (i = 0; i < n->max_queue_pairs; i++) { + if (i < n->curr_queue_pairs) { + r = peer_attach(n, i); + assert(!r); + } else { + r = peer_detach(n, i); + assert(!r); + } + } +} + +static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue); + +static uint64_t virtio_net_get_features(VirtIODevice *vdev, uint64_t features, + Error **errp) +{ + VirtIONet *n = VIRTIO_NET(vdev); + NetClientState *nc = qemu_get_queue(n->nic); + + /* Firstly sync all virtio-net possible supported features */ + features |= n->host_features; + + virtio_add_feature(&features, VIRTIO_NET_F_MAC); + + if (!peer_has_vnet_hdr(n)) { + virtio_clear_feature(&features, VIRTIO_NET_F_CSUM); + virtio_clear_feature(&features, VIRTIO_NET_F_HOST_TSO4); + virtio_clear_feature(&features, VIRTIO_NET_F_HOST_TSO6); + virtio_clear_feature(&features, VIRTIO_NET_F_HOST_ECN); + + virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_CSUM); + virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_TSO4); + virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_TSO6); + virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_ECN); + + virtio_clear_feature(&features, VIRTIO_NET_F_HASH_REPORT); + } + + if (!peer_has_vnet_hdr(n) || !peer_has_ufo(n)) { + virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_UFO); + virtio_clear_feature(&features, VIRTIO_NET_F_HOST_UFO); + } + + if (!get_vhost_net(nc->peer)) { + return features; + } + + if (!ebpf_rss_is_loaded(&n->ebpf_rss)) { + virtio_clear_feature(&features, VIRTIO_NET_F_RSS); + } + features = vhost_net_get_features(get_vhost_net(nc->peer), features); + vdev->backend_features = features; + + if (n->mtu_bypass_backend && + (n->host_features & 1ULL << VIRTIO_NET_F_MTU)) { + features |= (1ULL << VIRTIO_NET_F_MTU); + } + + return features; +} + +static uint64_t virtio_net_bad_features(VirtIODevice *vdev) +{ + uint64_t features = 0; + + /* Linux kernel 2.6.25. 
It understood MAC (as everyone must), + * but also these: */ + virtio_add_feature(&features, VIRTIO_NET_F_MAC); + virtio_add_feature(&features, VIRTIO_NET_F_CSUM); + virtio_add_feature(&features, VIRTIO_NET_F_HOST_TSO4); + virtio_add_feature(&features, VIRTIO_NET_F_HOST_TSO6); + virtio_add_feature(&features, VIRTIO_NET_F_HOST_ECN); + + return features; +} + +static void virtio_net_apply_guest_offloads(VirtIONet *n) +{ + qemu_set_offload(qemu_get_queue(n->nic)->peer, + !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_CSUM)), + !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_TSO4)), + !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_TSO6)), + !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_ECN)), + !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_UFO))); +} + +static uint64_t virtio_net_guest_offloads_by_features(uint32_t features) +{ + static const uint64_t guest_offloads_mask = + (1ULL << VIRTIO_NET_F_GUEST_CSUM) | + (1ULL << VIRTIO_NET_F_GUEST_TSO4) | + (1ULL << VIRTIO_NET_F_GUEST_TSO6) | + (1ULL << VIRTIO_NET_F_GUEST_ECN) | + (1ULL << VIRTIO_NET_F_GUEST_UFO); + + return guest_offloads_mask & features; +} + +static inline uint64_t virtio_net_supported_guest_offloads(VirtIONet *n) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(n); + return virtio_net_guest_offloads_by_features(vdev->guest_features); +} + +typedef struct { + VirtIONet *n; + DeviceState *dev; +} FailoverDevice; + +/** + * Set the failover primary device + * + * @opaque: FailoverId to setup + * @opts: opts for device we are handling + * @errp: returns an error if this function fails + */ +static int failover_set_primary(DeviceState *dev, void *opaque) +{ + FailoverDevice *fdev = opaque; + PCIDevice *pci_dev = (PCIDevice *) + object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE); + + if (!pci_dev) { + return 0; + } + + if (!g_strcmp0(pci_dev->failover_pair_id, fdev->n->netclient_name)) { + fdev->dev = dev; + return 1; + } + + return 0; +} + +/** + * Find the primary device for this failover virtio-net + * + * @n: VirtIONet device + * @errp: returns an error if this function fails + */ +static DeviceState *failover_find_primary_device(VirtIONet *n) +{ + FailoverDevice fdev = { + .n = n, + }; + + qbus_walk_children(sysbus_get_default(), failover_set_primary, NULL, + NULL, NULL, &fdev); + return fdev.dev; +} + +static void failover_add_primary(VirtIONet *n, Error **errp) +{ + Error *err = NULL; + DeviceState *dev = failover_find_primary_device(n); + + if (dev) { + return; + } + + if (!n->primary_opts) { + error_setg(errp, "Primary device not found"); + error_append_hint(errp, "Virtio-net failover will not work. 
Make " + "sure primary device has parameter" + " failover_pair_id=%s\n", n->netclient_name); + return; + } + + dev = qdev_device_add_from_qdict(n->primary_opts, + n->primary_opts_from_json, + &err); + if (err) { + qobject_unref(n->primary_opts); + n->primary_opts = NULL; + } else { + object_unref(OBJECT(dev)); + } + error_propagate(errp, err); +} + +static void virtio_net_set_features(VirtIODevice *vdev, uint64_t features) +{ + VirtIONet *n = VIRTIO_NET(vdev); + Error *err = NULL; + int i; + + if (n->mtu_bypass_backend && + !virtio_has_feature(vdev->backend_features, VIRTIO_NET_F_MTU)) { + features &= ~(1ULL << VIRTIO_NET_F_MTU); + } + + virtio_net_set_multiqueue(n, + virtio_has_feature(features, VIRTIO_NET_F_RSS) || + virtio_has_feature(features, VIRTIO_NET_F_MQ)); + + virtio_net_set_mrg_rx_bufs(n, + virtio_has_feature(features, + VIRTIO_NET_F_MRG_RXBUF), + virtio_has_feature(features, + VIRTIO_F_VERSION_1), + virtio_has_feature(features, + VIRTIO_NET_F_HASH_REPORT)); + + n->rsc4_enabled = virtio_has_feature(features, VIRTIO_NET_F_RSC_EXT) && + virtio_has_feature(features, VIRTIO_NET_F_GUEST_TSO4); + n->rsc6_enabled = virtio_has_feature(features, VIRTIO_NET_F_RSC_EXT) && + virtio_has_feature(features, VIRTIO_NET_F_GUEST_TSO6); + n->rss_data.redirect = virtio_has_feature(features, VIRTIO_NET_F_RSS); + + if (n->has_vnet_hdr) { + n->curr_guest_offloads = + virtio_net_guest_offloads_by_features(features); + virtio_net_apply_guest_offloads(n); + } + + for (i = 0; i < n->max_queue_pairs; i++) { + NetClientState *nc = qemu_get_subqueue(n->nic, i); + + if (!get_vhost_net(nc->peer)) { + continue; + } + vhost_net_ack_features(get_vhost_net(nc->peer), features); + } + + if (virtio_has_feature(features, VIRTIO_NET_F_CTRL_VLAN)) { + memset(n->vlans, 0, MAX_VLAN >> 3); + } else { + memset(n->vlans, 0xff, MAX_VLAN >> 3); + } + + if (virtio_has_feature(features, VIRTIO_NET_F_STANDBY)) { + qapi_event_send_failover_negotiated(n->netclient_name); + qatomic_set(&n->failover_primary_hidden, false); + failover_add_primary(n, &err); + if (err) { + warn_report_err(err); + } + } +} + +static int virtio_net_handle_rx_mode(VirtIONet *n, uint8_t cmd, + struct iovec *iov, unsigned int iov_cnt) +{ + uint8_t on; + size_t s; + NetClientState *nc = qemu_get_queue(n->nic); + + s = iov_to_buf(iov, iov_cnt, 0, &on, sizeof(on)); + if (s != sizeof(on)) { + return VIRTIO_NET_ERR; + } + + if (cmd == VIRTIO_NET_CTRL_RX_PROMISC) { + n->promisc = on; + } else if (cmd == VIRTIO_NET_CTRL_RX_ALLMULTI) { + n->allmulti = on; + } else if (cmd == VIRTIO_NET_CTRL_RX_ALLUNI) { + n->alluni = on; + } else if (cmd == VIRTIO_NET_CTRL_RX_NOMULTI) { + n->nomulti = on; + } else if (cmd == VIRTIO_NET_CTRL_RX_NOUNI) { + n->nouni = on; + } else if (cmd == VIRTIO_NET_CTRL_RX_NOBCAST) { + n->nobcast = on; + } else { + return VIRTIO_NET_ERR; + } + + rxfilter_notify(nc); + + return VIRTIO_NET_OK; +} + +static int virtio_net_handle_offloads(VirtIONet *n, uint8_t cmd, + struct iovec *iov, unsigned int iov_cnt) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(n); + uint64_t offloads; + size_t s; + + if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) { + return VIRTIO_NET_ERR; + } + + s = iov_to_buf(iov, iov_cnt, 0, &offloads, sizeof(offloads)); + if (s != sizeof(offloads)) { + return VIRTIO_NET_ERR; + } + + if (cmd == VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET) { + uint64_t supported_offloads; + + offloads = virtio_ldq_p(vdev, &offloads); + + if (!n->has_vnet_hdr) { + return VIRTIO_NET_ERR; + } + + n->rsc4_enabled = virtio_has_feature(offloads, 
VIRTIO_NET_F_RSC_EXT) && + virtio_has_feature(offloads, VIRTIO_NET_F_GUEST_TSO4); + n->rsc6_enabled = virtio_has_feature(offloads, VIRTIO_NET_F_RSC_EXT) && + virtio_has_feature(offloads, VIRTIO_NET_F_GUEST_TSO6); + virtio_clear_feature(&offloads, VIRTIO_NET_F_RSC_EXT); + + supported_offloads = virtio_net_supported_guest_offloads(n); + if (offloads & ~supported_offloads) { + return VIRTIO_NET_ERR; + } + + n->curr_guest_offloads = offloads; + virtio_net_apply_guest_offloads(n); + + return VIRTIO_NET_OK; + } else { + return VIRTIO_NET_ERR; + } +} + +static int virtio_net_handle_mac(VirtIONet *n, uint8_t cmd, + struct iovec *iov, unsigned int iov_cnt) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(n); + struct virtio_net_ctrl_mac mac_data; + size_t s; + NetClientState *nc = qemu_get_queue(n->nic); + + if (cmd == VIRTIO_NET_CTRL_MAC_ADDR_SET) { + if (iov_size(iov, iov_cnt) != sizeof(n->mac)) { + return VIRTIO_NET_ERR; + } + s = iov_to_buf(iov, iov_cnt, 0, &n->mac, sizeof(n->mac)); + assert(s == sizeof(n->mac)); + qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac); + rxfilter_notify(nc); + + return VIRTIO_NET_OK; + } + + if (cmd != VIRTIO_NET_CTRL_MAC_TABLE_SET) { + return VIRTIO_NET_ERR; + } + + int in_use = 0; + int first_multi = 0; + uint8_t uni_overflow = 0; + uint8_t multi_overflow = 0; + uint8_t *macs = g_malloc0(MAC_TABLE_ENTRIES * ETH_ALEN); + + s = iov_to_buf(iov, iov_cnt, 0, &mac_data.entries, + sizeof(mac_data.entries)); + mac_data.entries = virtio_ldl_p(vdev, &mac_data.entries); + if (s != sizeof(mac_data.entries)) { + goto error; + } + iov_discard_front(&iov, &iov_cnt, s); + + if (mac_data.entries * ETH_ALEN > iov_size(iov, iov_cnt)) { + goto error; + } + + if (mac_data.entries <= MAC_TABLE_ENTRIES) { + s = iov_to_buf(iov, iov_cnt, 0, macs, + mac_data.entries * ETH_ALEN); + if (s != mac_data.entries * ETH_ALEN) { + goto error; + } + in_use += mac_data.entries; + } else { + uni_overflow = 1; + } + + iov_discard_front(&iov, &iov_cnt, mac_data.entries * ETH_ALEN); + + first_multi = in_use; + + s = iov_to_buf(iov, iov_cnt, 0, &mac_data.entries, + sizeof(mac_data.entries)); + mac_data.entries = virtio_ldl_p(vdev, &mac_data.entries); + if (s != sizeof(mac_data.entries)) { + goto error; + } + + iov_discard_front(&iov, &iov_cnt, s); + + if (mac_data.entries * ETH_ALEN != iov_size(iov, iov_cnt)) { + goto error; + } + + if (mac_data.entries <= MAC_TABLE_ENTRIES - in_use) { + s = iov_to_buf(iov, iov_cnt, 0, &macs[in_use * ETH_ALEN], + mac_data.entries * ETH_ALEN); + if (s != mac_data.entries * ETH_ALEN) { + goto error; + } + in_use += mac_data.entries; + } else { + multi_overflow = 1; + } + + n->mac_table.in_use = in_use; + n->mac_table.first_multi = first_multi; + n->mac_table.uni_overflow = uni_overflow; + n->mac_table.multi_overflow = multi_overflow; + memcpy(n->mac_table.macs, macs, MAC_TABLE_ENTRIES * ETH_ALEN); + g_free(macs); + rxfilter_notify(nc); + + return VIRTIO_NET_OK; + +error: + g_free(macs); + return VIRTIO_NET_ERR; +} + +static int virtio_net_handle_vlan_table(VirtIONet *n, uint8_t cmd, + struct iovec *iov, unsigned int iov_cnt) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(n); + uint16_t vid; + size_t s; + NetClientState *nc = qemu_get_queue(n->nic); + + s = iov_to_buf(iov, iov_cnt, 0, &vid, sizeof(vid)); + vid = virtio_lduw_p(vdev, &vid); + if (s != sizeof(vid)) { + return VIRTIO_NET_ERR; + } + + if (vid >= MAX_VLAN) + return VIRTIO_NET_ERR; + + if (cmd == VIRTIO_NET_CTRL_VLAN_ADD) + n->vlans[vid >> 5] |= (1U << (vid & 0x1f)); + else if (cmd == VIRTIO_NET_CTRL_VLAN_DEL) + 
        n->vlans[vid >> 5] &= ~(1U << (vid & 0x1f));
+    else
+        return VIRTIO_NET_ERR;
+
+    rxfilter_notify(nc);
+
+    return VIRTIO_NET_OK;
+}
+
+static int virtio_net_handle_announce(VirtIONet *n, uint8_t cmd,
+                                      struct iovec *iov, unsigned int iov_cnt)
+{
+    trace_virtio_net_handle_announce(n->announce_timer.round);
+    if (cmd == VIRTIO_NET_CTRL_ANNOUNCE_ACK &&
+        n->status & VIRTIO_NET_S_ANNOUNCE) {
+        n->status &= ~VIRTIO_NET_S_ANNOUNCE;
+        if (n->announce_timer.round) {
+            qemu_announce_timer_step(&n->announce_timer);
+        }
+        return VIRTIO_NET_OK;
+    } else {
+        return VIRTIO_NET_ERR;
+    }
+}
+
+static void virtio_net_detach_epbf_rss(VirtIONet *n);
+
+static void virtio_net_disable_rss(VirtIONet *n)
+{
+    if (n->rss_data.enabled) {
+        trace_virtio_net_rss_disable();
+    }
+    n->rss_data.enabled = false;
+
+    virtio_net_detach_epbf_rss(n);
+}
+
+static bool virtio_net_attach_ebpf_to_backend(NICState *nic, int prog_fd)
+{
+    NetClientState *nc = qemu_get_peer(qemu_get_queue(nic), 0);
+    if (nc == NULL || nc->info->set_steering_ebpf == NULL) {
+        return false;
+    }
+
+    return nc->info->set_steering_ebpf(nc, prog_fd);
+}
+
+static void rss_data_to_rss_config(struct VirtioNetRssData *data,
+                                   struct EBPFRSSConfig *config)
+{
+    config->redirect = data->redirect;
+    config->populate_hash = data->populate_hash;
+    config->hash_types = data->hash_types;
+    config->indirections_len = data->indirections_len;
+    config->default_queue = data->default_queue;
+}
+
+static bool virtio_net_attach_epbf_rss(VirtIONet *n)
+{
+    struct EBPFRSSConfig config = {};
+
+    if (!ebpf_rss_is_loaded(&n->ebpf_rss)) {
+        return false;
+    }
+
+    rss_data_to_rss_config(&n->rss_data, &config);
+
+    if (!ebpf_rss_set_all(&n->ebpf_rss, &config,
+                          n->rss_data.indirections_table, n->rss_data.key)) {
+        return false;
+    }
+
+    if (!virtio_net_attach_ebpf_to_backend(n->nic, n->ebpf_rss.program_fd)) {
+        return false;
+    }
+
+    return true;
+}
+
+static void virtio_net_detach_epbf_rss(VirtIONet *n)
+{
+    virtio_net_attach_ebpf_to_backend(n->nic, -1);
+}
+
+static bool virtio_net_load_ebpf(VirtIONet *n)
+{
+    if (!virtio_net_attach_ebpf_to_backend(n->nic, -1)) {
+        /* backend doesn't support steering ebpf */
+        return false;
+    }
+
+    return ebpf_rss_load(&n->ebpf_rss);
+}
+
+static void virtio_net_unload_ebpf(VirtIONet *n)
+{
+    virtio_net_attach_ebpf_to_backend(n->nic, -1);
+    ebpf_rss_unload(&n->ebpf_rss);
+}
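
The handler below insists that the indirection table length is a power of
two because the receive path selects a queue by masking the packet hash
rather than taking a modulo. A minimal sketch of that lookup, mirroring
the arithmetic in virtio_net_process_rss() further down (the in-kernel
eBPF steering program performs the equivalent selection for vhost):

#include <stdint.h>

/* len must be a power of two, so hash & (len - 1) == hash % len */
static uint16_t rss_pick_queue(uint32_t hash,
                               const uint16_t *table, /* indirection table */
                               unsigned len)
{
    return table[hash & (len - 1)];
}
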
indirection table"; + err_value = n->rss_data.indirections_len; + goto error; + } + if (n->rss_data.indirections_len > VIRTIO_NET_RSS_MAX_TABLE_LEN) { + err_msg = "Too large indirection table"; + err_value = n->rss_data.indirections_len; + goto error; + } + n->rss_data.default_queue = do_rss ? + virtio_lduw_p(vdev, &cfg.unclassified_queue) : 0; + if (n->rss_data.default_queue >= n->max_queue_pairs) { + err_msg = "Invalid default queue"; + err_value = n->rss_data.default_queue; + goto error; + } + offset += size_get; + size_get = sizeof(uint16_t) * n->rss_data.indirections_len; + g_free(n->rss_data.indirections_table); + n->rss_data.indirections_table = g_malloc(size_get); + if (!n->rss_data.indirections_table) { + err_msg = "Can't allocate indirections table"; + err_value = n->rss_data.indirections_len; + goto error; + } + s = iov_to_buf(iov, iov_cnt, offset, + n->rss_data.indirections_table, size_get); + if (s != size_get) { + err_msg = "Short indirection table buffer"; + err_value = (uint32_t)s; + goto error; + } + for (i = 0; i < n->rss_data.indirections_len; ++i) { + uint16_t val = n->rss_data.indirections_table[i]; + n->rss_data.indirections_table[i] = virtio_lduw_p(vdev, &val); + } + offset += size_get; + size_get = sizeof(temp); + s = iov_to_buf(iov, iov_cnt, offset, &temp, size_get); + if (s != size_get) { + err_msg = "Can't get queue_pairs"; + err_value = (uint32_t)s; + goto error; + } + queue_pairs = do_rss ? virtio_lduw_p(vdev, &temp.us) : n->curr_queue_pairs; + if (queue_pairs == 0 || queue_pairs > n->max_queue_pairs) { + err_msg = "Invalid number of queue_pairs"; + err_value = queue_pairs; + goto error; + } + if (temp.b > VIRTIO_NET_RSS_MAX_KEY_SIZE) { + err_msg = "Invalid key size"; + err_value = temp.b; + goto error; + } + if (!temp.b && n->rss_data.hash_types) { + err_msg = "No key provided"; + err_value = 0; + goto error; + } + if (!temp.b && !n->rss_data.hash_types) { + virtio_net_disable_rss(n); + return queue_pairs; + } + offset += size_get; + size_get = temp.b; + s = iov_to_buf(iov, iov_cnt, offset, n->rss_data.key, size_get); + if (s != size_get) { + err_msg = "Can get key buffer"; + err_value = (uint32_t)s; + goto error; + } + n->rss_data.enabled = true; + + if (!n->rss_data.populate_hash) { + if (!virtio_net_attach_epbf_rss(n)) { + /* EBPF must be loaded for vhost */ + if (get_vhost_net(qemu_get_queue(n->nic)->peer)) { + warn_report("Can't load eBPF RSS for vhost"); + goto error; + } + /* fallback to software RSS */ + warn_report("Can't load eBPF RSS - fallback to software RSS"); + n->rss_data.enabled_software_rss = true; + } + } else { + /* use software RSS for hash populating */ + /* and detach eBPF if was loaded before */ + virtio_net_detach_epbf_rss(n); + n->rss_data.enabled_software_rss = true; + } + + trace_virtio_net_rss_enable(n->rss_data.hash_types, + n->rss_data.indirections_len, + temp.b); + return queue_pairs; +error: + trace_virtio_net_rss_error(err_msg, err_value); + virtio_net_disable_rss(n); + return 0; +} + +static int virtio_net_handle_mq(VirtIONet *n, uint8_t cmd, + struct iovec *iov, unsigned int iov_cnt) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(n); + uint16_t queue_pairs; + + virtio_net_disable_rss(n); + if (cmd == VIRTIO_NET_CTRL_MQ_HASH_CONFIG) { + queue_pairs = virtio_net_handle_rss(n, iov, iov_cnt, false); + return queue_pairs ? 
VIRTIO_NET_OK : VIRTIO_NET_ERR; + } + if (cmd == VIRTIO_NET_CTRL_MQ_RSS_CONFIG) { + queue_pairs = virtio_net_handle_rss(n, iov, iov_cnt, true); + } else if (cmd == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) { + struct virtio_net_ctrl_mq mq; + size_t s; + if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_MQ)) { + return VIRTIO_NET_ERR; + } + s = iov_to_buf(iov, iov_cnt, 0, &mq, sizeof(mq)); + if (s != sizeof(mq)) { + return VIRTIO_NET_ERR; + } + queue_pairs = virtio_lduw_p(vdev, &mq.virtqueue_pairs); + + } else { + return VIRTIO_NET_ERR; + } + + if (queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN || + queue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX || + queue_pairs > n->max_queue_pairs || + !n->multiqueue) { + return VIRTIO_NET_ERR; + } + + n->curr_queue_pairs = queue_pairs; + /* stop the backend before changing the number of queue_pairs to avoid handling a + * disabled queue */ + virtio_net_set_status(vdev, vdev->status); + virtio_net_set_queue_pairs(n); + + return VIRTIO_NET_OK; +} + +static void virtio_net_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq) +{ + VirtIONet *n = VIRTIO_NET(vdev); + struct virtio_net_ctrl_hdr ctrl; + virtio_net_ctrl_ack status = VIRTIO_NET_ERR; + VirtQueueElement *elem; + size_t s; + struct iovec *iov, *iov2; + unsigned int iov_cnt; + + for (;;) { + elem = virtqueue_pop(vq, sizeof(VirtQueueElement)); + if (!elem) { + break; + } + if (iov_size(elem->in_sg, elem->in_num) < sizeof(status) || + iov_size(elem->out_sg, elem->out_num) < sizeof(ctrl)) { + virtio_error(vdev, "virtio-net ctrl missing headers"); + virtqueue_detach_element(vq, elem, 0); + g_free(elem); + break; + } + + iov_cnt = elem->out_num; + iov2 = iov = g_memdup(elem->out_sg, sizeof(struct iovec) * elem->out_num); + s = iov_to_buf(iov, iov_cnt, 0, &ctrl, sizeof(ctrl)); + iov_discard_front(&iov, &iov_cnt, sizeof(ctrl)); + if (s != sizeof(ctrl)) { + status = VIRTIO_NET_ERR; + } else if (ctrl.class == VIRTIO_NET_CTRL_RX) { + status = virtio_net_handle_rx_mode(n, ctrl.cmd, iov, iov_cnt); + } else if (ctrl.class == VIRTIO_NET_CTRL_MAC) { + status = virtio_net_handle_mac(n, ctrl.cmd, iov, iov_cnt); + } else if (ctrl.class == VIRTIO_NET_CTRL_VLAN) { + status = virtio_net_handle_vlan_table(n, ctrl.cmd, iov, iov_cnt); + } else if (ctrl.class == VIRTIO_NET_CTRL_ANNOUNCE) { + status = virtio_net_handle_announce(n, ctrl.cmd, iov, iov_cnt); + } else if (ctrl.class == VIRTIO_NET_CTRL_MQ) { + status = virtio_net_handle_mq(n, ctrl.cmd, iov, iov_cnt); + } else if (ctrl.class == VIRTIO_NET_CTRL_GUEST_OFFLOADS) { + status = virtio_net_handle_offloads(n, ctrl.cmd, iov, iov_cnt); + } + + s = iov_from_buf(elem->in_sg, elem->in_num, 0, &status, sizeof(status)); + assert(s == sizeof(status)); + + virtqueue_push(vq, elem, sizeof(status)); + virtio_notify(vdev, vq); + g_free(iov2); + g_free(elem); + } +} + +/* RX */ + +static void virtio_net_handle_rx(VirtIODevice *vdev, VirtQueue *vq) +{ + VirtIONet *n = VIRTIO_NET(vdev); + int queue_index = vq2q(virtio_get_queue_index(vq)); + + qemu_flush_queued_packets(qemu_get_subqueue(n->nic, queue_index)); +} + +static bool virtio_net_can_receive(NetClientState *nc) +{ + VirtIONet *n = qemu_get_nic_opaque(nc); + VirtIODevice *vdev = VIRTIO_DEVICE(n); + VirtIONetQueue *q = virtio_net_get_subqueue(nc); + + if (!vdev->vm_running) { + return false; + } + + if (nc->queue_index >= n->curr_queue_pairs) { + return false; + } + + if (!virtio_queue_ready(q->rx_vq) || + !(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) { + return false; + } + + return true; +} + +static int virtio_net_has_buffers(VirtIONetQueue *q, int 
bufsize) +{ + VirtIONet *n = q->n; + if (virtio_queue_empty(q->rx_vq) || + (n->mergeable_rx_bufs && + !virtqueue_avail_bytes(q->rx_vq, bufsize, 0))) { + virtio_queue_set_notification(q->rx_vq, 1); + + /* To avoid a race condition where the guest has made some buffers + * available after the above check but before notification was + * enabled, check for available buffers again. + */ + if (virtio_queue_empty(q->rx_vq) || + (n->mergeable_rx_bufs && + !virtqueue_avail_bytes(q->rx_vq, bufsize, 0))) { + return 0; + } + } + + virtio_queue_set_notification(q->rx_vq, 0); + return 1; +} + +static void virtio_net_hdr_swap(VirtIODevice *vdev, struct virtio_net_hdr *hdr) +{ + virtio_tswap16s(vdev, &hdr->hdr_len); + virtio_tswap16s(vdev, &hdr->gso_size); + virtio_tswap16s(vdev, &hdr->csum_start); + virtio_tswap16s(vdev, &hdr->csum_offset); +} + +/* dhclient uses AF_PACKET but doesn't pass auxdata to the kernel so + * it never finds out that the packets don't have valid checksums. This + * causes dhclient to get upset. Fedora's carried a patch for ages to + * fix this with Xen but it hasn't appeared in an upstream release of + * dhclient yet. + * + * To avoid breaking existing guests, we catch udp packets and add + * checksums. This is terrible but it's better than hacking the guest + * kernels. + * + * N.B. if we introduce a zero-copy API, this operation is no longer free so + * we should provide a mechanism to disable it to avoid polluting the host + * cache. + */ +static void work_around_broken_dhclient(struct virtio_net_hdr *hdr, + uint8_t *buf, size_t size) +{ + if ((hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && /* missing csum */ + (size > 27 && size < 1500) && /* normal sized MTU */ + (buf[12] == 0x08 && buf[13] == 0x00) && /* ethertype == IPv4 */ + (buf[23] == 17) && /* ip.protocol == UDP */ + (buf[34] == 0 && buf[35] == 67)) { /* udp.srcport == bootps */ + net_checksum_calculate(buf, size, CSUM_UDP); + hdr->flags &= ~VIRTIO_NET_HDR_F_NEEDS_CSUM; + } +} + +static void receive_header(VirtIONet *n, const struct iovec *iov, int iov_cnt, + const void *buf, size_t size) +{ + if (n->has_vnet_hdr) { + /* FIXME this cast is evil */ + void *wbuf = (void *)buf; + work_around_broken_dhclient(wbuf, wbuf + n->host_hdr_len, + size - n->host_hdr_len); + + if (n->needs_vnet_hdr_swap) { + virtio_net_hdr_swap(VIRTIO_DEVICE(n), wbuf); + } + iov_from_buf(iov, iov_cnt, 0, buf, sizeof(struct virtio_net_hdr)); + } else { + struct virtio_net_hdr hdr = { + .flags = 0, + .gso_type = VIRTIO_NET_HDR_GSO_NONE + }; + iov_from_buf(iov, iov_cnt, 0, &hdr, sizeof hdr); + } +} + +static int receive_filter(VirtIONet *n, const uint8_t *buf, int size) +{ + static const uint8_t bcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; + static const uint8_t vlan[] = {0x81, 0x00}; + uint8_t *ptr = (uint8_t *)buf; + int i; + + if (n->promisc) + return 1; + + ptr += n->host_hdr_len; + + if (!memcmp(&ptr[12], vlan, sizeof(vlan))) { + int vid = lduw_be_p(ptr + 14) & 0xfff; + if (!(n->vlans[vid >> 5] & (1U << (vid & 0x1f)))) + return 0; + } + + if (ptr[0] & 1) { // multicast + if (!memcmp(ptr, bcast, sizeof(bcast))) { + return !n->nobcast; + } else if (n->nomulti) { + return 0; + } else if (n->allmulti || n->mac_table.multi_overflow) { + return 1; + } + + for (i = n->mac_table.first_multi; i < n->mac_table.in_use; i++) { + if (!memcmp(ptr, &n->mac_table.macs[i * ETH_ALEN], ETH_ALEN)) { + return 1; + } + } + } else { // unicast + if (n->nouni) { + return 0; + } else if (n->alluni || n->mac_table.uni_overflow) { + return 1; + } else if (!memcmp(ptr, 
n->mac, ETH_ALEN)) { + return 1; + } + + for (i = 0; i < n->mac_table.first_multi; i++) { + if (!memcmp(ptr, &n->mac_table.macs[i * ETH_ALEN], ETH_ALEN)) { + return 1; + } + } + } + + return 0; +} + +static uint8_t virtio_net_get_hash_type(bool isip4, + bool isip6, + bool isudp, + bool istcp, + uint32_t types) +{ + if (isip4) { + if (istcp && (types & VIRTIO_NET_RSS_HASH_TYPE_TCPv4)) { + return NetPktRssIpV4Tcp; + } + if (isudp && (types & VIRTIO_NET_RSS_HASH_TYPE_UDPv4)) { + return NetPktRssIpV4Udp; + } + if (types & VIRTIO_NET_RSS_HASH_TYPE_IPv4) { + return NetPktRssIpV4; + } + } else if (isip6) { + uint32_t mask = VIRTIO_NET_RSS_HASH_TYPE_TCP_EX | + VIRTIO_NET_RSS_HASH_TYPE_TCPv6; + + if (istcp && (types & mask)) { + return (types & VIRTIO_NET_RSS_HASH_TYPE_TCP_EX) ? + NetPktRssIpV6TcpEx : NetPktRssIpV6Tcp; + } + mask = VIRTIO_NET_RSS_HASH_TYPE_UDP_EX | VIRTIO_NET_RSS_HASH_TYPE_UDPv6; + if (isudp && (types & mask)) { + return (types & VIRTIO_NET_RSS_HASH_TYPE_UDP_EX) ? + NetPktRssIpV6UdpEx : NetPktRssIpV6Udp; + } + mask = VIRTIO_NET_RSS_HASH_TYPE_IP_EX | VIRTIO_NET_RSS_HASH_TYPE_IPv6; + if (types & mask) { + return (types & VIRTIO_NET_RSS_HASH_TYPE_IP_EX) ? + NetPktRssIpV6Ex : NetPktRssIpV6; + } + } + return 0xff; +} + +static void virtio_set_packet_hash(const uint8_t *buf, uint8_t report, + uint32_t hash) +{ + struct virtio_net_hdr_v1_hash *hdr = (void *)buf; + hdr->hash_value = hash; + hdr->hash_report = report; +} + +static int virtio_net_process_rss(NetClientState *nc, const uint8_t *buf, + size_t size) +{ + VirtIONet *n = qemu_get_nic_opaque(nc); + unsigned int index = nc->queue_index, new_index = index; + struct NetRxPkt *pkt = n->rx_pkt; + uint8_t net_hash_type; + uint32_t hash; + bool isip4, isip6, isudp, istcp; + static const uint8_t reports[NetPktRssIpV6UdpEx + 1] = { + VIRTIO_NET_HASH_REPORT_IPv4, + VIRTIO_NET_HASH_REPORT_TCPv4, + VIRTIO_NET_HASH_REPORT_TCPv6, + VIRTIO_NET_HASH_REPORT_IPv6, + VIRTIO_NET_HASH_REPORT_IPv6_EX, + VIRTIO_NET_HASH_REPORT_TCPv6_EX, + VIRTIO_NET_HASH_REPORT_UDPv4, + VIRTIO_NET_HASH_REPORT_UDPv6, + VIRTIO_NET_HASH_REPORT_UDPv6_EX + }; + + net_rx_pkt_set_protocols(pkt, buf + n->host_hdr_len, + size - n->host_hdr_len); + net_rx_pkt_get_protocols(pkt, &isip4, &isip6, &isudp, &istcp); + if (isip4 && (net_rx_pkt_get_ip4_info(pkt)->fragment)) { + istcp = isudp = false; + } + if (isip6 && (net_rx_pkt_get_ip6_info(pkt)->fragment)) { + istcp = isudp = false; + } + net_hash_type = virtio_net_get_hash_type(isip4, isip6, isudp, istcp, + n->rss_data.hash_types); + if (net_hash_type > NetPktRssIpV6UdpEx) { + if (n->rss_data.populate_hash) { + virtio_set_packet_hash(buf, VIRTIO_NET_HASH_REPORT_NONE, 0); + } + return n->rss_data.redirect ? n->rss_data.default_queue : -1; + } + + hash = net_rx_pkt_calc_rss_hash(pkt, net_hash_type, n->rss_data.key); + + if (n->rss_data.populate_hash) { + virtio_set_packet_hash(buf, reports[net_hash_type], hash); + } + + if (n->rss_data.redirect) { + new_index = hash & (n->rss_data.indirections_len - 1); + new_index = n->rss_data.indirections_table[new_index]; + } + + return (index == new_index) ? 
-1 : new_index; +} + +static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf, + size_t size, bool no_rss) +{ + VirtIONet *n = qemu_get_nic_opaque(nc); + VirtIONetQueue *q = virtio_net_get_subqueue(nc); + VirtIODevice *vdev = VIRTIO_DEVICE(n); + VirtQueueElement *elems[VIRTQUEUE_MAX_SIZE]; + size_t lens[VIRTQUEUE_MAX_SIZE]; + struct iovec mhdr_sg[VIRTQUEUE_MAX_SIZE]; + struct virtio_net_hdr_mrg_rxbuf mhdr; + unsigned mhdr_cnt = 0; + size_t offset, i, guest_offset, j; + ssize_t err; + + if (!virtio_net_can_receive(nc)) { + return -1; + } + + if (!no_rss && n->rss_data.enabled && n->rss_data.enabled_software_rss) { + int index = virtio_net_process_rss(nc, buf, size); + if (index >= 0) { + NetClientState *nc2 = qemu_get_subqueue(n->nic, index); + return virtio_net_receive_rcu(nc2, buf, size, true); + } + } + + /* hdr_len refers to the header we supply to the guest */ + if (!virtio_net_has_buffers(q, size + n->guest_hdr_len - n->host_hdr_len)) { + return 0; + } + + if (!receive_filter(n, buf, size)) + return size; + + offset = i = 0; + + while (offset < size) { + VirtQueueElement *elem; + int len, total; + const struct iovec *sg; + + total = 0; + + if (i == VIRTQUEUE_MAX_SIZE) { + virtio_error(vdev, "virtio-net unexpected long buffer chain"); + err = size; + goto err; + } + + elem = virtqueue_pop(q->rx_vq, sizeof(VirtQueueElement)); + if (!elem) { + if (i) { + virtio_error(vdev, "virtio-net unexpected empty queue: " + "i %zd mergeable %d offset %zd, size %zd, " + "guest hdr len %zd, host hdr len %zd " + "guest features 0x%" PRIx64, + i, n->mergeable_rx_bufs, offset, size, + n->guest_hdr_len, n->host_hdr_len, + vdev->guest_features); + } + err = -1; + goto err; + } + + if (elem->in_num < 1) { + virtio_error(vdev, + "virtio-net receive queue contains no in buffers"); + virtqueue_detach_element(q->rx_vq, elem, 0); + g_free(elem); + err = -1; + goto err; + } + + sg = elem->in_sg; + if (i == 0) { + assert(offset == 0); + if (n->mergeable_rx_bufs) { + mhdr_cnt = iov_copy(mhdr_sg, ARRAY_SIZE(mhdr_sg), + sg, elem->in_num, + offsetof(typeof(mhdr), num_buffers), + sizeof(mhdr.num_buffers)); + } + + receive_header(n, sg, elem->in_num, buf, size); + if (n->rss_data.populate_hash) { + offset = sizeof(mhdr); + iov_from_buf(sg, elem->in_num, offset, + buf + offset, n->host_hdr_len - sizeof(mhdr)); + } + offset = n->host_hdr_len; + total += n->guest_hdr_len; + guest_offset = n->guest_hdr_len; + } else { + guest_offset = 0; + } + + /* copy in packet. ugh */ + len = iov_from_buf(sg, elem->in_num, guest_offset, + buf + offset, size - offset); + total += len; + offset += len; + /* If buffers can't be merged, at this point we + * must have consumed the complete packet. + * Otherwise, drop it. 
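
With VIRTIO_NET_F_MRG_RXBUF a packet may span several descriptor chains;
the device records how many it consumed in the num_buffers field of the
first header, which is why the receive loop collects elems[]/lens[] and
only then does a single virtqueue_flush(). A toy model of that accounting,
assuming fixed-size 64-byte buffers purely for illustration:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define BUF_SZ 64 /* stand-in for one descriptor chain's capacity */

/* copy pkt into fixed-size buffers; the return value plays the role of
 * the num_buffers field written into the first header */
static unsigned scatter(const uint8_t *pkt, size_t len,
                        uint8_t bufs[][BUF_SZ], unsigned nbufs)
{
    unsigned used = 0;
    size_t off = 0;

    while (off < len && used < nbufs) {
        size_t chunk = len - off < BUF_SZ ? len - off : BUF_SZ;
        memcpy(bufs[used++], pkt + off, chunk);
        off += chunk;
    }
    return off == len ? used : 0; /* 0: out of buffers, drop the packet */
}
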
+         */
+        if (!n->mergeable_rx_bufs && offset < size) {
+            virtqueue_unpop(q->rx_vq, elem, total);
+            g_free(elem);
+            err = size;
+            goto err;
+        }
+
+        elems[i] = elem;
+        lens[i] = total;
+        i++;
+    }
+
+    if (mhdr_cnt) {
+        virtio_stw_p(vdev, &mhdr.num_buffers, i);
+        iov_from_buf(mhdr_sg, mhdr_cnt,
+                     0,
+                     &mhdr.num_buffers, sizeof mhdr.num_buffers);
+    }
+
+    for (j = 0; j < i; j++) {
+        /* signal other side */
+        virtqueue_fill(q->rx_vq, elems[j], lens[j], j);
+        g_free(elems[j]);
+    }
+
+    virtqueue_flush(q->rx_vq, i);
+    virtio_notify(vdev, q->rx_vq);
+
+    return size;
+
+err:
+    for (j = 0; j < i; j++) {
+        g_free(elems[j]);
+    }
+
+    return err;
+}
+
+static ssize_t virtio_net_do_receive(NetClientState *nc, const uint8_t *buf,
+                                     size_t size)
+{
+    RCU_READ_LOCK_GUARD();
+
+    return virtio_net_receive_rcu(nc, buf, size, false);
+}
+
+static void virtio_net_rsc_extract_unit4(VirtioNetRscChain *chain,
+                                         const uint8_t *buf,
+                                         VirtioNetRscUnit *unit)
+{
+    uint16_t ip_hdrlen;
+    struct ip_header *ip;
+
+    ip = (struct ip_header *)(buf + chain->n->guest_hdr_len +
+                              sizeof(struct eth_header));
+    unit->ip = (void *)ip;
+    ip_hdrlen = (ip->ip_ver_len & 0xF) << 2;
+    unit->ip_plen = &ip->ip_len;
+    unit->tcp = (struct tcp_header *)(((uint8_t *)unit->ip) + ip_hdrlen);
+    unit->tcp_hdrlen = (htons(unit->tcp->th_offset_flags) & 0xF000) >> 10;
+    unit->payload = htons(*unit->ip_plen) - ip_hdrlen - unit->tcp_hdrlen;
+}
+
+static void virtio_net_rsc_extract_unit6(VirtioNetRscChain *chain,
+                                         const uint8_t *buf,
+                                         VirtioNetRscUnit *unit)
+{
+    struct ip6_header *ip6;
+
+    ip6 = (struct ip6_header *)(buf + chain->n->guest_hdr_len +
+                                sizeof(struct eth_header));
+    unit->ip = ip6;
+    unit->ip_plen = &(ip6->ip6_ctlun.ip6_un1.ip6_un1_plen);
+    unit->tcp = (struct tcp_header *)(((uint8_t *)unit->ip) +
+                                      sizeof(struct ip6_header));
+    unit->tcp_hdrlen = (htons(unit->tcp->th_offset_flags) & 0xF000) >> 10;
+
+    /* The payload length differs between ipv4 and ipv6:
+       the ip header is excluded in ipv6 */
+    unit->payload = htons(*unit->ip_plen) - unit->tcp_hdrlen;
+}
+
+static size_t virtio_net_rsc_drain_seg(VirtioNetRscChain *chain,
+                                       VirtioNetRscSeg *seg)
+{
+    int ret;
+    struct virtio_net_hdr_v1 *h;
+
+    h = (struct virtio_net_hdr_v1 *)seg->buf;
+    h->flags = 0;
+    h->gso_type = VIRTIO_NET_HDR_GSO_NONE;
+
+    if (seg->is_coalesced) {
+        h->rsc.segments = seg->packets;
+        h->rsc.dup_acks = seg->dup_ack;
+        h->flags = VIRTIO_NET_HDR_F_RSC_INFO;
+        if (chain->proto == ETH_P_IP) {
+            h->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
+        } else {
+            h->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
+        }
+    }
+
+    ret = virtio_net_do_receive(seg->nc, seg->buf, seg->size);
+    QTAILQ_REMOVE(&chain->buffers, seg, next);
+    g_free(seg->buf);
+    g_free(seg);
+
+    return ret;
+}
+
+static void virtio_net_rsc_purge(void *opq)
+{
+    VirtioNetRscSeg *seg, *rn;
+    VirtioNetRscChain *chain = (VirtioNetRscChain *)opq;
+
+    QTAILQ_FOREACH_SAFE(seg, &chain->buffers, next, rn) {
+        if (virtio_net_rsc_drain_seg(chain, seg) == 0) {
+            chain->stat.purge_failed++;
+            continue;
+        }
+    }
+
+    chain->stat.timer++;
+    if (!QTAILQ_EMPTY(&chain->buffers)) {
+        timer_mod(chain->drain_timer,
+                  qemu_clock_get_ns(QEMU_CLOCK_HOST) + chain->n->rsc_timeout);
+    }
+}
+
+static void virtio_net_rsc_cleanup(VirtIONet *n)
+{
+    VirtioNetRscChain *chain, *rn_chain;
+    VirtioNetRscSeg *seg, *rn_seg;
+
+    QTAILQ_FOREACH_SAFE(chain, &n->rsc_chains, next, rn_chain) {
+        QTAILQ_FOREACH_SAFE(seg, &chain->buffers, next, rn_seg) {
+            QTAILQ_REMOVE(&chain->buffers, seg, next);
+            g_free(seg->buf);
+            g_free(seg);
+        }
+
+        timer_free(chain->drain_timer);
QTAILQ_REMOVE(&n->rsc_chains, chain, next); + g_free(chain); + } +} + +static void virtio_net_rsc_cache_buf(VirtioNetRscChain *chain, + NetClientState *nc, + const uint8_t *buf, size_t size) +{ + uint16_t hdr_len; + VirtioNetRscSeg *seg; + + hdr_len = chain->n->guest_hdr_len; + seg = g_malloc(sizeof(VirtioNetRscSeg)); + seg->buf = g_malloc(hdr_len + sizeof(struct eth_header) + + sizeof(struct ip6_header) + VIRTIO_NET_MAX_TCP_PAYLOAD); + memcpy(seg->buf, buf, size); + seg->size = size; + seg->packets = 1; + seg->dup_ack = 0; + seg->is_coalesced = 0; + seg->nc = nc; + + QTAILQ_INSERT_TAIL(&chain->buffers, seg, next); + chain->stat.cache++; + + switch (chain->proto) { + case ETH_P_IP: + virtio_net_rsc_extract_unit4(chain, seg->buf, &seg->unit); + break; + case ETH_P_IPV6: + virtio_net_rsc_extract_unit6(chain, seg->buf, &seg->unit); + break; + default: + g_assert_not_reached(); + } +} + +static int32_t virtio_net_rsc_handle_ack(VirtioNetRscChain *chain, + VirtioNetRscSeg *seg, + const uint8_t *buf, + struct tcp_header *n_tcp, + struct tcp_header *o_tcp) +{ + uint32_t nack, oack; + uint16_t nwin, owin; + + nack = htonl(n_tcp->th_ack); + nwin = htons(n_tcp->th_win); + oack = htonl(o_tcp->th_ack); + owin = htons(o_tcp->th_win); + + if ((nack - oack) >= VIRTIO_NET_MAX_TCP_PAYLOAD) { + chain->stat.ack_out_of_win++; + return RSC_FINAL; + } else if (nack == oack) { + /* duplicated ack or window probe */ + if (nwin == owin) { + /* duplicated ack, add dup ack count due to whql test up to 1 */ + chain->stat.dup_ack++; + return RSC_FINAL; + } else { + /* Coalesce window update */ + o_tcp->th_win = n_tcp->th_win; + chain->stat.win_update++; + return RSC_COALESCE; + } + } else { + /* pure ack, go to 'C', finalize*/ + chain->stat.pure_ack++; + return RSC_FINAL; + } +} + +static int32_t virtio_net_rsc_coalesce_data(VirtioNetRscChain *chain, + VirtioNetRscSeg *seg, + const uint8_t *buf, + VirtioNetRscUnit *n_unit) +{ + void *data; + uint16_t o_ip_len; + uint32_t nseq, oseq; + VirtioNetRscUnit *o_unit; + + o_unit = &seg->unit; + o_ip_len = htons(*o_unit->ip_plen); + nseq = htonl(n_unit->tcp->th_seq); + oseq = htonl(o_unit->tcp->th_seq); + + /* out of order or retransmitted. */ + if ((nseq - oseq) > VIRTIO_NET_MAX_TCP_PAYLOAD) { + chain->stat.data_out_of_win++; + return RSC_FINAL; + } + + data = ((uint8_t *)n_unit->tcp) + n_unit->tcp_hdrlen; + if (nseq == oseq) { + if ((o_unit->payload == 0) && n_unit->payload) { + /* From no payload to payload, normal case, not a dup ack or etc */ + chain->stat.data_after_pure_ack++; + goto coalesce; + } else { + return virtio_net_rsc_handle_ack(chain, seg, buf, + n_unit->tcp, o_unit->tcp); + } + } else if ((nseq - oseq) != o_unit->payload) { + /* Not a consistent packet, out of order */ + chain->stat.data_out_of_order++; + return RSC_FINAL; + } else { +coalesce: + if ((o_ip_len + n_unit->payload) > chain->max_payload) { + chain->stat.over_size++; + return RSC_FINAL; + } + + /* Here comes the right data, the payload length in v4/v6 is different, + so use the field value to update and record the new data len */ + o_unit->payload += n_unit->payload; /* update new data len */ + + /* update field in ip header */ + *o_unit->ip_plen = htons(o_ip_len + n_unit->payload); + + /* Bring 'PUSH' big, the whql test guide says 'PUSH' can be coalesced + for windows guest, while this may change the behavior for linux + guest (only if it uses RSC feature). 
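
The window tests in virtio_net_rsc_coalesce_data() rely on unsigned 32-bit
arithmetic, so the distance (nseq - oseq) stays well-defined across the
2^32 sequence wrap and out-of-window segments are finalized rather than
merged. A reduced sketch, with 64 KiB assumed as the stand-in for
VIRTIO_NET_MAX_TCP_PAYLOAD:

#include <stdbool.h>
#include <stdint.h>

#define MAX_TCP_PAYLOAD 65535u /* assumed VIRTIO_NET_MAX_TCP_PAYLOAD */

/* unsigned subtraction keeps the distance correct across the wrap */
static bool seq_in_window(uint32_t nseq, uint32_t oseq)
{
    return (uint32_t)(nseq - oseq) <= MAX_TCP_PAYLOAD;
}
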
+           */
+        o_unit->tcp->th_offset_flags = n_unit->tcp->th_offset_flags;
+
+        o_unit->tcp->th_ack = n_unit->tcp->th_ack;
+        o_unit->tcp->th_win = n_unit->tcp->th_win;
+
+        memmove(seg->buf + seg->size, data, n_unit->payload);
+        seg->size += n_unit->payload;
+        seg->packets++;
+        chain->stat.coalesced++;
+        return RSC_COALESCE;
+    }
+}
+
+static int32_t virtio_net_rsc_coalesce4(VirtioNetRscChain *chain,
+                                        VirtioNetRscSeg *seg,
+                                        const uint8_t *buf, size_t size,
+                                        VirtioNetRscUnit *unit)
+{
+    struct ip_header *ip1, *ip2;
+
+    ip1 = (struct ip_header *)(unit->ip);
+    ip2 = (struct ip_header *)(seg->unit.ip);
+    if ((ip1->ip_src ^ ip2->ip_src) || (ip1->ip_dst ^ ip2->ip_dst)
+        || (unit->tcp->th_sport ^ seg->unit.tcp->th_sport)
+        || (unit->tcp->th_dport ^ seg->unit.tcp->th_dport)) {
+        chain->stat.no_match++;
+        return RSC_NO_MATCH;
+    }
+
+    return virtio_net_rsc_coalesce_data(chain, seg, buf, unit);
+}
+
+static int32_t virtio_net_rsc_coalesce6(VirtioNetRscChain *chain,
+                                        VirtioNetRscSeg *seg,
+                                        const uint8_t *buf, size_t size,
+                                        VirtioNetRscUnit *unit)
+{
+    struct ip6_header *ip1, *ip2;
+
+    ip1 = (struct ip6_header *)(unit->ip);
+    ip2 = (struct ip6_header *)(seg->unit.ip);
+    if (memcmp(&ip1->ip6_src, &ip2->ip6_src, sizeof(struct in6_address))
+        || memcmp(&ip1->ip6_dst, &ip2->ip6_dst, sizeof(struct in6_address))
+        || (unit->tcp->th_sport ^ seg->unit.tcp->th_sport)
+        || (unit->tcp->th_dport ^ seg->unit.tcp->th_dport)) {
+        chain->stat.no_match++;
+        return RSC_NO_MATCH;
+    }
+
+    return virtio_net_rsc_coalesce_data(chain, seg, buf, unit);
+}
+
+/* Packets with the 'SYN' flag should bypass; packets with other control
+ * flags should be sent only after a drain, to prevent out-of-order
+ * delivery */
+static int virtio_net_rsc_tcp_ctrl_check(VirtioNetRscChain *chain,
+                                         struct tcp_header *tcp)
+{
+    uint16_t tcp_hdr;
+    uint16_t tcp_flag;
+
+    tcp_flag = htons(tcp->th_offset_flags);
+    tcp_hdr = (tcp_flag & VIRTIO_NET_TCP_HDR_LENGTH) >> 10;
+    tcp_flag &= VIRTIO_NET_TCP_FLAG;
+    if (tcp_flag & TH_SYN) {
+        chain->stat.tcp_syn++;
+        return RSC_BYPASS;
+    }
+
+    if (tcp_flag & (TH_FIN | TH_URG | TH_RST | TH_ECE | TH_CWR)) {
+        chain->stat.tcp_ctrl_drain++;
+        return RSC_FINAL;
+    }
+
+    if (tcp_hdr > sizeof(struct tcp_header)) {
+        chain->stat.tcp_all_opt++;
+        return RSC_FINAL;
+    }
+
+    return RSC_CANDIDATE;
+}
+
+static size_t virtio_net_rsc_do_coalesce(VirtioNetRscChain *chain,
+                                         NetClientState *nc,
+                                         const uint8_t *buf, size_t size,
+                                         VirtioNetRscUnit *unit)
+{
+    int ret;
+    VirtioNetRscSeg *seg, *nseg;
+
+    if (QTAILQ_EMPTY(&chain->buffers)) {
+        chain->stat.empty_cache++;
+        virtio_net_rsc_cache_buf(chain, nc, buf, size);
+        timer_mod(chain->drain_timer,
+                  qemu_clock_get_ns(QEMU_CLOCK_HOST) + chain->n->rsc_timeout);
+        return size;
+    }
+
+    QTAILQ_FOREACH_SAFE(seg, &chain->buffers, next, nseg) {
+        if (chain->proto == ETH_P_IP) {
+            ret = virtio_net_rsc_coalesce4(chain, seg, buf, size, unit);
+        } else {
+            ret = virtio_net_rsc_coalesce6(chain, seg, buf, size, unit);
+        }
+
+        if (ret == RSC_FINAL) {
+            if (virtio_net_rsc_drain_seg(chain, seg) == 0) {
+                /* Send failed */
+                chain->stat.final_failed++;
+                return 0;
+            }
+
+            /* Send current packet */
+            return virtio_net_do_receive(nc, buf, size);
+        } else if (ret == RSC_NO_MATCH) {
+            continue;
+        } else {
+            /* Coalesced, mark coalesced flag to tell calc cksum for ipv4 */
+            seg->is_coalesced = 1;
+            return size;
+        }
+    }
+
+    chain->stat.no_match_cache++;
+    virtio_net_rsc_cache_buf(chain, nc, buf, size);
+    return size;
+}
+
+/* Drain a connection's data; this is to avoid out-of-order segments */
+static size_t virtio_net_rsc_drain_flow(VirtioNetRscChain
*chain, + NetClientState *nc, + const uint8_t *buf, size_t size, + uint16_t ip_start, uint16_t ip_size, + uint16_t tcp_port) +{ + VirtioNetRscSeg *seg, *nseg; + uint32_t ppair1, ppair2; + + ppair1 = *(uint32_t *)(buf + tcp_port); + QTAILQ_FOREACH_SAFE(seg, &chain->buffers, next, nseg) { + ppair2 = *(uint32_t *)(seg->buf + tcp_port); + if (memcmp(buf + ip_start, seg->buf + ip_start, ip_size) + || (ppair1 != ppair2)) { + continue; + } + if (virtio_net_rsc_drain_seg(chain, seg) == 0) { + chain->stat.drain_failed++; + } + + break; + } + + return virtio_net_do_receive(nc, buf, size); +} + +static int32_t virtio_net_rsc_sanity_check4(VirtioNetRscChain *chain, + struct ip_header *ip, + const uint8_t *buf, size_t size) +{ + uint16_t ip_len; + + /* Not an ipv4 packet */ + if (((ip->ip_ver_len & 0xF0) >> 4) != IP_HEADER_VERSION_4) { + chain->stat.ip_option++; + return RSC_BYPASS; + } + + /* Don't handle packets with ip option */ + if ((ip->ip_ver_len & 0xF) != VIRTIO_NET_IP4_HEADER_LENGTH) { + chain->stat.ip_option++; + return RSC_BYPASS; + } + + if (ip->ip_p != IPPROTO_TCP) { + chain->stat.bypass_not_tcp++; + return RSC_BYPASS; + } + + /* Don't handle packets with ip fragment */ + if (!(htons(ip->ip_off) & IP_DF)) { + chain->stat.ip_frag++; + return RSC_BYPASS; + } + + /* Don't handle packets with ecn flag */ + if (IPTOS_ECN(ip->ip_tos)) { + chain->stat.ip_ecn++; + return RSC_BYPASS; + } + + ip_len = htons(ip->ip_len); + if (ip_len < (sizeof(struct ip_header) + sizeof(struct tcp_header)) + || ip_len > (size - chain->n->guest_hdr_len - + sizeof(struct eth_header))) { + chain->stat.ip_hacked++; + return RSC_BYPASS; + } + + return RSC_CANDIDATE; +} + +static size_t virtio_net_rsc_receive4(VirtioNetRscChain *chain, + NetClientState *nc, + const uint8_t *buf, size_t size) +{ + int32_t ret; + uint16_t hdr_len; + VirtioNetRscUnit unit; + + hdr_len = ((VirtIONet *)(chain->n))->guest_hdr_len; + + if (size < (hdr_len + sizeof(struct eth_header) + sizeof(struct ip_header) + + sizeof(struct tcp_header))) { + chain->stat.bypass_not_tcp++; + return virtio_net_do_receive(nc, buf, size); + } + + virtio_net_rsc_extract_unit4(chain, buf, &unit); + if (virtio_net_rsc_sanity_check4(chain, unit.ip, buf, size) + != RSC_CANDIDATE) { + return virtio_net_do_receive(nc, buf, size); + } + + ret = virtio_net_rsc_tcp_ctrl_check(chain, unit.tcp); + if (ret == RSC_BYPASS) { + return virtio_net_do_receive(nc, buf, size); + } else if (ret == RSC_FINAL) { + return virtio_net_rsc_drain_flow(chain, nc, buf, size, + ((hdr_len + sizeof(struct eth_header)) + 12), + VIRTIO_NET_IP4_ADDR_SIZE, + hdr_len + sizeof(struct eth_header) + sizeof(struct ip_header)); + } + + return virtio_net_rsc_do_coalesce(chain, nc, buf, size, &unit); +} + +static int32_t virtio_net_rsc_sanity_check6(VirtioNetRscChain *chain, + struct ip6_header *ip6, + const uint8_t *buf, size_t size) +{ + uint16_t ip_len; + + if (((ip6->ip6_ctlun.ip6_un1.ip6_un1_flow & 0xF0) >> 4) + != IP_HEADER_VERSION_6) { + return RSC_BYPASS; + } + + /* Both option and protocol is checked in this */ + if (ip6->ip6_ctlun.ip6_un1.ip6_un1_nxt != IPPROTO_TCP) { + chain->stat.bypass_not_tcp++; + return RSC_BYPASS; + } + + ip_len = htons(ip6->ip6_ctlun.ip6_un1.ip6_un1_plen); + if (ip_len < sizeof(struct tcp_header) || + ip_len > (size - chain->n->guest_hdr_len - sizeof(struct eth_header) + - sizeof(struct ip6_header))) { + chain->stat.ip_hacked++; + return RSC_BYPASS; + } + + /* Don't handle packets with ecn flag */ + if (IP6_ECN(ip6->ip6_ctlun.ip6_un3.ip6_un3_ecn)) { + chain->stat.ip_ecn++; + 
return RSC_BYPASS; + } + + return RSC_CANDIDATE; +} + +static size_t virtio_net_rsc_receive6(void *opq, NetClientState *nc, + const uint8_t *buf, size_t size) +{ + int32_t ret; + uint16_t hdr_len; + VirtioNetRscChain *chain; + VirtioNetRscUnit unit; + + chain = (VirtioNetRscChain *)opq; + hdr_len = ((VirtIONet *)(chain->n))->guest_hdr_len; + + if (size < (hdr_len + sizeof(struct eth_header) + sizeof(struct ip6_header) + + sizeof(tcp_header))) { + return virtio_net_do_receive(nc, buf, size); + } + + virtio_net_rsc_extract_unit6(chain, buf, &unit); + if (RSC_CANDIDATE != virtio_net_rsc_sanity_check6(chain, + unit.ip, buf, size)) { + return virtio_net_do_receive(nc, buf, size); + } + + ret = virtio_net_rsc_tcp_ctrl_check(chain, unit.tcp); + if (ret == RSC_BYPASS) { + return virtio_net_do_receive(nc, buf, size); + } else if (ret == RSC_FINAL) { + return virtio_net_rsc_drain_flow(chain, nc, buf, size, + ((hdr_len + sizeof(struct eth_header)) + 8), + VIRTIO_NET_IP6_ADDR_SIZE, + hdr_len + sizeof(struct eth_header) + + sizeof(struct ip6_header)); + } + + return virtio_net_rsc_do_coalesce(chain, nc, buf, size, &unit); +} + +static VirtioNetRscChain *virtio_net_rsc_lookup_chain(VirtIONet *n, + NetClientState *nc, + uint16_t proto) +{ + VirtioNetRscChain *chain; + + if ((proto != (uint16_t)ETH_P_IP) && (proto != (uint16_t)ETH_P_IPV6)) { + return NULL; + } + + QTAILQ_FOREACH(chain, &n->rsc_chains, next) { + if (chain->proto == proto) { + return chain; + } + } + + chain = g_malloc(sizeof(*chain)); + chain->n = n; + chain->proto = proto; + if (proto == (uint16_t)ETH_P_IP) { + chain->max_payload = VIRTIO_NET_MAX_IP4_PAYLOAD; + chain->gso_type = VIRTIO_NET_HDR_GSO_TCPV4; + } else { + chain->max_payload = VIRTIO_NET_MAX_IP6_PAYLOAD; + chain->gso_type = VIRTIO_NET_HDR_GSO_TCPV6; + } + chain->drain_timer = timer_new_ns(QEMU_CLOCK_HOST, + virtio_net_rsc_purge, chain); + memset(&chain->stat, 0, sizeof(chain->stat)); + + QTAILQ_INIT(&chain->buffers); + QTAILQ_INSERT_TAIL(&n->rsc_chains, chain, next); + + return chain; +} + +static ssize_t virtio_net_rsc_receive(NetClientState *nc, + const uint8_t *buf, + size_t size) +{ + uint16_t proto; + VirtioNetRscChain *chain; + struct eth_header *eth; + VirtIONet *n; + + n = qemu_get_nic_opaque(nc); + if (size < (n->host_hdr_len + sizeof(struct eth_header))) { + return virtio_net_do_receive(nc, buf, size); + } + + eth = (struct eth_header *)(buf + n->guest_hdr_len); + proto = htons(eth->h_proto); + + chain = virtio_net_rsc_lookup_chain(n, nc, proto); + if (chain) { + chain->stat.received++; + if (proto == (uint16_t)ETH_P_IP && n->rsc4_enabled) { + return virtio_net_rsc_receive4(chain, nc, buf, size); + } else if (proto == (uint16_t)ETH_P_IPV6 && n->rsc6_enabled) { + return virtio_net_rsc_receive6(chain, nc, buf, size); + } + } + return virtio_net_do_receive(nc, buf, size); +} + +static ssize_t virtio_net_receive(NetClientState *nc, const uint8_t *buf, + size_t size) +{ + VirtIONet *n = qemu_get_nic_opaque(nc); + if ((n->rsc4_enabled || n->rsc6_enabled)) { + return virtio_net_rsc_receive(nc, buf, size); + } else { + return virtio_net_do_receive(nc, buf, size); + } +} + +static int32_t virtio_net_flush_tx(VirtIONetQueue *q); + +static void virtio_net_tx_complete(NetClientState *nc, ssize_t len) +{ + VirtIONet *n = qemu_get_nic_opaque(nc); + VirtIONetQueue *q = virtio_net_get_subqueue(nc); + VirtIODevice *vdev = VIRTIO_DEVICE(n); + + virtqueue_push(q->tx_vq, q->async_tx.elem, 0); + virtio_notify(vdev, q->tx_vq); + + g_free(q->async_tx.elem); + q->async_tx.elem = NULL; + + 
virtio_queue_set_notification(q->tx_vq, 1); + virtio_net_flush_tx(q); +} + +/* TX */ +static int32_t virtio_net_flush_tx(VirtIONetQueue *q) +{ + VirtIONet *n = q->n; + VirtIODevice *vdev = VIRTIO_DEVICE(n); + VirtQueueElement *elem; + int32_t num_packets = 0; + int queue_index = vq2q(virtio_get_queue_index(q->tx_vq)); + if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) { + return num_packets; + } + + if (q->async_tx.elem) { + virtio_queue_set_notification(q->tx_vq, 0); + return num_packets; + } + + for (;;) { + ssize_t ret; + unsigned int out_num; + struct iovec sg[VIRTQUEUE_MAX_SIZE], sg2[VIRTQUEUE_MAX_SIZE + 1], *out_sg; + struct virtio_net_hdr_mrg_rxbuf mhdr; + + elem = virtqueue_pop(q->tx_vq, sizeof(VirtQueueElement)); + if (!elem) { + break; + } + + out_num = elem->out_num; + out_sg = elem->out_sg; + if (out_num < 1) { + virtio_error(vdev, "virtio-net header not in first element"); + virtqueue_detach_element(q->tx_vq, elem, 0); + g_free(elem); + return -EINVAL; + } + + if (n->has_vnet_hdr) { + if (iov_to_buf(out_sg, out_num, 0, &mhdr, n->guest_hdr_len) < + n->guest_hdr_len) { + virtio_error(vdev, "virtio-net header incorrect"); + virtqueue_detach_element(q->tx_vq, elem, 0); + g_free(elem); + return -EINVAL; + } + if (n->needs_vnet_hdr_swap) { + virtio_net_hdr_swap(vdev, (void *) &mhdr); + sg2[0].iov_base = &mhdr; + sg2[0].iov_len = n->guest_hdr_len; + out_num = iov_copy(&sg2[1], ARRAY_SIZE(sg2) - 1, + out_sg, out_num, + n->guest_hdr_len, -1); + if (out_num == VIRTQUEUE_MAX_SIZE) { + goto drop; + } + out_num += 1; + out_sg = sg2; + } + } + /* + * If host wants to see the guest header as is, we can + * pass it on unchanged. Otherwise, copy just the parts + * that host is interested in. + */ + assert(n->host_hdr_len <= n->guest_hdr_len); + if (n->host_hdr_len != n->guest_hdr_len) { + unsigned sg_num = iov_copy(sg, ARRAY_SIZE(sg), + out_sg, out_num, + 0, n->host_hdr_len); + sg_num += iov_copy(sg + sg_num, ARRAY_SIZE(sg) - sg_num, + out_sg, out_num, + n->guest_hdr_len, -1); + out_num = sg_num; + out_sg = sg; + } + + ret = qemu_sendv_packet_async(qemu_get_subqueue(n->nic, queue_index), + out_sg, out_num, virtio_net_tx_complete); + if (ret == 0) { + virtio_queue_set_notification(q->tx_vq, 0); + q->async_tx.elem = elem; + return -EBUSY; + } + +drop: + virtqueue_push(q->tx_vq, elem, 0); + virtio_notify(vdev, q->tx_vq); + g_free(elem); + + if (++num_packets >= n->tx_burst) { + break; + } + } + return num_packets; +} + +static void virtio_net_handle_tx_timer(VirtIODevice *vdev, VirtQueue *vq) +{ + VirtIONet *n = VIRTIO_NET(vdev); + VirtIONetQueue *q = &n->vqs[vq2q(virtio_get_queue_index(vq))]; + + if (unlikely((n->status & VIRTIO_NET_S_LINK_UP) == 0)) { + virtio_net_drop_tx_queue_data(vdev, vq); + return; + } + + /* This happens when device was stopped but VCPU wasn't. 
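+ * Record tx_waiting so the flush is retried once the VM resumes.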
*/ + if (!vdev->vm_running) { + q->tx_waiting = 1; + return; + } + + if (q->tx_waiting) { + virtio_queue_set_notification(vq, 1); + timer_del(q->tx_timer); + q->tx_waiting = 0; + if (virtio_net_flush_tx(q) == -EINVAL) { + return; + } + } else { + timer_mod(q->tx_timer, + qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout); + q->tx_waiting = 1; + virtio_queue_set_notification(vq, 0); + } +} + +static void virtio_net_handle_tx_bh(VirtIODevice *vdev, VirtQueue *vq) +{ + VirtIONet *n = VIRTIO_NET(vdev); + VirtIONetQueue *q = &n->vqs[vq2q(virtio_get_queue_index(vq))]; + + if (unlikely((n->status & VIRTIO_NET_S_LINK_UP) == 0)) { + virtio_net_drop_tx_queue_data(vdev, vq); + return; + } + + if (unlikely(q->tx_waiting)) { + return; + } + q->tx_waiting = 1; + /* This happens when device was stopped but VCPU wasn't. */ + if (!vdev->vm_running) { + return; + } + virtio_queue_set_notification(vq, 0); + qemu_bh_schedule(q->tx_bh); +} + +static void virtio_net_tx_timer(void *opaque) +{ + VirtIONetQueue *q = opaque; + VirtIONet *n = q->n; + VirtIODevice *vdev = VIRTIO_DEVICE(n); + /* This happens when device was stopped but BH wasn't. */ + if (!vdev->vm_running) { + /* Make sure tx waiting is set, so we'll run when restarted. */ + assert(q->tx_waiting); + return; + } + + q->tx_waiting = 0; + + /* Just in case the driver is not ready on more */ + if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) { + return; + } + + virtio_queue_set_notification(q->tx_vq, 1); + virtio_net_flush_tx(q); +} + +static void virtio_net_tx_bh(void *opaque) +{ + VirtIONetQueue *q = opaque; + VirtIONet *n = q->n; + VirtIODevice *vdev = VIRTIO_DEVICE(n); + int32_t ret; + + /* This happens when device was stopped but BH wasn't. */ + if (!vdev->vm_running) { + /* Make sure tx waiting is set, so we'll run when restarted. */ + assert(q->tx_waiting); + return; + } + + q->tx_waiting = 0; + + /* Just in case the driver is not ready on more */ + if (unlikely(!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))) { + return; + } + + ret = virtio_net_flush_tx(q); + if (ret == -EBUSY || ret == -EINVAL) { + return; /* Notification re-enable handled by tx_complete or device + * broken */ + } + + /* If we flush a full burst of packets, assume there are + * more coming and immediately reschedule */ + if (ret >= n->tx_burst) { + qemu_bh_schedule(q->tx_bh); + q->tx_waiting = 1; + return; + } + + /* If less than a full burst, re-enable notification and flush + * anything that may have come in while we weren't looking. 
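+ * Flushing again after the re-enable closes the window in which a
+ * request could slip in unnoticed.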
If + * we find something, assume the guest is still active and reschedule */ + virtio_queue_set_notification(q->tx_vq, 1); + ret = virtio_net_flush_tx(q); + if (ret == -EINVAL) { + return; + } else if (ret > 0) { + virtio_queue_set_notification(q->tx_vq, 0); + qemu_bh_schedule(q->tx_bh); + q->tx_waiting = 1; + } +} + +static void virtio_net_add_queue(VirtIONet *n, int index) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(n); + + n->vqs[index].rx_vq = virtio_add_queue(vdev, n->net_conf.rx_queue_size, + virtio_net_handle_rx); + + if (n->net_conf.tx && !strcmp(n->net_conf.tx, "timer")) { + n->vqs[index].tx_vq = + virtio_add_queue(vdev, n->net_conf.tx_queue_size, + virtio_net_handle_tx_timer); + n->vqs[index].tx_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, + virtio_net_tx_timer, + &n->vqs[index]); + } else { + n->vqs[index].tx_vq = + virtio_add_queue(vdev, n->net_conf.tx_queue_size, + virtio_net_handle_tx_bh); + n->vqs[index].tx_bh = qemu_bh_new(virtio_net_tx_bh, &n->vqs[index]); + } + + n->vqs[index].tx_waiting = 0; + n->vqs[index].n = n; +} + +static void virtio_net_del_queue(VirtIONet *n, int index) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(n); + VirtIONetQueue *q = &n->vqs[index]; + NetClientState *nc = qemu_get_subqueue(n->nic, index); + + qemu_purge_queued_packets(nc); + + virtio_del_queue(vdev, index * 2); + if (q->tx_timer) { + timer_free(q->tx_timer); + q->tx_timer = NULL; + } else { + qemu_bh_delete(q->tx_bh); + q->tx_bh = NULL; + } + q->tx_waiting = 0; + virtio_del_queue(vdev, index * 2 + 1); +} + +static void virtio_net_change_num_queue_pairs(VirtIONet *n, int new_max_queue_pairs) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(n); + int old_num_queues = virtio_get_num_queues(vdev); + int new_num_queues = new_max_queue_pairs * 2 + 1; + int i; + + assert(old_num_queues >= 3); + assert(old_num_queues % 2 == 1); + + if (old_num_queues == new_num_queues) { + return; + } + + /* + * We always need to remove and add ctrl vq if + * old_num_queues != new_num_queues. Remove ctrl_vq first, + * and then we only enter one of the following two loops. + */ + virtio_del_queue(vdev, old_num_queues - 1); + + for (i = new_num_queues - 1; i < old_num_queues - 1; i += 2) { + /* new_num_queues < old_num_queues */ + virtio_net_del_queue(n, i / 2); + } + + for (i = old_num_queues - 1; i < new_num_queues - 1; i += 2) { + /* new_num_queues > old_num_queues */ + virtio_net_add_queue(n, i / 2); + } + + /* add ctrl_vq last */ + n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl); +} + +static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue) +{ + int max = multiqueue ? n->max_queue_pairs : 1; + + n->multiqueue = multiqueue; + virtio_net_change_num_queue_pairs(n, max); + + virtio_net_set_queue_pairs(n); +} + +static int virtio_net_post_load_device(void *opaque, int version_id) +{ + VirtIONet *n = opaque; + VirtIODevice *vdev = VIRTIO_DEVICE(n); + int i, link_down; + + trace_virtio_net_post_load_device(); + virtio_net_set_mrg_rx_bufs(n, n->mergeable_rx_bufs, + virtio_vdev_has_feature(vdev, + VIRTIO_F_VERSION_1), + virtio_vdev_has_feature(vdev, + VIRTIO_NET_F_HASH_REPORT)); + + /* MAC_TABLE_ENTRIES may be different from the saved image */ + if (n->mac_table.in_use > MAC_TABLE_ENTRIES) { + n->mac_table.in_use = 0; + } + + if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) { + n->curr_guest_offloads = virtio_net_supported_guest_offloads(n); + } + + /* + * curr_guest_offloads will be later overwritten by the + * virtio_set_features_nocheck call done from the virtio_load. 
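+ * (That call re-applies the migrated feature bits, which recomputes
+ * the offload state and would clobber the value restored here.)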
+ * Here we make sure it is preserved and restored accordingly + * in the virtio_net_post_load_virtio callback. + */ + n->saved_guest_offloads = n->curr_guest_offloads; + + virtio_net_set_queue_pairs(n); + + /* Find the first multicast entry in the saved MAC filter */ + for (i = 0; i < n->mac_table.in_use; i++) { + if (n->mac_table.macs[i * ETH_ALEN] & 1) { + break; + } + } + n->mac_table.first_multi = i; + + /* nc.link_down can't be migrated, so infer link_down according + * to link status bit in n->status */ + link_down = (n->status & VIRTIO_NET_S_LINK_UP) == 0; + for (i = 0; i < n->max_queue_pairs; i++) { + qemu_get_subqueue(n->nic, i)->link_down = link_down; + } + + if (virtio_vdev_has_feature(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE) && + virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) { + qemu_announce_timer_reset(&n->announce_timer, migrate_announce_params(), + QEMU_CLOCK_VIRTUAL, + virtio_net_announce_timer, n); + if (n->announce_timer.round) { + timer_mod(n->announce_timer.tm, + qemu_clock_get_ms(n->announce_timer.type)); + } else { + qemu_announce_timer_del(&n->announce_timer, false); + } + } + + if (n->rss_data.enabled) { + n->rss_data.enabled_software_rss = n->rss_data.populate_hash; + if (!n->rss_data.populate_hash) { + if (!virtio_net_attach_epbf_rss(n)) { + if (get_vhost_net(qemu_get_queue(n->nic)->peer)) { + warn_report("Can't post-load eBPF RSS for vhost"); + } else { + warn_report("Can't post-load eBPF RSS - " + "fallback to software RSS"); + n->rss_data.enabled_software_rss = true; + } + } + } + + trace_virtio_net_rss_enable(n->rss_data.hash_types, + n->rss_data.indirections_len, + sizeof(n->rss_data.key)); + } else { + trace_virtio_net_rss_disable(); + } + return 0; +} + +static int virtio_net_post_load_virtio(VirtIODevice *vdev) +{ + VirtIONet *n = VIRTIO_NET(vdev); + /* + * The actual needed state is now in saved_guest_offloads, + * see virtio_net_post_load_device for detail. + * Restore it back and apply the desired offloads. + */ + n->curr_guest_offloads = n->saved_guest_offloads; + if (peer_has_vnet_hdr(n)) { + virtio_net_apply_guest_offloads(n); + } + + return 0; +} + +/* tx_waiting field of a VirtIONetQueue */ +static const VMStateDescription vmstate_virtio_net_queue_tx_waiting = { + .name = "virtio-net-queue-tx_waiting", + .fields = (VMStateField[]) { + VMSTATE_UINT32(tx_waiting, VirtIONetQueue), + VMSTATE_END_OF_LIST() + }, +}; + +static bool max_queue_pairs_gt_1(void *opaque, int version_id) +{ + return VIRTIO_NET(opaque)->max_queue_pairs > 1; +} + +static bool has_ctrl_guest_offloads(void *opaque, int version_id) +{ + return virtio_vdev_has_feature(VIRTIO_DEVICE(opaque), + VIRTIO_NET_F_CTRL_GUEST_OFFLOADS); +} + +static bool mac_table_fits(void *opaque, int version_id) +{ + return VIRTIO_NET(opaque)->mac_table.in_use <= MAC_TABLE_ENTRIES; +} + +static bool mac_table_doesnt_fit(void *opaque, int version_id) +{ + return !mac_table_fits(opaque, version_id); +} + +/* This temporary type is shared by all the WITH_TMP methods + * although only some fields are used by each. + */ +struct VirtIONetMigTmp { + VirtIONet *parent; + VirtIONetQueue *vqs_1; + uint16_t curr_queue_pairs_1; + uint8_t has_ufo; + uint32_t has_vnet_hdr; +}; + +/* The 2nd and subsequent tx_waiting flags are loaded later than + * the 1st entry in the queue_pairs and only if there's more than one + * entry. We use the tmp mechanism to calculate a temporary + * pointer and count and also validate the count. 
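+ * For example, with curr_queue_pairs == 4 the pre_save hook computes
+ *
+ *     tmp->vqs_1              = tmp->parent->vqs + 1;
+ *     tmp->curr_queue_pairs_1 = 3;
+ *
+ * so only the tail entries are migrated through this section.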
+ */ + +static int virtio_net_tx_waiting_pre_save(void *opaque) +{ + struct VirtIONetMigTmp *tmp = opaque; + + tmp->vqs_1 = tmp->parent->vqs + 1; + tmp->curr_queue_pairs_1 = tmp->parent->curr_queue_pairs - 1; + if (tmp->parent->curr_queue_pairs == 0) { + tmp->curr_queue_pairs_1 = 0; + } + + return 0; +} + +static int virtio_net_tx_waiting_pre_load(void *opaque) +{ + struct VirtIONetMigTmp *tmp = opaque; + + /* Reuse the pointer setup from save */ + virtio_net_tx_waiting_pre_save(opaque); + + if (tmp->parent->curr_queue_pairs > tmp->parent->max_queue_pairs) { + error_report("virtio-net: curr_queue_pairs %x > max_queue_pairs %x", + tmp->parent->curr_queue_pairs, tmp->parent->max_queue_pairs); + + return -EINVAL; + } + + return 0; /* all good */ +} + +static const VMStateDescription vmstate_virtio_net_tx_waiting = { + .name = "virtio-net-tx_waiting", + .pre_load = virtio_net_tx_waiting_pre_load, + .pre_save = virtio_net_tx_waiting_pre_save, + .fields = (VMStateField[]) { + VMSTATE_STRUCT_VARRAY_POINTER_UINT16(vqs_1, struct VirtIONetMigTmp, + curr_queue_pairs_1, + vmstate_virtio_net_queue_tx_waiting, + struct VirtIONetQueue), + VMSTATE_END_OF_LIST() + }, +}; + +/* the 'has_ufo' flag is just tested; if the incoming stream has the + * flag set we need to check that we have it + */ +static int virtio_net_ufo_post_load(void *opaque, int version_id) +{ + struct VirtIONetMigTmp *tmp = opaque; + + if (tmp->has_ufo && !peer_has_ufo(tmp->parent)) { + error_report("virtio-net: saved image requires TUN_F_UFO support"); + return -EINVAL; + } + + return 0; +} + +static int virtio_net_ufo_pre_save(void *opaque) +{ + struct VirtIONetMigTmp *tmp = opaque; + + tmp->has_ufo = tmp->parent->has_ufo; + + return 0; +} + +static const VMStateDescription vmstate_virtio_net_has_ufo = { + .name = "virtio-net-ufo", + .post_load = virtio_net_ufo_post_load, + .pre_save = virtio_net_ufo_pre_save, + .fields = (VMStateField[]) { + VMSTATE_UINT8(has_ufo, struct VirtIONetMigTmp), + VMSTATE_END_OF_LIST() + }, +}; + +/* the 'has_vnet_hdr' flag is just tested; if the incoming stream has the + * flag set we need to check that we have it + */ +static int virtio_net_vnet_post_load(void *opaque, int version_id) +{ + struct VirtIONetMigTmp *tmp = opaque; + + if (tmp->has_vnet_hdr && !peer_has_vnet_hdr(tmp->parent)) { + error_report("virtio-net: saved image requires vnet_hdr=on"); + return -EINVAL; + } + + return 0; +} + +static int virtio_net_vnet_pre_save(void *opaque) +{ + struct VirtIONetMigTmp *tmp = opaque; + + tmp->has_vnet_hdr = tmp->parent->has_vnet_hdr; + + return 0; +} + +static const VMStateDescription vmstate_virtio_net_has_vnet = { + .name = "virtio-net-vnet", + .post_load = virtio_net_vnet_post_load, + .pre_save = virtio_net_vnet_pre_save, + .fields = (VMStateField[]) { + VMSTATE_UINT32(has_vnet_hdr, struct VirtIONetMigTmp), + VMSTATE_END_OF_LIST() + }, +}; + +static bool virtio_net_rss_needed(void *opaque) +{ + return VIRTIO_NET(opaque)->rss_data.enabled; +} + +static const VMStateDescription vmstate_virtio_net_rss = { + .name = "virtio-net-device/rss", + .version_id = 1, + .minimum_version_id = 1, + .needed = virtio_net_rss_needed, + .fields = (VMStateField[]) { + VMSTATE_BOOL(rss_data.enabled, VirtIONet), + VMSTATE_BOOL(rss_data.redirect, VirtIONet), + VMSTATE_BOOL(rss_data.populate_hash, VirtIONet), + VMSTATE_UINT32(rss_data.hash_types, VirtIONet), + VMSTATE_UINT16(rss_data.indirections_len, VirtIONet), + VMSTATE_UINT16(rss_data.default_queue, VirtIONet), + VMSTATE_UINT8_ARRAY(rss_data.key, VirtIONet, + 
VIRTIO_NET_RSS_MAX_KEY_SIZE), + VMSTATE_VARRAY_UINT16_ALLOC(rss_data.indirections_table, VirtIONet, + rss_data.indirections_len, 0, + vmstate_info_uint16, uint16_t), + VMSTATE_END_OF_LIST() + }, +}; + +static const VMStateDescription vmstate_virtio_net_device = { + .name = "virtio-net-device", + .version_id = VIRTIO_NET_VM_VERSION, + .minimum_version_id = VIRTIO_NET_VM_VERSION, + .post_load = virtio_net_post_load_device, + .fields = (VMStateField[]) { + VMSTATE_UINT8_ARRAY(mac, VirtIONet, ETH_ALEN), + VMSTATE_STRUCT_POINTER(vqs, VirtIONet, + vmstate_virtio_net_queue_tx_waiting, + VirtIONetQueue), + VMSTATE_UINT32(mergeable_rx_bufs, VirtIONet), + VMSTATE_UINT16(status, VirtIONet), + VMSTATE_UINT8(promisc, VirtIONet), + VMSTATE_UINT8(allmulti, VirtIONet), + VMSTATE_UINT32(mac_table.in_use, VirtIONet), + + /* Guarded pair: If it fits we load it, else we throw it away + * - can happen if source has a larger MAC table.; post-load + * sets flags in this case. + */ + VMSTATE_VBUFFER_MULTIPLY(mac_table.macs, VirtIONet, + 0, mac_table_fits, mac_table.in_use, + ETH_ALEN), + VMSTATE_UNUSED_VARRAY_UINT32(VirtIONet, mac_table_doesnt_fit, 0, + mac_table.in_use, ETH_ALEN), + + /* Note: This is an array of uint32's that's always been saved as a + * buffer; hold onto your endiannesses; it's actually used as a bitmap + * but based on the uint. + */ + VMSTATE_BUFFER_POINTER_UNSAFE(vlans, VirtIONet, 0, MAX_VLAN >> 3), + VMSTATE_WITH_TMP(VirtIONet, struct VirtIONetMigTmp, + vmstate_virtio_net_has_vnet), + VMSTATE_UINT8(mac_table.multi_overflow, VirtIONet), + VMSTATE_UINT8(mac_table.uni_overflow, VirtIONet), + VMSTATE_UINT8(alluni, VirtIONet), + VMSTATE_UINT8(nomulti, VirtIONet), + VMSTATE_UINT8(nouni, VirtIONet), + VMSTATE_UINT8(nobcast, VirtIONet), + VMSTATE_WITH_TMP(VirtIONet, struct VirtIONetMigTmp, + vmstate_virtio_net_has_ufo), + VMSTATE_SINGLE_TEST(max_queue_pairs, VirtIONet, max_queue_pairs_gt_1, 0, + vmstate_info_uint16_equal, uint16_t), + VMSTATE_UINT16_TEST(curr_queue_pairs, VirtIONet, max_queue_pairs_gt_1), + VMSTATE_WITH_TMP(VirtIONet, struct VirtIONetMigTmp, + vmstate_virtio_net_tx_waiting), + VMSTATE_UINT64_TEST(curr_guest_offloads, VirtIONet, + has_ctrl_guest_offloads), + VMSTATE_END_OF_LIST() + }, + .subsections = (const VMStateDescription * []) { + &vmstate_virtio_net_rss, + NULL + } +}; + +static NetClientInfo net_virtio_info = { + .type = NET_CLIENT_DRIVER_NIC, + .size = sizeof(NICState), + .can_receive = virtio_net_can_receive, + .receive = virtio_net_receive, + .link_status_changed = virtio_net_set_link_status, + .query_rx_filter = virtio_net_query_rxfilter, + .announce = virtio_net_announce, +}; + +static bool virtio_net_guest_notifier_pending(VirtIODevice *vdev, int idx) +{ + VirtIONet *n = VIRTIO_NET(vdev); + NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx)); + assert(n->vhost_started); + return vhost_net_virtqueue_pending(get_vhost_net(nc->peer), idx); +} + +static void virtio_net_guest_notifier_mask(VirtIODevice *vdev, int idx, + bool mask) +{ + VirtIONet *n = VIRTIO_NET(vdev); + NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx)); + assert(n->vhost_started); + vhost_net_virtqueue_mask(get_vhost_net(nc->peer), + vdev, idx, mask); +} + +static void virtio_net_set_config_size(VirtIONet *n, uint64_t host_features) +{ + virtio_add_feature(&host_features, VIRTIO_NET_F_MAC); + + n->config_size = virtio_feature_get_config_size(feature_sizes, + host_features); +} + +void virtio_net_set_netclient_name(VirtIONet *n, const char *name, + const char *type) +{ + /* + * The name can be 
NULL, the netclient name will be type.x. + */ + assert(type != NULL); + + g_free(n->netclient_name); + g_free(n->netclient_type); + n->netclient_name = g_strdup(name); + n->netclient_type = g_strdup(type); +} + +static bool failover_unplug_primary(VirtIONet *n, DeviceState *dev) +{ + HotplugHandler *hotplug_ctrl; + PCIDevice *pci_dev; + Error *err = NULL; + + hotplug_ctrl = qdev_get_hotplug_handler(dev); + if (hotplug_ctrl) { + pci_dev = PCI_DEVICE(dev); + pci_dev->partially_hotplugged = true; + hotplug_handler_unplug_request(hotplug_ctrl, dev, &err); + if (err) { + error_report_err(err); + return false; + } + } else { + return false; + } + return true; +} + +static bool failover_replug_primary(VirtIONet *n, DeviceState *dev, + Error **errp) +{ + Error *err = NULL; + HotplugHandler *hotplug_ctrl; + PCIDevice *pdev = PCI_DEVICE(dev); + BusState *primary_bus; + + if (!pdev->partially_hotplugged) { + return true; + } + primary_bus = dev->parent_bus; + if (!primary_bus) { + error_setg(errp, "virtio_net: couldn't find primary bus"); + return false; + } + qdev_set_parent_bus(dev, primary_bus, &error_abort); + qatomic_set(&n->failover_primary_hidden, false); + hotplug_ctrl = qdev_get_hotplug_handler(dev); + if (hotplug_ctrl) { + hotplug_handler_pre_plug(hotplug_ctrl, dev, &err); + if (err) { + goto out; + } + hotplug_handler_plug(hotplug_ctrl, dev, &err); + } + pdev->partially_hotplugged = false; + +out: + error_propagate(errp, err); + return !err; +} + +static void virtio_net_handle_migration_primary(VirtIONet *n, MigrationState *s) +{ + bool should_be_hidden; + Error *err = NULL; + DeviceState *dev = failover_find_primary_device(n); + + if (!dev) { + return; + } + + should_be_hidden = qatomic_read(&n->failover_primary_hidden); + + if (migration_in_setup(s) && !should_be_hidden) { + if (failover_unplug_primary(n, dev)) { + vmstate_unregister(VMSTATE_IF(dev), qdev_get_vmsd(dev), dev); + qapi_event_send_unplug_primary(dev->id); + qatomic_set(&n->failover_primary_hidden, true); + } else { + warn_report("couldn't unplug primary device"); + } + } else if (migration_has_failed(s)) { + /* We already unplugged the device let's plug it back */ + if (!failover_replug_primary(n, dev, &err)) { + if (err) { + error_report_err(err); + } + } + } +} + +static void virtio_net_migration_state_notifier(Notifier *notifier, void *data) +{ + MigrationState *s = data; + VirtIONet *n = container_of(notifier, VirtIONet, migration_state); + virtio_net_handle_migration_primary(n, s); +} + +static bool failover_hide_primary_device(DeviceListener *listener, + const QDict *device_opts, + bool from_json, + Error **errp) +{ + VirtIONet *n = container_of(listener, VirtIONet, primary_listener); + const char *standby_id; + + if (!device_opts) { + return false; + } + + if (!qdict_haskey(device_opts, "failover_pair_id")) { + return false; + } + + if (!qdict_haskey(device_opts, "id")) { + error_setg(errp, "Device with failover_pair_id needs to have id"); + return false; + } + + standby_id = qdict_get_str(device_opts, "failover_pair_id"); + if (g_strcmp0(standby_id, n->netclient_name) != 0) { + return false; + } + + /* + * The hide helper can be called several times for a given device. + * Check there is only one primary for a virtio-net device but + * don't duplicate the qdict several times if it's called for the same + * device. 
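+ * A second primary with a different id is rejected below; a repeat
+ * call with the same id is ignored.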
+ */ + if (n->primary_opts) { + const char *old, *new; + /* devices with failover_pair_id always have an id */ + old = qdict_get_str(n->primary_opts, "id"); + new = qdict_get_str(device_opts, "id"); + if (strcmp(old, new) != 0) { + error_setg(errp, "Cannot attach more than one primary device to " + "'%s': '%s' and '%s'", n->netclient_name, old, new); + return false; + } + } else { + n->primary_opts = qdict_clone_shallow(device_opts); + n->primary_opts_from_json = from_json; + } + + /* failover_primary_hidden is set during feature negotiation */ + return qatomic_read(&n->failover_primary_hidden); +} + +static void virtio_net_device_realize(DeviceState *dev, Error **errp) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(dev); + VirtIONet *n = VIRTIO_NET(dev); + NetClientState *nc; + int i; + + if (n->net_conf.mtu) { + n->host_features |= (1ULL << VIRTIO_NET_F_MTU); + } + + if (n->net_conf.duplex_str) { + if (strncmp(n->net_conf.duplex_str, "half", 5) == 0) { + n->net_conf.duplex = DUPLEX_HALF; + } else if (strncmp(n->net_conf.duplex_str, "full", 5) == 0) { + n->net_conf.duplex = DUPLEX_FULL; + } else { + error_setg(errp, "'duplex' must be 'half' or 'full'"); + return; + } + n->host_features |= (1ULL << VIRTIO_NET_F_SPEED_DUPLEX); + } else { + n->net_conf.duplex = DUPLEX_UNKNOWN; + } + + if (n->net_conf.speed < SPEED_UNKNOWN) { + error_setg(errp, "'speed' must be between 0 and INT_MAX"); + return; + } + if (n->net_conf.speed >= 0) { + n->host_features |= (1ULL << VIRTIO_NET_F_SPEED_DUPLEX); + } + + if (n->failover) { + n->primary_listener.hide_device = failover_hide_primary_device; + qatomic_set(&n->failover_primary_hidden, true); + device_listener_register(&n->primary_listener); + n->migration_state.notify = virtio_net_migration_state_notifier; + add_migration_state_change_notifier(&n->migration_state); + n->host_features |= (1ULL << VIRTIO_NET_F_STANDBY); + } + + virtio_net_set_config_size(n, n->host_features); + virtio_init(vdev, "virtio-net", VIRTIO_ID_NET, n->config_size); + + /* + * We set a lower limit on RX queue size to what it always was. + * Guests that want a smaller ring can always resize it without + * help from us (using virtio 1 and up). + */ + if (n->net_conf.rx_queue_size < VIRTIO_NET_RX_QUEUE_MIN_SIZE || + n->net_conf.rx_queue_size > VIRTQUEUE_MAX_SIZE || + !is_power_of_2(n->net_conf.rx_queue_size)) { + error_setg(errp, "Invalid rx_queue_size (= %" PRIu16 "), " + "must be a power of 2 between %d and %d.", + n->net_conf.rx_queue_size, VIRTIO_NET_RX_QUEUE_MIN_SIZE, + VIRTQUEUE_MAX_SIZE); + virtio_cleanup(vdev); + return; + } + + if (n->net_conf.tx_queue_size < VIRTIO_NET_TX_QUEUE_MIN_SIZE || + n->net_conf.tx_queue_size > VIRTQUEUE_MAX_SIZE || + !is_power_of_2(n->net_conf.tx_queue_size)) { + error_setg(errp, "Invalid tx_queue_size (= %" PRIu16 "), " + "must be a power of 2 between %d and %d", + n->net_conf.tx_queue_size, VIRTIO_NET_TX_QUEUE_MIN_SIZE, + VIRTQUEUE_MAX_SIZE); + virtio_cleanup(vdev); + return; + } + + n->max_ncs = MAX(n->nic_conf.peers.queues, 1); + + /* + * Figure out the datapath queue pairs since the backend could + * provide control queue via peers as well. 
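+ * Only peers flagged as is_datapath count toward max_queue_pairs.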
+ */ + if (n->nic_conf.peers.queues) { + for (i = 0; i < n->max_ncs; i++) { + if (n->nic_conf.peers.ncs[i]->is_datapath) { + ++n->max_queue_pairs; + } + } + } + n->max_queue_pairs = MAX(n->max_queue_pairs, 1); + + if (n->max_queue_pairs * 2 + 1 > VIRTIO_QUEUE_MAX) { + error_setg(errp, "Invalid number of queue pairs (= %" PRIu32 "), " + "must be a positive integer less than %d.", + n->max_queue_pairs, (VIRTIO_QUEUE_MAX - 1) / 2); + virtio_cleanup(vdev); + return; + } + n->vqs = g_malloc0(sizeof(VirtIONetQueue) * n->max_queue_pairs); + n->curr_queue_pairs = 1; + n->tx_timeout = n->net_conf.txtimer; + + if (n->net_conf.tx && strcmp(n->net_conf.tx, "timer") + && strcmp(n->net_conf.tx, "bh")) { + warn_report("virtio-net: " + "Unknown option tx=%s, valid options: \"timer\" \"bh\"", + n->net_conf.tx); + error_printf("Defaulting to \"bh\""); + } + + n->net_conf.tx_queue_size = MIN(virtio_net_max_tx_queue_size(n), + n->net_conf.tx_queue_size); + + for (i = 0; i < n->max_queue_pairs; i++) { + virtio_net_add_queue(n, i); + } + + n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl); + qemu_macaddr_default_if_unset(&n->nic_conf.macaddr); + memcpy(&n->mac[0], &n->nic_conf.macaddr, sizeof(n->mac)); + n->status = VIRTIO_NET_S_LINK_UP; + qemu_announce_timer_reset(&n->announce_timer, migrate_announce_params(), + QEMU_CLOCK_VIRTUAL, + virtio_net_announce_timer, n); + n->announce_timer.round = 0; + + if (n->netclient_type) { + /* + * Happen when virtio_net_set_netclient_name has been called. + */ + n->nic = qemu_new_nic(&net_virtio_info, &n->nic_conf, + n->netclient_type, n->netclient_name, n); + } else { + n->nic = qemu_new_nic(&net_virtio_info, &n->nic_conf, + object_get_typename(OBJECT(dev)), dev->id, n); + } + + for (i = 0; i < n->max_queue_pairs; i++) { + n->nic->ncs[i].do_not_pad = true; + } + + peer_test_vnet_hdr(n); + if (peer_has_vnet_hdr(n)) { + for (i = 0; i < n->max_queue_pairs; i++) { + qemu_using_vnet_hdr(qemu_get_subqueue(n->nic, i)->peer, true); + } + n->host_hdr_len = sizeof(struct virtio_net_hdr); + } else { + n->host_hdr_len = 0; + } + + qemu_format_nic_info_str(qemu_get_queue(n->nic), n->nic_conf.macaddr.a); + + n->vqs[0].tx_waiting = 0; + n->tx_burst = n->net_conf.txburst; + virtio_net_set_mrg_rx_bufs(n, 0, 0, 0); + n->promisc = 1; /* for compatibility */ + + n->mac_table.macs = g_malloc0(MAC_TABLE_ENTRIES * ETH_ALEN); + + n->vlans = g_malloc0(MAX_VLAN >> 3); + + nc = qemu_get_queue(n->nic); + nc->rxfilter_notify_enabled = 1; + + if (nc->peer && nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_VDPA) { + struct virtio_net_config netcfg = {}; + memcpy(&netcfg.mac, &n->nic_conf.macaddr, ETH_ALEN); + vhost_net_set_config(get_vhost_net(nc->peer), + (uint8_t *)&netcfg, 0, ETH_ALEN, VHOST_SET_CONFIG_TYPE_MASTER); + } + QTAILQ_INIT(&n->rsc_chains); + n->qdev = dev; + + net_rx_pkt_init(&n->rx_pkt, false); + + if (virtio_has_feature(n->host_features, VIRTIO_NET_F_RSS)) { + virtio_net_load_ebpf(n); + } +} + +static void virtio_net_device_unrealize(DeviceState *dev) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(dev); + VirtIONet *n = VIRTIO_NET(dev); + int i, max_queue_pairs; + + if (virtio_has_feature(n->host_features, VIRTIO_NET_F_RSS)) { + virtio_net_unload_ebpf(n); + } + + /* This will stop vhost backend if appropriate. 
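+ * (Setting status to 0 goes through virtio_net_set_status, which
+ * tears down any running vhost instance.)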
*/ + virtio_net_set_status(vdev, 0); + + g_free(n->netclient_name); + n->netclient_name = NULL; + g_free(n->netclient_type); + n->netclient_type = NULL; + + g_free(n->mac_table.macs); + g_free(n->vlans); + + if (n->failover) { + qobject_unref(n->primary_opts); + device_listener_unregister(&n->primary_listener); + remove_migration_state_change_notifier(&n->migration_state); + } else { + assert(n->primary_opts == NULL); + } + + max_queue_pairs = n->multiqueue ? n->max_queue_pairs : 1; + for (i = 0; i < max_queue_pairs; i++) { + virtio_net_del_queue(n, i); + } + /* delete also control vq */ + virtio_del_queue(vdev, max_queue_pairs * 2); + qemu_announce_timer_del(&n->announce_timer, false); + g_free(n->vqs); + qemu_del_nic(n->nic); + virtio_net_rsc_cleanup(n); + g_free(n->rss_data.indirections_table); + net_rx_pkt_uninit(n->rx_pkt); + virtio_cleanup(vdev); +} + +static void virtio_net_instance_init(Object *obj) +{ + VirtIONet *n = VIRTIO_NET(obj); + + /* + * The default config_size is sizeof(struct virtio_net_config). + * Can be overriden with virtio_net_set_config_size. + */ + n->config_size = sizeof(struct virtio_net_config); + device_add_bootindex_property(obj, &n->nic_conf.bootindex, + "bootindex", "/ethernet-phy@0", + DEVICE(n)); + + ebpf_rss_init(&n->ebpf_rss); +} + +static int virtio_net_pre_save(void *opaque) +{ + VirtIONet *n = opaque; + + /* At this point, backend must be stopped, otherwise + * it might keep writing to memory. */ + assert(!n->vhost_started); + + return 0; +} + +static bool primary_unplug_pending(void *opaque) +{ + DeviceState *dev = opaque; + DeviceState *primary; + VirtIODevice *vdev = VIRTIO_DEVICE(dev); + VirtIONet *n = VIRTIO_NET(vdev); + + if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_STANDBY)) { + return false; + } + primary = failover_find_primary_device(n); + return primary ? 
primary->pending_deleted_event : false; +} + +static bool dev_unplug_pending(void *opaque) +{ + DeviceState *dev = opaque; + VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev); + + return vdc->primary_unplug_pending(dev); +} + +static const VMStateDescription vmstate_virtio_net = { + .name = "virtio-net", + .minimum_version_id = VIRTIO_NET_VM_VERSION, + .version_id = VIRTIO_NET_VM_VERSION, + .fields = (VMStateField[]) { + VMSTATE_VIRTIO_DEVICE, + VMSTATE_END_OF_LIST() + }, + .pre_save = virtio_net_pre_save, + .dev_unplug_pending = dev_unplug_pending, +}; + +static Property virtio_net_properties[] = { + DEFINE_PROP_BIT64("csum", VirtIONet, host_features, + VIRTIO_NET_F_CSUM, true), + DEFINE_PROP_BIT64("guest_csum", VirtIONet, host_features, + VIRTIO_NET_F_GUEST_CSUM, true), + DEFINE_PROP_BIT64("gso", VirtIONet, host_features, VIRTIO_NET_F_GSO, true), + DEFINE_PROP_BIT64("guest_tso4", VirtIONet, host_features, + VIRTIO_NET_F_GUEST_TSO4, true), + DEFINE_PROP_BIT64("guest_tso6", VirtIONet, host_features, + VIRTIO_NET_F_GUEST_TSO6, true), + DEFINE_PROP_BIT64("guest_ecn", VirtIONet, host_features, + VIRTIO_NET_F_GUEST_ECN, true), + DEFINE_PROP_BIT64("guest_ufo", VirtIONet, host_features, + VIRTIO_NET_F_GUEST_UFO, true), + DEFINE_PROP_BIT64("guest_announce", VirtIONet, host_features, + VIRTIO_NET_F_GUEST_ANNOUNCE, true), + DEFINE_PROP_BIT64("host_tso4", VirtIONet, host_features, + VIRTIO_NET_F_HOST_TSO4, true), + DEFINE_PROP_BIT64("host_tso6", VirtIONet, host_features, + VIRTIO_NET_F_HOST_TSO6, true), + DEFINE_PROP_BIT64("host_ecn", VirtIONet, host_features, + VIRTIO_NET_F_HOST_ECN, true), + DEFINE_PROP_BIT64("host_ufo", VirtIONet, host_features, + VIRTIO_NET_F_HOST_UFO, true), + DEFINE_PROP_BIT64("mrg_rxbuf", VirtIONet, host_features, + VIRTIO_NET_F_MRG_RXBUF, true), + DEFINE_PROP_BIT64("status", VirtIONet, host_features, + VIRTIO_NET_F_STATUS, true), + DEFINE_PROP_BIT64("ctrl_vq", VirtIONet, host_features, + VIRTIO_NET_F_CTRL_VQ, true), + DEFINE_PROP_BIT64("ctrl_rx", VirtIONet, host_features, + VIRTIO_NET_F_CTRL_RX, true), + DEFINE_PROP_BIT64("ctrl_vlan", VirtIONet, host_features, + VIRTIO_NET_F_CTRL_VLAN, true), + DEFINE_PROP_BIT64("ctrl_rx_extra", VirtIONet, host_features, + VIRTIO_NET_F_CTRL_RX_EXTRA, true), + DEFINE_PROP_BIT64("ctrl_mac_addr", VirtIONet, host_features, + VIRTIO_NET_F_CTRL_MAC_ADDR, true), + DEFINE_PROP_BIT64("ctrl_guest_offloads", VirtIONet, host_features, + VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, true), + DEFINE_PROP_BIT64("mq", VirtIONet, host_features, VIRTIO_NET_F_MQ, false), + DEFINE_PROP_BIT64("rss", VirtIONet, host_features, + VIRTIO_NET_F_RSS, false), + DEFINE_PROP_BIT64("hash", VirtIONet, host_features, + VIRTIO_NET_F_HASH_REPORT, false), + DEFINE_PROP_BIT64("guest_rsc_ext", VirtIONet, host_features, + VIRTIO_NET_F_RSC_EXT, false), + DEFINE_PROP_UINT32("rsc_interval", VirtIONet, rsc_timeout, + VIRTIO_NET_RSC_DEFAULT_INTERVAL), + DEFINE_NIC_PROPERTIES(VirtIONet, nic_conf), + DEFINE_PROP_UINT32("x-txtimer", VirtIONet, net_conf.txtimer, + TX_TIMER_INTERVAL), + DEFINE_PROP_INT32("x-txburst", VirtIONet, net_conf.txburst, TX_BURST), + DEFINE_PROP_STRING("tx", VirtIONet, net_conf.tx), + DEFINE_PROP_UINT16("rx_queue_size", VirtIONet, net_conf.rx_queue_size, + VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE), + DEFINE_PROP_UINT16("tx_queue_size", VirtIONet, net_conf.tx_queue_size, + VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE), + DEFINE_PROP_UINT16("host_mtu", VirtIONet, net_conf.mtu, 0), + DEFINE_PROP_BOOL("x-mtu-bypass-backend", VirtIONet, mtu_bypass_backend, + true), + DEFINE_PROP_INT32("speed", 
VirtIONet, net_conf.speed, SPEED_UNKNOWN), + DEFINE_PROP_STRING("duplex", VirtIONet, net_conf.duplex_str), + DEFINE_PROP_BOOL("failover", VirtIONet, failover, false), + DEFINE_PROP_END_OF_LIST(), +}; + +static void virtio_net_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass); + + device_class_set_props(dc, virtio_net_properties); + dc->vmsd = &vmstate_virtio_net; + set_bit(DEVICE_CATEGORY_NETWORK, dc->categories); + vdc->realize = virtio_net_device_realize; + vdc->unrealize = virtio_net_device_unrealize; + vdc->get_config = virtio_net_get_config; + vdc->set_config = virtio_net_set_config; + vdc->get_features = virtio_net_get_features; + vdc->set_features = virtio_net_set_features; + vdc->bad_features = virtio_net_bad_features; + vdc->reset = virtio_net_reset; + vdc->set_status = virtio_net_set_status; + vdc->guest_notifier_mask = virtio_net_guest_notifier_mask; + vdc->guest_notifier_pending = virtio_net_guest_notifier_pending; + vdc->legacy_features |= (0x1 << VIRTIO_NET_F_GSO); + vdc->post_load = virtio_net_post_load_virtio; + vdc->vmsd = &vmstate_virtio_net_device; + vdc->primary_unplug_pending = primary_unplug_pending; +} + +static const TypeInfo virtio_net_info = { + .name = TYPE_VIRTIO_NET, + .parent = TYPE_VIRTIO_DEVICE, + .instance_size = sizeof(VirtIONet), + .instance_init = virtio_net_instance_init, + .class_init = virtio_net_class_init, +}; + +static void virtio_register_types(void) +{ + type_register_static(&virtio_net_info); +} + +type_init(virtio_register_types) diff --git a/hw/net/vmware_utils.h b/hw/net/vmware_utils.h new file mode 100644 index 000000000..6b1e25159 --- /dev/null +++ b/hw/net/vmware_utils.h @@ -0,0 +1,153 @@ +/* + * QEMU VMWARE paravirtual devices - auxiliary code + * + * Copyright (c) 2012 Ravello Systems LTD (http://ravellosystems.com) + * + * Developed by Daynix Computing LTD (http://www.daynix.com) + * + * Authors: + * Dmitry Fleytman + * Yan Vugenfirer + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ * + */ + +#ifndef VMWARE_UTILS_H +#define VMWARE_UTILS_H + +#include "qemu/range.h" +#include "vmxnet_debug.h" + +/* + * Shared memory access functions with byte swap support + * Each function contains printout for reverse-engineering needs + * + */ +static inline void +vmw_shmem_read(PCIDevice *d, hwaddr addr, void *buf, int len) +{ + VMW_SHPRN("SHMEM r: %" PRIx64 ", len: %d to %p", addr, len, buf); + pci_dma_read(d, addr, buf, len); +} + +static inline void +vmw_shmem_write(PCIDevice *d, hwaddr addr, void *buf, int len) +{ + VMW_SHPRN("SHMEM w: %" PRIx64 ", len: %d to %p", addr, len, buf); + pci_dma_write(d, addr, buf, len); +} + +static inline void +vmw_shmem_rw(PCIDevice *d, hwaddr addr, void *buf, int len, int is_write) +{ + VMW_SHPRN("SHMEM r/w: %" PRIx64 ", len: %d (to %p), is write: %d", + addr, len, buf, is_write); + + if (is_write) + pci_dma_write(d, addr, buf, len); + else + pci_dma_read(d, addr, buf, len); +} + +static inline void +vmw_shmem_set(PCIDevice *d, hwaddr addr, uint8_t val, int len) +{ + int i; + VMW_SHPRN("SHMEM set: %" PRIx64 ", len: %d (value 0x%X)", addr, len, val); + + for (i = 0; i < len; i++) { + pci_dma_write(d, addr + i, &val, 1); + } +} + +static inline uint32_t +vmw_shmem_ld8(PCIDevice *d, hwaddr addr) +{ + uint8_t res; + pci_dma_read(d, addr, &res, 1); + VMW_SHPRN("SHMEM load8: %" PRIx64 " (value 0x%X)", addr, res); + return res; +} + +static inline void +vmw_shmem_st8(PCIDevice *d, hwaddr addr, uint8_t value) +{ + VMW_SHPRN("SHMEM store8: %" PRIx64 " (value 0x%X)", addr, value); + pci_dma_write(d, addr, &value, 1); +} + +static inline uint32_t +vmw_shmem_ld16(PCIDevice *d, hwaddr addr) +{ + uint16_t res; + pci_dma_read(d, addr, &res, 2); + res = le16_to_cpu(res); + VMW_SHPRN("SHMEM load16: %" PRIx64 " (value 0x%X)", addr, res); + return res; +} + +static inline void +vmw_shmem_st16(PCIDevice *d, hwaddr addr, uint16_t value) +{ + VMW_SHPRN("SHMEM store16: %" PRIx64 " (value 0x%X)", addr, value); + value = cpu_to_le16(value); + pci_dma_write(d, addr, &value, 2); +} + +static inline uint32_t +vmw_shmem_ld32(PCIDevice *d, hwaddr addr) +{ + uint32_t res; + pci_dma_read(d, addr, &res, 4); + res = le32_to_cpu(res); + VMW_SHPRN("SHMEM load32: %" PRIx64 " (value 0x%X)", addr, res); + return res; +} + +static inline void +vmw_shmem_st32(PCIDevice *d, hwaddr addr, uint32_t value) +{ + VMW_SHPRN("SHMEM store32: %" PRIx64 " (value 0x%X)", addr, value); + value = cpu_to_le32(value); + pci_dma_write(d, addr, &value, 4); +} + +static inline uint64_t +vmw_shmem_ld64(PCIDevice *d, hwaddr addr) +{ + uint64_t res; + pci_dma_read(d, addr, &res, 8); + res = le64_to_cpu(res); + VMW_SHPRN("SHMEM load64: %" PRIx64 " (value %" PRIx64 ")", addr, res); + return res; +} + +static inline void +vmw_shmem_st64(PCIDevice *d, hwaddr addr, uint64_t value) +{ + VMW_SHPRN("SHMEM store64: %" PRIx64 " (value %" PRIx64 ")", addr, value); + value = cpu_to_le64(value); + pci_dma_write(d, addr, &value, 8); +} + +/* Macros for simplification of operations on array-style registers */ + +/* + * Whether lies inside of array-style register defined by , + * number of elements () and element size () + * +*/ +#define VMW_IS_MULTIREG_ADDR(addr, base, cnt, regsize) \ + range_covers_byte(base, cnt * regsize, addr) + +/* + * Returns index of given register () in array-style register defined by + * and element size () + * +*/ +#define VMW_MULTIREG_IDX_BY_ADDR(addr, base, regsize) \ + (((addr) - (base)) / (regsize)) + +#endif diff --git a/hw/net/vmxnet3.c b/hw/net/vmxnet3.c new file mode 100644 index 
000000000..f65af4e9e --- /dev/null +++ b/hw/net/vmxnet3.c @@ -0,0 +1,2561 @@ +/* + * QEMU VMWARE VMXNET3 paravirtual NIC + * + * Copyright (c) 2012 Ravello Systems LTD (http://ravellosystems.com) + * + * Developed by Daynix Computing LTD (http://www.daynix.com) + * + * Authors: + * Dmitry Fleytman + * Tamir Shomer + * Yan Vugenfirer + * + * This work is licensed under the terms of the GNU GPL, version 2. + * See the COPYING file in the top-level directory. + * + */ + +#include "qemu/osdep.h" +#include "hw/hw.h" +#include "hw/pci/pci.h" +#include "hw/qdev-properties.h" +#include "net/tap.h" +#include "net/checksum.h" +#include "sysemu/sysemu.h" +#include "qemu/bswap.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "hw/pci/msix.h" +#include "hw/pci/msi.h" +#include "migration/register.h" +#include "migration/vmstate.h" + +#include "vmxnet3.h" +#include "vmxnet3_defs.h" +#include "vmxnet_debug.h" +#include "vmware_utils.h" +#include "net_tx_pkt.h" +#include "net_rx_pkt.h" +#include "qom/object.h" + +#define PCI_DEVICE_ID_VMWARE_VMXNET3_REVISION 0x1 +#define VMXNET3_MSIX_BAR_SIZE 0x2000 +#define MIN_BUF_SIZE 60 + +/* Compatibility flags for migration */ +#define VMXNET3_COMPAT_FLAG_OLD_MSI_OFFSETS_BIT 0 +#define VMXNET3_COMPAT_FLAG_OLD_MSI_OFFSETS \ + (1 << VMXNET3_COMPAT_FLAG_OLD_MSI_OFFSETS_BIT) +#define VMXNET3_COMPAT_FLAG_DISABLE_PCIE_BIT 1 +#define VMXNET3_COMPAT_FLAG_DISABLE_PCIE \ + (1 << VMXNET3_COMPAT_FLAG_DISABLE_PCIE_BIT) + +#define VMXNET3_EXP_EP_OFFSET (0x48) +#define VMXNET3_MSI_OFFSET(s) \ + ((s)->compat_flags & VMXNET3_COMPAT_FLAG_OLD_MSI_OFFSETS ? 0x50 : 0x84) +#define VMXNET3_MSIX_OFFSET(s) \ + ((s)->compat_flags & VMXNET3_COMPAT_FLAG_OLD_MSI_OFFSETS ? 0 : 0x9c) +#define VMXNET3_DSN_OFFSET (0x100) + +#define VMXNET3_BAR0_IDX (0) +#define VMXNET3_BAR1_IDX (1) +#define VMXNET3_MSIX_BAR_IDX (2) + +#define VMXNET3_OFF_MSIX_TABLE (0x000) +#define VMXNET3_OFF_MSIX_PBA(s) \ + ((s)->compat_flags & VMXNET3_COMPAT_FLAG_OLD_MSI_OFFSETS ? 0x800 : 0x1000) + +/* Link speed in Mbps should be shifted by 16 */ +#define VMXNET3_LINK_SPEED (1000 << 16) + +/* Link status: 1 - up, 0 - down. 
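+ * Status shares one word with the speed: a 1 Gbps link that is up
+ * reads back as (1000 << 16) | 0x1.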
*/ +#define VMXNET3_LINK_STATUS_UP 0x1 + +/* Least significant bit should be set for revision and version */ +#define VMXNET3_UPT_REVISION 0x1 +#define VMXNET3_DEVICE_REVISION 0x1 + +/* Number of interrupt vectors for non-MSIx modes */ +#define VMXNET3_MAX_NMSIX_INTRS (1) + +/* Macros for rings descriptors access */ +#define VMXNET3_READ_TX_QUEUE_DESCR8(_d, dpa, field) \ + (vmw_shmem_ld8(_d, dpa + offsetof(struct Vmxnet3_TxQueueDesc, field))) + +#define VMXNET3_WRITE_TX_QUEUE_DESCR8(_d, dpa, field, value) \ + (vmw_shmem_st8(_d, dpa + offsetof(struct Vmxnet3_TxQueueDesc, field, value))) + +#define VMXNET3_READ_TX_QUEUE_DESCR32(_d, dpa, field) \ + (vmw_shmem_ld32(_d, dpa + offsetof(struct Vmxnet3_TxQueueDesc, field))) + +#define VMXNET3_WRITE_TX_QUEUE_DESCR32(_d, dpa, field, value) \ + (vmw_shmem_st32(_d, dpa + offsetof(struct Vmxnet3_TxQueueDesc, field), value)) + +#define VMXNET3_READ_TX_QUEUE_DESCR64(_d, dpa, field) \ + (vmw_shmem_ld64(_d, dpa + offsetof(struct Vmxnet3_TxQueueDesc, field))) + +#define VMXNET3_WRITE_TX_QUEUE_DESCR64(_d, dpa, field, value) \ + (vmw_shmem_st64(_d, dpa + offsetof(struct Vmxnet3_TxQueueDesc, field), value)) + +#define VMXNET3_READ_RX_QUEUE_DESCR64(_d, dpa, field) \ + (vmw_shmem_ld64(_d, dpa + offsetof(struct Vmxnet3_RxQueueDesc, field))) + +#define VMXNET3_READ_RX_QUEUE_DESCR32(_d, dpa, field) \ + (vmw_shmem_ld32(_d, dpa + offsetof(struct Vmxnet3_RxQueueDesc, field))) + +#define VMXNET3_WRITE_RX_QUEUE_DESCR64(_d, dpa, field, value) \ + (vmw_shmem_st64(_d, dpa + offsetof(struct Vmxnet3_RxQueueDesc, field), value)) + +#define VMXNET3_WRITE_RX_QUEUE_DESCR8(_d, dpa, field, value) \ + (vmw_shmem_st8(_d, dpa + offsetof(struct Vmxnet3_RxQueueDesc, field), value)) + +/* Macros for guest driver shared area access */ +#define VMXNET3_READ_DRV_SHARED64(_d, shpa, field) \ + (vmw_shmem_ld64(_d, shpa + offsetof(struct Vmxnet3_DriverShared, field))) + +#define VMXNET3_READ_DRV_SHARED32(_d, shpa, field) \ + (vmw_shmem_ld32(_d, shpa + offsetof(struct Vmxnet3_DriverShared, field))) + +#define VMXNET3_WRITE_DRV_SHARED32(_d, shpa, field, val) \ + (vmw_shmem_st32(_d, shpa + offsetof(struct Vmxnet3_DriverShared, field), val)) + +#define VMXNET3_READ_DRV_SHARED16(_d, shpa, field) \ + (vmw_shmem_ld16(_d, shpa + offsetof(struct Vmxnet3_DriverShared, field))) + +#define VMXNET3_READ_DRV_SHARED8(_d, shpa, field) \ + (vmw_shmem_ld8(_d, shpa + offsetof(struct Vmxnet3_DriverShared, field))) + +#define VMXNET3_READ_DRV_SHARED(_d, shpa, field, b, l) \ + (vmw_shmem_read(_d, shpa + offsetof(struct Vmxnet3_DriverShared, field), b, l)) + +#define VMXNET_FLAG_IS_SET(field, flag) (((field) & (flag)) == (flag)) + +struct VMXNET3Class { + PCIDeviceClass parent_class; + DeviceRealize parent_dc_realize; +}; +typedef struct VMXNET3Class VMXNET3Class; + +DECLARE_CLASS_CHECKERS(VMXNET3Class, VMXNET3_DEVICE, + TYPE_VMXNET3) + +static inline void vmxnet3_ring_init(PCIDevice *d, + Vmxnet3Ring *ring, + hwaddr pa, + uint32_t size, + uint32_t cell_size, + bool zero_region) +{ + ring->pa = pa; + ring->size = size; + ring->cell_size = cell_size; + ring->gen = VMXNET3_INIT_GEN; + ring->next = 0; + + if (zero_region) { + vmw_shmem_set(d, pa, 0, size * cell_size); + } +} + +#define VMXNET3_RING_DUMP(macro, ring_name, ridx, r) \ + macro("%s#%d: base %" PRIx64 " size %u cell_size %u gen %d next %u", \ + (ring_name), (ridx), \ + (r)->pa, (r)->size, (r)->cell_size, (r)->gen, (r)->next) + +static inline void vmxnet3_ring_inc(Vmxnet3Ring *ring) +{ + if (++ring->next >= ring->size) { + ring->next = 0; + ring->gen ^= 1; + 
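+ /* a wrap flips gen, so cells from the previous lap stop matching */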
} +} + +static inline void vmxnet3_ring_dec(Vmxnet3Ring *ring) +{ + if (ring->next-- == 0) { + ring->next = ring->size - 1; + ring->gen ^= 1; + } +} + +static inline hwaddr vmxnet3_ring_curr_cell_pa(Vmxnet3Ring *ring) +{ + return ring->pa + ring->next * ring->cell_size; +} + +static inline void vmxnet3_ring_read_curr_cell(PCIDevice *d, Vmxnet3Ring *ring, + void *buff) +{ + vmw_shmem_read(d, vmxnet3_ring_curr_cell_pa(ring), buff, ring->cell_size); +} + +static inline void vmxnet3_ring_write_curr_cell(PCIDevice *d, Vmxnet3Ring *ring, + void *buff) +{ + vmw_shmem_write(d, vmxnet3_ring_curr_cell_pa(ring), buff, ring->cell_size); +} + +static inline size_t vmxnet3_ring_curr_cell_idx(Vmxnet3Ring *ring) +{ + return ring->next; +} + +static inline uint8_t vmxnet3_ring_curr_gen(Vmxnet3Ring *ring) +{ + return ring->gen; +} + +/* Debug trace-related functions */ +static inline void +vmxnet3_dump_tx_descr(struct Vmxnet3_TxDesc *descr) +{ + VMW_PKPRN("TX DESCR: " + "addr %" PRIx64 ", len: %d, gen: %d, rsvd: %d, " + "dtype: %d, ext1: %d, msscof: %d, hlen: %d, om: %d, " + "eop: %d, cq: %d, ext2: %d, ti: %d, tci: %d", + descr->addr, descr->len, descr->gen, descr->rsvd, + descr->dtype, descr->ext1, descr->msscof, descr->hlen, descr->om, + descr->eop, descr->cq, descr->ext2, descr->ti, descr->tci); +} + +static inline void +vmxnet3_dump_virt_hdr(struct virtio_net_hdr *vhdr) +{ + VMW_PKPRN("VHDR: flags 0x%x, gso_type: 0x%x, hdr_len: %d, gso_size: %d, " + "csum_start: %d, csum_offset: %d", + vhdr->flags, vhdr->gso_type, vhdr->hdr_len, vhdr->gso_size, + vhdr->csum_start, vhdr->csum_offset); +} + +static inline void +vmxnet3_dump_rx_descr(struct Vmxnet3_RxDesc *descr) +{ + VMW_PKPRN("RX DESCR: addr %" PRIx64 ", len: %d, gen: %d, rsvd: %d, " + "dtype: %d, ext1: %d, btype: %d", + descr->addr, descr->len, descr->gen, + descr->rsvd, descr->dtype, descr->ext1, descr->btype); +} + +/* Interrupt management */ + +/* + * This function returns sign whether interrupt line is in asserted state + * This depends on the type of interrupt used. 
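+ * The caller records the result in interrupt_states[].is_asserted.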
For INTX interrupt line will + * be asserted until explicit deassertion, for MSI(X) interrupt line will + * be deasserted automatically due to notification semantics of the MSI(X) + * interrupts + */ +static bool _vmxnet3_assert_interrupt_line(VMXNET3State *s, uint32_t int_idx) +{ + PCIDevice *d = PCI_DEVICE(s); + + if (s->msix_used && msix_enabled(d)) { + VMW_IRPRN("Sending MSI-X notification for vector %u", int_idx); + msix_notify(d, int_idx); + return false; + } + if (msi_enabled(d)) { + VMW_IRPRN("Sending MSI notification for vector %u", int_idx); + msi_notify(d, int_idx); + return false; + } + + VMW_IRPRN("Asserting line for interrupt %u", int_idx); + pci_irq_assert(d); + return true; +} + +static void _vmxnet3_deassert_interrupt_line(VMXNET3State *s, int lidx) +{ + PCIDevice *d = PCI_DEVICE(s); + + /* + * This function should never be called for MSI(X) interrupts + * because deassertion never required for message interrupts + */ + assert(!s->msix_used || !msix_enabled(d)); + /* + * This function should never be called for MSI(X) interrupts + * because deassertion never required for message interrupts + */ + assert(!msi_enabled(d)); + + VMW_IRPRN("Deasserting line for interrupt %u", lidx); + pci_irq_deassert(d); +} + +static void vmxnet3_update_interrupt_line_state(VMXNET3State *s, int lidx) +{ + if (!s->interrupt_states[lidx].is_pending && + s->interrupt_states[lidx].is_asserted) { + VMW_IRPRN("New interrupt line state for index %d is DOWN", lidx); + _vmxnet3_deassert_interrupt_line(s, lidx); + s->interrupt_states[lidx].is_asserted = false; + return; + } + + if (s->interrupt_states[lidx].is_pending && + !s->interrupt_states[lidx].is_masked && + !s->interrupt_states[lidx].is_asserted) { + VMW_IRPRN("New interrupt line state for index %d is UP", lidx); + s->interrupt_states[lidx].is_asserted = + _vmxnet3_assert_interrupt_line(s, lidx); + s->interrupt_states[lidx].is_pending = false; + return; + } +} + +static void vmxnet3_trigger_interrupt(VMXNET3State *s, int lidx) +{ + PCIDevice *d = PCI_DEVICE(s); + s->interrupt_states[lidx].is_pending = true; + vmxnet3_update_interrupt_line_state(s, lidx); + + if (s->msix_used && msix_enabled(d) && s->auto_int_masking) { + goto do_automask; + } + + if (msi_enabled(d) && s->auto_int_masking) { + goto do_automask; + } + + return; + +do_automask: + s->interrupt_states[lidx].is_masked = true; + vmxnet3_update_interrupt_line_state(s, lidx); +} + +static bool vmxnet3_interrupt_asserted(VMXNET3State *s, int lidx) +{ + return s->interrupt_states[lidx].is_asserted; +} + +static void vmxnet3_clear_interrupt(VMXNET3State *s, int int_idx) +{ + s->interrupt_states[int_idx].is_pending = false; + if (s->auto_int_masking) { + s->interrupt_states[int_idx].is_masked = true; + } + vmxnet3_update_interrupt_line_state(s, int_idx); +} + +static void +vmxnet3_on_interrupt_mask_changed(VMXNET3State *s, int lidx, bool is_masked) +{ + s->interrupt_states[lidx].is_masked = is_masked; + vmxnet3_update_interrupt_line_state(s, lidx); +} + +static bool vmxnet3_verify_driver_magic(PCIDevice *d, hwaddr dshmem) +{ + return (VMXNET3_READ_DRV_SHARED32(d, dshmem, magic) == VMXNET3_REV1_MAGIC); +} + +#define VMXNET3_GET_BYTE(x, byte_num) (((x) >> (byte_num)*8) & 0xFF) +#define VMXNET3_MAKE_BYTE(byte_num, val) \ + (((uint32_t)((val) & 0xFF)) << (byte_num)*8) + +static void vmxnet3_set_variable_mac(VMXNET3State *s, uint32_t h, uint32_t l) +{ + s->conf.macaddr.a[0] = VMXNET3_GET_BYTE(l, 0); + s->conf.macaddr.a[1] = VMXNET3_GET_BYTE(l, 1); + s->conf.macaddr.a[2] = VMXNET3_GET_BYTE(l, 2); + 
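+ /* e.g. MAC 00:11:22:33:44:55 arrives as l = 0x33221100, h = 0x5544 */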
s->conf.macaddr.a[3] = VMXNET3_GET_BYTE(l, 3); + s->conf.macaddr.a[4] = VMXNET3_GET_BYTE(h, 0); + s->conf.macaddr.a[5] = VMXNET3_GET_BYTE(h, 1); + + VMW_CFPRN("Variable MAC: " MAC_FMT, MAC_ARG(s->conf.macaddr.a)); + + qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a); +} + +static uint64_t vmxnet3_get_mac_low(MACAddr *addr) +{ + return VMXNET3_MAKE_BYTE(0, addr->a[0]) | + VMXNET3_MAKE_BYTE(1, addr->a[1]) | + VMXNET3_MAKE_BYTE(2, addr->a[2]) | + VMXNET3_MAKE_BYTE(3, addr->a[3]); +} + +static uint64_t vmxnet3_get_mac_high(MACAddr *addr) +{ + return VMXNET3_MAKE_BYTE(0, addr->a[4]) | + VMXNET3_MAKE_BYTE(1, addr->a[5]); +} + +static void +vmxnet3_inc_tx_consumption_counter(VMXNET3State *s, int qidx) +{ + vmxnet3_ring_inc(&s->txq_descr[qidx].tx_ring); +} + +static inline void +vmxnet3_inc_rx_consumption_counter(VMXNET3State *s, int qidx, int ridx) +{ + vmxnet3_ring_inc(&s->rxq_descr[qidx].rx_ring[ridx]); +} + +static inline void +vmxnet3_inc_tx_completion_counter(VMXNET3State *s, int qidx) +{ + vmxnet3_ring_inc(&s->txq_descr[qidx].comp_ring); +} + +static void +vmxnet3_inc_rx_completion_counter(VMXNET3State *s, int qidx) +{ + vmxnet3_ring_inc(&s->rxq_descr[qidx].comp_ring); +} + +static void +vmxnet3_dec_rx_completion_counter(VMXNET3State *s, int qidx) +{ + vmxnet3_ring_dec(&s->rxq_descr[qidx].comp_ring); +} + +static void vmxnet3_complete_packet(VMXNET3State *s, int qidx, uint32_t tx_ridx) +{ + struct Vmxnet3_TxCompDesc txcq_descr; + PCIDevice *d = PCI_DEVICE(s); + + VMXNET3_RING_DUMP(VMW_RIPRN, "TXC", qidx, &s->txq_descr[qidx].comp_ring); + + memset(&txcq_descr, 0, sizeof(txcq_descr)); + txcq_descr.txdIdx = tx_ridx; + txcq_descr.gen = vmxnet3_ring_curr_gen(&s->txq_descr[qidx].comp_ring); + txcq_descr.val1 = cpu_to_le32(txcq_descr.val1); + txcq_descr.val2 = cpu_to_le32(txcq_descr.val2); + vmxnet3_ring_write_curr_cell(d, &s->txq_descr[qidx].comp_ring, &txcq_descr); + + /* Flush changes in TX descriptor before changing the counter value */ + smp_wmb(); + + vmxnet3_inc_tx_completion_counter(s, qidx); + vmxnet3_trigger_interrupt(s, s->txq_descr[qidx].intr_idx); +} + +static bool +vmxnet3_setup_tx_offloads(VMXNET3State *s) +{ + switch (s->offload_mode) { + case VMXNET3_OM_NONE: + net_tx_pkt_build_vheader(s->tx_pkt, false, false, 0); + break; + + case VMXNET3_OM_CSUM: + net_tx_pkt_build_vheader(s->tx_pkt, false, true, 0); + VMW_PKPRN("L4 CSO requested\n"); + break; + + case VMXNET3_OM_TSO: + net_tx_pkt_build_vheader(s->tx_pkt, true, true, + s->cso_or_gso_size); + net_tx_pkt_update_ip_checksums(s->tx_pkt); + VMW_PKPRN("GSO offload requested."); + break; + + default: + g_assert_not_reached(); + return false; + } + + return true; +} + +static void +vmxnet3_tx_retrieve_metadata(VMXNET3State *s, + const struct Vmxnet3_TxDesc *txd) +{ + s->offload_mode = txd->om; + s->cso_or_gso_size = txd->msscof; + s->tci = txd->tci; + s->needs_vlan = txd->ti; +} + +typedef enum { + VMXNET3_PKT_STATUS_OK, + VMXNET3_PKT_STATUS_ERROR, + VMXNET3_PKT_STATUS_DISCARD,/* only for tx */ + VMXNET3_PKT_STATUS_OUT_OF_BUF /* only for rx */ +} Vmxnet3PktStatus; + +static void +vmxnet3_on_tx_done_update_stats(VMXNET3State *s, int qidx, + Vmxnet3PktStatus status) +{ + size_t tot_len = net_tx_pkt_get_total_len(s->tx_pkt); + struct UPT1_TxStats *stats = &s->txq_descr[qidx].txq_stats; + + switch (status) { + case VMXNET3_PKT_STATUS_OK: + switch (net_tx_pkt_get_packet_type(s->tx_pkt)) { + case ETH_PKT_BCAST: + stats->bcastPktsTxOK++; + stats->bcastBytesTxOK += tot_len; + break; + case ETH_PKT_MCAST: + 
stats->mcastPktsTxOK++; + stats->mcastBytesTxOK += tot_len; + break; + case ETH_PKT_UCAST: + stats->ucastPktsTxOK++; + stats->ucastBytesTxOK += tot_len; + break; + default: + g_assert_not_reached(); + } + + if (s->offload_mode == VMXNET3_OM_TSO) { + /* + * According to VMWARE headers this statistic is a number + * of packets after segmentation but since we don't have + * this information in QEMU model, the best we can do is to + * provide number of non-segmented packets + */ + stats->TSOPktsTxOK++; + stats->TSOBytesTxOK += tot_len; + } + break; + + case VMXNET3_PKT_STATUS_DISCARD: + stats->pktsTxDiscard++; + break; + + case VMXNET3_PKT_STATUS_ERROR: + stats->pktsTxError++; + break; + + default: + g_assert_not_reached(); + } +} + +static void +vmxnet3_on_rx_done_update_stats(VMXNET3State *s, + int qidx, + Vmxnet3PktStatus status) +{ + struct UPT1_RxStats *stats = &s->rxq_descr[qidx].rxq_stats; + size_t tot_len = net_rx_pkt_get_total_len(s->rx_pkt); + + switch (status) { + case VMXNET3_PKT_STATUS_OUT_OF_BUF: + stats->pktsRxOutOfBuf++; + break; + + case VMXNET3_PKT_STATUS_ERROR: + stats->pktsRxError++; + break; + case VMXNET3_PKT_STATUS_OK: + switch (net_rx_pkt_get_packet_type(s->rx_pkt)) { + case ETH_PKT_BCAST: + stats->bcastPktsRxOK++; + stats->bcastBytesRxOK += tot_len; + break; + case ETH_PKT_MCAST: + stats->mcastPktsRxOK++; + stats->mcastBytesRxOK += tot_len; + break; + case ETH_PKT_UCAST: + stats->ucastPktsRxOK++; + stats->ucastBytesRxOK += tot_len; + break; + default: + g_assert_not_reached(); + } + + if (tot_len > s->mtu) { + stats->LROPktsRxOK++; + stats->LROBytesRxOK += tot_len; + } + break; + default: + g_assert_not_reached(); + } +} + +static inline void +vmxnet3_ring_read_curr_txdesc(PCIDevice *pcidev, Vmxnet3Ring *ring, + struct Vmxnet3_TxDesc *txd) +{ + vmxnet3_ring_read_curr_cell(pcidev, ring, txd); + txd->addr = le64_to_cpu(txd->addr); + txd->val1 = le32_to_cpu(txd->val1); + txd->val2 = le32_to_cpu(txd->val2); +} + +static inline bool +vmxnet3_pop_next_tx_descr(VMXNET3State *s, + int qidx, + struct Vmxnet3_TxDesc *txd, + uint32_t *descr_idx) +{ + Vmxnet3Ring *ring = &s->txq_descr[qidx].tx_ring; + PCIDevice *d = PCI_DEVICE(s); + + vmxnet3_ring_read_curr_txdesc(d, ring, txd); + if (txd->gen == vmxnet3_ring_curr_gen(ring)) { + /* Only read after generation field verification */ + smp_rmb(); + /* Re-read to be sure we got the latest version */ + vmxnet3_ring_read_curr_txdesc(d, ring, txd); + VMXNET3_RING_DUMP(VMW_RIPRN, "TX", qidx, ring); + *descr_idx = vmxnet3_ring_curr_cell_idx(ring); + vmxnet3_inc_tx_consumption_counter(s, qidx); + return true; + } + + return false; +} + +static bool +vmxnet3_send_packet(VMXNET3State *s, uint32_t qidx) +{ + Vmxnet3PktStatus status = VMXNET3_PKT_STATUS_OK; + + if (!vmxnet3_setup_tx_offloads(s)) { + status = VMXNET3_PKT_STATUS_ERROR; + goto func_exit; + } + + /* debug prints */ + vmxnet3_dump_virt_hdr(net_tx_pkt_get_vhdr(s->tx_pkt)); + net_tx_pkt_dump(s->tx_pkt); + + if (!net_tx_pkt_send(s->tx_pkt, qemu_get_queue(s->nic))) { + status = VMXNET3_PKT_STATUS_DISCARD; + goto func_exit; + } + +func_exit: + vmxnet3_on_tx_done_update_stats(s, qidx, status); + return (status == VMXNET3_PKT_STATUS_OK); +} + +static void vmxnet3_process_tx_queue(VMXNET3State *s, int qidx) +{ + struct Vmxnet3_TxDesc txd; + uint32_t txd_idx; + uint32_t data_len; + hwaddr data_pa; + + for (;;) { + if (!vmxnet3_pop_next_tx_descr(s, qidx, &txd, &txd_idx)) { + break; + } + + vmxnet3_dump_tx_descr(&txd); + + if (!s->skip_current_tx_pkt) { + data_len = (txd.len > 0) ? 
txd.len : VMXNET3_MAX_TX_BUF_SIZE; + data_pa = txd.addr; + + if (!net_tx_pkt_add_raw_fragment(s->tx_pkt, + data_pa, + data_len)) { + s->skip_current_tx_pkt = true; + } + } + + if (s->tx_sop) { + vmxnet3_tx_retrieve_metadata(s, &txd); + s->tx_sop = false; + } + + if (txd.eop) { + if (!s->skip_current_tx_pkt && net_tx_pkt_parse(s->tx_pkt)) { + if (s->needs_vlan) { + net_tx_pkt_setup_vlan_header(s->tx_pkt, s->tci); + } + + vmxnet3_send_packet(s, qidx); + } else { + vmxnet3_on_tx_done_update_stats(s, qidx, + VMXNET3_PKT_STATUS_ERROR); + } + + vmxnet3_complete_packet(s, qidx, txd_idx); + s->tx_sop = true; + s->skip_current_tx_pkt = false; + net_tx_pkt_reset(s->tx_pkt); + } + } +} + +static inline void +vmxnet3_read_next_rx_descr(VMXNET3State *s, int qidx, int ridx, + struct Vmxnet3_RxDesc *dbuf, uint32_t *didx) +{ + PCIDevice *d = PCI_DEVICE(s); + + Vmxnet3Ring *ring = &s->rxq_descr[qidx].rx_ring[ridx]; + *didx = vmxnet3_ring_curr_cell_idx(ring); + vmxnet3_ring_read_curr_cell(d, ring, dbuf); + dbuf->addr = le64_to_cpu(dbuf->addr); + dbuf->val1 = le32_to_cpu(dbuf->val1); + dbuf->ext1 = le32_to_cpu(dbuf->ext1); +} + +static inline uint8_t +vmxnet3_get_rx_ring_gen(VMXNET3State *s, int qidx, int ridx) +{ + return s->rxq_descr[qidx].rx_ring[ridx].gen; +} + +static inline hwaddr +vmxnet3_pop_rxc_descr(VMXNET3State *s, int qidx, uint32_t *descr_gen) +{ + uint8_t ring_gen; + struct Vmxnet3_RxCompDesc rxcd; + + hwaddr daddr = + vmxnet3_ring_curr_cell_pa(&s->rxq_descr[qidx].comp_ring); + + pci_dma_read(PCI_DEVICE(s), + daddr, &rxcd, sizeof(struct Vmxnet3_RxCompDesc)); + rxcd.val1 = le32_to_cpu(rxcd.val1); + rxcd.val2 = le32_to_cpu(rxcd.val2); + rxcd.val3 = le32_to_cpu(rxcd.val3); + ring_gen = vmxnet3_ring_curr_gen(&s->rxq_descr[qidx].comp_ring); + + if (rxcd.gen != ring_gen) { + *descr_gen = ring_gen; + vmxnet3_inc_rx_completion_counter(s, qidx); + return daddr; + } + + return 0; +} + +static inline void +vmxnet3_revert_rxc_descr(VMXNET3State *s, int qidx) +{ + vmxnet3_dec_rx_completion_counter(s, qidx); +} + +#define RXQ_IDX (0) +#define RX_HEAD_BODY_RING (0) +#define RX_BODY_ONLY_RING (1) + +static bool +vmxnet3_get_next_head_rx_descr(VMXNET3State *s, + struct Vmxnet3_RxDesc *descr_buf, + uint32_t *descr_idx, + uint32_t *ridx) +{ + for (;;) { + uint32_t ring_gen; + vmxnet3_read_next_rx_descr(s, RXQ_IDX, RX_HEAD_BODY_RING, + descr_buf, descr_idx); + + /* If no more free descriptors - return */ + ring_gen = vmxnet3_get_rx_ring_gen(s, RXQ_IDX, RX_HEAD_BODY_RING); + if (descr_buf->gen != ring_gen) { + return false; + } + + /* Only read after generation field verification */ + smp_rmb(); + /* Re-read to be sure we got the latest version */ + vmxnet3_read_next_rx_descr(s, RXQ_IDX, RX_HEAD_BODY_RING, + descr_buf, descr_idx); + + /* Mark current descriptor as used/skipped */ + vmxnet3_inc_rx_consumption_counter(s, RXQ_IDX, RX_HEAD_BODY_RING); + + /* If this is what we are looking for - return */ + if (descr_buf->btype == VMXNET3_RXD_BTYPE_HEAD) { + *ridx = RX_HEAD_BODY_RING; + return true; + } + } +} + +static bool +vmxnet3_get_next_body_rx_descr(VMXNET3State *s, + struct Vmxnet3_RxDesc *d, + uint32_t *didx, + uint32_t *ridx) +{ + vmxnet3_read_next_rx_descr(s, RXQ_IDX, RX_HEAD_BODY_RING, d, didx); + + /* Try to find corresponding descriptor in head/body ring */ + if (d->gen == vmxnet3_get_rx_ring_gen(s, RXQ_IDX, RX_HEAD_BODY_RING)) { + /* Only read after generation field verification */ + smp_rmb(); + /* Re-read to be sure we got the latest version */ + vmxnet3_read_next_rx_descr(s, RXQ_IDX, RX_HEAD_BODY_RING, 
d, didx);
+        if (d->btype == VMXNET3_RXD_BTYPE_BODY) {
+            vmxnet3_inc_rx_consumption_counter(s, RXQ_IDX, RX_HEAD_BODY_RING);
+            *ridx = RX_HEAD_BODY_RING;
+            return true;
+        }
+    }
+
+    /*
+     * If there are no free descriptors on the head/body ring, or the next
+     * free descriptor is a head descriptor, switch to the body-only ring.
+     */
+    vmxnet3_read_next_rx_descr(s, RXQ_IDX, RX_BODY_ONLY_RING, d, didx);
+
+    /* If no more free descriptors - return */
+    if (d->gen == vmxnet3_get_rx_ring_gen(s, RXQ_IDX, RX_BODY_ONLY_RING)) {
+        /* Only read after generation field verification */
+        smp_rmb();
+        /* Re-read to be sure we got the latest version */
+        vmxnet3_read_next_rx_descr(s, RXQ_IDX, RX_BODY_ONLY_RING, d, didx);
+        assert(d->btype == VMXNET3_RXD_BTYPE_BODY);
+        *ridx = RX_BODY_ONLY_RING;
+        vmxnet3_inc_rx_consumption_counter(s, RXQ_IDX, RX_BODY_ONLY_RING);
+        return true;
+    }
+
+    return false;
+}
+
+static inline bool
+vmxnet3_get_next_rx_descr(VMXNET3State *s, bool is_head,
+                          struct Vmxnet3_RxDesc *descr_buf,
+                          uint32_t *descr_idx,
+                          uint32_t *ridx)
+{
+    if (is_head || !s->rx_packets_compound) {
+        return vmxnet3_get_next_head_rx_descr(s, descr_buf, descr_idx, ridx);
+    } else {
+        return vmxnet3_get_next_body_rx_descr(s, descr_buf, descr_idx, ridx);
+    }
+}
+
+/* In case the packet was csum offloaded (either NEEDS_CSUM or DATA_VALID),
+ * the implementation always passes an RxCompDesc with a "Checksum
+ * calculated and found correct" indication to the OS (cnc=0 and tuc=1, see
+ * vmxnet3_rx_update_descr). This emulates the observed ESXi behavior.
+ *
+ * Therefore, if the packet has NEEDS_CSUM set, we must calculate
+ * and place a fully computed checksum into the tcp/udp header.
+ * Otherwise, the OS driver will receive a checksum-correct indication
+ * (CHECKSUM_UNNECESSARY), but with the actual tcp/udp checksum field
+ * having just the pseudo header csum value.
+ *
+ * While this is not a problem if the packet is destined for local delivery,
+ * in the case where the host OS performs forwarding, it will forward an
+ * incorrectly checksummed packet.
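+ *
+ * As an illustration (a sketch, assuming a standard 14-byte Ethernet
+ * header and a 20-byte IPv4 header, so csum_start = 34 and, for TCP,
+ * csum_offset = 16): the code below sums the bytes from csum_start to
+ * the end of the frame and stores the folded 16-bit result at
+ * csum_start + csum_offset, i.e. directly into the TCP checksum field:
+ *
+ *   data = (uint8_t *)pkt_data + vhdr->csum_start;
+ *   len  = pkt_len - vhdr->csum_start;
+ *   stw_be_p(data + vhdr->csum_offset,
+ *            net_checksum_finish_nozero(net_checksum_add(len, data)));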
+ */
+static void vmxnet3_rx_need_csum_calculate(struct NetRxPkt *pkt,
+                                           const void *pkt_data,
+                                           size_t pkt_len)
+{
+    struct virtio_net_hdr *vhdr;
+    bool isip4, isip6, istcp, isudp;
+    uint8_t *data;
+    int len;
+
+    if (!net_rx_pkt_has_virt_hdr(pkt)) {
+        return;
+    }
+
+    vhdr = net_rx_pkt_get_vhdr(pkt);
+    if (!VMXNET_FLAG_IS_SET(vhdr->flags, VIRTIO_NET_HDR_F_NEEDS_CSUM)) {
+        return;
+    }
+
+    net_rx_pkt_get_protocols(pkt, &isip4, &isip6, &isudp, &istcp);
+    if (!(isip4 || isip6) || !(istcp || isudp)) {
+        return;
+    }
+
+    vmxnet3_dump_virt_hdr(vhdr);
+
+    /* Validate packet len: csum_start + csum_offset + length of csum field */
+    if (pkt_len < (vhdr->csum_start + vhdr->csum_offset + 2)) {
+        VMW_PKPRN("packet len:%zu < csum_start(%d) + csum_offset(%d) + 2, "
+                  "cannot calculate checksum",
+                  pkt_len, vhdr->csum_start, vhdr->csum_offset);
+        return;
+    }
+
+    data = (uint8_t *)pkt_data + vhdr->csum_start;
+    len = pkt_len - vhdr->csum_start;
+    /* Put the checksum obtained into the packet */
+    stw_be_p(data + vhdr->csum_offset,
+             net_checksum_finish_nozero(net_checksum_add(len, data)));
+
+    vhdr->flags &= ~VIRTIO_NET_HDR_F_NEEDS_CSUM;
+    vhdr->flags |= VIRTIO_NET_HDR_F_DATA_VALID;
+}
+
+static void vmxnet3_rx_update_descr(struct NetRxPkt *pkt,
+                                    struct Vmxnet3_RxCompDesc *rxcd)
+{
+    int csum_ok, is_gso;
+    bool isip4, isip6, istcp, isudp;
+    struct virtio_net_hdr *vhdr;
+    uint8_t offload_type;
+
+    if (net_rx_pkt_is_vlan_stripped(pkt)) {
+        rxcd->ts = 1;
+        rxcd->tci = net_rx_pkt_get_vlan_tag(pkt);
+    }
+
+    if (!net_rx_pkt_has_virt_hdr(pkt)) {
+        goto nocsum;
+    }
+
+    vhdr = net_rx_pkt_get_vhdr(pkt);
+    /*
+     * The checksum is considered valid either when the lower level tells
+     * us so, or when the lower level requires checksum offload, which
+     * means the packet was produced/bridged locally and has not traveled
+     * over the network since the last checksum calculation or production.
+     */
+    csum_ok = VMXNET_FLAG_IS_SET(vhdr->flags, VIRTIO_NET_HDR_F_DATA_VALID) ||
+              VMXNET_FLAG_IS_SET(vhdr->flags, VIRTIO_NET_HDR_F_NEEDS_CSUM);
+
+    offload_type = vhdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN;
+    is_gso = (offload_type != VIRTIO_NET_HDR_GSO_NONE) ? 1 : 0;
+
+    if (!csum_ok && !is_gso) {
+        goto nocsum;
+    }
+
+    net_rx_pkt_get_protocols(pkt, &isip4, &isip6, &isudp, &istcp);
+    if ((!istcp && !isudp) || (!isip4 && !isip6)) {
+        goto nocsum;
+    }
+
+    rxcd->cnc = 0;
+    rxcd->v4 = isip4 ? 1 : 0;
+    rxcd->v6 = isip6 ? 1 : 0;
+    rxcd->tcp = istcp ? 1 : 0;
+    rxcd->udp = isudp ?
1 : 0; + rxcd->fcs = rxcd->tuc = rxcd->ipc = 1; + return; + +nocsum: + rxcd->cnc = 1; + return; +} + +static void +vmxnet3_pci_dma_writev(PCIDevice *pci_dev, + const struct iovec *iov, + size_t start_iov_off, + hwaddr target_addr, + size_t bytes_to_copy) +{ + size_t curr_off = 0; + size_t copied = 0; + + while (bytes_to_copy) { + if (start_iov_off < (curr_off + iov->iov_len)) { + size_t chunk_len = + MIN((curr_off + iov->iov_len) - start_iov_off, bytes_to_copy); + + pci_dma_write(pci_dev, target_addr + copied, + iov->iov_base + start_iov_off - curr_off, + chunk_len); + + copied += chunk_len; + start_iov_off += chunk_len; + curr_off = start_iov_off; + bytes_to_copy -= chunk_len; + } else { + curr_off += iov->iov_len; + } + iov++; + } +} + +static void +vmxnet3_pci_dma_write_rxcd(PCIDevice *pcidev, dma_addr_t pa, + struct Vmxnet3_RxCompDesc *rxcd) +{ + rxcd->val1 = cpu_to_le32(rxcd->val1); + rxcd->val2 = cpu_to_le32(rxcd->val2); + rxcd->val3 = cpu_to_le32(rxcd->val3); + pci_dma_write(pcidev, pa, rxcd, sizeof(*rxcd)); +} + +static bool +vmxnet3_indicate_packet(VMXNET3State *s) +{ + struct Vmxnet3_RxDesc rxd; + PCIDevice *d = PCI_DEVICE(s); + bool is_head = true; + uint32_t rxd_idx; + uint32_t rx_ridx = 0; + + struct Vmxnet3_RxCompDesc rxcd; + uint32_t new_rxcd_gen = VMXNET3_INIT_GEN; + hwaddr new_rxcd_pa = 0; + hwaddr ready_rxcd_pa = 0; + struct iovec *data = net_rx_pkt_get_iovec(s->rx_pkt); + size_t bytes_copied = 0; + size_t bytes_left = net_rx_pkt_get_total_len(s->rx_pkt); + uint16_t num_frags = 0; + size_t chunk_size; + + net_rx_pkt_dump(s->rx_pkt); + + while (bytes_left > 0) { + + /* cannot add more frags to packet */ + if (num_frags == s->max_rx_frags) { + break; + } + + new_rxcd_pa = vmxnet3_pop_rxc_descr(s, RXQ_IDX, &new_rxcd_gen); + if (!new_rxcd_pa) { + break; + } + + if (!vmxnet3_get_next_rx_descr(s, is_head, &rxd, &rxd_idx, &rx_ridx)) { + break; + } + + chunk_size = MIN(bytes_left, rxd.len); + vmxnet3_pci_dma_writev(d, data, bytes_copied, rxd.addr, chunk_size); + bytes_copied += chunk_size; + bytes_left -= chunk_size; + + vmxnet3_dump_rx_descr(&rxd); + + if (ready_rxcd_pa != 0) { + vmxnet3_pci_dma_write_rxcd(d, ready_rxcd_pa, &rxcd); + } + + memset(&rxcd, 0, sizeof(struct Vmxnet3_RxCompDesc)); + rxcd.rxdIdx = rxd_idx; + rxcd.len = chunk_size; + rxcd.sop = is_head; + rxcd.gen = new_rxcd_gen; + rxcd.rqID = RXQ_IDX + rx_ridx * s->rxq_num; + + if (bytes_left == 0) { + vmxnet3_rx_update_descr(s->rx_pkt, &rxcd); + } + + VMW_RIPRN("RX Completion descriptor: rxRing: %lu rxIdx %lu len %lu " + "sop %d csum_correct %lu", + (unsigned long) rx_ridx, + (unsigned long) rxcd.rxdIdx, + (unsigned long) rxcd.len, + (int) rxcd.sop, + (unsigned long) rxcd.tuc); + + is_head = false; + ready_rxcd_pa = new_rxcd_pa; + new_rxcd_pa = 0; + num_frags++; + } + + if (ready_rxcd_pa != 0) { + rxcd.eop = 1; + rxcd.err = (bytes_left != 0); + + vmxnet3_pci_dma_write_rxcd(d, ready_rxcd_pa, &rxcd); + + /* Flush RX descriptor changes */ + smp_wmb(); + } + + if (new_rxcd_pa != 0) { + vmxnet3_revert_rxc_descr(s, RXQ_IDX); + } + + vmxnet3_trigger_interrupt(s, s->rxq_descr[RXQ_IDX].intr_idx); + + if (bytes_left == 0) { + vmxnet3_on_rx_done_update_stats(s, RXQ_IDX, VMXNET3_PKT_STATUS_OK); + return true; + } else if (num_frags == s->max_rx_frags) { + vmxnet3_on_rx_done_update_stats(s, RXQ_IDX, VMXNET3_PKT_STATUS_ERROR); + return false; + } else { + vmxnet3_on_rx_done_update_stats(s, RXQ_IDX, + VMXNET3_PKT_STATUS_OUT_OF_BUF); + return false; + } +} + +static void +vmxnet3_io_bar0_write(void *opaque, hwaddr addr, + uint64_t 
val, unsigned size) +{ + VMXNET3State *s = opaque; + + if (!s->device_active) { + return; + } + + if (VMW_IS_MULTIREG_ADDR(addr, VMXNET3_REG_TXPROD, + VMXNET3_DEVICE_MAX_TX_QUEUES, VMXNET3_REG_ALIGN)) { + int tx_queue_idx = + VMW_MULTIREG_IDX_BY_ADDR(addr, VMXNET3_REG_TXPROD, + VMXNET3_REG_ALIGN); + if (tx_queue_idx <= s->txq_num) { + vmxnet3_process_tx_queue(s, tx_queue_idx); + } else { + qemu_log_mask(LOG_GUEST_ERROR, "vmxnet3: Illegal TX queue %d/%d\n", + tx_queue_idx, s->txq_num); + } + return; + } + + if (VMW_IS_MULTIREG_ADDR(addr, VMXNET3_REG_IMR, + VMXNET3_MAX_INTRS, VMXNET3_REG_ALIGN)) { + int l = VMW_MULTIREG_IDX_BY_ADDR(addr, VMXNET3_REG_IMR, + VMXNET3_REG_ALIGN); + + VMW_CBPRN("Interrupt mask for line %d written: 0x%" PRIx64, l, val); + + vmxnet3_on_interrupt_mask_changed(s, l, val); + return; + } + + if (VMW_IS_MULTIREG_ADDR(addr, VMXNET3_REG_RXPROD, + VMXNET3_DEVICE_MAX_RX_QUEUES, VMXNET3_REG_ALIGN) || + VMW_IS_MULTIREG_ADDR(addr, VMXNET3_REG_RXPROD2, + VMXNET3_DEVICE_MAX_RX_QUEUES, VMXNET3_REG_ALIGN)) { + return; + } + + VMW_WRPRN("BAR0 unknown write [%" PRIx64 "] = %" PRIx64 ", size %d", + (uint64_t) addr, val, size); +} + +static uint64_t +vmxnet3_io_bar0_read(void *opaque, hwaddr addr, unsigned size) +{ + VMXNET3State *s = opaque; + + if (VMW_IS_MULTIREG_ADDR(addr, VMXNET3_REG_IMR, + VMXNET3_MAX_INTRS, VMXNET3_REG_ALIGN)) { + int l = VMW_MULTIREG_IDX_BY_ADDR(addr, VMXNET3_REG_IMR, + VMXNET3_REG_ALIGN); + return s->interrupt_states[l].is_masked; + } + + VMW_CBPRN("BAR0 unknown read [%" PRIx64 "], size %d", addr, size); + return 0; +} + +static void vmxnet3_reset_interrupt_states(VMXNET3State *s) +{ + int i; + for (i = 0; i < ARRAY_SIZE(s->interrupt_states); i++) { + s->interrupt_states[i].is_asserted = false; + s->interrupt_states[i].is_pending = false; + s->interrupt_states[i].is_masked = true; + } +} + +static void vmxnet3_reset_mac(VMXNET3State *s) +{ + memcpy(&s->conf.macaddr.a, &s->perm_mac.a, sizeof(s->perm_mac.a)); + VMW_CFPRN("MAC address set to: " MAC_FMT, MAC_ARG(s->conf.macaddr.a)); +} + +static void vmxnet3_deactivate_device(VMXNET3State *s) +{ + if (s->device_active) { + VMW_CBPRN("Deactivating vmxnet3..."); + net_tx_pkt_reset(s->tx_pkt); + net_tx_pkt_uninit(s->tx_pkt); + net_rx_pkt_uninit(s->rx_pkt); + s->device_active = false; + } +} + +static void vmxnet3_reset(VMXNET3State *s) +{ + VMW_CBPRN("Resetting vmxnet3..."); + + vmxnet3_deactivate_device(s); + vmxnet3_reset_interrupt_states(s); + s->drv_shmem = 0; + s->tx_sop = true; + s->skip_current_tx_pkt = false; +} + +static void vmxnet3_update_rx_mode(VMXNET3State *s) +{ + PCIDevice *d = PCI_DEVICE(s); + + s->rx_mode = VMXNET3_READ_DRV_SHARED32(d, s->drv_shmem, + devRead.rxFilterConf.rxMode); + VMW_CFPRN("RX mode: 0x%08X", s->rx_mode); +} + +static void vmxnet3_update_vlan_filters(VMXNET3State *s) +{ + int i; + PCIDevice *d = PCI_DEVICE(s); + + /* Copy configuration from shared memory */ + VMXNET3_READ_DRV_SHARED(d, s->drv_shmem, + devRead.rxFilterConf.vfTable, + s->vlan_table, + sizeof(s->vlan_table)); + + /* Invert byte order when needed */ + for (i = 0; i < ARRAY_SIZE(s->vlan_table); i++) { + s->vlan_table[i] = le32_to_cpu(s->vlan_table[i]); + } + + /* Dump configuration for debugging purposes */ + VMW_CFPRN("Configured VLANs:"); + for (i = 0; i < sizeof(s->vlan_table) * 8; i++) { + if (VMXNET3_VFTABLE_ENTRY_IS_SET(s->vlan_table, i)) { + VMW_CFPRN("\tVLAN %d is present", i); + } + } +} + +static void vmxnet3_update_mcast_filters(VMXNET3State *s) +{ + PCIDevice *d = PCI_DEVICE(s); + + uint16_t list_bytes = + 
VMXNET3_READ_DRV_SHARED16(d, s->drv_shmem, + devRead.rxFilterConf.mfTableLen); + + s->mcast_list_len = list_bytes / sizeof(s->mcast_list[0]); + + s->mcast_list = g_realloc(s->mcast_list, list_bytes); + if (!s->mcast_list) { + if (s->mcast_list_len == 0) { + VMW_CFPRN("Current multicast list is empty"); + } else { + VMW_ERPRN("Failed to allocate multicast list of %d elements", + s->mcast_list_len); + } + s->mcast_list_len = 0; + } else { + int i; + hwaddr mcast_list_pa = + VMXNET3_READ_DRV_SHARED64(d, s->drv_shmem, + devRead.rxFilterConf.mfTablePA); + + pci_dma_read(d, mcast_list_pa, s->mcast_list, list_bytes); + + VMW_CFPRN("Current multicast list len is %d:", s->mcast_list_len); + for (i = 0; i < s->mcast_list_len; i++) { + VMW_CFPRN("\t" MAC_FMT, MAC_ARG(s->mcast_list[i].a)); + } + } +} + +static void vmxnet3_setup_rx_filtering(VMXNET3State *s) +{ + vmxnet3_update_rx_mode(s); + vmxnet3_update_vlan_filters(s); + vmxnet3_update_mcast_filters(s); +} + +static uint32_t vmxnet3_get_interrupt_config(VMXNET3State *s) +{ + uint32_t interrupt_mode = VMXNET3_IT_AUTO | (VMXNET3_IMM_AUTO << 2); + VMW_CFPRN("Interrupt config is 0x%X", interrupt_mode); + return interrupt_mode; +} + +static void vmxnet3_fill_stats(VMXNET3State *s) +{ + int i; + PCIDevice *d = PCI_DEVICE(s); + + if (!s->device_active) + return; + + for (i = 0; i < s->txq_num; i++) { + pci_dma_write(d, + s->txq_descr[i].tx_stats_pa, + &s->txq_descr[i].txq_stats, + sizeof(s->txq_descr[i].txq_stats)); + } + + for (i = 0; i < s->rxq_num; i++) { + pci_dma_write(d, + s->rxq_descr[i].rx_stats_pa, + &s->rxq_descr[i].rxq_stats, + sizeof(s->rxq_descr[i].rxq_stats)); + } +} + +static void vmxnet3_adjust_by_guest_type(VMXNET3State *s) +{ + struct Vmxnet3_GOSInfo gos; + PCIDevice *d = PCI_DEVICE(s); + + VMXNET3_READ_DRV_SHARED(d, s->drv_shmem, devRead.misc.driverInfo.gos, + &gos, sizeof(gos)); + s->rx_packets_compound = + (gos.gosType == VMXNET3_GOS_TYPE_WIN) ? 
false : true; + + VMW_CFPRN("Guest type specifics: RXCOMPOUND: %d", s->rx_packets_compound); +} + +static void +vmxnet3_dump_conf_descr(const char *name, + struct Vmxnet3_VariableLenConfDesc *pm_descr) +{ + VMW_CFPRN("%s descriptor dump: Version %u, Length %u", + name, pm_descr->confVer, pm_descr->confLen); + +}; + +static void vmxnet3_update_pm_state(VMXNET3State *s) +{ + struct Vmxnet3_VariableLenConfDesc pm_descr; + PCIDevice *d = PCI_DEVICE(s); + + pm_descr.confLen = + VMXNET3_READ_DRV_SHARED32(d, s->drv_shmem, devRead.pmConfDesc.confLen); + pm_descr.confVer = + VMXNET3_READ_DRV_SHARED32(d, s->drv_shmem, devRead.pmConfDesc.confVer); + pm_descr.confPA = + VMXNET3_READ_DRV_SHARED64(d, s->drv_shmem, devRead.pmConfDesc.confPA); + + vmxnet3_dump_conf_descr("PM State", &pm_descr); +} + +static void vmxnet3_update_features(VMXNET3State *s) +{ + uint32_t guest_features; + int rxcso_supported; + PCIDevice *d = PCI_DEVICE(s); + + guest_features = VMXNET3_READ_DRV_SHARED32(d, s->drv_shmem, + devRead.misc.uptFeatures); + + rxcso_supported = VMXNET_FLAG_IS_SET(guest_features, UPT1_F_RXCSUM); + s->rx_vlan_stripping = VMXNET_FLAG_IS_SET(guest_features, UPT1_F_RXVLAN); + s->lro_supported = VMXNET_FLAG_IS_SET(guest_features, UPT1_F_LRO); + + VMW_CFPRN("Features configuration: LRO: %d, RXCSUM: %d, VLANSTRIP: %d", + s->lro_supported, rxcso_supported, + s->rx_vlan_stripping); + if (s->peer_has_vhdr) { + qemu_set_offload(qemu_get_queue(s->nic)->peer, + rxcso_supported, + s->lro_supported, + s->lro_supported, + 0, + 0); + } +} + +static bool vmxnet3_verify_intx(VMXNET3State *s, int intx) +{ + return s->msix_used || msi_enabled(PCI_DEVICE(s)) + || intx == pci_get_byte(s->parent_obj.config + PCI_INTERRUPT_PIN) - 1; +} + +static void vmxnet3_validate_interrupt_idx(bool is_msix, int idx) +{ + int max_ints = is_msix ? VMXNET3_MAX_INTRS : VMXNET3_MAX_NMSIX_INTRS; + if (idx >= max_ints) { + hw_error("Bad interrupt index: %d\n", idx); + } +} + +static void vmxnet3_validate_interrupts(VMXNET3State *s) +{ + int i; + + VMW_CFPRN("Verifying event interrupt index (%d)", s->event_int_idx); + vmxnet3_validate_interrupt_idx(s->msix_used, s->event_int_idx); + + for (i = 0; i < s->txq_num; i++) { + int idx = s->txq_descr[i].intr_idx; + VMW_CFPRN("Verifying TX queue %d interrupt index (%d)", i, idx); + vmxnet3_validate_interrupt_idx(s->msix_used, idx); + } + + for (i = 0; i < s->rxq_num; i++) { + int idx = s->rxq_descr[i].intr_idx; + VMW_CFPRN("Verifying RX queue %d interrupt index (%d)", i, idx); + vmxnet3_validate_interrupt_idx(s->msix_used, idx); + } +} + +static bool vmxnet3_validate_queues(VMXNET3State *s) +{ + /* + * txq_num and rxq_num are total number of queues + * configured by guest. These numbers must not + * exceed corresponding maximal values. 
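+     *
+     * For example (illustrative numbers only): with
+     * VMXNET3_DEVICE_MAX_TX_QUEUES defined as 8, a guest that writes
+     * numTxQueues = 16 into shared memory must be rejected here, before
+     * txq_descr[] is ever indexed with a count taken from
+     * guest-controlled memory.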
+ */ + + if (s->txq_num > VMXNET3_DEVICE_MAX_TX_QUEUES) { + qemu_log_mask(LOG_GUEST_ERROR, "vmxnet3: Bad TX queues number: %d\n", + s->txq_num); + return false; + } + + if (s->rxq_num > VMXNET3_DEVICE_MAX_RX_QUEUES) { + qemu_log_mask(LOG_GUEST_ERROR, "vmxnet3: Bad RX queues number: %d\n", + s->rxq_num); + return false; + } + + return true; +} + +static void vmxnet3_activate_device(VMXNET3State *s) +{ + int i; + static const uint32_t VMXNET3_DEF_TX_THRESHOLD = 1; + PCIDevice *d = PCI_DEVICE(s); + hwaddr qdescr_table_pa; + uint64_t pa; + uint32_t size; + + /* Verify configuration consistency */ + if (!vmxnet3_verify_driver_magic(d, s->drv_shmem)) { + VMW_ERPRN("Device configuration received from driver is invalid"); + return; + } + + /* Verify if device is active */ + if (s->device_active) { + VMW_CFPRN("Vmxnet3 device is active"); + return; + } + + s->txq_num = + VMXNET3_READ_DRV_SHARED8(d, s->drv_shmem, devRead.misc.numTxQueues); + s->rxq_num = + VMXNET3_READ_DRV_SHARED8(d, s->drv_shmem, devRead.misc.numRxQueues); + + VMW_CFPRN("Number of TX/RX queues %u/%u", s->txq_num, s->rxq_num); + if (!vmxnet3_validate_queues(s)) { + return; + } + + vmxnet3_adjust_by_guest_type(s); + vmxnet3_update_features(s); + vmxnet3_update_pm_state(s); + vmxnet3_setup_rx_filtering(s); + /* Cache fields from shared memory */ + s->mtu = VMXNET3_READ_DRV_SHARED32(d, s->drv_shmem, devRead.misc.mtu); + assert(VMXNET3_MIN_MTU <= s->mtu && s->mtu < VMXNET3_MAX_MTU); + VMW_CFPRN("MTU is %u", s->mtu); + + s->max_rx_frags = + VMXNET3_READ_DRV_SHARED16(d, s->drv_shmem, devRead.misc.maxNumRxSG); + + if (s->max_rx_frags == 0) { + s->max_rx_frags = 1; + } + + VMW_CFPRN("Max RX fragments is %u", s->max_rx_frags); + + s->event_int_idx = + VMXNET3_READ_DRV_SHARED8(d, s->drv_shmem, devRead.intrConf.eventIntrIdx); + assert(vmxnet3_verify_intx(s, s->event_int_idx)); + VMW_CFPRN("Events interrupt line is %u", s->event_int_idx); + + s->auto_int_masking = + VMXNET3_READ_DRV_SHARED8(d, s->drv_shmem, devRead.intrConf.autoMask); + VMW_CFPRN("Automatic interrupt masking is %d", (int)s->auto_int_masking); + + qdescr_table_pa = + VMXNET3_READ_DRV_SHARED64(d, s->drv_shmem, devRead.misc.queueDescPA); + VMW_CFPRN("TX queues descriptors table is at 0x%" PRIx64, qdescr_table_pa); + + /* + * Worst-case scenario is a packet that holds all TX rings space so + * we calculate total size of all TX rings for max TX fragments number + */ + s->max_tx_frags = 0; + + /* TX queues */ + for (i = 0; i < s->txq_num; i++) { + hwaddr qdescr_pa = + qdescr_table_pa + i * sizeof(struct Vmxnet3_TxQueueDesc); + + /* Read interrupt number for this TX queue */ + s->txq_descr[i].intr_idx = + VMXNET3_READ_TX_QUEUE_DESCR8(d, qdescr_pa, conf.intrIdx); + assert(vmxnet3_verify_intx(s, s->txq_descr[i].intr_idx)); + + VMW_CFPRN("TX Queue %d interrupt: %d", i, s->txq_descr[i].intr_idx); + + /* Read rings memory locations for TX queues */ + pa = VMXNET3_READ_TX_QUEUE_DESCR64(d, qdescr_pa, conf.txRingBasePA); + size = VMXNET3_READ_TX_QUEUE_DESCR32(d, qdescr_pa, conf.txRingSize); + if (size > VMXNET3_TX_RING_MAX_SIZE) { + size = VMXNET3_TX_RING_MAX_SIZE; + } + + vmxnet3_ring_init(d, &s->txq_descr[i].tx_ring, pa, size, + sizeof(struct Vmxnet3_TxDesc), false); + VMXNET3_RING_DUMP(VMW_CFPRN, "TX", i, &s->txq_descr[i].tx_ring); + + s->max_tx_frags += size; + + /* TXC ring */ + pa = VMXNET3_READ_TX_QUEUE_DESCR64(d, qdescr_pa, conf.compRingBasePA); + size = VMXNET3_READ_TX_QUEUE_DESCR32(d, qdescr_pa, conf.compRingSize); + if (size > VMXNET3_TC_RING_MAX_SIZE) { + size = 
VMXNET3_TC_RING_MAX_SIZE; + } + vmxnet3_ring_init(d, &s->txq_descr[i].comp_ring, pa, size, + sizeof(struct Vmxnet3_TxCompDesc), true); + VMXNET3_RING_DUMP(VMW_CFPRN, "TXC", i, &s->txq_descr[i].comp_ring); + + s->txq_descr[i].tx_stats_pa = + qdescr_pa + offsetof(struct Vmxnet3_TxQueueDesc, stats); + + memset(&s->txq_descr[i].txq_stats, 0, + sizeof(s->txq_descr[i].txq_stats)); + + /* Fill device-managed parameters for queues */ + VMXNET3_WRITE_TX_QUEUE_DESCR32(d, qdescr_pa, + ctrl.txThreshold, + VMXNET3_DEF_TX_THRESHOLD); + } + + /* Preallocate TX packet wrapper */ + VMW_CFPRN("Max TX fragments is %u", s->max_tx_frags); + net_tx_pkt_init(&s->tx_pkt, PCI_DEVICE(s), + s->max_tx_frags, s->peer_has_vhdr); + net_rx_pkt_init(&s->rx_pkt, s->peer_has_vhdr); + + /* Read rings memory locations for RX queues */ + for (i = 0; i < s->rxq_num; i++) { + int j; + hwaddr qd_pa = + qdescr_table_pa + s->txq_num * sizeof(struct Vmxnet3_TxQueueDesc) + + i * sizeof(struct Vmxnet3_RxQueueDesc); + + /* Read interrupt number for this RX queue */ + s->rxq_descr[i].intr_idx = + VMXNET3_READ_TX_QUEUE_DESCR8(d, qd_pa, conf.intrIdx); + assert(vmxnet3_verify_intx(s, s->rxq_descr[i].intr_idx)); + + VMW_CFPRN("RX Queue %d interrupt: %d", i, s->rxq_descr[i].intr_idx); + + /* Read rings memory locations */ + for (j = 0; j < VMXNET3_RX_RINGS_PER_QUEUE; j++) { + /* RX rings */ + pa = VMXNET3_READ_RX_QUEUE_DESCR64(d, qd_pa, conf.rxRingBasePA[j]); + size = VMXNET3_READ_RX_QUEUE_DESCR32(d, qd_pa, conf.rxRingSize[j]); + if (size > VMXNET3_RX_RING_MAX_SIZE) { + size = VMXNET3_RX_RING_MAX_SIZE; + } + vmxnet3_ring_init(d, &s->rxq_descr[i].rx_ring[j], pa, size, + sizeof(struct Vmxnet3_RxDesc), false); + VMW_CFPRN("RX queue %d:%d: Base: %" PRIx64 ", Size: %d", + i, j, pa, size); + } + + /* RXC ring */ + pa = VMXNET3_READ_RX_QUEUE_DESCR64(d, qd_pa, conf.compRingBasePA); + size = VMXNET3_READ_RX_QUEUE_DESCR32(d, qd_pa, conf.compRingSize); + if (size > VMXNET3_RC_RING_MAX_SIZE) { + size = VMXNET3_RC_RING_MAX_SIZE; + } + vmxnet3_ring_init(d, &s->rxq_descr[i].comp_ring, pa, size, + sizeof(struct Vmxnet3_RxCompDesc), true); + VMW_CFPRN("RXC queue %d: Base: %" PRIx64 ", Size: %d", i, pa, size); + + s->rxq_descr[i].rx_stats_pa = + qd_pa + offsetof(struct Vmxnet3_RxQueueDesc, stats); + memset(&s->rxq_descr[i].rxq_stats, 0, + sizeof(s->rxq_descr[i].rxq_stats)); + } + + vmxnet3_validate_interrupts(s); + + /* Make sure everything is in place before device activation */ + smp_wmb(); + + vmxnet3_reset_mac(s); + + s->device_active = true; +} + +static void vmxnet3_handle_command(VMXNET3State *s, uint64_t cmd) +{ + s->last_command = cmd; + + switch (cmd) { + case VMXNET3_CMD_GET_PERM_MAC_HI: + VMW_CBPRN("Set: Get upper part of permanent MAC"); + break; + + case VMXNET3_CMD_GET_PERM_MAC_LO: + VMW_CBPRN("Set: Get lower part of permanent MAC"); + break; + + case VMXNET3_CMD_GET_STATS: + VMW_CBPRN("Set: Get device statistics"); + vmxnet3_fill_stats(s); + break; + + case VMXNET3_CMD_ACTIVATE_DEV: + VMW_CBPRN("Set: Activating vmxnet3 device"); + vmxnet3_activate_device(s); + break; + + case VMXNET3_CMD_UPDATE_RX_MODE: + VMW_CBPRN("Set: Update rx mode"); + vmxnet3_update_rx_mode(s); + break; + + case VMXNET3_CMD_UPDATE_VLAN_FILTERS: + VMW_CBPRN("Set: Update VLAN filters"); + vmxnet3_update_vlan_filters(s); + break; + + case VMXNET3_CMD_UPDATE_MAC_FILTERS: + VMW_CBPRN("Set: Update MAC filters"); + vmxnet3_update_mcast_filters(s); + break; + + case VMXNET3_CMD_UPDATE_FEATURE: + VMW_CBPRN("Set: Update features"); + vmxnet3_update_features(s); + break; + + case 
VMXNET3_CMD_UPDATE_PMCFG: + VMW_CBPRN("Set: Update power management config"); + vmxnet3_update_pm_state(s); + break; + + case VMXNET3_CMD_GET_LINK: + VMW_CBPRN("Set: Get link"); + break; + + case VMXNET3_CMD_RESET_DEV: + VMW_CBPRN("Set: Reset device"); + vmxnet3_reset(s); + break; + + case VMXNET3_CMD_QUIESCE_DEV: + VMW_CBPRN("Set: VMXNET3_CMD_QUIESCE_DEV - deactivate the device"); + vmxnet3_deactivate_device(s); + break; + + case VMXNET3_CMD_GET_CONF_INTR: + VMW_CBPRN("Set: VMXNET3_CMD_GET_CONF_INTR - interrupt configuration"); + break; + + case VMXNET3_CMD_GET_ADAPTIVE_RING_INFO: + VMW_CBPRN("Set: VMXNET3_CMD_GET_ADAPTIVE_RING_INFO - " + "adaptive ring info flags"); + break; + + case VMXNET3_CMD_GET_DID_LO: + VMW_CBPRN("Set: Get lower part of device ID"); + break; + + case VMXNET3_CMD_GET_DID_HI: + VMW_CBPRN("Set: Get upper part of device ID"); + break; + + case VMXNET3_CMD_GET_DEV_EXTRA_INFO: + VMW_CBPRN("Set: Get device extra info"); + break; + + default: + VMW_CBPRN("Received unknown command: %" PRIx64, cmd); + break; + } +} + +static uint64_t vmxnet3_get_command_status(VMXNET3State *s) +{ + uint64_t ret; + + switch (s->last_command) { + case VMXNET3_CMD_ACTIVATE_DEV: + ret = (s->device_active) ? 0 : 1; + VMW_CFPRN("Device active: %" PRIx64, ret); + break; + + case VMXNET3_CMD_RESET_DEV: + case VMXNET3_CMD_QUIESCE_DEV: + case VMXNET3_CMD_GET_QUEUE_STATUS: + case VMXNET3_CMD_GET_DEV_EXTRA_INFO: + ret = 0; + break; + + case VMXNET3_CMD_GET_LINK: + ret = s->link_status_and_speed; + VMW_CFPRN("Link and speed: %" PRIx64, ret); + break; + + case VMXNET3_CMD_GET_PERM_MAC_LO: + ret = vmxnet3_get_mac_low(&s->perm_mac); + break; + + case VMXNET3_CMD_GET_PERM_MAC_HI: + ret = vmxnet3_get_mac_high(&s->perm_mac); + break; + + case VMXNET3_CMD_GET_CONF_INTR: + ret = vmxnet3_get_interrupt_config(s); + break; + + case VMXNET3_CMD_GET_ADAPTIVE_RING_INFO: + ret = VMXNET3_DISABLE_ADAPTIVE_RING; + break; + + case VMXNET3_CMD_GET_DID_LO: + ret = PCI_DEVICE_ID_VMWARE_VMXNET3; + break; + + case VMXNET3_CMD_GET_DID_HI: + ret = VMXNET3_DEVICE_REVISION; + break; + + default: + VMW_WRPRN("Received request for unknown command: %x", s->last_command); + ret = 0; + break; + } + + return ret; +} + +static void vmxnet3_set_events(VMXNET3State *s, uint32_t val) +{ + uint32_t events; + PCIDevice *d = PCI_DEVICE(s); + + VMW_CBPRN("Setting events: 0x%x", val); + events = VMXNET3_READ_DRV_SHARED32(d, s->drv_shmem, ecr) | val; + VMXNET3_WRITE_DRV_SHARED32(d, s->drv_shmem, ecr, events); +} + +static void vmxnet3_ack_events(VMXNET3State *s, uint32_t val) +{ + PCIDevice *d = PCI_DEVICE(s); + uint32_t events; + + VMW_CBPRN("Clearing events: 0x%x", val); + events = VMXNET3_READ_DRV_SHARED32(d, s->drv_shmem, ecr) & ~val; + VMXNET3_WRITE_DRV_SHARED32(d, s->drv_shmem, ecr, events); +} + +static void +vmxnet3_io_bar1_write(void *opaque, + hwaddr addr, + uint64_t val, + unsigned size) +{ + VMXNET3State *s = opaque; + + switch (addr) { + /* Vmxnet3 Revision Report Selection */ + case VMXNET3_REG_VRRS: + VMW_CBPRN("Write BAR1 [VMXNET3_REG_VRRS] = %" PRIx64 ", size %d", + val, size); + break; + + /* UPT Version Report Selection */ + case VMXNET3_REG_UVRS: + VMW_CBPRN("Write BAR1 [VMXNET3_REG_UVRS] = %" PRIx64 ", size %d", + val, size); + break; + + /* Driver Shared Address Low */ + case VMXNET3_REG_DSAL: + VMW_CBPRN("Write BAR1 [VMXNET3_REG_DSAL] = %" PRIx64 ", size %d", + val, size); + /* + * Guest driver will first write the low part of the shared + * memory address. 
We save it to a temp variable and set the
+         * shared address only after we get the high part.
+         */
+        if (val == 0) {
+            vmxnet3_deactivate_device(s);
+        }
+        s->temp_shared_guest_driver_memory = val;
+        s->drv_shmem = 0;
+        break;
+
+    /* Driver Shared Address High */
+    case VMXNET3_REG_DSAH:
+        VMW_CBPRN("Write BAR1 [VMXNET3_REG_DSAH] = %" PRIx64 ", size %d",
+                  val, size);
+        /*
+         * Set the shared memory between guest driver and device.
+         * We should already have the low address part.
+         */
+        s->drv_shmem = s->temp_shared_guest_driver_memory | (val << 32);
+        break;
+
+    /* Command */
+    case VMXNET3_REG_CMD:
+        VMW_CBPRN("Write BAR1 [VMXNET3_REG_CMD] = %" PRIx64 ", size %d",
+                  val, size);
+        vmxnet3_handle_command(s, val);
+        break;
+
+    /* MAC Address Low */
+    case VMXNET3_REG_MACL:
+        VMW_CBPRN("Write BAR1 [VMXNET3_REG_MACL] = %" PRIx64 ", size %d",
+                  val, size);
+        s->temp_mac = val;
+        break;
+
+    /* MAC Address High */
+    case VMXNET3_REG_MACH:
+        VMW_CBPRN("Write BAR1 [VMXNET3_REG_MACH] = %" PRIx64 ", size %d",
+                  val, size);
+        vmxnet3_set_variable_mac(s, val, s->temp_mac);
+        break;
+
+    /* Interrupt Cause Register */
+    case VMXNET3_REG_ICR:
+        VMW_CBPRN("Write BAR1 [VMXNET3_REG_ICR] = %" PRIx64 ", size %d",
+                  val, size);
+        g_assert_not_reached();
+        break;
+
+    /* Event Cause Register */
+    case VMXNET3_REG_ECR:
+        VMW_CBPRN("Write BAR1 [VMXNET3_REG_ECR] = %" PRIx64 ", size %d",
+                  val, size);
+        vmxnet3_ack_events(s, val);
+        break;
+
+    default:
+        VMW_CBPRN("Unknown write to BAR1 [%" PRIx64 "] = %" PRIx64 ", size %d",
+                  addr, val, size);
+        break;
+    }
+}
+
+static uint64_t
+vmxnet3_io_bar1_read(void *opaque, hwaddr addr, unsigned size)
+{
+    VMXNET3State *s = opaque;
+    uint64_t ret = 0;
+
+    switch (addr) {
+    /* Vmxnet3 Revision Report Selection */
+    case VMXNET3_REG_VRRS:
+        VMW_CBPRN("Read BAR1 [VMXNET3_REG_VRRS], size %d", size);
+        ret = VMXNET3_DEVICE_REVISION;
+        break;
+
+    /* UPT Version Report Selection */
+    case VMXNET3_REG_UVRS:
+        VMW_CBPRN("Read BAR1 [VMXNET3_REG_UVRS], size %d", size);
+        ret = VMXNET3_UPT_REVISION;
+        break;
+
+    /* Command */
+    case VMXNET3_REG_CMD:
+        VMW_CBPRN("Read BAR1 [VMXNET3_REG_CMD], size %d", size);
+        ret = vmxnet3_get_command_status(s);
+        break;
+
+    /* MAC Address Low */
+    case VMXNET3_REG_MACL:
+        VMW_CBPRN("Read BAR1 [VMXNET3_REG_MACL], size %d", size);
+        ret = vmxnet3_get_mac_low(&s->conf.macaddr);
+        break;
+
+    /* MAC Address High */
+    case VMXNET3_REG_MACH:
+        VMW_CBPRN("Read BAR1 [VMXNET3_REG_MACH], size %d", size);
+        ret = vmxnet3_get_mac_high(&s->conf.macaddr);
+        break;
+
+    /*
+     * Interrupt Cause Register
+     * Used for legacy interrupts only, so the interrupt index is always 0.
+     */
+    case VMXNET3_REG_ICR:
+        VMW_CBPRN("Read BAR1 [VMXNET3_REG_ICR], size %d", size);
+        if (vmxnet3_interrupt_asserted(s, 0)) {
+            vmxnet3_clear_interrupt(s, 0);
+            ret = true;
+        } else {
+            ret = false;
+        }
+        break;
+
+    default:
+        VMW_CBPRN("Unknown read BAR1[%" PRIx64 "], %d bytes", addr, size);
+        break;
+    }
+
+    return ret;
+}
+
+static int
+vmxnet3_can_receive(NetClientState *nc)
+{
+    VMXNET3State *s = qemu_get_nic_opaque(nc);
+    return s->device_active &&
+           VMXNET_FLAG_IS_SET(s->link_status_and_speed, VMXNET3_LINK_STATUS_UP);
+}
+
+static inline bool
+vmxnet3_is_registered_vlan(VMXNET3State *s, const void *data)
+{
+    uint16_t vlan_tag = eth_get_pkt_tci(data) & VLAN_VID_MASK;
+    if (IS_SPECIAL_VLAN_ID(vlan_tag)) {
+        return true;
+    }
+
+    return VMXNET3_VFTABLE_ENTRY_IS_SET(s->vlan_table, vlan_tag);
+}
+
+static bool
+vmxnet3_is_allowed_mcast_group(VMXNET3State *s, const uint8_t *group_mac)
+{
+    int i;
+    for (i = 0; i <
s->mcast_list_len; i++) { + if (!memcmp(group_mac, s->mcast_list[i].a, sizeof(s->mcast_list[i]))) { + return true; + } + } + return false; +} + +static bool +vmxnet3_rx_filter_may_indicate(VMXNET3State *s, const void *data, + size_t size) +{ + struct eth_header *ehdr = PKT_GET_ETH_HDR(data); + + if (VMXNET_FLAG_IS_SET(s->rx_mode, VMXNET3_RXM_PROMISC)) { + return true; + } + + if (!vmxnet3_is_registered_vlan(s, data)) { + return false; + } + + switch (net_rx_pkt_get_packet_type(s->rx_pkt)) { + case ETH_PKT_UCAST: + if (!VMXNET_FLAG_IS_SET(s->rx_mode, VMXNET3_RXM_UCAST)) { + return false; + } + if (memcmp(s->conf.macaddr.a, ehdr->h_dest, ETH_ALEN)) { + return false; + } + break; + + case ETH_PKT_BCAST: + if (!VMXNET_FLAG_IS_SET(s->rx_mode, VMXNET3_RXM_BCAST)) { + return false; + } + break; + + case ETH_PKT_MCAST: + if (VMXNET_FLAG_IS_SET(s->rx_mode, VMXNET3_RXM_ALL_MULTI)) { + return true; + } + if (!VMXNET_FLAG_IS_SET(s->rx_mode, VMXNET3_RXM_MCAST)) { + return false; + } + if (!vmxnet3_is_allowed_mcast_group(s, ehdr->h_dest)) { + return false; + } + break; + + default: + g_assert_not_reached(); + } + + return true; +} + +static ssize_t +vmxnet3_receive(NetClientState *nc, const uint8_t *buf, size_t size) +{ + VMXNET3State *s = qemu_get_nic_opaque(nc); + size_t bytes_indicated; + uint8_t min_buf[MIN_BUF_SIZE]; + + if (!vmxnet3_can_receive(nc)) { + VMW_PKPRN("Cannot receive now"); + return -1; + } + + if (s->peer_has_vhdr) { + net_rx_pkt_set_vhdr(s->rx_pkt, (struct virtio_net_hdr *)buf); + buf += sizeof(struct virtio_net_hdr); + size -= sizeof(struct virtio_net_hdr); + } + + /* Pad to minimum Ethernet frame length */ + if (size < sizeof(min_buf)) { + memcpy(min_buf, buf, size); + memset(&min_buf[size], 0, sizeof(min_buf) - size); + buf = min_buf; + size = sizeof(min_buf); + } + + net_rx_pkt_set_packet_type(s->rx_pkt, + get_eth_packet_type(PKT_GET_ETH_HDR(buf))); + + if (vmxnet3_rx_filter_may_indicate(s, buf, size)) { + net_rx_pkt_set_protocols(s->rx_pkt, buf, size); + vmxnet3_rx_need_csum_calculate(s->rx_pkt, buf, size); + net_rx_pkt_attach_data(s->rx_pkt, buf, size, s->rx_vlan_stripping); + bytes_indicated = vmxnet3_indicate_packet(s) ? 
size : -1; + if (bytes_indicated < size) { + VMW_PKPRN("RX: %zu of %zu bytes indicated", bytes_indicated, size); + } + } else { + VMW_PKPRN("Packet dropped by RX filter"); + bytes_indicated = size; + } + + assert(size > 0); + assert(bytes_indicated != 0); + return bytes_indicated; +} + +static void vmxnet3_set_link_status(NetClientState *nc) +{ + VMXNET3State *s = qemu_get_nic_opaque(nc); + + if (nc->link_down) { + s->link_status_and_speed &= ~VMXNET3_LINK_STATUS_UP; + } else { + s->link_status_and_speed |= VMXNET3_LINK_STATUS_UP; + } + + vmxnet3_set_events(s, VMXNET3_ECR_LINK); + vmxnet3_trigger_interrupt(s, s->event_int_idx); +} + +static NetClientInfo net_vmxnet3_info = { + .type = NET_CLIENT_DRIVER_NIC, + .size = sizeof(NICState), + .receive = vmxnet3_receive, + .link_status_changed = vmxnet3_set_link_status, +}; + +static bool vmxnet3_peer_has_vnet_hdr(VMXNET3State *s) +{ + NetClientState *nc = qemu_get_queue(s->nic); + + if (qemu_has_vnet_hdr(nc->peer)) { + return true; + } + + return false; +} + +static void vmxnet3_net_uninit(VMXNET3State *s) +{ + g_free(s->mcast_list); + vmxnet3_deactivate_device(s); + qemu_del_nic(s->nic); +} + +static void vmxnet3_net_init(VMXNET3State *s) +{ + DeviceState *d = DEVICE(s); + + VMW_CBPRN("vmxnet3_net_init called..."); + + qemu_macaddr_default_if_unset(&s->conf.macaddr); + + /* Windows guest will query the address that was set on init */ + memcpy(&s->perm_mac.a, &s->conf.macaddr.a, sizeof(s->perm_mac.a)); + + s->mcast_list = NULL; + s->mcast_list_len = 0; + + s->link_status_and_speed = VMXNET3_LINK_SPEED | VMXNET3_LINK_STATUS_UP; + + VMW_CFPRN("Permanent MAC: " MAC_FMT, MAC_ARG(s->perm_mac.a)); + + s->nic = qemu_new_nic(&net_vmxnet3_info, &s->conf, + object_get_typename(OBJECT(s)), + d->id, s); + + s->peer_has_vhdr = vmxnet3_peer_has_vnet_hdr(s); + s->tx_sop = true; + s->skip_current_tx_pkt = false; + s->tx_pkt = NULL; + s->rx_pkt = NULL; + s->rx_vlan_stripping = false; + s->lro_supported = false; + + if (s->peer_has_vhdr) { + qemu_set_vnet_hdr_len(qemu_get_queue(s->nic)->peer, + sizeof(struct virtio_net_hdr)); + + qemu_using_vnet_hdr(qemu_get_queue(s->nic)->peer, 1); + } + + qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a); +} + +static void +vmxnet3_unuse_msix_vectors(VMXNET3State *s, int num_vectors) +{ + PCIDevice *d = PCI_DEVICE(s); + int i; + for (i = 0; i < num_vectors; i++) { + msix_vector_unuse(d, i); + } +} + +static bool +vmxnet3_use_msix_vectors(VMXNET3State *s, int num_vectors) +{ + PCIDevice *d = PCI_DEVICE(s); + int i; + for (i = 0; i < num_vectors; i++) { + int res = msix_vector_use(d, i); + if (0 > res) { + VMW_WRPRN("Failed to use MSI-X vector %d, error %d", i, res); + vmxnet3_unuse_msix_vectors(s, i); + return false; + } + } + return true; +} + +static bool +vmxnet3_init_msix(VMXNET3State *s) +{ + PCIDevice *d = PCI_DEVICE(s); + int res = msix_init(d, VMXNET3_MAX_INTRS, + &s->msix_bar, + VMXNET3_MSIX_BAR_IDX, VMXNET3_OFF_MSIX_TABLE, + &s->msix_bar, + VMXNET3_MSIX_BAR_IDX, VMXNET3_OFF_MSIX_PBA(s), + VMXNET3_MSIX_OFFSET(s), NULL); + + if (0 > res) { + VMW_WRPRN("Failed to initialize MSI-X, error %d", res); + s->msix_used = false; + } else { + if (!vmxnet3_use_msix_vectors(s, VMXNET3_MAX_INTRS)) { + VMW_WRPRN("Failed to use MSI-X vectors, error %d", res); + msix_uninit(d, &s->msix_bar, &s->msix_bar); + s->msix_used = false; + } else { + s->msix_used = true; + } + } + return s->msix_used; +} + +static void +vmxnet3_cleanup_msix(VMXNET3State *s) +{ + PCIDevice *d = PCI_DEVICE(s); + + if (s->msix_used) { + 
vmxnet3_unuse_msix_vectors(s, VMXNET3_MAX_INTRS); + msix_uninit(d, &s->msix_bar, &s->msix_bar); + } +} + +static void +vmxnet3_cleanup_msi(VMXNET3State *s) +{ + PCIDevice *d = PCI_DEVICE(s); + + msi_uninit(d); +} + +static const MemoryRegionOps b0_ops = { + .read = vmxnet3_io_bar0_read, + .write = vmxnet3_io_bar0_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .impl = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +static const MemoryRegionOps b1_ops = { + .read = vmxnet3_io_bar1_read, + .write = vmxnet3_io_bar1_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .impl = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +static uint64_t vmxnet3_device_serial_num(VMXNET3State *s) +{ + uint64_t dsn_payload; + uint8_t *dsnp = (uint8_t *)&dsn_payload; + + dsnp[0] = 0xfe; + dsnp[1] = s->conf.macaddr.a[3]; + dsnp[2] = s->conf.macaddr.a[4]; + dsnp[3] = s->conf.macaddr.a[5]; + dsnp[4] = s->conf.macaddr.a[0]; + dsnp[5] = s->conf.macaddr.a[1]; + dsnp[6] = s->conf.macaddr.a[2]; + dsnp[7] = 0xff; + return dsn_payload; +} + + +#define VMXNET3_USE_64BIT (true) +#define VMXNET3_PER_VECTOR_MASK (false) + +static void vmxnet3_pci_realize(PCIDevice *pci_dev, Error **errp) +{ + VMXNET3State *s = VMXNET3(pci_dev); + int ret; + + VMW_CBPRN("Starting init..."); + + memory_region_init_io(&s->bar0, OBJECT(s), &b0_ops, s, + "vmxnet3-b0", VMXNET3_PT_REG_SIZE); + pci_register_bar(pci_dev, VMXNET3_BAR0_IDX, + PCI_BASE_ADDRESS_SPACE_MEMORY, &s->bar0); + + memory_region_init_io(&s->bar1, OBJECT(s), &b1_ops, s, + "vmxnet3-b1", VMXNET3_VD_REG_SIZE); + pci_register_bar(pci_dev, VMXNET3_BAR1_IDX, + PCI_BASE_ADDRESS_SPACE_MEMORY, &s->bar1); + + memory_region_init(&s->msix_bar, OBJECT(s), "vmxnet3-msix-bar", + VMXNET3_MSIX_BAR_SIZE); + pci_register_bar(pci_dev, VMXNET3_MSIX_BAR_IDX, + PCI_BASE_ADDRESS_SPACE_MEMORY, &s->msix_bar); + + vmxnet3_reset_interrupt_states(s); + + /* Interrupt pin A */ + pci_dev->config[PCI_INTERRUPT_PIN] = 0x01; + + ret = msi_init(pci_dev, VMXNET3_MSI_OFFSET(s), VMXNET3_MAX_NMSIX_INTRS, + VMXNET3_USE_64BIT, VMXNET3_PER_VECTOR_MASK, NULL); + /* Any error other than -ENOTSUP(board's MSI support is broken) + * is a programming error. 
Fall back to INTx silently on -ENOTSUP */ + assert(!ret || ret == -ENOTSUP); + + if (!vmxnet3_init_msix(s)) { + VMW_WRPRN("Failed to initialize MSI-X, configuration is inconsistent."); + } + + vmxnet3_net_init(s); + + if (pci_is_express(pci_dev)) { + if (pci_bus_is_express(pci_get_bus(pci_dev))) { + pcie_endpoint_cap_init(pci_dev, VMXNET3_EXP_EP_OFFSET); + } + + pcie_dev_ser_num_init(pci_dev, VMXNET3_DSN_OFFSET, + vmxnet3_device_serial_num(s)); + } +} + +static void vmxnet3_instance_init(Object *obj) +{ + VMXNET3State *s = VMXNET3(obj); + device_add_bootindex_property(obj, &s->conf.bootindex, + "bootindex", "/ethernet-phy@0", + DEVICE(obj)); +} + +static void vmxnet3_pci_uninit(PCIDevice *pci_dev) +{ + VMXNET3State *s = VMXNET3(pci_dev); + + VMW_CBPRN("Starting uninit..."); + + vmxnet3_net_uninit(s); + + vmxnet3_cleanup_msix(s); + + vmxnet3_cleanup_msi(s); +} + +static void vmxnet3_qdev_reset(DeviceState *dev) +{ + PCIDevice *d = PCI_DEVICE(dev); + VMXNET3State *s = VMXNET3(d); + + VMW_CBPRN("Starting QDEV reset..."); + vmxnet3_reset(s); +} + +static bool vmxnet3_mc_list_needed(void *opaque) +{ + return true; +} + +static int vmxnet3_mcast_list_pre_load(void *opaque) +{ + VMXNET3State *s = opaque; + + s->mcast_list = g_malloc(s->mcast_list_buff_size); + + return 0; +} + + +static int vmxnet3_pre_save(void *opaque) +{ + VMXNET3State *s = opaque; + + s->mcast_list_buff_size = s->mcast_list_len * sizeof(MACAddr); + + return 0; +} + +static const VMStateDescription vmxstate_vmxnet3_mcast_list = { + .name = "vmxnet3/mcast_list", + .version_id = 1, + .minimum_version_id = 1, + .pre_load = vmxnet3_mcast_list_pre_load, + .needed = vmxnet3_mc_list_needed, + .fields = (VMStateField[]) { + VMSTATE_VBUFFER_UINT32(mcast_list, VMXNET3State, 0, NULL, + mcast_list_buff_size), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_vmxnet3_ring = { + .name = "vmxnet3-ring", + .version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_UINT64(pa, Vmxnet3Ring), + VMSTATE_UINT32(size, Vmxnet3Ring), + VMSTATE_UINT32(cell_size, Vmxnet3Ring), + VMSTATE_UINT32(next, Vmxnet3Ring), + VMSTATE_UINT8(gen, Vmxnet3Ring), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_vmxnet3_tx_stats = { + .name = "vmxnet3-tx-stats", + .version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_UINT64(TSOPktsTxOK, struct UPT1_TxStats), + VMSTATE_UINT64(TSOBytesTxOK, struct UPT1_TxStats), + VMSTATE_UINT64(ucastPktsTxOK, struct UPT1_TxStats), + VMSTATE_UINT64(ucastBytesTxOK, struct UPT1_TxStats), + VMSTATE_UINT64(mcastPktsTxOK, struct UPT1_TxStats), + VMSTATE_UINT64(mcastBytesTxOK, struct UPT1_TxStats), + VMSTATE_UINT64(bcastPktsTxOK, struct UPT1_TxStats), + VMSTATE_UINT64(bcastBytesTxOK, struct UPT1_TxStats), + VMSTATE_UINT64(pktsTxError, struct UPT1_TxStats), + VMSTATE_UINT64(pktsTxDiscard, struct UPT1_TxStats), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_vmxnet3_txq_descr = { + .name = "vmxnet3-txq-descr", + .version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_STRUCT(tx_ring, Vmxnet3TxqDescr, 0, vmstate_vmxnet3_ring, + Vmxnet3Ring), + VMSTATE_STRUCT(comp_ring, Vmxnet3TxqDescr, 0, vmstate_vmxnet3_ring, + Vmxnet3Ring), + VMSTATE_UINT8(intr_idx, Vmxnet3TxqDescr), + VMSTATE_UINT64(tx_stats_pa, Vmxnet3TxqDescr), + VMSTATE_STRUCT(txq_stats, Vmxnet3TxqDescr, 0, vmstate_vmxnet3_tx_stats, + struct UPT1_TxStats), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_vmxnet3_rx_stats = { + .name = "vmxnet3-rx-stats", + .version_id = 0, + .fields = 
(VMStateField[]) { + VMSTATE_UINT64(LROPktsRxOK, struct UPT1_RxStats), + VMSTATE_UINT64(LROBytesRxOK, struct UPT1_RxStats), + VMSTATE_UINT64(ucastPktsRxOK, struct UPT1_RxStats), + VMSTATE_UINT64(ucastBytesRxOK, struct UPT1_RxStats), + VMSTATE_UINT64(mcastPktsRxOK, struct UPT1_RxStats), + VMSTATE_UINT64(mcastBytesRxOK, struct UPT1_RxStats), + VMSTATE_UINT64(bcastPktsRxOK, struct UPT1_RxStats), + VMSTATE_UINT64(bcastBytesRxOK, struct UPT1_RxStats), + VMSTATE_UINT64(pktsRxOutOfBuf, struct UPT1_RxStats), + VMSTATE_UINT64(pktsRxError, struct UPT1_RxStats), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_vmxnet3_rxq_descr = { + .name = "vmxnet3-rxq-descr", + .version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_STRUCT_ARRAY(rx_ring, Vmxnet3RxqDescr, + VMXNET3_RX_RINGS_PER_QUEUE, 0, + vmstate_vmxnet3_ring, Vmxnet3Ring), + VMSTATE_STRUCT(comp_ring, Vmxnet3RxqDescr, 0, vmstate_vmxnet3_ring, + Vmxnet3Ring), + VMSTATE_UINT8(intr_idx, Vmxnet3RxqDescr), + VMSTATE_UINT64(rx_stats_pa, Vmxnet3RxqDescr), + VMSTATE_STRUCT(rxq_stats, Vmxnet3RxqDescr, 0, vmstate_vmxnet3_rx_stats, + struct UPT1_RxStats), + VMSTATE_END_OF_LIST() + } +}; + +static int vmxnet3_post_load(void *opaque, int version_id) +{ + VMXNET3State *s = opaque; + PCIDevice *d = PCI_DEVICE(s); + + net_tx_pkt_init(&s->tx_pkt, PCI_DEVICE(s), + s->max_tx_frags, s->peer_has_vhdr); + net_rx_pkt_init(&s->rx_pkt, s->peer_has_vhdr); + + if (s->msix_used) { + if (!vmxnet3_use_msix_vectors(s, VMXNET3_MAX_INTRS)) { + VMW_WRPRN("Failed to re-use MSI-X vectors"); + msix_uninit(d, &s->msix_bar, &s->msix_bar); + s->msix_used = false; + return -1; + } + } + + if (!vmxnet3_validate_queues(s)) { + return -1; + } + vmxnet3_validate_interrupts(s); + + return 0; +} + +static const VMStateDescription vmstate_vmxnet3_int_state = { + .name = "vmxnet3-int-state", + .version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_BOOL(is_masked, Vmxnet3IntState), + VMSTATE_BOOL(is_pending, Vmxnet3IntState), + VMSTATE_BOOL(is_asserted, Vmxnet3IntState), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_vmxnet3 = { + .name = "vmxnet3", + .version_id = 1, + .minimum_version_id = 1, + .pre_save = vmxnet3_pre_save, + .post_load = vmxnet3_post_load, + .fields = (VMStateField[]) { + VMSTATE_PCI_DEVICE(parent_obj, VMXNET3State), + VMSTATE_MSIX(parent_obj, VMXNET3State), + VMSTATE_BOOL(rx_packets_compound, VMXNET3State), + VMSTATE_BOOL(rx_vlan_stripping, VMXNET3State), + VMSTATE_BOOL(lro_supported, VMXNET3State), + VMSTATE_UINT32(rx_mode, VMXNET3State), + VMSTATE_UINT32(mcast_list_len, VMXNET3State), + VMSTATE_UINT32(mcast_list_buff_size, VMXNET3State), + VMSTATE_UINT32_ARRAY(vlan_table, VMXNET3State, VMXNET3_VFT_SIZE), + VMSTATE_UINT32(mtu, VMXNET3State), + VMSTATE_UINT16(max_rx_frags, VMXNET3State), + VMSTATE_UINT32(max_tx_frags, VMXNET3State), + VMSTATE_UINT8(event_int_idx, VMXNET3State), + VMSTATE_BOOL(auto_int_masking, VMXNET3State), + VMSTATE_UINT8(txq_num, VMXNET3State), + VMSTATE_UINT8(rxq_num, VMXNET3State), + VMSTATE_UINT32(device_active, VMXNET3State), + VMSTATE_UINT32(last_command, VMXNET3State), + VMSTATE_UINT32(link_status_and_speed, VMXNET3State), + VMSTATE_UINT32(temp_mac, VMXNET3State), + VMSTATE_UINT64(drv_shmem, VMXNET3State), + VMSTATE_UINT64(temp_shared_guest_driver_memory, VMXNET3State), + + VMSTATE_STRUCT_ARRAY(txq_descr, VMXNET3State, + VMXNET3_DEVICE_MAX_TX_QUEUES, 0, vmstate_vmxnet3_txq_descr, + Vmxnet3TxqDescr), + VMSTATE_STRUCT_ARRAY(rxq_descr, VMXNET3State, + VMXNET3_DEVICE_MAX_RX_QUEUES, 0, 
vmstate_vmxnet3_rxq_descr, + Vmxnet3RxqDescr), + VMSTATE_STRUCT_ARRAY(interrupt_states, VMXNET3State, + VMXNET3_MAX_INTRS, 0, vmstate_vmxnet3_int_state, + Vmxnet3IntState), + + VMSTATE_END_OF_LIST() + }, + .subsections = (const VMStateDescription*[]) { + &vmxstate_vmxnet3_mcast_list, + NULL + } +}; + +static Property vmxnet3_properties[] = { + DEFINE_NIC_PROPERTIES(VMXNET3State, conf), + DEFINE_PROP_BIT("x-old-msi-offsets", VMXNET3State, compat_flags, + VMXNET3_COMPAT_FLAG_OLD_MSI_OFFSETS_BIT, false), + DEFINE_PROP_BIT("x-disable-pcie", VMXNET3State, compat_flags, + VMXNET3_COMPAT_FLAG_DISABLE_PCIE_BIT, false), + DEFINE_PROP_END_OF_LIST(), +}; + +static void vmxnet3_realize(DeviceState *qdev, Error **errp) +{ + VMXNET3Class *vc = VMXNET3_DEVICE_GET_CLASS(qdev); + PCIDevice *pci_dev = PCI_DEVICE(qdev); + VMXNET3State *s = VMXNET3(qdev); + + if (!(s->compat_flags & VMXNET3_COMPAT_FLAG_DISABLE_PCIE)) { + pci_dev->cap_present |= QEMU_PCI_CAP_EXPRESS; + } + + vc->parent_dc_realize(qdev, errp); +} + +static void vmxnet3_class_init(ObjectClass *class, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(class); + PCIDeviceClass *c = PCI_DEVICE_CLASS(class); + VMXNET3Class *vc = VMXNET3_DEVICE_CLASS(class); + + c->realize = vmxnet3_pci_realize; + c->exit = vmxnet3_pci_uninit; + c->vendor_id = PCI_VENDOR_ID_VMWARE; + c->device_id = PCI_DEVICE_ID_VMWARE_VMXNET3; + c->revision = PCI_DEVICE_ID_VMWARE_VMXNET3_REVISION; + c->romfile = "efi-vmxnet3.rom"; + c->class_id = PCI_CLASS_NETWORK_ETHERNET; + c->subsystem_vendor_id = PCI_VENDOR_ID_VMWARE; + c->subsystem_id = PCI_DEVICE_ID_VMWARE_VMXNET3; + device_class_set_parent_realize(dc, vmxnet3_realize, + &vc->parent_dc_realize); + dc->desc = "VMWare Paravirtualized Ethernet v3"; + dc->reset = vmxnet3_qdev_reset; + dc->vmsd = &vmstate_vmxnet3; + device_class_set_props(dc, vmxnet3_properties); + set_bit(DEVICE_CATEGORY_NETWORK, dc->categories); +} + +static const TypeInfo vmxnet3_info = { + .name = TYPE_VMXNET3, + .parent = TYPE_PCI_DEVICE, + .class_size = sizeof(VMXNET3Class), + .instance_size = sizeof(VMXNET3State), + .class_init = vmxnet3_class_init, + .instance_init = vmxnet3_instance_init, + .interfaces = (InterfaceInfo[]) { + { INTERFACE_PCIE_DEVICE }, + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { } + }, +}; + +static void vmxnet3_register_types(void) +{ + VMW_CBPRN("vmxnet3_register_types called..."); + type_register_static(&vmxnet3_info); +} + +type_init(vmxnet3_register_types) diff --git a/hw/net/vmxnet3.h b/hw/net/vmxnet3.h new file mode 100644 index 000000000..5b3b76ba7 --- /dev/null +++ b/hw/net/vmxnet3.h @@ -0,0 +1,807 @@ +/* + * QEMU VMWARE VMXNET3 paravirtual NIC interface definitions + * + * Copyright (c) 2012 Ravello Systems LTD (http://ravellosystems.com) + * + * Developed by Daynix Computing LTD (http://www.daynix.com) + * + * Authors: + * Dmitry Fleytman + * Tamir Shomer + * Yan Vugenfirer + * + * This work is licensed under the terms of the GNU GPL, version 2. + * See the COPYING file in the top-level directory. + * + */ + +#ifndef QEMU_VMXNET3_H +#define QEMU_VMXNET3_H + +#define VMXNET3_DEVICE_MAX_TX_QUEUES 8 +#define VMXNET3_DEVICE_MAX_RX_QUEUES 8 /* Keep this value as a power of 2 */ + +/* + * VMWARE headers we got from Linux kernel do not fully comply QEMU coding + * standards in sense of types and defines used. + * Since we didn't want to change VMWARE code, following set of typedefs + * and defines needed to compile these headers with QEMU introduced. 
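+ *
+ * For instance, the kernel header declares "__le64 addr;" in struct
+ * Vmxnet3_TxDesc; with the typedefs below this compiles as a plain
+ * uint64_t, and the device model byte-swaps explicitly at access time
+ * (e.g. txd->addr = le64_to_cpu(txd->addr) after the descriptor is
+ * DMA-read, as vmxnet3_ring_read_curr_txdesc() does in vmxnet3.c).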
+ */
+#define u64 uint64_t
+#define u32 uint32_t
+#define u16 uint16_t
+#define u8 uint8_t
+#define __le16 uint16_t
+#define __le32 uint32_t
+#define __le64 uint64_t
+
+#if defined(HOST_WORDS_BIGENDIAN)
+#define __BIG_ENDIAN_BITFIELD
+#else
+#endif
+
+/*
+ * Following is an interface definition for
+ * VMXNET3 device as provided by VMWARE
+ * See original copyright from Linux kernel v3.2.8
+ * header file drivers/net/vmxnet3/vmxnet3_defs.h below.
+ */
+
+/*
+ * Linux driver for VMware's vmxnet3 ethernet NIC.
+ *
+ * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; version 2 of the License and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Maintained by: Shreyas Bhatewara
+ *
+ */
+
+struct UPT1_TxStats {
+    u64 TSOPktsTxOK;  /* TSO pkts post-segmentation */
+    u64 TSOBytesTxOK;
+    u64 ucastPktsTxOK;
+    u64 ucastBytesTxOK;
+    u64 mcastPktsTxOK;
+    u64 mcastBytesTxOK;
+    u64 bcastPktsTxOK;
+    u64 bcastBytesTxOK;
+    u64 pktsTxError;
+    u64 pktsTxDiscard;
+};
+
+struct UPT1_RxStats {
+    u64 LROPktsRxOK;  /* LRO pkts */
+    u64 LROBytesRxOK; /* bytes from LRO pkts */
+    /* the following counters are for pkts from the wire, i.e., pre-LRO */
+    u64 ucastPktsRxOK;
+    u64 ucastBytesRxOK;
+    u64 mcastPktsRxOK;
+    u64 mcastBytesRxOK;
+    u64 bcastPktsRxOK;
+    u64 bcastBytesRxOK;
+    u64 pktsRxOutOfBuf;
+    u64 pktsRxError;
+};
+
+/* interrupt moderation level */
+enum {
+    UPT1_IML_NONE = 0,     /* no interrupt moderation */
+    UPT1_IML_HIGHEST = 7,  /* least intr generated */
+    UPT1_IML_ADAPTIVE = 8, /* adaptive intr moderation */
+};
+/* values for UPT1_RSSConf.hashFunc */
+enum {
+    UPT1_RSS_HASH_TYPE_NONE = 0x0,
+    UPT1_RSS_HASH_TYPE_IPV4 = 0x01,
+    UPT1_RSS_HASH_TYPE_TCP_IPV4 = 0x02,
+    UPT1_RSS_HASH_TYPE_IPV6 = 0x04,
+    UPT1_RSS_HASH_TYPE_TCP_IPV6 = 0x08,
+};
+
+enum {
+    UPT1_RSS_HASH_FUNC_NONE = 0x0,
+    UPT1_RSS_HASH_FUNC_TOEPLITZ = 0x01,
+};
+
+#define UPT1_RSS_MAX_KEY_SIZE 40
+#define UPT1_RSS_MAX_IND_TABLE_SIZE 128
+
+struct UPT1_RSSConf {
+    u16 hashType;
+    u16 hashFunc;
+    u16 hashKeySize;
+    u16 indTableSize;
+    u8 hashKey[UPT1_RSS_MAX_KEY_SIZE];
+    u8 indTable[UPT1_RSS_MAX_IND_TABLE_SIZE];
+};
+
+/* features */
+enum {
+    UPT1_F_RXCSUM = 0x0001, /* rx csum verification */
+    UPT1_F_RSS = 0x0002,
+    UPT1_F_RXVLAN = 0x0004, /* VLAN tag stripping */
+    UPT1_F_LRO = 0x0008,
+};
+
+/* all registers are 32 bit wide */
+/* BAR 1 */
+enum {
+    VMXNET3_REG_VRRS = 0x0,  /* Vmxnet3 Revision Report Selection */
+    VMXNET3_REG_UVRS = 0x8,  /* UPT Version Report Selection */
+    VMXNET3_REG_DSAL = 0x10, /* Driver Shared Address Low */
+    VMXNET3_REG_DSAH = 0x18, /* Driver Shared Address High */
+    VMXNET3_REG_CMD = 0x20,  /* Command */
+    VMXNET3_REG_MACL = 0x28, /* MAC Address Low */
+    VMXNET3_REG_MACH = 0x30, /* MAC Address High */
+    VMXNET3_REG_ICR = 0x38,  /* Interrupt Cause Register */
+    VMXNET3_REG_ECR = 0x40   /* Event Cause Register */
+};
+
+/* BAR 0 */
+enum {
+    VMXNET3_REG_IMR = 0x0,      /* Interrupt Mask Register */
+    VMXNET3_REG_TXPROD = 0x600, /* Tx Producer Index */
+    VMXNET3_REG_RXPROD = 0x800, /* Rx Producer Index for ring 1 */
+    VMXNET3_REG_RXPROD2 = 0xA00 /* Rx Producer Index for ring 2 */
+};
+
+#define VMXNET3_PT_REG_SIZE 4096 /* BAR 0 */
+#define VMXNET3_VD_REG_SIZE 4096 /* BAR 1 */
+
+#define VMXNET3_REG_ALIGN 8      /* All registers are 8-byte aligned. */
+#define VMXNET3_REG_ALIGN_MASK 0x7
+
+/* I/O Mapped access to registers */
+#define VMXNET3_IO_TYPE_PT 0
+#define VMXNET3_IO_TYPE_VD 1
+#define VMXNET3_IO_ADDR(type, reg) (((type) << 24) | ((reg) & 0xFFFFFF))
+#define VMXNET3_IO_TYPE(addr) ((addr) >> 24)
+#define VMXNET3_IO_REG(addr) ((addr) & 0xFFFFFF)
+
+enum {
+    VMXNET3_CMD_FIRST_SET = 0xCAFE0000,
+    VMXNET3_CMD_ACTIVATE_DEV = VMXNET3_CMD_FIRST_SET, /* 0xCAFE0000 */
+    VMXNET3_CMD_QUIESCE_DEV,         /* 0xCAFE0001 */
+    VMXNET3_CMD_RESET_DEV,           /* 0xCAFE0002 */
+    VMXNET3_CMD_UPDATE_RX_MODE,      /* 0xCAFE0003 */
+    VMXNET3_CMD_UPDATE_MAC_FILTERS,  /* 0xCAFE0004 */
+    VMXNET3_CMD_UPDATE_VLAN_FILTERS, /* 0xCAFE0005 */
+    VMXNET3_CMD_UPDATE_RSSIDT,       /* 0xCAFE0006 */
+    VMXNET3_CMD_UPDATE_IML,          /* 0xCAFE0007 */
+    VMXNET3_CMD_UPDATE_PMCFG,        /* 0xCAFE0008 */
+    VMXNET3_CMD_UPDATE_FEATURE,      /* 0xCAFE0009 */
+    VMXNET3_CMD_LOAD_PLUGIN,         /* 0xCAFE000A */
+
+    VMXNET3_CMD_FIRST_GET = 0xF00D0000,
+    VMXNET3_CMD_GET_QUEUE_STATUS = VMXNET3_CMD_FIRST_GET, /* 0xF00D0000 */
+    VMXNET3_CMD_GET_STATS,           /* 0xF00D0001 */
+    VMXNET3_CMD_GET_LINK,            /* 0xF00D0002 */
+    VMXNET3_CMD_GET_PERM_MAC_LO,     /* 0xF00D0003 */
+    VMXNET3_CMD_GET_PERM_MAC_HI,     /* 0xF00D0004 */
+    VMXNET3_CMD_GET_DID_LO,          /* 0xF00D0005 */
+    VMXNET3_CMD_GET_DID_HI,          /* 0xF00D0006 */
+    VMXNET3_CMD_GET_DEV_EXTRA_INFO,  /* 0xF00D0007 */
+    VMXNET3_CMD_GET_CONF_INTR,       /* 0xF00D0008 */
+    VMXNET3_CMD_GET_ADAPTIVE_RING_INFO /* 0xF00D0009 */
+};
+
+/* Adaptive Ring Info Flags */
+#define VMXNET3_DISABLE_ADAPTIVE_RING 1
+
+/*
+ * Little Endian layout of bitfields -
+ * Byte 0 :  7.....len.....0
+ * Byte 1 :  rsvd gen 13.len.8
+ * Byte 2 :  5.msscof.0 ext1 dtype
+ * Byte 3 :  13...msscof...6
+ *
+ * Big Endian layout of bitfields -
+ * Byte 0 :  13...msscof...6
+ * Byte 1 :  5.msscof.0 ext1 dtype
+ * Byte 2 :  rsvd gen 13.len.8
+ * Byte 3 :  7.....len.....0
+ *
+ * Thus, le32_to_cpu on the dword will allow the big endian driver to read
+ * the bit fields correctly. And cpu_to_le32 will convert the bit fields
+ * written by the big endian driver to the format required by the device.
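+ *
+ * Worked example (illustrative values): a descriptor dword with len = 60
+ * and gen = 1 is stored by the device as the little endian value
+ * 0x0000403C, i.e. bytes 3C 40 00 00 in guest memory. A big endian CPU
+ * loads those bytes as 0x3C400000; le32_to_cpu turns that back into
+ * 0x0000403C, and with the reversed bitfield declaration order (msscof
+ * first, len last) gen again falls on bit 14 and len on bits 13..0, so
+ * both layouts observe the same field values.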
+ */ + +struct Vmxnet3_TxDesc { + __le64 addr; + + union { + struct { +#ifdef __BIG_ENDIAN_BITFIELD + u32 msscof:14; /* MSS, checksum offset, flags */ + u32 ext1:1; + u32 dtype:1; /* descriptor type */ + u32 rsvd:1; + u32 gen:1; /* generation bit */ + u32 len:14; +#else + u32 len:14; + u32 gen:1; /* generation bit */ + u32 rsvd:1; + u32 dtype:1; /* descriptor type */ + u32 ext1:1; + u32 msscof:14; /* MSS, checksum offset, flags */ +#endif /* __BIG_ENDIAN_BITFIELD */ + }; + u32 val1; + }; + + union { + struct { +#ifdef __BIG_ENDIAN_BITFIELD + u32 tci:16; /* Tag to Insert */ + u32 ti:1; /* VLAN Tag Insertion */ + u32 ext2:1; + u32 cq:1; /* completion request */ + u32 eop:1; /* End Of Packet */ + u32 om:2; /* offload mode */ + u32 hlen:10; /* header len */ +#else + u32 hlen:10; /* header len */ + u32 om:2; /* offload mode */ + u32 eop:1; /* End Of Packet */ + u32 cq:1; /* completion request */ + u32 ext2:1; + u32 ti:1; /* VLAN Tag Insertion */ + u32 tci:16; /* Tag to Insert */ +#endif /* __BIG_ENDIAN_BITFIELD */ + }; + u32 val2; + }; +}; + +/* TxDesc.OM values */ +#define VMXNET3_OM_NONE 0 +#define VMXNET3_OM_CSUM 2 +#define VMXNET3_OM_TSO 3 + +/* fields in TxDesc we access w/o using bit fields */ +#define VMXNET3_TXD_EOP_SHIFT 12 +#define VMXNET3_TXD_CQ_SHIFT 13 +#define VMXNET3_TXD_GEN_SHIFT 14 +#define VMXNET3_TXD_EOP_DWORD_SHIFT 3 +#define VMXNET3_TXD_GEN_DWORD_SHIFT 2 + +#define VMXNET3_TXD_CQ (1 << VMXNET3_TXD_CQ_SHIFT) +#define VMXNET3_TXD_EOP (1 << VMXNET3_TXD_EOP_SHIFT) +#define VMXNET3_TXD_GEN (1 << VMXNET3_TXD_GEN_SHIFT) + +#define VMXNET3_HDR_COPY_SIZE 128 + + +struct Vmxnet3_TxDataDesc { + u8 data[VMXNET3_HDR_COPY_SIZE]; +}; + +#define VMXNET3_TCD_GEN_SHIFT 31 +#define VMXNET3_TCD_GEN_SIZE 1 +#define VMXNET3_TCD_TXIDX_SHIFT 0 +#define VMXNET3_TCD_TXIDX_SIZE 12 +#define VMXNET3_TCD_GEN_DWORD_SHIFT 3 + +struct Vmxnet3_TxCompDesc { + union { + struct { +#ifdef __BIG_ENDIAN_BITFIELD + u32 ext1:20; + u32 txdIdx:12; /* Index of the EOP TxDesc */ +#else + u32 txdIdx:12; /* Index of the EOP TxDesc */ + u32 ext1:20; +#endif + }; + u32 val1; + }; + __le32 ext2; + __le32 ext3; + + union { + struct { +#ifdef __BIG_ENDIAN_BITFIELD + u32 gen:1; /* generation bit */ + u32 type:7; /* completion type */ + u32 rsvd:24; +#else + u32 rsvd:24; + u32 type:7; /* completion type */ + u32 gen:1; /* generation bit */ +#endif + }; + u32 val2; + }; +}; + +struct Vmxnet3_RxDesc { + __le64 addr; + union { + struct { +#ifdef __BIG_ENDIAN_BITFIELD + u32 gen:1; /* Generation bit */ + u32 rsvd:15; + u32 dtype:1; /* Descriptor type */ + u32 btype:1; /* Buffer Type */ + u32 len:14; +#else + u32 len:14; + u32 btype:1; /* Buffer Type */ + u32 dtype:1; /* Descriptor type */ + u32 rsvd:15; + u32 gen:1; /* Generation bit */ +#endif + }; + u32 val1; + }; + u32 ext1; +}; + +/* values of RXD.BTYPE */ +#define VMXNET3_RXD_BTYPE_HEAD 0 /* head only */ +#define VMXNET3_RXD_BTYPE_BODY 1 /* body only */ + +/* fields in RxDesc we access w/o using bit fields */ +#define VMXNET3_RXD_BTYPE_SHIFT 14 +#define VMXNET3_RXD_GEN_SHIFT 31 + +struct Vmxnet3_RxCompDesc { + union { + struct { +#ifdef __BIG_ENDIAN_BITFIELD + u32 ext2:1; + u32 cnc:1; /* Checksum Not Calculated */ + u32 rssType:4; /* RSS hash type used */ + u32 rqID:10; /* rx queue/ring ID */ + u32 sop:1; /* Start of Packet */ + u32 eop:1; /* End of Packet */ + u32 ext1:2; + u32 rxdIdx:12; /* Index of the RxDesc */ +#else + u32 rxdIdx:12; /* Index of the RxDesc */ + u32 ext1:2; + u32 eop:1; /* End of Packet */ + u32 sop:1; /* Start of Packet */ + u32 rqID:10; /* rx queue/ring 
ID */ + u32 rssType:4; /* RSS hash type used */ + u32 cnc:1; /* Checksum Not Calculated */ + u32 ext2:1; +#endif /* __BIG_ENDIAN_BITFIELD */ + }; + u32 val1; + }; + + __le32 rssHash; /* RSS hash value */ + + union { + struct { +#ifdef __BIG_ENDIAN_BITFIELD + u32 tci:16; /* Tag stripped */ + u32 ts:1; /* Tag is stripped */ + u32 err:1; /* Error */ + u32 len:14; /* data length */ +#else + u32 len:14; /* data length */ + u32 err:1; /* Error */ + u32 ts:1; /* Tag is stripped */ + u32 tci:16; /* Tag stripped */ +#endif /* __BIG_ENDIAN_BITFIELD */ + }; + u32 val2; + }; + + union { + struct { +#ifdef __BIG_ENDIAN_BITFIELD + u32 gen:1; /* generation bit */ + u32 type:7; /* completion type */ + u32 fcs:1; /* Frame CRC correct */ + u32 frg:1; /* IP Fragment */ + u32 v4:1; /* IPv4 */ + u32 v6:1; /* IPv6 */ + u32 ipc:1; /* IP Checksum Correct */ + u32 tcp:1; /* TCP packet */ + u32 udp:1; /* UDP packet */ + u32 tuc:1; /* TCP/UDP Checksum Correct */ + u32 csum:16; +#else + u32 csum:16; + u32 tuc:1; /* TCP/UDP Checksum Correct */ + u32 udp:1; /* UDP packet */ + u32 tcp:1; /* TCP packet */ + u32 ipc:1; /* IP Checksum Correct */ + u32 v6:1; /* IPv6 */ + u32 v4:1; /* IPv4 */ + u32 frg:1; /* IP Fragment */ + u32 fcs:1; /* Frame CRC correct */ + u32 type:7; /* completion type */ + u32 gen:1; /* generation bit */ +#endif /* __BIG_ENDIAN_BITFIELD */ + }; + u32 val3; + }; +}; + +/* fields in RxCompDesc we access via Vmxnet3_GenericDesc.dword[3] */ +#define VMXNET3_RCD_TUC_SHIFT 16 +#define VMXNET3_RCD_IPC_SHIFT 19 + +/* fields in RxCompDesc we access via Vmxnet3_GenericDesc.qword[1] */ +#define VMXNET3_RCD_TYPE_SHIFT 56 +#define VMXNET3_RCD_GEN_SHIFT 63 + +/* csum OK for TCP/UDP pkts over IP */ +#define VMXNET3_RCD_CSUM_OK (1 << VMXNET3_RCD_TUC_SHIFT | \ + 1 << VMXNET3_RCD_IPC_SHIFT) +#define VMXNET3_TXD_GEN_SIZE 1 +#define VMXNET3_TXD_EOP_SIZE 1 + +/* value of RxCompDesc.rssType */ +enum { + VMXNET3_RCD_RSS_TYPE_NONE = 0, + VMXNET3_RCD_RSS_TYPE_IPV4 = 1, + VMXNET3_RCD_RSS_TYPE_TCPIPV4 = 2, + VMXNET3_RCD_RSS_TYPE_IPV6 = 3, + VMXNET3_RCD_RSS_TYPE_TCPIPV6 = 4, +}; + + +/* a union for accessing all cmd/completion descriptors */ +union Vmxnet3_GenericDesc { + __le64 qword[2]; + __le32 dword[4]; + __le16 word[8]; + struct Vmxnet3_TxDesc txd; + struct Vmxnet3_RxDesc rxd; + struct Vmxnet3_TxCompDesc tcd; + struct Vmxnet3_RxCompDesc rcd; +}; + +#define VMXNET3_INIT_GEN 1 + +/* Max size of a single tx buffer */ +#define VMXNET3_MAX_TX_BUF_SIZE (1 << 14) + +/* # of tx desc needed for a tx buffer size */ +#define VMXNET3_TXD_NEEDED(size) (((size) + VMXNET3_MAX_TX_BUF_SIZE - 1) / \ + VMXNET3_MAX_TX_BUF_SIZE) + +/* max # of tx descs for a non-tso pkt */ +#define VMXNET3_MAX_TXD_PER_PKT 16 + +/* Max size of a single rx buffer */ +#define VMXNET3_MAX_RX_BUF_SIZE ((1 << 14) - 1) +/* Minimum size of a type 0 buffer */ +#define VMXNET3_MIN_T0_BUF_SIZE 128 +#define VMXNET3_MAX_CSUM_OFFSET 1024 + +/* Ring base address alignment */ +#define VMXNET3_RING_BA_ALIGN 512 +#define VMXNET3_RING_BA_MASK (VMXNET3_RING_BA_ALIGN - 1) + +/* Ring size must be a multiple of 32 */ +#define VMXNET3_RING_SIZE_ALIGN 32 +#define VMXNET3_RING_SIZE_MASK (VMXNET3_RING_SIZE_ALIGN - 1) + +/* Max ring size */ +#define VMXNET3_TX_RING_MAX_SIZE 4096 +#define VMXNET3_TC_RING_MAX_SIZE 4096 +#define VMXNET3_RX_RING_MAX_SIZE 4096 +#define VMXNET3_RC_RING_MAX_SIZE 8192 + +/* a list of reasons for queue stop */ + +enum { + VMXNET3_ERR_NOEOP = 0x80000000, /* cannot find the EOP desc of a pkt */ + VMXNET3_ERR_TXD_REUSE = 0x80000001, /* reuse TxDesc before tx 
completion */ + VMXNET3_ERR_BIG_PKT = 0x80000002, /* too many TxDesc for a pkt */ + VMXNET3_ERR_DESC_NOT_SPT = 0x80000003, /* descriptor type not supported */ + VMXNET3_ERR_SMALL_BUF = 0x80000004, /* type 0 buffer too small */ + VMXNET3_ERR_STRESS = 0x80000005, /* stress option firing in vmkernel */ + VMXNET3_ERR_SWITCH = 0x80000006, /* mode switch failure */ + VMXNET3_ERR_TXD_INVALID = 0x80000007, /* invalid TxDesc */ +}; + +/* completion descriptor types */ +#define VMXNET3_CDTYPE_TXCOMP 0 /* Tx Completion Descriptor */ +#define VMXNET3_CDTYPE_RXCOMP 3 /* Rx Completion Descriptor */ + +enum { + VMXNET3_GOS_BITS_UNK = 0, /* unknown */ + VMXNET3_GOS_BITS_32 = 1, + VMXNET3_GOS_BITS_64 = 2, +}; + +#define VMXNET3_GOS_TYPE_UNK 0 /* unknown */ +#define VMXNET3_GOS_TYPE_LINUX 1 +#define VMXNET3_GOS_TYPE_WIN 2 +#define VMXNET3_GOS_TYPE_SOLARIS 3 +#define VMXNET3_GOS_TYPE_FREEBSD 4 +#define VMXNET3_GOS_TYPE_PXE 5 + +struct Vmxnet3_GOSInfo { +#ifdef __BIG_ENDIAN_BITFIELD + u32 gosMisc:10; /* other info about gos */ + u32 gosVer:16; /* gos version */ + u32 gosType:4; /* which guest */ + u32 gosBits:2; /* 32-bit or 64-bit? */ +#else + u32 gosBits:2; /* 32-bit or 64-bit? */ + u32 gosType:4; /* which guest */ + u32 gosVer:16; /* gos version */ + u32 gosMisc:10; /* other info about gos */ +#endif /* __BIG_ENDIAN_BITFIELD */ +}; + +struct Vmxnet3_DriverInfo { + __le32 version; + struct Vmxnet3_GOSInfo gos; + __le32 vmxnet3RevSpt; + __le32 uptVerSpt; +}; + + +#define VMXNET3_REV1_MAGIC 0xbabefee1 + +/* + * QueueDescPA must be 128 bytes aligned. It points to an array of + * Vmxnet3_TxQueueDesc followed by an array of Vmxnet3_RxQueueDesc. + * The number of Vmxnet3_TxQueueDesc/Vmxnet3_RxQueueDesc are specified by + * Vmxnet3_MiscConf.numTxQueues/numRxQueues, respectively. + */ +#define VMXNET3_QUEUE_DESC_ALIGN 128 + + +struct Vmxnet3_MiscConf { + struct Vmxnet3_DriverInfo driverInfo; + __le64 uptFeatures; + __le64 ddPA; /* driver data PA */ + __le64 queueDescPA; /* queue descriptor table PA */ + __le32 ddLen; /* driver data len */ + __le32 queueDescLen; /* queue desc. 
table len in bytes */ + __le32 mtu; + __le16 maxNumRxSG; + u8 numTxQueues; + u8 numRxQueues; + __le32 reserved[4]; +}; + + +struct Vmxnet3_TxQueueConf { + __le64 txRingBasePA; + __le64 dataRingBasePA; + __le64 compRingBasePA; + __le64 ddPA; /* driver data */ + __le64 reserved; + __le32 txRingSize; /* # of tx desc */ + __le32 dataRingSize; /* # of data desc */ + __le32 compRingSize; /* # of comp desc */ + __le32 ddLen; /* size of driver data */ + u8 intrIdx; + u8 _pad[7]; +}; + + +struct Vmxnet3_RxQueueConf { + __le64 rxRingBasePA[2]; + __le64 compRingBasePA; + __le64 ddPA; /* driver data */ + __le64 reserved; + __le32 rxRingSize[2]; /* # of rx desc */ + __le32 compRingSize; /* # of rx comp desc */ + __le32 ddLen; /* size of driver data */ + u8 intrIdx; + u8 _pad[7]; +}; + + +enum vmxnet3_intr_mask_mode { + VMXNET3_IMM_AUTO = 0, + VMXNET3_IMM_ACTIVE = 1, + VMXNET3_IMM_LAZY = 2 +}; + +enum vmxnet3_intr_type { + VMXNET3_IT_AUTO = 0, + VMXNET3_IT_INTX = 1, + VMXNET3_IT_MSI = 2, + VMXNET3_IT_MSIX = 3 +}; + +#define VMXNET3_MAX_TX_QUEUES 8 +#define VMXNET3_MAX_RX_QUEUES 16 +/* addition 1 for events */ +#define VMXNET3_MAX_INTRS 25 + +/* value of intrCtrl */ +#define VMXNET3_IC_DISABLE_ALL 0x1 /* bit 0 */ + + +struct Vmxnet3_IntrConf { + bool autoMask; + u8 numIntrs; /* # of interrupts */ + u8 eventIntrIdx; + u8 modLevels[VMXNET3_MAX_INTRS]; /* moderation level for + * each intr */ + __le32 intrCtrl; + __le32 reserved[2]; +}; + +/* one bit per VLAN ID, the size is in the units of u32 */ +#define VMXNET3_VFT_SIZE (4096/(sizeof(uint32_t)*8)) + + +struct Vmxnet3_QueueStatus { + bool stopped; + u8 _pad[3]; + __le32 error; +}; + + +struct Vmxnet3_TxQueueCtrl { + __le32 txNumDeferred; + __le32 txThreshold; + __le64 reserved; +}; + + +struct Vmxnet3_RxQueueCtrl { + bool updateRxProd; + u8 _pad[7]; + __le64 reserved; +}; + +enum { + VMXNET3_RXM_UCAST = 0x01, /* unicast only */ + VMXNET3_RXM_MCAST = 0x02, /* multicast passing the filters */ + VMXNET3_RXM_BCAST = 0x04, /* broadcast only */ + VMXNET3_RXM_ALL_MULTI = 0x08, /* all multicast */ + VMXNET3_RXM_PROMISC = 0x10 /* promiscuous */ +}; + +struct Vmxnet3_RxFilterConf { + __le32 rxMode; /* VMXNET3_RXM_xxx */ + __le16 mfTableLen; /* size of the multicast filter table */ + __le16 _pad1; + __le64 mfTablePA; /* PA of the multicast filters table */ + __le32 vfTable[VMXNET3_VFT_SIZE]; /* vlan filter */ +}; + + +#define VMXNET3_PM_MAX_FILTERS 6 +#define VMXNET3_PM_MAX_PATTERN_SIZE 128 +#define VMXNET3_PM_MAX_MASK_SIZE (VMXNET3_PM_MAX_PATTERN_SIZE / 8) + +#define VMXNET3_PM_WAKEUP_MAGIC cpu_to_le16(0x01) /* wake up on magic pkts */ +#define VMXNET3_PM_WAKEUP_FILTER cpu_to_le16(0x02) /* wake up on pkts matching + * filters */ + + +struct Vmxnet3_PM_PktFilter { + u8 maskSize; + u8 patternSize; + u8 mask[VMXNET3_PM_MAX_MASK_SIZE]; + u8 pattern[VMXNET3_PM_MAX_PATTERN_SIZE]; + u8 pad[6]; +}; + + +struct Vmxnet3_PMConf { + __le16 wakeUpEvents; /* VMXNET3_PM_WAKEUP_xxx */ + u8 numFilters; + u8 pad[5]; + struct Vmxnet3_PM_PktFilter filters[VMXNET3_PM_MAX_FILTERS]; +}; + + +struct Vmxnet3_VariableLenConfDesc { + __le32 confVer; + __le32 confLen; + __le64 confPA; +}; + + +struct Vmxnet3_TxQueueDesc { + struct Vmxnet3_TxQueueCtrl ctrl; + struct Vmxnet3_TxQueueConf conf; + + /* Driver read after a GET command */ + struct Vmxnet3_QueueStatus status; + struct UPT1_TxStats stats; + u8 _pad[88]; /* 128 aligned */ +}; + + +struct Vmxnet3_RxQueueDesc { + struct Vmxnet3_RxQueueCtrl ctrl; + struct Vmxnet3_RxQueueConf conf; + /* Driver read after a GET commad */ + struct 
Vmxnet3_QueueStatus status; + struct UPT1_RxStats stats; + u8 __pad[88]; /* 128 aligned */ +}; + + +struct Vmxnet3_DSDevRead { + /* read-only region for device, read by dev in response to a SET cmd */ + struct Vmxnet3_MiscConf misc; + struct Vmxnet3_IntrConf intrConf; + struct Vmxnet3_RxFilterConf rxFilterConf; + struct Vmxnet3_VariableLenConfDesc rssConfDesc; + struct Vmxnet3_VariableLenConfDesc pmConfDesc; + struct Vmxnet3_VariableLenConfDesc pluginConfDesc; +}; + +/* All structures in DriverShared are padded to multiples of 8 bytes */ +struct Vmxnet3_DriverShared { + __le32 magic; + /* make devRead start at 64bit boundaries */ + __le32 pad; + struct Vmxnet3_DSDevRead devRead; + __le32 ecr; + __le32 reserved[5]; +}; + + +#define VMXNET3_ECR_RQERR (1 << 0) +#define VMXNET3_ECR_TQERR (1 << 1) +#define VMXNET3_ECR_LINK (1 << 2) +#define VMXNET3_ECR_DIC (1 << 3) +#define VMXNET3_ECR_DEBUG (1 << 4) + +/* flip the gen bit of a ring */ +#define VMXNET3_FLIP_RING_GEN(gen) ((gen) = (gen) ^ 0x1) + +/* only use this if moving the idx won't affect the gen bit */ +#define VMXNET3_INC_RING_IDX_ONLY(idx, ring_size) \ + do {\ + (idx)++;\ + if (unlikely((idx) == (ring_size))) {\ + (idx) = 0;\ + } \ + } while (0) + +#define VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid) \ + (vfTable[vid >> 5] |= (1 << (vid & 31))) +#define VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid) \ + (vfTable[vid >> 5] &= ~(1 << (vid & 31))) + +#define VMXNET3_VFTABLE_ENTRY_IS_SET(vfTable, vid) \ + ((vfTable[vid >> 5] & (1 << (vid & 31))) != 0) + +#define VMXNET3_MAX_MTU 9000 +#define VMXNET3_MIN_MTU 60 + +#define VMXNET3_LINK_UP (10000 << 16 | 1) /* 10 Gbps, up */ +#define VMXNET3_LINK_DOWN 0 + +#undef u64 +#undef u32 +#undef u16 +#undef u8 +#undef __le16 +#undef __le32 +#undef __le64 +#if defined(HOST_WORDS_BIGENDIAN) +#undef __BIG_ENDIAN_BITFIELD +#endif + +#endif diff --git a/hw/net/vmxnet3_defs.h b/hw/net/vmxnet3_defs.h new file mode 100644 index 000000000..71440509c --- /dev/null +++ b/hw/net/vmxnet3_defs.h @@ -0,0 +1,140 @@ +/* + * QEMU VMWARE VMXNET3 paravirtual NIC + * + * Copyright (c) 2012 Ravello Systems LTD (http://ravellosystems.com) + * + * Developed by Daynix Computing LTD (http://www.daynix.com) + * + * Authors: + * Dmitry Fleytman + * Tamir Shomer + * Yan Vugenfirer + * + * This work is licensed under the terms of the GNU GPL, version 2. + * See the COPYING file in the top-level directory. 
+ */ + +#ifndef HW_NET_VMXNET3_DEFS_H +#define HW_NET_VMXNET3_DEFS_H + +#include "net/net.h" +#include "hw/net/vmxnet3.h" +#include "qom/object.h" + +#define TYPE_VMXNET3 "vmxnet3" +typedef struct VMXNET3State VMXNET3State; +DECLARE_INSTANCE_CHECKER(VMXNET3State, VMXNET3, + TYPE_VMXNET3) + +/* Device state and helper functions */ +#define VMXNET3_RX_RINGS_PER_QUEUE (2) + +/* Cyclic ring abstraction */ +typedef struct { + hwaddr pa; + uint32_t size; + uint32_t cell_size; + uint32_t next; + uint8_t gen; +} Vmxnet3Ring; + +typedef struct { + Vmxnet3Ring tx_ring; + Vmxnet3Ring comp_ring; + + uint8_t intr_idx; + hwaddr tx_stats_pa; + struct UPT1_TxStats txq_stats; +} Vmxnet3TxqDescr; + +typedef struct { + Vmxnet3Ring rx_ring[VMXNET3_RX_RINGS_PER_QUEUE]; + Vmxnet3Ring comp_ring; + uint8_t intr_idx; + hwaddr rx_stats_pa; + struct UPT1_RxStats rxq_stats; +} Vmxnet3RxqDescr; + +typedef struct { + bool is_masked; + bool is_pending; + bool is_asserted; +} Vmxnet3IntState; + +struct VMXNET3State { + PCIDevice parent_obj; + NICState *nic; + NICConf conf; + MemoryRegion bar0; + MemoryRegion bar1; + MemoryRegion msix_bar; + + Vmxnet3RxqDescr rxq_descr[VMXNET3_DEVICE_MAX_RX_QUEUES]; + Vmxnet3TxqDescr txq_descr[VMXNET3_DEVICE_MAX_TX_QUEUES]; + + /* Whether MSI-X support was installed successfully */ + bool msix_used; + hwaddr drv_shmem; + hwaddr temp_shared_guest_driver_memory; + + uint8_t txq_num; + + /* This boolean tells whether RX packet being indicated has to */ + /* be split into head and body chunks from different RX rings */ + bool rx_packets_compound; + + bool rx_vlan_stripping; + bool lro_supported; + + uint8_t rxq_num; + + /* Network MTU */ + uint32_t mtu; + + /* Maximum number of fragments for indicated TX packets */ + uint32_t max_tx_frags; + + /* Maximum number of fragments for indicated RX packets */ + uint16_t max_rx_frags; + + /* Index for events interrupt */ + uint8_t event_int_idx; + + /* Whether automatic interrupts masking enabled */ + bool auto_int_masking; + + bool peer_has_vhdr; + + /* TX packets to QEMU interface */ + struct NetTxPkt *tx_pkt; + uint32_t offload_mode; + uint32_t cso_or_gso_size; + uint16_t tci; + bool needs_vlan; + + struct NetRxPkt *rx_pkt; + + bool tx_sop; + bool skip_current_tx_pkt; + + uint32_t device_active; + uint32_t last_command; + + uint32_t link_status_and_speed; + + Vmxnet3IntState interrupt_states[VMXNET3_MAX_INTRS]; + + uint32_t temp_mac; /* To store the low part first */ + + MACAddr perm_mac; + uint32_t vlan_table[VMXNET3_VFT_SIZE]; + uint32_t rx_mode; + MACAddr *mcast_list; + uint32_t mcast_list_len; + uint32_t mcast_list_buff_size; /* needed for live migration. */ + + /* Compatibility flags for migration */ + uint32_t compat_flags; +}; + +#endif diff --git a/hw/net/vmxnet_debug.h b/hw/net/vmxnet_debug.h new file mode 100644 index 000000000..cb50aa95d --- /dev/null +++ b/hw/net/vmxnet_debug.h @@ -0,0 +1,145 @@ +/* + * QEMU VMWARE VMXNET* paravirtual NICs - debugging facilities + * + * Copyright (c) 2012 Ravello Systems LTD (http://ravellosystems.com) + * + * Developed by Daynix Computing LTD (http://www.daynix.com) + * + * Authors: + * Dmitry Fleytman + * Tamir Shomer + * Yan Vugenfirer + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ * + */ + +#ifndef QEMU_VMXNET_DEBUG_H +#define QEMU_VMXNET_DEBUG_H + +#define VMXNET_DEVICE_NAME "vmxnet3" + +#define VMXNET_DEBUG_WARNINGS +#define VMXNET_DEBUG_ERRORS + +#undef VMXNET_DEBUG_CB +#undef VMXNET_DEBUG_INTERRUPTS +#undef VMXNET_DEBUG_CONFIG +#undef VMXNET_DEBUG_RINGS +#undef VMXNET_DEBUG_PACKETS +#undef VMXNET_DEBUG_SHMEM_ACCESS + +#ifdef VMXNET_DEBUG_CB +# define VMXNET_DEBUG_CB_ENABLED 1 +#else +# define VMXNET_DEBUG_CB_ENABLED 0 +#endif + +#ifdef VMXNET_DEBUG_WARNINGS +# define VMXNET_DEBUG_WARNINGS_ENABLED 1 +#else +# define VMXNET_DEBUG_WARNINGS_ENABLED 0 +#endif + +#ifdef VMXNET_DEBUG_ERRORS +# define VMXNET_DEBUG_ERRORS_ENABLED 1 +#else +# define VMXNET_DEBUG_ERRORS_ENABLED 0 +#endif + +#ifdef VMXNET_DEBUG_CONFIG +# define VMXNET_DEBUG_CONFIG_ENABLED 1 +#else +# define VMXNET_DEBUG_CONFIG_ENABLED 0 +#endif + +#ifdef VMXNET_DEBUG_RINGS +# define VMXNET_DEBUG_RINGS_ENABLED 1 +#else +# define VMXNET_DEBUG_RINGS_ENABLED 0 +#endif + +#ifdef VMXNET_DEBUG_PACKETS +# define VMXNET_DEBUG_PACKETS_ENABLED 1 +#else +# define VMXNET_DEBUG_PACKETS_ENABLED 0 +#endif + +#ifdef VMXNET_DEBUG_INTERRUPTS +# define VMXNET_DEBUG_INTERRUPTS_ENABLED 1 +#else +# define VMXNET_DEBUG_INTERRUPTS_ENABLED 0 +#endif + +#ifdef VMXNET_DEBUG_SHMEM_ACCESS +# define VMXNET_DEBUG_SHMEM_ACCESS_ENABLED 1 +#else +# define VMXNET_DEBUG_SHMEM_ACCESS_ENABLED 0 +#endif + +#define VMW_SHPRN(fmt, ...) \ + do { \ + if (VMXNET_DEBUG_SHMEM_ACCESS_ENABLED) { \ + printf("[%s][SH][%s]: " fmt "\n", VMXNET_DEVICE_NAME, __func__, \ + ## __VA_ARGS__); \ + } \ + } while (0) + +#define VMW_CBPRN(fmt, ...) \ + do { \ + if (VMXNET_DEBUG_CB_ENABLED) { \ + printf("[%s][CB][%s]: " fmt "\n", VMXNET_DEVICE_NAME, __func__, \ + ## __VA_ARGS__); \ + } \ + } while (0) + +#define VMW_PKPRN(fmt, ...) \ + do { \ + if (VMXNET_DEBUG_PACKETS_ENABLED) { \ + printf("[%s][PK][%s]: " fmt "\n", VMXNET_DEVICE_NAME, __func__, \ + ## __VA_ARGS__); \ + } \ + } while (0) + +#define VMW_WRPRN(fmt, ...) \ + do { \ + if (VMXNET_DEBUG_WARNINGS_ENABLED) { \ + printf("[%s][WR][%s]: " fmt "\n", VMXNET_DEVICE_NAME, __func__, \ + ## __VA_ARGS__); \ + } \ + } while (0) + +#define VMW_ERPRN(fmt, ...) \ + do { \ + if (VMXNET_DEBUG_ERRORS_ENABLED) { \ + printf("[%s][ER][%s]: " fmt "\n", VMXNET_DEVICE_NAME, __func__, \ + ## __VA_ARGS__); \ + } \ + } while (0) + +#define VMW_IRPRN(fmt, ...) \ + do { \ + if (VMXNET_DEBUG_INTERRUPTS_ENABLED) { \ + printf("[%s][IR][%s]: " fmt "\n", VMXNET_DEVICE_NAME, __func__, \ + ## __VA_ARGS__); \ + } \ + } while (0) + +#define VMW_CFPRN(fmt, ...) \ + do { \ + if (VMXNET_DEBUG_CONFIG_ENABLED) { \ + printf("[%s][CF][%s]: " fmt "\n", VMXNET_DEVICE_NAME, __func__, \ + ## __VA_ARGS__); \ + } \ + } while (0) + +#define VMW_RIPRN(fmt, ...) \ + do { \ + if (VMXNET_DEBUG_RINGS_ENABLED) { \ + printf("[%s][RI][%s]: " fmt "\n", VMXNET_DEVICE_NAME, __func__, \ + ## __VA_ARGS__); \ + } \ + } while (0) + +#endif /* QEMU_VMXNET_DEBUG_H */ diff --git a/hw/net/xen_nic.c b/hw/net/xen_nic.c new file mode 100644 index 000000000..5c815b4f0 --- /dev/null +++ b/hw/net/xen_nic.c @@ -0,0 +1,411 @@ +/* + * xen paravirt network card backend + * + * (c) Gerd Hoffmann + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; under version 2 of the License. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . + * + * Contributions after 2012-01-13 are licensed under the terms of the + * GNU GPL, version 2 or (at your option) any later version. + */ + +#include "qemu/osdep.h" +#include +#include +#include + +#include "net/net.h" +#include "net/checksum.h" +#include "net/util.h" +#include "hw/xen/xen-legacy-backend.h" + +#include "hw/xen/interface/io/netif.h" + +/* ------------------------------------------------------------- */ + +struct XenNetDev { + struct XenLegacyDevice xendev; /* must be first */ + char *mac; + int tx_work; + int tx_ring_ref; + int rx_ring_ref; + struct netif_tx_sring *txs; + struct netif_rx_sring *rxs; + netif_tx_back_ring_t tx_ring; + netif_rx_back_ring_t rx_ring; + NICConf conf; + NICState *nic; +}; + +/* ------------------------------------------------------------- */ + +static void net_tx_response(struct XenNetDev *netdev, netif_tx_request_t *txp, int8_t st) +{ + RING_IDX i = netdev->tx_ring.rsp_prod_pvt; + netif_tx_response_t *resp; + int notify; + + resp = RING_GET_RESPONSE(&netdev->tx_ring, i); + resp->id = txp->id; + resp->status = st; + +#if 0 + if (txp->flags & NETTXF_extra_info) { + RING_GET_RESPONSE(&netdev->tx_ring, ++i)->status = NETIF_RSP_NULL; + } +#endif + + netdev->tx_ring.rsp_prod_pvt = ++i; + RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&netdev->tx_ring, notify); + if (notify) { + xen_pv_send_notify(&netdev->xendev); + } + + if (i == netdev->tx_ring.req_cons) { + int more_to_do; + RING_FINAL_CHECK_FOR_REQUESTS(&netdev->tx_ring, more_to_do); + if (more_to_do) { + netdev->tx_work++; + } + } +} + +static void net_tx_error(struct XenNetDev *netdev, netif_tx_request_t *txp, RING_IDX end) +{ +#if 0 + /* + * Hmm, why netback fails everything in the ring? + * Should we do that even when not supporting SG and TSO? + */ + RING_IDX cons = netdev->tx_ring.req_cons; + + do { + make_tx_response(netif, txp, NETIF_RSP_ERROR); + if (cons >= end) { + break; + } + txp = RING_GET_REQUEST(&netdev->tx_ring, cons++); + } while (1); + netdev->tx_ring.req_cons = cons; + netif_schedule_work(netif); + netif_put(netif); +#else + net_tx_response(netdev, txp, NETIF_RSP_ERROR); +#endif +} + +static void net_tx_packets(struct XenNetDev *netdev) +{ + netif_tx_request_t txreq; + RING_IDX rc, rp; + void *page; + void *tmpbuf = NULL; + + for (;;) { + rc = netdev->tx_ring.req_cons; + rp = netdev->tx_ring.sring->req_prod; + xen_rmb(); /* Ensure we see queued requests up to 'rp'. */ + + while ((rc != rp)) { + if (RING_REQUEST_CONS_OVERFLOW(&netdev->tx_ring, rc)) { + break; + } + memcpy(&txreq, RING_GET_REQUEST(&netdev->tx_ring, rc), sizeof(txreq)); + netdev->tx_ring.req_cons = ++rc; + +#if 1 + /* should not happen in theory, we don't announce the * + * feature-{sg,gso,whatelse} flags in xenstore (yet?) 
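+ * A frontend that honours the (un)advertised feature set never sets
+ * NETTXF_extra_info or NETTXF_more_data, so the two checks below
+ * only guard against misbehaving guests.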
*/ + if (txreq.flags & NETTXF_extra_info) { + xen_pv_printf(&netdev->xendev, 0, "FIXME: extra info flag\n"); + net_tx_error(netdev, &txreq, rc); + continue; + } + if (txreq.flags & NETTXF_more_data) { + xen_pv_printf(&netdev->xendev, 0, "FIXME: more data flag\n"); + net_tx_error(netdev, &txreq, rc); + continue; + } +#endif + + if (txreq.size < 14) { + xen_pv_printf(&netdev->xendev, 0, "bad packet size: %d\n", + txreq.size); + net_tx_error(netdev, &txreq, rc); + continue; + } + + if ((txreq.offset + txreq.size) > XC_PAGE_SIZE) { + xen_pv_printf(&netdev->xendev, 0, "error: page crossing\n"); + net_tx_error(netdev, &txreq, rc); + continue; + } + + xen_pv_printf(&netdev->xendev, 3, + "tx packet ref %d, off %d, len %d, flags 0x%x%s%s%s%s\n", + txreq.gref, txreq.offset, txreq.size, txreq.flags, + (txreq.flags & NETTXF_csum_blank) ? " csum_blank" : "", + (txreq.flags & NETTXF_data_validated) ? " data_validated" : "", + (txreq.flags & NETTXF_more_data) ? " more_data" : "", + (txreq.flags & NETTXF_extra_info) ? " extra_info" : ""); + + page = xen_be_map_grant_ref(&netdev->xendev, txreq.gref, + PROT_READ); + if (page == NULL) { + xen_pv_printf(&netdev->xendev, 0, + "error: tx gref dereference failed (%d)\n", + txreq.gref); + net_tx_error(netdev, &txreq, rc); + continue; + } + if (txreq.flags & NETTXF_csum_blank) { + /* have read-only mapping -> can't fill checksum in-place */ + if (!tmpbuf) { + tmpbuf = g_malloc(XC_PAGE_SIZE); + } + memcpy(tmpbuf, page + txreq.offset, txreq.size); + net_checksum_calculate(tmpbuf, txreq.size, CSUM_ALL); + qemu_send_packet(qemu_get_queue(netdev->nic), tmpbuf, + txreq.size); + } else { + qemu_send_packet(qemu_get_queue(netdev->nic), + page + txreq.offset, txreq.size); + } + xen_be_unmap_grant_ref(&netdev->xendev, page); + net_tx_response(netdev, &txreq, NETIF_RSP_OKAY); + } + if (!netdev->tx_work) { + break; + } + netdev->tx_work = 0; + } + g_free(tmpbuf); +} + +/* ------------------------------------------------------------- */ + +static void net_rx_response(struct XenNetDev *netdev, + netif_rx_request_t *req, int8_t st, + uint16_t offset, uint16_t size, + uint16_t flags) +{ + RING_IDX i = netdev->rx_ring.rsp_prod_pvt; + netif_rx_response_t *resp; + int notify; + + resp = RING_GET_RESPONSE(&netdev->rx_ring, i); + resp->offset = offset; + resp->flags = flags; + resp->id = req->id; + resp->status = (int16_t)size; + if (st < 0) { + resp->status = (int16_t)st; + } + + xen_pv_printf(&netdev->xendev, 3, + "rx response: idx %d, status %d, flags 0x%x\n", + i, resp->status, resp->flags); + + netdev->rx_ring.rsp_prod_pvt = ++i; + RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&netdev->rx_ring, notify); + if (notify) { + xen_pv_send_notify(&netdev->xendev); + } +} + +#define NET_IP_ALIGN 2 + +static ssize_t net_rx_packet(NetClientState *nc, const uint8_t *buf, size_t size) +{ + struct XenNetDev *netdev = qemu_get_nic_opaque(nc); + netif_rx_request_t rxreq; + RING_IDX rc, rp; + void *page; + + if (netdev->xendev.be_state != XenbusStateConnected) { + return -1; + } + + rc = netdev->rx_ring.req_cons; + rp = netdev->rx_ring.sring->req_prod; + xen_rmb(); /* Ensure we see queued requests up to 'rp'. 
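+ The barrier pairs with the frontend's update of req_prod, so the
+ request slot copied below is guaranteed to be fully populated.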
*/ + + if (rc == rp || RING_REQUEST_CONS_OVERFLOW(&netdev->rx_ring, rc)) { + return 0; + } + if (size > XC_PAGE_SIZE - NET_IP_ALIGN) { + xen_pv_printf(&netdev->xendev, 0, "packet too big (%lu > %ld)", + (unsigned long)size, XC_PAGE_SIZE - NET_IP_ALIGN); + return -1; + } + + memcpy(&rxreq, RING_GET_REQUEST(&netdev->rx_ring, rc), sizeof(rxreq)); + netdev->rx_ring.req_cons = ++rc; + + page = xen_be_map_grant_ref(&netdev->xendev, rxreq.gref, PROT_WRITE); + if (page == NULL) { + xen_pv_printf(&netdev->xendev, 0, + "error: rx gref dereference failed (%d)\n", + rxreq.gref); + net_rx_response(netdev, &rxreq, NETIF_RSP_ERROR, 0, 0, 0); + return -1; + } + memcpy(page + NET_IP_ALIGN, buf, size); + xen_be_unmap_grant_ref(&netdev->xendev, page); + net_rx_response(netdev, &rxreq, NETIF_RSP_OKAY, NET_IP_ALIGN, size, 0); + + return size; +} + +/* ------------------------------------------------------------- */ + +static NetClientInfo net_xen_info = { + .type = NET_CLIENT_DRIVER_NIC, + .size = sizeof(NICState), + .receive = net_rx_packet, +}; + +static int net_init(struct XenLegacyDevice *xendev) +{ + struct XenNetDev *netdev = container_of(xendev, struct XenNetDev, xendev); + + /* read xenstore entries */ + if (netdev->mac == NULL) { + netdev->mac = xenstore_read_be_str(&netdev->xendev, "mac"); + } + + /* do we have all we need? */ + if (netdev->mac == NULL) { + return -1; + } + + if (net_parse_macaddr(netdev->conf.macaddr.a, netdev->mac) < 0) { + return -1; + } + + netdev->nic = qemu_new_nic(&net_xen_info, &netdev->conf, + "xen", NULL, netdev); + + snprintf(qemu_get_queue(netdev->nic)->info_str, + sizeof(qemu_get_queue(netdev->nic)->info_str), + "nic: xenbus vif macaddr=%s", netdev->mac); + + /* fill info */ + xenstore_write_be_int(&netdev->xendev, "feature-rx-copy", 1); + xenstore_write_be_int(&netdev->xendev, "feature-rx-flip", 0); + + return 0; +} + +static int net_connect(struct XenLegacyDevice *xendev) +{ + struct XenNetDev *netdev = container_of(xendev, struct XenNetDev, xendev); + int rx_copy; + + if (xenstore_read_fe_int(&netdev->xendev, "tx-ring-ref", + &netdev->tx_ring_ref) == -1) { + return -1; + } + if (xenstore_read_fe_int(&netdev->xendev, "rx-ring-ref", + &netdev->rx_ring_ref) == -1) { + return 1; + } + if (xenstore_read_fe_int(&netdev->xendev, "event-channel", + &netdev->xendev.remote_port) == -1) { + return -1; + } + + if (xenstore_read_fe_int(&netdev->xendev, "request-rx-copy", &rx_copy) == -1) { + rx_copy = 0; + } + if (rx_copy == 0) { + xen_pv_printf(&netdev->xendev, 0, + "frontend doesn't support rx-copy.\n"); + return -1; + } + + netdev->txs = xen_be_map_grant_ref(&netdev->xendev, + netdev->tx_ring_ref, + PROT_READ | PROT_WRITE); + if (!netdev->txs) { + return -1; + } + netdev->rxs = xen_be_map_grant_ref(&netdev->xendev, + netdev->rx_ring_ref, + PROT_READ | PROT_WRITE); + if (!netdev->rxs) { + xen_be_unmap_grant_ref(&netdev->xendev, netdev->txs); + netdev->txs = NULL; + return -1; + } + BACK_RING_INIT(&netdev->tx_ring, netdev->txs, XC_PAGE_SIZE); + BACK_RING_INIT(&netdev->rx_ring, netdev->rxs, XC_PAGE_SIZE); + + xen_be_bind_evtchn(&netdev->xendev); + + xen_pv_printf(&netdev->xendev, 1, "ok: tx-ring-ref %d, rx-ring-ref %d, " + "remote port %d, local port %d\n", + netdev->tx_ring_ref, netdev->rx_ring_ref, + netdev->xendev.remote_port, netdev->xendev.local_port); + + net_tx_packets(netdev); + return 0; +} + +static void net_disconnect(struct XenLegacyDevice *xendev) +{ + struct XenNetDev *netdev = container_of(xendev, struct XenNetDev, xendev); + + xen_pv_unbind_evtchn(&netdev->xendev); 
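+ /*
+ * The event channel is unbound first so that no further frontend
+ * notifications can arrive while the rings are unmapped below.
+ */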
+ + if (netdev->txs) { + xen_be_unmap_grant_ref(&netdev->xendev, netdev->txs); + netdev->txs = NULL; + } + if (netdev->rxs) { + xen_be_unmap_grant_ref(&netdev->xendev, netdev->rxs); + netdev->rxs = NULL; + } +} + +static void net_event(struct XenLegacyDevice *xendev) +{ + struct XenNetDev *netdev = container_of(xendev, struct XenNetDev, xendev); + net_tx_packets(netdev); + qemu_flush_queued_packets(qemu_get_queue(netdev->nic)); +} + +static int net_free(struct XenLegacyDevice *xendev) +{ + struct XenNetDev *netdev = container_of(xendev, struct XenNetDev, xendev); + + if (netdev->nic) { + qemu_del_nic(netdev->nic); + netdev->nic = NULL; + } + g_free(netdev->mac); + netdev->mac = NULL; + return 0; +} + +/* ------------------------------------------------------------- */ + +struct XenDevOps xen_netdev_ops = { + .size = sizeof(struct XenNetDev), + .flags = DEVOPS_FLAG_NEED_GNTDEV, + .init = net_init, + .initialise = net_connect, + .event = net_event, + .disconnect = net_disconnect, + .free = net_free, +}; diff --git a/hw/net/xgmac.c b/hw/net/xgmac.c new file mode 100644 index 000000000..0ab6ae91a --- /dev/null +++ b/hw/net/xgmac.c @@ -0,0 +1,442 @@ +/* + * QEMU model of XGMAC Ethernet. + * + * derived from the Xilinx AXI-Ethernet by Edgar E. Iglesias. + * + * Copyright (c) 2011 Calxeda, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "hw/irq.h" +#include "hw/qdev-properties.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "qemu/module.h" +#include "net/net.h" +#include "qom/object.h" + +#ifdef DEBUG_XGMAC +#define DEBUGF_BRK(message, args...) do { \ + fprintf(stderr, (message), ## args); \ + } while (0) +#else +#define DEBUGF_BRK(message, args...) 
do { } while (0) +#endif + +#define XGMAC_CONTROL 0x00000000 /* MAC Configuration */ +#define XGMAC_FRAME_FILTER 0x00000001 /* MAC Frame Filter */ +#define XGMAC_FLOW_CTRL 0x00000006 /* MAC Flow Control */ +#define XGMAC_VLAN_TAG 0x00000007 /* VLAN Tags */ +#define XGMAC_VERSION 0x00000008 /* Version */ +/* VLAN tag for insertion or replacement into tx frames */ +#define XGMAC_VLAN_INCL 0x00000009 +#define XGMAC_LPI_CTRL 0x0000000a /* LPI Control and Status */ +#define XGMAC_LPI_TIMER 0x0000000b /* LPI Timers Control */ +#define XGMAC_TX_PACE 0x0000000c /* Transmit Pace and Stretch */ +#define XGMAC_VLAN_HASH 0x0000000d /* VLAN Hash Table */ +#define XGMAC_DEBUG 0x0000000e /* Debug */ +#define XGMAC_INT_STATUS 0x0000000f /* Interrupt and Control */ +/* HASH table registers */ +#define XGMAC_HASH(n) ((0x00000300/4) + (n)) +#define XGMAC_NUM_HASH 16 +/* Operation Mode */ +#define XGMAC_OPMODE (0x00000400/4) +/* Remote Wake-Up Frame Filter */ +#define XGMAC_REMOTE_WAKE (0x00000700/4) +/* PMT Control and Status */ +#define XGMAC_PMT (0x00000704/4) + +#define XGMAC_ADDR_HIGH(reg) (0x00000010+((reg) * 2)) +#define XGMAC_ADDR_LOW(reg) (0x00000011+((reg) * 2)) + +#define DMA_BUS_MODE 0x000003c0 /* Bus Mode */ +#define DMA_XMT_POLL_DEMAND 0x000003c1 /* Transmit Poll Demand */ +#define DMA_RCV_POLL_DEMAND 0x000003c2 /* Received Poll Demand */ +#define DMA_RCV_BASE_ADDR 0x000003c3 /* Receive List Base */ +#define DMA_TX_BASE_ADDR 0x000003c4 /* Transmit List Base */ +#define DMA_STATUS 0x000003c5 /* Status Register */ +#define DMA_CONTROL 0x000003c6 /* Ctrl (Operational Mode) */ +#define DMA_INTR_ENA 0x000003c7 /* Interrupt Enable */ +#define DMA_MISSED_FRAME_CTR 0x000003c8 /* Missed Frame Counter */ +/* Receive Interrupt Watchdog Timer */ +#define DMA_RI_WATCHDOG_TIMER 0x000003c9 +#define DMA_AXI_BUS 0x000003ca /* AXI Bus Mode */ +#define DMA_AXI_STATUS 0x000003cb /* AXI Status */ +#define DMA_CUR_TX_DESC_ADDR 0x000003d2 /* Current Host Tx Descriptor */ +#define DMA_CUR_RX_DESC_ADDR 0x000003d3 /* Current Host Rx Descriptor */ +#define DMA_CUR_TX_BUF_ADDR 0x000003d4 /* Current Host Tx Buffer */ +#define DMA_CUR_RX_BUF_ADDR 0x000003d5 /* Current Host Rx Buffer */ +#define DMA_HW_FEATURE 0x000003d6 /* Enabled Hardware Features */ + +/* DMA Status register defines */ +#define DMA_STATUS_GMI 0x08000000 /* MMC interrupt */ +#define DMA_STATUS_GLI 0x04000000 /* GMAC Line interface int */ +#define DMA_STATUS_EB_MASK 0x00380000 /* Error Bits Mask */ +#define DMA_STATUS_EB_TX_ABORT 0x00080000 /* Error Bits - TX Abort */ +#define DMA_STATUS_EB_RX_ABORT 0x00100000 /* Error Bits - RX Abort */ +#define DMA_STATUS_TS_MASK 0x00700000 /* Transmit Process State */ +#define DMA_STATUS_TS_SHIFT 20 +#define DMA_STATUS_RS_MASK 0x000e0000 /* Receive Process State */ +#define DMA_STATUS_RS_SHIFT 17 +#define DMA_STATUS_NIS 0x00010000 /* Normal Interrupt Summary */ +#define DMA_STATUS_AIS 0x00008000 /* Abnormal Interrupt Summary */ +#define DMA_STATUS_ERI 0x00004000 /* Early Receive Interrupt */ +#define DMA_STATUS_FBI 0x00002000 /* Fatal Bus Error Interrupt */ +#define DMA_STATUS_ETI 0x00000400 /* Early Transmit Interrupt */ +#define DMA_STATUS_RWT 0x00000200 /* Receive Watchdog Timeout */ +#define DMA_STATUS_RPS 0x00000100 /* Receive Process Stopped */ +#define DMA_STATUS_RU 0x00000080 /* Receive Buffer Unavailable */ +#define DMA_STATUS_RI 0x00000040 /* Receive Interrupt */ +#define DMA_STATUS_UNF 0x00000020 /* Transmit Underflow */ +#define DMA_STATUS_OVF 0x00000010 /* Receive Overflow */ +#define DMA_STATUS_TJT 
0x00000008 /* Transmit Jabber Timeout */ +#define DMA_STATUS_TU 0x00000004 /* Transmit Buffer Unavailable */ +#define DMA_STATUS_TPS 0x00000002 /* Transmit Process Stopped */ +#define DMA_STATUS_TI 0x00000001 /* Transmit Interrupt */ + +/* DMA Control register defines */ +#define DMA_CONTROL_ST 0x00002000 /* Start/Stop Transmission */ +#define DMA_CONTROL_SR 0x00000002 /* Start/Stop Receive */ +#define DMA_CONTROL_DFF 0x01000000 /* Disable flush of rx frames */ + +struct desc { + uint32_t ctl_stat; + uint16_t buffer1_size; + uint16_t buffer2_size; + uint32_t buffer1_addr; + uint32_t buffer2_addr; + uint32_t ext_stat; + uint32_t res[3]; +}; + +#define R_MAX 0x400 + +typedef struct RxTxStats { + uint64_t rx_bytes; + uint64_t tx_bytes; + + uint64_t rx; + uint64_t rx_bcast; + uint64_t rx_mcast; +} RxTxStats; + +#define TYPE_XGMAC "xgmac" +OBJECT_DECLARE_SIMPLE_TYPE(XgmacState, XGMAC) + +struct XgmacState { + SysBusDevice parent_obj; + + MemoryRegion iomem; + qemu_irq sbd_irq; + qemu_irq pmt_irq; + qemu_irq mci_irq; + NICState *nic; + NICConf conf; + + struct RxTxStats stats; + uint32_t regs[R_MAX]; +}; + +static const VMStateDescription vmstate_rxtx_stats = { + .name = "xgmac_stats", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT64(rx_bytes, RxTxStats), + VMSTATE_UINT64(tx_bytes, RxTxStats), + VMSTATE_UINT64(rx, RxTxStats), + VMSTATE_UINT64(rx_bcast, RxTxStats), + VMSTATE_UINT64(rx_mcast, RxTxStats), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_xgmac = { + .name = "xgmac", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_STRUCT(stats, XgmacState, 0, vmstate_rxtx_stats, RxTxStats), + VMSTATE_UINT32_ARRAY(regs, XgmacState, R_MAX), + VMSTATE_END_OF_LIST() + } +}; + +static void xgmac_read_desc(XgmacState *s, struct desc *d, int rx) +{ + uint32_t addr = rx ? s->regs[DMA_CUR_RX_DESC_ADDR] : + s->regs[DMA_CUR_TX_DESC_ADDR]; + cpu_physical_memory_read(addr, d, sizeof(*d)); +} + +static void xgmac_write_desc(XgmacState *s, struct desc *d, int rx) +{ + int reg = rx ? DMA_CUR_RX_DESC_ADDR : DMA_CUR_TX_DESC_ADDR; + uint32_t addr = s->regs[reg]; + + if (!rx && (d->ctl_stat & 0x00200000)) { + s->regs[reg] = s->regs[DMA_TX_BASE_ADDR]; + } else if (rx && (d->buffer1_size & 0x8000)) { + s->regs[reg] = s->regs[DMA_RCV_BASE_ADDR]; + } else { + s->regs[reg] += sizeof(*d); + } + cpu_physical_memory_write(addr, d, sizeof(*d)); +} + +static void xgmac_enet_send(XgmacState *s) +{ + struct desc bd; + int frame_size; + int len; + uint8_t frame[8192]; + uint8_t *ptr; + + ptr = frame; + frame_size = 0; + while (1) { + xgmac_read_desc(s, &bd, 0); + if ((bd.ctl_stat & 0x80000000) == 0) { + /* Run out of descriptors to transmit. */ + break; + } + len = (bd.buffer1_size & 0xfff) + (bd.buffer2_size & 0xfff); + + /* + * FIXME: these cases of malformed tx descriptors (bad sizes) + * should probably be reported back to the guest somehow + * rather than simply silently stopping processing, but we + * don't know what the hardware does in this situation. + * This will only happen for buggy guests anyway. + */ + if ((bd.buffer1_size & 0xfff) > 2048) { + DEBUGF_BRK("qemu:%s:ERROR...ERROR...ERROR... -- " + "xgmac buffer 1 len on send > 2048 (0x%x)\n", + __func__, bd.buffer1_size & 0xfff); + break; + } + if ((bd.buffer2_size & 0xfff) != 0) { + DEBUGF_BRK("qemu:%s:ERROR...ERROR...ERROR... 
-- " + "xgmac buffer 2 len on send != 0 (0x%x)\n", + __func__, bd.buffer2_size & 0xfff); + break; + } + if (frame_size + len >= sizeof(frame)) { + DEBUGF_BRK("qemu:%s: buffer overflow %d read into %zu " + "buffer\n" , __func__, frame_size + len, sizeof(frame)); + DEBUGF_BRK("qemu:%s: buffer1.size=%d; buffer2.size=%d\n", + __func__, bd.buffer1_size, bd.buffer2_size); + break; + } + + cpu_physical_memory_read(bd.buffer1_addr, ptr, len); + ptr += len; + frame_size += len; + if (bd.ctl_stat & 0x20000000) { + /* Last buffer in frame. */ + qemu_send_packet(qemu_get_queue(s->nic), frame, len); + ptr = frame; + frame_size = 0; + s->regs[DMA_STATUS] |= DMA_STATUS_TI | DMA_STATUS_NIS; + } + bd.ctl_stat &= ~0x80000000; + /* Write back the modified descriptor. */ + xgmac_write_desc(s, &bd, 0); + } +} + +static void enet_update_irq(XgmacState *s) +{ + int stat = s->regs[DMA_STATUS] & s->regs[DMA_INTR_ENA]; + qemu_set_irq(s->sbd_irq, !!stat); +} + +static uint64_t enet_read(void *opaque, hwaddr addr, unsigned size) +{ + XgmacState *s = opaque; + uint64_t r = 0; + addr >>= 2; + + switch (addr) { + case XGMAC_VERSION: + r = 0x1012; + break; + default: + if (addr < ARRAY_SIZE(s->regs)) { + r = s->regs[addr]; + } + break; + } + return r; +} + +static void enet_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + XgmacState *s = opaque; + + addr >>= 2; + switch (addr) { + case DMA_BUS_MODE: + s->regs[DMA_BUS_MODE] = value & ~0x1; + break; + case DMA_XMT_POLL_DEMAND: + xgmac_enet_send(s); + break; + case DMA_STATUS: + s->regs[DMA_STATUS] = s->regs[DMA_STATUS] & ~value; + break; + case DMA_RCV_BASE_ADDR: + s->regs[DMA_RCV_BASE_ADDR] = s->regs[DMA_CUR_RX_DESC_ADDR] = value; + break; + case DMA_TX_BASE_ADDR: + s->regs[DMA_TX_BASE_ADDR] = s->regs[DMA_CUR_TX_DESC_ADDR] = value; + break; + default: + if (addr < ARRAY_SIZE(s->regs)) { + s->regs[addr] = value; + } + break; + } + enet_update_irq(s); +} + +static const MemoryRegionOps enet_mem_ops = { + .read = enet_read, + .write = enet_write, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static int eth_can_rx(XgmacState *s) +{ + /* RX enabled? 
*/ + return s->regs[DMA_CONTROL] & DMA_CONTROL_SR; +} + +static ssize_t eth_rx(NetClientState *nc, const uint8_t *buf, size_t size) +{ + XgmacState *s = qemu_get_nic_opaque(nc); + static const unsigned char sa_bcast[6] = {0xff, 0xff, 0xff, + 0xff, 0xff, 0xff}; + int unicast, broadcast, multicast; + struct desc bd; + ssize_t ret; + + if (!eth_can_rx(s)) { + return -1; + } + unicast = ~buf[0] & 0x1; + broadcast = memcmp(buf, sa_bcast, 6) == 0; + multicast = !unicast && !broadcast; + if (size < 12) { + s->regs[DMA_STATUS] |= DMA_STATUS_RI | DMA_STATUS_NIS; + ret = -1; + goto out; + } + + xgmac_read_desc(s, &bd, 1); + if ((bd.ctl_stat & 0x80000000) == 0) { + s->regs[DMA_STATUS] |= DMA_STATUS_RU | DMA_STATUS_AIS; + ret = size; + goto out; + } + + cpu_physical_memory_write(bd.buffer1_addr, buf, size); + + /* Add in the 4 bytes for crc (the real hw returns length incl crc) */ + size += 4; + bd.ctl_stat = (size << 16) | 0x300; + xgmac_write_desc(s, &bd, 1); + + s->stats.rx_bytes += size; + s->stats.rx++; + if (multicast) { + s->stats.rx_mcast++; + } else if (broadcast) { + s->stats.rx_bcast++; + } + + s->regs[DMA_STATUS] |= DMA_STATUS_RI | DMA_STATUS_NIS; + ret = size; + +out: + enet_update_irq(s); + return ret; +} + +static NetClientInfo net_xgmac_enet_info = { + .type = NET_CLIENT_DRIVER_NIC, + .size = sizeof(NICState), + .receive = eth_rx, +}; + +static void xgmac_enet_realize(DeviceState *dev, Error **errp) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + XgmacState *s = XGMAC(dev); + + memory_region_init_io(&s->iomem, OBJECT(s), &enet_mem_ops, s, + "xgmac", 0x1000); + sysbus_init_mmio(sbd, &s->iomem); + sysbus_init_irq(sbd, &s->sbd_irq); + sysbus_init_irq(sbd, &s->pmt_irq); + sysbus_init_irq(sbd, &s->mci_irq); + + qemu_macaddr_default_if_unset(&s->conf.macaddr); + s->nic = qemu_new_nic(&net_xgmac_enet_info, &s->conf, + object_get_typename(OBJECT(dev)), dev->id, s); + qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a); + + s->regs[XGMAC_ADDR_HIGH(0)] = (s->conf.macaddr.a[5] << 8) | + s->conf.macaddr.a[4]; + s->regs[XGMAC_ADDR_LOW(0)] = (s->conf.macaddr.a[3] << 24) | + (s->conf.macaddr.a[2] << 16) | + (s->conf.macaddr.a[1] << 8) | + s->conf.macaddr.a[0]; +} + +static Property xgmac_properties[] = { + DEFINE_NIC_PROPERTIES(XgmacState, conf), + DEFINE_PROP_END_OF_LIST(), +}; + +static void xgmac_enet_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = xgmac_enet_realize; + dc->vmsd = &vmstate_xgmac; + device_class_set_props(dc, xgmac_properties); +} + +static const TypeInfo xgmac_enet_info = { + .name = TYPE_XGMAC, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(XgmacState), + .class_init = xgmac_enet_class_init, +}; + +static void xgmac_enet_register_types(void) +{ + type_register_static(&xgmac_enet_info); +} + +type_init(xgmac_enet_register_types) diff --git a/hw/net/xilinx_axienet.c b/hw/net/xilinx_axienet.c new file mode 100644 index 000000000..990ff3a1c --- /dev/null +++ b/hw/net/xilinx_axienet.c @@ -0,0 +1,1072 @@ +/* + * QEMU model of Xilinx AXI-Ethernet. + * + * Copyright (c) 2011 Edgar E. Iglesias. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "hw/hw.h" +#include "hw/sysbus.h" +#include "qapi/error.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "net/net.h" +#include "net/checksum.h" + +#include "hw/hw.h" +#include "hw/irq.h" +#include "hw/qdev-properties.h" +#include "hw/stream.h" +#include "qom/object.h" + +#define DPHY(x) + +#define TYPE_XILINX_AXI_ENET "xlnx.axi-ethernet" +#define TYPE_XILINX_AXI_ENET_DATA_STREAM "xilinx-axienet-data-stream" +#define TYPE_XILINX_AXI_ENET_CONTROL_STREAM "xilinx-axienet-control-stream" + +OBJECT_DECLARE_SIMPLE_TYPE(XilinxAXIEnet, XILINX_AXI_ENET) + +typedef struct XilinxAXIEnetStreamSink XilinxAXIEnetStreamSink; +DECLARE_INSTANCE_CHECKER(XilinxAXIEnetStreamSink, XILINX_AXI_ENET_DATA_STREAM, + TYPE_XILINX_AXI_ENET_DATA_STREAM) + +DECLARE_INSTANCE_CHECKER(XilinxAXIEnetStreamSink, XILINX_AXI_ENET_CONTROL_STREAM, + TYPE_XILINX_AXI_ENET_CONTROL_STREAM) + +/* Advertisement control register. */ +#define ADVERTISE_10FULL 0x0040 /* Try for 10mbps full-duplex */ +#define ADVERTISE_100HALF 0x0080 /* Try for 100mbps half-duplex */ +#define ADVERTISE_100FULL 0x0100 /* Try for 100mbps full-duplex */ + +#define CONTROL_PAYLOAD_WORDS 5 +#define CONTROL_PAYLOAD_SIZE (CONTROL_PAYLOAD_WORDS * (sizeof(uint32_t))) + +struct PHY { + uint32_t regs[32]; + + int link; + + unsigned int (*read)(struct PHY *phy, unsigned int req); + void (*write)(struct PHY *phy, unsigned int req, + unsigned int data); +}; + +static unsigned int tdk_read(struct PHY *phy, unsigned int req) +{ + int regnum; + unsigned r = 0; + + regnum = req & 0x1f; + + switch (regnum) { + case 1: + if (!phy->link) { + break; + } + /* MR1. */ + /* Speeds and modes. */ + r |= (1 << 13) | (1 << 14); + r |= (1 << 11) | (1 << 12); + r |= (1 << 5); /* Autoneg complete. */ + r |= (1 << 3); /* Autoneg able. */ + r |= (1 << 2); /* link. */ + r |= (1 << 1); /* link. */ + break; + case 5: + /* Link partner ability. + We are kind; always agree with whatever best mode + the guest advertises. */ + r = 1 << 14; /* Success. */ + /* Copy advertised modes. */ + r |= phy->regs[4] & (15 << 5); + /* Autoneg support. */ + r |= 1; + break; + case 17: + /* Marvell PHY on many xilinx boards. */ + r = 0x8000; /* 1000Mb */ + break; + case 18: + { + /* Diagnostics reg. */ + int duplex = 0; + int speed_100 = 0; + + if (!phy->link) { + break; + } + + /* Are we advertising 100 half or 100 duplex ? 
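+ The emulated PHY derives these diagnostics bits from whatever the
+ guest wrote to register 4, the autonegotiation advertisement
+ register, so the link always comes up in the best advertised mode.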
*/ + speed_100 = !!(phy->regs[4] & ADVERTISE_100HALF); + speed_100 |= !!(phy->regs[4] & ADVERTISE_100FULL); + + /* Are we advertising 10 duplex or 100 duplex ? */ + duplex = !!(phy->regs[4] & ADVERTISE_100FULL); + duplex |= !!(phy->regs[4] & ADVERTISE_10FULL); + r = (speed_100 << 10) | (duplex << 11); + } + break; + + default: + r = phy->regs[regnum]; + break; + } + DPHY(qemu_log("\n%s %x = reg[%d]\n", __func__, r, regnum)); + return r; +} + +static void +tdk_write(struct PHY *phy, unsigned int req, unsigned int data) +{ + int regnum; + + regnum = req & 0x1f; + DPHY(qemu_log("%s reg[%d] = %x\n", __func__, regnum, data)); + switch (regnum) { + default: + phy->regs[regnum] = data; + break; + } + + /* Unconditionally clear regs[BMCR][BMCR_RESET] and auto-neg */ + phy->regs[0] &= ~0x8200; +} + +static void +tdk_init(struct PHY *phy) +{ + phy->regs[0] = 0x3100; + /* PHY Id. */ + phy->regs[2] = 0x0300; + phy->regs[3] = 0xe400; + /* Autonegotiation advertisement reg. */ + phy->regs[4] = 0x01E1; + phy->link = 1; + + phy->read = tdk_read; + phy->write = tdk_write; +} + +struct MDIOBus { + struct PHY *devs[32]; +}; + +static void +mdio_attach(struct MDIOBus *bus, struct PHY *phy, unsigned int addr) +{ + bus->devs[addr & 0x1f] = phy; +} + +#ifdef USE_THIS_DEAD_CODE +static void +mdio_detach(struct MDIOBus *bus, struct PHY *phy, unsigned int addr) +{ + bus->devs[addr & 0x1f] = NULL; +} +#endif + +static uint16_t mdio_read_req(struct MDIOBus *bus, unsigned int addr, + unsigned int reg) +{ + struct PHY *phy; + uint16_t data; + + phy = bus->devs[addr]; + if (phy && phy->read) { + data = phy->read(phy, reg); + } else { + data = 0xffff; + } + DPHY(qemu_log("%s addr=%d reg=%d data=%x\n", __func__, addr, reg, data)); + return data; +} + +static void mdio_write_req(struct MDIOBus *bus, unsigned int addr, + unsigned int reg, uint16_t data) +{ + struct PHY *phy; + + DPHY(qemu_log("%s addr=%d reg=%d data=%x\n", __func__, addr, reg, data)); + phy = bus->devs[addr]; + if (phy && phy->write) { + phy->write(phy, reg, data); + } +} + +#define DENET(x) + +#define R_RAF (0x000 / 4) +enum { + RAF_MCAST_REJ = (1 << 1), + RAF_BCAST_REJ = (1 << 2), + RAF_EMCF_EN = (1 << 12), + RAF_NEWFUNC_EN = (1 << 11) +}; + +#define R_IS (0x00C / 4) +enum { + IS_HARD_ACCESS_COMPLETE = 1, + IS_AUTONEG = (1 << 1), + IS_RX_COMPLETE = (1 << 2), + IS_RX_REJECT = (1 << 3), + IS_TX_COMPLETE = (1 << 5), + IS_RX_DCM_LOCK = (1 << 6), + IS_MGM_RDY = (1 << 7), + IS_PHY_RST_DONE = (1 << 8), +}; + +#define R_IP (0x010 / 4) +#define R_IE (0x014 / 4) +#define R_UAWL (0x020 / 4) +#define R_UAWU (0x024 / 4) +#define R_PPST (0x030 / 4) +enum { + PPST_LINKSTATUS = (1 << 0), + PPST_PHY_LINKSTATUS = (1 << 7), +}; + +#define R_STATS_RX_BYTESL (0x200 / 4) +#define R_STATS_RX_BYTESH (0x204 / 4) +#define R_STATS_TX_BYTESL (0x208 / 4) +#define R_STATS_TX_BYTESH (0x20C / 4) +#define R_STATS_RXL (0x290 / 4) +#define R_STATS_RXH (0x294 / 4) +#define R_STATS_RX_BCASTL (0x2a0 / 4) +#define R_STATS_RX_BCASTH (0x2a4 / 4) +#define R_STATS_RX_MCASTL (0x2a8 / 4) +#define R_STATS_RX_MCASTH (0x2ac / 4) + +#define R_RCW0 (0x400 / 4) +#define R_RCW1 (0x404 / 4) +enum { + RCW1_VLAN = (1 << 27), + RCW1_RX = (1 << 28), + RCW1_FCS = (1 << 29), + RCW1_JUM = (1 << 30), + RCW1_RST = (1 << 31), +}; + +#define R_TC (0x408 / 4) +enum { + TC_VLAN = (1 << 27), + TC_TX = (1 << 28), + TC_FCS = (1 << 29), + TC_JUM = (1 << 30), + TC_RST = (1 << 31), +}; + +#define R_EMMC (0x410 / 4) +enum { + EMMC_LINKSPEED_10MB = (0 << 30), + EMMC_LINKSPEED_100MB = (1 << 30), + EMMC_LINKSPEED_1000MB = (2 << 
30), +}; + +#define R_PHYC (0x414 / 4) + +#define R_MC (0x500 / 4) +#define MC_EN (1 << 6) + +#define R_MCR (0x504 / 4) +#define R_MWD (0x508 / 4) +#define R_MRD (0x50c / 4) +#define R_MIS (0x600 / 4) +#define R_MIP (0x620 / 4) +#define R_MIE (0x640 / 4) +#define R_MIC (0x640 / 4) + +#define R_UAW0 (0x700 / 4) +#define R_UAW1 (0x704 / 4) +#define R_FMI (0x708 / 4) +#define R_AF0 (0x710 / 4) +#define R_AF1 (0x714 / 4) +#define R_MAX (0x34 / 4) + +/* Indirect registers. */ +struct TEMAC { + struct MDIOBus mdio_bus; + struct PHY phy; + + void *parent; +}; + + +struct XilinxAXIEnetStreamSink { + Object parent; + + struct XilinxAXIEnet *enet; +} ; + +struct XilinxAXIEnet { + SysBusDevice busdev; + MemoryRegion iomem; + qemu_irq irq; + StreamSink *tx_data_dev; + StreamSink *tx_control_dev; + XilinxAXIEnetStreamSink rx_data_dev; + XilinxAXIEnetStreamSink rx_control_dev; + NICState *nic; + NICConf conf; + + + uint32_t c_rxmem; + uint32_t c_txmem; + uint32_t c_phyaddr; + + struct TEMAC TEMAC; + + /* MII regs. */ + union { + uint32_t regs[4]; + struct { + uint32_t mc; + uint32_t mcr; + uint32_t mwd; + uint32_t mrd; + }; + } mii; + + struct { + uint64_t rx_bytes; + uint64_t tx_bytes; + + uint64_t rx; + uint64_t rx_bcast; + uint64_t rx_mcast; + } stats; + + /* Receive configuration words. */ + uint32_t rcw[2]; + /* Transmit config. */ + uint32_t tc; + uint32_t emmc; + uint32_t phyc; + + /* Unicast Address Word. */ + uint32_t uaw[2]; + /* Unicast address filter used with extended mcast. */ + uint32_t ext_uaw[2]; + uint32_t fmi; + + uint32_t regs[R_MAX]; + + /* Multicast filter addrs. */ + uint32_t maddr[4][2]; + /* 32K x 1 lookup filter. */ + uint32_t ext_mtable[1024]; + + uint32_t hdr[CONTROL_PAYLOAD_WORDS]; + + uint8_t *txmem; + uint32_t txpos; + + uint8_t *rxmem; + uint32_t rxsize; + uint32_t rxpos; + + uint8_t rxapp[CONTROL_PAYLOAD_SIZE]; + uint32_t rxappsize; + + /* Whether axienet_eth_rx_notify should flush incoming queue. */ + bool need_flush; +}; + +static void axienet_rx_reset(XilinxAXIEnet *s) +{ + s->rcw[1] = RCW1_JUM | RCW1_FCS | RCW1_RX | RCW1_VLAN; +} + +static void axienet_tx_reset(XilinxAXIEnet *s) +{ + s->tc = TC_JUM | TC_TX | TC_VLAN; + s->txpos = 0; +} + +static inline int axienet_rx_resetting(XilinxAXIEnet *s) +{ + return s->rcw[1] & RCW1_RST; +} + +static inline int axienet_rx_enabled(XilinxAXIEnet *s) +{ + return s->rcw[1] & RCW1_RX; +} + +static inline int axienet_extmcf_enabled(XilinxAXIEnet *s) +{ + return !!(s->regs[R_RAF] & RAF_EMCF_EN); +} + +static inline int axienet_newfunc_enabled(XilinxAXIEnet *s) +{ + return !!(s->regs[R_RAF] & RAF_NEWFUNC_EN); +} + +static void xilinx_axienet_reset(DeviceState *d) +{ + XilinxAXIEnet *s = XILINX_AXI_ENET(d); + + axienet_rx_reset(s); + axienet_tx_reset(s); + + s->regs[R_PPST] = PPST_LINKSTATUS | PPST_PHY_LINKSTATUS; + s->regs[R_IS] = IS_AUTONEG | IS_RX_DCM_LOCK | IS_MGM_RDY | IS_PHY_RST_DONE; + + s->emmc = EMMC_LINKSPEED_100MB; +} + +static void enet_update_irq(XilinxAXIEnet *s) +{ + s->regs[R_IP] = s->regs[R_IS] & s->regs[R_IE]; + qemu_set_irq(s->irq, !!s->regs[R_IP]); +} + +static uint64_t enet_read(void *opaque, hwaddr addr, unsigned size) +{ + XilinxAXIEnet *s = opaque; + uint32_t r = 0; + addr >>= 2; + + switch (addr) { + case R_RCW0: + case R_RCW1: + r = s->rcw[addr & 1]; + break; + + case R_TC: + r = s->tc; + break; + + case R_EMMC: + r = s->emmc; + break; + + case R_PHYC: + r = s->phyc; + break; + + case R_MCR: + r = s->mii.regs[addr & 3] | (1 << 7); /* Always ready. 
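+ Bit 7 is treated as the MDIO "ready" flag here, so a guest polling
+ for completion of an MDIO transaction never has to wait.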
*/ + break; + + case R_STATS_RX_BYTESL: + case R_STATS_RX_BYTESH: + r = s->stats.rx_bytes >> (32 * (addr & 1)); + break; + + case R_STATS_TX_BYTESL: + case R_STATS_TX_BYTESH: + r = s->stats.tx_bytes >> (32 * (addr & 1)); + break; + + case R_STATS_RXL: + case R_STATS_RXH: + r = s->stats.rx >> (32 * (addr & 1)); + break; + case R_STATS_RX_BCASTL: + case R_STATS_RX_BCASTH: + r = s->stats.rx_bcast >> (32 * (addr & 1)); + break; + case R_STATS_RX_MCASTL: + case R_STATS_RX_MCASTH: + r = s->stats.rx_mcast >> (32 * (addr & 1)); + break; + + case R_MC: + case R_MWD: + case R_MRD: + r = s->mii.regs[addr & 3]; + break; + + case R_UAW0: + case R_UAW1: + r = s->uaw[addr & 1]; + break; + + case R_UAWU: + case R_UAWL: + r = s->ext_uaw[addr & 1]; + break; + + case R_FMI: + r = s->fmi; + break; + + case R_AF0: + case R_AF1: + r = s->maddr[s->fmi & 3][addr & 1]; + break; + + case 0x8000 ... 0x83ff: + r = s->ext_mtable[addr - 0x8000]; + break; + + default: + if (addr < ARRAY_SIZE(s->regs)) { + r = s->regs[addr]; + } + DENET(qemu_log("%s addr=" TARGET_FMT_plx " v=%x\n", + __func__, addr * 4, r)); + break; + } + return r; +} + +static void enet_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + XilinxAXIEnet *s = opaque; + struct TEMAC *t = &s->TEMAC; + + addr >>= 2; + switch (addr) { + case R_RCW0: + case R_RCW1: + s->rcw[addr & 1] = value; + if ((addr & 1) && value & RCW1_RST) { + axienet_rx_reset(s); + } else { + qemu_flush_queued_packets(qemu_get_queue(s->nic)); + } + break; + + case R_TC: + s->tc = value; + if (value & TC_RST) { + axienet_tx_reset(s); + } + break; + + case R_EMMC: + s->emmc = value; + break; + + case R_PHYC: + s->phyc = value; + break; + + case R_MC: + value &= ((1 << 7) - 1); + + /* Enable the MII. */ + if (value & MC_EN) { + unsigned int miiclkdiv = value & ((1 << 6) - 1); + if (!miiclkdiv) { + qemu_log("AXIENET: MDIO enabled but MDIOCLK is zero!\n"); + } + } + s->mii.mc = value; + break; + + case R_MCR: { + unsigned int phyaddr = (value >> 24) & 0x1f; + unsigned int regaddr = (value >> 16) & 0x1f; + unsigned int op = (value >> 14) & 3; + unsigned int initiate = (value >> 11) & 1; + + if (initiate) { + if (op == 1) { + mdio_write_req(&t->mdio_bus, phyaddr, regaddr, s->mii.mwd); + } else if (op == 2) { + s->mii.mrd = mdio_read_req(&t->mdio_bus, phyaddr, regaddr); + } else { + qemu_log("AXIENET: invalid MDIOBus OP=%d\n", op); + } + } + s->mii.mcr = value; + break; + } + + case R_MWD: + case R_MRD: + s->mii.regs[addr & 3] = value; + break; + + + case R_UAW0: + case R_UAW1: + s->uaw[addr & 1] = value; + break; + + case R_UAWL: + case R_UAWU: + s->ext_uaw[addr & 1] = value; + break; + + case R_FMI: + s->fmi = value; + break; + + case R_AF0: + case R_AF1: + s->maddr[s->fmi & 3][addr & 1] = value; + break; + + case R_IS: + s->regs[addr] &= ~value; + break; + + case 0x8000 ... 0x83ff: + s->ext_mtable[addr - 0x8000] = value; + break; + + default: + DENET(qemu_log("%s addr=" TARGET_FMT_plx " v=%x\n", + __func__, addr * 4, (unsigned)value)); + if (addr < ARRAY_SIZE(s->regs)) { + s->regs[addr] = value; + } + break; + } + enet_update_irq(s); +} + +static const MemoryRegionOps enet_ops = { + .read = enet_read, + .write = enet_write, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static int eth_can_rx(XilinxAXIEnet *s) +{ + /* RX enabled? 
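A new frame can be accepted only once the previous one has been fully pushed out to the AXI stream (rxsize == 0) and the receiver is enabled and not held in reset.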
*/ + return !s->rxsize && !axienet_rx_resetting(s) && axienet_rx_enabled(s); +} + +static int enet_match_addr(const uint8_t *buf, uint32_t f0, uint32_t f1) +{ + int match = 1; + + if (memcmp(buf, &f0, 4)) { + match = 0; + } + + if (buf[4] != (f1 & 0xff) || buf[5] != ((f1 >> 8) & 0xff)) { + match = 0; + } + + return match; +} + +static void axienet_eth_rx_notify(void *opaque) +{ + XilinxAXIEnet *s = XILINX_AXI_ENET(opaque); + + while (s->rxappsize && stream_can_push(s->tx_control_dev, + axienet_eth_rx_notify, s)) { + size_t ret = stream_push(s->tx_control_dev, + (void *)s->rxapp + CONTROL_PAYLOAD_SIZE + - s->rxappsize, s->rxappsize, true); + s->rxappsize -= ret; + } + + while (s->rxsize && stream_can_push(s->tx_data_dev, + axienet_eth_rx_notify, s)) { + size_t ret = stream_push(s->tx_data_dev, (void *)s->rxmem + s->rxpos, + s->rxsize, true); + s->rxsize -= ret; + s->rxpos += ret; + if (!s->rxsize) { + s->regs[R_IS] |= IS_RX_COMPLETE; + if (s->need_flush) { + s->need_flush = false; + qemu_flush_queued_packets(qemu_get_queue(s->nic)); + } + } + } + enet_update_irq(s); +} + +static ssize_t eth_rx(NetClientState *nc, const uint8_t *buf, size_t size) +{ + XilinxAXIEnet *s = qemu_get_nic_opaque(nc); + static const unsigned char sa_bcast[6] = {0xff, 0xff, 0xff, + 0xff, 0xff, 0xff}; + static const unsigned char sa_ipmcast[3] = {0x01, 0x00, 0x52}; + uint32_t app[CONTROL_PAYLOAD_WORDS] = {0}; + int promisc = s->fmi & (1 << 31); + int unicast, broadcast, multicast, ip_multicast = 0; + uint32_t csum32; + uint16_t csum16; + int i; + + DENET(qemu_log("%s: %zd bytes\n", __func__, size)); + + if (!eth_can_rx(s)) { + s->need_flush = true; + return 0; + } + + unicast = ~buf[0] & 0x1; + broadcast = memcmp(buf, sa_bcast, 6) == 0; + multicast = !unicast && !broadcast; + if (multicast && (memcmp(sa_ipmcast, buf, sizeof sa_ipmcast) == 0)) { + ip_multicast = 1; + } + + /* Jumbo or vlan sizes ? */ + if (!(s->rcw[1] & RCW1_JUM)) { + if (size > 1518 && size <= 1522 && !(s->rcw[1] & RCW1_VLAN)) { + return size; + } + } + + /* Basic Address filters. If you want to use the extended filters + you'll generally have to place the ethernet mac into promiscuous mode + to avoid the basic filtering from dropping most frames. */ + if (!promisc) { + if (unicast) { + if (!enet_match_addr(buf, s->uaw[0], s->uaw[1])) { + return size; + } + } else { + if (broadcast) { + /* Broadcast. */ + if (s->regs[R_RAF] & RAF_BCAST_REJ) { + return size; + } + } else { + int drop = 1; + + /* Multicast. */ + if (s->regs[R_RAF] & RAF_MCAST_REJ) { + return size; + } + + for (i = 0; i < 4; i++) { + if (enet_match_addr(buf, s->maddr[i][0], s->maddr[i][1])) { + drop = 0; + break; + } + } + + if (drop) { + return size; + } + } + } + } + + /* Extended mcast filtering enabled? */ + if (axienet_newfunc_enabled(s) && axienet_extmcf_enabled(s)) { + if (unicast) { + if (!enet_match_addr(buf, s->ext_uaw[0], s->ext_uaw[1])) { + return size; + } + } else { + if (broadcast) { + /* Broadcast. ??? */ + if (s->regs[R_RAF] & RAF_BCAST_REJ) { + return size; + } + } else { + int idx, bit; + + /* Multicast. */ + if (!memcmp(buf, sa_ipmcast, 3)) { + return size; + } + + idx = (buf[4] & 0x7f) << 8; + idx |= buf[5]; + + bit = 1 << (idx & 0x1f); + idx >>= 5; + + if (!(s->ext_mtable[idx] & bit)) { + return size; + } + } + } + } + + if (size < 12) { + s->regs[R_IS] |= IS_RX_REJECT; + enet_update_irq(s); + return -1; + } + + if (size > (s->c_rxmem - 4)) { + size = s->c_rxmem - 4; + } + + memcpy(s->rxmem, buf, size); + memset(s->rxmem + size, 0, 4); /* Clear the FCS. 
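The model does not compute a real CRC; four zero bytes stand in for the frame check sequence.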
*/ + + if (s->rcw[1] & RCW1_FCS) { + size += 4; /* fcs is inband. */ + } + + app[0] = 5 << 28; + csum32 = net_checksum_add(size - 14, (uint8_t *)s->rxmem + 14); + /* Fold it once. */ + csum32 = (csum32 & 0xffff) + (csum32 >> 16); + /* And twice to get rid of possible carries. */ + csum16 = (csum32 & 0xffff) + (csum32 >> 16); + app[3] = csum16; + app[4] = size & 0xffff; + + s->stats.rx_bytes += size; + s->stats.rx++; + if (multicast) { + s->stats.rx_mcast++; + app[2] |= 1 | (ip_multicast << 1); + } else if (broadcast) { + s->stats.rx_bcast++; + app[2] |= 1 << 3; + } + + /* Good frame. */ + app[2] |= 1 << 6; + + s->rxsize = size; + s->rxpos = 0; + for (i = 0; i < ARRAY_SIZE(app); ++i) { + app[i] = cpu_to_le32(app[i]); + } + s->rxappsize = CONTROL_PAYLOAD_SIZE; + memcpy(s->rxapp, app, s->rxappsize); + axienet_eth_rx_notify(s); + + enet_update_irq(s); + return size; +} + +static size_t +xilinx_axienet_control_stream_push(StreamSink *obj, uint8_t *buf, size_t len, + bool eop) +{ + int i; + XilinxAXIEnetStreamSink *cs = XILINX_AXI_ENET_CONTROL_STREAM(obj); + XilinxAXIEnet *s = cs->enet; + + assert(eop); + if (len != CONTROL_PAYLOAD_SIZE) { + hw_error("AXI Enet requires %d byte control stream payload\n", + (int)CONTROL_PAYLOAD_SIZE); + } + + memcpy(s->hdr, buf, len); + + for (i = 0; i < ARRAY_SIZE(s->hdr); ++i) { + s->hdr[i] = le32_to_cpu(s->hdr[i]); + } + return len; +} + +static size_t +xilinx_axienet_data_stream_push(StreamSink *obj, uint8_t *buf, size_t size, + bool eop) +{ + XilinxAXIEnetStreamSink *ds = XILINX_AXI_ENET_DATA_STREAM(obj); + XilinxAXIEnet *s = ds->enet; + + /* TX enable ? */ + if (!(s->tc & TC_TX)) { + return size; + } + + if (s->txpos + size > s->c_txmem) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: Packet larger than txmem\n", + TYPE_XILINX_AXI_ENET); + s->txpos = 0; + return size; + } + + if (s->txpos == 0 && eop) { + /* Fast path single fragment. */ + s->txpos = size; + } else { + memcpy(s->txmem + s->txpos, buf, size); + buf = s->txmem; + s->txpos += size; + + if (!eop) { + return size; + } + } + + /* Jumbo or vlan sizes ? */ + if (!(s->tc & TC_JUM)) { + if (s->txpos > 1518 && s->txpos <= 1522 && !(s->tc & TC_VLAN)) { + s->txpos = 0; + return size; + } + } + + if (s->hdr[0] & 1) { + unsigned int start_off = s->hdr[1] >> 16; + unsigned int write_off = s->hdr[1] & 0xffff; + uint32_t tmp_csum; + uint16_t csum; + + tmp_csum = net_checksum_add(s->txpos - start_off, + buf + start_off); + /* Accumulate the seed. */ + tmp_csum += s->hdr[2] & 0xffff; + + /* Fold the 32bit partial checksum. */ + csum = net_checksum_finish(tmp_csum); + + /* Writeback. 
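Store the folded 16-bit checksum into the frame, big-endian, at the offset requested by the control stream header.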
*/ + buf[write_off] = csum >> 8; + buf[write_off + 1] = csum & 0xff; + } + + qemu_send_packet(qemu_get_queue(s->nic), buf, s->txpos); + + s->stats.tx_bytes += s->txpos; + s->regs[R_IS] |= IS_TX_COMPLETE; + enet_update_irq(s); + + s->txpos = 0; + return size; +} + +static NetClientInfo net_xilinx_enet_info = { + .type = NET_CLIENT_DRIVER_NIC, + .size = sizeof(NICState), + .receive = eth_rx, +}; + +static void xilinx_enet_realize(DeviceState *dev, Error **errp) +{ + XilinxAXIEnet *s = XILINX_AXI_ENET(dev); + XilinxAXIEnetStreamSink *ds = XILINX_AXI_ENET_DATA_STREAM(&s->rx_data_dev); + XilinxAXIEnetStreamSink *cs = XILINX_AXI_ENET_CONTROL_STREAM( + &s->rx_control_dev); + + object_property_add_link(OBJECT(ds), "enet", "xlnx.axi-ethernet", + (Object **) &ds->enet, + object_property_allow_set_link, + OBJ_PROP_LINK_STRONG); + object_property_add_link(OBJECT(cs), "enet", "xlnx.axi-ethernet", + (Object **) &cs->enet, + object_property_allow_set_link, + OBJ_PROP_LINK_STRONG); + object_property_set_link(OBJECT(ds), "enet", OBJECT(s), &error_abort); + object_property_set_link(OBJECT(cs), "enet", OBJECT(s), &error_abort); + + qemu_macaddr_default_if_unset(&s->conf.macaddr); + s->nic = qemu_new_nic(&net_xilinx_enet_info, &s->conf, + object_get_typename(OBJECT(dev)), dev->id, s); + qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a); + + tdk_init(&s->TEMAC.phy); + mdio_attach(&s->TEMAC.mdio_bus, &s->TEMAC.phy, s->c_phyaddr); + + s->TEMAC.parent = s; + + s->rxmem = g_malloc(s->c_rxmem); + s->txmem = g_malloc(s->c_txmem); +} + +static void xilinx_enet_init(Object *obj) +{ + XilinxAXIEnet *s = XILINX_AXI_ENET(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + + object_initialize_child(OBJECT(s), "axistream-connected-target", + &s->rx_data_dev, TYPE_XILINX_AXI_ENET_DATA_STREAM); + object_initialize_child(OBJECT(s), "axistream-control-connected-target", + &s->rx_control_dev, + TYPE_XILINX_AXI_ENET_CONTROL_STREAM); + sysbus_init_irq(sbd, &s->irq); + + memory_region_init_io(&s->iomem, OBJECT(s), &enet_ops, s, "enet", 0x40000); + sysbus_init_mmio(sbd, &s->iomem); +} + +static Property xilinx_enet_properties[] = { + DEFINE_PROP_UINT32("phyaddr", XilinxAXIEnet, c_phyaddr, 7), + DEFINE_PROP_UINT32("rxmem", XilinxAXIEnet, c_rxmem, 0x1000), + DEFINE_PROP_UINT32("txmem", XilinxAXIEnet, c_txmem, 0x1000), + DEFINE_NIC_PROPERTIES(XilinxAXIEnet, conf), + DEFINE_PROP_LINK("axistream-connected", XilinxAXIEnet, + tx_data_dev, TYPE_STREAM_SINK, StreamSink *), + DEFINE_PROP_LINK("axistream-control-connected", XilinxAXIEnet, + tx_control_dev, TYPE_STREAM_SINK, StreamSink *), + DEFINE_PROP_END_OF_LIST(), +}; + +static void xilinx_enet_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = xilinx_enet_realize; + device_class_set_props(dc, xilinx_enet_properties); + dc->reset = xilinx_axienet_reset; +} + +static void xilinx_enet_control_stream_class_init(ObjectClass *klass, + void *data) +{ + StreamSinkClass *ssc = STREAM_SINK_CLASS(klass); + + ssc->push = xilinx_axienet_control_stream_push; +} + +static void xilinx_enet_data_stream_class_init(ObjectClass *klass, void *data) +{ + StreamSinkClass *ssc = STREAM_SINK_CLASS(klass); + + ssc->push = xilinx_axienet_data_stream_push; +} + +static const TypeInfo xilinx_enet_info = { + .name = TYPE_XILINX_AXI_ENET, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(XilinxAXIEnet), + .class_init = xilinx_enet_class_init, + .instance_init = xilinx_enet_init, +}; + +static const TypeInfo xilinx_enet_data_stream_info = { 
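/* TypeInfo for the sink that accepts frame data pushed by the AXI DMA for transmission */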
+ .name = TYPE_XILINX_AXI_ENET_DATA_STREAM, + .parent = TYPE_OBJECT, + .instance_size = sizeof(XilinxAXIEnetStreamSink), + .class_init = xilinx_enet_data_stream_class_init, + .interfaces = (InterfaceInfo[]) { + { TYPE_STREAM_SINK }, + { } + } +}; + +static const TypeInfo xilinx_enet_control_stream_info = { + .name = TYPE_XILINX_AXI_ENET_CONTROL_STREAM, + .parent = TYPE_OBJECT, + .instance_size = sizeof(XilinxAXIEnetStreamSink), + .class_init = xilinx_enet_control_stream_class_init, + .interfaces = (InterfaceInfo[]) { + { TYPE_STREAM_SINK }, + { } + } +}; + +static void xilinx_enet_register_types(void) +{ + type_register_static(&xilinx_enet_info); + type_register_static(&xilinx_enet_data_stream_info); + type_register_static(&xilinx_enet_control_stream_info); +} + +type_init(xilinx_enet_register_types) diff --git a/hw/net/xilinx_ethlite.c b/hw/net/xilinx_ethlite.c new file mode 100644 index 000000000..6e09f7e42 --- /dev/null +++ b/hw/net/xilinx_ethlite.c @@ -0,0 +1,282 @@ +/* + * QEMU model of the Xilinx Ethernet Lite MAC. + * + * Copyright (c) 2009 Edgar E. Iglesias. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "qemu/module.h" +#include "qom/object.h" +#include "cpu.h" /* FIXME should not use tswap* */ +#include "hw/sysbus.h" +#include "hw/irq.h" +#include "hw/qdev-properties.h" +#include "net/net.h" + +#define D(x) +#define R_TX_BUF0 0 +#define R_TX_LEN0 (0x07f4 / 4) +#define R_TX_GIE0 (0x07f8 / 4) +#define R_TX_CTRL0 (0x07fc / 4) +#define R_TX_BUF1 (0x0800 / 4) +#define R_TX_LEN1 (0x0ff4 / 4) +#define R_TX_CTRL1 (0x0ffc / 4) + +#define R_RX_BUF0 (0x1000 / 4) +#define R_RX_CTRL0 (0x17fc / 4) +#define R_RX_BUF1 (0x1800 / 4) +#define R_RX_CTRL1 (0x1ffc / 4) +#define R_MAX (0x2000 / 4) + +#define GIE_GIE 0x80000000 + +#define CTRL_I 0x8 +#define CTRL_P 0x2 +#define CTRL_S 0x1 + +#define TYPE_XILINX_ETHLITE "xlnx.xps-ethernetlite" +DECLARE_INSTANCE_CHECKER(struct xlx_ethlite, XILINX_ETHLITE, + TYPE_XILINX_ETHLITE) + +struct xlx_ethlite +{ + SysBusDevice parent_obj; + + MemoryRegion mmio; + qemu_irq irq; + NICState *nic; + NICConf conf; + + uint32_t c_tx_pingpong; + uint32_t c_rx_pingpong; + unsigned int txbuf; + unsigned int rxbuf; + + uint32_t regs[R_MAX]; +}; + +static inline void eth_pulse_irq(struct xlx_ethlite *s) +{ + /* Only the first gie reg is active. 
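Interrupt generation is gated solely by R_TX_GIE0; the second ping-pong buffer has no separate global enable.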
*/ + if (s->regs[R_TX_GIE0] & GIE_GIE) { + qemu_irq_pulse(s->irq); + } +} + +static uint64_t +eth_read(void *opaque, hwaddr addr, unsigned int size) +{ + struct xlx_ethlite *s = opaque; + uint32_t r = 0; + + addr >>= 2; + + switch (addr) + { + case R_TX_GIE0: + case R_TX_LEN0: + case R_TX_LEN1: + case R_TX_CTRL1: + case R_TX_CTRL0: + case R_RX_CTRL1: + case R_RX_CTRL0: + r = s->regs[addr]; + D(qemu_log("%s " TARGET_FMT_plx "=%x\n", __func__, addr * 4, r)); + break; + + default: + r = tswap32(s->regs[addr]); + break; + } + return r; +} + +static void +eth_write(void *opaque, hwaddr addr, + uint64_t val64, unsigned int size) +{ + struct xlx_ethlite *s = opaque; + unsigned int base = 0; + uint32_t value = val64; + + addr >>= 2; + switch (addr) + { + case R_TX_CTRL0: + case R_TX_CTRL1: + if (addr == R_TX_CTRL1) + base = 0x800 / 4; + + D(qemu_log("%s addr=" TARGET_FMT_plx " val=%x\n", + __func__, addr * 4, value)); + if ((value & (CTRL_P | CTRL_S)) == CTRL_S) { + qemu_send_packet(qemu_get_queue(s->nic), + (void *) &s->regs[base], + s->regs[base + R_TX_LEN0]); + D(qemu_log("eth_tx %d\n", s->regs[base + R_TX_LEN0])); + if (s->regs[base + R_TX_CTRL0] & CTRL_I) + eth_pulse_irq(s); + } else if ((value & (CTRL_P | CTRL_S)) == (CTRL_P | CTRL_S)) { + memcpy(&s->conf.macaddr.a[0], &s->regs[base], 6); + if (s->regs[base + R_TX_CTRL0] & CTRL_I) + eth_pulse_irq(s); + } + + /* We are fast and get ready pretty much immediately so + we actually never flip the S nor P bits to one. */ + s->regs[addr] = value & ~(CTRL_P | CTRL_S); + break; + + /* Keep these native. */ + case R_RX_CTRL0: + case R_RX_CTRL1: + if (!(value & CTRL_S)) { + qemu_flush_queued_packets(qemu_get_queue(s->nic)); + } + /* fall through */ + case R_TX_LEN0: + case R_TX_LEN1: + case R_TX_GIE0: + D(qemu_log("%s addr=" TARGET_FMT_plx " val=%x\n", + __func__, addr * 4, value)); + s->regs[addr] = value; + break; + + default: + s->regs[addr] = tswap32(value); + break; + } +} + +static const MemoryRegionOps eth_ops = { + .read = eth_read, + .write = eth_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4 + } +}; + +static bool eth_can_rx(NetClientState *nc) +{ + struct xlx_ethlite *s = qemu_get_nic_opaque(nc); + unsigned int rxbase = s->rxbuf * (0x800 / 4); + + return !(s->regs[rxbase + R_RX_CTRL0] & CTRL_S); +} + +static ssize_t eth_rx(NetClientState *nc, const uint8_t *buf, size_t size) +{ + struct xlx_ethlite *s = qemu_get_nic_opaque(nc); + unsigned int rxbase = s->rxbuf * (0x800 / 4); + + /* DA filter. */ + if (!(buf[0] & 0x80) && memcmp(&s->conf.macaddr.a[0], buf, 6)) + return size; + + if (s->regs[rxbase + R_RX_CTRL0] & CTRL_S) { + D(qemu_log("ethlite lost packet %x\n", s->regs[R_RX_CTRL0])); + return -1; + } + + D(qemu_log("%s %zd rxbase=%x\n", __func__, size, rxbase)); + if (size > (R_MAX - R_RX_BUF0 - rxbase) * 4) { + D(qemu_log("ethlite packet is too big, size=%x\n", size)); + return -1; + } + memcpy(&s->regs[rxbase + R_RX_BUF0], buf, size); + + s->regs[rxbase + R_RX_CTRL0] |= CTRL_S; + if (s->regs[R_RX_CTRL0] & CTRL_I) { + eth_pulse_irq(s); + } + + /* If c_rx_pingpong was set flip buffers. 
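XOR-ing with the property value toggles between buffers 0 and 1 when ping-pong is enabled, and is a no-op otherwise.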
*/ + s->rxbuf ^= s->c_rx_pingpong; + return size; +} + +static void xilinx_ethlite_reset(DeviceState *dev) +{ + struct xlx_ethlite *s = XILINX_ETHLITE(dev); + + s->rxbuf = 0; +} + +static NetClientInfo net_xilinx_ethlite_info = { + .type = NET_CLIENT_DRIVER_NIC, + .size = sizeof(NICState), + .can_receive = eth_can_rx, + .receive = eth_rx, +}; + +static void xilinx_ethlite_realize(DeviceState *dev, Error **errp) +{ + struct xlx_ethlite *s = XILINX_ETHLITE(dev); + + qemu_macaddr_default_if_unset(&s->conf.macaddr); + s->nic = qemu_new_nic(&net_xilinx_ethlite_info, &s->conf, + object_get_typename(OBJECT(dev)), dev->id, s); + qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a); +} + +static void xilinx_ethlite_init(Object *obj) +{ + struct xlx_ethlite *s = XILINX_ETHLITE(obj); + + sysbus_init_irq(SYS_BUS_DEVICE(obj), &s->irq); + + memory_region_init_io(&s->mmio, obj, &eth_ops, s, + "xlnx.xps-ethernetlite", R_MAX * 4); + sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->mmio); +} + +static Property xilinx_ethlite_properties[] = { + DEFINE_PROP_UINT32("tx-ping-pong", struct xlx_ethlite, c_tx_pingpong, 1), + DEFINE_PROP_UINT32("rx-ping-pong", struct xlx_ethlite, c_rx_pingpong, 1), + DEFINE_NIC_PROPERTIES(struct xlx_ethlite, conf), + DEFINE_PROP_END_OF_LIST(), +}; + +static void xilinx_ethlite_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = xilinx_ethlite_realize; + dc->reset = xilinx_ethlite_reset; + device_class_set_props(dc, xilinx_ethlite_properties); +} + +static const TypeInfo xilinx_ethlite_info = { + .name = TYPE_XILINX_ETHLITE, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(struct xlx_ethlite), + .instance_init = xilinx_ethlite_init, + .class_init = xilinx_ethlite_class_init, +}; + +static void xilinx_ethlite_register_types(void) +{ + type_register_static(&xilinx_ethlite_info); +} + +type_init(xilinx_ethlite_register_types) diff --git a/hw/nios2/10m50_devboard.c b/hw/nios2/10m50_devboard.c new file mode 100644 index 000000000..3d1205b8b --- /dev/null +++ b/hw/nios2/10m50_devboard.c @@ -0,0 +1,115 @@ +/* + * Altera 10M50 Nios2 GHRD + * + * Copyright (c) 2016 Marek Vasut + * + * Based on LabX device code + * + * Copyright (c) 2012 Chris Wulff + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see + * + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" + +#include "hw/sysbus.h" +#include "hw/char/serial.h" +#include "hw/qdev-properties.h" +#include "sysemu/sysemu.h" +#include "hw/boards.h" +#include "exec/memory.h" +#include "exec/address-spaces.h" +#include "qemu/config-file.h" + +#include "boot.h" + +#define BINARY_DEVICE_TREE_FILE "10m50-devboard.dtb" + +static void nios2_10m50_ghrd_init(MachineState *machine) +{ + Nios2CPU *cpu; + DeviceState *dev; + MemoryRegion *address_space_mem = get_system_memory(); + MemoryRegion *phys_tcm = g_new(MemoryRegion, 1); + MemoryRegion *phys_tcm_alias = g_new(MemoryRegion, 1); + MemoryRegion *phys_ram = g_new(MemoryRegion, 1); + MemoryRegion *phys_ram_alias = g_new(MemoryRegion, 1); + ram_addr_t tcm_base = 0x0; + ram_addr_t tcm_size = 0x1000; /* 1 kiB, but QEMU limit is 4 kiB */ + ram_addr_t ram_base = 0x08000000; + ram_addr_t ram_size = 0x08000000; + qemu_irq irq[32]; + int i; + + /* Physical TCM (tb_ram_1k) with alias at 0xc0000000 */ + memory_region_init_ram(phys_tcm, NULL, "nios2.tcm", tcm_size, + &error_abort); + memory_region_init_alias(phys_tcm_alias, NULL, "nios2.tcm.alias", + phys_tcm, 0, tcm_size); + memory_region_add_subregion(address_space_mem, tcm_base, phys_tcm); + memory_region_add_subregion(address_space_mem, 0xc0000000 + tcm_base, + phys_tcm_alias); + + /* Physical DRAM with alias at 0xc0000000 */ + memory_region_init_ram(phys_ram, NULL, "nios2.ram", ram_size, + &error_abort); + memory_region_init_alias(phys_ram_alias, NULL, "nios2.ram.alias", + phys_ram, 0, ram_size); + memory_region_add_subregion(address_space_mem, ram_base, phys_ram); + memory_region_add_subregion(address_space_mem, 0xc0000000 + ram_base, + phys_ram_alias); + + /* Create CPU -- FIXME */ + cpu = NIOS2_CPU(cpu_create(TYPE_NIOS2_CPU)); + for (i = 0; i < 32; i++) { + irq[i] = qdev_get_gpio_in_named(DEVICE(cpu), "IRQ", i); + } + + /* Register: Altera 16550 UART */ + serial_mm_init(address_space_mem, 0xf8001600, 2, irq[1], 115200, + serial_hd(0), DEVICE_NATIVE_ENDIAN); + + /* Register: Timer sys_clk_timer */ + dev = qdev_new("ALTR.timer"); + qdev_prop_set_uint32(dev, "clock-frequency", 75 * 1000000); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, 0xf8001440); + sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, irq[0]); + + /* Register: Timer sys_clk_timer_1 */ + dev = qdev_new("ALTR.timer"); + qdev_prop_set_uint32(dev, "clock-frequency", 75 * 1000000); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, 0xe0000880); + sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, irq[5]); + + /* Configure new exception vectors and reset CPU for it to take effect. 
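The addresses below follow the 10M50 GHRD memory map rather than the Nios II architectural defaults.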
*/ + cpu->reset_addr = 0xd4000000; + cpu->exception_addr = 0xc8000120; + cpu->fast_tlb_miss_addr = 0xc0000100; + + nios2_load_kernel(cpu, ram_base, ram_size, machine->initrd_filename, + BINARY_DEVICE_TREE_FILE, NULL); +} + +static void nios2_10m50_ghrd_machine_init(struct MachineClass *mc) +{ + mc->desc = "Altera 10M50 GHRD Nios II design"; + mc->init = nios2_10m50_ghrd_init; + mc->is_default = true; +} + +DEFINE_MACHINE("10m50-ghrd", nios2_10m50_ghrd_machine_init); diff --git a/hw/nios2/Kconfig b/hw/nios2/Kconfig new file mode 100644 index 000000000..b10ea640d --- /dev/null +++ b/hw/nios2/Kconfig @@ -0,0 +1,12 @@ +config NIOS2_10M50 + bool + select NIOS2 + select SERIAL + select ALTERA_TIMER + +config NIOS2_GENERIC_NOMMU + bool + select NIOS2 + +config NIOS2 + bool diff --git a/hw/nios2/boot.c b/hw/nios2/boot.c new file mode 100644 index 000000000..5b3e4efed --- /dev/null +++ b/hw/nios2/boot.c @@ -0,0 +1,229 @@ +/* + * Nios2 kernel loader + * + * Copyright (c) 2016 Marek Vasut + * + * Based on microblaze kernel loader + * + * Copyright (c) 2012 Peter Crosthwaite + * Copyright (c) 2012 PetaLogix + * Copyright (c) 2009 Edgar E. Iglesias. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "qemu-common.h" +#include "qemu/datadir.h" +#include "qemu/option.h" +#include "qemu/config-file.h" +#include "qemu/error-report.h" +#include "sysemu/device_tree.h" +#include "sysemu/reset.h" +#include "hw/boards.h" +#include "hw/loader.h" +#include "elf.h" + +#include "boot.h" + +#define NIOS2_MAGIC 0x534f494e + +static struct nios2_boot_info { + void (*machine_cpu_reset)(Nios2CPU *); + uint32_t bootstrap_pc; + uint32_t cmdline; + uint32_t initrd_start; + uint32_t initrd_end; + uint32_t fdt; +} boot_info; + +static void main_cpu_reset(void *opaque) +{ + Nios2CPU *cpu = opaque; + CPUState *cs = CPU(cpu); + CPUNios2State *env = &cpu->env; + + cpu_reset(CPU(cpu)); + + env->regs[R_ARG0] = NIOS2_MAGIC; + env->regs[R_ARG1] = boot_info.initrd_start; + env->regs[R_ARG2] = boot_info.fdt; + env->regs[R_ARG3] = boot_info.cmdline; + + cpu_set_pc(cs, boot_info.bootstrap_pc); + if (boot_info.machine_cpu_reset) { + boot_info.machine_cpu_reset(cpu); + } +} + +static uint64_t translate_kernel_address(void *opaque, uint64_t addr) +{ + return addr - 0xc0000000LL; +} + +static int nios2_load_dtb(struct nios2_boot_info bi, const uint32_t ramsize, + const char *kernel_cmdline, const char *dtb_filename) +{ + int fdt_size; + void *fdt = NULL; + int r; + + if (dtb_filename) { + fdt = load_device_tree(dtb_filename, &fdt_size); + } + if (!fdt) { + return 0; + } + + if (kernel_cmdline) { + r = qemu_fdt_setprop_string(fdt, "/chosen", "bootargs", + kernel_cmdline); + if (r < 0) { + fprintf(stderr, "couldn't set /chosen/bootargs\n"); + } + } + + if (bi.initrd_start) { + qemu_fdt_setprop_cell(fdt, "/chosen", "linux,initrd-start", + translate_kernel_address(NULL, bi.initrd_start)); + + qemu_fdt_setprop_cell(fdt, "/chosen", "linux,initrd-end", + translate_kernel_address(NULL, bi.initrd_end)); + } + + cpu_physical_memory_write(bi.fdt, fdt, fdt_size); + g_free(fdt); + return fdt_size; +} + +void nios2_load_kernel(Nios2CPU *cpu, hwaddr ddr_base, + uint32_t ramsize, + const char *initrd_filename, + const char *dtb_filename, + void (*machine_cpu_reset)(Nios2CPU *)) +{ + const char *kernel_filename; + const char *kernel_cmdline; + const char *dtb_arg; + char *filename = NULL; + + kernel_filename = current_machine->kernel_filename; + kernel_cmdline = current_machine->kernel_cmdline; + dtb_arg = current_machine->dtb; + /* default to pcbios dtb as passed by machine_init */ + if (!dtb_arg) { + filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, dtb_filename); + } + + boot_info.machine_cpu_reset = machine_cpu_reset; + qemu_register_reset(main_cpu_reset, cpu); + + if (kernel_filename) { + int kernel_size, fdt_size; + uint64_t entry, high; + int big_endian = 0; + +#ifdef TARGET_WORDS_BIGENDIAN + big_endian = 1; +#endif + + /* Boots a kernel elf binary. */ + kernel_size = load_elf(kernel_filename, NULL, NULL, NULL, + &entry, NULL, &high, NULL, + big_endian, EM_ALTERA_NIOS2, 0, 0); + if ((uint32_t)entry == 0xc0000000) { + /* + * The Nios II processor reference guide documents that the + * kernel is placed at virtual memory address 0xc0000000, + * and we've got something that points there. Reload it + * and adjust the entry to get the address in physical RAM. + */ + kernel_size = load_elf(kernel_filename, NULL, + translate_kernel_address, NULL, + &entry, NULL, NULL, NULL, + big_endian, EM_ALTERA_NIOS2, 0, 0); + boot_info.bootstrap_pc = ddr_base + 0xc0000000 + + (entry & 0x07ffffff); + } else { + /* Use the entry point in the ELF image. 
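The image is assumed to be linked to run from physical addresses, so the entry point can be used as-is.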
*/ + boot_info.bootstrap_pc = (uint32_t)entry; + } + + /* If it wasn't an ELF image, try a U-Boot image. */ + if (kernel_size < 0) { + hwaddr uentry, loadaddr = LOAD_UIMAGE_LOADADDR_INVALID; + + kernel_size = load_uimage(kernel_filename, &uentry, &loadaddr, 0, + NULL, NULL); + boot_info.bootstrap_pc = uentry; + high = loadaddr + kernel_size; + } + + /* Neither an ELF image nor a U-Boot image; try a raw binary image. */ + if (kernel_size < 0) { + kernel_size = load_image_targphys(kernel_filename, ddr_base, + ramsize); + boot_info.bootstrap_pc = ddr_base; + high = ddr_base + kernel_size; + } + + high = ROUND_UP(high, 1 * MiB); + + /* If an initrd is available, it goes after the kernel, aligned to 1 MiB. */ + if (initrd_filename) { + int initrd_size; + uint32_t initrd_offset; + + boot_info.initrd_start = high; + initrd_offset = boot_info.initrd_start - ddr_base; + + initrd_size = load_ramdisk(initrd_filename, + boot_info.initrd_start, + ramsize - initrd_offset); + if (initrd_size < 0) { + initrd_size = load_image_targphys(initrd_filename, + boot_info.initrd_start, + ramsize - initrd_offset); + } + if (initrd_size < 0) { + error_report("could not load initrd '%s'", + initrd_filename); + exit(EXIT_FAILURE); + } + high += initrd_size; + } + high = ROUND_UP(high, 4); + boot_info.initrd_end = high; + + /* Device tree must be placed right after initrd (if available) */ + boot_info.fdt = high; + fdt_size = nios2_load_dtb(boot_info, ramsize, kernel_cmdline, + /* Prefer a -dtb argument */ + dtb_arg ? dtb_arg : filename); + high += fdt_size; + + /* The kernel command line goes at the end, 4 KiB aligned. */ + boot_info.cmdline = ROUND_UP(high, 4 * KiB); + if (kernel_cmdline && strlen(kernel_cmdline)) { + pstrcpy_targphys("cmdline", boot_info.cmdline, 256, kernel_cmdline); + } + } + g_free(filename); +} diff --git a/hw/nios2/boot.h b/hw/nios2/boot.h new file mode 100644 index 000000000..59b9fbfc6 --- /dev/null +++ b/hw/nios2/boot.h @@ -0,0 +1,10 @@ +#ifndef NIOS2_BOOT_H +#define NIOS2_BOOT_H + +#include "cpu.h" + +void nios2_load_kernel(Nios2CPU *cpu, hwaddr ddr_base, uint32_t ramsize, + const char *initrd_filename, const char *dtb_filename, + void (*machine_cpu_reset)(Nios2CPU *)); + +#endif /* NIOS2_BOOT_H */ diff --git a/hw/nios2/generic_nommu.c b/hw/nios2/generic_nommu.c new file mode 100644 index 000000000..fbc18dbd0 --- /dev/null +++ b/hw/nios2/generic_nommu.c @@ -0,0 +1,101 @@ +/* + * Generic simulator target with no MMU or devices. This emulation is + * compatible with the libgloss qemu-hosted.ld linker script for using + * QEMU as an instruction set simulator. + * + * Copyright (c) 2018-2019 Mentor Graphics + * + * Copyright (c) 2016 Marek Vasut + * + * Based on LabX device code + * + * Copyright (c) 2012 Chris Wulff + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see + * + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu-common.h" + +#include "hw/char/serial.h" +#include "hw/boards.h" +#include "exec/memory.h" +#include "exec/address-spaces.h" +#include "qemu/config-file.h" + +#include "boot.h" + +#define BINARY_DEVICE_TREE_FILE "generic-nommu.dtb" + +static void nios2_generic_nommu_init(MachineState *machine) +{ + Nios2CPU *cpu; + MemoryRegion *address_space_mem = get_system_memory(); + MemoryRegion *phys_tcm = g_new(MemoryRegion, 1); + MemoryRegion *phys_tcm_alias = g_new(MemoryRegion, 1); + MemoryRegion *phys_ram = g_new(MemoryRegion, 1); + MemoryRegion *phys_ram_alias = g_new(MemoryRegion, 1); + ram_addr_t tcm_base = 0x0; + ram_addr_t tcm_size = 0x1000; /* 1 kiB, but QEMU limit is 4 kiB */ + ram_addr_t ram_base = 0x10000000; + ram_addr_t ram_size = 0x08000000; + + /* Physical TCM (tb_ram_1k) with alias at 0xc0000000 */ + memory_region_init_ram(phys_tcm, NULL, "nios2.tcm", tcm_size, + &error_abort); + memory_region_init_alias(phys_tcm_alias, NULL, "nios2.tcm.alias", + phys_tcm, 0, tcm_size); + memory_region_add_subregion(address_space_mem, tcm_base, phys_tcm); + memory_region_add_subregion(address_space_mem, 0xc0000000 + tcm_base, + phys_tcm_alias); + + /* Physical DRAM with alias at 0xc0000000 */ + memory_region_init_ram(phys_ram, NULL, "nios2.ram", ram_size, + &error_abort); + memory_region_init_alias(phys_ram_alias, NULL, "nios2.ram.alias", + phys_ram, 0, ram_size); + memory_region_add_subregion(address_space_mem, ram_base, phys_ram); + memory_region_add_subregion(address_space_mem, 0xc0000000 + ram_base, + phys_ram_alias); + + cpu = NIOS2_CPU(cpu_create(TYPE_NIOS2_CPU)); + + /* Remove MMU */ + cpu->mmu_present = false; + + /* Reset vector is the first 32 bytes of RAM. */ + cpu->reset_addr = ram_base; + + /* The interrupt vector comes right after reset. */ + cpu->exception_addr = ram_base + 0x20; + + /* + * The linker script does have a TLB miss memory region declared, + * but this should never be used with no MMU. + */ + cpu->fast_tlb_miss_addr = 0x7fff400; + + nios2_load_kernel(cpu, ram_base, ram_size, machine->initrd_filename, + BINARY_DEVICE_TREE_FILE, NULL); +} + +static void nios2_generic_nommu_machine_init(struct MachineClass *mc) +{ + mc->desc = "Generic NOMMU Nios II design"; + mc->init = nios2_generic_nommu_init; +} + +DEFINE_MACHINE("nios2-generic-nommu", nios2_generic_nommu_machine_init); diff --git a/hw/nios2/meson.build b/hw/nios2/meson.build new file mode 100644 index 000000000..6c58e8082 --- /dev/null +++ b/hw/nios2/meson.build @@ -0,0 +1,6 @@ +nios2_ss = ss.source_set() +nios2_ss.add(files('boot.c')) +nios2_ss.add(when: 'CONFIG_NIOS2_10M50', if_true: files('10m50_devboard.c')) +nios2_ss.add(when: 'CONFIG_NIOS2_GENERIC_NOMMU', if_true: files('generic_nommu.c')) + +hw_arch += {'nios2': nios2_ss} diff --git a/hw/nubus/Kconfig b/hw/nubus/Kconfig new file mode 100644 index 000000000..8fb8b2218 --- /dev/null +++ b/hw/nubus/Kconfig @@ -0,0 +1,2 @@ +config NUBUS + bool diff --git a/hw/nubus/mac-nubus-bridge.c b/hw/nubus/mac-nubus-bridge.c new file mode 100644 index 000000000..a0da5a8b2 --- /dev/null +++ b/hw/nubus/mac-nubus-bridge.c @@ -0,0 +1,63 @@ +/* + * QEMU Macintosh Nubus + * + * Copyright (c) 2013-2018 Laurent Vivier + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ * + */ + +#include "qemu/osdep.h" +#include "hw/sysbus.h" +#include "hw/nubus/mac-nubus-bridge.h" + + +static void mac_nubus_bridge_init(Object *obj) +{ + MacNubusBridge *s = MAC_NUBUS_BRIDGE(obj); + NubusBridge *nb = NUBUS_BRIDGE(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + NubusBus *bus = &nb->bus; + + /* Macintosh only has slots 0x9 to 0xe available */ + bus->slot_available_mask = MAKE_64BIT_MASK(MAC_NUBUS_FIRST_SLOT, + MAC_NUBUS_SLOT_NB); + + /* Aliases for slots 0x9 to 0xe */ + memory_region_init_alias(&s->super_slot_alias, obj, "super-slot-alias", + &bus->nubus_mr, + MAC_NUBUS_FIRST_SLOT * NUBUS_SUPER_SLOT_SIZE, + MAC_NUBUS_SLOT_NB * NUBUS_SUPER_SLOT_SIZE); + + memory_region_init_alias(&s->slot_alias, obj, "slot-alias", + &bus->nubus_mr, + NUBUS_SLOT_BASE + + MAC_NUBUS_FIRST_SLOT * NUBUS_SLOT_SIZE, + MAC_NUBUS_SLOT_NB * NUBUS_SLOT_SIZE); + + sysbus_init_mmio(sbd, &s->super_slot_alias); + sysbus_init_mmio(sbd, &s->slot_alias); +} + +static void mac_nubus_bridge_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->desc = "Nubus bridge"; +} + +static const TypeInfo mac_nubus_bridge_info = { + .name = TYPE_MAC_NUBUS_BRIDGE, + .parent = TYPE_NUBUS_BRIDGE, + .instance_init = mac_nubus_bridge_init, + .instance_size = sizeof(MacNubusBridge), + .class_init = mac_nubus_bridge_class_init, +}; + +static void mac_nubus_bridge_register_types(void) +{ + type_register_static(&mac_nubus_bridge_info); +} + +type_init(mac_nubus_bridge_register_types) diff --git a/hw/nubus/meson.build b/hw/nubus/meson.build new file mode 100644 index 000000000..9287c633a --- /dev/null +++ b/hw/nubus/meson.build @@ -0,0 +1,7 @@ +nubus_ss = ss.source_set() +nubus_ss.add(files('nubus-device.c')) +nubus_ss.add(files('nubus-bus.c')) +nubus_ss.add(files('nubus-bridge.c')) +nubus_ss.add(when: 'CONFIG_Q800', if_true: files('mac-nubus-bridge.c')) + +softmmu_ss.add_all(when: 'CONFIG_NUBUS', if_true: nubus_ss) diff --git a/hw/nubus/nubus-bridge.c b/hw/nubus/nubus-bridge.c new file mode 100644 index 000000000..a42c86080 --- /dev/null +++ b/hw/nubus/nubus-bridge.c @@ -0,0 +1,53 @@ +/* + * QEMU Nubus + * + * Copyright (c) 2013-2018 Laurent Vivier + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ * + */ + +#include "qemu/osdep.h" +#include "hw/sysbus.h" +#include "hw/nubus/nubus.h" + + +static void nubus_bridge_init(Object *obj) +{ + NubusBridge *s = NUBUS_BRIDGE(obj); + NubusBus *bus = &s->bus; + + qbus_init(bus, sizeof(s->bus), TYPE_NUBUS_BUS, DEVICE(s), NULL); + + qdev_init_gpio_out(DEVICE(s), bus->irqs, NUBUS_IRQS); +} + +static Property nubus_bridge_properties[] = { + DEFINE_PROP_UINT16("slot-available-mask", NubusBridge, + bus.slot_available_mask, 0xffff), + DEFINE_PROP_END_OF_LIST() +}; + +static void nubus_bridge_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->fw_name = "nubus"; + device_class_set_props(dc, nubus_bridge_properties); +} + +static const TypeInfo nubus_bridge_info = { + .name = TYPE_NUBUS_BRIDGE, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_init = nubus_bridge_init, + .instance_size = sizeof(NubusBridge), + .class_init = nubus_bridge_class_init, +}; + +static void nubus_register_types(void) +{ + type_register_static(&nubus_bridge_info); +} + +type_init(nubus_register_types) diff --git a/hw/nubus/nubus-bus.c b/hw/nubus/nubus-bus.c new file mode 100644 index 000000000..07c279bde --- /dev/null +++ b/hw/nubus/nubus-bus.c @@ -0,0 +1,188 @@ +/* + * QEMU Macintosh Nubus + * + * Copyright (c) 2013-2018 Laurent Vivier + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +/* + * References: + * Nubus Specification (TI) + * http://www.bitsavers.org/pdf/ti/nubus/2242825-0001_NuBus_Spec1983.pdf + * + * Designing Cards and Drivers for the Macintosh Family (Apple) + */ + +#include "qemu/osdep.h" +#include "hw/nubus/nubus.h" +#include "qapi/error.h" +#include "trace.h" + + +static NubusBus *nubus_find(void) +{ + /* Returns NULL unless there is exactly one nubus device */ + return NUBUS_BUS(object_resolve_path_type("", TYPE_NUBUS_BUS, NULL)); +} + +static MemTxResult nubus_slot_write(void *opaque, hwaddr addr, uint64_t val, + unsigned size, MemTxAttrs attrs) +{ + trace_nubus_slot_write(addr, val, size); + return MEMTX_DECODE_ERROR; +} + +static MemTxResult nubus_slot_read(void *opaque, hwaddr addr, uint64_t *data, + unsigned size, MemTxAttrs attrs) +{ + trace_nubus_slot_read(addr, size); + return MEMTX_DECODE_ERROR; +} + +static const MemoryRegionOps nubus_slot_ops = { + .read_with_attrs = nubus_slot_read, + .write_with_attrs = nubus_slot_write, + .endianness = DEVICE_BIG_ENDIAN, + .valid = { + .min_access_size = 1, + .max_access_size = 4, + }, +}; + +static MemTxResult nubus_super_slot_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size, + MemTxAttrs attrs) +{ + trace_nubus_super_slot_write(addr, val, size); + return MEMTX_DECODE_ERROR; +} + +static MemTxResult nubus_super_slot_read(void *opaque, hwaddr addr, + uint64_t *data, unsigned size, + MemTxAttrs attrs) +{ + trace_nubus_super_slot_read(addr, size); + return MEMTX_DECODE_ERROR; +} + +static const MemoryRegionOps nubus_super_slot_ops = { + .read_with_attrs = nubus_super_slot_read, + .write_with_attrs = nubus_super_slot_write, + .endianness = DEVICE_BIG_ENDIAN, + .valid = { + .min_access_size = 1, + .max_access_size = 4, + }, +}; + +static void nubus_unrealize(BusState *bus) +{ + NubusBus *nubus = NUBUS_BUS(bus); + + address_space_destroy(&nubus->nubus_as); +} + +static void nubus_realize(BusState *bus, Error **errp) +{ + NubusBus *nubus = NUBUS_BUS(bus); + + if (!nubus_find()) { + error_setg(errp, "at most one %s device is permitted", TYPE_NUBUS_BUS); + return; + } + + 
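/* A single flat address space, backed by the nubus_mr container set up in nubus_init() */ +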
address_space_init(&nubus->nubus_as, &nubus->nubus_mr, "nubus"); +} + +static void nubus_init(Object *obj) +{ + NubusBus *nubus = NUBUS_BUS(obj); + + memory_region_init(&nubus->nubus_mr, obj, "nubus", 0x100000000); + + memory_region_init_io(&nubus->super_slot_io, obj, &nubus_super_slot_ops, + nubus, "nubus-super-slots", + (NUBUS_SUPER_SLOT_NB + 1) * NUBUS_SUPER_SLOT_SIZE); + memory_region_add_subregion(&nubus->nubus_mr, 0x0, &nubus->super_slot_io); + + memory_region_init_io(&nubus->slot_io, obj, &nubus_slot_ops, + nubus, "nubus-slots", + NUBUS_SLOT_NB * NUBUS_SLOT_SIZE); + memory_region_add_subregion(&nubus->nubus_mr, + (NUBUS_SUPER_SLOT_NB + 1) * + NUBUS_SUPER_SLOT_SIZE, &nubus->slot_io); + + nubus->slot_available_mask = MAKE_64BIT_MASK(NUBUS_FIRST_SLOT, + NUBUS_SLOT_NB); +} + +static char *nubus_get_dev_path(DeviceState *dev) +{ + NubusDevice *nd = NUBUS_DEVICE(dev); + BusState *bus = qdev_get_parent_bus(dev); + char *p = qdev_get_dev_path(bus->parent); + + if (p) { + char *ret = g_strdup_printf("%s/%s/%02x", p, bus->name, nd->slot); + g_free(p); + return ret; + } else { + return g_strdup_printf("%s/%02x", bus->name, nd->slot); + } +} + +static bool nubus_check_address(BusState *bus, DeviceState *dev, Error **errp) +{ + NubusDevice *nd = NUBUS_DEVICE(dev); + NubusBus *nubus = NUBUS_BUS(bus); + + if (nd->slot == -1) { + /* No slot specified, find first available free slot */ + int s = ctz32(nubus->slot_available_mask); + if (s != 32) { + nd->slot = s; + } else { + error_setg(errp, "Cannot register nubus card, no free slot " + "available"); + return false; + } + } else { + /* Slot specified, make sure the slot is available */ + if (!(nubus->slot_available_mask & BIT(nd->slot))) { + error_setg(errp, "Cannot register nubus card, slot %d is " + "unavailable or already occupied", nd->slot); + return false; + } + } + + nubus->slot_available_mask &= ~BIT(nd->slot); + return true; +} + +static void nubus_class_init(ObjectClass *oc, void *data) +{ + BusClass *bc = BUS_CLASS(oc); + + bc->realize = nubus_realize; + bc->unrealize = nubus_unrealize; + bc->check_address = nubus_check_address; + bc->get_dev_path = nubus_get_dev_path; +} + +static const TypeInfo nubus_bus_info = { + .name = TYPE_NUBUS_BUS, + .parent = TYPE_BUS, + .instance_size = sizeof(NubusBus), + .instance_init = nubus_init, + .class_init = nubus_class_init, +}; + +static void nubus_register_types(void) +{ + type_register_static(&nubus_bus_info); +} + +type_init(nubus_register_types) diff --git a/hw/nubus/nubus-device.c b/hw/nubus/nubus-device.c new file mode 100644 index 000000000..0f1852f67 --- /dev/null +++ b/hw/nubus/nubus-device.c @@ -0,0 +1,120 @@ +/* + * QEMU Macintosh Nubus + * + * Copyright (c) 2013-2018 Laurent Vivier + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ * + */ + +#include "qemu/osdep.h" +#include "qemu/datadir.h" +#include "hw/irq.h" +#include "hw/loader.h" +#include "hw/nubus/nubus.h" +#include "qapi/error.h" +#include "qemu/error-report.h" + + +void nubus_set_irq(NubusDevice *nd, int level) +{ + NubusBus *nubus = NUBUS_BUS(qdev_get_parent_bus(DEVICE(nd))); + + qemu_set_irq(nubus->irqs[nd->slot], level); +} + +static void nubus_device_realize(DeviceState *dev, Error **errp) +{ + NubusBus *nubus = NUBUS_BUS(qdev_get_parent_bus(dev)); + NubusDevice *nd = NUBUS_DEVICE(dev); + char *name, *path; + hwaddr slot_offset; + int64_t size; + int ret; + + /* Super */ + slot_offset = nd->slot * NUBUS_SUPER_SLOT_SIZE; + + name = g_strdup_printf("nubus-super-slot-%x", nd->slot); + memory_region_init(&nd->super_slot_mem, OBJECT(dev), name, + NUBUS_SUPER_SLOT_SIZE); + memory_region_add_subregion(&nubus->super_slot_io, slot_offset, + &nd->super_slot_mem); + g_free(name); + + /* Normal */ + slot_offset = nd->slot * NUBUS_SLOT_SIZE; + + name = g_strdup_printf("nubus-slot-%x", nd->slot); + memory_region_init(&nd->slot_mem, OBJECT(dev), name, NUBUS_SLOT_SIZE); + memory_region_add_subregion(&nubus->slot_io, slot_offset, + &nd->slot_mem); + g_free(name); + + /* Declaration ROM */ + if (nd->romfile != NULL) { + path = qemu_find_file(QEMU_FILE_TYPE_BIOS, nd->romfile); + if (path == NULL) { + path = g_strdup(nd->romfile); + } + + size = get_image_size(path); + if (size < 0) { + error_setg(errp, "failed to find romfile \"%s\"", nd->romfile); + g_free(path); + return; + } else if (size == 0) { + error_setg(errp, "romfile \"%s\" is empty", nd->romfile); + g_free(path); + return; + } else if (size > NUBUS_DECL_ROM_MAX_SIZE) { + error_setg(errp, "romfile \"%s\" too large (maximum size 128K)", + nd->romfile); + g_free(path); + return; + } + + name = g_strdup_printf("nubus-slot-%x-declaration-rom", nd->slot); + memory_region_init_rom(&nd->decl_rom, OBJECT(dev), name, size, + &error_abort); + ret = load_image_mr(path, &nd->decl_rom); + g_free(path); + if (ret < 0) { + error_setg(errp, "could not load romfile \"%s\"", nd->romfile); + return; + } + memory_region_add_subregion(&nd->slot_mem, NUBUS_SLOT_SIZE - size, + &nd->decl_rom); + } +} + +static Property nubus_device_properties[] = { + DEFINE_PROP_INT32("slot", NubusDevice, slot, -1), + DEFINE_PROP_STRING("romfile", NubusDevice, romfile), + DEFINE_PROP_END_OF_LIST() +}; + +static void nubus_device_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + + dc->realize = nubus_device_realize; + dc->bus_type = TYPE_NUBUS_BUS; + device_class_set_props(dc, nubus_device_properties); +} + +static const TypeInfo nubus_device_type_info = { + .name = TYPE_NUBUS_DEVICE, + .parent = TYPE_DEVICE, + .abstract = true, + .instance_size = sizeof(NubusDevice), + .class_init = nubus_device_class_init, +}; + +static void nubus_register_types(void) +{ + type_register_static(&nubus_device_type_info); +} + +type_init(nubus_register_types) diff --git a/hw/nubus/trace-events b/hw/nubus/trace-events new file mode 100644 index 000000000..e31833d69 --- /dev/null +++ b/hw/nubus/trace-events @@ -0,0 +1,7 @@ +# See docs/devel/tracing.txt for syntax documentation. 
+ +# nubus-bus.c +nubus_slot_read(uint64_t addr, int size) "reading unassigned addr 0x%"PRIx64 " size %d" +nubus_slot_write(uint64_t addr, uint64_t val, int size) "writing unassigned addr 0x%"PRIx64 " value 0x%"PRIx64 " size %d" +nubus_super_slot_read(uint64_t addr, int size) "reading unassigned addr 0x%"PRIx64 " size %d" +nubus_super_slot_write(uint64_t addr, uint64_t val, int size) "writing unassigned addr 0x%"PRIx64 " value 0x%"PRIx64 " size %d" diff --git a/hw/nubus/trace.h b/hw/nubus/trace.h new file mode 100644 index 000000000..3749420da --- /dev/null +++ b/hw/nubus/trace.h @@ -0,0 +1 @@ +#include "trace/trace-hw_nubus.h" diff --git a/hw/nvme/Kconfig b/hw/nvme/Kconfig new file mode 100644 index 000000000..8ac90942e --- /dev/null +++ b/hw/nvme/Kconfig @@ -0,0 +1,4 @@ +config NVME_PCI + bool + default y if PCI_DEVICES + depends on PCI diff --git a/hw/nvme/ctrl.c b/hw/nvme/ctrl.c new file mode 100644 index 000000000..5f573c417 --- /dev/null +++ b/hw/nvme/ctrl.c @@ -0,0 +1,6731 @@ +/* + * QEMU NVM Express Controller + * + * Copyright (c) 2012, Intel Corporation + * + * Written by Keith Busch + * + * This code is licensed under the GNU GPL v2 or later. + */ + +/** + * Reference Specs: http://www.nvmexpress.org, 1.4, 1.3, 1.2, 1.1, 1.0e + * + * https://nvmexpress.org/developers/nvme-specification/ + * + * + * Notes on coding style + * --------------------- + * While QEMU coding style prefers lowercase hexadecimals in constants, the + * NVMe subsystem uses the format from the NVMe specifications in its comments + * (i.e. 'h' suffix instead of '0x' prefix). + * + * Usage + * ----- + * See docs/system/nvme.rst for extensive documentation. + * + * Add options: + * -drive file=,if=none,id= + * -device nvme-subsys,id=,nqn= + * -device nvme,serial=,id=, \ + * cmb_size_mb=, \ + * [pmrdev=,] \ + * max_ioqpairs=, \ + * aerl=,aer_max_queued=, \ + * mdts=,vsl=, \ + * zoned.zasl=, \ + * zoned.auto_transition=, \ + * subsys= + * -device nvme-ns,drive=,bus=,nsid=,\ + * zoned=, \ + * subsys=,detached= + * + * Note that cmb_size_mb denotes the size of the CMB in MB. The CMB is assumed + * to be at offset 0 in BAR2 and supports only WDS, RDS and SQS for now. By + * default, the device will use the "v1.4 CMB scheme" - use the `legacy-cmb` + * parameter to always enable the CMBLOC and CMBSZ registers (v1.3 behavior). + * + * PMR emulation can be enabled by pointing `pmrdev` to a memory-backend-file + * object. For example: + * -object memory-backend-file,id=,share=on,mem-path=, \ + * size= .... -device nvme,...,pmrdev= + * + * The PMR will use BAR 4/5 exclusively. + * + * To place controller(s) and namespace(s) in a subsystem, provide an + * nvme-subsys device as above. + * + * nvme subsystem device parameters + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * - `nqn` + * This parameter provides the `` part of the string + * `nqn.2019-08.org.qemu:` which will be reported in the SUBNQN field + * of subsystem controllers. Note that `` should be unique per + * subsystem, but this is not enforced by QEMU. If not specified, it will + * default to the value of the `id` parameter (``). + * + * nvme device parameters + * ~~~~~~~~~~~~~~~~~~~~~~ + * - `subsys` + * Specifying this parameter attaches the controller to the subsystem and + * the SUBNQN field in the controller will report the NQN of the subsystem + * device. This also enables the multi-controller capability represented in + * the Identify Controller data structure in CMIC (Controller Multi-path I/O + * and Namespace Sharing Capabilities). 
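+ * + * A minimal example (the IDs and serial here are hypothetical): + * -device nvme-subsys,id=nvme-subsys-0,nqn=subsys0 + * -device nvme,serial=deadbeef,subsys=nvme-subsys-0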
+ * + * - `aerl` + * The Asynchronous Event Request Limit (AERL). Indicates the maximum number + * of concurrently outstanding Asynchronous Event Request commands supported + * by the controller. This is a 0's based value. + * + * - `aer_max_queued` + * This is the maximum number of events that the device will enqueue for + * completion when there are no outstanding AERs. When the maximum number of + * enqueued events is reached, subsequent events will be dropped. + * + * - `mdts` + * Indicates the maximum data transfer size for a command that transfers data + * between host-accessible memory and the controller. The value is specified + * as a power of two (2^n) and is in units of the minimum memory page size + * (CAP.MPSMIN). The default value is 7 (i.e. 512 KiB). + * + * - `vsl` + * Indicates the maximum data size limit for the Verify command. Like `mdts`, + * this value is specified as a power of two (2^n) and is in units of the + * minimum memory page size (CAP.MPSMIN). The default value is 7 (i.e. 512 + * KiB). + * + * - `zoned.zasl` + * Indicates the maximum data transfer size for the Zone Append command. Like + * `mdts`, the value is specified as a power of two (2^n) and is in units of + * the minimum memory page size (CAP.MPSMIN). The default value is 0 (i.e. + * defaulting to the value of `mdts`). + * + * - `zoned.auto_transition` + * Indicates whether zones in the implicitly opened state may be + * automatically transitioned to the closed state for resource management + * purposes. Defaults to 'on'. + * + * nvme namespace device parameters + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * - `shared` + * When the parent nvme device (as defined explicitly by the 'bus' parameter + * or implicitly by the most recently defined NvmeBus) is linked to an + * nvme-subsys device, the namespace will be attached to all controllers in + * the subsystem. If set to 'off' (the default), the namespace will remain a + * private namespace and may only be attached to a single controller at a + * time. + * + * - `detached` + * This parameter is only valid together with the `subsys` parameter. If left + * at the default value (`false/off`), the namespace will be attached to all + * controllers in the NVMe subsystem at boot-up. If set to `true/on`, the + * namespace will be available in the subsystem but not attached to any + * controllers. + * + * Setting `zoned` to true selects the Zoned Command Set for the namespace. + * In this case, the following namespace properties are available to configure + * zoned operation: + * zoned.zone_size= + * The number may be followed by K, M, G as in kilo-, mega- or giga-. + * + * zoned.zone_capacity= + * The value 0 (default) forces zone capacity to be the same as zone + * size. The value of this property may not exceed zone size. + * + * zoned.descr_ext_size= + * This value needs to be specified in 64B units. If it is zero, + * namespace(s) will not support zone descriptor extensions. + * + * zoned.max_active= + * The default value means there is no limit to the number of + * concurrently active zones. + * + * zoned.max_open= + * The default value means there is no limit to the number of + * concurrently open zones. + * + * zoned.cross_read= + * Setting this property to true enables Read Across Zone Boundaries. 
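+ * + * A minimal zoned example (the drive ID, bus and sizes are hypothetical): + * -device nvme-ns,drive=nvm-0,bus=nvme0,nsid=1,zoned=on, \ + * zoned.zone_size=64M,zoned.max_open=16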
+ */ + +#include "qemu/osdep.h" +#include "qemu/cutils.h" +#include "qemu/error-report.h" +#include "qemu/log.h" +#include "qemu/units.h" +#include "qapi/error.h" +#include "qapi/visitor.h" +#include "sysemu/sysemu.h" +#include "sysemu/block-backend.h" +#include "sysemu/hostmem.h" +#include "hw/pci/msix.h" +#include "migration/vmstate.h" + +#include "nvme.h" +#include "trace.h" + +#define NVME_MAX_IOQPAIRS 0xffff +#define NVME_DB_SIZE 4 +#define NVME_SPEC_VER 0x00010400 +#define NVME_CMB_BIR 2 +#define NVME_PMR_BIR 4 +#define NVME_TEMPERATURE 0x143 +#define NVME_TEMPERATURE_WARNING 0x157 +#define NVME_TEMPERATURE_CRITICAL 0x175 +#define NVME_NUM_FW_SLOTS 1 +#define NVME_DEFAULT_MAX_ZA_SIZE (128 * KiB) + +#define NVME_GUEST_ERR(trace, fmt, ...) \ + do { \ + (trace_##trace)(__VA_ARGS__); \ + qemu_log_mask(LOG_GUEST_ERROR, #trace \ + " in %s: " fmt "\n", __func__, ## __VA_ARGS__); \ + } while (0) + +static const bool nvme_feature_support[NVME_FID_MAX] = { + [NVME_ARBITRATION] = true, + [NVME_POWER_MANAGEMENT] = true, + [NVME_TEMPERATURE_THRESHOLD] = true, + [NVME_ERROR_RECOVERY] = true, + [NVME_VOLATILE_WRITE_CACHE] = true, + [NVME_NUMBER_OF_QUEUES] = true, + [NVME_INTERRUPT_COALESCING] = true, + [NVME_INTERRUPT_VECTOR_CONF] = true, + [NVME_WRITE_ATOMICITY] = true, + [NVME_ASYNCHRONOUS_EVENT_CONF] = true, + [NVME_TIMESTAMP] = true, + [NVME_COMMAND_SET_PROFILE] = true, +}; + +static const uint32_t nvme_feature_cap[NVME_FID_MAX] = { + [NVME_TEMPERATURE_THRESHOLD] = NVME_FEAT_CAP_CHANGE, + [NVME_ERROR_RECOVERY] = NVME_FEAT_CAP_CHANGE | NVME_FEAT_CAP_NS, + [NVME_VOLATILE_WRITE_CACHE] = NVME_FEAT_CAP_CHANGE, + [NVME_NUMBER_OF_QUEUES] = NVME_FEAT_CAP_CHANGE, + [NVME_ASYNCHRONOUS_EVENT_CONF] = NVME_FEAT_CAP_CHANGE, + [NVME_TIMESTAMP] = NVME_FEAT_CAP_CHANGE, + [NVME_COMMAND_SET_PROFILE] = NVME_FEAT_CAP_CHANGE, +}; + +static const uint32_t nvme_cse_acs[256] = { + [NVME_ADM_CMD_DELETE_SQ] = NVME_CMD_EFF_CSUPP, + [NVME_ADM_CMD_CREATE_SQ] = NVME_CMD_EFF_CSUPP, + [NVME_ADM_CMD_GET_LOG_PAGE] = NVME_CMD_EFF_CSUPP, + [NVME_ADM_CMD_DELETE_CQ] = NVME_CMD_EFF_CSUPP, + [NVME_ADM_CMD_CREATE_CQ] = NVME_CMD_EFF_CSUPP, + [NVME_ADM_CMD_IDENTIFY] = NVME_CMD_EFF_CSUPP, + [NVME_ADM_CMD_ABORT] = NVME_CMD_EFF_CSUPP, + [NVME_ADM_CMD_SET_FEATURES] = NVME_CMD_EFF_CSUPP, + [NVME_ADM_CMD_GET_FEATURES] = NVME_CMD_EFF_CSUPP, + [NVME_ADM_CMD_ASYNC_EV_REQ] = NVME_CMD_EFF_CSUPP, + [NVME_ADM_CMD_NS_ATTACHMENT] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_NIC, + [NVME_ADM_CMD_FORMAT_NVM] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC, +}; + +static const uint32_t nvme_cse_iocs_none[256]; + +static const uint32_t nvme_cse_iocs_nvm[256] = { + [NVME_CMD_FLUSH] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC, + [NVME_CMD_WRITE_ZEROES] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC, + [NVME_CMD_WRITE] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC, + [NVME_CMD_READ] = NVME_CMD_EFF_CSUPP, + [NVME_CMD_DSM] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC, + [NVME_CMD_VERIFY] = NVME_CMD_EFF_CSUPP, + [NVME_CMD_COPY] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC, + [NVME_CMD_COMPARE] = NVME_CMD_EFF_CSUPP, +}; + +static const uint32_t nvme_cse_iocs_zoned[256] = { + [NVME_CMD_FLUSH] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC, + [NVME_CMD_WRITE_ZEROES] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC, + [NVME_CMD_WRITE] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC, + [NVME_CMD_READ] = NVME_CMD_EFF_CSUPP, + [NVME_CMD_DSM] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC, + [NVME_CMD_VERIFY] = NVME_CMD_EFF_CSUPP, + [NVME_CMD_COPY] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC, + [NVME_CMD_COMPARE] = NVME_CMD_EFF_CSUPP, + 
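/* The remaining entries are specific to the Zoned Command Set */ +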
[NVME_CMD_ZONE_APPEND] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC, + [NVME_CMD_ZONE_MGMT_SEND] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC, + [NVME_CMD_ZONE_MGMT_RECV] = NVME_CMD_EFF_CSUPP, +}; + +static void nvme_process_sq(void *opaque); + +static uint16_t nvme_sqid(NvmeRequest *req) +{ + return le16_to_cpu(req->sq->sqid); +} + +static void nvme_assign_zone_state(NvmeNamespace *ns, NvmeZone *zone, + NvmeZoneState state) +{ + if (QTAILQ_IN_USE(zone, entry)) { + switch (nvme_get_zone_state(zone)) { + case NVME_ZONE_STATE_EXPLICITLY_OPEN: + QTAILQ_REMOVE(&ns->exp_open_zones, zone, entry); + break; + case NVME_ZONE_STATE_IMPLICITLY_OPEN: + QTAILQ_REMOVE(&ns->imp_open_zones, zone, entry); + break; + case NVME_ZONE_STATE_CLOSED: + QTAILQ_REMOVE(&ns->closed_zones, zone, entry); + break; + case NVME_ZONE_STATE_FULL: + QTAILQ_REMOVE(&ns->full_zones, zone, entry); + default: + ; + } + } + + nvme_set_zone_state(zone, state); + + switch (state) { + case NVME_ZONE_STATE_EXPLICITLY_OPEN: + QTAILQ_INSERT_TAIL(&ns->exp_open_zones, zone, entry); + break; + case NVME_ZONE_STATE_IMPLICITLY_OPEN: + QTAILQ_INSERT_TAIL(&ns->imp_open_zones, zone, entry); + break; + case NVME_ZONE_STATE_CLOSED: + QTAILQ_INSERT_TAIL(&ns->closed_zones, zone, entry); + break; + case NVME_ZONE_STATE_FULL: + QTAILQ_INSERT_TAIL(&ns->full_zones, zone, entry); + case NVME_ZONE_STATE_READ_ONLY: + break; + default: + zone->d.za = 0; + } +} + +/* + * Check if we can open a zone without exceeding open/active limits. + * AOR stands for "Active and Open Resources" (see TP 4053 section 2.5). + */ +static int nvme_aor_check(NvmeNamespace *ns, uint32_t act, uint32_t opn) +{ + if (ns->params.max_active_zones != 0 && + ns->nr_active_zones + act > ns->params.max_active_zones) { + trace_pci_nvme_err_insuff_active_res(ns->params.max_active_zones); + return NVME_ZONE_TOO_MANY_ACTIVE | NVME_DNR; + } + if (ns->params.max_open_zones != 0 && + ns->nr_open_zones + opn > ns->params.max_open_zones) { + trace_pci_nvme_err_insuff_open_res(ns->params.max_open_zones); + return NVME_ZONE_TOO_MANY_OPEN | NVME_DNR; + } + + return NVME_SUCCESS; +} + +static bool nvme_addr_is_cmb(NvmeCtrl *n, hwaddr addr) +{ + hwaddr hi, lo; + + if (!n->cmb.cmse) { + return false; + } + + lo = n->params.legacy_cmb ? n->cmb.mem.addr : n->cmb.cba; + hi = lo + int128_get64(n->cmb.mem.size); + + return addr >= lo && addr < hi; +} + +static inline void *nvme_addr_to_cmb(NvmeCtrl *n, hwaddr addr) +{ + hwaddr base = n->params.legacy_cmb ? 
n->cmb.mem.addr : n->cmb.cba; + return &n->cmb.buf[addr - base]; +} + +static bool nvme_addr_is_pmr(NvmeCtrl *n, hwaddr addr) +{ + hwaddr hi; + + if (!n->pmr.cmse) { + return false; + } + + hi = n->pmr.cba + int128_get64(n->pmr.dev->mr.size); + + return addr >= n->pmr.cba && addr < hi; +} + +static inline void *nvme_addr_to_pmr(NvmeCtrl *n, hwaddr addr) +{ + return memory_region_get_ram_ptr(&n->pmr.dev->mr) + (addr - n->pmr.cba); +} + +static int nvme_addr_read(NvmeCtrl *n, hwaddr addr, void *buf, int size) +{ + hwaddr hi = addr + size - 1; + if (hi < addr) { + return 1; + } + + if (n->bar.cmbsz && nvme_addr_is_cmb(n, addr) && nvme_addr_is_cmb(n, hi)) { + memcpy(buf, nvme_addr_to_cmb(n, addr), size); + return 0; + } + + if (nvme_addr_is_pmr(n, addr) && nvme_addr_is_pmr(n, hi)) { + memcpy(buf, nvme_addr_to_pmr(n, addr), size); + return 0; + } + + return pci_dma_read(&n->parent_obj, addr, buf, size); +} + +static int nvme_addr_write(NvmeCtrl *n, hwaddr addr, void *buf, int size) +{ + hwaddr hi = addr + size - 1; + if (hi < addr) { + return 1; + } + + if (n->bar.cmbsz && nvme_addr_is_cmb(n, addr) && nvme_addr_is_cmb(n, hi)) { + memcpy(nvme_addr_to_cmb(n, addr), buf, size); + return 0; + } + + if (nvme_addr_is_pmr(n, addr) && nvme_addr_is_pmr(n, hi)) { + memcpy(nvme_addr_to_pmr(n, addr), buf, size); + return 0; + } + + return pci_dma_write(&n->parent_obj, addr, buf, size); +} + +static bool nvme_nsid_valid(NvmeCtrl *n, uint32_t nsid) +{ + return nsid && + (nsid == NVME_NSID_BROADCAST || nsid <= NVME_MAX_NAMESPACES); +} + +static int nvme_check_sqid(NvmeCtrl *n, uint16_t sqid) +{ + return sqid < n->params.max_ioqpairs + 1 && n->sq[sqid] != NULL ? 0 : -1; +} + +static int nvme_check_cqid(NvmeCtrl *n, uint16_t cqid) +{ + return cqid < n->params.max_ioqpairs + 1 && n->cq[cqid] != NULL ? 
0 : -1;
+}
+
+static void nvme_inc_cq_tail(NvmeCQueue *cq)
+{
+    cq->tail++;
+    if (cq->tail >= cq->size) {
+        cq->tail = 0;
+        cq->phase = !cq->phase;
+    }
+}
+
+static void nvme_inc_sq_head(NvmeSQueue *sq)
+{
+    sq->head = (sq->head + 1) % sq->size;
+}
+
+static uint8_t nvme_cq_full(NvmeCQueue *cq)
+{
+    return (cq->tail + 1) % cq->size == cq->head;
+}
+
+static uint8_t nvme_sq_empty(NvmeSQueue *sq)
+{
+    return sq->head == sq->tail;
+}
+
+static void nvme_irq_check(NvmeCtrl *n)
+{
+    uint32_t intms = ldl_le_p(&n->bar.intms);
+
+    if (msix_enabled(&(n->parent_obj))) {
+        return;
+    }
+    if (~intms & n->irq_status) {
+        pci_irq_assert(&n->parent_obj);
+    } else {
+        pci_irq_deassert(&n->parent_obj);
+    }
+}
+
+static void nvme_irq_assert(NvmeCtrl *n, NvmeCQueue *cq)
+{
+    if (cq->irq_enabled) {
+        if (msix_enabled(&(n->parent_obj))) {
+            trace_pci_nvme_irq_msix(cq->vector);
+            msix_notify(&(n->parent_obj), cq->vector);
+        } else {
+            trace_pci_nvme_irq_pin();
+            assert(cq->vector < 32);
+            n->irq_status |= 1 << cq->vector;
+            nvme_irq_check(n);
+        }
+    } else {
+        trace_pci_nvme_irq_masked();
+    }
+}
+
+static void nvme_irq_deassert(NvmeCtrl *n, NvmeCQueue *cq)
+{
+    if (cq->irq_enabled) {
+        if (msix_enabled(&(n->parent_obj))) {
+            return;
+        } else {
+            assert(cq->vector < 32);
+            if (!n->cq_pending) {
+                n->irq_status &= ~(1 << cq->vector);
+            }
+            nvme_irq_check(n);
+        }
+    }
+}
+
+static void nvme_req_clear(NvmeRequest *req)
+{
+    req->ns = NULL;
+    req->opaque = NULL;
+    req->aiocb = NULL;
+    memset(&req->cqe, 0x0, sizeof(req->cqe));
+    req->status = NVME_SUCCESS;
+}
+
+static inline void nvme_sg_init(NvmeCtrl *n, NvmeSg *sg, bool dma)
+{
+    if (dma) {
+        pci_dma_sglist_init(&sg->qsg, &n->parent_obj, 0);
+        sg->flags = NVME_SG_DMA;
+    } else {
+        qemu_iovec_init(&sg->iov, 0);
+    }
+
+    sg->flags |= NVME_SG_ALLOC;
+}
+
+static inline void nvme_sg_unmap(NvmeSg *sg)
+{
+    if (!(sg->flags & NVME_SG_ALLOC)) {
+        return;
+    }
+
+    if (sg->flags & NVME_SG_DMA) {
+        qemu_sglist_destroy(&sg->qsg);
+    } else {
+        qemu_iovec_destroy(&sg->iov);
+    }
+
+    memset(sg, 0x0, sizeof(*sg));
+}
+
+/*
+ * When metadata is transferred as extended LBAs, the DPTR mapped into `sg`
+ * holds both data and metadata. This function splits the data and metadata
+ * into two separate QSG/IOVs.
+ */
+static void nvme_sg_split(NvmeSg *sg, NvmeNamespace *ns, NvmeSg *data,
+                          NvmeSg *mdata)
+{
+    NvmeSg *dst = data;
+    uint32_t trans_len, count = ns->lbasz;
+    uint64_t offset = 0;
+    bool dma = sg->flags & NVME_SG_DMA;
+    size_t sge_len;
+    size_t sg_len = dma ? sg->qsg.size : sg->iov.size;
+    int sg_idx = 0;
+
+    assert(sg->flags & NVME_SG_ALLOC);
+
+    while (sg_len) {
+        sge_len = dma ? sg->qsg.sg[sg_idx].len : sg->iov.iov[sg_idx].iov_len;
+
+        trans_len = MIN(sg_len, count);
+        trans_len = MIN(trans_len, sge_len - offset);
+
+        if (dst) {
+            if (dma) {
+                qemu_sglist_add(&dst->qsg, sg->qsg.sg[sg_idx].base + offset,
+                                trans_len);
+            } else {
+                qemu_iovec_add(&dst->iov,
+                               sg->iov.iov[sg_idx].iov_base + offset,
+                               trans_len);
+            }
+        }
+
+        sg_len -= trans_len;
+        count -= trans_len;
+        offset += trans_len;
+
+        if (count == 0) {
+            dst = (dst == data) ? mdata : data;
+            count = (dst == data) ?
ns->lbasz : ns->lbaf.ms;
+        }
+
+        if (sge_len == offset) {
+            offset = 0;
+            sg_idx++;
+        }
+    }
+}
+
+static uint16_t nvme_map_addr_cmb(NvmeCtrl *n, QEMUIOVector *iov, hwaddr addr,
+                                  size_t len)
+{
+    if (!len) {
+        return NVME_SUCCESS;
+    }
+
+    trace_pci_nvme_map_addr_cmb(addr, len);
+
+    if (!nvme_addr_is_cmb(n, addr) || !nvme_addr_is_cmb(n, addr + len - 1)) {
+        return NVME_DATA_TRAS_ERROR;
+    }
+
+    qemu_iovec_add(iov, nvme_addr_to_cmb(n, addr), len);
+
+    return NVME_SUCCESS;
+}
+
+static uint16_t nvme_map_addr_pmr(NvmeCtrl *n, QEMUIOVector *iov, hwaddr addr,
+                                  size_t len)
+{
+    if (!len) {
+        return NVME_SUCCESS;
+    }
+
+    if (!nvme_addr_is_pmr(n, addr) || !nvme_addr_is_pmr(n, addr + len - 1)) {
+        return NVME_DATA_TRAS_ERROR;
+    }
+
+    qemu_iovec_add(iov, nvme_addr_to_pmr(n, addr), len);
+
+    return NVME_SUCCESS;
+}
+
+static uint16_t nvme_map_addr(NvmeCtrl *n, NvmeSg *sg, hwaddr addr, size_t len)
+{
+    bool cmb = false, pmr = false;
+
+    if (!len) {
+        return NVME_SUCCESS;
+    }
+
+    trace_pci_nvme_map_addr(addr, len);
+
+    if (nvme_addr_is_cmb(n, addr)) {
+        cmb = true;
+    } else if (nvme_addr_is_pmr(n, addr)) {
+        pmr = true;
+    }
+
+    if (cmb || pmr) {
+        if (sg->flags & NVME_SG_DMA) {
+            return NVME_INVALID_USE_OF_CMB | NVME_DNR;
+        }
+
+        if (sg->iov.niov + 1 > IOV_MAX) {
+            goto max_mappings_exceeded;
+        }
+
+        if (cmb) {
+            return nvme_map_addr_cmb(n, &sg->iov, addr, len);
+        } else {
+            return nvme_map_addr_pmr(n, &sg->iov, addr, len);
+        }
+    }
+
+    if (!(sg->flags & NVME_SG_DMA)) {
+        return NVME_INVALID_USE_OF_CMB | NVME_DNR;
+    }
+
+    if (sg->qsg.nsg + 1 > IOV_MAX) {
+        goto max_mappings_exceeded;
+    }
+
+    qemu_sglist_add(&sg->qsg, addr, len);
+
+    return NVME_SUCCESS;
+
+max_mappings_exceeded:
+    NVME_GUEST_ERR(pci_nvme_ub_too_many_mappings,
+                   "number of mappings exceed 1024");
+    return NVME_INTERNAL_DEV_ERROR | NVME_DNR;
+}
+
+static inline bool nvme_addr_is_dma(NvmeCtrl *n, hwaddr addr)
+{
+    return !(nvme_addr_is_cmb(n, addr) || nvme_addr_is_pmr(n, addr));
+}
+
+static uint16_t nvme_map_prp(NvmeCtrl *n, NvmeSg *sg, uint64_t prp1,
+                             uint64_t prp2, uint32_t len)
+{
+    hwaddr trans_len = n->page_size - (prp1 % n->page_size);
+    trans_len = MIN(len, trans_len);
+    int num_prps = (len >> n->page_bits) + 1;
+    uint16_t status;
+    int ret;
+
+    trace_pci_nvme_map_prp(trans_len, len, prp1, prp2, num_prps);
+
+    nvme_sg_init(n, sg, nvme_addr_is_dma(n, prp1));
+
+    status = nvme_map_addr(n, sg, prp1, trans_len);
+    if (status) {
+        goto unmap;
+    }
+
+    len -= trans_len;
+    if (len) {
+        if (len > n->page_size) {
+            uint64_t prp_list[n->max_prp_ents];
+            uint32_t nents, prp_trans;
+            int i = 0;
+
+            /*
+             * The first PRP list entry, pointed to by PRP2, may contain an
+             * offset. Hence, we need to calculate the number of entries in
+             * the first list based on that offset.
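+             * For example, with a 4 KiB page and PRP2 pointing 8 bytes into
+             * a page, the first list holds (4096 - 8) / 8 = 511 entries.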
+ */ + nents = (n->page_size - (prp2 & (n->page_size - 1))) >> 3; + prp_trans = MIN(n->max_prp_ents, nents) * sizeof(uint64_t); + ret = nvme_addr_read(n, prp2, (void *)prp_list, prp_trans); + if (ret) { + trace_pci_nvme_err_addr_read(prp2); + status = NVME_DATA_TRAS_ERROR; + goto unmap; + } + while (len != 0) { + uint64_t prp_ent = le64_to_cpu(prp_list[i]); + + if (i == nents - 1 && len > n->page_size) { + if (unlikely(prp_ent & (n->page_size - 1))) { + trace_pci_nvme_err_invalid_prplist_ent(prp_ent); + status = NVME_INVALID_PRP_OFFSET | NVME_DNR; + goto unmap; + } + + i = 0; + nents = (len + n->page_size - 1) >> n->page_bits; + nents = MIN(nents, n->max_prp_ents); + prp_trans = nents * sizeof(uint64_t); + ret = nvme_addr_read(n, prp_ent, (void *)prp_list, + prp_trans); + if (ret) { + trace_pci_nvme_err_addr_read(prp_ent); + status = NVME_DATA_TRAS_ERROR; + goto unmap; + } + prp_ent = le64_to_cpu(prp_list[i]); + } + + if (unlikely(prp_ent & (n->page_size - 1))) { + trace_pci_nvme_err_invalid_prplist_ent(prp_ent); + status = NVME_INVALID_PRP_OFFSET | NVME_DNR; + goto unmap; + } + + trans_len = MIN(len, n->page_size); + status = nvme_map_addr(n, sg, prp_ent, trans_len); + if (status) { + goto unmap; + } + + len -= trans_len; + i++; + } + } else { + if (unlikely(prp2 & (n->page_size - 1))) { + trace_pci_nvme_err_invalid_prp2_align(prp2); + status = NVME_INVALID_PRP_OFFSET | NVME_DNR; + goto unmap; + } + status = nvme_map_addr(n, sg, prp2, len); + if (status) { + goto unmap; + } + } + } + + return NVME_SUCCESS; + +unmap: + nvme_sg_unmap(sg); + return status; +} + +/* + * Map 'nsgld' data descriptors from 'segment'. The function will subtract the + * number of bytes mapped in len. + */ +static uint16_t nvme_map_sgl_data(NvmeCtrl *n, NvmeSg *sg, + NvmeSglDescriptor *segment, uint64_t nsgld, + size_t *len, NvmeCmd *cmd) +{ + dma_addr_t addr, trans_len; + uint32_t dlen; + uint16_t status; + + for (int i = 0; i < nsgld; i++) { + uint8_t type = NVME_SGL_TYPE(segment[i].type); + + switch (type) { + case NVME_SGL_DESCR_TYPE_BIT_BUCKET: + if (cmd->opcode == NVME_CMD_WRITE) { + continue; + } + case NVME_SGL_DESCR_TYPE_DATA_BLOCK: + break; + case NVME_SGL_DESCR_TYPE_SEGMENT: + case NVME_SGL_DESCR_TYPE_LAST_SEGMENT: + return NVME_INVALID_NUM_SGL_DESCRS | NVME_DNR; + default: + return NVME_SGL_DESCR_TYPE_INVALID | NVME_DNR; + } + + dlen = le32_to_cpu(segment[i].len); + + if (!dlen) { + continue; + } + + if (*len == 0) { + /* + * All data has been mapped, but the SGL contains additional + * segments and/or descriptors. The controller might accept + * ignoring the rest of the SGL. + */ + uint32_t sgls = le32_to_cpu(n->id_ctrl.sgls); + if (sgls & NVME_CTRL_SGLS_EXCESS_LENGTH) { + break; + } + + trace_pci_nvme_err_invalid_sgl_excess_length(dlen); + return NVME_DATA_SGL_LEN_INVALID | NVME_DNR; + } + + trans_len = MIN(*len, dlen); + + if (type == NVME_SGL_DESCR_TYPE_BIT_BUCKET) { + goto next; + } + + addr = le64_to_cpu(segment[i].addr); + + if (UINT64_MAX - addr < dlen) { + return NVME_DATA_SGL_LEN_INVALID | NVME_DNR; + } + + status = nvme_map_addr(n, sg, addr, trans_len); + if (status) { + return status; + } + +next: + *len -= trans_len; + } + + return NVME_SUCCESS; +} + +static uint16_t nvme_map_sgl(NvmeCtrl *n, NvmeSg *sg, NvmeSglDescriptor sgl, + size_t len, NvmeCmd *cmd) +{ + /* + * Read the segment in chunks of 256 descriptors (one 4k page) to avoid + * dynamically allocating a potentially huge SGL. 
The spec allows the SGL + * to be larger (as in number of bytes required to describe the SGL + * descriptors and segment chain) than the command transfer size, so it is + * not bounded by MDTS. + */ + const int SEG_CHUNK_SIZE = 256; + + NvmeSglDescriptor segment[SEG_CHUNK_SIZE], *sgld, *last_sgld; + uint64_t nsgld; + uint32_t seg_len; + uint16_t status; + hwaddr addr; + int ret; + + sgld = &sgl; + addr = le64_to_cpu(sgl.addr); + + trace_pci_nvme_map_sgl(NVME_SGL_TYPE(sgl.type), len); + + nvme_sg_init(n, sg, nvme_addr_is_dma(n, addr)); + + /* + * If the entire transfer can be described with a single data block it can + * be mapped directly. + */ + if (NVME_SGL_TYPE(sgl.type) == NVME_SGL_DESCR_TYPE_DATA_BLOCK) { + status = nvme_map_sgl_data(n, sg, sgld, 1, &len, cmd); + if (status) { + goto unmap; + } + + goto out; + } + + for (;;) { + switch (NVME_SGL_TYPE(sgld->type)) { + case NVME_SGL_DESCR_TYPE_SEGMENT: + case NVME_SGL_DESCR_TYPE_LAST_SEGMENT: + break; + default: + return NVME_INVALID_SGL_SEG_DESCR | NVME_DNR; + } + + seg_len = le32_to_cpu(sgld->len); + + /* check the length of the (Last) Segment descriptor */ + if ((!seg_len || seg_len & 0xf) && + (NVME_SGL_TYPE(sgld->type) != NVME_SGL_DESCR_TYPE_BIT_BUCKET)) { + return NVME_INVALID_SGL_SEG_DESCR | NVME_DNR; + } + + if (UINT64_MAX - addr < seg_len) { + return NVME_DATA_SGL_LEN_INVALID | NVME_DNR; + } + + nsgld = seg_len / sizeof(NvmeSglDescriptor); + + while (nsgld > SEG_CHUNK_SIZE) { + if (nvme_addr_read(n, addr, segment, sizeof(segment))) { + trace_pci_nvme_err_addr_read(addr); + status = NVME_DATA_TRAS_ERROR; + goto unmap; + } + + status = nvme_map_sgl_data(n, sg, segment, SEG_CHUNK_SIZE, + &len, cmd); + if (status) { + goto unmap; + } + + nsgld -= SEG_CHUNK_SIZE; + addr += SEG_CHUNK_SIZE * sizeof(NvmeSglDescriptor); + } + + ret = nvme_addr_read(n, addr, segment, nsgld * + sizeof(NvmeSglDescriptor)); + if (ret) { + trace_pci_nvme_err_addr_read(addr); + status = NVME_DATA_TRAS_ERROR; + goto unmap; + } + + last_sgld = &segment[nsgld - 1]; + + /* + * If the segment ends with a Data Block or Bit Bucket Descriptor Type, + * then we are done. + */ + switch (NVME_SGL_TYPE(last_sgld->type)) { + case NVME_SGL_DESCR_TYPE_DATA_BLOCK: + case NVME_SGL_DESCR_TYPE_BIT_BUCKET: + status = nvme_map_sgl_data(n, sg, segment, nsgld, &len, cmd); + if (status) { + goto unmap; + } + + goto out; + + default: + break; + } + + /* + * If the last descriptor was not a Data Block or Bit Bucket, then the + * current segment must not be a Last Segment. + */ + if (NVME_SGL_TYPE(sgld->type) == NVME_SGL_DESCR_TYPE_LAST_SEGMENT) { + status = NVME_INVALID_SGL_SEG_DESCR | NVME_DNR; + goto unmap; + } + + sgld = last_sgld; + addr = le64_to_cpu(sgld->addr); + + /* + * Do not map the last descriptor; it will be a Segment or Last Segment + * descriptor and is handled by the next iteration. 
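+         * Only nsgld - 1 descriptors are mapped below; sgld now points at
+         * the final descriptor, which holds the address of the next segment.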
+ */ + status = nvme_map_sgl_data(n, sg, segment, nsgld - 1, &len, cmd); + if (status) { + goto unmap; + } + } + +out: + /* if there is any residual left in len, the SGL was too short */ + if (len) { + status = NVME_DATA_SGL_LEN_INVALID | NVME_DNR; + goto unmap; + } + + return NVME_SUCCESS; + +unmap: + nvme_sg_unmap(sg); + return status; +} + +uint16_t nvme_map_dptr(NvmeCtrl *n, NvmeSg *sg, size_t len, + NvmeCmd *cmd) +{ + uint64_t prp1, prp2; + + switch (NVME_CMD_FLAGS_PSDT(cmd->flags)) { + case NVME_PSDT_PRP: + prp1 = le64_to_cpu(cmd->dptr.prp1); + prp2 = le64_to_cpu(cmd->dptr.prp2); + + return nvme_map_prp(n, sg, prp1, prp2, len); + case NVME_PSDT_SGL_MPTR_CONTIGUOUS: + case NVME_PSDT_SGL_MPTR_SGL: + return nvme_map_sgl(n, sg, cmd->dptr.sgl, len, cmd); + default: + return NVME_INVALID_FIELD; + } +} + +static uint16_t nvme_map_mptr(NvmeCtrl *n, NvmeSg *sg, size_t len, + NvmeCmd *cmd) +{ + int psdt = NVME_CMD_FLAGS_PSDT(cmd->flags); + hwaddr mptr = le64_to_cpu(cmd->mptr); + uint16_t status; + + if (psdt == NVME_PSDT_SGL_MPTR_SGL) { + NvmeSglDescriptor sgl; + + if (nvme_addr_read(n, mptr, &sgl, sizeof(sgl))) { + return NVME_DATA_TRAS_ERROR; + } + + status = nvme_map_sgl(n, sg, sgl, len, cmd); + if (status && (status & 0x7ff) == NVME_DATA_SGL_LEN_INVALID) { + status = NVME_MD_SGL_LEN_INVALID | NVME_DNR; + } + + return status; + } + + nvme_sg_init(n, sg, nvme_addr_is_dma(n, mptr)); + status = nvme_map_addr(n, sg, mptr, len); + if (status) { + nvme_sg_unmap(sg); + } + + return status; +} + +static uint16_t nvme_map_data(NvmeCtrl *n, uint32_t nlb, NvmeRequest *req) +{ + NvmeNamespace *ns = req->ns; + NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd; + bool pi = !!NVME_ID_NS_DPS_TYPE(ns->id_ns.dps); + bool pract = !!(le16_to_cpu(rw->control) & NVME_RW_PRINFO_PRACT); + size_t len = nvme_l2b(ns, nlb); + uint16_t status; + + if (nvme_ns_ext(ns) && !(pi && pract && ns->lbaf.ms == 8)) { + NvmeSg sg; + + len += nvme_m2b(ns, nlb); + + status = nvme_map_dptr(n, &sg, len, &req->cmd); + if (status) { + return status; + } + + nvme_sg_init(n, &req->sg, sg.flags & NVME_SG_DMA); + nvme_sg_split(&sg, ns, &req->sg, NULL); + nvme_sg_unmap(&sg); + + return NVME_SUCCESS; + } + + return nvme_map_dptr(n, &req->sg, len, &req->cmd); +} + +static uint16_t nvme_map_mdata(NvmeCtrl *n, uint32_t nlb, NvmeRequest *req) +{ + NvmeNamespace *ns = req->ns; + size_t len = nvme_m2b(ns, nlb); + uint16_t status; + + if (nvme_ns_ext(ns)) { + NvmeSg sg; + + len += nvme_l2b(ns, nlb); + + status = nvme_map_dptr(n, &sg, len, &req->cmd); + if (status) { + return status; + } + + nvme_sg_init(n, &req->sg, sg.flags & NVME_SG_DMA); + nvme_sg_split(&sg, ns, NULL, &req->sg); + nvme_sg_unmap(&sg); + + return NVME_SUCCESS; + } + + return nvme_map_mptr(n, &req->sg, len, &req->cmd); +} + +static uint16_t nvme_tx_interleaved(NvmeCtrl *n, NvmeSg *sg, uint8_t *ptr, + uint32_t len, uint32_t bytes, + int32_t skip_bytes, int64_t offset, + NvmeTxDirection dir) +{ + hwaddr addr; + uint32_t trans_len, count = bytes; + bool dma = sg->flags & NVME_SG_DMA; + int64_t sge_len; + int sg_idx = 0; + int ret; + + assert(sg->flags & NVME_SG_ALLOC); + + while (len) { + sge_len = dma ? 
sg->qsg.sg[sg_idx].len : sg->iov.iov[sg_idx].iov_len; + + if (sge_len - offset < 0) { + offset -= sge_len; + sg_idx++; + continue; + } + + if (sge_len == offset) { + offset = 0; + sg_idx++; + continue; + } + + trans_len = MIN(len, count); + trans_len = MIN(trans_len, sge_len - offset); + + if (dma) { + addr = sg->qsg.sg[sg_idx].base + offset; + } else { + addr = (hwaddr)(uintptr_t)sg->iov.iov[sg_idx].iov_base + offset; + } + + if (dir == NVME_TX_DIRECTION_TO_DEVICE) { + ret = nvme_addr_read(n, addr, ptr, trans_len); + } else { + ret = nvme_addr_write(n, addr, ptr, trans_len); + } + + if (ret) { + return NVME_DATA_TRAS_ERROR; + } + + ptr += trans_len; + len -= trans_len; + count -= trans_len; + offset += trans_len; + + if (count == 0) { + count = bytes; + offset += skip_bytes; + } + } + + return NVME_SUCCESS; +} + +static uint16_t nvme_tx(NvmeCtrl *n, NvmeSg *sg, uint8_t *ptr, uint32_t len, + NvmeTxDirection dir) +{ + assert(sg->flags & NVME_SG_ALLOC); + + if (sg->flags & NVME_SG_DMA) { + uint64_t residual; + + if (dir == NVME_TX_DIRECTION_TO_DEVICE) { + residual = dma_buf_write(ptr, len, &sg->qsg); + } else { + residual = dma_buf_read(ptr, len, &sg->qsg); + } + + if (unlikely(residual)) { + trace_pci_nvme_err_invalid_dma(); + return NVME_INVALID_FIELD | NVME_DNR; + } + } else { + size_t bytes; + + if (dir == NVME_TX_DIRECTION_TO_DEVICE) { + bytes = qemu_iovec_to_buf(&sg->iov, 0, ptr, len); + } else { + bytes = qemu_iovec_from_buf(&sg->iov, 0, ptr, len); + } + + if (unlikely(bytes != len)) { + trace_pci_nvme_err_invalid_dma(); + return NVME_INVALID_FIELD | NVME_DNR; + } + } + + return NVME_SUCCESS; +} + +static inline uint16_t nvme_c2h(NvmeCtrl *n, uint8_t *ptr, uint32_t len, + NvmeRequest *req) +{ + uint16_t status; + + status = nvme_map_dptr(n, &req->sg, len, &req->cmd); + if (status) { + return status; + } + + return nvme_tx(n, &req->sg, ptr, len, NVME_TX_DIRECTION_FROM_DEVICE); +} + +static inline uint16_t nvme_h2c(NvmeCtrl *n, uint8_t *ptr, uint32_t len, + NvmeRequest *req) +{ + uint16_t status; + + status = nvme_map_dptr(n, &req->sg, len, &req->cmd); + if (status) { + return status; + } + + return nvme_tx(n, &req->sg, ptr, len, NVME_TX_DIRECTION_TO_DEVICE); +} + +uint16_t nvme_bounce_data(NvmeCtrl *n, uint8_t *ptr, uint32_t len, + NvmeTxDirection dir, NvmeRequest *req) +{ + NvmeNamespace *ns = req->ns; + NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd; + bool pi = !!NVME_ID_NS_DPS_TYPE(ns->id_ns.dps); + bool pract = !!(le16_to_cpu(rw->control) & NVME_RW_PRINFO_PRACT); + + if (nvme_ns_ext(ns) && !(pi && pract && ns->lbaf.ms == 8)) { + return nvme_tx_interleaved(n, &req->sg, ptr, len, ns->lbasz, + ns->lbaf.ms, 0, dir); + } + + return nvme_tx(n, &req->sg, ptr, len, dir); +} + +uint16_t nvme_bounce_mdata(NvmeCtrl *n, uint8_t *ptr, uint32_t len, + NvmeTxDirection dir, NvmeRequest *req) +{ + NvmeNamespace *ns = req->ns; + uint16_t status; + + if (nvme_ns_ext(ns)) { + return nvme_tx_interleaved(n, &req->sg, ptr, len, ns->lbaf.ms, + ns->lbasz, ns->lbasz, dir); + } + + nvme_sg_unmap(&req->sg); + + status = nvme_map_mptr(n, &req->sg, len, &req->cmd); + if (status) { + return status; + } + + return nvme_tx(n, &req->sg, ptr, len, dir); +} + +static inline void nvme_blk_read(BlockBackend *blk, int64_t offset, + BlockCompletionFunc *cb, NvmeRequest *req) +{ + assert(req->sg.flags & NVME_SG_ALLOC); + + if (req->sg.flags & NVME_SG_DMA) { + req->aiocb = dma_blk_read(blk, &req->sg.qsg, offset, BDRV_SECTOR_SIZE, + cb, req); + } else { + req->aiocb = blk_aio_preadv(blk, offset, &req->sg.iov, 0, cb, req); + } +} + 
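+/*
+ * Like nvme_blk_read() above: scatter/gather DMA is used when the request
+ * maps guest memory (NVME_SG_DMA); transfers to or from the CMB/PMR were
+ * mapped into an iovec of host pointers and use the plain vectored path.
+ */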
+static inline void nvme_blk_write(BlockBackend *blk, int64_t offset, + BlockCompletionFunc *cb, NvmeRequest *req) +{ + assert(req->sg.flags & NVME_SG_ALLOC); + + if (req->sg.flags & NVME_SG_DMA) { + req->aiocb = dma_blk_write(blk, &req->sg.qsg, offset, BDRV_SECTOR_SIZE, + cb, req); + } else { + req->aiocb = blk_aio_pwritev(blk, offset, &req->sg.iov, 0, cb, req); + } +} + +static void nvme_post_cqes(void *opaque) +{ + NvmeCQueue *cq = opaque; + NvmeCtrl *n = cq->ctrl; + NvmeRequest *req, *next; + bool pending = cq->head != cq->tail; + int ret; + + QTAILQ_FOREACH_SAFE(req, &cq->req_list, entry, next) { + NvmeSQueue *sq; + hwaddr addr; + + if (nvme_cq_full(cq)) { + break; + } + + sq = req->sq; + req->cqe.status = cpu_to_le16((req->status << 1) | cq->phase); + req->cqe.sq_id = cpu_to_le16(sq->sqid); + req->cqe.sq_head = cpu_to_le16(sq->head); + addr = cq->dma_addr + cq->tail * n->cqe_size; + ret = pci_dma_write(&n->parent_obj, addr, (void *)&req->cqe, + sizeof(req->cqe)); + if (ret) { + trace_pci_nvme_err_addr_write(addr); + trace_pci_nvme_err_cfs(); + stl_le_p(&n->bar.csts, NVME_CSTS_FAILED); + break; + } + QTAILQ_REMOVE(&cq->req_list, req, entry); + nvme_inc_cq_tail(cq); + nvme_sg_unmap(&req->sg); + QTAILQ_INSERT_TAIL(&sq->req_list, req, entry); + } + if (cq->tail != cq->head) { + if (cq->irq_enabled && !pending) { + n->cq_pending++; + } + + nvme_irq_assert(n, cq); + } +} + +static void nvme_enqueue_req_completion(NvmeCQueue *cq, NvmeRequest *req) +{ + assert(cq->cqid == req->sq->cqid); + trace_pci_nvme_enqueue_req_completion(nvme_cid(req), cq->cqid, + le32_to_cpu(req->cqe.result), + le32_to_cpu(req->cqe.dw1), + req->status); + + if (req->status) { + trace_pci_nvme_err_req_status(nvme_cid(req), nvme_nsid(req->ns), + req->status, req->cmd.opcode); + } + + QTAILQ_REMOVE(&req->sq->out_req_list, req, entry); + QTAILQ_INSERT_TAIL(&cq->req_list, req, entry); + timer_mod(cq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500); +} + +static void nvme_process_aers(void *opaque) +{ + NvmeCtrl *n = opaque; + NvmeAsyncEvent *event, *next; + + trace_pci_nvme_process_aers(n->aer_queued); + + QTAILQ_FOREACH_SAFE(event, &n->aer_queue, entry, next) { + NvmeRequest *req; + NvmeAerResult *result; + + /* can't post cqe if there is nothing to complete */ + if (!n->outstanding_aers) { + trace_pci_nvme_no_outstanding_aers(); + break; + } + + /* ignore if masked (cqe posted, but event not cleared) */ + if (n->aer_mask & (1 << event->result.event_type)) { + trace_pci_nvme_aer_masked(event->result.event_type, n->aer_mask); + continue; + } + + QTAILQ_REMOVE(&n->aer_queue, event, entry); + n->aer_queued--; + + n->aer_mask |= 1 << event->result.event_type; + n->outstanding_aers--; + + req = n->aer_reqs[n->outstanding_aers]; + + result = (NvmeAerResult *) &req->cqe.result; + result->event_type = event->result.event_type; + result->event_info = event->result.event_info; + result->log_page = event->result.log_page; + g_free(event); + + trace_pci_nvme_aer_post_cqe(result->event_type, result->event_info, + result->log_page); + + nvme_enqueue_req_completion(&n->admin_cq, req); + } +} + +static void nvme_enqueue_event(NvmeCtrl *n, uint8_t event_type, + uint8_t event_info, uint8_t log_page) +{ + NvmeAsyncEvent *event; + + trace_pci_nvme_enqueue_event(event_type, event_info, log_page); + + if (n->aer_queued == n->params.aer_max_queued) { + trace_pci_nvme_enqueue_event_noqueue(n->aer_queued); + return; + } + + event = g_new(NvmeAsyncEvent, 1); + event->result = (NvmeAerResult) { + .event_type = event_type, + .event_info = 
event_info, + .log_page = log_page, + }; + + QTAILQ_INSERT_TAIL(&n->aer_queue, event, entry); + n->aer_queued++; + + nvme_process_aers(n); +} + +static void nvme_smart_event(NvmeCtrl *n, uint8_t event) +{ + uint8_t aer_info; + + /* Ref SPEC */ + if (!(NVME_AEC_SMART(n->features.async_config) & event)) { + return; + } + + switch (event) { + case NVME_SMART_SPARE: + aer_info = NVME_AER_INFO_SMART_SPARE_THRESH; + break; + case NVME_SMART_TEMPERATURE: + aer_info = NVME_AER_INFO_SMART_TEMP_THRESH; + break; + case NVME_SMART_RELIABILITY: + case NVME_SMART_MEDIA_READ_ONLY: + case NVME_SMART_FAILED_VOLATILE_MEDIA: + case NVME_SMART_PMR_UNRELIABLE: + aer_info = NVME_AER_INFO_SMART_RELIABILITY; + break; + default: + return; + } + + nvme_enqueue_event(n, NVME_AER_TYPE_SMART, aer_info, NVME_LOG_SMART_INFO); +} + +static void nvme_clear_events(NvmeCtrl *n, uint8_t event_type) +{ + n->aer_mask &= ~(1 << event_type); + if (!QTAILQ_EMPTY(&n->aer_queue)) { + nvme_process_aers(n); + } +} + +static inline uint16_t nvme_check_mdts(NvmeCtrl *n, size_t len) +{ + uint8_t mdts = n->params.mdts; + + if (mdts && len > n->page_size << mdts) { + trace_pci_nvme_err_mdts(len); + return NVME_INVALID_FIELD | NVME_DNR; + } + + return NVME_SUCCESS; +} + +static inline uint16_t nvme_check_bounds(NvmeNamespace *ns, uint64_t slba, + uint32_t nlb) +{ + uint64_t nsze = le64_to_cpu(ns->id_ns.nsze); + + if (unlikely(UINT64_MAX - slba < nlb || slba + nlb > nsze)) { + trace_pci_nvme_err_invalid_lba_range(slba, nlb, nsze); + return NVME_LBA_RANGE | NVME_DNR; + } + + return NVME_SUCCESS; +} + +static int nvme_block_status_all(NvmeNamespace *ns, uint64_t slba, + uint32_t nlb, int flags) +{ + BlockDriverState *bs = blk_bs(ns->blkconf.blk); + + int64_t pnum = 0, bytes = nvme_l2b(ns, nlb); + int64_t offset = nvme_l2b(ns, slba); + int ret; + + /* + * `pnum` holds the number of bytes after offset that shares the same + * allocation status as the byte at offset. If `pnum` is different from + * `bytes`, we should check the allocation status of the next range and + * continue this until all bytes have been checked. + */ + do { + bytes -= pnum; + + ret = bdrv_block_status(bs, offset, bytes, &pnum, NULL, NULL); + if (ret < 0) { + return ret; + } + + + trace_pci_nvme_block_status(offset, bytes, pnum, ret, + !!(ret & BDRV_BLOCK_ZERO)); + + if (!(ret & flags)) { + return 1; + } + + offset += pnum; + } while (pnum != bytes); + + return 0; +} + +static uint16_t nvme_check_dulbe(NvmeNamespace *ns, uint64_t slba, + uint32_t nlb) +{ + int ret; + Error *err = NULL; + + ret = nvme_block_status_all(ns, slba, nlb, BDRV_BLOCK_DATA); + if (ret) { + if (ret < 0) { + error_setg_errno(&err, -ret, "unable to get block status"); + error_report_err(err); + + return NVME_INTERNAL_DEV_ERROR; + } + + return NVME_DULB; + } + + return NVME_SUCCESS; +} + +static void nvme_aio_err(NvmeRequest *req, int ret) +{ + uint16_t status = NVME_SUCCESS; + Error *local_err = NULL; + + switch (req->cmd.opcode) { + case NVME_CMD_READ: + status = NVME_UNRECOVERED_READ; + break; + case NVME_CMD_FLUSH: + case NVME_CMD_WRITE: + case NVME_CMD_WRITE_ZEROES: + case NVME_CMD_ZONE_APPEND: + status = NVME_WRITE_FAULT; + break; + default: + status = NVME_INTERNAL_DEV_ERROR; + break; + } + + trace_pci_nvme_err_aio(nvme_cid(req), strerror(-ret), status); + + error_setg_errno(&local_err, -ret, "aio failed"); + error_report_err(local_err); + + /* + * Set the command status code to the first encountered error but allow a + * subsequent Internal Device Error to trump it. 
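+     * (That is, a later Internal Device Error overwrites an earlier, more
+     * specific status, while any other later error keeps the first one.)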
+ */ + if (req->status && status != NVME_INTERNAL_DEV_ERROR) { + return; + } + + req->status = status; +} + +static inline uint32_t nvme_zone_idx(NvmeNamespace *ns, uint64_t slba) +{ + return ns->zone_size_log2 > 0 ? slba >> ns->zone_size_log2 : + slba / ns->zone_size; +} + +static inline NvmeZone *nvme_get_zone_by_slba(NvmeNamespace *ns, uint64_t slba) +{ + uint32_t zone_idx = nvme_zone_idx(ns, slba); + + if (zone_idx >= ns->num_zones) { + return NULL; + } + + return &ns->zone_array[zone_idx]; +} + +static uint16_t nvme_check_zone_state_for_write(NvmeZone *zone) +{ + uint64_t zslba = zone->d.zslba; + + switch (nvme_get_zone_state(zone)) { + case NVME_ZONE_STATE_EMPTY: + case NVME_ZONE_STATE_IMPLICITLY_OPEN: + case NVME_ZONE_STATE_EXPLICITLY_OPEN: + case NVME_ZONE_STATE_CLOSED: + return NVME_SUCCESS; + case NVME_ZONE_STATE_FULL: + trace_pci_nvme_err_zone_is_full(zslba); + return NVME_ZONE_FULL; + case NVME_ZONE_STATE_OFFLINE: + trace_pci_nvme_err_zone_is_offline(zslba); + return NVME_ZONE_OFFLINE; + case NVME_ZONE_STATE_READ_ONLY: + trace_pci_nvme_err_zone_is_read_only(zslba); + return NVME_ZONE_READ_ONLY; + default: + assert(false); + } + + return NVME_INTERNAL_DEV_ERROR; +} + +static uint16_t nvme_check_zone_write(NvmeNamespace *ns, NvmeZone *zone, + uint64_t slba, uint32_t nlb) +{ + uint64_t zcap = nvme_zone_wr_boundary(zone); + uint16_t status; + + status = nvme_check_zone_state_for_write(zone); + if (status) { + return status; + } + + if (unlikely(slba != zone->w_ptr)) { + trace_pci_nvme_err_write_not_at_wp(slba, zone->d.zslba, zone->w_ptr); + return NVME_ZONE_INVALID_WRITE; + } + + if (unlikely((slba + nlb) > zcap)) { + trace_pci_nvme_err_zone_boundary(slba, nlb, zcap); + return NVME_ZONE_BOUNDARY_ERROR; + } + + return NVME_SUCCESS; +} + +static uint16_t nvme_check_zone_state_for_read(NvmeZone *zone) +{ + switch (nvme_get_zone_state(zone)) { + case NVME_ZONE_STATE_EMPTY: + case NVME_ZONE_STATE_IMPLICITLY_OPEN: + case NVME_ZONE_STATE_EXPLICITLY_OPEN: + case NVME_ZONE_STATE_FULL: + case NVME_ZONE_STATE_CLOSED: + case NVME_ZONE_STATE_READ_ONLY: + return NVME_SUCCESS; + case NVME_ZONE_STATE_OFFLINE: + trace_pci_nvme_err_zone_is_offline(zone->d.zslba); + return NVME_ZONE_OFFLINE; + default: + assert(false); + } + + return NVME_INTERNAL_DEV_ERROR; +} + +static uint16_t nvme_check_zone_read(NvmeNamespace *ns, uint64_t slba, + uint32_t nlb) +{ + NvmeZone *zone; + uint64_t bndry, end; + uint16_t status; + + zone = nvme_get_zone_by_slba(ns, slba); + assert(zone); + + bndry = nvme_zone_rd_boundary(ns, zone); + end = slba + nlb; + + status = nvme_check_zone_state_for_read(zone); + if (status) { + ; + } else if (unlikely(end > bndry)) { + if (!ns->params.cross_zone_read) { + status = NVME_ZONE_BOUNDARY_ERROR; + } else { + /* + * Read across zone boundary - check that all subsequent + * zones that are being read have an appropriate state. 
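+             * The do/while below steps zone by zone until the last LBA of
+             * the read range is covered.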
+ */ + do { + zone++; + status = nvme_check_zone_state_for_read(zone); + if (status) { + break; + } + } while (end > nvme_zone_rd_boundary(ns, zone)); + } + } + + return status; +} + +static uint16_t nvme_zrm_finish(NvmeNamespace *ns, NvmeZone *zone) +{ + switch (nvme_get_zone_state(zone)) { + case NVME_ZONE_STATE_FULL: + return NVME_SUCCESS; + + case NVME_ZONE_STATE_IMPLICITLY_OPEN: + case NVME_ZONE_STATE_EXPLICITLY_OPEN: + nvme_aor_dec_open(ns); + /* fallthrough */ + case NVME_ZONE_STATE_CLOSED: + nvme_aor_dec_active(ns); + /* fallthrough */ + case NVME_ZONE_STATE_EMPTY: + nvme_assign_zone_state(ns, zone, NVME_ZONE_STATE_FULL); + return NVME_SUCCESS; + + default: + return NVME_ZONE_INVAL_TRANSITION; + } +} + +static uint16_t nvme_zrm_close(NvmeNamespace *ns, NvmeZone *zone) +{ + switch (nvme_get_zone_state(zone)) { + case NVME_ZONE_STATE_EXPLICITLY_OPEN: + case NVME_ZONE_STATE_IMPLICITLY_OPEN: + nvme_aor_dec_open(ns); + nvme_assign_zone_state(ns, zone, NVME_ZONE_STATE_CLOSED); + /* fall through */ + case NVME_ZONE_STATE_CLOSED: + return NVME_SUCCESS; + + default: + return NVME_ZONE_INVAL_TRANSITION; + } +} + +static uint16_t nvme_zrm_reset(NvmeNamespace *ns, NvmeZone *zone) +{ + switch (nvme_get_zone_state(zone)) { + case NVME_ZONE_STATE_EXPLICITLY_OPEN: + case NVME_ZONE_STATE_IMPLICITLY_OPEN: + nvme_aor_dec_open(ns); + /* fallthrough */ + case NVME_ZONE_STATE_CLOSED: + nvme_aor_dec_active(ns); + /* fallthrough */ + case NVME_ZONE_STATE_FULL: + zone->w_ptr = zone->d.zslba; + zone->d.wp = zone->w_ptr; + nvme_assign_zone_state(ns, zone, NVME_ZONE_STATE_EMPTY); + /* fallthrough */ + case NVME_ZONE_STATE_EMPTY: + return NVME_SUCCESS; + + default: + return NVME_ZONE_INVAL_TRANSITION; + } +} + +static void nvme_zrm_auto_transition_zone(NvmeNamespace *ns) +{ + NvmeZone *zone; + + if (ns->params.max_open_zones && + ns->nr_open_zones == ns->params.max_open_zones) { + zone = QTAILQ_FIRST(&ns->imp_open_zones); + if (zone) { + /* + * Automatically close this implicitly open zone. 
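+             * The head of imp_open_zones is the zone that has been in the
+             * implicitly open state the longest, so this is effectively an
+             * LRU eviction.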
+ */ + QTAILQ_REMOVE(&ns->imp_open_zones, zone, entry); + nvme_zrm_close(ns, zone); + } + } +} + +enum { + NVME_ZRM_AUTO = 1 << 0, +}; + +static uint16_t nvme_zrm_open_flags(NvmeCtrl *n, NvmeNamespace *ns, + NvmeZone *zone, int flags) +{ + int act = 0; + uint16_t status; + + switch (nvme_get_zone_state(zone)) { + case NVME_ZONE_STATE_EMPTY: + act = 1; + + /* fallthrough */ + + case NVME_ZONE_STATE_CLOSED: + if (n->params.auto_transition_zones) { + nvme_zrm_auto_transition_zone(ns); + } + status = nvme_aor_check(ns, act, 1); + if (status) { + return status; + } + + if (act) { + nvme_aor_inc_active(ns); + } + + nvme_aor_inc_open(ns); + + if (flags & NVME_ZRM_AUTO) { + nvme_assign_zone_state(ns, zone, NVME_ZONE_STATE_IMPLICITLY_OPEN); + return NVME_SUCCESS; + } + + /* fallthrough */ + + case NVME_ZONE_STATE_IMPLICITLY_OPEN: + if (flags & NVME_ZRM_AUTO) { + return NVME_SUCCESS; + } + + nvme_assign_zone_state(ns, zone, NVME_ZONE_STATE_EXPLICITLY_OPEN); + + /* fallthrough */ + + case NVME_ZONE_STATE_EXPLICITLY_OPEN: + return NVME_SUCCESS; + + default: + return NVME_ZONE_INVAL_TRANSITION; + } +} + +static inline uint16_t nvme_zrm_auto(NvmeCtrl *n, NvmeNamespace *ns, + NvmeZone *zone) +{ + return nvme_zrm_open_flags(n, ns, zone, NVME_ZRM_AUTO); +} + +static inline uint16_t nvme_zrm_open(NvmeCtrl *n, NvmeNamespace *ns, + NvmeZone *zone) +{ + return nvme_zrm_open_flags(n, ns, zone, 0); +} + +static void nvme_advance_zone_wp(NvmeNamespace *ns, NvmeZone *zone, + uint32_t nlb) +{ + zone->d.wp += nlb; + + if (zone->d.wp == nvme_zone_wr_boundary(zone)) { + nvme_zrm_finish(ns, zone); + } +} + +static void nvme_finalize_zoned_write(NvmeNamespace *ns, NvmeRequest *req) +{ + NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd; + NvmeZone *zone; + uint64_t slba; + uint32_t nlb; + + slba = le64_to_cpu(rw->slba); + nlb = le16_to_cpu(rw->nlb) + 1; + zone = nvme_get_zone_by_slba(ns, slba); + assert(zone); + + nvme_advance_zone_wp(ns, zone, nlb); +} + +static inline bool nvme_is_write(NvmeRequest *req) +{ + NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd; + + return rw->opcode == NVME_CMD_WRITE || + rw->opcode == NVME_CMD_ZONE_APPEND || + rw->opcode == NVME_CMD_WRITE_ZEROES; +} + +static AioContext *nvme_get_aio_context(BlockAIOCB *acb) +{ + return qemu_get_aio_context(); +} + +static void nvme_misc_cb(void *opaque, int ret) +{ + NvmeRequest *req = opaque; + + trace_pci_nvme_misc_cb(nvme_cid(req)); + + if (ret) { + nvme_aio_err(req, ret); + } + + nvme_enqueue_req_completion(nvme_cq(req), req); +} + +void nvme_rw_complete_cb(void *opaque, int ret) +{ + NvmeRequest *req = opaque; + NvmeNamespace *ns = req->ns; + BlockBackend *blk = ns->blkconf.blk; + BlockAcctCookie *acct = &req->acct; + BlockAcctStats *stats = blk_get_stats(blk); + + trace_pci_nvme_rw_complete_cb(nvme_cid(req), blk_name(blk)); + + if (ret) { + block_acct_failed(stats, acct); + nvme_aio_err(req, ret); + } else { + block_acct_done(stats, acct); + } + + if (ns->params.zoned && nvme_is_write(req)) { + nvme_finalize_zoned_write(ns, req); + } + + nvme_enqueue_req_completion(nvme_cq(req), req); +} + +static void nvme_rw_cb(void *opaque, int ret) +{ + NvmeRequest *req = opaque; + NvmeNamespace *ns = req->ns; + + BlockBackend *blk = ns->blkconf.blk; + + trace_pci_nvme_rw_cb(nvme_cid(req), blk_name(blk)); + + if (ret) { + goto out; + } + + if (ns->lbaf.ms) { + NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd; + uint64_t slba = le64_to_cpu(rw->slba); + uint32_t nlb = (uint32_t)le16_to_cpu(rw->nlb) + 1; + uint64_t offset = nvme_moff(ns, slba); + + if (req->cmd.opcode == 
NVME_CMD_WRITE_ZEROES) { + size_t mlen = nvme_m2b(ns, nlb); + + req->aiocb = blk_aio_pwrite_zeroes(blk, offset, mlen, + BDRV_REQ_MAY_UNMAP, + nvme_rw_complete_cb, req); + return; + } + + if (nvme_ns_ext(ns) || req->cmd.mptr) { + uint16_t status; + + nvme_sg_unmap(&req->sg); + status = nvme_map_mdata(nvme_ctrl(req), nlb, req); + if (status) { + ret = -EFAULT; + goto out; + } + + if (req->cmd.opcode == NVME_CMD_READ) { + return nvme_blk_read(blk, offset, nvme_rw_complete_cb, req); + } + + return nvme_blk_write(blk, offset, nvme_rw_complete_cb, req); + } + } + +out: + nvme_rw_complete_cb(req, ret); +} + +static void nvme_verify_cb(void *opaque, int ret) +{ + NvmeBounceContext *ctx = opaque; + NvmeRequest *req = ctx->req; + NvmeNamespace *ns = req->ns; + BlockBackend *blk = ns->blkconf.blk; + BlockAcctCookie *acct = &req->acct; + BlockAcctStats *stats = blk_get_stats(blk); + NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd; + uint64_t slba = le64_to_cpu(rw->slba); + uint8_t prinfo = NVME_RW_PRINFO(le16_to_cpu(rw->control)); + uint16_t apptag = le16_to_cpu(rw->apptag); + uint16_t appmask = le16_to_cpu(rw->appmask); + uint32_t reftag = le32_to_cpu(rw->reftag); + uint16_t status; + + trace_pci_nvme_verify_cb(nvme_cid(req), prinfo, apptag, appmask, reftag); + + if (ret) { + block_acct_failed(stats, acct); + nvme_aio_err(req, ret); + goto out; + } + + block_acct_done(stats, acct); + + if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) { + status = nvme_dif_mangle_mdata(ns, ctx->mdata.bounce, + ctx->mdata.iov.size, slba); + if (status) { + req->status = status; + goto out; + } + + req->status = nvme_dif_check(ns, ctx->data.bounce, ctx->data.iov.size, + ctx->mdata.bounce, ctx->mdata.iov.size, + prinfo, slba, apptag, appmask, &reftag); + } + +out: + qemu_iovec_destroy(&ctx->data.iov); + g_free(ctx->data.bounce); + + qemu_iovec_destroy(&ctx->mdata.iov); + g_free(ctx->mdata.bounce); + + g_free(ctx); + + nvme_enqueue_req_completion(nvme_cq(req), req); +} + + +static void nvme_verify_mdata_in_cb(void *opaque, int ret) +{ + NvmeBounceContext *ctx = opaque; + NvmeRequest *req = ctx->req; + NvmeNamespace *ns = req->ns; + NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd; + uint64_t slba = le64_to_cpu(rw->slba); + uint32_t nlb = le16_to_cpu(rw->nlb) + 1; + size_t mlen = nvme_m2b(ns, nlb); + uint64_t offset = nvme_moff(ns, slba); + BlockBackend *blk = ns->blkconf.blk; + + trace_pci_nvme_verify_mdata_in_cb(nvme_cid(req), blk_name(blk)); + + if (ret) { + goto out; + } + + ctx->mdata.bounce = g_malloc(mlen); + + qemu_iovec_reset(&ctx->mdata.iov); + qemu_iovec_add(&ctx->mdata.iov, ctx->mdata.bounce, mlen); + + req->aiocb = blk_aio_preadv(blk, offset, &ctx->mdata.iov, 0, + nvme_verify_cb, ctx); + return; + +out: + nvme_verify_cb(ctx, ret); +} + +struct nvme_compare_ctx { + struct { + QEMUIOVector iov; + uint8_t *bounce; + } data; + + struct { + QEMUIOVector iov; + uint8_t *bounce; + } mdata; +}; + +static void nvme_compare_mdata_cb(void *opaque, int ret) +{ + NvmeRequest *req = opaque; + NvmeNamespace *ns = req->ns; + NvmeCtrl *n = nvme_ctrl(req); + NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd; + uint8_t prinfo = NVME_RW_PRINFO(le16_to_cpu(rw->control)); + uint16_t apptag = le16_to_cpu(rw->apptag); + uint16_t appmask = le16_to_cpu(rw->appmask); + uint32_t reftag = le32_to_cpu(rw->reftag); + struct nvme_compare_ctx *ctx = req->opaque; + g_autofree uint8_t *buf = NULL; + BlockBackend *blk = ns->blkconf.blk; + BlockAcctCookie *acct = &req->acct; + BlockAcctStats *stats = blk_get_stats(blk); + uint16_t status = NVME_SUCCESS; + + 
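    /*
+     * ctx->mdata.bounce holds the controller-side metadata read back by
+     * nvme_compare_data_cb(); fetch the host's metadata into a local buffer
+     * and compare the two, honoring protection information when present.
+     */
+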
trace_pci_nvme_compare_mdata_cb(nvme_cid(req)); + + if (ret) { + block_acct_failed(stats, acct); + nvme_aio_err(req, ret); + goto out; + } + + buf = g_malloc(ctx->mdata.iov.size); + + status = nvme_bounce_mdata(n, buf, ctx->mdata.iov.size, + NVME_TX_DIRECTION_TO_DEVICE, req); + if (status) { + req->status = status; + goto out; + } + + if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) { + uint64_t slba = le64_to_cpu(rw->slba); + uint8_t *bufp; + uint8_t *mbufp = ctx->mdata.bounce; + uint8_t *end = mbufp + ctx->mdata.iov.size; + int16_t pil = 0; + + status = nvme_dif_check(ns, ctx->data.bounce, ctx->data.iov.size, + ctx->mdata.bounce, ctx->mdata.iov.size, prinfo, + slba, apptag, appmask, &reftag); + if (status) { + req->status = status; + goto out; + } + + /* + * When formatted with protection information, do not compare the DIF + * tuple. + */ + if (!(ns->id_ns.dps & NVME_ID_NS_DPS_FIRST_EIGHT)) { + pil = ns->lbaf.ms - sizeof(NvmeDifTuple); + } + + for (bufp = buf; mbufp < end; bufp += ns->lbaf.ms, mbufp += ns->lbaf.ms) { + if (memcmp(bufp + pil, mbufp + pil, ns->lbaf.ms - pil)) { + req->status = NVME_CMP_FAILURE; + goto out; + } + } + + goto out; + } + + if (memcmp(buf, ctx->mdata.bounce, ctx->mdata.iov.size)) { + req->status = NVME_CMP_FAILURE; + goto out; + } + + block_acct_done(stats, acct); + +out: + qemu_iovec_destroy(&ctx->data.iov); + g_free(ctx->data.bounce); + + qemu_iovec_destroy(&ctx->mdata.iov); + g_free(ctx->mdata.bounce); + + g_free(ctx); + + nvme_enqueue_req_completion(nvme_cq(req), req); +} + +static void nvme_compare_data_cb(void *opaque, int ret) +{ + NvmeRequest *req = opaque; + NvmeCtrl *n = nvme_ctrl(req); + NvmeNamespace *ns = req->ns; + BlockBackend *blk = ns->blkconf.blk; + BlockAcctCookie *acct = &req->acct; + BlockAcctStats *stats = blk_get_stats(blk); + + struct nvme_compare_ctx *ctx = req->opaque; + g_autofree uint8_t *buf = NULL; + uint16_t status; + + trace_pci_nvme_compare_data_cb(nvme_cid(req)); + + if (ret) { + block_acct_failed(stats, acct); + nvme_aio_err(req, ret); + goto out; + } + + buf = g_malloc(ctx->data.iov.size); + + status = nvme_bounce_data(n, buf, ctx->data.iov.size, + NVME_TX_DIRECTION_TO_DEVICE, req); + if (status) { + req->status = status; + goto out; + } + + if (memcmp(buf, ctx->data.bounce, ctx->data.iov.size)) { + req->status = NVME_CMP_FAILURE; + goto out; + } + + if (ns->lbaf.ms) { + NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd; + uint64_t slba = le64_to_cpu(rw->slba); + uint32_t nlb = le16_to_cpu(rw->nlb) + 1; + size_t mlen = nvme_m2b(ns, nlb); + uint64_t offset = nvme_moff(ns, slba); + + ctx->mdata.bounce = g_malloc(mlen); + + qemu_iovec_init(&ctx->mdata.iov, 1); + qemu_iovec_add(&ctx->mdata.iov, ctx->mdata.bounce, mlen); + + req->aiocb = blk_aio_preadv(blk, offset, &ctx->mdata.iov, 0, + nvme_compare_mdata_cb, req); + return; + } + + block_acct_done(stats, acct); + +out: + qemu_iovec_destroy(&ctx->data.iov); + g_free(ctx->data.bounce); + g_free(ctx); + + nvme_enqueue_req_completion(nvme_cq(req), req); +} + +typedef struct NvmeDSMAIOCB { + BlockAIOCB common; + BlockAIOCB *aiocb; + NvmeRequest *req; + QEMUBH *bh; + int ret; + + NvmeDsmRange *range; + unsigned int nr; + unsigned int idx; +} NvmeDSMAIOCB; + +static void nvme_dsm_cancel(BlockAIOCB *aiocb) +{ + NvmeDSMAIOCB *iocb = container_of(aiocb, NvmeDSMAIOCB, common); + + /* break nvme_dsm_cb loop */ + iocb->idx = iocb->nr; + iocb->ret = -ECANCELED; + + if (iocb->aiocb) { + blk_aio_cancel_async(iocb->aiocb); + iocb->aiocb = NULL; + } else { + /* + * We only reach this if nvme_dsm_cancel() has already 
been called or
+         * the command ran to completion and nvme_dsm_bh is scheduled to run.
+         */
+        assert(iocb->idx == iocb->nr);
+    }
+}
+
+static const AIOCBInfo nvme_dsm_aiocb_info = {
+    .aiocb_size = sizeof(NvmeDSMAIOCB),
+    .cancel_async = nvme_dsm_cancel,
+};
+
+static void nvme_dsm_bh(void *opaque)
+{
+    NvmeDSMAIOCB *iocb = opaque;
+
+    iocb->common.cb(iocb->common.opaque, iocb->ret);
+
+    qemu_bh_delete(iocb->bh);
+    iocb->bh = NULL;
+    qemu_aio_unref(iocb);
+}
+
+static void nvme_dsm_cb(void *opaque, int ret);
+
+static void nvme_dsm_md_cb(void *opaque, int ret)
+{
+    NvmeDSMAIOCB *iocb = opaque;
+    NvmeRequest *req = iocb->req;
+    NvmeNamespace *ns = req->ns;
+    NvmeDsmRange *range;
+    uint64_t slba;
+    uint32_t nlb;
+
+    if (ret < 0) {
+        iocb->ret = ret;
+        goto done;
+    }
+
+    if (!ns->lbaf.ms) {
+        nvme_dsm_cb(iocb, 0);
+        return;
+    }
+
+    range = &iocb->range[iocb->idx - 1];
+    slba = le64_to_cpu(range->slba);
+    nlb = le32_to_cpu(range->nlb);
+
+    /*
+     * Check that all blocks were discarded (zeroed); otherwise we do not
+     * zero the metadata.
+     */
+
+    ret = nvme_block_status_all(ns, slba, nlb, BDRV_BLOCK_ZERO);
+    if (ret) {
+        if (ret < 0) {
+            iocb->ret = ret;
+            goto done;
+        }
+
+        /* not all blocks were zeroed; leave the metadata as-is */
+        nvme_dsm_cb(iocb, 0);
+        return;
+    }
+
+    iocb->aiocb = blk_aio_pwrite_zeroes(ns->blkconf.blk, nvme_moff(ns, slba),
+                                        nvme_m2b(ns, nlb), BDRV_REQ_MAY_UNMAP,
+                                        nvme_dsm_cb, iocb);
+    return;
+
+done:
+    iocb->aiocb = NULL;
+    qemu_bh_schedule(iocb->bh);
+}
+
+static void nvme_dsm_cb(void *opaque, int ret)
+{
+    NvmeDSMAIOCB *iocb = opaque;
+    NvmeRequest *req = iocb->req;
+    NvmeCtrl *n = nvme_ctrl(req);
+    NvmeNamespace *ns = req->ns;
+    NvmeDsmRange *range;
+    uint64_t slba;
+    uint32_t nlb;
+
+    if (ret < 0) {
+        iocb->ret = ret;
+        goto done;
+    }
+
+next:
+    if (iocb->idx == iocb->nr) {
+        goto done;
+    }
+
+    range = &iocb->range[iocb->idx++];
+    slba = le64_to_cpu(range->slba);
+    nlb = le32_to_cpu(range->nlb);
+
+    trace_pci_nvme_dsm_deallocate(slba, nlb);
+
+    if (nlb > n->dmrsl) {
+        trace_pci_nvme_dsm_single_range_limit_exceeded(nlb, n->dmrsl);
+        goto next;
+    }
+
+    if (nvme_check_bounds(ns, slba, nlb)) {
+        trace_pci_nvme_err_invalid_lba_range(slba, nlb,
+                                             ns->id_ns.nsze);
+        goto next;
+    }
+
+    iocb->aiocb = blk_aio_pdiscard(ns->blkconf.blk, nvme_l2b(ns, slba),
+                                   nvme_l2b(ns, nlb),
+                                   nvme_dsm_md_cb, iocb);
+    return;
+
+done:
+    iocb->aiocb = NULL;
+    qemu_bh_schedule(iocb->bh);
+}
+
+static uint16_t nvme_dsm(NvmeCtrl *n, NvmeRequest *req)
+{
+    NvmeNamespace *ns = req->ns;
+    NvmeDsmCmd *dsm = (NvmeDsmCmd *) &req->cmd;
+    uint32_t attr = le32_to_cpu(dsm->attributes);
+    uint32_t nr = (le32_to_cpu(dsm->nr) & 0xff) + 1;
+    uint16_t status = NVME_SUCCESS;
+
+    trace_pci_nvme_dsm(nr, attr);
+
+    if (attr & NVME_DSMGMT_AD) {
+        NvmeDSMAIOCB *iocb = blk_aio_get(&nvme_dsm_aiocb_info, ns->blkconf.blk,
+                                         nvme_misc_cb, req);
+
+        iocb->req = req;
+        iocb->bh = qemu_bh_new(nvme_dsm_bh, iocb);
+        iocb->ret = 0;
+        iocb->range = g_new(NvmeDsmRange, nr);
+        iocb->nr = nr;
+        iocb->idx = 0;
+
+        status = nvme_h2c(n, (uint8_t *)iocb->range, sizeof(NvmeDsmRange) * nr,
+                          req);
+        if (status) {
+            return status;
+        }
+
+        req->aiocb = &iocb->common;
+        nvme_dsm_cb(iocb, 0);
+
+        return NVME_NO_COMPLETE;
+    }
+
+    return status;
+}
+
+static uint16_t nvme_verify(NvmeCtrl *n, NvmeRequest *req)
+{
+    NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd;
+    NvmeNamespace *ns = req->ns;
+    BlockBackend *blk = ns->blkconf.blk;
+    uint64_t slba = le64_to_cpu(rw->slba);
+    uint32_t nlb = le16_to_cpu(rw->nlb) + 1;
+    size_t len = nvme_l2b(ns, nlb);
+    int64_t offset = nvme_l2b(ns, slba);
+    uint8_t prinfo =
NVME_RW_PRINFO(le16_to_cpu(rw->control)); + uint32_t reftag = le32_to_cpu(rw->reftag); + NvmeBounceContext *ctx = NULL; + uint16_t status; + + trace_pci_nvme_verify(nvme_cid(req), nvme_nsid(ns), slba, nlb); + + if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) { + status = nvme_check_prinfo(ns, prinfo, slba, reftag); + if (status) { + return status; + } + + if (prinfo & NVME_PRINFO_PRACT) { + return NVME_INVALID_PROT_INFO | NVME_DNR; + } + } + + if (len > n->page_size << n->params.vsl) { + return NVME_INVALID_FIELD | NVME_DNR; + } + + status = nvme_check_bounds(ns, slba, nlb); + if (status) { + return status; + } + + if (NVME_ERR_REC_DULBE(ns->features.err_rec)) { + status = nvme_check_dulbe(ns, slba, nlb); + if (status) { + return status; + } + } + + ctx = g_new0(NvmeBounceContext, 1); + ctx->req = req; + + ctx->data.bounce = g_malloc(len); + + qemu_iovec_init(&ctx->data.iov, 1); + qemu_iovec_add(&ctx->data.iov, ctx->data.bounce, len); + + block_acct_start(blk_get_stats(blk), &req->acct, ctx->data.iov.size, + BLOCK_ACCT_READ); + + req->aiocb = blk_aio_preadv(ns->blkconf.blk, offset, &ctx->data.iov, 0, + nvme_verify_mdata_in_cb, ctx); + return NVME_NO_COMPLETE; +} + +typedef struct NvmeCopyAIOCB { + BlockAIOCB common; + BlockAIOCB *aiocb; + NvmeRequest *req; + QEMUBH *bh; + int ret; + + NvmeCopySourceRange *ranges; + int nr; + int idx; + + uint8_t *bounce; + QEMUIOVector iov; + struct { + BlockAcctCookie read; + BlockAcctCookie write; + } acct; + + uint32_t reftag; + uint64_t slba; + + NvmeZone *zone; +} NvmeCopyAIOCB; + +static void nvme_copy_cancel(BlockAIOCB *aiocb) +{ + NvmeCopyAIOCB *iocb = container_of(aiocb, NvmeCopyAIOCB, common); + + iocb->ret = -ECANCELED; + + if (iocb->aiocb) { + blk_aio_cancel_async(iocb->aiocb); + iocb->aiocb = NULL; + } +} + +static const AIOCBInfo nvme_copy_aiocb_info = { + .aiocb_size = sizeof(NvmeCopyAIOCB), + .cancel_async = nvme_copy_cancel, +}; + +static void nvme_copy_bh(void *opaque) +{ + NvmeCopyAIOCB *iocb = opaque; + NvmeRequest *req = iocb->req; + NvmeNamespace *ns = req->ns; + BlockAcctStats *stats = blk_get_stats(ns->blkconf.blk); + + if (iocb->idx != iocb->nr) { + req->cqe.result = cpu_to_le32(iocb->idx); + } + + qemu_iovec_destroy(&iocb->iov); + g_free(iocb->bounce); + + qemu_bh_delete(iocb->bh); + iocb->bh = NULL; + + if (iocb->ret < 0) { + block_acct_failed(stats, &iocb->acct.read); + block_acct_failed(stats, &iocb->acct.write); + } else { + block_acct_done(stats, &iocb->acct.read); + block_acct_done(stats, &iocb->acct.write); + } + + iocb->common.cb(iocb->common.opaque, iocb->ret); + qemu_aio_unref(iocb); +} + +static void nvme_copy_cb(void *opaque, int ret); + +static void nvme_copy_out_completed_cb(void *opaque, int ret) +{ + NvmeCopyAIOCB *iocb = opaque; + NvmeRequest *req = iocb->req; + NvmeNamespace *ns = req->ns; + NvmeCopySourceRange *range = &iocb->ranges[iocb->idx]; + uint32_t nlb = le32_to_cpu(range->nlb) + 1; + + if (ret < 0) { + iocb->ret = ret; + goto out; + } else if (iocb->ret < 0) { + goto out; + } + + if (ns->params.zoned) { + nvme_advance_zone_wp(ns, iocb->zone, nlb); + } + + iocb->idx++; + iocb->slba += nlb; +out: + nvme_copy_cb(iocb, iocb->ret); +} + +static void nvme_copy_out_cb(void *opaque, int ret) +{ + NvmeCopyAIOCB *iocb = opaque; + NvmeRequest *req = iocb->req; + NvmeNamespace *ns = req->ns; + NvmeCopySourceRange *range; + uint32_t nlb; + size_t mlen; + uint8_t *mbounce; + + if (ret < 0) { + iocb->ret = ret; + goto out; + } else if (iocb->ret < 0) { + goto out; + } + + if (!ns->lbaf.ms) { + nvme_copy_out_completed_cb(iocb, 
0); + return; + } + + range = &iocb->ranges[iocb->idx]; + nlb = le32_to_cpu(range->nlb) + 1; + + mlen = nvme_m2b(ns, nlb); + mbounce = iocb->bounce + nvme_l2b(ns, nlb); + + qemu_iovec_reset(&iocb->iov); + qemu_iovec_add(&iocb->iov, mbounce, mlen); + + iocb->aiocb = blk_aio_pwritev(ns->blkconf.blk, nvme_moff(ns, iocb->slba), + &iocb->iov, 0, nvme_copy_out_completed_cb, + iocb); + + return; + +out: + nvme_copy_cb(iocb, ret); +} + +static void nvme_copy_in_completed_cb(void *opaque, int ret) +{ + NvmeCopyAIOCB *iocb = opaque; + NvmeRequest *req = iocb->req; + NvmeNamespace *ns = req->ns; + NvmeCopySourceRange *range; + uint32_t nlb; + size_t len; + uint16_t status; + + if (ret < 0) { + iocb->ret = ret; + goto out; + } else if (iocb->ret < 0) { + goto out; + } + + range = &iocb->ranges[iocb->idx]; + nlb = le32_to_cpu(range->nlb) + 1; + len = nvme_l2b(ns, nlb); + + trace_pci_nvme_copy_out(iocb->slba, nlb); + + if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) { + NvmeCopyCmd *copy = (NvmeCopyCmd *)&req->cmd; + + uint16_t prinfor = ((copy->control[0] >> 4) & 0xf); + uint16_t prinfow = ((copy->control[2] >> 2) & 0xf); + + uint16_t apptag = le16_to_cpu(range->apptag); + uint16_t appmask = le16_to_cpu(range->appmask); + uint32_t reftag = le32_to_cpu(range->reftag); + + uint64_t slba = le64_to_cpu(range->slba); + size_t mlen = nvme_m2b(ns, nlb); + uint8_t *mbounce = iocb->bounce + nvme_l2b(ns, nlb); + + status = nvme_dif_check(ns, iocb->bounce, len, mbounce, mlen, prinfor, + slba, apptag, appmask, &reftag); + if (status) { + goto invalid; + } + + apptag = le16_to_cpu(copy->apptag); + appmask = le16_to_cpu(copy->appmask); + + if (prinfow & NVME_PRINFO_PRACT) { + status = nvme_check_prinfo(ns, prinfow, iocb->slba, iocb->reftag); + if (status) { + goto invalid; + } + + nvme_dif_pract_generate_dif(ns, iocb->bounce, len, mbounce, mlen, + apptag, &iocb->reftag); + } else { + status = nvme_dif_check(ns, iocb->bounce, len, mbounce, mlen, + prinfow, iocb->slba, apptag, appmask, + &iocb->reftag); + if (status) { + goto invalid; + } + } + } + + status = nvme_check_bounds(ns, iocb->slba, nlb); + if (status) { + goto invalid; + } + + if (ns->params.zoned) { + status = nvme_check_zone_write(ns, iocb->zone, iocb->slba, nlb); + if (status) { + goto invalid; + } + + iocb->zone->w_ptr += nlb; + } + + qemu_iovec_reset(&iocb->iov); + qemu_iovec_add(&iocb->iov, iocb->bounce, len); + + iocb->aiocb = blk_aio_pwritev(ns->blkconf.blk, nvme_l2b(ns, iocb->slba), + &iocb->iov, 0, nvme_copy_out_cb, iocb); + + return; + +invalid: + req->status = status; + iocb->aiocb = NULL; + if (iocb->bh) { + qemu_bh_schedule(iocb->bh); + } + + return; + +out: + nvme_copy_cb(iocb, ret); +} + +static void nvme_copy_in_cb(void *opaque, int ret) +{ + NvmeCopyAIOCB *iocb = opaque; + NvmeRequest *req = iocb->req; + NvmeNamespace *ns = req->ns; + NvmeCopySourceRange *range; + uint64_t slba; + uint32_t nlb; + + if (ret < 0) { + iocb->ret = ret; + goto out; + } else if (iocb->ret < 0) { + goto out; + } + + if (!ns->lbaf.ms) { + nvme_copy_in_completed_cb(iocb, 0); + return; + } + + range = &iocb->ranges[iocb->idx]; + slba = le64_to_cpu(range->slba); + nlb = le32_to_cpu(range->nlb) + 1; + + qemu_iovec_reset(&iocb->iov); + qemu_iovec_add(&iocb->iov, iocb->bounce + nvme_l2b(ns, nlb), + nvme_m2b(ns, nlb)); + + iocb->aiocb = blk_aio_preadv(ns->blkconf.blk, nvme_moff(ns, slba), + &iocb->iov, 0, nvme_copy_in_completed_cb, + iocb); + return; + +out: + nvme_copy_cb(iocb, iocb->ret); +} + +static void nvme_copy_cb(void *opaque, int ret) +{ + NvmeCopyAIOCB *iocb = opaque; 
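+    /*
+     * One step of the per-range copy state machine: read the next source
+     * range into the bounce buffer, check or regenerate any protection
+     * information, then write data and metadata out at the destination.
+     */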
+ NvmeRequest *req = iocb->req; + NvmeNamespace *ns = req->ns; + NvmeCopySourceRange *range; + uint64_t slba; + uint32_t nlb; + size_t len; + uint16_t status; + + if (ret < 0) { + iocb->ret = ret; + goto done; + } else if (iocb->ret < 0) { + goto done; + } + + if (iocb->idx == iocb->nr) { + goto done; + } + + range = &iocb->ranges[iocb->idx]; + slba = le64_to_cpu(range->slba); + nlb = le32_to_cpu(range->nlb) + 1; + len = nvme_l2b(ns, nlb); + + trace_pci_nvme_copy_source_range(slba, nlb); + + if (nlb > le16_to_cpu(ns->id_ns.mssrl)) { + status = NVME_CMD_SIZE_LIMIT | NVME_DNR; + goto invalid; + } + + status = nvme_check_bounds(ns, slba, nlb); + if (status) { + goto invalid; + } + + if (NVME_ERR_REC_DULBE(ns->features.err_rec)) { + status = nvme_check_dulbe(ns, slba, nlb); + if (status) { + goto invalid; + } + } + + if (ns->params.zoned) { + status = nvme_check_zone_read(ns, slba, nlb); + if (status) { + goto invalid; + } + } + + qemu_iovec_reset(&iocb->iov); + qemu_iovec_add(&iocb->iov, iocb->bounce, len); + + iocb->aiocb = blk_aio_preadv(ns->blkconf.blk, nvme_l2b(ns, slba), + &iocb->iov, 0, nvme_copy_in_cb, iocb); + return; + +invalid: + req->status = status; +done: + iocb->aiocb = NULL; + if (iocb->bh) { + qemu_bh_schedule(iocb->bh); + } +} + + +static uint16_t nvme_copy(NvmeCtrl *n, NvmeRequest *req) +{ + NvmeNamespace *ns = req->ns; + NvmeCopyCmd *copy = (NvmeCopyCmd *)&req->cmd; + NvmeCopyAIOCB *iocb = blk_aio_get(&nvme_copy_aiocb_info, ns->blkconf.blk, + nvme_misc_cb, req); + uint16_t nr = copy->nr + 1; + uint8_t format = copy->control[0] & 0xf; + uint16_t prinfor = ((copy->control[0] >> 4) & 0xf); + uint16_t prinfow = ((copy->control[2] >> 2) & 0xf); + + uint16_t status; + + trace_pci_nvme_copy(nvme_cid(req), nvme_nsid(ns), nr, format); + + iocb->ranges = NULL; + iocb->zone = NULL; + + if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps) && + ((prinfor & NVME_PRINFO_PRACT) != (prinfow & NVME_PRINFO_PRACT))) { + status = NVME_INVALID_FIELD | NVME_DNR; + goto invalid; + } + + if (!(n->id_ctrl.ocfs & (1 << format))) { + trace_pci_nvme_err_copy_invalid_format(format); + status = NVME_INVALID_FIELD | NVME_DNR; + goto invalid; + } + + if (nr > ns->id_ns.msrc + 1) { + status = NVME_CMD_SIZE_LIMIT | NVME_DNR; + goto invalid; + } + + iocb->ranges = g_new(NvmeCopySourceRange, nr); + + status = nvme_h2c(n, (uint8_t *)iocb->ranges, + sizeof(NvmeCopySourceRange) * nr, req); + if (status) { + goto invalid; + } + + iocb->slba = le64_to_cpu(copy->sdlba); + + if (ns->params.zoned) { + iocb->zone = nvme_get_zone_by_slba(ns, iocb->slba); + if (!iocb->zone) { + status = NVME_LBA_RANGE | NVME_DNR; + goto invalid; + } + + status = nvme_zrm_auto(n, ns, iocb->zone); + if (status) { + goto invalid; + } + } + + iocb->req = req; + iocb->bh = qemu_bh_new(nvme_copy_bh, iocb); + iocb->ret = 0; + iocb->nr = nr; + iocb->idx = 0; + iocb->reftag = le32_to_cpu(copy->reftag); + iocb->bounce = g_malloc_n(le16_to_cpu(ns->id_ns.mssrl), + ns->lbasz + ns->lbaf.ms); + + qemu_iovec_init(&iocb->iov, 1); + + block_acct_start(blk_get_stats(ns->blkconf.blk), &iocb->acct.read, 0, + BLOCK_ACCT_READ); + block_acct_start(blk_get_stats(ns->blkconf.blk), &iocb->acct.write, 0, + BLOCK_ACCT_WRITE); + + req->aiocb = &iocb->common; + nvme_copy_cb(iocb, 0); + + return NVME_NO_COMPLETE; + +invalid: + g_free(iocb->ranges); + qemu_aio_unref(iocb); + return status; +} + +static uint16_t nvme_compare(NvmeCtrl *n, NvmeRequest *req) +{ + NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd; + NvmeNamespace *ns = req->ns; + BlockBackend *blk = ns->blkconf.blk; + uint64_t slba 
= le64_to_cpu(rw->slba); + uint32_t nlb = le16_to_cpu(rw->nlb) + 1; + uint8_t prinfo = NVME_RW_PRINFO(le16_to_cpu(rw->control)); + size_t data_len = nvme_l2b(ns, nlb); + size_t len = data_len; + int64_t offset = nvme_l2b(ns, slba); + struct nvme_compare_ctx *ctx = NULL; + uint16_t status; + + trace_pci_nvme_compare(nvme_cid(req), nvme_nsid(ns), slba, nlb); + + if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps) && (prinfo & NVME_PRINFO_PRACT)) { + return NVME_INVALID_PROT_INFO | NVME_DNR; + } + + if (nvme_ns_ext(ns)) { + len += nvme_m2b(ns, nlb); + } + + status = nvme_check_mdts(n, len); + if (status) { + return status; + } + + status = nvme_check_bounds(ns, slba, nlb); + if (status) { + return status; + } + + if (NVME_ERR_REC_DULBE(ns->features.err_rec)) { + status = nvme_check_dulbe(ns, slba, nlb); + if (status) { + return status; + } + } + + status = nvme_map_dptr(n, &req->sg, len, &req->cmd); + if (status) { + return status; + } + + ctx = g_new(struct nvme_compare_ctx, 1); + ctx->data.bounce = g_malloc(data_len); + + req->opaque = ctx; + + qemu_iovec_init(&ctx->data.iov, 1); + qemu_iovec_add(&ctx->data.iov, ctx->data.bounce, data_len); + + block_acct_start(blk_get_stats(blk), &req->acct, data_len, + BLOCK_ACCT_READ); + req->aiocb = blk_aio_preadv(blk, offset, &ctx->data.iov, 0, + nvme_compare_data_cb, req); + + return NVME_NO_COMPLETE; +} + +typedef struct NvmeFlushAIOCB { + BlockAIOCB common; + BlockAIOCB *aiocb; + NvmeRequest *req; + QEMUBH *bh; + int ret; + + NvmeNamespace *ns; + uint32_t nsid; + bool broadcast; +} NvmeFlushAIOCB; + +static void nvme_flush_cancel(BlockAIOCB *acb) +{ + NvmeFlushAIOCB *iocb = container_of(acb, NvmeFlushAIOCB, common); + + iocb->ret = -ECANCELED; + + if (iocb->aiocb) { + blk_aio_cancel_async(iocb->aiocb); + } +} + +static const AIOCBInfo nvme_flush_aiocb_info = { + .aiocb_size = sizeof(NvmeFlushAIOCB), + .cancel_async = nvme_flush_cancel, + .get_aio_context = nvme_get_aio_context, +}; + +static void nvme_flush_ns_cb(void *opaque, int ret) +{ + NvmeFlushAIOCB *iocb = opaque; + NvmeNamespace *ns = iocb->ns; + + if (ret < 0) { + iocb->ret = ret; + goto out; + } else if (iocb->ret < 0) { + goto out; + } + + if (ns) { + trace_pci_nvme_flush_ns(iocb->nsid); + + iocb->ns = NULL; + iocb->aiocb = blk_aio_flush(ns->blkconf.blk, nvme_flush_ns_cb, iocb); + return; + } + +out: + iocb->aiocb = NULL; + qemu_bh_schedule(iocb->bh); +} + +static void nvme_flush_bh(void *opaque) +{ + NvmeFlushAIOCB *iocb = opaque; + NvmeRequest *req = iocb->req; + NvmeCtrl *n = nvme_ctrl(req); + int i; + + if (iocb->ret < 0) { + goto done; + } + + if (iocb->broadcast) { + for (i = iocb->nsid + 1; i <= NVME_MAX_NAMESPACES; i++) { + iocb->ns = nvme_ns(n, i); + if (iocb->ns) { + iocb->nsid = i; + break; + } + } + } + + if (!iocb->ns) { + goto done; + } + + nvme_flush_ns_cb(iocb, 0); + return; + +done: + qemu_bh_delete(iocb->bh); + iocb->bh = NULL; + + iocb->common.cb(iocb->common.opaque, iocb->ret); + + qemu_aio_unref(iocb); + + return; +} + +static uint16_t nvme_flush(NvmeCtrl *n, NvmeRequest *req) +{ + NvmeFlushAIOCB *iocb; + uint32_t nsid = le32_to_cpu(req->cmd.nsid); + uint16_t status; + + iocb = qemu_aio_get(&nvme_flush_aiocb_info, NULL, nvme_misc_cb, req); + + iocb->req = req; + iocb->bh = qemu_bh_new(nvme_flush_bh, iocb); + iocb->ret = 0; + iocb->ns = NULL; + iocb->nsid = 0; + iocb->broadcast = (nsid == NVME_NSID_BROADCAST); + + if (!iocb->broadcast) { + if (!nvme_nsid_valid(n, nsid)) { + status = NVME_INVALID_NSID | NVME_DNR; + goto out; + } + + iocb->ns = nvme_ns(n, nsid); + if (!iocb->ns) { + 
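+ /* the NSID is in range but no namespace is attached to this controller */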
status = NVME_INVALID_FIELD | NVME_DNR; + goto out; + } + + iocb->nsid = nsid; + } + + req->aiocb = &iocb->common; + qemu_bh_schedule(iocb->bh); + + return NVME_NO_COMPLETE; + +out: + qemu_bh_delete(iocb->bh); + iocb->bh = NULL; + qemu_aio_unref(iocb); + + return status; +} + +static uint16_t nvme_read(NvmeCtrl *n, NvmeRequest *req) +{ + NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd; + NvmeNamespace *ns = req->ns; + uint64_t slba = le64_to_cpu(rw->slba); + uint32_t nlb = (uint32_t)le16_to_cpu(rw->nlb) + 1; + uint8_t prinfo = NVME_RW_PRINFO(le16_to_cpu(rw->control)); + uint64_t data_size = nvme_l2b(ns, nlb); + uint64_t mapped_size = data_size; + uint64_t data_offset; + BlockBackend *blk = ns->blkconf.blk; + uint16_t status; + + if (nvme_ns_ext(ns)) { + mapped_size += nvme_m2b(ns, nlb); + + if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) { + bool pract = prinfo & NVME_PRINFO_PRACT; + + if (pract && ns->lbaf.ms == 8) { + mapped_size = data_size; + } + } + } + + trace_pci_nvme_read(nvme_cid(req), nvme_nsid(ns), nlb, mapped_size, slba); + + status = nvme_check_mdts(n, mapped_size); + if (status) { + goto invalid; + } + + status = nvme_check_bounds(ns, slba, nlb); + if (status) { + goto invalid; + } + + if (ns->params.zoned) { + status = nvme_check_zone_read(ns, slba, nlb); + if (status) { + trace_pci_nvme_err_zone_read_not_ok(slba, nlb, status); + goto invalid; + } + } + + if (NVME_ERR_REC_DULBE(ns->features.err_rec)) { + status = nvme_check_dulbe(ns, slba, nlb); + if (status) { + goto invalid; + } + } + + if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) { + return nvme_dif_rw(n, req); + } + + status = nvme_map_data(n, nlb, req); + if (status) { + goto invalid; + } + + data_offset = nvme_l2b(ns, slba); + + block_acct_start(blk_get_stats(blk), &req->acct, data_size, + BLOCK_ACCT_READ); + nvme_blk_read(blk, data_offset, nvme_rw_cb, req); + return NVME_NO_COMPLETE; + +invalid: + block_acct_invalid(blk_get_stats(blk), BLOCK_ACCT_READ); + return status | NVME_DNR; +} + +static uint16_t nvme_do_write(NvmeCtrl *n, NvmeRequest *req, bool append, + bool wrz) +{ + NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd; + NvmeNamespace *ns = req->ns; + uint64_t slba = le64_to_cpu(rw->slba); + uint32_t nlb = (uint32_t)le16_to_cpu(rw->nlb) + 1; + uint16_t ctrl = le16_to_cpu(rw->control); + uint8_t prinfo = NVME_RW_PRINFO(ctrl); + uint64_t data_size = nvme_l2b(ns, nlb); + uint64_t mapped_size = data_size; + uint64_t data_offset; + NvmeZone *zone; + NvmeZonedResult *res = (NvmeZonedResult *)&req->cqe; + BlockBackend *blk = ns->blkconf.blk; + uint16_t status; + + if (nvme_ns_ext(ns)) { + mapped_size += nvme_m2b(ns, nlb); + + if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) { + bool pract = prinfo & NVME_PRINFO_PRACT; + + if (pract && ns->lbaf.ms == 8) { + mapped_size -= nvme_m2b(ns, nlb); + } + } + } + + trace_pci_nvme_write(nvme_cid(req), nvme_io_opc_str(rw->opcode), + nvme_nsid(ns), nlb, mapped_size, slba); + + if (!wrz) { + status = nvme_check_mdts(n, mapped_size); + if (status) { + goto invalid; + } + } + + status = nvme_check_bounds(ns, slba, nlb); + if (status) { + goto invalid; + } + + if (ns->params.zoned) { + zone = nvme_get_zone_by_slba(ns, slba); + assert(zone); + + if (append) { + bool piremap = !!(ctrl & NVME_RW_PIREMAP); + + if (unlikely(slba != zone->d.zslba)) { + trace_pci_nvme_err_append_not_at_start(slba, zone->d.zslba); + status = NVME_INVALID_FIELD; + goto invalid; + } + + if (n->params.zasl && + data_size > (uint64_t)n->page_size << n->params.zasl) { + trace_pci_nvme_err_zasl(data_size); + return NVME_INVALID_FIELD | NVME_DNR; + } + + 
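+ /* Zone Append ignores the command SLBA and writes at the zone write pointer; the effective SLBA is written back into the command and returned in the completion entry below. For PI types 1 and 2 the reference tag is remapped by the same offset (e.g., hypothetically, an append landing 0x10 blocks past zslba adds 0x10 to reftag). */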
slba = zone->w_ptr; + rw->slba = cpu_to_le64(slba); + res->slba = cpu_to_le64(slba); + + switch (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) { + case NVME_ID_NS_DPS_TYPE_1: + if (!piremap) { + return NVME_INVALID_PROT_INFO | NVME_DNR; + } + + /* fallthrough */ + + case NVME_ID_NS_DPS_TYPE_2: + if (piremap) { + uint32_t reftag = le32_to_cpu(rw->reftag); + rw->reftag = cpu_to_le32(reftag + (slba - zone->d.zslba)); + } + + break; + + case NVME_ID_NS_DPS_TYPE_3: + if (piremap) { + return NVME_INVALID_PROT_INFO | NVME_DNR; + } + + break; + } + } + + status = nvme_check_zone_write(ns, zone, slba, nlb); + if (status) { + goto invalid; + } + + status = nvme_zrm_auto(n, ns, zone); + if (status) { + goto invalid; + } + + zone->w_ptr += nlb; + } + + data_offset = nvme_l2b(ns, slba); + + if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) { + return nvme_dif_rw(n, req); + } + + if (!wrz) { + status = nvme_map_data(n, nlb, req); + if (status) { + goto invalid; + } + + block_acct_start(blk_get_stats(blk), &req->acct, data_size, + BLOCK_ACCT_WRITE); + nvme_blk_write(blk, data_offset, nvme_rw_cb, req); + } else { + req->aiocb = blk_aio_pwrite_zeroes(blk, data_offset, data_size, + BDRV_REQ_MAY_UNMAP, nvme_rw_cb, + req); + } + + return NVME_NO_COMPLETE; + +invalid: + block_acct_invalid(blk_get_stats(blk), BLOCK_ACCT_WRITE); + return status | NVME_DNR; +} + +static inline uint16_t nvme_write(NvmeCtrl *n, NvmeRequest *req) +{ + return nvme_do_write(n, req, false, false); +} + +static inline uint16_t nvme_write_zeroes(NvmeCtrl *n, NvmeRequest *req) +{ + return nvme_do_write(n, req, false, true); +} + +static inline uint16_t nvme_zone_append(NvmeCtrl *n, NvmeRequest *req) +{ + return nvme_do_write(n, req, true, false); +} + +static uint16_t nvme_get_mgmt_zone_slba_idx(NvmeNamespace *ns, NvmeCmd *c, + uint64_t *slba, uint32_t *zone_idx) +{ + uint32_t dw10 = le32_to_cpu(c->cdw10); + uint32_t dw11 = le32_to_cpu(c->cdw11); + + if (!ns->params.zoned) { + trace_pci_nvme_err_invalid_opc(c->opcode); + return NVME_INVALID_OPCODE | NVME_DNR; + } + + *slba = ((uint64_t)dw11) << 32 | dw10; + if (unlikely(*slba >= ns->id_ns.nsze)) { + trace_pci_nvme_err_invalid_lba_range(*slba, 0, ns->id_ns.nsze); + *slba = 0; + return NVME_LBA_RANGE | NVME_DNR; + } + + *zone_idx = nvme_zone_idx(ns, *slba); + assert(*zone_idx < ns->num_zones); + + return NVME_SUCCESS; +} + +typedef uint16_t (*op_handler_t)(NvmeNamespace *, NvmeZone *, NvmeZoneState, + NvmeRequest *); + +enum NvmeZoneProcessingMask { + NVME_PROC_CURRENT_ZONE = 0, + NVME_PROC_OPENED_ZONES = 1 << 0, + NVME_PROC_CLOSED_ZONES = 1 << 1, + NVME_PROC_READ_ONLY_ZONES = 1 << 2, + NVME_PROC_FULL_ZONES = 1 << 3, +}; + +static uint16_t nvme_open_zone(NvmeNamespace *ns, NvmeZone *zone, + NvmeZoneState state, NvmeRequest *req) +{ + return nvme_zrm_open(nvme_ctrl(req), ns, zone); +} + +static uint16_t nvme_close_zone(NvmeNamespace *ns, NvmeZone *zone, + NvmeZoneState state, NvmeRequest *req) +{ + return nvme_zrm_close(ns, zone); +} + +static uint16_t nvme_finish_zone(NvmeNamespace *ns, NvmeZone *zone, + NvmeZoneState state, NvmeRequest *req) +{ + return nvme_zrm_finish(ns, zone); +} + +static uint16_t nvme_offline_zone(NvmeNamespace *ns, NvmeZone *zone, + NvmeZoneState state, NvmeRequest *req) +{ + switch (state) { + case NVME_ZONE_STATE_READ_ONLY: + nvme_assign_zone_state(ns, zone, NVME_ZONE_STATE_OFFLINE); + /* fall through */ + case NVME_ZONE_STATE_OFFLINE: + return NVME_SUCCESS; + default: + return NVME_ZONE_INVAL_TRANSITION; + } +} + +static uint16_t nvme_set_zd_ext(NvmeNamespace *ns, NvmeZone *zone) +{ 
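+ /* Attaching a zone descriptor extension is only valid for an Empty zone; it consumes an active resource, sets NVME_ZA_ZD_EXT_VALID and transitions the zone to Closed. */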
+ uint16_t status; + uint8_t state = nvme_get_zone_state(zone); + + if (state == NVME_ZONE_STATE_EMPTY) { + status = nvme_aor_check(ns, 1, 0); + if (status) { + return status; + } + nvme_aor_inc_active(ns); + zone->d.za |= NVME_ZA_ZD_EXT_VALID; + nvme_assign_zone_state(ns, zone, NVME_ZONE_STATE_CLOSED); + return NVME_SUCCESS; + } + + return NVME_ZONE_INVAL_TRANSITION; +} + +static uint16_t nvme_bulk_proc_zone(NvmeNamespace *ns, NvmeZone *zone, + enum NvmeZoneProcessingMask proc_mask, + op_handler_t op_hndlr, NvmeRequest *req) +{ + uint16_t status = NVME_SUCCESS; + NvmeZoneState zs = nvme_get_zone_state(zone); + bool proc_zone; + + switch (zs) { + case NVME_ZONE_STATE_IMPLICITLY_OPEN: + case NVME_ZONE_STATE_EXPLICITLY_OPEN: + proc_zone = proc_mask & NVME_PROC_OPENED_ZONES; + break; + case NVME_ZONE_STATE_CLOSED: + proc_zone = proc_mask & NVME_PROC_CLOSED_ZONES; + break; + case NVME_ZONE_STATE_READ_ONLY: + proc_zone = proc_mask & NVME_PROC_READ_ONLY_ZONES; + break; + case NVME_ZONE_STATE_FULL: + proc_zone = proc_mask & NVME_PROC_FULL_ZONES; + break; + default: + proc_zone = false; + } + + if (proc_zone) { + status = op_hndlr(ns, zone, zs, req); + } + + return status; +} + +static uint16_t nvme_do_zone_op(NvmeNamespace *ns, NvmeZone *zone, + enum NvmeZoneProcessingMask proc_mask, + op_handler_t op_hndlr, NvmeRequest *req) +{ + NvmeZone *next; + uint16_t status = NVME_SUCCESS; + int i; + + if (!proc_mask) { + status = op_hndlr(ns, zone, nvme_get_zone_state(zone), req); + } else { + if (proc_mask & NVME_PROC_CLOSED_ZONES) { + QTAILQ_FOREACH_SAFE(zone, &ns->closed_zones, entry, next) { + status = nvme_bulk_proc_zone(ns, zone, proc_mask, op_hndlr, + req); + if (status && status != NVME_NO_COMPLETE) { + goto out; + } + } + } + if (proc_mask & NVME_PROC_OPENED_ZONES) { + QTAILQ_FOREACH_SAFE(zone, &ns->imp_open_zones, entry, next) { + status = nvme_bulk_proc_zone(ns, zone, proc_mask, op_hndlr, + req); + if (status && status != NVME_NO_COMPLETE) { + goto out; + } + } + + QTAILQ_FOREACH_SAFE(zone, &ns->exp_open_zones, entry, next) { + status = nvme_bulk_proc_zone(ns, zone, proc_mask, op_hndlr, + req); + if (status && status != NVME_NO_COMPLETE) { + goto out; + } + } + } + if (proc_mask & NVME_PROC_FULL_ZONES) { + QTAILQ_FOREACH_SAFE(zone, &ns->full_zones, entry, next) { + status = nvme_bulk_proc_zone(ns, zone, proc_mask, op_hndlr, + req); + if (status && status != NVME_NO_COMPLETE) { + goto out; + } + } + } + + if (proc_mask & NVME_PROC_READ_ONLY_ZONES) { + for (i = 0; i < ns->num_zones; i++, zone++) { + status = nvme_bulk_proc_zone(ns, zone, proc_mask, op_hndlr, + req); + if (status && status != NVME_NO_COMPLETE) { + goto out; + } + } + } + } + +out: + return status; +} + +typedef struct NvmeZoneResetAIOCB { + BlockAIOCB common; + BlockAIOCB *aiocb; + NvmeRequest *req; + QEMUBH *bh; + int ret; + + bool all; + int idx; + NvmeZone *zone; +} NvmeZoneResetAIOCB; + +static void nvme_zone_reset_cancel(BlockAIOCB *aiocb) +{ + NvmeZoneResetAIOCB *iocb = container_of(aiocb, NvmeZoneResetAIOCB, common); + NvmeRequest *req = iocb->req; + NvmeNamespace *ns = req->ns; + + iocb->idx = ns->num_zones; + + iocb->ret = -ECANCELED; + + if (iocb->aiocb) { + blk_aio_cancel_async(iocb->aiocb); + iocb->aiocb = NULL; + } +} + +static const AIOCBInfo nvme_zone_reset_aiocb_info = { + .aiocb_size = sizeof(NvmeZoneResetAIOCB), + .cancel_async = nvme_zone_reset_cancel, +}; + +static void nvme_zone_reset_bh(void *opaque) +{ + NvmeZoneResetAIOCB *iocb = opaque; + + iocb->common.cb(iocb->common.opaque, iocb->ret); + + 
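+ /* the request has been completed above; release the BH and drop the AIOCB reference */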
qemu_bh_delete(iocb->bh); + iocb->bh = NULL; + qemu_aio_unref(iocb); +} + +static void nvme_zone_reset_cb(void *opaque, int ret); + +static void nvme_zone_reset_epilogue_cb(void *opaque, int ret) +{ + NvmeZoneResetAIOCB *iocb = opaque; + NvmeRequest *req = iocb->req; + NvmeNamespace *ns = req->ns; + int64_t moff; + int count; + + if (ret < 0) { + nvme_zone_reset_cb(iocb, ret); + return; + } + + if (!ns->lbaf.ms) { + nvme_zone_reset_cb(iocb, 0); + return; + } + + moff = nvme_moff(ns, iocb->zone->d.zslba); + count = nvme_m2b(ns, ns->zone_size); + + iocb->aiocb = blk_aio_pwrite_zeroes(ns->blkconf.blk, moff, count, + BDRV_REQ_MAY_UNMAP, + nvme_zone_reset_cb, iocb); + return; +} + +static void nvme_zone_reset_cb(void *opaque, int ret) +{ + NvmeZoneResetAIOCB *iocb = opaque; + NvmeRequest *req = iocb->req; + NvmeNamespace *ns = req->ns; + + if (ret < 0) { + iocb->ret = ret; + goto done; + } + + if (iocb->zone) { + nvme_zrm_reset(ns, iocb->zone); + + if (!iocb->all) { + goto done; + } + } + + while (iocb->idx < ns->num_zones) { + NvmeZone *zone = &ns->zone_array[iocb->idx++]; + + switch (nvme_get_zone_state(zone)) { + case NVME_ZONE_STATE_EMPTY: + if (!iocb->all) { + goto done; + } + + continue; + + case NVME_ZONE_STATE_EXPLICITLY_OPEN: + case NVME_ZONE_STATE_IMPLICITLY_OPEN: + case NVME_ZONE_STATE_CLOSED: + case NVME_ZONE_STATE_FULL: + iocb->zone = zone; + break; + + default: + continue; + } + + trace_pci_nvme_zns_zone_reset(zone->d.zslba); + + iocb->aiocb = blk_aio_pwrite_zeroes(ns->blkconf.blk, + nvme_l2b(ns, zone->d.zslba), + nvme_l2b(ns, ns->zone_size), + BDRV_REQ_MAY_UNMAP, + nvme_zone_reset_epilogue_cb, + iocb); + return; + } + +done: + iocb->aiocb = NULL; + if (iocb->bh) { + qemu_bh_schedule(iocb->bh); + } +} + +static uint16_t nvme_zone_mgmt_send(NvmeCtrl *n, NvmeRequest *req) +{ + NvmeCmd *cmd = (NvmeCmd *)&req->cmd; + NvmeNamespace *ns = req->ns; + NvmeZone *zone; + NvmeZoneResetAIOCB *iocb; + uint8_t *zd_ext; + uint32_t dw13 = le32_to_cpu(cmd->cdw13); + uint64_t slba = 0; + uint32_t zone_idx = 0; + uint16_t status; + uint8_t action; + bool all; + enum NvmeZoneProcessingMask proc_mask = NVME_PROC_CURRENT_ZONE; + + action = dw13 & 0xff; + all = !!(dw13 & 0x100); + + req->status = NVME_SUCCESS; + + if (!all) { + status = nvme_get_mgmt_zone_slba_idx(ns, cmd, &slba, &zone_idx); + if (status) { + return status; + } + } + + zone = &ns->zone_array[zone_idx]; + if (slba != zone->d.zslba) { + trace_pci_nvme_err_unaligned_zone_cmd(action, slba, zone->d.zslba); + return NVME_INVALID_FIELD | NVME_DNR; + } + + switch (action) { + + case NVME_ZONE_ACTION_OPEN: + if (all) { + proc_mask = NVME_PROC_CLOSED_ZONES; + } + trace_pci_nvme_open_zone(slba, zone_idx, all); + status = nvme_do_zone_op(ns, zone, proc_mask, nvme_open_zone, req); + break; + + case NVME_ZONE_ACTION_CLOSE: + if (all) { + proc_mask = NVME_PROC_OPENED_ZONES; + } + trace_pci_nvme_close_zone(slba, zone_idx, all); + status = nvme_do_zone_op(ns, zone, proc_mask, nvme_close_zone, req); + break; + + case NVME_ZONE_ACTION_FINISH: + if (all) { + proc_mask = NVME_PROC_OPENED_ZONES | NVME_PROC_CLOSED_ZONES; + } + trace_pci_nvme_finish_zone(slba, zone_idx, all); + status = nvme_do_zone_op(ns, zone, proc_mask, nvme_finish_zone, req); + break; + + case NVME_ZONE_ACTION_RESET: + trace_pci_nvme_reset_zone(slba, zone_idx, all); + + iocb = blk_aio_get(&nvme_zone_reset_aiocb_info, ns->blkconf.blk, + nvme_misc_cb, req); + + iocb->req = req; + iocb->bh = qemu_bh_new(nvme_zone_reset_bh, iocb); + iocb->ret = 0; + iocb->all = all; + iocb->idx = zone_idx; + 
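+ /* no current zone yet; nvme_zone_reset_cb() walks the zone array from idx, covering every zone when 'all' is set */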
iocb->zone = NULL; + + req->aiocb = &iocb->common; + nvme_zone_reset_cb(iocb, 0); + + return NVME_NO_COMPLETE; + + case NVME_ZONE_ACTION_OFFLINE: + if (all) { + proc_mask = NVME_PROC_READ_ONLY_ZONES; + } + trace_pci_nvme_offline_zone(slba, zone_idx, all); + status = nvme_do_zone_op(ns, zone, proc_mask, nvme_offline_zone, req); + break; + + case NVME_ZONE_ACTION_SET_ZD_EXT: + trace_pci_nvme_set_descriptor_extension(slba, zone_idx); + if (all || !ns->params.zd_extension_size) { + return NVME_INVALID_FIELD | NVME_DNR; + } + zd_ext = nvme_get_zd_extension(ns, zone_idx); + status = nvme_h2c(n, zd_ext, ns->params.zd_extension_size, req); + if (status) { + trace_pci_nvme_err_zd_extension_map_error(zone_idx); + return status; + } + + status = nvme_set_zd_ext(ns, zone); + if (status == NVME_SUCCESS) { + trace_pci_nvme_zd_extension_set(zone_idx); + return status; + } + break; + + default: + trace_pci_nvme_err_invalid_mgmt_action(action); + status = NVME_INVALID_FIELD; + } + + if (status == NVME_ZONE_INVAL_TRANSITION) { + trace_pci_nvme_err_invalid_zone_state_transition(action, slba, + zone->d.za); + } + if (status) { + status |= NVME_DNR; + } + + return status; +} + +static bool nvme_zone_matches_filter(uint32_t zafs, NvmeZone *zl) +{ + NvmeZoneState zs = nvme_get_zone_state(zl); + + switch (zafs) { + case NVME_ZONE_REPORT_ALL: + return true; + case NVME_ZONE_REPORT_EMPTY: + return zs == NVME_ZONE_STATE_EMPTY; + case NVME_ZONE_REPORT_IMPLICITLY_OPEN: + return zs == NVME_ZONE_STATE_IMPLICITLY_OPEN; + case NVME_ZONE_REPORT_EXPLICITLY_OPEN: + return zs == NVME_ZONE_STATE_EXPLICITLY_OPEN; + case NVME_ZONE_REPORT_CLOSED: + return zs == NVME_ZONE_STATE_CLOSED; + case NVME_ZONE_REPORT_FULL: + return zs == NVME_ZONE_STATE_FULL; + case NVME_ZONE_REPORT_READ_ONLY: + return zs == NVME_ZONE_STATE_READ_ONLY; + case NVME_ZONE_REPORT_OFFLINE: + return zs == NVME_ZONE_STATE_OFFLINE; + default: + return false; + } +} + +static uint16_t nvme_zone_mgmt_recv(NvmeCtrl *n, NvmeRequest *req) +{ + NvmeCmd *cmd = (NvmeCmd *)&req->cmd; + NvmeNamespace *ns = req->ns; + /* cdw12 is zero-based number of dwords to return. 
Convert to bytes */ + uint32_t data_size = (le32_to_cpu(cmd->cdw12) + 1) << 2; + uint32_t dw13 = le32_to_cpu(cmd->cdw13); + uint32_t zone_idx, zra, zrasf, partial; + uint64_t max_zones, nr_zones = 0; + uint16_t status; + uint64_t slba; + NvmeZoneDescr *z; + NvmeZone *zone; + NvmeZoneReportHeader *header; + void *buf, *buf_p; + size_t zone_entry_sz; + int i; + + req->status = NVME_SUCCESS; + + status = nvme_get_mgmt_zone_slba_idx(ns, cmd, &slba, &zone_idx); + if (status) { + return status; + } + + zra = dw13 & 0xff; + if (zra != NVME_ZONE_REPORT && zra != NVME_ZONE_REPORT_EXTENDED) { + return NVME_INVALID_FIELD | NVME_DNR; + } + if (zra == NVME_ZONE_REPORT_EXTENDED && !ns->params.zd_extension_size) { + return NVME_INVALID_FIELD | NVME_DNR; + } + + zrasf = (dw13 >> 8) & 0xff; + if (zrasf > NVME_ZONE_REPORT_OFFLINE) { + return NVME_INVALID_FIELD | NVME_DNR; + } + + if (data_size < sizeof(NvmeZoneReportHeader)) { + return NVME_INVALID_FIELD | NVME_DNR; + } + + status = nvme_check_mdts(n, data_size); + if (status) { + return status; + } + + partial = (dw13 >> 16) & 0x01; + + zone_entry_sz = sizeof(NvmeZoneDescr); + if (zra == NVME_ZONE_REPORT_EXTENDED) { + zone_entry_sz += ns->params.zd_extension_size; + } + + max_zones = (data_size - sizeof(NvmeZoneReportHeader)) / zone_entry_sz; + buf = g_malloc0(data_size); + + zone = &ns->zone_array[zone_idx]; + for (i = zone_idx; i < ns->num_zones; i++) { + if (partial && nr_zones >= max_zones) { + break; + } + if (nvme_zone_matches_filter(zrasf, zone++)) { + nr_zones++; + } + } + header = (NvmeZoneReportHeader *)buf; + header->nr_zones = cpu_to_le64(nr_zones); + + buf_p = buf + sizeof(NvmeZoneReportHeader); + for (; zone_idx < ns->num_zones && max_zones > 0; zone_idx++) { + zone = &ns->zone_array[zone_idx]; + if (nvme_zone_matches_filter(zrasf, zone)) { + z = (NvmeZoneDescr *)buf_p; + buf_p += sizeof(NvmeZoneDescr); + + z->zt = zone->d.zt; + z->zs = zone->d.zs; + z->zcap = cpu_to_le64(zone->d.zcap); + z->zslba = cpu_to_le64(zone->d.zslba); + z->za = zone->d.za; + + if (nvme_wp_is_valid(zone)) { + z->wp = cpu_to_le64(zone->d.wp); + } else { + z->wp = cpu_to_le64(~0ULL); + } + + if (zra == NVME_ZONE_REPORT_EXTENDED) { + if (zone->d.za & NVME_ZA_ZD_EXT_VALID) { + memcpy(buf_p, nvme_get_zd_extension(ns, zone_idx), + ns->params.zd_extension_size); + } + buf_p += ns->params.zd_extension_size; + } + + max_zones--; + } + } + + status = nvme_c2h(n, (uint8_t *)buf, data_size, req); + + g_free(buf); + + return status; +} + +static uint16_t nvme_io_cmd(NvmeCtrl *n, NvmeRequest *req) +{ + NvmeNamespace *ns; + uint32_t nsid = le32_to_cpu(req->cmd.nsid); + + trace_pci_nvme_io_cmd(nvme_cid(req), nsid, nvme_sqid(req), + req->cmd.opcode, nvme_io_opc_str(req->cmd.opcode)); + + if (!nvme_nsid_valid(n, nsid)) { + return NVME_INVALID_NSID | NVME_DNR; + } + + /* + * In the base NVM command set, Flush may apply to all namespaces + * (indicated by NSID being set to FFFFFFFFh). But if that feature is used + * along with TP 4056 (Namespace Types), it may be pretty screwed up. + * + * If NSID is indeed set to FFFFFFFFh, we simply cannot associate the + * opcode with a specific command since we cannot determine a unique I/O + * command set. Opcode 0h could have any other meaning than something + * equivalent to flushing and say it DOES have completely different + * semantics in some other command set - does an NSID of FFFFFFFFh then + * mean "for all namespaces, apply whatever command set specific command + * that uses the 0h opcode?" 
Or does it mean "for all namespaces, apply + * whatever command that uses the 0h opcode if, and only if, it allows NSID + * to be FFFFFFFFh"? + * + * Anyway (and luckily), for now, we do not care about this since the + * device only supports namespace types that includes the NVM Flush command + * (NVM and Zoned), so always do an NVM Flush. + */ + if (req->cmd.opcode == NVME_CMD_FLUSH) { + return nvme_flush(n, req); + } + + ns = nvme_ns(n, nsid); + if (unlikely(!ns)) { + return NVME_INVALID_FIELD | NVME_DNR; + } + + if (!(ns->iocs[req->cmd.opcode] & NVME_CMD_EFF_CSUPP)) { + trace_pci_nvme_err_invalid_opc(req->cmd.opcode); + return NVME_INVALID_OPCODE | NVME_DNR; + } + + if (ns->status) { + return ns->status; + } + + if (NVME_CMD_FLAGS_FUSE(req->cmd.flags)) { + return NVME_INVALID_FIELD; + } + + req->ns = ns; + + switch (req->cmd.opcode) { + case NVME_CMD_WRITE_ZEROES: + return nvme_write_zeroes(n, req); + case NVME_CMD_ZONE_APPEND: + return nvme_zone_append(n, req); + case NVME_CMD_WRITE: + return nvme_write(n, req); + case NVME_CMD_READ: + return nvme_read(n, req); + case NVME_CMD_COMPARE: + return nvme_compare(n, req); + case NVME_CMD_DSM: + return nvme_dsm(n, req); + case NVME_CMD_VERIFY: + return nvme_verify(n, req); + case NVME_CMD_COPY: + return nvme_copy(n, req); + case NVME_CMD_ZONE_MGMT_SEND: + return nvme_zone_mgmt_send(n, req); + case NVME_CMD_ZONE_MGMT_RECV: + return nvme_zone_mgmt_recv(n, req); + default: + assert(false); + } + + return NVME_INVALID_OPCODE | NVME_DNR; +} + +static void nvme_free_sq(NvmeSQueue *sq, NvmeCtrl *n) +{ + n->sq[sq->sqid] = NULL; + timer_free(sq->timer); + g_free(sq->io_req); + if (sq->sqid) { + g_free(sq); + } +} + +static uint16_t nvme_del_sq(NvmeCtrl *n, NvmeRequest *req) +{ + NvmeDeleteQ *c = (NvmeDeleteQ *)&req->cmd; + NvmeRequest *r, *next; + NvmeSQueue *sq; + NvmeCQueue *cq; + uint16_t qid = le16_to_cpu(c->qid); + + if (unlikely(!qid || nvme_check_sqid(n, qid))) { + trace_pci_nvme_err_invalid_del_sq(qid); + return NVME_INVALID_QID | NVME_DNR; + } + + trace_pci_nvme_del_sq(qid); + + sq = n->sq[qid]; + while (!QTAILQ_EMPTY(&sq->out_req_list)) { + r = QTAILQ_FIRST(&sq->out_req_list); + assert(r->aiocb); + blk_aio_cancel(r->aiocb); + } + + assert(QTAILQ_EMPTY(&sq->out_req_list)); + + if (!nvme_check_cqid(n, sq->cqid)) { + cq = n->cq[sq->cqid]; + QTAILQ_REMOVE(&cq->sq_list, sq, entry); + + nvme_post_cqes(cq); + QTAILQ_FOREACH_SAFE(r, &cq->req_list, entry, next) { + if (r->sq == sq) { + QTAILQ_REMOVE(&cq->req_list, r, entry); + QTAILQ_INSERT_TAIL(&sq->req_list, r, entry); + } + } + } + + nvme_free_sq(sq, n); + return NVME_SUCCESS; +} + +static void nvme_init_sq(NvmeSQueue *sq, NvmeCtrl *n, uint64_t dma_addr, + uint16_t sqid, uint16_t cqid, uint16_t size) +{ + int i; + NvmeCQueue *cq; + + sq->ctrl = n; + sq->dma_addr = dma_addr; + sq->sqid = sqid; + sq->size = size; + sq->cqid = cqid; + sq->head = sq->tail = 0; + sq->io_req = g_new0(NvmeRequest, sq->size); + + QTAILQ_INIT(&sq->req_list); + QTAILQ_INIT(&sq->out_req_list); + for (i = 0; i < sq->size; i++) { + sq->io_req[i].sq = sq; + QTAILQ_INSERT_TAIL(&(sq->req_list), &sq->io_req[i], entry); + } + sq->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, nvme_process_sq, sq); + + assert(n->cq[cqid]); + cq = n->cq[cqid]; + QTAILQ_INSERT_TAIL(&(cq->sq_list), sq, entry); + n->sq[sqid] = sq; +} + +static uint16_t nvme_create_sq(NvmeCtrl *n, NvmeRequest *req) +{ + NvmeSQueue *sq; + NvmeCreateSq *c = (NvmeCreateSq *)&req->cmd; + + uint16_t cqid = le16_to_cpu(c->cqid); + uint16_t sqid = le16_to_cpu(c->sqid); + uint16_t 
qsize = le16_to_cpu(c->qsize); + uint16_t qflags = le16_to_cpu(c->sq_flags); + uint64_t prp1 = le64_to_cpu(c->prp1); + + trace_pci_nvme_create_sq(prp1, sqid, cqid, qsize, qflags); + + if (unlikely(!cqid || nvme_check_cqid(n, cqid))) { + trace_pci_nvme_err_invalid_create_sq_cqid(cqid); + return NVME_INVALID_CQID | NVME_DNR; + } + if (unlikely(!sqid || sqid > n->params.max_ioqpairs || + n->sq[sqid] != NULL)) { + trace_pci_nvme_err_invalid_create_sq_sqid(sqid); + return NVME_INVALID_QID | NVME_DNR; + } + if (unlikely(!qsize || qsize > NVME_CAP_MQES(ldq_le_p(&n->bar.cap)))) { + trace_pci_nvme_err_invalid_create_sq_size(qsize); + return NVME_MAX_QSIZE_EXCEEDED | NVME_DNR; + } + if (unlikely(prp1 & (n->page_size - 1))) { + trace_pci_nvme_err_invalid_create_sq_addr(prp1); + return NVME_INVALID_PRP_OFFSET | NVME_DNR; + } + if (unlikely(!(NVME_SQ_FLAGS_PC(qflags)))) { + trace_pci_nvme_err_invalid_create_sq_qflags(NVME_SQ_FLAGS_PC(qflags)); + return NVME_INVALID_FIELD | NVME_DNR; + } + sq = g_malloc0(sizeof(*sq)); + nvme_init_sq(sq, n, prp1, sqid, cqid, qsize + 1); + return NVME_SUCCESS; +} + +struct nvme_stats { + uint64_t units_read; + uint64_t units_written; + uint64_t read_commands; + uint64_t write_commands; +}; + +static void nvme_set_blk_stats(NvmeNamespace *ns, struct nvme_stats *stats) +{ + BlockAcctStats *s = blk_get_stats(ns->blkconf.blk); + + stats->units_read += s->nr_bytes[BLOCK_ACCT_READ] >> BDRV_SECTOR_BITS; + stats->units_written += s->nr_bytes[BLOCK_ACCT_WRITE] >> BDRV_SECTOR_BITS; + stats->read_commands += s->nr_ops[BLOCK_ACCT_READ]; + stats->write_commands += s->nr_ops[BLOCK_ACCT_WRITE]; +} + +static uint16_t nvme_smart_info(NvmeCtrl *n, uint8_t rae, uint32_t buf_len, + uint64_t off, NvmeRequest *req) +{ + uint32_t nsid = le32_to_cpu(req->cmd.nsid); + struct nvme_stats stats = { 0 }; + NvmeSmartLog smart = { 0 }; + uint32_t trans_len; + NvmeNamespace *ns; + time_t current_ms; + + if (off >= sizeof(smart)) { + return NVME_INVALID_FIELD | NVME_DNR; + } + + if (nsid != 0xffffffff) { + ns = nvme_ns(n, nsid); + if (!ns) { + return NVME_INVALID_NSID | NVME_DNR; + } + nvme_set_blk_stats(ns, &stats); + } else { + int i; + + for (i = 1; i <= NVME_MAX_NAMESPACES; i++) { + ns = nvme_ns(n, i); + if (!ns) { + continue; + } + nvme_set_blk_stats(ns, &stats); + } + } + + trans_len = MIN(sizeof(smart) - off, buf_len); + smart.critical_warning = n->smart_critical_warning; + + smart.data_units_read[0] = cpu_to_le64(DIV_ROUND_UP(stats.units_read, + 1000)); + smart.data_units_written[0] = cpu_to_le64(DIV_ROUND_UP(stats.units_written, + 1000)); + smart.host_read_commands[0] = cpu_to_le64(stats.read_commands); + smart.host_write_commands[0] = cpu_to_le64(stats.write_commands); + + smart.temperature = cpu_to_le16(n->temperature); + + if ((n->temperature >= n->features.temp_thresh_hi) || + (n->temperature <= n->features.temp_thresh_low)) { + smart.critical_warning |= NVME_SMART_TEMPERATURE; + } + + current_ms = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL); + smart.power_on_hours[0] = + cpu_to_le64((((current_ms - n->starttime_ms) / 1000) / 60) / 60); + + if (!rae) { + nvme_clear_events(n, NVME_AER_TYPE_SMART); + } + + return nvme_c2h(n, (uint8_t *) &smart + off, trans_len, req); +} + +static uint16_t nvme_fw_log_info(NvmeCtrl *n, uint32_t buf_len, uint64_t off, + NvmeRequest *req) +{ + uint32_t trans_len; + NvmeFwSlotInfoLog fw_log = { + .afi = 0x1, + }; + + if (off >= sizeof(fw_log)) { + return NVME_INVALID_FIELD | NVME_DNR; + } + + strpadcpy((char *)&fw_log.frs1, sizeof(fw_log.frs1), "1.0", ' '); + trans_len 
= MIN(sizeof(fw_log) - off, buf_len); + + return nvme_c2h(n, (uint8_t *) &fw_log + off, trans_len, req); +} + +static uint16_t nvme_error_info(NvmeCtrl *n, uint8_t rae, uint32_t buf_len, + uint64_t off, NvmeRequest *req) +{ + uint32_t trans_len; + NvmeErrorLog errlog; + + if (off >= sizeof(errlog)) { + return NVME_INVALID_FIELD | NVME_DNR; + } + + if (!rae) { + nvme_clear_events(n, NVME_AER_TYPE_ERROR); + } + + memset(&errlog, 0x0, sizeof(errlog)); + trans_len = MIN(sizeof(errlog) - off, buf_len); + + return nvme_c2h(n, (uint8_t *)&errlog, trans_len, req); +} + +static uint16_t nvme_changed_nslist(NvmeCtrl *n, uint8_t rae, uint32_t buf_len, + uint64_t off, NvmeRequest *req) +{ + uint32_t nslist[1024]; + uint32_t trans_len; + int i = 0; + uint32_t nsid; + + if (off >= sizeof(nslist)) { + trace_pci_nvme_err_invalid_log_page_offset(off, sizeof(nslist)); + return NVME_INVALID_FIELD | NVME_DNR; + } + + memset(nslist, 0x0, sizeof(nslist)); + trans_len = MIN(sizeof(nslist) - off, buf_len); + + while ((nsid = find_first_bit(n->changed_nsids, NVME_CHANGED_NSID_SIZE)) != + NVME_CHANGED_NSID_SIZE) { + /* + * If more than 1024 namespaces have changed, the spec requires the + * first entry in the log page to be set to FFFFFFFFh and all other + * entries to 0. + */ + if (i == ARRAY_SIZE(nslist)) { + memset(nslist, 0x0, sizeof(nslist)); + nslist[0] = 0xffffffff; + break; + } + + nslist[i++] = nsid; + clear_bit(nsid, n->changed_nsids); + } + + /* + * Clear any remaining changed-namespace bits if we broke out of the loop + * above because more than 1024 namespaces changed. + */ + if (nslist[0] == 0xffffffff) { + bitmap_zero(n->changed_nsids, NVME_CHANGED_NSID_SIZE); + } + + if (!rae) { + nvme_clear_events(n, NVME_AER_TYPE_NOTICE); + } + + return nvme_c2h(n, ((uint8_t *)nslist) + off, trans_len, req); +} + +static uint16_t nvme_cmd_effects(NvmeCtrl *n, uint8_t csi, uint32_t buf_len, + uint64_t off, NvmeRequest *req) +{ + NvmeEffectsLog log = {}; + const uint32_t *src_iocs = NULL; + uint32_t trans_len; + + if (off >= sizeof(log)) { + trace_pci_nvme_err_invalid_log_page_offset(off, sizeof(log)); + return NVME_INVALID_FIELD | NVME_DNR; + } + + switch (NVME_CC_CSS(ldl_le_p(&n->bar.cc))) { + case NVME_CC_CSS_NVM: + src_iocs = nvme_cse_iocs_nvm; + /* fall through */ + case NVME_CC_CSS_ADMIN_ONLY: + break; + case NVME_CC_CSS_CSI: + switch (csi) { + case NVME_CSI_NVM: + src_iocs = nvme_cse_iocs_nvm; + break; + case NVME_CSI_ZONED: + src_iocs = nvme_cse_iocs_zoned; + break; + } + } + + memcpy(log.acs, nvme_cse_acs, sizeof(nvme_cse_acs)); + + if (src_iocs) { + memcpy(log.iocs, src_iocs, sizeof(log.iocs)); + } + + trans_len = MIN(sizeof(log) - off, buf_len); + + return nvme_c2h(n, ((uint8_t *)&log) + off, trans_len, req); +} + +static uint16_t nvme_get_log(NvmeCtrl *n, NvmeRequest *req) +{ + NvmeCmd *cmd = &req->cmd; + + uint32_t dw10 = le32_to_cpu(cmd->cdw10); + uint32_t dw11 = le32_to_cpu(cmd->cdw11); + uint32_t dw12 = le32_to_cpu(cmd->cdw12); + uint32_t dw13 = le32_to_cpu(cmd->cdw13); + uint8_t lid = dw10 & 0xff; + uint8_t lsp = (dw10 >> 8) & 0xf; + uint8_t rae = (dw10 >> 15) & 0x1; + uint8_t csi = le32_to_cpu(cmd->cdw14) >> 24; + uint32_t numdl, numdu; + uint64_t off, lpol, lpou; + size_t len; + uint16_t status; + + numdl = (dw10 >> 16); + numdu = (dw11 & 0xffff); + lpol = dw12; + lpou = dw13; + + len = (((numdu << 16) | numdl) + 1) << 2; + off = (lpou << 32ULL) | lpol; + + if (off & 0x3) { + return NVME_INVALID_FIELD | NVME_DNR; + } + + trace_pci_nvme_get_log(nvme_cid(req), lid, lsp, rae, len, off); + + status = nvme_check_mdts(n, len); + if (status) { + return
status; + } + + switch (lid) { + case NVME_LOG_ERROR_INFO: + return nvme_error_info(n, rae, len, off, req); + case NVME_LOG_SMART_INFO: + return nvme_smart_info(n, rae, len, off, req); + case NVME_LOG_FW_SLOT_INFO: + return nvme_fw_log_info(n, len, off, req); + case NVME_LOG_CHANGED_NSLIST: + return nvme_changed_nslist(n, rae, len, off, req); + case NVME_LOG_CMD_EFFECTS: + return nvme_cmd_effects(n, csi, len, off, req); + default: + trace_pci_nvme_err_invalid_log_page(nvme_cid(req), lid); + return NVME_INVALID_FIELD | NVME_DNR; + } +} + +static void nvme_free_cq(NvmeCQueue *cq, NvmeCtrl *n) +{ + n->cq[cq->cqid] = NULL; + timer_free(cq->timer); + if (msix_enabled(&n->parent_obj)) { + msix_vector_unuse(&n->parent_obj, cq->vector); + } + if (cq->cqid) { + g_free(cq); + } +} + +static uint16_t nvme_del_cq(NvmeCtrl *n, NvmeRequest *req) +{ + NvmeDeleteQ *c = (NvmeDeleteQ *)&req->cmd; + NvmeCQueue *cq; + uint16_t qid = le16_to_cpu(c->qid); + + if (unlikely(!qid || nvme_check_cqid(n, qid))) { + trace_pci_nvme_err_invalid_del_cq_cqid(qid); + return NVME_INVALID_CQID | NVME_DNR; + } + + cq = n->cq[qid]; + if (unlikely(!QTAILQ_EMPTY(&cq->sq_list))) { + trace_pci_nvme_err_invalid_del_cq_notempty(qid); + return NVME_INVALID_QUEUE_DEL; + } + + if (cq->irq_enabled && cq->tail != cq->head) { + n->cq_pending--; + } + + nvme_irq_deassert(n, cq); + trace_pci_nvme_del_cq(qid); + nvme_free_cq(cq, n); + return NVME_SUCCESS; +} + +static void nvme_init_cq(NvmeCQueue *cq, NvmeCtrl *n, uint64_t dma_addr, + uint16_t cqid, uint16_t vector, uint16_t size, + uint16_t irq_enabled) +{ + int ret; + + if (msix_enabled(&n->parent_obj)) { + ret = msix_vector_use(&n->parent_obj, vector); + assert(ret == 0); + } + cq->ctrl = n; + cq->cqid = cqid; + cq->size = size; + cq->dma_addr = dma_addr; + cq->phase = 1; + cq->irq_enabled = irq_enabled; + cq->vector = vector; + cq->head = cq->tail = 0; + QTAILQ_INIT(&cq->req_list); + QTAILQ_INIT(&cq->sq_list); + n->cq[cqid] = cq; + cq->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, nvme_post_cqes, cq); +} + +static uint16_t nvme_create_cq(NvmeCtrl *n, NvmeRequest *req) +{ + NvmeCQueue *cq; + NvmeCreateCq *c = (NvmeCreateCq *)&req->cmd; + uint16_t cqid = le16_to_cpu(c->cqid); + uint16_t vector = le16_to_cpu(c->irq_vector); + uint16_t qsize = le16_to_cpu(c->qsize); + uint16_t qflags = le16_to_cpu(c->cq_flags); + uint64_t prp1 = le64_to_cpu(c->prp1); + + trace_pci_nvme_create_cq(prp1, cqid, vector, qsize, qflags, + NVME_CQ_FLAGS_IEN(qflags) != 0); + + if (unlikely(!cqid || cqid > n->params.max_ioqpairs || + n->cq[cqid] != NULL)) { + trace_pci_nvme_err_invalid_create_cq_cqid(cqid); + return NVME_INVALID_QID | NVME_DNR; + } + if (unlikely(!qsize || qsize > NVME_CAP_MQES(ldq_le_p(&n->bar.cap)))) { + trace_pci_nvme_err_invalid_create_cq_size(qsize); + return NVME_MAX_QSIZE_EXCEEDED | NVME_DNR; + } + if (unlikely(prp1 & (n->page_size - 1))) { + trace_pci_nvme_err_invalid_create_cq_addr(prp1); + return NVME_INVALID_PRP_OFFSET | NVME_DNR; + } + if (unlikely(!msix_enabled(&n->parent_obj) && vector)) { + trace_pci_nvme_err_invalid_create_cq_vector(vector); + return NVME_INVALID_IRQ_VECTOR | NVME_DNR; + } + if (unlikely(vector >= n->params.msix_qsize)) { + trace_pci_nvme_err_invalid_create_cq_vector(vector); + return NVME_INVALID_IRQ_VECTOR | NVME_DNR; + } + if (unlikely(!(NVME_CQ_FLAGS_PC(qflags)))) { + trace_pci_nvme_err_invalid_create_cq_qflags(NVME_CQ_FLAGS_PC(qflags)); + return NVME_INVALID_FIELD | NVME_DNR; + } + + cq = g_malloc0(sizeof(*cq)); + nvme_init_cq(cq, n, prp1, cqid, vector, qsize + 1, + 
NVME_CQ_FLAGS_IEN(qflags)); + + /* + * It is only required to set qs_created when creating a completion queue; + * creating a submission queue without a matching completion queue will + * fail. + */ + n->qs_created = true; + return NVME_SUCCESS; +} + +static uint16_t nvme_rpt_empty_id_struct(NvmeCtrl *n, NvmeRequest *req) +{ + uint8_t id[NVME_IDENTIFY_DATA_SIZE] = {}; + + return nvme_c2h(n, id, sizeof(id), req); +} + +static uint16_t nvme_identify_ctrl(NvmeCtrl *n, NvmeRequest *req) +{ + trace_pci_nvme_identify_ctrl(); + + return nvme_c2h(n, (uint8_t *)&n->id_ctrl, sizeof(n->id_ctrl), req); +} + +static uint16_t nvme_identify_ctrl_csi(NvmeCtrl *n, NvmeRequest *req) +{ + NvmeIdentify *c = (NvmeIdentify *)&req->cmd; + uint8_t id[NVME_IDENTIFY_DATA_SIZE] = {}; + NvmeIdCtrlNvm *id_nvm = (NvmeIdCtrlNvm *)&id; + + trace_pci_nvme_identify_ctrl_csi(c->csi); + + switch (c->csi) { + case NVME_CSI_NVM: + id_nvm->vsl = n->params.vsl; + id_nvm->dmrsl = cpu_to_le32(n->dmrsl); + break; + + case NVME_CSI_ZONED: + ((NvmeIdCtrlZoned *)&id)->zasl = n->params.zasl; + break; + + default: + return NVME_INVALID_FIELD | NVME_DNR; + } + + return nvme_c2h(n, id, sizeof(id), req); +} + +static uint16_t nvme_identify_ns(NvmeCtrl *n, NvmeRequest *req, bool active) +{ + NvmeNamespace *ns; + NvmeIdentify *c = (NvmeIdentify *)&req->cmd; + uint32_t nsid = le32_to_cpu(c->nsid); + + trace_pci_nvme_identify_ns(nsid); + + if (!nvme_nsid_valid(n, nsid) || nsid == NVME_NSID_BROADCAST) { + return NVME_INVALID_NSID | NVME_DNR; + } + + ns = nvme_ns(n, nsid); + if (unlikely(!ns)) { + if (!active) { + ns = nvme_subsys_ns(n->subsys, nsid); + if (!ns) { + return nvme_rpt_empty_id_struct(n, req); + } + } else { + return nvme_rpt_empty_id_struct(n, req); + } + } + + if (active || ns->csi == NVME_CSI_NVM) { + return nvme_c2h(n, (uint8_t *)&ns->id_ns, sizeof(NvmeIdNs), req); + } + + return NVME_INVALID_CMD_SET | NVME_DNR; +} + +static uint16_t nvme_identify_ctrl_list(NvmeCtrl *n, NvmeRequest *req, + bool attached) +{ + NvmeIdentify *c = (NvmeIdentify *)&req->cmd; + uint32_t nsid = le32_to_cpu(c->nsid); + uint16_t min_id = le16_to_cpu(c->ctrlid); + uint16_t list[NVME_CONTROLLER_LIST_SIZE] = {}; + uint16_t *ids = &list[1]; + NvmeNamespace *ns; + NvmeCtrl *ctrl; + int cntlid, nr_ids = 0; + + trace_pci_nvme_identify_ctrl_list(c->cns, min_id); + + if (!n->subsys) { + return NVME_INVALID_FIELD | NVME_DNR; + } + + if (attached) { + if (nsid == NVME_NSID_BROADCAST) { + return NVME_INVALID_FIELD | NVME_DNR; + } + + ns = nvme_subsys_ns(n->subsys, nsid); + if (!ns) { + return NVME_INVALID_FIELD | NVME_DNR; + } + } + + for (cntlid = min_id; cntlid < ARRAY_SIZE(n->subsys->ctrls); cntlid++) { + ctrl = nvme_subsys_ctrl(n->subsys, cntlid); + if (!ctrl) { + continue; + } + + if (attached && !nvme_ns(ctrl, nsid)) { + continue; + } + + ids[nr_ids++] = cntlid; + } + + list[0] = nr_ids; + + return nvme_c2h(n, (uint8_t *)list, sizeof(list), req); +} + +static uint16_t nvme_identify_ns_csi(NvmeCtrl *n, NvmeRequest *req, + bool active) +{ + NvmeNamespace *ns; + NvmeIdentify *c = (NvmeIdentify *)&req->cmd; + uint32_t nsid = le32_to_cpu(c->nsid); + + trace_pci_nvme_identify_ns_csi(nsid, c->csi); + + if (!nvme_nsid_valid(n, nsid) || nsid == NVME_NSID_BROADCAST) { + return NVME_INVALID_NSID | NVME_DNR; + } + + ns = nvme_ns(n, nsid); + if (unlikely(!ns)) { + if (!active) { + ns = nvme_subsys_ns(n->subsys, nsid); + if (!ns) { + return nvme_rpt_empty_id_struct(n, req); + } + } else { + return nvme_rpt_empty_id_struct(n, req); + } + } + + if (c->csi == NVME_CSI_NVM) { + 
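+ /* this model defines no NVM command set specific namespace data structure, so a zero-filled structure is returned */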
return nvme_rpt_empty_id_struct(n, req); + } else if (c->csi == NVME_CSI_ZONED && ns->csi == NVME_CSI_ZONED) { + return nvme_c2h(n, (uint8_t *)ns->id_ns_zoned, sizeof(NvmeIdNsZoned), + req); + } + + return NVME_INVALID_FIELD | NVME_DNR; +} + +static uint16_t nvme_identify_nslist(NvmeCtrl *n, NvmeRequest *req, + bool active) +{ + NvmeNamespace *ns; + NvmeIdentify *c = (NvmeIdentify *)&req->cmd; + uint32_t min_nsid = le32_to_cpu(c->nsid); + uint8_t list[NVME_IDENTIFY_DATA_SIZE] = {}; + static const int data_len = sizeof(list); + uint32_t *list_ptr = (uint32_t *)list; + int i, j = 0; + + trace_pci_nvme_identify_nslist(min_nsid); + + /* + * Both FFFFFFFFh (NVME_NSID_BROADCAST) and FFFFFFFEh are invalid values + * since the Active Namespace ID List should return namespaces with ids + * *higher* than the NSID specified in the command. This is also specified + * in the spec (NVM Express v1.3d, Section 5.15.4). + */ + if (min_nsid >= NVME_NSID_BROADCAST - 1) { + return NVME_INVALID_NSID | NVME_DNR; + } + + for (i = 1; i <= NVME_MAX_NAMESPACES; i++) { + ns = nvme_ns(n, i); + if (!ns) { + if (!active) { + ns = nvme_subsys_ns(n->subsys, i); + if (!ns) { + continue; + } + } else { + continue; + } + } + if (ns->params.nsid <= min_nsid) { + continue; + } + list_ptr[j++] = cpu_to_le32(ns->params.nsid); + if (j == data_len / sizeof(uint32_t)) { + break; + } + } + + return nvme_c2h(n, list, data_len, req); +} + +static uint16_t nvme_identify_nslist_csi(NvmeCtrl *n, NvmeRequest *req, + bool active) +{ + NvmeNamespace *ns; + NvmeIdentify *c = (NvmeIdentify *)&req->cmd; + uint32_t min_nsid = le32_to_cpu(c->nsid); + uint8_t list[NVME_IDENTIFY_DATA_SIZE] = {}; + static const int data_len = sizeof(list); + uint32_t *list_ptr = (uint32_t *)list; + int i, j = 0; + + trace_pci_nvme_identify_nslist_csi(min_nsid, c->csi); + + /* + * Same as in nvme_identify_nslist(), FFFFFFFFh/FFFFFFFEh are invalid. + */ + if (min_nsid >= NVME_NSID_BROADCAST - 1) { + return NVME_INVALID_NSID | NVME_DNR; + } + + if (c->csi != NVME_CSI_NVM && c->csi != NVME_CSI_ZONED) { + return NVME_INVALID_FIELD | NVME_DNR; + } + + for (i = 1; i <= NVME_MAX_NAMESPACES; i++) { + ns = nvme_ns(n, i); + if (!ns) { + if (!active) { + ns = nvme_subsys_ns(n->subsys, i); + if (!ns) { + continue; + } + } else { + continue; + } + } + if (ns->params.nsid <= min_nsid || c->csi != ns->csi) { + continue; + } + list_ptr[j++] = cpu_to_le32(ns->params.nsid); + if (j == data_len / sizeof(uint32_t)) { + break; + } + } + + return nvme_c2h(n, list, data_len, req); +} + +static uint16_t nvme_identify_ns_descr_list(NvmeCtrl *n, NvmeRequest *req) +{ + NvmeNamespace *ns; + NvmeIdentify *c = (NvmeIdentify *)&req->cmd; + uint32_t nsid = le32_to_cpu(c->nsid); + uint8_t list[NVME_IDENTIFY_DATA_SIZE] = {}; + uint8_t *pos = list; + struct { + NvmeIdNsDescr hdr; + uint8_t v[NVME_NIDL_UUID]; + } QEMU_PACKED uuid = {}; + struct { + NvmeIdNsDescr hdr; + uint64_t v; + } QEMU_PACKED eui64 = {}; + struct { + NvmeIdNsDescr hdr; + uint8_t v; + } QEMU_PACKED csi = {}; + + trace_pci_nvme_identify_ns_descr_list(nsid); + + if (!nvme_nsid_valid(n, nsid) || nsid == NVME_NSID_BROADCAST) { + return NVME_INVALID_NSID | NVME_DNR; + } + + ns = nvme_ns(n, nsid); + if (unlikely(!ns)) { + return NVME_INVALID_FIELD | NVME_DNR; + } + + /* + * If the EUI-64 field is 0 and the NGUID field is 0, the namespace must + * provide a valid Namespace UUID in the Namespace Identification Descriptor + * data structure. QEMU does not yet support setting NGUID.
+ */ + uuid.hdr.nidt = NVME_NIDT_UUID; + uuid.hdr.nidl = NVME_NIDL_UUID; + memcpy(uuid.v, ns->params.uuid.data, NVME_NIDL_UUID); + memcpy(pos, &uuid, sizeof(uuid)); + pos += sizeof(uuid); + + if (ns->params.eui64) { + eui64.hdr.nidt = NVME_NIDT_EUI64; + eui64.hdr.nidl = NVME_NIDL_EUI64; + eui64.v = cpu_to_be64(ns->params.eui64); + memcpy(pos, &eui64, sizeof(eui64)); + pos += sizeof(eui64); + } + + csi.hdr.nidt = NVME_NIDT_CSI; + csi.hdr.nidl = NVME_NIDL_CSI; + csi.v = ns->csi; + memcpy(pos, &csi, sizeof(csi)); + pos += sizeof(csi); + + return nvme_c2h(n, list, sizeof(list), req); +} + +static uint16_t nvme_identify_cmd_set(NvmeCtrl *n, NvmeRequest *req) +{ + uint8_t list[NVME_IDENTIFY_DATA_SIZE] = {}; + static const int data_len = sizeof(list); + + trace_pci_nvme_identify_cmd_set(); + + NVME_SET_CSI(*list, NVME_CSI_NVM); + NVME_SET_CSI(*list, NVME_CSI_ZONED); + + return nvme_c2h(n, list, data_len, req); +} + +static uint16_t nvme_identify(NvmeCtrl *n, NvmeRequest *req) +{ + NvmeIdentify *c = (NvmeIdentify *)&req->cmd; + + trace_pci_nvme_identify(nvme_cid(req), c->cns, le16_to_cpu(c->ctrlid), + c->csi); + + switch (c->cns) { + case NVME_ID_CNS_NS: + return nvme_identify_ns(n, req, true); + case NVME_ID_CNS_NS_PRESENT: + return nvme_identify_ns(n, req, false); + case NVME_ID_CNS_NS_ATTACHED_CTRL_LIST: + return nvme_identify_ctrl_list(n, req, true); + case NVME_ID_CNS_CTRL_LIST: + return nvme_identify_ctrl_list(n, req, false); + case NVME_ID_CNS_CS_NS: + return nvme_identify_ns_csi(n, req, true); + case NVME_ID_CNS_CS_NS_PRESENT: + return nvme_identify_ns_csi(n, req, false); + case NVME_ID_CNS_CTRL: + return nvme_identify_ctrl(n, req); + case NVME_ID_CNS_CS_CTRL: + return nvme_identify_ctrl_csi(n, req); + case NVME_ID_CNS_NS_ACTIVE_LIST: + return nvme_identify_nslist(n, req, true); + case NVME_ID_CNS_NS_PRESENT_LIST: + return nvme_identify_nslist(n, req, false); + case NVME_ID_CNS_CS_NS_ACTIVE_LIST: + return nvme_identify_nslist_csi(n, req, true); + case NVME_ID_CNS_CS_NS_PRESENT_LIST: + return nvme_identify_nslist_csi(n, req, false); + case NVME_ID_CNS_NS_DESCR_LIST: + return nvme_identify_ns_descr_list(n, req); + case NVME_ID_CNS_IO_COMMAND_SET: + return nvme_identify_cmd_set(n, req); + default: + trace_pci_nvme_err_invalid_identify_cns(le32_to_cpu(c->cns)); + return NVME_INVALID_FIELD | NVME_DNR; + } +} + +static uint16_t nvme_abort(NvmeCtrl *n, NvmeRequest *req) +{ + uint16_t sqid = le32_to_cpu(req->cmd.cdw10) & 0xffff; + + req->cqe.result = 1; + if (nvme_check_sqid(n, sqid)) { + return NVME_INVALID_FIELD | NVME_DNR; + } + + return NVME_SUCCESS; +} + +static inline void nvme_set_timestamp(NvmeCtrl *n, uint64_t ts) +{ + trace_pci_nvme_setfeat_timestamp(ts); + + n->host_timestamp = le64_to_cpu(ts); + n->timestamp_set_qemu_clock_ms = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL); +} + +static inline uint64_t nvme_get_timestamp(const NvmeCtrl *n) +{ + uint64_t current_time = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL); + uint64_t elapsed_time = current_time - n->timestamp_set_qemu_clock_ms; + + union nvme_timestamp { + struct { + uint64_t timestamp:48; + uint64_t sync:1; + uint64_t origin:3; + uint64_t rsvd1:12; + }; + uint64_t all; + }; + + union nvme_timestamp ts; + ts.all = 0; + ts.timestamp = n->host_timestamp + elapsed_time; + + /* If the host timestamp is non-zero, set the timestamp origin */ + ts.origin = n->host_timestamp ? 
0x01 : 0x00; + + trace_pci_nvme_getfeat_timestamp(ts.all); + + return cpu_to_le64(ts.all); +} + +static uint16_t nvme_get_feature_timestamp(NvmeCtrl *n, NvmeRequest *req) +{ + uint64_t timestamp = nvme_get_timestamp(n); + + return nvme_c2h(n, (uint8_t *)&timestamp, sizeof(timestamp), req); +} + +static uint16_t nvme_get_feature(NvmeCtrl *n, NvmeRequest *req) +{ + NvmeCmd *cmd = &req->cmd; + uint32_t dw10 = le32_to_cpu(cmd->cdw10); + uint32_t dw11 = le32_to_cpu(cmd->cdw11); + uint32_t nsid = le32_to_cpu(cmd->nsid); + uint32_t result; + uint8_t fid = NVME_GETSETFEAT_FID(dw10); + NvmeGetFeatureSelect sel = NVME_GETFEAT_SELECT(dw10); + uint16_t iv; + NvmeNamespace *ns; + int i; + + static const uint32_t nvme_feature_default[NVME_FID_MAX] = { + [NVME_ARBITRATION] = NVME_ARB_AB_NOLIMIT, + }; + + trace_pci_nvme_getfeat(nvme_cid(req), nsid, fid, sel, dw11); + + if (!nvme_feature_support[fid]) { + return NVME_INVALID_FIELD | NVME_DNR; + } + + if (nvme_feature_cap[fid] & NVME_FEAT_CAP_NS) { + if (!nvme_nsid_valid(n, nsid) || nsid == NVME_NSID_BROADCAST) { + /* + * The Reservation Notification Mask and Reservation Persistence + * features require a status code of Invalid Field in Command when + * NSID is FFFFFFFFh. Since the device does not support those + * features we can always return Invalid Namespace or Format as we + * should do for all other features. + */ + return NVME_INVALID_NSID | NVME_DNR; + } + + if (!nvme_ns(n, nsid)) { + return NVME_INVALID_FIELD | NVME_DNR; + } + } + + switch (sel) { + case NVME_GETFEAT_SELECT_CURRENT: + break; + case NVME_GETFEAT_SELECT_SAVED: + /* no features are saveable by the controller; fallthrough */ + case NVME_GETFEAT_SELECT_DEFAULT: + goto defaults; + case NVME_GETFEAT_SELECT_CAP: + result = nvme_feature_cap[fid]; + goto out; + } + + switch (fid) { + case NVME_TEMPERATURE_THRESHOLD: + result = 0; + + /* + * The controller only implements the Composite Temperature sensor, so + * return 0 for all other sensors. + */ + if (NVME_TEMP_TMPSEL(dw11) != NVME_TEMP_TMPSEL_COMPOSITE) { + goto out; + } + + switch (NVME_TEMP_THSEL(dw11)) { + case NVME_TEMP_THSEL_OVER: + result = n->features.temp_thresh_hi; + goto out; + case NVME_TEMP_THSEL_UNDER: + result = n->features.temp_thresh_low; + goto out; + } + + return NVME_INVALID_FIELD | NVME_DNR; + case NVME_ERROR_RECOVERY: + if (!nvme_nsid_valid(n, nsid)) { + return NVME_INVALID_NSID | NVME_DNR; + } + + ns = nvme_ns(n, nsid); + if (unlikely(!ns)) { + return NVME_INVALID_FIELD | NVME_DNR; + } + + result = ns->features.err_rec; + goto out; + case NVME_VOLATILE_WRITE_CACHE: + result = 0; + for (i = 1; i <= NVME_MAX_NAMESPACES; i++) { + ns = nvme_ns(n, i); + if (!ns) { + continue; + } + + result = blk_enable_write_cache(ns->blkconf.blk); + if (result) { + break; + } + } + trace_pci_nvme_getfeat_vwcache(result ?
"enabled" : "disabled"); + goto out; + case NVME_ASYNCHRONOUS_EVENT_CONF: + result = n->features.async_config; + goto out; + case NVME_TIMESTAMP: + return nvme_get_feature_timestamp(n, req); + default: + break; + } + +defaults: + switch (fid) { + case NVME_TEMPERATURE_THRESHOLD: + result = 0; + + if (NVME_TEMP_TMPSEL(dw11) != NVME_TEMP_TMPSEL_COMPOSITE) { + break; + } + + if (NVME_TEMP_THSEL(dw11) == NVME_TEMP_THSEL_OVER) { + result = NVME_TEMPERATURE_WARNING; + } + + break; + case NVME_NUMBER_OF_QUEUES: + result = (n->params.max_ioqpairs - 1) | + ((n->params.max_ioqpairs - 1) << 16); + trace_pci_nvme_getfeat_numq(result); + break; + case NVME_INTERRUPT_VECTOR_CONF: + iv = dw11 & 0xffff; + if (iv >= n->params.max_ioqpairs + 1) { + return NVME_INVALID_FIELD | NVME_DNR; + } + + result = iv; + if (iv == n->admin_cq.vector) { + result |= NVME_INTVC_NOCOALESCING; + } + break; + default: + result = nvme_feature_default[fid]; + break; + } + +out: + req->cqe.result = cpu_to_le32(result); + return NVME_SUCCESS; +} + +static uint16_t nvme_set_feature_timestamp(NvmeCtrl *n, NvmeRequest *req) +{ + uint16_t ret; + uint64_t timestamp; + + ret = nvme_h2c(n, (uint8_t *)×tamp, sizeof(timestamp), req); + if (ret) { + return ret; + } + + nvme_set_timestamp(n, timestamp); + + return NVME_SUCCESS; +} + +static uint16_t nvme_set_feature(NvmeCtrl *n, NvmeRequest *req) +{ + NvmeNamespace *ns = NULL; + + NvmeCmd *cmd = &req->cmd; + uint32_t dw10 = le32_to_cpu(cmd->cdw10); + uint32_t dw11 = le32_to_cpu(cmd->cdw11); + uint32_t nsid = le32_to_cpu(cmd->nsid); + uint8_t fid = NVME_GETSETFEAT_FID(dw10); + uint8_t save = NVME_SETFEAT_SAVE(dw10); + int i; + + trace_pci_nvme_setfeat(nvme_cid(req), nsid, fid, save, dw11); + + if (save && !(nvme_feature_cap[fid] & NVME_FEAT_CAP_SAVE)) { + return NVME_FID_NOT_SAVEABLE | NVME_DNR; + } + + if (!nvme_feature_support[fid]) { + return NVME_INVALID_FIELD | NVME_DNR; + } + + if (nvme_feature_cap[fid] & NVME_FEAT_CAP_NS) { + if (nsid != NVME_NSID_BROADCAST) { + if (!nvme_nsid_valid(n, nsid)) { + return NVME_INVALID_NSID | NVME_DNR; + } + + ns = nvme_ns(n, nsid); + if (unlikely(!ns)) { + return NVME_INVALID_FIELD | NVME_DNR; + } + } + } else if (nsid && nsid != NVME_NSID_BROADCAST) { + if (!nvme_nsid_valid(n, nsid)) { + return NVME_INVALID_NSID | NVME_DNR; + } + + return NVME_FEAT_NOT_NS_SPEC | NVME_DNR; + } + + if (!(nvme_feature_cap[fid] & NVME_FEAT_CAP_CHANGE)) { + return NVME_FEAT_NOT_CHANGEABLE | NVME_DNR; + } + + switch (fid) { + case NVME_TEMPERATURE_THRESHOLD: + if (NVME_TEMP_TMPSEL(dw11) != NVME_TEMP_TMPSEL_COMPOSITE) { + break; + } + + switch (NVME_TEMP_THSEL(dw11)) { + case NVME_TEMP_THSEL_OVER: + n->features.temp_thresh_hi = NVME_TEMP_TMPTH(dw11); + break; + case NVME_TEMP_THSEL_UNDER: + n->features.temp_thresh_low = NVME_TEMP_TMPTH(dw11); + break; + default: + return NVME_INVALID_FIELD | NVME_DNR; + } + + if ((n->temperature >= n->features.temp_thresh_hi) || + (n->temperature <= n->features.temp_thresh_low)) { + nvme_smart_event(n, NVME_AER_INFO_SMART_TEMP_THRESH); + } + + break; + case NVME_ERROR_RECOVERY: + if (nsid == NVME_NSID_BROADCAST) { + for (i = 1; i <= NVME_MAX_NAMESPACES; i++) { + ns = nvme_ns(n, i); + + if (!ns) { + continue; + } + + if (NVME_ID_NS_NSFEAT_DULBE(ns->id_ns.nsfeat)) { + ns->features.err_rec = dw11; + } + } + + break; + } + + assert(ns); + if (NVME_ID_NS_NSFEAT_DULBE(ns->id_ns.nsfeat)) { + ns->features.err_rec = dw11; + } + break; + case NVME_VOLATILE_WRITE_CACHE: + for (i = 1; i <= NVME_MAX_NAMESPACES; i++) { + ns = nvme_ns(n, i); + if (!ns) { + 
continue; + } + + if (!(dw11 & 0x1) && blk_enable_write_cache(ns->blkconf.blk)) { + blk_flush(ns->blkconf.blk); + } + + blk_set_enable_write_cache(ns->blkconf.blk, dw11 & 1); + } + + break; + + case NVME_NUMBER_OF_QUEUES: + if (n->qs_created) { + return NVME_CMD_SEQ_ERROR | NVME_DNR; + } + + /* + * NVMe v1.3, Section 5.21.1.7: FFFFh is not an allowed value for NCQR + * and NSQR. + */ + if ((dw11 & 0xffff) == 0xffff || ((dw11 >> 16) & 0xffff) == 0xffff) { + return NVME_INVALID_FIELD | NVME_DNR; + } + + trace_pci_nvme_setfeat_numq((dw11 & 0xffff) + 1, + ((dw11 >> 16) & 0xffff) + 1, + n->params.max_ioqpairs, + n->params.max_ioqpairs); + req->cqe.result = cpu_to_le32((n->params.max_ioqpairs - 1) | + ((n->params.max_ioqpairs - 1) << 16)); + break; + case NVME_ASYNCHRONOUS_EVENT_CONF: + n->features.async_config = dw11; + break; + case NVME_TIMESTAMP: + return nvme_set_feature_timestamp(n, req); + case NVME_COMMAND_SET_PROFILE: + if (dw11 & 0x1ff) { + trace_pci_nvme_err_invalid_iocsci(dw11 & 0x1ff); + return NVME_CMD_SET_CMB_REJECTED | NVME_DNR; + } + break; + default: + return NVME_FEAT_NOT_CHANGEABLE | NVME_DNR; + } + return NVME_SUCCESS; +} + +static uint16_t nvme_aer(NvmeCtrl *n, NvmeRequest *req) +{ + trace_pci_nvme_aer(nvme_cid(req)); + + if (n->outstanding_aers > n->params.aerl) { + trace_pci_nvme_aer_aerl_exceeded(); + return NVME_AER_LIMIT_EXCEEDED; + } + + n->aer_reqs[n->outstanding_aers] = req; + n->outstanding_aers++; + + if (!QTAILQ_EMPTY(&n->aer_queue)) { + nvme_process_aers(n); + } + + return NVME_NO_COMPLETE; +} + +static void nvme_update_dmrsl(NvmeCtrl *n) +{ + int nsid; + + for (nsid = 1; nsid <= NVME_MAX_NAMESPACES; nsid++) { + NvmeNamespace *ns = nvme_ns(n, nsid); + if (!ns) { + continue; + } + + n->dmrsl = MIN_NON_ZERO(n->dmrsl, + BDRV_REQUEST_MAX_BYTES / nvme_l2b(ns, 1)); + } +} + +static void nvme_select_iocs_ns(NvmeCtrl *n, NvmeNamespace *ns) +{ + uint32_t cc = ldl_le_p(&n->bar.cc); + + ns->iocs = nvme_cse_iocs_none; + switch (ns->csi) { + case NVME_CSI_NVM: + if (NVME_CC_CSS(cc) != NVME_CC_CSS_ADMIN_ONLY) { + ns->iocs = nvme_cse_iocs_nvm; + } + break; + case NVME_CSI_ZONED: + if (NVME_CC_CSS(cc) == NVME_CC_CSS_CSI) { + ns->iocs = nvme_cse_iocs_zoned; + } else if (NVME_CC_CSS(cc) == NVME_CC_CSS_NVM) { + ns->iocs = nvme_cse_iocs_nvm; + } + break; + } +} + +static uint16_t nvme_ns_attachment(NvmeCtrl *n, NvmeRequest *req) +{ + NvmeNamespace *ns; + NvmeCtrl *ctrl; + uint16_t list[NVME_CONTROLLER_LIST_SIZE] = {}; + uint32_t nsid = le32_to_cpu(req->cmd.nsid); + uint32_t dw10 = le32_to_cpu(req->cmd.cdw10); + uint8_t sel = dw10 & 0xf; + uint16_t *nr_ids = &list[0]; + uint16_t *ids = &list[1]; + uint16_t ret; + int i; + + trace_pci_nvme_ns_attachment(nvme_cid(req), dw10 & 0xf); + + if (!nvme_nsid_valid(n, nsid)) { + return NVME_INVALID_NSID | NVME_DNR; + } + + ns = nvme_subsys_ns(n->subsys, nsid); + if (!ns) { + return NVME_INVALID_FIELD | NVME_DNR; + } + + ret = nvme_h2c(n, (uint8_t *)list, 4096, req); + if (ret) { + return ret; + } + + if (!*nr_ids) { + return NVME_NS_CTRL_LIST_INVALID | NVME_DNR; + } + + *nr_ids = MIN(*nr_ids, NVME_CONTROLLER_LIST_SIZE - 1); + for (i = 0; i < *nr_ids; i++) { + ctrl = nvme_subsys_ctrl(n->subsys, ids[i]); + if (!ctrl) { + return NVME_NS_CTRL_LIST_INVALID | NVME_DNR; + } + + switch (sel) { + case NVME_NS_ATTACHMENT_ATTACH: + if (nvme_ns(ctrl, nsid)) { + return NVME_NS_ALREADY_ATTACHED | NVME_DNR; + } + + if (ns->attached && !ns->params.shared) { + return NVME_NS_PRIVATE | NVME_DNR; + } + + nvme_attach_ns(ctrl, ns); + nvme_select_iocs_ns(ctrl, ns); 
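+ /* the newly attached namespace now advertises the I/O command sets allowed by this controller's CC.CSS */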
+ + break; + + case NVME_NS_ATTACHMENT_DETACH: + if (!nvme_ns(ctrl, nsid)) { + return NVME_NS_NOT_ATTACHED | NVME_DNR; + } + + ctrl->namespaces[nsid] = NULL; + ns->attached--; + + nvme_update_dmrsl(ctrl); + + break; + + default: + return NVME_INVALID_FIELD | NVME_DNR; + } + + /* + * Add namespace id to the changed namespace id list for event clearing + * via Get Log Page command. + */ + if (!test_and_set_bit(nsid, ctrl->changed_nsids)) { + nvme_enqueue_event(ctrl, NVME_AER_TYPE_NOTICE, + NVME_AER_INFO_NOTICE_NS_ATTR_CHANGED, + NVME_LOG_CHANGED_NSLIST); + } + } + + return NVME_SUCCESS; +} + +typedef struct NvmeFormatAIOCB { + BlockAIOCB common; + BlockAIOCB *aiocb; + QEMUBH *bh; + NvmeRequest *req; + int ret; + + NvmeNamespace *ns; + uint32_t nsid; + bool broadcast; + int64_t offset; +} NvmeFormatAIOCB; + +static void nvme_format_bh(void *opaque); + +static void nvme_format_cancel(BlockAIOCB *aiocb) +{ + NvmeFormatAIOCB *iocb = container_of(aiocb, NvmeFormatAIOCB, common); + + if (iocb->aiocb) { + blk_aio_cancel_async(iocb->aiocb); + } +} + +static const AIOCBInfo nvme_format_aiocb_info = { + .aiocb_size = sizeof(NvmeFormatAIOCB), + .cancel_async = nvme_format_cancel, + .get_aio_context = nvme_get_aio_context, +}; + +static void nvme_format_set(NvmeNamespace *ns, NvmeCmd *cmd) +{ + uint32_t dw10 = le32_to_cpu(cmd->cdw10); + uint8_t lbaf = dw10 & 0xf; + uint8_t pi = (dw10 >> 5) & 0x7; + uint8_t mset = (dw10 >> 4) & 0x1; + uint8_t pil = (dw10 >> 8) & 0x1; + + trace_pci_nvme_format_set(ns->params.nsid, lbaf, mset, pi, pil); + + ns->id_ns.dps = (pil << 3) | pi; + ns->id_ns.flbas = lbaf | (mset << 4); + + nvme_ns_init_format(ns); +} + +static void nvme_format_ns_cb(void *opaque, int ret) +{ + NvmeFormatAIOCB *iocb = opaque; + NvmeRequest *req = iocb->req; + NvmeNamespace *ns = iocb->ns; + int bytes; + + if (ret < 0) { + iocb->ret = ret; + goto done; + } + + assert(ns); + + if (iocb->offset < ns->size) { + bytes = MIN(BDRV_REQUEST_MAX_BYTES, ns->size - iocb->offset); + + iocb->aiocb = blk_aio_pwrite_zeroes(ns->blkconf.blk, iocb->offset, + bytes, BDRV_REQ_MAY_UNMAP, + nvme_format_ns_cb, iocb); + + iocb->offset += bytes; + return; + } + + nvme_format_set(ns, &req->cmd); + ns->status = 0x0; + iocb->ns = NULL; + iocb->offset = 0; + +done: + iocb->aiocb = NULL; + qemu_bh_schedule(iocb->bh); +} + +static uint16_t nvme_format_check(NvmeNamespace *ns, uint8_t lbaf, uint8_t pi) +{ + if (ns->params.zoned) { + return NVME_INVALID_FORMAT | NVME_DNR; + } + + if (lbaf > ns->id_ns.nlbaf) { + return NVME_INVALID_FORMAT | NVME_DNR; + } + + if (pi && (ns->id_ns.lbaf[lbaf].ms < sizeof(NvmeDifTuple))) { + return NVME_INVALID_FORMAT | NVME_DNR; + } + + if (pi && pi > NVME_ID_NS_DPS_TYPE_3) { + return NVME_INVALID_FIELD | NVME_DNR; + } + + return NVME_SUCCESS; +} + +static void nvme_format_bh(void *opaque) +{ + NvmeFormatAIOCB *iocb = opaque; + NvmeRequest *req = iocb->req; + NvmeCtrl *n = nvme_ctrl(req); + uint32_t dw10 = le32_to_cpu(req->cmd.cdw10); + uint8_t lbaf = dw10 & 0xf; + uint8_t pi = (dw10 >> 5) & 0x7; + uint16_t status; + int i; + + if (iocb->ret < 0) { + goto done; + } + + if (iocb->broadcast) { + for (i = iocb->nsid + 1; i <= NVME_MAX_NAMESPACES; i++) { + iocb->ns = nvme_ns(n, i); + if (iocb->ns) { + iocb->nsid = i; + break; + } + } + } + + if (!iocb->ns) { + goto done; + } + + status = nvme_format_check(iocb->ns, lbaf, pi); + if (status) { + req->status = status; + goto done; + } + + iocb->ns->status = NVME_FORMAT_IN_PROGRESS; + nvme_format_ns_cb(iocb, 0); + return; + +done: + qemu_bh_delete(iocb->bh); 
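+ /* + * The bottom half is single-shot and has just been deleted; clear the + * stale pointer and complete the original request through the common + * AIOCB callback below. + */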
+ iocb->bh = NULL; + + iocb->common.cb(iocb->common.opaque, iocb->ret); + + qemu_aio_unref(iocb); +} + +static uint16_t nvme_format(NvmeCtrl *n, NvmeRequest *req) +{ + NvmeFormatAIOCB *iocb; + uint32_t nsid = le32_to_cpu(req->cmd.nsid); + uint16_t status; + + iocb = qemu_aio_get(&nvme_format_aiocb_info, NULL, nvme_misc_cb, req); + + iocb->req = req; + iocb->bh = qemu_bh_new(nvme_format_bh, iocb); + iocb->ret = 0; + iocb->ns = NULL; + iocb->nsid = 0; + iocb->broadcast = (nsid == NVME_NSID_BROADCAST); + iocb->offset = 0; + + if (!iocb->broadcast) { + if (!nvme_nsid_valid(n, nsid)) { + status = NVME_INVALID_NSID | NVME_DNR; + goto out; + } + + iocb->ns = nvme_ns(n, nsid); + if (!iocb->ns) { + status = NVME_INVALID_FIELD | NVME_DNR; + goto out; + } + } + + req->aiocb = &iocb->common; + qemu_bh_schedule(iocb->bh); + + return NVME_NO_COMPLETE; + +out: + qemu_bh_delete(iocb->bh); + iocb->bh = NULL; + qemu_aio_unref(iocb); + return status; +} + +static uint16_t nvme_admin_cmd(NvmeCtrl *n, NvmeRequest *req) +{ + trace_pci_nvme_admin_cmd(nvme_cid(req), nvme_sqid(req), req->cmd.opcode, + nvme_adm_opc_str(req->cmd.opcode)); + + if (!(nvme_cse_acs[req->cmd.opcode] & NVME_CMD_EFF_CSUPP)) { + trace_pci_nvme_err_invalid_admin_opc(req->cmd.opcode); + return NVME_INVALID_OPCODE | NVME_DNR; + } + + /* SGLs shall not be used for Admin commands in NVMe over PCIe */ + if (NVME_CMD_FLAGS_PSDT(req->cmd.flags) != NVME_PSDT_PRP) { + return NVME_INVALID_FIELD | NVME_DNR; + } + + if (NVME_CMD_FLAGS_FUSE(req->cmd.flags)) { + return NVME_INVALID_FIELD; + } + + switch (req->cmd.opcode) { + case NVME_ADM_CMD_DELETE_SQ: + return nvme_del_sq(n, req); + case NVME_ADM_CMD_CREATE_SQ: + return nvme_create_sq(n, req); + case NVME_ADM_CMD_GET_LOG_PAGE: + return nvme_get_log(n, req); + case NVME_ADM_CMD_DELETE_CQ: + return nvme_del_cq(n, req); + case NVME_ADM_CMD_CREATE_CQ: + return nvme_create_cq(n, req); + case NVME_ADM_CMD_IDENTIFY: + return nvme_identify(n, req); + case NVME_ADM_CMD_ABORT: + return nvme_abort(n, req); + case NVME_ADM_CMD_SET_FEATURES: + return nvme_set_feature(n, req); + case NVME_ADM_CMD_GET_FEATURES: + return nvme_get_feature(n, req); + case NVME_ADM_CMD_ASYNC_EV_REQ: + return nvme_aer(n, req); + case NVME_ADM_CMD_NS_ATTACHMENT: + return nvme_ns_attachment(n, req); + case NVME_ADM_CMD_FORMAT_NVM: + return nvme_format(n, req); + default: + assert(false); + } + + return NVME_INVALID_OPCODE | NVME_DNR; +} + +static void nvme_process_sq(void *opaque) +{ + NvmeSQueue *sq = opaque; + NvmeCtrl *n = sq->ctrl; + NvmeCQueue *cq = n->cq[sq->cqid]; + + uint16_t status; + hwaddr addr; + NvmeCmd cmd; + NvmeRequest *req; + + while (!(nvme_sq_empty(sq) || QTAILQ_EMPTY(&sq->req_list))) { + addr = sq->dma_addr + sq->head * n->sqe_size; + if (nvme_addr_read(n, addr, (void *)&cmd, sizeof(cmd))) { + trace_pci_nvme_err_addr_read(addr); + trace_pci_nvme_err_cfs(); + stl_le_p(&n->bar.csts, NVME_CSTS_FAILED); + break; + } + nvme_inc_sq_head(sq); + + req = QTAILQ_FIRST(&sq->req_list); + QTAILQ_REMOVE(&sq->req_list, req, entry); + QTAILQ_INSERT_TAIL(&sq->out_req_list, req, entry); + nvme_req_clear(req); + req->cqe.cid = cmd.cid; + memcpy(&req->cmd, &cmd, sizeof(NvmeCmd)); + + status = sq->sqid ? 
nvme_io_cmd(n, req) : + nvme_admin_cmd(n, req); + if (status != NVME_NO_COMPLETE) { + req->status = status; + nvme_enqueue_req_completion(cq, req); + } + } +} + +static void nvme_ctrl_reset(NvmeCtrl *n) +{ + NvmeNamespace *ns; + int i; + + for (i = 1; i <= NVME_MAX_NAMESPACES; i++) { + ns = nvme_ns(n, i); + if (!ns) { + continue; + } + + nvme_ns_drain(ns); + } + + for (i = 0; i < n->params.max_ioqpairs + 1; i++) { + if (n->sq[i] != NULL) { + nvme_free_sq(n->sq[i], n); + } + } + for (i = 0; i < n->params.max_ioqpairs + 1; i++) { + if (n->cq[i] != NULL) { + nvme_free_cq(n->cq[i], n); + } + } + + while (!QTAILQ_EMPTY(&n->aer_queue)) { + NvmeAsyncEvent *event = QTAILQ_FIRST(&n->aer_queue); + QTAILQ_REMOVE(&n->aer_queue, event, entry); + g_free(event); + } + + n->aer_queued = 0; + n->outstanding_aers = 0; + n->qs_created = false; +} + +static void nvme_ctrl_shutdown(NvmeCtrl *n) +{ + NvmeNamespace *ns; + int i; + + if (n->pmr.dev) { + memory_region_msync(&n->pmr.dev->mr, 0, n->pmr.dev->size); + } + + for (i = 1; i <= NVME_MAX_NAMESPACES; i++) { + ns = nvme_ns(n, i); + if (!ns) { + continue; + } + + nvme_ns_shutdown(ns); + } +} + +static void nvme_select_iocs(NvmeCtrl *n) +{ + NvmeNamespace *ns; + int i; + + for (i = 1; i <= NVME_MAX_NAMESPACES; i++) { + ns = nvme_ns(n, i); + if (!ns) { + continue; + } + + nvme_select_iocs_ns(n, ns); + } +} + +static int nvme_start_ctrl(NvmeCtrl *n) +{ + uint64_t cap = ldq_le_p(&n->bar.cap); + uint32_t cc = ldl_le_p(&n->bar.cc); + uint32_t aqa = ldl_le_p(&n->bar.aqa); + uint64_t asq = ldq_le_p(&n->bar.asq); + uint64_t acq = ldq_le_p(&n->bar.acq); + uint32_t page_bits = NVME_CC_MPS(cc) + 12; + uint32_t page_size = 1 << page_bits; + + if (unlikely(n->cq[0])) { + trace_pci_nvme_err_startfail_cq(); + return -1; + } + if (unlikely(n->sq[0])) { + trace_pci_nvme_err_startfail_sq(); + return -1; + } + if (unlikely(asq & (page_size - 1))) { + trace_pci_nvme_err_startfail_asq_misaligned(asq); + return -1; + } + if (unlikely(acq & (page_size - 1))) { + trace_pci_nvme_err_startfail_acq_misaligned(acq); + return -1; + } + if (unlikely(!(NVME_CAP_CSS(cap) & (1 << NVME_CC_CSS(cc))))) { + trace_pci_nvme_err_startfail_css(NVME_CC_CSS(cc)); + return -1; + } + if (unlikely(NVME_CC_MPS(cc) < NVME_CAP_MPSMIN(cap))) { + trace_pci_nvme_err_startfail_page_too_small( + NVME_CC_MPS(cc), + NVME_CAP_MPSMIN(cap)); + return -1; + } + if (unlikely(NVME_CC_MPS(cc) > + NVME_CAP_MPSMAX(cap))) { + trace_pci_nvme_err_startfail_page_too_large( + NVME_CC_MPS(cc), + NVME_CAP_MPSMAX(cap)); + return -1; + } + if (unlikely(NVME_CC_IOCQES(cc) < + NVME_CTRL_CQES_MIN(n->id_ctrl.cqes))) { + trace_pci_nvme_err_startfail_cqent_too_small( + NVME_CC_IOCQES(cc), + NVME_CTRL_CQES_MIN(cap)); + return -1; + } + if (unlikely(NVME_CC_IOCQES(cc) > + NVME_CTRL_CQES_MAX(n->id_ctrl.cqes))) { + trace_pci_nvme_err_startfail_cqent_too_large( + NVME_CC_IOCQES(cc), + NVME_CTRL_CQES_MAX(cap)); + return -1; + } + if (unlikely(NVME_CC_IOSQES(cc) < + NVME_CTRL_SQES_MIN(n->id_ctrl.sqes))) { + trace_pci_nvme_err_startfail_sqent_too_small( + NVME_CC_IOSQES(cc), + NVME_CTRL_SQES_MIN(cap)); + return -1; + } + if (unlikely(NVME_CC_IOSQES(cc) > + NVME_CTRL_SQES_MAX(n->id_ctrl.sqes))) { + trace_pci_nvme_err_startfail_sqent_too_large( + NVME_CC_IOSQES(cc), + NVME_CTRL_SQES_MAX(cap)); + return -1; + } + if (unlikely(!NVME_AQA_ASQS(aqa))) { + trace_pci_nvme_err_startfail_asqent_sz_zero(); + return -1; + } + if (unlikely(!NVME_AQA_ACQS(aqa))) { + trace_pci_nvme_err_startfail_acqent_sz_zero(); + return -1; + } + + n->page_bits = page_bits; + 
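/* + * Derived geometry, as a worked example: with CC.MPS = 0 the page size + * is 1 << (0 + 12) = 4096 bytes, so a single PRP list page holds + * 4096 / sizeof(uint64_t) = 512 entries (max_prp_ents below). + */ +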
n->page_size = page_size; + n->max_prp_ents = n->page_size / sizeof(uint64_t); + n->cqe_size = 1 << NVME_CC_IOCQES(cc); + n->sqe_size = 1 << NVME_CC_IOSQES(cc); + nvme_init_cq(&n->admin_cq, n, acq, 0, 0, NVME_AQA_ACQS(aqa) + 1, 1); + nvme_init_sq(&n->admin_sq, n, asq, 0, 0, NVME_AQA_ASQS(aqa) + 1); + + nvme_set_timestamp(n, 0ULL); + + QTAILQ_INIT(&n->aer_queue); + + nvme_select_iocs(n); + + return 0; +} + +static void nvme_cmb_enable_regs(NvmeCtrl *n) +{ + uint32_t cmbloc = ldl_le_p(&n->bar.cmbloc); + uint32_t cmbsz = ldl_le_p(&n->bar.cmbsz); + + NVME_CMBLOC_SET_CDPCILS(cmbloc, 1); + NVME_CMBLOC_SET_CDPMLS(cmbloc, 1); + NVME_CMBLOC_SET_BIR(cmbloc, NVME_CMB_BIR); + stl_le_p(&n->bar.cmbloc, cmbloc); + + NVME_CMBSZ_SET_SQS(cmbsz, 1); + NVME_CMBSZ_SET_CQS(cmbsz, 0); + NVME_CMBSZ_SET_LISTS(cmbsz, 1); + NVME_CMBSZ_SET_RDS(cmbsz, 1); + NVME_CMBSZ_SET_WDS(cmbsz, 1); + NVME_CMBSZ_SET_SZU(cmbsz, 2); /* MBs */ + NVME_CMBSZ_SET_SZ(cmbsz, n->params.cmb_size_mb); + stl_le_p(&n->bar.cmbsz, cmbsz); +} + +static void nvme_write_bar(NvmeCtrl *n, hwaddr offset, uint64_t data, + unsigned size) +{ + uint64_t cap = ldq_le_p(&n->bar.cap); + uint32_t cc = ldl_le_p(&n->bar.cc); + uint32_t intms = ldl_le_p(&n->bar.intms); + uint32_t csts = ldl_le_p(&n->bar.csts); + uint32_t pmrsts = ldl_le_p(&n->bar.pmrsts); + + if (unlikely(offset & (sizeof(uint32_t) - 1))) { + NVME_GUEST_ERR(pci_nvme_ub_mmiowr_misaligned32, + "MMIO write not 32-bit aligned," + " offset=0x%"PRIx64"", offset); + /* should be ignored, fall through for now */ + } + + if (unlikely(size < sizeof(uint32_t))) { + NVME_GUEST_ERR(pci_nvme_ub_mmiowr_toosmall, + "MMIO write smaller than 32-bits," + " offset=0x%"PRIx64", size=%u", + offset, size); + /* should be ignored, fall through for now */ + } + + switch (offset) { + case NVME_REG_INTMS: + if (unlikely(msix_enabled(&(n->parent_obj)))) { + NVME_GUEST_ERR(pci_nvme_ub_mmiowr_intmask_with_msix, + "undefined access to interrupt mask set" + " when MSI-X is enabled"); + /* should be ignored, fall through for now */ + } + intms |= data; + stl_le_p(&n->bar.intms, intms); + n->bar.intmc = n->bar.intms; + trace_pci_nvme_mmio_intm_set(data & 0xffffffff, intms); + nvme_irq_check(n); + break; + case NVME_REG_INTMC: + if (unlikely(msix_enabled(&(n->parent_obj)))) { + NVME_GUEST_ERR(pci_nvme_ub_mmiowr_intmask_with_msix, + "undefined access to interrupt mask clr" + " when MSI-X is enabled"); + /* should be ignored, fall through for now */ + } + intms &= ~data; + stl_le_p(&n->bar.intms, intms); + n->bar.intmc = n->bar.intms; + trace_pci_nvme_mmio_intm_clr(data & 0xffffffff, intms); + nvme_irq_check(n); + break; + case NVME_REG_CC: + trace_pci_nvme_mmio_cfg(data & 0xffffffff); + + /* Windows first sends data, then sends enable bit */ + if (!NVME_CC_EN(data) && !NVME_CC_EN(cc) && + !NVME_CC_SHN(data) && !NVME_CC_SHN(cc)) + { + cc = data; + } + + if (NVME_CC_EN(data) && !NVME_CC_EN(cc)) { + cc = data; + + /* flush CC since nvme_start_ctrl() needs the value */ + stl_le_p(&n->bar.cc, cc); + if (unlikely(nvme_start_ctrl(n))) { + trace_pci_nvme_err_startfail(); + csts = NVME_CSTS_FAILED; + } else { + trace_pci_nvme_mmio_start_success(); + csts = NVME_CSTS_READY; + } + } else if (!NVME_CC_EN(data) && NVME_CC_EN(cc)) { + trace_pci_nvme_mmio_stopped(); + nvme_ctrl_reset(n); + cc = 0; + csts &= ~NVME_CSTS_READY; + } + + if (NVME_CC_SHN(data) && !(NVME_CC_SHN(cc))) { + trace_pci_nvme_mmio_shutdown_set(); + nvme_ctrl_shutdown(n); + cc = data; + csts |= NVME_CSTS_SHST_COMPLETE; + } else if (!NVME_CC_SHN(data) && NVME_CC_SHN(cc)) { + 
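/* + * The host cleared CC.SHN after a completed shutdown: clear + * CSTS.SHST so the controller reports normal operation again. + */ +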
trace_pci_nvme_mmio_shutdown_cleared(); + csts &= ~NVME_CSTS_SHST_COMPLETE; + cc = data; + } + + stl_le_p(&n->bar.cc, cc); + stl_le_p(&n->bar.csts, csts); + + break; + case NVME_REG_CSTS: + if (data & (1 << 4)) { + NVME_GUEST_ERR(pci_nvme_ub_mmiowr_ssreset_w1c_unsupported, + "attempted to W1C CSTS.NSSRO" + " but CAP.NSSRS is zero (not supported)"); + } else if (data != 0) { + NVME_GUEST_ERR(pci_nvme_ub_mmiowr_ro_csts, + "attempted to set a read only bit" + " of controller status"); + } + break; + case NVME_REG_NSSR: + if (data == 0x4e564d65) { + trace_pci_nvme_ub_mmiowr_ssreset_unsupported(); + } else { + /* The spec says that writes of other values have no effect */ + return; + } + break; + case NVME_REG_AQA: + stl_le_p(&n->bar.aqa, data); + trace_pci_nvme_mmio_aqattr(data & 0xffffffff); + break; + case NVME_REG_ASQ: + stn_le_p(&n->bar.asq, size, data); + trace_pci_nvme_mmio_asqaddr(data); + break; + case NVME_REG_ASQ + 4: + stl_le_p((uint8_t *)&n->bar.asq + 4, data); + trace_pci_nvme_mmio_asqaddr_hi(data, ldq_le_p(&n->bar.asq)); + break; + case NVME_REG_ACQ: + trace_pci_nvme_mmio_acqaddr(data); + stn_le_p(&n->bar.acq, size, data); + break; + case NVME_REG_ACQ + 4: + stl_le_p((uint8_t *)&n->bar.acq + 4, data); + trace_pci_nvme_mmio_acqaddr_hi(data, ldq_le_p(&n->bar.acq)); + break; + case NVME_REG_CMBLOC: + NVME_GUEST_ERR(pci_nvme_ub_mmiowr_cmbloc_reserved, + "invalid write to reserved CMBLOC" + " when CMBSZ is zero, ignored"); + return; + case NVME_REG_CMBSZ: + NVME_GUEST_ERR(pci_nvme_ub_mmiowr_cmbsz_readonly, + "invalid write to read only CMBSZ, ignored"); + return; + case NVME_REG_CMBMSC: + if (!NVME_CAP_CMBS(cap)) { + return; + } + + stn_le_p(&n->bar.cmbmsc, size, data); + n->cmb.cmse = false; + + if (NVME_CMBMSC_CRE(data)) { + nvme_cmb_enable_regs(n); + + if (NVME_CMBMSC_CMSE(data)) { + uint64_t cmbmsc = ldq_le_p(&n->bar.cmbmsc); + hwaddr cba = NVME_CMBMSC_CBA(cmbmsc) << CMBMSC_CBA_SHIFT; + if (cba + int128_get64(n->cmb.mem.size) < cba) { + uint32_t cmbsts = ldl_le_p(&n->bar.cmbsts); + NVME_CMBSTS_SET_CBAI(cmbsts, 1); + stl_le_p(&n->bar.cmbsts, cmbsts); + return; + } + + n->cmb.cba = cba; + n->cmb.cmse = true; + } + } else { + n->bar.cmbsz = 0; + n->bar.cmbloc = 0; + } + + return; + case NVME_REG_CMBMSC + 4: + stl_le_p((uint8_t *)&n->bar.cmbmsc + 4, data); + return; + + case NVME_REG_PMRCAP: + NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrcap_readonly, + "invalid write to PMRCAP register, ignored"); + return; + case NVME_REG_PMRCTL: + if (!NVME_CAP_PMRS(cap)) { + return; + } + + stl_le_p(&n->bar.pmrctl, data); + if (NVME_PMRCTL_EN(data)) { + memory_region_set_enabled(&n->pmr.dev->mr, true); + pmrsts = 0; + } else { + memory_region_set_enabled(&n->pmr.dev->mr, false); + NVME_PMRSTS_SET_NRDY(pmrsts, 1); + n->pmr.cmse = false; + } + stl_le_p(&n->bar.pmrsts, pmrsts); + return; + case NVME_REG_PMRSTS: + NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrsts_readonly, + "invalid write to PMRSTS register, ignored"); + return; + case NVME_REG_PMREBS: + NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrebs_readonly, + "invalid write to PMREBS register, ignored"); + return; + case NVME_REG_PMRSWTP: + NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrswtp_readonly, + "invalid write to PMRSWTP register, ignored"); + return; + case NVME_REG_PMRMSCL: + if (!NVME_CAP_PMRS(cap)) { + return; + } + + stl_le_p(&n->bar.pmrmscl, data); + n->pmr.cmse = false; + + if (NVME_PMRMSCL_CMSE(data)) { + uint64_t pmrmscu = ldl_le_p(&n->bar.pmrmscu); + hwaddr cba = pmrmscu << 32 | + (NVME_PMRMSCL_CBA(data) << PMRMSCL_CBA_SHIFT); + if (cba + 
int128_get64(n->pmr.dev->mr.size) < cba) { + NVME_PMRSTS_SET_CBAI(pmrsts, 1); + stl_le_p(&n->bar.pmrsts, pmrsts); + return; + } + + n->pmr.cmse = true; + n->pmr.cba = cba; + } + + return; + case NVME_REG_PMRMSCU: + if (!NVME_CAP_PMRS(cap)) { + return; + } + + stl_le_p(&n->bar.pmrmscu, data); + return; + default: + NVME_GUEST_ERR(pci_nvme_ub_mmiowr_invalid, + "invalid MMIO write," + " offset=0x%"PRIx64", data=%"PRIx64"", + offset, data); + break; + } +} + +static uint64_t nvme_mmio_read(void *opaque, hwaddr addr, unsigned size) +{ + NvmeCtrl *n = (NvmeCtrl *)opaque; + uint8_t *ptr = (uint8_t *)&n->bar; + + trace_pci_nvme_mmio_read(addr, size); + + if (unlikely(addr & (sizeof(uint32_t) - 1))) { + NVME_GUEST_ERR(pci_nvme_ub_mmiord_misaligned32, + "MMIO read not 32-bit aligned," + " offset=0x%"PRIx64"", addr); + /* should RAZ, fall through for now */ + } else if (unlikely(size < sizeof(uint32_t))) { + NVME_GUEST_ERR(pci_nvme_ub_mmiord_toosmall, + "MMIO read smaller than 32-bits," + " offset=0x%"PRIx64"", addr); + /* should RAZ, fall through for now */ + } + + if (addr > sizeof(n->bar) - size) { + NVME_GUEST_ERR(pci_nvme_ub_mmiord_invalid_ofs, + "MMIO read beyond last register," + " offset=0x%"PRIx64", returning 0", addr); + + return 0; + } + + /* + * When PMRWBM bit 1 is set then a read + * from PMRSTS should ensure prior writes + * made it to persistent media + */ + if (addr == NVME_REG_PMRSTS && + (NVME_PMRCAP_PMRWBM(ldl_le_p(&n->bar.pmrcap)) & 0x02)) { + memory_region_msync(&n->pmr.dev->mr, 0, n->pmr.dev->size); + } + + return ldn_le_p(ptr + addr, size); +} + +static void nvme_process_db(NvmeCtrl *n, hwaddr addr, int val) +{ + uint32_t qid; + + if (unlikely(addr & ((1 << 2) - 1))) { + NVME_GUEST_ERR(pci_nvme_ub_db_wr_misaligned, + "doorbell write not 32-bit aligned," + " offset=0x%"PRIx64", ignoring", addr); + return; + } + + if (((addr - 0x1000) >> 2) & 1) { + /* Completion queue doorbell write */ + + uint16_t new_head = val & 0xffff; + int start_sqs; + NvmeCQueue *cq; + + qid = (addr - (0x1000 + (1 << 2))) >> 3; + if (unlikely(nvme_check_cqid(n, qid))) { + NVME_GUEST_ERR(pci_nvme_ub_db_wr_invalid_cq, + "completion queue doorbell write" + " for nonexistent queue," + " sqid=%"PRIu32", ignoring", qid); + + /* + * NVM Express v1.3d, Section 4.1 states: "If host software writes + * an invalid value to the Submission Queue Tail Doorbell or + * Completion Queue Head Doorbell register and an Asynchronous Event + * Request command is outstanding, then an asynchronous event is + * posted to the Admin Completion Queue with a status code of + * Invalid Doorbell Write Value." + * + * Also note that the spec includes the "Invalid Doorbell Register" + * status code, but nowhere does it specify when to use it. + * However, it seems reasonable to use it here in a similar + * fashion. + */ + if (n->outstanding_aers) { + nvme_enqueue_event(n, NVME_AER_TYPE_ERROR, + NVME_AER_INFO_ERR_INVALID_DB_REGISTER, + NVME_LOG_ERROR_INFO); + } + + return; + } + + cq = n->cq[qid]; + if (unlikely(new_head >= cq->size)) { + NVME_GUEST_ERR(pci_nvme_ub_db_wr_invalid_cqhead, + "completion queue doorbell write value" + " beyond queue size, sqid=%"PRIu32"," + " new_head=%"PRIu16", ignoring", + qid, new_head); + + if (n->outstanding_aers) { + nvme_enqueue_event(n, NVME_AER_TYPE_ERROR, + NVME_AER_INFO_ERR_INVALID_DB_VALUE, + NVME_LOG_ERROR_INFO); + } + + return; + } + + trace_pci_nvme_mmio_doorbell_cq(cq->cqid, new_head); + + start_sqs = nvme_cq_full(cq) ?
1 : 0; + cq->head = new_head; + if (start_sqs) { + NvmeSQueue *sq; + QTAILQ_FOREACH(sq, &cq->sq_list, entry) { + timer_mod(sq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500); + } + timer_mod(cq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500); + } + + if (cq->tail == cq->head) { + if (cq->irq_enabled) { + n->cq_pending--; + } + + nvme_irq_deassert(n, cq); + } + } else { + /* Submission queue doorbell write */ + + uint16_t new_tail = val & 0xffff; + NvmeSQueue *sq; + + qid = (addr - 0x1000) >> 3; + if (unlikely(nvme_check_sqid(n, qid))) { + NVME_GUEST_ERR(pci_nvme_ub_db_wr_invalid_sq, + "submission queue doorbell write" + " for nonexistent queue," + " sqid=%"PRIu32", ignoring", qid); + + if (n->outstanding_aers) { + nvme_enqueue_event(n, NVME_AER_TYPE_ERROR, + NVME_AER_INFO_ERR_INVALID_DB_REGISTER, + NVME_LOG_ERROR_INFO); + } + + return; + } + + sq = n->sq[qid]; + if (unlikely(new_tail >= sq->size)) { + NVME_GUEST_ERR(pci_nvme_ub_db_wr_invalid_sqtail, + "submission queue doorbell write value" + " beyond queue size, sqid=%"PRIu32"," + " new_tail=%"PRIu16", ignoring", + qid, new_tail); + + if (n->outstanding_aers) { + nvme_enqueue_event(n, NVME_AER_TYPE_ERROR, + NVME_AER_INFO_ERR_INVALID_DB_VALUE, + NVME_LOG_ERROR_INFO); + } + + return; + } + + trace_pci_nvme_mmio_doorbell_sq(sq->sqid, new_tail); + + sq->tail = new_tail; + timer_mod(sq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500); + } +} + +static void nvme_mmio_write(void *opaque, hwaddr addr, uint64_t data, + unsigned size) +{ + NvmeCtrl *n = (NvmeCtrl *)opaque; + + trace_pci_nvme_mmio_write(addr, data, size); + + if (addr < sizeof(n->bar)) { + nvme_write_bar(n, addr, data, size); + } else { + nvme_process_db(n, addr, data); + } +} + +static const MemoryRegionOps nvme_mmio_ops = { + .read = nvme_mmio_read, + .write = nvme_mmio_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .impl = { + .min_access_size = 2, + .max_access_size = 8, + }, +}; + +static void nvme_cmb_write(void *opaque, hwaddr addr, uint64_t data, + unsigned size) +{ + NvmeCtrl *n = (NvmeCtrl *)opaque; + stn_le_p(&n->cmb.buf[addr], size, data); +} + +static uint64_t nvme_cmb_read(void *opaque, hwaddr addr, unsigned size) +{ + NvmeCtrl *n = (NvmeCtrl *)opaque; + return ldn_le_p(&n->cmb.buf[addr], size); +} + +static const MemoryRegionOps nvme_cmb_ops = { + .read = nvme_cmb_read, + .write = nvme_cmb_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .impl = { + .min_access_size = 1, + .max_access_size = 8, + }, +}; + +static void nvme_check_constraints(NvmeCtrl *n, Error **errp) +{ + NvmeParams *params = &n->params; + + if (params->num_queues) { + warn_report("num_queues is deprecated; please use max_ioqpairs " + "instead"); + + params->max_ioqpairs = params->num_queues - 1; + } + + if (n->namespace.blkconf.blk && n->subsys) { + error_setg(errp, "subsystem support is unavailable with legacy " + "namespace ('drive' property)"); + return; + } + + if (params->max_ioqpairs < 1 || + params->max_ioqpairs > NVME_MAX_IOQPAIRS) { + error_setg(errp, "max_ioqpairs must be between 1 and %d", + NVME_MAX_IOQPAIRS); + return; + } + + if (params->msix_qsize < 1 || + params->msix_qsize > PCI_MSIX_FLAGS_QSIZE + 1) { + error_setg(errp, "msix_qsize must be between 1 and %d", + PCI_MSIX_FLAGS_QSIZE + 1); + return; + } + + if (!params->serial) { + error_setg(errp, "serial property not set"); + return; + } + + if (n->pmr.dev) { + if (host_memory_backend_is_mapped(n->pmr.dev)) { + error_setg(errp, "can't use already busy memdev: %s", + 
object_get_canonical_path_component(OBJECT(n->pmr.dev))); + return; + } + + if (!is_power_of_2(n->pmr.dev->size)) { + error_setg(errp, "pmr backend size needs to be a power of 2"); + return; + } + + host_memory_backend_set_mapped(n->pmr.dev, true); + } + + if (n->params.zasl > n->params.mdts) { + error_setg(errp, "zoned.zasl (Zone Append Size Limit) must be less " + "than or equal to mdts (Maximum Data Transfer Size)"); + return; + } + + if (!n->params.vsl) { + error_setg(errp, "vsl must be non-zero"); + return; + } +} + +static void nvme_init_state(NvmeCtrl *n) +{ + /* add one to max_ioqpairs to account for the admin queue pair */ + n->reg_size = pow2ceil(sizeof(NvmeBar) + + 2 * (n->params.max_ioqpairs + 1) * NVME_DB_SIZE); + n->sq = g_new0(NvmeSQueue *, n->params.max_ioqpairs + 1); + n->cq = g_new0(NvmeCQueue *, n->params.max_ioqpairs + 1); + n->temperature = NVME_TEMPERATURE; + n->features.temp_thresh_hi = NVME_TEMPERATURE_WARNING; + n->starttime_ms = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL); + n->aer_reqs = g_new0(NvmeRequest *, n->params.aerl + 1); +} + +static void nvme_init_cmb(NvmeCtrl *n, PCIDevice *pci_dev) +{ + uint64_t cmb_size = n->params.cmb_size_mb * MiB; + uint64_t cap = ldq_le_p(&n->bar.cap); + + n->cmb.buf = g_malloc0(cmb_size); + memory_region_init_io(&n->cmb.mem, OBJECT(n), &nvme_cmb_ops, n, + "nvme-cmb", cmb_size); + pci_register_bar(pci_dev, NVME_CMB_BIR, + PCI_BASE_ADDRESS_SPACE_MEMORY | + PCI_BASE_ADDRESS_MEM_TYPE_64 | + PCI_BASE_ADDRESS_MEM_PREFETCH, &n->cmb.mem); + + NVME_CAP_SET_CMBS(cap, 1); + stq_le_p(&n->bar.cap, cap); + + if (n->params.legacy_cmb) { + nvme_cmb_enable_regs(n); + n->cmb.cmse = true; + } +} + +static void nvme_init_pmr(NvmeCtrl *n, PCIDevice *pci_dev) +{ + uint32_t pmrcap = ldl_le_p(&n->bar.pmrcap); + + NVME_PMRCAP_SET_RDS(pmrcap, 1); + NVME_PMRCAP_SET_WDS(pmrcap, 1); + NVME_PMRCAP_SET_BIR(pmrcap, NVME_PMR_BIR); + /* Turn on bit 1 support */ + NVME_PMRCAP_SET_PMRWBM(pmrcap, 0x02); + NVME_PMRCAP_SET_CMSS(pmrcap, 1); + stl_le_p(&n->bar.pmrcap, pmrcap); + + pci_register_bar(pci_dev, NVME_PMR_BIR, + PCI_BASE_ADDRESS_SPACE_MEMORY | + PCI_BASE_ADDRESS_MEM_TYPE_64 | + PCI_BASE_ADDRESS_MEM_PREFETCH, &n->pmr.dev->mr); + + memory_region_set_enabled(&n->pmr.dev->mr, false); +} + +static int nvme_init_pci(NvmeCtrl *n, PCIDevice *pci_dev, Error **errp) +{ + uint8_t *pci_conf = pci_dev->config; + uint64_t bar_size, msix_table_size, msix_pba_size; + unsigned msix_table_offset, msix_pba_offset; + int ret; + + Error *err = NULL; + + pci_conf[PCI_INTERRUPT_PIN] = 1; + pci_config_set_prog_interface(pci_conf, 0x2); + + if (n->params.use_intel_id) { + pci_config_set_vendor_id(pci_conf, PCI_VENDOR_ID_INTEL); + pci_config_set_device_id(pci_conf, 0x5845); + } else { + pci_config_set_vendor_id(pci_conf, PCI_VENDOR_ID_REDHAT); + pci_config_set_device_id(pci_conf, PCI_DEVICE_ID_REDHAT_NVME); + } + + pci_config_set_class(pci_conf, PCI_CLASS_STORAGE_EXPRESS); + pcie_endpoint_cap_init(pci_dev, 0x80); + + bar_size = QEMU_ALIGN_UP(n->reg_size, 4 * KiB); + msix_table_offset = bar_size; + msix_table_size = PCI_MSIX_ENTRY_SIZE * n->params.msix_qsize; + + bar_size += msix_table_size; + bar_size = QEMU_ALIGN_UP(bar_size, 4 * KiB); + msix_pba_offset = bar_size; + msix_pba_size = QEMU_ALIGN_UP(n->params.msix_qsize, 64) / 8; + + bar_size += msix_pba_size; + bar_size = pow2ceil(bar_size); + + memory_region_init(&n->bar0, OBJECT(n), "nvme-bar0", bar_size); + memory_region_init_io(&n->iomem, OBJECT(n), &nvme_mmio_ops, n, "nvme", + n->reg_size); + memory_region_add_subregion(&n->bar0,
0, &n->iomem); + + pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY | + PCI_BASE_ADDRESS_MEM_TYPE_64, &n->bar0); + ret = msix_init(pci_dev, n->params.msix_qsize, + &n->bar0, 0, msix_table_offset, + &n->bar0, 0, msix_pba_offset, 0, &err); + if (ret < 0) { + if (ret == -ENOTSUP) { + warn_report_err(err); + } else { + error_propagate(errp, err); + return ret; + } + } + + if (n->params.cmb_size_mb) { + nvme_init_cmb(n, pci_dev); + } + + if (n->pmr.dev) { + nvme_init_pmr(n, pci_dev); + } + + return 0; +} + +static void nvme_init_subnqn(NvmeCtrl *n) +{ + NvmeSubsystem *subsys = n->subsys; + NvmeIdCtrl *id = &n->id_ctrl; + + if (!subsys) { + snprintf((char *)id->subnqn, sizeof(id->subnqn), + "nqn.2019-08.org.qemu:%s", n->params.serial); + } else { + pstrcpy((char *)id->subnqn, sizeof(id->subnqn), (char *)subsys->subnqn); + } +} + +static void nvme_init_ctrl(NvmeCtrl *n, PCIDevice *pci_dev) +{ + NvmeIdCtrl *id = &n->id_ctrl; + uint8_t *pci_conf = pci_dev->config; + uint64_t cap = ldq_le_p(&n->bar.cap); + + id->vid = cpu_to_le16(pci_get_word(pci_conf + PCI_VENDOR_ID)); + id->ssvid = cpu_to_le16(pci_get_word(pci_conf + PCI_SUBSYSTEM_VENDOR_ID)); + strpadcpy((char *)id->mn, sizeof(id->mn), "QEMU NVMe Ctrl", ' '); + strpadcpy((char *)id->fr, sizeof(id->fr), "1.0", ' '); + strpadcpy((char *)id->sn, sizeof(id->sn), n->params.serial, ' '); + + id->cntlid = cpu_to_le16(n->cntlid); + + id->oaes = cpu_to_le32(NVME_OAES_NS_ATTR); + + id->rab = 6; + + if (n->params.use_intel_id) { + id->ieee[0] = 0xb3; + id->ieee[1] = 0x02; + id->ieee[2] = 0x00; + } else { + id->ieee[0] = 0x00; + id->ieee[1] = 0x54; + id->ieee[2] = 0x52; + } + + id->mdts = n->params.mdts; + id->ver = cpu_to_le32(NVME_SPEC_VER); + id->oacs = cpu_to_le16(NVME_OACS_NS_MGMT | NVME_OACS_FORMAT); + id->cntrltype = 0x1; + + /* + * Because the controller always completes the Abort command immediately, + * there can never be more than one concurrently executing Abort command, + * so this value is never used for anything. Note that there can easily be + * many Abort commands in the queues, but they are not considered + * "executing" until processed by nvme_abort. + * + * The specification recommends a value of 3 for Abort Command Limit (four + * concurrently outstanding Abort commands), so let's use that, though it + * is inconsequential. + */ + id->acl = 3; + id->aerl = n->params.aerl; + id->frmw = (NVME_NUM_FW_SLOTS << 1) | NVME_FRMW_SLOT1_RO; + id->lpa = NVME_LPA_NS_SMART | NVME_LPA_CSE | NVME_LPA_EXTENDED; + + /* recommended default value (~70 C) */ + id->wctemp = cpu_to_le16(NVME_TEMPERATURE_WARNING); + id->cctemp = cpu_to_le16(NVME_TEMPERATURE_CRITICAL); + + id->sqes = (0x6 << 4) | 0x6; + id->cqes = (0x4 << 4) | 0x4; + id->nn = cpu_to_le32(NVME_MAX_NAMESPACES); + id->oncs = cpu_to_le16(NVME_ONCS_WRITE_ZEROES | NVME_ONCS_TIMESTAMP | + NVME_ONCS_FEATURES | NVME_ONCS_DSM | + NVME_ONCS_COMPARE | NVME_ONCS_COPY); + + /* + * NOTE: If this device ever supports a command set that does NOT use 0x0 + * as a Flush-equivalent operation, support for the broadcast NSID in Flush + * should probably be removed. + * + * See comment in nvme_io_cmd.
+ */ + id->vwc = NVME_VWC_NSID_BROADCAST_SUPPORT | NVME_VWC_PRESENT; + + id->ocfs = cpu_to_le16(NVME_OCFS_COPY_FORMAT_0); + id->sgls = cpu_to_le32(NVME_CTRL_SGLS_SUPPORT_NO_ALIGN | + NVME_CTRL_SGLS_BITBUCKET); + + nvme_init_subnqn(n); + + id->psd[0].mp = cpu_to_le16(0x9c4); + id->psd[0].enlat = cpu_to_le32(0x10); + id->psd[0].exlat = cpu_to_le32(0x4); + + if (n->subsys) { + id->cmic |= NVME_CMIC_MULTI_CTRL; + } + + NVME_CAP_SET_MQES(cap, 0x7ff); + NVME_CAP_SET_CQR(cap, 1); + NVME_CAP_SET_TO(cap, 0xf); + NVME_CAP_SET_CSS(cap, NVME_CAP_CSS_NVM); + NVME_CAP_SET_CSS(cap, NVME_CAP_CSS_CSI_SUPP); + NVME_CAP_SET_CSS(cap, NVME_CAP_CSS_ADMIN_ONLY); + NVME_CAP_SET_MPSMAX(cap, 4); + NVME_CAP_SET_CMBS(cap, n->params.cmb_size_mb ? 1 : 0); + NVME_CAP_SET_PMRS(cap, n->pmr.dev ? 1 : 0); + stq_le_p(&n->bar.cap, cap); + + stl_le_p(&n->bar.vs, NVME_SPEC_VER); + n->bar.intmc = n->bar.intms = 0; +} + +static int nvme_init_subsys(NvmeCtrl *n, Error **errp) +{ + int cntlid; + + if (!n->subsys) { + return 0; + } + + cntlid = nvme_subsys_register_ctrl(n, errp); + if (cntlid < 0) { + return -1; + } + + n->cntlid = cntlid; + + return 0; +} + +void nvme_attach_ns(NvmeCtrl *n, NvmeNamespace *ns) +{ + uint32_t nsid = ns->params.nsid; + assert(nsid && nsid <= NVME_MAX_NAMESPACES); + + n->namespaces[nsid] = ns; + ns->attached++; + + n->dmrsl = MIN_NON_ZERO(n->dmrsl, + BDRV_REQUEST_MAX_BYTES / nvme_l2b(ns, 1)); +} + +static void nvme_realize(PCIDevice *pci_dev, Error **errp) +{ + NvmeCtrl *n = NVME(pci_dev); + NvmeNamespace *ns; + Error *local_err = NULL; + + nvme_check_constraints(n, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + + qbus_init(&n->bus, sizeof(NvmeBus), TYPE_NVME_BUS, + &pci_dev->qdev, n->parent_obj.qdev.id); + + nvme_init_state(n); + if (nvme_init_pci(n, pci_dev, errp)) { + return; + } + + if (nvme_init_subsys(n, errp)) { + error_propagate(errp, local_err); + return; + } + nvme_init_ctrl(n, pci_dev); + + /* setup a namespace if the controller drive property was given */ + if (n->namespace.blkconf.blk) { + ns = &n->namespace; + ns->params.nsid = 1; + + if (nvme_ns_setup(ns, errp)) { + return; + } + + nvme_attach_ns(n, ns); + } +} + +static void nvme_exit(PCIDevice *pci_dev) +{ + NvmeCtrl *n = NVME(pci_dev); + NvmeNamespace *ns; + int i; + + nvme_ctrl_reset(n); + + if (n->subsys) { + for (i = 1; i <= NVME_MAX_NAMESPACES; i++) { + ns = nvme_ns(n, i); + if (ns) { + ns->attached--; + } + } + + nvme_subsys_unregister_ctrl(n->subsys, n); + } + + g_free(n->cq); + g_free(n->sq); + g_free(n->aer_reqs); + + if (n->params.cmb_size_mb) { + g_free(n->cmb.buf); + } + + if (n->pmr.dev) { + host_memory_backend_set_mapped(n->pmr.dev, false); + } + msix_uninit(pci_dev, &n->bar0, &n->bar0); + memory_region_del_subregion(&n->bar0, &n->iomem); +} + +static Property nvme_props[] = { + DEFINE_BLOCK_PROPERTIES(NvmeCtrl, namespace.blkconf), + DEFINE_PROP_LINK("pmrdev", NvmeCtrl, pmr.dev, TYPE_MEMORY_BACKEND, + HostMemoryBackend *), + DEFINE_PROP_LINK("subsys", NvmeCtrl, subsys, TYPE_NVME_SUBSYS, + NvmeSubsystem *), + DEFINE_PROP_STRING("serial", NvmeCtrl, params.serial), + DEFINE_PROP_UINT32("cmb_size_mb", NvmeCtrl, params.cmb_size_mb, 0), + DEFINE_PROP_UINT32("num_queues", NvmeCtrl, params.num_queues, 0), + DEFINE_PROP_UINT32("max_ioqpairs", NvmeCtrl, params.max_ioqpairs, 64), + DEFINE_PROP_UINT16("msix_qsize", NvmeCtrl, params.msix_qsize, 65), + DEFINE_PROP_UINT8("aerl", NvmeCtrl, params.aerl, 3), + DEFINE_PROP_UINT32("aer_max_queued", NvmeCtrl, params.aer_max_queued, 64), + 
DEFINE_PROP_UINT8("mdts", NvmeCtrl, params.mdts, 7), + DEFINE_PROP_UINT8("vsl", NvmeCtrl, params.vsl, 7), + DEFINE_PROP_BOOL("use-intel-id", NvmeCtrl, params.use_intel_id, false), + DEFINE_PROP_BOOL("legacy-cmb", NvmeCtrl, params.legacy_cmb, false), + DEFINE_PROP_UINT8("zoned.zasl", NvmeCtrl, params.zasl, 0), + DEFINE_PROP_BOOL("zoned.auto_transition", NvmeCtrl, + params.auto_transition_zones, true), + DEFINE_PROP_END_OF_LIST(), +}; + +static void nvme_get_smart_warning(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + NvmeCtrl *n = NVME(obj); + uint8_t value = n->smart_critical_warning; + + visit_type_uint8(v, name, &value, errp); +} + +static void nvme_set_smart_warning(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + NvmeCtrl *n = NVME(obj); + uint8_t value, old_value, cap = 0, index, event; + + if (!visit_type_uint8(v, name, &value, errp)) { + return; + } + + cap = NVME_SMART_SPARE | NVME_SMART_TEMPERATURE | NVME_SMART_RELIABILITY + | NVME_SMART_MEDIA_READ_ONLY | NVME_SMART_FAILED_VOLATILE_MEDIA; + if (NVME_CAP_PMRS(ldq_le_p(&n->bar.cap))) { + cap |= NVME_SMART_PMR_UNRELIABLE; + } + + if ((value & cap) != value) { + error_setg(errp, "unsupported smart critical warning bits: 0x%x", + value & ~cap); + return; + } + + old_value = n->smart_critical_warning; + n->smart_critical_warning = value; + + /* only inject new bits of smart critical warning */ + for (index = 0; index < NVME_SMART_WARN_MAX; index++) { + event = 1 << index; + if (value & ~old_value & event) + nvme_smart_event(n, event); + } +} + +static const VMStateDescription nvme_vmstate = { + .name = "nvme", + .unmigratable = 1, +}; + +static void nvme_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + PCIDeviceClass *pc = PCI_DEVICE_CLASS(oc); + + pc->realize = nvme_realize; + pc->exit = nvme_exit; + pc->class_id = PCI_CLASS_STORAGE_EXPRESS; + pc->revision = 2; + + set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); + dc->desc = "Non-Volatile Memory Express"; + device_class_set_props(dc, nvme_props); + dc->vmsd = &nvme_vmstate; +} + +static void nvme_instance_init(Object *obj) +{ + NvmeCtrl *n = NVME(obj); + + device_add_bootindex_property(obj, &n->namespace.blkconf.bootindex, + "bootindex", "/namespace@1,0", + DEVICE(obj)); + + object_property_add(obj, "smart_critical_warning", "uint8", + nvme_get_smart_warning, + nvme_set_smart_warning, NULL, NULL); +} + +static const TypeInfo nvme_info = { + .name = TYPE_NVME, + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(NvmeCtrl), + .instance_init = nvme_instance_init, + .class_init = nvme_class_init, + .interfaces = (InterfaceInfo[]) { + { INTERFACE_PCIE_DEVICE }, + { } + }, +}; + +static const TypeInfo nvme_bus_info = { + .name = TYPE_NVME_BUS, + .parent = TYPE_BUS, + .instance_size = sizeof(NvmeBus), +}; + +static void nvme_register_types(void) +{ + type_register_static(&nvme_info); + type_register_static(&nvme_bus_info); +} + +type_init(nvme_register_types) diff --git a/hw/nvme/dif.c b/hw/nvme/dif.c new file mode 100644 index 000000000..5dbd18b2a --- /dev/null +++ b/hw/nvme/dif.c @@ -0,0 +1,509 @@ +/* + * QEMU NVM Express End-to-End Data Protection support + * + * Copyright (c) 2021 Samsung Electronics Co., Ltd. 
+ * + * Authors: + * Klaus Jensen + * Gollu Appalanaidu + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "sysemu/block-backend.h" + +#include "nvme.h" +#include "trace.h" + +uint16_t nvme_check_prinfo(NvmeNamespace *ns, uint8_t prinfo, uint64_t slba, + uint32_t reftag) +{ + if ((NVME_ID_NS_DPS_TYPE(ns->id_ns.dps) == NVME_ID_NS_DPS_TYPE_1) && + (prinfo & NVME_PRINFO_PRCHK_REF) && (slba & 0xffffffff) != reftag) { + return NVME_INVALID_PROT_INFO | NVME_DNR; + } + + return NVME_SUCCESS; +} + +/* from Linux kernel (crypto/crct10dif_common.c) */ +static uint16_t crc_t10dif(uint16_t crc, const unsigned char *buffer, + size_t len) +{ + unsigned int i; + + for (i = 0; i < len; i++) { + crc = (crc << 8) ^ t10_dif_crc_table[((crc >> 8) ^ buffer[i]) & 0xff]; + } + + return crc; +} + +void nvme_dif_pract_generate_dif(NvmeNamespace *ns, uint8_t *buf, size_t len, + uint8_t *mbuf, size_t mlen, uint16_t apptag, + uint32_t *reftag) +{ + uint8_t *end = buf + len; + int16_t pil = 0; + + if (!(ns->id_ns.dps & NVME_ID_NS_DPS_FIRST_EIGHT)) { + pil = ns->lbaf.ms - sizeof(NvmeDifTuple); + } + + trace_pci_nvme_dif_pract_generate_dif(len, ns->lbasz, ns->lbasz + pil, + apptag, *reftag); + + for (; buf < end; buf += ns->lbasz, mbuf += ns->lbaf.ms) { + NvmeDifTuple *dif = (NvmeDifTuple *)(mbuf + pil); + uint16_t crc = crc_t10dif(0x0, buf, ns->lbasz); + + if (pil) { + crc = crc_t10dif(crc, mbuf, pil); + } + + dif->guard = cpu_to_be16(crc); + dif->apptag = cpu_to_be16(apptag); + dif->reftag = cpu_to_be32(*reftag); + + if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps) != NVME_ID_NS_DPS_TYPE_3) { + (*reftag)++; + } + } +} + +static uint16_t nvme_dif_prchk(NvmeNamespace *ns, NvmeDifTuple *dif, + uint8_t *buf, uint8_t *mbuf, size_t pil, + uint8_t prinfo, uint16_t apptag, + uint16_t appmask, uint32_t reftag) +{ + switch (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) { + case NVME_ID_NS_DPS_TYPE_3: + if (be32_to_cpu(dif->reftag) != 0xffffffff) { + break; + } + + /* fallthrough */ + case NVME_ID_NS_DPS_TYPE_1: + case NVME_ID_NS_DPS_TYPE_2: + if (be16_to_cpu(dif->apptag) != 0xffff) { + break; + } + + trace_pci_nvme_dif_prchk_disabled(be16_to_cpu(dif->apptag), + be32_to_cpu(dif->reftag)); + + return NVME_SUCCESS; + } + + if (prinfo & NVME_PRINFO_PRCHK_GUARD) { + uint16_t crc = crc_t10dif(0x0, buf, ns->lbasz); + + if (pil) { + crc = crc_t10dif(crc, mbuf, pil); + } + + trace_pci_nvme_dif_prchk_guard(be16_to_cpu(dif->guard), crc); + + if (be16_to_cpu(dif->guard) != crc) { + return NVME_E2E_GUARD_ERROR; + } + } + + if (prinfo & NVME_PRINFO_PRCHK_APP) { + trace_pci_nvme_dif_prchk_apptag(be16_to_cpu(dif->apptag), apptag, + appmask); + + if ((be16_to_cpu(dif->apptag) & appmask) != (apptag & appmask)) { + return NVME_E2E_APP_ERROR; + } + } + + if (prinfo & NVME_PRINFO_PRCHK_REF) { + trace_pci_nvme_dif_prchk_reftag(be32_to_cpu(dif->reftag), reftag); + + if (be32_to_cpu(dif->reftag) != reftag) { + return NVME_E2E_REF_ERROR; + } + } + + return NVME_SUCCESS; +} + +uint16_t nvme_dif_check(NvmeNamespace *ns, uint8_t *buf, size_t len, + uint8_t *mbuf, size_t mlen, uint8_t prinfo, + uint64_t slba, uint16_t apptag, + uint16_t appmask, uint32_t *reftag) +{ + uint8_t *end = buf + len; + int16_t pil = 0; + uint16_t status; + + status = nvme_check_prinfo(ns, prinfo, slba, *reftag); + if (status) { + return status; + } + + if (!(ns->id_ns.dps & NVME_ID_NS_DPS_FIRST_EIGHT)) { + pil = ns->lbaf.ms - sizeof(NvmeDifTuple); + } + + trace_pci_nvme_dif_check(prinfo, ns->lbasz + pil); + + for (; buf < end; buf += ns->lbasz, mbuf += ns->lbaf.ms) { + NvmeDifTuple 
*dif = (NvmeDifTuple *)(mbuf + pil); + + status = nvme_dif_prchk(ns, dif, buf, mbuf, pil, prinfo, apptag, + appmask, *reftag); + if (status) { + return status; + } + + if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps) != NVME_ID_NS_DPS_TYPE_3) { + (*reftag)++; + } + } + + return NVME_SUCCESS; +} + +uint16_t nvme_dif_mangle_mdata(NvmeNamespace *ns, uint8_t *mbuf, size_t mlen, + uint64_t slba) +{ + BlockBackend *blk = ns->blkconf.blk; + BlockDriverState *bs = blk_bs(blk); + + int64_t moffset = 0, offset = nvme_l2b(ns, slba); + uint8_t *mbufp, *end; + bool zeroed; + int16_t pil = 0; + int64_t bytes = (mlen / ns->lbaf.ms) << ns->lbaf.ds; + int64_t pnum = 0; + + Error *err = NULL; + + + if (!(ns->id_ns.dps & NVME_ID_NS_DPS_FIRST_EIGHT)) { + pil = ns->lbaf.ms - sizeof(NvmeDifTuple); + } + + do { + int ret; + + bytes -= pnum; + + ret = bdrv_block_status(bs, offset, bytes, &pnum, NULL, NULL); + if (ret < 0) { + error_setg_errno(&err, -ret, "unable to get block status"); + error_report_err(err); + + return NVME_INTERNAL_DEV_ERROR; + } + + zeroed = !!(ret & BDRV_BLOCK_ZERO); + + trace_pci_nvme_block_status(offset, bytes, pnum, ret, zeroed); + + if (zeroed) { + mbufp = mbuf + moffset; + mlen = (pnum >> ns->lbaf.ds) * ns->lbaf.ms; + end = mbufp + mlen; + + for (; mbufp < end; mbufp += ns->lbaf.ms) { + memset(mbufp + pil, 0xff, sizeof(NvmeDifTuple)); + } + } + + moffset += (pnum >> ns->lbaf.ds) * ns->lbaf.ms; + offset += pnum; + } while (pnum != bytes); + + return NVME_SUCCESS; +} + +static void nvme_dif_rw_cb(void *opaque, int ret) +{ + NvmeBounceContext *ctx = opaque; + NvmeRequest *req = ctx->req; + NvmeNamespace *ns = req->ns; + BlockBackend *blk = ns->blkconf.blk; + + trace_pci_nvme_dif_rw_cb(nvme_cid(req), blk_name(blk)); + + qemu_iovec_destroy(&ctx->data.iov); + g_free(ctx->data.bounce); + + qemu_iovec_destroy(&ctx->mdata.iov); + g_free(ctx->mdata.bounce); + + g_free(ctx); + + nvme_rw_complete_cb(req, ret); +} + +static void nvme_dif_rw_check_cb(void *opaque, int ret) +{ + NvmeBounceContext *ctx = opaque; + NvmeRequest *req = ctx->req; + NvmeNamespace *ns = req->ns; + NvmeCtrl *n = nvme_ctrl(req); + NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd; + uint64_t slba = le64_to_cpu(rw->slba); + uint8_t prinfo = NVME_RW_PRINFO(le16_to_cpu(rw->control)); + uint16_t apptag = le16_to_cpu(rw->apptag); + uint16_t appmask = le16_to_cpu(rw->appmask); + uint32_t reftag = le32_to_cpu(rw->reftag); + uint16_t status; + + trace_pci_nvme_dif_rw_check_cb(nvme_cid(req), prinfo, apptag, appmask, + reftag); + + if (ret) { + goto out; + } + + status = nvme_dif_mangle_mdata(ns, ctx->mdata.bounce, ctx->mdata.iov.size, + slba); + if (status) { + req->status = status; + goto out; + } + + status = nvme_dif_check(ns, ctx->data.bounce, ctx->data.iov.size, + ctx->mdata.bounce, ctx->mdata.iov.size, prinfo, + slba, apptag, appmask, &reftag); + if (status) { + req->status = status; + goto out; + } + + status = nvme_bounce_data(n, ctx->data.bounce, ctx->data.iov.size, + NVME_TX_DIRECTION_FROM_DEVICE, req); + if (status) { + req->status = status; + goto out; + } + + if (prinfo & NVME_PRINFO_PRACT && ns->lbaf.ms == 8) { + goto out; + } + + status = nvme_bounce_mdata(n, ctx->mdata.bounce, ctx->mdata.iov.size, + NVME_TX_DIRECTION_FROM_DEVICE, req); + if (status) { + req->status = status; + } + +out: + nvme_dif_rw_cb(ctx, ret); +} + +static void nvme_dif_rw_mdata_in_cb(void *opaque, int ret) +{ + NvmeBounceContext *ctx = opaque; + NvmeRequest *req = ctx->req; + NvmeNamespace *ns = req->ns; + NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd; + uint64_t slba = 
le64_to_cpu(rw->slba); + uint32_t nlb = le16_to_cpu(rw->nlb) + 1; + size_t mlen = nvme_m2b(ns, nlb); + uint64_t offset = nvme_moff(ns, slba); + BlockBackend *blk = ns->blkconf.blk; + + trace_pci_nvme_dif_rw_mdata_in_cb(nvme_cid(req), blk_name(blk)); + + if (ret) { + goto out; + } + + ctx->mdata.bounce = g_malloc(mlen); + + qemu_iovec_reset(&ctx->mdata.iov); + qemu_iovec_add(&ctx->mdata.iov, ctx->mdata.bounce, mlen); + + req->aiocb = blk_aio_preadv(blk, offset, &ctx->mdata.iov, 0, + nvme_dif_rw_check_cb, ctx); + return; + +out: + nvme_dif_rw_cb(ctx, ret); +} + +static void nvme_dif_rw_mdata_out_cb(void *opaque, int ret) +{ + NvmeBounceContext *ctx = opaque; + NvmeRequest *req = ctx->req; + NvmeNamespace *ns = req->ns; + NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd; + uint64_t slba = le64_to_cpu(rw->slba); + uint64_t offset = nvme_moff(ns, slba); + BlockBackend *blk = ns->blkconf.blk; + + trace_pci_nvme_dif_rw_mdata_out_cb(nvme_cid(req), blk_name(blk)); + + if (ret) { + goto out; + } + + req->aiocb = blk_aio_pwritev(blk, offset, &ctx->mdata.iov, 0, + nvme_dif_rw_cb, ctx); + return; + +out: + nvme_dif_rw_cb(ctx, ret); +} + +uint16_t nvme_dif_rw(NvmeCtrl *n, NvmeRequest *req) +{ + NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd; + NvmeNamespace *ns = req->ns; + BlockBackend *blk = ns->blkconf.blk; + bool wrz = rw->opcode == NVME_CMD_WRITE_ZEROES; + uint32_t nlb = le16_to_cpu(rw->nlb) + 1; + uint64_t slba = le64_to_cpu(rw->slba); + size_t len = nvme_l2b(ns, nlb); + size_t mlen = nvme_m2b(ns, nlb); + size_t mapped_len = len; + int64_t offset = nvme_l2b(ns, slba); + uint8_t prinfo = NVME_RW_PRINFO(le16_to_cpu(rw->control)); + uint16_t apptag = le16_to_cpu(rw->apptag); + uint16_t appmask = le16_to_cpu(rw->appmask); + uint32_t reftag = le32_to_cpu(rw->reftag); + bool pract = !!(prinfo & NVME_PRINFO_PRACT); + NvmeBounceContext *ctx; + uint16_t status; + + trace_pci_nvme_dif_rw(pract, prinfo); + + ctx = g_new0(NvmeBounceContext, 1); + ctx->req = req; + + if (wrz) { + BdrvRequestFlags flags = BDRV_REQ_MAY_UNMAP; + + if (prinfo & NVME_PRINFO_PRCHK_MASK) { + status = NVME_INVALID_PROT_INFO | NVME_DNR; + goto err; + } + + if (pract) { + uint8_t *mbuf, *end; + int16_t pil = ns->lbaf.ms - sizeof(NvmeDifTuple); + + status = nvme_check_prinfo(ns, prinfo, slba, reftag); + if (status) { + goto err; + } + + flags = 0; + + ctx->mdata.bounce = g_malloc0(mlen); + + qemu_iovec_init(&ctx->mdata.iov, 1); + qemu_iovec_add(&ctx->mdata.iov, ctx->mdata.bounce, mlen); + + mbuf = ctx->mdata.bounce; + end = mbuf + mlen; + + if (ns->id_ns.dps & NVME_ID_NS_DPS_FIRST_EIGHT) { + pil = 0; + } + + for (; mbuf < end; mbuf += ns->lbaf.ms) { + NvmeDifTuple *dif = (NvmeDifTuple *)(mbuf + pil); + + dif->apptag = cpu_to_be16(apptag); + dif->reftag = cpu_to_be32(reftag); + + switch (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) { + case NVME_ID_NS_DPS_TYPE_1: + case NVME_ID_NS_DPS_TYPE_2: + reftag++; + } + } + } + + req->aiocb = blk_aio_pwrite_zeroes(blk, offset, len, flags, + nvme_dif_rw_mdata_out_cb, ctx); + return NVME_NO_COMPLETE; + } + + if (nvme_ns_ext(ns) && !(pract && ns->lbaf.ms == 8)) { + mapped_len += mlen; + } + + status = nvme_map_dptr(n, &req->sg, mapped_len, &req->cmd); + if (status) { + goto err; + } + + ctx->data.bounce = g_malloc(len); + + qemu_iovec_init(&ctx->data.iov, 1); + qemu_iovec_add(&ctx->data.iov, ctx->data.bounce, len); + + if (req->cmd.opcode == NVME_CMD_READ) { + block_acct_start(blk_get_stats(blk), &req->acct, ctx->data.iov.size, + BLOCK_ACCT_READ); + + req->aiocb = blk_aio_preadv(ns->blkconf.blk, offset, &ctx->data.iov, 0, + 
nvme_dif_rw_mdata_in_cb, ctx); + return NVME_NO_COMPLETE; + } + + status = nvme_bounce_data(n, ctx->data.bounce, ctx->data.iov.size, + NVME_TX_DIRECTION_TO_DEVICE, req); + if (status) { + goto err; + } + + ctx->mdata.bounce = g_malloc(mlen); + + qemu_iovec_init(&ctx->mdata.iov, 1); + qemu_iovec_add(&ctx->mdata.iov, ctx->mdata.bounce, mlen); + + if (!(pract && ns->lbaf.ms == 8)) { + status = nvme_bounce_mdata(n, ctx->mdata.bounce, ctx->mdata.iov.size, + NVME_TX_DIRECTION_TO_DEVICE, req); + if (status) { + goto err; + } + } + + status = nvme_check_prinfo(ns, prinfo, slba, reftag); + if (status) { + goto err; + } + + if (pract) { + /* splice generated protection information into the buffer */ + nvme_dif_pract_generate_dif(ns, ctx->data.bounce, ctx->data.iov.size, + ctx->mdata.bounce, ctx->mdata.iov.size, + apptag, &reftag); + } else { + status = nvme_dif_check(ns, ctx->data.bounce, ctx->data.iov.size, + ctx->mdata.bounce, ctx->mdata.iov.size, prinfo, + slba, apptag, appmask, &reftag); + if (status) { + goto err; + } + } + + block_acct_start(blk_get_stats(blk), &req->acct, ctx->data.iov.size, + BLOCK_ACCT_WRITE); + + req->aiocb = blk_aio_pwritev(ns->blkconf.blk, offset, &ctx->data.iov, 0, + nvme_dif_rw_mdata_out_cb, ctx); + + return NVME_NO_COMPLETE; + +err: + qemu_iovec_destroy(&ctx->data.iov); + g_free(ctx->data.bounce); + + qemu_iovec_destroy(&ctx->mdata.iov); + g_free(ctx->mdata.bounce); + + g_free(ctx); + + return status; +} diff --git a/hw/nvme/meson.build b/hw/nvme/meson.build new file mode 100644 index 000000000..3cf40046e --- /dev/null +++ b/hw/nvme/meson.build @@ -0,0 +1 @@ +softmmu_ss.add(when: 'CONFIG_NVME_PCI', if_true: files('ctrl.c', 'dif.c', 'ns.c', 'subsys.c')) diff --git a/hw/nvme/ns.c b/hw/nvme/ns.c new file mode 100644 index 000000000..8b5f98c76 --- /dev/null +++ b/hw/nvme/ns.c @@ -0,0 +1,595 @@ +/* + * QEMU NVM Express Virtual Namespace + * + * Copyright (c) 2019 CNEX Labs + * Copyright (c) 2020 Samsung Electronics + * + * Authors: + * Klaus Jensen + * + * This work is licensed under the terms of the GNU GPL, version 2. See the + * COPYING file in the top-level directory. 
+ * + */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "qemu/error-report.h" +#include "qapi/error.h" +#include "sysemu/sysemu.h" +#include "sysemu/block-backend.h" + +#include "nvme.h" +#include "trace.h" + +#define MIN_DISCARD_GRANULARITY (4 * KiB) +#define NVME_DEFAULT_ZONE_SIZE (128 * MiB) + +void nvme_ns_init_format(NvmeNamespace *ns) +{ + NvmeIdNs *id_ns = &ns->id_ns; + BlockDriverInfo bdi; + int npdg, nlbas, ret; + + ns->lbaf = id_ns->lbaf[NVME_ID_NS_FLBAS_INDEX(id_ns->flbas)]; + ns->lbasz = 1 << ns->lbaf.ds; + + nlbas = ns->size / (ns->lbasz + ns->lbaf.ms); + + id_ns->nsze = cpu_to_le64(nlbas); + + /* no thin provisioning */ + id_ns->ncap = id_ns->nsze; + id_ns->nuse = id_ns->ncap; + + ns->moff = (int64_t)nlbas << ns->lbaf.ds; + + npdg = ns->blkconf.discard_granularity / ns->lbasz; + + ret = bdrv_get_info(blk_bs(ns->blkconf.blk), &bdi); + if (ret >= 0 && bdi.cluster_size > ns->blkconf.discard_granularity) { + npdg = bdi.cluster_size / ns->lbasz; + } + + id_ns->npda = id_ns->npdg = npdg - 1; +} + +static int nvme_ns_init(NvmeNamespace *ns, Error **errp) +{ + static uint64_t ns_count; + NvmeIdNs *id_ns = &ns->id_ns; + uint8_t ds; + uint16_t ms; + int i; + + ns->csi = NVME_CSI_NVM; + ns->status = 0x0; + + ns->id_ns.dlfeat = 0x1; + + /* support DULBE and I/O optimization fields */ + id_ns->nsfeat |= (0x4 | 0x10); + + if (ns->params.shared) { + id_ns->nmic |= NVME_NMIC_NS_SHARED; + } + + /* Substitute a missing EUI-64 by an autogenerated one */ + ++ns_count; + if (!ns->params.eui64 && ns->params.eui64_default) { + ns->params.eui64 = ns_count + NVME_EUI64_DEFAULT; + } + + /* simple copy */ + id_ns->mssrl = cpu_to_le16(ns->params.mssrl); + id_ns->mcl = cpu_to_le32(ns->params.mcl); + id_ns->msrc = ns->params.msrc; + id_ns->eui64 = cpu_to_be64(ns->params.eui64); + + ds = 31 - clz32(ns->blkconf.logical_block_size); + ms = ns->params.ms; + + id_ns->mc = NVME_ID_NS_MC_EXTENDED | NVME_ID_NS_MC_SEPARATE; + + if (ms && ns->params.mset) { + id_ns->flbas |= NVME_ID_NS_FLBAS_EXTENDED; + } + + id_ns->dpc = 0x1f; + id_ns->dps = ns->params.pi; + if (ns->params.pi && ns->params.pil) { + id_ns->dps |= NVME_ID_NS_DPS_FIRST_EIGHT; + } + + static const NvmeLBAF lbaf[16] = { + [0] = { .ds = 9 }, + [1] = { .ds = 9, .ms = 8 }, + [2] = { .ds = 9, .ms = 16 }, + [3] = { .ds = 9, .ms = 64 }, + [4] = { .ds = 12 }, + [5] = { .ds = 12, .ms = 8 }, + [6] = { .ds = 12, .ms = 16 }, + [7] = { .ds = 12, .ms = 64 }, + }; + + memcpy(&id_ns->lbaf, &lbaf, sizeof(lbaf)); + id_ns->nlbaf = 7; + + for (i = 0; i <= id_ns->nlbaf; i++) { + NvmeLBAF *lbaf = &id_ns->lbaf[i]; + if (lbaf->ds == ds) { + if (lbaf->ms == ms) { + id_ns->flbas |= i; + goto lbaf_found; + } + } + } + + /* add non-standard lba format */ + id_ns->nlbaf++; + id_ns->lbaf[id_ns->nlbaf].ds = ds; + id_ns->lbaf[id_ns->nlbaf].ms = ms; + id_ns->flbas |= id_ns->nlbaf; + +lbaf_found: + nvme_ns_init_format(ns); + + return 0; +} + +static int nvme_ns_init_blk(NvmeNamespace *ns, Error **errp) +{ + bool read_only; + + if (!blkconf_blocksizes(&ns->blkconf, errp)) { + return -1; + } + + read_only = !blk_supports_write_perm(ns->blkconf.blk); + if (!blkconf_apply_backend_options(&ns->blkconf, read_only, false, errp)) { + return -1; + } + + if (ns->blkconf.discard_granularity == -1) { + ns->blkconf.discard_granularity = + MAX(ns->blkconf.logical_block_size, MIN_DISCARD_GRANULARITY); + } + + ns->size = blk_getlength(ns->blkconf.blk); + if (ns->size < 0) { + error_setg_errno(errp, -ns->size, "could not get blockdev size"); + return -1; + } + + return 0; +} + +static 
int nvme_ns_zoned_check_calc_geometry(NvmeNamespace *ns, Error **errp) +{ + uint64_t zone_size, zone_cap; + + /* Make sure that the values of ZNS properties are sane */ + if (ns->params.zone_size_bs) { + zone_size = ns->params.zone_size_bs; + } else { + zone_size = NVME_DEFAULT_ZONE_SIZE; + } + if (ns->params.zone_cap_bs) { + zone_cap = ns->params.zone_cap_bs; + } else { + zone_cap = zone_size; + } + if (zone_cap > zone_size) { + error_setg(errp, "zone capacity %"PRIu64"B exceeds " + "zone size %"PRIu64"B", zone_cap, zone_size); + return -1; + } + if (zone_size < ns->lbasz) { + error_setg(errp, "zone size %"PRIu64"B too small, " + "must be at least %zuB", zone_size, ns->lbasz); + return -1; + } + if (zone_cap < ns->lbasz) { + error_setg(errp, "zone capacity %"PRIu64"B too small, " + "must be at least %zuB", zone_cap, ns->lbasz); + return -1; + } + + /* + * Save the main zone geometry values to avoid + * recalculating them later. + */ + ns->zone_size = zone_size / ns->lbasz; + ns->zone_capacity = zone_cap / ns->lbasz; + ns->num_zones = le64_to_cpu(ns->id_ns.nsze) / ns->zone_size; + + /* Do a few more sanity checks of ZNS properties */ + if (!ns->num_zones) { + error_setg(errp, + "insufficient drive capacity, must be at least the size " + "of one zone (%"PRIu64"B)", zone_size); + return -1; + } + + return 0; +} + +static void nvme_ns_zoned_init_state(NvmeNamespace *ns) +{ + uint64_t start = 0, zone_size = ns->zone_size; + uint64_t capacity = ns->num_zones * zone_size; + NvmeZone *zone; + int i; + + ns->zone_array = g_new0(NvmeZone, ns->num_zones); + if (ns->params.zd_extension_size) { + ns->zd_extensions = g_malloc0(ns->params.zd_extension_size * + ns->num_zones); + } + + QTAILQ_INIT(&ns->exp_open_zones); + QTAILQ_INIT(&ns->imp_open_zones); + QTAILQ_INIT(&ns->closed_zones); + QTAILQ_INIT(&ns->full_zones); + + zone = ns->zone_array; + for (i = 0; i < ns->num_zones; i++, zone++) { + if (start + zone_size > capacity) { + zone_size = capacity - start; + } + zone->d.zt = NVME_ZONE_TYPE_SEQ_WRITE; + nvme_set_zone_state(zone, NVME_ZONE_STATE_EMPTY); + zone->d.za = 0; + zone->d.zcap = ns->zone_capacity; + zone->d.zslba = start; + zone->d.wp = start; + zone->w_ptr = start; + start += zone_size; + } + + ns->zone_size_log2 = 0; + if (is_power_of_2(ns->zone_size)) { + ns->zone_size_log2 = 63 - clz64(ns->zone_size); + } +} + +static void nvme_ns_init_zoned(NvmeNamespace *ns) +{ + NvmeIdNsZoned *id_ns_z; + int i; + + nvme_ns_zoned_init_state(ns); + + id_ns_z = g_malloc0(sizeof(NvmeIdNsZoned)); + + /* MAR/MOR are zeroes-based, FFFFFFFFh means no limit */ + id_ns_z->mar = cpu_to_le32(ns->params.max_active_zones - 1); + id_ns_z->mor = cpu_to_le32(ns->params.max_open_zones - 1); + id_ns_z->zoc = 0; + id_ns_z->ozcs = ns->params.cross_zone_read ? 0x01 : 0x00; + + for (i = 0; i <= ns->id_ns.nlbaf; i++) { + id_ns_z->lbafe[i].zsze = cpu_to_le64(ns->zone_size); + id_ns_z->lbafe[i].zdes = + ns->params.zd_extension_size >> 6; /* Units of 64B */ + } + + ns->csi = NVME_CSI_ZONED; + ns->id_ns.nsze = cpu_to_le64(ns->num_zones * ns->zone_size); + ns->id_ns.ncap = ns->id_ns.nsze; + ns->id_ns.nuse = ns->id_ns.ncap; + + /* + * The device uses the BDRV_BLOCK_ZERO flag to determine the "deallocated" + * status of logical blocks. Since the spec defines that logical blocks + * SHALL be deallocated when the zone is in the Empty or Offline states, + * we can only support DULBE if the zone size is a multiple of the + * calculated NPDG.
+ */ + if (ns->zone_size % (ns->id_ns.npdg + 1)) { + warn_report("the zone size (%"PRIu64" blocks) is not a multiple of " + "the calculated deallocation granularity (%d blocks); " + "DULBE support disabled", + ns->zone_size, ns->id_ns.npdg + 1); + + ns->id_ns.nsfeat &= ~0x4; + } + + ns->id_ns_zoned = id_ns_z; +} + +static void nvme_clear_zone(NvmeNamespace *ns, NvmeZone *zone) +{ + uint8_t state; + + zone->w_ptr = zone->d.wp; + state = nvme_get_zone_state(zone); + if (zone->d.wp != zone->d.zslba || + (zone->d.za & NVME_ZA_ZD_EXT_VALID)) { + if (state != NVME_ZONE_STATE_CLOSED) { + trace_pci_nvme_clear_ns_close(state, zone->d.zslba); + nvme_set_zone_state(zone, NVME_ZONE_STATE_CLOSED); + } + nvme_aor_inc_active(ns); + QTAILQ_INSERT_HEAD(&ns->closed_zones, zone, entry); + } else { + trace_pci_nvme_clear_ns_reset(state, zone->d.zslba); + nvme_set_zone_state(zone, NVME_ZONE_STATE_EMPTY); + } +} + +/* + * Close all the zones that are currently open. + */ +static void nvme_zoned_ns_shutdown(NvmeNamespace *ns) +{ + NvmeZone *zone, *next; + + QTAILQ_FOREACH_SAFE(zone, &ns->closed_zones, entry, next) { + QTAILQ_REMOVE(&ns->closed_zones, zone, entry); + nvme_aor_dec_active(ns); + nvme_clear_zone(ns, zone); + } + QTAILQ_FOREACH_SAFE(zone, &ns->imp_open_zones, entry, next) { + QTAILQ_REMOVE(&ns->imp_open_zones, zone, entry); + nvme_aor_dec_open(ns); + nvme_aor_dec_active(ns); + nvme_clear_zone(ns, zone); + } + QTAILQ_FOREACH_SAFE(zone, &ns->exp_open_zones, entry, next) { + QTAILQ_REMOVE(&ns->exp_open_zones, zone, entry); + nvme_aor_dec_open(ns); + nvme_aor_dec_active(ns); + nvme_clear_zone(ns, zone); + } + + assert(ns->nr_open_zones == 0); +} + +static int nvme_ns_check_constraints(NvmeNamespace *ns, Error **errp) +{ + if (!ns->blkconf.blk) { + error_setg(errp, "block backend not configured"); + return -1; + } + + if (ns->params.pi && ns->params.ms < 8) { + error_setg(errp, "at least 8 bytes of metadata required to enable " + "protection information"); + return -1; + } + + if (ns->params.nsid > NVME_MAX_NAMESPACES) { + error_setg(errp, "invalid namespace id (must be between 0 and %d)", + NVME_MAX_NAMESPACES); + return -1; + } + + if (ns->params.zoned) { + if (ns->params.max_active_zones) { + if (ns->params.max_open_zones > ns->params.max_active_zones) { + error_setg(errp, "max_open_zones (%u) exceeds " + "max_active_zones (%u)", ns->params.max_open_zones, + ns->params.max_active_zones); + return -1; + } + + if (!ns->params.max_open_zones) { + ns->params.max_open_zones = ns->params.max_active_zones; + } + } + + if (ns->params.zd_extension_size) { + if (ns->params.zd_extension_size & 0x3f) { + error_setg(errp, "zone descriptor extension size must be a " + "multiple of 64B"); + return -1; + } + if ((ns->params.zd_extension_size >> 6) > 0xff) { + error_setg(errp, + "zone descriptor extension size is too large"); + return -1; + } + } + } + + return 0; +} + +int nvme_ns_setup(NvmeNamespace *ns, Error **errp) +{ + if (nvme_ns_check_constraints(ns, errp)) { + return -1; + } + + if (nvme_ns_init_blk(ns, errp)) { + return -1; + } + + if (nvme_ns_init(ns, errp)) { + return -1; + } + if (ns->params.zoned) { + if (nvme_ns_zoned_check_calc_geometry(ns, errp) != 0) { + return -1; + } + nvme_ns_init_zoned(ns); + } + + return 0; +} + +void nvme_ns_drain(NvmeNamespace *ns) +{ + blk_drain(ns->blkconf.blk); +} + +void nvme_ns_shutdown(NvmeNamespace *ns) +{ + blk_flush(ns->blkconf.blk); + if (ns->params.zoned) { + nvme_zoned_ns_shutdown(ns); + } +} + +void nvme_ns_cleanup(NvmeNamespace *ns) +{ + if (ns->params.zoned) { + 
g_free(ns->id_ns_zoned); + g_free(ns->zone_array); + g_free(ns->zd_extensions); + } +} + +static void nvme_ns_unrealize(DeviceState *dev) +{ + NvmeNamespace *ns = NVME_NS(dev); + + nvme_ns_drain(ns); + nvme_ns_shutdown(ns); + nvme_ns_cleanup(ns); +} + +static void nvme_ns_realize(DeviceState *dev, Error **errp) +{ + NvmeNamespace *ns = NVME_NS(dev); + BusState *s = qdev_get_parent_bus(dev); + NvmeCtrl *n = NVME(s->parent); + NvmeSubsystem *subsys = n->subsys; + uint32_t nsid = ns->params.nsid; + int i; + + if (!n->subsys) { + if (ns->params.detached) { + error_setg(errp, "detached requires that the nvme device is " + "linked to an nvme-subsys device"); + return; + } + } else { + /* + * If this namespace belongs to a subsystem (through a link on the + * controller device), reparent the device. + */ + if (!qdev_set_parent_bus(dev, &subsys->bus.parent_bus, errp)) { + return; + } + } + + if (nvme_ns_setup(ns, errp)) { + return; + } + + if (!nsid) { + for (i = 1; i <= NVME_MAX_NAMESPACES; i++) { + if (nvme_ns(n, i) || nvme_subsys_ns(subsys, i)) { + continue; + } + + nsid = ns->params.nsid = i; + break; + } + + if (!nsid) { + error_setg(errp, "no free namespace id"); + return; + } + } else { + if (nvme_ns(n, nsid) || nvme_subsys_ns(subsys, nsid)) { + error_setg(errp, "namespace id '%d' already allocated", nsid); + return; + } + } + + if (subsys) { + subsys->namespaces[nsid] = ns; + + if (ns->params.detached) { + return; + } + + if (ns->params.shared) { + for (i = 0; i < ARRAY_SIZE(subsys->ctrls); i++) { + NvmeCtrl *ctrl = subsys->ctrls[i]; + + if (ctrl) { + nvme_attach_ns(ctrl, ns); + } + } + + return; + } + } + + nvme_attach_ns(n, ns); +} + +static Property nvme_ns_props[] = { + DEFINE_BLOCK_PROPERTIES(NvmeNamespace, blkconf), + DEFINE_PROP_BOOL("detached", NvmeNamespace, params.detached, false), + DEFINE_PROP_BOOL("shared", NvmeNamespace, params.shared, true), + DEFINE_PROP_UINT32("nsid", NvmeNamespace, params.nsid, 0), + DEFINE_PROP_UUID("uuid", NvmeNamespace, params.uuid), + DEFINE_PROP_UINT64("eui64", NvmeNamespace, params.eui64, 0), + DEFINE_PROP_UINT16("ms", NvmeNamespace, params.ms, 0), + DEFINE_PROP_UINT8("mset", NvmeNamespace, params.mset, 0), + DEFINE_PROP_UINT8("pi", NvmeNamespace, params.pi, 0), + DEFINE_PROP_UINT8("pil", NvmeNamespace, params.pil, 0), + DEFINE_PROP_UINT16("mssrl", NvmeNamespace, params.mssrl, 128), + DEFINE_PROP_UINT32("mcl", NvmeNamespace, params.mcl, 128), + DEFINE_PROP_UINT8("msrc", NvmeNamespace, params.msrc, 127), + DEFINE_PROP_BOOL("zoned", NvmeNamespace, params.zoned, false), + DEFINE_PROP_SIZE("zoned.zone_size", NvmeNamespace, params.zone_size_bs, + NVME_DEFAULT_ZONE_SIZE), + DEFINE_PROP_SIZE("zoned.zone_capacity", NvmeNamespace, params.zone_cap_bs, + 0), + DEFINE_PROP_BOOL("zoned.cross_read", NvmeNamespace, + params.cross_zone_read, false), + DEFINE_PROP_UINT32("zoned.max_active", NvmeNamespace, + params.max_active_zones, 0), + DEFINE_PROP_UINT32("zoned.max_open", NvmeNamespace, + params.max_open_zones, 0), + DEFINE_PROP_UINT32("zoned.descr_ext_size", NvmeNamespace, + params.zd_extension_size, 0), + DEFINE_PROP_BOOL("eui64-default", NvmeNamespace, params.eui64_default, + true), + DEFINE_PROP_END_OF_LIST(), +}; + +static void nvme_ns_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + + set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); + + dc->bus_type = TYPE_NVME_BUS; + dc->realize = nvme_ns_realize; + dc->unrealize = nvme_ns_unrealize; + device_class_set_props(dc, nvme_ns_props); + dc->desc = "Virtual NVMe namespace"; +} + 
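One detail of nvme_ns_init_zoned() above is easy to misread: MAR and MOR are zeroes-based fields, and the zoned.max_active/zoned.max_open properties just defined default to 0, meaning "no limit". The unsigned subtraction max_active_zones - 1 therefore wraps to 0xFFFFFFFF, which is exactly the value the ZNS specification reserves for "no limit". A minimal sketch of that encoding, not part of this patch (the cpu_to_le32() byte swap done by the real code is omitted):

    #include <stdint.h>
    #include <stdio.h>

    /* Zeroes-based limit: 0 encodes "1 zone", all-ones encodes "no limit". */
    static uint32_t encode_zeroes_based(uint32_t max_zones)
    {
        return max_zones - 1;  /* max_zones == 0 wraps to 0xFFFFFFFF */
    }

    int main(void)
    {
        printf("max_active=0   -> MAR=0x%08x (no limit)\n",
               encode_zeroes_based(0));
        printf("max_active=256 -> MAR=0x%08x\n",
               encode_zeroes_based(256));
        return 0;
    }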
+static void nvme_ns_instance_init(Object *obj) +{ + NvmeNamespace *ns = NVME_NS(obj); + char *bootindex = g_strdup_printf("/namespace@%d,0", ns->params.nsid); + + device_add_bootindex_property(obj, &ns->bootindex, "bootindex", + bootindex, DEVICE(obj)); + + g_free(bootindex); +} + +static const TypeInfo nvme_ns_info = { + .name = TYPE_NVME_NS, + .parent = TYPE_DEVICE, + .class_init = nvme_ns_class_init, + .instance_size = sizeof(NvmeNamespace), + .instance_init = nvme_ns_instance_init, +}; + +static void nvme_ns_register_types(void) +{ + type_register_static(&nvme_ns_info); +} + +type_init(nvme_ns_register_types) diff --git a/hw/nvme/nvme.h b/hw/nvme/nvme.h new file mode 100644 index 000000000..83ffabade --- /dev/null +++ b/hw/nvme/nvme.h @@ -0,0 +1,556 @@ +/* + * QEMU NVM Express + * + * Copyright (c) 2012 Intel Corporation + * Copyright (c) 2021 Minwoo Im + * Copyright (c) 2021 Samsung Electronics Co., Ltd. + * + * Authors: + * Keith Busch + * Klaus Jensen + * Gollu Appalanaidu + * Dmitry Fomichev + * Minwoo Im + * + * This code is licensed under the GNU GPL v2 or later. + */ + +#ifndef HW_NVME_INTERNAL_H +#define HW_NVME_INTERNAL_H + +#include "qemu/uuid.h" +#include "hw/pci/pci.h" +#include "hw/block/block.h" + +#include "block/nvme.h" + +#define NVME_MAX_CONTROLLERS 32 +#define NVME_MAX_NAMESPACES 256 +#define NVME_EUI64_DEFAULT ((uint64_t)0x5254000000000000) + +QEMU_BUILD_BUG_ON(NVME_MAX_NAMESPACES > NVME_NSID_BROADCAST - 1); + +typedef struct NvmeCtrl NvmeCtrl; +typedef struct NvmeNamespace NvmeNamespace; + +#define TYPE_NVME_BUS "nvme-bus" +OBJECT_DECLARE_SIMPLE_TYPE(NvmeBus, NVME_BUS) + +typedef struct NvmeBus { + BusState parent_bus; +} NvmeBus; + +#define TYPE_NVME_SUBSYS "nvme-subsys" +#define NVME_SUBSYS(obj) \ + OBJECT_CHECK(NvmeSubsystem, (obj), TYPE_NVME_SUBSYS) + +typedef struct NvmeSubsystem { + DeviceState parent_obj; + NvmeBus bus; + uint8_t subnqn[256]; + + NvmeCtrl *ctrls[NVME_MAX_CONTROLLERS]; + NvmeNamespace *namespaces[NVME_MAX_NAMESPACES + 1]; + + struct { + char *nqn; + } params; +} NvmeSubsystem; + +int nvme_subsys_register_ctrl(NvmeCtrl *n, Error **errp); +void nvme_subsys_unregister_ctrl(NvmeSubsystem *subsys, NvmeCtrl *n); + +static inline NvmeCtrl *nvme_subsys_ctrl(NvmeSubsystem *subsys, + uint32_t cntlid) +{ + if (!subsys || cntlid >= NVME_MAX_CONTROLLERS) { + return NULL; + } + + return subsys->ctrls[cntlid]; +} + +static inline NvmeNamespace *nvme_subsys_ns(NvmeSubsystem *subsys, + uint32_t nsid) +{ + if (!subsys || !nsid || nsid > NVME_MAX_NAMESPACES) { + return NULL; + } + + return subsys->namespaces[nsid]; +} + +#define TYPE_NVME_NS "nvme-ns" +#define NVME_NS(obj) \ + OBJECT_CHECK(NvmeNamespace, (obj), TYPE_NVME_NS) + +typedef struct NvmeZone { + NvmeZoneDescr d; + uint64_t w_ptr; + QTAILQ_ENTRY(NvmeZone) entry; +} NvmeZone; + +typedef struct NvmeNamespaceParams { + bool detached; + bool shared; + uint32_t nsid; + QemuUUID uuid; + uint64_t eui64; + bool eui64_default; + + uint16_t ms; + uint8_t mset; + uint8_t pi; + uint8_t pil; + + uint16_t mssrl; + uint32_t mcl; + uint8_t msrc; + + bool zoned; + bool cross_zone_read; + uint64_t zone_size_bs; + uint64_t zone_cap_bs; + uint32_t max_active_zones; + uint32_t max_open_zones; + uint32_t zd_extension_size; +} NvmeNamespaceParams; + +typedef struct NvmeNamespace { + DeviceState parent_obj; + BlockConf blkconf; + int32_t bootindex; + int64_t size; + int64_t moff; + NvmeIdNs id_ns; + NvmeLBAF lbaf; + size_t lbasz; + const uint32_t *iocs; + uint8_t csi; + uint16_t status; + int attached; + + 
QTAILQ_ENTRY(NvmeNamespace) entry; + + NvmeIdNsZoned *id_ns_zoned; + NvmeZone *zone_array; + QTAILQ_HEAD(, NvmeZone) exp_open_zones; + QTAILQ_HEAD(, NvmeZone) imp_open_zones; + QTAILQ_HEAD(, NvmeZone) closed_zones; + QTAILQ_HEAD(, NvmeZone) full_zones; + uint32_t num_zones; + uint64_t zone_size; + uint64_t zone_capacity; + uint32_t zone_size_log2; + uint8_t *zd_extensions; + int32_t nr_open_zones; + int32_t nr_active_zones; + + NvmeNamespaceParams params; + + struct { + uint32_t err_rec; + } features; +} NvmeNamespace; + +static inline uint32_t nvme_nsid(NvmeNamespace *ns) +{ + if (ns) { + return ns->params.nsid; + } + + return 0; +} + +static inline size_t nvme_l2b(NvmeNamespace *ns, uint64_t lba) +{ + return lba << ns->lbaf.ds; +} + +static inline size_t nvme_m2b(NvmeNamespace *ns, uint64_t lba) +{ + return ns->lbaf.ms * lba; +} + +static inline int64_t nvme_moff(NvmeNamespace *ns, uint64_t lba) +{ + return ns->moff + nvme_m2b(ns, lba); +} + +static inline bool nvme_ns_ext(NvmeNamespace *ns) +{ + return !!NVME_ID_NS_FLBAS_EXTENDED(ns->id_ns.flbas); +} + +static inline NvmeZoneState nvme_get_zone_state(NvmeZone *zone) +{ + return zone->d.zs >> 4; +} + +static inline void nvme_set_zone_state(NvmeZone *zone, NvmeZoneState state) +{ + zone->d.zs = state << 4; +} + +static inline uint64_t nvme_zone_rd_boundary(NvmeNamespace *ns, NvmeZone *zone) +{ + return zone->d.zslba + ns->zone_size; +} + +static inline uint64_t nvme_zone_wr_boundary(NvmeZone *zone) +{ + return zone->d.zslba + zone->d.zcap; +} + +static inline bool nvme_wp_is_valid(NvmeZone *zone) +{ + uint8_t st = nvme_get_zone_state(zone); + + return st != NVME_ZONE_STATE_FULL && + st != NVME_ZONE_STATE_READ_ONLY && + st != NVME_ZONE_STATE_OFFLINE; +} + +static inline uint8_t *nvme_get_zd_extension(NvmeNamespace *ns, + uint32_t zone_idx) +{ + return &ns->zd_extensions[zone_idx * ns->params.zd_extension_size]; +} + +static inline void nvme_aor_inc_open(NvmeNamespace *ns) +{ + assert(ns->nr_open_zones >= 0); + if (ns->params.max_open_zones) { + ns->nr_open_zones++; + assert(ns->nr_open_zones <= ns->params.max_open_zones); + } +} + +static inline void nvme_aor_dec_open(NvmeNamespace *ns) +{ + if (ns->params.max_open_zones) { + assert(ns->nr_open_zones > 0); + ns->nr_open_zones--; + } + assert(ns->nr_open_zones >= 0); +} + +static inline void nvme_aor_inc_active(NvmeNamespace *ns) +{ + assert(ns->nr_active_zones >= 0); + if (ns->params.max_active_zones) { + ns->nr_active_zones++; + assert(ns->nr_active_zones <= ns->params.max_active_zones); + } +} + +static inline void nvme_aor_dec_active(NvmeNamespace *ns) +{ + if (ns->params.max_active_zones) { + assert(ns->nr_active_zones > 0); + ns->nr_active_zones--; + assert(ns->nr_active_zones >= ns->nr_open_zones); + } + assert(ns->nr_active_zones >= 0); +} + +void nvme_ns_init_format(NvmeNamespace *ns); +int nvme_ns_setup(NvmeNamespace *ns, Error **errp); +void nvme_ns_drain(NvmeNamespace *ns); +void nvme_ns_shutdown(NvmeNamespace *ns); +void nvme_ns_cleanup(NvmeNamespace *ns); + +typedef struct NvmeAsyncEvent { + QTAILQ_ENTRY(NvmeAsyncEvent) entry; + NvmeAerResult result; +} NvmeAsyncEvent; + +enum { + NVME_SG_ALLOC = 1 << 0, + NVME_SG_DMA = 1 << 1, +}; + +typedef struct NvmeSg { + int flags; + + union { + QEMUSGList qsg; + QEMUIOVector iov; + }; +} NvmeSg; + +typedef enum NvmeTxDirection { + NVME_TX_DIRECTION_TO_DEVICE = 0, + NVME_TX_DIRECTION_FROM_DEVICE = 1, +} NvmeTxDirection; + +typedef struct NvmeRequest { + struct NvmeSQueue *sq; + struct NvmeNamespace *ns; + BlockAIOCB *aiocb; + uint16_t 
status; + void *opaque; + NvmeCqe cqe; + NvmeCmd cmd; + BlockAcctCookie acct; + NvmeSg sg; + QTAILQ_ENTRY(NvmeRequest)entry; +} NvmeRequest; + +typedef struct NvmeBounceContext { + NvmeRequest *req; + + struct { + QEMUIOVector iov; + uint8_t *bounce; + } data, mdata; +} NvmeBounceContext; + +static inline const char *nvme_adm_opc_str(uint8_t opc) +{ + switch (opc) { + case NVME_ADM_CMD_DELETE_SQ: return "NVME_ADM_CMD_DELETE_SQ"; + case NVME_ADM_CMD_CREATE_SQ: return "NVME_ADM_CMD_CREATE_SQ"; + case NVME_ADM_CMD_GET_LOG_PAGE: return "NVME_ADM_CMD_GET_LOG_PAGE"; + case NVME_ADM_CMD_DELETE_CQ: return "NVME_ADM_CMD_DELETE_CQ"; + case NVME_ADM_CMD_CREATE_CQ: return "NVME_ADM_CMD_CREATE_CQ"; + case NVME_ADM_CMD_IDENTIFY: return "NVME_ADM_CMD_IDENTIFY"; + case NVME_ADM_CMD_ABORT: return "NVME_ADM_CMD_ABORT"; + case NVME_ADM_CMD_SET_FEATURES: return "NVME_ADM_CMD_SET_FEATURES"; + case NVME_ADM_CMD_GET_FEATURES: return "NVME_ADM_CMD_GET_FEATURES"; + case NVME_ADM_CMD_ASYNC_EV_REQ: return "NVME_ADM_CMD_ASYNC_EV_REQ"; + case NVME_ADM_CMD_NS_ATTACHMENT: return "NVME_ADM_CMD_NS_ATTACHMENT"; + case NVME_ADM_CMD_FORMAT_NVM: return "NVME_ADM_CMD_FORMAT_NVM"; + default: return "NVME_ADM_CMD_UNKNOWN"; + } +} + +static inline const char *nvme_io_opc_str(uint8_t opc) +{ + switch (opc) { + case NVME_CMD_FLUSH: return "NVME_NVM_CMD_FLUSH"; + case NVME_CMD_WRITE: return "NVME_NVM_CMD_WRITE"; + case NVME_CMD_READ: return "NVME_NVM_CMD_READ"; + case NVME_CMD_COMPARE: return "NVME_NVM_CMD_COMPARE"; + case NVME_CMD_WRITE_ZEROES: return "NVME_NVM_CMD_WRITE_ZEROES"; + case NVME_CMD_DSM: return "NVME_NVM_CMD_DSM"; + case NVME_CMD_VERIFY: return "NVME_NVM_CMD_VERIFY"; + case NVME_CMD_COPY: return "NVME_NVM_CMD_COPY"; + case NVME_CMD_ZONE_MGMT_SEND: return "NVME_ZONED_CMD_MGMT_SEND"; + case NVME_CMD_ZONE_MGMT_RECV: return "NVME_ZONED_CMD_MGMT_RECV"; + case NVME_CMD_ZONE_APPEND: return "NVME_ZONED_CMD_ZONE_APPEND"; + default: return "NVME_NVM_CMD_UNKNOWN"; + } +} + +typedef struct NvmeSQueue { + struct NvmeCtrl *ctrl; + uint16_t sqid; + uint16_t cqid; + uint32_t head; + uint32_t tail; + uint32_t size; + uint64_t dma_addr; + QEMUTimer *timer; + NvmeRequest *io_req; + QTAILQ_HEAD(, NvmeRequest) req_list; + QTAILQ_HEAD(, NvmeRequest) out_req_list; + QTAILQ_ENTRY(NvmeSQueue) entry; +} NvmeSQueue; + +typedef struct NvmeCQueue { + struct NvmeCtrl *ctrl; + uint8_t phase; + uint16_t cqid; + uint16_t irq_enabled; + uint32_t head; + uint32_t tail; + uint32_t vector; + uint32_t size; + uint64_t dma_addr; + QEMUTimer *timer; + QTAILQ_HEAD(, NvmeSQueue) sq_list; + QTAILQ_HEAD(, NvmeRequest) req_list; +} NvmeCQueue; + +#define TYPE_NVME "nvme" +#define NVME(obj) \ + OBJECT_CHECK(NvmeCtrl, (obj), TYPE_NVME) + +typedef struct NvmeParams { + char *serial; + uint32_t num_queues; /* deprecated since 5.1 */ + uint32_t max_ioqpairs; + uint16_t msix_qsize; + uint32_t cmb_size_mb; + uint8_t aerl; + uint32_t aer_max_queued; + uint8_t mdts; + uint8_t vsl; + bool use_intel_id; + uint8_t zasl; + bool auto_transition_zones; + bool legacy_cmb; +} NvmeParams; + +typedef struct NvmeCtrl { + PCIDevice parent_obj; + MemoryRegion bar0; + MemoryRegion iomem; + NvmeBar bar; + NvmeParams params; + NvmeBus bus; + + uint16_t cntlid; + bool qs_created; + uint32_t page_size; + uint16_t page_bits; + uint16_t max_prp_ents; + uint16_t cqe_size; + uint16_t sqe_size; + uint32_t reg_size; + uint32_t max_q_ents; + uint8_t outstanding_aers; + uint32_t irq_status; + int cq_pending; + uint64_t host_timestamp; /* Timestamp sent by the host */ + uint64_t 
timestamp_set_qemu_clock_ms; /* QEMU clock time */ + uint64_t starttime_ms; + uint16_t temperature; + uint8_t smart_critical_warning; + + struct { + MemoryRegion mem; + uint8_t *buf; + bool cmse; + hwaddr cba; + } cmb; + + struct { + HostMemoryBackend *dev; + bool cmse; + hwaddr cba; + } pmr; + + uint8_t aer_mask; + NvmeRequest **aer_reqs; + QTAILQ_HEAD(, NvmeAsyncEvent) aer_queue; + int aer_queued; + + uint32_t dmrsl; + + /* Namespace ID is started with 1 so bitmap should be 1-based */ +#define NVME_CHANGED_NSID_SIZE (NVME_MAX_NAMESPACES + 1) + DECLARE_BITMAP(changed_nsids, NVME_CHANGED_NSID_SIZE); + + NvmeSubsystem *subsys; + + NvmeNamespace namespace; + NvmeNamespace *namespaces[NVME_MAX_NAMESPACES + 1]; + NvmeSQueue **sq; + NvmeCQueue **cq; + NvmeSQueue admin_sq; + NvmeCQueue admin_cq; + NvmeIdCtrl id_ctrl; + + struct { + struct { + uint16_t temp_thresh_hi; + uint16_t temp_thresh_low; + }; + uint32_t async_config; + } features; +} NvmeCtrl; + +static inline NvmeNamespace *nvme_ns(NvmeCtrl *n, uint32_t nsid) +{ + if (!nsid || nsid > NVME_MAX_NAMESPACES) { + return NULL; + } + + return n->namespaces[nsid]; +} + +static inline NvmeCQueue *nvme_cq(NvmeRequest *req) +{ + NvmeSQueue *sq = req->sq; + NvmeCtrl *n = sq->ctrl; + + return n->cq[sq->cqid]; +} + +static inline NvmeCtrl *nvme_ctrl(NvmeRequest *req) +{ + NvmeSQueue *sq = req->sq; + return sq->ctrl; +} + +static inline uint16_t nvme_cid(NvmeRequest *req) +{ + if (!req) { + return 0xffff; + } + + return le16_to_cpu(req->cqe.cid); +} + +void nvme_attach_ns(NvmeCtrl *n, NvmeNamespace *ns); +uint16_t nvme_bounce_data(NvmeCtrl *n, uint8_t *ptr, uint32_t len, + NvmeTxDirection dir, NvmeRequest *req); +uint16_t nvme_bounce_mdata(NvmeCtrl *n, uint8_t *ptr, uint32_t len, + NvmeTxDirection dir, NvmeRequest *req); +void nvme_rw_complete_cb(void *opaque, int ret); +uint16_t nvme_map_dptr(NvmeCtrl *n, NvmeSg *sg, size_t len, + NvmeCmd *cmd); + +/* from Linux kernel (crypto/crct10dif_common.c) */ +static const uint16_t t10_dif_crc_table[256] = { + 0x0000, 0x8BB7, 0x9CD9, 0x176E, 0xB205, 0x39B2, 0x2EDC, 0xA56B, + 0xEFBD, 0x640A, 0x7364, 0xF8D3, 0x5DB8, 0xD60F, 0xC161, 0x4AD6, + 0x54CD, 0xDF7A, 0xC814, 0x43A3, 0xE6C8, 0x6D7F, 0x7A11, 0xF1A6, + 0xBB70, 0x30C7, 0x27A9, 0xAC1E, 0x0975, 0x82C2, 0x95AC, 0x1E1B, + 0xA99A, 0x222D, 0x3543, 0xBEF4, 0x1B9F, 0x9028, 0x8746, 0x0CF1, + 0x4627, 0xCD90, 0xDAFE, 0x5149, 0xF422, 0x7F95, 0x68FB, 0xE34C, + 0xFD57, 0x76E0, 0x618E, 0xEA39, 0x4F52, 0xC4E5, 0xD38B, 0x583C, + 0x12EA, 0x995D, 0x8E33, 0x0584, 0xA0EF, 0x2B58, 0x3C36, 0xB781, + 0xD883, 0x5334, 0x445A, 0xCFED, 0x6A86, 0xE131, 0xF65F, 0x7DE8, + 0x373E, 0xBC89, 0xABE7, 0x2050, 0x853B, 0x0E8C, 0x19E2, 0x9255, + 0x8C4E, 0x07F9, 0x1097, 0x9B20, 0x3E4B, 0xB5FC, 0xA292, 0x2925, + 0x63F3, 0xE844, 0xFF2A, 0x749D, 0xD1F6, 0x5A41, 0x4D2F, 0xC698, + 0x7119, 0xFAAE, 0xEDC0, 0x6677, 0xC31C, 0x48AB, 0x5FC5, 0xD472, + 0x9EA4, 0x1513, 0x027D, 0x89CA, 0x2CA1, 0xA716, 0xB078, 0x3BCF, + 0x25D4, 0xAE63, 0xB90D, 0x32BA, 0x97D1, 0x1C66, 0x0B08, 0x80BF, + 0xCA69, 0x41DE, 0x56B0, 0xDD07, 0x786C, 0xF3DB, 0xE4B5, 0x6F02, + 0x3AB1, 0xB106, 0xA668, 0x2DDF, 0x88B4, 0x0303, 0x146D, 0x9FDA, + 0xD50C, 0x5EBB, 0x49D5, 0xC262, 0x6709, 0xECBE, 0xFBD0, 0x7067, + 0x6E7C, 0xE5CB, 0xF2A5, 0x7912, 0xDC79, 0x57CE, 0x40A0, 0xCB17, + 0x81C1, 0x0A76, 0x1D18, 0x96AF, 0x33C4, 0xB873, 0xAF1D, 0x24AA, + 0x932B, 0x189C, 0x0FF2, 0x8445, 0x212E, 0xAA99, 0xBDF7, 0x3640, + 0x7C96, 0xF721, 0xE04F, 0x6BF8, 0xCE93, 0x4524, 0x524A, 0xD9FD, + 0xC7E6, 0x4C51, 0x5B3F, 0xD088, 0x75E3, 0xFE54, 0xE93A, 0x628D, + 0x285B, 
0xA3EC, 0xB482, 0x3F35, 0x9A5E, 0x11E9, 0x0687, 0x8D30, + 0xE232, 0x6985, 0x7EEB, 0xF55C, 0x5037, 0xDB80, 0xCCEE, 0x4759, + 0x0D8F, 0x8638, 0x9156, 0x1AE1, 0xBF8A, 0x343D, 0x2353, 0xA8E4, + 0xB6FF, 0x3D48, 0x2A26, 0xA191, 0x04FA, 0x8F4D, 0x9823, 0x1394, + 0x5942, 0xD2F5, 0xC59B, 0x4E2C, 0xEB47, 0x60F0, 0x779E, 0xFC29, + 0x4BA8, 0xC01F, 0xD771, 0x5CC6, 0xF9AD, 0x721A, 0x6574, 0xEEC3, + 0xA415, 0x2FA2, 0x38CC, 0xB37B, 0x1610, 0x9DA7, 0x8AC9, 0x017E, + 0x1F65, 0x94D2, 0x83BC, 0x080B, 0xAD60, 0x26D7, 0x31B9, 0xBA0E, + 0xF0D8, 0x7B6F, 0x6C01, 0xE7B6, 0x42DD, 0xC96A, 0xDE04, 0x55B3 +}; + +uint16_t nvme_check_prinfo(NvmeNamespace *ns, uint8_t prinfo, uint64_t slba, + uint32_t reftag); +uint16_t nvme_dif_mangle_mdata(NvmeNamespace *ns, uint8_t *mbuf, size_t mlen, + uint64_t slba); +void nvme_dif_pract_generate_dif(NvmeNamespace *ns, uint8_t *buf, size_t len, + uint8_t *mbuf, size_t mlen, uint16_t apptag, + uint32_t *reftag); +uint16_t nvme_dif_check(NvmeNamespace *ns, uint8_t *buf, size_t len, + uint8_t *mbuf, size_t mlen, uint8_t prinfo, + uint64_t slba, uint16_t apptag, + uint16_t appmask, uint32_t *reftag); +uint16_t nvme_dif_rw(NvmeCtrl *n, NvmeRequest *req); + + +#endif /* HW_NVME_INTERNAL_H */ diff --git a/hw/nvme/subsys.c b/hw/nvme/subsys.c new file mode 100644 index 000000000..fb58d6395 --- /dev/null +++ b/hw/nvme/subsys.c @@ -0,0 +1,96 @@ +/* + * QEMU NVM Express Subsystem: nvme-subsys + * + * Copyright (c) 2021 Minwoo Im + * + * This code is licensed under the GNU GPL v2. Refer COPYING. + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" + +#include "nvme.h" + +int nvme_subsys_register_ctrl(NvmeCtrl *n, Error **errp) +{ + NvmeSubsystem *subsys = n->subsys; + int cntlid, nsid; + + for (cntlid = 0; cntlid < ARRAY_SIZE(subsys->ctrls); cntlid++) { + if (!subsys->ctrls[cntlid]) { + break; + } + } + + if (cntlid == ARRAY_SIZE(subsys->ctrls)) { + error_setg(errp, "no more free controller id"); + return -1; + } + + subsys->ctrls[cntlid] = n; + + for (nsid = 1; nsid < ARRAY_SIZE(subsys->namespaces); nsid++) { + NvmeNamespace *ns = subsys->namespaces[nsid]; + if (ns && ns->params.shared && !ns->params.detached) { + nvme_attach_ns(n, ns); + } + } + + return cntlid; +} + +void nvme_subsys_unregister_ctrl(NvmeSubsystem *subsys, NvmeCtrl *n) +{ + subsys->ctrls[n->cntlid] = NULL; + n->cntlid = -1; +} + +static void nvme_subsys_setup(NvmeSubsystem *subsys) +{ + const char *nqn = subsys->params.nqn ? 
+ subsys->params.nqn : subsys->parent_obj.id; + + snprintf((char *)subsys->subnqn, sizeof(subsys->subnqn), + "nqn.2019-08.org.qemu:%s", nqn); +} + +static void nvme_subsys_realize(DeviceState *dev, Error **errp) +{ + NvmeSubsystem *subsys = NVME_SUBSYS(dev); + + qbus_init(&subsys->bus, sizeof(NvmeBus), TYPE_NVME_BUS, dev, dev->id); + + nvme_subsys_setup(subsys); +} + +static Property nvme_subsystem_props[] = { + DEFINE_PROP_STRING("nqn", NvmeSubsystem, params.nqn), + DEFINE_PROP_END_OF_LIST(), +}; + +static void nvme_subsys_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + + set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); + + dc->realize = nvme_subsys_realize; + dc->desc = "Virtual NVMe subsystem"; + dc->hotpluggable = false; + + device_class_set_props(dc, nvme_subsystem_props); +} + +static const TypeInfo nvme_subsys_info = { + .name = TYPE_NVME_SUBSYS, + .parent = TYPE_DEVICE, + .class_init = nvme_subsys_class_init, + .instance_size = sizeof(NvmeSubsystem), +}; + +static void nvme_subsys_register_types(void) +{ + type_register_static(&nvme_subsys_info); +} + +type_init(nvme_subsys_register_types) diff --git a/hw/nvme/trace-events b/hw/nvme/trace-events new file mode 100644 index 000000000..ff6cafd52 --- /dev/null +++ b/hw/nvme/trace-events @@ -0,0 +1,200 @@ +# successful events +pci_nvme_irq_msix(uint32_t vector) "raising MSI-X IRQ vector %u" +pci_nvme_irq_pin(void) "pulsing IRQ pin" +pci_nvme_irq_masked(void) "IRQ is masked" +pci_nvme_dma_read(uint64_t prp1, uint64_t prp2) "DMA read, prp1=0x%"PRIx64" prp2=0x%"PRIx64"" +pci_nvme_map_addr(uint64_t addr, uint64_t len) "addr 0x%"PRIx64" len %"PRIu64"" +pci_nvme_map_addr_cmb(uint64_t addr, uint64_t len) "addr 0x%"PRIx64" len %"PRIu64"" +pci_nvme_map_prp(uint64_t trans_len, uint32_t len, uint64_t prp1, uint64_t prp2, int num_prps) "trans_len %"PRIu64" len %"PRIu32" prp1 0x%"PRIx64" prp2 0x%"PRIx64" num_prps %d" +pci_nvme_map_sgl(uint8_t typ, uint64_t len) "type 0x%"PRIx8" len %"PRIu64"" +pci_nvme_io_cmd(uint16_t cid, uint32_t nsid, uint16_t sqid, uint8_t opcode, const char *opname) "cid %"PRIu16" nsid 0x%"PRIx32" sqid %"PRIu16" opc 0x%"PRIx8" opname '%s'" +pci_nvme_admin_cmd(uint16_t cid, uint16_t sqid, uint8_t opcode, const char *opname) "cid %"PRIu16" sqid %"PRIu16" opc 0x%"PRIx8" opname '%s'" +pci_nvme_flush_ns(uint32_t nsid) "nsid 0x%"PRIx32"" +pci_nvme_format_set(uint32_t nsid, uint8_t lbaf, uint8_t mset, uint8_t pi, uint8_t pil) "nsid %"PRIu32" lbaf %"PRIu8" mset %"PRIu8" pi %"PRIu8" pil %"PRIu8"" +pci_nvme_read(uint16_t cid, uint32_t nsid, uint32_t nlb, uint64_t count, uint64_t lba) "cid %"PRIu16" nsid %"PRIu32" nlb %"PRIu32" count %"PRIu64" lba 0x%"PRIx64"" +pci_nvme_write(uint16_t cid, const char *verb, uint32_t nsid, uint32_t nlb, uint64_t count, uint64_t lba) "cid %"PRIu16" opname '%s' nsid %"PRIu32" nlb %"PRIu32" count %"PRIu64" lba 0x%"PRIx64"" +pci_nvme_rw_cb(uint16_t cid, const char *blkname) "cid %"PRIu16" blk '%s'" +pci_nvme_misc_cb(uint16_t cid) "cid %"PRIu16"" +pci_nvme_dif_rw(uint8_t pract, uint8_t prinfo) "pract 0x%"PRIx8" prinfo 0x%"PRIx8"" +pci_nvme_dif_rw_cb(uint16_t cid, const char *blkname) "cid %"PRIu16" blk '%s'" +pci_nvme_dif_rw_mdata_in_cb(uint16_t cid, const char *blkname) "cid %"PRIu16" blk '%s'" +pci_nvme_dif_rw_mdata_out_cb(uint16_t cid, const char *blkname) "cid %"PRIu16" blk '%s'" +pci_nvme_dif_rw_check_cb(uint16_t cid, uint8_t prinfo, uint16_t apptag, uint16_t appmask, uint32_t reftag) "cid %"PRIu16" prinfo 0x%"PRIx8" apptag 0x%"PRIx16" appmask 0x%"PRIx16" reftag 
0x%"PRIx32"" +pci_nvme_dif_pract_generate_dif(size_t len, size_t lba_size, size_t chksum_len, uint16_t apptag, uint32_t reftag) "len %zu lba_size %zu chksum_len %zu apptag 0x%"PRIx16" reftag 0x%"PRIx32"" +pci_nvme_dif_check(uint8_t prinfo, uint16_t chksum_len) "prinfo 0x%"PRIx8" chksum_len %"PRIu16"" +pci_nvme_dif_prchk_disabled(uint16_t apptag, uint32_t reftag) "apptag 0x%"PRIx16" reftag 0x%"PRIx32"" +pci_nvme_dif_prchk_guard(uint16_t guard, uint16_t crc) "guard 0x%"PRIx16" crc 0x%"PRIx16"" +pci_nvme_dif_prchk_apptag(uint16_t apptag, uint16_t elbat, uint16_t elbatm) "apptag 0x%"PRIx16" elbat 0x%"PRIx16" elbatm 0x%"PRIx16"" +pci_nvme_dif_prchk_reftag(uint32_t reftag, uint32_t elbrt) "reftag 0x%"PRIx32" elbrt 0x%"PRIx32"" +pci_nvme_copy(uint16_t cid, uint32_t nsid, uint16_t nr, uint8_t format) "cid %"PRIu16" nsid %"PRIu32" nr %"PRIu16" format 0x%"PRIx8"" +pci_nvme_copy_source_range(uint64_t slba, uint32_t nlb) "slba 0x%"PRIx64" nlb %"PRIu32"" +pci_nvme_copy_out(uint64_t slba, uint32_t nlb) "slba 0x%"PRIx64" nlb %"PRIu32"" +pci_nvme_verify(uint16_t cid, uint32_t nsid, uint64_t slba, uint32_t nlb) "cid %"PRIu16" nsid %"PRIu32" slba 0x%"PRIx64" nlb %"PRIu32"" +pci_nvme_verify_mdata_in_cb(uint16_t cid, const char *blkname) "cid %"PRIu16" blk '%s'" +pci_nvme_verify_cb(uint16_t cid, uint8_t prinfo, uint16_t apptag, uint16_t appmask, uint32_t reftag) "cid %"PRIu16" prinfo 0x%"PRIx8" apptag 0x%"PRIx16" appmask 0x%"PRIx16" reftag 0x%"PRIx32"" +pci_nvme_rw_complete_cb(uint16_t cid, const char *blkname) "cid %"PRIu16" blk '%s'" +pci_nvme_block_status(int64_t offset, int64_t bytes, int64_t pnum, int ret, bool zeroed) "offset %"PRId64" bytes %"PRId64" pnum %"PRId64" ret 0x%x zeroed %d" +pci_nvme_dsm(uint32_t nr, uint32_t attr) "nr %"PRIu32" attr 0x%"PRIx32"" +pci_nvme_dsm_deallocate(uint64_t slba, uint32_t nlb) "slba %"PRIu64" nlb %"PRIu32"" +pci_nvme_dsm_single_range_limit_exceeded(uint32_t nlb, uint32_t dmrsl) "nlb %"PRIu32" dmrsl %"PRIu32"" +pci_nvme_compare(uint16_t cid, uint32_t nsid, uint64_t slba, uint32_t nlb) "cid %"PRIu16" nsid %"PRIu32" slba 0x%"PRIx64" nlb %"PRIu32"" +pci_nvme_compare_data_cb(uint16_t cid) "cid %"PRIu16"" +pci_nvme_compare_mdata_cb(uint16_t cid) "cid %"PRIu16"" +pci_nvme_aio_discard_cb(uint16_t cid) "cid %"PRIu16"" +pci_nvme_aio_copy_in_cb(uint16_t cid) "cid %"PRIu16"" +pci_nvme_aio_flush_cb(uint16_t cid, const char *blkname) "cid %"PRIu16" blk '%s'" +pci_nvme_create_sq(uint64_t addr, uint16_t sqid, uint16_t cqid, uint16_t qsize, uint16_t qflags) "create submission queue, addr=0x%"PRIx64", sqid=%"PRIu16", cqid=%"PRIu16", qsize=%"PRIu16", qflags=%"PRIu16"" +pci_nvme_create_cq(uint64_t addr, uint16_t cqid, uint16_t vector, uint16_t size, uint16_t qflags, int ien) "create completion queue, addr=0x%"PRIx64", cqid=%"PRIu16", vector=%"PRIu16", qsize=%"PRIu16", qflags=%"PRIu16", ien=%d" +pci_nvme_del_sq(uint16_t qid) "deleting submission queue sqid=%"PRIu16"" +pci_nvme_del_cq(uint16_t cqid) "deleted completion queue, cqid=%"PRIu16"" +pci_nvme_identify(uint16_t cid, uint8_t cns, uint16_t ctrlid, uint8_t csi) "cid %"PRIu16" cns 0x%"PRIx8" ctrlid %"PRIu16" csi 0x%"PRIx8"" +pci_nvme_identify_ctrl(void) "identify controller" +pci_nvme_identify_ctrl_csi(uint8_t csi) "identify controller, csi=0x%"PRIx8"" +pci_nvme_identify_ns(uint32_t ns) "nsid %"PRIu32"" +pci_nvme_identify_ctrl_list(uint8_t cns, uint16_t cntid) "cns 0x%"PRIx8" cntid %"PRIu16"" +pci_nvme_identify_ns_csi(uint32_t ns, uint8_t csi) "nsid=%"PRIu32", csi=0x%"PRIx8"" +pci_nvme_identify_nslist(uint32_t ns) "nsid %"PRIu32"" 
+pci_nvme_identify_nslist_csi(uint16_t ns, uint8_t csi) "nsid=%"PRIu16", csi=0x%"PRIx8"" +pci_nvme_identify_cmd_set(void) "identify i/o command set" +pci_nvme_identify_ns_descr_list(uint32_t ns) "nsid %"PRIu32"" +pci_nvme_get_log(uint16_t cid, uint8_t lid, uint8_t lsp, uint8_t rae, uint32_t len, uint64_t off) "cid %"PRIu16" lid 0x%"PRIx8" lsp 0x%"PRIx8" rae 0x%"PRIx8" len %"PRIu32" off %"PRIu64"" +pci_nvme_getfeat(uint16_t cid, uint32_t nsid, uint8_t fid, uint8_t sel, uint32_t cdw11) "cid %"PRIu16" nsid 0x%"PRIx32" fid 0x%"PRIx8" sel 0x%"PRIx8" cdw11 0x%"PRIx32"" +pci_nvme_setfeat(uint16_t cid, uint32_t nsid, uint8_t fid, uint8_t save, uint32_t cdw11) "cid %"PRIu16" nsid 0x%"PRIx32" fid 0x%"PRIx8" save 0x%"PRIx8" cdw11 0x%"PRIx32"" +pci_nvme_getfeat_vwcache(const char* result) "get feature volatile write cache, result=%s" +pci_nvme_getfeat_numq(int result) "get feature number of queues, result=%d" +pci_nvme_setfeat_numq(int reqcq, int reqsq, int gotcq, int gotsq) "requested cq_count=%d sq_count=%d, responding with cq_count=%d sq_count=%d" +pci_nvme_setfeat_timestamp(uint64_t ts) "set feature timestamp = 0x%"PRIx64"" +pci_nvme_getfeat_timestamp(uint64_t ts) "get feature timestamp = 0x%"PRIx64"" +pci_nvme_process_aers(int queued) "queued %d" +pci_nvme_aer(uint16_t cid) "cid %"PRIu16"" +pci_nvme_aer_aerl_exceeded(void) "aerl exceeded" +pci_nvme_aer_masked(uint8_t type, uint8_t mask) "type 0x%"PRIx8" mask 0x%"PRIx8"" +pci_nvme_aer_post_cqe(uint8_t typ, uint8_t info, uint8_t log_page) "type 0x%"PRIx8" info 0x%"PRIx8" lid 0x%"PRIx8"" +pci_nvme_ns_attachment(uint16_t cid, uint8_t sel) "cid %"PRIu16", sel=0x%"PRIx8"" +pci_nvme_ns_attachment_attach(uint16_t cntlid, uint32_t nsid) "cntlid=0x%"PRIx16", nsid=0x%"PRIx32"" +pci_nvme_enqueue_event(uint8_t typ, uint8_t info, uint8_t log_page) "type 0x%"PRIx8" info 0x%"PRIx8" lid 0x%"PRIx8"" +pci_nvme_enqueue_event_noqueue(int queued) "queued %d" +pci_nvme_enqueue_event_masked(uint8_t typ) "type 0x%"PRIx8"" +pci_nvme_no_outstanding_aers(void) "ignoring event; no outstanding AERs" +pci_nvme_enqueue_req_completion(uint16_t cid, uint16_t cqid, uint32_t dw0, uint32_t dw1, uint16_t status) "cid %"PRIu16" cqid %"PRIu16" dw0 0x%"PRIx32" dw1 0x%"PRIx32" status 0x%"PRIx16"" +pci_nvme_mmio_read(uint64_t addr, unsigned size) "addr 0x%"PRIx64" size %d" +pci_nvme_mmio_write(uint64_t addr, uint64_t data, unsigned size) "addr 0x%"PRIx64" data 0x%"PRIx64" size %d" +pci_nvme_mmio_doorbell_cq(uint16_t cqid, uint16_t new_head) "cqid %"PRIu16" new_head %"PRIu16"" +pci_nvme_mmio_doorbell_sq(uint16_t sqid, uint16_t new_tail) "sqid %"PRIu16" new_tail %"PRIu16"" +pci_nvme_mmio_intm_set(uint64_t data, uint64_t new_mask) "wrote MMIO, interrupt mask set, data=0x%"PRIx64", new_mask=0x%"PRIx64"" +pci_nvme_mmio_intm_clr(uint64_t data, uint64_t new_mask) "wrote MMIO, interrupt mask clr, data=0x%"PRIx64", new_mask=0x%"PRIx64"" +pci_nvme_mmio_cfg(uint64_t data) "wrote MMIO, config controller config=0x%"PRIx64"" +pci_nvme_mmio_aqattr(uint64_t data) "wrote MMIO, admin queue attributes=0x%"PRIx64"" +pci_nvme_mmio_asqaddr(uint64_t data) "wrote MMIO, admin submission queue address=0x%"PRIx64"" +pci_nvme_mmio_acqaddr(uint64_t data) "wrote MMIO, admin completion queue address=0x%"PRIx64"" +pci_nvme_mmio_asqaddr_hi(uint64_t data, uint64_t new_addr) "wrote MMIO, admin submission queue high half=0x%"PRIx64", new_address=0x%"PRIx64"" +pci_nvme_mmio_acqaddr_hi(uint64_t data, uint64_t new_addr) "wrote MMIO, admin completion queue high half=0x%"PRIx64", new_address=0x%"PRIx64"" 
+pci_nvme_mmio_start_success(void) "setting controller enable bit succeeded" +pci_nvme_mmio_stopped(void) "cleared controller enable bit" +pci_nvme_mmio_shutdown_set(void) "shutdown bit set" +pci_nvme_mmio_shutdown_cleared(void) "shutdown bit cleared" +pci_nvme_open_zone(uint64_t slba, uint32_t zone_idx, int all) "open zone, slba=%"PRIu64", idx=%"PRIu32", all=%"PRIi32"" +pci_nvme_close_zone(uint64_t slba, uint32_t zone_idx, int all) "close zone, slba=%"PRIu64", idx=%"PRIu32", all=%"PRIi32"" +pci_nvme_finish_zone(uint64_t slba, uint32_t zone_idx, int all) "finish zone, slba=%"PRIu64", idx=%"PRIu32", all=%"PRIi32"" +pci_nvme_reset_zone(uint64_t slba, uint32_t zone_idx, int all) "reset zone, slba=%"PRIu64", idx=%"PRIu32", all=%"PRIi32"" +pci_nvme_zns_zone_reset(uint64_t zslba) "zslba 0x%"PRIx64"" +pci_nvme_offline_zone(uint64_t slba, uint32_t zone_idx, int all) "offline zone, slba=%"PRIu64", idx=%"PRIu32", all=%"PRIi32"" +pci_nvme_set_descriptor_extension(uint64_t slba, uint32_t zone_idx) "set zone descriptor extension, slba=%"PRIu64", idx=%"PRIu32"" +pci_nvme_zd_extension_set(uint32_t zone_idx) "set descriptor extension for zone_idx=%"PRIu32"" +pci_nvme_clear_ns_close(uint32_t state, uint64_t slba) "zone state=%"PRIu32", slba=%"PRIu64" transitioned to Closed state" +pci_nvme_clear_ns_reset(uint32_t state, uint64_t slba) "zone state=%"PRIu32", slba=%"PRIu64" transitioned to Empty state" + +# error conditions +pci_nvme_err_mdts(size_t len) "len %zu" +pci_nvme_err_zasl(size_t len) "len %zu" +pci_nvme_err_req_status(uint16_t cid, uint32_t nsid, uint16_t status, uint8_t opc) "cid %"PRIu16" nsid %"PRIu32" status 0x%"PRIx16" opc 0x%"PRIx8"" +pci_nvme_err_addr_read(uint64_t addr) "addr 0x%"PRIx64"" +pci_nvme_err_addr_write(uint64_t addr) "addr 0x%"PRIx64"" +pci_nvme_err_cfs(void) "controller fatal status" +pci_nvme_err_aio(uint16_t cid, const char *errname, uint16_t status) "cid %"PRIu16" err '%s' status 0x%"PRIx16"" +pci_nvme_err_copy_invalid_format(uint8_t format) "format 0x%"PRIx8"" +pci_nvme_err_invalid_sgld(uint16_t cid, uint8_t typ) "cid %"PRIu16" type 0x%"PRIx8"" +pci_nvme_err_invalid_num_sgld(uint16_t cid, uint8_t typ) "cid %"PRIu16" type 0x%"PRIx8"" +pci_nvme_err_invalid_sgl_excess_length(uint32_t residual) "residual %"PRIu32"" +pci_nvme_err_invalid_dma(void) "PRP/SGL is too small for transfer size" +pci_nvme_err_invalid_prplist_ent(uint64_t prplist) "PRP list entry is not page aligned: 0x%"PRIx64"" +pci_nvme_err_invalid_prp2_align(uint64_t prp2) "PRP2 is not page aligned: 0x%"PRIx64"" +pci_nvme_err_invalid_opc(uint8_t opc) "invalid opcode 0x%"PRIx8"" +pci_nvme_err_invalid_admin_opc(uint8_t opc) "invalid admin opcode 0x%"PRIx8"" +pci_nvme_err_invalid_lba_range(uint64_t start, uint64_t len, uint64_t limit) "Invalid LBA start=%"PRIu64" len=%"PRIu64" limit=%"PRIu64"" +pci_nvme_err_invalid_log_page_offset(uint64_t ofs, uint64_t size) "must be <= %"PRIu64", got %"PRIu64"" +pci_nvme_err_cmb_invalid_cba(uint64_t cmbmsc) "cmbmsc 0x%"PRIx64"" +pci_nvme_err_cmb_not_enabled(uint64_t cmbmsc) "cmbmsc 0x%"PRIx64"" +pci_nvme_err_unaligned_zone_cmd(uint8_t action, uint64_t slba, uint64_t zslba) "unaligned zone op 0x%"PRIx32", got slba=%"PRIu64", zslba=%"PRIu64"" +pci_nvme_err_invalid_zone_state_transition(uint8_t action, uint64_t slba, uint8_t attrs) "action=0x%"PRIx8", slba=%"PRIu64", attrs=0x%"PRIx32"" +pci_nvme_err_write_not_at_wp(uint64_t slba, uint64_t zone, uint64_t wp) "writing at slba=%"PRIu64", zone=%"PRIu64", but wp=%"PRIu64"" +pci_nvme_err_append_not_at_start(uint64_t slba, uint64_t zone) 
"appending at slba=%"PRIu64", but zone=%"PRIu64"" +pci_nvme_err_zone_is_full(uint64_t zslba) "zslba 0x%"PRIx64"" +pci_nvme_err_zone_is_read_only(uint64_t zslba) "zslba 0x%"PRIx64"" +pci_nvme_err_zone_is_offline(uint64_t zslba) "zslba 0x%"PRIx64"" +pci_nvme_err_zone_boundary(uint64_t slba, uint32_t nlb, uint64_t zcap) "lba 0x%"PRIx64" nlb %"PRIu32" zcap 0x%"PRIx64"" +pci_nvme_err_zone_invalid_write(uint64_t slba, uint64_t wp) "lba 0x%"PRIx64" wp 0x%"PRIx64"" +pci_nvme_err_zone_write_not_ok(uint64_t slba, uint32_t nlb, uint16_t status) "slba=%"PRIu64", nlb=%"PRIu32", status=0x%"PRIx16"" +pci_nvme_err_zone_read_not_ok(uint64_t slba, uint32_t nlb, uint16_t status) "slba=%"PRIu64", nlb=%"PRIu32", status=0x%"PRIx16"" +pci_nvme_err_insuff_active_res(uint32_t max_active) "max_active=%"PRIu32" zone limit exceeded" +pci_nvme_err_insuff_open_res(uint32_t max_open) "max_open=%"PRIu32" zone limit exceeded" +pci_nvme_err_zd_extension_map_error(uint32_t zone_idx) "can't map descriptor extension for zone_idx=%"PRIu32"" +pci_nvme_err_invalid_iocsci(uint32_t idx) "unsupported command set combination index %"PRIu32"" +pci_nvme_err_invalid_del_sq(uint16_t qid) "invalid submission queue deletion, sid=%"PRIu16"" +pci_nvme_err_invalid_create_sq_cqid(uint16_t cqid) "failed creating submission queue, invalid cqid=%"PRIu16"" +pci_nvme_err_invalid_create_sq_sqid(uint16_t sqid) "failed creating submission queue, invalid sqid=%"PRIu16"" +pci_nvme_err_invalid_create_sq_size(uint16_t qsize) "failed creating submission queue, invalid qsize=%"PRIu16"" +pci_nvme_err_invalid_create_sq_addr(uint64_t addr) "failed creating submission queue, addr=0x%"PRIx64"" +pci_nvme_err_invalid_create_sq_qflags(uint16_t qflags) "failed creating submission queue, qflags=%"PRIu16"" +pci_nvme_err_invalid_del_cq_cqid(uint16_t cqid) "failed deleting completion queue, cqid=%"PRIu16"" +pci_nvme_err_invalid_del_cq_notempty(uint16_t cqid) "failed deleting completion queue, it is not empty, cqid=%"PRIu16"" +pci_nvme_err_invalid_create_cq_cqid(uint16_t cqid) "failed creating completion queue, cqid=%"PRIu16"" +pci_nvme_err_invalid_create_cq_size(uint16_t size) "failed creating completion queue, size=%"PRIu16"" +pci_nvme_err_invalid_create_cq_addr(uint64_t addr) "failed creating completion queue, addr=0x%"PRIx64"" +pci_nvme_err_invalid_create_cq_vector(uint16_t vector) "failed creating completion queue, vector=%"PRIu16"" +pci_nvme_err_invalid_create_cq_qflags(uint16_t qflags) "failed creating completion queue, qflags=%"PRIu16"" +pci_nvme_err_invalid_identify_cns(uint16_t cns) "identify, invalid cns=0x%"PRIx16"" +pci_nvme_err_invalid_getfeat(int dw10) "invalid get features, dw10=0x%"PRIx32"" +pci_nvme_err_invalid_setfeat(uint32_t dw10) "invalid set features, dw10=0x%"PRIx32"" +pci_nvme_err_invalid_log_page(uint16_t cid, uint16_t lid) "cid %"PRIu16" lid 0x%"PRIx16"" +pci_nvme_err_startfail_cq(void) "nvme_start_ctrl failed because there are non-admin completion queues" +pci_nvme_err_startfail_sq(void) "nvme_start_ctrl failed because there are non-admin submission queues" +pci_nvme_err_startfail_asq_misaligned(uint64_t addr) "nvme_start_ctrl failed because the admin submission queue address is misaligned: 0x%"PRIx64"" +pci_nvme_err_startfail_acq_misaligned(uint64_t addr) "nvme_start_ctrl failed because the admin completion queue address is misaligned: 0x%"PRIx64"" +pci_nvme_err_startfail_page_too_small(uint8_t log2ps, uint8_t maxlog2ps) "nvme_start_ctrl failed because the page size is too small: log2size=%u, min=%u" 
+pci_nvme_err_startfail_page_too_large(uint8_t log2ps, uint8_t maxlog2ps) "nvme_start_ctrl failed because the page size is too large: log2size=%u, max=%u" +pci_nvme_err_startfail_cqent_too_small(uint8_t log2ps, uint8_t maxlog2ps) "nvme_start_ctrl failed because the completion queue entry size is too small: log2size=%u, min=%u" +pci_nvme_err_startfail_cqent_too_large(uint8_t log2ps, uint8_t maxlog2ps) "nvme_start_ctrl failed because the completion queue entry size is too large: log2size=%u, max=%u" +pci_nvme_err_startfail_sqent_too_small(uint8_t log2ps, uint8_t maxlog2ps) "nvme_start_ctrl failed because the submission queue entry size is too small: log2size=%u, min=%u" +pci_nvme_err_startfail_sqent_too_large(uint8_t log2ps, uint8_t maxlog2ps) "nvme_start_ctrl failed because the submission queue entry size is too large: log2size=%u, max=%u" +pci_nvme_err_startfail_css(uint8_t css) "nvme_start_ctrl failed because invalid command set selected:%u" +pci_nvme_err_startfail_asqent_sz_zero(void) "nvme_start_ctrl failed because the admin submission queue size is zero" +pci_nvme_err_startfail_acqent_sz_zero(void) "nvme_start_ctrl failed because the admin completion queue size is zero" +pci_nvme_err_startfail_zasl_too_small(uint32_t zasl, uint32_t pagesz) "nvme_start_ctrl failed because zone append size limit %"PRIu32" is too small, needs to be >= %"PRIu32"" +pci_nvme_err_startfail(void) "setting controller enable bit failed" +pci_nvme_err_invalid_mgmt_action(uint8_t action) "action=0x%"PRIx8"" + +# undefined behavior +pci_nvme_ub_mmiowr_misaligned32(uint64_t offset) "MMIO write not 32-bit aligned, offset=0x%"PRIx64"" +pci_nvme_ub_mmiowr_toosmall(uint64_t offset, unsigned size) "MMIO write smaller than 32 bits, offset=0x%"PRIx64", size=%u" +pci_nvme_ub_mmiowr_intmask_with_msix(void) "undefined access to interrupt mask set when MSI-X is enabled" +pci_nvme_ub_mmiowr_ro_csts(void) "attempted to set a read only bit of controller status" +pci_nvme_ub_mmiowr_ssreset_w1c_unsupported(void) "attempted to W1C CSTS.NSSRO but CAP.NSSRS is zero (not supported)" +pci_nvme_ub_mmiowr_ssreset_unsupported(void) "attempted NVM subsystem reset but CAP.NSSRS is zero (not supported)" +pci_nvme_ub_mmiowr_cmbloc_reserved(void) "invalid write to reserved CMBLOC when CMBSZ is zero, ignored" +pci_nvme_ub_mmiowr_cmbsz_readonly(void) "invalid write to read only CMBSZ, ignored" +pci_nvme_ub_mmiowr_pmrcap_readonly(void) "invalid write to read only PMRCAP, ignored" +pci_nvme_ub_mmiowr_pmrsts_readonly(void) "invalid write to read only PMRSTS, ignored" +pci_nvme_ub_mmiowr_pmrebs_readonly(void) "invalid write to read only PMREBS, ignored" +pci_nvme_ub_mmiowr_pmrswtp_readonly(void) "invalid write to read only PMRSWTP, ignored" +pci_nvme_ub_mmiowr_invalid(uint64_t offset, uint64_t data) "invalid MMIO write, offset=0x%"PRIx64", data=0x%"PRIx64"" +pci_nvme_ub_mmiord_misaligned32(uint64_t offset) "MMIO read not 32-bit aligned, offset=0x%"PRIx64"" +pci_nvme_ub_mmiord_toosmall(uint64_t offset) "MMIO read smaller than 32-bits, offset=0x%"PRIx64"" +pci_nvme_ub_mmiord_invalid_ofs(uint64_t offset) "MMIO read beyond last register, offset=0x%"PRIx64", returning 0" +pci_nvme_ub_db_wr_misaligned(uint64_t offset) "doorbell write not 32-bit aligned, offset=0x%"PRIx64", ignoring" +pci_nvme_ub_db_wr_invalid_cq(uint32_t qid) "completion queue doorbell write for nonexistent queue, cqid=%"PRIu32", ignoring" +pci_nvme_ub_db_wr_invalid_cqhead(uint32_t qid, uint16_t new_head) "completion queue doorbell write value beyond queue size, cqid=%"PRIu32", 
new_head=%"PRIu16", ignoring" +pci_nvme_ub_db_wr_invalid_sq(uint32_t qid) "submission queue doorbell write for nonexistent queue, sqid=%"PRIu32", ignoring" +pci_nvme_ub_db_wr_invalid_sqtail(uint32_t qid, uint16_t new_tail) "submission queue doorbell write value beyond queue size, sqid=%"PRIu32", new_head=%"PRIu16", ignoring" +pci_nvme_ub_unknown_css_value(void) "unknown value in cc.css field" +pci_nvme_ub_too_many_mappings(void) "too many prp/sgl mappings" diff --git a/hw/nvme/trace.h b/hw/nvme/trace.h new file mode 100644 index 000000000..b398ea107 --- /dev/null +++ b/hw/nvme/trace.h @@ -0,0 +1 @@ +#include "trace/trace-hw_nvme.h" diff --git a/hw/nvram/Kconfig b/hw/nvram/Kconfig new file mode 100644 index 000000000..24cfc18f8 --- /dev/null +++ b/hw/nvram/Kconfig @@ -0,0 +1,36 @@ +config DS1225Y + bool + +config AT24C + bool + depends on I2C + +config MAC_NVRAM + bool + select CHRP_NVRAM + +# NMC93XX uses the NS uWire interface (similar to SPI but less configurable) +config NMC93XX_EEPROM + bool + +config CHRP_NVRAM + bool + +config XLNX_EFUSE_CRC + bool + +config XLNX_EFUSE + bool + select XLNX_EFUSE_CRC + +config XLNX_EFUSE_VERSAL + bool + select XLNX_EFUSE + +config XLNX_EFUSE_ZYNQMP + bool + select XLNX_EFUSE + +config XLNX_BBRAM + bool + select XLNX_EFUSE_CRC diff --git a/hw/nvram/chrp_nvram.c b/hw/nvram/chrp_nvram.c new file mode 100644 index 000000000..d4d10a7c0 --- /dev/null +++ b/hw/nvram/chrp_nvram.c @@ -0,0 +1,102 @@ +/* + * Common Hardware Reference Platform NVRAM helper functions. + * + * The CHRP NVRAM layout is used by OpenBIOS and SLOF. See CHRP + * specification, chapter 8, or the LoPAPR specification for details + * about the NVRAM layout. + * + * This code is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +#include "qemu/osdep.h" +#include "qemu/cutils.h" +#include "qemu/error-report.h" +#include "hw/nvram/chrp_nvram.h" +#include "sysemu/sysemu.h" + +static int chrp_nvram_set_var(uint8_t *nvram, int addr, const char *str, + int max_len) +{ + int len; + + len = strlen(str) + 1; + + if (max_len < len) { + return -1; + } + + memcpy(&nvram[addr], str, len); + + return addr + len; +} + +/** + * Create a "system partition", used for the Open Firmware + * environment variables. + */ +int chrp_nvram_create_system_partition(uint8_t *data, int min_len, int max_len) +{ + ChrpNvramPartHdr *part_header; + unsigned int i; + int end; + + if (max_len < sizeof(*part_header)) { + goto fail; + } + + part_header = (ChrpNvramPartHdr *)data; + part_header->signature = CHRP_NVPART_SYSTEM; + pstrcpy(part_header->name, sizeof(part_header->name), "system"); + + end = sizeof(ChrpNvramPartHdr); + for (i = 0; i < nb_prom_envs; i++) { + end = chrp_nvram_set_var(data, end, prom_envs[i], max_len - end); + if (end == -1) { + goto fail; + } + } + + /* End marker */ + data[end++] = '\0'; + + end = (end + 15) & ~15; + /* XXX: OpenBIOS is not able to grow up a partition. Leave some space for + new variables. 
 */
+    if (end < min_len) {
+        end = min_len;
+    }
+    chrp_nvram_finish_partition(part_header, end);
+
+    return end;
+
+fail:
+    error_report("NVRAM is too small. Try to pass less data to -prom-env");
+    exit(EXIT_FAILURE);
+}
+
+/**
+ * Create a "free space" partition
+ */
+int chrp_nvram_create_free_partition(uint8_t *data, int len)
+{
+    ChrpNvramPartHdr *part_header;
+
+    part_header = (ChrpNvramPartHdr *)data;
+    part_header->signature = CHRP_NVPART_FREE;
+    pstrcpy(part_header->name, sizeof(part_header->name), "free");
+
+    chrp_nvram_finish_partition(part_header, len);
+
+    return len;
+}
diff --git a/hw/nvram/ds1225y.c b/hw/nvram/ds1225y.c
new file mode 100644
index 000000000..3660a47c5
--- /dev/null
+++ b/hw/nvram/ds1225y.c
@@ -0,0 +1,172 @@
+/*
+ * QEMU NVRAM emulation for DS1225Y chip
+ *
+ * Copyright (c) 2007-2008 Hervé Poussineau
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/qdev-properties.h"
+#include "hw/sysbus.h"
+#include "migration/vmstate.h"
+#include "trace.h"
+#include "qemu/error-report.h"
+#include "qemu/module.h"
+#include "qom/object.h"
+
+typedef struct {
+    MemoryRegion iomem;
+    uint32_t chip_size;
+    char *filename;
+    FILE *file;
+    uint8_t *contents;
+} NvRamState;
+
+static uint64_t nvram_read(void *opaque, hwaddr addr, unsigned size)
+{
+    NvRamState *s = opaque;
+    uint32_t val;
+
+    val = s->contents[addr];
+    trace_nvram_read(addr, val);
+    return val;
+}
+
+static void nvram_write(void *opaque, hwaddr addr, uint64_t val,
+                        unsigned size)
+{
+    NvRamState *s = opaque;
+
+    val &= 0xff;
+    trace_nvram_write(addr, s->contents[addr], val);
+
+    s->contents[addr] = val;
+    if (s->file) {
+        fseek(s->file, addr, SEEK_SET);
+        fputc(val, s->file);
+        fflush(s->file);
+    }
+}
+
+static const MemoryRegionOps nvram_ops = {
+    .read = nvram_read,
+    .write = nvram_write,
+    .impl = {
+        .min_access_size = 1,
+        .max_access_size = 1,
+    },
+    .endianness = DEVICE_LITTLE_ENDIAN,
+};
+
+static int nvram_post_load(void *opaque, int version_id)
+{
+    NvRamState *s = opaque;
+
+    /* Close the file, as the filename may have changed in the load/store
+       process */
+    if (s->file) {
+        fclose(s->file);
+    }
+
+    /* Write back nvram contents */
+    s->file = s->filename ? fopen(s->filename, "wb") : NULL;
+    if (s->file) {
+        /* Write back contents, as 'wb' mode truncated the file */
+        if (fwrite(s->contents, s->chip_size, 1, s->file) != 1) {
+            printf("nvram_post_load: short write\n");
+        }
+        fflush(s->file);
+    }
+
+    return 0;
+}
+
+static const VMStateDescription vmstate_nvram = {
+    .name = "nvram",
+    .version_id = 0,
+    .minimum_version_id = 0,
+    .post_load = nvram_post_load,
+    .fields = (VMStateField[]) {
+        VMSTATE_VARRAY_UINT32(contents, NvRamState, chip_size, 0,
+                              vmstate_info_uint8, uint8_t),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+#define TYPE_DS1225Y "ds1225y"
+OBJECT_DECLARE_SIMPLE_TYPE(SysBusNvRamState, DS1225Y)
+
+struct SysBusNvRamState {
+    SysBusDevice parent_obj;
+
+    NvRamState nvram;
+};
+
+static void nvram_sysbus_realize(DeviceState *dev, Error **errp)
+{
+    SysBusNvRamState *sys = DS1225Y(dev);
+    NvRamState *s = &sys->nvram;
+    FILE *file;
+
+    s->contents = g_malloc0(s->chip_size);
+
+    memory_region_init_io(&s->iomem, OBJECT(s), &nvram_ops, s,
+                          "nvram", s->chip_size);
+    sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->iomem);
+
+    /* Read current file */
+    file = s->filename ? fopen(s->filename, "rb") : NULL;
+    if (file) {
+        /* Read nvram contents */
+        if (fread(s->contents, s->chip_size, 1, file) != 1) {
+            error_report("nvram_sysbus_realize: short read");
+        }
+        fclose(file);
+    }
+    nvram_post_load(s, 0);
+}
+
+static Property nvram_sysbus_properties[] = {
+    DEFINE_PROP_UINT32("size", SysBusNvRamState, nvram.chip_size, 0x2000),
+    DEFINE_PROP_STRING("filename", SysBusNvRamState, nvram.filename),
+    DEFINE_PROP_END_OF_LIST(),
+};
+
+static void nvram_sysbus_class_init(ObjectClass *klass, void *data)
+{
+    DeviceClass *dc = DEVICE_CLASS(klass);
+
+    dc->realize = nvram_sysbus_realize;
+    dc->vmsd = &vmstate_nvram;
+    device_class_set_props(dc, nvram_sysbus_properties);
+}
+
+static const TypeInfo nvram_sysbus_info = {
+    .name = TYPE_DS1225Y,
+    .parent = TYPE_SYS_BUS_DEVICE,
+    .instance_size = sizeof(SysBusNvRamState),
+    .class_init = nvram_sysbus_class_init,
+};
+
+static void nvram_register_types(void)
+{
+    type_register_static(&nvram_sysbus_info);
+}
+
+type_init(nvram_register_types)
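The eeprom93xx model added below is driven purely by bit-banging: while CS is high, each rising SK edge shifts one DI bit in, and a word transfer consists of two start bits (a 0, then a 1), two opcode bits, the address bits, and 16 data bits. A hedged caller sketch of a READ cycle against the public interface from hw/nvram/eeprom93xx.h; the two helper functions are hypothetical and not part of this patch:

    #include "hw/nvram/eeprom93xx.h"

    /* Clock one bit into the EEPROM (rising SK edge), then sample DO. */
    static int eeprom_clock_bit(eeprom_t *e, int di)
    {
        eeprom93xx_write(e, 1, 0, di);  /* CS high, SK low, present DI */
        eeprom93xx_write(e, 1, 1, di);  /* rising SK edge shifts DI in */
        return eeprom93xx_read(e);      /* current state of DO */
    }

    /* Read one 16-bit word; addrbits is 6 or 8 depending on the chip. */
    static uint16_t eeprom_read_word(eeprom_t *e, uint8_t addr, int addrbits)
    {
        uint16_t val = 0;
        int i;

        eeprom93xx_write(e, 0, 0, 0);   /* make sure CS starts low */
        eeprom_clock_bit(e, 0);         /* 1st start bit (expected 0) */
        eeprom_clock_bit(e, 1);         /* 2nd start bit (expected 1) */
        eeprom_clock_bit(e, 1);         /* opcode 10b = read */
        eeprom_clock_bit(e, 0);
        for (i = addrbits - 1; i >= 0; i--) {
            eeprom_clock_bit(e, (addr >> i) & 1);
        }
        for (i = 0; i < 16; i++) {      /* data shifts out MSB first */
            val = (val << 1) | eeprom_clock_bit(e, 0);
        }
        eeprom93xx_write(e, 0, 0, 0);   /* drop CS to finish the cycle */
        return val;
    }

Write and erase use the same framing with the other opcodes and only take effect when CS drops at the end of the cycle, mirroring the state machine in eeprom93xx_write() below.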
diff --git a/hw/nvram/eeprom93xx.c b/hw/nvram/eeprom93xx.c
new file mode 100644
index 000000000..a1b9c7884
--- /dev/null
+++ b/hw/nvram/eeprom93xx.c
@@ -0,0 +1,342 @@
+/*
+ * QEMU EEPROM 93xx emulation
+ *
+ * Copyright (c) 2006-2007 Stefan Weil
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/* Emulation for serial EEPROMs:
+ * NMC93C06 256-Bit (16 x 16)
+ * NMC93C46 1024-Bit (64 x 16)
+ * NMC93C56 2048 Bit (128 x 16)
+ * NMC93C66 4096 Bit (256 x 16)
+ * Compatible devices include FM93C46 and others.
+ *
+ * Other drivers use these interface functions:
+ * eeprom93xx_new - add a new EEPROM (with 16, 64, 128 or 256 words)
+ * eeprom93xx_free - destroy EEPROM
+ * eeprom93xx_read - read data from the EEPROM
+ * eeprom93xx_write - write data to the EEPROM
+ * eeprom93xx_data - get EEPROM data array for external manipulation
+ *
+ * Todo list:
+ * - No emulation of EEPROM timings.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/nvram/eeprom93xx.h"
+#include "migration/qemu-file-types.h"
+#include "migration/vmstate.h"
+
+/* Debug EEPROM emulation. */
+//~ #define DEBUG_EEPROM
+
+#ifdef DEBUG_EEPROM
+#define logout(fmt, ...) fprintf(stderr, "EEPROM\t%-24s" fmt, __func__, ## __VA_ARGS__)
+#else
+#define logout(fmt, ...) ((void)0)
+#endif
+
+#define EEPROM_INSTANCE 0
+#define OLD_EEPROM_VERSION 20061112
+#define EEPROM_VERSION (OLD_EEPROM_VERSION + 1)
+
+#if 0
+typedef enum {
+    eeprom_read = 0x80,   /* read register xx */
+    eeprom_write = 0x40,  /* write register xx */
+    eeprom_erase = 0xc0,  /* erase register xx */
+    eeprom_ewen = 0x30,   /* erase / write enable */
+    eeprom_ewds = 0x00,   /* erase / write disable */
+    eeprom_eral = 0x20,   /* erase all registers */
+    eeprom_wral = 0x10,   /* write all registers */
+    eeprom_amask = 0x0f,
+    eeprom_imask = 0xf0
+} eeprom_instruction_t;
+#endif
+
+#ifdef DEBUG_EEPROM
+static const char *opstring[] = {
+    "extended", "write", "read", "erase"
+};
+#endif
+
+struct _eeprom_t {
+    uint8_t tick;
+    uint8_t address;
+    uint8_t command;
+    uint8_t writable;
+
+    uint8_t eecs;
+    uint8_t eesk;
+    uint8_t eedo;
+
+    uint8_t addrbits;
+    uint16_t size;
+    uint16_t data;
+    uint16_t contents[];
+};
+
+/* Code for saving and restoring of EEPROM state. */
+
+/* Restore a uint16_t from a uint8_t.
+   This is a big hack, but it is how the old state did it.
+ */ + +static int get_uint16_from_uint8(QEMUFile *f, void *pv, size_t size, + const VMStateField *field) +{ + uint16_t *v = pv; + *v = qemu_get_ubyte(f); + return 0; +} + +static int put_unused(QEMUFile *f, void *pv, size_t size, + const VMStateField *field, JSONWriter *vmdesc) +{ + fprintf(stderr, "uint16_from_uint8 is used only for backwards compatibility.\n"); + fprintf(stderr, "Never should be used to write a new state.\n"); + exit(0); + + return 0; +} + +static const VMStateInfo vmstate_hack_uint16_from_uint8 = { + .name = "uint16_from_uint8", + .get = get_uint16_from_uint8, + .put = put_unused, +}; + +#define VMSTATE_UINT16_HACK_TEST(_f, _s, _t) \ + VMSTATE_SINGLE_TEST(_f, _s, _t, 0, vmstate_hack_uint16_from_uint8, uint16_t) + +static bool is_old_eeprom_version(void *opaque, int version_id) +{ + return version_id == OLD_EEPROM_VERSION; +} + +static const VMStateDescription vmstate_eeprom = { + .name = "eeprom", + .version_id = EEPROM_VERSION, + .minimum_version_id = OLD_EEPROM_VERSION, + .fields = (VMStateField[]) { + VMSTATE_UINT8(tick, eeprom_t), + VMSTATE_UINT8(address, eeprom_t), + VMSTATE_UINT8(command, eeprom_t), + VMSTATE_UINT8(writable, eeprom_t), + + VMSTATE_UINT8(eecs, eeprom_t), + VMSTATE_UINT8(eesk, eeprom_t), + VMSTATE_UINT8(eedo, eeprom_t), + + VMSTATE_UINT8(addrbits, eeprom_t), + VMSTATE_UINT16_HACK_TEST(size, eeprom_t, is_old_eeprom_version), + VMSTATE_UNUSED_TEST(is_old_eeprom_version, 1), + VMSTATE_UINT16_EQUAL_V(size, eeprom_t, EEPROM_VERSION, NULL), + VMSTATE_UINT16(data, eeprom_t), + VMSTATE_VARRAY_UINT16_UNSAFE(contents, eeprom_t, size, 0, + vmstate_info_uint16, uint16_t), + VMSTATE_END_OF_LIST() + } +}; + +void eeprom93xx_write(eeprom_t *eeprom, int eecs, int eesk, int eedi) +{ + uint8_t tick = eeprom->tick; + uint8_t eedo = eeprom->eedo; + uint16_t address = eeprom->address; + uint8_t command = eeprom->command; + + logout("CS=%u SK=%u DI=%u DO=%u, tick = %u\n", + eecs, eesk, eedi, eedo, tick); + + if (!eeprom->eecs && eecs) { + /* Start chip select cycle. */ + logout("Cycle start, waiting for 1st start bit (0)\n"); + tick = 0; + command = 0x0; + address = 0x0; + } else if (eeprom->eecs && !eecs) { + /* End chip select cycle. This triggers write / erase. */ + if (eeprom->writable) { + uint8_t subcommand = address >> (eeprom->addrbits - 2); + if (command == 0 && subcommand == 2) { + /* Erase all. */ + for (address = 0; address < eeprom->size; address++) { + eeprom->contents[address] = 0xffff; + } + } else if (command == 3) { + /* Erase word. */ + eeprom->contents[address] = 0xffff; + } else if (tick >= 2 + 2 + eeprom->addrbits + 16) { + if (command == 1) { + /* Write word. */ + eeprom->contents[address] &= eeprom->data; + } else if (command == 0 && subcommand == 1) { + /* Write all. */ + for (address = 0; address < eeprom->size; address++) { + eeprom->contents[address] &= eeprom->data; + } + } + } + } + /* Output DO is tristate, read results in 1. */ + eedo = 1; + } else if (eecs && !eeprom->eesk && eesk) { + /* Raising edge of clock shifts data in. */ + if (tick == 0) { + /* Wait for 1st start bit. */ + if (eedi == 0) { + logout("Got correct 1st start bit, waiting for 2nd start bit (1)\n"); + tick++; + } else { + logout("wrong 1st start bit (is 1, should be 0)\n"); + tick = 2; + //~ assert(!"wrong start bit"); + } + } else if (tick == 1) { + /* Wait for 2nd start bit. 
*/ + if (eedi != 0) { + logout("Got correct 2nd start bit, getting command + address\n"); + tick++; + } else { + logout("1st start bit is longer than needed\n"); + } + } else if (tick < 2 + 2) { + /* Got 2 start bits, transfer 2 opcode bits. */ + tick++; + command <<= 1; + if (eedi) { + command += 1; + } + } else if (tick < 2 + 2 + eeprom->addrbits) { + /* Got 2 start bits and 2 opcode bits, transfer all address bits. */ + tick++; + address = ((address << 1) | eedi); + if (tick == 2 + 2 + eeprom->addrbits) { + logout("%s command, address = 0x%02x (value 0x%04x)\n", + opstring[command], address, eeprom->contents[address]); + if (command == 2) { + eedo = 0; + } + address = address % eeprom->size; + if (command == 0) { + /* Command code in upper 2 bits of address. */ + switch (address >> (eeprom->addrbits - 2)) { + case 0: + logout("write disable command\n"); + eeprom->writable = 0; + break; + case 1: + logout("write all command\n"); + break; + case 2: + logout("erase all command\n"); + break; + case 3: + logout("write enable command\n"); + eeprom->writable = 1; + break; + } + } else { + /* Read, write or erase word. */ + eeprom->data = eeprom->contents[address]; + } + } + } else if (tick < 2 + 2 + eeprom->addrbits + 16) { + /* Transfer 16 data bits. */ + tick++; + if (command == 2) { + /* Read word. */ + eedo = ((eeprom->data & 0x8000) != 0); + } + eeprom->data <<= 1; + eeprom->data += eedi; + } else { + logout("additional unneeded tick, not processed\n"); + } + } + /* Save status of EEPROM. */ + eeprom->tick = tick; + eeprom->eecs = eecs; + eeprom->eesk = eesk; + eeprom->eedo = eedo; + eeprom->address = address; + eeprom->command = command; +} + +uint16_t eeprom93xx_read(eeprom_t *eeprom) +{ + /* Return status of pin DO (0 or 1). */ + logout("CS=%u DO=%u\n", eeprom->eecs, eeprom->eedo); + return eeprom->eedo; +} + +#if 0 +void eeprom93xx_reset(eeprom_t *eeprom) +{ + /* prepare eeprom */ + logout("eeprom = 0x%p\n", eeprom); + eeprom->tick = 0; + eeprom->command = 0; +} +#endif + +eeprom_t *eeprom93xx_new(DeviceState *dev, uint16_t nwords) +{ + /* Add a new EEPROM (with 16, 64 or 256 words). */ + eeprom_t *eeprom; + uint8_t addrbits; + + switch (nwords) { + case 16: + case 64: + addrbits = 6; + break; + case 128: + case 256: + addrbits = 8; + break; + default: + assert(!"Unsupported EEPROM size, fallback to 64 words!"); + nwords = 64; + addrbits = 6; + } + + eeprom = (eeprom_t *)g_malloc0(sizeof(*eeprom) + nwords * 2); + eeprom->size = nwords; + eeprom->addrbits = addrbits; + /* Output DO is tristate, read results in 1. */ + eeprom->eedo = 1; + logout("eeprom = 0x%p, nwords = %u\n", eeprom, nwords); + vmstate_register(VMSTATE_IF(dev), 0, &vmstate_eeprom, eeprom); + return eeprom; +} + +void eeprom93xx_free(DeviceState *dev, eeprom_t *eeprom) +{ + /* Destroy EEPROM. */ + logout("eeprom = 0x%p\n", eeprom); + vmstate_unregister(VMSTATE_IF(dev), &vmstate_eeprom, eeprom); + g_free(eeprom); +} + +uint16_t *eeprom93xx_data(eeprom_t *eeprom) +{ + /* Get EEPROM data array. */ + return &eeprom->contents[0]; +} + +/* eof */ diff --git a/hw/nvram/eeprom_at24c.c b/hw/nvram/eeprom_at24c.c new file mode 100644 index 000000000..af6f5dbb9 --- /dev/null +++ b/hw/nvram/eeprom_at24c.c @@ -0,0 +1,207 @@ +/* + * *AT24C* series I2C EEPROM + * + * Copyright (c) 2015 Michael Davidsaver + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the LICENSE file in the top-level directory. 
+ */ + +#include "qemu/osdep.h" + +#include "qapi/error.h" +#include "qemu/module.h" +#include "hw/i2c/i2c.h" +#include "hw/qdev-properties.h" +#include "hw/qdev-properties-system.h" +#include "sysemu/block-backend.h" +#include "qom/object.h" + +/* #define DEBUG_AT24C */ + +#ifdef DEBUG_AT24C +#define DPRINTK(FMT, ...) printf(TYPE_AT24C_EE " : " FMT, ## __VA_ARGS__) +#else +#define DPRINTK(FMT, ...) do {} while (0) +#endif + +#define ERR(FMT, ...) fprintf(stderr, TYPE_AT24C_EE " : " FMT, \ + ## __VA_ARGS__) + +#define TYPE_AT24C_EE "at24c-eeprom" +typedef struct EEPROMState EEPROMState; +DECLARE_INSTANCE_CHECKER(EEPROMState, AT24C_EE, + TYPE_AT24C_EE) + +struct EEPROMState { + I2CSlave parent_obj; + + /* address counter */ + uint16_t cur; + /* total size in bytes */ + uint32_t rsize; + bool writable; + /* cells changed since last START? */ + bool changed; + /* during WRITE, # of address bytes transfered */ + uint8_t haveaddr; + + uint8_t *mem; + + BlockBackend *blk; +}; + +static +int at24c_eeprom_event(I2CSlave *s, enum i2c_event event) +{ + EEPROMState *ee = container_of(s, EEPROMState, parent_obj); + + switch (event) { + case I2C_START_SEND: + case I2C_START_RECV: + case I2C_FINISH: + ee->haveaddr = 0; + DPRINTK("clear\n"); + if (ee->blk && ee->changed) { + int len = blk_pwrite(ee->blk, 0, ee->mem, ee->rsize, 0); + if (len != ee->rsize) { + ERR(TYPE_AT24C_EE + " : failed to write backing file\n"); + } + DPRINTK("Wrote to backing file\n"); + } + ee->changed = false; + break; + case I2C_NACK: + break; + } + return 0; +} + +static +uint8_t at24c_eeprom_recv(I2CSlave *s) +{ + EEPROMState *ee = AT24C_EE(s); + uint8_t ret; + + ret = ee->mem[ee->cur]; + + ee->cur = (ee->cur + 1u) % ee->rsize; + DPRINTK("Recv %02x %c\n", ret, ret); + + return ret; +} + +static +int at24c_eeprom_send(I2CSlave *s, uint8_t data) +{ + EEPROMState *ee = AT24C_EE(s); + + if (ee->haveaddr < 2) { + ee->cur <<= 8; + ee->cur |= data; + ee->haveaddr++; + if (ee->haveaddr == 2) { + ee->cur %= ee->rsize; + DPRINTK("Set pointer %04x\n", ee->cur); + } + + } else { + if (ee->writable) { + DPRINTK("Send %02x\n", data); + ee->mem[ee->cur] = data; + ee->changed = true; + } else { + DPRINTK("Send error %02x read-only\n", data); + } + ee->cur = (ee->cur + 1u) % ee->rsize; + + } + + return 0; +} + +static void at24c_eeprom_realize(DeviceState *dev, Error **errp) +{ + EEPROMState *ee = AT24C_EE(dev); + + if (ee->blk) { + int64_t len = blk_getlength(ee->blk); + + if (len != ee->rsize) { + error_setg(errp, "%s: Backing file size %" PRId64 " != %u", + TYPE_AT24C_EE, len, ee->rsize); + return; + } + + if (blk_set_perm(ee->blk, BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE, + BLK_PERM_ALL, &error_fatal) < 0) + { + error_setg(errp, "%s: Backing file incorrect permission", + TYPE_AT24C_EE); + return; + } + } + + ee->mem = g_malloc0(ee->rsize); +} + +static +void at24c_eeprom_reset(DeviceState *state) +{ + EEPROMState *ee = AT24C_EE(state); + + ee->changed = false; + ee->cur = 0; + ee->haveaddr = 0; + + memset(ee->mem, 0, ee->rsize); + + if (ee->blk) { + int len = blk_pread(ee->blk, 0, ee->mem, ee->rsize); + + if (len != ee->rsize) { + ERR(TYPE_AT24C_EE + " : Failed initial sync with backing file\n"); + } + DPRINTK("Reset read backing file\n"); + } +} + +static Property at24c_eeprom_props[] = { + DEFINE_PROP_UINT32("rom-size", EEPROMState, rsize, 0), + DEFINE_PROP_BOOL("writable", EEPROMState, writable, true), + DEFINE_PROP_DRIVE("drive", EEPROMState, blk), + DEFINE_PROP_END_OF_LIST() +}; + +static +void at24c_eeprom_class_init(ObjectClass 
*klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + I2CSlaveClass *k = I2C_SLAVE_CLASS(klass); + + dc->realize = &at24c_eeprom_realize; + k->event = &at24c_eeprom_event; + k->recv = &at24c_eeprom_recv; + k->send = &at24c_eeprom_send; + + device_class_set_props(dc, at24c_eeprom_props); + dc->reset = at24c_eeprom_reset; +} + +static +const TypeInfo at24c_eeprom_type = { + .name = TYPE_AT24C_EE, + .parent = TYPE_I2C_SLAVE, + .instance_size = sizeof(EEPROMState), + .class_size = sizeof(I2CSlaveClass), + .class_init = at24c_eeprom_class_init, +}; + +static void at24c_eeprom_register(void) +{ + type_register_static(&at24c_eeprom_type); +} + +type_init(at24c_eeprom_register) diff --git a/hw/nvram/fw_cfg-interface.c b/hw/nvram/fw_cfg-interface.c new file mode 100644 index 000000000..0e93feeae --- /dev/null +++ b/hw/nvram/fw_cfg-interface.c @@ -0,0 +1,23 @@ +/* + * QEMU Firmware configuration device emulation (QOM interfaces) + * + * Copyright 2020 Red Hat, Inc. + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "hw/nvram/fw_cfg.h" + +static const TypeInfo fw_cfg_data_generator_interface_info = { + .parent = TYPE_INTERFACE, + .name = TYPE_FW_CFG_DATA_GENERATOR_INTERFACE, + .class_size = sizeof(FWCfgDataGeneratorClass), +}; + +static void fw_cfg_register_interfaces(void) +{ + type_register_static(&fw_cfg_data_generator_interface_info); +} + +type_init(fw_cfg_register_interfaces) diff --git a/hw/nvram/fw_cfg.c b/hw/nvram/fw_cfg.c new file mode 100644 index 000000000..c06b30de1 --- /dev/null +++ b/hw/nvram/fw_cfg.c @@ -0,0 +1,1395 @@ +/* + * QEMU Firmware configuration device emulation + * + * Copyright (c) 2008 Gleb Natapov + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include "qemu-common.h" +#include "qemu/datadir.h" +#include "sysemu/sysemu.h" +#include "sysemu/dma.h" +#include "sysemu/reset.h" +#include "hw/boards.h" +#include "hw/nvram/fw_cfg.h" +#include "hw/qdev-properties.h" +#include "hw/sysbus.h" +#include "migration/qemu-file-types.h" +#include "migration/vmstate.h" +#include "trace.h" +#include "qemu/error-report.h" +#include "qemu/option.h" +#include "qemu/config-file.h" +#include "qemu/cutils.h" +#include "qapi/error.h" +#include "hw/acpi/aml-build.h" +#include "hw/pci/pci_bus.h" + +#define FW_CFG_FILE_SLOTS_DFLT 0x20 + +/* FW_CFG_VERSION bits */ +#define FW_CFG_VERSION 0x01 +#define FW_CFG_VERSION_DMA 0x02 + +/* FW_CFG_DMA_CONTROL bits */ +#define FW_CFG_DMA_CTL_ERROR 0x01 +#define FW_CFG_DMA_CTL_READ 0x02 +#define FW_CFG_DMA_CTL_SKIP 0x04 +#define FW_CFG_DMA_CTL_SELECT 0x08 +#define FW_CFG_DMA_CTL_WRITE 0x10 + +#define FW_CFG_DMA_SIGNATURE 0x51454d5520434647ULL /* "QEMU CFG" */ + +struct FWCfgEntry { + uint32_t len; + bool allow_write; + uint8_t *data; + void *callback_opaque; + FWCfgCallback select_cb; + FWCfgWriteCallback write_cb; +}; + +/** + * key_name: + * + * @key: The uint16 selector key. + * + * Returns: The stringified name if the selector refers to a well-known + * numerically defined item, or NULL on key lookup failure. + */ +static const char *key_name(uint16_t key) +{ + static const char *fw_cfg_wellknown_keys[FW_CFG_FILE_FIRST] = { + [FW_CFG_SIGNATURE] = "signature", + [FW_CFG_ID] = "id", + [FW_CFG_UUID] = "uuid", + [FW_CFG_RAM_SIZE] = "ram_size", + [FW_CFG_NOGRAPHIC] = "nographic", + [FW_CFG_NB_CPUS] = "nb_cpus", + [FW_CFG_MACHINE_ID] = "machine_id", + [FW_CFG_KERNEL_ADDR] = "kernel_addr", + [FW_CFG_KERNEL_SIZE] = "kernel_size", + [FW_CFG_KERNEL_CMDLINE] = "kernel_cmdline", + [FW_CFG_INITRD_ADDR] = "initrd_addr", + [FW_CFG_INITRD_SIZE] = "initdr_size", + [FW_CFG_BOOT_DEVICE] = "boot_device", + [FW_CFG_NUMA] = "numa", + [FW_CFG_BOOT_MENU] = "boot_menu", + [FW_CFG_MAX_CPUS] = "max_cpus", + [FW_CFG_KERNEL_ENTRY] = "kernel_entry", + [FW_CFG_KERNEL_DATA] = "kernel_data", + [FW_CFG_INITRD_DATA] = "initrd_data", + [FW_CFG_CMDLINE_ADDR] = "cmdline_addr", + [FW_CFG_CMDLINE_SIZE] = "cmdline_size", + [FW_CFG_CMDLINE_DATA] = "cmdline_data", + [FW_CFG_SETUP_ADDR] = "setup_addr", + [FW_CFG_SETUP_SIZE] = "setup_size", + [FW_CFG_SETUP_DATA] = "setup_data", + [FW_CFG_FILE_DIR] = "file_dir", + }; + + if (key & FW_CFG_ARCH_LOCAL) { + return fw_cfg_arch_key_name(key); + } + if (key < FW_CFG_FILE_FIRST) { + return fw_cfg_wellknown_keys[key]; + } + + return NULL; +} + +static inline const char *trace_key_name(uint16_t key) +{ + const char *name = key_name(key); + + return name ? 
name : "unknown";
+}
+
+#define JPG_FILE 0
+#define BMP_FILE 1
+
+static char *read_splashfile(char *filename, gsize *file_sizep,
+                             int *file_typep)
+{
+    GError *err = NULL;
+    gchar *content;
+    int file_type;
+    unsigned int filehead;
+    int bmp_bpp;
+
+    if (!g_file_get_contents(filename, &content, file_sizep, &err)) {
+        error_report("failed to read splash file '%s': %s",
+                     filename, err->message);
+        g_error_free(err);
+        return NULL;
+    }
+
+    /* check file size */
+    if (*file_sizep < 30) {
+        goto error;
+    }
+
+    /* check magic ID */
+    filehead = lduw_le_p(content);
+    if (filehead == 0xd8ff) {
+        file_type = JPG_FILE;
+    } else if (filehead == 0x4d42) {
+        file_type = BMP_FILE;
+    } else {
+        goto error;
+    }
+
+    /* check BMP bpp */
+    if (file_type == BMP_FILE) {
+        bmp_bpp = lduw_le_p(&content[28]);
+        if (bmp_bpp != 24) {
+            goto error;
+        }
+    }
+
+    /* return values */
+    *file_typep = file_type;
+
+    return content;
+
+error:
+    error_report("splash file '%s' format not recognized; must be JPEG "
+                 "or 24 bit BMP", filename);
+    g_free(content);
+    return NULL;
+}
+
+static void fw_cfg_bootsplash(FWCfgState *s)
+{
+    const char *boot_splash_filename = NULL;
+    const char *boot_splash_time = NULL;
+    char *filename, *file_data;
+    gsize file_size;
+    int file_type;
+
+    /* get user configuration */
+    QemuOptsList *plist = qemu_find_opts("boot-opts");
+    QemuOpts *opts = QTAILQ_FIRST(&plist->head);
+    boot_splash_filename = qemu_opt_get(opts, "splash");
+    boot_splash_time = qemu_opt_get(opts, "splash-time");
+
+    /* insert splash time if configured by the user */
+    if (boot_splash_time) {
+        int64_t bst_val = qemu_opt_get_number(opts, "splash-time", -1);
+        uint16_t bst_le16;
+
+        /* validate the input */
+        if (bst_val < 0 || bst_val > 0xffff) {
+            error_report("splash-time is invalid, "
+                         "it should be a value between 0 and 65535");
+            exit(1);
+        }
+        /* use little endian format */
+        bst_le16 = cpu_to_le16(bst_val);
+        fw_cfg_add_file(s, "etc/boot-menu-wait",
+                        g_memdup(&bst_le16, sizeof bst_le16), sizeof bst_le16);
+    }
+
+    /* insert splash file if configured by the user */
+    if (boot_splash_filename) {
+        filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, boot_splash_filename);
+        if (filename == NULL) {
+            error_report("failed to find file '%s'", boot_splash_filename);
+            return;
+        }
+
+        /* load the file data */
+        file_data = read_splashfile(filename, &file_size, &file_type);
+        if (file_data == NULL) {
+            g_free(filename);
+            return;
+        }
+        g_free(boot_splash_filedata);
+        boot_splash_filedata = (uint8_t *)file_data;
+
+        /* insert data */
+        if (file_type == JPG_FILE) {
+            fw_cfg_add_file(s, "bootsplash.jpg",
+                            boot_splash_filedata, file_size);
+        } else {
+            fw_cfg_add_file(s, "bootsplash.bmp",
+                            boot_splash_filedata, file_size);
+        }
+        g_free(filename);
+    }
+}
+
+static void fw_cfg_reboot(FWCfgState *s)
+{
+    const char *reboot_timeout = NULL;
+    uint64_t rt_val = -1;
+    uint32_t rt_le32;
+
+    /* get user configuration */
+    QemuOptsList *plist = qemu_find_opts("boot-opts");
+    QemuOpts *opts = QTAILQ_FIRST(&plist->head);
+    reboot_timeout = qemu_opt_get(opts, "reboot-timeout");
+
+    if (reboot_timeout) {
+        rt_val = qemu_opt_get_number(opts, "reboot-timeout", -1);
+
+        /* validate the input */
+        if (rt_val > 0xffff && rt_val != (uint64_t)-1) {
+            error_report("reboot-timeout is invalid, "
+                         "it should be a value between -1 and 65535");
+            exit(1);
+        }
+    }
+
+    rt_le32 = cpu_to_le32(rt_val);
+    fw_cfg_add_file(s, "etc/boot-fail-wait", g_memdup(&rt_le32, 4), 4);
+}
+
+static void fw_cfg_write(FWCfgState *s, uint8_t value)
+{
+    /* nothing, write support removed in
QEMU v2.4+ */ +} + +static inline uint16_t fw_cfg_file_slots(const FWCfgState *s) +{ + return s->file_slots; +} + +/* Note: this function returns an exclusive limit. */ +static inline uint32_t fw_cfg_max_entry(const FWCfgState *s) +{ + return FW_CFG_FILE_FIRST + fw_cfg_file_slots(s); +} + +static int fw_cfg_select(FWCfgState *s, uint16_t key) +{ + int arch, ret; + FWCfgEntry *e; + + s->cur_offset = 0; + if ((key & FW_CFG_ENTRY_MASK) >= fw_cfg_max_entry(s)) { + s->cur_entry = FW_CFG_INVALID; + ret = 0; + } else { + s->cur_entry = key; + ret = 1; + /* entry successfully selected, now run callback if present */ + arch = !!(key & FW_CFG_ARCH_LOCAL); + e = &s->entries[arch][key & FW_CFG_ENTRY_MASK]; + if (e->select_cb) { + e->select_cb(e->callback_opaque); + } + } + + trace_fw_cfg_select(s, key, trace_key_name(key), ret); + return ret; +} + +static uint64_t fw_cfg_data_read(void *opaque, hwaddr addr, unsigned size) +{ + FWCfgState *s = opaque; + int arch = !!(s->cur_entry & FW_CFG_ARCH_LOCAL); + FWCfgEntry *e = (s->cur_entry == FW_CFG_INVALID) ? NULL : + &s->entries[arch][s->cur_entry & FW_CFG_ENTRY_MASK]; + uint64_t value = 0; + + assert(size > 0 && size <= sizeof(value)); + if (s->cur_entry != FW_CFG_INVALID && e->data && s->cur_offset < e->len) { + /* The least significant 'size' bytes of the return value are + * expected to contain a string preserving portion of the item + * data, padded with zeros on the right in case we run out early. + * In technical terms, we're composing the host-endian representation + * of the big endian interpretation of the fw_cfg string. + */ + do { + value = (value << 8) | e->data[s->cur_offset++]; + } while (--size && s->cur_offset < e->len); + /* If size is still not zero, we *did* run out early, so continue + * left-shifting, to add the appropriate number of padding zeros + * on the right. + */ + value <<= 8 * size; + } + + trace_fw_cfg_read(s, value); + return value; +} + +static void fw_cfg_data_mem_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + FWCfgState *s = opaque; + unsigned i = size; + + do { + fw_cfg_write(s, value >> (8 * --i)); + } while (i); +} + +static void fw_cfg_dma_transfer(FWCfgState *s) +{ + dma_addr_t len; + FWCfgDmaAccess dma; + int arch; + FWCfgEntry *e; + int read = 0, write = 0; + dma_addr_t dma_addr; + + /* Reset the address before the next access */ + dma_addr = s->dma_addr; + s->dma_addr = 0; + + if (dma_memory_read(s->dma_as, dma_addr, &dma, sizeof(dma))) { + stl_be_dma(s->dma_as, dma_addr + offsetof(FWCfgDmaAccess, control), + FW_CFG_DMA_CTL_ERROR); + return; + } + + dma.address = be64_to_cpu(dma.address); + dma.length = be32_to_cpu(dma.length); + dma.control = be32_to_cpu(dma.control); + + if (dma.control & FW_CFG_DMA_CTL_SELECT) { + fw_cfg_select(s, dma.control >> 16); + } + + arch = !!(s->cur_entry & FW_CFG_ARCH_LOCAL); + e = (s->cur_entry == FW_CFG_INVALID) ? NULL : + &s->entries[arch][s->cur_entry & FW_CFG_ENTRY_MASK]; + + if (dma.control & FW_CFG_DMA_CTL_READ) { + read = 1; + write = 0; + } else if (dma.control & FW_CFG_DMA_CTL_WRITE) { + read = 0; + write = 1; + } else if (dma.control & FW_CFG_DMA_CTL_SKIP) { + read = 0; + write = 0; + } else { + dma.length = 0; + } + + dma.control = 0; + + while (dma.length > 0 && !(dma.control & FW_CFG_DMA_CTL_ERROR)) { + if (s->cur_entry == FW_CFG_INVALID || !e->data || + s->cur_offset >= e->len) { + len = dma.length; + + /* If the access is not a read access, it will be a skip access, + * tested before. 
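+             * A skip behaves like a read whose data is thrown away:
+             * nothing is copied to the guest but the descriptor still
+             * advances. A READ of an invalid or exhausted entry
+             * zero-fills the guest buffer instead, and a WRITE there
+             * raises FW_CFG_DMA_CTL_ERROR.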
+ */ + if (read) { + if (dma_memory_set(s->dma_as, dma.address, 0, len)) { + dma.control |= FW_CFG_DMA_CTL_ERROR; + } + } + if (write) { + dma.control |= FW_CFG_DMA_CTL_ERROR; + } + } else { + if (dma.length <= (e->len - s->cur_offset)) { + len = dma.length; + } else { + len = (e->len - s->cur_offset); + } + + /* If the access is not a read access, it will be a skip access, + * tested before. + */ + if (read) { + if (dma_memory_write(s->dma_as, dma.address, + &e->data[s->cur_offset], len)) { + dma.control |= FW_CFG_DMA_CTL_ERROR; + } + } + if (write) { + if (!e->allow_write || + len != dma.length || + dma_memory_read(s->dma_as, dma.address, + &e->data[s->cur_offset], len)) { + dma.control |= FW_CFG_DMA_CTL_ERROR; + } else if (e->write_cb) { + e->write_cb(e->callback_opaque, s->cur_offset, len); + } + } + + s->cur_offset += len; + } + + dma.address += len; + dma.length -= len; + + } + + stl_be_dma(s->dma_as, dma_addr + offsetof(FWCfgDmaAccess, control), + dma.control); + + trace_fw_cfg_read(s, 0); +} + +static uint64_t fw_cfg_dma_mem_read(void *opaque, hwaddr addr, + unsigned size) +{ + /* Return a signature value (and handle various read sizes) */ + return extract64(FW_CFG_DMA_SIGNATURE, (8 - addr - size) * 8, size * 8); +} + +static void fw_cfg_dma_mem_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + FWCfgState *s = opaque; + + if (size == 4) { + if (addr == 0) { + /* FWCfgDmaAccess high address */ + s->dma_addr = value << 32; + } else if (addr == 4) { + /* FWCfgDmaAccess low address */ + s->dma_addr |= value; + fw_cfg_dma_transfer(s); + } + } else if (size == 8 && addr == 0) { + s->dma_addr = value; + fw_cfg_dma_transfer(s); + } +} + +static bool fw_cfg_dma_mem_valid(void *opaque, hwaddr addr, + unsigned size, bool is_write, + MemTxAttrs attrs) +{ + return !is_write || ((size == 4 && (addr == 0 || addr == 4)) || + (size == 8 && addr == 0)); +} + +static bool fw_cfg_data_mem_valid(void *opaque, hwaddr addr, + unsigned size, bool is_write, + MemTxAttrs attrs) +{ + return addr == 0; +} + +static uint64_t fw_cfg_ctl_mem_read(void *opaque, hwaddr addr, unsigned size) +{ + return 0; +} + +static void fw_cfg_ctl_mem_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + fw_cfg_select(opaque, (uint16_t)value); +} + +static bool fw_cfg_ctl_mem_valid(void *opaque, hwaddr addr, + unsigned size, bool is_write, + MemTxAttrs attrs) +{ + return is_write && size == 2; +} + +static void fw_cfg_comb_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + switch (size) { + case 1: + fw_cfg_write(opaque, (uint8_t)value); + break; + case 2: + fw_cfg_select(opaque, (uint16_t)value); + break; + } +} + +static bool fw_cfg_comb_valid(void *opaque, hwaddr addr, + unsigned size, bool is_write, + MemTxAttrs attrs) +{ + return (size == 1) || (is_write && size == 2); +} + +static const MemoryRegionOps fw_cfg_ctl_mem_ops = { + .read = fw_cfg_ctl_mem_read, + .write = fw_cfg_ctl_mem_write, + .endianness = DEVICE_BIG_ENDIAN, + .valid.accepts = fw_cfg_ctl_mem_valid, +}; + +static const MemoryRegionOps fw_cfg_data_mem_ops = { + .read = fw_cfg_data_read, + .write = fw_cfg_data_mem_write, + .endianness = DEVICE_BIG_ENDIAN, + .valid = { + .min_access_size = 1, + .max_access_size = 1, + .accepts = fw_cfg_data_mem_valid, + }, +}; + +static const MemoryRegionOps fw_cfg_comb_mem_ops = { + .read = fw_cfg_data_read, + .write = fw_cfg_comb_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid.accepts = fw_cfg_comb_valid, +}; + +static const MemoryRegionOps fw_cfg_dma_mem_ops = { + 
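+    /*
+     * The DMA register is 8 bytes wide and big-endian: a 32-bit write
+     * to offset 0 latches the high half of the descriptor address, a
+     * 32-bit write to offset 4 (or a single 64-bit write to offset 0)
+     * supplies the low half and kicks off fw_cfg_dma_transfer(), and
+     * reads return the "QEMU CFG" signature (see fw_cfg_dma_mem_read()
+     * and fw_cfg_dma_mem_write() above).
+     */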
.read = fw_cfg_dma_mem_read, + .write = fw_cfg_dma_mem_write, + .endianness = DEVICE_BIG_ENDIAN, + .valid.accepts = fw_cfg_dma_mem_valid, + .valid.max_access_size = 8, + .impl.max_access_size = 8, +}; + +static void fw_cfg_reset(DeviceState *d) +{ + FWCfgState *s = FW_CFG(d); + + /* we never register a read callback for FW_CFG_SIGNATURE */ + fw_cfg_select(s, FW_CFG_SIGNATURE); +} + +/* Save restore 32 bit int as uint16_t + This is a Big hack, but it is how the old state did it. + Or we broke compatibility in the state, or we can't use struct tm + */ + +static int get_uint32_as_uint16(QEMUFile *f, void *pv, size_t size, + const VMStateField *field) +{ + uint32_t *v = pv; + *v = qemu_get_be16(f); + return 0; +} + +static int put_unused(QEMUFile *f, void *pv, size_t size, + const VMStateField *field, JSONWriter *vmdesc) +{ + fprintf(stderr, "uint32_as_uint16 is only used for backward compatibility.\n"); + fprintf(stderr, "This functions shouldn't be called.\n"); + + return 0; +} + +static const VMStateInfo vmstate_hack_uint32_as_uint16 = { + .name = "int32_as_uint16", + .get = get_uint32_as_uint16, + .put = put_unused, +}; + +#define VMSTATE_UINT16_HACK(_f, _s, _t) \ + VMSTATE_SINGLE_TEST(_f, _s, _t, 0, vmstate_hack_uint32_as_uint16, uint32_t) + + +static bool is_version_1(void *opaque, int version_id) +{ + return version_id == 1; +} + +bool fw_cfg_dma_enabled(void *opaque) +{ + FWCfgState *s = opaque; + + return s->dma_enabled; +} + +static bool fw_cfg_acpi_mr_restore(void *opaque) +{ + FWCfgState *s = opaque; + bool mr_aligned; + + mr_aligned = QEMU_IS_ALIGNED(s->table_mr_size, qemu_real_host_page_size) && + QEMU_IS_ALIGNED(s->linker_mr_size, qemu_real_host_page_size) && + QEMU_IS_ALIGNED(s->rsdp_mr_size, qemu_real_host_page_size); + return s->acpi_mr_restore && !mr_aligned; +} + +static void fw_cfg_update_mr(FWCfgState *s, uint16_t key, size_t size) +{ + MemoryRegion *mr; + ram_addr_t offset; + int arch = !!(key & FW_CFG_ARCH_LOCAL); + void *ptr; + + key &= FW_CFG_ENTRY_MASK; + assert(key < fw_cfg_max_entry(s)); + + ptr = s->entries[arch][key].data; + mr = memory_region_from_host(ptr, &offset); + + memory_region_ram_resize(mr, size, &error_abort); +} + +static int fw_cfg_acpi_mr_restore_post_load(void *opaque, int version_id) +{ + FWCfgState *s = opaque; + int i, index; + + assert(s->files); + + index = be32_to_cpu(s->files->count); + + for (i = 0; i < index; i++) { + if (!strcmp(s->files->f[i].name, ACPI_BUILD_TABLE_FILE)) { + fw_cfg_update_mr(s, FW_CFG_FILE_FIRST + i, s->table_mr_size); + } else if (!strcmp(s->files->f[i].name, ACPI_BUILD_LOADER_FILE)) { + fw_cfg_update_mr(s, FW_CFG_FILE_FIRST + i, s->linker_mr_size); + } else if (!strcmp(s->files->f[i].name, ACPI_BUILD_RSDP_FILE)) { + fw_cfg_update_mr(s, FW_CFG_FILE_FIRST + i, s->rsdp_mr_size); + } + } + + return 0; +} + +static const VMStateDescription vmstate_fw_cfg_dma = { + .name = "fw_cfg/dma", + .needed = fw_cfg_dma_enabled, + .fields = (VMStateField[]) { + VMSTATE_UINT64(dma_addr, FWCfgState), + VMSTATE_END_OF_LIST() + }, +}; + +static const VMStateDescription vmstate_fw_cfg_acpi_mr = { + .name = "fw_cfg/acpi_mr", + .version_id = 1, + .minimum_version_id = 1, + .needed = fw_cfg_acpi_mr_restore, + .post_load = fw_cfg_acpi_mr_restore_post_load, + .fields = (VMStateField[]) { + VMSTATE_UINT64(table_mr_size, FWCfgState), + VMSTATE_UINT64(linker_mr_size, FWCfgState), + VMSTATE_UINT64(rsdp_mr_size, FWCfgState), + VMSTATE_END_OF_LIST() + }, +}; + +static const VMStateDescription vmstate_fw_cfg = { + .name = "fw_cfg", + .version_id = 2, 
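+    /* v1 streams saved cur_offset as a 16-bit value; v2 widened it to
+     * 32 bits, hence the paired UINT16_HACK / UINT32_V entries for it
+     * below. */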
+ .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT16(cur_entry, FWCfgState), + VMSTATE_UINT16_HACK(cur_offset, FWCfgState, is_version_1), + VMSTATE_UINT32_V(cur_offset, FWCfgState, 2), + VMSTATE_END_OF_LIST() + }, + .subsections = (const VMStateDescription*[]) { + &vmstate_fw_cfg_dma, + &vmstate_fw_cfg_acpi_mr, + NULL, + } +}; + +static void fw_cfg_add_bytes_callback(FWCfgState *s, uint16_t key, + FWCfgCallback select_cb, + FWCfgWriteCallback write_cb, + void *callback_opaque, + void *data, size_t len, + bool read_only) +{ + int arch = !!(key & FW_CFG_ARCH_LOCAL); + + key &= FW_CFG_ENTRY_MASK; + + assert(key < fw_cfg_max_entry(s) && len < UINT32_MAX); + assert(s->entries[arch][key].data == NULL); /* avoid key conflict */ + + s->entries[arch][key].data = data; + s->entries[arch][key].len = (uint32_t)len; + s->entries[arch][key].select_cb = select_cb; + s->entries[arch][key].write_cb = write_cb; + s->entries[arch][key].callback_opaque = callback_opaque; + s->entries[arch][key].allow_write = !read_only; +} + +static void *fw_cfg_modify_bytes_read(FWCfgState *s, uint16_t key, + void *data, size_t len) +{ + void *ptr; + int arch = !!(key & FW_CFG_ARCH_LOCAL); + + key &= FW_CFG_ENTRY_MASK; + + assert(key < fw_cfg_max_entry(s) && len < UINT32_MAX); + + /* return the old data to the function caller, avoid memory leak */ + ptr = s->entries[arch][key].data; + s->entries[arch][key].data = data; + s->entries[arch][key].len = len; + s->entries[arch][key].callback_opaque = NULL; + s->entries[arch][key].allow_write = false; + + return ptr; +} + +void fw_cfg_add_bytes(FWCfgState *s, uint16_t key, void *data, size_t len) +{ + trace_fw_cfg_add_bytes(key, trace_key_name(key), len); + fw_cfg_add_bytes_callback(s, key, NULL, NULL, NULL, data, len, true); +} + +void fw_cfg_add_string(FWCfgState *s, uint16_t key, const char *value) +{ + size_t sz = strlen(value) + 1; + + trace_fw_cfg_add_string(key, trace_key_name(key), value); + fw_cfg_add_bytes(s, key, g_memdup(value, sz), sz); +} + +void fw_cfg_modify_string(FWCfgState *s, uint16_t key, const char *value) +{ + size_t sz = strlen(value) + 1; + char *old; + + old = fw_cfg_modify_bytes_read(s, key, g_memdup(value, sz), sz); + g_free(old); +} + +void fw_cfg_add_i16(FWCfgState *s, uint16_t key, uint16_t value) +{ + uint16_t *copy; + + copy = g_malloc(sizeof(value)); + *copy = cpu_to_le16(value); + trace_fw_cfg_add_i16(key, trace_key_name(key), value); + fw_cfg_add_bytes(s, key, copy, sizeof(value)); +} + +void fw_cfg_modify_i16(FWCfgState *s, uint16_t key, uint16_t value) +{ + uint16_t *copy, *old; + + copy = g_malloc(sizeof(value)); + *copy = cpu_to_le16(value); + old = fw_cfg_modify_bytes_read(s, key, copy, sizeof(value)); + g_free(old); +} + +void fw_cfg_add_i32(FWCfgState *s, uint16_t key, uint32_t value) +{ + uint32_t *copy; + + copy = g_malloc(sizeof(value)); + *copy = cpu_to_le32(value); + trace_fw_cfg_add_i32(key, trace_key_name(key), value); + fw_cfg_add_bytes(s, key, copy, sizeof(value)); +} + +void fw_cfg_modify_i32(FWCfgState *s, uint16_t key, uint32_t value) +{ + uint32_t *copy, *old; + + copy = g_malloc(sizeof(value)); + *copy = cpu_to_le32(value); + old = fw_cfg_modify_bytes_read(s, key, copy, sizeof(value)); + g_free(old); +} + +void fw_cfg_add_i64(FWCfgState *s, uint16_t key, uint64_t value) +{ + uint64_t *copy; + + copy = g_malloc(sizeof(value)); + *copy = cpu_to_le64(value); + trace_fw_cfg_add_i64(key, trace_key_name(key), value); + fw_cfg_add_bytes(s, key, copy, sizeof(value)); +} + +void fw_cfg_modify_i64(FWCfgState *s, 
uint16_t key, uint64_t value) +{ + uint64_t *copy, *old; + + copy = g_malloc(sizeof(value)); + *copy = cpu_to_le64(value); + old = fw_cfg_modify_bytes_read(s, key, copy, sizeof(value)); + g_free(old); +} + +void fw_cfg_set_order_override(FWCfgState *s, int order) +{ + assert(s->fw_cfg_order_override == 0); + s->fw_cfg_order_override = order; +} + +void fw_cfg_reset_order_override(FWCfgState *s) +{ + assert(s->fw_cfg_order_override != 0); + s->fw_cfg_order_override = 0; +} + +/* + * This is the legacy order list. For legacy systems, files are in + * the fw_cfg in the order defined below, by the "order" value. Note + * that some entries (VGA ROMs, NIC option ROMS, etc.) go into a + * specific area, but there may be more than one and they occur in the + * order that the user specifies them on the command line. Those are + * handled in a special manner, using the order override above. + * + * For non-legacy, the files are sorted by filename to avoid this kind + * of complexity in the future. + * + * This is only for x86, other arches don't implement versioning so + * they won't set legacy mode. + */ +static struct { + const char *name; + int order; +} fw_cfg_order[] = { + { "etc/boot-menu-wait", 10 }, + { "bootsplash.jpg", 11 }, + { "bootsplash.bmp", 12 }, + { "etc/boot-fail-wait", 15 }, + { "etc/smbios/smbios-tables", 20 }, + { "etc/smbios/smbios-anchor", 30 }, + { "etc/e820", 40 }, + { "etc/reserved-memory-end", 50 }, + { "genroms/kvmvapic.bin", 55 }, + { "genroms/linuxboot.bin", 60 }, + { }, /* VGA ROMs from pc_vga_init come here, 70. */ + { }, /* NIC option ROMs from pc_nic_init come here, 80. */ + { "etc/system-states", 90 }, + { }, /* User ROMs come here, 100. */ + { }, /* Device FW comes here, 110. */ + { "etc/extra-pci-roots", 120 }, + { "etc/acpi/tables", 130 }, + { "etc/table-loader", 140 }, + { "etc/tpm/log", 150 }, + { "etc/acpi/rsdp", 160 }, + { "bootorder", 170 }, + { "etc/msr_feature_control", 180 }, + +#define FW_CFG_ORDER_OVERRIDE_LAST 200 +}; + +/* + * Any sub-page size update to these table MRs will be lost during migration, + * as we use aligned size in ram_load_precopy() -> qemu_ram_resize() path. + * In order to avoid the inconsistency in sizes save them seperately and + * migrate over in vmstate post_load(). + */ +static void fw_cfg_acpi_mr_save(FWCfgState *s, const char *filename, size_t len) +{ + if (!strcmp(filename, ACPI_BUILD_TABLE_FILE)) { + s->table_mr_size = len; + } else if (!strcmp(filename, ACPI_BUILD_LOADER_FILE)) { + s->linker_mr_size = len; + } else if (!strcmp(filename, ACPI_BUILD_RSDP_FILE)) { + s->rsdp_mr_size = len; + } +} + +static int get_fw_cfg_order(FWCfgState *s, const char *name) +{ + int i; + + if (s->fw_cfg_order_override > 0) { + return s->fw_cfg_order_override; + } + + for (i = 0; i < ARRAY_SIZE(fw_cfg_order); i++) { + if (fw_cfg_order[i].name == NULL) { + continue; + } + + if (strcmp(name, fw_cfg_order[i].name) == 0) { + return fw_cfg_order[i].order; + } + } + + /* Stick unknown stuff at the end. 
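+     * For example, a user-supplied "opt/org.example/foo" blob (a name
+     * made up purely for illustration) matches nothing in
+     * fw_cfg_order[], so it triggers the warning below and sorts last
+     * at FW_CFG_ORDER_OVERRIDE_LAST.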
*/ + warn_report("Unknown firmware file in legacy mode: %s", name); + return FW_CFG_ORDER_OVERRIDE_LAST; +} + +void fw_cfg_add_file_callback(FWCfgState *s, const char *filename, + FWCfgCallback select_cb, + FWCfgWriteCallback write_cb, + void *callback_opaque, + void *data, size_t len, bool read_only) +{ + int i, index, count; + size_t dsize; + MachineClass *mc = MACHINE_GET_CLASS(qdev_get_machine()); + int order = 0; + + if (!s->files) { + dsize = sizeof(uint32_t) + sizeof(FWCfgFile) * fw_cfg_file_slots(s); + s->files = g_malloc0(dsize); + fw_cfg_add_bytes(s, FW_CFG_FILE_DIR, s->files, dsize); + } + + count = be32_to_cpu(s->files->count); + assert(count < fw_cfg_file_slots(s)); + + /* Find the insertion point. */ + if (mc->legacy_fw_cfg_order) { + /* + * Sort by order. For files with the same order, we keep them + * in the sequence in which they were added. + */ + order = get_fw_cfg_order(s, filename); + for (index = count; + index > 0 && order < s->entry_order[index - 1]; + index--); + } else { + /* Sort by file name. */ + for (index = count; + index > 0 && strcmp(filename, s->files->f[index - 1].name) < 0; + index--); + } + + /* + * Move all the entries from the index point and after down one + * to create a slot for the new entry. Because calculations are + * being done with the index, make it so that "i" is the current + * index and "i - 1" is the one being copied from, thus the + * unusual start and end in the for statement. + */ + for (i = count; i > index; i--) { + s->files->f[i] = s->files->f[i - 1]; + s->files->f[i].select = cpu_to_be16(FW_CFG_FILE_FIRST + i); + s->entries[0][FW_CFG_FILE_FIRST + i] = + s->entries[0][FW_CFG_FILE_FIRST + i - 1]; + s->entry_order[i] = s->entry_order[i - 1]; + } + + memset(&s->files->f[index], 0, sizeof(FWCfgFile)); + memset(&s->entries[0][FW_CFG_FILE_FIRST + index], 0, sizeof(FWCfgEntry)); + + pstrcpy(s->files->f[index].name, sizeof(s->files->f[index].name), filename); + for (i = 0; i <= count; i++) { + if (i != index && + strcmp(s->files->f[index].name, s->files->f[i].name) == 0) { + error_report("duplicate fw_cfg file name: %s", + s->files->f[index].name); + exit(1); + } + } + + fw_cfg_add_bytes_callback(s, FW_CFG_FILE_FIRST + index, + select_cb, write_cb, + callback_opaque, data, len, + read_only); + + s->files->f[index].size = cpu_to_be32(len); + s->files->f[index].select = cpu_to_be16(FW_CFG_FILE_FIRST + index); + s->entry_order[index] = order; + trace_fw_cfg_add_file(s, index, s->files->f[index].name, len); + + s->files->count = cpu_to_be32(count+1); + fw_cfg_acpi_mr_save(s, filename, len); +} + +void fw_cfg_add_file(FWCfgState *s, const char *filename, + void *data, size_t len) +{ + fw_cfg_add_file_callback(s, filename, NULL, NULL, NULL, data, len, true); +} + +void *fw_cfg_modify_file(FWCfgState *s, const char *filename, + void *data, size_t len) +{ + int i, index; + void *ptr = NULL; + + assert(s->files); + + index = be32_to_cpu(s->files->count); + + for (i = 0; i < index; i++) { + if (strcmp(filename, s->files->f[i].name) == 0) { + ptr = fw_cfg_modify_bytes_read(s, FW_CFG_FILE_FIRST + i, + data, len); + s->files->f[i].size = cpu_to_be32(len); + fw_cfg_acpi_mr_save(s, filename, len); + return ptr; + } + } + + assert(index < fw_cfg_file_slots(s)); + + /* add new one */ + fw_cfg_add_file_callback(s, filename, NULL, NULL, NULL, data, len, true); + return NULL; +} + +bool fw_cfg_add_from_generator(FWCfgState *s, const char *filename, + const char *gen_id, Error **errp) +{ + FWCfgDataGeneratorClass *klass; + GByteArray *array; + Object *obj; + 
gsize size; + + obj = object_resolve_path_component(object_get_objects_root(), gen_id); + if (!obj) { + error_setg(errp, "Cannot find object ID '%s'", gen_id); + return false; + } + if (!object_dynamic_cast(obj, TYPE_FW_CFG_DATA_GENERATOR_INTERFACE)) { + error_setg(errp, "Object ID '%s' is not a '%s' subclass", + gen_id, TYPE_FW_CFG_DATA_GENERATOR_INTERFACE); + return false; + } + klass = FW_CFG_DATA_GENERATOR_GET_CLASS(obj); + array = klass->get_data(obj, errp); + if (!array) { + return false; + } + size = array->len; + fw_cfg_add_file(s, filename, g_byte_array_free(array, FALSE), size); + + return true; +} + +void fw_cfg_add_extra_pci_roots(PCIBus *bus, FWCfgState *s) +{ + int extra_hosts = 0; + + if (!bus) { + return; + } + + QLIST_FOREACH(bus, &bus->child, sibling) { + /* look for expander root buses */ + if (pci_bus_is_root(bus)) { + extra_hosts++; + } + } + + if (extra_hosts && s) { + uint64_t *val = g_malloc(sizeof(*val)); + *val = cpu_to_le64(extra_hosts); + fw_cfg_add_file(s, "etc/extra-pci-roots", val, sizeof(*val)); + } +} + +static void fw_cfg_machine_reset(void *opaque) +{ + MachineClass *mc = MACHINE_GET_CLASS(qdev_get_machine()); + FWCfgState *s = opaque; + void *ptr; + size_t len; + char *buf; + + buf = get_boot_devices_list(&len); + ptr = fw_cfg_modify_file(s, "bootorder", (uint8_t *)buf, len); + g_free(ptr); + + if (!mc->legacy_fw_cfg_order) { + buf = get_boot_devices_lchs_list(&len); + ptr = fw_cfg_modify_file(s, "bios-geometry", (uint8_t *)buf, len); + g_free(ptr); + } +} + +static void fw_cfg_machine_ready(struct Notifier *n, void *data) +{ + FWCfgState *s = container_of(n, FWCfgState, machine_ready); + qemu_register_reset(fw_cfg_machine_reset, s); +} + +static Property fw_cfg_properties[] = { + DEFINE_PROP_BOOL("acpi-mr-restore", FWCfgState, acpi_mr_restore, true), + DEFINE_PROP_END_OF_LIST(), +}; + +static void fw_cfg_common_realize(DeviceState *dev, Error **errp) +{ + FWCfgState *s = FW_CFG(dev); + MachineState *machine = MACHINE(qdev_get_machine()); + uint32_t version = FW_CFG_VERSION; + + if (!fw_cfg_find()) { + error_setg(errp, "at most one %s device is permitted", TYPE_FW_CFG); + return; + } + + fw_cfg_add_bytes(s, FW_CFG_SIGNATURE, (char *)"QEMU", 4); + fw_cfg_add_bytes(s, FW_CFG_UUID, &qemu_uuid, 16); + fw_cfg_add_i16(s, FW_CFG_NOGRAPHIC, (uint16_t)!machine->enable_graphics); + fw_cfg_add_i16(s, FW_CFG_BOOT_MENU, (uint16_t)boot_menu); + fw_cfg_bootsplash(s); + fw_cfg_reboot(s); + + if (s->dma_enabled) { + version |= FW_CFG_VERSION_DMA; + } + + fw_cfg_add_i32(s, FW_CFG_ID, version); + + s->machine_ready.notify = fw_cfg_machine_ready; + qemu_add_machine_init_done_notifier(&s->machine_ready); +} + +FWCfgState *fw_cfg_init_io_dma(uint32_t iobase, uint32_t dma_iobase, + AddressSpace *dma_as) +{ + DeviceState *dev; + SysBusDevice *sbd; + FWCfgIoState *ios; + FWCfgState *s; + bool dma_requested = dma_iobase && dma_as; + + dev = qdev_new(TYPE_FW_CFG_IO); + if (!dma_requested) { + qdev_prop_set_bit(dev, "dma_enabled", false); + } + + object_property_add_child(OBJECT(qdev_get_machine()), TYPE_FW_CFG, + OBJECT(dev)); + + sbd = SYS_BUS_DEVICE(dev); + sysbus_realize_and_unref(sbd, &error_fatal); + ios = FW_CFG_IO(dev); + sysbus_add_io(sbd, iobase, &ios->comb_iomem); + + s = FW_CFG(dev); + + if (s->dma_enabled) { + /* 64 bits for the address field */ + s->dma_as = dma_as; + s->dma_addr = 0; + sysbus_add_io(sbd, dma_iobase, &s->dma_iomem); + } + + return s; +} + +FWCfgState *fw_cfg_init_io(uint32_t iobase) +{ + return fw_cfg_init_io_dma(iobase, 0, NULL); +} + +FWCfgState 
*fw_cfg_init_mem_wide(hwaddr ctl_addr, + hwaddr data_addr, uint32_t data_width, + hwaddr dma_addr, AddressSpace *dma_as) +{ + DeviceState *dev; + SysBusDevice *sbd; + FWCfgState *s; + bool dma_requested = dma_addr && dma_as; + + dev = qdev_new(TYPE_FW_CFG_MEM); + qdev_prop_set_uint32(dev, "data_width", data_width); + if (!dma_requested) { + qdev_prop_set_bit(dev, "dma_enabled", false); + } + + object_property_add_child(OBJECT(qdev_get_machine()), TYPE_FW_CFG, + OBJECT(dev)); + + sbd = SYS_BUS_DEVICE(dev); + sysbus_realize_and_unref(sbd, &error_fatal); + sysbus_mmio_map(sbd, 0, ctl_addr); + sysbus_mmio_map(sbd, 1, data_addr); + + s = FW_CFG(dev); + + if (s->dma_enabled) { + s->dma_as = dma_as; + s->dma_addr = 0; + sysbus_mmio_map(sbd, 2, dma_addr); + } + + return s; +} + +FWCfgState *fw_cfg_init_mem(hwaddr ctl_addr, hwaddr data_addr) +{ + return fw_cfg_init_mem_wide(ctl_addr, data_addr, + fw_cfg_data_mem_ops.valid.max_access_size, + 0, NULL); +} + + +FWCfgState *fw_cfg_find(void) +{ + /* Returns NULL unless there is exactly one fw_cfg device */ + return FW_CFG(object_resolve_path_type("", TYPE_FW_CFG, NULL)); +} + + +static void fw_cfg_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = fw_cfg_reset; + dc->vmsd = &vmstate_fw_cfg; + + device_class_set_props(dc, fw_cfg_properties); +} + +static const TypeInfo fw_cfg_info = { + .name = TYPE_FW_CFG, + .parent = TYPE_SYS_BUS_DEVICE, + .abstract = true, + .instance_size = sizeof(FWCfgState), + .class_init = fw_cfg_class_init, +}; + +static void fw_cfg_file_slots_allocate(FWCfgState *s, Error **errp) +{ + uint16_t file_slots_max; + + if (fw_cfg_file_slots(s) < FW_CFG_FILE_SLOTS_MIN) { + error_setg(errp, "\"file_slots\" must be at least 0x%x", + FW_CFG_FILE_SLOTS_MIN); + return; + } + + /* (UINT16_MAX & FW_CFG_ENTRY_MASK) is the highest inclusive selector value + * that we permit. The actual (exclusive) value coming from the + * configuration is (FW_CFG_FILE_FIRST + fw_cfg_file_slots(s)). */ + file_slots_max = (UINT16_MAX & FW_CFG_ENTRY_MASK) - FW_CFG_FILE_FIRST + 1; + if (fw_cfg_file_slots(s) > file_slots_max) { + error_setg(errp, "\"file_slots\" must not exceed 0x%" PRIx16, + file_slots_max); + return; + } + + s->entries[0] = g_new0(FWCfgEntry, fw_cfg_max_entry(s)); + s->entries[1] = g_new0(FWCfgEntry, fw_cfg_max_entry(s)); + s->entry_order = g_new0(int, fw_cfg_max_entry(s)); +} + +static Property fw_cfg_io_properties[] = { + DEFINE_PROP_BOOL("dma_enabled", FWCfgIoState, parent_obj.dma_enabled, + true), + DEFINE_PROP_UINT16("x-file-slots", FWCfgIoState, parent_obj.file_slots, + FW_CFG_FILE_SLOTS_DFLT), + DEFINE_PROP_END_OF_LIST(), +}; + +static void fw_cfg_io_realize(DeviceState *dev, Error **errp) +{ + ERRP_GUARD(); + FWCfgIoState *s = FW_CFG_IO(dev); + + fw_cfg_file_slots_allocate(FW_CFG(s), errp); + if (*errp) { + return; + } + + /* when using port i/o, the 8-bit data register ALWAYS overlaps + * with half of the 16-bit control register. 
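+     * On x86 machines, for instance, that means the 16-bit selector at
+     * port 0x510 and the 8-bit data register at port 0x511 share one
+     * two-byte region.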
Hence, the total size + * of the i/o region used is FW_CFG_CTL_SIZE */ + memory_region_init_io(&s->comb_iomem, OBJECT(s), &fw_cfg_comb_mem_ops, + FW_CFG(s), "fwcfg", FW_CFG_CTL_SIZE); + + if (FW_CFG(s)->dma_enabled) { + memory_region_init_io(&FW_CFG(s)->dma_iomem, OBJECT(s), + &fw_cfg_dma_mem_ops, FW_CFG(s), "fwcfg.dma", + sizeof(dma_addr_t)); + } + + fw_cfg_common_realize(dev, errp); +} + +static void fw_cfg_io_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = fw_cfg_io_realize; + device_class_set_props(dc, fw_cfg_io_properties); +} + +static const TypeInfo fw_cfg_io_info = { + .name = TYPE_FW_CFG_IO, + .parent = TYPE_FW_CFG, + .instance_size = sizeof(FWCfgIoState), + .class_init = fw_cfg_io_class_init, +}; + + +static Property fw_cfg_mem_properties[] = { + DEFINE_PROP_UINT32("data_width", FWCfgMemState, data_width, -1), + DEFINE_PROP_BOOL("dma_enabled", FWCfgMemState, parent_obj.dma_enabled, + true), + DEFINE_PROP_UINT16("x-file-slots", FWCfgMemState, parent_obj.file_slots, + FW_CFG_FILE_SLOTS_DFLT), + DEFINE_PROP_END_OF_LIST(), +}; + +static void fw_cfg_mem_realize(DeviceState *dev, Error **errp) +{ + ERRP_GUARD(); + FWCfgMemState *s = FW_CFG_MEM(dev); + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + const MemoryRegionOps *data_ops = &fw_cfg_data_mem_ops; + + fw_cfg_file_slots_allocate(FW_CFG(s), errp); + if (*errp) { + return; + } + + memory_region_init_io(&s->ctl_iomem, OBJECT(s), &fw_cfg_ctl_mem_ops, + FW_CFG(s), "fwcfg.ctl", FW_CFG_CTL_SIZE); + sysbus_init_mmio(sbd, &s->ctl_iomem); + + if (s->data_width > data_ops->valid.max_access_size) { + s->wide_data_ops = *data_ops; + + s->wide_data_ops.valid.max_access_size = s->data_width; + s->wide_data_ops.impl.max_access_size = s->data_width; + data_ops = &s->wide_data_ops; + } + memory_region_init_io(&s->data_iomem, OBJECT(s), data_ops, FW_CFG(s), + "fwcfg.data", data_ops->valid.max_access_size); + sysbus_init_mmio(sbd, &s->data_iomem); + + if (FW_CFG(s)->dma_enabled) { + memory_region_init_io(&FW_CFG(s)->dma_iomem, OBJECT(s), + &fw_cfg_dma_mem_ops, FW_CFG(s), "fwcfg.dma", + sizeof(dma_addr_t)); + sysbus_init_mmio(sbd, &FW_CFG(s)->dma_iomem); + } + + fw_cfg_common_realize(dev, errp); +} + +static void fw_cfg_mem_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = fw_cfg_mem_realize; + device_class_set_props(dc, fw_cfg_mem_properties); +} + +static const TypeInfo fw_cfg_mem_info = { + .name = TYPE_FW_CFG_MEM, + .parent = TYPE_FW_CFG, + .instance_size = sizeof(FWCfgMemState), + .class_init = fw_cfg_mem_class_init, +}; + +static void fw_cfg_register_types(void) +{ + type_register_static(&fw_cfg_info); + type_register_static(&fw_cfg_io_info); + type_register_static(&fw_cfg_mem_info); +} + +type_init(fw_cfg_register_types) diff --git a/hw/nvram/mac_nvram.c b/hw/nvram/mac_nvram.c new file mode 100644 index 000000000..11f2d31cd --- /dev/null +++ b/hw/nvram/mac_nvram.c @@ -0,0 +1,185 @@ +/* + * PowerMac NVRAM emulation + * + * Copyright (c) 2005-2007 Fabrice Bellard + * Copyright (c) 2007 Jocelyn Mayer + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following 
conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/nvram/chrp_nvram.h"
+#include "hw/ppc/mac.h"
+#include "hw/qdev-properties.h"
+#include "migration/vmstate.h"
+#include "qemu/cutils.h"
+#include "qemu/module.h"
+#include "trace.h"
+#include <zlib.h>
+
+#define DEF_SYSTEM_SIZE 0xc10
+
+/* macio style NVRAM device */
+static void macio_nvram_writeb(void *opaque, hwaddr addr,
+                               uint64_t value, unsigned size)
+{
+    MacIONVRAMState *s = opaque;
+
+    addr = (addr >> s->it_shift) & (s->size - 1);
+    trace_macio_nvram_write(addr, value);
+    s->data[addr] = value;
+}
+
+static uint64_t macio_nvram_readb(void *opaque, hwaddr addr,
+                                  unsigned size)
+{
+    MacIONVRAMState *s = opaque;
+    uint32_t value;
+
+    addr = (addr >> s->it_shift) & (s->size - 1);
+    value = s->data[addr];
+    trace_macio_nvram_read(addr, value);
+
+    return value;
+}
+
+static const MemoryRegionOps macio_nvram_ops = {
+    .read = macio_nvram_readb,
+    .write = macio_nvram_writeb,
+    .valid.min_access_size = 1,
+    .valid.max_access_size = 4,
+    .impl.min_access_size = 1,
+    .impl.max_access_size = 1,
+    .endianness = DEVICE_BIG_ENDIAN,
+};
+
+static const VMStateDescription vmstate_macio_nvram = {
+    .name = "macio_nvram",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .fields = (VMStateField[]) {
+        VMSTATE_VBUFFER_UINT32(data, MacIONVRAMState, 0, NULL, size),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+
+static void macio_nvram_reset(DeviceState *dev)
+{
+}
+
+static void macio_nvram_realizefn(DeviceState *dev, Error **errp)
+{
+    SysBusDevice *d = SYS_BUS_DEVICE(dev);
+    MacIONVRAMState *s = MACIO_NVRAM(dev);
+
+    s->data = g_malloc0(s->size);
+
+    memory_region_init_io(&s->mem, OBJECT(s), &macio_nvram_ops, s,
+                          "macio-nvram", s->size << s->it_shift);
+    sysbus_init_mmio(d, &s->mem);
+}
+
+static void macio_nvram_unrealizefn(DeviceState *dev)
+{
+    MacIONVRAMState *s = MACIO_NVRAM(dev);
+
+    g_free(s->data);
+}
+
+static Property macio_nvram_properties[] = {
+    DEFINE_PROP_UINT32("size", MacIONVRAMState, size, 0),
+    DEFINE_PROP_UINT32("it_shift", MacIONVRAMState, it_shift, 0),
+    DEFINE_PROP_END_OF_LIST()
+};
+
+static void macio_nvram_class_init(ObjectClass *oc, void *data)
+{
+    DeviceClass *dc = DEVICE_CLASS(oc);
+
+    dc->realize = macio_nvram_realizefn;
+    dc->unrealize = macio_nvram_unrealizefn;
+    dc->reset = macio_nvram_reset;
+    dc->vmsd = &vmstate_macio_nvram;
+    device_class_set_props(dc, macio_nvram_properties);
+    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
+}
+
+static const TypeInfo macio_nvram_type_info = {
+    .name = TYPE_MACIO_NVRAM,
+    .parent = TYPE_SYS_BUS_DEVICE,
+    .instance_size = sizeof(MacIONVRAMState),
+    .class_init = macio_nvram_class_init,
+};
+
+static void macio_nvram_register_types(void)
+{
+    type_register_static(&macio_nvram_type_info);
+}
+
+/* Set up a system OpenBIOS NVRAM partition */
+static void pmac_format_nvram_partition_of(MacIONVRAMState *nvr, int off,
+                                           int len)
+{
+    int sysp_end;
+
+    /* OpenBIOS nvram
variables partition */ + sysp_end = chrp_nvram_create_system_partition(&nvr->data[off], + DEF_SYSTEM_SIZE, len) + off; + + /* Free space partition */ + chrp_nvram_create_free_partition(&nvr->data[sysp_end], len - sysp_end); +} + +#define OSX_NVRAM_SIGNATURE (0x5A) + +/* Set up a Mac OS X NVRAM partition */ +static void pmac_format_nvram_partition_osx(MacIONVRAMState *nvr, int off, + int len) +{ + uint32_t start = off; + ChrpNvramPartHdr *part_header; + unsigned char *data = &nvr->data[start]; + + /* empty partition */ + part_header = (ChrpNvramPartHdr *)data; + part_header->signature = OSX_NVRAM_SIGNATURE; + pstrcpy(part_header->name, sizeof(part_header->name), "wwwwwwwwwwww"); + + chrp_nvram_finish_partition(part_header, len); + + /* Generation */ + stl_be_p(&data[20], 2); + + /* Adler32 checksum */ + stl_be_p(&data[16], adler32(0, &data[20], len - 20)); +} + +/* Set up NVRAM with OF and OSX partitions */ +void pmac_format_nvram_partition(MacIONVRAMState *nvr, int len) +{ + /* + * Mac OS X expects side "B" of the flash at the second half of NVRAM, + * so we use half of the chip for OF and the other half for a free OSX + * partition. + */ + pmac_format_nvram_partition_of(nvr, 0, len / 2); + pmac_format_nvram_partition_osx(nvr, len / 2, len / 2); +} +type_init(macio_nvram_register_types) diff --git a/hw/nvram/meson.build b/hw/nvram/meson.build new file mode 100644 index 000000000..202a5466e --- /dev/null +++ b/hw/nvram/meson.build @@ -0,0 +1,21 @@ +# QOM interfaces must be available anytime QOM is used. +qom_ss.add(files('fw_cfg-interface.c')) + +softmmu_ss.add(files('fw_cfg.c')) +softmmu_ss.add(when: 'CONFIG_CHRP_NVRAM', if_true: files('chrp_nvram.c')) +softmmu_ss.add(when: 'CONFIG_DS1225Y', if_true: files('ds1225y.c')) +softmmu_ss.add(when: 'CONFIG_NMC93XX_EEPROM', if_true: files('eeprom93xx.c')) +softmmu_ss.add(when: 'CONFIG_AT24C', if_true: files('eeprom_at24c.c')) +softmmu_ss.add(when: 'CONFIG_MAC_NVRAM', if_true: files('mac_nvram.c')) +softmmu_ss.add(when: 'CONFIG_NPCM7XX', if_true: files('npcm7xx_otp.c')) +softmmu_ss.add(when: 'CONFIG_NRF51_SOC', if_true: files('nrf51_nvm.c')) +softmmu_ss.add(when: 'CONFIG_XLNX_EFUSE_CRC', if_true: files('xlnx-efuse-crc.c')) +softmmu_ss.add(when: 'CONFIG_XLNX_EFUSE', if_true: files('xlnx-efuse.c')) +softmmu_ss.add(when: 'CONFIG_XLNX_EFUSE_VERSAL', if_true: files( + 'xlnx-versal-efuse-cache.c', + 'xlnx-versal-efuse-ctrl.c')) +softmmu_ss.add(when: 'CONFIG_XLNX_EFUSE_ZYNQMP', if_true: files( + 'xlnx-zynqmp-efuse.c')) +softmmu_ss.add(when: 'CONFIG_XLNX_BBRAM', if_true: files('xlnx-bbram.c')) + +specific_ss.add(when: 'CONFIG_PSERIES', if_true: files('spapr_nvram.c')) diff --git a/hw/nvram/npcm7xx_otp.c b/hw/nvram/npcm7xx_otp.c new file mode 100644 index 000000000..c61f2fc1a --- /dev/null +++ b/hw/nvram/npcm7xx_otp.c @@ -0,0 +1,440 @@ +/* + * Nuvoton NPCM7xx OTP (Fuse Array) Interface + * + * Copyright 2020 Google LLC + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ */ + +#include "qemu/osdep.h" + +#include "hw/nvram/npcm7xx_otp.h" +#include "migration/vmstate.h" +#include "qapi/error.h" +#include "qemu/bitops.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "qemu/units.h" + +/* Each module has 4 KiB of register space. Only a fraction of it is used. */ +#define NPCM7XX_OTP_REGS_SIZE (4 * KiB) + +/* 32-bit register indices. */ +typedef enum NPCM7xxOTPRegister { + NPCM7XX_OTP_FST, + NPCM7XX_OTP_FADDR, + NPCM7XX_OTP_FDATA, + NPCM7XX_OTP_FCFG, + /* Offset 0x10 is FKEYIND in OTP1, FUSTRAP in OTP2 */ + NPCM7XX_OTP_FKEYIND = 0x0010 / sizeof(uint32_t), + NPCM7XX_OTP_FUSTRAP = 0x0010 / sizeof(uint32_t), + NPCM7XX_OTP_FCTL, + NPCM7XX_OTP_REGS_END, +} NPCM7xxOTPRegister; + +/* Register field definitions. */ +#define FST_RIEN BIT(2) +#define FST_RDST BIT(1) +#define FST_RDY BIT(0) +#define FST_RO_MASK (FST_RDST | FST_RDY) + +#define FADDR_BYTEADDR(rv) extract32((rv), 0, 10) +#define FADDR_BITPOS(rv) extract32((rv), 10, 3) + +#define FDATA_CLEAR 0x00000001 + +#define FCFG_FDIS BIT(31) +#define FCFG_FCFGLK_MASK 0x00ff0000 + +#define FCTL_PROG_CMD1 0x00000001 +#define FCTL_PROG_CMD2 0xbf79e5d0 +#define FCTL_READ_CMD 0x00000002 + +/** + * struct NPCM7xxOTPClass - OTP module class. + * @parent: System bus device class. + * @mmio_ops: MMIO register operations for this type of module. + * + * The two OTP modules (key-storage and fuse-array) have slightly different + * behavior, so we give them different MMIO register operations. + */ +struct NPCM7xxOTPClass { + SysBusDeviceClass parent; + + const MemoryRegionOps *mmio_ops; +}; + +#define NPCM7XX_OTP_CLASS(klass) \ + OBJECT_CLASS_CHECK(NPCM7xxOTPClass, (klass), TYPE_NPCM7XX_OTP) +#define NPCM7XX_OTP_GET_CLASS(obj) \ + OBJECT_GET_CLASS(NPCM7xxOTPClass, (obj), TYPE_NPCM7XX_OTP) + +static uint8_t ecc_encode_nibble(uint8_t n) +{ + uint8_t result = n; + + result |= (((n >> 0) & 1) ^ ((n >> 1) & 1)) << 4; + result |= (((n >> 2) & 1) ^ ((n >> 3) & 1)) << 5; + result |= (((n >> 0) & 1) ^ ((n >> 2) & 1)) << 6; + result |= (((n >> 1) & 1) ^ ((n >> 3) & 1)) << 7; + + return result; +} + +void npcm7xx_otp_array_write(NPCM7xxOTPState *s, const void *data, + unsigned int offset, unsigned int len) +{ + const uint8_t *src = data; + uint8_t *dst = &s->array[offset]; + + while (len-- > 0) { + uint8_t c = *src++; + + *dst++ = ecc_encode_nibble(extract8(c, 0, 4)); + *dst++ = ecc_encode_nibble(extract8(c, 4, 4)); + } +} + +/* Common register read handler for both OTP classes. */ +static uint64_t npcm7xx_otp_read(NPCM7xxOTPState *s, NPCM7xxOTPRegister reg) +{ + uint32_t value = 0; + + switch (reg) { + case NPCM7XX_OTP_FST: + case NPCM7XX_OTP_FADDR: + case NPCM7XX_OTP_FDATA: + case NPCM7XX_OTP_FCFG: + value = s->regs[reg]; + break; + + case NPCM7XX_OTP_FCTL: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: read from write-only FCTL register\n", + DEVICE(s)->canonical_path); + break; + + default: + qemu_log_mask(LOG_GUEST_ERROR, "%s: read from invalid offset 0x%zx\n", + DEVICE(s)->canonical_path, reg * sizeof(uint32_t)); + break; + } + + return value; +} + +/* Read a byte from the OTP array into the data register. */ +static void npcm7xx_otp_read_array(NPCM7xxOTPState *s) +{ + uint32_t faddr = s->regs[NPCM7XX_OTP_FADDR]; + + s->regs[NPCM7XX_OTP_FDATA] = s->array[FADDR_BYTEADDR(faddr)]; + s->regs[NPCM7XX_OTP_FST] |= FST_RDST | FST_RDY; +} + +/* Program a byte from the data register into the OTP array. 
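+ * The FADDR register encodes both coordinates: bits [9:0] select the
+ * byte and bits [12:10] the bit within it, so e.g. FADDR = 0x0805 (a
+ * value chosen purely for illustration) programs bit 2 of byte 5.
+ * Fuse bits can only be set, never cleared.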
+ */
+static void npcm7xx_otp_program_array(NPCM7xxOTPState *s)
+{
+    uint32_t faddr = s->regs[NPCM7XX_OTP_FADDR];
+
+    /* Bits can only go 0->1, never 1->0. */
+    s->array[FADDR_BYTEADDR(faddr)] |= (1U << FADDR_BITPOS(faddr));
+    s->regs[NPCM7XX_OTP_FST] |= FST_RDST | FST_RDY;
+}
+
+/* Compute the next value of the FCFG register. */
+static uint32_t npcm7xx_otp_compute_fcfg(uint32_t cur_value, uint32_t new_value)
+{
+    uint32_t lock_mask;
+    uint32_t value;
+
+    /*
+     * FCFGLK holds sticky bits 16..23, indicating which bits in FPRGLK (8..15)
+     * and FRDLK (0..7) are read-only.
+     */
+    lock_mask = (cur_value & FCFG_FCFGLK_MASK) >> 8;
+    lock_mask |= lock_mask >> 8;
+    /* FDIS and FCFGLK bits are sticky (write 1 to set; can't clear). */
+    value = cur_value & (FCFG_FDIS | FCFG_FCFGLK_MASK);
+    /* Preserve read-only bits in FPRGLK and FRDLK */
+    value |= cur_value & lock_mask;
+    /* Set all bits that aren't read-only. */
+    value |= new_value & ~lock_mask;
+
+    return value;
+}
+
+/* Common register write handler for both OTP classes. */
+static void npcm7xx_otp_write(NPCM7xxOTPState *s, NPCM7xxOTPRegister reg,
+                              uint32_t value)
+{
+    switch (reg) {
+    case NPCM7XX_OTP_FST:
+        /* RDST is cleared by writing 1 to it. */
+        if (value & FST_RDST) {
+            s->regs[NPCM7XX_OTP_FST] &= ~FST_RDST;
+        }
+        /* Preserve read-only and write-one-to-clear bits */
+        value &= ~FST_RO_MASK;
+        value |= s->regs[NPCM7XX_OTP_FST] & FST_RO_MASK;
+        break;
+
+    case NPCM7XX_OTP_FADDR:
+        break;
+
+    case NPCM7XX_OTP_FDATA:
+        /*
+         * This register is cleared by writing a magic value to it; no other
+         * values can be written.
+         */
+        if (value == FDATA_CLEAR) {
+            value = 0;
+        } else {
+            value = s->regs[NPCM7XX_OTP_FDATA];
+        }
+        break;
+
+    case NPCM7XX_OTP_FCFG:
+        value = npcm7xx_otp_compute_fcfg(s->regs[NPCM7XX_OTP_FCFG], value);
+        break;
+
+    case NPCM7XX_OTP_FCTL:
+        switch (value) {
+        case FCTL_READ_CMD:
+            npcm7xx_otp_read_array(s);
+            break;
+
+        case FCTL_PROG_CMD1:
+            /*
+             * Programming requires writing two separate magic values to this
+             * register; this is the first one. Just store it so it can be
+             * verified later when the second magic value is received.
+             */
+            break;
+
+        case FCTL_PROG_CMD2:
+            /*
+             * Only initiate programming if we received the first half of the
+             * command immediately before this one.
+             */
+            if (s->regs[NPCM7XX_OTP_FCTL] == FCTL_PROG_CMD1) {
+                npcm7xx_otp_program_array(s);
+            }
+            break;
+
+        default:
+            qemu_log_mask(LOG_GUEST_ERROR,
+                          "%s: unrecognized FCTL value 0x%" PRIx32 "\n",
+                          DEVICE(s)->canonical_path, value);
+            break;
+        }
+        if (value != FCTL_PROG_CMD1) {
+            value = 0;
+        }
+        break;
+
+    default:
+        qemu_log_mask(LOG_GUEST_ERROR, "%s: write to invalid offset 0x%zx\n",
+                      DEVICE(s)->canonical_path, reg * sizeof(uint32_t));
+        return;
+    }
+
+    s->regs[reg] = value;
+}
+
+/* Register read handler specific to the fuse array OTP module. */
+static uint64_t npcm7xx_fuse_array_read(void *opaque, hwaddr addr,
+                                        unsigned int size)
+{
+    NPCM7xxOTPRegister reg = addr / sizeof(uint32_t);
+    NPCM7xxOTPState *s = opaque;
+    uint32_t value;
+
+    /*
+     * Only the Fuse Strap register needs special handling; all other registers
+     * work the same way for both kinds of OTP modules.
+     */
+    if (reg != NPCM7XX_OTP_FUSTRAP) {
+        value = npcm7xx_otp_read(s, reg);
+    } else {
+        /* FUSTRAP is stored as three copies in the OTP array. */
+        uint32_t fustrap[3];
+
+        memcpy(fustrap, &s->array[0], sizeof(fustrap));
+
+        /* Determine value by a majority vote on each bit.
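+         * For instance, with copies 0b110, 0b100 and 0b101, each bit keeps
+         * the value at least two copies agree on, yielding 0b100.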
*/ + value = (fustrap[0] & fustrap[1]) | (fustrap[0] & fustrap[2]) | + (fustrap[1] & fustrap[2]); + } + + return value; +} + +/* Register write handler specific to the fuse array OTP module. */ +static void npcm7xx_fuse_array_write(void *opaque, hwaddr addr, uint64_t v, + unsigned int size) +{ + NPCM7xxOTPRegister reg = addr / sizeof(uint32_t); + NPCM7xxOTPState *s = opaque; + + /* + * The Fuse Strap register is read-only. Other registers are handled by + * common code. + */ + if (reg != NPCM7XX_OTP_FUSTRAP) { + npcm7xx_otp_write(s, reg, v); + } +} + +static const MemoryRegionOps npcm7xx_fuse_array_ops = { + .read = npcm7xx_fuse_array_read, + .write = npcm7xx_fuse_array_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + .unaligned = false, + }, +}; + +/* Register read handler specific to the key storage OTP module. */ +static uint64_t npcm7xx_key_storage_read(void *opaque, hwaddr addr, + unsigned int size) +{ + NPCM7xxOTPRegister reg = addr / sizeof(uint32_t); + NPCM7xxOTPState *s = opaque; + + /* + * Only the Fuse Key Index register needs special handling; all other + * registers work the same way for both kinds of OTP modules. + */ + if (reg != NPCM7XX_OTP_FKEYIND) { + return npcm7xx_otp_read(s, reg); + } + + qemu_log_mask(LOG_UNIMP, "%s: FKEYIND is not implemented\n", __func__); + + return s->regs[NPCM7XX_OTP_FKEYIND]; +} + +/* Register write handler specific to the key storage OTP module. */ +static void npcm7xx_key_storage_write(void *opaque, hwaddr addr, uint64_t v, + unsigned int size) +{ + NPCM7xxOTPRegister reg = addr / sizeof(uint32_t); + NPCM7xxOTPState *s = opaque; + + /* + * Only the Fuse Key Index register needs special handling; all other + * registers work the same way for both kinds of OTP modules. 
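+     *
+     * In this model, FKEYIND is a stub: the access is logged as
+     * unimplemented and the register otherwise behaves as plain storage.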
+ */ + if (reg != NPCM7XX_OTP_FKEYIND) { + npcm7xx_otp_write(s, reg, v); + return; + } + + qemu_log_mask(LOG_UNIMP, "%s: FKEYIND is not implemented\n", __func__); + + s->regs[NPCM7XX_OTP_FKEYIND] = v; +} + +static const MemoryRegionOps npcm7xx_key_storage_ops = { + .read = npcm7xx_key_storage_read, + .write = npcm7xx_key_storage_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + .unaligned = false, + }, +}; + +static void npcm7xx_otp_enter_reset(Object *obj, ResetType type) +{ + NPCM7xxOTPState *s = NPCM7XX_OTP(obj); + + memset(s->regs, 0, sizeof(s->regs)); + + s->regs[NPCM7XX_OTP_FST] = 0x00000001; + s->regs[NPCM7XX_OTP_FCFG] = 0x20000000; +} + +static void npcm7xx_otp_realize(DeviceState *dev, Error **errp) +{ + NPCM7xxOTPClass *oc = NPCM7XX_OTP_GET_CLASS(dev); + NPCM7xxOTPState *s = NPCM7XX_OTP(dev); + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + + memset(s->array, 0, sizeof(s->array)); + + memory_region_init_io(&s->mmio, OBJECT(s), oc->mmio_ops, s, "regs", + NPCM7XX_OTP_REGS_SIZE); + sysbus_init_mmio(sbd, &s->mmio); +} + +static const VMStateDescription vmstate_npcm7xx_otp = { + .name = "npcm7xx-otp", + .version_id = 0, + .minimum_version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_UINT32_ARRAY(regs, NPCM7xxOTPState, NPCM7XX_OTP_NR_REGS), + VMSTATE_UINT8_ARRAY(array, NPCM7xxOTPState, NPCM7XX_OTP_ARRAY_BYTES), + VMSTATE_END_OF_LIST(), + }, +}; + +static void npcm7xx_otp_class_init(ObjectClass *klass, void *data) +{ + ResettableClass *rc = RESETTABLE_CLASS(klass); + DeviceClass *dc = DEVICE_CLASS(klass); + + QEMU_BUILD_BUG_ON(NPCM7XX_OTP_REGS_END > NPCM7XX_OTP_NR_REGS); + + dc->realize = npcm7xx_otp_realize; + dc->vmsd = &vmstate_npcm7xx_otp; + rc->phases.enter = npcm7xx_otp_enter_reset; +} + +static void npcm7xx_key_storage_class_init(ObjectClass *klass, void *data) +{ + NPCM7xxOTPClass *oc = NPCM7XX_OTP_CLASS(klass); + + oc->mmio_ops = &npcm7xx_key_storage_ops; +} + +static void npcm7xx_fuse_array_class_init(ObjectClass *klass, void *data) +{ + NPCM7xxOTPClass *oc = NPCM7XX_OTP_CLASS(klass); + + oc->mmio_ops = &npcm7xx_fuse_array_ops; +} + +static const TypeInfo npcm7xx_otp_types[] = { + { + .name = TYPE_NPCM7XX_OTP, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(NPCM7xxOTPState), + .class_size = sizeof(NPCM7xxOTPClass), + .class_init = npcm7xx_otp_class_init, + .abstract = true, + }, + { + .name = TYPE_NPCM7XX_KEY_STORAGE, + .parent = TYPE_NPCM7XX_OTP, + .class_init = npcm7xx_key_storage_class_init, + }, + { + .name = TYPE_NPCM7XX_FUSE_ARRAY, + .parent = TYPE_NPCM7XX_OTP, + .class_init = npcm7xx_fuse_array_class_init, + }, +}; +DEFINE_TYPES(npcm7xx_otp_types); diff --git a/hw/nvram/nrf51_nvm.c b/hw/nvram/nrf51_nvm.c new file mode 100644 index 000000000..7f1db8c42 --- /dev/null +++ b/hw/nvram/nrf51_nvm.c @@ -0,0 +1,400 @@ +/* + * Nordic Semiconductor nRF51 non-volatile memory + * + * It provides an interface to erase regions in flash memory. + * Furthermore it provides the user and factory information registers. + * + * Reference Manual: http://infocenter.nordicsemi.com/pdf/nRF51_RM_v3.0.pdf + * + * See nRF51 reference manual and product sheet sections: + * + Non-Volatile Memory Controller (NVMC) + * + Factory Information Configuration Registers (FICR) + * + User Information Configuration Registers (UICR) + * + * Copyright 2018 Steffen Görtz + * + * This code is licensed under the GPL version 2 or later. See + * the COPYING file in the top-level directory. 
+ */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "hw/arm/nrf51.h" +#include "hw/nvram/nrf51_nvm.h" +#include "hw/qdev-properties.h" +#include "migration/vmstate.h" + +/* + * FICR Registers Assignments + * CODEPAGESIZE 0x010 + * CODESIZE 0x014 + * CLENR0 0x028 + * PPFC 0x02C + * NUMRAMBLOCK 0x034 + * SIZERAMBLOCKS 0x038 + * SIZERAMBLOCK[0] 0x038 + * SIZERAMBLOCK[1] 0x03C + * SIZERAMBLOCK[2] 0x040 + * SIZERAMBLOCK[3] 0x044 + * CONFIGID 0x05C + * DEVICEID[0] 0x060 + * DEVICEID[1] 0x064 + * ER[0] 0x080 + * ER[1] 0x084 + * ER[2] 0x088 + * ER[3] 0x08C + * IR[0] 0x090 + * IR[1] 0x094 + * IR[2] 0x098 + * IR[3] 0x09C + * DEVICEADDRTYPE 0x0A0 + * DEVICEADDR[0] 0x0A4 + * DEVICEADDR[1] 0x0A8 + * OVERRIDEEN 0x0AC + * NRF_1MBIT[0] 0x0B0 + * NRF_1MBIT[1] 0x0B4 + * NRF_1MBIT[2] 0x0B8 + * NRF_1MBIT[3] 0x0BC + * NRF_1MBIT[4] 0x0C0 + * BLE_1MBIT[0] 0x0EC + * BLE_1MBIT[1] 0x0F0 + * BLE_1MBIT[2] 0x0F4 + * BLE_1MBIT[3] 0x0F8 + * BLE_1MBIT[4] 0x0FC + */ +static const uint32_t ficr_content[64] = { + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000400, + 0x00000100, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000002, 0x00002000, + 0x00002000, 0x00002000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000003, + 0x12345678, 0x9ABCDEF1, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF +}; + +static uint64_t ficr_read(void *opaque, hwaddr offset, unsigned int size) +{ + assert(offset < sizeof(ficr_content)); + return ficr_content[offset / 4]; +} + +static void ficr_write(void *opaque, hwaddr offset, uint64_t value, + unsigned int size) +{ + /* Intentionally do nothing */ +} + +static const MemoryRegionOps ficr_ops = { + .read = ficr_read, + .write = ficr_write, + .impl.min_access_size = 4, + .impl.max_access_size = 4, + .endianness = DEVICE_LITTLE_ENDIAN +}; + +/* + * UICR Registers Assignments + * CLENR0 0x000 + * RBPCONF 0x004 + * XTALFREQ 0x008 + * FWID 0x010 + * BOOTLOADERADDR 0x014 + * NRFFW[0] 0x014 + * NRFFW[1] 0x018 + * NRFFW[2] 0x01C + * NRFFW[3] 0x020 + * NRFFW[4] 0x024 + * NRFFW[5] 0x028 + * NRFFW[6] 0x02C + * NRFFW[7] 0x030 + * NRFFW[8] 0x034 + * NRFFW[9] 0x038 + * NRFFW[10] 0x03C + * NRFFW[11] 0x040 + * NRFFW[12] 0x044 + * NRFFW[13] 0x048 + * NRFFW[14] 0x04C + * NRFHW[0] 0x050 + * NRFHW[1] 0x054 + * NRFHW[2] 0x058 + * NRFHW[3] 0x05C + * NRFHW[4] 0x060 + * NRFHW[5] 0x064 + * NRFHW[6] 0x068 + * NRFHW[7] 0x06C + * NRFHW[8] 0x070 + * NRFHW[9] 0x074 + * NRFHW[10] 0x078 + * NRFHW[11] 0x07C + * CUSTOMER[0] 0x080 + * CUSTOMER[1] 0x084 + * CUSTOMER[2] 0x088 + * CUSTOMER[3] 0x08C + * CUSTOMER[4] 0x090 + * CUSTOMER[5] 0x094 + * CUSTOMER[6] 0x098 + * CUSTOMER[7] 0x09C + * CUSTOMER[8] 0x0A0 + * CUSTOMER[9] 0x0A4 + * CUSTOMER[10] 0x0A8 + * CUSTOMER[11] 0x0AC + * CUSTOMER[12] 0x0B0 + * CUSTOMER[13] 0x0B4 + * CUSTOMER[14] 0x0B8 + * CUSTOMER[15] 0x0BC + * CUSTOMER[16] 0x0C0 + * CUSTOMER[17] 0x0C4 + * CUSTOMER[18] 0x0C8 + * CUSTOMER[19] 0x0CC + * CUSTOMER[20] 0x0D0 + * CUSTOMER[21] 0x0D4 + * CUSTOMER[22] 0x0D8 + * CUSTOMER[23] 0x0DC + * CUSTOMER[24] 0x0E0 + * 
CUSTOMER[25] 0x0E4 + * CUSTOMER[26] 0x0E8 + * CUSTOMER[27] 0x0EC + * CUSTOMER[28] 0x0F0 + * CUSTOMER[29] 0x0F4 + * CUSTOMER[30] 0x0F8 + * CUSTOMER[31] 0x0FC + */ + +static uint64_t uicr_read(void *opaque, hwaddr offset, unsigned int size) +{ + NRF51NVMState *s = NRF51_NVM(opaque); + + assert(offset < sizeof(s->uicr_content)); + return s->uicr_content[offset / 4]; +} + +static void uicr_write(void *opaque, hwaddr offset, uint64_t value, + unsigned int size) +{ + NRF51NVMState *s = NRF51_NVM(opaque); + + assert(offset < sizeof(s->uicr_content)); + s->uicr_content[offset / 4] = value; +} + +static const MemoryRegionOps uicr_ops = { + .read = uicr_read, + .write = uicr_write, + .impl.min_access_size = 4, + .impl.max_access_size = 4, + .endianness = DEVICE_LITTLE_ENDIAN +}; + + +static uint64_t io_read(void *opaque, hwaddr offset, unsigned int size) +{ + NRF51NVMState *s = NRF51_NVM(opaque); + uint64_t r = 0; + + switch (offset) { + case NRF51_NVMC_READY: + r = NRF51_NVMC_READY_READY; + break; + case NRF51_NVMC_CONFIG: + r = s->config; + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: bad read offset 0x%" HWADDR_PRIx "\n", __func__, offset); + break; + } + + return r; +} + +static void io_write(void *opaque, hwaddr offset, uint64_t value, + unsigned int size) +{ + NRF51NVMState *s = NRF51_NVM(opaque); + + switch (offset) { + case NRF51_NVMC_CONFIG: + s->config = value & NRF51_NVMC_CONFIG_MASK; + break; + case NRF51_NVMC_ERASEPCR0: + case NRF51_NVMC_ERASEPCR1: + if (s->config & NRF51_NVMC_CONFIG_EEN) { + /* Mask in-page sub address */ + value &= ~(NRF51_PAGE_SIZE - 1); + if (value <= (s->flash_size - NRF51_PAGE_SIZE)) { + memset(s->storage + value, 0xFF, NRF51_PAGE_SIZE); + memory_region_flush_rom_device(&s->flash, value, + NRF51_PAGE_SIZE); + } + } else { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Flash erase at 0x%" HWADDR_PRIx" while flash not erasable.\n", + __func__, offset); + } + break; + case NRF51_NVMC_ERASEALL: + if (value == NRF51_NVMC_ERASE) { + if (s->config & NRF51_NVMC_CONFIG_EEN) { + memset(s->storage, 0xFF, s->flash_size); + memory_region_flush_rom_device(&s->flash, 0, s->flash_size); + memset(s->uicr_content, 0xFF, sizeof(s->uicr_content)); + } else { + qemu_log_mask(LOG_GUEST_ERROR, "%s: Flash not erasable.\n", + __func__); + } + } + break; + case NRF51_NVMC_ERASEUICR: + if (value == NRF51_NVMC_ERASE) { + memset(s->uicr_content, 0xFF, sizeof(s->uicr_content)); + } + break; + + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: bad write offset 0x%" HWADDR_PRIx "\n", __func__, offset); + } +} + +static const MemoryRegionOps io_ops = { + .read = io_read, + .write = io_write, + .impl.min_access_size = 4, + .impl.max_access_size = 4, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static uint64_t flash_read(void *opaque, hwaddr offset, unsigned size) +{ + /* + * This is a rom_device MemoryRegion which is always in + * romd_mode (we never put it in MMIO mode), so reads always + * go directly to RAM and never come here. 
+ */ + g_assert_not_reached(); +} + +static void flash_write(void *opaque, hwaddr offset, uint64_t value, + unsigned int size) +{ + NRF51NVMState *s = NRF51_NVM(opaque); + + if (s->config & NRF51_NVMC_CONFIG_WEN) { + uint32_t oldval; + + assert(offset + size <= s->flash_size); + + /* NOR Flash only allows bits to be flipped from 1's to 0's on write */ + oldval = ldl_le_p(s->storage + offset); + oldval &= value; + stl_le_p(s->storage + offset, oldval); + + memory_region_flush_rom_device(&s->flash, offset, size); + } else { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Flash write 0x%" HWADDR_PRIx" while flash not writable.\n", + __func__, offset); + } +} + + + +static const MemoryRegionOps flash_ops = { + .read = flash_read, + .write = flash_write, + .valid.min_access_size = 4, + .valid.max_access_size = 4, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static void nrf51_nvm_init(Object *obj) +{ + NRF51NVMState *s = NRF51_NVM(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + + memory_region_init_io(&s->mmio, obj, &io_ops, s, "nrf51_soc.nvmc", + NRF51_NVMC_SIZE); + sysbus_init_mmio(sbd, &s->mmio); + + memory_region_init_io(&s->ficr, obj, &ficr_ops, s, "nrf51_soc.ficr", + sizeof(ficr_content)); + sysbus_init_mmio(sbd, &s->ficr); + + memory_region_init_io(&s->uicr, obj, &uicr_ops, s, "nrf51_soc.uicr", + sizeof(s->uicr_content)); + sysbus_init_mmio(sbd, &s->uicr); +} + +static void nrf51_nvm_realize(DeviceState *dev, Error **errp) +{ + NRF51NVMState *s = NRF51_NVM(dev); + Error *err = NULL; + + memory_region_init_rom_device(&s->flash, OBJECT(dev), &flash_ops, s, + "nrf51_soc.flash", s->flash_size, &err); + if (err) { + error_propagate(errp, err); + return; + } + + s->storage = memory_region_get_ram_ptr(&s->flash); + sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->flash); +} + +static void nrf51_nvm_reset(DeviceState *dev) +{ + NRF51NVMState *s = NRF51_NVM(dev); + + s->config = 0x00; + memset(s->uicr_content, 0xFF, sizeof(s->uicr_content)); +} + +static Property nrf51_nvm_properties[] = { + DEFINE_PROP_UINT32("flash-size", NRF51NVMState, flash_size, 0x40000), + DEFINE_PROP_END_OF_LIST(), +}; + +static const VMStateDescription vmstate_nvm = { + .name = "nrf51_soc.nvm", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32_ARRAY(uicr_content, NRF51NVMState, + NRF51_UICR_FIXTURE_SIZE), + VMSTATE_UINT32(config, NRF51NVMState), + VMSTATE_END_OF_LIST() + } +}; + +static void nrf51_nvm_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + device_class_set_props(dc, nrf51_nvm_properties); + dc->vmsd = &vmstate_nvm; + dc->realize = nrf51_nvm_realize; + dc->reset = nrf51_nvm_reset; +} + +static const TypeInfo nrf51_nvm_info = { + .name = TYPE_NRF51_NVM, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(NRF51NVMState), + .instance_init = nrf51_nvm_init, + .class_init = nrf51_nvm_class_init +}; + +static void nrf51_nvm_register_types(void) +{ + type_register_static(&nrf51_nvm_info); +} + +type_init(nrf51_nvm_register_types) diff --git a/hw/nvram/spapr_nvram.c b/hw/nvram/spapr_nvram.c new file mode 100644 index 000000000..fbfdf47e2 --- /dev/null +++ b/hw/nvram/spapr_nvram.c @@ -0,0 +1,290 @@ +/* + * QEMU sPAPR NVRAM emulation + * + * Copyright (C) 2012 David Gibson, IBM Corporation. 
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/module.h"
+#include "qemu/units.h"
+#include "qapi/error.h"
+#include <libfdt.h>
+
+#include "sysemu/block-backend.h"
+#include "sysemu/device_tree.h"
+#include "sysemu/sysemu.h"
+#include "sysemu/runstate.h"
+#include "migration/vmstate.h"
+#include "hw/nvram/chrp_nvram.h"
+#include "hw/ppc/spapr.h"
+#include "hw/ppc/spapr_vio.h"
+#include "hw/qdev-properties.h"
+#include "hw/qdev-properties-system.h"
+#include "qom/object.h"
+
+struct SpaprNvram {
+    SpaprVioDevice sdev;
+    uint32_t size;
+    uint8_t *buf;
+    BlockBackend *blk;
+    VMChangeStateEntry *vmstate;
+};
+
+#define TYPE_VIO_SPAPR_NVRAM "spapr-nvram"
+OBJECT_DECLARE_SIMPLE_TYPE(SpaprNvram, VIO_SPAPR_NVRAM)
+
+#define MIN_NVRAM_SIZE (8 * KiB)
+#define DEFAULT_NVRAM_SIZE (64 * KiB)
+#define MAX_NVRAM_SIZE (1 * MiB)
+
+static void rtas_nvram_fetch(PowerPCCPU *cpu, SpaprMachineState *spapr,
+                             uint32_t token, uint32_t nargs,
+                             target_ulong args,
+                             uint32_t nret, target_ulong rets)
+{
+    SpaprNvram *nvram = spapr->nvram;
+    hwaddr offset, buffer, len;
+    void *membuf;
+
+    if ((nargs != 3) || (nret != 2)) {
+        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
+        return;
+    }
+
+    if (!nvram) {
+        rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
+        rtas_st(rets, 1, 0);
+        return;
+    }
+
+    offset = rtas_ld(args, 0);
+    buffer = rtas_ld(args, 1);
+    len = rtas_ld(args, 2);
+
+    if (((offset + len) < offset)
+        || ((offset + len) > nvram->size)) {
+        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
+        rtas_st(rets, 1, 0);
+        return;
+    }
+
+    assert(nvram->buf);
+
+    membuf = cpu_physical_memory_map(buffer, &len, true);
+    memcpy(membuf, nvram->buf + offset, len);
+    cpu_physical_memory_unmap(membuf, len, 1, len);
+
+    rtas_st(rets, 0, RTAS_OUT_SUCCESS);
+    rtas_st(rets, 1, len);
+}
+
+static void rtas_nvram_store(PowerPCCPU *cpu, SpaprMachineState *spapr,
+                             uint32_t token, uint32_t nargs,
+                             target_ulong args,
+                             uint32_t nret, target_ulong rets)
+{
+    SpaprNvram *nvram = spapr->nvram;
+    hwaddr offset, buffer, len;
+    int alen;
+    void *membuf;
+
+    if ((nargs != 3) || (nret != 2)) {
+        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
+        return;
+    }
+
+    if (!nvram) {
+        rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
+        return;
+    }
+
+    offset = rtas_ld(args, 0);
+    buffer = rtas_ld(args, 1);
+    len = rtas_ld(args, 2);
+
+    if (((offset + len) < offset)
+        || ((offset + len) > nvram->size)) {
+        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
+        return;
+    }
+
+    membuf = cpu_physical_memory_map(buffer, &len, false);
+
+    alen = len;
+    if
(nvram->blk) { + alen = blk_pwrite(nvram->blk, offset, membuf, len, 0); + } + + assert(nvram->buf); + memcpy(nvram->buf + offset, membuf, len); + + cpu_physical_memory_unmap(membuf, len, 0, len); + + rtas_st(rets, 0, (alen < len) ? RTAS_OUT_HW_ERROR : RTAS_OUT_SUCCESS); + rtas_st(rets, 1, (alen < 0) ? 0 : alen); +} + +static void spapr_nvram_realize(SpaprVioDevice *dev, Error **errp) +{ + SpaprNvram *nvram = VIO_SPAPR_NVRAM(dev); + int ret; + + if (nvram->blk) { + int64_t len = blk_getlength(nvram->blk); + + if (len < 0) { + error_setg_errno(errp, -len, + "could not get length of backing image"); + return; + } + + nvram->size = len; + + ret = blk_set_perm(nvram->blk, + BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE, + BLK_PERM_ALL, errp); + if (ret < 0) { + return; + } + } else { + nvram->size = DEFAULT_NVRAM_SIZE; + } + + nvram->buf = g_malloc0(nvram->size); + + if ((nvram->size < MIN_NVRAM_SIZE) || (nvram->size > MAX_NVRAM_SIZE)) { + error_setg(errp, + "spapr-nvram must be between %" PRId64 + " and %" PRId64 " bytes in size", + MIN_NVRAM_SIZE, MAX_NVRAM_SIZE); + return; + } + + if (nvram->blk) { + int alen = blk_pread(nvram->blk, 0, nvram->buf, nvram->size); + + if (alen != nvram->size) { + error_setg(errp, "can't read spapr-nvram contents"); + return; + } + } else if (nb_prom_envs > 0) { + /* Create a system partition to pass the -prom-env variables */ + chrp_nvram_create_system_partition(nvram->buf, MIN_NVRAM_SIZE / 4, + nvram->size); + chrp_nvram_create_free_partition(&nvram->buf[MIN_NVRAM_SIZE / 4], + nvram->size - MIN_NVRAM_SIZE / 4); + } + + spapr_rtas_register(RTAS_NVRAM_FETCH, "nvram-fetch", rtas_nvram_fetch); + spapr_rtas_register(RTAS_NVRAM_STORE, "nvram-store", rtas_nvram_store); +} + +static int spapr_nvram_devnode(SpaprVioDevice *dev, void *fdt, int node_off) +{ + SpaprNvram *nvram = VIO_SPAPR_NVRAM(dev); + + return fdt_setprop_cell(fdt, node_off, "#bytes", nvram->size); +} + +static int spapr_nvram_pre_load(void *opaque) +{ + SpaprNvram *nvram = VIO_SPAPR_NVRAM(opaque); + + g_free(nvram->buf); + nvram->buf = NULL; + nvram->size = 0; + + return 0; +} + +static void postload_update_cb(void *opaque, bool running, RunState state) +{ + SpaprNvram *nvram = opaque; + + /* This is called after bdrv_invalidate_cache_all. 
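+     * The handler deregisters itself and then flushes the NVRAM contents
+     * received via migration back to the drive, so the write-back happens
+     * exactly once.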
*/ + + qemu_del_vm_change_state_handler(nvram->vmstate); + nvram->vmstate = NULL; + + blk_pwrite(nvram->blk, 0, nvram->buf, nvram->size, 0); +} + +static int spapr_nvram_post_load(void *opaque, int version_id) +{ + SpaprNvram *nvram = VIO_SPAPR_NVRAM(opaque); + + if (nvram->blk) { + nvram->vmstate = qemu_add_vm_change_state_handler(postload_update_cb, + nvram); + } + + return 0; +} + +static const VMStateDescription vmstate_spapr_nvram = { + .name = "spapr_nvram", + .version_id = 1, + .minimum_version_id = 1, + .pre_load = spapr_nvram_pre_load, + .post_load = spapr_nvram_post_load, + .fields = (VMStateField[]) { + VMSTATE_UINT32(size, SpaprNvram), + VMSTATE_VBUFFER_ALLOC_UINT32(buf, SpaprNvram, 1, NULL, size), + VMSTATE_END_OF_LIST() + }, +}; + +static Property spapr_nvram_properties[] = { + DEFINE_SPAPR_PROPERTIES(SpaprNvram, sdev), + DEFINE_PROP_DRIVE("drive", SpaprNvram, blk), + DEFINE_PROP_END_OF_LIST(), +}; + +static void spapr_nvram_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + SpaprVioDeviceClass *k = VIO_SPAPR_DEVICE_CLASS(klass); + + k->realize = spapr_nvram_realize; + k->devnode = spapr_nvram_devnode; + k->dt_name = "nvram"; + k->dt_type = "nvram"; + k->dt_compatible = "qemu,spapr-nvram"; + set_bit(DEVICE_CATEGORY_MISC, dc->categories); + device_class_set_props(dc, spapr_nvram_properties); + dc->vmsd = &vmstate_spapr_nvram; + /* Reason: Internal device only, uses spapr_rtas_register() in realize() */ + dc->user_creatable = false; +} + +static const TypeInfo spapr_nvram_type_info = { + .name = TYPE_VIO_SPAPR_NVRAM, + .parent = TYPE_VIO_SPAPR_DEVICE, + .instance_size = sizeof(SpaprNvram), + .class_init = spapr_nvram_class_init, +}; + +static void spapr_nvram_register_types(void) +{ + type_register_static(&spapr_nvram_type_info); +} + +type_init(spapr_nvram_register_types) diff --git a/hw/nvram/trace-events b/hw/nvram/trace-events new file mode 100644 index 000000000..5e33b24d4 --- /dev/null +++ b/hw/nvram/trace-events @@ -0,0 +1,19 @@ +# See docs/devel/tracing.rst for syntax documentation. 
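+#
+# Each entry names a trace point and gives a printf-style format string for
+# its arguments; nvram_read below, for example, logs the address read and
+# the byte returned.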
+ +# ds1225y.c +nvram_read(uint32_t addr, uint32_t ret) "read addr %d: 0x%02x" +nvram_write(uint32_t addr, uint32_t old, uint32_t val) "write addr %d: 0x%02x -> 0x%02x" + +# fw_cfg.c +fw_cfg_select(void *s, uint16_t key_value, const char *key_name, int ret) "%p key 0x%04" PRIx16 " '%s', ret: %d" +fw_cfg_read(void *s, uint64_t ret) "%p = 0x%"PRIx64 +fw_cfg_add_bytes(uint16_t key_value, const char *key_name, size_t len) "key 0x%04" PRIx16 " '%s', %zu bytes" +fw_cfg_add_file(void *s, int index, char *name, size_t len) "%p #%d: %s (%zd bytes)" +fw_cfg_add_string(uint16_t key_value, const char *key_name, const char *value) "key 0x%04" PRIx16 " '%s', value '%s'" +fw_cfg_add_i16(uint16_t key_value, const char *key_name, uint16_t value) "key 0x%04" PRIx16 " '%s', value 0x%" PRIx16 +fw_cfg_add_i32(uint16_t key_value, const char *key_name, uint32_t value) "key 0x%04" PRIx16 " '%s', value 0x%" PRIx32 +fw_cfg_add_i64(uint16_t key_value, const char *key_name, uint64_t value) "key 0x%04" PRIx16 " '%s', value 0x%" PRIx64 + +# mac_nvram.c +macio_nvram_read(uint32_t addr, uint8_t val) "read addr=0x%04"PRIx32" val=0x%02x" +macio_nvram_write(uint32_t addr, uint8_t val) "write addr=0x%04"PRIx32" val=0x%02x" diff --git a/hw/nvram/trace.h b/hw/nvram/trace.h new file mode 100644 index 000000000..88fa900ad --- /dev/null +++ b/hw/nvram/trace.h @@ -0,0 +1 @@ +#include "trace/trace-hw_nvram.h" diff --git a/hw/nvram/xlnx-bbram.c b/hw/nvram/xlnx-bbram.c new file mode 100644 index 000000000..b70828e5b --- /dev/null +++ b/hw/nvram/xlnx-bbram.c @@ -0,0 +1,545 @@ +/* + * QEMU model of the Xilinx BBRAM Battery Backed RAM + * + * Copyright (c) 2014-2021 Xilinx Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include "hw/nvram/xlnx-bbram.h" + +#include "qemu/error-report.h" +#include "qemu/log.h" +#include "qapi/error.h" +#include "sysemu/blockdev.h" +#include "migration/vmstate.h" +#include "hw/qdev-properties.h" +#include "hw/qdev-properties-system.h" +#include "hw/nvram/xlnx-efuse.h" + +#ifndef XLNX_BBRAM_ERR_DEBUG +#define XLNX_BBRAM_ERR_DEBUG 0 +#endif + +REG32(BBRAM_STATUS, 0x0) + FIELD(BBRAM_STATUS, AES_CRC_PASS, 9, 1) + FIELD(BBRAM_STATUS, AES_CRC_DONE, 8, 1) + FIELD(BBRAM_STATUS, BBRAM_ZEROIZED, 4, 1) + FIELD(BBRAM_STATUS, PGM_MODE, 0, 1) +REG32(BBRAM_CTRL, 0x4) + FIELD(BBRAM_CTRL, ZEROIZE, 0, 1) +REG32(PGM_MODE, 0x8) +REG32(BBRAM_AES_CRC, 0xc) +REG32(BBRAM_0, 0x10) +REG32(BBRAM_1, 0x14) +REG32(BBRAM_2, 0x18) +REG32(BBRAM_3, 0x1c) +REG32(BBRAM_4, 0x20) +REG32(BBRAM_5, 0x24) +REG32(BBRAM_6, 0x28) +REG32(BBRAM_7, 0x2c) +REG32(BBRAM_8, 0x30) +REG32(BBRAM_SLVERR, 0x34) + FIELD(BBRAM_SLVERR, ENABLE, 0, 1) +REG32(BBRAM_ISR, 0x38) + FIELD(BBRAM_ISR, APB_SLVERR, 0, 1) +REG32(BBRAM_IMR, 0x3c) + FIELD(BBRAM_IMR, APB_SLVERR, 0, 1) +REG32(BBRAM_IER, 0x40) + FIELD(BBRAM_IER, APB_SLVERR, 0, 1) +REG32(BBRAM_IDR, 0x44) + FIELD(BBRAM_IDR, APB_SLVERR, 0, 1) +REG32(BBRAM_MSW_LOCK, 0x4c) + FIELD(BBRAM_MSW_LOCK, VAL, 0, 1) + +#define R_MAX (R_BBRAM_MSW_LOCK + 1) + +#define RAM_MAX (A_BBRAM_8 + 4 - A_BBRAM_0) + +#define BBRAM_PGM_MAGIC 0x757bdf0d + +QEMU_BUILD_BUG_ON(R_MAX != ARRAY_SIZE(((XlnxBBRam *)0)->regs)); + +static bool bbram_msw_locked(XlnxBBRam *s) +{ + return ARRAY_FIELD_EX32(s->regs, BBRAM_MSW_LOCK, VAL) != 0; +} + +static bool bbram_pgm_enabled(XlnxBBRam *s) +{ + return ARRAY_FIELD_EX32(s->regs, BBRAM_STATUS, PGM_MODE) != 0; +} + +static void bbram_bdrv_error(XlnxBBRam *s, int rc, gchar *detail) +{ + Error *errp; + + error_setg_errno(&errp, -rc, "%s: BBRAM backstore %s failed.", + blk_name(s->blk), detail); + error_report("%s", error_get_pretty(errp)); + error_free(errp); + + g_free(detail); +} + +static void bbram_bdrv_read(XlnxBBRam *s, Error **errp) +{ + uint32_t *ram = &s->regs[R_BBRAM_0]; + int nr = RAM_MAX; + + if (!s->blk) { + return; + } + + s->blk_ro = !blk_supports_write_perm(s->blk); + if (!s->blk_ro) { + int rc; + + rc = blk_set_perm(s->blk, + (BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE), + BLK_PERM_ALL, NULL); + if (rc) { + s->blk_ro = true; + } + } + if (s->blk_ro) { + warn_report("%s: Skip saving updates to read-only BBRAM backstore.", + blk_name(s->blk)); + } + + if (blk_pread(s->blk, 0, ram, nr) < 0) { + error_setg(errp, + "%s: Failed to read %u bytes from BBRAM backstore.", + blk_name(s->blk), nr); + return; + } + + /* Convert from little-endian backstore for each 32-bit word */ + nr /= 4; + while (nr--) { + ram[nr] = le32_to_cpu(ram[nr]); + } +} + +static void bbram_bdrv_sync(XlnxBBRam *s, uint64_t hwaddr) +{ + uint32_t le32; + unsigned offset; + int rc; + + assert(A_BBRAM_0 <= hwaddr && hwaddr <= A_BBRAM_8); + + /* Backstore is always in little-endian */ + le32 = cpu_to_le32(s->regs[hwaddr / 4]); + + /* Update zeroized flag */ + if (le32 && (hwaddr != A_BBRAM_8 || s->bbram8_wo)) { + ARRAY_FIELD_DP32(s->regs, BBRAM_STATUS, BBRAM_ZEROIZED, 0); + } + + if (!s->blk || s->blk_ro) { + return; + } + + offset = hwaddr - A_BBRAM_0; + rc = blk_pwrite(s->blk, offset, &le32, 4, 0); + if (rc < 0) { + bbram_bdrv_error(s, rc, g_strdup_printf("write to offset %u", offset)); + } +} + +static void bbram_bdrv_zero(XlnxBBRam *s) +{ + int rc; + + ARRAY_FIELD_DP32(s->regs, BBRAM_STATUS, BBRAM_ZEROIZED, 1); + + if (!s->blk || s->blk_ro) { + return; + } + + rc = 
blk_make_zero(s->blk, 0); + if (rc < 0) { + bbram_bdrv_error(s, rc, g_strdup("zeroizing")); + } + + /* Restore bbram8 if it is non-zero */ + if (s->regs[R_BBRAM_8]) { + bbram_bdrv_sync(s, A_BBRAM_8); + } +} + +static void bbram_zeroize(XlnxBBRam *s) +{ + int nr = RAM_MAX - (s->bbram8_wo ? 0 : 4); /* only wo bbram8 is cleared */ + + memset(&s->regs[R_BBRAM_0], 0, nr); + bbram_bdrv_zero(s); +} + +static void bbram_update_irq(XlnxBBRam *s) +{ + bool pending = s->regs[R_BBRAM_ISR] & ~s->regs[R_BBRAM_IMR]; + + qemu_set_irq(s->irq_bbram, pending); +} + +static void bbram_ctrl_postw(RegisterInfo *reg, uint64_t val64) +{ + XlnxBBRam *s = XLNX_BBRAM(reg->opaque); + uint32_t val = val64; + + if (val & R_BBRAM_CTRL_ZEROIZE_MASK) { + bbram_zeroize(s); + /* The bit is self clearing */ + s->regs[R_BBRAM_CTRL] &= ~R_BBRAM_CTRL_ZEROIZE_MASK; + } +} + +static void bbram_pgm_mode_postw(RegisterInfo *reg, uint64_t val64) +{ + XlnxBBRam *s = XLNX_BBRAM(reg->opaque); + uint32_t val = val64; + + if (val == BBRAM_PGM_MAGIC) { + bbram_zeroize(s); + + /* The status bit is cleared only by POR */ + ARRAY_FIELD_DP32(s->regs, BBRAM_STATUS, PGM_MODE, 1); + } +} + +static void bbram_aes_crc_postw(RegisterInfo *reg, uint64_t val64) +{ + XlnxBBRam *s = XLNX_BBRAM(reg->opaque); + uint32_t calc_crc; + + if (!bbram_pgm_enabled(s)) { + /* We are not in programming mode, don't do anything */ + return; + } + + /* Perform the AES integrity check */ + s->regs[R_BBRAM_STATUS] |= R_BBRAM_STATUS_AES_CRC_DONE_MASK; + + /* + * Set check status. + * + * ZynqMP BBRAM check has a zero-u32 prepended; see: + * https://github.com/Xilinx/embeddedsw/blob/release-2019.2/lib/sw_services/xilskey/src/xilskey_bbramps_zynqmp.c#L311 + */ + calc_crc = xlnx_efuse_calc_crc(&s->regs[R_BBRAM_0], + (R_BBRAM_8 - R_BBRAM_0), s->crc_zpads); + + ARRAY_FIELD_DP32(s->regs, BBRAM_STATUS, AES_CRC_PASS, + (s->regs[R_BBRAM_AES_CRC] == calc_crc)); +} + +static uint64_t bbram_key_prew(RegisterInfo *reg, uint64_t val64) +{ + XlnxBBRam *s = XLNX_BBRAM(reg->opaque); + uint32_t original_data = *(uint32_t *) reg->data; + + if (bbram_pgm_enabled(s)) { + return val64; + } else { + /* We are not in programming mode, don't do anything */ + qemu_log_mask(LOG_GUEST_ERROR, + "Not in programming mode, dropping the write\n"); + return original_data; + } +} + +static void bbram_key_postw(RegisterInfo *reg, uint64_t val64) +{ + XlnxBBRam *s = XLNX_BBRAM(reg->opaque); + + bbram_bdrv_sync(s, reg->access->addr); +} + +static uint64_t bbram_wo_postr(RegisterInfo *reg, uint64_t val) +{ + return 0; +} + +static uint64_t bbram_r8_postr(RegisterInfo *reg, uint64_t val) +{ + XlnxBBRam *s = XLNX_BBRAM(reg->opaque); + + return s->bbram8_wo ? 
bbram_wo_postr(reg, val) : val; +} + +static bool bbram_r8_readonly(XlnxBBRam *s) +{ + return !bbram_pgm_enabled(s) || bbram_msw_locked(s); +} + +static uint64_t bbram_r8_prew(RegisterInfo *reg, uint64_t val64) +{ + XlnxBBRam *s = XLNX_BBRAM(reg->opaque); + + if (bbram_r8_readonly(s)) { + val64 = *(uint32_t *)reg->data; + } + + return val64; +} + +static void bbram_r8_postw(RegisterInfo *reg, uint64_t val64) +{ + XlnxBBRam *s = XLNX_BBRAM(reg->opaque); + + if (!bbram_r8_readonly(s)) { + bbram_bdrv_sync(s, A_BBRAM_8); + } +} + +static uint64_t bbram_msw_lock_prew(RegisterInfo *reg, uint64_t val64) +{ + XlnxBBRam *s = XLNX_BBRAM(reg->opaque); + + /* Never lock if bbram8 is wo; and, only POR can clear the lock */ + if (s->bbram8_wo) { + val64 = 0; + } else { + val64 |= s->regs[R_BBRAM_MSW_LOCK]; + } + + return val64; +} + +static void bbram_isr_postw(RegisterInfo *reg, uint64_t val64) +{ + XlnxBBRam *s = XLNX_BBRAM(reg->opaque); + + bbram_update_irq(s); +} + +static uint64_t bbram_ier_prew(RegisterInfo *reg, uint64_t val64) +{ + XlnxBBRam *s = XLNX_BBRAM(reg->opaque); + uint32_t val = val64; + + s->regs[R_BBRAM_IMR] &= ~val; + bbram_update_irq(s); + return 0; +} + +static uint64_t bbram_idr_prew(RegisterInfo *reg, uint64_t val64) +{ + XlnxBBRam *s = XLNX_BBRAM(reg->opaque); + uint32_t val = val64; + + s->regs[R_BBRAM_IMR] |= val; + bbram_update_irq(s); + return 0; +} + +static RegisterAccessInfo bbram_ctrl_regs_info[] = { + { .name = "BBRAM_STATUS", .addr = A_BBRAM_STATUS, + .rsvd = 0xee, + .ro = 0x3ff, + },{ .name = "BBRAM_CTRL", .addr = A_BBRAM_CTRL, + .post_write = bbram_ctrl_postw, + },{ .name = "PGM_MODE", .addr = A_PGM_MODE, + .post_write = bbram_pgm_mode_postw, + },{ .name = "BBRAM_AES_CRC", .addr = A_BBRAM_AES_CRC, + .post_write = bbram_aes_crc_postw, + .post_read = bbram_wo_postr, + },{ .name = "BBRAM_0", .addr = A_BBRAM_0, + .pre_write = bbram_key_prew, + .post_write = bbram_key_postw, + .post_read = bbram_wo_postr, + },{ .name = "BBRAM_1", .addr = A_BBRAM_1, + .pre_write = bbram_key_prew, + .post_write = bbram_key_postw, + .post_read = bbram_wo_postr, + },{ .name = "BBRAM_2", .addr = A_BBRAM_2, + .pre_write = bbram_key_prew, + .post_write = bbram_key_postw, + .post_read = bbram_wo_postr, + },{ .name = "BBRAM_3", .addr = A_BBRAM_3, + .pre_write = bbram_key_prew, + .post_write = bbram_key_postw, + .post_read = bbram_wo_postr, + },{ .name = "BBRAM_4", .addr = A_BBRAM_4, + .pre_write = bbram_key_prew, + .post_write = bbram_key_postw, + .post_read = bbram_wo_postr, + },{ .name = "BBRAM_5", .addr = A_BBRAM_5, + .pre_write = bbram_key_prew, + .post_write = bbram_key_postw, + .post_read = bbram_wo_postr, + },{ .name = "BBRAM_6", .addr = A_BBRAM_6, + .pre_write = bbram_key_prew, + .post_write = bbram_key_postw, + .post_read = bbram_wo_postr, + },{ .name = "BBRAM_7", .addr = A_BBRAM_7, + .pre_write = bbram_key_prew, + .post_write = bbram_key_postw, + .post_read = bbram_wo_postr, + },{ .name = "BBRAM_8", .addr = A_BBRAM_8, + .pre_write = bbram_r8_prew, + .post_write = bbram_r8_postw, + .post_read = bbram_r8_postr, + },{ .name = "BBRAM_SLVERR", .addr = A_BBRAM_SLVERR, + .rsvd = ~1, + },{ .name = "BBRAM_ISR", .addr = A_BBRAM_ISR, + .w1c = 0x1, + .post_write = bbram_isr_postw, + },{ .name = "BBRAM_IMR", .addr = A_BBRAM_IMR, + .ro = 0x1, + },{ .name = "BBRAM_IER", .addr = A_BBRAM_IER, + .pre_write = bbram_ier_prew, + },{ .name = "BBRAM_IDR", .addr = A_BBRAM_IDR, + .pre_write = bbram_idr_prew, + },{ .name = "BBRAM_MSW_LOCK", .addr = A_BBRAM_MSW_LOCK, + .pre_write = bbram_msw_lock_prew, + .ro = 
~R_BBRAM_MSW_LOCK_VAL_MASK, + } +}; + +static void bbram_ctrl_reset(DeviceState *dev) +{ + XlnxBBRam *s = XLNX_BBRAM(dev); + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(s->regs_info); ++i) { + if (i < R_BBRAM_0 || i > R_BBRAM_8) { + register_reset(&s->regs_info[i]); + } + } + + bbram_update_irq(s); +} + +static const MemoryRegionOps bbram_ctrl_ops = { + .read = register_read_memory, + .write = register_write_memory, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +static void bbram_ctrl_realize(DeviceState *dev, Error **errp) +{ + XlnxBBRam *s = XLNX_BBRAM(dev); + + if (s->crc_zpads) { + s->bbram8_wo = true; + } + + bbram_bdrv_read(s, errp); +} + +static void bbram_ctrl_init(Object *obj) +{ + XlnxBBRam *s = XLNX_BBRAM(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + RegisterInfoArray *reg_array; + + reg_array = + register_init_block32(DEVICE(obj), bbram_ctrl_regs_info, + ARRAY_SIZE(bbram_ctrl_regs_info), + s->regs_info, s->regs, + &bbram_ctrl_ops, + XLNX_BBRAM_ERR_DEBUG, + R_MAX * 4); + + sysbus_init_mmio(sbd, ®_array->mem); + sysbus_init_irq(sbd, &s->irq_bbram); +} + +static void bbram_prop_set_drive(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + DeviceState *dev = DEVICE(obj); + + qdev_prop_drive.set(obj, v, name, opaque, errp); + + /* Fill initial data if backend is attached after realized */ + if (dev->realized) { + bbram_bdrv_read(XLNX_BBRAM(obj), errp); + } +} + +static void bbram_prop_get_drive(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + qdev_prop_drive.get(obj, v, name, opaque, errp); +} + +static void bbram_prop_release_drive(Object *obj, const char *name, + void *opaque) +{ + qdev_prop_drive.release(obj, name, opaque); +} + +static const PropertyInfo bbram_prop_drive = { + .name = "str", + .description = "Node name or ID of a block device to use as BBRAM backend", + .realized_set_allowed = true, + .get = bbram_prop_get_drive, + .set = bbram_prop_set_drive, + .release = bbram_prop_release_drive, +}; + +static const VMStateDescription vmstate_bbram_ctrl = { + .name = TYPE_XLNX_BBRAM, + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32_ARRAY(regs, XlnxBBRam, R_MAX), + VMSTATE_END_OF_LIST(), + } +}; + +static Property bbram_ctrl_props[] = { + DEFINE_PROP("drive", XlnxBBRam, blk, bbram_prop_drive, BlockBackend *), + DEFINE_PROP_UINT32("crc-zpads", XlnxBBRam, crc_zpads, 1), + DEFINE_PROP_END_OF_LIST(), +}; + +static void bbram_ctrl_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = bbram_ctrl_reset; + dc->realize = bbram_ctrl_realize; + dc->vmsd = &vmstate_bbram_ctrl; + device_class_set_props(dc, bbram_ctrl_props); +} + +static const TypeInfo bbram_ctrl_info = { + .name = TYPE_XLNX_BBRAM, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(XlnxBBRam), + .class_init = bbram_ctrl_class_init, + .instance_init = bbram_ctrl_init, +}; + +static void bbram_ctrl_register_types(void) +{ + type_register_static(&bbram_ctrl_info); +} + +type_init(bbram_ctrl_register_types) diff --git a/hw/nvram/xlnx-efuse-crc.c b/hw/nvram/xlnx-efuse-crc.c new file mode 100644 index 000000000..5a5cc13f3 --- /dev/null +++ b/hw/nvram/xlnx-efuse-crc.c @@ -0,0 +1,119 @@ +/* + * Xilinx eFuse/bbram CRC calculator + * + * Copyright (c) 2021 Xilinx Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ +#include "qemu/osdep.h" +#include "hw/nvram/xlnx-efuse.h" + +static uint32_t xlnx_efuse_u37_crc(uint32_t prev_crc, uint32_t data, + uint32_t addr) +{ + /* A table for 7-bit slicing */ + static const uint32_t crc_tab[128] = { + 0x00000000, 0xe13b70f7, 0xc79a971f, 0x26a1e7e8, + 0x8ad958cf, 0x6be22838, 0x4d43cfd0, 0xac78bf27, + 0x105ec76f, 0xf165b798, 0xd7c45070, 0x36ff2087, + 0x9a879fa0, 0x7bbcef57, 0x5d1d08bf, 0xbc267848, + 0x20bd8ede, 0xc186fe29, 0xe72719c1, 0x061c6936, + 0xaa64d611, 0x4b5fa6e6, 0x6dfe410e, 0x8cc531f9, + 0x30e349b1, 0xd1d83946, 0xf779deae, 0x1642ae59, + 0xba3a117e, 0x5b016189, 0x7da08661, 0x9c9bf696, + 0x417b1dbc, 0xa0406d4b, 0x86e18aa3, 0x67dafa54, + 0xcba24573, 0x2a993584, 0x0c38d26c, 0xed03a29b, + 0x5125dad3, 0xb01eaa24, 0x96bf4dcc, 0x77843d3b, + 0xdbfc821c, 0x3ac7f2eb, 0x1c661503, 0xfd5d65f4, + 0x61c69362, 0x80fde395, 0xa65c047d, 0x4767748a, + 0xeb1fcbad, 0x0a24bb5a, 0x2c855cb2, 0xcdbe2c45, + 0x7198540d, 0x90a324fa, 0xb602c312, 0x5739b3e5, + 0xfb410cc2, 0x1a7a7c35, 0x3cdb9bdd, 0xdde0eb2a, + 0x82f63b78, 0x63cd4b8f, 0x456cac67, 0xa457dc90, + 0x082f63b7, 0xe9141340, 0xcfb5f4a8, 0x2e8e845f, + 0x92a8fc17, 0x73938ce0, 0x55326b08, 0xb4091bff, + 0x1871a4d8, 0xf94ad42f, 0xdfeb33c7, 0x3ed04330, + 0xa24bb5a6, 0x4370c551, 0x65d122b9, 0x84ea524e, + 0x2892ed69, 0xc9a99d9e, 0xef087a76, 0x0e330a81, + 0xb21572c9, 0x532e023e, 0x758fe5d6, 0x94b49521, + 0x38cc2a06, 0xd9f75af1, 0xff56bd19, 0x1e6dcdee, + 0xc38d26c4, 0x22b65633, 0x0417b1db, 0xe52cc12c, + 0x49547e0b, 0xa86f0efc, 0x8ecee914, 0x6ff599e3, + 0xd3d3e1ab, 0x32e8915c, 0x144976b4, 0xf5720643, + 0x590ab964, 0xb831c993, 0x9e902e7b, 0x7fab5e8c, + 0xe330a81a, 0x020bd8ed, 0x24aa3f05, 0xc5914ff2, + 0x69e9f0d5, 0x88d28022, 0xae7367ca, 0x4f48173d, + 0xf36e6f75, 0x12551f82, 0x34f4f86a, 0xd5cf889d, + 0x79b737ba, 0x988c474d, 0xbe2da0a5, 0x5f16d052 + }; + + /* + * eFuse calculation is shown here: + * https://github.com/Xilinx/embeddedsw/blob/release-2019.2/lib/sw_services/xilskey/src/xilskey_utils.c#L1496 + * + * Each u32 word is appended a 5-bit value, for a total of 37 bits; see: + * https://github.com/Xilinx/embeddedsw/blob/release-2019.2/lib/sw_services/xilskey/src/xilskey_utils.c#L1356 + */ + uint32_t crc = prev_crc; + const unsigned rshf = 7; + const uint32_t im = (1 << rshf) - 1; + const uint32_t rm = (1 << (32 - rshf)) - 1; + const uint32_t i2 = (1 << 2) - 1; + const uint32_t r2 = (1 << 30) - 1; + + unsigned j; + uint32_t i, r; + uint64_t w; + + w = 
(uint64_t)(addr) << 32; + w |= data; + + /* Feed 35 bits, in 5 rounds, each a slice of 7 bits */ + for (j = 0; j < 5; j++) { + r = rm & (crc >> rshf); + i = im & (crc ^ w); + crc = crc_tab[i] ^ r; + + w >>= rshf; + } + + /* Feed the remaining 2 bits */ + r = r2 & (crc >> 2); + i = i2 & (crc ^ w); + crc = crc_tab[i << (rshf - 2)] ^ r; + + return crc; +} + +uint32_t xlnx_efuse_calc_crc(const uint32_t *data, unsigned u32_cnt, + unsigned zpads) +{ + uint32_t crc = 0; + unsigned index; + + for (index = zpads; index; index--) { + crc = xlnx_efuse_u37_crc(crc, 0, (index + u32_cnt)); + } + + for (index = u32_cnt; index; index--) { + crc = xlnx_efuse_u37_crc(crc, data[index - 1], index); + } + + return crc; +} diff --git a/hw/nvram/xlnx-efuse.c b/hw/nvram/xlnx-efuse.c new file mode 100644 index 000000000..a0fd77b58 --- /dev/null +++ b/hw/nvram/xlnx-efuse.c @@ -0,0 +1,283 @@ +/* + * QEMU model of the EFUSE eFuse + * + * Copyright (c) 2015 Xilinx Inc. + * + * Written by Edgar E. Iglesias + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include "hw/nvram/xlnx-efuse.h" + +#include "qemu/error-report.h" +#include "qemu/log.h" +#include "qapi/error.h" +#include "sysemu/blockdev.h" +#include "hw/qdev-properties.h" +#include "hw/qdev-properties-system.h" + +#define TBIT0_OFFSET 28 +#define TBIT1_OFFSET 29 +#define TBIT2_OFFSET 30 +#define TBIT3_OFFSET 31 +#define TBITS_PATTERN (0x0AU << TBIT0_OFFSET) +#define TBITS_MASK (0x0FU << TBIT0_OFFSET) + +bool xlnx_efuse_get_bit(XlnxEFuse *s, unsigned int bit) +{ + bool b = s->fuse32[bit / 32] & (1 << (bit % 32)); + return b; +} + +static int efuse_bytes(XlnxEFuse *s) +{ + return ROUND_UP((s->efuse_nr * s->efuse_size) / 8, 4); +} + +static int efuse_bdrv_read(XlnxEFuse *s, Error **errp) +{ + uint32_t *ram = s->fuse32; + int nr = efuse_bytes(s); + + if (!s->blk) { + return 0; + } + + s->blk_ro = !blk_supports_write_perm(s->blk); + if (!s->blk_ro) { + int rc; + + rc = blk_set_perm(s->blk, + (BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE), + BLK_PERM_ALL, NULL); + if (rc) { + s->blk_ro = true; + } + } + if (s->blk_ro) { + warn_report("%s: Skip saving updates to read-only eFUSE backstore.", + blk_name(s->blk)); + } + + if (blk_pread(s->blk, 0, ram, nr) < 0) { + error_setg(errp, "%s: Failed to read %u bytes from eFUSE backstore.", + blk_name(s->blk), nr); + return -1; + } + + /* Convert from little-endian backstore for each 32-bit row */ + nr /= 4; + while (nr--) { + ram[nr] = le32_to_cpu(ram[nr]); + } + + return 0; +} + +static void efuse_bdrv_sync(XlnxEFuse *s, unsigned int bit) +{ + unsigned int row_offset; + uint32_t le32; + + if (!s->blk || s->blk_ro) { + return; /* Silent on read-only backend to avoid message flood */ + } + + /* Backstore is always in little-endian */ + le32 = cpu_to_le32(xlnx_efuse_get_row(s, bit)); + + row_offset = (bit / 32) * 4; + if (blk_pwrite(s->blk, row_offset, &le32, 4, 0) < 0) { + error_report("%s: Failed to write offset %u of eFUSE backstore.", + blk_name(s->blk), row_offset); + } +} + +static int efuse_ro_bits_cmp(const void *a, const void *b) +{ + uint32_t i = *(const uint32_t *)a; + uint32_t j = *(const uint32_t *)b; + + return (i > j) - (i < j); +} + +static void efuse_ro_bits_sort(XlnxEFuse *s) +{ + uint32_t *ary = s->ro_bits; + const uint32_t cnt = s->ro_bits_cnt; + + if (ary && cnt > 1) { + qsort(ary, cnt, sizeof(ary[0]), efuse_ro_bits_cmp); + } +} + +static bool efuse_ro_bits_find(XlnxEFuse *s, uint32_t k) +{ + const uint32_t *ary = s->ro_bits; + const uint32_t cnt = s->ro_bits_cnt; + + if (!ary || !cnt) { + return false; + } + + return bsearch(&k, ary, cnt, sizeof(ary[0]), efuse_ro_bits_cmp) != NULL; +} + +bool xlnx_efuse_set_bit(XlnxEFuse *s, unsigned int bit) +{ + if (efuse_ro_bits_find(s, bit)) { + g_autofree char *path = object_get_canonical_path(OBJECT(s)); + + qemu_log_mask(LOG_GUEST_ERROR, "%s: WARN: " + "Ignored setting of readonly efuse bit<%u,%u>!\n", + path, (bit / 32), (bit % 32)); + return false; + } + + s->fuse32[bit / 32] |= 1 << (bit % 32); + efuse_bdrv_sync(s, bit); + return true; +} + +bool xlnx_efuse_k256_check(XlnxEFuse *s, uint32_t crc, unsigned start) +{ + uint32_t calc; + + /* A key always occupies multiple of whole rows */ + assert((start % 32) == 0); + + calc = xlnx_efuse_calc_crc(&s->fuse32[start / 32], (256 / 32), 0); + return calc == crc; +} + +uint32_t xlnx_efuse_tbits_check(XlnxEFuse *s) +{ + int nr; + uint32_t check = 0; + + for (nr = s->efuse_nr; nr-- > 0; ) { + int efuse_start_row_num = (s->efuse_size * nr) / 32; + uint32_t data = s->fuse32[efuse_start_row_num]; + + /* + * If the option 
is on, auto-init blank T-bits. + * (non-blank will still be reported as '0' in the check, e.g., + * for error-injection tests) + */ + if ((data & TBITS_MASK) == 0 && s->init_tbits) { + data |= TBITS_PATTERN; + + s->fuse32[efuse_start_row_num] = data; + efuse_bdrv_sync(s, (efuse_start_row_num * 32 + TBIT0_OFFSET)); + } + + check = (check << 1) | ((data & TBITS_MASK) == TBITS_PATTERN); + } + + return check; +} + +static void efuse_realize(DeviceState *dev, Error **errp) +{ + XlnxEFuse *s = XLNX_EFUSE(dev); + + /* Sort readonly-list for bsearch lookup */ + efuse_ro_bits_sort(s); + + if ((s->efuse_size % 32) != 0) { + g_autofree char *path = object_get_canonical_path(OBJECT(s)); + + error_setg(errp, + "%s.efuse-size: %u: property value not multiple of 32.", + path, s->efuse_size); + return; + } + + s->fuse32 = g_malloc0(efuse_bytes(s)); + if (efuse_bdrv_read(s, errp)) { + g_free(s->fuse32); + } +} + +static void efuse_prop_set_drive(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + DeviceState *dev = DEVICE(obj); + + qdev_prop_drive.set(obj, v, name, opaque, errp); + + /* Fill initial data if backend is attached after realized */ + if (dev->realized) { + efuse_bdrv_read(XLNX_EFUSE(obj), errp); + } +} + +static void efuse_prop_get_drive(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + qdev_prop_drive.get(obj, v, name, opaque, errp); +} + +static void efuse_prop_release_drive(Object *obj, const char *name, + void *opaque) +{ + qdev_prop_drive.release(obj, name, opaque); +} + +static const PropertyInfo efuse_prop_drive = { + .name = "str", + .description = "Node name or ID of a block device to use as eFUSE backend", + .realized_set_allowed = true, + .get = efuse_prop_get_drive, + .set = efuse_prop_set_drive, + .release = efuse_prop_release_drive, +}; + +static Property efuse_properties[] = { + DEFINE_PROP("drive", XlnxEFuse, blk, efuse_prop_drive, BlockBackend *), + DEFINE_PROP_UINT8("efuse-nr", XlnxEFuse, efuse_nr, 3), + DEFINE_PROP_UINT32("efuse-size", XlnxEFuse, efuse_size, 64 * 32), + DEFINE_PROP_BOOL("init-factory-tbits", XlnxEFuse, init_tbits, true), + DEFINE_PROP_ARRAY("read-only", XlnxEFuse, ro_bits_cnt, ro_bits, + qdev_prop_uint32, uint32_t), + DEFINE_PROP_END_OF_LIST(), +}; + +static void efuse_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = efuse_realize; + device_class_set_props(dc, efuse_properties); +} + +static const TypeInfo efuse_info = { + .name = TYPE_XLNX_EFUSE, + .parent = TYPE_DEVICE, + .instance_size = sizeof(XlnxEFuse), + .class_init = efuse_class_init, +}; + +static void efuse_register_types(void) +{ + type_register_static(&efuse_info); +} +type_init(efuse_register_types) diff --git a/hw/nvram/xlnx-versal-efuse-cache.c b/hw/nvram/xlnx-versal-efuse-cache.c new file mode 100644 index 000000000..eaec64d78 --- /dev/null +++ b/hw/nvram/xlnx-versal-efuse-cache.c @@ -0,0 +1,114 @@ +/* + * QEMU model of the EFuse_Cache + * + * Copyright (c) 2017 Xilinx Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "hw/nvram/xlnx-versal-efuse.h" + +#include "qemu/log.h" +#include "hw/qdev-properties.h" + +#define MR_SIZE 0xC00 + +static uint64_t efuse_cache_read(void *opaque, hwaddr addr, unsigned size) +{ + XlnxVersalEFuseCache *s = XLNX_VERSAL_EFUSE_CACHE(opaque); + unsigned int w0 = QEMU_ALIGN_DOWN(addr * 8, 32); + unsigned int w1 = QEMU_ALIGN_DOWN((addr + size - 1) * 8, 32); + + uint64_t ret; + + assert(w0 == w1 || (w0 + 32) == w1); + + ret = xlnx_versal_efuse_read_row(s->efuse, w1, NULL); + if (w0 < w1) { + ret <<= 32; + ret |= xlnx_versal_efuse_read_row(s->efuse, w0, NULL); + } + + /* If 'addr' unaligned, the guest is always assumed to be little-endian. 
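+     *
+     * Worked example (our illustration): a 2-byte read at offset 6 gives
+     * w0 == w1 == 32, so only row 1 is fetched, and the 'addr & 3' shift
+     * below moves bytes 6..7 into the low half of the result.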
*/ + addr &= 3; + if (addr) { + ret >>= 8 * addr; + } + + return ret; +} + +static void efuse_cache_write(void *opaque, hwaddr addr, uint64_t value, + unsigned size) +{ + /* No Register Writes allowed */ + qemu_log_mask(LOG_GUEST_ERROR, "%s: efuse cache registers are read-only", + __func__); +} + +static const MemoryRegionOps efuse_cache_ops = { + .read = efuse_cache_read, + .write = efuse_cache_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 1, + .max_access_size = 4, + }, +}; + +static void efuse_cache_init(Object *obj) +{ + XlnxVersalEFuseCache *s = XLNX_VERSAL_EFUSE_CACHE(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + + memory_region_init_io(&s->iomem, obj, &efuse_cache_ops, s, + TYPE_XLNX_VERSAL_EFUSE_CACHE, MR_SIZE); + sysbus_init_mmio(sbd, &s->iomem); +} + +static Property efuse_cache_props[] = { + DEFINE_PROP_LINK("efuse", + XlnxVersalEFuseCache, efuse, + TYPE_XLNX_EFUSE, XlnxEFuse *), + + DEFINE_PROP_END_OF_LIST(), +}; + +static void efuse_cache_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + device_class_set_props(dc, efuse_cache_props); +} + +static const TypeInfo efuse_cache_info = { + .name = TYPE_XLNX_VERSAL_EFUSE_CACHE, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(XlnxVersalEFuseCache), + .class_init = efuse_cache_class_init, + .instance_init = efuse_cache_init, +}; + +static void efuse_cache_register_types(void) +{ + type_register_static(&efuse_cache_info); +} + +type_init(efuse_cache_register_types) diff --git a/hw/nvram/xlnx-versal-efuse-ctrl.c b/hw/nvram/xlnx-versal-efuse-ctrl.c new file mode 100644 index 000000000..b35ba65ab --- /dev/null +++ b/hw/nvram/xlnx-versal-efuse-ctrl.c @@ -0,0 +1,793 @@ +/* + * QEMU model of the Versal eFuse controller + * + * Copyright (c) 2020 Xilinx Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include "hw/nvram/xlnx-versal-efuse.h" + +#include "qemu/log.h" +#include "qapi/error.h" +#include "migration/vmstate.h" +#include "hw/qdev-properties.h" + +#ifndef XLNX_VERSAL_EFUSE_CTRL_ERR_DEBUG +#define XLNX_VERSAL_EFUSE_CTRL_ERR_DEBUG 0 +#endif + +REG32(WR_LOCK, 0x0) + FIELD(WR_LOCK, LOCK, 0, 16) +REG32(CFG, 0x4) + FIELD(CFG, SLVERR_ENABLE, 5, 1) + FIELD(CFG, MARGIN_RD, 2, 1) + FIELD(CFG, PGM_EN, 1, 1) +REG32(STATUS, 0x8) + FIELD(STATUS, AES_USER_KEY_1_CRC_PASS, 11, 1) + FIELD(STATUS, AES_USER_KEY_1_CRC_DONE, 10, 1) + FIELD(STATUS, AES_USER_KEY_0_CRC_PASS, 9, 1) + FIELD(STATUS, AES_USER_KEY_0_CRC_DONE, 8, 1) + FIELD(STATUS, AES_CRC_PASS, 7, 1) + FIELD(STATUS, AES_CRC_DONE, 6, 1) + FIELD(STATUS, CACHE_DONE, 5, 1) + FIELD(STATUS, CACHE_LOAD, 4, 1) + FIELD(STATUS, EFUSE_2_TBIT, 2, 1) + FIELD(STATUS, EFUSE_1_TBIT, 1, 1) + FIELD(STATUS, EFUSE_0_TBIT, 0, 1) +REG32(EFUSE_PGM_ADDR, 0xc) + FIELD(EFUSE_PGM_ADDR, PAGE, 13, 4) + FIELD(EFUSE_PGM_ADDR, ROW, 5, 8) + FIELD(EFUSE_PGM_ADDR, COLUMN, 0, 5) +REG32(EFUSE_RD_ADDR, 0x10) + FIELD(EFUSE_RD_ADDR, PAGE, 13, 4) + FIELD(EFUSE_RD_ADDR, ROW, 5, 8) +REG32(EFUSE_RD_DATA, 0x14) +REG32(TPGM, 0x18) + FIELD(TPGM, VALUE, 0, 16) +REG32(TRD, 0x1c) + FIELD(TRD, VALUE, 0, 8) +REG32(TSU_H_PS, 0x20) + FIELD(TSU_H_PS, VALUE, 0, 8) +REG32(TSU_H_PS_CS, 0x24) + FIELD(TSU_H_PS_CS, VALUE, 0, 8) +REG32(TRDM, 0x28) + FIELD(TRDM, VALUE, 0, 8) +REG32(TSU_H_CS, 0x2c) + FIELD(TSU_H_CS, VALUE, 0, 8) +REG32(EFUSE_ISR, 0x30) + FIELD(EFUSE_ISR, APB_SLVERR, 31, 1) + FIELD(EFUSE_ISR, CACHE_PARITY_E2, 14, 1) + FIELD(EFUSE_ISR, CACHE_PARITY_E1, 13, 1) + FIELD(EFUSE_ISR, CACHE_PARITY_E0S, 12, 1) + FIELD(EFUSE_ISR, CACHE_PARITY_E0R, 11, 1) + FIELD(EFUSE_ISR, CACHE_APB_SLVERR, 10, 1) + FIELD(EFUSE_ISR, CACHE_REQ_ERROR, 9, 1) + FIELD(EFUSE_ISR, MAIN_REQ_ERROR, 8, 1) + FIELD(EFUSE_ISR, READ_ON_CACHE_LD, 7, 1) + FIELD(EFUSE_ISR, CACHE_FSM_ERROR, 6, 1) + FIELD(EFUSE_ISR, MAIN_FSM_ERROR, 5, 1) + FIELD(EFUSE_ISR, CACHE_ERROR, 4, 1) + FIELD(EFUSE_ISR, RD_ERROR, 3, 1) + FIELD(EFUSE_ISR, RD_DONE, 2, 1) + FIELD(EFUSE_ISR, PGM_ERROR, 1, 1) + FIELD(EFUSE_ISR, PGM_DONE, 0, 1) +REG32(EFUSE_IMR, 0x34) + FIELD(EFUSE_IMR, APB_SLVERR, 31, 1) + FIELD(EFUSE_IMR, CACHE_PARITY_E2, 14, 1) + FIELD(EFUSE_IMR, CACHE_PARITY_E1, 13, 1) + FIELD(EFUSE_IMR, CACHE_PARITY_E0S, 12, 1) + FIELD(EFUSE_IMR, CACHE_PARITY_E0R, 11, 1) + FIELD(EFUSE_IMR, CACHE_APB_SLVERR, 10, 1) + FIELD(EFUSE_IMR, CACHE_REQ_ERROR, 9, 1) + FIELD(EFUSE_IMR, MAIN_REQ_ERROR, 8, 1) + FIELD(EFUSE_IMR, READ_ON_CACHE_LD, 7, 1) + FIELD(EFUSE_IMR, CACHE_FSM_ERROR, 6, 1) + FIELD(EFUSE_IMR, MAIN_FSM_ERROR, 5, 1) + FIELD(EFUSE_IMR, CACHE_ERROR, 4, 1) + FIELD(EFUSE_IMR, RD_ERROR, 3, 1) + FIELD(EFUSE_IMR, RD_DONE, 2, 1) + FIELD(EFUSE_IMR, PGM_ERROR, 1, 1) + FIELD(EFUSE_IMR, PGM_DONE, 0, 1) +REG32(EFUSE_IER, 0x38) + FIELD(EFUSE_IER, APB_SLVERR, 31, 1) + FIELD(EFUSE_IER, CACHE_PARITY_E2, 14, 1) + FIELD(EFUSE_IER, CACHE_PARITY_E1, 13, 1) + FIELD(EFUSE_IER, CACHE_PARITY_E0S, 12, 1) + FIELD(EFUSE_IER, CACHE_PARITY_E0R, 11, 1) + FIELD(EFUSE_IER, CACHE_APB_SLVERR, 10, 1) + FIELD(EFUSE_IER, CACHE_REQ_ERROR, 9, 1) + FIELD(EFUSE_IER, MAIN_REQ_ERROR, 8, 1) + FIELD(EFUSE_IER, READ_ON_CACHE_LD, 7, 1) + FIELD(EFUSE_IER, CACHE_FSM_ERROR, 6, 1) + FIELD(EFUSE_IER, MAIN_FSM_ERROR, 5, 1) + FIELD(EFUSE_IER, CACHE_ERROR, 4, 1) + FIELD(EFUSE_IER, RD_ERROR, 3, 1) + FIELD(EFUSE_IER, RD_DONE, 2, 1) + FIELD(EFUSE_IER, PGM_ERROR, 1, 1) + FIELD(EFUSE_IER, PGM_DONE, 0, 1) +REG32(EFUSE_IDR, 0x3c) + FIELD(EFUSE_IDR, APB_SLVERR, 31, 1) + FIELD(EFUSE_IDR, 
CACHE_PARITY_E2, 14, 1) + FIELD(EFUSE_IDR, CACHE_PARITY_E1, 13, 1) + FIELD(EFUSE_IDR, CACHE_PARITY_E0S, 12, 1) + FIELD(EFUSE_IDR, CACHE_PARITY_E0R, 11, 1) + FIELD(EFUSE_IDR, CACHE_APB_SLVERR, 10, 1) + FIELD(EFUSE_IDR, CACHE_REQ_ERROR, 9, 1) + FIELD(EFUSE_IDR, MAIN_REQ_ERROR, 8, 1) + FIELD(EFUSE_IDR, READ_ON_CACHE_LD, 7, 1) + FIELD(EFUSE_IDR, CACHE_FSM_ERROR, 6, 1) + FIELD(EFUSE_IDR, MAIN_FSM_ERROR, 5, 1) + FIELD(EFUSE_IDR, CACHE_ERROR, 4, 1) + FIELD(EFUSE_IDR, RD_ERROR, 3, 1) + FIELD(EFUSE_IDR, RD_DONE, 2, 1) + FIELD(EFUSE_IDR, PGM_ERROR, 1, 1) + FIELD(EFUSE_IDR, PGM_DONE, 0, 1) +REG32(EFUSE_CACHE_LOAD, 0x40) + FIELD(EFUSE_CACHE_LOAD, LOAD, 0, 1) +REG32(EFUSE_PGM_LOCK, 0x44) + FIELD(EFUSE_PGM_LOCK, SPK_ID_LOCK, 0, 1) +REG32(EFUSE_AES_CRC, 0x48) +REG32(EFUSE_AES_USR_KEY0_CRC, 0x4c) +REG32(EFUSE_AES_USR_KEY1_CRC, 0x50) +REG32(EFUSE_PD, 0x54) +REG32(EFUSE_ANLG_OSC_SW_1LP, 0x60) +REG32(EFUSE_TEST_CTRL, 0x100) + +#define R_MAX (R_EFUSE_TEST_CTRL + 1) + +#define R_WR_LOCK_UNLOCK_PASSCODE (0xDF0D) + +/* + * eFuse layout references: + * https://github.com/Xilinx/embeddedsw/blob/release-2019.2/lib/sw_services/xilnvm/src/xnvm_efuse_hw.h + */ +#define BIT_POS_OF(A_) \ + ((uint32_t)((A_) & (R_EFUSE_PGM_ADDR_ROW_MASK | \ + R_EFUSE_PGM_ADDR_COLUMN_MASK))) + +#define BIT_POS(R_, C_) \ + ((uint32_t)((R_EFUSE_PGM_ADDR_ROW_MASK \ + & ((R_) << R_EFUSE_PGM_ADDR_ROW_SHIFT)) \ + | \ + (R_EFUSE_PGM_ADDR_COLUMN_MASK \ + & ((C_) << R_EFUSE_PGM_ADDR_COLUMN_SHIFT)))) + +#define EFUSE_TBIT_POS(A_) (BIT_POS_OF(A_) >= BIT_POS(0, 28)) + +#define EFUSE_ANCHOR_ROW (0) +#define EFUSE_ANCHOR_3_COL (27) +#define EFUSE_ANCHOR_1_COL (1) + +#define EFUSE_AES_KEY_START BIT_POS(12, 0) +#define EFUSE_AES_KEY_END BIT_POS(19, 31) +#define EFUSE_USER_KEY_0_START BIT_POS(20, 0) +#define EFUSE_USER_KEY_0_END BIT_POS(27, 31) +#define EFUSE_USER_KEY_1_START BIT_POS(28, 0) +#define EFUSE_USER_KEY_1_END BIT_POS(35, 31) + +#define EFUSE_RD_BLOCKED_START EFUSE_AES_KEY_START +#define EFUSE_RD_BLOCKED_END EFUSE_USER_KEY_1_END + +#define EFUSE_GLITCH_DET_WR_LK BIT_POS(4, 31) +#define EFUSE_PPK0_WR_LK BIT_POS(43, 6) +#define EFUSE_PPK1_WR_LK BIT_POS(43, 7) +#define EFUSE_PPK2_WR_LK BIT_POS(43, 8) +#define EFUSE_AES_WR_LK BIT_POS(43, 11) +#define EFUSE_USER_KEY_0_WR_LK BIT_POS(43, 13) +#define EFUSE_USER_KEY_1_WR_LK BIT_POS(43, 15) +#define EFUSE_PUF_SYN_LK BIT_POS(43, 16) +#define EFUSE_DNA_WR_LK BIT_POS(43, 27) +#define EFUSE_BOOT_ENV_WR_LK BIT_POS(43, 28) + +#define EFUSE_PGM_LOCKED_START BIT_POS(44, 0) +#define EFUSE_PGM_LOCKED_END BIT_POS(51, 31) + +#define EFUSE_PUF_PAGE (2) +#define EFUSE_PUF_SYN_START BIT_POS(129, 0) +#define EFUSE_PUF_SYN_END BIT_POS(255, 27) + +#define EFUSE_KEY_CRC_LK_ROW (43) +#define EFUSE_AES_KEY_CRC_LK_MASK ((1U << 9) | (1U << 10)) +#define EFUSE_USER_KEY_0_CRC_LK_MASK (1U << 12) +#define EFUSE_USER_KEY_1_CRC_LK_MASK (1U << 14) + +/* + * A handy macro to return value of an array element, + * or a specific default if given index is out of bound. + */ +#define ARRAY_GET(A_, I_, D_) \ + ((unsigned int)(I_) < ARRAY_SIZE(A_) ? 
(A_)[I_] : (D_)) + +QEMU_BUILD_BUG_ON(R_MAX != ARRAY_SIZE(((XlnxVersalEFuseCtrl *)0)->regs)); + +typedef struct XlnxEFuseLkSpec { + uint16_t row; + uint16_t lk_bit; +} XlnxEFuseLkSpec; + +static void efuse_imr_update_irq(XlnxVersalEFuseCtrl *s) +{ + bool pending = s->regs[R_EFUSE_ISR] & ~s->regs[R_EFUSE_IMR]; + qemu_set_irq(s->irq_efuse_imr, pending); +} + +static void efuse_isr_postw(RegisterInfo *reg, uint64_t val64) +{ + XlnxVersalEFuseCtrl *s = XLNX_VERSAL_EFUSE_CTRL(reg->opaque); + efuse_imr_update_irq(s); +} + +static uint64_t efuse_ier_prew(RegisterInfo *reg, uint64_t val64) +{ + XlnxVersalEFuseCtrl *s = XLNX_VERSAL_EFUSE_CTRL(reg->opaque); + uint32_t val = val64; + + s->regs[R_EFUSE_IMR] &= ~val; + efuse_imr_update_irq(s); + return 0; +} + +static uint64_t efuse_idr_prew(RegisterInfo *reg, uint64_t val64) +{ + XlnxVersalEFuseCtrl *s = XLNX_VERSAL_EFUSE_CTRL(reg->opaque); + uint32_t val = val64; + + s->regs[R_EFUSE_IMR] |= val; + efuse_imr_update_irq(s); + return 0; +} + +static void efuse_status_tbits_sync(XlnxVersalEFuseCtrl *s) +{ + uint32_t check = xlnx_efuse_tbits_check(s->efuse); + uint32_t val = s->regs[R_STATUS]; + + val = FIELD_DP32(val, STATUS, EFUSE_0_TBIT, !!(check & (1 << 0))); + val = FIELD_DP32(val, STATUS, EFUSE_1_TBIT, !!(check & (1 << 1))); + val = FIELD_DP32(val, STATUS, EFUSE_2_TBIT, !!(check & (1 << 2))); + + s->regs[R_STATUS] = val; +} + +static void efuse_anchor_bits_check(XlnxVersalEFuseCtrl *s) +{ + unsigned page; + + if (!s->efuse || !s->efuse->init_tbits) { + return; + } + + for (page = 0; page < s->efuse->efuse_nr; page++) { + uint32_t row = 0, bit; + + row = FIELD_DP32(row, EFUSE_PGM_ADDR, PAGE, page); + row = FIELD_DP32(row, EFUSE_PGM_ADDR, ROW, EFUSE_ANCHOR_ROW); + + bit = FIELD_DP32(row, EFUSE_PGM_ADDR, COLUMN, EFUSE_ANCHOR_3_COL); + if (!xlnx_efuse_get_bit(s->efuse, bit)) { + xlnx_efuse_set_bit(s->efuse, bit); + } + + bit = FIELD_DP32(row, EFUSE_PGM_ADDR, COLUMN, EFUSE_ANCHOR_1_COL); + if (!xlnx_efuse_get_bit(s->efuse, bit)) { + xlnx_efuse_set_bit(s->efuse, bit); + } + } +} + +static void efuse_key_crc_check(RegisterInfo *reg, uint32_t crc, + uint32_t pass_mask, uint32_t done_mask, + unsigned first, uint32_t lk_mask) +{ + XlnxVersalEFuseCtrl *s = XLNX_VERSAL_EFUSE_CTRL(reg->opaque); + uint32_t r, lk_bits; + + /* + * To start, assume both DONE and PASS, and clear PASS by xor + * if CRC-check fails or CRC-check disabled by lock fuse. 
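+     *
+     * That is: on a passing, unlocked check, pass_mask is zeroed and
+     * the xor is a no-op, leaving both DONE and PASS set; on a failed
+     * or locked-out check, the xor clears the PASS bit just set in r,
+     * leaving only DONE set.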
+ */ + r = s->regs[R_STATUS] | done_mask | pass_mask; + + lk_bits = xlnx_efuse_get_row(s->efuse, EFUSE_KEY_CRC_LK_ROW) & lk_mask; + if (lk_bits == 0 && xlnx_efuse_k256_check(s->efuse, crc, first)) { + pass_mask = 0; + } + + s->regs[R_STATUS] = r ^ pass_mask; +} + +static void efuse_data_sync(XlnxVersalEFuseCtrl *s) +{ + efuse_status_tbits_sync(s); +} + +static int efuse_lk_spec_cmp(const void *a, const void *b) +{ + uint16_t r1 = ((const XlnxEFuseLkSpec *)a)->row; + uint16_t r2 = ((const XlnxEFuseLkSpec *)b)->row; + + return (r1 > r2) - (r1 < r2); +} + +static void efuse_lk_spec_sort(XlnxVersalEFuseCtrl *s) +{ + XlnxEFuseLkSpec *ary = s->extra_pg0_lock_spec; + const uint32_t n8 = s->extra_pg0_lock_n16 * 2; + const uint32_t sz = sizeof(ary[0]); + const uint32_t cnt = n8 / sz; + + if (ary && cnt) { + qsort(ary, cnt, sz, efuse_lk_spec_cmp); + } +} + +static uint32_t efuse_lk_spec_find(XlnxVersalEFuseCtrl *s, uint32_t row) +{ + const XlnxEFuseLkSpec *ary = s->extra_pg0_lock_spec; + const uint32_t n8 = s->extra_pg0_lock_n16 * 2; + const uint32_t sz = sizeof(ary[0]); + const uint32_t cnt = n8 / sz; + const XlnxEFuseLkSpec *item = NULL; + + if (ary && cnt) { + XlnxEFuseLkSpec k = { .row = row, }; + + item = bsearch(&k, ary, cnt, sz, efuse_lk_spec_cmp); + } + + return item ? item->lk_bit : 0; +} + +static uint32_t efuse_bit_locked(XlnxVersalEFuseCtrl *s, uint32_t bit) +{ + /* Hard-coded locks */ + static const uint16_t pg0_hard_lock[] = { + [4] = EFUSE_GLITCH_DET_WR_LK, + [37] = EFUSE_BOOT_ENV_WR_LK, + + [8 ... 11] = EFUSE_DNA_WR_LK, + [12 ... 19] = EFUSE_AES_WR_LK, + [20 ... 27] = EFUSE_USER_KEY_0_WR_LK, + [28 ... 35] = EFUSE_USER_KEY_1_WR_LK, + [64 ... 71] = EFUSE_PPK0_WR_LK, + [72 ... 79] = EFUSE_PPK1_WR_LK, + [80 ... 87] = EFUSE_PPK2_WR_LK, + }; + + uint32_t row = FIELD_EX32(bit, EFUSE_PGM_ADDR, ROW); + uint32_t lk_bit = ARRAY_GET(pg0_hard_lock, row, 0); + + return lk_bit ? lk_bit : efuse_lk_spec_find(s, row); +} + +static bool efuse_pgm_locked(XlnxVersalEFuseCtrl *s, unsigned int bit) +{ + + unsigned int lock = 1; + + /* Global lock */ + if (!ARRAY_FIELD_EX32(s->regs, CFG, PGM_EN)) { + goto ret_lock; + } + + /* Row lock */ + switch (FIELD_EX32(bit, EFUSE_PGM_ADDR, PAGE)) { + case 0: + if (ARRAY_FIELD_EX32(s->regs, EFUSE_PGM_LOCK, SPK_ID_LOCK) && + bit >= EFUSE_PGM_LOCKED_START && bit <= EFUSE_PGM_LOCKED_END) { + goto ret_lock; + } + + lock = efuse_bit_locked(s, bit); + break; + case EFUSE_PUF_PAGE: + if (bit < EFUSE_PUF_SYN_START || bit > EFUSE_PUF_SYN_END) { + lock = 0; + goto ret_lock; + } + + lock = EFUSE_PUF_SYN_LK; + break; + default: + lock = 0; + goto ret_lock; + } + + /* Row lock by an efuse bit */ + if (lock) { + lock = xlnx_efuse_get_bit(s->efuse, lock); + } + + ret_lock: + return lock != 0; +} + +static void efuse_pgm_addr_postw(RegisterInfo *reg, uint64_t val64) +{ + XlnxVersalEFuseCtrl *s = XLNX_VERSAL_EFUSE_CTRL(reg->opaque); + unsigned bit = val64; + bool ok = false; + + /* Always zero out PGM_ADDR because it is write-only */ + s->regs[R_EFUSE_PGM_ADDR] = 0; + + /* + * Indicate error if bit is write-protected (or read-only + * as guarded by efuse_set_bit()). + * + * Keep it simple by not modeling program timing. + * + * Note: model must NEVER clear the PGM_ERROR bit; it is + * up to guest to do so (or by reset). 
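+     *
+     * A typical guest sequence would thus be: clear ISR[PGM_ERROR]
+     * and ISR[PGM_DONE] (both are write-one-to-clear), write
+     * EFUSE_PGM_ADDR, then poll ISR[PGM_DONE] and inspect
+     * ISR[PGM_ERROR] for the outcome.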
+ */ + if (efuse_pgm_locked(s, bit)) { + g_autofree char *path = object_get_canonical_path(OBJECT(s)); + + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Denied setting of efuse<%u, %u, %u>\n", + path, + FIELD_EX32(bit, EFUSE_PGM_ADDR, PAGE), + FIELD_EX32(bit, EFUSE_PGM_ADDR, ROW), + FIELD_EX32(bit, EFUSE_PGM_ADDR, COLUMN)); + } else if (xlnx_efuse_set_bit(s->efuse, bit)) { + ok = true; + if (EFUSE_TBIT_POS(bit)) { + efuse_status_tbits_sync(s); + } + } + + if (!ok) { + ARRAY_FIELD_DP32(s->regs, EFUSE_ISR, PGM_ERROR, 1); + } + + ARRAY_FIELD_DP32(s->regs, EFUSE_ISR, PGM_DONE, 1); + efuse_imr_update_irq(s); +} + +static void efuse_rd_addr_postw(RegisterInfo *reg, uint64_t val64) +{ + XlnxVersalEFuseCtrl *s = XLNX_VERSAL_EFUSE_CTRL(reg->opaque); + unsigned bit = val64; + bool denied; + + /* Always zero out RD_ADDR because it is write-only */ + s->regs[R_EFUSE_RD_ADDR] = 0; + + /* + * Indicate error if row is read-blocked. + * + * Note: model must NEVER clear the RD_ERROR bit; it is + * up to guest to do so (or by reset). + */ + s->regs[R_EFUSE_RD_DATA] = xlnx_versal_efuse_read_row(s->efuse, + bit, &denied); + if (denied) { + g_autofree char *path = object_get_canonical_path(OBJECT(s)); + + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Denied reading of efuse<%u, %u>\n", + path, + FIELD_EX32(bit, EFUSE_RD_ADDR, PAGE), + FIELD_EX32(bit, EFUSE_RD_ADDR, ROW)); + + ARRAY_FIELD_DP32(s->regs, EFUSE_ISR, RD_ERROR, 1); + } + + ARRAY_FIELD_DP32(s->regs, EFUSE_ISR, RD_DONE, 1); + efuse_imr_update_irq(s); + return; +} + +static uint64_t efuse_cache_load_prew(RegisterInfo *reg, uint64_t val64) +{ + XlnxVersalEFuseCtrl *s = XLNX_VERSAL_EFUSE_CTRL(reg->opaque); + + if (val64 & R_EFUSE_CACHE_LOAD_LOAD_MASK) { + efuse_data_sync(s); + + ARRAY_FIELD_DP32(s->regs, STATUS, CACHE_DONE, 1); + efuse_imr_update_irq(s); + } + + return 0; +} + +static uint64_t efuse_pgm_lock_prew(RegisterInfo *reg, uint64_t val64) +{ + XlnxVersalEFuseCtrl *s = XLNX_VERSAL_EFUSE_CTRL(reg->opaque); + + /* Ignore all other bits */ + val64 = FIELD_EX32(val64, EFUSE_PGM_LOCK, SPK_ID_LOCK); + + /* Once the bit is written 1, only reset will clear it to 0 */ + val64 |= ARRAY_FIELD_EX32(s->regs, EFUSE_PGM_LOCK, SPK_ID_LOCK); + + return val64; +} + +static void efuse_aes_crc_postw(RegisterInfo *reg, uint64_t val64) +{ + efuse_key_crc_check(reg, val64, + R_STATUS_AES_CRC_PASS_MASK, + R_STATUS_AES_CRC_DONE_MASK, + EFUSE_AES_KEY_START, + EFUSE_AES_KEY_CRC_LK_MASK); +} + +static void efuse_aes_u0_crc_postw(RegisterInfo *reg, uint64_t val64) +{ + efuse_key_crc_check(reg, val64, + R_STATUS_AES_USER_KEY_0_CRC_PASS_MASK, + R_STATUS_AES_USER_KEY_0_CRC_DONE_MASK, + EFUSE_USER_KEY_0_START, + EFUSE_USER_KEY_0_CRC_LK_MASK); +} + +static void efuse_aes_u1_crc_postw(RegisterInfo *reg, uint64_t val64) +{ + efuse_key_crc_check(reg, val64, + R_STATUS_AES_USER_KEY_1_CRC_PASS_MASK, + R_STATUS_AES_USER_KEY_1_CRC_DONE_MASK, + EFUSE_USER_KEY_1_START, + EFUSE_USER_KEY_1_CRC_LK_MASK); +} + +static uint64_t efuse_wr_lock_prew(RegisterInfo *reg, uint64_t val) +{ + return val != R_WR_LOCK_UNLOCK_PASSCODE; +} + +static const RegisterAccessInfo efuse_ctrl_regs_info[] = { + { .name = "WR_LOCK", .addr = A_WR_LOCK, + .reset = 0x1, + .pre_write = efuse_wr_lock_prew, + },{ .name = "CFG", .addr = A_CFG, + .rsvd = 0x9, + },{ .name = "STATUS", .addr = A_STATUS, + .rsvd = 0x8, + .ro = 0xfff, + },{ .name = "EFUSE_PGM_ADDR", .addr = A_EFUSE_PGM_ADDR, + .post_write = efuse_pgm_addr_postw, + },{ .name = "EFUSE_RD_ADDR", .addr = A_EFUSE_RD_ADDR, + .rsvd = 0x1f, + .post_write = efuse_rd_addr_postw, + },{ 
.name = "EFUSE_RD_DATA", .addr = A_EFUSE_RD_DATA, + .ro = 0xffffffff, + },{ .name = "TPGM", .addr = A_TPGM, + },{ .name = "TRD", .addr = A_TRD, + .reset = 0x19, + },{ .name = "TSU_H_PS", .addr = A_TSU_H_PS, + .reset = 0xff, + },{ .name = "TSU_H_PS_CS", .addr = A_TSU_H_PS_CS, + .reset = 0x11, + },{ .name = "TRDM", .addr = A_TRDM, + .reset = 0x3a, + },{ .name = "TSU_H_CS", .addr = A_TSU_H_CS, + .reset = 0x16, + },{ .name = "EFUSE_ISR", .addr = A_EFUSE_ISR, + .rsvd = 0x7fff8000, + .w1c = 0x80007fff, + .post_write = efuse_isr_postw, + },{ .name = "EFUSE_IMR", .addr = A_EFUSE_IMR, + .reset = 0x80007fff, + .rsvd = 0x7fff8000, + .ro = 0xffffffff, + },{ .name = "EFUSE_IER", .addr = A_EFUSE_IER, + .rsvd = 0x7fff8000, + .pre_write = efuse_ier_prew, + },{ .name = "EFUSE_IDR", .addr = A_EFUSE_IDR, + .rsvd = 0x7fff8000, + .pre_write = efuse_idr_prew, + },{ .name = "EFUSE_CACHE_LOAD", .addr = A_EFUSE_CACHE_LOAD, + .pre_write = efuse_cache_load_prew, + },{ .name = "EFUSE_PGM_LOCK", .addr = A_EFUSE_PGM_LOCK, + .pre_write = efuse_pgm_lock_prew, + },{ .name = "EFUSE_AES_CRC", .addr = A_EFUSE_AES_CRC, + .post_write = efuse_aes_crc_postw, + },{ .name = "EFUSE_AES_USR_KEY0_CRC", .addr = A_EFUSE_AES_USR_KEY0_CRC, + .post_write = efuse_aes_u0_crc_postw, + },{ .name = "EFUSE_AES_USR_KEY1_CRC", .addr = A_EFUSE_AES_USR_KEY1_CRC, + .post_write = efuse_aes_u1_crc_postw, + },{ .name = "EFUSE_PD", .addr = A_EFUSE_PD, + .ro = 0xfffffffe, + },{ .name = "EFUSE_ANLG_OSC_SW_1LP", .addr = A_EFUSE_ANLG_OSC_SW_1LP, + },{ .name = "EFUSE_TEST_CTRL", .addr = A_EFUSE_TEST_CTRL, + .reset = 0x8, + } +}; + +static void efuse_ctrl_reg_write(void *opaque, hwaddr addr, + uint64_t data, unsigned size) +{ + RegisterInfoArray *reg_array = opaque; + XlnxVersalEFuseCtrl *s; + Object *dev; + + assert(reg_array != NULL); + + dev = reg_array->mem.owner; + assert(dev); + + s = XLNX_VERSAL_EFUSE_CTRL(dev); + + if (addr != A_WR_LOCK && s->regs[R_WR_LOCK]) { + g_autofree char *path = object_get_canonical_path(OBJECT(s)); + + qemu_log_mask(LOG_GUEST_ERROR, + "%s[reg_0x%02lx]: Attempt to write locked register.\n", + path, (long)addr); + } else { + register_write_memory(opaque, addr, data, size); + } +} + +static void efuse_ctrl_register_reset(RegisterInfo *reg) +{ + if (!reg->data || !reg->access) { + return; + } + + /* Reset must not trigger some registers' writers */ + switch (reg->access->addr) { + case A_EFUSE_AES_CRC: + case A_EFUSE_AES_USR_KEY0_CRC: + case A_EFUSE_AES_USR_KEY1_CRC: + *(uint32_t *)reg->data = reg->access->reset; + return; + } + + register_reset(reg); +} + +static void efuse_ctrl_reset(DeviceState *dev) +{ + XlnxVersalEFuseCtrl *s = XLNX_VERSAL_EFUSE_CTRL(dev); + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(s->regs_info); ++i) { + efuse_ctrl_register_reset(&s->regs_info[i]); + } + + efuse_anchor_bits_check(s); + efuse_data_sync(s); + efuse_imr_update_irq(s); +} + +static const MemoryRegionOps efuse_ctrl_ops = { + .read = register_read_memory, + .write = efuse_ctrl_reg_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +static void efuse_ctrl_realize(DeviceState *dev, Error **errp) +{ + XlnxVersalEFuseCtrl *s = XLNX_VERSAL_EFUSE_CTRL(dev); + const uint32_t lks_sz = sizeof(XlnxEFuseLkSpec) / 2; + + if (!s->efuse) { + g_autofree char *path = object_get_canonical_path(OBJECT(s)); + + error_setg(errp, "%s.efuse: link property not connected to XLNX-EFUSE", + path); + return; + } + + /* Sort property-defined pgm-locks for bsearch lookup */ + if ((s->extra_pg0_lock_n16 % 
lks_sz) != 0) { + g_autofree char *path = object_get_canonical_path(OBJECT(s)); + + error_setg(errp, + "%s.pg0-lock: array property item-count not multiple of %u", + path, lks_sz); + return; + } + + efuse_lk_spec_sort(s); +} + +static void efuse_ctrl_init(Object *obj) +{ + XlnxVersalEFuseCtrl *s = XLNX_VERSAL_EFUSE_CTRL(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + RegisterInfoArray *reg_array; + + reg_array = + register_init_block32(DEVICE(obj), efuse_ctrl_regs_info, + ARRAY_SIZE(efuse_ctrl_regs_info), + s->regs_info, s->regs, + &efuse_ctrl_ops, + XLNX_VERSAL_EFUSE_CTRL_ERR_DEBUG, + R_MAX * 4); + + sysbus_init_mmio(sbd, ®_array->mem); + sysbus_init_irq(sbd, &s->irq_efuse_imr); +} + +static const VMStateDescription vmstate_efuse_ctrl = { + .name = TYPE_XLNX_VERSAL_EFUSE_CTRL, + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32_ARRAY(regs, XlnxVersalEFuseCtrl, R_MAX), + VMSTATE_END_OF_LIST(), + } +}; + +static Property efuse_ctrl_props[] = { + DEFINE_PROP_LINK("efuse", + XlnxVersalEFuseCtrl, efuse, + TYPE_XLNX_EFUSE, XlnxEFuse *), + DEFINE_PROP_ARRAY("pg0-lock", + XlnxVersalEFuseCtrl, extra_pg0_lock_n16, + extra_pg0_lock_spec, qdev_prop_uint16, uint16_t), + + DEFINE_PROP_END_OF_LIST(), +}; + +static void efuse_ctrl_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = efuse_ctrl_reset; + dc->realize = efuse_ctrl_realize; + dc->vmsd = &vmstate_efuse_ctrl; + device_class_set_props(dc, efuse_ctrl_props); +} + +static const TypeInfo efuse_ctrl_info = { + .name = TYPE_XLNX_VERSAL_EFUSE_CTRL, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(XlnxVersalEFuseCtrl), + .class_init = efuse_ctrl_class_init, + .instance_init = efuse_ctrl_init, +}; + +static void efuse_ctrl_register_types(void) +{ + type_register_static(&efuse_ctrl_info); +} + +type_init(efuse_ctrl_register_types) + +/* + * Retrieve a row, with unreadable bits returned as 0. + */ +uint32_t xlnx_versal_efuse_read_row(XlnxEFuse *efuse, + uint32_t bit, bool *denied) +{ + bool dummy; + + if (!denied) { + denied = &dummy; + } + + if (bit >= EFUSE_RD_BLOCKED_START && bit <= EFUSE_RD_BLOCKED_END) { + *denied = true; + return 0; + } + + *denied = false; + return xlnx_efuse_get_row(efuse, bit); +} diff --git a/hw/nvram/xlnx-zynqmp-efuse.c b/hw/nvram/xlnx-zynqmp-efuse.c new file mode 100644 index 000000000..228ba0bbf --- /dev/null +++ b/hw/nvram/xlnx-zynqmp-efuse.c @@ -0,0 +1,861 @@ +/* + * QEMU model of the ZynqMP eFuse + * + * Copyright (c) 2015 Xilinx Inc. + * + * Written by Edgar E. Iglesias + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "hw/nvram/xlnx-zynqmp-efuse.h" + +#include "qemu/log.h" +#include "qapi/error.h" +#include "migration/vmstate.h" +#include "hw/qdev-properties.h" + +#ifndef ZYNQMP_EFUSE_ERR_DEBUG +#define ZYNQMP_EFUSE_ERR_DEBUG 0 +#endif + +REG32(WR_LOCK, 0x0) + FIELD(WR_LOCK, LOCK, 0, 16) +REG32(CFG, 0x4) + FIELD(CFG, SLVERR_ENABLE, 5, 1) + FIELD(CFG, MARGIN_RD, 2, 2) + FIELD(CFG, PGM_EN, 1, 1) + FIELD(CFG, EFUSE_CLK_SEL, 0, 1) +REG32(STATUS, 0x8) + FIELD(STATUS, AES_CRC_PASS, 7, 1) + FIELD(STATUS, AES_CRC_DONE, 6, 1) + FIELD(STATUS, CACHE_DONE, 5, 1) + FIELD(STATUS, CACHE_LOAD, 4, 1) + FIELD(STATUS, EFUSE_3_TBIT, 2, 1) + FIELD(STATUS, EFUSE_2_TBIT, 1, 1) + FIELD(STATUS, EFUSE_0_TBIT, 0, 1) +REG32(EFUSE_PGM_ADDR, 0xc) + FIELD(EFUSE_PGM_ADDR, EFUSE, 11, 2) + FIELD(EFUSE_PGM_ADDR, ROW, 5, 6) + FIELD(EFUSE_PGM_ADDR, COLUMN, 0, 5) +REG32(EFUSE_RD_ADDR, 0x10) + FIELD(EFUSE_RD_ADDR, EFUSE, 11, 2) + FIELD(EFUSE_RD_ADDR, ROW, 5, 6) +REG32(EFUSE_RD_DATA, 0x14) +REG32(TPGM, 0x18) + FIELD(TPGM, VALUE, 0, 16) +REG32(TRD, 0x1c) + FIELD(TRD, VALUE, 0, 8) +REG32(TSU_H_PS, 0x20) + FIELD(TSU_H_PS, VALUE, 0, 8) +REG32(TSU_H_PS_CS, 0x24) + FIELD(TSU_H_PS_CS, VALUE, 0, 8) +REG32(TSU_H_CS, 0x2c) + FIELD(TSU_H_CS, VALUE, 0, 4) +REG32(EFUSE_ISR, 0x30) + FIELD(EFUSE_ISR, APB_SLVERR, 31, 1) + FIELD(EFUSE_ISR, CACHE_ERROR, 4, 1) + FIELD(EFUSE_ISR, RD_ERROR, 3, 1) + FIELD(EFUSE_ISR, RD_DONE, 2, 1) + FIELD(EFUSE_ISR, PGM_ERROR, 1, 1) + FIELD(EFUSE_ISR, PGM_DONE, 0, 1) +REG32(EFUSE_IMR, 0x34) + FIELD(EFUSE_IMR, APB_SLVERR, 31, 1) + FIELD(EFUSE_IMR, CACHE_ERROR, 4, 1) + FIELD(EFUSE_IMR, RD_ERROR, 3, 1) + FIELD(EFUSE_IMR, RD_DONE, 2, 1) + FIELD(EFUSE_IMR, PGM_ERROR, 1, 1) + FIELD(EFUSE_IMR, PGM_DONE, 0, 1) +REG32(EFUSE_IER, 0x38) + FIELD(EFUSE_IER, APB_SLVERR, 31, 1) + FIELD(EFUSE_IER, CACHE_ERROR, 4, 1) + FIELD(EFUSE_IER, RD_ERROR, 3, 1) + FIELD(EFUSE_IER, RD_DONE, 2, 1) + FIELD(EFUSE_IER, PGM_ERROR, 1, 1) + FIELD(EFUSE_IER, PGM_DONE, 0, 1) +REG32(EFUSE_IDR, 0x3c) + FIELD(EFUSE_IDR, APB_SLVERR, 31, 1) + FIELD(EFUSE_IDR, CACHE_ERROR, 4, 1) + FIELD(EFUSE_IDR, RD_ERROR, 3, 1) + FIELD(EFUSE_IDR, RD_DONE, 2, 1) + FIELD(EFUSE_IDR, PGM_ERROR, 1, 1) + FIELD(EFUSE_IDR, PGM_DONE, 0, 1) +REG32(EFUSE_CACHE_LOAD, 0x40) + FIELD(EFUSE_CACHE_LOAD, LOAD, 0, 1) +REG32(EFUSE_PGM_LOCK, 0x44) + FIELD(EFUSE_PGM_LOCK, SPK_ID_LOCK, 0, 1) +REG32(EFUSE_AES_CRC, 0x48) +REG32(EFUSE_TBITS_PRGRMG_EN, 0x100) + FIELD(EFUSE_TBITS_PRGRMG_EN, TBITS_PRGRMG_EN, 3, 1) +REG32(DNA_0, 0x100c) +REG32(DNA_1, 0x1010) +REG32(DNA_2, 0x1014) +REG32(IPDISABLE, 0x1018) + FIELD(IPDISABLE, VCU_DIS, 8, 1) + FIELD(IPDISABLE, GPU_DIS, 5, 1) + FIELD(IPDISABLE, APU3_DIS, 3, 1) + FIELD(IPDISABLE, APU2_DIS, 2, 1) + FIELD(IPDISABLE, APU1_DIS, 1, 1) + FIELD(IPDISABLE, APU0_DIS, 0, 1) +REG32(SYSOSC_CTRL, 0x101c) + FIELD(SYSOSC_CTRL, SYSOSC_EN, 0, 1) +REG32(USER_0, 0x1020) +REG32(USER_1, 0x1024) +REG32(USER_2, 0x1028) +REG32(USER_3, 0x102c) +REG32(USER_4, 0x1030) +REG32(USER_5, 0x1034) +REG32(USER_6, 0x1038) +REG32(USER_7, 0x103c) +REG32(MISC_USER_CTRL, 0x1040) + FIELD(MISC_USER_CTRL, FPD_SC_EN_0, 14, 1) + FIELD(MISC_USER_CTRL, LPD_SC_EN_0, 11, 1) + FIELD(MISC_USER_CTRL, LBIST_EN, 10, 1) + FIELD(MISC_USER_CTRL, USR_WRLK_7, 7, 1) + FIELD(MISC_USER_CTRL, USR_WRLK_6, 6, 1) + 
FIELD(MISC_USER_CTRL, USR_WRLK_5, 5, 1)
+    FIELD(MISC_USER_CTRL, USR_WRLK_4, 4, 1)
+    FIELD(MISC_USER_CTRL, USR_WRLK_3, 3, 1)
+    FIELD(MISC_USER_CTRL, USR_WRLK_2, 2, 1)
+    FIELD(MISC_USER_CTRL, USR_WRLK_1, 1, 1)
+    FIELD(MISC_USER_CTRL, USR_WRLK_0, 0, 1)
+REG32(ROM_RSVD, 0x1044)
+    FIELD(ROM_RSVD, PBR_BOOT_ERROR, 0, 3)
+REG32(PUF_CHASH, 0x1050)
+REG32(PUF_MISC, 0x1054)
+    FIELD(PUF_MISC, REGISTER_DIS, 31, 1)
+    FIELD(PUF_MISC, SYN_WRLK, 30, 1)
+    FIELD(PUF_MISC, SYN_INVLD, 29, 1)
+    FIELD(PUF_MISC, TEST2_DIS, 28, 1)
+    FIELD(PUF_MISC, UNUSED27, 27, 1)
+    FIELD(PUF_MISC, UNUSED26, 26, 1)
+    FIELD(PUF_MISC, UNUSED25, 25, 1)
+    FIELD(PUF_MISC, UNUSED24, 24, 1)
+    FIELD(PUF_MISC, AUX, 0, 24)
+REG32(SEC_CTRL, 0x1058)
+    FIELD(SEC_CTRL, PPK1_INVLD, 30, 2)
+    FIELD(SEC_CTRL, PPK1_WRLK, 29, 1)
+    FIELD(SEC_CTRL, PPK0_INVLD, 27, 2)
+    FIELD(SEC_CTRL, PPK0_WRLK, 26, 1)
+    FIELD(SEC_CTRL, RSA_EN, 11, 15)
+    FIELD(SEC_CTRL, SEC_LOCK, 10, 1)
+    FIELD(SEC_CTRL, PROG_GATE_2, 9, 1)
+    FIELD(SEC_CTRL, PROG_GATE_1, 8, 1)
+    FIELD(SEC_CTRL, PROG_GATE_0, 7, 1)
+    FIELD(SEC_CTRL, DFT_DIS, 6, 1)
+    FIELD(SEC_CTRL, JTAG_DIS, 5, 1)
+    FIELD(SEC_CTRL, ERROR_DIS, 4, 1)
+    FIELD(SEC_CTRL, BBRAM_DIS, 3, 1)
+    FIELD(SEC_CTRL, ENC_ONLY, 2, 1)
+    FIELD(SEC_CTRL, AES_WRLK, 1, 1)
+    FIELD(SEC_CTRL, AES_RDLK, 0, 1)
+REG32(SPK_ID, 0x105c)
+REG32(PPK0_0, 0x10a0)
+REG32(PPK0_1, 0x10a4)
+REG32(PPK0_2, 0x10a8)
+REG32(PPK0_3, 0x10ac)
+REG32(PPK0_4, 0x10b0)
+REG32(PPK0_5, 0x10b4)
+REG32(PPK0_6, 0x10b8)
+REG32(PPK0_7, 0x10bc)
+REG32(PPK0_8, 0x10c0)
+REG32(PPK0_9, 0x10c4)
+REG32(PPK0_10, 0x10c8)
+REG32(PPK0_11, 0x10cc)
+REG32(PPK1_0, 0x10d0)
+REG32(PPK1_1, 0x10d4)
+REG32(PPK1_2, 0x10d8)
+REG32(PPK1_3, 0x10dc)
+REG32(PPK1_4, 0x10e0)
+REG32(PPK1_5, 0x10e4)
+REG32(PPK1_6, 0x10e8)
+REG32(PPK1_7, 0x10ec)
+REG32(PPK1_8, 0x10f0)
+REG32(PPK1_9, 0x10f4)
+REG32(PPK1_10, 0x10f8)
+REG32(PPK1_11, 0x10fc)
+
+#define BIT_POS(ROW, COLUMN) (ROW * 32 + COLUMN)
+#define R_MAX (R_PPK1_11 + 1)
+
+/* #define EFUSE_XOSC 26 */
+
+/*
+ * eFUSE layout references:
+ *   ZynqMP: UG1085 (v2.1) August 21, 2019, p.277, Table 12-13
+ */
+#define EFUSE_AES_RDLK BIT_POS(22, 0)
+#define EFUSE_AES_WRLK BIT_POS(22, 1)
+#define EFUSE_ENC_ONLY BIT_POS(22, 2)
+#define EFUSE_BBRAM_DIS BIT_POS(22, 3)
+#define EFUSE_ERROR_DIS BIT_POS(22, 4)
+#define EFUSE_JTAG_DIS BIT_POS(22, 5)
+#define EFUSE_DFT_DIS BIT_POS(22, 6)
+#define EFUSE_PROG_GATE_0 BIT_POS(22, 7)
+#define EFUSE_PROG_GATE_1 BIT_POS(22, 8)
+#define EFUSE_PROG_GATE_2 BIT_POS(22, 9)
+#define EFUSE_SEC_LOCK BIT_POS(22, 10)
+#define EFUSE_RSA_EN BIT_POS(22, 11)
+#define EFUSE_RSA_EN14 BIT_POS(22, 25)
+#define EFUSE_PPK0_WRLK BIT_POS(22, 26)
+#define EFUSE_PPK0_INVLD BIT_POS(22, 27)
+#define EFUSE_PPK0_INVLD_1 BIT_POS(22, 28)
+#define EFUSE_PPK1_WRLK BIT_POS(22, 29)
+#define EFUSE_PPK1_INVLD BIT_POS(22, 30)
+#define EFUSE_PPK1_INVLD_1 BIT_POS(22, 31)
+
+/* Areas.
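+ *
+ * Each range below is computed by BIT_POS(row, column), i.e.
+ * row * 32 + column.  For example, the DNA area spans
+ * BIT_POS(3, 0) = 96 through BIT_POS(5, 31) = 191, i.e. rows 3 to 5.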
*/ +#define EFUSE_TRIM_START BIT_POS(1, 0) +#define EFUSE_TRIM_END BIT_POS(1, 30) +#define EFUSE_DNA_START BIT_POS(3, 0) +#define EFUSE_DNA_END BIT_POS(5, 31) +#define EFUSE_AES_START BIT_POS(24, 0) +#define EFUSE_AES_END BIT_POS(31, 31) +#define EFUSE_ROM_START BIT_POS(17, 0) +#define EFUSE_ROM_END BIT_POS(17, 31) +#define EFUSE_IPDIS_START BIT_POS(6, 0) +#define EFUSE_IPDIS_END BIT_POS(6, 31) +#define EFUSE_USER_START BIT_POS(8, 0) +#define EFUSE_USER_END BIT_POS(15, 31) +#define EFUSE_BISR_START BIT_POS(32, 0) +#define EFUSE_BISR_END BIT_POS(39, 31) + +#define EFUSE_USER_CTRL_START BIT_POS(16, 0) +#define EFUSE_USER_CTRL_END BIT_POS(16, 16) +#define EFUSE_USER_CTRL_MASK ((uint32_t)MAKE_64BIT_MASK(0, 17)) + +#define EFUSE_PUF_CHASH_START BIT_POS(20, 0) +#define EFUSE_PUF_CHASH_END BIT_POS(20, 31) +#define EFUSE_PUF_MISC_START BIT_POS(21, 0) +#define EFUSE_PUF_MISC_END BIT_POS(21, 31) +#define EFUSE_PUF_SYN_WRLK BIT_POS(21, 30) + +#define EFUSE_SPK_START BIT_POS(23, 0) +#define EFUSE_SPK_END BIT_POS(23, 31) + +#define EFUSE_PPK0_START BIT_POS(40, 0) +#define EFUSE_PPK0_END BIT_POS(51, 31) +#define EFUSE_PPK1_START BIT_POS(52, 0) +#define EFUSE_PPK1_END BIT_POS(63, 31) + +#define EFUSE_CACHE_FLD(s, reg, field) \ + ARRAY_FIELD_DP32((s)->regs, reg, field, \ + (xlnx_efuse_get_row((s->efuse), EFUSE_ ## field) \ + >> (EFUSE_ ## field % 32))) + +#define EFUSE_CACHE_BIT(s, reg, field) \ + ARRAY_FIELD_DP32((s)->regs, reg, field, xlnx_efuse_get_bit((s->efuse), \ + EFUSE_ ## field)) + +#define FBIT_UNKNOWN (~0) + +QEMU_BUILD_BUG_ON(R_MAX != ARRAY_SIZE(((XlnxZynqMPEFuse *)0)->regs)); + +static void update_tbit_status(XlnxZynqMPEFuse *s) +{ + unsigned int check = xlnx_efuse_tbits_check(s->efuse); + uint32_t val = s->regs[R_STATUS]; + + val = FIELD_DP32(val, STATUS, EFUSE_0_TBIT, !!(check & (1 << 0))); + val = FIELD_DP32(val, STATUS, EFUSE_2_TBIT, !!(check & (1 << 1))); + val = FIELD_DP32(val, STATUS, EFUSE_3_TBIT, !!(check & (1 << 2))); + + s->regs[R_STATUS] = val; +} + +/* Update the u32 array from efuse bits. Slow but simple approach. */ +static void cache_sync_u32(XlnxZynqMPEFuse *s, unsigned int r_start, + unsigned int f_start, unsigned int f_end, + unsigned int f_written) +{ + uint32_t *u32 = &s->regs[r_start]; + unsigned int fbit, wbits = 0, u32_off = 0; + + /* Avoid working on bits that are not relevant. */ + if (f_written != FBIT_UNKNOWN + && (f_written < f_start || f_written > f_end)) { + return; + } + + for (fbit = f_start; fbit <= f_end; fbit++, wbits++) { + if (wbits == 32) { + /* Update the key offset. */ + u32_off += 1; + wbits = 0; + } + u32[u32_off] |= xlnx_efuse_get_bit(s->efuse, fbit) << wbits; + } +} + +/* + * Keep the syncs in bit order so we can bail out for the + * slower ones. + */ +static void zynqmp_efuse_sync_cache(XlnxZynqMPEFuse *s, unsigned int bit) +{ + EFUSE_CACHE_BIT(s, SEC_CTRL, AES_RDLK); + EFUSE_CACHE_BIT(s, SEC_CTRL, AES_WRLK); + EFUSE_CACHE_BIT(s, SEC_CTRL, ENC_ONLY); + EFUSE_CACHE_BIT(s, SEC_CTRL, BBRAM_DIS); + EFUSE_CACHE_BIT(s, SEC_CTRL, ERROR_DIS); + EFUSE_CACHE_BIT(s, SEC_CTRL, JTAG_DIS); + EFUSE_CACHE_BIT(s, SEC_CTRL, DFT_DIS); + EFUSE_CACHE_BIT(s, SEC_CTRL, PROG_GATE_0); + EFUSE_CACHE_BIT(s, SEC_CTRL, PROG_GATE_1); + EFUSE_CACHE_BIT(s, SEC_CTRL, PROG_GATE_2); + EFUSE_CACHE_BIT(s, SEC_CTRL, SEC_LOCK); + EFUSE_CACHE_BIT(s, SEC_CTRL, PPK0_WRLK); + EFUSE_CACHE_BIT(s, SEC_CTRL, PPK1_WRLK); + + EFUSE_CACHE_FLD(s, SEC_CTRL, RSA_EN); + EFUSE_CACHE_FLD(s, SEC_CTRL, PPK0_INVLD); + EFUSE_CACHE_FLD(s, SEC_CTRL, PPK1_INVLD); + + /* Update the tbits. 
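+     * Bits 0, 1 and 2 of the xlnx_efuse_tbits_check() result map to
+     * EFUSE_0_TBIT, EFUSE_2_TBIT and EFUSE_3_TBIT respectively,
+     * matching the 0/2/3 array numbering used on ZynqMP.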
*/
+    update_tbit_status(s);
+
+    /* Sync the various areas. */
+    s->regs[R_MISC_USER_CTRL] = xlnx_efuse_get_row(s->efuse,
+                                                   EFUSE_USER_CTRL_START)
+                                & EFUSE_USER_CTRL_MASK;
+    s->regs[R_PUF_CHASH] = xlnx_efuse_get_row(s->efuse, EFUSE_PUF_CHASH_START);
+    s->regs[R_PUF_MISC] = xlnx_efuse_get_row(s->efuse, EFUSE_PUF_MISC_START);
+
+    cache_sync_u32(s, R_DNA_0, EFUSE_DNA_START, EFUSE_DNA_END, bit);
+
+    if (bit < EFUSE_AES_START) {
+        return;
+    }
+
+    cache_sync_u32(s, R_ROM_RSVD, EFUSE_ROM_START, EFUSE_ROM_END, bit);
+    cache_sync_u32(s, R_IPDISABLE, EFUSE_IPDIS_START, EFUSE_IPDIS_END, bit);
+    cache_sync_u32(s, R_USER_0, EFUSE_USER_START, EFUSE_USER_END, bit);
+    cache_sync_u32(s, R_SPK_ID, EFUSE_SPK_START, EFUSE_SPK_END, bit);
+    cache_sync_u32(s, R_PPK0_0, EFUSE_PPK0_START, EFUSE_PPK0_END, bit);
+    cache_sync_u32(s, R_PPK1_0, EFUSE_PPK1_START, EFUSE_PPK1_END, bit);
+}
+
+static void zynqmp_efuse_update_irq(XlnxZynqMPEFuse *s)
+{
+    bool pending = s->regs[R_EFUSE_ISR] & ~s->regs[R_EFUSE_IMR];
+    qemu_set_irq(s->irq, pending);
+}
+
+static void zynqmp_efuse_isr_postw(RegisterInfo *reg, uint64_t val64)
+{
+    XlnxZynqMPEFuse *s = XLNX_ZYNQMP_EFUSE(reg->opaque);
+    zynqmp_efuse_update_irq(s);
+}
+
+static uint64_t zynqmp_efuse_ier_prew(RegisterInfo *reg, uint64_t val64)
+{
+    XlnxZynqMPEFuse *s = XLNX_ZYNQMP_EFUSE(reg->opaque);
+    uint32_t val = val64;
+
+    s->regs[R_EFUSE_IMR] &= ~val;
+    zynqmp_efuse_update_irq(s);
+    return 0;
+}
+
+static uint64_t zynqmp_efuse_idr_prew(RegisterInfo *reg, uint64_t val64)
+{
+    XlnxZynqMPEFuse *s = XLNX_ZYNQMP_EFUSE(reg->opaque);
+    uint32_t val = val64;
+
+    s->regs[R_EFUSE_IMR] |= val;
+    zynqmp_efuse_update_irq(s);
+    return 0;
+}
+
+static void zynqmp_efuse_pgm_addr_postw(RegisterInfo *reg, uint64_t val64)
+{
+    XlnxZynqMPEFuse *s = XLNX_ZYNQMP_EFUSE(reg->opaque);
+    unsigned bit = val64;
+    unsigned page = FIELD_EX32(bit, EFUSE_PGM_ADDR, EFUSE);
+    bool puf_prot = false;
+    const char *errmsg = NULL;
+
+    /* Allow only valid array, and adjust for skipped array 1 */
+    switch (page) {
+    case 0:
+        break;
+    case 2 ...
3: + bit = FIELD_DP32(bit, EFUSE_PGM_ADDR, EFUSE, page - 1); + puf_prot = xlnx_efuse_get_bit(s->efuse, EFUSE_PUF_SYN_WRLK); + break; + default: + errmsg = "Invalid address"; + goto pgm_done; + } + + if (ARRAY_FIELD_EX32(s->regs, WR_LOCK, LOCK)) { + errmsg = "Array write-locked"; + goto pgm_done; + } + + if (!ARRAY_FIELD_EX32(s->regs, CFG, PGM_EN)) { + errmsg = "Array pgm-disabled"; + goto pgm_done; + } + + if (puf_prot) { + errmsg = "PUF_HD-store write-locked"; + goto pgm_done; + } + + if (ARRAY_FIELD_EX32(s->regs, SEC_CTRL, AES_WRLK) + && bit >= EFUSE_AES_START && bit <= EFUSE_AES_END) { + errmsg = "AES key-store Write-locked"; + goto pgm_done; + } + + if (!xlnx_efuse_set_bit(s->efuse, bit)) { + errmsg = "Write failed"; + } + + pgm_done: + if (!errmsg) { + ARRAY_FIELD_DP32(s->regs, EFUSE_ISR, PGM_ERROR, 0); + } else { + g_autofree char *path = object_get_canonical_path(OBJECT(s)); + + ARRAY_FIELD_DP32(s->regs, EFUSE_ISR, PGM_ERROR, 1); + qemu_log_mask(LOG_GUEST_ERROR, + "%s - eFuse write error: %s; addr=0x%x\n", + path, errmsg, (unsigned)val64); + } + + ARRAY_FIELD_DP32(s->regs, EFUSE_ISR, PGM_DONE, 1); + zynqmp_efuse_update_irq(s); +} + +static void zynqmp_efuse_rd_addr_postw(RegisterInfo *reg, uint64_t val64) +{ + XlnxZynqMPEFuse *s = XLNX_ZYNQMP_EFUSE(reg->opaque); + g_autofree char *path = NULL; + + /* + * Grant reads only to allowed bits; reference sources: + * 1/ XilSKey - XilSKey_ZynqMp_EfusePs_ReadRow() + * 2/ UG1085, v2.0, table 12-13 + * (note: enumerates the masks as per described in + * references to avoid mental translation). + */ +#define COL_MASK(L_, H_) \ + ((uint32_t)MAKE_64BIT_MASK((L_), (1 + (H_) - (L_)))) + + static const uint32_t ary0_col_mask[] = { + /* XilSKey - XSK_ZYNQMP_EFUSEPS_TBITS_ROW */ + [0] = COL_MASK(28, 31), + + /* XilSKey - XSK_ZYNQMP_EFUSEPS_USR{0:7}_FUSE_ROW */ + [8] = COL_MASK(0, 31), [9] = COL_MASK(0, 31), + [10] = COL_MASK(0, 31), [11] = COL_MASK(0, 31), + [12] = COL_MASK(0, 31), [13] = COL_MASK(0, 31), + [14] = COL_MASK(0, 31), [15] = COL_MASK(0, 31), + + /* XilSKey - XSK_ZYNQMP_EFUSEPS_MISC_USR_CTRL_ROW */ + [16] = COL_MASK(0, 7) | COL_MASK(10, 16), + + /* XilSKey - XSK_ZYNQMP_EFUSEPS_PBR_BOOT_ERR_ROW */ + [17] = COL_MASK(0, 2), + + /* XilSKey - XSK_ZYNQMP_EFUSEPS_PUF_CHASH_ROW */ + [20] = COL_MASK(0, 31), + + /* XilSKey - XSK_ZYNQMP_EFUSEPS_PUF_AUX_ROW */ + [21] = COL_MASK(0, 23) | COL_MASK(29, 31), + + /* XilSKey - XSK_ZYNQMP_EFUSEPS_SEC_CTRL_ROW */ + [22] = COL_MASK(0, 31), + + /* XilSKey - XSK_ZYNQMP_EFUSEPS_SPK_ID_ROW */ + [23] = COL_MASK(0, 31), + + /* XilSKey - XSK_ZYNQMP_EFUSEPS_PPK0_START_ROW */ + [40] = COL_MASK(0, 31), [41] = COL_MASK(0, 31), + [42] = COL_MASK(0, 31), [43] = COL_MASK(0, 31), + [44] = COL_MASK(0, 31), [45] = COL_MASK(0, 31), + [46] = COL_MASK(0, 31), [47] = COL_MASK(0, 31), + [48] = COL_MASK(0, 31), [49] = COL_MASK(0, 31), + [50] = COL_MASK(0, 31), [51] = COL_MASK(0, 31), + + /* XilSKey - XSK_ZYNQMP_EFUSEPS_PPK1_START_ROW */ + [52] = COL_MASK(0, 31), [53] = COL_MASK(0, 31), + [54] = COL_MASK(0, 31), [55] = COL_MASK(0, 31), + [56] = COL_MASK(0, 31), [57] = COL_MASK(0, 31), + [58] = COL_MASK(0, 31), [59] = COL_MASK(0, 31), + [60] = COL_MASK(0, 31), [61] = COL_MASK(0, 31), + [62] = COL_MASK(0, 31), [63] = COL_MASK(0, 31), + }; + + uint32_t col_mask = COL_MASK(0, 31); +#undef COL_MASK + + uint32_t efuse_idx = s->regs[R_EFUSE_RD_ADDR]; + uint32_t efuse_ary = FIELD_EX32(efuse_idx, EFUSE_RD_ADDR, EFUSE); + uint32_t efuse_row = FIELD_EX32(efuse_idx, EFUSE_RD_ADDR, ROW); + + switch (efuse_ary) { + case 0: /* Various */ + if 
(efuse_row >= ARRAY_SIZE(ary0_col_mask)) { + goto denied; + } + + col_mask = ary0_col_mask[efuse_row]; + if (!col_mask) { + goto denied; + } + break; + case 2: /* PUF helper data, adjust for skipped array 1 */ + case 3: + val64 = FIELD_DP32(efuse_idx, EFUSE_RD_ADDR, EFUSE, efuse_ary - 1); + break; + default: + goto denied; + } + + s->regs[R_EFUSE_RD_DATA] = xlnx_efuse_get_row(s->efuse, val64) & col_mask; + + ARRAY_FIELD_DP32(s->regs, EFUSE_ISR, RD_ERROR, 0); + ARRAY_FIELD_DP32(s->regs, EFUSE_ISR, RD_DONE, 1); + zynqmp_efuse_update_irq(s); + return; + + denied: + path = object_get_canonical_path(OBJECT(s)); + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Denied efuse read from array %u, row %u\n", + path, efuse_ary, efuse_row); + + s->regs[R_EFUSE_RD_DATA] = 0; + + ARRAY_FIELD_DP32(s->regs, EFUSE_ISR, RD_ERROR, 1); + ARRAY_FIELD_DP32(s->regs, EFUSE_ISR, RD_DONE, 0); + zynqmp_efuse_update_irq(s); +} + +static void zynqmp_efuse_aes_crc_postw(RegisterInfo *reg, uint64_t val64) +{ + XlnxZynqMPEFuse *s = XLNX_ZYNQMP_EFUSE(reg->opaque); + bool ok; + + ok = xlnx_efuse_k256_check(s->efuse, (uint32_t)val64, EFUSE_AES_START); + + ARRAY_FIELD_DP32(s->regs, STATUS, AES_CRC_PASS, (ok ? 1 : 0)); + ARRAY_FIELD_DP32(s->regs, STATUS, AES_CRC_DONE, 1); + + s->regs[R_EFUSE_AES_CRC] = 0; /* crc value is write-only */ +} + +static uint64_t zynqmp_efuse_cache_load_prew(RegisterInfo *reg, + uint64_t valu64) +{ + XlnxZynqMPEFuse *s = XLNX_ZYNQMP_EFUSE(reg->opaque); + + if (valu64 & R_EFUSE_CACHE_LOAD_LOAD_MASK) { + zynqmp_efuse_sync_cache(s, FBIT_UNKNOWN); + ARRAY_FIELD_DP32(s->regs, STATUS, CACHE_DONE, 1); + zynqmp_efuse_update_irq(s); + } + + return 0; +} + +static uint64_t zynqmp_efuse_wr_lock_prew(RegisterInfo *reg, uint64_t val) +{ + return val == 0xDF0D ? 0 : 1; +} + +static RegisterAccessInfo zynqmp_efuse_regs_info[] = { + { .name = "WR_LOCK", .addr = A_WR_LOCK, + .reset = 0x1, + .pre_write = zynqmp_efuse_wr_lock_prew, + },{ .name = "CFG", .addr = A_CFG, + },{ .name = "STATUS", .addr = A_STATUS, + .rsvd = 0x8, + .ro = 0xff, + },{ .name = "EFUSE_PGM_ADDR", .addr = A_EFUSE_PGM_ADDR, + .post_write = zynqmp_efuse_pgm_addr_postw + },{ .name = "EFUSE_RD_ADDR", .addr = A_EFUSE_RD_ADDR, + .rsvd = 0x1f, + .post_write = zynqmp_efuse_rd_addr_postw, + },{ .name = "EFUSE_RD_DATA", .addr = A_EFUSE_RD_DATA, + .ro = 0xffffffff, + },{ .name = "TPGM", .addr = A_TPGM, + },{ .name = "TRD", .addr = A_TRD, + .reset = 0x1b, + },{ .name = "TSU_H_PS", .addr = A_TSU_H_PS, + .reset = 0xff, + },{ .name = "TSU_H_PS_CS", .addr = A_TSU_H_PS_CS, + .reset = 0xb, + },{ .name = "TSU_H_CS", .addr = A_TSU_H_CS, + .reset = 0x7, + },{ .name = "EFUSE_ISR", .addr = A_EFUSE_ISR, + .rsvd = 0x7fffffe0, + .w1c = 0x8000001f, + .post_write = zynqmp_efuse_isr_postw, + },{ .name = "EFUSE_IMR", .addr = A_EFUSE_IMR, + .reset = 0x8000001f, + .rsvd = 0x7fffffe0, + .ro = 0xffffffff, + },{ .name = "EFUSE_IER", .addr = A_EFUSE_IER, + .rsvd = 0x7fffffe0, + .pre_write = zynqmp_efuse_ier_prew, + },{ .name = "EFUSE_IDR", .addr = A_EFUSE_IDR, + .rsvd = 0x7fffffe0, + .pre_write = zynqmp_efuse_idr_prew, + },{ .name = "EFUSE_CACHE_LOAD", .addr = A_EFUSE_CACHE_LOAD, + .pre_write = zynqmp_efuse_cache_load_prew, + },{ .name = "EFUSE_PGM_LOCK", .addr = A_EFUSE_PGM_LOCK, + },{ .name = "EFUSE_AES_CRC", .addr = A_EFUSE_AES_CRC, + .post_write = zynqmp_efuse_aes_crc_postw, + },{ .name = "EFUSE_TBITS_PRGRMG_EN", .addr = A_EFUSE_TBITS_PRGRMG_EN, + .reset = R_EFUSE_TBITS_PRGRMG_EN_TBITS_PRGRMG_EN_MASK, + },{ .name = "DNA_0", .addr = A_DNA_0, + .ro = 0xffffffff, + },{ .name = "DNA_1", .addr 
= A_DNA_1, + .ro = 0xffffffff, + },{ .name = "DNA_2", .addr = A_DNA_2, + .ro = 0xffffffff, + },{ .name = "IPDISABLE", .addr = A_IPDISABLE, + .ro = 0xffffffff, + },{ .name = "SYSOSC_CTRL", .addr = A_SYSOSC_CTRL, + .ro = 0xffffffff, + },{ .name = "USER_0", .addr = A_USER_0, + .ro = 0xffffffff, + },{ .name = "USER_1", .addr = A_USER_1, + .ro = 0xffffffff, + },{ .name = "USER_2", .addr = A_USER_2, + .ro = 0xffffffff, + },{ .name = "USER_3", .addr = A_USER_3, + .ro = 0xffffffff, + },{ .name = "USER_4", .addr = A_USER_4, + .ro = 0xffffffff, + },{ .name = "USER_5", .addr = A_USER_5, + .ro = 0xffffffff, + },{ .name = "USER_6", .addr = A_USER_6, + .ro = 0xffffffff, + },{ .name = "USER_7", .addr = A_USER_7, + .ro = 0xffffffff, + },{ .name = "MISC_USER_CTRL", .addr = A_MISC_USER_CTRL, + .ro = 0xffffffff, + },{ .name = "ROM_RSVD", .addr = A_ROM_RSVD, + .ro = 0xffffffff, + },{ .name = "PUF_CHASH", .addr = A_PUF_CHASH, + .ro = 0xffffffff, + },{ .name = "PUF_MISC", .addr = A_PUF_MISC, + .ro = 0xffffffff, + },{ .name = "SEC_CTRL", .addr = A_SEC_CTRL, + .ro = 0xffffffff, + },{ .name = "SPK_ID", .addr = A_SPK_ID, + .ro = 0xffffffff, + },{ .name = "PPK0_0", .addr = A_PPK0_0, + .ro = 0xffffffff, + },{ .name = "PPK0_1", .addr = A_PPK0_1, + .ro = 0xffffffff, + },{ .name = "PPK0_2", .addr = A_PPK0_2, + .ro = 0xffffffff, + },{ .name = "PPK0_3", .addr = A_PPK0_3, + .ro = 0xffffffff, + },{ .name = "PPK0_4", .addr = A_PPK0_4, + .ro = 0xffffffff, + },{ .name = "PPK0_5", .addr = A_PPK0_5, + .ro = 0xffffffff, + },{ .name = "PPK0_6", .addr = A_PPK0_6, + .ro = 0xffffffff, + },{ .name = "PPK0_7", .addr = A_PPK0_7, + .ro = 0xffffffff, + },{ .name = "PPK0_8", .addr = A_PPK0_8, + .ro = 0xffffffff, + },{ .name = "PPK0_9", .addr = A_PPK0_9, + .ro = 0xffffffff, + },{ .name = "PPK0_10", .addr = A_PPK0_10, + .ro = 0xffffffff, + },{ .name = "PPK0_11", .addr = A_PPK0_11, + .ro = 0xffffffff, + },{ .name = "PPK1_0", .addr = A_PPK1_0, + .ro = 0xffffffff, + },{ .name = "PPK1_1", .addr = A_PPK1_1, + .ro = 0xffffffff, + },{ .name = "PPK1_2", .addr = A_PPK1_2, + .ro = 0xffffffff, + },{ .name = "PPK1_3", .addr = A_PPK1_3, + .ro = 0xffffffff, + },{ .name = "PPK1_4", .addr = A_PPK1_4, + .ro = 0xffffffff, + },{ .name = "PPK1_5", .addr = A_PPK1_5, + .ro = 0xffffffff, + },{ .name = "PPK1_6", .addr = A_PPK1_6, + .ro = 0xffffffff, + },{ .name = "PPK1_7", .addr = A_PPK1_7, + .ro = 0xffffffff, + },{ .name = "PPK1_8", .addr = A_PPK1_8, + .ro = 0xffffffff, + },{ .name = "PPK1_9", .addr = A_PPK1_9, + .ro = 0xffffffff, + },{ .name = "PPK1_10", .addr = A_PPK1_10, + .ro = 0xffffffff, + },{ .name = "PPK1_11", .addr = A_PPK1_11, + .ro = 0xffffffff, + } +}; + +static void zynqmp_efuse_reg_write(void *opaque, hwaddr addr, + uint64_t data, unsigned size) +{ + RegisterInfoArray *reg_array = opaque; + XlnxZynqMPEFuse *s; + Object *dev; + + assert(reg_array != NULL); + + dev = reg_array->mem.owner; + assert(dev); + + s = XLNX_ZYNQMP_EFUSE(dev); + + if (addr != A_WR_LOCK && s->regs[R_WR_LOCK]) { + g_autofree char *path = object_get_canonical_path(OBJECT(s)); + + qemu_log_mask(LOG_GUEST_ERROR, + "%s[reg_0x%02lx]: Attempt to write locked register.\n", + path, (long)addr); + } else { + register_write_memory(opaque, addr, data, size); + } +} + +static const MemoryRegionOps zynqmp_efuse_ops = { + .read = register_read_memory, + .write = zynqmp_efuse_reg_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +static void zynqmp_efuse_register_reset(RegisterInfo *reg) +{ + if (!reg->data || !reg->access) { + 
return; + } + + /* Reset must not trigger some registers' writers */ + switch (reg->access->addr) { + case A_EFUSE_AES_CRC: + *(uint32_t *)reg->data = reg->access->reset; + return; + } + + register_reset(reg); +} + +static void zynqmp_efuse_reset(DeviceState *dev) +{ + XlnxZynqMPEFuse *s = XLNX_ZYNQMP_EFUSE(dev); + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(s->regs_info); ++i) { + zynqmp_efuse_register_reset(&s->regs_info[i]); + } + + zynqmp_efuse_sync_cache(s, FBIT_UNKNOWN); + ARRAY_FIELD_DP32(s->regs, STATUS, CACHE_DONE, 1); + zynqmp_efuse_update_irq(s); +} + +static void zynqmp_efuse_realize(DeviceState *dev, Error **errp) +{ + XlnxZynqMPEFuse *s = XLNX_ZYNQMP_EFUSE(dev); + + if (!s->efuse) { + g_autofree char *path = object_get_canonical_path(OBJECT(s)); + + error_setg(errp, "%s.efuse: link property not connected to XLNX-EFUSE", + path); + return; + } + + s->efuse->dev = dev; +} + +static void zynqmp_efuse_init(Object *obj) +{ + XlnxZynqMPEFuse *s = XLNX_ZYNQMP_EFUSE(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + RegisterInfoArray *reg_array; + + reg_array = + register_init_block32(DEVICE(obj), zynqmp_efuse_regs_info, + ARRAY_SIZE(zynqmp_efuse_regs_info), + s->regs_info, s->regs, + &zynqmp_efuse_ops, + ZYNQMP_EFUSE_ERR_DEBUG, + R_MAX * 4); + + sysbus_init_mmio(sbd, ®_array->mem); + sysbus_init_irq(sbd, &s->irq); +} + +static const VMStateDescription vmstate_efuse = { + .name = TYPE_XLNX_ZYNQMP_EFUSE, + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32_ARRAY(regs, XlnxZynqMPEFuse, R_MAX), + VMSTATE_END_OF_LIST(), + } +}; + +static Property zynqmp_efuse_props[] = { + DEFINE_PROP_LINK("efuse", + XlnxZynqMPEFuse, efuse, + TYPE_XLNX_EFUSE, XlnxEFuse *), + + DEFINE_PROP_END_OF_LIST(), +}; + +static void zynqmp_efuse_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = zynqmp_efuse_reset; + dc->realize = zynqmp_efuse_realize; + dc->vmsd = &vmstate_efuse; + device_class_set_props(dc, zynqmp_efuse_props); +} + + +static const TypeInfo efuse_info = { + .name = TYPE_XLNX_ZYNQMP_EFUSE, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(XlnxZynqMPEFuse), + .class_init = zynqmp_efuse_class_init, + .instance_init = zynqmp_efuse_init, +}; + +static void efuse_register_types(void) +{ + type_register_static(&efuse_info); +} + +type_init(efuse_register_types) diff --git a/hw/openrisc/Kconfig b/hw/openrisc/Kconfig new file mode 100644 index 000000000..8f284f3ba --- /dev/null +++ b/hw/openrisc/Kconfig @@ -0,0 +1,6 @@ +config OR1K_SIM + bool + select SERIAL + select OPENCORES_ETH + select OMPIC + select SPLIT_IRQ diff --git a/hw/openrisc/cputimer.c b/hw/openrisc/cputimer.c new file mode 100644 index 000000000..93268815d --- /dev/null +++ b/hw/openrisc/cputimer.c @@ -0,0 +1,145 @@ +/* + * QEMU OpenRISC timer support + * + * Copyright (c) 2011-2012 Jia Liu + * Zhizhou Zhang + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "migration/vmstate.h" +#include "qemu/timer.h" + +#define TIMER_PERIOD 50 /* 50 ns period for 20 MHz timer */ + +/* Tick Timer global state to allow all cores to be in sync */ +typedef struct OR1KTimerState { + uint32_t ttcr; + uint64_t last_clk; +} OR1KTimerState; + +static OR1KTimerState *or1k_timer; + +void cpu_openrisc_count_set(OpenRISCCPU *cpu, uint32_t val) +{ + or1k_timer->ttcr = val; +} + +uint32_t cpu_openrisc_count_get(OpenRISCCPU *cpu) +{ + return or1k_timer->ttcr; +} + +/* Add elapsed ticks to ttcr */ +void cpu_openrisc_count_update(OpenRISCCPU *cpu) +{ + uint64_t now; + + if (!cpu->env.is_counting) { + return; + } + now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + or1k_timer->ttcr += (uint32_t)((now - or1k_timer->last_clk) + / TIMER_PERIOD); + or1k_timer->last_clk = now; +} + +/* Update the next timeout time as difference between ttmr and ttcr */ +void cpu_openrisc_timer_update(OpenRISCCPU *cpu) +{ + uint32_t wait; + uint64_t now, next; + + if (!cpu->env.is_counting) { + return; + } + + cpu_openrisc_count_update(cpu); + now = or1k_timer->last_clk; + + if ((cpu->env.ttmr & TTMR_TP) <= (or1k_timer->ttcr & TTMR_TP)) { + wait = TTMR_TP - (or1k_timer->ttcr & TTMR_TP) + 1; + wait += cpu->env.ttmr & TTMR_TP; + } else { + wait = (cpu->env.ttmr & TTMR_TP) - (or1k_timer->ttcr & TTMR_TP); + } + next = now + (uint64_t)wait * TIMER_PERIOD; + timer_mod(cpu->env.timer, next); +} + +void cpu_openrisc_count_start(OpenRISCCPU *cpu) +{ + cpu->env.is_counting = 1; + cpu_openrisc_count_update(cpu); +} + +void cpu_openrisc_count_stop(OpenRISCCPU *cpu) +{ + timer_del(cpu->env.timer); + cpu_openrisc_count_update(cpu); + cpu->env.is_counting = 0; +} + +static void openrisc_timer_cb(void *opaque) +{ + OpenRISCCPU *cpu = opaque; + + if ((cpu->env.ttmr & TTMR_IE) && + timer_expired(cpu->env.timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL))) { + CPUState *cs = CPU(cpu); + + cpu->env.ttmr |= TTMR_IP; + cs->interrupt_request |= CPU_INTERRUPT_TIMER; + } + + switch (cpu->env.ttmr & TTMR_M) { + case TIMER_NONE: + break; + case TIMER_INTR: + or1k_timer->ttcr = 0; + break; + case TIMER_SHOT: + cpu_openrisc_count_stop(cpu); + break; + case TIMER_CONT: + break; + } + + cpu_openrisc_timer_update(cpu); + qemu_cpu_kick(CPU(cpu)); +} + +static const VMStateDescription vmstate_or1k_timer = { + .name = "or1k_timer", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(ttcr, OR1KTimerState), + VMSTATE_UINT64(last_clk, OR1KTimerState), + VMSTATE_END_OF_LIST() + } +}; + +void cpu_openrisc_clock_init(OpenRISCCPU *cpu) +{ + cpu->env.timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &openrisc_timer_cb, cpu); + cpu->env.ttmr = 0x00000000; + + if (or1k_timer == NULL) { + or1k_timer = g_new0(OR1KTimerState, 1); + vmstate_register(NULL, 0, &vmstate_or1k_timer, or1k_timer); + } +} diff --git a/hw/openrisc/meson.build b/hw/openrisc/meson.build new file mode 100644 index 000000000..947f63ee0 --- /dev/null +++ b/hw/openrisc/meson.build @@ -0,0 +1,5 @@ +openrisc_ss = ss.source_set() +openrisc_ss.add(files('cputimer.c')) +openrisc_ss.add(when: 'CONFIG_OR1K_SIM', if_true: files('openrisc_sim.c')) + +hw_arch += {'openrisc': openrisc_ss} diff --git a/hw/openrisc/openrisc_sim.c b/hw/openrisc/openrisc_sim.c new file mode 100644 index 000000000..73fe383c2 --- /dev/null +++ b/hw/openrisc/openrisc_sim.c @@ -0,0 +1,195 @@ +/* + * 
OpenRISC simulator for use as an IIS. + * + * Copyright (c) 2011-2012 Jia Liu + * Feng Gao + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ + +#include "qemu/osdep.h" +#include "qemu/error-report.h" +#include "qapi/error.h" +#include "cpu.h" +#include "hw/irq.h" +#include "hw/boards.h" +#include "elf.h" +#include "hw/char/serial.h" +#include "net/net.h" +#include "hw/loader.h" +#include "hw/qdev-properties.h" +#include "sysemu/sysemu.h" +#include "hw/sysbus.h" +#include "sysemu/qtest.h" +#include "sysemu/reset.h" +#include "hw/core/split-irq.h" + +#define KERNEL_LOAD_ADDR 0x100 + +static struct openrisc_boot_info { + uint32_t bootstrap_pc; +} boot_info; + +static void main_cpu_reset(void *opaque) +{ + OpenRISCCPU *cpu = opaque; + CPUState *cs = CPU(cpu); + + cpu_reset(CPU(cpu)); + + cpu_set_pc(cs, boot_info.bootstrap_pc); +} + +static qemu_irq get_cpu_irq(OpenRISCCPU *cpus[], int cpunum, int irq_pin) +{ + return qdev_get_gpio_in_named(DEVICE(cpus[cpunum]), "IRQ", irq_pin); +} + +static void openrisc_sim_net_init(hwaddr base, hwaddr descriptors, + int num_cpus, OpenRISCCPU *cpus[], + int irq_pin, NICInfo *nd) +{ + DeviceState *dev; + SysBusDevice *s; + int i; + + dev = qdev_new("open_eth"); + qdev_set_nic_properties(dev, nd); + + s = SYS_BUS_DEVICE(dev); + sysbus_realize_and_unref(s, &error_fatal); + if (num_cpus > 1) { + DeviceState *splitter = qdev_new(TYPE_SPLIT_IRQ); + qdev_prop_set_uint32(splitter, "num-lines", num_cpus); + qdev_realize_and_unref(splitter, NULL, &error_fatal); + for (i = 0; i < num_cpus; i++) { + qdev_connect_gpio_out(splitter, i, get_cpu_irq(cpus, i, irq_pin)); + } + sysbus_connect_irq(s, 0, qdev_get_gpio_in(splitter, 0)); + } else { + sysbus_connect_irq(s, 0, get_cpu_irq(cpus, 0, irq_pin)); + } + sysbus_mmio_map(s, 0, base); + sysbus_mmio_map(s, 1, descriptors); +} + +static void openrisc_sim_ompic_init(hwaddr base, int num_cpus, + OpenRISCCPU *cpus[], int irq_pin) +{ + DeviceState *dev; + SysBusDevice *s; + int i; + + dev = qdev_new("or1k-ompic"); + qdev_prop_set_uint32(dev, "num-cpus", num_cpus); + + s = SYS_BUS_DEVICE(dev); + sysbus_realize_and_unref(s, &error_fatal); + for (i = 0; i < num_cpus; i++) { + sysbus_connect_irq(s, i, get_cpu_irq(cpus, i, irq_pin)); + } + sysbus_mmio_map(s, 0, base); +} + +static void openrisc_load_kernel(ram_addr_t ram_size, + const char *kernel_filename) +{ + long kernel_size; + uint64_t elf_entry; + hwaddr entry; + + if (kernel_filename && !qtest_enabled()) { + kernel_size = load_elf(kernel_filename, NULL, NULL, NULL, + &elf_entry, NULL, NULL, NULL, 1, EM_OPENRISC, + 1, 0); + entry = elf_entry; + if (kernel_size < 0) { + kernel_size = load_uimage(kernel_filename, + &entry, NULL, NULL, NULL, NULL); + } + if (kernel_size < 0) { + kernel_size = load_image_targphys(kernel_filename, + KERNEL_LOAD_ADDR, + ram_size - KERNEL_LOAD_ADDR); + } + + if (entry <= 0) { + entry = KERNEL_LOAD_ADDR; + } + + if (kernel_size < 0) { + error_report("couldn't 
load the kernel '%s'", kernel_filename); + exit(1); + } + boot_info.bootstrap_pc = entry; + } +} + +static void openrisc_sim_init(MachineState *machine) +{ + ram_addr_t ram_size = machine->ram_size; + const char *kernel_filename = machine->kernel_filename; + OpenRISCCPU *cpus[2] = {}; + MemoryRegion *ram; + qemu_irq serial_irq; + int n; + unsigned int smp_cpus = machine->smp.cpus; + + assert(smp_cpus >= 1 && smp_cpus <= 2); + for (n = 0; n < smp_cpus; n++) { + cpus[n] = OPENRISC_CPU(cpu_create(machine->cpu_type)); + if (cpus[n] == NULL) { + fprintf(stderr, "Unable to find CPU definition!\n"); + exit(1); + } + + cpu_openrisc_clock_init(cpus[n]); + + qemu_register_reset(main_cpu_reset, cpus[n]); + } + + ram = g_malloc(sizeof(*ram)); + memory_region_init_ram(ram, NULL, "openrisc.ram", ram_size, &error_fatal); + memory_region_add_subregion(get_system_memory(), 0, ram); + + if (nd_table[0].used) { + openrisc_sim_net_init(0x92000000, 0x92000400, smp_cpus, + cpus, 4, nd_table); + } + + if (smp_cpus > 1) { + openrisc_sim_ompic_init(0x98000000, smp_cpus, cpus, 1); + + serial_irq = qemu_irq_split(get_cpu_irq(cpus, 0, 2), + get_cpu_irq(cpus, 1, 2)); + } else { + serial_irq = get_cpu_irq(cpus, 0, 2); + } + + serial_mm_init(get_system_memory(), 0x90000000, 0, serial_irq, + 115200, serial_hd(0), DEVICE_NATIVE_ENDIAN); + + openrisc_load_kernel(ram_size, kernel_filename); +} + +static void openrisc_sim_machine_init(MachineClass *mc) +{ + mc->desc = "or1k simulation"; + mc->init = openrisc_sim_init; + mc->max_cpus = 2; + mc->is_default = true; + mc->default_cpu_type = OPENRISC_CPU_TYPE_NAME("or1200"); +} + +DEFINE_MACHINE("or1k-sim", openrisc_sim_machine_init) diff --git a/hw/pci-bridge/Kconfig b/hw/pci-bridge/Kconfig new file mode 100644 index 000000000..f8df4315b --- /dev/null +++ b/hw/pci-bridge/Kconfig @@ -0,0 +1,29 @@ +config PCIE_PORT + bool + default y if PCI_DEVICES + depends on PCI_EXPRESS && MSI_NONBROKEN + +config PXB + bool + default y if Q35 || ARM_VIRT + +config XIO3130 + bool + default y if PCI_DEVICES + depends on PCI_EXPRESS && MSI_NONBROKEN + +config IOH3420 + bool + default y if PCI_DEVICES + depends on PCI_EXPRESS && MSI_NONBROKEN + +config I82801B11 + bool + default y if PCI_DEVICES + depends on PCI_EXPRESS + +config DEC_PCI + bool + +config SIMBA + bool diff --git a/hw/pci-bridge/dec.c b/hw/pci-bridge/dec.c new file mode 100644 index 000000000..4773d07e6 --- /dev/null +++ b/hw/pci-bridge/dec.c @@ -0,0 +1,164 @@ +/* + * QEMU DEC 21154 PCI bridge + * + * Copyright (c) 2006-2007 Fabrice Bellard + * Copyright (c) 2007 Jocelyn Mayer + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "dec.h" +#include "hw/sysbus.h" +#include "qapi/error.h" +#include "qemu/module.h" +#include "hw/pci/pci.h" +#include "hw/pci/pci_host.h" +#include "hw/pci/pci_bridge.h" +#include "hw/pci/pci_bus.h" +#include "qom/object.h" + +OBJECT_DECLARE_SIMPLE_TYPE(DECState, DEC_21154) + +struct DECState { + PCIHostState parent_obj; +}; + +static int dec_map_irq(PCIDevice *pci_dev, int irq_num) +{ + return irq_num; +} + +static void dec_pci_bridge_realize(PCIDevice *pci_dev, Error **errp) +{ + pci_bridge_initfn(pci_dev, TYPE_PCI_BUS); +} + +static void dec_21154_pci_bridge_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + + set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); + k->realize = dec_pci_bridge_realize; + k->exit = pci_bridge_exitfn; + k->vendor_id = PCI_VENDOR_ID_DEC; + k->device_id = PCI_DEVICE_ID_DEC_21154; + k->config_write = pci_bridge_write_config; + k->is_bridge = true; + dc->desc = "DEC 21154 PCI-PCI bridge"; + dc->reset = pci_bridge_reset; + dc->vmsd = &vmstate_pci_device; +} + +static const TypeInfo dec_21154_pci_bridge_info = { + .name = "dec-21154-p2p-bridge", + .parent = TYPE_PCI_BRIDGE, + .instance_size = sizeof(PCIBridge), + .class_init = dec_21154_pci_bridge_class_init, + .interfaces = (InterfaceInfo[]) { + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { }, + }, +}; + +PCIBus *pci_dec_21154_init(PCIBus *parent_bus, int devfn) +{ + PCIDevice *dev; + PCIBridge *br; + + dev = pci_new_multifunction(devfn, false, "dec-21154-p2p-bridge"); + br = PCI_BRIDGE(dev); + pci_bridge_map_irq(br, "DEC 21154 PCI-PCI bridge", dec_map_irq); + pci_realize_and_unref(dev, parent_bus, &error_fatal); + return pci_bridge_get_sec_bus(br); +} + +static void pci_dec_21154_device_realize(DeviceState *dev, Error **errp) +{ + PCIHostState *phb; + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + + phb = PCI_HOST_BRIDGE(dev); + + memory_region_init_io(&phb->conf_mem, OBJECT(dev), &pci_host_conf_le_ops, + dev, "pci-conf-idx", 0x1000); + memory_region_init_io(&phb->data_mem, OBJECT(dev), &pci_host_data_le_ops, + dev, "pci-data-idx", 0x1000); + sysbus_init_mmio(sbd, &phb->conf_mem); + sysbus_init_mmio(sbd, &phb->data_mem); +} + +static void dec_21154_pci_host_realize(PCIDevice *d, Error **errp) +{ + /* PCI2PCI bridge same values as PearPC - check this */ +} + +static void dec_21154_pci_host_class_init(ObjectClass *klass, void *data) +{ + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + DeviceClass *dc = DEVICE_CLASS(klass); + + set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); + k->realize = dec_21154_pci_host_realize; + k->vendor_id = PCI_VENDOR_ID_DEC; + k->device_id = PCI_DEVICE_ID_DEC_21154; + k->revision = 0x02; + k->class_id = PCI_CLASS_BRIDGE_PCI; + k->is_bridge = true; + /* + * PCI-facing part of the host bridge, not usable without the + * host-facing part, which can't be device_add'ed, yet. 
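+ * Setting user_creatable to false below is what keeps -device and
+ * device_add from instantiating this PCI-facing part on its own.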
+ */ + dc->user_creatable = false; +} + +static const TypeInfo dec_21154_pci_host_info = { + .name = "dec-21154", + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(PCIDevice), + .class_init = dec_21154_pci_host_class_init, + .interfaces = (InterfaceInfo[]) { + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { }, + }, +}; + +static void pci_dec_21154_device_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = pci_dec_21154_device_realize; +} + +static const TypeInfo pci_dec_21154_device_info = { + .name = TYPE_DEC_21154, + .parent = TYPE_PCI_HOST_BRIDGE, + .instance_size = sizeof(DECState), + .class_init = pci_dec_21154_device_class_init, +}; + +static void dec_register_types(void) +{ + type_register_static(&pci_dec_21154_device_info); + type_register_static(&dec_21154_pci_host_info); + type_register_static(&dec_21154_pci_bridge_info); +} + +type_init(dec_register_types) diff --git a/hw/pci-bridge/dec.h b/hw/pci-bridge/dec.h new file mode 100644 index 000000000..869e90b13 --- /dev/null +++ b/hw/pci-bridge/dec.h @@ -0,0 +1,9 @@ +#ifndef HW_PCI_BRIDGE_DEC_H +#define HW_PCI_BRIDGE_DEC_H + + +#define TYPE_DEC_21154 "dec-21154-sysbus" + +PCIBus *pci_dec_21154_init(PCIBus *parent_bus, int devfn); + +#endif diff --git a/hw/pci-bridge/gen_pcie_root_port.c b/hw/pci-bridge/gen_pcie_root_port.c new file mode 100644 index 000000000..20099a8ae --- /dev/null +++ b/hw/pci-bridge/gen_pcie_root_port.c @@ -0,0 +1,178 @@ +/* + * Generic PCI Express Root Port emulation + * + * Copyright (C) 2017 Red Hat Inc + * + * Authors: + * Marcel Apfelbaum + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/module.h" +#include "hw/pci/msix.h" +#include "hw/pci/pcie_port.h" +#include "hw/qdev-properties.h" +#include "hw/qdev-properties-system.h" +#include "migration/vmstate.h" +#include "qom/object.h" + +#define TYPE_GEN_PCIE_ROOT_PORT "pcie-root-port" +OBJECT_DECLARE_SIMPLE_TYPE(GenPCIERootPort, GEN_PCIE_ROOT_PORT) + +#define GEN_PCIE_ROOT_PORT_AER_OFFSET 0x100 +#define GEN_PCIE_ROOT_PORT_ACS_OFFSET \ + (GEN_PCIE_ROOT_PORT_AER_OFFSET + PCI_ERR_SIZEOF) + +#define GEN_PCIE_ROOT_PORT_MSIX_NR_VECTOR 1 +#define GEN_PCIE_ROOT_DEFAULT_IO_RANGE 4096 + +struct GenPCIERootPort { + /*< private >*/ + PCIESlot parent_obj; + /*< public >*/ + + bool migrate_msix; + + /* additional resources to reserve */ + PCIResReserve res_reserve; +}; + +static uint8_t gen_rp_aer_vector(const PCIDevice *d) +{ + return 0; +} + +static int gen_rp_interrupts_init(PCIDevice *d, Error **errp) +{ + int rc; + + rc = msix_init_exclusive_bar(d, GEN_PCIE_ROOT_PORT_MSIX_NR_VECTOR, 0, errp); + + if (rc < 0) { + assert(rc == -ENOTSUP); + } else { + msix_vector_use(d, 0); + } + + return rc; +} + +static void gen_rp_interrupts_uninit(PCIDevice *d) +{ + msix_uninit_exclusive_bar(d); +} + +static bool gen_rp_test_migrate_msix(void *opaque, int version_id) +{ + GenPCIERootPort *rp = opaque; + + return rp->migrate_msix; +} + +static void gen_rp_realize(DeviceState *dev, Error **errp) +{ + PCIDevice *d = PCI_DEVICE(dev); + PCIESlot *s = PCIE_SLOT(d); + GenPCIERootPort *grp = GEN_PCIE_ROOT_PORT(d); + PCIERootPortClass *rpc = PCIE_ROOT_PORT_GET_CLASS(d); + Error *local_err = NULL; + + rpc->parent_realize(dev, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + + if (grp->res_reserve.io == -1 && s->hotplug && !s->native_hotplug) { + 
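+ /*
+ * With ACPI hotplug driving the slot (native-hotplug off), guest
+ * firmware cannot grow the bridge windows after boot, so reserve a
+ * default 4 KiB I/O window up front for devices that may be
+ * hotplugged behind this port.
+ */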
grp->res_reserve.io = GEN_PCIE_ROOT_DEFAULT_IO_RANGE; + } + int rc = pci_bridge_qemu_reserve_cap_init(d, 0, + grp->res_reserve, errp); + + if (rc < 0) { + rpc->parent_class.exit(d); + return; + } + + if (!grp->res_reserve.io) { + pci_word_test_and_clear_mask(d->wmask + PCI_COMMAND, + PCI_COMMAND_IO); + d->wmask[PCI_IO_BASE] = 0; + d->wmask[PCI_IO_LIMIT] = 0; + } +} + +static const VMStateDescription vmstate_rp_dev = { + .name = "pcie-root-port", + .priority = MIG_PRI_PCI_BUS, + .version_id = 1, + .minimum_version_id = 1, + .post_load = pcie_cap_slot_post_load, + .fields = (VMStateField[]) { + VMSTATE_PCI_DEVICE(parent_obj.parent_obj.parent_obj, PCIESlot), + VMSTATE_STRUCT(parent_obj.parent_obj.parent_obj.exp.aer_log, + PCIESlot, 0, vmstate_pcie_aer_log, PCIEAERLog), + VMSTATE_MSIX_TEST(parent_obj.parent_obj.parent_obj.parent_obj, + GenPCIERootPort, + gen_rp_test_migrate_msix), + VMSTATE_END_OF_LIST() + } +}; + +static Property gen_rp_props[] = { + DEFINE_PROP_BOOL("x-migrate-msix", GenPCIERootPort, + migrate_msix, true), + DEFINE_PROP_UINT32("bus-reserve", GenPCIERootPort, + res_reserve.bus, -1), + DEFINE_PROP_SIZE("io-reserve", GenPCIERootPort, + res_reserve.io, -1), + DEFINE_PROP_SIZE("mem-reserve", GenPCIERootPort, + res_reserve.mem_non_pref, -1), + DEFINE_PROP_SIZE("pref32-reserve", GenPCIERootPort, + res_reserve.mem_pref_32, -1), + DEFINE_PROP_SIZE("pref64-reserve", GenPCIERootPort, + res_reserve.mem_pref_64, -1), + DEFINE_PROP_PCIE_LINK_SPEED("x-speed", PCIESlot, + speed, PCIE_LINK_SPEED_16), + DEFINE_PROP_PCIE_LINK_WIDTH("x-width", PCIESlot, + width, PCIE_LINK_WIDTH_32), + DEFINE_PROP_END_OF_LIST() +}; + +static void gen_rp_dev_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + PCIERootPortClass *rpc = PCIE_ROOT_PORT_CLASS(klass); + + k->vendor_id = PCI_VENDOR_ID_REDHAT; + k->device_id = PCI_DEVICE_ID_REDHAT_PCIE_RP; + dc->desc = "PCI Express Root Port"; + dc->vmsd = &vmstate_rp_dev; + device_class_set_props(dc, gen_rp_props); + + device_class_set_parent_realize(dc, gen_rp_realize, &rpc->parent_realize); + + rpc->aer_vector = gen_rp_aer_vector; + rpc->interrupts_init = gen_rp_interrupts_init; + rpc->interrupts_uninit = gen_rp_interrupts_uninit; + rpc->aer_offset = GEN_PCIE_ROOT_PORT_AER_OFFSET; + rpc->acs_offset = GEN_PCIE_ROOT_PORT_ACS_OFFSET; +} + +static const TypeInfo gen_rp_dev_info = { + .name = TYPE_GEN_PCIE_ROOT_PORT, + .parent = TYPE_PCIE_ROOT_PORT, + .instance_size = sizeof(GenPCIERootPort), + .class_init = gen_rp_dev_class_init, +}; + + static void gen_rp_register_types(void) + { + type_register_static(&gen_rp_dev_info); + } + type_init(gen_rp_register_types) diff --git a/hw/pci-bridge/i82801b11.c b/hw/pci-bridge/i82801b11.c new file mode 100644 index 000000000..f28181e21 --- /dev/null +++ b/hw/pci-bridge/i82801b11.c @@ -0,0 +1,122 @@ +/* + * Copyright (c) 2006 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ +/* + * QEMU i82801b11 dmi-to-pci Bridge Emulation + * + * Copyright (c) 2009, 2010, 2011 + * Isaku Yamahata + * VA Linux Systems Japan K.K. + * Copyright (C) 2012 Jason Baron + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see + */ + +#include "qemu/osdep.h" +#include "hw/pci/pci.h" +#include "migration/vmstate.h" +#include "qemu/module.h" +#include "hw/i386/ich9.h" + +/*****************************************************************************/ +/* ICH9 DMI-to-PCI bridge */ +#define I82801ba_SSVID_OFFSET 0x50 +#define I82801ba_SSVID_SVID 0 +#define I82801ba_SSVID_SSID 0 + +typedef struct I82801b11Bridge { + /*< private >*/ + PCIBridge parent_obj; + /*< public >*/ +} I82801b11Bridge; + +static void i82801b11_bridge_realize(PCIDevice *d, Error **errp) +{ + int rc; + + pci_bridge_initfn(d, TYPE_PCI_BUS); + + rc = pci_bridge_ssvid_init(d, I82801ba_SSVID_OFFSET, + I82801ba_SSVID_SVID, I82801ba_SSVID_SSID, + errp); + if (rc < 0) { + goto err_bridge; + } + pci_config_set_prog_interface(d->config, PCI_CLASS_BRIDGE_PCI_INF_SUB); + return; + +err_bridge: + pci_bridge_exitfn(d); +} + +static const VMStateDescription i82801b11_bridge_dev_vmstate = { + .name = "i82801b11_bridge", + .priority = MIG_PRI_PCI_BUS, + .fields = (VMStateField[]) { + VMSTATE_PCI_DEVICE(parent_obj, PCIBridge), + VMSTATE_END_OF_LIST() + } +}; + +static void i82801b11_bridge_class_init(ObjectClass *klass, void *data) +{ + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + DeviceClass *dc = DEVICE_CLASS(klass); + + k->is_bridge = true; + k->vendor_id = PCI_VENDOR_ID_INTEL; + k->device_id = PCI_DEVICE_ID_INTEL_82801BA_11; + k->revision = ICH9_D2P_A2_REVISION; + k->realize = i82801b11_bridge_realize; + k->config_write = pci_bridge_write_config; + dc->vmsd = &i82801b11_bridge_dev_vmstate; + dc->reset = pci_bridge_reset; + set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); +} + +static const TypeInfo i82801b11_bridge_info = { + .name = "i82801b11-bridge", + .parent = TYPE_PCI_BRIDGE, + .instance_size = sizeof(I82801b11Bridge), + .class_init = i82801b11_bridge_class_init, + .interfaces = (InterfaceInfo[]) { + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { }, + }, +}; + +static void d2pbr_register(void) +{ + type_register_static(&i82801b11_bridge_info); +} + +type_init(d2pbr_register); diff --git a/hw/pci-bridge/ioh3420.c b/hw/pci-bridge/ioh3420.c new file mode 100644 index 000000000..f1e16135a --- /dev/null +++ b/hw/pci-bridge/ioh3420.c @@ -0,0 +1,130 @@ +/* + * 
ioh3420.c + * Intel X58 north bridge IOH + * PCI Express root port device id 3420 + * + * Copyright (c) 2010 Isaku Yamahata + * VA Linux Systems Japan K.K. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . + */ + +#include "qemu/osdep.h" +#include "hw/pci/pci_ids.h" +#include "hw/pci/msi.h" +#include "hw/pci/pcie.h" +#include "hw/pci/pcie_port.h" +#include "migration/vmstate.h" +#include "qemu/module.h" + +#define PCI_DEVICE_ID_IOH_EPORT 0x3420 /* D0:F0 express mode */ +#define PCI_DEVICE_ID_IOH_REV 0x2 +#define IOH_EP_SSVID_OFFSET 0x40 +#define IOH_EP_SSVID_SVID PCI_VENDOR_ID_INTEL +#define IOH_EP_SSVID_SSID 0 +#define IOH_EP_MSI_OFFSET 0x60 +#define IOH_EP_MSI_SUPPORTED_FLAGS PCI_MSI_FLAGS_MASKBIT +#define IOH_EP_MSI_NR_VECTOR 2 +#define IOH_EP_EXP_OFFSET 0x90 +#define IOH_EP_AER_OFFSET 0x100 + +/* + * If two MSI vector are allocated, Advanced Error Interrupt Message Number + * is 1. otherwise 0. + * 17.12.5.10 RPERRSTS, 32:27 bit Advanced Error Interrupt Message Number. + */ +static uint8_t ioh3420_aer_vector(const PCIDevice *d) +{ + switch (msi_nr_vectors_allocated(d)) { + case 1: + return 0; + case 2: + return 1; + case 4: + case 8: + case 16: + case 32: + default: + break; + } + abort(); + return 0; +} + +static int ioh3420_interrupts_init(PCIDevice *d, Error **errp) +{ + int rc; + + rc = msi_init(d, IOH_EP_MSI_OFFSET, IOH_EP_MSI_NR_VECTOR, + IOH_EP_MSI_SUPPORTED_FLAGS & PCI_MSI_FLAGS_64BIT, + IOH_EP_MSI_SUPPORTED_FLAGS & PCI_MSI_FLAGS_MASKBIT, + errp); + if (rc < 0) { + assert(rc == -ENOTSUP); + } + + return rc; +} + +static void ioh3420_interrupts_uninit(PCIDevice *d) +{ + msi_uninit(d); +} + +static const VMStateDescription vmstate_ioh3420 = { + .name = "ioh-3240-express-root-port", + .priority = MIG_PRI_PCI_BUS, + .version_id = 1, + .minimum_version_id = 1, + .post_load = pcie_cap_slot_post_load, + .fields = (VMStateField[]) { + VMSTATE_PCI_DEVICE(parent_obj.parent_obj.parent_obj, PCIESlot), + VMSTATE_STRUCT(parent_obj.parent_obj.parent_obj.exp.aer_log, + PCIESlot, 0, vmstate_pcie_aer_log, PCIEAERLog), + VMSTATE_END_OF_LIST() + } +}; + +static void ioh3420_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + PCIERootPortClass *rpc = PCIE_ROOT_PORT_CLASS(klass); + + k->vendor_id = PCI_VENDOR_ID_INTEL; + k->device_id = PCI_DEVICE_ID_IOH_EPORT; + k->revision = PCI_DEVICE_ID_IOH_REV; + dc->desc = "Intel IOH device id 3420 PCIE Root Port"; + dc->vmsd = &vmstate_ioh3420; + rpc->aer_vector = ioh3420_aer_vector; + rpc->interrupts_init = ioh3420_interrupts_init; + rpc->interrupts_uninit = ioh3420_interrupts_uninit; + rpc->exp_offset = IOH_EP_EXP_OFFSET; + rpc->aer_offset = IOH_EP_AER_OFFSET; + rpc->ssvid_offset = IOH_EP_SSVID_OFFSET; + rpc->ssid = IOH_EP_SSVID_SSID; +} + +static const TypeInfo ioh3420_info = { + .name = "ioh3420", + .parent = TYPE_PCIE_ROOT_PORT, + .class_init = ioh3420_class_init, +}; + +static void 
ioh3420_register_types(void) +{ + type_register_static(&ioh3420_info); +} + +type_init(ioh3420_register_types) diff --git a/hw/pci-bridge/meson.build b/hw/pci-bridge/meson.build new file mode 100644 index 000000000..daab8acf2 --- /dev/null +++ b/hw/pci-bridge/meson.build @@ -0,0 +1,14 @@ +pci_ss = ss.source_set() +pci_ss.add(files('pci_bridge_dev.c')) +pci_ss.add(when: 'CONFIG_I82801B11', if_true: files('i82801b11.c')) +pci_ss.add(when: 'CONFIG_IOH3420', if_true: files('ioh3420.c')) +pci_ss.add(when: 'CONFIG_PCIE_PORT', if_true: files('pcie_root_port.c', 'gen_pcie_root_port.c', 'pcie_pci_bridge.c')) +pci_ss.add(when: 'CONFIG_PXB', if_true: files('pci_expander_bridge.c')) +pci_ss.add(when: 'CONFIG_XIO3130', if_true: files('xio3130_upstream.c', 'xio3130_downstream.c')) + +# NewWorld PowerMac +pci_ss.add(when: 'CONFIG_DEC_PCI', if_true: files('dec.c')) +# Sun4u +pci_ss.add(when: 'CONFIG_SIMBA', if_true: files('simba.c')) + +softmmu_ss.add_all(when: 'CONFIG_PCI', if_true: pci_ss) diff --git a/hw/pci-bridge/pci_bridge_dev.c b/hw/pci-bridge/pci_bridge_dev.c new file mode 100644 index 000000000..657a06ddb --- /dev/null +++ b/hw/pci-bridge/pci_bridge_dev.c @@ -0,0 +1,308 @@ +/* + * Standard PCI Bridge Device + * + * Copyright (c) 2011 Red Hat Inc. Author: Michael S. Tsirkin + * + * http://www.pcisig.com/specifications/conventional/pci_to_pci_bridge_architecture/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . 
+ */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/module.h" +#include "hw/pci/pci_bridge.h" +#include "hw/pci/pci_ids.h" +#include "hw/pci/msi.h" +#include "hw/pci/shpc.h" +#include "hw/pci/slotid_cap.h" +#include "hw/qdev-properties.h" +#include "exec/memory.h" +#include "hw/pci/pci_bus.h" +#include "hw/hotplug.h" +#include "qom/object.h" + +#define TYPE_PCI_BRIDGE_DEV "pci-bridge" +#define TYPE_PCI_BRIDGE_SEAT_DEV "pci-bridge-seat" +OBJECT_DECLARE_SIMPLE_TYPE(PCIBridgeDev, PCI_BRIDGE_DEV) + +struct PCIBridgeDev { + /*< private >*/ + PCIBridge parent_obj; + /*< public >*/ + + MemoryRegion bar; + uint8_t chassis_nr; +#define PCI_BRIDGE_DEV_F_SHPC_REQ 0 + uint32_t flags; + + OnOffAuto msi; + + /* additional resources to reserve */ + PCIResReserve res_reserve; +}; + +static void pci_bridge_dev_realize(PCIDevice *dev, Error **errp) +{ + PCIBridge *br = PCI_BRIDGE(dev); + PCIBridgeDev *bridge_dev = PCI_BRIDGE_DEV(dev); + int err; + Error *local_err = NULL; + + pci_bridge_initfn(dev, TYPE_PCI_BUS); + + if (bridge_dev->flags & (1 << PCI_BRIDGE_DEV_F_SHPC_REQ)) { + dev->config[PCI_INTERRUPT_PIN] = 0x1; + memory_region_init(&bridge_dev->bar, OBJECT(dev), "shpc-bar", + shpc_bar_size(dev)); + err = shpc_init(dev, &br->sec_bus, &bridge_dev->bar, 0, errp); + if (err) { + goto shpc_error; + } + } else { + /* MSI is not applicable without SHPC */ + bridge_dev->msi = ON_OFF_AUTO_OFF; + } + + err = slotid_cap_init(dev, 0, bridge_dev->chassis_nr, 0, errp); + if (err) { + goto slotid_error; + } + + if (bridge_dev->msi != ON_OFF_AUTO_OFF) { + /* it means SHPC exists, because MSI is needed by SHPC */ + + err = msi_init(dev, 0, 1, true, true, &local_err); + /* Any error other than -ENOTSUP(board's MSI support is broken) + * is a programming error */ + assert(!err || err == -ENOTSUP); + if (err && bridge_dev->msi == ON_OFF_AUTO_ON) { + /* Can't satisfy user's explicit msi=on request, fail */ + error_append_hint(&local_err, "You have to use msi=auto (default) " + "or msi=off with this machine type.\n"); + error_propagate(errp, local_err); + goto msi_error; + } + assert(!local_err || bridge_dev->msi == ON_OFF_AUTO_AUTO); + /* With msi=auto, we fall back to MSI off silently */ + error_free(local_err); + } + + err = pci_bridge_qemu_reserve_cap_init(dev, 0, + bridge_dev->res_reserve, errp); + if (err) { + goto cap_error; + } + + if (shpc_present(dev)) { + /* TODO: spec recommends using 64 bit prefetcheable BAR. + * Check whether that works well. 
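+ * (The SHPC BAR registered just below is already a 64-bit memory
+ * BAR; the open question is whether to mark it prefetchable too.)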
*/ + pci_register_bar(dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY | + PCI_BASE_ADDRESS_MEM_TYPE_64, &bridge_dev->bar); + } + return; + +cap_error: + msi_uninit(dev); +msi_error: + slotid_cap_cleanup(dev); +slotid_error: + if (shpc_present(dev)) { + shpc_cleanup(dev, &bridge_dev->bar); + } +shpc_error: + pci_bridge_exitfn(dev); +} + +static void pci_bridge_dev_exitfn(PCIDevice *dev) +{ + PCIBridgeDev *bridge_dev = PCI_BRIDGE_DEV(dev); + + pci_del_capability(dev, PCI_CAP_ID_VNDR, sizeof(PCIBridgeQemuCap)); + if (msi_present(dev)) { + msi_uninit(dev); + } + slotid_cap_cleanup(dev); + if (shpc_present(dev)) { + shpc_cleanup(dev, &bridge_dev->bar); + } + pci_bridge_exitfn(dev); +} + +static void pci_bridge_dev_instance_finalize(Object *obj) +{ + /* this function is idempotent and handles (PCIDevice.shpc == NULL) */ + shpc_free(PCI_DEVICE(obj)); +} + +static void pci_bridge_dev_write_config(PCIDevice *d, + uint32_t address, uint32_t val, int len) +{ + pci_bridge_write_config(d, address, val, len); + if (msi_present(d)) { + msi_write_config(d, address, val, len); + } + if (shpc_present(d)) { + shpc_cap_write_config(d, address, val, len); + } +} + +static void qdev_pci_bridge_dev_reset(DeviceState *qdev) +{ + PCIDevice *dev = PCI_DEVICE(qdev); + + pci_bridge_reset(qdev); + if (shpc_present(dev)) { + shpc_reset(dev); + } +} + +static Property pci_bridge_dev_properties[] = { + /* Note: 0 is not a legal chassis number. */ + DEFINE_PROP_UINT8(PCI_BRIDGE_DEV_PROP_CHASSIS_NR, PCIBridgeDev, chassis_nr, + 0), + DEFINE_PROP_ON_OFF_AUTO(PCI_BRIDGE_DEV_PROP_MSI, PCIBridgeDev, msi, + ON_OFF_AUTO_AUTO), + DEFINE_PROP_BIT(PCI_BRIDGE_DEV_PROP_SHPC, PCIBridgeDev, flags, + PCI_BRIDGE_DEV_F_SHPC_REQ, true), + DEFINE_PROP_UINT32("bus-reserve", PCIBridgeDev, + res_reserve.bus, -1), + DEFINE_PROP_SIZE("io-reserve", PCIBridgeDev, + res_reserve.io, -1), + DEFINE_PROP_SIZE("mem-reserve", PCIBridgeDev, + res_reserve.mem_non_pref, -1), + DEFINE_PROP_SIZE("pref32-reserve", PCIBridgeDev, + res_reserve.mem_pref_32, -1), + DEFINE_PROP_SIZE("pref64-reserve", PCIBridgeDev, + res_reserve.mem_pref_64, -1), + + DEFINE_PROP_END_OF_LIST(), +}; + +static bool pci_device_shpc_present(void *opaque, int version_id) +{ + PCIDevice *dev = opaque; + + return shpc_present(dev); +} + +static const VMStateDescription pci_bridge_dev_vmstate = { + .name = "pci_bridge", + .priority = MIG_PRI_PCI_BUS, + .fields = (VMStateField[]) { + VMSTATE_PCI_DEVICE(parent_obj, PCIBridge), + SHPC_VMSTATE(shpc, PCIDevice, pci_device_shpc_present), + VMSTATE_END_OF_LIST() + } +}; + +void pci_bridge_dev_plug_cb(HotplugHandler *hotplug_dev, DeviceState *dev, + Error **errp) +{ + PCIDevice *pci_hotplug_dev = PCI_DEVICE(hotplug_dev); + + if (!shpc_present(pci_hotplug_dev)) { + error_setg(errp, "standard hotplug controller has been disabled for " + "this %s", object_get_typename(OBJECT(hotplug_dev))); + return; + } + shpc_device_plug_cb(hotplug_dev, dev, errp); +} + +void pci_bridge_dev_unplug_cb(HotplugHandler *hotplug_dev, DeviceState *dev, + Error **errp) +{ + PCIDevice *pci_hotplug_dev = PCI_DEVICE(hotplug_dev); + + g_assert(shpc_present(pci_hotplug_dev)); + shpc_device_unplug_cb(hotplug_dev, dev, errp); +} + +void pci_bridge_dev_unplug_request_cb(HotplugHandler *hotplug_dev, + DeviceState *dev, Error **errp) +{ + PCIDevice *pci_hotplug_dev = PCI_DEVICE(hotplug_dev); + + if (!shpc_present(pci_hotplug_dev)) { + error_setg(errp, "standard hotplug controller has been disabled for " + "this %s", object_get_typename(OBJECT(hotplug_dev))); + return; + } + 
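+ /*
+ * Hand the request over to the SHPC controller, which signals the
+ * guest using attention-button semantics; the unplug completes once
+ * the guest powers the slot off.
+ */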
shpc_device_unplug_request_cb(hotplug_dev, dev, errp); +} + +static void pci_bridge_dev_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(klass); + + k->realize = pci_bridge_dev_realize; + k->exit = pci_bridge_dev_exitfn; + k->config_write = pci_bridge_dev_write_config; + k->vendor_id = PCI_VENDOR_ID_REDHAT; + k->device_id = PCI_DEVICE_ID_REDHAT_BRIDGE; + k->class_id = PCI_CLASS_BRIDGE_PCI; + k->is_bridge = true; + dc->desc = "Standard PCI Bridge"; + dc->reset = qdev_pci_bridge_dev_reset; + device_class_set_props(dc, pci_bridge_dev_properties); + dc->vmsd = &pci_bridge_dev_vmstate; + set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); + hc->plug = pci_bridge_dev_plug_cb; + hc->unplug = pci_bridge_dev_unplug_cb; + hc->unplug_request = pci_bridge_dev_unplug_request_cb; +} + +static const TypeInfo pci_bridge_dev_info = { + .name = TYPE_PCI_BRIDGE_DEV, + .parent = TYPE_PCI_BRIDGE, + .instance_size = sizeof(PCIBridgeDev), + .class_init = pci_bridge_dev_class_init, + .instance_finalize = pci_bridge_dev_instance_finalize, + .interfaces = (InterfaceInfo[]) { + { TYPE_HOTPLUG_HANDLER }, + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { } + } +}; + +/* + * Multiseat bridge. Same as the standard pci bridge, only with a + * different pci id, so we can match it easily in the guest for + * automagic multiseat configuration. See docs/multiseat.txt for more. + */ +static void pci_bridge_dev_seat_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + + k->device_id = PCI_DEVICE_ID_REDHAT_BRIDGE_SEAT; + dc->desc = "Standard PCI Bridge (multiseat)"; +} + +static const TypeInfo pci_bridge_dev_seat_info = { + .name = TYPE_PCI_BRIDGE_SEAT_DEV, + .parent = TYPE_PCI_BRIDGE_DEV, + .instance_size = sizeof(PCIBridgeDev), + .class_init = pci_bridge_dev_seat_class_init, +}; + +static void pci_bridge_dev_register(void) +{ + type_register_static(&pci_bridge_dev_info); + type_register_static(&pci_bridge_dev_seat_info); +} + +type_init(pci_bridge_dev_register); diff --git a/hw/pci-bridge/pci_expander_bridge.c b/hw/pci-bridge/pci_expander_bridge.c new file mode 100644 index 000000000..10e6e7c2a --- /dev/null +++ b/hw/pci-bridge/pci_expander_bridge.c @@ -0,0 +1,385 @@ +/* + * PCI Expander Bridge Device Emulation + * + * Copyright (C) 2015 Red Hat Inc + * + * Authors: + * Marcel Apfelbaum + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "hw/pci/pci.h" +#include "hw/pci/pci_bus.h" +#include "hw/pci/pci_host.h" +#include "hw/qdev-properties.h" +#include "hw/pci/pci_bridge.h" +#include "qemu/range.h" +#include "qemu/error-report.h" +#include "qemu/module.h" +#include "sysemu/numa.h" +#include "hw/boards.h" +#include "qom/object.h" + +#define TYPE_PXB_BUS "pxb-bus" +typedef struct PXBBus PXBBus; +DECLARE_INSTANCE_CHECKER(PXBBus, PXB_BUS, + TYPE_PXB_BUS) + +#define TYPE_PXB_PCIE_BUS "pxb-pcie-bus" +DECLARE_INSTANCE_CHECKER(PXBBus, PXB_PCIE_BUS, + TYPE_PXB_PCIE_BUS) + +struct PXBBus { + /*< private >*/ + PCIBus parent_obj; + /*< public >*/ + + char bus_path[8]; +}; + +#define TYPE_PXB_DEVICE "pxb" +typedef struct PXBDev PXBDev; +DECLARE_INSTANCE_CHECKER(PXBDev, PXB_DEV, + TYPE_PXB_DEVICE) + +#define TYPE_PXB_PCIE_DEVICE "pxb-pcie" +DECLARE_INSTANCE_CHECKER(PXBDev, PXB_PCIE_DEV, + TYPE_PXB_PCIE_DEVICE) + +struct PXBDev { + /*< private >*/ + PCIDevice parent_obj; + /*< public >*/ + + uint8_t bus_nr; + uint16_t numa_node; + bool bypass_iommu; +}; + +static PXBDev *convert_to_pxb(PCIDevice *dev) +{ + return pci_bus_is_express(pci_get_bus(dev)) + ? PXB_PCIE_DEV(dev) : PXB_DEV(dev); +} + +static GList *pxb_dev_list; + +#define TYPE_PXB_HOST "pxb-host" + +static int pxb_bus_num(PCIBus *bus) +{ + PXBDev *pxb = convert_to_pxb(bus->parent_dev); + + return pxb->bus_nr; +} + +static uint16_t pxb_bus_numa_node(PCIBus *bus) +{ + PXBDev *pxb = convert_to_pxb(bus->parent_dev); + + return pxb->numa_node; +} + +static void pxb_bus_class_init(ObjectClass *class, void *data) +{ + PCIBusClass *pbc = PCI_BUS_CLASS(class); + + pbc->bus_num = pxb_bus_num; + pbc->numa_node = pxb_bus_numa_node; +} + +static const TypeInfo pxb_bus_info = { + .name = TYPE_PXB_BUS, + .parent = TYPE_PCI_BUS, + .instance_size = sizeof(PXBBus), + .class_init = pxb_bus_class_init, +}; + +static const TypeInfo pxb_pcie_bus_info = { + .name = TYPE_PXB_PCIE_BUS, + .parent = TYPE_PCIE_BUS, + .instance_size = sizeof(PXBBus), + .class_init = pxb_bus_class_init, +}; + +static const char *pxb_host_root_bus_path(PCIHostState *host_bridge, + PCIBus *rootbus) +{ + PXBBus *bus = pci_bus_is_express(rootbus) ? 
+ PXB_PCIE_BUS(rootbus) : PXB_BUS(rootbus); + + snprintf(bus->bus_path, 8, "0000:%02x", pxb_bus_num(rootbus)); + return bus->bus_path; +} + +static char *pxb_host_ofw_unit_address(const SysBusDevice *dev) +{ + const PCIHostState *pxb_host; + const PCIBus *pxb_bus; + const PXBDev *pxb_dev; + int position; + const DeviceState *pxb_dev_base; + const PCIHostState *main_host; + const SysBusDevice *main_host_sbd; + + pxb_host = PCI_HOST_BRIDGE(dev); + pxb_bus = pxb_host->bus; + pxb_dev = convert_to_pxb(pxb_bus->parent_dev); + position = g_list_index(pxb_dev_list, pxb_dev); + assert(position >= 0); + + pxb_dev_base = DEVICE(pxb_dev); + main_host = PCI_HOST_BRIDGE(pxb_dev_base->parent_bus->parent); + main_host_sbd = SYS_BUS_DEVICE(main_host); + + if (main_host_sbd->num_mmio > 0) { + return g_strdup_printf(TARGET_FMT_plx ",%x", + main_host_sbd->mmio[0].addr, position + 1); + } + if (main_host_sbd->num_pio > 0) { + return g_strdup_printf("i%04x,%x", + main_host_sbd->pio[0], position + 1); + } + return NULL; +} + +static void pxb_host_class_init(ObjectClass *class, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(class); + SysBusDeviceClass *sbc = SYS_BUS_DEVICE_CLASS(class); + PCIHostBridgeClass *hc = PCI_HOST_BRIDGE_CLASS(class); + + dc->fw_name = "pci"; + /* Reason: Internal part of the pxb/pxb-pcie device, not usable by itself */ + dc->user_creatable = false; + sbc->explicit_ofw_unit_address = pxb_host_ofw_unit_address; + hc->root_bus_path = pxb_host_root_bus_path; +} + +static const TypeInfo pxb_host_info = { + .name = TYPE_PXB_HOST, + .parent = TYPE_PCI_HOST_BRIDGE, + .class_init = pxb_host_class_init, +}; + +/* + * Registers the PXB bus as a child of pci host root bus. + */ +static void pxb_register_bus(PCIDevice *dev, PCIBus *pxb_bus, Error **errp) +{ + PCIBus *bus = pci_get_bus(dev); + int pxb_bus_num = pci_bus_num(pxb_bus); + + if (bus->parent_dev) { + error_setg(errp, "PXB devices can be attached only to root bus"); + return; + } + + QLIST_FOREACH(bus, &bus->child, sibling) { + if (pci_bus_num(bus) == pxb_bus_num) { + error_setg(errp, "Bus %d is already in use", pxb_bus_num); + return; + } + } + QLIST_INSERT_HEAD(&pci_get_bus(dev)->child, pxb_bus, sibling); +} + +static int pxb_map_irq_fn(PCIDevice *pci_dev, int pin) +{ + PCIDevice *pxb = pci_get_bus(pci_dev)->parent_dev; + + /* + * The bios does not index the pxb slot number when + * it computes the IRQ because it resides on bus 0 + * and not on the current bus. + * However QEMU routes the irq through bus 0 and adds + * the pxb slot to the IRQ computation of the PXB + * device. + * + * Synchronize between bios and QEMU by canceling + * pxb's effect. + */ + return pin - PCI_SLOT(pxb->devfn); +} + +static gint pxb_compare(gconstpointer a, gconstpointer b) +{ + const PXBDev *pxb_a = a, *pxb_b = b; + + return pxb_a->bus_nr < pxb_b->bus_nr ? -1 : + pxb_a->bus_nr > pxb_b->bus_nr ? 
1 : + 0; +} + +static void pxb_dev_realize_common(PCIDevice *dev, bool pcie, Error **errp) +{ + PXBDev *pxb = convert_to_pxb(dev); + DeviceState *ds, *bds = NULL; + PCIBus *bus; + const char *dev_name = NULL; + Error *local_err = NULL; + MachineState *ms = MACHINE(qdev_get_machine()); + + if (ms->numa_state == NULL) { + error_setg(errp, "NUMA is not supported by this machine-type"); + return; + } + + if (pxb->numa_node != NUMA_NODE_UNASSIGNED && + pxb->numa_node >= ms->numa_state->num_nodes) { + error_setg(errp, "Illegal numa node %d", pxb->numa_node); + return; + } + + if (dev->qdev.id && *dev->qdev.id) { + dev_name = dev->qdev.id; + } + + ds = qdev_new(TYPE_PXB_HOST); + if (pcie) { + bus = pci_root_bus_new(ds, dev_name, NULL, NULL, 0, TYPE_PXB_PCIE_BUS); + } else { + bus = pci_root_bus_new(ds, "pxb-internal", NULL, NULL, 0, TYPE_PXB_BUS); + bds = qdev_new("pci-bridge"); + bds->id = g_strdup(dev_name); + qdev_prop_set_uint8(bds, PCI_BRIDGE_DEV_PROP_CHASSIS_NR, pxb->bus_nr); + qdev_prop_set_bit(bds, PCI_BRIDGE_DEV_PROP_SHPC, false); + } + + bus->parent_dev = dev; + bus->address_space_mem = pci_get_bus(dev)->address_space_mem; + bus->address_space_io = pci_get_bus(dev)->address_space_io; + bus->map_irq = pxb_map_irq_fn; + + PCI_HOST_BRIDGE(ds)->bus = bus; + PCI_HOST_BRIDGE(ds)->bypass_iommu = pxb->bypass_iommu; + + pxb_register_bus(dev, bus, &local_err); + if (local_err) { + error_propagate(errp, local_err); + goto err_register_bus; + } + + sysbus_realize_and_unref(SYS_BUS_DEVICE(ds), &error_fatal); + if (bds) { + qdev_realize_and_unref(bds, &bus->qbus, &error_fatal); + } + + pci_word_test_and_set_mask(dev->config + PCI_STATUS, + PCI_STATUS_66MHZ | PCI_STATUS_FAST_BACK); + pci_config_set_class(dev->config, PCI_CLASS_BRIDGE_HOST); + + pxb_dev_list = g_list_insert_sorted(pxb_dev_list, pxb, pxb_compare); + return; + +err_register_bus: + object_unref(OBJECT(bds)); + object_unparent(OBJECT(bus)); + object_unref(OBJECT(ds)); +} + +static void pxb_dev_realize(PCIDevice *dev, Error **errp) +{ + if (pci_bus_is_express(pci_get_bus(dev))) { + error_setg(errp, "pxb devices cannot reside on a PCIe bus"); + return; + } + + pxb_dev_realize_common(dev, false, errp); +} + +static void pxb_dev_exitfn(PCIDevice *pci_dev) +{ + PXBDev *pxb = convert_to_pxb(pci_dev); + + pxb_dev_list = g_list_remove(pxb_dev_list, pxb); +} + +static Property pxb_dev_properties[] = { + /* Note: 0 is not a legal PXB bus number. 
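+ * Bus number 0 always belongs to the machine's main root bus, so
+ * each pxb needs a distinct nonzero bus_nr, e.g. (illustrative
+ * command line):
+ * -device pxb,id=bridge1,bus_nr=4 -device e1000,bus=bridge1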
*/ + DEFINE_PROP_UINT8("bus_nr", PXBDev, bus_nr, 0), + DEFINE_PROP_UINT16("numa_node", PXBDev, numa_node, NUMA_NODE_UNASSIGNED), + DEFINE_PROP_BOOL("bypass_iommu", PXBDev, bypass_iommu, false), + DEFINE_PROP_END_OF_LIST(), +}; + +static void pxb_dev_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + + k->realize = pxb_dev_realize; + k->exit = pxb_dev_exitfn; + k->vendor_id = PCI_VENDOR_ID_REDHAT; + k->device_id = PCI_DEVICE_ID_REDHAT_PXB; + k->class_id = PCI_CLASS_BRIDGE_HOST; + + dc->desc = "PCI Expander Bridge"; + device_class_set_props(dc, pxb_dev_properties); + dc->hotpluggable = false; + set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); +} + +static const TypeInfo pxb_dev_info = { + .name = TYPE_PXB_DEVICE, + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(PXBDev), + .class_init = pxb_dev_class_init, + .interfaces = (InterfaceInfo[]) { + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { }, + }, +}; + +static void pxb_pcie_dev_realize(PCIDevice *dev, Error **errp) +{ + if (!pci_bus_is_express(pci_get_bus(dev))) { + error_setg(errp, "pxb-pcie devices cannot reside on a PCI bus"); + return; + } + + pxb_dev_realize_common(dev, true, errp); +} + +static void pxb_pcie_dev_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + + k->realize = pxb_pcie_dev_realize; + k->exit = pxb_dev_exitfn; + k->vendor_id = PCI_VENDOR_ID_REDHAT; + k->device_id = PCI_DEVICE_ID_REDHAT_PXB_PCIE; + k->class_id = PCI_CLASS_BRIDGE_HOST; + + dc->desc = "PCI Express Expander Bridge"; + device_class_set_props(dc, pxb_dev_properties); + dc->hotpluggable = false; + set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); +} + +static const TypeInfo pxb_pcie_dev_info = { + .name = TYPE_PXB_PCIE_DEVICE, + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(PXBDev), + .class_init = pxb_pcie_dev_class_init, + .interfaces = (InterfaceInfo[]) { + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { }, + }, +}; + +static void pxb_register_types(void) +{ + type_register_static(&pxb_bus_info); + type_register_static(&pxb_pcie_bus_info); + type_register_static(&pxb_host_info); + type_register_static(&pxb_dev_info); + type_register_static(&pxb_pcie_dev_info); +} + +type_init(pxb_register_types) diff --git a/hw/pci-bridge/pcie_pci_bridge.c b/hw/pci-bridge/pcie_pci_bridge.c new file mode 100644 index 000000000..1cd917a45 --- /dev/null +++ b/hw/pci-bridge/pcie_pci_bridge.c @@ -0,0 +1,180 @@ +/* + * QEMU Generic PCIE-PCI Bridge + * + * Copyright (c) 2017 Aleksandr Bezzubikov + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/module.h" +#include "hw/pci/pci.h" +#include "hw/pci/pci_bus.h" +#include "hw/pci/pci_bridge.h" +#include "hw/pci/msi.h" +#include "hw/pci/shpc.h" +#include "hw/pci/slotid_cap.h" +#include "hw/qdev-properties.h" +#include "qom/object.h" + +struct PCIEPCIBridge { + /*< private >*/ + PCIBridge parent_obj; + + OnOffAuto msi; + MemoryRegion shpc_bar; + /*< public >*/ +}; + +#define TYPE_PCIE_PCI_BRIDGE_DEV "pcie-pci-bridge" +OBJECT_DECLARE_SIMPLE_TYPE(PCIEPCIBridge, PCIE_PCI_BRIDGE_DEV) + +static void pcie_pci_bridge_realize(PCIDevice *d, Error **errp) +{ + PCIBridge *br = PCI_BRIDGE(d); + PCIEPCIBridge *pcie_br = PCIE_PCI_BRIDGE_DEV(d); + int rc, pos; + + pci_bridge_initfn(d, TYPE_PCI_BUS); + + d->config[PCI_INTERRUPT_PIN] = 0x1; + memory_region_init(&pcie_br->shpc_bar, OBJECT(d), "shpc-bar", + shpc_bar_size(d)); + rc = shpc_init(d, &br->sec_bus, &pcie_br->shpc_bar, 0, errp); + if (rc) { + goto error; + } + + rc = pcie_cap_init(d, 0, PCI_EXP_TYPE_PCI_BRIDGE, 0, errp); + if (rc < 0) { + goto cap_error; + } + + pos = pci_add_capability(d, PCI_CAP_ID_PM, 0, PCI_PM_SIZEOF, errp); + if (pos < 0) { + goto pm_error; + } + d->exp.pm_cap = pos; + pci_set_word(d->config + pos + PCI_PM_PMC, 0x3); + + pcie_cap_arifwd_init(d); + pcie_cap_deverr_init(d); + + rc = pcie_aer_init(d, PCI_ERR_VER, 0x100, PCI_ERR_SIZEOF, errp); + if (rc < 0) { + goto aer_error; + } + + Error *local_err = NULL; + if (pcie_br->msi != ON_OFF_AUTO_OFF) { + rc = msi_init(d, 0, 1, true, true, &local_err); + if (rc < 0) { + assert(rc == -ENOTSUP); + if (pcie_br->msi != ON_OFF_AUTO_ON) { + error_free(local_err); + } else { + /* failed to satisfy user's explicit request for MSI */ + error_propagate(errp, local_err); + goto msi_error; + } + } + } + pci_register_bar(d, 0, PCI_BASE_ADDRESS_SPACE_MEMORY | + PCI_BASE_ADDRESS_MEM_TYPE_64, &pcie_br->shpc_bar); + return; + +msi_error: + pcie_aer_exit(d); +aer_error: +pm_error: + pcie_cap_exit(d); +cap_error: + shpc_cleanup(d, &pcie_br->shpc_bar); +error: + pci_bridge_exitfn(d); +} + +static void pcie_pci_bridge_exit(PCIDevice *d) +{ + PCIEPCIBridge *bridge_dev = PCIE_PCI_BRIDGE_DEV(d); + pcie_cap_exit(d); + shpc_cleanup(d, &bridge_dev->shpc_bar); + pci_bridge_exitfn(d); +} + +static void pcie_pci_bridge_reset(DeviceState *qdev) +{ + PCIDevice *d = PCI_DEVICE(qdev); + pci_bridge_reset(qdev); + if (msi_present(d)) { + msi_reset(d); + } + shpc_reset(d); +} + +static void pcie_pci_bridge_write_config(PCIDevice *d, + uint32_t address, uint32_t val, int len) +{ + pci_bridge_write_config(d, address, val, len); + if (msi_present(d)) { + msi_write_config(d, address, val, len); + } + shpc_cap_write_config(d, address, val, len); +} + +static Property pcie_pci_bridge_dev_properties[] = { + DEFINE_PROP_ON_OFF_AUTO("msi", PCIEPCIBridge, msi, ON_OFF_AUTO_AUTO), + DEFINE_PROP_END_OF_LIST(), +}; + +static const VMStateDescription pcie_pci_bridge_dev_vmstate = { + .name = TYPE_PCIE_PCI_BRIDGE_DEV, + .priority = MIG_PRI_PCI_BUS, + .fields = (VMStateField[]) { + VMSTATE_PCI_DEVICE(parent_obj, PCIBridge), + SHPC_VMSTATE(shpc, PCIDevice, NULL), + VMSTATE_END_OF_LIST() + } +}; + +static void pcie_pci_bridge_class_init(ObjectClass *klass, void *data) +{ + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + DeviceClass *dc = DEVICE_CLASS(klass); + HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(klass); + + k->is_bridge = true; + k->vendor_id = PCI_VENDOR_ID_REDHAT; + k->device_id = PCI_DEVICE_ID_REDHAT_PCIE_BRIDGE; + k->realize = 
pcie_pci_bridge_realize; + k->exit = pcie_pci_bridge_exit; + k->config_write = pcie_pci_bridge_write_config; + dc->vmsd = &pcie_pci_bridge_dev_vmstate; + device_class_set_props(dc, pcie_pci_bridge_dev_properties); + dc->reset = &pcie_pci_bridge_reset; + set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); + hc->plug = pci_bridge_dev_plug_cb; + hc->unplug = pci_bridge_dev_unplug_cb; + hc->unplug_request = pci_bridge_dev_unplug_request_cb; +} + +static const TypeInfo pcie_pci_bridge_info = { + .name = TYPE_PCIE_PCI_BRIDGE_DEV, + .parent = TYPE_PCI_BRIDGE, + .instance_size = sizeof(PCIEPCIBridge), + .class_init = pcie_pci_bridge_class_init, + .interfaces = (InterfaceInfo[]) { + { TYPE_HOTPLUG_HANDLER }, + { INTERFACE_PCIE_DEVICE }, + { }, + } +}; + +static void pciepci_register(void) +{ + type_register_static(&pcie_pci_bridge_info); +} + +type_init(pciepci_register); diff --git a/hw/pci-bridge/pcie_root_port.c b/hw/pci-bridge/pcie_root_port.c new file mode 100644 index 000000000..f1cfe9d14 --- /dev/null +++ b/hw/pci-bridge/pcie_root_port.c @@ -0,0 +1,198 @@ +/* + * Base class for PCI Express Root Ports + * + * Copyright (C) 2017 Red Hat Inc + * + * Authors: + * Marcel Apfelbaum + * + * Most of the code was migrated from hw/pci-bridge/ioh3420. + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/module.h" +#include "hw/pci/pcie_port.h" +#include "hw/qdev-properties.h" + +static void rp_aer_vector_update(PCIDevice *d) +{ + PCIERootPortClass *rpc = PCIE_ROOT_PORT_GET_CLASS(d); + + if (rpc->aer_vector) { + pcie_aer_root_set_vector(d, rpc->aer_vector(d)); + } +} + +static void rp_write_config(PCIDevice *d, uint32_t address, + uint32_t val, int len) +{ + uint32_t root_cmd = + pci_get_long(d->config + d->exp.aer_cap + PCI_ERR_ROOT_COMMAND); + uint16_t slt_ctl, slt_sta; + + pcie_cap_slot_get(d, &slt_ctl, &slt_sta); + + pci_bridge_write_config(d, address, val, len); + rp_aer_vector_update(d); + pcie_cap_slot_write_config(d, slt_ctl, slt_sta, address, val, len); + pcie_aer_write_config(d, address, val, len); + pcie_aer_root_write_config(d, address, val, len, root_cmd); +} + +static void rp_reset(DeviceState *qdev) +{ + PCIDevice *d = PCI_DEVICE(qdev); + + rp_aer_vector_update(d); + pcie_cap_root_reset(d); + pcie_cap_deverr_reset(d); + pcie_cap_slot_reset(d); + pcie_cap_arifwd_reset(d); + pcie_acs_reset(d); + pcie_aer_root_reset(d); + pci_bridge_reset(qdev); + pci_bridge_disable_base_limit(d); +} + +static void rp_realize(PCIDevice *d, Error **errp) +{ + PCIEPort *p = PCIE_PORT(d); + PCIESlot *s = PCIE_SLOT(d); + PCIDeviceClass *dc = PCI_DEVICE_GET_CLASS(d); + PCIERootPortClass *rpc = PCIE_ROOT_PORT_GET_CLASS(d); + int rc; + + pci_config_set_interrupt_pin(d->config, 1); + pci_bridge_initfn(d, TYPE_PCIE_BUS); + pcie_port_init_reg(d); + + rc = pci_bridge_ssvid_init(d, rpc->ssvid_offset, dc->vendor_id, + rpc->ssid, errp); + if (rc < 0) { + error_append_hint(errp, "Can't init SSV ID, error %d\n", rc); + goto err_bridge; + } + + if (rpc->interrupts_init) { + rc = rpc->interrupts_init(d, errp); + if (rc < 0) { + goto err_bridge; + } + } + + rc = pcie_cap_init(d, rpc->exp_offset, PCI_EXP_TYPE_ROOT_PORT, + p->port, errp); + if (rc < 0) { + error_append_hint(errp, "Can't add Root Port capability, " + "error %d\n", rc); + goto err_int; + } + + pcie_cap_arifwd_init(d); + pcie_cap_deverr_init(d); + pcie_cap_slot_init(d, s); + pcie_cap_root_init(d); + + 
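+ /*
+ * Root ports are addressed by user-visible (chassis, slot) pairs,
+ * e.g. (illustrative command line):
+ * -device pcie-root-port,id=rp0,chassis=1,slot=0,bus=pcie.0
+ * pcie_chassis_create() only allocates the chassis on first use; a
+ * duplicate slot within a chassis is rejected just below.
+ */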
pcie_chassis_create(s->chassis); + rc = pcie_chassis_add_slot(s); + if (rc < 0) { + error_setg(errp, "Can't add chassis slot, error %d", rc); + goto err_pcie_cap; + } + + rc = pcie_aer_init(d, PCI_ERR_VER, rpc->aer_offset, + PCI_ERR_SIZEOF, errp); + if (rc < 0) { + goto err; + } + pcie_aer_root_init(d); + rp_aer_vector_update(d); + + if (rpc->acs_offset && !s->disable_acs) { + pcie_acs_init(d, rpc->acs_offset); + } + return; + +err: + pcie_chassis_del_slot(s); +err_pcie_cap: + pcie_cap_exit(d); +err_int: + if (rpc->interrupts_uninit) { + rpc->interrupts_uninit(d); + } +err_bridge: + pci_bridge_exitfn(d); +} + +static void rp_exit(PCIDevice *d) +{ + PCIERootPortClass *rpc = PCIE_ROOT_PORT_GET_CLASS(d); + PCIESlot *s = PCIE_SLOT(d); + + pcie_aer_exit(d); + pcie_chassis_del_slot(s); + pcie_cap_exit(d); + if (rpc->interrupts_uninit) { + rpc->interrupts_uninit(d); + } + pci_bridge_exitfn(d); +} + +static Property rp_props[] = { + DEFINE_PROP_BIT(COMPAT_PROP_PCP, PCIDevice, cap_present, + QEMU_PCIE_SLTCAP_PCP_BITNR, true), + DEFINE_PROP_BOOL("disable-acs", PCIESlot, disable_acs, false), + DEFINE_PROP_END_OF_LIST() +}; + +static void rp_instance_post_init(Object *obj) +{ + PCIESlot *s = PCIE_SLOT(obj); + + if (!s->speed) { + s->speed = QEMU_PCI_EXP_LNK_2_5GT; + } + + if (!s->width) { + s->width = QEMU_PCI_EXP_LNK_X1; + } +} + +static void rp_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + + k->is_bridge = true; + k->config_write = rp_write_config; + k->realize = rp_realize; + k->exit = rp_exit; + set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); + dc->reset = rp_reset; + device_class_set_props(dc, rp_props); +} + +static const TypeInfo rp_info = { + .name = TYPE_PCIE_ROOT_PORT, + .parent = TYPE_PCIE_SLOT, + .instance_post_init = rp_instance_post_init, + .class_init = rp_class_init, + .abstract = true, + .class_size = sizeof(PCIERootPortClass), + .interfaces = (InterfaceInfo[]) { + { INTERFACE_PCIE_DEVICE }, + { } + }, +}; + +static void rp_register_types(void) +{ + type_register_static(&rp_info); +} + +type_init(rp_register_types) diff --git a/hw/pci-bridge/simba.c b/hw/pci-bridge/simba.c new file mode 100644 index 000000000..ba55ab193 --- /dev/null +++ b/hw/pci-bridge/simba.c @@ -0,0 +1,102 @@ +/* + * QEMU Simba PCI bridge + * + * Copyright (c) 2006 Fabrice Bellard + * Copyright (c) 2012,2013 Artyom Tarasenko + * Copyright (c) 2018 Mark Cave-Ayland + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "hw/pci/pci.h" +#include "hw/pci/pci_bridge.h" +#include "hw/pci/pci_bus.h" +#include "qemu/module.h" +#include "hw/pci-bridge/simba.h" + +/* + * Chipset docs: + * APB: "Advanced PCI Bridge (APB) User's Manual", + * http://www.sun.com/processors/manuals/805-1251.pdf + */ + +static void simba_pci_bridge_realize(PCIDevice *dev, Error **errp) +{ + /* + * command register: + * According to PCI bridge spec, after reset + * bus master bit is off + * memory space enable bit is off + * According to manual (805-1251.pdf). + * the reset value should be zero unless the boot pin is tied high + * (which is true) and thus it should be PCI_COMMAND_MEMORY. + */ + SimbaPCIBridge *br = SIMBA_PCI_BRIDGE(dev); + + pci_bridge_initfn(dev, TYPE_PCI_BUS); + + pci_set_word(dev->config + PCI_COMMAND, PCI_COMMAND_MEMORY); + pci_set_word(dev->config + PCI_STATUS, + PCI_STATUS_FAST_BACK | PCI_STATUS_66MHZ | + PCI_STATUS_DEVSEL_MEDIUM); + + /* Allow 32-bit IO addresses */ + pci_set_word(dev->config + PCI_IO_BASE, PCI_IO_RANGE_TYPE_32); + pci_set_word(dev->config + PCI_IO_LIMIT, PCI_IO_RANGE_TYPE_32); + pci_set_word(dev->wmask + PCI_IO_BASE_UPPER16, 0xffff); + pci_set_word(dev->wmask + PCI_IO_LIMIT_UPPER16, 0xffff); + + pci_bridge_update_mappings(PCI_BRIDGE(br)); +} + +static void simba_pci_bridge_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + + k->realize = simba_pci_bridge_realize; + k->exit = pci_bridge_exitfn; + k->vendor_id = PCI_VENDOR_ID_SUN; + k->device_id = PCI_DEVICE_ID_SUN_SIMBA; + k->revision = 0x11; + k->config_write = pci_bridge_write_config; + k->is_bridge = true; + set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); + dc->reset = pci_bridge_reset; + dc->vmsd = &vmstate_pci_device; +} + +static const TypeInfo simba_pci_bridge_info = { + .name = TYPE_SIMBA_PCI_BRIDGE, + .parent = TYPE_PCI_BRIDGE, + .class_init = simba_pci_bridge_class_init, + .instance_size = sizeof(SimbaPCIBridge), + .interfaces = (InterfaceInfo[]) { + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { }, + }, +}; + +static void simba_register_types(void) +{ + type_register_static(&simba_pci_bridge_info); +} + +type_init(simba_register_types) diff --git a/hw/pci-bridge/xio3130_downstream.c b/hw/pci-bridge/xio3130_downstream.c new file mode 100644 index 000000000..04aae72cd --- /dev/null +++ b/hw/pci-bridge/xio3130_downstream.c @@ -0,0 +1,190 @@ +/* + * x3130_downstream.c + * TI X3130 pci express downstream port switch + * + * Copyright (c) 2010 Isaku Yamahata + * VA Linux Systems Japan K.K. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . 
+ */ + +#include "qemu/osdep.h" +#include "hw/pci/pci_ids.h" +#include "hw/pci/msi.h" +#include "hw/pci/pcie.h" +#include "hw/pci/pcie_port.h" +#include "hw/qdev-properties.h" +#include "migration/vmstate.h" +#include "qapi/error.h" +#include "qemu/module.h" + +#define PCI_DEVICE_ID_TI_XIO3130D 0x8233 /* downstream port */ +#define XIO3130_REVISION 0x1 +#define XIO3130_MSI_OFFSET 0x70 +#define XIO3130_MSI_SUPPORTED_FLAGS PCI_MSI_FLAGS_64BIT +#define XIO3130_MSI_NR_VECTOR 1 +#define XIO3130_SSVID_OFFSET 0x80 +#define XIO3130_SSVID_SVID 0 +#define XIO3130_SSVID_SSID 0 +#define XIO3130_EXP_OFFSET 0x90 +#define XIO3130_AER_OFFSET 0x100 + +static void xio3130_downstream_write_config(PCIDevice *d, uint32_t address, + uint32_t val, int len) +{ + uint16_t slt_ctl, slt_sta; + + pcie_cap_slot_get(d, &slt_ctl, &slt_sta); + pci_bridge_write_config(d, address, val, len); + pcie_cap_flr_write_config(d, address, val, len); + pcie_cap_slot_write_config(d, slt_ctl, slt_sta, address, val, len); + pcie_aer_write_config(d, address, val, len); +} + +static void xio3130_downstream_reset(DeviceState *qdev) +{ + PCIDevice *d = PCI_DEVICE(qdev); + + pcie_cap_deverr_reset(d); + pcie_cap_slot_reset(d); + pcie_cap_arifwd_reset(d); + pci_bridge_reset(qdev); +} + +static void xio3130_downstream_realize(PCIDevice *d, Error **errp) +{ + PCIEPort *p = PCIE_PORT(d); + PCIESlot *s = PCIE_SLOT(d); + int rc; + + pci_bridge_initfn(d, TYPE_PCIE_BUS); + pcie_port_init_reg(d); + + rc = msi_init(d, XIO3130_MSI_OFFSET, XIO3130_MSI_NR_VECTOR, + XIO3130_MSI_SUPPORTED_FLAGS & PCI_MSI_FLAGS_64BIT, + XIO3130_MSI_SUPPORTED_FLAGS & PCI_MSI_FLAGS_MASKBIT, + errp); + if (rc < 0) { + assert(rc == -ENOTSUP); + goto err_bridge; + } + + rc = pci_bridge_ssvid_init(d, XIO3130_SSVID_OFFSET, + XIO3130_SSVID_SVID, XIO3130_SSVID_SSID, + errp); + if (rc < 0) { + goto err_bridge; + } + + rc = pcie_cap_init(d, XIO3130_EXP_OFFSET, PCI_EXP_TYPE_DOWNSTREAM, + p->port, errp); + if (rc < 0) { + goto err_msi; + } + pcie_cap_flr_init(d); + pcie_cap_deverr_init(d); + pcie_cap_slot_init(d, s); + pcie_cap_arifwd_init(d); + + pcie_chassis_create(s->chassis); + rc = pcie_chassis_add_slot(s); + if (rc < 0) { + error_setg(errp, "Can't add chassis slot, error %d", rc); + goto err_pcie_cap; + } + + rc = pcie_aer_init(d, PCI_ERR_VER, XIO3130_AER_OFFSET, + PCI_ERR_SIZEOF, errp); + if (rc < 0) { + goto err; + } + + return; + +err: + pcie_chassis_del_slot(s); +err_pcie_cap: + pcie_cap_exit(d); +err_msi: + msi_uninit(d); +err_bridge: + pci_bridge_exitfn(d); +} + +static void xio3130_downstream_exitfn(PCIDevice *d) +{ + PCIESlot *s = PCIE_SLOT(d); + + pcie_aer_exit(d); + pcie_chassis_del_slot(s); + pcie_cap_exit(d); + msi_uninit(d); + pci_bridge_exitfn(d); +} + +static Property xio3130_downstream_props[] = { + DEFINE_PROP_BIT(COMPAT_PROP_PCP, PCIDevice, cap_present, + QEMU_PCIE_SLTCAP_PCP_BITNR, true), + DEFINE_PROP_END_OF_LIST() +}; + +static const VMStateDescription vmstate_xio3130_downstream = { + .name = "xio3130-express-downstream-port", + .priority = MIG_PRI_PCI_BUS, + .version_id = 1, + .minimum_version_id = 1, + .post_load = pcie_cap_slot_post_load, + .fields = (VMStateField[]) { + VMSTATE_PCI_DEVICE(parent_obj.parent_obj.parent_obj, PCIESlot), + VMSTATE_STRUCT(parent_obj.parent_obj.parent_obj.exp.aer_log, + PCIESlot, 0, vmstate_pcie_aer_log, PCIEAERLog), + VMSTATE_END_OF_LIST() + } +}; + +static void xio3130_downstream_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + + 
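+ /*
+ * Unlike the upstream port, the downstream port carries a
+ * hotpluggable slot, hence the slot-aware config-write handler,
+ * reset hook and vmstate wired up below.
+ */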
k->is_bridge = true; + k->config_write = xio3130_downstream_write_config; + k->realize = xio3130_downstream_realize; + k->exit = xio3130_downstream_exitfn; + k->vendor_id = PCI_VENDOR_ID_TI; + k->device_id = PCI_DEVICE_ID_TI_XIO3130D; + k->revision = XIO3130_REVISION; + set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); + dc->desc = "TI X3130 Downstream Port of PCI Express Switch"; + dc->reset = xio3130_downstream_reset; + dc->vmsd = &vmstate_xio3130_downstream; + device_class_set_props(dc, xio3130_downstream_props); +} + +static const TypeInfo xio3130_downstream_info = { + .name = "xio3130-downstream", + .parent = TYPE_PCIE_SLOT, + .class_init = xio3130_downstream_class_init, + .interfaces = (InterfaceInfo[]) { + { INTERFACE_PCIE_DEVICE }, + { } + }, +}; + +static void xio3130_downstream_register_types(void) +{ + type_register_static(&xio3130_downstream_info); +} + +type_init(xio3130_downstream_register_types) diff --git a/hw/pci-bridge/xio3130_upstream.c b/hw/pci-bridge/xio3130_upstream.c new file mode 100644 index 000000000..5cd3af4fb --- /dev/null +++ b/hw/pci-bridge/xio3130_upstream.c @@ -0,0 +1,159 @@ +/* + * xio3130_upstream.c + * TI X3130 pci express upstream port switch + * + * Copyright (c) 2010 Isaku Yamahata + * VA Linux Systems Japan K.K. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . 
+ */ + +#include "qemu/osdep.h" +#include "hw/pci/pci_ids.h" +#include "hw/pci/msi.h" +#include "hw/pci/pcie.h" +#include "hw/pci/pcie_port.h" +#include "migration/vmstate.h" +#include "qemu/module.h" + +#define PCI_DEVICE_ID_TI_XIO3130U 0x8232 /* upstream port */ +#define XIO3130_REVISION 0x2 +#define XIO3130_MSI_OFFSET 0x70 +#define XIO3130_MSI_SUPPORTED_FLAGS PCI_MSI_FLAGS_64BIT +#define XIO3130_MSI_NR_VECTOR 1 +#define XIO3130_SSVID_OFFSET 0x80 +#define XIO3130_SSVID_SVID 0 +#define XIO3130_SSVID_SSID 0 +#define XIO3130_EXP_OFFSET 0x90 +#define XIO3130_AER_OFFSET 0x100 + +static void xio3130_upstream_write_config(PCIDevice *d, uint32_t address, + uint32_t val, int len) +{ + pci_bridge_write_config(d, address, val, len); + pcie_cap_flr_write_config(d, address, val, len); + pcie_aer_write_config(d, address, val, len); +} + +static void xio3130_upstream_reset(DeviceState *qdev) +{ + PCIDevice *d = PCI_DEVICE(qdev); + + pci_bridge_reset(qdev); + pcie_cap_deverr_reset(d); +} + +static void xio3130_upstream_realize(PCIDevice *d, Error **errp) +{ + PCIEPort *p = PCIE_PORT(d); + int rc; + + pci_bridge_initfn(d, TYPE_PCIE_BUS); + pcie_port_init_reg(d); + + rc = msi_init(d, XIO3130_MSI_OFFSET, XIO3130_MSI_NR_VECTOR, + XIO3130_MSI_SUPPORTED_FLAGS & PCI_MSI_FLAGS_64BIT, + XIO3130_MSI_SUPPORTED_FLAGS & PCI_MSI_FLAGS_MASKBIT, + errp); + if (rc < 0) { + assert(rc == -ENOTSUP); + goto err_bridge; + } + + rc = pci_bridge_ssvid_init(d, XIO3130_SSVID_OFFSET, + XIO3130_SSVID_SVID, XIO3130_SSVID_SSID, + errp); + if (rc < 0) { + goto err_bridge; + } + + rc = pcie_cap_init(d, XIO3130_EXP_OFFSET, PCI_EXP_TYPE_UPSTREAM, + p->port, errp); + if (rc < 0) { + goto err_msi; + } + pcie_cap_flr_init(d); + pcie_cap_deverr_init(d); + + rc = pcie_aer_init(d, PCI_ERR_VER, XIO3130_AER_OFFSET, + PCI_ERR_SIZEOF, errp); + if (rc < 0) { + goto err; + } + + return; + +err: + pcie_cap_exit(d); +err_msi: + msi_uninit(d); +err_bridge: + pci_bridge_exitfn(d); +} + +static void xio3130_upstream_exitfn(PCIDevice *d) +{ + pcie_aer_exit(d); + pcie_cap_exit(d); + msi_uninit(d); + pci_bridge_exitfn(d); +} + +static const VMStateDescription vmstate_xio3130_upstream = { + .name = "xio3130-express-upstream-port", + .priority = MIG_PRI_PCI_BUS, + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_PCI_DEVICE(parent_obj.parent_obj, PCIEPort), + VMSTATE_STRUCT(parent_obj.parent_obj.exp.aer_log, PCIEPort, 0, + vmstate_pcie_aer_log, PCIEAERLog), + VMSTATE_END_OF_LIST() + } +}; + +static void xio3130_upstream_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + + k->is_bridge = true; + k->config_write = xio3130_upstream_write_config; + k->realize = xio3130_upstream_realize; + k->exit = xio3130_upstream_exitfn; + k->vendor_id = PCI_VENDOR_ID_TI; + k->device_id = PCI_DEVICE_ID_TI_XIO3130U; + k->revision = XIO3130_REVISION; + set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); + dc->desc = "TI X3130 Upstream Port of PCI Express Switch"; + dc->reset = xio3130_upstream_reset; + dc->vmsd = &vmstate_xio3130_upstream; +} + +static const TypeInfo xio3130_upstream_info = { + .name = "x3130-upstream", + .parent = TYPE_PCIE_PORT, + .class_init = xio3130_upstream_class_init, + .interfaces = (InterfaceInfo[]) { + { INTERFACE_PCIE_DEVICE }, + { } + }, +}; + +static void xio3130_upstream_register_types(void) +{ + type_register_static(&xio3130_upstream_info); +} + +type_init(xio3130_upstream_register_types) diff --git a/hw/pci-host/Kconfig 
b/hw/pci-host/Kconfig new file mode 100644 index 000000000..2b5f7d58c --- /dev/null +++ b/hw/pci-host/Kconfig @@ -0,0 +1,79 @@ +config PAM + bool + +config XEN_IGD_PASSTHROUGH + bool + default y + depends on XEN && PCI_I440FX + +config RAVEN_PCI + bool + select PCI + select OR_IRQ + +config GRACKLE_PCI + select PCI + bool + +config UNIN_PCI + bool + select PCI + select DEC_PCI + select OPENPIC + +config PPCE500_PCI + select PCI + bool + +config VERSATILE_PCI + select PCI + bool + +config PCI_SABRE + select PCI + bool + +config PCI_I440FX + bool + select PCI + select PAM + +config PCI_EXPRESS_Q35 + bool + select PCI_EXPRESS + select PAM + +config PCI_EXPRESS_GENERIC_BRIDGE + bool + select PCI_EXPRESS + +config PCI_EXPRESS_XILINX + bool + select PCI_EXPRESS + +config PCI_EXPRESS_DESIGNWARE + bool + select PCI_EXPRESS + select MSI_NONBROKEN + +config PCI_BONITO + select PCI + select UNIMP + bool + +config PCI_POWERNV + select PCI_EXPRESS + select MSI_NONBROKEN + select PCIE_PORT + +config REMOTE_PCIHOST + bool + +config SH_PCI + bool + select PCI + +config MV64361 + bool + select PCI + select I8259 diff --git a/hw/pci-host/bonito.c b/hw/pci-host/bonito.c new file mode 100644 index 000000000..a57e81e3a --- /dev/null +++ b/hw/pci-host/bonito.c @@ -0,0 +1,819 @@ +/* + * bonito north bridge support + * + * Copyright (c) 2008 yajin (yajin@vm-kernel.org) + * Copyright (c) 2010 Huacai Chen (zltjiangshi@gmail.com) + * + * This code is licensed under the GNU GPL v2. + * + * Contributions after 2012-01-13 are licensed under the terms of the + * GNU GPL, version 2 or (at your option) any later version. + */ + +/* + * The Fuloong 2e mini PC has a Bonito north bridge. + */ + +/* + * What is the meaning of devfn in QEMU and IDSEL in the Bonito north bridge? + * + * devfn pci_slot<<3 + funno + * One PCI bus can have 32 devices and each device can have 8 functions. + * + * In the Bonito north bridge, pci slot = IDSEL bit - 12. + * For example, PCI_IDSEL_VIA686B = 17, + * pci slot = 17-12=5 + * + * so + * VT686B_FUN0's devfn = (5<<3)+0 + * VT686B_FUN1's devfn = (5<<3)+1 + * + * QEMU also uses a PCI address for the north bridge to access the PCI + * config registers: + * bus_no [23:16] + * dev_no [15:11] + * fun_no [10:8] + * reg_no [7:2] + * + * so the function bonito_sbridge_pciaddr does the translation from + * north bridge address to PCI address. + */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "qapi/error.h" +#include "qemu/error-report.h" +#include "hw/pci/pci.h" +#include "hw/irq.h" +#include "hw/mips/mips.h" +#include "hw/pci/pci_host.h" +#include "migration/vmstate.h" +#include "sysemu/reset.h" +#include "sysemu/runstate.h" +#include "hw/misc/unimp.h" +#include "hw/registerfields.h" +#include "qom/object.h" +#include "trace.h" + +/* #define DEBUG_BONITO */ + +#ifdef DEBUG_BONITO +#define DPRINTF(fmt, ...) fprintf(stderr, "%s: " fmt, __func__, ##__VA_ARGS__) +#else +#define DPRINTF(fmt, ...) +#endif + +/* from linux source code.
include/asm-mips/mips-boards/bonito64.h*/ +#define BONITO_BOOT_BASE 0x1fc00000 +#define BONITO_BOOT_SIZE 0x00100000 +#define BONITO_BOOT_TOP (BONITO_BOOT_BASE + BONITO_BOOT_SIZE - 1) +#define BONITO_FLASH_BASE 0x1c000000 +#define BONITO_FLASH_SIZE 0x03000000 +#define BONITO_FLASH_TOP (BONITO_FLASH_BASE + BONITO_FLASH_SIZE - 1) +#define BONITO_SOCKET_BASE 0x1f800000 +#define BONITO_SOCKET_SIZE 0x00400000 +#define BONITO_SOCKET_TOP (BONITO_SOCKET_BASE + BONITO_SOCKET_SIZE - 1) +#define BONITO_REG_BASE 0x1fe00000 +#define BONITO_REG_SIZE 0x00040000 +#define BONITO_REG_TOP (BONITO_REG_BASE + BONITO_REG_SIZE - 1) +#define BONITO_DEV_BASE 0x1ff00000 +#define BONITO_DEV_SIZE 0x00100000 +#define BONITO_DEV_TOP (BONITO_DEV_BASE + BONITO_DEV_SIZE - 1) +#define BONITO_PCILO_BASE 0x10000000 +#define BONITO_PCILO_BASE_VA 0xb0000000 +#define BONITO_PCILO_SIZE 0x0c000000 +#define BONITO_PCILO_TOP (BONITO_PCILO_BASE + BONITO_PCILO_SIZE - 1) +#define BONITO_PCILO0_BASE 0x10000000 +#define BONITO_PCILO1_BASE 0x14000000 +#define BONITO_PCILO2_BASE 0x18000000 +#define BONITO_PCIHI_BASE 0x20000000 +#define BONITO_PCIHI_SIZE 0x60000000 +#define BONITO_PCIHI_TOP (BONITO_PCIHI_BASE + BONITO_PCIHI_SIZE - 1) +#define BONITO_PCIIO_BASE 0x1fd00000 +#define BONITO_PCIIO_BASE_VA 0xbfd00000 +#define BONITO_PCIIO_SIZE 0x00010000 +#define BONITO_PCIIO_TOP (BONITO_PCIIO_BASE + BONITO_PCIIO_SIZE - 1) +#define BONITO_PCICFG_BASE 0x1fe80000 +#define BONITO_PCICFG_SIZE 0x00080000 +#define BONITO_PCICFG_TOP (BONITO_PCICFG_BASE + BONITO_PCICFG_SIZE - 1) + + +#define BONITO_PCICONFIGBASE 0x00 +#define BONITO_REGBASE 0x100 + +#define BONITO_PCICONFIG_BASE (BONITO_PCICONFIGBASE + BONITO_REG_BASE) +#define BONITO_PCICONFIG_SIZE (0x100) + +#define BONITO_INTERNAL_REG_BASE (BONITO_REGBASE + BONITO_REG_BASE) +#define BONITO_INTERNAL_REG_SIZE (0x70) + +#define BONITO_SPCICONFIG_BASE (BONITO_PCICFG_BASE) +#define BONITO_SPCICONFIG_SIZE (BONITO_PCICFG_SIZE) + + + +/* 1. Bonito h/w Configuration */ +/* Power on register */ + +#define BONITO_BONPONCFG (0x00 >> 2) /* 0x100 */ + +/* PCI configuration register */ +#define BONITO_BONGENCFG_OFFSET 0x4 +#define BONITO_BONGENCFG (BONITO_BONGENCFG_OFFSET >> 2) /*0x104 */ +REG32(BONGENCFG, 0x104) +FIELD(BONGENCFG, DEBUGMODE, 0, 1) +FIELD(BONGENCFG, SNOOP, 1, 1) +FIELD(BONGENCFG, CPUSELFRESET, 2, 1) +FIELD(BONGENCFG, BYTESWAP, 6, 1) +FIELD(BONGENCFG, UNCACHED, 7, 1) +FIELD(BONGENCFG, PREFETCH, 8, 1) +FIELD(BONGENCFG, WRITEBEHIND, 9, 1) +FIELD(BONGENCFG, PCIQUEUE, 12, 1) + +/* 2. IO & IDE configuration */ +#define BONITO_IODEVCFG (0x08 >> 2) /* 0x108 */ + +/* 3. IO & IDE configuration */ +#define BONITO_SDCFG (0x0c >> 2) /* 0x10c */ + +/* 4. PCI address map control */ +#define BONITO_PCIMAP (0x10 >> 2) /* 0x110 */ +#define BONITO_PCIMEMBASECFG (0x14 >> 2) /* 0x114 */ +#define BONITO_PCIMAP_CFG (0x18 >> 2) /* 0x118 */ + +/* 5. ICU & GPIO regs */ +/* GPIO Regs - r/w */ +#define BONITO_GPIODATA_OFFSET 0x1c +#define BONITO_GPIODATA (BONITO_GPIODATA_OFFSET >> 2) /* 0x11c */ +#define BONITO_GPIOIE (0x20 >> 2) /* 0x120 */ + +/* ICU Configuration Regs - r/w */ +#define BONITO_INTEDGE (0x24 >> 2) /* 0x124 */ +#define BONITO_INTSTEER (0x28 >> 2) /* 0x128 */ +#define BONITO_INTPOL (0x2c >> 2) /* 0x12c */ + +/* ICU Enable Regs - IntEn & IntISR are r/o. 
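Writes are steered through INTENSET/INTENCLR in bonito_writel() below: INTENSET data is ORed into INTEN and INTENCLR data is masked out of it.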
*/ +#define BONITO_INTENSET (0x30 >> 2) /* 0x130 */ +#define BONITO_INTENCLR (0x34 >> 2) /* 0x134 */ +#define BONITO_INTEN (0x38 >> 2) /* 0x138 */ +#define BONITO_INTISR (0x3c >> 2) /* 0x13c */ + +/* PCI mail boxes */ +#define BONITO_PCIMAIL0_OFFSET 0x40 +#define BONITO_PCIMAIL1_OFFSET 0x44 +#define BONITO_PCIMAIL2_OFFSET 0x48 +#define BONITO_PCIMAIL3_OFFSET 0x4c +#define BONITO_PCIMAIL0 (0x40 >> 2) /* 0x140 */ +#define BONITO_PCIMAIL1 (0x44 >> 2) /* 0x144 */ +#define BONITO_PCIMAIL2 (0x48 >> 2) /* 0x148 */ +#define BONITO_PCIMAIL3 (0x4c >> 2) /* 0x14c */ + +/* 6. PCI cache */ +#define BONITO_PCICACHECTRL (0x50 >> 2) /* 0x150 */ +#define BONITO_PCICACHETAG (0x54 >> 2) /* 0x154 */ +#define BONITO_PCIBADADDR (0x58 >> 2) /* 0x158 */ +#define BONITO_PCIMSTAT (0x5c >> 2) /* 0x15c */ + +/* 7. other*/ +#define BONITO_TIMECFG (0x60 >> 2) /* 0x160 */ +#define BONITO_CPUCFG (0x64 >> 2) /* 0x164 */ +#define BONITO_DQCFG (0x68 >> 2) /* 0x168 */ +#define BONITO_MEMSIZE (0x6C >> 2) /* 0x16c */ + +#define BONITO_REGS (0x70 >> 2) + +/* PCI config for south bridge. type 0 */ +#define BONITO_PCICONF_IDSEL_MASK 0xfffff800 /* [31:11] */ +#define BONITO_PCICONF_IDSEL_OFFSET 11 +#define BONITO_PCICONF_FUN_MASK 0x700 /* [10:8] */ +#define BONITO_PCICONF_FUN_OFFSET 8 +#define BONITO_PCICONF_REG_MASK_DS (~3) /* Per datasheet */ +#define BONITO_PCICONF_REG_MASK_HW 0xff /* As seen running PMON */ +#define BONITO_PCICONF_REG_OFFSET 0 + + +/* idsel BIT = pci slot number +12 */ +#define PCI_SLOT_BASE 12 +#define PCI_IDSEL_VIA686B_BIT (17) +#define PCI_IDSEL_VIA686B (1 << PCI_IDSEL_VIA686B_BIT) + +#define PCI_ADDR(busno , devno , funno , regno) \ + ((PCI_BUILD_BDF(busno, PCI_DEVFN(devno , funno)) << 8) + (regno)) + +typedef struct BonitoState BonitoState; + +struct PCIBonitoState { + PCIDevice dev; + + BonitoState *pcihost; + uint32_t regs[BONITO_REGS]; + + struct bonldma { + uint32_t ldmactrl; + uint32_t ldmastat; + uint32_t ldmaaddr; + uint32_t ldmago; + } bonldma; + + /* Based at 1fe00300, bonito Copier */ + struct boncop { + uint32_t copctrl; + uint32_t copstat; + uint32_t coppaddr; + uint32_t copgo; + } boncop; + + /* Bonito registers */ + MemoryRegion iomem; + MemoryRegion iomem_ldma; + MemoryRegion iomem_cop; + MemoryRegion bonito_pciio; + MemoryRegion bonito_localio; + +}; +typedef struct PCIBonitoState PCIBonitoState; + +struct BonitoState { + PCIHostState parent_obj; + qemu_irq *pic; + PCIBonitoState *pci_dev; + MemoryRegion pci_mem; +}; + +#define TYPE_BONITO_PCI_HOST_BRIDGE "Bonito-pcihost" +OBJECT_DECLARE_SIMPLE_TYPE(BonitoState, BONITO_PCI_HOST_BRIDGE) + +#define TYPE_PCI_BONITO "Bonito" +OBJECT_DECLARE_SIMPLE_TYPE(PCIBonitoState, PCI_BONITO) + +static void bonito_writel(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + PCIBonitoState *s = opaque; + uint32_t saddr; + int reset = 0; + + saddr = addr >> 2; + + DPRINTF("bonito_writel "TARGET_FMT_plx" val %lx saddr %x\n", + addr, val, saddr); + switch (saddr) { + case BONITO_BONPONCFG: + case BONITO_IODEVCFG: + case BONITO_SDCFG: + case BONITO_PCIMAP: + case BONITO_PCIMEMBASECFG: + case BONITO_PCIMAP_CFG: + case BONITO_GPIODATA: + case BONITO_GPIOIE: + case BONITO_INTEDGE: + case BONITO_INTSTEER: + case BONITO_INTPOL: + case BONITO_PCIMAIL0: + case BONITO_PCIMAIL1: + case BONITO_PCIMAIL2: + case BONITO_PCIMAIL3: + case BONITO_PCICACHECTRL: + case BONITO_PCICACHETAG: + case BONITO_PCIBADADDR: + case BONITO_PCIMSTAT: + case BONITO_TIMECFG: + case BONITO_CPUCFG: + case BONITO_DQCFG: + case BONITO_MEMSIZE: + s->regs[saddr] = val; + break; + case 
BONITO_BONGENCFG: + if (!(s->regs[saddr] & 0x04) && (val & 0x04)) { + reset = 1; /* bit 2 jump from 0 to 1 cause reset */ + } + s->regs[saddr] = val; + if (reset) { + qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); + } + break; + case BONITO_INTENSET: + s->regs[BONITO_INTENSET] = val; + s->regs[BONITO_INTEN] |= val; + break; + case BONITO_INTENCLR: + s->regs[BONITO_INTENCLR] = val; + s->regs[BONITO_INTEN] &= ~val; + break; + case BONITO_INTEN: + case BONITO_INTISR: + DPRINTF("write to readonly bonito register %x\n", saddr); + break; + default: + DPRINTF("write to unknown bonito register %x\n", saddr); + break; + } +} + +static uint64_t bonito_readl(void *opaque, hwaddr addr, + unsigned size) +{ + PCIBonitoState *s = opaque; + uint32_t saddr; + + saddr = addr >> 2; + + DPRINTF("bonito_readl "TARGET_FMT_plx"\n", addr); + switch (saddr) { + case BONITO_INTISR: + return s->regs[saddr]; + default: + return s->regs[saddr]; + } +} + +static const MemoryRegionOps bonito_ops = { + .read = bonito_readl, + .write = bonito_writel, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +static void bonito_pciconf_writel(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + PCIBonitoState *s = opaque; + PCIDevice *d = PCI_DEVICE(s); + + DPRINTF("bonito_pciconf_writel "TARGET_FMT_plx" val %lx\n", addr, val); + d->config_write(d, addr, val, 4); +} + +static uint64_t bonito_pciconf_readl(void *opaque, hwaddr addr, + unsigned size) +{ + + PCIBonitoState *s = opaque; + PCIDevice *d = PCI_DEVICE(s); + + DPRINTF("bonito_pciconf_readl "TARGET_FMT_plx"\n", addr); + return d->config_read(d, addr, 4); +} + +/* north bridge PCI configure space. 0x1fe0 0000 - 0x1fe0 00ff */ + +static const MemoryRegionOps bonito_pciconf_ops = { + .read = bonito_pciconf_readl, + .write = bonito_pciconf_writel, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +static uint64_t bonito_ldma_readl(void *opaque, hwaddr addr, + unsigned size) +{ + uint32_t val; + PCIBonitoState *s = opaque; + + if (addr >= sizeof(s->bonldma)) { + return 0; + } + + val = ((uint32_t *)(&s->bonldma))[addr / sizeof(uint32_t)]; + + return val; +} + +static void bonito_ldma_writel(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + PCIBonitoState *s = opaque; + + if (addr >= sizeof(s->bonldma)) { + return; + } + + ((uint32_t *)(&s->bonldma))[addr / sizeof(uint32_t)] = val & 0xffffffff; +} + +static const MemoryRegionOps bonito_ldma_ops = { + .read = bonito_ldma_readl, + .write = bonito_ldma_writel, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +static uint64_t bonito_cop_readl(void *opaque, hwaddr addr, + unsigned size) +{ + uint32_t val; + PCIBonitoState *s = opaque; + + if (addr >= sizeof(s->boncop)) { + return 0; + } + + val = ((uint32_t *)(&s->boncop))[addr / sizeof(uint32_t)]; + + return val; +} + +static void bonito_cop_writel(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + PCIBonitoState *s = opaque; + + if (addr >= sizeof(s->boncop)) { + return; + } + + ((uint32_t *)(&s->boncop))[addr / sizeof(uint32_t)] = val & 0xffffffff; +} + +static const MemoryRegionOps bonito_cop_ops = { + .read = bonito_cop_readl, + .write = bonito_cop_writel, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +static uint32_t bonito_sbridge_pciaddr(void *opaque, hwaddr addr) +{ + PCIBonitoState *s = 
opaque; + PCIHostState *phb = PCI_HOST_BRIDGE(s->pcihost); + uint32_t cfgaddr; + uint32_t idsel; + uint32_t devno; + uint32_t funno; + uint32_t regno; + uint32_t pciaddr; + + /* support type0 pci config */ + if ((s->regs[BONITO_PCIMAP_CFG] & 0x10000) != 0x0) { + return 0xffffffff; + } + + cfgaddr = addr & 0xffff; + cfgaddr |= (s->regs[BONITO_PCIMAP_CFG] & 0xffff) << 16; + + idsel = (cfgaddr & BONITO_PCICONF_IDSEL_MASK) >> + BONITO_PCICONF_IDSEL_OFFSET; + devno = ctz32(idsel); + funno = (cfgaddr & BONITO_PCICONF_FUN_MASK) >> BONITO_PCICONF_FUN_OFFSET; + regno = (cfgaddr & BONITO_PCICONF_REG_MASK_HW) >> BONITO_PCICONF_REG_OFFSET; + + if (idsel == 0) { + error_report("error in bonito pci config address 0x" TARGET_FMT_plx + ",pcimap_cfg=0x%x", addr, s->regs[BONITO_PCIMAP_CFG]); + exit(1); + } + pciaddr = PCI_ADDR(pci_bus_num(phb->bus), devno, funno, regno); + DPRINTF("cfgaddr %x pciaddr %x busno %x devno %d funno %d regno %d\n", + cfgaddr, pciaddr, pci_bus_num(phb->bus), devno, funno, regno); + + return pciaddr; +} + +static void bonito_spciconf_write(void *opaque, hwaddr addr, uint64_t val, + unsigned size) +{ + PCIBonitoState *s = opaque; + PCIDevice *d = PCI_DEVICE(s); + PCIHostState *phb = PCI_HOST_BRIDGE(s->pcihost); + uint32_t pciaddr; + uint16_t status; + + DPRINTF("bonito_spciconf_write "TARGET_FMT_plx" size %d val %lx\n", + addr, size, val); + + pciaddr = bonito_sbridge_pciaddr(s, addr); + + if (pciaddr == 0xffffffff) { + return; + } + if (addr & ~BONITO_PCICONF_REG_MASK_DS) { + trace_bonito_spciconf_small_access(addr, size); + } + + /* set the pci address in s->config_reg */ + phb->config_reg = (pciaddr) | (1u << 31); + pci_data_write(phb->bus, phb->config_reg, val, size); + + /* clear PCI_STATUS_REC_MASTER_ABORT and PCI_STATUS_REC_TARGET_ABORT */ + status = pci_get_word(d->config + PCI_STATUS); + status &= ~(PCI_STATUS_REC_MASTER_ABORT | PCI_STATUS_REC_TARGET_ABORT); + pci_set_word(d->config + PCI_STATUS, status); +} + +static uint64_t bonito_spciconf_read(void *opaque, hwaddr addr, unsigned size) +{ + PCIBonitoState *s = opaque; + PCIDevice *d = PCI_DEVICE(s); + PCIHostState *phb = PCI_HOST_BRIDGE(s->pcihost); + uint32_t pciaddr; + uint16_t status; + + DPRINTF("bonito_spciconf_read "TARGET_FMT_plx" size %d\n", addr, size); + + pciaddr = bonito_sbridge_pciaddr(s, addr); + + if (pciaddr == 0xffffffff) { + return MAKE_64BIT_MASK(0, size * 8); + } + if (addr & ~BONITO_PCICONF_REG_MASK_DS) { + trace_bonito_spciconf_small_access(addr, size); + } + + /* set the pci address in s->config_reg */ + phb->config_reg = (pciaddr) | (1u << 31); + + /* clear PCI_STATUS_REC_MASTER_ABORT and PCI_STATUS_REC_TARGET_ABORT */ + status = pci_get_word(d->config + PCI_STATUS); + status &= ~(PCI_STATUS_REC_MASTER_ABORT | PCI_STATUS_REC_TARGET_ABORT); + pci_set_word(d->config + PCI_STATUS, status); + + return pci_data_read(phb->bus, phb->config_reg, size); +} + +/* south bridge PCI configure space. 
0x1fe8 0000 - 0x1fef ffff */ +static const MemoryRegionOps bonito_spciconf_ops = { + .read = bonito_spciconf_read, + .write = bonito_spciconf_write, + .valid.min_access_size = 1, + .valid.max_access_size = 4, + .impl.min_access_size = 1, + .impl.max_access_size = 4, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +#define BONITO_IRQ_BASE 32 + +static void pci_bonito_set_irq(void *opaque, int irq_num, int level) +{ + BonitoState *s = opaque; + qemu_irq *pic = s->pic; + PCIBonitoState *bonito_state = s->pci_dev; + int internal_irq = irq_num - BONITO_IRQ_BASE; + + if (bonito_state->regs[BONITO_INTEDGE] & (1 << internal_irq)) { + qemu_irq_pulse(*pic); + } else { /* level triggered */ + if (bonito_state->regs[BONITO_INTPOL] & (1 << internal_irq)) { + qemu_irq_raise(*pic); + } else { + qemu_irq_lower(*pic); + } + } +} + +/* map the original irq (0~3) to bonito irq (16~47, but 16~31 are unused) */ +static int pci_bonito_map_irq(PCIDevice *pci_dev, int irq_num) +{ + int slot; + + slot = PCI_SLOT(pci_dev->devfn); + + switch (slot) { + case 5: /* FULOONG2E_VIA_SLOT, SouthBridge, IDE, USB, ACPI, AC97, MC97 */ + return irq_num % 4 + BONITO_IRQ_BASE; + case 6: /* FULOONG2E_ATI_SLOT, VGA */ + return 4 + BONITO_IRQ_BASE; + case 7: /* FULOONG2E_RTL_SLOT, RTL8139 */ + return 5 + BONITO_IRQ_BASE; + case 8 ... 12: /* PCI slot 1 to 4 */ + return (slot - 8 + irq_num) + 6 + BONITO_IRQ_BASE; + default: /* Unknown device, don't do any translation */ + return irq_num; + } +} + +static void bonito_reset(void *opaque) +{ + PCIBonitoState *s = opaque; + uint32_t val = 0; + + /* set the default value of north bridge registers */ + + s->regs[BONITO_BONPONCFG] = 0xc40; + val = FIELD_DP32(val, BONGENCFG, PCIQUEUE, 1); + val = FIELD_DP32(val, BONGENCFG, WRITEBEHIND, 1); + val = FIELD_DP32(val, BONGENCFG, PREFETCH, 1); + val = FIELD_DP32(val, BONGENCFG, UNCACHED, 1); + val = FIELD_DP32(val, BONGENCFG, CPUSELFRESET, 1); + s->regs[BONITO_BONGENCFG] = val; + + s->regs[BONITO_IODEVCFG] = 0x2bff8010; + s->regs[BONITO_SDCFG] = 0x255e0091; + + s->regs[BONITO_GPIODATA] = 0x1ff; + s->regs[BONITO_GPIOIE] = 0x1ff; + s->regs[BONITO_DQCFG] = 0x8; + s->regs[BONITO_MEMSIZE] = 0x10000000; + s->regs[BONITO_PCIMAP] = 0x6140; +} + +static const VMStateDescription vmstate_bonito = { + .name = "Bonito", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_PCI_DEVICE(dev, PCIBonitoState), + VMSTATE_END_OF_LIST() + } +}; + +static void bonito_pcihost_realize(DeviceState *dev, Error **errp) +{ + PCIHostState *phb = PCI_HOST_BRIDGE(dev); + BonitoState *bs = BONITO_PCI_HOST_BRIDGE(dev); + MemoryRegion *pcimem_lo_alias = g_new(MemoryRegion, 3); + + memory_region_init(&bs->pci_mem, OBJECT(dev), "pci.mem", BONITO_PCIHI_SIZE); + phb->bus = pci_register_root_bus(dev, "pci", + pci_bonito_set_irq, pci_bonito_map_irq, + dev, &bs->pci_mem, get_system_io(), + PCI_DEVFN(5, 0), 32, TYPE_PCI_BUS); + + for (size_t i = 0; i < 3; i++) { + char *name = g_strdup_printf("pci.lomem%zu", i); + + memory_region_init_alias(&pcimem_lo_alias[i], NULL, name, + &bs->pci_mem, i * 64 * MiB, 64 * MiB); + memory_region_add_subregion(get_system_memory(), + BONITO_PCILO_BASE + i * 64 * MiB, + &pcimem_lo_alias[i]); + g_free(name); + } + + create_unimplemented_device("pci.io", BONITO_PCIIO_BASE, 1 * MiB); +} + +static void bonito_realize(PCIDevice *dev, Error **errp) +{ + PCIBonitoState *s = PCI_BONITO(dev); + SysBusDevice *sysbus = SYS_BUS_DEVICE(s->pcihost); + PCIHostState *phb = PCI_HOST_BRIDGE(s->pcihost); + BonitoState *bs = 
BONITO_PCI_HOST_BRIDGE(s->pcihost); + MemoryRegion *pcimem_alias = g_new(MemoryRegion, 1); + + /* + * Bonito North Bridge, built on FPGA, + * VENDOR_ID/DEVICE_ID are "undefined" + */ + pci_config_set_prog_interface(dev->config, 0x00); + + /* set the north bridge register mapping */ + memory_region_init_io(&s->iomem, OBJECT(s), &bonito_ops, s, + "north-bridge-register", BONITO_INTERNAL_REG_SIZE); + sysbus_init_mmio(sysbus, &s->iomem); + sysbus_mmio_map(sysbus, 0, BONITO_INTERNAL_REG_BASE); + + /* set the north bridge pci configure mapping */ + memory_region_init_io(&phb->conf_mem, OBJECT(s), &bonito_pciconf_ops, s, + "north-bridge-pci-config", BONITO_PCICONFIG_SIZE); + sysbus_init_mmio(sysbus, &phb->conf_mem); + sysbus_mmio_map(sysbus, 1, BONITO_PCICONFIG_BASE); + + /* set the south bridge pci configure mapping */ + memory_region_init_io(&phb->data_mem, OBJECT(s), &bonito_spciconf_ops, s, + "south-bridge-pci-config", BONITO_SPCICONFIG_SIZE); + sysbus_init_mmio(sysbus, &phb->data_mem); + sysbus_mmio_map(sysbus, 2, BONITO_SPCICONFIG_BASE); + + create_unimplemented_device("bonito", BONITO_REG_BASE, BONITO_REG_SIZE); + + memory_region_init_io(&s->iomem_ldma, OBJECT(s), &bonito_ldma_ops, s, + "ldma", 0x100); + sysbus_init_mmio(sysbus, &s->iomem_ldma); + sysbus_mmio_map(sysbus, 3, 0x1fe00200); + + /* PCI copier */ + memory_region_init_io(&s->iomem_cop, OBJECT(s), &bonito_cop_ops, s, + "cop", 0x100); + sysbus_init_mmio(sysbus, &s->iomem_cop); + sysbus_mmio_map(sysbus, 4, 0x1fe00300); + + create_unimplemented_device("ROMCS", BONITO_FLASH_BASE, 60 * MiB); + + /* Map PCI IO Space 0x1fd0 0000 - 0x1fd1 0000 */ + memory_region_init_alias(&s->bonito_pciio, OBJECT(s), "isa_mmio", + get_system_io(), 0, BONITO_PCIIO_SIZE); + sysbus_init_mmio(sysbus, &s->bonito_pciio); + sysbus_mmio_map(sysbus, 5, BONITO_PCIIO_BASE); + + /* add pci local io mapping */ + + memory_region_init_alias(&s->bonito_localio, OBJECT(s), "IOCS[0]", + get_system_io(), 0, 256 * KiB); + sysbus_init_mmio(sysbus, &s->bonito_localio); + sysbus_mmio_map(sysbus, 6, BONITO_DEV_BASE); + create_unimplemented_device("IOCS[1]", BONITO_DEV_BASE + 1 * 256 * KiB, + 256 * KiB); + create_unimplemented_device("IOCS[2]", BONITO_DEV_BASE + 2 * 256 * KiB, + 256 * KiB); + create_unimplemented_device("IOCS[3]", BONITO_DEV_BASE + 3 * 256 * KiB, + 256 * KiB); + + memory_region_init_alias(pcimem_alias, NULL, "pci.mem.alias", + &bs->pci_mem, 0, BONITO_PCIHI_SIZE); + memory_region_add_subregion(get_system_memory(), + BONITO_PCIHI_BASE, pcimem_alias); + create_unimplemented_device("PCI_2", + (hwaddr)BONITO_PCIHI_BASE + BONITO_PCIHI_SIZE, + 2 * GiB); + + /* set the default value of north bridge pci config */ + pci_set_word(dev->config + PCI_COMMAND, 0x0000); + pci_set_word(dev->config + PCI_STATUS, 0x0000); + pci_set_word(dev->config + PCI_SUBSYSTEM_VENDOR_ID, 0x0000); + pci_set_word(dev->config + PCI_SUBSYSTEM_ID, 0x0000); + + pci_set_byte(dev->config + PCI_INTERRUPT_LINE, 0x00); + pci_config_set_interrupt_pin(dev->config, 0x01); /* interrupt pin A */ + + pci_set_byte(dev->config + PCI_MIN_GNT, 0x3c); + pci_set_byte(dev->config + PCI_MAX_LAT, 0x00); + + qemu_register_reset(bonito_reset, s); +} + +PCIBus *bonito_init(qemu_irq *pic) +{ + DeviceState *dev; + BonitoState *pcihost; + PCIHostState *phb; + PCIBonitoState *s; + PCIDevice *d; + + dev = qdev_new(TYPE_BONITO_PCI_HOST_BRIDGE); + phb = PCI_HOST_BRIDGE(dev); + pcihost = BONITO_PCI_HOST_BRIDGE(dev); + pcihost->pic = pic; + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + + d = pci_new(PCI_DEVFN(0, 
0), TYPE_PCI_BONITO); + s = PCI_BONITO(d); + s->pcihost = pcihost; + pcihost->pci_dev = s; + pci_realize_and_unref(d, phb->bus, &error_fatal); + + return phb->bus; +} + +static void bonito_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + + k->realize = bonito_realize; + k->vendor_id = 0xdf53; + k->device_id = 0x00d5; + k->revision = 0x01; + k->class_id = PCI_CLASS_BRIDGE_HOST; + dc->desc = "Host bridge"; + dc->vmsd = &vmstate_bonito; + /* + * PCI-facing part of the host bridge, not usable without the + * host-facing part, which can't be device_add'ed, yet. + */ + dc->user_creatable = false; +} + +static const TypeInfo bonito_info = { + .name = TYPE_PCI_BONITO, + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(PCIBonitoState), + .class_init = bonito_class_init, + .interfaces = (InterfaceInfo[]) { + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { }, + }, +}; + +static void bonito_pcihost_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = bonito_pcihost_realize; +} + +static const TypeInfo bonito_pcihost_info = { + .name = TYPE_BONITO_PCI_HOST_BRIDGE, + .parent = TYPE_PCI_HOST_BRIDGE, + .instance_size = sizeof(BonitoState), + .class_init = bonito_pcihost_class_init, +}; + +static void bonito_register_types(void) +{ + type_register_static(&bonito_pcihost_info); + type_register_static(&bonito_info); +} + +type_init(bonito_register_types) diff --git a/hw/pci-host/designware.c b/hw/pci-host/designware.c new file mode 100644 index 000000000..bde3a343a --- /dev/null +++ b/hw/pci-host/designware.c @@ -0,0 +1,773 @@ +/* + * Copyright (c) 2018, Impinj, Inc. + * + * Designware PCIe IP block emulation + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see + * <http://www.gnu.org/licenses/>.
+ */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/module.h" +#include "qemu/log.h" +#include "hw/pci/msi.h" +#include "hw/pci/pci_bridge.h" +#include "hw/pci/pci_host.h" +#include "hw/pci/pcie_port.h" +#include "hw/qdev-properties.h" +#include "migration/vmstate.h" +#include "hw/irq.h" +#include "hw/pci-host/designware.h" + +#define DESIGNWARE_PCIE_PORT_LINK_CONTROL 0x710 +#define DESIGNWARE_PCIE_PHY_DEBUG_R1 0x72C +#define DESIGNWARE_PCIE_PHY_DEBUG_R1_XMLH_LINK_UP BIT(4) +#define DESIGNWARE_PCIE_LINK_WIDTH_SPEED_CONTROL 0x80C +#define DESIGNWARE_PCIE_PORT_LOGIC_SPEED_CHANGE BIT(17) +#define DESIGNWARE_PCIE_MSI_ADDR_LO 0x820 +#define DESIGNWARE_PCIE_MSI_ADDR_HI 0x824 +#define DESIGNWARE_PCIE_MSI_INTR0_ENABLE 0x828 +#define DESIGNWARE_PCIE_MSI_INTR0_MASK 0x82C +#define DESIGNWARE_PCIE_MSI_INTR0_STATUS 0x830 +#define DESIGNWARE_PCIE_ATU_VIEWPORT 0x900 +#define DESIGNWARE_PCIE_ATU_REGION_INBOUND BIT(31) +#define DESIGNWARE_PCIE_ATU_CR1 0x904 +#define DESIGNWARE_PCIE_ATU_TYPE_MEM (0x0 << 0) +#define DESIGNWARE_PCIE_ATU_CR2 0x908 +#define DESIGNWARE_PCIE_ATU_ENABLE BIT(31) +#define DESIGNWARE_PCIE_ATU_LOWER_BASE 0x90C +#define DESIGNWARE_PCIE_ATU_UPPER_BASE 0x910 +#define DESIGNWARE_PCIE_ATU_LIMIT 0x914 +#define DESIGNWARE_PCIE_ATU_LOWER_TARGET 0x918 +#define DESIGNWARE_PCIE_ATU_BUS(x) (((x) >> 24) & 0xff) +#define DESIGNWARE_PCIE_ATU_DEVFN(x) (((x) >> 16) & 0xff) +#define DESIGNWARE_PCIE_ATU_UPPER_TARGET 0x91C + +#define DESIGNWARE_PCIE_IRQ_MSI 3 + +static DesignwarePCIEHost * +designware_pcie_root_to_host(DesignwarePCIERoot *root) +{ + BusState *bus = qdev_get_parent_bus(DEVICE(root)); + return DESIGNWARE_PCIE_HOST(bus->parent); +} + +static uint64_t designware_pcie_root_msi_read(void *opaque, hwaddr addr, + unsigned size) +{ + /* + * Attempts to read from the MSI address are undefined in + * the PCI specifications. For this hardware, the datasheet + * specifies that a read from the magic address is simply not + * intercepted by the MSI controller, and will go out to the + * AHB/AXI bus like any other PCI-device-initiated DMA read. + * This is not trivial to implement in QEMU, so since + * well-behaved guests won't ever ask a PCI device to DMA from + * this address we just log the missing functionality. 
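+ * + * Writes, by contrast, are handled by designware_pcie_root_msi_write() + * below: an MSI message carrying vector number V sets BIT(V) & enable in + * the INTR0 status register and, if any enabled vector is left unmasked, + * raises the DESIGNWARE_PCIE_IRQ_MSI line.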
+ */ + qemu_log_mask(LOG_UNIMP, "%s not implemented\n", __func__); + return 0; +} + +static void designware_pcie_root_msi_write(void *opaque, hwaddr addr, + uint64_t val, unsigned len) +{ + DesignwarePCIERoot *root = DESIGNWARE_PCIE_ROOT(opaque); + DesignwarePCIEHost *host = designware_pcie_root_to_host(root); + + root->msi.intr[0].status |= BIT(val) & root->msi.intr[0].enable; + + if (root->msi.intr[0].status & ~root->msi.intr[0].mask) { + qemu_set_irq(host->pci.irqs[DESIGNWARE_PCIE_IRQ_MSI], 1); + } +} + +static const MemoryRegionOps designware_pci_host_msi_ops = { + .read = designware_pcie_root_msi_read, + .write = designware_pcie_root_msi_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +static void designware_pcie_root_update_msi_mapping(DesignwarePCIERoot *root) + +{ + MemoryRegion *mem = &root->msi.iomem; + const uint64_t base = root->msi.base; + const bool enable = root->msi.intr[0].enable; + + memory_region_set_address(mem, base); + memory_region_set_enabled(mem, enable); +} + +static DesignwarePCIEViewport * +designware_pcie_root_get_current_viewport(DesignwarePCIERoot *root) +{ + const unsigned int idx = root->atu_viewport & 0xF; + const unsigned int dir = + !!(root->atu_viewport & DESIGNWARE_PCIE_ATU_REGION_INBOUND); + return &root->viewports[dir][idx]; +} + +static uint32_t +designware_pcie_root_config_read(PCIDevice *d, uint32_t address, int len) +{ + DesignwarePCIERoot *root = DESIGNWARE_PCIE_ROOT(d); + DesignwarePCIEViewport *viewport = + designware_pcie_root_get_current_viewport(root); + + uint32_t val; + + switch (address) { + case DESIGNWARE_PCIE_PORT_LINK_CONTROL: + /* + * The Linux guest uses this register only to configure the number + * of PCIe lanes (which in our case is irrelevant) and doesn't + * really care about the value it reads back + */ + val = 0xDEADBEEF; + break; + + case DESIGNWARE_PCIE_LINK_WIDTH_SPEED_CONTROL: + /* + * To make sure that any guest code waiting for a speed change + * does not time out, we always report + * PORT_LOGIC_SPEED_CHANGE as set + */ + val = DESIGNWARE_PCIE_PORT_LOGIC_SPEED_CHANGE; + break; + + case DESIGNWARE_PCIE_MSI_ADDR_LO: + val = root->msi.base; + break; + + case DESIGNWARE_PCIE_MSI_ADDR_HI: + val = root->msi.base >> 32; + break; + + case DESIGNWARE_PCIE_MSI_INTR0_ENABLE: + val = root->msi.intr[0].enable; + break; + + case DESIGNWARE_PCIE_MSI_INTR0_MASK: + val = root->msi.intr[0].mask; + break; + + case DESIGNWARE_PCIE_MSI_INTR0_STATUS: + val = root->msi.intr[0].status; + break; + + case DESIGNWARE_PCIE_PHY_DEBUG_R1: + val = DESIGNWARE_PCIE_PHY_DEBUG_R1_XMLH_LINK_UP; + break; + + case DESIGNWARE_PCIE_ATU_VIEWPORT: + val = root->atu_viewport; + break; + + case DESIGNWARE_PCIE_ATU_LOWER_BASE: + val = viewport->base; + break; + + case DESIGNWARE_PCIE_ATU_UPPER_BASE: + val = viewport->base >> 32; + break; + + case DESIGNWARE_PCIE_ATU_LOWER_TARGET: + val = viewport->target; + break; + + case DESIGNWARE_PCIE_ATU_UPPER_TARGET: + val = viewport->target >> 32; + break; + + case DESIGNWARE_PCIE_ATU_LIMIT: + val = viewport->limit; + break; + + case DESIGNWARE_PCIE_ATU_CR1: + case DESIGNWARE_PCIE_ATU_CR2: + val = viewport->cr[(address - DESIGNWARE_PCIE_ATU_CR1) / + sizeof(uint32_t)]; + break; + + default: + val = pci_default_read_config(d, address, len); + break; + } + + return val; +} + +static uint64_t designware_pcie_root_data_access(void *opaque, hwaddr addr, + uint64_t *val, unsigned len) +{ + DesignwarePCIEViewport *viewport = opaque; + DesignwarePCIERoot
*root = viewport->root; + + const uint8_t busnum = DESIGNWARE_PCIE_ATU_BUS(viewport->target); + const uint8_t devfn = DESIGNWARE_PCIE_ATU_DEVFN(viewport->target); + PCIBus *pcibus = pci_get_bus(PCI_DEVICE(root)); + PCIDevice *pcidev = pci_find_device(pcibus, busnum, devfn); + + if (pcidev) { + addr &= pci_config_size(pcidev) - 1; + + if (val) { + pci_host_config_write_common(pcidev, addr, + pci_config_size(pcidev), + *val, len); + } else { + return pci_host_config_read_common(pcidev, addr, + pci_config_size(pcidev), + len); + } + } + + return UINT64_MAX; +} + +static uint64_t designware_pcie_root_data_read(void *opaque, hwaddr addr, + unsigned len) +{ + return designware_pcie_root_data_access(opaque, addr, NULL, len); +} + +static void designware_pcie_root_data_write(void *opaque, hwaddr addr, + uint64_t val, unsigned len) +{ + designware_pcie_root_data_access(opaque, addr, &val, len); +} + +static const MemoryRegionOps designware_pci_host_conf_ops = { + .read = designware_pcie_root_data_read, + .write = designware_pcie_root_data_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 1, + .max_access_size = 4, + }, +}; + +static void designware_pcie_update_viewport(DesignwarePCIERoot *root, + DesignwarePCIEViewport *viewport) +{ + const uint64_t target = viewport->target; + const uint64_t base = viewport->base; + const uint64_t size = (uint64_t)viewport->limit - base + 1; + const bool enabled = viewport->cr[1] & DESIGNWARE_PCIE_ATU_ENABLE; + + MemoryRegion *current, *other; + + if (viewport->cr[0] == DESIGNWARE_PCIE_ATU_TYPE_MEM) { + current = &viewport->mem; + other = &viewport->cfg; + memory_region_set_alias_offset(current, target); + } else { + current = &viewport->cfg; + other = &viewport->mem; + } + + /* + * An outbound viewport can be reconfigured from being MEM to CFG; + * to account for that, we disable the "other" memory region that + * becomes unused due to that fact.
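+ * + * For example (hypothetical values, not taken from the patch): an + * outbound MEM viewport with base 0x20000000, limit 0x2fffffff and + * target 0x0 gives size = limit - base + 1 = 0x10000000; the "mem" + * alias is offset by the target and mapped at the base once + * CR2.ENABLE is written.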
+ */ + memory_region_set_enabled(other, false); + if (enabled) { + memory_region_set_size(current, size); + memory_region_set_address(current, base); + } + memory_region_set_enabled(current, enabled); +} + +static void designware_pcie_root_config_write(PCIDevice *d, uint32_t address, + uint32_t val, int len) +{ + DesignwarePCIERoot *root = DESIGNWARE_PCIE_ROOT(d); + DesignwarePCIEHost *host = designware_pcie_root_to_host(root); + DesignwarePCIEViewport *viewport = + designware_pcie_root_get_current_viewport(root); + + switch (address) { + case DESIGNWARE_PCIE_PORT_LINK_CONTROL: + case DESIGNWARE_PCIE_LINK_WIDTH_SPEED_CONTROL: + case DESIGNWARE_PCIE_PHY_DEBUG_R1: + /* No-op */ + break; + + case DESIGNWARE_PCIE_MSI_ADDR_LO: + root->msi.base &= 0xFFFFFFFF00000000ULL; + root->msi.base |= val; + designware_pcie_root_update_msi_mapping(root); + break; + + case DESIGNWARE_PCIE_MSI_ADDR_HI: + root->msi.base &= 0x00000000FFFFFFFFULL; + root->msi.base |= (uint64_t)val << 32; + designware_pcie_root_update_msi_mapping(root); + break; + + case DESIGNWARE_PCIE_MSI_INTR0_ENABLE: + root->msi.intr[0].enable = val; + designware_pcie_root_update_msi_mapping(root); + break; + + case DESIGNWARE_PCIE_MSI_INTR0_MASK: + root->msi.intr[0].mask = val; + break; + + case DESIGNWARE_PCIE_MSI_INTR0_STATUS: + root->msi.intr[0].status ^= val; + if (!root->msi.intr[0].status) { + qemu_set_irq(host->pci.irqs[DESIGNWARE_PCIE_IRQ_MSI], 0); + } + break; + + case DESIGNWARE_PCIE_ATU_VIEWPORT: + root->atu_viewport = val; + break; + + case DESIGNWARE_PCIE_ATU_LOWER_BASE: + viewport->base &= 0xFFFFFFFF00000000ULL; + viewport->base |= val; + break; + + case DESIGNWARE_PCIE_ATU_UPPER_BASE: + viewport->base &= 0x00000000FFFFFFFFULL; + viewport->base |= (uint64_t)val << 32; + break; + + case DESIGNWARE_PCIE_ATU_LOWER_TARGET: + viewport->target &= 0xFFFFFFFF00000000ULL; + viewport->target |= val; + break; + + case DESIGNWARE_PCIE_ATU_UPPER_TARGET: + viewport->target &= 0x00000000FFFFFFFFULL; + /* bug fix: the upper half must be shifted into bits 63:32, + * mirroring ATU_UPPER_BASE above */ + viewport->target |= (uint64_t)val << 32; + break; + + case DESIGNWARE_PCIE_ATU_LIMIT: + viewport->limit = val; + break; + + case DESIGNWARE_PCIE_ATU_CR1: + viewport->cr[0] = val; + break; + case DESIGNWARE_PCIE_ATU_CR2: + viewport->cr[1] = val; + designware_pcie_update_viewport(root, viewport); + break; + + default: + pci_bridge_write_config(d, address, val, len); + break; + } +} + +static char *designware_pcie_viewport_name(const char *direction, + unsigned int i, + const char *type) +{ + return g_strdup_printf("PCI %s Viewport %u [%s]", + direction, i, type); +} + +static void designware_pcie_root_realize(PCIDevice *dev, Error **errp) +{ + DesignwarePCIERoot *root = DESIGNWARE_PCIE_ROOT(dev); + DesignwarePCIEHost *host = designware_pcie_root_to_host(root); + MemoryRegion *address_space = &host->pci.memory; + PCIBridge *br = PCI_BRIDGE(dev); + DesignwarePCIEViewport *viewport; + /* + * Dummy values used for initial configuration of MemoryRegions + * that belong to a given viewport + */ + const hwaddr dummy_offset = 0; + const uint64_t dummy_size = 4; + size_t i; + + br->bus_name = "dw-pcie"; + + pci_set_word(dev->config + PCI_COMMAND, + PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER); + + pci_config_set_interrupt_pin(dev->config, 1); + pci_bridge_initfn(dev, TYPE_PCIE_BUS); + + pcie_port_init_reg(dev); + + pcie_cap_init(dev, 0x70, PCI_EXP_TYPE_ROOT_PORT, + 0, &error_fatal); + + msi_nonbroken = true; + msi_init(dev, 0x50, 32, true, true, &error_fatal); + + for (i = 0; i < DESIGNWARE_PCIE_NUM_VIEWPORTS; i++) { + MemoryRegion *source, *destination, *mem; + const char
*direction; + char *name; + + viewport = &root->viewports[DESIGNWARE_PCIE_VIEWPORT_INBOUND][i]; + viewport->inbound = true; + viewport->base = 0x0000000000000000ULL; + viewport->target = 0x0000000000000000ULL; + viewport->limit = UINT32_MAX; + viewport->cr[0] = DESIGNWARE_PCIE_ATU_TYPE_MEM; + + source = &host->pci.address_space_root; + destination = get_system_memory(); + direction = "Inbound"; + + /* + * Configure MemoryRegion implementing PCI -> CPU memory + * access + */ + mem = &viewport->mem; + name = designware_pcie_viewport_name(direction, i, "MEM"); + memory_region_init_alias(mem, OBJECT(root), name, destination, + dummy_offset, dummy_size); + memory_region_add_subregion_overlap(source, dummy_offset, mem, -1); + memory_region_set_enabled(mem, false); + g_free(name); + + viewport = &root->viewports[DESIGNWARE_PCIE_VIEWPORT_OUTBOUND][i]; + viewport->root = root; + viewport->inbound = false; + viewport->base = 0x0000000000000000ULL; + viewport->target = 0x0000000000000000ULL; + viewport->limit = UINT32_MAX; + viewport->cr[0] = DESIGNWARE_PCIE_ATU_TYPE_MEM; + + destination = &host->pci.memory; + direction = "Outbound"; + source = get_system_memory(); + + /* + * Configure MemoryRegion implementing CPU -> PCI memory + * access + */ + mem = &viewport->mem; + name = designware_pcie_viewport_name(direction, i, "MEM"); + memory_region_init_alias(mem, OBJECT(root), name, destination, + dummy_offset, dummy_size); + memory_region_add_subregion(source, dummy_offset, mem); + memory_region_set_enabled(mem, false); + g_free(name); + + /* + * Configure MemoryRegion implementing access to configuration + * space + */ + mem = &viewport->cfg; + name = designware_pcie_viewport_name(direction, i, "CFG"); + memory_region_init_io(&viewport->cfg, OBJECT(root), + &designware_pci_host_conf_ops, + viewport, name, dummy_size); + memory_region_add_subregion(source, dummy_offset, mem); + memory_region_set_enabled(mem, false); + g_free(name); + } + + /* + * If no inbound iATU windows are configured, HW defaults to + * letting inbound TLPs pass through. We emulate that by explicitly + * configuring the first inbound window to cover all of the target's + * address space. + * + * NOTE: This will not work correctly for the case when the first + * configured inbound window is window 0 + */ + viewport = &root->viewports[DESIGNWARE_PCIE_VIEWPORT_INBOUND][0]; + viewport->cr[1] = DESIGNWARE_PCIE_ATU_ENABLE; + designware_pcie_update_viewport(root, viewport); + + memory_region_init_io(&root->msi.iomem, OBJECT(root), + &designware_pci_host_msi_ops, + root, "pcie-msi", 0x4); + /* + * We initially place the MSI interrupt I/O region at address 0 and + * disable it.
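(Placing it disabled means no guest access is intercepted at PCI address 0 in the meantime.)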
It'll later be moved to the correct offset and enabled + * in designware_pcie_root_update_msi_mapping() as part of the + * initialization done by the guest OS + */ + memory_region_add_subregion(address_space, dummy_offset, &root->msi.iomem); + memory_region_set_enabled(&root->msi.iomem, false); +} + +static void designware_pcie_set_irq(void *opaque, int irq_num, int level) +{ + DesignwarePCIEHost *host = DESIGNWARE_PCIE_HOST(opaque); + + qemu_set_irq(host->pci.irqs[irq_num], level); +} + +static const char * +designware_pcie_host_root_bus_path(PCIHostState *host_bridge, PCIBus *rootbus) +{ + return "0000:00"; +} + +static const VMStateDescription vmstate_designware_pcie_msi_bank = { + .name = "designware-pcie-msi-bank", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(enable, DesignwarePCIEMSIBank), + VMSTATE_UINT32(mask, DesignwarePCIEMSIBank), + VMSTATE_UINT32(status, DesignwarePCIEMSIBank), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_designware_pcie_msi = { + .name = "designware-pcie-msi", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT64(base, DesignwarePCIEMSI), + VMSTATE_STRUCT_ARRAY(intr, + DesignwarePCIEMSI, + DESIGNWARE_PCIE_NUM_MSI_BANKS, + 1, + vmstate_designware_pcie_msi_bank, + DesignwarePCIEMSIBank), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_designware_pcie_viewport = { + .name = "designware-pcie-viewport", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT64(base, DesignwarePCIEViewport), + VMSTATE_UINT64(target, DesignwarePCIEViewport), + VMSTATE_UINT32(limit, DesignwarePCIEViewport), + VMSTATE_UINT32_ARRAY(cr, DesignwarePCIEViewport, 2), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_designware_pcie_root = { + .name = "designware-pcie-root", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_PCI_DEVICE(parent_obj, PCIBridge), + VMSTATE_UINT32(atu_viewport, DesignwarePCIERoot), + VMSTATE_STRUCT_2DARRAY(viewports, + DesignwarePCIERoot, + 2, + DESIGNWARE_PCIE_NUM_VIEWPORTS, + 1, + vmstate_designware_pcie_viewport, + DesignwarePCIEViewport), + VMSTATE_STRUCT(msi, + DesignwarePCIERoot, + 1, + vmstate_designware_pcie_msi, + DesignwarePCIEMSI), + VMSTATE_END_OF_LIST() + } +}; + +static void designware_pcie_root_class_init(ObjectClass *klass, void *data) +{ + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + DeviceClass *dc = DEVICE_CLASS(klass); + + set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); + + k->vendor_id = PCI_VENDOR_ID_SYNOPSYS; + k->device_id = 0xABCD; + k->revision = 0; + k->class_id = PCI_CLASS_BRIDGE_PCI; + k->is_bridge = true; + k->exit = pci_bridge_exitfn; + k->realize = designware_pcie_root_realize; + k->config_read = designware_pcie_root_config_read; + k->config_write = designware_pcie_root_config_write; + + dc->reset = pci_bridge_reset; + /* + * PCI-facing part of the host bridge, not usable without the + * host-facing part, which can't be device_add'ed, yet.
+ */ + dc->user_creatable = false; + dc->vmsd = &vmstate_designware_pcie_root; +} + +static uint64_t designware_pcie_host_mmio_read(void *opaque, hwaddr addr, + unsigned int size) +{ + PCIHostState *pci = PCI_HOST_BRIDGE(opaque); + PCIDevice *device = pci_find_device(pci->bus, 0, 0); + + return pci_host_config_read_common(device, + addr, + pci_config_size(device), + size); +} + +static void designware_pcie_host_mmio_write(void *opaque, hwaddr addr, + uint64_t val, unsigned int size) +{ + PCIHostState *pci = PCI_HOST_BRIDGE(opaque); + PCIDevice *device = pci_find_device(pci->bus, 0, 0); + + return pci_host_config_write_common(device, + addr, + pci_config_size(device), + val, size); +} + +static const MemoryRegionOps designware_pci_mmio_ops = { + .read = designware_pcie_host_mmio_read, + .write = designware_pcie_host_mmio_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .impl = { + /* + * Our device would not work correctly if the guest was doing + * unaligned access. This might not be a limitation on the real + * device but in practice there is no reason for a guest to access + * this device unaligned. + */ + .min_access_size = 4, + .max_access_size = 4, + .unaligned = false, + }, +}; + +static AddressSpace *designware_pcie_host_set_iommu(PCIBus *bus, void *opaque, + int devfn) +{ + DesignwarePCIEHost *s = DESIGNWARE_PCIE_HOST(opaque); + + return &s->pci.address_space; +} + +static void designware_pcie_host_realize(DeviceState *dev, Error **errp) +{ + PCIHostState *pci = PCI_HOST_BRIDGE(dev); + DesignwarePCIEHost *s = DESIGNWARE_PCIE_HOST(dev); + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + size_t i; + + for (i = 0; i < ARRAY_SIZE(s->pci.irqs); i++) { + sysbus_init_irq(sbd, &s->pci.irqs[i]); + } + + memory_region_init_io(&s->mmio, + OBJECT(s), + &designware_pci_mmio_ops, + s, + "pcie.reg", 4 * 1024); + sysbus_init_mmio(sbd, &s->mmio); + + memory_region_init(&s->pci.io, OBJECT(s), "pcie-pio", 16); + memory_region_init(&s->pci.memory, OBJECT(s), + "pcie-bus-memory", + UINT64_MAX); + + pci->bus = pci_register_root_bus(dev, "pcie", + designware_pcie_set_irq, + pci_swizzle_map_irq_fn, + s, + &s->pci.memory, + &s->pci.io, + 0, 4, + TYPE_PCIE_BUS); + + memory_region_init(&s->pci.address_space_root, + OBJECT(s), + "pcie-bus-address-space-root", + UINT64_MAX); + memory_region_add_subregion(&s->pci.address_space_root, + 0x0, &s->pci.memory); + address_space_init(&s->pci.address_space, + &s->pci.address_space_root, + "pcie-bus-address-space"); + pci_setup_iommu(pci->bus, designware_pcie_host_set_iommu, s); + + qdev_realize(DEVICE(&s->root), BUS(pci->bus), &error_fatal); +} + +static const VMStateDescription vmstate_designware_pcie_host = { + .name = "designware-pcie-host", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_STRUCT(root, + DesignwarePCIEHost, + 1, + vmstate_designware_pcie_root, + DesignwarePCIERoot), + VMSTATE_END_OF_LIST() + } +}; + +static void designware_pcie_host_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIHostBridgeClass *hc = PCI_HOST_BRIDGE_CLASS(klass); + + hc->root_bus_path = designware_pcie_host_root_bus_path; + dc->realize = designware_pcie_host_realize; + set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); + dc->fw_name = "pci"; + dc->vmsd = &vmstate_designware_pcie_host; +} + +static void designware_pcie_host_init(Object *obj) +{ + DesignwarePCIEHost *s = DESIGNWARE_PCIE_HOST(obj); + DesignwarePCIERoot *root = &s->root; + + object_initialize_child(obj, "root", root, TYPE_DESIGNWARE_PCIE_ROOT); + 
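/* + * The root port is embedded as a QOM child pinned at devfn 00.0 by the + * property settings below; designware_pcie_host_realize() then realizes + * it on the freshly created root bus. + */ +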
qdev_prop_set_int32(DEVICE(root), "addr", PCI_DEVFN(0, 0)); + qdev_prop_set_bit(DEVICE(root), "multifunction", false); +} + +static const TypeInfo designware_pcie_root_info = { + .name = TYPE_DESIGNWARE_PCIE_ROOT, + .parent = TYPE_PCI_BRIDGE, + .instance_size = sizeof(DesignwarePCIERoot), + .class_init = designware_pcie_root_class_init, + .interfaces = (InterfaceInfo[]) { + { INTERFACE_PCIE_DEVICE }, + { } + }, +}; + +static const TypeInfo designware_pcie_host_info = { + .name = TYPE_DESIGNWARE_PCIE_HOST, + .parent = TYPE_PCI_HOST_BRIDGE, + .instance_size = sizeof(DesignwarePCIEHost), + .instance_init = designware_pcie_host_init, + .class_init = designware_pcie_host_class_init, +}; + +static void designware_pcie_register(void) +{ + type_register_static(&designware_pcie_root_info); + type_register_static(&designware_pcie_host_info); +} +type_init(designware_pcie_register) diff --git a/hw/pci-host/gpex-acpi.c b/hw/pci-host/gpex-acpi.c new file mode 100644 index 000000000..e7e162a00 --- /dev/null +++ b/hw/pci-host/gpex-acpi.c @@ -0,0 +1,269 @@ +#include "qemu/osdep.h" +#include "hw/acpi/aml-build.h" +#include "hw/pci-host/gpex.h" +#include "hw/arm/virt.h" +#include "hw/pci/pci_bus.h" +#include "hw/pci/pci_bridge.h" +#include "hw/pci/pcie_host.h" + +static void acpi_dsdt_add_pci_route_table(Aml *dev, uint32_t irq) +{ + Aml *method, *crs; + int i, slot_no; + + /* Declare the PCI Routing Table. */ + Aml *rt_pkg = aml_varpackage(PCI_SLOT_MAX * PCI_NUM_PINS); + for (slot_no = 0; slot_no < PCI_SLOT_MAX; slot_no++) { + for (i = 0; i < PCI_NUM_PINS; i++) { + int gsi = (i + slot_no) % PCI_NUM_PINS; + Aml *pkg = aml_package(4); + aml_append(pkg, aml_int((slot_no << 16) | 0xFFFF)); + aml_append(pkg, aml_int(i)); + aml_append(pkg, aml_name("GSI%d", gsi)); + aml_append(pkg, aml_int(0)); + aml_append(rt_pkg, pkg); + } + } + aml_append(dev, aml_name_decl("_PRT", rt_pkg)); + + /* Create GSI link device */ + for (i = 0; i < PCI_NUM_PINS; i++) { + uint32_t irqs = irq + i; + Aml *dev_gsi = aml_device("GSI%d", i); + aml_append(dev_gsi, aml_name_decl("_HID", aml_string("PNP0C0F"))); + aml_append(dev_gsi, aml_name_decl("_UID", aml_int(i))); + crs = aml_resource_template(); + aml_append(crs, + aml_interrupt(AML_CONSUMER, AML_LEVEL, AML_ACTIVE_HIGH, + AML_EXCLUSIVE, &irqs, 1)); + aml_append(dev_gsi, aml_name_decl("_PRS", crs)); + crs = aml_resource_template(); + aml_append(crs, + aml_interrupt(AML_CONSUMER, AML_LEVEL, AML_ACTIVE_HIGH, + AML_EXCLUSIVE, &irqs, 1)); + aml_append(dev_gsi, aml_name_decl("_CRS", crs)); + method = aml_method("_SRS", 1, AML_NOTSERIALIZED); + aml_append(dev_gsi, method); + aml_append(dev, dev_gsi); + } +} + +static void acpi_dsdt_add_pci_osc(Aml *dev) +{ + Aml *method, *UUID, *ifctx, *ifctx1, *elsectx, *buf; + + /* Declare an _OSC (OS Control Handoff) method */ + aml_append(dev, aml_name_decl("SUPP", aml_int(0))); + aml_append(dev, aml_name_decl("CTRL", aml_int(0))); + method = aml_method("_OSC", 4, AML_NOTSERIALIZED); + aml_append(method, + aml_create_dword_field(aml_arg(3), aml_int(0), "CDW1")); + + /* PCI Firmware Specification 3.0 + * 4.5.1. 
_OSC Interface for PCI Host Bridge Devices + * The _OSC interface for a PCI/PCI-X/PCI Express hierarchy is + * identified by the Universal Unique IDentifier (UUID) + * 33DB4D5B-1FF7-401C-9657-7441C03DD766 + */ + UUID = aml_touuid("33DB4D5B-1FF7-401C-9657-7441C03DD766"); + ifctx = aml_if(aml_equal(aml_arg(0), UUID)); + aml_append(ifctx, + aml_create_dword_field(aml_arg(3), aml_int(4), "CDW2")); + aml_append(ifctx, + aml_create_dword_field(aml_arg(3), aml_int(8), "CDW3")); + aml_append(ifctx, aml_store(aml_name("CDW2"), aml_name("SUPP"))); + aml_append(ifctx, aml_store(aml_name("CDW3"), aml_name("CTRL"))); + + /* + * Allow OS control for all 5 features: + * PCIeHotplug SHPCHotplug PME AER PCIeCapability. + */ + aml_append(ifctx, aml_and(aml_name("CTRL"), aml_int(0x1F), + aml_name("CTRL"))); + + ifctx1 = aml_if(aml_lnot(aml_equal(aml_arg(1), aml_int(0x1)))); + aml_append(ifctx1, aml_or(aml_name("CDW1"), aml_int(0x08), + aml_name("CDW1"))); + aml_append(ifctx, ifctx1); + + ifctx1 = aml_if(aml_lnot(aml_equal(aml_name("CDW3"), aml_name("CTRL")))); + aml_append(ifctx1, aml_or(aml_name("CDW1"), aml_int(0x10), + aml_name("CDW1"))); + aml_append(ifctx, ifctx1); + + aml_append(ifctx, aml_store(aml_name("CTRL"), aml_name("CDW3"))); + aml_append(ifctx, aml_return(aml_arg(3))); + aml_append(method, ifctx); + + elsectx = aml_else(); + aml_append(elsectx, aml_or(aml_name("CDW1"), aml_int(4), + aml_name("CDW1"))); + aml_append(elsectx, aml_return(aml_arg(3))); + aml_append(method, elsectx); + aml_append(dev, method); + + method = aml_method("_DSM", 4, AML_NOTSERIALIZED); + + /* PCI Firmware Specification 3.0 + * 4.6.1. _DSM for PCI Express Slot Information + * The UUID in _DSM in this context is + * {E5C937D0-3553-4D7A-9117-EA4D19C3434D} + */ + UUID = aml_touuid("E5C937D0-3553-4D7A-9117-EA4D19C3434D"); + ifctx = aml_if(aml_equal(aml_arg(0), UUID)); + ifctx1 = aml_if(aml_equal(aml_arg(2), aml_int(0))); + uint8_t byte_list[1] = {1}; + buf = aml_buffer(1, byte_list); + aml_append(ifctx1, aml_return(buf)); + aml_append(ifctx, ifctx1); + aml_append(method, ifctx); + + byte_list[0] = 0; + buf = aml_buffer(1, byte_list); + aml_append(method, aml_return(buf)); + aml_append(dev, method); +} + +void acpi_dsdt_add_gpex(Aml *scope, struct GPEXConfig *cfg) +{ + int nr_pcie_buses = cfg->ecam.size / PCIE_MMCFG_SIZE_MIN; + Aml *method, *crs, *dev, *rbuf; + PCIBus *bus = cfg->bus; + CrsRangeSet crs_range_set; + CrsRangeEntry *entry; + int i; + + /* start to construct the tables for pxb */ + crs_range_set_init(&crs_range_set); + if (bus) { + QLIST_FOREACH(bus, &bus->child, sibling) { + uint8_t bus_num = pci_bus_num(bus); + uint8_t numa_node = pci_bus_numa_node(bus); + + if (!pci_bus_is_root(bus)) { + continue; + } + + /* + * 0 - (nr_pcie_buses - 1) is the bus range for the main + * host-bridge and it equals the MIN of the + * busNr defined for pxb-pcie. 
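+ * + * Worked example (hypothetical topology): with an ECAM window sized for + * 256 buses and a single pxb-pcie whose busNr is 0x80, the check below + * lowers nr_pcie_buses to 0x80, so PCI0 claims buses 0x00..0x7f and the + * pxb root bus takes 0x80 upwards.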
+ */ + if (bus_num < nr_pcie_buses) { + nr_pcie_buses = bus_num; + } + + dev = aml_device("PC%.02X", bus_num); + aml_append(dev, aml_name_decl("_HID", aml_string("PNP0A08"))); + aml_append(dev, aml_name_decl("_CID", aml_string("PNP0A03"))); + aml_append(dev, aml_name_decl("_BBN", aml_int(bus_num))); + aml_append(dev, aml_name_decl("_UID", aml_int(bus_num))); + aml_append(dev, aml_name_decl("_STR", aml_unicode("pxb Device"))); + aml_append(dev, aml_name_decl("_CCA", aml_int(1))); + if (numa_node != NUMA_NODE_UNASSIGNED) { + aml_append(dev, aml_name_decl("_PXM", aml_int(numa_node))); + } + + acpi_dsdt_add_pci_route_table(dev, cfg->irq); + + /* + * Resources defined for PXBs are composed of the following parts: + * 1. The resources the pci-bridge/pcie-root-port needs. + * 2. The resources the devices behind the pxb need. + */ + crs = build_crs(PCI_HOST_BRIDGE(BUS(bus)->parent), &crs_range_set, + cfg->pio.base, 0, 0, 0); + aml_append(dev, aml_name_decl("_CRS", crs)); + + acpi_dsdt_add_pci_osc(dev); + + aml_append(scope, dev); + } + } + + /* tables for the main host bridge */ + dev = aml_device("%s", "PCI0"); + aml_append(dev, aml_name_decl("_HID", aml_string("PNP0A08"))); + aml_append(dev, aml_name_decl("_CID", aml_string("PNP0A03"))); + aml_append(dev, aml_name_decl("_SEG", aml_int(0))); + aml_append(dev, aml_name_decl("_BBN", aml_int(0))); + aml_append(dev, aml_name_decl("_UID", aml_int(0))); + aml_append(dev, aml_name_decl("_STR", aml_unicode("PCIe 0 Device"))); + aml_append(dev, aml_name_decl("_CCA", aml_int(1))); + + acpi_dsdt_add_pci_route_table(dev, cfg->irq); + + method = aml_method("_CBA", 0, AML_NOTSERIALIZED); + aml_append(method, aml_return(aml_int(cfg->ecam.base))); + aml_append(dev, method); + + /* + * At this point crs_range_set has all the ranges used by pci + * busses *other* than PCI0. These ranges will be excluded from + * the PCI0._CRS.
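+ * + * For instance (hypothetical values): if a pxb claimed + * [0x10000000, 0x1fffffff] out of a 32-bit window + * [0x10000000, 0x3fffffff], crs_replace_with_free_ranges() below + * leaves only [0x20000000, 0x3fffffff] for PCI0 to declare.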
+ */ + rbuf = aml_resource_template(); + aml_append(rbuf, + aml_word_bus_number(AML_MIN_FIXED, AML_MAX_FIXED, AML_POS_DECODE, + 0x0000, 0x0000, nr_pcie_buses - 1, 0x0000, + nr_pcie_buses)); + if (cfg->mmio32.size) { + crs_replace_with_free_ranges(crs_range_set.mem_ranges, + cfg->mmio32.base, + cfg->mmio32.base + cfg->mmio32.size - 1); + for (i = 0; i < crs_range_set.mem_ranges->len; i++) { + entry = g_ptr_array_index(crs_range_set.mem_ranges, i); + aml_append(rbuf, + aml_dword_memory(AML_POS_DECODE, AML_MIN_FIXED, AML_MAX_FIXED, + AML_NON_CACHEABLE, AML_READ_WRITE, 0x0000, + entry->base, entry->limit, + 0x0000, entry->limit - entry->base + 1)); + } + } + if (cfg->pio.size) { + crs_replace_with_free_ranges(crs_range_set.io_ranges, + 0x0000, + cfg->pio.size - 1); + for (i = 0; i < crs_range_set.io_ranges->len; i++) { + entry = g_ptr_array_index(crs_range_set.io_ranges, i); + aml_append(rbuf, + aml_dword_io(AML_MIN_FIXED, AML_MAX_FIXED, AML_POS_DECODE, + AML_ENTIRE_RANGE, 0x0000, entry->base, + entry->limit, cfg->pio.base, + entry->limit - entry->base + 1)); + } + } + if (cfg->mmio64.size) { + crs_replace_with_free_ranges(crs_range_set.mem_64bit_ranges, + cfg->mmio64.base, + cfg->mmio64.base + cfg->mmio64.size - 1); + for (i = 0; i < crs_range_set.mem_64bit_ranges->len; i++) { + entry = g_ptr_array_index(crs_range_set.mem_64bit_ranges, i); + aml_append(rbuf, + aml_qword_memory(AML_POS_DECODE, AML_MIN_FIXED, AML_MAX_FIXED, + AML_NON_CACHEABLE, AML_READ_WRITE, 0x0000, + entry->base, + entry->limit, 0x0000, + entry->limit - entry->base + 1)); + } + } + aml_append(dev, aml_name_decl("_CRS", rbuf)); + + acpi_dsdt_add_pci_osc(dev); + + Aml *dev_res0 = aml_device("%s", "RES0"); + aml_append(dev_res0, aml_name_decl("_HID", aml_string("PNP0C02"))); + crs = aml_resource_template(); + aml_append(crs, + aml_qword_memory(AML_POS_DECODE, AML_MIN_FIXED, AML_MAX_FIXED, + AML_NON_CACHEABLE, AML_READ_WRITE, 0x0000, + cfg->ecam.base, + cfg->ecam.base + cfg->ecam.size - 1, + 0x0000, + cfg->ecam.size)); + aml_append(dev_res0, aml_name_decl("_CRS", crs)); + aml_append(dev, dev_res0); + aml_append(scope, dev); + + crs_range_set_free(&crs_range_set); +} diff --git a/hw/pci-host/gpex.c b/hw/pci-host/gpex.c new file mode 100644 index 000000000..a6752fac5 --- /dev/null +++ b/hw/pci-host/gpex.c @@ -0,0 +1,240 @@ +/* + * QEMU Generic PCI Express Bridge Emulation + * + * Copyright (C) 2015 Alexander Graf + * + * Code loosely based on q35.c. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ * + * Check out these documents for more information on the device: + * + * http://www.kernel.org/doc/Documentation/devicetree/bindings/pci/host-generic-pci.txt + * http://www.firmware.org/1275/practice/imap/imap0_9d.pdf + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "hw/irq.h" +#include "hw/pci-host/gpex.h" +#include "hw/qdev-properties.h" +#include "migration/vmstate.h" +#include "qemu/module.h" + +/**************************************************************************** + * GPEX host + */ + +static void gpex_set_irq(void *opaque, int irq_num, int level) +{ + GPEXHost *s = opaque; + + qemu_set_irq(s->irq[irq_num], level); +} + +int gpex_set_irq_num(GPEXHost *s, int index, int gsi) +{ + if (index >= GPEX_NUM_IRQS) { + return -EINVAL; + } + + s->irq_num[index] = gsi; + return 0; +} + +static PCIINTxRoute gpex_route_intx_pin_to_irq(void *opaque, int pin) +{ + PCIINTxRoute route; + GPEXHost *s = opaque; + int gsi = s->irq_num[pin]; + + route.irq = gsi; + if (gsi < 0) { + route.mode = PCI_INTX_DISABLED; + } else { + route.mode = PCI_INTX_ENABLED; + } + + return route; +} + +static void gpex_host_realize(DeviceState *dev, Error **errp) +{ + PCIHostState *pci = PCI_HOST_BRIDGE(dev); + GPEXHost *s = GPEX_HOST(dev); + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + PCIExpressHost *pex = PCIE_HOST_BRIDGE(dev); + int i; + + pcie_host_mmcfg_init(pex, PCIE_MMCFG_SIZE_MAX); + sysbus_init_mmio(sbd, &pex->mmio); + + /* + * Note that the MemoryRegions io_mmio and io_ioport that we pass + * to pci_register_root_bus() are not the same as the + * MemoryRegions io_mmio_window and io_ioport_window that we + * expose as SysBus MRs. The difference is in the behaviour of + * accesses to addresses where no PCI device has been mapped. + * + * io_mmio and io_ioport are the underlying PCI view of the PCI + * address space, and when a PCI device does a bus master access + * to a bad address this is reported back to it as a transaction + * failure. + * + * io_mmio_window and io_ioport_window implement "unmapped + * addresses read as -1 and ignore writes"; this is traditional + * x86 PC behaviour, which is not mandated by the PCI spec proper + * but expected by much PCI-using guest software, including Linux. + * + * In the interests of not being unnecessarily surprising, we + * implement it in the gpex PCI host controller, by providing the + * _window MRs, which are containers with io ops that implement + * the 'background' behaviour and which hold the real PCI MRs as + * subregions. 
+ */ + memory_region_init(&s->io_mmio, OBJECT(s), "gpex_mmio", UINT64_MAX); + memory_region_init(&s->io_ioport, OBJECT(s), "gpex_ioport", 64 * 1024); + + if (s->allow_unmapped_accesses) { + memory_region_init_io(&s->io_mmio_window, OBJECT(s), + &unassigned_io_ops, OBJECT(s), + "gpex_mmio_window", UINT64_MAX); + memory_region_init_io(&s->io_ioport_window, OBJECT(s), + &unassigned_io_ops, OBJECT(s), + "gpex_ioport_window", 64 * 1024); + + memory_region_add_subregion(&s->io_mmio_window, 0, &s->io_mmio); + memory_region_add_subregion(&s->io_ioport_window, 0, &s->io_ioport); + sysbus_init_mmio(sbd, &s->io_mmio_window); + sysbus_init_mmio(sbd, &s->io_ioport_window); + } else { + sysbus_init_mmio(sbd, &s->io_mmio); + sysbus_init_mmio(sbd, &s->io_ioport); + } + + for (i = 0; i < GPEX_NUM_IRQS; i++) { + sysbus_init_irq(sbd, &s->irq[i]); + s->irq_num[i] = -1; + } + + pci->bus = pci_register_root_bus(dev, "pcie.0", gpex_set_irq, + pci_swizzle_map_irq_fn, s, &s->io_mmio, + &s->io_ioport, 0, 4, TYPE_PCIE_BUS); + + pci_bus_set_route_irq_fn(pci->bus, gpex_route_intx_pin_to_irq); + qdev_realize(DEVICE(&s->gpex_root), BUS(pci->bus), &error_fatal); +} + +static const char *gpex_host_root_bus_path(PCIHostState *host_bridge, + PCIBus *rootbus) +{ + return "0000:00"; +} + +static Property gpex_host_properties[] = { + /* + * Permit CPU accesses to unmapped areas of the PIO and MMIO windows + * (discarding writes and returning -1 for reads) rather than aborting. + */ + DEFINE_PROP_BOOL("allow-unmapped-accesses", GPEXHost, + allow_unmapped_accesses, true), + DEFINE_PROP_END_OF_LIST(), +}; + +static void gpex_host_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIHostBridgeClass *hc = PCI_HOST_BRIDGE_CLASS(klass); + + hc->root_bus_path = gpex_host_root_bus_path; + dc->realize = gpex_host_realize; + set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); + dc->fw_name = "pci"; + device_class_set_props(dc, gpex_host_properties); +} + +static void gpex_host_initfn(Object *obj) +{ + GPEXHost *s = GPEX_HOST(obj); + GPEXRootState *root = &s->gpex_root; + + object_initialize_child(obj, "gpex_root", root, TYPE_GPEX_ROOT_DEVICE); + qdev_prop_set_int32(DEVICE(root), "addr", PCI_DEVFN(0, 0)); + qdev_prop_set_bit(DEVICE(root), "multifunction", false); +} + +static const TypeInfo gpex_host_info = { + .name = TYPE_GPEX_HOST, + .parent = TYPE_PCIE_HOST_BRIDGE, + .instance_size = sizeof(GPEXHost), + .instance_init = gpex_host_initfn, + .class_init = gpex_host_class_init, +}; + +/**************************************************************************** + * GPEX Root D0:F0 + */ + +static const VMStateDescription vmstate_gpex_root = { + .name = "gpex_root", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_PCI_DEVICE(parent_obj, GPEXRootState), + VMSTATE_END_OF_LIST() + } +}; + +static void gpex_root_class_init(ObjectClass *klass, void *data) +{ + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + DeviceClass *dc = DEVICE_CLASS(klass); + + set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); + dc->desc = "QEMU generic PCIe host bridge"; + dc->vmsd = &vmstate_gpex_root; + k->vendor_id = PCI_VENDOR_ID_REDHAT; + k->device_id = PCI_DEVICE_ID_REDHAT_PCIE_HOST; + k->revision = 0; + k->class_id = PCI_CLASS_BRIDGE_HOST; + /* + * PCI-facing part of the host bridge, not usable without the + * host-facing part, which can't be device_add'ed, yet. 
+ */ + dc->user_creatable = false; +} + +static const TypeInfo gpex_root_info = { + .name = TYPE_GPEX_ROOT_DEVICE, + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(GPEXRootState), + .class_init = gpex_root_class_init, + .interfaces = (InterfaceInfo[]) { + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { }, + }, +}; + +static void gpex_register(void) +{ + type_register_static(&gpex_root_info); + type_register_static(&gpex_host_info); +} + +type_init(gpex_register) diff --git a/hw/pci-host/grackle.c b/hw/pci-host/grackle.c new file mode 100644 index 000000000..b05facf46 --- /dev/null +++ b/hw/pci-host/grackle.c @@ -0,0 +1,176 @@ +/* + * QEMU Grackle PCI host (heathrow OldWorld PowerMac) + * + * Copyright (c) 2006-2007 Fabrice Bellard + * Copyright (c) 2007 Jocelyn Mayer + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "hw/pci/pci_host.h" +#include "hw/ppc/mac.h" +#include "hw/qdev-properties.h" +#include "hw/pci/pci.h" +#include "hw/irq.h" +#include "qapi/error.h" +#include "qemu/module.h" +#include "trace.h" +#include "qom/object.h" + +OBJECT_DECLARE_SIMPLE_TYPE(GrackleState, GRACKLE_PCI_HOST_BRIDGE) + +struct GrackleState { + PCIHostState parent_obj; + + uint32_t ofw_addr; + qemu_irq irqs[4]; + MemoryRegion pci_mmio; + MemoryRegion pci_hole; + MemoryRegion pci_io; +}; + +/* Don't know if this matches real hardware, but it agrees with OHW. 
*/ +static int pci_grackle_map_irq(PCIDevice *pci_dev, int irq_num) +{ + return (irq_num + (pci_dev->devfn >> 3)) & 3; +} + +static void pci_grackle_set_irq(void *opaque, int irq_num, int level) +{ + GrackleState *s = opaque; + + trace_grackle_set_irq(irq_num, level); + qemu_set_irq(s->irqs[irq_num], level); +} + +static void grackle_realize(DeviceState *dev, Error **errp) +{ + GrackleState *s = GRACKLE_PCI_HOST_BRIDGE(dev); + PCIHostState *phb = PCI_HOST_BRIDGE(dev); + + phb->bus = pci_register_root_bus(dev, NULL, + pci_grackle_set_irq, + pci_grackle_map_irq, + s, + &s->pci_mmio, + &s->pci_io, + 0, 4, TYPE_PCI_BUS); + + pci_create_simple(phb->bus, 0, "grackle"); +} + +static void grackle_init(Object *obj) +{ + GrackleState *s = GRACKLE_PCI_HOST_BRIDGE(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + PCIHostState *phb = PCI_HOST_BRIDGE(obj); + + memory_region_init(&s->pci_mmio, OBJECT(s), "pci-mmio", 0x100000000ULL); + memory_region_init_io(&s->pci_io, OBJECT(s), &unassigned_io_ops, obj, + "pci-isa-mmio", 0x00200000); + + memory_region_init_alias(&s->pci_hole, OBJECT(s), "pci-hole", &s->pci_mmio, + 0x80000000ULL, 0x7e000000ULL); + + memory_region_init_io(&phb->conf_mem, obj, &pci_host_conf_le_ops, + DEVICE(obj), "pci-conf-idx", 0x1000); + memory_region_init_io(&phb->data_mem, obj, &pci_host_data_le_ops, + DEVICE(obj), "pci-data-idx", 0x1000); + + sysbus_init_mmio(sbd, &phb->conf_mem); + sysbus_init_mmio(sbd, &phb->data_mem); + sysbus_init_mmio(sbd, &s->pci_hole); + sysbus_init_mmio(sbd, &s->pci_io); + + qdev_init_gpio_out(DEVICE(obj), s->irqs, ARRAY_SIZE(s->irqs)); +} + +static void grackle_pci_realize(PCIDevice *d, Error **errp) +{ + d->config[0x09] = 0x01; +} + +static void grackle_pci_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + + k->realize = grackle_pci_realize; + k->vendor_id = PCI_VENDOR_ID_MOTOROLA; + k->device_id = PCI_DEVICE_ID_MOTOROLA_MPC106; + k->revision = 0x00; + k->class_id = PCI_CLASS_BRIDGE_HOST; + /* + * PCI-facing part of the host bridge, not usable without the + * host-facing part, which can't be device_add'ed, yet. 
+ */ + dc->user_creatable = false; +} + +static const TypeInfo grackle_pci_info = { + .name = "grackle", + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(PCIDevice), + .class_init = grackle_pci_class_init, + .interfaces = (InterfaceInfo[]) { + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { }, + }, +}; + +static char *grackle_ofw_unit_address(const SysBusDevice *dev) +{ + GrackleState *s = GRACKLE_PCI_HOST_BRIDGE(dev); + + return g_strdup_printf("%x", s->ofw_addr); +} + +static Property grackle_properties[] = { + DEFINE_PROP_UINT32("ofw-addr", GrackleState, ofw_addr, -1), + DEFINE_PROP_END_OF_LIST() +}; + +static void grackle_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + SysBusDeviceClass *sbc = SYS_BUS_DEVICE_CLASS(klass); + + dc->realize = grackle_realize; + device_class_set_props(dc, grackle_properties); + set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); + dc->fw_name = "pci"; + sbc->explicit_ofw_unit_address = grackle_ofw_unit_address; +} + +static const TypeInfo grackle_host_info = { + .name = TYPE_GRACKLE_PCI_HOST_BRIDGE, + .parent = TYPE_PCI_HOST_BRIDGE, + .instance_size = sizeof(GrackleState), + .instance_init = grackle_init, + .class_init = grackle_class_init, +}; + +static void grackle_register_types(void) +{ + type_register_static(&grackle_pci_info); + type_register_static(&grackle_host_info); +} + +type_init(grackle_register_types) diff --git a/hw/pci-host/i440fx.c b/hw/pci-host/i440fx.c new file mode 100644 index 000000000..e08716142 --- /dev/null +++ b/hw/pci-host/i440fx.c @@ -0,0 +1,412 @@ +/* + * QEMU i440FX PCI Bridge Emulation + * + * Copyright (c) 2006 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "qemu/range.h" +#include "hw/i386/pc.h" +#include "hw/pci/pci.h" +#include "hw/pci/pci_host.h" +#include "hw/pci-host/i440fx.h" +#include "hw/qdev-properties.h" +#include "hw/sysbus.h" +#include "qapi/error.h" +#include "migration/vmstate.h" +#include "qapi/visitor.h" +#include "qemu/error-report.h" +#include "qom/object.h" + +/* + * I440FX chipset data sheet. 
+ * https://wiki.qemu.org/File:29054901.pdf + */ + +OBJECT_DECLARE_SIMPLE_TYPE(I440FXState, I440FX_PCI_HOST_BRIDGE) + +struct I440FXState { + PCIHostState parent_obj; + Range pci_hole; + uint64_t pci_hole64_size; + bool pci_hole64_fix; + uint32_t short_root_bus; +}; + +#define I440FX_PAM 0x59 +#define I440FX_PAM_SIZE 7 +#define I440FX_SMRAM 0x72 + +/* Keep it 2G to comply with older win32 guests */ +#define I440FX_PCI_HOST_HOLE64_SIZE_DEFAULT (1ULL << 31) + +/* Older coreboot versions (4.0 and older) read a config register that doesn't + * exist in real hardware, to get the RAM size from QEMU. + */ +#define I440FX_COREBOOT_RAM_SIZE 0x57 + +static void i440fx_update_memory_mappings(PCII440FXState *d) +{ + int i; + PCIDevice *pd = PCI_DEVICE(d); + + memory_region_transaction_begin(); + for (i = 0; i < ARRAY_SIZE(d->pam_regions); i++) { + pam_update(&d->pam_regions[i], i, + pd->config[I440FX_PAM + DIV_ROUND_UP(i, 2)]); + } + memory_region_set_enabled(&d->smram_region, + !(pd->config[I440FX_SMRAM] & SMRAM_D_OPEN)); + memory_region_set_enabled(&d->smram, + pd->config[I440FX_SMRAM] & SMRAM_G_SMRAME); + memory_region_transaction_commit(); +} + + +static void i440fx_write_config(PCIDevice *dev, + uint32_t address, uint32_t val, int len) +{ + PCII440FXState *d = I440FX_PCI_DEVICE(dev); + + /* XXX: implement SMRAM.D_LOCK */ + pci_default_write_config(dev, address, val, len); + if (ranges_overlap(address, len, I440FX_PAM, I440FX_PAM_SIZE) || + range_covers_byte(address, len, I440FX_SMRAM)) { + i440fx_update_memory_mappings(d); + } +} + +static int i440fx_post_load(void *opaque, int version_id) +{ + PCII440FXState *d = opaque; + + i440fx_update_memory_mappings(d); + return 0; +} + +static const VMStateDescription vmstate_i440fx = { + .name = "I440FX", + .version_id = 3, + .minimum_version_id = 3, + .post_load = i440fx_post_load, + .fields = (VMStateField[]) { + VMSTATE_PCI_DEVICE(parent_obj, PCII440FXState), + /* Used to be smm_enabled, which was basically always zero because + * SeaBIOS hardly uses SMM. SMRAM is now handled by CPU code. + */ + VMSTATE_UNUSED(1), + VMSTATE_END_OF_LIST() + } +}; + +static void i440fx_pcihost_get_pci_hole_start(Object *obj, Visitor *v, + const char *name, void *opaque, + Error **errp) +{ + I440FXState *s = I440FX_PCI_HOST_BRIDGE(obj); + uint64_t val64; + uint32_t value; + + val64 = range_is_empty(&s->pci_hole) ? 0 : range_lob(&s->pci_hole); + value = val64; + assert(value == val64); + visit_type_uint32(v, name, &value, errp); +} + +static void i440fx_pcihost_get_pci_hole_end(Object *obj, Visitor *v, + const char *name, void *opaque, + Error **errp) +{ + I440FXState *s = I440FX_PCI_HOST_BRIDGE(obj); + uint64_t val64; + uint32_t value; + + val64 = range_is_empty(&s->pci_hole) ? 0 : range_upb(&s->pci_hole) + 1; + value = val64; + assert(value == val64); + visit_type_uint32(v, name, &value, errp); +} + +/* + * The 64bit PCI hole start is set by the Guest firmware + * as the address of the first 64bit PCI MEM resource. + * If no PCI device has resources on the 64bit area, + * the 64bit PCI hole will start after "over 4G RAM" and the + * reserved space for memory hotplug if any. + */ +static uint64_t i440fx_pcihost_get_pci_hole64_start_value(Object *obj) +{ + PCIHostState *h = PCI_HOST_BRIDGE(obj); + I440FXState *s = I440FX_PCI_HOST_BRIDGE(obj); + Range w64; + uint64_t value; + + pci_bus_get_w64_range(h->bus, &w64); + value = range_is_empty(&w64) ? 
0 : range_lob(&w64); + if (!value && s->pci_hole64_fix) { + value = pc_pci_hole64_start(); + } + return value; +} + +static void i440fx_pcihost_get_pci_hole64_start(Object *obj, Visitor *v, + const char *name, + void *opaque, Error **errp) +{ + uint64_t hole64_start = i440fx_pcihost_get_pci_hole64_start_value(obj); + + visit_type_uint64(v, name, &hole64_start, errp); +} + +/* + * The 64bit PCI hole end is set by the Guest firmware + * as the address of the last 64bit PCI MEM resource. + * Then it is expanded to the PCI_HOST_PROP_PCI_HOLE64_SIZE + * that can be configured by the user. + */ +static void i440fx_pcihost_get_pci_hole64_end(Object *obj, Visitor *v, + const char *name, void *opaque, + Error **errp) +{ + PCIHostState *h = PCI_HOST_BRIDGE(obj); + I440FXState *s = I440FX_PCI_HOST_BRIDGE(obj); + uint64_t hole64_start = i440fx_pcihost_get_pci_hole64_start_value(obj); + Range w64; + uint64_t value, hole64_end; + + pci_bus_get_w64_range(h->bus, &w64); + value = range_is_empty(&w64) ? 0 : range_upb(&w64) + 1; + hole64_end = ROUND_UP(hole64_start + s->pci_hole64_size, 1ULL << 30); + if (s->pci_hole64_fix && value < hole64_end) { + value = hole64_end; + } + visit_type_uint64(v, name, &value, errp); +} + +static void i440fx_pcihost_initfn(Object *obj) +{ + PCIHostState *s = PCI_HOST_BRIDGE(obj); + + memory_region_init_io(&s->conf_mem, obj, &pci_host_conf_le_ops, s, + "pci-conf-idx", 4); + memory_region_init_io(&s->data_mem, obj, &pci_host_data_le_ops, s, + "pci-conf-data", 4); +} + +static void i440fx_pcihost_realize(DeviceState *dev, Error **errp) +{ + PCIHostState *s = PCI_HOST_BRIDGE(dev); + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + + sysbus_add_io(sbd, 0xcf8, &s->conf_mem); + sysbus_init_ioports(sbd, 0xcf8, 4); + + sysbus_add_io(sbd, 0xcfc, &s->data_mem); + sysbus_init_ioports(sbd, 0xcfc, 4); + + /* register i440fx 0xcf8 port as coalesced pio */ + memory_region_set_flush_coalesced(&s->data_mem); + memory_region_add_coalescing(&s->conf_mem, 0, 4); +} + +static void i440fx_realize(PCIDevice *dev, Error **errp) +{ + dev->config[I440FX_SMRAM] = 0x02; + + if (object_property_get_bool(qdev_get_machine(), "iommu", NULL)) { + warn_report("i440fx doesn't support emulated iommu"); + } +} + +PCIBus *i440fx_init(const char *host_type, const char *pci_type, + PCII440FXState **pi440fx_state, + MemoryRegion *address_space_mem, + MemoryRegion *address_space_io, + ram_addr_t ram_size, + ram_addr_t below_4g_mem_size, + ram_addr_t above_4g_mem_size, + MemoryRegion *pci_address_space, + MemoryRegion *ram_memory) +{ + DeviceState *dev; + PCIBus *b; + PCIDevice *d; + PCIHostState *s; + PCII440FXState *f; + unsigned i; + I440FXState *i440fx; + + dev = qdev_new(host_type); + s = PCI_HOST_BRIDGE(dev); + b = pci_root_bus_new(dev, NULL, pci_address_space, + address_space_io, 0, TYPE_PCI_BUS); + s->bus = b; + object_property_add_child(qdev_get_machine(), "i440fx", OBJECT(dev)); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + + d = pci_create_simple(b, 0, pci_type); + *pi440fx_state = I440FX_PCI_DEVICE(d); + f = *pi440fx_state; + f->system_memory = address_space_mem; + f->pci_address_space = pci_address_space; + f->ram_memory = ram_memory; + + i440fx = I440FX_PCI_HOST_BRIDGE(dev); + range_set_bounds(&i440fx->pci_hole, below_4g_mem_size, + IO_APIC_DEFAULT_ADDRESS - 1); + + /* setup pci memory mapping */ + pc_pci_as_mapping_init(OBJECT(f), f->system_memory, + f->pci_address_space); + + /* if *disabled* show SMRAM to all CPUs */ + memory_region_init_alias(&f->smram_region, OBJECT(d), "smram-region", 
+ f->pci_address_space, 0xa0000, 0x20000); + memory_region_add_subregion_overlap(f->system_memory, 0xa0000, + &f->smram_region, 1); + memory_region_set_enabled(&f->smram_region, true); + + /* smram, as seen by SMM CPUs */ + memory_region_init(&f->smram, OBJECT(d), "smram", 4 * GiB); + memory_region_set_enabled(&f->smram, true); + memory_region_init_alias(&f->low_smram, OBJECT(d), "smram-low", + f->ram_memory, 0xa0000, 0x20000); + memory_region_set_enabled(&f->low_smram, true); + memory_region_add_subregion(&f->smram, 0xa0000, &f->low_smram); + object_property_add_const_link(qdev_get_machine(), "smram", + OBJECT(&f->smram)); + + init_pam(dev, f->ram_memory, f->system_memory, f->pci_address_space, + &f->pam_regions[0], PAM_BIOS_BASE, PAM_BIOS_SIZE); + for (i = 0; i < ARRAY_SIZE(f->pam_regions) - 1; ++i) { + init_pam(dev, f->ram_memory, f->system_memory, f->pci_address_space, + &f->pam_regions[i+1], PAM_EXPAN_BASE + i * PAM_EXPAN_SIZE, + PAM_EXPAN_SIZE); + } + + ram_size = ram_size / 8 / 1024 / 1024; + if (ram_size > 255) { + ram_size = 255; + } + d->config[I440FX_COREBOOT_RAM_SIZE] = ram_size; + + i440fx_update_memory_mappings(f); + + return b; +} + +static void i440fx_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + + k->realize = i440fx_realize; + k->config_write = i440fx_write_config; + k->vendor_id = PCI_VENDOR_ID_INTEL; + k->device_id = PCI_DEVICE_ID_INTEL_82441; + k->revision = 0x02; + k->class_id = PCI_CLASS_BRIDGE_HOST; + dc->desc = "Host bridge"; + dc->vmsd = &vmstate_i440fx; + /* + * PCI-facing part of the host bridge, not usable without the + * host-facing part, which can't be device_add'ed, yet. + */ + dc->user_creatable = false; + dc->hotpluggable = false; +} + +static const TypeInfo i440fx_info = { + .name = TYPE_I440FX_PCI_DEVICE, + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(PCII440FXState), + .class_init = i440fx_class_init, + .interfaces = (InterfaceInfo[]) { + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { }, + }, +}; + +static const char *i440fx_pcihost_root_bus_path(PCIHostState *host_bridge, + PCIBus *rootbus) +{ + I440FXState *s = I440FX_PCI_HOST_BRIDGE(host_bridge); + + /* For backwards compat with old device paths */ + if (s->short_root_bus) { + return "0000"; + } + return "0000:00"; +} + +static Property i440fx_props[] = { + DEFINE_PROP_SIZE(PCI_HOST_PROP_PCI_HOLE64_SIZE, I440FXState, + pci_hole64_size, I440FX_PCI_HOST_HOLE64_SIZE_DEFAULT), + DEFINE_PROP_UINT32("short_root_bus", I440FXState, short_root_bus, 0), + DEFINE_PROP_BOOL("x-pci-hole64-fix", I440FXState, pci_hole64_fix, true), + DEFINE_PROP_END_OF_LIST(), +}; + +static void i440fx_pcihost_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIHostBridgeClass *hc = PCI_HOST_BRIDGE_CLASS(klass); + + hc->root_bus_path = i440fx_pcihost_root_bus_path; + dc->realize = i440fx_pcihost_realize; + dc->fw_name = "pci"; + device_class_set_props(dc, i440fx_props); + /* Reason: needs to be wired up by pc_init1 */ + dc->user_creatable = false; + + object_class_property_add(klass, PCI_HOST_PROP_PCI_HOLE_START, "uint32", + i440fx_pcihost_get_pci_hole_start, + NULL, NULL, NULL); + + object_class_property_add(klass, PCI_HOST_PROP_PCI_HOLE_END, "uint32", + i440fx_pcihost_get_pci_hole_end, + NULL, NULL, NULL); + + object_class_property_add(klass, PCI_HOST_PROP_PCI_HOLE64_START, "uint64", + i440fx_pcihost_get_pci_hole64_start, + NULL, NULL, NULL); + + object_class_property_add(klass, 
PCI_HOST_PROP_PCI_HOLE64_END, "uint64", + i440fx_pcihost_get_pci_hole64_end, + NULL, NULL, NULL); +} + +static const TypeInfo i440fx_pcihost_info = { + .name = TYPE_I440FX_PCI_HOST_BRIDGE, + .parent = TYPE_PCI_HOST_BRIDGE, + .instance_size = sizeof(I440FXState), + .instance_init = i440fx_pcihost_initfn, + .class_init = i440fx_pcihost_class_init, +}; + +static void i440fx_register_types(void) +{ + type_register_static(&i440fx_info); + type_register_static(&i440fx_pcihost_info); +} + +type_init(i440fx_register_types) diff --git a/hw/pci-host/meson.build b/hw/pci-host/meson.build new file mode 100644 index 000000000..4c4f39c15 --- /dev/null +++ b/hw/pci-host/meson.build @@ -0,0 +1,36 @@ +pci_ss = ss.source_set() +pci_ss.add(when: 'CONFIG_PAM', if_true: files('pam.c')) +pci_ss.add(when: 'CONFIG_PCI_BONITO', if_true: files('bonito.c')) +pci_ss.add(when: 'CONFIG_PCI_EXPRESS_DESIGNWARE', if_true: files('designware.c')) +pci_ss.add(when: 'CONFIG_PCI_EXPRESS_GENERIC_BRIDGE', if_true: files('gpex.c')) +pci_ss.add(when: ['CONFIG_PCI_EXPRESS_GENERIC_BRIDGE', 'CONFIG_ACPI'], if_true: files('gpex-acpi.c')) +pci_ss.add(when: 'CONFIG_PCI_EXPRESS_Q35', if_true: files('q35.c')) +pci_ss.add(when: 'CONFIG_PCI_EXPRESS_XILINX', if_true: files('xilinx-pcie.c')) +pci_ss.add(when: 'CONFIG_PCI_I440FX', if_true: files('i440fx.c')) +pci_ss.add(when: 'CONFIG_PCI_SABRE', if_true: files('sabre.c')) +pci_ss.add(when: 'CONFIG_XEN_IGD_PASSTHROUGH', if_true: files('xen_igd_pt.c')) +pci_ss.add(when: 'CONFIG_REMOTE_PCIHOST', if_true: files('remote.c')) +pci_ss.add(when: 'CONFIG_SH_PCI', if_true: files('sh_pci.c')) + +# PPC devices +pci_ss.add(when: 'CONFIG_RAVEN_PCI', if_true: files('raven.c')) +pci_ss.add(when: 'CONFIG_GRACKLE_PCI', if_true: files('grackle.c')) +# NewWorld PowerMac +pci_ss.add(when: 'CONFIG_UNIN_PCI', if_true: files('uninorth.c')) +# PowerPC E500 boards +pci_ss.add(when: 'CONFIG_PPCE500_PCI', if_true: files('ppce500.c')) +# Pegasos2 +pci_ss.add(when: 'CONFIG_MV64361', if_true: files('mv64361.c')) + +# ARM devices +pci_ss.add(when: 'CONFIG_VERSATILE_PCI', if_true: files('versatile.c')) + +softmmu_ss.add_all(when: 'CONFIG_PCI', if_true: pci_ss) + +specific_ss.add(when: 'CONFIG_PCI_POWERNV', if_true: files( + 'pnv_phb3.c', + 'pnv_phb3_msi.c', + 'pnv_phb3_pbcq.c', + 'pnv_phb4.c', + 'pnv_phb4_pec.c' +)) diff --git a/hw/pci-host/mv64361.c b/hw/pci-host/mv64361.c new file mode 100644 index 000000000..00b3ff7d9 --- /dev/null +++ b/hw/pci-host/mv64361.c @@ -0,0 +1,951 @@ +/* + * Marvell Discovery II MV64361 System Controller for + * QEMU PowerPC CHRP (Genesi/bPlan Pegasos II) hardware System Emulator + * + * Copyright (c) 2018-2020 BALATON Zoltan + * + * This work is licensed under the GNU GPL license version 2 or later. 
+ * + */ + +#include "qemu/osdep.h" +#include "qemu-common.h" +#include "qemu/units.h" +#include "qapi/error.h" +#include "hw/hw.h" +#include "hw/sysbus.h" +#include "hw/pci/pci.h" +#include "hw/pci/pci_host.h" +#include "hw/irq.h" +#include "hw/intc/i8259.h" +#include "hw/qdev-properties.h" +#include "exec/address-spaces.h" +#include "qemu/log.h" +#include "qemu/error-report.h" +#include "trace.h" +#include "hw/pci-host/mv64361.h" +#include "mv643xx.h" + +#define TYPE_MV64361_PCI_BRIDGE "mv64361-pcibridge" + +static void mv64361_pcibridge_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + + k->vendor_id = PCI_VENDOR_ID_MARVELL; + k->device_id = PCI_DEVICE_ID_MARVELL_MV6436X; + k->class_id = PCI_CLASS_BRIDGE_HOST; + /* + * PCI-facing part of the host bridge, + * not usable without the host-facing part + */ + dc->user_creatable = false; +} + +static const TypeInfo mv64361_pcibridge_info = { + .name = TYPE_MV64361_PCI_BRIDGE, + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(PCIDevice), + .class_init = mv64361_pcibridge_class_init, + .interfaces = (InterfaceInfo[]) { + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { }, + }, +}; + + +#define TYPE_MV64361_PCI "mv64361-pcihost" +OBJECT_DECLARE_SIMPLE_TYPE(MV64361PCIState, MV64361_PCI) + +struct MV64361PCIState { + PCIHostState parent_obj; + + uint8_t index; + MemoryRegion io; + MemoryRegion mem; + qemu_irq irq[PCI_NUM_PINS]; + + uint32_t io_base; + uint32_t io_size; + uint32_t mem_base[4]; + uint32_t mem_size[4]; + uint64_t remap[5]; +}; + +static int mv64361_pcihost_map_irq(PCIDevice *pci_dev, int n) +{ + return (n + PCI_SLOT(pci_dev->devfn)) % PCI_NUM_PINS; +} + +static void mv64361_pcihost_set_irq(void *opaque, int n, int level) +{ + MV64361PCIState *s = opaque; + qemu_set_irq(s->irq[n], level); +} + +static void mv64361_pcihost_realize(DeviceState *dev, Error **errp) +{ + MV64361PCIState *s = MV64361_PCI(dev); + PCIHostState *h = PCI_HOST_BRIDGE(dev); + char *name; + + name = g_strdup_printf("pci%d-io", s->index); + memory_region_init(&s->io, OBJECT(dev), name, 0x10000); + g_free(name); + name = g_strdup_printf("pci%d-mem", s->index); + memory_region_init(&s->mem, OBJECT(dev), name, 1ULL << 32); + g_free(name); + name = g_strdup_printf("pci.%d", s->index); + h->bus = pci_register_root_bus(dev, name, mv64361_pcihost_set_irq, + mv64361_pcihost_map_irq, dev, + &s->mem, &s->io, 0, 4, TYPE_PCI_BUS); + g_free(name); + pci_create_simple(h->bus, 0, TYPE_MV64361_PCI_BRIDGE); +} + +static Property mv64361_pcihost_props[] = { + DEFINE_PROP_UINT8("index", MV64361PCIState, index, 0), + DEFINE_PROP_END_OF_LIST() +}; + +static void mv64361_pcihost_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = mv64361_pcihost_realize; + device_class_set_props(dc, mv64361_pcihost_props); + set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); +} + +static const TypeInfo mv64361_pcihost_info = { + .name = TYPE_MV64361_PCI, + .parent = TYPE_PCI_HOST_BRIDGE, + .instance_size = sizeof(MV64361PCIState), + .class_init = mv64361_pcihost_class_init, +}; + +static void mv64361_pci_register_types(void) +{ + type_register_static(&mv64361_pcihost_info); + type_register_static(&mv64361_pcibridge_info); +} + +type_init(mv64361_pci_register_types) + + +OBJECT_DECLARE_SIMPLE_TYPE(MV64361State, MV64361) + +struct MV64361State { + SysBusDevice parent_obj; + + MemoryRegion regs; + MV64361PCIState pci[2]; + MemoryRegion cpu_win[19]; + qemu_irq cpu_irq; + + 
/* registers state */ + uint32_t cpu_conf; + uint32_t regs_base; + uint32_t base_addr_enable; + uint64_t main_int_cr; + uint64_t cpu0_int_mask; + uint32_t gpp_io; + uint32_t gpp_level; + uint32_t gpp_value; + uint32_t gpp_int_cr; + uint32_t gpp_int_mask; + bool gpp_int_level; +}; + +enum mv64361_irq_cause { + MV64361_IRQ_DEVERR = 1, + MV64361_IRQ_DMAERR = 2, + MV64361_IRQ_CPUERR = 3, + MV64361_IRQ_IDMA0 = 4, + MV64361_IRQ_IDMA1 = 5, + MV64361_IRQ_IDMA2 = 6, + MV64361_IRQ_IDMA3 = 7, + MV64361_IRQ_TIMER0 = 8, + MV64361_IRQ_TIMER1 = 9, + MV64361_IRQ_TIMER2 = 10, + MV64361_IRQ_TIMER3 = 11, + MV64361_IRQ_PCI0 = 12, + MV64361_IRQ_SRAMERR = 13, + MV64361_IRQ_GBEERR = 14, + MV64361_IRQ_CERR = 15, + MV64361_IRQ_PCI1 = 16, + MV64361_IRQ_DRAMERR = 17, + MV64361_IRQ_WDNMI = 18, + MV64361_IRQ_WDE = 19, + MV64361_IRQ_PCI0IN = 20, + MV64361_IRQ_PCI0OUT = 21, + MV64361_IRQ_PCI1IN = 22, + MV64361_IRQ_PCI1OUT = 23, + MV64361_IRQ_P1_GPP0_7 = 24, + MV64361_IRQ_P1_GPP8_15 = 25, + MV64361_IRQ_P1_GPP16_23 = 26, + MV64361_IRQ_P1_GPP24_31 = 27, + MV64361_IRQ_P1_CPU_DB = 28, + /* 29-31: reserved */ + MV64361_IRQ_GBE0 = 32, + MV64361_IRQ_GBE1 = 33, + MV64361_IRQ_GBE2 = 34, + /* 35: reserved */ + MV64361_IRQ_SDMA0 = 36, + MV64361_IRQ_TWSI = 37, + MV64361_IRQ_SDMA1 = 38, + MV64361_IRQ_BRG = 39, + MV64361_IRQ_MPSC0 = 40, + MV64361_IRQ_MPSC1 = 41, + MV64361_IRQ_G0RX = 42, + MV64361_IRQ_G0TX = 43, + MV64361_IRQ_G0MISC = 44, + MV64361_IRQ_G1RX = 45, + MV64361_IRQ_G1TX = 46, + MV64361_IRQ_G1MISC = 47, + MV64361_IRQ_G2RX = 48, + MV64361_IRQ_G2TX = 49, + MV64361_IRQ_G2MISC = 50, + /* 51-55: reserved */ + MV64361_IRQ_P0_GPP0_7 = 56, + MV64361_IRQ_P0_GPP8_15 = 57, + MV64361_IRQ_P0_GPP16_23 = 58, + MV64361_IRQ_P0_GPP24_31 = 59, + MV64361_IRQ_P0_CPU_DB = 60, + /* 61-63: reserved */ +}; + +PCIBus *mv64361_get_pci_bus(DeviceState *dev, int n) +{ + MV64361State *mv = MV64361(dev); + return PCI_HOST_BRIDGE(&mv->pci[n])->bus; +} + +static void unmap_region(MemoryRegion *mr) +{ + if (memory_region_is_mapped(mr)) { + memory_region_del_subregion(get_system_memory(), mr); + object_unparent(OBJECT(mr)); + } +} + +static void map_pci_region(MemoryRegion *mr, MemoryRegion *parent, + struct Object *owner, const char *name, + hwaddr poffs, uint64_t size, hwaddr moffs) +{ + memory_region_init_alias(mr, owner, name, parent, poffs, size); + memory_region_add_subregion(get_system_memory(), moffs, mr); + trace_mv64361_region_map(name, poffs, size, moffs); +} + +static void set_mem_windows(MV64361State *s, uint32_t val) +{ + MV64361PCIState *p; + MemoryRegion *mr; + uint32_t mask; + int i; + + val &= 0x1fffff; + for (mask = 1, i = 0; i < 21; i++, mask <<= 1) { + if ((val & mask) != (s->base_addr_enable & mask)) { + trace_mv64361_region_enable(!(val & mask) ? 
"enable" : "disable", i); + /* + * 0-3 are SDRAM chip selects but we map all RAM directly + * 4-7 are device chip selects (not sure what those are) + * 8 is Boot device (ROM) chip select but we map that directly too + */ + if (i == 9) { + p = &s->pci[0]; + mr = &s->cpu_win[i]; + unmap_region(mr); + if (!(val & mask)) { + map_pci_region(mr, &p->io, OBJECT(s), "pci0-io-win", + p->remap[4], (p->io_size + 1) << 16, + (p->io_base & 0xfffff) << 16); + } + } else if (i == 10) { + p = &s->pci[0]; + mr = &s->cpu_win[i]; + unmap_region(mr); + if (!(val & mask)) { + map_pci_region(mr, &p->mem, OBJECT(s), "pci0-mem0-win", + p->remap[0], (p->mem_size[0] + 1) << 16, + (p->mem_base[0] & 0xfffff) << 16); + } + } else if (i == 11) { + p = &s->pci[0]; + mr = &s->cpu_win[i]; + unmap_region(mr); + if (!(val & mask)) { + map_pci_region(mr, &p->mem, OBJECT(s), "pci0-mem1-win", + p->remap[1], (p->mem_size[1] + 1) << 16, + (p->mem_base[1] & 0xfffff) << 16); + } + } else if (i == 12) { + p = &s->pci[0]; + mr = &s->cpu_win[i]; + unmap_region(mr); + if (!(val & mask)) { + map_pci_region(mr, &p->mem, OBJECT(s), "pci0-mem2-win", + p->remap[2], (p->mem_size[2] + 1) << 16, + (p->mem_base[2] & 0xfffff) << 16); + } + } else if (i == 13) { + p = &s->pci[0]; + mr = &s->cpu_win[i]; + unmap_region(mr); + if (!(val & mask)) { + map_pci_region(mr, &p->mem, OBJECT(s), "pci0-mem3-win", + p->remap[3], (p->mem_size[3] + 1) << 16, + (p->mem_base[3] & 0xfffff) << 16); + } + } else if (i == 14) { + p = &s->pci[1]; + mr = &s->cpu_win[i]; + unmap_region(mr); + if (!(val & mask)) { + map_pci_region(mr, &p->io, OBJECT(s), "pci1-io-win", + p->remap[4], (p->io_size + 1) << 16, + (p->io_base & 0xfffff) << 16); + } + } else if (i == 15) { + p = &s->pci[1]; + mr = &s->cpu_win[i]; + unmap_region(mr); + if (!(val & mask)) { + map_pci_region(mr, &p->mem, OBJECT(s), "pci1-mem0-win", + p->remap[0], (p->mem_size[0] + 1) << 16, + (p->mem_base[0] & 0xfffff) << 16); + } + } else if (i == 16) { + p = &s->pci[1]; + mr = &s->cpu_win[i]; + unmap_region(mr); + if (!(val & mask)) { + map_pci_region(mr, &p->mem, OBJECT(s), "pci1-mem1-win", + p->remap[1], (p->mem_size[1] + 1) << 16, + (p->mem_base[1] & 0xfffff) << 16); + } + } else if (i == 17) { + p = &s->pci[1]; + mr = &s->cpu_win[i]; + unmap_region(mr); + if (!(val & mask)) { + map_pci_region(mr, &p->mem, OBJECT(s), "pci1-mem2-win", + p->remap[2], (p->mem_size[2] + 1) << 16, + (p->mem_base[2] & 0xfffff) << 16); + } + } else if (i == 18) { + p = &s->pci[1]; + mr = &s->cpu_win[i]; + unmap_region(mr); + if (!(val & mask)) { + map_pci_region(mr, &p->mem, OBJECT(s), "pci1-mem3-win", + p->remap[3], (p->mem_size[3] + 1) << 16, + (p->mem_base[3] & 0xfffff) << 16); + } + /* 19 is integrated SRAM */ + } else if (i == 20) { + mr = &s->regs; + unmap_region(mr); + if (!(val & mask)) { + memory_region_add_subregion(get_system_memory(), + (s->regs_base & 0xfffff) << 16, mr); + } + } + } + } + s->base_addr_enable = val; +} + +static void mv64361_update_irq(void *opaque, int n, int level) +{ + MV64361State *s = opaque; + uint64_t val = s->main_int_cr; + + if (level) { + val |= BIT_ULL(n); + } else { + val &= ~BIT_ULL(n); + } + if ((s->main_int_cr & s->cpu0_int_mask) != (val & s->cpu0_int_mask)) { + qemu_set_irq(s->cpu_irq, level); + } + s->main_int_cr = val; +} + +static uint64_t mv64361_read(void *opaque, hwaddr addr, unsigned int size) +{ + MV64361State *s = MV64361(opaque); + uint32_t ret = 0; + + switch (addr) { + case MV64340_CPU_CONFIG: + ret = s->cpu_conf; + break; + case MV64340_PCI_0_IO_BASE_ADDR: + ret = 
s->pci[0].io_base; + break; + case MV64340_PCI_0_IO_SIZE: + ret = s->pci[0].io_size; + break; + case MV64340_PCI_0_IO_ADDR_REMAP: + ret = s->pci[0].remap[4] >> 16; + break; + case MV64340_PCI_0_MEMORY0_BASE_ADDR: + ret = s->pci[0].mem_base[0]; + break; + case MV64340_PCI_0_MEMORY0_SIZE: + ret = s->pci[0].mem_size[0]; + break; + case MV64340_PCI_0_MEMORY0_LOW_ADDR_REMAP: + ret = (s->pci[0].remap[0] & 0xffff0000) >> 16; + break; + case MV64340_PCI_0_MEMORY0_HIGH_ADDR_REMAP: + ret = s->pci[0].remap[0] >> 32; + break; + case MV64340_PCI_0_MEMORY1_BASE_ADDR: + ret = s->pci[0].mem_base[1]; + break; + case MV64340_PCI_0_MEMORY1_SIZE: + ret = s->pci[0].mem_size[1]; + break; + case MV64340_PCI_0_MEMORY1_LOW_ADDR_REMAP: + ret = (s->pci[0].remap[1] & 0xffff0000) >> 16; + break; + case MV64340_PCI_0_MEMORY1_HIGH_ADDR_REMAP: + ret = s->pci[0].remap[1] >> 32; + break; + case MV64340_PCI_0_MEMORY2_BASE_ADDR: + ret = s->pci[0].mem_base[2]; + break; + case MV64340_PCI_0_MEMORY2_SIZE: + ret = s->pci[0].mem_size[2]; + break; + case MV64340_PCI_0_MEMORY2_LOW_ADDR_REMAP: + ret = (s->pci[0].remap[2] & 0xffff0000) >> 16; + break; + case MV64340_PCI_0_MEMORY2_HIGH_ADDR_REMAP: + ret = s->pci[0].remap[2] >> 32; + break; + case MV64340_PCI_0_MEMORY3_BASE_ADDR: + ret = s->pci[0].mem_base[3]; + break; + case MV64340_PCI_0_MEMORY3_SIZE: + ret = s->pci[0].mem_size[3]; + break; + case MV64340_PCI_0_MEMORY3_LOW_ADDR_REMAP: + ret = (s->pci[0].remap[3] & 0xffff0000) >> 16; + break; + case MV64340_PCI_0_MEMORY3_HIGH_ADDR_REMAP: + ret = s->pci[0].remap[3] >> 32; + break; + case MV64340_PCI_1_IO_BASE_ADDR: + ret = s->pci[1].io_base; + break; + case MV64340_PCI_1_IO_SIZE: + ret = s->pci[1].io_size; + break; + case MV64340_PCI_1_IO_ADDR_REMAP: + ret = s->pci[1].remap[4] >> 16; + break; + case MV64340_PCI_1_MEMORY0_BASE_ADDR: + ret = s->pci[1].mem_base[0]; + break; + case MV64340_PCI_1_MEMORY0_SIZE: + ret = s->pci[1].mem_size[0]; + break; + case MV64340_PCI_1_MEMORY0_LOW_ADDR_REMAP: + ret = (s->pci[1].remap[0] & 0xffff0000) >> 16; + break; + case MV64340_PCI_1_MEMORY0_HIGH_ADDR_REMAP: + ret = s->pci[1].remap[0] >> 32; + break; + case MV64340_PCI_1_MEMORY1_BASE_ADDR: + ret = s->pci[1].mem_base[1]; + break; + case MV64340_PCI_1_MEMORY1_SIZE: + ret = s->pci[1].mem_size[1]; + break; + case MV64340_PCI_1_MEMORY1_LOW_ADDR_REMAP: + ret = (s->pci[1].remap[1] & 0xffff0000) >> 16; + break; + case MV64340_PCI_1_MEMORY1_HIGH_ADDR_REMAP: + ret = s->pci[1].remap[1] >> 32; + break; + case MV64340_PCI_1_MEMORY2_BASE_ADDR: + ret = s->pci[1].mem_base[2]; + break; + case MV64340_PCI_1_MEMORY2_SIZE: + ret = s->pci[1].mem_size[2]; + break; + case MV64340_PCI_1_MEMORY2_LOW_ADDR_REMAP: + ret = (s->pci[1].remap[2] & 0xffff0000) >> 16; + break; + case MV64340_PCI_1_MEMORY2_HIGH_ADDR_REMAP: + ret = s->pci[1].remap[2] >> 32; + break; + case MV64340_PCI_1_MEMORY3_BASE_ADDR: + ret = s->pci[1].mem_base[3]; + break; + case MV64340_PCI_1_MEMORY3_SIZE: + ret = s->pci[1].mem_size[3]; + break; + case MV64340_PCI_1_MEMORY3_LOW_ADDR_REMAP: + ret = (s->pci[1].remap[3] & 0xffff0000) >> 16; + break; + case MV64340_PCI_1_MEMORY3_HIGH_ADDR_REMAP: + ret = s->pci[1].remap[3] >> 32; + break; + case MV64340_INTERNAL_SPACE_BASE_ADDR: + ret = s->regs_base; + break; + case MV64340_BASE_ADDR_ENABLE: + ret = s->base_addr_enable; + break; + case MV64340_PCI_0_CONFIG_ADDR: + ret = pci_host_conf_le_ops.read(PCI_HOST_BRIDGE(&s->pci[0]), 0, size); + break; + case MV64340_PCI_0_CONFIG_DATA_VIRTUAL_REG ... 
+ MV64340_PCI_0_CONFIG_DATA_VIRTUAL_REG + 3: + ret = pci_host_data_le_ops.read(PCI_HOST_BRIDGE(&s->pci[0]), + addr - MV64340_PCI_0_CONFIG_DATA_VIRTUAL_REG, size); + break; + case MV64340_PCI_1_CONFIG_ADDR: + ret = pci_host_conf_le_ops.read(PCI_HOST_BRIDGE(&s->pci[1]), 0, size); + break; + case MV64340_PCI_1_CONFIG_DATA_VIRTUAL_REG ... + MV64340_PCI_1_CONFIG_DATA_VIRTUAL_REG + 3: + ret = pci_host_data_le_ops.read(PCI_HOST_BRIDGE(&s->pci[1]), + addr - MV64340_PCI_1_CONFIG_DATA_VIRTUAL_REG, size); + break; + case MV64340_PCI_1_INTERRUPT_ACKNOWLEDGE_VIRTUAL_REG: + /* FIXME: Should this be sent via the PCI bus somehow? */ + if (s->gpp_int_level && (s->gpp_value & BIT(31))) { + ret = pic_read_irq(isa_pic); + } + break; + case MV64340_MAIN_INTERRUPT_CAUSE_LOW: + ret = s->main_int_cr; + break; + case MV64340_MAIN_INTERRUPT_CAUSE_HIGH: + ret = s->main_int_cr >> 32; + break; + case MV64340_CPU_INTERRUPT0_MASK_LOW: + ret = s->cpu0_int_mask; + break; + case MV64340_CPU_INTERRUPT0_MASK_HIGH: + ret = s->cpu0_int_mask >> 32; + break; + case MV64340_CPU_INTERRUPT0_SELECT_CAUSE: + ret = s->main_int_cr; + if (s->main_int_cr & s->cpu0_int_mask) { + if (!(s->main_int_cr & s->cpu0_int_mask & 0xffffffff)) { + ret = s->main_int_cr >> 32 | BIT(30); + } else if ((s->main_int_cr & s->cpu0_int_mask) >> 32) { + ret |= BIT(31); + } + } + break; + case MV64340_CUNIT_ARBITER_CONTROL_REG: + ret = 0x11ff0000 | (s->gpp_int_level << 10); + break; + case MV64340_GPP_IO_CONTROL: + ret = s->gpp_io; + break; + case MV64340_GPP_LEVEL_CONTROL: + ret = s->gpp_level; + break; + case MV64340_GPP_VALUE: + ret = s->gpp_value; + break; + case MV64340_GPP_VALUE_SET: + case MV64340_GPP_VALUE_CLEAR: + ret = 0; + break; + case MV64340_GPP_INTERRUPT_CAUSE: + ret = s->gpp_int_cr; + break; + case MV64340_GPP_INTERRUPT_MASK0: + case MV64340_GPP_INTERRUPT_MASK1: + ret = s->gpp_int_mask; + break; + default: + qemu_log_mask(LOG_UNIMP, "%s: Unimplemented register read 0x%" + HWADDR_PRIx "\n", __func__, addr); + break; + } + if (addr != MV64340_PCI_1_INTERRUPT_ACKNOWLEDGE_VIRTUAL_REG) { + trace_mv64361_reg_read(addr, ret); + } + return ret; +} + +static void warn_swap_bit(uint64_t val) +{ + if ((val & 0x3000000ULL) >> 24 != 1) { + qemu_log_mask(LOG_UNIMP, "%s: Data swap not implemented", __func__); + } +} + +static void mv64361_set_pci_mem_remap(MV64361State *s, int bus, int idx, + uint64_t val, bool high) +{ + if (high) { + s->pci[bus].remap[idx] = val; + } else { + s->pci[bus].remap[idx] &= 0xffffffff00000000ULL; + s->pci[bus].remap[idx] |= (val & 0xffffULL) << 16; + } +} + +static void mv64361_write(void *opaque, hwaddr addr, uint64_t val, + unsigned int size) +{ + MV64361State *s = MV64361(opaque); + + trace_mv64361_reg_write(addr, val); + switch (addr) { + case MV64340_CPU_CONFIG: + s->cpu_conf = val & 0xe4e3bffULL; + s->cpu_conf |= BIT(23); + break; + case MV64340_PCI_0_IO_BASE_ADDR: + s->pci[0].io_base = val & 0x30fffffULL; + warn_swap_bit(val); + if (!(s->cpu_conf & BIT(27))) { + s->pci[0].remap[4] = (val & 0xffffULL) << 16; + } + break; + case MV64340_PCI_0_IO_SIZE: + s->pci[0].io_size = val & 0xffffULL; + break; + case MV64340_PCI_0_IO_ADDR_REMAP: + s->pci[0].remap[4] = (val & 0xffffULL) << 16; + break; + case MV64340_PCI_0_MEMORY0_BASE_ADDR: + s->pci[0].mem_base[0] = val & 0x70fffffULL; + warn_swap_bit(val); + if (!(s->cpu_conf & BIT(27))) { + mv64361_set_pci_mem_remap(s, 0, 0, val, false); + } + break; + case MV64340_PCI_0_MEMORY0_SIZE: + s->pci[0].mem_size[0] = val & 0xffffULL; + break; + case MV64340_PCI_0_MEMORY0_LOW_ADDR_REMAP: + 
case MV64340_PCI_0_MEMORY0_HIGH_ADDR_REMAP: + mv64361_set_pci_mem_remap(s, 0, 0, val, + (addr == MV64340_PCI_0_MEMORY0_HIGH_ADDR_REMAP)); + break; + case MV64340_PCI_0_MEMORY1_BASE_ADDR: + s->pci[0].mem_base[1] = val & 0x70fffffULL; + warn_swap_bit(val); + if (!(s->cpu_conf & BIT(27))) { + mv64361_set_pci_mem_remap(s, 0, 1, val, false); + } + break; + case MV64340_PCI_0_MEMORY1_SIZE: + s->pci[0].mem_size[1] = val & 0xffffULL; + break; + case MV64340_PCI_0_MEMORY1_LOW_ADDR_REMAP: + case MV64340_PCI_0_MEMORY1_HIGH_ADDR_REMAP: + mv64361_set_pci_mem_remap(s, 0, 1, val, + (addr == MV64340_PCI_0_MEMORY1_HIGH_ADDR_REMAP)); + break; + case MV64340_PCI_0_MEMORY2_BASE_ADDR: + s->pci[0].mem_base[2] = val & 0x70fffffULL; + warn_swap_bit(val); + if (!(s->cpu_conf & BIT(27))) { + mv64361_set_pci_mem_remap(s, 0, 2, val, false); + } + break; + case MV64340_PCI_0_MEMORY2_SIZE: + s->pci[0].mem_size[2] = val & 0xffffULL; + break; + case MV64340_PCI_0_MEMORY2_LOW_ADDR_REMAP: + case MV64340_PCI_0_MEMORY2_HIGH_ADDR_REMAP: + mv64361_set_pci_mem_remap(s, 0, 2, val, + (addr == MV64340_PCI_0_MEMORY2_HIGH_ADDR_REMAP)); + break; + case MV64340_PCI_0_MEMORY3_BASE_ADDR: + s->pci[0].mem_base[3] = val & 0x70fffffULL; + warn_swap_bit(val); + if (!(s->cpu_conf & BIT(27))) { + mv64361_set_pci_mem_remap(s, 0, 3, val, false); + } + break; + case MV64340_PCI_0_MEMORY3_SIZE: + s->pci[0].mem_size[3] = val & 0xffffULL; + break; + case MV64340_PCI_0_MEMORY3_LOW_ADDR_REMAP: + case MV64340_PCI_0_MEMORY3_HIGH_ADDR_REMAP: + mv64361_set_pci_mem_remap(s, 0, 3, val, + (addr == MV64340_PCI_0_MEMORY3_HIGH_ADDR_REMAP)); + break; + case MV64340_PCI_1_IO_BASE_ADDR: + s->pci[1].io_base = val & 0x30fffffULL; + warn_swap_bit(val); + if (!(s->cpu_conf & BIT(27))) { + s->pci[1].remap[4] = (val & 0xffffULL) << 16; + } + break; + case MV64340_PCI_1_IO_SIZE: + s->pci[1].io_size = val & 0xffffULL; + break; + case MV64340_PCI_1_MEMORY0_BASE_ADDR: + s->pci[1].mem_base[0] = val & 0x70fffffULL; + warn_swap_bit(val); + if (!(s->cpu_conf & BIT(27))) { + mv64361_set_pci_mem_remap(s, 1, 0, val, false); + } + break; + case MV64340_PCI_1_MEMORY0_SIZE: + s->pci[1].mem_size[0] = val & 0xffffULL; + break; + case MV64340_PCI_1_MEMORY0_LOW_ADDR_REMAP: + case MV64340_PCI_1_MEMORY0_HIGH_ADDR_REMAP: + mv64361_set_pci_mem_remap(s, 1, 0, val, + (addr == MV64340_PCI_1_MEMORY0_HIGH_ADDR_REMAP)); + break; + case MV64340_PCI_1_MEMORY1_BASE_ADDR: + s->pci[1].mem_base[1] = val & 0x70fffffULL; + warn_swap_bit(val); + if (!(s->cpu_conf & BIT(27))) { + mv64361_set_pci_mem_remap(s, 1, 1, val, false); + } + break; + case MV64340_PCI_1_MEMORY1_SIZE: + s->pci[1].mem_size[1] = val & 0xffffULL; + break; + case MV64340_PCI_1_MEMORY1_LOW_ADDR_REMAP: + case MV64340_PCI_1_MEMORY1_HIGH_ADDR_REMAP: + mv64361_set_pci_mem_remap(s, 1, 1, val, + (addr == MV64340_PCI_1_MEMORY1_HIGH_ADDR_REMAP)); + break; + case MV64340_PCI_1_MEMORY2_BASE_ADDR: + s->pci[1].mem_base[2] = val & 0x70fffffULL; + warn_swap_bit(val); + if (!(s->cpu_conf & BIT(27))) { + mv64361_set_pci_mem_remap(s, 1, 2, val, false); + } + break; + case MV64340_PCI_1_MEMORY2_SIZE: + s->pci[1].mem_size[2] = val & 0xffffULL; + break; + case MV64340_PCI_1_MEMORY2_LOW_ADDR_REMAP: + case MV64340_PCI_1_MEMORY2_HIGH_ADDR_REMAP: + mv64361_set_pci_mem_remap(s, 1, 2, val, + (addr == MV64340_PCI_1_MEMORY2_HIGH_ADDR_REMAP)); + break; + case MV64340_PCI_1_MEMORY3_BASE_ADDR: + s->pci[1].mem_base[3] = val & 0x70fffffULL; + warn_swap_bit(val); + if (!(s->cpu_conf & BIT(27))) { + mv64361_set_pci_mem_remap(s, 1, 3, val, false); + } + break; + case 
MV64340_PCI_1_MEMORY3_SIZE: + s->pci[1].mem_size[3] = val & 0xffffULL; + break; + case MV64340_PCI_1_MEMORY3_LOW_ADDR_REMAP: + case MV64340_PCI_1_MEMORY3_HIGH_ADDR_REMAP: + mv64361_set_pci_mem_remap(s, 1, 3, val, + (addr == MV64340_PCI_1_MEMORY3_HIGH_ADDR_REMAP)); + break; + case MV64340_INTERNAL_SPACE_BASE_ADDR: + s->regs_base = val & 0xfffffULL; + break; + case MV64340_BASE_ADDR_ENABLE: + set_mem_windows(s, val); + break; + case MV64340_PCI_0_CONFIG_ADDR: + pci_host_conf_le_ops.write(PCI_HOST_BRIDGE(&s->pci[0]), 0, val, size); + break; + case MV64340_PCI_0_CONFIG_DATA_VIRTUAL_REG ... + MV64340_PCI_0_CONFIG_DATA_VIRTUAL_REG + 3: + pci_host_data_le_ops.write(PCI_HOST_BRIDGE(&s->pci[0]), + addr - MV64340_PCI_0_CONFIG_DATA_VIRTUAL_REG, val, size); + break; + case MV64340_PCI_1_CONFIG_ADDR: + pci_host_conf_le_ops.write(PCI_HOST_BRIDGE(&s->pci[1]), 0, val, size); + break; + case MV64340_PCI_1_CONFIG_DATA_VIRTUAL_REG ... + MV64340_PCI_1_CONFIG_DATA_VIRTUAL_REG + 3: + pci_host_data_le_ops.write(PCI_HOST_BRIDGE(&s->pci[1]), + addr - MV64340_PCI_1_CONFIG_DATA_VIRTUAL_REG, val, size); + break; + case MV64340_CPU_INTERRUPT0_MASK_LOW: + s->cpu0_int_mask &= 0xffffffff00000000ULL; + s->cpu0_int_mask |= val & 0xffffffffULL; + break; + case MV64340_CPU_INTERRUPT0_MASK_HIGH: + s->cpu0_int_mask &= 0xffffffffULL; + s->cpu0_int_mask |= val << 32; + break; + case MV64340_CUNIT_ARBITER_CONTROL_REG: + s->gpp_int_level = !!(val & BIT(10)); + break; + case MV64340_GPP_IO_CONTROL: + s->gpp_io = val; + break; + case MV64340_GPP_LEVEL_CONTROL: + s->gpp_level = val; + break; + case MV64340_GPP_VALUE: + s->gpp_value &= ~s->gpp_io; + s->gpp_value |= val & s->gpp_io; + break; + case MV64340_GPP_VALUE_SET: + s->gpp_value |= val & s->gpp_io; + break; + case MV64340_GPP_VALUE_CLEAR: + s->gpp_value &= ~(val & s->gpp_io); + break; + case MV64340_GPP_INTERRUPT_CAUSE: + if (!s->gpp_int_level && val != s->gpp_int_cr) { + int i; + uint32_t ch = s->gpp_int_cr ^ val; + s->gpp_int_cr = val; + for (i = 0; i < 4; i++) { + if ((ch & 0xff << i) && !(val & 0xff << i)) { + mv64361_update_irq(opaque, MV64361_IRQ_P0_GPP0_7 + i, 0); + } + } + } else { + s->gpp_int_cr = val; + } + break; + case MV64340_GPP_INTERRUPT_MASK0: + case MV64340_GPP_INTERRUPT_MASK1: + s->gpp_int_mask = val; + break; + default: + qemu_log_mask(LOG_UNIMP, "%s: Unimplemented register write 0x%" + HWADDR_PRIx " = %"PRIx64"\n", __func__, addr, val); + break; + } +} + +static const MemoryRegionOps mv64361_ops = { + .read = mv64361_read, + .write = mv64361_write, + .valid.min_access_size = 1, + .valid.max_access_size = 4, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static void mv64361_gpp_irq(void *opaque, int n, int level) +{ + MV64361State *s = opaque; + uint32_t mask = BIT(n); + uint32_t val = s->gpp_value & ~mask; + + if (s->gpp_level & mask) { + level = !level; + } + val |= level << n; + if (val > s->gpp_value) { + s->gpp_value = val; + s->gpp_int_cr |= mask; + if (s->gpp_int_mask & mask) { + mv64361_update_irq(opaque, MV64361_IRQ_P0_GPP0_7 + n / 8, 1); + } + } else if (val < s->gpp_value) { + int b = n / 8; + s->gpp_value = val; + if (s->gpp_int_level && !(val & 0xff << b)) { + mv64361_update_irq(opaque, MV64361_IRQ_P0_GPP0_7 + b, 0); + } + } +} + +static void mv64361_realize(DeviceState *dev, Error **errp) +{ + MV64361State *s = MV64361(dev); + int i; + + s->base_addr_enable = 0x1fffff; + memory_region_init_io(&s->regs, OBJECT(s), &mv64361_ops, s, + TYPE_MV64361, 0x10000); + sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->regs); + for (i = 0; i < 2; i++) { + g_autofree 
char *name = g_strdup_printf("pcihost%d", i); + object_initialize_child(OBJECT(dev), name, &s->pci[i], + TYPE_MV64361_PCI); + DeviceState *pci = DEVICE(&s->pci[i]); + qdev_prop_set_uint8(pci, "index", i); + sysbus_realize_and_unref(SYS_BUS_DEVICE(pci), &error_fatal); + } + sysbus_init_irq(SYS_BUS_DEVICE(dev), &s->cpu_irq); + qdev_init_gpio_in_named(dev, mv64361_gpp_irq, "gpp", 32); + /* FIXME: PCI IRQ connections may be board specific */ + for (i = 0; i < PCI_NUM_PINS; i++) { + s->pci[1].irq[i] = qdev_get_gpio_in_named(dev, "gpp", 12 + i); + } +} + +static void mv64361_reset(DeviceState *dev) +{ + MV64361State *s = MV64361(dev); + int i, j; + + /* + * These values may be board specific + * Real chip supports init from an eprom but that's not modelled + */ + set_mem_windows(s, 0x1fffff); + s->cpu_conf = 0x28000ff; + s->regs_base = 0x100f100; + s->pci[0].io_base = 0x100f800; + s->pci[0].io_size = 0xff; + s->pci[0].mem_base[0] = 0x100c000; + s->pci[0].mem_size[0] = 0x1fff; + s->pci[0].mem_base[1] = 0x100f900; + s->pci[0].mem_size[1] = 0xff; + s->pci[0].mem_base[2] = 0x100f400; + s->pci[0].mem_size[2] = 0x1ff; + s->pci[0].mem_base[3] = 0x100f600; + s->pci[0].mem_size[3] = 0x1ff; + s->pci[1].io_base = 0x100fe00; + s->pci[1].io_size = 0xff; + s->pci[1].mem_base[0] = 0x1008000; + s->pci[1].mem_size[0] = 0x3fff; + s->pci[1].mem_base[1] = 0x100fd00; + s->pci[1].mem_size[1] = 0xff; + s->pci[1].mem_base[2] = 0x1002600; + s->pci[1].mem_size[2] = 0x1ff; + s->pci[1].mem_base[3] = 0x100ff80; + s->pci[1].mem_size[3] = 0x7f; + for (i = 0; i < 2; i++) { + for (j = 0; j < 4; j++) { + s->pci[i].remap[j] = s->pci[i].mem_base[j] << 16; + } + } + s->pci[0].remap[1] = 0; + s->pci[1].remap[1] = 0; + set_mem_windows(s, 0xfbfff); +} + +static void mv64361_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = mv64361_realize; + dc->reset = mv64361_reset; +} + +static const TypeInfo mv64361_type_info = { + .name = TYPE_MV64361, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(MV64361State), + .class_init = mv64361_class_init, +}; + +static void mv64361_register_types(void) +{ + type_register_static(&mv64361_type_info); +} + +type_init(mv64361_register_types) diff --git a/hw/pci-host/mv643xx.h b/hw/pci-host/mv643xx.h new file mode 100644 index 000000000..cd26a43f1 --- /dev/null +++ b/hw/pci-host/mv643xx.h @@ -0,0 +1,918 @@ +/* + * mv643xx.h - MV-643XX Internal registers definition file. + * + * Copyright 2002 Momentum Computer, Inc. + * Author: Matthew Dharm + * Copyright 2002 GALILEO TECHNOLOGY, LTD. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ +#ifndef ASM_MV643XX_H +#define ASM_MV643XX_H + +/****************************************/ +/* Processor Address Space */ +/****************************************/ + +/* DDR SDRAM BAR and size registers */ + +#define MV64340_CS_0_BASE_ADDR 0x008 +#define MV64340_CS_0_SIZE 0x010 +#define MV64340_CS_1_BASE_ADDR 0x208 +#define MV64340_CS_1_SIZE 0x210 +#define MV64340_CS_2_BASE_ADDR 0x018 +#define MV64340_CS_2_SIZE 0x020 +#define MV64340_CS_3_BASE_ADDR 0x218 +#define MV64340_CS_3_SIZE 0x220 + +/* Devices BAR and size registers */ + +#define MV64340_DEV_CS0_BASE_ADDR 0x028 +#define MV64340_DEV_CS0_SIZE 0x030 +#define MV64340_DEV_CS1_BASE_ADDR 0x228 +#define MV64340_DEV_CS1_SIZE 0x230 +#define MV64340_DEV_CS2_BASE_ADDR 0x248 +#define MV64340_DEV_CS2_SIZE 0x250 +#define MV64340_DEV_CS3_BASE_ADDR 0x038 +#define MV64340_DEV_CS3_SIZE 0x040 +#define MV64340_BOOTCS_BASE_ADDR 0x238 +#define MV64340_BOOTCS_SIZE 0x240 + +/* PCI 0 BAR and size registers */ + +#define MV64340_PCI_0_IO_BASE_ADDR 0x048 +#define MV64340_PCI_0_IO_SIZE 0x050 +#define MV64340_PCI_0_MEMORY0_BASE_ADDR 0x058 +#define MV64340_PCI_0_MEMORY0_SIZE 0x060 +#define MV64340_PCI_0_MEMORY1_BASE_ADDR 0x080 +#define MV64340_PCI_0_MEMORY1_SIZE 0x088 +#define MV64340_PCI_0_MEMORY2_BASE_ADDR 0x258 +#define MV64340_PCI_0_MEMORY2_SIZE 0x260 +#define MV64340_PCI_0_MEMORY3_BASE_ADDR 0x280 +#define MV64340_PCI_0_MEMORY3_SIZE 0x288 + +/* PCI 1 BAR and size registers */ +#define MV64340_PCI_1_IO_BASE_ADDR 0x090 +#define MV64340_PCI_1_IO_SIZE 0x098 +#define MV64340_PCI_1_MEMORY0_BASE_ADDR 0x0a0 +#define MV64340_PCI_1_MEMORY0_SIZE 0x0a8 +#define MV64340_PCI_1_MEMORY1_BASE_ADDR 0x0b0 +#define MV64340_PCI_1_MEMORY1_SIZE 0x0b8 +#define MV64340_PCI_1_MEMORY2_BASE_ADDR 0x2a0 +#define MV64340_PCI_1_MEMORY2_SIZE 0x2a8 +#define MV64340_PCI_1_MEMORY3_BASE_ADDR 0x2b0 +#define MV64340_PCI_1_MEMORY3_SIZE 0x2b8 + +/* SRAM base address */ +#define MV64340_INTEGRATED_SRAM_BASE_ADDR 0x268 + +/* internal registers space base address */ +#define MV64340_INTERNAL_SPACE_BASE_ADDR 0x068 + +/* Enables the CS , DEV_CS , PCI 0 and PCI 1 windows above */ +#define MV64340_BASE_ADDR_ENABLE 0x278 + +/****************************************/ +/* PCI remap registers */ +/****************************************/ + + /* PCI 0 */ +#define MV64340_PCI_0_IO_ADDR_REMAP 0x0f0 +#define MV64340_PCI_0_MEMORY0_LOW_ADDR_REMAP 0x0f8 +#define MV64340_PCI_0_MEMORY0_HIGH_ADDR_REMAP 0x320 +#define MV64340_PCI_0_MEMORY1_LOW_ADDR_REMAP 0x100 +#define MV64340_PCI_0_MEMORY1_HIGH_ADDR_REMAP 0x328 +#define MV64340_PCI_0_MEMORY2_LOW_ADDR_REMAP 0x2f8 +#define MV64340_PCI_0_MEMORY2_HIGH_ADDR_REMAP 0x330 +#define MV64340_PCI_0_MEMORY3_LOW_ADDR_REMAP 0x300 +#define MV64340_PCI_0_MEMORY3_HIGH_ADDR_REMAP 0x338 + /* PCI 1 */ +#define MV64340_PCI_1_IO_ADDR_REMAP 0x108 +#define MV64340_PCI_1_MEMORY0_LOW_ADDR_REMAP 0x110 +#define MV64340_PCI_1_MEMORY0_HIGH_ADDR_REMAP 0x340 +#define MV64340_PCI_1_MEMORY1_LOW_ADDR_REMAP 0x118 +#define MV64340_PCI_1_MEMORY1_HIGH_ADDR_REMAP 0x348 +#define MV64340_PCI_1_MEMORY2_LOW_ADDR_REMAP 0x310 +#define MV64340_PCI_1_MEMORY2_HIGH_ADDR_REMAP 0x350 +#define MV64340_PCI_1_MEMORY3_LOW_ADDR_REMAP 0x318 +#define MV64340_PCI_1_MEMORY3_HIGH_ADDR_REMAP 0x358 + +#define MV64340_CPU_PCI_0_HEADERS_RETARGET_CONTROL 0x3b0 +#define MV64340_CPU_PCI_0_HEADERS_RETARGET_BASE 0x3b8 +#define MV64340_CPU_PCI_1_HEADERS_RETARGET_CONTROL 0x3c0 +#define MV64340_CPU_PCI_1_HEADERS_RETARGET_BASE 0x3c8 +#define MV64340_CPU_GE_HEADERS_RETARGET_CONTROL 0x3d0 +#define MV64340_CPU_GE_HEADERS_RETARGET_BASE 
0x3d8
+#define MV64340_CPU_IDMA_HEADERS_RETARGET_CONTROL 0x3e0
+#define MV64340_CPU_IDMA_HEADERS_RETARGET_BASE 0x3e8
+
+/****************************************/
+/* CPU Control Registers */
+/****************************************/
+
+#define MV64340_CPU_CONFIG 0x000
+#define MV64340_CPU_MODE 0x120
+#define MV64340_CPU_MASTER_CONTROL 0x160
+#define MV64340_CPU_CROSS_BAR_CONTROL_LOW 0x150
+#define MV64340_CPU_CROSS_BAR_CONTROL_HIGH 0x158
+#define MV64340_CPU_CROSS_BAR_TIMEOUT 0x168
+
+/****************************************/
+/* SMP Registers */
+/****************************************/
+
+#define MV64340_SMP_WHO_AM_I 0x200
+#define MV64340_SMP_CPU0_DOORBELL 0x214
+#define MV64340_SMP_CPU0_DOORBELL_CLEAR 0x21C
+#define MV64340_SMP_CPU1_DOORBELL 0x224
+#define MV64340_SMP_CPU1_DOORBELL_CLEAR 0x22C
+#define MV64340_SMP_CPU0_DOORBELL_MASK 0x234
+#define MV64340_SMP_CPU1_DOORBELL_MASK 0x23C
+#define MV64340_SMP_SEMAPHOR0 0x244
+#define MV64340_SMP_SEMAPHOR1 0x24c
+#define MV64340_SMP_SEMAPHOR2 0x254
+#define MV64340_SMP_SEMAPHOR3 0x25c
+#define MV64340_SMP_SEMAPHOR4 0x264
+#define MV64340_SMP_SEMAPHOR5 0x26c
+#define MV64340_SMP_SEMAPHOR6 0x274
+#define MV64340_SMP_SEMAPHOR7 0x27c
+
+/****************************************/
+/* CPU Sync Barrier Register */
+/****************************************/
+
+#define MV64340_CPU_0_SYNC_BARRIER_TRIGGER 0x0c0
+#define MV64340_CPU_0_SYNC_BARRIER_VIRTUAL 0x0c8
+#define MV64340_CPU_1_SYNC_BARRIER_TRIGGER 0x0d0
+#define MV64340_CPU_1_SYNC_BARRIER_VIRTUAL 0x0d8
+
+/****************************************/
+/* CPU Access Protect */
+/****************************************/
+
+#define MV64340_CPU_PROTECT_WINDOW_0_BASE_ADDR 0x180
+#define MV64340_CPU_PROTECT_WINDOW_0_SIZE 0x188
+#define MV64340_CPU_PROTECT_WINDOW_1_BASE_ADDR 0x190
+#define MV64340_CPU_PROTECT_WINDOW_1_SIZE 0x198
+#define MV64340_CPU_PROTECT_WINDOW_2_BASE_ADDR 0x1a0
+#define MV64340_CPU_PROTECT_WINDOW_2_SIZE 0x1a8
+#define MV64340_CPU_PROTECT_WINDOW_3_BASE_ADDR 0x1b0
+#define MV64340_CPU_PROTECT_WINDOW_3_SIZE 0x1b8
+
+
+/****************************************/
+/* CPU Error Report */
+/****************************************/
+
+#define MV64340_CPU_ERROR_ADDR_LOW 0x070
+#define MV64340_CPU_ERROR_ADDR_HIGH 0x078
+#define MV64340_CPU_ERROR_DATA_LOW 0x128
+#define MV64340_CPU_ERROR_DATA_HIGH 0x130
+#define MV64340_CPU_ERROR_PARITY 0x138
+#define MV64340_CPU_ERROR_CAUSE 0x140
+#define MV64340_CPU_ERROR_MASK 0x148
+
+/****************************************/
+/* CPU Interface Debug Registers */
+/****************************************/
+
+#define MV64340_PUNIT_SLAVE_DEBUG_LOW 0x360
+#define MV64340_PUNIT_SLAVE_DEBUG_HIGH 0x368
+#define MV64340_PUNIT_MASTER_DEBUG_LOW 0x370
+#define MV64340_PUNIT_MASTER_DEBUG_HIGH 0x378
+#define MV64340_PUNIT_MMASK 0x3e4
+
+/****************************************/
+/* Integrated SRAM Registers */
+/****************************************/
+
+#define MV64340_SRAM_CONFIG 0x380
+#define MV64340_SRAM_TEST_MODE 0X3F4
+#define MV64340_SRAM_ERROR_CAUSE 0x388
+#define MV64340_SRAM_ERROR_ADDR 0x390
+#define MV64340_SRAM_ERROR_ADDR_HIGH 0X3F8
+#define MV64340_SRAM_ERROR_DATA_LOW 0x398
+#define MV64340_SRAM_ERROR_DATA_HIGH 0x3a0
+#define MV64340_SRAM_ERROR_DATA_PARITY 0x3a8
+
+/****************************************/
+/* SDRAM Configuration */
+/****************************************/
+
+#define MV64340_SDRAM_CONFIG 0x1400
+#define MV64340_D_UNIT_CONTROL_LOW 0x1404
+#define MV64340_D_UNIT_CONTROL_HIGH 0x1424
+#define MV64340_SDRAM_TIMING_CONTROL_LOW
0x1408 +#define MV64340_SDRAM_TIMING_CONTROL_HIGH 0x140c +#define MV64340_SDRAM_ADDR_CONTROL 0x1410 +#define MV64340_SDRAM_OPEN_PAGES_CONTROL 0x1414 +#define MV64340_SDRAM_OPERATION 0x1418 +#define MV64340_SDRAM_MODE 0x141c +#define MV64340_EXTENDED_DRAM_MODE 0x1420 +#define MV64340_SDRAM_CROSS_BAR_CONTROL_LOW 0x1430 +#define MV64340_SDRAM_CROSS_BAR_CONTROL_HIGH 0x1434 +#define MV64340_SDRAM_CROSS_BAR_TIMEOUT 0x1438 +#define MV64340_SDRAM_ADDR_CTRL_PADS_CALIBRATION 0x14c0 +#define MV64340_SDRAM_DATA_PADS_CALIBRATION 0x14c4 + +/****************************************/ +/* SDRAM Error Report */ +/****************************************/ + +#define MV64340_SDRAM_ERROR_DATA_LOW 0x1444 +#define MV64340_SDRAM_ERROR_DATA_HIGH 0x1440 +#define MV64340_SDRAM_ERROR_ADDR 0x1450 +#define MV64340_SDRAM_RECEIVED_ECC 0x1448 +#define MV64340_SDRAM_CALCULATED_ECC 0x144c +#define MV64340_SDRAM_ECC_CONTROL 0x1454 +#define MV64340_SDRAM_ECC_ERROR_COUNTER 0x1458 + +/******************************************/ +/* Controlled Delay Line (CDL) Registers */ +/******************************************/ + +#define MV64340_DFCDL_CONFIG0 0x1480 +#define MV64340_DFCDL_CONFIG1 0x1484 +#define MV64340_DLL_WRITE 0x1488 +#define MV64340_DLL_READ 0x148c +#define MV64340_SRAM_ADDR 0x1490 +#define MV64340_SRAM_DATA0 0x1494 +#define MV64340_SRAM_DATA1 0x1498 +#define MV64340_SRAM_DATA2 0x149c +#define MV64340_DFCL_PROBE 0x14a0 + +/******************************************/ +/* Debug Registers */ +/******************************************/ + +#define MV64340_DUNIT_DEBUG_LOW 0x1460 +#define MV64340_DUNIT_DEBUG_HIGH 0x1464 +#define MV64340_DUNIT_MMASK 0X1b40 + +/****************************************/ +/* Device Parameters */ +/****************************************/ + +#define MV64340_DEVICE_BANK0_PARAMETERS 0x45c +#define MV64340_DEVICE_BANK1_PARAMETERS 0x460 +#define MV64340_DEVICE_BANK2_PARAMETERS 0x464 +#define MV64340_DEVICE_BANK3_PARAMETERS 0x468 +#define MV64340_DEVICE_BOOT_BANK_PARAMETERS 0x46c +#define MV64340_DEVICE_INTERFACE_CONTROL 0x4c0 +#define MV64340_DEVICE_INTERFACE_CROSS_BAR_CONTROL_LOW 0x4c8 +#define MV64340_DEVICE_INTERFACE_CROSS_BAR_CONTROL_HIGH 0x4cc +#define MV64340_DEVICE_INTERFACE_CROSS_BAR_TIMEOUT 0x4c4 + +/****************************************/ +/* Device interrupt registers */ +/****************************************/ + +#define MV64340_DEVICE_INTERRUPT_CAUSE 0x4d0 +#define MV64340_DEVICE_INTERRUPT_MASK 0x4d4 +#define MV64340_DEVICE_ERROR_ADDR 0x4d8 +#define MV64340_DEVICE_ERROR_DATA 0x4dc +#define MV64340_DEVICE_ERROR_PARITY 0x4e0 + +/****************************************/ +/* Device debug registers */ +/****************************************/ + +#define MV64340_DEVICE_DEBUG_LOW 0x4e4 +#define MV64340_DEVICE_DEBUG_HIGH 0x4e8 +#define MV64340_RUNIT_MMASK 0x4f0 + +/****************************************/ +/* PCI Slave Address Decoding registers */ +/****************************************/ + +#define MV64340_PCI_0_CS_0_BANK_SIZE 0xc08 +#define MV64340_PCI_1_CS_0_BANK_SIZE 0xc88 +#define MV64340_PCI_0_CS_1_BANK_SIZE 0xd08 +#define MV64340_PCI_1_CS_1_BANK_SIZE 0xd88 +#define MV64340_PCI_0_CS_2_BANK_SIZE 0xc0c +#define MV64340_PCI_1_CS_2_BANK_SIZE 0xc8c +#define MV64340_PCI_0_CS_3_BANK_SIZE 0xd0c +#define MV64340_PCI_1_CS_3_BANK_SIZE 0xd8c +#define MV64340_PCI_0_DEVCS_0_BANK_SIZE 0xc10 +#define MV64340_PCI_1_DEVCS_0_BANK_SIZE 0xc90 +#define MV64340_PCI_0_DEVCS_1_BANK_SIZE 0xd10 +#define MV64340_PCI_1_DEVCS_1_BANK_SIZE 0xd90 +#define MV64340_PCI_0_DEVCS_2_BANK_SIZE 0xd18 +#define 
MV64340_PCI_1_DEVCS_2_BANK_SIZE 0xd98 +#define MV64340_PCI_0_DEVCS_3_BANK_SIZE 0xc14 +#define MV64340_PCI_1_DEVCS_3_BANK_SIZE 0xc94 +#define MV64340_PCI_0_DEVCS_BOOT_BANK_SIZE 0xd14 +#define MV64340_PCI_1_DEVCS_BOOT_BANK_SIZE 0xd94 +#define MV64340_PCI_0_P2P_MEM0_BAR_SIZE 0xd1c +#define MV64340_PCI_1_P2P_MEM0_BAR_SIZE 0xd9c +#define MV64340_PCI_0_P2P_MEM1_BAR_SIZE 0xd20 +#define MV64340_PCI_1_P2P_MEM1_BAR_SIZE 0xda0 +#define MV64340_PCI_0_P2P_I_O_BAR_SIZE 0xd24 +#define MV64340_PCI_1_P2P_I_O_BAR_SIZE 0xda4 +#define MV64340_PCI_0_CPU_BAR_SIZE 0xd28 +#define MV64340_PCI_1_CPU_BAR_SIZE 0xda8 +#define MV64340_PCI_0_INTERNAL_SRAM_BAR_SIZE 0xe00 +#define MV64340_PCI_1_INTERNAL_SRAM_BAR_SIZE 0xe80 +#define MV64340_PCI_0_EXPANSION_ROM_BAR_SIZE 0xd2c +#define MV64340_PCI_1_EXPANSION_ROM_BAR_SIZE 0xd9c +#define MV64340_PCI_0_BASE_ADDR_REG_ENABLE 0xc3c +#define MV64340_PCI_1_BASE_ADDR_REG_ENABLE 0xcbc +#define MV64340_PCI_0_CS_0_BASE_ADDR_REMAP 0xc48 +#define MV64340_PCI_1_CS_0_BASE_ADDR_REMAP 0xcc8 +#define MV64340_PCI_0_CS_1_BASE_ADDR_REMAP 0xd48 +#define MV64340_PCI_1_CS_1_BASE_ADDR_REMAP 0xdc8 +#define MV64340_PCI_0_CS_2_BASE_ADDR_REMAP 0xc4c +#define MV64340_PCI_1_CS_2_BASE_ADDR_REMAP 0xccc +#define MV64340_PCI_0_CS_3_BASE_ADDR_REMAP 0xd4c +#define MV64340_PCI_1_CS_3_BASE_ADDR_REMAP 0xdcc +#define MV64340_PCI_0_CS_0_BASE_HIGH_ADDR_REMAP 0xF04 +#define MV64340_PCI_1_CS_0_BASE_HIGH_ADDR_REMAP 0xF84 +#define MV64340_PCI_0_CS_1_BASE_HIGH_ADDR_REMAP 0xF08 +#define MV64340_PCI_1_CS_1_BASE_HIGH_ADDR_REMAP 0xF88 +#define MV64340_PCI_0_CS_2_BASE_HIGH_ADDR_REMAP 0xF0C +#define MV64340_PCI_1_CS_2_BASE_HIGH_ADDR_REMAP 0xF8C +#define MV64340_PCI_0_CS_3_BASE_HIGH_ADDR_REMAP 0xF10 +#define MV64340_PCI_1_CS_3_BASE_HIGH_ADDR_REMAP 0xF90 +#define MV64340_PCI_0_DEVCS_0_BASE_ADDR_REMAP 0xc50 +#define MV64340_PCI_1_DEVCS_0_BASE_ADDR_REMAP 0xcd0 +#define MV64340_PCI_0_DEVCS_1_BASE_ADDR_REMAP 0xd50 +#define MV64340_PCI_1_DEVCS_1_BASE_ADDR_REMAP 0xdd0 +#define MV64340_PCI_0_DEVCS_2_BASE_ADDR_REMAP 0xd58 +#define MV64340_PCI_1_DEVCS_2_BASE_ADDR_REMAP 0xdd8 +#define MV64340_PCI_0_DEVCS_3_BASE_ADDR_REMAP 0xc54 +#define MV64340_PCI_1_DEVCS_3_BASE_ADDR_REMAP 0xcd4 +#define MV64340_PCI_0_DEVCS_BOOTCS_BASE_ADDR_REMAP 0xd54 +#define MV64340_PCI_1_DEVCS_BOOTCS_BASE_ADDR_REMAP 0xdd4 +#define MV64340_PCI_0_P2P_MEM0_BASE_ADDR_REMAP_LOW 0xd5c +#define MV64340_PCI_1_P2P_MEM0_BASE_ADDR_REMAP_LOW 0xddc +#define MV64340_PCI_0_P2P_MEM0_BASE_ADDR_REMAP_HIGH 0xd60 +#define MV64340_PCI_1_P2P_MEM0_BASE_ADDR_REMAP_HIGH 0xde0 +#define MV64340_PCI_0_P2P_MEM1_BASE_ADDR_REMAP_LOW 0xd64 +#define MV64340_PCI_1_P2P_MEM1_BASE_ADDR_REMAP_LOW 0xde4 +#define MV64340_PCI_0_P2P_MEM1_BASE_ADDR_REMAP_HIGH 0xd68 +#define MV64340_PCI_1_P2P_MEM1_BASE_ADDR_REMAP_HIGH 0xde8 +#define MV64340_PCI_0_P2P_I_O_BASE_ADDR_REMAP 0xd6c +#define MV64340_PCI_1_P2P_I_O_BASE_ADDR_REMAP 0xdec +#define MV64340_PCI_0_CPU_BASE_ADDR_REMAP_LOW 0xd70 +#define MV64340_PCI_1_CPU_BASE_ADDR_REMAP_LOW 0xdf0 +#define MV64340_PCI_0_CPU_BASE_ADDR_REMAP_HIGH 0xd74 +#define MV64340_PCI_1_CPU_BASE_ADDR_REMAP_HIGH 0xdf4 +#define MV64340_PCI_0_INTEGRATED_SRAM_BASE_ADDR_REMAP 0xf00 +#define MV64340_PCI_1_INTEGRATED_SRAM_BASE_ADDR_REMAP 0xf80 +#define MV64340_PCI_0_EXPANSION_ROM_BASE_ADDR_REMAP 0xf38 +#define MV64340_PCI_1_EXPANSION_ROM_BASE_ADDR_REMAP 0xfb8 +#define MV64340_PCI_0_ADDR_DECODE_CONTROL 0xd3c +#define MV64340_PCI_1_ADDR_DECODE_CONTROL 0xdbc +#define MV64340_PCI_0_HEADERS_RETARGET_CONTROL 0xF40 +#define MV64340_PCI_1_HEADERS_RETARGET_CONTROL 0xFc0 +#define 
MV64340_PCI_0_HEADERS_RETARGET_BASE 0xF44 +#define MV64340_PCI_1_HEADERS_RETARGET_BASE 0xFc4 +#define MV64340_PCI_0_HEADERS_RETARGET_HIGH 0xF48 +#define MV64340_PCI_1_HEADERS_RETARGET_HIGH 0xFc8 + +/***********************************/ +/* PCI Control Register Map */ +/***********************************/ + +#define MV64340_PCI_0_DLL_STATUS_AND_COMMAND 0x1d20 +#define MV64340_PCI_1_DLL_STATUS_AND_COMMAND 0x1da0 +#define MV64340_PCI_0_MPP_PADS_DRIVE_CONTROL 0x1d1C +#define MV64340_PCI_1_MPP_PADS_DRIVE_CONTROL 0x1d9C +#define MV64340_PCI_0_COMMAND 0xc00 +#define MV64340_PCI_1_COMMAND 0xc80 +#define MV64340_PCI_0_MODE 0xd00 +#define MV64340_PCI_1_MODE 0xd80 +#define MV64340_PCI_0_RETRY 0xc04 +#define MV64340_PCI_1_RETRY 0xc84 +#define MV64340_PCI_0_READ_BUFFER_DISCARD_TIMER 0xd04 +#define MV64340_PCI_1_READ_BUFFER_DISCARD_TIMER 0xd84 +#define MV64340_PCI_0_MSI_TRIGGER_TIMER 0xc38 +#define MV64340_PCI_1_MSI_TRIGGER_TIMER 0xcb8 +#define MV64340_PCI_0_ARBITER_CONTROL 0x1d00 +#define MV64340_PCI_1_ARBITER_CONTROL 0x1d80 +#define MV64340_PCI_0_CROSS_BAR_CONTROL_LOW 0x1d08 +#define MV64340_PCI_1_CROSS_BAR_CONTROL_LOW 0x1d88 +#define MV64340_PCI_0_CROSS_BAR_CONTROL_HIGH 0x1d0c +#define MV64340_PCI_1_CROSS_BAR_CONTROL_HIGH 0x1d8c +#define MV64340_PCI_0_CROSS_BAR_TIMEOUT 0x1d04 +#define MV64340_PCI_1_CROSS_BAR_TIMEOUT 0x1d84 +#define MV64340_PCI_0_SYNC_BARRIER_TRIGGER_REG 0x1D18 +#define MV64340_PCI_1_SYNC_BARRIER_TRIGGER_REG 0x1D98 +#define MV64340_PCI_0_SYNC_BARRIER_VIRTUAL_REG 0x1d10 +#define MV64340_PCI_1_SYNC_BARRIER_VIRTUAL_REG 0x1d90 +#define MV64340_PCI_0_P2P_CONFIG 0x1d14 +#define MV64340_PCI_1_P2P_CONFIG 0x1d94 + +#define MV64340_PCI_0_ACCESS_CONTROL_BASE_0_LOW 0x1e00 +#define MV64340_PCI_0_ACCESS_CONTROL_BASE_0_HIGH 0x1e04 +#define MV64340_PCI_0_ACCESS_CONTROL_SIZE_0 0x1e08 +#define MV64340_PCI_0_ACCESS_CONTROL_BASE_1_LOW 0x1e10 +#define MV64340_PCI_0_ACCESS_CONTROL_BASE_1_HIGH 0x1e14 +#define MV64340_PCI_0_ACCESS_CONTROL_SIZE_1 0x1e18 +#define MV64340_PCI_0_ACCESS_CONTROL_BASE_2_LOW 0x1e20 +#define MV64340_PCI_0_ACCESS_CONTROL_BASE_2_HIGH 0x1e24 +#define MV64340_PCI_0_ACCESS_CONTROL_SIZE_2 0x1e28 +#define MV64340_PCI_0_ACCESS_CONTROL_BASE_3_LOW 0x1e30 +#define MV64340_PCI_0_ACCESS_CONTROL_BASE_3_HIGH 0x1e34 +#define MV64340_PCI_0_ACCESS_CONTROL_SIZE_3 0x1e38 +#define MV64340_PCI_0_ACCESS_CONTROL_BASE_4_LOW 0x1e40 +#define MV64340_PCI_0_ACCESS_CONTROL_BASE_4_HIGH 0x1e44 +#define MV64340_PCI_0_ACCESS_CONTROL_SIZE_4 0x1e48 +#define MV64340_PCI_0_ACCESS_CONTROL_BASE_5_LOW 0x1e50 +#define MV64340_PCI_0_ACCESS_CONTROL_BASE_5_HIGH 0x1e54 +#define MV64340_PCI_0_ACCESS_CONTROL_SIZE_5 0x1e58 + +#define MV64340_PCI_1_ACCESS_CONTROL_BASE_0_LOW 0x1e80 +#define MV64340_PCI_1_ACCESS_CONTROL_BASE_0_HIGH 0x1e84 +#define MV64340_PCI_1_ACCESS_CONTROL_SIZE_0 0x1e88 +#define MV64340_PCI_1_ACCESS_CONTROL_BASE_1_LOW 0x1e90 +#define MV64340_PCI_1_ACCESS_CONTROL_BASE_1_HIGH 0x1e94 +#define MV64340_PCI_1_ACCESS_CONTROL_SIZE_1 0x1e98 +#define MV64340_PCI_1_ACCESS_CONTROL_BASE_2_LOW 0x1ea0 +#define MV64340_PCI_1_ACCESS_CONTROL_BASE_2_HIGH 0x1ea4 +#define MV64340_PCI_1_ACCESS_CONTROL_SIZE_2 0x1ea8 +#define MV64340_PCI_1_ACCESS_CONTROL_BASE_3_LOW 0x1eb0 +#define MV64340_PCI_1_ACCESS_CONTROL_BASE_3_HIGH 0x1eb4 +#define MV64340_PCI_1_ACCESS_CONTROL_SIZE_3 0x1eb8 +#define MV64340_PCI_1_ACCESS_CONTROL_BASE_4_LOW 0x1ec0 +#define MV64340_PCI_1_ACCESS_CONTROL_BASE_4_HIGH 0x1ec4 +#define MV64340_PCI_1_ACCESS_CONTROL_SIZE_4 0x1ec8 +#define MV64340_PCI_1_ACCESS_CONTROL_BASE_5_LOW 0x1ed0 +#define 
MV64340_PCI_1_ACCESS_CONTROL_BASE_5_HIGH 0x1ed4 +#define MV64340_PCI_1_ACCESS_CONTROL_SIZE_5 0x1ed8 + +/****************************************/ +/* PCI Configuration Access Registers */ +/****************************************/ + +#define MV64340_PCI_0_CONFIG_ADDR 0xcf8 +#define MV64340_PCI_0_CONFIG_DATA_VIRTUAL_REG 0xcfc +#define MV64340_PCI_1_CONFIG_ADDR 0xc78 +#define MV64340_PCI_1_CONFIG_DATA_VIRTUAL_REG 0xc7c +#define MV64340_PCI_0_INTERRUPT_ACKNOWLEDGE_VIRTUAL_REG 0xc34 +#define MV64340_PCI_1_INTERRUPT_ACKNOWLEDGE_VIRTUAL_REG 0xcb4 + +/****************************************/ +/* PCI Error Report Registers */ +/****************************************/ + +#define MV64340_PCI_0_SERR_MASK 0xc28 +#define MV64340_PCI_1_SERR_MASK 0xca8 +#define MV64340_PCI_0_ERROR_ADDR_LOW 0x1d40 +#define MV64340_PCI_1_ERROR_ADDR_LOW 0x1dc0 +#define MV64340_PCI_0_ERROR_ADDR_HIGH 0x1d44 +#define MV64340_PCI_1_ERROR_ADDR_HIGH 0x1dc4 +#define MV64340_PCI_0_ERROR_ATTRIBUTE 0x1d48 +#define MV64340_PCI_1_ERROR_ATTRIBUTE 0x1dc8 +#define MV64340_PCI_0_ERROR_COMMAND 0x1d50 +#define MV64340_PCI_1_ERROR_COMMAND 0x1dd0 +#define MV64340_PCI_0_ERROR_CAUSE 0x1d58 +#define MV64340_PCI_1_ERROR_CAUSE 0x1dd8 +#define MV64340_PCI_0_ERROR_MASK 0x1d5c +#define MV64340_PCI_1_ERROR_MASK 0x1ddc + +/****************************************/ +/* PCI Debug Registers */ +/****************************************/ + +#define MV64340_PCI_0_MMASK 0X1D24 +#define MV64340_PCI_1_MMASK 0X1DA4 + +/*********************************************/ +/* PCI Configuration, Function 0, Registers */ +/*********************************************/ + +#define MV64340_PCI_DEVICE_AND_VENDOR_ID 0x000 +#define MV64340_PCI_STATUS_AND_COMMAND 0x004 +#define MV64340_PCI_CLASS_CODE_AND_REVISION_ID 0x008 +#define MV64340_PCI_BIST_HEADER_TYPE_LATENCY_TIMER_CACHE_LINE 0x00C + +#define MV64340_PCI_SCS_0_BASE_ADDR_LOW 0x010 +#define MV64340_PCI_SCS_0_BASE_ADDR_HIGH 0x014 +#define MV64340_PCI_SCS_1_BASE_ADDR_LOW 0x018 +#define MV64340_PCI_SCS_1_BASE_ADDR_HIGH 0x01C +#define MV64340_PCI_INTERNAL_REG_MEM_MAPPED_BASE_ADDR_LOW 0x020 +#define MV64340_PCI_INTERNAL_REG_MEM_MAPPED_BASE_ADDR_HIGH 0x024 +#define MV64340_PCI_SUBSYSTEM_ID_AND_SUBSYSTEM_VENDOR_ID 0x02c +#define MV64340_PCI_EXPANSION_ROM_BASE_ADDR_REG 0x030 +#define MV64340_PCI_CAPABILTY_LIST_POINTER 0x034 +#define MV64340_PCI_INTERRUPT_PIN_AND_LINE 0x03C + /* capability list */ +#define MV64340_PCI_POWER_MANAGEMENT_CAPABILITY 0x040 +#define MV64340_PCI_POWER_MANAGEMENT_STATUS_AND_CONTROL 0x044 +#define MV64340_PCI_VPD_ADDR 0x048 +#define MV64340_PCI_VPD_DATA 0x04c +#define MV64340_PCI_MSI_MESSAGE_CONTROL 0x050 +#define MV64340_PCI_MSI_MESSAGE_ADDR 0x054 +#define MV64340_PCI_MSI_MESSAGE_UPPER_ADDR 0x058 +#define MV64340_PCI_MSI_MESSAGE_DATA 0x05c +#define MV64340_PCI_X_COMMAND 0x060 +#define MV64340_PCI_X_STATUS 0x064 +#define MV64340_PCI_COMPACT_PCI_HOT_SWAP 0x068 + +/***********************************************/ +/* PCI Configuration, Function 1, Registers */ +/***********************************************/ + +#define MV64340_PCI_SCS_2_BASE_ADDR_LOW 0x110 +#define MV64340_PCI_SCS_2_BASE_ADDR_HIGH 0x114 +#define MV64340_PCI_SCS_3_BASE_ADDR_LOW 0x118 +#define MV64340_PCI_SCS_3_BASE_ADDR_HIGH 0x11c +#define MV64340_PCI_INTERNAL_SRAM_BASE_ADDR_LOW 0x120 +#define MV64340_PCI_INTERNAL_SRAM_BASE_ADDR_HIGH 0x124 + +/***********************************************/ +/* PCI Configuration, Function 2, Registers */ +/***********************************************/ + +#define MV64340_PCI_DEVCS_0_BASE_ADDR_LOW 
0x210
+#define MV64340_PCI_DEVCS_0_BASE_ADDR_HIGH 0x214
+#define MV64340_PCI_DEVCS_1_BASE_ADDR_LOW 0x218
+#define MV64340_PCI_DEVCS_1_BASE_ADDR_HIGH 0x21c
+#define MV64340_PCI_DEVCS_2_BASE_ADDR_LOW 0x220
+#define MV64340_PCI_DEVCS_2_BASE_ADDR_HIGH 0x224
+
+/***********************************************/
+/* PCI Configuration, Function 3, Registers */
+/***********************************************/
+
+#define MV64340_PCI_DEVCS_3_BASE_ADDR_LOW 0x310
+#define MV64340_PCI_DEVCS_3_BASE_ADDR_HIGH 0x314
+#define MV64340_PCI_BOOT_CS_BASE_ADDR_LOW 0x318
+#define MV64340_PCI_BOOT_CS_BASE_ADDR_HIGH 0x31c
+#define MV64340_PCI_CPU_BASE_ADDR_LOW 0x220
+#define MV64340_PCI_CPU_BASE_ADDR_HIGH 0x224
+
+/***********************************************/
+/* PCI Configuration, Function 4, Registers */
+/***********************************************/
+
+#define MV64340_PCI_P2P_MEM0_BASE_ADDR_LOW 0x410
+#define MV64340_PCI_P2P_MEM0_BASE_ADDR_HIGH 0x414
+#define MV64340_PCI_P2P_MEM1_BASE_ADDR_LOW 0x418
+#define MV64340_PCI_P2P_MEM1_BASE_ADDR_HIGH 0x41c
+#define MV64340_PCI_P2P_I_O_BASE_ADDR 0x420
+#define MV64340_PCI_INTERNAL_REGS_I_O_MAPPED_BASE_ADDR 0x424
+
+/****************************************/
+/* Messaging Unit Registers (I2O) */
+/****************************************/
+
+#define MV64340_I2O_INBOUND_MESSAGE_REG0_PCI_0_SIDE 0x010
+#define MV64340_I2O_INBOUND_MESSAGE_REG1_PCI_0_SIDE 0x014
+#define MV64340_I2O_OUTBOUND_MESSAGE_REG0_PCI_0_SIDE 0x018
+#define MV64340_I2O_OUTBOUND_MESSAGE_REG1_PCI_0_SIDE 0x01C
+#define MV64340_I2O_INBOUND_DOORBELL_REG_PCI_0_SIDE 0x020
+#define MV64340_I2O_INBOUND_INTERRUPT_CAUSE_REG_PCI_0_SIDE 0x024
+#define MV64340_I2O_INBOUND_INTERRUPT_MASK_REG_PCI_0_SIDE 0x028
+#define MV64340_I2O_OUTBOUND_DOORBELL_REG_PCI_0_SIDE 0x02C
+#define MV64340_I2O_OUTBOUND_INTERRUPT_CAUSE_REG_PCI_0_SIDE 0x030
+#define MV64340_I2O_OUTBOUND_INTERRUPT_MASK_REG_PCI_0_SIDE 0x034
+#define MV64340_I2O_INBOUND_QUEUE_PORT_VIRTUAL_REG_PCI_0_SIDE 0x040
+#define MV64340_I2O_OUTBOUND_QUEUE_PORT_VIRTUAL_REG_PCI_0_SIDE 0x044
+#define MV64340_I2O_QUEUE_CONTROL_REG_PCI_0_SIDE 0x050
+#define MV64340_I2O_QUEUE_BASE_ADDR_REG_PCI_0_SIDE 0x054
+#define MV64340_I2O_INBOUND_FREE_HEAD_POINTER_REG_PCI_0_SIDE 0x060
+#define MV64340_I2O_INBOUND_FREE_TAIL_POINTER_REG_PCI_0_SIDE 0x064
+#define MV64340_I2O_INBOUND_POST_HEAD_POINTER_REG_PCI_0_SIDE 0x068
+#define MV64340_I2O_INBOUND_POST_TAIL_POINTER_REG_PCI_0_SIDE 0x06C
+#define MV64340_I2O_OUTBOUND_FREE_HEAD_POINTER_REG_PCI_0_SIDE 0x070
+#define MV64340_I2O_OUTBOUND_FREE_TAIL_POINTER_REG_PCI_0_SIDE 0x074
+#define MV64340_I2O_OUTBOUND_POST_HEAD_POINTER_REG_PCI_0_SIDE 0x0F8
+#define MV64340_I2O_OUTBOUND_POST_TAIL_POINTER_REG_PCI_0_SIDE 0x0FC
+
+#define MV64340_I2O_INBOUND_MESSAGE_REG0_PCI_1_SIDE 0x090
+#define MV64340_I2O_INBOUND_MESSAGE_REG1_PCI_1_SIDE 0x094
+#define MV64340_I2O_OUTBOUND_MESSAGE_REG0_PCI_1_SIDE 0x098
+#define MV64340_I2O_OUTBOUND_MESSAGE_REG1_PCI_1_SIDE 0x09C
+#define MV64340_I2O_INBOUND_DOORBELL_REG_PCI_1_SIDE 0x0A0
+#define MV64340_I2O_INBOUND_INTERRUPT_CAUSE_REG_PCI_1_SIDE 0x0A4
+#define MV64340_I2O_INBOUND_INTERRUPT_MASK_REG_PCI_1_SIDE 0x0A8
+#define MV64340_I2O_OUTBOUND_DOORBELL_REG_PCI_1_SIDE 0x0AC
+#define MV64340_I2O_OUTBOUND_INTERRUPT_CAUSE_REG_PCI_1_SIDE 0x0B0
+#define MV64340_I2O_OUTBOUND_INTERRUPT_MASK_REG_PCI_1_SIDE 0x0B4
+#define MV64340_I2O_INBOUND_QUEUE_PORT_VIRTUAL_REG_PCI_1_SIDE 0x0C0
+#define MV64340_I2O_OUTBOUND_QUEUE_PORT_VIRTUAL_REG_PCI_1_SIDE 0x0C4
+#define MV64340_I2O_QUEUE_CONTROL_REG_PCI_1_SIDE 0x0D0
+#define
MV64340_I2O_QUEUE_BASE_ADDR_REG_PCI_1_SIDE 0x0D4 +#define MV64340_I2O_INBOUND_FREE_HEAD_POINTER_REG_PCI_1_SIDE 0x0E0 +#define MV64340_I2O_INBOUND_FREE_TAIL_POINTER_REG_PCI_1_SIDE 0x0E4 +#define MV64340_I2O_INBOUND_POST_HEAD_POINTER_REG_PCI_1_SIDE 0x0E8 +#define MV64340_I2O_INBOUND_POST_TAIL_POINTER_REG_PCI_1_SIDE 0x0EC +#define MV64340_I2O_OUTBOUND_FREE_HEAD_POINTER_REG_PCI_1_SIDE 0x0F0 +#define MV64340_I2O_OUTBOUND_FREE_TAIL_POINTER_REG_PCI_1_SIDE 0x0F4 +#define MV64340_I2O_OUTBOUND_POST_HEAD_POINTER_REG_PCI_1_SIDE 0x078 +#define MV64340_I2O_OUTBOUND_POST_TAIL_POINTER_REG_PCI_1_SIDE 0x07C + +#define MV64340_I2O_INBOUND_MESSAGE_REG0_CPU0_SIDE 0x1C10 +#define MV64340_I2O_INBOUND_MESSAGE_REG1_CPU0_SIDE 0x1C14 +#define MV64340_I2O_OUTBOUND_MESSAGE_REG0_CPU0_SIDE 0x1C18 +#define MV64340_I2O_OUTBOUND_MESSAGE_REG1_CPU0_SIDE 0x1C1C +#define MV64340_I2O_INBOUND_DOORBELL_REG_CPU0_SIDE 0x1C20 +#define MV64340_I2O_INBOUND_INTERRUPT_CAUSE_REG_CPU0_SIDE 0x1C24 +#define MV64340_I2O_INBOUND_INTERRUPT_MASK_REG_CPU0_SIDE 0x1C28 +#define MV64340_I2O_OUTBOUND_DOORBELL_REG_CPU0_SIDE 0x1C2C +#define MV64340_I2O_OUTBOUND_INTERRUPT_CAUSE_REG_CPU0_SIDE 0x1C30 +#define MV64340_I2O_OUTBOUND_INTERRUPT_MASK_REG_CPU0_SIDE 0x1C34 +#define MV64340_I2O_INBOUND_QUEUE_PORT_VIRTUAL_REG_CPU0_SIDE 0x1C40 +#define MV64340_I2O_OUTBOUND_QUEUE_PORT_VIRTUAL_REG_CPU0_SIDE 0x1C44 +#define MV64340_I2O_QUEUE_CONTROL_REG_CPU0_SIDE 0x1C50 +#define MV64340_I2O_QUEUE_BASE_ADDR_REG_CPU0_SIDE 0x1C54 +#define MV64340_I2O_INBOUND_FREE_HEAD_POINTER_REG_CPU0_SIDE 0x1C60 +#define MV64340_I2O_INBOUND_FREE_TAIL_POINTER_REG_CPU0_SIDE 0x1C64 +#define MV64340_I2O_INBOUND_POST_HEAD_POINTER_REG_CPU0_SIDE 0x1C68 +#define MV64340_I2O_INBOUND_POST_TAIL_POINTER_REG_CPU0_SIDE 0x1C6C +#define MV64340_I2O_OUTBOUND_FREE_HEAD_POINTER_REG_CPU0_SIDE 0x1C70 +#define MV64340_I2O_OUTBOUND_FREE_TAIL_POINTER_REG_CPU0_SIDE 0x1C74 +#define MV64340_I2O_OUTBOUND_POST_HEAD_POINTER_REG_CPU0_SIDE 0x1CF8 +#define MV64340_I2O_OUTBOUND_POST_TAIL_POINTER_REG_CPU0_SIDE 0x1CFC +#define MV64340_I2O_INBOUND_MESSAGE_REG0_CPU1_SIDE 0x1C90 +#define MV64340_I2O_INBOUND_MESSAGE_REG1_CPU1_SIDE 0x1C94 +#define MV64340_I2O_OUTBOUND_MESSAGE_REG0_CPU1_SIDE 0x1C98 +#define MV64340_I2O_OUTBOUND_MESSAGE_REG1_CPU1_SIDE 0x1C9C +#define MV64340_I2O_INBOUND_DOORBELL_REG_CPU1_SIDE 0x1CA0 +#define MV64340_I2O_INBOUND_INTERRUPT_CAUSE_REG_CPU1_SIDE 0x1CA4 +#define MV64340_I2O_INBOUND_INTERRUPT_MASK_REG_CPU1_SIDE 0x1CA8 +#define MV64340_I2O_OUTBOUND_DOORBELL_REG_CPU1_SIDE 0x1CAC +#define MV64340_I2O_OUTBOUND_INTERRUPT_CAUSE_REG_CPU1_SIDE 0x1CB0 +#define MV64340_I2O_OUTBOUND_INTERRUPT_MASK_REG_CPU1_SIDE 0x1CB4 +#define MV64340_I2O_INBOUND_QUEUE_PORT_VIRTUAL_REG_CPU1_SIDE 0x1CC0 +#define MV64340_I2O_OUTBOUND_QUEUE_PORT_VIRTUAL_REG_CPU1_SIDE 0x1CC4 +#define MV64340_I2O_QUEUE_CONTROL_REG_CPU1_SIDE 0x1CD0 +#define MV64340_I2O_QUEUE_BASE_ADDR_REG_CPU1_SIDE 0x1CD4 +#define MV64340_I2O_INBOUND_FREE_HEAD_POINTER_REG_CPU1_SIDE 0x1CE0 +#define MV64340_I2O_INBOUND_FREE_TAIL_POINTER_REG_CPU1_SIDE 0x1CE4 +#define MV64340_I2O_INBOUND_POST_HEAD_POINTER_REG_CPU1_SIDE 0x1CE8 +#define MV64340_I2O_INBOUND_POST_TAIL_POINTER_REG_CPU1_SIDE 0x1CEC +#define MV64340_I2O_OUTBOUND_FREE_HEAD_POINTER_REG_CPU1_SIDE 0x1CF0 +#define MV64340_I2O_OUTBOUND_FREE_TAIL_POINTER_REG_CPU1_SIDE 0x1CF4 +#define MV64340_I2O_OUTBOUND_POST_HEAD_POINTER_REG_CPU1_SIDE 0x1C78 +#define MV64340_I2O_OUTBOUND_POST_TAIL_POINTER_REG_CPU1_SIDE 0x1C7C + +/****************************************/ +/* Ethernet Unit Registers */ 
+/****************************************/ + +/*******************************************/ +/* CUNIT Registers */ +/*******************************************/ + + /* Address Decoding Register Map */ + +#define MV64340_CUNIT_BASE_ADDR_REG0 0xf200 +#define MV64340_CUNIT_BASE_ADDR_REG1 0xf208 +#define MV64340_CUNIT_BASE_ADDR_REG2 0xf210 +#define MV64340_CUNIT_BASE_ADDR_REG3 0xf218 +#define MV64340_CUNIT_SIZE0 0xf204 +#define MV64340_CUNIT_SIZE1 0xf20c +#define MV64340_CUNIT_SIZE2 0xf214 +#define MV64340_CUNIT_SIZE3 0xf21c +#define MV64340_CUNIT_HIGH_ADDR_REMAP_REG0 0xf240 +#define MV64340_CUNIT_HIGH_ADDR_REMAP_REG1 0xf244 +#define MV64340_CUNIT_BASE_ADDR_ENABLE_REG 0xf250 +#define MV64340_MPSC0_ACCESS_PROTECTION_REG 0xf254 +#define MV64340_MPSC1_ACCESS_PROTECTION_REG 0xf258 +#define MV64340_CUNIT_INTERNAL_SPACE_BASE_ADDR_REG 0xf25C + + /* Error Report Registers */ + +#define MV64340_CUNIT_INTERRUPT_CAUSE_REG 0xf310 +#define MV64340_CUNIT_INTERRUPT_MASK_REG 0xf314 +#define MV64340_CUNIT_ERROR_ADDR 0xf318 + + /* Cunit Control Registers */ + +#define MV64340_CUNIT_ARBITER_CONTROL_REG 0xf300 +#define MV64340_CUNIT_CONFIG_REG 0xb40c +#define MV64340_CUNIT_CRROSBAR_TIMEOUT_REG 0xf304 + + /* Cunit Debug Registers */ + +#define MV64340_CUNIT_DEBUG_LOW 0xf340 +#define MV64340_CUNIT_DEBUG_HIGH 0xf344 +#define MV64340_CUNIT_MMASK 0xf380 + + /* MPSCs Clocks Routing Registers */ + +#define MV64340_MPSC_ROUTING_REG 0xb400 +#define MV64340_MPSC_RX_CLOCK_ROUTING_REG 0xb404 +#define MV64340_MPSC_TX_CLOCK_ROUTING_REG 0xb408 + + /* MPSCs Interrupts Registers */ + +#define MV64340_MPSC_CAUSE_REG(port) (0xb804 + (port << 3)) +#define MV64340_MPSC_MASK_REG(port) (0xb884 + (port << 3)) + +#define MV64340_MPSC_MAIN_CONFIG_LOW(port) (0x8000 + (port << 12)) +#define MV64340_MPSC_MAIN_CONFIG_HIGH(port) (0x8004 + (port << 12)) +#define MV64340_MPSC_PROTOCOL_CONFIG(port) (0x8008 + (port << 12)) +#define MV64340_MPSC_CHANNEL_REG1(port) (0x800c + (port << 12)) +#define MV64340_MPSC_CHANNEL_REG2(port) (0x8010 + (port << 12)) +#define MV64340_MPSC_CHANNEL_REG3(port) (0x8014 + (port << 12)) +#define MV64340_MPSC_CHANNEL_REG4(port) (0x8018 + (port << 12)) +#define MV64340_MPSC_CHANNEL_REG5(port) (0x801c + (port << 12)) +#define MV64340_MPSC_CHANNEL_REG6(port) (0x8020 + (port << 12)) +#define MV64340_MPSC_CHANNEL_REG7(port) (0x8024 + (port << 12)) +#define MV64340_MPSC_CHANNEL_REG8(port) (0x8028 + (port << 12)) +#define MV64340_MPSC_CHANNEL_REG9(port) (0x802c + (port << 12)) +#define MV64340_MPSC_CHANNEL_REG10(port) (0x8030 + (port << 12)) + + /* MPSC0 Registers */ + + +/***************************************/ +/* SDMA Registers */ +/***************************************/ + +#define MV64340_SDMA_CONFIG_REG(channel) (0x4000 + (channel << 13)) +#define MV64340_SDMA_COMMAND_REG(channel) (0x4008 + (channel << 13)) +#define MV64340_SDMA_CURRENT_RX_DESCRIPTOR_POINTER(channel) (0x4810 + (channel << 13)) +#define MV64340_SDMA_CURRENT_TX_DESCRIPTOR_POINTER(channel) (0x4c10 + (channel << 13)) +#define MV64340_SDMA_FIRST_TX_DESCRIPTOR_POINTER(channel) (0x4c14 + (channel << 13)) + +#define MV64340_SDMA_CAUSE_REG 0xb800 +#define MV64340_SDMA_MASK_REG 0xb880 + +/* BRG Interrupts */ + +#define MV64340_BRG_CONFIG_REG(brg) (0xb200 + (brg << 3)) +#define MV64340_BRG_BAUDE_TUNING_REG(brg) (0xb208 + (brg << 3)) +#define MV64340_BRG_CAUSE_REG 0xb834 +#define MV64340_BRG_MASK_REG 0xb8b4 + +/****************************************/ +/* DMA Channel Control */ +/****************************************/ + +#define MV64340_DMA_CHANNEL0_CONTROL 
0x840 +#define MV64340_DMA_CHANNEL0_CONTROL_HIGH 0x880 +#define MV64340_DMA_CHANNEL1_CONTROL 0x844 +#define MV64340_DMA_CHANNEL1_CONTROL_HIGH 0x884 +#define MV64340_DMA_CHANNEL2_CONTROL 0x848 +#define MV64340_DMA_CHANNEL2_CONTROL_HIGH 0x888 +#define MV64340_DMA_CHANNEL3_CONTROL 0x84C +#define MV64340_DMA_CHANNEL3_CONTROL_HIGH 0x88C + + +/****************************************/ +/* IDMA Registers */ +/****************************************/ + +#define MV64340_DMA_CHANNEL0_BYTE_COUNT 0x800 +#define MV64340_DMA_CHANNEL1_BYTE_COUNT 0x804 +#define MV64340_DMA_CHANNEL2_BYTE_COUNT 0x808 +#define MV64340_DMA_CHANNEL3_BYTE_COUNT 0x80C +#define MV64340_DMA_CHANNEL0_SOURCE_ADDR 0x810 +#define MV64340_DMA_CHANNEL1_SOURCE_ADDR 0x814 +#define MV64340_DMA_CHANNEL2_SOURCE_ADDR 0x818 +#define MV64340_DMA_CHANNEL3_SOURCE_ADDR 0x81c +#define MV64340_DMA_CHANNEL0_DESTINATION_ADDR 0x820 +#define MV64340_DMA_CHANNEL1_DESTINATION_ADDR 0x824 +#define MV64340_DMA_CHANNEL2_DESTINATION_ADDR 0x828 +#define MV64340_DMA_CHANNEL3_DESTINATION_ADDR 0x82C +#define MV64340_DMA_CHANNEL0_NEXT_DESCRIPTOR_POINTER 0x830 +#define MV64340_DMA_CHANNEL1_NEXT_DESCRIPTOR_POINTER 0x834 +#define MV64340_DMA_CHANNEL2_NEXT_DESCRIPTOR_POINTER 0x838 +#define MV64340_DMA_CHANNEL3_NEXT_DESCRIPTOR_POINTER 0x83C +#define MV64340_DMA_CHANNEL0_CURRENT_DESCRIPTOR_POINTER 0x870 +#define MV64340_DMA_CHANNEL1_CURRENT_DESCRIPTOR_POINTER 0x874 +#define MV64340_DMA_CHANNEL2_CURRENT_DESCRIPTOR_POINTER 0x878 +#define MV64340_DMA_CHANNEL3_CURRENT_DESCRIPTOR_POINTER 0x87C + + /* IDMA Address Decoding Base Address Registers */ + +#define MV64340_DMA_BASE_ADDR_REG0 0xa00 +#define MV64340_DMA_BASE_ADDR_REG1 0xa08 +#define MV64340_DMA_BASE_ADDR_REG2 0xa10 +#define MV64340_DMA_BASE_ADDR_REG3 0xa18 +#define MV64340_DMA_BASE_ADDR_REG4 0xa20 +#define MV64340_DMA_BASE_ADDR_REG5 0xa28 +#define MV64340_DMA_BASE_ADDR_REG6 0xa30 +#define MV64340_DMA_BASE_ADDR_REG7 0xa38 + + /* IDMA Address Decoding Size Address Register */ + +#define MV64340_DMA_SIZE_REG0 0xa04 +#define MV64340_DMA_SIZE_REG1 0xa0c +#define MV64340_DMA_SIZE_REG2 0xa14 +#define MV64340_DMA_SIZE_REG3 0xa1c +#define MV64340_DMA_SIZE_REG4 0xa24 +#define MV64340_DMA_SIZE_REG5 0xa2c +#define MV64340_DMA_SIZE_REG6 0xa34 +#define MV64340_DMA_SIZE_REG7 0xa3C + + /* IDMA Address Decoding High Address Remap and Access Protection Registers */ + +#define MV64340_DMA_HIGH_ADDR_REMAP_REG0 0xa60 +#define MV64340_DMA_HIGH_ADDR_REMAP_REG1 0xa64 +#define MV64340_DMA_HIGH_ADDR_REMAP_REG2 0xa68 +#define MV64340_DMA_HIGH_ADDR_REMAP_REG3 0xa6C +#define MV64340_DMA_BASE_ADDR_ENABLE_REG 0xa80 +#define MV64340_DMA_CHANNEL0_ACCESS_PROTECTION_REG 0xa70 +#define MV64340_DMA_CHANNEL1_ACCESS_PROTECTION_REG 0xa74 +#define MV64340_DMA_CHANNEL2_ACCESS_PROTECTION_REG 0xa78 +#define MV64340_DMA_CHANNEL3_ACCESS_PROTECTION_REG 0xa7c +#define MV64340_DMA_ARBITER_CONTROL 0x860 +#define MV64340_DMA_CROSS_BAR_TIMEOUT 0x8d0 + + /* IDMA Headers Retarget Registers */ + +#define MV64340_DMA_HEADERS_RETARGET_CONTROL 0xa84 +#define MV64340_DMA_HEADERS_RETARGET_BASE 0xa88 + + /* IDMA Interrupt Register */ + +#define MV64340_DMA_INTERRUPT_CAUSE_REG 0x8c0 +#define MV64340_DMA_INTERRUPT_CAUSE_MASK 0x8c4 +#define MV64340_DMA_ERROR_ADDR 0x8c8 +#define MV64340_DMA_ERROR_SELECT 0x8cc + + /* IDMA Debug Register ( for internal use ) */ + +#define MV64340_DMA_DEBUG_LOW 0x8e0 +#define MV64340_DMA_DEBUG_HIGH 0x8e4 +#define MV64340_DMA_SPARE 0xA8C + +/****************************************/ +/* Timer_Counter */ +/****************************************/ + 
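+/*
+ * Note: the four 32-bit timers below sit at consecutive word offsets
+ * (0x850, 0x854, 0x858, 0x85C), so a parameterized accessor in the
+ * style of the MPSC/SDMA macros above could be used instead; the name
+ * is illustrative only and not part of the original header:
+ *
+ *   #define MV64340_TIMER_COUNTER(n)    (0x850 + ((n) << 2))
+ */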
+#define MV64340_TIMER_COUNTER0 0x850 +#define MV64340_TIMER_COUNTER1 0x854 +#define MV64340_TIMER_COUNTER2 0x858 +#define MV64340_TIMER_COUNTER3 0x85C +#define MV64340_TIMER_COUNTER_0_3_CONTROL 0x864 +#define MV64340_TIMER_COUNTER_0_3_INTERRUPT_CAUSE 0x868 +#define MV64340_TIMER_COUNTER_0_3_INTERRUPT_MASK 0x86c + +/****************************************/ +/* Watchdog registers */ +/****************************************/ + +#define MV64340_WATCHDOG_CONFIG_REG 0xb410 +#define MV64340_WATCHDOG_VALUE_REG 0xb414 + +/****************************************/ +/* I2C Registers */ +/****************************************/ + +#define MV64XXX_I2C_OFFSET 0xc000 +#define MV64XXX_I2C_REG_BLOCK_SIZE 0x0020 + +/****************************************/ +/* GPP Interface Registers */ +/****************************************/ + +#define MV64340_GPP_IO_CONTROL 0xf100 +#define MV64340_GPP_LEVEL_CONTROL 0xf110 +#define MV64340_GPP_VALUE 0xf104 +#define MV64340_GPP_INTERRUPT_CAUSE 0xf108 +#define MV64340_GPP_INTERRUPT_MASK0 0xf10c +#define MV64340_GPP_INTERRUPT_MASK1 0xf114 +#define MV64340_GPP_VALUE_SET 0xf118 +#define MV64340_GPP_VALUE_CLEAR 0xf11c + +/****************************************/ +/* Interrupt Controller Registers */ +/****************************************/ + +/****************************************/ +/* Interrupts */ +/****************************************/ + +#define MV64340_MAIN_INTERRUPT_CAUSE_LOW 0x004 +#define MV64340_MAIN_INTERRUPT_CAUSE_HIGH 0x00c +#define MV64340_CPU_INTERRUPT0_MASK_LOW 0x014 +#define MV64340_CPU_INTERRUPT0_MASK_HIGH 0x01c +#define MV64340_CPU_INTERRUPT0_SELECT_CAUSE 0x024 +#define MV64340_CPU_INTERRUPT1_MASK_LOW 0x034 +#define MV64340_CPU_INTERRUPT1_MASK_HIGH 0x03c +#define MV64340_CPU_INTERRUPT1_SELECT_CAUSE 0x044 +#define MV64340_INTERRUPT0_MASK_0_LOW 0x054 +#define MV64340_INTERRUPT0_MASK_0_HIGH 0x05c +#define MV64340_INTERRUPT0_SELECT_CAUSE 0x064 +#define MV64340_INTERRUPT1_MASK_0_LOW 0x074 +#define MV64340_INTERRUPT1_MASK_0_HIGH 0x07c +#define MV64340_INTERRUPT1_SELECT_CAUSE 0x084 + +/****************************************/ +/* MPP Interface Registers */ +/****************************************/ + +#define MV64340_MPP_CONTROL0 0xf000 +#define MV64340_MPP_CONTROL1 0xf004 +#define MV64340_MPP_CONTROL2 0xf008 +#define MV64340_MPP_CONTROL3 0xf00c + +/****************************************/ +/* Serial Initialization registers */ +/****************************************/ + +#define MV64340_SERIAL_INIT_LAST_DATA 0xf324 +#define MV64340_SERIAL_INIT_CONTROL 0xf328 +#define MV64340_SERIAL_INIT_STATUS 0xf32c + +#endif /* ASM_MV643XX_H */ diff --git a/hw/pci-host/pam.c b/hw/pci-host/pam.c new file mode 100644 index 000000000..454dd120d --- /dev/null +++ b/hw/pci-host/pam.c @@ -0,0 +1,70 @@ +/* + * QEMU Smram/pam logic implementation + * + * Copyright (c) 2006 Fabrice Bellard + * Copyright (c) 2011 Isaku Yamahata + * VA Linux Systems Japan K.K. 
+ * Copyright (c) 2012 Jason Baron + * + * Split out from piix.c + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "hw/pci-host/pam.h" + +void init_pam(DeviceState *dev, MemoryRegion *ram_memory, + MemoryRegion *system_memory, MemoryRegion *pci_address_space, + PAMMemoryRegion *mem, uint32_t start, uint32_t size) +{ + int i; + + /* RAM */ + memory_region_init_alias(&mem->alias[3], OBJECT(dev), "pam-ram", ram_memory, + start, size); + /* ROM (XXX: not quite correct) */ + memory_region_init_alias(&mem->alias[1], OBJECT(dev), "pam-rom", ram_memory, + start, size); + memory_region_set_readonly(&mem->alias[1], true); + + /* XXX: should distinguish read/write cases */ + memory_region_init_alias(&mem->alias[0], OBJECT(dev), "pam-pci", pci_address_space, + start, size); + memory_region_init_alias(&mem->alias[2], OBJECT(dev), "pam-pci", ram_memory, + start, size); + + memory_region_transaction_begin(); + for (i = 0; i < 4; ++i) { + memory_region_set_enabled(&mem->alias[i], false); + memory_region_add_subregion_overlap(system_memory, start, + &mem->alias[i], 1); + } + memory_region_transaction_commit(); + mem->current = 0; +} + +void pam_update(PAMMemoryRegion *pam, int idx, uint8_t val) +{ + assert(0 <= idx && idx < PAM_REGIONS_COUNT); + + memory_region_set_enabled(&pam->alias[pam->current], false); + pam->current = (val >> ((!(idx & 1)) * 4)) & PAM_ATTR_MASK; + memory_region_set_enabled(&pam->alias[pam->current], true); +} diff --git a/hw/pci-host/pnv_phb3.c b/hw/pci-host/pnv_phb3.c new file mode 100644 index 000000000..a7f968500 --- /dev/null +++ b/hw/pci-host/pnv_phb3.c @@ -0,0 +1,1186 @@ +/* + * QEMU PowerPC PowerNV (POWER8) PHB3 model + * + * Copyright (c) 2014-2020, IBM Corporation. + * + * This code is licensed under the GPL version 2 or later. See the + * COPYING file in the top-level directory. + */ +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "qapi/visitor.h" +#include "qapi/error.h" +#include "qemu-common.h" +#include "hw/pci-host/pnv_phb3_regs.h" +#include "hw/pci-host/pnv_phb3.h" +#include "hw/pci/pcie_host.h" +#include "hw/pci/pcie_port.h" +#include "hw/ppc/pnv.h" +#include "hw/irq.h" +#include "hw/qdev-properties.h" +#include "qom/object.h" + +#define phb3_error(phb, fmt, ...) 
\ + qemu_log_mask(LOG_GUEST_ERROR, "phb3[%d:%d]: " fmt "\n", \ + (phb)->chip_id, (phb)->phb_id, ## __VA_ARGS__) + +static PCIDevice *pnv_phb3_find_cfg_dev(PnvPHB3 *phb) +{ + PCIHostState *pci = PCI_HOST_BRIDGE(phb); + uint64_t addr = phb->regs[PHB_CONFIG_ADDRESS >> 3]; + uint8_t bus, devfn; + + if (!(addr >> 63)) { + return NULL; + } + bus = (addr >> 52) & 0xff; + devfn = (addr >> 44) & 0xff; + + return pci_find_device(pci->bus, bus, devfn); +} + +/* + * The CONFIG_DATA register expects little endian accesses, but as the + * region is big endian, we have to swap the value. + */ +static void pnv_phb3_config_write(PnvPHB3 *phb, unsigned off, + unsigned size, uint64_t val) +{ + uint32_t cfg_addr, limit; + PCIDevice *pdev; + + pdev = pnv_phb3_find_cfg_dev(phb); + if (!pdev) { + return; + } + cfg_addr = (phb->regs[PHB_CONFIG_ADDRESS >> 3] >> 32) & 0xffc; + cfg_addr |= off; + limit = pci_config_size(pdev); + if (limit <= cfg_addr) { + /* + * conventional pci device can be behind pcie-to-pci bridge. + * 256 <= addr < 4K has no effects. + */ + return; + } + switch (size) { + case 1: + break; + case 2: + val = bswap16(val); + break; + case 4: + val = bswap32(val); + break; + default: + g_assert_not_reached(); + } + pci_host_config_write_common(pdev, cfg_addr, limit, val, size); +} + +static uint64_t pnv_phb3_config_read(PnvPHB3 *phb, unsigned off, + unsigned size) +{ + uint32_t cfg_addr, limit; + PCIDevice *pdev; + uint64_t val; + + pdev = pnv_phb3_find_cfg_dev(phb); + if (!pdev) { + return ~0ull; + } + cfg_addr = (phb->regs[PHB_CONFIG_ADDRESS >> 3] >> 32) & 0xffc; + cfg_addr |= off; + limit = pci_config_size(pdev); + if (limit <= cfg_addr) { + /* + * conventional pci device can be behind pcie-to-pci bridge. + * 256 <= addr < 4K has no effects. + */ + return ~0ull; + } + val = pci_host_config_read_common(pdev, cfg_addr, limit, size); + switch (size) { + case 1: + return val; + case 2: + return bswap16(val); + case 4: + return bswap32(val); + default: + g_assert_not_reached(); + } +} + +static void pnv_phb3_check_m32(PnvPHB3 *phb) +{ + uint64_t base, start, size; + MemoryRegion *parent; + PnvPBCQState *pbcq = &phb->pbcq; + + if (memory_region_is_mapped(&phb->mr_m32)) { + memory_region_del_subregion(phb->mr_m32.container, &phb->mr_m32); + } + + if (!(phb->regs[PHB_PHB3_CONFIG >> 3] & PHB_PHB3C_M32_EN)) { + return; + } + + /* Grab geometry from registers */ + base = phb->regs[PHB_M32_BASE_ADDR >> 3]; + start = phb->regs[PHB_M32_START_ADDR >> 3]; + size = ~(phb->regs[PHB_M32_BASE_MASK >> 3] | 0xfffc000000000000ull) + 1; + + /* Check if it matches an enabled MMIO region in the PBCQ */ + if (memory_region_is_mapped(&pbcq->mmbar0) && + base >= pbcq->mmio0_base && + (base + size) <= (pbcq->mmio0_base + pbcq->mmio0_size)) { + parent = &pbcq->mmbar0; + base -= pbcq->mmio0_base; + } else if (memory_region_is_mapped(&pbcq->mmbar1) && + base >= pbcq->mmio1_base && + (base + size) <= (pbcq->mmio1_base + pbcq->mmio1_size)) { + parent = &pbcq->mmbar1; + base -= pbcq->mmio1_base; + } else { + return; + } + + /* Create alias */ + memory_region_init_alias(&phb->mr_m32, OBJECT(phb), "phb3-m32", + &phb->pci_mmio, start, size); + memory_region_add_subregion(parent, base, &phb->mr_m32); +} + +static void pnv_phb3_check_m64(PnvPHB3 *phb, uint32_t index) +{ + uint64_t base, start, size, m64; + MemoryRegion *parent; + PnvPBCQState *pbcq = &phb->pbcq; + + if (memory_region_is_mapped(&phb->mr_m64[index])) { + /* Should we destroy it in RCU friendly way... ? 
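For now the alias is simply unmapped here and re-created below if the window is still enabled.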
*/ + memory_region_del_subregion(phb->mr_m64[index].container, + &phb->mr_m64[index]); + } + + /* Get table entry */ + m64 = phb->ioda_M64BT[index]; + + if (!(m64 & IODA2_M64BT_ENABLE)) { + return; + } + + /* Grab geometry from registers */ + base = GETFIELD(IODA2_M64BT_BASE, m64) << 20; + if (m64 & IODA2_M64BT_SINGLE_PE) { + base &= ~0x1ffffffull; + } + size = GETFIELD(IODA2_M64BT_MASK, m64) << 20; + size |= 0xfffc000000000000ull; + size = ~size + 1; + start = base | (phb->regs[PHB_M64_UPPER_BITS >> 3]); + + /* Check if it matches an enabled MMIO region in the PBCQ */ + if (memory_region_is_mapped(&pbcq->mmbar0) && + base >= pbcq->mmio0_base && + (base + size) <= (pbcq->mmio0_base + pbcq->mmio0_size)) { + parent = &pbcq->mmbar0; + base -= pbcq->mmio0_base; + } else if (memory_region_is_mapped(&pbcq->mmbar1) && + base >= pbcq->mmio1_base && + (base + size) <= (pbcq->mmio1_base + pbcq->mmio1_size)) { + parent = &pbcq->mmbar1; + base -= pbcq->mmio1_base; + } else { + return; + } + + /* Create alias */ + memory_region_init_alias(&phb->mr_m64[index], OBJECT(phb), "phb3-m64", + &phb->pci_mmio, start, size); + memory_region_add_subregion(parent, base, &phb->mr_m64[index]); +} + +static void pnv_phb3_check_all_m64s(PnvPHB3 *phb) +{ + uint64_t i; + + for (i = 0; i < PNV_PHB3_NUM_M64; i++) { + pnv_phb3_check_m64(phb, i); + } +} + +static void pnv_phb3_lxivt_write(PnvPHB3 *phb, unsigned idx, uint64_t val) +{ + uint8_t server, prio; + + phb->ioda_LXIVT[idx] = val & (IODA2_LXIVT_SERVER | + IODA2_LXIVT_PRIORITY | + IODA2_LXIVT_NODE_ID); + server = GETFIELD(IODA2_LXIVT_SERVER, val); + prio = GETFIELD(IODA2_LXIVT_PRIORITY, val); + + /* + * The low order 2 bits are the link pointer (Type II interrupts). + * Shift back to get a valid IRQ server. + */ + server >>= 2; + + ics_write_xive(&phb->lsis, idx, server, prio, prio); +} + +static uint64_t *pnv_phb3_ioda_access(PnvPHB3 *phb, + unsigned *out_table, unsigned *out_idx) +{ + uint64_t adreg = phb->regs[PHB_IODA_ADDR >> 3]; + unsigned int index = GETFIELD(PHB_IODA_AD_TADR, adreg); + unsigned int table = GETFIELD(PHB_IODA_AD_TSEL, adreg); + unsigned int mask; + uint64_t *tptr = NULL; + + switch (table) { + case IODA2_TBL_LIST: + tptr = phb->ioda_LIST; + mask = 7; + break; + case IODA2_TBL_LXIVT: + tptr = phb->ioda_LXIVT; + mask = 7; + break; + case IODA2_TBL_IVC_CAM: + case IODA2_TBL_RBA: + mask = 31; + break; + case IODA2_TBL_RCAM: + mask = 63; + break; + case IODA2_TBL_MRT: + mask = 7; + break; + case IODA2_TBL_PESTA: + case IODA2_TBL_PESTB: + mask = 255; + break; + case IODA2_TBL_TVT: + tptr = phb->ioda_TVT; + mask = 511; + break; + case IODA2_TBL_TCAM: + case IODA2_TBL_TDR: + mask = 63; + break; + case IODA2_TBL_M64BT: + tptr = phb->ioda_M64BT; + mask = 15; + break; + case IODA2_TBL_M32DT: + tptr = phb->ioda_MDT; + mask = 255; + break; + case IODA2_TBL_PEEV: + tptr = phb->ioda_PEEV; + mask = 3; + break; + default: + phb3_error(phb, "invalid IODA table %d", table); + return NULL; + } + index &= mask; + if (out_idx) { + *out_idx = index; + } + if (out_table) { + *out_table = table; + } + if (tptr) { + tptr += index; + } + if (adreg & PHB_IODA_AD_AUTOINC) { + index = (index + 1) & mask; + adreg = SETFIELD(PHB_IODA_AD_TADR, adreg, index); + } + phb->regs[PHB_IODA_ADDR >> 3] = adreg; + return tptr; +} + +static uint64_t pnv_phb3_ioda_read(PnvPHB3 *phb) +{ + unsigned table; + uint64_t *tptr; + + tptr = pnv_phb3_ioda_access(phb, &table, NULL); + if (!tptr) { + /* Return 0 on unsupported tables, not ff's */ + return 0; + } + return *tptr; +} + +static void 
pnv_phb3_ioda_write(PnvPHB3 *phb, uint64_t val) +{ + unsigned table, idx; + uint64_t *tptr; + + tptr = pnv_phb3_ioda_access(phb, &table, &idx); + if (!tptr) { + return; + } + + /* Handle side effects */ + switch (table) { + case IODA2_TBL_LXIVT: + pnv_phb3_lxivt_write(phb, idx, val); + break; + case IODA2_TBL_M64BT: + *tptr = val; + pnv_phb3_check_m64(phb, idx); + break; + default: + *tptr = val; + } +} + +/* + * This is called whenever the PHB LSI, MSI source ID register or + * the PBCQ irq filters are written. + */ +void pnv_phb3_remap_irqs(PnvPHB3 *phb) +{ + ICSState *ics = &phb->lsis; + uint32_t local, global, count, mask, comp; + uint64_t baren; + PnvPBCQState *pbcq = &phb->pbcq; + + /* + * First check if we are enabled. Unlike real HW we don't separate + * TX and RX so we enable if both are set + */ + baren = pbcq->nest_regs[PBCQ_NEST_BAR_EN]; + if (!(baren & PBCQ_NEST_BAR_EN_IRSN_RX) || + !(baren & PBCQ_NEST_BAR_EN_IRSN_TX)) { + ics->offset = 0; + return; + } + + /* Grab local LSI source ID */ + local = GETFIELD(PHB_LSI_SRC_ID, phb->regs[PHB_LSI_SOURCE_ID >> 3]) << 3; + + /* Grab global one and compare */ + global = GETFIELD(PBCQ_NEST_LSI_SRC, + pbcq->nest_regs[PBCQ_NEST_LSI_SRC_ID]) << 3; + if (global != local) { + /* + * This happens during initialization, let's come back when we + * are properly configured + */ + ics->offset = 0; + return; + } + + /* Get the base on the powerbus */ + comp = GETFIELD(PBCQ_NEST_IRSN_COMP, + pbcq->nest_regs[PBCQ_NEST_IRSN_COMPARE]); + mask = GETFIELD(PBCQ_NEST_IRSN_COMP, + pbcq->nest_regs[PBCQ_NEST_IRSN_MASK]); + count = ((~mask) + 1) & 0x7ffff; + phb->total_irq = count; + + /* Sanity checks */ + if ((global + PNV_PHB3_NUM_LSI) > count) { + phb3_error(phb, "LSIs out of reach: LSI base=%d total irq=%d", global, + count); + } + + if (count > 2048) { + phb3_error(phb, "More interrupts than supported: %d", count); + } + + if ((comp & mask) != comp) { + phb3_error(phb, "IRQ compare bits not in mask: comp=0x%x mask=0x%x", + comp, mask); + comp &= mask; + } + /* Setup LSI offset */ + ics->offset = comp + global; + + /* Setup MSI offset */ + pnv_phb3_msi_update_config(&phb->msis, comp, count - PNV_PHB3_NUM_LSI); +} + +static void pnv_phb3_lsi_src_id_write(PnvPHB3 *phb, uint64_t val) +{ + /* Sanitize content */ + val &= PHB_LSI_SRC_ID; + phb->regs[PHB_LSI_SOURCE_ID >> 3] = val; + pnv_phb3_remap_irqs(phb); +} + +static void pnv_phb3_rtc_invalidate(PnvPHB3 *phb, uint64_t val) +{ + PnvPhb3DMASpace *ds; + + /* Always invalidate all for now ... 
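(i.e. drop the cached PE# of every DMA space rather than decoding the invalidate target from val)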
*/ + QLIST_FOREACH(ds, &phb->dma_spaces, list) { + ds->pe_num = PHB_INVALID_PE; + } +} + + +static void pnv_phb3_update_msi_regions(PnvPhb3DMASpace *ds) +{ + uint64_t cfg = ds->phb->regs[PHB_PHB3_CONFIG >> 3]; + + if (cfg & PHB_PHB3C_32BIT_MSI_EN) { + if (!memory_region_is_mapped(&ds->msi32_mr)) { + memory_region_add_subregion(MEMORY_REGION(&ds->dma_mr), + 0xffff0000, &ds->msi32_mr); + } + } else { + if (memory_region_is_mapped(&ds->msi32_mr)) { + memory_region_del_subregion(MEMORY_REGION(&ds->dma_mr), + &ds->msi32_mr); + } + } + + if (cfg & PHB_PHB3C_64BIT_MSI_EN) { + if (!memory_region_is_mapped(&ds->msi64_mr)) { + memory_region_add_subregion(MEMORY_REGION(&ds->dma_mr), + (1ull << 60), &ds->msi64_mr); + } + } else { + if (memory_region_is_mapped(&ds->msi64_mr)) { + memory_region_del_subregion(MEMORY_REGION(&ds->dma_mr), + &ds->msi64_mr); + } + } +} + +static void pnv_phb3_update_all_msi_regions(PnvPHB3 *phb) +{ + PnvPhb3DMASpace *ds; + + QLIST_FOREACH(ds, &phb->dma_spaces, list) { + pnv_phb3_update_msi_regions(ds); + } +} + +void pnv_phb3_reg_write(void *opaque, hwaddr off, uint64_t val, unsigned size) +{ + PnvPHB3 *phb = opaque; + bool changed; + + /* Special case configuration data */ + if ((off & 0xfffc) == PHB_CONFIG_DATA) { + pnv_phb3_config_write(phb, off & 0x3, size, val); + return; + } + + /* Other registers are 64-bit only */ + if (size != 8 || off & 0x7) { + phb3_error(phb, "Invalid register access, offset: 0x%"PRIx64" size: %d", + off, size); + return; + } + + /* Handle masking & filtering */ + switch (off) { + case PHB_M64_UPPER_BITS: + val &= 0xfffc000000000000ull; + break; + case PHB_Q_DMA_R: + /* + * This is enough logic to make SW happy but we aren't actually + * quiescing the DMAs + */ + if (val & PHB_Q_DMA_R_AUTORESET) { + val = 0; + } else { + val &= PHB_Q_DMA_R_QUIESCE_DMA; + } + break; + /* LEM stuff */ + case PHB_LEM_FIR_AND_MASK: + phb->regs[PHB_LEM_FIR_ACCUM >> 3] &= val; + return; + case PHB_LEM_FIR_OR_MASK: + phb->regs[PHB_LEM_FIR_ACCUM >> 3] |= val; + return; + case PHB_LEM_ERROR_AND_MASK: + phb->regs[PHB_LEM_ERROR_MASK >> 3] &= val; + return; + case PHB_LEM_ERROR_OR_MASK: + phb->regs[PHB_LEM_ERROR_MASK >> 3] |= val; + return; + case PHB_LEM_WOF: + val = 0; + break; + } + + /* Record whether it changed */ + changed = phb->regs[off >> 3] != val; + + /* Store in register cache first */ + phb->regs[off >> 3] = val; + + /* Handle side effects */ + switch (off) { + case PHB_PHB3_CONFIG: + if (changed) { + pnv_phb3_update_all_msi_regions(phb); + } + /* fall through */ + case PHB_M32_BASE_ADDR: + case PHB_M32_BASE_MASK: + case PHB_M32_START_ADDR: + if (changed) { + pnv_phb3_check_m32(phb); + } + break; + case PHB_M64_UPPER_BITS: + if (changed) { + pnv_phb3_check_all_m64s(phb); + } + break; + case PHB_LSI_SOURCE_ID: + if (changed) { + pnv_phb3_lsi_src_id_write(phb, val); + } + break; + + /* IODA table accesses */ + case PHB_IODA_DATA0: + pnv_phb3_ioda_write(phb, val); + break; + + /* RTC invalidation */ + case PHB_RTC_INVALIDATE: + pnv_phb3_rtc_invalidate(phb, val); + break; + + /* FFI request */ + case PHB_FFI_REQUEST: + pnv_phb3_msi_ffi(&phb->msis, val); + break; + + /* Silent simple writes */ + case PHB_CONFIG_ADDRESS: + case PHB_IODA_ADDR: + case PHB_TCE_KILL: + case PHB_TCE_SPEC_CTL: + case PHB_PEST_BAR: + case PHB_PELTV_BAR: + case PHB_RTT_BAR: + case PHB_RBA_BAR: + case PHB_IVT_BAR: + case PHB_FFI_LOCK: + case PHB_LEM_FIR_ACCUM: + case PHB_LEM_ERROR_MASK: + case PHB_LEM_ACTION0: + case PHB_LEM_ACTION1: + break; + + /* Noise on anything else */ + default: + 
qemu_log_mask(LOG_UNIMP, "phb3: reg_write 0x%"PRIx64"=%"PRIx64"\n", + off, val); + } +} + +uint64_t pnv_phb3_reg_read(void *opaque, hwaddr off, unsigned size) +{ + PnvPHB3 *phb = opaque; + PCIHostState *pci = PCI_HOST_BRIDGE(phb); + uint64_t val; + + if ((off & 0xfffc) == PHB_CONFIG_DATA) { + return pnv_phb3_config_read(phb, off & 0x3, size); + } + + /* Other registers are 64-bit only */ + if (size != 8 || off & 0x7) { + phb3_error(phb, "Invalid register access, offset: 0x%"PRIx64" size: %d", + off, size); + return ~0ull; + } + + /* Default read from cache */ + val = phb->regs[off >> 3]; + + switch (off) { + /* Simulate venice DD2.0 */ + case PHB_VERSION: + return 0x000000a300000005ull; + case PHB_PCIE_SYSTEM_CONFIG: + return 0x441100fc30000000; + + /* IODA table accesses */ + case PHB_IODA_DATA0: + return pnv_phb3_ioda_read(phb); + + /* Link training always appears trained */ + case PHB_PCIE_DLP_TRAIN_CTL: + if (!pci_find_device(pci->bus, 1, 0)) { + return 0; + } + return PHB_PCIE_DLP_INBAND_PRESENCE | PHB_PCIE_DLP_TC_DL_LINKACT; + + /* FFI Lock */ + case PHB_FFI_LOCK: + /* Set lock and return previous value */ + phb->regs[off >> 3] |= PHB_FFI_LOCK_STATE; + return val; + + /* DMA read sync: make it look like it's complete */ + case PHB_DMARD_SYNC: + return PHB_DMARD_SYNC_COMPLETE; + + /* Silent simple reads */ + case PHB_PHB3_CONFIG: + case PHB_M32_BASE_ADDR: + case PHB_M32_BASE_MASK: + case PHB_M32_START_ADDR: + case PHB_CONFIG_ADDRESS: + case PHB_IODA_ADDR: + case PHB_RTC_INVALIDATE: + case PHB_TCE_KILL: + case PHB_TCE_SPEC_CTL: + case PHB_PEST_BAR: + case PHB_PELTV_BAR: + case PHB_RTT_BAR: + case PHB_RBA_BAR: + case PHB_IVT_BAR: + case PHB_M64_UPPER_BITS: + case PHB_LEM_FIR_ACCUM: + case PHB_LEM_ERROR_MASK: + case PHB_LEM_ACTION0: + case PHB_LEM_ACTION1: + break; + + /* Noise on anything else */ + default: + qemu_log_mask(LOG_UNIMP, "phb3: reg_read 0x%"PRIx64"=%"PRIx64"\n", + off, val); + } + return val; +} + +static const MemoryRegionOps pnv_phb3_reg_ops = { + .read = pnv_phb3_reg_read, + .write = pnv_phb3_reg_write, + .valid.min_access_size = 1, + .valid.max_access_size = 8, + .impl.min_access_size = 1, + .impl.max_access_size = 8, + .endianness = DEVICE_BIG_ENDIAN, +}; + +static int pnv_phb3_map_irq(PCIDevice *pci_dev, int irq_num) +{ + /* Check that out properly ... */ + return irq_num & 3; +} + +static void pnv_phb3_set_irq(void *opaque, int irq_num, int level) +{ + PnvPHB3 *phb = opaque; + + /* LSI only ... */ + if (irq_num > 3) { + phb3_error(phb, "Unknown IRQ to set %d", irq_num); + } + qemu_set_irq(phb->qirqs[irq_num], level); +} + +static bool pnv_phb3_resolve_pe(PnvPhb3DMASpace *ds) +{ + uint64_t rtt, addr; + uint16_t rte; + int bus_num; + + /* Already resolved ? */ + if (ds->pe_num != PHB_INVALID_PE) { + return true; + } + + /* We need to lookup the RTT */ + rtt = ds->phb->regs[PHB_RTT_BAR >> 3]; + if (!(rtt & PHB_RTT_BAR_ENABLE)) { + phb3_error(ds->phb, "DMA with RTT BAR disabled !"); + /* Set error bits ? fence ? ... */ + return false; + } + + /* Read RTE */ + bus_num = pci_bus_num(ds->bus); + addr = rtt & PHB_RTT_BASE_ADDRESS_MASK; + addr += 2 * ((bus_num << 8) | ds->devfn); + if (dma_memory_read(&address_space_memory, addr, &rte, sizeof(rte))) { + phb3_error(ds->phb, "Failed to read RTT entry at 0x%"PRIx64, addr); + /* Set error bits ? fence ? ... */ + return false; + } + rte = be16_to_cpu(rte); + + /* Fail upon reading of invalid PE# */ + if (rte >= PNV_PHB3_NUM_PE) { + phb3_error(ds->phb, "RTE for RID 0x%x invalid (%04x", ds->devfn, rte); + /* Set error bits ? fence ? 
... */
+        return false;
+    }
+    ds->pe_num = rte;
+    return true;
+}
+
+static void pnv_phb3_translate_tve(PnvPhb3DMASpace *ds, hwaddr addr,
+                                   bool is_write, uint64_t tve,
+                                   IOMMUTLBEntry *tlb)
+{
+    uint64_t tta = GETFIELD(IODA2_TVT_TABLE_ADDR, tve);
+    int32_t lev = GETFIELD(IODA2_TVT_NUM_LEVELS, tve);
+    uint32_t tts = GETFIELD(IODA2_TVT_TCE_TABLE_SIZE, tve);
+    uint32_t tps = GETFIELD(IODA2_TVT_IO_PSIZE, tve);
+    PnvPHB3 *phb = ds->phb;
+
+    /* Invalid levels */
+    if (lev > 4) {
+        phb3_error(phb, "Invalid #levels in TVE %d", lev);
+        return;
+    }
+
+    /* IO Page Size of 0 means untranslated, else use TCEs */
+    if (tps == 0) {
+        /*
+         * We only support non-translate in top window.
+         *
+         * TODO: Venice/Murano support it on bottom window above 4G and
+         * Naples supports it on everything
+         */
+        if (!(tve & PPC_BIT(51))) {
+            phb3_error(phb, "xlate for invalid non-translate TVE");
+            return;
+        }
+        /* TODO: Handle boundaries */
+
+        /* Use 4k pages like q35 ... for now */
+        tlb->iova = addr & 0xfffffffffffff000ull;
+        tlb->translated_addr = addr & 0x0003fffffffff000ull;
+        tlb->addr_mask = 0xfffull;
+        tlb->perm = IOMMU_RW;
+    } else {
+        uint32_t tce_shift, tbl_shift, sh;
+        uint64_t base, taddr, tce, tce_mask;
+
+        /* TVE disabled ? */
+        if (tts == 0) {
+            phb3_error(phb, "xlate for invalid translated TVE");
+            return;
+        }
+
+        /* Address bits per bottom level TCE entry */
+        tce_shift = tps + 11;
+
+        /* Address bits per table level */
+        tbl_shift = tts + 8;
+
+        /* Top level table base address */
+        base = tta << 12;
+
+        /* Total shift to first level */
+        sh = tbl_shift * lev + tce_shift;
+
+        /* TODO: Multi-level untested */
+        while ((lev--) >= 0) {
+            /* Grab the TCE address */
+            taddr = base | (((addr >> sh) & ((1ul << tbl_shift) - 1)) << 3);
+            if (dma_memory_read(&address_space_memory, taddr, &tce,
+                                sizeof(tce))) {
+                phb3_error(phb, "Failed to read TCE at 0x%"PRIx64, taddr);
+                return;
+            }
+            tce = be64_to_cpu(tce);
+
+            /* Check permission for indirect TCE */
+            if ((lev >= 0) && !(tce & 3)) {
+                phb3_error(phb, "Invalid indirect TCE at 0x%"PRIx64, taddr);
+                phb3_error(phb, " xlate %"PRIx64":%c TVE=%"PRIx64, addr,
+                           is_write ? 'W' : 'R', tve);
+                phb3_error(phb, " tta=%"PRIx64" lev=%d tts=%d tps=%d",
+                           tta, lev, tts, tps);
+                return;
+            }
+            sh -= tbl_shift;
+            base = tce & ~0xfffull;
+        }
+
+        /* We exit the loop with TCE being the final TCE */
+        tce_mask = ~((1ull << tce_shift) - 1);
+        tlb->iova = addr & tce_mask;
+        tlb->translated_addr = tce & tce_mask;
+        tlb->addr_mask = ~tce_mask;
+        tlb->perm = tce & 3;
+        if ((is_write && !(tce & 2)) || ((!is_write) && !(tce & 1))) {
+            phb3_error(phb, "TCE access fault at 0x%"PRIx64, taddr);
+            phb3_error(phb, " xlate %"PRIx64":%c TVE=%"PRIx64, addr,
+                       is_write ? 'W' : 'R', tve);
+            phb3_error(phb, " tta=%"PRIx64" lev=%d tts=%d tps=%d",
+                       tta, lev, tts, tps);
+        }
+    }
+}
+
+static IOMMUTLBEntry pnv_phb3_translate_iommu(IOMMUMemoryRegion *iommu,
+                                              hwaddr addr,
+                                              IOMMUAccessFlags flag,
+                                              int iommu_idx)
+{
+    PnvPhb3DMASpace *ds = container_of(iommu, PnvPhb3DMASpace, dma_mr);
+    int tve_sel;
+    uint64_t tve, cfg;
+    IOMMUTLBEntry ret = {
+        .target_as = &address_space_memory,
+        .iova = addr,
+        .translated_addr = 0,
+        .addr_mask = ~(hwaddr)0,
+        .perm = IOMMU_NONE,
+    };
+    PnvPHB3 *phb = ds->phb;
+
+    /* Resolve PE# */
+    if (!pnv_phb3_resolve_pe(ds)) {
+        phb3_error(phb, "Failed to resolve PE# for bus @%p (%d) devfn 0x%x",
+                   ds->bus, pci_bus_num(ds->bus), ds->devfn);
+        return ret;
+    }
+
+    /* Check top bits */
+    switch (addr >> 60) {
+    case 00:
+        /* DMA or 32-bit MSI ?
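/*
 * Editor's sketch (hypothetical helper, not in the patch): the window
 * geometry used by the TCE walk above. With tps = 1 (4K IO pages)
 * tce_shift is 12, and with tts = 8 each level decodes 16 address
 * bits, so a single-level table (lev = 0) spans 2^(16 + 12) = 256MB.
 */
static inline unsigned phb3_tve_window_bits_sketch(uint32_t tps, uint32_t tts,
                                                   int32_t lev)
{
    unsigned tce_shift = tps + 11;  /* address bits per bottom-level TCE */
    unsigned tbl_shift = tts + 8;   /* address bits decoded per level */

    return tbl_shift * (lev + 1) + tce_shift;
}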
*/ + cfg = ds->phb->regs[PHB_PHB3_CONFIG >> 3]; + if ((cfg & PHB_PHB3C_32BIT_MSI_EN) && + ((addr & 0xffffffffffff0000ull) == 0xffff0000ull)) { + phb3_error(phb, "xlate on 32-bit MSI region"); + return ret; + } + /* Choose TVE XXX Use PHB3 Control Register */ + tve_sel = (addr >> 59) & 1; + tve = ds->phb->ioda_TVT[ds->pe_num * 2 + tve_sel]; + pnv_phb3_translate_tve(ds, addr, flag & IOMMU_WO, tve, &ret); + break; + case 01: + phb3_error(phb, "xlate on 64-bit MSI region"); + break; + default: + phb3_error(phb, "xlate on unsupported address 0x%"PRIx64, addr); + } + return ret; +} + +#define TYPE_PNV_PHB3_IOMMU_MEMORY_REGION "pnv-phb3-iommu-memory-region" +DECLARE_INSTANCE_CHECKER(IOMMUMemoryRegion, PNV_PHB3_IOMMU_MEMORY_REGION, + TYPE_PNV_PHB3_IOMMU_MEMORY_REGION) + +static void pnv_phb3_iommu_memory_region_class_init(ObjectClass *klass, + void *data) +{ + IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass); + + imrc->translate = pnv_phb3_translate_iommu; +} + +static const TypeInfo pnv_phb3_iommu_memory_region_info = { + .parent = TYPE_IOMMU_MEMORY_REGION, + .name = TYPE_PNV_PHB3_IOMMU_MEMORY_REGION, + .class_init = pnv_phb3_iommu_memory_region_class_init, +}; + +/* + * MSI/MSIX memory region implementation. + * The handler handles both MSI and MSIX. + */ +static void pnv_phb3_msi_write(void *opaque, hwaddr addr, + uint64_t data, unsigned size) +{ + PnvPhb3DMASpace *ds = opaque; + + /* Resolve PE# */ + if (!pnv_phb3_resolve_pe(ds)) { + phb3_error(ds->phb, "Failed to resolve PE# for bus @%p (%d) devfn 0x%x", + ds->bus, pci_bus_num(ds->bus), ds->devfn); + return; + } + + pnv_phb3_msi_send(&ds->phb->msis, addr, data, ds->pe_num); +} + +/* There is no .read as the read result is undefined by PCI spec */ +static uint64_t pnv_phb3_msi_read(void *opaque, hwaddr addr, unsigned size) +{ + PnvPhb3DMASpace *ds = opaque; + + phb3_error(ds->phb, "invalid read @ 0x%" HWADDR_PRIx, addr); + return -1; +} + +static const MemoryRegionOps pnv_phb3_msi_ops = { + .read = pnv_phb3_msi_read, + .write = pnv_phb3_msi_write, + .endianness = DEVICE_LITTLE_ENDIAN +}; + +static AddressSpace *pnv_phb3_dma_iommu(PCIBus *bus, void *opaque, int devfn) +{ + PnvPHB3 *phb = opaque; + PnvPhb3DMASpace *ds; + + QLIST_FOREACH(ds, &phb->dma_spaces, list) { + if (ds->bus == bus && ds->devfn == devfn) { + break; + } + } + + if (ds == NULL) { + ds = g_malloc0(sizeof(PnvPhb3DMASpace)); + ds->bus = bus; + ds->devfn = devfn; + ds->pe_num = PHB_INVALID_PE; + ds->phb = phb; + memory_region_init_iommu(&ds->dma_mr, sizeof(ds->dma_mr), + TYPE_PNV_PHB3_IOMMU_MEMORY_REGION, + OBJECT(phb), "phb3_iommu", UINT64_MAX); + address_space_init(&ds->dma_as, MEMORY_REGION(&ds->dma_mr), + "phb3_iommu"); + memory_region_init_io(&ds->msi32_mr, OBJECT(phb), &pnv_phb3_msi_ops, + ds, "msi32", 0x10000); + memory_region_init_io(&ds->msi64_mr, OBJECT(phb), &pnv_phb3_msi_ops, + ds, "msi64", 0x100000); + pnv_phb3_update_msi_regions(ds); + + QLIST_INSERT_HEAD(&phb->dma_spaces, ds, list); + } + return &ds->dma_as; +} + +static void pnv_phb3_instance_init(Object *obj) +{ + PnvPHB3 *phb = PNV_PHB3(obj); + + QLIST_INIT(&phb->dma_spaces); + + /* LSI sources */ + object_initialize_child(obj, "lsi", &phb->lsis, TYPE_ICS); + + /* Default init ... 
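/*
 * Editor's sketch (hypothetical helpers): where the two MSI windows
 * created by pnv_phb3_dma_iommu() above sit inside each per-device DMA
 * address space. The 32-bit window is the classic 64K page at
 * 0xffff0000; the 64-bit window lives in the (addr >> 60) == 1 region.
 */
static inline bool phb3_in_msi32_window_sketch(uint64_t addr)
{
    return (addr & 0xffffffffffff0000ull) == 0xffff0000ull;
}

static inline bool phb3_in_msi64_window_sketch(uint64_t addr)
{
    return (addr >> 60) == 1;
}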
will be fixed by HW inits */ + phb->lsis.offset = 0; + + /* MSI sources */ + object_initialize_child(obj, "msi", &phb->msis, TYPE_PHB3_MSI); + + /* Power Bus Common Queue */ + object_initialize_child(obj, "pbcq", &phb->pbcq, TYPE_PNV_PBCQ); + + /* Root Port */ + object_initialize_child(obj, "root", &phb->root, TYPE_PNV_PHB3_ROOT_PORT); + qdev_prop_set_int32(DEVICE(&phb->root), "addr", PCI_DEVFN(0, 0)); + qdev_prop_set_bit(DEVICE(&phb->root), "multifunction", false); +} + +static void pnv_phb3_realize(DeviceState *dev, Error **errp) +{ + PnvPHB3 *phb = PNV_PHB3(dev); + PCIHostState *pci = PCI_HOST_BRIDGE(dev); + PnvMachineState *pnv = PNV_MACHINE(qdev_get_machine()); + int i; + + if (phb->phb_id >= PNV8_CHIP_PHB3_MAX) { + error_setg(errp, "invalid PHB index: %d", phb->phb_id); + return; + } + + /* LSI sources */ + object_property_set_link(OBJECT(&phb->lsis), "xics", OBJECT(pnv), + &error_abort); + object_property_set_int(OBJECT(&phb->lsis), "nr-irqs", PNV_PHB3_NUM_LSI, + &error_abort); + if (!qdev_realize(DEVICE(&phb->lsis), NULL, errp)) { + return; + } + + for (i = 0; i < phb->lsis.nr_irqs; i++) { + ics_set_irq_type(&phb->lsis, i, true); + } + + phb->qirqs = qemu_allocate_irqs(ics_set_irq, &phb->lsis, phb->lsis.nr_irqs); + + /* MSI sources */ + object_property_set_link(OBJECT(&phb->msis), "phb", OBJECT(phb), + &error_abort); + object_property_set_link(OBJECT(&phb->msis), "xics", OBJECT(pnv), + &error_abort); + object_property_set_int(OBJECT(&phb->msis), "nr-irqs", PHB3_MAX_MSI, + &error_abort); + if (!qdev_realize(DEVICE(&phb->msis), NULL, errp)) { + return; + } + + /* Power Bus Common Queue */ + object_property_set_link(OBJECT(&phb->pbcq), "phb", OBJECT(phb), + &error_abort); + if (!qdev_realize(DEVICE(&phb->pbcq), NULL, errp)) { + return; + } + + /* Controller Registers */ + memory_region_init_io(&phb->mr_regs, OBJECT(phb), &pnv_phb3_reg_ops, phb, + "phb3-regs", 0x1000); + + /* + * PHB3 doesn't support IO space. However, qemu gets very upset if + * we don't have an IO region to anchor IO BARs onto so we just + * initialize one which we never hook up to anything + */ + memory_region_init(&phb->pci_io, OBJECT(phb), "pci-io", 0x10000); + memory_region_init(&phb->pci_mmio, OBJECT(phb), "pci-mmio", + PCI_MMIO_TOTAL_SIZE); + + pci->bus = pci_register_root_bus(dev, "root-bus", + pnv_phb3_set_irq, pnv_phb3_map_irq, phb, + &phb->pci_mmio, &phb->pci_io, + 0, 4, TYPE_PNV_PHB3_ROOT_BUS); + + pci_setup_iommu(pci->bus, pnv_phb3_dma_iommu, phb); + + /* Add a single Root port */ + qdev_prop_set_uint8(DEVICE(&phb->root), "chassis", phb->chip_id); + qdev_prop_set_uint16(DEVICE(&phb->root), "slot", phb->phb_id); + qdev_realize(DEVICE(&phb->root), BUS(pci->bus), &error_fatal); +} + +void pnv_phb3_update_regions(PnvPHB3 *phb) +{ + PnvPBCQState *pbcq = &phb->pbcq; + + /* Unmap first always */ + if (memory_region_is_mapped(&phb->mr_regs)) { + memory_region_del_subregion(&pbcq->phbbar, &phb->mr_regs); + } + + /* Map registers if enabled */ + if (memory_region_is_mapped(&pbcq->phbbar)) { + /* TODO: We should use the PHB BAR 2 register but we don't ... 
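/*
 * Editor's sketch of the general QEMU idiom used by
 * pnv_phb3_update_regions() here: a subregion must be removed from its
 * old container before being mapped again. Hypothetical helper,
 * assuming direct access to mr->container as done elsewhere in this
 * patch.
 */
static void remap_subregion_sketch(MemoryRegion *container, hwaddr off,
                                   MemoryRegion *mr)
{
    if (memory_region_is_mapped(mr)) {
        memory_region_del_subregion(mr->container, mr);
    }
    memory_region_add_subregion(container, off, mr);
}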
*/ + memory_region_add_subregion(&pbcq->phbbar, 0, &phb->mr_regs); + } + + /* Check/update m32 */ + if (memory_region_is_mapped(&phb->mr_m32)) { + pnv_phb3_check_m32(phb); + } + pnv_phb3_check_all_m64s(phb); +} + +static const char *pnv_phb3_root_bus_path(PCIHostState *host_bridge, + PCIBus *rootbus) +{ + PnvPHB3 *phb = PNV_PHB3(host_bridge); + + snprintf(phb->bus_path, sizeof(phb->bus_path), "00%02x:%02x", + phb->chip_id, phb->phb_id); + return phb->bus_path; +} + +static Property pnv_phb3_properties[] = { + DEFINE_PROP_UINT32("index", PnvPHB3, phb_id, 0), + DEFINE_PROP_UINT32("chip-id", PnvPHB3, chip_id, 0), + DEFINE_PROP_END_OF_LIST(), +}; + +static void pnv_phb3_class_init(ObjectClass *klass, void *data) +{ + PCIHostBridgeClass *hc = PCI_HOST_BRIDGE_CLASS(klass); + DeviceClass *dc = DEVICE_CLASS(klass); + + hc->root_bus_path = pnv_phb3_root_bus_path; + dc->realize = pnv_phb3_realize; + device_class_set_props(dc, pnv_phb3_properties); + set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); + dc->user_creatable = false; +} + +static const TypeInfo pnv_phb3_type_info = { + .name = TYPE_PNV_PHB3, + .parent = TYPE_PCIE_HOST_BRIDGE, + .instance_size = sizeof(PnvPHB3), + .class_init = pnv_phb3_class_init, + .instance_init = pnv_phb3_instance_init, +}; + +static void pnv_phb3_root_bus_class_init(ObjectClass *klass, void *data) +{ + BusClass *k = BUS_CLASS(klass); + + /* + * PHB3 has only a single root complex. Enforce the limit on the + * parent bus + */ + k->max_dev = 1; +} + +static const TypeInfo pnv_phb3_root_bus_info = { + .name = TYPE_PNV_PHB3_ROOT_BUS, + .parent = TYPE_PCIE_BUS, + .class_init = pnv_phb3_root_bus_class_init, + .interfaces = (InterfaceInfo[]) { + { INTERFACE_PCIE_DEVICE }, + { } + }, +}; + +static void pnv_phb3_root_port_realize(DeviceState *dev, Error **errp) +{ + PCIERootPortClass *rpc = PCIE_ROOT_PORT_GET_CLASS(dev); + Error *local_err = NULL; + + rpc->parent_realize(dev, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } +} + +static void pnv_phb3_root_port_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + PCIERootPortClass *rpc = PCIE_ROOT_PORT_CLASS(klass); + + dc->desc = "IBM PHB3 PCIE Root Port"; + + device_class_set_parent_realize(dc, pnv_phb3_root_port_realize, + &rpc->parent_realize); + dc->user_creatable = false; + + k->vendor_id = PCI_VENDOR_ID_IBM; + k->device_id = 0x03dc; + k->revision = 0; + + rpc->exp_offset = 0x48; + rpc->aer_offset = 0x100; +} + +static const TypeInfo pnv_phb3_root_port_info = { + .name = TYPE_PNV_PHB3_ROOT_PORT, + .parent = TYPE_PCIE_ROOT_PORT, + .instance_size = sizeof(PnvPHB3RootPort), + .class_init = pnv_phb3_root_port_class_init, +}; + +static void pnv_phb3_register_types(void) +{ + type_register_static(&pnv_phb3_root_bus_info); + type_register_static(&pnv_phb3_root_port_info); + type_register_static(&pnv_phb3_type_info); + type_register_static(&pnv_phb3_iommu_memory_region_info); +} + +type_init(pnv_phb3_register_types) diff --git a/hw/pci-host/pnv_phb3_msi.c b/hw/pci-host/pnv_phb3_msi.c new file mode 100644 index 000000000..099d2092a --- /dev/null +++ b/hw/pci-host/pnv_phb3_msi.c @@ -0,0 +1,348 @@ +/* + * QEMU PowerPC PowerNV (POWER8) PHB3 model + * + * Copyright (c) 2014-2020, IBM Corporation. + * + * This code is licensed under the GPL version 2 or later. See the + * COPYING file in the top-level directory. 
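/*
 * Editor's note (hypothetical helper, not part of the patch): the IVT
 * is a guest-memory array of Interrupt Vector Entries. The
 * phb3_msi_ive_addr() routine in this file strides it by 16 bytes, or
 * by 128 bytes (cache-line sized IVEs) when PHB_CTRL_IVE_128_BYTES is
 * set in PHB_CONTROL.
 */
static inline uint64_t phb3_ive_stride_sketch(uint64_t phbctl)
{
    return (phbctl & PHB_CTRL_IVE_128_BYTES) ? 128 : 16;
}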
+ */ +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "qapi/error.h" +#include "qemu-common.h" +#include "hw/pci-host/pnv_phb3_regs.h" +#include "hw/pci-host/pnv_phb3.h" +#include "hw/ppc/pnv.h" +#include "hw/pci/msi.h" +#include "monitor/monitor.h" +#include "hw/irq.h" +#include "hw/qdev-properties.h" +#include "sysemu/reset.h" + +static uint64_t phb3_msi_ive_addr(PnvPHB3 *phb, int srcno) +{ + uint64_t ivtbar = phb->regs[PHB_IVT_BAR >> 3]; + uint64_t phbctl = phb->regs[PHB_CONTROL >> 3]; + + if (!(ivtbar & PHB_IVT_BAR_ENABLE)) { + qemu_log_mask(LOG_GUEST_ERROR, "Failed access to disable IVT BAR !"); + return 0; + } + + if (srcno >= (ivtbar & PHB_IVT_LENGTH_MASK)) { + qemu_log_mask(LOG_GUEST_ERROR, "MSI out of bounds (%d vs 0x%"PRIx64")", + srcno, (uint64_t) (ivtbar & PHB_IVT_LENGTH_MASK)); + return 0; + } + + ivtbar &= PHB_IVT_BASE_ADDRESS_MASK; + + if (phbctl & PHB_CTRL_IVE_128_BYTES) { + return ivtbar + 128 * srcno; + } else { + return ivtbar + 16 * srcno; + } +} + +static bool phb3_msi_read_ive(PnvPHB3 *phb, int srcno, uint64_t *out_ive) +{ + uint64_t ive_addr, ive; + + ive_addr = phb3_msi_ive_addr(phb, srcno); + if (!ive_addr) { + return false; + } + + if (dma_memory_read(&address_space_memory, ive_addr, &ive, sizeof(ive))) { + qemu_log_mask(LOG_GUEST_ERROR, "Failed to read IVE at 0x%" PRIx64, + ive_addr); + return false; + } + *out_ive = be64_to_cpu(ive); + + return true; +} + +static void phb3_msi_set_p(Phb3MsiState *msi, int srcno, uint8_t gen) +{ + uint64_t ive_addr; + uint8_t p = 0x01 | (gen << 1); + + ive_addr = phb3_msi_ive_addr(msi->phb, srcno); + if (!ive_addr) { + return; + } + + if (dma_memory_write(&address_space_memory, ive_addr + 4, &p, 1)) { + qemu_log_mask(LOG_GUEST_ERROR, + "Failed to write IVE (set P) at 0x%" PRIx64, ive_addr); + } +} + +static void phb3_msi_set_q(Phb3MsiState *msi, int srcno) +{ + uint64_t ive_addr; + uint8_t q = 0x01; + + ive_addr = phb3_msi_ive_addr(msi->phb, srcno); + if (!ive_addr) { + return; + } + + if (dma_memory_write(&address_space_memory, ive_addr + 5, &q, 1)) { + qemu_log_mask(LOG_GUEST_ERROR, + "Failed to write IVE (set Q) at 0x%" PRIx64, ive_addr); + } +} + +static void phb3_msi_try_send(Phb3MsiState *msi, int srcno, bool force) +{ + ICSState *ics = ICS(msi); + uint64_t ive; + uint64_t server, prio, pq, gen; + + if (!phb3_msi_read_ive(msi->phb, srcno, &ive)) { + return; + } + + server = GETFIELD(IODA2_IVT_SERVER, ive); + prio = GETFIELD(IODA2_IVT_PRIORITY, ive); + if (!force) { + pq = GETFIELD(IODA2_IVT_Q, ive) | (GETFIELD(IODA2_IVT_P, ive) << 1); + } else { + pq = 0; + } + gen = GETFIELD(IODA2_IVT_GEN, ive); + + /* + * The low order 2 bits are the link pointer (Type II interrupts). + * Shift back to get a valid IRQ server. 
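/*
 * Editor's sketch of the P/Q coalescing rules implemented by the
 * switch just below (hypothetical enum and helper). P set means an
 * interrupt is already presented; Q records that one more occurrence
 * arrived while it was pending.
 */
typedef enum { PQ_SEND_SKETCH, PQ_SET_Q_SKETCH, PQ_DROP_SKETCH } PqSketch;

static inline PqSketch phb3_pq_action_sketch(uint8_t pq, uint8_t prio)
{
    switch (pq) {
    case 0:     /* 00: idle; masked sources just queue */
        return (prio == 0xff) ? PQ_SET_Q_SKETCH : PQ_SEND_SKETCH;
    case 2:     /* 10: already pending, queue one more */
        return PQ_SET_Q_SKETCH;
    default:    /* 01 and 11: Q already set, drop */
        return PQ_DROP_SKETCH;
    }
}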
+ */ + server >>= 2; + + switch (pq) { + case 0: /* 00 */ + if (prio == 0xff) { + /* Masked, set Q */ + phb3_msi_set_q(msi, srcno); + } else { + /* Enabled, set P and send */ + phb3_msi_set_p(msi, srcno, gen); + icp_irq(ics, server, srcno + ics->offset, prio); + } + break; + case 2: /* 10 */ + /* Already pending, set Q */ + phb3_msi_set_q(msi, srcno); + break; + case 1: /* 01 */ + case 3: /* 11 */ + default: + /* Just drop stuff if Q already set */ + break; + } +} + +static void phb3_msi_set_irq(void *opaque, int srcno, int val) +{ + Phb3MsiState *msi = PHB3_MSI(opaque); + + if (val) { + phb3_msi_try_send(msi, srcno, false); + } +} + + +void pnv_phb3_msi_send(Phb3MsiState *msi, uint64_t addr, uint16_t data, + int32_t dev_pe) +{ + ICSState *ics = ICS(msi); + uint64_t ive; + uint16_t pe; + uint32_t src = ((addr >> 4) & 0xffff) | (data & 0x1f); + + if (src >= ics->nr_irqs) { + qemu_log_mask(LOG_GUEST_ERROR, "MSI %d out of bounds", src); + return; + } + if (dev_pe >= 0) { + if (!phb3_msi_read_ive(msi->phb, src, &ive)) { + return; + } + pe = GETFIELD(IODA2_IVT_PE, ive); + if (pe != dev_pe) { + qemu_log_mask(LOG_GUEST_ERROR, + "MSI %d send by PE#%d but assigned to PE#%d", + src, dev_pe, pe); + return; + } + } + qemu_irq_pulse(msi->qirqs[src]); +} + +void pnv_phb3_msi_ffi(Phb3MsiState *msi, uint64_t val) +{ + /* Emit interrupt */ + pnv_phb3_msi_send(msi, val, 0, -1); + + /* Clear FFI lock */ + msi->phb->regs[PHB_FFI_LOCK >> 3] = 0; +} + +static void phb3_msi_reject(ICSState *ics, uint32_t nr) +{ + Phb3MsiState *msi = PHB3_MSI(ics); + unsigned int srcno = nr - ics->offset; + unsigned int idx = srcno >> 6; + unsigned int bit = 1ull << (srcno & 0x3f); + + assert(srcno < PHB3_MAX_MSI); + + msi->rba[idx] |= bit; + msi->rba_sum |= (1u << idx); +} + +static void phb3_msi_resend(ICSState *ics) +{ + Phb3MsiState *msi = PHB3_MSI(ics); + unsigned int i, j; + + if (msi->rba_sum == 0) { + return; + } + + for (i = 0; i < 32; i++) { + if ((msi->rba_sum & (1u << i)) == 0) { + continue; + } + msi->rba_sum &= ~(1u << i); + for (j = 0; j < 64; j++) { + if ((msi->rba[i] & (1ull << j)) == 0) { + continue; + } + msi->rba[i] &= ~(1ull << j); + phb3_msi_try_send(msi, i * 64 + j, true); + } + } +} + +static void phb3_msi_reset(DeviceState *dev) +{ + Phb3MsiState *msi = PHB3_MSI(dev); + ICSStateClass *icsc = ICS_GET_CLASS(dev); + + icsc->parent_reset(dev); + + memset(msi->rba, 0, sizeof(msi->rba)); + msi->rba_sum = 0; +} + +static void phb3_msi_reset_handler(void *dev) +{ + phb3_msi_reset(dev); +} + +void pnv_phb3_msi_update_config(Phb3MsiState *msi, uint32_t base, + uint32_t count) +{ + ICSState *ics = ICS(msi); + + if (count > PHB3_MAX_MSI) { + count = PHB3_MAX_MSI; + } + ics->nr_irqs = count; + ics->offset = base; +} + +static void phb3_msi_realize(DeviceState *dev, Error **errp) +{ + Phb3MsiState *msi = PHB3_MSI(dev); + ICSState *ics = ICS(msi); + ICSStateClass *icsc = ICS_GET_CLASS(ics); + Error *local_err = NULL; + + assert(msi->phb); + + icsc->parent_realize(dev, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + + msi->qirqs = qemu_allocate_irqs(phb3_msi_set_irq, msi, ics->nr_irqs); + + qemu_register_reset(phb3_msi_reset_handler, dev); +} + +static void phb3_msi_instance_init(Object *obj) +{ + Phb3MsiState *msi = PHB3_MSI(obj); + ICSState *ics = ICS(obj); + + object_property_add_link(obj, "phb", TYPE_PNV_PHB3, + (Object **)&msi->phb, + object_property_allow_set_link, + OBJ_PROP_LINK_STRONG); + + /* Will be overriden later */ + ics->offset = 0; +} + +static void 
phb3_msi_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + ICSStateClass *isc = ICS_CLASS(klass); + + device_class_set_parent_realize(dc, phb3_msi_realize, + &isc->parent_realize); + device_class_set_parent_reset(dc, phb3_msi_reset, + &isc->parent_reset); + + isc->reject = phb3_msi_reject; + isc->resend = phb3_msi_resend; +} + +static const TypeInfo phb3_msi_info = { + .name = TYPE_PHB3_MSI, + .parent = TYPE_ICS, + .instance_size = sizeof(Phb3MsiState), + .class_init = phb3_msi_class_init, + .class_size = sizeof(ICSStateClass), + .instance_init = phb3_msi_instance_init, +}; + +static void pnv_phb3_msi_register_types(void) +{ + type_register_static(&phb3_msi_info); +} + +type_init(pnv_phb3_msi_register_types); + +void pnv_phb3_msi_pic_print_info(Phb3MsiState *msi, Monitor *mon) +{ + ICSState *ics = ICS(msi); + int i; + + monitor_printf(mon, "ICS %4x..%4x %p\n", + ics->offset, ics->offset + ics->nr_irqs - 1, ics); + + for (i = 0; i < ics->nr_irqs; i++) { + uint64_t ive; + + if (!phb3_msi_read_ive(msi->phb, i, &ive)) { + return; + } + + if (GETFIELD(IODA2_IVT_PRIORITY, ive) == 0xff) { + continue; + } + + monitor_printf(mon, " %4x %c%c server=%04x prio=%02x gen=%d\n", + ics->offset + i, + GETFIELD(IODA2_IVT_P, ive) ? 'P' : '-', + GETFIELD(IODA2_IVT_Q, ive) ? 'Q' : '-', + (uint32_t) GETFIELD(IODA2_IVT_SERVER, ive) >> 2, + (uint32_t) GETFIELD(IODA2_IVT_PRIORITY, ive), + (uint32_t) GETFIELD(IODA2_IVT_GEN, ive)); + } +} diff --git a/hw/pci-host/pnv_phb3_pbcq.c b/hw/pci-host/pnv_phb3_pbcq.c new file mode 100644 index 000000000..a0526aa1e --- /dev/null +++ b/hw/pci-host/pnv_phb3_pbcq.c @@ -0,0 +1,358 @@ +/* + * QEMU PowerPC PowerNV (POWER8) PHB3 model + * + * Copyright (c) 2014-2020, IBM Corporation. + * + * This code is licensed under the GPL version 2 or later. See the + * COPYING file in the top-level directory. + */ +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu-common.h" +#include "qemu/log.h" +#include "target/ppc/cpu.h" +#include "hw/ppc/fdt.h" +#include "hw/pci-host/pnv_phb3_regs.h" +#include "hw/pci-host/pnv_phb3.h" +#include "hw/ppc/pnv.h" +#include "hw/ppc/pnv_xscom.h" +#include "hw/pci/pci_bridge.h" +#include "hw/pci/pci_bus.h" + +#include + +#define phb3_pbcq_error(pbcq, fmt, ...) \ + qemu_log_mask(LOG_GUEST_ERROR, "phb3_pbcq[%d:%d]: " fmt "\n", \ + (pbcq)->phb->chip_id, (pbcq)->phb->phb_id, ## __VA_ARGS__) + +static uint64_t pnv_pbcq_nest_xscom_read(void *opaque, hwaddr addr, + unsigned size) +{ + PnvPBCQState *pbcq = PNV_PBCQ(opaque); + uint32_t offset = addr >> 3; + + return pbcq->nest_regs[offset]; +} + +static uint64_t pnv_pbcq_pci_xscom_read(void *opaque, hwaddr addr, + unsigned size) +{ + PnvPBCQState *pbcq = PNV_PBCQ(opaque); + uint32_t offset = addr >> 3; + + return pbcq->pci_regs[offset]; +} + +static uint64_t pnv_pbcq_spci_xscom_read(void *opaque, hwaddr addr, + unsigned size) +{ + PnvPBCQState *pbcq = PNV_PBCQ(opaque); + uint32_t offset = addr >> 3; + + if (offset == PBCQ_SPCI_ASB_DATA) { + return pnv_phb3_reg_read(pbcq->phb, + pbcq->spci_regs[PBCQ_SPCI_ASB_ADDR], 8); + } + return pbcq->spci_regs[offset]; +} + +static void pnv_pbcq_update_map(PnvPBCQState *pbcq) +{ + uint64_t bar_en = pbcq->nest_regs[PBCQ_NEST_BAR_EN]; + uint64_t bar, mask, size; + + /* + * NOTE: This will really not work well if those are remapped + * after the PHB has created its sub regions. 
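/*
 * Editor's worked example (hypothetical helper): how the 14-bit
 * shifted BAR/mask pairs handled below turn into a window size. The
 * default PBCQ_NEST_MMIO_MASK0 of 0x3fff000000000ull << 14 gives
 * ~mask >> 14 = 2^36 - 1, i.e. a 64GB MMIO0 window.
 */
static inline uint64_t pbcq_window_size_sketch(uint64_t mask)
{
    return ((~mask) >> 14) + 1;
}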
We could do better + * if we had a way to resize regions but we don't really care + * that much in practice as the stuff below really only happens + * once early during boot + */ + + /* Handle unmaps */ + if (memory_region_is_mapped(&pbcq->mmbar0) && + !(bar_en & PBCQ_NEST_BAR_EN_MMIO0)) { + memory_region_del_subregion(get_system_memory(), &pbcq->mmbar0); + } + if (memory_region_is_mapped(&pbcq->mmbar1) && + !(bar_en & PBCQ_NEST_BAR_EN_MMIO1)) { + memory_region_del_subregion(get_system_memory(), &pbcq->mmbar1); + } + if (memory_region_is_mapped(&pbcq->phbbar) && + !(bar_en & PBCQ_NEST_BAR_EN_PHB)) { + memory_region_del_subregion(get_system_memory(), &pbcq->phbbar); + } + + /* Update PHB */ + pnv_phb3_update_regions(pbcq->phb); + + /* Handle maps */ + if (!memory_region_is_mapped(&pbcq->mmbar0) && + (bar_en & PBCQ_NEST_BAR_EN_MMIO0)) { + bar = pbcq->nest_regs[PBCQ_NEST_MMIO_BAR0] >> 14; + mask = pbcq->nest_regs[PBCQ_NEST_MMIO_MASK0]; + size = ((~mask) >> 14) + 1; + memory_region_init(&pbcq->mmbar0, OBJECT(pbcq), "pbcq-mmio0", size); + memory_region_add_subregion(get_system_memory(), bar, &pbcq->mmbar0); + pbcq->mmio0_base = bar; + pbcq->mmio0_size = size; + } + if (!memory_region_is_mapped(&pbcq->mmbar1) && + (bar_en & PBCQ_NEST_BAR_EN_MMIO1)) { + bar = pbcq->nest_regs[PBCQ_NEST_MMIO_BAR1] >> 14; + mask = pbcq->nest_regs[PBCQ_NEST_MMIO_MASK1]; + size = ((~mask) >> 14) + 1; + memory_region_init(&pbcq->mmbar1, OBJECT(pbcq), "pbcq-mmio1", size); + memory_region_add_subregion(get_system_memory(), bar, &pbcq->mmbar1); + pbcq->mmio1_base = bar; + pbcq->mmio1_size = size; + } + if (!memory_region_is_mapped(&pbcq->phbbar) + && (bar_en & PBCQ_NEST_BAR_EN_PHB)) { + bar = pbcq->nest_regs[PBCQ_NEST_PHB_BAR] >> 14; + size = 0x1000; + memory_region_init(&pbcq->phbbar, OBJECT(pbcq), "pbcq-phb", size); + memory_region_add_subregion(get_system_memory(), bar, &pbcq->phbbar); + } + + /* Update PHB */ + pnv_phb3_update_regions(pbcq->phb); +} + +static void pnv_pbcq_nest_xscom_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + PnvPBCQState *pbcq = PNV_PBCQ(opaque); + uint32_t reg = addr >> 3; + + switch (reg) { + case PBCQ_NEST_MMIO_BAR0: + case PBCQ_NEST_MMIO_BAR1: + case PBCQ_NEST_MMIO_MASK0: + case PBCQ_NEST_MMIO_MASK1: + if (pbcq->nest_regs[PBCQ_NEST_BAR_EN] & + (PBCQ_NEST_BAR_EN_MMIO0 | + PBCQ_NEST_BAR_EN_MMIO1)) { + phb3_pbcq_error(pbcq, "Changing enabled BAR unsupported"); + } + pbcq->nest_regs[reg] = val & 0xffffffffc0000000ull; + break; + case PBCQ_NEST_PHB_BAR: + if (pbcq->nest_regs[PBCQ_NEST_BAR_EN] & PBCQ_NEST_BAR_EN_PHB) { + phb3_pbcq_error(pbcq, "Changing enabled BAR unsupported"); + } + pbcq->nest_regs[reg] = val & 0xfffffffffc000000ull; + break; + case PBCQ_NEST_BAR_EN: + pbcq->nest_regs[reg] = val & 0xf800000000000000ull; + pnv_pbcq_update_map(pbcq); + pnv_phb3_remap_irqs(pbcq->phb); + break; + case PBCQ_NEST_IRSN_COMPARE: + case PBCQ_NEST_IRSN_MASK: + pbcq->nest_regs[reg] = val & PBCQ_NEST_IRSN_COMP; + pnv_phb3_remap_irqs(pbcq->phb); + break; + case PBCQ_NEST_LSI_SRC_ID: + pbcq->nest_regs[reg] = val & PBCQ_NEST_LSI_SRC; + pnv_phb3_remap_irqs(pbcq->phb); + break; + default: + phb3_pbcq_error(pbcq, "%s @0x%"HWADDR_PRIx"=%"PRIx64, __func__, + addr, val); + } +} + +static void pnv_pbcq_pci_xscom_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + PnvPBCQState *pbcq = PNV_PBCQ(opaque); + uint32_t reg = addr >> 3; + + switch (reg) { + case PBCQ_PCI_BAR2: + pbcq->pci_regs[reg] = val & 0xfffffffffc000000ull; + pnv_pbcq_update_map(pbcq); + break; + default: + 
phb3_pbcq_error(pbcq, "%s @0x%"HWADDR_PRIx"=%"PRIx64, __func__, + addr, val); + } +} + +static void pnv_pbcq_spci_xscom_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + PnvPBCQState *pbcq = PNV_PBCQ(opaque); + uint32_t reg = addr >> 3; + + switch (reg) { + case PBCQ_SPCI_ASB_ADDR: + pbcq->spci_regs[reg] = val & 0xfff; + break; + case PBCQ_SPCI_ASB_STATUS: + pbcq->spci_regs[reg] &= ~val; + break; + case PBCQ_SPCI_ASB_DATA: + pnv_phb3_reg_write(pbcq->phb, pbcq->spci_regs[PBCQ_SPCI_ASB_ADDR], + val, 8); + break; + case PBCQ_SPCI_AIB_CAPP_EN: + case PBCQ_SPCI_CAPP_SEC_TMR: + break; + default: + phb3_pbcq_error(pbcq, "%s @0x%"HWADDR_PRIx"=%"PRIx64, __func__, + addr, val); + } +} + +static const MemoryRegionOps pnv_pbcq_nest_xscom_ops = { + .read = pnv_pbcq_nest_xscom_read, + .write = pnv_pbcq_nest_xscom_write, + .valid.min_access_size = 8, + .valid.max_access_size = 8, + .impl.min_access_size = 8, + .impl.max_access_size = 8, + .endianness = DEVICE_BIG_ENDIAN, +}; + +static const MemoryRegionOps pnv_pbcq_pci_xscom_ops = { + .read = pnv_pbcq_pci_xscom_read, + .write = pnv_pbcq_pci_xscom_write, + .valid.min_access_size = 8, + .valid.max_access_size = 8, + .impl.min_access_size = 8, + .impl.max_access_size = 8, + .endianness = DEVICE_BIG_ENDIAN, +}; + +static const MemoryRegionOps pnv_pbcq_spci_xscom_ops = { + .read = pnv_pbcq_spci_xscom_read, + .write = pnv_pbcq_spci_xscom_write, + .valid.min_access_size = 8, + .valid.max_access_size = 8, + .impl.min_access_size = 8, + .impl.max_access_size = 8, + .endianness = DEVICE_BIG_ENDIAN, +}; + +static void pnv_pbcq_default_bars(PnvPBCQState *pbcq) +{ + uint64_t mm0, mm1, reg; + PnvPHB3 *phb = pbcq->phb; + + mm0 = 0x3d00000000000ull + 0x4000000000ull * phb->chip_id + + 0x1000000000ull * phb->phb_id; + mm1 = 0x3ff8000000000ull + 0x0200000000ull * phb->chip_id + + 0x0080000000ull * phb->phb_id; + reg = 0x3fffe40000000ull + 0x0000400000ull * phb->chip_id + + 0x0000100000ull * phb->phb_id; + + pbcq->nest_regs[PBCQ_NEST_MMIO_BAR0] = mm0 << 14; + pbcq->nest_regs[PBCQ_NEST_MMIO_BAR1] = mm1 << 14; + pbcq->nest_regs[PBCQ_NEST_PHB_BAR] = reg << 14; + pbcq->nest_regs[PBCQ_NEST_MMIO_MASK0] = 0x3fff000000000ull << 14; + pbcq->nest_regs[PBCQ_NEST_MMIO_MASK1] = 0x3ffff80000000ull << 14; + pbcq->pci_regs[PBCQ_PCI_BAR2] = reg << 14; +} + +static void pnv_pbcq_realize(DeviceState *dev, Error **errp) +{ + PnvPBCQState *pbcq = PNV_PBCQ(dev); + PnvPHB3 *phb; + char name[32]; + + assert(pbcq->phb); + phb = pbcq->phb; + + /* TODO: Fix OPAL to do that: establish default BAR values */ + pnv_pbcq_default_bars(pbcq); + + /* Initialize the XSCOM region for the PBCQ registers */ + snprintf(name, sizeof(name), "xscom-pbcq-nest-%d.%d", + phb->chip_id, phb->phb_id); + pnv_xscom_region_init(&pbcq->xscom_nest_regs, OBJECT(dev), + &pnv_pbcq_nest_xscom_ops, pbcq, name, + PNV_XSCOM_PBCQ_NEST_SIZE); + snprintf(name, sizeof(name), "xscom-pbcq-pci-%d.%d", + phb->chip_id, phb->phb_id); + pnv_xscom_region_init(&pbcq->xscom_pci_regs, OBJECT(dev), + &pnv_pbcq_pci_xscom_ops, pbcq, name, + PNV_XSCOM_PBCQ_PCI_SIZE); + snprintf(name, sizeof(name), "xscom-pbcq-spci-%d.%d", + phb->chip_id, phb->phb_id); + pnv_xscom_region_init(&pbcq->xscom_spci_regs, OBJECT(dev), + &pnv_pbcq_spci_xscom_ops, pbcq, name, + PNV_XSCOM_PBCQ_SPCI_SIZE); +} + +static int pnv_pbcq_dt_xscom(PnvXScomInterface *dev, void *fdt, + int xscom_offset) +{ + const char compat[] = "ibm,power8-pbcq"; + PnvPHB3 *phb = PNV_PBCQ(dev)->phb; + char *name; + int offset; + uint32_t lpc_pcba = PNV_XSCOM_PBCQ_NEST_BASE + 0x400 * 
phb->phb_id; + uint32_t reg[] = { + cpu_to_be32(lpc_pcba), + cpu_to_be32(PNV_XSCOM_PBCQ_NEST_SIZE), + cpu_to_be32(PNV_XSCOM_PBCQ_PCI_BASE + 0x400 * phb->phb_id), + cpu_to_be32(PNV_XSCOM_PBCQ_PCI_SIZE), + cpu_to_be32(PNV_XSCOM_PBCQ_SPCI_BASE + 0x040 * phb->phb_id), + cpu_to_be32(PNV_XSCOM_PBCQ_SPCI_SIZE) + }; + + name = g_strdup_printf("pbcq@%x", lpc_pcba); + offset = fdt_add_subnode(fdt, xscom_offset, name); + _FDT(offset); + g_free(name); + + _FDT((fdt_setprop(fdt, offset, "reg", reg, sizeof(reg)))); + + _FDT((fdt_setprop_cell(fdt, offset, "ibm,phb-index", phb->phb_id))); + _FDT((fdt_setprop_cell(fdt, offset, "ibm,chip-id", phb->chip_id))); + _FDT((fdt_setprop(fdt, offset, "compatible", compat, + sizeof(compat)))); + return 0; +} + +static void phb3_pbcq_instance_init(Object *obj) +{ + PnvPBCQState *pbcq = PNV_PBCQ(obj); + + object_property_add_link(obj, "phb", TYPE_PNV_PHB3, + (Object **)&pbcq->phb, + object_property_allow_set_link, + OBJ_PROP_LINK_STRONG); +} + +static void pnv_pbcq_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PnvXScomInterfaceClass *xdc = PNV_XSCOM_INTERFACE_CLASS(klass); + + xdc->dt_xscom = pnv_pbcq_dt_xscom; + + dc->realize = pnv_pbcq_realize; + dc->user_creatable = false; +} + +static const TypeInfo pnv_pbcq_type_info = { + .name = TYPE_PNV_PBCQ, + .parent = TYPE_DEVICE, + .instance_size = sizeof(PnvPBCQState), + .instance_init = phb3_pbcq_instance_init, + .class_init = pnv_pbcq_class_init, + .interfaces = (InterfaceInfo[]) { + { TYPE_PNV_XSCOM_INTERFACE }, + { } + } +}; + +static void pnv_pbcq_register_types(void) +{ + type_register_static(&pnv_pbcq_type_info); +} + +type_init(pnv_pbcq_register_types) diff --git a/hw/pci-host/pnv_phb4.c b/hw/pci-host/pnv_phb4.c new file mode 100644 index 000000000..5c375a9f2 --- /dev/null +++ b/hw/pci-host/pnv_phb4.c @@ -0,0 +1,1437 @@ +/* + * QEMU PowerPC PowerNV (POWER9) PHB4 model + * + * Copyright (c) 2018-2020, IBM Corporation. + * + * This code is licensed under the GPL version 2 or later. See the + * COPYING file in the top-level directory. + */ +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "qapi/visitor.h" +#include "qapi/error.h" +#include "qemu-common.h" +#include "monitor/monitor.h" +#include "target/ppc/cpu.h" +#include "hw/pci-host/pnv_phb4_regs.h" +#include "hw/pci-host/pnv_phb4.h" +#include "hw/pci/pcie_host.h" +#include "hw/pci/pcie_port.h" +#include "hw/ppc/pnv.h" +#include "hw/ppc/pnv_xscom.h" +#include "hw/irq.h" +#include "hw/qdev-properties.h" +#include "qom/object.h" +#include "trace.h" + +#define phb_error(phb, fmt, ...) \ + qemu_log_mask(LOG_GUEST_ERROR, "phb4[%d:%d]: " fmt "\n", \ + (phb)->chip_id, (phb)->phb_id, ## __VA_ARGS__) + +/* + * QEMU version of the GETFIELD/SETFIELD macros + * + * These are common with the PnvXive model. 
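/*
 * Editor's usage example (hypothetical, relies on the GETFIELD and
 * SETFIELD definitions just below): with mask = 0x0000ff0000000000ull
 * the field occupies bits 40..47, GETFIELD() shifts it down to bits
 * 0..7, and SETFIELD() writes it back into place.
 */
static inline uint64_t getfield_demo_sketch(uint64_t word)
{
    const uint64_t mask = 0x0000ff0000000000ull;

    /* Extract the field, bump it, and reinsert it */
    return SETFIELD(mask, word, GETFIELD(mask, word) + 1);
}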
+ */ +static inline uint64_t GETFIELD(uint64_t mask, uint64_t word) +{ + return (word & mask) >> ctz64(mask); +} + +static inline uint64_t SETFIELD(uint64_t mask, uint64_t word, + uint64_t value) +{ + return (word & ~mask) | ((value << ctz64(mask)) & mask); +} + +static PCIDevice *pnv_phb4_find_cfg_dev(PnvPHB4 *phb) +{ + PCIHostState *pci = PCI_HOST_BRIDGE(phb); + uint64_t addr = phb->regs[PHB_CONFIG_ADDRESS >> 3]; + uint8_t bus, devfn; + + if (!(addr >> 63)) { + return NULL; + } + bus = (addr >> 52) & 0xff; + devfn = (addr >> 44) & 0xff; + + /* We don't access the root complex this way */ + if (bus == 0 && devfn == 0) { + return NULL; + } + return pci_find_device(pci->bus, bus, devfn); +} + +/* + * The CONFIG_DATA register expects little endian accesses, but as the + * region is big endian, we have to swap the value. + */ +static void pnv_phb4_config_write(PnvPHB4 *phb, unsigned off, + unsigned size, uint64_t val) +{ + uint32_t cfg_addr, limit; + PCIDevice *pdev; + + pdev = pnv_phb4_find_cfg_dev(phb); + if (!pdev) { + return; + } + cfg_addr = (phb->regs[PHB_CONFIG_ADDRESS >> 3] >> 32) & 0xffc; + cfg_addr |= off; + limit = pci_config_size(pdev); + if (limit <= cfg_addr) { + /* + * conventional pci device can be behind pcie-to-pci bridge. + * 256 <= addr < 4K has no effects. + */ + return; + } + switch (size) { + case 1: + break; + case 2: + val = bswap16(val); + break; + case 4: + val = bswap32(val); + break; + default: + g_assert_not_reached(); + } + pci_host_config_write_common(pdev, cfg_addr, limit, val, size); +} + +static uint64_t pnv_phb4_config_read(PnvPHB4 *phb, unsigned off, + unsigned size) +{ + uint32_t cfg_addr, limit; + PCIDevice *pdev; + uint64_t val; + + pdev = pnv_phb4_find_cfg_dev(phb); + if (!pdev) { + return ~0ull; + } + cfg_addr = (phb->regs[PHB_CONFIG_ADDRESS >> 3] >> 32) & 0xffc; + cfg_addr |= off; + limit = pci_config_size(pdev); + if (limit <= cfg_addr) { + /* + * conventional pci device can be behind pcie-to-pci bridge. + * 256 <= addr < 4K has no effects. + */ + return ~0ull; + } + val = pci_host_config_read_common(pdev, cfg_addr, limit, size); + switch (size) { + case 1: + return val; + case 2: + return bswap16(val); + case 4: + return bswap32(val); + default: + g_assert_not_reached(); + } +} + +/* + * Root complex register accesses are memory mapped. + */ +static void pnv_phb4_rc_config_write(PnvPHB4 *phb, unsigned off, + unsigned size, uint64_t val) +{ + PCIHostState *pci = PCI_HOST_BRIDGE(phb); + PCIDevice *pdev; + + if (size != 4) { + phb_error(phb, "rc_config_write invalid size %d\n", size); + return; + } + + pdev = pci_find_device(pci->bus, 0, 0); + assert(pdev); + + pci_host_config_write_common(pdev, off, PHB_RC_CONFIG_SIZE, + bswap32(val), 4); +} + +static uint64_t pnv_phb4_rc_config_read(PnvPHB4 *phb, unsigned off, + unsigned size) +{ + PCIHostState *pci = PCI_HOST_BRIDGE(phb); + PCIDevice *pdev; + uint64_t val; + + if (size != 4) { + phb_error(phb, "rc_config_read invalid size %d\n", size); + return ~0ull; + } + + pdev = pci_find_device(pci->bus, 0, 0); + assert(pdev); + + val = pci_host_config_read_common(pdev, off, PHB_RC_CONFIG_SIZE, 4); + return bswap32(val); +} + +static void pnv_phb4_check_mbt(PnvPHB4 *phb, uint32_t index) +{ + uint64_t base, start, size, mbe0, mbe1; + MemoryRegion *parent; + char name[64]; + + /* Unmap first */ + if (memory_region_is_mapped(&phb->mr_mmio[index])) { + /* Should we destroy it in RCU friendly way... ? 
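/*
 * Editor's sketch (hypothetical helper): the MBT sizing done by
 * pnv_phb4_check_mbt() below treats the decoded mask as the two's
 * complement of the window size. E.g. a decoded mask of
 * 0x00fffffffff00000, once the top byte is forced to 0xff, negates
 * to a 1MB window.
 */
static inline uint64_t phb4_mbt_size_sketch(uint64_t decoded_mask)
{
    uint64_t size = decoded_mask | 0xff00000000000000ull;

    return ~size + 1;
}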
*/ + memory_region_del_subregion(phb->mr_mmio[index].container, + &phb->mr_mmio[index]); + } + + /* Get table entry */ + mbe0 = phb->ioda_MBT[(index << 1)]; + mbe1 = phb->ioda_MBT[(index << 1) + 1]; + + if (!(mbe0 & IODA3_MBT0_ENABLE)) { + return; + } + + /* Grab geometry from registers */ + base = GETFIELD(IODA3_MBT0_BASE_ADDR, mbe0) << 12; + size = GETFIELD(IODA3_MBT1_MASK, mbe1) << 12; + size |= 0xff00000000000000ull; + size = ~size + 1; + + /* Calculate PCI side start address based on M32/M64 window type */ + if (mbe0 & IODA3_MBT0_TYPE_M32) { + start = phb->regs[PHB_M32_START_ADDR >> 3]; + if ((start + size) > 0x100000000ull) { + phb_error(phb, "M32 set beyond 4GB boundary !"); + size = 0x100000000 - start; + } + } else { + start = base | (phb->regs[PHB_M64_UPPER_BITS >> 3]); + } + + /* TODO: Figure out how to implemet/decode AOMASK */ + + /* Check if it matches an enabled MMIO region in the PEC stack */ + if (memory_region_is_mapped(&phb->stack->mmbar0) && + base >= phb->stack->mmio0_base && + (base + size) <= (phb->stack->mmio0_base + phb->stack->mmio0_size)) { + parent = &phb->stack->mmbar0; + base -= phb->stack->mmio0_base; + } else if (memory_region_is_mapped(&phb->stack->mmbar1) && + base >= phb->stack->mmio1_base && + (base + size) <= (phb->stack->mmio1_base + phb->stack->mmio1_size)) { + parent = &phb->stack->mmbar1; + base -= phb->stack->mmio1_base; + } else { + phb_error(phb, "PHB MBAR %d out of parent bounds", index); + return; + } + + /* Create alias (better name ?) */ + snprintf(name, sizeof(name), "phb4-mbar%d", index); + memory_region_init_alias(&phb->mr_mmio[index], OBJECT(phb), name, + &phb->pci_mmio, start, size); + memory_region_add_subregion(parent, base, &phb->mr_mmio[index]); +} + +static void pnv_phb4_check_all_mbt(PnvPHB4 *phb) +{ + uint64_t i; + uint32_t num_windows = phb->big_phb ? PNV_PHB4_MAX_MMIO_WINDOWS : + PNV_PHB4_MIN_MMIO_WINDOWS; + + for (i = 0; i < num_windows; i++) { + pnv_phb4_check_mbt(phb, i); + } +} + +static uint64_t *pnv_phb4_ioda_access(PnvPHB4 *phb, + unsigned *out_table, unsigned *out_idx) +{ + uint64_t adreg = phb->regs[PHB_IODA_ADDR >> 3]; + unsigned int index = GETFIELD(PHB_IODA_AD_TADR, adreg); + unsigned int table = GETFIELD(PHB_IODA_AD_TSEL, adreg); + unsigned int mask; + uint64_t *tptr = NULL; + + switch (table) { + case IODA3_TBL_LIST: + tptr = phb->ioda_LIST; + mask = 7; + break; + case IODA3_TBL_MIST: + tptr = phb->ioda_MIST; + mask = phb->big_phb ? PNV_PHB4_MAX_MIST : (PNV_PHB4_MAX_MIST >> 1); + mask -= 1; + break; + case IODA3_TBL_RCAM: + mask = phb->big_phb ? 127 : 63; + break; + case IODA3_TBL_MRT: + mask = phb->big_phb ? 15 : 7; + break; + case IODA3_TBL_PESTA: + case IODA3_TBL_PESTB: + mask = phb->big_phb ? PNV_PHB4_MAX_PEs : (PNV_PHB4_MAX_PEs >> 1); + mask -= 1; + break; + case IODA3_TBL_TVT: + tptr = phb->ioda_TVT; + mask = phb->big_phb ? PNV_PHB4_MAX_TVEs : (PNV_PHB4_MAX_TVEs >> 1); + mask -= 1; + break; + case IODA3_TBL_TCR: + case IODA3_TBL_TDR: + mask = phb->big_phb ? 1023 : 511; + break; + case IODA3_TBL_MBT: + tptr = phb->ioda_MBT; + mask = phb->big_phb ? PNV_PHB4_MAX_MBEs : (PNV_PHB4_MAX_MBEs >> 1); + mask -= 1; + break; + case IODA3_TBL_MDT: + tptr = phb->ioda_MDT; + mask = phb->big_phb ? PNV_PHB4_MAX_PEs : (PNV_PHB4_MAX_PEs >> 1); + mask -= 1; + break; + case IODA3_TBL_PEEV: + tptr = phb->ioda_PEEV; + mask = phb->big_phb ? 
PNV_PHB4_MAX_PEEVs : (PNV_PHB4_MAX_PEEVs >> 1); + mask -= 1; + break; + default: + phb_error(phb, "invalid IODA table %d", table); + return NULL; + } + index &= mask; + if (out_idx) { + *out_idx = index; + } + if (out_table) { + *out_table = table; + } + if (tptr) { + tptr += index; + } + if (adreg & PHB_IODA_AD_AUTOINC) { + index = (index + 1) & mask; + adreg = SETFIELD(PHB_IODA_AD_TADR, adreg, index); + } + + phb->regs[PHB_IODA_ADDR >> 3] = adreg; + return tptr; +} + +static uint64_t pnv_phb4_ioda_read(PnvPHB4 *phb) +{ + unsigned table, idx; + uint64_t *tptr; + + tptr = pnv_phb4_ioda_access(phb, &table, &idx); + if (!tptr) { + /* Special PESTA case */ + if (table == IODA3_TBL_PESTA) { + return ((uint64_t)(phb->ioda_PEST_AB[idx] & 1)) << 63; + } else if (table == IODA3_TBL_PESTB) { + return ((uint64_t)(phb->ioda_PEST_AB[idx] & 2)) << 62; + } + /* Return 0 on unsupported tables, not ff's */ + return 0; + } + return *tptr; +} + +static void pnv_phb4_ioda_write(PnvPHB4 *phb, uint64_t val) +{ + unsigned table, idx; + uint64_t *tptr; + + tptr = pnv_phb4_ioda_access(phb, &table, &idx); + if (!tptr) { + /* Special PESTA case */ + if (table == IODA3_TBL_PESTA) { + phb->ioda_PEST_AB[idx] &= ~1; + phb->ioda_PEST_AB[idx] |= (val >> 63) & 1; + } else if (table == IODA3_TBL_PESTB) { + phb->ioda_PEST_AB[idx] &= ~2; + phb->ioda_PEST_AB[idx] |= (val >> 62) & 2; + } + return; + } + + /* Handle side effects */ + switch (table) { + case IODA3_TBL_LIST: + break; + case IODA3_TBL_MIST: { + /* Special mask for MIST partial write */ + uint64_t adreg = phb->regs[PHB_IODA_ADDR >> 3]; + uint32_t mmask = GETFIELD(PHB_IODA_AD_MIST_PWV, adreg); + uint64_t v = *tptr; + if (mmask == 0) { + mmask = 0xf; + } + if (mmask & 8) { + v &= 0x0000ffffffffffffull; + v |= 0xcfff000000000000ull & val; + } + if (mmask & 4) { + v &= 0xffff0000ffffffffull; + v |= 0x0000cfff00000000ull & val; + } + if (mmask & 2) { + v &= 0xffffffff0000ffffull; + v |= 0x00000000cfff0000ull & val; + } + if (mmask & 1) { + v &= 0xffffffffffff0000ull; + v |= 0x000000000000cfffull & val; + } + *tptr = v; + break; + } + case IODA3_TBL_MBT: + *tptr = val; + + /* Copy accross the valid bit to the other half */ + phb->ioda_MBT[idx ^ 1] &= 0x7fffffffffffffffull; + phb->ioda_MBT[idx ^ 1] |= 0x8000000000000000ull & val; + + /* Update mappings */ + pnv_phb4_check_mbt(phb, idx >> 1); + break; + default: + *tptr = val; + } +} + +static void pnv_phb4_rtc_invalidate(PnvPHB4 *phb, uint64_t val) +{ + PnvPhb4DMASpace *ds; + + /* Always invalidate all for now ... 
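/*
 * Editor's sketch of the MIST partial-write rule implemented above
 * (hypothetical helper): PHB_IODA_AD_MIST_PWV selects which 16-bit
 * quarters of the 64-bit entry are updated (0 means all four), and
 * only the 0xcfff subset of each quarter is writable.
 */
static inline uint64_t phb4_mist_merge_sketch(uint64_t old, uint64_t val,
                                              uint32_t mmask)
{
    uint64_t v = old;
    int i;

    if (mmask == 0) {
        mmask = 0xf;
    }
    for (i = 0; i < 4; i++) {
        if (mmask & (1u << i)) {
            v &= ~(0xffffull << (16 * i));       /* clear the quarter */
            v |= val & (0xcfffull << (16 * i));  /* keep writable bits only */
        }
    }
    return v;
}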
*/ + QLIST_FOREACH(ds, &phb->dma_spaces, list) { + ds->pe_num = PHB_INVALID_PE; + } +} + +static void pnv_phb4_update_msi_regions(PnvPhb4DMASpace *ds) +{ + uint64_t cfg = ds->phb->regs[PHB_PHB4_CONFIG >> 3]; + + if (cfg & PHB_PHB4C_32BIT_MSI_EN) { + if (!memory_region_is_mapped(MEMORY_REGION(&ds->msi32_mr))) { + memory_region_add_subregion(MEMORY_REGION(&ds->dma_mr), + 0xffff0000, &ds->msi32_mr); + } + } else { + if (memory_region_is_mapped(MEMORY_REGION(&ds->msi32_mr))) { + memory_region_del_subregion(MEMORY_REGION(&ds->dma_mr), + &ds->msi32_mr); + } + } + + if (cfg & PHB_PHB4C_64BIT_MSI_EN) { + if (!memory_region_is_mapped(MEMORY_REGION(&ds->msi64_mr))) { + memory_region_add_subregion(MEMORY_REGION(&ds->dma_mr), + (1ull << 60), &ds->msi64_mr); + } + } else { + if (memory_region_is_mapped(MEMORY_REGION(&ds->msi64_mr))) { + memory_region_del_subregion(MEMORY_REGION(&ds->dma_mr), + &ds->msi64_mr); + } + } +} + +static void pnv_phb4_update_all_msi_regions(PnvPHB4 *phb) +{ + PnvPhb4DMASpace *ds; + + QLIST_FOREACH(ds, &phb->dma_spaces, list) { + pnv_phb4_update_msi_regions(ds); + } +} + +static void pnv_phb4_update_xsrc(PnvPHB4 *phb) +{ + int shift, flags, i, lsi_base; + XiveSource *xsrc = &phb->xsrc; + + /* The XIVE source characteristics can be set at run time */ + if (phb->regs[PHB_CTRLR >> 3] & PHB_CTRLR_IRQ_PGSZ_64K) { + shift = XIVE_ESB_64K; + } else { + shift = XIVE_ESB_4K; + } + if (phb->regs[PHB_CTRLR >> 3] & PHB_CTRLR_IRQ_STORE_EOI) { + flags = XIVE_SRC_STORE_EOI; + } else { + flags = 0; + } + + phb->xsrc.esb_shift = shift; + phb->xsrc.esb_flags = flags; + + lsi_base = GETFIELD(PHB_LSI_SRC_ID, phb->regs[PHB_LSI_SOURCE_ID >> 3]); + lsi_base <<= 3; + + /* TODO: handle reset values of PHB_LSI_SRC_ID */ + if (!lsi_base) { + return; + } + + /* TODO: need a xive_source_irq_reset_lsi() */ + bitmap_zero(xsrc->lsi_map, xsrc->nr_irqs); + + for (i = 0; i < xsrc->nr_irqs; i++) { + bool msi = (i < lsi_base || i >= (lsi_base + 8)); + if (!msi) { + xive_source_irq_set_lsi(xsrc, i); + } + } +} + +static void pnv_phb4_reg_write(void *opaque, hwaddr off, uint64_t val, + unsigned size) +{ + PnvPHB4 *phb = PNV_PHB4(opaque); + bool changed; + + /* Special case outbound configuration data */ + if ((off & 0xfffc) == PHB_CONFIG_DATA) { + pnv_phb4_config_write(phb, off & 0x3, size, val); + return; + } + + /* Special case RC configuration space */ + if ((off & 0xf800) == PHB_RC_CONFIG_BASE) { + pnv_phb4_rc_config_write(phb, off & 0x7ff, size, val); + return; + } + + /* Other registers are 64-bit only */ + if (size != 8 || off & 0x7) { + phb_error(phb, "Invalid register access, offset: 0x%"PRIx64" size: %d", + off, size); + return; + } + + /* Handle masking */ + switch (off) { + case PHB_LSI_SOURCE_ID: + val &= PHB_LSI_SRC_ID; + break; + case PHB_M64_UPPER_BITS: + val &= 0xff00000000000000ull; + break; + /* TCE Kill */ + case PHB_TCE_KILL: + /* Clear top 3 bits which HW does to indicate successful queuing */ + val &= ~(PHB_TCE_KILL_ALL | PHB_TCE_KILL_PE | PHB_TCE_KILL_ONE); + break; + case PHB_Q_DMA_R: + /* + * This is enough logic to make SW happy but we aren't + * actually quiescing the DMAs + */ + if (val & PHB_Q_DMA_R_AUTORESET) { + val = 0; + } else { + val &= PHB_Q_DMA_R_QUIESCE_DMA; + } + break; + /* LEM stuff */ + case PHB_LEM_FIR_AND_MASK: + phb->regs[PHB_LEM_FIR_ACCUM >> 3] &= val; + return; + case PHB_LEM_FIR_OR_MASK: + phb->regs[PHB_LEM_FIR_ACCUM >> 3] |= val; + return; + case PHB_LEM_ERROR_AND_MASK: + phb->regs[PHB_LEM_ERROR_MASK >> 3] &= val; + return; + case PHB_LEM_ERROR_OR_MASK: + 
phb->regs[PHB_LEM_ERROR_MASK >> 3] |= val; + return; + case PHB_LEM_WOF: + val = 0; + break; + /* TODO: More regs ..., maybe create a table with masks... */ + + /* Read only registers */ + case PHB_CPU_LOADSTORE_STATUS: + case PHB_ETU_ERR_SUMMARY: + case PHB_PHB4_GEN_CAP: + case PHB_PHB4_TCE_CAP: + case PHB_PHB4_IRQ_CAP: + case PHB_PHB4_EEH_CAP: + return; + } + + /* Record whether it changed */ + changed = phb->regs[off >> 3] != val; + + /* Store in register cache first */ + phb->regs[off >> 3] = val; + + /* Handle side effects */ + switch (off) { + case PHB_PHB4_CONFIG: + if (changed) { + pnv_phb4_update_all_msi_regions(phb); + } + break; + case PHB_M32_START_ADDR: + case PHB_M64_UPPER_BITS: + if (changed) { + pnv_phb4_check_all_mbt(phb); + } + break; + + /* IODA table accesses */ + case PHB_IODA_DATA0: + pnv_phb4_ioda_write(phb, val); + break; + + /* RTC invalidation */ + case PHB_RTC_INVALIDATE: + pnv_phb4_rtc_invalidate(phb, val); + break; + + /* PHB Control (Affects XIVE source) */ + case PHB_CTRLR: + case PHB_LSI_SOURCE_ID: + pnv_phb4_update_xsrc(phb); + break; + + /* Silent simple writes */ + case PHB_ASN_CMPM: + case PHB_CONFIG_ADDRESS: + case PHB_IODA_ADDR: + case PHB_TCE_KILL: + case PHB_TCE_SPEC_CTL: + case PHB_PEST_BAR: + case PHB_PELTV_BAR: + case PHB_RTT_BAR: + case PHB_LEM_FIR_ACCUM: + case PHB_LEM_ERROR_MASK: + case PHB_LEM_ACTION0: + case PHB_LEM_ACTION1: + case PHB_TCE_TAG_ENABLE: + case PHB_INT_NOTIFY_ADDR: + case PHB_INT_NOTIFY_INDEX: + case PHB_DMARD_SYNC: + break; + + /* Noise on anything else */ + default: + qemu_log_mask(LOG_UNIMP, "phb4: reg_write 0x%"PRIx64"=%"PRIx64"\n", + off, val); + } +} + +static uint64_t pnv_phb4_reg_read(void *opaque, hwaddr off, unsigned size) +{ + PnvPHB4 *phb = PNV_PHB4(opaque); + uint64_t val; + + if ((off & 0xfffc) == PHB_CONFIG_DATA) { + return pnv_phb4_config_read(phb, off & 0x3, size); + } + + /* Special case RC configuration space */ + if ((off & 0xf800) == PHB_RC_CONFIG_BASE) { + return pnv_phb4_rc_config_read(phb, off & 0x7ff, size); + } + + /* Other registers are 64-bit only */ + if (size != 8 || off & 0x7) { + phb_error(phb, "Invalid register access, offset: 0x%"PRIx64" size: %d", + off, size); + return ~0ull; + } + + /* Default read from cache */ + val = phb->regs[off >> 3]; + + switch (off) { + case PHB_VERSION: + return phb->version; + + /* Read-only */ + case PHB_PHB4_GEN_CAP: + return 0xe4b8000000000000ull; + case PHB_PHB4_TCE_CAP: + return phb->big_phb ? 0x4008440000000400ull : 0x2008440000000200ull; + case PHB_PHB4_IRQ_CAP: + return phb->big_phb ? 0x0800000000001000ull : 0x0800000000000800ull; + case PHB_PHB4_EEH_CAP: + return phb->big_phb ? 0x2000000000000000ull : 0x1000000000000000ull; + + /* IODA table accesses */ + case PHB_IODA_DATA0: + return pnv_phb4_ioda_read(phb); + + /* Link training always appears trained */ + case PHB_PCIE_DLP_TRAIN_CTL: + /* TODO: Do something sensible with speed ? 
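/*
 * Editor's sketch (hypothetical helper): the LEM AND/OR mask registers
 * handled in pnv_phb4_reg_write() above let firmware clear or set bits
 * in the FIR/error-mask accumulators without a read-modify-write.
 */
static inline uint64_t phb4_lem_apply_sketch(uint64_t accum,
                                             uint64_t and_mask,
                                             uint64_t or_mask)
{
    return (accum & and_mask) | or_mask;
}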
*/ + return PHB_PCIE_DLP_INBAND_PRESENCE | PHB_PCIE_DLP_TL_LINKACT; + + /* DMA read sync: make it look like it's complete */ + case PHB_DMARD_SYNC: + return PHB_DMARD_SYNC_COMPLETE; + + /* Silent simple reads */ + case PHB_LSI_SOURCE_ID: + case PHB_CPU_LOADSTORE_STATUS: + case PHB_ASN_CMPM: + case PHB_PHB4_CONFIG: + case PHB_M32_START_ADDR: + case PHB_CONFIG_ADDRESS: + case PHB_IODA_ADDR: + case PHB_RTC_INVALIDATE: + case PHB_TCE_KILL: + case PHB_TCE_SPEC_CTL: + case PHB_PEST_BAR: + case PHB_PELTV_BAR: + case PHB_RTT_BAR: + case PHB_M64_UPPER_BITS: + case PHB_CTRLR: + case PHB_LEM_FIR_ACCUM: + case PHB_LEM_ERROR_MASK: + case PHB_LEM_ACTION0: + case PHB_LEM_ACTION1: + case PHB_TCE_TAG_ENABLE: + case PHB_INT_NOTIFY_ADDR: + case PHB_INT_NOTIFY_INDEX: + case PHB_Q_DMA_R: + case PHB_ETU_ERR_SUMMARY: + break; + + /* Noise on anything else */ + default: + qemu_log_mask(LOG_UNIMP, "phb4: reg_read 0x%"PRIx64"=%"PRIx64"\n", + off, val); + } + return val; +} + +static const MemoryRegionOps pnv_phb4_reg_ops = { + .read = pnv_phb4_reg_read, + .write = pnv_phb4_reg_write, + .valid.min_access_size = 1, + .valid.max_access_size = 8, + .impl.min_access_size = 1, + .impl.max_access_size = 8, + .endianness = DEVICE_BIG_ENDIAN, +}; + +static uint64_t pnv_phb4_xscom_read(void *opaque, hwaddr addr, unsigned size) +{ + PnvPHB4 *phb = PNV_PHB4(opaque); + uint32_t reg = addr >> 3; + uint64_t val; + hwaddr offset; + + switch (reg) { + case PHB_SCOM_HV_IND_ADDR: + return phb->scom_hv_ind_addr_reg; + + case PHB_SCOM_HV_IND_DATA: + if (!(phb->scom_hv_ind_addr_reg & PHB_SCOM_HV_IND_ADDR_VALID)) { + phb_error(phb, "Invalid indirect address"); + return ~0ull; + } + size = (phb->scom_hv_ind_addr_reg & PHB_SCOM_HV_IND_ADDR_4B) ? 4 : 8; + offset = GETFIELD(PHB_SCOM_HV_IND_ADDR_ADDR, phb->scom_hv_ind_addr_reg); + val = pnv_phb4_reg_read(phb, offset, size); + if (phb->scom_hv_ind_addr_reg & PHB_SCOM_HV_IND_ADDR_AUTOINC) { + offset += size; + offset &= 0x3fff; + phb->scom_hv_ind_addr_reg = SETFIELD(PHB_SCOM_HV_IND_ADDR_ADDR, + phb->scom_hv_ind_addr_reg, + offset); + } + return val; + case PHB_SCOM_ETU_LEM_FIR: + case PHB_SCOM_ETU_LEM_FIR_AND: + case PHB_SCOM_ETU_LEM_FIR_OR: + case PHB_SCOM_ETU_LEM_FIR_MSK: + case PHB_SCOM_ETU_LEM_ERR_MSK_AND: + case PHB_SCOM_ETU_LEM_ERR_MSK_OR: + case PHB_SCOM_ETU_LEM_ACT0: + case PHB_SCOM_ETU_LEM_ACT1: + case PHB_SCOM_ETU_LEM_WOF: + offset = ((reg - PHB_SCOM_ETU_LEM_FIR) << 3) + PHB_LEM_FIR_ACCUM; + return pnv_phb4_reg_read(phb, offset, size); + case PHB_SCOM_ETU_PMON_CONFIG: + case PHB_SCOM_ETU_PMON_CTR0: + case PHB_SCOM_ETU_PMON_CTR1: + case PHB_SCOM_ETU_PMON_CTR2: + case PHB_SCOM_ETU_PMON_CTR3: + offset = ((reg - PHB_SCOM_ETU_PMON_CONFIG) << 3) + PHB_PERFMON_CONFIG; + return pnv_phb4_reg_read(phb, offset, size); + + default: + qemu_log_mask(LOG_UNIMP, "phb4: xscom_read 0x%"HWADDR_PRIx"\n", addr); + return ~0ull; + } +} + +static void pnv_phb4_xscom_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + PnvPHB4 *phb = PNV_PHB4(opaque); + uint32_t reg = addr >> 3; + hwaddr offset; + + switch (reg) { + case PHB_SCOM_HV_IND_ADDR: + phb->scom_hv_ind_addr_reg = val & 0xe000000000001fff; + break; + case PHB_SCOM_HV_IND_DATA: + if (!(phb->scom_hv_ind_addr_reg & PHB_SCOM_HV_IND_ADDR_VALID)) { + phb_error(phb, "Invalid indirect address"); + break; + } + size = (phb->scom_hv_ind_addr_reg & PHB_SCOM_HV_IND_ADDR_4B) ? 
4 : 8; + offset = GETFIELD(PHB_SCOM_HV_IND_ADDR_ADDR, phb->scom_hv_ind_addr_reg); + pnv_phb4_reg_write(phb, offset, val, size); + if (phb->scom_hv_ind_addr_reg & PHB_SCOM_HV_IND_ADDR_AUTOINC) { + offset += size; + offset &= 0x3fff; + phb->scom_hv_ind_addr_reg = SETFIELD(PHB_SCOM_HV_IND_ADDR_ADDR, + phb->scom_hv_ind_addr_reg, + offset); + } + break; + case PHB_SCOM_ETU_LEM_FIR: + case PHB_SCOM_ETU_LEM_FIR_AND: + case PHB_SCOM_ETU_LEM_FIR_OR: + case PHB_SCOM_ETU_LEM_FIR_MSK: + case PHB_SCOM_ETU_LEM_ERR_MSK_AND: + case PHB_SCOM_ETU_LEM_ERR_MSK_OR: + case PHB_SCOM_ETU_LEM_ACT0: + case PHB_SCOM_ETU_LEM_ACT1: + case PHB_SCOM_ETU_LEM_WOF: + offset = ((reg - PHB_SCOM_ETU_LEM_FIR) << 3) + PHB_LEM_FIR_ACCUM; + pnv_phb4_reg_write(phb, offset, val, size); + break; + case PHB_SCOM_ETU_PMON_CONFIG: + case PHB_SCOM_ETU_PMON_CTR0: + case PHB_SCOM_ETU_PMON_CTR1: + case PHB_SCOM_ETU_PMON_CTR2: + case PHB_SCOM_ETU_PMON_CTR3: + offset = ((reg - PHB_SCOM_ETU_PMON_CONFIG) << 3) + PHB_PERFMON_CONFIG; + pnv_phb4_reg_write(phb, offset, val, size); + break; + default: + qemu_log_mask(LOG_UNIMP, "phb4: xscom_write 0x%"HWADDR_PRIx + "=%"PRIx64"\n", addr, val); + } +} + +const MemoryRegionOps pnv_phb4_xscom_ops = { + .read = pnv_phb4_xscom_read, + .write = pnv_phb4_xscom_write, + .valid.min_access_size = 8, + .valid.max_access_size = 8, + .impl.min_access_size = 8, + .impl.max_access_size = 8, + .endianness = DEVICE_BIG_ENDIAN, +}; + +static int pnv_phb4_map_irq(PCIDevice *pci_dev, int irq_num) +{ + /* Check that out properly ... */ + return irq_num & 3; +} + +static void pnv_phb4_set_irq(void *opaque, int irq_num, int level) +{ + PnvPHB4 *phb = PNV_PHB4(opaque); + uint32_t lsi_base; + + /* LSI only ... */ + if (irq_num > 3) { + phb_error(phb, "IRQ %x is not an LSI", irq_num); + } + lsi_base = GETFIELD(PHB_LSI_SRC_ID, phb->regs[PHB_LSI_SOURCE_ID >> 3]); + lsi_base <<= 3; + qemu_set_irq(phb->qirqs[lsi_base + irq_num], level); +} + +static bool pnv_phb4_resolve_pe(PnvPhb4DMASpace *ds) +{ + uint64_t rtt, addr; + uint16_t rte; + int bus_num; + int num_PEs; + + /* Already resolved ? */ + if (ds->pe_num != PHB_INVALID_PE) { + return true; + } + + /* We need to lookup the RTT */ + rtt = ds->phb->regs[PHB_RTT_BAR >> 3]; + if (!(rtt & PHB_RTT_BAR_ENABLE)) { + phb_error(ds->phb, "DMA with RTT BAR disabled !"); + /* Set error bits ? fence ? ... */ + return false; + } + + /* Read RTE */ + bus_num = pci_bus_num(ds->bus); + addr = rtt & PHB_RTT_BASE_ADDRESS_MASK; + addr += 2 * PCI_BUILD_BDF(bus_num, ds->devfn); + if (dma_memory_read(&address_space_memory, addr, &rte, sizeof(rte))) { + phb_error(ds->phb, "Failed to read RTT entry at 0x%"PRIx64, addr); + /* Set error bits ? fence ? ... */ + return false; + } + rte = be16_to_cpu(rte); + + /* Fail upon reading of invalid PE# */ + num_PEs = ds->phb->big_phb ? 
PNV_PHB4_MAX_PEs : (PNV_PHB4_MAX_PEs >> 1);
+    if (rte >= num_PEs) {
+        phb_error(ds->phb, "RTE for RID 0x%x invalid (%04x)", ds->devfn, rte);
+        rte &= num_PEs - 1;
+    }
+    ds->pe_num = rte;
+    return true;
+}
+
+static void pnv_phb4_translate_tve(PnvPhb4DMASpace *ds, hwaddr addr,
+                                   bool is_write, uint64_t tve,
+                                   IOMMUTLBEntry *tlb)
+{
+    uint64_t tta = GETFIELD(IODA3_TVT_TABLE_ADDR, tve);
+    int32_t lev = GETFIELD(IODA3_TVT_NUM_LEVELS, tve);
+    uint32_t tts = GETFIELD(IODA3_TVT_TCE_TABLE_SIZE, tve);
+    uint32_t tps = GETFIELD(IODA3_TVT_IO_PSIZE, tve);
+
+    /* Invalid levels */
+    if (lev > 4) {
+        phb_error(ds->phb, "Invalid #levels in TVE %d", lev);
+        return;
+    }
+
+    /* Invalid entry */
+    if (tts == 0) {
+        phb_error(ds->phb, "Access to invalid TVE");
+        return;
+    }
+
+    /* IO Page Size of 0 means untranslated, else use TCEs */
+    if (tps == 0) {
+        /* TODO: Handle boundaries */
+
+        /* Use 4k pages like q35 ... for now */
+        tlb->iova = addr & 0xfffffffffffff000ull;
+        tlb->translated_addr = addr & 0x0003fffffffff000ull;
+        tlb->addr_mask = 0xfffull;
+        tlb->perm = IOMMU_RW;
+    } else {
+        uint32_t tce_shift, tbl_shift, sh;
+        uint64_t base, taddr, tce, tce_mask;
+
+        /* Address bits per bottom level TCE entry */
+        tce_shift = tps + 11;
+
+        /* Address bits per table level */
+        tbl_shift = tts + 8;
+
+        /* Top level table base address */
+        base = tta << 12;
+
+        /* Total shift to first level */
+        sh = tbl_shift * lev + tce_shift;
+
+        /* TODO: Limit to support IO page sizes */
+
+        /* TODO: Multi-level untested */
+        while ((lev--) >= 0) {
+            /* Grab the TCE address */
+            taddr = base | (((addr >> sh) & ((1ul << tbl_shift) - 1)) << 3);
+            if (dma_memory_read(&address_space_memory, taddr, &tce,
+                                sizeof(tce))) {
+                phb_error(ds->phb, "Failed to read TCE at 0x%"PRIx64, taddr);
+                return;
+            }
+            tce = be64_to_cpu(tce);
+
+            /* Check permission for indirect TCE */
+            if ((lev >= 0) && !(tce & 3)) {
+                phb_error(ds->phb, "Invalid indirect TCE at 0x%"PRIx64, taddr);
+                phb_error(ds->phb, " xlate %"PRIx64":%c TVE=%"PRIx64, addr,
+                          is_write ? 'W' : 'R', tve);
+                phb_error(ds->phb, " tta=%"PRIx64" lev=%d tts=%d tps=%d",
+                          tta, lev, tts, tps);
+                return;
+            }
+            sh -= tbl_shift;
+            base = tce & ~0xfffull;
+        }
+
+        /* We exit the loop with TCE being the final TCE */
+        tce_mask = ~((1ull << tce_shift) - 1);
+        tlb->iova = addr & tce_mask;
+        tlb->translated_addr = tce & tce_mask;
+        tlb->addr_mask = ~tce_mask;
+        tlb->perm = tce & 3;
+        if ((is_write && !(tce & 2)) || ((!is_write) && !(tce & 1))) {
+            phb_error(ds->phb, "TCE access fault at 0x%"PRIx64, taddr);
+            phb_error(ds->phb, " xlate %"PRIx64":%c TVE=%"PRIx64, addr,
+                      is_write ? 'W' : 'R', tve);
+            phb_error(ds->phb, " tta=%"PRIx64" lev=%d tts=%d tps=%d",
+                      tta, lev, tts, tps);
+        }
+    }
+}
+
+static IOMMUTLBEntry pnv_phb4_translate_iommu(IOMMUMemoryRegion *iommu,
+                                              hwaddr addr,
+                                              IOMMUAccessFlags flag,
+                                              int iommu_idx)
+{
+    PnvPhb4DMASpace *ds = container_of(iommu, PnvPhb4DMASpace, dma_mr);
+    int tve_sel;
+    uint64_t tve, cfg;
+    IOMMUTLBEntry ret = {
+        .target_as = &address_space_memory,
+        .iova = addr,
+        .translated_addr = 0,
+        .addr_mask = ~(hwaddr)0,
+        .perm = IOMMU_NONE,
+    };
+
+    /* Resolve PE# */
+    if (!pnv_phb4_resolve_pe(ds)) {
+        phb_error(ds->phb, "Failed to resolve PE# for bus @%p (%d) devfn 0x%x",
+                  ds->bus, pci_bus_num(ds->bus), ds->devfn);
+        return ret;
+    }
+
+    /* Check top bits */
+    switch (addr >> 60) {
+    case 00:
+        /* DMA or 32-bit MSI ?
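/*
 * Editor's sketch of the top-nibble routing implemented below
 * (hypothetical helper): bits 63:60 of an inbound address select the
 * DMA window (0, which may also carry 32-bit MSIs at 0xffff0000) or
 * the 64-bit MSI window (1); anything else is unsupported.
 */
static inline int phb4_addr_kind_sketch(uint64_t addr)
{
    switch (addr >> 60) {
    case 0:
        return 0;       /* DMA, possibly 32-bit MSI */
    case 1:
        return 1;       /* 64-bit MSI window */
    default:
        return -1;      /* unsupported */
    }
}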
*/ + cfg = ds->phb->regs[PHB_PHB4_CONFIG >> 3]; + if ((cfg & PHB_PHB4C_32BIT_MSI_EN) && + ((addr & 0xffffffffffff0000ull) == 0xffff0000ull)) { + phb_error(ds->phb, "xlate on 32-bit MSI region"); + return ret; + } + /* Choose TVE XXX Use PHB4 Control Register */ + tve_sel = (addr >> 59) & 1; + tve = ds->phb->ioda_TVT[ds->pe_num * 2 + tve_sel]; + pnv_phb4_translate_tve(ds, addr, flag & IOMMU_WO, tve, &ret); + break; + case 01: + phb_error(ds->phb, "xlate on 64-bit MSI region"); + break; + default: + phb_error(ds->phb, "xlate on unsupported address 0x%"PRIx64, addr); + } + return ret; +} + +#define TYPE_PNV_PHB4_IOMMU_MEMORY_REGION "pnv-phb4-iommu-memory-region" +DECLARE_INSTANCE_CHECKER(IOMMUMemoryRegion, PNV_PHB4_IOMMU_MEMORY_REGION, + TYPE_PNV_PHB4_IOMMU_MEMORY_REGION) + +static void pnv_phb4_iommu_memory_region_class_init(ObjectClass *klass, + void *data) +{ + IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass); + + imrc->translate = pnv_phb4_translate_iommu; +} + +static const TypeInfo pnv_phb4_iommu_memory_region_info = { + .parent = TYPE_IOMMU_MEMORY_REGION, + .name = TYPE_PNV_PHB4_IOMMU_MEMORY_REGION, + .class_init = pnv_phb4_iommu_memory_region_class_init, +}; + +/* + * MSI/MSIX memory region implementation. + * The handler handles both MSI and MSIX. + */ +static void pnv_phb4_msi_write(void *opaque, hwaddr addr, + uint64_t data, unsigned size) +{ + PnvPhb4DMASpace *ds = opaque; + PnvPHB4 *phb = ds->phb; + + uint32_t src = ((addr >> 4) & 0xffff) | (data & 0x1f); + + /* Resolve PE# */ + if (!pnv_phb4_resolve_pe(ds)) { + phb_error(phb, "Failed to resolve PE# for bus @%p (%d) devfn 0x%x", + ds->bus, pci_bus_num(ds->bus), ds->devfn); + return; + } + + /* TODO: Check it doesn't collide with LSIs */ + if (src >= phb->xsrc.nr_irqs) { + phb_error(phb, "MSI %d out of bounds", src); + return; + } + + /* TODO: check PE/MSI assignment */ + + qemu_irq_pulse(phb->qirqs[src]); +} + +/* The read result is undefined by the PCI spec, so log reads as errors */ +static uint64_t pnv_phb4_msi_read(void *opaque, hwaddr addr, unsigned size) +{ + PnvPhb4DMASpace *ds = opaque; + + phb_error(ds->phb, "Invalid MSI read @ 0x%" HWADDR_PRIx, addr); + return -1; +} + +static const MemoryRegionOps pnv_phb4_msi_ops = { + .read = pnv_phb4_msi_read, + .write = pnv_phb4_msi_write, + .endianness = DEVICE_LITTLE_ENDIAN +}; + +static PnvPhb4DMASpace *pnv_phb4_dma_find(PnvPHB4 *phb, PCIBus *bus, int devfn) +{ + PnvPhb4DMASpace *ds; + + QLIST_FOREACH(ds, &phb->dma_spaces, list) { + if (ds->bus == bus && ds->devfn == devfn) { + break; + } + } + return ds; +} + +static AddressSpace *pnv_phb4_dma_iommu(PCIBus *bus, void *opaque, int devfn) +{ + PnvPHB4 *phb = opaque; + PnvPhb4DMASpace *ds; + char name[32]; + + ds = pnv_phb4_dma_find(phb, bus, devfn); + + if (ds == NULL) { + ds = g_malloc0(sizeof(PnvPhb4DMASpace)); + ds->bus = bus; + ds->devfn = devfn; + ds->pe_num = PHB_INVALID_PE; + ds->phb = phb; + snprintf(name, sizeof(name), "phb4-%d.%d-iommu", phb->chip_id, + phb->phb_id); + memory_region_init_iommu(&ds->dma_mr, sizeof(ds->dma_mr), + TYPE_PNV_PHB4_IOMMU_MEMORY_REGION, + OBJECT(phb), name, UINT64_MAX); + address_space_init(&ds->dma_as, MEMORY_REGION(&ds->dma_mr), + name); + memory_region_init_io(&ds->msi32_mr, OBJECT(phb), &pnv_phb4_msi_ops, + ds, "msi32", 0x10000); + memory_region_init_io(&ds->msi64_mr, OBJECT(phb), &pnv_phb4_msi_ops, + ds, "msi64", 0x100000); + pnv_phb4_update_msi_regions(ds); + + QLIST_INSERT_HEAD(&phb->dma_spaces, ds, list); + } + return &ds->dma_as; +} + +static void pnv_phb4_instance_init(Object 
*obj) +{ + PnvPHB4 *phb = PNV_PHB4(obj); + + QLIST_INIT(&phb->dma_spaces); + + /* XIVE interrupt source object */ + object_initialize_child(obj, "source", &phb->xsrc, TYPE_XIVE_SOURCE); + + /* Root Port */ + object_initialize_child(obj, "root", &phb->root, TYPE_PNV_PHB4_ROOT_PORT); + + qdev_prop_set_int32(DEVICE(&phb->root), "addr", PCI_DEVFN(0, 0)); + qdev_prop_set_bit(DEVICE(&phb->root), "multifunction", false); +} + +static void pnv_phb4_realize(DeviceState *dev, Error **errp) +{ + PnvPHB4 *phb = PNV_PHB4(dev); + PCIHostState *pci = PCI_HOST_BRIDGE(dev); + XiveSource *xsrc = &phb->xsrc; + int nr_irqs; + char name[32]; + + assert(phb->stack); + + /* Set the "big_phb" flag */ + phb->big_phb = phb->phb_id == 0 || phb->phb_id == 3; + + /* Controller Registers */ + snprintf(name, sizeof(name), "phb4-%d.%d-regs", phb->chip_id, + phb->phb_id); + memory_region_init_io(&phb->mr_regs, OBJECT(phb), &pnv_phb4_reg_ops, phb, + name, 0x2000); + + /* + * PHB4 doesn't support IO space. However, qemu gets very upset if + * we don't have an IO region to anchor IO BARs onto so we just + * initialize one which we never hook up to anything + */ + + snprintf(name, sizeof(name), "phb4-%d.%d-pci-io", phb->chip_id, + phb->phb_id); + memory_region_init(&phb->pci_io, OBJECT(phb), name, 0x10000); + + snprintf(name, sizeof(name), "phb4-%d.%d-pci-mmio", phb->chip_id, + phb->phb_id); + memory_region_init(&phb->pci_mmio, OBJECT(phb), name, + PCI_MMIO_TOTAL_SIZE); + + pci->bus = pci_register_root_bus(dev, "root-bus", + pnv_phb4_set_irq, pnv_phb4_map_irq, phb, + &phb->pci_mmio, &phb->pci_io, + 0, 4, TYPE_PNV_PHB4_ROOT_BUS); + pci_setup_iommu(pci->bus, pnv_phb4_dma_iommu, phb); + + /* Add a single Root port */ + qdev_prop_set_uint8(DEVICE(&phb->root), "chassis", phb->chip_id); + qdev_prop_set_uint16(DEVICE(&phb->root), "slot", phb->phb_id); + qdev_realize(DEVICE(&phb->root), BUS(pci->bus), &error_fatal); + + /* Setup XIVE Source */ + if (phb->big_phb) { + nr_irqs = PNV_PHB4_MAX_INTs; + } else { + nr_irqs = PNV_PHB4_MAX_INTs >> 1; + } + object_property_set_int(OBJECT(xsrc), "nr-irqs", nr_irqs, &error_fatal); + object_property_set_link(OBJECT(xsrc), "xive", OBJECT(phb), &error_fatal); + if (!qdev_realize(DEVICE(xsrc), NULL, errp)) { + return; + } + + pnv_phb4_update_xsrc(phb); + + phb->qirqs = qemu_allocate_irqs(xive_source_set_irq, xsrc, xsrc->nr_irqs); +} + +static void pnv_phb4_reset(DeviceState *dev) +{ + PnvPHB4 *phb = PNV_PHB4(dev); + PCIDevice *root_dev = PCI_DEVICE(&phb->root); + + /* + * Configure PCI device id at reset using a property. 
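+ * The vendor is always IBM; the device id comes from the "device-id" + * property that the machine sets on the PHB.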
+ */ + pci_config_set_vendor_id(root_dev->config, PCI_VENDOR_ID_IBM); + pci_config_set_device_id(root_dev->config, phb->device_id); +} + +static const char *pnv_phb4_root_bus_path(PCIHostState *host_bridge, + PCIBus *rootbus) +{ + PnvPHB4 *phb = PNV_PHB4(host_bridge); + + snprintf(phb->bus_path, sizeof(phb->bus_path), "00%02x:%02x", + phb->chip_id, phb->phb_id); + return phb->bus_path; +} + +static void pnv_phb4_xive_notify(XiveNotifier *xf, uint32_t srcno) +{ + PnvPHB4 *phb = PNV_PHB4(xf); + uint64_t notif_port = phb->regs[PHB_INT_NOTIFY_ADDR >> 3]; + uint32_t offset = phb->regs[PHB_INT_NOTIFY_INDEX >> 3]; + uint64_t data = XIVE_TRIGGER_PQ | offset | srcno; + MemTxResult result; + + trace_pnv_phb4_xive_notify(notif_port, data); + + address_space_stq_be(&address_space_memory, notif_port, data, + MEMTXATTRS_UNSPECIFIED, &result); + if (result != MEMTX_OK) { + phb_error(phb, "trigger failed @%"HWADDR_PRIx "\n", notif_port); + return; + } +} + +static Property pnv_phb4_properties[] = { + DEFINE_PROP_UINT32("index", PnvPHB4, phb_id, 0), + DEFINE_PROP_UINT32("chip-id", PnvPHB4, chip_id, 0), + DEFINE_PROP_UINT64("version", PnvPHB4, version, 0), + DEFINE_PROP_UINT16("device-id", PnvPHB4, device_id, 0), + DEFINE_PROP_LINK("stack", PnvPHB4, stack, TYPE_PNV_PHB4_PEC_STACK, + PnvPhb4PecStack *), + DEFINE_PROP_END_OF_LIST(), +}; + +static void pnv_phb4_class_init(ObjectClass *klass, void *data) +{ + PCIHostBridgeClass *hc = PCI_HOST_BRIDGE_CLASS(klass); + DeviceClass *dc = DEVICE_CLASS(klass); + XiveNotifierClass *xfc = XIVE_NOTIFIER_CLASS(klass); + + hc->root_bus_path = pnv_phb4_root_bus_path; + dc->realize = pnv_phb4_realize; + device_class_set_props(dc, pnv_phb4_properties); + set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); + dc->user_creatable = false; + dc->reset = pnv_phb4_reset; + + xfc->notify = pnv_phb4_xive_notify; +} + +static const TypeInfo pnv_phb4_type_info = { + .name = TYPE_PNV_PHB4, + .parent = TYPE_PCIE_HOST_BRIDGE, + .instance_init = pnv_phb4_instance_init, + .instance_size = sizeof(PnvPHB4), + .class_init = pnv_phb4_class_init, + .interfaces = (InterfaceInfo[]) { + { TYPE_XIVE_NOTIFIER }, + { }, + } +}; + +static void pnv_phb4_root_bus_class_init(ObjectClass *klass, void *data) +{ + BusClass *k = BUS_CLASS(klass); + + /* + * PHB4 has only a single root complex. 
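Only the built-in root port created at realize time sits below it. 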
Enforce the limit on the + * parent bus + */ + k->max_dev = 1; +} + +static const TypeInfo pnv_phb4_root_bus_info = { + .name = TYPE_PNV_PHB4_ROOT_BUS, + .parent = TYPE_PCIE_BUS, + .class_init = pnv_phb4_root_bus_class_init, + .interfaces = (InterfaceInfo[]) { + { INTERFACE_PCIE_DEVICE }, + { } + }, +}; + +static void pnv_phb4_root_port_reset(DeviceState *dev) +{ + PCIERootPortClass *rpc = PCIE_ROOT_PORT_GET_CLASS(dev); + PCIDevice *d = PCI_DEVICE(dev); + uint8_t *conf = d->config; + + rpc->parent_reset(dev); + + pci_byte_test_and_set_mask(conf + PCI_IO_BASE, + PCI_IO_RANGE_MASK & 0xff); + pci_byte_test_and_clear_mask(conf + PCI_IO_LIMIT, + PCI_IO_RANGE_MASK & 0xff); + pci_set_word(conf + PCI_MEMORY_BASE, 0); + pci_set_word(conf + PCI_MEMORY_LIMIT, 0xfff0); + pci_set_word(conf + PCI_PREF_MEMORY_BASE, 0x1); + pci_set_word(conf + PCI_PREF_MEMORY_LIMIT, 0xfff1); + pci_set_long(conf + PCI_PREF_BASE_UPPER32, 0x1); /* Hack */ + pci_set_long(conf + PCI_PREF_LIMIT_UPPER32, 0xffffffff); +} + +static void pnv_phb4_root_port_realize(DeviceState *dev, Error **errp) +{ + PCIERootPortClass *rpc = PCIE_ROOT_PORT_GET_CLASS(dev); + Error *local_err = NULL; + + rpc->parent_realize(dev, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } +} + +static void pnv_phb4_root_port_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + PCIERootPortClass *rpc = PCIE_ROOT_PORT_CLASS(klass); + + dc->desc = "IBM PHB4 PCIE Root Port"; + dc->user_creatable = false; + + device_class_set_parent_realize(dc, pnv_phb4_root_port_realize, + &rpc->parent_realize); + device_class_set_parent_reset(dc, pnv_phb4_root_port_reset, + &rpc->parent_reset); + + k->vendor_id = PCI_VENDOR_ID_IBM; + k->device_id = PNV_PHB4_DEVICE_ID; + k->revision = 0; + + rpc->exp_offset = 0x48; + rpc->aer_offset = 0x100; + + dc->reset = &pnv_phb4_root_port_reset; +} + +static const TypeInfo pnv_phb4_root_port_info = { + .name = TYPE_PNV_PHB4_ROOT_PORT, + .parent = TYPE_PCIE_ROOT_PORT, + .instance_size = sizeof(PnvPHB4RootPort), + .class_init = pnv_phb4_root_port_class_init, +}; + +static void pnv_phb4_register_types(void) +{ + type_register_static(&pnv_phb4_root_bus_info); + type_register_static(&pnv_phb4_root_port_info); + type_register_static(&pnv_phb4_type_info); + type_register_static(&pnv_phb4_iommu_memory_region_info); +} + +type_init(pnv_phb4_register_types); + +void pnv_phb4_update_regions(PnvPhb4PecStack *stack) +{ + PnvPHB4 *phb = &stack->phb; + + /* Unmap first always */ + if (memory_region_is_mapped(&phb->mr_regs)) { + memory_region_del_subregion(&stack->phbbar, &phb->mr_regs); + } + if (memory_region_is_mapped(&phb->xsrc.esb_mmio)) { + memory_region_del_subregion(&stack->intbar, &phb->xsrc.esb_mmio); + } + + /* Map registers if enabled */ + if (memory_region_is_mapped(&stack->phbbar)) { + memory_region_add_subregion(&stack->phbbar, 0, &phb->mr_regs); + } + + /* Map ESB if enabled */ + if (memory_region_is_mapped(&stack->intbar)) { + memory_region_add_subregion(&stack->intbar, 0, &phb->xsrc.esb_mmio); + } + + /* Check/update m32 */ + pnv_phb4_check_all_mbt(phb); +} + +void pnv_phb4_pic_print_info(PnvPHB4 *phb, Monitor *mon) +{ + uint32_t offset = phb->regs[PHB_INT_NOTIFY_INDEX >> 3]; + + monitor_printf(mon, "PHB4[%x:%x] Source %08x .. 
%08x\n", + phb->chip_id, phb->phb_id, + offset, offset + phb->xsrc.nr_irqs - 1); + xive_source_pic_print_info(&phb->xsrc, 0, mon); +} diff --git a/hw/pci-host/pnv_phb4_pec.c b/hw/pci-host/pnv_phb4_pec.c new file mode 100644 index 000000000..741ddc90e --- /dev/null +++ b/hw/pci-host/pnv_phb4_pec.c @@ -0,0 +1,593 @@ +/* + * QEMU PowerPC PowerNV (POWER9) PHB4 model + * + * Copyright (c) 2018-2020, IBM Corporation. + * + * This code is licensed under the GPL version 2 or later. See the + * COPYING file in the top-level directory. + */ +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu-common.h" +#include "qemu/log.h" +#include "target/ppc/cpu.h" +#include "hw/ppc/fdt.h" +#include "hw/pci-host/pnv_phb4_regs.h" +#include "hw/pci-host/pnv_phb4.h" +#include "hw/ppc/pnv_xscom.h" +#include "hw/pci/pci_bridge.h" +#include "hw/pci/pci_bus.h" +#include "hw/ppc/pnv.h" +#include "hw/qdev-properties.h" + +#include + +#define phb_pec_error(pec, fmt, ...) \ + qemu_log_mask(LOG_GUEST_ERROR, "phb4_pec[%d:%d]: " fmt "\n", \ + (pec)->chip_id, (pec)->index, ## __VA_ARGS__) + + +static uint64_t pnv_pec_nest_xscom_read(void *opaque, hwaddr addr, + unsigned size) +{ + PnvPhb4PecState *pec = PNV_PHB4_PEC(opaque); + uint32_t reg = addr >> 3; + + /* TODO: add list of allowed registers and error out if not */ + return pec->nest_regs[reg]; +} + +static void pnv_pec_nest_xscom_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + PnvPhb4PecState *pec = PNV_PHB4_PEC(opaque); + uint32_t reg = addr >> 3; + + switch (reg) { + case PEC_NEST_PBCQ_HW_CONFIG: + case PEC_NEST_DROP_PRIO_CTRL: + case PEC_NEST_PBCQ_ERR_INJECT: + case PEC_NEST_PCI_NEST_CLK_TRACE_CTL: + case PEC_NEST_PBCQ_PMON_CTRL: + case PEC_NEST_PBCQ_PBUS_ADDR_EXT: + case PEC_NEST_PBCQ_PRED_VEC_TIMEOUT: + case PEC_NEST_CAPP_CTRL: + case PEC_NEST_PBCQ_READ_STK_OVR: + case PEC_NEST_PBCQ_WRITE_STK_OVR: + case PEC_NEST_PBCQ_STORE_STK_OVR: + case PEC_NEST_PBCQ_RETRY_BKOFF_CTRL: + pec->nest_regs[reg] = val; + break; + default: + phb_pec_error(pec, "%s @0x%"HWADDR_PRIx"=%"PRIx64"\n", __func__, + addr, val); + } +} + +static const MemoryRegionOps pnv_pec_nest_xscom_ops = { + .read = pnv_pec_nest_xscom_read, + .write = pnv_pec_nest_xscom_write, + .valid.min_access_size = 8, + .valid.max_access_size = 8, + .impl.min_access_size = 8, + .impl.max_access_size = 8, + .endianness = DEVICE_BIG_ENDIAN, +}; + +static uint64_t pnv_pec_pci_xscom_read(void *opaque, hwaddr addr, + unsigned size) +{ + PnvPhb4PecState *pec = PNV_PHB4_PEC(opaque); + uint32_t reg = addr >> 3; + + /* TODO: add list of allowed registers and error out if not */ + return pec->pci_regs[reg]; +} + +static void pnv_pec_pci_xscom_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + PnvPhb4PecState *pec = PNV_PHB4_PEC(opaque); + uint32_t reg = addr >> 3; + + switch (reg) { + case PEC_PCI_PBAIB_HW_CONFIG: + case PEC_PCI_PBAIB_READ_STK_OVR: + pec->pci_regs[reg] = val; + break; + default: + phb_pec_error(pec, "%s @0x%"HWADDR_PRIx"=%"PRIx64"\n", __func__, + addr, val); + } +} + +static const MemoryRegionOps pnv_pec_pci_xscom_ops = { + .read = pnv_pec_pci_xscom_read, + .write = pnv_pec_pci_xscom_write, + .valid.min_access_size = 8, + .valid.max_access_size = 8, + .impl.min_access_size = 8, + .impl.max_access_size = 8, + .endianness = DEVICE_BIG_ENDIAN, +}; + +static uint64_t pnv_pec_stk_nest_xscom_read(void *opaque, hwaddr addr, + unsigned size) +{ + PnvPhb4PecStack *stack = PNV_PHB4_PEC_STACK(opaque); + uint32_t reg = addr >> 3; + + /* TODO: add list of allowed registers and 
error out if not */ + return stack->nest_regs[reg]; +} + +static void pnv_pec_stk_update_map(PnvPhb4PecStack *stack) +{ + PnvPhb4PecState *pec = stack->pec; + MemoryRegion *sysmem = pec->system_memory; + uint64_t bar_en = stack->nest_regs[PEC_NEST_STK_BAR_EN]; + uint64_t bar, mask, size; + char name[64]; + + /* + * NOTE: This will really not work well if those are remapped + * after the PHB has created its sub regions. We could do better + * if we had a way to resize regions but we don't really care + * that much in practice as the stuff below really only happens + * once early during boot + */ + + /* Handle unmaps */ + if (memory_region_is_mapped(&stack->mmbar0) && + !(bar_en & PEC_NEST_STK_BAR_EN_MMIO0)) { + memory_region_del_subregion(sysmem, &stack->mmbar0); + } + if (memory_region_is_mapped(&stack->mmbar1) && + !(bar_en & PEC_NEST_STK_BAR_EN_MMIO1)) { + memory_region_del_subregion(sysmem, &stack->mmbar1); + } + if (memory_region_is_mapped(&stack->phbbar) && + !(bar_en & PEC_NEST_STK_BAR_EN_PHB)) { + memory_region_del_subregion(sysmem, &stack->phbbar); + } + if (memory_region_is_mapped(&stack->intbar) && + !(bar_en & PEC_NEST_STK_BAR_EN_INT)) { + memory_region_del_subregion(sysmem, &stack->intbar); + } + + /* Update PHB */ + pnv_phb4_update_regions(stack); + + /* Handle maps */ + if (!memory_region_is_mapped(&stack->mmbar0) && + (bar_en & PEC_NEST_STK_BAR_EN_MMIO0)) { + bar = stack->nest_regs[PEC_NEST_STK_MMIO_BAR0] >> 8; + mask = stack->nest_regs[PEC_NEST_STK_MMIO_BAR0_MASK]; + size = ((~mask) >> 8) + 1; + snprintf(name, sizeof(name), "pec-%d.%d-stack-%d-mmio0", + pec->chip_id, pec->index, stack->stack_no); + memory_region_init(&stack->mmbar0, OBJECT(stack), name, size); + memory_region_add_subregion(sysmem, bar, &stack->mmbar0); + stack->mmio0_base = bar; + stack->mmio0_size = size; + } + if (!memory_region_is_mapped(&stack->mmbar1) && + (bar_en & PEC_NEST_STK_BAR_EN_MMIO1)) { + bar = stack->nest_regs[PEC_NEST_STK_MMIO_BAR1] >> 8; + mask = stack->nest_regs[PEC_NEST_STK_MMIO_BAR1_MASK]; + size = ((~mask) >> 8) + 1; + snprintf(name, sizeof(name), "pec-%d.%d-stack-%d-mmio1", + pec->chip_id, pec->index, stack->stack_no); + memory_region_init(&stack->mmbar1, OBJECT(stack), name, size); + memory_region_add_subregion(sysmem, bar, &stack->mmbar1); + stack->mmio1_base = bar; + stack->mmio1_size = size; + } + if (!memory_region_is_mapped(&stack->phbbar) && + (bar_en & PEC_NEST_STK_BAR_EN_PHB)) { + bar = stack->nest_regs[PEC_NEST_STK_PHB_REGS_BAR] >> 8; + size = PNV_PHB4_NUM_REGS << 3; + snprintf(name, sizeof(name), "pec-%d.%d-stack-%d-phb", + pec->chip_id, pec->index, stack->stack_no); + memory_region_init(&stack->phbbar, OBJECT(stack), name, size); + memory_region_add_subregion(sysmem, bar, &stack->phbbar); + } + if (!memory_region_is_mapped(&stack->intbar) && + (bar_en & PEC_NEST_STK_BAR_EN_INT)) { + bar = stack->nest_regs[PEC_NEST_STK_INT_BAR] >> 8; + size = PNV_PHB4_MAX_INTs << 16; + snprintf(name, sizeof(name), "pec-%d.%d-stack-%d-int", + stack->pec->chip_id, stack->pec->index, stack->stack_no); + memory_region_init(&stack->intbar, OBJECT(stack), name, size); + memory_region_add_subregion(sysmem, bar, &stack->intbar); + } + + /* Update PHB */ + pnv_phb4_update_regions(stack); +} + +static void pnv_pec_stk_nest_xscom_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + PnvPhb4PecStack *stack = PNV_PHB4_PEC_STACK(opaque); + PnvPhb4PecState *pec = stack->pec; + uint32_t reg = addr >> 3; + + switch (reg) { + case PEC_NEST_STK_PCI_NEST_FIR: + 
stack->nest_regs[PEC_NEST_STK_PCI_NEST_FIR] = val; + break; + case PEC_NEST_STK_PCI_NEST_FIR_CLR: + stack->nest_regs[PEC_NEST_STK_PCI_NEST_FIR] &= val; + break; + case PEC_NEST_STK_PCI_NEST_FIR_SET: + stack->nest_regs[PEC_NEST_STK_PCI_NEST_FIR] |= val; + break; + case PEC_NEST_STK_PCI_NEST_FIR_MSK: + stack->nest_regs[PEC_NEST_STK_PCI_NEST_FIR_MSK] = val; + break; + case PEC_NEST_STK_PCI_NEST_FIR_MSKC: + stack->nest_regs[PEC_NEST_STK_PCI_NEST_FIR_MSK] &= val; + break; + case PEC_NEST_STK_PCI_NEST_FIR_MSKS: + stack->nest_regs[PEC_NEST_STK_PCI_NEST_FIR_MSK] |= val; + break; + case PEC_NEST_STK_PCI_NEST_FIR_ACT0: + case PEC_NEST_STK_PCI_NEST_FIR_ACT1: + stack->nest_regs[reg] = val; + break; + case PEC_NEST_STK_PCI_NEST_FIR_WOF: + stack->nest_regs[reg] = 0; + break; + case PEC_NEST_STK_ERR_REPORT_0: + case PEC_NEST_STK_ERR_REPORT_1: + case PEC_NEST_STK_PBCQ_GNRL_STATUS: + /* Flag error ? */ + break; + case PEC_NEST_STK_PBCQ_MODE: + stack->nest_regs[reg] = val & 0xff00000000000000ull; + break; + case PEC_NEST_STK_MMIO_BAR0: + case PEC_NEST_STK_MMIO_BAR0_MASK: + case PEC_NEST_STK_MMIO_BAR1: + case PEC_NEST_STK_MMIO_BAR1_MASK: + if (stack->nest_regs[PEC_NEST_STK_BAR_EN] & + (PEC_NEST_STK_BAR_EN_MMIO0 | + PEC_NEST_STK_BAR_EN_MMIO1)) { + phb_pec_error(pec, "Changing enabled BAR unsupported\n"); + } + stack->nest_regs[reg] = val & 0xffffffffff000000ull; + break; + case PEC_NEST_STK_PHB_REGS_BAR: + if (stack->nest_regs[PEC_NEST_STK_BAR_EN] & PEC_NEST_STK_BAR_EN_PHB) { + phb_pec_error(pec, "Changing enabled BAR unsupported\n"); + } + stack->nest_regs[reg] = val & 0xffffffffffc00000ull; + break; + case PEC_NEST_STK_INT_BAR: + if (stack->nest_regs[PEC_NEST_STK_BAR_EN] & PEC_NEST_STK_BAR_EN_INT) { + phb_pec_error(pec, "Changing enabled BAR unsupported\n"); + } + stack->nest_regs[reg] = val & 0xfffffff000000000ull; + break; + case PEC_NEST_STK_BAR_EN: + stack->nest_regs[reg] = val & 0xf000000000000000ull; + pnv_pec_stk_update_map(stack); + break; + case PEC_NEST_STK_DATA_FRZ_TYPE: + case PEC_NEST_STK_PBCQ_TUN_BAR: + /* Not used for now */ + stack->nest_regs[reg] = val; + break; + default: + qemu_log_mask(LOG_UNIMP, "phb4_pec: nest_xscom_write 0x%"HWADDR_PRIx + "=%"PRIx64"\n", addr, val); + } +} + +static const MemoryRegionOps pnv_pec_stk_nest_xscom_ops = { + .read = pnv_pec_stk_nest_xscom_read, + .write = pnv_pec_stk_nest_xscom_write, + .valid.min_access_size = 8, + .valid.max_access_size = 8, + .impl.min_access_size = 8, + .impl.max_access_size = 8, + .endianness = DEVICE_BIG_ENDIAN, +}; + +static uint64_t pnv_pec_stk_pci_xscom_read(void *opaque, hwaddr addr, + unsigned size) +{ + PnvPhb4PecStack *stack = PNV_PHB4_PEC_STACK(opaque); + uint32_t reg = addr >> 3; + + /* TODO: add list of allowed registers and error out if not */ + return stack->pci_regs[reg]; +} + +static void pnv_pec_stk_pci_xscom_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + PnvPhb4PecStack *stack = PNV_PHB4_PEC_STACK(opaque); + uint32_t reg = addr >> 3; + + switch (reg) { + case PEC_PCI_STK_PCI_FIR: + stack->pci_regs[reg] = val; + break; + case PEC_PCI_STK_PCI_FIR_CLR: + stack->pci_regs[PEC_PCI_STK_PCI_FIR] &= val; + break; + case PEC_PCI_STK_PCI_FIR_SET: + stack->pci_regs[PEC_PCI_STK_PCI_FIR] |= val; + break; + case PEC_PCI_STK_PCI_FIR_MSK: + stack->pci_regs[reg] = val; + break; + case PEC_PCI_STK_PCI_FIR_MSKC: + stack->pci_regs[PEC_PCI_STK_PCI_FIR_MSK] &= val; + break; + case PEC_PCI_STK_PCI_FIR_MSKS: + stack->pci_regs[PEC_PCI_STK_PCI_FIR_MSK] |= val; + break; + case PEC_PCI_STK_PCI_FIR_ACT0: + case PEC_PCI_STK_PCI_FIR_ACT1: + stack->pci_regs[reg] = val; + break; + case PEC_PCI_STK_PCI_FIR_WOF: + stack->pci_regs[reg] = 0; + break; + case PEC_PCI_STK_ETU_RESET: + stack->pci_regs[reg] = val & 0x8000000000000000ull; + /* TODO: Implement reset */ + break; + case PEC_PCI_STK_PBAIB_ERR_REPORT: + break; + case PEC_PCI_STK_PBAIB_TX_CMD_CRED: + case PEC_PCI_STK_PBAIB_TX_DAT_CRED: + stack->pci_regs[reg] = val; + break; + default: + qemu_log_mask(LOG_UNIMP, "phb4_pec_stk: pci_xscom_write 0x%"HWADDR_PRIx + "=%"PRIx64"\n", addr, val); + } +} + +static const MemoryRegionOps pnv_pec_stk_pci_xscom_ops = { + .read = pnv_pec_stk_pci_xscom_read, + .write = pnv_pec_stk_pci_xscom_write, + .valid.min_access_size = 8, + .valid.max_access_size = 8, + .impl.min_access_size = 8, + .impl.max_access_size = 8, + .endianness = DEVICE_BIG_ENDIAN, +}; + +static void pnv_pec_instance_init(Object *obj) +{ + PnvPhb4PecState *pec = PNV_PHB4_PEC(obj); + int i; + + for (i = 0; i < PHB4_PEC_MAX_STACKS; i++) { + object_initialize_child(obj, "stack[*]", &pec->stacks[i], + TYPE_PNV_PHB4_PEC_STACK); + } +} + +static void pnv_pec_realize(DeviceState *dev, Error **errp) +{ + PnvPhb4PecState *pec = PNV_PHB4_PEC(dev); + char name[64]; + int i; + + assert(pec->system_memory); + + /* Create stacks */ + for (i = 0; i < pec->num_stacks; i++) { + PnvPhb4PecStack *stack = &pec->stacks[i]; + Object *stk_obj = OBJECT(stack); + + object_property_set_int(stk_obj, "stack-no", i, &error_abort); + object_property_set_link(stk_obj, "pec", OBJECT(pec), &error_abort); + if (!qdev_realize(DEVICE(stk_obj), NULL, errp)) { + return; + } + } + for (; i < PHB4_PEC_MAX_STACKS; i++) { + object_unparent(OBJECT(&pec->stacks[i])); + } + + /* Initialize the XSCOM regions for the PEC registers */ + snprintf(name, sizeof(name), "xscom-pec-%d.%d-nest", pec->chip_id, + pec->index); + pnv_xscom_region_init(&pec->nest_regs_mr, OBJECT(dev), + &pnv_pec_nest_xscom_ops, pec, name, + PHB4_PEC_NEST_REGS_COUNT); + + snprintf(name, sizeof(name), "xscom-pec-%d.%d-pci", pec->chip_id, + pec->index); + pnv_xscom_region_init(&pec->pci_regs_mr, OBJECT(dev), + &pnv_pec_pci_xscom_ops, pec, name, + PHB4_PEC_PCI_REGS_COUNT); +} + +static int pnv_pec_dt_xscom(PnvXScomInterface *dev, void *fdt, + int xscom_offset) +{ + PnvPhb4PecState *pec = PNV_PHB4_PEC(dev); + PnvPhb4PecClass *pecc = PNV_PHB4_PEC_GET_CLASS(dev); + uint32_t nbase = pecc->xscom_nest_base(pec); + uint32_t pbase = pecc->xscom_pci_base(pec); + int offset, i; + char *name; + uint32_t reg[] = { + cpu_to_be32(nbase), + cpu_to_be32(pecc->xscom_nest_size), + cpu_to_be32(pbase), + cpu_to_be32(pecc->xscom_pci_size), + }; + + name = g_strdup_printf("pbcq@%x", nbase); + offset = fdt_add_subnode(fdt, xscom_offset, name); + _FDT(offset); + g_free(name); + + _FDT((fdt_setprop(fdt, offset, "reg", reg, sizeof(reg)))); + + _FDT((fdt_setprop_cell(fdt, offset, "ibm,pec-index", pec->index))); + _FDT((fdt_setprop_cell(fdt, offset, "#address-cells", 1))); + _FDT((fdt_setprop_cell(fdt, offset, "#size-cells", 0))); + _FDT((fdt_setprop(fdt, offset, "compatible", pecc->compat, + pecc->compat_size))); + + for (i = 0; i < pec->num_stacks; i++) { + PnvPhb4PecStack *stack = &pec->stacks[i]; + PnvPHB4 *phb = &stack->phb; + int stk_offset; + + name = g_strdup_printf("stack@%x", i); + stk_offset = fdt_add_subnode(fdt, offset, name); + _FDT(stk_offset); + g_free(name); + _FDT((fdt_setprop(fdt, stk_offset, "compatible", pecc->stk_compat, + pecc->stk_compat_size))); + _FDT((fdt_setprop_cell(fdt, stk_offset, "reg", i))); + _FDT((fdt_setprop_cell(fdt, 
stk_offset, "ibm,phb-index", phb->phb_id))); + } + + return 0; +} + +static Property pnv_pec_properties[] = { + DEFINE_PROP_UINT32("index", PnvPhb4PecState, index, 0), + DEFINE_PROP_UINT32("num-stacks", PnvPhb4PecState, num_stacks, 0), + DEFINE_PROP_UINT32("chip-id", PnvPhb4PecState, chip_id, 0), + DEFINE_PROP_LINK("system-memory", PnvPhb4PecState, system_memory, + TYPE_MEMORY_REGION, MemoryRegion *), + DEFINE_PROP_END_OF_LIST(), +}; + +static uint32_t pnv_pec_xscom_pci_base(PnvPhb4PecState *pec) +{ + return PNV9_XSCOM_PEC_PCI_BASE + 0x1000000 * pec->index; +} + +static uint32_t pnv_pec_xscom_nest_base(PnvPhb4PecState *pec) +{ + return PNV9_XSCOM_PEC_NEST_BASE + 0x400 * pec->index; +} + +static void pnv_pec_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PnvXScomInterfaceClass *xdc = PNV_XSCOM_INTERFACE_CLASS(klass); + PnvPhb4PecClass *pecc = PNV_PHB4_PEC_CLASS(klass); + static const char compat[] = "ibm,power9-pbcq"; + static const char stk_compat[] = "ibm,power9-phb-stack"; + + xdc->dt_xscom = pnv_pec_dt_xscom; + + dc->realize = pnv_pec_realize; + device_class_set_props(dc, pnv_pec_properties); + dc->user_creatable = false; + + pecc->xscom_nest_base = pnv_pec_xscom_nest_base; + pecc->xscom_pci_base = pnv_pec_xscom_pci_base; + pecc->xscom_nest_size = PNV9_XSCOM_PEC_NEST_SIZE; + pecc->xscom_pci_size = PNV9_XSCOM_PEC_PCI_SIZE; + pecc->compat = compat; + pecc->compat_size = sizeof(compat); + pecc->stk_compat = stk_compat; + pecc->stk_compat_size = sizeof(stk_compat); +} + +static const TypeInfo pnv_pec_type_info = { + .name = TYPE_PNV_PHB4_PEC, + .parent = TYPE_DEVICE, + .instance_size = sizeof(PnvPhb4PecState), + .instance_init = pnv_pec_instance_init, + .class_init = pnv_pec_class_init, + .class_size = sizeof(PnvPhb4PecClass), + .interfaces = (InterfaceInfo[]) { + { TYPE_PNV_XSCOM_INTERFACE }, + { } + } +}; + +static void pnv_pec_stk_instance_init(Object *obj) +{ + PnvPhb4PecStack *stack = PNV_PHB4_PEC_STACK(obj); + + object_initialize_child(obj, "phb", &stack->phb, TYPE_PNV_PHB4); +} + +static void pnv_pec_stk_realize(DeviceState *dev, Error **errp) +{ + PnvPhb4PecStack *stack = PNV_PHB4_PEC_STACK(dev); + PnvPhb4PecState *pec = stack->pec; + char name[64]; + + assert(pec); + + /* Initialize the XSCOM regions for the stack registers */ + snprintf(name, sizeof(name), "xscom-pec-%d.%d-nest-stack-%d", + pec->chip_id, pec->index, stack->stack_no); + pnv_xscom_region_init(&stack->nest_regs_mr, OBJECT(stack), + &pnv_pec_stk_nest_xscom_ops, stack, name, + PHB4_PEC_NEST_STK_REGS_COUNT); + + snprintf(name, sizeof(name), "xscom-pec-%d.%d-pci-stack-%d", + pec->chip_id, pec->index, stack->stack_no); + pnv_xscom_region_init(&stack->pci_regs_mr, OBJECT(stack), + &pnv_pec_stk_pci_xscom_ops, stack, name, + PHB4_PEC_PCI_STK_REGS_COUNT); + + /* PHB pass-through */ + snprintf(name, sizeof(name), "xscom-pec-%d.%d-pci-stack-%d-phb", + pec->chip_id, pec->index, stack->stack_no); + pnv_xscom_region_init(&stack->phb_regs_mr, OBJECT(&stack->phb), + &pnv_phb4_xscom_ops, &stack->phb, name, 0x40); + + /* + * Let the machine/chip realize the PHB object to customize more + * easily some fields + */ +} + +static Property pnv_pec_stk_properties[] = { + DEFINE_PROP_UINT32("stack-no", PnvPhb4PecStack, stack_no, 0), + DEFINE_PROP_LINK("pec", PnvPhb4PecStack, pec, TYPE_PNV_PHB4_PEC, + PnvPhb4PecState *), + DEFINE_PROP_END_OF_LIST(), +}; + +static void pnv_pec_stk_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + device_class_set_props(dc, 
pnv_pec_stk_properties); + dc->realize = pnv_pec_stk_realize; + dc->user_creatable = false; + + /* TODO: reset regs ? */ +} + +static const TypeInfo pnv_pec_stk_type_info = { + .name = TYPE_PNV_PHB4_PEC_STACK, + .parent = TYPE_DEVICE, + .instance_size = sizeof(PnvPhb4PecStack), + .instance_init = pnv_pec_stk_instance_init, + .class_init = pnv_pec_stk_class_init, + .interfaces = (InterfaceInfo[]) { + { TYPE_PNV_XSCOM_INTERFACE }, + { } + } +}; + +static void pnv_pec_register_types(void) +{ + type_register_static(&pnv_pec_type_info); + type_register_static(&pnv_pec_stk_type_info); +} + +type_init(pnv_pec_register_types); diff --git a/hw/pci-host/ppce500.c b/hw/pci-host/ppce500.c new file mode 100644 index 000000000..89c1b53dd --- /dev/null +++ b/hw/pci-host/ppce500.c @@ -0,0 +1,547 @@ +/* + * QEMU PowerPC E500 embedded processors pci controller emulation + * + * Copyright (C) 2009 Freescale Semiconductor, Inc. All rights reserved. + * + * Author: Yu Liu, + * + * This file is derived from hw/ppc4xx_pci.c, + * the copyright for that material belongs to the original owners. + * + * This is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include "qemu/osdep.h" +#include "hw/irq.h" +#include "hw/ppc/e500-ccsr.h" +#include "hw/qdev-properties.h" +#include "migration/vmstate.h" +#include "hw/pci/pci.h" +#include "hw/pci/pci_host.h" +#include "qemu/bswap.h" +#include "qemu/module.h" +#include "hw/pci-host/ppce500.h" +#include "qom/object.h" + +#ifdef DEBUG_PCI +#define pci_debug(fmt, ...) fprintf(stderr, fmt, ## __VA_ARGS__) +#else +#define pci_debug(fmt, ...) +#endif + +#define PCIE500_CFGADDR 0x0 +#define PCIE500_CFGDATA 0x4 +#define PCIE500_REG_BASE 0xC00 +#define PCIE500_ALL_SIZE 0x1000 +#define PCIE500_REG_SIZE (PCIE500_ALL_SIZE - PCIE500_REG_BASE) + +#define PCIE500_PCI_IOLEN 0x10000ULL + +#define PPCE500_PCI_CONFIG_ADDR 0x0 +#define PPCE500_PCI_CONFIG_DATA 0x4 +#define PPCE500_PCI_INTACK 0x8 + +#define PPCE500_PCI_OW1 (0xC20 - PCIE500_REG_BASE) +#define PPCE500_PCI_OW2 (0xC40 - PCIE500_REG_BASE) +#define PPCE500_PCI_OW3 (0xC60 - PCIE500_REG_BASE) +#define PPCE500_PCI_OW4 (0xC80 - PCIE500_REG_BASE) +#define PPCE500_PCI_IW3 (0xDA0 - PCIE500_REG_BASE) +#define PPCE500_PCI_IW2 (0xDC0 - PCIE500_REG_BASE) +#define PPCE500_PCI_IW1 (0xDE0 - PCIE500_REG_BASE) + +#define PPCE500_PCI_GASKET_TIMR (0xE20 - PCIE500_REG_BASE) + +#define PCI_POTAR 0x0 +#define PCI_POTEAR 0x4 +#define PCI_POWBAR 0x8 +#define PCI_POWAR 0x10 + +#define PCI_PITAR 0x0 +#define PCI_PIWBAR 0x8 +#define PCI_PIWBEAR 0xC +#define PCI_PIWAR 0x10 + +#define PPCE500_PCI_NR_POBS 5 +#define PPCE500_PCI_NR_PIBS 3 + +#define PIWAR_EN 0x80000000 /* Enable */ +#define PIWAR_PF 0x20000000 /* prefetch */ +#define PIWAR_TGI_LOCAL 0x00f00000 /* target - local memory */ +#define PIWAR_READ_SNOOP 0x00050000 +#define PIWAR_WRITE_SNOOP 0x00005000 +#define PIWAR_SZ_MASK 0x0000003f + +struct pci_outbound { + uint32_t potar; + uint32_t potear; + uint32_t powbar; + uint32_t powar; + MemoryRegion mem; +}; + +struct pci_inbound { + uint32_t pitar; + uint32_t piwbar; + uint32_t piwbear; + uint32_t piwar; + MemoryRegion mem; +}; + +#define TYPE_PPC_E500_PCI_HOST_BRIDGE "e500-pcihost" + +OBJECT_DECLARE_SIMPLE_TYPE(PPCE500PCIState, PPC_E500_PCI_HOST_BRIDGE) + +struct PPCE500PCIState { + PCIHostState parent_obj; + + struct pci_outbound pob[PPCE500_PCI_NR_POBS]; + 
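/* inbound windows map PCI initiated (DMA) accesses to system memory */ + 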
struct pci_inbound pib[PPCE500_PCI_NR_PIBS]; + uint32_t gasket_time; + qemu_irq irq[PCI_NUM_PINS]; + uint32_t irq_num[PCI_NUM_PINS]; + uint32_t first_slot; + uint32_t first_pin_irq; + AddressSpace bm_as; + MemoryRegion bm; + /* mmio maps */ + MemoryRegion container; + MemoryRegion iomem; + MemoryRegion pio; + MemoryRegion busmem; +}; + +#define TYPE_PPC_E500_PCI_BRIDGE "e500-host-bridge" +OBJECT_DECLARE_SIMPLE_TYPE(PPCE500PCIBridgeState, PPC_E500_PCI_BRIDGE) + +struct PPCE500PCIBridgeState { + /*< private >*/ + PCIDevice parent; + /*< public >*/ + + MemoryRegion bar0; +}; + + +static uint64_t pci_reg_read4(void *opaque, hwaddr addr, + unsigned size) +{ + PPCE500PCIState *pci = opaque; + unsigned long win; + uint32_t value = 0; + int idx; + + win = addr & 0xfe0; + + switch (win) { + case PPCE500_PCI_OW1: + case PPCE500_PCI_OW2: + case PPCE500_PCI_OW3: + case PPCE500_PCI_OW4: + idx = (addr >> 5) & 0x7; + switch (addr & 0x1F) { + case PCI_POTAR: + value = pci->pob[idx].potar; + break; + case PCI_POTEAR: + value = pci->pob[idx].potear; + break; + case PCI_POWBAR: + value = pci->pob[idx].powbar; + break; + case PCI_POWAR: + value = pci->pob[idx].powar; + break; + default: + break; + } + break; + + case PPCE500_PCI_IW3: + case PPCE500_PCI_IW2: + case PPCE500_PCI_IW1: + idx = ((addr >> 5) & 0x3) - 1; + switch (addr & 0x1F) { + case PCI_PITAR: + value = pci->pib[idx].pitar; + break; + case PCI_PIWBAR: + value = pci->pib[idx].piwbar; + break; + case PCI_PIWBEAR: + value = pci->pib[idx].piwbear; + break; + case PCI_PIWAR: + value = pci->pib[idx].piwar; + break; + default: + break; + }; + break; + + case PPCE500_PCI_GASKET_TIMR: + value = pci->gasket_time; + break; + + default: + break; + } + + pci_debug("%s: win:%lx(addr:" TARGET_FMT_plx ") -> value:%x\n", __func__, + win, addr, value); + return value; +} + +/* DMA mapping */ +static void e500_update_piw(PPCE500PCIState *pci, int idx) +{ + uint64_t tar = ((uint64_t)pci->pib[idx].pitar) << 12; + uint64_t wbar = ((uint64_t)pci->pib[idx].piwbar) << 12; + uint64_t war = pci->pib[idx].piwar; + uint64_t size = 2ULL << (war & PIWAR_SZ_MASK); + MemoryRegion *address_space_mem = get_system_memory(); + MemoryRegion *mem = &pci->pib[idx].mem; + MemoryRegion *bm = &pci->bm; + char *name; + + if (memory_region_is_mapped(mem)) { + /* Before we modify anything, unmap and destroy the region */ + memory_region_del_subregion(bm, mem); + object_unparent(OBJECT(mem)); + } + + if (!(war & PIWAR_EN)) { + /* Not enabled, nothing to do */ + return; + } + + name = g_strdup_printf("PCI Inbound Window %d", idx); + memory_region_init_alias(mem, OBJECT(pci), name, address_space_mem, tar, + size); + memory_region_add_subregion_overlap(bm, wbar, mem, -1); + g_free(name); + + pci_debug("%s: Added window of size=%#lx from PCI=%#lx to CPU=%#lx\n", + __func__, size, wbar, tar); +} + +/* BAR mapping */ +static void e500_update_pow(PPCE500PCIState *pci, int idx) +{ + uint64_t tar = ((uint64_t)pci->pob[idx].potar) << 12; + uint64_t wbar = ((uint64_t)pci->pob[idx].powbar) << 12; + uint64_t war = pci->pob[idx].powar; + uint64_t size = 2ULL << (war & PIWAR_SZ_MASK); + MemoryRegion *mem = &pci->pob[idx].mem; + MemoryRegion *address_space_mem = get_system_memory(); + char *name; + + if (memory_region_is_mapped(mem)) { + /* Before we modify anything, unmap and destroy the region */ + memory_region_del_subregion(address_space_mem, mem); + object_unparent(OBJECT(mem)); + } + + if (!(war & PIWAR_EN)) { + /* Not enabled, nothing to do */ + return; + } + + name = g_strdup_printf("PCI Outbound 
Window %d", idx); + memory_region_init_alias(mem, OBJECT(pci), name, &pci->busmem, tar, + size); + memory_region_add_subregion(address_space_mem, wbar, mem); + g_free(name); + + pci_debug("%s: Added window of size=%#lx from CPU=%#lx to PCI=%#lx\n", + __func__, size, wbar, tar); +} + +static void pci_reg_write4(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + PPCE500PCIState *pci = opaque; + unsigned long win; + int idx; + + win = addr & 0xfe0; + + pci_debug("%s: value:%x -> win:%lx(addr:" TARGET_FMT_plx ")\n", + __func__, (unsigned)value, win, addr); + + switch (win) { + case PPCE500_PCI_OW1: + case PPCE500_PCI_OW2: + case PPCE500_PCI_OW3: + case PPCE500_PCI_OW4: + idx = (addr >> 5) & 0x7; + switch (addr & 0x1F) { + case PCI_POTAR: + pci->pob[idx].potar = value; + e500_update_pow(pci, idx); + break; + case PCI_POTEAR: + pci->pob[idx].potear = value; + e500_update_pow(pci, idx); + break; + case PCI_POWBAR: + pci->pob[idx].powbar = value; + e500_update_pow(pci, idx); + break; + case PCI_POWAR: + pci->pob[idx].powar = value; + e500_update_pow(pci, idx); + break; + default: + break; + }; + break; + + case PPCE500_PCI_IW3: + case PPCE500_PCI_IW2: + case PPCE500_PCI_IW1: + idx = ((addr >> 5) & 0x3) - 1; + switch (addr & 0x1F) { + case PCI_PITAR: + pci->pib[idx].pitar = value; + e500_update_piw(pci, idx); + break; + case PCI_PIWBAR: + pci->pib[idx].piwbar = value; + e500_update_piw(pci, idx); + break; + case PCI_PIWBEAR: + pci->pib[idx].piwbear = value; + e500_update_piw(pci, idx); + break; + case PCI_PIWAR: + pci->pib[idx].piwar = value; + e500_update_piw(pci, idx); + break; + default: + break; + }; + break; + + case PPCE500_PCI_GASKET_TIMR: + pci->gasket_time = value; + break; + + default: + break; + }; +} + +static const MemoryRegionOps e500_pci_reg_ops = { + .read = pci_reg_read4, + .write = pci_reg_write4, + .endianness = DEVICE_BIG_ENDIAN, +}; + +static int mpc85xx_pci_map_irq(PCIDevice *pci_dev, int pin) +{ + int devno = PCI_SLOT(pci_dev->devfn); + int ret; + + ret = ppce500_pci_map_irq_slot(devno, pin); + + pci_debug("%s: devfn %x irq %d -> %d devno:%x\n", __func__, + pci_dev->devfn, pin, ret, devno); + + return ret; +} + +static void mpc85xx_pci_set_irq(void *opaque, int pin, int level) +{ + PPCE500PCIState *s = opaque; + qemu_irq *pic = s->irq; + + pci_debug("%s: PCI irq %d, level:%d\n", __func__, pin , level); + + qemu_set_irq(pic[pin], level); +} + +static PCIINTxRoute e500_route_intx_pin_to_irq(void *opaque, int pin) +{ + PCIINTxRoute route; + PPCE500PCIState *s = opaque; + + route.mode = PCI_INTX_ENABLED; + route.irq = s->irq_num[pin]; + + pci_debug("%s: PCI irq-pin = %d, irq_num= %d\n", __func__, pin, route.irq); + return route; +} + +static const VMStateDescription vmstate_pci_outbound = { + .name = "pci_outbound", + .version_id = 0, + .minimum_version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_UINT32(potar, struct pci_outbound), + VMSTATE_UINT32(potear, struct pci_outbound), + VMSTATE_UINT32(powbar, struct pci_outbound), + VMSTATE_UINT32(powar, struct pci_outbound), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_pci_inbound = { + .name = "pci_inbound", + .version_id = 0, + .minimum_version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_UINT32(pitar, struct pci_inbound), + VMSTATE_UINT32(piwbar, struct pci_inbound), + VMSTATE_UINT32(piwbear, struct pci_inbound), + VMSTATE_UINT32(piwar, struct pci_inbound), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_ppce500_pci = { + .name = "ppce500_pci", + 
.version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_STRUCT_ARRAY(pob, PPCE500PCIState, PPCE500_PCI_NR_POBS, 1, + vmstate_pci_outbound, struct pci_outbound), + VMSTATE_STRUCT_ARRAY(pib, PPCE500PCIState, PPCE500_PCI_NR_PIBS, 1, + vmstate_pci_inbound, struct pci_inbound), + VMSTATE_UINT32(gasket_time, PPCE500PCIState), + VMSTATE_END_OF_LIST() + } +}; + + +static void e500_pcihost_bridge_realize(PCIDevice *d, Error **errp) +{ + PPCE500PCIBridgeState *b = PPC_E500_PCI_BRIDGE(d); + PPCE500CCSRState *ccsr = CCSR(container_get(qdev_get_machine(), + "/e500-ccsr")); + + memory_region_init_alias(&b->bar0, OBJECT(ccsr), "e500-pci-bar0", &ccsr->ccsr_space, + 0, int128_get64(ccsr->ccsr_space.size)); + pci_register_bar(d, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &b->bar0); +} + +static AddressSpace *e500_pcihost_set_iommu(PCIBus *bus, void *opaque, + int devfn) +{ + PPCE500PCIState *s = opaque; + + return &s->bm_as; +} + +static void e500_pcihost_realize(DeviceState *dev, Error **errp) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + PCIHostState *h; + PPCE500PCIState *s; + PCIBus *b; + int i; + + h = PCI_HOST_BRIDGE(dev); + s = PPC_E500_PCI_HOST_BRIDGE(dev); + + for (i = 0; i < ARRAY_SIZE(s->irq); i++) { + sysbus_init_irq(sbd, &s->irq[i]); + } + + for (i = 0; i < PCI_NUM_PINS; i++) { + s->irq_num[i] = s->first_pin_irq + i; + } + + memory_region_init(&s->pio, OBJECT(s), "pci-pio", PCIE500_PCI_IOLEN); + memory_region_init(&s->busmem, OBJECT(s), "pci bus memory", UINT64_MAX); + + /* PIO lives at the bottom of our bus space */ + memory_region_add_subregion_overlap(&s->busmem, 0, &s->pio, -2); + + b = pci_register_root_bus(dev, NULL, mpc85xx_pci_set_irq, + mpc85xx_pci_map_irq, s, &s->busmem, &s->pio, + PCI_DEVFN(s->first_slot, 0), 4, TYPE_PCI_BUS); + h->bus = b; + + /* Set up PCI view of memory */ + memory_region_init(&s->bm, OBJECT(s), "bm-e500", UINT64_MAX); + memory_region_add_subregion(&s->bm, 0x0, &s->busmem); + address_space_init(&s->bm_as, &s->bm, "pci-bm"); + pci_setup_iommu(b, e500_pcihost_set_iommu, s); + + pci_create_simple(b, 0, "e500-host-bridge"); + + memory_region_init(&s->container, OBJECT(h), "pci-container", PCIE500_ALL_SIZE); + memory_region_init_io(&h->conf_mem, OBJECT(h), &pci_host_conf_be_ops, h, + "pci-conf-idx", 4); + memory_region_init_io(&h->data_mem, OBJECT(h), &pci_host_data_le_ops, h, + "pci-conf-data", 4); + memory_region_init_io(&s->iomem, OBJECT(s), &e500_pci_reg_ops, s, + "pci.reg", PCIE500_REG_SIZE); + memory_region_add_subregion(&s->container, PCIE500_CFGADDR, &h->conf_mem); + memory_region_add_subregion(&s->container, PCIE500_CFGDATA, &h->data_mem); + memory_region_add_subregion(&s->container, PCIE500_REG_BASE, &s->iomem); + sysbus_init_mmio(sbd, &s->container); + pci_bus_set_route_irq_fn(b, e500_route_intx_pin_to_irq); +} + +static void e500_host_bridge_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + + k->realize = e500_pcihost_bridge_realize; + k->vendor_id = PCI_VENDOR_ID_FREESCALE; + k->device_id = PCI_DEVICE_ID_MPC8533E; + k->class_id = PCI_CLASS_PROCESSOR_POWERPC; + dc->desc = "Host bridge"; + /* + * PCI-facing part of the host bridge, not usable without the + * host-facing part, which can't be device_add'ed, yet. 
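+ * It also needs the machine's "/e500-ccsr" container to back its BAR 0, + * which only the board code provides.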
+ */ + dc->user_creatable = false; +} + +static const TypeInfo e500_host_bridge_info = { + .name = TYPE_PPC_E500_PCI_BRIDGE, + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(PPCE500PCIBridgeState), + .class_init = e500_host_bridge_class_init, + .interfaces = (InterfaceInfo[]) { + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { }, + }, +}; + +static Property pcihost_properties[] = { + DEFINE_PROP_UINT32("first_slot", PPCE500PCIState, first_slot, 0x11), + DEFINE_PROP_UINT32("first_pin_irq", PPCE500PCIState, first_pin_irq, 0x1), + DEFINE_PROP_END_OF_LIST(), +}; + +static void e500_pcihost_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = e500_pcihost_realize; + set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); + device_class_set_props(dc, pcihost_properties); + dc->vmsd = &vmstate_ppce500_pci; +} + +static const TypeInfo e500_pcihost_info = { + .name = TYPE_PPC_E500_PCI_HOST_BRIDGE, + .parent = TYPE_PCI_HOST_BRIDGE, + .instance_size = sizeof(PPCE500PCIState), + .class_init = e500_pcihost_class_init, +}; + +static void e500_pci_register_types(void) +{ + type_register_static(&e500_pcihost_info); + type_register_static(&e500_host_bridge_info); +} + +type_init(e500_pci_register_types) diff --git a/hw/pci-host/q35.c b/hw/pci-host/q35.c new file mode 100644 index 000000000..ab5a47aff --- /dev/null +++ b/hw/pci-host/q35.c @@ -0,0 +1,721 @@ +/* + * QEMU MCH/ICH9 PCI Bridge Emulation + * + * Copyright (c) 2006 Fabrice Bellard + * Copyright (c) 2009, 2010, 2011 + * Isaku Yamahata + * VA Linux Systems Japan K.K. + * Copyright (C) 2012 Jason Baron + * + * This is based on piix.c, but heavily modified. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "hw/i386/pc.h" +#include "hw/pci-host/q35.h" +#include "hw/qdev-properties.h" +#include "migration/vmstate.h" +#include "qapi/error.h" +#include "qapi/visitor.h" +#include "qemu/module.h" + +/**************************************************************************** + * Q35 host + */ + +#define Q35_PCI_HOST_HOLE64_SIZE_DEFAULT (1ULL << 35) + +static void q35_host_realize(DeviceState *dev, Error **errp) +{ + PCIHostState *pci = PCI_HOST_BRIDGE(dev); + Q35PCIHost *s = Q35_HOST_DEVICE(dev); + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + + sysbus_add_io(sbd, MCH_HOST_BRIDGE_CONFIG_ADDR, &pci->conf_mem); + sysbus_init_ioports(sbd, MCH_HOST_BRIDGE_CONFIG_ADDR, 4); + + sysbus_add_io(sbd, MCH_HOST_BRIDGE_CONFIG_DATA, &pci->data_mem); + sysbus_init_ioports(sbd, MCH_HOST_BRIDGE_CONFIG_DATA, 4); + + /* register q35 0xcf8 port as coalesced pio */ + memory_region_set_flush_coalesced(&pci->data_mem); + memory_region_add_coalescing(&pci->conf_mem, 0, 4); + + pci->bus = pci_root_bus_new(DEVICE(s), "pcie.0", + s->mch.pci_address_space, + s->mch.address_space_io, + 0, TYPE_PCIE_BUS); + PC_MACHINE(qdev_get_machine())->bus = pci->bus; + pci->bypass_iommu = + PC_MACHINE(qdev_get_machine())->default_bus_bypass_iommu; + qdev_realize(DEVICE(&s->mch), BUS(pci->bus), &error_fatal); +} + +static const char *q35_host_root_bus_path(PCIHostState *host_bridge, + PCIBus *rootbus) +{ + Q35PCIHost *s = Q35_HOST_DEVICE(host_bridge); + + /* For backwards compat with old device paths */ + if (s->mch.short_root_bus) { + return "0000"; + } + return "0000:00"; +} + +static void q35_host_get_pci_hole_start(Object *obj, Visitor *v, + const char *name, void *opaque, + Error **errp) +{ + Q35PCIHost *s = Q35_HOST_DEVICE(obj); + uint64_t val64; + uint32_t value; + + val64 = range_is_empty(&s->mch.pci_hole) + ? 0 : range_lob(&s->mch.pci_hole); + value = val64; + assert(value == val64); + visit_type_uint32(v, name, &value, errp); +} + +static void q35_host_get_pci_hole_end(Object *obj, Visitor *v, + const char *name, void *opaque, + Error **errp) +{ + Q35PCIHost *s = Q35_HOST_DEVICE(obj); + uint64_t val64; + uint32_t value; + + val64 = range_is_empty(&s->mch.pci_hole) + ? 0 : range_upb(&s->mch.pci_hole) + 1; + value = val64; + assert(value == val64); + visit_type_uint32(v, name, &value, errp); +} + +/* + * The 64bit PCI hole start is set by the Guest firmware + * as the address of the first 64bit PCI MEM resource. + * If no PCI device has resources on the 64bit area, + * the 64bit PCI hole will start after "over 4G RAM" and the + * reserved space for memory hotplug if any. + */ +static uint64_t q35_host_get_pci_hole64_start_value(Object *obj) +{ + PCIHostState *h = PCI_HOST_BRIDGE(obj); + Q35PCIHost *s = Q35_HOST_DEVICE(obj); + Range w64; + uint64_t value; + + pci_bus_get_w64_range(h->bus, &w64); + value = range_is_empty(&w64) ? 0 : range_lob(&w64); + if (!value && s->pci_hole64_fix) { + value = pc_pci_hole64_start(); + } + return value; +} + +static void q35_host_get_pci_hole64_start(Object *obj, Visitor *v, + const char *name, void *opaque, + Error **errp) +{ + uint64_t hole64_start = q35_host_get_pci_hole64_start_value(obj); + + visit_type_uint64(v, name, &hole64_start, errp); +} + +/* + * The 64bit PCI hole end is set by the Guest firmware + * as the address of the last 64bit PCI MEM resource. + * Then it is expanded to the PCI_HOST_PROP_PCI_HOLE64_SIZE + * that can be configured by the user. 
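+ * The resulting end address is also aligned up to a 1 GiB boundary, see + * the ROUND_UP() below.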
+ */ +static void q35_host_get_pci_hole64_end(Object *obj, Visitor *v, + const char *name, void *opaque, + Error **errp) +{ + PCIHostState *h = PCI_HOST_BRIDGE(obj); + Q35PCIHost *s = Q35_HOST_DEVICE(obj); + uint64_t hole64_start = q35_host_get_pci_hole64_start_value(obj); + Range w64; + uint64_t value, hole64_end; + + pci_bus_get_w64_range(h->bus, &w64); + value = range_is_empty(&w64) ? 0 : range_upb(&w64) + 1; + hole64_end = ROUND_UP(hole64_start + s->mch.pci_hole64_size, 1ULL << 30); + if (s->pci_hole64_fix && value < hole64_end) { + value = hole64_end; + } + visit_type_uint64(v, name, &value, errp); +} + +/* + * NOTE: setting defaults for the mch.* fields in this table + * doesn't work, because mch is a separate QOM object that is + * zeroed by the object_initialize(&s->mch, ...) call inside + * q35_host_initfn(). The default values for those + * properties need to be initialized manually by + * q35_host_initfn() after the object_initialize() call. + */ +static Property q35_host_props[] = { + DEFINE_PROP_UINT64(PCIE_HOST_MCFG_BASE, Q35PCIHost, parent_obj.base_addr, + MCH_HOST_BRIDGE_PCIEXBAR_DEFAULT), + DEFINE_PROP_SIZE(PCI_HOST_PROP_PCI_HOLE64_SIZE, Q35PCIHost, + mch.pci_hole64_size, Q35_PCI_HOST_HOLE64_SIZE_DEFAULT), + DEFINE_PROP_UINT32("short_root_bus", Q35PCIHost, mch.short_root_bus, 0), + DEFINE_PROP_SIZE(PCI_HOST_BELOW_4G_MEM_SIZE, Q35PCIHost, + mch.below_4g_mem_size, 0), + DEFINE_PROP_SIZE(PCI_HOST_ABOVE_4G_MEM_SIZE, Q35PCIHost, + mch.above_4g_mem_size, 0), + DEFINE_PROP_BOOL("x-pci-hole64-fix", Q35PCIHost, pci_hole64_fix, true), + DEFINE_PROP_END_OF_LIST(), +}; + +static void q35_host_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIHostBridgeClass *hc = PCI_HOST_BRIDGE_CLASS(klass); + + hc->root_bus_path = q35_host_root_bus_path; + dc->realize = q35_host_realize; + device_class_set_props(dc, q35_host_props); + /* Reason: needs to be wired up by pc_q35_init */ + dc->user_creatable = false; + set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); + dc->fw_name = "pci"; +} + +static void q35_host_initfn(Object *obj) +{ + Q35PCIHost *s = Q35_HOST_DEVICE(obj); + PCIHostState *phb = PCI_HOST_BRIDGE(obj); + PCIExpressHost *pehb = PCIE_HOST_BRIDGE(obj); + + memory_region_init_io(&phb->conf_mem, obj, &pci_host_conf_le_ops, phb, + "pci-conf-idx", 4); + memory_region_init_io(&phb->data_mem, obj, &pci_host_data_le_ops, phb, + "pci-conf-data", 4); + + object_initialize_child(OBJECT(s), "mch", &s->mch, TYPE_MCH_PCI_DEVICE); + qdev_prop_set_int32(DEVICE(&s->mch), "addr", PCI_DEVFN(0, 0)); + qdev_prop_set_bit(DEVICE(&s->mch), "multifunction", false); + /* mch's object_initialize resets the default value, set it again */ + qdev_prop_set_uint64(DEVICE(s), PCI_HOST_PROP_PCI_HOLE64_SIZE, + Q35_PCI_HOST_HOLE64_SIZE_DEFAULT); + object_property_add(obj, PCI_HOST_PROP_PCI_HOLE_START, "uint32", + q35_host_get_pci_hole_start, + NULL, NULL, NULL); + + object_property_add(obj, PCI_HOST_PROP_PCI_HOLE_END, "uint32", + q35_host_get_pci_hole_end, + NULL, NULL, NULL); + + object_property_add(obj, PCI_HOST_PROP_PCI_HOLE64_START, "uint64", + q35_host_get_pci_hole64_start, + NULL, NULL, NULL); + + object_property_add(obj, PCI_HOST_PROP_PCI_HOLE64_END, "uint64", + q35_host_get_pci_hole64_end, + NULL, NULL, NULL); + + object_property_add_uint64_ptr(obj, PCIE_HOST_MCFG_SIZE, + &pehb->size, OBJ_PROP_FLAG_READ); + + object_property_add_link(obj, MCH_HOST_PROP_RAM_MEM, TYPE_MEMORY_REGION, + (Object **) &s->mch.ram_memory, + qdev_prop_allow_set_link_before_realize, 0); + + 
object_property_add_link(obj, MCH_HOST_PROP_PCI_MEM, TYPE_MEMORY_REGION, + (Object **) &s->mch.pci_address_space, + qdev_prop_allow_set_link_before_realize, 0); + + object_property_add_link(obj, MCH_HOST_PROP_SYSTEM_MEM, TYPE_MEMORY_REGION, + (Object **) &s->mch.system_memory, + qdev_prop_allow_set_link_before_realize, 0); + + object_property_add_link(obj, MCH_HOST_PROP_IO_MEM, TYPE_MEMORY_REGION, + (Object **) &s->mch.address_space_io, + qdev_prop_allow_set_link_before_realize, 0); +} + +static const TypeInfo q35_host_info = { + .name = TYPE_Q35_HOST_DEVICE, + .parent = TYPE_PCIE_HOST_BRIDGE, + .instance_size = sizeof(Q35PCIHost), + .instance_init = q35_host_initfn, + .class_init = q35_host_class_init, +}; + +/**************************************************************************** + * MCH D0:F0 + */ + +static uint64_t blackhole_read(void *ptr, hwaddr reg, unsigned size) +{ + return 0xffffffff; +} + +static void blackhole_write(void *opaque, hwaddr addr, uint64_t val, + unsigned width) +{ + /* nothing */ +} + +static const MemoryRegionOps blackhole_ops = { + .read = blackhole_read, + .write = blackhole_write, + .valid.min_access_size = 1, + .valid.max_access_size = 4, + .impl.min_access_size = 4, + .impl.max_access_size = 4, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +/* PCIe MMCFG */ +static void mch_update_pciexbar(MCHPCIState *mch) +{ + PCIDevice *pci_dev = PCI_DEVICE(mch); + BusState *bus = qdev_get_parent_bus(DEVICE(mch)); + PCIExpressHost *pehb = PCIE_HOST_BRIDGE(bus->parent); + + uint64_t pciexbar; + int enable; + uint64_t addr; + uint64_t addr_mask; + uint32_t length; + + pciexbar = pci_get_quad(pci_dev->config + MCH_HOST_BRIDGE_PCIEXBAR); + enable = pciexbar & MCH_HOST_BRIDGE_PCIEXBAREN; + addr_mask = MCH_HOST_BRIDGE_PCIEXBAR_ADMSK; + switch (pciexbar & MCH_HOST_BRIDGE_PCIEXBAR_LENGTH_MASK) { + case MCH_HOST_BRIDGE_PCIEXBAR_LENGTH_256M: + length = 256 * 1024 * 1024; + break; + case MCH_HOST_BRIDGE_PCIEXBAR_LENGTH_128M: + length = 128 * 1024 * 1024; + addr_mask |= MCH_HOST_BRIDGE_PCIEXBAR_128ADMSK | + MCH_HOST_BRIDGE_PCIEXBAR_64ADMSK; + break; + case MCH_HOST_BRIDGE_PCIEXBAR_LENGTH_64M: + length = 64 * 1024 * 1024; + addr_mask |= MCH_HOST_BRIDGE_PCIEXBAR_64ADMSK; + break; + case MCH_HOST_BRIDGE_PCIEXBAR_LENGTH_RVD: + qemu_log_mask(LOG_GUEST_ERROR, "Q35: Reserved PCIEXBAR LENGTH\n"); + return; + default: + abort(); + } + addr = pciexbar & addr_mask; + pcie_host_mmcfg_update(pehb, enable, addr, length); +} + +/* PAM */ +static void mch_update_pam(MCHPCIState *mch) +{ + PCIDevice *pd = PCI_DEVICE(mch); + int i; + + memory_region_transaction_begin(); + for (i = 0; i < 13; i++) { + pam_update(&mch->pam_regions[i], i, + pd->config[MCH_HOST_BRIDGE_PAM0 + DIV_ROUND_UP(i, 2)]); + } + memory_region_transaction_commit(); +} + +/* SMRAM */ +static void mch_update_smram(MCHPCIState *mch) +{ + PCIDevice *pd = PCI_DEVICE(mch); + bool h_smrame = (pd->config[MCH_HOST_BRIDGE_ESMRAMC] & MCH_HOST_BRIDGE_ESMRAMC_H_SMRAME); + uint32_t tseg_size; + + /* implement SMRAM.D_LCK */ + if (pd->config[MCH_HOST_BRIDGE_SMRAM] & MCH_HOST_BRIDGE_SMRAM_D_LCK) { + pd->config[MCH_HOST_BRIDGE_SMRAM] &= ~MCH_HOST_BRIDGE_SMRAM_D_OPEN; + pd->wmask[MCH_HOST_BRIDGE_SMRAM] = MCH_HOST_BRIDGE_SMRAM_WMASK_LCK; + pd->wmask[MCH_HOST_BRIDGE_ESMRAMC] = MCH_HOST_BRIDGE_ESMRAMC_WMASK_LCK; + } + + memory_region_transaction_begin(); + + if (pd->config[MCH_HOST_BRIDGE_SMRAM] & SMRAM_D_OPEN) { + /* Hide (!) 
low SMRAM if H_SMRAME = 1 */ + memory_region_set_enabled(&mch->smram_region, h_smrame); + /* Show high SMRAM if H_SMRAME = 1 */ + memory_region_set_enabled(&mch->open_high_smram, h_smrame); + } else { + /* Hide high SMRAM and low SMRAM */ + memory_region_set_enabled(&mch->smram_region, true); + memory_region_set_enabled(&mch->open_high_smram, false); + } + + if (pd->config[MCH_HOST_BRIDGE_SMRAM] & SMRAM_G_SMRAME) { + memory_region_set_enabled(&mch->low_smram, !h_smrame); + memory_region_set_enabled(&mch->high_smram, h_smrame); + } else { + memory_region_set_enabled(&mch->low_smram, false); + memory_region_set_enabled(&mch->high_smram, false); + } + + if (pd->config[MCH_HOST_BRIDGE_ESMRAMC] & MCH_HOST_BRIDGE_ESMRAMC_T_EN) { + switch (pd->config[MCH_HOST_BRIDGE_ESMRAMC] & + MCH_HOST_BRIDGE_ESMRAMC_TSEG_SZ_MASK) { + case MCH_HOST_BRIDGE_ESMRAMC_TSEG_SZ_1MB: + tseg_size = 1024 * 1024; + break; + case MCH_HOST_BRIDGE_ESMRAMC_TSEG_SZ_2MB: + tseg_size = 1024 * 1024 * 2; + break; + case MCH_HOST_BRIDGE_ESMRAMC_TSEG_SZ_8MB: + tseg_size = 1024 * 1024 * 8; + break; + default: + tseg_size = 1024 * 1024 * (uint32_t)mch->ext_tseg_mbytes; + break; + } + } else { + tseg_size = 0; + } + memory_region_del_subregion(mch->system_memory, &mch->tseg_blackhole); + memory_region_set_enabled(&mch->tseg_blackhole, tseg_size); + memory_region_set_size(&mch->tseg_blackhole, tseg_size); + memory_region_add_subregion_overlap(mch->system_memory, + mch->below_4g_mem_size - tseg_size, + &mch->tseg_blackhole, 1); + + memory_region_set_enabled(&mch->tseg_window, tseg_size); + memory_region_set_size(&mch->tseg_window, tseg_size); + memory_region_set_address(&mch->tseg_window, + mch->below_4g_mem_size - tseg_size); + memory_region_set_alias_offset(&mch->tseg_window, + mch->below_4g_mem_size - tseg_size); + + memory_region_transaction_commit(); +} + +static void mch_update_ext_tseg_mbytes(MCHPCIState *mch) +{ + PCIDevice *pd = PCI_DEVICE(mch); + uint8_t *reg = pd->config + MCH_HOST_BRIDGE_EXT_TSEG_MBYTES; + + if (mch->ext_tseg_mbytes > 0 && + pci_get_word(reg) == MCH_HOST_BRIDGE_EXT_TSEG_MBYTES_QUERY) { + pci_set_word(reg, mch->ext_tseg_mbytes); + } +} + +static void mch_update_smbase_smram(MCHPCIState *mch) +{ + PCIDevice *pd = PCI_DEVICE(mch); + uint8_t *reg = pd->config + MCH_HOST_BRIDGE_F_SMBASE; + bool lck; + + if (!mch->has_smram_at_smbase) { + return; + } + + if (*reg == MCH_HOST_BRIDGE_F_SMBASE_QUERY) { + pd->wmask[MCH_HOST_BRIDGE_F_SMBASE] = + MCH_HOST_BRIDGE_F_SMBASE_LCK; + *reg = MCH_HOST_BRIDGE_F_SMBASE_IN_RAM; + return; + } + + /* + * default/reset state: discard the written value, which would + * disable the SMRAM blackhole at SMBASE + */ + if (pd->wmask[MCH_HOST_BRIDGE_F_SMBASE] == 0xff) { + *reg = 0x00; + } + + memory_region_transaction_begin(); + if (*reg & MCH_HOST_BRIDGE_F_SMBASE_LCK) { + /* disable all writes */ + pd->wmask[MCH_HOST_BRIDGE_F_SMBASE] &= + ~MCH_HOST_BRIDGE_F_SMBASE_LCK; + *reg = MCH_HOST_BRIDGE_F_SMBASE_LCK; + lck = true; + } else { + lck = false; + } + memory_region_set_enabled(&mch->smbase_blackhole, lck); + memory_region_set_enabled(&mch->smbase_window, lck); + memory_region_transaction_commit(); +} + +static void mch_write_config(PCIDevice *d, + uint32_t address, uint32_t val, int len) +{ + MCHPCIState *mch = MCH_PCI_DEVICE(d); + + pci_default_write_config(d, address, val, len); + + if (ranges_overlap(address, len, MCH_HOST_BRIDGE_PAM0, + MCH_HOST_BRIDGE_PAM_SIZE)) { + mch_update_pam(mch); + } + + if (ranges_overlap(address, len, MCH_HOST_BRIDGE_PCIEXBAR, + MCH_HOST_BRIDGE_PCIEXBAR_SIZE)) { + 
mch_update_pciexbar(mch); + } + + if (ranges_overlap(address, len, MCH_HOST_BRIDGE_SMRAM, + MCH_HOST_BRIDGE_SMRAM_SIZE)) { + mch_update_smram(mch); + } + + if (ranges_overlap(address, len, MCH_HOST_BRIDGE_EXT_TSEG_MBYTES, + MCH_HOST_BRIDGE_EXT_TSEG_MBYTES_SIZE)) { + mch_update_ext_tseg_mbytes(mch); + } + + if (ranges_overlap(address, len, MCH_HOST_BRIDGE_F_SMBASE, 1)) { + mch_update_smbase_smram(mch); + } +} + +static void mch_update(MCHPCIState *mch) +{ + mch_update_pciexbar(mch); + mch_update_pam(mch); + mch_update_smram(mch); + mch_update_ext_tseg_mbytes(mch); + mch_update_smbase_smram(mch); + + /* + * pci hole goes from end-of-low-ram to io-apic. + * mmconfig will be excluded by the dsdt builder. + */ + range_set_bounds(&mch->pci_hole, + mch->below_4g_mem_size, + IO_APIC_DEFAULT_ADDRESS - 1); +} + +static int mch_post_load(void *opaque, int version_id) +{ + MCHPCIState *mch = opaque; + mch_update(mch); + return 0; +} + +static const VMStateDescription vmstate_mch = { + .name = "mch", + .version_id = 1, + .minimum_version_id = 1, + .post_load = mch_post_load, + .fields = (VMStateField[]) { + VMSTATE_PCI_DEVICE(parent_obj, MCHPCIState), + /* Used to be smm_enabled, which was basically always zero because + * SeaBIOS hardly uses SMM. SMRAM is now handled by CPU code. + */ + VMSTATE_UNUSED(1), + VMSTATE_END_OF_LIST() + } +}; + +static void mch_reset(DeviceState *qdev) +{ + PCIDevice *d = PCI_DEVICE(qdev); + MCHPCIState *mch = MCH_PCI_DEVICE(d); + + pci_set_quad(d->config + MCH_HOST_BRIDGE_PCIEXBAR, + MCH_HOST_BRIDGE_PCIEXBAR_DEFAULT); + + d->config[MCH_HOST_BRIDGE_SMRAM] = MCH_HOST_BRIDGE_SMRAM_DEFAULT; + d->config[MCH_HOST_BRIDGE_ESMRAMC] = MCH_HOST_BRIDGE_ESMRAMC_DEFAULT; + d->wmask[MCH_HOST_BRIDGE_SMRAM] = MCH_HOST_BRIDGE_SMRAM_WMASK; + d->wmask[MCH_HOST_BRIDGE_ESMRAMC] = MCH_HOST_BRIDGE_ESMRAMC_WMASK; + + if (mch->ext_tseg_mbytes > 0) { + pci_set_word(d->config + MCH_HOST_BRIDGE_EXT_TSEG_MBYTES, + MCH_HOST_BRIDGE_EXT_TSEG_MBYTES_QUERY); + } + + d->config[MCH_HOST_BRIDGE_F_SMBASE] = 0; + d->wmask[MCH_HOST_BRIDGE_F_SMBASE] = 0xff; + + mch_update(mch); +} + +static void mch_realize(PCIDevice *d, Error **errp) +{ + int i; + MCHPCIState *mch = MCH_PCI_DEVICE(d); + + if (mch->ext_tseg_mbytes > MCH_HOST_BRIDGE_EXT_TSEG_MBYTES_MAX) { + error_setg(errp, "invalid extended-tseg-mbytes value: %" PRIu16, + mch->ext_tseg_mbytes); + return; + } + + /* setup pci memory mapping */ + pc_pci_as_mapping_init(OBJECT(mch), mch->system_memory, + mch->pci_address_space); + + /* if *disabled* show SMRAM to all CPUs */ + memory_region_init_alias(&mch->smram_region, OBJECT(mch), "smram-region", + mch->pci_address_space, MCH_HOST_BRIDGE_SMRAM_C_BASE, + MCH_HOST_BRIDGE_SMRAM_C_SIZE); + memory_region_add_subregion_overlap(mch->system_memory, MCH_HOST_BRIDGE_SMRAM_C_BASE, + &mch->smram_region, 1); + memory_region_set_enabled(&mch->smram_region, true); + + memory_region_init_alias(&mch->open_high_smram, OBJECT(mch), "smram-open-high", + mch->ram_memory, MCH_HOST_BRIDGE_SMRAM_C_BASE, + MCH_HOST_BRIDGE_SMRAM_C_SIZE); + memory_region_add_subregion_overlap(mch->system_memory, 0xfeda0000, + &mch->open_high_smram, 1); + memory_region_set_enabled(&mch->open_high_smram, false); + + /* smram, as seen by SMM CPUs */ + memory_region_init(&mch->smram, OBJECT(mch), "smram", 4 * GiB); + memory_region_set_enabled(&mch->smram, true); + memory_region_init_alias(&mch->low_smram, OBJECT(mch), "smram-low", + mch->ram_memory, MCH_HOST_BRIDGE_SMRAM_C_BASE, + MCH_HOST_BRIDGE_SMRAM_C_SIZE); + memory_region_set_enabled(&mch->low_smram, 
true); + memory_region_add_subregion(&mch->smram, MCH_HOST_BRIDGE_SMRAM_C_BASE, + &mch->low_smram); + memory_region_init_alias(&mch->high_smram, OBJECT(mch), "smram-high", + mch->ram_memory, MCH_HOST_BRIDGE_SMRAM_C_BASE, + MCH_HOST_BRIDGE_SMRAM_C_SIZE); + memory_region_set_enabled(&mch->high_smram, true); + memory_region_add_subregion(&mch->smram, 0xfeda0000, &mch->high_smram); + + memory_region_init_io(&mch->tseg_blackhole, OBJECT(mch), + &blackhole_ops, NULL, + "tseg-blackhole", 0); + memory_region_set_enabled(&mch->tseg_blackhole, false); + memory_region_add_subregion_overlap(mch->system_memory, + mch->below_4g_mem_size, + &mch->tseg_blackhole, 1); + + memory_region_init_alias(&mch->tseg_window, OBJECT(mch), "tseg-window", + mch->ram_memory, mch->below_4g_mem_size, 0); + memory_region_set_enabled(&mch->tseg_window, false); + memory_region_add_subregion(&mch->smram, mch->below_4g_mem_size, + &mch->tseg_window); + + /* + * This is not what hardware does, so it's a QEMU-specific hack. + * See commit message for details. + */ + memory_region_init_io(&mch->smbase_blackhole, OBJECT(mch), &blackhole_ops, + NULL, "smbase-blackhole", + MCH_HOST_BRIDGE_SMBASE_SIZE); + memory_region_set_enabled(&mch->smbase_blackhole, false); + memory_region_add_subregion_overlap(mch->system_memory, + MCH_HOST_BRIDGE_SMBASE_ADDR, + &mch->smbase_blackhole, 1); + + memory_region_init_alias(&mch->smbase_window, OBJECT(mch), + "smbase-window", mch->ram_memory, + MCH_HOST_BRIDGE_SMBASE_ADDR, + MCH_HOST_BRIDGE_SMBASE_SIZE); + memory_region_set_enabled(&mch->smbase_window, false); + memory_region_add_subregion(&mch->smram, MCH_HOST_BRIDGE_SMBASE_ADDR, + &mch->smbase_window); + + object_property_add_const_link(qdev_get_machine(), "smram", + OBJECT(&mch->smram)); + + init_pam(DEVICE(mch), mch->ram_memory, mch->system_memory, + mch->pci_address_space, &mch->pam_regions[0], + PAM_BIOS_BASE, PAM_BIOS_SIZE); + for (i = 0; i < ARRAY_SIZE(mch->pam_regions) - 1; ++i) { + init_pam(DEVICE(mch), mch->ram_memory, mch->system_memory, + mch->pci_address_space, &mch->pam_regions[i+1], + PAM_EXPAN_BASE + i * PAM_EXPAN_SIZE, PAM_EXPAN_SIZE); + } +} + +uint64_t mch_mcfg_base(void) +{ + bool ambiguous; + Object *o = object_resolve_path_type("", TYPE_MCH_PCI_DEVICE, &ambiguous); + if (!o) { + return 0; + } + return MCH_HOST_BRIDGE_PCIEXBAR_DEFAULT; +} + +static Property mch_props[] = { + DEFINE_PROP_UINT16("extended-tseg-mbytes", MCHPCIState, ext_tseg_mbytes, + 16), + DEFINE_PROP_BOOL("smbase-smram", MCHPCIState, has_smram_at_smbase, true), + DEFINE_PROP_END_OF_LIST(), +}; + +static void mch_class_init(ObjectClass *klass, void *data) +{ + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + DeviceClass *dc = DEVICE_CLASS(klass); + + k->realize = mch_realize; + k->config_write = mch_write_config; + dc->reset = mch_reset; + device_class_set_props(dc, mch_props); + set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); + dc->desc = "Host bridge"; + dc->vmsd = &vmstate_mch; + k->vendor_id = PCI_VENDOR_ID_INTEL; + /* + * The 'q35' machine type implements an Intel Series 3 chipset, + * of which there are several variants. The key difference between + * the 82P35 MCH ('p35') and 82Q35 GMCH ('q35') variants is that + * the latter has an integrated graphics adapter. QEMU does not + * implement integrated graphics, so it uses the PCI ID for the 82P35 + * chipset.
+ */ + k->device_id = PCI_DEVICE_ID_INTEL_P35_MCH; + k->revision = MCH_HOST_BRIDGE_REVISION_DEFAULT; + k->class_id = PCI_CLASS_BRIDGE_HOST; + /* + * PCI-facing part of the host bridge, not usable without the + * host-facing part, which can't be device_add'ed, yet. + */ + dc->user_creatable = false; +} + +static const TypeInfo mch_info = { + .name = TYPE_MCH_PCI_DEVICE, + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(MCHPCIState), + .class_init = mch_class_init, + .interfaces = (InterfaceInfo[]) { + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { }, + }, +}; + +static void q35_register(void) +{ + type_register_static(&mch_info); + type_register_static(&q35_host_info); +} + +type_init(q35_register); diff --git a/hw/pci-host/raven.c b/hw/pci-host/raven.c new file mode 100644 index 000000000..6e514f75e --- /dev/null +++ b/hw/pci-host/raven.c @@ -0,0 +1,445 @@ +/* + * QEMU PREP PCI host + * + * Copyright (c) 2006 Fabrice Bellard + * Copyright (c) 2011-2013 Andreas Färber + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include "qemu-common.h" +#include "qemu/datadir.h" +#include "qemu/units.h" +#include "qemu/log.h" +#include "qapi/error.h" +#include "hw/pci/pci.h" +#include "hw/pci/pci_bus.h" +#include "hw/pci/pci_host.h" +#include "hw/qdev-properties.h" +#include "migration/vmstate.h" +#include "hw/intc/i8259.h" +#include "hw/irq.h" +#include "hw/loader.h" +#include "hw/or-irq.h" +#include "elf.h" +#include "qom/object.h" + +#define TYPE_RAVEN_PCI_DEVICE "raven" +#define TYPE_RAVEN_PCI_HOST_BRIDGE "raven-pcihost" + +OBJECT_DECLARE_SIMPLE_TYPE(RavenPCIState, RAVEN_PCI_DEVICE) + +struct RavenPCIState { + PCIDevice dev; + + uint32_t elf_machine; + char *bios_name; + MemoryRegion bios; +}; + +typedef struct PRePPCIState PREPPCIState; +DECLARE_INSTANCE_CHECKER(PREPPCIState, RAVEN_PCI_HOST_BRIDGE, + TYPE_RAVEN_PCI_HOST_BRIDGE) + +struct PRePPCIState { + PCIHostState parent_obj; + + qemu_or_irq *or_irq; + qemu_irq pci_irqs[PCI_NUM_PINS]; + PCIBus pci_bus; + AddressSpace pci_io_as; + MemoryRegion pci_io; + MemoryRegion pci_io_non_contiguous; + MemoryRegion pci_memory; + MemoryRegion pci_intack; + MemoryRegion bm; + MemoryRegion bm_ram_alias; + MemoryRegion bm_pci_memory_alias; + AddressSpace bm_as; + RavenPCIState pci_dev; + + int contiguous_map; + bool is_legacy_prep; +}; + +#define BIOS_SIZE (1 * MiB) + +#define PCI_IO_BASE_ADDR 0x80000000 /* Physical address on main bus */ + +static inline uint32_t raven_pci_io_config(hwaddr addr) +{ + int i; + + for (i = 0; i < 11; i++) { + if ((addr & (1 << (11 + i))) != 0) { + break; + } + } + return (addr & 0x7ff) | (i << 11); +} + +static void raven_pci_io_write(void *opaque, hwaddr addr, + uint64_t val, unsigned int size) +{ + PREPPCIState *s = opaque; + PCIHostState *phb = PCI_HOST_BRIDGE(s); + pci_data_write(phb->bus, raven_pci_io_config(addr), val, size); +} + +static uint64_t raven_pci_io_read(void *opaque, hwaddr addr, + unsigned int size) +{ + PREPPCIState *s = opaque; + PCIHostState *phb = PCI_HOST_BRIDGE(s); + return pci_data_read(phb->bus, raven_pci_io_config(addr), size); +} + +static const MemoryRegionOps raven_pci_io_ops = { + .read = raven_pci_io_read, + .write = raven_pci_io_write, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static uint64_t raven_intack_read(void *opaque, hwaddr addr, + unsigned int size) +{ + return pic_read_irq(isa_pic); +} + +static void raven_intack_write(void *opaque, hwaddr addr, + uint64_t data, unsigned size) +{ + qemu_log_mask(LOG_UNIMP, "%s not implemented\n", __func__); +} + +static const MemoryRegionOps raven_intack_ops = { + .read = raven_intack_read, + .write = raven_intack_write, + .valid = { + .max_access_size = 1, + }, +}; + +static inline hwaddr raven_io_address(PREPPCIState *s, + hwaddr addr) +{ + if (s->contiguous_map == 0) { + /* 64 KB contiguous space for IOs */ + addr &= 0xFFFF; + } else { + /* 8 MB non-contiguous space for IOs */ + addr = (addr & 0x1F) | ((addr & 0x007FFF000) >> 7); + } + + /* FIXME: handle endianness switch */ + + return addr; +} + +static uint64_t raven_io_read(void *opaque, hwaddr addr, + unsigned int size) +{ + PREPPCIState *s = opaque; + uint8_t buf[4]; + + addr = raven_io_address(s, addr); + address_space_read(&s->pci_io_as, addr + PCI_IO_BASE_ADDR, + MEMTXATTRS_UNSPECIFIED, buf, size); + + if (size == 1) { + return buf[0]; + } else if (size == 2) { + return lduw_le_p(buf); + } else if (size == 4) { + return ldl_le_p(buf); + } else { + g_assert_not_reached(); + } +} + +static void raven_io_write(void *opaque, hwaddr addr, + uint64_t val, unsigned int size) 
+{ + PREPPCIState *s = opaque; + uint8_t buf[4]; + + addr = raven_io_address(s, addr); + + if (size == 1) { + buf[0] = val; + } else if (size == 2) { + stw_le_p(buf, val); + } else if (size == 4) { + stl_le_p(buf, val); + } else { + g_assert_not_reached(); + } + + address_space_write(&s->pci_io_as, addr + PCI_IO_BASE_ADDR, + MEMTXATTRS_UNSPECIFIED, buf, size); +} + +static const MemoryRegionOps raven_io_ops = { + .read = raven_io_read, + .write = raven_io_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .impl.max_access_size = 4, + .valid.unaligned = true, +}; + +static int raven_map_irq(PCIDevice *pci_dev, int irq_num) +{ + return (irq_num + (pci_dev->devfn >> 3)) & 1; +} + +static void raven_set_irq(void *opaque, int irq_num, int level) +{ + PREPPCIState *s = opaque; + + qemu_set_irq(s->pci_irqs[irq_num], level); +} + +static AddressSpace *raven_pcihost_set_iommu(PCIBus *bus, void *opaque, + int devfn) +{ + PREPPCIState *s = opaque; + + return &s->bm_as; +} + +static void raven_change_gpio(void *opaque, int n, int level) +{ + PREPPCIState *s = opaque; + + s->contiguous_map = level; +} + +static void raven_pcihost_realizefn(DeviceState *d, Error **errp) +{ + SysBusDevice *dev = SYS_BUS_DEVICE(d); + PCIHostState *h = PCI_HOST_BRIDGE(dev); + PREPPCIState *s = RAVEN_PCI_HOST_BRIDGE(dev); + MemoryRegion *address_space_mem = get_system_memory(); + int i; + + if (s->is_legacy_prep) { + for (i = 0; i < PCI_NUM_PINS; i++) { + sysbus_init_irq(dev, &s->pci_irqs[i]); + } + } else { + /* According to PReP specification section 6.1.6 "System Interrupt + * Assignments", all PCI interrupts are routed via IRQ 15 */ + s->or_irq = OR_IRQ(object_new(TYPE_OR_IRQ)); + object_property_set_int(OBJECT(s->or_irq), "num-lines", PCI_NUM_PINS, + &error_fatal); + qdev_realize(DEVICE(s->or_irq), NULL, &error_fatal); + sysbus_init_irq(dev, &s->or_irq->out_irq); + + for (i = 0; i < PCI_NUM_PINS; i++) { + s->pci_irqs[i] = qdev_get_gpio_in(DEVICE(s->or_irq), i); + } + } + + qdev_init_gpio_in(d, raven_change_gpio, 1); + + pci_bus_irqs(&s->pci_bus, raven_set_irq, raven_map_irq, s, PCI_NUM_PINS); + + memory_region_init_io(&h->conf_mem, OBJECT(h), &pci_host_conf_le_ops, s, + "pci-conf-idx", 4); + memory_region_add_subregion(&s->pci_io, 0xcf8, &h->conf_mem); + + memory_region_init_io(&h->data_mem, OBJECT(h), &pci_host_data_le_ops, s, + "pci-conf-data", 4); + memory_region_add_subregion(&s->pci_io, 0xcfc, &h->data_mem); + + memory_region_init_io(&h->mmcfg, OBJECT(s), &raven_pci_io_ops, s, + "pciio", 0x00400000); + memory_region_add_subregion(address_space_mem, 0x80800000, &h->mmcfg); + + memory_region_init_io(&s->pci_intack, OBJECT(s), &raven_intack_ops, s, + "pci-intack", 1); + memory_region_add_subregion(address_space_mem, 0xbffffff0, &s->pci_intack); + + /* TODO Remove once realize propagates to child devices. 
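+ *
+ * Editor's aside (illustration only, not from the patch): the
+ * TYPE_OR_IRQ object wired up earlier in this function just ORs the
+ * four PCI pins into the single PReP IRQ 15 line. Its observable
+ * behaviour fits in a few lines of plain C (the names below are made
+ * up for the sketch, not QEMU API):
+ *
+ *   #include <stdbool.h>
+ *   #include <stdio.h>
+ *
+ *   struct or_gate { bool in[4]; };
+ *
+ *   static bool or_gate_set(struct or_gate *g, int n, bool level)
+ *   {
+ *       g->in[n] = level;
+ *       bool out = false;
+ *       for (int i = 0; i < 4; i++) {
+ *           out |= g->in[i];  // output stays high while any input is high
+ *       }
+ *       return out;
+ *   }
+ *
+ *   int main(void)
+ *   {
+ *       struct or_gate g = {0};
+ *       printf("%d\n", or_gate_set(&g, 2, true));   // 1
+ *       printf("%d\n", or_gate_set(&g, 2, false));  // 0
+ *   }
+ *
+ * qemu_or_irq likewise keeps a per-line level array, so a pin that is
+ * still asserted keeps the shared output high; that is the property
+ * the PReP routing relies on.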
*/ + qdev_realize(DEVICE(&s->pci_dev), BUS(&s->pci_bus), errp); +} + +static void raven_pcihost_initfn(Object *obj) +{ + PCIHostState *h = PCI_HOST_BRIDGE(obj); + PREPPCIState *s = RAVEN_PCI_HOST_BRIDGE(obj); + MemoryRegion *address_space_mem = get_system_memory(); + DeviceState *pci_dev; + + memory_region_init(&s->pci_io, obj, "pci-io", 0x3f800000); + memory_region_init_io(&s->pci_io_non_contiguous, obj, &raven_io_ops, s, + "pci-io-non-contiguous", 0x00800000); + memory_region_init(&s->pci_memory, obj, "pci-memory", 0x3f000000); + address_space_init(&s->pci_io_as, &s->pci_io, "raven-io"); + + /* CPU address space */ + memory_region_add_subregion(address_space_mem, PCI_IO_BASE_ADDR, + &s->pci_io); + memory_region_add_subregion_overlap(address_space_mem, PCI_IO_BASE_ADDR, + &s->pci_io_non_contiguous, 1); + memory_region_add_subregion(address_space_mem, 0xc0000000, &s->pci_memory); + pci_root_bus_init(&s->pci_bus, sizeof(s->pci_bus), DEVICE(obj), NULL, + &s->pci_memory, &s->pci_io, 0, TYPE_PCI_BUS); + + /* Bus master address space */ + memory_region_init(&s->bm, obj, "bm-raven", 4 * GiB); + memory_region_init_alias(&s->bm_pci_memory_alias, obj, "bm-pci-memory", + &s->pci_memory, 0, + memory_region_size(&s->pci_memory)); + memory_region_init_alias(&s->bm_ram_alias, obj, "bm-system", + get_system_memory(), 0, 0x80000000); + memory_region_add_subregion(&s->bm, 0 , &s->bm_pci_memory_alias); + memory_region_add_subregion(&s->bm, 0x80000000, &s->bm_ram_alias); + address_space_init(&s->bm_as, &s->bm, "raven-bm"); + pci_setup_iommu(&s->pci_bus, raven_pcihost_set_iommu, s); + + h->bus = &s->pci_bus; + + object_initialize(&s->pci_dev, sizeof(s->pci_dev), TYPE_RAVEN_PCI_DEVICE); + pci_dev = DEVICE(&s->pci_dev); + object_property_set_int(OBJECT(&s->pci_dev), "addr", PCI_DEVFN(0, 0), + NULL); + qdev_prop_set_bit(pci_dev, "multifunction", false); +} + +static void raven_realize(PCIDevice *d, Error **errp) +{ + RavenPCIState *s = RAVEN_PCI_DEVICE(d); + char *filename; + int bios_size = -1; + + d->config[0x0C] = 0x08; // cache_line_size + d->config[0x0D] = 0x10; // latency_timer + d->config[0x34] = 0x00; // capabilities_pointer + + memory_region_init_rom_nomigrate(&s->bios, OBJECT(s), "bios", BIOS_SIZE, + &error_fatal); + memory_region_add_subregion(get_system_memory(), (uint32_t)(-BIOS_SIZE), + &s->bios); + if (s->bios_name) { + filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, s->bios_name); + if (filename) { + if (s->elf_machine != EM_NONE) { + bios_size = load_elf(filename, NULL, NULL, NULL, NULL, + NULL, NULL, NULL, 1, s->elf_machine, + 0, 0); + } + if (bios_size < 0) { + bios_size = get_image_size(filename); + if (bios_size > 0 && bios_size <= BIOS_SIZE) { + hwaddr bios_addr; + bios_size = (bios_size + 0xfff) & ~0xfff; + bios_addr = (uint32_t)(-BIOS_SIZE); + bios_size = load_image_targphys(filename, bios_addr, + bios_size); + } + } + } + g_free(filename); + if (bios_size < 0 || bios_size > BIOS_SIZE) { + memory_region_del_subregion(get_system_memory(), &s->bios); + error_setg(errp, "Could not load bios image '%s'", s->bios_name); + return; + } + } + + vmstate_register_ram_global(&s->bios); +} + +static const VMStateDescription vmstate_raven = { + .name = "raven", + .version_id = 0, + .minimum_version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_PCI_DEVICE(dev, RavenPCIState), + VMSTATE_END_OF_LIST() + }, +}; + +static void raven_class_init(ObjectClass *klass, void *data) +{ + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + DeviceClass *dc = DEVICE_CLASS(klass); + + k->realize = raven_realize; + 
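/*
+ * Editor's illustration (not part of the original patch): raven_realize()
+ * above maps the BIOS ROM so that it ends exactly at 4 GiB and rounds
+ * image sizes up to a 4 KiB page; the arithmetic is worth spelling out.
+ * BIOS_SIZE is the 1 MiB constant defined earlier in this file.
+ *
+ *   #include <stdint.h>
+ *   #include <stdio.h>
+ *
+ *   #define BIOS_SIZE (1 * 1024 * 1024)
+ *
+ *   int main(void)
+ *   {
+ *       uint32_t bios_addr = (uint32_t)(-BIOS_SIZE); // wraps to 0xfff00000
+ *       int size = 0x12345;                          // example image size
+ *       int rounded = (size + 0xfff) & ~0xfff;       // 0x13000
+ *       printf("base=0x%x rounded=0x%x\n", bios_addr, rounded);
+ *   }
+ *
+ * i.e. the ROM sits at 4 GiB - BIOS_SIZE, which is where the PowerPC
+ * reset vector expects to find firmware.
+ */ +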
k->vendor_id = PCI_VENDOR_ID_MOTOROLA; + k->device_id = PCI_DEVICE_ID_MOTOROLA_RAVEN; + k->revision = 0x00; + k->class_id = PCI_CLASS_BRIDGE_HOST; + dc->desc = "PReP Host Bridge - Motorola Raven"; + dc->vmsd = &vmstate_raven; + /* + * Reason: PCI-facing part of the host bridge, not usable without + * the host-facing part, which can't be device_add'ed, yet. + */ + dc->user_creatable = false; +} + +static const TypeInfo raven_info = { + .name = TYPE_RAVEN_PCI_DEVICE, + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(RavenPCIState), + .class_init = raven_class_init, + .interfaces = (InterfaceInfo[]) { + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { }, + }, +}; + +static Property raven_pcihost_properties[] = { + DEFINE_PROP_UINT32("elf-machine", PREPPCIState, pci_dev.elf_machine, + EM_NONE), + DEFINE_PROP_STRING("bios-name", PREPPCIState, pci_dev.bios_name), + /* Temporary workaround until legacy prep machine is removed */ + DEFINE_PROP_BOOL("is-legacy-prep", PREPPCIState, is_legacy_prep, + false), + DEFINE_PROP_END_OF_LIST() +}; + +static void raven_pcihost_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); + dc->realize = raven_pcihost_realizefn; + device_class_set_props(dc, raven_pcihost_properties); + dc->fw_name = "pci"; +} + +static const TypeInfo raven_pcihost_info = { + .name = TYPE_RAVEN_PCI_HOST_BRIDGE, + .parent = TYPE_PCI_HOST_BRIDGE, + .instance_size = sizeof(PREPPCIState), + .instance_init = raven_pcihost_initfn, + .class_init = raven_pcihost_class_init, +}; + +static void raven_register_types(void) +{ + type_register_static(&raven_pcihost_info); + type_register_static(&raven_info); +} + +type_init(raven_register_types) diff --git a/hw/pci-host/remote.c b/hw/pci-host/remote.c new file mode 100644 index 000000000..eee45444e --- /dev/null +++ b/hw/pci-host/remote.c @@ -0,0 +1,75 @@ +/* + * Remote PCI host device + * + * Unlike PCI host devices that model physical hardware, the purpose + * of this PCI host is to host multi-process QEMU devices. + * + * Multi-process QEMU extends the PCI host of a QEMU machine into a + * remote process. Any PCI device attached to the remote process is + * visible in the QEMU guest. This allows existing QEMU device models + * to be reused in the remote process. + * + * This PCI host is purely a container for PCI devices. It's fake in the + * sense that the guest never sees this PCI host and has no way of + * accessing it. Its job is just to provide the environment that QEMU + * PCI device models need when running in a remote process. + * + * Copyright © 2018, 2021 Oracle and/or its affiliates. + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ * + */ + +#include "qemu/osdep.h" +#include "qemu-common.h" + +#include "hw/pci/pci.h" +#include "hw/pci/pci_host.h" +#include "hw/pci/pcie_host.h" +#include "hw/qdev-properties.h" +#include "hw/pci-host/remote.h" +#include "exec/memory.h" + +static const char *remote_pcihost_root_bus_path(PCIHostState *host_bridge, + PCIBus *rootbus) +{ + return "0000:00"; +} + +static void remote_pcihost_realize(DeviceState *dev, Error **errp) +{ + PCIHostState *pci = PCI_HOST_BRIDGE(dev); + RemotePCIHost *s = REMOTE_PCIHOST(dev); + + pci->bus = pci_root_bus_new(DEVICE(s), "remote-pci", + s->mr_pci_mem, s->mr_sys_io, + 0, TYPE_PCIE_BUS); +} + +static void remote_pcihost_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIHostBridgeClass *hc = PCI_HOST_BRIDGE_CLASS(klass); + + hc->root_bus_path = remote_pcihost_root_bus_path; + dc->realize = remote_pcihost_realize; + + dc->user_creatable = false; + set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); + dc->fw_name = "pci"; +} + +static const TypeInfo remote_pcihost_info = { + .name = TYPE_REMOTE_PCIHOST, + .parent = TYPE_PCIE_HOST_BRIDGE, + .instance_size = sizeof(RemotePCIHost), + .class_init = remote_pcihost_class_init, +}; + +static void remote_pcihost_register(void) +{ + type_register_static(&remote_pcihost_info); +} + +type_init(remote_pcihost_register) diff --git a/hw/pci-host/sabre.c b/hw/pci-host/sabre.c new file mode 100644 index 000000000..949ecc21f --- /dev/null +++ b/hw/pci-host/sabre.c @@ -0,0 +1,526 @@ +/* + * QEMU Ultrasparc Sabre PCI host (PBM) + * + * Copyright (c) 2006 Fabrice Bellard + * Copyright (c) 2012,2013 Artyom Tarasenko + * Copyright (c) 2018 Mark Cave-Ayland + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include "hw/sysbus.h" +#include "hw/pci/pci.h" +#include "hw/pci/pci_host.h" +#include "hw/qdev-properties.h" +#include "hw/pci/pci_bridge.h" +#include "hw/pci/pci_bus.h" +#include "hw/irq.h" +#include "hw/pci-bridge/simba.h" +#include "hw/pci-host/sabre.h" +#include "qapi/error.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "sysemu/runstate.h" +#include "trace.h" + +/* + * Chipset docs: + * PBM: "UltraSPARC IIi User's Manual", + * https://web.archive.org/web/20030403110020/http://www.sun.com/processors/manuals/805-0087.pdf + */ + +#define PBM_PCI_IMR_MASK 0x7fffffff +#define PBM_PCI_IMR_ENABLED 0x80000000 + +#define POR (1U << 31) +#define SOFT_POR (1U << 30) +#define SOFT_XIR (1U << 29) +#define BTN_POR (1U << 28) +#define BTN_XIR (1U << 27) +#define RESET_MASK 0xf8000000 +#define RESET_WCMASK 0x98000000 +#define RESET_WMASK 0x60000000 + +#define NO_IRQ_REQUEST (MAX_IVEC + 1) + +static inline void sabre_set_request(SabreState *s, unsigned int irq_num) +{ + trace_sabre_set_request(irq_num); + s->irq_request = irq_num; + qemu_set_irq(s->ivec_irqs[irq_num], 1); +} + +static inline void sabre_check_irqs(SabreState *s) +{ + unsigned int i; + + /* Previous request is not acknowledged, resubmit */ + if (s->irq_request != NO_IRQ_REQUEST) { + sabre_set_request(s, s->irq_request); + return; + } + /* no request pending */ + if (s->pci_irq_in == 0ULL) { + return; + } + for (i = 0; i < 32; i++) { + if (s->pci_irq_in & (1ULL << i)) { + if (s->pci_irq_map[i >> 2] & PBM_PCI_IMR_ENABLED) { + sabre_set_request(s, i); + return; + } + } + } + for (i = 32; i < 64; i++) { + if (s->pci_irq_in & (1ULL << i)) { + if (s->obio_irq_map[i - 32] & PBM_PCI_IMR_ENABLED) { + sabre_set_request(s, i); + break; + } + } + } +} + +static inline void sabre_clear_request(SabreState *s, unsigned int irq_num) +{ + trace_sabre_clear_request(irq_num); + qemu_set_irq(s->ivec_irqs[irq_num], 0); + s->irq_request = NO_IRQ_REQUEST; +} + +static AddressSpace *sabre_pci_dma_iommu(PCIBus *bus, void *opaque, int devfn) +{ + IOMMUState *is = opaque; + + return &is->iommu_as; +} + +static void sabre_config_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + SabreState *s = opaque; + + trace_sabre_config_write(addr, val); + + switch (addr) { + case 0x30 ... 0x4f: /* DMA error registers */ + /* XXX: not implemented yet */ + break; + case 0xc00 ... 0xc3f: /* PCI interrupt control */ + if (addr & 4) { + unsigned int ino = (addr & 0x3f) >> 3; + s->pci_irq_map[ino] &= PBM_PCI_IMR_MASK; + s->pci_irq_map[ino] |= val & ~PBM_PCI_IMR_MASK; + if ((s->irq_request == ino) && !(val & ~PBM_PCI_IMR_MASK)) { + sabre_clear_request(s, ino); + } + sabre_check_irqs(s); + } + break; + case 0x1000 ... 0x107f: /* OBIO interrupt control */ + if (addr & 4) { + unsigned int ino = ((addr & 0xff) >> 3); + s->obio_irq_map[ino] &= PBM_PCI_IMR_MASK; + s->obio_irq_map[ino] |= val & ~PBM_PCI_IMR_MASK; + if ((s->irq_request == (ino | 0x20)) + && !(val & ~PBM_PCI_IMR_MASK)) { + sabre_clear_request(s, ino | 0x20); + } + sabre_check_irqs(s); + } + break; + case 0x1400 ... 0x14ff: /* PCI interrupt clear */ + if (addr & 4) { + unsigned int ino = (addr & 0xff) >> 5; + if ((s->irq_request / 4) == ino) { + sabre_clear_request(s, s->irq_request); + sabre_check_irqs(s); + } + } + break; + case 0x1800 ... 
0x1860: /* OBIO interrupt clear */ + if (addr & 4) { + unsigned int ino = ((addr & 0xff) >> 3) | 0x20; + if (s->irq_request == ino) { + sabre_clear_request(s, ino); + sabre_check_irqs(s); + } + } + break; + case 0x2000 ... 0x202f: /* PCI control */ + s->pci_control[(addr & 0x3f) >> 2] = val; + break; + case 0xf020 ... 0xf027: /* Reset control */ + if (addr & 4) { + val &= RESET_MASK; + s->reset_control &= ~(val & RESET_WCMASK); + s->reset_control |= val & RESET_WMASK; + if (val & SOFT_POR) { + s->nr_resets = 0; + qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); + } else if (val & SOFT_XIR) { + qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); + } + } + break; + case 0x5000 ... 0x51cf: /* PIO/DMA diagnostics */ + case 0xa400 ... 0xa67f: /* IOMMU diagnostics */ + case 0xa800 ... 0xa80f: /* Interrupt diagnostics */ + case 0xf000 ... 0xf01f: /* FFB config, memory control */ + /* we don't care */ + default: + break; + } +} + +static uint64_t sabre_config_read(void *opaque, + hwaddr addr, unsigned size) +{ + SabreState *s = opaque; + uint32_t val = 0; + + switch (addr) { + case 0x30 ... 0x4f: /* DMA error registers */ + /* XXX: not implemented yet */ + break; + case 0xc00 ... 0xc3f: /* PCI interrupt control */ + if (addr & 4) { + val = s->pci_irq_map[(addr & 0x3f) >> 3]; + } + break; + case 0x1000 ... 0x107f: /* OBIO interrupt control */ + if (addr & 4) { + val = s->obio_irq_map[(addr & 0xff) >> 3]; + } + break; + case 0x1080 ... 0x108f: /* PCI bus error */ + if (addr & 4) { + val = s->pci_err_irq_map[(addr & 0xf) >> 3]; + } + break; + case 0x2000 ... 0x202f: /* PCI control */ + val = s->pci_control[(addr & 0x3f) >> 2]; + break; + case 0xf020 ... 0xf027: /* Reset control */ + if (addr & 4) { + val = s->reset_control; + } + break; + case 0x5000 ... 0x51cf: /* PIO/DMA diagnostics */ + case 0xa400 ... 0xa67f: /* IOMMU diagnostics */ + case 0xa800 ... 0xa80f: /* Interrupt diagnostics */ + case 0xf000 ... 0xf01f: /* FFB config, memory control */ + /* we don't care */ + default: + break; + } + trace_sabre_config_read(addr, val); + + return val; +} + +static const MemoryRegionOps sabre_config_ops = { + .read = sabre_config_read, + .write = sabre_config_write, + .endianness = DEVICE_BIG_ENDIAN, +}; + +static void sabre_pci_config_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + SabreState *s = opaque; + PCIHostState *phb = PCI_HOST_BRIDGE(s); + + trace_sabre_pci_config_write(addr, val); + pci_data_write(phb->bus, addr, val, size); +} + +static uint64_t sabre_pci_config_read(void *opaque, hwaddr addr, + unsigned size) +{ + uint32_t ret; + SabreState *s = opaque; + PCIHostState *phb = PCI_HOST_BRIDGE(s); + + ret = pci_data_read(phb->bus, addr, size); + trace_sabre_pci_config_read(addr, ret); + return ret; +} + +/* The sabre host has an IRQ line for each IRQ line of each slot. 
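+ *
+ * Editor's aside (sketch, not from the original patch): the simba
+ * mapping functions below boil down to small bit formulas; plain C can
+ * tabulate them. OBIO_NIC_IRQ and OBIO_HDD_IRQ come from the included
+ * headers and are not reproduced here.
+ *
+ *   #include <stdio.h>
+ *
+ *   static int simba_a(int slot, int pin) // busA, ignoring fixed OBIO slots
+ *   {
+ *       return ((slot << 2) + pin) & 0x1f;
+ *   }
+ *
+ *   static int simba_b(int slot, int pin)
+ *   {
+ *       return (0x10 + (slot << 2) + pin) & 0x1f;
+ *   }
+ *
+ *   int main(void)
+ *   {
+ *       printf("%d %d\n", simba_a(2, 1), simba_b(2, 1)); // 9 25
+ *   }
+ *
+ * so busA devices land on INOs 0..15 and busB devices on INOs 16..31,
+ * the same split that sabre_check_irqs() walks when scanning pci_irq_in.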
*/ +static int pci_sabre_map_irq(PCIDevice *pci_dev, int irq_num) +{ + /* Return the irq as swizzled by the PBM */ + return irq_num; +} + +static int pci_simbaA_map_irq(PCIDevice *pci_dev, int irq_num) +{ + /* The on-board devices have fixed (legacy) OBIO intnos */ + switch (PCI_SLOT(pci_dev->devfn)) { + case 1: + /* Onboard NIC */ + return OBIO_NIC_IRQ; + case 3: + /* Onboard IDE */ + return OBIO_HDD_IRQ; + default: + /* Normal intno, fall through */ + break; + } + + return ((PCI_SLOT(pci_dev->devfn) << 2) + irq_num) & 0x1f; +} + +static int pci_simbaB_map_irq(PCIDevice *pci_dev, int irq_num) +{ + return (0x10 + (PCI_SLOT(pci_dev->devfn) << 2) + irq_num) & 0x1f; +} + +static void pci_sabre_set_irq(void *opaque, int irq_num, int level) +{ + SabreState *s = opaque; + + trace_sabre_pci_set_irq(irq_num, level); + + /* PCI IRQ map onto the first 32 INO. */ + if (irq_num < 32) { + if (level) { + s->pci_irq_in |= 1ULL << irq_num; + if (s->pci_irq_map[irq_num >> 2] & PBM_PCI_IMR_ENABLED) { + sabre_set_request(s, irq_num); + } + } else { + s->pci_irq_in &= ~(1ULL << irq_num); + } + } else { + /* OBIO IRQ map onto the next 32 INO. */ + if (level) { + trace_sabre_pci_set_obio_irq(irq_num, level); + s->pci_irq_in |= 1ULL << irq_num; + if ((s->irq_request == NO_IRQ_REQUEST) + && (s->obio_irq_map[irq_num - 32] & PBM_PCI_IMR_ENABLED)) { + sabre_set_request(s, irq_num); + } + } else { + s->pci_irq_in &= ~(1ULL << irq_num); + } + } +} + +static void sabre_reset(DeviceState *d) +{ + SabreState *s = SABRE(d); + PCIDevice *pci_dev; + unsigned int i; + uint16_t cmd; + + for (i = 0; i < 8; i++) { + s->pci_irq_map[i] &= PBM_PCI_IMR_MASK; + } + for (i = 0; i < 32; i++) { + s->obio_irq_map[i] &= PBM_PCI_IMR_MASK; + } + + s->irq_request = NO_IRQ_REQUEST; + s->pci_irq_in = 0ULL; + + if (s->nr_resets++ == 0) { + /* Power on reset */ + s->reset_control = POR; + } + + /* As this is the busA PCI bridge which contains the on-board devices + * attached to the ebus, ensure that we initially allow IO transactions + * so that we get the early serial console until OpenBIOS can properly + * configure the PCI bridge itself */ + pci_dev = PCI_DEVICE(s->bridgeA); + cmd = pci_get_word(pci_dev->config + PCI_COMMAND); + pci_set_word(pci_dev->config + PCI_COMMAND, cmd | PCI_COMMAND_IO); + pci_bridge_update_mappings(PCI_BRIDGE(pci_dev)); +} + +static const MemoryRegionOps pci_config_ops = { + .read = sabre_pci_config_read, + .write = sabre_pci_config_write, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static void sabre_realize(DeviceState *dev, Error **errp) +{ + SabreState *s = SABRE(dev); + PCIHostState *phb = PCI_HOST_BRIDGE(dev); + PCIDevice *pci_dev; + + memory_region_init(&s->pci_mmio, OBJECT(s), "pci-mmio", 0x100000000ULL); + memory_region_add_subregion(get_system_memory(), s->mem_base, + &s->pci_mmio); + + phb->bus = pci_register_root_bus(dev, "pci", + pci_sabre_set_irq, pci_sabre_map_irq, s, + &s->pci_mmio, + &s->pci_ioport, + 0, 0x40, TYPE_PCI_BUS); + + pci_create_simple(phb->bus, 0, TYPE_SABRE_PCI_DEVICE); + + /* IOMMU */ + memory_region_add_subregion_overlap(&s->sabre_config, 0x200, + sysbus_mmio_get_region(SYS_BUS_DEVICE(s->iommu), 0), 1); + pci_setup_iommu(phb->bus, sabre_pci_dma_iommu, s->iommu); + + /* APB secondary busses */ + pci_dev = pci_new_multifunction(PCI_DEVFN(1, 0), true, + TYPE_SIMBA_PCI_BRIDGE); + s->bridgeB = PCI_BRIDGE(pci_dev); + pci_bridge_map_irq(s->bridgeB, "pciB", pci_simbaB_map_irq); + pci_realize_and_unref(pci_dev, phb->bus, &error_fatal); + + pci_dev = pci_new_multifunction(PCI_DEVFN(1, 1), true, 
+ TYPE_SIMBA_PCI_BRIDGE); + s->bridgeA = PCI_BRIDGE(pci_dev); + pci_bridge_map_irq(s->bridgeA, "pciA", pci_simbaA_map_irq); + pci_realize_and_unref(pci_dev, phb->bus, &error_fatal); +} + +static void sabre_init(Object *obj) +{ + SabreState *s = SABRE(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + unsigned int i; + + for (i = 0; i < 8; i++) { + s->pci_irq_map[i] = (0x1f << 6) | (i << 2); + } + for (i = 0; i < 2; i++) { + s->pci_err_irq_map[i] = (0x1f << 6) | 0x30; + } + for (i = 0; i < 32; i++) { + s->obio_irq_map[i] = ((0x1f << 6) | 0x20) + i; + } + qdev_init_gpio_in_named(DEVICE(s), pci_sabre_set_irq, "pbm-irq", MAX_IVEC); + qdev_init_gpio_out_named(DEVICE(s), s->ivec_irqs, "ivec-irq", MAX_IVEC); + s->irq_request = NO_IRQ_REQUEST; + s->pci_irq_in = 0ULL; + + /* IOMMU */ + object_property_add_link(obj, "iommu", TYPE_SUN4U_IOMMU, + (Object **) &s->iommu, + qdev_prop_allow_set_link_before_realize, + 0); + + /* sabre_config */ + memory_region_init_io(&s->sabre_config, OBJECT(s), &sabre_config_ops, s, + "sabre-config", 0x10000); + /* at region 0 */ + sysbus_init_mmio(sbd, &s->sabre_config); + + memory_region_init_io(&s->pci_config, OBJECT(s), &pci_config_ops, s, + "sabre-pci-config", 0x1000000); + /* at region 1 */ + sysbus_init_mmio(sbd, &s->pci_config); + + /* pci_ioport */ + memory_region_init(&s->pci_ioport, OBJECT(s), "sabre-pci-ioport", + 0x1000000); + + /* at region 2 */ + sysbus_init_mmio(sbd, &s->pci_ioport); +} + +static void sabre_pci_realize(PCIDevice *d, Error **errp) +{ + pci_set_word(d->config + PCI_COMMAND, + PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER); + pci_set_word(d->config + PCI_STATUS, + PCI_STATUS_FAST_BACK | PCI_STATUS_66MHZ | + PCI_STATUS_DEVSEL_MEDIUM); +} + +static void sabre_pci_class_init(ObjectClass *klass, void *data) +{ + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + DeviceClass *dc = DEVICE_CLASS(klass); + + k->realize = sabre_pci_realize; + k->vendor_id = PCI_VENDOR_ID_SUN; + k->device_id = PCI_DEVICE_ID_SUN_SABRE; + k->class_id = PCI_CLASS_BRIDGE_HOST; + /* + * PCI-facing part of the host bridge, not usable without the + * host-facing part, which can't be device_add'ed, yet. 
+ */ + dc->user_creatable = false; +} + +static const TypeInfo sabre_pci_info = { + .name = TYPE_SABRE_PCI_DEVICE, + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(SabrePCIState), + .class_init = sabre_pci_class_init, + .interfaces = (InterfaceInfo[]) { + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { }, + }, +}; + +static char *sabre_ofw_unit_address(const SysBusDevice *dev) +{ + SabreState *s = SABRE(dev); + + return g_strdup_printf("%x,%x", + (uint32_t)((s->special_base >> 32) & 0xffffffff), + (uint32_t)(s->special_base & 0xffffffff)); +} + +static Property sabre_properties[] = { + DEFINE_PROP_UINT64("special-base", SabreState, special_base, 0), + DEFINE_PROP_UINT64("mem-base", SabreState, mem_base, 0), + DEFINE_PROP_END_OF_LIST(), +}; + +static void sabre_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + SysBusDeviceClass *sbc = SYS_BUS_DEVICE_CLASS(klass); + + dc->realize = sabre_realize; + dc->reset = sabre_reset; + device_class_set_props(dc, sabre_properties); + set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); + dc->fw_name = "pci"; + sbc->explicit_ofw_unit_address = sabre_ofw_unit_address; +} + +static const TypeInfo sabre_info = { + .name = TYPE_SABRE, + .parent = TYPE_PCI_HOST_BRIDGE, + .instance_size = sizeof(SabreState), + .instance_init = sabre_init, + .class_init = sabre_class_init, +}; + +static void sabre_register_types(void) +{ + type_register_static(&sabre_info); + type_register_static(&sabre_pci_info); +} + +type_init(sabre_register_types) diff --git a/hw/pci-host/sh_pci.c b/hw/pci-host/sh_pci.c new file mode 100644 index 000000000..719d6ca2a --- /dev/null +++ b/hw/pci-host/sh_pci.c @@ -0,0 +1,201 @@ +/* + * SuperH on-chip PCIC emulation. + * + * Copyright (c) 2008 Takashi YOSHII + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include "hw/sysbus.h" +#include "hw/sh4/sh.h" +#include "hw/irq.h" +#include "hw/pci/pci.h" +#include "hw/pci/pci_host.h" +#include "qemu/bswap.h" +#include "qemu/module.h" +#include "qom/object.h" + +#define TYPE_SH_PCI_HOST_BRIDGE "sh_pci" + +OBJECT_DECLARE_SIMPLE_TYPE(SHPCIState, SH_PCI_HOST_BRIDGE) + +struct SHPCIState { + PCIHostState parent_obj; + + PCIDevice *dev; + qemu_irq irq[4]; + MemoryRegion memconfig_p4; + MemoryRegion memconfig_a7; + MemoryRegion isa; + uint32_t par; + uint32_t mbr; + uint32_t iobr; +}; + +static void sh_pci_reg_write(void *p, hwaddr addr, uint64_t val, unsigned size) +{ + SHPCIState *pcic = p; + PCIHostState *phb = PCI_HOST_BRIDGE(pcic); + + switch (addr) { + case 0 ... 0xfc: + stl_le_p(pcic->dev->config + addr, val); + break; + case 0x1c0: + pcic->par = val; + break; + case 0x1c4: + pcic->mbr = val & 0xff000001; + break; + case 0x1c8: + pcic->iobr = val & 0xfffc0001; + memory_region_set_alias_offset(&pcic->isa, val & 0xfffc0000); + break; + case 0x220: + pci_data_write(phb->bus, pcic->par, val, 4); + break; + } +} + +static uint64_t sh_pci_reg_read(void *p, hwaddr addr, unsigned size) +{ + SHPCIState *pcic = p; + PCIHostState *phb = PCI_HOST_BRIDGE(pcic); + + switch (addr) { + case 0 ... 0xfc: + return ldl_le_p(pcic->dev->config + addr); + case 0x1c0: + return pcic->par; + case 0x1c4: + return pcic->mbr; + case 0x1c8: + return pcic->iobr; + case 0x220: + return pci_data_read(phb->bus, pcic->par, 4); + } + return 0; +} + +static const MemoryRegionOps sh_pci_reg_ops = { + .read = sh_pci_reg_read, + .write = sh_pci_reg_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +static int sh_pci_map_irq(PCIDevice *d, int irq_num) +{ + return PCI_SLOT(d->devfn); +} + +static void sh_pci_set_irq(void *opaque, int irq_num, int level) +{ + qemu_irq *pic = opaque; + + qemu_set_irq(pic[irq_num], level); +} + +static void sh_pci_device_realize(DeviceState *dev, Error **errp) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + SHPCIState *s = SH_PCI_HOST_BRIDGE(dev); + PCIHostState *phb = PCI_HOST_BRIDGE(s); + int i; + + for (i = 0; i < 4; i++) { + sysbus_init_irq(sbd, &s->irq[i]); + } + phb->bus = pci_register_root_bus(dev, "pci", + sh_pci_set_irq, sh_pci_map_irq, + s->irq, + get_system_memory(), + get_system_io(), + PCI_DEVFN(0, 0), 4, TYPE_PCI_BUS); + memory_region_init_io(&s->memconfig_p4, OBJECT(s), &sh_pci_reg_ops, s, + "sh_pci", 0x224); + memory_region_init_alias(&s->memconfig_a7, OBJECT(s), "sh_pci.2", + &s->memconfig_p4, 0, 0x224); + memory_region_init_alias(&s->isa, OBJECT(s), "sh_pci.isa", + get_system_io(), 0, 0x40000); + sysbus_init_mmio(sbd, &s->memconfig_p4); + sysbus_init_mmio(sbd, &s->memconfig_a7); + memory_region_add_subregion(get_system_memory(), 0xfe240000, &s->isa); + + s->dev = pci_create_simple(phb->bus, PCI_DEVFN(0, 0), "sh_pci_host"); +} + +static void sh_pci_host_realize(PCIDevice *d, Error **errp) +{ + pci_set_word(d->config + PCI_COMMAND, PCI_COMMAND_WAIT); + pci_set_word(d->config + PCI_STATUS, PCI_STATUS_CAP_LIST | + PCI_STATUS_FAST_BACK | PCI_STATUS_DEVSEL_MEDIUM); +} + +static void sh_pci_host_class_init(ObjectClass *klass, void *data) +{ + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + DeviceClass *dc = DEVICE_CLASS(klass); + + k->realize = sh_pci_host_realize; + k->vendor_id = PCI_VENDOR_ID_HITACHI; + k->device_id = PCI_DEVICE_ID_HITACHI_SH7751R; + /* + * PCI-facing part of the host bridge, not usable without the + * host-facing part, which can't 
be device_add'ed, yet. + */ + dc->user_creatable = false; +} + +static const TypeInfo sh_pci_host_info = { + .name = "sh_pci_host", + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(PCIDevice), + .class_init = sh_pci_host_class_init, + .interfaces = (InterfaceInfo[]) { + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { }, + }, +}; + +static void sh_pci_device_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = sh_pci_device_realize; +} + +static const TypeInfo sh_pci_device_info = { + .name = TYPE_SH_PCI_HOST_BRIDGE, + .parent = TYPE_PCI_HOST_BRIDGE, + .instance_size = sizeof(SHPCIState), + .class_init = sh_pci_device_class_init, +}; + +static void sh_pci_register_types(void) +{ + type_register_static(&sh_pci_device_info); + type_register_static(&sh_pci_host_info); +} + +type_init(sh_pci_register_types) diff --git a/hw/pci-host/trace-events b/hw/pci-host/trace-events new file mode 100644 index 000000000..630e9fcc5 --- /dev/null +++ b/hw/pci-host/trace-events @@ -0,0 +1,34 @@ +# See docs/devel/tracing.rst for syntax documentation. + +# bonito.c +bonito_spciconf_small_access(uint64_t addr, unsigned size) "PCI config address is smaller than 32-bit, addr: 0x%"PRIx64", size: %u" + +# grackle.c +grackle_set_irq(int irq_num, int level) "set_irq num %d level %d" + +# mv64361.c +mv64361_region_map(const char *name, uint64_t poffs, uint64_t size, uint64_t moffs) "Mapping %s 0x%"PRIx64"+0x%"PRIx64" @ 0x%"PRIx64 +mv64361_region_enable(const char *op, int num) "Should %s region %d" +mv64361_reg_read(uint64_t addr, uint32_t val) "0x%"PRIx64" -> 0x%x" +mv64361_reg_write(uint64_t addr, uint64_t val) "0x%"PRIx64" <- 0x%"PRIx64 + +# sabre.c +sabre_set_request(int irq_num) "request irq %d" +sabre_clear_request(int irq_num) "clear request irq %d" +sabre_config_write(uint64_t addr, uint64_t val) "addr 0x%"PRIx64" val 0x%"PRIx64 +sabre_config_read(uint64_t addr, uint64_t val) "addr 0x%"PRIx64" val 0x%"PRIx64 +sabre_pci_config_write(uint64_t addr, uint64_t val) "addr 0x%"PRIx64" val 0x%"PRIx64 +sabre_pci_config_read(uint64_t addr, uint64_t val) "addr 0x%"PRIx64" val 0x%"PRIx64 +sabre_pci_set_irq(int irq_num, int level) "set irq_in %d level %d" +sabre_pci_set_obio_irq(int irq_num, int level) "set irq %d level %d" + +# uninorth.c +unin_set_irq(int irq_num, int level) "setting INT %d = %d" +unin_get_config_reg(uint32_t reg, uint32_t addr, uint32_t retval) "converted config space accessor 0x%"PRIx32 "/0x%"PRIx32 " -> 0x%"PRIx32 +unin_data_write(uint64_t addr, unsigned len, uint64_t val) "write addr 0x%"PRIx64 " len %d val 0x%"PRIx64 +unin_data_read(uint64_t addr, unsigned len, uint64_t val) "read addr 0x%"PRIx64 " len %d val 0x%"PRIx64 +unin_write(uint64_t addr, uint64_t value) "addr=0x%" PRIx64 " val=0x%"PRIx64 +unin_read(uint64_t addr, uint64_t value) "addr=0x%" PRIx64 " val=0x%"PRIx64 + +# pnv_phb4.c +pnv_phb4_xive_notify(uint64_t notif_port, uint64_t data) "notif=@0x%"PRIx64" data=0x%"PRIx64 diff --git a/hw/pci-host/trace.h b/hw/pci-host/trace.h new file mode 100644 index 000000000..93ec814a7 --- /dev/null +++ b/hw/pci-host/trace.h @@ -0,0 +1 @@ +#include "trace/trace-hw_pci_host.h" diff --git a/hw/pci-host/uninorth.c b/hw/pci-host/uninorth.c new file mode 100644 index 000000000..d25b62d6a --- /dev/null +++ b/hw/pci-host/uninorth.c @@ -0,0 +1,582 @@ +/* + * QEMU Uninorth PCI host (for all Mac99 and newer machines) + * + * Copyright (c) 2006 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software
and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "hw/irq.h" +#include "hw/ppc/mac.h" +#include "hw/qdev-properties.h" +#include "qemu/module.h" +#include "hw/pci/pci.h" +#include "hw/pci/pci_host.h" +#include "hw/pci-host/uninorth.h" +#include "trace.h" + +static int pci_unin_map_irq(PCIDevice *pci_dev, int irq_num) +{ + return (irq_num + (pci_dev->devfn >> 3)) & 3; +} + +static void pci_unin_set_irq(void *opaque, int irq_num, int level) +{ + UNINHostState *s = opaque; + + trace_unin_set_irq(irq_num, level); + qemu_set_irq(s->irqs[irq_num], level); +} + +static uint32_t unin_get_config_reg(uint32_t reg, uint32_t addr) +{ + uint32_t retval; + + if (reg & (1u << 31)) { + /* XXX OpenBIOS compatibility hack */ + retval = reg | (addr & 3); + } else if (reg & 1) { + /* CFA1 style */ + retval = (reg & ~7u) | (addr & 7); + } else { + uint32_t slot, func; + + /* Grab CFA0 style values */ + slot = ctz32(reg & 0xfffff800); + if (slot == 32) { + slot = -1; /* XXX: should this be 0? */ + } + func = PCI_FUNC(reg >> 8); + + /* ... 
and then convert them to x86 format */ + /* config pointer */ + retval = (reg & (0xff - 7)) | (addr & 7); + /* slot, fn */ + retval |= PCI_DEVFN(slot, func) << 8; + } + + trace_unin_get_config_reg(reg, addr, retval); + + return retval; +} + +static void unin_data_write(void *opaque, hwaddr addr, + uint64_t val, unsigned len) +{ + UNINHostState *s = opaque; + PCIHostState *phb = PCI_HOST_BRIDGE(s); + trace_unin_data_write(addr, len, val); + pci_data_write(phb->bus, + unin_get_config_reg(phb->config_reg, addr), + val, len); +} + +static uint64_t unin_data_read(void *opaque, hwaddr addr, + unsigned len) +{ + UNINHostState *s = opaque; + PCIHostState *phb = PCI_HOST_BRIDGE(s); + uint32_t val; + + val = pci_data_read(phb->bus, + unin_get_config_reg(phb->config_reg, addr), + len); + trace_unin_data_read(addr, len, val); + return val; +} + +static const MemoryRegionOps unin_data_ops = { + .read = unin_data_read, + .write = unin_data_write, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static char *pci_unin_main_ofw_unit_address(const SysBusDevice *dev) +{ + UNINHostState *s = UNI_NORTH_PCI_HOST_BRIDGE(dev); + + return g_strdup_printf("%x", s->ofw_addr); +} + +static void pci_unin_main_realize(DeviceState *dev, Error **errp) +{ + UNINHostState *s = UNI_NORTH_PCI_HOST_BRIDGE(dev); + PCIHostState *h = PCI_HOST_BRIDGE(dev); + + h->bus = pci_register_root_bus(dev, NULL, + pci_unin_set_irq, pci_unin_map_irq, + s, + &s->pci_mmio, + &s->pci_io, + PCI_DEVFN(11, 0), 4, TYPE_PCI_BUS); + + pci_create_simple(h->bus, PCI_DEVFN(11, 0), "uni-north-pci"); + + /* DEC 21154 bridge */ +#if 0 + /* XXX: not activated as PPC BIOS doesn't handle multiple buses properly */ + pci_create_simple(h->bus, PCI_DEVFN(12, 0), "dec-21154"); +#endif +} + +static void pci_unin_main_init(Object *obj) +{ + UNINHostState *s = UNI_NORTH_PCI_HOST_BRIDGE(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + PCIHostState *h = PCI_HOST_BRIDGE(obj); + + /* Use values found on a real PowerMac */ + /* Uninorth main bus */ + memory_region_init_io(&h->conf_mem, OBJECT(h), &pci_host_conf_le_ops, + obj, "unin-pci-conf-idx", 0x1000); + memory_region_init_io(&h->data_mem, OBJECT(h), &unin_data_ops, obj, + "unin-pci-conf-data", 0x1000); + + memory_region_init(&s->pci_mmio, OBJECT(s), "unin-pci-mmio", + 0x100000000ULL); + memory_region_init_io(&s->pci_io, OBJECT(s), &unassigned_io_ops, obj, + "unin-pci-isa-mmio", 0x00800000); + + memory_region_init_alias(&s->pci_hole, OBJECT(s), + "unin-pci-hole", &s->pci_mmio, + 0x80000000ULL, 0x10000000ULL); + + sysbus_init_mmio(sbd, &h->conf_mem); + sysbus_init_mmio(sbd, &h->data_mem); + sysbus_init_mmio(sbd, &s->pci_hole); + sysbus_init_mmio(sbd, &s->pci_io); + + qdev_init_gpio_out(DEVICE(obj), s->irqs, ARRAY_SIZE(s->irqs)); +} + +static void pci_u3_agp_realize(DeviceState *dev, Error **errp) +{ + UNINHostState *s = U3_AGP_HOST_BRIDGE(dev); + PCIHostState *h = PCI_HOST_BRIDGE(dev); + + h->bus = pci_register_root_bus(dev, NULL, + pci_unin_set_irq, pci_unin_map_irq, + s, + &s->pci_mmio, + &s->pci_io, + PCI_DEVFN(11, 0), 4, TYPE_PCI_BUS); + + pci_create_simple(h->bus, PCI_DEVFN(11, 0), "u3-agp"); +} + +static void pci_u3_agp_init(Object *obj) +{ + UNINHostState *s = U3_AGP_HOST_BRIDGE(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + PCIHostState *h = PCI_HOST_BRIDGE(obj); + + /* Uninorth U3 AGP bus */ + memory_region_init_io(&h->conf_mem, OBJECT(h), &pci_host_conf_le_ops, + obj, "unin-pci-conf-idx", 0x1000); + memory_region_init_io(&h->data_mem, OBJECT(h), &unin_data_ops, obj, + "unin-pci-conf-data", 0x1000); + + 
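/*
+ * Editor's illustration (not part of the original patch): the CFA0
+ * conversion in unin_get_config_reg() further up is mostly a
+ * count-trailing-zeros over the IDSEL mask. A standalone check
+ * (using the compiler builtin in place of QEMU's ctz32):
+ *
+ *   #include <stdint.h>
+ *   #include <stdio.h>
+ *
+ *   static uint32_t cfa0_to_devfn_format(uint32_t reg, uint32_t addr)
+ *   {
+ *       uint32_t slot = __builtin_ctz(reg & 0xfffff800); // IDSEL bit index
+ *       uint32_t func = (reg >> 8) & 7;
+ *       return (reg & (0xff - 7)) | (addr & 7)
+ *              | (((slot << 3) | func) << 8);
+ *   }
+ *
+ *   int main(void)
+ *   {
+ *       // IDSEL bit 11 set, function 0, register 0x40
+ *       printf("0x%x\n", cfa0_to_devfn_format(1u << 11 | 0x40, 0)); // 0x5840
+ *   }
+ *
+ * 0x5840 is PCI_DEVFN(11, 0) << 8 | 0x40, which is the form the CFA1
+ * path and pci_data_read()/pci_data_write() expect.
+ */ +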
memory_region_init(&s->pci_mmio, OBJECT(s), "unin-pci-mmio", + 0x100000000ULL); + memory_region_init_io(&s->pci_io, OBJECT(s), &unassigned_io_ops, obj, + "unin-pci-isa-mmio", 0x00800000); + + memory_region_init_alias(&s->pci_hole, OBJECT(s), + "unin-pci-hole", &s->pci_mmio, + 0x80000000ULL, 0x70000000ULL); + + sysbus_init_mmio(sbd, &h->conf_mem); + sysbus_init_mmio(sbd, &h->data_mem); + sysbus_init_mmio(sbd, &s->pci_hole); + sysbus_init_mmio(sbd, &s->pci_io); + + qdev_init_gpio_out(DEVICE(obj), s->irqs, ARRAY_SIZE(s->irqs)); +} + +static void pci_unin_agp_realize(DeviceState *dev, Error **errp) +{ + UNINHostState *s = UNI_NORTH_AGP_HOST_BRIDGE(dev); + PCIHostState *h = PCI_HOST_BRIDGE(dev); + + h->bus = pci_register_root_bus(dev, NULL, + pci_unin_set_irq, pci_unin_map_irq, + s, + &s->pci_mmio, + &s->pci_io, + PCI_DEVFN(11, 0), 4, TYPE_PCI_BUS); + + pci_create_simple(h->bus, PCI_DEVFN(11, 0), "uni-north-agp"); +} + +static void pci_unin_agp_init(Object *obj) +{ + UNINHostState *s = UNI_NORTH_AGP_HOST_BRIDGE(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + PCIHostState *h = PCI_HOST_BRIDGE(obj); + + /* Uninorth AGP bus */ + memory_region_init_io(&h->conf_mem, OBJECT(h), &pci_host_conf_le_ops, + obj, "unin-agp-conf-idx", 0x1000); + memory_region_init_io(&h->data_mem, OBJECT(h), &pci_host_data_le_ops, + obj, "unin-agp-conf-data", 0x1000); + + sysbus_init_mmio(sbd, &h->conf_mem); + sysbus_init_mmio(sbd, &h->data_mem); + + qdev_init_gpio_out(DEVICE(obj), s->irqs, ARRAY_SIZE(s->irqs)); +} + +static void pci_unin_internal_realize(DeviceState *dev, Error **errp) +{ + UNINHostState *s = UNI_NORTH_INTERNAL_PCI_HOST_BRIDGE(dev); + PCIHostState *h = PCI_HOST_BRIDGE(dev); + + h->bus = pci_register_root_bus(dev, NULL, + pci_unin_set_irq, pci_unin_map_irq, + s, + &s->pci_mmio, + &s->pci_io, + PCI_DEVFN(14, 0), 4, TYPE_PCI_BUS); + + pci_create_simple(h->bus, PCI_DEVFN(14, 0), "uni-north-internal-pci"); +} + +static void pci_unin_internal_init(Object *obj) +{ + UNINHostState *s = UNI_NORTH_INTERNAL_PCI_HOST_BRIDGE(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + PCIHostState *h = PCI_HOST_BRIDGE(obj); + + /* Uninorth internal bus */ + memory_region_init_io(&h->conf_mem, OBJECT(h), &pci_host_conf_le_ops, + obj, "unin-pci-conf-idx", 0x1000); + memory_region_init_io(&h->data_mem, OBJECT(h), &pci_host_data_le_ops, + obj, "unin-pci-conf-data", 0x1000); + + sysbus_init_mmio(sbd, &h->conf_mem); + sysbus_init_mmio(sbd, &h->data_mem); + + qdev_init_gpio_out(DEVICE(obj), s->irqs, ARRAY_SIZE(s->irqs)); +} + +static void unin_main_pci_host_realize(PCIDevice *d, Error **errp) +{ + /* cache_line_size */ + d->config[0x0C] = 0x08; + /* latency_timer */ + d->config[0x0D] = 0x10; + /* capabilities_pointer */ + d->config[0x34] = 0x00; + + /* + * Set kMacRISCPCIAddressSelect (0x48) register to indicate PCI + * memory space with base 0x80000000, size 0x10000000 for Apple's + * AppleMacRiscPCI driver + */ + d->config[0x48] = 0x0; + d->config[0x49] = 0x0; + d->config[0x4a] = 0x0; + d->config[0x4b] = 0x1; +} + +static void unin_agp_pci_host_realize(PCIDevice *d, Error **errp) +{ + /* cache_line_size */ + d->config[0x0C] = 0x08; + /* latency_timer */ + d->config[0x0D] = 0x10; + /* capabilities_pointer + d->config[0x34] = 0x80; */ +} + +static void u3_agp_pci_host_realize(PCIDevice *d, Error **errp) +{ + /* cache line size */ + d->config[0x0C] = 0x08; + /* latency timer */ + d->config[0x0D] = 0x10; +} + +static void unin_internal_pci_host_realize(PCIDevice *d, Error **errp) +{ + /* cache_line_size */ + d->config[0x0C] = 0x08; + 
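/*
+ * Editor's note (not part of the original patch): all four host-bridge
+ * realize functions in this file poke the same standard PCI header
+ * fields by raw offset. With the symbolic names from pci_regs.h the
+ * three writes read as:
+ *
+ *   d->config[PCI_CACHE_LINE_SIZE] = 0x08;  // 8 dwords = 32 bytes
+ *   d->config[PCI_LATENCY_TIMER]   = 0x10;  // 16 PCI clocks
+ *   d->config[PCI_CAPABILITY_LIST] = 0x00;  // no capability chain
+ */ +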
/* latency_timer */ + d->config[0x0D] = 0x10; + /* capabilities_pointer */ + d->config[0x34] = 0x00; +} + +static void unin_main_pci_host_class_init(ObjectClass *klass, void *data) +{ + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + DeviceClass *dc = DEVICE_CLASS(klass); + + k->realize = unin_main_pci_host_realize; + k->vendor_id = PCI_VENDOR_ID_APPLE; + k->device_id = PCI_DEVICE_ID_APPLE_UNI_N_PCI; + k->revision = 0x00; + k->class_id = PCI_CLASS_BRIDGE_HOST; + /* + * PCI-facing part of the host bridge, not usable without the + * host-facing part, which can't be device_add'ed, yet. + */ + dc->user_creatable = false; +} + +static const TypeInfo unin_main_pci_host_info = { + .name = "uni-north-pci", + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(PCIDevice), + .class_init = unin_main_pci_host_class_init, + .interfaces = (InterfaceInfo[]) { + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { }, + }, +}; + +static void u3_agp_pci_host_class_init(ObjectClass *klass, void *data) +{ + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + DeviceClass *dc = DEVICE_CLASS(klass); + + k->realize = u3_agp_pci_host_realize; + k->vendor_id = PCI_VENDOR_ID_APPLE; + k->device_id = PCI_DEVICE_ID_APPLE_U3_AGP; + k->revision = 0x00; + k->class_id = PCI_CLASS_BRIDGE_HOST; + /* + * PCI-facing part of the host bridge, not usable without the + * host-facing part, which can't be device_add'ed, yet. + */ + dc->user_creatable = false; +} + +static const TypeInfo u3_agp_pci_host_info = { + .name = "u3-agp", + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(PCIDevice), + .class_init = u3_agp_pci_host_class_init, + .interfaces = (InterfaceInfo[]) { + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { }, + }, +}; + +static void unin_agp_pci_host_class_init(ObjectClass *klass, void *data) +{ + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + DeviceClass *dc = DEVICE_CLASS(klass); + + k->realize = unin_agp_pci_host_realize; + k->vendor_id = PCI_VENDOR_ID_APPLE; + k->device_id = PCI_DEVICE_ID_APPLE_UNI_N_AGP; + k->revision = 0x00; + k->class_id = PCI_CLASS_BRIDGE_HOST; + /* + * PCI-facing part of the host bridge, not usable without the + * host-facing part, which can't be device_add'ed, yet. + */ + dc->user_creatable = false; +} + +static const TypeInfo unin_agp_pci_host_info = { + .name = "uni-north-agp", + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(PCIDevice), + .class_init = unin_agp_pci_host_class_init, + .interfaces = (InterfaceInfo[]) { + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { }, + }, +}; + +static void unin_internal_pci_host_class_init(ObjectClass *klass, void *data) +{ + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + DeviceClass *dc = DEVICE_CLASS(klass); + + k->realize = unin_internal_pci_host_realize; + k->vendor_id = PCI_VENDOR_ID_APPLE; + k->device_id = PCI_DEVICE_ID_APPLE_UNI_N_I_PCI; + k->revision = 0x00; + k->class_id = PCI_CLASS_BRIDGE_HOST; + /* + * PCI-facing part of the host bridge, not usable without the + * host-facing part, which can't be device_add'ed, yet. 
+ */ + dc->user_creatable = false; +} + +static const TypeInfo unin_internal_pci_host_info = { + .name = "uni-north-internal-pci", + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(PCIDevice), + .class_init = unin_internal_pci_host_class_init, + .interfaces = (InterfaceInfo[]) { + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { }, + }, +}; + +static Property pci_unin_main_pci_host_props[] = { + DEFINE_PROP_UINT32("ofw-addr", UNINHostState, ofw_addr, -1), + DEFINE_PROP_END_OF_LIST() +}; + +static void pci_unin_main_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + SysBusDeviceClass *sbc = SYS_BUS_DEVICE_CLASS(klass); + + dc->realize = pci_unin_main_realize; + device_class_set_props(dc, pci_unin_main_pci_host_props); + set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); + dc->fw_name = "pci"; + sbc->explicit_ofw_unit_address = pci_unin_main_ofw_unit_address; +} + +static const TypeInfo pci_unin_main_info = { + .name = TYPE_UNI_NORTH_PCI_HOST_BRIDGE, + .parent = TYPE_PCI_HOST_BRIDGE, + .instance_size = sizeof(UNINHostState), + .instance_init = pci_unin_main_init, + .class_init = pci_unin_main_class_init, +}; + +static void pci_u3_agp_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = pci_u3_agp_realize; + set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); +} + +static const TypeInfo pci_u3_agp_info = { + .name = TYPE_U3_AGP_HOST_BRIDGE, + .parent = TYPE_PCI_HOST_BRIDGE, + .instance_size = sizeof(UNINHostState), + .instance_init = pci_u3_agp_init, + .class_init = pci_u3_agp_class_init, +}; + +static void pci_unin_agp_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = pci_unin_agp_realize; + set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); +} + +static const TypeInfo pci_unin_agp_info = { + .name = TYPE_UNI_NORTH_AGP_HOST_BRIDGE, + .parent = TYPE_PCI_HOST_BRIDGE, + .instance_size = sizeof(UNINHostState), + .instance_init = pci_unin_agp_init, + .class_init = pci_unin_agp_class_init, +}; + +static void pci_unin_internal_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = pci_unin_internal_realize; + set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); +} + +static const TypeInfo pci_unin_internal_info = { + .name = TYPE_UNI_NORTH_INTERNAL_PCI_HOST_BRIDGE, + .parent = TYPE_PCI_HOST_BRIDGE, + .instance_size = sizeof(UNINHostState), + .instance_init = pci_unin_internal_init, + .class_init = pci_unin_internal_class_init, +}; + +/* UniN device */ +static void unin_write(void *opaque, hwaddr addr, uint64_t value, + unsigned size) +{ + trace_unin_write(addr, value); +} + +static uint64_t unin_read(void *opaque, hwaddr addr, unsigned size) +{ + uint32_t value; + + switch (addr) { + case 0: + value = UNINORTH_VERSION_10A; + break; + default: + value = 0; + } + + trace_unin_read(addr, value); + + return value; +} + +static const MemoryRegionOps unin_ops = { + .read = unin_read, + .write = unin_write, + .endianness = DEVICE_BIG_ENDIAN, +}; + +static void unin_init(Object *obj) +{ + UNINState *s = UNI_NORTH(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + + memory_region_init_io(&s->mem, obj, &unin_ops, s, "unin", 0x1000); + + sysbus_init_mmio(sbd, &s->mem); +} + +static void unin_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); +} + +static const TypeInfo unin_info = { + .name = TYPE_UNI_NORTH, + .parent = TYPE_SYS_BUS_DEVICE, + 
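+ /*
+ * Editorial aside: each host bridge in this file follows the same QOM
+ * boilerplate — a TypeInfo naming its parent type, an instance_init
+ * that sets up MMIO regions, and a class_init that fills in the
+ * DeviceClass hooks — all registered in unin_register_types() below.
+ */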
.instance_size = sizeof(UNINState), + .instance_init = unin_init, + .class_init = unin_class_init, +}; + +static void unin_register_types(void) +{ + type_register_static(&unin_main_pci_host_info); + type_register_static(&u3_agp_pci_host_info); + type_register_static(&unin_agp_pci_host_info); + type_register_static(&unin_internal_pci_host_info); + + type_register_static(&pci_unin_main_info); + type_register_static(&pci_u3_agp_info); + type_register_static(&pci_unin_agp_info); + type_register_static(&pci_unin_internal_info); + + type_register_static(&unin_info); +} + +type_init(unin_register_types) diff --git a/hw/pci-host/versatile.c b/hw/pci-host/versatile.c new file mode 100644 index 000000000..f66384fa0 --- /dev/null +++ b/hw/pci-host/versatile.c @@ -0,0 +1,548 @@ +/* + * ARM Versatile/PB PCI host controller + * + * Copyright (c) 2006-2009 CodeSourcery. + * Written by Paul Brook + * + * This code is licensed under the LGPL. + */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "hw/irq.h" +#include "hw/pci/pci.h" +#include "hw/pci/pci_bus.h" +#include "hw/pci/pci_host.h" +#include "hw/qdev-properties.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "qom/object.h" + +/* Old and buggy versions of QEMU used the wrong mapping from + * PCI IRQs to system interrupt lines. Unfortunately the Linux + * kernel also had the corresponding bug in setting up interrupts + * (so older kernels work on QEMU and not on real hardware). + * We automatically detect these broken kernels and flip back + * to the broken irq mapping by spotting guest writes to the + * PCI_INTERRUPT_LINE register to see where the guest thinks + * interrupts are going to be routed. So we start in state + * ASSUME_OK on reset, and transition to either BROKEN or + * FORCE_OK at the first write to an INTERRUPT_LINE register for + * a slot where broken and correct interrupt mapping would differ. + * Once in either BROKEN or FORCE_OK we never transition again; + * this allows a newer kernel to use the INTERRUPT_LINE + * registers arbitrarily once it has indicated that it isn't + * broken in its init code somewhere. + * + * Unfortunately we have to cope with multiple different + * variants on the broken kernel behaviour: + * phase I (before kernel commit 1bc39ac5d) kernels assume old + * QEMU behaviour, so they use IRQ 27 for all slots + * phase II (1bc39ac5d and later, but before e3e92a7be6) kernels + * swizzle IRQs between slots, but do it wrongly, so they + * work only for every fourth PCI card, and only if (like old + * QEMU) the PCI host device is at slot 0 rather than where + * the h/w actually puts it + * phase III (e3e92a7be6 and later) kernels still swizzle IRQs between + * slots wrongly, but add a fixed offset of 64 to everything + * they write to PCI_INTERRUPT_LINE. + * + * We live in hope of a mythical phase IV kernel which might + * actually behave in ways that work on the hardware. Such a + * kernel should probably start off by writing some value neither + * 27 nor 91 to slot zero's PCI_INTERRUPT_LINE register to + * disable the autodetection. After that it can do what it likes. + * + * Slot % 4 | hw | I | II | III + * ------------------------------- + * 0 | 29 | 27 | 27 | 91 + * 1 | 30 | 27 | 28 | 92 + * 2 | 27 | 27 | 29 | 93 + * 3 | 28 | 27 | 30 | 94 + * + * Since our autodetection is not perfect we also provide a + * property so the user can make us start in BROKEN or FORCE_OK + * on reset if they know they have a bad or good kernel. 
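+ *
+ * Worked example (editorial): a device in slot 25 has slot % 4 == 1,
+ * where the correct h/w routing is IRQ 30. A guest writing 27 to that
+ * slot's INTERRUPT_LINE matches phase I; 28 matches phase II
+ * (slot % 4 + 27); 92 matches phase III (+64); all three latch
+ * BROKEN. Any other value latches FORCE_OK.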
+ */ +enum { + PCI_VPB_IRQMAP_ASSUME_OK, + PCI_VPB_IRQMAP_BROKEN, + PCI_VPB_IRQMAP_FORCE_OK, +}; + +struct PCIVPBState { + PCIHostState parent_obj; + + qemu_irq irq[4]; + MemoryRegion controlregs; + MemoryRegion mem_config; + MemoryRegion mem_config2; + /* Containers representing the PCI address spaces */ + MemoryRegion pci_io_space; + MemoryRegion pci_mem_space; + /* Alias regions into PCI address spaces which we expose as sysbus regions. + * The offsets into pci_mem_space are controlled by the imap registers. + */ + MemoryRegion pci_io_window; + MemoryRegion pci_mem_window[3]; + PCIBus pci_bus; + PCIDevice pci_dev; + + /* Constant for life of device: */ + int realview; + uint32_t mem_win_size[3]; + uint8_t irq_mapping_prop; + + /* Variable state: */ + uint32_t imap[3]; + uint32_t smap[3]; + uint32_t selfid; + uint32_t flags; + uint8_t irq_mapping; +}; +typedef struct PCIVPBState PCIVPBState; + +static void pci_vpb_update_window(PCIVPBState *s, int i) +{ + /* Adjust the offset of the alias region we use for + * the memory window i to account for a change in the + * value of the corresponding IMAP register. + * Note that the semantics of the IMAP register differ + * for realview and versatile variants of the controller. + */ + hwaddr offset; + if (s->realview) { + /* Top bits of register (masked according to window size) provide + * top bits of PCI address. + */ + offset = s->imap[i] & ~(s->mem_win_size[i] - 1); + } else { + /* Bottom 4 bits of register provide top 4 bits of PCI address */ + offset = s->imap[i] << 28; + } + memory_region_set_alias_offset(&s->pci_mem_window[i], offset); +} + +static void pci_vpb_update_all_windows(PCIVPBState *s) +{ + /* Update all alias windows based on the current register state */ + int i; + + for (i = 0; i < 3; i++) { + pci_vpb_update_window(s, i); + } +} + +static int pci_vpb_post_load(void *opaque, int version_id) +{ + PCIVPBState *s = opaque; + pci_vpb_update_all_windows(s); + return 0; +} + +static const VMStateDescription pci_vpb_vmstate = { + .name = "versatile-pci", + .version_id = 1, + .minimum_version_id = 1, + .post_load = pci_vpb_post_load, + .fields = (VMStateField[]) { + VMSTATE_UINT32_ARRAY(imap, PCIVPBState, 3), + VMSTATE_UINT32_ARRAY(smap, PCIVPBState, 3), + VMSTATE_UINT32(selfid, PCIVPBState), + VMSTATE_UINT32(flags, PCIVPBState), + VMSTATE_UINT8(irq_mapping, PCIVPBState), + VMSTATE_END_OF_LIST() + } +}; + +#define TYPE_VERSATILE_PCI "versatile_pci" +DECLARE_INSTANCE_CHECKER(PCIVPBState, PCI_VPB, + TYPE_VERSATILE_PCI) + +#define TYPE_VERSATILE_PCI_HOST "versatile_pci_host" +DECLARE_INSTANCE_CHECKER(PCIDevice, PCI_VPB_HOST, + TYPE_VERSATILE_PCI_HOST) + +typedef enum { + PCI_IMAP0 = 0x0, + PCI_IMAP1 = 0x4, + PCI_IMAP2 = 0x8, + PCI_SELFID = 0xc, + PCI_FLAGS = 0x10, + PCI_SMAP0 = 0x14, + PCI_SMAP1 = 0x18, + PCI_SMAP2 = 0x1c, +} PCIVPBControlRegs; + +static void pci_vpb_reg_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + PCIVPBState *s = opaque; + + switch (addr) { + case PCI_IMAP0: + case PCI_IMAP1: + case PCI_IMAP2: + { + int win = (addr - PCI_IMAP0) >> 2; + s->imap[win] = val; + pci_vpb_update_window(s, win); + break; + } + case PCI_SELFID: + s->selfid = val; + break; + case PCI_FLAGS: + s->flags = val; + break; + case PCI_SMAP0: + case PCI_SMAP1: + case PCI_SMAP2: + { + int win = (addr - PCI_SMAP0) >> 2; + s->smap[win] = val; + break; + } + default: + qemu_log_mask(LOG_GUEST_ERROR, + "pci_vpb_reg_write: Bad offset %x\n", (int)addr); + break; + } +} + +static uint64_t pci_vpb_reg_read(void *opaque, hwaddr addr, + 
unsigned size) +{ + PCIVPBState *s = opaque; + + switch (addr) { + case PCI_IMAP0: + case PCI_IMAP1: + case PCI_IMAP2: + { + int win = (addr - PCI_IMAP0) >> 2; + return s->imap[win]; + } + case PCI_SELFID: + return s->selfid; + case PCI_FLAGS: + return s->flags; + case PCI_SMAP0: + case PCI_SMAP1: + case PCI_SMAP2: + { + int win = (addr - PCI_SMAP0) >> 2; + return s->smap[win]; + } + default: + qemu_log_mask(LOG_GUEST_ERROR, + "pci_vpb_reg_read: Bad offset %x\n", (int)addr); + return 0; + } +} + +static const MemoryRegionOps pci_vpb_reg_ops = { + .read = pci_vpb_reg_read, + .write = pci_vpb_reg_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +static int pci_vpb_broken_irq(int slot, int irq) +{ + /* Determine whether this IRQ value for this slot represents a + * known broken Linux kernel behaviour for this slot. + * Return one of the PCI_VPB_IRQMAP_ constants: + * BROKEN : if this definitely looks like a broken kernel + * FORCE_OK : if this definitely looks good + * ASSUME_OK : if we can't tell + */ + slot %= PCI_NUM_PINS; + + if (irq == 27) { + if (slot == 2) { + /* Might be a Phase I kernel, or might be a fixed kernel, + * since slot 2 is where we expect this IRQ. + */ + return PCI_VPB_IRQMAP_ASSUME_OK; + } + /* Phase I kernel */ + return PCI_VPB_IRQMAP_BROKEN; + } + if (irq == slot + 27) { + /* Phase II kernel */ + return PCI_VPB_IRQMAP_BROKEN; + } + if (irq == slot + 27 + 64) { + /* Phase III kernel */ + return PCI_VPB_IRQMAP_BROKEN; + } + /* Anything else must be a fixed kernel, possibly using an + * arbitrary irq map. + */ + return PCI_VPB_IRQMAP_FORCE_OK; +} + +static void pci_vpb_config_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + PCIVPBState *s = opaque; + if (!s->realview && (addr & 0xff) == PCI_INTERRUPT_LINE + && s->irq_mapping == PCI_VPB_IRQMAP_ASSUME_OK) { + uint8_t devfn = addr >> 8; + s->irq_mapping = pci_vpb_broken_irq(PCI_SLOT(devfn), val); + } + pci_data_write(&s->pci_bus, addr, val, size); +} + +static uint64_t pci_vpb_config_read(void *opaque, hwaddr addr, + unsigned size) +{ + PCIVPBState *s = opaque; + uint32_t val; + val = pci_data_read(&s->pci_bus, addr, size); + return val; +} + +static const MemoryRegionOps pci_vpb_config_ops = { + .read = pci_vpb_config_read, + .write = pci_vpb_config_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static int pci_vpb_map_irq(PCIDevice *d, int irq_num) +{ + PCIVPBState *s = container_of(pci_get_bus(d), PCIVPBState, pci_bus); + + if (s->irq_mapping == PCI_VPB_IRQMAP_BROKEN) { + /* Legacy broken IRQ mapping for compatibility with old and + * buggy Linux guests + */ + return irq_num; + } + + /* Slot to IRQ mapping for RealView Platform Baseboard 926 backplane + * name slot IntA IntB IntC IntD + * A 31 IRQ28 IRQ29 IRQ30 IRQ27 + * B 30 IRQ27 IRQ28 IRQ29 IRQ30 + * C 29 IRQ30 IRQ27 IRQ28 IRQ29 + * Slot C is for the host bridge; A and B the peripherals. + * Our output irqs 0..3 correspond to the baseboard's 27..30. + * + * This mapping function takes account of an oddity in the PB926 + * board wiring, where the FPGA's P_nINTA input is connected to + * the INTB connection on the board PCI edge connector, P_nINTB + * is connected to INTC, and so on, so everything is one number + * further round from where you might expect. 
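+ *
+ * Worked example (editorial): the host bridge in slot 29 asserting
+ * INTA maps via pci_swizzle_map_irq_fn(d, 0 + 2) to (29 + 2) % 4 = 3,
+ * i.e. output irq 3 = baseboard IRQ 30, matching row C of the table
+ * above.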
+ */ + return pci_swizzle_map_irq_fn(d, irq_num + 2); +} + +static int pci_vpb_rv_map_irq(PCIDevice *d, int irq_num) +{ + /* Slot to IRQ mapping for RealView EB and PB1176 backplane + * name slot IntA IntB IntC IntD + * A 31 IRQ50 IRQ51 IRQ48 IRQ49 + * B 30 IRQ49 IRQ50 IRQ51 IRQ48 + * C 29 IRQ48 IRQ49 IRQ50 IRQ51 + * Slot C is for the host bridge; A and B the peripherals. + * Our output irqs 0..3 correspond to the baseboard's 48..51. + * + * The PB1176 and EB boards don't have the PB926 wiring oddity + * described above; P_nINTA connects to INTA, P_nINTB to INTB + * and so on, which is why this mapping function is different. + */ + return pci_swizzle_map_irq_fn(d, irq_num + 3); +} + +static void pci_vpb_set_irq(void *opaque, int irq_num, int level) +{ + qemu_irq *pic = opaque; + + qemu_set_irq(pic[irq_num], level); +} + +static void pci_vpb_reset(DeviceState *d) +{ + PCIVPBState *s = PCI_VPB(d); + + s->imap[0] = 0; + s->imap[1] = 0; + s->imap[2] = 0; + s->smap[0] = 0; + s->smap[1] = 0; + s->smap[2] = 0; + s->selfid = 0; + s->flags = 0; + s->irq_mapping = s->irq_mapping_prop; + + pci_vpb_update_all_windows(s); +} + +static void pci_vpb_init(Object *obj) +{ + PCIVPBState *s = PCI_VPB(obj); + + /* Window sizes for VersatilePB; realview_pci's init will override */ + s->mem_win_size[0] = 0x0c000000; + s->mem_win_size[1] = 0x10000000; + s->mem_win_size[2] = 0x10000000; +} + +static void pci_vpb_realize(DeviceState *dev, Error **errp) +{ + PCIVPBState *s = PCI_VPB(dev); + PCIHostState *h = PCI_HOST_BRIDGE(dev); + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + pci_map_irq_fn mapfn; + int i; + + memory_region_init(&s->pci_io_space, OBJECT(s), "pci_io", 4 * GiB); + memory_region_init(&s->pci_mem_space, OBJECT(s), "pci_mem", 4 * GiB); + + pci_root_bus_init(&s->pci_bus, sizeof(s->pci_bus), dev, "pci", + &s->pci_mem_space, &s->pci_io_space, + PCI_DEVFN(11, 0), TYPE_PCI_BUS); + h->bus = &s->pci_bus; + + object_initialize(&s->pci_dev, sizeof(s->pci_dev), TYPE_VERSATILE_PCI_HOST); + + for (i = 0; i < 4; i++) { + sysbus_init_irq(sbd, &s->irq[i]); + } + + if (s->realview) { + mapfn = pci_vpb_rv_map_irq; + } else { + mapfn = pci_vpb_map_irq; + } + + pci_bus_irqs(&s->pci_bus, pci_vpb_set_irq, mapfn, s->irq, 4); + + /* Our memory regions are: + * 0 : our control registers + * 1 : PCI self config window + * 2 : PCI config window + * 3 : PCI IO window + * 4..6 : PCI memory windows + */ + memory_region_init_io(&s->controlregs, OBJECT(s), &pci_vpb_reg_ops, s, + "pci-vpb-regs", 0x1000); + sysbus_init_mmio(sbd, &s->controlregs); + memory_region_init_io(&s->mem_config, OBJECT(s), &pci_vpb_config_ops, s, + "pci-vpb-selfconfig", 0x1000000); + sysbus_init_mmio(sbd, &s->mem_config); + memory_region_init_io(&s->mem_config2, OBJECT(s), &pci_vpb_config_ops, s, + "pci-vpb-config", 0x1000000); + sysbus_init_mmio(sbd, &s->mem_config2); + + /* The window into I/O space is always into a fixed base address; + * its size is the same for both realview and versatile. + */ + memory_region_init_alias(&s->pci_io_window, OBJECT(s), "pci-vbp-io-window", + &s->pci_io_space, 0, 0x100000); + + sysbus_init_mmio(sbd, &s->pci_io_space); + + /* Create the alias regions corresponding to our three windows onto + * PCI memory space. The sizes vary from board to board; the base + * offsets are guest controllable via the IMAP registers. 
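+ * For example, on the Versatile variant a guest write of 0x5 to
+ * PCI_IMAP0 moves window 0 to PCI address 0x5 << 28 = 0x50000000
+ * (see pci_vpb_update_window() above).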
+ */ + for (i = 0; i < 3; i++) { + memory_region_init_alias(&s->pci_mem_window[i], OBJECT(s), "pci-vbp-window", + &s->pci_mem_space, 0, s->mem_win_size[i]); + sysbus_init_mmio(sbd, &s->pci_mem_window[i]); + } + + /* TODO Remove once realize propagates to child devices. */ + qdev_realize(DEVICE(&s->pci_dev), BUS(&s->pci_bus), errp); +} + +static void versatile_pci_host_realize(PCIDevice *d, Error **errp) +{ + pci_set_word(d->config + PCI_STATUS, + PCI_STATUS_66MHZ | PCI_STATUS_DEVSEL_MEDIUM); + pci_set_byte(d->config + PCI_LATENCY_TIMER, 0x10); +} + +static void versatile_pci_host_class_init(ObjectClass *klass, void *data) +{ + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + DeviceClass *dc = DEVICE_CLASS(klass); + + k->realize = versatile_pci_host_realize; + k->vendor_id = PCI_VENDOR_ID_XILINX; + k->device_id = PCI_DEVICE_ID_XILINX_XC2VP30; + k->class_id = PCI_CLASS_PROCESSOR_CO; + /* + * PCI-facing part of the host bridge, not usable without the + * host-facing part, which can't be device_add'ed, yet. + */ + dc->user_creatable = false; +} + +static const TypeInfo versatile_pci_host_info = { + .name = TYPE_VERSATILE_PCI_HOST, + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(PCIDevice), + .class_init = versatile_pci_host_class_init, + .interfaces = (InterfaceInfo[]) { + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { }, + }, +}; + +static Property pci_vpb_properties[] = { + DEFINE_PROP_UINT8("broken-irq-mapping", PCIVPBState, irq_mapping_prop, + PCI_VPB_IRQMAP_ASSUME_OK), + DEFINE_PROP_END_OF_LIST() +}; + +static void pci_vpb_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = pci_vpb_realize; + dc->reset = pci_vpb_reset; + dc->vmsd = &pci_vpb_vmstate; + device_class_set_props(dc, pci_vpb_properties); +} + +static const TypeInfo pci_vpb_info = { + .name = TYPE_VERSATILE_PCI, + .parent = TYPE_PCI_HOST_BRIDGE, + .instance_size = sizeof(PCIVPBState), + .instance_init = pci_vpb_init, + .class_init = pci_vpb_class_init, +}; + +static void pci_realview_init(Object *obj) +{ + PCIVPBState *s = PCI_VPB(obj); + + s->realview = 1; + /* The PCI window sizes are different on Realview boards */ + s->mem_win_size[0] = 0x01000000; + s->mem_win_size[1] = 0x04000000; + s->mem_win_size[2] = 0x08000000; +} + +static const TypeInfo pci_realview_info = { + .name = "realview_pci", + .parent = TYPE_VERSATILE_PCI, + .instance_init = pci_realview_init, +}; + +static void versatile_pci_register_types(void) +{ + type_register_static(&pci_vpb_info); + type_register_static(&pci_realview_info); + type_register_static(&versatile_pci_host_info); +} + +type_init(versatile_pci_register_types) diff --git a/hw/pci-host/xen_igd_pt.c b/hw/pci-host/xen_igd_pt.c new file mode 100644 index 000000000..d094b675d --- /dev/null +++ b/hw/pci-host/xen_igd_pt.c @@ -0,0 +1,119 @@ +/* + * QEMU Intel IGD Passthrough Host Bridge Emulation + * + * Copyright (c) 2006 Fabrice Bellard + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions 
of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "hw/pci/pci.h" +#include "hw/pci/pci_host.h" +#include "hw/pci-host/i440fx.h" +#include "qapi/error.h" + +typedef struct { + uint8_t offset; + uint8_t len; +} IGDHostInfo; + +/* Here we just expose minimal host bridge offset subset. */ +static const IGDHostInfo igd_host_bridge_infos[] = { + {PCI_REVISION_ID, 2}, + {PCI_SUBSYSTEM_VENDOR_ID, 2}, + {PCI_SUBSYSTEM_ID, 2}, + {0x50, 2}, /* SNB: processor graphics control register */ + {0x52, 2}, /* processor graphics control register */ + {0xa4, 4}, /* SNB: graphics base of stolen memory */ + {0xa8, 4}, /* SNB: base of GTT stolen memory */ +}; + +static void host_pci_config_read(int pos, int len, uint32_t *val, Error **errp) +{ + int rc, config_fd; + /* Access real host bridge. */ + char *path = g_strdup_printf("/sys/bus/pci/devices/%04x:%02x:%02x.%d/%s", + 0, 0, 0, 0, "config"); + + config_fd = open(path, O_RDWR); + if (config_fd < 0) { + error_setg_errno(errp, errno, "Failed to open: %s", path); + goto out; + } + + if (lseek(config_fd, pos, SEEK_SET) != pos) { + error_setg_errno(errp, errno, "Failed to seek: %s", path); + goto out_close_fd; + } + + do { + rc = read(config_fd, (uint8_t *)val, len); + } while (rc < 0 && (errno == EINTR || errno == EAGAIN)); + if (rc != len) { + error_setg_errno(errp, errno, "Failed to read: %s", path); + } + + out_close_fd: + close(config_fd); + out: + g_free(path); +} + +static void igd_pt_i440fx_realize(PCIDevice *pci_dev, Error **errp) +{ + ERRP_GUARD(); + uint32_t val = 0; + size_t i; + int pos, len; + + for (i = 0; i < ARRAY_SIZE(igd_host_bridge_infos); i++) { + pos = igd_host_bridge_infos[i].offset; + len = igd_host_bridge_infos[i].len; + host_pci_config_read(pos, len, &val, errp); + if (*errp) { + return; + } + pci_default_write_config(pci_dev, pos, val, len); + } +} + +static void igd_passthrough_i440fx_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + + k->realize = igd_pt_i440fx_realize; + dc->desc = "IGD Passthrough Host bridge"; +} + +static const TypeInfo igd_passthrough_i440fx_info = { + .name = TYPE_IGD_PASSTHROUGH_I440FX_PCI_DEVICE, + .parent = TYPE_I440FX_PCI_DEVICE, + .instance_size = sizeof(PCII440FXState), + .class_init = igd_passthrough_i440fx_class_init, +}; + +static void igd_pt_i440fx_register_types(void) +{ + type_register_static(&igd_passthrough_i440fx_info); +} + +type_init(igd_pt_i440fx_register_types) diff --git a/hw/pci-host/xilinx-pcie.c b/hw/pci-host/xilinx-pcie.c new file mode 100644 index 000000000..38d5901a4 --- /dev/null +++ b/hw/pci-host/xilinx-pcie.c @@ -0,0 +1,331 @@ +/* + * Xilinx PCIe host controller emulation. + * + * Copyright (c) 2016 Imagination Technologies + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
+ * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ + +#include "qemu/osdep.h" +#include "qemu/module.h" +#include "qemu/units.h" +#include "qapi/error.h" +#include "hw/pci/pci_bridge.h" +#include "hw/qdev-properties.h" +#include "hw/irq.h" +#include "hw/pci-host/xilinx-pcie.h" + +enum root_cfg_reg { + /* Interrupt Decode Register */ + ROOTCFG_INTDEC = 0x138, + + /* Interrupt Mask Register */ + ROOTCFG_INTMASK = 0x13c, + /* INTx Interrupt Received */ +#define ROOTCFG_INTMASK_INTX (1 << 16) + /* MSI Interrupt Received */ +#define ROOTCFG_INTMASK_MSI (1 << 17) + + /* PHY Status/Control Register */ + ROOTCFG_PSCR = 0x144, + /* Link Up */ +#define ROOTCFG_PSCR_LINK_UP (1 << 11) + + /* Root Port Status/Control Register */ + ROOTCFG_RPSCR = 0x148, + /* Bridge Enable */ +#define ROOTCFG_RPSCR_BRIDGEEN (1 << 0) + /* Interrupt FIFO Not Empty */ +#define ROOTCFG_RPSCR_INTNEMPTY (1 << 18) + /* Interrupt FIFO Overflow */ +#define ROOTCFG_RPSCR_INTOVF (1 << 19) + + /* Root Port Interrupt FIFO Read Register 1 */ + ROOTCFG_RPIFR1 = 0x158, +#define ROOTCFG_RPIFR1_INT_LANE_SHIFT 27 +#define ROOTCFG_RPIFR1_INT_ASSERT_SHIFT 29 +#define ROOTCFG_RPIFR1_INT_VALID_SHIFT 31 + /* Root Port Interrupt FIFO Read Register 2 */ + ROOTCFG_RPIFR2 = 0x15c, +}; + +static void xilinx_pcie_update_intr(XilinxPCIEHost *s, + uint32_t set, uint32_t clear) +{ + int level; + + s->intr |= set; + s->intr &= ~clear; + + if (s->intr_fifo_r != s->intr_fifo_w) { + s->intr |= ROOTCFG_INTMASK_INTX; + } + + level = !!(s->intr & s->intr_mask); + qemu_set_irq(s->irq, level); +} + +static void xilinx_pcie_queue_intr(XilinxPCIEHost *s, + uint32_t fifo_reg1, uint32_t fifo_reg2) +{ + XilinxPCIEInt *intr; + unsigned int new_w; + + new_w = (s->intr_fifo_w + 1) % ARRAY_SIZE(s->intr_fifo); + if (new_w == s->intr_fifo_r) { + s->rpscr |= ROOTCFG_RPSCR_INTOVF; + return; + } + + intr = &s->intr_fifo[s->intr_fifo_w]; + s->intr_fifo_w = new_w; + + intr->fifo_reg1 = fifo_reg1; + intr->fifo_reg2 = fifo_reg2; + + xilinx_pcie_update_intr(s, ROOTCFG_INTMASK_INTX, 0); +} + +static void xilinx_pcie_set_irq(void *opaque, int irq_num, int level) +{ + XilinxPCIEHost *s = XILINX_PCIE_HOST(opaque); + + xilinx_pcie_queue_intr(s, + (irq_num << ROOTCFG_RPIFR1_INT_LANE_SHIFT) | + (level << ROOTCFG_RPIFR1_INT_ASSERT_SHIFT) | + (1 << ROOTCFG_RPIFR1_INT_VALID_SHIFT), + 0); +} + +static void xilinx_pcie_host_realize(DeviceState *dev, Error **errp) +{ + PCIHostState *pci = PCI_HOST_BRIDGE(dev); + XilinxPCIEHost *s = XILINX_PCIE_HOST(dev); + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + PCIExpressHost *pex = PCIE_HOST_BRIDGE(dev); + + snprintf(s->name, sizeof(s->name), "pcie%u", s->bus_nr); + + /* PCI configuration space */ + pcie_host_mmcfg_init(pex, s->cfg_size); + + /* MMIO region */ + memory_region_init(&s->mmio, OBJECT(s), "mmio", UINT64_MAX); + memory_region_set_enabled(&s->mmio, false); + + /* dummy PCI I/O region (not visible to the CPU) */ + memory_region_init(&s->io, OBJECT(s), "io", 16); + + /* interrupt out */ + qdev_init_gpio_out_named(dev, &s->irq, "interrupt_out", 1); + + sysbus_init_mmio(sbd, &pex->mmio); + sysbus_init_mmio(sbd, &s->mmio); + + pci->bus = pci_register_root_bus(dev, s->name, xilinx_pcie_set_irq, +
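+ /*
+ * Note (editorial): the remaining arguments pair the set_irq
+ * callback above with the generic (slot + pin) % 4 swizzle, pass
+ * s as the opaque, hand over the MMIO and (dummy) I/O containers,
+ * and create a PCIe root bus with 4 IRQ pins starting at devfn 0.
+ */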
pci_swizzle_map_irq_fn, s, &s->mmio, + &s->io, 0, 4, TYPE_PCIE_BUS); + + qdev_realize(DEVICE(&s->root), BUS(pci->bus), &error_fatal); +} + +static const char *xilinx_pcie_host_root_bus_path(PCIHostState *host_bridge, + PCIBus *rootbus) +{ + return "0000:00"; +} + +static void xilinx_pcie_host_init(Object *obj) +{ + XilinxPCIEHost *s = XILINX_PCIE_HOST(obj); + XilinxPCIERoot *root = &s->root; + + object_initialize_child(obj, "root", root, TYPE_XILINX_PCIE_ROOT); + qdev_prop_set_int32(DEVICE(root), "addr", PCI_DEVFN(0, 0)); + qdev_prop_set_bit(DEVICE(root), "multifunction", false); +} + +static Property xilinx_pcie_host_props[] = { + DEFINE_PROP_UINT32("bus_nr", XilinxPCIEHost, bus_nr, 0), + DEFINE_PROP_SIZE("cfg_base", XilinxPCIEHost, cfg_base, 0), + DEFINE_PROP_SIZE("cfg_size", XilinxPCIEHost, cfg_size, 32 * MiB), + DEFINE_PROP_SIZE("mmio_base", XilinxPCIEHost, mmio_base, 0), + DEFINE_PROP_SIZE("mmio_size", XilinxPCIEHost, mmio_size, 1 * MiB), + DEFINE_PROP_BOOL("link_up", XilinxPCIEHost, link_up, true), + DEFINE_PROP_END_OF_LIST(), +}; + +static void xilinx_pcie_host_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIHostBridgeClass *hc = PCI_HOST_BRIDGE_CLASS(klass); + + hc->root_bus_path = xilinx_pcie_host_root_bus_path; + dc->realize = xilinx_pcie_host_realize; + set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); + dc->fw_name = "pci"; + device_class_set_props(dc, xilinx_pcie_host_props); +} + +static const TypeInfo xilinx_pcie_host_info = { + .name = TYPE_XILINX_PCIE_HOST, + .parent = TYPE_PCIE_HOST_BRIDGE, + .instance_size = sizeof(XilinxPCIEHost), + .instance_init = xilinx_pcie_host_init, + .class_init = xilinx_pcie_host_class_init, +}; + +static uint32_t xilinx_pcie_root_config_read(PCIDevice *d, + uint32_t address, int len) +{ + XilinxPCIEHost *s = XILINX_PCIE_HOST(OBJECT(d)->parent); + uint32_t val; + + switch (address) { + case ROOTCFG_INTDEC: + val = s->intr; + break; + case ROOTCFG_INTMASK: + val = s->intr_mask; + break; + case ROOTCFG_PSCR: + val = s->link_up ? 
ROOTCFG_PSCR_LINK_UP : 0; + break; + case ROOTCFG_RPSCR: + if (s->intr_fifo_r != s->intr_fifo_w) { + s->rpscr &= ~ROOTCFG_RPSCR_INTNEMPTY; + } else { + s->rpscr |= ROOTCFG_RPSCR_INTNEMPTY; + } + val = s->rpscr; + break; + case ROOTCFG_RPIFR1: + if (s->intr_fifo_w == s->intr_fifo_r) { + /* FIFO empty */ + val = 0; + } else { + val = s->intr_fifo[s->intr_fifo_r].fifo_reg1; + } + break; + case ROOTCFG_RPIFR2: + if (s->intr_fifo_w == s->intr_fifo_r) { + /* FIFO empty */ + val = 0; + } else { + val = s->intr_fifo[s->intr_fifo_r].fifo_reg2; + } + break; + default: + val = pci_default_read_config(d, address, len); + break; + } + return val; +} + +static void xilinx_pcie_root_config_write(PCIDevice *d, uint32_t address, + uint32_t val, int len) +{ + XilinxPCIEHost *s = XILINX_PCIE_HOST(OBJECT(d)->parent); + switch (address) { + case ROOTCFG_INTDEC: + xilinx_pcie_update_intr(s, 0, val); + break; + case ROOTCFG_INTMASK: + s->intr_mask = val; + xilinx_pcie_update_intr(s, 0, 0); + break; + case ROOTCFG_RPSCR: + s->rpscr &= ~ROOTCFG_RPSCR_BRIDGEEN; + s->rpscr |= val & ROOTCFG_RPSCR_BRIDGEEN; + memory_region_set_enabled(&s->mmio, val & ROOTCFG_RPSCR_BRIDGEEN); + + if (val & ROOTCFG_INTMASK_INTX) { + s->rpscr &= ~ROOTCFG_INTMASK_INTX; + } + break; + case ROOTCFG_RPIFR1: + case ROOTCFG_RPIFR2: + if (s->intr_fifo_w == s->intr_fifo_r) { + /* FIFO empty */ + return; + } else { + s->intr_fifo_r = (s->intr_fifo_r + 1) % ARRAY_SIZE(s->intr_fifo); + } + break; + default: + pci_default_write_config(d, address, val, len); + break; + } +} + +static void xilinx_pcie_root_realize(PCIDevice *pci_dev, Error **errp) +{ + BusState *bus = qdev_get_parent_bus(DEVICE(pci_dev)); + XilinxPCIEHost *s = XILINX_PCIE_HOST(bus->parent); + + pci_set_word(pci_dev->config + PCI_COMMAND, + PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER); + pci_set_word(pci_dev->config + PCI_MEMORY_BASE, s->mmio_base >> 16); + pci_set_word(pci_dev->config + PCI_MEMORY_LIMIT, + ((s->mmio_base + s->mmio_size - 1) >> 16) & 0xfff0); + + pci_bridge_initfn(pci_dev, TYPE_PCI_BUS); + + if (pcie_endpoint_cap_v1_init(pci_dev, 0x80) < 0) { + error_setg(errp, "Failed to initialize PCIe capability"); + } +} + +static void xilinx_pcie_root_class_init(ObjectClass *klass, void *data) +{ + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + DeviceClass *dc = DEVICE_CLASS(klass); + + set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); + dc->desc = "Xilinx AXI-PCIe Host Bridge"; + k->vendor_id = PCI_VENDOR_ID_XILINX; + k->device_id = 0x7021; + k->revision = 0; + k->class_id = PCI_CLASS_BRIDGE_HOST; + k->is_bridge = true; + k->realize = xilinx_pcie_root_realize; + k->exit = pci_bridge_exitfn; + dc->reset = pci_bridge_reset; + k->config_read = xilinx_pcie_root_config_read; + k->config_write = xilinx_pcie_root_config_write; + /* + * PCI-facing part of the host bridge, not usable without the + * host-facing part, which can't be device_add'ed, yet. 
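+ * (The PCI-facing part here is a TYPE_PCI_BRIDGE root port whose
+ * config_read/config_write hooks above fold the ROOTCFG_* interrupt
+ * registers into its PCI config space.)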
+ */ + dc->user_creatable = false; +} + +static const TypeInfo xilinx_pcie_root_info = { + .name = TYPE_XILINX_PCIE_ROOT, + .parent = TYPE_PCI_BRIDGE, + .instance_size = sizeof(XilinxPCIERoot), + .class_init = xilinx_pcie_root_class_init, + .interfaces = (InterfaceInfo[]) { + { INTERFACE_PCIE_DEVICE }, + { } + }, +}; + +static void xilinx_pcie_register(void) +{ + type_register_static(&xilinx_pcie_root_info); + type_register_static(&xilinx_pcie_host_info); +} + +type_init(xilinx_pcie_register) diff --git a/hw/pci/Kconfig b/hw/pci/Kconfig new file mode 100644 index 000000000..77f8b005f --- /dev/null +++ b/hw/pci/Kconfig @@ -0,0 +1,15 @@ +config PCI + bool + +config PCI_EXPRESS + bool + select PCI + +config PCI_DEVICES + bool + +config MSI_NONBROKEN + # selected by interrupt controllers that do not support MSI, + # or support it and have a good implementation. See commit + # 47d2b0f33c664533b8dbd5cb17faa8e6a01afe1f. + bool diff --git a/hw/pci/meson.build b/hw/pci/meson.build new file mode 100644 index 000000000..5c4bbac81 --- /dev/null +++ b/hw/pci/meson.build @@ -0,0 +1,19 @@ +pci_ss = ss.source_set() +pci_ss.add(files( + 'msi.c', + 'msix.c', + 'pci.c', + 'pci_bridge.c', + 'pci_host.c', + 'shpc.c', + 'slotid_cap.c' +)) +# The functions in these modules can be used by devices too. Since we +# allow plugging PCIe devices into PCI buses, include them even if +# CONFIG_PCI_EXPRESS=n. +pci_ss.add(files('pcie.c', 'pcie_aer.c')) +softmmu_ss.add(when: 'CONFIG_PCI_EXPRESS', if_true: files('pcie_port.c', 'pcie_host.c')) +softmmu_ss.add_all(when: 'CONFIG_PCI', if_true: pci_ss) + +softmmu_ss.add(when: 'CONFIG_PCI', if_false: files('pci-stub.c')) +softmmu_ss.add(when: 'CONFIG_ALL', if_true: files('pci-stub.c')) diff --git a/hw/pci/msi.c b/hw/pci/msi.c new file mode 100644 index 000000000..47d2b0f33 --- /dev/null +++ b/hw/pci/msi.c @@ -0,0 +1,442 @@ +/* + * msi.c + * + * Copyright (c) 2010 Isaku Yamahata + * VA Linux Systems Japan K.K. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + + * You should have received a copy of the GNU General Public License along + * with this program; if not, see <http://www.gnu.org/licenses/>. + */ + +#include "qemu/osdep.h" +#include "hw/pci/msi.h" +#include "hw/xen/xen.h" +#include "qemu/range.h" +#include "qapi/error.h" + +/* PCI_MSI_ADDRESS_LO */ +#define PCI_MSI_ADDRESS_LO_MASK (~0x3) + +/* If we get rid of cap allocator, we won't need those. */ +#define PCI_MSI_32_SIZEOF 0x0a +#define PCI_MSI_64_SIZEOF 0x0e +#define PCI_MSI_32M_SIZEOF 0x14 +#define PCI_MSI_64M_SIZEOF 0x18 + +#define PCI_MSI_VECTORS_MAX 32 + +/* + * Flag for interrupt controllers to declare broken MSI/MSI-X support. + * values: false - broken; true - non-broken. + * + * Setting this flag to false will remove MSI/MSI-X capability from all devices.
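+ * (Concretely, msi_init() below and msix_init() in msix.c fail with
+ * -ENOTSUP while msi_nonbroken is false, so the capability is never
+ * added and guests fall back to INTx. A typical device-side call, as
+ * a sketch:
+ *
+ *     msi_init(pci_dev, 0, 1, true, false, &err);
+ *
+ * i.e. one vector, 64-bit addressing, no per-vector masking.)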
+ * + * It is preferable for controllers to set this to true (non-broken) even if + * they do not actually support MSI/MSI-X: guests normally probe the controller + * type and do not attempt to enable MSI/MSI-X with interrupt controllers not + * supporting such, so removing the capability is not required, and + * it seems cleaner to have a given device look the same for all boards. + * + * TODO: some existing controllers violate the above rule. Identify and fix them. + */ +bool msi_nonbroken; + +/* If we get rid of cap allocator, we won't need this. */ +static inline uint8_t msi_cap_sizeof(uint16_t flags) +{ + switch (flags & (PCI_MSI_FLAGS_MASKBIT | PCI_MSI_FLAGS_64BIT)) { + case PCI_MSI_FLAGS_MASKBIT | PCI_MSI_FLAGS_64BIT: + return PCI_MSI_64M_SIZEOF; + case PCI_MSI_FLAGS_64BIT: + return PCI_MSI_64_SIZEOF; + case PCI_MSI_FLAGS_MASKBIT: + return PCI_MSI_32M_SIZEOF; + case 0: + return PCI_MSI_32_SIZEOF; + default: + abort(); + break; + } + return 0; +} + +//#define MSI_DEBUG + +#ifdef MSI_DEBUG +# define MSI_DPRINTF(fmt, ...) \ + fprintf(stderr, "%s:%d " fmt, __func__, __LINE__, ## __VA_ARGS__) +#else +# define MSI_DPRINTF(fmt, ...) do { } while (0) +#endif +#define MSI_DEV_PRINTF(dev, fmt, ...) \ + MSI_DPRINTF("%s:%x " fmt, (dev)->name, (dev)->devfn, ## __VA_ARGS__) + +static inline unsigned int msi_nr_vectors(uint16_t flags) +{ + return 1U << + ((flags & PCI_MSI_FLAGS_QSIZE) >> ctz32(PCI_MSI_FLAGS_QSIZE)); +} + +static inline uint8_t msi_flags_off(const PCIDevice* dev) +{ + return dev->msi_cap + PCI_MSI_FLAGS; +} + +static inline uint8_t msi_address_lo_off(const PCIDevice* dev) +{ + return dev->msi_cap + PCI_MSI_ADDRESS_LO; +} + +static inline uint8_t msi_address_hi_off(const PCIDevice* dev) +{ + return dev->msi_cap + PCI_MSI_ADDRESS_HI; +} + +static inline uint8_t msi_data_off(const PCIDevice* dev, bool msi64bit) +{ + return dev->msi_cap + (msi64bit ? PCI_MSI_DATA_64 : PCI_MSI_DATA_32); +} + +static inline uint8_t msi_mask_off(const PCIDevice* dev, bool msi64bit) +{ + return dev->msi_cap + (msi64bit ? PCI_MSI_MASK_64 : PCI_MSI_MASK_32); +} + +static inline uint8_t msi_pending_off(const PCIDevice* dev, bool msi64bit) +{ + return dev->msi_cap + (msi64bit ? PCI_MSI_PENDING_64 : PCI_MSI_PENDING_32); +} + +/* + * Special API for POWER to configure the vectors through + * a side channel. Should never be used by devices. 
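+ * (Background, assuming the usual sPAPR flow: the guest negotiates MSI
+ * resources through firmware calls rather than by writing config
+ * space, so the machine code patches the resulting address/data pair
+ * in from outside via this helper.)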
+ */ +void msi_set_message(PCIDevice *dev, MSIMessage msg) +{ + uint16_t flags = pci_get_word(dev->config + msi_flags_off(dev)); + bool msi64bit = flags & PCI_MSI_FLAGS_64BIT; + + if (msi64bit) { + pci_set_quad(dev->config + msi_address_lo_off(dev), msg.address); + } else { + pci_set_long(dev->config + msi_address_lo_off(dev), msg.address); + } + pci_set_word(dev->config + msi_data_off(dev, msi64bit), msg.data); +} + +MSIMessage msi_get_message(PCIDevice *dev, unsigned int vector) +{ + uint16_t flags = pci_get_word(dev->config + msi_flags_off(dev)); + bool msi64bit = flags & PCI_MSI_FLAGS_64BIT; + unsigned int nr_vectors = msi_nr_vectors(flags); + MSIMessage msg; + + assert(vector < nr_vectors); + + if (msi64bit) { + msg.address = pci_get_quad(dev->config + msi_address_lo_off(dev)); + } else { + msg.address = pci_get_long(dev->config + msi_address_lo_off(dev)); + } + + /* upper bit 31:16 is zero */ + msg.data = pci_get_word(dev->config + msi_data_off(dev, msi64bit)); + if (nr_vectors > 1) { + msg.data &= ~(nr_vectors - 1); + msg.data |= vector; + } + + return msg; +} + +bool msi_enabled(const PCIDevice *dev) +{ + return msi_present(dev) && + (pci_get_word(dev->config + msi_flags_off(dev)) & + PCI_MSI_FLAGS_ENABLE); +} + +/* + * Make PCI device @dev MSI-capable. + * Non-zero @offset puts capability MSI at that offset in PCI config + * space. + * @nr_vectors is the number of MSI vectors (1, 2, 4, 8, 16 or 32). + * If @msi64bit, make the device capable of sending a 64-bit message + * address. + * If @msi_per_vector_mask, make the device support per-vector masking. + * @errp is for returning errors. + * Return 0 on success; set @errp and return -errno on error. + * + * -ENOTSUP means lacking msi support for a msi-capable platform. + * -EINVAL means capability overlap, happens when @offset is non-zero, + * also means a programming error, except device assignment, which can check + * if a real HW is broken. + */ +int msi_init(struct PCIDevice *dev, uint8_t offset, + unsigned int nr_vectors, bool msi64bit, + bool msi_per_vector_mask, Error **errp) +{ + unsigned int vectors_order; + uint16_t flags; + uint8_t cap_size; + int config_offset; + + if (!msi_nonbroken) { + error_setg(errp, "MSI is not supported by interrupt controller"); + return -ENOTSUP; + } + + MSI_DEV_PRINTF(dev, + "init offset: 0x%"PRIx8" vector: %"PRId8 + " 64bit %d mask %d\n", + offset, nr_vectors, msi64bit, msi_per_vector_mask); + + assert(!(nr_vectors & (nr_vectors - 1))); /* power of 2 */ + assert(nr_vectors > 0); + assert(nr_vectors <= PCI_MSI_VECTORS_MAX); + /* the nr of MSI vectors is up to 32 */ + vectors_order = ctz32(nr_vectors); + + flags = vectors_order << ctz32(PCI_MSI_FLAGS_QMASK); + if (msi64bit) { + flags |= PCI_MSI_FLAGS_64BIT; + } + if (msi_per_vector_mask) { + flags |= PCI_MSI_FLAGS_MASKBIT; + } + + cap_size = msi_cap_sizeof(flags); + config_offset = pci_add_capability(dev, PCI_CAP_ID_MSI, offset, + cap_size, errp); + if (config_offset < 0) { + return config_offset; + } + + dev->msi_cap = config_offset; + dev->cap_present |= QEMU_PCI_CAP_MSI; + + pci_set_word(dev->config + msi_flags_off(dev), flags); + pci_set_word(dev->wmask + msi_flags_off(dev), + PCI_MSI_FLAGS_QSIZE | PCI_MSI_FLAGS_ENABLE); + pci_set_long(dev->wmask + msi_address_lo_off(dev), + PCI_MSI_ADDRESS_LO_MASK); + if (msi64bit) { + pci_set_long(dev->wmask + msi_address_hi_off(dev), 0xffffffff); + } + pci_set_word(dev->wmask + msi_data_off(dev, msi64bit), 0xffff); + + if (msi_per_vector_mask) { + /* Make mask bits 0 to nr_vectors - 1 writable. 
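+ * For example, nr_vectors == 4 gives
+ * 0xffffffff >> (32 - 4) = 0x0000000f.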
*/ + pci_set_long(dev->wmask + msi_mask_off(dev, msi64bit), + 0xffffffff >> (PCI_MSI_VECTORS_MAX - nr_vectors)); + } + + return 0; +} + +void msi_uninit(struct PCIDevice *dev) +{ + uint16_t flags; + uint8_t cap_size; + + if (!msi_present(dev)) { + return; + } + flags = pci_get_word(dev->config + msi_flags_off(dev)); + cap_size = msi_cap_sizeof(flags); + pci_del_capability(dev, PCI_CAP_ID_MSI, cap_size); + dev->cap_present &= ~QEMU_PCI_CAP_MSI; + + MSI_DEV_PRINTF(dev, "uninit\n"); +} + +void msi_reset(PCIDevice *dev) +{ + uint16_t flags; + bool msi64bit; + + if (!msi_present(dev)) { + return; + } + + flags = pci_get_word(dev->config + msi_flags_off(dev)); + flags &= ~(PCI_MSI_FLAGS_QSIZE | PCI_MSI_FLAGS_ENABLE); + msi64bit = flags & PCI_MSI_FLAGS_64BIT; + + pci_set_word(dev->config + msi_flags_off(dev), flags); + pci_set_long(dev->config + msi_address_lo_off(dev), 0); + if (msi64bit) { + pci_set_long(dev->config + msi_address_hi_off(dev), 0); + } + pci_set_word(dev->config + msi_data_off(dev, msi64bit), 0); + if (flags & PCI_MSI_FLAGS_MASKBIT) { + pci_set_long(dev->config + msi_mask_off(dev, msi64bit), 0); + pci_set_long(dev->config + msi_pending_off(dev, msi64bit), 0); + } + MSI_DEV_PRINTF(dev, "reset\n"); +} + +bool msi_is_masked(const PCIDevice *dev, unsigned int vector) +{ + uint16_t flags = pci_get_word(dev->config + msi_flags_off(dev)); + uint32_t mask, data; + bool msi64bit = flags & PCI_MSI_FLAGS_64BIT; + assert(vector < PCI_MSI_VECTORS_MAX); + + if (!(flags & PCI_MSI_FLAGS_MASKBIT)) { + return false; + } + + data = pci_get_word(dev->config + msi_data_off(dev, msi64bit)); + if (xen_is_pirq_msi(data)) { + return false; + } + + mask = pci_get_long(dev->config + + msi_mask_off(dev, flags & PCI_MSI_FLAGS_64BIT)); + return mask & (1U << vector); +} + +void msi_notify(PCIDevice *dev, unsigned int vector) +{ + uint16_t flags = pci_get_word(dev->config + msi_flags_off(dev)); + bool msi64bit = flags & PCI_MSI_FLAGS_64BIT; + unsigned int nr_vectors = msi_nr_vectors(flags); + MSIMessage msg; + + assert(vector < nr_vectors); + if (msi_is_masked(dev, vector)) { + assert(flags & PCI_MSI_FLAGS_MASKBIT); + pci_long_test_and_set_mask( + dev->config + msi_pending_off(dev, msi64bit), 1U << vector); + MSI_DEV_PRINTF(dev, "pending vector 0x%x\n", vector); + return; + } + + msg = msi_get_message(dev, vector); + + MSI_DEV_PRINTF(dev, + "notify vector 0x%x" + " address: 0x%"PRIx64" data: 0x%"PRIx32"\n", + vector, msg.address, msg.data); + msi_send_message(dev, msg); +} + +void msi_send_message(PCIDevice *dev, MSIMessage msg) +{ + MemTxAttrs attrs = {}; + + attrs.requester_id = pci_requester_id(dev); + address_space_stl_le(&dev->bus_master_as, msg.address, msg.data, + attrs, NULL); +} + +/* Normally called by pci_default_write_config(). 
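+ * Devices that install their own config_write hook must forward to
+ * msi_write_config() themselves, otherwise enable/mask updates from
+ * the guest would go unnoticed.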
*/ +void msi_write_config(PCIDevice *dev, uint32_t addr, uint32_t val, int len) +{ + uint16_t flags = pci_get_word(dev->config + msi_flags_off(dev)); + bool msi64bit = flags & PCI_MSI_FLAGS_64BIT; + bool msi_per_vector_mask = flags & PCI_MSI_FLAGS_MASKBIT; + unsigned int nr_vectors; + uint8_t log_num_vecs; + uint8_t log_max_vecs; + unsigned int vector; + uint32_t pending; + + if (!msi_present(dev) || + !ranges_overlap(addr, len, dev->msi_cap, msi_cap_sizeof(flags))) { + return; + } + +#ifdef MSI_DEBUG + MSI_DEV_PRINTF(dev, "addr 0x%"PRIx32" val 0x%"PRIx32" len %d\n", + addr, val, len); + MSI_DEV_PRINTF(dev, "ctrl: 0x%"PRIx16" address: 0x%"PRIx32, + flags, + pci_get_long(dev->config + msi_address_lo_off(dev))); + if (msi64bit) { + fprintf(stderr, " address-hi: 0x%"PRIx32, + pci_get_long(dev->config + msi_address_hi_off(dev))); + } + fprintf(stderr, " data: 0x%"PRIx16, + pci_get_word(dev->config + msi_data_off(dev, msi64bit))); + if (flags & PCI_MSI_FLAGS_MASKBIT) { + fprintf(stderr, " mask 0x%"PRIx32" pending 0x%"PRIx32, + pci_get_long(dev->config + msi_mask_off(dev, msi64bit)), + pci_get_long(dev->config + msi_pending_off(dev, msi64bit))); + } + fprintf(stderr, "\n"); +#endif + + if (!(flags & PCI_MSI_FLAGS_ENABLE)) { + return; + } + + /* + * Now MSI is enabled, clear INTx# interrupts. + * the driver is prohibited from writing enable bit to mask + * a service request. But the guest OS could do this. + * So we just discard the interrupts as moderate fallback. + * + * 6.8.3.3. Enabling Operation + * While enabled for MSI or MSI-X operation, a function is prohibited + * from using its INTx# pin (if implemented) to request + * service (MSI, MSI-X, and INTx# are mutually exclusive). + */ + pci_device_deassert_intx(dev); + + /* + * nr_vectors might be set bigger than capable. So clamp it. + * This is not legal by spec, so we can do anything we like, + * just don't crash the host + */ + log_num_vecs = + (flags & PCI_MSI_FLAGS_QSIZE) >> ctz32(PCI_MSI_FLAGS_QSIZE); + log_max_vecs = + (flags & PCI_MSI_FLAGS_QMASK) >> ctz32(PCI_MSI_FLAGS_QMASK); + if (log_num_vecs > log_max_vecs) { + flags &= ~PCI_MSI_FLAGS_QSIZE; + flags |= log_max_vecs << ctz32(PCI_MSI_FLAGS_QSIZE); + pci_set_word(dev->config + msi_flags_off(dev), flags); + } + + if (!msi_per_vector_mask) { + /* if per vector masking isn't supported, + there is no pending interrupt. */ + return; + } + + nr_vectors = msi_nr_vectors(flags); + + /* This will discard pending interrupts, if any. */ + pending = pci_get_long(dev->config + msi_pending_off(dev, msi64bit)); + pending &= 0xffffffff >> (PCI_MSI_VECTORS_MAX - nr_vectors); + pci_set_long(dev->config + msi_pending_off(dev, msi64bit), pending); + + /* deliver pending interrupts which are unmasked */ + for (vector = 0; vector < nr_vectors; ++vector) { + if (msi_is_masked(dev, vector) || !(pending & (1U << vector))) { + continue; + } + + pci_long_test_and_clear_mask( + dev->config + msi_pending_off(dev, msi64bit), 1U << vector); + msi_notify(dev, vector); + } +} + +unsigned int msi_nr_vectors_allocated(const PCIDevice *dev) +{ + uint16_t flags = pci_get_word(dev->config + msi_flags_off(dev)); + return msi_nr_vectors(flags); +} diff --git a/hw/pci/msix.c b/hw/pci/msix.c new file mode 100644 index 000000000..ae9331cd0 --- /dev/null +++ b/hw/pci/msix.c @@ -0,0 +1,671 @@ +/* + * MSI-X device support + * + * This module includes support for MSI-X in pci devices. + * + * Author: Michael S. Tsirkin + * + * Copyright (c) 2009, Red Hat Inc, Michael S. 
Tsirkin (mst@redhat.com) + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + * + * Contributions after 2012-01-13 are licensed under the terms of the + * GNU GPL, version 2 or (at your option) any later version. + */ + +#include "qemu/osdep.h" +#include "hw/pci/msi.h" +#include "hw/pci/msix.h" +#include "hw/pci/pci.h" +#include "hw/xen/xen.h" +#include "sysemu/xen.h" +#include "migration/qemu-file-types.h" +#include "migration/vmstate.h" +#include "qemu/range.h" +#include "qapi/error.h" +#include "trace.h" + +/* MSI enable bit and maskall bit are in byte 1 in FLAGS register */ +#define MSIX_CONTROL_OFFSET (PCI_MSIX_FLAGS + 1) +#define MSIX_ENABLE_MASK (PCI_MSIX_FLAGS_ENABLE >> 8) +#define MSIX_MASKALL_MASK (PCI_MSIX_FLAGS_MASKALL >> 8) + +MSIMessage msix_get_message(PCIDevice *dev, unsigned vector) +{ + uint8_t *table_entry = dev->msix_table + vector * PCI_MSIX_ENTRY_SIZE; + MSIMessage msg; + + msg.address = pci_get_quad(table_entry + PCI_MSIX_ENTRY_LOWER_ADDR); + msg.data = pci_get_long(table_entry + PCI_MSIX_ENTRY_DATA); + return msg; +} + +/* + * Special API for POWER to configure the vectors through + * a side channel. Should never be used by devices. + */ +void msix_set_message(PCIDevice *dev, int vector, struct MSIMessage msg) +{ + uint8_t *table_entry = dev->msix_table + vector * PCI_MSIX_ENTRY_SIZE; + + pci_set_quad(table_entry + PCI_MSIX_ENTRY_LOWER_ADDR, msg.address); + pci_set_long(table_entry + PCI_MSIX_ENTRY_DATA, msg.data); + table_entry[PCI_MSIX_ENTRY_VECTOR_CTRL] &= ~PCI_MSIX_ENTRY_CTRL_MASKBIT; +} + +static uint8_t msix_pending_mask(int vector) +{ + return 1 << (vector % 8); +} + +static uint8_t *msix_pending_byte(PCIDevice *dev, int vector) +{ + return dev->msix_pba + vector / 8; +} + +static int msix_is_pending(PCIDevice *dev, int vector) +{ + return *msix_pending_byte(dev, vector) & msix_pending_mask(vector); +} + +void msix_set_pending(PCIDevice *dev, unsigned int vector) +{ + *msix_pending_byte(dev, vector) |= msix_pending_mask(vector); +} + +void msix_clr_pending(PCIDevice *dev, int vector) +{ + *msix_pending_byte(dev, vector) &= ~msix_pending_mask(vector); +} + +static bool msix_vector_masked(PCIDevice *dev, unsigned int vector, bool fmask) +{ + unsigned offset = vector * PCI_MSIX_ENTRY_SIZE; + uint8_t *data = &dev->msix_table[offset + PCI_MSIX_ENTRY_DATA]; + /* MSIs on Xen can be remapped into pirqs. In those cases, masking + * and unmasking go through the PV evtchn path. 
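+ * (xen_is_pirq_msi() keys off the encoded data value; for such
+ * vectors Xen itself tracks the mask state, hence returning false,
+ * i.e. unmasked, here.)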
*/ + if (xen_enabled() && xen_is_pirq_msi(pci_get_long(data))) { + return false; + } + return fmask || dev->msix_table[offset + PCI_MSIX_ENTRY_VECTOR_CTRL] & + PCI_MSIX_ENTRY_CTRL_MASKBIT; +} + +bool msix_is_masked(PCIDevice *dev, unsigned int vector) +{ + return msix_vector_masked(dev, vector, dev->msix_function_masked); +} + +static void msix_fire_vector_notifier(PCIDevice *dev, + unsigned int vector, bool is_masked) +{ + MSIMessage msg; + int ret; + + if (!dev->msix_vector_use_notifier) { + return; + } + if (is_masked) { + dev->msix_vector_release_notifier(dev, vector); + } else { + msg = msix_get_message(dev, vector); + ret = dev->msix_vector_use_notifier(dev, vector, msg); + assert(ret >= 0); + } +} + +static void msix_handle_mask_update(PCIDevice *dev, int vector, bool was_masked) +{ + bool is_masked = msix_is_masked(dev, vector); + + if (is_masked == was_masked) { + return; + } + + msix_fire_vector_notifier(dev, vector, is_masked); + + if (!is_masked && msix_is_pending(dev, vector)) { + msix_clr_pending(dev, vector); + msix_notify(dev, vector); + } +} + +static bool msix_masked(PCIDevice *dev) +{ + return dev->config[dev->msix_cap + MSIX_CONTROL_OFFSET] & MSIX_MASKALL_MASK; +} + +static void msix_update_function_masked(PCIDevice *dev) +{ + dev->msix_function_masked = !msix_enabled(dev) || msix_masked(dev); +} + +/* Handle MSI-X capability config write. */ +void msix_write_config(PCIDevice *dev, uint32_t addr, + uint32_t val, int len) +{ + unsigned enable_pos = dev->msix_cap + MSIX_CONTROL_OFFSET; + int vector; + bool was_masked; + + if (!msix_present(dev) || !range_covers_byte(addr, len, enable_pos)) { + return; + } + + trace_msix_write_config(dev->name, msix_enabled(dev), msix_masked(dev)); + + was_masked = dev->msix_function_masked; + msix_update_function_masked(dev); + + if (!msix_enabled(dev)) { + return; + } + + pci_device_deassert_intx(dev); + + if (dev->msix_function_masked == was_masked) { + return; + } + + for (vector = 0; vector < dev->msix_entries_nr; ++vector) { + msix_handle_mask_update(dev, vector, + msix_vector_masked(dev, vector, was_masked)); + } +} + +static uint64_t msix_table_mmio_read(void *opaque, hwaddr addr, + unsigned size) +{ + PCIDevice *dev = opaque; + + assert(addr + size <= dev->msix_entries_nr * PCI_MSIX_ENTRY_SIZE); + return pci_get_long(dev->msix_table + addr); +} + +static void msix_table_mmio_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + PCIDevice *dev = opaque; + int vector = addr / PCI_MSIX_ENTRY_SIZE; + bool was_masked; + + assert(addr + size <= dev->msix_entries_nr * PCI_MSIX_ENTRY_SIZE); + + was_masked = msix_is_masked(dev, vector); + pci_set_long(dev->msix_table + addr, val); + msix_handle_mask_update(dev, vector, was_masked); +} + +static const MemoryRegionOps msix_table_mmio_ops = { + .read = msix_table_mmio_read, + .write = msix_table_mmio_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 8, + }, + .impl = { + .max_access_size = 4, + }, +}; + +static uint64_t msix_pba_mmio_read(void *opaque, hwaddr addr, + unsigned size) +{ + PCIDevice *dev = opaque; + if (dev->msix_vector_poll_notifier) { + unsigned vector_start = addr * 8; + unsigned vector_end = MIN(addr + size * 8, dev->msix_entries_nr); + dev->msix_vector_poll_notifier(dev, vector_start, vector_end); + } + + return pci_get_long(dev->msix_pba + addr); +} + +static void msix_pba_mmio_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ +} + +static const MemoryRegionOps msix_pba_mmio_ops = { + 
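+ /*
+ * Note (editorial): msix_pba_mmio_write above is intentionally
+ * empty — the Pending Bit Array is read-only to software per the
+ * PCI spec, so guest writes are silently discarded.
+ */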
.read = msix_pba_mmio_read, + .write = msix_pba_mmio_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 8, + }, + .impl = { + .max_access_size = 4, + }, +}; + +static void msix_mask_all(struct PCIDevice *dev, unsigned nentries) +{ + int vector; + + for (vector = 0; vector < nentries; ++vector) { + unsigned offset = + vector * PCI_MSIX_ENTRY_SIZE + PCI_MSIX_ENTRY_VECTOR_CTRL; + bool was_masked = msix_is_masked(dev, vector); + + dev->msix_table[offset] |= PCI_MSIX_ENTRY_CTRL_MASKBIT; + msix_handle_mask_update(dev, vector, was_masked); + } +} + +/* + * Make PCI device @dev MSI-X capable + * @nentries is the max number of MSI-X vectors that the device support. + * @table_bar is the MemoryRegion that MSI-X table structure resides. + * @table_bar_nr is number of base address register corresponding to @table_bar. + * @table_offset indicates the offset that the MSI-X table structure starts with + * in @table_bar. + * @pba_bar is the MemoryRegion that the Pending Bit Array structure resides. + * @pba_bar_nr is number of base address register corresponding to @pba_bar. + * @pba_offset indicates the offset that the Pending Bit Array structure + * starts with in @pba_bar. + * Non-zero @cap_pos puts capability MSI-X at that offset in PCI config space. + * @errp is for returning errors. + * + * Return 0 on success; set @errp and return -errno on error: + * -ENOTSUP means lacking msi support for a msi-capable platform. + * -EINVAL means capability overlap, happens when @cap_pos is non-zero, + * also means a programming error, except device assignment, which can check + * if a real HW is broken. + */ +int msix_init(struct PCIDevice *dev, unsigned short nentries, + MemoryRegion *table_bar, uint8_t table_bar_nr, + unsigned table_offset, MemoryRegion *pba_bar, + uint8_t pba_bar_nr, unsigned pba_offset, uint8_t cap_pos, + Error **errp) +{ + int cap; + unsigned table_size, pba_size; + uint8_t *config; + + /* Nothing to do if MSI is not supported by interrupt controller */ + if (!msi_nonbroken) { + error_setg(errp, "MSI-X is not supported by interrupt controller"); + return -ENOTSUP; + } + + if (nentries < 1 || nentries > PCI_MSIX_FLAGS_QSIZE + 1) { + error_setg(errp, "The number of MSI-X vectors is invalid"); + return -EINVAL; + } + + table_size = nentries * PCI_MSIX_ENTRY_SIZE; + pba_size = QEMU_ALIGN_UP(nentries, 64) / 8; + + /* Sanity test: table & pba don't overlap, fit within BARs, min aligned */ + if ((table_bar_nr == pba_bar_nr && + ranges_overlap(table_offset, table_size, pba_offset, pba_size)) || + table_offset + table_size > memory_region_size(table_bar) || + pba_offset + pba_size > memory_region_size(pba_bar) || + (table_offset | pba_offset) & PCI_MSIX_FLAGS_BIRMASK) { + error_setg(errp, "table & pba overlap, or they don't fit in BARs," + " or don't align"); + return -EINVAL; + } + + cap = pci_add_capability(dev, PCI_CAP_ID_MSIX, + cap_pos, MSIX_CAP_LENGTH, errp); + if (cap < 0) { + return cap; + } + + dev->msix_cap = cap; + dev->cap_present |= QEMU_PCI_CAP_MSIX; + config = dev->config + cap; + + pci_set_word(config + PCI_MSIX_FLAGS, nentries - 1); + dev->msix_entries_nr = nentries; + dev->msix_function_masked = true; + + pci_set_long(config + PCI_MSIX_TABLE, table_offset | table_bar_nr); + pci_set_long(config + PCI_MSIX_PBA, pba_offset | pba_bar_nr); + + /* Make flags bit writable. 
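+ * Only the MSI-X Enable and Function Mask bits, in the high byte of
+ * the Message Control word, become guest-writable; the Table and PBA
+ * offset registers stay read-only.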
+ /* Make flags bit writable. */ + dev->wmask[cap + MSIX_CONTROL_OFFSET] |= MSIX_ENABLE_MASK | + MSIX_MASKALL_MASK; + + dev->msix_table = g_malloc0(table_size); + dev->msix_pba = g_malloc0(pba_size); + dev->msix_entry_used = g_malloc0(nentries * sizeof *dev->msix_entry_used); + + msix_mask_all(dev, nentries); + + memory_region_init_io(&dev->msix_table_mmio, OBJECT(dev), &msix_table_mmio_ops, dev, + "msix-table", table_size); + memory_region_add_subregion(table_bar, table_offset, &dev->msix_table_mmio); + memory_region_init_io(&dev->msix_pba_mmio, OBJECT(dev), &msix_pba_mmio_ops, dev, + "msix-pba", pba_size); + memory_region_add_subregion(pba_bar, pba_offset, &dev->msix_pba_mmio); + + return 0; +} + +int msix_init_exclusive_bar(PCIDevice *dev, unsigned short nentries, + uint8_t bar_nr, Error **errp) +{ + int ret; + char *name; + uint32_t bar_size = 4096; + uint32_t bar_pba_offset = bar_size / 2; + uint32_t bar_pba_size = QEMU_ALIGN_UP(nentries, 64) / 8; + + /* + * Migration compatibility dictates that this remains a 4k + * BAR with the vector table in the lower half and the PBA in + * the upper half for nentries less than or equal to 128. + * There is no need to worry about using more than 65 entries for + * legacy machine types, which have at most 64 queues. + */ + if (nentries * PCI_MSIX_ENTRY_SIZE > bar_pba_offset) { + bar_pba_offset = nentries * PCI_MSIX_ENTRY_SIZE; + } + + if (bar_pba_offset + bar_pba_size > 4096) { + bar_size = bar_pba_offset + bar_pba_size; + } + + bar_size = pow2ceil(bar_size); + + name = g_strdup_printf("%s-msix", dev->name); + memory_region_init(&dev->msix_exclusive_bar, OBJECT(dev), name, bar_size); + g_free(name); + + ret = msix_init(dev, nentries, &dev->msix_exclusive_bar, bar_nr, + 0, &dev->msix_exclusive_bar, + bar_nr, bar_pba_offset, + 0, errp); + if (ret) { + return ret; + } + + pci_register_bar(dev, bar_nr, PCI_BASE_ADDRESS_SPACE_MEMORY, + &dev->msix_exclusive_bar); + + return 0; +} + +static void msix_free_irq_entries(PCIDevice *dev) +{ + int vector; + + for (vector = 0; vector < dev->msix_entries_nr; ++vector) { + dev->msix_entry_used[vector] = 0; + msix_clr_pending(dev, vector); + } +} + +static void msix_clear_all_vectors(PCIDevice *dev) +{ + int vector; + + for (vector = 0; vector < dev->msix_entries_nr; ++vector) { + msix_clr_pending(dev, vector); + } +} + +/* Clean up resources for the device. */ +void msix_uninit(PCIDevice *dev, MemoryRegion *table_bar, MemoryRegion *pba_bar) +{ + if (!msix_present(dev)) { + return; + } + pci_del_capability(dev, PCI_CAP_ID_MSIX, MSIX_CAP_LENGTH); + dev->msix_cap = 0; + msix_free_irq_entries(dev); + dev->msix_entries_nr = 0; + memory_region_del_subregion(pba_bar, &dev->msix_pba_mmio); + g_free(dev->msix_pba); + dev->msix_pba = NULL; + memory_region_del_subregion(table_bar, &dev->msix_table_mmio); + g_free(dev->msix_table); + dev->msix_table = NULL; + g_free(dev->msix_entry_used); + dev->msix_entry_used = NULL; + dev->cap_present &= ~QEMU_PCI_CAP_MSIX; +} + +void msix_uninit_exclusive_bar(PCIDevice *dev) +{ + if (msix_present(dev)) { + msix_uninit(dev, &dev->msix_exclusive_bar, &dev->msix_exclusive_bar); + } +} + +void msix_save(PCIDevice *dev, QEMUFile *f) +{ + unsigned n = dev->msix_entries_nr; + + if (!msix_present(dev)) { + return; + } + + qemu_put_buffer(f, dev->msix_table, n * PCI_MSIX_ENTRY_SIZE); + qemu_put_buffer(f, dev->msix_pba, DIV_ROUND_UP(n, 8)); +} +
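+/*
+ * msix_save()/msix_load() move a fixed-size blob: the whole vector table
+ * followed by one pending bit per vector, rounded up to whole bytes.  A
+ * standalone sketch of the stream size, with assumed vector counts
+ * (illustration only):
+ *
+ *     #include <stdio.h>
+ *     #define PCI_MSIX_ENTRY_SIZE 16
+ *     #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
+ *
+ *     int main(void)
+ *     {
+ *         unsigned counts[] = { 1, 64, 128 };  // assumed
+ *         for (int i = 0; i < 3; i++) {
+ *             unsigned n = counts[i];
+ *             printf("n=%3u -> %u bytes\n", n,
+ *                    n * PCI_MSIX_ENTRY_SIZE + DIV_ROUND_UP(n, 8));
+ *         }
+ *         return 0;
+ *     }
+ */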
+/* Should be called after restoring the config space. */ +void msix_load(PCIDevice *dev, QEMUFile *f) +{ + unsigned n = dev->msix_entries_nr; + unsigned int vector; + + if (!msix_present(dev)) { + return; + } + + msix_clear_all_vectors(dev); + qemu_get_buffer(f, dev->msix_table, n * PCI_MSIX_ENTRY_SIZE); + qemu_get_buffer(f, dev->msix_pba, DIV_ROUND_UP(n, 8)); + msix_update_function_masked(dev); + + for (vector = 0; vector < n; vector++) { + msix_handle_mask_update(dev, vector, true); + } +} + +/* Does the device support MSI-X? */ +int msix_present(PCIDevice *dev) +{ + return dev->cap_present & QEMU_PCI_CAP_MSIX; +} + +/* Is MSI-X enabled? */ +int msix_enabled(PCIDevice *dev) +{ + return (dev->cap_present & QEMU_PCI_CAP_MSIX) && + (dev->config[dev->msix_cap + MSIX_CONTROL_OFFSET] & + MSIX_ENABLE_MASK); +} + +/* Send an MSI-X message */ +void msix_notify(PCIDevice *dev, unsigned vector) +{ + MSIMessage msg; + + if (vector >= dev->msix_entries_nr || !dev->msix_entry_used[vector]) { + return; + } + + if (msix_is_masked(dev, vector)) { + msix_set_pending(dev, vector); + return; + } + + msg = msix_get_message(dev, vector); + + msi_send_message(dev, msg); +} + +void msix_reset(PCIDevice *dev) +{ + if (!msix_present(dev)) { + return; + } + msix_clear_all_vectors(dev); + dev->config[dev->msix_cap + MSIX_CONTROL_OFFSET] &= + ~dev->wmask[dev->msix_cap + MSIX_CONTROL_OFFSET]; + memset(dev->msix_table, 0, dev->msix_entries_nr * PCI_MSIX_ENTRY_SIZE); + memset(dev->msix_pba, 0, QEMU_ALIGN_UP(dev->msix_entries_nr, 64) / 8); + msix_mask_all(dev, dev->msix_entries_nr); +} + +/* The PCI spec suggests that devices make it possible for software to + * configure fewer vectors than the device supports, but does not specify a + * standard mechanism for devices to do so. + * + * We support this by asking devices to declare the vectors software is going + * to actually use, and checking this on the notification path. Devices that + * don't want to follow the spec suggestion can declare all vectors as used. */ + +/* Mark vector as used. */ +int msix_vector_use(PCIDevice *dev, unsigned vector) +{ + if (vector >= dev->msix_entries_nr) { + return -EINVAL; + } + + dev->msix_entry_used[vector]++; + return 0; +} + +/* Mark vector as unused. 
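+ *
+ * As a usage sketch of the use/unuse pairing (hypothetical device code;
+ * msix_init(), msix_vector_use() and msix_notify() are the functions
+ * defined in this file, everything else is assumed):
+ *
+ *     // in a hypothetical device's realize(), after msix_init() succeeded
+ *     for (i = 0; i < MY_NUM_QUEUES; i++) {   // MY_NUM_QUEUES is assumed
+ *         msix_vector_use(pci_dev, i);
+ *     }
+ *     // on the data path
+ *     msix_notify(pci_dev, queue_index);      // raises the interrupt
+ *
+ * with matching msix_vector_unuse()/msix_unuse_all_vectors() calls on
+ * unrealize.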
*/ +void msix_vector_unuse(PCIDevice *dev, unsigned vector) +{ + if (vector >= dev->msix_entries_nr || !dev->msix_entry_used[vector]) { + return; + } + if (--dev->msix_entry_used[vector]) { + return; + } + msix_clr_pending(dev, vector); +} + +void msix_unuse_all_vectors(PCIDevice *dev) +{ + if (!msix_present(dev)) { + return; + } + msix_free_irq_entries(dev); +} + +unsigned int msix_nr_vectors_allocated(const PCIDevice *dev) +{ + return dev->msix_entries_nr; +} + +static int msix_set_notifier_for_vector(PCIDevice *dev, unsigned int vector) +{ + MSIMessage msg; + + if (msix_is_masked(dev, vector)) { + return 0; + } + msg = msix_get_message(dev, vector); + return dev->msix_vector_use_notifier(dev, vector, msg); +} + +static void msix_unset_notifier_for_vector(PCIDevice *dev, unsigned int vector) +{ + if (msix_is_masked(dev, vector)) { + return; + } + dev->msix_vector_release_notifier(dev, vector); +} + +int msix_set_vector_notifiers(PCIDevice *dev, + MSIVectorUseNotifier use_notifier, + MSIVectorReleaseNotifier release_notifier, + MSIVectorPollNotifier poll_notifier) +{ + int vector, ret; + + assert(use_notifier && release_notifier); + + dev->msix_vector_use_notifier = use_notifier; + dev->msix_vector_release_notifier = release_notifier; + dev->msix_vector_poll_notifier = poll_notifier; + + if ((dev->config[dev->msix_cap + MSIX_CONTROL_OFFSET] & + (MSIX_ENABLE_MASK | MSIX_MASKALL_MASK)) == MSIX_ENABLE_MASK) { + for (vector = 0; vector < dev->msix_entries_nr; vector++) { + ret = msix_set_notifier_for_vector(dev, vector); + if (ret < 0) { + goto undo; + } + } + } + if (dev->msix_vector_poll_notifier) { + dev->msix_vector_poll_notifier(dev, 0, dev->msix_entries_nr); + } + return 0; + +undo: + while (--vector >= 0) { + msix_unset_notifier_for_vector(dev, vector); + } + dev->msix_vector_use_notifier = NULL; + dev->msix_vector_release_notifier = NULL; + return ret; +} + +void msix_unset_vector_notifiers(PCIDevice *dev) +{ + int vector; + + assert(dev->msix_vector_use_notifier && + dev->msix_vector_release_notifier); + + if ((dev->config[dev->msix_cap + MSIX_CONTROL_OFFSET] & + (MSIX_ENABLE_MASK | MSIX_MASKALL_MASK)) == MSIX_ENABLE_MASK) { + for (vector = 0; vector < dev->msix_entries_nr; vector++) { + msix_unset_notifier_for_vector(dev, vector); + } + } + dev->msix_vector_use_notifier = NULL; + dev->msix_vector_release_notifier = NULL; + dev->msix_vector_poll_notifier = NULL; +} + +static int put_msix_state(QEMUFile *f, void *pv, size_t size, + const VMStateField *field, JSONWriter *vmdesc) +{ + msix_save(pv, f); + + return 0; +} + +static int get_msix_state(QEMUFile *f, void *pv, size_t size, + const VMStateField *field) +{ + msix_load(pv, f); + return 0; +} + +static VMStateInfo vmstate_info_msix = { + .name = "msix state", + .get = get_msix_state, + .put = put_msix_state, +}; + +const VMStateDescription vmstate_msix = { + .name = "msix", + .fields = (VMStateField[]) { + { + .name = "msix", + .version_id = 0, + .field_exists = NULL, + .size = 0, /* ouch */ + .info = &vmstate_info_msix, + .flags = VMS_SINGLE, + .offset = 0, + }, + VMSTATE_END_OF_LIST() + } +}; diff --git a/hw/pci/pci-stub.c b/hw/pci/pci-stub.c new file mode 100644 index 000000000..3a027c42e --- /dev/null +++ b/hw/pci/pci-stub.c @@ -0,0 +1,93 @@ +/* + * PCI stubs for platforms that don't support pci bus. + * + * Copyright (c) 2010 Isaku Yamahata + * VA Linux Systems Japan K.K. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see <http://www.gnu.org/licenses/>. + */ + +#include "qemu/osdep.h" +#include "sysemu/sysemu.h" +#include "monitor/monitor.h" +#include "qapi/error.h" +#include "qapi/qapi-commands-pci.h" +#include "qapi/qmp/qerror.h" +#include "hw/pci/pci.h" +#include "hw/pci/msi.h" +#include "hw/pci/msix.h" + +bool msi_nonbroken; +bool pci_available; + +PciInfoList *qmp_query_pci(Error **errp) +{ + error_setg(errp, QERR_UNSUPPORTED); + return NULL; +} + +void hmp_pcie_aer_inject_error(Monitor *mon, const QDict *qdict) +{ + monitor_printf(mon, "PCI devices not supported\n"); +} + +/* kvm-all wants this */ +MSIMessage pci_get_msi_message(PCIDevice *dev, int vector) +{ + g_assert(false); + return (MSIMessage){}; +} + +uint16_t pci_requester_id(PCIDevice *dev) +{ + g_assert(false); + return 0; +} + +/* Required by ahci.c */ +bool msi_enabled(const PCIDevice *dev) +{ + return false; +} + +void msi_notify(PCIDevice *dev, unsigned int vector) +{ + g_assert_not_reached(); +} + +/* Required by target/i386/kvm.c */ +bool msi_is_masked(const PCIDevice *dev, unsigned vector) +{ + g_assert_not_reached(); +} + +MSIMessage msi_get_message(PCIDevice *dev, unsigned int vector) +{ + g_assert_not_reached(); +} + +int msix_enabled(PCIDevice *dev) +{ + return false; +} + +bool msix_is_masked(PCIDevice *dev, unsigned vector) +{ + g_assert_not_reached(); +} + +MSIMessage msix_get_message(PCIDevice *dev, unsigned int vector) +{ + g_assert_not_reached(); +} diff --git a/hw/pci/pci.c b/hw/pci/pci.c new file mode 100644 index 000000000..e5993c1ef --- /dev/null +++ b/hw/pci/pci.c @@ -0,0 +1,2896 @@ +/* + * QEMU PCI bus manager + * + * Copyright (c) 2004 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include "qemu-common.h" +#include "qemu/datadir.h" +#include "qemu/units.h" +#include "hw/irq.h" +#include "hw/pci/pci.h" +#include "hw/pci/pci_bridge.h" +#include "hw/pci/pci_bus.h" +#include "hw/pci/pci_host.h" +#include "hw/qdev-properties.h" +#include "hw/qdev-properties-system.h" +#include "migration/qemu-file-types.h" +#include "migration/vmstate.h" +#include "monitor/monitor.h" +#include "net/net.h" +#include "sysemu/numa.h" +#include "sysemu/sysemu.h" +#include "hw/loader.h" +#include "qemu/error-report.h" +#include "qemu/range.h" +#include "trace.h" +#include "hw/pci/msi.h" +#include "hw/pci/msix.h" +#include "hw/hotplug.h" +#include "hw/boards.h" +#include "qapi/error.h" +#include "qapi/qapi-commands-pci.h" +#include "qemu/cutils.h" + +//#define DEBUG_PCI +#ifdef DEBUG_PCI +# define PCI_DPRINTF(format, ...) printf(format, ## __VA_ARGS__) +#else +# define PCI_DPRINTF(format, ...) do { } while (0) +#endif + +bool pci_available = true; + +static void pcibus_dev_print(Monitor *mon, DeviceState *dev, int indent); +static char *pcibus_get_dev_path(DeviceState *dev); +static char *pcibus_get_fw_dev_path(DeviceState *dev); +static void pcibus_reset(BusState *qbus); + +static Property pci_props[] = { + DEFINE_PROP_PCI_DEVFN("addr", PCIDevice, devfn, -1), + DEFINE_PROP_STRING("romfile", PCIDevice, romfile), + DEFINE_PROP_UINT32("romsize", PCIDevice, romsize, -1), + DEFINE_PROP_UINT32("rombar", PCIDevice, rom_bar, 1), + DEFINE_PROP_BIT("multifunction", PCIDevice, cap_present, + QEMU_PCI_CAP_MULTIFUNCTION_BITNR, false), + DEFINE_PROP_BIT("x-pcie-lnksta-dllla", PCIDevice, cap_present, + QEMU_PCIE_LNKSTA_DLLLA_BITNR, true), + DEFINE_PROP_BIT("x-pcie-extcap-init", PCIDevice, cap_present, + QEMU_PCIE_EXTCAP_INIT_BITNR, true), + DEFINE_PROP_STRING("failover_pair_id", PCIDevice, + failover_pair_id), + DEFINE_PROP_UINT32("acpi-index", PCIDevice, acpi_index, 0), + DEFINE_PROP_END_OF_LIST() +}; + +static const VMStateDescription vmstate_pcibus = { + .name = "PCIBUS", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_INT32_EQUAL(nirq, PCIBus, NULL), + VMSTATE_VARRAY_INT32(irq_count, PCIBus, + nirq, 0, vmstate_info_int32, + int32_t), + VMSTATE_END_OF_LIST() + } +}; + +static void pci_init_bus_master(PCIDevice *pci_dev) +{ + AddressSpace *dma_as = pci_device_iommu_address_space(pci_dev); + + memory_region_init_alias(&pci_dev->bus_master_enable_region, + OBJECT(pci_dev), "bus master", + dma_as->root, 0, memory_region_size(dma_as->root)); + memory_region_set_enabled(&pci_dev->bus_master_enable_region, false); + memory_region_add_subregion(&pci_dev->bus_master_container_region, 0, + &pci_dev->bus_master_enable_region); +} + +static void pcibus_machine_done(Notifier *notifier, void *data) +{ + PCIBus *bus = container_of(notifier, PCIBus, machine_done); + int i; + + for (i = 0; i < ARRAY_SIZE(bus->devices); ++i) { + if (bus->devices[i]) { + pci_init_bus_master(bus->devices[i]); + } + } +} + +static void pci_bus_realize(BusState *qbus, Error **errp) +{ + PCIBus *bus = PCI_BUS(qbus); + + bus->machine_done.notify = pcibus_machine_done; + qemu_add_machine_init_done_notifier(&bus->machine_done); + + vmstate_register(NULL, VMSTATE_INSTANCE_ID_ANY, &vmstate_pcibus, bus); +} + +static void pcie_bus_realize(BusState *qbus, Error **errp) +{ + PCIBus *bus = PCI_BUS(qbus); + Error *local_err = NULL; + + pci_bus_realize(qbus, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + + /* + * A PCI-E bus can support extended 
config space if it's the root + * bus, or if the bus/bridge above it does as well + */ + if (pci_bus_is_root(bus)) { + bus->flags |= PCI_BUS_EXTENDED_CONFIG_SPACE; + } else { + PCIBus *parent_bus = pci_get_bus(bus->parent_dev); + + if (pci_bus_allows_extended_config_space(parent_bus)) { + bus->flags |= PCI_BUS_EXTENDED_CONFIG_SPACE; + } + } +} + +static void pci_bus_unrealize(BusState *qbus) +{ + PCIBus *bus = PCI_BUS(qbus); + + qemu_remove_machine_init_done_notifier(&bus->machine_done); + + vmstate_unregister(NULL, &vmstate_pcibus, bus); +} + +static int pcibus_num(PCIBus *bus) +{ + if (pci_bus_is_root(bus)) { + return 0; /* pci host bridge */ + } + return bus->parent_dev->config[PCI_SECONDARY_BUS]; +} + +static uint16_t pcibus_numa_node(PCIBus *bus) +{ + return NUMA_NODE_UNASSIGNED; +} + +static void pci_bus_class_init(ObjectClass *klass, void *data) +{ + BusClass *k = BUS_CLASS(klass); + PCIBusClass *pbc = PCI_BUS_CLASS(klass); + + k->print_dev = pcibus_dev_print; + k->get_dev_path = pcibus_get_dev_path; + k->get_fw_dev_path = pcibus_get_fw_dev_path; + k->realize = pci_bus_realize; + k->unrealize = pci_bus_unrealize; + k->reset = pcibus_reset; + + pbc->bus_num = pcibus_num; + pbc->numa_node = pcibus_numa_node; +} + +static const TypeInfo pci_bus_info = { + .name = TYPE_PCI_BUS, + .parent = TYPE_BUS, + .instance_size = sizeof(PCIBus), + .class_size = sizeof(PCIBusClass), + .class_init = pci_bus_class_init, +}; + +static const TypeInfo pcie_interface_info = { + .name = INTERFACE_PCIE_DEVICE, + .parent = TYPE_INTERFACE, +}; + +static const TypeInfo conventional_pci_interface_info = { + .name = INTERFACE_CONVENTIONAL_PCI_DEVICE, + .parent = TYPE_INTERFACE, +}; + +static void pcie_bus_class_init(ObjectClass *klass, void *data) +{ + BusClass *k = BUS_CLASS(klass); + + k->realize = pcie_bus_realize; +} + +static const TypeInfo pcie_bus_info = { + .name = TYPE_PCIE_BUS, + .parent = TYPE_PCI_BUS, + .class_init = pcie_bus_class_init, +}; + +static PCIBus *pci_find_bus_nr(PCIBus *bus, int bus_num); +static void pci_update_mappings(PCIDevice *d); +static void pci_irq_handler(void *opaque, int irq_num, int level); +static void pci_add_option_rom(PCIDevice *pdev, bool is_default_rom, Error **); +static void pci_del_option_rom(PCIDevice *pdev); + +static uint16_t pci_default_sub_vendor_id = PCI_SUBVENDOR_ID_REDHAT_QUMRANET; +static uint16_t pci_default_sub_device_id = PCI_SUBDEVICE_ID_QEMU; + +static QLIST_HEAD(, PCIHostState) pci_host_bridges; + +int pci_bar(PCIDevice *d, int reg) +{ + uint8_t type; + + if (reg != PCI_ROM_SLOT) + return PCI_BASE_ADDRESS_0 + reg * 4; + + type = d->config[PCI_HEADER_TYPE] & ~PCI_HEADER_TYPE_MULTI_FUNCTION; + return type == PCI_HEADER_TYPE_BRIDGE ? 
PCI_ROM_ADDRESS1 : PCI_ROM_ADDRESS; +} + +static inline int pci_irq_state(PCIDevice *d, int irq_num) +{ + return (d->irq_state >> irq_num) & 0x1; +} + +static inline void pci_set_irq_state(PCIDevice *d, int irq_num, int level) +{ + d->irq_state &= ~(0x1 << irq_num); + d->irq_state |= level << irq_num; +} + +static void pci_bus_change_irq_level(PCIBus *bus, int irq_num, int change) +{ + assert(irq_num >= 0); + assert(irq_num < bus->nirq); + bus->irq_count[irq_num] += change; + bus->set_irq(bus->irq_opaque, irq_num, bus->irq_count[irq_num] != 0); +} + +static void pci_change_irq_level(PCIDevice *pci_dev, int irq_num, int change) +{ + PCIBus *bus; + for (;;) { + bus = pci_get_bus(pci_dev); + irq_num = bus->map_irq(pci_dev, irq_num); + if (bus->set_irq) + break; + pci_dev = bus->parent_dev; + } + pci_bus_change_irq_level(bus, irq_num, change); +} + +int pci_bus_get_irq_level(PCIBus *bus, int irq_num) +{ + assert(irq_num >= 0); + assert(irq_num < bus->nirq); + return !!bus->irq_count[irq_num]; +} + +/* Update the interrupt status bit in config space on interrupt + * state change. */ +static void pci_update_irq_status(PCIDevice *dev) +{ + if (dev->irq_state) { + dev->config[PCI_STATUS] |= PCI_STATUS_INTERRUPT; + } else { + dev->config[PCI_STATUS] &= ~PCI_STATUS_INTERRUPT; + } +} + +void pci_device_deassert_intx(PCIDevice *dev) +{ + int i; + for (i = 0; i < PCI_NUM_PINS; ++i) { + pci_irq_handler(dev, i, 0); + } +} + +static void pci_do_device_reset(PCIDevice *dev) +{ + int r; + + pci_device_deassert_intx(dev); + assert(dev->irq_state == 0); + + /* Clear all writable bits */ + pci_word_test_and_clear_mask(dev->config + PCI_COMMAND, + pci_get_word(dev->wmask + PCI_COMMAND) | + pci_get_word(dev->w1cmask + PCI_COMMAND)); + pci_word_test_and_clear_mask(dev->config + PCI_STATUS, + pci_get_word(dev->wmask + PCI_STATUS) | + pci_get_word(dev->w1cmask + PCI_STATUS)); + /* Some devices make bits of PCI_INTERRUPT_LINE read only */ + pci_byte_test_and_clear_mask(dev->config + PCI_INTERRUPT_LINE, + pci_get_word(dev->wmask + PCI_INTERRUPT_LINE) | + pci_get_word(dev->w1cmask + PCI_INTERRUPT_LINE)); + dev->config[PCI_CACHE_LINE_SIZE] = 0x0; + for (r = 0; r < PCI_NUM_REGIONS; ++r) { + PCIIORegion *region = &dev->io_regions[r]; + if (!region->size) { + continue; + } + + if (!(region->type & PCI_BASE_ADDRESS_SPACE_IO) && + region->type & PCI_BASE_ADDRESS_MEM_TYPE_64) { + pci_set_quad(dev->config + pci_bar(dev, r), region->type); + } else { + pci_set_long(dev->config + pci_bar(dev, r), region->type); + } + } + pci_update_mappings(dev); + + msi_reset(dev); + msix_reset(dev); +} + +/* + * This function is called on #RST, and on FLR when PCI_EXP_DEVCTL_BCR_FLR + * is set. + */ +void pci_device_reset(PCIDevice *dev) +{ + qdev_reset_all(&dev->qdev); + pci_do_device_reset(dev); +} + +/* + * Trigger a pci bus reset under a given bus. + * Called via qbus_reset_all on RST# assert, after the devices + * have already been reset by qdev_reset_all. 
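+ *
+ * For intuition about the irq_count[] bookkeeping used above, a standalone
+ * sketch with assumed values (illustration only): two devices sharing one
+ * INTx line each contribute +1 on assert and -1 on deassert, and the line
+ * stays high while the count is non-zero:
+ *
+ *     int irq_count = 0;   // one shared INTx line
+ *     irq_count += 1;      // device A asserts
+ *     irq_count += 1;      // device B asserts
+ *     irq_count -= 1;      // device A deasserts
+ *     // level = (irq_count != 0) -> still asserted by device B
+ *
+ * which is why the reset path below can assert that every counter has
+ * dropped back to zero once all devices have deasserted their pins.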
+ */ +static void pcibus_reset(BusState *qbus) +{ + PCIBus *bus = DO_UPCAST(PCIBus, qbus, qbus); + int i; + + for (i = 0; i < ARRAY_SIZE(bus->devices); ++i) { + if (bus->devices[i]) { + pci_do_device_reset(bus->devices[i]); + } + } + + for (i = 0; i < bus->nirq; i++) { + assert(bus->irq_count[i] == 0); + } +} + +static void pci_host_bus_register(DeviceState *host) +{ + PCIHostState *host_bridge = PCI_HOST_BRIDGE(host); + + QLIST_INSERT_HEAD(&pci_host_bridges, host_bridge, next); +} + +static void pci_host_bus_unregister(DeviceState *host) +{ + PCIHostState *host_bridge = PCI_HOST_BRIDGE(host); + + QLIST_REMOVE(host_bridge, next); +} + +PCIBus *pci_device_root_bus(const PCIDevice *d) +{ + PCIBus *bus = pci_get_bus(d); + + while (!pci_bus_is_root(bus)) { + d = bus->parent_dev; + assert(d != NULL); + + bus = pci_get_bus(d); + } + + return bus; +} + +const char *pci_root_bus_path(PCIDevice *dev) +{ + PCIBus *rootbus = pci_device_root_bus(dev); + PCIHostState *host_bridge = PCI_HOST_BRIDGE(rootbus->qbus.parent); + PCIHostBridgeClass *hc = PCI_HOST_BRIDGE_GET_CLASS(host_bridge); + + assert(host_bridge->bus == rootbus); + + if (hc->root_bus_path) { + return (*hc->root_bus_path)(host_bridge, rootbus); + } + + return rootbus->qbus.name; +} + +bool pci_bus_bypass_iommu(PCIBus *bus) +{ + PCIBus *rootbus = bus; + PCIHostState *host_bridge; + + if (!pci_bus_is_root(bus)) { + rootbus = pci_device_root_bus(bus->parent_dev); + } + + host_bridge = PCI_HOST_BRIDGE(rootbus->qbus.parent); + + assert(host_bridge->bus == rootbus); + + return host_bridge->bypass_iommu; +} + +static void pci_root_bus_internal_init(PCIBus *bus, DeviceState *parent, + MemoryRegion *address_space_mem, + MemoryRegion *address_space_io, + uint8_t devfn_min) +{ + assert(PCI_FUNC(devfn_min) == 0); + bus->devfn_min = devfn_min; + bus->slot_reserved_mask = 0x0; + bus->address_space_mem = address_space_mem; + bus->address_space_io = address_space_io; + bus->flags |= PCI_BUS_IS_ROOT; + + /* host bridge */ + QLIST_INIT(&bus->child); + + pci_host_bus_register(parent); +} + +static void pci_bus_uninit(PCIBus *bus) +{ + pci_host_bus_unregister(BUS(bus)->parent); +} + +bool pci_bus_is_express(PCIBus *bus) +{ + return object_dynamic_cast(OBJECT(bus), TYPE_PCIE_BUS); +} + +void pci_root_bus_init(PCIBus *bus, size_t bus_size, DeviceState *parent, + const char *name, + MemoryRegion *address_space_mem, + MemoryRegion *address_space_io, + uint8_t devfn_min, const char *typename) +{ + qbus_init(bus, bus_size, typename, parent, name); + pci_root_bus_internal_init(bus, parent, address_space_mem, + address_space_io, devfn_min); +} + +PCIBus *pci_root_bus_new(DeviceState *parent, const char *name, + MemoryRegion *address_space_mem, + MemoryRegion *address_space_io, + uint8_t devfn_min, const char *typename) +{ + PCIBus *bus; + + bus = PCI_BUS(qbus_new(typename, parent, name)); + pci_root_bus_internal_init(bus, parent, address_space_mem, + address_space_io, devfn_min); + return bus; +} + +void pci_root_bus_cleanup(PCIBus *bus) +{ + pci_bus_uninit(bus); + /* the caller of the unplug hotplug handler will delete this device */ + qbus_unrealize(BUS(bus)); +} + +void pci_bus_irqs(PCIBus *bus, pci_set_irq_fn set_irq, pci_map_irq_fn map_irq, + void *irq_opaque, int nirq) +{ + bus->set_irq = set_irq; + bus->map_irq = map_irq; + bus->irq_opaque = irq_opaque; + bus->nirq = nirq; + bus->irq_count = g_malloc0(nirq * sizeof(bus->irq_count[0])); +} + +void pci_bus_irqs_cleanup(PCIBus *bus) +{ + bus->set_irq = NULL; + bus->map_irq = NULL; + bus->irq_opaque = NULL; + 
bus->nirq = 0; + g_free(bus->irq_count); +} + +PCIBus *pci_register_root_bus(DeviceState *parent, const char *name, + pci_set_irq_fn set_irq, pci_map_irq_fn map_irq, + void *irq_opaque, + MemoryRegion *address_space_mem, + MemoryRegion *address_space_io, + uint8_t devfn_min, int nirq, + const char *typename) +{ + PCIBus *bus; + + bus = pci_root_bus_new(parent, name, address_space_mem, + address_space_io, devfn_min, typename); + pci_bus_irqs(bus, set_irq, map_irq, irq_opaque, nirq); + return bus; +} + +void pci_unregister_root_bus(PCIBus *bus) +{ + pci_bus_irqs_cleanup(bus); + pci_root_bus_cleanup(bus); +} + +int pci_bus_num(PCIBus *s) +{ + return PCI_BUS_GET_CLASS(s)->bus_num(s); +} + +/* Returns the min and max bus numbers of a PCI bus hierarchy */ +void pci_bus_range(PCIBus *bus, int *min_bus, int *max_bus) +{ + int i; + *min_bus = *max_bus = pci_bus_num(bus); + + for (i = 0; i < ARRAY_SIZE(bus->devices); ++i) { + PCIDevice *dev = bus->devices[i]; + + if (dev && PCI_DEVICE_GET_CLASS(dev)->is_bridge) { + *min_bus = MIN(*min_bus, dev->config[PCI_SECONDARY_BUS]); + *max_bus = MAX(*max_bus, dev->config[PCI_SUBORDINATE_BUS]); + } + } +} + +int pci_bus_numa_node(PCIBus *bus) +{ + return PCI_BUS_GET_CLASS(bus)->numa_node(bus); +} + +static int get_pci_config_device(QEMUFile *f, void *pv, size_t size, + const VMStateField *field) +{ + PCIDevice *s = container_of(pv, PCIDevice, config); + PCIDeviceClass *pc = PCI_DEVICE_GET_CLASS(s); + uint8_t *config; + int i; + + assert(size == pci_config_size(s)); + config = g_malloc(size); + + qemu_get_buffer(f, config, size); + for (i = 0; i < size; ++i) { + if ((config[i] ^ s->config[i]) & + s->cmask[i] & ~s->wmask[i] & ~s->w1cmask[i]) { + error_report("%s: Bad config data: i=0x%x read: %x device: %x " + "cmask: %x wmask: %x w1cmask:%x", __func__, + i, config[i], s->config[i], + s->cmask[i], s->wmask[i], s->w1cmask[i]); + g_free(config); + return -EINVAL; + } + } + memcpy(s->config, config, size); + + pci_update_mappings(s); + if (pc->is_bridge) { + PCIBridge *b = PCI_BRIDGE(s); + pci_bridge_update_mappings(b); + } + + memory_region_set_enabled(&s->bus_master_enable_region, + pci_get_word(s->config + PCI_COMMAND) + & PCI_COMMAND_MASTER); + + g_free(config); + return 0; +} + +/* just put buffer */ +static int put_pci_config_device(QEMUFile *f, void *pv, size_t size, + const VMStateField *field, JSONWriter *vmdesc) +{ + const uint8_t **v = pv; + assert(size == pci_config_size(container_of(pv, PCIDevice, config))); + qemu_put_buffer(f, *v, size); + + return 0; +} + +static VMStateInfo vmstate_info_pci_config = { + .name = "pci config", + .get = get_pci_config_device, + .put = put_pci_config_device, +}; + +static int get_pci_irq_state(QEMUFile *f, void *pv, size_t size, + const VMStateField *field) +{ + PCIDevice *s = container_of(pv, PCIDevice, irq_state); + uint32_t irq_state[PCI_NUM_PINS]; + int i; + for (i = 0; i < PCI_NUM_PINS; ++i) { + irq_state[i] = qemu_get_be32(f); + if (irq_state[i] != 0x1 && irq_state[i] != 0) { + fprintf(stderr, "irq state %d: must be 0 or 1.\n", + irq_state[i]); + return -EINVAL; + } + } + + for (i = 0; i < PCI_NUM_PINS; ++i) { + pci_set_irq_state(s, i, irq_state[i]); + } + + return 0; +} + +static int put_pci_irq_state(QEMUFile *f, void *pv, size_t size, + const VMStateField *field, JSONWriter *vmdesc) +{ + int i; + PCIDevice *s = container_of(pv, PCIDevice, irq_state); + + for (i = 0; i < PCI_NUM_PINS; ++i) { + qemu_put_be32(f, pci_irq_state(s, i)); + } + + return 0; +} + +static VMStateInfo vmstate_info_pci_irq_state = { + .name 
= "pci irq state", + .get = get_pci_irq_state, + .put = put_pci_irq_state, +}; + +static bool migrate_is_pcie(void *opaque, int version_id) +{ + return pci_is_express((PCIDevice *)opaque); +} + +static bool migrate_is_not_pcie(void *opaque, int version_id) +{ + return !pci_is_express((PCIDevice *)opaque); +} + +const VMStateDescription vmstate_pci_device = { + .name = "PCIDevice", + .version_id = 2, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_INT32_POSITIVE_LE(version_id, PCIDevice), + VMSTATE_BUFFER_UNSAFE_INFO_TEST(config, PCIDevice, + migrate_is_not_pcie, + 0, vmstate_info_pci_config, + PCI_CONFIG_SPACE_SIZE), + VMSTATE_BUFFER_UNSAFE_INFO_TEST(config, PCIDevice, + migrate_is_pcie, + 0, vmstate_info_pci_config, + PCIE_CONFIG_SPACE_SIZE), + VMSTATE_BUFFER_UNSAFE_INFO(irq_state, PCIDevice, 2, + vmstate_info_pci_irq_state, + PCI_NUM_PINS * sizeof(int32_t)), + VMSTATE_END_OF_LIST() + } +}; + + +void pci_device_save(PCIDevice *s, QEMUFile *f) +{ + /* Clear interrupt status bit: it is implicit + * in irq_state which we are saving. + * This makes us compatible with old devices + * which never set or clear this bit. */ + s->config[PCI_STATUS] &= ~PCI_STATUS_INTERRUPT; + vmstate_save_state(f, &vmstate_pci_device, s, NULL); + /* Restore the interrupt status bit. */ + pci_update_irq_status(s); +} + +int pci_device_load(PCIDevice *s, QEMUFile *f) +{ + int ret; + ret = vmstate_load_state(f, &vmstate_pci_device, s, s->version_id); + /* Restore the interrupt status bit. */ + pci_update_irq_status(s); + return ret; +} + +static void pci_set_default_subsystem_id(PCIDevice *pci_dev) +{ + pci_set_word(pci_dev->config + PCI_SUBSYSTEM_VENDOR_ID, + pci_default_sub_vendor_id); + pci_set_word(pci_dev->config + PCI_SUBSYSTEM_ID, + pci_default_sub_device_id); +} + +/* + * Parse [[:]:], return -1 on error if funcp == NULL + * [[:]:]., return -1 on error + */ +static int pci_parse_devaddr(const char *addr, int *domp, int *busp, + unsigned int *slotp, unsigned int *funcp) +{ + const char *p; + char *e; + unsigned long val; + unsigned long dom = 0, bus = 0; + unsigned int slot = 0; + unsigned int func = 0; + + p = addr; + val = strtoul(p, &e, 16); + if (e == p) + return -1; + if (*e == ':') { + bus = val; + p = e + 1; + val = strtoul(p, &e, 16); + if (e == p) + return -1; + if (*e == ':') { + dom = bus; + bus = val; + p = e + 1; + val = strtoul(p, &e, 16); + if (e == p) + return -1; + } + } + + slot = val; + + if (funcp != NULL) { + if (*e != '.') + return -1; + + p = e + 1; + val = strtoul(p, &e, 16); + if (e == p) + return -1; + + func = val; + } + + /* if funcp == NULL func is 0 */ + if (dom > 0xffff || bus > 0xff || slot > 0x1f || func > 7) + return -1; + + if (*e) + return -1; + + *domp = dom; + *busp = bus; + *slotp = slot; + if (funcp != NULL) + *funcp = func; + return 0; +} + +static void pci_init_cmask(PCIDevice *dev) +{ + pci_set_word(dev->cmask + PCI_VENDOR_ID, 0xffff); + pci_set_word(dev->cmask + PCI_DEVICE_ID, 0xffff); + dev->cmask[PCI_STATUS] = PCI_STATUS_CAP_LIST; + dev->cmask[PCI_REVISION_ID] = 0xff; + dev->cmask[PCI_CLASS_PROG] = 0xff; + pci_set_word(dev->cmask + PCI_CLASS_DEVICE, 0xffff); + dev->cmask[PCI_HEADER_TYPE] = 0xff; + dev->cmask[PCI_CAPABILITY_LIST] = 0xff; +} + +static void pci_init_wmask(PCIDevice *dev) +{ + int config_size = pci_config_size(dev); + + dev->wmask[PCI_CACHE_LINE_SIZE] = 0xff; + dev->wmask[PCI_INTERRUPT_LINE] = 0xff; + pci_set_word(dev->wmask + PCI_COMMAND, + PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | + PCI_COMMAND_INTX_DISABLE); + 
+static void pci_init_w1cmask(PCIDevice *dev) +{ + /* + * Note: It's okay to set w1cmask even for readonly bits as + * long as their value is hardwired to 0. + */ + pci_set_word(dev->w1cmask + PCI_STATUS, + PCI_STATUS_PARITY | PCI_STATUS_SIG_TARGET_ABORT | + PCI_STATUS_REC_TARGET_ABORT | PCI_STATUS_REC_MASTER_ABORT | + PCI_STATUS_SIG_SYSTEM_ERROR | PCI_STATUS_DETECTED_PARITY); +} + +static void pci_init_mask_bridge(PCIDevice *d) +{ + /* PCI_PRIMARY_BUS, PCI_SECONDARY_BUS, PCI_SUBORDINATE_BUS and + PCI_SEC_LATENCY_TIMER */ + memset(d->wmask + PCI_PRIMARY_BUS, 0xff, 4); + + /* base and limit */ + d->wmask[PCI_IO_BASE] = PCI_IO_RANGE_MASK & 0xff; + d->wmask[PCI_IO_LIMIT] = PCI_IO_RANGE_MASK & 0xff; + pci_set_word(d->wmask + PCI_MEMORY_BASE, + PCI_MEMORY_RANGE_MASK & 0xffff); + pci_set_word(d->wmask + PCI_MEMORY_LIMIT, + PCI_MEMORY_RANGE_MASK & 0xffff); + pci_set_word(d->wmask + PCI_PREF_MEMORY_BASE, + PCI_PREF_RANGE_MASK & 0xffff); + pci_set_word(d->wmask + PCI_PREF_MEMORY_LIMIT, + PCI_PREF_RANGE_MASK & 0xffff); + + /* PCI_PREF_BASE_UPPER32 and PCI_PREF_LIMIT_UPPER32 */ + memset(d->wmask + PCI_PREF_BASE_UPPER32, 0xff, 8); + + /* Supported memory and i/o types */ + d->config[PCI_IO_BASE] |= PCI_IO_RANGE_TYPE_16; + d->config[PCI_IO_LIMIT] |= PCI_IO_RANGE_TYPE_16; + pci_word_test_and_set_mask(d->config + PCI_PREF_MEMORY_BASE, + PCI_PREF_RANGE_TYPE_64); + pci_word_test_and_set_mask(d->config + PCI_PREF_MEMORY_LIMIT, + PCI_PREF_RANGE_TYPE_64); + + /* + * TODO: Bridges default to 10-bit VGA decoding but we currently only + * implement 16-bit decoding (no alias support). + */ + pci_set_word(d->wmask + PCI_BRIDGE_CONTROL, + PCI_BRIDGE_CTL_PARITY | + PCI_BRIDGE_CTL_SERR | + PCI_BRIDGE_CTL_ISA | + PCI_BRIDGE_CTL_VGA | + PCI_BRIDGE_CTL_VGA_16BIT | + PCI_BRIDGE_CTL_MASTER_ABORT | + PCI_BRIDGE_CTL_BUS_RESET | + PCI_BRIDGE_CTL_FAST_BACK | + PCI_BRIDGE_CTL_DISCARD | + PCI_BRIDGE_CTL_SEC_DISCARD | + PCI_BRIDGE_CTL_DISCARD_SERR); + /* The below does not do anything, as we never set this bit; it is here + * for completeness. */ + pci_set_word(d->w1cmask + PCI_BRIDGE_CONTROL, + PCI_BRIDGE_CTL_DISCARD_STATUS); + d->cmask[PCI_IO_BASE] |= PCI_IO_RANGE_TYPE_MASK; + d->cmask[PCI_IO_LIMIT] |= PCI_IO_RANGE_TYPE_MASK; + pci_word_test_and_set_mask(d->cmask + PCI_PREF_MEMORY_BASE, + PCI_PREF_RANGE_TYPE_MASK); + pci_word_test_and_set_mask(d->cmask + PCI_PREF_MEMORY_LIMIT, + PCI_PREF_RANGE_TYPE_MASK); +} + +static void pci_init_multifunction(PCIBus *bus, PCIDevice *dev, Error **errp) +{ + uint8_t slot = PCI_SLOT(dev->devfn); + uint8_t func; + + if (dev->cap_present & QEMU_PCI_CAP_MULTIFUNCTION) { + dev->config[PCI_HEADER_TYPE] |= PCI_HEADER_TYPE_MULTI_FUNCTION; + } + + /* + * The multifunction bit is interpreted in two ways, as follows. + * - all functions must set the bit to 1. + * Example: Intel X53 + * - function 0 must set the bit, but the remaining functions (> 0) + * are allowed to leave the bit at 0. + * Example: PIIX3 (also in qemu), PIIX4 (also in qemu), ICH10. + * + * So the OS (at least Linux) checks the bit only on function 0, + * and ignores the bit on functions > 0. + * + * The check below allows both interpretations. 
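+ *
+ * As a concrete illustration (assumed numbers): devfn packs slot and
+ * function as slot << 3 | function, so slot 2 owns devfn 16..23.  Plugging
+ * a device into slot 2 function 3 (devfn 19) is only accepted when the
+ * device at devfn 16 (function 0) advertises
+ * PCI_HEADER_TYPE_MULTI_FUNCTION:
+ *
+ *     unsigned devfn = (2 << 3) | 3;  // slot 2, function 3 -> devfn 19
+ *     unsigned slot  = devfn >> 3;    // PCI_SLOT(devfn) == 2
+ *     unsigned func  = devfn & 7;     // PCI_FUNC(devfn) == 3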
+ */ + if (PCI_FUNC(dev->devfn)) { + PCIDevice *f0 = bus->devices[PCI_DEVFN(slot, 0)]; + if (f0 && !(f0->cap_present & QEMU_PCI_CAP_MULTIFUNCTION)) { + /* function 0 should set multifunction bit */ + error_setg(errp, "PCI: single function device can't be populated " + "in function %x.%x", slot, PCI_FUNC(dev->devfn)); + return; + } + return; + } + + if (dev->cap_present & QEMU_PCI_CAP_MULTIFUNCTION) { + return; + } + /* function 0 indicates single function, so function > 0 must be NULL */ + for (func = 1; func < PCI_FUNC_MAX; ++func) { + if (bus->devices[PCI_DEVFN(slot, func)]) { + error_setg(errp, "PCI: %x.0 indicates single function, " + "but %x.%x is already populated.", + slot, slot, func); + return; + } + } +} + +static void pci_config_alloc(PCIDevice *pci_dev) +{ + int config_size = pci_config_size(pci_dev); + + pci_dev->config = g_malloc0(config_size); + pci_dev->cmask = g_malloc0(config_size); + pci_dev->wmask = g_malloc0(config_size); + pci_dev->w1cmask = g_malloc0(config_size); + pci_dev->used = g_malloc0(config_size); +} + +static void pci_config_free(PCIDevice *pci_dev) +{ + g_free(pci_dev->config); + g_free(pci_dev->cmask); + g_free(pci_dev->wmask); + g_free(pci_dev->w1cmask); + g_free(pci_dev->used); +} + +static void do_pci_unregister_device(PCIDevice *pci_dev) +{ + pci_get_bus(pci_dev)->devices[pci_dev->devfn] = NULL; + pci_config_free(pci_dev); + + if (memory_region_is_mapped(&pci_dev->bus_master_enable_region)) { + memory_region_del_subregion(&pci_dev->bus_master_container_region, + &pci_dev->bus_master_enable_region); + } + address_space_destroy(&pci_dev->bus_master_as); +} + +/* Extract PCIReqIDCache into BDF format */ +static uint16_t pci_req_id_cache_extract(PCIReqIDCache *cache) +{ + uint8_t bus_n; + uint16_t result; + + switch (cache->type) { + case PCI_REQ_ID_BDF: + result = pci_get_bdf(cache->dev); + break; + case PCI_REQ_ID_SECONDARY_BUS: + bus_n = pci_dev_bus_num(cache->dev); + result = PCI_BUILD_BDF(bus_n, 0); + break; + default: + error_report("Invalid PCI requester ID cache type: %d", + cache->type); + exit(1); + break; + } + + return result; +} + +/* Parse bridges up to the root complex and return requester ID + * cache for specific device. For full PCIe topology, the cache + * result would be exactly the same as getting BDF of the device. + * However, several tricks are required when system mixed up with + * legacy PCI devices and PCIe-to-PCI bridges. + * + * Here we cache the proxy device (and type) not requester ID since + * bus number might change from time to time. + */ +static PCIReqIDCache pci_req_id_cache_get(PCIDevice *dev) +{ + PCIDevice *parent; + PCIReqIDCache cache = { + .dev = dev, + .type = PCI_REQ_ID_BDF, + }; + + while (!pci_bus_is_root(pci_get_bus(dev))) { + /* We are under PCI/PCIe bridges */ + parent = pci_get_bus(dev)->parent_dev; + if (pci_is_express(parent)) { + if (pcie_cap_get_type(parent) == PCI_EXP_TYPE_PCI_BRIDGE) { + /* When we pass through PCIe-to-PCI/PCIX bridges, we + * override the requester ID using secondary bus + * number of parent bridge with zeroed devfn + * (pcie-to-pci bridge spec chap 2.3). */ + cache.type = PCI_REQ_ID_SECONDARY_BUS; + cache.dev = dev; + } + } else { + /* Legacy PCI, override requester ID with the bridge's + * BDF upstream. When the root complex connects to + * legacy PCI devices (including buses), it can only + * obtain requester ID info from directly attached + * devices. 
If devices are attached under bridges, only + * the requester ID of the bridge that is directly + * attached to the root complex can be recognized. */ + cache.type = PCI_REQ_ID_BDF; + cache.dev = parent; + } + dev = parent; + } + + return cache; +} + +uint16_t pci_requester_id(PCIDevice *dev) +{ + return pci_req_id_cache_extract(&dev->requester_id_cache); +} + +static bool pci_bus_devfn_available(PCIBus *bus, int devfn) +{ + return !(bus->devices[devfn]); +} + +static bool pci_bus_devfn_reserved(PCIBus *bus, int devfn) +{ + return bus->slot_reserved_mask & (1UL << PCI_SLOT(devfn)); +} + +/* -1 for devfn means auto assign */ +static PCIDevice *do_pci_register_device(PCIDevice *pci_dev, + const char *name, int devfn, + Error **errp) +{ + PCIDeviceClass *pc = PCI_DEVICE_GET_CLASS(pci_dev); + PCIConfigReadFunc *config_read = pc->config_read; + PCIConfigWriteFunc *config_write = pc->config_write; + Error *local_err = NULL; + DeviceState *dev = DEVICE(pci_dev); + PCIBus *bus = pci_get_bus(pci_dev); + + /* Only pci bridges can be attached to extra PCI root buses */ + if (pci_bus_is_root(bus) && bus->parent_dev && !pc->is_bridge) { + error_setg(errp, + "PCI: Only PCI/PCIe bridges can be plugged into %s", + bus->parent_dev->name); + return NULL; + } + + if (devfn < 0) { + for(devfn = bus->devfn_min ; devfn < ARRAY_SIZE(bus->devices); + devfn += PCI_FUNC_MAX) { + if (pci_bus_devfn_available(bus, devfn) && + !pci_bus_devfn_reserved(bus, devfn)) { + goto found; + } + } + error_setg(errp, "PCI: no slot/function available for %s, all in use " + "or reserved", name); + return NULL; + found: ; + } else if (pci_bus_devfn_reserved(bus, devfn)) { + error_setg(errp, "PCI: slot %d function %d not available for %s," + " reserved", + PCI_SLOT(devfn), PCI_FUNC(devfn), name); + return NULL; + } else if (!pci_bus_devfn_available(bus, devfn)) { + error_setg(errp, "PCI: slot %d function %d not available for %s," + " in use by %s", + PCI_SLOT(devfn), PCI_FUNC(devfn), name, + bus->devices[devfn]->name); + return NULL; + } else if (dev->hotplugged && + pci_get_function_0(pci_dev)) { + error_setg(errp, "PCI: slot %d function 0 already occupied by %s," + " new func %s cannot be exposed to guest.", + PCI_SLOT(pci_get_function_0(pci_dev)->devfn), + pci_get_function_0(pci_dev)->name, + name); + + return NULL; + } + + pci_dev->devfn = devfn; + pci_dev->requester_id_cache = pci_req_id_cache_get(pci_dev); + pstrcpy(pci_dev->name, sizeof(pci_dev->name), name); + + memory_region_init(&pci_dev->bus_master_container_region, OBJECT(pci_dev), + "bus master container", UINT64_MAX); + address_space_init(&pci_dev->bus_master_as, + &pci_dev->bus_master_container_region, pci_dev->name); + + if (phase_check(PHASE_MACHINE_READY)) { + pci_init_bus_master(pci_dev); + } + pci_dev->irq_state = 0; + pci_config_alloc(pci_dev); + + pci_config_set_vendor_id(pci_dev->config, pc->vendor_id); + pci_config_set_device_id(pci_dev->config, pc->device_id); + pci_config_set_revision(pci_dev->config, pc->revision); + pci_config_set_class(pci_dev->config, pc->class_id); + + if (!pc->is_bridge) { + if (pc->subsystem_vendor_id || pc->subsystem_id) { + pci_set_word(pci_dev->config + PCI_SUBSYSTEM_VENDOR_ID, + pc->subsystem_vendor_id); + pci_set_word(pci_dev->config + PCI_SUBSYSTEM_ID, + pc->subsystem_id); + } else { + pci_set_default_subsystem_id(pci_dev); + } + } else { + /* subsystem_vendor_id/subsystem_id are only for header type 0 */ + assert(!pc->subsystem_vendor_id); + assert(!pc->subsystem_id); + } + pci_init_cmask(pci_dev); + pci_init_wmask(pci_dev); + 
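+    /*
+     * The requester-ID cache initialized above eventually collapses to
+     * bus/devfn arithmetic.  A standalone sketch of the BDF encoding, with
+     * an assumed topology (illustration only; it mirrors PCI_BUILD_BDF):
+     *
+     *     #include <stdio.h>
+     *     #define PCI_BUILD_BDF(bus, devfn) ((bus) << 8 | (devfn))
+     *
+     *     int main(void)
+     *     {
+     *         unsigned bus = 3, slot = 2, func = 1;   // assumed
+     *         unsigned devfn = slot << 3 | func;      // 0x11
+     *         printf("bdf=0x%04x\n", PCI_BUILD_BDF(bus, devfn)); // 0x0311
+     *         return 0;
+     *     }
+     */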
pci_init_w1cmask(pci_dev); + if (pc->is_bridge) { + pci_init_mask_bridge(pci_dev); + } + pci_init_multifunction(bus, pci_dev, &local_err); + if (local_err) { + error_propagate(errp, local_err); + do_pci_unregister_device(pci_dev); + return NULL; + } + + if (!config_read) + config_read = pci_default_read_config; + if (!config_write) + config_write = pci_default_write_config; + pci_dev->config_read = config_read; + pci_dev->config_write = config_write; + bus->devices[devfn] = pci_dev; + pci_dev->version_id = 2; /* Current pci device vmstate version */ + return pci_dev; +} + +static void pci_unregister_io_regions(PCIDevice *pci_dev) +{ + PCIIORegion *r; + int i; + + for(i = 0; i < PCI_NUM_REGIONS; i++) { + r = &pci_dev->io_regions[i]; + if (!r->size || r->addr == PCI_BAR_UNMAPPED) + continue; + memory_region_del_subregion(r->address_space, r->memory); + } + + pci_unregister_vga(pci_dev); +} + +static void pci_qdev_unrealize(DeviceState *dev) +{ + PCIDevice *pci_dev = PCI_DEVICE(dev); + PCIDeviceClass *pc = PCI_DEVICE_GET_CLASS(pci_dev); + + pci_unregister_io_regions(pci_dev); + pci_del_option_rom(pci_dev); + + if (pc->exit) { + pc->exit(pci_dev); + } + + pci_device_deassert_intx(pci_dev); + do_pci_unregister_device(pci_dev); +} + +void pci_register_bar(PCIDevice *pci_dev, int region_num, + uint8_t type, MemoryRegion *memory) +{ + PCIIORegion *r; + uint32_t addr; /* offset in pci config space */ + uint64_t wmask; + pcibus_t size = memory_region_size(memory); + uint8_t hdr_type; + + assert(region_num >= 0); + assert(region_num < PCI_NUM_REGIONS); + assert(is_power_of_2(size)); + + /* A PCI bridge device (with Type 1 header) may only have at most 2 BARs */ + hdr_type = + pci_dev->config[PCI_HEADER_TYPE] & ~PCI_HEADER_TYPE_MULTI_FUNCTION; + assert(hdr_type != PCI_HEADER_TYPE_BRIDGE || region_num < 2); + + r = &pci_dev->io_regions[region_num]; + r->addr = PCI_BAR_UNMAPPED; + r->size = size; + r->type = type; + r->memory = memory; + r->address_space = type & PCI_BASE_ADDRESS_SPACE_IO + ? 
pci_get_bus(pci_dev)->address_space_io + : pci_get_bus(pci_dev)->address_space_mem; + + wmask = ~(size - 1); + if (region_num == PCI_ROM_SLOT) { + /* ROM enable bit is writable */ + wmask |= PCI_ROM_ADDRESS_ENABLE; + } + + addr = pci_bar(pci_dev, region_num); + pci_set_long(pci_dev->config + addr, type); + + if (!(r->type & PCI_BASE_ADDRESS_SPACE_IO) && + r->type & PCI_BASE_ADDRESS_MEM_TYPE_64) { + pci_set_quad(pci_dev->wmask + addr, wmask); + pci_set_quad(pci_dev->cmask + addr, ~0ULL); + } else { + pci_set_long(pci_dev->wmask + addr, wmask & 0xffffffff); + pci_set_long(pci_dev->cmask + addr, 0xffffffff); + } +} + +static void pci_update_vga(PCIDevice *pci_dev) +{ + uint16_t cmd; + + if (!pci_dev->has_vga) { + return; + } + + cmd = pci_get_word(pci_dev->config + PCI_COMMAND); + + memory_region_set_enabled(pci_dev->vga_regions[QEMU_PCI_VGA_MEM], + cmd & PCI_COMMAND_MEMORY); + memory_region_set_enabled(pci_dev->vga_regions[QEMU_PCI_VGA_IO_LO], + cmd & PCI_COMMAND_IO); + memory_region_set_enabled(pci_dev->vga_regions[QEMU_PCI_VGA_IO_HI], + cmd & PCI_COMMAND_IO); +} + +void pci_register_vga(PCIDevice *pci_dev, MemoryRegion *mem, + MemoryRegion *io_lo, MemoryRegion *io_hi) +{ + PCIBus *bus = pci_get_bus(pci_dev); + + assert(!pci_dev->has_vga); + + assert(memory_region_size(mem) == QEMU_PCI_VGA_MEM_SIZE); + pci_dev->vga_regions[QEMU_PCI_VGA_MEM] = mem; + memory_region_add_subregion_overlap(bus->address_space_mem, + QEMU_PCI_VGA_MEM_BASE, mem, 1); + + assert(memory_region_size(io_lo) == QEMU_PCI_VGA_IO_LO_SIZE); + pci_dev->vga_regions[QEMU_PCI_VGA_IO_LO] = io_lo; + memory_region_add_subregion_overlap(bus->address_space_io, + QEMU_PCI_VGA_IO_LO_BASE, io_lo, 1); + + assert(memory_region_size(io_hi) == QEMU_PCI_VGA_IO_HI_SIZE); + pci_dev->vga_regions[QEMU_PCI_VGA_IO_HI] = io_hi; + memory_region_add_subregion_overlap(bus->address_space_io, + QEMU_PCI_VGA_IO_HI_BASE, io_hi, 1); + pci_dev->has_vga = true; + + pci_update_vga(pci_dev); +} + +void pci_unregister_vga(PCIDevice *pci_dev) +{ + PCIBus *bus = pci_get_bus(pci_dev); + + if (!pci_dev->has_vga) { + return; + } + + memory_region_del_subregion(bus->address_space_mem, + pci_dev->vga_regions[QEMU_PCI_VGA_MEM]); + memory_region_del_subregion(bus->address_space_io, + pci_dev->vga_regions[QEMU_PCI_VGA_IO_LO]); + memory_region_del_subregion(bus->address_space_io, + pci_dev->vga_regions[QEMU_PCI_VGA_IO_HI]); + pci_dev->has_vga = false; +} + +pcibus_t pci_get_bar_addr(PCIDevice *pci_dev, int region_num) +{ + return pci_dev->io_regions[region_num].addr; +} + +static pcibus_t pci_bar_address(PCIDevice *d, + int reg, uint8_t type, pcibus_t size) +{ + pcibus_t new_addr, last_addr; + int bar = pci_bar(d, reg); + uint16_t cmd = pci_get_word(d->config + PCI_COMMAND); + Object *machine = qdev_get_machine(); + ObjectClass *oc = object_get_class(machine); + MachineClass *mc = MACHINE_CLASS(oc); + bool allow_0_address = mc->pci_allow_0_address; + + if (type & PCI_BASE_ADDRESS_SPACE_IO) { + if (!(cmd & PCI_COMMAND_IO)) { + return PCI_BAR_UNMAPPED; + } + new_addr = pci_get_long(d->config + bar) & ~(size - 1); + last_addr = new_addr + size - 1; + /* Check if 32 bit BAR wraps around explicitly. + * TODO: make priorities correct and remove this work around. 
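+ *
+ * As a concrete decode example (assumed values): for a 4 KiB region the
+ * low bits are masked off, which is also what makes the all-ones sizing
+ * probe work:
+ *
+ *     unsigned size = 0x1000;                      // 4 KiB BAR
+ *     unsigned probe = 0xffffffff & ~(size - 1);   // reads back 0xfffff000
+ *     unsigned base  = 0xc040 & ~(size - 1);       // decoded base 0xc000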
+ */ + if (last_addr <= new_addr || last_addr >= UINT32_MAX || + (!allow_0_address && new_addr == 0)) { + return PCI_BAR_UNMAPPED; + } + return new_addr; + } + + if (!(cmd & PCI_COMMAND_MEMORY)) { + return PCI_BAR_UNMAPPED; + } + if (type & PCI_BASE_ADDRESS_MEM_TYPE_64) { + new_addr = pci_get_quad(d->config + bar); + } else { + new_addr = pci_get_long(d->config + bar); + } + /* the ROM slot has a specific enable bit */ + if (reg == PCI_ROM_SLOT && !(new_addr & PCI_ROM_ADDRESS_ENABLE)) { + return PCI_BAR_UNMAPPED; + } + new_addr &= ~(size - 1); + last_addr = new_addr + size - 1; + /* NOTE: we do not support wrapping */ + /* XXX: as we cannot support really dynamic + mappings, we handle specific values as invalid + mappings. */ + if (last_addr <= new_addr || last_addr == PCI_BAR_UNMAPPED || + (!allow_0_address && new_addr == 0)) { + return PCI_BAR_UNMAPPED; + } + + /* Now pcibus_t is 64bit. + * Check if 32 bit BAR wraps around explicitly. + * Without this, PC ide doesn't work well. + * TODO: remove this work around. + */ + if (!(type & PCI_BASE_ADDRESS_MEM_TYPE_64) && last_addr >= UINT32_MAX) { + return PCI_BAR_UNMAPPED; + } + + /* + * OS is allowed to set BAR beyond its addressable + * bits. For example, 32 bit OS can set 64bit bar + * to >4G. Check it. TODO: we might need to support + * it in the future for e.g. PAE. + */ + if (last_addr >= HWADDR_MAX) { + return PCI_BAR_UNMAPPED; + } + + return new_addr; +} + +static void pci_update_mappings(PCIDevice *d) +{ + PCIIORegion *r; + int i; + pcibus_t new_addr; + + for(i = 0; i < PCI_NUM_REGIONS; i++) { + r = &d->io_regions[i]; + + /* this region isn't registered */ + if (!r->size) + continue; + + new_addr = pci_bar_address(d, i, r->type, r->size); + if (!d->has_power) { + new_addr = PCI_BAR_UNMAPPED; + } + + /* This bar isn't changed */ + if (new_addr == r->addr) + continue; + + /* now do the real mapping */ + if (r->addr != PCI_BAR_UNMAPPED) { + trace_pci_update_mappings_del(d, pci_dev_bus_num(d), + PCI_SLOT(d->devfn), + PCI_FUNC(d->devfn), + i, r->addr, r->size); + memory_region_del_subregion(r->address_space, r->memory); + } + r->addr = new_addr; + if (r->addr != PCI_BAR_UNMAPPED) { + trace_pci_update_mappings_add(d, pci_dev_bus_num(d), + PCI_SLOT(d->devfn), + PCI_FUNC(d->devfn), + i, r->addr, r->size); + memory_region_add_subregion_overlap(r->address_space, + r->addr, r->memory, 1); + } + } + + pci_update_vga(d); +} + +static inline int pci_irq_disabled(PCIDevice *d) +{ + return pci_get_word(d->config + PCI_COMMAND) & PCI_COMMAND_INTX_DISABLE; +} + +/* Called after interrupt disabled field update in config space, + * assert/deassert interrupts if necessary. + * Gets original interrupt disable bit value (before update). */ +static void pci_update_irq_disabled(PCIDevice *d, int was_irq_disabled) +{ + int i, disabled = pci_irq_disabled(d); + if (disabled == was_irq_disabled) + return; + for (i = 0; i < PCI_NUM_PINS; ++i) { + int state = pci_irq_state(d, i); + pci_change_irq_level(d, i, disabled ? 
-state : state); + } +} + +uint32_t pci_default_read_config(PCIDevice *d, + uint32_t address, int len) +{ + uint32_t val = 0; + + assert(address + len <= pci_config_size(d)); + + if (pci_is_express_downstream_port(d) && + ranges_overlap(address, len, d->exp.exp_cap + PCI_EXP_LNKSTA, 2)) { + pcie_sync_bridge_lnk(d); + } + memcpy(&val, d->config + address, len); + return le32_to_cpu(val); +} + +void pci_default_write_config(PCIDevice *d, uint32_t addr, uint32_t val_in, int l) +{ + int i, was_irq_disabled = pci_irq_disabled(d); + uint32_t val = val_in; + + assert(addr + l <= pci_config_size(d)); + + for (i = 0; i < l; val >>= 8, ++i) { + uint8_t wmask = d->wmask[addr + i]; + uint8_t w1cmask = d->w1cmask[addr + i]; + assert(!(wmask & w1cmask)); + d->config[addr + i] = (d->config[addr + i] & ~wmask) | (val & wmask); + d->config[addr + i] &= ~(val & w1cmask); /* W1C: Write 1 to Clear */ + } + if (ranges_overlap(addr, l, PCI_BASE_ADDRESS_0, 24) || + ranges_overlap(addr, l, PCI_ROM_ADDRESS, 4) || + ranges_overlap(addr, l, PCI_ROM_ADDRESS1, 4) || + range_covers_byte(addr, l, PCI_COMMAND)) + pci_update_mappings(d); + + if (range_covers_byte(addr, l, PCI_COMMAND)) { + pci_update_irq_disabled(d, was_irq_disabled); + memory_region_set_enabled(&d->bus_master_enable_region, + (pci_get_word(d->config + PCI_COMMAND) + & PCI_COMMAND_MASTER) && d->has_power); + } + + msi_write_config(d, addr, val_in, l); + msix_write_config(d, addr, val_in, l); +} + +/***********************************************************/ +/* generic PCI irq support */ + +/* 0 <= irq_num <= 3. level must be 0 or 1 */ +static void pci_irq_handler(void *opaque, int irq_num, int level) +{ + PCIDevice *pci_dev = opaque; + int change; + + assert(0 <= irq_num && irq_num < PCI_NUM_PINS); + assert(level == 0 || level == 1); + change = level - pci_irq_state(pci_dev, irq_num); + if (!change) + return; + + pci_set_irq_state(pci_dev, irq_num, level); + pci_update_irq_status(pci_dev); + if (pci_irq_disabled(pci_dev)) + return; + pci_change_irq_level(pci_dev, irq_num, change); +} + +static inline int pci_intx(PCIDevice *pci_dev) +{ + return pci_get_byte(pci_dev->config + PCI_INTERRUPT_PIN) - 1; +} + +qemu_irq pci_allocate_irq(PCIDevice *pci_dev) +{ + int intx = pci_intx(pci_dev); + assert(0 <= intx && intx < PCI_NUM_PINS); + + return qemu_allocate_irq(pci_irq_handler, pci_dev, intx); +} + +void pci_set_irq(PCIDevice *pci_dev, int level) +{ + int intx = pci_intx(pci_dev); + pci_irq_handler(pci_dev, intx, level); +} + +/* Special hooks used by device assignment */ +void pci_bus_set_route_irq_fn(PCIBus *bus, pci_route_irq_fn route_intx_to_irq) +{ + assert(pci_bus_is_root(bus)); + bus->route_intx_to_irq = route_intx_to_irq; +} + +PCIINTxRoute pci_device_route_intx_to_irq(PCIDevice *dev, int pin) +{ + PCIBus *bus; + + do { + bus = pci_get_bus(dev); + pin = bus->map_irq(dev, pin); + dev = bus->parent_dev; + } while (dev); + + if (!bus->route_intx_to_irq) { + error_report("PCI: Bug - unimplemented PCI INTx routing (%s)", + object_get_typename(OBJECT(bus->qbus.parent))); + return (PCIINTxRoute) { PCI_INTX_DISABLED, -1 }; + } + + return bus->route_intx_to_irq(bus->irq_opaque, pin); +} + +bool pci_intx_route_changed(PCIINTxRoute *old, PCIINTxRoute *new) +{ + return old->mode != new->mode || old->irq != new->irq; +} + +void pci_bus_fire_intx_routing_notifier(PCIBus *bus) +{ + PCIDevice *dev; + PCIBus *sec; + int i; + + for (i = 0; i < ARRAY_SIZE(bus->devices); ++i) { + dev = bus->devices[i]; + if (dev && dev->intx_routing_notifier) { + 
dev->intx_routing_notifier(dev); + } + } + + QLIST_FOREACH(sec, &bus->child, sibling) { + pci_bus_fire_intx_routing_notifier(sec); + } +} + +void pci_device_set_intx_routing_notifier(PCIDevice *dev, + PCIINTxRoutingNotifier notifier) +{ + dev->intx_routing_notifier = notifier; +} + +/* + * PCI-to-PCI bridge specification + * 9.1: Interrupt routing. Table 9-1 + * + * the PCI Express Base Specification, Revision 2.1 + * 2.2.8.1: INTx interrupt signaling - Rules + * the Implementation Note + * Table 2-20 + */ +/* + * 0 <= pin <= 3 0 = INTA, 1 = INTB, 2 = INTC, 3 = INTD + * 0-origin unlike the PCI interrupt pin register. + */ +int pci_swizzle_map_irq_fn(PCIDevice *pci_dev, int pin) +{ + return pci_swizzle(PCI_SLOT(pci_dev->devfn), pin); +} + +/***********************************************************/ +/* monitor info on PCI */ + +typedef struct { + uint16_t class; + const char *desc; + const char *fw_name; + uint16_t fw_ign_bits; +} pci_class_desc; + +static const pci_class_desc pci_class_descriptions[] = +{ + { 0x0001, "VGA controller", "display"}, + { 0x0100, "SCSI controller", "scsi"}, + { 0x0101, "IDE controller", "ide"}, + { 0x0102, "Floppy controller", "fdc"}, + { 0x0103, "IPI controller", "ipi"}, + { 0x0104, "RAID controller", "raid"}, + { 0x0106, "SATA controller"}, + { 0x0107, "SAS controller"}, + { 0x0180, "Storage controller"}, + { 0x0200, "Ethernet controller", "ethernet"}, + { 0x0201, "Token Ring controller", "token-ring"}, + { 0x0202, "FDDI controller", "fddi"}, + { 0x0203, "ATM controller", "atm"}, + { 0x0280, "Network controller"}, + { 0x0300, "VGA controller", "display", 0x00ff}, + { 0x0301, "XGA controller"}, + { 0x0302, "3D controller"}, + { 0x0380, "Display controller"}, + { 0x0400, "Video controller", "video"}, + { 0x0401, "Audio controller", "sound"}, + { 0x0402, "Phone"}, + { 0x0403, "Audio controller", "sound"}, + { 0x0480, "Multimedia controller"}, + { 0x0500, "RAM controller", "memory"}, + { 0x0501, "Flash controller", "flash"}, + { 0x0580, "Memory controller"}, + { 0x0600, "Host bridge", "host"}, + { 0x0601, "ISA bridge", "isa"}, + { 0x0602, "EISA bridge", "eisa"}, + { 0x0603, "MC bridge", "mca"}, + { 0x0604, "PCI bridge", "pci-bridge"}, + { 0x0605, "PCMCIA bridge", "pcmcia"}, + { 0x0606, "NUBUS bridge", "nubus"}, + { 0x0607, "CARDBUS bridge", "cardbus"}, + { 0x0608, "RACEWAY bridge"}, + { 0x0680, "Bridge"}, + { 0x0700, "Serial port", "serial"}, + { 0x0701, "Parallel port", "parallel"}, + { 0x0800, "Interrupt controller", "interrupt-controller"}, + { 0x0801, "DMA controller", "dma-controller"}, + { 0x0802, "Timer", "timer"}, + { 0x0803, "RTC", "rtc"}, + { 0x0900, "Keyboard", "keyboard"}, + { 0x0901, "Pen", "pen"}, + { 0x0902, "Mouse", "mouse"}, + { 0x0A00, "Dock station", "dock", 0x00ff}, + { 0x0B00, "i386 cpu", "cpu", 0x00ff}, + { 0x0c00, "FireWire controller", "fireware"}, + { 0x0c01, "Access bus controller", "access-bus"}, + { 0x0c02, "SSA controller", "ssa"}, + { 0x0c03, "USB controller", "usb"}, + { 0x0c04, "Fibre channel controller", "fibre-channel"}, + { 0x0c05, "SMBus"}, + { 0, NULL} +}; + +void pci_for_each_device_under_bus_reverse(PCIBus *bus, + pci_bus_dev_fn fn, + void *opaque) +{ + PCIDevice *d; + int devfn; + + for (devfn = 0; devfn < ARRAY_SIZE(bus->devices); devfn++) { + d = bus->devices[ARRAY_SIZE(bus->devices) - 1 - devfn]; + if (d) { + fn(bus, d, opaque); + } + } +} + +void pci_for_each_device_reverse(PCIBus *bus, int bus_num, + pci_bus_dev_fn fn, void *opaque) +{ + bus = pci_find_bus_nr(bus, bus_num); + + if (bus) { + 
pci_for_each_device_under_bus_reverse(bus, fn, opaque); + } +} + +void pci_for_each_device_under_bus(PCIBus *bus, + pci_bus_dev_fn fn, void *opaque) +{ + PCIDevice *d; + int devfn; + + for(devfn = 0; devfn < ARRAY_SIZE(bus->devices); devfn++) { + d = bus->devices[devfn]; + if (d) { + fn(bus, d, opaque); + } + } +} + +void pci_for_each_device(PCIBus *bus, int bus_num, + pci_bus_dev_fn fn, void *opaque) +{ + bus = pci_find_bus_nr(bus, bus_num); + + if (bus) { + pci_for_each_device_under_bus(bus, fn, opaque); + } +} + +static const pci_class_desc *get_class_desc(int class) +{ + const pci_class_desc *desc; + + desc = pci_class_descriptions; + while (desc->desc && class != desc->class) { + desc++; + } + + return desc; +} + +static PciDeviceInfoList *qmp_query_pci_devices(PCIBus *bus, int bus_num); + +static PciMemoryRegionList *qmp_query_pci_regions(const PCIDevice *dev) +{ + PciMemoryRegionList *head = NULL, **tail = &head; + int i; + + for (i = 0; i < PCI_NUM_REGIONS; i++) { + const PCIIORegion *r = &dev->io_regions[i]; + PciMemoryRegion *region; + + if (!r->size) { + continue; + } + + region = g_malloc0(sizeof(*region)); + + if (r->type & PCI_BASE_ADDRESS_SPACE_IO) { + region->type = g_strdup("io"); + } else { + region->type = g_strdup("memory"); + region->has_prefetch = true; + region->prefetch = !!(r->type & PCI_BASE_ADDRESS_MEM_PREFETCH); + region->has_mem_type_64 = true; + region->mem_type_64 = !!(r->type & PCI_BASE_ADDRESS_MEM_TYPE_64); + } + + region->bar = i; + region->address = r->addr; + region->size = r->size; + + QAPI_LIST_APPEND(tail, region); + } + + return head; +} + +static PciBridgeInfo *qmp_query_pci_bridge(PCIDevice *dev, PCIBus *bus, + int bus_num) +{ + PciBridgeInfo *info; + PciMemoryRange *range; + + info = g_new0(PciBridgeInfo, 1); + + info->bus = g_new0(PciBusInfo, 1); + info->bus->number = dev->config[PCI_PRIMARY_BUS]; + info->bus->secondary = dev->config[PCI_SECONDARY_BUS]; + info->bus->subordinate = dev->config[PCI_SUBORDINATE_BUS]; + + range = info->bus->io_range = g_new0(PciMemoryRange, 1); + range->base = pci_bridge_get_base(dev, PCI_BASE_ADDRESS_SPACE_IO); + range->limit = pci_bridge_get_limit(dev, PCI_BASE_ADDRESS_SPACE_IO); + + range = info->bus->memory_range = g_new0(PciMemoryRange, 1); + range->base = pci_bridge_get_base(dev, PCI_BASE_ADDRESS_SPACE_MEMORY); + range->limit = pci_bridge_get_limit(dev, PCI_BASE_ADDRESS_SPACE_MEMORY); + + range = info->bus->prefetchable_range = g_new0(PciMemoryRange, 1); + range->base = pci_bridge_get_base(dev, PCI_BASE_ADDRESS_MEM_PREFETCH); + range->limit = pci_bridge_get_limit(dev, PCI_BASE_ADDRESS_MEM_PREFETCH); + + if (dev->config[PCI_SECONDARY_BUS] != 0) { + PCIBus *child_bus = pci_find_bus_nr(bus, dev->config[PCI_SECONDARY_BUS]); + if (child_bus) { + info->has_devices = true; + info->devices = qmp_query_pci_devices(child_bus, dev->config[PCI_SECONDARY_BUS]); + } + } + + return info; +} + +static PciDeviceInfo *qmp_query_pci_device(PCIDevice *dev, PCIBus *bus, + int bus_num) +{ + const pci_class_desc *desc; + PciDeviceInfo *info; + uint8_t type; + int class; + + info = g_new0(PciDeviceInfo, 1); + info->bus = bus_num; + info->slot = PCI_SLOT(dev->devfn); + info->function = PCI_FUNC(dev->devfn); + + info->class_info = g_new0(PciDeviceClass, 1); + class = pci_get_word(dev->config + PCI_CLASS_DEVICE); + info->class_info->q_class = class; + desc = get_class_desc(class); + if (desc->desc) { + info->class_info->has_desc = true; + info->class_info->desc = g_strdup(desc->desc); + } + + info->id = g_new0(PciDeviceId, 1); + 
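/*
 * Usage sketch for the iteration helpers above (hypothetical caller):
 * count the populated functions on a bus with pci_for_each_device().
 *
 *     static void count_cb(PCIBus *b, PCIDevice *d, void *opaque)
 *     {
 *         (*(int *)opaque)++;
 *     }
 *
 *     int n = 0;
 *     pci_for_each_device(bus, pci_bus_num(bus), count_cb, &n);
 *
 * The callback only sees non-NULL devices; empty devfn slots are skipped.
 */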
info->id->vendor = pci_get_word(dev->config + PCI_VENDOR_ID); + info->id->device = pci_get_word(dev->config + PCI_DEVICE_ID); + info->regions = qmp_query_pci_regions(dev); + info->qdev_id = g_strdup(dev->qdev.id ? dev->qdev.id : ""); + + info->irq_pin = dev->config[PCI_INTERRUPT_PIN]; + if (dev->config[PCI_INTERRUPT_PIN] != 0) { + info->has_irq = true; + info->irq = dev->config[PCI_INTERRUPT_LINE]; + } + + type = dev->config[PCI_HEADER_TYPE] & ~PCI_HEADER_TYPE_MULTI_FUNCTION; + if (type == PCI_HEADER_TYPE_BRIDGE) { + info->has_pci_bridge = true; + info->pci_bridge = qmp_query_pci_bridge(dev, bus, bus_num); + } else if (type == PCI_HEADER_TYPE_NORMAL) { + info->id->has_subsystem = info->id->has_subsystem_vendor = true; + info->id->subsystem = pci_get_word(dev->config + PCI_SUBSYSTEM_ID); + info->id->subsystem_vendor = + pci_get_word(dev->config + PCI_SUBSYSTEM_VENDOR_ID); + } else if (type == PCI_HEADER_TYPE_CARDBUS) { + info->id->has_subsystem = info->id->has_subsystem_vendor = true; + info->id->subsystem = pci_get_word(dev->config + PCI_CB_SUBSYSTEM_ID); + info->id->subsystem_vendor = + pci_get_word(dev->config + PCI_CB_SUBSYSTEM_VENDOR_ID); + } + + return info; +} + +static PciDeviceInfoList *qmp_query_pci_devices(PCIBus *bus, int bus_num) +{ + PciDeviceInfoList *head = NULL, **tail = &head; + PCIDevice *dev; + int devfn; + + for (devfn = 0; devfn < ARRAY_SIZE(bus->devices); devfn++) { + dev = bus->devices[devfn]; + if (dev) { + QAPI_LIST_APPEND(tail, qmp_query_pci_device(dev, bus, bus_num)); + } + } + + return head; +} + +static PciInfo *qmp_query_pci_bus(PCIBus *bus, int bus_num) +{ + PciInfo *info = NULL; + + bus = pci_find_bus_nr(bus, bus_num); + if (bus) { + info = g_malloc0(sizeof(*info)); + info->bus = bus_num; + info->devices = qmp_query_pci_devices(bus, bus_num); + } + + return info; +} + +PciInfoList *qmp_query_pci(Error **errp) +{ + PciInfoList *head = NULL, **tail = &head; + PCIHostState *host_bridge; + + QLIST_FOREACH(host_bridge, &pci_host_bridges, next) { + QAPI_LIST_APPEND(tail, + qmp_query_pci_bus(host_bridge->bus, + pci_bus_num(host_bridge->bus))); + } + + return head; +} + +/* Initialize a PCI NIC. */ +PCIDevice *pci_nic_init_nofail(NICInfo *nd, PCIBus *rootbus, + const char *default_model, + const char *default_devaddr) +{ + const char *devaddr = nd->devaddr ? nd->devaddr : default_devaddr; + GSList *list; + GPtrArray *pci_nic_models; + PCIBus *bus; + PCIDevice *pci_dev; + DeviceState *dev; + int devfn; + int i; + int dom, busnr; + unsigned slot; + + if (nd->model && !strcmp(nd->model, "virtio")) { + g_free(nd->model); + nd->model = g_strdup("virtio-net-pci"); + } + + list = object_class_get_list_sorted(TYPE_PCI_DEVICE, false); + pci_nic_models = g_ptr_array_new(); + while (list) { + DeviceClass *dc = OBJECT_CLASS_CHECK(DeviceClass, list->data, + TYPE_DEVICE); + GSList *next; + if (test_bit(DEVICE_CATEGORY_NETWORK, dc->categories) && + dc->user_creatable) { + const char *name = object_class_get_name(list->data); + /* + * A network device might also be something else than a NIC, see + * e.g. the "rocker" device. Thus we have to look for the "netdev" + * property, too. Unfortunately, some devices like virtio-net only + * create this property during instance_init, so we have to create + * a temporary instance here to be able to check it. 
+ */ + Object *obj = object_new_with_class(OBJECT_CLASS(dc)); + if (object_property_find(obj, "netdev")) { + g_ptr_array_add(pci_nic_models, (gpointer)name); + } + object_unref(obj); + } + next = list->next; + g_slist_free_1(list); + list = next; + } + g_ptr_array_add(pci_nic_models, NULL); + + if (qemu_show_nic_models(nd->model, (const char **)pci_nic_models->pdata)) { + exit(0); + } + + i = qemu_find_nic_model(nd, (const char **)pci_nic_models->pdata, + default_model); + if (i < 0) { + exit(1); + } + + if (!rootbus) { + error_report("No primary PCI bus"); + exit(1); + } + + assert(!rootbus->parent_dev); + + if (!devaddr) { + devfn = -1; + busnr = 0; + } else { + if (pci_parse_devaddr(devaddr, &dom, &busnr, &slot, NULL) < 0) { + error_report("Invalid PCI device address %s for device %s", + devaddr, nd->model); + exit(1); + } + + if (dom != 0) { + error_report("No support for non-zero PCI domains"); + exit(1); + } + + devfn = PCI_DEVFN(slot, 0); + } + + bus = pci_find_bus_nr(rootbus, busnr); + if (!bus) { + error_report("Invalid PCI device address %s for device %s", + devaddr, nd->model); + exit(1); + } + + pci_dev = pci_new(devfn, nd->model); + dev = &pci_dev->qdev; + qdev_set_nic_properties(dev, nd); + pci_realize_and_unref(pci_dev, bus, &error_fatal); + g_ptr_array_free(pci_nic_models, true); + return pci_dev; +} + +PCIDevice *pci_vga_init(PCIBus *bus) +{ + switch (vga_interface_type) { + case VGA_CIRRUS: + return pci_create_simple(bus, -1, "cirrus-vga"); + case VGA_QXL: + return pci_create_simple(bus, -1, "qxl-vga"); + case VGA_STD: + return pci_create_simple(bus, -1, "VGA"); + case VGA_VMWARE: + return pci_create_simple(bus, -1, "vmware-svga"); + case VGA_VIRTIO: + return pci_create_simple(bus, -1, "virtio-vga"); + case VGA_NONE: + default: /* Other non-PCI types. Checking for unsupported types is already + done in vl.c. */ + return NULL; + } +} + +/* Whether a given bus number is in range of the secondary + * bus of the given bridge device. */ +static bool pci_secondary_bus_in_range(PCIDevice *dev, int bus_num) +{ + return !(pci_get_word(dev->config + PCI_BRIDGE_CONTROL) & + PCI_BRIDGE_CTL_BUS_RESET) /* Don't walk the bus if it's reset. */ && + dev->config[PCI_SECONDARY_BUS] <= bus_num && + bus_num <= dev->config[PCI_SUBORDINATE_BUS]; +} + +/* Whether a given bus number is in a range of a root bus */ +static bool pci_root_bus_in_range(PCIBus *bus, int bus_num) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(bus->devices); ++i) { + PCIDevice *dev = bus->devices[i]; + + if (dev && PCI_DEVICE_GET_CLASS(dev)->is_bridge) { + if (pci_secondary_bus_in_range(dev, bus_num)) { + return true; + } + } + } + + return false; +} + +static PCIBus *pci_find_bus_nr(PCIBus *bus, int bus_num) +{ + PCIBus *sec; + + if (!bus) { + return NULL; + } + + if (pci_bus_num(bus) == bus_num) { + return bus; + } + + /* Consider all bus numbers in range for the host pci bridge. 
*/ + if (!pci_bus_is_root(bus) && + !pci_secondary_bus_in_range(bus->parent_dev, bus_num)) { + return NULL; + } + + /* try child bus */ + for (; bus; bus = sec) { + QLIST_FOREACH(sec, &bus->child, sibling) { + if (pci_bus_num(sec) == bus_num) { + return sec; + } + /* PXB buses assumed to be children of bus 0 */ + if (pci_bus_is_root(sec)) { + if (pci_root_bus_in_range(sec, bus_num)) { + break; + } + } else { + if (pci_secondary_bus_in_range(sec->parent_dev, bus_num)) { + break; + } + } + } + } + + return NULL; +} + +void pci_for_each_bus_depth_first(PCIBus *bus, pci_bus_ret_fn begin, + pci_bus_fn end, void *parent_state) +{ + PCIBus *sec; + void *state; + + if (!bus) { + return; + } + + if (begin) { + state = begin(bus, parent_state); + } else { + state = parent_state; + } + + QLIST_FOREACH(sec, &bus->child, sibling) { + pci_for_each_bus_depth_first(sec, begin, end, state); + } + + if (end) { + end(bus, state); + } +} + + +PCIDevice *pci_find_device(PCIBus *bus, int bus_num, uint8_t devfn) +{ + bus = pci_find_bus_nr(bus, bus_num); + + if (!bus) + return NULL; + + return bus->devices[devfn]; +} + +static void pci_qdev_realize(DeviceState *qdev, Error **errp) +{ + PCIDevice *pci_dev = (PCIDevice *)qdev; + PCIDeviceClass *pc = PCI_DEVICE_GET_CLASS(pci_dev); + ObjectClass *klass = OBJECT_CLASS(pc); + Error *local_err = NULL; + bool is_default_rom; + uint16_t class_id; + + if (pci_dev->romsize != -1 && !is_power_of_2(pci_dev->romsize)) { + error_setg(errp, "ROM size %u is not a power of two", pci_dev->romsize); + return; + } + + /* initialize cap_present for pci_is_express() and pci_config_size(), + * Note that hybrid PCIs are not set automatically and need to manage + * QEMU_PCI_CAP_EXPRESS manually */ + if (object_class_dynamic_cast(klass, INTERFACE_PCIE_DEVICE) && + !object_class_dynamic_cast(klass, INTERFACE_CONVENTIONAL_PCI_DEVICE)) { + pci_dev->cap_present |= QEMU_PCI_CAP_EXPRESS; + } + + pci_dev = do_pci_register_device(pci_dev, + object_get_typename(OBJECT(qdev)), + pci_dev->devfn, errp); + if (pci_dev == NULL) + return; + + if (pc->realize) { + pc->realize(pci_dev, &local_err); + if (local_err) { + error_propagate(errp, local_err); + do_pci_unregister_device(pci_dev); + return; + } + } + + if (pci_dev->failover_pair_id) { + if (!pci_bus_is_express(pci_get_bus(pci_dev))) { + error_setg(errp, "failover primary device must be on " + "PCIExpress bus"); + pci_qdev_unrealize(DEVICE(pci_dev)); + return; + } + class_id = pci_get_word(pci_dev->config + PCI_CLASS_DEVICE); + if (class_id != PCI_CLASS_NETWORK_ETHERNET) { + error_setg(errp, "failover primary device is not an " + "Ethernet device"); + pci_qdev_unrealize(DEVICE(pci_dev)); + return; + } + if ((pci_dev->cap_present & QEMU_PCI_CAP_MULTIFUNCTION) + || (PCI_FUNC(pci_dev->devfn) != 0)) { + error_setg(errp, "failover: primary device must be in its own " + "PCI slot"); + pci_qdev_unrealize(DEVICE(pci_dev)); + return; + } + qdev->allow_unplug_during_migration = true; + } + + /* rom loading */ + is_default_rom = false; + if (pci_dev->romfile == NULL && pc->romfile != NULL) { + pci_dev->romfile = g_strdup(pc->romfile); + is_default_rom = true; + } + + pci_add_option_rom(pci_dev, is_default_rom, &local_err); + if (local_err) { + error_propagate(errp, local_err); + pci_qdev_unrealize(DEVICE(pci_dev)); + return; + } + + pci_set_power(pci_dev, true); +} + +PCIDevice *pci_new_multifunction(int devfn, bool multifunction, + const char *name) +{ + DeviceState *dev; + + dev = qdev_new(name); + qdev_prop_set_int32(dev, "addr", devfn); + 
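/*
 * Usage sketch for pci_for_each_bus_depth_first() above (hypothetical
 * callback): print each bus with its nesting depth. The pointer returned
 * by 'begin' becomes the children's parent_state, so a small integer can
 * be threaded through it.
 *
 *     static void *depth_begin(PCIBus *bus, void *parent_state)
 *     {
 *         intptr_t depth = (intptr_t)parent_state;
 *         printf("%*sbus %d\n", (int)depth * 2, "", pci_bus_num(bus));
 *         return (void *)(depth + 1);
 *     }
 *
 *     pci_for_each_bus_depth_first(root, depth_begin, NULL, NULL);
 *
 * Passing NULL for 'end' is fine; the walker only calls it when set.
 */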
qdev_prop_set_bit(dev, "multifunction", multifunction); + return PCI_DEVICE(dev); +} + +PCIDevice *pci_new(int devfn, const char *name) +{ + return pci_new_multifunction(devfn, false, name); +} + +bool pci_realize_and_unref(PCIDevice *dev, PCIBus *bus, Error **errp) +{ + return qdev_realize_and_unref(&dev->qdev, &bus->qbus, errp); +} + +PCIDevice *pci_create_simple_multifunction(PCIBus *bus, int devfn, + bool multifunction, + const char *name) +{ + PCIDevice *dev = pci_new_multifunction(devfn, multifunction, name); + pci_realize_and_unref(dev, bus, &error_fatal); + return dev; +} + +PCIDevice *pci_create_simple(PCIBus *bus, int devfn, const char *name) +{ + return pci_create_simple_multifunction(bus, devfn, false, name); +} + +static uint8_t pci_find_space(PCIDevice *pdev, uint8_t size) +{ + int offset = PCI_CONFIG_HEADER_SIZE; + int i; + for (i = PCI_CONFIG_HEADER_SIZE; i < PCI_CONFIG_SPACE_SIZE; ++i) { + if (pdev->used[i]) + offset = i + 1; + else if (i - offset + 1 == size) + return offset; + } + return 0; +} + +static uint8_t pci_find_capability_list(PCIDevice *pdev, uint8_t cap_id, + uint8_t *prev_p) +{ + uint8_t next, prev; + + if (!(pdev->config[PCI_STATUS] & PCI_STATUS_CAP_LIST)) + return 0; + + for (prev = PCI_CAPABILITY_LIST; (next = pdev->config[prev]); + prev = next + PCI_CAP_LIST_NEXT) + if (pdev->config[next + PCI_CAP_LIST_ID] == cap_id) + break; + + if (prev_p) + *prev_p = prev; + return next; +} + +static uint8_t pci_find_capability_at_offset(PCIDevice *pdev, uint8_t offset) +{ + uint8_t next, prev, found = 0; + + if (!(pdev->used[offset])) { + return 0; + } + + assert(pdev->config[PCI_STATUS] & PCI_STATUS_CAP_LIST); + + for (prev = PCI_CAPABILITY_LIST; (next = pdev->config[prev]); + prev = next + PCI_CAP_LIST_NEXT) { + if (next <= offset && next > found) { + found = next; + } + } + return found; +} + +/* Patch the PCI vendor and device ids in a PCI rom image if necessary. + This is needed for an option rom which is used for more than one device. */ +static void pci_patch_ids(PCIDevice *pdev, uint8_t *ptr, uint32_t size) +{ + uint16_t vendor_id; + uint16_t device_id; + uint16_t rom_vendor_id; + uint16_t rom_device_id; + uint16_t rom_magic; + uint16_t pcir_offset; + uint8_t checksum; + + /* Words in rom data are little endian (like in PCI configuration), + so they can be read / written with pci_get_word / pci_set_word. */ + + /* Only a valid rom will be patched. */ + rom_magic = pci_get_word(ptr); + if (rom_magic != 0xaa55) { + PCI_DPRINTF("Bad ROM magic %04x\n", rom_magic); + return; + } + pcir_offset = pci_get_word(ptr + 0x18); + if (pcir_offset + 8 >= size || memcmp(ptr + pcir_offset, "PCIR", 4)) { + PCI_DPRINTF("Bad PCIR offset 0x%x or signature\n", pcir_offset); + return; + } + + vendor_id = pci_get_word(pdev->config + PCI_VENDOR_ID); + device_id = pci_get_word(pdev->config + PCI_DEVICE_ID); + rom_vendor_id = pci_get_word(ptr + pcir_offset + 4); + rom_device_id = pci_get_word(ptr + pcir_offset + 6); + + PCI_DPRINTF("%s: ROM id %04x%04x / PCI id %04x%04x\n", pdev->romfile, + vendor_id, device_id, rom_vendor_id, rom_device_id); + + checksum = ptr[6]; + + if (vendor_id != rom_vendor_id) { + /* Patch vendor id and checksum (at offset 6 for etherboot roms). 
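 * A worked example of the adjustment below (values assumed): all option
 * ROM bytes must sum to 0 mod 256, with the byte at offset 6 acting as
 * the balancing term. If the ROM was built for vendor id 0x10ec and is
 * patched to 0x8086, the checksum byte changes by
 *
 *     (0xec + 0x10) - (0x86 + 0x80) = -0x0a (mod 256)
 *
 * which is exactly checksum += bytes(rom id) followed by
 * checksum -= bytes(new id), as coded below.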
*/ + checksum += (uint8_t)rom_vendor_id + (uint8_t)(rom_vendor_id >> 8); + checksum -= (uint8_t)vendor_id + (uint8_t)(vendor_id >> 8); + PCI_DPRINTF("ROM checksum %02x / %02x\n", ptr[6], checksum); + ptr[6] = checksum; + pci_set_word(ptr + pcir_offset + 4, vendor_id); + } + + if (device_id != rom_device_id) { + /* Patch device id and checksum (at offset 6 for etherboot roms). */ + checksum += (uint8_t)rom_device_id + (uint8_t)(rom_device_id >> 8); + checksum -= (uint8_t)device_id + (uint8_t)(device_id >> 8); + PCI_DPRINTF("ROM checksum %02x / %02x\n", ptr[6], checksum); + ptr[6] = checksum; + pci_set_word(ptr + pcir_offset + 6, device_id); + } +} + +/* Add an option rom for the device */ +static void pci_add_option_rom(PCIDevice *pdev, bool is_default_rom, + Error **errp) +{ + int64_t size; + char *path; + void *ptr; + char name[32]; + const VMStateDescription *vmsd; + + if (!pdev->romfile) + return; + if (strlen(pdev->romfile) == 0) + return; + + if (!pdev->rom_bar) { + /* + * Load rom via fw_cfg instead of creating a rom bar, + * for 0.11 compatibility. + */ + int class = pci_get_word(pdev->config + PCI_CLASS_DEVICE); + + /* + * Hot-plugged devices can't use the option ROM + * if the rom bar is disabled. + */ + if (DEVICE(pdev)->hotplugged) { + error_setg(errp, "Hot-plugged device without ROM bar" + " can't have an option ROM"); + return; + } + + if (class == 0x0300) { + rom_add_vga(pdev->romfile); + } else { + rom_add_option(pdev->romfile, -1); + } + return; + } + + path = qemu_find_file(QEMU_FILE_TYPE_BIOS, pdev->romfile); + if (path == NULL) { + path = g_strdup(pdev->romfile); + } + + size = get_image_size(path); + if (size < 0) { + error_setg(errp, "failed to find romfile \"%s\"", pdev->romfile); + g_free(path); + return; + } else if (size == 0) { + error_setg(errp, "romfile \"%s\" is empty", pdev->romfile); + g_free(path); + return; + } else if (size > 2 * GiB) { + error_setg(errp, "romfile \"%s\" too large (size cannot exceed 2 GiB)", + pdev->romfile); + g_free(path); + return; + } + if (pdev->romsize != -1) { + if (size > pdev->romsize) { + error_setg(errp, "romfile \"%s\" (%u bytes) is too large for ROM size %u", + pdev->romfile, (uint32_t)size, pdev->romsize); + g_free(path); + return; + } + } else { + pdev->romsize = pow2ceil(size); + } + + vmsd = qdev_get_vmsd(DEVICE(pdev)); + + if (vmsd) { + snprintf(name, sizeof(name), "%s.rom", vmsd->name); + } else { + snprintf(name, sizeof(name), "%s.rom", object_get_typename(OBJECT(pdev))); + } + pdev->has_rom = true; + memory_region_init_rom(&pdev->rom, OBJECT(pdev), name, pdev->romsize, &error_fatal); + ptr = memory_region_get_ram_ptr(&pdev->rom); + if (load_image_size(path, ptr, size) < 0) { + error_setg(errp, "failed to load romfile \"%s\"", pdev->romfile); + g_free(path); + return; + } + g_free(path); + + if (is_default_rom) { + /* Only the default rom images will be patched (if needed). */ + pci_patch_ids(pdev, ptr, size); + } + + pci_register_bar(pdev, PCI_ROM_SLOT, 0, &pdev->rom); +} + +static void pci_del_option_rom(PCIDevice *pdev) +{ + if (!pdev->has_rom) + return; + + vmstate_unregister_ram(&pdev->rom, &pdev->qdev); + pdev->has_rom = false; +} + +/* + * On success, pci_add_capability() returns a positive value + * that is the offset of the pci capability. + * On failure, it sets an error and returns a negative error + * code. 
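 *
 * Usage sketch (hypothetical vendor capability): reserve 8 bytes anywhere
 * in config space, then open one of its registers for guest writes:
 *
 *     int pos = pci_add_capability(dev, PCI_CAP_ID_VNDR, 0, 8, errp);
 *     if (pos < 0) {
 *         return;
 *     }
 *     pci_set_word(dev->wmask + pos + 4, 0xffff);
 *
 * Passing offset 0 lets pci_find_space() pick a free location; the new
 * capability is read-only until wmask bits are opened, as set up below.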
+ */ +int pci_add_capability(PCIDevice *pdev, uint8_t cap_id, + uint8_t offset, uint8_t size, + Error **errp) +{ + uint8_t *config; + int i, overlapping_cap; + + if (!offset) { + offset = pci_find_space(pdev, size); + /* out of PCI config space is programming error */ + assert(offset); + } else { + /* Verify that capabilities don't overlap. Note: device assignment + * depends on this check to verify that the device is not broken. + * Should never trigger for emulated devices, but it's helpful + * for debugging these. */ + for (i = offset; i < offset + size; i++) { + overlapping_cap = pci_find_capability_at_offset(pdev, i); + if (overlapping_cap) { + error_setg(errp, "%s:%02x:%02x.%x " + "Attempt to add PCI capability %x at offset " + "%x overlaps existing capability %x at offset %x", + pci_root_bus_path(pdev), pci_dev_bus_num(pdev), + PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn), + cap_id, offset, overlapping_cap, i); + return -EINVAL; + } + } + } + + config = pdev->config + offset; + config[PCI_CAP_LIST_ID] = cap_id; + config[PCI_CAP_LIST_NEXT] = pdev->config[PCI_CAPABILITY_LIST]; + pdev->config[PCI_CAPABILITY_LIST] = offset; + pdev->config[PCI_STATUS] |= PCI_STATUS_CAP_LIST; + memset(pdev->used + offset, 0xFF, QEMU_ALIGN_UP(size, 4)); + /* Make capability read-only by default */ + memset(pdev->wmask + offset, 0, size); + /* Check capability by default */ + memset(pdev->cmask + offset, 0xFF, size); + return offset; +} + +/* Unlink capability from the pci config space. */ +void pci_del_capability(PCIDevice *pdev, uint8_t cap_id, uint8_t size) +{ + uint8_t prev, offset = pci_find_capability_list(pdev, cap_id, &prev); + if (!offset) + return; + pdev->config[prev] = pdev->config[offset + PCI_CAP_LIST_NEXT]; + /* Make capability writable again */ + memset(pdev->wmask + offset, 0xff, size); + memset(pdev->w1cmask + offset, 0, size); + /* Clear cmask as device-specific registers can't be checked */ + memset(pdev->cmask + offset, 0, size); + memset(pdev->used + offset, 0, QEMU_ALIGN_UP(size, 4)); + + if (!pdev->config[PCI_CAPABILITY_LIST]) + pdev->config[PCI_STATUS] &= ~PCI_STATUS_CAP_LIST; +} + +uint8_t pci_find_capability(PCIDevice *pdev, uint8_t cap_id) +{ + return pci_find_capability_list(pdev, cap_id, NULL); +} + +static void pcibus_dev_print(Monitor *mon, DeviceState *dev, int indent) +{ + PCIDevice *d = (PCIDevice *)dev; + const pci_class_desc *desc; + char ctxt[64]; + PCIIORegion *r; + int i, class; + + class = pci_get_word(d->config + PCI_CLASS_DEVICE); + desc = pci_class_descriptions; + while (desc->desc && class != desc->class) + desc++; + if (desc->desc) { + snprintf(ctxt, sizeof(ctxt), "%s", desc->desc); + } else { + snprintf(ctxt, sizeof(ctxt), "Class %04x", class); + } + + monitor_printf(mon, "%*sclass %s, addr %02x:%02x.%x, " + "pci id %04x:%04x (sub %04x:%04x)\n", + indent, "", ctxt, pci_dev_bus_num(d), + PCI_SLOT(d->devfn), PCI_FUNC(d->devfn), + pci_get_word(d->config + PCI_VENDOR_ID), + pci_get_word(d->config + PCI_DEVICE_ID), + pci_get_word(d->config + PCI_SUBSYSTEM_VENDOR_ID), + pci_get_word(d->config + PCI_SUBSYSTEM_ID)); + for (i = 0; i < PCI_NUM_REGIONS; i++) { + r = &d->io_regions[i]; + if (!r->size) + continue; + monitor_printf(mon, "%*sbar %d: %s at 0x%"FMT_PCIBUS + " [0x%"FMT_PCIBUS"]\n", + indent, "", + i, r->type & PCI_BASE_ADDRESS_SPACE_IO ? 
"i/o" : "mem", + r->addr, r->addr + r->size - 1); + } +} + +static char *pci_dev_fw_name(DeviceState *dev, char *buf, int len) +{ + PCIDevice *d = (PCIDevice *)dev; + const char *name = NULL; + const pci_class_desc *desc = pci_class_descriptions; + int class = pci_get_word(d->config + PCI_CLASS_DEVICE); + + while (desc->desc && + (class & ~desc->fw_ign_bits) != + (desc->class & ~desc->fw_ign_bits)) { + desc++; + } + + if (desc->desc) { + name = desc->fw_name; + } + + if (name) { + pstrcpy(buf, len, name); + } else { + snprintf(buf, len, "pci%04x,%04x", + pci_get_word(d->config + PCI_VENDOR_ID), + pci_get_word(d->config + PCI_DEVICE_ID)); + } + + return buf; +} + +static char *pcibus_get_fw_dev_path(DeviceState *dev) +{ + PCIDevice *d = (PCIDevice *)dev; + char path[50], name[33]; + int off; + + off = snprintf(path, sizeof(path), "%s@%x", + pci_dev_fw_name(dev, name, sizeof name), + PCI_SLOT(d->devfn)); + if (PCI_FUNC(d->devfn)) + snprintf(path + off, sizeof(path) + off, ",%x", PCI_FUNC(d->devfn)); + return g_strdup(path); +} + +static char *pcibus_get_dev_path(DeviceState *dev) +{ + PCIDevice *d = container_of(dev, PCIDevice, qdev); + PCIDevice *t; + int slot_depth; + /* Path format: Domain:00:Slot.Function:Slot.Function....:Slot.Function. + * 00 is added here to make this format compatible with + * domain:Bus:Slot.Func for systems without nested PCI bridges. + * Slot.Function list specifies the slot and function numbers for all + * devices on the path from root to the specific device. */ + const char *root_bus_path; + int root_bus_len; + char slot[] = ":SS.F"; + int slot_len = sizeof slot - 1 /* For '\0' */; + int path_len; + char *path, *p; + int s; + + root_bus_path = pci_root_bus_path(d); + root_bus_len = strlen(root_bus_path); + + /* Calculate # of slots on path between device and root. */; + slot_depth = 0; + for (t = d; t; t = pci_get_bus(t)->parent_dev) { + ++slot_depth; + } + + path_len = root_bus_len + slot_len * slot_depth; + + /* Allocate memory, fill in the terminating null byte. */ + path = g_malloc(path_len + 1 /* For '\0' */); + path[path_len] = '\0'; + + memcpy(path, root_bus_path, root_bus_len); + + /* Fill in slot numbers. We walk up from device to root, so need to print + * them in the reverse order, last to first. 
*/ + p = path + path_len; + for (t = d; t; t = pci_get_bus(t)->parent_dev) { + p -= slot_len; + s = snprintf(slot, sizeof slot, ":%02x.%x", + PCI_SLOT(t->devfn), PCI_FUNC(t->devfn)); + assert(s == slot_len); + memcpy(p, slot, slot_len); + } + + return path; +} + +static int pci_qdev_find_recursive(PCIBus *bus, + const char *id, PCIDevice **pdev) +{ + DeviceState *qdev = qdev_find_recursive(&bus->qbus, id); + if (!qdev) { + return -ENODEV; + } + + /* roughly check if given qdev is pci device */ + if (object_dynamic_cast(OBJECT(qdev), TYPE_PCI_DEVICE)) { + *pdev = PCI_DEVICE(qdev); + return 0; + } + return -EINVAL; +} + +int pci_qdev_find_device(const char *id, PCIDevice **pdev) +{ + PCIHostState *host_bridge; + int rc = -ENODEV; + + QLIST_FOREACH(host_bridge, &pci_host_bridges, next) { + int tmp = pci_qdev_find_recursive(host_bridge->bus, id, pdev); + if (!tmp) { + rc = 0; + break; + } + if (tmp != -ENODEV) { + rc = tmp; + } + } + + return rc; +} + +MemoryRegion *pci_address_space(PCIDevice *dev) +{ + return pci_get_bus(dev)->address_space_mem; +} + +MemoryRegion *pci_address_space_io(PCIDevice *dev) +{ + return pci_get_bus(dev)->address_space_io; +} + +static void pci_device_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *k = DEVICE_CLASS(klass); + + k->realize = pci_qdev_realize; + k->unrealize = pci_qdev_unrealize; + k->bus_type = TYPE_PCI_BUS; + device_class_set_props(k, pci_props); +} + +static void pci_device_class_base_init(ObjectClass *klass, void *data) +{ + if (!object_class_is_abstract(klass)) { + ObjectClass *conventional = + object_class_dynamic_cast(klass, INTERFACE_CONVENTIONAL_PCI_DEVICE); + ObjectClass *pcie = + object_class_dynamic_cast(klass, INTERFACE_PCIE_DEVICE); + assert(conventional || pcie); + } +} + +AddressSpace *pci_device_iommu_address_space(PCIDevice *dev) +{ + PCIBus *bus = pci_get_bus(dev); + PCIBus *iommu_bus = bus; + uint8_t devfn = dev->devfn; + + while (iommu_bus && !iommu_bus->iommu_fn && iommu_bus->parent_dev) { + PCIBus *parent_bus = pci_get_bus(iommu_bus->parent_dev); + + /* + * The requester ID of the provided device may be aliased, as seen from + * the IOMMU, due to topology limitations. The IOMMU relies on a + * requester ID to provide a unique AddressSpace for devices, but + * conventional PCI buses pre-date such concepts. Instead, the PCIe- + * to-PCI bridge creates and accepts transactions on behalf of down- + * stream devices. When doing so, all downstream devices are masked + * (aliased) behind a single requester ID. The requester ID used + * depends on the format of the bridge devices. Proper PCIe-to-PCI + * bridges, with a PCIe capability indicating such, follow the + * guidelines of chapter 2.3 of the PCIe-to-PCI/X bridge specification, + * where the bridge uses the secondary bus as the bridge portion of the + * requester ID and devfn of 00.0. For other bridges, typically those + * found on the root complex such as the dmi-to-pci-bridge, we follow + * the convention of typical bare-metal hardware, which uses the + * requester ID of the bridge itself. There are device-specific + * exceptions to these rules, but these are the defaults that the + * Linux kernel uses when determining DMA aliases itself and are + * believed to be true for the bare metal equivalents of the devices + * emulated in QEMU. 
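 *
 * As a concrete sketch (hypothetical state type): a host bridge installs
 * its translation hook once with pci_setup_iommu(), and the code below
 * hands that hook the effective (bus, devfn) after alias resolution:
 *
 *     static AddressSpace *my_iommu_fn(PCIBus *bus, void *opaque, int devfn)
 *     {
 *         MyIOMMUState *s = opaque;        // assumed state type
 *         return &s->as[PCI_SLOT(devfn)];  // e.g. one AS per slot
 *     }
 *
 *     pci_setup_iommu(bus, my_iommu_fn, s);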
+ */ + if (!pci_bus_is_express(iommu_bus)) { + PCIDevice *parent = iommu_bus->parent_dev; + + if (pci_is_express(parent) && + pcie_cap_get_type(parent) == PCI_EXP_TYPE_PCI_BRIDGE) { + devfn = PCI_DEVFN(0, 0); + bus = iommu_bus; + } else { + devfn = parent->devfn; + bus = parent_bus; + } + } + + iommu_bus = parent_bus; + } + if (!pci_bus_bypass_iommu(bus) && iommu_bus && iommu_bus->iommu_fn) { + return iommu_bus->iommu_fn(bus, iommu_bus->iommu_opaque, devfn); + } + return &address_space_memory; +} + +void pci_setup_iommu(PCIBus *bus, PCIIOMMUFunc fn, void *opaque) +{ + bus->iommu_fn = fn; + bus->iommu_opaque = opaque; +} + +static void pci_dev_get_w64(PCIBus *b, PCIDevice *dev, void *opaque) +{ + Range *range = opaque; + PCIDeviceClass *pc = PCI_DEVICE_GET_CLASS(dev); + uint16_t cmd = pci_get_word(dev->config + PCI_COMMAND); + int i; + + if (!(cmd & PCI_COMMAND_MEMORY)) { + return; + } + + if (pc->is_bridge) { + pcibus_t base = pci_bridge_get_base(dev, PCI_BASE_ADDRESS_MEM_PREFETCH); + pcibus_t limit = pci_bridge_get_limit(dev, PCI_BASE_ADDRESS_MEM_PREFETCH); + + base = MAX(base, 0x1ULL << 32); + + if (limit >= base) { + Range pref_range; + range_set_bounds(&pref_range, base, limit); + range_extend(range, &pref_range); + } + } + for (i = 0; i < PCI_NUM_REGIONS; ++i) { + PCIIORegion *r = &dev->io_regions[i]; + pcibus_t lob, upb; + Range region_range; + + if (!r->size || + (r->type & PCI_BASE_ADDRESS_SPACE_IO) || + !(r->type & PCI_BASE_ADDRESS_MEM_TYPE_64)) { + continue; + } + + lob = pci_bar_address(dev, i, r->type, r->size); + upb = lob + r->size - 1; + if (lob == PCI_BAR_UNMAPPED) { + continue; + } + + lob = MAX(lob, 0x1ULL << 32); + + if (upb >= lob) { + range_set_bounds(®ion_range, lob, upb); + range_extend(range, ®ion_range); + } + } +} + +void pci_bus_get_w64_range(PCIBus *bus, Range *range) +{ + range_make_empty(range); + pci_for_each_device_under_bus(bus, pci_dev_get_w64, range); +} + +static bool pcie_has_upstream_port(PCIDevice *dev) +{ + PCIDevice *parent_dev = pci_bridge_get_device(pci_get_bus(dev)); + + /* Device associated with an upstream port. + * As there are several types of these, it's easier to check the + * parent device: upstream ports are always connected to + * root or downstream ports. 
+ */ + return parent_dev && + pci_is_express(parent_dev) && + parent_dev->exp.exp_cap && + (pcie_cap_get_type(parent_dev) == PCI_EXP_TYPE_ROOT_PORT || + pcie_cap_get_type(parent_dev) == PCI_EXP_TYPE_DOWNSTREAM); +} + +PCIDevice *pci_get_function_0(PCIDevice *pci_dev) +{ + PCIBus *bus = pci_get_bus(pci_dev); + + if (pcie_has_upstream_port(pci_dev)) { + /* With an upstream PCIe port, we only support 1 device at slot 0 */ + return bus->devices[0]; + } else { + /* Other bus types might support multiple devices at slots 0-31 */ + return bus->devices[PCI_DEVFN(PCI_SLOT(pci_dev->devfn), 0)]; + } +} + +MSIMessage pci_get_msi_message(PCIDevice *dev, int vector) +{ + MSIMessage msg; + if (msix_enabled(dev)) { + msg = msix_get_message(dev, vector); + } else if (msi_enabled(dev)) { + msg = msi_get_message(dev, vector); + } else { + /* Should never happen */ + error_report("%s: unknown interrupt type", __func__); + abort(); + } + return msg; +} + +void pci_set_power(PCIDevice *d, bool state) +{ + if (d->has_power == state) { + return; + } + + d->has_power = state; + pci_update_mappings(d); + memory_region_set_enabled(&d->bus_master_enable_region, + (pci_get_word(d->config + PCI_COMMAND) + & PCI_COMMAND_MASTER) && d->has_power); + if (!d->has_power) { + pci_device_reset(d); + } +} + +static const TypeInfo pci_device_type_info = { + .name = TYPE_PCI_DEVICE, + .parent = TYPE_DEVICE, + .instance_size = sizeof(PCIDevice), + .abstract = true, + .class_size = sizeof(PCIDeviceClass), + .class_init = pci_device_class_init, + .class_base_init = pci_device_class_base_init, +}; + +static void pci_register_types(void) +{ + type_register_static(&pci_bus_info); + type_register_static(&pcie_bus_info); + type_register_static(&conventional_pci_interface_info); + type_register_static(&pcie_interface_info); + type_register_static(&pci_device_type_info); +} + +type_init(pci_register_types) diff --git a/hw/pci/pci_bridge.c b/hw/pci/pci_bridge.c new file mode 100644 index 000000000..da34c8ebc --- /dev/null +++ b/hw/pci/pci_bridge.c @@ -0,0 +1,482 @@ +/* + * QEMU PCI bus manager + * + * Copyright (c) 2004 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ +/* + * split out from pci.c + * Copyright (c) 2010 Isaku Yamahata + * VA Linux Systems Japan K.K. 
+ */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "hw/pci/pci_bridge.h" +#include "hw/pci/pci_bus.h" +#include "qemu/module.h" +#include "qemu/range.h" +#include "qapi/error.h" + +/* PCI bridge subsystem vendor ID helper functions */ +#define PCI_SSVID_SIZEOF 8 +#define PCI_SSVID_SVID 4 +#define PCI_SSVID_SSID 6 + +int pci_bridge_ssvid_init(PCIDevice *dev, uint8_t offset, + uint16_t svid, uint16_t ssid, + Error **errp) +{ + int pos; + + pos = pci_add_capability(dev, PCI_CAP_ID_SSVID, offset, + PCI_SSVID_SIZEOF, errp); + if (pos < 0) { + return pos; + } + + pci_set_word(dev->config + pos + PCI_SSVID_SVID, svid); + pci_set_word(dev->config + pos + PCI_SSVID_SSID, ssid); + return pos; +} + +/* Accessor function to get parent bridge device from pci bus. */ +PCIDevice *pci_bridge_get_device(PCIBus *bus) +{ + return bus->parent_dev; +} + +/* Accessor function to get secondary bus from pci-to-pci bridge device */ +PCIBus *pci_bridge_get_sec_bus(PCIBridge *br) +{ + return &br->sec_bus; +} + +static uint32_t pci_config_get_io_base(const PCIDevice *d, + uint32_t base, uint32_t base_upper16) +{ + uint32_t val; + + val = ((uint32_t)d->config[base] & PCI_IO_RANGE_MASK) << 8; + if (d->config[base] & PCI_IO_RANGE_TYPE_32) { + val |= (uint32_t)pci_get_word(d->config + base_upper16) << 16; + } + return val; +} + +static pcibus_t pci_config_get_memory_base(const PCIDevice *d, uint32_t base) +{ + return ((pcibus_t)pci_get_word(d->config + base) & PCI_MEMORY_RANGE_MASK) + << 16; +} + +static pcibus_t pci_config_get_pref_base(const PCIDevice *d, + uint32_t base, uint32_t upper) +{ + pcibus_t tmp; + pcibus_t val; + + tmp = (pcibus_t)pci_get_word(d->config + base); + val = (tmp & PCI_PREF_RANGE_MASK) << 16; + if (tmp & PCI_PREF_RANGE_TYPE_64) { + val |= (pcibus_t)pci_get_long(d->config + upper) << 32; + } + return val; +} + +/* accessor function to get bridge filtering base address */ +pcibus_t pci_bridge_get_base(const PCIDevice *bridge, uint8_t type) +{ + pcibus_t base; + if (type & PCI_BASE_ADDRESS_SPACE_IO) { + base = pci_config_get_io_base(bridge, + PCI_IO_BASE, PCI_IO_BASE_UPPER16); + } else { + if (type & PCI_BASE_ADDRESS_MEM_PREFETCH) { + base = pci_config_get_pref_base( + bridge, PCI_PREF_MEMORY_BASE, PCI_PREF_BASE_UPPER32); + } else { + base = pci_config_get_memory_base(bridge, PCI_MEMORY_BASE); + } + } + + return base; +} + +/* accessor function to get bridge filtering limit */ +pcibus_t pci_bridge_get_limit(const PCIDevice *bridge, uint8_t type) +{ + pcibus_t limit; + if (type & PCI_BASE_ADDRESS_SPACE_IO) { + limit = pci_config_get_io_base(bridge, + PCI_IO_LIMIT, PCI_IO_LIMIT_UPPER16); + limit |= 0xfff; /* PCI bridge spec 3.2.5.6. */ + } else { + if (type & PCI_BASE_ADDRESS_MEM_PREFETCH) { + limit = pci_config_get_pref_base( + bridge, PCI_PREF_MEMORY_LIMIT, PCI_PREF_LIMIT_UPPER32); + } else { + limit = pci_config_get_memory_base(bridge, PCI_MEMORY_LIMIT); + } + limit |= 0xfffff; /* PCI bridge spec 3.2.5.{1, 8}. */ + } + return limit; +} + +static void pci_bridge_init_alias(PCIBridge *bridge, MemoryRegion *alias, + uint8_t type, const char *name, + MemoryRegion *space, + MemoryRegion *parent_space, + bool enabled) +{ + PCIDevice *bridge_dev = PCI_DEVICE(bridge); + pcibus_t base = pci_bridge_get_base(bridge_dev, type); + pcibus_t limit = pci_bridge_get_limit(bridge_dev, type); + /* TODO: this doesn't handle base = 0 limit = 2^64 - 1 correctly. + * Apparently no way to do this with existing memory APIs. */ + pcibus_t size = enabled && limit >= base ? 
limit + 1 - base : 0; + + memory_region_init_alias(alias, OBJECT(bridge), name, space, base, size); + memory_region_add_subregion_overlap(parent_space, base, alias, 1); +} + +static void pci_bridge_init_vga_aliases(PCIBridge *br, PCIBus *parent, + MemoryRegion *alias_vga) +{ + PCIDevice *pd = PCI_DEVICE(br); + uint16_t brctl = pci_get_word(pd->config + PCI_BRIDGE_CONTROL); + + memory_region_init_alias(&alias_vga[QEMU_PCI_VGA_IO_LO], OBJECT(br), + "pci_bridge_vga_io_lo", &br->address_space_io, + QEMU_PCI_VGA_IO_LO_BASE, QEMU_PCI_VGA_IO_LO_SIZE); + memory_region_init_alias(&alias_vga[QEMU_PCI_VGA_IO_HI], OBJECT(br), + "pci_bridge_vga_io_hi", &br->address_space_io, + QEMU_PCI_VGA_IO_HI_BASE, QEMU_PCI_VGA_IO_HI_SIZE); + memory_region_init_alias(&alias_vga[QEMU_PCI_VGA_MEM], OBJECT(br), + "pci_bridge_vga_mem", &br->address_space_mem, + QEMU_PCI_VGA_MEM_BASE, QEMU_PCI_VGA_MEM_SIZE); + + if (brctl & PCI_BRIDGE_CTL_VGA) { + pci_register_vga(pd, &alias_vga[QEMU_PCI_VGA_MEM], + &alias_vga[QEMU_PCI_VGA_IO_LO], + &alias_vga[QEMU_PCI_VGA_IO_HI]); + } +} + +static PCIBridgeWindows *pci_bridge_region_init(PCIBridge *br) +{ + PCIDevice *pd = PCI_DEVICE(br); + PCIBus *parent = pci_get_bus(pd); + PCIBridgeWindows *w = g_new(PCIBridgeWindows, 1); + uint16_t cmd = pci_get_word(pd->config + PCI_COMMAND); + + pci_bridge_init_alias(br, &w->alias_pref_mem, + PCI_BASE_ADDRESS_MEM_PREFETCH, + "pci_bridge_pref_mem", + &br->address_space_mem, + parent->address_space_mem, + cmd & PCI_COMMAND_MEMORY); + pci_bridge_init_alias(br, &w->alias_mem, + PCI_BASE_ADDRESS_SPACE_MEMORY, + "pci_bridge_mem", + &br->address_space_mem, + parent->address_space_mem, + cmd & PCI_COMMAND_MEMORY); + pci_bridge_init_alias(br, &w->alias_io, + PCI_BASE_ADDRESS_SPACE_IO, + "pci_bridge_io", + &br->address_space_io, + parent->address_space_io, + cmd & PCI_COMMAND_IO); + + pci_bridge_init_vga_aliases(br, parent, w->alias_vga); + + return w; +} + +static void pci_bridge_region_del(PCIBridge *br, PCIBridgeWindows *w) +{ + PCIDevice *pd = PCI_DEVICE(br); + PCIBus *parent = pci_get_bus(pd); + + memory_region_del_subregion(parent->address_space_io, &w->alias_io); + memory_region_del_subregion(parent->address_space_mem, &w->alias_mem); + memory_region_del_subregion(parent->address_space_mem, &w->alias_pref_mem); + pci_unregister_vga(pd); +} + +static void pci_bridge_region_cleanup(PCIBridge *br, PCIBridgeWindows *w) +{ + object_unparent(OBJECT(&w->alias_io)); + object_unparent(OBJECT(&w->alias_mem)); + object_unparent(OBJECT(&w->alias_pref_mem)); + object_unparent(OBJECT(&w->alias_vga[QEMU_PCI_VGA_IO_LO])); + object_unparent(OBJECT(&w->alias_vga[QEMU_PCI_VGA_IO_HI])); + object_unparent(OBJECT(&w->alias_vga[QEMU_PCI_VGA_MEM])); + g_free(w); +} + +void pci_bridge_update_mappings(PCIBridge *br) +{ + PCIBridgeWindows *w = br->windows; + + /* Make updates atomic to: handle the case of one VCPU updating the bridge + * while another accesses an unaffected region. 
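 *
 * The transaction API nests, so a caller updating several bridges can
 * batch them as well (illustrative):
 *
 *     memory_region_transaction_begin();
 *     pci_bridge_update_mappings(br1);
 *     pci_bridge_update_mappings(br2);
 *     memory_region_transaction_commit();
 *
 * with the new windows becoming visible only at the outermost commit.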
*/ + memory_region_transaction_begin(); + pci_bridge_region_del(br, br->windows); + pci_bridge_region_cleanup(br, w); + br->windows = pci_bridge_region_init(br); + memory_region_transaction_commit(); +} + +/* default write_config function for PCI-to-PCI bridge */ +void pci_bridge_write_config(PCIDevice *d, + uint32_t address, uint32_t val, int len) +{ + PCIBridge *s = PCI_BRIDGE(d); + uint16_t oldctl = pci_get_word(d->config + PCI_BRIDGE_CONTROL); + uint16_t newctl; + + pci_default_write_config(d, address, val, len); + + if (ranges_overlap(address, len, PCI_COMMAND, 2) || + + /* io base/limit */ + ranges_overlap(address, len, PCI_IO_BASE, 2) || + + /* memory base/limit, prefetchable base/limit and + io base/limit upper 16 */ + ranges_overlap(address, len, PCI_MEMORY_BASE, 20) || + + /* vga enable */ + ranges_overlap(address, len, PCI_BRIDGE_CONTROL, 2)) { + pci_bridge_update_mappings(s); + } + + newctl = pci_get_word(d->config + PCI_BRIDGE_CONTROL); + if (~oldctl & newctl & PCI_BRIDGE_CTL_BUS_RESET) { + /* Trigger hot reset on 0->1 transition. */ + qbus_reset_all(BUS(&s->sec_bus)); + } +} + +void pci_bridge_disable_base_limit(PCIDevice *dev) +{ + uint8_t *conf = dev->config; + + pci_byte_test_and_set_mask(conf + PCI_IO_BASE, + PCI_IO_RANGE_MASK & 0xff); + pci_byte_test_and_clear_mask(conf + PCI_IO_LIMIT, + PCI_IO_RANGE_MASK & 0xff); + pci_word_test_and_set_mask(conf + PCI_MEMORY_BASE, + PCI_MEMORY_RANGE_MASK & 0xffff); + pci_word_test_and_clear_mask(conf + PCI_MEMORY_LIMIT, + PCI_MEMORY_RANGE_MASK & 0xffff); + pci_word_test_and_set_mask(conf + PCI_PREF_MEMORY_BASE, + PCI_PREF_RANGE_MASK & 0xffff); + pci_word_test_and_clear_mask(conf + PCI_PREF_MEMORY_LIMIT, + PCI_PREF_RANGE_MASK & 0xffff); + pci_set_long(conf + PCI_PREF_BASE_UPPER32, 0); + pci_set_long(conf + PCI_PREF_LIMIT_UPPER32, 0); +} + +/* reset bridge specific configuration registers */ +void pci_bridge_reset(DeviceState *qdev) +{ + PCIDevice *dev = PCI_DEVICE(qdev); + uint8_t *conf = dev->config; + + conf[PCI_PRIMARY_BUS] = 0; + conf[PCI_SECONDARY_BUS] = 0; + conf[PCI_SUBORDINATE_BUS] = 0; + conf[PCI_SEC_LATENCY_TIMER] = 0; + + /* + * the default values for base/limit registers aren't specified + * in the PCI-to-PCI-bridge spec. So we don't touch them here. + * Each implementation can override it. + * typical implementation does + * zero base/limit registers or + * disable forwarding: pci_bridge_disable_base_limit() + * If disable forwarding is wanted, call pci_bridge_disable_base_limit() + * after this function. 
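 *
 * For example (hypothetical bridge model), a reset handler that wants
 * forwarding disabled would do:
 *
 *     static void my_bridge_reset(DeviceState *qdev)
 *     {
 *         pci_bridge_reset(qdev);
 *         pci_bridge_disable_base_limit(PCI_DEVICE(qdev));
 *     }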
+ */ + pci_byte_test_and_clear_mask(conf + PCI_IO_BASE, + PCI_IO_RANGE_MASK & 0xff); + pci_byte_test_and_clear_mask(conf + PCI_IO_LIMIT, + PCI_IO_RANGE_MASK & 0xff); + pci_word_test_and_clear_mask(conf + PCI_MEMORY_BASE, + PCI_MEMORY_RANGE_MASK & 0xffff); + pci_word_test_and_clear_mask(conf + PCI_MEMORY_LIMIT, + PCI_MEMORY_RANGE_MASK & 0xffff); + pci_word_test_and_clear_mask(conf + PCI_PREF_MEMORY_BASE, + PCI_PREF_RANGE_MASK & 0xffff); + pci_word_test_and_clear_mask(conf + PCI_PREF_MEMORY_LIMIT, + PCI_PREF_RANGE_MASK & 0xffff); + pci_set_long(conf + PCI_PREF_BASE_UPPER32, 0); + pci_set_long(conf + PCI_PREF_LIMIT_UPPER32, 0); + + pci_set_word(conf + PCI_BRIDGE_CONTROL, 0); +} + +/* default qdev initialization function for PCI-to-PCI bridge */ +void pci_bridge_initfn(PCIDevice *dev, const char *typename) +{ + PCIBus *parent = pci_get_bus(dev); + PCIBridge *br = PCI_BRIDGE(dev); + PCIBus *sec_bus = &br->sec_bus; + + pci_word_test_and_set_mask(dev->config + PCI_STATUS, + PCI_STATUS_66MHZ | PCI_STATUS_FAST_BACK); + + /* + * TODO: We implement VGA Enable in the Bridge Control Register + * therefore per the PCI to PCI bridge spec we must also implement + * VGA Palette Snooping. When done, set this bit writable: + * + * pci_word_test_and_set_mask(dev->wmask + PCI_COMMAND, + * PCI_COMMAND_VGA_PALETTE); + */ + + pci_config_set_class(dev->config, PCI_CLASS_BRIDGE_PCI); + dev->config[PCI_HEADER_TYPE] = + (dev->config[PCI_HEADER_TYPE] & PCI_HEADER_TYPE_MULTI_FUNCTION) | + PCI_HEADER_TYPE_BRIDGE; + pci_set_word(dev->config + PCI_SEC_STATUS, + PCI_STATUS_66MHZ | PCI_STATUS_FAST_BACK); + + /* + * If we don't specify the name, the bus will be addressed as .0, where + * id is the device id. + * Since PCI Bridge devices have a single bus each, we don't need the index: + * let users address the bus using the device name. + */ + if (!br->bus_name && dev->qdev.id && *dev->qdev.id) { + br->bus_name = dev->qdev.id; + } + + qbus_init(sec_bus, sizeof(br->sec_bus), typename, DEVICE(dev), + br->bus_name); + sec_bus->parent_dev = dev; + sec_bus->map_irq = br->map_irq ? br->map_irq : pci_swizzle_map_irq_fn; + sec_bus->address_space_mem = &br->address_space_mem; + memory_region_init(&br->address_space_mem, OBJECT(br), "pci_bridge_pci", UINT64_MAX); + sec_bus->address_space_io = &br->address_space_io; + memory_region_init(&br->address_space_io, OBJECT(br), "pci_bridge_io", + 4 * GiB); + br->windows = pci_bridge_region_init(br); + QLIST_INIT(&sec_bus->child); + QLIST_INSERT_HEAD(&parent->child, sec_bus, sibling); +} + +/* default qdev clean up function for PCI-to-PCI bridge */ +void pci_bridge_exitfn(PCIDevice *pci_dev) +{ + PCIBridge *s = PCI_BRIDGE(pci_dev); + assert(QLIST_EMPTY(&s->sec_bus.child)); + QLIST_REMOVE(&s->sec_bus, sibling); + pci_bridge_region_del(s, s->windows); + pci_bridge_region_cleanup(s, s->windows); + /* object_unparent() is called automatically during device deletion */ +} + +/* + * before qdev initialization(qdev_init()), this function sets bus_name and + * map_irq callback which are necessary for pci_bridge_initfn() to + * initialize bus. 
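 *
 * For instance (bus name assumed), a concrete bridge model would call
 *
 *     pci_bridge_map_irq(br, "my-sec-bus", pci_swizzle_map_irq_fn);
 *
 * before realize, so pci_bridge_initfn() wires the secondary bus with
 * that mapping.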
+ */ +void pci_bridge_map_irq(PCIBridge *br, const char* bus_name, + pci_map_irq_fn map_irq) +{ + br->map_irq = map_irq; + br->bus_name = bus_name; +} + + +int pci_bridge_qemu_reserve_cap_init(PCIDevice *dev, int cap_offset, + PCIResReserve res_reserve, Error **errp) +{ + if (res_reserve.mem_pref_32 != (uint64_t)-1 && + res_reserve.mem_pref_64 != (uint64_t)-1) { + error_setg(errp, + "PCI resource reserve cap: PREF32 and PREF64 conflict"); + return -EINVAL; + } + + if (res_reserve.mem_non_pref != (uint64_t)-1 && + res_reserve.mem_non_pref >= 4 * GiB) { + error_setg(errp, + "PCI resource reserve cap: mem-reserve must be less than 4G"); + return -EINVAL; + } + + if (res_reserve.mem_pref_32 != (uint64_t)-1 && + res_reserve.mem_pref_32 >= 4 * GiB) { + error_setg(errp, + "PCI resource reserve cap: pref32-reserve must be less than 4G"); + return -EINVAL; + } + + if (res_reserve.bus == (uint32_t)-1 && + res_reserve.io == (uint64_t)-1 && + res_reserve.mem_non_pref == (uint64_t)-1 && + res_reserve.mem_pref_32 == (uint64_t)-1 && + res_reserve.mem_pref_64 == (uint64_t)-1) { + return 0; + } + + size_t cap_len = sizeof(PCIBridgeQemuCap); + PCIBridgeQemuCap cap = { + .len = cap_len, + .type = REDHAT_PCI_CAP_RESOURCE_RESERVE, + .bus_res = cpu_to_le32(res_reserve.bus), + .io = cpu_to_le64(res_reserve.io), + .mem = cpu_to_le32(res_reserve.mem_non_pref), + .mem_pref_32 = cpu_to_le32(res_reserve.mem_pref_32), + .mem_pref_64 = cpu_to_le64(res_reserve.mem_pref_64) + }; + + int offset = pci_add_capability(dev, PCI_CAP_ID_VNDR, + cap_offset, cap_len, errp); + if (offset < 0) { + return offset; + } + + memcpy(dev->config + offset + PCI_CAP_FLAGS, + (char *)&cap + PCI_CAP_FLAGS, + cap_len - PCI_CAP_FLAGS); + return 0; +} + +static const TypeInfo pci_bridge_type_info = { + .name = TYPE_PCI_BRIDGE, + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(PCIBridge), + .abstract = true, +}; + +static void pci_bridge_register_types(void) +{ + type_register_static(&pci_bridge_type_info); +} + +type_init(pci_bridge_register_types) diff --git a/hw/pci/pci_host.c b/hw/pci/pci_host.c new file mode 100644 index 000000000..7beafd40a --- /dev/null +++ b/hw/pci/pci_host.c @@ -0,0 +1,252 @@ +/* + * pci_host.c + * + * Copyright (c) 2009 Isaku Yamahata + * VA Linux Systems Japan K.K. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . + */ + +#include "qemu/osdep.h" +#include "hw/pci/pci.h" +#include "hw/pci/pci_bridge.h" +#include "hw/pci/pci_host.h" +#include "hw/qdev-properties.h" +#include "qemu/module.h" +#include "hw/pci/pci_bus.h" +#include "migration/vmstate.h" +#include "trace.h" + +/* debug PCI */ +//#define DEBUG_PCI + +#ifdef DEBUG_PCI +#define PCI_DPRINTF(fmt, ...) \ +do { printf("pci_host_data: " fmt , ## __VA_ARGS__); } while (0) +#else +#define PCI_DPRINTF(fmt, ...) 
+#endif + +/* + * PCI address + * bit 16 - 24: bus number + * bit 8 - 15: devfun number + * bit 0 - 7: offset in configuration space of a given pci device + */ + +/* the helper function to get a PCIDevice* for a given pci address */ +static inline PCIDevice *pci_dev_find_by_addr(PCIBus *bus, uint32_t addr) +{ + uint8_t bus_num = addr >> 16; + uint8_t devfn = addr >> 8; + + return pci_find_device(bus, bus_num, devfn); +} + +static void pci_adjust_config_limit(PCIBus *bus, uint32_t *limit) +{ + if ((*limit > PCI_CONFIG_SPACE_SIZE) && + !pci_bus_allows_extended_config_space(bus)) { + *limit = PCI_CONFIG_SPACE_SIZE; + } +} + +void pci_host_config_write_common(PCIDevice *pci_dev, uint32_t addr, + uint32_t limit, uint32_t val, uint32_t len) +{ + pci_adjust_config_limit(pci_get_bus(pci_dev), &limit); + if (limit <= addr) { + return; + } + + assert(len <= 4); + /* non-zero functions are only exposed when function 0 is present, + * allowing direct removal of unexposed functions. + */ + if ((pci_dev->qdev.hotplugged && !pci_get_function_0(pci_dev)) || + !pci_dev->has_power) { + return; + } + + trace_pci_cfg_write(pci_dev->name, PCI_SLOT(pci_dev->devfn), + PCI_FUNC(pci_dev->devfn), addr, val); + pci_dev->config_write(pci_dev, addr, val, MIN(len, limit - addr)); +} + +uint32_t pci_host_config_read_common(PCIDevice *pci_dev, uint32_t addr, + uint32_t limit, uint32_t len) +{ + uint32_t ret; + + pci_adjust_config_limit(pci_get_bus(pci_dev), &limit); + if (limit <= addr) { + return ~0x0; + } + + assert(len <= 4); + /* non-zero functions are only exposed when function 0 is present, + * allowing direct removal of unexposed functions. + */ + if ((pci_dev->qdev.hotplugged && !pci_get_function_0(pci_dev)) || + !pci_dev->has_power) { + return ~0x0; + } + + ret = pci_dev->config_read(pci_dev, addr, MIN(len, limit - addr)); + trace_pci_cfg_read(pci_dev->name, PCI_SLOT(pci_dev->devfn), + PCI_FUNC(pci_dev->devfn), addr, ret); + + return ret; +} + +void pci_data_write(PCIBus *s, uint32_t addr, uint32_t val, unsigned len) +{ + PCIDevice *pci_dev = pci_dev_find_by_addr(s, addr); + uint32_t config_addr = addr & (PCI_CONFIG_SPACE_SIZE - 1); + + if (!pci_dev) { + return; + } + + pci_host_config_write_common(pci_dev, config_addr, PCI_CONFIG_SPACE_SIZE, + val, len); +} + +uint32_t pci_data_read(PCIBus *s, uint32_t addr, unsigned len) +{ + PCIDevice *pci_dev = pci_dev_find_by_addr(s, addr); + uint32_t config_addr = addr & (PCI_CONFIG_SPACE_SIZE - 1); + + if (!pci_dev) { + return ~0x0; + } + + return pci_host_config_read_common(pci_dev, config_addr, + PCI_CONFIG_SPACE_SIZE, len); +} + +static void pci_host_config_write(void *opaque, hwaddr addr, + uint64_t val, unsigned len) +{ + PCIHostState *s = opaque; + + PCI_DPRINTF("%s addr " TARGET_FMT_plx " len %d val %"PRIx64"\n", + __func__, addr, len, val); + if (addr != 0 || len != 4) { + return; + } + s->config_reg = val; +} + +static uint64_t pci_host_config_read(void *opaque, hwaddr addr, + unsigned len) +{ + PCIHostState *s = opaque; + uint32_t val = s->config_reg; + + PCI_DPRINTF("%s addr " TARGET_FMT_plx " len %d val %"PRIx32"\n", + __func__, addr, len, val); + return val; +} + +static void pci_host_data_write(void *opaque, hwaddr addr, + uint64_t val, unsigned len) +{ + PCIHostState *s = opaque; + + if (s->config_reg & (1u << 31)) + pci_data_write(s->bus, s->config_reg | (addr & 3), val, len); +} + +static uint64_t pci_host_data_read(void *opaque, + hwaddr addr, unsigned len) +{ + PCIHostState *s = opaque; + + if (!(s->config_reg & (1U << 31))) { + return 0xffffffff; + } + 
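/*
 * Worked sketch of the address layout described above: for bus 1,
 * device 2, function 3, config offset 0x10 a guest would program
 *
 *     config_reg = (1u << 31)             // enable bit, tested here
 *                | (1u << 16)             // bus number
 *                | (PCI_DEVFN(2, 3) << 8) // devfn = 0x13
 *                | 0x10;                  // offset within config space
 *
 * and the data-port access below then reaches that device's register.
 */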
return pci_data_read(s->bus, s->config_reg | (addr & 3), len); +} + +const MemoryRegionOps pci_host_conf_le_ops = { + .read = pci_host_config_read, + .write = pci_host_config_write, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +const MemoryRegionOps pci_host_conf_be_ops = { + .read = pci_host_config_read, + .write = pci_host_config_write, + .endianness = DEVICE_BIG_ENDIAN, +}; + +const MemoryRegionOps pci_host_data_le_ops = { + .read = pci_host_data_read, + .write = pci_host_data_write, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +const MemoryRegionOps pci_host_data_be_ops = { + .read = pci_host_data_read, + .write = pci_host_data_write, + .endianness = DEVICE_BIG_ENDIAN, +}; + +static bool pci_host_needed(void *opaque) +{ + PCIHostState *s = opaque; + return s->mig_enabled; +} + +const VMStateDescription vmstate_pcihost = { + .name = "PCIHost", + .needed = pci_host_needed, + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(config_reg, PCIHostState), + VMSTATE_END_OF_LIST() + } +}; + +static Property pci_host_properties_common[] = { + DEFINE_PROP_BOOL("x-config-reg-migration-enabled", PCIHostState, + mig_enabled, true), + DEFINE_PROP_BOOL("bypass-iommu", PCIHostState, bypass_iommu, false), + DEFINE_PROP_END_OF_LIST(), +}; + +static void pci_host_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + device_class_set_props(dc, pci_host_properties_common); + dc->vmsd = &vmstate_pcihost; +} + +static const TypeInfo pci_host_type_info = { + .name = TYPE_PCI_HOST_BRIDGE, + .parent = TYPE_SYS_BUS_DEVICE, + .abstract = true, + .class_size = sizeof(PCIHostBridgeClass), + .instance_size = sizeof(PCIHostState), + .class_init = pci_host_class_init, +}; + +static void pci_host_register_types(void) +{ + type_register_static(&pci_host_type_info); +} + +type_init(pci_host_register_types) diff --git a/hw/pci/pcie.c b/hw/pci/pcie.c new file mode 100644 index 000000000..d7d73a31e --- /dev/null +++ b/hw/pci/pcie.c @@ -0,0 +1,1079 @@ +/* + * pcie.c + * + * Copyright (c) 2010 Isaku Yamahata + * VA Linux Systems Japan K.K. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "hw/pci/pci_bridge.h" +#include "hw/pci/pcie.h" +#include "hw/pci/msix.h" +#include "hw/pci/msi.h" +#include "hw/pci/pci_bus.h" +#include "hw/pci/pcie_regs.h" +#include "hw/pci/pcie_port.h" +#include "qemu/range.h" + +//#define DEBUG_PCIE +#ifdef DEBUG_PCIE +# define PCIE_DPRINTF(fmt, ...) \ + fprintf(stderr, "%s:%d " fmt, __func__, __LINE__, ## __VA_ARGS__) +#else +# define PCIE_DPRINTF(fmt, ...) do {} while (0) +#endif +#define PCIE_DEV_PRINTF(dev, fmt, ...) 
\ + PCIE_DPRINTF("%s:%x "fmt, (dev)->name, (dev)->devfn, ## __VA_ARGS__) + + +/*************************************************************************** + * pci express capability helper functions + */ + +static void +pcie_cap_v1_fill(PCIDevice *dev, uint8_t port, uint8_t type, uint8_t version) +{ + uint8_t *exp_cap = dev->config + dev->exp.exp_cap; + uint8_t *cmask = dev->cmask + dev->exp.exp_cap; + + /* capability register + interrupt message number defaults to 0 */ + pci_set_word(exp_cap + PCI_EXP_FLAGS, + ((type << PCI_EXP_FLAGS_TYPE_SHIFT) & PCI_EXP_FLAGS_TYPE) | + version); + + /* device capability register + * table 7-12: + * role-based error reporting bit must be set by all + * Functions conforming to the ECN, PCI Express Base + * Specification, Revision 1.1, or subsequent PCI Express Base + * Specification revisions. + */ + pci_set_long(exp_cap + PCI_EXP_DEVCAP, PCI_EXP_DEVCAP_RBER); + + pci_set_long(exp_cap + PCI_EXP_LNKCAP, + (port << PCI_EXP_LNKCAP_PN_SHIFT) | + PCI_EXP_LNKCAP_ASPMS_0S | + QEMU_PCI_EXP_LNKCAP_MLW(QEMU_PCI_EXP_LNK_X1) | + QEMU_PCI_EXP_LNKCAP_MLS(QEMU_PCI_EXP_LNK_2_5GT)); + + pci_set_word(exp_cap + PCI_EXP_LNKSTA, + QEMU_PCI_EXP_LNKSTA_NLW(QEMU_PCI_EXP_LNK_X1) | + QEMU_PCI_EXP_LNKSTA_CLS(QEMU_PCI_EXP_LNK_2_5GT)); + + /* We changed link status bits over time, and changing them across + * migrations is generally fine as hardware changes them too. + * Let's not bother checking. + */ + pci_set_word(cmask + PCI_EXP_LNKSTA, 0); +} + +static void pcie_cap_fill_slot_lnk(PCIDevice *dev) +{ + PCIESlot *s = (PCIESlot *)object_dynamic_cast(OBJECT(dev), TYPE_PCIE_SLOT); + uint8_t *exp_cap = dev->config + dev->exp.exp_cap; + + /* Skip anything that isn't a PCIESlot */ + if (!s) { + return; + } + + /* Clear and fill LNKCAP from what was configured above */ + pci_long_test_and_clear_mask(exp_cap + PCI_EXP_LNKCAP, + PCI_EXP_LNKCAP_MLW | PCI_EXP_LNKCAP_SLS); + pci_long_test_and_set_mask(exp_cap + PCI_EXP_LNKCAP, + QEMU_PCI_EXP_LNKCAP_MLW(s->width) | + QEMU_PCI_EXP_LNKCAP_MLS(s->speed)); + + /* + * Link bandwidth notification is required for all root ports and + * downstream ports supporting links wider than x1 or multiple link + * speeds. + */ + if (s->width > QEMU_PCI_EXP_LNK_X1 || + s->speed > QEMU_PCI_EXP_LNK_2_5GT) { + pci_long_test_and_set_mask(exp_cap + PCI_EXP_LNKCAP, + PCI_EXP_LNKCAP_LBNC); + } + + if (s->speed > QEMU_PCI_EXP_LNK_2_5GT) { + /* + * Hot-plug capable downstream ports and downstream ports supporting + * link speeds greater than 5GT/s must hardwire PCI_EXP_LNKCAP_DLLLARC + * to 1b. PCI_EXP_LNKCAP_DLLLARC implies PCI_EXP_LNKSTA_DLLLA, which + * we also hardwire to 1b here. 2.5GT/s hot-plug slots should also + * technically implement this, but it's not done here for compatibility. + */ + pci_long_test_and_set_mask(exp_cap + PCI_EXP_LNKCAP, + PCI_EXP_LNKCAP_DLLLARC); + /* the PCI_EXP_LNKSTA_DLLLA will be set in the hotplug function */ + + /* + * Target Link Speed defaults to the highest link speed supported by + * the component. 2.5GT/s devices are permitted to hardwire to zero. + */ + pci_word_test_and_clear_mask(exp_cap + PCI_EXP_LNKCTL2, + PCI_EXP_LNKCTL2_TLS); + pci_word_test_and_set_mask(exp_cap + PCI_EXP_LNKCTL2, + QEMU_PCI_EXP_LNKCAP_MLS(s->speed) & + PCI_EXP_LNKCTL2_TLS); + } + + /* + * 2.5 & 5.0GT/s can be fully described by LNKCAP, but 8.0GT/s is + * actually a reference to the highest bit supported in this register. + * We assume the device supports all link speeds. 
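 *
 * Concretely, LNKCAP2 carries one bit per supported speed, so a device
 * offering 2.5/5/8 GT/s advertises
 *
 *     PCI_EXP_LNKCAP2_SLS_2_5GB | PCI_EXP_LNKCAP2_SLS_5_0GB |
 *     PCI_EXP_LNKCAP2_SLS_8_0GB
 *
 * which is the mask set below, with the 16 GT/s bit OR-ed in when the
 * slot is configured for it.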
+ */ + if (s->speed > QEMU_PCI_EXP_LNK_5GT) { + pci_long_test_and_clear_mask(exp_cap + PCI_EXP_LNKCAP2, ~0U); + pci_long_test_and_set_mask(exp_cap + PCI_EXP_LNKCAP2, + PCI_EXP_LNKCAP2_SLS_2_5GB | + PCI_EXP_LNKCAP2_SLS_5_0GB | + PCI_EXP_LNKCAP2_SLS_8_0GB); + if (s->speed > QEMU_PCI_EXP_LNK_8GT) { + pci_long_test_and_set_mask(exp_cap + PCI_EXP_LNKCAP2, + PCI_EXP_LNKCAP2_SLS_16_0GB); + } + } +} + +int pcie_cap_init(PCIDevice *dev, uint8_t offset, + uint8_t type, uint8_t port, + Error **errp) +{ + /* PCIe cap v2 init */ + int pos; + uint8_t *exp_cap; + + assert(pci_is_express(dev)); + + pos = pci_add_capability(dev, PCI_CAP_ID_EXP, offset, + PCI_EXP_VER2_SIZEOF, errp); + if (pos < 0) { + return pos; + } + dev->exp.exp_cap = pos; + exp_cap = dev->config + pos; + + /* Filling values common with v1 */ + pcie_cap_v1_fill(dev, port, type, PCI_EXP_FLAGS_VER2); + + /* Fill link speed and width options */ + pcie_cap_fill_slot_lnk(dev); + + /* Filling v2 specific values */ + pci_set_long(exp_cap + PCI_EXP_DEVCAP2, + PCI_EXP_DEVCAP2_EFF | PCI_EXP_DEVCAP2_EETLPP); + + pci_set_word(dev->wmask + pos + PCI_EXP_DEVCTL2, PCI_EXP_DEVCTL2_EETLPPB); + + if (dev->cap_present & QEMU_PCIE_EXTCAP_INIT) { + /* read-only to behave like a 'NULL' Extended Capability Header */ + pci_set_long(dev->wmask + PCI_CONFIG_SPACE_SIZE, 0); + } + + return pos; +} + +int pcie_cap_v1_init(PCIDevice *dev, uint8_t offset, uint8_t type, + uint8_t port) +{ + /* PCIe cap v1 init */ + int pos; + Error *local_err = NULL; + + assert(pci_is_express(dev)); + + pos = pci_add_capability(dev, PCI_CAP_ID_EXP, offset, + PCI_EXP_VER1_SIZEOF, &local_err); + if (pos < 0) { + error_report_err(local_err); + return pos; + } + dev->exp.exp_cap = pos; + + pcie_cap_v1_fill(dev, port, type, PCI_EXP_FLAGS_VER1); + + return pos; +} + +static int +pcie_endpoint_cap_common_init(PCIDevice *dev, uint8_t offset, uint8_t cap_size) +{ + uint8_t type = PCI_EXP_TYPE_ENDPOINT; + Error *local_err = NULL; + int ret; + + /* + * Windows guests will report Code 10, device cannot start, if + * a regular Endpoint type is exposed on a root complex. These + * should instead be Root Complex Integrated Endpoints. 
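+     * The check below therefore selects PCI_EXP_TYPE_RC_END whenever the
+     * device sits directly on an express root bus.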
+ */ + if (pci_bus_is_express(pci_get_bus(dev)) + && pci_bus_is_root(pci_get_bus(dev))) { + type = PCI_EXP_TYPE_RC_END; + } + + if (cap_size == PCI_EXP_VER1_SIZEOF) { + return pcie_cap_v1_init(dev, offset, type, 0); + } else { + ret = pcie_cap_init(dev, offset, type, 0, &local_err); + + if (ret < 0) { + error_report_err(local_err); + } + + return ret; + } +} + +int pcie_endpoint_cap_init(PCIDevice *dev, uint8_t offset) +{ + return pcie_endpoint_cap_common_init(dev, offset, PCI_EXP_VER2_SIZEOF); +} + +int pcie_endpoint_cap_v1_init(PCIDevice *dev, uint8_t offset) +{ + return pcie_endpoint_cap_common_init(dev, offset, PCI_EXP_VER1_SIZEOF); +} + +void pcie_cap_exit(PCIDevice *dev) +{ + pci_del_capability(dev, PCI_CAP_ID_EXP, PCI_EXP_VER2_SIZEOF); +} + +void pcie_cap_v1_exit(PCIDevice *dev) +{ + pci_del_capability(dev, PCI_CAP_ID_EXP, PCI_EXP_VER1_SIZEOF); +} + +uint8_t pcie_cap_get_type(const PCIDevice *dev) +{ + uint32_t pos = dev->exp.exp_cap; + assert(pos > 0); + return (pci_get_word(dev->config + pos + PCI_EXP_FLAGS) & + PCI_EXP_FLAGS_TYPE) >> PCI_EXP_FLAGS_TYPE_SHIFT; +} + +/* MSI/MSI-X */ +/* pci express interrupt message number */ +/* 7.8.2 PCI Express Capabilities Register: Interrupt Message Number */ +void pcie_cap_flags_set_vector(PCIDevice *dev, uint8_t vector) +{ + uint8_t *exp_cap = dev->config + dev->exp.exp_cap; + assert(vector < 32); + pci_word_test_and_clear_mask(exp_cap + PCI_EXP_FLAGS, PCI_EXP_FLAGS_IRQ); + pci_word_test_and_set_mask(exp_cap + PCI_EXP_FLAGS, + vector << PCI_EXP_FLAGS_IRQ_SHIFT); +} + +uint8_t pcie_cap_flags_get_vector(PCIDevice *dev) +{ + return (pci_get_word(dev->config + dev->exp.exp_cap + PCI_EXP_FLAGS) & + PCI_EXP_FLAGS_IRQ) >> PCI_EXP_FLAGS_IRQ_SHIFT; +} + +void pcie_cap_deverr_init(PCIDevice *dev) +{ + uint32_t pos = dev->exp.exp_cap; + pci_long_test_and_set_mask(dev->config + pos + PCI_EXP_DEVCAP, + PCI_EXP_DEVCAP_RBER); + pci_long_test_and_set_mask(dev->wmask + pos + PCI_EXP_DEVCTL, + PCI_EXP_DEVCTL_CERE | PCI_EXP_DEVCTL_NFERE | + PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE); + pci_long_test_and_set_mask(dev->w1cmask + pos + PCI_EXP_DEVSTA, + PCI_EXP_DEVSTA_CED | PCI_EXP_DEVSTA_NFED | + PCI_EXP_DEVSTA_FED | PCI_EXP_DEVSTA_URD); +} + +void pcie_cap_deverr_reset(PCIDevice *dev) +{ + uint8_t *devctl = dev->config + dev->exp.exp_cap + PCI_EXP_DEVCTL; + pci_long_test_and_clear_mask(devctl, + PCI_EXP_DEVCTL_CERE | PCI_EXP_DEVCTL_NFERE | + PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE); +} + +void pcie_cap_lnkctl_init(PCIDevice *dev) +{ + uint32_t pos = dev->exp.exp_cap; + pci_long_test_and_set_mask(dev->wmask + pos + PCI_EXP_LNKCTL, + PCI_EXP_LNKCTL_CCC | PCI_EXP_LNKCTL_ES); +} + +void pcie_cap_lnkctl_reset(PCIDevice *dev) +{ + uint8_t *lnkctl = dev->config + dev->exp.exp_cap + PCI_EXP_LNKCTL; + pci_long_test_and_clear_mask(lnkctl, + PCI_EXP_LNKCTL_CCC | PCI_EXP_LNKCTL_ES); +} + +static void hotplug_event_update_event_status(PCIDevice *dev) +{ + uint32_t pos = dev->exp.exp_cap; + uint8_t *exp_cap = dev->config + pos; + uint16_t sltctl = pci_get_word(exp_cap + PCI_EXP_SLTCTL); + uint16_t sltsta = pci_get_word(exp_cap + PCI_EXP_SLTSTA); + + dev->exp.hpev_notified = (sltctl & PCI_EXP_SLTCTL_HPIE) && + (sltsta & sltctl & PCI_EXP_HP_EV_SUPPORTED); +} + +static void hotplug_event_notify(PCIDevice *dev) +{ + bool prev = dev->exp.hpev_notified; + + hotplug_event_update_event_status(dev); + + if (prev == dev->exp.hpev_notified) { + return; + } + + /* Note: the logic above does not take into account whether interrupts + * are masked. 
The result is that interrupt will be sent when it is + * subsequently unmasked. This appears to be legal: Section 6.7.3.4: + * The Port may optionally send an MSI when there are hot-plug events that + * occur while interrupt generation is disabled, and interrupt generation is + * subsequently enabled. */ + if (msix_enabled(dev)) { + msix_notify(dev, pcie_cap_flags_get_vector(dev)); + } else if (msi_enabled(dev)) { + msi_notify(dev, pcie_cap_flags_get_vector(dev)); + } else { + pci_set_irq(dev, dev->exp.hpev_notified); + } +} + +static void hotplug_event_clear(PCIDevice *dev) +{ + hotplug_event_update_event_status(dev); + if (!msix_enabled(dev) && !msi_enabled(dev) && !dev->exp.hpev_notified) { + pci_irq_deassert(dev); + } +} + +static void pcie_set_power_device(PCIBus *bus, PCIDevice *dev, void *opaque) +{ + bool *power = opaque; + + pci_set_power(dev, *power); +} + +static void pcie_cap_update_power(PCIDevice *hotplug_dev) +{ + uint8_t *exp_cap = hotplug_dev->config + hotplug_dev->exp.exp_cap; + PCIBus *sec_bus = pci_bridge_get_sec_bus(PCI_BRIDGE(hotplug_dev)); + uint32_t sltcap = pci_get_long(exp_cap + PCI_EXP_SLTCAP); + uint16_t sltctl = pci_get_word(exp_cap + PCI_EXP_SLTCTL); + bool power = true; + + if (sltcap & PCI_EXP_SLTCAP_PCP) { + power = (sltctl & PCI_EXP_SLTCTL_PCC) == PCI_EXP_SLTCTL_PWR_ON; + } + + pci_for_each_device(sec_bus, pci_bus_num(sec_bus), + pcie_set_power_device, &power); +} + +/* + * A PCI Express Hot-Plug Event has occurred, so update slot status register + * and notify OS of the event if necessary. + * + * 6.7.3 PCI Express Hot-Plug Events + * 6.7.3.4 Software Notification of Hot-Plug Events + */ +static void pcie_cap_slot_event(PCIDevice *dev, PCIExpressHotPlugEvent event) +{ + /* Minor optimization: if nothing changed - no event is needed. */ + if (pci_word_test_and_set_mask(dev->config + dev->exp.exp_cap + + PCI_EXP_SLTSTA, event) == event) { + return; + } + hotplug_event_notify(dev); +} + +static void pcie_cap_slot_plug_common(PCIDevice *hotplug_dev, DeviceState *dev, + Error **errp) +{ + uint8_t *exp_cap = hotplug_dev->config + hotplug_dev->exp.exp_cap; + uint16_t sltsta = pci_get_word(exp_cap + PCI_EXP_SLTSTA); + + PCIE_DEV_PRINTF(PCI_DEVICE(dev), "hotplug state: 0x%x\n", sltsta); + if (sltsta & PCI_EXP_SLTSTA_EIS) { + /* the slot is electromechanically locked. + * This error is propagated up to qdev and then to HMP/QMP. + */ + error_setg_errno(errp, EBUSY, "slot is electromechanically locked"); + } +} + +void pcie_cap_slot_pre_plug_cb(HotplugHandler *hotplug_dev, DeviceState *dev, + Error **errp) +{ + PCIDevice *hotplug_pdev = PCI_DEVICE(hotplug_dev); + uint8_t *exp_cap = hotplug_pdev->config + hotplug_pdev->exp.exp_cap; + uint32_t sltcap = pci_get_word(exp_cap + PCI_EXP_SLTCAP); + + /* Check if hot-plug is disabled on the slot */ + if (dev->hotplugged && (sltcap & PCI_EXP_SLTCAP_HPC) == 0) { + error_setg(errp, "Hot-plug failed: unsupported by the port device '%s'", + DEVICE(hotplug_pdev)->id); + return; + } + + pcie_cap_slot_plug_common(PCI_DEVICE(hotplug_dev), dev, errp); +} + +void pcie_cap_slot_plug_cb(HotplugHandler *hotplug_dev, DeviceState *dev, + Error **errp) +{ + PCIDevice *hotplug_pdev = PCI_DEVICE(hotplug_dev); + uint8_t *exp_cap = hotplug_pdev->config + hotplug_pdev->exp.exp_cap; + PCIDevice *pci_dev = PCI_DEVICE(dev); + uint32_t lnkcap = pci_get_long(exp_cap + PCI_EXP_LNKCAP); + + /* Don't send event when device is enabled during qemu machine creation: + * it is present on boot, no hotplug event is necessary. 
We do send an
+     * event when the device is disabled later. */
+    if (!dev->hotplugged) {
+        pci_word_test_and_set_mask(exp_cap + PCI_EXP_SLTSTA,
+                                   PCI_EXP_SLTSTA_PDS);
+        if (pci_dev->cap_present & QEMU_PCIE_LNKSTA_DLLLA ||
+            (lnkcap & PCI_EXP_LNKCAP_DLLLARC)) {
+            pci_word_test_and_set_mask(exp_cap + PCI_EXP_LNKSTA,
+                                       PCI_EXP_LNKSTA_DLLLA);
+        }
+        pcie_cap_update_power(hotplug_pdev);
+        return;
+    }
+
+    /* To enable multifunction hot-plug, we just ensure that function
+     * 0 is added last. When function 0 is added, we set the sltsta and
+     * inform the OS via event notification.
+     */
+    if (pci_get_function_0(pci_dev)) {
+        pci_word_test_and_set_mask(exp_cap + PCI_EXP_SLTSTA,
+                                   PCI_EXP_SLTSTA_PDS);
+        if (pci_dev->cap_present & QEMU_PCIE_LNKSTA_DLLLA ||
+            (lnkcap & PCI_EXP_LNKCAP_DLLLARC)) {
+            pci_word_test_and_set_mask(exp_cap + PCI_EXP_LNKSTA,
+                                       PCI_EXP_LNKSTA_DLLLA);
+        }
+        pcie_cap_slot_event(hotplug_pdev,
+                            PCI_EXP_HP_EV_PDC | PCI_EXP_HP_EV_ABP);
+        pcie_cap_update_power(hotplug_pdev);
+    }
+}
+
+void pcie_cap_slot_unplug_cb(HotplugHandler *hotplug_dev, DeviceState *dev,
+                             Error **errp)
+{
+    qdev_unrealize(dev);
+}
+
+static void pcie_unplug_device(PCIBus *bus, PCIDevice *dev, void *opaque)
+{
+    HotplugHandler *hotplug_ctrl = qdev_get_hotplug_handler(DEVICE(dev));
+
+    if (dev->partially_hotplugged) {
+        dev->qdev.pending_deleted_event = false;
+        return;
+    }
+    hotplug_handler_unplug(hotplug_ctrl, DEVICE(dev), &error_abort);
+    object_unparent(OBJECT(dev));
+}
+
+static void pcie_cap_slot_do_unplug(PCIDevice *dev)
+{
+    PCIBus *sec_bus = pci_bridge_get_sec_bus(PCI_BRIDGE(dev));
+    uint8_t *exp_cap = dev->config + dev->exp.exp_cap;
+    uint32_t lnkcap = pci_get_long(exp_cap + PCI_EXP_LNKCAP);
+
+    pci_for_each_device_under_bus(sec_bus, pcie_unplug_device, NULL);
+
+    pci_word_test_and_clear_mask(exp_cap + PCI_EXP_SLTSTA,
+                                 PCI_EXP_SLTSTA_PDS);
+    if (dev->cap_present & QEMU_PCIE_LNKSTA_DLLLA ||
+        (lnkcap & PCI_EXP_LNKCAP_DLLLARC)) {
+        pci_word_test_and_clear_mask(exp_cap + PCI_EXP_LNKSTA,
+                                     PCI_EXP_LNKSTA_DLLLA);
+    }
+    pci_word_test_and_set_mask(exp_cap + PCI_EXP_SLTSTA,
+                               PCI_EXP_SLTSTA_PDC);
+}
+
+void pcie_cap_slot_unplug_request_cb(HotplugHandler *hotplug_dev,
+                                     DeviceState *dev, Error **errp)
+{
+    Error *local_err = NULL;
+    PCIDevice *pci_dev = PCI_DEVICE(dev);
+    PCIBus *bus = pci_get_bus(pci_dev);
+    PCIDevice *hotplug_pdev = PCI_DEVICE(hotplug_dev);
+    uint8_t *exp_cap = hotplug_pdev->config + hotplug_pdev->exp.exp_cap;
+    uint32_t sltcap = pci_get_word(exp_cap + PCI_EXP_SLTCAP);
+    uint16_t sltctl = pci_get_word(exp_cap + PCI_EXP_SLTCTL);
+
+    /* Check if hot-unplug is disabled on the slot */
+    if ((sltcap & PCI_EXP_SLTCAP_HPC) == 0) {
+        error_setg(errp, "Hot-unplug failed: "
+                   "unsupported by the port device '%s'",
+                   DEVICE(hotplug_pdev)->id);
+        return;
+    }
+
+    pcie_cap_slot_plug_common(hotplug_pdev, dev, &local_err);
+    if (local_err) {
+        error_propagate(errp, local_err);
+        return;
+    }
+
+    if ((sltctl & PCI_EXP_SLTCTL_PIC) == PCI_EXP_SLTCTL_PWR_IND_BLINK) {
+        error_setg(errp, "Hot-unplug failed: "
+                   "guest is busy (power indicator blinking)");
+        return;
+    }
+
+    dev->pending_deleted_event = true;
+    dev->pending_deleted_expires_ms =
+        qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 5000; /* 5 secs */
+
+    /* In case the user cancels the operation of a multi-function hot-add,
+     * remove the functions that are not yet exposed to the guest
+     * individually, without interaction with the guest.
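+     * (Such functions are not yet exposed to the guest because function 0,
+     * whose arrival triggers the presence-detect event in
+     * pcie_cap_slot_plug_cb() above, has not been added yet.)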
+ */
+    if (pci_dev->devfn &&
+        !bus->devices[0]) {
+        pcie_unplug_device(bus, pci_dev, NULL);
+
+        return;
+    }
+
+    if (((sltctl & PCI_EXP_SLTCTL_PIC) == PCI_EXP_SLTCTL_PWR_IND_OFF) &&
+        ((sltctl & PCI_EXP_SLTCTL_PCC) == PCI_EXP_SLTCTL_PWR_OFF)) {
+        /* slot is powered off -> unplug without round-trip to the guest */
+        pcie_cap_slot_do_unplug(hotplug_pdev);
+        hotplug_event_notify(hotplug_pdev);
+        pci_word_test_and_clear_mask(exp_cap + PCI_EXP_SLTSTA,
+                                     PCI_EXP_SLTSTA_ABP);
+        return;
+    }
+
+    pcie_cap_slot_push_attention_button(hotplug_pdev);
+}
+
+/* pci express slot for pci express root/downstream port
+   PCI express capability slot registers */
+void pcie_cap_slot_init(PCIDevice *dev, PCIESlot *s)
+{
+    uint32_t pos = dev->exp.exp_cap;
+
+    pci_word_test_and_set_mask(dev->config + pos + PCI_EXP_FLAGS,
+                               PCI_EXP_FLAGS_SLOT);
+
+    pci_long_test_and_clear_mask(dev->config + pos + PCI_EXP_SLTCAP,
+                                 ~PCI_EXP_SLTCAP_PSN);
+    pci_long_test_and_set_mask(dev->config + pos + PCI_EXP_SLTCAP,
+                               (s->slot << PCI_EXP_SLTCAP_PSN_SHIFT) |
+                               PCI_EXP_SLTCAP_EIP |
+                               PCI_EXP_SLTCAP_PIP |
+                               PCI_EXP_SLTCAP_AIP |
+                               PCI_EXP_SLTCAP_ABP);
+
+    /*
+     * Enable native hot-plug on all hot-plugged bridges unless
+     * hot-plug is disabled on the slot.
+     */
+    if (s->hotplug &&
+        (s->native_hotplug || DEVICE(dev)->hotplugged)) {
+        pci_long_test_and_set_mask(dev->config + pos + PCI_EXP_SLTCAP,
+                                   PCI_EXP_SLTCAP_HPS |
+                                   PCI_EXP_SLTCAP_HPC);
+    }
+
+    if (dev->cap_present & QEMU_PCIE_SLTCAP_PCP) {
+        pci_long_test_and_set_mask(dev->config + pos + PCI_EXP_SLTCAP,
+                                   PCI_EXP_SLTCAP_PCP);
+        pci_word_test_and_clear_mask(dev->config + pos + PCI_EXP_SLTCTL,
+                                     PCI_EXP_SLTCTL_PCC);
+        pci_word_test_and_set_mask(dev->wmask + pos + PCI_EXP_SLTCTL,
+                                   PCI_EXP_SLTCTL_PCC);
+    }
+
+    pci_word_test_and_clear_mask(dev->config + pos + PCI_EXP_SLTCTL,
+                                 PCI_EXP_SLTCTL_PIC |
+                                 PCI_EXP_SLTCTL_AIC);
+    pci_word_test_and_set_mask(dev->config + pos + PCI_EXP_SLTCTL,
+                               PCI_EXP_SLTCTL_PIC_OFF |
+                               PCI_EXP_SLTCTL_AIC_OFF);
+    pci_word_test_and_set_mask(dev->wmask + pos + PCI_EXP_SLTCTL,
+                               PCI_EXP_SLTCTL_PIC |
+                               PCI_EXP_SLTCTL_AIC |
+                               PCI_EXP_SLTCTL_HPIE |
+                               PCI_EXP_SLTCTL_CCIE |
+                               PCI_EXP_SLTCTL_PDCE |
+                               PCI_EXP_SLTCTL_ABPE);
+    /* Although reading PCI_EXP_SLTCTL_EIC always returns 0,
+     * make the bit writable here in order to detect when 1b is written.
+     * pcie_cap_slot_write_config() tests and clears the bit, so
+     * this bit always returns 0 to the guest.
+     */
+    pci_word_test_and_set_mask(dev->wmask + pos + PCI_EXP_SLTCTL,
+                               PCI_EXP_SLTCTL_EIC);
+
+    pci_word_test_and_set_mask(dev->w1cmask + pos + PCI_EXP_SLTSTA,
+                               PCI_EXP_HP_EV_SUPPORTED);
+
+    dev->exp.hpev_notified = false;
+
+    qbus_set_hotplug_handler(BUS(pci_bridge_get_sec_bus(PCI_BRIDGE(dev))),
+                             OBJECT(dev));
+}
+
+void pcie_cap_slot_reset(PCIDevice *dev)
+{
+    uint8_t *exp_cap = dev->config + dev->exp.exp_cap;
+    uint8_t port_type = pcie_cap_get_type(dev);
+
+    assert(port_type == PCI_EXP_TYPE_DOWNSTREAM ||
+           port_type == PCI_EXP_TYPE_ROOT_PORT);
+
+    PCIE_DEV_PRINTF(dev, "reset\n");
+
+    pci_word_test_and_clear_mask(exp_cap + PCI_EXP_SLTCTL,
+                                 PCI_EXP_SLTCTL_EIC |
+                                 PCI_EXP_SLTCTL_PIC |
+                                 PCI_EXP_SLTCTL_AIC |
+                                 PCI_EXP_SLTCTL_HPIE |
+                                 PCI_EXP_SLTCTL_CCIE |
+                                 PCI_EXP_SLTCTL_PDCE |
+                                 PCI_EXP_SLTCTL_ABPE);
+    pci_word_test_and_set_mask(exp_cap + PCI_EXP_SLTCTL,
+                               PCI_EXP_SLTCTL_AIC_OFF);
+
+    if (dev->cap_present & QEMU_PCIE_SLTCAP_PCP) {
+        /* Downstream ports enforce device number 0.
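+         * Checking devfn 0 on the secondary bus below is therefore enough
+         * to tell whether the slot is populated.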
*/
+        bool populated = pci_bridge_get_sec_bus(PCI_BRIDGE(dev))->devices[0];
+        uint16_t pic;
+
+        if (populated) {
+            pci_word_test_and_clear_mask(exp_cap + PCI_EXP_SLTCTL,
+                                         PCI_EXP_SLTCTL_PCC);
+        } else {
+            pci_word_test_and_set_mask(exp_cap + PCI_EXP_SLTCTL,
+                                       PCI_EXP_SLTCTL_PCC);
+        }
+
+        pic = populated ? PCI_EXP_SLTCTL_PIC_ON : PCI_EXP_SLTCTL_PIC_OFF;
+        pci_word_test_and_set_mask(exp_cap + PCI_EXP_SLTCTL, pic);
+    }
+
+    pci_word_test_and_clear_mask(exp_cap + PCI_EXP_SLTSTA,
+                                 PCI_EXP_SLTSTA_EIS |/* on reset,
+                                                        the lock is released */
+                                 PCI_EXP_SLTSTA_CC |
+                                 PCI_EXP_SLTSTA_PDC |
+                                 PCI_EXP_SLTSTA_ABP);
+
+    pcie_cap_update_power(dev);
+    hotplug_event_update_event_status(dev);
+}
+
+void pcie_cap_slot_get(PCIDevice *dev, uint16_t *slt_ctl, uint16_t *slt_sta)
+{
+    uint32_t pos = dev->exp.exp_cap;
+    uint8_t *exp_cap = dev->config + pos;
+    *slt_ctl = pci_get_word(exp_cap + PCI_EXP_SLTCTL);
+    *slt_sta = pci_get_word(exp_cap + PCI_EXP_SLTSTA);
+}
+
+void pcie_cap_slot_write_config(PCIDevice *dev,
+                                uint16_t old_slt_ctl, uint16_t old_slt_sta,
+                                uint32_t addr, uint32_t val, int len)
+{
+    uint32_t pos = dev->exp.exp_cap;
+    uint8_t *exp_cap = dev->config + pos;
+    uint16_t sltsta = pci_get_word(exp_cap + PCI_EXP_SLTSTA);
+
+    if (ranges_overlap(addr, len, pos + PCI_EXP_SLTSTA, 2)) {
+        /*
+         * Guests tend to clear all bits during init.
+         * If they clear bits that weren't set, this is racy and will lose
+         * events: not a big problem for manual button presses, but a problem
+         * for us.
+         * As a work-around, detect this and revert status to what it was
+         * before the write.
+         *
+         * Note: in theory this can be detected as a duplicate button press
+         * which cancels the previous press. Does not seem to happen in
+         * practice as guests seem to only have this bug during init.
+         */
+#define PCIE_SLOT_EVENTS (PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD | \
+                          PCI_EXP_SLTSTA_MRLSC | PCI_EXP_SLTSTA_PDC | \
+                          PCI_EXP_SLTSTA_CC)
+
+        if (val & ~old_slt_sta & PCIE_SLOT_EVENTS) {
+            sltsta = (sltsta & ~PCIE_SLOT_EVENTS) | (old_slt_sta & PCIE_SLOT_EVENTS);
+            pci_set_word(exp_cap + PCI_EXP_SLTSTA, sltsta);
+        }
+        hotplug_event_clear(dev);
+    }
+
+    if (!ranges_overlap(addr, len, pos + PCI_EXP_SLTCTL, 2)) {
+        return;
+    }
+
+    if (pci_word_test_and_clear_mask(exp_cap + PCI_EXP_SLTCTL,
+                                     PCI_EXP_SLTCTL_EIC)) {
+        sltsta ^= PCI_EXP_SLTSTA_EIS; /* toggle PCI_EXP_SLTSTA_EIS bit */
+        pci_set_word(exp_cap + PCI_EXP_SLTSTA, sltsta);
+        PCIE_DEV_PRINTF(dev, "PCI_EXP_SLTCTL_EIC: "
+                        "sltsta -> 0x%02"PRIx16"\n",
+                        sltsta);
+    }
+
+    /*
+     * If the slot is populated, power indicator is off and power
+     * controller is off, it is safe to detach the devices.
+     *
+     * Note: don't detach if condition was already true:
+     * this is a work-around for guests that overwrite
+     * control of powered off slots before powering them on.
+     */
+    if ((sltsta & PCI_EXP_SLTSTA_PDS) && (val & PCI_EXP_SLTCTL_PCC) &&
+        (val & PCI_EXP_SLTCTL_PIC_OFF) == PCI_EXP_SLTCTL_PIC_OFF &&
+        (!(old_slt_ctl & PCI_EXP_SLTCTL_PCC) ||
+        (old_slt_ctl & PCI_EXP_SLTCTL_PIC_OFF) != PCI_EXP_SLTCTL_PIC_OFF)) {
+        pcie_cap_slot_do_unplug(dev);
+    }
+    pcie_cap_update_power(dev);
+
+    hotplug_event_notify(dev);
+
+    /*
+     * 6.7.3.2 Command Completed Events
+     *
+     * Software issues a command to a hot-plug capable Downstream Port by
+     * issuing a write transaction that targets any portion of the Port’s Slot
+     * Control register. A single write to the Slot Control register is
+     * considered to be a single command, even if the write affects more than
+     * one field in the Slot Control register.
In response to this transaction,
+     * the Port must carry out the requested actions and then set the
+     * associated status field for the command completed event. */
+
+    /* Real hardware might take a while to complete the requested command
+     * because physical movement would be involved, like locking the
+     * electromechanical lock. However, in our case the command is completed
+     * instantaneously above, so send a command completion event right now.
+     */
+    pcie_cap_slot_event(dev, PCI_EXP_HP_EV_CCI);
+}
+
+int pcie_cap_slot_post_load(void *opaque, int version_id)
+{
+    PCIDevice *dev = opaque;
+    hotplug_event_update_event_status(dev);
+    pcie_cap_update_power(dev);
+    return 0;
+}
+
+void pcie_cap_slot_push_attention_button(PCIDevice *dev)
+{
+    pcie_cap_slot_event(dev, PCI_EXP_HP_EV_ABP);
+}
+
+/* root control/capabilities/status. PME isn't emulated for now */
+void pcie_cap_root_init(PCIDevice *dev)
+{
+    pci_set_word(dev->wmask + dev->exp.exp_cap + PCI_EXP_RTCTL,
+                 PCI_EXP_RTCTL_SECEE | PCI_EXP_RTCTL_SENFEE |
+                 PCI_EXP_RTCTL_SEFEE);
+}
+
+void pcie_cap_root_reset(PCIDevice *dev)
+{
+    pci_set_word(dev->config + dev->exp.exp_cap + PCI_EXP_RTCTL, 0);
+}
+
+/* function level reset (FLR) */
+void pcie_cap_flr_init(PCIDevice *dev)
+{
+    pci_long_test_and_set_mask(dev->config + dev->exp.exp_cap + PCI_EXP_DEVCAP,
+                               PCI_EXP_DEVCAP_FLR);
+
+    /* Although reading BCR_FLR always returns 0,
+     * the bit is made writable here in order to detect when 1b is written.
+     * pcie_cap_flr_write_config() tests and clears the bit, so
+     * this bit always returns 0 to the guest.
+     */
+    pci_word_test_and_set_mask(dev->wmask + dev->exp.exp_cap + PCI_EXP_DEVCTL,
+                               PCI_EXP_DEVCTL_BCR_FLR);
+}
+
+void pcie_cap_flr_write_config(PCIDevice *dev,
+                               uint32_t addr, uint32_t val, int len)
+{
+    uint8_t *devctl = dev->config + dev->exp.exp_cap + PCI_EXP_DEVCTL;
+    if (pci_get_word(devctl) & PCI_EXP_DEVCTL_BCR_FLR) {
+        /* Clear PCI_EXP_DEVCTL_BCR_FLR after invoking the reset handler
+           so the handler can detect FLR by looking at this bit.
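+           The reset below completes synchronously, so the bit can be
+           cleared again as soon as pci_device_reset() returns.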
*/ + pci_device_reset(dev); + pci_word_test_and_clear_mask(devctl, PCI_EXP_DEVCTL_BCR_FLR); + } +} + +/* Alternative Routing-ID Interpretation (ARI) + * forwarding support for root and downstream ports + */ +void pcie_cap_arifwd_init(PCIDevice *dev) +{ + uint32_t pos = dev->exp.exp_cap; + pci_long_test_and_set_mask(dev->config + pos + PCI_EXP_DEVCAP2, + PCI_EXP_DEVCAP2_ARI); + pci_long_test_and_set_mask(dev->wmask + pos + PCI_EXP_DEVCTL2, + PCI_EXP_DEVCTL2_ARI); +} + +void pcie_cap_arifwd_reset(PCIDevice *dev) +{ + uint8_t *devctl2 = dev->config + dev->exp.exp_cap + PCI_EXP_DEVCTL2; + pci_long_test_and_clear_mask(devctl2, PCI_EXP_DEVCTL2_ARI); +} + +bool pcie_cap_is_arifwd_enabled(const PCIDevice *dev) +{ + if (!pci_is_express(dev)) { + return false; + } + if (!dev->exp.exp_cap) { + return false; + } + + return pci_get_long(dev->config + dev->exp.exp_cap + PCI_EXP_DEVCTL2) & + PCI_EXP_DEVCTL2_ARI; +} + +/************************************************************************** + * pci express extended capability list management functions + * uint16_t ext_cap_id (16 bit) + * uint8_t cap_ver (4 bit) + * uint16_t cap_offset (12 bit) + * uint16_t ext_cap_size + */ + +/* Passing a cap_id value > 0xffff will return 0 and put end of list in prev */ +static uint16_t pcie_find_capability_list(PCIDevice *dev, uint32_t cap_id, + uint16_t *prev_p) +{ + uint16_t prev = 0; + uint16_t next; + uint32_t header = pci_get_long(dev->config + PCI_CONFIG_SPACE_SIZE); + + if (!header) { + /* no extended capability */ + next = 0; + goto out; + } + for (next = PCI_CONFIG_SPACE_SIZE; next; + prev = next, next = PCI_EXT_CAP_NEXT(header)) { + + assert(next >= PCI_CONFIG_SPACE_SIZE); + assert(next <= PCIE_CONFIG_SPACE_SIZE - 8); + + header = pci_get_long(dev->config + next); + if (PCI_EXT_CAP_ID(header) == cap_id) { + break; + } + } + +out: + if (prev_p) { + *prev_p = prev; + } + return next; +} + +uint16_t pcie_find_capability(PCIDevice *dev, uint16_t cap_id) +{ + return pcie_find_capability_list(dev, cap_id, NULL); +} + +static void pcie_ext_cap_set_next(PCIDevice *dev, uint16_t pos, uint16_t next) +{ + uint32_t header = pci_get_long(dev->config + pos); + assert(!(next & (PCI_EXT_CAP_ALIGN - 1))); + header = (header & ~PCI_EXT_CAP_NEXT_MASK) | + ((next << PCI_EXT_CAP_NEXT_SHIFT) & PCI_EXT_CAP_NEXT_MASK); + pci_set_long(dev->config + pos, header); +} + +/* + * Caller must supply valid (offset, size) such that the range wouldn't + * overlap with other capability or other registers. + * This function doesn't check it. + */ +void pcie_add_capability(PCIDevice *dev, + uint16_t cap_id, uint8_t cap_ver, + uint16_t offset, uint16_t size) +{ + assert(offset >= PCI_CONFIG_SPACE_SIZE); + assert(offset < (uint16_t)(offset + size)); + assert((uint16_t)(offset + size) <= PCIE_CONFIG_SPACE_SIZE); + assert(size >= 8); + assert(pci_is_express(dev)); + + if (offset != PCI_CONFIG_SPACE_SIZE) { + uint16_t prev; + + /* + * 0xffffffff is not a valid cap id (it's a 16 bit field). use + * internally to find the last capability in the linked list. 
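+         * (pcie_find_capability_list() then leaves the offset of the last
+         * element in *prev, which is exactly what is needed to chain in
+         * the new capability below.)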
+ */ + pcie_find_capability_list(dev, 0xffffffff, &prev); + assert(prev >= PCI_CONFIG_SPACE_SIZE); + pcie_ext_cap_set_next(dev, prev, offset); + } + pci_set_long(dev->config + offset, PCI_EXT_CAP(cap_id, cap_ver, 0)); + + /* Make capability read-only by default */ + memset(dev->wmask + offset, 0, size); + memset(dev->w1cmask + offset, 0, size); + /* Check capability by default */ + memset(dev->cmask + offset, 0xFF, size); +} + +/* + * Sync the PCIe Link Status negotiated speed and width of a bridge with the + * downstream device. If downstream device is not present, re-write with the + * Link Capability fields. If downstream device reports invalid width or + * speed, replace with minimum values (LnkSta fields are RsvdZ on VFs but such + * values interfere with PCIe native hotplug detecting new devices). Limit + * width and speed to bridge capabilities for compatibility. Use config_read + * to access the downstream device since it could be an assigned device with + * volatile link information. + */ +void pcie_sync_bridge_lnk(PCIDevice *bridge_dev) +{ + PCIBridge *br = PCI_BRIDGE(bridge_dev); + PCIBus *bus = pci_bridge_get_sec_bus(br); + PCIDevice *target = bus->devices[0]; + uint8_t *exp_cap = bridge_dev->config + bridge_dev->exp.exp_cap; + uint16_t lnksta, lnkcap = pci_get_word(exp_cap + PCI_EXP_LNKCAP); + + if (!target || !target->exp.exp_cap) { + lnksta = lnkcap; + } else { + lnksta = target->config_read(target, + target->exp.exp_cap + PCI_EXP_LNKSTA, + sizeof(lnksta)); + + if ((lnksta & PCI_EXP_LNKSTA_NLW) > (lnkcap & PCI_EXP_LNKCAP_MLW)) { + lnksta &= ~PCI_EXP_LNKSTA_NLW; + lnksta |= lnkcap & PCI_EXP_LNKCAP_MLW; + } else if (!(lnksta & PCI_EXP_LNKSTA_NLW)) { + lnksta |= QEMU_PCI_EXP_LNKSTA_NLW(QEMU_PCI_EXP_LNK_X1); + } + + if ((lnksta & PCI_EXP_LNKSTA_CLS) > (lnkcap & PCI_EXP_LNKCAP_SLS)) { + lnksta &= ~PCI_EXP_LNKSTA_CLS; + lnksta |= lnkcap & PCI_EXP_LNKCAP_SLS; + } else if (!(lnksta & PCI_EXP_LNKSTA_CLS)) { + lnksta |= QEMU_PCI_EXP_LNKSTA_CLS(QEMU_PCI_EXP_LNK_2_5GT); + } + } + + pci_word_test_and_clear_mask(exp_cap + PCI_EXP_LNKSTA, + PCI_EXP_LNKSTA_CLS | PCI_EXP_LNKSTA_NLW); + pci_word_test_and_set_mask(exp_cap + PCI_EXP_LNKSTA, lnksta & + (PCI_EXP_LNKSTA_CLS | PCI_EXP_LNKSTA_NLW)); +} + +/************************************************************************** + * pci express extended capability helper functions + */ + +/* ARI */ +void pcie_ari_init(PCIDevice *dev, uint16_t offset, uint16_t nextfn) +{ + pcie_add_capability(dev, PCI_EXT_CAP_ID_ARI, PCI_ARI_VER, + offset, PCI_ARI_SIZEOF); + pci_set_long(dev->config + offset + PCI_ARI_CAP, (nextfn & 0xff) << 8); +} + +void pcie_dev_ser_num_init(PCIDevice *dev, uint16_t offset, uint64_t ser_num) +{ + static const int pci_dsn_ver = 1; + static const int pci_dsn_cap = 4; + + pcie_add_capability(dev, PCI_EXT_CAP_ID_DSN, pci_dsn_ver, offset, + PCI_EXT_CAP_DSN_SIZEOF); + pci_set_quad(dev->config + offset + pci_dsn_cap, ser_num); +} + +void pcie_ats_init(PCIDevice *dev, uint16_t offset, bool aligned) +{ + pcie_add_capability(dev, PCI_EXT_CAP_ID_ATS, 0x1, + offset, PCI_EXT_CAP_ATS_SIZEOF); + + dev->exp.ats_cap = offset; + + /* Invalidate Queue Depth 0 */ + if (aligned) { + pci_set_word(dev->config + offset + PCI_ATS_CAP, + PCI_ATS_CAP_PAGE_ALIGNED); + } + /* STU 0, Disabled by default */ + pci_set_word(dev->config + offset + PCI_ATS_CTRL, 0); + + pci_set_word(dev->wmask + dev->exp.ats_cap + PCI_ATS_CTRL, 0x800f); +} + +/* ACS (Access Control Services) */ +void pcie_acs_init(PCIDevice *dev, uint16_t offset) +{ + bool is_downstream = 
pci_is_express_downstream_port(dev); + uint16_t cap_bits = 0; + + /* For endpoints, only multifunction devs may have an ACS capability: */ + assert(is_downstream || + (dev->cap_present & QEMU_PCI_CAP_MULTIFUNCTION) || + PCI_FUNC(dev->devfn)); + + pcie_add_capability(dev, PCI_EXT_CAP_ID_ACS, PCI_ACS_VER, offset, + PCI_ACS_SIZEOF); + dev->exp.acs_cap = offset; + + if (is_downstream) { + /* + * Downstream ports must implement SV, TB, RR, CR, UF, and DT (with + * caveats on the latter four that we ignore for simplicity). + * Endpoints may also implement a subset of ACS capabilities, + * but these are optional if the endpoint does not support + * peer-to-peer between functions and thus omitted here. + */ + cap_bits = PCI_ACS_SV | PCI_ACS_TB | PCI_ACS_RR | + PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_DT; + } + + pci_set_word(dev->config + offset + PCI_ACS_CAP, cap_bits); + pci_set_word(dev->wmask + offset + PCI_ACS_CTRL, cap_bits); +} + +void pcie_acs_reset(PCIDevice *dev) +{ + if (dev->exp.acs_cap) { + pci_set_word(dev->config + dev->exp.acs_cap + PCI_ACS_CTRL, 0); + } +} diff --git a/hw/pci/pcie_aer.c b/hw/pci/pcie_aer.c new file mode 100644 index 000000000..27f9cc56a --- /dev/null +++ b/hw/pci/pcie_aer.c @@ -0,0 +1,1045 @@ +/* + * pcie_aer.c + * + * Copyright (c) 2010 Isaku Yamahata + * VA Linux Systems Japan K.K. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . + */ + +#include "qemu/osdep.h" +#include "sysemu/sysemu.h" +#include "qapi/qmp/qdict.h" +#include "migration/vmstate.h" +#include "monitor/monitor.h" +#include "hw/pci/pci_bridge.h" +#include "hw/pci/pcie.h" +#include "hw/pci/msix.h" +#include "hw/pci/msi.h" +#include "hw/pci/pci_bus.h" +#include "hw/pci/pcie_regs.h" +#include "qapi/error.h" + +//#define DEBUG_PCIE +#ifdef DEBUG_PCIE +# define PCIE_DPRINTF(fmt, ...) \ + fprintf(stderr, "%s:%d " fmt, __func__, __LINE__, ## __VA_ARGS__) +#else +# define PCIE_DPRINTF(fmt, ...) do {} while (0) +#endif +#define PCIE_DEV_PRINTF(dev, fmt, ...) \ + PCIE_DPRINTF("%s:%x "fmt, (dev)->name, (dev)->devfn, ## __VA_ARGS__) + +#define PCI_ERR_SRC_COR_OFFS 0 +#define PCI_ERR_SRC_UNCOR_OFFS 2 + +typedef struct PCIEErrorDetails { + const char *id; + const char *root_bus; + int bus; + int devfn; +} PCIEErrorDetails; + +/* From 6.2.7 Error Listing and Rules. 
Table 6-2, 6-3 and 6-4 */ +static uint32_t pcie_aer_uncor_default_severity(uint32_t status) +{ + switch (status) { + case PCI_ERR_UNC_INTN: + case PCI_ERR_UNC_DLP: + case PCI_ERR_UNC_SDN: + case PCI_ERR_UNC_RX_OVER: + case PCI_ERR_UNC_FCP: + case PCI_ERR_UNC_MALF_TLP: + return PCI_ERR_ROOT_CMD_FATAL_EN; + case PCI_ERR_UNC_POISON_TLP: + case PCI_ERR_UNC_ECRC: + case PCI_ERR_UNC_UNSUP: + case PCI_ERR_UNC_COMP_TIME: + case PCI_ERR_UNC_COMP_ABORT: + case PCI_ERR_UNC_UNX_COMP: + case PCI_ERR_UNC_ACSV: + case PCI_ERR_UNC_MCBTLP: + case PCI_ERR_UNC_ATOP_EBLOCKED: + case PCI_ERR_UNC_TLP_PRF_BLOCKED: + return PCI_ERR_ROOT_CMD_NONFATAL_EN; + default: + abort(); + break; + } + return PCI_ERR_ROOT_CMD_FATAL_EN; +} + +static int aer_log_add_err(PCIEAERLog *aer_log, const PCIEAERErr *err) +{ + if (aer_log->log_num == aer_log->log_max) { + return -1; + } + memcpy(&aer_log->log[aer_log->log_num], err, sizeof *err); + aer_log->log_num++; + return 0; +} + +static void aer_log_del_err(PCIEAERLog *aer_log, PCIEAERErr *err) +{ + assert(aer_log->log_num); + *err = aer_log->log[0]; + aer_log->log_num--; + memmove(&aer_log->log[0], &aer_log->log[1], + aer_log->log_num * sizeof *err); +} + +static void aer_log_clear_all_err(PCIEAERLog *aer_log) +{ + aer_log->log_num = 0; +} + +int pcie_aer_init(PCIDevice *dev, uint8_t cap_ver, uint16_t offset, + uint16_t size, Error **errp) +{ + pcie_add_capability(dev, PCI_EXT_CAP_ID_ERR, cap_ver, + offset, size); + dev->exp.aer_cap = offset; + + /* clip down the value to avoid unreasonable memory usage */ + if (dev->exp.aer_log.log_max > PCIE_AER_LOG_MAX_LIMIT) { + error_setg(errp, "Invalid aer_log_max %d. The max number of aer log " + "is %d", dev->exp.aer_log.log_max, PCIE_AER_LOG_MAX_LIMIT); + return -EINVAL; + } + dev->exp.aer_log.log = g_malloc0(sizeof dev->exp.aer_log.log[0] * + dev->exp.aer_log.log_max); + + pci_set_long(dev->w1cmask + offset + PCI_ERR_UNCOR_STATUS, + PCI_ERR_UNC_SUPPORTED); + + pci_set_long(dev->config + offset + PCI_ERR_UNCOR_SEVER, + PCI_ERR_UNC_SEVERITY_DEFAULT); + pci_set_long(dev->wmask + offset + PCI_ERR_UNCOR_SEVER, + PCI_ERR_UNC_SUPPORTED); + + pci_long_test_and_set_mask(dev->w1cmask + offset + PCI_ERR_COR_STATUS, + PCI_ERR_COR_SUPPORTED); + + pci_set_long(dev->config + offset + PCI_ERR_COR_MASK, + PCI_ERR_COR_MASK_DEFAULT); + pci_set_long(dev->wmask + offset + PCI_ERR_COR_MASK, + PCI_ERR_COR_SUPPORTED); + + /* capabilities and control. 
multiple header logging is supported */ + if (dev->exp.aer_log.log_max > 0) { + pci_set_long(dev->config + offset + PCI_ERR_CAP, + PCI_ERR_CAP_ECRC_GENC | PCI_ERR_CAP_ECRC_CHKC | + PCI_ERR_CAP_MHRC); + pci_set_long(dev->wmask + offset + PCI_ERR_CAP, + PCI_ERR_CAP_ECRC_GENE | PCI_ERR_CAP_ECRC_CHKE | + PCI_ERR_CAP_MHRE); + } else { + pci_set_long(dev->config + offset + PCI_ERR_CAP, + PCI_ERR_CAP_ECRC_GENC | PCI_ERR_CAP_ECRC_CHKC); + pci_set_long(dev->wmask + offset + PCI_ERR_CAP, + PCI_ERR_CAP_ECRC_GENE | PCI_ERR_CAP_ECRC_CHKE); + } + + switch (pcie_cap_get_type(dev)) { + case PCI_EXP_TYPE_ROOT_PORT: + /* this case will be set by pcie_aer_root_init() */ + /* fallthrough */ + case PCI_EXP_TYPE_DOWNSTREAM: + case PCI_EXP_TYPE_UPSTREAM: + pci_word_test_and_set_mask(dev->wmask + PCI_BRIDGE_CONTROL, + PCI_BRIDGE_CTL_SERR); + pci_long_test_and_set_mask(dev->w1cmask + PCI_STATUS, + PCI_SEC_STATUS_RCV_SYSTEM_ERROR); + break; + default: + /* nothing */ + break; + } + return 0; +} + +void pcie_aer_exit(PCIDevice *dev) +{ + g_free(dev->exp.aer_log.log); +} + +static void pcie_aer_update_uncor_status(PCIDevice *dev) +{ + uint8_t *aer_cap = dev->config + dev->exp.aer_cap; + PCIEAERLog *aer_log = &dev->exp.aer_log; + + uint16_t i; + for (i = 0; i < aer_log->log_num; i++) { + pci_long_test_and_set_mask(aer_cap + PCI_ERR_UNCOR_STATUS, + dev->exp.aer_log.log[i].status); + } +} + +/* + * return value: + * true: error message needs to be sent up + * false: error message is masked + * + * 6.2.6 Error Message Control + * Figure 6-3 + * all pci express devices part + */ +static bool +pcie_aer_msg_alldev(PCIDevice *dev, const PCIEAERMsg *msg) +{ + if (!(pcie_aer_msg_is_uncor(msg) && + (pci_get_word(dev->config + PCI_COMMAND) & PCI_COMMAND_SERR))) { + return false; + } + + /* Signaled System Error + * + * 7.5.1.1 Command register + * Bit 8 SERR# Enable + * + * When Set, this bit enables reporting of Non-fatal and Fatal + * errors detected by the Function to the Root Complex. Note that + * errors are reported if enabled either through this bit or through + * the PCI Express specific bits in the Device Control register (see + * Section 7.8.4). 
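+     * In other words, PCI_STATUS_SIG_SYSTEM_ERROR is set below for any
+     * uncorrectable error as long as SERR# Enable is set, even when the
+     * severity check against the Device Control register further down
+     * ends up masking the message itself.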
+ */
+    pci_word_test_and_set_mask(dev->config + PCI_STATUS,
+                               PCI_STATUS_SIG_SYSTEM_ERROR);
+
+    if (!(msg->severity &
+          pci_get_word(dev->config + dev->exp.exp_cap + PCI_EXP_DEVCTL))) {
+        return false;
+    }
+
+    /* send up error message */
+    return true;
+}
+
+/*
+ * return value:
+ * true: error message is sent up
+ * false: error message is masked
+ *
+ * 6.2.6 Error Message Control
+ * Figure 6-3
+ * virtual pci bridge part
+ */
+static bool pcie_aer_msg_vbridge(PCIDevice *dev, const PCIEAERMsg *msg)
+{
+    uint16_t bridge_control = pci_get_word(dev->config + PCI_BRIDGE_CONTROL);
+
+    if (pcie_aer_msg_is_uncor(msg)) {
+        /* Received System Error */
+        pci_word_test_and_set_mask(dev->config + PCI_SEC_STATUS,
+                                   PCI_SEC_STATUS_RCV_SYSTEM_ERROR);
+    }
+
+    if (!(bridge_control & PCI_BRIDGE_CTL_SERR)) {
+        return false;
+    }
+    return true;
+}
+
+void pcie_aer_root_set_vector(PCIDevice *dev, unsigned int vector)
+{
+    uint8_t *aer_cap = dev->config + dev->exp.aer_cap;
+    assert(vector < PCI_ERR_ROOT_IRQ_MAX);
+    pci_long_test_and_clear_mask(aer_cap + PCI_ERR_ROOT_STATUS,
+                                 PCI_ERR_ROOT_IRQ);
+    pci_long_test_and_set_mask(aer_cap + PCI_ERR_ROOT_STATUS,
+                               vector << PCI_ERR_ROOT_IRQ_SHIFT);
+}
+
+static unsigned int pcie_aer_root_get_vector(PCIDevice *dev)
+{
+    uint8_t *aer_cap = dev->config + dev->exp.aer_cap;
+    uint32_t root_status = pci_get_long(aer_cap + PCI_ERR_ROOT_STATUS);
+    return (root_status & PCI_ERR_ROOT_IRQ) >> PCI_ERR_ROOT_IRQ_SHIFT;
+}
+
+/* Given a status register, get corresponding bits in the command register */
+static uint32_t pcie_aer_status_to_cmd(uint32_t status)
+{
+    uint32_t cmd = 0;
+    if (status & PCI_ERR_ROOT_COR_RCV) {
+        cmd |= PCI_ERR_ROOT_CMD_COR_EN;
+    }
+    if (status & PCI_ERR_ROOT_NONFATAL_RCV) {
+        cmd |= PCI_ERR_ROOT_CMD_NONFATAL_EN;
+    }
+    if (status & PCI_ERR_ROOT_FATAL_RCV) {
+        cmd |= PCI_ERR_ROOT_CMD_FATAL_EN;
+    }
+    return cmd;
+}
+
+static void pcie_aer_root_notify(PCIDevice *dev)
+{
+    if (msix_enabled(dev)) {
+        msix_notify(dev, pcie_aer_root_get_vector(dev));
+    } else if (msi_enabled(dev)) {
+        msi_notify(dev, pcie_aer_root_get_vector(dev));
+    } else {
+        pci_irq_assert(dev);
+    }
+}
+
+/*
+ * 6.2.6 Error Message Control
+ * Figure 6-3
+ * root port part
+ */
+static void pcie_aer_msg_root_port(PCIDevice *dev, const PCIEAERMsg *msg)
+{
+    uint16_t cmd;
+    uint8_t *aer_cap;
+    uint32_t root_cmd;
+    uint32_t root_status, prev_status;
+
+    cmd = pci_get_word(dev->config + PCI_COMMAND);
+    aer_cap = dev->config + dev->exp.aer_cap;
+    root_cmd = pci_get_long(aer_cap + PCI_ERR_ROOT_COMMAND);
+    prev_status = root_status = pci_get_long(aer_cap + PCI_ERR_ROOT_STATUS);
+
+    if (cmd & PCI_COMMAND_SERR) {
+        /* System Error.
+         *
+         * The way to report System Error is platform specific and
+         * it isn't implemented in qemu right now.
+         * So just discard the error for now.
+         * An OS which cares about AER would receive errors via the
+         * native AER mechanism, so this wouldn't matter.
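+         * (A fuller emulation would raise a platform-specific interrupt
+         * here, e.g. an NMI on PC-like machines; silently dropping it is
+         * a deliberate simplification.)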
+ */
+    }
+
+    /* Error Message Received: Root Error Status register */
+    switch (msg->severity) {
+    case PCI_ERR_ROOT_CMD_COR_EN:
+        if (root_status & PCI_ERR_ROOT_COR_RCV) {
+            root_status |= PCI_ERR_ROOT_MULTI_COR_RCV;
+        } else {
+            pci_set_word(aer_cap + PCI_ERR_ROOT_ERR_SRC + PCI_ERR_SRC_COR_OFFS,
+                         msg->source_id);
+        }
+        root_status |= PCI_ERR_ROOT_COR_RCV;
+        break;
+    case PCI_ERR_ROOT_CMD_NONFATAL_EN:
+        root_status |= PCI_ERR_ROOT_NONFATAL_RCV;
+        break;
+    case PCI_ERR_ROOT_CMD_FATAL_EN:
+        if (!(root_status & PCI_ERR_ROOT_UNCOR_RCV)) {
+            root_status |= PCI_ERR_ROOT_FIRST_FATAL;
+        }
+        root_status |= PCI_ERR_ROOT_FATAL_RCV;
+        break;
+    default:
+        abort();
+        break;
+    }
+    if (pcie_aer_msg_is_uncor(msg)) {
+        if (root_status & PCI_ERR_ROOT_UNCOR_RCV) {
+            root_status |= PCI_ERR_ROOT_MULTI_UNCOR_RCV;
+        } else {
+            pci_set_word(aer_cap + PCI_ERR_ROOT_ERR_SRC +
+                         PCI_ERR_SRC_UNCOR_OFFS, msg->source_id);
+        }
+        root_status |= PCI_ERR_ROOT_UNCOR_RCV;
+    }
+    pci_set_long(aer_cap + PCI_ERR_ROOT_STATUS, root_status);
+
+    /* 6.2.4.1.2 Interrupt Generation */
+    /* All the above did was set some bits in the status register.
+     * Specifically those that match the message severity.
+     * The code below relies on this fact. */
+    if (!(root_cmd & msg->severity) ||
+        (pcie_aer_status_to_cmd(prev_status) & root_cmd)) {
+        /* Condition is not being set, or was already true, so nothing to do. */
+        return;
+    }
+
+    pcie_aer_root_notify(dev);
+}
+
+/*
+ * 6.2.6 Error Message Control Figure 6-3
+ *
+ * Walk up the bus tree from the device, propagate the error message.
+ */
+static void pcie_aer_msg(PCIDevice *dev, const PCIEAERMsg *msg)
+{
+    uint8_t type;
+
+    while (dev) {
+        if (!pci_is_express(dev)) {
+            /* just ignore it */
+            /* TODO: Shouldn't we set PCI_STATUS_SIG_SYSTEM_ERROR?
+             * Consider e.g. a PCI bridge above a PCI Express device. */
+            return;
+        }
+
+        type = pcie_cap_get_type(dev);
+        if ((type == PCI_EXP_TYPE_ROOT_PORT ||
+            type == PCI_EXP_TYPE_UPSTREAM ||
+            type == PCI_EXP_TYPE_DOWNSTREAM) &&
+            !pcie_aer_msg_vbridge(dev, msg)) {
+            return;
+        }
+        if (!pcie_aer_msg_alldev(dev, msg)) {
+            return;
+        }
+        if (type == PCI_EXP_TYPE_ROOT_PORT) {
+            pcie_aer_msg_root_port(dev, msg);
+            /* The root port can notify the system itself,
+               or send the error message to a root complex event collector. */
+            /*
+             * If the root port is associated with an event collector,
+             * return the root complex event collector here.
+             * For now the root complex event collector isn't supported.
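+             * (Root complex event collectors are optional per the spec;
+             * in this model every root port therefore reports through its
+             * own Root Error Status register.)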
+ */ + return; + } + dev = pci_bridge_get_device(pci_get_bus(dev)); + } +} + +static void pcie_aer_update_log(PCIDevice *dev, const PCIEAERErr *err) +{ + uint8_t *aer_cap = dev->config + dev->exp.aer_cap; + uint8_t first_bit = ctz32(err->status); + uint32_t errcap = pci_get_long(aer_cap + PCI_ERR_CAP); + int i; + + assert(err->status); + assert(!(err->status & (err->status - 1))); + + errcap &= ~(PCI_ERR_CAP_FEP_MASK | PCI_ERR_CAP_TLP); + errcap |= PCI_ERR_CAP_FEP(first_bit); + + if (err->flags & PCIE_AER_ERR_HEADER_VALID) { + for (i = 0; i < ARRAY_SIZE(err->header); ++i) { + /* 7.10.8 Header Log Register */ + uint8_t *header_log = + aer_cap + PCI_ERR_HEADER_LOG + i * sizeof err->header[0]; + stl_be_p(header_log, err->header[i]); + } + } else { + assert(!(err->flags & PCIE_AER_ERR_TLP_PREFIX_PRESENT)); + memset(aer_cap + PCI_ERR_HEADER_LOG, 0, PCI_ERR_HEADER_LOG_SIZE); + } + + if ((err->flags & PCIE_AER_ERR_TLP_PREFIX_PRESENT) && + (pci_get_long(dev->config + dev->exp.exp_cap + PCI_EXP_DEVCAP2) & + PCI_EXP_DEVCAP2_EETLPP)) { + for (i = 0; i < ARRAY_SIZE(err->prefix); ++i) { + /* 7.10.12 tlp prefix log register */ + uint8_t *prefix_log = + aer_cap + PCI_ERR_TLP_PREFIX_LOG + i * sizeof err->prefix[0]; + stl_be_p(prefix_log, err->prefix[i]); + } + errcap |= PCI_ERR_CAP_TLP; + } else { + memset(aer_cap + PCI_ERR_TLP_PREFIX_LOG, 0, + PCI_ERR_TLP_PREFIX_LOG_SIZE); + } + pci_set_long(aer_cap + PCI_ERR_CAP, errcap); +} + +static void pcie_aer_clear_log(PCIDevice *dev) +{ + uint8_t *aer_cap = dev->config + dev->exp.aer_cap; + + pci_long_test_and_clear_mask(aer_cap + PCI_ERR_CAP, + PCI_ERR_CAP_FEP_MASK | PCI_ERR_CAP_TLP); + + memset(aer_cap + PCI_ERR_HEADER_LOG, 0, PCI_ERR_HEADER_LOG_SIZE); + memset(aer_cap + PCI_ERR_TLP_PREFIX_LOG, 0, PCI_ERR_TLP_PREFIX_LOG_SIZE); +} + +static void pcie_aer_clear_error(PCIDevice *dev) +{ + uint8_t *aer_cap = dev->config + dev->exp.aer_cap; + uint32_t errcap = pci_get_long(aer_cap + PCI_ERR_CAP); + PCIEAERLog *aer_log = &dev->exp.aer_log; + PCIEAERErr err; + + if (!(errcap & PCI_ERR_CAP_MHRE) || !aer_log->log_num) { + pcie_aer_clear_log(dev); + return; + } + + /* + * If more errors are queued, set corresponding bits in uncorrectable + * error status. + * We emulate uncorrectable error status register as W1CS. + * So set bit in uncorrectable error status here again for multiple + * error recording support. + * + * 6.2.4.2 Multiple Error Handling(Advanced Error Reporting Capability) + */ + pcie_aer_update_uncor_status(dev); + + aer_log_del_err(aer_log, &err); + pcie_aer_update_log(dev, &err); +} + +static int pcie_aer_record_error(PCIDevice *dev, + const PCIEAERErr *err) +{ + uint8_t *aer_cap = dev->config + dev->exp.aer_cap; + uint32_t errcap = pci_get_long(aer_cap + PCI_ERR_CAP); + int fep = PCI_ERR_CAP_FEP(errcap); + + assert(err->status); + assert(!(err->status & (err->status - 1))); + + if (errcap & PCI_ERR_CAP_MHRE && + (pci_get_long(aer_cap + PCI_ERR_UNCOR_STATUS) & (1U << fep))) { + /* Not first error. 
queue error */ + if (aer_log_add_err(&dev->exp.aer_log, err) < 0) { + /* overflow */ + return -1; + } + return 0; + } + + pcie_aer_update_log(dev, err); + return 0; +} + +typedef struct PCIEAERInject { + PCIDevice *dev; + uint8_t *aer_cap; + const PCIEAERErr *err; + uint16_t devctl; + uint16_t devsta; + uint32_t error_status; + bool unsupported_request; + bool log_overflow; + PCIEAERMsg msg; +} PCIEAERInject; + +static bool pcie_aer_inject_cor_error(PCIEAERInject *inj, + uint32_t uncor_status, + bool is_advisory_nonfatal) +{ + PCIDevice *dev = inj->dev; + + inj->devsta |= PCI_EXP_DEVSTA_CED; + if (inj->unsupported_request) { + inj->devsta |= PCI_EXP_DEVSTA_URD; + } + pci_set_word(dev->config + dev->exp.exp_cap + PCI_EXP_DEVSTA, inj->devsta); + + if (inj->aer_cap) { + uint32_t mask; + pci_long_test_and_set_mask(inj->aer_cap + PCI_ERR_COR_STATUS, + inj->error_status); + mask = pci_get_long(inj->aer_cap + PCI_ERR_COR_MASK); + if (mask & inj->error_status) { + return false; + } + if (is_advisory_nonfatal) { + uint32_t uncor_mask = + pci_get_long(inj->aer_cap + PCI_ERR_UNCOR_MASK); + if (!(uncor_mask & uncor_status)) { + inj->log_overflow = !!pcie_aer_record_error(dev, inj->err); + } + pci_long_test_and_set_mask(inj->aer_cap + PCI_ERR_UNCOR_STATUS, + uncor_status); + } + } + + if (inj->unsupported_request && !(inj->devctl & PCI_EXP_DEVCTL_URRE)) { + return false; + } + if (!(inj->devctl & PCI_EXP_DEVCTL_CERE)) { + return false; + } + + inj->msg.severity = PCI_ERR_ROOT_CMD_COR_EN; + return true; +} + +static bool pcie_aer_inject_uncor_error(PCIEAERInject *inj, bool is_fatal) +{ + PCIDevice *dev = inj->dev; + uint16_t cmd; + + if (is_fatal) { + inj->devsta |= PCI_EXP_DEVSTA_FED; + } else { + inj->devsta |= PCI_EXP_DEVSTA_NFED; + } + if (inj->unsupported_request) { + inj->devsta |= PCI_EXP_DEVSTA_URD; + } + pci_set_long(dev->config + dev->exp.exp_cap + PCI_EXP_DEVSTA, inj->devsta); + + if (inj->aer_cap) { + uint32_t mask = pci_get_long(inj->aer_cap + PCI_ERR_UNCOR_MASK); + if (mask & inj->error_status) { + pci_long_test_and_set_mask(inj->aer_cap + PCI_ERR_UNCOR_STATUS, + inj->error_status); + return false; + } + + inj->log_overflow = !!pcie_aer_record_error(dev, inj->err); + pci_long_test_and_set_mask(inj->aer_cap + PCI_ERR_UNCOR_STATUS, + inj->error_status); + } + + cmd = pci_get_word(dev->config + PCI_COMMAND); + if (inj->unsupported_request && + !(inj->devctl & PCI_EXP_DEVCTL_URRE) && !(cmd & PCI_COMMAND_SERR)) { + return false; + } + if (is_fatal) { + if (!((cmd & PCI_COMMAND_SERR) || + (inj->devctl & PCI_EXP_DEVCTL_FERE))) { + return false; + } + inj->msg.severity = PCI_ERR_ROOT_CMD_FATAL_EN; + } else { + if (!((cmd & PCI_COMMAND_SERR) || + (inj->devctl & PCI_EXP_DEVCTL_NFERE))) { + return false; + } + inj->msg.severity = PCI_ERR_ROOT_CMD_NONFATAL_EN; + } + return true; +} + +/* + * non-Function specific error must be recorded in all functions. + * It is the responsibility of the caller of this function. + * It is also caller's responsibility to determine which function should + * report the error. 
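+ * (For a non-Function specific error on a multi-function device this
+ * typically means picking one function to signal the error on behalf
+ * of the others.)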
+ * + * 6.2.4 Error Logging + * 6.2.5 Sequence of Device Error Signaling and Logging Operations + * Figure 6-2: Flowchart Showing Sequence of Device Error Signaling and Logging + * Operations + */ +static int pcie_aer_inject_error(PCIDevice *dev, const PCIEAERErr *err) +{ + uint8_t *aer_cap = NULL; + uint16_t devctl = 0; + uint16_t devsta = 0; + uint32_t error_status = err->status; + PCIEAERInject inj; + + if (!pci_is_express(dev)) { + return -ENOSYS; + } + + if (err->flags & PCIE_AER_ERR_IS_CORRECTABLE) { + error_status &= PCI_ERR_COR_SUPPORTED; + } else { + error_status &= PCI_ERR_UNC_SUPPORTED; + } + + /* invalid status bit. one and only one bit must be set */ + if (!error_status || (error_status & (error_status - 1))) { + return -EINVAL; + } + + if (dev->exp.aer_cap) { + uint8_t *exp_cap = dev->config + dev->exp.exp_cap; + aer_cap = dev->config + dev->exp.aer_cap; + devctl = pci_get_long(exp_cap + PCI_EXP_DEVCTL); + devsta = pci_get_long(exp_cap + PCI_EXP_DEVSTA); + } + + inj.dev = dev; + inj.aer_cap = aer_cap; + inj.err = err; + inj.devctl = devctl; + inj.devsta = devsta; + inj.error_status = error_status; + inj.unsupported_request = !(err->flags & PCIE_AER_ERR_IS_CORRECTABLE) && + err->status == PCI_ERR_UNC_UNSUP; + inj.log_overflow = false; + + if (err->flags & PCIE_AER_ERR_IS_CORRECTABLE) { + if (!pcie_aer_inject_cor_error(&inj, 0, false)) { + return 0; + } + } else { + bool is_fatal = + pcie_aer_uncor_default_severity(error_status) == + PCI_ERR_ROOT_CMD_FATAL_EN; + if (aer_cap) { + is_fatal = + error_status & pci_get_long(aer_cap + PCI_ERR_UNCOR_SEVER); + } + if (!is_fatal && (err->flags & PCIE_AER_ERR_MAYBE_ADVISORY)) { + inj.error_status = PCI_ERR_COR_ADV_NONFATAL; + if (!pcie_aer_inject_cor_error(&inj, error_status, true)) { + return 0; + } + } else { + if (!pcie_aer_inject_uncor_error(&inj, is_fatal)) { + return 0; + } + } + } + + /* send up error message */ + inj.msg.source_id = err->source_id; + pcie_aer_msg(dev, &inj.msg); + + if (inj.log_overflow) { + PCIEAERErr header_log_overflow = { + .status = PCI_ERR_COR_HL_OVERFLOW, + .flags = PCIE_AER_ERR_IS_CORRECTABLE, + }; + int ret = pcie_aer_inject_error(dev, &header_log_overflow); + assert(!ret); + } + return 0; +} + +void pcie_aer_write_config(PCIDevice *dev, + uint32_t addr, uint32_t val, int len) +{ + uint8_t *aer_cap = dev->config + dev->exp.aer_cap; + uint32_t errcap = pci_get_long(aer_cap + PCI_ERR_CAP); + uint32_t first_error = 1U << PCI_ERR_CAP_FEP(errcap); + uint32_t uncorsta = pci_get_long(aer_cap + PCI_ERR_UNCOR_STATUS); + + /* uncorrectable error */ + if (!(uncorsta & first_error)) { + /* the bit that corresponds to the first error is cleared */ + pcie_aer_clear_error(dev); + } else if (errcap & PCI_ERR_CAP_MHRE) { + /* When PCI_ERR_CAP_MHRE is enabled and the first error isn't cleared + * nothing should happen. So we have to revert the modification to + * the register. + */ + pcie_aer_update_uncor_status(dev); + } else { + /* capability & control + * PCI_ERR_CAP_MHRE might be cleared, so clear of header log. + */ + aer_log_clear_all_err(&dev->exp.aer_log); + } +} + +void pcie_aer_root_init(PCIDevice *dev) +{ + uint16_t pos = dev->exp.aer_cap; + + pci_set_long(dev->wmask + pos + PCI_ERR_ROOT_COMMAND, + PCI_ERR_ROOT_CMD_EN_MASK); + pci_set_long(dev->w1cmask + pos + PCI_ERR_ROOT_STATUS, + PCI_ERR_ROOT_STATUS_REPORT_MASK); + /* PCI_ERR_ROOT_IRQ is RO but devices change it using a + * device-specific method. 
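+     * (That is why PCI_ERR_ROOT_IRQ is excluded from the cmask set up
+     * below, rather than being checked like the other read-only bits.)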
+ */
+    pci_set_long(dev->cmask + pos + PCI_ERR_ROOT_STATUS,
+                 ~PCI_ERR_ROOT_IRQ);
+}
+
+void pcie_aer_root_reset(PCIDevice *dev)
+{
+    uint8_t* aer_cap = dev->config + dev->exp.aer_cap;
+
+    pci_set_long(aer_cap + PCI_ERR_ROOT_COMMAND, 0);
+
+    /*
+     * Advanced Error Interrupt Message Number in Root Error Status Register
+     * must be updated by chip-dependent code because it's chip dependent
+     * which number is used.
+     */
+}
+
+void pcie_aer_root_write_config(PCIDevice *dev,
+                                uint32_t addr, uint32_t val, int len,
+                                uint32_t root_cmd_prev)
+{
+    uint8_t *aer_cap = dev->config + dev->exp.aer_cap;
+    uint32_t root_status = pci_get_long(aer_cap + PCI_ERR_ROOT_STATUS);
+    uint32_t enabled_cmd = pcie_aer_status_to_cmd(root_status);
+    uint32_t root_cmd = pci_get_long(aer_cap + PCI_ERR_ROOT_COMMAND);
+    /* 6.2.4.1.2 Interrupt Generation */
+    if (!msix_enabled(dev) && !msi_enabled(dev)) {
+        pci_set_irq(dev, !!(root_cmd & enabled_cmd));
+        return;
+    }
+
+    if ((root_cmd_prev & enabled_cmd) || !(root_cmd & enabled_cmd)) {
+        /* Send MSI on transition from false to true. */
+        return;
+    }
+
+    pcie_aer_root_notify(dev);
+}
+
+static const VMStateDescription vmstate_pcie_aer_err = {
+    .name = "PCIE_AER_ERROR",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT32(status, PCIEAERErr),
+        VMSTATE_UINT16(source_id, PCIEAERErr),
+        VMSTATE_UINT16(flags, PCIEAERErr),
+        VMSTATE_UINT32_ARRAY(header, PCIEAERErr, 4),
+        VMSTATE_UINT32_ARRAY(prefix, PCIEAERErr, 4),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+static bool pcie_aer_state_log_num_valid(void *opaque, int version_id)
+{
+    PCIEAERLog *s = opaque;
+
+    return s->log_num <= s->log_max;
+}
+
+const VMStateDescription vmstate_pcie_aer_log = {
+    .name = "PCIE_AER_ERROR_LOG",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT16(log_num, PCIEAERLog),
+        VMSTATE_UINT16_EQUAL(log_max, PCIEAERLog, NULL),
+        VMSTATE_VALIDATE("log_num <= log_max", pcie_aer_state_log_num_valid),
+        VMSTATE_STRUCT_VARRAY_POINTER_UINT16(log, PCIEAERLog, log_num,
+                                             vmstate_pcie_aer_err, PCIEAERErr),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+typedef struct PCIEAERErrorName {
+    const char *name;
+    uint32_t val;
+    bool correctable;
+} PCIEAERErrorName;
+
+/*
+ * AER error name -> value conversion table
+ * This naming scheme is the same as the Linux aer-injection tool's.
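+ *
+ * As an illustration only (the exact option syntax is defined by the
+ * hmp-commands table, not here), the monitor command implemented at the
+ * bottom of this file accepts either a symbolic name from this table or
+ * a raw status value:
+ *   (qemu) pcie_aer_inject_error <id> RCVR
+ *   (qemu) pcie_aer_inject_error <id> 0x1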
+ */ +static const struct PCIEAERErrorName pcie_aer_error_list[] = { + { + .name = "DLP", + .val = PCI_ERR_UNC_DLP, + .correctable = false, + }, { + .name = "SDN", + .val = PCI_ERR_UNC_SDN, + .correctable = false, + }, { + .name = "POISON_TLP", + .val = PCI_ERR_UNC_POISON_TLP, + .correctable = false, + }, { + .name = "FCP", + .val = PCI_ERR_UNC_FCP, + .correctable = false, + }, { + .name = "COMP_TIME", + .val = PCI_ERR_UNC_COMP_TIME, + .correctable = false, + }, { + .name = "COMP_ABORT", + .val = PCI_ERR_UNC_COMP_ABORT, + .correctable = false, + }, { + .name = "UNX_COMP", + .val = PCI_ERR_UNC_UNX_COMP, + .correctable = false, + }, { + .name = "RX_OVER", + .val = PCI_ERR_UNC_RX_OVER, + .correctable = false, + }, { + .name = "MALF_TLP", + .val = PCI_ERR_UNC_MALF_TLP, + .correctable = false, + }, { + .name = "ECRC", + .val = PCI_ERR_UNC_ECRC, + .correctable = false, + }, { + .name = "UNSUP", + .val = PCI_ERR_UNC_UNSUP, + .correctable = false, + }, { + .name = "ACSV", + .val = PCI_ERR_UNC_ACSV, + .correctable = false, + }, { + .name = "INTN", + .val = PCI_ERR_UNC_INTN, + .correctable = false, + }, { + .name = "MCBTLP", + .val = PCI_ERR_UNC_MCBTLP, + .correctable = false, + }, { + .name = "ATOP_EBLOCKED", + .val = PCI_ERR_UNC_ATOP_EBLOCKED, + .correctable = false, + }, { + .name = "TLP_PRF_BLOCKED", + .val = PCI_ERR_UNC_TLP_PRF_BLOCKED, + .correctable = false, + }, { + .name = "RCVR", + .val = PCI_ERR_COR_RCVR, + .correctable = true, + }, { + .name = "BAD_TLP", + .val = PCI_ERR_COR_BAD_TLP, + .correctable = true, + }, { + .name = "BAD_DLLP", + .val = PCI_ERR_COR_BAD_DLLP, + .correctable = true, + }, { + .name = "REP_ROLL", + .val = PCI_ERR_COR_REP_ROLL, + .correctable = true, + }, { + .name = "REP_TIMER", + .val = PCI_ERR_COR_REP_TIMER, + .correctable = true, + }, { + .name = "ADV_NONFATAL", + .val = PCI_ERR_COR_ADV_NONFATAL, + .correctable = true, + }, { + .name = "INTERNAL", + .val = PCI_ERR_COR_INTERNAL, + .correctable = true, + }, { + .name = "HL_OVERFLOW", + .val = PCI_ERR_COR_HL_OVERFLOW, + .correctable = true, + }, +}; + +static int pcie_aer_parse_error_string(const char *error_name, + uint32_t *status, bool *correctable) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(pcie_aer_error_list); i++) { + const PCIEAERErrorName *e = &pcie_aer_error_list[i]; + if (strcmp(error_name, e->name)) { + continue; + } + + *status = e->val; + *correctable = e->correctable; + return 0; + } + return -EINVAL; +} + +/* + * Inject an error described by @qdict. + * On success, set @details to show where error was sent. + * Return negative errno if injection failed and a message was emitted. + */ +static int do_pcie_aer_inject_error(Monitor *mon, + const QDict *qdict, + PCIEErrorDetails *details) +{ + const char *id = qdict_get_str(qdict, "id"); + const char *error_name; + uint32_t error_status; + bool correctable; + PCIDevice *dev; + PCIEAERErr err; + int ret; + + ret = pci_qdev_find_device(id, &dev); + if (ret < 0) { + monitor_printf(mon, + "id or pci device path is invalid or device not " + "found. %s\n", id); + return ret; + } + if (!pci_is_express(dev)) { + monitor_printf(mon, "the device doesn't support pci express. %s\n", + id); + return -ENOSYS; + } + + error_name = qdict_get_str(qdict, "error_status"); + if (pcie_aer_parse_error_string(error_name, &error_status, &correctable)) { + char *e = NULL; + error_status = strtoul(error_name, &e, 0); + correctable = qdict_get_try_bool(qdict, "correctable", false); + if (!e || *e != '\0') { + monitor_printf(mon, "invalid error status value. 
\"%s\"", + error_name); + return -EINVAL; + } + } + err.status = error_status; + err.source_id = pci_requester_id(dev); + + err.flags = 0; + if (correctable) { + err.flags |= PCIE_AER_ERR_IS_CORRECTABLE; + } + if (qdict_get_try_bool(qdict, "advisory_non_fatal", false)) { + err.flags |= PCIE_AER_ERR_MAYBE_ADVISORY; + } + if (qdict_haskey(qdict, "header0")) { + err.flags |= PCIE_AER_ERR_HEADER_VALID; + } + if (qdict_haskey(qdict, "prefix0")) { + err.flags |= PCIE_AER_ERR_TLP_PREFIX_PRESENT; + } + + err.header[0] = qdict_get_try_int(qdict, "header0", 0); + err.header[1] = qdict_get_try_int(qdict, "header1", 0); + err.header[2] = qdict_get_try_int(qdict, "header2", 0); + err.header[3] = qdict_get_try_int(qdict, "header3", 0); + + err.prefix[0] = qdict_get_try_int(qdict, "prefix0", 0); + err.prefix[1] = qdict_get_try_int(qdict, "prefix1", 0); + err.prefix[2] = qdict_get_try_int(qdict, "prefix2", 0); + err.prefix[3] = qdict_get_try_int(qdict, "prefix3", 0); + + ret = pcie_aer_inject_error(dev, &err); + if (ret < 0) { + monitor_printf(mon, "failed to inject error: %s\n", + strerror(-ret)); + return ret; + } + details->id = id; + details->root_bus = pci_root_bus_path(dev); + details->bus = pci_dev_bus_num(dev); + details->devfn = dev->devfn; + + return 0; +} + +void hmp_pcie_aer_inject_error(Monitor *mon, const QDict *qdict) +{ + PCIEErrorDetails data; + + if (do_pcie_aer_inject_error(mon, qdict, &data) < 0) { + return; + } + + monitor_printf(mon, "OK id: %s root bus: %s, bus: %x devfn: %x.%x\n", + data.id, data.root_bus, data.bus, + PCI_SLOT(data.devfn), PCI_FUNC(data.devfn)); +} diff --git a/hw/pci/pcie_host.c b/hw/pci/pcie_host.c new file mode 100644 index 000000000..5abbe8322 --- /dev/null +++ b/hw/pci/pcie_host.c @@ -0,0 +1,136 @@ +/* + * pcie_host.c + * utility functions for pci express host bridge. + * + * Copyright (c) 2009 Isaku Yamahata + * VA Linux Systems Japan K.K. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . 
+ */ + +#include "qemu/osdep.h" +#include "hw/pci/pci.h" +#include "hw/pci/pcie_host.h" +#include "qemu/module.h" + +/* a helper function to get a PCIDevice for a given mmconfig address */ +static inline PCIDevice *pcie_dev_find_by_mmcfg_addr(PCIBus *s, + uint32_t mmcfg_addr) +{ + return pci_find_device(s, PCIE_MMCFG_BUS(mmcfg_addr), + PCIE_MMCFG_DEVFN(mmcfg_addr)); +} + +static void pcie_mmcfg_data_write(void *opaque, hwaddr mmcfg_addr, + uint64_t val, unsigned len) +{ + PCIExpressHost *e = opaque; + PCIBus *s = e->pci.bus; + PCIDevice *pci_dev = pcie_dev_find_by_mmcfg_addr(s, mmcfg_addr); + uint32_t addr; + uint32_t limit; + + if (!pci_dev) { + return; + } + addr = PCIE_MMCFG_CONFOFFSET(mmcfg_addr); + limit = pci_config_size(pci_dev); + pci_host_config_write_common(pci_dev, addr, limit, val, len); +} + +static uint64_t pcie_mmcfg_data_read(void *opaque, + hwaddr mmcfg_addr, + unsigned len) +{ + PCIExpressHost *e = opaque; + PCIBus *s = e->pci.bus; + PCIDevice *pci_dev = pcie_dev_find_by_mmcfg_addr(s, mmcfg_addr); + uint32_t addr; + uint32_t limit; + + if (!pci_dev) { + return ~0x0; + } + addr = PCIE_MMCFG_CONFOFFSET(mmcfg_addr); + limit = pci_config_size(pci_dev); + return pci_host_config_read_common(pci_dev, addr, limit, len); +} + +static const MemoryRegionOps pcie_mmcfg_ops = { + .read = pcie_mmcfg_data_read, + .write = pcie_mmcfg_data_write, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static void pcie_host_init(Object *obj) +{ + PCIExpressHost *e = PCIE_HOST_BRIDGE(obj); + + e->base_addr = PCIE_BASE_ADDR_UNMAPPED; + memory_region_init_io(&e->mmio, OBJECT(e), &pcie_mmcfg_ops, e, "pcie-mmcfg-mmio", + PCIE_MMCFG_SIZE_MAX); +} + +void pcie_host_mmcfg_unmap(PCIExpressHost *e) +{ + if (e->base_addr != PCIE_BASE_ADDR_UNMAPPED) { + memory_region_del_subregion(get_system_memory(), &e->mmio); + e->base_addr = PCIE_BASE_ADDR_UNMAPPED; + } +} + +void pcie_host_mmcfg_init(PCIExpressHost *e, uint32_t size) +{ + assert(!(size & (size - 1))); /* power of 2 */ + assert(size >= PCIE_MMCFG_SIZE_MIN); + assert(size <= PCIE_MMCFG_SIZE_MAX); + e->size = size; + memory_region_set_size(&e->mmio, e->size); +} + +void pcie_host_mmcfg_map(PCIExpressHost *e, hwaddr addr, + uint32_t size) +{ + pcie_host_mmcfg_init(e, size); + e->base_addr = addr; + memory_region_add_subregion(get_system_memory(), e->base_addr, &e->mmio); +} + +void pcie_host_mmcfg_update(PCIExpressHost *e, + int enable, + hwaddr addr, + uint32_t size) +{ + memory_region_transaction_begin(); + pcie_host_mmcfg_unmap(e); + if (enable) { + pcie_host_mmcfg_map(e, addr, size); + } + memory_region_transaction_commit(); +} + +static const TypeInfo pcie_host_type_info = { + .name = TYPE_PCIE_HOST_BRIDGE, + .parent = TYPE_PCI_HOST_BRIDGE, + .abstract = true, + .instance_size = sizeof(PCIExpressHost), + .instance_init = pcie_host_init, +}; + +static void pcie_host_register_types(void) +{ + type_register_static(&pcie_host_type_info); +} + +type_init(pcie_host_register_types) diff --git a/hw/pci/pcie_port.c b/hw/pci/pcie_port.c new file mode 100644 index 000000000..e95c1e551 --- /dev/null +++ b/hw/pci/pcie_port.c @@ -0,0 +1,185 @@ +/* + * pcie_port.c + * + * Copyright (c) 2010 Isaku Yamahata + * VA Linux Systems Japan K.K. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/pci/pcie_port.h"
+#include "hw/qdev-properties.h"
+#include "qemu/module.h"
+#include "hw/hotplug.h"
+
+void pcie_port_init_reg(PCIDevice *d)
+{
+    /* Unlike pci bridge,
+       66MHz and fast back to back don't apply to pci express port. */
+    pci_set_word(d->config + PCI_STATUS, 0);
+    pci_set_word(d->config + PCI_SEC_STATUS, 0);
+
+    /*
+     * Unlike conventional pci bridge, for some bits the spec states:
+     * Does not apply to PCI Express and must be hardwired to 0.
+     */
+    pci_word_test_and_clear_mask(d->wmask + PCI_BRIDGE_CONTROL,
+                                 PCI_BRIDGE_CTL_MASTER_ABORT |
+                                 PCI_BRIDGE_CTL_FAST_BACK |
+                                 PCI_BRIDGE_CTL_DISCARD |
+                                 PCI_BRIDGE_CTL_SEC_DISCARD |
+                                 PCI_BRIDGE_CTL_DISCARD_STATUS |
+                                 PCI_BRIDGE_CTL_DISCARD_SERR);
+}
+
+/**************************************************************************
+ * (chassis number, pcie physical slot number) -> pcie slot conversion
+ */
+struct PCIEChassis {
+    uint8_t number;
+
+    QLIST_HEAD(, PCIESlot) slots;
+    QLIST_ENTRY(PCIEChassis) next;
+};
+
+static QLIST_HEAD(, PCIEChassis) chassis = QLIST_HEAD_INITIALIZER(chassis);
+
+static struct PCIEChassis *pcie_chassis_find(uint8_t chassis_number)
+{
+    struct PCIEChassis *c;
+    QLIST_FOREACH(c, &chassis, next) {
+        if (c->number == chassis_number) {
+            break;
+        }
+    }
+    return c;
+}
+
+void pcie_chassis_create(uint8_t chassis_number)
+{
+    struct PCIEChassis *c;
+    c = pcie_chassis_find(chassis_number);
+    if (c) {
+        return;
+    }
+    c = g_malloc0(sizeof(*c));
+    c->number = chassis_number;
+    QLIST_INIT(&c->slots);
+    QLIST_INSERT_HEAD(&chassis, c, next);
+}
+
+static PCIESlot *pcie_chassis_find_slot_with_chassis(struct PCIEChassis *c,
+                                                     uint8_t slot)
+{
+    PCIESlot *s;
+    QLIST_FOREACH(s, &c->slots, next) {
+        if (s->slot == slot) {
+            break;
+        }
+    }
+    return s;
+}
+
+PCIESlot *pcie_chassis_find_slot(uint8_t chassis_number, uint16_t slot)
+{
+    struct PCIEChassis *c;
+    c = pcie_chassis_find(chassis_number);
+    if (!c) {
+        return NULL;
+    }
+    return pcie_chassis_find_slot_with_chassis(c, slot);
+}
+
+int pcie_chassis_add_slot(struct PCIESlot *slot)
+{
+    struct PCIEChassis *c;
+    c = pcie_chassis_find(slot->chassis);
+    if (!c) {
+        return -ENODEV;
+    }
+    if (pcie_chassis_find_slot_with_chassis(c, slot->slot)) {
+        return -EBUSY;
+    }
+    QLIST_INSERT_HEAD(&c->slots, slot, next);
+    return 0;
+}
+
+void pcie_chassis_del_slot(PCIESlot *s)
+{
+    QLIST_REMOVE(s, next);
+}
+
+static Property pcie_port_props[] = {
+    DEFINE_PROP_UINT8("port", PCIEPort, port, 0),
+    DEFINE_PROP_UINT16("aer_log_max", PCIEPort,
+                       parent_obj.parent_obj.exp.aer_log.log_max,
+                       PCIE_AER_LOG_MAX_DEFAULT),
+    DEFINE_PROP_END_OF_LIST()
+};
+
+static void pcie_port_class_init(ObjectClass *oc, void *data)
+{
+    DeviceClass *dc = DEVICE_CLASS(oc);
+
+    device_class_set_props(dc, pcie_port_props);
+}
+
+static const TypeInfo pcie_port_type_info = {
+    .name = TYPE_PCIE_PORT,
+    .parent = TYPE_PCI_BRIDGE,
+    .instance_size = sizeof(PCIEPort),
+    .abstract = true,
+    .class_init = pcie_port_class_init,
+};
+
+static Property pcie_slot_props[] = {
+    DEFINE_PROP_UINT8("chassis", PCIESlot, chassis, 0),
+    DEFINE_PROP_UINT16("slot", PCIESlot, slot, 0),
+    DEFINE_PROP_BOOL("hotplug", PCIESlot,
hotplug, true), + DEFINE_PROP_BOOL("x-native-hotplug", PCIESlot, native_hotplug, true), + DEFINE_PROP_END_OF_LIST() +}; + +static void pcie_slot_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(oc); + + device_class_set_props(dc, pcie_slot_props); + hc->pre_plug = pcie_cap_slot_pre_plug_cb; + hc->plug = pcie_cap_slot_plug_cb; + hc->unplug = pcie_cap_slot_unplug_cb; + hc->unplug_request = pcie_cap_slot_unplug_request_cb; +} + +static const TypeInfo pcie_slot_type_info = { + .name = TYPE_PCIE_SLOT, + .parent = TYPE_PCIE_PORT, + .instance_size = sizeof(PCIESlot), + .abstract = true, + .class_init = pcie_slot_class_init, + .interfaces = (InterfaceInfo[]) { + { TYPE_HOTPLUG_HANDLER }, + { } + } +}; + +static void pcie_port_register_types(void) +{ + type_register_static(&pcie_port_type_info); + type_register_static(&pcie_slot_type_info); +} + +type_init(pcie_port_register_types) diff --git a/hw/pci/shpc.c b/hw/pci/shpc.c new file mode 100644 index 000000000..28e62174c --- /dev/null +++ b/hw/pci/shpc.c @@ -0,0 +1,728 @@ +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/host-utils.h" +#include "qemu/range.h" +#include "qemu/error-report.h" +#include "hw/pci/shpc.h" +#include "migration/qemu-file-types.h" +#include "hw/pci/pci.h" +#include "hw/pci/pci_bus.h" +#include "hw/pci/msi.h" + +/* TODO: model power only and disabled slot states. */ +/* TODO: handle SERR and wakeups */ +/* TODO: consider enabling 66MHz support */ + +/* TODO: remove fully only on state DISABLED and LED off. + * track state to properly record this. */ + +/* SHPC Working Register Set */ +#define SHPC_BASE_OFFSET 0x00 /* 4 bytes */ +#define SHPC_SLOTS_33 0x04 /* 4 bytes. Also encodes PCI-X slots. */ +#define SHPC_SLOTS_66 0x08 /* 4 bytes. 
*/ +#define SHPC_NSLOTS 0x0C /* 1 byte */ +#define SHPC_FIRST_DEV 0x0D /* 1 byte */ +#define SHPC_PHYS_SLOT 0x0E /* 2 byte */ +#define SHPC_PHYS_NUM_MAX 0x7ff +#define SHPC_PHYS_NUM_UP 0x2000 +#define SHPC_PHYS_MRL 0x4000 +#define SHPC_PHYS_BUTTON 0x8000 +#define SHPC_SEC_BUS 0x10 /* 2 bytes */ +#define SHPC_SEC_BUS_33 0x0 +#define SHPC_SEC_BUS_66 0x1 /* Unused */ +#define SHPC_SEC_BUS_MASK 0x7 +#define SHPC_MSI_CTL 0x12 /* 1 byte */ +#define SHPC_PROG_IFC 0x13 /* 1 byte */ +#define SHPC_PROG_IFC_1_0 0x1 +#define SHPC_CMD_CODE 0x14 /* 1 byte */ +#define SHPC_CMD_TRGT 0x15 /* 1 byte */ +#define SHPC_CMD_TRGT_MIN 0x1 +#define SHPC_CMD_TRGT_MAX 0x1f +#define SHPC_CMD_STATUS 0x16 /* 2 bytes */ +#define SHPC_CMD_STATUS_BUSY 0x1 +#define SHPC_CMD_STATUS_MRL_OPEN 0x2 +#define SHPC_CMD_STATUS_INVALID_CMD 0x4 +#define SHPC_CMD_STATUS_INVALID_MODE 0x8 +#define SHPC_INT_LOCATOR 0x18 /* 4 bytes */ +#define SHPC_INT_COMMAND 0x1 +#define SHPC_SERR_LOCATOR 0x1C /* 4 bytes */ +#define SHPC_SERR_INT 0x20 /* 4 bytes */ +#define SHPC_INT_DIS 0x1 +#define SHPC_SERR_DIS 0x2 +#define SHPC_CMD_INT_DIS 0x4 +#define SHPC_ARB_SERR_DIS 0x8 +#define SHPC_CMD_DETECTED 0x10000 +#define SHPC_ARB_DETECTED 0x20000 + /* 4 bytes * slot # (start from 0) */ +#define SHPC_SLOT_REG(s) (0x24 + (s) * 4) + /* 2 bytes */ +#define SHPC_SLOT_STATUS(s) (0x0 + SHPC_SLOT_REG(s)) + +/* Same slot state masks are used for command and status registers */ +#define SHPC_SLOT_STATE_MASK 0x03 +#define SHPC_SLOT_STATE_SHIFT \ + ctz32(SHPC_SLOT_STATE_MASK) + +#define SHPC_STATE_NO 0x0 +#define SHPC_STATE_PWRONLY 0x1 +#define SHPC_STATE_ENABLED 0x2 +#define SHPC_STATE_DISABLED 0x3 + +#define SHPC_SLOT_PWR_LED_MASK 0xC +#define SHPC_SLOT_PWR_LED_SHIFT \ + ctz32(SHPC_SLOT_PWR_LED_MASK) +#define SHPC_SLOT_ATTN_LED_MASK 0x30 +#define SHPC_SLOT_ATTN_LED_SHIFT \ + ctz32(SHPC_SLOT_ATTN_LED_MASK) + +#define SHPC_LED_NO 0x0 +#define SHPC_LED_ON 0x1 +#define SHPC_LED_BLINK 0x2 +#define SHPC_LED_OFF 0x3 + +#define SHPC_SLOT_STATUS_PWR_FAULT 0x40 +#define SHPC_SLOT_STATUS_BUTTON 0x80 +#define SHPC_SLOT_STATUS_MRL_OPEN 0x100 +#define SHPC_SLOT_STATUS_66 0x200 +#define SHPC_SLOT_STATUS_PRSNT_MASK 0xC00 +#define SHPC_SLOT_STATUS_PRSNT_EMPTY 0x3 +#define SHPC_SLOT_STATUS_PRSNT_25W 0x1 +#define SHPC_SLOT_STATUS_PRSNT_15W 0x2 +#define SHPC_SLOT_STATUS_PRSNT_7_5W 0x0 + +#define SHPC_SLOT_STATUS_PRSNT_PCIX 0x3000 + + + /* 1 byte */ +#define SHPC_SLOT_EVENT_LATCH(s) (0x2 + SHPC_SLOT_REG(s)) + /* 1 byte */ +#define SHPC_SLOT_EVENT_SERR_INT_DIS(d, s) (0x3 + SHPC_SLOT_REG(s)) +#define SHPC_SLOT_EVENT_PRESENCE 0x01 +#define SHPC_SLOT_EVENT_ISOLATED_FAULT 0x02 +#define SHPC_SLOT_EVENT_BUTTON 0x04 +#define SHPC_SLOT_EVENT_MRL 0x08 +#define SHPC_SLOT_EVENT_CONNECTED_FAULT 0x10 +/* Bits below are used for Serr/Int disable only */ +#define SHPC_SLOT_EVENT_MRL_SERR_DIS 0x20 +#define SHPC_SLOT_EVENT_CONNECTED_FAULT_SERR_DIS 0x40 + +#define SHPC_MIN_SLOTS 1 +#define SHPC_MAX_SLOTS 31 +#define SHPC_SIZEOF(d) SHPC_SLOT_REG((d)->shpc->nslots) + +/* SHPC Slot identifiers */ + +/* Hotplug supported at 31 slots out of the total 32. We reserve slot 0, + and give the rest of them physical *and* pci numbers starting from 1, so + they match logical numbers. Note: this means that multiple slots must have + different chassis number values, to make chassis+physical slot unique. + TODO: make this configurable? 
*/ +#define SHPC_IDX_TO_LOGICAL(slot) ((slot) + 1) +#define SHPC_LOGICAL_TO_IDX(target) ((target) - 1) +#define SHPC_IDX_TO_PCI(slot) ((slot) + 1) +#define SHPC_PCI_TO_IDX(pci_slot) ((pci_slot) - 1) +#define SHPC_IDX_TO_PHYSICAL(slot) ((slot) + 1) + +static uint16_t shpc_get_status(SHPCDevice *shpc, int slot, uint16_t msk) +{ + uint8_t *status = shpc->config + SHPC_SLOT_STATUS(slot); + return (pci_get_word(status) & msk) >> ctz32(msk); +} + +static void shpc_set_status(SHPCDevice *shpc, + int slot, uint8_t value, uint16_t msk) +{ + uint8_t *status = shpc->config + SHPC_SLOT_STATUS(slot); + pci_word_test_and_clear_mask(status, msk); + pci_word_test_and_set_mask(status, value << ctz32(msk)); +} + +static void shpc_interrupt_update(PCIDevice *d) +{ + SHPCDevice *shpc = d->shpc; + int slot; + int level = 0; + uint32_t serr_int; + uint32_t int_locator = 0; + + /* Update interrupt locator register */ + for (slot = 0; slot < shpc->nslots; ++slot) { + uint8_t event = shpc->config[SHPC_SLOT_EVENT_LATCH(slot)]; + uint8_t disable = shpc->config[SHPC_SLOT_EVENT_SERR_INT_DIS(d, slot)]; + uint32_t mask = 1U << SHPC_IDX_TO_LOGICAL(slot); + if (event & ~disable) { + int_locator |= mask; + } + } + serr_int = pci_get_long(shpc->config + SHPC_SERR_INT); + if ((serr_int & SHPC_CMD_DETECTED) && !(serr_int & SHPC_CMD_INT_DIS)) { + int_locator |= SHPC_INT_COMMAND; + } + pci_set_long(shpc->config + SHPC_INT_LOCATOR, int_locator); + level = (!(serr_int & SHPC_INT_DIS) && int_locator) ? 1 : 0; + if (msi_enabled(d) && shpc->msi_requested != level) + msi_notify(d, 0); + else + pci_set_irq(d, level); + shpc->msi_requested = level; +} + +static void shpc_set_sec_bus_speed(SHPCDevice *shpc, uint8_t speed) +{ + switch (speed) { + case SHPC_SEC_BUS_33: + shpc->config[SHPC_SEC_BUS] &= ~SHPC_SEC_BUS_MASK; + shpc->config[SHPC_SEC_BUS] |= speed; + break; + default: + pci_word_test_and_set_mask(shpc->config + SHPC_CMD_STATUS, + SHPC_CMD_STATUS_INVALID_MODE); + } +} + +void shpc_reset(PCIDevice *d) +{ + SHPCDevice *shpc = d->shpc; + int nslots = shpc->nslots; + int i; + memset(shpc->config, 0, SHPC_SIZEOF(d)); + pci_set_byte(shpc->config + SHPC_NSLOTS, nslots); + pci_set_long(shpc->config + SHPC_SLOTS_33, nslots); + pci_set_long(shpc->config + SHPC_SLOTS_66, 0); + pci_set_byte(shpc->config + SHPC_FIRST_DEV, SHPC_IDX_TO_PCI(0)); + pci_set_word(shpc->config + SHPC_PHYS_SLOT, + SHPC_IDX_TO_PHYSICAL(0) | + SHPC_PHYS_NUM_UP | + SHPC_PHYS_MRL | + SHPC_PHYS_BUTTON); + pci_set_long(shpc->config + SHPC_SERR_INT, SHPC_INT_DIS | + SHPC_SERR_DIS | + SHPC_CMD_INT_DIS | + SHPC_ARB_SERR_DIS); + pci_set_byte(shpc->config + SHPC_PROG_IFC, SHPC_PROG_IFC_1_0); + pci_set_word(shpc->config + SHPC_SEC_BUS, SHPC_SEC_BUS_33); + for (i = 0; i < shpc->nslots; ++i) { + pci_set_byte(shpc->config + SHPC_SLOT_EVENT_SERR_INT_DIS(d, i), + SHPC_SLOT_EVENT_PRESENCE | + SHPC_SLOT_EVENT_ISOLATED_FAULT | + SHPC_SLOT_EVENT_BUTTON | + SHPC_SLOT_EVENT_MRL | + SHPC_SLOT_EVENT_CONNECTED_FAULT | + SHPC_SLOT_EVENT_MRL_SERR_DIS | + SHPC_SLOT_EVENT_CONNECTED_FAULT_SERR_DIS); + if (shpc->sec_bus->devices[PCI_DEVFN(SHPC_IDX_TO_PCI(i), 0)]) { + shpc_set_status(shpc, i, SHPC_STATE_ENABLED, SHPC_SLOT_STATE_MASK); + shpc_set_status(shpc, i, 0, SHPC_SLOT_STATUS_MRL_OPEN); + shpc_set_status(shpc, i, SHPC_SLOT_STATUS_PRSNT_7_5W, + SHPC_SLOT_STATUS_PRSNT_MASK); + shpc_set_status(shpc, i, SHPC_LED_ON, SHPC_SLOT_PWR_LED_MASK); + } else { + shpc_set_status(shpc, i, SHPC_STATE_DISABLED, SHPC_SLOT_STATE_MASK); + shpc_set_status(shpc, i, 1, SHPC_SLOT_STATUS_MRL_OPEN); + 
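            /* Nothing plugged in: the rest of the reset state reports an
+             * empty slot with the power LED off. */
+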
shpc_set_status(shpc, i, SHPC_SLOT_STATUS_PRSNT_EMPTY, + SHPC_SLOT_STATUS_PRSNT_MASK); + shpc_set_status(shpc, i, SHPC_LED_OFF, SHPC_SLOT_PWR_LED_MASK); + } + shpc_set_status(shpc, i, 0, SHPC_SLOT_STATUS_66); + } + shpc_set_sec_bus_speed(shpc, SHPC_SEC_BUS_33); + shpc->msi_requested = 0; + shpc_interrupt_update(d); +} + +static void shpc_invalid_command(SHPCDevice *shpc) +{ + pci_word_test_and_set_mask(shpc->config + SHPC_CMD_STATUS, + SHPC_CMD_STATUS_INVALID_CMD); +} + +static void shpc_free_devices_in_slot(SHPCDevice *shpc, int slot) +{ + HotplugHandler *hotplug_ctrl; + int devfn; + int pci_slot = SHPC_IDX_TO_PCI(slot); + for (devfn = PCI_DEVFN(pci_slot, 0); + devfn <= PCI_DEVFN(pci_slot, PCI_FUNC_MAX - 1); + ++devfn) { + PCIDevice *affected_dev = shpc->sec_bus->devices[devfn]; + if (affected_dev) { + hotplug_ctrl = qdev_get_hotplug_handler(DEVICE(affected_dev)); + hotplug_handler_unplug(hotplug_ctrl, DEVICE(affected_dev), + &error_abort); + object_unparent(OBJECT(affected_dev)); + } + } +} + +static void shpc_slot_command(SHPCDevice *shpc, uint8_t target, + uint8_t state, uint8_t power, uint8_t attn) +{ + uint8_t current_state; + int slot = SHPC_LOGICAL_TO_IDX(target); + if (target < SHPC_CMD_TRGT_MIN || slot >= shpc->nslots) { + shpc_invalid_command(shpc); + return; + } + current_state = shpc_get_status(shpc, slot, SHPC_SLOT_STATE_MASK); + if (current_state == SHPC_STATE_ENABLED && state == SHPC_STATE_PWRONLY) { + shpc_invalid_command(shpc); + return; + } + + switch (power) { + case SHPC_LED_NO: + break; + default: + /* TODO: send event to monitor */ + shpc_set_status(shpc, slot, power, SHPC_SLOT_PWR_LED_MASK); + } + switch (attn) { + case SHPC_LED_NO: + break; + default: + /* TODO: send event to monitor */ + shpc_set_status(shpc, slot, attn, SHPC_SLOT_ATTN_LED_MASK); + } + + if ((current_state == SHPC_STATE_DISABLED && state == SHPC_STATE_PWRONLY) || + (current_state == SHPC_STATE_DISABLED && state == SHPC_STATE_ENABLED)) { + shpc_set_status(shpc, slot, state, SHPC_SLOT_STATE_MASK); + } else if ((current_state == SHPC_STATE_ENABLED || + current_state == SHPC_STATE_PWRONLY) && + state == SHPC_STATE_DISABLED) { + shpc_set_status(shpc, slot, state, SHPC_SLOT_STATE_MASK); + power = shpc_get_status(shpc, slot, SHPC_SLOT_PWR_LED_MASK); + /* TODO: track what monitor requested. */ + /* Look at LED to figure out whether it's ok to remove the device. */ + if (power == SHPC_LED_OFF) { + shpc_free_devices_in_slot(shpc, slot); + shpc_set_status(shpc, slot, 1, SHPC_SLOT_STATUS_MRL_OPEN); + shpc_set_status(shpc, slot, SHPC_SLOT_STATUS_PRSNT_EMPTY, + SHPC_SLOT_STATUS_PRSNT_MASK); + shpc->config[SHPC_SLOT_EVENT_LATCH(slot)] |= + SHPC_SLOT_EVENT_MRL | + SHPC_SLOT_EVENT_PRESENCE; + } + } +} + +static void shpc_command(SHPCDevice *shpc) +{ + uint8_t code = pci_get_byte(shpc->config + SHPC_CMD_CODE); + uint8_t speed; + uint8_t target; + uint8_t attn; + uint8_t power; + uint8_t state; + int i; + + /* Clear status from the previous command. */ + pci_word_test_and_clear_mask(shpc->config + SHPC_CMD_STATUS, + SHPC_CMD_STATUS_BUSY | + SHPC_CMD_STATUS_MRL_OPEN | + SHPC_CMD_STATUS_INVALID_CMD | + SHPC_CMD_STATUS_INVALID_MODE); + switch (code) { + case 0x00 ... 0x3f: + target = shpc->config[SHPC_CMD_TRGT] & SHPC_CMD_TRGT_MAX; + state = (code & SHPC_SLOT_STATE_MASK) >> SHPC_SLOT_STATE_SHIFT; + power = (code & SHPC_SLOT_PWR_LED_MASK) >> SHPC_SLOT_PWR_LED_SHIFT; + attn = (code & SHPC_SLOT_ATTN_LED_MASK) >> SHPC_SLOT_ATTN_LED_SHIFT; + shpc_slot_command(shpc, target, state, power, attn); + break; + case 0x40 ... 
0x47: + speed = code & SHPC_SEC_BUS_MASK; + shpc_set_sec_bus_speed(shpc, speed); + break; + case 0x48: + /* Power only all slots */ + /* first verify no slots are enabled */ + for (i = 0; i < shpc->nslots; ++i) { + state = shpc_get_status(shpc, i, SHPC_SLOT_STATE_MASK); + if (state == SHPC_STATE_ENABLED) { + shpc_invalid_command(shpc); + goto done; + } + } + for (i = 0; i < shpc->nslots; ++i) { + if (!(shpc_get_status(shpc, i, SHPC_SLOT_STATUS_MRL_OPEN))) { + shpc_slot_command(shpc, i + SHPC_CMD_TRGT_MIN, + SHPC_STATE_PWRONLY, SHPC_LED_ON, SHPC_LED_NO); + } else { + shpc_slot_command(shpc, i + SHPC_CMD_TRGT_MIN, + SHPC_STATE_NO, SHPC_LED_OFF, SHPC_LED_NO); + } + } + break; + case 0x49: + /* Enable all slots */ + /* TODO: Spec says this shall fail if some are already enabled. + * This doesn't make sense - why not? a spec bug? */ + for (i = 0; i < shpc->nslots; ++i) { + state = shpc_get_status(shpc, i, SHPC_SLOT_STATE_MASK); + if (state == SHPC_STATE_ENABLED) { + shpc_invalid_command(shpc); + goto done; + } + } + for (i = 0; i < shpc->nslots; ++i) { + if (!(shpc_get_status(shpc, i, SHPC_SLOT_STATUS_MRL_OPEN))) { + shpc_slot_command(shpc, i + SHPC_CMD_TRGT_MIN, + SHPC_STATE_ENABLED, SHPC_LED_ON, SHPC_LED_NO); + } else { + shpc_slot_command(shpc, i + SHPC_CMD_TRGT_MIN, + SHPC_STATE_NO, SHPC_LED_OFF, SHPC_LED_NO); + } + } + break; + default: + shpc_invalid_command(shpc); + break; + } +done: + pci_long_test_and_set_mask(shpc->config + SHPC_SERR_INT, SHPC_CMD_DETECTED); +} + +static void shpc_write(PCIDevice *d, unsigned addr, uint64_t val, int l) +{ + SHPCDevice *shpc = d->shpc; + int i; + if (addr >= SHPC_SIZEOF(d)) { + return; + } + l = MIN(l, SHPC_SIZEOF(d) - addr); + + /* TODO: code duplicated from pci.c */ + for (i = 0; i < l; val >>= 8, ++i) { + unsigned a = addr + i; + uint8_t wmask = shpc->wmask[a]; + uint8_t w1cmask = shpc->w1cmask[a]; + assert(!(wmask & w1cmask)); + shpc->config[a] = (shpc->config[a] & ~wmask) | (val & wmask); + shpc->config[a] &= ~(val & w1cmask); /* W1C: Write 1 to Clear */ + } + if (ranges_overlap(addr, l, SHPC_CMD_CODE, 2)) { + shpc_command(shpc); + } + shpc_interrupt_update(d); +} + +static uint64_t shpc_read(PCIDevice *d, unsigned addr, int l) +{ + uint64_t val = 0x0; + if (addr >= SHPC_SIZEOF(d)) { + return val; + } + l = MIN(l, SHPC_SIZEOF(d) - addr); + memcpy(&val, d->shpc->config + addr, l); + return val; +} + +/* SHPC Bridge Capability */ +#define SHPC_CAP_LENGTH 0x08 +#define SHPC_CAP_DWORD_SELECT 0x2 /* 1 byte */ +#define SHPC_CAP_CxP 0x3 /* 1 byte: CSP, CIP */ +#define SHPC_CAP_DWORD_DATA 0x4 /* 4 bytes */ +#define SHPC_CAP_CSP_MASK 0x4 +#define SHPC_CAP_CIP_MASK 0x8 + +static uint8_t shpc_cap_dword(PCIDevice *d) +{ + return pci_get_byte(d->config + d->shpc->cap + SHPC_CAP_DWORD_SELECT); +} + +/* Update dword data capability register */ +static void shpc_cap_update_dword(PCIDevice *d) +{ + unsigned data; + data = shpc_read(d, shpc_cap_dword(d) * 4, 4); + pci_set_long(d->config + d->shpc->cap + SHPC_CAP_DWORD_DATA, data); +} + +/* Add SHPC capability to the config space for the device. 
*/
+static int shpc_cap_add_config(PCIDevice *d, Error **errp)
+{
+    uint8_t *config;
+    int config_offset;
+    config_offset = pci_add_capability(d, PCI_CAP_ID_SHPC,
+                                       0, SHPC_CAP_LENGTH,
+                                       errp);
+    if (config_offset < 0) {
+        return config_offset;
+    }
+    config = d->config + config_offset;
+
+    pci_set_byte(config + SHPC_CAP_DWORD_SELECT, 0);
+    pci_set_byte(config + SHPC_CAP_CxP, 0);
+    pci_set_long(config + SHPC_CAP_DWORD_DATA, 0);
+    d->shpc->cap = config_offset;
+    /* Make dword select and data writeable. */
+    pci_set_byte(d->wmask + config_offset + SHPC_CAP_DWORD_SELECT, 0xff);
+    pci_set_long(d->wmask + config_offset + SHPC_CAP_DWORD_DATA, 0xffffffff);
+    return 0;
+}
+
+static uint64_t shpc_mmio_read(void *opaque, hwaddr addr,
+                               unsigned size)
+{
+    return shpc_read(opaque, addr, size);
+}
+
+static void shpc_mmio_write(void *opaque, hwaddr addr,
+                            uint64_t val, unsigned size)
+{
+    shpc_write(opaque, addr, val, size);
+}
+
+static const MemoryRegionOps shpc_mmio_ops = {
+    .read = shpc_mmio_read,
+    .write = shpc_mmio_write,
+    .endianness = DEVICE_LITTLE_ENDIAN,
+    .valid = {
+        /* SHPC ECN requires dword accesses, but the original 1.0 spec doesn't.
+         * It's easier to support all sizes than worry about it. */
+        .min_access_size = 1,
+        .max_access_size = 4,
+    },
+};
+static void shpc_device_plug_common(PCIDevice *affected_dev, int *slot,
+                                    SHPCDevice *shpc, Error **errp)
+{
+    int pci_slot = PCI_SLOT(affected_dev->devfn);
+    *slot = SHPC_PCI_TO_IDX(pci_slot);
+
+    if (pci_slot < SHPC_IDX_TO_PCI(0) || *slot >= shpc->nslots) {
+        error_setg(errp, "Unsupported PCI slot %d for standard hotplug "
+                   "controller. Valid slots are between %d and %d.",
+                   pci_slot, SHPC_IDX_TO_PCI(0),
+                   SHPC_IDX_TO_PCI(shpc->nslots) - 1);
+        return;
+    }
+}
+
+void shpc_device_plug_cb(HotplugHandler *hotplug_dev, DeviceState *dev,
+                         Error **errp)
+{
+    Error *local_err = NULL;
+    PCIDevice *pci_hotplug_dev = PCI_DEVICE(hotplug_dev);
+    SHPCDevice *shpc = pci_hotplug_dev->shpc;
+    int slot;
+
+    shpc_device_plug_common(PCI_DEVICE(dev), &slot, shpc, &local_err);
+    if (local_err) {
+        error_propagate(errp, local_err);
+        return;
+    }
+
+    /* Don't send event when device is enabled during qemu machine creation:
+     * it is present on boot, no hotplug event is necessary. We do send an
+     * event when the device is disabled later. */
+    if (!dev->hotplugged) {
+        shpc_set_status(shpc, slot, 0, SHPC_SLOT_STATUS_MRL_OPEN);
+        shpc_set_status(shpc, slot, SHPC_SLOT_STATUS_PRSNT_7_5W,
+                        SHPC_SLOT_STATUS_PRSNT_MASK);
+        return;
+    }
+
+    /* This could be a cancellation of the previous removal.
+     * We check MRL state to figure out.
*/
+    if (shpc_get_status(shpc, slot, SHPC_SLOT_STATUS_MRL_OPEN)) {
+        shpc_set_status(shpc, slot, 0, SHPC_SLOT_STATUS_MRL_OPEN);
+        shpc_set_status(shpc, slot, SHPC_SLOT_STATUS_PRSNT_7_5W,
+                        SHPC_SLOT_STATUS_PRSNT_MASK);
+        shpc->config[SHPC_SLOT_EVENT_LATCH(slot)] |=
+            SHPC_SLOT_EVENT_BUTTON |
+            SHPC_SLOT_EVENT_MRL |
+            SHPC_SLOT_EVENT_PRESENCE;
+    } else {
+        /* Press attention button to cancel removal */
+        shpc->config[SHPC_SLOT_EVENT_LATCH(slot)] |=
+            SHPC_SLOT_EVENT_BUTTON;
+    }
+    shpc_set_status(shpc, slot, 0, SHPC_SLOT_STATUS_66);
+    shpc_interrupt_update(pci_hotplug_dev);
+}
+
+void shpc_device_unplug_cb(HotplugHandler *hotplug_dev, DeviceState *dev,
+                           Error **errp)
+{
+    qdev_unrealize(dev);
+}
+
+void shpc_device_unplug_request_cb(HotplugHandler *hotplug_dev,
+                                   DeviceState *dev, Error **errp)
+{
+    Error *local_err = NULL;
+    PCIDevice *pci_hotplug_dev = PCI_DEVICE(hotplug_dev);
+    SHPCDevice *shpc = pci_hotplug_dev->shpc;
+    uint8_t state;
+    uint8_t led;
+    int slot;
+
+    shpc_device_plug_common(PCI_DEVICE(dev), &slot, shpc, &local_err);
+    if (local_err) {
+        error_propagate(errp, local_err);
+        return;
+    }
+
+    state = shpc_get_status(shpc, slot, SHPC_SLOT_STATE_MASK);
+    led = shpc_get_status(shpc, slot, SHPC_SLOT_PWR_LED_MASK);
+    if (state == SHPC_STATE_DISABLED && led == SHPC_LED_OFF) {
+        shpc_free_devices_in_slot(shpc, slot);
+        shpc_set_status(shpc, slot, 1, SHPC_SLOT_STATUS_MRL_OPEN);
+        shpc_set_status(shpc, slot, SHPC_SLOT_STATUS_PRSNT_EMPTY,
+                        SHPC_SLOT_STATUS_PRSNT_MASK);
+        shpc->config[SHPC_SLOT_EVENT_LATCH(slot)] |=
+            SHPC_SLOT_EVENT_MRL |
+            SHPC_SLOT_EVENT_PRESENCE;
+    } else {
+        shpc->config[SHPC_SLOT_EVENT_LATCH(slot)] |= SHPC_SLOT_EVENT_BUTTON;
+    }
+    shpc_set_status(shpc, slot, 0, SHPC_SLOT_STATUS_66);
+    shpc_interrupt_update(pci_hotplug_dev);
+}
+
+/* Initialize the SHPC structure in bridge's BAR. */
+int shpc_init(PCIDevice *d, PCIBus *sec_bus, MemoryRegion *bar,
+              unsigned offset, Error **errp)
+{
+    int i, ret;
+    int nslots = SHPC_MAX_SLOTS; /* TODO: qdev property? */
+    SHPCDevice *shpc = d->shpc = g_malloc0(sizeof(*d->shpc));
+    shpc->sec_bus = sec_bus;
+    ret = shpc_cap_add_config(d, errp);
+    if (ret) {
+        g_free(d->shpc);
+        return ret;
+    }
+    if (nslots < SHPC_MIN_SLOTS) {
+        return 0;
+    }
+    if (nslots > SHPC_MAX_SLOTS ||
+        SHPC_IDX_TO_PCI(nslots) > PCI_SLOT_MAX) {
+        /* TODO: report an error message that makes sense.
*/ + return -EINVAL; + } + shpc->nslots = nslots; + shpc->config = g_malloc0(SHPC_SIZEOF(d)); + shpc->cmask = g_malloc0(SHPC_SIZEOF(d)); + shpc->wmask = g_malloc0(SHPC_SIZEOF(d)); + shpc->w1cmask = g_malloc0(SHPC_SIZEOF(d)); + + shpc_reset(d); + + pci_set_long(shpc->config + SHPC_BASE_OFFSET, offset); + + pci_set_byte(shpc->wmask + SHPC_CMD_CODE, 0xff); + pci_set_byte(shpc->wmask + SHPC_CMD_TRGT, SHPC_CMD_TRGT_MAX); + pci_set_byte(shpc->wmask + SHPC_CMD_TRGT, SHPC_CMD_TRGT_MAX); + pci_set_long(shpc->wmask + SHPC_SERR_INT, + SHPC_INT_DIS | + SHPC_SERR_DIS | + SHPC_CMD_INT_DIS | + SHPC_ARB_SERR_DIS); + pci_set_long(shpc->w1cmask + SHPC_SERR_INT, + SHPC_CMD_DETECTED | + SHPC_ARB_DETECTED); + for (i = 0; i < nslots; ++i) { + pci_set_byte(shpc->wmask + + SHPC_SLOT_EVENT_SERR_INT_DIS(d, i), + SHPC_SLOT_EVENT_PRESENCE | + SHPC_SLOT_EVENT_ISOLATED_FAULT | + SHPC_SLOT_EVENT_BUTTON | + SHPC_SLOT_EVENT_MRL | + SHPC_SLOT_EVENT_CONNECTED_FAULT | + SHPC_SLOT_EVENT_MRL_SERR_DIS | + SHPC_SLOT_EVENT_CONNECTED_FAULT_SERR_DIS); + pci_set_byte(shpc->w1cmask + + SHPC_SLOT_EVENT_LATCH(i), + SHPC_SLOT_EVENT_PRESENCE | + SHPC_SLOT_EVENT_ISOLATED_FAULT | + SHPC_SLOT_EVENT_BUTTON | + SHPC_SLOT_EVENT_MRL | + SHPC_SLOT_EVENT_CONNECTED_FAULT); + } + + /* TODO: init cmask */ + memory_region_init_io(&shpc->mmio, OBJECT(d), &shpc_mmio_ops, + d, "shpc-mmio", SHPC_SIZEOF(d)); + shpc_cap_update_dword(d); + memory_region_add_subregion(bar, offset, &shpc->mmio); + + qbus_set_hotplug_handler(BUS(sec_bus), OBJECT(d)); + + d->cap_present |= QEMU_PCI_CAP_SHPC; + return 0; +} + +int shpc_bar_size(PCIDevice *d) +{ + return pow2roundup32(SHPC_SLOT_REG(SHPC_MAX_SLOTS)); +} + +void shpc_cleanup(PCIDevice *d, MemoryRegion *bar) +{ + SHPCDevice *shpc = d->shpc; + d->cap_present &= ~QEMU_PCI_CAP_SHPC; + memory_region_del_subregion(bar, &shpc->mmio); + /* TODO: cleanup config space changes? */ +} + +void shpc_free(PCIDevice *d) +{ + SHPCDevice *shpc = d->shpc; + if (!shpc) { + return; + } + object_unparent(OBJECT(&shpc->mmio)); + g_free(shpc->config); + g_free(shpc->cmask); + g_free(shpc->wmask); + g_free(shpc->w1cmask); + g_free(shpc); + d->shpc = NULL; +} + +void shpc_cap_write_config(PCIDevice *d, uint32_t addr, uint32_t val, int l) +{ + if (!ranges_overlap(addr, l, d->shpc->cap, SHPC_CAP_LENGTH)) { + return; + } + if (ranges_overlap(addr, l, d->shpc->cap + SHPC_CAP_DWORD_DATA, 4)) { + unsigned dword_data; + dword_data = pci_get_long(d->shpc->config + d->shpc->cap + + SHPC_CAP_DWORD_DATA); + shpc_write(d, shpc_cap_dword(d) * 4, dword_data, 4); + } + /* Update cap dword data in case guest is going to read it. */ + shpc_cap_update_dword(d); +} + +static int shpc_save(QEMUFile *f, void *pv, size_t size, + const VMStateField *field, JSONWriter *vmdesc) +{ + PCIDevice *d = container_of(pv, PCIDevice, shpc); + qemu_put_buffer(f, d->shpc->config, SHPC_SIZEOF(d)); + + return 0; +} + +static int shpc_load(QEMUFile *f, void *pv, size_t size, + const VMStateField *field) +{ + PCIDevice *d = container_of(pv, PCIDevice, shpc); + int ret = qemu_get_buffer(f, d->shpc->config, SHPC_SIZEOF(d)); + if (ret != SHPC_SIZEOF(d)) { + return -EINVAL; + } + /* Make sure we don't lose notifications. An extra interrupt is harmless. 
*/ + d->shpc->msi_requested = 0; + shpc_interrupt_update(d); + return 0; +} + +VMStateInfo shpc_vmstate_info = { + .name = "shpc", + .get = shpc_load, + .put = shpc_save, +}; diff --git a/hw/pci/slotid_cap.c b/hw/pci/slotid_cap.c new file mode 100644 index 000000000..36d021b4a --- /dev/null +++ b/hw/pci/slotid_cap.c @@ -0,0 +1,50 @@ +#include "qemu/osdep.h" +#include "hw/pci/slotid_cap.h" +#include "hw/pci/pci.h" +#include "qemu/error-report.h" +#include "qapi/error.h" + +#define SLOTID_CAP_LENGTH 4 +#define SLOTID_NSLOTS_SHIFT ctz32(PCI_SID_ESR_NSLOTS) + +int slotid_cap_init(PCIDevice *d, int nslots, + uint8_t chassis, + unsigned offset, + Error **errp) +{ + int cap; + + if (!chassis) { + error_setg(errp, "Bridge chassis not specified. Each bridge is required" + " to be assigned a unique chassis id > 0."); + return -EINVAL; + } + if (nslots < 0 || nslots > (PCI_SID_ESR_NSLOTS >> SLOTID_NSLOTS_SHIFT)) { + /* TODO: error report? */ + return -EINVAL; + } + + cap = pci_add_capability(d, PCI_CAP_ID_SLOTID, offset, + SLOTID_CAP_LENGTH, errp); + if (cap < 0) { + return cap; + } + /* We make each chassis unique, this way each bridge is First in Chassis */ + d->config[cap + PCI_SID_ESR] = PCI_SID_ESR_FIC | + (nslots << SLOTID_NSLOTS_SHIFT); + d->cmask[cap + PCI_SID_ESR] = 0xff; + d->config[cap + PCI_SID_CHASSIS_NR] = chassis; + /* Note: Chassis number register is non-volatile, + so we don't reset it. */ + /* TODO: store in eeprom? */ + d->wmask[cap + PCI_SID_CHASSIS_NR] = 0xff; + + d->cap_present |= QEMU_PCI_CAP_SLOTID; + return 0; +} + +void slotid_cap_cleanup(PCIDevice *d) +{ + /* TODO: cleanup config space? */ + d->cap_present &= ~QEMU_PCI_CAP_SLOTID; +} diff --git a/hw/pci/trace-events b/hw/pci/trace-events new file mode 100644 index 000000000..fc777d0b5 --- /dev/null +++ b/hw/pci/trace-events @@ -0,0 +1,12 @@ +# See docs/devel/tracing.rst for syntax documentation. 
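+#
+# Each line below declares one trace point; tracetool expands it into a
+# trace_<name>() helper that the C code calls. As an illustrative sketch
+# only (not part of this patch), pci.c would emit the first event roughly
+# as follows, where "r" is the PCIIORegion being mapped:
+#
+#   trace_pci_update_mappings_add(d, pci_dev_bus_num(d), PCI_SLOT(d->devfn),
+#                                 PCI_FUNC(d->devfn), i, r->addr, r->size);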
+ +# pci.c +pci_update_mappings_del(void *d, uint32_t bus, uint32_t slot, uint32_t func, int bar, uint64_t addr, uint64_t size) "d=%p %02x:%02x.%x %d,0x%"PRIx64"+0x%"PRIx64 +pci_update_mappings_add(void *d, uint32_t bus, uint32_t slot, uint32_t func, int bar, uint64_t addr, uint64_t size) "d=%p %02x:%02x.%x %d,0x%"PRIx64"+0x%"PRIx64 + +# pci_host.c +pci_cfg_read(const char *dev, unsigned devid, unsigned fnid, unsigned offs, unsigned val) "%s %02u:%u @0x%x -> 0x%x" +pci_cfg_write(const char *dev, unsigned devid, unsigned fnid, unsigned offs, unsigned val) "%s %02u:%u @0x%x <- 0x%x" + +# msix.c +msix_write_config(char *name, bool enabled, bool masked) "dev %s enabled %d masked %d" diff --git a/hw/pci/trace.h b/hw/pci/trace.h new file mode 100644 index 000000000..3dd773e22 --- /dev/null +++ b/hw/pci/trace.h @@ -0,0 +1 @@ +#include "trace/trace-hw_pci.h" diff --git a/hw/pcmcia/Kconfig b/hw/pcmcia/Kconfig new file mode 100644 index 000000000..41f2df913 --- /dev/null +++ b/hw/pcmcia/Kconfig @@ -0,0 +1,2 @@ +config PCMCIA + bool diff --git a/hw/pcmcia/meson.build b/hw/pcmcia/meson.build new file mode 100644 index 000000000..51f2512b8 --- /dev/null +++ b/hw/pcmcia/meson.build @@ -0,0 +1,2 @@ +softmmu_ss.add(when: 'CONFIG_PCMCIA', if_true: files('pcmcia.c')) +softmmu_ss.add(when: 'CONFIG_PXA2XX', if_true: files('pxa2xx.c')) diff --git a/hw/pcmcia/pcmcia.c b/hw/pcmcia/pcmcia.c new file mode 100644 index 000000000..03d13e7d6 --- /dev/null +++ b/hw/pcmcia/pcmcia.c @@ -0,0 +1,24 @@ +/* + * PCMCIA emulation + * + * Copyright 2013 SUSE LINUX Products GmbH + */ + +#include "qemu/osdep.h" +#include "qemu/module.h" +#include "hw/pcmcia.h" + +static const TypeInfo pcmcia_card_type_info = { + .name = TYPE_PCMCIA_CARD, + .parent = TYPE_DEVICE, + .instance_size = sizeof(PCMCIACardState), + .abstract = true, + .class_size = sizeof(PCMCIACardClass), +}; + +static void pcmcia_register_types(void) +{ + type_register_static(&pcmcia_card_type_info); +} + +type_init(pcmcia_register_types) diff --git a/hw/pcmcia/pxa2xx.c b/hw/pcmcia/pxa2xx.c new file mode 100644 index 000000000..fcca7e571 --- /dev/null +++ b/hw/pcmcia/pxa2xx.c @@ -0,0 +1,263 @@ +/* + * Intel XScale PXA255/270 PC Card and CompactFlash Interface. + * + * Copyright (c) 2006 Openedhand Ltd. + * Written by Andrzej Zaborowski + * + * This code is licensed under the GPLv2. + * + * Contributions after 2012-01-13 are licensed under the terms of the + * GNU GPL, version 2 or (at your option) any later version. 
+ */ + +#include "qemu/osdep.h" +#include "hw/irq.h" +#include "hw/sysbus.h" +#include "qapi/error.h" +#include "qemu/module.h" +#include "hw/pcmcia.h" +#include "hw/arm/pxa.h" + +struct PXA2xxPCMCIAState { + SysBusDevice parent_obj; + + PCMCIASocket slot; + MemoryRegion container_mem; + MemoryRegion common_iomem; + MemoryRegion attr_iomem; + MemoryRegion iomem; + + qemu_irq irq; + qemu_irq cd_irq; + + PCMCIACardState *card; +}; + +static uint64_t pxa2xx_pcmcia_common_read(void *opaque, + hwaddr offset, unsigned size) +{ + PXA2xxPCMCIAState *s = (PXA2xxPCMCIAState *) opaque; + PCMCIACardClass *pcc; + + if (s->slot.attached) { + pcc = PCMCIA_CARD_GET_CLASS(s->card); + return pcc->common_read(s->card, offset); + } + + return 0; +} + +static void pxa2xx_pcmcia_common_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + PXA2xxPCMCIAState *s = (PXA2xxPCMCIAState *) opaque; + PCMCIACardClass *pcc; + + if (s->slot.attached) { + pcc = PCMCIA_CARD_GET_CLASS(s->card); + pcc->common_write(s->card, offset, value); + } +} + +static uint64_t pxa2xx_pcmcia_attr_read(void *opaque, + hwaddr offset, unsigned size) +{ + PXA2xxPCMCIAState *s = (PXA2xxPCMCIAState *) opaque; + PCMCIACardClass *pcc; + + if (s->slot.attached) { + pcc = PCMCIA_CARD_GET_CLASS(s->card); + return pcc->attr_read(s->card, offset); + } + + return 0; +} + +static void pxa2xx_pcmcia_attr_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + PXA2xxPCMCIAState *s = (PXA2xxPCMCIAState *) opaque; + PCMCIACardClass *pcc; + + if (s->slot.attached) { + pcc = PCMCIA_CARD_GET_CLASS(s->card); + pcc->attr_write(s->card, offset, value); + } +} + +static uint64_t pxa2xx_pcmcia_io_read(void *opaque, + hwaddr offset, unsigned size) +{ + PXA2xxPCMCIAState *s = (PXA2xxPCMCIAState *) opaque; + PCMCIACardClass *pcc; + + if (s->slot.attached) { + pcc = PCMCIA_CARD_GET_CLASS(s->card); + return pcc->io_read(s->card, offset); + } + + return 0; +} + +static void pxa2xx_pcmcia_io_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + PXA2xxPCMCIAState *s = (PXA2xxPCMCIAState *) opaque; + PCMCIACardClass *pcc; + + if (s->slot.attached) { + pcc = PCMCIA_CARD_GET_CLASS(s->card); + pcc->io_write(s->card, offset, value); + } +} + +static const MemoryRegionOps pxa2xx_pcmcia_common_ops = { + .read = pxa2xx_pcmcia_common_read, + .write = pxa2xx_pcmcia_common_write, + .endianness = DEVICE_NATIVE_ENDIAN +}; + +static const MemoryRegionOps pxa2xx_pcmcia_attr_ops = { + .read = pxa2xx_pcmcia_attr_read, + .write = pxa2xx_pcmcia_attr_write, + .endianness = DEVICE_NATIVE_ENDIAN +}; + +static const MemoryRegionOps pxa2xx_pcmcia_io_ops = { + .read = pxa2xx_pcmcia_io_read, + .write = pxa2xx_pcmcia_io_write, + .endianness = DEVICE_NATIVE_ENDIAN +}; + +static void pxa2xx_pcmcia_set_irq(void *opaque, int line, int level) +{ + PXA2xxPCMCIAState *s = (PXA2xxPCMCIAState *) opaque; + if (!s->irq) + return; + + qemu_set_irq(s->irq, level); +} + +PXA2xxPCMCIAState *pxa2xx_pcmcia_init(MemoryRegion *sysmem, + hwaddr base) +{ + DeviceState *dev; + PXA2xxPCMCIAState *s; + + dev = qdev_new(TYPE_PXA2XX_PCMCIA); + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base); + s = PXA2XX_PCMCIA(dev); + + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + + return s; +} + +static void pxa2xx_pcmcia_initfn(Object *obj) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + PXA2xxPCMCIAState *s = PXA2XX_PCMCIA(obj); + + memory_region_init(&s->container_mem, obj, "container", 0x10000000); + sysbus_init_mmio(sbd, &s->container_mem); + + /* Socket I/O 
Memory Space */ + memory_region_init_io(&s->iomem, obj, &pxa2xx_pcmcia_io_ops, s, + "pxa2xx-pcmcia-io", 0x04000000); + memory_region_add_subregion(&s->container_mem, 0x00000000, + &s->iomem); + + /* Then next 64 MB is reserved */ + + /* Socket Attribute Memory Space */ + memory_region_init_io(&s->attr_iomem, obj, &pxa2xx_pcmcia_attr_ops, s, + "pxa2xx-pcmcia-attribute", 0x04000000); + memory_region_add_subregion(&s->container_mem, 0x08000000, + &s->attr_iomem); + + /* Socket Common Memory Space */ + memory_region_init_io(&s->common_iomem, obj, &pxa2xx_pcmcia_common_ops, s, + "pxa2xx-pcmcia-common", 0x04000000); + memory_region_add_subregion(&s->container_mem, 0x0c000000, + &s->common_iomem); + + s->slot.irq = qemu_allocate_irq(pxa2xx_pcmcia_set_irq, s, 0); + + object_property_add_link(obj, "card", TYPE_PCMCIA_CARD, + (Object **)&s->card, + NULL, /* read-only property */ + 0); +} + +/* Insert a new card into a slot */ +int pxa2xx_pcmcia_attach(void *opaque, PCMCIACardState *card) +{ + PXA2xxPCMCIAState *s = (PXA2xxPCMCIAState *) opaque; + PCMCIACardClass *pcc; + + if (s->slot.attached) { + return -EEXIST; + } + + if (s->cd_irq) { + qemu_irq_raise(s->cd_irq); + } + + s->card = card; + pcc = PCMCIA_CARD_GET_CLASS(s->card); + + s->slot.attached = true; + s->card->slot = &s->slot; + pcc->attach(s->card); + + return 0; +} + +/* Eject card from the slot */ +int pxa2xx_pcmcia_detach(void *opaque) +{ + PXA2xxPCMCIAState *s = (PXA2xxPCMCIAState *) opaque; + PCMCIACardClass *pcc; + + if (!s->slot.attached) { + return -ENOENT; + } + + pcc = PCMCIA_CARD_GET_CLASS(s->card); + pcc->detach(s->card); + s->card->slot = NULL; + s->card = NULL; + + s->slot.attached = false; + + if (s->irq) { + qemu_irq_lower(s->irq); + } + if (s->cd_irq) { + qemu_irq_lower(s->cd_irq); + } + + return 0; +} + +/* Who to notify on card events */ +void pxa2xx_pcmcia_set_irq_cb(void *opaque, qemu_irq irq, qemu_irq cd_irq) +{ + PXA2xxPCMCIAState *s = (PXA2xxPCMCIAState *) opaque; + s->irq = irq; + s->cd_irq = cd_irq; +} + +static const TypeInfo pxa2xx_pcmcia_type_info = { + .name = TYPE_PXA2XX_PCMCIA, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(PXA2xxPCMCIAState), + .instance_init = pxa2xx_pcmcia_initfn, +}; + +static void pxa2xx_pcmcia_register_types(void) +{ + type_register_static(&pxa2xx_pcmcia_type_info); +} + +type_init(pxa2xx_pcmcia_register_types) diff --git a/hw/ppc/Kconfig b/hw/ppc/Kconfig new file mode 100644 index 000000000..400511c6b --- /dev/null +++ b/hw/ppc/Kconfig @@ -0,0 +1,153 @@ +config PSERIES + bool + imply PCI_DEVICES + imply TEST_DEVICES + imply VIRTIO_VGA + imply NVDIMM + select DIMM + select PCI + select SPAPR_VSCSI + select VFIO if LINUX # needed by spapr_pci_vfio.c + select XICS + select XIVE + select MSI_NONBROKEN + select FDT_PPC + select CHRP_NVRAM + select VOF + +config SPAPR_RNG + bool + default y + depends on PSERIES + +config POWERNV + bool + imply PCI_DEVICES + imply TEST_DEVICES + select ISA_IPMI_BT + select IPMI_LOCAL + select ISA_BUS + select MC146818RTC + select XICS + select XIVE + select FDT_PPC + select PCI_POWERNV + +config PPC405 + bool + select M48T59 + select PFLASH_CFI02 + select PPC4XX + select SERIAL + +config PPC440 + bool + imply PCI_DEVICES + imply TEST_DEVICES + imply E1000_PCI + select PCI_EXPRESS + select PPC4XX + select SERIAL + select FDT_PPC + +config PPC4XX + bool + select BITBANG_I2C + select PCI + select PPC_UIC + +config SAM460EX + bool + select PPC405 + select PFLASH_CFI01 + select IDE_SII3112 + select M41T80 + select PPC440 + select SERIAL + select SM501 
+ select SMBUS_EEPROM + select USB_EHCI_SYSBUS + select USB_OHCI + select FDT_PPC + +config PEGASOS2 + bool + select MV64361 + select VT82C686 + select IDE_VIA + select SMBUS_EEPROM + select VOF +# This should come with VT82C686 + select ACPI_X86 + imply ATI_VGA + +config PREP + bool + imply PCI_DEVICES + imply TEST_DEVICES + select CS4231A + select RAVEN_PCI + select I82378 + select LSI_SCSI_PCI + select M48T59 + select PC87312 + select RS6000_MC + select FW_CFG_PPC + +config RS6000_MC + bool + +config MAC_OLDWORLD + bool + imply PCI_DEVICES + imply SUNGEM + imply TEST_DEVICES + select ADB + select GRACKLE_PCI + select HEATHROW_PIC + select MACIO + select FW_CFG_PPC + +config MAC_NEWWORLD + bool + imply PCI_DEVICES + imply SUNGEM + imply TEST_DEVICES + select ADB + select MACIO + select MACIO_GPIO + select MAC_PMU + select UNIN_PCI + select FW_CFG_PPC + +config E500 + bool + imply AT24C + imply VIRTIO_PCI + select ETSEC + select OPENPIC + select PLATFORM_BUS + select PPCE500_PCI + select SERIAL + select MPC_I2C + select FDT_PPC + select DS1338 + +config VIRTEX + bool + select PPC4XX + select PFLASH_CFI01 + select SERIAL + select XILINX + select XILINX_ETHLITE + select FDT_PPC + +# Only used by 64-bit targets +config FW_CFG_PPC + bool + +config FDT_PPC + bool + +config VOF + bool diff --git a/hw/ppc/e500-ccsr.h b/hw/ppc/e500-ccsr.h new file mode 100644 index 000000000..249c17be3 --- /dev/null +++ b/hw/ppc/e500-ccsr.h @@ -0,0 +1,18 @@ +#ifndef E500_CCSR_H +#define E500_CCSR_H + +#include "hw/sysbus.h" +#include "qom/object.h" + +struct PPCE500CCSRState { + /*< private >*/ + SysBusDevice parent; + /*< public >*/ + + MemoryRegion ccsr_space; +}; + +#define TYPE_CCSR "e500-ccsr" +OBJECT_DECLARE_SIMPLE_TYPE(PPCE500CCSRState, CCSR) + +#endif /* E500_CCSR_H */ diff --git a/hw/ppc/e500.c b/hw/ppc/e500.c new file mode 100644 index 000000000..960e7efcd --- /dev/null +++ b/hw/ppc/e500.c @@ -0,0 +1,1174 @@ +/* + * QEMU PowerPC e500-based platforms + * + * Copyright (C) 2009 Freescale Semiconductor, Inc. All rights reserved. + * + * Author: Yu Liu, + * + * This file is derived from hw/ppc440_bamboo.c, + * the copyright for that material belongs to the original owners. + * + * This is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#include "qemu/osdep.h" +#include "qemu-common.h" +#include "qemu/datadir.h" +#include "qemu/units.h" +#include "qapi/error.h" +#include "e500.h" +#include "e500-ccsr.h" +#include "net/net.h" +#include "qemu/config-file.h" +#include "hw/char/serial.h" +#include "hw/pci/pci.h" +#include "sysemu/sysemu.h" +#include "sysemu/kvm.h" +#include "sysemu/reset.h" +#include "sysemu/runstate.h" +#include "kvm_ppc.h" +#include "sysemu/device_tree.h" +#include "hw/ppc/openpic.h" +#include "hw/ppc/openpic_kvm.h" +#include "hw/ppc/ppc.h" +#include "hw/qdev-properties.h" +#include "hw/loader.h" +#include "elf.h" +#include "hw/sysbus.h" +#include "qemu/host-utils.h" +#include "qemu/option.h" +#include "hw/pci-host/ppce500.h" +#include "qemu/error-report.h" +#include "hw/platform-bus.h" +#include "hw/net/fsl_etsec/etsec.h" +#include "hw/i2c/i2c.h" +#include "hw/irq.h" + +#define EPAPR_MAGIC (0x45504150) +#define BINARY_DEVICE_TREE_FILE "mpc8544ds.dtb" +#define DTC_LOAD_PAD 0x1800000 +#define DTC_PAD_MASK 0xFFFFF +#define DTB_MAX_SIZE (8 * MiB) +#define INITRD_LOAD_PAD 0x2000000 +#define INITRD_PAD_MASK 0xFFFFFF + +#define RAM_SIZES_ALIGN (64 * MiB) + +/* TODO: parameterize */ +#define MPC8544_CCSRBAR_SIZE 0x00100000ULL +#define MPC8544_MPIC_REGS_OFFSET 0x40000ULL +#define MPC8544_MSI_REGS_OFFSET 0x41600ULL +#define MPC8544_SERIAL0_REGS_OFFSET 0x4500ULL +#define MPC8544_SERIAL1_REGS_OFFSET 0x4600ULL +#define MPC8544_PCI_REGS_OFFSET 0x8000ULL +#define MPC8544_PCI_REGS_SIZE 0x1000ULL +#define MPC8544_UTIL_OFFSET 0xe0000ULL +#define MPC8XXX_GPIO_OFFSET 0x000FF000ULL +#define MPC8544_I2C_REGS_OFFSET 0x3000ULL +#define MPC8XXX_GPIO_IRQ 47 +#define MPC8544_I2C_IRQ 43 +#define RTC_REGS_OFFSET 0x68 + +#define PLATFORM_CLK_FREQ_HZ (400 * 1000 * 1000) + +struct boot_info +{ + uint32_t dt_base; + uint32_t dt_size; + uint32_t entry; +}; + +static uint32_t *pci_map_create(void *fdt, uint32_t mpic, int first_slot, + int nr_slots, int *len) +{ + int i = 0; + int slot; + int pci_irq; + int host_irq; + int last_slot = first_slot + nr_slots; + uint32_t *pci_map; + + *len = nr_slots * 4 * 7 * sizeof(uint32_t); + pci_map = g_malloc(*len); + + for (slot = first_slot; slot < last_slot; slot++) { + for (pci_irq = 0; pci_irq < 4; pci_irq++) { + pci_map[i++] = cpu_to_be32(slot << 11); + pci_map[i++] = cpu_to_be32(0x0); + pci_map[i++] = cpu_to_be32(0x0); + pci_map[i++] = cpu_to_be32(pci_irq + 1); + pci_map[i++] = cpu_to_be32(mpic); + host_irq = ppce500_pci_map_irq_slot(slot, pci_irq); + pci_map[i++] = cpu_to_be32(host_irq + 1); + pci_map[i++] = cpu_to_be32(0x1); + } + } + + assert((i * sizeof(uint32_t)) == *len); + + return pci_map; +} + +static void dt_serial_create(void *fdt, unsigned long long offset, + const char *soc, const char *mpic, + const char *alias, int idx, bool defcon) +{ + char *ser; + + ser = g_strdup_printf("%s/serial@%llx", soc, offset); + qemu_fdt_add_subnode(fdt, ser); + qemu_fdt_setprop_string(fdt, ser, "device_type", "serial"); + qemu_fdt_setprop_string(fdt, ser, "compatible", "ns16550"); + qemu_fdt_setprop_cells(fdt, ser, "reg", offset, 0x100); + qemu_fdt_setprop_cell(fdt, ser, "cell-index", idx); + qemu_fdt_setprop_cell(fdt, ser, "clock-frequency", PLATFORM_CLK_FREQ_HZ); + qemu_fdt_setprop_cells(fdt, ser, "interrupts", 42, 2); + qemu_fdt_setprop_phandle(fdt, ser, "interrupt-parent", mpic); + qemu_fdt_setprop_string(fdt, "/aliases", alias, ser); + + if (defcon) { + /* + * "linux,stdout-path" and "stdout" properties are deprecated by linux + * kernel. 
New platforms should only use the "stdout-path" property. Set + * the new property and continue using older property to remain + * compatible with the existing firmware. + */ + qemu_fdt_setprop_string(fdt, "/chosen", "linux,stdout-path", ser); + qemu_fdt_setprop_string(fdt, "/chosen", "stdout-path", ser); + } + g_free(ser); +} + +static void create_dt_mpc8xxx_gpio(void *fdt, const char *soc, const char *mpic) +{ + hwaddr mmio0 = MPC8XXX_GPIO_OFFSET; + int irq0 = MPC8XXX_GPIO_IRQ; + gchar *node = g_strdup_printf("%s/gpio@%"PRIx64, soc, mmio0); + gchar *poweroff = g_strdup_printf("%s/power-off", soc); + int gpio_ph; + + qemu_fdt_add_subnode(fdt, node); + qemu_fdt_setprop_string(fdt, node, "compatible", "fsl,qoriq-gpio"); + qemu_fdt_setprop_cells(fdt, node, "reg", mmio0, 0x1000); + qemu_fdt_setprop_cells(fdt, node, "interrupts", irq0, 0x2); + qemu_fdt_setprop_phandle(fdt, node, "interrupt-parent", mpic); + qemu_fdt_setprop_cells(fdt, node, "#gpio-cells", 2); + qemu_fdt_setprop(fdt, node, "gpio-controller", NULL, 0); + gpio_ph = qemu_fdt_alloc_phandle(fdt); + qemu_fdt_setprop_cell(fdt, node, "phandle", gpio_ph); + qemu_fdt_setprop_cell(fdt, node, "linux,phandle", gpio_ph); + + /* Power Off Pin */ + qemu_fdt_add_subnode(fdt, poweroff); + qemu_fdt_setprop_string(fdt, poweroff, "compatible", "gpio-poweroff"); + qemu_fdt_setprop_cells(fdt, poweroff, "gpios", gpio_ph, 0, 0); + + g_free(node); + g_free(poweroff); +} + +static void dt_rtc_create(void *fdt, const char *i2c, const char *alias) +{ + int offset = RTC_REGS_OFFSET; + + gchar *rtc = g_strdup_printf("%s/rtc@%"PRIx32, i2c, offset); + qemu_fdt_add_subnode(fdt, rtc); + qemu_fdt_setprop_string(fdt, rtc, "compatible", "pericom,pt7c4338"); + qemu_fdt_setprop_cells(fdt, rtc, "reg", offset); + qemu_fdt_setprop_string(fdt, "/aliases", alias, rtc); + + g_free(rtc); +} + +static void dt_i2c_create(void *fdt, const char *soc, const char *mpic, + const char *alias) +{ + hwaddr mmio0 = MPC8544_I2C_REGS_OFFSET; + int irq0 = MPC8544_I2C_IRQ; + + gchar *i2c = g_strdup_printf("%s/i2c@%"PRIx64, soc, mmio0); + qemu_fdt_add_subnode(fdt, i2c); + qemu_fdt_setprop_string(fdt, i2c, "device_type", "i2c"); + qemu_fdt_setprop_string(fdt, i2c, "compatible", "fsl-i2c"); + qemu_fdt_setprop_cells(fdt, i2c, "reg", mmio0, 0x14); + qemu_fdt_setprop_cells(fdt, i2c, "cell-index", 0); + qemu_fdt_setprop_cells(fdt, i2c, "interrupts", irq0, 0x2); + qemu_fdt_setprop_phandle(fdt, i2c, "interrupt-parent", mpic); + qemu_fdt_setprop_string(fdt, "/aliases", alias, i2c); + + g_free(i2c); +} + + +typedef struct PlatformDevtreeData { + void *fdt; + const char *mpic; + int irq_start; + const char *node; + PlatformBusDevice *pbus; +} PlatformDevtreeData; + +static int create_devtree_etsec(SysBusDevice *sbdev, PlatformDevtreeData *data) +{ + eTSEC *etsec = ETSEC_COMMON(sbdev); + PlatformBusDevice *pbus = data->pbus; + hwaddr mmio0 = platform_bus_get_mmio_addr(pbus, sbdev, 0); + int irq0 = platform_bus_get_irqn(pbus, sbdev, 0); + int irq1 = platform_bus_get_irqn(pbus, sbdev, 1); + int irq2 = platform_bus_get_irqn(pbus, sbdev, 2); + gchar *node = g_strdup_printf("/platform/ethernet@%"PRIx64, mmio0); + gchar *group = g_strdup_printf("%s/queue-group", node); + void *fdt = data->fdt; + + assert((int64_t)mmio0 >= 0); + assert(irq0 >= 0); + assert(irq1 >= 0); + assert(irq2 >= 0); + + qemu_fdt_add_subnode(fdt, node); + qemu_fdt_setprop(fdt, node, "ranges", NULL, 0); + qemu_fdt_setprop_string(fdt, node, "device_type", "network"); + qemu_fdt_setprop_string(fdt, node, "compatible", "fsl,etsec2"); + 
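    /* Identify the NIC to the guest: model name, MAC address and a fixed
+     * 1Gb/s full-duplex link (the fixed-link cells below). */
+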
qemu_fdt_setprop_string(fdt, node, "model", "eTSEC"); + qemu_fdt_setprop(fdt, node, "local-mac-address", etsec->conf.macaddr.a, 6); + qemu_fdt_setprop_cells(fdt, node, "fixed-link", 0, 1, 1000, 0, 0); + qemu_fdt_setprop_cells(fdt, node, "#size-cells", 1); + qemu_fdt_setprop_cells(fdt, node, "#address-cells", 1); + + qemu_fdt_add_subnode(fdt, group); + qemu_fdt_setprop_cells(fdt, group, "reg", mmio0, 0x1000); + qemu_fdt_setprop_cells(fdt, group, "interrupts", + data->irq_start + irq0, 0x2, + data->irq_start + irq1, 0x2, + data->irq_start + irq2, 0x2); + + g_free(node); + g_free(group); + + return 0; +} + +static void sysbus_device_create_devtree(SysBusDevice *sbdev, void *opaque) +{ + PlatformDevtreeData *data = opaque; + bool matched = false; + + if (object_dynamic_cast(OBJECT(sbdev), TYPE_ETSEC_COMMON)) { + create_devtree_etsec(sbdev, data); + matched = true; + } + + if (!matched) { + error_report("Device %s is not supported by this machine yet.", + qdev_fw_name(DEVICE(sbdev))); + exit(1); + } +} + +static void platform_bus_create_devtree(PPCE500MachineState *pms, + void *fdt, const char *mpic) +{ + const PPCE500MachineClass *pmc = PPCE500_MACHINE_GET_CLASS(pms); + gchar *node = g_strdup_printf("/platform@%"PRIx64, pmc->platform_bus_base); + const char platcomp[] = "qemu,platform\0simple-bus"; + uint64_t addr = pmc->platform_bus_base; + uint64_t size = pmc->platform_bus_size; + int irq_start = pmc->platform_bus_first_irq; + + /* Create a /platform node that we can put all devices into */ + + qemu_fdt_add_subnode(fdt, node); + qemu_fdt_setprop(fdt, node, "compatible", platcomp, sizeof(platcomp)); + + /* Our platform bus region is less than 32bit big, so 1 cell is enough for + address and size */ + qemu_fdt_setprop_cells(fdt, node, "#size-cells", 1); + qemu_fdt_setprop_cells(fdt, node, "#address-cells", 1); + qemu_fdt_setprop_cells(fdt, node, "ranges", 0, addr >> 32, addr, size); + + qemu_fdt_setprop_phandle(fdt, node, "interrupt-parent", mpic); + + /* Create dt nodes for dynamic devices */ + PlatformDevtreeData data = { + .fdt = fdt, + .mpic = mpic, + .irq_start = irq_start, + .node = node, + .pbus = pms->pbus_dev, + }; + + /* Loop through all dynamic sysbus devices and create nodes for them */ + foreach_dynamic_sysbus_device(sysbus_device_create_devtree, &data); + + g_free(node); +} + +static int ppce500_load_device_tree(PPCE500MachineState *pms, + hwaddr addr, + hwaddr initrd_base, + hwaddr initrd_size, + hwaddr kernel_base, + hwaddr kernel_size, + bool dry_run) +{ + MachineState *machine = MACHINE(pms); + unsigned int smp_cpus = machine->smp.cpus; + const PPCE500MachineClass *pmc = PPCE500_MACHINE_GET_CLASS(pms); + CPUPPCState *env = first_cpu->env_ptr; + int ret = -1; + uint64_t mem_reg_property[] = { 0, cpu_to_be64(machine->ram_size) }; + int fdt_size; + void *fdt; + uint8_t hypercall[16]; + uint32_t clock_freq = PLATFORM_CLK_FREQ_HZ; + uint32_t tb_freq = PLATFORM_CLK_FREQ_HZ; + int i; + char compatible_sb[] = "fsl,mpc8544-immr\0simple-bus"; + char *soc; + char *mpic; + uint32_t mpic_ph; + uint32_t msi_ph; + char *gutil; + char *pci; + char *msi; + uint32_t *pci_map = NULL; + int len; + uint32_t pci_ranges[14] = + { + 0x2000000, 0x0, pmc->pci_mmio_bus_base, + pmc->pci_mmio_base >> 32, pmc->pci_mmio_base, + 0x0, 0x20000000, + + 0x1000000, 0x0, 0x0, + pmc->pci_pio_base >> 32, pmc->pci_pio_base, + 0x0, 0x10000, + }; + const char *dtb_file = machine->dtb; + const char *toplevel_compat = machine->dt_compatible; + + if (dtb_file) { + char *filename; + filename = 
qemu_find_file(QEMU_FILE_TYPE_BIOS, dtb_file); + if (!filename) { + goto out; + } + + fdt = load_device_tree(filename, &fdt_size); + g_free(filename); + if (!fdt) { + goto out; + } + goto done; + } + + fdt = create_device_tree(&fdt_size); + if (fdt == NULL) { + goto out; + } + + /* Manipulate device tree in memory. */ + qemu_fdt_setprop_cell(fdt, "/", "#address-cells", 2); + qemu_fdt_setprop_cell(fdt, "/", "#size-cells", 2); + + qemu_fdt_add_subnode(fdt, "/memory"); + qemu_fdt_setprop_string(fdt, "/memory", "device_type", "memory"); + qemu_fdt_setprop(fdt, "/memory", "reg", mem_reg_property, + sizeof(mem_reg_property)); + + qemu_fdt_add_subnode(fdt, "/chosen"); + if (initrd_size) { + ret = qemu_fdt_setprop_cell(fdt, "/chosen", "linux,initrd-start", + initrd_base); + if (ret < 0) { + fprintf(stderr, "couldn't set /chosen/linux,initrd-start\n"); + } + + ret = qemu_fdt_setprop_cell(fdt, "/chosen", "linux,initrd-end", + (initrd_base + initrd_size)); + if (ret < 0) { + fprintf(stderr, "couldn't set /chosen/linux,initrd-end\n"); + } + + } + + if (kernel_base != -1ULL) { + qemu_fdt_setprop_cells(fdt, "/chosen", "qemu,boot-kernel", + kernel_base >> 32, kernel_base, + kernel_size >> 32, kernel_size); + } + + ret = qemu_fdt_setprop_string(fdt, "/chosen", "bootargs", + machine->kernel_cmdline); + if (ret < 0) + fprintf(stderr, "couldn't set /chosen/bootargs\n"); + + if (kvm_enabled()) { + /* Read out host's frequencies */ + clock_freq = kvmppc_get_clockfreq(); + tb_freq = kvmppc_get_tbfreq(); + + /* indicate KVM hypercall interface */ + qemu_fdt_add_subnode(fdt, "/hypervisor"); + qemu_fdt_setprop_string(fdt, "/hypervisor", "compatible", + "linux,kvm"); + kvmppc_get_hypercall(env, hypercall, sizeof(hypercall)); + qemu_fdt_setprop(fdt, "/hypervisor", "hcall-instructions", + hypercall, sizeof(hypercall)); + /* if KVM supports the idle hcall, set property indicating this */ + if (kvmppc_get_hasidle(env)) { + qemu_fdt_setprop(fdt, "/hypervisor", "has-idle", NULL, 0); + } + } + + /* Create CPU nodes */ + qemu_fdt_add_subnode(fdt, "/cpus"); + qemu_fdt_setprop_cell(fdt, "/cpus", "#address-cells", 1); + qemu_fdt_setprop_cell(fdt, "/cpus", "#size-cells", 0); + + /* We need to generate the cpu nodes in reverse order, so Linux can pick + the first node as boot node and be happy */ + for (i = smp_cpus - 1; i >= 0; i--) { + CPUState *cpu; + char *cpu_name; + uint64_t cpu_release_addr = pmc->spin_base + (i * 0x20); + + cpu = qemu_get_cpu(i); + if (cpu == NULL) { + continue; + } + env = cpu->env_ptr; + + cpu_name = g_strdup_printf("/cpus/PowerPC,8544@%x", i); + qemu_fdt_add_subnode(fdt, cpu_name); + qemu_fdt_setprop_cell(fdt, cpu_name, "clock-frequency", clock_freq); + qemu_fdt_setprop_cell(fdt, cpu_name, "timebase-frequency", tb_freq); + qemu_fdt_setprop_string(fdt, cpu_name, "device_type", "cpu"); + qemu_fdt_setprop_cell(fdt, cpu_name, "reg", i); + qemu_fdt_setprop_cell(fdt, cpu_name, "d-cache-line-size", + env->dcache_line_size); + qemu_fdt_setprop_cell(fdt, cpu_name, "i-cache-line-size", + env->icache_line_size); + qemu_fdt_setprop_cell(fdt, cpu_name, "d-cache-size", 0x8000); + qemu_fdt_setprop_cell(fdt, cpu_name, "i-cache-size", 0x8000); + qemu_fdt_setprop_cell(fdt, cpu_name, "bus-frequency", 0); + if (cpu->cpu_index) { + qemu_fdt_setprop_string(fdt, cpu_name, "status", "disabled"); + qemu_fdt_setprop_string(fdt, cpu_name, "enable-method", + "spin-table"); + qemu_fdt_setprop_u64(fdt, cpu_name, "cpu-release-addr", + cpu_release_addr); + } else { + qemu_fdt_setprop_string(fdt, cpu_name, "status", "okay"); + } + 
g_free(cpu_name); + } + + qemu_fdt_add_subnode(fdt, "/aliases"); + /* XXX These should go into their respective devices' code */ + soc = g_strdup_printf("/soc@%"PRIx64, pmc->ccsrbar_base); + qemu_fdt_add_subnode(fdt, soc); + qemu_fdt_setprop_string(fdt, soc, "device_type", "soc"); + qemu_fdt_setprop(fdt, soc, "compatible", compatible_sb, + sizeof(compatible_sb)); + qemu_fdt_setprop_cell(fdt, soc, "#address-cells", 1); + qemu_fdt_setprop_cell(fdt, soc, "#size-cells", 1); + qemu_fdt_setprop_cells(fdt, soc, "ranges", 0x0, + pmc->ccsrbar_base >> 32, pmc->ccsrbar_base, + MPC8544_CCSRBAR_SIZE); + /* XXX should contain a reasonable value */ + qemu_fdt_setprop_cell(fdt, soc, "bus-frequency", 0); + + mpic = g_strdup_printf("%s/pic@%llx", soc, MPC8544_MPIC_REGS_OFFSET); + qemu_fdt_add_subnode(fdt, mpic); + qemu_fdt_setprop_string(fdt, mpic, "device_type", "open-pic"); + qemu_fdt_setprop_string(fdt, mpic, "compatible", "fsl,mpic"); + qemu_fdt_setprop_cells(fdt, mpic, "reg", MPC8544_MPIC_REGS_OFFSET, + 0x40000); + qemu_fdt_setprop_cell(fdt, mpic, "#address-cells", 0); + qemu_fdt_setprop_cell(fdt, mpic, "#interrupt-cells", 2); + mpic_ph = qemu_fdt_alloc_phandle(fdt); + qemu_fdt_setprop_cell(fdt, mpic, "phandle", mpic_ph); + qemu_fdt_setprop_cell(fdt, mpic, "linux,phandle", mpic_ph); + qemu_fdt_setprop(fdt, mpic, "interrupt-controller", NULL, 0); + + /* + * We have to generate ser1 first, because Linux takes the first + * device it finds in the dt as serial output device. And we generate + * devices in reverse order to the dt. + */ + if (serial_hd(1)) { + dt_serial_create(fdt, MPC8544_SERIAL1_REGS_OFFSET, + soc, mpic, "serial1", 1, false); + } + + if (serial_hd(0)) { + dt_serial_create(fdt, MPC8544_SERIAL0_REGS_OFFSET, + soc, mpic, "serial0", 0, true); + } + + /* i2c */ + dt_i2c_create(fdt, soc, mpic, "i2c"); + + dt_rtc_create(fdt, "i2c", "rtc"); + + + gutil = g_strdup_printf("%s/global-utilities@%llx", soc, + MPC8544_UTIL_OFFSET); + qemu_fdt_add_subnode(fdt, gutil); + qemu_fdt_setprop_string(fdt, gutil, "compatible", "fsl,mpc8544-guts"); + qemu_fdt_setprop_cells(fdt, gutil, "reg", MPC8544_UTIL_OFFSET, 0x1000); + qemu_fdt_setprop(fdt, gutil, "fsl,has-rstcr", NULL, 0); + g_free(gutil); + + msi = g_strdup_printf("/%s/msi@%llx", soc, MPC8544_MSI_REGS_OFFSET); + qemu_fdt_add_subnode(fdt, msi); + qemu_fdt_setprop_string(fdt, msi, "compatible", "fsl,mpic-msi"); + qemu_fdt_setprop_cells(fdt, msi, "reg", MPC8544_MSI_REGS_OFFSET, 0x200); + msi_ph = qemu_fdt_alloc_phandle(fdt); + qemu_fdt_setprop_cells(fdt, msi, "msi-available-ranges", 0x0, 0x100); + qemu_fdt_setprop_phandle(fdt, msi, "interrupt-parent", mpic); + qemu_fdt_setprop_cells(fdt, msi, "interrupts", + 0xe0, 0x0, + 0xe1, 0x0, + 0xe2, 0x0, + 0xe3, 0x0, + 0xe4, 0x0, + 0xe5, 0x0, + 0xe6, 0x0, + 0xe7, 0x0); + qemu_fdt_setprop_cell(fdt, msi, "phandle", msi_ph); + qemu_fdt_setprop_cell(fdt, msi, "linux,phandle", msi_ph); + g_free(msi); + + pci = g_strdup_printf("/pci@%llx", + pmc->ccsrbar_base + MPC8544_PCI_REGS_OFFSET); + qemu_fdt_add_subnode(fdt, pci); + qemu_fdt_setprop_cell(fdt, pci, "cell-index", 0); + qemu_fdt_setprop_string(fdt, pci, "compatible", "fsl,mpc8540-pci"); + qemu_fdt_setprop_string(fdt, pci, "device_type", "pci"); + qemu_fdt_setprop_cells(fdt, pci, "interrupt-map-mask", 0xf800, 0x0, + 0x0, 0x7); + pci_map = pci_map_create(fdt, qemu_fdt_get_phandle(fdt, mpic), + pmc->pci_first_slot, pmc->pci_nr_slots, + &len); + qemu_fdt_setprop(fdt, pci, "interrupt-map", pci_map, len); + qemu_fdt_setprop_phandle(fdt, pci, "interrupt-parent", mpic); + 
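+    /*
+     * The interrupt-map built by pci_map_create() (defined earlier in this
+     * file) presumably holds one 7-cell entry per (slot, pin) pair: three
+     * PCI address cells (only the slot bits survive the 0xf800 mask set
+     * above), one cell for the INTx pin, the MPIC phandle, and two MPIC
+     * interrupt cells (number, sense).
+     */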
qemu_fdt_setprop_cells(fdt, pci, "interrupts", 24, 2); + qemu_fdt_setprop_cells(fdt, pci, "bus-range", 0, 255); + for (i = 0; i < 14; i++) { + pci_ranges[i] = cpu_to_be32(pci_ranges[i]); + } + qemu_fdt_setprop_cell(fdt, pci, "fsl,msi", msi_ph); + qemu_fdt_setprop(fdt, pci, "ranges", pci_ranges, sizeof(pci_ranges)); + qemu_fdt_setprop_cells(fdt, pci, "reg", + (pmc->ccsrbar_base + MPC8544_PCI_REGS_OFFSET) >> 32, + (pmc->ccsrbar_base + MPC8544_PCI_REGS_OFFSET), + 0, 0x1000); + qemu_fdt_setprop_cell(fdt, pci, "clock-frequency", 66666666); + qemu_fdt_setprop_cell(fdt, pci, "#interrupt-cells", 1); + qemu_fdt_setprop_cell(fdt, pci, "#size-cells", 2); + qemu_fdt_setprop_cell(fdt, pci, "#address-cells", 3); + qemu_fdt_setprop_string(fdt, "/aliases", "pci0", pci); + g_free(pci); + + if (pmc->has_mpc8xxx_gpio) { + create_dt_mpc8xxx_gpio(fdt, soc, mpic); + } + g_free(soc); + + if (pms->pbus_dev) { + platform_bus_create_devtree(pms, fdt, mpic); + } + g_free(mpic); + + pmc->fixup_devtree(fdt); + + if (toplevel_compat) { + qemu_fdt_setprop(fdt, "/", "compatible", toplevel_compat, + strlen(toplevel_compat) + 1); + } + +done: + if (!dry_run) { + qemu_fdt_dumpdtb(fdt, fdt_size); + cpu_physical_memory_write(addr, fdt, fdt_size); + } + ret = fdt_size; + g_free(fdt); + +out: + g_free(pci_map); + + return ret; +} + +typedef struct DeviceTreeParams { + PPCE500MachineState *machine; + hwaddr addr; + hwaddr initrd_base; + hwaddr initrd_size; + hwaddr kernel_base; + hwaddr kernel_size; + Notifier notifier; +} DeviceTreeParams; + +static void ppce500_reset_device_tree(void *opaque) +{ + DeviceTreeParams *p = opaque; + ppce500_load_device_tree(p->machine, p->addr, p->initrd_base, + p->initrd_size, p->kernel_base, p->kernel_size, + false); +} + +static void ppce500_init_notify(Notifier *notifier, void *data) +{ + DeviceTreeParams *p = container_of(notifier, DeviceTreeParams, notifier); + ppce500_reset_device_tree(p); +} + +static int ppce500_prep_device_tree(PPCE500MachineState *machine, + hwaddr addr, + hwaddr initrd_base, + hwaddr initrd_size, + hwaddr kernel_base, + hwaddr kernel_size) +{ + DeviceTreeParams *p = g_new(DeviceTreeParams, 1); + p->machine = machine; + p->addr = addr; + p->initrd_base = initrd_base; + p->initrd_size = initrd_size; + p->kernel_base = kernel_base; + p->kernel_size = kernel_size; + + qemu_register_reset(ppce500_reset_device_tree, p); + p->notifier.notify = ppce500_init_notify; + qemu_add_machine_init_done_notifier(&p->notifier); + + /* Issue the device tree loader once, so that we get the size of the blob */ + return ppce500_load_device_tree(machine, addr, initrd_base, initrd_size, + kernel_base, kernel_size, true); +} + +/* Create -kernel TLB entries for BookE. 
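+ *
+ * booke206_page_size_to_tlb() below converts a size in bytes into the MAS1
+ * TSIZE encoding, where TSIZE n covers 2^n KiB; for example, a 64 MiB
+ * mapping is 2^16 KiB, so 63 - clz64(65536) = 16. booke206_initial_map_tsize()
+ * then rounds the result up to an even value, since e500v2 cores only
+ * implement power-of-4 page sizes.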
*/ +hwaddr booke206_page_size_to_tlb(uint64_t size) +{ + return 63 - clz64(size / KiB); +} + +static int booke206_initial_map_tsize(CPUPPCState *env) +{ + struct boot_info *bi = env->load_info; + hwaddr dt_end; + int ps; + + /* Our initial TLB entry needs to cover everything from 0 to + the device tree top */ + dt_end = bi->dt_base + bi->dt_size; + ps = booke206_page_size_to_tlb(dt_end) + 1; + if (ps & 1) { + /* e500v2 can only do even TLB size bits */ + ps++; + } + return ps; +} + +static uint64_t mmubooke_initial_mapsize(CPUPPCState *env) +{ + int tsize; + + tsize = booke206_initial_map_tsize(env); + return (1ULL << 10 << tsize); +} + +static void mmubooke_create_initial_mapping(CPUPPCState *env) +{ + ppcmas_tlb_t *tlb = booke206_get_tlbm(env, 1, 0, 0); + hwaddr size; + int ps; + + ps = booke206_initial_map_tsize(env); + size = (ps << MAS1_TSIZE_SHIFT); + tlb->mas1 = MAS1_VALID | size; + tlb->mas2 = 0; + tlb->mas7_3 = 0; + tlb->mas7_3 |= MAS3_UR | MAS3_UW | MAS3_UX | MAS3_SR | MAS3_SW | MAS3_SX; + + env->tlb_dirty = true; +} + +static void ppce500_cpu_reset_sec(void *opaque) +{ + PowerPCCPU *cpu = opaque; + CPUState *cs = CPU(cpu); + + cpu_reset(cs); + + cs->exception_index = EXCP_HLT; +} + +static void ppce500_cpu_reset(void *opaque) +{ + PowerPCCPU *cpu = opaque; + CPUState *cs = CPU(cpu); + CPUPPCState *env = &cpu->env; + struct boot_info *bi = env->load_info; + + cpu_reset(cs); + + /* Set initial guest state. */ + cs->halted = 0; + env->gpr[1] = (16 * MiB) - 8; + env->gpr[3] = bi->dt_base; + env->gpr[4] = 0; + env->gpr[5] = 0; + env->gpr[6] = EPAPR_MAGIC; + env->gpr[7] = mmubooke_initial_mapsize(env); + env->gpr[8] = 0; + env->gpr[9] = 0; + env->nip = bi->entry; + mmubooke_create_initial_mapping(env); +} + +static DeviceState *ppce500_init_mpic_qemu(PPCE500MachineState *pms, + IrqLines *irqs) +{ + DeviceState *dev; + SysBusDevice *s; + int i, j, k; + MachineState *machine = MACHINE(pms); + unsigned int smp_cpus = machine->smp.cpus; + const PPCE500MachineClass *pmc = PPCE500_MACHINE_GET_CLASS(pms); + + dev = qdev_new(TYPE_OPENPIC); + object_property_add_child(OBJECT(machine), "pic", OBJECT(dev)); + qdev_prop_set_uint32(dev, "model", pmc->mpic_version); + qdev_prop_set_uint32(dev, "nb_cpus", smp_cpus); + + s = SYS_BUS_DEVICE(dev); + sysbus_realize_and_unref(s, &error_fatal); + + k = 0; + for (i = 0; i < smp_cpus; i++) { + for (j = 0; j < OPENPIC_OUTPUT_NB; j++) { + sysbus_connect_irq(s, k++, irqs[i].irq[j]); + } + } + + return dev; +} + +static DeviceState *ppce500_init_mpic_kvm(const PPCE500MachineClass *pmc, + IrqLines *irqs, Error **errp) +{ + DeviceState *dev; + CPUState *cs; + + dev = qdev_new(TYPE_KVM_OPENPIC); + qdev_prop_set_uint32(dev, "model", pmc->mpic_version); + + if (!sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), errp)) { + object_unparent(OBJECT(dev)); + return NULL; + } + + CPU_FOREACH(cs) { + if (kvm_openpic_connect_vcpu(dev, cs)) { + fprintf(stderr, "%s: failed to connect vcpu to irqchip\n", + __func__); + abort(); + } + } + + return dev; +} + +static DeviceState *ppce500_init_mpic(PPCE500MachineState *pms, + MemoryRegion *ccsr, + IrqLines *irqs) +{ + const PPCE500MachineClass *pmc = PPCE500_MACHINE_GET_CLASS(pms); + DeviceState *dev = NULL; + SysBusDevice *s; + + if (kvm_enabled()) { + Error *err = NULL; + + if (kvm_kernel_irqchip_allowed()) { + dev = ppce500_init_mpic_kvm(pmc, irqs, &err); + } + if (kvm_kernel_irqchip_required() && !dev) { + error_reportf_err(err, + "kernel_irqchip requested but unavailable: "); + exit(1); + } + } + + if (!dev) { + dev = 
ppce500_init_mpic_qemu(pms, irqs); + } + + s = SYS_BUS_DEVICE(dev); + memory_region_add_subregion(ccsr, MPC8544_MPIC_REGS_OFFSET, + s->mmio[0].memory); + + return dev; +} + +static void ppce500_power_off(void *opaque, int line, int on) +{ + if (on) { + qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN); + } +} + +void ppce500_init(MachineState *machine) +{ + MemoryRegion *address_space_mem = get_system_memory(); + PPCE500MachineState *pms = PPCE500_MACHINE(machine); + const PPCE500MachineClass *pmc = PPCE500_MACHINE_GET_CLASS(machine); + PCIBus *pci_bus; + CPUPPCState *env = NULL; + uint64_t loadaddr; + hwaddr kernel_base = -1LL; + int kernel_size = 0; + hwaddr dt_base = 0; + hwaddr initrd_base = 0; + int initrd_size = 0; + hwaddr cur_base = 0; + char *filename; + const char *payload_name; + bool kernel_as_payload; + hwaddr bios_entry = 0; + target_long payload_size; + struct boot_info *boot_info; + int dt_size; + int i; + unsigned int smp_cpus = machine->smp.cpus; + /* irq num for pin INTA, INTB, INTC and INTD is 1, 2, 3 and + * 4 respectively */ + unsigned int pci_irq_nrs[PCI_NUM_PINS] = {1, 2, 3, 4}; + IrqLines *irqs; + DeviceState *dev, *mpicdev; + CPUPPCState *firstenv = NULL; + MemoryRegion *ccsr_addr_space; + SysBusDevice *s; + PPCE500CCSRState *ccsr; + I2CBus *i2c; + + irqs = g_new0(IrqLines, smp_cpus); + for (i = 0; i < smp_cpus; i++) { + PowerPCCPU *cpu; + CPUState *cs; + qemu_irq *input; + + cpu = POWERPC_CPU(object_new(machine->cpu_type)); + env = &cpu->env; + cs = CPU(cpu); + + if (env->mmu_model != POWERPC_MMU_BOOKE206) { + error_report("MMU model %i not supported by this machine", + env->mmu_model); + exit(1); + } + + /* + * Secondary CPU starts in halted state for now. Needs to change + * when implementing non-kernel boot. + */ + object_property_set_bool(OBJECT(cs), "start-powered-off", i != 0, + &error_fatal); + qdev_realize_and_unref(DEVICE(cs), NULL, &error_fatal); + + if (!firstenv) { + firstenv = env; + } + + input = (qemu_irq *)env->irq_inputs; + irqs[i].irq[OPENPIC_OUTPUT_INT] = input[PPCE500_INPUT_INT]; + irqs[i].irq[OPENPIC_OUTPUT_CINT] = input[PPCE500_INPUT_CINT]; + env->spr_cb[SPR_BOOKE_PIR].default_value = cs->cpu_index = i; + env->mpic_iack = pmc->ccsrbar_base + MPC8544_MPIC_REGS_OFFSET + 0xa0; + + ppc_booke_timers_init(cpu, PLATFORM_CLK_FREQ_HZ, PPC_TIMER_E500); + + /* Register reset handler */ + if (!i) { + /* Primary CPU */ + struct boot_info *boot_info; + boot_info = g_malloc0(sizeof(struct boot_info)); + qemu_register_reset(ppce500_cpu_reset, cpu); + env->load_info = boot_info; + } else { + /* Secondary CPUs */ + qemu_register_reset(ppce500_cpu_reset_sec, cpu); + } + } + + env = firstenv; + + if (!QEMU_IS_ALIGNED(machine->ram_size, RAM_SIZES_ALIGN)) { + error_report("RAM size must be multiple of %" PRIu64, RAM_SIZES_ALIGN); + exit(EXIT_FAILURE); + } + + /* Register Memory */ + memory_region_add_subregion(address_space_mem, 0, machine->ram); + + dev = qdev_new("e500-ccsr"); + object_property_add_child(qdev_get_machine(), "e500-ccsr", + OBJECT(dev)); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + ccsr = CCSR(dev); + ccsr_addr_space = &ccsr->ccsr_space; + memory_region_add_subregion(address_space_mem, pmc->ccsrbar_base, + ccsr_addr_space); + + mpicdev = ppce500_init_mpic(pms, ccsr_addr_space, irqs); + g_free(irqs); + + /* Serial */ + if (serial_hd(0)) { + serial_mm_init(ccsr_addr_space, MPC8544_SERIAL0_REGS_OFFSET, + 0, qdev_get_gpio_in(mpicdev, 42), 399193, + serial_hd(0), DEVICE_BIG_ENDIAN); + } + + if (serial_hd(1)) { + 
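+        /*
+         * serial1 is wired to the same MPIC input (42) as serial0 above,
+         * so both UARTs share a single interrupt line on this board model.
+         */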
serial_mm_init(ccsr_addr_space, MPC8544_SERIAL1_REGS_OFFSET, + 0, qdev_get_gpio_in(mpicdev, 42), 399193, + serial_hd(1), DEVICE_BIG_ENDIAN); + } + /* I2C */ + dev = qdev_new("mpc-i2c"); + s = SYS_BUS_DEVICE(dev); + sysbus_realize_and_unref(s, &error_fatal); + sysbus_connect_irq(s, 0, qdev_get_gpio_in(mpicdev, MPC8544_I2C_IRQ)); + memory_region_add_subregion(ccsr_addr_space, MPC8544_I2C_REGS_OFFSET, + sysbus_mmio_get_region(s, 0)); + i2c = (I2CBus *)qdev_get_child_bus(dev, "i2c"); + i2c_slave_create_simple(i2c, "ds1338", RTC_REGS_OFFSET); + + + /* General Utility device */ + dev = qdev_new("mpc8544-guts"); + s = SYS_BUS_DEVICE(dev); + sysbus_realize_and_unref(s, &error_fatal); + memory_region_add_subregion(ccsr_addr_space, MPC8544_UTIL_OFFSET, + sysbus_mmio_get_region(s, 0)); + + /* PCI */ + dev = qdev_new("e500-pcihost"); + object_property_add_child(qdev_get_machine(), "pci-host", OBJECT(dev)); + qdev_prop_set_uint32(dev, "first_slot", pmc->pci_first_slot); + qdev_prop_set_uint32(dev, "first_pin_irq", pci_irq_nrs[0]); + s = SYS_BUS_DEVICE(dev); + sysbus_realize_and_unref(s, &error_fatal); + for (i = 0; i < PCI_NUM_PINS; i++) { + sysbus_connect_irq(s, i, qdev_get_gpio_in(mpicdev, pci_irq_nrs[i])); + } + + memory_region_add_subregion(ccsr_addr_space, MPC8544_PCI_REGS_OFFSET, + sysbus_mmio_get_region(s, 0)); + + pci_bus = (PCIBus *)qdev_get_child_bus(dev, "pci.0"); + if (!pci_bus) + printf("couldn't create PCI controller!\n"); + + if (pci_bus) { + /* Register network interfaces. */ + for (i = 0; i < nb_nics; i++) { + pci_nic_init_nofail(&nd_table[i], pci_bus, "virtio-net-pci", NULL); + } + } + + /* Register spinning region */ + sysbus_create_simple("e500-spin", pmc->spin_base, NULL); + + if (pmc->has_mpc8xxx_gpio) { + qemu_irq poweroff_irq; + + dev = qdev_new("mpc8xxx_gpio"); + s = SYS_BUS_DEVICE(dev); + sysbus_realize_and_unref(s, &error_fatal); + sysbus_connect_irq(s, 0, qdev_get_gpio_in(mpicdev, MPC8XXX_GPIO_IRQ)); + memory_region_add_subregion(ccsr_addr_space, MPC8XXX_GPIO_OFFSET, + sysbus_mmio_get_region(s, 0)); + + /* Power Off GPIO at Pin 0 */ + poweroff_irq = qemu_allocate_irq(ppce500_power_off, NULL, 0); + qdev_connect_gpio_out(dev, 0, poweroff_irq); + } + + /* Platform Bus Device */ + if (pmc->has_platform_bus) { + dev = qdev_new(TYPE_PLATFORM_BUS_DEVICE); + dev->id = g_strdup(TYPE_PLATFORM_BUS_DEVICE); + qdev_prop_set_uint32(dev, "num_irqs", pmc->platform_bus_num_irqs); + qdev_prop_set_uint32(dev, "mmio_size", pmc->platform_bus_size); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + pms->pbus_dev = PLATFORM_BUS_DEVICE(dev); + + s = SYS_BUS_DEVICE(pms->pbus_dev); + for (i = 0; i < pmc->platform_bus_num_irqs; i++) { + int irqn = pmc->platform_bus_first_irq + i; + sysbus_connect_irq(s, i, qdev_get_gpio_in(mpicdev, irqn)); + } + + memory_region_add_subregion(address_space_mem, + pmc->platform_bus_base, + sysbus_mmio_get_region(s, 0)); + } + + /* + * Smart firmware defaults ahead! + * + * We follow the following table to select which payload we execute. + * + * -kernel | -bios | payload + * ---------+-------+--------- + * N | Y | u-boot + * N | N | u-boot + * Y | Y | u-boot + * Y | N | kernel + * + * This ensures backwards compatibility with how we used to expose + * -kernel to users but allows them to run through u-boot as well. 
+ */
+    kernel_as_payload = false;
+    if (machine->firmware == NULL) {
+        if (machine->kernel_filename) {
+            payload_name = machine->kernel_filename;
+            kernel_as_payload = true;
+        } else {
+            payload_name = "u-boot.e500";
+        }
+    } else {
+        payload_name = machine->firmware;
+    }
+
+    filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, payload_name);
+    if (!filename) {
+        error_report("could not find firmware/kernel file '%s'", payload_name);
+        exit(1);
+    }
+
+    payload_size = load_elf(filename, NULL, NULL, NULL,
+                            &bios_entry, &loadaddr, NULL, NULL,
+                            1, PPC_ELF_MACHINE, 0, 0);
+    if (payload_size < 0) {
+        /*
+         * Hrm. No ELF image? Try a uImage, maybe someone is giving us an
+         * ePAPR compliant kernel
+         */
+        loadaddr = LOAD_UIMAGE_LOADADDR_INVALID;
+        payload_size = load_uimage(filename, &bios_entry, &loadaddr, NULL,
+                                   NULL, NULL);
+        if (payload_size < 0) {
+            error_report("could not load firmware '%s'", filename);
+            exit(1);
+        }
+    }
+
+    g_free(filename);
+
+    if (kernel_as_payload) {
+        kernel_base = loadaddr;
+        kernel_size = payload_size;
+    }
+
+    cur_base = loadaddr + payload_size;
+    if (cur_base < 32 * MiB) {
+        /* u-boot occupies memory up to 32MB, so load blobs above */
+        cur_base = 32 * MiB;
+    }
+
+    /* Load bare kernel only if no bios/u-boot has been provided */
+    if (machine->kernel_filename && !kernel_as_payload) {
+        kernel_base = cur_base;
+        kernel_size = load_image_targphys(machine->kernel_filename,
+                                          cur_base,
+                                          machine->ram_size - cur_base);
+        if (kernel_size < 0) {
+            error_report("could not load kernel '%s'",
+                         machine->kernel_filename);
+            exit(1);
+        }
+
+        cur_base += kernel_size;
+    }
+
+    /* Load initrd. */
+    if (machine->initrd_filename) {
+        initrd_base = (cur_base + INITRD_LOAD_PAD) & ~INITRD_PAD_MASK;
+        initrd_size = load_image_targphys(machine->initrd_filename, initrd_base,
+                                          machine->ram_size - initrd_base);
+
+        if (initrd_size < 0) {
+            error_report("could not load initial ram disk '%s'",
+                         machine->initrd_filename);
+            exit(1);
+        }
+
+        cur_base = initrd_base + initrd_size;
+    }
+
+    /*
+     * Reserve space for dtb behind the kernel image because Linux has a bug
+     * where it can only handle the dtb if it's within the first 64MB of where
+     * the kernel starts. The dtb cannot reach initrd_base because
+     * INITRD_LOAD_PAD ensures enough space between kernel and initrd.
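+     *
+     * For example, with a payload loaded at address 0 whose size is
+     * 0x500000, dt_base below becomes (0x500000 + DTC_LOAD_PAD) &
+     * ~DTC_PAD_MASK: a padded distance above the payload, rounded down to
+     * the pad boundary and comfortably inside that 64MB window (the pad
+     * constants are defined near the top of this file).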
+ */ + dt_base = (loadaddr + payload_size + DTC_LOAD_PAD) & ~DTC_PAD_MASK; + if (dt_base + DTB_MAX_SIZE > machine->ram_size) { + error_report("not enough memory for device tree"); + exit(1); + } + + dt_size = ppce500_prep_device_tree(pms, dt_base, + initrd_base, initrd_size, + kernel_base, kernel_size); + if (dt_size < 0) { + error_report("couldn't load device tree"); + exit(1); + } + assert(dt_size < DTB_MAX_SIZE); + + boot_info = env->load_info; + boot_info->entry = bios_entry; + boot_info->dt_base = dt_base; + boot_info->dt_size = dt_size; +} + +static void e500_ccsr_initfn(Object *obj) +{ + PPCE500CCSRState *ccsr = CCSR(obj); + memory_region_init(&ccsr->ccsr_space, obj, "e500-ccsr", + MPC8544_CCSRBAR_SIZE); +} + +static const TypeInfo e500_ccsr_info = { + .name = TYPE_CCSR, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(PPCE500CCSRState), + .instance_init = e500_ccsr_initfn, +}; + +static const TypeInfo ppce500_info = { + .name = TYPE_PPCE500_MACHINE, + .parent = TYPE_MACHINE, + .abstract = true, + .instance_size = sizeof(PPCE500MachineState), + .class_size = sizeof(PPCE500MachineClass), +}; + +static void e500_register_types(void) +{ + type_register_static(&e500_ccsr_info); + type_register_static(&ppce500_info); +} + +type_init(e500_register_types) diff --git a/hw/ppc/e500.h b/hw/ppc/e500.h new file mode 100644 index 000000000..1e5853b03 --- /dev/null +++ b/hw/ppc/e500.h @@ -0,0 +1,49 @@ +#ifndef PPCE500_H +#define PPCE500_H + +#include "hw/boards.h" +#include "hw/platform-bus.h" +#include "qom/object.h" + +struct PPCE500MachineState { + /*< private >*/ + MachineState parent_obj; + + /* points to instance of TYPE_PLATFORM_BUS_DEVICE if + * board supports dynamic sysbus devices + */ + PlatformBusDevice *pbus_dev; +}; + +struct PPCE500MachineClass { + /*< private >*/ + MachineClass parent_class; + + /* required -- must at least add toplevel board compatible */ + void (*fixup_devtree)(void *fdt); + + int pci_first_slot; + int pci_nr_slots; + + int mpic_version; + bool has_mpc8xxx_gpio; + bool has_platform_bus; + hwaddr platform_bus_base; + hwaddr platform_bus_size; + int platform_bus_first_irq; + int platform_bus_num_irqs; + hwaddr ccsrbar_base; + hwaddr pci_pio_base; + hwaddr pci_mmio_base; + hwaddr pci_mmio_bus_base; + hwaddr spin_base; +}; + +void ppce500_init(MachineState *machine); + +hwaddr booke206_page_size_to_tlb(uint64_t size); + +#define TYPE_PPCE500_MACHINE "ppce500-base-machine" +OBJECT_DECLARE_TYPE(PPCE500MachineState, PPCE500MachineClass, PPCE500_MACHINE) + +#endif diff --git a/hw/ppc/e500plat.c b/hw/ppc/e500plat.c new file mode 100644 index 000000000..fc911bbb7 --- /dev/null +++ b/hw/ppc/e500plat.c @@ -0,0 +1,122 @@ +/* + * Generic device-tree-driven paravirt PPC e500 platform + * + * Copyright 2012 Freescale Semiconductor, Inc. + * + * This is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/units.h"
+#include "e500.h"
+#include "hw/net/fsl_etsec/etsec.h"
+#include "sysemu/device_tree.h"
+#include "sysemu/kvm.h"
+#include "hw/sysbus.h"
+#include "hw/pci/pci.h"
+#include "hw/ppc/openpic.h"
+#include "kvm_ppc.h"
+
+static void e500plat_fixup_devtree(void *fdt)
+{
+    const char model[] = "QEMU ppce500";
+    const char compatible[] = "fsl,qemu-e500";
+
+    qemu_fdt_setprop(fdt, "/", "model", model, sizeof(model));
+    qemu_fdt_setprop(fdt, "/", "compatible", compatible,
+                     sizeof(compatible));
+}
+
+static void e500plat_init(MachineState *machine)
+{
+    PPCE500MachineClass *pmc = PPCE500_MACHINE_GET_CLASS(machine);
+    /* Older KVM versions don't support EPR which breaks guests when we announce
+       MPIC variants that support EPR. Revert to an older one for those */
+    if (kvm_enabled() && !kvmppc_has_cap_epr()) {
+        pmc->mpic_version = OPENPIC_MODEL_FSL_MPIC_20;
+    }
+
+    ppce500_init(machine);
+}
+
+static void e500plat_machine_device_plug_cb(HotplugHandler *hotplug_dev,
+                                            DeviceState *dev, Error **errp)
+{
+    PPCE500MachineState *pms = PPCE500_MACHINE(hotplug_dev);
+
+    if (pms->pbus_dev) {
+        MachineClass *mc = MACHINE_GET_CLASS(pms);
+
+        if (device_is_dynamic_sysbus(mc, dev)) {
+            platform_bus_link_device(pms->pbus_dev, SYS_BUS_DEVICE(dev));
+        }
+    }
+}
+
+static
+HotplugHandler *e500plat_machine_get_hotplug_handler(MachineState *machine,
+                                                     DeviceState *dev)
+{
+    MachineClass *mc = MACHINE_GET_CLASS(machine);
+
+    if (device_is_dynamic_sysbus(mc, dev)) {
+        return HOTPLUG_HANDLER(machine);
+    }
+
+    return NULL;
+}
+
+#define TYPE_E500PLAT_MACHINE MACHINE_TYPE_NAME("ppce500")
+
+static void e500plat_machine_class_init(ObjectClass *oc, void *data)
+{
+    PPCE500MachineClass *pmc = PPCE500_MACHINE_CLASS(oc);
+    HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(oc);
+    MachineClass *mc = MACHINE_CLASS(oc);
+
+    assert(!mc->get_hotplug_handler);
+    mc->get_hotplug_handler = e500plat_machine_get_hotplug_handler;
+    hc->plug = e500plat_machine_device_plug_cb;
+
+    pmc->pci_first_slot = 0x1;
+    pmc->pci_nr_slots = PCI_SLOT_MAX - 1;
+    pmc->fixup_devtree = e500plat_fixup_devtree;
+    pmc->mpic_version = OPENPIC_MODEL_FSL_MPIC_42;
+    pmc->has_mpc8xxx_gpio = true;
+    pmc->has_platform_bus = true;
+    pmc->platform_bus_base = 0xf00000000ULL;
+    pmc->platform_bus_size = 128 * MiB;
+    pmc->platform_bus_first_irq = 5;
+    pmc->platform_bus_num_irqs = 10;
+    pmc->ccsrbar_base = 0xFE0000000ULL;
+    pmc->pci_pio_base = 0xFE1000000ULL;
+    pmc->pci_mmio_base = 0xC00000000ULL;
+    pmc->pci_mmio_bus_base = 0xE0000000ULL;
+    pmc->spin_base = 0xFEF000000ULL;
+
+    mc->desc = "generic paravirt e500 platform";
+    mc->init = e500plat_init;
+    mc->max_cpus = 32;
+    mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("e500v2_v30");
+    mc->default_ram_id = "mpc8544ds.ram";
+    machine_class_allow_dynamic_sysbus_dev(mc, TYPE_ETSEC_COMMON);
+}
+
+static const TypeInfo e500plat_info = {
+    .name = TYPE_E500PLAT_MACHINE,
+    .parent = TYPE_PPCE500_MACHINE,
+    .class_init = e500plat_machine_class_init,
+    .interfaces = (InterfaceInfo[]) {
+        { TYPE_HOTPLUG_HANDLER },
+        { }
+    }
+};
+
+static void e500plat_register_types(void)
+{
+    type_register_static(&e500plat_info);
+}
+type_init(e500plat_register_types)
diff --git a/hw/ppc/fdt.c b/hw/ppc/fdt.c
new file mode 100644
index 000000000..0828ad725
--- /dev/null
+++ b/hw/ppc/fdt.c
@@ -0,0 +1,49 @@
+/*
+ * QEMU PowerPC helper routines for the device tree.
+ *
+ * Copyright (C) 2016 IBM Corp.
+ *
+ * This code is licensed under the GPL version 2 or later.
See the + * COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "target/ppc/cpu.h" +#include "target/ppc/mmu-hash64.h" + +#include "hw/ppc/fdt.h" + +#if defined(TARGET_PPC64) +size_t ppc_create_page_sizes_prop(PowerPCCPU *cpu, uint32_t *prop, + size_t maxsize) +{ + size_t maxcells = maxsize / sizeof(uint32_t); + int i, j, count; + uint32_t *p = prop; + + for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) { + PPCHash64SegmentPageSizes *sps = &cpu->hash64_opts->sps[i]; + + if (!sps->page_shift) { + break; + } + for (count = 0; count < PPC_PAGE_SIZES_MAX_SZ; count++) { + if (sps->enc[count].page_shift == 0) { + break; + } + } + if ((p - prop) >= (maxcells - 3 - count * 2)) { + break; + } + *(p++) = cpu_to_be32(sps->page_shift); + *(p++) = cpu_to_be32(sps->slb_enc); + *(p++) = cpu_to_be32(count); + for (j = 0; j < count; j++) { + *(p++) = cpu_to_be32(sps->enc[j].page_shift); + *(p++) = cpu_to_be32(sps->enc[j].pte_enc); + } + } + + return (p - prop) * sizeof(uint32_t); +} +#endif diff --git a/hw/ppc/fw_cfg.c b/hw/ppc/fw_cfg.c new file mode 100644 index 000000000..a88b5c4bd --- /dev/null +++ b/hw/ppc/fw_cfg.c @@ -0,0 +1,45 @@ +/* + * fw_cfg helpers (PPC specific) + * + * Copyright (c) 2019 Red Hat, Inc. + * + * Author: + * Philippe Mathieu-Daudé + * + * SPDX-License-Identifier: GPL-2.0-or-later + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "hw/ppc/ppc.h" +#include "hw/nvram/fw_cfg.h" + +const char *fw_cfg_arch_key_name(uint16_t key) +{ + static const struct { + uint16_t key; + const char *name; + } fw_cfg_arch_wellknown_keys[] = { + {FW_CFG_PPC_WIDTH, "width"}, + {FW_CFG_PPC_HEIGHT, "height"}, + {FW_CFG_PPC_DEPTH, "depth"}, + {FW_CFG_PPC_TBFREQ, "tbfreq"}, + {FW_CFG_PPC_CLOCKFREQ, "clockfreq"}, + {FW_CFG_PPC_IS_KVM, "is_kvm"}, + {FW_CFG_PPC_KVM_HC, "kvm_hc"}, + {FW_CFG_PPC_KVM_PID, "pid"}, + {FW_CFG_PPC_NVRAM_ADDR, "nvram_addr"}, + {FW_CFG_PPC_BUSFREQ, "busfreq"}, + {FW_CFG_PPC_NVRAM_FLAT, "nvram_flat"}, + {FW_CFG_PPC_VIACONFIG, "viaconfig"}, + }; + + for (size_t i = 0; i < ARRAY_SIZE(fw_cfg_arch_wellknown_keys); i++) { + if (fw_cfg_arch_wellknown_keys[i].key == key) { + return fw_cfg_arch_wellknown_keys[i].name; + } + } + return NULL; +} diff --git a/hw/ppc/mac.h b/hw/ppc/mac.h new file mode 100644 index 000000000..22c840807 --- /dev/null +++ b/hw/ppc/mac.h @@ -0,0 +1,108 @@ +/* + * QEMU PowerMac emulation shared definitions and prototypes + * + * Copyright (c) 2004-2007 Fabrice Bellard + * Copyright (c) 2007 Jocelyn Mayer + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#ifndef PPC_MAC_H +#define PPC_MAC_H + +#include "qemu/units.h" +#include "exec/memory.h" +#include "hw/boards.h" +#include "hw/sysbus.h" +#include "hw/input/adb.h" +#include "hw/misc/mos6522.h" +#include "hw/pci/pci_host.h" +#include "hw/pci-host/uninorth.h" +#include "qom/object.h" + +/* SMP is not enabled, for now */ +#define MAX_CPUS 1 + +#define NVRAM_SIZE 0x2000 +#define PROM_FILENAME "openbios-ppc" + +#define KERNEL_LOAD_ADDR 0x01000000 +#define KERNEL_GAP 0x00100000 + +#define ESCC_CLOCK 3686400 + +/* Old World IRQs */ +#define OLDWORLD_CUDA_IRQ 0x12 +#define OLDWORLD_ESCCB_IRQ 0x10 +#define OLDWORLD_ESCCA_IRQ 0xf +#define OLDWORLD_IDE0_IRQ 0xd +#define OLDWORLD_IDE0_DMA_IRQ 0x2 +#define OLDWORLD_IDE1_IRQ 0xe +#define OLDWORLD_IDE1_DMA_IRQ 0x3 + +/* New World IRQs */ +#define NEWWORLD_CUDA_IRQ 0x19 +#define NEWWORLD_PMU_IRQ 0x19 +#define NEWWORLD_ESCCB_IRQ 0x24 +#define NEWWORLD_ESCCA_IRQ 0x25 +#define NEWWORLD_IDE0_IRQ 0xd +#define NEWWORLD_IDE0_DMA_IRQ 0x2 +#define NEWWORLD_IDE1_IRQ 0xe +#define NEWWORLD_IDE1_DMA_IRQ 0x3 +#define NEWWORLD_EXTING_GPIO1 0x2f +#define NEWWORLD_EXTING_GPIO9 0x37 + +/* Core99 machine */ +#define TYPE_CORE99_MACHINE MACHINE_TYPE_NAME("mac99") +typedef struct Core99MachineState Core99MachineState; +DECLARE_INSTANCE_CHECKER(Core99MachineState, CORE99_MACHINE, + TYPE_CORE99_MACHINE) + +#define CORE99_VIA_CONFIG_CUDA 0x0 +#define CORE99_VIA_CONFIG_PMU 0x1 +#define CORE99_VIA_CONFIG_PMU_ADB 0x2 + +struct Core99MachineState { + /*< private >*/ + MachineState parent; + + uint8_t via_config; +}; + +/* Grackle PCI */ +#define TYPE_GRACKLE_PCI_HOST_BRIDGE "grackle-pcihost" + +/* Mac NVRAM */ +#define TYPE_MACIO_NVRAM "macio-nvram" +OBJECT_DECLARE_SIMPLE_TYPE(MacIONVRAMState, MACIO_NVRAM) + +struct MacIONVRAMState { + /*< private >*/ + SysBusDevice parent_obj; + /*< public >*/ + + uint32_t size; + uint32_t it_shift; + + MemoryRegion mem; + uint8_t *data; +}; + +void pmac_format_nvram_partition (MacIONVRAMState *nvr, int len); +#endif /* PPC_MAC_H */ diff --git a/hw/ppc/mac_newworld.c b/hw/ppc/mac_newworld.c new file mode 100644 index 000000000..7bb7ac399 --- /dev/null +++ b/hw/ppc/mac_newworld.c @@ -0,0 +1,663 @@ +/* + * QEMU PowerPC CHRP (currently NewWorld PowerMac) hardware System Emulator + * + * Copyright (c) 2004-2007 Fabrice Bellard + * Copyright (c) 2007 Jocelyn Mayer + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + * + * PCI bus layout on a real G5 (U3 based): + * + * 0000:f0:0b.0 Host bridge [0600]: Apple Computer Inc. U3 AGP [106b:004b] + * 0000:f0:10.0 VGA compatible controller [0300]: ATI Technologies Inc RV350 AP [Radeon 9600] [1002:4150] + * 0001:00:00.0 Host bridge [0600]: Apple Computer Inc. CPC945 HT Bridge [106b:004a] + * 0001:00:01.0 PCI bridge [0604]: Advanced Micro Devices [AMD] AMD-8131 PCI-X Bridge [1022:7450] (rev 12) + * 0001:00:02.0 PCI bridge [0604]: Advanced Micro Devices [AMD] AMD-8131 PCI-X Bridge [1022:7450] (rev 12) + * 0001:00:03.0 PCI bridge [0604]: Apple Computer Inc. K2 HT-PCI Bridge [106b:0045] + * 0001:00:04.0 PCI bridge [0604]: Apple Computer Inc. K2 HT-PCI Bridge [106b:0046] + * 0001:00:05.0 PCI bridge [0604]: Apple Computer Inc. K2 HT-PCI Bridge [106b:0047] + * 0001:00:06.0 PCI bridge [0604]: Apple Computer Inc. K2 HT-PCI Bridge [106b:0048] + * 0001:00:07.0 PCI bridge [0604]: Apple Computer Inc. K2 HT-PCI Bridge [106b:0049] + * 0001:01:07.0 Class [ff00]: Apple Computer Inc. K2 KeyLargo Mac/IO [106b:0041] (rev 20) + * 0001:01:08.0 USB Controller [0c03]: Apple Computer Inc. K2 KeyLargo USB [106b:0040] + * 0001:01:09.0 USB Controller [0c03]: Apple Computer Inc. K2 KeyLargo USB [106b:0040] + * 0001:02:0b.0 USB Controller [0c03]: NEC Corporation USB [1033:0035] (rev 43) + * 0001:02:0b.1 USB Controller [0c03]: NEC Corporation USB [1033:0035] (rev 43) + * 0001:02:0b.2 USB Controller [0c03]: NEC Corporation USB 2.0 [1033:00e0] (rev 04) + * 0001:03:0d.0 Class [ff00]: Apple Computer Inc. K2 ATA/100 [106b:0043] + * 0001:03:0e.0 FireWire (IEEE 1394) [0c00]: Apple Computer Inc. K2 FireWire [106b:0042] + * 0001:04:0f.0 Ethernet controller [0200]: Apple Computer Inc. 
K2 GMAC (Sun GEM) [106b:004c] + * 0001:05:0c.0 IDE interface [0101]: Broadcom K2 SATA [1166:0240] + */ + +#include "qemu/osdep.h" +#include "qemu-common.h" +#include "qemu/datadir.h" +#include "qapi/error.h" +#include "hw/ppc/ppc.h" +#include "hw/qdev-properties.h" +#include "hw/ppc/mac.h" +#include "hw/input/adb.h" +#include "hw/ppc/mac_dbdma.h" +#include "hw/pci/pci.h" +#include "net/net.h" +#include "sysemu/sysemu.h" +#include "hw/nvram/fw_cfg.h" +#include "hw/char/escc.h" +#include "hw/misc/macio/macio.h" +#include "hw/ppc/openpic.h" +#include "hw/loader.h" +#include "hw/fw-path-provider.h" +#include "elf.h" +#include "qemu/error-report.h" +#include "sysemu/kvm.h" +#include "sysemu/reset.h" +#include "kvm_ppc.h" +#include "hw/usb.h" +#include "hw/sysbus.h" +#include "trace.h" + +#define MAX_IDE_BUS 2 +#define CFG_ADDR 0xf0000510 +#define TBFREQ (100UL * 1000UL * 1000UL) +#define CLOCKFREQ (900UL * 1000UL * 1000UL) +#define BUSFREQ (100UL * 1000UL * 1000UL) + +#define NDRV_VGA_FILENAME "qemu_vga.ndrv" + +#define PROM_BASE 0xfff00000 +#define PROM_SIZE (1 * MiB) + +static void fw_cfg_boot_set(void *opaque, const char *boot_device, + Error **errp) +{ + fw_cfg_modify_i16(opaque, FW_CFG_BOOT_DEVICE, boot_device[0]); +} + +static uint64_t translate_kernel_address(void *opaque, uint64_t addr) +{ + return (addr & 0x0fffffff) + KERNEL_LOAD_ADDR; +} + +static void ppc_core99_reset(void *opaque) +{ + PowerPCCPU *cpu = opaque; + + cpu_reset(CPU(cpu)); + /* 970 CPUs want to get their initial IP as part of their boot protocol */ + cpu->env.nip = PROM_BASE + 0x100; +} + +/* PowerPC Mac99 hardware initialisation */ +static void ppc_core99_init(MachineState *machine) +{ + ram_addr_t ram_size = machine->ram_size; + const char *bios_name = machine->firmware ?: PROM_FILENAME; + const char *kernel_filename = machine->kernel_filename; + const char *kernel_cmdline = machine->kernel_cmdline; + const char *initrd_filename = machine->initrd_filename; + const char *boot_device = machine->boot_order; + Core99MachineState *core99_machine = CORE99_MACHINE(machine); + PowerPCCPU *cpu = NULL; + CPUPPCState *env = NULL; + char *filename; + IrqLines *openpic_irqs; + int linux_boot, i, j, k; + MemoryRegion *bios = g_new(MemoryRegion, 1); + hwaddr kernel_base, initrd_base, cmdline_base = 0; + long kernel_size, initrd_size; + UNINHostState *uninorth_pci; + PCIBus *pci_bus; + PCIDevice *macio; + ESCCState *escc; + bool has_pmu, has_adb; + MACIOIDEState *macio_ide; + BusState *adb_bus; + MacIONVRAMState *nvr; + int bios_size; + int ppc_boot_device; + DriveInfo *hd[MAX_IDE_BUS * MAX_IDE_DEVS]; + void *fw_cfg; + int machine_arch; + SysBusDevice *s; + DeviceState *dev, *pic_dev; + DeviceState *uninorth_internal_dev = NULL, *uninorth_agp_dev = NULL; + hwaddr nvram_addr = 0xFFF04000; + uint64_t tbfreq; + unsigned int smp_cpus = machine->smp.cpus; + + linux_boot = (kernel_filename != NULL); + + /* init CPUs */ + for (i = 0; i < smp_cpus; i++) { + cpu = POWERPC_CPU(cpu_create(machine->cpu_type)); + env = &cpu->env; + + /* Set time-base frequency to 100 Mhz */ + cpu_ppc_tb_init(env, TBFREQ); + qemu_register_reset(ppc_core99_reset, cpu); + } + + /* allocate RAM */ + if (machine->ram_size > 2 * GiB) { + error_report("RAM size more than 2 GiB is not supported"); + exit(1); + } + memory_region_add_subregion(get_system_memory(), 0, machine->ram); + + /* allocate and load firmware ROM */ + memory_region_init_rom(bios, NULL, "ppc_core99.bios", PROM_SIZE, + &error_fatal); + memory_region_add_subregion(get_system_memory(), PROM_BASE, bios); 
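+    /*
+     * translate_kernel_address() above simply strips the kernel's virtual
+     * base when -kernel is loaded below: only the low 28 bits of an ELF
+     * address are kept and rebased to KERNEL_LOAD_ADDR, so e.g. 0xc0000000
+     * maps to 0x01000000 and 0xc0123456 to 0x01123456.
+     */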
+ + filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name); + if (filename) { + /* Load OpenBIOS (ELF) */ + bios_size = load_elf(filename, NULL, NULL, NULL, NULL, + NULL, NULL, NULL, 1, PPC_ELF_MACHINE, 0, 0); + + if (bios_size <= 0) { + /* or load binary ROM image */ + bios_size = load_image_targphys(filename, PROM_BASE, PROM_SIZE); + } + g_free(filename); + } else { + bios_size = -1; + } + if (bios_size < 0 || bios_size > PROM_SIZE) { + error_report("could not load PowerPC bios '%s'", bios_name); + exit(1); + } + + if (linux_boot) { + int bswap_needed; + +#ifdef BSWAP_NEEDED + bswap_needed = 1; +#else + bswap_needed = 0; +#endif + kernel_base = KERNEL_LOAD_ADDR; + + kernel_size = load_elf(kernel_filename, NULL, + translate_kernel_address, NULL, NULL, NULL, + NULL, NULL, 1, PPC_ELF_MACHINE, 0, 0); + if (kernel_size < 0) + kernel_size = load_aout(kernel_filename, kernel_base, + ram_size - kernel_base, bswap_needed, + TARGET_PAGE_SIZE); + if (kernel_size < 0) + kernel_size = load_image_targphys(kernel_filename, + kernel_base, + ram_size - kernel_base); + if (kernel_size < 0) { + error_report("could not load kernel '%s'", kernel_filename); + exit(1); + } + /* load initrd */ + if (initrd_filename) { + initrd_base = TARGET_PAGE_ALIGN(kernel_base + kernel_size + KERNEL_GAP); + initrd_size = load_image_targphys(initrd_filename, initrd_base, + ram_size - initrd_base); + if (initrd_size < 0) { + error_report("could not load initial ram disk '%s'", + initrd_filename); + exit(1); + } + cmdline_base = TARGET_PAGE_ALIGN(initrd_base + initrd_size); + } else { + initrd_base = 0; + initrd_size = 0; + cmdline_base = TARGET_PAGE_ALIGN(kernel_base + kernel_size + KERNEL_GAP); + } + ppc_boot_device = 'm'; + } else { + kernel_base = 0; + kernel_size = 0; + initrd_base = 0; + initrd_size = 0; + ppc_boot_device = '\0'; + /* We consider that NewWorld PowerMac never have any floppy drive + * For now, OHW cannot boot from the network. + */ + for (i = 0; boot_device[i] != '\0'; i++) { + if (boot_device[i] >= 'c' && boot_device[i] <= 'f') { + ppc_boot_device = boot_device[i]; + break; + } + } + if (ppc_boot_device == '\0') { + error_report("No valid boot device for Mac99 machine"); + exit(1); + } + } + + /* UniN init */ + dev = qdev_new(TYPE_UNI_NORTH); + s = SYS_BUS_DEVICE(dev); + sysbus_realize_and_unref(s, &error_fatal); + memory_region_add_subregion(get_system_memory(), 0xf8000000, + sysbus_mmio_get_region(s, 0)); + + openpic_irqs = g_new0(IrqLines, smp_cpus); + for (i = 0; i < smp_cpus; i++) { + /* Mac99 IRQ connection between OpenPIC outputs pins + * and PowerPC input pins + */ + switch (PPC_INPUT(env)) { + case PPC_FLAGS_INPUT_6xx: + openpic_irqs[i].irq[OPENPIC_OUTPUT_INT] = + ((qemu_irq *)env->irq_inputs)[PPC6xx_INPUT_INT]; + openpic_irqs[i].irq[OPENPIC_OUTPUT_CINT] = + ((qemu_irq *)env->irq_inputs)[PPC6xx_INPUT_INT]; + openpic_irqs[i].irq[OPENPIC_OUTPUT_MCK] = + ((qemu_irq *)env->irq_inputs)[PPC6xx_INPUT_MCP]; + /* Not connected ? */ + openpic_irqs[i].irq[OPENPIC_OUTPUT_DEBUG] = NULL; + /* Check this */ + openpic_irqs[i].irq[OPENPIC_OUTPUT_RESET] = + ((qemu_irq *)env->irq_inputs)[PPC6xx_INPUT_HRESET]; + break; +#if defined(TARGET_PPC64) + case PPC_FLAGS_INPUT_970: + openpic_irqs[i].irq[OPENPIC_OUTPUT_INT] = + ((qemu_irq *)env->irq_inputs)[PPC970_INPUT_INT]; + openpic_irqs[i].irq[OPENPIC_OUTPUT_CINT] = + ((qemu_irq *)env->irq_inputs)[PPC970_INPUT_INT]; + openpic_irqs[i].irq[OPENPIC_OUTPUT_MCK] = + ((qemu_irq *)env->irq_inputs)[PPC970_INPUT_MCP]; + /* Not connected ? 
*/ + openpic_irqs[i].irq[OPENPIC_OUTPUT_DEBUG] = NULL; + /* Check this */ + openpic_irqs[i].irq[OPENPIC_OUTPUT_RESET] = + ((qemu_irq *)env->irq_inputs)[PPC970_INPUT_HRESET]; + break; +#endif /* defined(TARGET_PPC64) */ + default: + error_report("Bus model not supported on mac99 machine"); + exit(1); + } + } + + if (PPC_INPUT(env) == PPC_FLAGS_INPUT_970) { + /* 970 gets a U3 bus */ + /* Uninorth AGP bus */ + dev = qdev_new(TYPE_U3_AGP_HOST_BRIDGE); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + uninorth_pci = U3_AGP_HOST_BRIDGE(dev); + s = SYS_BUS_DEVICE(dev); + /* PCI hole */ + memory_region_add_subregion(get_system_memory(), 0x80000000ULL, + sysbus_mmio_get_region(s, 2)); + /* Register 8 MB of ISA IO space */ + memory_region_add_subregion(get_system_memory(), 0xf2000000, + sysbus_mmio_get_region(s, 3)); + sysbus_mmio_map(s, 0, 0xf0800000); + sysbus_mmio_map(s, 1, 0xf0c00000); + + machine_arch = ARCH_MAC99_U3; + } else { + /* Use values found on a real PowerMac */ + /* Uninorth AGP bus */ + uninorth_agp_dev = qdev_new(TYPE_UNI_NORTH_AGP_HOST_BRIDGE); + s = SYS_BUS_DEVICE(uninorth_agp_dev); + sysbus_realize_and_unref(s, &error_fatal); + sysbus_mmio_map(s, 0, 0xf0800000); + sysbus_mmio_map(s, 1, 0xf0c00000); + + /* Uninorth internal bus */ + uninorth_internal_dev = qdev_new( + TYPE_UNI_NORTH_INTERNAL_PCI_HOST_BRIDGE); + s = SYS_BUS_DEVICE(uninorth_internal_dev); + sysbus_realize_and_unref(s, &error_fatal); + sysbus_mmio_map(s, 0, 0xf4800000); + sysbus_mmio_map(s, 1, 0xf4c00000); + + /* Uninorth main bus */ + dev = qdev_new(TYPE_UNI_NORTH_PCI_HOST_BRIDGE); + qdev_prop_set_uint32(dev, "ofw-addr", 0xf2000000); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + uninorth_pci = UNI_NORTH_PCI_HOST_BRIDGE(dev); + s = SYS_BUS_DEVICE(dev); + /* PCI hole */ + memory_region_add_subregion(get_system_memory(), 0x80000000ULL, + sysbus_mmio_get_region(s, 2)); + /* Register 8 MB of ISA IO space */ + memory_region_add_subregion(get_system_memory(), 0xf2000000, + sysbus_mmio_get_region(s, 3)); + sysbus_mmio_map(s, 0, 0xf2800000); + sysbus_mmio_map(s, 1, 0xf2c00000); + + machine_arch = ARCH_MAC99; + } + + machine->usb |= defaults_enabled() && !machine->usb_disabled; + has_pmu = (core99_machine->via_config != CORE99_VIA_CONFIG_CUDA); + has_adb = (core99_machine->via_config == CORE99_VIA_CONFIG_CUDA || + core99_machine->via_config == CORE99_VIA_CONFIG_PMU_ADB); + + /* Timebase Frequency */ + if (kvm_enabled()) { + tbfreq = kvmppc_get_tbfreq(); + } else { + tbfreq = TBFREQ; + } + + /* init basic PC hardware */ + pci_bus = PCI_HOST_BRIDGE(uninorth_pci)->bus; + + /* MacIO */ + macio = pci_new(-1, TYPE_NEWWORLD_MACIO); + dev = DEVICE(macio); + qdev_prop_set_uint64(dev, "frequency", tbfreq); + qdev_prop_set_bit(dev, "has-pmu", has_pmu); + qdev_prop_set_bit(dev, "has-adb", has_adb); + + escc = ESCC(object_resolve_path_component(OBJECT(macio), "escc")); + qdev_prop_set_chr(DEVICE(escc), "chrA", serial_hd(0)); + qdev_prop_set_chr(DEVICE(escc), "chrB", serial_hd(1)); + + pci_realize_and_unref(macio, pci_bus, &error_fatal); + + pic_dev = DEVICE(object_resolve_path_component(OBJECT(macio), "pic")); + for (i = 0; i < 4; i++) { + qdev_connect_gpio_out(DEVICE(uninorth_pci), i, + qdev_get_gpio_in(pic_dev, 0x1b + i)); + } + + /* TODO: additional PCI buses only wired up for 32-bit machines */ + if (PPC_INPUT(env) != PPC_FLAGS_INPUT_970) { + /* Uninorth AGP bus */ + for (i = 0; i < 4; i++) { + qdev_connect_gpio_out(uninorth_agp_dev, i, + qdev_get_gpio_in(pic_dev, 0x1b + i)); + } + + /* Uninorth 
internal bus */ + for (i = 0; i < 4; i++) { + qdev_connect_gpio_out(uninorth_internal_dev, i, + qdev_get_gpio_in(pic_dev, 0x1b + i)); + } + } + + /* OpenPIC */ + s = SYS_BUS_DEVICE(pic_dev); + k = 0; + for (i = 0; i < smp_cpus; i++) { + for (j = 0; j < OPENPIC_OUTPUT_NB; j++) { + sysbus_connect_irq(s, k++, openpic_irqs[i].irq[j]); + } + } + g_free(openpic_irqs); + + /* We only emulate 2 out of 3 IDE controllers for now */ + ide_drive_get(hd, ARRAY_SIZE(hd)); + + macio_ide = MACIO_IDE(object_resolve_path_component(OBJECT(macio), + "ide[0]")); + macio_ide_init_drives(macio_ide, hd); + + macio_ide = MACIO_IDE(object_resolve_path_component(OBJECT(macio), + "ide[1]")); + macio_ide_init_drives(macio_ide, &hd[MAX_IDE_DEVS]); + + if (has_adb) { + if (has_pmu) { + dev = DEVICE(object_resolve_path_component(OBJECT(macio), "pmu")); + } else { + dev = DEVICE(object_resolve_path_component(OBJECT(macio), "cuda")); + } + + adb_bus = qdev_get_child_bus(dev, "adb.0"); + dev = qdev_new(TYPE_ADB_KEYBOARD); + qdev_realize_and_unref(dev, adb_bus, &error_fatal); + + dev = qdev_new(TYPE_ADB_MOUSE); + qdev_realize_and_unref(dev, adb_bus, &error_fatal); + } + + if (machine->usb) { + pci_create_simple(pci_bus, -1, "pci-ohci"); + + /* U3 needs to use USB for input because Linux doesn't support via-cuda + on PPC64 */ + if (!has_adb || machine_arch == ARCH_MAC99_U3) { + USBBus *usb_bus = usb_bus_find(-1); + + usb_create_simple(usb_bus, "usb-kbd"); + usb_create_simple(usb_bus, "usb-mouse"); + } + } + + pci_vga_init(pci_bus); + + if (graphic_depth != 15 && graphic_depth != 32 && graphic_depth != 8) { + graphic_depth = 15; + } + + for (i = 0; i < nb_nics; i++) { + pci_nic_init_nofail(&nd_table[i], pci_bus, "sungem", NULL); + } + + /* The NewWorld NVRAM is not located in the MacIO device */ + if (kvm_enabled() && qemu_real_host_page_size > 4096) { + /* We can't combine read-write and read-only in a single page, so + move the NVRAM out of ROM again for KVM */ + nvram_addr = 0xFFE00000; + } + dev = qdev_new(TYPE_MACIO_NVRAM); + qdev_prop_set_uint32(dev, "size", 0x2000); + qdev_prop_set_uint32(dev, "it_shift", 1); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, nvram_addr); + nvr = MACIO_NVRAM(dev); + pmac_format_nvram_partition(nvr, 0x2000); + /* No PCI init: the BIOS will do it */ + + dev = qdev_new(TYPE_FW_CFG_MEM); + fw_cfg = FW_CFG(dev); + qdev_prop_set_uint32(dev, "data_width", 1); + qdev_prop_set_bit(dev, "dma_enabled", false); + object_property_add_child(OBJECT(qdev_get_machine()), TYPE_FW_CFG, + OBJECT(fw_cfg)); + s = SYS_BUS_DEVICE(dev); + sysbus_realize_and_unref(s, &error_fatal); + sysbus_mmio_map(s, 0, CFG_ADDR); + sysbus_mmio_map(s, 1, CFG_ADDR + 2); + + fw_cfg_add_i16(fw_cfg, FW_CFG_NB_CPUS, (uint16_t)smp_cpus); + fw_cfg_add_i16(fw_cfg, FW_CFG_MAX_CPUS, (uint16_t)machine->smp.max_cpus); + fw_cfg_add_i64(fw_cfg, FW_CFG_RAM_SIZE, (uint64_t)ram_size); + fw_cfg_add_i16(fw_cfg, FW_CFG_MACHINE_ID, machine_arch); + fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_ADDR, kernel_base); + fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_SIZE, kernel_size); + if (kernel_cmdline) { + fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_CMDLINE, cmdline_base); + pstrcpy_targphys("cmdline", cmdline_base, TARGET_PAGE_SIZE, kernel_cmdline); + } else { + fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_CMDLINE, 0); + } + fw_cfg_add_i32(fw_cfg, FW_CFG_INITRD_ADDR, initrd_base); + fw_cfg_add_i32(fw_cfg, FW_CFG_INITRD_SIZE, initrd_size); + fw_cfg_add_i16(fw_cfg, FW_CFG_BOOT_DEVICE, ppc_boot_device); + + fw_cfg_add_i16(fw_cfg, 
FW_CFG_PPC_WIDTH, graphic_width); + fw_cfg_add_i16(fw_cfg, FW_CFG_PPC_HEIGHT, graphic_height); + fw_cfg_add_i16(fw_cfg, FW_CFG_PPC_DEPTH, graphic_depth); + + fw_cfg_add_i32(fw_cfg, FW_CFG_PPC_VIACONFIG, core99_machine->via_config); + + fw_cfg_add_i32(fw_cfg, FW_CFG_PPC_IS_KVM, kvm_enabled()); + if (kvm_enabled()) { + uint8_t *hypercall; + + hypercall = g_malloc(16); + kvmppc_get_hypercall(env, hypercall, 16); + fw_cfg_add_bytes(fw_cfg, FW_CFG_PPC_KVM_HC, hypercall, 16); + fw_cfg_add_i32(fw_cfg, FW_CFG_PPC_KVM_PID, getpid()); + } + fw_cfg_add_i32(fw_cfg, FW_CFG_PPC_TBFREQ, tbfreq); + /* Mac OS X requires a "known good" clock-frequency value; pass it one. */ + fw_cfg_add_i32(fw_cfg, FW_CFG_PPC_CLOCKFREQ, CLOCKFREQ); + fw_cfg_add_i32(fw_cfg, FW_CFG_PPC_BUSFREQ, BUSFREQ); + fw_cfg_add_i32(fw_cfg, FW_CFG_PPC_NVRAM_ADDR, nvram_addr); + + /* MacOS NDRV VGA driver */ + filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, NDRV_VGA_FILENAME); + if (filename) { + gchar *ndrv_file; + gsize ndrv_size; + + if (g_file_get_contents(filename, &ndrv_file, &ndrv_size, NULL)) { + fw_cfg_add_file(fw_cfg, "ndrv/qemu_vga.ndrv", ndrv_file, ndrv_size); + } + g_free(filename); + } + + qemu_register_boot_set(fw_cfg_boot_set, fw_cfg); +} + +/* + * Implementation of an interface to adjust firmware path + * for the bootindex property handling. + */ +static char *core99_fw_dev_path(FWPathProvider *p, BusState *bus, + DeviceState *dev) +{ + PCIDevice *pci; + MACIOIDEState *macio_ide; + + if (!strcmp(object_get_typename(OBJECT(dev)), "macio-newworld")) { + pci = PCI_DEVICE(dev); + return g_strdup_printf("mac-io@%x", PCI_SLOT(pci->devfn)); + } + + if (!strcmp(object_get_typename(OBJECT(dev)), "macio-ide")) { + macio_ide = MACIO_IDE(dev); + return g_strdup_printf("ata-3@%x", macio_ide->addr); + } + + if (!strcmp(object_get_typename(OBJECT(dev)), "ide-hd")) { + return g_strdup("disk"); + } + + if (!strcmp(object_get_typename(OBJECT(dev)), "ide-cd")) { + return g_strdup("cdrom"); + } + + if (!strcmp(object_get_typename(OBJECT(dev)), "virtio-blk-device")) { + return g_strdup("disk"); + } + + return NULL; +} +static int core99_kvm_type(MachineState *machine, const char *arg) +{ + /* Always force PR KVM */ + return 2; +} + +static void core99_machine_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + FWPathProviderClass *fwc = FW_PATH_PROVIDER_CLASS(oc); + + mc->desc = "Mac99 based PowerMAC"; + mc->init = ppc_core99_init; + mc->block_default_type = IF_IDE; + mc->max_cpus = MAX_CPUS; + mc->default_boot_order = "cd"; + mc->default_display = "std"; + mc->kvm_type = core99_kvm_type; +#ifdef TARGET_PPC64 + mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("970fx_v3.1"); +#else + mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("7400_v2.9"); +#endif + mc->default_ram_id = "ppc_core99.ram"; + mc->ignore_boot_device_suffixes = true; + fwc->get_dev_path = core99_fw_dev_path; +} + +static char *core99_get_via_config(Object *obj, Error **errp) +{ + Core99MachineState *cms = CORE99_MACHINE(obj); + + switch (cms->via_config) { + default: + case CORE99_VIA_CONFIG_CUDA: + return g_strdup("cuda"); + + case CORE99_VIA_CONFIG_PMU: + return g_strdup("pmu"); + + case CORE99_VIA_CONFIG_PMU_ADB: + return g_strdup("pmu-adb"); + } +} + +static void core99_set_via_config(Object *obj, const char *value, Error **errp) +{ + Core99MachineState *cms = CORE99_MACHINE(obj); + + if (!strcmp(value, "cuda")) { + cms->via_config = CORE99_VIA_CONFIG_CUDA; + } else if (!strcmp(value, "pmu")) { + cms->via_config = CORE99_VIA_CONFIG_PMU; + } else if 
(!strcmp(value, "pmu-adb")) { + cms->via_config = CORE99_VIA_CONFIG_PMU_ADB; + } else { + error_setg(errp, "Invalid via value"); + error_append_hint(errp, "Valid values are cuda, pmu, pmu-adb.\n"); + } +} + +static void core99_instance_init(Object *obj) +{ + Core99MachineState *cms = CORE99_MACHINE(obj); + + /* Default via_config is CORE99_VIA_CONFIG_CUDA */ + cms->via_config = CORE99_VIA_CONFIG_CUDA; + object_property_add_str(obj, "via", core99_get_via_config, + core99_set_via_config); + object_property_set_description(obj, "via", + "Set VIA configuration. " + "Valid values are cuda, pmu and pmu-adb"); + + return; +} + +static const TypeInfo core99_machine_info = { + .name = MACHINE_TYPE_NAME("mac99"), + .parent = TYPE_MACHINE, + .class_init = core99_machine_class_init, + .instance_init = core99_instance_init, + .instance_size = sizeof(Core99MachineState), + .interfaces = (InterfaceInfo[]) { + { TYPE_FW_PATH_PROVIDER }, + { } + }, +}; + +static void mac_machine_register_types(void) +{ + type_register_static(&core99_machine_info); +} + +type_init(mac_machine_register_types) diff --git a/hw/ppc/mac_oldworld.c b/hw/ppc/mac_oldworld.c new file mode 100644 index 000000000..de2be960e --- /dev/null +++ b/hw/ppc/mac_oldworld.c @@ -0,0 +1,455 @@ + +/* + * QEMU OldWorld PowerMac (currently ~G3 Beige) hardware System Emulator + * + * Copyright (c) 2004-2007 Fabrice Bellard + * Copyright (c) 2007 Jocelyn Mayer + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include "qemu-common.h" +#include "qemu/datadir.h" +#include "qemu/units.h" +#include "qapi/error.h" +#include "hw/ppc/ppc.h" +#include "hw/qdev-properties.h" +#include "mac.h" +#include "hw/input/adb.h" +#include "sysemu/sysemu.h" +#include "net/net.h" +#include "hw/isa/isa.h" +#include "hw/pci/pci.h" +#include "hw/pci/pci_host.h" +#include "hw/nvram/fw_cfg.h" +#include "hw/char/escc.h" +#include "hw/misc/macio/macio.h" +#include "hw/loader.h" +#include "hw/fw-path-provider.h" +#include "elf.h" +#include "qemu/error-report.h" +#include "sysemu/kvm.h" +#include "sysemu/reset.h" +#include "kvm_ppc.h" + +#define MAX_IDE_BUS 2 +#define CFG_ADDR 0xf0000510 +#define TBFREQ 16600000UL +#define CLOCKFREQ 266000000UL +#define BUSFREQ 66000000UL + +#define NDRV_VGA_FILENAME "qemu_vga.ndrv" + +#define GRACKLE_BASE 0xfec00000 +#define PROM_BASE 0xffc00000 +#define PROM_SIZE (4 * MiB) + +static void fw_cfg_boot_set(void *opaque, const char *boot_device, + Error **errp) +{ + fw_cfg_modify_i16(opaque, FW_CFG_BOOT_DEVICE, boot_device[0]); +} + +static uint64_t translate_kernel_address(void *opaque, uint64_t addr) +{ + return (addr & 0x0fffffff) + KERNEL_LOAD_ADDR; +} + +static void ppc_heathrow_reset(void *opaque) +{ + PowerPCCPU *cpu = opaque; + + cpu_reset(CPU(cpu)); +} + +static void ppc_heathrow_init(MachineState *machine) +{ + ram_addr_t ram_size = machine->ram_size; + const char *bios_name = machine->firmware ?: PROM_FILENAME; + const char *boot_device = machine->boot_order; + PowerPCCPU *cpu = NULL; + CPUPPCState *env = NULL; + char *filename; + int i; + MemoryRegion *bios = g_new(MemoryRegion, 1); + uint32_t kernel_base, initrd_base, cmdline_base = 0; + int32_t kernel_size, initrd_size; + PCIBus *pci_bus; + PCIDevice *macio; + MACIOIDEState *macio_ide; + ESCCState *escc; + SysBusDevice *s; + DeviceState *dev, *pic_dev, *grackle_dev; + BusState *adb_bus; + uint64_t bios_addr; + int bios_size; + unsigned int smp_cpus = machine->smp.cpus; + uint16_t ppc_boot_device; + DriveInfo *hd[MAX_IDE_BUS * MAX_IDE_DEVS]; + void *fw_cfg; + uint64_t tbfreq; + + /* init CPUs */ + for (i = 0; i < smp_cpus; i++) { + cpu = POWERPC_CPU(cpu_create(machine->cpu_type)); + env = &cpu->env; + + /* Set time-base frequency to 16.6 Mhz */ + cpu_ppc_tb_init(env, TBFREQ); + qemu_register_reset(ppc_heathrow_reset, cpu); + } + + /* allocate RAM */ + if (ram_size > 2047 * MiB) { + error_report("Too much memory for this machine: %" PRId64 " MB, " + "maximum 2047 MB", ram_size / MiB); + exit(1); + } + + memory_region_add_subregion(get_system_memory(), 0, machine->ram); + + /* allocate and load firmware ROM */ + memory_region_init_rom(bios, NULL, "ppc_heathrow.bios", PROM_SIZE, + &error_fatal); + memory_region_add_subregion(get_system_memory(), PROM_BASE, bios); + + filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name); + if (filename) { + /* Load OpenBIOS (ELF) */ + bios_size = load_elf(filename, NULL, NULL, NULL, NULL, &bios_addr, + NULL, NULL, 1, PPC_ELF_MACHINE, 0, 0); + /* Unfortunately, load_elf sign-extends reading elf32 */ + bios_addr = (uint32_t)bios_addr; + + if (bios_size <= 0) { + /* or if could not load ELF try loading a binary ROM image */ + bios_size = load_image_targphys(filename, PROM_BASE, PROM_SIZE); + bios_addr = PROM_BASE; + } + g_free(filename); + } else { + bios_size = -1; + } + if (bios_size < 0 || bios_addr - PROM_BASE + bios_size > PROM_SIZE) { + error_report("could not load PowerPC bios '%s'", bios_name); + exit(1); + } + + if (machine->kernel_filename) { + int 
bswap_needed; + +#ifdef BSWAP_NEEDED + bswap_needed = 1; +#else + bswap_needed = 0; +#endif + kernel_base = KERNEL_LOAD_ADDR; + kernel_size = load_elf(machine->kernel_filename, NULL, + translate_kernel_address, NULL, NULL, NULL, + NULL, NULL, 1, PPC_ELF_MACHINE, 0, 0); + if (kernel_size < 0) + kernel_size = load_aout(machine->kernel_filename, kernel_base, + ram_size - kernel_base, bswap_needed, + TARGET_PAGE_SIZE); + if (kernel_size < 0) + kernel_size = load_image_targphys(machine->kernel_filename, + kernel_base, + ram_size - kernel_base); + if (kernel_size < 0) { + error_report("could not load kernel '%s'", + machine->kernel_filename); + exit(1); + } + /* load initrd */ + if (machine->initrd_filename) { + initrd_base = TARGET_PAGE_ALIGN(kernel_base + kernel_size + + KERNEL_GAP); + initrd_size = load_image_targphys(machine->initrd_filename, + initrd_base, + ram_size - initrd_base); + if (initrd_size < 0) { + error_report("could not load initial ram disk '%s'", + machine->initrd_filename); + exit(1); + } + cmdline_base = TARGET_PAGE_ALIGN(initrd_base + initrd_size); + } else { + initrd_base = 0; + initrd_size = 0; + cmdline_base = TARGET_PAGE_ALIGN(kernel_base + kernel_size + KERNEL_GAP); + } + ppc_boot_device = 'm'; + } else { + kernel_base = 0; + kernel_size = 0; + initrd_base = 0; + initrd_size = 0; + ppc_boot_device = '\0'; + for (i = 0; boot_device[i] != '\0'; i++) { + /* TOFIX: for now, the second IDE channel is not properly + * used by OHW. The Mac floppy disks are not emulated + * and OHW cannot boot from the network yet. + */ +#if 0 + if (boot_device[i] >= 'a' && boot_device[i] <= 'f') { + ppc_boot_device = boot_device[i]; + break; + } +#else + if (boot_device[i] >= 'c' && boot_device[i] <= 'd') { + ppc_boot_device = boot_device[i]; + break; + } +#endif + } + if (ppc_boot_device == '\0') { + error_report("No valid boot device for G3 Beige machine"); + exit(1); + } + } + + /* Timebase Frequency */ + if (kvm_enabled()) { + tbfreq = kvmppc_get_tbfreq(); + } else { + tbfreq = TBFREQ; + } + + /* Grackle PCI host bridge */ + grackle_dev = qdev_new(TYPE_GRACKLE_PCI_HOST_BRIDGE); + qdev_prop_set_uint32(grackle_dev, "ofw-addr", 0x80000000); + s = SYS_BUS_DEVICE(grackle_dev); + sysbus_realize_and_unref(s, &error_fatal); + + sysbus_mmio_map(s, 0, GRACKLE_BASE); + sysbus_mmio_map(s, 1, GRACKLE_BASE + 0x200000); + /* PCI hole */ + memory_region_add_subregion(get_system_memory(), 0x80000000ULL, + sysbus_mmio_get_region(s, 2)); + /* Register 2 MB of ISA IO space */ + memory_region_add_subregion(get_system_memory(), 0xfe000000, + sysbus_mmio_get_region(s, 3)); + + pci_bus = PCI_HOST_BRIDGE(grackle_dev)->bus; + + /* MacIO */ + macio = pci_new(PCI_DEVFN(16, 0), TYPE_OLDWORLD_MACIO); + dev = DEVICE(macio); + qdev_prop_set_uint64(dev, "frequency", tbfreq); + + escc = ESCC(object_resolve_path_component(OBJECT(macio), "escc")); + qdev_prop_set_chr(DEVICE(escc), "chrA", serial_hd(0)); + qdev_prop_set_chr(DEVICE(escc), "chrB", serial_hd(1)); + + pci_realize_and_unref(macio, pci_bus, &error_fatal); + + pic_dev = DEVICE(object_resolve_path_component(OBJECT(macio), "pic")); + for (i = 0; i < 4; i++) { + qdev_connect_gpio_out(grackle_dev, i, + qdev_get_gpio_in(pic_dev, 0x15 + i)); + } + + /* Connect the heathrow PIC outputs to the 6xx bus */ + for (i = 0; i < smp_cpus; i++) { + switch (PPC_INPUT(env)) { + case PPC_FLAGS_INPUT_6xx: + /* XXX: we register only 1 output pin for heathrow PIC */ + qdev_connect_gpio_out(pic_dev, 0, + ((qemu_irq *)env->irq_inputs)[PPC6xx_INPUT_INT]); + break; + default: +
error_report("Bus model not supported on OldWorld Mac machine"); + exit(1); + } + } + + pci_vga_init(pci_bus); + + for (i = 0; i < nb_nics; i++) { + pci_nic_init_nofail(&nd_table[i], pci_bus, "ne2k_pci", NULL); + } + + /* MacIO IDE */ + ide_drive_get(hd, ARRAY_SIZE(hd)); + macio_ide = MACIO_IDE(object_resolve_path_component(OBJECT(macio), + "ide[0]")); + macio_ide_init_drives(macio_ide, hd); + + macio_ide = MACIO_IDE(object_resolve_path_component(OBJECT(macio), + "ide[1]")); + macio_ide_init_drives(macio_ide, &hd[MAX_IDE_DEVS]); + + /* MacIO CUDA/ADB */ + dev = DEVICE(object_resolve_path_component(OBJECT(macio), "cuda")); + adb_bus = qdev_get_child_bus(dev, "adb.0"); + dev = qdev_new(TYPE_ADB_KEYBOARD); + qdev_realize_and_unref(dev, adb_bus, &error_fatal); + dev = qdev_new(TYPE_ADB_MOUSE); + qdev_realize_and_unref(dev, adb_bus, &error_fatal); + + if (machine_usb(machine)) { + pci_create_simple(pci_bus, -1, "pci-ohci"); + } + + if (graphic_depth != 15 && graphic_depth != 32 && graphic_depth != 8) + graphic_depth = 15; + + /* No PCI init: the BIOS will do it */ + + dev = qdev_new(TYPE_FW_CFG_MEM); + fw_cfg = FW_CFG(dev); + qdev_prop_set_uint32(dev, "data_width", 1); + qdev_prop_set_bit(dev, "dma_enabled", false); + object_property_add_child(OBJECT(qdev_get_machine()), TYPE_FW_CFG, + OBJECT(fw_cfg)); + s = SYS_BUS_DEVICE(dev); + sysbus_realize_and_unref(s, &error_fatal); + sysbus_mmio_map(s, 0, CFG_ADDR); + sysbus_mmio_map(s, 1, CFG_ADDR + 2); + + fw_cfg_add_i16(fw_cfg, FW_CFG_NB_CPUS, (uint16_t)smp_cpus); + fw_cfg_add_i16(fw_cfg, FW_CFG_MAX_CPUS, (uint16_t)machine->smp.max_cpus); + fw_cfg_add_i64(fw_cfg, FW_CFG_RAM_SIZE, (uint64_t)ram_size); + fw_cfg_add_i16(fw_cfg, FW_CFG_MACHINE_ID, ARCH_HEATHROW); + fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_ADDR, kernel_base); + fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_SIZE, kernel_size); + if (machine->kernel_cmdline) { + fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_CMDLINE, cmdline_base); + pstrcpy_targphys("cmdline", cmdline_base, TARGET_PAGE_SIZE, + machine->kernel_cmdline); + } else { + fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_CMDLINE, 0); + } + fw_cfg_add_i32(fw_cfg, FW_CFG_INITRD_ADDR, initrd_base); + fw_cfg_add_i32(fw_cfg, FW_CFG_INITRD_SIZE, initrd_size); + fw_cfg_add_i16(fw_cfg, FW_CFG_BOOT_DEVICE, ppc_boot_device); + + fw_cfg_add_i16(fw_cfg, FW_CFG_PPC_WIDTH, graphic_width); + fw_cfg_add_i16(fw_cfg, FW_CFG_PPC_HEIGHT, graphic_height); + fw_cfg_add_i16(fw_cfg, FW_CFG_PPC_DEPTH, graphic_depth); + + fw_cfg_add_i32(fw_cfg, FW_CFG_PPC_IS_KVM, kvm_enabled()); + if (kvm_enabled()) { + uint8_t *hypercall; + + hypercall = g_malloc(16); + kvmppc_get_hypercall(env, hypercall, 16); + fw_cfg_add_bytes(fw_cfg, FW_CFG_PPC_KVM_HC, hypercall, 16); + fw_cfg_add_i32(fw_cfg, FW_CFG_PPC_KVM_PID, getpid()); + } + fw_cfg_add_i32(fw_cfg, FW_CFG_PPC_TBFREQ, tbfreq); + /* Mac OS X requires a "known good" clock-frequency value; pass it one. */ + fw_cfg_add_i32(fw_cfg, FW_CFG_PPC_CLOCKFREQ, CLOCKFREQ); + fw_cfg_add_i32(fw_cfg, FW_CFG_PPC_BUSFREQ, BUSFREQ); + + /* MacOS NDRV VGA driver */ + filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, NDRV_VGA_FILENAME); + if (filename) { + gchar *ndrv_file; + gsize ndrv_size; + + if (g_file_get_contents(filename, &ndrv_file, &ndrv_size, NULL)) { + fw_cfg_add_file(fw_cfg, "ndrv/qemu_vga.ndrv", ndrv_file, ndrv_size); + } + g_free(filename); + } + + qemu_register_boot_set(fw_cfg_boot_set, fw_cfg); +} + +/* + * Implementation of an interface to adjust firmware path + * for the bootindex property handling. 
+ */ +static char *heathrow_fw_dev_path(FWPathProvider *p, BusState *bus, + DeviceState *dev) +{ + PCIDevice *pci; + MACIOIDEState *macio_ide; + + if (!strcmp(object_get_typename(OBJECT(dev)), "macio-oldworld")) { + pci = PCI_DEVICE(dev); + return g_strdup_printf("mac-io@%x", PCI_SLOT(pci->devfn)); + } + + if (!strcmp(object_get_typename(OBJECT(dev)), "macio-ide")) { + macio_ide = MACIO_IDE(dev); + return g_strdup_printf("ata-3@%x", macio_ide->addr); + } + + if (!strcmp(object_get_typename(OBJECT(dev)), "ide-hd")) { + return g_strdup("disk"); + } + + if (!strcmp(object_get_typename(OBJECT(dev)), "ide-cd")) { + return g_strdup("cdrom"); + } + + if (!strcmp(object_get_typename(OBJECT(dev)), "virtio-blk-device")) { + return g_strdup("disk"); + } + + return NULL; +} + +static int heathrow_kvm_type(MachineState *machine, const char *arg) +{ + /* Always force PR KVM */ + return 2; +} + +static void heathrow_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + FWPathProviderClass *fwc = FW_PATH_PROVIDER_CLASS(oc); + + mc->desc = "Heathrow based PowerMAC"; + mc->init = ppc_heathrow_init; + mc->block_default_type = IF_IDE; + mc->max_cpus = MAX_CPUS; +#ifndef TARGET_PPC64 + mc->is_default = true; +#endif + /* TOFIX "cad" when Mac floppy is implemented */ + mc->default_boot_order = "cd"; + mc->kvm_type = heathrow_kvm_type; + mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("750_v3.1"); + mc->default_display = "std"; + mc->ignore_boot_device_suffixes = true; + mc->default_ram_id = "ppc_heathrow.ram"; + fwc->get_dev_path = heathrow_fw_dev_path; +} + +static const TypeInfo ppc_heathrow_machine_info = { + .name = MACHINE_TYPE_NAME("g3beige"), + .parent = TYPE_MACHINE, + .class_init = heathrow_class_init, + .interfaces = (InterfaceInfo[]) { + { TYPE_FW_PATH_PROVIDER }, + { } + }, +}; + +static void ppc_heathrow_register_types(void) +{ + type_register_static(&ppc_heathrow_machine_info); +} + +type_init(ppc_heathrow_register_types); diff --git a/hw/ppc/meson.build b/hw/ppc/meson.build new file mode 100644 index 000000000..aa4c8e6a2 --- /dev/null +++ b/hw/ppc/meson.build @@ -0,0 +1,90 @@ +ppc_ss = ss.source_set() +ppc_ss.add(files( + 'ppc.c', + 'ppc_booke.c', +)) +ppc_ss.add(when: 'CONFIG_FDT_PPC', if_true: [files( + 'fdt.c', +), fdt]) +ppc_ss.add(when: 'CONFIG_FW_CFG_PPC', if_true: files('fw_cfg.c')) + +# IBM pSeries (sPAPR) +ppc_ss.add(when: 'CONFIG_PSERIES', if_true: files( + 'spapr.c', + 'spapr_caps.c', + 'spapr_vio.c', + 'spapr_events.c', + 'spapr_hcall.c', + 'spapr_iommu.c', + 'spapr_rtas.c', + 'spapr_pci.c', + 'spapr_rtc.c', + 'spapr_drc.c', + 'spapr_cpu_core.c', + 'spapr_ovec.c', + 'spapr_irq.c', + 'spapr_tpm_proxy.c', + 'spapr_nvdimm.c', + 'spapr_rtas_ddw.c', + 'spapr_numa.c', + 'pef.c', +)) +ppc_ss.add(when: ['CONFIG_PSERIES', 'CONFIG_TCG'], if_true: files( + 'spapr_softmmu.c', +)) +ppc_ss.add(when: 'CONFIG_SPAPR_RNG', if_true: files('spapr_rng.c')) +ppc_ss.add(when: ['CONFIG_PSERIES', 'CONFIG_LINUX'], if_true: files( + 'spapr_pci_vfio.c', + 'spapr_pci_nvlink2.c' +)) + +# IBM PowerNV +ppc_ss.add(when: 'CONFIG_POWERNV', if_true: files( + 'pnv.c', + 'pnv_xscom.c', + 'pnv_core.c', + 'pnv_lpc.c', + 'pnv_psi.c', + 'pnv_occ.c', + 'pnv_bmc.c', + 'pnv_homer.c', + 'pnv_pnor.c', +)) +# PowerPC 4xx boards +ppc_ss.add(when: 'CONFIG_PPC405', if_true: files( + 'ppc405_boards.c', + 'ppc405_uc.c')) +ppc_ss.add(when: 'CONFIG_PPC440', if_true: files( + 'ppc440_bamboo.c', + 'ppc440_pcix.c', 'ppc440_uc.c')) +ppc_ss.add(when: 'CONFIG_PPC4XX', if_true: files( + 'ppc4xx_pci.c', + 
'ppc4xx_devs.c')) +ppc_ss.add(when: 'CONFIG_SAM460EX', if_true: files('sam460ex.c')) +# PReP +ppc_ss.add(when: 'CONFIG_PREP', if_true: files('prep.c')) +ppc_ss.add(when: 'CONFIG_PREP', if_true: files('prep_systemio.c')) +ppc_ss.add(when: 'CONFIG_RS6000_MC', if_true: files('rs6000_mc.c')) +# OldWorld PowerMac +ppc_ss.add(when: 'CONFIG_MAC_OLDWORLD', if_true: files('mac_oldworld.c')) +# NewWorld PowerMac +ppc_ss.add(when: 'CONFIG_MAC_NEWWORLD', if_true: files('mac_newworld.c')) +# e500 +ppc_ss.add(when: 'CONFIG_E500', if_true: files( + 'e500.c', + 'mpc8544ds.c', + 'e500plat.c' +)) +ppc_ss.add(when: 'CONFIG_E500', if_true: files( + 'mpc8544_guts.c', + 'ppce500_spin.c' +)) +# PowerPC 440 Xilinx ML507 reference board. +ppc_ss.add(when: 'CONFIG_VIRTEX', if_true: files('virtex_ml507.c')) +# Pegasos2 +ppc_ss.add(when: 'CONFIG_PEGASOS2', if_true: files('pegasos2.c')) + +ppc_ss.add(when: 'CONFIG_VOF', if_true: files('vof.c')) +ppc_ss.add(when: ['CONFIG_VOF', 'CONFIG_PSERIES'], if_true: files('spapr_vof.c')) + +hw_arch += {'ppc': ppc_ss} diff --git a/hw/ppc/mpc8544_guts.c b/hw/ppc/mpc8544_guts.c new file mode 100644 index 000000000..e8d2d51c2 --- /dev/null +++ b/hw/ppc/mpc8544_guts.c @@ -0,0 +1,142 @@ +/* + * QEMU PowerPC MPC8544 global util pseudo-device + * + * Copyright (C) 2011 Freescale Semiconductor, Inc. All rights reserved. + * + * Author: Alexander Graf, + * + * This is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * ***************************************************************** + * + * The documentation for this device is noted in the MPC8544 documentation, + * file name "MPC8544ERM.pdf". You can easily find it on the web. 
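+ *
+ * Only a small subset of the registers listed below is modelled. As a
+ * sketch of the one write path with side effects (the out_be32() accessor
+ * here is illustrative guest code, not part of this file): a guest can
+ * reset the board through the RSTCR register at offset 0xB0,
+ *
+ *   out_be32(guts_base + MPC8544_GUTS_ADDR_RSTCR, MPC8544_GUTS_RSTCR_RESET);
+ *
+ * which mpc8544_guts_write() below turns into
+ * qemu_system_reset_request().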
+ * + */ + +#include "qemu/osdep.h" +#include "qemu/module.h" +#include "sysemu/runstate.h" +#include "cpu.h" +#include "hw/sysbus.h" +#include "qom/object.h" + +#define MPC8544_GUTS_MMIO_SIZE 0x1000 +#define MPC8544_GUTS_RSTCR_RESET 0x02 + +#define MPC8544_GUTS_ADDR_PORPLLSR 0x00 +#define MPC8544_GUTS_ADDR_PORBMSR 0x04 +#define MPC8544_GUTS_ADDR_PORIMPSCR 0x08 +#define MPC8544_GUTS_ADDR_PORDEVSR 0x0C +#define MPC8544_GUTS_ADDR_PORDBGMSR 0x10 +#define MPC8544_GUTS_ADDR_PORDEVSR2 0x14 +#define MPC8544_GUTS_ADDR_GPPORCR 0x20 +#define MPC8544_GUTS_ADDR_GPIOCR 0x30 +#define MPC8544_GUTS_ADDR_GPOUTDR 0x40 +#define MPC8544_GUTS_ADDR_GPINDR 0x50 +#define MPC8544_GUTS_ADDR_PMUXCR 0x60 +#define MPC8544_GUTS_ADDR_DEVDISR 0x70 +#define MPC8544_GUTS_ADDR_POWMGTCSR 0x80 +#define MPC8544_GUTS_ADDR_MCPSUMR 0x90 +#define MPC8544_GUTS_ADDR_RSTRSCR 0x94 +#define MPC8544_GUTS_ADDR_PVR 0xA0 +#define MPC8544_GUTS_ADDR_SVR 0xA4 +#define MPC8544_GUTS_ADDR_RSTCR 0xB0 +#define MPC8544_GUTS_ADDR_IOVSELSR 0xC0 +#define MPC8544_GUTS_ADDR_DDRCSR 0xB20 +#define MPC8544_GUTS_ADDR_DDRCDR 0xB24 +#define MPC8544_GUTS_ADDR_DDRCLKDR 0xB28 +#define MPC8544_GUTS_ADDR_CLKOCR 0xE00 +#define MPC8544_GUTS_ADDR_SRDS1CR1 0xF04 +#define MPC8544_GUTS_ADDR_SRDS2CR1 0xF10 +#define MPC8544_GUTS_ADDR_SRDS2CR3 0xF18 + +#define TYPE_MPC8544_GUTS "mpc8544-guts" +OBJECT_DECLARE_SIMPLE_TYPE(GutsState, MPC8544_GUTS) + +struct GutsState { + /*< private >*/ + SysBusDevice parent_obj; + /*< public >*/ + + MemoryRegion iomem; +}; + + +static uint64_t mpc8544_guts_read(void *opaque, hwaddr addr, + unsigned size) +{ + uint32_t value = 0; + PowerPCCPU *cpu = POWERPC_CPU(current_cpu); + CPUPPCState *env = &cpu->env; + + addr &= MPC8544_GUTS_MMIO_SIZE - 1; + switch (addr) { + case MPC8544_GUTS_ADDR_PVR: + value = env->spr[SPR_PVR]; + break; + case MPC8544_GUTS_ADDR_SVR: + value = env->spr[SPR_E500_SVR]; + break; + default: + fprintf(stderr, "guts: Unknown register read: %x\n", (int)addr); + break; + } + + return value; +} + +static void mpc8544_guts_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + addr &= MPC8544_GUTS_MMIO_SIZE - 1; + + switch (addr) { + case MPC8544_GUTS_ADDR_RSTCR: + if (value & MPC8544_GUTS_RSTCR_RESET) { + qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); + } + break; + default: + fprintf(stderr, "guts: Unknown register write: %x = %x\n", + (int)addr, (unsigned)value); + break; + } +} + +static const MemoryRegionOps mpc8544_guts_ops = { + .read = mpc8544_guts_read, + .write = mpc8544_guts_write, + .endianness = DEVICE_BIG_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +static void mpc8544_guts_initfn(Object *obj) +{ + SysBusDevice *d = SYS_BUS_DEVICE(obj); + GutsState *s = MPC8544_GUTS(obj); + + memory_region_init_io(&s->iomem, OBJECT(s), &mpc8544_guts_ops, s, + "mpc8544.guts", MPC8544_GUTS_MMIO_SIZE); + sysbus_init_mmio(d, &s->iomem); +} + +static const TypeInfo mpc8544_guts_info = { + .name = TYPE_MPC8544_GUTS, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(GutsState), + .instance_init = mpc8544_guts_initfn, +}; + +static void mpc8544_guts_register_types(void) +{ + type_register_static(&mpc8544_guts_info); +} + +type_init(mpc8544_guts_register_types) diff --git a/hw/ppc/mpc8544ds.c b/hw/ppc/mpc8544ds.c new file mode 100644 index 000000000..81177505f --- /dev/null +++ b/hw/ppc/mpc8544ds.c @@ -0,0 +1,74 @@ +/* + * Support for the PPC e500-based mpc8544ds board + * + * Copyright 2012 Freescale Semiconductor, Inc. 
+ * + * This is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include "qemu/osdep.h" +#include "e500.h" +#include "sysemu/device_tree.h" +#include "hw/ppc/openpic.h" +#include "qemu/error-report.h" +#include "cpu.h" + +static void mpc8544ds_fixup_devtree(void *fdt) +{ + const char model[] = "MPC8544DS"; + const char compatible[] = "MPC8544DS\0MPC85xxDS"; + + qemu_fdt_setprop(fdt, "/", "model", model, sizeof(model)); + qemu_fdt_setprop(fdt, "/", "compatible", compatible, + sizeof(compatible)); +} + +static void mpc8544ds_init(MachineState *machine) +{ + if (machine->ram_size > 0xc0000000) { + error_report("The MPC8544DS board only supports up to 3GB of RAM"); + exit(1); + } + + ppce500_init(machine); +} + +static void e500plat_machine_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + PPCE500MachineClass *pmc = PPCE500_MACHINE_CLASS(oc); + + pmc->pci_first_slot = 0x11; + pmc->pci_nr_slots = 2; + pmc->fixup_devtree = mpc8544ds_fixup_devtree; + pmc->mpic_version = OPENPIC_MODEL_FSL_MPIC_20; + pmc->ccsrbar_base = 0xE0000000ULL; + pmc->pci_mmio_base = 0xC0000000ULL; + pmc->pci_mmio_bus_base = 0xC0000000ULL; + pmc->pci_pio_base = 0xE1000000ULL; + pmc->spin_base = 0xEF000000ULL; + + mc->desc = "mpc8544ds"; + mc->init = mpc8544ds_init; + mc->max_cpus = 15; + mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("e500v2_v30"); + mc->default_ram_id = "mpc8544ds.ram"; +} + +#define TYPE_MPC8544DS_MACHINE MACHINE_TYPE_NAME("mpc8544ds") + +static const TypeInfo mpc8544ds_info = { + .name = TYPE_MPC8544DS_MACHINE, + .parent = TYPE_PPCE500_MACHINE, + .class_init = e500plat_machine_class_init, +}; + +static void mpc8544ds_register_types(void) +{ + type_register_static(&mpc8544ds_info); +} + +type_init(mpc8544ds_register_types) diff --git a/hw/ppc/pef.c b/hw/ppc/pef.c new file mode 100644 index 000000000..cc44d5e33 --- /dev/null +++ b/hw/ppc/pef.c @@ -0,0 +1,142 @@ +/* + * PEF (Protected Execution Facility) for POWER support + * + * Copyright Red Hat. + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +#include "qemu/osdep.h" + +#include "qapi/error.h" +#include "qom/object_interfaces.h" +#include "sysemu/kvm.h" +#include "migration/blocker.h" +#include "exec/confidential-guest-support.h" +#include "hw/ppc/pef.h" + +#define TYPE_PEF_GUEST "pef-guest" +OBJECT_DECLARE_SIMPLE_TYPE(PefGuest, PEF_GUEST) + +typedef struct PefGuest PefGuest; +typedef struct PefGuestClass PefGuestClass; + +struct PefGuestClass { + ConfidentialGuestSupportClass parent_class; +}; + +/** + * PefGuest: + * + * The PefGuest object is used for creating and managing a PEF + * guest. 
+ * + * # $QEMU \ + * -object pef-guest,id=pef0 \ + * -machine ...,confidential-guest-support=pef0 + */ +struct PefGuest { + ConfidentialGuestSupport parent_obj; +}; + +static int kvmppc_svm_init(ConfidentialGuestSupport *cgs, Error **errp) +{ +#ifdef CONFIG_KVM + static Error *pef_mig_blocker; + + if (!kvm_check_extension(kvm_state, KVM_CAP_PPC_SECURE_GUEST)) { + error_setg(errp, + "KVM implementation does not support Secure VMs (is an ultravisor running?)"); + return -1; + } else { + int ret = kvm_vm_enable_cap(kvm_state, KVM_CAP_PPC_SECURE_GUEST, 0, 1); + + if (ret < 0) { + error_setg(errp, + "Error enabling PEF with KVM"); + return -1; + } + } + + /* add migration blocker */ + error_setg(&pef_mig_blocker, "PEF: Migration is not implemented"); + /* NB: This can fail if --only-migratable is used */ + migrate_add_blocker(pef_mig_blocker, &error_fatal); + + cgs->ready = true; + + return 0; +#else + g_assert_not_reached(); +#endif +} + +/* + * Don't set error if KVM_PPC_SVM_OFF ioctl is invoked on kernels + * that don't support this ioctl. + */ +static int kvmppc_svm_off(Error **errp) +{ +#ifdef CONFIG_KVM + int rc; + + rc = kvm_vm_ioctl(KVM_STATE(current_accel()), KVM_PPC_SVM_OFF); + if (rc && rc != -ENOTTY) { + error_setg_errno(errp, -rc, "KVM_PPC_SVM_OFF ioctl failed"); + return rc; + } + return 0; +#else + g_assert_not_reached(); +#endif +} + +int pef_kvm_init(ConfidentialGuestSupport *cgs, Error **errp) +{ + if (!object_dynamic_cast(OBJECT(cgs), TYPE_PEF_GUEST)) { + return 0; + } + + if (!kvm_enabled()) { + error_setg(errp, "PEF requires KVM"); + return -1; + } + + return kvmppc_svm_init(cgs, errp); +} + +int pef_kvm_reset(ConfidentialGuestSupport *cgs, Error **errp) +{ + if (!object_dynamic_cast(OBJECT(cgs), TYPE_PEF_GUEST)) { + return 0; + } + + /* + * If we don't have KVM we should never have been able to + * initialize PEF, so we should never get this far + */ + assert(kvm_enabled()); + + return kvmppc_svm_off(errp); +} + +OBJECT_DEFINE_TYPE_WITH_INTERFACES(PefGuest, + pef_guest, + PEF_GUEST, + CONFIDENTIAL_GUEST_SUPPORT, + { TYPE_USER_CREATABLE }, + { NULL }) + +static void pef_guest_class_init(ObjectClass *oc, void *data) +{ +} + +static void pef_guest_init(Object *obj) +{ +} + +static void pef_guest_finalize(Object *obj) +{ +} diff --git a/hw/ppc/pegasos2.c b/hw/ppc/pegasos2.c new file mode 100644 index 000000000..298e6b93e --- /dev/null +++ b/hw/ppc/pegasos2.c @@ -0,0 +1,952 @@ +/* + * QEMU PowerPC CHRP (Genesi/bPlan Pegasos II) hardware System Emulator + * + * Copyright (c) 2018-2021 BALATON Zoltan + * + * This work is licensed under the GNU GPL license version 2 or later. 
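+ *
+ * Illustrative invocation (a sketch, not the only supported setup): when
+ * no -bios option is given the built-in VOF firmware image is loaded, so
+ * a kernel can be booted directly, e.g.
+ *
+ *   qemu-system-ppc -machine pegasos2 -kernel vmlinux -append "console=ttyS0"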
+ * + */ + +#include "qemu/osdep.h" +#include "qemu-common.h" +#include "qemu/units.h" +#include "qapi/error.h" +#include "hw/hw.h" +#include "hw/ppc/ppc.h" +#include "hw/sysbus.h" +#include "hw/pci/pci_host.h" +#include "hw/irq.h" +#include "hw/pci-host/mv64361.h" +#include "hw/isa/vt82c686.h" +#include "hw/ide/pci.h" +#include "hw/i2c/smbus_eeprom.h" +#include "hw/qdev-properties.h" +#include "sysemu/reset.h" +#include "sysemu/runstate.h" +#include "sysemu/qtest.h" +#include "hw/boards.h" +#include "hw/loader.h" +#include "hw/fw-path-provider.h" +#include "elf.h" +#include "qemu/log.h" +#include "qemu/error-report.h" +#include "sysemu/kvm.h" +#include "kvm_ppc.h" +#include "exec/address-spaces.h" +#include "qom/qom-qobject.h" +#include "qapi/qmp/qdict.h" +#include "trace.h" +#include "qemu/datadir.h" +#include "sysemu/device_tree.h" +#include "hw/ppc/vof.h" + +#include + +#define PROM_FILENAME "vof.bin" +#define PROM_ADDR 0xfff00000 +#define PROM_SIZE 0x80000 + +#define KVMPPC_HCALL_BASE 0xf000 +#define KVMPPC_H_RTAS (KVMPPC_HCALL_BASE + 0x0) +#define KVMPPC_H_VOF_CLIENT (KVMPPC_HCALL_BASE + 0x5) + +#define H_SUCCESS 0 +#define H_PRIVILEGE -3 /* Caller not privileged */ +#define H_PARAMETER -4 /* Parameter invalid, out-of-range or conflicting */ + +#define BUS_FREQ_HZ 133333333 + +#define PCI0_CFG_ADDR 0xcf8 +#define PCI0_MEM_BASE 0xc0000000 +#define PCI0_MEM_SIZE 0x20000000 +#define PCI0_IO_BASE 0xf8000000 +#define PCI0_IO_SIZE 0x10000 + +#define PCI1_CFG_ADDR 0xc78 +#define PCI1_MEM_BASE 0x80000000 +#define PCI1_MEM_SIZE 0x40000000 +#define PCI1_IO_BASE 0xfe000000 +#define PCI1_IO_SIZE 0x10000 + +#define TYPE_PEGASOS2_MACHINE MACHINE_TYPE_NAME("pegasos2") +OBJECT_DECLARE_TYPE(Pegasos2MachineState, MachineClass, PEGASOS2_MACHINE) + +struct Pegasos2MachineState { + MachineState parent_obj; + PowerPCCPU *cpu; + DeviceState *mv; + Vof *vof; + void *fdt_blob; + uint64_t kernel_addr; + uint64_t kernel_entry; + uint64_t kernel_size; +}; + +static void *build_fdt(MachineState *machine, int *fdt_size); + +static void pegasos2_cpu_reset(void *opaque) +{ + PowerPCCPU *cpu = opaque; + Pegasos2MachineState *pm = PEGASOS2_MACHINE(current_machine); + + cpu_reset(CPU(cpu)); + cpu->env.spr[SPR_HID1] = 7ULL << 28; + if (pm->vof) { + cpu->env.gpr[1] = 2 * VOF_STACK_SIZE - 0x20; + cpu->env.nip = 0x100; + } +} + +static void pegasos2_init(MachineState *machine) +{ + Pegasos2MachineState *pm = PEGASOS2_MACHINE(machine); + CPUPPCState *env; + MemoryRegion *rom = g_new(MemoryRegion, 1); + PCIBus *pci_bus; + PCIDevice *dev; + I2CBus *i2c_bus; + const char *fwname = machine->firmware ?: PROM_FILENAME; + char *filename; + int sz; + uint8_t *spd_data; + + /* init CPU */ + pm->cpu = POWERPC_CPU(cpu_create(machine->cpu_type)); + env = &pm->cpu->env; + if (PPC_INPUT(env) != PPC_FLAGS_INPUT_6xx) { + error_report("Incompatible CPU, only 6xx bus supported"); + exit(1); + } + + /* Set time-base frequency */ + cpu_ppc_tb_init(env, BUS_FREQ_HZ / 4); + qemu_register_reset(pegasos2_cpu_reset, pm->cpu); + + /* RAM */ + if (machine->ram_size > 2 * GiB) { + error_report("RAM size more than 2 GiB is not supported"); + exit(1); + } + memory_region_add_subregion(get_system_memory(), 0, machine->ram); + + /* allocate and load firmware */ + filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, fwname); + if (!filename) { + error_report("Could not find firmware '%s'", fwname); + exit(1); + } + if (!machine->firmware && !pm->vof) { + pm->vof = g_malloc0(sizeof(*pm->vof)); + } + memory_region_init_rom(rom, NULL, "pegasos2.rom", PROM_SIZE, 
&error_fatal); + memory_region_add_subregion(get_system_memory(), PROM_ADDR, rom); + sz = load_elf(filename, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 1, + PPC_ELF_MACHINE, 0, 0); + if (sz <= 0) { + sz = load_image_targphys(filename, pm->vof ? 0 : PROM_ADDR, PROM_SIZE); + } + if (sz <= 0 || sz > PROM_SIZE) { + error_report("Could not load firmware '%s'", filename); + exit(1); + } + g_free(filename); + if (pm->vof) { + pm->vof->fw_size = sz; + } + + /* Marvell Discovery II system controller */ + pm->mv = DEVICE(sysbus_create_simple(TYPE_MV64361, -1, + ((qemu_irq *)env->irq_inputs)[PPC6xx_INPUT_INT])); + pci_bus = mv64361_get_pci_bus(pm->mv, 1); + + /* VIA VT8231 South Bridge (multifunction PCI device) */ + /* VT8231 function 0: PCI-to-ISA Bridge */ + dev = pci_create_simple_multifunction(pci_bus, PCI_DEVFN(12, 0), true, + TYPE_VT8231_ISA); + qdev_connect_gpio_out(DEVICE(dev), 0, + qdev_get_gpio_in_named(pm->mv, "gpp", 31)); + + /* VT8231 function 1: IDE Controller */ + dev = pci_create_simple(pci_bus, PCI_DEVFN(12, 1), "via-ide"); + pci_ide_create_devs(dev); + + /* VT8231 function 2-3: USB Ports */ + pci_create_simple(pci_bus, PCI_DEVFN(12, 2), "vt82c686b-usb-uhci"); + pci_create_simple(pci_bus, PCI_DEVFN(12, 3), "vt82c686b-usb-uhci"); + + /* VT8231 function 4: Power Management Controller */ + dev = pci_create_simple(pci_bus, PCI_DEVFN(12, 4), TYPE_VT8231_PM); + i2c_bus = I2C_BUS(qdev_get_child_bus(DEVICE(dev), "i2c")); + spd_data = spd_data_generate(DDR, machine->ram_size); + smbus_eeprom_init_one(i2c_bus, 0x57, spd_data); + + /* VT8231 function 5-6: AC97 Audio & Modem */ + pci_create_simple(pci_bus, PCI_DEVFN(12, 5), TYPE_VIA_AC97); + pci_create_simple(pci_bus, PCI_DEVFN(12, 6), TYPE_VIA_MC97); + + /* other PC hardware */ + pci_vga_init(pci_bus); + + if (machine->kernel_filename) { + sz = load_elf(machine->kernel_filename, NULL, NULL, NULL, + &pm->kernel_entry, &pm->kernel_addr, NULL, NULL, 1, + PPC_ELF_MACHINE, 0, 0); + if (sz <= 0) { + error_report("Could not load kernel '%s'", + machine->kernel_filename); + exit(1); + } + pm->kernel_size = sz; + if (!pm->vof) { + warn_report("Option -kernel may be ineffective with -bios."); + } + } else if (pm->vof && !qtest_enabled()) { + warn_report("Using Virtual OpenFirmware but no -kernel option."); + } + + if (!pm->vof && machine->kernel_cmdline && machine->kernel_cmdline[0]) { + warn_report("Option -append may be ineffective with -bios."); + } +} + +static uint32_t pegasos2_mv_reg_read(Pegasos2MachineState *pm, + uint32_t addr, uint32_t len) +{ + MemoryRegion *r = sysbus_mmio_get_region(SYS_BUS_DEVICE(pm->mv), 0); + uint64_t val = 0xffffffffULL; + memory_region_dispatch_read(r, addr, &val, size_memop(len) | MO_LE, + MEMTXATTRS_UNSPECIFIED); + return val; +} + +static void pegasos2_mv_reg_write(Pegasos2MachineState *pm, uint32_t addr, + uint32_t len, uint32_t val) +{ + MemoryRegion *r = sysbus_mmio_get_region(SYS_BUS_DEVICE(pm->mv), 0); + memory_region_dispatch_write(r, addr, val, size_memop(len) | MO_LE, + MEMTXATTRS_UNSPECIFIED); +} + +static uint32_t pegasos2_pci_config_read(Pegasos2MachineState *pm, int bus, + uint32_t addr, uint32_t len) +{ + hwaddr pcicfg = bus ? PCI1_CFG_ADDR : PCI0_CFG_ADDR; + uint64_t val = 0xffffffffULL; + + if (len <= 4) { + pegasos2_mv_reg_write(pm, pcicfg, 4, addr | BIT(31)); + val = pegasos2_mv_reg_read(pm, pcicfg + 4, len); + } + return val; +} + +static void pegasos2_pci_config_write(Pegasos2MachineState *pm, int bus, + uint32_t addr, uint32_t len, uint32_t val) +{ + hwaddr pcicfg = bus ? 
PCI1_CFG_ADDR : PCI0_CFG_ADDR; + + pegasos2_mv_reg_write(pm, pcicfg, 4, addr | BIT(31)); + pegasos2_mv_reg_write(pm, pcicfg + 4, len, val); +} + +static void pegasos2_machine_reset(MachineState *machine) +{ + Pegasos2MachineState *pm = PEGASOS2_MACHINE(machine); + void *fdt; + uint64_t d[2]; + int sz; + + qemu_devices_reset(); + if (!pm->vof) { + return; /* Firmware should set up machine so nothing to do */ + } + + /* Otherwise, set up devices that board firmware would normally do */ + pegasos2_mv_reg_write(pm, 0, 4, 0x28020ff); + pegasos2_mv_reg_write(pm, 0x278, 4, 0xa31fc); + pegasos2_mv_reg_write(pm, 0xf300, 4, 0x11ff0400); + pegasos2_mv_reg_write(pm, 0xf10c, 4, 0x80000000); + pegasos2_mv_reg_write(pm, 0x1c, 4, 0x8000000); + pegasos2_pci_config_write(pm, 0, PCI_COMMAND, 2, PCI_COMMAND_IO | + PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER); + pegasos2_pci_config_write(pm, 1, PCI_COMMAND, 2, PCI_COMMAND_IO | + PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER); + + pegasos2_pci_config_write(pm, 1, (PCI_DEVFN(12, 0) << 8) | + PCI_INTERRUPT_LINE, 2, 0x9); + pegasos2_pci_config_write(pm, 1, (PCI_DEVFN(12, 0) << 8) | + 0x50, 1, 0x2); + + pegasos2_pci_config_write(pm, 1, (PCI_DEVFN(12, 1) << 8) | + PCI_INTERRUPT_LINE, 2, 0x109); + pegasos2_pci_config_write(pm, 1, (PCI_DEVFN(12, 1) << 8) | + PCI_CLASS_PROG, 1, 0xf); + pegasos2_pci_config_write(pm, 1, (PCI_DEVFN(12, 1) << 8) | + 0x40, 1, 0xb); + pegasos2_pci_config_write(pm, 1, (PCI_DEVFN(12, 1) << 8) | + 0x50, 4, 0x17171717); + pegasos2_pci_config_write(pm, 1, (PCI_DEVFN(12, 1) << 8) | + PCI_COMMAND, 2, 0x87); + + pegasos2_pci_config_write(pm, 1, (PCI_DEVFN(12, 2) << 8) | + PCI_INTERRUPT_LINE, 2, 0x409); + + pegasos2_pci_config_write(pm, 1, (PCI_DEVFN(12, 3) << 8) | + PCI_INTERRUPT_LINE, 2, 0x409); + + pegasos2_pci_config_write(pm, 1, (PCI_DEVFN(12, 4) << 8) | + PCI_INTERRUPT_LINE, 2, 0x9); + pegasos2_pci_config_write(pm, 1, (PCI_DEVFN(12, 4) << 8) | + 0x48, 4, 0xf00); + pegasos2_pci_config_write(pm, 1, (PCI_DEVFN(12, 4) << 8) | + 0x40, 4, 0x558020); + pegasos2_pci_config_write(pm, 1, (PCI_DEVFN(12, 4) << 8) | + 0x90, 4, 0xd00); + + pegasos2_pci_config_write(pm, 1, (PCI_DEVFN(12, 5) << 8) | + PCI_INTERRUPT_LINE, 2, 0x309); + + pegasos2_pci_config_write(pm, 1, (PCI_DEVFN(12, 6) << 8) | + PCI_INTERRUPT_LINE, 2, 0x309); + + /* Device tree and VOF set up */ + vof_init(pm->vof, machine->ram_size, &error_fatal); + if (vof_claim(pm->vof, 0, VOF_STACK_SIZE, VOF_STACK_SIZE) == -1) { + error_report("Memory allocation for stack failed"); + exit(1); + } + if (pm->kernel_size && + vof_claim(pm->vof, pm->kernel_addr, pm->kernel_size, 0) == -1) { + error_report("Memory for kernel is in use"); + exit(1); + } + fdt = build_fdt(machine, &sz); + /* FIXME: VOF assumes entry is same as load address */ + d[0] = cpu_to_be64(pm->kernel_entry); + d[1] = cpu_to_be64(pm->kernel_size - (pm->kernel_entry - pm->kernel_addr)); + qemu_fdt_setprop(fdt, "/chosen", "qemu,boot-kernel", d, sizeof(d)); + + qemu_fdt_dumpdtb(fdt, fdt_totalsize(fdt)); + g_free(pm->fdt_blob); + pm->fdt_blob = fdt; + + vof_build_dt(fdt, pm->vof); + vof_client_open_store(fdt, pm->vof, "/chosen", "stdout", "/failsafe"); + pm->cpu->vhyp = PPC_VIRTUAL_HYPERVISOR(machine); +} + +enum pegasos2_rtas_tokens { + RTAS_RESTART_RTAS = 0, + RTAS_NVRAM_FETCH = 1, + RTAS_NVRAM_STORE = 2, + RTAS_GET_TIME_OF_DAY = 3, + RTAS_SET_TIME_OF_DAY = 4, + RTAS_EVENT_SCAN = 6, + RTAS_CHECK_EXCEPTION = 7, + RTAS_READ_PCI_CONFIG = 8, + RTAS_WRITE_PCI_CONFIG = 9, + RTAS_DISPLAY_CHARACTER = 10, + RTAS_SET_INDICATOR = 11, + RTAS_POWER_OFF = 17, + 
RTAS_SUSPEND = 18, + RTAS_HIBERNATE = 19, + RTAS_SYSTEM_REBOOT = 20, +}; + +static target_ulong pegasos2_rtas(PowerPCCPU *cpu, Pegasos2MachineState *pm, + target_ulong args_real) +{ + AddressSpace *as = CPU(cpu)->as; + uint32_t token = ldl_be_phys(as, args_real); + uint32_t nargs = ldl_be_phys(as, args_real + 4); + uint32_t nrets = ldl_be_phys(as, args_real + 8); + uint32_t args = args_real + 12; + uint32_t rets = args_real + 12 + nargs * 4; + + if (nrets < 1) { + qemu_log_mask(LOG_GUEST_ERROR, "Too few return values in RTAS call\n"); + return H_PARAMETER; + } + switch (token) { + case RTAS_GET_TIME_OF_DAY: + { + QObject *qo = object_property_get_qobject(qdev_get_machine(), + "rtc-time", &error_fatal); + QDict *qd = qobject_to(QDict, qo); + + if (nargs != 0 || nrets != 8 || !qd) { + stl_be_phys(as, rets, -1); + qobject_unref(qo); + return H_PARAMETER; + } + + stl_be_phys(as, rets, 0); + stl_be_phys(as, rets + 4, qdict_get_int(qd, "tm_year") + 1900); + stl_be_phys(as, rets + 8, qdict_get_int(qd, "tm_mon") + 1); + stl_be_phys(as, rets + 12, qdict_get_int(qd, "tm_mday")); + stl_be_phys(as, rets + 16, qdict_get_int(qd, "tm_hour")); + stl_be_phys(as, rets + 20, qdict_get_int(qd, "tm_min")); + stl_be_phys(as, rets + 24, qdict_get_int(qd, "tm_sec")); + stl_be_phys(as, rets + 28, 0); + qobject_unref(qo); + return H_SUCCESS; + } + case RTAS_READ_PCI_CONFIG: + { + uint32_t addr, len, val; + + if (nargs != 2 || nrets != 2) { + stl_be_phys(as, rets, -1); + return H_PARAMETER; + } + addr = ldl_be_phys(as, args); + len = ldl_be_phys(as, args + 4); + val = pegasos2_pci_config_read(pm, !(addr >> 24), + addr & 0x0fffffff, len); + stl_be_phys(as, rets, 0); + stl_be_phys(as, rets + 4, val); + return H_SUCCESS; + } + case RTAS_WRITE_PCI_CONFIG: + { + uint32_t addr, len, val; + + if (nargs != 3 || nrets != 1) { + stl_be_phys(as, rets, -1); + return H_PARAMETER; + } + addr = ldl_be_phys(as, args); + len = ldl_be_phys(as, args + 4); + val = ldl_be_phys(as, args + 8); + pegasos2_pci_config_write(pm, !(addr >> 24), + addr & 0x0fffffff, len, val); + stl_be_phys(as, rets, 0); + return H_SUCCESS; + } + case RTAS_DISPLAY_CHARACTER: + if (nargs != 1 || nrets != 1) { + stl_be_phys(as, rets, -1); + return H_PARAMETER; + } + qemu_log_mask(LOG_UNIMP, "%c", ldl_be_phys(as, args)); + stl_be_phys(as, rets, 0); + return H_SUCCESS; + case RTAS_POWER_OFF: + { + if (nargs != 2 || nrets != 1) { + stl_be_phys(as, rets, -1); + return H_PARAMETER; + } + qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN); + stl_be_phys(as, rets, 0); + return H_SUCCESS; + } + default: + qemu_log_mask(LOG_UNIMP, "Unknown RTAS token %u (args=%u, rets=%u)\n", + token, nargs, nrets); + stl_be_phys(as, rets, 0); + return H_SUCCESS; + } +} + +static void pegasos2_hypercall(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu) +{ + Pegasos2MachineState *pm = PEGASOS2_MACHINE(vhyp); + CPUPPCState *env = &cpu->env; + + /* The TCG path should also be holding the BQL at this point */ + g_assert(qemu_mutex_iothread_locked()); + + if (msr_pr) { + qemu_log_mask(LOG_GUEST_ERROR, "Hypercall made with MSR[PR]=1\n"); + env->gpr[3] = H_PRIVILEGE; + } else if (env->gpr[3] == KVMPPC_H_RTAS) { + env->gpr[3] = pegasos2_rtas(cpu, pm, env->gpr[4]); + } else if (env->gpr[3] == KVMPPC_H_VOF_CLIENT) { + int ret = vof_client_call(MACHINE(pm), pm->vof, pm->fdt_blob, + env->gpr[4]); + env->gpr[3] = (ret ? 
H_PARAMETER : H_SUCCESS); + } else { + qemu_log_mask(LOG_GUEST_ERROR, "Unsupported hypercall " TARGET_FMT_lx + "\n", env->gpr[3]); + env->gpr[3] = -1; + } +} + +static void vhyp_nop(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu) +{ +} + +static target_ulong vhyp_encode_hpt_for_kvm_pr(PPCVirtualHypervisor *vhyp) +{ + return POWERPC_CPU(current_cpu)->env.spr[SPR_SDR1]; +} + +static bool pegasos2_setprop(MachineState *ms, const char *path, + const char *propname, void *val, int vallen) +{ + return true; +} + +static void pegasos2_machine_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + PPCVirtualHypervisorClass *vhc = PPC_VIRTUAL_HYPERVISOR_CLASS(oc); + VofMachineIfClass *vmc = VOF_MACHINE_CLASS(oc); + + mc->desc = "Genesi/bPlan Pegasos II"; + mc->init = pegasos2_init; + mc->reset = pegasos2_machine_reset; + mc->block_default_type = IF_IDE; + mc->default_boot_order = "cd"; + mc->default_display = "std"; + mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("7400_v2.9"); + mc->default_ram_id = "pegasos2.ram"; + mc->default_ram_size = 512 * MiB; + + vhc->hypercall = pegasos2_hypercall; + vhc->cpu_exec_enter = vhyp_nop; + vhc->cpu_exec_exit = vhyp_nop; + vhc->encode_hpt_for_kvm_pr = vhyp_encode_hpt_for_kvm_pr; + + vmc->setprop = pegasos2_setprop; +} + +static const TypeInfo pegasos2_machine_info = { + .name = TYPE_PEGASOS2_MACHINE, + .parent = TYPE_MACHINE, + .class_init = pegasos2_machine_class_init, + .instance_size = sizeof(Pegasos2MachineState), + .interfaces = (InterfaceInfo[]) { + { TYPE_PPC_VIRTUAL_HYPERVISOR }, + { TYPE_VOF_MACHINE_IF }, + { } + }, +}; + +static void pegasos2_machine_register_types(void) +{ + type_register_static(&pegasos2_machine_info); +} + +type_init(pegasos2_machine_register_types) + +/* FDT creation for passing to firmware */ + +typedef struct { + void *fdt; + const char *path; +} FDTInfo; + +/* We do everything in reverse order so it comes out right in the tree */ + +static void dt_ide(PCIBus *bus, PCIDevice *d, FDTInfo *fi) +{ + qemu_fdt_setprop_string(fi->fdt, fi->path, "device_type", "spi"); +} + +static void dt_usb(PCIBus *bus, PCIDevice *d, FDTInfo *fi) +{ + qemu_fdt_setprop_cell(fi->fdt, fi->path, "#size-cells", 0); + qemu_fdt_setprop_cell(fi->fdt, fi->path, "#address-cells", 1); + qemu_fdt_setprop_string(fi->fdt, fi->path, "device_type", "usb"); +} + +static void dt_isa(PCIBus *bus, PCIDevice *d, FDTInfo *fi) +{ + GString *name = g_string_sized_new(64); + uint32_t cells[3]; + + qemu_fdt_setprop_cell(fi->fdt, fi->path, "#size-cells", 1); + qemu_fdt_setprop_cell(fi->fdt, fi->path, "#address-cells", 2); + qemu_fdt_setprop_string(fi->fdt, fi->path, "device_type", "isa"); + qemu_fdt_setprop_string(fi->fdt, fi->path, "name", "isa"); + + /* additional devices */ + g_string_printf(name, "%s/lpt@i3bc", fi->path); + qemu_fdt_add_subnode(fi->fdt, name->str); + qemu_fdt_setprop_cell(fi->fdt, name->str, "clock-frequency", 0); + cells[0] = cpu_to_be32(7); + cells[1] = 0; + qemu_fdt_setprop(fi->fdt, name->str, "interrupts", + cells, 2 * sizeof(cells[0])); + cells[0] = cpu_to_be32(1); + cells[1] = cpu_to_be32(0x3bc); + cells[2] = cpu_to_be32(8); + qemu_fdt_setprop(fi->fdt, name->str, "reg", cells, 3 * sizeof(cells[0])); + qemu_fdt_setprop_string(fi->fdt, name->str, "device_type", "lpt"); + qemu_fdt_setprop_string(fi->fdt, name->str, "name", "lpt"); + + g_string_printf(name, "%s/fdc@i3f0", fi->path); + qemu_fdt_add_subnode(fi->fdt, name->str); + qemu_fdt_setprop_cell(fi->fdt, name->str, "clock-frequency", 0); + cells[0] = cpu_to_be32(6); + cells[1] = 
0; + qemu_fdt_setprop(fi->fdt, name->str, "interrupts", + cells, 2 * sizeof(cells[0])); + cells[0] = cpu_to_be32(1); + cells[1] = cpu_to_be32(0x3f0); + cells[2] = cpu_to_be32(8); + qemu_fdt_setprop(fi->fdt, name->str, "reg", cells, 3 * sizeof(cells[0])); + qemu_fdt_setprop_string(fi->fdt, name->str, "device_type", "fdc"); + qemu_fdt_setprop_string(fi->fdt, name->str, "name", "fdc"); + + g_string_printf(name, "%s/timer@i40", fi->path); + qemu_fdt_add_subnode(fi->fdt, name->str); + qemu_fdt_setprop_cell(fi->fdt, name->str, "clock-frequency", 0); + cells[0] = cpu_to_be32(1); + cells[1] = cpu_to_be32(0x40); + cells[2] = cpu_to_be32(8); + qemu_fdt_setprop(fi->fdt, name->str, "reg", cells, 3 * sizeof(cells[0])); + qemu_fdt_setprop_string(fi->fdt, name->str, "device_type", "timer"); + qemu_fdt_setprop_string(fi->fdt, name->str, "name", "timer"); + + g_string_printf(name, "%s/rtc@i70", fi->path); + qemu_fdt_add_subnode(fi->fdt, name->str); + qemu_fdt_setprop_string(fi->fdt, name->str, "compatible", "ds1385-rtc"); + qemu_fdt_setprop_cell(fi->fdt, name->str, "clock-frequency", 0); + cells[0] = cpu_to_be32(8); + cells[1] = 0; + qemu_fdt_setprop(fi->fdt, name->str, "interrupts", + cells, 2 * sizeof(cells[0])); + cells[0] = cpu_to_be32(1); + cells[1] = cpu_to_be32(0x70); + cells[2] = cpu_to_be32(2); + qemu_fdt_setprop(fi->fdt, name->str, "reg", cells, 3 * sizeof(cells[0])); + qemu_fdt_setprop_string(fi->fdt, name->str, "device_type", "rtc"); + qemu_fdt_setprop_string(fi->fdt, name->str, "name", "rtc"); + + g_string_printf(name, "%s/keyboard@i60", fi->path); + qemu_fdt_add_subnode(fi->fdt, name->str); + cells[0] = cpu_to_be32(1); + cells[1] = 0; + qemu_fdt_setprop(fi->fdt, name->str, "interrupts", + cells, 2 * sizeof(cells[0])); + cells[0] = cpu_to_be32(1); + cells[1] = cpu_to_be32(0x60); + cells[2] = cpu_to_be32(5); + qemu_fdt_setprop(fi->fdt, name->str, "reg", cells, 3 * sizeof(cells[0])); + qemu_fdt_setprop_string(fi->fdt, name->str, "device_type", "keyboard"); + qemu_fdt_setprop_string(fi->fdt, name->str, "name", "keyboard"); + + g_string_printf(name, "%s/8042@i60", fi->path); + qemu_fdt_add_subnode(fi->fdt, name->str); + qemu_fdt_setprop_cell(fi->fdt, name->str, "#interrupt-cells", 2); + qemu_fdt_setprop_cell(fi->fdt, name->str, "#size-cells", 0); + qemu_fdt_setprop_cell(fi->fdt, name->str, "#address-cells", 1); + qemu_fdt_setprop_string(fi->fdt, name->str, "interrupt-controller", ""); + qemu_fdt_setprop_cell(fi->fdt, name->str, "clock-frequency", 0); + cells[0] = cpu_to_be32(1); + cells[1] = cpu_to_be32(0x60); + cells[2] = cpu_to_be32(5); + qemu_fdt_setprop(fi->fdt, name->str, "reg", cells, 3 * sizeof(cells[0])); + qemu_fdt_setprop_string(fi->fdt, name->str, "device_type", ""); + qemu_fdt_setprop_string(fi->fdt, name->str, "name", "8042"); + + g_string_printf(name, "%s/serial@i2f8", fi->path); + qemu_fdt_add_subnode(fi->fdt, name->str); + qemu_fdt_setprop_cell(fi->fdt, name->str, "clock-frequency", 0); + cells[0] = cpu_to_be32(3); + cells[1] = 0; + qemu_fdt_setprop(fi->fdt, name->str, "interrupts", + cells, 2 * sizeof(cells[0])); + cells[0] = cpu_to_be32(1); + cells[1] = cpu_to_be32(0x2f8); + cells[2] = cpu_to_be32(8); + qemu_fdt_setprop(fi->fdt, name->str, "reg", cells, 3 * sizeof(cells[0])); + qemu_fdt_setprop_string(fi->fdt, name->str, "device_type", "serial"); + qemu_fdt_setprop_string(fi->fdt, name->str, "name", "serial"); + + g_string_free(name, TRUE); +} + +static struct { + const char *id; + const char *name; + void (*dtf)(PCIBus *bus, PCIDevice *d, FDTInfo *fi); +} device_map[] = { + { 
"pci11ab,6460", "host", NULL }, + { "pci1106,8231", "isa", dt_isa }, + { "pci1106,571", "ide", dt_ide }, + { "pci1106,3044", "firewire", NULL }, + { "pci1106,3038", "usb", dt_usb }, + { "pci1106,8235", "other", NULL }, + { "pci1106,3058", "sound", NULL }, + { NULL, NULL } +}; + +static void add_pci_device(PCIBus *bus, PCIDevice *d, void *opaque) +{ + FDTInfo *fi = opaque; + GString *node = g_string_new(NULL); + uint32_t cells[(PCI_NUM_REGIONS + 1) * 5]; + int i, j; + const char *name = NULL; + g_autofree const gchar *pn = g_strdup_printf("pci%x,%x", + pci_get_word(&d->config[PCI_VENDOR_ID]), + pci_get_word(&d->config[PCI_DEVICE_ID])); + + for (i = 0; device_map[i].id; i++) { + if (!strcmp(pn, device_map[i].id)) { + name = device_map[i].name; + break; + } + } + g_string_printf(node, "%s/%s@%x", fi->path, (name ?: pn), + PCI_SLOT(d->devfn)); + if (PCI_FUNC(d->devfn)) { + g_string_append_printf(node, ",%x", PCI_FUNC(d->devfn)); + } + + qemu_fdt_add_subnode(fi->fdt, node->str); + if (device_map[i].dtf) { + FDTInfo cfi = { fi->fdt, node->str }; + device_map[i].dtf(bus, d, &cfi); + } + cells[0] = cpu_to_be32(d->devfn << 8); + cells[1] = 0; + cells[2] = 0; + cells[3] = 0; + cells[4] = 0; + j = 5; + for (i = 0; i < PCI_NUM_REGIONS; i++) { + if (!d->io_regions[i].size) { + continue; + } + cells[j] = cpu_to_be32(d->devfn << 8 | (PCI_BASE_ADDRESS_0 + i * 4)); + if (d->io_regions[i].type & PCI_BASE_ADDRESS_SPACE_IO) { + cells[j] |= cpu_to_be32(1 << 24); + } else { + cells[j] |= cpu_to_be32(2 << 24); + if (d->io_regions[i].type & PCI_BASE_ADDRESS_MEM_PREFETCH) { + cells[j] |= cpu_to_be32(4 << 28); + } + } + cells[j + 1] = 0; + cells[j + 2] = 0; + cells[j + 3] = cpu_to_be32(d->io_regions[i].size >> 32); + cells[j + 4] = cpu_to_be32(d->io_regions[i].size); + j += 5; + } + qemu_fdt_setprop(fi->fdt, node->str, "reg", cells, j * sizeof(cells[0])); + qemu_fdt_setprop_string(fi->fdt, node->str, "name", name ?: pn); + if (pci_get_byte(&d->config[PCI_INTERRUPT_PIN])) { + qemu_fdt_setprop_cell(fi->fdt, node->str, "interrupts", + pci_get_byte(&d->config[PCI_INTERRUPT_PIN])); + } + /* Pegasos2 firmware has subsystem-id amd subsystem-vendor-id swapped */ + qemu_fdt_setprop_cell(fi->fdt, node->str, "subsystem-vendor-id", + pci_get_word(&d->config[PCI_SUBSYSTEM_ID])); + qemu_fdt_setprop_cell(fi->fdt, node->str, "subsystem-id", + pci_get_word(&d->config[PCI_SUBSYSTEM_VENDOR_ID])); + cells[0] = pci_get_long(&d->config[PCI_CLASS_REVISION]); + qemu_fdt_setprop_cell(fi->fdt, node->str, "class-code", cells[0] >> 8); + qemu_fdt_setprop_cell(fi->fdt, node->str, "revision-id", cells[0] & 0xff); + qemu_fdt_setprop_cell(fi->fdt, node->str, "device-id", + pci_get_word(&d->config[PCI_DEVICE_ID])); + qemu_fdt_setprop_cell(fi->fdt, node->str, "vendor-id", + pci_get_word(&d->config[PCI_VENDOR_ID])); + + g_string_free(node, TRUE); +} + +static void *build_fdt(MachineState *machine, int *fdt_size) +{ + Pegasos2MachineState *pm = PEGASOS2_MACHINE(machine); + PowerPCCPU *cpu = pm->cpu; + PCIBus *pci_bus; + FDTInfo fi; + uint32_t cells[16]; + void *fdt = create_device_tree(fdt_size); + + fi.fdt = fdt; + + /* root node */ + qemu_fdt_setprop_string(fdt, "/", "CODEGEN,description", + "Pegasos CHRP PowerPC System"); + qemu_fdt_setprop_string(fdt, "/", "CODEGEN,board", "Pegasos2"); + qemu_fdt_setprop_string(fdt, "/", "CODEGEN,vendor", "bplan GmbH"); + qemu_fdt_setprop_string(fdt, "/", "revision", "2B"); + qemu_fdt_setprop_string(fdt, "/", "model", "Pegasos2"); + qemu_fdt_setprop_string(fdt, "/", "device_type", "chrp"); + 
qemu_fdt_setprop_cell(fdt, "/", "#address-cells", 1); + qemu_fdt_setprop_string(fdt, "/", "name", "bplan,Pegasos2"); + + /* pci@c0000000 */ + qemu_fdt_add_subnode(fdt, "/pci@c0000000"); + cells[0] = 0; + cells[1] = 0; + qemu_fdt_setprop(fdt, "/pci@c0000000", "bus-range", + cells, 2 * sizeof(cells[0])); + qemu_fdt_setprop_cell(fdt, "/pci@c0000000", "pci-bridge-number", 1); + cells[0] = cpu_to_be32(PCI0_MEM_BASE); + cells[1] = cpu_to_be32(PCI0_MEM_SIZE); + qemu_fdt_setprop(fdt, "/pci@c0000000", "reg", cells, 2 * sizeof(cells[0])); + cells[0] = cpu_to_be32(0x01000000); + cells[1] = 0; + cells[2] = 0; + cells[3] = cpu_to_be32(PCI0_IO_BASE); + cells[4] = 0; + cells[5] = cpu_to_be32(PCI0_IO_SIZE); + cells[6] = cpu_to_be32(0x02000000); + cells[7] = 0; + cells[8] = cpu_to_be32(PCI0_MEM_BASE); + cells[9] = cpu_to_be32(PCI0_MEM_BASE); + cells[10] = 0; + cells[11] = cpu_to_be32(PCI0_MEM_SIZE); + qemu_fdt_setprop(fdt, "/pci@c0000000", "ranges", + cells, 12 * sizeof(cells[0])); + qemu_fdt_setprop_cell(fdt, "/pci@c0000000", "#size-cells", 2); + qemu_fdt_setprop_cell(fdt, "/pci@c0000000", "#address-cells", 3); + qemu_fdt_setprop_string(fdt, "/pci@c0000000", "device_type", "pci"); + qemu_fdt_setprop_string(fdt, "/pci@c0000000", "name", "pci"); + + fi.path = "/pci@c0000000"; + pci_bus = mv64361_get_pci_bus(pm->mv, 0); + pci_for_each_device_reverse(pci_bus, 0, add_pci_device, &fi); + + /* pci@80000000 */ + qemu_fdt_add_subnode(fdt, "/pci@80000000"); + cells[0] = 0; + cells[1] = 0; + qemu_fdt_setprop(fdt, "/pci@80000000", "bus-range", + cells, 2 * sizeof(cells[0])); + qemu_fdt_setprop_cell(fdt, "/pci@80000000", "pci-bridge-number", 0); + cells[0] = cpu_to_be32(PCI1_MEM_BASE); + cells[1] = cpu_to_be32(PCI1_MEM_SIZE); + qemu_fdt_setprop(fdt, "/pci@80000000", "reg", cells, 2 * sizeof(cells[0])); + qemu_fdt_setprop_cell(fdt, "/pci@80000000", "8259-interrupt-acknowledge", + 0xf1000cb4); + cells[0] = cpu_to_be32(0x01000000); + cells[1] = 0; + cells[2] = 0; + cells[3] = cpu_to_be32(PCI1_IO_BASE); + cells[4] = 0; + cells[5] = cpu_to_be32(PCI1_IO_SIZE); + cells[6] = cpu_to_be32(0x02000000); + cells[7] = 0; + cells[8] = cpu_to_be32(PCI1_MEM_BASE); + cells[9] = cpu_to_be32(PCI1_MEM_BASE); + cells[10] = 0; + cells[11] = cpu_to_be32(PCI1_MEM_SIZE); + qemu_fdt_setprop(fdt, "/pci@80000000", "ranges", + cells, 12 * sizeof(cells[0])); + qemu_fdt_setprop_cell(fdt, "/pci@80000000", "#size-cells", 2); + qemu_fdt_setprop_cell(fdt, "/pci@80000000", "#address-cells", 3); + qemu_fdt_setprop_string(fdt, "/pci@80000000", "device_type", "pci"); + qemu_fdt_setprop_string(fdt, "/pci@80000000", "name", "pci"); + + fi.path = "/pci@80000000"; + pci_bus = mv64361_get_pci_bus(pm->mv, 1); + pci_for_each_device_reverse(pci_bus, 0, add_pci_device, &fi); + + qemu_fdt_add_subnode(fdt, "/failsafe"); + qemu_fdt_setprop_string(fdt, "/failsafe", "device_type", "serial"); + qemu_fdt_setprop_string(fdt, "/failsafe", "name", "failsafe"); + + qemu_fdt_add_subnode(fdt, "/rtas"); + qemu_fdt_setprop_cell(fdt, "/rtas", "system-reboot", RTAS_SYSTEM_REBOOT); + qemu_fdt_setprop_cell(fdt, "/rtas", "hibernate", RTAS_HIBERNATE); + qemu_fdt_setprop_cell(fdt, "/rtas", "suspend", RTAS_SUSPEND); + qemu_fdt_setprop_cell(fdt, "/rtas", "power-off", RTAS_POWER_OFF); + qemu_fdt_setprop_cell(fdt, "/rtas", "set-indicator", RTAS_SET_INDICATOR); + qemu_fdt_setprop_cell(fdt, "/rtas", "display-character", + RTAS_DISPLAY_CHARACTER); + qemu_fdt_setprop_cell(fdt, "/rtas", "write-pci-config", + RTAS_WRITE_PCI_CONFIG); + qemu_fdt_setprop_cell(fdt, "/rtas", "read-pci-config", + 
RTAS_READ_PCI_CONFIG); + /* Pegasos2 firmware misspells check-exception and guests use that */ + qemu_fdt_setprop_cell(fdt, "/rtas", "check-execption", + RTAS_CHECK_EXCEPTION); + qemu_fdt_setprop_cell(fdt, "/rtas", "event-scan", RTAS_EVENT_SCAN); + qemu_fdt_setprop_cell(fdt, "/rtas", "set-time-of-day", + RTAS_SET_TIME_OF_DAY); + qemu_fdt_setprop_cell(fdt, "/rtas", "get-time-of-day", + RTAS_GET_TIME_OF_DAY); + qemu_fdt_setprop_cell(fdt, "/rtas", "nvram-store", RTAS_NVRAM_STORE); + qemu_fdt_setprop_cell(fdt, "/rtas", "nvram-fetch", RTAS_NVRAM_FETCH); + qemu_fdt_setprop_cell(fdt, "/rtas", "restart-rtas", RTAS_RESTART_RTAS); + qemu_fdt_setprop_cell(fdt, "/rtas", "rtas-error-log-max", 0); + qemu_fdt_setprop_cell(fdt, "/rtas", "rtas-event-scan-rate", 0); + qemu_fdt_setprop_cell(fdt, "/rtas", "rtas-display-device", 0); + qemu_fdt_setprop_cell(fdt, "/rtas", "rtas-size", 20); + qemu_fdt_setprop_cell(fdt, "/rtas", "rtas-version", 1); + + /* cpus */ + qemu_fdt_add_subnode(fdt, "/cpus"); + qemu_fdt_setprop_cell(fdt, "/cpus", "#cpus", 1); + qemu_fdt_setprop_cell(fdt, "/cpus", "#address-cells", 1); + qemu_fdt_setprop_cell(fdt, "/cpus", "#size-cells", 0); + qemu_fdt_setprop_string(fdt, "/cpus", "name", "cpus"); + + /* FIXME Get CPU name from CPU object */ + const char *cp = "/cpus/PowerPC,G4"; + qemu_fdt_add_subnode(fdt, cp); + qemu_fdt_setprop_cell(fdt, cp, "l2cr", 0); + qemu_fdt_setprop_cell(fdt, cp, "d-cache-size", 0x8000); + qemu_fdt_setprop_cell(fdt, cp, "d-cache-block-size", + cpu->env.dcache_line_size); + qemu_fdt_setprop_cell(fdt, cp, "d-cache-line-size", + cpu->env.dcache_line_size); + qemu_fdt_setprop_cell(fdt, cp, "i-cache-size", 0x8000); + qemu_fdt_setprop_cell(fdt, cp, "i-cache-block-size", + cpu->env.icache_line_size); + qemu_fdt_setprop_cell(fdt, cp, "i-cache-line-size", + cpu->env.icache_line_size); + if (cpu->env.id_tlbs) { + qemu_fdt_setprop_cell(fdt, cp, "i-tlb-sets", cpu->env.nb_ways); + qemu_fdt_setprop_cell(fdt, cp, "i-tlb-size", cpu->env.tlb_per_way); + qemu_fdt_setprop_cell(fdt, cp, "d-tlb-sets", cpu->env.nb_ways); + qemu_fdt_setprop_cell(fdt, cp, "d-tlb-size", cpu->env.tlb_per_way); + qemu_fdt_setprop_string(fdt, cp, "tlb-split", ""); + } + qemu_fdt_setprop_cell(fdt, cp, "tlb-sets", cpu->env.nb_ways); + qemu_fdt_setprop_cell(fdt, cp, "tlb-size", cpu->env.nb_tlb); + qemu_fdt_setprop_string(fdt, cp, "state", "running"); + if (cpu->env.insns_flags & PPC_ALTIVEC) { + qemu_fdt_setprop_string(fdt, cp, "altivec", ""); + qemu_fdt_setprop_string(fdt, cp, "data-streams", ""); + } + /* + * FIXME What flags do data-streams, external-control and + * performance-monitor depend on? 
+ */ + qemu_fdt_setprop_string(fdt, cp, "external-control", ""); + if (cpu->env.insns_flags & PPC_FLOAT_FSQRT) { + qemu_fdt_setprop_string(fdt, cp, "general-purpose", ""); + } + qemu_fdt_setprop_string(fdt, cp, "performance-monitor", ""); + if (cpu->env.insns_flags & PPC_FLOAT_FRES) { + qemu_fdt_setprop_string(fdt, cp, "graphics", ""); + } + qemu_fdt_setprop_cell(fdt, cp, "reservation-granule-size", 4); + qemu_fdt_setprop_cell(fdt, cp, "timebase-frequency", + cpu->env.tb_env->tb_freq); + qemu_fdt_setprop_cell(fdt, cp, "bus-frequency", BUS_FREQ_HZ); + qemu_fdt_setprop_cell(fdt, cp, "clock-frequency", BUS_FREQ_HZ * 7.5); + qemu_fdt_setprop_cell(fdt, cp, "cpu-version", cpu->env.spr[SPR_PVR]); + cells[0] = 0; + cells[1] = 0; + qemu_fdt_setprop(fdt, cp, "reg", cells, 2 * sizeof(cells[0])); + qemu_fdt_setprop_string(fdt, cp, "device_type", "cpu"); + qemu_fdt_setprop_string(fdt, cp, "name", strrchr(cp, '/') + 1); + + /* memory */ + qemu_fdt_add_subnode(fdt, "/memory@0"); + cells[0] = 0; + cells[1] = cpu_to_be32(machine->ram_size); + qemu_fdt_setprop(fdt, "/memory@0", "reg", cells, 2 * sizeof(cells[0])); + qemu_fdt_setprop_string(fdt, "/memory@0", "device_type", "memory"); + qemu_fdt_setprop_string(fdt, "/memory@0", "name", "memory"); + + qemu_fdt_add_subnode(fdt, "/chosen"); + qemu_fdt_setprop_string(fdt, "/chosen", "bootargs", + machine->kernel_cmdline ?: ""); + qemu_fdt_setprop_string(fdt, "/chosen", "name", "chosen"); + + qemu_fdt_add_subnode(fdt, "/openprom"); + qemu_fdt_setprop_string(fdt, "/openprom", "model", "Pegasos2,1.1"); + + return fdt; +} diff --git a/hw/ppc/pnv.c b/hw/ppc/pnv.c new file mode 100644 index 000000000..71e45515f --- /dev/null +++ b/hw/ppc/pnv.c @@ -0,0 +1,2132 @@ +/* + * QEMU PowerPC PowerNV machine model + * + * Copyright (c) 2016, IBM Corporation. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. 
+ */ + +#include "qemu/osdep.h" +#include "qemu-common.h" +#include "qemu/datadir.h" +#include "qemu/units.h" +#include "qemu/cutils.h" +#include "qapi/error.h" +#include "sysemu/qtest.h" +#include "sysemu/sysemu.h" +#include "sysemu/numa.h" +#include "sysemu/reset.h" +#include "sysemu/runstate.h" +#include "sysemu/cpus.h" +#include "sysemu/device_tree.h" +#include "sysemu/hw_accel.h" +#include "target/ppc/cpu.h" +#include "hw/ppc/fdt.h" +#include "hw/ppc/ppc.h" +#include "hw/ppc/pnv.h" +#include "hw/ppc/pnv_core.h" +#include "hw/loader.h" +#include "hw/nmi.h" +#include "qapi/visitor.h" +#include "monitor/monitor.h" +#include "hw/intc/intc.h" +#include "hw/ipmi/ipmi.h" +#include "target/ppc/mmu-hash64.h" +#include "hw/pci/msi.h" + +#include "hw/ppc/xics.h" +#include "hw/qdev-properties.h" +#include "hw/ppc/pnv_xscom.h" +#include "hw/ppc/pnv_pnor.h" + +#include "hw/isa/isa.h" +#include "hw/char/serial.h" +#include "hw/rtc/mc146818rtc.h" + +#include + +#define FDT_MAX_SIZE (1 * MiB) + +#define FW_FILE_NAME "skiboot.lid" +#define FW_LOAD_ADDR 0x0 +#define FW_MAX_SIZE (16 * MiB) + +#define KERNEL_LOAD_ADDR 0x20000000 +#define KERNEL_MAX_SIZE (128 * MiB) +#define INITRD_LOAD_ADDR 0x28000000 +#define INITRD_MAX_SIZE (128 * MiB) + +static const char *pnv_chip_core_typename(const PnvChip *o) +{ + const char *chip_type = object_class_get_name(object_get_class(OBJECT(o))); + int len = strlen(chip_type) - strlen(PNV_CHIP_TYPE_SUFFIX); + char *s = g_strdup_printf(PNV_CORE_TYPE_NAME("%.*s"), len, chip_type); + const char *core_type = object_class_get_name(object_class_by_name(s)); + g_free(s); + return core_type; +} + +/* + * On Power Systems E880 (POWER8), the max cpus (threads) should be : + * 4 * 4 sockets * 12 cores * 8 threads = 1536 + * Let's make it 2^11 + */ +#define MAX_CPUS 2048 + +/* + * Memory nodes are created by hostboot, one for each range of memory + * that has a different "affinity". In practice, it means one range + * per chip. + */ +static void pnv_dt_memory(void *fdt, int chip_id, hwaddr start, hwaddr size) +{ + char *mem_name; + uint64_t mem_reg_property[2]; + int off; + + mem_reg_property[0] = cpu_to_be64(start); + mem_reg_property[1] = cpu_to_be64(size); + + mem_name = g_strdup_printf("memory@%"HWADDR_PRIx, start); + off = fdt_add_subnode(fdt, 0, mem_name); + g_free(mem_name); + + _FDT((fdt_setprop_string(fdt, off, "device_type", "memory"))); + _FDT((fdt_setprop(fdt, off, "reg", mem_reg_property, + sizeof(mem_reg_property)))); + _FDT((fdt_setprop_cell(fdt, off, "ibm,chip-id", chip_id))); +} + +static int get_cpus_node(void *fdt) +{ + int cpus_offset = fdt_path_offset(fdt, "/cpus"); + + if (cpus_offset < 0) { + cpus_offset = fdt_add_subnode(fdt, 0, "cpus"); + if (cpus_offset) { + _FDT((fdt_setprop_cell(fdt, cpus_offset, "#address-cells", 0x1))); + _FDT((fdt_setprop_cell(fdt, cpus_offset, "#size-cells", 0x0))); + } + } + _FDT(cpus_offset); + return cpus_offset; +} + +/* + * The PowerNV cores (and threads) need to use real HW ids and not an + * incremental index like it has been done on other platforms. This HW + * id is stored in the CPU PIR, it is used to create cpu nodes in the + * device tree, used in XSCOM to address cores and in interrupt + * servers. 
+ */ +static void pnv_dt_core(PnvChip *chip, PnvCore *pc, void *fdt) +{ + PowerPCCPU *cpu = pc->threads[0]; + CPUState *cs = CPU(cpu); + DeviceClass *dc = DEVICE_GET_CLASS(cs); + int smt_threads = CPU_CORE(pc)->nr_threads; + CPUPPCState *env = &cpu->env; + PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cs); + uint32_t servers_prop[smt_threads]; + int i; + uint32_t segs[] = {cpu_to_be32(28), cpu_to_be32(40), + 0xffffffff, 0xffffffff}; + uint32_t tbfreq = PNV_TIMEBASE_FREQ; + uint32_t cpufreq = 1000000000; + uint32_t page_sizes_prop[64]; + size_t page_sizes_prop_size; + const uint8_t pa_features[] = { 24, 0, + 0xf6, 0x3f, 0xc7, 0xc0, 0x80, 0xf0, + 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, + 0x80, 0x00, 0x80, 0x00, 0x80, 0x00 }; + int offset; + char *nodename; + int cpus_offset = get_cpus_node(fdt); + + nodename = g_strdup_printf("%s@%x", dc->fw_name, pc->pir); + offset = fdt_add_subnode(fdt, cpus_offset, nodename); + _FDT(offset); + g_free(nodename); + + _FDT((fdt_setprop_cell(fdt, offset, "ibm,chip-id", chip->chip_id))); + + _FDT((fdt_setprop_cell(fdt, offset, "reg", pc->pir))); + _FDT((fdt_setprop_cell(fdt, offset, "ibm,pir", pc->pir))); + _FDT((fdt_setprop_string(fdt, offset, "device_type", "cpu"))); + + _FDT((fdt_setprop_cell(fdt, offset, "cpu-version", env->spr[SPR_PVR]))); + _FDT((fdt_setprop_cell(fdt, offset, "d-cache-block-size", + env->dcache_line_size))); + _FDT((fdt_setprop_cell(fdt, offset, "d-cache-line-size", + env->dcache_line_size))); + _FDT((fdt_setprop_cell(fdt, offset, "i-cache-block-size", + env->icache_line_size))); + _FDT((fdt_setprop_cell(fdt, offset, "i-cache-line-size", + env->icache_line_size))); + + if (pcc->l1_dcache_size) { + _FDT((fdt_setprop_cell(fdt, offset, "d-cache-size", + pcc->l1_dcache_size))); + } else { + warn_report("Unknown L1 dcache size for cpu"); + } + if (pcc->l1_icache_size) { + _FDT((fdt_setprop_cell(fdt, offset, "i-cache-size", + pcc->l1_icache_size))); + } else { + warn_report("Unknown L1 icache size for cpu"); + } + + _FDT((fdt_setprop_cell(fdt, offset, "timebase-frequency", tbfreq))); + _FDT((fdt_setprop_cell(fdt, offset, "clock-frequency", cpufreq))); + _FDT((fdt_setprop_cell(fdt, offset, "ibm,slb-size", + cpu->hash64_opts->slb_size))); + _FDT((fdt_setprop_string(fdt, offset, "status", "okay"))); + _FDT((fdt_setprop(fdt, offset, "64-bit", NULL, 0))); + + if (ppc_has_spr(cpu, SPR_PURR)) { + _FDT((fdt_setprop(fdt, offset, "ibm,purr", NULL, 0))); + } + + if (ppc_hash64_has(cpu, PPC_HASH64_1TSEG)) { + _FDT((fdt_setprop(fdt, offset, "ibm,processor-segment-sizes", + segs, sizeof(segs)))); + } + + /* + * Advertise VMX/VSX (vector extensions) if available + * 0 / no property == no vector extensions + * 1 == VMX / Altivec available + * 2 == VSX available + */ + if (env->insns_flags & PPC_ALTIVEC) { + uint32_t vmx = (env->insns_flags2 & PPC2_VSX) ? 
2 : 1; + + _FDT((fdt_setprop_cell(fdt, offset, "ibm,vmx", vmx))); + } + + /* + * Advertise DFP (Decimal Floating Point) if available + * 0 / no property == no DFP + * 1 == DFP available + */ + if (env->insns_flags2 & PPC2_DFP) { + _FDT((fdt_setprop_cell(fdt, offset, "ibm,dfp", 1))); + } + + page_sizes_prop_size = ppc_create_page_sizes_prop(cpu, page_sizes_prop, + sizeof(page_sizes_prop)); + if (page_sizes_prop_size) { + _FDT((fdt_setprop(fdt, offset, "ibm,segment-page-sizes", + page_sizes_prop, page_sizes_prop_size))); + } + + _FDT((fdt_setprop(fdt, offset, "ibm,pa-features", + pa_features, sizeof(pa_features)))); + + /* Build interrupt servers properties */ + for (i = 0; i < smt_threads; i++) { + servers_prop[i] = cpu_to_be32(pc->pir + i); + } + _FDT((fdt_setprop(fdt, offset, "ibm,ppc-interrupt-server#s", + servers_prop, sizeof(servers_prop)))); +} + +static void pnv_dt_icp(PnvChip *chip, void *fdt, uint32_t pir, + uint32_t nr_threads) +{ + uint64_t addr = PNV_ICP_BASE(chip) | (pir << 12); + char *name; + const char compat[] = "IBM,power8-icp\0IBM,ppc-xicp"; + uint32_t irange[2], i, rsize; + uint64_t *reg; + int offset; + + irange[0] = cpu_to_be32(pir); + irange[1] = cpu_to_be32(nr_threads); + + rsize = sizeof(uint64_t) * 2 * nr_threads; + reg = g_malloc(rsize); + for (i = 0; i < nr_threads; i++) { + reg[i * 2] = cpu_to_be64(addr | ((pir + i) * 0x1000)); + reg[i * 2 + 1] = cpu_to_be64(0x1000); + } + + name = g_strdup_printf("interrupt-controller@%"PRIX64, addr); + offset = fdt_add_subnode(fdt, 0, name); + _FDT(offset); + g_free(name); + + _FDT((fdt_setprop(fdt, offset, "compatible", compat, sizeof(compat)))); + _FDT((fdt_setprop(fdt, offset, "reg", reg, rsize))); + _FDT((fdt_setprop_string(fdt, offset, "device_type", + "PowerPC-External-Interrupt-Presentation"))); + _FDT((fdt_setprop(fdt, offset, "interrupt-controller", NULL, 0))); + _FDT((fdt_setprop(fdt, offset, "ibm,interrupt-server-ranges", + irange, sizeof(irange)))); + _FDT((fdt_setprop_cell(fdt, offset, "#interrupt-cells", 1))); + _FDT((fdt_setprop_cell(fdt, offset, "#address-cells", 0))); + g_free(reg); +} + +static void pnv_chip_power8_dt_populate(PnvChip *chip, void *fdt) +{ + static const char compat[] = "ibm,power8-xscom\0ibm,xscom"; + int i; + + pnv_dt_xscom(chip, fdt, 0, + cpu_to_be64(PNV_XSCOM_BASE(chip)), + cpu_to_be64(PNV_XSCOM_SIZE), + compat, sizeof(compat)); + + for (i = 0; i < chip->nr_cores; i++) { + PnvCore *pnv_core = chip->cores[i]; + + pnv_dt_core(chip, pnv_core, fdt); + + /* Interrupt Control Presenters (ICP). One per core. 
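+ * Each thread also gets its own 0x1000-byte page of ICP registers + * at PNV_ICP_BASE(chip) plus (pir + thread) * 0x1000, which is what + * the "reg" property built by pnv_dt_icp() describes.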
*/ + pnv_dt_icp(chip, fdt, pnv_core->pir, CPU_CORE(pnv_core)->nr_threads); + } + + if (chip->ram_size) { + pnv_dt_memory(fdt, chip->chip_id, chip->ram_start, chip->ram_size); + } +} + +static void pnv_chip_power9_dt_populate(PnvChip *chip, void *fdt) +{ + static const char compat[] = "ibm,power9-xscom\0ibm,xscom"; + int i; + + pnv_dt_xscom(chip, fdt, 0, + cpu_to_be64(PNV9_XSCOM_BASE(chip)), + cpu_to_be64(PNV9_XSCOM_SIZE), + compat, sizeof(compat)); + + for (i = 0; i < chip->nr_cores; i++) { + PnvCore *pnv_core = chip->cores[i]; + + pnv_dt_core(chip, pnv_core, fdt); + } + + if (chip->ram_size) { + pnv_dt_memory(fdt, chip->chip_id, chip->ram_start, chip->ram_size); + } + + pnv_dt_lpc(chip, fdt, 0, PNV9_LPCM_BASE(chip), PNV9_LPCM_SIZE); +} + +static void pnv_chip_power10_dt_populate(PnvChip *chip, void *fdt) +{ + static const char compat[] = "ibm,power10-xscom\0ibm,xscom"; + int i; + + pnv_dt_xscom(chip, fdt, 0, + cpu_to_be64(PNV10_XSCOM_BASE(chip)), + cpu_to_be64(PNV10_XSCOM_SIZE), + compat, sizeof(compat)); + + for (i = 0; i < chip->nr_cores; i++) { + PnvCore *pnv_core = chip->cores[i]; + + pnv_dt_core(chip, pnv_core, fdt); + } + + if (chip->ram_size) { + pnv_dt_memory(fdt, chip->chip_id, chip->ram_start, chip->ram_size); + } + + pnv_dt_lpc(chip, fdt, 0, PNV10_LPCM_BASE(chip), PNV10_LPCM_SIZE); +} + +static void pnv_dt_rtc(ISADevice *d, void *fdt, int lpc_off) +{ + uint32_t io_base = d->ioport_id; + uint32_t io_regs[] = { + cpu_to_be32(1), + cpu_to_be32(io_base), + cpu_to_be32(2) + }; + char *name; + int node; + + name = g_strdup_printf("%s@i%x", qdev_fw_name(DEVICE(d)), io_base); + node = fdt_add_subnode(fdt, lpc_off, name); + _FDT(node); + g_free(name); + + _FDT((fdt_setprop(fdt, node, "reg", io_regs, sizeof(io_regs)))); + _FDT((fdt_setprop_string(fdt, node, "compatible", "pnpPNP,b00"))); +} + +static void pnv_dt_serial(ISADevice *d, void *fdt, int lpc_off) +{ + const char compatible[] = "ns16550\0pnpPNP,501"; + uint32_t io_base = d->ioport_id; + uint32_t io_regs[] = { + cpu_to_be32(1), + cpu_to_be32(io_base), + cpu_to_be32(8) + }; + char *name; + int node; + + name = g_strdup_printf("%s@i%x", qdev_fw_name(DEVICE(d)), io_base); + node = fdt_add_subnode(fdt, lpc_off, name); + _FDT(node); + g_free(name); + + _FDT((fdt_setprop(fdt, node, "reg", io_regs, sizeof(io_regs)))); + _FDT((fdt_setprop(fdt, node, "compatible", compatible, + sizeof(compatible)))); + + _FDT((fdt_setprop_cell(fdt, node, "clock-frequency", 1843200))); + _FDT((fdt_setprop_cell(fdt, node, "current-speed", 115200))); + _FDT((fdt_setprop_cell(fdt, node, "interrupts", d->isairq[0]))); + _FDT((fdt_setprop_cell(fdt, node, "interrupt-parent", + fdt_get_phandle(fdt, lpc_off)))); + + /* This is needed by Linux */ + _FDT((fdt_setprop_string(fdt, node, "device_type", "serial"))); +} + +static void pnv_dt_ipmi_bt(ISADevice *d, void *fdt, int lpc_off) +{ + const char compatible[] = "bt\0ipmi-bt"; + uint32_t io_base; + uint32_t io_regs[] = { + cpu_to_be32(1), + 0, /* 'io_base' retrieved from the 'ioport' property of 'isa-ipmi-bt' */ + cpu_to_be32(3) + }; + uint32_t irq; + char *name; + int node; + + io_base = object_property_get_int(OBJECT(d), "ioport", &error_fatal); + io_regs[1] = cpu_to_be32(io_base); + + irq = object_property_get_int(OBJECT(d), "irq", &error_fatal); + + name = g_strdup_printf("%s@i%x", qdev_fw_name(DEVICE(d)), io_base); + node = fdt_add_subnode(fdt, lpc_off, name); + _FDT(node); + g_free(name); + + _FDT((fdt_setprop(fdt, node, "reg", io_regs, sizeof(io_regs)))); + _FDT((fdt_setprop(fdt, node, "compatible", 
compatible, + sizeof(compatible)))); + + /* Mark it as reserved to avoid Linux trying to claim it */ + _FDT((fdt_setprop_string(fdt, node, "status", "reserved"))); + _FDT((fdt_setprop_cell(fdt, node, "interrupts", irq))); + _FDT((fdt_setprop_cell(fdt, node, "interrupt-parent", + fdt_get_phandle(fdt, lpc_off)))); +} + +typedef struct ForeachPopulateArgs { + void *fdt; + int offset; +} ForeachPopulateArgs; + +static int pnv_dt_isa_device(DeviceState *dev, void *opaque) +{ + ForeachPopulateArgs *args = opaque; + ISADevice *d = ISA_DEVICE(dev); + + if (object_dynamic_cast(OBJECT(dev), TYPE_MC146818_RTC)) { + pnv_dt_rtc(d, args->fdt, args->offset); + } else if (object_dynamic_cast(OBJECT(dev), TYPE_ISA_SERIAL)) { + pnv_dt_serial(d, args->fdt, args->offset); + } else if (object_dynamic_cast(OBJECT(dev), "isa-ipmi-bt")) { + pnv_dt_ipmi_bt(d, args->fdt, args->offset); + } else { + error_report("unknown isa device %s@i%x", qdev_fw_name(dev), + d->ioport_id); + } + + return 0; +} + +/* + * The default LPC bus of a multichip system is on chip 0. It's + * recognized by the firmware (skiboot) using a "primary" property. + */ +static void pnv_dt_isa(PnvMachineState *pnv, void *fdt) +{ + int isa_offset = fdt_path_offset(fdt, pnv->chips[0]->dt_isa_nodename); + ForeachPopulateArgs args = { + .fdt = fdt, + .offset = isa_offset, + }; + uint32_t phandle; + + _FDT((fdt_setprop(fdt, isa_offset, "primary", NULL, 0))); + + phandle = qemu_fdt_alloc_phandle(fdt); + assert(phandle > 0); + _FDT((fdt_setprop_cell(fdt, isa_offset, "phandle", phandle))); + + /* + * ISA devices are not necessarily parented to the ISA bus so we + * cannot use object_child_foreach() + */ + qbus_walk_children(BUS(pnv->isa_bus), pnv_dt_isa_device, NULL, NULL, NULL, + &args); +} + +static void pnv_dt_power_mgt(PnvMachineState *pnv, void *fdt) +{ + int off; + + off = fdt_add_subnode(fdt, 0, "ibm,opal"); + off = fdt_add_subnode(fdt, off, "power-mgt"); + + _FDT(fdt_setprop_cell(fdt, off, "ibm,enabled-stop-levels", 0xc0000000)); +} + +static void *pnv_dt_create(MachineState *machine) +{ + PnvMachineClass *pmc = PNV_MACHINE_GET_CLASS(machine); + PnvMachineState *pnv = PNV_MACHINE(machine); + void *fdt; + char *buf; + int off; + int i; + + fdt = g_malloc0(FDT_MAX_SIZE); + _FDT((fdt_create_empty_tree(fdt, FDT_MAX_SIZE))); + + /* /qemu node */ + _FDT((fdt_add_subnode(fdt, 0, "qemu"))); + + /* Root node */ + _FDT((fdt_setprop_cell(fdt, 0, "#address-cells", 0x2))); + _FDT((fdt_setprop_cell(fdt, 0, "#size-cells", 0x2))); + _FDT((fdt_setprop_string(fdt, 0, "model", + "IBM PowerNV (emulated by qemu)"))); + _FDT((fdt_setprop(fdt, 0, "compatible", pmc->compat, pmc->compat_size))); + + buf = qemu_uuid_unparse_strdup(&qemu_uuid); + _FDT((fdt_setprop_string(fdt, 0, "vm,uuid", buf))); + if (qemu_uuid_set) { + _FDT((fdt_property_string(fdt, "system-id", buf))); + } + g_free(buf); + + off = fdt_add_subnode(fdt, 0, "chosen"); + if (machine->kernel_cmdline) { + _FDT((fdt_setprop_string(fdt, off, "bootargs", + machine->kernel_cmdline))); + } + + if (pnv->initrd_size) { + uint32_t start_prop = cpu_to_be32(pnv->initrd_base); + uint32_t end_prop = cpu_to_be32(pnv->initrd_base + pnv->initrd_size); + + _FDT((fdt_setprop(fdt, off, "linux,initrd-start", + &start_prop, sizeof(start_prop)))); + _FDT((fdt_setprop(fdt, off, "linux,initrd-end", + &end_prop, sizeof(end_prop)))); + } + + /* Populate device tree for each chip */ + for (i = 0; i < pnv->num_chips; i++) { + PNV_CHIP_GET_CLASS(pnv->chips[i])->dt_populate(pnv->chips[i], fdt); + } + + /* Populate ISA devices on chip 0
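+ * (RTC, serial and IPMI BT); pnv_dt_isa() walks the ISA bus with + * qbus_walk_children() and pnv_dt_isa_device() adds one node per + * device it recognizes.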
*/ + pnv_dt_isa(pnv, fdt); + + if (pnv->bmc) { + pnv_dt_bmc_sensors(pnv->bmc, fdt); + } + + /* Create an extra node for power management on machines that support it */ + if (pmc->dt_power_mgt) { + pmc->dt_power_mgt(pnv, fdt); + } + + return fdt; +} + +static void pnv_powerdown_notify(Notifier *n, void *opaque) +{ + PnvMachineState *pnv = container_of(n, PnvMachineState, powerdown_notifier); + + if (pnv->bmc) { + pnv_bmc_powerdown(pnv->bmc); + } +} + +static void pnv_reset(MachineState *machine) +{ + PnvMachineState *pnv = PNV_MACHINE(machine); + IPMIBmc *bmc; + void *fdt; + + qemu_devices_reset(); + + /* + * The machine should provide by default an internal BMC simulator. + * If not, try to use the BMC device that was provided on the command + * line. + */ + bmc = pnv_bmc_find(&error_fatal); + if (!pnv->bmc) { + if (!bmc) { + if (!qtest_enabled()) { + warn_report("machine has no BMC device. Use '-device " + "ipmi-bmc-sim,id=bmc0 -device isa-ipmi-bt,bmc=bmc0,irq=10' " + "to define one"); + } + } else { + pnv_bmc_set_pnor(bmc, pnv->pnor); + pnv->bmc = bmc; + } + } + + fdt = pnv_dt_create(machine); + + /* Pack resulting tree */ + _FDT((fdt_pack(fdt))); + + qemu_fdt_dumpdtb(fdt, fdt_totalsize(fdt)); + cpu_physical_memory_write(PNV_FDT_ADDR, fdt, fdt_totalsize(fdt)); + + g_free(fdt); +} + +static ISABus *pnv_chip_power8_isa_create(PnvChip *chip, Error **errp) +{ + Pnv8Chip *chip8 = PNV8_CHIP(chip); + return pnv_lpc_isa_create(&chip8->lpc, true, errp); +} + +static ISABus *pnv_chip_power8nvl_isa_create(PnvChip *chip, Error **errp) +{ + Pnv8Chip *chip8 = PNV8_CHIP(chip); + return pnv_lpc_isa_create(&chip8->lpc, false, errp); +} + +static ISABus *pnv_chip_power9_isa_create(PnvChip *chip, Error **errp) +{ + Pnv9Chip *chip9 = PNV9_CHIP(chip); + return pnv_lpc_isa_create(&chip9->lpc, false, errp); +} + +static ISABus *pnv_chip_power10_isa_create(PnvChip *chip, Error **errp) +{ + Pnv10Chip *chip10 = PNV10_CHIP(chip); + return pnv_lpc_isa_create(&chip10->lpc, false, errp); +} + +static ISABus *pnv_isa_create(PnvChip *chip, Error **errp) +{ + return PNV_CHIP_GET_CLASS(chip)->isa_create(chip, errp); +} + +static void pnv_chip_power8_pic_print_info(PnvChip *chip, Monitor *mon) +{ + Pnv8Chip *chip8 = PNV8_CHIP(chip); + int i; + + ics_pic_print_info(&chip8->psi.ics, mon); + for (i = 0; i < chip->num_phbs; i++) { + pnv_phb3_msi_pic_print_info(&chip8->phbs[i].msis, mon); + ics_pic_print_info(&chip8->phbs[i].lsis, mon); + } +} + +static void pnv_chip_power9_pic_print_info(PnvChip *chip, Monitor *mon) +{ + Pnv9Chip *chip9 = PNV9_CHIP(chip); + int i, j; + + pnv_xive_pic_print_info(&chip9->xive, mon); + pnv_psi_pic_print_info(&chip9->psi, mon); + + for (i = 0; i < PNV9_CHIP_MAX_PEC; i++) { + PnvPhb4PecState *pec = &chip9->pecs[i]; + for (j = 0; j < pec->num_stacks; j++) { + pnv_phb4_pic_print_info(&pec->stacks[j].phb, mon); + } + } +} + +static uint64_t pnv_chip_power8_xscom_core_base(PnvChip *chip, + uint32_t core_id) +{ + return PNV_XSCOM_EX_BASE(core_id); +} + +static uint64_t pnv_chip_power9_xscom_core_base(PnvChip *chip, + uint32_t core_id) +{ + return PNV9_XSCOM_EC_BASE(core_id); +} + +static uint64_t pnv_chip_power10_xscom_core_base(PnvChip *chip, + uint32_t core_id) +{ + return PNV10_XSCOM_EC_BASE(core_id); +} + +static bool pnv_match_cpu(const char *default_type, const char *cpu_type) +{ + PowerPCCPUClass *ppc_default = + POWERPC_CPU_CLASS(object_class_by_name(default_type)); + PowerPCCPUClass *ppc = + POWERPC_CPU_CLASS(object_class_by_name(cpu_type)); + + return ppc_default->pvr_match(ppc_default, 
ppc->pvr); +} + +static void pnv_ipmi_bt_init(ISABus *bus, IPMIBmc *bmc, uint32_t irq) +{ + ISADevice *dev = isa_new("isa-ipmi-bt"); + + object_property_set_link(OBJECT(dev), "bmc", OBJECT(bmc), &error_fatal); + object_property_set_int(OBJECT(dev), "irq", irq, &error_fatal); + isa_realize_and_unref(dev, bus, &error_fatal); +} + +static void pnv_chip_power10_pic_print_info(PnvChip *chip, Monitor *mon) +{ + Pnv10Chip *chip10 = PNV10_CHIP(chip); + + pnv_psi_pic_print_info(&chip10->psi, mon); +} + +/* Always give the first 1GB to chip 0 else we won't boot */ +static uint64_t pnv_chip_get_ram_size(PnvMachineState *pnv, int chip_id) +{ + MachineState *machine = MACHINE(pnv); + uint64_t ram_per_chip; + + assert(machine->ram_size >= 1 * GiB); + + ram_per_chip = machine->ram_size / pnv->num_chips; + if (ram_per_chip >= 1 * GiB) { + return QEMU_ALIGN_DOWN(ram_per_chip, 1 * MiB); + } + + assert(pnv->num_chips > 1); + + ram_per_chip = (machine->ram_size - 1 * GiB) / (pnv->num_chips - 1); + return chip_id == 0 ? 1 * GiB : QEMU_ALIGN_DOWN(ram_per_chip, 1 * MiB); +} + +static void pnv_init(MachineState *machine) +{ + const char *bios_name = machine->firmware ?: FW_FILE_NAME; + PnvMachineState *pnv = PNV_MACHINE(machine); + MachineClass *mc = MACHINE_GET_CLASS(machine); + char *fw_filename; + long fw_size; + uint64_t chip_ram_start = 0; + int i; + char *chip_typename; + DriveInfo *pnor = drive_get(IF_MTD, 0, 0); + DeviceState *dev; + + /* allocate RAM */ + if (machine->ram_size < mc->default_ram_size) { + char *sz = size_to_str(mc->default_ram_size); + error_report("Invalid RAM size, should be bigger than %s", sz); + g_free(sz); + exit(EXIT_FAILURE); + } + memory_region_add_subregion(get_system_memory(), 0, machine->ram); + + /* + * Create our simple PNOR device + */ + dev = qdev_new(TYPE_PNV_PNOR); + if (pnor) { + qdev_prop_set_drive(dev, "drive", blk_by_legacy_dinfo(pnor)); + } + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + pnv->pnor = PNV_PNOR(dev); + + /* load skiboot firmware */ + fw_filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name); + if (!fw_filename) { + error_report("Could not find OPAL firmware '%s'", bios_name); + exit(1); + } + + fw_size = load_image_targphys(fw_filename, pnv->fw_load_addr, FW_MAX_SIZE); + if (fw_size < 0) { + error_report("Could not load OPAL firmware '%s'", fw_filename); + exit(1); + } + g_free(fw_filename); + + /* load kernel */ + if (machine->kernel_filename) { + long kernel_size; + + kernel_size = load_image_targphys(machine->kernel_filename, + KERNEL_LOAD_ADDR, KERNEL_MAX_SIZE); + if (kernel_size < 0) { + error_report("Could not load kernel '%s'", + machine->kernel_filename); + exit(1); + } + } + + /* load initrd */ + if (machine->initrd_filename) { + pnv->initrd_base = INITRD_LOAD_ADDR; + pnv->initrd_size = load_image_targphys(machine->initrd_filename, + pnv->initrd_base, INITRD_MAX_SIZE); + if (pnv->initrd_size < 0) { + error_report("Could not load initial ram disk '%s'", + machine->initrd_filename); + exit(1); + } + } + + /* MSIs are supported on this platform */ + msi_nonbroken = true; + + /* + * Check compatibility of the specified CPU with the machine + * default. 
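+ * pnv_match_cpu() compares PVRs, so a CPU model from another family + * (e.g. a POWER9 CPU on a powernv8 machine) is rejected here instead + * of silently instantiating mismatched chips.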
+ */ + if (!pnv_match_cpu(mc->default_cpu_type, machine->cpu_type)) { + error_report("invalid CPU model '%s' for %s machine", + machine->cpu_type, mc->name); + exit(1); + } + + /* Create the processor chips */ + i = strlen(machine->cpu_type) - strlen(POWERPC_CPU_TYPE_SUFFIX); + chip_typename = g_strdup_printf(PNV_CHIP_TYPE_NAME("%.*s"), + i, machine->cpu_type); + if (!object_class_by_name(chip_typename)) { + error_report("invalid chip model '%.*s' for %s machine", + i, machine->cpu_type, mc->name); + exit(1); + } + + pnv->num_chips = + machine->smp.max_cpus / (machine->smp.cores * machine->smp.threads); + /* + * TODO: should we decide on how many chips we can create based + * on #cores and Venice vs. Murano vs. Naples chip type etc. + */ + if (!is_power_of_2(pnv->num_chips) || pnv->num_chips > 16) { + error_report("invalid number of chips: '%d'", pnv->num_chips); + error_printf( + "Try '-smp sockets=N'. Valid values are: 1, 2, 4, 8 and 16.\n"); + exit(1); + } + + pnv->chips = g_new0(PnvChip *, pnv->num_chips); + for (i = 0; i < pnv->num_chips; i++) { + char chip_name[32]; + Object *chip = OBJECT(qdev_new(chip_typename)); + uint64_t chip_ram_size = pnv_chip_get_ram_size(pnv, i); + + pnv->chips[i] = PNV_CHIP(chip); + + /* Distribute RAM among the chips */ + object_property_set_int(chip, "ram-start", chip_ram_start, + &error_fatal); + object_property_set_int(chip, "ram-size", chip_ram_size, + &error_fatal); + chip_ram_start += chip_ram_size; + + snprintf(chip_name, sizeof(chip_name), "chip[%d]", i); + object_property_add_child(OBJECT(pnv), chip_name, chip); + object_property_set_int(chip, "chip-id", i, &error_fatal); + object_property_set_int(chip, "nr-cores", machine->smp.cores, + &error_fatal); + object_property_set_int(chip, "nr-threads", machine->smp.threads, + &error_fatal); + /* + * The POWER8 machine uses the XICS interrupt interface. + * Propagate the XICS fabric to the chip and its controllers. + */ + if (object_dynamic_cast(OBJECT(pnv), TYPE_XICS_FABRIC)) { + object_property_set_link(chip, "xics", OBJECT(pnv), &error_abort); + } + if (object_dynamic_cast(OBJECT(pnv), TYPE_XIVE_FABRIC)) { + object_property_set_link(chip, "xive-fabric", OBJECT(pnv), + &error_abort); + } + sysbus_realize_and_unref(SYS_BUS_DEVICE(chip), &error_fatal); + } + g_free(chip_typename); + + /* Instantiate ISA bus on chip 0 */ + pnv->isa_bus = pnv_isa_create(pnv->chips[0], &error_fatal); + + /* Create serial port */ + serial_hds_isa_init(pnv->isa_bus, 0, MAX_ISA_SERIAL_PORTS); + + /* Create an RTC ISA device too */ + mc146818_rtc_init(pnv->isa_bus, 2000, NULL); + + /* + * Create the machine BMC simulator and the IPMI BT device for + * communication with the BMC + */ + if (defaults_enabled()) { + pnv->bmc = pnv_bmc_create(pnv->pnor); + pnv_ipmi_bt_init(pnv->isa_bus, pnv->bmc, 10); + } + + /* + * The PNOR is mapped on the LPC FW address space by the BMC. + * Since we cannot reach the remote BMC machine with LPC memops, + * map it always for now.
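+ * The HIOMAP handlers in pnv_bmc.c then only toggle the readonly and + * enabled state of this same region when the host opens or closes a + * flash window.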
+ */ + memory_region_add_subregion(pnv->chips[0]->fw_mr, PNOR_SPI_OFFSET, + &pnv->pnor->mmio); + + /* + * OpenPOWER systems use an IPMI SEL Event message to notify the + * host to powerdown + */ + pnv->powerdown_notifier.notify = pnv_powerdown_notify; + qemu_register_powerdown_notifier(&pnv->powerdown_notifier); +} + +/* + * 0:21 Reserved - Read as zeros + * 22:24 Chip ID + * 25:28 Core number + * 29:31 Thread ID + */ +static uint32_t pnv_chip_core_pir_p8(PnvChip *chip, uint32_t core_id) +{ + return (chip->chip_id << 7) | (core_id << 3); +} + +static void pnv_chip_power8_intc_create(PnvChip *chip, PowerPCCPU *cpu, + Error **errp) +{ + Pnv8Chip *chip8 = PNV8_CHIP(chip); + Error *local_err = NULL; + Object *obj; + PnvCPUState *pnv_cpu = pnv_cpu_state(cpu); + + obj = icp_create(OBJECT(cpu), TYPE_PNV_ICP, chip8->xics, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + + pnv_cpu->intc = obj; +} + + +static void pnv_chip_power8_intc_reset(PnvChip *chip, PowerPCCPU *cpu) +{ + PnvCPUState *pnv_cpu = pnv_cpu_state(cpu); + + icp_reset(ICP(pnv_cpu->intc)); +} + +static void pnv_chip_power8_intc_destroy(PnvChip *chip, PowerPCCPU *cpu) +{ + PnvCPUState *pnv_cpu = pnv_cpu_state(cpu); + + icp_destroy(ICP(pnv_cpu->intc)); + pnv_cpu->intc = NULL; +} + +static void pnv_chip_power8_intc_print_info(PnvChip *chip, PowerPCCPU *cpu, + Monitor *mon) +{ + icp_pic_print_info(ICP(pnv_cpu_state(cpu)->intc), mon); +} + +/* + * 0:48 Reserved - Read as zeroes + * 49:52 Node ID + * 53:55 Chip ID + * 56 Reserved - Read as zero + * 57:61 Core number + * 62:63 Thread ID + * + * We only care about the lower bits. uint32_t is fine for the moment. + */ +static uint32_t pnv_chip_core_pir_p9(PnvChip *chip, uint32_t core_id) +{ + return (chip->chip_id << 8) | (core_id << 2); +} + +static uint32_t pnv_chip_core_pir_p10(PnvChip *chip, uint32_t core_id) +{ + return (chip->chip_id << 8) | (core_id << 2); +} + +static void pnv_chip_power9_intc_create(PnvChip *chip, PowerPCCPU *cpu, + Error **errp) +{ + Pnv9Chip *chip9 = PNV9_CHIP(chip); + Error *local_err = NULL; + Object *obj; + PnvCPUState *pnv_cpu = pnv_cpu_state(cpu); + + /* + * The core creates its interrupt presenter but the XIVE interrupt + * controller object is initialized afterwards. Hopefully, it's + * only used at runtime.
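+ * (pnv_chip_power9_realize() realizes the XIVE controller right after + * the cores, so the presenter link is valid before the guest can use + * it).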
+ */ + obj = xive_tctx_create(OBJECT(cpu), XIVE_PRESENTER(&chip9->xive), + &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + + pnv_cpu->intc = obj; +} + +static void pnv_chip_power9_intc_reset(PnvChip *chip, PowerPCCPU *cpu) +{ + PnvCPUState *pnv_cpu = pnv_cpu_state(cpu); + + xive_tctx_reset(XIVE_TCTX(pnv_cpu->intc)); +} + +static void pnv_chip_power9_intc_destroy(PnvChip *chip, PowerPCCPU *cpu) +{ + PnvCPUState *pnv_cpu = pnv_cpu_state(cpu); + + xive_tctx_destroy(XIVE_TCTX(pnv_cpu->intc)); + pnv_cpu->intc = NULL; +} + +static void pnv_chip_power9_intc_print_info(PnvChip *chip, PowerPCCPU *cpu, + Monitor *mon) +{ + xive_tctx_pic_print_info(XIVE_TCTX(pnv_cpu_state(cpu)->intc), mon); +} + +static void pnv_chip_power10_intc_create(PnvChip *chip, PowerPCCPU *cpu, + Error **errp) +{ + PnvCPUState *pnv_cpu = pnv_cpu_state(cpu); + + /* Will be defined when the interrupt controller is */ + pnv_cpu->intc = NULL; +} + +static void pnv_chip_power10_intc_reset(PnvChip *chip, PowerPCCPU *cpu) +{ + ; +} + +static void pnv_chip_power10_intc_destroy(PnvChip *chip, PowerPCCPU *cpu) +{ + PnvCPUState *pnv_cpu = pnv_cpu_state(cpu); + + pnv_cpu->intc = NULL; +} + +static void pnv_chip_power10_intc_print_info(PnvChip *chip, PowerPCCPU *cpu, + Monitor *mon) +{ +} + +/* + * Allowed core identifiers on a POWER8 Processor Chip: + * + * EX1 - Venice only + * EX2 - Venice only + * EX3 - Venice only + * EX4 + * EX5 + * EX6 + * + * EX9 - Venice only + * EX10 - Venice only + * EX11 - Venice only + * EX12 + * EX13 + * EX14 + * + */ +#define POWER8E_CORE_MASK (0x7070ull) +#define POWER8_CORE_MASK (0x7e7eull) + +/* + * POWER9 has 24 cores, ids starting at 0x0 + */ +#define POWER9_CORE_MASK (0xffffffffffffffull) + + +#define POWER10_CORE_MASK (0xffffffffffffffull) + +static void pnv_chip_power8_instance_init(Object *obj) +{ + PnvChip *chip = PNV_CHIP(obj); + Pnv8Chip *chip8 = PNV8_CHIP(obj); + PnvChipClass *pcc = PNV_CHIP_GET_CLASS(obj); + int i; + + object_property_add_link(obj, "xics", TYPE_XICS_FABRIC, + (Object **)&chip8->xics, + object_property_allow_set_link, + OBJ_PROP_LINK_STRONG); + + object_initialize_child(obj, "psi", &chip8->psi, TYPE_PNV8_PSI); + + object_initialize_child(obj, "lpc", &chip8->lpc, TYPE_PNV8_LPC); + + object_initialize_child(obj, "occ", &chip8->occ, TYPE_PNV8_OCC); + + object_initialize_child(obj, "homer", &chip8->homer, TYPE_PNV8_HOMER); + + for (i = 0; i < pcc->num_phbs; i++) { + object_initialize_child(obj, "phb[*]", &chip8->phbs[i], TYPE_PNV_PHB3); + } + + /* + * Number of PHBs is the chip default + */ + chip->num_phbs = pcc->num_phbs; +} + +static void pnv_chip_icp_realize(Pnv8Chip *chip8, Error **errp) +{ + PnvChip *chip = PNV_CHIP(chip8); + PnvChipClass *pcc = PNV_CHIP_GET_CLASS(chip); + int i, j; + char *name; + + name = g_strdup_printf("icp-%x", chip->chip_id); + memory_region_init(&chip8->icp_mmio, OBJECT(chip), name, PNV_ICP_SIZE); + sysbus_init_mmio(SYS_BUS_DEVICE(chip), &chip8->icp_mmio); + g_free(name); + + sysbus_mmio_map(SYS_BUS_DEVICE(chip), 1, PNV_ICP_BASE(chip)); + + /* Map the ICP registers for each thread */ + for (i = 0; i < chip->nr_cores; i++) { + PnvCore *pnv_core = chip->cores[i]; + int core_hwid = CPU_CORE(pnv_core)->core_id; + + for (j = 0; j < CPU_CORE(pnv_core)->nr_threads; j++) { + uint32_t pir = pcc->core_pir(chip, core_hwid) + j; + PnvICPState *icp = PNV_ICP(xics_icp_get(chip8->xics, pir)); + + memory_region_add_subregion(&chip8->icp_mmio, pir << 12, + &icp->mmio); + } + } +} + +static void
pnv_chip_power8_realize(DeviceState *dev, Error **errp) +{ + PnvChipClass *pcc = PNV_CHIP_GET_CLASS(dev); + PnvChip *chip = PNV_CHIP(dev); + Pnv8Chip *chip8 = PNV8_CHIP(dev); + Pnv8Psi *psi8 = &chip8->psi; + Error *local_err = NULL; + int i; + + assert(chip8->xics); + + /* XSCOM bridge is first */ + pnv_xscom_realize(chip, PNV_XSCOM_SIZE, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + sysbus_mmio_map(SYS_BUS_DEVICE(chip), 0, PNV_XSCOM_BASE(chip)); + + pcc->parent_realize(dev, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + + /* Processor Service Interface (PSI) Host Bridge */ + object_property_set_int(OBJECT(&chip8->psi), "bar", PNV_PSIHB_BASE(chip), + &error_fatal); + object_property_set_link(OBJECT(&chip8->psi), ICS_PROP_XICS, + OBJECT(chip8->xics), &error_abort); + if (!qdev_realize(DEVICE(&chip8->psi), NULL, errp)) { + return; + } + pnv_xscom_add_subregion(chip, PNV_XSCOM_PSIHB_BASE, + &PNV_PSI(psi8)->xscom_regs); + + /* Create LPC controller */ + object_property_set_link(OBJECT(&chip8->lpc), "psi", OBJECT(&chip8->psi), + &error_abort); + qdev_realize(DEVICE(&chip8->lpc), NULL, &error_fatal); + pnv_xscom_add_subregion(chip, PNV_XSCOM_LPC_BASE, &chip8->lpc.xscom_regs); + + chip->fw_mr = &chip8->lpc.isa_fw; + chip->dt_isa_nodename = g_strdup_printf("/xscom@%" PRIx64 "/isa@%x", + (uint64_t) PNV_XSCOM_BASE(chip), + PNV_XSCOM_LPC_BASE); + + /* + * Interrupt Management Area. This is the memory region holding + * all the Interrupt Control Presenter (ICP) registers + */ + pnv_chip_icp_realize(chip8, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + + /* Create the simplified OCC model */ + object_property_set_link(OBJECT(&chip8->occ), "psi", OBJECT(&chip8->psi), + &error_abort); + if (!qdev_realize(DEVICE(&chip8->occ), NULL, errp)) { + return; + } + pnv_xscom_add_subregion(chip, PNV_XSCOM_OCC_BASE, &chip8->occ.xscom_regs); + + /* OCC SRAM model */ + memory_region_add_subregion(get_system_memory(), PNV_OCC_SENSOR_BASE(chip), + &chip8->occ.sram_regs); + + /* HOMER */ + object_property_set_link(OBJECT(&chip8->homer), "chip", OBJECT(chip), + &error_abort); + if (!qdev_realize(DEVICE(&chip8->homer), NULL, errp)) { + return; + } + /* Homer Xscom region */ + pnv_xscom_add_subregion(chip, PNV_XSCOM_PBA_BASE, &chip8->homer.pba_regs); + + /* Homer mmio region */ + memory_region_add_subregion(get_system_memory(), PNV_HOMER_BASE(chip), + &chip8->homer.regs); + + /* PHB3 controllers */ + for (i = 0; i < chip->num_phbs; i++) { + PnvPHB3 *phb = &chip8->phbs[i]; + PnvPBCQState *pbcq = &phb->pbcq; + + object_property_set_int(OBJECT(phb), "index", i, &error_fatal); + object_property_set_int(OBJECT(phb), "chip-id", chip->chip_id, + &error_fatal); + if (!sysbus_realize(SYS_BUS_DEVICE(phb), errp)) { + return; + } + + /* Populate the XSCOM address space. 
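+ * Each PHB3 exposes three register sets: nest and pci at a 0x400 + * stride per PHB id, and spci at a 0x40 stride.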
*/ + pnv_xscom_add_subregion(chip, + PNV_XSCOM_PBCQ_NEST_BASE + 0x400 * phb->phb_id, + &pbcq->xscom_nest_regs); + pnv_xscom_add_subregion(chip, + PNV_XSCOM_PBCQ_PCI_BASE + 0x400 * phb->phb_id, + &pbcq->xscom_pci_regs); + pnv_xscom_add_subregion(chip, + PNV_XSCOM_PBCQ_SPCI_BASE + 0x040 * phb->phb_id, + &pbcq->xscom_spci_regs); + } +} + +static uint32_t pnv_chip_power8_xscom_pcba(PnvChip *chip, uint64_t addr) +{ + addr &= (PNV_XSCOM_SIZE - 1); + return ((addr >> 4) & ~0xfull) | ((addr >> 3) & 0xf); +} + +static void pnv_chip_power8e_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PnvChipClass *k = PNV_CHIP_CLASS(klass); + + k->chip_cfam_id = 0x221ef04980000000ull; /* P8 Murano DD2.1 */ + k->cores_mask = POWER8E_CORE_MASK; + k->num_phbs = 3; + k->core_pir = pnv_chip_core_pir_p8; + k->intc_create = pnv_chip_power8_intc_create; + k->intc_reset = pnv_chip_power8_intc_reset; + k->intc_destroy = pnv_chip_power8_intc_destroy; + k->intc_print_info = pnv_chip_power8_intc_print_info; + k->isa_create = pnv_chip_power8_isa_create; + k->dt_populate = pnv_chip_power8_dt_populate; + k->pic_print_info = pnv_chip_power8_pic_print_info; + k->xscom_core_base = pnv_chip_power8_xscom_core_base; + k->xscom_pcba = pnv_chip_power8_xscom_pcba; + dc->desc = "PowerNV Chip POWER8E"; + + device_class_set_parent_realize(dc, pnv_chip_power8_realize, + &k->parent_realize); +} + +static void pnv_chip_power8_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PnvChipClass *k = PNV_CHIP_CLASS(klass); + + k->chip_cfam_id = 0x220ea04980000000ull; /* P8 Venice DD2.0 */ + k->cores_mask = POWER8_CORE_MASK; + k->num_phbs = 3; + k->core_pir = pnv_chip_core_pir_p8; + k->intc_create = pnv_chip_power8_intc_create; + k->intc_reset = pnv_chip_power8_intc_reset; + k->intc_destroy = pnv_chip_power8_intc_destroy; + k->intc_print_info = pnv_chip_power8_intc_print_info; + k->isa_create = pnv_chip_power8_isa_create; + k->dt_populate = pnv_chip_power8_dt_populate; + k->pic_print_info = pnv_chip_power8_pic_print_info; + k->xscom_core_base = pnv_chip_power8_xscom_core_base; + k->xscom_pcba = pnv_chip_power8_xscom_pcba; + dc->desc = "PowerNV Chip POWER8"; + + device_class_set_parent_realize(dc, pnv_chip_power8_realize, + &k->parent_realize); +} + +static void pnv_chip_power8nvl_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PnvChipClass *k = PNV_CHIP_CLASS(klass); + + k->chip_cfam_id = 0x120d304980000000ull; /* P8 Naples DD1.0 */ + k->cores_mask = POWER8_CORE_MASK; + k->num_phbs = 3; + k->core_pir = pnv_chip_core_pir_p8; + k->intc_create = pnv_chip_power8_intc_create; + k->intc_reset = pnv_chip_power8_intc_reset; + k->intc_destroy = pnv_chip_power8_intc_destroy; + k->intc_print_info = pnv_chip_power8_intc_print_info; + k->isa_create = pnv_chip_power8nvl_isa_create; + k->dt_populate = pnv_chip_power8_dt_populate; + k->pic_print_info = pnv_chip_power8_pic_print_info; + k->xscom_core_base = pnv_chip_power8_xscom_core_base; + k->xscom_pcba = pnv_chip_power8_xscom_pcba; + dc->desc = "PowerNV Chip POWER8NVL"; + + device_class_set_parent_realize(dc, pnv_chip_power8_realize, + &k->parent_realize); +} + +static void pnv_chip_power9_instance_init(Object *obj) +{ + PnvChip *chip = PNV_CHIP(obj); + Pnv9Chip *chip9 = PNV9_CHIP(obj); + PnvChipClass *pcc = PNV_CHIP_GET_CLASS(obj); + int i; + + object_initialize_child(obj, "xive", &chip9->xive, TYPE_PNV_XIVE); + object_property_add_alias(obj, "xive-fabric", OBJECT(&chip9->xive), + 
"xive-fabric"); + + object_initialize_child(obj, "psi", &chip9->psi, TYPE_PNV9_PSI); + + object_initialize_child(obj, "lpc", &chip9->lpc, TYPE_PNV9_LPC); + + object_initialize_child(obj, "occ", &chip9->occ, TYPE_PNV9_OCC); + + object_initialize_child(obj, "homer", &chip9->homer, TYPE_PNV9_HOMER); + + for (i = 0; i < PNV9_CHIP_MAX_PEC; i++) { + object_initialize_child(obj, "pec[*]", &chip9->pecs[i], + TYPE_PNV_PHB4_PEC); + } + + /* + * Number of PHBs is the chip default + */ + chip->num_phbs = pcc->num_phbs; +} + +static void pnv_chip_quad_realize(Pnv9Chip *chip9, Error **errp) +{ + PnvChip *chip = PNV_CHIP(chip9); + int i; + + chip9->nr_quads = DIV_ROUND_UP(chip->nr_cores, 4); + chip9->quads = g_new0(PnvQuad, chip9->nr_quads); + + for (i = 0; i < chip9->nr_quads; i++) { + char eq_name[32]; + PnvQuad *eq = &chip9->quads[i]; + PnvCore *pnv_core = chip->cores[i * 4]; + int core_id = CPU_CORE(pnv_core)->core_id; + + snprintf(eq_name, sizeof(eq_name), "eq[%d]", core_id); + object_initialize_child_with_props(OBJECT(chip), eq_name, eq, + sizeof(*eq), TYPE_PNV_QUAD, + &error_fatal, NULL); + + object_property_set_int(OBJECT(eq), "quad-id", core_id, &error_fatal); + qdev_realize(DEVICE(eq), NULL, &error_fatal); + + pnv_xscom_add_subregion(chip, PNV9_XSCOM_EQ_BASE(eq->quad_id), + &eq->xscom_regs); + } +} + +static void pnv_chip_power9_phb_realize(PnvChip *chip, Error **errp) +{ + Pnv9Chip *chip9 = PNV9_CHIP(chip); + int i, j; + int phb_id = 0; + + for (i = 0; i < PNV9_CHIP_MAX_PEC; i++) { + PnvPhb4PecState *pec = &chip9->pecs[i]; + PnvPhb4PecClass *pecc = PNV_PHB4_PEC_GET_CLASS(pec); + uint32_t pec_nest_base; + uint32_t pec_pci_base; + + object_property_set_int(OBJECT(pec), "index", i, &error_fatal); + /* + * PEC0 -> 1 stack + * PEC1 -> 2 stacks + * PEC2 -> 3 stacks + */ + object_property_set_int(OBJECT(pec), "num-stacks", i + 1, + &error_fatal); + object_property_set_int(OBJECT(pec), "chip-id", chip->chip_id, + &error_fatal); + object_property_set_link(OBJECT(pec), "system-memory", + OBJECT(get_system_memory()), &error_abort); + if (!qdev_realize(DEVICE(pec), NULL, errp)) { + return; + } + + pec_nest_base = pecc->xscom_nest_base(pec); + pec_pci_base = pecc->xscom_pci_base(pec); + + pnv_xscom_add_subregion(chip, pec_nest_base, &pec->nest_regs_mr); + pnv_xscom_add_subregion(chip, pec_pci_base, &pec->pci_regs_mr); + + for (j = 0; j < pec->num_stacks && phb_id < chip->num_phbs; + j++, phb_id++) { + PnvPhb4PecStack *stack = &pec->stacks[j]; + Object *obj = OBJECT(&stack->phb); + + object_property_set_int(obj, "index", phb_id, &error_fatal); + object_property_set_int(obj, "chip-id", chip->chip_id, + &error_fatal); + object_property_set_int(obj, "version", PNV_PHB4_VERSION, + &error_fatal); + object_property_set_int(obj, "device-id", PNV_PHB4_DEVICE_ID, + &error_fatal); + object_property_set_link(obj, "stack", OBJECT(stack), + &error_abort); + if (!sysbus_realize(SYS_BUS_DEVICE(obj), errp)) { + return; + } + + /* Populate the XSCOM address space. 
*/ + pnv_xscom_add_subregion(chip, + pec_nest_base + 0x40 * (stack->stack_no + 1), + &stack->nest_regs_mr); + pnv_xscom_add_subregion(chip, + pec_pci_base + 0x40 * (stack->stack_no + 1), + &stack->pci_regs_mr); + pnv_xscom_add_subregion(chip, + pec_pci_base + PNV9_XSCOM_PEC_PCI_STK0 + + 0x40 * stack->stack_no, + &stack->phb_regs_mr); + } + } +} + +static void pnv_chip_power9_realize(DeviceState *dev, Error **errp) +{ + PnvChipClass *pcc = PNV_CHIP_GET_CLASS(dev); + Pnv9Chip *chip9 = PNV9_CHIP(dev); + PnvChip *chip = PNV_CHIP(dev); + Pnv9Psi *psi9 = &chip9->psi; + Error *local_err = NULL; + + /* XSCOM bridge is first */ + pnv_xscom_realize(chip, PNV9_XSCOM_SIZE, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + sysbus_mmio_map(SYS_BUS_DEVICE(chip), 0, PNV9_XSCOM_BASE(chip)); + + pcc->parent_realize(dev, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + + pnv_chip_quad_realize(chip9, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + + /* XIVE interrupt controller (POWER9) */ + object_property_set_int(OBJECT(&chip9->xive), "ic-bar", + PNV9_XIVE_IC_BASE(chip), &error_fatal); + object_property_set_int(OBJECT(&chip9->xive), "vc-bar", + PNV9_XIVE_VC_BASE(chip), &error_fatal); + object_property_set_int(OBJECT(&chip9->xive), "pc-bar", + PNV9_XIVE_PC_BASE(chip), &error_fatal); + object_property_set_int(OBJECT(&chip9->xive), "tm-bar", + PNV9_XIVE_TM_BASE(chip), &error_fatal); + object_property_set_link(OBJECT(&chip9->xive), "chip", OBJECT(chip), + &error_abort); + if (!sysbus_realize(SYS_BUS_DEVICE(&chip9->xive), errp)) { + return; + } + pnv_xscom_add_subregion(chip, PNV9_XSCOM_XIVE_BASE, + &chip9->xive.xscom_regs); + + /* Processor Service Interface (PSI) Host Bridge */ + object_property_set_int(OBJECT(&chip9->psi), "bar", PNV9_PSIHB_BASE(chip), + &error_fatal); + if (!qdev_realize(DEVICE(&chip9->psi), NULL, errp)) { + return; + } + pnv_xscom_add_subregion(chip, PNV9_XSCOM_PSIHB_BASE, + &PNV_PSI(psi9)->xscom_regs); + + /* LPC */ + object_property_set_link(OBJECT(&chip9->lpc), "psi", OBJECT(&chip9->psi), + &error_abort); + if (!qdev_realize(DEVICE(&chip9->lpc), NULL, errp)) { + return; + } + memory_region_add_subregion(get_system_memory(), PNV9_LPCM_BASE(chip), + &chip9->lpc.xscom_regs); + + chip->fw_mr = &chip9->lpc.isa_fw; + chip->dt_isa_nodename = g_strdup_printf("/lpcm-opb@%" PRIx64 "/lpc@0", + (uint64_t) PNV9_LPCM_BASE(chip)); + + /* Create the simplified OCC model */ + object_property_set_link(OBJECT(&chip9->occ), "psi", OBJECT(&chip9->psi), + &error_abort); + if (!qdev_realize(DEVICE(&chip9->occ), NULL, errp)) { + return; + } + pnv_xscom_add_subregion(chip, PNV9_XSCOM_OCC_BASE, &chip9->occ.xscom_regs); + + /* OCC SRAM model */ + memory_region_add_subregion(get_system_memory(), PNV9_OCC_SENSOR_BASE(chip), + &chip9->occ.sram_regs); + + /* HOMER */ + object_property_set_link(OBJECT(&chip9->homer), "chip", OBJECT(chip), + &error_abort); + if (!qdev_realize(DEVICE(&chip9->homer), NULL, errp)) { + return; + } + /* Homer Xscom region */ + pnv_xscom_add_subregion(chip, PNV9_XSCOM_PBA_BASE, &chip9->homer.pba_regs); + + /* Homer mmio region */ + memory_region_add_subregion(get_system_memory(), PNV9_HOMER_BASE(chip), + &chip9->homer.regs); + + /* PHBs */ + pnv_chip_power9_phb_realize(chip, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } +} + +static uint32_t pnv_chip_power9_xscom_pcba(PnvChip *chip, uint64_t addr) +{ + addr &= (PNV9_XSCOM_SIZE - 1); + return addr >> 3; 
+} + +static void pnv_chip_power9_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PnvChipClass *k = PNV_CHIP_CLASS(klass); + + k->chip_cfam_id = 0x220d104900008000ull; /* P9 Nimbus DD2.0 */ + k->cores_mask = POWER9_CORE_MASK; + k->core_pir = pnv_chip_core_pir_p9; + k->intc_create = pnv_chip_power9_intc_create; + k->intc_reset = pnv_chip_power9_intc_reset; + k->intc_destroy = pnv_chip_power9_intc_destroy; + k->intc_print_info = pnv_chip_power9_intc_print_info; + k->isa_create = pnv_chip_power9_isa_create; + k->dt_populate = pnv_chip_power9_dt_populate; + k->pic_print_info = pnv_chip_power9_pic_print_info; + k->xscom_core_base = pnv_chip_power9_xscom_core_base; + k->xscom_pcba = pnv_chip_power9_xscom_pcba; + dc->desc = "PowerNV Chip POWER9"; + k->num_phbs = 6; + + device_class_set_parent_realize(dc, pnv_chip_power9_realize, + &k->parent_realize); +} + +static void pnv_chip_power10_instance_init(Object *obj) +{ + Pnv10Chip *chip10 = PNV10_CHIP(obj); + + object_initialize_child(obj, "psi", &chip10->psi, TYPE_PNV10_PSI); + object_initialize_child(obj, "lpc", &chip10->lpc, TYPE_PNV10_LPC); +} + +static void pnv_chip_power10_realize(DeviceState *dev, Error **errp) +{ + PnvChipClass *pcc = PNV_CHIP_GET_CLASS(dev); + PnvChip *chip = PNV_CHIP(dev); + Pnv10Chip *chip10 = PNV10_CHIP(dev); + Error *local_err = NULL; + + /* XSCOM bridge is first */ + pnv_xscom_realize(chip, PNV10_XSCOM_SIZE, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + sysbus_mmio_map(SYS_BUS_DEVICE(chip), 0, PNV10_XSCOM_BASE(chip)); + + pcc->parent_realize(dev, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + + /* Processor Service Interface (PSI) Host Bridge */ + object_property_set_int(OBJECT(&chip10->psi), "bar", + PNV10_PSIHB_BASE(chip), &error_fatal); + if (!qdev_realize(DEVICE(&chip10->psi), NULL, errp)) { + return; + } + pnv_xscom_add_subregion(chip, PNV10_XSCOM_PSIHB_BASE, + &PNV_PSI(&chip10->psi)->xscom_regs); + + /* LPC */ + object_property_set_link(OBJECT(&chip10->lpc), "psi", + OBJECT(&chip10->psi), &error_abort); + if (!qdev_realize(DEVICE(&chip10->lpc), NULL, errp)) { + return; + } + memory_region_add_subregion(get_system_memory(), PNV10_LPCM_BASE(chip), + &chip10->lpc.xscom_regs); + + chip->fw_mr = &chip10->lpc.isa_fw; + chip->dt_isa_nodename = g_strdup_printf("/lpcm-opb@%" PRIx64 "/lpc@0", + (uint64_t) PNV10_LPCM_BASE(chip)); +} + +static uint32_t pnv_chip_power10_xscom_pcba(PnvChip *chip, uint64_t addr) +{ + addr &= (PNV10_XSCOM_SIZE - 1); + return addr >> 3; +} + +static void pnv_chip_power10_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PnvChipClass *k = PNV_CHIP_CLASS(klass); + + k->chip_cfam_id = 0x120da04900008000ull; /* P10 DD1.0 (with NX) */ + k->cores_mask = POWER10_CORE_MASK; + k->core_pir = pnv_chip_core_pir_p10; + k->intc_create = pnv_chip_power10_intc_create; + k->intc_reset = pnv_chip_power10_intc_reset; + k->intc_destroy = pnv_chip_power10_intc_destroy; + k->intc_print_info = pnv_chip_power10_intc_print_info; + k->isa_create = pnv_chip_power10_isa_create; + k->dt_populate = pnv_chip_power10_dt_populate; + k->pic_print_info = pnv_chip_power10_pic_print_info; + k->xscom_core_base = pnv_chip_power10_xscom_core_base; + k->xscom_pcba = pnv_chip_power10_xscom_pcba; + dc->desc = "PowerNV Chip POWER10"; + + device_class_set_parent_realize(dc, pnv_chip_power10_realize, + &k->parent_realize); +} + +static void pnv_chip_core_sanitize(PnvChip *chip, Error 
**errp) +{ + PnvChipClass *pcc = PNV_CHIP_GET_CLASS(chip); + int cores_max; + + /* + * No custom mask for this chip, let's use the default one from + * the chip class + */ + if (!chip->cores_mask) { + chip->cores_mask = pcc->cores_mask; + } + + /* filter alien core ids; some are reserved */ + if ((chip->cores_mask & pcc->cores_mask) != chip->cores_mask) { + error_setg(errp, "warning: invalid core mask for chip 0x%"PRIx64"!", + chip->cores_mask); + return; + } + chip->cores_mask &= pcc->cores_mask; + + /* now that we have a sane layout, let's check the number of cores */ + cores_max = ctpop64(chip->cores_mask); + if (chip->nr_cores > cores_max) { + error_setg(errp, "warning: too many cores for chip! Limit is %d", + cores_max); + return; + } +} + +static void pnv_chip_core_realize(PnvChip *chip, Error **errp) +{ + Error *error = NULL; + PnvChipClass *pcc = PNV_CHIP_GET_CLASS(chip); + const char *typename = pnv_chip_core_typename(chip); + int i, core_hwid; + PnvMachineState *pnv = PNV_MACHINE(qdev_get_machine()); + + if (!object_class_by_name(typename)) { + error_setg(errp, "Unable to find PowerNV CPU Core '%s'", typename); + return; + } + + /* Cores */ + pnv_chip_core_sanitize(chip, &error); + if (error) { + error_propagate(errp, error); + return; + } + + chip->cores = g_new0(PnvCore *, chip->nr_cores); + + for (i = 0, core_hwid = 0; (core_hwid < sizeof(chip->cores_mask) * 8) + && (i < chip->nr_cores); core_hwid++) { + char core_name[32]; + PnvCore *pnv_core; + uint64_t xscom_core_base; + + if (!(chip->cores_mask & (1ull << core_hwid))) { + continue; + } + + pnv_core = PNV_CORE(object_new(typename)); + + snprintf(core_name, sizeof(core_name), "core[%d]", core_hwid); + object_property_add_child(OBJECT(chip), core_name, OBJECT(pnv_core)); + chip->cores[i] = pnv_core; + object_property_set_int(OBJECT(pnv_core), "nr-threads", + chip->nr_threads, &error_fatal); + object_property_set_int(OBJECT(pnv_core), CPU_CORE_PROP_CORE_ID, + core_hwid, &error_fatal); + object_property_set_int(OBJECT(pnv_core), "pir", + pcc->core_pir(chip, core_hwid), &error_fatal); + object_property_set_int(OBJECT(pnv_core), "hrmor", pnv->fw_load_addr, + &error_fatal); + object_property_set_link(OBJECT(pnv_core), "chip", OBJECT(chip), + &error_abort); + qdev_realize(DEVICE(pnv_core), NULL, &error_fatal); + + /* Each core has an XSCOM MMIO region */ + xscom_core_base = pcc->xscom_core_base(chip, core_hwid); + + pnv_xscom_add_subregion(chip, xscom_core_base, + &pnv_core->xscom_regs); + i++; + } +} + +static void pnv_chip_realize(DeviceState *dev, Error **errp) +{ + PnvChip *chip = PNV_CHIP(dev); + Error *error = NULL; + + /* Cores */ + pnv_chip_core_realize(chip, &error); + if (error) { + error_propagate(errp, error); + return; + } +} + +static Property pnv_chip_properties[] = { + DEFINE_PROP_UINT32("chip-id", PnvChip, chip_id, 0), + DEFINE_PROP_UINT64("ram-start", PnvChip, ram_start, 0), + DEFINE_PROP_UINT64("ram-size", PnvChip, ram_size, 0), + DEFINE_PROP_UINT32("nr-cores", PnvChip, nr_cores, 1), + DEFINE_PROP_UINT64("cores-mask", PnvChip, cores_mask, 0x0), + DEFINE_PROP_UINT32("nr-threads", PnvChip, nr_threads, 1), + DEFINE_PROP_UINT32("num-phbs", PnvChip, num_phbs, 0), + DEFINE_PROP_END_OF_LIST(), +}; + +static void pnv_chip_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + set_bit(DEVICE_CATEGORY_CPU, dc->categories); + dc->realize = pnv_chip_realize; + device_class_set_props(dc, pnv_chip_properties); + dc->desc = "PowerNV Chip"; +} + +PowerPCCPU *pnv_chip_find_cpu(PnvChip *chip,
uint32_t pir) +{ + int i, j; + + for (i = 0; i < chip->nr_cores; i++) { + PnvCore *pc = chip->cores[i]; + CPUCore *cc = CPU_CORE(pc); + + for (j = 0; j < cc->nr_threads; j++) { + if (ppc_cpu_pir(pc->threads[j]) == pir) { + return pc->threads[j]; + } + } + } + return NULL; +} + +static ICSState *pnv_ics_get(XICSFabric *xi, int irq) +{ + PnvMachineState *pnv = PNV_MACHINE(xi); + int i, j; + + for (i = 0; i < pnv->num_chips; i++) { + PnvChip *chip = pnv->chips[i]; + Pnv8Chip *chip8 = PNV8_CHIP(pnv->chips[i]); + + if (ics_valid_irq(&chip8->psi.ics, irq)) { + return &chip8->psi.ics; + } + for (j = 0; j < chip->num_phbs; j++) { + if (ics_valid_irq(&chip8->phbs[j].lsis, irq)) { + return &chip8->phbs[j].lsis; + } + if (ics_valid_irq(ICS(&chip8->phbs[j].msis), irq)) { + return ICS(&chip8->phbs[j].msis); + } + } + } + return NULL; +} + +static void pnv_ics_resend(XICSFabric *xi) +{ + PnvMachineState *pnv = PNV_MACHINE(xi); + int i, j; + + for (i = 0; i < pnv->num_chips; i++) { + PnvChip *chip = pnv->chips[i]; + Pnv8Chip *chip8 = PNV8_CHIP(pnv->chips[i]); + + ics_resend(&chip8->psi.ics); + for (j = 0; j < chip->num_phbs; j++) { + ics_resend(&chip8->phbs[j].lsis); + ics_resend(ICS(&chip8->phbs[j].msis)); + } + } +} + +static ICPState *pnv_icp_get(XICSFabric *xi, int pir) +{ + PowerPCCPU *cpu = ppc_get_vcpu_by_pir(pir); + + return cpu ? ICP(pnv_cpu_state(cpu)->intc) : NULL; +} + +static void pnv_pic_print_info(InterruptStatsProvider *obj, + Monitor *mon) +{ + PnvMachineState *pnv = PNV_MACHINE(obj); + int i; + CPUState *cs; + + CPU_FOREACH(cs) { + PowerPCCPU *cpu = POWERPC_CPU(cs); + + /* XXX: loop on each chip/core/thread instead of CPU_FOREACH() */ + PNV_CHIP_GET_CLASS(pnv->chips[0])->intc_print_info(pnv->chips[0], cpu, + mon); + } + + for (i = 0; i < pnv->num_chips; i++) { + PNV_CHIP_GET_CLASS(pnv->chips[i])->pic_print_info(pnv->chips[i], mon); + } +} + +static int pnv_match_nvt(XiveFabric *xfb, uint8_t format, + uint8_t nvt_blk, uint32_t nvt_idx, + bool cam_ignore, uint8_t priority, + uint32_t logic_serv, + XiveTCTXMatch *match) +{ + PnvMachineState *pnv = PNV_MACHINE(xfb); + int total_count = 0; + int i; + + for (i = 0; i < pnv->num_chips; i++) { + Pnv9Chip *chip9 = PNV9_CHIP(pnv->chips[i]); + XivePresenter *xptr = XIVE_PRESENTER(&chip9->xive); + XivePresenterClass *xpc = XIVE_PRESENTER_GET_CLASS(xptr); + int count; + + count = xpc->match_nvt(xptr, format, nvt_blk, nvt_idx, cam_ignore, + priority, logic_serv, match); + + if (count < 0) { + return count; + } + + total_count += count; + } + + return total_count; +} + +static void pnv_machine_power8_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + XICSFabricClass *xic = XICS_FABRIC_CLASS(oc); + PnvMachineClass *pmc = PNV_MACHINE_CLASS(oc); + static const char compat[] = "qemu,powernv8\0qemu,powernv\0ibm,powernv"; + + mc->desc = "IBM PowerNV (Non-Virtualized) POWER8"; + mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("power8_v2.0"); + + xic->icp_get = pnv_icp_get; + xic->ics_get = pnv_ics_get; + xic->ics_resend = pnv_ics_resend; + + pmc->compat = compat; + pmc->compat_size = sizeof(compat); +} + +static void pnv_machine_power9_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + XiveFabricClass *xfc = XIVE_FABRIC_CLASS(oc); + PnvMachineClass *pmc = PNV_MACHINE_CLASS(oc); + static const char compat[] = "qemu,powernv9\0ibm,powernv"; + + mc->desc = "IBM PowerNV (Non-Virtualized) POWER9"; + mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("power9_v2.0"); + xfc->match_nvt = pnv_match_nvt; + + 
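+ /* Plain "-machine powernv" is an alias for this POWER9 machine */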
mc->alias = "powernv"; + + pmc->compat = compat; + pmc->compat_size = sizeof(compat); + pmc->dt_power_mgt = pnv_dt_power_mgt; +} + +static void pnv_machine_power10_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + PnvMachineClass *pmc = PNV_MACHINE_CLASS(oc); + static const char compat[] = "qemu,powernv10\0ibm,powernv"; + + mc->desc = "IBM PowerNV (Non-Virtualized) POWER10"; + mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("power10_v2.0"); + + pmc->compat = compat; + pmc->compat_size = sizeof(compat); + pmc->dt_power_mgt = pnv_dt_power_mgt; +} + +static bool pnv_machine_get_hb(Object *obj, Error **errp) +{ + PnvMachineState *pnv = PNV_MACHINE(obj); + + return !!pnv->fw_load_addr; +} + +static void pnv_machine_set_hb(Object *obj, bool value, Error **errp) +{ + PnvMachineState *pnv = PNV_MACHINE(obj); + + if (value) { + pnv->fw_load_addr = 0x8000000; + } +} + +static void pnv_cpu_do_nmi_on_cpu(CPUState *cs, run_on_cpu_data arg) +{ + PowerPCCPU *cpu = POWERPC_CPU(cs); + CPUPPCState *env = &cpu->env; + + cpu_synchronize_state(cs); + ppc_cpu_do_system_reset(cs); + if (env->spr[SPR_SRR1] & SRR1_WAKESTATE) { + /* + * Power-save wakeups, as indicated by non-zero SRR1[46:47] put the + * wakeup reason in SRR1[42:45], system reset is indicated with 0b0100 + * (PPC_BIT(43)). + */ + if (!(env->spr[SPR_SRR1] & SRR1_WAKERESET)) { + warn_report("ppc_cpu_do_system_reset does not set system reset wakeup reason"); + env->spr[SPR_SRR1] |= SRR1_WAKERESET; + } + } else { + /* + * For non-powersave system resets, SRR1[42:45] are defined to be + * implementation-dependent. The POWER9 User Manual specifies that + * an external (SCOM driven, which may come from a BMC nmi command or + * another CPU requesting a NMI IPI) system reset exception should be + * 0b0010 (PPC_BIT(44)). 
+ */ + env->spr[SPR_SRR1] |= SRR1_WAKESCOM; + } +} + +static void pnv_nmi(NMIState *n, int cpu_index, Error **errp) +{ + CPUState *cs; + + CPU_FOREACH(cs) { + async_run_on_cpu(cs, pnv_cpu_do_nmi_on_cpu, RUN_ON_CPU_NULL); + } +} + +static void pnv_machine_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + InterruptStatsProviderClass *ispc = INTERRUPT_STATS_PROVIDER_CLASS(oc); + NMIClass *nc = NMI_CLASS(oc); + + mc->desc = "IBM PowerNV (Non-Virtualized)"; + mc->init = pnv_init; + mc->reset = pnv_reset; + mc->max_cpus = MAX_CPUS; + /* Pnv provides an AHCI device for storage */ + mc->block_default_type = IF_IDE; + mc->no_parallel = 1; + mc->default_boot_order = NULL; + /* + * RAM defaults to less than 2048 MiB for 32-bit hosts, and large + * enough to fit the maximum initrd size at its load address + */ + mc->default_ram_size = 1 * GiB; + mc->default_ram_id = "pnv.ram"; + ispc->print_info = pnv_pic_print_info; + nc->nmi_monitor_handler = pnv_nmi; + + object_class_property_add_bool(oc, "hb-mode", + pnv_machine_get_hb, pnv_machine_set_hb); + object_class_property_set_description(oc, "hb-mode", + "Use a hostboot-like boot loader"); +} + +#define DEFINE_PNV8_CHIP_TYPE(type, class_initfn) \ + { \ + .name = type, \ + .class_init = class_initfn, \ + .parent = TYPE_PNV8_CHIP, \ + } + +#define DEFINE_PNV9_CHIP_TYPE(type, class_initfn) \ + { \ + .name = type, \ + .class_init = class_initfn, \ + .parent = TYPE_PNV9_CHIP, \ + } + +#define DEFINE_PNV10_CHIP_TYPE(type, class_initfn) \ + { \ + .name = type, \ + .class_init = class_initfn, \ + .parent = TYPE_PNV10_CHIP, \ + } + +static const TypeInfo types[] = { + { + .name = MACHINE_TYPE_NAME("powernv10"), + .parent = TYPE_PNV_MACHINE, + .class_init = pnv_machine_power10_class_init, + }, + { + .name = MACHINE_TYPE_NAME("powernv9"), + .parent = TYPE_PNV_MACHINE, + .class_init = pnv_machine_power9_class_init, + .interfaces = (InterfaceInfo[]) { + { TYPE_XIVE_FABRIC }, + { }, + }, + }, + { + .name = MACHINE_TYPE_NAME("powernv8"), + .parent = TYPE_PNV_MACHINE, + .class_init = pnv_machine_power8_class_init, + .interfaces = (InterfaceInfo[]) { + { TYPE_XICS_FABRIC }, + { }, + }, + }, + { + .name = TYPE_PNV_MACHINE, + .parent = TYPE_MACHINE, + .abstract = true, + .instance_size = sizeof(PnvMachineState), + .class_init = pnv_machine_class_init, + .class_size = sizeof(PnvMachineClass), + .interfaces = (InterfaceInfo[]) { + { TYPE_INTERRUPT_STATS_PROVIDER }, + { TYPE_NMI }, + { }, + }, + }, + { + .name = TYPE_PNV_CHIP, + .parent = TYPE_SYS_BUS_DEVICE, + .class_init = pnv_chip_class_init, + .instance_size = sizeof(PnvChip), + .class_size = sizeof(PnvChipClass), + .abstract = true, + }, + + /* + * P10 chip and variants + */ + { + .name = TYPE_PNV10_CHIP, + .parent = TYPE_PNV_CHIP, + .instance_init = pnv_chip_power10_instance_init, + .instance_size = sizeof(Pnv10Chip), + }, + DEFINE_PNV10_CHIP_TYPE(TYPE_PNV_CHIP_POWER10, pnv_chip_power10_class_init), + + /* + * P9 chip and variants + */ + { + .name = TYPE_PNV9_CHIP, + .parent = TYPE_PNV_CHIP, + .instance_init = pnv_chip_power9_instance_init, + .instance_size = sizeof(Pnv9Chip), + }, + DEFINE_PNV9_CHIP_TYPE(TYPE_PNV_CHIP_POWER9, pnv_chip_power9_class_init), + + /* + * P8 chip and variants + */ + { + .name = TYPE_PNV8_CHIP, + .parent = TYPE_PNV_CHIP, + .instance_init = pnv_chip_power8_instance_init, + .instance_size = sizeof(Pnv8Chip), + }, + DEFINE_PNV8_CHIP_TYPE(TYPE_PNV_CHIP_POWER8, pnv_chip_power8_class_init), + DEFINE_PNV8_CHIP_TYPE(TYPE_PNV_CHIP_POWER8E, pnv_chip_power8e_class_init), +
DEFINE_PNV8_CHIP_TYPE(TYPE_PNV_CHIP_POWER8NVL, + pnv_chip_power8nvl_class_init), +}; + +DEFINE_TYPES(types) diff --git a/hw/ppc/pnv_bmc.c b/hw/ppc/pnv_bmc.c new file mode 100644 index 000000000..75a22ce50 --- /dev/null +++ b/hw/ppc/pnv_bmc.c @@ -0,0 +1,313 @@ +/* + * QEMU PowerNV, BMC related functions + * + * Copyright (c) 2016-2017, IBM Corporation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see <http://www.gnu.org/licenses/>. + */ + +#include "qemu/osdep.h" +#include "qemu-common.h" +#include "qapi/error.h" +#include "target/ppc/cpu.h" +#include "qemu/log.h" +#include "hw/ipmi/ipmi.h" +#include "hw/ppc/fdt.h" + +#include "hw/ppc/pnv.h" + +#include <libfdt.h> + +/* TODO: include definition in ipmi.h */ +#define IPMI_SDR_FULL_TYPE 1 + +/* + * OEM SEL Event data packet sent by BMC in response to a Read Event + * Message Buffer command + */ +typedef struct OemSel { + /* SEL header */ + uint8_t id[2]; + uint8_t type; + uint8_t timestamp[4]; + uint8_t manuf_id[3]; + + /* OEM SEL data (6 bytes) follows */ + uint8_t netfun; + uint8_t cmd; + uint8_t data[4]; +} OemSel; + +#define SOFT_OFF 0x00 +#define SOFT_REBOOT 0x01 + +static bool pnv_bmc_is_simulator(IPMIBmc *bmc) +{ + return object_dynamic_cast(OBJECT(bmc), TYPE_IPMI_BMC_SIMULATOR); +} + +static void pnv_gen_oem_sel(IPMIBmc *bmc, uint8_t reboot) +{ + /* IPMI SEL Events are 16 bytes long */ + OemSel sel = { + .id = { 0x55, 0x55 }, + .type = 0xC0, /* OEM */ + .manuf_id = { 0x0, 0x0, 0x0 }, + .timestamp = { 0x0, 0x0, 0x0, 0x0 }, + .netfun = 0x3A, /* IBM */ + .cmd = 0x04, /* AMI OEM SEL Power Notification */ + .data = { reboot, 0xFF, 0xFF, 0xFF }, + }; + + ipmi_bmc_gen_event(bmc, (uint8_t *) &sel, 0 /* do not log the event */); +} + +void pnv_bmc_powerdown(IPMIBmc *bmc) +{ + pnv_gen_oem_sel(bmc, SOFT_OFF); +} + +void pnv_dt_bmc_sensors(IPMIBmc *bmc, void *fdt) +{ + int offset; + int i; + const struct ipmi_sdr_compact *sdr; + uint16_t nextrec; + + if (!pnv_bmc_is_simulator(bmc)) { + return; + } + + offset = fdt_add_subnode(fdt, 0, "bmc"); + _FDT(offset); + + _FDT((fdt_setprop_string(fdt, offset, "name", "bmc"))); + offset = fdt_add_subnode(fdt, offset, "sensors"); + _FDT(offset); + + _FDT((fdt_setprop_cell(fdt, offset, "#address-cells", 0x1))); + _FDT((fdt_setprop_cell(fdt, offset, "#size-cells", 0x0))); + + for (i = 0; !ipmi_bmc_sdr_find(bmc, i, &sdr, &nextrec); i++) { + int off; + char *name; + + if (sdr->header.rec_type != IPMI_SDR_COMPACT_TYPE && + sdr->header.rec_type != IPMI_SDR_FULL_TYPE) { + continue; + } + + name = g_strdup_printf("sensor@%x", sdr->sensor_owner_number); + off = fdt_add_subnode(fdt, offset, name); + _FDT(off); + g_free(name); + + _FDT((fdt_setprop_cell(fdt, off, "reg", sdr->sensor_owner_number))); + _FDT((fdt_setprop_string(fdt, off, "name", "sensor"))); + _FDT((fdt_setprop_string(fdt, off, "compatible", "ibm,ipmi-sensor"))); + _FDT((fdt_setprop_cell(fdt, off, "ipmi-sensor-reading-type", + sdr->reading_type))); + _FDT((fdt_setprop_cell(fdt, off, "ipmi-entity-id", + sdr->entity_id))); + _FDT((fdt_setprop_cell(fdt, off, "ipmi-entity-instance", +
sdr->entity_instance))); + _FDT((fdt_setprop_cell(fdt, off, "ipmi-sensor-type", + sdr->sensor_type))); + } +} + +/* + * HIOMAP protocol handler + */ +#define HIOMAP_C_RESET 1 +#define HIOMAP_C_GET_INFO 2 +#define HIOMAP_C_GET_FLASH_INFO 3 +#define HIOMAP_C_CREATE_READ_WINDOW 4 +#define HIOMAP_C_CLOSE_WINDOW 5 +#define HIOMAP_C_CREATE_WRITE_WINDOW 6 +#define HIOMAP_C_MARK_DIRTY 7 +#define HIOMAP_C_FLUSH 8 +#define HIOMAP_C_ACK 9 +#define HIOMAP_C_ERASE 10 +#define HIOMAP_C_DEVICE_NAME 11 +#define HIOMAP_C_LOCK 12 + +#define BLOCK_SHIFT 12 /* 4K */ + +static uint16_t bytes_to_blocks(uint32_t bytes) +{ + return bytes >> BLOCK_SHIFT; +} + +static uint32_t blocks_to_bytes(uint16_t blocks) +{ + return blocks << BLOCK_SHIFT; +} + +static int hiomap_erase(PnvPnor *pnor, uint32_t offset, uint32_t size) +{ + MemTxResult result; + int i; + + for (i = 0; i < size / 4; i++) { + result = memory_region_dispatch_write(&pnor->mmio, offset + i * 4, + 0xFFFFFFFF, MO_32, + MEMTXATTRS_UNSPECIFIED); + if (result != MEMTX_OK) { + return -1; + } + } + return 0; +} + +static void hiomap_cmd(IPMIBmcSim *ibs, uint8_t *cmd, unsigned int cmd_len, + RspBuffer *rsp) +{ + PnvPnor *pnor = PNV_PNOR(object_property_get_link(OBJECT(ibs), "pnor", + &error_abort)); + uint32_t pnor_size = pnor->size; + uint32_t pnor_addr = PNOR_SPI_OFFSET; + bool readonly = false; + + rsp_buffer_push(rsp, cmd[2]); + rsp_buffer_push(rsp, cmd[3]); + + switch (cmd[2]) { + case HIOMAP_C_MARK_DIRTY: + case HIOMAP_C_FLUSH: + case HIOMAP_C_ACK: + break; + + case HIOMAP_C_ERASE: + if (hiomap_erase(pnor, blocks_to_bytes(cmd[5] << 8 | cmd[4]), + blocks_to_bytes(cmd[7] << 8 | cmd[6]))) { + rsp_buffer_set_error(rsp, IPMI_CC_UNSPECIFIED); + } + break; + + case HIOMAP_C_GET_INFO: + rsp_buffer_push(rsp, 2); /* Version 2 */ + rsp_buffer_push(rsp, BLOCK_SHIFT); /* block size */ + rsp_buffer_push(rsp, 0); /* Timeout */ + rsp_buffer_push(rsp, 0); /* Timeout */ + break; + + case HIOMAP_C_GET_FLASH_INFO: + rsp_buffer_push(rsp, bytes_to_blocks(pnor_size) & 0xFF); + rsp_buffer_push(rsp, bytes_to_blocks(pnor_size) >> 8); + rsp_buffer_push(rsp, 0x01); /* erase size */ + rsp_buffer_push(rsp, 0x00); /* erase size */ + break; + + case HIOMAP_C_CREATE_READ_WINDOW: + readonly = true; + /* Fall through */ + + case HIOMAP_C_CREATE_WRITE_WINDOW: + memory_region_set_readonly(&pnor->mmio, readonly); + memory_region_set_enabled(&pnor->mmio, true); + + rsp_buffer_push(rsp, bytes_to_blocks(pnor_addr) & 0xFF); + rsp_buffer_push(rsp, bytes_to_blocks(pnor_addr) >> 8); + rsp_buffer_push(rsp, bytes_to_blocks(pnor_size) & 0xFF); + rsp_buffer_push(rsp, bytes_to_blocks(pnor_size) >> 8); + rsp_buffer_push(rsp, 0x00); /* offset */ + rsp_buffer_push(rsp, 0x00); /* offset */ + break; + + case HIOMAP_C_CLOSE_WINDOW: + memory_region_set_enabled(&pnor->mmio, false); + break; + + case HIOMAP_C_DEVICE_NAME: + case HIOMAP_C_RESET: + case HIOMAP_C_LOCK: + default: + qemu_log_mask(LOG_GUEST_ERROR, "HIOMAP: unknown command %02X\n", cmd[2]); + break; + } +} + +#define HIOMAP 0x5a + +static const IPMICmdHandler hiomap_cmds[] = { + [HIOMAP] = { hiomap_cmd, 3 }, +}; + +static const IPMINetfn hiomap_netfn = { + .cmd_nums = ARRAY_SIZE(hiomap_cmds), + .cmd_handlers = hiomap_cmds +}; + + +void pnv_bmc_set_pnor(IPMIBmc *bmc, PnvPnor *pnor) +{ + if (!pnv_bmc_is_simulator(bmc)) { + return; + } + + object_ref(OBJECT(pnor)); + object_property_add_const_link(OBJECT(bmc), "pnor", OBJECT(pnor)); + + /* Install the HIOMAP protocol handlers to access the PNOR */ + ipmi_sim_register_netfn(IPMI_BMC_SIMULATOR(bmc), 
IPMI_NETFN_OEM, + &hiomap_netfn); +} + +/* + * Instantiate the machine BMC. PowerNV uses the QEMU internal + * simulator but it could also be external. + */ +IPMIBmc *pnv_bmc_create(PnvPnor *pnor) +{ + Object *obj; + + obj = object_new(TYPE_IPMI_BMC_SIMULATOR); + qdev_realize(DEVICE(obj), NULL, &error_fatal); + pnv_bmc_set_pnor(IPMI_BMC(obj), pnor); + + return IPMI_BMC(obj); +} + +typedef struct ForeachArgs { + const char *name; + Object *obj; +} ForeachArgs; + +static int bmc_find(Object *child, void *opaque) +{ + ForeachArgs *args = opaque; + + if (object_dynamic_cast(child, args->name)) { + if (args->obj) { + return 1; + } + args->obj = child; + } + return 0; +} + +IPMIBmc *pnv_bmc_find(Error **errp) +{ + ForeachArgs args = { TYPE_IPMI_BMC, NULL }; + int ret; + + ret = object_child_foreach_recursive(object_get_root(), bmc_find, &args); + if (ret) { + error_setg(errp, "machine should have only one BMC device. " + "Use '-nodefaults'"); + return NULL; + } + + return args.obj ? IPMI_BMC(args.obj) : NULL; +} diff --git a/hw/ppc/pnv_core.c b/hw/ppc/pnv_core.c new file mode 100644 index 000000000..19e8eb885 --- /dev/null +++ b/hw/ppc/pnv_core.c @@ -0,0 +1,441 @@ +/* + * QEMU PowerPC PowerNV CPU Core model + * + * Copyright (c) 2016, IBM Corporation. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public License + * as published by the Free Software Foundation; either version 2.1 of + * the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ + +#include "qemu/osdep.h" +#include "sysemu/reset.h" +#include "qapi/error.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "target/ppc/cpu.h" +#include "hw/ppc/ppc.h" +#include "hw/ppc/pnv.h" +#include "hw/ppc/pnv_core.h" +#include "hw/ppc/pnv_xscom.h" +#include "hw/ppc/xics.h" +#include "hw/qdev-properties.h" +#include "helper_regs.h" + +static const char *pnv_core_cpu_typename(PnvCore *pc) +{ + const char *core_type = object_class_get_name(object_get_class(OBJECT(pc))); + int len = strlen(core_type) - strlen(PNV_CORE_TYPE_SUFFIX); + char *s = g_strdup_printf(POWERPC_CPU_TYPE_NAME("%.*s"), len, core_type); + const char *cpu_type = object_class_get_name(object_class_by_name(s)); + g_free(s); + return cpu_type; +} + +static void pnv_core_cpu_reset(PnvCore *pc, PowerPCCPU *cpu) +{ + CPUState *cs = CPU(cpu); + CPUPPCState *env = &cpu->env; + PnvChipClass *pcc = PNV_CHIP_GET_CLASS(pc->chip); + + cpu_reset(cs); + + /* + * the skiboot firmware elects a primary thread to initialize the + * system and it can be any. 
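Stepping back to the HIOMAP handler earlier in this patch: window geometry is exchanged as little-endian 16-bit counts of 4 KiB blocks (BLOCK_SHIFT of 12), each field pushed into the response low byte first. A self-contained sketch of that packing, with hypothetical helper names and an example PNOR size:

#include <stdint.h>
#include <assert.h>

#define BLOCK_SHIFT 12 /* 4 KiB blocks, as in the handler above */

static uint16_t bytes_to_blocks(uint32_t bytes)  { return bytes >> BLOCK_SHIFT; }
static uint32_t blocks_to_bytes(uint16_t blocks) { return (uint32_t)blocks << BLOCK_SHIFT; }

/* A 16-bit field is pushed low byte first, as in HIOMAP_C_GET_FLASH_INFO. */
static void push_le16(uint8_t *rsp, uint16_t v) { rsp[0] = v & 0xFF; rsp[1] = v >> 8; }

int main(void)
{
    uint8_t rsp[2];
    uint32_t pnor_size = 32 * 1024 * 1024;     /* e.g. a 32 MiB PNOR */

    push_le16(rsp, bytes_to_blocks(pnor_size));
    assert(rsp[0] == 0x00 && rsp[1] == 0x20);  /* 0x2000 blocks of 4 KiB */
    assert(blocks_to_bytes(0x2000) == pnor_size);
    return 0;
}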
+ */ + env->gpr[3] = PNV_FDT_ADDR; + env->nip = 0x10; + env->msr |= MSR_HVB; /* Hypervisor mode */ + env->spr[SPR_HRMOR] = pc->hrmor; + hreg_compute_hflags(env); + + pcc->intc_reset(pc->chip, cpu); +} + +/* + * These values are read by the PowerNV HW monitors under Linux + */ +#define PNV_XSCOM_EX_DTS_RESULT0 0x50000 +#define PNV_XSCOM_EX_DTS_RESULT1 0x50001 + +static uint64_t pnv_core_power8_xscom_read(void *opaque, hwaddr addr, + unsigned int width) +{ + uint32_t offset = addr >> 3; + uint64_t val = 0; + + /* The result should be 38 C */ + switch (offset) { + case PNV_XSCOM_EX_DTS_RESULT0: + val = 0x26f024f023f0000ull; + break; + case PNV_XSCOM_EX_DTS_RESULT1: + val = 0x24f000000000000ull; + break; + default: + qemu_log_mask(LOG_UNIMP, "Warning: reading reg=0x%" HWADDR_PRIx "\n", + addr); + } + + return val; +} + +static void pnv_core_power8_xscom_write(void *opaque, hwaddr addr, uint64_t val, + unsigned int width) +{ + qemu_log_mask(LOG_UNIMP, "Warning: writing to reg=0x%" HWADDR_PRIx "\n", + addr); +} + +static const MemoryRegionOps pnv_core_power8_xscom_ops = { + .read = pnv_core_power8_xscom_read, + .write = pnv_core_power8_xscom_write, + .valid.min_access_size = 8, + .valid.max_access_size = 8, + .impl.min_access_size = 8, + .impl.max_access_size = 8, + .endianness = DEVICE_BIG_ENDIAN, +}; + + +/* + * POWER9 core controls + */ +#define PNV9_XSCOM_EC_PPM_SPECIAL_WKUP_HYP 0xf010d +#define PNV9_XSCOM_EC_PPM_SPECIAL_WKUP_OTR 0xf010a + +static uint64_t pnv_core_power9_xscom_read(void *opaque, hwaddr addr, + unsigned int width) +{ + uint32_t offset = addr >> 3; + uint64_t val = 0; + + /* The result should be 38 C */ + switch (offset) { + case PNV_XSCOM_EX_DTS_RESULT0: + val = 0x26f024f023f0000ull; + break; + case PNV_XSCOM_EX_DTS_RESULT1: + val = 0x24f000000000000ull; + break; + case PNV9_XSCOM_EC_PPM_SPECIAL_WKUP_HYP: + case PNV9_XSCOM_EC_PPM_SPECIAL_WKUP_OTR: + val = 0x0; + break; + default: + qemu_log_mask(LOG_UNIMP, "Warning: reading reg=0x%" HWADDR_PRIx "\n", + addr); + } + + return val; +} + +static void pnv_core_power9_xscom_write(void *opaque, hwaddr addr, uint64_t val, + unsigned int width) +{ + uint32_t offset = addr >> 3; + + switch (offset) { + case PNV9_XSCOM_EC_PPM_SPECIAL_WKUP_HYP: + case PNV9_XSCOM_EC_PPM_SPECIAL_WKUP_OTR: + break; + default: + qemu_log_mask(LOG_UNIMP, "Warning: writing to reg=0x%" HWADDR_PRIx "\n", + addr); + } +} + +static const MemoryRegionOps pnv_core_power9_xscom_ops = { + .read = pnv_core_power9_xscom_read, + .write = pnv_core_power9_xscom_write, + .valid.min_access_size = 8, + .valid.max_access_size = 8, + .impl.min_access_size = 8, + .impl.max_access_size = 8, + .endianness = DEVICE_BIG_ENDIAN, +}; + +static void pnv_core_cpu_realize(PnvCore *pc, PowerPCCPU *cpu, Error **errp) +{ + CPUPPCState *env = &cpu->env; + int core_pir; + int thread_index = 0; /* TODO: TCG supports only one thread */ + ppc_spr_t *pir = &env->spr_cb[SPR_PIR]; + Error *local_err = NULL; + PnvChipClass *pcc = PNV_CHIP_GET_CLASS(pc->chip); + + if (!qdev_realize(DEVICE(cpu), NULL, errp)) { + return; + } + + pcc->intc_create(pc->chip, cpu, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + + core_pir = object_property_get_uint(OBJECT(pc), "pir", &error_abort); + + /* + * The PIR of a thread is the core PIR + the thread index. We will + * need to find a way to get the thread index when TCG supports + * more than 1. We could use the object name ? 
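The PIR arithmetic described in the comment above is simply base-plus-index: a thread's PIR is its core's PIR plus the thread index (only index 0 exists until TCG grows SMT support). A hypothetical standalone illustration, not the QEMU code:

#include <stdio.h>
#include <stdint.h>

/* Sketch: thread PIR = core PIR + thread index within the core. */
static uint32_t thread_pir(uint32_t core_pir, int thread_index)
{
    return core_pir + thread_index;
}

int main(void)
{
    /* e.g. a core with PIR base 0x20: its threads get 0x20, 0x21, ... */
    for (int t = 0; t < 4; t++) {
        printf("thread %d -> PIR 0x%x\n", t, thread_pir(0x20, t));
    }
    return 0;
}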
+ */ + pir->default_value = core_pir + thread_index; + + /* Set time-base frequency to 512 MHz */ + cpu_ppc_tb_init(env, PNV_TIMEBASE_FREQ); +} + +static void pnv_core_reset(void *dev) +{ + CPUCore *cc = CPU_CORE(dev); + PnvCore *pc = PNV_CORE(dev); + int i; + + for (i = 0; i < cc->nr_threads; i++) { + pnv_core_cpu_reset(pc, pc->threads[i]); + } +} + +static void pnv_core_realize(DeviceState *dev, Error **errp) +{ + PnvCore *pc = PNV_CORE(OBJECT(dev)); + PnvCoreClass *pcc = PNV_CORE_GET_CLASS(pc); + CPUCore *cc = CPU_CORE(OBJECT(dev)); + const char *typename = pnv_core_cpu_typename(pc); + Error *local_err = NULL; + void *obj; + int i, j; + char name[32]; + + assert(pc->chip); + + pc->threads = g_new(PowerPCCPU *, cc->nr_threads); + for (i = 0; i < cc->nr_threads; i++) { + PowerPCCPU *cpu; + + obj = object_new(typename); + cpu = POWERPC_CPU(obj); + + pc->threads[i] = POWERPC_CPU(obj); + + snprintf(name, sizeof(name), "thread[%d]", i); + object_property_add_child(OBJECT(pc), name, obj); + + cpu->machine_data = g_new0(PnvCPUState, 1); + + object_unref(obj); + } + + for (j = 0; j < cc->nr_threads; j++) { + pnv_core_cpu_realize(pc, pc->threads[j], &local_err); + if (local_err) { + goto err; + } + } + + snprintf(name, sizeof(name), "xscom-core.%d", cc->core_id); + /* TODO: check PNV_XSCOM_EX_SIZE for p10 */ + pnv_xscom_region_init(&pc->xscom_regs, OBJECT(dev), pcc->xscom_ops, + pc, name, PNV_XSCOM_EX_SIZE); + + qemu_register_reset(pnv_core_reset, pc); + return; + +err: + while (--i >= 0) { + obj = OBJECT(pc->threads[i]); + object_unparent(obj); + } + g_free(pc->threads); + error_propagate(errp, local_err); +} + +static void pnv_core_cpu_unrealize(PnvCore *pc, PowerPCCPU *cpu) +{ + PnvCPUState *pnv_cpu = pnv_cpu_state(cpu); + PnvChipClass *pcc = PNV_CHIP_GET_CLASS(pc->chip); + + pcc->intc_destroy(pc->chip, cpu); + cpu_remove_sync(CPU(cpu)); + cpu->machine_data = NULL; + g_free(pnv_cpu); + object_unparent(OBJECT(cpu)); +} + +static void pnv_core_unrealize(DeviceState *dev) +{ + PnvCore *pc = PNV_CORE(dev); + CPUCore *cc = CPU_CORE(dev); + int i; + + qemu_unregister_reset(pnv_core_reset, pc); + + for (i = 0; i < cc->nr_threads; i++) { + pnv_core_cpu_unrealize(pc, pc->threads[i]); + } + g_free(pc->threads); +} + +static Property pnv_core_properties[] = { + DEFINE_PROP_UINT32("pir", PnvCore, pir, 0), + DEFINE_PROP_UINT64("hrmor", PnvCore, hrmor, 0), + DEFINE_PROP_LINK("chip", PnvCore, chip, TYPE_PNV_CHIP, PnvChip *), + DEFINE_PROP_END_OF_LIST(), +}; + +static void pnv_core_power8_class_init(ObjectClass *oc, void *data) +{ + PnvCoreClass *pcc = PNV_CORE_CLASS(oc); + + pcc->xscom_ops = &pnv_core_power8_xscom_ops; +} + +static void pnv_core_power9_class_init(ObjectClass *oc, void *data) +{ + PnvCoreClass *pcc = PNV_CORE_CLASS(oc); + + pcc->xscom_ops = &pnv_core_power9_xscom_ops; +} + +static void pnv_core_power10_class_init(ObjectClass *oc, void *data) +{ + PnvCoreClass *pcc = PNV_CORE_CLASS(oc); + + /* TODO: Use the P9 XSCOMs for now on P10 */ + pcc->xscom_ops = &pnv_core_power9_xscom_ops; +} + +static void pnv_core_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + + dc->realize = pnv_core_realize; + dc->unrealize = pnv_core_unrealize; + device_class_set_props(dc, pnv_core_properties); + dc->user_creatable = false; +} + +#define DEFINE_PNV_CORE_TYPE(family, cpu_model) \ + { \ + .parent = TYPE_PNV_CORE, \ + .name = PNV_CORE_TYPE_NAME(cpu_model), \ + .class_init = pnv_core_##family##_class_init, \ + } + +static const TypeInfo pnv_core_infos[] = { + { + .name = 
TYPE_PNV_CORE, + .parent = TYPE_CPU_CORE, + .instance_size = sizeof(PnvCore), + .class_size = sizeof(PnvCoreClass), + .class_init = pnv_core_class_init, + .abstract = true, + }, + DEFINE_PNV_CORE_TYPE(power8, "power8e_v2.1"), + DEFINE_PNV_CORE_TYPE(power8, "power8_v2.0"), + DEFINE_PNV_CORE_TYPE(power8, "power8nvl_v1.0"), + DEFINE_PNV_CORE_TYPE(power9, "power9_v2.0"), + DEFINE_PNV_CORE_TYPE(power10, "power10_v2.0"), +}; + +DEFINE_TYPES(pnv_core_infos) + +/* + * POWER9 Quads + */ + +#define P9X_EX_NCU_SPEC_BAR 0x11010 + +static uint64_t pnv_quad_xscom_read(void *opaque, hwaddr addr, + unsigned int width) +{ + uint32_t offset = addr >> 3; + uint64_t val = -1; + + switch (offset) { + case P9X_EX_NCU_SPEC_BAR: + case P9X_EX_NCU_SPEC_BAR + 0x400: /* Second EX */ + val = 0; + break; + default: + qemu_log_mask(LOG_UNIMP, "%s: reading @0x%08x\n", __func__, + offset); + } + + return val; +} + +static void pnv_quad_xscom_write(void *opaque, hwaddr addr, uint64_t val, + unsigned int width) +{ + uint32_t offset = addr >> 3; + + switch (offset) { + case P9X_EX_NCU_SPEC_BAR: + case P9X_EX_NCU_SPEC_BAR + 0x400: /* Second EX */ + break; + default: + qemu_log_mask(LOG_UNIMP, "%s: writing @0x%08x\n", __func__, + offset); + } +} + +static const MemoryRegionOps pnv_quad_xscom_ops = { + .read = pnv_quad_xscom_read, + .write = pnv_quad_xscom_write, + .valid.min_access_size = 8, + .valid.max_access_size = 8, + .impl.min_access_size = 8, + .impl.max_access_size = 8, + .endianness = DEVICE_BIG_ENDIAN, +}; + +static void pnv_quad_realize(DeviceState *dev, Error **errp) +{ + PnvQuad *eq = PNV_QUAD(dev); + char name[32]; + + snprintf(name, sizeof(name), "xscom-quad.%d", eq->quad_id); + pnv_xscom_region_init(&eq->xscom_regs, OBJECT(dev), &pnv_quad_xscom_ops, + eq, name, PNV9_XSCOM_EQ_SIZE); +} + +static Property pnv_quad_properties[] = { + DEFINE_PROP_UINT32("quad-id", PnvQuad, quad_id, 0), + DEFINE_PROP_END_OF_LIST(), +}; + +static void pnv_quad_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + + dc->realize = pnv_quad_realize; + device_class_set_props(dc, pnv_quad_properties); + dc->user_creatable = false; +} + +static const TypeInfo pnv_quad_info = { + .name = TYPE_PNV_QUAD, + .parent = TYPE_DEVICE, + .instance_size = sizeof(PnvQuad), + .class_init = pnv_quad_class_init, +}; + +static void pnv_core_register_types(void) +{ + type_register_static(&pnv_quad_info); +} + +type_init(pnv_core_register_types) diff --git a/hw/ppc/pnv_homer.c b/hw/ppc/pnv_homer.c new file mode 100644 index 000000000..9a262629b --- /dev/null +++ b/hw/ppc/pnv_homer.c @@ -0,0 +1,382 @@ +/* + * QEMU PowerPC PowerNV Emulation of a few HOMER-related registers + * + * Copyright (c) 2019, IBM Corporation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see <http://www.gnu.org/licenses/>.
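A note on the XSCOM handlers above: they all derive the register number as addr >> 3 because every XSCOM register occupies an 8-byte slot in the mapped MMIO region. A minimal sketch of that convention (the hwaddr alias is a stand-in, not QEMU's type):

#include <stdint.h>
#include <assert.h>

typedef uint64_t hwaddr; /* stand-in for QEMU's hwaddr */

/* Each XSCOM register sits on an 8-byte boundary, so the PCB register
 * offset is the MMIO address divided by 8. */
static uint32_t xscom_register(hwaddr mmio_addr)
{
    return mmio_addr >> 3;
}

int main(void)
{
    /* e.g. P9X_EX_NCU_SPEC_BAR (0x11010) is reached by an access at
     * MMIO offset 0x11010 * 8 within the quad region. */
    assert(xscom_register(0x11010ull * 8) == 0x11010);
    assert(xscom_register(0x8) == 1);
    return 0;
}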
+ */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "qapi/error.h" +#include "exec/hwaddr.h" +#include "exec/memory.h" +#include "sysemu/cpus.h" +#include "hw/qdev-core.h" +#include "hw/qdev-properties.h" +#include "hw/ppc/pnv.h" +#include "hw/ppc/pnv_homer.h" +#include "hw/ppc/pnv_xscom.h" + + +static bool core_max_array(PnvHomer *homer, hwaddr addr) +{ + int i; + PnvHomerClass *hmrc = PNV_HOMER_GET_CLASS(homer); + + for (i = 0; i <= homer->chip->nr_cores; i++) { + if (addr == (hmrc->core_max_base + i)) { + return true; + } + } + return false; +} + +/* P8 Pstate table */ + +#define PNV8_OCC_PSTATE_VERSION 0x1f8001 +#define PNV8_OCC_PSTATE_MIN 0x1f8003 +#define PNV8_OCC_PSTATE_VALID 0x1f8000 +#define PNV8_OCC_PSTATE_THROTTLE 0x1f8002 +#define PNV8_OCC_PSTATE_NOM 0x1f8004 +#define PNV8_OCC_PSTATE_TURBO 0x1f8005 +#define PNV8_OCC_PSTATE_ULTRA_TURBO 0x1f8006 +#define PNV8_OCC_PSTATE_DATA 0x1f8008 +#define PNV8_OCC_PSTATE_ID_ZERO 0x1f8010 +#define PNV8_OCC_PSTATE_ID_ONE 0x1f8018 +#define PNV8_OCC_PSTATE_ID_TWO 0x1f8020 +#define PNV8_OCC_VDD_VOLTAGE_IDENTIFIER 0x1f8012 +#define PNV8_OCC_VCS_VOLTAGE_IDENTIFIER 0x1f8013 +#define PNV8_OCC_PSTATE_ZERO_FREQUENCY 0x1f8014 +#define PNV8_OCC_PSTATE_ONE_FREQUENCY 0x1f801c +#define PNV8_OCC_PSTATE_TWO_FREQUENCY 0x1f8024 +#define PNV8_CORE_MAX_BASE 0x1f8810 + + +static uint64_t pnv_power8_homer_read(void *opaque, hwaddr addr, + unsigned size) +{ + PnvHomer *homer = PNV_HOMER(opaque); + + switch (addr) { + case PNV8_OCC_PSTATE_VERSION: + case PNV8_OCC_PSTATE_MIN: + case PNV8_OCC_PSTATE_ID_ZERO: + return 0; + case PNV8_OCC_PSTATE_VALID: + case PNV8_OCC_PSTATE_THROTTLE: + case PNV8_OCC_PSTATE_NOM: + case PNV8_OCC_PSTATE_TURBO: + case PNV8_OCC_PSTATE_ID_ONE: + case PNV8_OCC_VDD_VOLTAGE_IDENTIFIER: + case PNV8_OCC_VCS_VOLTAGE_IDENTIFIER: + return 1; + case PNV8_OCC_PSTATE_ULTRA_TURBO: + case PNV8_OCC_PSTATE_ID_TWO: + return 2; + case PNV8_OCC_PSTATE_DATA: + return 0x1000000000000000; + /* P8 frequency for 0, 1, and 2 pstates */ + case PNV8_OCC_PSTATE_ZERO_FREQUENCY: + case PNV8_OCC_PSTATE_ONE_FREQUENCY: + case PNV8_OCC_PSTATE_TWO_FREQUENCY: + return 3000; + } + /* pstate table core max array */ + if (core_max_array(homer, addr)) { + return 1; + } + return 0; +} + +static void pnv_power8_homer_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + /* callback function defined to homer write */ + return; +} + +static const MemoryRegionOps pnv_power8_homer_ops = { + .read = pnv_power8_homer_read, + .write = pnv_power8_homer_write, + .valid.min_access_size = 1, + .valid.max_access_size = 8, + .impl.min_access_size = 1, + .impl.max_access_size = 8, + .endianness = DEVICE_BIG_ENDIAN, +}; + +/* P8 PBA BARs */ +#define PBA_BAR0 0x00 +#define PBA_BAR1 0x01 +#define PBA_BAR2 0x02 +#define PBA_BAR3 0x03 +#define PBA_BARMASK0 0x04 +#define PBA_BARMASK1 0x05 +#define PBA_BARMASK2 0x06 +#define PBA_BARMASK3 0x07 + +static uint64_t pnv_homer_power8_pba_read(void *opaque, hwaddr addr, + unsigned size) +{ + PnvHomer *homer = PNV_HOMER(opaque); + PnvChip *chip = homer->chip; + uint32_t reg = addr >> 3; + uint64_t val = 0; + + switch (reg) { + case PBA_BAR0: + val = PNV_HOMER_BASE(chip); + break; + case PBA_BARMASK0: /* P8 homer region mask */ + val = (PNV_HOMER_SIZE - 1) & 0x300000; + break; + case PBA_BAR3: /* P8 occ common area */ + val = PNV_OCC_COMMON_AREA_BASE; + break; + case PBA_BARMASK3: /* P8 occ common area mask */ + val = (PNV_OCC_COMMON_AREA_SIZE - 1) & 0x700000; + break; + default: + qemu_log_mask(LOG_UNIMP, "PBA: read to unimplemented 
register: 0x%" + HWADDR_PRIx "\n", addr >> 3); + } + return val; +} + +static void pnv_homer_power8_pba_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + qemu_log_mask(LOG_UNIMP, "PBA: write to unimplemented register: 0x%" + HWADDR_PRIx "\n", addr >> 3); +} + +static const MemoryRegionOps pnv_homer_power8_pba_ops = { + .read = pnv_homer_power8_pba_read, + .write = pnv_homer_power8_pba_write, + .valid.min_access_size = 8, + .valid.max_access_size = 8, + .impl.min_access_size = 8, + .impl.max_access_size = 8, + .endianness = DEVICE_BIG_ENDIAN, +}; + +static void pnv_homer_power8_class_init(ObjectClass *klass, void *data) +{ + PnvHomerClass *homer = PNV_HOMER_CLASS(klass); + + homer->pba_size = PNV_XSCOM_PBA_SIZE; + homer->pba_ops = &pnv_homer_power8_pba_ops; + homer->homer_size = PNV_HOMER_SIZE; + homer->homer_ops = &pnv_power8_homer_ops; + homer->core_max_base = PNV8_CORE_MAX_BASE; +} + +static const TypeInfo pnv_homer_power8_type_info = { + .name = TYPE_PNV8_HOMER, + .parent = TYPE_PNV_HOMER, + .instance_size = sizeof(PnvHomer), + .class_init = pnv_homer_power8_class_init, +}; + +/* P9 Pstate table */ + +#define PNV9_OCC_PSTATE_ID_ZERO 0xe2018 +#define PNV9_OCC_PSTATE_ID_ONE 0xe2020 +#define PNV9_OCC_PSTATE_ID_TWO 0xe2028 +#define PNV9_OCC_PSTATE_DATA 0xe2000 +#define PNV9_OCC_PSTATE_DATA_AREA 0xe2008 +#define PNV9_OCC_PSTATE_MIN 0xe2003 +#define PNV9_OCC_PSTATE_NOM 0xe2004 +#define PNV9_OCC_PSTATE_TURBO 0xe2005 +#define PNV9_OCC_PSTATE_ULTRA_TURBO 0xe2818 +#define PNV9_OCC_MAX_PSTATE_ULTRA_TURBO 0xe2006 +#define PNV9_OCC_PSTATE_MAJOR_VERSION 0xe2001 +#define PNV9_OCC_OPAL_RUNTIME_DATA 0xe2b85 +#define PNV9_CHIP_HOMER_IMAGE_POINTER 0x200008 +#define PNV9_CHIP_HOMER_BASE 0x0 +#define PNV9_OCC_PSTATE_ZERO_FREQUENCY 0xe201c +#define PNV9_OCC_PSTATE_ONE_FREQUENCY 0xe2024 +#define PNV9_OCC_PSTATE_TWO_FREQUENCY 0xe202c +#define PNV9_OCC_ROLE_MASTER_OR_SLAVE 0xe2002 +#define PNV9_CORE_MAX_BASE 0xe2819 + + +static uint64_t pnv_power9_homer_read(void *opaque, hwaddr addr, + unsigned size) +{ + PnvHomer *homer = PNV_HOMER(opaque); + + switch (addr) { + case PNV9_OCC_MAX_PSTATE_ULTRA_TURBO: + case PNV9_OCC_PSTATE_ID_ZERO: + return 0; + case PNV9_OCC_PSTATE_DATA: + case PNV9_OCC_ROLE_MASTER_OR_SLAVE: + case PNV9_OCC_PSTATE_NOM: + case PNV9_OCC_PSTATE_TURBO: + case PNV9_OCC_PSTATE_ID_ONE: + case PNV9_OCC_PSTATE_ULTRA_TURBO: + case PNV9_OCC_OPAL_RUNTIME_DATA: + return 1; + case PNV9_OCC_PSTATE_MIN: + case PNV9_OCC_PSTATE_ID_TWO: + return 2; + + /* 3000 kHz frequency for 0, 1, and 2 pstates */ + case PNV9_OCC_PSTATE_ZERO_FREQUENCY: + case PNV9_OCC_PSTATE_ONE_FREQUENCY: + case PNV9_OCC_PSTATE_TWO_FREQUENCY: + return 3000; + case PNV9_OCC_PSTATE_MAJOR_VERSION: + return 0x90; + case PNV9_CHIP_HOMER_BASE: + case PNV9_OCC_PSTATE_DATA_AREA: + case PNV9_CHIP_HOMER_IMAGE_POINTER: + return 0x1000000000000000; + } + /* pstate table core max array */ + if (core_max_array(homer, addr)) { + return 1; + } + return 0; +} + +static void pnv_power9_homer_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + /* callback function defined to homer write */ + return; +} + +static const MemoryRegionOps pnv_power9_homer_ops = { + .read = pnv_power9_homer_read, + .write = pnv_power9_homer_write, + .valid.min_access_size = 1, + .valid.max_access_size = 8, + .impl.min_access_size = 1, + .impl.max_access_size = 8, + .endianness = DEVICE_BIG_ENDIAN, +}; + +static uint64_t pnv_homer_power9_pba_read(void *opaque, hwaddr addr, + unsigned size) +{ + PnvHomer *homer = PNV_HOMER(opaque); + PnvChip *chip
= homer->chip; + uint32_t reg = addr >> 3; + uint64_t val = 0; + + switch (reg) { + case PBA_BAR0: + val = PNV9_HOMER_BASE(chip); + break; + case PBA_BARMASK0: /* P9 homer region mask */ + val = (PNV9_HOMER_SIZE - 1) & 0x300000; + break; + case PBA_BAR2: /* P9 occ common area */ + val = PNV9_OCC_COMMON_AREA_BASE; + break; + case PBA_BARMASK2: /* P9 occ common area mask */ + val = (PNV9_OCC_COMMON_AREA_SIZE - 1) & 0x700000; + break; + default: + qemu_log_mask(LOG_UNIMP, "PBA: read to unimplemented register: 0x%" + HWADDR_PRIx "\n", addr >> 3); + } + return val; +} + +static void pnv_homer_power9_pba_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + qemu_log_mask(LOG_UNIMP, "PBA: write to unimplemented register: 0x%" + HWADDR_PRIx "\n", addr >> 3); +} + +static const MemoryRegionOps pnv_homer_power9_pba_ops = { + .read = pnv_homer_power9_pba_read, + .write = pnv_homer_power9_pba_write, + .valid.min_access_size = 8, + .valid.max_access_size = 8, + .impl.min_access_size = 8, + .impl.max_access_size = 8, + .endianness = DEVICE_BIG_ENDIAN, +}; + +static void pnv_homer_power9_class_init(ObjectClass *klass, void *data) +{ + PnvHomerClass *homer = PNV_HOMER_CLASS(klass); + + homer->pba_size = PNV9_XSCOM_PBA_SIZE; + homer->pba_ops = &pnv_homer_power9_pba_ops; + homer->homer_size = PNV9_HOMER_SIZE; + homer->homer_ops = &pnv_power9_homer_ops; + homer->core_max_base = PNV9_CORE_MAX_BASE; +} + +static const TypeInfo pnv_homer_power9_type_info = { + .name = TYPE_PNV9_HOMER, + .parent = TYPE_PNV_HOMER, + .instance_size = sizeof(PnvHomer), + .class_init = pnv_homer_power9_class_init, +}; + +static void pnv_homer_realize(DeviceState *dev, Error **errp) +{ + PnvHomer *homer = PNV_HOMER(dev); + PnvHomerClass *hmrc = PNV_HOMER_GET_CLASS(homer); + + assert(homer->chip); + + pnv_xscom_region_init(&homer->pba_regs, OBJECT(dev), hmrc->pba_ops, + homer, "xscom-pba", hmrc->pba_size); + + /* homer region */ + memory_region_init_io(&homer->regs, OBJECT(dev), + hmrc->homer_ops, homer, "homer-main-memory", + hmrc->homer_size); +} + +static Property pnv_homer_properties[] = { + DEFINE_PROP_LINK("chip", PnvHomer, chip, TYPE_PNV_CHIP, PnvChip *), + DEFINE_PROP_END_OF_LIST(), +}; + +static void pnv_homer_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = pnv_homer_realize; + dc->desc = "PowerNV HOMER Memory"; + device_class_set_props(dc, pnv_homer_properties); + dc->user_creatable = false; +} + +static const TypeInfo pnv_homer_type_info = { + .name = TYPE_PNV_HOMER, + .parent = TYPE_DEVICE, + .instance_size = sizeof(PnvHomer), + .class_init = pnv_homer_class_init, + .class_size = sizeof(PnvHomerClass), + .abstract = true, +}; + +static void pnv_homer_register_types(void) +{ + type_register_static(&pnv_homer_type_info); + type_register_static(&pnv_homer_power8_type_info); + type_register_static(&pnv_homer_power9_type_info); +} + +type_init(pnv_homer_register_types); diff --git a/hw/ppc/pnv_lpc.c b/hw/ppc/pnv_lpc.c new file mode 100644 index 000000000..bcbca3db9 --- /dev/null +++ b/hw/ppc/pnv_lpc.c @@ -0,0 +1,853 @@ +/* + * QEMU PowerPC PowerNV LPC controller + * + * Copyright (c) 2016, IBM Corporation. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version.
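Returning to the PBA handlers above: the BARMASK registers report (size - 1) restricted to the architected mask bits, which works because the regions are power-of-two sized. A small sketch under that assumption, with 4 MiB and 8 MiB used only as plausible example sizes:

#include <stdint.h>
#include <assert.h>

/* BARMASK encodes (size - 1) restricted to the 1 MiB-granular mask
 * bits, assuming a power-of-two region size, as the handlers above do. */
static uint64_t barmask(uint64_t size, uint64_t mask_bits)
{
    return (size - 1) & mask_bits;
}

int main(void)
{
    assert(barmask(4ull << 20, 0x300000) == 0x300000);  /* e.g. 4 MiB HOMER */
    assert(barmask(8ull << 20, 0x700000) == 0x700000);  /* e.g. 8 MiB OCC common area */
    return 0;
}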
+ * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ + +#include "qemu/osdep.h" +#include "target/ppc/cpu.h" +#include "qapi/error.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "hw/irq.h" +#include "hw/isa/isa.h" +#include "hw/qdev-properties.h" +#include "hw/ppc/pnv.h" +#include "hw/ppc/pnv_lpc.h" +#include "hw/ppc/pnv_xscom.h" +#include "hw/ppc/fdt.h" + +#include + +enum { + ECCB_CTL = 0, + ECCB_RESET = 1, + ECCB_STAT = 2, + ECCB_DATA = 3, +}; + +/* OPB Master LS registers */ +#define OPB_MASTER_LS_ROUTE0 0x8 +#define OPB_MASTER_LS_ROUTE1 0xC +#define OPB_MASTER_LS_IRQ_STAT 0x50 +#define OPB_MASTER_IRQ_LPC 0x00000800 +#define OPB_MASTER_LS_IRQ_MASK 0x54 +#define OPB_MASTER_LS_IRQ_POL 0x58 +#define OPB_MASTER_LS_IRQ_INPUT 0x5c + +/* LPC HC registers */ +#define LPC_HC_FW_SEG_IDSEL 0x24 +#define LPC_HC_FW_RD_ACC_SIZE 0x28 +#define LPC_HC_FW_RD_1B 0x00000000 +#define LPC_HC_FW_RD_2B 0x01000000 +#define LPC_HC_FW_RD_4B 0x02000000 +#define LPC_HC_FW_RD_16B 0x04000000 +#define LPC_HC_FW_RD_128B 0x07000000 +#define LPC_HC_IRQSER_CTRL 0x30 +#define LPC_HC_IRQSER_EN 0x80000000 +#define LPC_HC_IRQSER_QMODE 0x40000000 +#define LPC_HC_IRQSER_START_MASK 0x03000000 +#define LPC_HC_IRQSER_START_4CLK 0x00000000 +#define LPC_HC_IRQSER_START_6CLK 0x01000000 +#define LPC_HC_IRQSER_START_8CLK 0x02000000 +#define LPC_HC_IRQMASK 0x34 /* same bit defs as LPC_HC_IRQSTAT */ +#define LPC_HC_IRQSTAT 0x38 +#define LPC_HC_IRQ_SERIRQ0 0x80000000 /* all bits down to ... 
*/ +#define LPC_HC_IRQ_SERIRQ16 0x00008000 /* IRQ16=IOCHK#, IRQ2=SMI# */ +#define LPC_HC_IRQ_SERIRQ_ALL 0xffff8000 +#define LPC_HC_IRQ_LRESET 0x00000400 +#define LPC_HC_IRQ_SYNC_ABNORM_ERR 0x00000080 +#define LPC_HC_IRQ_SYNC_NORESP_ERR 0x00000040 +#define LPC_HC_IRQ_SYNC_NORM_ERR 0x00000020 +#define LPC_HC_IRQ_SYNC_TIMEOUT_ERR 0x00000010 +#define LPC_HC_IRQ_SYNC_TARG_TAR_ERR 0x00000008 +#define LPC_HC_IRQ_SYNC_BM_TAR_ERR 0x00000004 +#define LPC_HC_IRQ_SYNC_BM0_REQ 0x00000002 +#define LPC_HC_IRQ_SYNC_BM1_REQ 0x00000001 +#define LPC_HC_ERROR_ADDRESS 0x40 + +#define LPC_OPB_SIZE 0x100000000ull + +#define ISA_IO_SIZE 0x00010000 +#define ISA_MEM_SIZE 0x10000000 +#define ISA_FW_SIZE 0x10000000 +#define LPC_IO_OPB_ADDR 0xd0010000 +#define LPC_IO_OPB_SIZE 0x00010000 +#define LPC_MEM_OPB_ADDR 0xe0000000 +#define LPC_MEM_OPB_SIZE 0x10000000 +#define LPC_FW_OPB_ADDR 0xf0000000 +#define LPC_FW_OPB_SIZE 0x10000000 + +#define LPC_OPB_REGS_OPB_ADDR 0xc0010000 +#define LPC_OPB_REGS_OPB_SIZE 0x00000060 +#define LPC_OPB_REGS_OPBA_ADDR 0xc0011000 +#define LPC_OPB_REGS_OPBA_SIZE 0x00000008 +#define LPC_HC_REGS_OPB_ADDR 0xc0012000 +#define LPC_HC_REGS_OPB_SIZE 0x00000100 + +static int pnv_lpc_dt_xscom(PnvXScomInterface *dev, void *fdt, int xscom_offset) +{ + const char compat[] = "ibm,power8-lpc\0ibm,lpc"; + char *name; + int offset; + uint32_t lpc_pcba = PNV_XSCOM_LPC_BASE; + uint32_t reg[] = { + cpu_to_be32(lpc_pcba), + cpu_to_be32(PNV_XSCOM_LPC_SIZE) + }; + + name = g_strdup_printf("isa@%x", lpc_pcba); + offset = fdt_add_subnode(fdt, xscom_offset, name); + _FDT(offset); + g_free(name); + + _FDT((fdt_setprop(fdt, offset, "reg", reg, sizeof(reg)))); + _FDT((fdt_setprop_cell(fdt, offset, "#address-cells", 2))); + _FDT((fdt_setprop_cell(fdt, offset, "#size-cells", 1))); + _FDT((fdt_setprop(fdt, offset, "compatible", compat, sizeof(compat)))); + return 0; +} + +/* POWER9 only */ +int pnv_dt_lpc(PnvChip *chip, void *fdt, int root_offset, uint64_t lpcm_addr, + uint64_t lpcm_size) +{ + const char compat[] = "ibm,power9-lpcm-opb\0simple-bus"; + const char lpc_compat[] = "ibm,power9-lpc\0ibm,lpc"; + char *name; + int offset, lpcm_offset; + uint32_t opb_ranges[8] = { 0, + cpu_to_be32(lpcm_addr >> 32), + cpu_to_be32((uint32_t)lpcm_addr), + cpu_to_be32(lpcm_size / 2), + cpu_to_be32(lpcm_size / 2), + cpu_to_be32(lpcm_addr >> 32), + cpu_to_be32(lpcm_size / 2), + cpu_to_be32(lpcm_size / 2), + }; + uint32_t opb_reg[4] = { cpu_to_be32(lpcm_addr >> 32), + cpu_to_be32((uint32_t)lpcm_addr), + cpu_to_be32(lpcm_size >> 32), + cpu_to_be32((uint32_t)lpcm_size), + }; + uint32_t lpc_ranges[12] = { 0, 0, + cpu_to_be32(LPC_MEM_OPB_ADDR), + cpu_to_be32(LPC_MEM_OPB_SIZE), + cpu_to_be32(1), 0, + cpu_to_be32(LPC_IO_OPB_ADDR), + cpu_to_be32(LPC_IO_OPB_SIZE), + cpu_to_be32(3), 0, + cpu_to_be32(LPC_FW_OPB_ADDR), + cpu_to_be32(LPC_FW_OPB_SIZE), + }; + uint32_t reg[2]; + + /* + * OPB bus + */ + name = g_strdup_printf("lpcm-opb@%"PRIx64, lpcm_addr); + lpcm_offset = fdt_add_subnode(fdt, root_offset, name); + _FDT(lpcm_offset); + g_free(name); + + _FDT((fdt_setprop(fdt, lpcm_offset, "reg", opb_reg, sizeof(opb_reg)))); + _FDT((fdt_setprop_cell(fdt, lpcm_offset, "#address-cells", 1))); + _FDT((fdt_setprop_cell(fdt, lpcm_offset, "#size-cells", 1))); + _FDT((fdt_setprop(fdt, lpcm_offset, "compatible", compat, sizeof(compat)))); + _FDT((fdt_setprop_cell(fdt, lpcm_offset, "ibm,chip-id", chip->chip_id))); + _FDT((fdt_setprop(fdt, lpcm_offset, "ranges", opb_ranges, + sizeof(opb_ranges)))); + + /* + * OPB Master registers + */ + name = 
g_strdup_printf("opb-master@%x", LPC_OPB_REGS_OPB_ADDR); + offset = fdt_add_subnode(fdt, lpcm_offset, name); + _FDT(offset); + g_free(name); + + reg[0] = cpu_to_be32(LPC_OPB_REGS_OPB_ADDR); + reg[1] = cpu_to_be32(LPC_OPB_REGS_OPB_SIZE); + _FDT((fdt_setprop(fdt, offset, "reg", reg, sizeof(reg)))); + _FDT((fdt_setprop_string(fdt, offset, "compatible", + "ibm,power9-lpcm-opb-master"))); + + /* + * OPB arbitrer registers + */ + name = g_strdup_printf("opb-arbitrer@%x", LPC_OPB_REGS_OPBA_ADDR); + offset = fdt_add_subnode(fdt, lpcm_offset, name); + _FDT(offset); + g_free(name); + + reg[0] = cpu_to_be32(LPC_OPB_REGS_OPBA_ADDR); + reg[1] = cpu_to_be32(LPC_OPB_REGS_OPBA_SIZE); + _FDT((fdt_setprop(fdt, offset, "reg", reg, sizeof(reg)))); + _FDT((fdt_setprop_string(fdt, offset, "compatible", + "ibm,power9-lpcm-opb-arbiter"))); + + /* + * LPC Host Controller registers + */ + name = g_strdup_printf("lpc-controller@%x", LPC_HC_REGS_OPB_ADDR); + offset = fdt_add_subnode(fdt, lpcm_offset, name); + _FDT(offset); + g_free(name); + + reg[0] = cpu_to_be32(LPC_HC_REGS_OPB_ADDR); + reg[1] = cpu_to_be32(LPC_HC_REGS_OPB_SIZE); + _FDT((fdt_setprop(fdt, offset, "reg", reg, sizeof(reg)))); + _FDT((fdt_setprop_string(fdt, offset, "compatible", + "ibm,power9-lpc-controller"))); + + name = g_strdup_printf("lpc@0"); + offset = fdt_add_subnode(fdt, lpcm_offset, name); + _FDT(offset); + g_free(name); + _FDT((fdt_setprop_cell(fdt, offset, "#address-cells", 2))); + _FDT((fdt_setprop_cell(fdt, offset, "#size-cells", 1))); + _FDT((fdt_setprop(fdt, offset, "compatible", lpc_compat, + sizeof(lpc_compat)))); + _FDT((fdt_setprop(fdt, offset, "ranges", lpc_ranges, + sizeof(lpc_ranges)))); + + return 0; +} + +/* + * These read/write handlers of the OPB address space should be common + * with the P9 LPC Controller which uses direct MMIOs. + * + * TODO: rework to use address_space_stq() and address_space_ldq() + * instead. + */ +static bool opb_read(PnvLpcController *lpc, uint32_t addr, uint8_t *data, + int sz) +{ + /* XXX Handle access size limits and FW read caching here */ + return !address_space_read(&lpc->opb_as, addr, MEMTXATTRS_UNSPECIFIED, + data, sz); +} + +static bool opb_write(PnvLpcController *lpc, uint32_t addr, uint8_t *data, + int sz) +{ + /* XXX Handle access size limits here */ + return !address_space_write(&lpc->opb_as, addr, MEMTXATTRS_UNSPECIFIED, + data, sz); +} + +#define ECCB_CTL_READ PPC_BIT(15) +#define ECCB_CTL_SZ_LSH (63 - 7) +#define ECCB_CTL_SZ_MASK PPC_BITMASK(4, 7) +#define ECCB_CTL_ADDR_MASK PPC_BITMASK(32, 63) + +#define ECCB_STAT_OP_DONE PPC_BIT(52) +#define ECCB_STAT_OP_ERR PPC_BIT(52) +#define ECCB_STAT_RD_DATA_LSH (63 - 37) +#define ECCB_STAT_RD_DATA_MASK (0xffffffff << ECCB_STAT_RD_DATA_LSH) + +static void pnv_lpc_do_eccb(PnvLpcController *lpc, uint64_t cmd) +{ + /* XXX Check for magic bits at the top, addr size etc... 
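As an aside, the field layout the ECCB_CTL_* masks above describe can be checked in isolation: with IBM bit numbering, bit 15 is the read flag, bits 4:7 carry the access size, and bits 32:63 the OPB address, which is exactly what the decode that follows extracts. A standalone sketch with hand-rolled mask helpers (illustrative assumptions, not the QEMU PPC_BIT macros):

#include <stdint.h>
#include <assert.h>

/* IBM bit numbering: bit 0 is the MSB of the 64-bit doubleword. */
static uint64_t bit_ibm(int n)         { return 1ull << (63 - n); }
static uint64_t mask_ibm(int a, int b) { return (~0ull >> a) & (~0ull << (63 - b)); }

int main(void)
{
    /* Decode an ECCB read command: size in bits 4:7, address in 32:63. */
    uint64_t cmd = bit_ibm(15)          /* READ flag       */
                 | (4ull << 56)         /* size = 4 bytes  */
                 | 0xC0012000ull;       /* OPB address     */

    unsigned sz   = (cmd & mask_ibm(4, 7)) >> (63 - 7);
    uint32_t addr = cmd & mask_ibm(32, 63);

    assert(cmd & bit_ibm(15));
    assert(sz == 4);
    assert(addr == 0xC0012000);
    return 0;
}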
*/ + unsigned int sz = (cmd & ECCB_CTL_SZ_MASK) >> ECCB_CTL_SZ_LSH; + uint32_t opb_addr = cmd & ECCB_CTL_ADDR_MASK; + uint8_t data[8]; + bool success; + + if (sz > sizeof(data)) { + qemu_log_mask(LOG_GUEST_ERROR, + "ECCB: invalid operation at @0x%08x size %d\n", opb_addr, sz); + return; + } + + if (cmd & ECCB_CTL_READ) { + success = opb_read(lpc, opb_addr, data, sz); + if (success) { + lpc->eccb_stat_reg = ECCB_STAT_OP_DONE | + (((uint64_t)data[0]) << 24 | + ((uint64_t)data[1]) << 16 | + ((uint64_t)data[2]) << 8 | + ((uint64_t)data[3])) << ECCB_STAT_RD_DATA_LSH; + } else { + lpc->eccb_stat_reg = ECCB_STAT_OP_DONE | + (0xffffffffull << ECCB_STAT_RD_DATA_LSH); + } + } else { + data[0] = lpc->eccb_data_reg >> 24; + data[1] = lpc->eccb_data_reg >> 16; + data[2] = lpc->eccb_data_reg >> 8; + data[3] = lpc->eccb_data_reg; + + success = opb_write(lpc, opb_addr, data, sz); + lpc->eccb_stat_reg = ECCB_STAT_OP_DONE; + } + /* XXX Which error bit (if any) to signal OPB error ? */ +} + +static uint64_t pnv_lpc_xscom_read(void *opaque, hwaddr addr, unsigned size) +{ + PnvLpcController *lpc = PNV_LPC(opaque); + uint32_t offset = addr >> 3; + uint64_t val = 0; + + switch (offset & 3) { + case ECCB_CTL: + case ECCB_RESET: + val = 0; + break; + case ECCB_STAT: + val = lpc->eccb_stat_reg; + lpc->eccb_stat_reg = 0; + break; + case ECCB_DATA: + val = ((uint64_t)lpc->eccb_data_reg) << 32; + break; + } + return val; +} + +static void pnv_lpc_xscom_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + PnvLpcController *lpc = PNV_LPC(opaque); + uint32_t offset = addr >> 3; + + switch (offset & 3) { + case ECCB_CTL: + pnv_lpc_do_eccb(lpc, val); + break; + case ECCB_RESET: + /* XXXX */ + break; + case ECCB_STAT: + break; + case ECCB_DATA: + lpc->eccb_data_reg = val >> 32; + break; + } +} + +static const MemoryRegionOps pnv_lpc_xscom_ops = { + .read = pnv_lpc_xscom_read, + .write = pnv_lpc_xscom_write, + .valid.min_access_size = 8, + .valid.max_access_size = 8, + .impl.min_access_size = 8, + .impl.max_access_size = 8, + .endianness = DEVICE_BIG_ENDIAN, +}; + +static uint64_t pnv_lpc_mmio_read(void *opaque, hwaddr addr, unsigned size) +{ + PnvLpcController *lpc = PNV_LPC(opaque); + uint64_t val = 0; + uint32_t opb_addr = addr & ECCB_CTL_ADDR_MASK; + MemTxResult result; + + switch (size) { + case 4: + val = address_space_ldl(&lpc->opb_as, opb_addr, MEMTXATTRS_UNSPECIFIED, + &result); + break; + case 1: + val = address_space_ldub(&lpc->opb_as, opb_addr, MEMTXATTRS_UNSPECIFIED, + &result); + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, "OPB read failed at @0x%" + HWADDR_PRIx " invalid size %d\n", addr, size); + return 0; + } + + if (result != MEMTX_OK) { + qemu_log_mask(LOG_GUEST_ERROR, "OPB read failed at @0x%" + HWADDR_PRIx "\n", addr); + } + + return val; +} + +static void pnv_lpc_mmio_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + PnvLpcController *lpc = PNV_LPC(opaque); + uint32_t opb_addr = addr & ECCB_CTL_ADDR_MASK; + MemTxResult result; + + switch (size) { + case 4: + address_space_stl(&lpc->opb_as, opb_addr, val, MEMTXATTRS_UNSPECIFIED, + &result); + break; + case 1: + address_space_stb(&lpc->opb_as, opb_addr, val, MEMTXATTRS_UNSPECIFIED, + &result); + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, "OPB write failed at @0x%" + HWADDR_PRIx " invalid size %d\n", addr, size); + return; + } + + if (result != MEMTX_OK) { + qemu_log_mask(LOG_GUEST_ERROR, "OPB write failed at @0x%" + HWADDR_PRIx "\n", addr); + } +} + +static const MemoryRegionOps pnv_lpc_mmio_ops = { + 
.read = pnv_lpc_mmio_read, + .write = pnv_lpc_mmio_write, + .impl = { + .min_access_size = 1, + .max_access_size = 4, + }, + .endianness = DEVICE_BIG_ENDIAN, +}; + +static void pnv_lpc_eval_irqs(PnvLpcController *lpc) +{ + bool lpc_to_opb_irq = false; + PnvLpcClass *plc = PNV_LPC_GET_CLASS(lpc); + + /* Update LPC controller to OPB line */ + if (lpc->lpc_hc_irqser_ctrl & LPC_HC_IRQSER_EN) { + uint32_t irqs; + + irqs = lpc->lpc_hc_irqstat & lpc->lpc_hc_irqmask; + lpc_to_opb_irq = (irqs != 0); + } + + /* We don't honor the polarity register, it's pointless and unused + * anyway + */ + if (lpc_to_opb_irq) { + lpc->opb_irq_input |= OPB_MASTER_IRQ_LPC; + } else { + lpc->opb_irq_input &= ~OPB_MASTER_IRQ_LPC; + } + + /* Update OPB internal latch */ + lpc->opb_irq_stat |= lpc->opb_irq_input & lpc->opb_irq_mask; + + /* Reflect the interrupt */ + pnv_psi_irq_set(lpc->psi, plc->psi_irq, lpc->opb_irq_stat != 0); +} + +static uint64_t lpc_hc_read(void *opaque, hwaddr addr, unsigned size) +{ + PnvLpcController *lpc = opaque; + uint64_t val = 0xfffffffffffffffful; + + switch (addr) { + case LPC_HC_FW_SEG_IDSEL: + val = lpc->lpc_hc_fw_seg_idsel; + break; + case LPC_HC_FW_RD_ACC_SIZE: + val = lpc->lpc_hc_fw_rd_acc_size; + break; + case LPC_HC_IRQSER_CTRL: + val = lpc->lpc_hc_irqser_ctrl; + break; + case LPC_HC_IRQMASK: + val = lpc->lpc_hc_irqmask; + break; + case LPC_HC_IRQSTAT: + val = lpc->lpc_hc_irqstat; + break; + case LPC_HC_ERROR_ADDRESS: + val = lpc->lpc_hc_error_addr; + break; + default: + qemu_log_mask(LOG_UNIMP, "LPC HC Unimplemented register: 0x%" + HWADDR_PRIx "\n", addr); + } + return val; +} + +static void lpc_hc_write(void *opaque, hwaddr addr, uint64_t val, + unsigned size) +{ + PnvLpcController *lpc = opaque; + + /* XXX Filter out reserved bits */ + + switch (addr) { + case LPC_HC_FW_SEG_IDSEL: + /* XXX Actually figure out how that works as this impact + * memory regions/aliases + */ + lpc->lpc_hc_fw_seg_idsel = val; + break; + case LPC_HC_FW_RD_ACC_SIZE: + lpc->lpc_hc_fw_rd_acc_size = val; + break; + case LPC_HC_IRQSER_CTRL: + lpc->lpc_hc_irqser_ctrl = val; + pnv_lpc_eval_irqs(lpc); + break; + case LPC_HC_IRQMASK: + lpc->lpc_hc_irqmask = val; + pnv_lpc_eval_irqs(lpc); + break; + case LPC_HC_IRQSTAT: + lpc->lpc_hc_irqstat &= ~val; + pnv_lpc_eval_irqs(lpc); + break; + case LPC_HC_ERROR_ADDRESS: + break; + default: + qemu_log_mask(LOG_UNIMP, "LPC HC Unimplemented register: 0x%" + HWADDR_PRIx "\n", addr); + } +} + +static const MemoryRegionOps lpc_hc_ops = { + .read = lpc_hc_read, + .write = lpc_hc_write, + .endianness = DEVICE_BIG_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + }, + .impl = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +static uint64_t opb_master_read(void *opaque, hwaddr addr, unsigned size) +{ + PnvLpcController *lpc = opaque; + uint64_t val = 0xfffffffffffffffful; + + switch (addr) { + case OPB_MASTER_LS_ROUTE0: /* TODO */ + val = lpc->opb_irq_route0; + break; + case OPB_MASTER_LS_ROUTE1: /* TODO */ + val = lpc->opb_irq_route1; + break; + case OPB_MASTER_LS_IRQ_STAT: + val = lpc->opb_irq_stat; + break; + case OPB_MASTER_LS_IRQ_MASK: + val = lpc->opb_irq_mask; + break; + case OPB_MASTER_LS_IRQ_POL: + val = lpc->opb_irq_pol; + break; + case OPB_MASTER_LS_IRQ_INPUT: + val = lpc->opb_irq_input; + break; + default: + qemu_log_mask(LOG_UNIMP, "OPBM: read on unimplemented register: 0x%" + HWADDR_PRIx "\n", addr); + } + + return val; +} + +static void opb_master_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + 
PnvLpcController *lpc = opaque; + + switch (addr) { + case OPB_MASTER_LS_ROUTE0: /* TODO */ + lpc->opb_irq_route0 = val; + break; + case OPB_MASTER_LS_ROUTE1: /* TODO */ + lpc->opb_irq_route1 = val; + break; + case OPB_MASTER_LS_IRQ_STAT: + lpc->opb_irq_stat &= ~val; + pnv_lpc_eval_irqs(lpc); + break; + case OPB_MASTER_LS_IRQ_MASK: + lpc->opb_irq_mask = val; + pnv_lpc_eval_irqs(lpc); + break; + case OPB_MASTER_LS_IRQ_POL: + lpc->opb_irq_pol = val; + pnv_lpc_eval_irqs(lpc); + break; + case OPB_MASTER_LS_IRQ_INPUT: + /* Read only */ + break; + default: + qemu_log_mask(LOG_UNIMP, "OPBM: write on unimplemented register: 0x%" + HWADDR_PRIx " val=0x%08"PRIx64"\n", addr, val); + } +} + +static const MemoryRegionOps opb_master_ops = { + .read = opb_master_read, + .write = opb_master_write, + .endianness = DEVICE_BIG_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + }, + .impl = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +static void pnv_lpc_power8_realize(DeviceState *dev, Error **errp) +{ + PnvLpcController *lpc = PNV_LPC(dev); + PnvLpcClass *plc = PNV_LPC_GET_CLASS(dev); + Error *local_err = NULL; + + plc->parent_realize(dev, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + + /* P8 uses a XSCOM region for LPC registers */ + pnv_xscom_region_init(&lpc->xscom_regs, OBJECT(lpc), + &pnv_lpc_xscom_ops, lpc, "xscom-lpc", + PNV_XSCOM_LPC_SIZE); +} + +static void pnv_lpc_power8_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PnvXScomInterfaceClass *xdc = PNV_XSCOM_INTERFACE_CLASS(klass); + PnvLpcClass *plc = PNV_LPC_CLASS(klass); + + dc->desc = "PowerNV LPC Controller POWER8"; + + xdc->dt_xscom = pnv_lpc_dt_xscom; + + plc->psi_irq = PSIHB_IRQ_LPC_I2C; + + device_class_set_parent_realize(dc, pnv_lpc_power8_realize, + &plc->parent_realize); +} + +static const TypeInfo pnv_lpc_power8_info = { + .name = TYPE_PNV8_LPC, + .parent = TYPE_PNV_LPC, + .class_init = pnv_lpc_power8_class_init, + .interfaces = (InterfaceInfo[]) { + { TYPE_PNV_XSCOM_INTERFACE }, + { } + } +}; + +static void pnv_lpc_power9_realize(DeviceState *dev, Error **errp) +{ + PnvLpcController *lpc = PNV_LPC(dev); + PnvLpcClass *plc = PNV_LPC_GET_CLASS(dev); + Error *local_err = NULL; + + plc->parent_realize(dev, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + + /* P9 uses a MMIO region */ + memory_region_init_io(&lpc->xscom_regs, OBJECT(lpc), &pnv_lpc_mmio_ops, + lpc, "lpcm", PNV9_LPCM_SIZE); +} + +static void pnv_lpc_power9_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PnvLpcClass *plc = PNV_LPC_CLASS(klass); + + dc->desc = "PowerNV LPC Controller POWER9"; + + plc->psi_irq = PSIHB9_IRQ_LPCHC; + + device_class_set_parent_realize(dc, pnv_lpc_power9_realize, + &plc->parent_realize); +} + +static const TypeInfo pnv_lpc_power9_info = { + .name = TYPE_PNV9_LPC, + .parent = TYPE_PNV_LPC, + .class_init = pnv_lpc_power9_class_init, +}; + +static void pnv_lpc_power10_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->desc = "PowerNV LPC Controller POWER10"; +} + +static const TypeInfo pnv_lpc_power10_info = { + .name = TYPE_PNV10_LPC, + .parent = TYPE_PNV9_LPC, + .class_init = pnv_lpc_power10_class_init, +}; + +static void pnv_lpc_realize(DeviceState *dev, Error **errp) +{ + PnvLpcController *lpc = PNV_LPC(dev); + + assert(lpc->psi); + + /* Reg inits */ + lpc->lpc_hc_fw_rd_acc_size = LPC_HC_FW_RD_4B; + + /* Create 
address space and backing MR for the OPB bus */ + memory_region_init(&lpc->opb_mr, OBJECT(dev), "lpc-opb", 0x100000000ull); + address_space_init(&lpc->opb_as, &lpc->opb_mr, "lpc-opb"); + + /* Create ISA IO and Mem space regions which are the root of + * the ISA bus (ie, ISA address spaces). We don't create a + * separate one for FW which we alias to memory. + */ + memory_region_init(&lpc->isa_io, OBJECT(dev), "isa-io", ISA_IO_SIZE); + memory_region_init(&lpc->isa_mem, OBJECT(dev), "isa-mem", ISA_MEM_SIZE); + memory_region_init(&lpc->isa_fw, OBJECT(dev), "isa-fw", ISA_FW_SIZE); + + /* Create windows from the OPB space to the ISA space */ + memory_region_init_alias(&lpc->opb_isa_io, OBJECT(dev), "lpc-isa-io", + &lpc->isa_io, 0, LPC_IO_OPB_SIZE); + memory_region_add_subregion(&lpc->opb_mr, LPC_IO_OPB_ADDR, + &lpc->opb_isa_io); + memory_region_init_alias(&lpc->opb_isa_mem, OBJECT(dev), "lpc-isa-mem", + &lpc->isa_mem, 0, LPC_MEM_OPB_SIZE); + memory_region_add_subregion(&lpc->opb_mr, LPC_MEM_OPB_ADDR, + &lpc->opb_isa_mem); + memory_region_init_alias(&lpc->opb_isa_fw, OBJECT(dev), "lpc-isa-fw", + &lpc->isa_fw, 0, LPC_FW_OPB_SIZE); + memory_region_add_subregion(&lpc->opb_mr, LPC_FW_OPB_ADDR, + &lpc->opb_isa_fw); + + /* Create MMIO regions for LPC HC and OPB registers */ + memory_region_init_io(&lpc->opb_master_regs, OBJECT(dev), &opb_master_ops, + lpc, "lpc-opb-master", LPC_OPB_REGS_OPB_SIZE); + memory_region_add_subregion(&lpc->opb_mr, LPC_OPB_REGS_OPB_ADDR, + &lpc->opb_master_regs); + memory_region_init_io(&lpc->lpc_hc_regs, OBJECT(dev), &lpc_hc_ops, lpc, + "lpc-hc", LPC_HC_REGS_OPB_SIZE); + memory_region_add_subregion(&lpc->opb_mr, LPC_HC_REGS_OPB_ADDR, + &lpc->lpc_hc_regs); +} + +static Property pnv_lpc_properties[] = { + DEFINE_PROP_LINK("psi", PnvLpcController, psi, TYPE_PNV_PSI, PnvPsi *), + DEFINE_PROP_END_OF_LIST(), +}; + +static void pnv_lpc_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = pnv_lpc_realize; + dc->desc = "PowerNV LPC Controller"; + device_class_set_props(dc, pnv_lpc_properties); + dc->user_creatable = false; +} + +static const TypeInfo pnv_lpc_info = { + .name = TYPE_PNV_LPC, + .parent = TYPE_DEVICE, + .instance_size = sizeof(PnvLpcController), + .class_init = pnv_lpc_class_init, + .class_size = sizeof(PnvLpcClass), + .abstract = true, +}; + +static void pnv_lpc_register_types(void) +{ + type_register_static(&pnv_lpc_info); + type_register_static(&pnv_lpc_power8_info); + type_register_static(&pnv_lpc_power9_info); + type_register_static(&pnv_lpc_power10_info); +} + +type_init(pnv_lpc_register_types) + +/* If we don't use the built-in LPC interrupt deserializer, we need + * to provide a set of qirqs for the ISA bus or things will go bad. + * + * Most machines using pre-Naples chips (without said deserializer) + * have a CPLD that will collect the SerIRQ and shoot them as a + * single level interrupt to the P8 chip. So let's setup a hook + * for doing just that. 
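A sketch of the CPLD behaviour just described, assuming only standard C: all SerIRQ lines are collected into one state word, and the single output to the chip is high whenever any latched line is high. This mirrors the handler that follows; the helper name is hypothetical:

#include <stdint.h>
#include <stdbool.h>
#include <assert.h>

/* Aggregate per-line levels into one CPLD state word; a non-zero
 * state means the single level output to the chip is asserted. */
static uint32_t cpld_set_level(uint32_t state, int n, bool level)
{
    return level ? (state | (1u << n)) : (state & ~(1u << n));
}

int main(void)
{
    uint32_t state = 0;

    state = cpld_set_level(state, 5, true);
    assert(state != 0);                 /* output asserted      */
    state = cpld_set_level(state, 5, false);
    assert(state == 0);                 /* output dropped again */
    return 0;
}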
+ */ +static void pnv_lpc_isa_irq_handler_cpld(void *opaque, int n, int level) +{ + PnvMachineState *pnv = PNV_MACHINE(qdev_get_machine()); + uint32_t old_state = pnv->cpld_irqstate; + PnvLpcController *lpc = PNV_LPC(opaque); + + if (level) { + pnv->cpld_irqstate |= 1u << n; + } else { + pnv->cpld_irqstate &= ~(1u << n); + } + + if (pnv->cpld_irqstate != old_state) { + pnv_psi_irq_set(lpc->psi, PSIHB_IRQ_EXTERNAL, pnv->cpld_irqstate != 0); + } +} + +static void pnv_lpc_isa_irq_handler(void *opaque, int n, int level) +{ + PnvLpcController *lpc = PNV_LPC(opaque); + + /* The Naples HW latches the 1 levels, clearing is done by SW */ + if (level) { + lpc->lpc_hc_irqstat |= LPC_HC_IRQ_SERIRQ0 >> n; + pnv_lpc_eval_irqs(lpc); + } +} + +ISABus *pnv_lpc_isa_create(PnvLpcController *lpc, bool use_cpld, Error **errp) +{ + Error *local_err = NULL; + ISABus *isa_bus; + qemu_irq *irqs; + qemu_irq_handler handler; + + /* let isa_bus_new() create its own bridge on SysBus otherwise + * devices specified on the command line won't find the bus and + * will fail to create. + */ + isa_bus = isa_bus_new(NULL, &lpc->isa_mem, &lpc->isa_io, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return NULL; + } + + /* Not all variants have a working serial irq decoder. If not, + * handling of LPC interrupts becomes a platform issue (some + * platforms have a CPLD to do it). + */ + if (use_cpld) { + handler = pnv_lpc_isa_irq_handler_cpld; + } else { + handler = pnv_lpc_isa_irq_handler; + } + + irqs = qemu_allocate_irqs(handler, lpc, ISA_NUM_IRQS); + + isa_bus_irqs(isa_bus, irqs); + + return isa_bus; +} diff --git a/hw/ppc/pnv_occ.c b/hw/ppc/pnv_occ.c new file mode 100644 index 000000000..5a716c256 --- /dev/null +++ b/hw/ppc/pnv_occ.c @@ -0,0 +1,302 @@ +/* + * QEMU PowerPC PowerNV Emulation of a few OCC related registers + * + * Copyright (c) 2015-2017, IBM Corporation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . 
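The OCCMISC register handled below is updated through companion AND/OR addresses, a common SCOM idiom that lets firmware clear or set bits without a read-modify-write. A hedged standalone sketch of that idiom:

#include <stdint.h>
#include <assert.h>

/* SCOM AND/OR idiom: a write to the AND address clears bits that are
 * zero in the value, a write to the OR address sets bits that are one. */
static uint64_t scom_and(uint64_t reg, uint64_t val) { return reg & val; }
static uint64_t scom_or(uint64_t reg, uint64_t val)  { return reg | val; }

int main(void)
{
    uint64_t occmisc = 0;

    occmisc = scom_or(occmisc, 1ull << 63);     /* raise the IRQ bit */
    assert(occmisc >> 63);
    occmisc = scom_and(occmisc, ~(1ull << 63)); /* clear it again    */
    assert(occmisc == 0);
    return 0;
}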
+ */ + +#include "qemu/osdep.h" +#include "target/ppc/cpu.h" +#include "qapi/error.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "hw/qdev-properties.h" +#include "hw/ppc/pnv.h" +#include "hw/ppc/pnv_xscom.h" +#include "hw/ppc/pnv_occ.h" + +#define OCB_OCI_OCCMISC 0x4020 +#define OCB_OCI_OCCMISC_AND 0x4021 +#define OCB_OCI_OCCMISC_OR 0x4022 + +/* OCC sensors */ +#define OCC_SENSOR_DATA_BLOCK_OFFSET 0x580000 +#define OCC_SENSOR_DATA_VALID 0x580001 +#define OCC_SENSOR_DATA_VERSION 0x580002 +#define OCC_SENSOR_DATA_READING_VERSION 0x580004 +#define OCC_SENSOR_DATA_NR_SENSORS 0x580008 +#define OCC_SENSOR_DATA_NAMES_OFFSET 0x580010 +#define OCC_SENSOR_DATA_READING_PING_OFFSET 0x580014 +#define OCC_SENSOR_DATA_READING_PONG_OFFSET 0x58000c +#define OCC_SENSOR_DATA_NAME_LENGTH 0x58000d +#define OCC_SENSOR_NAME_STRUCTURE_TYPE 0x580023 +#define OCC_SENSOR_LOC_CORE 0x580022 +#define OCC_SENSOR_LOC_GPU 0x580020 +#define OCC_SENSOR_TYPE_POWER 0x580003 +#define OCC_SENSOR_NAME 0x580005 +#define HWMON_SENSORS_MASK 0x58001e +#define SLW_IMAGE_BASE 0x0 + +static void pnv_occ_set_misc(PnvOCC *occ, uint64_t val) +{ + bool irq_state; + PnvOCCClass *poc = PNV_OCC_GET_CLASS(occ); + + val &= 0xffff000000000000ull; + + occ->occmisc = val; + irq_state = !!(val >> 63); + pnv_psi_irq_set(occ->psi, poc->psi_irq, irq_state); +} + +static uint64_t pnv_occ_power8_xscom_read(void *opaque, hwaddr addr, + unsigned size) +{ + PnvOCC *occ = PNV_OCC(opaque); + uint32_t offset = addr >> 3; + uint64_t val = 0; + + switch (offset) { + case OCB_OCI_OCCMISC: + val = occ->occmisc; + break; + default: + qemu_log_mask(LOG_UNIMP, "OCC Unimplemented register: Ox%" + HWADDR_PRIx "\n", addr >> 3); + } + return val; +} + +static void pnv_occ_power8_xscom_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + PnvOCC *occ = PNV_OCC(opaque); + uint32_t offset = addr >> 3; + + switch (offset) { + case OCB_OCI_OCCMISC_AND: + pnv_occ_set_misc(occ, occ->occmisc & val); + break; + case OCB_OCI_OCCMISC_OR: + pnv_occ_set_misc(occ, occ->occmisc | val); + break; + case OCB_OCI_OCCMISC: + pnv_occ_set_misc(occ, val); + break; + default: + qemu_log_mask(LOG_UNIMP, "OCC Unimplemented register: Ox%" + HWADDR_PRIx "\n", addr >> 3); + } +} + +static uint64_t pnv_occ_common_area_read(void *opaque, hwaddr addr, + unsigned width) +{ + switch (addr) { + /* + * occ-sensor sanity check that asserts the sensor + * header block + */ + case OCC_SENSOR_DATA_BLOCK_OFFSET: + case OCC_SENSOR_DATA_VALID: + case OCC_SENSOR_DATA_VERSION: + case OCC_SENSOR_DATA_READING_VERSION: + case OCC_SENSOR_DATA_NR_SENSORS: + case OCC_SENSOR_DATA_NAMES_OFFSET: + case OCC_SENSOR_DATA_READING_PING_OFFSET: + case OCC_SENSOR_DATA_READING_PONG_OFFSET: + case OCC_SENSOR_NAME_STRUCTURE_TYPE: + return 1; + case OCC_SENSOR_DATA_NAME_LENGTH: + return 0x30; + case OCC_SENSOR_LOC_CORE: + return 0x0040; + case OCC_SENSOR_TYPE_POWER: + return 0x0080; + case OCC_SENSOR_NAME: + return 0x1000; + case HWMON_SENSORS_MASK: + case OCC_SENSOR_LOC_GPU: + return 0x8e00; + case SLW_IMAGE_BASE: + return 0x1000000000000000; + } + return 0; +} + +static void pnv_occ_common_area_write(void *opaque, hwaddr addr, + uint64_t val, unsigned width) +{ + /* callback function defined to occ common area write */ + return; +} + +static const MemoryRegionOps pnv_occ_power8_xscom_ops = { + .read = pnv_occ_power8_xscom_read, + .write = pnv_occ_power8_xscom_write, + .valid.min_access_size = 8, + .valid.max_access_size = 8, + .impl.min_access_size = 8, + .impl.max_access_size = 8, + .endianness = 
+
+static const MemoryRegionOps pnv_occ_power8_xscom_ops = {
+    .read = pnv_occ_power8_xscom_read,
+    .write = pnv_occ_power8_xscom_write,
+    .valid.min_access_size = 8,
+    .valid.max_access_size = 8,
+    .impl.min_access_size = 8,
+    .impl.max_access_size = 8,
+    .endianness = DEVICE_BIG_ENDIAN,
+};
+
+const MemoryRegionOps pnv_occ_sram_ops = {
+    .read = pnv_occ_common_area_read,
+    .write = pnv_occ_common_area_write,
+    .valid.min_access_size = 1,
+    .valid.max_access_size = 8,
+    .impl.min_access_size = 1,
+    .impl.max_access_size = 8,
+    .endianness = DEVICE_BIG_ENDIAN,
+};
+
+static void pnv_occ_power8_class_init(ObjectClass *klass, void *data)
+{
+    PnvOCCClass *poc = PNV_OCC_CLASS(klass);
+
+    poc->xscom_size = PNV_XSCOM_OCC_SIZE;
+    poc->xscom_ops = &pnv_occ_power8_xscom_ops;
+    poc->psi_irq = PSIHB_IRQ_OCC;
+}
+
+static const TypeInfo pnv_occ_power8_type_info = {
+    .name = TYPE_PNV8_OCC,
+    .parent = TYPE_PNV_OCC,
+    .instance_size = sizeof(PnvOCC),
+    .class_init = pnv_occ_power8_class_init,
+};
+
+#define P9_OCB_OCI_OCCMISC 0x6080
+#define P9_OCB_OCI_OCCMISC_CLEAR 0x6081
+#define P9_OCB_OCI_OCCMISC_OR 0x6082
+
+
+static uint64_t pnv_occ_power9_xscom_read(void *opaque, hwaddr addr,
+                                          unsigned size)
+{
+    PnvOCC *occ = PNV_OCC(opaque);
+    uint32_t offset = addr >> 3;
+    uint64_t val = 0;
+
+    switch (offset) {
+    case P9_OCB_OCI_OCCMISC:
+        val = occ->occmisc;
+        break;
+    default:
+        qemu_log_mask(LOG_UNIMP, "OCC Unimplemented register: 0x%"
+                      HWADDR_PRIx "\n", addr >> 3);
+    }
+    return val;
+}
+
+static void pnv_occ_power9_xscom_write(void *opaque, hwaddr addr,
+                                       uint64_t val, unsigned size)
+{
+    PnvOCC *occ = PNV_OCC(opaque);
+    uint32_t offset = addr >> 3;
+
+    switch (offset) {
+    case P9_OCB_OCI_OCCMISC_CLEAR:
+        pnv_occ_set_misc(occ, 0);
+        break;
+    case P9_OCB_OCI_OCCMISC_OR:
+        pnv_occ_set_misc(occ, occ->occmisc | val);
+        break;
+    case P9_OCB_OCI_OCCMISC:
+        pnv_occ_set_misc(occ, val);
+        break;
+    default:
+        qemu_log_mask(LOG_UNIMP, "OCC Unimplemented register: 0x%"
+                      HWADDR_PRIx "\n", addr >> 3);
+    }
+}
+
+static const MemoryRegionOps pnv_occ_power9_xscom_ops = {
+    .read = pnv_occ_power9_xscom_read,
+    .write = pnv_occ_power9_xscom_write,
+    .valid.min_access_size = 8,
+    .valid.max_access_size = 8,
+    .impl.min_access_size = 8,
+    .impl.max_access_size = 8,
+    .endianness = DEVICE_BIG_ENDIAN,
+};
+
+static void pnv_occ_power9_class_init(ObjectClass *klass, void *data)
+{
+    PnvOCCClass *poc = PNV_OCC_CLASS(klass);
+
+    poc->xscom_size = PNV9_XSCOM_OCC_SIZE;
+    poc->xscom_ops = &pnv_occ_power9_xscom_ops;
+    poc->psi_irq = PSIHB9_IRQ_OCC;
+}
+
+static const TypeInfo pnv_occ_power9_type_info = {
+    .name = TYPE_PNV9_OCC,
+    .parent = TYPE_PNV_OCC,
+    .instance_size = sizeof(PnvOCC),
+    .class_init = pnv_occ_power9_class_init,
+};
+
+static void pnv_occ_realize(DeviceState *dev, Error **errp)
+{
+    PnvOCC *occ = PNV_OCC(dev);
+    PnvOCCClass *poc = PNV_OCC_GET_CLASS(occ);
+
+    assert(occ->psi);
+
+    occ->occmisc = 0;
+
+    /* XScom region for OCC registers */
+    pnv_xscom_region_init(&occ->xscom_regs, OBJECT(dev), poc->xscom_ops,
+                          occ, "xscom-occ", poc->xscom_size);
+
+    /* OCC common area mmio region for OCC SRAM registers */
+    memory_region_init_io(&occ->sram_regs, OBJECT(dev), &pnv_occ_sram_ops,
+                          occ, "occ-common-area",
+                          PNV_OCC_SENSOR_DATA_BLOCK_SIZE);
+}
+
+static Property pnv_occ_properties[] = {
+    DEFINE_PROP_LINK("psi", PnvOCC, psi, TYPE_PNV_PSI, PnvPsi *),
+    DEFINE_PROP_END_OF_LIST(),
+};
+
+static void pnv_occ_class_init(ObjectClass *klass, void *data)
+{
+    DeviceClass *dc = DEVICE_CLASS(klass);
+
+    dc->realize = pnv_occ_realize;
+    dc->desc = "PowerNV OCC Controller";
+    device_class_set_props(dc, pnv_occ_properties);
+    dc->user_creatable = false;
+}
+
+static const TypeInfo pnv_occ_type_info = {
+    .name = TYPE_PNV_OCC,
+    .parent = TYPE_DEVICE,
+    .instance_size = sizeof(PnvOCC),
.class_init = pnv_occ_class_init, + .class_size = sizeof(PnvOCCClass), + .abstract = true, +}; + +static void pnv_occ_register_types(void) +{ + type_register_static(&pnv_occ_type_info); + type_register_static(&pnv_occ_power8_type_info); + type_register_static(&pnv_occ_power9_type_info); +} + +type_init(pnv_occ_register_types); diff --git a/hw/ppc/pnv_pnor.c b/hw/ppc/pnv_pnor.c new file mode 100644 index 000000000..83ecccca2 --- /dev/null +++ b/hw/ppc/pnv_pnor.c @@ -0,0 +1,141 @@ +/* + * QEMU PowerNV PNOR simple model + * + * Copyright (c) 2015-2019, IBM Corporation. + * + * This code is licensed under the GPL version 2 or later. See the + * COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/error-report.h" +#include "qemu/units.h" +#include "sysemu/block-backend.h" +#include "sysemu/blockdev.h" +#include "hw/loader.h" +#include "hw/ppc/pnv_pnor.h" +#include "hw/qdev-properties.h" +#include "hw/qdev-properties-system.h" + +static uint64_t pnv_pnor_read(void *opaque, hwaddr addr, unsigned size) +{ + PnvPnor *s = PNV_PNOR(opaque); + uint64_t ret = 0; + int i; + + for (i = 0; i < size; i++) { + ret |= (uint64_t) s->storage[addr + i] << (8 * (size - i - 1)); + } + + return ret; +} + +static void pnv_pnor_update(PnvPnor *s, int offset, int size) +{ + int offset_end; + int ret; + + if (!s->blk || !blk_is_writable(s->blk)) { + return; + } + + offset_end = offset + size; + offset = QEMU_ALIGN_DOWN(offset, BDRV_SECTOR_SIZE); + offset_end = QEMU_ALIGN_UP(offset_end, BDRV_SECTOR_SIZE); + + ret = blk_pwrite(s->blk, offset, s->storage + offset, + offset_end - offset, 0); + if (ret < 0) { + error_report("Could not update PNOR offset=0x%" PRIx32" : %s", offset, + strerror(-ret)); + } +} + +static void pnv_pnor_write(void *opaque, hwaddr addr, uint64_t data, + unsigned size) +{ + PnvPnor *s = PNV_PNOR(opaque); + int i; + + for (i = 0; i < size; i++) { + s->storage[addr + i] = (data >> (8 * (size - i - 1))) & 0xFF; + } + pnv_pnor_update(s, addr, size); +} + +/* + * TODO: Check endianness: skiboot is BIG, Aspeed AHB is LITTLE, flash + * is BIG. + */ +static const MemoryRegionOps pnv_pnor_ops = { + .read = pnv_pnor_read, + .write = pnv_pnor_write, + .endianness = DEVICE_BIG_ENDIAN, + .valid = { + .min_access_size = 1, + .max_access_size = 4, + }, +}; + +static void pnv_pnor_realize(DeviceState *dev, Error **errp) +{ + PnvPnor *s = PNV_PNOR(dev); + int ret; + + if (s->blk) { + uint64_t perm = BLK_PERM_CONSISTENT_READ | + (blk_supports_write_perm(s->blk) ? 
BLK_PERM_WRITE : 0);
+        ret = blk_set_perm(s->blk, perm, BLK_PERM_ALL, errp);
+        if (ret < 0) {
+            return;
+        }
+
+        s->size = blk_getlength(s->blk);
+        if (s->size <= 0) {
+            error_setg(errp, "failed to get flash size");
+            return;
+        }
+
+        s->storage = blk_blockalign(s->blk, s->size);
+
+        if (blk_pread(s->blk, 0, s->storage, s->size) != s->size) {
+            error_setg(errp, "failed to read the initial flash content");
+            return;
+        }
+    } else {
+        s->storage = blk_blockalign(NULL, s->size);
+        memset(s->storage, 0xFF, s->size);
+    }
+
+    memory_region_init_io(&s->mmio, OBJECT(s), &pnv_pnor_ops, s,
+                          TYPE_PNV_PNOR, s->size);
+}
+
+static Property pnv_pnor_properties[] = {
+    DEFINE_PROP_INT64("size", PnvPnor, size, 128 * MiB),
+    DEFINE_PROP_DRIVE("drive", PnvPnor, blk),
+    DEFINE_PROP_END_OF_LIST(),
+};
+
+static void pnv_pnor_class_init(ObjectClass *klass, void *data)
+{
+    DeviceClass *dc = DEVICE_CLASS(klass);
+
+    dc->realize = pnv_pnor_realize;
+    device_class_set_props(dc, pnv_pnor_properties);
+}
+
+static const TypeInfo pnv_pnor_info = {
+    .name = TYPE_PNV_PNOR,
+    .parent = TYPE_SYS_BUS_DEVICE,
+    .instance_size = sizeof(PnvPnor),
+    .class_init = pnv_pnor_class_init,
+};
+
+static void pnv_pnor_register_types(void)
+{
+    type_register_static(&pnv_pnor_info);
+}
+
+type_init(pnv_pnor_register_types)
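+
+/*
+ * Worked example of the byte packing in pnv_pnor_read()/_write()
+ * above: for a 4-byte read at offset 0, bytes storage[0..3] are
+ * assembled MSB-first, i.e. (storage[0] << 24) | (storage[1] << 16) |
+ * (storage[2] << 8) | storage[3], which is consistent with the
+ * DEVICE_BIG_ENDIAN declaration on pnv_pnor_ops.
+ */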
diff --git a/hw/ppc/pnv_psi.c b/hw/ppc/pnv_psi.c
new file mode 100644
index 000000000..cd9a2c595
--- /dev/null
+++ b/hw/ppc/pnv_psi.c
@@ -0,0 +1,967 @@
+/*
+ * QEMU PowerPC PowerNV Processor Service Interface (PSI) model
+ *
+ * Copyright (c) 2015-2017, IBM Corporation.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/irq.h"
+#include "target/ppc/cpu.h"
+#include "qemu/log.h"
+#include "qemu/module.h"
+#include "sysemu/reset.h"
+#include "qapi/error.h"
+#include "monitor/monitor.h"
+
+
+#include "hw/ppc/fdt.h"
+#include "hw/ppc/pnv.h"
+#include "hw/ppc/pnv_xscom.h"
+#include "hw/qdev-properties.h"
+#include "hw/ppc/pnv_psi.h"
+
+#include <libfdt.h>
+
+#define PSIHB_XSCOM_FIR_RW 0x00
+#define PSIHB_XSCOM_FIR_AND 0x01
+#define PSIHB_XSCOM_FIR_OR 0x02
+#define PSIHB_XSCOM_FIRMASK_RW 0x03
+#define PSIHB_XSCOM_FIRMASK_AND 0x04
+#define PSIHB_XSCOM_FIRMASK_OR 0x05
+#define PSIHB_XSCOM_FIRACT0 0x06
+#define PSIHB_XSCOM_FIRACT1 0x07
+
+/* Host Bridge Base Address Register */
+#define PSIHB_XSCOM_BAR 0x0a
+#define PSIHB_BAR_EN 0x0000000000000001ull
+
+/* FSP Base Address Register */
+#define PSIHB_XSCOM_FSPBAR 0x0b
+
+/* PSI Host Bridge Control/Status Register */
+#define PSIHB_XSCOM_CR 0x0e
+#define PSIHB_CR_FSP_CMD_ENABLE 0x8000000000000000ull
+#define PSIHB_CR_FSP_MMIO_ENABLE 0x4000000000000000ull
+#define PSIHB_CR_FSP_IRQ_ENABLE 0x1000000000000000ull
+#define PSIHB_CR_FSP_ERR_RSP_ENABLE 0x0800000000000000ull
+#define PSIHB_CR_PSI_LINK_ENABLE 0x0400000000000000ull
+#define PSIHB_CR_FSP_RESET 0x0200000000000000ull
+#define PSIHB_CR_PSIHB_RESET 0x0100000000000000ull
+#define PSIHB_CR_PSI_IRQ 0x0000800000000000ull
+#define PSIHB_CR_FSP_IRQ 0x0000400000000000ull
+#define PSIHB_CR_FSP_LINK_ACTIVE 0x0000200000000000ull
+#define PSIHB_CR_IRQ_CMD_EXPECT 0x0000010000000000ull
+    /* and more ... */
+
+/* PSIHB Status / Error Mask Register */
+#define PSIHB_XSCOM_SEMR 0x0f
+
+/* XIVR, to signal interrupts to the CEC firmware. more XIVR below. */
+#define PSIHB_XSCOM_XIVR_FSP 0x10
+#define PSIHB_XIVR_SERVER_SH 40
+#define PSIHB_XIVR_SERVER_MSK (0xffffull << PSIHB_XIVR_SERVER_SH)
+#define PSIHB_XIVR_PRIO_SH 32
+#define PSIHB_XIVR_PRIO_MSK (0xffull << PSIHB_XIVR_PRIO_SH)
+#define PSIHB_XIVR_SRC_SH 29
+#define PSIHB_XIVR_SRC_MSK (0x7ull << PSIHB_XIVR_SRC_SH)
+#define PSIHB_XIVR_PENDING 0x01000000ull
+
+/* PSI Host Bridge Set Control/Status Register */
+#define PSIHB_XSCOM_SCR 0x12
+
+/* PSI Host Bridge Clear Control/Status Register */
+#define PSIHB_XSCOM_CCR 0x13
+
+/* DMA Upper Address Register */
+#define PSIHB_XSCOM_DMA_UPADD 0x14
+
+/* Interrupt Status */
+#define PSIHB_XSCOM_IRQ_STAT 0x15
+#define PSIHB_IRQ_STAT_OCC 0x0000001000000000ull
+#define PSIHB_IRQ_STAT_FSI 0x0000000800000000ull
+#define PSIHB_IRQ_STAT_LPCI2C 0x0000000400000000ull
+#define PSIHB_IRQ_STAT_LOCERR 0x0000000200000000ull
+#define PSIHB_IRQ_STAT_EXT 0x0000000100000000ull
+
+/* remaining XIVR */
+#define PSIHB_XSCOM_XIVR_OCC 0x16
+#define PSIHB_XSCOM_XIVR_FSI 0x17
+#define PSIHB_XSCOM_XIVR_LPCI2C 0x18
+#define PSIHB_XSCOM_XIVR_LOCERR 0x19
+#define PSIHB_XSCOM_XIVR_EXT 0x1a
+
+/* Interrupt Requester Source Compare Register */
+#define PSIHB_XSCOM_IRSN 0x1b
+#define PSIHB_IRSN_COMP_SH 45
+#define PSIHB_IRSN_COMP_MSK (0x7ffffull << PSIHB_IRSN_COMP_SH)
+#define PSIHB_IRSN_IRQ_MUX 0x0000000800000000ull
+#define PSIHB_IRSN_IRQ_RESET 0x0000000400000000ull
+#define PSIHB_IRSN_DOWNSTREAM_EN 0x0000000200000000ull
+#define PSIHB_IRSN_UPSTREAM_EN 0x0000000100000000ull
+#define PSIHB_IRSN_COMPMASK_SH 13
+#define PSIHB_IRSN_COMPMASK_MSK (0x7ffffull << PSIHB_IRSN_COMPMASK_SH)
+
+#define PSIHB_BAR_MASK 0x0003fffffff00000ull
+#define PSIHB_FSPBAR_MASK 0x0003ffff00000000ull
+
+#define PSIHB9_BAR_MASK 0x00fffffffff00000ull
+#define PSIHB9_FSPBAR_MASK 0x00ffffff00000000ull
+
+#define PSIHB_REG(addr) (((addr) >> 3) + PSIHB_XSCOM_BAR)
+
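+/*
+ * Worked example of the PSIHB_REG() relation (see also the
+ * "xscom = (mmio + 0x50) >> 3" note further down): an MMIO access at
+ * offset 0x20 (PSIHB9_CR) selects register index
+ * (0x20 >> 3) + 0x0a = 0x0e, i.e. PSIHB_XSCOM_CR, which matches
+ * (0x20 + 0x50) >> 3 = 0x0e. The P9 MMIO offsets thus share storage
+ * in psi->regs[] with the P8 XSCOM register indices.
+ */
+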
+static void pnv_psi_set_bar(PnvPsi *psi, uint64_t bar)
+{
+    PnvPsiClass *ppc = PNV_PSI_GET_CLASS(psi);
+    MemoryRegion *sysmem = get_system_memory();
+    uint64_t old = psi->regs[PSIHB_XSCOM_BAR];
+
+    psi->regs[PSIHB_XSCOM_BAR] = bar & (ppc->bar_mask | PSIHB_BAR_EN);
+
+    /* Update MR, always remove it first */
+    if (old & PSIHB_BAR_EN) {
+        memory_region_del_subregion(sysmem, &psi->regs_mr);
+    }
+
+    /* Then add it back if needed */
+    if (bar & PSIHB_BAR_EN) {
+        uint64_t addr = bar & ppc->bar_mask;
+        memory_region_add_subregion(sysmem, addr, &psi->regs_mr);
+    }
+}
+
+static void pnv_psi_update_fsp_mr(PnvPsi *psi)
+{
+    /* TODO: Update FSP MR if/when we support FSP BAR */
+}
+
+static void pnv_psi_set_cr(PnvPsi *psi, uint64_t cr)
+{
+    uint64_t old = psi->regs[PSIHB_XSCOM_CR];
+
+    psi->regs[PSIHB_XSCOM_CR] = cr;
+
+    /* Check some bit changes */
+    if ((old ^ psi->regs[PSIHB_XSCOM_CR]) & PSIHB_CR_FSP_MMIO_ENABLE) {
+        pnv_psi_update_fsp_mr(psi);
+    }
+}
+
+static void pnv_psi_set_irsn(PnvPsi *psi, uint64_t val)
+{
+    ICSState *ics = &PNV8_PSI(psi)->ics;
+
+    /* In this model we ignore the up/down enable bits for now
+     * as SW doesn't use them (other than setting them at boot).
+     * We ignore IRQ_MUX, its meaning isn't clear, we don't use it,
+     * and finally we ignore reset (XXX fix that ?)
+     */
+    psi->regs[PSIHB_XSCOM_IRSN] = val & (PSIHB_IRSN_COMP_MSK |
+                                         PSIHB_IRSN_IRQ_MUX |
+                                         PSIHB_IRSN_IRQ_RESET |
+                                         PSIHB_IRSN_DOWNSTREAM_EN |
+                                         PSIHB_IRSN_UPSTREAM_EN);
+
+    /* We ignore the compare mask as well, our ICS emulation is too
+     * simplistic to make any use of it, and we extract the offset
+     * from the compare value
+     */
+    ics->offset = (val & PSIHB_IRSN_COMP_MSK) >> PSIHB_IRSN_COMP_SH;
+}
+
+/*
+ * FSP and PSI interrupts are muxed under the same number.
+ */
+static const uint32_t xivr_regs[] = {
+    [PSIHB_IRQ_PSI] = PSIHB_XSCOM_XIVR_FSP,
+    [PSIHB_IRQ_FSP] = PSIHB_XSCOM_XIVR_FSP,
+    [PSIHB_IRQ_OCC] = PSIHB_XSCOM_XIVR_OCC,
+    [PSIHB_IRQ_FSI] = PSIHB_XSCOM_XIVR_FSI,
+    [PSIHB_IRQ_LPC_I2C] = PSIHB_XSCOM_XIVR_LPCI2C,
+    [PSIHB_IRQ_LOCAL_ERR] = PSIHB_XSCOM_XIVR_LOCERR,
+    [PSIHB_IRQ_EXTERNAL] = PSIHB_XSCOM_XIVR_EXT,
+};
+
+static const uint32_t stat_regs[] = {
+    [PSIHB_IRQ_PSI] = PSIHB_XSCOM_CR,
+    [PSIHB_IRQ_FSP] = PSIHB_XSCOM_CR,
+    [PSIHB_IRQ_OCC] = PSIHB_XSCOM_IRQ_STAT,
+    [PSIHB_IRQ_FSI] = PSIHB_XSCOM_IRQ_STAT,
+    [PSIHB_IRQ_LPC_I2C] = PSIHB_XSCOM_IRQ_STAT,
+    [PSIHB_IRQ_LOCAL_ERR] = PSIHB_XSCOM_IRQ_STAT,
+    [PSIHB_IRQ_EXTERNAL] = PSIHB_XSCOM_IRQ_STAT,
+};
+
+static const uint64_t stat_bits[] = {
+    [PSIHB_IRQ_PSI] = PSIHB_CR_PSI_IRQ,
+    [PSIHB_IRQ_FSP] = PSIHB_CR_FSP_IRQ,
+    [PSIHB_IRQ_OCC] = PSIHB_IRQ_STAT_OCC,
+    [PSIHB_IRQ_FSI] = PSIHB_IRQ_STAT_FSI,
+    [PSIHB_IRQ_LPC_I2C] = PSIHB_IRQ_STAT_LPCI2C,
+    [PSIHB_IRQ_LOCAL_ERR] = PSIHB_IRQ_STAT_LOCERR,
+    [PSIHB_IRQ_EXTERNAL] = PSIHB_IRQ_STAT_EXT,
+};
+
+void pnv_psi_irq_set(PnvPsi *psi, int irq, bool state)
+{
+    PNV_PSI_GET_CLASS(psi)->irq_set(psi, irq, state);
+}
+
+static void pnv_psi_power8_irq_set(PnvPsi *psi, int irq, bool state)
+{
+    uint32_t xivr_reg;
+    uint32_t stat_reg;
+    uint32_t src;
+    bool masked;
+
+    if (irq > PSIHB_IRQ_EXTERNAL) {
+        qemu_log_mask(LOG_GUEST_ERROR, "PSI: Unsupported irq %d\n", irq);
+        return;
+    }
+
+    xivr_reg = xivr_regs[irq];
+    stat_reg = stat_regs[irq];
+
+    src = (psi->regs[xivr_reg] & PSIHB_XIVR_SRC_MSK) >> PSIHB_XIVR_SRC_SH;
+    if (state) {
+        psi->regs[stat_reg] |= stat_bits[irq];
+        /* TODO: optimization, check mask here.
That means + * re-evaluating when unmasking + */ + qemu_irq_raise(psi->qirqs[src]); + } else { + psi->regs[stat_reg] &= ~stat_bits[irq]; + + /* FSP and PSI are muxed so don't lower if either is still set */ + if (stat_reg != PSIHB_XSCOM_CR || + !(psi->regs[stat_reg] & (PSIHB_CR_PSI_IRQ | PSIHB_CR_FSP_IRQ))) { + qemu_irq_lower(psi->qirqs[src]); + } else { + state = true; + } + } + + /* Note about the emulation of the pending bit: This isn't + * entirely correct. The pending bit should be cleared when the + * EOI has been received. However, we don't have callbacks on EOI + * (especially not under KVM) so no way to emulate that properly, + * so instead we just set that bit as the logical "output" of the + * XIVR (ie pending & !masked) + * + * CLG: We could define a new ICS object with a custom eoi() + * handler to clear the pending bit. But I am not sure this would + * be useful for the software anyhow. + */ + masked = (psi->regs[xivr_reg] & PSIHB_XIVR_PRIO_MSK) == PSIHB_XIVR_PRIO_MSK; + if (state && !masked) { + psi->regs[xivr_reg] |= PSIHB_XIVR_PENDING; + } else { + psi->regs[xivr_reg] &= ~PSIHB_XIVR_PENDING; + } +} + +static void pnv_psi_set_xivr(PnvPsi *psi, uint32_t reg, uint64_t val) +{ + ICSState *ics = &PNV8_PSI(psi)->ics; + uint16_t server; + uint8_t prio; + uint8_t src; + + psi->regs[reg] = (psi->regs[reg] & PSIHB_XIVR_PENDING) | + (val & (PSIHB_XIVR_SERVER_MSK | + PSIHB_XIVR_PRIO_MSK | + PSIHB_XIVR_SRC_MSK)); + val = psi->regs[reg]; + server = (val & PSIHB_XIVR_SERVER_MSK) >> PSIHB_XIVR_SERVER_SH; + prio = (val & PSIHB_XIVR_PRIO_MSK) >> PSIHB_XIVR_PRIO_SH; + src = (val & PSIHB_XIVR_SRC_MSK) >> PSIHB_XIVR_SRC_SH; + + if (src >= PSI_NUM_INTERRUPTS) { + qemu_log_mask(LOG_GUEST_ERROR, "PSI: Unsupported irq %d\n", src); + return; + } + + /* Remove pending bit if the IRQ is masked */ + if ((psi->regs[reg] & PSIHB_XIVR_PRIO_MSK) == PSIHB_XIVR_PRIO_MSK) { + psi->regs[reg] &= ~PSIHB_XIVR_PENDING; + } + + /* The low order 2 bits are the link pointer (Type II interrupts). + * Shift back to get a valid IRQ server. + */ + server >>= 2; + + /* Now because of source remapping, weird things can happen + * if you change the source number dynamically, our simple ICS + * doesn't deal with remapping. So we just poke a different + * ICS entry based on what source number was written. This will + * do for now but a more accurate implementation would instead + * use a fixed server/prio and a remapper of the generated irq. 
+ */ + ics_write_xive(ics, src, server, prio, prio); +} + +static uint64_t pnv_psi_reg_read(PnvPsi *psi, uint32_t offset, bool mmio) +{ + uint64_t val = 0xffffffffffffffffull; + + switch (offset) { + case PSIHB_XSCOM_FIR_RW: + case PSIHB_XSCOM_FIRACT0: + case PSIHB_XSCOM_FIRACT1: + case PSIHB_XSCOM_BAR: + case PSIHB_XSCOM_FSPBAR: + case PSIHB_XSCOM_CR: + case PSIHB_XSCOM_XIVR_FSP: + case PSIHB_XSCOM_XIVR_OCC: + case PSIHB_XSCOM_XIVR_FSI: + case PSIHB_XSCOM_XIVR_LPCI2C: + case PSIHB_XSCOM_XIVR_LOCERR: + case PSIHB_XSCOM_XIVR_EXT: + case PSIHB_XSCOM_IRQ_STAT: + case PSIHB_XSCOM_SEMR: + case PSIHB_XSCOM_DMA_UPADD: + case PSIHB_XSCOM_IRSN: + val = psi->regs[offset]; + break; + default: + qemu_log_mask(LOG_UNIMP, "PSI: read at 0x%" PRIx32 "\n", offset); + } + return val; +} + +static void pnv_psi_reg_write(PnvPsi *psi, uint32_t offset, uint64_t val, + bool mmio) +{ + switch (offset) { + case PSIHB_XSCOM_FIR_RW: + case PSIHB_XSCOM_FIRACT0: + case PSIHB_XSCOM_FIRACT1: + case PSIHB_XSCOM_SEMR: + case PSIHB_XSCOM_DMA_UPADD: + psi->regs[offset] = val; + break; + case PSIHB_XSCOM_FIR_OR: + psi->regs[PSIHB_XSCOM_FIR_RW] |= val; + break; + case PSIHB_XSCOM_FIR_AND: + psi->regs[PSIHB_XSCOM_FIR_RW] &= val; + break; + case PSIHB_XSCOM_BAR: + /* Only XSCOM can write this one */ + if (!mmio) { + pnv_psi_set_bar(psi, val); + } else { + qemu_log_mask(LOG_GUEST_ERROR, "PSI: invalid write of BAR\n"); + } + break; + case PSIHB_XSCOM_FSPBAR: + psi->regs[PSIHB_XSCOM_FSPBAR] = val & PSIHB_FSPBAR_MASK; + pnv_psi_update_fsp_mr(psi); + break; + case PSIHB_XSCOM_CR: + pnv_psi_set_cr(psi, val); + break; + case PSIHB_XSCOM_SCR: + pnv_psi_set_cr(psi, psi->regs[PSIHB_XSCOM_CR] | val); + break; + case PSIHB_XSCOM_CCR: + pnv_psi_set_cr(psi, psi->regs[PSIHB_XSCOM_CR] & ~val); + break; + case PSIHB_XSCOM_XIVR_FSP: + case PSIHB_XSCOM_XIVR_OCC: + case PSIHB_XSCOM_XIVR_FSI: + case PSIHB_XSCOM_XIVR_LPCI2C: + case PSIHB_XSCOM_XIVR_LOCERR: + case PSIHB_XSCOM_XIVR_EXT: + pnv_psi_set_xivr(psi, offset, val); + break; + case PSIHB_XSCOM_IRQ_STAT: + /* Read only */ + qemu_log_mask(LOG_GUEST_ERROR, "PSI: invalid write of IRQ_STAT\n"); + break; + case PSIHB_XSCOM_IRSN: + pnv_psi_set_irsn(psi, val); + break; + default: + qemu_log_mask(LOG_UNIMP, "PSI: write at 0x%" PRIx32 "\n", offset); + } +} + +/* + * The values of the registers when accessed through the MMIO region + * follow the relation : xscom = (mmio + 0x50) >> 3 + */ +static uint64_t pnv_psi_mmio_read(void *opaque, hwaddr addr, unsigned size) +{ + return pnv_psi_reg_read(opaque, PSIHB_REG(addr), true); +} + +static void pnv_psi_mmio_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + pnv_psi_reg_write(opaque, PSIHB_REG(addr), val, true); +} + +static const MemoryRegionOps psi_mmio_ops = { + .read = pnv_psi_mmio_read, + .write = pnv_psi_mmio_write, + .endianness = DEVICE_BIG_ENDIAN, + .valid = { + .min_access_size = 8, + .max_access_size = 8, + }, + .impl = { + .min_access_size = 8, + .max_access_size = 8, + }, +}; + +static uint64_t pnv_psi_xscom_read(void *opaque, hwaddr addr, unsigned size) +{ + return pnv_psi_reg_read(opaque, addr >> 3, false); +} + +static void pnv_psi_xscom_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + pnv_psi_reg_write(opaque, addr >> 3, val, false); +} + +static const MemoryRegionOps pnv_psi_xscom_ops = { + .read = pnv_psi_xscom_read, + .write = pnv_psi_xscom_write, + .endianness = DEVICE_BIG_ENDIAN, + .valid = { + .min_access_size = 8, + .max_access_size = 8, + }, + .impl = { + .min_access_size = 8, + .max_access_size 
= 8, + } +}; + +static void pnv_psi_reset(DeviceState *dev) +{ + PnvPsi *psi = PNV_PSI(dev); + + memset(psi->regs, 0x0, sizeof(psi->regs)); + + psi->regs[PSIHB_XSCOM_BAR] = psi->bar | PSIHB_BAR_EN; +} + +static void pnv_psi_reset_handler(void *dev) +{ + device_cold_reset(DEVICE(dev)); +} + +static void pnv_psi_realize(DeviceState *dev, Error **errp) +{ + PnvPsi *psi = PNV_PSI(dev); + + /* Default BAR for MMIO region */ + pnv_psi_set_bar(psi, psi->bar | PSIHB_BAR_EN); + + qemu_register_reset(pnv_psi_reset_handler, dev); +} + +static void pnv_psi_power8_instance_init(Object *obj) +{ + Pnv8Psi *psi8 = PNV8_PSI(obj); + + object_initialize_child(obj, "ics-psi", &psi8->ics, TYPE_ICS); + object_property_add_alias(obj, ICS_PROP_XICS, OBJECT(&psi8->ics), + ICS_PROP_XICS); +} + +static const uint8_t irq_to_xivr[] = { + PSIHB_XSCOM_XIVR_FSP, + PSIHB_XSCOM_XIVR_OCC, + PSIHB_XSCOM_XIVR_FSI, + PSIHB_XSCOM_XIVR_LPCI2C, + PSIHB_XSCOM_XIVR_LOCERR, + PSIHB_XSCOM_XIVR_EXT, +}; + +static void pnv_psi_power8_realize(DeviceState *dev, Error **errp) +{ + PnvPsi *psi = PNV_PSI(dev); + ICSState *ics = &PNV8_PSI(psi)->ics; + unsigned int i; + + /* Create PSI interrupt control source */ + if (!object_property_set_int(OBJECT(ics), "nr-irqs", PSI_NUM_INTERRUPTS, + errp)) { + return; + } + if (!qdev_realize(DEVICE(ics), NULL, errp)) { + return; + } + + for (i = 0; i < ics->nr_irqs; i++) { + ics_set_irq_type(ics, i, true); + } + + psi->qirqs = qemu_allocate_irqs(ics_set_irq, ics, ics->nr_irqs); + + /* XSCOM region for PSI registers */ + pnv_xscom_region_init(&psi->xscom_regs, OBJECT(dev), &pnv_psi_xscom_ops, + psi, "xscom-psi", PNV_XSCOM_PSIHB_SIZE); + + /* Initialize MMIO region */ + memory_region_init_io(&psi->regs_mr, OBJECT(dev), &psi_mmio_ops, psi, + "psihb", PNV_PSIHB_SIZE); + + /* Default sources in XIVR */ + for (i = 0; i < PSI_NUM_INTERRUPTS; i++) { + uint8_t xivr = irq_to_xivr[i]; + psi->regs[xivr] = PSIHB_XIVR_PRIO_MSK | + ((uint64_t) i << PSIHB_XIVR_SRC_SH); + } + + pnv_psi_realize(dev, errp); +} + +static int pnv_psi_dt_xscom(PnvXScomInterface *dev, void *fdt, int xscom_offset) +{ + PnvPsiClass *ppc = PNV_PSI_GET_CLASS(dev); + char *name; + int offset; + uint32_t reg[] = { + cpu_to_be32(ppc->xscom_pcba), + cpu_to_be32(ppc->xscom_size) + }; + + name = g_strdup_printf("psihb@%x", ppc->xscom_pcba); + offset = fdt_add_subnode(fdt, xscom_offset, name); + _FDT(offset); + g_free(name); + + _FDT(fdt_setprop(fdt, offset, "reg", reg, sizeof(reg))); + _FDT(fdt_setprop_cell(fdt, offset, "#address-cells", 2)); + _FDT(fdt_setprop_cell(fdt, offset, "#size-cells", 1)); + _FDT(fdt_setprop(fdt, offset, "compatible", ppc->compat, + ppc->compat_size)); + return 0; +} + +static Property pnv_psi_properties[] = { + DEFINE_PROP_UINT64("bar", PnvPsi, bar, 0), + DEFINE_PROP_UINT64("fsp-bar", PnvPsi, fsp_bar, 0), + DEFINE_PROP_END_OF_LIST(), +}; + +static void pnv_psi_power8_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PnvPsiClass *ppc = PNV_PSI_CLASS(klass); + static const char compat[] = "ibm,power8-psihb-x\0ibm,psihb-x"; + + dc->desc = "PowerNV PSI Controller POWER8"; + dc->realize = pnv_psi_power8_realize; + + ppc->xscom_pcba = PNV_XSCOM_PSIHB_BASE; + ppc->xscom_size = PNV_XSCOM_PSIHB_SIZE; + ppc->bar_mask = PSIHB_BAR_MASK; + ppc->irq_set = pnv_psi_power8_irq_set; + ppc->compat = compat; + ppc->compat_size = sizeof(compat); +} + +static const TypeInfo pnv_psi_power8_info = { + .name = TYPE_PNV8_PSI, + .parent = TYPE_PNV_PSI, + .instance_size = sizeof(Pnv8Psi), + .instance_init = 
pnv_psi_power8_instance_init, + .class_init = pnv_psi_power8_class_init, +}; + + +/* Common registers */ + +#define PSIHB9_CR 0x20 +#define PSIHB9_SEMR 0x28 + +/* P9 registers */ + +#define PSIHB9_INTERRUPT_CONTROL 0x58 +#define PSIHB9_IRQ_METHOD PPC_BIT(0) +#define PSIHB9_IRQ_RESET PPC_BIT(1) +#define PSIHB9_ESB_CI_BASE 0x60 +#define PSIHB9_ESB_CI_64K PPC_BIT(1) +#define PSIHB9_ESB_CI_ADDR_MASK PPC_BITMASK(8, 47) +#define PSIHB9_ESB_CI_VALID PPC_BIT(63) +#define PSIHB9_ESB_NOTIF_ADDR 0x68 +#define PSIHB9_ESB_NOTIF_ADDR_MASK PPC_BITMASK(8, 60) +#define PSIHB9_ESB_NOTIF_VALID PPC_BIT(63) +#define PSIHB9_IVT_OFFSET 0x70 +#define PSIHB9_IVT_OFF_SHIFT 32 + +#define PSIHB9_IRQ_LEVEL 0x78 /* assertion */ +#define PSIHB9_IRQ_LEVEL_PSI PPC_BIT(0) +#define PSIHB9_IRQ_LEVEL_OCC PPC_BIT(1) +#define PSIHB9_IRQ_LEVEL_FSI PPC_BIT(2) +#define PSIHB9_IRQ_LEVEL_LPCHC PPC_BIT(3) +#define PSIHB9_IRQ_LEVEL_LOCAL_ERR PPC_BIT(4) +#define PSIHB9_IRQ_LEVEL_GLOBAL_ERR PPC_BIT(5) +#define PSIHB9_IRQ_LEVEL_TPM PPC_BIT(6) +#define PSIHB9_IRQ_LEVEL_LPC_SIRQ1 PPC_BIT(7) +#define PSIHB9_IRQ_LEVEL_LPC_SIRQ2 PPC_BIT(8) +#define PSIHB9_IRQ_LEVEL_LPC_SIRQ3 PPC_BIT(9) +#define PSIHB9_IRQ_LEVEL_LPC_SIRQ4 PPC_BIT(10) +#define PSIHB9_IRQ_LEVEL_SBE_I2C PPC_BIT(11) +#define PSIHB9_IRQ_LEVEL_DIO PPC_BIT(12) +#define PSIHB9_IRQ_LEVEL_PSU PPC_BIT(13) +#define PSIHB9_IRQ_LEVEL_I2C_C PPC_BIT(14) +#define PSIHB9_IRQ_LEVEL_I2C_D PPC_BIT(15) +#define PSIHB9_IRQ_LEVEL_I2C_E PPC_BIT(16) +#define PSIHB9_IRQ_LEVEL_SBE PPC_BIT(19) + +#define PSIHB9_IRQ_STAT 0x80 /* P bit */ +#define PSIHB9_IRQ_STAT_PSI PPC_BIT(0) +#define PSIHB9_IRQ_STAT_OCC PPC_BIT(1) +#define PSIHB9_IRQ_STAT_FSI PPC_BIT(2) +#define PSIHB9_IRQ_STAT_LPCHC PPC_BIT(3) +#define PSIHB9_IRQ_STAT_LOCAL_ERR PPC_BIT(4) +#define PSIHB9_IRQ_STAT_GLOBAL_ERR PPC_BIT(5) +#define PSIHB9_IRQ_STAT_TPM PPC_BIT(6) +#define PSIHB9_IRQ_STAT_LPC_SIRQ1 PPC_BIT(7) +#define PSIHB9_IRQ_STAT_LPC_SIRQ2 PPC_BIT(8) +#define PSIHB9_IRQ_STAT_LPC_SIRQ3 PPC_BIT(9) +#define PSIHB9_IRQ_STAT_LPC_SIRQ4 PPC_BIT(10) +#define PSIHB9_IRQ_STAT_SBE_I2C PPC_BIT(11) +#define PSIHB9_IRQ_STAT_DIO PPC_BIT(12) +#define PSIHB9_IRQ_STAT_PSU PPC_BIT(13) + +static void pnv_psi_notify(XiveNotifier *xf, uint32_t srcno) +{ + PnvPsi *psi = PNV_PSI(xf); + uint64_t notif_port = psi->regs[PSIHB_REG(PSIHB9_ESB_NOTIF_ADDR)]; + bool valid = notif_port & PSIHB9_ESB_NOTIF_VALID; + uint64_t notify_addr = notif_port & ~PSIHB9_ESB_NOTIF_VALID; + + uint32_t offset = + (psi->regs[PSIHB_REG(PSIHB9_IVT_OFFSET)] >> PSIHB9_IVT_OFF_SHIFT); + uint64_t data = XIVE_TRIGGER_PQ | offset | srcno; + MemTxResult result; + + if (!valid) { + return; + } + + address_space_stq_be(&address_space_memory, notify_addr, data, + MEMTXATTRS_UNSPECIFIED, &result); + if (result != MEMTX_OK) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: trigger failed @%" + HWADDR_PRIx "\n", __func__, notif_port); + return; + } +} + +static uint64_t pnv_psi_p9_mmio_read(void *opaque, hwaddr addr, unsigned size) +{ + PnvPsi *psi = PNV_PSI(opaque); + uint32_t reg = PSIHB_REG(addr); + uint64_t val = -1; + + switch (addr) { + case PSIHB9_CR: + case PSIHB9_SEMR: + /* FSP stuff */ + case PSIHB9_INTERRUPT_CONTROL: + case PSIHB9_ESB_CI_BASE: + case PSIHB9_ESB_NOTIF_ADDR: + case PSIHB9_IVT_OFFSET: + val = psi->regs[reg]; + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, "PSI: read at 0x%" PRIx64 "\n", addr); + } + + return val; +} + +static void pnv_psi_p9_mmio_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + PnvPsi *psi = PNV_PSI(opaque); + Pnv9Psi *psi9 = PNV9_PSI(psi); + 
uint32_t reg = PSIHB_REG(addr);
+    MemoryRegion *sysmem = get_system_memory();
+
+    switch (addr) {
+    case PSIHB9_CR:
+    case PSIHB9_SEMR:
+        /* FSP stuff */
+        break;
+    case PSIHB9_INTERRUPT_CONTROL:
+        if (val & PSIHB9_IRQ_RESET) {
+            device_cold_reset(DEVICE(&psi9->source));
+        }
+        psi->regs[reg] = val;
+        break;
+
+    case PSIHB9_ESB_CI_BASE:
+        if (!(val & PSIHB9_ESB_CI_VALID)) {
+            if (psi->regs[reg] & PSIHB9_ESB_CI_VALID) {
+                memory_region_del_subregion(sysmem, &psi9->source.esb_mmio);
+            }
+        } else {
+            if (!(psi->regs[reg] & PSIHB9_ESB_CI_VALID)) {
+                memory_region_add_subregion(sysmem,
+                                            val & ~PSIHB9_ESB_CI_VALID,
+                                            &psi9->source.esb_mmio);
+            }
+        }
+        psi->regs[reg] = val;
+        break;
+
+    case PSIHB9_ESB_NOTIF_ADDR:
+        psi->regs[reg] = val;
+        break;
+    case PSIHB9_IVT_OFFSET:
+        psi->regs[reg] = val;
+        break;
+    default:
+        qemu_log_mask(LOG_GUEST_ERROR, "PSI: write at 0x%" PRIx64 "\n", addr);
+    }
+}
+
+static const MemoryRegionOps pnv_psi_p9_mmio_ops = {
+    .read = pnv_psi_p9_mmio_read,
+    .write = pnv_psi_p9_mmio_write,
+    .endianness = DEVICE_BIG_ENDIAN,
+    .valid = {
+        .min_access_size = 8,
+        .max_access_size = 8,
+    },
+    .impl = {
+        .min_access_size = 8,
+        .max_access_size = 8,
+    },
+};
+
+static uint64_t pnv_psi_p9_xscom_read(void *opaque, hwaddr addr, unsigned size)
+{
+    /* No reads are expected */
+    qemu_log_mask(LOG_GUEST_ERROR, "PSI: xscom read at 0x%" PRIx64 "\n", addr);
+    return -1;
+}
+
+static void pnv_psi_p9_xscom_write(void *opaque, hwaddr addr,
+                                   uint64_t val, unsigned size)
+{
+    PnvPsi *psi = PNV_PSI(opaque);
+
+    /* XSCOM is only used to set the PSIHB MMIO region */
+    switch (addr >> 3) {
+    case PSIHB_XSCOM_BAR:
+        pnv_psi_set_bar(psi, val);
+        break;
+    default:
+        qemu_log_mask(LOG_GUEST_ERROR, "PSI: xscom write at 0x%" PRIx64 "\n",
+                      addr);
+    }
+}
+
+static const MemoryRegionOps pnv_psi_p9_xscom_ops = {
+    .read = pnv_psi_p9_xscom_read,
+    .write = pnv_psi_p9_xscom_write,
+    .endianness = DEVICE_BIG_ENDIAN,
+    .valid = {
+        .min_access_size = 8,
+        .max_access_size = 8,
+    },
+    .impl = {
+        .min_access_size = 8,
+        .max_access_size = 8,
+    }
+};
+
+static void pnv_psi_power9_irq_set(PnvPsi *psi, int irq, bool state)
+{
+    uint64_t irq_method = psi->regs[PSIHB_REG(PSIHB9_INTERRUPT_CONTROL)];
+
+    if (irq > PSIHB9_NUM_IRQS) {
+        qemu_log_mask(LOG_GUEST_ERROR, "PSI: Unsupported irq %d\n", irq);
+        return;
+    }
+
+    if (irq_method & PSIHB9_IRQ_METHOD) {
+        qemu_log_mask(LOG_GUEST_ERROR, "PSI: LSI IRQ method not supported\n");
+        return;
+    }
+
+    /* Update LSI levels */
+    if (state) {
+        psi->regs[PSIHB_REG(PSIHB9_IRQ_LEVEL)] |= PPC_BIT(irq);
+    } else {
+        psi->regs[PSIHB_REG(PSIHB9_IRQ_LEVEL)] &= ~PPC_BIT(irq);
+    }
+
+    qemu_set_irq(psi->qirqs[irq], state);
+}
+
+static void pnv_psi_power9_reset(DeviceState *dev)
+{
+    Pnv9Psi *psi = PNV9_PSI(dev);
+
+    pnv_psi_reset(dev);
+
+    if (memory_region_is_mapped(&psi->source.esb_mmio)) {
+        memory_region_del_subregion(get_system_memory(), &psi->source.esb_mmio);
+    }
+}
+
+static void pnv_psi_power9_instance_init(Object *obj)
+{
+    Pnv9Psi *psi = PNV9_PSI(obj);
+
+    object_initialize_child(obj, "source", &psi->source, TYPE_XIVE_SOURCE);
+}
+
+static void pnv_psi_power9_realize(DeviceState *dev, Error **errp)
+{
+    PnvPsi *psi = PNV_PSI(dev);
+    XiveSource *xsrc = &PNV9_PSI(psi)->source;
+    int i;
+
+    /* This is the only device with 4k ESB pages */
+    object_property_set_int(OBJECT(xsrc), "shift", XIVE_ESB_4K, &error_fatal);
+    object_property_set_int(OBJECT(xsrc), "nr-irqs", PSIHB9_NUM_IRQS,
+                            &error_fatal);
+    object_property_set_link(OBJECT(xsrc), "xive", OBJECT(psi), &error_abort);
if (!qdev_realize(DEVICE(xsrc), NULL, errp)) { + return; + } + + for (i = 0; i < xsrc->nr_irqs; i++) { + xive_source_irq_set_lsi(xsrc, i); + } + + psi->qirqs = qemu_allocate_irqs(xive_source_set_irq, xsrc, xsrc->nr_irqs); + + /* XSCOM region for PSI registers */ + pnv_xscom_region_init(&psi->xscom_regs, OBJECT(dev), &pnv_psi_p9_xscom_ops, + psi, "xscom-psi", PNV9_XSCOM_PSIHB_SIZE); + + /* MMIO region for PSI registers */ + memory_region_init_io(&psi->regs_mr, OBJECT(dev), &pnv_psi_p9_mmio_ops, psi, + "psihb", PNV9_PSIHB_SIZE); + + pnv_psi_realize(dev, errp); +} + +static void pnv_psi_power9_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PnvPsiClass *ppc = PNV_PSI_CLASS(klass); + XiveNotifierClass *xfc = XIVE_NOTIFIER_CLASS(klass); + static const char compat[] = "ibm,power9-psihb-x\0ibm,psihb-x"; + + dc->desc = "PowerNV PSI Controller POWER9"; + dc->realize = pnv_psi_power9_realize; + dc->reset = pnv_psi_power9_reset; + + ppc->xscom_pcba = PNV9_XSCOM_PSIHB_BASE; + ppc->xscom_size = PNV9_XSCOM_PSIHB_SIZE; + ppc->bar_mask = PSIHB9_BAR_MASK; + ppc->irq_set = pnv_psi_power9_irq_set; + ppc->compat = compat; + ppc->compat_size = sizeof(compat); + + xfc->notify = pnv_psi_notify; +} + +static const TypeInfo pnv_psi_power9_info = { + .name = TYPE_PNV9_PSI, + .parent = TYPE_PNV_PSI, + .instance_size = sizeof(Pnv9Psi), + .instance_init = pnv_psi_power9_instance_init, + .class_init = pnv_psi_power9_class_init, + .interfaces = (InterfaceInfo[]) { + { TYPE_XIVE_NOTIFIER }, + { }, + }, +}; + +static void pnv_psi_power10_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PnvPsiClass *ppc = PNV_PSI_CLASS(klass); + static const char compat[] = "ibm,power10-psihb-x\0ibm,psihb-x"; + + dc->desc = "PowerNV PSI Controller POWER10"; + + ppc->xscom_pcba = PNV10_XSCOM_PSIHB_BASE; + ppc->xscom_size = PNV10_XSCOM_PSIHB_SIZE; + ppc->compat = compat; + ppc->compat_size = sizeof(compat); +} + +static const TypeInfo pnv_psi_power10_info = { + .name = TYPE_PNV10_PSI, + .parent = TYPE_PNV9_PSI, + .class_init = pnv_psi_power10_class_init, +}; + +static void pnv_psi_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PnvXScomInterfaceClass *xdc = PNV_XSCOM_INTERFACE_CLASS(klass); + + xdc->dt_xscom = pnv_psi_dt_xscom; + + dc->desc = "PowerNV PSI Controller"; + device_class_set_props(dc, pnv_psi_properties); + dc->reset = pnv_psi_reset; + dc->user_creatable = false; +} + +static const TypeInfo pnv_psi_info = { + .name = TYPE_PNV_PSI, + .parent = TYPE_DEVICE, + .instance_size = sizeof(PnvPsi), + .class_init = pnv_psi_class_init, + .class_size = sizeof(PnvPsiClass), + .abstract = true, + .interfaces = (InterfaceInfo[]) { + { TYPE_PNV_XSCOM_INTERFACE }, + { } + } +}; + +static void pnv_psi_register_types(void) +{ + type_register_static(&pnv_psi_info); + type_register_static(&pnv_psi_power8_info); + type_register_static(&pnv_psi_power9_info); + type_register_static(&pnv_psi_power10_info); +} + +type_init(pnv_psi_register_types); + +void pnv_psi_pic_print_info(Pnv9Psi *psi9, Monitor *mon) +{ + PnvPsi *psi = PNV_PSI(psi9); + + uint32_t offset = + (psi->regs[PSIHB_REG(PSIHB9_IVT_OFFSET)] >> PSIHB9_IVT_OFF_SHIFT); + + monitor_printf(mon, "PSIHB Source %08x .. 
%08x\n",
+                   offset, offset + psi9->source.nr_irqs - 1);
+    xive_source_pic_print_info(&psi9->source, offset, mon);
+}
diff --git a/hw/ppc/pnv_xscom.c b/hw/ppc/pnv_xscom.c
new file mode 100644
index 000000000..9ce018dbc
--- /dev/null
+++ b/hw/ppc/pnv_xscom.c
@@ -0,0 +1,324 @@
+/*
+ * QEMU PowerPC PowerNV XSCOM bus
+ *
+ * Copyright (c) 2016, IBM Corporation.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/log.h"
+#include "qemu/module.h"
+#include "sysemu/hw_accel.h"
+#include "target/ppc/cpu.h"
+#include "hw/sysbus.h"
+
+#include "hw/ppc/fdt.h"
+#include "hw/ppc/pnv.h"
+#include "hw/ppc/pnv_xscom.h"
+
+#include <libfdt.h>
+
+/* PRD registers */
+#define PRD_P8_IPOLL_REG_MASK 0x01020013
+#define PRD_P8_IPOLL_REG_STATUS 0x01020014
+#define PRD_P9_IPOLL_REG_MASK 0x000F0033
+#define PRD_P9_IPOLL_REG_STATUS 0x000F0034
+
+static void xscom_complete(CPUState *cs, uint64_t hmer_bits)
+{
+    /*
+     * TODO: When the read/write comes from the monitor, NULL is
+     * passed for the cpu, and no CPU completion is generated.
+     */
+    if (cs) {
+        PowerPCCPU *cpu = POWERPC_CPU(cs);
+        CPUPPCState *env = &cpu->env;
+
+        /*
+         * TODO: Need a CPU helper to set HMER, also handle generation
+         * of HMIs
+         */
+        cpu_synchronize_state(cs);
+        env->spr[SPR_HMER] |= hmer_bits;
+    }
+}
+
+static uint32_t pnv_xscom_pcba(PnvChip *chip, uint64_t addr)
+{
+    return PNV_CHIP_GET_CLASS(chip)->xscom_pcba(chip, addr);
+}
+
+static uint64_t xscom_read_default(PnvChip *chip, uint32_t pcba)
+{
+    switch (pcba) {
+    case 0xf000f:
+        return PNV_CHIP_GET_CLASS(chip)->chip_cfam_id;
+    case 0x18002:       /* ECID2 */
+        return 0;
+
+    case 0x1010c00:     /* PIBAM FIR */
+    case 0x1010c03:     /* PIBAM FIR MASK */
+
+    /* PRD registers */
+    case PRD_P8_IPOLL_REG_MASK:
+    case PRD_P8_IPOLL_REG_STATUS:
+    case PRD_P9_IPOLL_REG_MASK:
+    case PRD_P9_IPOLL_REG_STATUS:
+
+    /* P9 xscom reset */
+    case 0x0090018:     /* Receive status reg */
+    case 0x0090012:     /* log register */
+    case 0x0090013:     /* error register */
+
+    /* P8 xscom reset */
+    case 0x2020007:     /* ADU stuff, log register */
+    case 0x2020009:     /* ADU stuff, error register */
+    case 0x202000f:     /* ADU stuff, receive status register */
+        return 0;
+    case 0x2013f01:     /* PBA stuff */
+    case 0x2013f05:     /* PBA stuff */
+        return 0;
+    case 0x2013028:     /* CAPP stuff */
+    case 0x201302a:     /* CAPP stuff */
+    case 0x2013801:     /* CAPP stuff */
+    case 0x2013802:     /* CAPP stuff */
+
+    /* P9 CAPP regs */
+    case 0x2010841:
+    case 0x2010842:
+    case 0x201082a:
+    case 0x2010828:
+    case 0x4010841:
+    case 0x4010842:
+    case 0x401082a:
+    case 0x4010828:
+        return 0;
+    default:
+        return -1;
+    }
+}
+
+static bool xscom_write_default(PnvChip *chip, uint32_t pcba, uint64_t val)
+{
+    /* We ignore writes to these */
+    switch (pcba) {
+    case 0xf000f:       /* chip id is RO */
+    case 0x1010c00:     /* PIBAM FIR */
+    case 0x1010c01:     /* PIBAM FIR */
+    case 0x1010c02:     /* PIBAM FIR */
+    case 0x1010c03:     /* PIBAM FIR MASK */
+    case 0x1010c04:     /* PIBAM
FIR MASK */ + case 0x1010c05: /* PIBAM FIR MASK */ + /* P9 xscom reset */ + case 0x0090018: /* Receive status reg */ + case 0x0090012: /* log register */ + case 0x0090013: /* error register */ + + /* P8 xscom reset */ + case 0x2020007: /* ADU stuff, log register */ + case 0x2020009: /* ADU stuff, error register */ + case 0x202000f: /* ADU stuff, receive status register*/ + + case 0x2013028: /* CAPP stuff */ + case 0x201302a: /* CAPP stuff */ + case 0x2013801: /* CAPP stuff */ + case 0x2013802: /* CAPP stuff */ + + /* P9 CAPP regs */ + case 0x2010841: + case 0x2010842: + case 0x201082a: + case 0x2010828: + case 0x4010841: + case 0x4010842: + case 0x401082a: + case 0x4010828: + + /* P8 PRD registers */ + case PRD_P8_IPOLL_REG_MASK: + case PRD_P8_IPOLL_REG_STATUS: + case PRD_P9_IPOLL_REG_MASK: + case PRD_P9_IPOLL_REG_STATUS: + return true; + default: + return false; + } +} + +static uint64_t xscom_read(void *opaque, hwaddr addr, unsigned width) +{ + PnvChip *chip = opaque; + uint32_t pcba = pnv_xscom_pcba(chip, addr); + uint64_t val = 0; + MemTxResult result; + + /* Handle some SCOMs here before dispatch */ + val = xscom_read_default(chip, pcba); + if (val != -1) { + goto complete; + } + + val = address_space_ldq(&chip->xscom_as, (uint64_t) pcba << 3, + MEMTXATTRS_UNSPECIFIED, &result); + if (result != MEMTX_OK) { + qemu_log_mask(LOG_GUEST_ERROR, "XSCOM read failed at @0x%" + HWADDR_PRIx " pcba=0x%08x\n", addr, pcba); + xscom_complete(current_cpu, HMER_XSCOM_FAIL | HMER_XSCOM_DONE); + return 0; + } + +complete: + xscom_complete(current_cpu, HMER_XSCOM_DONE); + return val; +} + +static void xscom_write(void *opaque, hwaddr addr, uint64_t val, + unsigned width) +{ + PnvChip *chip = opaque; + uint32_t pcba = pnv_xscom_pcba(chip, addr); + MemTxResult result; + + /* Handle some SCOMs here before dispatch */ + if (xscom_write_default(chip, pcba, val)) { + goto complete; + } + + address_space_stq(&chip->xscom_as, (uint64_t) pcba << 3, val, + MEMTXATTRS_UNSPECIFIED, &result); + if (result != MEMTX_OK) { + qemu_log_mask(LOG_GUEST_ERROR, "XSCOM write failed at @0x%" + HWADDR_PRIx " pcba=0x%08x data=0x%" PRIx64 "\n", + addr, pcba, val); + xscom_complete(current_cpu, HMER_XSCOM_FAIL | HMER_XSCOM_DONE); + return; + } + +complete: + xscom_complete(current_cpu, HMER_XSCOM_DONE); +} + +const MemoryRegionOps pnv_xscom_ops = { + .read = xscom_read, + .write = xscom_write, + .valid.min_access_size = 8, + .valid.max_access_size = 8, + .impl.min_access_size = 8, + .impl.max_access_size = 8, + .endianness = DEVICE_BIG_ENDIAN, +}; + +void pnv_xscom_realize(PnvChip *chip, uint64_t size, Error **errp) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(chip); + char *name; + + name = g_strdup_printf("xscom-%x", chip->chip_id); + memory_region_init_io(&chip->xscom_mmio, OBJECT(chip), &pnv_xscom_ops, + chip, name, size); + sysbus_init_mmio(sbd, &chip->xscom_mmio); + + memory_region_init(&chip->xscom, OBJECT(chip), name, size); + address_space_init(&chip->xscom_as, &chip->xscom, name); + g_free(name); +} + +static const TypeInfo pnv_xscom_interface_info = { + .name = TYPE_PNV_XSCOM_INTERFACE, + .parent = TYPE_INTERFACE, + .class_size = sizeof(PnvXScomInterfaceClass), +}; + +static void pnv_xscom_register_types(void) +{ + type_register_static(&pnv_xscom_interface_info); +} + +type_init(pnv_xscom_register_types) + +typedef struct ForeachPopulateArgs { + void *fdt; + int xscom_offset; +} ForeachPopulateArgs; + +static int xscom_dt_child(Object *child, void *opaque) +{ + if (object_dynamic_cast(child, TYPE_PNV_XSCOM_INTERFACE)) { + 
ForeachPopulateArgs *args = opaque; + PnvXScomInterface *xd = PNV_XSCOM_INTERFACE(child); + PnvXScomInterfaceClass *xc = PNV_XSCOM_INTERFACE_GET_CLASS(xd); + + /* + * Only "realized" devices should be configured in the DT + */ + if (xc->dt_xscom && DEVICE(child)->realized) { + _FDT((xc->dt_xscom(xd, args->fdt, args->xscom_offset))); + } + } + return 0; +} + +int pnv_dt_xscom(PnvChip *chip, void *fdt, int root_offset, + uint64_t xscom_base, uint64_t xscom_size, + const char *compat, int compat_size) +{ + uint64_t reg[] = { xscom_base, xscom_size }; + int xscom_offset; + ForeachPopulateArgs args; + char *name; + + name = g_strdup_printf("xscom@%" PRIx64, be64_to_cpu(reg[0])); + xscom_offset = fdt_add_subnode(fdt, root_offset, name); + _FDT(xscom_offset); + g_free(name); + _FDT((fdt_setprop_cell(fdt, xscom_offset, "ibm,chip-id", chip->chip_id))); + /* + * On P10, the xscom bus id has been deprecated and the chip id is + * calculated from the "Primary topology table index". See skiboot. + */ + _FDT((fdt_setprop_cell(fdt, xscom_offset, "ibm,primary-topology-index", + chip->chip_id))); + _FDT((fdt_setprop_cell(fdt, xscom_offset, "#address-cells", 1))); + _FDT((fdt_setprop_cell(fdt, xscom_offset, "#size-cells", 1))); + _FDT((fdt_setprop(fdt, xscom_offset, "reg", reg, sizeof(reg)))); + _FDT((fdt_setprop(fdt, xscom_offset, "compatible", compat, compat_size))); + _FDT((fdt_setprop(fdt, xscom_offset, "scom-controller", NULL, 0))); + + args.fdt = fdt; + args.xscom_offset = xscom_offset; + + /* + * Loop on the whole object hierarchy to catch all + * PnvXScomInterface objects which can lie a bit deeper than the + * first layer. + */ + object_child_foreach_recursive(OBJECT(chip), xscom_dt_child, &args); + return 0; +} + +void pnv_xscom_add_subregion(PnvChip *chip, hwaddr offset, MemoryRegion *mr) +{ + memory_region_add_subregion(&chip->xscom, offset << 3, mr); +} + +void pnv_xscom_region_init(MemoryRegion *mr, + Object *owner, + const MemoryRegionOps *ops, + void *opaque, + const char *name, + uint64_t size) +{ + memory_region_init_io(mr, owner, ops, opaque, name, size << 3); +} diff --git a/hw/ppc/ppc.c b/hw/ppc/ppc.c new file mode 100644 index 000000000..e8127599c --- /dev/null +++ b/hw/ppc/ppc.c @@ -0,0 +1,1465 @@ +/* + * QEMU generic PowerPC hardware System Emulator + * + * Copyright (c) 2003-2007 Jocelyn Mayer + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */
+
+#include "qemu/osdep.h"
+#include "hw/irq.h"
+#include "hw/ppc/ppc.h"
+#include "hw/ppc/ppc_e500.h"
+#include "qemu/timer.h"
+#include "sysemu/cpus.h"
+#include "qemu/log.h"
+#include "qemu/main-loop.h"
+#include "qemu/error-report.h"
+#include "sysemu/kvm.h"
+#include "sysemu/runstate.h"
+#include "kvm_ppc.h"
+#include "migration/vmstate.h"
+#include "trace.h"
+
+static void cpu_ppc_tb_stop (CPUPPCState *env);
+static void cpu_ppc_tb_start (CPUPPCState *env);
+
+void ppc_set_irq(PowerPCCPU *cpu, int n_IRQ, int level)
+{
+    CPUState *cs = CPU(cpu);
+    CPUPPCState *env = &cpu->env;
+    unsigned int old_pending;
+    bool locked = false;
+
+    /* We may already have the BQL if coming from the reset path */
+    if (!qemu_mutex_iothread_locked()) {
+        locked = true;
+        qemu_mutex_lock_iothread();
+    }
+
+    old_pending = env->pending_interrupts;
+
+    if (level) {
+        env->pending_interrupts |= 1 << n_IRQ;
+        cpu_interrupt(cs, CPU_INTERRUPT_HARD);
+    } else {
+        env->pending_interrupts &= ~(1 << n_IRQ);
+        if (env->pending_interrupts == 0) {
+            cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
+        }
+    }
+
+    if (old_pending != env->pending_interrupts) {
+        kvmppc_set_interrupt(cpu, n_IRQ, level);
+    }
+
+    trace_ppc_irq_set_exit(env, n_IRQ, level, env->pending_interrupts,
+                           CPU(cpu)->interrupt_request);
+
+    if (locked) {
+        qemu_mutex_unlock_iothread();
+    }
+}
+
+/* PowerPC 6xx / 7xx internal IRQ controller */
+static void ppc6xx_set_irq(void *opaque, int pin, int level)
+{
+    PowerPCCPU *cpu = opaque;
+    CPUPPCState *env = &cpu->env;
+    int cur_level;
+
+    trace_ppc_irq_set(env, pin, level);
+
+    cur_level = (env->irq_input_state >> pin) & 1;
+    /* Don't generate spurious events */
+    if ((cur_level == 1 && level == 0) || (cur_level == 0 && level != 0)) {
+        CPUState *cs = CPU(cpu);
+
+        switch (pin) {
+        case PPC6xx_INPUT_TBEN:
+            /* Level sensitive - active high */
+            trace_ppc_irq_set_state("time base", level);
+            if (level) {
+                cpu_ppc_tb_start(env);
+            } else {
+                cpu_ppc_tb_stop(env);
+            }
+            break;
+        case PPC6xx_INPUT_INT:
+            /* Level sensitive - active high */
+            trace_ppc_irq_set_state("external IRQ", level);
+            ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
+            break;
+        case PPC6xx_INPUT_SMI:
+            /* Level sensitive - active high */
+            trace_ppc_irq_set_state("SMI IRQ", level);
+            ppc_set_irq(cpu, PPC_INTERRUPT_SMI, level);
+            break;
+        case PPC6xx_INPUT_MCP:
+            /* Negative edge sensitive */
+            /* XXX: TODO: the actual reaction may depend on HID0 status
+             *            603/604/740/750: check HID0[EMCP]
+             */
+            if (cur_level == 1 && level == 0) {
+                trace_ppc_irq_set_state("machine check", 1);
+                ppc_set_irq(cpu, PPC_INTERRUPT_MCK, 1);
+            }
+            break;
+        case PPC6xx_INPUT_CKSTP_IN:
+            /* Level sensitive - active low */
+            /* XXX: TODO: relay the signal to CKSTP_OUT pin */
+            /* XXX: Note that the only way to restart the CPU is to reset it */
+            if (level) {
+                trace_ppc_irq_cpu("stop");
+                cs->halted = 1;
+            }
+            break;
+        case PPC6xx_INPUT_HRESET:
+            /* Level sensitive - active low */
+            if (level) {
+                trace_ppc_irq_reset("CPU");
+                cpu_interrupt(cs, CPU_INTERRUPT_RESET);
+            }
+            break;
+        case PPC6xx_INPUT_SRESET:
+            trace_ppc_irq_set_state("RESET IRQ", level);
+            ppc_set_irq(cpu, PPC_INTERRUPT_RESET, level);
+            break;
+        default:
+            g_assert_not_reached();
+        }
+        if (level) {
+            env->irq_input_state |= 1 << pin;
+        } else {
+            env->irq_input_state &= ~(1 << pin);
+        }
+    }
+}
+
+void ppc6xx_irq_init(PowerPCCPU *cpu)
+{
+    CPUPPCState *env = &cpu->env;
+
+    env->irq_inputs = (void **)qemu_allocate_irqs(&ppc6xx_set_irq, cpu,
+                                                  PPC6xx_INPUT_NB);
+}
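+
+/*
+ * Note on the pin bookkeeping above, with a worked example:
+ * irq_input_state caches the last level seen on each input pin, so a
+ * handler only reacts to real transitions. For pin == PPC6xx_INPUT_MCP,
+ * a 1 -> 0 transition (negative edge) raises the machine check, while
+ * a repeated level of 0 is filtered out by the cur_level check and
+ * nothing is re-triggered.
+ */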
+
+#if defined(TARGET_PPC64)
+/* PowerPC 970 internal IRQ controller */
+static void ppc970_set_irq(void *opaque, int pin, int level)
+{
+    PowerPCCPU *cpu = opaque;
+    CPUPPCState *env = &cpu->env;
+    int cur_level;
+
+    trace_ppc_irq_set(env, pin, level);
+
+    cur_level = (env->irq_input_state >> pin) & 1;
+    /* Don't generate spurious events */
+    if ((cur_level == 1 && level == 0) || (cur_level == 0 && level != 0)) {
+        CPUState *cs = CPU(cpu);
+
+        switch (pin) {
+        case PPC970_INPUT_INT:
+            /* Level sensitive - active high */
+            trace_ppc_irq_set_state("external IRQ", level);
+            ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
+            break;
+        case PPC970_INPUT_THINT:
+            /* Level sensitive - active high */
+            trace_ppc_irq_set_state("SMI IRQ", level);
+            ppc_set_irq(cpu, PPC_INTERRUPT_THERM, level);
+            break;
+        case PPC970_INPUT_MCP:
+            /* Negative edge sensitive */
+            /* XXX: TODO: the actual reaction may depend on HID0 status
+             *            603/604/740/750: check HID0[EMCP]
+             */
+            if (cur_level == 1 && level == 0) {
+                trace_ppc_irq_set_state("machine check", 1);
+                ppc_set_irq(cpu, PPC_INTERRUPT_MCK, 1);
+            }
+            break;
+        case PPC970_INPUT_CKSTP:
+            /* Level sensitive - active low */
+            /* XXX: TODO: relay the signal to CKSTP_OUT pin */
+            if (level) {
+                trace_ppc_irq_cpu("stop");
+                cs->halted = 1;
+            } else {
+                trace_ppc_irq_cpu("restart");
+                cs->halted = 0;
+                qemu_cpu_kick(cs);
+            }
+            break;
+        case PPC970_INPUT_HRESET:
+            /* Level sensitive - active low */
+            if (level) {
+                cpu_interrupt(cs, CPU_INTERRUPT_RESET);
+            }
+            break;
+        case PPC970_INPUT_SRESET:
+            trace_ppc_irq_set_state("RESET IRQ", level);
+            ppc_set_irq(cpu, PPC_INTERRUPT_RESET, level);
+            break;
+        case PPC970_INPUT_TBEN:
+            trace_ppc_irq_set_state("TBEN IRQ", level);
+            /* XXX: TODO */
+            break;
+        default:
+            g_assert_not_reached();
+        }
+        if (level) {
+            env->irq_input_state |= 1 << pin;
+        } else {
+            env->irq_input_state &= ~(1 << pin);
+        }
+    }
+}
+
+void ppc970_irq_init(PowerPCCPU *cpu)
+{
+    CPUPPCState *env = &cpu->env;
+
+    env->irq_inputs = (void **)qemu_allocate_irqs(&ppc970_set_irq, cpu,
+                                                  PPC970_INPUT_NB);
+}
+
+/* POWER7 internal IRQ controller */
+static void power7_set_irq(void *opaque, int pin, int level)
+{
+    PowerPCCPU *cpu = opaque;
+
+    trace_ppc_irq_set(&cpu->env, pin, level);
+
+    switch (pin) {
+    case POWER7_INPUT_INT:
+        /* Level sensitive - active high */
+        trace_ppc_irq_set_state("external IRQ", level);
+        ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
+        break;
+    default:
+        g_assert_not_reached();
+    }
+}
+
+void ppcPOWER7_irq_init(PowerPCCPU *cpu)
+{
+    CPUPPCState *env = &cpu->env;
+
+    env->irq_inputs = (void **)qemu_allocate_irqs(&power7_set_irq, cpu,
+                                                  POWER7_INPUT_NB);
+}
+
+/* POWER9 internal IRQ controller */
+static void power9_set_irq(void *opaque, int pin, int level)
+{
+    PowerPCCPU *cpu = opaque;
+
+    trace_ppc_irq_set(&cpu->env, pin, level);
+
+    switch (pin) {
+    case POWER9_INPUT_INT:
+        /* Level sensitive - active high */
+        trace_ppc_irq_set_state("external IRQ", level);
+        ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
+        break;
+    case POWER9_INPUT_HINT:
+        /* Level sensitive - active high */
+        trace_ppc_irq_set_state("HV external IRQ", level);
+        ppc_set_irq(cpu, PPC_INTERRUPT_HVIRT, level);
+        break;
+    default:
+        g_assert_not_reached();
+        return;
+    }
+}
+
+void ppcPOWER9_irq_init(PowerPCCPU *cpu)
+{
+    CPUPPCState *env = &cpu->env;
+
+    env->irq_inputs = (void **)qemu_allocate_irqs(&power9_set_irq, cpu,
+                                                  POWER9_INPUT_NB);
+}
+#endif /* defined(TARGET_PPC64) */
+
+void ppc40x_core_reset(PowerPCCPU *cpu)
+{
+    CPUPPCState *env = &cpu->env;
+    target_ulong dbsr;
+
+    qemu_log_mask(CPU_LOG_RESET, "Reset PowerPC core\n");
cpu_interrupt(CPU(cpu), CPU_INTERRUPT_RESET); + dbsr = env->spr[SPR_40x_DBSR]; + dbsr &= ~0x00000300; + dbsr |= 0x00000100; + env->spr[SPR_40x_DBSR] = dbsr; +} + +void ppc40x_chip_reset(PowerPCCPU *cpu) +{ + CPUPPCState *env = &cpu->env; + target_ulong dbsr; + + qemu_log_mask(CPU_LOG_RESET, "Reset PowerPC chip\n"); + cpu_interrupt(CPU(cpu), CPU_INTERRUPT_RESET); + /* XXX: TODO reset all internal peripherals */ + dbsr = env->spr[SPR_40x_DBSR]; + dbsr &= ~0x00000300; + dbsr |= 0x00000200; + env->spr[SPR_40x_DBSR] = dbsr; +} + +void ppc40x_system_reset(PowerPCCPU *cpu) +{ + qemu_log_mask(CPU_LOG_RESET, "Reset PowerPC system\n"); + qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); +} + +void store_40x_dbcr0(CPUPPCState *env, uint32_t val) +{ + PowerPCCPU *cpu = env_archcpu(env); + + qemu_mutex_lock_iothread(); + + switch ((val >> 28) & 0x3) { + case 0x0: + /* No action */ + break; + case 0x1: + /* Core reset */ + ppc40x_core_reset(cpu); + break; + case 0x2: + /* Chip reset */ + ppc40x_chip_reset(cpu); + break; + case 0x3: + /* System reset */ + ppc40x_system_reset(cpu); + break; + } + + qemu_mutex_unlock_iothread(); +} + +/* PowerPC 40x internal IRQ controller */ +static void ppc40x_set_irq(void *opaque, int pin, int level) +{ + PowerPCCPU *cpu = opaque; + CPUPPCState *env = &cpu->env; + int cur_level; + + trace_ppc_irq_set(env, pin, level); + + cur_level = (env->irq_input_state >> pin) & 1; + /* Don't generate spurious events */ + if ((cur_level == 1 && level == 0) || (cur_level == 0 && level != 0)) { + CPUState *cs = CPU(cpu); + + switch (pin) { + case PPC40x_INPUT_RESET_SYS: + if (level) { + trace_ppc_irq_reset("system"); + ppc40x_system_reset(cpu); + } + break; + case PPC40x_INPUT_RESET_CHIP: + if (level) { + trace_ppc_irq_reset("chip"); + ppc40x_chip_reset(cpu); + } + break; + case PPC40x_INPUT_RESET_CORE: + /* XXX: TODO: update DBSR[MRR] */ + if (level) { + trace_ppc_irq_reset("core"); + ppc40x_core_reset(cpu); + } + break; + case PPC40x_INPUT_CINT: + /* Level sensitive - active high */ + trace_ppc_irq_set_state("critical IRQ", level); + ppc_set_irq(cpu, PPC_INTERRUPT_CEXT, level); + break; + case PPC40x_INPUT_INT: + /* Level sensitive - active high */ + trace_ppc_irq_set_state("external IRQ", level); + ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level); + break; + case PPC40x_INPUT_HALT: + /* Level sensitive - active low */ + if (level) { + trace_ppc_irq_cpu("stop"); + cs->halted = 1; + } else { + trace_ppc_irq_cpu("restart"); + cs->halted = 0; + qemu_cpu_kick(cs); + } + break; + case PPC40x_INPUT_DEBUG: + /* Level sensitive - active high */ + trace_ppc_irq_set_state("debug pin", level); + ppc_set_irq(cpu, PPC_INTERRUPT_DEBUG, level); + break; + default: + g_assert_not_reached(); + } + if (level) + env->irq_input_state |= 1 << pin; + else + env->irq_input_state &= ~(1 << pin); + } +} + +void ppc40x_irq_init(PowerPCCPU *cpu) +{ + CPUPPCState *env = &cpu->env; + + env->irq_inputs = (void **)qemu_allocate_irqs(&ppc40x_set_irq, + cpu, PPC40x_INPUT_NB); +} + +/* PowerPC E500 internal IRQ controller */ +static void ppce500_set_irq(void *opaque, int pin, int level) +{ + PowerPCCPU *cpu = opaque; + CPUPPCState *env = &cpu->env; + int cur_level; + + trace_ppc_irq_set(env, pin, level); + + cur_level = (env->irq_input_state >> pin) & 1; + /* Don't generate spurious events */ + if ((cur_level == 1 && level == 0) || (cur_level == 0 && level != 0)) { + switch (pin) { + case PPCE500_INPUT_MCK: + if (level) { + trace_ppc_irq_reset("system"); + qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); + } + 
break; + case PPCE500_INPUT_RESET_CORE: + if (level) { + trace_ppc_irq_reset("core"); + ppc_set_irq(cpu, PPC_INTERRUPT_MCK, level); + } + break; + case PPCE500_INPUT_CINT: + /* Level sensitive - active high */ + trace_ppc_irq_set_state("critical IRQ", level); + ppc_set_irq(cpu, PPC_INTERRUPT_CEXT, level); + break; + case PPCE500_INPUT_INT: + /* Level sensitive - active high */ + trace_ppc_irq_set_state("core IRQ", level); + ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level); + break; + case PPCE500_INPUT_DEBUG: + /* Level sensitive - active high */ + trace_ppc_irq_set_state("debug pin", level); + ppc_set_irq(cpu, PPC_INTERRUPT_DEBUG, level); + break; + default: + g_assert_not_reached(); + } + if (level) + env->irq_input_state |= 1 << pin; + else + env->irq_input_state &= ~(1 << pin); + } +} + +void ppce500_irq_init(PowerPCCPU *cpu) +{ + CPUPPCState *env = &cpu->env; + + env->irq_inputs = (void **)qemu_allocate_irqs(&ppce500_set_irq, + cpu, PPCE500_INPUT_NB); +} + +/* Enable or Disable the E500 EPR capability */ +void ppce500_set_mpic_proxy(bool enabled) +{ + CPUState *cs; + + CPU_FOREACH(cs) { + PowerPCCPU *cpu = POWERPC_CPU(cs); + + cpu->env.mpic_proxy = enabled; + if (kvm_enabled()) { + kvmppc_set_mpic_proxy(cpu, enabled); + } + } +} + +/*****************************************************************************/ +/* PowerPC time base and decrementer emulation */ + +uint64_t cpu_ppc_get_tb(ppc_tb_t *tb_env, uint64_t vmclk, int64_t tb_offset) +{ + /* TB time in tb periods */ + return muldiv64(vmclk, tb_env->tb_freq, NANOSECONDS_PER_SECOND) + tb_offset; +} + +uint64_t cpu_ppc_load_tbl (CPUPPCState *env) +{ + ppc_tb_t *tb_env = env->tb_env; + uint64_t tb; + + if (kvm_enabled()) { + return env->spr[SPR_TBL]; + } + + tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->tb_offset); + trace_ppc_tb_load(tb); + + return tb; +} + +static inline uint32_t _cpu_ppc_load_tbu(CPUPPCState *env) +{ + ppc_tb_t *tb_env = env->tb_env; + uint64_t tb; + + tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->tb_offset); + trace_ppc_tb_load(tb); + + return tb >> 32; +} + +uint32_t cpu_ppc_load_tbu (CPUPPCState *env) +{ + if (kvm_enabled()) { + return env->spr[SPR_TBU]; + } + + return _cpu_ppc_load_tbu(env); +} + +static inline void cpu_ppc_store_tb(ppc_tb_t *tb_env, uint64_t vmclk, + int64_t *tb_offsetp, uint64_t value) +{ + *tb_offsetp = value - + muldiv64(vmclk, tb_env->tb_freq, NANOSECONDS_PER_SECOND); + + trace_ppc_tb_store(value, *tb_offsetp); +} + +void cpu_ppc_store_tbl (CPUPPCState *env, uint32_t value) +{ + ppc_tb_t *tb_env = env->tb_env; + uint64_t tb; + + tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->tb_offset); + tb &= 0xFFFFFFFF00000000ULL; + cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), + &tb_env->tb_offset, tb | (uint64_t)value); +} + +static inline void _cpu_ppc_store_tbu(CPUPPCState *env, uint32_t value) +{ + ppc_tb_t *tb_env = env->tb_env; + uint64_t tb; + + tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->tb_offset); + tb &= 0x00000000FFFFFFFFULL; + cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), + &tb_env->tb_offset, ((uint64_t)value << 32) | tb); +} + +void cpu_ppc_store_tbu (CPUPPCState *env, uint32_t value) +{ + _cpu_ppc_store_tbu(env, value); +} + +uint64_t cpu_ppc_load_atbl (CPUPPCState *env) +{ + ppc_tb_t *tb_env = env->tb_env; + uint64_t tb; + + tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->atb_offset); + trace_ppc_tb_load(tb); + + 
return tb; +} + +uint32_t cpu_ppc_load_atbu (CPUPPCState *env) +{ + ppc_tb_t *tb_env = env->tb_env; + uint64_t tb; + + tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->atb_offset); + trace_ppc_tb_load(tb); + + return tb >> 32; +} + +void cpu_ppc_store_atbl (CPUPPCState *env, uint32_t value) +{ + ppc_tb_t *tb_env = env->tb_env; + uint64_t tb; + + tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->atb_offset); + tb &= 0xFFFFFFFF00000000ULL; + cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), + &tb_env->atb_offset, tb | (uint64_t)value); +} + +void cpu_ppc_store_atbu (CPUPPCState *env, uint32_t value) +{ + ppc_tb_t *tb_env = env->tb_env; + uint64_t tb; + + tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->atb_offset); + tb &= 0x00000000FFFFFFFFULL; + cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), + &tb_env->atb_offset, ((uint64_t)value << 32) | tb); +} + +uint64_t cpu_ppc_load_vtb(CPUPPCState *env) +{ + ppc_tb_t *tb_env = env->tb_env; + + return cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), + tb_env->vtb_offset); +} + +void cpu_ppc_store_vtb(CPUPPCState *env, uint64_t value) +{ + ppc_tb_t *tb_env = env->tb_env; + + cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), + &tb_env->vtb_offset, value); +} + +void cpu_ppc_store_tbu40(CPUPPCState *env, uint64_t value) +{ + ppc_tb_t *tb_env = env->tb_env; + uint64_t tb; + + tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), + tb_env->tb_offset); + tb &= 0xFFFFFFUL; + tb |= (value & ~0xFFFFFFUL); + cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), + &tb_env->tb_offset, tb); +} + +static void cpu_ppc_tb_stop (CPUPPCState *env) +{ + ppc_tb_t *tb_env = env->tb_env; + uint64_t tb, atb, vmclk; + + /* If the time base is already frozen, do nothing */ + if (tb_env->tb_freq != 0) { + vmclk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + /* Get the time base */ + tb = cpu_ppc_get_tb(tb_env, vmclk, tb_env->tb_offset); + /* Get the alternate time base */ + atb = cpu_ppc_get_tb(tb_env, vmclk, tb_env->atb_offset); + /* Store the time base value (ie compute the current offset) */ + cpu_ppc_store_tb(tb_env, vmclk, &tb_env->tb_offset, tb); + /* Store the alternate time base value (compute the current offset) */ + cpu_ppc_store_tb(tb_env, vmclk, &tb_env->atb_offset, atb); + /* Set the time base frequency to zero */ + tb_env->tb_freq = 0; + /* Now, the time bases are frozen to tb_offset / atb_offset value */ + } +} + +static void cpu_ppc_tb_start (CPUPPCState *env) +{ + ppc_tb_t *tb_env = env->tb_env; + uint64_t tb, atb, vmclk; + + /* If the time base is not frozen, do nothing */ + if (tb_env->tb_freq == 0) { + vmclk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + /* Get the time base from tb_offset */ + tb = tb_env->tb_offset; + /* Get the alternate time base from atb_offset */ + atb = tb_env->atb_offset; + /* Restore the tb frequency from the decrementer frequency */ + tb_env->tb_freq = tb_env->decr_freq; + /* Store the time base value */ + cpu_ppc_store_tb(tb_env, vmclk, &tb_env->tb_offset, tb); + /* Store the alternate time base value */ + cpu_ppc_store_tb(tb_env, vmclk, &tb_env->atb_offset, atb); + } +} + +bool ppc_decr_clear_on_delivery(CPUPPCState *env) +{ + ppc_tb_t *tb_env = env->tb_env; + int flags = PPC_DECR_UNDERFLOW_TRIGGERED | PPC_DECR_UNDERFLOW_LEVEL; + return ((tb_env->flags & flags) == PPC_DECR_UNDERFLOW_TRIGGERED); +} + +static inline int64_t _cpu_ppc_load_decr(CPUPPCState *env, uint64_t next) +{ + 
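+    /*
+     * A self-checking sketch of the TBU40 masking in cpu_ppc_store_tbu40()
+     * above: the write replaces the upper 40 bits of the time base and
+     * leaves the low 24 bits ticking (plain C, nothing from QEMU assumed):
+     *
+     *     #include <stdint.h>
+     *     #include <assert.h>
+     *
+     *     static uint64_t store_tbu40(uint64_t tb, uint64_t value)
+     *     {   // keep bits 0-23 of the running TB, bits 24-63 from the store
+     *         return (tb & 0xFFFFFFULL) | (value & ~0xFFFFFFULL);
+     *     }
+     *
+     *     int main(void)
+     *     {
+     *         uint64_t nt = store_tbu40(0x0123456789ABCDEFULL,
+     *                                   0xAAAAAAAAAAAAAAAAULL);
+     *         assert((nt & 0xFFFFFFULL) == 0xABCDEF);  // low 24 bits kept
+     *         assert((nt >> 24) == 0xAAAAAAAAAAULL);   // top 40 replaced
+     *         return 0;
+     *     }
+     */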
ppc_tb_t *tb_env = env->tb_env;
+    int64_t decr, diff;
+
+    diff = next - qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
+    if (diff >= 0) {
+        decr = muldiv64(diff, tb_env->decr_freq, NANOSECONDS_PER_SECOND);
+    } else if (tb_env->flags & PPC_TIMER_BOOKE) {
+        decr = 0;
+    } else {
+        decr = -muldiv64(-diff, tb_env->decr_freq, NANOSECONDS_PER_SECOND);
+    }
+    trace_ppc_decr_load(decr);
+
+    return decr;
+}
+
+target_ulong cpu_ppc_load_decr(CPUPPCState *env)
+{
+    ppc_tb_t *tb_env = env->tb_env;
+    uint64_t decr;
+
+    if (kvm_enabled()) {
+        return env->spr[SPR_DECR];
+    }
+
+    decr = _cpu_ppc_load_decr(env, tb_env->decr_next);
+
+    /*
+     * If the large decrementer is enabled then the decrementer is sign
+     * extended to 64 bits, otherwise it is a 32 bit value.
+     */
+    if (env->spr[SPR_LPCR] & LPCR_LD) {
+        return decr;
+    }
+    return (uint32_t) decr;
+}
+
+target_ulong cpu_ppc_load_hdecr(CPUPPCState *env)
+{
+    PowerPCCPU *cpu = env_archcpu(env);
+    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
+    ppc_tb_t *tb_env = env->tb_env;
+    uint64_t hdecr;
+
+    hdecr = _cpu_ppc_load_decr(env, tb_env->hdecr_next);
+
+    /*
+     * If we have a large decrementer (POWER9 or later) then hdecr is sign
+     * extended to 64 bits, otherwise it is 32 bits.
+     */
+    if (pcc->lrg_decr_bits > 32) {
+        return hdecr;
+    }
+    return (uint32_t) hdecr;
+}
+
+uint64_t cpu_ppc_load_purr (CPUPPCState *env)
+{
+    ppc_tb_t *tb_env = env->tb_env;
+
+    return cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
+                          tb_env->purr_offset);
+}
+
+/* When the decrementer expires, all we need to do is
+ * generate or queue a CPU exception.
+ */
+static inline void cpu_ppc_decr_excp(PowerPCCPU *cpu)
+{
+    /* Raise it */
+    trace_ppc_decr_excp("raise");
+    ppc_set_irq(cpu, PPC_INTERRUPT_DECR, 1);
+}
+
+static inline void cpu_ppc_decr_lower(PowerPCCPU *cpu)
+{
+    ppc_set_irq(cpu, PPC_INTERRUPT_DECR, 0);
+}
+
+static inline void cpu_ppc_hdecr_excp(PowerPCCPU *cpu)
+{
+    CPUPPCState *env = &cpu->env;
+
+    /* Raise it */
+    trace_ppc_decr_excp("raise HV");
+
+    /* The architecture specifies that we don't deliver HDEC
+     * interrupts in a PM state. Not only do they not cause a
+     * wakeup, they also get effectively discarded.
+     */
+    if (!env->resume_as_sreset) {
+        ppc_set_irq(cpu, PPC_INTERRUPT_HDECR, 1);
+    }
+}
+
+static inline void cpu_ppc_hdecr_lower(PowerPCCPU *cpu)
+{
+    ppc_set_irq(cpu, PPC_INTERRUPT_HDECR, 0);
+}
+
+static void __cpu_ppc_store_decr(PowerPCCPU *cpu, uint64_t *nextp,
+                                 QEMUTimer *timer,
+                                 void (*raise_excp)(void *),
+                                 void (*lower_excp)(PowerPCCPU *),
+                                 target_ulong decr, target_ulong value,
+                                 int nr_bits)
+{
+    CPUPPCState *env = &cpu->env;
+    ppc_tb_t *tb_env = env->tb_env;
+    uint64_t now, next;
+    int64_t signed_value;
+    int64_t signed_decr;
+
+    /* Truncate value to decr_width and sign extend for simplicity */
+    signed_value = sextract64(value, 0, nr_bits);
+    signed_decr = sextract64(decr, 0, nr_bits);
+
+    trace_ppc_decr_store(nr_bits, decr, value);
+
+    if (kvm_enabled()) {
+        /* KVM handles decrementer exceptions, we don't need our own timer */
+        return;
+    }
+
+    /*
+     * Going from 2 -> 1, 1 -> 0 or 0 -> -1 is the event that generates a DEC
+     * interrupt.
+     *
+     * If we get a really small DEC value, we can assume that by the time we
+     * handle it, an interrupt is already due.
+     *
+     * On MSB level based DEC implementations the MSB always means the
+     * interrupt is pending, so raise it on those.
+     *
+     * On MSB edge based DEC implementations the MSB going from 0 -> 1 triggers
+     * an edge interrupt, so raise it here too.
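+     *
+     * The width handling here hinges on sextract64(): the value is
+     * truncated to nr_bits and sign extended, so the MSB tests above work
+     * for any decrementer width.  A standalone sketch (sextract64()
+     * re-derived for illustration; QEMU's own lives in bitops.h, and the
+     * arithmetic right shift assumes the usual two's-complement compilers):
+     *
+     *     #include <stdint.h>
+     *     #include <assert.h>
+     *
+     *     static int64_t sextract64(uint64_t value, int start, int length)
+     *     {   // shift the field to the top, arithmetic-shift back down
+     *         return ((int64_t)(value << (64 - length - start)))
+     *                >> (64 - length);
+     *     }
+     *
+     *     int main(void)
+     *     {
+     *         uint64_t raw = 0xFFFFFFF0;             // 32-bit DECR, MSB set
+     *         assert((uint32_t)raw == 0xFFFFFFF0u);  // what the guest reads
+     *         assert(sextract64(raw, 0, 32) == -16); // sign made explicit
+     *         return 0;
+     *     }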
+ */ + if ((value < 3) || + ((tb_env->flags & PPC_DECR_UNDERFLOW_LEVEL) && signed_value < 0) || + ((tb_env->flags & PPC_DECR_UNDERFLOW_TRIGGERED) && signed_value < 0 + && signed_decr >= 0)) { + (*raise_excp)(cpu); + return; + } + + /* On MSB level based systems a 0 for the MSB stops interrupt delivery */ + if (signed_value >= 0 && (tb_env->flags & PPC_DECR_UNDERFLOW_LEVEL)) { + (*lower_excp)(cpu); + } + + /* Calculate the next timer event */ + now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + next = now + muldiv64(value, NANOSECONDS_PER_SECOND, tb_env->decr_freq); + *nextp = next; + + /* Adjust timer */ + timer_mod(timer, next); +} + +static inline void _cpu_ppc_store_decr(PowerPCCPU *cpu, target_ulong decr, + target_ulong value, int nr_bits) +{ + ppc_tb_t *tb_env = cpu->env.tb_env; + + __cpu_ppc_store_decr(cpu, &tb_env->decr_next, tb_env->decr_timer, + tb_env->decr_timer->cb, &cpu_ppc_decr_lower, decr, + value, nr_bits); +} + +void cpu_ppc_store_decr(CPUPPCState *env, target_ulong value) +{ + PowerPCCPU *cpu = env_archcpu(env); + PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu); + int nr_bits = 32; + + if (env->spr[SPR_LPCR] & LPCR_LD) { + nr_bits = pcc->lrg_decr_bits; + } + + _cpu_ppc_store_decr(cpu, cpu_ppc_load_decr(env), value, nr_bits); +} + +static void cpu_ppc_decr_cb(void *opaque) +{ + PowerPCCPU *cpu = opaque; + + cpu_ppc_decr_excp(cpu); +} + +static inline void _cpu_ppc_store_hdecr(PowerPCCPU *cpu, target_ulong hdecr, + target_ulong value, int nr_bits) +{ + ppc_tb_t *tb_env = cpu->env.tb_env; + + if (tb_env->hdecr_timer != NULL) { + __cpu_ppc_store_decr(cpu, &tb_env->hdecr_next, tb_env->hdecr_timer, + tb_env->hdecr_timer->cb, &cpu_ppc_hdecr_lower, + hdecr, value, nr_bits); + } +} + +void cpu_ppc_store_hdecr(CPUPPCState *env, target_ulong value) +{ + PowerPCCPU *cpu = env_archcpu(env); + PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu); + + _cpu_ppc_store_hdecr(cpu, cpu_ppc_load_hdecr(env), value, + pcc->lrg_decr_bits); +} + +static void cpu_ppc_hdecr_cb(void *opaque) +{ + PowerPCCPU *cpu = opaque; + + cpu_ppc_hdecr_excp(cpu); +} + +void cpu_ppc_store_purr(CPUPPCState *env, uint64_t value) +{ + ppc_tb_t *tb_env = env->tb_env; + + cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), + &tb_env->purr_offset, value); +} + +static void cpu_ppc_set_tb_clk (void *opaque, uint32_t freq) +{ + CPUPPCState *env = opaque; + PowerPCCPU *cpu = env_archcpu(env); + ppc_tb_t *tb_env = env->tb_env; + + tb_env->tb_freq = freq; + tb_env->decr_freq = freq; + /* There is a bug in Linux 2.4 kernels: + * if a decrementer exception is pending when it enables msr_ee at startup, + * it's not ready to handle it... 
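+     *
+     * Aside: the raise/lower decision in __cpu_ppc_store_decr() above can
+     * be restated as a pure function.  The flag values below are
+     * illustrative placeholders, not QEMU's PPC_DECR_* constants:
+     *
+     *     #include <stdbool.h>
+     *     #include <stdint.h>
+     *
+     *     #define UNDERFLOW_TRIGGERED (1u << 0) // edge: MSB goes 0 -> 1
+     *     #define UNDERFLOW_LEVEL     (1u << 1) // level: MSB set = pending
+     *
+     *     static bool decr_should_raise(uint32_t flags, uint64_t value,
+     *                                   int64_t sval, int64_t sdecr)
+     *     {
+     *         if (value < 3) {
+     *             return true;  // due before a timer could even be armed
+     *         }
+     *         if ((flags & UNDERFLOW_LEVEL) && sval < 0) {
+     *             return true;  // level-based: MSB already set
+     *         }
+     *         if ((flags & UNDERFLOW_TRIGGERED) && sval < 0 && sdecr >= 0) {
+     *             return true;  // edge-based: MSB just flipped to one
+     *         }
+     *         return false;     // otherwise arm the timer for 'value' ticks
+     *     }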
+ */ + _cpu_ppc_store_decr(cpu, 0xFFFFFFFF, 0xFFFFFFFF, 32); + _cpu_ppc_store_hdecr(cpu, 0xFFFFFFFF, 0xFFFFFFFF, 32); + cpu_ppc_store_purr(env, 0x0000000000000000ULL); +} + +static void timebase_save(PPCTimebase *tb) +{ + uint64_t ticks = cpu_get_host_ticks(); + PowerPCCPU *first_ppc_cpu = POWERPC_CPU(first_cpu); + + if (!first_ppc_cpu->env.tb_env) { + error_report("No timebase object"); + return; + } + + /* not used anymore, we keep it for compatibility */ + tb->time_of_the_day_ns = qemu_clock_get_ns(QEMU_CLOCK_HOST); + /* + * tb_offset is only expected to be changed by QEMU so + * there is no need to update it from KVM here + */ + tb->guest_timebase = ticks + first_ppc_cpu->env.tb_env->tb_offset; + + tb->runstate_paused = + runstate_check(RUN_STATE_PAUSED) || runstate_check(RUN_STATE_SAVE_VM); +} + +static void timebase_load(PPCTimebase *tb) +{ + CPUState *cpu; + PowerPCCPU *first_ppc_cpu = POWERPC_CPU(first_cpu); + int64_t tb_off_adj, tb_off; + unsigned long freq; + + if (!first_ppc_cpu->env.tb_env) { + error_report("No timebase object"); + return; + } + + freq = first_ppc_cpu->env.tb_env->tb_freq; + + tb_off_adj = tb->guest_timebase - cpu_get_host_ticks(); + + tb_off = first_ppc_cpu->env.tb_env->tb_offset; + trace_ppc_tb_adjust(tb_off, tb_off_adj, tb_off_adj - tb_off, + (tb_off_adj - tb_off) / freq); + + /* Set new offset to all CPUs */ + CPU_FOREACH(cpu) { + PowerPCCPU *pcpu = POWERPC_CPU(cpu); + pcpu->env.tb_env->tb_offset = tb_off_adj; + kvmppc_set_reg_tb_offset(pcpu, pcpu->env.tb_env->tb_offset); + } +} + +void cpu_ppc_clock_vm_state_change(void *opaque, bool running, + RunState state) +{ + PPCTimebase *tb = opaque; + + if (running) { + timebase_load(tb); + } else { + timebase_save(tb); + } +} + +/* + * When migrating a running guest, read the clock just + * before migration, so that the guest clock counts + * during the events between: + * + * * vm_stop() + * * + * * pre_save() + * + * This reduces clock difference on migration from 5s + * to 0.1s (when max_downtime == 5s), because sending the + * final pages of memory (which happens between vm_stop() + * and pre_save()) takes max_downtime. 
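+ *
+ * The arithmetic behind timebase_save()/timebase_load() above, isolated
+ * (a sketch; the host tick values are made up for illustration):
+ *
+ *     #include <stdint.h>
+ *     #include <assert.h>
+ *
+ *     int main(void)
+ *     {
+ *         // source host: ticks = 1000, guest tb_offset = 42
+ *         uint64_t guest_timebase = 1000 + 42;         // timebase_save()
+ *         // destination host: its own tick counter reads 5000
+ *         int64_t tb_off_adj = guest_timebase - 5000;  // timebase_load()
+ *         // the guest-visible TB resumes exactly where it left off
+ *         assert(5000 + tb_off_adj == 1042);
+ *         return 0;
+ *     }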
+ */
+static int timebase_pre_save(void *opaque)
+{
+    PPCTimebase *tb = opaque;
+
+    /* guest_timebase won't be overridden in case of paused guest or savevm */
+    if (!tb->runstate_paused) {
+        timebase_save(tb);
+    }
+
+    return 0;
+}
+
+const VMStateDescription vmstate_ppc_timebase = {
+    .name = "timebase",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .minimum_version_id_old = 1,
+    .pre_save = timebase_pre_save,
+    .fields = (VMStateField []) {
+        VMSTATE_UINT64(guest_timebase, PPCTimebase),
+        VMSTATE_INT64(time_of_the_day_ns, PPCTimebase),
+        VMSTATE_END_OF_LIST()
+    },
+};
+
+/* Set up (once) the timebase frequency (in Hz) */
+clk_setup_cb cpu_ppc_tb_init (CPUPPCState *env, uint32_t freq)
+{
+    PowerPCCPU *cpu = env_archcpu(env);
+    ppc_tb_t *tb_env;
+
+    tb_env = g_malloc0(sizeof(ppc_tb_t));
+    env->tb_env = tb_env;
+    tb_env->flags = PPC_DECR_UNDERFLOW_TRIGGERED;
+    if (is_book3s_arch2x(env)) {
+        /* All Book3S 64-bit CPUs implement level based DEC logic */
+        tb_env->flags |= PPC_DECR_UNDERFLOW_LEVEL;
+    }
+    /* Create new timer */
+    tb_env->decr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_ppc_decr_cb, cpu);
+    if (env->has_hv_mode) {
+        tb_env->hdecr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_ppc_hdecr_cb,
+                                           cpu);
+    } else {
+        tb_env->hdecr_timer = NULL;
+    }
+    cpu_ppc_set_tb_clk(env, freq);
+
+    return &cpu_ppc_set_tb_clk;
+}
+
+/* Specific helpers for the POWER & PowerPC 601 RTC */
+void cpu_ppc601_store_rtcu (CPUPPCState *env, uint32_t value)
+{
+    _cpu_ppc_store_tbu(env, value);
+}
+
+uint32_t cpu_ppc601_load_rtcu (CPUPPCState *env)
+{
+    return _cpu_ppc_load_tbu(env);
+}
+
+void cpu_ppc601_store_rtcl (CPUPPCState *env, uint32_t value)
+{
+    cpu_ppc_store_tbl(env, value & 0x3FFFFF80);
+}
+
+uint32_t cpu_ppc601_load_rtcl (CPUPPCState *env)
+{
+    return cpu_ppc_load_tbl(env) & 0x3FFFFF80;
+}
+
+/*****************************************************************************/
+/* PowerPC 40x timers */
+
+/* PIT, FIT & WDT */
+typedef struct ppc40x_timer_t ppc40x_timer_t;
+struct ppc40x_timer_t {
+    uint64_t pit_reload;  /* PIT auto-reload value       */
+    uint64_t fit_next;    /* Tick for next FIT interrupt */
+    QEMUTimer *fit_timer;
+    uint64_t wdt_next;    /* Tick for next WDT interrupt */
+    QEMUTimer *wdt_timer;
+
+    /* The 405 has a PIT; the 440 has a DECR.
*/ + unsigned int decr_excp; +}; + +/* Fixed interval timer */ +static void cpu_4xx_fit_cb (void *opaque) +{ + PowerPCCPU *cpu; + CPUPPCState *env; + ppc_tb_t *tb_env; + ppc40x_timer_t *ppc40x_timer; + uint64_t now, next; + + env = opaque; + cpu = env_archcpu(env); + tb_env = env->tb_env; + ppc40x_timer = tb_env->opaque; + now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + switch ((env->spr[SPR_40x_TCR] >> 24) & 0x3) { + case 0: + next = 1 << 9; + break; + case 1: + next = 1 << 13; + break; + case 2: + next = 1 << 17; + break; + case 3: + next = 1 << 21; + break; + default: + /* Cannot occur, but makes gcc happy */ + return; + } + next = now + muldiv64(next, NANOSECONDS_PER_SECOND, tb_env->tb_freq); + if (next == now) + next++; + timer_mod(ppc40x_timer->fit_timer, next); + env->spr[SPR_40x_TSR] |= 1 << 26; + if ((env->spr[SPR_40x_TCR] >> 23) & 0x1) { + ppc_set_irq(cpu, PPC_INTERRUPT_FIT, 1); + } + trace_ppc4xx_fit((int)((env->spr[SPR_40x_TCR] >> 23) & 0x1), + env->spr[SPR_40x_TCR], env->spr[SPR_40x_TSR]); +} + +/* Programmable interval timer */ +static void start_stop_pit (CPUPPCState *env, ppc_tb_t *tb_env, int is_excp) +{ + ppc40x_timer_t *ppc40x_timer; + uint64_t now, next; + + ppc40x_timer = tb_env->opaque; + if (ppc40x_timer->pit_reload <= 1 || + !((env->spr[SPR_40x_TCR] >> 26) & 0x1) || + (is_excp && !((env->spr[SPR_40x_TCR] >> 22) & 0x1))) { + /* Stop PIT */ + trace_ppc4xx_pit_stop(); + timer_del(tb_env->decr_timer); + } else { + trace_ppc4xx_pit_start(ppc40x_timer->pit_reload); + now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + next = now + muldiv64(ppc40x_timer->pit_reload, + NANOSECONDS_PER_SECOND, tb_env->decr_freq); + if (is_excp) + next += tb_env->decr_next - now; + if (next == now) + next++; + timer_mod(tb_env->decr_timer, next); + tb_env->decr_next = next; + } +} + +static void cpu_4xx_pit_cb (void *opaque) +{ + PowerPCCPU *cpu; + CPUPPCState *env; + ppc_tb_t *tb_env; + ppc40x_timer_t *ppc40x_timer; + + env = opaque; + cpu = env_archcpu(env); + tb_env = env->tb_env; + ppc40x_timer = tb_env->opaque; + env->spr[SPR_40x_TSR] |= 1 << 27; + if ((env->spr[SPR_40x_TCR] >> 26) & 0x1) { + ppc_set_irq(cpu, ppc40x_timer->decr_excp, 1); + } + start_stop_pit(env, tb_env, 1); + trace_ppc4xx_pit((int)((env->spr[SPR_40x_TCR] >> 22) & 0x1), + (int)((env->spr[SPR_40x_TCR] >> 26) & 0x1), + env->spr[SPR_40x_TCR], env->spr[SPR_40x_TSR], + ppc40x_timer->pit_reload); +} + +/* Watchdog timer */ +static void cpu_4xx_wdt_cb (void *opaque) +{ + PowerPCCPU *cpu; + CPUPPCState *env; + ppc_tb_t *tb_env; + ppc40x_timer_t *ppc40x_timer; + uint64_t now, next; + + env = opaque; + cpu = env_archcpu(env); + tb_env = env->tb_env; + ppc40x_timer = tb_env->opaque; + now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + switch ((env->spr[SPR_40x_TCR] >> 30) & 0x3) { + case 0: + next = 1 << 17; + break; + case 1: + next = 1 << 21; + break; + case 2: + next = 1 << 25; + break; + case 3: + next = 1 << 29; + break; + default: + /* Cannot occur, but makes gcc happy */ + return; + } + next = now + muldiv64(next, NANOSECONDS_PER_SECOND, tb_env->decr_freq); + if (next == now) + next++; + trace_ppc4xx_wdt(env->spr[SPR_40x_TCR], env->spr[SPR_40x_TSR]); + switch ((env->spr[SPR_40x_TSR] >> 30) & 0x3) { + case 0x0: + case 0x1: + timer_mod(ppc40x_timer->wdt_timer, next); + ppc40x_timer->wdt_next = next; + env->spr[SPR_40x_TSR] |= 1U << 31; + break; + case 0x2: + timer_mod(ppc40x_timer->wdt_timer, next); + ppc40x_timer->wdt_next = next; + env->spr[SPR_40x_TSR] |= 1 << 30; + if ((env->spr[SPR_40x_TCR] >> 27) & 0x1) { + ppc_set_irq(cpu, 
PPC_INTERRUPT_WDT, 1); + } + break; + case 0x3: + env->spr[SPR_40x_TSR] &= ~0x30000000; + env->spr[SPR_40x_TSR] |= env->spr[SPR_40x_TCR] & 0x30000000; + switch ((env->spr[SPR_40x_TCR] >> 28) & 0x3) { + case 0x0: + /* No reset */ + break; + case 0x1: /* Core reset */ + ppc40x_core_reset(cpu); + break; + case 0x2: /* Chip reset */ + ppc40x_chip_reset(cpu); + break; + case 0x3: /* System reset */ + ppc40x_system_reset(cpu); + break; + } + } +} + +void store_40x_pit (CPUPPCState *env, target_ulong val) +{ + ppc_tb_t *tb_env; + ppc40x_timer_t *ppc40x_timer; + + tb_env = env->tb_env; + ppc40x_timer = tb_env->opaque; + trace_ppc40x_store_pit(val); + ppc40x_timer->pit_reload = val; + start_stop_pit(env, tb_env, 0); +} + +target_ulong load_40x_pit (CPUPPCState *env) +{ + return cpu_ppc_load_decr(env); +} + +static void ppc_40x_set_tb_clk (void *opaque, uint32_t freq) +{ + CPUPPCState *env = opaque; + ppc_tb_t *tb_env = env->tb_env; + + trace_ppc40x_set_tb_clk(freq); + tb_env->tb_freq = freq; + tb_env->decr_freq = freq; + /* XXX: we should also update all timers */ +} + +clk_setup_cb ppc_40x_timers_init (CPUPPCState *env, uint32_t freq, + unsigned int decr_excp) +{ + ppc_tb_t *tb_env; + ppc40x_timer_t *ppc40x_timer; + + tb_env = g_malloc0(sizeof(ppc_tb_t)); + env->tb_env = tb_env; + tb_env->flags = PPC_DECR_UNDERFLOW_TRIGGERED; + ppc40x_timer = g_malloc0(sizeof(ppc40x_timer_t)); + tb_env->tb_freq = freq; + tb_env->decr_freq = freq; + tb_env->opaque = ppc40x_timer; + trace_ppc40x_timers_init(freq); + if (ppc40x_timer != NULL) { + /* We use decr timer for PIT */ + tb_env->decr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_4xx_pit_cb, env); + ppc40x_timer->fit_timer = + timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_4xx_fit_cb, env); + ppc40x_timer->wdt_timer = + timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_4xx_wdt_cb, env); + ppc40x_timer->decr_excp = decr_excp; + } + + return &ppc_40x_set_tb_clk; +} + +/*****************************************************************************/ +/* Embedded PowerPC Device Control Registers */ +typedef struct ppc_dcrn_t ppc_dcrn_t; +struct ppc_dcrn_t { + dcr_read_cb dcr_read; + dcr_write_cb dcr_write; + void *opaque; +}; + +/* XXX: on 460, DCR addresses are 32 bits wide, + * using DCRIPR to get the 22 upper bits of the DCR address + */ +#define DCRN_NB 1024 +struct ppc_dcr_t { + ppc_dcrn_t dcrn[DCRN_NB]; + int (*read_error)(int dcrn); + int (*write_error)(int dcrn); +}; + +int ppc_dcr_read (ppc_dcr_t *dcr_env, int dcrn, uint32_t *valp) +{ + ppc_dcrn_t *dcr; + + if (dcrn < 0 || dcrn >= DCRN_NB) + goto error; + dcr = &dcr_env->dcrn[dcrn]; + if (dcr->dcr_read == NULL) + goto error; + *valp = (*dcr->dcr_read)(dcr->opaque, dcrn); + + return 0; + + error: + if (dcr_env->read_error != NULL) + return (*dcr_env->read_error)(dcrn); + + return -1; +} + +int ppc_dcr_write (ppc_dcr_t *dcr_env, int dcrn, uint32_t val) +{ + ppc_dcrn_t *dcr; + + if (dcrn < 0 || dcrn >= DCRN_NB) + goto error; + dcr = &dcr_env->dcrn[dcrn]; + if (dcr->dcr_write == NULL) + goto error; + (*dcr->dcr_write)(dcr->opaque, dcrn, val); + + return 0; + + error: + if (dcr_env->write_error != NULL) + return (*dcr_env->write_error)(dcrn); + + return -1; +} + +int ppc_dcr_register (CPUPPCState *env, int dcrn, void *opaque, + dcr_read_cb dcr_read, dcr_write_cb dcr_write) +{ + ppc_dcr_t *dcr_env; + ppc_dcrn_t *dcr; + + dcr_env = env->dcr_env; + if (dcr_env == NULL) + return -1; + if (dcrn < 0 || dcrn >= DCRN_NB) + return -1; + dcr = &dcr_env->dcrn[dcrn]; + if (dcr->opaque != NULL || + dcr->dcr_read != NULL || + dcr->dcr_write != 
NULL) + return -1; + dcr->opaque = opaque; + dcr->dcr_read = dcr_read; + dcr->dcr_write = dcr_write; + + return 0; +} + +int ppc_dcr_init (CPUPPCState *env, int (*read_error)(int dcrn), + int (*write_error)(int dcrn)) +{ + ppc_dcr_t *dcr_env; + + dcr_env = g_malloc0(sizeof(ppc_dcr_t)); + dcr_env->read_error = read_error; + dcr_env->write_error = write_error; + env->dcr_env = dcr_env; + + return 0; +} + +/*****************************************************************************/ + +int ppc_cpu_pir(PowerPCCPU *cpu) +{ + CPUPPCState *env = &cpu->env; + return env->spr_cb[SPR_PIR].default_value; +} + +PowerPCCPU *ppc_get_vcpu_by_pir(int pir) +{ + CPUState *cs; + + CPU_FOREACH(cs) { + PowerPCCPU *cpu = POWERPC_CPU(cs); + + if (ppc_cpu_pir(cpu) == pir) { + return cpu; + } + } + + return NULL; +} + +void ppc_irq_reset(PowerPCCPU *cpu) +{ + CPUPPCState *env = &cpu->env; + + env->irq_input_state = 0; + kvmppc_set_interrupt(cpu, PPC_INTERRUPT_EXT, 0); +} diff --git a/hw/ppc/ppc405.h b/hw/ppc/ppc405.h new file mode 100644 index 000000000..c58f73988 --- /dev/null +++ b/hw/ppc/ppc405.h @@ -0,0 +1,72 @@ +/* + * QEMU PowerPC 405 shared definitions + * + * Copyright (c) 2007 Jocelyn Mayer + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
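+ *
+ * Aside on the 40x timer and DCR code above: cpu_4xx_fit_cb() and
+ * cpu_4xx_wdt_cb() both decode a two-bit TCR field into a power-of-two
+ * time-base period, and the DCR space is a bounds-checked callback table
+ * with an error fallback.  Two miniature, self-contained sketches (shift
+ * values taken from the FIT case; __int128 stands in for muldiv64()):
+ *
+ *     #include <stdint.h>
+ *     #include <stddef.h>
+ *
+ *     static uint64_t fit_period_ns(uint32_t tcr, uint32_t tb_freq)
+ *     {
+ *         static const int shift[4] = { 9, 13, 17, 21 }; // TCR[25:24]
+ *         uint64_t ticks = 1ULL << shift[(tcr >> 24) & 0x3];
+ *         return (uint64_t)(((unsigned __int128)ticks * 1000000000ULL)
+ *                           / tb_freq);
+ *     }
+ *
+ *     #define DCRN_NB 1024
+ *
+ *     typedef uint32_t (*dcr_read_cb)(void *opaque, int dcrn);
+ *
+ *     typedef struct {
+ *         struct { dcr_read_cb read; void *opaque; } dcrn[DCRN_NB];
+ *         int (*read_error)(int dcrn);   // board fallback, may be NULL
+ *     } dcr_table;
+ *
+ *     static int dcr_read(dcr_table *t, int dcrn, uint32_t *valp)
+ *     {
+ *         if (dcrn < 0 || dcrn >= DCRN_NB || t->dcrn[dcrn].read == NULL) {
+ *             return t->read_error ? t->read_error(dcrn) : -1;
+ *         }
+ *         *valp = t->dcrn[dcrn].read(t->dcrn[dcrn].opaque, dcrn);
+ *         return 0;
+ *     }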
+ */ + +#ifndef PPC405_H +#define PPC405_H + +#include "hw/ppc/ppc4xx.h" + +/* Bootinfo as set-up by u-boot */ +typedef struct ppc4xx_bd_info_t ppc4xx_bd_info_t; +struct ppc4xx_bd_info_t { + uint32_t bi_memstart; + uint32_t bi_memsize; + uint32_t bi_flashstart; + uint32_t bi_flashsize; + uint32_t bi_flashoffset; /* 0x10 */ + uint32_t bi_sramstart; + uint32_t bi_sramsize; + uint32_t bi_bootflags; + uint32_t bi_ipaddr; /* 0x20 */ + uint8_t bi_enetaddr[6]; + uint16_t bi_ethspeed; + uint32_t bi_intfreq; + uint32_t bi_busfreq; /* 0x30 */ + uint32_t bi_baudrate; + uint8_t bi_s_version[4]; + uint8_t bi_r_version[32]; + uint32_t bi_procfreq; + uint32_t bi_plb_busfreq; + uint32_t bi_pci_busfreq; + uint8_t bi_pci_enetaddr[6]; + uint32_t bi_pci_enetaddr2[6]; + uint32_t bi_opbfreq; + uint32_t bi_iic_fast[2]; +}; + +/* PowerPC 405 core */ +ram_addr_t ppc405_set_bootinfo (CPUPPCState *env, ppc4xx_bd_info_t *bd, + uint32_t flags); + +void ppc4xx_plb_init(CPUPPCState *env); +void ppc405_ebc_init(CPUPPCState *env); + +CPUPPCState *ppc405ep_init(MemoryRegion *address_space_mem, + MemoryRegion ram_memories[2], + hwaddr ram_bases[2], + hwaddr ram_sizes[2], + uint32_t sysclk, DeviceState **uicdev, + int do_init); + +#endif /* PPC405_H */ diff --git a/hw/ppc/ppc405_boards.c b/hw/ppc/ppc405_boards.c new file mode 100644 index 000000000..972a7a4a3 --- /dev/null +++ b/hw/ppc/ppc405_boards.c @@ -0,0 +1,564 @@ +/* + * QEMU PowerPC 405 evaluation boards emulation + * + * Copyright (c) 2007 Jocelyn Mayer + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
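+ *
+ * Aside: the hex offset comments in ppc4xx_bd_info_t above (0x10, 0x20,
+ * 0x30, ...) mark where U-Boot expects each field.  They can be checked
+ * mechanically; a sketch over a trimmed copy of the first nine fields
+ * (C11 static_assert, nothing from QEMU assumed):
+ *
+ *     #include <stdint.h>
+ *     #include <stddef.h>
+ *     #include <assert.h>
+ *
+ *     struct bd_head {
+ *         uint32_t bi_memstart, bi_memsize, bi_flashstart, bi_flashsize;
+ *         uint32_t bi_flashoffset;   // expected at 0x10
+ *         uint32_t bi_sramstart, bi_sramsize, bi_bootflags;
+ *         uint32_t bi_ipaddr;        // expected at 0x20
+ *     };
+ *
+ *     static_assert(offsetof(struct bd_head, bi_flashoffset) == 0x10, "");
+ *     static_assert(offsetof(struct bd_head, bi_ipaddr) == 0x20, "");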
+ */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "qapi/error.h" +#include "qemu-common.h" +#include "qemu/datadir.h" +#include "cpu.h" +#include "hw/ppc/ppc.h" +#include "hw/qdev-properties.h" +#include "hw/sysbus.h" +#include "ppc405.h" +#include "hw/rtc/m48t59.h" +#include "hw/block/flash.h" +#include "sysemu/qtest.h" +#include "sysemu/reset.h" +#include "sysemu/block-backend.h" +#include "hw/boards.h" +#include "qemu/error-report.h" +#include "hw/loader.h" +#include "qemu/cutils.h" + +#define BIOS_FILENAME "ppc405_rom.bin" +#define BIOS_SIZE (2 * MiB) + +#define KERNEL_LOAD_ADDR 0x00000000 +#define INITRD_LOAD_ADDR 0x01800000 + +#define USE_FLASH_BIOS + +/*****************************************************************************/ +/* PPC405EP reference board (IBM) */ +/* Standalone board with: + * - PowerPC 405EP CPU + * - SDRAM (0x00000000) + * - Flash (0xFFF80000) + * - SRAM (0xFFF00000) + * - NVRAM (0xF0000000) + * - FPGA (0xF0300000) + */ +typedef struct ref405ep_fpga_t ref405ep_fpga_t; +struct ref405ep_fpga_t { + uint8_t reg0; + uint8_t reg1; +}; + +static uint64_t ref405ep_fpga_readb(void *opaque, hwaddr addr, unsigned size) +{ + ref405ep_fpga_t *fpga; + uint32_t ret; + + fpga = opaque; + switch (addr) { + case 0x0: + ret = fpga->reg0; + break; + case 0x1: + ret = fpga->reg1; + break; + default: + ret = 0; + break; + } + + return ret; +} + +static void ref405ep_fpga_writeb(void *opaque, hwaddr addr, uint64_t value, + unsigned size) +{ + ref405ep_fpga_t *fpga; + + fpga = opaque; + switch (addr) { + case 0x0: + /* Read only */ + break; + case 0x1: + fpga->reg1 = value; + break; + default: + break; + } +} + +static const MemoryRegionOps ref405ep_fpga_ops = { + .read = ref405ep_fpga_readb, + .write = ref405ep_fpga_writeb, + .impl.min_access_size = 1, + .impl.max_access_size = 1, + .valid.min_access_size = 1, + .valid.max_access_size = 4, + .endianness = DEVICE_BIG_ENDIAN, +}; + +static void ref405ep_fpga_reset (void *opaque) +{ + ref405ep_fpga_t *fpga; + + fpga = opaque; + fpga->reg0 = 0x00; + fpga->reg1 = 0x0F; +} + +static void ref405ep_fpga_init(MemoryRegion *sysmem, uint32_t base) +{ + ref405ep_fpga_t *fpga; + MemoryRegion *fpga_memory = g_new(MemoryRegion, 1); + + fpga = g_malloc0(sizeof(ref405ep_fpga_t)); + memory_region_init_io(fpga_memory, NULL, &ref405ep_fpga_ops, fpga, + "fpga", 0x00000100); + memory_region_add_subregion(sysmem, base, fpga_memory); + qemu_register_reset(&ref405ep_fpga_reset, fpga); +} + +static void ref405ep_init(MachineState *machine) +{ + MachineClass *mc = MACHINE_GET_CLASS(machine); + const char *bios_name = machine->firmware ?: BIOS_FILENAME; + const char *kernel_filename = machine->kernel_filename; + const char *kernel_cmdline = machine->kernel_cmdline; + const char *initrd_filename = machine->initrd_filename; + char *filename; + ppc4xx_bd_info_t bd; + CPUPPCState *env; + DeviceState *dev; + SysBusDevice *s; + MemoryRegion *bios; + MemoryRegion *sram = g_new(MemoryRegion, 1); + ram_addr_t bdloc; + MemoryRegion *ram_memories = g_new(MemoryRegion, 2); + hwaddr ram_bases[2], ram_sizes[2]; + target_ulong sram_size; + long bios_size; + //int phy_addr = 0; + //static int phy_addr = 1; + target_ulong kernel_base, initrd_base; + long kernel_size, initrd_size; + int linux_boot; + int len; + DriveInfo *dinfo; + MemoryRegion *sysmem = get_system_memory(); + DeviceState *uicdev; + + if (machine->ram_size != mc->default_ram_size) { + char *sz = size_to_str(mc->default_ram_size); + error_report("Invalid RAM size, should be %s", sz); + 
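+
+        /*
+         * Aside: the FPGA model above is the minimal MMIO register-bank
+         * pattern: a byte-wide switch on the offset, reg0 read-only,
+         * unknown offsets reading as zero.  Stripped of the
+         * MemoryRegionOps scaffolding (a sketch, assuming <stdint.h>):
+         *
+         *     typedef struct { uint8_t reg0, reg1; } fpga_regs;
+         *
+         *     static uint8_t fpga_read(const fpga_regs *f, unsigned addr)
+         *     {
+         *         switch (addr) {
+         *         case 0x0: return f->reg0;
+         *         case 0x1: return f->reg1;
+         *         default:  return 0;   // unimplemented reads as zero
+         *         }
+         *     }
+         *
+         *     static void fpga_write(fpga_regs *f, unsigned addr, uint8_t v)
+         *     {
+         *         if (addr == 0x1) {
+         *             f->reg1 = v;      // reg0 read-only, others ignored
+         *         }
+         *     }
+         */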
g_free(sz); + exit(EXIT_FAILURE); + } + + /* XXX: fix this */ + memory_region_init_alias(&ram_memories[0], NULL, "ef405ep.ram.alias", + machine->ram, 0, machine->ram_size); + ram_bases[0] = 0; + ram_sizes[0] = machine->ram_size; + memory_region_init(&ram_memories[1], NULL, "ef405ep.ram1", 0); + ram_bases[1] = 0x00000000; + ram_sizes[1] = 0x00000000; + env = ppc405ep_init(sysmem, ram_memories, ram_bases, ram_sizes, + 33333333, &uicdev, kernel_filename == NULL ? 0 : 1); + /* allocate SRAM */ + sram_size = 512 * KiB; + memory_region_init_ram(sram, NULL, "ef405ep.sram", sram_size, + &error_fatal); + memory_region_add_subregion(sysmem, 0xFFF00000, sram); + /* allocate and load BIOS */ +#ifdef USE_FLASH_BIOS + dinfo = drive_get(IF_PFLASH, 0, 0); + if (dinfo) { + bios_size = 8 * MiB; + pflash_cfi02_register((uint32_t)(-bios_size), + "ef405ep.bios", bios_size, + blk_by_legacy_dinfo(dinfo), + 64 * KiB, 1, + 2, 0x0001, 0x22DA, 0x0000, 0x0000, 0x555, 0x2AA, + 1); + } else +#endif + { + bios = g_new(MemoryRegion, 1); + memory_region_init_rom(bios, NULL, "ef405ep.bios", BIOS_SIZE, + &error_fatal); + + filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name); + if (filename) { + bios_size = load_image_size(filename, + memory_region_get_ram_ptr(bios), + BIOS_SIZE); + g_free(filename); + if (bios_size < 0) { + error_report("Could not load PowerPC BIOS '%s'", bios_name); + exit(1); + } + bios_size = (bios_size + 0xfff) & ~0xfff; + memory_region_add_subregion(sysmem, (uint32_t)(-bios_size), bios); + } else if (!qtest_enabled() || kernel_filename != NULL) { + error_report("Could not load PowerPC BIOS '%s'", bios_name); + exit(1); + } else { + /* Avoid an uninitialized variable warning */ + bios_size = -1; + } + } + /* Register FPGA */ + ref405ep_fpga_init(sysmem, 0xF0300000); + /* Register NVRAM */ + dev = qdev_new("sysbus-m48t08"); + qdev_prop_set_int32(dev, "base-year", 1968); + s = SYS_BUS_DEVICE(dev); + sysbus_realize_and_unref(s, &error_fatal); + sysbus_mmio_map(s, 0, 0xF0000000); + /* Load kernel */ + linux_boot = (kernel_filename != NULL); + if (linux_boot) { + memset(&bd, 0, sizeof(bd)); + bd.bi_memstart = 0x00000000; + bd.bi_memsize = machine->ram_size; + bd.bi_flashstart = -bios_size; + bd.bi_flashsize = -bios_size; + bd.bi_flashoffset = 0; + bd.bi_sramstart = 0xFFF00000; + bd.bi_sramsize = sram_size; + bd.bi_bootflags = 0; + bd.bi_intfreq = 133333333; + bd.bi_busfreq = 33333333; + bd.bi_baudrate = 115200; + bd.bi_s_version[0] = 'Q'; + bd.bi_s_version[1] = 'M'; + bd.bi_s_version[2] = 'U'; + bd.bi_s_version[3] = '\0'; + bd.bi_r_version[0] = 'Q'; + bd.bi_r_version[1] = 'E'; + bd.bi_r_version[2] = 'M'; + bd.bi_r_version[3] = 'U'; + bd.bi_r_version[4] = '\0'; + bd.bi_procfreq = 133333333; + bd.bi_plb_busfreq = 33333333; + bd.bi_pci_busfreq = 33333333; + bd.bi_opbfreq = 33333333; + bdloc = ppc405_set_bootinfo(env, &bd, 0x00000001); + env->gpr[3] = bdloc; + kernel_base = KERNEL_LOAD_ADDR; + /* now we can load the kernel */ + kernel_size = load_image_targphys(kernel_filename, kernel_base, + machine->ram_size - kernel_base); + if (kernel_size < 0) { + error_report("could not load kernel '%s'", kernel_filename); + exit(1); + } + printf("Load kernel size %ld at " TARGET_FMT_lx, + kernel_size, kernel_base); + /* load initrd */ + if (initrd_filename) { + initrd_base = INITRD_LOAD_ADDR; + initrd_size = load_image_targphys(initrd_filename, initrd_base, + machine->ram_size - initrd_base); + if (initrd_size < 0) { + error_report("could not load initial ram disk '%s'", + initrd_filename); + exit(1); + } + } else { 
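+
+            /*
+             * Aside: the flash registration above computes its base as
+             * (uint32_t)(-bios_size), i.e. the ROM is placed so that it
+             * ends exactly at the 4 GiB boundary holding the 405 reset
+             * vector.  A self-checking sketch:
+             *
+             *     #include <stdint.h>
+             *     #include <assert.h>
+             *
+             *     int main(void)
+             *     {
+             *         long bios_size = 8 * 1024 * 1024;  // 8 MiB, as above
+             *         uint32_t base = (uint32_t)(-bios_size);
+             *         assert(base == 0xFF800000u);       // 4 GiB - 8 MiB
+             *         assert((uint64_t)base + bios_size == 1ULL << 32);
+             *         return 0;
+             *     }
+             */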
+ initrd_base = 0; + initrd_size = 0; + } + env->gpr[4] = initrd_base; + env->gpr[5] = initrd_size; + if (kernel_cmdline != NULL) { + len = strlen(kernel_cmdline); + bdloc -= ((len + 255) & ~255); + cpu_physical_memory_write(bdloc, kernel_cmdline, len + 1); + env->gpr[6] = bdloc; + env->gpr[7] = bdloc + len; + } else { + env->gpr[6] = 0; + env->gpr[7] = 0; + } + env->nip = KERNEL_LOAD_ADDR; + } else { + kernel_base = 0; + kernel_size = 0; + initrd_base = 0; + initrd_size = 0; + bdloc = 0; + } +} + +static void ref405ep_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + + mc->desc = "ref405ep"; + mc->init = ref405ep_init; + mc->default_ram_size = 0x08000000; + mc->default_ram_id = "ef405ep.ram"; +} + +static const TypeInfo ref405ep_type = { + .name = MACHINE_TYPE_NAME("ref405ep"), + .parent = TYPE_MACHINE, + .class_init = ref405ep_class_init, +}; + +/*****************************************************************************/ +/* AMCC Taihu evaluation board */ +/* - PowerPC 405EP processor + * - SDRAM 128 MB at 0x00000000 + * - Boot flash 2 MB at 0xFFE00000 + * - Application flash 32 MB at 0xFC000000 + * - 2 serial ports + * - 2 ethernet PHY + * - 1 USB 1.1 device 0x50000000 + * - 1 LCD display 0x50100000 + * - 1 CPLD 0x50100000 + * - 1 I2C EEPROM + * - 1 I2C thermal sensor + * - a set of LEDs + * - bit-bang SPI port using GPIOs + * - 1 EBC interface connector 0 0x50200000 + * - 1 cardbus controller + expansion slot. + * - 1 PCI expansion slot. + */ +typedef struct taihu_cpld_t taihu_cpld_t; +struct taihu_cpld_t { + uint8_t reg0; + uint8_t reg1; +}; + +static uint64_t taihu_cpld_read(void *opaque, hwaddr addr, unsigned size) +{ + taihu_cpld_t *cpld; + uint32_t ret; + + cpld = opaque; + switch (addr) { + case 0x0: + ret = cpld->reg0; + break; + case 0x1: + ret = cpld->reg1; + break; + default: + ret = 0; + break; + } + + return ret; +} + +static void taihu_cpld_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + taihu_cpld_t *cpld; + + cpld = opaque; + switch (addr) { + case 0x0: + /* Read only */ + break; + case 0x1: + cpld->reg1 = value; + break; + default: + break; + } +} + +static const MemoryRegionOps taihu_cpld_ops = { + .read = taihu_cpld_read, + .write = taihu_cpld_write, + .impl = { + .min_access_size = 1, + .max_access_size = 1, + }, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void taihu_cpld_reset (void *opaque) +{ + taihu_cpld_t *cpld; + + cpld = opaque; + cpld->reg0 = 0x01; + cpld->reg1 = 0x80; +} + +static void taihu_cpld_init(MemoryRegion *sysmem, uint32_t base) +{ + taihu_cpld_t *cpld; + MemoryRegion *cpld_memory = g_new(MemoryRegion, 1); + + cpld = g_malloc0(sizeof(taihu_cpld_t)); + memory_region_init_io(cpld_memory, NULL, &taihu_cpld_ops, cpld, "cpld", 0x100); + memory_region_add_subregion(sysmem, base, cpld_memory); + qemu_register_reset(&taihu_cpld_reset, cpld); +} + +static void taihu_405ep_init(MachineState *machine) +{ + MachineClass *mc = MACHINE_GET_CLASS(machine); + const char *bios_name = machine->firmware ?: BIOS_FILENAME; + const char *kernel_filename = machine->kernel_filename; + const char *initrd_filename = machine->initrd_filename; + char *filename; + MemoryRegion *sysmem = get_system_memory(); + MemoryRegion *bios; + MemoryRegion *ram_memories = g_new(MemoryRegion, 2); + hwaddr ram_bases[2], ram_sizes[2]; + long bios_size; + target_ulong kernel_base, initrd_base; + long kernel_size, initrd_size; + int linux_boot; + int fl_idx; + DriveInfo *dinfo; + DeviceState *uicdev; + + if (machine->ram_size 
!= mc->default_ram_size) { + char *sz = size_to_str(mc->default_ram_size); + error_report("Invalid RAM size, should be %s", sz); + g_free(sz); + exit(EXIT_FAILURE); + } + + ram_bases[0] = 0; + ram_sizes[0] = 0x04000000; + memory_region_init_alias(&ram_memories[0], NULL, + "taihu_405ep.ram-0", machine->ram, ram_bases[0], + ram_sizes[0]); + ram_bases[1] = 0x04000000; + ram_sizes[1] = 0x04000000; + memory_region_init_alias(&ram_memories[1], NULL, + "taihu_405ep.ram-1", machine->ram, ram_bases[1], + ram_sizes[1]); + ppc405ep_init(sysmem, ram_memories, ram_bases, ram_sizes, + 33333333, &uicdev, kernel_filename == NULL ? 0 : 1); + /* allocate and load BIOS */ + fl_idx = 0; +#if defined(USE_FLASH_BIOS) + dinfo = drive_get(IF_PFLASH, 0, fl_idx); + if (dinfo) { + bios_size = 2 * MiB; + pflash_cfi02_register(0xFFE00000, + "taihu_405ep.bios", bios_size, + blk_by_legacy_dinfo(dinfo), + 64 * KiB, 1, + 4, 0x0001, 0x22DA, 0x0000, 0x0000, 0x555, 0x2AA, + 1); + fl_idx++; + } else +#endif + { + bios = g_new(MemoryRegion, 1); + memory_region_init_rom(bios, NULL, "taihu_405ep.bios", BIOS_SIZE, + &error_fatal); + filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name); + if (filename) { + bios_size = load_image_size(filename, + memory_region_get_ram_ptr(bios), + BIOS_SIZE); + g_free(filename); + if (bios_size < 0) { + error_report("Could not load PowerPC BIOS '%s'", bios_name); + exit(1); + } + bios_size = (bios_size + 0xfff) & ~0xfff; + memory_region_add_subregion(sysmem, (uint32_t)(-bios_size), bios); + } else if (!qtest_enabled()) { + error_report("Could not load PowerPC BIOS '%s'", bios_name); + exit(1); + } + } + /* Register Linux flash */ + dinfo = drive_get(IF_PFLASH, 0, fl_idx); + if (dinfo) { + bios_size = 32 * MiB; + pflash_cfi02_register(0xfc000000, "taihu_405ep.flash", bios_size, + blk_by_legacy_dinfo(dinfo), + 64 * KiB, 1, + 4, 0x0001, 0x22DA, 0x0000, 0x0000, 0x555, 0x2AA, + 1); + fl_idx++; + } + /* Register CLPD & LCD display */ + taihu_cpld_init(sysmem, 0x50100000); + /* Load kernel */ + linux_boot = (kernel_filename != NULL); + if (linux_boot) { + kernel_base = KERNEL_LOAD_ADDR; + /* now we can load the kernel */ + kernel_size = load_image_targphys(kernel_filename, kernel_base, + machine->ram_size - kernel_base); + if (kernel_size < 0) { + error_report("could not load kernel '%s'", kernel_filename); + exit(1); + } + /* load initrd */ + if (initrd_filename) { + initrd_base = INITRD_LOAD_ADDR; + initrd_size = load_image_targphys(initrd_filename, initrd_base, + machine->ram_size - initrd_base); + if (initrd_size < 0) { + error_report("could not load initial ram disk '%s'", + initrd_filename); + exit(1); + } + } else { + initrd_base = 0; + initrd_size = 0; + } + } else { + kernel_base = 0; + kernel_size = 0; + initrd_base = 0; + initrd_size = 0; + } +} + +static void taihu_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + + mc->desc = "taihu"; + mc->init = taihu_405ep_init; + mc->default_ram_size = 0x08000000; + mc->default_ram_id = "taihu_405ep.ram"; +} + +static const TypeInfo taihu_type = { + .name = MACHINE_TYPE_NAME("taihu"), + .parent = TYPE_MACHINE, + .class_init = taihu_class_init, +}; + +static void ppc405_machine_init(void) +{ + type_register_static(&ref405ep_type); + type_register_static(&taihu_type); +} + +type_init(ppc405_machine_init) diff --git a/hw/ppc/ppc405_uc.c b/hw/ppc/ppc405_uc.c new file mode 100644 index 000000000..e632c408b --- /dev/null +++ b/hw/ppc/ppc405_uc.c @@ -0,0 +1,1547 @@ +/* + * QEMU PowerPC 405 embedded processors emulation + * 
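+ * Aside: the taihu flash map set up above can be sanity-checked the same
+ * way as the ref405ep one: the 2 MiB boot flash at 0xFFE00000 ends
+ * exactly at 4 GiB, and the 32 MiB application flash at 0xFC000000 sits
+ * wholly below it.
+ *
+ *     #include <stdint.h>
+ *     #include <assert.h>
+ *
+ *     int main(void)
+ *     {
+ *         uint64_t boot = 0xFFE00000, boot_sz = 2  * 1024 * 1024;
+ *         uint64_t app  = 0xFC000000, app_sz  = 32 * 1024 * 1024;
+ *         assert(boot + boot_sz == 1ULL << 32); // reaches the 4 GiB top
+ *         assert(app + app_sz <= boot);         // fits below boot flash
+ *         return 0;
+ *     }
+ *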
+ * Copyright (c) 2007 Jocelyn Mayer + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "qapi/error.h" +#include "cpu.h" +#include "hw/ppc/ppc.h" +#include "hw/i2c/ppc4xx_i2c.h" +#include "hw/irq.h" +#include "ppc405.h" +#include "hw/char/serial.h" +#include "qemu/timer.h" +#include "sysemu/reset.h" +#include "sysemu/sysemu.h" +#include "exec/address-spaces.h" +#include "hw/intc/ppc-uic.h" +#include "hw/qdev-properties.h" +#include "qapi/error.h" + +//#define DEBUG_OPBA +//#define DEBUG_SDRAM +//#define DEBUG_GPIO +//#define DEBUG_SERIAL +//#define DEBUG_OCM +//#define DEBUG_GPT +//#define DEBUG_CLOCKS +//#define DEBUG_CLOCKS_LL + +ram_addr_t ppc405_set_bootinfo (CPUPPCState *env, ppc4xx_bd_info_t *bd, + uint32_t flags) +{ + CPUState *cs = env_cpu(env); + ram_addr_t bdloc; + int i, n; + + /* We put the bd structure at the top of memory */ + if (bd->bi_memsize >= 0x01000000UL) + bdloc = 0x01000000UL - sizeof(struct ppc4xx_bd_info_t); + else + bdloc = bd->bi_memsize - sizeof(struct ppc4xx_bd_info_t); + stl_be_phys(cs->as, bdloc + 0x00, bd->bi_memstart); + stl_be_phys(cs->as, bdloc + 0x04, bd->bi_memsize); + stl_be_phys(cs->as, bdloc + 0x08, bd->bi_flashstart); + stl_be_phys(cs->as, bdloc + 0x0C, bd->bi_flashsize); + stl_be_phys(cs->as, bdloc + 0x10, bd->bi_flashoffset); + stl_be_phys(cs->as, bdloc + 0x14, bd->bi_sramstart); + stl_be_phys(cs->as, bdloc + 0x18, bd->bi_sramsize); + stl_be_phys(cs->as, bdloc + 0x1C, bd->bi_bootflags); + stl_be_phys(cs->as, bdloc + 0x20, bd->bi_ipaddr); + for (i = 0; i < 6; i++) { + stb_phys(cs->as, bdloc + 0x24 + i, bd->bi_enetaddr[i]); + } + stw_be_phys(cs->as, bdloc + 0x2A, bd->bi_ethspeed); + stl_be_phys(cs->as, bdloc + 0x2C, bd->bi_intfreq); + stl_be_phys(cs->as, bdloc + 0x30, bd->bi_busfreq); + stl_be_phys(cs->as, bdloc + 0x34, bd->bi_baudrate); + for (i = 0; i < 4; i++) { + stb_phys(cs->as, bdloc + 0x38 + i, bd->bi_s_version[i]); + } + for (i = 0; i < 32; i++) { + stb_phys(cs->as, bdloc + 0x3C + i, bd->bi_r_version[i]); + } + stl_be_phys(cs->as, bdloc + 0x5C, bd->bi_plb_busfreq); + stl_be_phys(cs->as, bdloc + 0x60, bd->bi_pci_busfreq); + for (i = 0; i < 6; i++) { + stb_phys(cs->as, bdloc + 0x64 + i, bd->bi_pci_enetaddr[i]); + } + n = 0x6A; + if (flags & 0x00000001) { + for (i = 0; i < 6; i++) + stb_phys(cs->as, bdloc + n++, bd->bi_pci_enetaddr2[i]); + } + stl_be_phys(cs->as, bdloc + n, bd->bi_opbfreq); + n += 4; + for (i = 0; i < 2; i++) { + stl_be_phys(cs->as, bdloc + 
n, bd->bi_iic_fast[i]); + n += 4; + } + + return bdloc; +} + +/*****************************************************************************/ +/* Shared peripherals */ + +/*****************************************************************************/ +/* Peripheral local bus arbitrer */ +enum { + PLB3A0_ACR = 0x077, + PLB4A0_ACR = 0x081, + PLB0_BESR = 0x084, + PLB0_BEAR = 0x086, + PLB0_ACR = 0x087, + PLB4A1_ACR = 0x089, +}; + +typedef struct ppc4xx_plb_t ppc4xx_plb_t; +struct ppc4xx_plb_t { + uint32_t acr; + uint32_t bear; + uint32_t besr; +}; + +static uint32_t dcr_read_plb (void *opaque, int dcrn) +{ + ppc4xx_plb_t *plb; + uint32_t ret; + + plb = opaque; + switch (dcrn) { + case PLB0_ACR: + ret = plb->acr; + break; + case PLB0_BEAR: + ret = plb->bear; + break; + case PLB0_BESR: + ret = plb->besr; + break; + default: + /* Avoid gcc warning */ + ret = 0; + break; + } + + return ret; +} + +static void dcr_write_plb (void *opaque, int dcrn, uint32_t val) +{ + ppc4xx_plb_t *plb; + + plb = opaque; + switch (dcrn) { + case PLB0_ACR: + /* We don't care about the actual parameters written as + * we don't manage any priorities on the bus + */ + plb->acr = val & 0xF8000000; + break; + case PLB0_BEAR: + /* Read only */ + break; + case PLB0_BESR: + /* Write-clear */ + plb->besr &= ~val; + break; + } +} + +static void ppc4xx_plb_reset (void *opaque) +{ + ppc4xx_plb_t *plb; + + plb = opaque; + plb->acr = 0x00000000; + plb->bear = 0x00000000; + plb->besr = 0x00000000; +} + +void ppc4xx_plb_init(CPUPPCState *env) +{ + ppc4xx_plb_t *plb; + + plb = g_malloc0(sizeof(ppc4xx_plb_t)); + ppc_dcr_register(env, PLB3A0_ACR, plb, &dcr_read_plb, &dcr_write_plb); + ppc_dcr_register(env, PLB4A0_ACR, plb, &dcr_read_plb, &dcr_write_plb); + ppc_dcr_register(env, PLB0_ACR, plb, &dcr_read_plb, &dcr_write_plb); + ppc_dcr_register(env, PLB0_BEAR, plb, &dcr_read_plb, &dcr_write_plb); + ppc_dcr_register(env, PLB0_BESR, plb, &dcr_read_plb, &dcr_write_plb); + ppc_dcr_register(env, PLB4A1_ACR, plb, &dcr_read_plb, &dcr_write_plb); + qemu_register_reset(ppc4xx_plb_reset, plb); +} + +/*****************************************************************************/ +/* PLB to OPB bridge */ +enum { + POB0_BESR0 = 0x0A0, + POB0_BESR1 = 0x0A2, + POB0_BEAR = 0x0A4, +}; + +typedef struct ppc4xx_pob_t ppc4xx_pob_t; +struct ppc4xx_pob_t { + uint32_t bear; + uint32_t besr0; + uint32_t besr1; +}; + +static uint32_t dcr_read_pob (void *opaque, int dcrn) +{ + ppc4xx_pob_t *pob; + uint32_t ret; + + pob = opaque; + switch (dcrn) { + case POB0_BEAR: + ret = pob->bear; + break; + case POB0_BESR0: + ret = pob->besr0; + break; + case POB0_BESR1: + ret = pob->besr1; + break; + default: + /* Avoid gcc warning */ + ret = 0; + break; + } + + return ret; +} + +static void dcr_write_pob (void *opaque, int dcrn, uint32_t val) +{ + ppc4xx_pob_t *pob; + + pob = opaque; + switch (dcrn) { + case POB0_BEAR: + /* Read only */ + break; + case POB0_BESR0: + /* Write-clear */ + pob->besr0 &= ~val; + break; + case POB0_BESR1: + /* Write-clear */ + pob->besr1 &= ~val; + break; + } +} + +static void ppc4xx_pob_reset (void *opaque) +{ + ppc4xx_pob_t *pob; + + pob = opaque; + /* No error */ + pob->bear = 0x00000000; + pob->besr0 = 0x0000000; + pob->besr1 = 0x0000000; +} + +static void ppc4xx_pob_init(CPUPPCState *env) +{ + ppc4xx_pob_t *pob; + + pob = g_malloc0(sizeof(ppc4xx_pob_t)); + ppc_dcr_register(env, POB0_BEAR, pob, &dcr_read_pob, &dcr_write_pob); + ppc_dcr_register(env, POB0_BESR0, pob, &dcr_read_pob, &dcr_write_pob); + ppc_dcr_register(env, POB0_BESR1, pob, 
&dcr_read_pob, &dcr_write_pob); + qemu_register_reset(ppc4xx_pob_reset, pob); +} + +/*****************************************************************************/ +/* OPB arbitrer */ +typedef struct ppc4xx_opba_t ppc4xx_opba_t; +struct ppc4xx_opba_t { + MemoryRegion io; + uint8_t cr; + uint8_t pr; +}; + +static uint64_t opba_readb(void *opaque, hwaddr addr, unsigned size) +{ + ppc4xx_opba_t *opba; + uint32_t ret; + +#ifdef DEBUG_OPBA + printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr); +#endif + opba = opaque; + switch (addr) { + case 0x00: + ret = opba->cr; + break; + case 0x01: + ret = opba->pr; + break; + default: + ret = 0x00; + break; + } + + return ret; +} + +static void opba_writeb(void *opaque, hwaddr addr, uint64_t value, + unsigned size) +{ + ppc4xx_opba_t *opba; + +#ifdef DEBUG_OPBA + printf("%s: addr " TARGET_FMT_plx " val %08" PRIx32 "\n", __func__, addr, + value); +#endif + opba = opaque; + switch (addr) { + case 0x00: + opba->cr = value & 0xF8; + break; + case 0x01: + opba->pr = value & 0xFF; + break; + default: + break; + } +} +static const MemoryRegionOps opba_ops = { + .read = opba_readb, + .write = opba_writeb, + .impl.min_access_size = 1, + .impl.max_access_size = 1, + .valid.min_access_size = 1, + .valid.max_access_size = 4, + .endianness = DEVICE_BIG_ENDIAN, +}; + +static void ppc4xx_opba_reset (void *opaque) +{ + ppc4xx_opba_t *opba; + + opba = opaque; + opba->cr = 0x00; /* No dynamic priorities - park disabled */ + opba->pr = 0x11; +} + +static void ppc4xx_opba_init(hwaddr base) +{ + ppc4xx_opba_t *opba; + + opba = g_malloc0(sizeof(ppc4xx_opba_t)); +#ifdef DEBUG_OPBA + printf("%s: offset " TARGET_FMT_plx "\n", __func__, base); +#endif + memory_region_init_io(&opba->io, NULL, &opba_ops, opba, "opba", 0x002); + memory_region_add_subregion(get_system_memory(), base, &opba->io); + qemu_register_reset(ppc4xx_opba_reset, opba); +} + +/*****************************************************************************/ +/* Code decompression controller */ +/* XXX: TODO */ + +/*****************************************************************************/ +/* Peripheral controller */ +typedef struct ppc4xx_ebc_t ppc4xx_ebc_t; +struct ppc4xx_ebc_t { + uint32_t addr; + uint32_t bcr[8]; + uint32_t bap[8]; + uint32_t bear; + uint32_t besr0; + uint32_t besr1; + uint32_t cfg; +}; + +enum { + EBC0_CFGADDR = 0x012, + EBC0_CFGDATA = 0x013, +}; + +static uint32_t dcr_read_ebc (void *opaque, int dcrn) +{ + ppc4xx_ebc_t *ebc; + uint32_t ret; + + ebc = opaque; + switch (dcrn) { + case EBC0_CFGADDR: + ret = ebc->addr; + break; + case EBC0_CFGDATA: + switch (ebc->addr) { + case 0x00: /* B0CR */ + ret = ebc->bcr[0]; + break; + case 0x01: /* B1CR */ + ret = ebc->bcr[1]; + break; + case 0x02: /* B2CR */ + ret = ebc->bcr[2]; + break; + case 0x03: /* B3CR */ + ret = ebc->bcr[3]; + break; + case 0x04: /* B4CR */ + ret = ebc->bcr[4]; + break; + case 0x05: /* B5CR */ + ret = ebc->bcr[5]; + break; + case 0x06: /* B6CR */ + ret = ebc->bcr[6]; + break; + case 0x07: /* B7CR */ + ret = ebc->bcr[7]; + break; + case 0x10: /* B0AP */ + ret = ebc->bap[0]; + break; + case 0x11: /* B1AP */ + ret = ebc->bap[1]; + break; + case 0x12: /* B2AP */ + ret = ebc->bap[2]; + break; + case 0x13: /* B3AP */ + ret = ebc->bap[3]; + break; + case 0x14: /* B4AP */ + ret = ebc->bap[4]; + break; + case 0x15: /* B5AP */ + ret = ebc->bap[5]; + break; + case 0x16: /* B6AP */ + ret = ebc->bap[6]; + break; + case 0x17: /* B7AP */ + ret = ebc->bap[7]; + break; + case 0x20: /* BEAR */ + ret = ebc->bear; + break; + case 0x21: /* 
BESR0 */ + ret = ebc->besr0; + break; + case 0x22: /* BESR1 */ + ret = ebc->besr1; + break; + case 0x23: /* CFG */ + ret = ebc->cfg; + break; + default: + ret = 0x00000000; + break; + } + break; + default: + ret = 0x00000000; + break; + } + + return ret; +} + +static void dcr_write_ebc (void *opaque, int dcrn, uint32_t val) +{ + ppc4xx_ebc_t *ebc; + + ebc = opaque; + switch (dcrn) { + case EBC0_CFGADDR: + ebc->addr = val; + break; + case EBC0_CFGDATA: + switch (ebc->addr) { + case 0x00: /* B0CR */ + break; + case 0x01: /* B1CR */ + break; + case 0x02: /* B2CR */ + break; + case 0x03: /* B3CR */ + break; + case 0x04: /* B4CR */ + break; + case 0x05: /* B5CR */ + break; + case 0x06: /* B6CR */ + break; + case 0x07: /* B7CR */ + break; + case 0x10: /* B0AP */ + break; + case 0x11: /* B1AP */ + break; + case 0x12: /* B2AP */ + break; + case 0x13: /* B3AP */ + break; + case 0x14: /* B4AP */ + break; + case 0x15: /* B5AP */ + break; + case 0x16: /* B6AP */ + break; + case 0x17: /* B7AP */ + break; + case 0x20: /* BEAR */ + break; + case 0x21: /* BESR0 */ + break; + case 0x22: /* BESR1 */ + break; + case 0x23: /* CFG */ + break; + default: + break; + } + break; + default: + break; + } +} + +static void ebc_reset (void *opaque) +{ + ppc4xx_ebc_t *ebc; + int i; + + ebc = opaque; + ebc->addr = 0x00000000; + ebc->bap[0] = 0x7F8FFE80; + ebc->bcr[0] = 0xFFE28000; + for (i = 0; i < 8; i++) { + ebc->bap[i] = 0x00000000; + ebc->bcr[i] = 0x00000000; + } + ebc->besr0 = 0x00000000; + ebc->besr1 = 0x00000000; + ebc->cfg = 0x80400000; +} + +void ppc405_ebc_init(CPUPPCState *env) +{ + ppc4xx_ebc_t *ebc; + + ebc = g_malloc0(sizeof(ppc4xx_ebc_t)); + qemu_register_reset(&ebc_reset, ebc); + ppc_dcr_register(env, EBC0_CFGADDR, + ebc, &dcr_read_ebc, &dcr_write_ebc); + ppc_dcr_register(env, EBC0_CFGDATA, + ebc, &dcr_read_ebc, &dcr_write_ebc); +} + +/*****************************************************************************/ +/* DMA controller */ +enum { + DMA0_CR0 = 0x100, + DMA0_CT0 = 0x101, + DMA0_DA0 = 0x102, + DMA0_SA0 = 0x103, + DMA0_SG0 = 0x104, + DMA0_CR1 = 0x108, + DMA0_CT1 = 0x109, + DMA0_DA1 = 0x10A, + DMA0_SA1 = 0x10B, + DMA0_SG1 = 0x10C, + DMA0_CR2 = 0x110, + DMA0_CT2 = 0x111, + DMA0_DA2 = 0x112, + DMA0_SA2 = 0x113, + DMA0_SG2 = 0x114, + DMA0_CR3 = 0x118, + DMA0_CT3 = 0x119, + DMA0_DA3 = 0x11A, + DMA0_SA3 = 0x11B, + DMA0_SG3 = 0x11C, + DMA0_SR = 0x120, + DMA0_SGC = 0x123, + DMA0_SLP = 0x125, + DMA0_POL = 0x126, +}; + +typedef struct ppc405_dma_t ppc405_dma_t; +struct ppc405_dma_t { + qemu_irq irqs[4]; + uint32_t cr[4]; + uint32_t ct[4]; + uint32_t da[4]; + uint32_t sa[4]; + uint32_t sg[4]; + uint32_t sr; + uint32_t sgc; + uint32_t slp; + uint32_t pol; +}; + +static uint32_t dcr_read_dma (void *opaque, int dcrn) +{ + return 0; +} + +static void dcr_write_dma (void *opaque, int dcrn, uint32_t val) +{ +} + +static void ppc405_dma_reset (void *opaque) +{ + ppc405_dma_t *dma; + int i; + + dma = opaque; + for (i = 0; i < 4; i++) { + dma->cr[i] = 0x00000000; + dma->ct[i] = 0x00000000; + dma->da[i] = 0x00000000; + dma->sa[i] = 0x00000000; + dma->sg[i] = 0x00000000; + } + dma->sr = 0x00000000; + dma->sgc = 0x00000000; + dma->slp = 0x7C000000; + dma->pol = 0x00000000; +} + +static void ppc405_dma_init(CPUPPCState *env, qemu_irq irqs[4]) +{ + ppc405_dma_t *dma; + + dma = g_malloc0(sizeof(ppc405_dma_t)); + memcpy(dma->irqs, irqs, 4 * sizeof(qemu_irq)); + qemu_register_reset(&ppc405_dma_reset, dma); + ppc_dcr_register(env, DMA0_CR0, + dma, &dcr_read_dma, &dcr_write_dma); + ppc_dcr_register(env, DMA0_CT0, + dma, 
&dcr_read_dma, &dcr_write_dma); + ppc_dcr_register(env, DMA0_DA0, + dma, &dcr_read_dma, &dcr_write_dma); + ppc_dcr_register(env, DMA0_SA0, + dma, &dcr_read_dma, &dcr_write_dma); + ppc_dcr_register(env, DMA0_SG0, + dma, &dcr_read_dma, &dcr_write_dma); + ppc_dcr_register(env, DMA0_CR1, + dma, &dcr_read_dma, &dcr_write_dma); + ppc_dcr_register(env, DMA0_CT1, + dma, &dcr_read_dma, &dcr_write_dma); + ppc_dcr_register(env, DMA0_DA1, + dma, &dcr_read_dma, &dcr_write_dma); + ppc_dcr_register(env, DMA0_SA1, + dma, &dcr_read_dma, &dcr_write_dma); + ppc_dcr_register(env, DMA0_SG1, + dma, &dcr_read_dma, &dcr_write_dma); + ppc_dcr_register(env, DMA0_CR2, + dma, &dcr_read_dma, &dcr_write_dma); + ppc_dcr_register(env, DMA0_CT2, + dma, &dcr_read_dma, &dcr_write_dma); + ppc_dcr_register(env, DMA0_DA2, + dma, &dcr_read_dma, &dcr_write_dma); + ppc_dcr_register(env, DMA0_SA2, + dma, &dcr_read_dma, &dcr_write_dma); + ppc_dcr_register(env, DMA0_SG2, + dma, &dcr_read_dma, &dcr_write_dma); + ppc_dcr_register(env, DMA0_CR3, + dma, &dcr_read_dma, &dcr_write_dma); + ppc_dcr_register(env, DMA0_CT3, + dma, &dcr_read_dma, &dcr_write_dma); + ppc_dcr_register(env, DMA0_DA3, + dma, &dcr_read_dma, &dcr_write_dma); + ppc_dcr_register(env, DMA0_SA3, + dma, &dcr_read_dma, &dcr_write_dma); + ppc_dcr_register(env, DMA0_SG3, + dma, &dcr_read_dma, &dcr_write_dma); + ppc_dcr_register(env, DMA0_SR, + dma, &dcr_read_dma, &dcr_write_dma); + ppc_dcr_register(env, DMA0_SGC, + dma, &dcr_read_dma, &dcr_write_dma); + ppc_dcr_register(env, DMA0_SLP, + dma, &dcr_read_dma, &dcr_write_dma); + ppc_dcr_register(env, DMA0_POL, + dma, &dcr_read_dma, &dcr_write_dma); +} + +/*****************************************************************************/ +/* GPIO */ +typedef struct ppc405_gpio_t ppc405_gpio_t; +struct ppc405_gpio_t { + MemoryRegion io; + uint32_t or; + uint32_t tcr; + uint32_t osrh; + uint32_t osrl; + uint32_t tsrh; + uint32_t tsrl; + uint32_t odr; + uint32_t ir; + uint32_t rr1; + uint32_t isr1h; + uint32_t isr1l; +}; + +static uint64_t ppc405_gpio_read(void *opaque, hwaddr addr, unsigned size) +{ +#ifdef DEBUG_GPIO + printf("%s: addr " TARGET_FMT_plx " size %d\n", __func__, addr, size); +#endif + + return 0; +} + +static void ppc405_gpio_write(void *opaque, hwaddr addr, uint64_t value, + unsigned size) +{ +#ifdef DEBUG_GPIO + printf("%s: addr " TARGET_FMT_plx " size %d val %08" PRIx32 "\n", + __func__, addr, size, value); +#endif +} + +static const MemoryRegionOps ppc405_gpio_ops = { + .read = ppc405_gpio_read, + .write = ppc405_gpio_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void ppc405_gpio_reset (void *opaque) +{ +} + +static void ppc405_gpio_init(hwaddr base) +{ + ppc405_gpio_t *gpio; + + gpio = g_malloc0(sizeof(ppc405_gpio_t)); +#ifdef DEBUG_GPIO + printf("%s: offset " TARGET_FMT_plx "\n", __func__, base); +#endif + memory_region_init_io(&gpio->io, NULL, &ppc405_gpio_ops, gpio, "pgio", 0x038); + memory_region_add_subregion(get_system_memory(), base, &gpio->io); + qemu_register_reset(&ppc405_gpio_reset, gpio); +} + +/*****************************************************************************/ +/* On Chip Memory */ +enum { + OCM0_ISARC = 0x018, + OCM0_ISACNTL = 0x019, + OCM0_DSARC = 0x01A, + OCM0_DSACNTL = 0x01B, +}; + +typedef struct ppc405_ocm_t ppc405_ocm_t; +struct ppc405_ocm_t { + MemoryRegion ram; + MemoryRegion isarc_ram; + MemoryRegion dsarc_ram; + uint32_t isarc; + uint32_t isacntl; + uint32_t dsarc; + uint32_t dsacntl; +}; + +static void ocm_update_mappings (ppc405_ocm_t *ocm, + uint32_t isarc, 
uint32_t isacntl, + uint32_t dsarc, uint32_t dsacntl) +{ +#ifdef DEBUG_OCM + printf("OCM update ISA %08" PRIx32 " %08" PRIx32 " (%08" PRIx32 + " %08" PRIx32 ") DSA %08" PRIx32 " %08" PRIx32 + " (%08" PRIx32 " %08" PRIx32 ")\n", + isarc, isacntl, dsarc, dsacntl, + ocm->isarc, ocm->isacntl, ocm->dsarc, ocm->dsacntl); +#endif + if (ocm->isarc != isarc || + (ocm->isacntl & 0x80000000) != (isacntl & 0x80000000)) { + if (ocm->isacntl & 0x80000000) { + /* Unmap previously assigned memory region */ +#ifdef DEBUG_OCM + printf("OCM unmap ISA %08" PRIx32 "\n", ocm->isarc); +#endif + memory_region_del_subregion(get_system_memory(), &ocm->isarc_ram); + } + if (isacntl & 0x80000000) { + /* Map new instruction memory region */ +#ifdef DEBUG_OCM + printf("OCM map ISA %08" PRIx32 "\n", isarc); +#endif + memory_region_add_subregion(get_system_memory(), isarc, + &ocm->isarc_ram); + } + } + if (ocm->dsarc != dsarc || + (ocm->dsacntl & 0x80000000) != (dsacntl & 0x80000000)) { + if (ocm->dsacntl & 0x80000000) { + /* Beware not to unmap the region we just mapped */ + if (!(isacntl & 0x80000000) || ocm->dsarc != isarc) { + /* Unmap previously assigned memory region */ +#ifdef DEBUG_OCM + printf("OCM unmap DSA %08" PRIx32 "\n", ocm->dsarc); +#endif + memory_region_del_subregion(get_system_memory(), + &ocm->dsarc_ram); + } + } + if (dsacntl & 0x80000000) { + /* Beware not to remap the region we just mapped */ + if (!(isacntl & 0x80000000) || dsarc != isarc) { + /* Map new data memory region */ +#ifdef DEBUG_OCM + printf("OCM map DSA %08" PRIx32 "\n", dsarc); +#endif + memory_region_add_subregion(get_system_memory(), dsarc, + &ocm->dsarc_ram); + } + } + } +} + +static uint32_t dcr_read_ocm (void *opaque, int dcrn) +{ + ppc405_ocm_t *ocm; + uint32_t ret; + + ocm = opaque; + switch (dcrn) { + case OCM0_ISARC: + ret = ocm->isarc; + break; + case OCM0_ISACNTL: + ret = ocm->isacntl; + break; + case OCM0_DSARC: + ret = ocm->dsarc; + break; + case OCM0_DSACNTL: + ret = ocm->dsacntl; + break; + default: + ret = 0; + break; + } + + return ret; +} + +static void dcr_write_ocm (void *opaque, int dcrn, uint32_t val) +{ + ppc405_ocm_t *ocm; + uint32_t isarc, dsarc, isacntl, dsacntl; + + ocm = opaque; + isarc = ocm->isarc; + dsarc = ocm->dsarc; + isacntl = ocm->isacntl; + dsacntl = ocm->dsacntl; + switch (dcrn) { + case OCM0_ISARC: + isarc = val & 0xFC000000; + break; + case OCM0_ISACNTL: + isacntl = val & 0xC0000000; + break; + case OCM0_DSARC: + dsarc = val & 0xFC000000; + break; + case OCM0_DSACNTL: + dsacntl = val & 0xC0000000; + break; + } + ocm_update_mappings(ocm, isarc, isacntl, dsarc, dsacntl); + ocm->isarc = isarc; + ocm->dsarc = dsarc; + ocm->isacntl = isacntl; + ocm->dsacntl = dsacntl; +} + +static void ocm_reset (void *opaque) +{ + ppc405_ocm_t *ocm; + uint32_t isarc, dsarc, isacntl, dsacntl; + + ocm = opaque; + isarc = 0x00000000; + isacntl = 0x00000000; + dsarc = 0x00000000; + dsacntl = 0x00000000; + ocm_update_mappings(ocm, isarc, isacntl, dsarc, dsacntl); + ocm->isarc = isarc; + ocm->dsarc = dsarc; + ocm->isacntl = isacntl; + ocm->dsacntl = dsacntl; +} + +static void ppc405_ocm_init(CPUPPCState *env) +{ + ppc405_ocm_t *ocm; + + ocm = g_malloc0(sizeof(ppc405_ocm_t)); + /* XXX: Size is 4096 or 0x04000000 */ + memory_region_init_ram(&ocm->isarc_ram, NULL, "ppc405.ocm", 4 * KiB, + &error_fatal); + memory_region_init_alias(&ocm->dsarc_ram, NULL, "ppc405.dsarc", + &ocm->isarc_ram, 0, 4 * KiB); + qemu_register_reset(&ocm_reset, ocm); + ppc_dcr_register(env, OCM0_ISARC, + ocm, &dcr_read_ocm, &dcr_write_ocm); + ppc_dcr_register(env, OCM0_ISACNTL,
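ocm_update_mappings() keys every map/unmap decision on bit 31 of the control registers and only touches a side whose base address or enable state actually changed. The rule reduces to this standalone check (illustrative only):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* A side of the OCM is mapped iff bit 31 of its control register is
     * set; it is remapped only when the base or the enable changed. */
    static bool ocm_enabled(uint32_t cntl) { return cntl & 0x80000000; }

    static bool needs_remap(uint32_t old_arc, uint32_t old_cntl,
                            uint32_t new_arc, uint32_t new_cntl)
    {
        return old_arc != new_arc ||
               ocm_enabled(old_cntl) != ocm_enabled(new_cntl);
    }

    int main(void)
    {
        /* enabling at a new base triggers a remap... */
        printf("%d\n", needs_remap(0, 0x00000000, 0xF8000000, 0x80000000));
        /* ...rewriting the same values does not */
        printf("%d\n", needs_remap(0xF8000000, 0x80000000,
                                   0xF8000000, 0x80000000));
        return 0;
    }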
+ ocm, &dcr_read_ocm, &dcr_write_ocm); + ppc_dcr_register(env, OCM0_DSARC, + ocm, &dcr_read_ocm, &dcr_write_ocm); + ppc_dcr_register(env, OCM0_DSACNTL, + ocm, &dcr_read_ocm, &dcr_write_ocm); +} + +/*****************************************************************************/ +/* General purpose timers */ +typedef struct ppc4xx_gpt_t ppc4xx_gpt_t; +struct ppc4xx_gpt_t { + MemoryRegion iomem; + int64_t tb_offset; + uint32_t tb_freq; + QEMUTimer *timer; + qemu_irq irqs[5]; + uint32_t oe; + uint32_t ol; + uint32_t im; + uint32_t is; + uint32_t ie; + uint32_t comp[5]; + uint32_t mask[5]; +}; + +static int ppc4xx_gpt_compare (ppc4xx_gpt_t *gpt, int n) +{ + /* XXX: TODO */ + return 0; +} + +static void ppc4xx_gpt_set_output (ppc4xx_gpt_t *gpt, int n, int level) +{ + /* XXX: TODO */ +} + +static void ppc4xx_gpt_set_outputs (ppc4xx_gpt_t *gpt) +{ + uint32_t mask; + int i; + + mask = 0x80000000; + for (i = 0; i < 5; i++) { + if (gpt->oe & mask) { + /* Output is enabled */ + if (ppc4xx_gpt_compare(gpt, i)) { + /* Comparison is OK */ + ppc4xx_gpt_set_output(gpt, i, gpt->ol & mask); + } else { + /* Comparison is KO */ + ppc4xx_gpt_set_output(gpt, i, gpt->ol & mask ? 0 : 1); + } + } + mask = mask >> 1; + } +} + +static void ppc4xx_gpt_set_irqs (ppc4xx_gpt_t *gpt) +{ + uint32_t mask; + int i; + + mask = 0x00008000; + for (i = 0; i < 5; i++) { + if (gpt->is & gpt->im & mask) + qemu_irq_raise(gpt->irqs[i]); + else + qemu_irq_lower(gpt->irqs[i]); + mask = mask >> 1; + } +} + +static void ppc4xx_gpt_compute_timer (ppc4xx_gpt_t *gpt) +{ + /* XXX: TODO */ +} + +static uint64_t ppc4xx_gpt_read(void *opaque, hwaddr addr, unsigned size) +{ + ppc4xx_gpt_t *gpt; + uint32_t ret; + int idx; + +#ifdef DEBUG_GPT + printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr); +#endif + gpt = opaque; + switch (addr) { + case 0x00: + /* Time base counter */ + ret = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + gpt->tb_offset, + gpt->tb_freq, NANOSECONDS_PER_SECOND); + break; + case 0x10: + /* Output enable */ + ret = gpt->oe; + break; + case 0x14: + /* Output level */ + ret = gpt->ol; + break; + case 0x18: + /* Interrupt mask */ + ret = gpt->im; + break; + case 0x1C: + case 0x20: + /* Interrupt status */ + ret = gpt->is; + break; + case 0x24: + /* Interrupt enable */ + ret = gpt->ie; + break; + case 0x80 ... 0x90: + /* Compare timer */ + idx = (addr - 0x80) >> 2; + ret = gpt->comp[idx]; + break; + case 0xC0 ... 
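Two idioms in the GPT model are worth calling out: the compare and mask banks are arrays of five word-sized registers, so a byte offset maps to an array index by dropping the low two bits, and the interrupt status register is exposed as a write-1-to-set port at 0x1C plus a write-1-to-clear port at 0x20 (see the write handler just below). A compact standalone check of both:

    #include <assert.h>
    #include <stdint.h>

    static uint32_t is_reg;

    /* write-1-to-set / write-1-to-clear pair, as at offsets 0x1C/0x20 */
    static void w1s(uint32_t val) { is_reg |=  (val & 0x0000F800); }
    static void w1c(uint32_t val) { is_reg &= ~(val & 0x0000F800); }

    int main(void)
    {
        /* 0x80..0x90 and 0xC0..0xD0 are banks of five words */
        assert(((0x8c - 0x80) >> 2) == 3);   /* comp[3] */
        assert(((0xc4 - 0xc0) >> 2) == 1);   /* mask[1] */

        w1s(0x00008800);             /* raise two status bits */
        assert(is_reg == 0x00008800);
        w1c(0x00008000);             /* ack one of them */
        assert(is_reg == 0x00000800);
        return 0;
    }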
0xD0: + /* Compare mask */ + idx = (addr - 0xC0) >> 2; + ret = gpt->mask[idx]; + break; + default: + ret = -1; + break; + } + + return ret; +} + +static void ppc4xx_gpt_write(void *opaque, hwaddr addr, uint64_t value, + unsigned size) +{ + ppc4xx_gpt_t *gpt; + int idx; + +#ifdef DEBUG_GPT + printf("%s: addr " TARGET_FMT_plx " val %08" PRIx64 "\n", __func__, addr, + value); +#endif + gpt = opaque; + switch (addr) { + case 0x00: + /* Time base counter */ + gpt->tb_offset = muldiv64(value, NANOSECONDS_PER_SECOND, gpt->tb_freq) + - qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + ppc4xx_gpt_compute_timer(gpt); + break; + case 0x10: + /* Output enable */ + gpt->oe = value & 0xF8000000; + ppc4xx_gpt_set_outputs(gpt); + break; + case 0x14: + /* Output level */ + gpt->ol = value & 0xF8000000; + ppc4xx_gpt_set_outputs(gpt); + break; + case 0x18: + /* Interrupt mask */ + gpt->im = value & 0x0000F800; + break; + case 0x1C: + /* Interrupt status set */ + gpt->is |= value & 0x0000F800; + ppc4xx_gpt_set_irqs(gpt); + break; + case 0x20: + /* Interrupt status clear */ + gpt->is &= ~(value & 0x0000F800); + ppc4xx_gpt_set_irqs(gpt); + break; + case 0x24: + /* Interrupt enable */ + gpt->ie = value & 0x0000F800; + ppc4xx_gpt_set_irqs(gpt); + break; + case 0x80 ... 0x90: + /* Compare timer */ + idx = (addr - 0x80) >> 2; + gpt->comp[idx] = value & 0xF8000000; + ppc4xx_gpt_compute_timer(gpt); + break; + case 0xC0 ... 0xD0: + /* Compare mask */ + idx = (addr - 0xC0) >> 2; + gpt->mask[idx] = value & 0xF8000000; + ppc4xx_gpt_compute_timer(gpt); + break; + } +} + +static const MemoryRegionOps gpt_ops = { + .read = ppc4xx_gpt_read, + .write = ppc4xx_gpt_write, + .valid.min_access_size = 4, + .valid.max_access_size = 4, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void ppc4xx_gpt_cb (void *opaque) +{ + ppc4xx_gpt_t *gpt; + + gpt = opaque; + ppc4xx_gpt_set_irqs(gpt); + ppc4xx_gpt_set_outputs(gpt); + ppc4xx_gpt_compute_timer(gpt); +} + +static void ppc4xx_gpt_reset (void *opaque) +{ + ppc4xx_gpt_t *gpt; + int i; + + gpt = opaque; + timer_del(gpt->timer); + gpt->oe = 0x00000000; + gpt->ol = 0x00000000; + gpt->im = 0x00000000; + gpt->is = 0x00000000; + gpt->ie = 0x00000000; + for (i = 0; i < 5; i++) { + gpt->comp[i] = 0x00000000; + gpt->mask[i] = 0x00000000; + } +} + +static void ppc4xx_gpt_init(hwaddr base, qemu_irq irqs[5]) +{ + ppc4xx_gpt_t *gpt; + int i; + + gpt = g_malloc0(sizeof(ppc4xx_gpt_t)); + for (i = 0; i < 5; i++) { + gpt->irqs[i] = irqs[i]; + } + gpt->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &ppc4xx_gpt_cb, gpt); +#ifdef DEBUG_GPT + printf("%s: offset " TARGET_FMT_plx "\n", __func__, base); +#endif + memory_region_init_io(&gpt->iomem, NULL, &gpt_ops, gpt, "gpt", 0x0d4); + memory_region_add_subregion(get_system_memory(), base, &gpt->iomem); + qemu_register_reset(ppc4xx_gpt_reset, gpt); +} + +/*****************************************************************************/ +/* PowerPC 405EP */ +/* CPU control */ +enum { + PPC405EP_CPC0_PLLMR0 = 0x0F0, + PPC405EP_CPC0_BOOT = 0x0F1, + PPC405EP_CPC0_EPCTL = 0x0F3, + PPC405EP_CPC0_PLLMR1 = 0x0F4, + PPC405EP_CPC0_UCR = 0x0F5, + PPC405EP_CPC0_SRR = 0x0F6, + PPC405EP_CPC0_JTAGID = 0x0F7, + PPC405EP_CPC0_PCI = 0x0F9, +#if 0 + PPC405EP_CPC0_ER = xxx, + PPC405EP_CPC0_FR = xxx, + PPC405EP_CPC0_SR = xxx, +#endif +}; + +enum { + PPC405EP_CPU_CLK = 0, + PPC405EP_PLB_CLK = 1, + PPC405EP_OPB_CLK = 2, + PPC405EP_EBC_CLK = 3, + PPC405EP_MAL_CLK = 4, + PPC405EP_PCI_CLK = 5, + PPC405EP_UART0_CLK = 6, + PPC405EP_UART1_CLK = 7, + PPC405EP_CLK_NB = 8, +}; + +typedef struct
ppc405ep_cpc_t ppc405ep_cpc_t; +struct ppc405ep_cpc_t { + uint32_t sysclk; + clk_setup_t clk_setup[PPC405EP_CLK_NB]; + uint32_t boot; + uint32_t epctl; + uint32_t pllmr[2]; + uint32_t ucr; + uint32_t srr; + uint32_t jtagid; + uint32_t pci; + /* Clock and power management */ + uint32_t er; + uint32_t fr; + uint32_t sr; +}; + +static void ppc405ep_compute_clocks (ppc405ep_cpc_t *cpc) +{ + uint32_t CPU_clk, PLB_clk, OPB_clk, EBC_clk, MAL_clk, PCI_clk; + uint32_t UART0_clk, UART1_clk; + uint64_t VCO_out, PLL_out; + int M, D; + + VCO_out = 0; + if ((cpc->pllmr[1] & 0x80000000) && !(cpc->pllmr[1] & 0x40000000)) { + M = (((cpc->pllmr[1] >> 20) - 1) & 0xF) + 1; /* FBMUL */ +#ifdef DEBUG_CLOCKS_LL + printf("FBMUL %01" PRIx32 " %d\n", (cpc->pllmr[1] >> 20) & 0xF, M); +#endif + D = 8 - ((cpc->pllmr[1] >> 16) & 0x7); /* FWDA */ +#ifdef DEBUG_CLOCKS_LL + printf("FWDA %01" PRIx32 " %d\n", (cpc->pllmr[1] >> 16) & 0x7, D); +#endif + VCO_out = (uint64_t)cpc->sysclk * M * D; + if (VCO_out < 500000000UL || VCO_out > 1000000000UL) { + /* Error - unlock the PLL */ + printf("VCO out of range %" PRIu64 "\n", VCO_out); +#if 0 + cpc->pllmr[1] &= ~0x80000000; + goto pll_bypass; +#endif + } + PLL_out = VCO_out / D; + /* Pretend the PLL is locked */ + cpc->boot |= 0x00000001; + } else { +#if 0 + pll_bypass: +#endif + PLL_out = cpc->sysclk; + if (cpc->pllmr[1] & 0x40000000) { + /* Pretend the PLL is not locked */ + cpc->boot &= ~0x00000001; + } + } + /* Now, compute all other clocks */ + D = ((cpc->pllmr[0] >> 20) & 0x3) + 1; /* CCDV */ +#ifdef DEBUG_CLOCKS_LL + printf("CCDV %01" PRIx32 " %d\n", (cpc->pllmr[0] >> 20) & 0x3, D); +#endif + CPU_clk = PLL_out / D; + D = ((cpc->pllmr[0] >> 16) & 0x3) + 1; /* CBDV */ +#ifdef DEBUG_CLOCKS_LL + printf("CBDV %01" PRIx32 " %d\n", (cpc->pllmr[0] >> 16) & 0x3, D); +#endif + PLB_clk = CPU_clk / D; + D = ((cpc->pllmr[0] >> 12) & 0x3) + 1; /* OPDV */ +#ifdef DEBUG_CLOCKS_LL + printf("OPDV %01" PRIx32 " %d\n", (cpc->pllmr[0] >> 12) & 0x3, D); +#endif + OPB_clk = PLB_clk / D; + D = ((cpc->pllmr[0] >> 8) & 0x3) + 2; /* EPDV */ +#ifdef DEBUG_CLOCKS_LL + printf("EPDV %01" PRIx32 " %d\n", (cpc->pllmr[0] >> 8) & 0x3, D); +#endif + EBC_clk = PLB_clk / D; + D = ((cpc->pllmr[0] >> 4) & 0x3) + 1; /* MPDV */ +#ifdef DEBUG_CLOCKS_LL + printf("MPDV %01" PRIx32 " %d\n", (cpc->pllmr[0] >> 4) & 0x3, D); +#endif + MAL_clk = PLB_clk / D; + D = (cpc->pllmr[0] & 0x3) + 1; /* PPDV */ +#ifdef DEBUG_CLOCKS_LL + printf("PPDV %01" PRIx32 " %d\n", cpc->pllmr[0] & 0x3, D); +#endif + PCI_clk = PLB_clk / D; + D = ((cpc->ucr - 1) & 0x7F) + 1; /* U0DIV */ +#ifdef DEBUG_CLOCKS_LL + printf("U0DIV %01" PRIx32 " %d\n", cpc->ucr & 0x7F, D); +#endif + UART0_clk = PLL_out / D; + D = (((cpc->ucr >> 8) - 1) & 0x7F) + 1; /* U1DIV */ +#ifdef DEBUG_CLOCKS_LL + printf("U1DIV %01" PRIx32 " %d\n", (cpc->ucr >> 8) & 0x7F, D); +#endif + UART1_clk = PLL_out / D; +#ifdef DEBUG_CLOCKS + printf("Setup PPC405EP clocks - sysclk %" PRIu32 " VCO %" PRIu64 + " PLL out %" PRIu64 " Hz\n", cpc->sysclk, VCO_out, PLL_out); + printf("CPU %" PRIu32 " PLB %" PRIu32 " OPB %" PRIu32 " EBC %" PRIu32 + " MAL %" PRIu32 " PCI %" PRIu32 " UART0 %" PRIu32 + " UART1 %" PRIu32 "\n", + CPU_clk, PLB_clk, OPB_clk, EBC_clk, MAL_clk, PCI_clk, + UART0_clk, UART1_clk); +#endif + /* Setup CPU clocks */ + clk_setup(&cpc->clk_setup[PPC405EP_CPU_CLK], CPU_clk); + /* Setup PLB clock */ + clk_setup(&cpc->clk_setup[PPC405EP_PLB_CLK], PLB_clk); + /* Setup OPB clock */ + clk_setup(&cpc->clk_setup[PPC405EP_OPB_CLK], OPB_clk); + /* Setup external clock */ + 
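ppc405ep_compute_clocks() derives every output clock from PLL_out by pulling 2-bit divider fields out of PLLMR0. A worked standalone example using the reset values set later in ppc405ep_cpc_reset() (pllmr[1] = 0x40000000 selects PLL bypass, so PLL_out equals sysclk; the sysclk figure here is only an assumed example):

    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t pllmr0 = 0x00011010;   /* reset value of PLLMR0 */
        uint32_t pll_out = 33333333;    /* assumed sysclk, PLL bypassed */

        /* same field decodes as in ppc405ep_compute_clocks() */
        uint32_t cpu = pll_out / (((pllmr0 >> 20) & 0x3) + 1); /* CCDV = 1 */
        uint32_t plb = cpu / (((pllmr0 >> 16) & 0x3) + 1);     /* CBDV = 2 */
        uint32_t opb = plb / (((pllmr0 >> 12) & 0x3) + 1);     /* OPDV = 2 */
        uint32_t ebc = plb / (((pllmr0 >> 8) & 0x3) + 2);      /* EPDV = 2 */

        /* prints CPU 33333333 PLB 16666666 OPB 8333333 EBC 8333333 */
        printf("CPU %" PRIu32 " PLB %" PRIu32 " OPB %" PRIu32
               " EBC %" PRIu32 "\n", cpu, plb, opb, ebc);
        return 0;
    }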
clk_setup(&cpc->clk_setup[PPC405EP_EBC_CLK], EBC_clk); + /* Setup MAL clock */ + clk_setup(&cpc->clk_setup[PPC405EP_MAL_CLK], MAL_clk); + /* Setup PCI clock */ + clk_setup(&cpc->clk_setup[PPC405EP_PCI_CLK], PCI_clk); + /* Setup UART0 clock */ + clk_setup(&cpc->clk_setup[PPC405EP_UART0_CLK], UART0_clk); + /* Setup UART1 clock */ + clk_setup(&cpc->clk_setup[PPC405EP_UART1_CLK], UART1_clk); +} + +static uint32_t dcr_read_epcpc (void *opaque, int dcrn) +{ + ppc405ep_cpc_t *cpc; + uint32_t ret; + + cpc = opaque; + switch (dcrn) { + case PPC405EP_CPC0_BOOT: + ret = cpc->boot; + break; + case PPC405EP_CPC0_EPCTL: + ret = cpc->epctl; + break; + case PPC405EP_CPC0_PLLMR0: + ret = cpc->pllmr[0]; + break; + case PPC405EP_CPC0_PLLMR1: + ret = cpc->pllmr[1]; + break; + case PPC405EP_CPC0_UCR: + ret = cpc->ucr; + break; + case PPC405EP_CPC0_SRR: + ret = cpc->srr; + break; + case PPC405EP_CPC0_JTAGID: + ret = cpc->jtagid; + break; + case PPC405EP_CPC0_PCI: + ret = cpc->pci; + break; + default: + /* Avoid gcc warning */ + ret = 0; + break; + } + + return ret; +} + +static void dcr_write_epcpc (void *opaque, int dcrn, uint32_t val) +{ + ppc405ep_cpc_t *cpc; + + cpc = opaque; + switch (dcrn) { + case PPC405EP_CPC0_BOOT: + /* Read-only register */ + break; + case PPC405EP_CPC0_EPCTL: + /* Don't care for now */ + cpc->epctl = val & 0xC00000F3; + break; + case PPC405EP_CPC0_PLLMR0: + cpc->pllmr[0] = val & 0x00633333; + ppc405ep_compute_clocks(cpc); + break; + case PPC405EP_CPC0_PLLMR1: + cpc->pllmr[1] = val & 0xC0F73FFF; + ppc405ep_compute_clocks(cpc); + break; + case PPC405EP_CPC0_UCR: + /* UART control - don't care for now */ + cpc->ucr = val & 0x003F7F7F; + break; + case PPC405EP_CPC0_SRR: + cpc->srr = val; + break; + case PPC405EP_CPC0_JTAGID: + /* Read-only */ + break; + case PPC405EP_CPC0_PCI: + cpc->pci = val; + break; + } +} + +static void ppc405ep_cpc_reset (void *opaque) +{ + ppc405ep_cpc_t *cpc = opaque; + + cpc->boot = 0x00000010; /* Boot from PCI - IIC EEPROM disabled */ + cpc->epctl = 0x00000000; + cpc->pllmr[0] = 0x00011010; + cpc->pllmr[1] = 0x40000000; + cpc->ucr = 0x00000000; + cpc->srr = 0x00040000; + cpc->pci = 0x00000000; + cpc->er = 0x00000000; + cpc->fr = 0x00000000; + cpc->sr = 0x00000000; + ppc405ep_compute_clocks(cpc); +} + +/* XXX: sysclk should be between 25 and 100 MHz */ +static void ppc405ep_cpc_init (CPUPPCState *env, clk_setup_t clk_setup[8], + uint32_t sysclk) +{ + ppc405ep_cpc_t *cpc; + + cpc = g_malloc0(sizeof(ppc405ep_cpc_t)); + memcpy(cpc->clk_setup, clk_setup, + PPC405EP_CLK_NB * sizeof(clk_setup_t)); + cpc->jtagid = 0x20267049; + cpc->sysclk = sysclk; + qemu_register_reset(&ppc405ep_cpc_reset, cpc); + ppc_dcr_register(env, PPC405EP_CPC0_BOOT, cpc, + &dcr_read_epcpc, &dcr_write_epcpc); + ppc_dcr_register(env, PPC405EP_CPC0_EPCTL, cpc, + &dcr_read_epcpc, &dcr_write_epcpc); + ppc_dcr_register(env, PPC405EP_CPC0_PLLMR0, cpc, + &dcr_read_epcpc, &dcr_write_epcpc); + ppc_dcr_register(env, PPC405EP_CPC0_PLLMR1, cpc, + &dcr_read_epcpc, &dcr_write_epcpc); + ppc_dcr_register(env, PPC405EP_CPC0_UCR, cpc, + &dcr_read_epcpc, &dcr_write_epcpc); + ppc_dcr_register(env, PPC405EP_CPC0_SRR, cpc, + &dcr_read_epcpc, &dcr_write_epcpc); + ppc_dcr_register(env, PPC405EP_CPC0_JTAGID, cpc, + &dcr_read_epcpc, &dcr_write_epcpc); + ppc_dcr_register(env, PPC405EP_CPC0_PCI, cpc, + &dcr_read_epcpc, &dcr_write_epcpc); +#if 0 + ppc_dcr_register(env, PPC405EP_CPC0_ER, cpc, + &dcr_read_epcpc, &dcr_write_epcpc); + ppc_dcr_register(env, PPC405EP_CPC0_FR, cpc, + &dcr_read_epcpc, &dcr_write_epcpc); + 
ppc_dcr_register(env, PPC405EP_CPC0_SR, cpc, + &dcr_read_epcpc, &dcr_write_epcpc); +#endif +} + +CPUPPCState *ppc405ep_init(MemoryRegion *address_space_mem, + MemoryRegion ram_memories[2], + hwaddr ram_bases[2], + hwaddr ram_sizes[2], + uint32_t sysclk, DeviceState **uicdevp, + int do_init) +{ + clk_setup_t clk_setup[PPC405EP_CLK_NB], tlb_clk_setup; + qemu_irq dma_irqs[4], gpt_irqs[5], mal_irqs[4]; + PowerPCCPU *cpu; + CPUPPCState *env; + DeviceState *uicdev; + SysBusDevice *uicsbd; + + memset(clk_setup, 0, sizeof(clk_setup)); + /* init CPUs */ + cpu = ppc4xx_init(POWERPC_CPU_TYPE_NAME("405ep"), + &clk_setup[PPC405EP_CPU_CLK], + &tlb_clk_setup, sysclk); + env = &cpu->env; + clk_setup[PPC405EP_CPU_CLK].cb = tlb_clk_setup.cb; + clk_setup[PPC405EP_CPU_CLK].opaque = tlb_clk_setup.opaque; + /* Internal devices init */ + /* Memory mapped devices registers */ + /* PLB arbitrer */ + ppc4xx_plb_init(env); + /* PLB to OPB bridge */ + ppc4xx_pob_init(env); + /* OBP arbitrer */ + ppc4xx_opba_init(0xef600600); + /* Initialize timers */ + ppc_booke_timers_init(cpu, sysclk, 0); + /* Universal interrupt controller */ + uicdev = qdev_new(TYPE_PPC_UIC); + uicsbd = SYS_BUS_DEVICE(uicdev); + + object_property_set_link(OBJECT(uicdev), "cpu", OBJECT(cpu), + &error_fatal); + sysbus_realize_and_unref(uicsbd, &error_fatal); + + sysbus_connect_irq(uicsbd, PPCUIC_OUTPUT_INT, + ((qemu_irq *)env->irq_inputs)[PPC40x_INPUT_INT]); + sysbus_connect_irq(uicsbd, PPCUIC_OUTPUT_CINT, + ((qemu_irq *)env->irq_inputs)[PPC40x_INPUT_CINT]); + + *uicdevp = uicdev; + + /* SDRAM controller */ + /* XXX 405EP has no ECC interrupt */ + ppc4xx_sdram_init(env, qdev_get_gpio_in(uicdev, 17), 2, ram_memories, + ram_bases, ram_sizes, do_init); + /* External bus controller */ + ppc405_ebc_init(env); + /* DMA controller */ + dma_irqs[0] = qdev_get_gpio_in(uicdev, 5); + dma_irqs[1] = qdev_get_gpio_in(uicdev, 6); + dma_irqs[2] = qdev_get_gpio_in(uicdev, 7); + dma_irqs[3] = qdev_get_gpio_in(uicdev, 8); + ppc405_dma_init(env, dma_irqs); + /* IIC controller */ + sysbus_create_simple(TYPE_PPC4xx_I2C, 0xef600500, + qdev_get_gpio_in(uicdev, 2)); + /* GPIO */ + ppc405_gpio_init(0xef600700); + /* Serial ports */ + if (serial_hd(0) != NULL) { + serial_mm_init(address_space_mem, 0xef600300, 0, + qdev_get_gpio_in(uicdev, 0), + PPC_SERIAL_MM_BAUDBASE, serial_hd(0), + DEVICE_BIG_ENDIAN); + } + if (serial_hd(1) != NULL) { + serial_mm_init(address_space_mem, 0xef600400, 0, + qdev_get_gpio_in(uicdev, 1), + PPC_SERIAL_MM_BAUDBASE, serial_hd(1), + DEVICE_BIG_ENDIAN); + } + /* OCM */ + ppc405_ocm_init(env); + /* GPT */ + gpt_irqs[0] = qdev_get_gpio_in(uicdev, 19); + gpt_irqs[1] = qdev_get_gpio_in(uicdev, 20); + gpt_irqs[2] = qdev_get_gpio_in(uicdev, 21); + gpt_irqs[3] = qdev_get_gpio_in(uicdev, 22); + gpt_irqs[4] = qdev_get_gpio_in(uicdev, 23); + ppc4xx_gpt_init(0xef600000, gpt_irqs); + /* PCI */ + /* Uses UIC IRQs 3, 16, 18 */ + /* MAL */ + mal_irqs[0] = qdev_get_gpio_in(uicdev, 11); + mal_irqs[1] = qdev_get_gpio_in(uicdev, 12); + mal_irqs[2] = qdev_get_gpio_in(uicdev, 13); + mal_irqs[3] = qdev_get_gpio_in(uicdev, 14); + ppc4xx_mal_init(env, 4, 2, mal_irqs); + /* Ethernet */ + /* Uses UIC IRQs 9, 15, 17 */ + /* CPU control */ + ppc405ep_cpc_init(env, clk_setup, sysclk); + + return env; +} diff --git a/hw/ppc/ppc440.h b/hw/ppc/ppc440.h new file mode 100644 index 000000000..7cef93612 --- /dev/null +++ b/hw/ppc/ppc440.h @@ -0,0 +1,27 @@ +/* + * QEMU PowerPC 440 shared definitions + * + * Copyright (c) 2012 François Revol + * Copyright (c) 2016-2018 BALATON Zoltan + * 
+ * This work is licensed under the GNU GPL license version 2 or later. + * + */ + +#ifndef PPC440_H +#define PPC440_H + +#include "hw/ppc/ppc.h" + +void ppc4xx_l2sram_init(CPUPPCState *env); +void ppc4xx_cpr_init(CPUPPCState *env); +void ppc4xx_sdr_init(CPUPPCState *env); +void ppc440_sdram_init(CPUPPCState *env, int nbanks, + MemoryRegion *ram_memories, + hwaddr *ram_bases, hwaddr *ram_sizes, + int do_init); +void ppc4xx_ahb_init(CPUPPCState *env); +void ppc4xx_dma_init(CPUPPCState *env, int dcr_base); +void ppc460ex_pcie_init(CPUPPCState *env); + +#endif /* PPC440_H */ diff --git a/hw/ppc/ppc440_bamboo.c b/hw/ppc/ppc440_bamboo.c new file mode 100644 index 000000000..7fb620b9a --- /dev/null +++ b/hw/ppc/ppc440_bamboo.c @@ -0,0 +1,307 @@ +/* + * QEMU PowerPC 440 Bamboo board emulation + * + * Copyright 2007 IBM Corporation. + * Authors: + * Jerone Young + * Christian Ehrhardt + * Hollis Blanchard + * + * This work is licensed under the GNU GPL license version 2 or later. + * + */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "qemu/error-report.h" +#include "qemu-common.h" +#include "qemu/datadir.h" +#include "qemu/error-report.h" +#include "net/net.h" +#include "hw/pci/pci.h" +#include "hw/boards.h" +#include "sysemu/kvm.h" +#include "kvm_ppc.h" +#include "sysemu/device_tree.h" +#include "hw/loader.h" +#include "elf.h" +#include "hw/char/serial.h" +#include "hw/ppc/ppc.h" +#include "ppc405.h" +#include "sysemu/sysemu.h" +#include "sysemu/reset.h" +#include "hw/sysbus.h" +#include "hw/intc/ppc-uic.h" +#include "hw/qdev-properties.h" +#include "qapi/error.h" + +#define BINARY_DEVICE_TREE_FILE "bamboo.dtb" + +/* from u-boot */ +#define KERNEL_ADDR 0x1000000 +#define FDT_ADDR 0x1800000 +#define RAMDISK_ADDR 0x1900000 + +#define PPC440EP_PCI_CONFIG 0xeec00000 +#define PPC440EP_PCI_INTACK 0xeed00000 +#define PPC440EP_PCI_SPECIAL 0xeed00000 +#define PPC440EP_PCI_REGS 0xef400000 +#define PPC440EP_PCI_IO 0xe8000000 +#define PPC440EP_PCI_IOLEN 0x00010000 + +#define PPC440EP_SDRAM_NR_BANKS 4 + +static const ram_addr_t ppc440ep_sdram_bank_sizes[] = { + 256 * MiB, 128 * MiB, 64 * MiB, 32 * MiB, 16 * MiB, 8 * MiB, 0 +}; + +static hwaddr entry; + +static int bamboo_load_device_tree(hwaddr addr, + uint32_t ramsize, + hwaddr initrd_base, + hwaddr initrd_size, + const char *kernel_cmdline) +{ + int ret = -1; + uint32_t mem_reg_property[] = { 0, 0, cpu_to_be32(ramsize) }; + char *filename; + int fdt_size; + void *fdt; + uint32_t tb_freq = 400000000; + uint32_t clock_freq = 400000000; + + filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, BINARY_DEVICE_TREE_FILE); + if (!filename) { + return -1; + } + fdt = load_device_tree(filename, &fdt_size); + g_free(filename); + if (fdt == NULL) { + return -1; + } + + /* Manipulate device tree in memory. */ + + ret = qemu_fdt_setprop(fdt, "/memory", "reg", mem_reg_property, + sizeof(mem_reg_property)); + if (ret < 0) + fprintf(stderr, "couldn't set /memory/reg\n"); + + ret = qemu_fdt_setprop_cell(fdt, "/chosen", "linux,initrd-start", + initrd_base); + if (ret < 0) + fprintf(stderr, "couldn't set /chosen/linux,initrd-start\n"); + + ret = qemu_fdt_setprop_cell(fdt, "/chosen", "linux,initrd-end", + (initrd_base + initrd_size)); + if (ret < 0) + fprintf(stderr, "couldn't set /chosen/linux,initrd-end\n"); + + ret = qemu_fdt_setprop_string(fdt, "/chosen", "bootargs", + kernel_cmdline); + if (ret < 0) + fprintf(stderr, "couldn't set /chosen/bootargs\n"); + + /* Copy data from the host device tree into the guest. 
Since the guest can + * directly access the timebase without host involvement, we must expose + * the correct frequencies. */ + if (kvm_enabled()) { + tb_freq = kvmppc_get_tbfreq(); + clock_freq = kvmppc_get_clockfreq(); + } + + qemu_fdt_setprop_cell(fdt, "/cpus/cpu@0", "clock-frequency", + clock_freq); + qemu_fdt_setprop_cell(fdt, "/cpus/cpu@0", "timebase-frequency", + tb_freq); + + rom_add_blob_fixed(BINARY_DEVICE_TREE_FILE, fdt, fdt_size, addr); + g_free(fdt); + return 0; +} + +/* Create reset TLB entries for BookE, spanning the 32bit addr space. */ +static void mmubooke_create_initial_mapping(CPUPPCState *env, + target_ulong va, + hwaddr pa) +{ + ppcemb_tlb_t *tlb = &env->tlb.tlbe[0]; + + tlb->attr = 0; + tlb->prot = PAGE_VALID | ((PAGE_READ | PAGE_WRITE | PAGE_EXEC) << 4); + tlb->size = 1U << 31; /* up to 0x80000000 */ + tlb->EPN = va & TARGET_PAGE_MASK; + tlb->RPN = pa & TARGET_PAGE_MASK; + tlb->PID = 0; + + tlb = &env->tlb.tlbe[1]; + tlb->attr = 0; + tlb->prot = PAGE_VALID | ((PAGE_READ | PAGE_WRITE | PAGE_EXEC) << 4); + tlb->size = 1U << 31; /* up to 0xffffffff */ + tlb->EPN = 0x80000000 & TARGET_PAGE_MASK; + tlb->RPN = 0x80000000 & TARGET_PAGE_MASK; + tlb->PID = 0; +} + +static void main_cpu_reset(void *opaque) +{ + PowerPCCPU *cpu = opaque; + CPUPPCState *env = &cpu->env; + + cpu_reset(CPU(cpu)); + env->gpr[1] = (16 * MiB) - 8; + env->gpr[3] = FDT_ADDR; + env->nip = entry; + + /* Create a mapping for the kernel. */ + mmubooke_create_initial_mapping(env, 0, 0); +} + +static void bamboo_init(MachineState *machine) +{ + const char *kernel_filename = machine->kernel_filename; + const char *kernel_cmdline = machine->kernel_cmdline; + const char *initrd_filename = machine->initrd_filename; + unsigned int pci_irq_nrs[4] = { 28, 27, 26, 25 }; + MemoryRegion *address_space_mem = get_system_memory(); + MemoryRegion *isa = g_new(MemoryRegion, 1); + MemoryRegion *ram_memories = g_new(MemoryRegion, PPC440EP_SDRAM_NR_BANKS); + hwaddr ram_bases[PPC440EP_SDRAM_NR_BANKS]; + hwaddr ram_sizes[PPC440EP_SDRAM_NR_BANKS]; + PCIBus *pcibus; + PowerPCCPU *cpu; + CPUPPCState *env; + target_long initrd_size = 0; + DeviceState *dev; + DeviceState *uicdev; + SysBusDevice *uicsbd; + int success; + int i; + + cpu = POWERPC_CPU(cpu_create(machine->cpu_type)); + env = &cpu->env; + + if (env->mmu_model != POWERPC_MMU_BOOKE) { + error_report("MMU model %i not supported by this machine", + env->mmu_model); + exit(1); + } + + qemu_register_reset(main_cpu_reset, cpu); + ppc_booke_timers_init(cpu, 400000000, 0); + ppc_dcr_init(env, NULL, NULL); + + /* interrupt controller */ + uicdev = qdev_new(TYPE_PPC_UIC); + uicsbd = SYS_BUS_DEVICE(uicdev); + + object_property_set_link(OBJECT(uicdev), "cpu", OBJECT(cpu), + &error_fatal); + sysbus_realize_and_unref(uicsbd, &error_fatal); + + sysbus_connect_irq(uicsbd, PPCUIC_OUTPUT_INT, + ((qemu_irq *)env->irq_inputs)[PPC40x_INPUT_INT]); + sysbus_connect_irq(uicsbd, PPCUIC_OUTPUT_CINT, + ((qemu_irq *)env->irq_inputs)[PPC40x_INPUT_CINT]); + + /* SDRAM controller */ + memset(ram_bases, 0, sizeof(ram_bases)); + memset(ram_sizes, 0, sizeof(ram_sizes)); + ppc4xx_sdram_banks(machine->ram, PPC440EP_SDRAM_NR_BANKS, ram_memories, + ram_bases, ram_sizes, ppc440ep_sdram_bank_sizes); + /* XXX 440EP's ECC interrupts are on UIC1, but we've only created UIC0. 
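The two reset TLB entries created by mmubooke_create_initial_mapping() each span 1U << 31 bytes and map virtual to physical 1:1 at 0x00000000 and 0x80000000, so the whole 32-bit address space is usable before the guest kernel installs its own translations. A standalone sanity check of that coverage:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        /* two entries of 1U << 31 bytes cover the full 32-bit space */
        assert(2 * (uint64_t)(1U << 31) == (1ULL << 32));

        /* identity mapping: EPN == RPN, so pa == va for any address */
        uint32_t va = 0x9000beef;    /* falls in the second entry */
        uint32_t epn = (va < 0x80000000) ? 0x00000000u : 0x80000000u;
        uint32_t pa = epn + (va - epn);
        assert(pa == va);
        return 0;
    }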
*/ + ppc4xx_sdram_init(env, + qdev_get_gpio_in(uicdev, 14), + PPC440EP_SDRAM_NR_BANKS, ram_memories, + ram_bases, ram_sizes, 1); + + /* PCI */ + dev = sysbus_create_varargs(TYPE_PPC4xx_PCI_HOST_BRIDGE, + PPC440EP_PCI_CONFIG, + qdev_get_gpio_in(uicdev, pci_irq_nrs[0]), + qdev_get_gpio_in(uicdev, pci_irq_nrs[1]), + qdev_get_gpio_in(uicdev, pci_irq_nrs[2]), + qdev_get_gpio_in(uicdev, pci_irq_nrs[3]), + NULL); + pcibus = (PCIBus *)qdev_get_child_bus(dev, "pci.0"); + if (!pcibus) { + error_report("couldn't create PCI controller"); + exit(1); + } + + memory_region_init_alias(isa, NULL, "isa_mmio", + get_system_io(), 0, PPC440EP_PCI_IOLEN); + memory_region_add_subregion(get_system_memory(), PPC440EP_PCI_IO, isa); + + if (serial_hd(0) != NULL) { + serial_mm_init(address_space_mem, 0xef600300, 0, + qdev_get_gpio_in(uicdev, 0), + PPC_SERIAL_MM_BAUDBASE, serial_hd(0), + DEVICE_BIG_ENDIAN); + } + if (serial_hd(1) != NULL) { + serial_mm_init(address_space_mem, 0xef600400, 0, + qdev_get_gpio_in(uicdev, 1), + PPC_SERIAL_MM_BAUDBASE, serial_hd(1), + DEVICE_BIG_ENDIAN); + } + + if (pcibus) { + /* Register network interfaces. */ + for (i = 0; i < nb_nics; i++) { + /* There are no PCI NICs on the Bamboo board, but there are + * PCI slots, so we can pick whatever default model we want. */ + pci_nic_init_nofail(&nd_table[i], pcibus, "e1000", NULL); + } + } + + /* Load kernel. */ + if (kernel_filename) { + hwaddr loadaddr = LOAD_UIMAGE_LOADADDR_INVALID; + success = load_uimage(kernel_filename, &entry, &loadaddr, NULL, + NULL, NULL); + if (success < 0) { + uint64_t elf_entry; + success = load_elf(kernel_filename, NULL, NULL, NULL, &elf_entry, + NULL, NULL, NULL, 1, PPC_ELF_MACHINE, 0, 0); + entry = elf_entry; + } + /* XXX try again as binary */ + if (success < 0) { + error_report("could not load kernel '%s'", kernel_filename); + exit(1); + } + } + + /* Load initrd. */ + if (initrd_filename) { + initrd_size = load_image_targphys(initrd_filename, RAMDISK_ADDR, + machine->ram_size - RAMDISK_ADDR); + + if (initrd_size < 0) { + error_report("could not load ram disk '%s' at %x", + initrd_filename, RAMDISK_ADDR); + exit(1); + } + } + + /* If we're loading a kernel directly, we must load the device tree too. */ + if (kernel_filename) { + if (bamboo_load_device_tree(FDT_ADDR, machine->ram_size, RAMDISK_ADDR, + initrd_size, kernel_cmdline) < 0) { + error_report("couldn't load device tree"); + exit(1); + } + } +} + +static void bamboo_machine_init(MachineClass *mc) +{ + mc->desc = "bamboo"; + mc->init = bamboo_init; + mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("440epb"); + mc->default_ram_id = "ppc4xx.sdram"; +} + +DEFINE_MACHINE("bamboo", bamboo_machine_init) diff --git a/hw/ppc/ppc440_pcix.c b/hw/ppc/ppc440_pcix.c new file mode 100644 index 000000000..788d25514 --- /dev/null +++ b/hw/ppc/ppc440_pcix.c @@ -0,0 +1,538 @@ +/* + * Emulation of the ibm,plb-pcix PCI controller + * This is found in some 440 SoCs e.g. the 460EX. + * + * Copyright (c) 2016-2018 BALATON Zoltan + * + * Derived from ppc4xx_pci.c and pci-host/ppce500.c + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
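ppc4xx_sdram_banks() (defined elsewhere in this tree) has to split machine->ram_size across up to four banks drawn from the fixed ppc440ep_sdram_bank_sizes list, and sizes that cannot be composed from that list are rejected. A plausible standalone reduction of the fitting, assuming a greedy largest-first strategy (an assumption for illustration, not the helper's verbatim logic):

    #include <inttypes.h>
    #include <stdio.h>

    #define MiB (1024 * 1024)

    /* hypothetical greedy reduction of the bank fitting */
    static const uint64_t valid_sizes[] = {
        256 * MiB, 128 * MiB, 64 * MiB, 32 * MiB, 16 * MiB, 8 * MiB, 0
    };

    int main(void)
    {
        uint64_t ram = 144 * MiB, base = 0;   /* 144 MiB = 128 + 16 */
        int bank, i;

        for (bank = 0; bank < 4 && ram; bank++) {
            for (i = 0; valid_sizes[i]; i++) {
                if (valid_sizes[i] <= ram) {
                    printf("bank %d: base 0x%08" PRIx64 " size %" PRIu64
                           " MiB\n", bank, base, valid_sizes[i] / MiB);
                    base += valid_sizes[i];
                    ram -= valid_sizes[i];
                    break;
                }
            }
        }
        return 0;
    }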
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +#include "qemu/osdep.h" +#include "qemu/error-report.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "hw/irq.h" +#include "hw/ppc/ppc.h" +#include "hw/ppc/ppc4xx.h" +#include "hw/pci/pci.h" +#include "hw/pci/pci_host.h" +#include "trace.h" +#include "qom/object.h" + +struct PLBOutMap { + uint64_t la; + uint64_t pcia; + uint32_t sa; + MemoryRegion mr; +}; + +struct PLBInMap { + uint64_t sa; + uint64_t la; + MemoryRegion mr; +}; + +#define TYPE_PPC440_PCIX_HOST_BRIDGE "ppc440-pcix-host" +OBJECT_DECLARE_SIMPLE_TYPE(PPC440PCIXState, PPC440_PCIX_HOST_BRIDGE) + +#define PPC440_PCIX_NR_POMS 3 +#define PPC440_PCIX_NR_PIMS 3 + +struct PPC440PCIXState { + PCIHostState parent_obj; + + PCIDevice *dev; + struct PLBOutMap pom[PPC440_PCIX_NR_POMS]; + struct PLBInMap pim[PPC440_PCIX_NR_PIMS]; + uint32_t sts; + qemu_irq irq; + AddressSpace bm_as; + MemoryRegion bm; + + MemoryRegion container; + MemoryRegion iomem; + MemoryRegion busmem; +}; + +#define PPC440_REG_BASE 0x80000 +#define PPC440_REG_SIZE 0xff + +#define PCIC0_CFGADDR 0x0 +#define PCIC0_CFGDATA 0x4 + +#define PCIX0_POM0LAL 0x68 +#define PCIX0_POM0LAH 0x6c +#define PCIX0_POM0SA 0x70 +#define PCIX0_POM0PCIAL 0x74 +#define PCIX0_POM0PCIAH 0x78 +#define PCIX0_POM1LAL 0x7c +#define PCIX0_POM1LAH 0x80 +#define PCIX0_POM1SA 0x84 +#define PCIX0_POM1PCIAL 0x88 +#define PCIX0_POM1PCIAH 0x8c +#define PCIX0_POM2SA 0x90 + +#define PCIX0_PIM0SAL 0x98 +#define PCIX0_PIM0LAL 0x9c +#define PCIX0_PIM0LAH 0xa0 +#define PCIX0_PIM1SA 0xa4 +#define PCIX0_PIM1LAL 0xa8 +#define PCIX0_PIM1LAH 0xac +#define PCIX0_PIM2SAL 0xb0 +#define PCIX0_PIM2LAL 0xb4 +#define PCIX0_PIM2LAH 0xb8 +#define PCIX0_PIM0SAH 0xf8 +#define PCIX0_PIM2SAH 0xfc + +#define PCIX0_STS 0xe0 + +#define PCI_ALL_SIZE (PPC440_REG_BASE + PPC440_REG_SIZE) + +static void ppc440_pcix_clear_region(MemoryRegion *parent, + MemoryRegion *mem) +{ + if (memory_region_is_mapped(mem)) { + memory_region_del_subregion(parent, mem); + object_unparent(OBJECT(mem)); + } +} + +/* DMA mapping */ +static void ppc440_pcix_update_pim(PPC440PCIXState *s, int idx) +{ + MemoryRegion *mem = &s->pim[idx].mr; + char *name; + uint64_t size; + + /* Before we modify anything, unmap and destroy the region */ + ppc440_pcix_clear_region(&s->bm, mem); + + if (!(s->pim[idx].sa & 1)) { + /* Not enabled, nothing to do */ + return; + } + + name = g_strdup_printf("PCI Inbound Window %d", idx); + size = ~(s->pim[idx].sa & ~7ULL) + 1; + memory_region_init_alias(mem, OBJECT(s), name, get_system_memory(), + s->pim[idx].la, size); + memory_region_add_subregion_overlap(&s->bm, 0, mem, -1); + g_free(name); + + trace_ppc440_pcix_update_pim(idx, size, s->pim[idx].la); +} + +/* BAR mapping */ +static void ppc440_pcix_update_pom(PPC440PCIXState *s, int idx) +{ + MemoryRegion *mem = &s->pom[idx].mr; + MemoryRegion *address_space_mem = get_system_memory(); + char *name; + uint32_t size; + + /* Before we modify anything, unmap and destroy the region */ + ppc440_pcix_clear_region(address_space_mem, mem); + + if (!(s->pom[idx].sa & 1)) { + /* Not enabled, nothing to do */ + return; + } + + name = g_strdup_printf("PCI Outbound Window %d", idx); + size = ~(s->pom[idx].sa & 0xfffffffe) + 1; + if (!size) { + size = 0xffffffff; + } + memory_region_init_alias(mem, OBJECT(s), name, &s->busmem, + s->pom[idx].pcia, size); + memory_region_add_subregion(address_space_mem, s->pom[idx].la, mem); + g_free(name); + + 
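ppc440_pcix_update_pim() treats the SA register as a size mask with low control bits (bit 0 is the enable), so the window size falls out as the two's complement of the masked value. A worked standalone example:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        /* inbound window SA: size mask plus control bits, bit 0 = enable */
        uint64_t sa = 0xfffffffff0000001ULL;   /* 256 MiB window, enabled */
        assert(sa & 1);

        /* same decode as in ppc440_pcix_update_pim() */
        uint64_t size = ~(sa & ~7ULL) + 1;
        assert(size == 0x10000000);            /* 256 MiB */
        return 0;
    }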
trace_ppc440_pcix_update_pom(idx, size, s->pom[idx].la, s->pom[idx].pcia); +} + +static void ppc440_pcix_reg_write4(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + struct PPC440PCIXState *s = opaque; + + trace_ppc440_pcix_reg_write(addr, val, size); + switch (addr) { + case PCI_VENDOR_ID ... PCI_MAX_LAT: + stl_le_p(s->dev->config + addr, val); + break; + + case PCIX0_POM0LAL: + s->pom[0].la &= 0xffffffff00000000ULL; + s->pom[0].la |= val; + ppc440_pcix_update_pom(s, 0); + break; + case PCIX0_POM0LAH: + s->pom[0].la &= 0xffffffffULL; + s->pom[0].la |= val << 32; + ppc440_pcix_update_pom(s, 0); + break; + case PCIX0_POM0SA: + s->pom[0].sa = val; + ppc440_pcix_update_pom(s, 0); + break; + case PCIX0_POM0PCIAL: + s->pom[0].pcia &= 0xffffffff00000000ULL; + s->pom[0].pcia |= val; + ppc440_pcix_update_pom(s, 0); + break; + case PCIX0_POM0PCIAH: + s->pom[0].pcia &= 0xffffffffULL; + s->pom[0].pcia |= val << 32; + ppc440_pcix_update_pom(s, 0); + break; + case PCIX0_POM1LAL: + s->pom[1].la &= 0xffffffff00000000ULL; + s->pom[1].la |= val; + ppc440_pcix_update_pom(s, 1); + break; + case PCIX0_POM1LAH: + s->pom[1].la &= 0xffffffffULL; + s->pom[1].la |= val << 32; + ppc440_pcix_update_pom(s, 1); + break; + case PCIX0_POM1SA: + s->pom[1].sa = val; + ppc440_pcix_update_pom(s, 1); + break; + case PCIX0_POM1PCIAL: + s->pom[1].pcia &= 0xffffffff00000000ULL; + s->pom[1].pcia |= val; + ppc440_pcix_update_pom(s, 1); + break; + case PCIX0_POM1PCIAH: + s->pom[1].pcia &= 0xffffffffULL; + s->pom[1].pcia |= val << 32; + ppc440_pcix_update_pom(s, 1); + break; + case PCIX0_POM2SA: + s->pom[2].sa = val; + break; + + case PCIX0_PIM0SAL: + s->pim[0].sa &= 0xffffffff00000000ULL; + s->pim[0].sa |= val; + ppc440_pcix_update_pim(s, 0); + break; + case PCIX0_PIM0LAL: + s->pim[0].la &= 0xffffffff00000000ULL; + s->pim[0].la |= val; + ppc440_pcix_update_pim(s, 0); + break; + case PCIX0_PIM0LAH: + s->pim[0].la &= 0xffffffffULL; + s->pim[0].la |= val << 32; + ppc440_pcix_update_pim(s, 0); + break; + case PCIX0_PIM1SA: + s->pim[1].sa = val; + ppc440_pcix_update_pim(s, 1); + break; + case PCIX0_PIM1LAL: + s->pim[1].la &= 0xffffffff00000000ULL; + s->pim[1].la |= val; + ppc440_pcix_update_pim(s, 1); + break; + case PCIX0_PIM1LAH: + s->pim[1].la &= 0xffffffffULL; + s->pim[1].la |= val << 32; + ppc440_pcix_update_pim(s, 1); + break; + case PCIX0_PIM2SAL: + s->pim[2].sa &= 0xffffffff00000000ULL; + s->pim[2].sa |= val; + ppc440_pcix_update_pim(s, 2); + break; + case PCIX0_PIM2LAL: + s->pim[2].la &= 0xffffffff00000000ULL; + s->pim[2].la |= val; + ppc440_pcix_update_pim(s, 2); + break; + case PCIX0_PIM2LAH: + s->pim[2].la &= 0xffffffffULL; + s->pim[2].la |= val << 32; + ppc440_pcix_update_pim(s, 2); + break; + + case PCIX0_STS: + s->sts = val; + break; + + case PCIX0_PIM0SAH: + s->pim[0].sa &= 0xffffffffULL; + s->pim[0].sa |= val << 32; + ppc440_pcix_update_pim(s, 0); + break; + case PCIX0_PIM2SAH: + s->pim[2].sa &= 0xffffffffULL; + s->pim[2].sa |= val << 32; + ppc440_pcix_update_pim(s, 2); + break; + + default: + qemu_log_mask(LOG_UNIMP, + "%s: unhandled PCI internal register 0x%"HWADDR_PRIx"\n", + __func__, addr); + break; + } +} + +static uint64_t ppc440_pcix_reg_read4(void *opaque, hwaddr addr, + unsigned size) +{ + struct PPC440PCIXState *s = opaque; + uint32_t val; + + switch (addr) { + case PCI_VENDOR_ID ... 
PCI_MAX_LAT: + val = ldl_le_p(s->dev->config + addr); + break; + + case PCIX0_POM0LAL: + val = s->pom[0].la; + break; + case PCIX0_POM0LAH: + val = s->pom[0].la >> 32; + break; + case PCIX0_POM0SA: + val = s->pom[0].sa; + break; + case PCIX0_POM0PCIAL: + val = s->pom[0].pcia; + break; + case PCIX0_POM0PCIAH: + val = s->pom[0].pcia >> 32; + break; + case PCIX0_POM1LAL: + val = s->pom[1].la; + break; + case PCIX0_POM1LAH: + val = s->pom[1].la >> 32; + break; + case PCIX0_POM1SA: + val = s->pom[1].sa; + break; + case PCIX0_POM1PCIAL: + val = s->pom[1].pcia; + break; + case PCIX0_POM1PCIAH: + val = s->pom[1].pcia >> 32; + break; + case PCIX0_POM2SA: + val = s->pom[2].sa; + break; + + case PCIX0_PIM0SAL: + val = s->pim[0].sa; + break; + case PCIX0_PIM0LAL: + val = s->pim[0].la; + break; + case PCIX0_PIM0LAH: + val = s->pim[0].la >> 32; + break; + case PCIX0_PIM1SA: + val = s->pim[1].sa; + break; + case PCIX0_PIM1LAL: + val = s->pim[1].la; + break; + case PCIX0_PIM1LAH: + val = s->pim[1].la >> 32; + break; + case PCIX0_PIM2SAL: + val = s->pim[2].sa; + break; + case PCIX0_PIM2LAL: + val = s->pim[2].la; + break; + case PCIX0_PIM2LAH: + val = s->pim[2].la >> 32; + break; + + case PCIX0_STS: + val = s->sts; + break; + + case PCIX0_PIM0SAH: + val = s->pim[0].sa >> 32; + break; + case PCIX0_PIM2SAH: + val = s->pim[2].sa >> 32; + break; + + default: + qemu_log_mask(LOG_UNIMP, + "%s: invalid PCI internal register 0x%" HWADDR_PRIx "\n", + __func__, addr); + val = 0; + } + + trace_ppc440_pcix_reg_read(addr, val); + return val; +} + +static const MemoryRegionOps pci_reg_ops = { + .read = ppc440_pcix_reg_read4, + .write = ppc440_pcix_reg_write4, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static void ppc440_pcix_reset(DeviceState *dev) +{ + struct PPC440PCIXState *s = PPC440_PCIX_HOST_BRIDGE(dev); + int i; + + for (i = 0; i < PPC440_PCIX_NR_POMS; i++) { + ppc440_pcix_clear_region(get_system_memory(), &s->pom[i].mr); + } + for (i = 0; i < PPC440_PCIX_NR_PIMS; i++) { + ppc440_pcix_clear_region(&s->bm, &s->pim[i].mr); + } + memset(s->pom, 0, sizeof(s->pom)); + memset(s->pim, 0, sizeof(s->pim)); + for (i = 0; i < PPC440_PCIX_NR_PIMS; i++) { + s->pim[i].sa = 0xffffffff00000000ULL; + } + s->sts = 0; +} + +/* + * All four IRQ[ABCD] pins from all slots are tied to a single board + * IRQ, so our mapping function here maps everything to IRQ 0. + * The code in pci_change_irq_level() tracks the number of times + * the mapped IRQ is asserted and deasserted, so if multiple devices + * assert an IRQ at the same time the behaviour is correct. + * + * This may need further refactoring for boards that use multiple IRQ lines. 
+ */ +static int ppc440_pcix_map_irq(PCIDevice *pci_dev, int irq_num) +{ + trace_ppc440_pcix_map_irq(pci_dev->devfn, irq_num, 0); + return 0; +} + +static void ppc440_pcix_set_irq(void *opaque, int irq_num, int level) +{ + qemu_irq *pci_irq = opaque; + + trace_ppc440_pcix_set_irq(irq_num); + if (irq_num < 0) { + error_report("%s: PCI irq %d", __func__, irq_num); + return; + } + qemu_set_irq(*pci_irq, level); +} + +static AddressSpace *ppc440_pcix_set_iommu(PCIBus *b, void *opaque, int devfn) +{ + PPC440PCIXState *s = opaque; + + return &s->bm_as; +} + +/* + * Some guests on sam460ex write all kinds of garbage here such as + * missing enable bit and low bits set and still expect this to work + * (apparently it does on real hardware because these boot there) so + * we have to override these ops here and fix it up + */ +static void pci_host_config_write(void *opaque, hwaddr addr, + uint64_t val, unsigned len) +{ + PCIHostState *s = opaque; + + if (addr != 0 || len != 4) { + return; + } + s->config_reg = (val & 0xfffffffcULL) | (1UL << 31); +} + +static uint64_t pci_host_config_read(void *opaque, hwaddr addr, + unsigned len) +{ + PCIHostState *s = opaque; + uint32_t val = s->config_reg; + + return val; +} + +const MemoryRegionOps ppc440_pcix_host_conf_ops = { + .read = pci_host_config_read, + .write = pci_host_config_write, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static void ppc440_pcix_realize(DeviceState *dev, Error **errp) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + PPC440PCIXState *s; + PCIHostState *h; + + h = PCI_HOST_BRIDGE(dev); + s = PPC440_PCIX_HOST_BRIDGE(dev); + + sysbus_init_irq(sbd, &s->irq); + memory_region_init(&s->busmem, OBJECT(dev), "pci bus memory", UINT64_MAX); + h->bus = pci_register_root_bus(dev, NULL, ppc440_pcix_set_irq, + ppc440_pcix_map_irq, &s->irq, &s->busmem, + get_system_io(), PCI_DEVFN(0, 0), 1, TYPE_PCI_BUS); + + s->dev = pci_create_simple(h->bus, PCI_DEVFN(0, 0), "ppc4xx-host-bridge"); + + memory_region_init(&s->bm, OBJECT(s), "bm-ppc440-pcix", UINT64_MAX); + memory_region_add_subregion(&s->bm, 0x0, &s->busmem); + address_space_init(&s->bm_as, &s->bm, "pci-bm"); + pci_setup_iommu(h->bus, ppc440_pcix_set_iommu, s); + + memory_region_init(&s->container, OBJECT(s), "pci-container", PCI_ALL_SIZE); + memory_region_init_io(&h->conf_mem, OBJECT(s), &ppc440_pcix_host_conf_ops, + h, "pci-conf-idx", 4); + memory_region_init_io(&h->data_mem, OBJECT(s), &pci_host_data_le_ops, + h, "pci-conf-data", 4); + memory_region_init_io(&s->iomem, OBJECT(s), &pci_reg_ops, s, + "pci.reg", PPC440_REG_SIZE); + memory_region_add_subregion(&s->container, PCIC0_CFGADDR, &h->conf_mem); + memory_region_add_subregion(&s->container, PCIC0_CFGDATA, &h->data_mem); + memory_region_add_subregion(&s->container, PPC440_REG_BASE, &s->iomem); + sysbus_init_mmio(sbd, &s->container); +} + +static void ppc440_pcix_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = ppc440_pcix_realize; + dc->reset = ppc440_pcix_reset; +} + +static const TypeInfo ppc440_pcix_info = { + .name = TYPE_PPC440_PCIX_HOST_BRIDGE, + .parent = TYPE_PCI_HOST_BRIDGE, + .instance_size = sizeof(PPC440PCIXState), + .class_init = ppc440_pcix_class_init, +}; + +static void ppc440_pcix_register_types(void) +{ + type_register_static(&ppc440_pcix_info); +} + +type_init(ppc440_pcix_register_types) diff --git a/hw/ppc/ppc440_uc.c b/hw/ppc/ppc440_uc.c new file mode 100644 index 000000000..993e3ba95 --- /dev/null +++ b/hw/ppc/ppc440_uc.c @@ -0,0 +1,1377 @@ +/* + * QEMU PowerPC 440 
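pci_host_config_write() above tolerates sloppy guests by clearing the low two bits of whatever was written and forcing the enable bit (bit 31) on. The fixup reduces to:

    #include <assert.h>
    #include <stdint.h>

    /* reduction of the config-address fixup in pci_host_config_write() */
    static uint32_t fixup(uint64_t val)
    {
        return (val & 0xfffffffcULL) | (1UL << 31);
    }

    int main(void)
    {
        /* missing enable bit and low bits set, as some guests write */
        assert(fixup(0x00000803) == 0x80000800);
        return 0;
    }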
embedded processors emulation + * + * Copyright (c) 2012 François Revol + * Copyright (c) 2016-2019 BALATON Zoltan + * + * This work is licensed under the GNU GPL license version 2 or later. + * + */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "qemu/error-report.h" +#include "qapi/error.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "hw/irq.h" +#include "exec/memory.h" +#include "hw/ppc/ppc.h" +#include "hw/qdev-properties.h" +#include "hw/pci/pci.h" +#include "sysemu/block-backend.h" +#include "sysemu/reset.h" +#include "ppc440.h" +#include "qom/object.h" + +/*****************************************************************************/ +/* L2 Cache as SRAM */ +/* FIXME:fix names */ +enum { + DCR_L2CACHE_BASE = 0x30, + DCR_L2CACHE_CFG = DCR_L2CACHE_BASE, + DCR_L2CACHE_CMD, + DCR_L2CACHE_ADDR, + DCR_L2CACHE_DATA, + DCR_L2CACHE_STAT, + DCR_L2CACHE_CVER, + DCR_L2CACHE_SNP0, + DCR_L2CACHE_SNP1, + DCR_L2CACHE_END = DCR_L2CACHE_SNP1, +}; + +/* base is 460ex-specific, cf. U-Boot, ppc4xx-isram.h */ +enum { + DCR_ISRAM0_BASE = 0x20, + DCR_ISRAM0_SB0CR = DCR_ISRAM0_BASE, + DCR_ISRAM0_SB1CR, + DCR_ISRAM0_SB2CR, + DCR_ISRAM0_SB3CR, + DCR_ISRAM0_BEAR, + DCR_ISRAM0_BESR0, + DCR_ISRAM0_BESR1, + DCR_ISRAM0_PMEG, + DCR_ISRAM0_CID, + DCR_ISRAM0_REVID, + DCR_ISRAM0_DPC, + DCR_ISRAM0_END = DCR_ISRAM0_DPC +}; + +enum { + DCR_ISRAM1_BASE = 0xb0, + DCR_ISRAM1_SB0CR = DCR_ISRAM1_BASE, + /* single bank */ + DCR_ISRAM1_BEAR = DCR_ISRAM1_BASE + 0x04, + DCR_ISRAM1_BESR0, + DCR_ISRAM1_BESR1, + DCR_ISRAM1_PMEG, + DCR_ISRAM1_CID, + DCR_ISRAM1_REVID, + DCR_ISRAM1_DPC, + DCR_ISRAM1_END = DCR_ISRAM1_DPC +}; + +typedef struct ppc4xx_l2sram_t { + MemoryRegion bank[4]; + uint32_t l2cache[8]; + uint32_t isram0[11]; +} ppc4xx_l2sram_t; + +#ifdef MAP_L2SRAM +static void l2sram_update_mappings(ppc4xx_l2sram_t *l2sram, + uint32_t isarc, uint32_t isacntl, + uint32_t dsarc, uint32_t dsacntl) +{ + if (l2sram->isarc != isarc || + (l2sram->isacntl & 0x80000000) != (isacntl & 0x80000000)) { + if (l2sram->isacntl & 0x80000000) { + /* Unmap previously assigned memory region */ + memory_region_del_subregion(get_system_memory(), + &l2sram->isarc_ram); + } + if (isacntl & 0x80000000) { + /* Map new instruction memory region */ + memory_region_add_subregion(get_system_memory(), isarc, + &l2sram->isarc_ram); + } + } + if (l2sram->dsarc != dsarc || + (l2sram->dsacntl & 0x80000000) != (dsacntl & 0x80000000)) { + if (l2sram->dsacntl & 0x80000000) { + /* Beware not to unmap the region we just mapped */ + if (!(isacntl & 0x80000000) || l2sram->dsarc != isarc) { + /* Unmap previously assigned memory region */ + memory_region_del_subregion(get_system_memory(), + &l2sram->dsarc_ram); + } + } + if (dsacntl & 0x80000000) { + /* Beware not to remap the region we just mapped */ + if (!(isacntl & 0x80000000) || dsarc != isarc) { + /* Map new data memory region */ + memory_region_add_subregion(get_system_memory(), dsarc, + &l2sram->dsarc_ram); + } + } + } +} +#endif + +static uint32_t dcr_read_l2sram(void *opaque, int dcrn) +{ + ppc4xx_l2sram_t *l2sram = opaque; + uint32_t ret = 0; + + switch (dcrn) { + case DCR_L2CACHE_CFG: + case DCR_L2CACHE_CMD: + case DCR_L2CACHE_ADDR: + case DCR_L2CACHE_DATA: + case DCR_L2CACHE_STAT: + case DCR_L2CACHE_CVER: + case DCR_L2CACHE_SNP0: + case DCR_L2CACHE_SNP1: + ret = l2sram->l2cache[dcrn - DCR_L2CACHE_BASE]; + break; + + case DCR_ISRAM0_SB0CR: + case DCR_ISRAM0_SB1CR: + case DCR_ISRAM0_SB2CR: + case DCR_ISRAM0_SB3CR: + case DCR_ISRAM0_BEAR: + case DCR_ISRAM0_BESR0: + case 
DCR_ISRAM0_BESR1: + case DCR_ISRAM0_PMEG: + case DCR_ISRAM0_CID: + case DCR_ISRAM0_REVID: + case DCR_ISRAM0_DPC: + ret = l2sram->isram0[dcrn - DCR_ISRAM0_BASE]; + break; + + default: + break; + } + + return ret; +} + +static void dcr_write_l2sram(void *opaque, int dcrn, uint32_t val) +{ + /*ppc4xx_l2sram_t *l2sram = opaque;*/ + /* FIXME: Actually handle L2 cache mapping */ + + switch (dcrn) { + case DCR_L2CACHE_CFG: + case DCR_L2CACHE_CMD: + case DCR_L2CACHE_ADDR: + case DCR_L2CACHE_DATA: + case DCR_L2CACHE_STAT: + case DCR_L2CACHE_CVER: + case DCR_L2CACHE_SNP0: + case DCR_L2CACHE_SNP1: + /*l2sram->l2cache[dcrn - DCR_L2CACHE_BASE] = val;*/ + break; + + case DCR_ISRAM0_SB0CR: + case DCR_ISRAM0_SB1CR: + case DCR_ISRAM0_SB2CR: + case DCR_ISRAM0_SB3CR: + case DCR_ISRAM0_BEAR: + case DCR_ISRAM0_BESR0: + case DCR_ISRAM0_BESR1: + case DCR_ISRAM0_PMEG: + case DCR_ISRAM0_CID: + case DCR_ISRAM0_REVID: + case DCR_ISRAM0_DPC: + /*l2sram->isram0[dcrn - DCR_L2CACHE_BASE] = val;*/ + break; + + case DCR_ISRAM1_SB0CR: + case DCR_ISRAM1_BEAR: + case DCR_ISRAM1_BESR0: + case DCR_ISRAM1_BESR1: + case DCR_ISRAM1_PMEG: + case DCR_ISRAM1_CID: + case DCR_ISRAM1_REVID: + case DCR_ISRAM1_DPC: + /*l2sram->isram1[dcrn - DCR_L2CACHE_BASE] = val;*/ + break; + } + /*l2sram_update_mappings(l2sram, isarc, isacntl, dsarc, dsacntl);*/ +} + +static void l2sram_reset(void *opaque) +{ + ppc4xx_l2sram_t *l2sram = opaque; + + memset(l2sram->l2cache, 0, sizeof(l2sram->l2cache)); + l2sram->l2cache[DCR_L2CACHE_STAT - DCR_L2CACHE_BASE] = 0x80000000; + memset(l2sram->isram0, 0, sizeof(l2sram->isram0)); + /*l2sram_update_mappings(l2sram, isarc, isacntl, dsarc, dsacntl);*/ +} + +void ppc4xx_l2sram_init(CPUPPCState *env) +{ + ppc4xx_l2sram_t *l2sram; + + l2sram = g_malloc0(sizeof(*l2sram)); + /* XXX: Size is 4*64kB for 460ex, cf. 
U-Boot, ppc4xx-isram.h */ + memory_region_init_ram(&l2sram->bank[0], NULL, "ppc4xx.l2sram_bank0", + 64 * KiB, &error_abort); + memory_region_init_ram(&l2sram->bank[1], NULL, "ppc4xx.l2sram_bank1", + 64 * KiB, &error_abort); + memory_region_init_ram(&l2sram->bank[2], NULL, "ppc4xx.l2sram_bank2", + 64 * KiB, &error_abort); + memory_region_init_ram(&l2sram->bank[3], NULL, "ppc4xx.l2sram_bank3", + 64 * KiB, &error_abort); + qemu_register_reset(&l2sram_reset, l2sram); + ppc_dcr_register(env, DCR_L2CACHE_CFG, + l2sram, &dcr_read_l2sram, &dcr_write_l2sram); + ppc_dcr_register(env, DCR_L2CACHE_CMD, + l2sram, &dcr_read_l2sram, &dcr_write_l2sram); + ppc_dcr_register(env, DCR_L2CACHE_ADDR, + l2sram, &dcr_read_l2sram, &dcr_write_l2sram); + ppc_dcr_register(env, DCR_L2CACHE_DATA, + l2sram, &dcr_read_l2sram, &dcr_write_l2sram); + ppc_dcr_register(env, DCR_L2CACHE_STAT, + l2sram, &dcr_read_l2sram, &dcr_write_l2sram); + ppc_dcr_register(env, DCR_L2CACHE_CVER, + l2sram, &dcr_read_l2sram, &dcr_write_l2sram); + ppc_dcr_register(env, DCR_L2CACHE_SNP0, + l2sram, &dcr_read_l2sram, &dcr_write_l2sram); + ppc_dcr_register(env, DCR_L2CACHE_SNP1, + l2sram, &dcr_read_l2sram, &dcr_write_l2sram); + + ppc_dcr_register(env, DCR_ISRAM0_SB0CR, + l2sram, &dcr_read_l2sram, &dcr_write_l2sram); + ppc_dcr_register(env, DCR_ISRAM0_SB1CR, + l2sram, &dcr_read_l2sram, &dcr_write_l2sram); + ppc_dcr_register(env, DCR_ISRAM0_SB2CR, + l2sram, &dcr_read_l2sram, &dcr_write_l2sram); + ppc_dcr_register(env, DCR_ISRAM0_SB3CR, + l2sram, &dcr_read_l2sram, &dcr_write_l2sram); + ppc_dcr_register(env, DCR_ISRAM0_PMEG, + l2sram, &dcr_read_l2sram, &dcr_write_l2sram); + ppc_dcr_register(env, DCR_ISRAM0_DPC, + l2sram, &dcr_read_l2sram, &dcr_write_l2sram); + + ppc_dcr_register(env, DCR_ISRAM1_SB0CR, + l2sram, &dcr_read_l2sram, &dcr_write_l2sram); + ppc_dcr_register(env, DCR_ISRAM1_PMEG, + l2sram, &dcr_read_l2sram, &dcr_write_l2sram); + ppc_dcr_register(env, DCR_ISRAM1_DPC, + l2sram, &dcr_read_l2sram, &dcr_write_l2sram); +} + +/*****************************************************************************/ +/* Clocking Power on Reset */ +enum { + CPR0_CFGADDR = 0xC, + CPR0_CFGDATA = 0xD, + + CPR0_PLLD = 0x060, + CPR0_PLBED = 0x080, + CPR0_OPBD = 0x0C0, + CPR0_PERD = 0x0E0, + CPR0_AHBD = 0x100, +}; + +typedef struct ppc4xx_cpr_t { + uint32_t addr; +} ppc4xx_cpr_t; + +static uint32_t dcr_read_cpr(void *opaque, int dcrn) +{ + ppc4xx_cpr_t *cpr = opaque; + uint32_t ret = 0; + + switch (dcrn) { + case CPR0_CFGADDR: + ret = cpr->addr; + break; + case CPR0_CFGDATA: + switch (cpr->addr) { + case CPR0_PLLD: + ret = (0xb5 << 24) | (1 << 16) | (9 << 8); + break; + case CPR0_PLBED: + ret = (5 << 24); + break; + case CPR0_OPBD: + ret = (2 << 24); + break; + case CPR0_PERD: + case CPR0_AHBD: + ret = (1 << 24); + break; + default: + break; + } + break; + default: + break; + } + + return ret; +} + +static void dcr_write_cpr(void *opaque, int dcrn, uint32_t val) +{ + ppc4xx_cpr_t *cpr = opaque; + + switch (dcrn) { + case CPR0_CFGADDR: + cpr->addr = val; + break; + case CPR0_CFGDATA: + break; + default: + break; + } +} + +static void ppc4xx_cpr_reset(void *opaque) +{ + ppc4xx_cpr_t *cpr = opaque; + + cpr->addr = 0; +} + +void ppc4xx_cpr_init(CPUPPCState *env) +{ + ppc4xx_cpr_t *cpr; + + cpr = g_malloc0(sizeof(*cpr)); + ppc_dcr_register(env, CPR0_CFGADDR, cpr, &dcr_read_cpr, &dcr_write_cpr); + ppc_dcr_register(env, CPR0_CFGDATA, cpr, &dcr_read_cpr, &dcr_write_cpr); + qemu_register_reset(ppc4xx_cpr_reset, cpr); +} + 
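The CPR model does not emulate real clock dividers; reads through the CPR0_CFGADDR/CPR0_CFGDATA pair simply answer with fixed, plausible values such as CPR0_PLLD = (0xb5 << 24) | (1 << 16) | (9 << 8). A standalone check of that byte-lane packing (the hardware meaning of each field is not spelled out in the patch and is not asserted here):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t plld = (0xb5u << 24) | (1u << 16) | (9u << 8);

        assert(((plld >> 24) & 0xff) == 0xb5);
        assert(((plld >> 16) & 0xff) == 0x01);
        assert(((plld >> 8) & 0xff) == 0x09);
        return 0;
    }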
+/*****************************************************************************/ +/* System DCRs */ +typedef struct ppc4xx_sdr_t ppc4xx_sdr_t; +struct ppc4xx_sdr_t { + uint32_t addr; +}; + +enum { + SDR0_CFGADDR = 0x00e, + SDR0_CFGDATA, + SDR0_STRP0 = 0x020, + SDR0_STRP1, + SDR0_102 = 0x66, + SDR0_103, + SDR0_128 = 0x80, + SDR0_ECID3 = 0x083, + SDR0_DDR0 = 0x0e1, + SDR0_USB0 = 0x320, +}; + +enum { + PESDR0_LOOP = 0x303, + PESDR0_RCSSET, + PESDR0_RCSSTS, + PESDR0_RSTSTA = 0x310, + PESDR1_LOOP = 0x343, + PESDR1_RCSSET, + PESDR1_RCSSTS, + PESDR1_RSTSTA = 0x365, +}; + +#define SDR0_DDR0_DDRM_ENCODE(n) ((((unsigned long)(n)) & 0x03) << 29) +#define SDR0_DDR0_DDRM_DDR1 0x20000000 +#define SDR0_DDR0_DDRM_DDR2 0x40000000 + +static uint32_t dcr_read_sdr(void *opaque, int dcrn) +{ + ppc4xx_sdr_t *sdr = opaque; + uint32_t ret = 0; + + switch (dcrn) { + case SDR0_CFGADDR: + ret = sdr->addr; + break; + case SDR0_CFGDATA: + switch (sdr->addr) { + case SDR0_STRP0: + ret = (0xb5 << 8) | (1 << 4) | 9; + break; + case SDR0_STRP1: + ret = (5 << 29) | (2 << 26) | (1 << 24); + break; + case SDR0_ECID3: + ret = 1 << 20; /* No Security/Kasumi support */ + break; + case SDR0_DDR0: + ret = SDR0_DDR0_DDRM_ENCODE(1) | SDR0_DDR0_DDRM_DDR1; + break; + case PESDR0_RCSSET: + case PESDR1_RCSSET: + ret = (1 << 24) | (1 << 16); + break; + case PESDR0_RCSSTS: + case PESDR1_RCSSTS: + ret = (1 << 16) | (1 << 12); + break; + case PESDR0_RSTSTA: + case PESDR1_RSTSTA: + ret = 1; + break; + case PESDR0_LOOP: + case PESDR1_LOOP: + ret = 1 << 12; + break; + default: + break; + } + break; + default: + break; + } + + return ret; +} + +static void dcr_write_sdr(void *opaque, int dcrn, uint32_t val) +{ + ppc4xx_sdr_t *sdr = opaque; + + switch (dcrn) { + case SDR0_CFGADDR: + sdr->addr = val; + break; + case SDR0_CFGDATA: + switch (sdr->addr) { + case 0x00: /* B0CR */ + break; + default: + break; + } + break; + default: + break; + } +} + +static void sdr_reset(void *opaque) +{ + ppc4xx_sdr_t *sdr = opaque; + + sdr->addr = 0; +} + +void ppc4xx_sdr_init(CPUPPCState *env) +{ + ppc4xx_sdr_t *sdr; + + sdr = g_malloc0(sizeof(*sdr)); + qemu_register_reset(&sdr_reset, sdr); + ppc_dcr_register(env, SDR0_CFGADDR, + sdr, &dcr_read_sdr, &dcr_write_sdr); + ppc_dcr_register(env, SDR0_CFGDATA, + sdr, &dcr_read_sdr, &dcr_write_sdr); + ppc_dcr_register(env, SDR0_102, + sdr, &dcr_read_sdr, &dcr_write_sdr); + ppc_dcr_register(env, SDR0_103, + sdr, &dcr_read_sdr, &dcr_write_sdr); + ppc_dcr_register(env, SDR0_128, + sdr, &dcr_read_sdr, &dcr_write_sdr); + ppc_dcr_register(env, SDR0_USB0, + sdr, &dcr_read_sdr, &dcr_write_sdr); +} + +/*****************************************************************************/ +/* SDRAM controller */ +typedef struct ppc440_sdram_t { + uint32_t addr; + int nbanks; + MemoryRegion containers[4]; /* used for clipping */ + MemoryRegion *ram_memories; + hwaddr ram_bases[4]; + hwaddr ram_sizes[4]; + uint32_t bcr[4]; +} ppc440_sdram_t; + +enum { + SDRAM0_CFGADDR = 0x10, + SDRAM0_CFGDATA, + SDRAM_R0BAS = 0x40, + SDRAM_R1BAS, + SDRAM_R2BAS, + SDRAM_R3BAS, + SDRAM_CONF1HB = 0x45, + SDRAM_PLBADDULL = 0x4a, + SDRAM_CONF1LL = 0x4b, + SDRAM_CONFPATHB = 0x4f, + SDRAM_PLBADDUHB = 0x50, +}; + +static uint32_t sdram_bcr(hwaddr ram_base, hwaddr ram_size) +{ + uint32_t bcr; + + switch (ram_size) { + case (8 * MiB): + bcr = 0xffc0; + break; + case (16 * MiB): + bcr = 0xff80; + break; + case (32 * MiB): + bcr = 0xff00; + break; + case (64 * MiB): + bcr = 0xfe00; + break; + case (128 * MiB): + bcr = 0xfc00; + break; + case (256 * MiB): + bcr = 
0xf800; + break; + case (512 * MiB): + bcr = 0xf000; + break; + case (1 * GiB): + bcr = 0xe000; + break; + case (2 * GiB): + bcr = 0xc000; + break; + case (4 * GiB): + bcr = 0x8000; + break; + default: + error_report("invalid RAM size " TARGET_FMT_plx, ram_size); + return 0; + } + bcr |= ram_base >> 2 & 0xffe00000; + bcr |= 1; + + return bcr; +} + +static inline hwaddr sdram_base(uint32_t bcr) +{ + return (bcr & 0xffe00000) << 2; +} + +static uint64_t sdram_size(uint32_t bcr) +{ + uint64_t size; + int sh; + + sh = 1024 - ((bcr >> 6) & 0x3ff); + size = 8 * MiB * sh; + + return size; +} + +static void sdram_set_bcr(ppc440_sdram_t *sdram, int i, + uint32_t bcr, int enabled) +{ + if (sdram->bcr[i] & 1) { + /* First unmap RAM if enabled */ + memory_region_del_subregion(get_system_memory(), + &sdram->containers[i]); + memory_region_del_subregion(&sdram->containers[i], + &sdram->ram_memories[i]); + object_unparent(OBJECT(&sdram->containers[i])); + } + sdram->bcr[i] = bcr & 0xffe0ffc1; + if (enabled && (bcr & 1)) { + memory_region_init(&sdram->containers[i], NULL, "sdram-containers", + sdram_size(bcr)); + memory_region_add_subregion(&sdram->containers[i], 0, + &sdram->ram_memories[i]); + memory_region_add_subregion(get_system_memory(), + sdram_base(bcr), + &sdram->containers[i]); + } +} + +static void sdram_map_bcr(ppc440_sdram_t *sdram) +{ + int i; + + for (i = 0; i < sdram->nbanks; i++) { + if (sdram->ram_sizes[i] != 0) { + sdram_set_bcr(sdram, i, sdram_bcr(sdram->ram_bases[i], + sdram->ram_sizes[i]), 1); + } else { + sdram_set_bcr(sdram, i, 0, 0); + } + } +} + +static uint32_t dcr_read_sdram(void *opaque, int dcrn) +{ + ppc440_sdram_t *sdram = opaque; + uint32_t ret = 0; + + switch (dcrn) { + case SDRAM_R0BAS: + case SDRAM_R1BAS: + case SDRAM_R2BAS: + case SDRAM_R3BAS: + if (sdram->ram_sizes[dcrn - SDRAM_R0BAS]) { + ret = sdram_bcr(sdram->ram_bases[dcrn - SDRAM_R0BAS], + sdram->ram_sizes[dcrn - SDRAM_R0BAS]); + } + break; + case SDRAM_CONF1HB: + case SDRAM_CONF1LL: + case SDRAM_CONFPATHB: + case SDRAM_PLBADDULL: + case SDRAM_PLBADDUHB: + break; + case SDRAM0_CFGADDR: + ret = sdram->addr; + break; + case SDRAM0_CFGDATA: + switch (sdram->addr) { + case 0x14: /* SDRAM_MCSTAT (405EX) */ + case 0x1F: + ret = 0x80000000; + break; + case 0x21: /* SDRAM_MCOPT2 */ + ret = 0x08000000; + break; + case 0x40: /* SDRAM_MB0CF */ + ret = 0x00008001; + break; + case 0x7A: /* SDRAM_DLCR */ + ret = 0x02000000; + break; + case 0xE1: /* SDR0_DDR0 */ + ret = SDR0_DDR0_DDRM_ENCODE(1) | SDR0_DDR0_DDRM_DDR1; + break; + default: + break; + } + break; + default: + break; + } + + return ret; +} + +static void dcr_write_sdram(void *opaque, int dcrn, uint32_t val) +{ + ppc440_sdram_t *sdram = opaque; + + switch (dcrn) { + case SDRAM_R0BAS: + case SDRAM_R1BAS: + case SDRAM_R2BAS: + case SDRAM_R3BAS: + case SDRAM_CONF1HB: + case SDRAM_CONF1LL: + case SDRAM_CONFPATHB: + case SDRAM_PLBADDULL: + case SDRAM_PLBADDUHB: + break; + case SDRAM0_CFGADDR: + sdram->addr = val; + break; + case SDRAM0_CFGDATA: + switch (sdram->addr) { + case 0x00: /* B0CR */ + break; + default: + break; + } + break; + default: + break; + } +} + +static void sdram_reset(void *opaque) +{ + ppc440_sdram_t *sdram = opaque; + + sdram->addr = 0; +} + +void ppc440_sdram_init(CPUPPCState *env, int nbanks, + MemoryRegion *ram_memories, + hwaddr *ram_bases, hwaddr *ram_sizes, + int do_init) +{ + ppc440_sdram_t *sdram; + + sdram = g_malloc0(sizeof(*sdram)); + sdram->nbanks = nbanks; + sdram->ram_memories = ram_memories; + memcpy(sdram->ram_bases, ram_bases, nbanks * 
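sdram_bcr() packs an SDRAM bank's base and size into one register, and sdram_base()/sdram_size() invert it: the base lives in bits 31:21 shifted right by 2, and the size code in bits 15:6 decodes as a multiple of 8 MiB. A standalone round-trip check for a 256 MiB bank at 0x10000000:

    #include <assert.h>
    #include <stdint.h>

    #define MiB (1024 * 1024)

    int main(void)
    {
        uint64_t base = 0x10000000, size = 256ULL * MiB;

        /* encode, as in sdram_bcr() */
        uint32_t bcr = 0xf800;                 /* size code for 256 MiB */
        bcr |= (base >> 2) & 0xffe00000;       /* base field */
        bcr |= 1;                              /* enable bit */

        /* decode, as in sdram_base() and sdram_size() */
        assert(((uint64_t)(bcr & 0xffe00000) << 2) == base);
        assert(8ULL * MiB * (1024 - ((bcr >> 6) & 0x3ff)) == size);
        return 0;
    }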
sizeof(hwaddr)); + memcpy(sdram->ram_sizes, ram_sizes, nbanks * sizeof(hwaddr)); + qemu_register_reset(&sdram_reset, sdram); + ppc_dcr_register(env, SDRAM0_CFGADDR, + sdram, &dcr_read_sdram, &dcr_write_sdram); + ppc_dcr_register(env, SDRAM0_CFGDATA, + sdram, &dcr_read_sdram, &dcr_write_sdram); + if (do_init) { + sdram_map_bcr(sdram); + } + + ppc_dcr_register(env, SDRAM_R0BAS, + sdram, &dcr_read_sdram, &dcr_write_sdram); + ppc_dcr_register(env, SDRAM_R1BAS, + sdram, &dcr_read_sdram, &dcr_write_sdram); + ppc_dcr_register(env, SDRAM_R2BAS, + sdram, &dcr_read_sdram, &dcr_write_sdram); + ppc_dcr_register(env, SDRAM_R3BAS, + sdram, &dcr_read_sdram, &dcr_write_sdram); + ppc_dcr_register(env, SDRAM_CONF1HB, + sdram, &dcr_read_sdram, &dcr_write_sdram); + ppc_dcr_register(env, SDRAM_PLBADDULL, + sdram, &dcr_read_sdram, &dcr_write_sdram); + ppc_dcr_register(env, SDRAM_CONF1LL, + sdram, &dcr_read_sdram, &dcr_write_sdram); + ppc_dcr_register(env, SDRAM_CONFPATHB, + sdram, &dcr_read_sdram, &dcr_write_sdram); + ppc_dcr_register(env, SDRAM_PLBADDUHB, + sdram, &dcr_read_sdram, &dcr_write_sdram); +} + +/*****************************************************************************/ +/* PLB to AHB bridge */ +enum { + AHB_TOP = 0xA4, + AHB_BOT = 0xA5, +}; + +typedef struct ppc4xx_ahb_t { + uint32_t top; + uint32_t bot; +} ppc4xx_ahb_t; + +static uint32_t dcr_read_ahb(void *opaque, int dcrn) +{ + ppc4xx_ahb_t *ahb = opaque; + uint32_t ret = 0; + + switch (dcrn) { + case AHB_TOP: + ret = ahb->top; + break; + case AHB_BOT: + ret = ahb->bot; + break; + default: + break; + } + + return ret; +} + +static void dcr_write_ahb(void *opaque, int dcrn, uint32_t val) +{ + ppc4xx_ahb_t *ahb = opaque; + + switch (dcrn) { + case AHB_TOP: + ahb->top = val; + break; + case AHB_BOT: + ahb->bot = val; + break; + } +} + +static void ppc4xx_ahb_reset(void *opaque) +{ + ppc4xx_ahb_t *ahb = opaque; + + /* No error */ + ahb->top = 0; + ahb->bot = 0; +} + +void ppc4xx_ahb_init(CPUPPCState *env) +{ + ppc4xx_ahb_t *ahb; + + ahb = g_malloc0(sizeof(*ahb)); + ppc_dcr_register(env, AHB_TOP, ahb, &dcr_read_ahb, &dcr_write_ahb); + ppc_dcr_register(env, AHB_BOT, ahb, &dcr_read_ahb, &dcr_write_ahb); + qemu_register_reset(ppc4xx_ahb_reset, ahb); +} + +/*****************************************************************************/ +/* DMA controller */ + +#define DMA0_CR_CE (1 << 31) +#define DMA0_CR_PW (1 << 26 | 1 << 25) +#define DMA0_CR_DAI (1 << 24) +#define DMA0_CR_SAI (1 << 23) +#define DMA0_CR_DEC (1 << 2) + +enum { + DMA0_CR = 0x00, + DMA0_CT, + DMA0_SAH, + DMA0_SAL, + DMA0_DAH, + DMA0_DAL, + DMA0_SGH, + DMA0_SGL, + + DMA0_SR = 0x20, + DMA0_SGC = 0x23, + DMA0_SLP = 0x25, + DMA0_POL = 0x26, +}; + +typedef struct { + uint32_t cr; + uint32_t ct; + uint64_t sa; + uint64_t da; + uint64_t sg; +} PPC4xxDmaChnl; + +typedef struct { + int base; + PPC4xxDmaChnl ch[4]; + uint32_t sr; +} PPC4xxDmaState; + +static uint32_t dcr_read_dma(void *opaque, int dcrn) +{ + PPC4xxDmaState *dma = opaque; + uint32_t val = 0; + int addr = dcrn - dma->base; + int chnl = addr / 8; + + switch (addr) { + case 0x00 ... 
0x1f: + switch (addr % 8) { + case DMA0_CR: + val = dma->ch[chnl].cr; + break; + case DMA0_CT: + val = dma->ch[chnl].ct; + break; + case DMA0_SAH: + val = dma->ch[chnl].sa >> 32; + break; + case DMA0_SAL: + val = dma->ch[chnl].sa; + break; + case DMA0_DAH: + val = dma->ch[chnl].da >> 32; + break; + case DMA0_DAL: + val = dma->ch[chnl].da; + break; + case DMA0_SGH: + val = dma->ch[chnl].sg >> 32; + break; + case DMA0_SGL: + val = dma->ch[chnl].sg; + break; + } + break; + case DMA0_SR: + val = dma->sr; + break; + default: + qemu_log_mask(LOG_UNIMP, "%s: unimplemented register %x (%d, %x)\n", + __func__, dcrn, chnl, addr); + } + + return val; +} + +static void dcr_write_dma(void *opaque, int dcrn, uint32_t val) +{ + PPC4xxDmaState *dma = opaque; + int addr = dcrn - dma->base; + int chnl = addr / 8; + + switch (addr) { + case 0x00 ... 0x1f: + switch (addr % 8) { + case DMA0_CR: + dma->ch[chnl].cr = val; + if (val & DMA0_CR_CE) { + int count = dma->ch[chnl].ct & 0xffff; + + if (count) { + int width, i, sidx, didx; + uint8_t *rptr, *wptr; + hwaddr rlen, wlen; + + sidx = didx = 0; + width = 1 << ((val & DMA0_CR_PW) >> 25); + rptr = cpu_physical_memory_map(dma->ch[chnl].sa, &rlen, + false); + wptr = cpu_physical_memory_map(dma->ch[chnl].da, &wlen, + true); + if (rptr && wptr) { + if (!(val & DMA0_CR_DEC) && + val & DMA0_CR_SAI && val & DMA0_CR_DAI) { + /* optimise common case */ + memmove(wptr, rptr, count * width); + sidx = didx = count * width; + } else { + /* do it the slow way */ + for (sidx = didx = i = 0; i < count; i++) { + uint64_t v = ldn_le_p(rptr + sidx, width); + stn_le_p(wptr + didx, width, v); + if (val & DMA0_CR_SAI) { + sidx += width; + } + if (val & DMA0_CR_DAI) { + didx += width; + } + } + } + } + if (wptr) { + cpu_physical_memory_unmap(wptr, wlen, 1, didx); + } + if (rptr) { + cpu_physical_memory_unmap(rptr, rlen, 0, sidx); + } + } + } + break; + case DMA0_CT: + dma->ch[chnl].ct = val; + break; + case DMA0_SAH: + dma->ch[chnl].sa &= 0xffffffffULL; + dma->ch[chnl].sa |= (uint64_t)val << 32; + break; + case DMA0_SAL: + dma->ch[chnl].sa &= 0xffffffff00000000ULL; + dma->ch[chnl].sa |= val; + break; + case DMA0_DAH: + dma->ch[chnl].da &= 0xffffffffULL; + dma->ch[chnl].da |= (uint64_t)val << 32; + break; + case DMA0_DAL: + dma->ch[chnl].da &= 0xffffffff00000000ULL; + dma->ch[chnl].da |= val; + break; + case DMA0_SGH: + dma->ch[chnl].sg &= 0xffffffffULL; + dma->ch[chnl].sg |= (uint64_t)val << 32; + break; + case DMA0_SGL: + dma->ch[chnl].sg &= 0xffffffff00000000ULL; + dma->ch[chnl].sg |= val; + break; + } + break; + case DMA0_SR: + dma->sr &= ~val; + break; + default: + qemu_log_mask(LOG_UNIMP, "%s: unimplemented register %x (%d, %x)\n", + __func__, dcrn, chnl, addr); + } +} + +static void ppc4xx_dma_reset(void *opaque) +{ + PPC4xxDmaState *dma = opaque; + int dma_base = dma->base; + + memset(dma, 0, sizeof(*dma)); + dma->base = dma_base; +} + +void ppc4xx_dma_init(CPUPPCState *env, int dcr_base) +{ + PPC4xxDmaState *dma; + int i; + + dma = g_malloc0(sizeof(*dma)); + dma->base = dcr_base; + qemu_register_reset(&ppc4xx_dma_reset, dma); + for (i = 0; i < 4; i++) { + ppc_dcr_register(env, dcr_base + i * 8 + DMA0_CR, + dma, &dcr_read_dma, &dcr_write_dma); + ppc_dcr_register(env, dcr_base + i * 8 + DMA0_CT, + dma, &dcr_read_dma, &dcr_write_dma); + ppc_dcr_register(env, dcr_base + i * 8 + DMA0_SAH, + dma, &dcr_read_dma, &dcr_write_dma); + ppc_dcr_register(env, dcr_base + i * 8 + DMA0_SAL, + dma, &dcr_read_dma, &dcr_write_dma); + ppc_dcr_register(env, dcr_base + i * 8 + DMA0_DAH, + dma, 
&dcr_read_dma, &dcr_write_dma); + ppc_dcr_register(env, dcr_base + i * 8 + DMA0_DAL, + dma, &dcr_read_dma, &dcr_write_dma); + ppc_dcr_register(env, dcr_base + i * 8 + DMA0_SGH, + dma, &dcr_read_dma, &dcr_write_dma); + ppc_dcr_register(env, dcr_base + i * 8 + DMA0_SGL, + dma, &dcr_read_dma, &dcr_write_dma); + } + ppc_dcr_register(env, dcr_base + DMA0_SR, + dma, &dcr_read_dma, &dcr_write_dma); + ppc_dcr_register(env, dcr_base + DMA0_SGC, + dma, &dcr_read_dma, &dcr_write_dma); + ppc_dcr_register(env, dcr_base + DMA0_SLP, + dma, &dcr_read_dma, &dcr_write_dma); + ppc_dcr_register(env, dcr_base + DMA0_POL, + dma, &dcr_read_dma, &dcr_write_dma); +} + +/*****************************************************************************/ +/* PCI Express controller */ +/* FIXME: This is not complete and does not work, only implemented partially + * to allow firmware and guests to find an empty bus. Cards should use PCI. + */ +#include "hw/pci/pcie_host.h" + +#define TYPE_PPC460EX_PCIE_HOST "ppc460ex-pcie-host" +OBJECT_DECLARE_SIMPLE_TYPE(PPC460EXPCIEState, PPC460EX_PCIE_HOST) + +struct PPC460EXPCIEState { + PCIExpressHost host; + + MemoryRegion iomem; + qemu_irq irq[4]; + int32_t dcrn_base; + + uint64_t cfg_base; + uint32_t cfg_mask; + uint64_t msg_base; + uint32_t msg_mask; + uint64_t omr1_base; + uint64_t omr1_mask; + uint64_t omr2_base; + uint64_t omr2_mask; + uint64_t omr3_base; + uint64_t omr3_mask; + uint64_t reg_base; + uint32_t reg_mask; + uint32_t special; + uint32_t cfg; +}; + +#define DCRN_PCIE0_BASE 0x100 +#define DCRN_PCIE1_BASE 0x120 + +enum { + PEGPL_CFGBAH = 0x0, + PEGPL_CFGBAL, + PEGPL_CFGMSK, + PEGPL_MSGBAH, + PEGPL_MSGBAL, + PEGPL_MSGMSK, + PEGPL_OMR1BAH, + PEGPL_OMR1BAL, + PEGPL_OMR1MSKH, + PEGPL_OMR1MSKL, + PEGPL_OMR2BAH, + PEGPL_OMR2BAL, + PEGPL_OMR2MSKH, + PEGPL_OMR2MSKL, + PEGPL_OMR3BAH, + PEGPL_OMR3BAL, + PEGPL_OMR3MSKH, + PEGPL_OMR3MSKL, + PEGPL_REGBAH, + PEGPL_REGBAL, + PEGPL_REGMSK, + PEGPL_SPECIAL, + PEGPL_CFG, +}; + +static uint32_t dcr_read_pcie(void *opaque, int dcrn) +{ + PPC460EXPCIEState *state = opaque; + uint32_t ret = 0; + + switch (dcrn - state->dcrn_base) { + case PEGPL_CFGBAH: + ret = state->cfg_base >> 32; + break; + case PEGPL_CFGBAL: + ret = state->cfg_base; + break; + case PEGPL_CFGMSK: + ret = state->cfg_mask; + break; + case PEGPL_MSGBAH: + ret = state->msg_base >> 32; + break; + case PEGPL_MSGBAL: + ret = state->msg_base; + break; + case PEGPL_MSGMSK: + ret = state->msg_mask; + break; + case PEGPL_OMR1BAH: + ret = state->omr1_base >> 32; + break; + case PEGPL_OMR1BAL: + ret = state->omr1_base; + break; + case PEGPL_OMR1MSKH: + ret = state->omr1_mask >> 32; + break; + case PEGPL_OMR1MSKL: + ret = state->omr1_mask; + break; + case PEGPL_OMR2BAH: + ret = state->omr2_base >> 32; + break; + case PEGPL_OMR2BAL: + ret = state->omr2_base; + break; + case PEGPL_OMR2MSKH: + ret = state->omr2_mask >> 32; + break; + case PEGPL_OMR2MSKL: + ret = state->omr2_mask; + break; + case PEGPL_OMR3BAH: + ret = state->omr3_base >> 32; + break; + case PEGPL_OMR3BAL: + ret = state->omr3_base; + break; + case PEGPL_OMR3MSKH: + ret = state->omr3_mask >> 32; + break; + case PEGPL_OMR3MSKL: + ret = state->omr3_mask; + break; + case PEGPL_REGBAH: + ret = state->reg_base >> 32; + break; + case PEGPL_REGBAL: + ret = state->reg_base; + break; + case PEGPL_REGMSK: + ret = state->reg_mask; + break; + case PEGPL_SPECIAL: + ret = state->special; + break; + case PEGPL_CFG: + ret = state->cfg; + break; + } + + return ret; +} + +static void dcr_write_pcie(void *opaque, int dcrn, uint32_t val) +{ +
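+    /*
+     * Each 64-bit base address below is exposed as a high/low pair of
+     * 32-bit DCRs (...BAH holds the upper word, ...BAL the lower word);
+     * a write updates one half of the value and preserves the other.
+     */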
PPC460EXPCIEState *s = opaque; + uint64_t size; + + switch (dcrn - s->dcrn_base) { + case PEGPL_CFGBAH: + s->cfg_base = ((uint64_t)val << 32) | (s->cfg_base & 0xffffffff); + break; + case PEGPL_CFGBAL: + s->cfg_base = (s->cfg_base & 0xffffffff00000000ULL) | val; + break; + case PEGPL_CFGMSK: + s->cfg_mask = val; + size = ~(val & 0xfffffffe) + 1; + pcie_host_mmcfg_update(PCIE_HOST_BRIDGE(s), val & 1, s->cfg_base, size); + break; + case PEGPL_MSGBAH: + s->msg_base = ((uint64_t)val << 32) | (s->msg_base & 0xffffffff); + break; + case PEGPL_MSGBAL: + s->msg_base = (s->msg_base & 0xffffffff00000000ULL) | val; + break; + case PEGPL_MSGMSK: + s->msg_mask = val; + break; + case PEGPL_OMR1BAH: + s->omr1_base = ((uint64_t)val << 32) | (s->omr1_base & 0xffffffff); + break; + case PEGPL_OMR1BAL: + s->omr1_base = (s->omr1_base & 0xffffffff00000000ULL) | val; + break; + case PEGPL_OMR1MSKH: + s->omr1_mask = ((uint64_t)val << 32) | (s->omr1_mask & 0xffffffff); + break; + case PEGPL_OMR1MSKL: + s->omr1_mask = (s->omr1_mask & 0xffffffff00000000ULL) | val; + break; + case PEGPL_OMR2BAH: + s->omr2_base = ((uint64_t)val << 32) | (s->omr2_base & 0xffffffff); + break; + case PEGPL_OMR2BAL: + s->omr2_base = (s->omr2_base & 0xffffffff00000000ULL) | val; + break; + case PEGPL_OMR2MSKH: + s->omr2_mask = ((uint64_t)val << 32) | (s->omr2_mask & 0xffffffff); + break; + case PEGPL_OMR2MSKL: + s->omr2_mask = (s->omr2_mask & 0xffffffff00000000ULL) | val; + break; + case PEGPL_OMR3BAH: + s->omr3_base = ((uint64_t)val << 32) | (s->omr3_base & 0xffffffff); + break; + case PEGPL_OMR3BAL: + s->omr3_base = (s->omr3_base & 0xffffffff00000000ULL) | val; + break; + case PEGPL_OMR3MSKH: + s->omr3_mask = ((uint64_t)val << 32) | (s->omr3_mask & 0xffffffff); + break; + case PEGPL_OMR3MSKL: + s->omr3_mask = (s->omr3_mask & 0xffffffff00000000ULL) | val; + break; + case PEGPL_REGBAH: + s->reg_base = ((uint64_t)val << 32) | (s->reg_base & 0xffffffff); + break; + case PEGPL_REGBAL: + s->reg_base = (s->reg_base & 0xffffffff00000000ULL) | val; + break; + case PEGPL_REGMSK: + s->reg_mask = val; + /* FIXME: how is size encoded? */ + size = (val == 0x7001 ? 
4096 : ~(val & 0xfffffffe) + 1); + break; + case PEGPL_SPECIAL: + s->special = val; + break; + case PEGPL_CFG: + s->cfg = val; + break; + } +} + +static void ppc460ex_set_irq(void *opaque, int irq_num, int level) +{ + PPC460EXPCIEState *s = opaque; + qemu_set_irq(s->irq[irq_num], level); +} + +static void ppc460ex_pcie_realize(DeviceState *dev, Error **errp) +{ + PPC460EXPCIEState *s = PPC460EX_PCIE_HOST(dev); + PCIHostState *pci = PCI_HOST_BRIDGE(dev); + int i, id; + char buf[16]; + + switch (s->dcrn_base) { + case DCRN_PCIE0_BASE: + id = 0; + break; + case DCRN_PCIE1_BASE: + id = 1; + break; + default: + error_setg(errp, "invalid PCIe DCRN base"); + return; + } + snprintf(buf, sizeof(buf), "pcie%d-io", id); + memory_region_init(&s->iomem, OBJECT(s), buf, UINT64_MAX); + for (i = 0; i < 4; i++) { + sysbus_init_irq(SYS_BUS_DEVICE(dev), &s->irq[i]); + } + snprintf(buf, sizeof(buf), "pcie.%d", id); + pci->bus = pci_register_root_bus(DEVICE(s), buf, ppc460ex_set_irq, + pci_swizzle_map_irq_fn, s, &s->iomem, + get_system_io(), 0, 4, TYPE_PCIE_BUS); +} + +static Property ppc460ex_pcie_props[] = { + DEFINE_PROP_INT32("dcrn-base", PPC460EXPCIEState, dcrn_base, -1), + DEFINE_PROP_END_OF_LIST(), +}; + +static void ppc460ex_pcie_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); + dc->realize = ppc460ex_pcie_realize; + device_class_set_props(dc, ppc460ex_pcie_props); + dc->hotpluggable = false; +} + +static const TypeInfo ppc460ex_pcie_host_info = { + .name = TYPE_PPC460EX_PCIE_HOST, + .parent = TYPE_PCIE_HOST_BRIDGE, + .instance_size = sizeof(PPC460EXPCIEState), + .class_init = ppc460ex_pcie_class_init, +}; + +static void ppc460ex_pcie_register(void) +{ + type_register_static(&ppc460ex_pcie_host_info); +} + +type_init(ppc460ex_pcie_register) + +static void ppc460ex_pcie_register_dcrs(PPC460EXPCIEState *s, CPUPPCState *env) +{ + ppc_dcr_register(env, s->dcrn_base + PEGPL_CFGBAH, s, + &dcr_read_pcie, &dcr_write_pcie); + ppc_dcr_register(env, s->dcrn_base + PEGPL_CFGBAL, s, + &dcr_read_pcie, &dcr_write_pcie); + ppc_dcr_register(env, s->dcrn_base + PEGPL_CFGMSK, s, + &dcr_read_pcie, &dcr_write_pcie); + ppc_dcr_register(env, s->dcrn_base + PEGPL_MSGBAH, s, + &dcr_read_pcie, &dcr_write_pcie); + ppc_dcr_register(env, s->dcrn_base + PEGPL_MSGBAL, s, + &dcr_read_pcie, &dcr_write_pcie); + ppc_dcr_register(env, s->dcrn_base + PEGPL_MSGMSK, s, + &dcr_read_pcie, &dcr_write_pcie); + ppc_dcr_register(env, s->dcrn_base + PEGPL_OMR1BAH, s, + &dcr_read_pcie, &dcr_write_pcie); + ppc_dcr_register(env, s->dcrn_base + PEGPL_OMR1BAL, s, + &dcr_read_pcie, &dcr_write_pcie); + ppc_dcr_register(env, s->dcrn_base + PEGPL_OMR1MSKH, s, + &dcr_read_pcie, &dcr_write_pcie); + ppc_dcr_register(env, s->dcrn_base + PEGPL_OMR1MSKL, s, + &dcr_read_pcie, &dcr_write_pcie); + ppc_dcr_register(env, s->dcrn_base + PEGPL_OMR2BAH, s, + &dcr_read_pcie, &dcr_write_pcie); + ppc_dcr_register(env, s->dcrn_base + PEGPL_OMR2BAL, s, + &dcr_read_pcie, &dcr_write_pcie); + ppc_dcr_register(env, s->dcrn_base + PEGPL_OMR2MSKH, s, + &dcr_read_pcie, &dcr_write_pcie); + ppc_dcr_register(env, s->dcrn_base + PEGPL_OMR2MSKL, s, + &dcr_read_pcie, &dcr_write_pcie); + ppc_dcr_register(env, s->dcrn_base + PEGPL_OMR3BAH, s, + &dcr_read_pcie, &dcr_write_pcie); + ppc_dcr_register(env, s->dcrn_base + PEGPL_OMR3BAL, s, + &dcr_read_pcie, &dcr_write_pcie); + ppc_dcr_register(env, s->dcrn_base + PEGPL_OMR3MSKH, s, + &dcr_read_pcie, &dcr_write_pcie); + ppc_dcr_register(env, s->dcrn_base + 
PEGPL_OMR3MSKL, s, + &dcr_read_pcie, &dcr_write_pcie); + ppc_dcr_register(env, s->dcrn_base + PEGPL_REGBAH, s, + &dcr_read_pcie, &dcr_write_pcie); + ppc_dcr_register(env, s->dcrn_base + PEGPL_REGBAL, s, + &dcr_read_pcie, &dcr_write_pcie); + ppc_dcr_register(env, s->dcrn_base + PEGPL_REGMSK, s, + &dcr_read_pcie, &dcr_write_pcie); + ppc_dcr_register(env, s->dcrn_base + PEGPL_SPECIAL, s, + &dcr_read_pcie, &dcr_write_pcie); + ppc_dcr_register(env, s->dcrn_base + PEGPL_CFG, s, + &dcr_read_pcie, &dcr_write_pcie); +} + +void ppc460ex_pcie_init(CPUPPCState *env) +{ + DeviceState *dev; + + dev = qdev_new(TYPE_PPC460EX_PCIE_HOST); + qdev_prop_set_int32(dev, "dcrn-base", DCRN_PCIE0_BASE); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + ppc460ex_pcie_register_dcrs(PPC460EX_PCIE_HOST(dev), env); + + dev = qdev_new(TYPE_PPC460EX_PCIE_HOST); + qdev_prop_set_int32(dev, "dcrn-base", DCRN_PCIE1_BASE); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + ppc460ex_pcie_register_dcrs(PPC460EX_PCIE_HOST(dev), env); +} diff --git a/hw/ppc/ppc4xx_devs.c b/hw/ppc/ppc4xx_devs.c new file mode 100644 index 000000000..980c48944 --- /dev/null +++ b/hw/ppc/ppc4xx_devs.c @@ -0,0 +1,715 @@ +/* + * QEMU PowerPC 4xx embedded processors shared devices emulation + * + * Copyright (c) 2007 Jocelyn Mayer + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "sysemu/reset.h" +#include "cpu.h" +#include "hw/irq.h" +#include "hw/ppc/ppc.h" +#include "hw/ppc/ppc4xx.h" +#include "hw/intc/ppc-uic.h" +#include "hw/qdev-properties.h" +#include "qemu/log.h" +#include "exec/address-spaces.h" +#include "qemu/error-report.h" +#include "qapi/error.h" + +/*#define DEBUG_UIC*/ + +#ifdef DEBUG_UIC +# define LOG_UIC(...) qemu_log_mask(CPU_LOG_INT, ## __VA_ARGS__) +#else +# define LOG_UIC(...) 
do { } while (0) +#endif + +static void ppc4xx_reset(void *opaque) +{ + PowerPCCPU *cpu = opaque; + + cpu_reset(CPU(cpu)); +} + +/*****************************************************************************/ +/* Generic PowerPC 4xx processor instantiation */ +PowerPCCPU *ppc4xx_init(const char *cpu_type, + clk_setup_t *cpu_clk, clk_setup_t *tb_clk, + uint32_t sysclk) +{ + PowerPCCPU *cpu; + CPUPPCState *env; + + /* init CPUs */ + cpu = POWERPC_CPU(cpu_create(cpu_type)); + env = &cpu->env; + + cpu_clk->cb = NULL; /* We don't care about CPU clock frequency changes */ + cpu_clk->opaque = env; + /* Set time-base frequency to sysclk */ + tb_clk->cb = ppc_40x_timers_init(env, sysclk, PPC_INTERRUPT_PIT); + tb_clk->opaque = env; + ppc_dcr_init(env, NULL, NULL); + /* Register qemu callbacks */ + qemu_register_reset(ppc4xx_reset, cpu); + + return cpu; +} + +/*****************************************************************************/ +/* SDRAM controller */ +typedef struct ppc4xx_sdram_t ppc4xx_sdram_t; +struct ppc4xx_sdram_t { + uint32_t addr; + int nbanks; + MemoryRegion containers[4]; /* used for clipping */ + MemoryRegion *ram_memories; + hwaddr ram_bases[4]; + hwaddr ram_sizes[4]; + uint32_t besr0; + uint32_t besr1; + uint32_t bear; + uint32_t cfg; + uint32_t status; + uint32_t rtr; + uint32_t pmit; + uint32_t bcr[4]; + uint32_t tr; + uint32_t ecccfg; + uint32_t eccesr; + qemu_irq irq; +}; + +enum { + SDRAM0_CFGADDR = 0x010, + SDRAM0_CFGDATA = 0x011, +}; + +/* XXX: TOFIX: some patches have made this code become inconsistent: + * there are type inconsistencies, mixing hwaddr, target_ulong + * and uint32_t + */ +static uint32_t sdram_bcr (hwaddr ram_base, + hwaddr ram_size) +{ + uint32_t bcr; + + switch (ram_size) { + case 4 * MiB: + bcr = 0x00000000; + break; + case 8 * MiB: + bcr = 0x00020000; + break; + case 16 * MiB: + bcr = 0x00040000; + break; + case 32 * MiB: + bcr = 0x00060000; + break; + case 64 * MiB: + bcr = 0x00080000; + break; + case 128 * MiB: + bcr = 0x000A0000; + break; + case 256 * MiB: + bcr = 0x000C0000; + break; + default: + printf("%s: invalid RAM size " TARGET_FMT_plx "\n", __func__, + ram_size); + return 0x00000000; + } + bcr |= ram_base & 0xFF800000; + bcr |= 1; + + return bcr; +} + +static inline hwaddr sdram_base(uint32_t bcr) +{ + return bcr & 0xFF800000; +} + +static target_ulong sdram_size (uint32_t bcr) +{ + target_ulong size; + int sh; + + sh = (bcr >> 17) & 0x7; + if (sh == 7) + size = -1; + else + size = (4 * MiB) << sh; + + return size; +} + +static void sdram_set_bcr(ppc4xx_sdram_t *sdram, int i, + uint32_t bcr, int enabled) +{ + if (sdram->bcr[i] & 0x00000001) { + /* Unmap RAM */ +#ifdef DEBUG_SDRAM + printf("%s: unmap RAM area " TARGET_FMT_plx " " TARGET_FMT_lx "\n", + __func__, sdram_base(sdram->bcr[i]), sdram_size(sdram->bcr[i])); +#endif + memory_region_del_subregion(get_system_memory(), + &sdram->containers[i]); + memory_region_del_subregion(&sdram->containers[i], + &sdram->ram_memories[i]); + object_unparent(OBJECT(&sdram->containers[i])); + } + sdram->bcr[i] = bcr & 0xFFDEE001; + if (enabled && (bcr & 0x00000001)) { +#ifdef DEBUG_SDRAM + printf("%s: Map RAM area " TARGET_FMT_plx " " TARGET_FMT_lx "\n", + __func__, sdram_base(bcr), sdram_size(bcr)); +#endif + memory_region_init(&sdram->containers[i], NULL, "sdram-containers", + sdram_size(bcr)); + memory_region_add_subregion(&sdram->containers[i], 0, + &sdram->ram_memories[i]); + memory_region_add_subregion(get_system_memory(), + sdram_base(bcr), + &sdram->containers[i]); + } +} + +static void 
sdram_map_bcr (ppc4xx_sdram_t *sdram) +{ + int i; + + for (i = 0; i < sdram->nbanks; i++) { + if (sdram->ram_sizes[i] != 0) { + sdram_set_bcr(sdram, i, sdram_bcr(sdram->ram_bases[i], + sdram->ram_sizes[i]), 1); + } else { + sdram_set_bcr(sdram, i, 0x00000000, 0); + } + } +} + +static void sdram_unmap_bcr (ppc4xx_sdram_t *sdram) +{ + int i; + + for (i = 0; i < sdram->nbanks; i++) { +#ifdef DEBUG_SDRAM + printf("%s: Unmap RAM area " TARGET_FMT_plx " " TARGET_FMT_lx "\n", + __func__, sdram_base(sdram->bcr[i]), sdram_size(sdram->bcr[i])); +#endif + memory_region_del_subregion(get_system_memory(), + &sdram->ram_memories[i]); + } +} + +static uint32_t dcr_read_sdram (void *opaque, int dcrn) +{ + ppc4xx_sdram_t *sdram; + uint32_t ret; + + sdram = opaque; + switch (dcrn) { + case SDRAM0_CFGADDR: + ret = sdram->addr; + break; + case SDRAM0_CFGDATA: + switch (sdram->addr) { + case 0x00: /* SDRAM_BESR0 */ + ret = sdram->besr0; + break; + case 0x08: /* SDRAM_BESR1 */ + ret = sdram->besr1; + break; + case 0x10: /* SDRAM_BEAR */ + ret = sdram->bear; + break; + case 0x20: /* SDRAM_CFG */ + ret = sdram->cfg; + break; + case 0x24: /* SDRAM_STATUS */ + ret = sdram->status; + break; + case 0x30: /* SDRAM_RTR */ + ret = sdram->rtr; + break; + case 0x34: /* SDRAM_PMIT */ + ret = sdram->pmit; + break; + case 0x40: /* SDRAM_B0CR */ + ret = sdram->bcr[0]; + break; + case 0x44: /* SDRAM_B1CR */ + ret = sdram->bcr[1]; + break; + case 0x48: /* SDRAM_B2CR */ + ret = sdram->bcr[2]; + break; + case 0x4C: /* SDRAM_B3CR */ + ret = sdram->bcr[3]; + break; + case 0x80: /* SDRAM_TR */ + ret = -1; /* ? */ + break; + case 0x94: /* SDRAM_ECCCFG */ + ret = sdram->ecccfg; + break; + case 0x98: /* SDRAM_ECCESR */ + ret = sdram->eccesr; + break; + default: /* Error */ + ret = -1; + break; + } + break; + default: + /* Avoid gcc warning */ + ret = 0x00000000; + break; + } + + return ret; +} + +static void dcr_write_sdram (void *opaque, int dcrn, uint32_t val) +{ + ppc4xx_sdram_t *sdram; + + sdram = opaque; + switch (dcrn) { + case SDRAM0_CFGADDR: + sdram->addr = val; + break; + case SDRAM0_CFGDATA: + switch (sdram->addr) { + case 0x00: /* SDRAM_BESR0 */ + sdram->besr0 &= ~val; + break; + case 0x08: /* SDRAM_BESR1 */ + sdram->besr1 &= ~val; + break; + case 0x10: /* SDRAM_BEAR */ + sdram->bear = val; + break; + case 0x20: /* SDRAM_CFG */ + val &= 0xFFE00000; + if (!(sdram->cfg & 0x80000000) && (val & 0x80000000)) { +#ifdef DEBUG_SDRAM + printf("%s: enable SDRAM controller\n", __func__); +#endif + /* validate all RAM mappings */ + sdram_map_bcr(sdram); + sdram->status &= ~0x80000000; + } else if ((sdram->cfg & 0x80000000) && !(val & 0x80000000)) { +#ifdef DEBUG_SDRAM + printf("%s: disable SDRAM controller\n", __func__); +#endif + /* invalidate all RAM mappings */ + sdram_unmap_bcr(sdram); + sdram->status |= 0x80000000; + } + if (!(sdram->cfg & 0x40000000) && (val & 0x40000000)) + sdram->status |= 0x40000000; + else if ((sdram->cfg & 0x40000000) && !(val & 0x40000000)) + sdram->status &= ~0x40000000; + sdram->cfg = val; + break; + case 0x24: /* SDRAM_STATUS */ + /* Read-only register */ + break; + case 0x30: /* SDRAM_RTR */ + sdram->rtr = val & 0x3FF80000; + break; + case 0x34: /* SDRAM_PMIT */ + sdram->pmit = (val & 0xF8000000) | 0x07C00000; + break; + case 0x40: /* SDRAM_B0CR */ + sdram_set_bcr(sdram, 0, val, sdram->cfg & 0x80000000); + break; + case 0x44: /* SDRAM_B1CR */ + sdram_set_bcr(sdram, 1, val, sdram->cfg & 0x80000000); + break; + case 0x48: /* SDRAM_B2CR */ + sdram_set_bcr(sdram, 2, val, sdram->cfg & 0x80000000); + break; + 
case 0x4C: /* SDRAM_B3CR */ + sdram_set_bcr(sdram, 3, val, sdram->cfg & 0x80000000); + break; + case 0x80: /* SDRAM_TR */ + sdram->tr = val & 0x018FC01F; + break; + case 0x94: /* SDRAM_ECCCFG */ + sdram->ecccfg = val & 0x00F00000; + break; + case 0x98: /* SDRAM_ECCESR */ + val &= 0xFFF0F000; + if (sdram->eccesr == 0 && val != 0) + qemu_irq_raise(sdram->irq); + else if (sdram->eccesr != 0 && val == 0) + qemu_irq_lower(sdram->irq); + sdram->eccesr = val; + break; + default: /* Error */ + break; + } + break; + } +} + +static void sdram_reset (void *opaque) +{ + ppc4xx_sdram_t *sdram; + + sdram = opaque; + sdram->addr = 0x00000000; + sdram->bear = 0x00000000; + sdram->besr0 = 0x00000000; /* No error */ + sdram->besr1 = 0x00000000; /* No error */ + sdram->cfg = 0x00000000; + sdram->ecccfg = 0x00000000; /* No ECC */ + sdram->eccesr = 0x00000000; /* No error */ + sdram->pmit = 0x07C00000; + sdram->rtr = 0x05F00000; + sdram->tr = 0x00854009; + /* We pre-initialize RAM banks */ + sdram->status = 0x00000000; + sdram->cfg = 0x00800000; +} + +void ppc4xx_sdram_init (CPUPPCState *env, qemu_irq irq, int nbanks, + MemoryRegion *ram_memories, + hwaddr *ram_bases, + hwaddr *ram_sizes, + int do_init) +{ + ppc4xx_sdram_t *sdram; + + sdram = g_malloc0(sizeof(ppc4xx_sdram_t)); + sdram->irq = irq; + sdram->nbanks = nbanks; + sdram->ram_memories = ram_memories; + memset(sdram->ram_bases, 0, 4 * sizeof(hwaddr)); + memcpy(sdram->ram_bases, ram_bases, + nbanks * sizeof(hwaddr)); + memset(sdram->ram_sizes, 0, 4 * sizeof(hwaddr)); + memcpy(sdram->ram_sizes, ram_sizes, + nbanks * sizeof(hwaddr)); + qemu_register_reset(&sdram_reset, sdram); + ppc_dcr_register(env, SDRAM0_CFGADDR, + sdram, &dcr_read_sdram, &dcr_write_sdram); + ppc_dcr_register(env, SDRAM0_CFGDATA, + sdram, &dcr_read_sdram, &dcr_write_sdram); + if (do_init) + sdram_map_bcr(sdram); +} + +/* + * Split RAM between SDRAM banks. + * + * sdram_bank_sizes[] must be in descending order, that is sizes[i] > sizes[i+1] + * and must be 0-terminated. + * + * The 4xx SDRAM controller supports a small number of banks, and each bank + * must be one of a small set of sizes. The number of banks and the supported + * sizes varies by SoC. + */ +void ppc4xx_sdram_banks(MemoryRegion *ram, int nr_banks, + MemoryRegion ram_memories[], + hwaddr ram_bases[], hwaddr ram_sizes[], + const ram_addr_t sdram_bank_sizes[]) +{ + ram_addr_t size_left = memory_region_size(ram); + ram_addr_t base = 0; + ram_addr_t bank_size; + int i; + int j; + + for (i = 0; i < nr_banks; i++) { + for (j = 0; sdram_bank_sizes[j] != 0; j++) { + bank_size = sdram_bank_sizes[j]; + if (bank_size <= size_left) { + char name[32]; + + ram_bases[i] = base; + ram_sizes[i] = bank_size; + base += bank_size; + size_left -= bank_size; + snprintf(name, sizeof(name), "ppc4xx.sdram%d", i); + memory_region_init_alias(&ram_memories[i], NULL, name, ram, + ram_bases[i], ram_sizes[i]); + break; + } + } + if (!size_left) { + /* No need to use the remaining banks. */ + break; + } + } + + if (size_left) { + ram_addr_t used_size = memory_region_size(ram) - size_left; + GString *s = g_string_new(NULL); + + for (i = 0; sdram_bank_sizes[i]; i++) { + g_string_append_printf(s, "%" PRIi64 "%s", + sdram_bank_sizes[i] / MiB, + sdram_bank_sizes[i + 1] ? ", " : ""); + } + error_report("at most %d bank%s of %s MiB each supported", + nr_banks, nr_banks == 1 ? "" : "s", s->str); + error_printf("Possible valid RAM size: %" PRIi64 " MiB \n", + used_size ? 
used_size / MiB : sdram_bank_sizes[i - 1] / MiB); + + g_string_free(s, true); + exit(EXIT_FAILURE); + } +} + +/*****************************************************************************/ +/* MAL */ + +enum { + MAL0_CFG = 0x180, + MAL0_ESR = 0x181, + MAL0_IER = 0x182, + MAL0_TXCASR = 0x184, + MAL0_TXCARR = 0x185, + MAL0_TXEOBISR = 0x186, + MAL0_TXDEIR = 0x187, + MAL0_RXCASR = 0x190, + MAL0_RXCARR = 0x191, + MAL0_RXEOBISR = 0x192, + MAL0_RXDEIR = 0x193, + MAL0_TXCTP0R = 0x1A0, + MAL0_RXCTP0R = 0x1C0, + MAL0_RCBS0 = 0x1E0, + MAL0_RCBS1 = 0x1E1, +}; + +typedef struct ppc4xx_mal_t ppc4xx_mal_t; +struct ppc4xx_mal_t { + qemu_irq irqs[4]; + uint32_t cfg; + uint32_t esr; + uint32_t ier; + uint32_t txcasr; + uint32_t txcarr; + uint32_t txeobisr; + uint32_t txdeir; + uint32_t rxcasr; + uint32_t rxcarr; + uint32_t rxeobisr; + uint32_t rxdeir; + uint32_t *txctpr; + uint32_t *rxctpr; + uint32_t *rcbs; + uint8_t txcnum; + uint8_t rxcnum; +}; + +static void ppc4xx_mal_reset(void *opaque) +{ + ppc4xx_mal_t *mal; + + mal = opaque; + mal->cfg = 0x0007C000; + mal->esr = 0x00000000; + mal->ier = 0x00000000; + mal->rxcasr = 0x00000000; + mal->rxdeir = 0x00000000; + mal->rxeobisr = 0x00000000; + mal->txcasr = 0x00000000; + mal->txdeir = 0x00000000; + mal->txeobisr = 0x00000000; +} + +static uint32_t dcr_read_mal(void *opaque, int dcrn) +{ + ppc4xx_mal_t *mal; + uint32_t ret; + + mal = opaque; + switch (dcrn) { + case MAL0_CFG: + ret = mal->cfg; + break; + case MAL0_ESR: + ret = mal->esr; + break; + case MAL0_IER: + ret = mal->ier; + break; + case MAL0_TXCASR: + ret = mal->txcasr; + break; + case MAL0_TXCARR: + ret = mal->txcarr; + break; + case MAL0_TXEOBISR: + ret = mal->txeobisr; + break; + case MAL0_TXDEIR: + ret = mal->txdeir; + break; + case MAL0_RXCASR: + ret = mal->rxcasr; + break; + case MAL0_RXCARR: + ret = mal->rxcarr; + break; + case MAL0_RXEOBISR: + ret = mal->rxeobisr; + break; + case MAL0_RXDEIR: + ret = mal->rxdeir; + break; + default: + ret = 0; + break; + } + if (dcrn >= MAL0_TXCTP0R && dcrn < MAL0_TXCTP0R + mal->txcnum) { + ret = mal->txctpr[dcrn - MAL0_TXCTP0R]; + } + if (dcrn >= MAL0_RXCTP0R && dcrn < MAL0_RXCTP0R + mal->rxcnum) { + ret = mal->rxctpr[dcrn - MAL0_RXCTP0R]; + } + if (dcrn >= MAL0_RCBS0 && dcrn < MAL0_RCBS0 + mal->rxcnum) { + ret = mal->rcbs[dcrn - MAL0_RCBS0]; + } + + return ret; +} + +static void dcr_write_mal(void *opaque, int dcrn, uint32_t val) +{ + ppc4xx_mal_t *mal; + + mal = opaque; + switch (dcrn) { + case MAL0_CFG: + if (val & 0x80000000) { + ppc4xx_mal_reset(mal); + } + mal->cfg = val & 0x00FFC087; + break; + case MAL0_ESR: + /* Read/clear */ + mal->esr &= ~val; + break; + case MAL0_IER: + mal->ier = val & 0x0000001F; + break; + case MAL0_TXCASR: + mal->txcasr = val & 0xF0000000; + break; + case MAL0_TXCARR: + mal->txcarr = val & 0xF0000000; + break; + case MAL0_TXEOBISR: + /* Read/clear */ + mal->txeobisr &= ~val; + break; + case MAL0_TXDEIR: + /* Read/clear */ + mal->txdeir &= ~val; + break; + case MAL0_RXCASR: + mal->rxcasr = val & 0xC0000000; + break; + case MAL0_RXCARR: + mal->rxcarr = val & 0xC0000000; + break; + case MAL0_RXEOBISR: + /* Read/clear */ + mal->rxeobisr &= ~val; + break; + case MAL0_RXDEIR: + /* Read/clear */ + mal->rxdeir &= ~val; + break; + } + if (dcrn >= MAL0_TXCTP0R && dcrn < MAL0_TXCTP0R + mal->txcnum) { + mal->txctpr[dcrn - MAL0_TXCTP0R] = val; + } + if (dcrn >= MAL0_RXCTP0R && dcrn < MAL0_RXCTP0R + mal->rxcnum) { + mal->rxctpr[dcrn - MAL0_RXCTP0R] = val; + } + if (dcrn >= MAL0_RCBS0 && dcrn < MAL0_RCBS0 + mal->rxcnum) { + mal->rcbs[dcrn 
- MAL0_RCBS0] = val & 0x000000FF; + } +} + +void ppc4xx_mal_init(CPUPPCState *env, uint8_t txcnum, uint8_t rxcnum, + qemu_irq irqs[4]) +{ + ppc4xx_mal_t *mal; + int i; + + assert(txcnum <= 32 && rxcnum <= 32); + mal = g_malloc0(sizeof(*mal)); + mal->txcnum = txcnum; + mal->rxcnum = rxcnum; + mal->txctpr = g_new0(uint32_t, txcnum); + mal->rxctpr = g_new0(uint32_t, rxcnum); + mal->rcbs = g_new0(uint32_t, rxcnum); + for (i = 0; i < 4; i++) { + mal->irqs[i] = irqs[i]; + } + qemu_register_reset(&ppc4xx_mal_reset, mal); + ppc_dcr_register(env, MAL0_CFG, + mal, &dcr_read_mal, &dcr_write_mal); + ppc_dcr_register(env, MAL0_ESR, + mal, &dcr_read_mal, &dcr_write_mal); + ppc_dcr_register(env, MAL0_IER, + mal, &dcr_read_mal, &dcr_write_mal); + ppc_dcr_register(env, MAL0_TXCASR, + mal, &dcr_read_mal, &dcr_write_mal); + ppc_dcr_register(env, MAL0_TXCARR, + mal, &dcr_read_mal, &dcr_write_mal); + ppc_dcr_register(env, MAL0_TXEOBISR, + mal, &dcr_read_mal, &dcr_write_mal); + ppc_dcr_register(env, MAL0_TXDEIR, + mal, &dcr_read_mal, &dcr_write_mal); + ppc_dcr_register(env, MAL0_RXCASR, + mal, &dcr_read_mal, &dcr_write_mal); + ppc_dcr_register(env, MAL0_RXCARR, + mal, &dcr_read_mal, &dcr_write_mal); + ppc_dcr_register(env, MAL0_RXEOBISR, + mal, &dcr_read_mal, &dcr_write_mal); + ppc_dcr_register(env, MAL0_RXDEIR, + mal, &dcr_read_mal, &dcr_write_mal); + for (i = 0; i < txcnum; i++) { + ppc_dcr_register(env, MAL0_TXCTP0R + i, + mal, &dcr_read_mal, &dcr_write_mal); + } + for (i = 0; i < rxcnum; i++) { + ppc_dcr_register(env, MAL0_RXCTP0R + i, + mal, &dcr_read_mal, &dcr_write_mal); + } + for (i = 0; i < rxcnum; i++) { + ppc_dcr_register(env, MAL0_RCBS0 + i, + mal, &dcr_read_mal, &dcr_write_mal); + } +} diff --git a/hw/ppc/ppc4xx_pci.c b/hw/ppc/ppc4xx_pci.c new file mode 100644 index 000000000..304a29349 --- /dev/null +++ b/hw/ppc/ppc4xx_pci.c @@ -0,0 +1,389 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + * + * Copyright IBM Corp. 2008 + * + * Authors: Hollis Blanchard + */ + +/* This file implements emulation of the 32-bit PCI controller found in some + * 4xx SoCs, such as the 440EP. 
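+ * The controller is programmed through a CFGADDR/CFGDATA pair for PCI
+ * configuration cycles plus a block of internal PMM/PTM mapping registers;
+ * see the PCIC0_* and PCIL0_* defines below.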
*/ + +#include "qemu/osdep.h" +#include "hw/irq.h" +#include "hw/ppc/ppc.h" +#include "hw/ppc/ppc4xx.h" +#include "migration/vmstate.h" +#include "qemu/module.h" +#include "sysemu/reset.h" +#include "hw/pci/pci.h" +#include "hw/pci/pci_host.h" +#include "trace.h" +#include "qom/object.h" + +struct PCIMasterMap { + uint32_t la; + uint32_t ma; + uint32_t pcila; + uint32_t pciha; +}; + +struct PCITargetMap { + uint32_t ms; + uint32_t la; +}; + +OBJECT_DECLARE_SIMPLE_TYPE(PPC4xxPCIState, PPC4xx_PCI_HOST_BRIDGE) + +#define PPC4xx_PCI_NR_PMMS 3 +#define PPC4xx_PCI_NR_PTMS 2 + +#define PPC4xx_PCI_NUM_DEVS 5 + +struct PPC4xxPCIState { + PCIHostState parent_obj; + + struct PCIMasterMap pmm[PPC4xx_PCI_NR_PMMS]; + struct PCITargetMap ptm[PPC4xx_PCI_NR_PTMS]; + qemu_irq irq[PPC4xx_PCI_NUM_DEVS]; + + MemoryRegion container; + MemoryRegion iomem; +}; + +#define PCIC0_CFGADDR 0x0 +#define PCIC0_CFGDATA 0x4 + +/* PLB Memory Map (PMM) registers specify which PLB addresses are translated to + * PCI accesses. */ +#define PCIL0_PMM0LA 0x0 +#define PCIL0_PMM0MA 0x4 +#define PCIL0_PMM0PCILA 0x8 +#define PCIL0_PMM0PCIHA 0xc +#define PCIL0_PMM1LA 0x10 +#define PCIL0_PMM1MA 0x14 +#define PCIL0_PMM1PCILA 0x18 +#define PCIL0_PMM1PCIHA 0x1c +#define PCIL0_PMM2LA 0x20 +#define PCIL0_PMM2MA 0x24 +#define PCIL0_PMM2PCILA 0x28 +#define PCIL0_PMM2PCIHA 0x2c + +/* PCI Target Map (PTM) registers specify which PCI addresses are translated to + * PLB accesses. */ +#define PCIL0_PTM1MS 0x30 +#define PCIL0_PTM1LA 0x34 +#define PCIL0_PTM2MS 0x38 +#define PCIL0_PTM2LA 0x3c +#define PCI_REG_BASE 0x800000 +#define PCI_REG_SIZE 0x40 + +#define PCI_ALL_SIZE (PCI_REG_BASE + PCI_REG_SIZE) + +static void ppc4xx_pci_reg_write4(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + struct PPC4xxPCIState *pci = opaque; + + /* We ignore all target attempts at PCI configuration, effectively + * assuming a bidirectional 1:1 mapping of PLB and PCI space. 
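+ * The PMM/PTM values written below are therefore only latched so that the
+ * guest can read back what it programmed; no address remapping is modelled.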
*/ + + switch (offset) { + case PCIL0_PMM0LA: + pci->pmm[0].la = value; + break; + case PCIL0_PMM0MA: + pci->pmm[0].ma = value; + break; + case PCIL0_PMM0PCIHA: + pci->pmm[0].pciha = value; + break; + case PCIL0_PMM0PCILA: + pci->pmm[0].pcila = value; + break; + + case PCIL0_PMM1LA: + pci->pmm[1].la = value; + break; + case PCIL0_PMM1MA: + pci->pmm[1].ma = value; + break; + case PCIL0_PMM1PCIHA: + pci->pmm[1].pciha = value; + break; + case PCIL0_PMM1PCILA: + pci->pmm[1].pcila = value; + break; + + case PCIL0_PMM2LA: + pci->pmm[2].la = value; + break; + case PCIL0_PMM2MA: + pci->pmm[2].ma = value; + break; + case PCIL0_PMM2PCIHA: + pci->pmm[2].pciha = value; + break; + case PCIL0_PMM2PCILA: + pci->pmm[2].pcila = value; + break; + + case PCIL0_PTM1MS: + pci->ptm[0].ms = value; + break; + case PCIL0_PTM1LA: + pci->ptm[0].la = value; + break; + case PCIL0_PTM2MS: + pci->ptm[1].ms = value; + break; + case PCIL0_PTM2LA: + pci->ptm[1].la = value; + break; + + default: + printf("%s: unhandled PCI internal register 0x%lx\n", __func__, + (unsigned long)offset); + break; + } +} + +static uint64_t ppc4xx_pci_reg_read4(void *opaque, hwaddr offset, + unsigned size) +{ + struct PPC4xxPCIState *pci = opaque; + uint32_t value; + + switch (offset) { + case PCIL0_PMM0LA: + value = pci->pmm[0].la; + break; + case PCIL0_PMM0MA: + value = pci->pmm[0].ma; + break; + case PCIL0_PMM0PCIHA: + value = pci->pmm[0].pciha; + break; + case PCIL0_PMM0PCILA: + value = pci->pmm[0].pcila; + break; + + case PCIL0_PMM1LA: + value = pci->pmm[1].la; + break; + case PCIL0_PMM1MA: + value = pci->pmm[1].ma; + break; + case PCIL0_PMM1PCIHA: + value = pci->pmm[1].pciha; + break; + case PCIL0_PMM1PCILA: + value = pci->pmm[1].pcila; + break; + + case PCIL0_PMM2LA: + value = pci->pmm[2].la; + break; + case PCIL0_PMM2MA: + value = pci->pmm[2].ma; + break; + case PCIL0_PMM2PCIHA: + value = pci->pmm[2].pciha; + break; + case PCIL0_PMM2PCILA: + value = pci->pmm[2].pcila; + break; + + case PCIL0_PTM1MS: + value = pci->ptm[0].ms; + break; + case PCIL0_PTM1LA: + value = pci->ptm[0].la; + break; + case PCIL0_PTM2MS: + value = pci->ptm[1].ms; + break; + case PCIL0_PTM2LA: + value = pci->ptm[1].la; + break; + + default: + printf("%s: invalid PCI internal register 0x%lx\n", __func__, + (unsigned long)offset); + value = 0; + } + + return value; +} + +static const MemoryRegionOps pci_reg_ops = { + .read = ppc4xx_pci_reg_read4, + .write = ppc4xx_pci_reg_write4, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static void ppc4xx_pci_reset(void *opaque) +{ + struct PPC4xxPCIState *pci = opaque; + + memset(pci->pmm, 0, sizeof(pci->pmm)); + memset(pci->ptm, 0, sizeof(pci->ptm)); +} + +/* On Bamboo, all pins from each slot are tied to a single board IRQ. This + * may need further refactoring for other boards. */ +static int ppc4xx_pci_map_irq(PCIDevice *pci_dev, int irq_num) +{ + int slot = PCI_SLOT(pci_dev->devfn); + + trace_ppc4xx_pci_map_irq(pci_dev->devfn, irq_num, slot); + + return slot > 0 ? 
slot - 1 : PPC4xx_PCI_NUM_DEVS - 1; +} + +static void ppc4xx_pci_set_irq(void *opaque, int irq_num, int level) +{ + qemu_irq *pci_irqs = opaque; + + trace_ppc4xx_pci_set_irq(irq_num); + assert(irq_num >= 0 && irq_num < PPC4xx_PCI_NUM_DEVS); + qemu_set_irq(pci_irqs[irq_num], level); +} + +static const VMStateDescription vmstate_pci_master_map = { + .name = "pci_master_map", + .version_id = 0, + .minimum_version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_UINT32(la, struct PCIMasterMap), + VMSTATE_UINT32(ma, struct PCIMasterMap), + VMSTATE_UINT32(pcila, struct PCIMasterMap), + VMSTATE_UINT32(pciha, struct PCIMasterMap), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_pci_target_map = { + .name = "pci_target_map", + .version_id = 0, + .minimum_version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_UINT32(ms, struct PCITargetMap), + VMSTATE_UINT32(la, struct PCITargetMap), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_ppc4xx_pci = { + .name = "ppc4xx_pci", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_STRUCT_ARRAY(pmm, PPC4xxPCIState, PPC4xx_PCI_NR_PMMS, 1, + vmstate_pci_master_map, + struct PCIMasterMap), + VMSTATE_STRUCT_ARRAY(ptm, PPC4xxPCIState, PPC4xx_PCI_NR_PTMS, 1, + vmstate_pci_target_map, + struct PCITargetMap), + VMSTATE_END_OF_LIST() + } +}; + +/* XXX Interrupt acknowledge cycles not supported. */ +static void ppc4xx_pcihost_realize(DeviceState *dev, Error **errp) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + PPC4xxPCIState *s; + PCIHostState *h; + PCIBus *b; + int i; + + h = PCI_HOST_BRIDGE(dev); + s = PPC4xx_PCI_HOST_BRIDGE(dev); + + for (i = 0; i < ARRAY_SIZE(s->irq); i++) { + sysbus_init_irq(sbd, &s->irq[i]); + } + + b = pci_register_root_bus(dev, NULL, ppc4xx_pci_set_irq, + ppc4xx_pci_map_irq, s->irq, get_system_memory(), + get_system_io(), 0, ARRAY_SIZE(s->irq), + TYPE_PCI_BUS); + h->bus = b; + + pci_create_simple(b, 0, "ppc4xx-host-bridge"); + + /* XXX split into 2 memory regions, one for config space, one for regs */ + memory_region_init(&s->container, OBJECT(s), "pci-container", PCI_ALL_SIZE); + memory_region_init_io(&h->conf_mem, OBJECT(s), &pci_host_conf_le_ops, h, + "pci-conf-idx", 4); + memory_region_init_io(&h->data_mem, OBJECT(s), &pci_host_data_le_ops, h, + "pci-conf-data", 4); + memory_region_init_io(&s->iomem, OBJECT(s), &pci_reg_ops, s, + "pci.reg", PCI_REG_SIZE); + memory_region_add_subregion(&s->container, PCIC0_CFGADDR, &h->conf_mem); + memory_region_add_subregion(&s->container, PCIC0_CFGDATA, &h->data_mem); + memory_region_add_subregion(&s->container, PCI_REG_BASE, &s->iomem); + sysbus_init_mmio(sbd, &s->container); + qemu_register_reset(ppc4xx_pci_reset, s); +} + +static void ppc4xx_host_bridge_class_init(ObjectClass *klass, void *data) +{ + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->desc = "Host bridge"; + k->vendor_id = PCI_VENDOR_ID_IBM; + k->device_id = PCI_DEVICE_ID_IBM_440GX; + k->class_id = PCI_CLASS_BRIDGE_OTHER; + /* + * PCI-facing part of the host bridge, not usable without the + * host-facing part, which can't be device_add'ed, yet. 
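+ * (The host-facing part is the TYPE_PPC4xx_PCI_HOST_BRIDGE sysbus device
+ * realized by ppc4xx_pcihost_realize() above.)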
+ */ + dc->user_creatable = false; +} + +static const TypeInfo ppc4xx_host_bridge_info = { + .name = "ppc4xx-host-bridge", + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(PCIDevice), + .class_init = ppc4xx_host_bridge_class_init, + .interfaces = (InterfaceInfo[]) { + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { }, + }, +}; + +static void ppc4xx_pcihost_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = ppc4xx_pcihost_realize; + dc->vmsd = &vmstate_ppc4xx_pci; +} + +static const TypeInfo ppc4xx_pcihost_info = { + .name = TYPE_PPC4xx_PCI_HOST_BRIDGE, + .parent = TYPE_PCI_HOST_BRIDGE, + .instance_size = sizeof(PPC4xxPCIState), + .class_init = ppc4xx_pcihost_class_init, +}; + +static void ppc4xx_pci_register_types(void) +{ + type_register_static(&ppc4xx_pcihost_info); + type_register_static(&ppc4xx_host_bridge_info); +} + +type_init(ppc4xx_pci_register_types) diff --git a/hw/ppc/ppc_booke.c b/hw/ppc/ppc_booke.c new file mode 100644 index 000000000..10b643861 --- /dev/null +++ b/hw/ppc/ppc_booke.c @@ -0,0 +1,369 @@ +/* + * QEMU PowerPC Booke hardware System Emulator + * + * Copyright (c) 2011 AdaCore + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "hw/ppc/ppc.h" +#include "qemu/timer.h" +#include "sysemu/reset.h" +#include "sysemu/runstate.h" +#include "hw/loader.h" +#include "kvm_ppc.h" + + +/* Timer Control Register */ + +#define TCR_WP_SHIFT 30 /* Watchdog Timer Period */ +#define TCR_WP_MASK (0x3U << TCR_WP_SHIFT) +#define TCR_WRC_SHIFT 28 /* Watchdog Timer Reset Control */ +#define TCR_WRC_MASK (0x3U << TCR_WRC_SHIFT) +#define TCR_WIE (1U << 27) /* Watchdog Timer Interrupt Enable */ +#define TCR_DIE (1U << 26) /* Decrementer Interrupt Enable */ +#define TCR_FP_SHIFT 24 /* Fixed-Interval Timer Period */ +#define TCR_FP_MASK (0x3U << TCR_FP_SHIFT) +#define TCR_FIE (1U << 23) /* Fixed-Interval Timer Interrupt Enable */ +#define TCR_ARE (1U << 22) /* Auto-Reload Enable */ + +/* Timer Control Register (e500 specific fields) */ + +#define TCR_E500_FPEXT_SHIFT 13 /* Fixed-Interval Timer Period Extension */ +#define TCR_E500_FPEXT_MASK (0xf << TCR_E500_FPEXT_SHIFT) +#define TCR_E500_WPEXT_SHIFT 17 /* Watchdog Timer Period Extension */ +#define TCR_E500_WPEXT_MASK (0xf << TCR_E500_WPEXT_SHIFT) + +/* Timer Status Register */ + +#define TSR_FIS (1U << 26) /* Fixed-Interval Timer Interrupt Status */ +#define TSR_DIS (1U << 27) /* Decrementer Interrupt Status */ +#define TSR_WRS_SHIFT 28 /* Watchdog Timer Reset Status */ +#define TSR_WRS_MASK (0x3U << TSR_WRS_SHIFT) +#define TSR_WIS (1U << 30) /* Watchdog Timer Interrupt Status */ +#define TSR_ENW (1U << 31) /* Enable Next Watchdog Timer */ + +typedef struct booke_timer_t booke_timer_t; +struct booke_timer_t { + + uint64_t fit_next; + QEMUTimer *fit_timer; + + uint64_t wdt_next; + QEMUTimer *wdt_timer; + + uint32_t flags; +}; + +static void booke_update_irq(PowerPCCPU *cpu) +{ + CPUPPCState *env = &cpu->env; + + ppc_set_irq(cpu, PPC_INTERRUPT_DECR, + (env->spr[SPR_BOOKE_TSR] & TSR_DIS + && env->spr[SPR_BOOKE_TCR] & TCR_DIE)); + + ppc_set_irq(cpu, PPC_INTERRUPT_WDT, + (env->spr[SPR_BOOKE_TSR] & TSR_WIS + && env->spr[SPR_BOOKE_TCR] & TCR_WIE)); + + ppc_set_irq(cpu, PPC_INTERRUPT_FIT, + (env->spr[SPR_BOOKE_TSR] & TSR_FIS + && env->spr[SPR_BOOKE_TCR] & TCR_FIE)); +} + +/* Return the location of the bit of time base at which the FIT will raise an + interrupt */ +static uint8_t booke_get_fit_target(CPUPPCState *env, ppc_tb_t *tb_env) +{ + uint8_t fp = (env->spr[SPR_BOOKE_TCR] & TCR_FP_MASK) >> TCR_FP_SHIFT; + + if (tb_env->flags & PPC_TIMER_E500) { + /* e500 Fixed-interval timer period extension */ + uint32_t fpext = (env->spr[SPR_BOOKE_TCR] & TCR_E500_FPEXT_MASK) + >> TCR_E500_FPEXT_SHIFT; + fp = 63 - (fp | fpext << 2); + } else { + fp = env->fit_period[fp]; + } + + return fp; +} + +/* Return the location of the bit of time base at which the WDT will raise an + interrupt */ +static uint8_t booke_get_wdt_target(CPUPPCState *env, ppc_tb_t *tb_env) +{ + uint8_t wp = (env->spr[SPR_BOOKE_TCR] & TCR_WP_MASK) >> TCR_WP_SHIFT; + + if (tb_env->flags & PPC_TIMER_E500) { + /* e500 Watchdog timer period extension */ + uint32_t wpext = (env->spr[SPR_BOOKE_TCR] & TCR_E500_WPEXT_MASK) + >> TCR_E500_WPEXT_SHIFT; + wp = 63 - (wp | wpext << 2); + } else { + wp = env->wdt_period[wp]; + } + + return wp; +} + +static void booke_update_fixed_timer(CPUPPCState *env, + uint8_t target_bit, + uint64_t *next, + QEMUTimer *timer, + int tsr_bit) +{ + ppc_tb_t *tb_env = env->tb_env; + uint64_t delta_tick, ticks = 0; + uint64_t tb; + uint64_t period; + uint64_t now; + + if (!(env->spr[SPR_BOOKE_TSR] & tsr_bit)) { + /* + * Don't arm the timer again when the guest has 
the current + * interrupt still pending. Wait for the guest to acknowledge it. + */ + return; + } + + now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + tb = cpu_ppc_get_tb(tb_env, now, tb_env->tb_offset); + period = 1ULL << target_bit; + delta_tick = period - (tb & (period - 1)); + + /* the timer triggers only when the selected bit toggles from 0 to 1 */ + if (tb & period) { + ticks = period; + } + + if (ticks + delta_tick < ticks) { + /* Overflow, so assume the biggest number we can express. */ + ticks = UINT64_MAX; + } else { + ticks += delta_tick; + } + + *next = now + muldiv64(ticks, NANOSECONDS_PER_SECOND, tb_env->tb_freq); + if ((*next < now) || (*next > INT64_MAX)) { + /* Overflow, so assume the biggest number the qemu timer supports. */ + *next = INT64_MAX; + } + + /* XXX: If the expire time is now, we can't run the callback because we + * don't have access to it. So we just set the timer one nanosecond later. + */ + + if (*next == now) { + (*next)++; + } else { + /* + * There's no point in faking any granularity finer than milliseconds; + * anything beyond that just overloads the system. + */ + *next = MAX(*next, now + SCALE_MS); + } + + /* Fire the next timer */ + timer_mod(timer, *next); +} + +static void booke_decr_cb(void *opaque) +{ + PowerPCCPU *cpu = opaque; + CPUPPCState *env = &cpu->env; + + env->spr[SPR_BOOKE_TSR] |= TSR_DIS; + booke_update_irq(cpu); + + if (env->spr[SPR_BOOKE_TCR] & TCR_ARE) { + /* Do not reload 0, it is already there. It would just trigger + * the timer again and lead to an infinite loop */ + if (env->spr[SPR_BOOKE_DECAR] != 0) { + /* Auto Reload */ + cpu_ppc_store_decr(env, env->spr[SPR_BOOKE_DECAR]); + } + } +} + +static void booke_fit_cb(void *opaque) +{ + PowerPCCPU *cpu = opaque; + CPUPPCState *env = &cpu->env; + ppc_tb_t *tb_env; + booke_timer_t *booke_timer; + + tb_env = env->tb_env; + booke_timer = tb_env->opaque; + env->spr[SPR_BOOKE_TSR] |= TSR_FIS; + + booke_update_irq(cpu); + + booke_update_fixed_timer(env, + booke_get_fit_target(env, tb_env), + &booke_timer->fit_next, + booke_timer->fit_timer, + TSR_FIS); +} + +static void booke_wdt_cb(void *opaque) +{ + PowerPCCPU *cpu = opaque; + CPUPPCState *env = &cpu->env; + ppc_tb_t *tb_env; + booke_timer_t *booke_timer; + + tb_env = env->tb_env; + booke_timer = tb_env->opaque; + + /* TODO: There's lots of complicated stuff to do here */ + + booke_update_irq(cpu); + + booke_update_fixed_timer(env, + booke_get_wdt_target(env, tb_env), + &booke_timer->wdt_next, + booke_timer->wdt_timer, + TSR_WIS); +} + +void store_booke_tsr(CPUPPCState *env, target_ulong val) +{ + PowerPCCPU *cpu = env_archcpu(env); + ppc_tb_t *tb_env = env->tb_env; + booke_timer_t *booke_timer = tb_env->opaque; + + env->spr[SPR_BOOKE_TSR] &= ~val; + kvmppc_clear_tsr_bits(cpu, val); + + if (val & TSR_FIS) { + booke_update_fixed_timer(env, + booke_get_fit_target(env, tb_env), + &booke_timer->fit_next, + booke_timer->fit_timer, + TSR_FIS); + } + + if (val & TSR_WIS) { + booke_update_fixed_timer(env, + booke_get_wdt_target(env, tb_env), + &booke_timer->wdt_next, + booke_timer->wdt_timer, + TSR_WIS); + } + + booke_update_irq(cpu); +} + +void store_booke_tcr(CPUPPCState *env, target_ulong val) +{ + PowerPCCPU *cpu = env_archcpu(env); + ppc_tb_t *tb_env = env->tb_env; + booke_timer_t *booke_timer = tb_env->opaque; + + env->spr[SPR_BOOKE_TCR] = val; + kvmppc_set_tcr(cpu); + + booke_update_irq(cpu); + + booke_update_fixed_timer(env, + booke_get_fit_target(env, tb_env), + &booke_timer->fit_next, + booke_timer->fit_timer, + TSR_FIS); + +
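+    /* The TCR write may also have changed the watchdog period, so rearm
+     * the watchdog timer as well. */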
booke_update_fixed_timer(env, + booke_get_wdt_target(env, tb_env), + &booke_timer->wdt_next, + booke_timer->wdt_timer, + TSR_WIS); +} + +static void ppc_booke_timer_reset_handle(void *opaque) +{ + PowerPCCPU *cpu = opaque; + CPUPPCState *env = &cpu->env; + + store_booke_tcr(env, 0); + store_booke_tsr(env, -1); +} + +/* + * This function is called whenever the CPU state changes. + * CPU states are defined in "typedef enum RunState". + * Regarding the timers: when the CPU state changes to running after a debug + * halt or a similar case that takes time, the final watchdog expiry can + * happen in between. That would cause an exit to QEMU and the configured + * watchdog action would be taken. To avoid this we always clear the watchdog + * state when the state changes to running. + */ +static void cpu_state_change_handler(void *opaque, bool running, RunState state) +{ + PowerPCCPU *cpu = opaque; + CPUPPCState *env = &cpu->env; + + if (!running) { + return; + } + + /* + * Clear the watchdog interrupt condition by clearing TSR. + */ + store_booke_tsr(env, TSR_ENW | TSR_WIS | TSR_WRS_MASK); +} + +void ppc_booke_timers_init(PowerPCCPU *cpu, uint32_t freq, uint32_t flags) +{ + ppc_tb_t *tb_env; + booke_timer_t *booke_timer; + int ret = 0; + + tb_env = g_malloc0(sizeof(ppc_tb_t)); + booke_timer = g_malloc0(sizeof(booke_timer_t)); + + cpu->env.tb_env = tb_env; + tb_env->flags = flags | PPC_TIMER_BOOKE | PPC_DECR_ZERO_TRIGGERED; + + tb_env->tb_freq = freq; + tb_env->decr_freq = freq; + tb_env->opaque = booke_timer; + tb_env->decr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &booke_decr_cb, cpu); + + booke_timer->fit_timer = + timer_new_ns(QEMU_CLOCK_VIRTUAL, &booke_fit_cb, cpu); + booke_timer->wdt_timer = + timer_new_ns(QEMU_CLOCK_VIRTUAL, &booke_wdt_cb, cpu); + + ret = kvmppc_booke_watchdog_enable(cpu); + + if (ret) { + /* TODO: Start the QEMU emulated watchdog if not running on KVM, + * or if KVM does not support the emulated watchdog or it is somehow + * not enabled (supported but not enabled would be a bug that needs + * debugging). + */ + } + + qemu_add_vm_change_state_handler(cpu_state_change_handler, cpu); + + qemu_register_reset(ppc_booke_timer_reset_handle, cpu); +} diff --git a/hw/ppc/ppce500_spin.c b/hw/ppc/ppce500_spin.c new file mode 100644 index 000000000..d57b19979 --- /dev/null +++ b/hw/ppc/ppce500_spin.c @@ -0,0 +1,209 @@ +/* + * QEMU PowerPC e500v2 ePAPR spinning code + * + * Copyright (C) 2011 Freescale Semiconductor, Inc. All rights reserved. + * + * Author: Alexander Graf, + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + * + * This code is not really a device, but models an interface that usually + * firmware takes care of. It's used when QEMU plays the role of firmware.
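+ * Each secondary CPU spins on one SpinInfo entry in a table kept in the
+ * device's MMIO window; when the guest writes the entry's 'addr' field
+ * with bit 0 cleared, that CPU is kicked and starts executing at 'addr'
+ * with r3 loaded from the entry (see spin_write() and spin_kick() below).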
+ * + * Specification: + * + * https://www.power.org/resources/downloads/Power_ePAPR_APPROVED_v1.1.pdf + * + */ + +#include "qemu/osdep.h" +#include "qemu/module.h" +#include "qemu/units.h" +#include "hw/hw.h" +#include "hw/sysbus.h" +#include "sysemu/hw_accel.h" +#include "e500.h" +#include "qom/object.h" + +#define MAX_CPUS 32 + +typedef struct spin_info { + uint64_t addr; + uint64_t r3; + uint32_t resv; + uint32_t pir; + uint64_t reserved; +} QEMU_PACKED SpinInfo; + +#define TYPE_E500_SPIN "e500-spin" +OBJECT_DECLARE_SIMPLE_TYPE(SpinState, E500_SPIN) + +struct SpinState { + SysBusDevice parent_obj; + + MemoryRegion iomem; + SpinInfo spin[MAX_CPUS]; +}; + +static void spin_reset(DeviceState *dev) +{ + SpinState *s = E500_SPIN(dev); + int i; + + for (i = 0; i < MAX_CPUS; i++) { + SpinInfo *info = &s->spin[i]; + + stl_p(&info->pir, i); + stq_p(&info->r3, i); + stq_p(&info->addr, 1); + } +} + +static void mmubooke_create_initial_mapping(CPUPPCState *env, + target_ulong va, + hwaddr pa, + hwaddr len) +{ + ppcmas_tlb_t *tlb = booke206_get_tlbm(env, 1, 0, 1); + hwaddr size; + + size = (booke206_page_size_to_tlb(len) << MAS1_TSIZE_SHIFT); + tlb->mas1 = MAS1_VALID | size; + tlb->mas2 = (va & TARGET_PAGE_MASK) | MAS2_M; + tlb->mas7_3 = pa & TARGET_PAGE_MASK; + tlb->mas7_3 |= MAS3_UR | MAS3_UW | MAS3_UX | MAS3_SR | MAS3_SW | MAS3_SX; + env->tlb_dirty = true; +} + +static void spin_kick(CPUState *cs, run_on_cpu_data data) +{ + PowerPCCPU *cpu = POWERPC_CPU(cs); + CPUPPCState *env = &cpu->env; + SpinInfo *curspin = data.host_ptr; + hwaddr map_size = 64 * MiB; + hwaddr map_start; + + cpu_synchronize_state(cs); + stl_p(&curspin->pir, env->spr[SPR_BOOKE_PIR]); + env->nip = ldq_p(&curspin->addr) & (map_size - 1); + env->gpr[3] = ldq_p(&curspin->r3); + env->gpr[4] = 0; + env->gpr[5] = 0; + env->gpr[6] = 0; + env->gpr[7] = map_size; + env->gpr[8] = 0; + env->gpr[9] = 0; + + map_start = ldq_p(&curspin->addr) & ~(map_size - 1); + mmubooke_create_initial_mapping(env, 0, map_start, map_size); + + cs->halted = 0; + cs->exception_index = -1; + cs->stopped = false; + qemu_cpu_kick(cs); +} + +static void spin_write(void *opaque, hwaddr addr, uint64_t value, + unsigned len) +{ + SpinState *s = opaque; + int env_idx = addr / sizeof(SpinInfo); + CPUState *cpu; + SpinInfo *curspin = &s->spin[env_idx]; + uint8_t *curspin_p = (uint8_t*)curspin; + + cpu = qemu_get_cpu(env_idx); + if (cpu == NULL) { + /* Unknown CPU */ + return; + } + + if (cpu->cpu_index == 0) { + /* primary CPU doesn't spin */ + return; + } + + curspin_p = &curspin_p[addr % sizeof(SpinInfo)]; + switch (len) { + case 1: + stb_p(curspin_p, value); + break; + case 2: + stw_p(curspin_p, value); + break; + case 4: + stl_p(curspin_p, value); + break; + } + + if (!(ldq_p(&curspin->addr) & 1)) { + /* run CPU */ + run_on_cpu(cpu, spin_kick, RUN_ON_CPU_HOST_PTR(curspin)); + } +} + +static uint64_t spin_read(void *opaque, hwaddr addr, unsigned len) +{ + SpinState *s = opaque; + uint8_t *spin_p = &((uint8_t*)s->spin)[addr]; + + switch (len) { + case 1: + return ldub_p(spin_p); + case 2: + return lduw_p(spin_p); + case 4: + return ldl_p(spin_p); + default: + hw_error("ppce500: unexpected %s with len = %u", __func__, len); + } +} + +static const MemoryRegionOps spin_rw_ops = { + .read = spin_read, + .write = spin_write, + .endianness = DEVICE_BIG_ENDIAN, +}; + +static void ppce500_spin_initfn(Object *obj) +{ + SysBusDevice *dev = SYS_BUS_DEVICE(obj); + SpinState *s = E500_SPIN(dev); + + memory_region_init_io(&s->iomem, obj, &spin_rw_ops, s, + "e500 spin pv device", 
sizeof(SpinInfo) * MAX_CPUS); + sysbus_init_mmio(dev, &s->iomem); +} + +static void ppce500_spin_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = spin_reset; +} + +static const TypeInfo ppce500_spin_info = { + .name = TYPE_E500_SPIN, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(SpinState), + .instance_init = ppce500_spin_initfn, + .class_init = ppce500_spin_class_init, +}; + +static void ppce500_spin_register_types(void) +{ + type_register_static(&ppce500_spin_info); +} + +type_init(ppce500_spin_register_types) diff --git a/hw/ppc/prep.c b/hw/ppc/prep.c new file mode 100644 index 000000000..25a2e86b4 --- /dev/null +++ b/hw/ppc/prep.c @@ -0,0 +1,440 @@ +/* + * QEMU PPC PREP hardware System Emulator + * + * Copyright (c) 2003-2007 Jocelyn Mayer + * Copyright (c) 2017 Hervé Poussineau + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include "hw/rtc/m48t59.h" +#include "hw/char/serial.h" +#include "hw/block/fdc.h" +#include "net/net.h" +#include "hw/isa/isa.h" +#include "hw/pci/pci.h" +#include "hw/pci/pci_host.h" +#include "hw/ppc/ppc.h" +#include "hw/boards.h" +#include "qapi/error.h" +#include "qemu/error-report.h" +#include "qemu/log.h" +#include "hw/loader.h" +#include "hw/rtc/mc146818rtc.h" +#include "hw/isa/pc87312.h" +#include "hw/qdev-properties.h" +#include "sysemu/kvm.h" +#include "sysemu/reset.h" +#include "trace.h" +#include "elf.h" +#include "qemu/units.h" +#include "kvm_ppc.h" + +/* SMP is not enabled, for now */ +#define MAX_CPUS 1 + +#define MAX_IDE_BUS 2 + +#define CFG_ADDR 0xf0000510 + +#define KERNEL_LOAD_ADDR 0x01000000 +#define INITRD_LOAD_ADDR 0x01800000 + +#define NVRAM_SIZE 0x2000 + +static void fw_cfg_boot_set(void *opaque, const char *boot_device, + Error **errp) +{ + fw_cfg_modify_i16(opaque, FW_CFG_BOOT_DEVICE, boot_device[0]); +} + +static void ppc_prep_reset(void *opaque) +{ + PowerPCCPU *cpu = opaque; + + cpu_reset(CPU(cpu)); +} + + +/*****************************************************************************/ +/* NVRAM helpers */ +static inline uint32_t nvram_read(Nvram *nvram, uint32_t addr) +{ + NvramClass *k = NVRAM_GET_CLASS(nvram); + return (k->read)(nvram, addr); +} + +static inline void nvram_write(Nvram *nvram, uint32_t addr, uint32_t val) +{ + NvramClass *k = NVRAM_GET_CLASS(nvram); + (k->write)(nvram, addr, val); +} + +static void NVRAM_set_byte(Nvram *nvram, uint32_t addr, uint8_t value) +{ + nvram_write(nvram, addr, value); +} + +static uint8_t NVRAM_get_byte(Nvram *nvram, uint32_t addr) +{ + return nvram_read(nvram, addr); +} + +static void NVRAM_set_word(Nvram *nvram, uint32_t addr, uint16_t value) +{ + nvram_write(nvram, addr, value >> 8); + nvram_write(nvram, addr + 1, value & 0xFF); +} + +static uint16_t NVRAM_get_word(Nvram *nvram, uint32_t addr) +{ + uint16_t tmp; + + tmp = nvram_read(nvram, addr) << 8; + tmp |= nvram_read(nvram, addr + 1); + + return tmp; +} + +static void NVRAM_set_lword(Nvram *nvram, uint32_t addr, uint32_t value) +{ + nvram_write(nvram, addr, value >> 24); + nvram_write(nvram, addr + 1, (value >> 16) & 0xFF); + nvram_write(nvram, addr + 2, (value >> 8) & 0xFF); + nvram_write(nvram, addr + 3, value & 0xFF); +} + +static void NVRAM_set_string(Nvram *nvram, uint32_t addr, const char *str, + uint32_t max) +{ + int i; + + for (i = 0; i < max && str[i] != '\0'; i++) { + nvram_write(nvram, addr + i, str[i]); + } + nvram_write(nvram, addr + i, str[i]); + nvram_write(nvram, addr + max - 1, '\0'); +} + +static uint16_t NVRAM_crc_update (uint16_t prev, uint16_t value) +{ + uint16_t tmp; + uint16_t pd, pd1, pd2; + + tmp = prev >> 8; + pd = prev ^ value; + pd1 = pd & 0x000F; + pd2 = ((pd >> 4) & 0x000F) ^ pd1; + tmp ^= (pd1 << 3) | (pd1 << 8); + tmp ^= pd2 | (pd2 << 7) | (pd2 << 12); + + return tmp; +} + +static uint16_t NVRAM_compute_crc (Nvram *nvram, uint32_t start, uint32_t count) +{ + uint32_t i; + uint16_t crc = 0xFFFF; + int odd; + + odd = count & 1; + count &= ~1; + for (i = 0; i != count; i++) { + crc = NVRAM_crc_update(crc, NVRAM_get_word(nvram, start + i)); + } + if (odd) { + crc = NVRAM_crc_update(crc, NVRAM_get_byte(nvram, start + i) << 8); + } + + return crc; +} + +#define CMDLINE_ADDR 0x017ff000 + +static int PPC_NVRAM_set_params (Nvram *nvram, uint16_t NVRAM_size, + const char *arch, + uint32_t RAM_size, int boot_device, + uint32_t kernel_image, uint32_t kernel_size, + const char *cmdline, + uint32_t 
initrd_image, uint32_t initrd_size, + uint32_t NVRAM_image, + int width, int height, int depth) +{ + uint16_t crc; + + /* Set parameters for Open Hack'Ware BIOS */ + NVRAM_set_string(nvram, 0x00, "QEMU_BIOS", 16); + NVRAM_set_lword(nvram, 0x10, 0x00000002); /* structure v2 */ + NVRAM_set_word(nvram, 0x14, NVRAM_size); + NVRAM_set_string(nvram, 0x20, arch, 16); + NVRAM_set_lword(nvram, 0x30, RAM_size); + NVRAM_set_byte(nvram, 0x34, boot_device); + NVRAM_set_lword(nvram, 0x38, kernel_image); + NVRAM_set_lword(nvram, 0x3C, kernel_size); + if (cmdline) { + /* XXX: put the cmdline in NVRAM too ? */ + pstrcpy_targphys("cmdline", CMDLINE_ADDR, RAM_size - CMDLINE_ADDR, + cmdline); + NVRAM_set_lword(nvram, 0x40, CMDLINE_ADDR); + NVRAM_set_lword(nvram, 0x44, strlen(cmdline)); + } else { + NVRAM_set_lword(nvram, 0x40, 0); + NVRAM_set_lword(nvram, 0x44, 0); + } + NVRAM_set_lword(nvram, 0x48, initrd_image); + NVRAM_set_lword(nvram, 0x4C, initrd_size); + NVRAM_set_lword(nvram, 0x50, NVRAM_image); + + NVRAM_set_word(nvram, 0x54, width); + NVRAM_set_word(nvram, 0x56, height); + NVRAM_set_word(nvram, 0x58, depth); + crc = NVRAM_compute_crc(nvram, 0x00, 0xF8); + NVRAM_set_word(nvram, 0xFC, crc); + + return 0; +} + +static int prep_set_cmos_checksum(DeviceState *dev, void *opaque) +{ + uint16_t checksum = *(uint16_t *)opaque; + ISADevice *rtc; + + if (object_dynamic_cast(OBJECT(dev), TYPE_MC146818_RTC)) { + rtc = ISA_DEVICE(dev); + rtc_set_memory(rtc, 0x2e, checksum & 0xff); + rtc_set_memory(rtc, 0x3e, checksum & 0xff); + rtc_set_memory(rtc, 0x2f, checksum >> 8); + rtc_set_memory(rtc, 0x3f, checksum >> 8); + + object_property_add_alias(qdev_get_machine(), "rtc-time", OBJECT(rtc), + "date"); + } + return 0; +} + +static void ibm_40p_init(MachineState *machine) +{ + const char *bios_name = machine->firmware ?: "openbios-ppc"; + CPUPPCState *env = NULL; + uint16_t cmos_checksum; + PowerPCCPU *cpu; + DeviceState *dev, *i82378_dev; + SysBusDevice *pcihost, *s; + Nvram *m48t59 = NULL; + PCIBus *pci_bus; + ISADevice *isa_dev; + ISABus *isa_bus; + void *fw_cfg; + int i; + uint32_t kernel_base = 0, initrd_base = 0; + long kernel_size = 0, initrd_size = 0; + char boot_device; + + /* init CPU */ + cpu = POWERPC_CPU(cpu_create(machine->cpu_type)); + env = &cpu->env; + if (PPC_INPUT(env) != PPC_FLAGS_INPUT_6xx) { + error_report("only 6xx bus is supported on this machine"); + exit(1); + } + + if (env->flags & POWERPC_FLAG_RTC_CLK) { + /* POWER / PowerPC 601 RTC clock frequency is 7.8125 MHz */ + cpu_ppc_tb_init(env, 7812500UL); + } else { + /* Set time-base frequency to 100 Mhz */ + cpu_ppc_tb_init(env, 100UL * 1000UL * 1000UL); + } + qemu_register_reset(ppc_prep_reset, cpu); + + /* PCI host */ + dev = qdev_new("raven-pcihost"); + qdev_prop_set_string(dev, "bios-name", bios_name); + qdev_prop_set_uint32(dev, "elf-machine", PPC_ELF_MACHINE); + pcihost = SYS_BUS_DEVICE(dev); + object_property_add_child(qdev_get_machine(), "raven", OBJECT(dev)); + sysbus_realize_and_unref(pcihost, &error_fatal); + pci_bus = PCI_BUS(qdev_get_child_bus(dev, "pci.0")); + if (!pci_bus) { + error_report("could not create PCI host controller"); + exit(1); + } + + /* PCI -> ISA bridge */ + i82378_dev = DEVICE(pci_create_simple(pci_bus, PCI_DEVFN(11, 0), "i82378")); + qdev_connect_gpio_out(i82378_dev, 0, + cpu->env.irq_inputs[PPC6xx_INPUT_INT]); + sysbus_connect_irq(pcihost, 0, qdev_get_gpio_in(i82378_dev, 15)); + isa_bus = ISA_BUS(qdev_get_child_bus(i82378_dev, "isa.0")); + + /* Memory controller */ + isa_dev = isa_new("rs6000-mc"); + dev = 
DEVICE(isa_dev); + qdev_prop_set_uint32(dev, "ram-size", machine->ram_size); + isa_realize_and_unref(isa_dev, isa_bus, &error_fatal); + + /* RTC */ + isa_dev = isa_new(TYPE_MC146818_RTC); + dev = DEVICE(isa_dev); + qdev_prop_set_int32(dev, "base_year", 1900); + isa_realize_and_unref(isa_dev, isa_bus, &error_fatal); + + /* initialize CMOS checksums */ + cmos_checksum = 0x6aa9; + qbus_walk_children(BUS(isa_bus), prep_set_cmos_checksum, NULL, NULL, NULL, + &cmos_checksum); + + /* add some more devices */ + if (defaults_enabled()) { + m48t59 = NVRAM(isa_create_simple(isa_bus, "isa-m48t59")); + + isa_dev = isa_new("cs4231a"); + dev = DEVICE(isa_dev); + qdev_prop_set_uint32(dev, "iobase", 0x830); + qdev_prop_set_uint32(dev, "irq", 10); + isa_realize_and_unref(isa_dev, isa_bus, &error_fatal); + + isa_dev = isa_new("pc87312"); + dev = DEVICE(isa_dev); + qdev_prop_set_uint32(dev, "config", 12); + isa_realize_and_unref(isa_dev, isa_bus, &error_fatal); + + isa_dev = isa_new("prep-systemio"); + dev = DEVICE(isa_dev); + qdev_prop_set_uint32(dev, "ibm-planar-id", 0xfc); + qdev_prop_set_uint32(dev, "equipment", 0xc0); + isa_realize_and_unref(isa_dev, isa_bus, &error_fatal); + + dev = DEVICE(pci_create_simple(pci_bus, PCI_DEVFN(1, 0), + "lsi53c810")); + lsi53c8xx_handle_legacy_cmdline(dev); + qdev_connect_gpio_out(dev, 0, qdev_get_gpio_in(i82378_dev, 13)); + + /* XXX: s3-trio at PCI_DEVFN(2, 0) */ + pci_vga_init(pci_bus); + + for (i = 0; i < nb_nics; i++) { + pci_nic_init_nofail(&nd_table[i], pci_bus, "pcnet", + i == 0 ? "3" : NULL); + } + } + + /* Prepare firmware configuration for OpenBIOS */ + dev = qdev_new(TYPE_FW_CFG_MEM); + fw_cfg = FW_CFG(dev); + qdev_prop_set_uint32(dev, "data_width", 1); + qdev_prop_set_bit(dev, "dma_enabled", false); + object_property_add_child(OBJECT(qdev_get_machine()), TYPE_FW_CFG, + OBJECT(fw_cfg)); + s = SYS_BUS_DEVICE(dev); + sysbus_realize_and_unref(s, &error_fatal); + sysbus_mmio_map(s, 0, CFG_ADDR); + sysbus_mmio_map(s, 1, CFG_ADDR + 2); + + if (machine->kernel_filename) { + /* load kernel */ + kernel_base = KERNEL_LOAD_ADDR; + kernel_size = load_image_targphys(machine->kernel_filename, + kernel_base, + machine->ram_size - kernel_base); + if (kernel_size < 0) { + error_report("could not load kernel '%s'", + machine->kernel_filename); + exit(1); + } + fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_ADDR, kernel_base); + fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_SIZE, kernel_size); + /* load initrd */ + if (machine->initrd_filename) { + initrd_base = INITRD_LOAD_ADDR; + initrd_size = load_image_targphys(machine->initrd_filename, + initrd_base, + machine->ram_size - initrd_base); + if (initrd_size < 0) { + error_report("could not load initial ram disk '%s'", + machine->initrd_filename); + exit(1); + } + fw_cfg_add_i32(fw_cfg, FW_CFG_INITRD_ADDR, initrd_base); + fw_cfg_add_i32(fw_cfg, FW_CFG_INITRD_SIZE, initrd_size); + } + if (machine->kernel_cmdline && *machine->kernel_cmdline) { + fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_CMDLINE, CMDLINE_ADDR); + pstrcpy_targphys("cmdline", CMDLINE_ADDR, TARGET_PAGE_SIZE, + machine->kernel_cmdline); + fw_cfg_add_string(fw_cfg, FW_CFG_CMDLINE_DATA, + machine->kernel_cmdline); + fw_cfg_add_i32(fw_cfg, FW_CFG_CMDLINE_SIZE, + strlen(machine->kernel_cmdline) + 1); + } + boot_device = 'm'; + } else { + boot_device = machine->boot_order[0]; + } + + fw_cfg_add_i16(fw_cfg, FW_CFG_MAX_CPUS, (uint16_t)machine->smp.max_cpus); + fw_cfg_add_i64(fw_cfg, FW_CFG_RAM_SIZE, (uint64_t)machine->ram_size); + fw_cfg_add_i16(fw_cfg, FW_CFG_MACHINE_ID, ARCH_PREP); + + 
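+ /*
+ * Consumer-side sketch, for orientation (an assumption, not part of
+ * this patch): OpenBIOS fetches each item registered here through the
+ * MMIO pair mapped at CFG_ADDR above, writing the key to the selector
+ * register and then streaming the value one byte at a time from the
+ * data register, since "data_width" is 1:
+ *
+ * mmio_write16(CFG_ADDR, FW_CFG_RAM_SIZE); select the item
+ * value_byte = mmio_read8(CFG_ADDR + 2); repeat per byte
+ *
+ * mmio_write16() and mmio_read8() are hypothetical guest-side
+ * accessors, named only for this illustration.
+ */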
fw_cfg_add_i16(fw_cfg, FW_CFG_PPC_WIDTH, graphic_width); + fw_cfg_add_i16(fw_cfg, FW_CFG_PPC_HEIGHT, graphic_height); + fw_cfg_add_i16(fw_cfg, FW_CFG_PPC_DEPTH, graphic_depth); + + fw_cfg_add_i32(fw_cfg, FW_CFG_PPC_IS_KVM, kvm_enabled()); + if (kvm_enabled()) { + uint8_t *hypercall; + + fw_cfg_add_i32(fw_cfg, FW_CFG_PPC_TBFREQ, kvmppc_get_tbfreq()); + hypercall = g_malloc(16); + kvmppc_get_hypercall(env, hypercall, 16); + fw_cfg_add_bytes(fw_cfg, FW_CFG_PPC_KVM_HC, hypercall, 16); + fw_cfg_add_i32(fw_cfg, FW_CFG_PPC_KVM_PID, getpid()); + } else { + fw_cfg_add_i32(fw_cfg, FW_CFG_PPC_TBFREQ, NANOSECONDS_PER_SECOND); + } + fw_cfg_add_i16(fw_cfg, FW_CFG_BOOT_DEVICE, boot_device); + qemu_register_boot_set(fw_cfg_boot_set, fw_cfg); + + /* Prepare firmware configuration for Open Hack'Ware */ + if (m48t59) { + PPC_NVRAM_set_params(m48t59, NVRAM_SIZE, "PREP", machine->ram_size, + boot_device, + kernel_base, kernel_size, + machine->kernel_cmdline, + initrd_base, initrd_size, + /* XXX: need an option to load a NVRAM image */ + 0, + graphic_width, graphic_height, graphic_depth); + } +} + +static void ibm_40p_machine_init(MachineClass *mc) +{ + mc->desc = "IBM RS/6000 7020 (40p)"; + mc->init = ibm_40p_init; + mc->max_cpus = 1; + mc->default_ram_size = 128 * MiB; + mc->block_default_type = IF_SCSI; + mc->default_boot_order = "c"; + mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("604"); + mc->default_display = "std"; +} + +DEFINE_MACHINE("40p", ibm_40p_machine_init) diff --git a/hw/ppc/prep_systemio.c b/hw/ppc/prep_systemio.c new file mode 100644 index 000000000..b2bd78324 --- /dev/null +++ b/hw/ppc/prep_systemio.c @@ -0,0 +1,315 @@ +/* + * QEMU PReP System I/O emulation + * + * Copyright (c) 2017 Hervé Poussineau + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "hw/irq.h" +#include "hw/isa/isa.h" +#include "hw/qdev-properties.h" +#include "migration/vmstate.h" +#include "exec/address-spaces.h" +#include "qom/object.h" +#include "qemu/error-report.h" /* for error_report() */ +#include "qemu/module.h" +#include "sysemu/runstate.h" +#include "cpu.h" +#include "trace.h" + +#define TYPE_PREP_SYSTEMIO "prep-systemio" +OBJECT_DECLARE_SIMPLE_TYPE(PrepSystemIoState, PREP_SYSTEMIO) + +/* Bit as defined in PowerPC Reference Platform v1.1, sect. 6.1.5, p.
132 */ +#define PREP_BIT(n) (1 << (7 - (n))) + +struct PrepSystemIoState { + ISADevice parent_obj; + MemoryRegion ppc_parity_mem; + + qemu_irq non_contiguous_io_map_irq; + uint8_t sreset; /* 0x0092 */ + uint8_t equipment; /* 0x080c */ + uint8_t system_control; /* 0x081c */ + uint8_t iomap_type; /* 0x0850 */ + uint8_t ibm_planar_id; /* 0x0852 */ + qemu_irq softreset_irq; + PortioList portio; +}; + +/* PORT 0092 -- Special Port 92 (Read/Write) */ + +enum { + PORT0092_SOFTRESET = PREP_BIT(7), + PORT0092_LE_MODE = PREP_BIT(6), +}; + +static void prep_port0092_write(void *opaque, uint32_t addr, uint32_t val) +{ + PrepSystemIoState *s = opaque; + + trace_prep_systemio_write(addr, val); + + s->sreset = val & PORT0092_SOFTRESET; + qemu_set_irq(s->softreset_irq, s->sreset); + + if ((val & PORT0092_LE_MODE) != 0) { + /* XXX Not supported yet */ + error_report("little-endian mode not supported"); + vm_stop(RUN_STATE_PAUSED); + } else { + /* Nothing to do */ + } +} + +static uint32_t prep_port0092_read(void *opaque, uint32_t addr) +{ + PrepSystemIoState *s = opaque; + trace_prep_systemio_read(addr, s->sreset); + return s->sreset; +} + +/* PORT 0808 -- Hardfile Light Register (Write Only) */ + +enum { + PORT0808_HARDFILE_LIGHT_ON = PREP_BIT(7), +}; + +static void prep_port0808_write(void *opaque, uint32_t addr, uint32_t val) +{ + trace_prep_systemio_write(addr, val); +} + +/* PORT 0810 -- Password Protect 1 Register (Write Only) */ + +/* reset by port 0x4D in the SIO */ +static void prep_port0810_write(void *opaque, uint32_t addr, uint32_t val) +{ + trace_prep_systemio_write(addr, val); +} + +/* PORT 0812 -- Password Protect 2 Register (Write Only) */ + +/* reset by port 0x4D in the SIO */ +static void prep_port0812_write(void *opaque, uint32_t addr, uint32_t val) +{ + trace_prep_systemio_write(addr, val); +} + +/* PORT 0814 -- L2 Invalidate Register (Write Only) */ + +static void prep_port0814_write(void *opaque, uint32_t addr, uint32_t val) +{ + trace_prep_systemio_write(addr, val); +} + +/* PORT 0818 -- Reserved for Keylock (Read Only) */ + +enum { + PORT0818_KEYLOCK_SIGNAL_HIGH = PREP_BIT(7), +}; + +static uint32_t prep_port0818_read(void *opaque, uint32_t addr) +{ + uint32_t val = 0; + trace_prep_systemio_read(addr, val); + return val; +} + +/* PORT 080C -- Equipment */ + +enum { + PORT080C_SCSIFUSE = PREP_BIT(1), + PORT080C_L2_COPYBACK = PREP_BIT(4), + PORT080C_L2_256 = PREP_BIT(5), + PORT080C_UPGRADE_CPU = PREP_BIT(6), + PORT080C_L2 = PREP_BIT(7), +}; + +static uint32_t prep_port080c_read(void *opaque, uint32_t addr) +{ + PrepSystemIoState *s = opaque; + trace_prep_systemio_read(addr, s->equipment); + return s->equipment; +} + +/* PORT 081C -- System Control Register (Read/Write) */ + +enum { + PORT081C_FLOPPY_MOTOR_INHIBIT = PREP_BIT(3), + PORT081C_MASK_TEA = PREP_BIT(2), + PORT081C_L2_UPDATE_INHIBIT = PREP_BIT(1), + PORT081C_L2_CACHEMISS_INHIBIT = PREP_BIT(0), +}; + +static void prep_port081c_write(void *opaque, uint32_t addr, uint32_t val) +{ + static const uint8_t mask = PORT081C_FLOPPY_MOTOR_INHIBIT | + PORT081C_MASK_TEA | + PORT081C_L2_UPDATE_INHIBIT | + PORT081C_L2_CACHEMISS_INHIBIT; + PrepSystemIoState *s = opaque; + trace_prep_systemio_write(addr, val); + s->system_control = val & mask; +} + +static uint32_t prep_port081c_read(void *opaque, uint32_t addr) +{ + PrepSystemIoState *s = opaque; + trace_prep_systemio_read(addr, s->system_control); + return s->system_control; +} + +/* System Board Identification */ + +static uint32_t prep_port0852_read(void *opaque, uint32_t addr) +{ + 
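+ /*
+ * The value read back here is whatever the board wired into the
+ * "ibm-planar-id" property at creation time (0xfc for the IBM 40p
+ * machine above), letting firmware identify the system board model.
+ */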
PrepSystemIoState *s = opaque; + trace_prep_systemio_read(addr, s->ibm_planar_id); + return s->ibm_planar_id; +} + +/* PORT 0850 -- I/O Map Type Register (Read/Write) */ + +enum { + PORT0850_IOMAP_NONCONTIGUOUS = PREP_BIT(7), +}; + +static uint32_t prep_port0850_read(void *opaque, uint32_t addr) +{ + PrepSystemIoState *s = opaque; + trace_prep_systemio_read(addr, s->iomap_type); + return s->iomap_type; +} + +static void prep_port0850_write(void *opaque, uint32_t addr, uint32_t val) +{ + PrepSystemIoState *s = opaque; + + trace_prep_systemio_write(addr, val); + qemu_set_irq(s->non_contiguous_io_map_irq, + val & PORT0850_IOMAP_NONCONTIGUOUS); + s->iomap_type = val & PORT0850_IOMAP_NONCONTIGUOUS; +} + +static const MemoryRegionPortio ppc_io800_port_list[] = { + { 0x092, 1, 1, .read = prep_port0092_read, + .write = prep_port0092_write, }, + { 0x808, 1, 1, .write = prep_port0808_write, }, + { 0x80c, 1, 1, .read = prep_port080c_read, }, + { 0x810, 1, 1, .write = prep_port0810_write, }, + { 0x812, 1, 1, .write = prep_port0812_write, }, + { 0x814, 1, 1, .write = prep_port0814_write, }, + { 0x818, 1, 1, .read = prep_port0818_read }, + { 0x81c, 1, 1, .read = prep_port081c_read, + .write = prep_port081c_write, }, + { 0x850, 1, 1, .read = prep_port0850_read, + .write = prep_port0850_write, }, + { 0x852, 1, 1, .read = prep_port0852_read, }, + PORTIO_END_OF_LIST() +}; + +static uint64_t ppc_parity_error_readl(void *opaque, hwaddr addr, + unsigned int size) +{ + uint32_t val = 0; + trace_prep_systemio_read((unsigned int)addr, val); + return val; +} + +static void ppc_parity_error_writel(void *opaque, hwaddr addr, + uint64_t data, unsigned size) +{ + qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid access\n", __func__); +} + +static const MemoryRegionOps ppc_parity_error_ops = { + .read = ppc_parity_error_readl, + .write = ppc_parity_error_writel, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +static void prep_systemio_realize(DeviceState *dev, Error **errp) +{ + ISADevice *isa = ISA_DEVICE(dev); + PrepSystemIoState *s = PREP_SYSTEMIO(dev); + PowerPCCPU *cpu; + + qdev_init_gpio_out(dev, &s->non_contiguous_io_map_irq, 1); + s->iomap_type = PORT0850_IOMAP_NONCONTIGUOUS; + qemu_set_irq(s->non_contiguous_io_map_irq, + s->iomap_type & PORT0850_IOMAP_NONCONTIGUOUS); + cpu = POWERPC_CPU(first_cpu); + s->softreset_irq = cpu->env.irq_inputs[PPC6xx_INPUT_HRESET]; + + isa_register_portio_list(isa, &s->portio, 0x0, ppc_io800_port_list, s, + "systemio800"); + + memory_region_init_io(&s->ppc_parity_mem, OBJECT(dev), + &ppc_parity_error_ops, s, "ppc-parity", 0x4); + memory_region_add_subregion(get_system_memory(), 0xbfffeff0, + &s->ppc_parity_mem); +} + +static const VMStateDescription vmstate_prep_systemio = { + .name = "prep_systemio", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT8(sreset, PrepSystemIoState), + VMSTATE_UINT8(system_control, PrepSystemIoState), + VMSTATE_UINT8(iomap_type, PrepSystemIoState), + VMSTATE_END_OF_LIST() + }, +}; + +static Property prep_systemio_properties[] = { + DEFINE_PROP_UINT8("ibm-planar-id", PrepSystemIoState, ibm_planar_id, 0), + DEFINE_PROP_UINT8("equipment", PrepSystemIoState, equipment, 0), + DEFINE_PROP_END_OF_LIST() +}; + +static void prep_systemio_class_initfn(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = prep_systemio_realize; + dc->vmsd = &vmstate_prep_systemio; + device_class_set_props(dc, prep_systemio_properties); +} + +static TypeInfo prep_systemio800_info = 
{ + .name = TYPE_PREP_SYSTEMIO, + .parent = TYPE_ISA_DEVICE, + .instance_size = sizeof(PrepSystemIoState), + .class_init = prep_systemio_class_initfn, +}; + +static void prep_systemio_register_types(void) +{ + type_register_static(&prep_systemio800_info); +} + +type_init(prep_systemio_register_types) diff --git a/hw/ppc/rs6000_mc.c b/hw/ppc/rs6000_mc.c new file mode 100644 index 000000000..c0bc212e9 --- /dev/null +++ b/hw/ppc/rs6000_mc.c @@ -0,0 +1,238 @@ +/* + * QEMU RS/6000 memory controller + * + * Copyright (c) 2017 Hervé Poussineau + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) version 3 or any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "hw/isa/isa.h" +#include "hw/qdev-properties.h" +#include "migration/vmstate.h" +#include "exec/address-spaces.h" +#include "qapi/error.h" +#include "trace.h" +#include "qom/object.h" + +#define TYPE_RS6000MC "rs6000-mc" +OBJECT_DECLARE_SIMPLE_TYPE(RS6000MCState, RS6000MC) + +struct RS6000MCState { + ISADevice parent_obj; + /* see US patent 5,684,979 for details (expired 2001-11-04) */ + uint32_t ram_size; + bool autoconfigure; + MemoryRegion simm[6]; + unsigned int simm_size[6]; + uint32_t end_address[8]; + uint8_t port0820_index; + PortioList portio; +}; + +/* PORT 0803 -- SIMM ID Register (32/8 MB) (Read Only) */ + +static uint32_t rs6000mc_port0803_read(void *opaque, uint32_t addr) +{ + RS6000MCState *s = opaque; + uint32_t val = 0; + int socket; + + /* (1 << socket) indicates 32 MB SIMM at given socket */ + for (socket = 0; socket < 6; socket++) { + if (s->simm_size[socket] == 32) { + val |= (1 << socket); + } + } + + trace_rs6000mc_id_read(addr, val); + return val; +} + +/* PORT 0804 -- SIMM Presence Register (Read Only) */ + +static uint32_t rs6000mc_port0804_read(void *opaque, uint32_t addr) +{ + RS6000MCState *s = opaque; + uint32_t val = 0xff; + int socket; + + /* (1 << socket) indicates SIMM absence at given socket */ + for (socket = 0; socket < 6; socket++) { + if (s->simm_size[socket]) { + val &= ~(1 << socket); + } + } + s->port0820_index = 0; + + trace_rs6000mc_presence_read(addr, val); + return val; +} + +/* Memory Controller Size Programming Register */ + +static uint32_t rs6000mc_port0820_read(void *opaque, uint32_t addr) +{ + RS6000MCState *s = opaque; + uint32_t val = s->end_address[s->port0820_index] & 0x1f; + s->port0820_index = (s->port0820_index + 1) & 7; + trace_rs6000mc_size_read(addr, val); + return val; +} + +static void rs6000mc_port0820_write(void *opaque, uint32_t addr, uint32_t val) +{ + RS6000MCState *s = opaque; + uint8_t socket = val >> 5; + uint32_t end_address = val & 0x1f; + + trace_rs6000mc_size_write(addr, val); + s->end_address[socket] = end_address; + if (socket > 0 && socket < 7) { + if (s->simm_size[socket - 1]) { + uint32_t size; + uint32_t start_address = 0; + if (socket > 1) { + start_address = s->end_address[socket - 1]; + } + + size = end_address - start_address; + memory_region_set_enabled(&s->simm[socket - 1],
size != 0); + memory_region_set_address(&s->simm[socket - 1], + start_address * 8 * MiB); + } + } +} + +/* Read Memory Parity Error */ + +enum { + PORT0841_NO_ERROR_DETECTED = 0x01, +}; + +static uint32_t rs6000mc_port0841_read(void *opaque, uint32_t addr) +{ + uint32_t val = PORT0841_NO_ERROR_DETECTED; + trace_rs6000mc_parity_read(addr, val); + return val; +} + +static const MemoryRegionPortio rs6000mc_port_list[] = { + { 0x803, 1, 1, .read = rs6000mc_port0803_read }, + { 0x804, 1, 1, .read = rs6000mc_port0804_read }, + { 0x820, 1, 1, .read = rs6000mc_port0820_read, + .write = rs6000mc_port0820_write, }, + { 0x841, 1, 1, .read = rs6000mc_port0841_read }, + PORTIO_END_OF_LIST() +}; + +static void rs6000mc_realize(DeviceState *dev, Error **errp) +{ + RS6000MCState *s = RS6000MC(dev); + int socket = 0; + unsigned int ram_size = s->ram_size / MiB; + Error *local_err = NULL; + + while (socket < 6) { + if (ram_size >= 64) { + s->simm_size[socket] = 32; + s->simm_size[socket + 1] = 32; + ram_size -= 64; + } else if (ram_size >= 16) { + s->simm_size[socket] = 8; + s->simm_size[socket + 1] = 8; + ram_size -= 16; + } else { + /* Not enough memory */ + break; + } + socket += 2; + } + + for (socket = 0; socket < 6; socket++) { + if (s->simm_size[socket]) { + char name[] = "simm.?"; + name[5] = socket + '0'; + memory_region_init_ram(&s->simm[socket], OBJECT(dev), name, + s->simm_size[socket] * MiB, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + memory_region_add_subregion_overlap(get_system_memory(), 0, + &s->simm[socket], socket); + } + } + if (ram_size) { + /* unable to push all requested RAM in SIMMs */ + error_setg(errp, "RAM size incompatible with this board. " + "Try again with something else, like %" PRId64 " MB", + s->ram_size / MiB - ram_size); + return; + } + + if (s->autoconfigure) { + uint32_t start_address = 0; + for (socket = 0; socket < 6; socket++) { + if (s->simm_size[socket]) { + memory_region_set_enabled(&s->simm[socket], true); + memory_region_set_address(&s->simm[socket], start_address); + start_address += memory_region_size(&s->simm[socket]); + } + } + } + + isa_register_portio_list(ISA_DEVICE(dev), &s->portio, 0x0, + rs6000mc_port_list, s, "rs6000mc"); +} + +static const VMStateDescription vmstate_rs6000mc = { + .name = "rs6000-mc", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT8(port0820_index, RS6000MCState), + VMSTATE_END_OF_LIST() + }, +}; + +static Property rs6000mc_properties[] = { + DEFINE_PROP_UINT32("ram-size", RS6000MCState, ram_size, 0), + DEFINE_PROP_BOOL("auto-configure", RS6000MCState, autoconfigure, true), + DEFINE_PROP_END_OF_LIST() +}; + +static void rs6000mc_class_initfn(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = rs6000mc_realize; + dc->vmsd = &vmstate_rs6000mc; + device_class_set_props(dc, rs6000mc_properties); +} + +static const TypeInfo rs6000mc_info = { + .name = TYPE_RS6000MC, + .parent = TYPE_ISA_DEVICE, + .instance_size = sizeof(RS6000MCState), + .class_init = rs6000mc_class_initfn, +}; + +static void rs6000mc_types(void) +{ + type_register_static(&rs6000mc_info); +} + +type_init(rs6000mc_types) diff --git a/hw/ppc/sam460ex.c b/hw/ppc/sam460ex.c new file mode 100644 index 000000000..0737234d6 --- /dev/null +++ b/hw/ppc/sam460ex.c @@ -0,0 +1,516 @@ +/* + * QEMU aCube Sam460ex board emulation + * + * Copyright (c) 2012 François Revol + * Copyright (c) 2016-2019 BALATON Zoltan + * + * This file is derived from 
hw/ppc440_bamboo.c, + * the copyright for that material belongs to the original owners. + * + * This work is licensed under the GNU GPL license version 2 or later. + * + */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "qemu-common.h" +#include "qemu/datadir.h" +#include "qemu/error-report.h" +#include "qapi/error.h" +#include "hw/boards.h" +#include "sysemu/kvm.h" +#include "kvm_ppc.h" +#include "sysemu/device_tree.h" +#include "sysemu/block-backend.h" +#include "hw/loader.h" +#include "elf.h" +#include "exec/memory.h" +#include "ppc440.h" +#include "ppc405.h" +#include "hw/block/flash.h" +#include "sysemu/sysemu.h" +#include "sysemu/reset.h" +#include "hw/sysbus.h" +#include "hw/char/serial.h" +#include "hw/i2c/ppc4xx_i2c.h" +#include "hw/i2c/smbus_eeprom.h" +#include "hw/usb/hcd-ehci.h" +#include "hw/ppc/fdt.h" +#include "hw/qdev-properties.h" +#include "hw/intc/ppc-uic.h" + +#include + +#define BINARY_DEVICE_TREE_FILE "canyonlands.dtb" +#define UBOOT_FILENAME "u-boot-sam460-20100605.bin" +/* to extract the official U-Boot bin from the updater: */ +/* dd bs=1 skip=$(($(stat -c '%s' updater/updater-460) - 0x80000)) \ + if=updater/updater-460 of=u-boot-sam460-20100605.bin */ + +/* from Sam460 U-Boot include/configs/Sam460ex.h */ +#define FLASH_BASE 0xfff00000 +#define FLASH_BASE_H 0x4 +#define FLASH_SIZE (1 * MiB) +#define UBOOT_LOAD_BASE 0xfff80000 +#define UBOOT_SIZE 0x00080000 +#define UBOOT_ENTRY 0xfffffffc + +/* from U-Boot */ +#define EPAPR_MAGIC (0x45504150) +#define KERNEL_ADDR 0x1000000 +#define FDT_ADDR 0x1800000 +#define RAMDISK_ADDR 0x1900000 + +/* Sam460ex IRQ MAP: + IRQ0 = ETH_INT + IRQ1 = FPGA_INT + IRQ2 = PCI_INT (PCIA, PCIB, PCIC, PCIB) + IRQ3 = FPGA_INT2 + IRQ11 = RTC_INT + IRQ12 = SM502_INT +*/ + +#define CPU_FREQ 1150000000 +#define PLB_FREQ 230000000 +#define OPB_FREQ 115000000 +#define EBC_FREQ 115000000 +#define UART_FREQ 11059200 +#define SDRAM_NR_BANKS 4 + +/* The SoC could also handle 4 GiB but firmware does not work with that. */ +/* Maybe it overflows a signed 32 bit number somewhere? */ +static const ram_addr_t ppc460ex_sdram_bank_sizes[] = { + 2 * GiB, 1 * GiB, 512 * MiB, 256 * MiB, 128 * MiB, 64 * MiB, + 32 * MiB, 0 +}; + +struct boot_info { + uint32_t dt_base; + uint32_t dt_size; + uint32_t entry; +}; + +static int sam460ex_load_uboot(void) +{ + /* + * This first creates 1MiB of flash memory mapped at the end of + * the 32-bit address space (0xFFF00000..0xFFFFFFFF). + * + * If PFLASH unit 0 is defined, the flash memory is initialized + * from that block backend. + * + * Else, it's initialized to zero. And then 512KiB of ROM get + * mapped on top of its second half (0xFFF80000..0xFFFFFFFF), + * initialized from u-boot-sam460-20100605.bin. + * + * This doesn't smell right. + * + * The physical hardware appears to have 512KiB flash memory. + * + * TODO Figure out what we really need here, and clean this up. + */ + + DriveInfo *dinfo; + + dinfo = drive_get(IF_PFLASH, 0, 0); + if (!pflash_cfi01_register(FLASH_BASE | ((hwaddr)FLASH_BASE_H << 32), + "sam460ex.flash", FLASH_SIZE, + dinfo ? blk_by_legacy_dinfo(dinfo) : NULL, + 64 * KiB, 1, 0x89, 0x18, 0x0000, 0x0, 1)) { + error_report("Error registering flash memory"); + /* XXX: return an error instead?
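+ * The caller in sam460ex_init() only checks for a negative return
+ * and prints its own "could not load firmware" diagnostic, so
+ * returning -1 here would be enough.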
*/ + exit(1); + } + + if (!dinfo) { + /*error_report("No flash image given with the 'pflash' parameter," + " using default u-boot image");*/ + rom_add_file_fixed(UBOOT_FILENAME, + UBOOT_LOAD_BASE | ((hwaddr)FLASH_BASE_H << 32), + -1); + } + + return 0; +} + +static int sam460ex_load_device_tree(hwaddr addr, + uint32_t ramsize, + hwaddr initrd_base, + hwaddr initrd_size, + const char *kernel_cmdline) +{ + uint32_t mem_reg_property[] = { 0, 0, cpu_to_be32(ramsize) }; + char *filename; + int fdt_size; + void *fdt; + uint32_t tb_freq = CPU_FREQ; + uint32_t clock_freq = CPU_FREQ; + int offset; + + filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, BINARY_DEVICE_TREE_FILE); + if (!filename) { + error_report("Couldn't find dtb file `%s'", BINARY_DEVICE_TREE_FILE); + exit(1); + } + fdt = load_device_tree(filename, &fdt_size); + if (!fdt) { + error_report("Couldn't load dtb file `%s'", filename); + g_free(filename); + exit(1); + } + g_free(filename); + + /* Manipulate device tree in memory. */ + + qemu_fdt_setprop(fdt, "/memory", "reg", mem_reg_property, + sizeof(mem_reg_property)); + + /* default FDT doesn't have a /chosen node... */ + qemu_fdt_add_subnode(fdt, "/chosen"); + + qemu_fdt_setprop_cell(fdt, "/chosen", "linux,initrd-start", initrd_base); + + qemu_fdt_setprop_cell(fdt, "/chosen", "linux,initrd-end", + (initrd_base + initrd_size)); + + qemu_fdt_setprop_string(fdt, "/chosen", "bootargs", kernel_cmdline); + + /* Copy data from the host device tree into the guest. Since the guest can + * directly access the timebase without host involvement, we must expose + * the correct frequencies. */ + if (kvm_enabled()) { + tb_freq = kvmppc_get_tbfreq(); + clock_freq = kvmppc_get_clockfreq(); + } + + qemu_fdt_setprop_cell(fdt, "/cpus/cpu@0", "clock-frequency", + clock_freq); + qemu_fdt_setprop_cell(fdt, "/cpus/cpu@0", "timebase-frequency", + tb_freq); + + /* Remove cpm node if it exists (it is not emulated) */ + offset = fdt_path_offset(fdt, "/cpm"); + if (offset >= 0) { + _FDT(fdt_nop_node(fdt, offset)); + } + + /* set serial port clocks */ + offset = fdt_node_offset_by_compatible(fdt, -1, "ns16550"); + while (offset >= 0) { + _FDT(fdt_setprop_cell(fdt, offset, "clock-frequency", UART_FREQ)); + offset = fdt_node_offset_by_compatible(fdt, offset, "ns16550"); + } + + /* some more clocks */ + qemu_fdt_setprop_cell(fdt, "/plb", "clock-frequency", + PLB_FREQ); + qemu_fdt_setprop_cell(fdt, "/plb/opb", "clock-frequency", + OPB_FREQ); + qemu_fdt_setprop_cell(fdt, "/plb/opb/ebc", "clock-frequency", + EBC_FREQ); + + rom_add_blob_fixed(BINARY_DEVICE_TREE_FILE, fdt, fdt_size, addr); + g_free(fdt); + + return fdt_size; +} + +/* Create reset TLB entries for BookE, mapping only the flash memory. */ +static void mmubooke_create_initial_mapping_uboot(CPUPPCState *env) +{ + ppcemb_tlb_t *tlb = &env->tlb.tlbe[0]; + + /* on reset the flash is mapped by a shadow TLB, + * but since we don't implement them we need to use + * the same values U-Boot will use to avoid a fault. + */ + tlb->attr = 0; + tlb->prot = PAGE_VALID | ((PAGE_READ | PAGE_WRITE | PAGE_EXEC) << 4); + tlb->size = 0x10000000; /* up to 0xffffffff */ + tlb->EPN = 0xf0000000 & TARGET_PAGE_MASK; + tlb->RPN = (0xf0000000 & TARGET_PAGE_MASK) | 0x4; + tlb->PID = 0; +} + +/* Create reset TLB entries for BookE, spanning the 32bit addr space. 
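+ * A single 2 GiB entry (tlb->size = 1 << 31 below) maps effective
+ * address va onto physical address pa with read/write/execute
+ * permission; that window covers the kernel at KERNEL_ADDR, the
+ * device tree at FDT_ADDR and the ramdisk at RAMDISK_ADDR.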
*/ +static void mmubooke_create_initial_mapping(CPUPPCState *env, + target_ulong va, + hwaddr pa) +{ + ppcemb_tlb_t *tlb = &env->tlb.tlbe[0]; + + tlb->attr = 0; + tlb->prot = PAGE_VALID | ((PAGE_READ | PAGE_WRITE | PAGE_EXEC) << 4); + tlb->size = 1 << 31; /* up to 0x80000000 */ + tlb->EPN = va & TARGET_PAGE_MASK; + tlb->RPN = pa & TARGET_PAGE_MASK; + tlb->PID = 0; +} + +static void main_cpu_reset(void *opaque) +{ + PowerPCCPU *cpu = opaque; + CPUPPCState *env = &cpu->env; + struct boot_info *bi = env->load_info; + + cpu_reset(CPU(cpu)); + + /* either we have a kernel to boot or we jump to U-Boot */ + if (bi->entry != UBOOT_ENTRY) { + env->gpr[1] = (16 * MiB) - 8; + env->gpr[3] = FDT_ADDR; + env->nip = bi->entry; + + /* Create a mapping for the kernel. */ + mmubooke_create_initial_mapping(env, 0, 0); + env->gpr[6] = tswap32(EPAPR_MAGIC); + env->gpr[7] = (16 * MiB) - 8; /* bi->ima_size; */ + + } else { + env->nip = UBOOT_ENTRY; + mmubooke_create_initial_mapping_uboot(env); + } +} + +static void sam460ex_init(MachineState *machine) +{ + MemoryRegion *address_space_mem = get_system_memory(); + MemoryRegion *isa = g_new(MemoryRegion, 1); + MemoryRegion *ram_memories = g_new(MemoryRegion, SDRAM_NR_BANKS); + hwaddr ram_bases[SDRAM_NR_BANKS] = {0}; + hwaddr ram_sizes[SDRAM_NR_BANKS] = {0}; + MemoryRegion *l2cache_ram = g_new(MemoryRegion, 1); + DeviceState *uic[4]; + qemu_irq mal_irqs[4]; + int i; + PCIBus *pci_bus; + PowerPCCPU *cpu; + CPUPPCState *env; + I2CBus *i2c; + hwaddr entry = UBOOT_ENTRY; + target_long initrd_size = 0; + DeviceState *dev; + SysBusDevice *sbdev; + struct boot_info *boot_info; + uint8_t *spd_data; + int success; + + cpu = POWERPC_CPU(cpu_create(machine->cpu_type)); + env = &cpu->env; + if (env->mmu_model != POWERPC_MMU_BOOKE) { + error_report("Only MMU model BookE is supported by this machine."); + exit(1); + } + + qemu_register_reset(main_cpu_reset, cpu); + boot_info = g_malloc0(sizeof(*boot_info)); + env->load_info = boot_info; + + ppc_booke_timers_init(cpu, CPU_FREQ, 0); + ppc_dcr_init(env, NULL, NULL); + + /* PLB arbitrer */ + ppc4xx_plb_init(env); + + /* interrupt controllers */ + for (i = 0; i < ARRAY_SIZE(uic); i++) { + SysBusDevice *sbd; + /* + * UICs 1, 2 and 3 are cascaded through UIC 0. + * input_ints[n] is the interrupt number on UIC 0 which + * the INT output of UIC n is connected to. The CINT output + * of UIC n connects to input_ints[n] + 1. + * The entry in input_ints[] for UIC 0 is ignored, because UIC 0's + * INT and CINT outputs are connected to the CPU. + */ + const int input_ints[] = { -1, 30, 10, 16 }; + + uic[i] = qdev_new(TYPE_PPC_UIC); + sbd = SYS_BUS_DEVICE(uic[i]); + + qdev_prop_set_uint32(uic[i], "dcr-base", 0xc0 + i * 0x10); + object_property_set_link(OBJECT(uic[i]), "cpu", OBJECT(cpu), + &error_fatal); + sysbus_realize_and_unref(sbd, &error_fatal); + + if (i == 0) { + sysbus_connect_irq(sbd, PPCUIC_OUTPUT_INT, + ((qemu_irq *)env->irq_inputs)[PPC40x_INPUT_INT]); + sysbus_connect_irq(sbd, PPCUIC_OUTPUT_CINT, + ((qemu_irq *)env->irq_inputs)[PPC40x_INPUT_CINT]); + } else { + sysbus_connect_irq(sbd, PPCUIC_OUTPUT_INT, + qdev_get_gpio_in(uic[0], input_ints[i])); + sysbus_connect_irq(sbd, PPCUIC_OUTPUT_CINT, + qdev_get_gpio_in(uic[0], input_ints[i] + 1)); + } + } + + /* SDRAM controller */ + /* put all RAM on first bank because board has one slot + * and firmware only checks that */ + ppc4xx_sdram_banks(machine->ram, 1, ram_memories, ram_bases, ram_sizes, + ppc460ex_sdram_bank_sizes); + + /* FIXME: does 460EX have ECC interrupts? 
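+ * None are wired up in this model; the SDRAM controller below is
+ * initialised without an ECC error interrupt.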
*/ + ppc440_sdram_init(env, SDRAM_NR_BANKS, ram_memories, + ram_bases, ram_sizes, 1); + + /* IIC controllers and devices */ + dev = sysbus_create_simple(TYPE_PPC4xx_I2C, 0x4ef600700, + qdev_get_gpio_in(uic[0], 2)); + i2c = PPC4xx_I2C(dev)->bus; + /* SPD EEPROM on RAM module */ + spd_data = spd_data_generate(ram_sizes[0] < 128 * MiB ? DDR : DDR2, + ram_sizes[0]); + spd_data[20] = 4; /* SO-DIMM module */ + smbus_eeprom_init_one(i2c, 0x50, spd_data); + /* RTC */ + i2c_slave_create_simple(i2c, "m41t80", 0x68); + + dev = sysbus_create_simple(TYPE_PPC4xx_I2C, 0x4ef600800, + qdev_get_gpio_in(uic[0], 3)); + + /* External bus controller */ + ppc405_ebc_init(env); + + /* CPR */ + ppc4xx_cpr_init(env); + + /* PLB to AHB bridge */ + ppc4xx_ahb_init(env); + + /* System DCRs */ + ppc4xx_sdr_init(env); + + /* MAL */ + for (i = 0; i < ARRAY_SIZE(mal_irqs); i++) { + mal_irqs[i] = qdev_get_gpio_in(uic[2], 3 + i); + } + ppc4xx_mal_init(env, 4, 16, mal_irqs); + + /* DMA */ + ppc4xx_dma_init(env, 0x200); + + /* 256K of L2 cache as memory */ + ppc4xx_l2sram_init(env); + /* FIXME: remove this after fixing l2sram mapping in ppc440_uc.c? */ + memory_region_init_ram(l2cache_ram, NULL, "ppc440.l2cache_ram", 256 * KiB, + &error_abort); + memory_region_add_subregion(address_space_mem, 0x400000000LL, l2cache_ram); + + /* USB */ + sysbus_create_simple(TYPE_PPC4xx_EHCI, 0x4bffd0400, + qdev_get_gpio_in(uic[2], 29)); + dev = qdev_new("sysbus-ohci"); + qdev_prop_set_string(dev, "masterbus", "usb-bus.0"); + qdev_prop_set_uint32(dev, "num-ports", 6); + sbdev = SYS_BUS_DEVICE(dev); + sysbus_realize_and_unref(sbdev, &error_fatal); + sysbus_mmio_map(sbdev, 0, 0x4bffd0000); + sysbus_connect_irq(sbdev, 0, qdev_get_gpio_in(uic[2], 30)); + usb_create_simple(usb_bus_find(-1), "usb-kbd"); + usb_create_simple(usb_bus_find(-1), "usb-mouse"); + + /* PCI bus */ + ppc460ex_pcie_init(env); + /* All PCI irqs are connected to the same UIC pin (cf. UBoot source) */ + dev = sysbus_create_simple("ppc440-pcix-host", 0xc0ec00000, + qdev_get_gpio_in(uic[1], 0)); + pci_bus = PCI_BUS(qdev_get_child_bus(dev, "pci.0")); + + memory_region_init_alias(isa, NULL, "isa_mmio", get_system_io(), + 0, 0x10000); + memory_region_add_subregion(get_system_memory(), 0xc08000000, isa); + + /* PCI devices */ + pci_create_simple(pci_bus, PCI_DEVFN(6, 0), "sm501"); + /* SoC has a single SATA port but we don't emulate that yet + * However, firmware and usual clients have driver for SiI311x + * so add one for convenience by default */ + if (defaults_enabled()) { + pci_create_simple(pci_bus, -1, "sii3112"); + } + + /* SoC has 4 UARTs + * but board has only one wired and two are present in fdt */ + if (serial_hd(0) != NULL) { + serial_mm_init(address_space_mem, 0x4ef600300, 0, + qdev_get_gpio_in(uic[1], 1), + PPC_SERIAL_MM_BAUDBASE, serial_hd(0), + DEVICE_BIG_ENDIAN); + } + if (serial_hd(1) != NULL) { + serial_mm_init(address_space_mem, 0x4ef600400, 0, + qdev_get_gpio_in(uic[0], 1), + PPC_SERIAL_MM_BAUDBASE, serial_hd(1), + DEVICE_BIG_ENDIAN); + } + + /* Load U-Boot image. */ + if (!machine->kernel_filename) { + success = sam460ex_load_uboot(); + if (success < 0) { + error_report("could not load firmware"); + exit(1); + } + } + + /* Load kernel.
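+ * The fallback chain below first tries the file as a U-Boot uImage,
+ * then as an ELF image; loading it as a raw binary is still left
+ * as an XXX.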
*/ + if (machine->kernel_filename) { + hwaddr loadaddr = LOAD_UIMAGE_LOADADDR_INVALID; + success = load_uimage(machine->kernel_filename, &entry, &loadaddr, + NULL, NULL, NULL); + if (success < 0) { + uint64_t elf_entry; + + success = load_elf(machine->kernel_filename, NULL, NULL, NULL, + &elf_entry, NULL, NULL, NULL, + 1, PPC_ELF_MACHINE, 0, 0); + entry = elf_entry; + } + /* XXX try again as binary */ + if (success < 0) { + error_report("could not load kernel '%s'", + machine->kernel_filename); + exit(1); + } + } + + /* Load initrd. */ + if (machine->initrd_filename) { + initrd_size = load_image_targphys(machine->initrd_filename, + RAMDISK_ADDR, + machine->ram_size - RAMDISK_ADDR); + if (initrd_size < 0) { + error_report("could not load ram disk '%s' at %x", + machine->initrd_filename, RAMDISK_ADDR); + exit(1); + } + } + + /* If we're loading a kernel directly, we must load the device tree too. */ + if (machine->kernel_filename) { + int dt_size; + + dt_size = sam460ex_load_device_tree(FDT_ADDR, machine->ram_size, + RAMDISK_ADDR, initrd_size, + machine->kernel_cmdline); + + boot_info->dt_base = FDT_ADDR; + boot_info->dt_size = dt_size; + } + + boot_info->entry = entry; +} + +static void sam460ex_machine_init(MachineClass *mc) +{ + mc->desc = "aCube Sam460ex"; + mc->init = sam460ex_init; + mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("460exb"); + mc->default_ram_size = 512 * MiB; + mc->default_ram_id = "ppc4xx.sdram"; +} + +DEFINE_MACHINE("sam460ex", sam460ex_machine_init) diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c new file mode 100644 index 000000000..3b5fd749b --- /dev/null +++ b/hw/ppc/spapr.c @@ -0,0 +1,5136 @@ +/* + * QEMU PowerPC pSeries Logical Partition (aka sPAPR) hardware System Emulator + * + * Copyright (c) 2004-2007 Fabrice Bellard + * Copyright (c) 2007 Jocelyn Mayer + * Copyright (c) 2010 David Gibson, IBM Corporation. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include "qemu-common.h" +#include "qemu/datadir.h" +#include "qapi/error.h" +#include "qapi/qapi-events-machine.h" +#include "qapi/qapi-events-qdev.h" +#include "qapi/visitor.h" +#include "sysemu/sysemu.h" +#include "sysemu/hostmem.h" +#include "sysemu/numa.h" +#include "sysemu/qtest.h" +#include "sysemu/reset.h" +#include "sysemu/runstate.h" +#include "qemu/log.h" +#include "hw/fw-path-provider.h" +#include "elf.h" +#include "net/net.h" +#include "sysemu/device_tree.h" +#include "sysemu/cpus.h" +#include "sysemu/hw_accel.h" +#include "kvm_ppc.h" +#include "migration/misc.h" +#include "migration/qemu-file-types.h" +#include "migration/global_state.h" +#include "migration/register.h" +#include "migration/blocker.h" +#include "mmu-hash64.h" +#include "mmu-book3s-v3.h" +#include "cpu-models.h" +#include "hw/core/cpu.h" + +#include "hw/ppc/ppc.h" +#include "hw/loader.h" + +#include "hw/ppc/fdt.h" +#include "hw/ppc/spapr.h" +#include "hw/ppc/spapr_vio.h" +#include "hw/qdev-properties.h" +#include "hw/pci-host/spapr.h" +#include "hw/pci/msi.h" + +#include "hw/pci/pci.h" +#include "hw/scsi/scsi.h" +#include "hw/virtio/virtio-scsi.h" +#include "hw/virtio/vhost-scsi-common.h" + +#include "exec/ram_addr.h" +#include "hw/usb.h" +#include "qemu/config-file.h" +#include "qemu/error-report.h" +#include "trace.h" +#include "hw/nmi.h" +#include "hw/intc/intc.h" + +#include "hw/ppc/spapr_cpu_core.h" +#include "hw/mem/memory-device.h" +#include "hw/ppc/spapr_tpm_proxy.h" +#include "hw/ppc/spapr_nvdimm.h" +#include "hw/ppc/spapr_numa.h" +#include "hw/ppc/pef.h" + +#include "monitor/monitor.h" + +#include + +/* SLOF memory layout: + * + * SLOF raw image loaded at 0, copies its romfs right below the flat + * device-tree, then position SLOF itself 31M below that + * + * So we set FW_OVERHEAD to 40MB which should account for all of that + * and more + * + * We load our kernel at 4M, leaving space for SLOF initial image + */ +#define FDT_MAX_ADDR 0x80000000 /* FDT must stay below that */ +#define FW_MAX_SIZE 0x400000 +#define FW_FILE_NAME "slof.bin" +#define FW_FILE_NAME_VOF "vof.bin" +#define FW_OVERHEAD 0x2800000 +#define KERNEL_LOAD_ADDR FW_MAX_SIZE + +#define MIN_RMA_SLOF (128 * MiB) + +#define PHANDLE_INTC 0x00001111 + +/* These two functions implement the VCPU id numbering: one to compute them + * all and one to identify thread 0 of a VCORE. Any change to the first one + * is likely to have an impact on the second one, so let's keep them close. + */ +static int spapr_vcpu_id(SpaprMachineState *spapr, int cpu_index) +{ + MachineState *ms = MACHINE(spapr); + unsigned int smp_threads = ms->smp.threads; + + assert(spapr->vsmt); + return + (cpu_index / smp_threads) * spapr->vsmt + cpu_index % smp_threads; +} +static bool spapr_is_thread0_in_vcore(SpaprMachineState *spapr, + PowerPCCPU *cpu) +{ + assert(spapr->vsmt); + return spapr_get_vcpu_id(cpu) % spapr->vsmt == 0; +} + +static bool pre_2_10_vmstate_dummy_icp_needed(void *opaque) +{ + /* Dummy entries correspond to unused ICPState objects in older QEMUs, + * and newer QEMUs don't even have them. In both cases, we don't want + * to send anything on the wire. 
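+ * The VMSTATE_UNUSED fields below only describe the layout, so an
+ * incoming "icp/server" section from an older QEMU can still be
+ * parsed and discarded without failing the migration.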
+ */ + return false; +} + +static const VMStateDescription pre_2_10_vmstate_dummy_icp = { + .name = "icp/server", + .version_id = 1, + .minimum_version_id = 1, + .needed = pre_2_10_vmstate_dummy_icp_needed, + .fields = (VMStateField[]) { + VMSTATE_UNUSED(4), /* uint32_t xirr */ + VMSTATE_UNUSED(1), /* uint8_t pending_priority */ + VMSTATE_UNUSED(1), /* uint8_t mfrr */ + VMSTATE_END_OF_LIST() + }, +}; + +static void pre_2_10_vmstate_register_dummy_icp(int i) +{ + vmstate_register(NULL, i, &pre_2_10_vmstate_dummy_icp, + (void *)(uintptr_t) i); +} + +static void pre_2_10_vmstate_unregister_dummy_icp(int i) +{ + vmstate_unregister(NULL, &pre_2_10_vmstate_dummy_icp, + (void *)(uintptr_t) i); +} + +int spapr_max_server_number(SpaprMachineState *spapr) +{ + MachineState *ms = MACHINE(spapr); + + assert(spapr->vsmt); + return DIV_ROUND_UP(ms->smp.max_cpus * spapr->vsmt, ms->smp.threads); +} + +static int spapr_fixup_cpu_smt_dt(void *fdt, int offset, PowerPCCPU *cpu, + int smt_threads) +{ + int i, ret = 0; + uint32_t servers_prop[smt_threads]; + uint32_t gservers_prop[smt_threads * 2]; + int index = spapr_get_vcpu_id(cpu); + + if (cpu->compat_pvr) { + ret = fdt_setprop_cell(fdt, offset, "cpu-version", cpu->compat_pvr); + if (ret < 0) { + return ret; + } + } + + /* Build interrupt servers and gservers properties */ + for (i = 0; i < smt_threads; i++) { + servers_prop[i] = cpu_to_be32(index + i); + /* Hack, direct the group queues back to cpu 0 */ + gservers_prop[i*2] = cpu_to_be32(index + i); + gservers_prop[i*2 + 1] = 0; + } + ret = fdt_setprop(fdt, offset, "ibm,ppc-interrupt-server#s", + servers_prop, sizeof(servers_prop)); + if (ret < 0) { + return ret; + } + ret = fdt_setprop(fdt, offset, "ibm,ppc-interrupt-gserver#s", + gservers_prop, sizeof(gservers_prop)); + + return ret; +} + +static void spapr_dt_pa_features(SpaprMachineState *spapr, + PowerPCCPU *cpu, + void *fdt, int offset) +{ + uint8_t pa_features_206[] = { 6, 0, + 0xf6, 0x1f, 0xc7, 0x00, 0x80, 0xc0 }; + uint8_t pa_features_207[] = { 24, 0, + 0xf6, 0x1f, 0xc7, 0xc0, 0x80, 0xf0, + 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, + 0x80, 0x00, 0x80, 0x00, 0x00, 0x00 }; + uint8_t pa_features_300[] = { 66, 0, + /* 0: MMU|FPU|SLB|RUN|DABR|NX, 1: fri[nzpm]|DABRX|SPRG3|SLB0|PP110 */ + /* 2: VPM|DS205|PPR|DS202|DS206, 3: LSD|URG, SSO, 5: LE|CFAR|EB|LSQ */ + 0xf6, 0x1f, 0xc7, 0xc0, 0x80, 0xf0, /* 0 - 5 */ + /* 6: DS207 */ + 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, /* 6 - 11 */ + /* 16: Vector */ + 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, /* 12 - 17 */ + /* 18: Vec. Scalar, 20: Vec. XOR, 22: HTM */ + 0x80, 0x00, 0x80, 0x00, 0x00, 0x00, /* 18 - 23 */ + /* 24: Ext. 
Dec, 26: 64 bit ftrs, 28: PM ftrs */ + 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 24 - 29 */ + /* 30: MMR, 32: LE atomic, 34: EBB + ext EBB */ + 0x80, 0x00, 0x80, 0x00, 0xC0, 0x00, /* 30 - 35 */ + /* 36: SPR SO, 38: Copy/Paste, 40: Radix MMU */ + 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 36 - 41 */ + /* 42: PM, 44: PC RA, 46: SC vec'd */ + 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 42 - 47 */ + /* 48: SIMD, 50: QP BFP, 52: String */ + 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 48 - 53 */ + /* 54: DecFP, 56: DecI, 58: SHA */ + 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 54 - 59 */ + /* 60: NM atomic, 62: RNG */ + 0x80, 0x00, 0x80, 0x00, 0x00, 0x00, /* 60 - 65 */ + }; + uint8_t *pa_features = NULL; + size_t pa_size; + + if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_2_06, 0, cpu->compat_pvr)) { + pa_features = pa_features_206; + pa_size = sizeof(pa_features_206); + } + if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_2_07, 0, cpu->compat_pvr)) { + pa_features = pa_features_207; + pa_size = sizeof(pa_features_207); + } + if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_3_00, 0, cpu->compat_pvr)) { + pa_features = pa_features_300; + pa_size = sizeof(pa_features_300); + } + if (!pa_features) { + return; + } + + if (ppc_hash64_has(cpu, PPC_HASH64_CI_LARGEPAGE)) { + /* + * Note: we keep CI large pages off by default because a 64K capable + * guest provisioned with large pages might otherwise try to map a qemu + * framebuffer (or other kind of memory mapped PCI BAR) using 64K pages + * even if that qemu runs on a 4k host. + * We add this bit back here if we are confident this is not an issue + */ + pa_features[3] |= 0x20; + } + if ((spapr_get_cap(spapr, SPAPR_CAP_HTM) != 0) && pa_size > 24) { + pa_features[24] |= 0x80; /* Transactional memory support */ + } + if (spapr->cas_pre_isa3_guest && pa_size > 40) { + /* Workaround for broken kernels that attempt (guest) radix + * mode when they can't handle it, if they see the radix bit set + * in pa-features. So hide it from them.
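+ * Attribute byte N of pa-features sits at array offset N + 2,
+ * because the first two bytes of the property are a length header
+ * ({ 66, 0, ... } above); hence the "40 + 2" index below, where
+ * 0x80 is the radix MMU bit of byte 40.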
*/ + pa_features[40 + 2] &= ~0x80; /* Radix MMU */ + } + + _FDT((fdt_setprop(fdt, offset, "ibm,pa-features", pa_features, pa_size))); +} + +static hwaddr spapr_node0_size(MachineState *machine) +{ + if (machine->numa_state->num_nodes) { + int i; + for (i = 0; i < machine->numa_state->num_nodes; ++i) { + if (machine->numa_state->nodes[i].node_mem) { + return MIN(pow2floor(machine->numa_state->nodes[i].node_mem), + machine->ram_size); + } + } + } + return machine->ram_size; +} + +static void add_str(GString *s, const gchar *s1) +{ + g_string_append_len(s, s1, strlen(s1) + 1); +} + +static int spapr_dt_memory_node(SpaprMachineState *spapr, void *fdt, int nodeid, + hwaddr start, hwaddr size) +{ + char mem_name[32]; + uint64_t mem_reg_property[2]; + int off; + + mem_reg_property[0] = cpu_to_be64(start); + mem_reg_property[1] = cpu_to_be64(size); + + sprintf(mem_name, "memory@%" HWADDR_PRIx, start); + off = fdt_add_subnode(fdt, 0, mem_name); + _FDT(off); + _FDT((fdt_setprop_string(fdt, off, "device_type", "memory"))); + _FDT((fdt_setprop(fdt, off, "reg", mem_reg_property, + sizeof(mem_reg_property)))); + spapr_numa_write_associativity_dt(spapr, fdt, off, nodeid); + return off; +} + +static uint32_t spapr_pc_dimm_node(MemoryDeviceInfoList *list, ram_addr_t addr) +{ + MemoryDeviceInfoList *info; + + for (info = list; info; info = info->next) { + MemoryDeviceInfo *value = info->value; + + if (value && value->type == MEMORY_DEVICE_INFO_KIND_DIMM) { + PCDIMMDeviceInfo *pcdimm_info = value->u.dimm.data; + + if (addr >= pcdimm_info->addr && + addr < (pcdimm_info->addr + pcdimm_info->size)) { + return pcdimm_info->node; + } + } + } + + return -1; +} + +struct sPAPRDrconfCellV2 { + uint32_t seq_lmbs; + uint64_t base_addr; + uint32_t drc_index; + uint32_t aa_index; + uint32_t flags; +} QEMU_PACKED; + +typedef struct DrconfCellQueue { + struct sPAPRDrconfCellV2 cell; + QSIMPLEQ_ENTRY(DrconfCellQueue) entry; +} DrconfCellQueue; + +static DrconfCellQueue * +spapr_get_drconf_cell(uint32_t seq_lmbs, uint64_t base_addr, + uint32_t drc_index, uint32_t aa_index, + uint32_t flags) +{ + DrconfCellQueue *elem; + + elem = g_malloc0(sizeof(*elem)); + elem->cell.seq_lmbs = cpu_to_be32(seq_lmbs); + elem->cell.base_addr = cpu_to_be64(base_addr); + elem->cell.drc_index = cpu_to_be32(drc_index); + elem->cell.aa_index = cpu_to_be32(aa_index); + elem->cell.flags = cpu_to_be32(flags); + + return elem; +} + +static int spapr_dt_dynamic_memory_v2(SpaprMachineState *spapr, void *fdt, + int offset, MemoryDeviceInfoList *dimms) +{ + MachineState *machine = MACHINE(spapr); + uint8_t *int_buf, *cur_index; + int ret; + uint64_t lmb_size = SPAPR_MEMORY_BLOCK_SIZE; + uint64_t addr, cur_addr, size; + uint32_t nr_boot_lmbs = (machine->device_memory->base / lmb_size); + uint64_t mem_end = machine->device_memory->base + + memory_region_size(&machine->device_memory->mr); + uint32_t node, buf_len, nr_entries = 0; + SpaprDrc *drc; + DrconfCellQueue *elem, *next; + MemoryDeviceInfoList *info; + QSIMPLEQ_HEAD(, DrconfCellQueue) drconf_queue + = QSIMPLEQ_HEAD_INITIALIZER(drconf_queue); + + /* Entry to cover RAM and the gap area */ + elem = spapr_get_drconf_cell(nr_boot_lmbs, 0, 0, -1, + SPAPR_LMB_FLAGS_RESERVED | + SPAPR_LMB_FLAGS_DRC_INVALID); + QSIMPLEQ_INSERT_TAIL(&drconf_queue, elem, entry); + nr_entries++; + + cur_addr = machine->device_memory->base; + for (info = dimms; info; info = info->next) { + PCDIMMDeviceInfo *di = info->value->u.dimm.data; + + addr = di->addr; + size = di->size; + node = di->node; + + /* + * The NVDIMM area is 
hotpluggable after the NVDIMM is unplugged. The + * area is marked hotpluggable in the next iteration for the bigger + * chunk including the NVDIMM occupied area. + */ + if (info->value->type == MEMORY_DEVICE_INFO_KIND_NVDIMM) + continue; + + /* Entry for hot-pluggable area */ + if (cur_addr < addr) { + drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB, cur_addr / lmb_size); + g_assert(drc); + elem = spapr_get_drconf_cell((addr - cur_addr) / lmb_size, + cur_addr, spapr_drc_index(drc), -1, 0); + QSIMPLEQ_INSERT_TAIL(&drconf_queue, elem, entry); + nr_entries++; + } + + /* Entry for DIMM */ + drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB, addr / lmb_size); + g_assert(drc); + elem = spapr_get_drconf_cell(size / lmb_size, addr, + spapr_drc_index(drc), node, + (SPAPR_LMB_FLAGS_ASSIGNED | + SPAPR_LMB_FLAGS_HOTREMOVABLE)); + QSIMPLEQ_INSERT_TAIL(&drconf_queue, elem, entry); + nr_entries++; + cur_addr = addr + size; + } + + /* Entry for remaining hotpluggable area */ + if (cur_addr < mem_end) { + drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB, cur_addr / lmb_size); + g_assert(drc); + elem = spapr_get_drconf_cell((mem_end - cur_addr) / lmb_size, + cur_addr, spapr_drc_index(drc), -1, 0); + QSIMPLEQ_INSERT_TAIL(&drconf_queue, elem, entry); + nr_entries++; + } + + buf_len = nr_entries * sizeof(struct sPAPRDrconfCellV2) + sizeof(uint32_t); + int_buf = cur_index = g_malloc0(buf_len); + *(uint32_t *)int_buf = cpu_to_be32(nr_entries); + cur_index += sizeof(nr_entries); + + QSIMPLEQ_FOREACH_SAFE(elem, &drconf_queue, entry, next) { + memcpy(cur_index, &elem->cell, sizeof(elem->cell)); + cur_index += sizeof(elem->cell); + QSIMPLEQ_REMOVE(&drconf_queue, elem, DrconfCellQueue, entry); + g_free(elem); + } + + ret = fdt_setprop(fdt, offset, "ibm,dynamic-memory-v2", int_buf, buf_len); + g_free(int_buf); + if (ret < 0) { + return -1; + } + return 0; +} + +static int spapr_dt_dynamic_memory(SpaprMachineState *spapr, void *fdt, + int offset, MemoryDeviceInfoList *dimms) +{ + MachineState *machine = MACHINE(spapr); + int i, ret; + uint64_t lmb_size = SPAPR_MEMORY_BLOCK_SIZE; + uint32_t device_lmb_start = machine->device_memory->base / lmb_size; + uint32_t nr_lmbs = (machine->device_memory->base + + memory_region_size(&machine->device_memory->mr)) / + lmb_size; + uint32_t *int_buf, *cur_index, buf_len; + + /* + * Allocate enough buffer size to fit in ibm,dynamic-memory + */ + buf_len = (nr_lmbs * SPAPR_DR_LMB_LIST_ENTRY_SIZE + 1) * sizeof(uint32_t); + cur_index = int_buf = g_malloc0(buf_len); + int_buf[0] = cpu_to_be32(nr_lmbs); + cur_index++; + for (i = 0; i < nr_lmbs; i++) { + uint64_t addr = i * lmb_size; + uint32_t *dynamic_memory = cur_index; + + if (i >= device_lmb_start) { + SpaprDrc *drc; + + drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB, i); + g_assert(drc); + + dynamic_memory[0] = cpu_to_be32(addr >> 32); + dynamic_memory[1] = cpu_to_be32(addr & 0xffffffff); + dynamic_memory[2] = cpu_to_be32(spapr_drc_index(drc)); + dynamic_memory[3] = cpu_to_be32(0); /* reserved */ + dynamic_memory[4] = cpu_to_be32(spapr_pc_dimm_node(dimms, addr)); + if (memory_region_present(get_system_memory(), addr)) { + dynamic_memory[5] = cpu_to_be32(SPAPR_LMB_FLAGS_ASSIGNED); + } else { + dynamic_memory[5] = cpu_to_be32(0); + } + } else { + /* + * LMB information for RMA, boot time RAM and gap b/n RAM and + * device memory region -- all these are marked as reserved + * and as having no valid DRC. 
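+ * Each list entry below is six cells (SPAPR_DR_LMB_LIST_ENTRY_SIZE):
+ * the LMB address split across two cells, the DRC index, a reserved
+ * cell, the associativity list index and a flags cell.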
+ */ + dynamic_memory[0] = cpu_to_be32(addr >> 32); + dynamic_memory[1] = cpu_to_be32(addr & 0xffffffff); + dynamic_memory[2] = cpu_to_be32(0); + dynamic_memory[3] = cpu_to_be32(0); /* reserved */ + dynamic_memory[4] = cpu_to_be32(-1); + dynamic_memory[5] = cpu_to_be32(SPAPR_LMB_FLAGS_RESERVED | + SPAPR_LMB_FLAGS_DRC_INVALID); + } + + cur_index += SPAPR_DR_LMB_LIST_ENTRY_SIZE; + } + ret = fdt_setprop(fdt, offset, "ibm,dynamic-memory", int_buf, buf_len); + g_free(int_buf); + if (ret < 0) { + return -1; + } + return 0; +} + +/* + * Adds ibm,dynamic-reconfiguration-memory node. + * Refer to docs/specs/ppc-spapr-hotplug.txt for the documentation + * of this device tree node. + */ +static int spapr_dt_dynamic_reconfiguration_memory(SpaprMachineState *spapr, + void *fdt) +{ + MachineState *machine = MACHINE(spapr); + int ret, offset; + uint64_t lmb_size = SPAPR_MEMORY_BLOCK_SIZE; + uint32_t prop_lmb_size[] = {cpu_to_be32(lmb_size >> 32), + cpu_to_be32(lmb_size & 0xffffffff)}; + MemoryDeviceInfoList *dimms = NULL; + + /* + * Don't create the node if there is no device memory + */ + if (machine->ram_size == machine->maxram_size) { + return 0; + } + + offset = fdt_add_subnode(fdt, 0, "ibm,dynamic-reconfiguration-memory"); + + ret = fdt_setprop(fdt, offset, "ibm,lmb-size", prop_lmb_size, + sizeof(prop_lmb_size)); + if (ret < 0) { + return ret; + } + + ret = fdt_setprop_cell(fdt, offset, "ibm,memory-flags-mask", 0xff); + if (ret < 0) { + return ret; + } + + ret = fdt_setprop_cell(fdt, offset, "ibm,memory-preservation-time", 0x0); + if (ret < 0) { + return ret; + } + + /* ibm,dynamic-memory or ibm,dynamic-memory-v2 */ + dimms = qmp_memory_device_list(); + if (spapr_ovec_test(spapr->ov5_cas, OV5_DRMEM_V2)) { + ret = spapr_dt_dynamic_memory_v2(spapr, fdt, offset, dimms); + } else { + ret = spapr_dt_dynamic_memory(spapr, fdt, offset, dimms); + } + qapi_free_MemoryDeviceInfoList(dimms); + + if (ret < 0) { + return ret; + } + + ret = spapr_numa_write_assoc_lookup_arrays(spapr, fdt, offset); + + return ret; +} + +static int spapr_dt_memory(SpaprMachineState *spapr, void *fdt) +{ + MachineState *machine = MACHINE(spapr); + SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr); + hwaddr mem_start, node_size; + int i, nb_nodes = machine->numa_state->num_nodes; + NodeInfo *nodes = machine->numa_state->nodes; + + for (i = 0, mem_start = 0; i < nb_nodes; ++i) { + if (!nodes[i].node_mem) { + continue; + } + if (mem_start >= machine->ram_size) { + node_size = 0; + } else { + node_size = nodes[i].node_mem; + if (node_size > machine->ram_size - mem_start) { + node_size = machine->ram_size - mem_start; + } + } + if (!mem_start) { + /* spapr_machine_init() checks for rma_size <= node0_size + * already */ + spapr_dt_memory_node(spapr, fdt, i, 0, spapr->rma_size); + mem_start += spapr->rma_size; + node_size -= spapr->rma_size; + } + for ( ; node_size; ) { + hwaddr sizetmp = pow2floor(node_size); + + /* mem_start != 0 here */ + if (ctzl(mem_start) < ctzl(sizetmp)) { + sizetmp = 1ULL << ctzl(mem_start); + } + + spapr_dt_memory_node(spapr, fdt, i, mem_start, sizetmp); + node_size -= sizetmp; + mem_start += sizetmp; + } + } + + /* Generate ibm,dynamic-reconfiguration-memory node if required */ + if (spapr_ovec_test(spapr->ov5_cas, OV5_DRCONF_MEMORY)) { + int ret; + + g_assert(smc->dr_lmb_enabled); + ret = spapr_dt_dynamic_reconfiguration_memory(spapr, fdt); + if (ret) { + return ret; + } + } + + return 0; +} + +static void spapr_dt_cpu(CPUState *cs, void *fdt, int offset, + SpaprMachineState *spapr) +{ + MachineState *ms = 
MACHINE(spapr);
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ CPUPPCState *env = &cpu->env;
+ PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cs);
+ int index = spapr_get_vcpu_id(cpu);
+ uint32_t segs[] = {cpu_to_be32(28), cpu_to_be32(40),
+ 0xffffffff, 0xffffffff};
+ uint32_t tbfreq = kvm_enabled() ? kvmppc_get_tbfreq()
+ : SPAPR_TIMEBASE_FREQ;
+ uint32_t cpufreq = kvm_enabled() ? kvmppc_get_clockfreq() : 1000000000;
+ uint32_t page_sizes_prop[64];
+ size_t page_sizes_prop_size;
+ unsigned int smp_threads = ms->smp.threads;
+ uint32_t vcpus_per_socket = smp_threads * ms->smp.cores;
+ uint32_t pft_size_prop[] = {0, cpu_to_be32(spapr->htab_shift)};
+ int compat_smt = MIN(smp_threads, ppc_compat_max_vthreads(cpu));
+ SpaprDrc *drc;
+ int drc_index;
+ uint32_t radix_AP_encodings[PPC_PAGE_SIZES_MAX_SZ];
+ int i;
+
+ drc = spapr_drc_by_id(TYPE_SPAPR_DRC_CPU, index);
+ if (drc) {
+ drc_index = spapr_drc_index(drc);
+ _FDT((fdt_setprop_cell(fdt, offset, "ibm,my-drc-index", drc_index)));
+ }
+
+ _FDT((fdt_setprop_cell(fdt, offset, "reg", index)));
+ _FDT((fdt_setprop_string(fdt, offset, "device_type", "cpu")));
+
+ _FDT((fdt_setprop_cell(fdt, offset, "cpu-version", env->spr[SPR_PVR])));
+ _FDT((fdt_setprop_cell(fdt, offset, "d-cache-block-size",
+ env->dcache_line_size)));
+ _FDT((fdt_setprop_cell(fdt, offset, "d-cache-line-size",
+ env->dcache_line_size)));
+ _FDT((fdt_setprop_cell(fdt, offset, "i-cache-block-size",
+ env->icache_line_size)));
+ _FDT((fdt_setprop_cell(fdt, offset, "i-cache-line-size",
+ env->icache_line_size)));
+
+ if (pcc->l1_dcache_size) {
+ _FDT((fdt_setprop_cell(fdt, offset, "d-cache-size",
+ pcc->l1_dcache_size)));
+ } else {
+ warn_report("Unknown L1 dcache size for cpu");
+ }
+ if (pcc->l1_icache_size) {
+ _FDT((fdt_setprop_cell(fdt, offset, "i-cache-size",
+ pcc->l1_icache_size)));
+ } else {
+ warn_report("Unknown L1 icache size for cpu");
+ }
+
+ _FDT((fdt_setprop_cell(fdt, offset, "timebase-frequency", tbfreq)));
+ _FDT((fdt_setprop_cell(fdt, offset, "clock-frequency", cpufreq)));
+ _FDT((fdt_setprop_cell(fdt, offset, "slb-size", cpu->hash64_opts->slb_size)));
+ _FDT((fdt_setprop_cell(fdt, offset, "ibm,slb-size", cpu->hash64_opts->slb_size)));
+ _FDT((fdt_setprop_string(fdt, offset, "status", "okay")));
+ _FDT((fdt_setprop(fdt, offset, "64-bit", NULL, 0)));
+
+ if (ppc_has_spr(cpu, SPR_PURR)) {
+ _FDT((fdt_setprop_cell(fdt, offset, "ibm,purr", 1)));
+ }
+ if (ppc_has_spr(cpu, SPR_SPURR)) {
+ _FDT((fdt_setprop_cell(fdt, offset, "ibm,spurr", 1)));
+ }
+
+ if (ppc_hash64_has(cpu, PPC_HASH64_1TSEG)) {
+ _FDT((fdt_setprop(fdt, offset, "ibm,processor-segment-sizes",
+ segs, sizeof(segs))));
+ }
+
+ /* Advertise VSX (vector extensions) if available
+ * 1 == VMX / Altivec available
+ * 2 == VSX available
+ *
+ * Only CPUs for which we create core types in spapr_cpu_core.c
+ * are possible, and all of those have VMX */
+ if (spapr_get_cap(spapr, SPAPR_CAP_VSX) != 0) {
+ _FDT((fdt_setprop_cell(fdt, offset, "ibm,vmx", 2)));
+ } else {
+ _FDT((fdt_setprop_cell(fdt, offset, "ibm,vmx", 1)));
+ }
+
+ /* Advertise DFP (Decimal Floating Point) if available
+ * 0 / no property == no DFP
+ * 1 == DFP available */
+ if (spapr_get_cap(spapr, SPAPR_CAP_DFP) != 0) {
+ _FDT((fdt_setprop_cell(fdt, offset, "ibm,dfp", 1)));
+ }
+
+ page_sizes_prop_size = ppc_create_page_sizes_prop(cpu, page_sizes_prop,
+ sizeof(page_sizes_prop));
+ if (page_sizes_prop_size) {
+ _FDT((fdt_setprop(fdt, offset, "ibm,segment-page-sizes",
+ page_sizes_prop, page_sizes_prop_size)));
+ }
+
+ spapr_dt_pa_features(spapr, cpu, fdt,
offset);
+
+ _FDT((fdt_setprop_cell(fdt, offset, "ibm,chip-id",
+ cs->cpu_index / vcpus_per_socket)));
+
+ _FDT((fdt_setprop(fdt, offset, "ibm,pft-size",
+ pft_size_prop, sizeof(pft_size_prop))));
+
+ if (ms->numa_state->num_nodes > 1) {
+ _FDT(spapr_numa_fixup_cpu_dt(spapr, fdt, offset, cpu));
+ }
+
+ _FDT(spapr_fixup_cpu_smt_dt(fdt, offset, cpu, compat_smt));
+
+ if (pcc->radix_page_info) {
+ for (i = 0; i < pcc->radix_page_info->count; i++) {
+ radix_AP_encodings[i] =
+ cpu_to_be32(pcc->radix_page_info->entries[i]);
+ }
+ _FDT((fdt_setprop(fdt, offset, "ibm,processor-radix-AP-encodings",
+ radix_AP_encodings,
+ pcc->radix_page_info->count *
+ sizeof(radix_AP_encodings[0]))));
+ }
+
+ /*
+ * We set this property to let the guest know that it can use the large
+ * decrementer and its width in bits.
+ */
+ if (spapr_get_cap(spapr, SPAPR_CAP_LARGE_DECREMENTER) != SPAPR_CAP_OFF) {
+ _FDT((fdt_setprop_u32(fdt, offset, "ibm,dec-bits",
+ pcc->lrg_decr_bits)));
+ }
+}
+
+static void spapr_dt_cpus(void *fdt, SpaprMachineState *spapr)
+{
+ CPUState **rev;
+ CPUState *cs;
+ int n_cpus;
+ int cpus_offset;
+ int i;
+
+ cpus_offset = fdt_add_subnode(fdt, 0, "cpus");
+ _FDT(cpus_offset);
+ _FDT((fdt_setprop_cell(fdt, cpus_offset, "#address-cells", 0x1)));
+ _FDT((fdt_setprop_cell(fdt, cpus_offset, "#size-cells", 0x0)));
+
+ /*
+ * We walk the CPUs in reverse order to ensure that CPU DT nodes
+ * created by fdt_add_subnode() end up in the right order in FDT
+ * for the guest kernel to enumerate the CPUs correctly.
+ *
+ * The CPU list cannot be traversed in reverse order, so we need
+ * to do extra work.
+ */
+ n_cpus = 0;
+ rev = NULL;
+ CPU_FOREACH(cs) {
+ rev = g_renew(CPUState *, rev, n_cpus + 1);
+ rev[n_cpus++] = cs;
+ }
+
+ for (i = n_cpus - 1; i >= 0; i--) {
+ CPUState *cs = rev[i];
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ int index = spapr_get_vcpu_id(cpu);
+ DeviceClass *dc = DEVICE_GET_CLASS(cs);
+ g_autofree char *nodename = NULL;
+ int offset;
+
+ if (!spapr_is_thread0_in_vcore(spapr, cpu)) {
+ continue;
+ }
+
+ nodename = g_strdup_printf("%s@%x", dc->fw_name, index);
+ offset = fdt_add_subnode(fdt, cpus_offset, nodename);
+ _FDT(offset);
+ spapr_dt_cpu(cs, fdt, offset, spapr);
+ }
+
+ g_free(rev);
+}
+
+static int spapr_dt_rng(void *fdt)
+{
+ int node;
+ int ret;
+
+ node = qemu_fdt_add_subnode(fdt, "/ibm,platform-facilities");
+ if (node <= 0) {
+ return -1;
+ }
+ ret = fdt_setprop_string(fdt, node, "device_type",
+ "ibm,platform-facilities");
+ ret |= fdt_setprop_cell(fdt, node, "#address-cells", 0x1);
+ ret |= fdt_setprop_cell(fdt, node, "#size-cells", 0x0);
+
+ node = fdt_add_subnode(fdt, node, "ibm,random-v1");
+ if (node <= 0) {
+ return -1;
+ }
+ ret |= fdt_setprop_string(fdt, node, "compatible", "ibm,random");
+
+ return ret ?
-1 : 0; +} + +static void spapr_dt_rtas(SpaprMachineState *spapr, void *fdt) +{ + MachineState *ms = MACHINE(spapr); + int rtas; + GString *hypertas = g_string_sized_new(256); + GString *qemu_hypertas = g_string_sized_new(256); + uint64_t max_device_addr = MACHINE(spapr)->device_memory->base + + memory_region_size(&MACHINE(spapr)->device_memory->mr); + uint32_t lrdr_capacity[] = { + cpu_to_be32(max_device_addr >> 32), + cpu_to_be32(max_device_addr & 0xffffffff), + cpu_to_be32(SPAPR_MEMORY_BLOCK_SIZE >> 32), + cpu_to_be32(SPAPR_MEMORY_BLOCK_SIZE & 0xffffffff), + cpu_to_be32(ms->smp.max_cpus / ms->smp.threads), + }; + + _FDT(rtas = fdt_add_subnode(fdt, 0, "rtas")); + + /* hypertas */ + add_str(hypertas, "hcall-pft"); + add_str(hypertas, "hcall-term"); + add_str(hypertas, "hcall-dabr"); + add_str(hypertas, "hcall-interrupt"); + add_str(hypertas, "hcall-tce"); + add_str(hypertas, "hcall-vio"); + add_str(hypertas, "hcall-splpar"); + add_str(hypertas, "hcall-join"); + add_str(hypertas, "hcall-bulk"); + add_str(hypertas, "hcall-set-mode"); + add_str(hypertas, "hcall-sprg0"); + add_str(hypertas, "hcall-copy"); + add_str(hypertas, "hcall-debug"); + add_str(hypertas, "hcall-vphn"); + if (spapr_get_cap(spapr, SPAPR_CAP_RPT_INVALIDATE) == SPAPR_CAP_ON) { + add_str(hypertas, "hcall-rpt-invalidate"); + } + + add_str(qemu_hypertas, "hcall-memop1"); + + if (!kvm_enabled() || kvmppc_spapr_use_multitce()) { + add_str(hypertas, "hcall-multi-tce"); + } + + if (spapr->resize_hpt != SPAPR_RESIZE_HPT_DISABLED) { + add_str(hypertas, "hcall-hpt-resize"); + } + + _FDT(fdt_setprop(fdt, rtas, "ibm,hypertas-functions", + hypertas->str, hypertas->len)); + g_string_free(hypertas, TRUE); + _FDT(fdt_setprop(fdt, rtas, "qemu,hypertas-functions", + qemu_hypertas->str, qemu_hypertas->len)); + g_string_free(qemu_hypertas, TRUE); + + spapr_numa_write_rtas_dt(spapr, fdt, rtas); + + /* + * FWNMI reserves RTAS_ERROR_LOG_MAX for the machine check error log, + * and 16 bytes per CPU for system reset error log plus an extra 8 bytes. + * + * The system reset requirements are driven by existing Linux and PowerVM + * implementation which (contrary to PAPR) saves r3 in the error log + * structure like machine check, so Linux expects to find the saved r3 + * value at the address in r3 upon FWNMI-enabled sreset interrupt (and + * does not look at the error value). + * + * System reset interrupts are not subject to interlock like machine + * check, so this memory area could be corrupted if the sreset is + * interrupted by a machine check (or vice versa) if it was shared. To + * prevent this, system reset uses per-CPU areas for the sreset save + * area. A system reset that interrupts a system reset handler could + * still overwrite this area, but Linux doesn't try to recover in that + * case anyway. + * + * The extra 8 bytes is required because Linux's FWNMI error log check + * is off-by-one. + * + * RTAS_MIN_SIZE is required for the RTAS blob itself. + */ + _FDT(fdt_setprop_cell(fdt, rtas, "rtas-size", RTAS_MIN_SIZE + + RTAS_ERROR_LOG_MAX + + ms->smp.max_cpus * sizeof(uint64_t) * 2 + + sizeof(uint64_t))); + _FDT(fdt_setprop_cell(fdt, rtas, "rtas-error-log-max", + RTAS_ERROR_LOG_MAX)); + _FDT(fdt_setprop_cell(fdt, rtas, "rtas-event-scan-rate", + RTAS_EVENT_SCAN_RATE)); + + g_assert(msi_nonbroken); + _FDT(fdt_setprop(fdt, rtas, "ibm,change-msix-capable", NULL, 0)); + + /* + * According to PAPR, rtas ibm,os-term does not guarantee a return + * back to the guest cpu. 
+ * + * While an additional ibm,extended-os-term property indicates + * that rtas call return will always occur. Set this property. + */ + _FDT(fdt_setprop(fdt, rtas, "ibm,extended-os-term", NULL, 0)); + + _FDT(fdt_setprop(fdt, rtas, "ibm,lrdr-capacity", + lrdr_capacity, sizeof(lrdr_capacity))); + + spapr_dt_rtas_tokens(fdt, rtas); +} + +/* + * Prepare ibm,arch-vec-5-platform-support, which indicates the MMU + * and the XIVE features that the guest may request and thus the valid + * values for bytes 23..26 of option vector 5: + */ +static void spapr_dt_ov5_platform_support(SpaprMachineState *spapr, void *fdt, + int chosen) +{ + PowerPCCPU *first_ppc_cpu = POWERPC_CPU(first_cpu); + + char val[2 * 4] = { + 23, 0x00, /* XICS / XIVE mode */ + 24, 0x00, /* Hash/Radix, filled in below. */ + 25, 0x00, /* Hash options: Segment Tables == no, GTSE == no. */ + 26, 0x40, /* Radix options: GTSE == yes. */ + }; + + if (spapr->irq->xics && spapr->irq->xive) { + val[1] = SPAPR_OV5_XIVE_BOTH; + } else if (spapr->irq->xive) { + val[1] = SPAPR_OV5_XIVE_EXPLOIT; + } else { + assert(spapr->irq->xics); + val[1] = SPAPR_OV5_XIVE_LEGACY; + } + + if (!ppc_check_compat(first_ppc_cpu, CPU_POWERPC_LOGICAL_3_00, 0, + first_ppc_cpu->compat_pvr)) { + /* + * If we're in a pre POWER9 compat mode then the guest should + * do hash and use the legacy interrupt mode + */ + val[1] = SPAPR_OV5_XIVE_LEGACY; /* XICS */ + val[3] = 0x00; /* Hash */ + spapr_check_mmu_mode(false); + } else if (kvm_enabled()) { + if (kvmppc_has_cap_mmu_radix() && kvmppc_has_cap_mmu_hash_v3()) { + val[3] = 0x80; /* OV5_MMU_BOTH */ + } else if (kvmppc_has_cap_mmu_radix()) { + val[3] = 0x40; /* OV5_MMU_RADIX_300 */ + } else { + val[3] = 0x00; /* Hash */ + } + } else { + /* V3 MMU supports both hash and radix in tcg (with dynamic switching) */ + val[3] = 0xC0; + } + _FDT(fdt_setprop(fdt, chosen, "ibm,arch-vec-5-platform-support", + val, sizeof(val))); +} + +static void spapr_dt_chosen(SpaprMachineState *spapr, void *fdt, bool reset) +{ + MachineState *machine = MACHINE(spapr); + SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(machine); + int chosen; + + _FDT(chosen = fdt_add_subnode(fdt, 0, "chosen")); + + if (reset) { + const char *boot_device = spapr->boot_device; + char *stdout_path = spapr_vio_stdout_path(spapr->vio_bus); + size_t cb = 0; + char *bootlist = get_boot_devices_list(&cb); + + if (machine->kernel_cmdline && machine->kernel_cmdline[0]) { + _FDT(fdt_setprop_string(fdt, chosen, "bootargs", + machine->kernel_cmdline)); + } + + if (spapr->initrd_size) { + _FDT(fdt_setprop_cell(fdt, chosen, "linux,initrd-start", + spapr->initrd_base)); + _FDT(fdt_setprop_cell(fdt, chosen, "linux,initrd-end", + spapr->initrd_base + spapr->initrd_size)); + } + + if (spapr->kernel_size) { + uint64_t kprop[2] = { cpu_to_be64(spapr->kernel_addr), + cpu_to_be64(spapr->kernel_size) }; + + _FDT(fdt_setprop(fdt, chosen, "qemu,boot-kernel", + &kprop, sizeof(kprop))); + if (spapr->kernel_le) { + _FDT(fdt_setprop(fdt, chosen, "qemu,boot-kernel-le", NULL, 0)); + } + } + if (boot_menu) { + _FDT((fdt_setprop_cell(fdt, chosen, "qemu,boot-menu", boot_menu))); + } + _FDT(fdt_setprop_cell(fdt, chosen, "qemu,graphic-width", graphic_width)); + _FDT(fdt_setprop_cell(fdt, chosen, "qemu,graphic-height", graphic_height)); + _FDT(fdt_setprop_cell(fdt, chosen, "qemu,graphic-depth", graphic_depth)); + + if (cb && bootlist) { + int i; + + for (i = 0; i < cb; i++) { + if (bootlist[i] == '\n') { + bootlist[i] = ' '; + } + } + _FDT(fdt_setprop_string(fdt, chosen, "qemu,boot-list", bootlist)); 
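+ /* Note: qemu,boot-list is consumed by the guest firmware (SLOF);
+ * the loop above rewrote the newline-separated list into the
+ * space-separated form it expects. */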
+ } + + if (boot_device && strlen(boot_device)) { + _FDT(fdt_setprop_string(fdt, chosen, "qemu,boot-device", boot_device)); + } + + if (!spapr->has_graphics && stdout_path) { + /* + * "linux,stdout-path" and "stdout" properties are + * deprecated by linux kernel. New platforms should only + * use the "stdout-path" property. Set the new property + * and continue using older property to remain compatible + * with the existing firmware. + */ + _FDT(fdt_setprop_string(fdt, chosen, "linux,stdout-path", stdout_path)); + _FDT(fdt_setprop_string(fdt, chosen, "stdout-path", stdout_path)); + } + + /* + * We can deal with BAR reallocation just fine, advertise it + * to the guest + */ + if (smc->linux_pci_probe) { + _FDT(fdt_setprop_cell(fdt, chosen, "linux,pci-probe-only", 0)); + } + + spapr_dt_ov5_platform_support(spapr, fdt, chosen); + + g_free(stdout_path); + g_free(bootlist); + } + + _FDT(spapr_dt_ovec(fdt, chosen, spapr->ov5_cas, "ibm,architecture-vec-5")); +} + +static void spapr_dt_hypervisor(SpaprMachineState *spapr, void *fdt) +{ + /* The /hypervisor node isn't in PAPR - this is a hack to allow PR + * KVM to work under pHyp with some guest co-operation */ + int hypervisor; + uint8_t hypercall[16]; + + _FDT(hypervisor = fdt_add_subnode(fdt, 0, "hypervisor")); + /* indicate KVM hypercall interface */ + _FDT(fdt_setprop_string(fdt, hypervisor, "compatible", "linux,kvm")); + if (kvmppc_has_cap_fixup_hcalls()) { + /* + * Older KVM versions with older guest kernels were broken + * with the magic page, don't allow the guest to map it. + */ + if (!kvmppc_get_hypercall(first_cpu->env_ptr, hypercall, + sizeof(hypercall))) { + _FDT(fdt_setprop(fdt, hypervisor, "hcall-instructions", + hypercall, sizeof(hypercall))); + } + } +} + +void *spapr_build_fdt(SpaprMachineState *spapr, bool reset, size_t space) +{ + MachineState *machine = MACHINE(spapr); + MachineClass *mc = MACHINE_GET_CLASS(machine); + SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(machine); + uint32_t root_drc_type_mask = 0; + int ret; + void *fdt; + SpaprPhbState *phb; + char *buf; + + fdt = g_malloc0(space); + _FDT((fdt_create_empty_tree(fdt, space))); + + /* Root node */ + _FDT(fdt_setprop_string(fdt, 0, "device_type", "chrp")); + _FDT(fdt_setprop_string(fdt, 0, "model", "IBM pSeries (emulated by qemu)")); + _FDT(fdt_setprop_string(fdt, 0, "compatible", "qemu,pseries")); + + /* Guest UUID & Name*/ + buf = qemu_uuid_unparse_strdup(&qemu_uuid); + _FDT(fdt_setprop_string(fdt, 0, "vm,uuid", buf)); + if (qemu_uuid_set) { + _FDT(fdt_setprop_string(fdt, 0, "system-id", buf)); + } + g_free(buf); + + if (qemu_get_vm_name()) { + _FDT(fdt_setprop_string(fdt, 0, "ibm,partition-name", + qemu_get_vm_name())); + } + + /* Host Model & Serial Number */ + if (spapr->host_model) { + _FDT(fdt_setprop_string(fdt, 0, "host-model", spapr->host_model)); + } else if (smc->broken_host_serial_model && kvmppc_get_host_model(&buf)) { + _FDT(fdt_setprop_string(fdt, 0, "host-model", buf)); + g_free(buf); + } + + if (spapr->host_serial) { + _FDT(fdt_setprop_string(fdt, 0, "host-serial", spapr->host_serial)); + } else if (smc->broken_host_serial_model && kvmppc_get_host_serial(&buf)) { + _FDT(fdt_setprop_string(fdt, 0, "host-serial", buf)); + g_free(buf); + } + + _FDT(fdt_setprop_cell(fdt, 0, "#address-cells", 2)); + _FDT(fdt_setprop_cell(fdt, 0, "#size-cells", 2)); + + /* /interrupt controller */ + spapr_irq_dt(spapr, spapr_max_server_number(spapr), fdt, PHANDLE_INTC); + + ret = spapr_dt_memory(spapr, fdt); + if (ret < 0) { + error_report("couldn't setup memory nodes 
in fdt"); + exit(1); + } + + /* /vdevice */ + spapr_dt_vdevice(spapr->vio_bus, fdt); + + if (object_resolve_path_type("", TYPE_SPAPR_RNG, NULL)) { + ret = spapr_dt_rng(fdt); + if (ret < 0) { + error_report("could not set up rng device in the fdt"); + exit(1); + } + } + + QLIST_FOREACH(phb, &spapr->phbs, list) { + ret = spapr_dt_phb(spapr, phb, PHANDLE_INTC, fdt, NULL); + if (ret < 0) { + error_report("couldn't setup PCI devices in fdt"); + exit(1); + } + } + + spapr_dt_cpus(fdt, spapr); + + /* ibm,drc-indexes and friends */ + if (smc->dr_lmb_enabled) { + root_drc_type_mask |= SPAPR_DR_CONNECTOR_TYPE_LMB; + } + if (smc->dr_phb_enabled) { + root_drc_type_mask |= SPAPR_DR_CONNECTOR_TYPE_PHB; + } + if (mc->nvdimm_supported) { + root_drc_type_mask |= SPAPR_DR_CONNECTOR_TYPE_PMEM; + } + if (root_drc_type_mask) { + _FDT(spapr_dt_drc(fdt, 0, NULL, root_drc_type_mask)); + } + + if (mc->has_hotpluggable_cpus) { + int offset = fdt_path_offset(fdt, "/cpus"); + ret = spapr_dt_drc(fdt, offset, NULL, SPAPR_DR_CONNECTOR_TYPE_CPU); + if (ret < 0) { + error_report("Couldn't set up CPU DR device tree properties"); + exit(1); + } + } + + /* /event-sources */ + spapr_dt_events(spapr, fdt); + + /* /rtas */ + spapr_dt_rtas(spapr, fdt); + + /* /chosen */ + spapr_dt_chosen(spapr, fdt, reset); + + /* /hypervisor */ + if (kvm_enabled()) { + spapr_dt_hypervisor(spapr, fdt); + } + + /* Build memory reserve map */ + if (reset) { + if (spapr->kernel_size) { + _FDT((fdt_add_mem_rsv(fdt, spapr->kernel_addr, + spapr->kernel_size))); + } + if (spapr->initrd_size) { + _FDT((fdt_add_mem_rsv(fdt, spapr->initrd_base, + spapr->initrd_size))); + } + } + + /* NVDIMM devices */ + if (mc->nvdimm_supported) { + spapr_dt_persistent_memory(spapr, fdt); + } + + return fdt; +} + +static uint64_t translate_kernel_address(void *opaque, uint64_t addr) +{ + SpaprMachineState *spapr = opaque; + + return (addr & 0x0fffffff) + spapr->kernel_addr; +} + +static void emulate_spapr_hypercall(PPCVirtualHypervisor *vhyp, + PowerPCCPU *cpu) +{ + CPUPPCState *env = &cpu->env; + + /* The TCG path should also be holding the BQL at this point */ + g_assert(qemu_mutex_iothread_locked()); + + if (msr_pr) { + hcall_dprintf("Hypercall made with MSR[PR]=1\n"); + env->gpr[3] = H_PRIVILEGE; + } else { + env->gpr[3] = spapr_hypercall(cpu, env->gpr[3], &env->gpr[4]); + } +} + +struct LPCRSyncState { + target_ulong value; + target_ulong mask; +}; + +static void do_lpcr_sync(CPUState *cs, run_on_cpu_data arg) +{ + struct LPCRSyncState *s = arg.host_ptr; + PowerPCCPU *cpu = POWERPC_CPU(cs); + CPUPPCState *env = &cpu->env; + target_ulong lpcr; + + cpu_synchronize_state(cs); + lpcr = env->spr[SPR_LPCR]; + lpcr &= ~s->mask; + lpcr |= s->value; + ppc_store_lpcr(cpu, lpcr); +} + +void spapr_set_all_lpcrs(target_ulong value, target_ulong mask) +{ + CPUState *cs; + struct LPCRSyncState s = { + .value = value, + .mask = mask + }; + CPU_FOREACH(cs) { + run_on_cpu(cs, do_lpcr_sync, RUN_ON_CPU_HOST_PTR(&s)); + } +} + +static void spapr_get_pate(PPCVirtualHypervisor *vhyp, ppc_v3_pate_t *entry) +{ + SpaprMachineState *spapr = SPAPR_MACHINE(vhyp); + + /* Copy PATE1:GR into PATE0:HR */ + entry->dw0 = spapr->patb_entry & PATE0_HR; + entry->dw1 = spapr->patb_entry; +} + +#define HPTE(_table, _i) (void *)(((uint64_t *)(_table)) + ((_i) * 2)) +#define HPTE_VALID(_hpte) (tswap64(*((uint64_t *)(_hpte))) & HPTE64_V_VALID) +#define HPTE_DIRTY(_hpte) (tswap64(*((uint64_t *)(_hpte))) & HPTE64_V_HPTE_DIRTY) +#define CLEAN_HPTE(_hpte) ((*(uint64_t *)(_hpte)) &= tswap64(~HPTE64_V_HPTE_DIRTY)) 
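+/*
+ * HPTE64_V_HPTE_DIRTY is a QEMU-internal software bit; the macros
+ * above and below use it to track which HPTEs still need to be sent
+ * during HPT migration.
+ */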
+#define DIRTY_HPTE(_hpte) ((*(uint64_t *)(_hpte)) |= tswap64(HPTE64_V_HPTE_DIRTY)) + +/* + * Get the fd to access the kernel htab, re-opening it if necessary + */ +static int get_htab_fd(SpaprMachineState *spapr) +{ + Error *local_err = NULL; + + if (spapr->htab_fd >= 0) { + return spapr->htab_fd; + } + + spapr->htab_fd = kvmppc_get_htab_fd(false, 0, &local_err); + if (spapr->htab_fd < 0) { + error_report_err(local_err); + } + + return spapr->htab_fd; +} + +void close_htab_fd(SpaprMachineState *spapr) +{ + if (spapr->htab_fd >= 0) { + close(spapr->htab_fd); + } + spapr->htab_fd = -1; +} + +static hwaddr spapr_hpt_mask(PPCVirtualHypervisor *vhyp) +{ + SpaprMachineState *spapr = SPAPR_MACHINE(vhyp); + + return HTAB_SIZE(spapr) / HASH_PTEG_SIZE_64 - 1; +} + +static target_ulong spapr_encode_hpt_for_kvm_pr(PPCVirtualHypervisor *vhyp) +{ + SpaprMachineState *spapr = SPAPR_MACHINE(vhyp); + + assert(kvm_enabled()); + + if (!spapr->htab) { + return 0; + } + + return (target_ulong)(uintptr_t)spapr->htab | (spapr->htab_shift - 18); +} + +static const ppc_hash_pte64_t *spapr_map_hptes(PPCVirtualHypervisor *vhyp, + hwaddr ptex, int n) +{ + SpaprMachineState *spapr = SPAPR_MACHINE(vhyp); + hwaddr pte_offset = ptex * HASH_PTE_SIZE_64; + + if (!spapr->htab) { + /* + * HTAB is controlled by KVM. Fetch into temporary buffer + */ + ppc_hash_pte64_t *hptes = g_malloc(n * HASH_PTE_SIZE_64); + kvmppc_read_hptes(hptes, ptex, n); + return hptes; + } + + /* + * HTAB is controlled by QEMU. Just point to the internally + * accessible PTEG. + */ + return (const ppc_hash_pte64_t *)(spapr->htab + pte_offset); +} + +static void spapr_unmap_hptes(PPCVirtualHypervisor *vhyp, + const ppc_hash_pte64_t *hptes, + hwaddr ptex, int n) +{ + SpaprMachineState *spapr = SPAPR_MACHINE(vhyp); + + if (!spapr->htab) { + g_free((void *)hptes); + } + + /* Nothing to do for qemu managed HPT */ +} + +void spapr_store_hpte(PowerPCCPU *cpu, hwaddr ptex, + uint64_t pte0, uint64_t pte1) +{ + SpaprMachineState *spapr = SPAPR_MACHINE(cpu->vhyp); + hwaddr offset = ptex * HASH_PTE_SIZE_64; + + if (!spapr->htab) { + kvmppc_write_hpte(ptex, pte0, pte1); + } else { + if (pte0 & HPTE64_V_VALID) { + stq_p(spapr->htab + offset + HPTE64_DW1, pte1); + /* + * When setting valid, we write PTE1 first. This ensures + * proper synchronization with the reading code in + * ppc_hash64_pteg_search() + */ + smp_wmb(); + stq_p(spapr->htab + offset, pte0); + } else { + stq_p(spapr->htab + offset, pte0); + /* + * When clearing it we set PTE0 first. 
This ensures proper + * synchronization with the reading code in + * ppc_hash64_pteg_search() + */ + smp_wmb(); + stq_p(spapr->htab + offset + HPTE64_DW1, pte1); + } + } +} + +static void spapr_hpte_set_c(PPCVirtualHypervisor *vhyp, hwaddr ptex, + uint64_t pte1) +{ + hwaddr offset = ptex * HASH_PTE_SIZE_64 + HPTE64_DW1_C; + SpaprMachineState *spapr = SPAPR_MACHINE(vhyp); + + if (!spapr->htab) { + /* There should always be a hash table when this is called */ + error_report("spapr_hpte_set_c called with no hash table !"); + return; + } + + /* The HW performs a non-atomic byte update */ + stb_p(spapr->htab + offset, (pte1 & 0xff) | 0x80); +} + +static void spapr_hpte_set_r(PPCVirtualHypervisor *vhyp, hwaddr ptex, + uint64_t pte1) +{ + hwaddr offset = ptex * HASH_PTE_SIZE_64 + HPTE64_DW1_R; + SpaprMachineState *spapr = SPAPR_MACHINE(vhyp); + + if (!spapr->htab) { + /* There should always be a hash table when this is called */ + error_report("spapr_hpte_set_r called with no hash table !"); + return; + } + + /* The HW performs a non-atomic byte update */ + stb_p(spapr->htab + offset, ((pte1 >> 8) & 0xff) | 0x01); +} + +int spapr_hpt_shift_for_ramsize(uint64_t ramsize) +{ + int shift; + + /* We aim for a hash table of size 1/128 the size of RAM (rounded + * up). The PAPR recommendation is actually 1/64 of RAM size, but + * that's much more than is needed for Linux guests */ + shift = ctz64(pow2ceil(ramsize)) - 7; + shift = MAX(shift, 18); /* Minimum architected size */ + shift = MIN(shift, 46); /* Maximum architected size */ + return shift; +} + +void spapr_free_hpt(SpaprMachineState *spapr) +{ + g_free(spapr->htab); + spapr->htab = NULL; + spapr->htab_shift = 0; + close_htab_fd(spapr); +} + +int spapr_reallocate_hpt(SpaprMachineState *spapr, int shift, Error **errp) +{ + ERRP_GUARD(); + long rc; + + /* Clean up any HPT info from a previous boot */ + spapr_free_hpt(spapr); + + rc = kvmppc_reset_htab(shift); + + if (rc == -EOPNOTSUPP) { + error_setg(errp, "HPT not supported in nested guests"); + return -EOPNOTSUPP; + } + + if (rc < 0) { + /* kernel-side HPT needed, but couldn't allocate one */ + error_setg_errno(errp, errno, "Failed to allocate KVM HPT of order %d", + shift); + error_append_hint(errp, "Try smaller maxmem?\n"); + return -errno; + } else if (rc > 0) { + /* kernel-side HPT allocated */ + if (rc != shift) { + error_setg(errp, + "Requested order %d HPT, but kernel allocated order %ld", + shift, rc); + error_append_hint(errp, "Try smaller maxmem?\n"); + return -ENOSPC; + } + + spapr->htab_shift = shift; + spapr->htab = NULL; + } else { + /* kernel-side HPT not needed, allocate in userspace instead */ + size_t size = 1ULL << shift; + int i; + + spapr->htab = qemu_memalign(size, size); + memset(spapr->htab, 0, size); + spapr->htab_shift = shift; + + for (i = 0; i < size / HASH_PTE_SIZE_64; i++) { + DIRTY_HPTE(HPTE(spapr->htab, i)); + } + } + /* We're setting up a hash table, so that means we're not radix */ + spapr->patb_entry = 0; + spapr_set_all_lpcrs(0, LPCR_HR | LPCR_UPRT); + return 0; +} + +void spapr_setup_hpt(SpaprMachineState *spapr) +{ + int hpt_shift; + + if (spapr->resize_hpt == SPAPR_RESIZE_HPT_DISABLED) { + hpt_shift = spapr_hpt_shift_for_ramsize(MACHINE(spapr)->maxram_size); + } else { + uint64_t current_ram_size; + + current_ram_size = MACHINE(spapr)->ram_size + get_plugged_memory_size(); + hpt_shift = spapr_hpt_shift_for_ramsize(current_ram_size); + } + spapr_reallocate_hpt(spapr, hpt_shift, &error_fatal); + + if (kvm_enabled()) { + hwaddr vrma_limit = 
kvmppc_vrma_limit(spapr->htab_shift);
+
+ /* Check our RMA fits in the possible VRMA */
+ if (vrma_limit < spapr->rma_size) {
+ error_report("Unable to create %" HWADDR_PRIu
+ "MiB RMA (VRMA only allows %" HWADDR_PRIu "MiB)",
+ spapr->rma_size / MiB, vrma_limit / MiB);
+ exit(EXIT_FAILURE);
+ }
+ }
+}
+
+void spapr_check_mmu_mode(bool guest_radix)
+{
+ if (guest_radix) {
+ if (kvm_enabled() && !kvmppc_has_cap_mmu_radix()) {
+ error_report("Guest requested unavailable MMU mode (radix).");
+ exit(EXIT_FAILURE);
+ }
+ } else {
+ if (kvm_enabled() && kvmppc_has_cap_mmu_radix()
+ && !kvmppc_has_cap_mmu_hash_v3()) {
+ error_report("Guest requested unavailable MMU mode (hash).");
+ exit(EXIT_FAILURE);
+ }
+ }
+}
+
+static void spapr_machine_reset(MachineState *machine)
+{
+ SpaprMachineState *spapr = SPAPR_MACHINE(machine);
+ PowerPCCPU *first_ppc_cpu;
+ hwaddr fdt_addr;
+ void *fdt;
+ int rc;
+
+ pef_kvm_reset(machine->cgs, &error_fatal);
+ spapr_caps_apply(spapr);
+
+ first_ppc_cpu = POWERPC_CPU(first_cpu);
+ if (kvm_enabled() && kvmppc_has_cap_mmu_radix() &&
+ ppc_type_check_compat(machine->cpu_type, CPU_POWERPC_LOGICAL_3_00, 0,
+ spapr->max_compat_pvr)) {
+ /*
+ * If using KVM with radix mode available, VCPUs can be started
+ * without a HPT because KVM will start them in radix mode.
+ * Set the GR bit in PATE so that we know there is no HPT.
+ */
+ spapr->patb_entry = PATE1_GR;
+ spapr_set_all_lpcrs(LPCR_HR | LPCR_UPRT, LPCR_HR | LPCR_UPRT);
+ } else {
+ spapr_setup_hpt(spapr);
+ }
+
+ qemu_devices_reset();
+
+ spapr_ovec_cleanup(spapr->ov5_cas);
+ spapr->ov5_cas = spapr_ovec_new();
+
+ ppc_set_compat_all(spapr->max_compat_pvr, &error_fatal);
+
+ /*
+ * This is fixing some of the default configuration of the XIVE
+ * devices. To be called after the reset of the machine devices.
+ */
+ spapr_irq_reset(spapr, &error_fatal);
+
+ /*
+ * There is no CAS under qtest. Simulate one to please the code that
+ * depends on spapr->ov5_cas. This is especially needed to test device
+ * unplug, so we do that before resetting the DRCs.
+ */
+ if (qtest_enabled()) {
+ spapr_ovec_cleanup(spapr->ov5_cas);
+ spapr->ov5_cas = spapr_ovec_clone(spapr->ov5);
+ }
+
+ /* DRC reset may cause a device to be unplugged. This will cause trouble
+ * if this device is used by another device (eg, a running vhost backend
+ * will crash QEMU if the DIMM holding the vring goes away). To avoid such
+ * situations, we reset DRCs after all devices have been reset.
+ */
+ spapr_drc_reset_all(spapr);
+
+ spapr_clear_pending_events(spapr);
+
+ /*
+ * We place the device tree just below either the top of the RMA,
+ * or just below 2GB, whichever is lower, so that it can be
+ * processed with 32-bit real mode code if necessary
+ */
+ fdt_addr = MIN(spapr->rma_size, FDT_MAX_ADDR) - FDT_MAX_SIZE;
+
+ fdt = spapr_build_fdt(spapr, true, FDT_MAX_SIZE);
+ if (spapr->vof) {
+ spapr_vof_reset(spapr, fdt, &error_fatal);
+ /*
+ * Do not pack the FDT as the client may change properties.
+ * VOF client does not expect the FDT so we do not load it to the VM.
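+ * Instead, the client is expected to read properties at runtime
+ * through the Open Firmware client interface that VOF implements.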
+ */ + } else { + rc = fdt_pack(fdt); + /* Should only fail if we've built a corrupted tree */ + assert(rc == 0); + + spapr_cpu_set_entry_state(first_ppc_cpu, SPAPR_ENTRY_POINT, + 0, fdt_addr, 0); + cpu_physical_memory_write(fdt_addr, fdt, fdt_totalsize(fdt)); + } + qemu_fdt_dumpdtb(fdt, fdt_totalsize(fdt)); + + g_free(spapr->fdt_blob); + spapr->fdt_size = fdt_totalsize(fdt); + spapr->fdt_initial_size = spapr->fdt_size; + spapr->fdt_blob = fdt; + + /* Set up the entry state */ + first_ppc_cpu->env.gpr[5] = 0; + + spapr->fwnmi_system_reset_addr = -1; + spapr->fwnmi_machine_check_addr = -1; + spapr->fwnmi_machine_check_interlock = -1; + + /* Signal all vCPUs waiting on this condition */ + qemu_cond_broadcast(&spapr->fwnmi_machine_check_interlock_cond); + + migrate_del_blocker(spapr->fwnmi_migration_blocker); +} + +static void spapr_create_nvram(SpaprMachineState *spapr) +{ + DeviceState *dev = qdev_new("spapr-nvram"); + DriveInfo *dinfo = drive_get(IF_PFLASH, 0, 0); + + if (dinfo) { + qdev_prop_set_drive_err(dev, "drive", blk_by_legacy_dinfo(dinfo), + &error_fatal); + } + + qdev_realize_and_unref(dev, &spapr->vio_bus->bus, &error_fatal); + + spapr->nvram = (struct SpaprNvram *)dev; +} + +static void spapr_rtc_create(SpaprMachineState *spapr) +{ + object_initialize_child_with_props(OBJECT(spapr), "rtc", &spapr->rtc, + sizeof(spapr->rtc), TYPE_SPAPR_RTC, + &error_fatal, NULL); + qdev_realize(DEVICE(&spapr->rtc), NULL, &error_fatal); + object_property_add_alias(OBJECT(spapr), "rtc-time", OBJECT(&spapr->rtc), + "date"); +} + +/* Returns whether we want to use VGA or not */ +static bool spapr_vga_init(PCIBus *pci_bus, Error **errp) +{ + switch (vga_interface_type) { + case VGA_NONE: + return false; + case VGA_DEVICE: + return true; + case VGA_STD: + case VGA_VIRTIO: + case VGA_CIRRUS: + return pci_vga_init(pci_bus) != NULL; + default: + error_setg(errp, + "Unsupported VGA mode, only -vga std or -vga virtio is supported"); + return false; + } +} + +static int spapr_pre_load(void *opaque) +{ + int rc; + + rc = spapr_caps_pre_load(opaque); + if (rc) { + return rc; + } + + return 0; +} + +static int spapr_post_load(void *opaque, int version_id) +{ + SpaprMachineState *spapr = (SpaprMachineState *)opaque; + int err = 0; + + err = spapr_caps_post_migration(spapr); + if (err) { + return err; + } + + /* + * In earlier versions, there was no separate qdev for the PAPR + * RTC, so the RTC offset was stored directly in sPAPREnvironment. + * So when migrating from those versions, poke the incoming offset + * value into the RTC device + */ + if (version_id < 3) { + err = spapr_rtc_import_offset(&spapr->rtc, spapr->rtc_offset); + if (err) { + return err; + } + } + + if (kvm_enabled() && spapr->patb_entry) { + PowerPCCPU *cpu = POWERPC_CPU(first_cpu); + bool radix = !!(spapr->patb_entry & PATE1_GR); + bool gtse = !!(cpu->env.spr[SPR_LPCR] & LPCR_GTSE); + + /* + * Update LPCR:HR and UPRT as they may not be set properly in + * the stream + */ + spapr_set_all_lpcrs(radix ? 
(LPCR_HR | LPCR_UPRT) : 0,
+ LPCR_HR | LPCR_UPRT);
+
+ err = kvmppc_configure_v3_mmu(cpu, radix, gtse, spapr->patb_entry);
+ if (err) {
+ error_report("Process table config unsupported by the host");
+ return -EINVAL;
+ }
+ }
+
+ err = spapr_irq_post_load(spapr, version_id);
+ if (err) {
+ return err;
+ }
+
+ return err;
+}
+
+static int spapr_pre_save(void *opaque)
+{
+ int rc;
+
+ rc = spapr_caps_pre_save(opaque);
+ if (rc) {
+ return rc;
+ }
+
+ return 0;
+}
+
+static bool version_before_3(void *opaque, int version_id)
+{
+ return version_id < 3;
+}
+
+static bool spapr_pending_events_needed(void *opaque)
+{
+ SpaprMachineState *spapr = (SpaprMachineState *)opaque;
+ return !QTAILQ_EMPTY(&spapr->pending_events);
+}
+
+static const VMStateDescription vmstate_spapr_event_entry = {
+ .name = "spapr_event_log_entry",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(summary, SpaprEventLogEntry),
+ VMSTATE_UINT32(extended_length, SpaprEventLogEntry),
+ VMSTATE_VBUFFER_ALLOC_UINT32(extended_log, SpaprEventLogEntry, 0,
+ NULL, extended_length),
+ VMSTATE_END_OF_LIST()
+ },
+};
+
+static const VMStateDescription vmstate_spapr_pending_events = {
+ .name = "spapr_pending_events",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = spapr_pending_events_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_QTAILQ_V(pending_events, SpaprMachineState, 1,
+ vmstate_spapr_event_entry, SpaprEventLogEntry, next),
+ VMSTATE_END_OF_LIST()
+ },
+};
+
+static bool spapr_ov5_cas_needed(void *opaque)
+{
+ SpaprMachineState *spapr = opaque;
+ SpaprOptionVector *ov5_mask = spapr_ovec_new();
+ bool cas_needed;
+
+ /* Prior to the introduction of SpaprOptionVector, we had two option
+ * vectors we dealt with: OV5_FORM1_AFFINITY, and OV5_DRCONF_MEMORY.
+ * Both of these options encode machine topology into the device-tree
+ * in such a way that the now-booted OS should still be able to interact
+ * appropriately with QEMU regardless of what options were actually
+ * negotiated on the source side.
+ *
+ * As such, we can avoid migrating the CAS-negotiated options if these
+ * are the only options available on the current machine/platform.
+ * Since these are the only options available for pseries-2.7 and
+ * earlier, this allows us to maintain old->new/new->old migration
+ * compatibility.
+ *
+ * For QEMU 2.8+, there are additional CAS-negotiable options available
+ * via default pseries-2.8 machines and explicit command-line parameters.
+ * Some of these options, like OV5_HP_EVT, *do* require QEMU to be aware
+ * of the actual CAS-negotiated values to continue working properly. For
+ * example, availability of memory unplug depends on knowing whether
+ * OV5_HP_EVT was negotiated via CAS.
+ *
+ * Thus, for any cases where the set of available CAS-negotiable
+ * options extends beyond OV5_FORM1_AFFINITY and OV5_DRCONF_MEMORY, we
+ * include the CAS-negotiated options in the migration stream, unless
+ * they affect boot-time behaviour only.
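+ * (Note: OV5_DRMEM_V2, included in the mask below, appears to be such
+ * a case, since it only selects the dynamic memory device tree format
+ * built at boot.)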
+ */ + spapr_ovec_set(ov5_mask, OV5_FORM1_AFFINITY); + spapr_ovec_set(ov5_mask, OV5_DRCONF_MEMORY); + spapr_ovec_set(ov5_mask, OV5_DRMEM_V2); + + /* We need extra information if we have any bits outside the mask + * defined above */ + cas_needed = !spapr_ovec_subset(spapr->ov5, ov5_mask); + + spapr_ovec_cleanup(ov5_mask); + + return cas_needed; +} + +static const VMStateDescription vmstate_spapr_ov5_cas = { + .name = "spapr_option_vector_ov5_cas", + .version_id = 1, + .minimum_version_id = 1, + .needed = spapr_ov5_cas_needed, + .fields = (VMStateField[]) { + VMSTATE_STRUCT_POINTER_V(ov5_cas, SpaprMachineState, 1, + vmstate_spapr_ovec, SpaprOptionVector), + VMSTATE_END_OF_LIST() + }, +}; + +static bool spapr_patb_entry_needed(void *opaque) +{ + SpaprMachineState *spapr = opaque; + + return !!spapr->patb_entry; +} + +static const VMStateDescription vmstate_spapr_patb_entry = { + .name = "spapr_patb_entry", + .version_id = 1, + .minimum_version_id = 1, + .needed = spapr_patb_entry_needed, + .fields = (VMStateField[]) { + VMSTATE_UINT64(patb_entry, SpaprMachineState), + VMSTATE_END_OF_LIST() + }, +}; + +static bool spapr_irq_map_needed(void *opaque) +{ + SpaprMachineState *spapr = opaque; + + return spapr->irq_map && !bitmap_empty(spapr->irq_map, spapr->irq_map_nr); +} + +static const VMStateDescription vmstate_spapr_irq_map = { + .name = "spapr_irq_map", + .version_id = 1, + .minimum_version_id = 1, + .needed = spapr_irq_map_needed, + .fields = (VMStateField[]) { + VMSTATE_BITMAP(irq_map, SpaprMachineState, 0, irq_map_nr), + VMSTATE_END_OF_LIST() + }, +}; + +static bool spapr_dtb_needed(void *opaque) +{ + SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(opaque); + + return smc->update_dt_enabled; +} + +static int spapr_dtb_pre_load(void *opaque) +{ + SpaprMachineState *spapr = (SpaprMachineState *)opaque; + + g_free(spapr->fdt_blob); + spapr->fdt_blob = NULL; + spapr->fdt_size = 0; + + return 0; +} + +static const VMStateDescription vmstate_spapr_dtb = { + .name = "spapr_dtb", + .version_id = 1, + .minimum_version_id = 1, + .needed = spapr_dtb_needed, + .pre_load = spapr_dtb_pre_load, + .fields = (VMStateField[]) { + VMSTATE_UINT32(fdt_initial_size, SpaprMachineState), + VMSTATE_UINT32(fdt_size, SpaprMachineState), + VMSTATE_VBUFFER_ALLOC_UINT32(fdt_blob, SpaprMachineState, 0, NULL, + fdt_size), + VMSTATE_END_OF_LIST() + }, +}; + +static bool spapr_fwnmi_needed(void *opaque) +{ + SpaprMachineState *spapr = (SpaprMachineState *)opaque; + + return spapr->fwnmi_machine_check_addr != -1; +} + +static int spapr_fwnmi_pre_save(void *opaque) +{ + SpaprMachineState *spapr = (SpaprMachineState *)opaque; + + /* + * Check if machine check handling is in progress and print a + * warning message. + */ + if (spapr->fwnmi_machine_check_interlock != -1) { + warn_report("A machine check is being handled during migration. 
The "
+ "handler may run and log a hardware error on the destination");
+ }
+
+ return 0;
+}
+
+static const VMStateDescription vmstate_spapr_fwnmi = {
+ .name = "spapr_fwnmi",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = spapr_fwnmi_needed,
+ .pre_save = spapr_fwnmi_pre_save,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(fwnmi_system_reset_addr, SpaprMachineState),
+ VMSTATE_UINT64(fwnmi_machine_check_addr, SpaprMachineState),
+ VMSTATE_INT32(fwnmi_machine_check_interlock, SpaprMachineState),
+ VMSTATE_END_OF_LIST()
+ },
+};
+
+static const VMStateDescription vmstate_spapr = {
+ .name = "spapr",
+ .version_id = 3,
+ .minimum_version_id = 1,
+ .pre_load = spapr_pre_load,
+ .post_load = spapr_post_load,
+ .pre_save = spapr_pre_save,
+ .fields = (VMStateField[]) {
+ /* used to be @next_irq */
+ VMSTATE_UNUSED_BUFFER(version_before_3, 0, 4),
+
+ /* RTC offset */
+ VMSTATE_UINT64_TEST(rtc_offset, SpaprMachineState, version_before_3),
+
+ VMSTATE_PPC_TIMEBASE_V(tb, SpaprMachineState, 2),
+ VMSTATE_END_OF_LIST()
+ },
+ .subsections = (const VMStateDescription*[]) {
+ &vmstate_spapr_ov5_cas,
+ &vmstate_spapr_patb_entry,
+ &vmstate_spapr_pending_events,
+ &vmstate_spapr_cap_htm,
+ &vmstate_spapr_cap_vsx,
+ &vmstate_spapr_cap_dfp,
+ &vmstate_spapr_cap_cfpc,
+ &vmstate_spapr_cap_sbbc,
+ &vmstate_spapr_cap_ibs,
+ &vmstate_spapr_cap_hpt_maxpagesize,
+ &vmstate_spapr_irq_map,
+ &vmstate_spapr_cap_nested_kvm_hv,
+ &vmstate_spapr_dtb,
+ &vmstate_spapr_cap_large_decr,
+ &vmstate_spapr_cap_ccf_assist,
+ &vmstate_spapr_cap_fwnmi,
+ &vmstate_spapr_fwnmi,
+ &vmstate_spapr_cap_rpt_invalidate,
+ NULL
+ }
+};
+
+static int htab_save_setup(QEMUFile *f, void *opaque)
+{
+ SpaprMachineState *spapr = opaque;
+
+ /* "Iteration" header */
+ if (!spapr->htab_shift) {
+ qemu_put_be32(f, -1);
+ } else {
+ qemu_put_be32(f, spapr->htab_shift);
+ }
+
+ if (spapr->htab) {
+ spapr->htab_save_index = 0;
+ spapr->htab_first_pass = true;
+ } else {
+ if (spapr->htab_shift) {
+ assert(kvm_enabled());
+ }
+ }
+
+
+ return 0;
+}
+
+static void htab_save_chunk(QEMUFile *f, SpaprMachineState *spapr,
+ int chunkstart, int n_valid, int n_invalid)
+{
+ qemu_put_be32(f, chunkstart);
+ qemu_put_be16(f, n_valid);
+ qemu_put_be16(f, n_invalid);
+ qemu_put_buffer(f, HPTE(spapr->htab, chunkstart),
+ HASH_PTE_SIZE_64 * n_valid);
+}
+
+static void htab_save_end_marker(QEMUFile *f)
+{
+ qemu_put_be32(f, 0);
+ qemu_put_be16(f, 0);
+ qemu_put_be16(f, 0);
+}
+
+static void htab_save_first_pass(QEMUFile *f, SpaprMachineState *spapr,
+ int64_t max_ns)
+{
+ bool has_timeout = max_ns != -1;
+ int htabslots = HTAB_SIZE(spapr) / HASH_PTE_SIZE_64;
+ int index = spapr->htab_save_index;
+ int64_t starttime = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
+
+ assert(spapr->htab_first_pass);
+
+ do {
+ int chunkstart;
+
+ /* Consume invalid HPTEs */
+ while ((index < htabslots)
+ && !HPTE_VALID(HPTE(spapr->htab, index))) {
+ CLEAN_HPTE(HPTE(spapr->htab, index));
+ index++;
+ }
+
+ /* Consume valid HPTEs */
+ chunkstart = index;
+ while ((index < htabslots) && (index - chunkstart < USHRT_MAX)
+ && HPTE_VALID(HPTE(spapr->htab, index))) {
+ CLEAN_HPTE(HPTE(spapr->htab, index));
+ index++;
+ }
+
+ if (index > chunkstart) {
+ int n_valid = index - chunkstart;
+
+ htab_save_chunk(f, spapr, chunkstart, n_valid, 0);
+
+ if (has_timeout &&
+ (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - starttime) > max_ns) {
+ break;
+ }
+ }
+ } while ((index < htabslots) && !qemu_file_rate_limit(f));
+
+ if (index >= htabslots) {
+ assert(index == htabslots);
+ index = 0;
+
spapr->htab_first_pass = false; + } + spapr->htab_save_index = index; +} + +static int htab_save_later_pass(QEMUFile *f, SpaprMachineState *spapr, + int64_t max_ns) +{ + bool final = max_ns < 0; + int htabslots = HTAB_SIZE(spapr) / HASH_PTE_SIZE_64; + int examined = 0, sent = 0; + int index = spapr->htab_save_index; + int64_t starttime = qemu_clock_get_ns(QEMU_CLOCK_REALTIME); + + assert(!spapr->htab_first_pass); + + do { + int chunkstart, invalidstart; + + /* Consume non-dirty HPTEs */ + while ((index < htabslots) + && !HPTE_DIRTY(HPTE(spapr->htab, index))) { + index++; + examined++; + } + + chunkstart = index; + /* Consume valid dirty HPTEs */ + while ((index < htabslots) && (index - chunkstart < USHRT_MAX) + && HPTE_DIRTY(HPTE(spapr->htab, index)) + && HPTE_VALID(HPTE(spapr->htab, index))) { + CLEAN_HPTE(HPTE(spapr->htab, index)); + index++; + examined++; + } + + invalidstart = index; + /* Consume invalid dirty HPTEs */ + while ((index < htabslots) && (index - invalidstart < USHRT_MAX) + && HPTE_DIRTY(HPTE(spapr->htab, index)) + && !HPTE_VALID(HPTE(spapr->htab, index))) { + CLEAN_HPTE(HPTE(spapr->htab, index)); + index++; + examined++; + } + + if (index > chunkstart) { + int n_valid = invalidstart - chunkstart; + int n_invalid = index - invalidstart; + + htab_save_chunk(f, spapr, chunkstart, n_valid, n_invalid); + sent += index - chunkstart; + + if (!final && (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - starttime) > max_ns) { + break; + } + } + + if (examined >= htabslots) { + break; + } + + if (index >= htabslots) { + assert(index == htabslots); + index = 0; + } + } while ((examined < htabslots) && (!qemu_file_rate_limit(f) || final)); + + if (index >= htabslots) { + assert(index == htabslots); + index = 0; + } + + spapr->htab_save_index = index; + + return (examined >= htabslots) && (sent == 0) ? 
1 : 0; +} + +#define MAX_ITERATION_NS 5000000 /* 5 ms */ +#define MAX_KVM_BUF_SIZE 2048 + +static int htab_save_iterate(QEMUFile *f, void *opaque) +{ + SpaprMachineState *spapr = opaque; + int fd; + int rc = 0; + + /* Iteration header */ + if (!spapr->htab_shift) { + qemu_put_be32(f, -1); + return 1; + } else { + qemu_put_be32(f, 0); + } + + if (!spapr->htab) { + assert(kvm_enabled()); + + fd = get_htab_fd(spapr); + if (fd < 0) { + return fd; + } + + rc = kvmppc_save_htab(f, fd, MAX_KVM_BUF_SIZE, MAX_ITERATION_NS); + if (rc < 0) { + return rc; + } + } else if (spapr->htab_first_pass) { + htab_save_first_pass(f, spapr, MAX_ITERATION_NS); + } else { + rc = htab_save_later_pass(f, spapr, MAX_ITERATION_NS); + } + + htab_save_end_marker(f); + + return rc; +} + +static int htab_save_complete(QEMUFile *f, void *opaque) +{ + SpaprMachineState *spapr = opaque; + int fd; + + /* Iteration header */ + if (!spapr->htab_shift) { + qemu_put_be32(f, -1); + return 0; + } else { + qemu_put_be32(f, 0); + } + + if (!spapr->htab) { + int rc; + + assert(kvm_enabled()); + + fd = get_htab_fd(spapr); + if (fd < 0) { + return fd; + } + + rc = kvmppc_save_htab(f, fd, MAX_KVM_BUF_SIZE, -1); + if (rc < 0) { + return rc; + } + } else { + if (spapr->htab_first_pass) { + htab_save_first_pass(f, spapr, -1); + } + htab_save_later_pass(f, spapr, -1); + } + + /* End marker */ + htab_save_end_marker(f); + + return 0; +} + +static int htab_load(QEMUFile *f, void *opaque, int version_id) +{ + SpaprMachineState *spapr = opaque; + uint32_t section_hdr; + int fd = -1; + Error *local_err = NULL; + + if (version_id < 1 || version_id > 1) { + error_report("htab_load() bad version"); + return -EINVAL; + } + + section_hdr = qemu_get_be32(f); + + if (section_hdr == -1) { + spapr_free_hpt(spapr); + return 0; + } + + if (section_hdr) { + int ret; + + /* First section gives the htab size */ + ret = spapr_reallocate_hpt(spapr, section_hdr, &local_err); + if (ret < 0) { + error_report_err(local_err); + return ret; + } + return 0; + } + + if (!spapr->htab) { + assert(kvm_enabled()); + + fd = kvmppc_get_htab_fd(true, 0, &local_err); + if (fd < 0) { + error_report_err(local_err); + return fd; + } + } + + while (true) { + uint32_t index; + uint16_t n_valid, n_invalid; + + index = qemu_get_be32(f); + n_valid = qemu_get_be16(f); + n_invalid = qemu_get_be16(f); + + if ((index == 0) && (n_valid == 0) && (n_invalid == 0)) { + /* End of Stream */ + break; + } + + if ((index + n_valid + n_invalid) > + (HTAB_SIZE(spapr) / HASH_PTE_SIZE_64)) { + /* Bad index in stream */ + error_report( + "htab_load() bad index %d (%hd+%hd entries) in htab stream (htab_shift=%d)", + index, n_valid, n_invalid, spapr->htab_shift); + return -EINVAL; + } + + if (spapr->htab) { + if (n_valid) { + qemu_get_buffer(f, HPTE(spapr->htab, index), + HASH_PTE_SIZE_64 * n_valid); + } + if (n_invalid) { + memset(HPTE(spapr->htab, index + n_valid), 0, + HASH_PTE_SIZE_64 * n_invalid); + } + } else { + int rc; + + assert(fd >= 0); + + rc = kvmppc_load_htab_chunk(f, fd, index, n_valid, n_invalid, + &local_err); + if (rc < 0) { + error_report_err(local_err); + return rc; + } + } + } + + if (!spapr->htab) { + assert(fd >= 0); + close(fd); + } + + return 0; +} + +static void htab_save_cleanup(void *opaque) +{ + SpaprMachineState *spapr = opaque; + + close_htab_fd(spapr); +} + +static SaveVMHandlers savevm_htab_handlers = { + .save_setup = htab_save_setup, + .save_live_iterate = htab_save_iterate, + .save_live_complete_precopy = htab_save_complete, + .save_cleanup = htab_save_cleanup, + 
.load_state = htab_load,
+};
+
+static void spapr_boot_set(void *opaque, const char *boot_device,
+ Error **errp)
+{
+ SpaprMachineState *spapr = SPAPR_MACHINE(opaque);
+
+ g_free(spapr->boot_device);
+ spapr->boot_device = g_strdup(boot_device);
+}
+
+static void spapr_create_lmb_dr_connectors(SpaprMachineState *spapr)
+{
+ MachineState *machine = MACHINE(spapr);
+ uint64_t lmb_size = SPAPR_MEMORY_BLOCK_SIZE;
+ uint32_t nr_lmbs = (machine->maxram_size - machine->ram_size) / lmb_size;
+ int i;
+
+ for (i = 0; i < nr_lmbs; i++) {
+ uint64_t addr;
+
+ addr = i * lmb_size + machine->device_memory->base;
+ spapr_dr_connector_new(OBJECT(spapr), TYPE_SPAPR_DRC_LMB,
+ addr / lmb_size);
+ }
+}
+
+/*
+ * If RAM size, maxmem size and individual node mem sizes aren't aligned
+ * to SPAPR_MEMORY_BLOCK_SIZE(256MB), then refuse to start the guest
+ * since we can't support such unaligned sizes with DRCONF_MEMORY.
+ */
+static void spapr_validate_node_memory(MachineState *machine, Error **errp)
+{
+ int i;
+
+ if (machine->ram_size % SPAPR_MEMORY_BLOCK_SIZE) {
+ error_setg(errp, "Memory size 0x" RAM_ADDR_FMT
+ " is not aligned to %" PRIu64 " MiB",
+ machine->ram_size,
+ SPAPR_MEMORY_BLOCK_SIZE / MiB);
+ return;
+ }
+
+ if (machine->maxram_size % SPAPR_MEMORY_BLOCK_SIZE) {
+ error_setg(errp, "Maximum memory size 0x" RAM_ADDR_FMT
+ " is not aligned to %" PRIu64 " MiB",
+ machine->maxram_size,
+ SPAPR_MEMORY_BLOCK_SIZE / MiB);
+ return;
+ }
+
+ for (i = 0; i < machine->numa_state->num_nodes; i++) {
+ if (machine->numa_state->nodes[i].node_mem % SPAPR_MEMORY_BLOCK_SIZE) {
+ error_setg(errp,
+ "Node %d memory size 0x%" PRIx64
+ " is not aligned to %" PRIu64 " MiB",
+ i, machine->numa_state->nodes[i].node_mem,
+ SPAPR_MEMORY_BLOCK_SIZE / MiB);
+ return;
+ }
+ }
+}
+
+/* find cpu slot in machine->possible_cpus by core_id */
+static CPUArchId *spapr_find_cpu_slot(MachineState *ms, uint32_t id, int *idx)
+{
+ int index = id / ms->smp.threads;
+
+ if (index >= ms->possible_cpus->len) {
+ return NULL;
+ }
+ if (idx) {
+ *idx = index;
+ }
+ return &ms->possible_cpus->cpus[index];
+}
+
+static void spapr_set_vsmt_mode(SpaprMachineState *spapr, Error **errp)
+{
+ MachineState *ms = MACHINE(spapr);
+ SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr);
+ Error *local_err = NULL;
+ bool vsmt_user = !!spapr->vsmt;
+ int kvm_smt = kvmppc_smt_threads();
+ int ret;
+ unsigned int smp_threads = ms->smp.threads;
+
+ if (!kvm_enabled() && (smp_threads > 1)) {
+ error_setg(errp, "TCG cannot support more than 1 thread/core "
+ "on a pseries machine");
+ return;
+ }
+ if (!is_power_of_2(smp_threads)) {
+ error_setg(errp, "Cannot support %d threads/core on a pseries "
+ "machine because it must be a power of 2", smp_threads);
+ return;
+ }
+
+ /* Determine the VSMT mode to use: */
+ if (vsmt_user) {
+ if (spapr->vsmt < smp_threads) {
+ error_setg(errp, "Cannot support VSMT mode %d"
+ " because it must be >= threads/core (%d)",
+ spapr->vsmt, smp_threads);
+ return;
+ }
+ /* In this case, spapr->vsmt has been set by the command line */
+ } else if (!smc->smp_threads_vsmt) {
+ /*
+ * Default VSMT value is tricky, because we need it to be as
+ * consistent as possible (for migration), but this requires
+ * changing it for at least some existing cases. We pick 8 as
+ * the value that we'd get with KVM on POWER8, the
+ * overwhelmingly common case in production systems.
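+ * (The MAX() below also keeps topologies asking for more than 8
+ * threads per core working.)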
+ */ + spapr->vsmt = MAX(8, smp_threads); + } else { + spapr->vsmt = smp_threads; + } + + /* KVM: If necessary, set the SMT mode: */ + if (kvm_enabled() && (spapr->vsmt != kvm_smt)) { + ret = kvmppc_set_smt_threads(spapr->vsmt); + if (ret) { + /* Looks like KVM isn't able to change VSMT mode */ + error_setg(&local_err, + "Failed to set KVM's VSMT mode to %d (errno %d)", + spapr->vsmt, ret); + /* We can live with that if the default one is big enough + * for the number of threads, and a submultiple of the one + * we want. In this case we'll waste some vcpu ids, but + * behaviour will be correct */ + if ((kvm_smt >= smp_threads) && ((spapr->vsmt % kvm_smt) == 0)) { + warn_report_err(local_err); + } else { + if (!vsmt_user) { + error_append_hint(&local_err, + "On PPC, a VM with %d threads/core" + " on a host with %d threads/core" + " requires the use of VSMT mode %d.\n", + smp_threads, kvm_smt, spapr->vsmt); + } + kvmppc_error_append_smt_possible_hint(&local_err); + error_propagate(errp, local_err); + } + } + } + /* else TCG: nothing to do currently */ +} + +static void spapr_init_cpus(SpaprMachineState *spapr) +{ + MachineState *machine = MACHINE(spapr); + MachineClass *mc = MACHINE_GET_CLASS(machine); + SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(machine); + const char *type = spapr_get_cpu_core_type(machine->cpu_type); + const CPUArchIdList *possible_cpus; + unsigned int smp_cpus = machine->smp.cpus; + unsigned int smp_threads = machine->smp.threads; + unsigned int max_cpus = machine->smp.max_cpus; + int boot_cores_nr = smp_cpus / smp_threads; + int i; + + possible_cpus = mc->possible_cpu_arch_ids(machine); + if (mc->has_hotpluggable_cpus) { + if (smp_cpus % smp_threads) { + error_report("smp_cpus (%u) must be a multiple of threads (%u)", + smp_cpus, smp_threads); + exit(1); + } + if (max_cpus % smp_threads) { + error_report("max_cpus (%u) must be a multiple of threads (%u)", + max_cpus, smp_threads); + exit(1); + } + } else { + if (max_cpus != smp_cpus) { + error_report("This machine version does not support CPU hotplug"); + exit(1); + } + boot_cores_nr = possible_cpus->len; + } + + if (smc->pre_2_10_has_unused_icps) { + int i; + + for (i = 0; i < spapr_max_server_number(spapr); i++) { + /* Dummy entries get deregistered when real ICPState objects + * are registered during CPU core hotplug. 
+ */ + pre_2_10_vmstate_register_dummy_icp(i); + } + } + + for (i = 0; i < possible_cpus->len; i++) { + int core_id = i * smp_threads; + + if (mc->has_hotpluggable_cpus) { + spapr_dr_connector_new(OBJECT(spapr), TYPE_SPAPR_DRC_CPU, + spapr_vcpu_id(spapr, core_id)); + } + + if (i < boot_cores_nr) { + Object *core = object_new(type); + int nr_threads = smp_threads; + + /* Handle the partially filled core for older machine types */ + if ((i + 1) * smp_threads >= smp_cpus) { + nr_threads = smp_cpus - i * smp_threads; + } + + object_property_set_int(core, "nr-threads", nr_threads, + &error_fatal); + object_property_set_int(core, CPU_CORE_PROP_CORE_ID, core_id, + &error_fatal); + qdev_realize(DEVICE(core), NULL, &error_fatal); + + object_unref(core); + } + } +} + +static PCIHostState *spapr_create_default_phb(void) +{ + DeviceState *dev; + + dev = qdev_new(TYPE_SPAPR_PCI_HOST_BRIDGE); + qdev_prop_set_uint32(dev, "index", 0); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + + return PCI_HOST_BRIDGE(dev); +} + +static hwaddr spapr_rma_size(SpaprMachineState *spapr, Error **errp) +{ + MachineState *machine = MACHINE(spapr); + SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr); + hwaddr rma_size = machine->ram_size; + hwaddr node0_size = spapr_node0_size(machine); + + /* RMA has to fit in the first NUMA node */ + rma_size = MIN(rma_size, node0_size); + + /* + * VRMA access is via a special 1TiB SLB mapping, so the RMA can + * never exceed that + */ + rma_size = MIN(rma_size, 1 * TiB); + + /* + * Clamp the RMA size based on machine type. This is for + * migration compatibility with older qemu versions, which limited + * the RMA size for complicated and mostly bad reasons. + */ + if (smc->rma_limit) { + rma_size = MIN(rma_size, smc->rma_limit); + } + + if (rma_size < MIN_RMA_SLOF) { + error_setg(errp, + "pSeries SLOF firmware requires >= %" HWADDR_PRIu + " MiB guest RMA (Real Mode Area memory)", + MIN_RMA_SLOF / MiB); + return 0; + } + + return rma_size; +} + +static void spapr_create_nvdimm_dr_connectors(SpaprMachineState *spapr) +{ + MachineState *machine = MACHINE(spapr); + int i; + + for (i = 0; i < machine->ram_slots; i++) { + spapr_dr_connector_new(OBJECT(spapr), TYPE_SPAPR_DRC_PMEM, i); + } +} + +/* pSeries LPAR / sPAPR hardware init */ +static void spapr_machine_init(MachineState *machine) +{ + SpaprMachineState *spapr = SPAPR_MACHINE(machine); + SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(machine); + MachineClass *mc = MACHINE_GET_CLASS(machine); + const char *bios_default = spapr->vof ? FW_FILE_NAME_VOF : FW_FILE_NAME; + const char *bios_name = machine->firmware ?: bios_default; + const char *kernel_filename = machine->kernel_filename; + const char *initrd_filename = machine->initrd_filename; + PCIHostState *phb; + int i; + MemoryRegion *sysmem = get_system_memory(); + long load_limit, fw_size; + char *filename; + Error *resize_hpt_err = NULL; + + /* + * if Secure VM (PEF) support is configured, then initialize it + */ + pef_kvm_init(machine->cgs, &error_fatal); + + msi_nonbroken = true; + + QLIST_INIT(&spapr->phbs); + QTAILQ_INIT(&spapr->pending_dimm_unplugs); + + /* Determine capabilities to run with */ + spapr_caps_init(spapr); + + kvmppc_check_papr_resize_hpt(&resize_hpt_err); + if (spapr->resize_hpt == SPAPR_RESIZE_HPT_DEFAULT) { + /* + * If the user explicitly requested a mode we should either + * supply it, or fail completely (which we do below). 
But if + * it's not set explicitly, we reset our mode to something + * that works + */ + if (resize_hpt_err) { + spapr->resize_hpt = SPAPR_RESIZE_HPT_DISABLED; + error_free(resize_hpt_err); + resize_hpt_err = NULL; + } else { + spapr->resize_hpt = smc->resize_hpt_default; + } + } + + assert(spapr->resize_hpt != SPAPR_RESIZE_HPT_DEFAULT); + + if ((spapr->resize_hpt != SPAPR_RESIZE_HPT_DISABLED) && resize_hpt_err) { + /* + * User requested HPT resize, but this host can't supply it. Bail out + */ + error_report_err(resize_hpt_err); + exit(1); + } + error_free(resize_hpt_err); + + spapr->rma_size = spapr_rma_size(spapr, &error_fatal); + + /* Set up a load limit for the ramdisk leaving room for SLOF and FDT */ + load_limit = MIN(spapr->rma_size, FDT_MAX_ADDR) - FW_OVERHEAD; + + /* + * VSMT must be set in order to be able to compute VCPU ids, i.e. to + * call spapr_max_server_number() or spapr_vcpu_id(). + */ + spapr_set_vsmt_mode(spapr, &error_fatal); + + /* Set up Interrupt Controller before we create the VCPUs */ + spapr_irq_init(spapr, &error_fatal); + + /* Set up containers for ibm,client-architecture-support negotiated options + */ + spapr->ov5 = spapr_ovec_new(); + spapr->ov5_cas = spapr_ovec_new(); + + if (smc->dr_lmb_enabled) { + spapr_ovec_set(spapr->ov5, OV5_DRCONF_MEMORY); + spapr_validate_node_memory(machine, &error_fatal); + } + + spapr_ovec_set(spapr->ov5, OV5_FORM1_AFFINITY); + + /* Do not advertise FORM2 NUMA support for pseries-6.1 and older */ + if (!smc->pre_6_2_numa_affinity) { + spapr_ovec_set(spapr->ov5, OV5_FORM2_AFFINITY); + } + + /* advertise support for dedicated HP event source to guests */ + if (spapr->use_hotplug_event_source) { + spapr_ovec_set(spapr->ov5, OV5_HP_EVT); + } + + /* advertise support for HPT resizing */ + if (spapr->resize_hpt != SPAPR_RESIZE_HPT_DISABLED) { + spapr_ovec_set(spapr->ov5, OV5_HPT_RESIZE); + } + + /* advertise support for ibm,dynamic-memory-v2 */ + spapr_ovec_set(spapr->ov5, OV5_DRMEM_V2); + + /* advertise XIVE on POWER9 machines */ + if (spapr->irq->xive) { + spapr_ovec_set(spapr->ov5, OV5_XIVE_EXPLOIT); + } + + /* init CPUs */ + spapr_init_cpus(spapr); + + spapr->gpu_numa_id = spapr_numa_initial_nvgpu_numa_id(machine); + + /* Init numa_assoc_array */ + spapr_numa_associativity_init(spapr, machine); + + if ((!kvm_enabled() || kvmppc_has_cap_mmu_radix()) && + ppc_type_check_compat(machine->cpu_type, CPU_POWERPC_LOGICAL_3_00, 0, + spapr->max_compat_pvr)) { + spapr_ovec_set(spapr->ov5, OV5_MMU_RADIX_300); + /* KVM and TCG always allow GTSE with radix... */ + spapr_ovec_set(spapr->ov5, OV5_MMU_RADIX_GTSE); + } + /* ... but not with hash (currently). */ + + if (kvm_enabled()) { + /* Enable H_LOGICAL_CI_* so SLOF can talk to in-kernel devices */ + kvmppc_enable_logical_ci_hcalls(); + kvmppc_enable_set_mode_hcall(); + + /* H_CLEAR_MOD/_REF are mandatory in PAPR, but off by default */ + kvmppc_enable_clear_ref_mod_hcalls(); + + /* Enable H_PAGE_INIT */ + kvmppc_enable_h_page_init(); + } + + /* map RAM */ + memory_region_add_subregion(sysmem, 0, machine->ram); + + /* always allocate the device memory information */ + machine->device_memory = g_malloc0(sizeof(*machine->device_memory)); + + /* initialize hotplug memory address space */ + if (machine->ram_size < machine->maxram_size) { + ram_addr_t device_mem_size = machine->maxram_size - machine->ram_size; + /* + * Limit the number of hotpluggable memory slots to half the number + * of slots that KVM supports, leaving the other half for PCI and other + * devices. 
However, ensure that the number of slots doesn't drop below 32. + */ + int max_memslots = kvm_enabled() ? kvm_get_max_memslots() / 2 : + SPAPR_MAX_RAM_SLOTS; + + if (max_memslots < SPAPR_MAX_RAM_SLOTS) { + max_memslots = SPAPR_MAX_RAM_SLOTS; + } + if (machine->ram_slots > max_memslots) { + error_report("Specified number of memory slots %" + PRIu64" exceeds max supported %d", + machine->ram_slots, max_memslots); + exit(1); + } + + machine->device_memory->base = ROUND_UP(machine->ram_size, + SPAPR_DEVICE_MEM_ALIGN); + memory_region_init(&machine->device_memory->mr, OBJECT(spapr), + "device-memory", device_mem_size); + memory_region_add_subregion(sysmem, machine->device_memory->base, + &machine->device_memory->mr); + } + + if (smc->dr_lmb_enabled) { + spapr_create_lmb_dr_connectors(spapr); + } + + if (spapr_get_cap(spapr, SPAPR_CAP_FWNMI) == SPAPR_CAP_ON) { + /* Create the error string for live migration blocker */ + error_setg(&spapr->fwnmi_migration_blocker, + "A machine check is being handled during migration. The handler " + "may run and log a hardware error on the destination"); + } + + if (mc->nvdimm_supported) { + spapr_create_nvdimm_dr_connectors(spapr); + } + + /* Set up RTAS event infrastructure */ + spapr_events_init(spapr); + + /* Set up the RTC RTAS interfaces */ + spapr_rtc_create(spapr); + + /* Set up VIO bus */ + spapr->vio_bus = spapr_vio_bus_init(); + + for (i = 0; serial_hd(i); i++) { + spapr_vty_create(spapr->vio_bus, serial_hd(i)); + } + + /* We always have at least the nvram device on VIO */ + spapr_create_nvram(spapr); + + /* + * Set up hotplug / dynamic-reconfiguration connectors. Top-level + * connectors (described in root DT node's "ibm,drc-types" property) + * are pre-initialized here. Additional child connectors (such as + * connectors for a PHB's PCI slots) are added as needed during their + * parent's realization. 
+ */ + if (smc->dr_phb_enabled) { + for (i = 0; i < SPAPR_MAX_PHBS; i++) { + spapr_dr_connector_new(OBJECT(machine), TYPE_SPAPR_DRC_PHB, i); + } + } + + /* Set up PCI */ + spapr_pci_rtas_init(); + + phb = spapr_create_default_phb(); + + for (i = 0; i < nb_nics; i++) { + NICInfo *nd = &nd_table[i]; + + if (!nd->model) { + nd->model = g_strdup("spapr-vlan"); + } + + if (g_str_equal(nd->model, "spapr-vlan") || + g_str_equal(nd->model, "ibmveth")) { + spapr_vlan_create(spapr->vio_bus, nd); + } else { + pci_nic_init_nofail(&nd_table[i], phb->bus, nd->model, NULL); + } + } + + for (i = 0; i <= drive_get_max_bus(IF_SCSI); i++) { + spapr_vscsi_create(spapr->vio_bus); + } + + /* Graphics */ + if (spapr_vga_init(phb->bus, &error_fatal)) { + spapr->has_graphics = true; + machine->usb |= defaults_enabled() && !machine->usb_disabled; + } + + if (machine->usb) { + if (smc->use_ohci_by_default) { + pci_create_simple(phb->bus, -1, "pci-ohci"); + } else { + pci_create_simple(phb->bus, -1, "nec-usb-xhci"); + } + + if (spapr->has_graphics) { + USBBus *usb_bus = usb_bus_find(-1); + + usb_create_simple(usb_bus, "usb-kbd"); + usb_create_simple(usb_bus, "usb-mouse"); + } + } + + if (kernel_filename) { + spapr->kernel_size = load_elf(kernel_filename, NULL, + translate_kernel_address, spapr, + NULL, NULL, NULL, NULL, 1, + PPC_ELF_MACHINE, 0, 0); + if (spapr->kernel_size == ELF_LOAD_WRONG_ENDIAN) { + spapr->kernel_size = load_elf(kernel_filename, NULL, + translate_kernel_address, spapr, + NULL, NULL, NULL, NULL, 0, + PPC_ELF_MACHINE, 0, 0); + spapr->kernel_le = spapr->kernel_size > 0; + } + if (spapr->kernel_size < 0) { + error_report("error loading %s: %s", kernel_filename, + load_elf_strerror(spapr->kernel_size)); + exit(1); + } + + /* load initrd */ + if (initrd_filename) { + /* Try to locate the initrd in the gap between the kernel + * and the firmware. Add a bit of space just in case + */ + spapr->initrd_base = (spapr->kernel_addr + spapr->kernel_size + + 0x1ffff) & ~0xffff; + spapr->initrd_size = load_image_targphys(initrd_filename, + spapr->initrd_base, + load_limit + - spapr->initrd_base); + if (spapr->initrd_size < 0) { + error_report("could not load initial ram disk '%s'", + initrd_filename); + exit(1); + } + } + } + + filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name); + if (!filename) { + error_report("Could not find LPAR firmware '%s'", bios_name); + exit(1); + } + fw_size = load_image_targphys(filename, 0, FW_MAX_SIZE); + if (fw_size <= 0) { + error_report("Could not load LPAR firmware '%s'", filename); + exit(1); + } + g_free(filename); + + /* FIXME: Should register things through the MachineState's qdev + * interface, this is a legacy from the sPAPREnvironment structure + * which predated MachineState but had a similar function */ + vmstate_register(NULL, 0, &vmstate_spapr, spapr); + register_savevm_live("spapr/htab", VMSTATE_INSTANCE_ID_ANY, 1, + &savevm_htab_handlers, spapr); + + qbus_set_hotplug_handler(sysbus_get_default(), OBJECT(machine)); + + qemu_register_boot_set(spapr_boot_set, spapr); + + /* + * Nothing needs to be done to resume a suspended guest because + * suspending does not change the machine state, so no need for + * a ->wakeup method. 
+ */ + qemu_register_wakeup_support(); + + if (kvm_enabled()) { + /* to stop and start vmclock */ + qemu_add_vm_change_state_handler(cpu_ppc_clock_vm_state_change, + &spapr->tb); + + kvmppc_spapr_enable_inkernel_multitce(); + } + + qemu_cond_init(&spapr->fwnmi_machine_check_interlock_cond); + if (spapr->vof) { + spapr->vof->fw_size = fw_size; /* for claim() on itself */ + spapr_register_hypercall(KVMPPC_H_VOF_CLIENT, spapr_h_vof_client); + } +} + +#define DEFAULT_KVM_TYPE "auto" +static int spapr_kvm_type(MachineState *machine, const char *vm_type) +{ + /* + * The use of g_ascii_strcasecmp() for 'hv' and 'pr' is to + * accommodate the 'HV' and 'PV' formats that exist in the + * wild. The 'auto' mode is introduced as lower-case from the + * start, so we don't need to bother checking for + * "AUTO". + */ + if (!vm_type || !strcmp(vm_type, DEFAULT_KVM_TYPE)) { + return 0; + } + + if (!g_ascii_strcasecmp(vm_type, "hv")) { + return 1; + } + + if (!g_ascii_strcasecmp(vm_type, "pr")) { + return 2; + } + + error_report("Unknown kvm-type specified '%s'", vm_type); + exit(1); +} + +/* + * Implementation of an interface to adjust firmware path + * for the bootindex property handling. + */ +static char *spapr_get_fw_dev_path(FWPathProvider *p, BusState *bus, + DeviceState *dev) +{ +#define CAST(type, obj, name) \ + ((type *)object_dynamic_cast(OBJECT(obj), (name))) + SCSIDevice *d = CAST(SCSIDevice, dev, TYPE_SCSI_DEVICE); + SpaprPhbState *phb = CAST(SpaprPhbState, dev, TYPE_SPAPR_PCI_HOST_BRIDGE); + VHostSCSICommon *vsc = CAST(VHostSCSICommon, dev, TYPE_VHOST_SCSI_COMMON); + PCIDevice *pcidev = CAST(PCIDevice, dev, TYPE_PCI_DEVICE); + + if (d) { + void *spapr = CAST(void, bus->parent, "spapr-vscsi"); + VirtIOSCSI *virtio = CAST(VirtIOSCSI, bus->parent, TYPE_VIRTIO_SCSI); + USBDevice *usb = CAST(USBDevice, bus->parent, TYPE_USB_DEVICE); + + if (spapr) { + /* + * Replace "channel@0/disk@0,0" with "disk@8000000000000000": + * In the top 16 bits of the 64-bit LUN, we use SRP luns of the form + * 0x8000 | (target << 8) | (bus << 5) | lun + * (see the "Logical unit addressing format" table in SAM5) + */ + unsigned id = 0x8000 | (d->id << 8) | (d->channel << 5) | d->lun; + return g_strdup_printf("%s@%"PRIX64, qdev_fw_name(dev), + (uint64_t)id << 48); + } else if (virtio) { + /* + * We use SRP luns of the form 01000000 | (target << 8) | lun + * in the top 32 bits of the 64-bit LUN + * Note: the quote above is from SLOF and it is wrong, + * the actual binding is: + * swap 0100 or 10 << or 20 << ( target lun-id -- srplun ) + */ + unsigned id = 0x1000000 | (d->id << 16) | d->lun; + if (d->lun >= 256) { + /* Use the LUN "flat space addressing method" */ + id |= 0x4000; + } + return g_strdup_printf("%s@%"PRIX64, qdev_fw_name(dev), + (uint64_t)id << 32); + } else if (usb) { + /* + * We use SRP luns of the form 01000000 | (usb-port << 16) | lun + * in the top 32 bits of the 64-bit LUN + */ + unsigned usb_port = atoi(usb->port->path); + unsigned id = 0x1000000 | (usb_port << 16) | d->lun; + return g_strdup_printf("%s@%"PRIX64, qdev_fw_name(dev), + (uint64_t)id << 32); + } + } + + /* + * SLOF probes the USB devices, and if it recognizes that the device is a + * storage device, it changes its name to "storage" instead of "usb-host", + * and additionally adds a child node for the SCSI LUN, so the correct + * boot path in SLOF is something like .../storage@1/disk@xxx" instead. 
+ */ + if (strcmp("usb-host", qdev_fw_name(dev)) == 0) { + USBDevice *usbdev = CAST(USBDevice, dev, TYPE_USB_DEVICE); + if (usb_device_is_scsi_storage(usbdev)) { + return g_strdup_printf("storage@%s/disk", usbdev->port->path); + } + } + + if (phb) { + /* Replace "pci" with "pci@800000020000000" */ + return g_strdup_printf("pci@%"PRIX64, phb->buid); + } + + if (vsc) { + /* Same logic as virtio above */ + unsigned id = 0x1000000 | (vsc->target << 16) | vsc->lun; + return g_strdup_printf("disk@%"PRIX64, (uint64_t)id << 32); + } + + if (g_str_equal("pci-bridge", qdev_fw_name(dev))) { + /* SLOF uses "pci" instead of "pci-bridge" for PCI bridges */ + PCIDevice *pcidev = CAST(PCIDevice, dev, TYPE_PCI_DEVICE); + return g_strdup_printf("pci@%x", PCI_SLOT(pcidev->devfn)); + } + + if (pcidev) { + return spapr_pci_fw_dev_name(pcidev); + } + + return NULL; +} + +static char *spapr_get_kvm_type(Object *obj, Error **errp) +{ + SpaprMachineState *spapr = SPAPR_MACHINE(obj); + + return g_strdup(spapr->kvm_type); +} + +static void spapr_set_kvm_type(Object *obj, const char *value, Error **errp) +{ + SpaprMachineState *spapr = SPAPR_MACHINE(obj); + + g_free(spapr->kvm_type); + spapr->kvm_type = g_strdup(value); +} + +static bool spapr_get_modern_hotplug_events(Object *obj, Error **errp) +{ + SpaprMachineState *spapr = SPAPR_MACHINE(obj); + + return spapr->use_hotplug_event_source; +} + +static void spapr_set_modern_hotplug_events(Object *obj, bool value, + Error **errp) +{ + SpaprMachineState *spapr = SPAPR_MACHINE(obj); + + spapr->use_hotplug_event_source = value; +} + +static bool spapr_get_msix_emulation(Object *obj, Error **errp) +{ + return true; +} + +static char *spapr_get_resize_hpt(Object *obj, Error **errp) +{ + SpaprMachineState *spapr = SPAPR_MACHINE(obj); + + switch (spapr->resize_hpt) { + case SPAPR_RESIZE_HPT_DEFAULT: + return g_strdup("default"); + case SPAPR_RESIZE_HPT_DISABLED: + return g_strdup("disabled"); + case SPAPR_RESIZE_HPT_ENABLED: + return g_strdup("enabled"); + case SPAPR_RESIZE_HPT_REQUIRED: + return g_strdup("required"); + } + g_assert_not_reached(); +} + +static void spapr_set_resize_hpt(Object *obj, const char *value, Error **errp) +{ + SpaprMachineState *spapr = SPAPR_MACHINE(obj); + + if (strcmp(value, "default") == 0) { + spapr->resize_hpt = SPAPR_RESIZE_HPT_DEFAULT; + } else if (strcmp(value, "disabled") == 0) { + spapr->resize_hpt = SPAPR_RESIZE_HPT_DISABLED; + } else if (strcmp(value, "enabled") == 0) { + spapr->resize_hpt = SPAPR_RESIZE_HPT_ENABLED; + } else if (strcmp(value, "required") == 0) { + spapr->resize_hpt = SPAPR_RESIZE_HPT_REQUIRED; + } else { + error_setg(errp, "Bad value for \"resize-hpt\" property"); + } +} + +static bool spapr_get_vof(Object *obj, Error **errp) +{ + SpaprMachineState *spapr = SPAPR_MACHINE(obj); + + return spapr->vof != NULL; +} + +static void spapr_set_vof(Object *obj, bool value, Error **errp) +{ + SpaprMachineState *spapr = SPAPR_MACHINE(obj); + + if (spapr->vof) { + vof_cleanup(spapr->vof); + g_free(spapr->vof); + spapr->vof = NULL; + } + if (!value) { + return; + } + spapr->vof = g_malloc0(sizeof(*spapr->vof)); +} + +static char *spapr_get_ic_mode(Object *obj, Error **errp) +{ + SpaprMachineState *spapr = SPAPR_MACHINE(obj); + + if (spapr->irq == &spapr_irq_xics_legacy) { + return g_strdup("legacy"); + } else if (spapr->irq == &spapr_irq_xics) { + return g_strdup("xics"); + } else if (spapr->irq == &spapr_irq_xive) { + return g_strdup("xive"); + } else if (spapr->irq == &spapr_irq_dual) { + return g_strdup("dual"); + } + 
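/* All supported backends are matched above; anything else is a bug */ +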
g_assert_not_reached(); +} + +static void spapr_set_ic_mode(Object *obj, const char *value, Error **errp) +{ + SpaprMachineState *spapr = SPAPR_MACHINE(obj); + + if (SPAPR_MACHINE_GET_CLASS(spapr)->legacy_irq_allocation) { + error_setg(errp, "This machine only uses the legacy XICS backend, don't pass ic-mode"); + return; + } + + /* The legacy IRQ backend cannot be set explicitly */ + if (strcmp(value, "xics") == 0) { + spapr->irq = &spapr_irq_xics; + } else if (strcmp(value, "xive") == 0) { + spapr->irq = &spapr_irq_xive; + } else if (strcmp(value, "dual") == 0) { + spapr->irq = &spapr_irq_dual; + } else { + error_setg(errp, "Bad value for \"ic-mode\" property"); + } +} + +static char *spapr_get_host_model(Object *obj, Error **errp) +{ + SpaprMachineState *spapr = SPAPR_MACHINE(obj); + + return g_strdup(spapr->host_model); +} + +static void spapr_set_host_model(Object *obj, const char *value, Error **errp) +{ + SpaprMachineState *spapr = SPAPR_MACHINE(obj); + + g_free(spapr->host_model); + spapr->host_model = g_strdup(value); +} + +static char *spapr_get_host_serial(Object *obj, Error **errp) +{ + SpaprMachineState *spapr = SPAPR_MACHINE(obj); + + return g_strdup(spapr->host_serial); +} + +static void spapr_set_host_serial(Object *obj, const char *value, Error **errp) +{ + SpaprMachineState *spapr = SPAPR_MACHINE(obj); + + g_free(spapr->host_serial); + spapr->host_serial = g_strdup(value); +} + +static void spapr_instance_init(Object *obj) +{ + SpaprMachineState *spapr = SPAPR_MACHINE(obj); + SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr); + MachineState *ms = MACHINE(spapr); + MachineClass *mc = MACHINE_GET_CLASS(ms); + + /* + * NVDIMM support went live in 5.1 without considering that, in + * other archs, the user needs to enable NVDIMM support with the + * 'nvdimm' machine option and the default behavior is NVDIMM + * support disabled. It is too late to roll back to the standard + * behavior without breaking 5.1 guests. + */ + if (mc->nvdimm_supported) { + ms->nvdimms_state->is_enabled = true; + } + + spapr->htab_fd = -1; + spapr->use_hotplug_event_source = true; + spapr->kvm_type = g_strdup(DEFAULT_KVM_TYPE); + object_property_add_str(obj, "kvm-type", + spapr_get_kvm_type, spapr_set_kvm_type); + object_property_set_description(obj, "kvm-type", + "Specifies the KVM virtualization mode (auto," + " hv, pr). Defaults to 'auto'. 
This mode will use" + " any available KVM module loaded in the host," + " where kvm_hv takes precedence if both kvm_hv and" + " kvm_pr are loaded."); + object_property_add_bool(obj, "modern-hotplug-events", + spapr_get_modern_hotplug_events, + spapr_set_modern_hotplug_events); + object_property_set_description(obj, "modern-hotplug-events", + "Use dedicated hotplug event mechanism in" + " place of standard EPOW events when possible" + " (required for memory hot-unplug support)"); + ppc_compat_add_property(obj, "max-cpu-compat", &spapr->max_compat_pvr, + "Maximum permitted CPU compatibility mode"); + + object_property_add_str(obj, "resize-hpt", + spapr_get_resize_hpt, spapr_set_resize_hpt); + object_property_set_description(obj, "resize-hpt", + "Resizing of the Hash Page Table (enabled, disabled, required)"); + object_property_add_uint32_ptr(obj, "vsmt", + &spapr->vsmt, OBJ_PROP_FLAG_READWRITE); + object_property_set_description(obj, "vsmt", + "Virtual SMT: KVM behaves as if this were" + " the host's SMT mode"); + + object_property_add_bool(obj, "vfio-no-msix-emulation", + spapr_get_msix_emulation, NULL); + + object_property_add_uint64_ptr(obj, "kernel-addr", + &spapr->kernel_addr, OBJ_PROP_FLAG_READWRITE); + object_property_set_description(obj, "kernel-addr", + stringify(KERNEL_LOAD_ADDR) + " for -kernel is the default"); + spapr->kernel_addr = KERNEL_LOAD_ADDR; + + object_property_add_bool(obj, "x-vof", spapr_get_vof, spapr_set_vof); + object_property_set_description(obj, "x-vof", + "Enable Virtual Open Firmware (experimental)"); + + /* The machine class defines the default interrupt controller mode */ + spapr->irq = smc->irq; + object_property_add_str(obj, "ic-mode", spapr_get_ic_mode, + spapr_set_ic_mode); + object_property_set_description(obj, "ic-mode", + "Specifies the interrupt controller mode (xics, xive, dual)"); + + object_property_add_str(obj, "host-model", + spapr_get_host_model, spapr_set_host_model); + object_property_set_description(obj, "host-model", + "Host model to advertise in guest device tree"); + object_property_add_str(obj, "host-serial", + spapr_get_host_serial, spapr_set_host_serial); + object_property_set_description(obj, "host-serial", + "Host serial number to advertise in guest device tree"); +} + +static void spapr_machine_finalizefn(Object *obj) +{ + SpaprMachineState *spapr = SPAPR_MACHINE(obj); + + g_free(spapr->kvm_type); +} + +void spapr_do_system_reset_on_cpu(CPUState *cs, run_on_cpu_data arg) +{ + SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine()); + PowerPCCPU *cpu = POWERPC_CPU(cs); + CPUPPCState *env = &cpu->env; + + cpu_synchronize_state(cs); + /* If FWNMI is inactive, addr will be -1, which will deliver to 0x100 */ + if (spapr->fwnmi_system_reset_addr != -1) { + uint64_t rtas_addr, addr; + + /* get rtas addr from fdt */ + rtas_addr = spapr_get_rtas_addr(); + if (!rtas_addr) { + qemu_system_guest_panicked(NULL); + return; + } + + addr = rtas_addr + RTAS_ERROR_LOG_MAX + cs->cpu_index * sizeof(uint64_t)*2; + stq_be_phys(&address_space_memory, addr, env->gpr[3]); + stq_be_phys(&address_space_memory, addr + sizeof(uint64_t), 0); + env->gpr[3] = addr; + } + ppc_cpu_do_system_reset(cs); + if (spapr->fwnmi_system_reset_addr != -1) { + env->nip = spapr->fwnmi_system_reset_addr; + } +} + +static void spapr_nmi(NMIState *n, int cpu_index, Error **errp) +{ + CPUState *cs; + + CPU_FOREACH(cs) { + async_run_on_cpu(cs, spapr_do_system_reset_on_cpu, RUN_ON_CPU_NULL); + } +} + +int spapr_lmb_dt_populate(SpaprDrc *drc, SpaprMachineState *spapr, + void *fdt, 
int *fdt_start_offset, Error **errp) +{ + uint64_t addr; + uint32_t node; + + addr = spapr_drc_index(drc) * SPAPR_MEMORY_BLOCK_SIZE; + node = object_property_get_uint(OBJECT(drc->dev), PC_DIMM_NODE_PROP, + &error_abort); + *fdt_start_offset = spapr_dt_memory_node(spapr, fdt, node, addr, + SPAPR_MEMORY_BLOCK_SIZE); + return 0; +} + +static void spapr_add_lmbs(DeviceState *dev, uint64_t addr_start, uint64_t size, + bool dedicated_hp_event_source) +{ + SpaprDrc *drc; + uint32_t nr_lmbs = size/SPAPR_MEMORY_BLOCK_SIZE; + int i; + uint64_t addr = addr_start; + bool hotplugged = spapr_drc_hotplugged(dev); + + for (i = 0; i < nr_lmbs; i++) { + drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB, + addr / SPAPR_MEMORY_BLOCK_SIZE); + g_assert(drc); + + /* + * memory_device_get_free_addr() provided a range of free addresses + * that doesn't overlap with any existing mapping at pre-plug. The + * corresponding LMB DRCs are thus assumed to be all attachable. + */ + spapr_drc_attach(drc, dev); + if (!hotplugged) { + spapr_drc_reset(drc); + } + addr += SPAPR_MEMORY_BLOCK_SIZE; + } + /* send hotplug notification to the + * guest only in case of hotplugged memory + */ + if (hotplugged) { + if (dedicated_hp_event_source) { + drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB, + addr_start / SPAPR_MEMORY_BLOCK_SIZE); + g_assert(drc); + spapr_hotplug_req_add_by_count_indexed(SPAPR_DR_CONNECTOR_TYPE_LMB, + nr_lmbs, + spapr_drc_index(drc)); + } else { + spapr_hotplug_req_add_by_count(SPAPR_DR_CONNECTOR_TYPE_LMB, + nr_lmbs); + } + } +} + +static void spapr_memory_plug(HotplugHandler *hotplug_dev, DeviceState *dev) +{ + SpaprMachineState *ms = SPAPR_MACHINE(hotplug_dev); + PCDIMMDevice *dimm = PC_DIMM(dev); + uint64_t size, addr; + int64_t slot; + bool is_nvdimm = object_dynamic_cast(OBJECT(dev), TYPE_NVDIMM); + + size = memory_device_get_region_size(MEMORY_DEVICE(dev), &error_abort); + + pc_dimm_plug(dimm, MACHINE(ms)); + + if (!is_nvdimm) { + addr = object_property_get_uint(OBJECT(dimm), + PC_DIMM_ADDR_PROP, &error_abort); + spapr_add_lmbs(dev, addr, size, + spapr_ovec_test(ms->ov5_cas, OV5_HP_EVT)); + } else { + slot = object_property_get_int(OBJECT(dimm), + PC_DIMM_SLOT_PROP, &error_abort); + /* We should have a valid slot number at this point */ + g_assert(slot >= 0); + spapr_add_nvdimm(dev, slot); + } +} + +static void spapr_memory_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev, + Error **errp) +{ + const SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(hotplug_dev); + SpaprMachineState *spapr = SPAPR_MACHINE(hotplug_dev); + bool is_nvdimm = object_dynamic_cast(OBJECT(dev), TYPE_NVDIMM); + PCDIMMDevice *dimm = PC_DIMM(dev); + Error *local_err = NULL; + uint64_t size; + Object *memdev; + hwaddr pagesize; + + if (!smc->dr_lmb_enabled) { + error_setg(errp, "Memory hotplug not supported for this machine"); + return; + } + + size = memory_device_get_region_size(MEMORY_DEVICE(dimm), &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + + if (is_nvdimm) { + if (!spapr_nvdimm_validate(hotplug_dev, NVDIMM(dev), size, errp)) { + return; + } + } else if (size % SPAPR_MEMORY_BLOCK_SIZE) { + error_setg(errp, "Hotplugged memory size must be a multiple of " + "%" PRIu64 " MiB", SPAPR_MEMORY_BLOCK_SIZE / MiB); + return; + } + + memdev = object_property_get_link(OBJECT(dimm), PC_DIMM_MEMDEV_PROP, + &error_abort); + pagesize = host_memory_backend_pagesize(MEMORY_BACKEND(memdev)); + if (!spapr_check_pagesize(spapr, pagesize, errp)) { + return; + } + + pc_dimm_pre_plug(dimm, MACHINE(hotplug_dev), NULL, errp); +} + 
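/* + * A minimal sketch (illustrative only, not part of the original + * sources) of the sizing rule spapr_memory_pre_plug() enforces above: + * a plugged DIMM must split into whole 256 MiB LMBs, since each LMB is + * backed by one TYPE_SPAPR_DRC_LMB connector. The helper name is + * hypothetical. + */ +static inline bool spapr_dimm_size_is_lmb_aligned(uint64_t size) +{ + /* SPAPR_MEMORY_BLOCK_SIZE is the 256 MiB LMB granularity */ + return size != 0 && (size % SPAPR_MEMORY_BLOCK_SIZE) == 0; +} + 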
+struct SpaprDimmState { + PCDIMMDevice *dimm; + uint32_t nr_lmbs; + QTAILQ_ENTRY(SpaprDimmState) next; +}; + +static SpaprDimmState *spapr_pending_dimm_unplugs_find(SpaprMachineState *s, + PCDIMMDevice *dimm) +{ + SpaprDimmState *dimm_state = NULL; + + QTAILQ_FOREACH(dimm_state, &s->pending_dimm_unplugs, next) { + if (dimm_state->dimm == dimm) { + break; + } + } + return dimm_state; +} + +static SpaprDimmState *spapr_pending_dimm_unplugs_add(SpaprMachineState *spapr, + uint32_t nr_lmbs, + PCDIMMDevice *dimm) +{ + SpaprDimmState *ds = NULL; + + /* + * If this request is for a DIMM whose removal had failed earlier + * (due to guest's refusal to remove the LMBs), we would have this + * dimm already in the pending_dimm_unplugs list. In that + * case, don't add it again. + */ + ds = spapr_pending_dimm_unplugs_find(spapr, dimm); + if (!ds) { + ds = g_malloc0(sizeof(SpaprDimmState)); + ds->nr_lmbs = nr_lmbs; + ds->dimm = dimm; + QTAILQ_INSERT_HEAD(&spapr->pending_dimm_unplugs, ds, next); + } + return ds; +} + +static void spapr_pending_dimm_unplugs_remove(SpaprMachineState *spapr, + SpaprDimmState *dimm_state) +{ + QTAILQ_REMOVE(&spapr->pending_dimm_unplugs, dimm_state, next); + g_free(dimm_state); +} + +static SpaprDimmState *spapr_recover_pending_dimm_state(SpaprMachineState *ms, + PCDIMMDevice *dimm) +{ + SpaprDrc *drc; + uint64_t size = memory_device_get_region_size(MEMORY_DEVICE(dimm), + &error_abort); + uint32_t nr_lmbs = size / SPAPR_MEMORY_BLOCK_SIZE; + uint32_t avail_lmbs = 0; + uint64_t addr_start, addr; + int i; + + addr_start = object_property_get_uint(OBJECT(dimm), PC_DIMM_ADDR_PROP, + &error_abort); + + addr = addr_start; + for (i = 0; i < nr_lmbs; i++) { + drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB, + addr / SPAPR_MEMORY_BLOCK_SIZE); + g_assert(drc); + if (drc->dev) { + avail_lmbs++; + } + addr += SPAPR_MEMORY_BLOCK_SIZE; + } + + return spapr_pending_dimm_unplugs_add(ms, avail_lmbs, dimm); +} + +void spapr_memory_unplug_rollback(SpaprMachineState *spapr, DeviceState *dev) +{ + SpaprDimmState *ds; + PCDIMMDevice *dimm; + SpaprDrc *drc; + uint32_t nr_lmbs; + uint64_t size, addr_start, addr; + g_autofree char *qapi_error = NULL; + int i; + + if (!dev) { + return; + } + + dimm = PC_DIMM(dev); + ds = spapr_pending_dimm_unplugs_find(spapr, dimm); + + /* + * 'ds == NULL' would mean that the DIMM doesn't have a pending + * unplug state, but one of its DRCs is marked as unplug_requested. + * This is bad and weird enough to g_assert() out. + */ + g_assert(ds); + + spapr_pending_dimm_unplugs_remove(spapr, ds); + + size = memory_device_get_region_size(MEMORY_DEVICE(dimm), &error_abort); + nr_lmbs = size / SPAPR_MEMORY_BLOCK_SIZE; + + addr_start = object_property_get_uint(OBJECT(dimm), PC_DIMM_ADDR_PROP, + &error_abort); + + addr = addr_start; + for (i = 0; i < nr_lmbs; i++) { + drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB, + addr / SPAPR_MEMORY_BLOCK_SIZE); + g_assert(drc); + + drc->unplug_requested = false; + addr += SPAPR_MEMORY_BLOCK_SIZE; + } + + /* + * Tell QAPI that something happened and the memory + * hotunplug wasn't successful. Keep sending + * MEM_UNPLUG_ERROR alongside DEVICE_UNPLUG_GUEST_ERROR + * until the deprecation period of MEM_UNPLUG_ERROR is + * over. + */ + qapi_error = g_strdup_printf("Memory hotunplug rejected by the guest " + "for device %s", dev->id); + + qapi_event_send_mem_unplug_error(dev->id ? : "", qapi_error); + + qapi_event_send_device_unplug_guest_error(!!dev->id, dev->id, + dev->canonical_path); +} + +/* Callback to be called during DRC release. 
*/ +void spapr_lmb_release(DeviceState *dev) +{ + HotplugHandler *hotplug_ctrl = qdev_get_hotplug_handler(dev); + SpaprMachineState *spapr = SPAPR_MACHINE(hotplug_ctrl); + SpaprDimmState *ds = spapr_pending_dimm_unplugs_find(spapr, PC_DIMM(dev)); + + /* This information will get lost if a migration occurs + * during the unplug process. In this case recover it. */ + if (ds == NULL) { + ds = spapr_recover_pending_dimm_state(spapr, PC_DIMM(dev)); + g_assert(ds); + /* The DRC being examined by the caller at least must be counted */ + g_assert(ds->nr_lmbs); + } + + if (--ds->nr_lmbs) { + return; + } + + /* + * Now that all the LMBs have been removed by the guest, call the + * unplug handler chain. This can never fail. + */ + hotplug_handler_unplug(hotplug_ctrl, dev, &error_abort); + object_unparent(OBJECT(dev)); +} + +static void spapr_memory_unplug(HotplugHandler *hotplug_dev, DeviceState *dev) +{ + SpaprMachineState *spapr = SPAPR_MACHINE(hotplug_dev); + SpaprDimmState *ds = spapr_pending_dimm_unplugs_find(spapr, PC_DIMM(dev)); + + /* We really shouldn't get this far without anything to unplug */ + g_assert(ds); + + pc_dimm_unplug(PC_DIMM(dev), MACHINE(hotplug_dev)); + qdev_unrealize(dev); + spapr_pending_dimm_unplugs_remove(spapr, ds); +} + +static void spapr_memory_unplug_request(HotplugHandler *hotplug_dev, + DeviceState *dev, Error **errp) +{ + SpaprMachineState *spapr = SPAPR_MACHINE(hotplug_dev); + PCDIMMDevice *dimm = PC_DIMM(dev); + uint32_t nr_lmbs; + uint64_t size, addr_start, addr; + int i; + SpaprDrc *drc; + + if (object_dynamic_cast(OBJECT(dev), TYPE_NVDIMM)) { + error_setg(errp, "nvdimm device hot unplug is not supported yet."); + return; + } + + size = memory_device_get_region_size(MEMORY_DEVICE(dimm), &error_abort); + nr_lmbs = size / SPAPR_MEMORY_BLOCK_SIZE; + + addr_start = object_property_get_uint(OBJECT(dimm), PC_DIMM_ADDR_PROP, + &error_abort); + + /* + * An existing pending dimm state for this DIMM means that there is an + * unplug operation in progress, waiting for the spapr_lmb_release + * callback to complete the job (BQL can't cover that far). In this case, + * bail out to avoid detaching DRCs that were already released. + */ + if (spapr_pending_dimm_unplugs_find(spapr, dimm)) { + error_setg(errp, "Memory unplug already in progress for device %s", + dev->id); + return; + } + + spapr_pending_dimm_unplugs_add(spapr, nr_lmbs, dimm); + + addr = addr_start; + for (i = 0; i < nr_lmbs; i++) { + drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB, + addr / SPAPR_MEMORY_BLOCK_SIZE); + g_assert(drc); + + spapr_drc_unplug_request(drc); + addr += SPAPR_MEMORY_BLOCK_SIZE; + } + + drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB, + addr_start / SPAPR_MEMORY_BLOCK_SIZE); + spapr_hotplug_req_remove_by_count_indexed(SPAPR_DR_CONNECTOR_TYPE_LMB, + nr_lmbs, spapr_drc_index(drc)); +} + +/* Callback to be called during DRC release. */ +void spapr_core_release(DeviceState *dev) +{ + HotplugHandler *hotplug_ctrl = qdev_get_hotplug_handler(dev); + + /* Call the unplug handler chain. This can never fail. 
*/ + hotplug_handler_unplug(hotplug_ctrl, dev, &error_abort); + object_unparent(OBJECT(dev)); +} + +static void spapr_core_unplug(HotplugHandler *hotplug_dev, DeviceState *dev) +{ + MachineState *ms = MACHINE(hotplug_dev); + SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(ms); + CPUCore *cc = CPU_CORE(dev); + CPUArchId *core_slot = spapr_find_cpu_slot(ms, cc->core_id, NULL); + + if (smc->pre_2_10_has_unused_icps) { + SpaprCpuCore *sc = SPAPR_CPU_CORE(OBJECT(dev)); + int i; + + for (i = 0; i < cc->nr_threads; i++) { + CPUState *cs = CPU(sc->threads[i]); + + pre_2_10_vmstate_register_dummy_icp(cs->cpu_index); + } + } + + assert(core_slot); + core_slot->cpu = NULL; + qdev_unrealize(dev); +} + +static +void spapr_core_unplug_request(HotplugHandler *hotplug_dev, DeviceState *dev, + Error **errp) +{ + SpaprMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev)); + int index; + SpaprDrc *drc; + CPUCore *cc = CPU_CORE(dev); + + if (!spapr_find_cpu_slot(MACHINE(hotplug_dev), cc->core_id, &index)) { + error_setg(errp, "Unable to find CPU core with core-id: %d", + cc->core_id); + return; + } + if (index == 0) { + error_setg(errp, "Boot CPU core may not be unplugged"); + return; + } + + drc = spapr_drc_by_id(TYPE_SPAPR_DRC_CPU, + spapr_vcpu_id(spapr, cc->core_id)); + g_assert(drc); + + if (!spapr_drc_unplug_requested(drc)) { + spapr_drc_unplug_request(drc); + } + + /* + * spapr_hotplug_req_remove_by_index is left unguarded, outside the + * "!spapr_drc_unplug_requested" check, to allow for multiple IRQ + * pulses removing the same CPU. Otherwise, in a failed hotunplug + * attempt (e.g. the kernel will refuse to remove the last online + * CPU), we will never attempt it again because unplug_requested + * will still be 'true' in that case. + */ + spapr_hotplug_req_remove_by_index(drc); +} + +int spapr_core_dt_populate(SpaprDrc *drc, SpaprMachineState *spapr, + void *fdt, int *fdt_start_offset, Error **errp) +{ + SpaprCpuCore *core = SPAPR_CPU_CORE(drc->dev); + CPUState *cs = CPU(core->threads[0]); + PowerPCCPU *cpu = POWERPC_CPU(cs); + DeviceClass *dc = DEVICE_GET_CLASS(cs); + int id = spapr_get_vcpu_id(cpu); + g_autofree char *nodename = NULL; + int offset; + + nodename = g_strdup_printf("%s@%x", dc->fw_name, id); + offset = fdt_add_subnode(fdt, 0, nodename); + + spapr_dt_cpu(cs, fdt, offset, spapr); + + /* + * spapr_dt_cpu() does not fill the 'name' property in the + * CPU node. The function is called during the boot process, before + * and after CAS, and overwriting the 'name' property written + * by SLOF is not allowed. + * + * Write it manually after spapr_dt_cpu(). This makes the hotplug + * CPUs more compatible with the coldplugged ones, which have + * the 'name' property. The Linux kernel also relies on this + * property to identify CPU nodes. 
+ */ + _FDT((fdt_setprop_string(fdt, offset, "name", nodename))); + + *fdt_start_offset = offset; + return 0; +} + +static void spapr_core_plug(HotplugHandler *hotplug_dev, DeviceState *dev) +{ + SpaprMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev)); + MachineClass *mc = MACHINE_GET_CLASS(spapr); + SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc); + SpaprCpuCore *core = SPAPR_CPU_CORE(OBJECT(dev)); + CPUCore *cc = CPU_CORE(dev); + CPUState *cs; + SpaprDrc *drc; + CPUArchId *core_slot; + int index; + bool hotplugged = spapr_drc_hotplugged(dev); + int i; + + core_slot = spapr_find_cpu_slot(MACHINE(hotplug_dev), cc->core_id, &index); + g_assert(core_slot); /* Already checked in spapr_core_pre_plug() */ + + drc = spapr_drc_by_id(TYPE_SPAPR_DRC_CPU, + spapr_vcpu_id(spapr, cc->core_id)); + + g_assert(drc || !mc->has_hotpluggable_cpus); + + if (drc) { + /* + * spapr_core_pre_plug() already guarantees that this is a brand new + * core being plugged into a free slot. Nothing should already + * be attached to the corresponding DRC. + */ + spapr_drc_attach(drc, dev); + + if (hotplugged) { + /* + * Send hotplug notification interrupt to the guest only + * in case of hotplugged CPUs. + */ + spapr_hotplug_req_add_by_index(drc); + } else { + spapr_drc_reset(drc); + } + } + + core_slot->cpu = OBJECT(dev); + + /* + * Set compatibility mode to match the boot CPU, which was either set + * by the machine reset code or by CAS. This really shouldn't fail at + * this point. + */ + if (hotplugged) { + for (i = 0; i < cc->nr_threads; i++) { + ppc_set_compat(core->threads[i], POWERPC_CPU(first_cpu)->compat_pvr, + &error_abort); + } + } + + if (smc->pre_2_10_has_unused_icps) { + for (i = 0; i < cc->nr_threads; i++) { + cs = CPU(core->threads[i]); + pre_2_10_vmstate_unregister_dummy_icp(cs->cpu_index); + } + } +} + +static void spapr_core_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev, + Error **errp) +{ + MachineState *machine = MACHINE(OBJECT(hotplug_dev)); + MachineClass *mc = MACHINE_GET_CLASS(hotplug_dev); + CPUCore *cc = CPU_CORE(dev); + const char *base_core_type = spapr_get_cpu_core_type(machine->cpu_type); + const char *type = object_get_typename(OBJECT(dev)); + CPUArchId *core_slot; + int index; + unsigned int smp_threads = machine->smp.threads; + + if (dev->hotplugged && !mc->has_hotpluggable_cpus) { + error_setg(errp, "CPU hotplug not supported for this machine"); + return; + } + + if (strcmp(base_core_type, type)) { + error_setg(errp, "CPU core type should be %s", base_core_type); + return; + } + + if (cc->core_id % smp_threads) { + error_setg(errp, "invalid core id %d", cc->core_id); + return; + } + + /* + * In general we should have homogeneous threads-per-core, but old + * (pre hotplug support) machine types allow the last core to have + * reduced threads as a compatibility hack for when we allowed a + * total vcpu count that is not a multiple of threads-per-core. 
+ */ + if (mc->has_hotpluggable_cpus && (cc->nr_threads != smp_threads)) { + error_setg(errp, "invalid nr-threads %d, must be %d", cc->nr_threads, + smp_threads); + return; + } + + core_slot = spapr_find_cpu_slot(MACHINE(hotplug_dev), cc->core_id, &index); + if (!core_slot) { + error_setg(errp, "core id %d out of range", cc->core_id); + return; + } + + if (core_slot->cpu) { + error_setg(errp, "core %d already populated", cc->core_id); + return; + } + + numa_cpu_pre_plug(core_slot, dev, errp); +} + +int spapr_phb_dt_populate(SpaprDrc *drc, SpaprMachineState *spapr, + void *fdt, int *fdt_start_offset, Error **errp) +{ + SpaprPhbState *sphb = SPAPR_PCI_HOST_BRIDGE(drc->dev); + int intc_phandle; + + intc_phandle = spapr_irq_get_phandle(spapr, spapr->fdt_blob, errp); + if (intc_phandle <= 0) { + return -1; + } + + if (spapr_dt_phb(spapr, sphb, intc_phandle, fdt, fdt_start_offset)) { + error_setg(errp, "unable to create FDT node for PHB %d", sphb->index); + return -1; + } + + /* generally SLOF creates these, for hotplug it's up to QEMU */ + _FDT(fdt_setprop_string(fdt, *fdt_start_offset, "name", "pci")); + + return 0; +} + +static bool spapr_phb_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev, + Error **errp) +{ + SpaprMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev)); + SpaprPhbState *sphb = SPAPR_PCI_HOST_BRIDGE(dev); + SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr); + const unsigned windows_supported = spapr_phb_windows_supported(sphb); + SpaprDrc *drc; + + if (dev->hotplugged && !smc->dr_phb_enabled) { + error_setg(errp, "PHB hotplug not supported for this machine"); + return false; + } + + if (sphb->index == (uint32_t)-1) { + error_setg(errp, "\"index\" for PAPR PHB is mandatory"); + return false; + } + + drc = spapr_drc_by_id(TYPE_SPAPR_DRC_PHB, sphb->index); + if (drc && drc->dev) { + error_setg(errp, "PHB %d already attached", sphb->index); + return false; + } + + /* + * This will check that sphb->index doesn't exceed the maximum number of + * PHBs for the current machine type. 
+ */ + return + smc->phb_placement(spapr, sphb->index, + &sphb->buid, &sphb->io_win_addr, + &sphb->mem_win_addr, &sphb->mem64_win_addr, + windows_supported, sphb->dma_liobn, + &sphb->nv2_gpa_win_addr, &sphb->nv2_atsd_win_addr, + errp); +} + +static void spapr_phb_plug(HotplugHandler *hotplug_dev, DeviceState *dev) +{ + SpaprMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev)); + SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr); + SpaprPhbState *sphb = SPAPR_PCI_HOST_BRIDGE(dev); + SpaprDrc *drc; + bool hotplugged = spapr_drc_hotplugged(dev); + + if (!smc->dr_phb_enabled) { + return; + } + + drc = spapr_drc_by_id(TYPE_SPAPR_DRC_PHB, sphb->index); + /* hotplug hooks should check it's enabled before getting this far */ + assert(drc); + + /* spapr_phb_pre_plug() already checked the DRC is attachable */ + spapr_drc_attach(drc, dev); + + if (hotplugged) { + spapr_hotplug_req_add_by_index(drc); + } else { + spapr_drc_reset(drc); + } +} + +void spapr_phb_release(DeviceState *dev) +{ + HotplugHandler *hotplug_ctrl = qdev_get_hotplug_handler(dev); + + hotplug_handler_unplug(hotplug_ctrl, dev, &error_abort); + object_unparent(OBJECT(dev)); +} + +static void spapr_phb_unplug(HotplugHandler *hotplug_dev, DeviceState *dev) +{ + qdev_unrealize(dev); +} + +static void spapr_phb_unplug_request(HotplugHandler *hotplug_dev, + DeviceState *dev, Error **errp) +{ + SpaprPhbState *sphb = SPAPR_PCI_HOST_BRIDGE(dev); + SpaprDrc *drc; + + drc = spapr_drc_by_id(TYPE_SPAPR_DRC_PHB, sphb->index); + assert(drc); + + if (!spapr_drc_unplug_requested(drc)) { + spapr_drc_unplug_request(drc); + spapr_hotplug_req_remove_by_index(drc); + } else { + error_setg(errp, + "PCI Host Bridge unplug already in progress for device %s", + dev->id); + } +} + +static +bool spapr_tpm_proxy_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev, + Error **errp) +{ + SpaprMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev)); + + if (spapr->tpm_proxy != NULL) { + error_setg(errp, "Only one TPM proxy can be specified for this machine"); + return false; + } + + return true; +} + +static void spapr_tpm_proxy_plug(HotplugHandler *hotplug_dev, DeviceState *dev) +{ + SpaprMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev)); + SpaprTpmProxy *tpm_proxy = SPAPR_TPM_PROXY(dev); + + /* Already checked in spapr_tpm_proxy_pre_plug() */ + g_assert(spapr->tpm_proxy == NULL); + + spapr->tpm_proxy = tpm_proxy; +} + +static void spapr_tpm_proxy_unplug(HotplugHandler *hotplug_dev, DeviceState *dev) +{ + SpaprMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev)); + + qdev_unrealize(dev); + object_unparent(OBJECT(dev)); + spapr->tpm_proxy = NULL; +} + +static void spapr_machine_device_plug(HotplugHandler *hotplug_dev, + DeviceState *dev, Error **errp) +{ + if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) { + spapr_memory_plug(hotplug_dev, dev); + } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE)) { + spapr_core_plug(hotplug_dev, dev); + } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_PCI_HOST_BRIDGE)) { + spapr_phb_plug(hotplug_dev, dev); + } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_TPM_PROXY)) { + spapr_tpm_proxy_plug(hotplug_dev, dev); + } +} + +static void spapr_machine_device_unplug(HotplugHandler *hotplug_dev, + DeviceState *dev, Error **errp) +{ + if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) { + spapr_memory_unplug(hotplug_dev, dev); + } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE)) { + spapr_core_unplug(hotplug_dev, dev); + } else if (object_dynamic_cast(OBJECT(dev), 
TYPE_SPAPR_PCI_HOST_BRIDGE)) { + spapr_phb_unplug(hotplug_dev, dev); + } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_TPM_PROXY)) { + spapr_tpm_proxy_unplug(hotplug_dev, dev); + } +} + +bool spapr_memory_hot_unplug_supported(SpaprMachineState *spapr) +{ + return spapr_ovec_test(spapr->ov5_cas, OV5_HP_EVT) || + /* + * CAS will process all pending unplug requests. + * + * HACK: a guest could theoretically have cleared all bits in OV5, + * but none of the guests we care for do. + */ + spapr_ovec_empty(spapr->ov5_cas); +} + +static void spapr_machine_device_unplug_request(HotplugHandler *hotplug_dev, + DeviceState *dev, Error **errp) +{ + SpaprMachineState *sms = SPAPR_MACHINE(OBJECT(hotplug_dev)); + MachineClass *mc = MACHINE_GET_CLASS(sms); + SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc); + + if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) { + if (spapr_memory_hot_unplug_supported(sms)) { + spapr_memory_unplug_request(hotplug_dev, dev, errp); + } else { + error_setg(errp, "Memory hot unplug not supported for this guest"); + } + } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE)) { + if (!mc->has_hotpluggable_cpus) { + error_setg(errp, "CPU hot unplug not supported on this machine"); + return; + } + spapr_core_unplug_request(hotplug_dev, dev, errp); + } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_PCI_HOST_BRIDGE)) { + if (!smc->dr_phb_enabled) { + error_setg(errp, "PHB hot unplug not supported on this machine"); + return; + } + spapr_phb_unplug_request(hotplug_dev, dev, errp); + } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_TPM_PROXY)) { + spapr_tpm_proxy_unplug(hotplug_dev, dev); + } +} + +static void spapr_machine_device_pre_plug(HotplugHandler *hotplug_dev, + DeviceState *dev, Error **errp) +{ + if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) { + spapr_memory_pre_plug(hotplug_dev, dev, errp); + } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE)) { + spapr_core_pre_plug(hotplug_dev, dev, errp); + } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_PCI_HOST_BRIDGE)) { + spapr_phb_pre_plug(hotplug_dev, dev, errp); + } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_TPM_PROXY)) { + spapr_tpm_proxy_pre_plug(hotplug_dev, dev, errp); + } +} + +static HotplugHandler *spapr_get_hotplug_handler(MachineState *machine, + DeviceState *dev) +{ + if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM) || + object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE) || + object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_PCI_HOST_BRIDGE) || + object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_TPM_PROXY)) { + return HOTPLUG_HANDLER(machine); + } + if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) { + PCIDevice *pcidev = PCI_DEVICE(dev); + PCIBus *root = pci_device_root_bus(pcidev); + SpaprPhbState *phb = + (SpaprPhbState *)object_dynamic_cast(OBJECT(BUS(root)->parent), + TYPE_SPAPR_PCI_HOST_BRIDGE); + + if (phb) { + return HOTPLUG_HANDLER(phb); + } + } + return NULL; +} + +static CpuInstanceProperties +spapr_cpu_index_to_props(MachineState *machine, unsigned cpu_index) +{ + CPUArchId *core_slot; + MachineClass *mc = MACHINE_GET_CLASS(machine); + + /* make sure possible_cpus is initialized */ + mc->possible_cpu_arch_ids(machine); + /* get the CPU core slot containing the thread that matches cpu_index */ + core_slot = spapr_find_cpu_slot(machine, cpu_index, NULL); + assert(core_slot); + return core_slot->props; +} + +static int64_t spapr_get_default_cpu_node_id(const MachineState *ms, int idx) +{ + return idx / ms->smp.cores % ms->numa_state->num_nodes; +} + 
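/* + * Worked example for the default node mapping above (illustrative + * numbers, not from the original sources): with ms->smp.cores = 2 and + * 2 NUMA nodes, possible-CPU indexes 0-1 map to node 0, 2-3 to node 1, + * and 4-5 wrap back to node 0, i.e. each group of smp.cores entries + * fills one node, round-robin across the nodes. + */ + 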
+static const CPUArchIdList *spapr_possible_cpu_arch_ids(MachineState *machine) +{ + int i; + unsigned int smp_threads = machine->smp.threads; + unsigned int smp_cpus = machine->smp.cpus; + const char *core_type; + int spapr_max_cores = machine->smp.max_cpus / smp_threads; + MachineClass *mc = MACHINE_GET_CLASS(machine); + + if (!mc->has_hotpluggable_cpus) { + spapr_max_cores = QEMU_ALIGN_UP(smp_cpus, smp_threads) / smp_threads; + } + if (machine->possible_cpus) { + assert(machine->possible_cpus->len == spapr_max_cores); + return machine->possible_cpus; + } + + core_type = spapr_get_cpu_core_type(machine->cpu_type); + if (!core_type) { + error_report("Unable to find sPAPR CPU Core definition"); + exit(1); + } + + machine->possible_cpus = g_malloc0(sizeof(CPUArchIdList) + + sizeof(CPUArchId) * spapr_max_cores); + machine->possible_cpus->len = spapr_max_cores; + for (i = 0; i < machine->possible_cpus->len; i++) { + int core_id = i * smp_threads; + + machine->possible_cpus->cpus[i].type = core_type; + machine->possible_cpus->cpus[i].vcpus_count = smp_threads; + machine->possible_cpus->cpus[i].arch_id = core_id; + machine->possible_cpus->cpus[i].props.has_core_id = true; + machine->possible_cpus->cpus[i].props.core_id = core_id; + } + return machine->possible_cpus; +} + +static bool spapr_phb_placement(SpaprMachineState *spapr, uint32_t index, + uint64_t *buid, hwaddr *pio, + hwaddr *mmio32, hwaddr *mmio64, + unsigned n_dma, uint32_t *liobns, + hwaddr *nv2gpa, hwaddr *nv2atsd, Error **errp) +{ + /* + * New-style PHB window placement. + * + * Goals: Gives large (1TiB), naturally aligned 64-bit MMIO window + * for each PHB, in addition to 2GiB 32-bit MMIO and 64kiB PIO + * windows. + * + * Some guest kernels can't work with MMIO windows above 1<<46 + * (64TiB), so we place up to 31 PHBs in the area 32TiB..64TiB + * + * 32TiB..(33TiB+1984kiB) contains the 64kiB PIO windows for each + * PHB stacked together. (32TiB+2GiB)..(32TiB+64GiB) contains the + * 2GiB 32-bit MMIO windows for each PHB. Then 33..64TiB has the + * 1TiB 64-bit MMIO windows for each PHB. + */ + const uint64_t base_buid = 0x800000020000000ULL; + int i; + + /* Sanity check natural alignments */ + QEMU_BUILD_BUG_ON((SPAPR_PCI_BASE % SPAPR_PCI_MEM64_WIN_SIZE) != 0); + QEMU_BUILD_BUG_ON((SPAPR_PCI_LIMIT % SPAPR_PCI_MEM64_WIN_SIZE) != 0); + QEMU_BUILD_BUG_ON((SPAPR_PCI_MEM64_WIN_SIZE % SPAPR_PCI_MEM32_WIN_SIZE) != 0); + QEMU_BUILD_BUG_ON((SPAPR_PCI_MEM32_WIN_SIZE % SPAPR_PCI_IO_WIN_SIZE) != 0); + /* Sanity check bounds */ + QEMU_BUILD_BUG_ON((SPAPR_MAX_PHBS * SPAPR_PCI_IO_WIN_SIZE) > + SPAPR_PCI_MEM32_WIN_SIZE); + QEMU_BUILD_BUG_ON((SPAPR_MAX_PHBS * SPAPR_PCI_MEM32_WIN_SIZE) > + SPAPR_PCI_MEM64_WIN_SIZE); + + if (index >= SPAPR_MAX_PHBS) { + error_setg(errp, "\"index\" for PAPR PHB is too large (max %llu)", + SPAPR_MAX_PHBS - 1); + return false; + } + + *buid = base_buid + index; + for (i = 0; i < n_dma; ++i) { + liobns[i] = SPAPR_PCI_LIOBN(index, i); + } + + *pio = SPAPR_PCI_BASE + index * SPAPR_PCI_IO_WIN_SIZE; + *mmio32 = SPAPR_PCI_BASE + (index + 1) * SPAPR_PCI_MEM32_WIN_SIZE; + *mmio64 = SPAPR_PCI_BASE + (index + 1) * SPAPR_PCI_MEM64_WIN_SIZE; + + *nv2gpa = SPAPR_PCI_NV2RAM64_WIN_BASE + index * SPAPR_PCI_NV2RAM64_WIN_SIZE; + *nv2atsd = SPAPR_PCI_NV2ATSD_WIN_BASE + index * SPAPR_PCI_NV2ATSD_WIN_SIZE; + return true; +} + +static ICSState *spapr_ics_get(XICSFabric *dev, int irq) +{ + SpaprMachineState *spapr = SPAPR_MACHINE(dev); + + return ics_valid_irq(spapr->ics, irq) ? 
spapr->ics : NULL; +} + +static void spapr_ics_resend(XICSFabric *dev) +{ + SpaprMachineState *spapr = SPAPR_MACHINE(dev); + + ics_resend(spapr->ics); +} + +static ICPState *spapr_icp_get(XICSFabric *xi, int vcpu_id) +{ + PowerPCCPU *cpu = spapr_find_cpu(vcpu_id); + + return cpu ? spapr_cpu_state(cpu)->icp : NULL; +} + +static void spapr_pic_print_info(InterruptStatsProvider *obj, + Monitor *mon) +{ + SpaprMachineState *spapr = SPAPR_MACHINE(obj); + + spapr_irq_print_info(spapr, mon); + monitor_printf(mon, "irqchip: %s\n", + kvm_irqchip_in_kernel() ? "in-kernel" : "emulated"); +} + +/* + * This is a XIVE-only operation + */ +static int spapr_match_nvt(XiveFabric *xfb, uint8_t format, + uint8_t nvt_blk, uint32_t nvt_idx, + bool cam_ignore, uint8_t priority, + uint32_t logic_serv, XiveTCTXMatch *match) +{ + SpaprMachineState *spapr = SPAPR_MACHINE(xfb); + XivePresenter *xptr = XIVE_PRESENTER(spapr->active_intc); + XivePresenterClass *xpc = XIVE_PRESENTER_GET_CLASS(xptr); + int count; + + count = xpc->match_nvt(xptr, format, nvt_blk, nvt_idx, cam_ignore, + priority, logic_serv, match); + if (count < 0) { + return count; + } + + /* + * When we implement the save and restore of the thread interrupt + * contexts in the enter/exit CPU handlers of the machine and the + * escalations in QEMU, we should be able to handle non-dispatched + * vCPUs. + * + * Until this is done, the sPAPR machine should always find at + * least one matching context. + */ + if (count == 0) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVT %x/%x is not dispatched\n", + nvt_blk, nvt_idx); + } + + return count; +} + +int spapr_get_vcpu_id(PowerPCCPU *cpu) +{ + return cpu->vcpu_id; +} + +bool spapr_set_vcpu_id(PowerPCCPU *cpu, int cpu_index, Error **errp) +{ + SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine()); + MachineState *ms = MACHINE(spapr); + int vcpu_id; + + vcpu_id = spapr_vcpu_id(spapr, cpu_index); + + if (kvm_enabled() && !kvm_vcpu_id_is_valid(vcpu_id)) { + error_setg(errp, "Can't create CPU with id %d in KVM", vcpu_id); + error_append_hint(errp, "Adjust the number of cpus to %d " + "or try to raise the number of threads per core\n", + vcpu_id * ms->smp.threads / spapr->vsmt); + return false; + } + + cpu->vcpu_id = vcpu_id; + return true; +} + +PowerPCCPU *spapr_find_cpu(int vcpu_id) +{ + CPUState *cs; + + CPU_FOREACH(cs) { + PowerPCCPU *cpu = POWERPC_CPU(cs); + + if (spapr_get_vcpu_id(cpu) == vcpu_id) { + return cpu; + } + } + + return NULL; +} + +static void spapr_cpu_exec_enter(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu) +{ + SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu); + + /* These are only called by TCG; KVM maintains dispatch state */ + + spapr_cpu->prod = false; + if (spapr_cpu->vpa_addr) { + CPUState *cs = CPU(cpu); + uint32_t dispatch; + + dispatch = ldl_be_phys(cs->as, + spapr_cpu->vpa_addr + VPA_DISPATCH_COUNTER); + dispatch++; + if ((dispatch & 1) != 0) { + qemu_log_mask(LOG_GUEST_ERROR, + "VPA: incorrect dispatch counter value for " + "dispatched partition %u, correcting.\n", dispatch); + dispatch++; + } + stl_be_phys(cs->as, + spapr_cpu->vpa_addr + VPA_DISPATCH_COUNTER, dispatch); + } +} + +static void spapr_cpu_exec_exit(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu) +{ + SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu); + + if (spapr_cpu->vpa_addr) { + CPUState *cs = CPU(cpu); + uint32_t dispatch; + + dispatch = ldl_be_phys(cs->as, + spapr_cpu->vpa_addr + VPA_DISPATCH_COUNTER); + dispatch++; + if ((dispatch & 1) != 1) { + qemu_log_mask(LOG_GUEST_ERROR, + "VPA: incorrect dispatch counter 
value for " + "preempted partition %u, correcting.\n", dispatch); + dispatch++; + } + stl_be_phys(cs->as, + spapr_cpu->vpa_addr + VPA_DISPATCH_COUNTER, dispatch); + } +} + +static void spapr_machine_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(oc); + FWPathProviderClass *fwc = FW_PATH_PROVIDER_CLASS(oc); + NMIClass *nc = NMI_CLASS(oc); + HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(oc); + PPCVirtualHypervisorClass *vhc = PPC_VIRTUAL_HYPERVISOR_CLASS(oc); + XICSFabricClass *xic = XICS_FABRIC_CLASS(oc); + InterruptStatsProviderClass *ispc = INTERRUPT_STATS_PROVIDER_CLASS(oc); + XiveFabricClass *xfc = XIVE_FABRIC_CLASS(oc); + VofMachineIfClass *vmc = VOF_MACHINE_CLASS(oc); + + mc->desc = "pSeries Logical Partition (PAPR compliant)"; + mc->ignore_boot_device_suffixes = true; + + /* + * We set up the default / latest behaviour here. The class_init + * functions for the specific versioned machine types can override + * these details for backwards compatibility + */ + mc->init = spapr_machine_init; + mc->reset = spapr_machine_reset; + mc->block_default_type = IF_SCSI; + + /* + * Setting max_cpus to INT32_MAX. Both KVM and TCG max_cpus values + * should be limited by the host capability instead of hardcoded. + * max_cpus for KVM guests will be checked in kvm_init(), and TCG + * guests are welcome to have as many CPUs as the host are capable + * of emulate. + */ + mc->max_cpus = INT32_MAX; + + mc->no_parallel = 1; + mc->default_boot_order = ""; + mc->default_ram_size = 512 * MiB; + mc->default_ram_id = "ppc_spapr.ram"; + mc->default_display = "std"; + mc->kvm_type = spapr_kvm_type; + machine_class_allow_dynamic_sysbus_dev(mc, TYPE_SPAPR_PCI_HOST_BRIDGE); + mc->pci_allow_0_address = true; + assert(!mc->get_hotplug_handler); + mc->get_hotplug_handler = spapr_get_hotplug_handler; + hc->pre_plug = spapr_machine_device_pre_plug; + hc->plug = spapr_machine_device_plug; + mc->cpu_index_to_instance_props = spapr_cpu_index_to_props; + mc->get_default_cpu_node_id = spapr_get_default_cpu_node_id; + mc->possible_cpu_arch_ids = spapr_possible_cpu_arch_ids; + hc->unplug_request = spapr_machine_device_unplug_request; + hc->unplug = spapr_machine_device_unplug; + + smc->dr_lmb_enabled = true; + smc->update_dt_enabled = true; + mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("power9_v2.0"); + mc->has_hotpluggable_cpus = true; + mc->nvdimm_supported = true; + smc->resize_hpt_default = SPAPR_RESIZE_HPT_ENABLED; + fwc->get_dev_path = spapr_get_fw_dev_path; + nc->nmi_monitor_handler = spapr_nmi; + smc->phb_placement = spapr_phb_placement; + vhc->hypercall = emulate_spapr_hypercall; + vhc->hpt_mask = spapr_hpt_mask; + vhc->map_hptes = spapr_map_hptes; + vhc->unmap_hptes = spapr_unmap_hptes; + vhc->hpte_set_c = spapr_hpte_set_c; + vhc->hpte_set_r = spapr_hpte_set_r; + vhc->get_pate = spapr_get_pate; + vhc->encode_hpt_for_kvm_pr = spapr_encode_hpt_for_kvm_pr; + vhc->cpu_exec_enter = spapr_cpu_exec_enter; + vhc->cpu_exec_exit = spapr_cpu_exec_exit; + xic->ics_get = spapr_ics_get; + xic->ics_resend = spapr_ics_resend; + xic->icp_get = spapr_icp_get; + ispc->print_info = spapr_pic_print_info; + /* Force NUMA node memory size to be a multiple of + * SPAPR_MEMORY_BLOCK_SIZE (256M) since that's the granularity + * in which LMBs are represented and hot-added + */ + mc->numa_mem_align_shift = 28; + mc->auto_enable_numa = true; + + smc->default_caps.caps[SPAPR_CAP_HTM] = SPAPR_CAP_OFF; + smc->default_caps.caps[SPAPR_CAP_VSX] = SPAPR_CAP_ON; + 
smc->default_caps.caps[SPAPR_CAP_DFP] = SPAPR_CAP_ON; + smc->default_caps.caps[SPAPR_CAP_CFPC] = SPAPR_CAP_WORKAROUND; + smc->default_caps.caps[SPAPR_CAP_SBBC] = SPAPR_CAP_WORKAROUND; + smc->default_caps.caps[SPAPR_CAP_IBS] = SPAPR_CAP_WORKAROUND; + smc->default_caps.caps[SPAPR_CAP_HPT_MAXPAGESIZE] = 16; /* 64kiB */ + smc->default_caps.caps[SPAPR_CAP_NESTED_KVM_HV] = SPAPR_CAP_OFF; + smc->default_caps.caps[SPAPR_CAP_LARGE_DECREMENTER] = SPAPR_CAP_ON; + smc->default_caps.caps[SPAPR_CAP_CCF_ASSIST] = SPAPR_CAP_ON; + smc->default_caps.caps[SPAPR_CAP_FWNMI] = SPAPR_CAP_ON; + smc->default_caps.caps[SPAPR_CAP_RPT_INVALIDATE] = SPAPR_CAP_OFF; + spapr_caps_add_properties(smc); + smc->irq = &spapr_irq_dual; + smc->dr_phb_enabled = true; + smc->linux_pci_probe = true; + smc->smp_threads_vsmt = true; + smc->nr_xirqs = SPAPR_NR_XIRQS; + xfc->match_nvt = spapr_match_nvt; + vmc->client_architecture_support = spapr_vof_client_architecture_support; + vmc->quiesce = spapr_vof_quiesce; + vmc->setprop = spapr_vof_setprop; +} + +static const TypeInfo spapr_machine_info = { + .name = TYPE_SPAPR_MACHINE, + .parent = TYPE_MACHINE, + .abstract = true, + .instance_size = sizeof(SpaprMachineState), + .instance_init = spapr_instance_init, + .instance_finalize = spapr_machine_finalizefn, + .class_size = sizeof(SpaprMachineClass), + .class_init = spapr_machine_class_init, + .interfaces = (InterfaceInfo[]) { + { TYPE_FW_PATH_PROVIDER }, + { TYPE_NMI }, + { TYPE_HOTPLUG_HANDLER }, + { TYPE_PPC_VIRTUAL_HYPERVISOR }, + { TYPE_XICS_FABRIC }, + { TYPE_INTERRUPT_STATS_PROVIDER }, + { TYPE_XIVE_FABRIC }, + { TYPE_VOF_MACHINE_IF }, + { } + }, +}; + +static void spapr_machine_latest_class_options(MachineClass *mc) +{ + mc->alias = "pseries"; + mc->is_default = true; +} + +#define DEFINE_SPAPR_MACHINE(suffix, verstr, latest) \ + static void spapr_machine_##suffix##_class_init(ObjectClass *oc, \ + void *data) \ + { \ + MachineClass *mc = MACHINE_CLASS(oc); \ + spapr_machine_##suffix##_class_options(mc); \ + if (latest) { \ + spapr_machine_latest_class_options(mc); \ + } \ + } \ + static const TypeInfo spapr_machine_##suffix##_info = { \ + .name = MACHINE_TYPE_NAME("pseries-" verstr), \ + .parent = TYPE_SPAPR_MACHINE, \ + .class_init = spapr_machine_##suffix##_class_init, \ + }; \ + static void spapr_machine_register_##suffix(void) \ + { \ + type_register(&spapr_machine_##suffix##_info); \ + } \ + type_init(spapr_machine_register_##suffix) + +/* + * pseries-6.2 + */ +static void spapr_machine_6_2_class_options(MachineClass *mc) +{ + /* Defaults for the latest behaviour inherited from the base class */ +} + +DEFINE_SPAPR_MACHINE(6_2, "6.2", true); + +/* + * pseries-6.1 + */ +static void spapr_machine_6_1_class_options(MachineClass *mc) +{ + SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc); + + spapr_machine_6_2_class_options(mc); + compat_props_add(mc->compat_props, hw_compat_6_1, hw_compat_6_1_len); + smc->pre_6_2_numa_affinity = true; + mc->smp_props.prefer_sockets = true; +} + +DEFINE_SPAPR_MACHINE(6_1, "6.1", false); + +/* + * pseries-6.0 + */ +static void spapr_machine_6_0_class_options(MachineClass *mc) +{ + spapr_machine_6_1_class_options(mc); + compat_props_add(mc->compat_props, hw_compat_6_0, hw_compat_6_0_len); +} + +DEFINE_SPAPR_MACHINE(6_0, "6.0", false); + +/* + * pseries-5.2 + */ +static void spapr_machine_5_2_class_options(MachineClass *mc) +{ + spapr_machine_6_0_class_options(mc); + compat_props_add(mc->compat_props, hw_compat_5_2, hw_compat_5_2_len); +} + +DEFINE_SPAPR_MACHINE(5_2, "5.2", false); + +/* + * 
pseries-5.1 + */ +static void spapr_machine_5_1_class_options(MachineClass *mc) +{ + SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc); + + spapr_machine_5_2_class_options(mc); + compat_props_add(mc->compat_props, hw_compat_5_1, hw_compat_5_1_len); + smc->pre_5_2_numa_associativity = true; +} + +DEFINE_SPAPR_MACHINE(5_1, "5.1", false); + +/* + * pseries-5.0 + */ +static void spapr_machine_5_0_class_options(MachineClass *mc) +{ + SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc); + static GlobalProperty compat[] = { + { TYPE_SPAPR_PCI_HOST_BRIDGE, "pre-5.1-associativity", "on" }, + }; + + spapr_machine_5_1_class_options(mc); + compat_props_add(mc->compat_props, hw_compat_5_0, hw_compat_5_0_len); + compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat)); + mc->numa_mem_supported = true; + smc->pre_5_1_assoc_refpoints = true; +} + +DEFINE_SPAPR_MACHINE(5_0, "5.0", false); + +/* + * pseries-4.2 + */ +static void spapr_machine_4_2_class_options(MachineClass *mc) +{ + SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc); + + spapr_machine_5_0_class_options(mc); + compat_props_add(mc->compat_props, hw_compat_4_2, hw_compat_4_2_len); + smc->default_caps.caps[SPAPR_CAP_CCF_ASSIST] = SPAPR_CAP_OFF; + smc->default_caps.caps[SPAPR_CAP_FWNMI] = SPAPR_CAP_OFF; + smc->rma_limit = 16 * GiB; + mc->nvdimm_supported = false; +} + +DEFINE_SPAPR_MACHINE(4_2, "4.2", false); + +/* + * pseries-4.1 + */ +static void spapr_machine_4_1_class_options(MachineClass *mc) +{ + SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc); + static GlobalProperty compat[] = { + /* Only allow 4kiB and 64kiB IOMMU pagesizes */ + { TYPE_SPAPR_PCI_HOST_BRIDGE, "pgsz", "0x11000" }, + }; + + spapr_machine_4_2_class_options(mc); + smc->linux_pci_probe = false; + smc->smp_threads_vsmt = false; + compat_props_add(mc->compat_props, hw_compat_4_1, hw_compat_4_1_len); + compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat)); +} + +DEFINE_SPAPR_MACHINE(4_1, "4.1", false); + +/* + * pseries-4.0 + */ +static bool phb_placement_4_0(SpaprMachineState *spapr, uint32_t index, + uint64_t *buid, hwaddr *pio, + hwaddr *mmio32, hwaddr *mmio64, + unsigned n_dma, uint32_t *liobns, + hwaddr *nv2gpa, hwaddr *nv2atsd, Error **errp) +{ + if (!spapr_phb_placement(spapr, index, buid, pio, mmio32, mmio64, n_dma, + liobns, nv2gpa, nv2atsd, errp)) { + return false; + } + + *nv2gpa = 0; + *nv2atsd = 0; + return true; +} +static void spapr_machine_4_0_class_options(MachineClass *mc) +{ + SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc); + + spapr_machine_4_1_class_options(mc); + compat_props_add(mc->compat_props, hw_compat_4_0, hw_compat_4_0_len); + smc->phb_placement = phb_placement_4_0; + smc->irq = &spapr_irq_xics; + smc->pre_4_1_migration = true; +} + +DEFINE_SPAPR_MACHINE(4_0, "4.0", false); + +/* + * pseries-3.1 + */ +static void spapr_machine_3_1_class_options(MachineClass *mc) +{ + SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc); + + spapr_machine_4_0_class_options(mc); + compat_props_add(mc->compat_props, hw_compat_3_1, hw_compat_3_1_len); + + mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("power8_v2.0"); + smc->update_dt_enabled = false; + smc->dr_phb_enabled = false; + smc->broken_host_serial_model = true; + smc->default_caps.caps[SPAPR_CAP_CFPC] = SPAPR_CAP_BROKEN; + smc->default_caps.caps[SPAPR_CAP_SBBC] = SPAPR_CAP_BROKEN; + smc->default_caps.caps[SPAPR_CAP_IBS] = SPAPR_CAP_BROKEN; + smc->default_caps.caps[SPAPR_CAP_LARGE_DECREMENTER] = SPAPR_CAP_OFF; +} + +DEFINE_SPAPR_MACHINE(3_1, "3.1", false); + +/* + * pseries-3.0 + */ + +static void 
spapr_machine_3_0_class_options(MachineClass *mc) +{ + SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc); + + spapr_machine_3_1_class_options(mc); + compat_props_add(mc->compat_props, hw_compat_3_0, hw_compat_3_0_len); + + smc->legacy_irq_allocation = true; + smc->nr_xirqs = 0x400; + smc->irq = &spapr_irq_xics_legacy; +} + +DEFINE_SPAPR_MACHINE(3_0, "3.0", false); + +/* + * pseries-2.12 + */ +static void spapr_machine_2_12_class_options(MachineClass *mc) +{ + SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc); + static GlobalProperty compat[] = { + { TYPE_POWERPC_CPU, "pre-3.0-migration", "on" }, + { TYPE_SPAPR_CPU_CORE, "pre-3.0-migration", "on" }, + }; + + spapr_machine_3_0_class_options(mc); + compat_props_add(mc->compat_props, hw_compat_2_12, hw_compat_2_12_len); + compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat)); + + /* We depend on kvm_enabled() to choose a default value for the + * hpt-max-page-size capability. Of course we can't do it here + * because this is too early and the HW accelerator isn't initialized + * yet. Postpone this to machine init (see default_caps_with_cpu()). + */ + smc->default_caps.caps[SPAPR_CAP_HPT_MAXPAGESIZE] = 0; +} + +DEFINE_SPAPR_MACHINE(2_12, "2.12", false); + +static void spapr_machine_2_12_sxxm_class_options(MachineClass *mc) +{ + SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc); + + spapr_machine_2_12_class_options(mc); + smc->default_caps.caps[SPAPR_CAP_CFPC] = SPAPR_CAP_WORKAROUND; + smc->default_caps.caps[SPAPR_CAP_SBBC] = SPAPR_CAP_WORKAROUND; + smc->default_caps.caps[SPAPR_CAP_IBS] = SPAPR_CAP_FIXED_CCD; +} + +DEFINE_SPAPR_MACHINE(2_12_sxxm, "2.12-sxxm", false); + +/* + * pseries-2.11 + */ + +static void spapr_machine_2_11_class_options(MachineClass *mc) +{ + SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc); + + spapr_machine_2_12_class_options(mc); + smc->default_caps.caps[SPAPR_CAP_HTM] = SPAPR_CAP_ON; + compat_props_add(mc->compat_props, hw_compat_2_11, hw_compat_2_11_len); +} + +DEFINE_SPAPR_MACHINE(2_11, "2.11", false); + +/* + * pseries-2.10 + */ + +static void spapr_machine_2_10_class_options(MachineClass *mc) +{ + spapr_machine_2_11_class_options(mc); + compat_props_add(mc->compat_props, hw_compat_2_10, hw_compat_2_10_len); +} + +DEFINE_SPAPR_MACHINE(2_10, "2.10", false); + +/* + * pseries-2.9 + */ + +static void spapr_machine_2_9_class_options(MachineClass *mc) +{ + SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc); + static GlobalProperty compat[] = { + { TYPE_POWERPC_CPU, "pre-2.10-migration", "on" }, + }; + + spapr_machine_2_10_class_options(mc); + compat_props_add(mc->compat_props, hw_compat_2_9, hw_compat_2_9_len); + compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat)); + smc->pre_2_10_has_unused_icps = true; + smc->resize_hpt_default = SPAPR_RESIZE_HPT_DISABLED; +} + +DEFINE_SPAPR_MACHINE(2_9, "2.9", false); + +/* + * pseries-2.8 + */ + +static void spapr_machine_2_8_class_options(MachineClass *mc) +{ + static GlobalProperty compat[] = { + { TYPE_SPAPR_PCI_HOST_BRIDGE, "pcie-extended-configuration-space", "off" }, + }; + + spapr_machine_2_9_class_options(mc); + compat_props_add(mc->compat_props, hw_compat_2_8, hw_compat_2_8_len); + compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat)); + mc->numa_mem_align_shift = 23; +} + +DEFINE_SPAPR_MACHINE(2_8, "2.8", false); + +/* + * pseries-2.7 + */ + +static bool phb_placement_2_7(SpaprMachineState *spapr, uint32_t index, + uint64_t *buid, hwaddr *pio, + hwaddr *mmio32, hwaddr *mmio64, + unsigned n_dma, uint32_t *liobns, + hwaddr *nv2gpa, hwaddr *nv2atsd,
Error **errp) +{ + /* Legacy PHB placement for pseries-2.7 and earlier machine types */ + const uint64_t base_buid = 0x800000020000000ULL; + const hwaddr phb_spacing = 0x1000000000ULL; /* 64 GiB */ + const hwaddr mmio_offset = 0xa0000000; /* 2 GiB + 512 MiB */ + const hwaddr pio_offset = 0x80000000; /* 2 GiB */ + const uint32_t max_index = 255; + const hwaddr phb0_alignment = 0x10000000000ULL; /* 1 TiB */ + + uint64_t ram_top = MACHINE(spapr)->ram_size; + hwaddr phb0_base, phb_base; + int i; + + /* Do we have device memory? */ + if (MACHINE(spapr)->maxram_size > ram_top) { + /* Can't just use maxram_size, because there may be an + * alignment gap between normal and device memory regions + */ + ram_top = MACHINE(spapr)->device_memory->base + + memory_region_size(&MACHINE(spapr)->device_memory->mr); + } + + phb0_base = QEMU_ALIGN_UP(ram_top, phb0_alignment); + + if (index > max_index) { + error_setg(errp, "\"index\" for PAPR PHB is too large (max %u)", + max_index); + return false; + } + + *buid = base_buid + index; + for (i = 0; i < n_dma; ++i) { + liobns[i] = SPAPR_PCI_LIOBN(index, i); + } + + phb_base = phb0_base + index * phb_spacing; + *pio = phb_base + pio_offset; + *mmio32 = phb_base + mmio_offset; + /* + * We don't set the 64-bit MMIO window, relying on the PHB's + * fallback behaviour of automatically splitting a large "32-bit" + * window into contiguous 32-bit and 64-bit windows + */ + + *nv2gpa = 0; + *nv2atsd = 0; + return true; +} + +static void spapr_machine_2_7_class_options(MachineClass *mc) +{ + SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc); + static GlobalProperty compat[] = { + { TYPE_SPAPR_PCI_HOST_BRIDGE, "mem_win_size", "0xf80000000", }, + { TYPE_SPAPR_PCI_HOST_BRIDGE, "mem64_win_size", "0", }, + { TYPE_POWERPC_CPU, "pre-2.8-migration", "on", }, + { TYPE_SPAPR_PCI_HOST_BRIDGE, "pre-2.8-migration", "on", }, + }; + + spapr_machine_2_8_class_options(mc); + mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("power7_v2.3"); + mc->default_machine_opts = "modern-hotplug-events=off"; + compat_props_add(mc->compat_props, hw_compat_2_7, hw_compat_2_7_len); + compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat)); + smc->phb_placement = phb_placement_2_7; +} + +DEFINE_SPAPR_MACHINE(2_7, "2.7", false); + +/* + * pseries-2.6 + */ + +static void spapr_machine_2_6_class_options(MachineClass *mc) +{ + static GlobalProperty compat[] = { + { TYPE_SPAPR_PCI_HOST_BRIDGE, "ddw", "off" }, + }; + + spapr_machine_2_7_class_options(mc); + mc->has_hotpluggable_cpus = false; + compat_props_add(mc->compat_props, hw_compat_2_6, hw_compat_2_6_len); + compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat)); +} + +DEFINE_SPAPR_MACHINE(2_6, "2.6", false); + +/* + * pseries-2.5 + */ + +static void spapr_machine_2_5_class_options(MachineClass *mc) +{ + SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc); + static GlobalProperty compat[] = { + { "spapr-vlan", "use-rx-buffer-pools", "off" }, + }; + + spapr_machine_2_6_class_options(mc); + smc->use_ohci_by_default = true; + compat_props_add(mc->compat_props, hw_compat_2_5, hw_compat_2_5_len); + compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat)); +} + +DEFINE_SPAPR_MACHINE(2_5, "2.5", false); + +/* + * pseries-2.4 + */ + +static void spapr_machine_2_4_class_options(MachineClass *mc) +{ + SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc); + + spapr_machine_2_5_class_options(mc); + smc->dr_lmb_enabled = false; + compat_props_add(mc->compat_props, hw_compat_2_4, hw_compat_2_4_len); +} + +DEFINE_SPAPR_MACHINE(2_4, "2.4", false); + +/* + 
* pseries-2.3 + */ + +static void spapr_machine_2_3_class_options(MachineClass *mc) +{ + static GlobalProperty compat[] = { + { "spapr-pci-host-bridge", "dynamic-reconfiguration", "off" }, + }; + spapr_machine_2_4_class_options(mc); + compat_props_add(mc->compat_props, hw_compat_2_3, hw_compat_2_3_len); + compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat)); +} +DEFINE_SPAPR_MACHINE(2_3, "2.3", false); + +/* + * pseries-2.2 + */ + +static void spapr_machine_2_2_class_options(MachineClass *mc) +{ + static GlobalProperty compat[] = { + { TYPE_SPAPR_PCI_HOST_BRIDGE, "mem_win_size", "0x20000000" }, + }; + + spapr_machine_2_3_class_options(mc); + compat_props_add(mc->compat_props, hw_compat_2_2, hw_compat_2_2_len); + compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat)); + mc->default_machine_opts = "modern-hotplug-events=off,suppress-vmdesc=on"; +} +DEFINE_SPAPR_MACHINE(2_2, "2.2", false); + +/* + * pseries-2.1 + */ + +static void spapr_machine_2_1_class_options(MachineClass *mc) +{ + spapr_machine_2_2_class_options(mc); + compat_props_add(mc->compat_props, hw_compat_2_1, hw_compat_2_1_len); +} +DEFINE_SPAPR_MACHINE(2_1, "2.1", false); + +static void spapr_machine_register_types(void) +{ + type_register_static(&spapr_machine_info); +} + +type_init(spapr_machine_register_types) diff --git a/hw/ppc/spapr_caps.c b/hw/ppc/spapr_caps.c new file mode 100644 index 000000000..ed7c077a0 --- /dev/null +++ b/hw/ppc/spapr_caps.c @@ -0,0 +1,944 @@ +/* + * QEMU PowerPC pSeries Logical Partition capabilities handling + * + * Copyright (c) 2017 David Gibson, Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "qemu/error-report.h" +#include "qapi/error.h" +#include "qapi/visitor.h" +#include "sysemu/hw_accel.h" +#include "exec/ram_addr.h" +#include "target/ppc/cpu.h" +#include "target/ppc/mmu-hash64.h" +#include "cpu-models.h" +#include "kvm_ppc.h" +#include "migration/vmstate.h" +#include "sysemu/tcg.h" + +#include "hw/ppc/spapr.h" + +typedef struct SpaprCapPossible { + int num; /* size of vals array below */ + const char *help; /* help text for vals */ + /* + * Note: + * - because of the way compatibility is determined vals MUST be ordered + * such that later options are a superset of all preceding options. 
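+ * (For instance, cap_cfpc_possible below orders "broken" < "workaround" < "fixed", so a larger index never offers less protection than a smaller one.)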
+ * - the order of vals must be preserved, that is their index is important, + * however vals may be added to the end of the list so long as the above + * point is observed + */ + const char *vals[]; +} SpaprCapPossible; + +typedef struct SpaprCapabilityInfo { + const char *name; + const char *description; + int index; + + /* Getter and Setter Function Pointers */ + ObjectPropertyAccessor *get; + ObjectPropertyAccessor *set; + const char *type; + /* Possible values if this is a custom string type */ + SpaprCapPossible *possible; + /* Make sure the virtual hardware can support this capability */ + void (*apply)(SpaprMachineState *spapr, uint8_t val, Error **errp); + void (*cpu_apply)(SpaprMachineState *spapr, PowerPCCPU *cpu, + uint8_t val, Error **errp); + bool (*migrate_needed)(void *opaque); +} SpaprCapabilityInfo; + +static void spapr_cap_get_bool(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + SpaprCapabilityInfo *cap = opaque; + SpaprMachineState *spapr = SPAPR_MACHINE(obj); + bool value = spapr_get_cap(spapr, cap->index) == SPAPR_CAP_ON; + + visit_type_bool(v, name, &value, errp); +} + +static void spapr_cap_set_bool(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + SpaprCapabilityInfo *cap = opaque; + SpaprMachineState *spapr = SPAPR_MACHINE(obj); + bool value; + + if (!visit_type_bool(v, name, &value, errp)) { + return; + } + + spapr->cmd_line_caps[cap->index] = true; + spapr->eff.caps[cap->index] = value ? SPAPR_CAP_ON : SPAPR_CAP_OFF; +} + + +static void spapr_cap_get_string(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + SpaprCapabilityInfo *cap = opaque; + SpaprMachineState *spapr = SPAPR_MACHINE(obj); + char *val = NULL; + uint8_t value = spapr_get_cap(spapr, cap->index); + + if (value >= cap->possible->num) { + error_setg(errp, "Invalid value (%d) for cap-%s", value, cap->name); + return; + } + + val = g_strdup(cap->possible->vals[value]); + + visit_type_str(v, name, &val, errp); + g_free(val); +} + +static void spapr_cap_set_string(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + SpaprCapabilityInfo *cap = opaque; + SpaprMachineState *spapr = SPAPR_MACHINE(obj); + uint8_t i; + char *val; + + if (!visit_type_str(v, name, &val, errp)) { + return; + } + + if (!strcmp(val, "?")) { + error_setg(errp, "%s", cap->possible->help); + goto out; + } + for (i = 0; i < cap->possible->num; i++) { + if (!strcasecmp(val, cap->possible->vals[i])) { + spapr->cmd_line_caps[cap->index] = true; + spapr->eff.caps[cap->index] = i; + goto out; + } + } + + error_setg(errp, "Invalid capability mode \"%s\" for cap-%s", val, + cap->name); +out: + g_free(val); +} + +static void spapr_cap_get_pagesize(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + SpaprCapabilityInfo *cap = opaque; + SpaprMachineState *spapr = SPAPR_MACHINE(obj); + uint8_t val = spapr_get_cap(spapr, cap->index); + uint64_t pagesize = (1ULL << val); + + visit_type_size(v, name, &pagesize, errp); +} + +static void spapr_cap_set_pagesize(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + SpaprCapabilityInfo *cap = opaque; + SpaprMachineState *spapr = SPAPR_MACHINE(obj); + uint64_t pagesize; + uint8_t val; + + if (!visit_type_size(v, name, &pagesize, errp)) { + return; + } + + if (!is_power_of_2(pagesize)) { + error_setg(errp, "cap-%s must be a power of 2", cap->name); + return; + } + + val = ctz64(pagesize); + spapr->cmd_line_caps[cap->index] = true; + 
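+ /* The cap stores log2 of the page size: ctz64() of a power of two is exactly its log2, so e.g. a 64kiB limit is recorded as 16. */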
spapr->eff.caps[cap->index] = val; +} + +static void cap_htm_apply(SpaprMachineState *spapr, uint8_t val, Error **errp) +{ + ERRP_GUARD(); + if (!val) { + /* TODO: We don't support disabling htm yet */ + return; + } + if (tcg_enabled()) { + error_setg(errp, "No Transactional Memory support in TCG"); + error_append_hint(errp, "Try appending -machine cap-htm=off\n"); + } else if (kvm_enabled() && !kvmppc_has_cap_htm()) { + error_setg(errp, + "KVM implementation does not support Transactional Memory"); + error_append_hint(errp, "Try appending -machine cap-htm=off\n"); + } +} + +static void cap_vsx_apply(SpaprMachineState *spapr, uint8_t val, Error **errp) +{ + ERRP_GUARD(); + PowerPCCPU *cpu = POWERPC_CPU(first_cpu); + CPUPPCState *env = &cpu->env; + + if (!val) { + /* TODO: We don't support disabling vsx yet */ + return; + } + /* Allowable CPUs in spapr_cpu_core.c should already have gotten + * rid of anything that doesn't do VMX */ + g_assert(env->insns_flags & PPC_ALTIVEC); + if (!(env->insns_flags2 & PPC2_VSX)) { + error_setg(errp, "VSX support not available"); + error_append_hint(errp, "Try appending -machine cap-vsx=off\n"); + } +} + +static void cap_dfp_apply(SpaprMachineState *spapr, uint8_t val, Error **errp) +{ + ERRP_GUARD(); + PowerPCCPU *cpu = POWERPC_CPU(first_cpu); + CPUPPCState *env = &cpu->env; + + if (!val) { + /* TODO: We don't support disabling dfp yet */ + return; + } + if (!(env->insns_flags2 & PPC2_DFP)) { + error_setg(errp, "DFP support not available"); + error_append_hint(errp, "Try appending -machine cap-dfp=off\n"); + } +} + +SpaprCapPossible cap_cfpc_possible = { + .num = 3, + .vals = {"broken", "workaround", "fixed"}, + .help = "broken - no protection, workaround - workaround available," + " fixed - fixed in hardware", +}; + +static void cap_safe_cache_apply(SpaprMachineState *spapr, uint8_t val, + Error **errp) +{ + ERRP_GUARD(); + uint8_t kvm_val = kvmppc_get_cap_safe_cache(); + + if (tcg_enabled() && val) { + /* TCG only supports broken, allow other values and print a warning */ + warn_report("TCG doesn't support requested feature, cap-cfpc=%s", + cap_cfpc_possible.vals[val]); + } else if (kvm_enabled() && (val > kvm_val)) { + error_setg(errp, + "Requested safe cache capability level not supported by KVM"); + error_append_hint(errp, "Try appending -machine cap-cfpc=%s\n", + cap_cfpc_possible.vals[kvm_val]); + } +} + +SpaprCapPossible cap_sbbc_possible = { + .num = 3, + .vals = {"broken", "workaround", "fixed"}, + .help = "broken - no protection, workaround - workaround available," + " fixed - fixed in hardware", +}; + +static void cap_safe_bounds_check_apply(SpaprMachineState *spapr, uint8_t val, + Error **errp) +{ + ERRP_GUARD(); + uint8_t kvm_val = kvmppc_get_cap_safe_bounds_check(); + + if (tcg_enabled() && val) { + /* TCG only supports broken, allow other values and print a warning */ + warn_report("TCG doesn't support requested feature, cap-sbbc=%s", + cap_sbbc_possible.vals[val]); + } else if (kvm_enabled() && (val > kvm_val)) { + error_setg(errp, +"Requested safe bounds check capability level not supported by KVM"); + error_append_hint(errp, "Try appending -machine cap-sbbc=%s\n", + cap_sbbc_possible.vals[kvm_val]); + } +} + +SpaprCapPossible cap_ibs_possible = { + .num = 5, + /* Note workaround only maintained for compatibility */ + .vals = {"broken", "workaround", "fixed-ibs", "fixed-ccd", "fixed-na"}, + .help = "broken - no protection, workaround - count cache flush" + ", fixed-ibs - indirect branch serialisation," + " fixed-ccd - cache count 
disabled," + " fixed-na - fixed in hardware (no longer applicable)", +}; + +static void cap_safe_indirect_branch_apply(SpaprMachineState *spapr, + uint8_t val, Error **errp) +{ + ERRP_GUARD(); + uint8_t kvm_val = kvmppc_get_cap_safe_indirect_branch(); + + if (tcg_enabled() && val) { + /* TCG only supports broken, allow other values and print a warning */ + warn_report("TCG doesn't support requested feature, cap-ibs=%s", + cap_ibs_possible.vals[val]); + } else if (kvm_enabled() && (val > kvm_val)) { + error_setg(errp, +"Requested safe indirect branch capability level not supported by KVM"); + error_append_hint(errp, "Try appending -machine cap-ibs=%s\n", + cap_ibs_possible.vals[kvm_val]); + } +} + +#define VALUE_DESC_TRISTATE " (broken, workaround, fixed)" + +bool spapr_check_pagesize(SpaprMachineState *spapr, hwaddr pagesize, + Error **errp) +{ + hwaddr maxpagesize = (1ULL << spapr->eff.caps[SPAPR_CAP_HPT_MAXPAGESIZE]); + + if (!kvmppc_hpt_needs_host_contiguous_pages()) { + return true; + } + + if (maxpagesize > pagesize) { + error_setg(errp, + "Can't support %"HWADDR_PRIu" kiB guest pages with %" + HWADDR_PRIu" kiB host pages with this KVM implementation", + maxpagesize >> 10, pagesize >> 10); + return false; + } + + return true; +} + +static void cap_hpt_maxpagesize_apply(SpaprMachineState *spapr, + uint8_t val, Error **errp) +{ + if (val < 12) { + error_setg(errp, "Require at least 4kiB hpt-max-page-size"); + return; + } else if (val < 16) { + warn_report("Many guests require at least 64kiB hpt-max-page-size"); + } + + spapr_check_pagesize(spapr, qemu_minrampagesize(), errp); +} + +static bool cap_hpt_maxpagesize_migrate_needed(void *opaque) +{ + return !SPAPR_MACHINE_GET_CLASS(opaque)->pre_4_1_migration; +} + +static bool spapr_pagesize_cb(void *opaque, uint32_t seg_pshift, + uint32_t pshift) +{ + unsigned maxshift = *((unsigned *)opaque); + + assert(pshift >= seg_pshift); + + /* Don't allow the guest to use pages bigger than the configured + * maximum size */ + if (pshift > maxshift) { + return false; + } + + /* For whatever reason, KVM doesn't allow multiple pagesizes + * within a segment, *except* for the case of 16M pages in a 4k or + * 64k segment. 
Always exclude other cases, so that TCG and KVM + * guests see a consistent environment */ + if ((pshift != seg_pshift) && (pshift != 24)) { + return false; + } + + return true; +} + +static void ppc_hash64_filter_pagesizes(PowerPCCPU *cpu, + bool (*cb)(void *, uint32_t, uint32_t), + void *opaque) +{ + PPCHash64Options *opts = cpu->hash64_opts; + int i; + int n = 0; + bool ci_largepage = false; + + assert(opts); + + n = 0; + for (i = 0; i < ARRAY_SIZE(opts->sps); i++) { + PPCHash64SegmentPageSizes *sps = &opts->sps[i]; + int j; + int m = 0; + + assert(n <= i); + + if (!sps->page_shift) { + break; + } + + for (j = 0; j < ARRAY_SIZE(sps->enc); j++) { + PPCHash64PageSize *ps = &sps->enc[j]; + + assert(m <= j); + if (!ps->page_shift) { + break; + } + + if (cb(opaque, sps->page_shift, ps->page_shift)) { + if (ps->page_shift >= 16) { + ci_largepage = true; + } + sps->enc[m++] = *ps; + } + } + + /* Clear rest of the row */ + for (j = m; j < ARRAY_SIZE(sps->enc); j++) { + memset(&sps->enc[j], 0, sizeof(sps->enc[j])); + } + + if (m) { + n++; + } + } + + /* Clear the rest of the table */ + for (i = n; i < ARRAY_SIZE(opts->sps); i++) { + memset(&opts->sps[i], 0, sizeof(opts->sps[i])); + } + + if (!ci_largepage) { + opts->flags &= ~PPC_HASH64_CI_LARGEPAGE; + } +} + +static void cap_hpt_maxpagesize_cpu_apply(SpaprMachineState *spapr, + PowerPCCPU *cpu, + uint8_t val, Error **errp) +{ + unsigned maxshift = val; + + ppc_hash64_filter_pagesizes(cpu, spapr_pagesize_cb, &maxshift); +} + +static void cap_nested_kvm_hv_apply(SpaprMachineState *spapr, + uint8_t val, Error **errp) +{ + ERRP_GUARD(); + PowerPCCPU *cpu = POWERPC_CPU(first_cpu); + + if (!val) { + /* capability disabled by default */ + return; + } + + if (tcg_enabled()) { + error_setg(errp, "No Nested KVM-HV support in TCG"); + error_append_hint(errp, "Try appending -machine cap-nested-hv=off\n"); + } else if (kvm_enabled()) { + if (!ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_3_00, 0, + spapr->max_compat_pvr)) { + error_setg(errp, "Nested KVM-HV only supported on POWER9"); + error_append_hint(errp, + "Try appending -machine max-cpu-compat=power9\n"); + return; + } + + if (!kvmppc_has_cap_nested_kvm_hv()) { + error_setg(errp, + "KVM implementation does not support Nested KVM-HV"); + error_append_hint(errp, + "Try appending -machine cap-nested-hv=off\n"); + } else if (kvmppc_set_cap_nested_kvm_hv(val) < 0) { + error_setg(errp, "Error enabling cap-nested-hv with KVM"); + error_append_hint(errp, + "Try appending -machine cap-nested-hv=off\n"); + } + } +} + +static void cap_large_decr_apply(SpaprMachineState *spapr, + uint8_t val, Error **errp) +{ + ERRP_GUARD(); + PowerPCCPU *cpu = POWERPC_CPU(first_cpu); + PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu); + + if (!val) { + return; /* Disabled by default */ + } + + if (tcg_enabled()) { + if (!ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_3_00, 0, + spapr->max_compat_pvr)) { + error_setg(errp, "Large decrementer only supported on POWER9"); + error_append_hint(errp, "Try -cpu POWER9\n"); + return; + } + } else if (kvm_enabled()) { + int kvm_nr_bits = kvmppc_get_cap_large_decr(); + + if (!kvm_nr_bits) { + error_setg(errp, "No large decrementer support"); + error_append_hint(errp, + "Try appending -machine cap-large-decr=off\n"); + } else if (pcc->lrg_decr_bits != kvm_nr_bits) { + error_setg(errp, + "KVM large decrementer size (%d) differs from model (%d)", + kvm_nr_bits, pcc->lrg_decr_bits); + error_append_hint(errp, + "Try appending -machine cap-large-decr=off\n"); + } + } +} + +static void
cap_large_decr_cpu_apply(SpaprMachineState *spapr, + PowerPCCPU *cpu, + uint8_t val, Error **errp) +{ + ERRP_GUARD(); + CPUPPCState *env = &cpu->env; + target_ulong lpcr = env->spr[SPR_LPCR]; + + if (kvm_enabled()) { + if (kvmppc_enable_cap_large_decr(cpu, val)) { + error_setg(errp, "No large decrementer support"); + error_append_hint(errp, + "Try appending -machine cap-large-decr=off\n"); + } + } + + if (val) { + lpcr |= LPCR_LD; + } else { + lpcr &= ~LPCR_LD; + } + ppc_store_lpcr(cpu, lpcr); +} + +static void cap_ccf_assist_apply(SpaprMachineState *spapr, uint8_t val, + Error **errp) +{ + ERRP_GUARD(); + uint8_t kvm_val = kvmppc_get_cap_count_cache_flush_assist(); + + if (tcg_enabled() && val) { + /* TCG doesn't implement anything here, but allow with a warning */ + warn_report("TCG doesn't support requested feature, cap-ccf-assist=on"); + } else if (kvm_enabled() && (val > kvm_val)) { + uint8_t kvm_ibs = kvmppc_get_cap_safe_indirect_branch(); + + if (kvm_ibs == SPAPR_CAP_FIXED_CCD) { + /* + * If we don't have CCF assist on the host, the assist + * instruction is a harmless no-op. It won't correctly + * implement the cache count flush *but* if we have + * count-cache-disabled in the host, that flush is + * unnecessary. So, specifically allow this case. This + * allows us to have better performance on POWER9 DD2.3, + * while still working on POWER9 DD2.2 and POWER8 host + * cpus. + */ + return; + } + error_setg(errp, + "Requested count cache flush assist capability level not supported by KVM"); + error_append_hint(errp, "Try appending -machine cap-ccf-assist=off\n"); + } +} + +static void cap_fwnmi_apply(SpaprMachineState *spapr, uint8_t val, + Error **errp) +{ + ERRP_GUARD(); + if (!val) { + return; /* Disabled by default */ + } + + if (kvm_enabled()) { + if (!kvmppc_get_fwnmi()) { + error_setg(errp, +"Firmware Assisted Non-Maskable Interrupts (FWNMI) not supported by KVM."); + error_append_hint(errp, "Try appending -machine cap-fwnmi=off\n"); + } + } +} + +static void cap_rpt_invalidate_apply(SpaprMachineState *spapr, + uint8_t val, Error **errp) +{ + ERRP_GUARD(); + + if (!val) { + /* capability disabled by default */ + return; + } + + if (tcg_enabled()) { + error_setg(errp, "No H_RPT_INVALIDATE support in TCG"); + error_append_hint(errp, + "Try appending -machine cap-rpt-invalidate=off\n"); + } else if (kvm_enabled()) { + if (!kvmppc_has_cap_mmu_radix()) { + error_setg(errp, "H_RPT_INVALIDATE only supported on Radix"); + return; + } + + if (!kvmppc_has_cap_rpt_invalidate()) { + error_setg(errp, + "KVM implementation does not support H_RPT_INVALIDATE"); + error_append_hint(errp, + "Try appending -machine cap-rpt-invalidate=off\n"); + } else { + kvmppc_enable_h_rpt_invalidate(); + } + } +} + +SpaprCapabilityInfo capability_table[SPAPR_CAP_NUM] = { + [SPAPR_CAP_HTM] = { + .name = "htm", + .description = "Allow Hardware Transactional Memory (HTM)", + .index = SPAPR_CAP_HTM, + .get = spapr_cap_get_bool, + .set = spapr_cap_set_bool, + .type = "bool", + .apply = cap_htm_apply, + }, + [SPAPR_CAP_VSX] = { + .name = "vsx", + .description = "Allow Vector Scalar Extensions (VSX)", + .index = SPAPR_CAP_VSX, + .get = spapr_cap_get_bool, + .set = spapr_cap_set_bool, + .type = "bool", + .apply = cap_vsx_apply, + }, + [SPAPR_CAP_DFP] = { + .name = "dfp", + .description = "Allow Decimal Floating Point (DFP)", + .index = SPAPR_CAP_DFP, + .get = spapr_cap_get_bool, + .set = spapr_cap_set_bool, + .type = "bool", + .apply = cap_dfp_apply, + }, + [SPAPR_CAP_CFPC] = { + .name = "cfpc", + .description =
"Cache Flush on Privilege Change" VALUE_DESC_TRISTATE, + .index = SPAPR_CAP_CFPC, + .get = spapr_cap_get_string, + .set = spapr_cap_set_string, + .type = "string", + .possible = &cap_cfpc_possible, + .apply = cap_safe_cache_apply, + }, + [SPAPR_CAP_SBBC] = { + .name = "sbbc", + .description = "Speculation Barrier Bounds Checking" VALUE_DESC_TRISTATE, + .index = SPAPR_CAP_SBBC, + .get = spapr_cap_get_string, + .set = spapr_cap_set_string, + .type = "string", + .possible = &cap_sbbc_possible, + .apply = cap_safe_bounds_check_apply, + }, + [SPAPR_CAP_IBS] = { + .name = "ibs", + .description = + "Indirect Branch Speculation (broken, workaround, fixed-ibs," + "fixed-ccd, fixed-na)", + .index = SPAPR_CAP_IBS, + .get = spapr_cap_get_string, + .set = spapr_cap_set_string, + .type = "string", + .possible = &cap_ibs_possible, + .apply = cap_safe_indirect_branch_apply, + }, + [SPAPR_CAP_HPT_MAXPAGESIZE] = { + .name = "hpt-max-page-size", + .description = "Maximum page size for Hash Page Table guests", + .index = SPAPR_CAP_HPT_MAXPAGESIZE, + .get = spapr_cap_get_pagesize, + .set = spapr_cap_set_pagesize, + .type = "int", + .apply = cap_hpt_maxpagesize_apply, + .cpu_apply = cap_hpt_maxpagesize_cpu_apply, + .migrate_needed = cap_hpt_maxpagesize_migrate_needed, + }, + [SPAPR_CAP_NESTED_KVM_HV] = { + .name = "nested-hv", + .description = "Allow Nested KVM-HV", + .index = SPAPR_CAP_NESTED_KVM_HV, + .get = spapr_cap_get_bool, + .set = spapr_cap_set_bool, + .type = "bool", + .apply = cap_nested_kvm_hv_apply, + }, + [SPAPR_CAP_LARGE_DECREMENTER] = { + .name = "large-decr", + .description = "Allow Large Decrementer", + .index = SPAPR_CAP_LARGE_DECREMENTER, + .get = spapr_cap_get_bool, + .set = spapr_cap_set_bool, + .type = "bool", + .apply = cap_large_decr_apply, + .cpu_apply = cap_large_decr_cpu_apply, + }, + [SPAPR_CAP_CCF_ASSIST] = { + .name = "ccf-assist", + .description = "Count Cache Flush Assist via HW Instruction", + .index = SPAPR_CAP_CCF_ASSIST, + .get = spapr_cap_get_bool, + .set = spapr_cap_set_bool, + .type = "bool", + .apply = cap_ccf_assist_apply, + }, + [SPAPR_CAP_FWNMI] = { + .name = "fwnmi", + .description = "Implements PAPR FWNMI option", + .index = SPAPR_CAP_FWNMI, + .get = spapr_cap_get_bool, + .set = spapr_cap_set_bool, + .type = "bool", + .apply = cap_fwnmi_apply, + }, + [SPAPR_CAP_RPT_INVALIDATE] = { + .name = "rpt-invalidate", + .description = "Allow H_RPT_INVALIDATE", + .index = SPAPR_CAP_RPT_INVALIDATE, + .get = spapr_cap_get_bool, + .set = spapr_cap_set_bool, + .type = "bool", + .apply = cap_rpt_invalidate_apply, + }, +}; + +static SpaprCapabilities default_caps_with_cpu(SpaprMachineState *spapr, + const char *cputype) +{ + SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr); + SpaprCapabilities caps; + + caps = smc->default_caps; + + if (!ppc_type_check_compat(cputype, CPU_POWERPC_LOGICAL_3_00, + 0, spapr->max_compat_pvr)) { + caps.caps[SPAPR_CAP_LARGE_DECREMENTER] = SPAPR_CAP_OFF; + } + + if (!ppc_type_check_compat(cputype, CPU_POWERPC_LOGICAL_2_07, + 0, spapr->max_compat_pvr)) { + caps.caps[SPAPR_CAP_HTM] = SPAPR_CAP_OFF; + caps.caps[SPAPR_CAP_CFPC] = SPAPR_CAP_BROKEN; + } + + if (!ppc_type_check_compat(cputype, CPU_POWERPC_LOGICAL_2_06_PLUS, + 0, spapr->max_compat_pvr)) { + caps.caps[SPAPR_CAP_SBBC] = SPAPR_CAP_BROKEN; + } + + if (!ppc_type_check_compat(cputype, CPU_POWERPC_LOGICAL_2_06, + 0, spapr->max_compat_pvr)) { + caps.caps[SPAPR_CAP_VSX] = SPAPR_CAP_OFF; + caps.caps[SPAPR_CAP_DFP] = SPAPR_CAP_OFF; + caps.caps[SPAPR_CAP_IBS] = SPAPR_CAP_BROKEN; + } + + /* This is for 
pseries-2.12 and older */ + if (smc->default_caps.caps[SPAPR_CAP_HPT_MAXPAGESIZE] == 0) { + uint8_t mps; + + if (kvmppc_hpt_needs_host_contiguous_pages()) { + mps = ctz64(qemu_minrampagesize()); + } else { + mps = 34; /* allow everything up to 16GiB, i.e. everything */ + } + + caps.caps[SPAPR_CAP_HPT_MAXPAGESIZE] = mps; + } + + return caps; +} + +int spapr_caps_pre_load(void *opaque) +{ + SpaprMachineState *spapr = opaque; + + /* Set to default so we can tell if this came in with the migration */ + spapr->mig = spapr->def; + return 0; +} + +int spapr_caps_pre_save(void *opaque) +{ + SpaprMachineState *spapr = opaque; + + spapr->mig = spapr->eff; + return 0; +} + +/* This has to be called from the top-level spapr post_load, not the + * caps specific one. Otherwise it wouldn't be called when the source + * caps are all defaults, which could still conflict with overridden + * caps on the destination */ +int spapr_caps_post_migration(SpaprMachineState *spapr) +{ + int i; + bool ok = true; + SpaprCapabilities dstcaps = spapr->eff; + SpaprCapabilities srccaps; + + srccaps = default_caps_with_cpu(spapr, MACHINE(spapr)->cpu_type); + for (i = 0; i < SPAPR_CAP_NUM; i++) { + /* If not default value then assume came in with the migration */ + if (spapr->mig.caps[i] != spapr->def.caps[i]) { + srccaps.caps[i] = spapr->mig.caps[i]; + } + } + + for (i = 0; i < SPAPR_CAP_NUM; i++) { + SpaprCapabilityInfo *info = &capability_table[i]; + + if (srccaps.caps[i] > dstcaps.caps[i]) { + error_report("cap-%s higher level (%d) in incoming stream than on destination (%d)", + info->name, srccaps.caps[i], dstcaps.caps[i]); + ok = false; + } + + if (srccaps.caps[i] < dstcaps.caps[i]) { + warn_report("cap-%s lower level (%d) in incoming stream than on destination (%d)", + info->name, srccaps.caps[i], dstcaps.caps[i]); + } + } + + return ok ? 0 : -EINVAL; +} + +/* Used to generate the migration field and needed function for a spapr cap */ +#define SPAPR_CAP_MIG_STATE(sname, cap) \ +static bool spapr_cap_##sname##_needed(void *opaque) \ +{ \ + SpaprMachineState *spapr = opaque; \ + bool (*needed)(void *opaque) = \ + capability_table[cap].migrate_needed; \ + \ + return needed ? 
needed(opaque) : true && \ + spapr->cmd_line_caps[cap] && \ + (spapr->eff.caps[cap] != \ + spapr->def.caps[cap]); \ +} \ + \ +const VMStateDescription vmstate_spapr_cap_##sname = { \ + .name = "spapr/cap/" #sname, \ + .version_id = 1, \ + .minimum_version_id = 1, \ + .needed = spapr_cap_##sname##_needed, \ + .fields = (VMStateField[]) { \ + VMSTATE_UINT8(mig.caps[cap], \ + SpaprMachineState), \ + VMSTATE_END_OF_LIST() \ + }, \ +} + +SPAPR_CAP_MIG_STATE(htm, SPAPR_CAP_HTM); +SPAPR_CAP_MIG_STATE(vsx, SPAPR_CAP_VSX); +SPAPR_CAP_MIG_STATE(dfp, SPAPR_CAP_DFP); +SPAPR_CAP_MIG_STATE(cfpc, SPAPR_CAP_CFPC); +SPAPR_CAP_MIG_STATE(sbbc, SPAPR_CAP_SBBC); +SPAPR_CAP_MIG_STATE(ibs, SPAPR_CAP_IBS); +SPAPR_CAP_MIG_STATE(hpt_maxpagesize, SPAPR_CAP_HPT_MAXPAGESIZE); +SPAPR_CAP_MIG_STATE(nested_kvm_hv, SPAPR_CAP_NESTED_KVM_HV); +SPAPR_CAP_MIG_STATE(large_decr, SPAPR_CAP_LARGE_DECREMENTER); +SPAPR_CAP_MIG_STATE(ccf_assist, SPAPR_CAP_CCF_ASSIST); +SPAPR_CAP_MIG_STATE(fwnmi, SPAPR_CAP_FWNMI); +SPAPR_CAP_MIG_STATE(rpt_invalidate, SPAPR_CAP_RPT_INVALIDATE); + +void spapr_caps_init(SpaprMachineState *spapr) +{ + SpaprCapabilities default_caps; + int i; + + /* Compute the actual set of caps we should run with */ + default_caps = default_caps_with_cpu(spapr, MACHINE(spapr)->cpu_type); + + for (i = 0; i < SPAPR_CAP_NUM; i++) { + /* Store the defaults */ + spapr->def.caps[i] = default_caps.caps[i]; + /* If not set on the command line then apply the default value */ + if (!spapr->cmd_line_caps[i]) { + spapr->eff.caps[i] = default_caps.caps[i]; + } + } +} + +void spapr_caps_apply(SpaprMachineState *spapr) +{ + int i; + + for (i = 0; i < SPAPR_CAP_NUM; i++) { + SpaprCapabilityInfo *info = &capability_table[i]; + + /* + * If the apply function can't set the desired level and thinks it's + * fatal, it should cause that. + */ + info->apply(spapr, spapr->eff.caps[i], &error_fatal); + } +} + +void spapr_caps_cpu_apply(SpaprMachineState *spapr, PowerPCCPU *cpu) +{ + int i; + + for (i = 0; i < SPAPR_CAP_NUM; i++) { + SpaprCapabilityInfo *info = &capability_table[i]; + + /* + * If the apply function can't set the desired level and thinks it's + * fatal, it should cause that. + */ + if (info->cpu_apply) { + info->cpu_apply(spapr, cpu, spapr->eff.caps[i], &error_fatal); + } + } +} + +void spapr_caps_add_properties(SpaprMachineClass *smc) +{ + ObjectClass *klass = OBJECT_CLASS(smc); + int i; + + for (i = 0; i < ARRAY_SIZE(capability_table); i++) { + SpaprCapabilityInfo *cap = &capability_table[i]; + char *name = g_strdup_printf("cap-%s", cap->name); + char *desc; + + object_class_property_add(klass, name, cap->type, + cap->get, cap->set, + NULL, cap); + + desc = g_strdup_printf("%s", cap->description); + object_class_property_set_description(klass, name, desc); + g_free(name); + g_free(desc); + } +} diff --git a/hw/ppc/spapr_cpu_core.c b/hw/ppc/spapr_cpu_core.c new file mode 100644 index 000000000..58e7341cb --- /dev/null +++ b/hw/ppc/spapr_cpu_core.c @@ -0,0 +1,391 @@ +/* + * sPAPR CPU core device, acts as container of CPU thread devices. + * + * Copyright (C) 2016 Bharata B Rao + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ */ + +#include "qemu/osdep.h" +#include "hw/cpu/core.h" +#include "hw/ppc/spapr_cpu_core.h" +#include "hw/qdev-properties.h" +#include "migration/vmstate.h" +#include "target/ppc/cpu.h" +#include "hw/ppc/spapr.h" +#include "qapi/error.h" +#include "sysemu/cpus.h" +#include "sysemu/kvm.h" +#include "target/ppc/kvm_ppc.h" +#include "hw/ppc/ppc.h" +#include "target/ppc/mmu-hash64.h" +#include "sysemu/numa.h" +#include "sysemu/reset.h" +#include "sysemu/hw_accel.h" +#include "qemu/error-report.h" + +static void spapr_reset_vcpu(PowerPCCPU *cpu) +{ + CPUState *cs = CPU(cpu); + CPUPPCState *env = &cpu->env; + PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu); + SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu); + target_ulong lpcr; + SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine()); + + cpu_reset(cs); + + env->spr[SPR_HIOR] = 0; + + lpcr = env->spr[SPR_LPCR]; + + /* Set emulated LPCR to not send interrupts to hypervisor. Note that + * under KVM, the actual HW LPCR will be set differently by KVM itself, + * the settings below ensure proper operations with TCG in absence of + * a real hypervisor. + * + * Disable Power-saving mode Exit Cause exceptions for the CPU, so + * we don't get spurious wakups before an RTAS start-cpu call. + * For the same reason, set PSSCR_EC. + */ + lpcr &= ~(LPCR_VPM1 | LPCR_ISL | LPCR_KBV | pcc->lpcr_pm); + lpcr |= LPCR_LPES0 | LPCR_LPES1; + env->spr[SPR_PSSCR] |= PSSCR_EC; + + ppc_store_lpcr(cpu, lpcr); + + /* Set a full AMOR so guest can use the AMR as it sees fit */ + env->spr[SPR_AMOR] = 0xffffffffffffffffull; + + spapr_cpu->vpa_addr = 0; + spapr_cpu->slb_shadow_addr = 0; + spapr_cpu->slb_shadow_size = 0; + spapr_cpu->dtl_addr = 0; + spapr_cpu->dtl_size = 0; + + spapr_caps_cpu_apply(spapr, cpu); + + kvm_check_mmu(cpu, &error_fatal); + + spapr_irq_cpu_intc_reset(spapr, cpu); +} + +void spapr_cpu_set_entry_state(PowerPCCPU *cpu, target_ulong nip, + target_ulong r1, target_ulong r3, + target_ulong r4) +{ + PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu); + CPUPPCState *env = &cpu->env; + + env->nip = nip; + env->gpr[1] = r1; + env->gpr[3] = r3; + env->gpr[4] = r4; + kvmppc_set_reg_ppc_online(cpu, 1); + CPU(cpu)->halted = 0; + /* Enable Power-saving mode Exit Cause exceptions */ + ppc_store_lpcr(cpu, env->spr[SPR_LPCR] | pcc->lpcr_pm); +} + +/* + * Return the sPAPR CPU core type for @model which essentially is the CPU + * model specified with -cpu cmdline option. 
+ */ +const char *spapr_get_cpu_core_type(const char *cpu_type) +{ + int len = strlen(cpu_type) - strlen(POWERPC_CPU_TYPE_SUFFIX); + char *core_type = g_strdup_printf(SPAPR_CPU_CORE_TYPE_NAME("%.*s"), + len, cpu_type); + ObjectClass *oc = object_class_by_name(core_type); + + g_free(core_type); + if (!oc) { + return NULL; + } + + return object_class_get_name(oc); +} + +static bool slb_shadow_needed(void *opaque) +{ + SpaprCpuState *spapr_cpu = opaque; + + return spapr_cpu->slb_shadow_addr != 0; +} + +static const VMStateDescription vmstate_spapr_cpu_slb_shadow = { + .name = "spapr_cpu/vpa/slb_shadow", + .version_id = 1, + .minimum_version_id = 1, + .needed = slb_shadow_needed, + .fields = (VMStateField[]) { + VMSTATE_UINT64(slb_shadow_addr, SpaprCpuState), + VMSTATE_UINT64(slb_shadow_size, SpaprCpuState), + VMSTATE_END_OF_LIST() + } +}; + +static bool dtl_needed(void *opaque) +{ + SpaprCpuState *spapr_cpu = opaque; + + return spapr_cpu->dtl_addr != 0; +} + +static const VMStateDescription vmstate_spapr_cpu_dtl = { + .name = "spapr_cpu/vpa/dtl", + .version_id = 1, + .minimum_version_id = 1, + .needed = dtl_needed, + .fields = (VMStateField[]) { + VMSTATE_UINT64(dtl_addr, SpaprCpuState), + VMSTATE_UINT64(dtl_size, SpaprCpuState), + VMSTATE_END_OF_LIST() + } +}; + +static bool vpa_needed(void *opaque) +{ + SpaprCpuState *spapr_cpu = opaque; + + return spapr_cpu->vpa_addr != 0; +} + +static const VMStateDescription vmstate_spapr_cpu_vpa = { + .name = "spapr_cpu/vpa", + .version_id = 1, + .minimum_version_id = 1, + .needed = vpa_needed, + .fields = (VMStateField[]) { + VMSTATE_UINT64(vpa_addr, SpaprCpuState), + VMSTATE_END_OF_LIST() + }, + .subsections = (const VMStateDescription * []) { + &vmstate_spapr_cpu_slb_shadow, + &vmstate_spapr_cpu_dtl, + NULL + } +}; + +static const VMStateDescription vmstate_spapr_cpu_state = { + .name = "spapr_cpu", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_END_OF_LIST() + }, + .subsections = (const VMStateDescription * []) { + &vmstate_spapr_cpu_vpa, + NULL + } +}; + +static void spapr_unrealize_vcpu(PowerPCCPU *cpu, SpaprCpuCore *sc) +{ + if (!sc->pre_3_0_migration) { + vmstate_unregister(NULL, &vmstate_spapr_cpu_state, cpu->machine_data); + } + spapr_irq_cpu_intc_destroy(SPAPR_MACHINE(qdev_get_machine()), cpu); + qdev_unrealize(DEVICE(cpu)); +} + +/* + * Called when CPUs are hot-plugged. + */ +static void spapr_cpu_core_reset(DeviceState *dev) +{ + CPUCore *cc = CPU_CORE(dev); + SpaprCpuCore *sc = SPAPR_CPU_CORE(dev); + int i; + + for (i = 0; i < cc->nr_threads; i++) { + spapr_reset_vcpu(sc->threads[i]); + } +} + +/* + * Called by the machine reset. + */ +static void spapr_cpu_core_reset_handler(void *opaque) +{ + spapr_cpu_core_reset(opaque); +} + +static void spapr_delete_vcpu(PowerPCCPU *cpu) +{ + SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu); + + cpu->machine_data = NULL; + g_free(spapr_cpu); + object_unparent(OBJECT(cpu)); +} + +static void spapr_cpu_core_unrealize(DeviceState *dev) +{ + SpaprCpuCore *sc = SPAPR_CPU_CORE(OBJECT(dev)); + CPUCore *cc = CPU_CORE(dev); + int i; + + for (i = 0; i < cc->nr_threads; i++) { + if (sc->threads[i]) { + /* + * Since we can get here from the error path of + * spapr_cpu_core_realize(), make sure we only unrealize + * vCPUs that have already been realized.
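+ * (A thread whose qdev_realize() failed is left with "realized" == false, so the property check below skips it.)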
+ */ + if (object_property_get_bool(OBJECT(sc->threads[i]), "realized", + &error_abort)) { + spapr_unrealize_vcpu(sc->threads[i], sc); + } + spapr_delete_vcpu(sc->threads[i]); + } + } + g_free(sc->threads); + qemu_unregister_reset(spapr_cpu_core_reset_handler, sc); +} + +static bool spapr_realize_vcpu(PowerPCCPU *cpu, SpaprMachineState *spapr, + SpaprCpuCore *sc, Error **errp) +{ + CPUPPCState *env = &cpu->env; + CPUState *cs = CPU(cpu); + + if (!qdev_realize(DEVICE(cpu), NULL, errp)) { + return false; + } + + /* Set time-base frequency to 512 MHz */ + cpu_ppc_tb_init(env, SPAPR_TIMEBASE_FREQ); + + cpu_ppc_set_vhyp(cpu, PPC_VIRTUAL_HYPERVISOR(spapr)); + kvmppc_set_papr(cpu); + + if (spapr_irq_cpu_intc_create(spapr, cpu, errp) < 0) { + qdev_unrealize(DEVICE(cpu)); + return false; + } + + if (!sc->pre_3_0_migration) { + vmstate_register(NULL, cs->cpu_index, &vmstate_spapr_cpu_state, + cpu->machine_data); + } + return true; +} + +static PowerPCCPU *spapr_create_vcpu(SpaprCpuCore *sc, int i, Error **errp) +{ + SpaprCpuCoreClass *scc = SPAPR_CPU_CORE_GET_CLASS(sc); + CPUCore *cc = CPU_CORE(sc); + g_autoptr(Object) obj = NULL; + g_autofree char *id = NULL; + CPUState *cs; + PowerPCCPU *cpu; + + obj = object_new(scc->cpu_type); + + cs = CPU(obj); + cpu = POWERPC_CPU(obj); + /* + * All CPUs start halted. CPU0 is unhalted from the machine level reset code + * and the rest are explicitly started up by the guest using an RTAS call. + */ + cs->start_powered_off = true; + cs->cpu_index = cc->core_id + i; + if (!spapr_set_vcpu_id(cpu, cs->cpu_index, errp)) { + return NULL; + } + + cpu->node_id = sc->node_id; + + id = g_strdup_printf("thread[%d]", i); + object_property_add_child(OBJECT(sc), id, obj); + + cpu->machine_data = g_new0(SpaprCpuState, 1); + + return cpu; +} + +static void spapr_cpu_core_realize(DeviceState *dev, Error **errp) +{ + /* We don't use SPAPR_MACHINE() in order to exit gracefully if the user + * tries to add a sPAPR CPU core to a non-pseries machine. 
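+ * (object_dynamic_cast() returns NULL on a type mismatch instead of asserting like the QOM cast macro would, letting us report a proper error below.)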
+ */ + SpaprMachineState *spapr = + (SpaprMachineState *) object_dynamic_cast(qdev_get_machine(), + TYPE_SPAPR_MACHINE); + SpaprCpuCore *sc = SPAPR_CPU_CORE(OBJECT(dev)); + CPUCore *cc = CPU_CORE(OBJECT(dev)); + int i; + + if (!spapr) { + error_setg(errp, TYPE_SPAPR_CPU_CORE " needs a pseries machine"); + return; + } + + qemu_register_reset(spapr_cpu_core_reset_handler, sc); + sc->threads = g_new0(PowerPCCPU *, cc->nr_threads); + for (i = 0; i < cc->nr_threads; i++) { + sc->threads[i] = spapr_create_vcpu(sc, i, errp); + if (!sc->threads[i] || + !spapr_realize_vcpu(sc->threads[i], spapr, sc, errp)) { + spapr_cpu_core_unrealize(dev); + return; + } + } +} + +static Property spapr_cpu_core_properties[] = { + DEFINE_PROP_INT32("node-id", SpaprCpuCore, node_id, CPU_UNSET_NUMA_NODE_ID), + DEFINE_PROP_BOOL("pre-3.0-migration", SpaprCpuCore, pre_3_0_migration, + false), + DEFINE_PROP_END_OF_LIST() +}; + +static void spapr_cpu_core_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + SpaprCpuCoreClass *scc = SPAPR_CPU_CORE_CLASS(oc); + + dc->realize = spapr_cpu_core_realize; + dc->unrealize = spapr_cpu_core_unrealize; + dc->reset = spapr_cpu_core_reset; + device_class_set_props(dc, spapr_cpu_core_properties); + scc->cpu_type = data; +} + +#define DEFINE_SPAPR_CPU_CORE_TYPE(cpu_model) \ + { \ + .parent = TYPE_SPAPR_CPU_CORE, \ + .class_data = (void *) POWERPC_CPU_TYPE_NAME(cpu_model), \ + .class_init = spapr_cpu_core_class_init, \ + .name = SPAPR_CPU_CORE_TYPE_NAME(cpu_model), \ + } + +static const TypeInfo spapr_cpu_core_type_infos[] = { + { + .name = TYPE_SPAPR_CPU_CORE, + .parent = TYPE_CPU_CORE, + .abstract = true, + .instance_size = sizeof(SpaprCpuCore), + .class_size = sizeof(SpaprCpuCoreClass), + }, + DEFINE_SPAPR_CPU_CORE_TYPE("970_v2.2"), + DEFINE_SPAPR_CPU_CORE_TYPE("970mp_v1.0"), + DEFINE_SPAPR_CPU_CORE_TYPE("970mp_v1.1"), + DEFINE_SPAPR_CPU_CORE_TYPE("power5+_v2.1"), + DEFINE_SPAPR_CPU_CORE_TYPE("power7_v2.3"), + DEFINE_SPAPR_CPU_CORE_TYPE("power7+_v2.1"), + DEFINE_SPAPR_CPU_CORE_TYPE("power8_v2.0"), + DEFINE_SPAPR_CPU_CORE_TYPE("power8e_v2.1"), + DEFINE_SPAPR_CPU_CORE_TYPE("power8nvl_v1.0"), + DEFINE_SPAPR_CPU_CORE_TYPE("power9_v1.0"), + DEFINE_SPAPR_CPU_CORE_TYPE("power9_v2.0"), + DEFINE_SPAPR_CPU_CORE_TYPE("power10_v1.0"), + DEFINE_SPAPR_CPU_CORE_TYPE("power10_v2.0"), +#ifdef CONFIG_KVM + DEFINE_SPAPR_CPU_CORE_TYPE("host"), +#endif +}; + +DEFINE_TYPES(spapr_cpu_core_type_infos) diff --git a/hw/ppc/spapr_drc.c b/hw/ppc/spapr_drc.c new file mode 100644 index 000000000..f8ac0a10d --- /dev/null +++ b/hw/ppc/spapr_drc.c @@ -0,0 +1,1326 @@ +/* + * QEMU SPAPR Dynamic Reconfiguration Connector Implementation + * + * Copyright IBM Corp. 2014 + * + * Authors: + * Michael Roth + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qapi/qmp/qnull.h" +#include "qemu/cutils.h" +#include "hw/ppc/spapr_drc.h" +#include "qom/object.h" +#include "migration/vmstate.h" +#include "qapi/error.h" +#include "qapi/qapi-events-qdev.h" +#include "qapi/visitor.h" +#include "qemu/error-report.h" +#include "hw/ppc/spapr.h" /* for RTAS return codes */ +#include "hw/pci-host/spapr.h" /* spapr_phb_remove_pci_device_cb callback */ +#include "hw/ppc/spapr_nvdimm.h" +#include "sysemu/device_tree.h" +#include "sysemu/reset.h" +#include "trace.h" + +#define DRC_CONTAINER_PATH "/dr-connector" +#define DRC_INDEX_TYPE_SHIFT 28 +#define DRC_INDEX_ID_MASK ((1ULL << DRC_INDEX_TYPE_SHIFT) - 1) + +SpaprDrcType spapr_drc_type(SpaprDrc *drc) +{ + SpaprDrcClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc); + + return 1 << drck->typeshift; +} + +uint32_t spapr_drc_index(SpaprDrc *drc) +{ + SpaprDrcClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc); + + /* no set format for a drc index: it only needs to be globally + * unique. this is how we encode the DRC type on bare-metal + * however, so might as well do that here + */ + return (drck->typeshift << DRC_INDEX_TYPE_SHIFT) + | (drc->id & DRC_INDEX_ID_MASK); +} + +static void spapr_drc_release(SpaprDrc *drc) +{ + SpaprDrcClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc); + + drck->release(drc->dev); + + drc->unplug_requested = false; + g_free(drc->fdt); + drc->fdt = NULL; + drc->fdt_start_offset = 0; + object_property_del(OBJECT(drc), "device"); + drc->dev = NULL; +} + +static uint32_t drc_isolate_physical(SpaprDrc *drc) +{ + switch (drc->state) { + case SPAPR_DRC_STATE_PHYSICAL_POWERON: + return RTAS_OUT_SUCCESS; /* Nothing to do */ + case SPAPR_DRC_STATE_PHYSICAL_CONFIGURED: + break; /* see below */ + case SPAPR_DRC_STATE_PHYSICAL_UNISOLATE: + return RTAS_OUT_PARAM_ERROR; /* not allowed */ + default: + g_assert_not_reached(); + } + + drc->state = SPAPR_DRC_STATE_PHYSICAL_POWERON; + + if (drc->unplug_requested) { + uint32_t drc_index = spapr_drc_index(drc); + trace_spapr_drc_set_isolation_state_finalizing(drc_index); + spapr_drc_release(drc); + } + + return RTAS_OUT_SUCCESS; +} + +static uint32_t drc_unisolate_physical(SpaprDrc *drc) +{ + switch (drc->state) { + case SPAPR_DRC_STATE_PHYSICAL_UNISOLATE: + case SPAPR_DRC_STATE_PHYSICAL_CONFIGURED: + return RTAS_OUT_SUCCESS; /* Nothing to do */ + case SPAPR_DRC_STATE_PHYSICAL_POWERON: + break; /* see below */ + default: + g_assert_not_reached(); + } + + /* cannot unisolate a non-existent resource, and, or resources + * which are in an 'UNUSABLE' allocation state. (PAPR 2.7, + * 13.5.3.5) + */ + if (!drc->dev) { + return RTAS_OUT_NO_SUCH_INDICATOR; + } + + drc->state = SPAPR_DRC_STATE_PHYSICAL_UNISOLATE; + drc->ccs_offset = drc->fdt_start_offset; + drc->ccs_depth = 0; + + return RTAS_OUT_SUCCESS; +} + +static uint32_t drc_isolate_logical(SpaprDrc *drc) +{ + switch (drc->state) { + case SPAPR_DRC_STATE_LOGICAL_AVAILABLE: + case SPAPR_DRC_STATE_LOGICAL_UNUSABLE: + return RTAS_OUT_SUCCESS; /* Nothing to do */ + case SPAPR_DRC_STATE_LOGICAL_CONFIGURED: + break; /* see below */ + case SPAPR_DRC_STATE_LOGICAL_UNISOLATE: + return RTAS_OUT_PARAM_ERROR; /* not allowed */ + default: + g_assert_not_reached(); + } + + /* + * Fail any requests to ISOLATE the LMB DRC if this LMB doesn't + * belong to a DIMM device that is marked for removal. 
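+     * (unplug_requested is only set once spapr_drc_unplug_request() has
+     * been called for this connector, so for an LMB whose parent DIMM is
+     * not being removed the guard below effectively reduces to:
+     *
+     *     if (spapr_drc_type(drc) == SPAPR_DR_CONNECTOR_TYPE_LMB) {
+     *         return RTAS_OUT_HW_ERROR;
+     *     }
+     *
+     * which is exactly the failure mode described next.)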
+ * + * Currently the guest userspace tool drmgr that drives the memory + * hotplug/unplug will just try to remove a set of 'removable' LMBs + * in response to a hot unplug request that is based on drc-count. + * If the LMB being removed doesn't belong to a DIMM device that is + * actually being unplugged, fail the isolation request here. + */ + if (spapr_drc_type(drc) == SPAPR_DR_CONNECTOR_TYPE_LMB + && !drc->unplug_requested) { + return RTAS_OUT_HW_ERROR; + } + + drc->state = SPAPR_DRC_STATE_LOGICAL_AVAILABLE; + + return RTAS_OUT_SUCCESS; +} + +static uint32_t drc_unisolate_logical(SpaprDrc *drc) +{ + SpaprMachineState *spapr = NULL; + + switch (drc->state) { + case SPAPR_DRC_STATE_LOGICAL_UNISOLATE: + case SPAPR_DRC_STATE_LOGICAL_CONFIGURED: + /* + * Unisolating a logical DRC that was marked for unplug + * means that the kernel is refusing the removal. + */ + if (drc->unplug_requested && drc->dev) { + if (spapr_drc_type(drc) == SPAPR_DR_CONNECTOR_TYPE_LMB) { + spapr = SPAPR_MACHINE(qdev_get_machine()); + + spapr_memory_unplug_rollback(spapr, drc->dev); + } + + drc->unplug_requested = false; + + if (drc->dev->id) { + error_report("Device hotunplug rejected by the guest " + "for device %s", drc->dev->id); + } + + qapi_event_send_device_unplug_guest_error(!!drc->dev->id, + drc->dev->id, + drc->dev->canonical_path); + } + + return RTAS_OUT_SUCCESS; /* Nothing to do */ + case SPAPR_DRC_STATE_LOGICAL_AVAILABLE: + break; /* see below */ + case SPAPR_DRC_STATE_LOGICAL_UNUSABLE: + return RTAS_OUT_NO_SUCH_INDICATOR; /* not allowed */ + default: + g_assert_not_reached(); + } + + /* Move to AVAILABLE state should have ensured device was present */ + g_assert(drc->dev); + + drc->state = SPAPR_DRC_STATE_LOGICAL_UNISOLATE; + drc->ccs_offset = drc->fdt_start_offset; + drc->ccs_depth = 0; + + return RTAS_OUT_SUCCESS; +} + +static uint32_t drc_set_usable(SpaprDrc *drc) +{ + switch (drc->state) { + case SPAPR_DRC_STATE_LOGICAL_AVAILABLE: + case SPAPR_DRC_STATE_LOGICAL_UNISOLATE: + case SPAPR_DRC_STATE_LOGICAL_CONFIGURED: + return RTAS_OUT_SUCCESS; /* Nothing to do */ + case SPAPR_DRC_STATE_LOGICAL_UNUSABLE: + break; /* see below */ + default: + g_assert_not_reached(); + } + + /* if there's no resource/device associated with the DRC, there's + * no way for us to put it in an allocation state consistent with + * being 'USABLE'. 
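+     * A guest typically lands here via a set-indicator RTAS call along
+     * the lines of (sketch of the call flow within this file, not a new
+     * API):
+     *
+     *     set-indicator(RTAS_SENSOR_TYPE_ALLOCATION_STATE, drc_index,
+     *                   SPAPR_DR_ALLOCATION_STATE_USABLE)
+     *
+     * dispatched through rtas_set_indicator() and
+     * rtas_set_allocation_state() further below.
+     *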
PAPR 2.7, 13.5.3.4 documents that this should + * result in an RTAS return code of -3 / "no such indicator" + */ + if (!drc->dev) { + return RTAS_OUT_NO_SUCH_INDICATOR; + } + if (drc->unplug_requested) { + /* Don't allow the guest to move a device away from UNUSABLE + * state when we want to unplug it */ + return RTAS_OUT_NO_SUCH_INDICATOR; + } + + drc->state = SPAPR_DRC_STATE_LOGICAL_AVAILABLE; + + return RTAS_OUT_SUCCESS; +} + +static uint32_t drc_set_unusable(SpaprDrc *drc) +{ + switch (drc->state) { + case SPAPR_DRC_STATE_LOGICAL_UNUSABLE: + return RTAS_OUT_SUCCESS; /* Nothing to do */ + case SPAPR_DRC_STATE_LOGICAL_AVAILABLE: + break; /* see below */ + case SPAPR_DRC_STATE_LOGICAL_UNISOLATE: + case SPAPR_DRC_STATE_LOGICAL_CONFIGURED: + return RTAS_OUT_NO_SUCH_INDICATOR; /* not allowed */ + default: + g_assert_not_reached(); + } + + drc->state = SPAPR_DRC_STATE_LOGICAL_UNUSABLE; + if (drc->unplug_requested) { + uint32_t drc_index = spapr_drc_index(drc); + trace_spapr_drc_set_allocation_state_finalizing(drc_index); + spapr_drc_release(drc); + } + + return RTAS_OUT_SUCCESS; +} + +static char *spapr_drc_name(SpaprDrc *drc) +{ + SpaprDrcClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc); + + /* human-readable name for a DRC to encode into the DT + * description. this is mainly only used within a guest in place + * of the unique DRC index. + * + * in the case of VIO/PCI devices, it corresponds to a "location + * code" that maps a logical device/function (DRC index) to a + * physical (or virtual in the case of VIO) location in the system + * by chaining together the "location label" for each + * encapsulating component. + * + * since this is more to do with diagnosing physical hardware + * issues than guest compatibility, we choose location codes/DRC + * names that adhere to the documented format, but avoid encoding + * the entire topology information into the label/code, instead + * just using the location codes based on the labels for the + * endpoints (VIO/PCI adaptor connectors), which is basically just + * "C" followed by an integer ID. + * + * DRC names as documented by PAPR+ v2.7, 13.5.2.4 + * location codes as documented by PAPR+ v2.7, 12.3.1.5 + */ + return g_strdup_printf("%s%d", drck->drc_name_prefix, drc->id); +} + +/* + * dr-entity-sense sensor value + * returned via get-sensor-state RTAS calls + * as expected by state diagram in PAPR+ 2.7, 13.4 + * based on the current allocation/indicator/power states + * for the DR connector. + */ +static SpaprDREntitySense physical_entity_sense(SpaprDrc *drc) +{ + /* this assumes all PCI devices are assigned to a 'live insertion' + * power domain, where QEMU manages power state automatically as + * opposed to the guest. present, non-PCI resources are unaffected + * by power state. + */ + return drc->dev ? 
SPAPR_DR_ENTITY_SENSE_PRESENT + : SPAPR_DR_ENTITY_SENSE_EMPTY; +} + +static SpaprDREntitySense logical_entity_sense(SpaprDrc *drc) +{ + switch (drc->state) { + case SPAPR_DRC_STATE_LOGICAL_UNUSABLE: + return SPAPR_DR_ENTITY_SENSE_UNUSABLE; + case SPAPR_DRC_STATE_LOGICAL_AVAILABLE: + case SPAPR_DRC_STATE_LOGICAL_UNISOLATE: + case SPAPR_DRC_STATE_LOGICAL_CONFIGURED: + g_assert(drc->dev); + return SPAPR_DR_ENTITY_SENSE_PRESENT; + default: + g_assert_not_reached(); + } +} + +static void prop_get_index(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + SpaprDrc *drc = SPAPR_DR_CONNECTOR(obj); + uint32_t value = spapr_drc_index(drc); + visit_type_uint32(v, name, &value, errp); +} + +static void prop_get_fdt(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + SpaprDrc *drc = SPAPR_DR_CONNECTOR(obj); + QNull *null = NULL; + int fdt_offset_next, fdt_offset, fdt_depth; + void *fdt; + + if (!drc->fdt) { + visit_type_null(v, NULL, &null, errp); + qobject_unref(null); + return; + } + + fdt = drc->fdt; + fdt_offset = drc->fdt_start_offset; + fdt_depth = 0; + + do { + const char *name = NULL; + const struct fdt_property *prop = NULL; + int prop_len = 0, name_len = 0; + uint32_t tag; + bool ok; + + tag = fdt_next_tag(fdt, fdt_offset, &fdt_offset_next); + switch (tag) { + case FDT_BEGIN_NODE: + fdt_depth++; + name = fdt_get_name(fdt, fdt_offset, &name_len); + if (!visit_start_struct(v, name, NULL, 0, errp)) { + return; + } + break; + case FDT_END_NODE: + /* shouldn't ever see an FDT_END_NODE before FDT_BEGIN_NODE */ + g_assert(fdt_depth > 0); + ok = visit_check_struct(v, errp); + visit_end_struct(v, NULL); + if (!ok) { + return; + } + fdt_depth--; + break; + case FDT_PROP: { + int i; + prop = fdt_get_property_by_offset(fdt, fdt_offset, &prop_len); + name = fdt_string(fdt, fdt32_to_cpu(prop->nameoff)); + if (!visit_start_list(v, name, NULL, 0, errp)) { + return; + } + for (i = 0; i < prop_len; i++) { + if (!visit_type_uint8(v, NULL, (uint8_t *)&prop->data[i], + errp)) { + return; + } + } + ok = visit_check_list(v, errp); + visit_end_list(v, NULL); + if (!ok) { + return; + } + break; + } + default: + error_report("device FDT in unexpected state: %d", tag); + abort(); + } + fdt_offset = fdt_offset_next; + } while (fdt_depth != 0); +} + +void spapr_drc_attach(SpaprDrc *drc, DeviceState *d) +{ + trace_spapr_drc_attach(spapr_drc_index(drc)); + + g_assert(!drc->dev); + g_assert((drc->state == SPAPR_DRC_STATE_LOGICAL_UNUSABLE) + || (drc->state == SPAPR_DRC_STATE_PHYSICAL_POWERON)); + + drc->dev = d; + + object_property_add_link(OBJECT(drc), "device", + object_get_typename(OBJECT(drc->dev)), + (Object **)(&drc->dev), + NULL, 0); +} + +void spapr_drc_unplug_request(SpaprDrc *drc) +{ + SpaprDrcClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc); + + trace_spapr_drc_unplug_request(spapr_drc_index(drc)); + + g_assert(drc->dev); + + drc->unplug_requested = true; + + if (drc->state != drck->empty_state) { + trace_spapr_drc_awaiting_quiesce(spapr_drc_index(drc)); + return; + } + + spapr_drc_release(drc); +} + +bool spapr_drc_reset(SpaprDrc *drc) +{ + SpaprDrcClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc); + bool unplug_completed = false; + + trace_spapr_drc_reset(spapr_drc_index(drc)); + + /* immediately upon reset we can safely assume DRCs whose devices + * are pending removal can be safely removed. 
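+     * (the guest is not running at reset time, so no RTAS transaction can
+     * be in flight against this connector. Completing the unplug here
+     * leaves the DRC re-initialised like a cold slot, i.e. schematically:
+     *
+     *     drc->dev != NULL -> drck->ready_state, ccs_offset = fdt start
+     *     drc->dev == NULL -> drck->empty_state, ccs_offset/depth = -1
+     *
+     * matching the two branches below.)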
+ */ + if (drc->unplug_requested) { + spapr_drc_release(drc); + unplug_completed = true; + } + + if (drc->dev) { + /* A device present at reset is ready to go, same as coldplugged */ + drc->state = drck->ready_state; + /* + * Ensure that we are able to send the FDT fragment again + * via configure-connector call if the guest requests. + */ + drc->ccs_offset = drc->fdt_start_offset; + drc->ccs_depth = 0; + } else { + drc->state = drck->empty_state; + drc->ccs_offset = -1; + drc->ccs_depth = -1; + } + + return unplug_completed; +} + +static bool spapr_drc_unplug_requested_needed(void *opaque) +{ + return spapr_drc_unplug_requested(opaque); +} + +static const VMStateDescription vmstate_spapr_drc_unplug_requested = { + .name = "spapr_drc/unplug_requested", + .version_id = 1, + .minimum_version_id = 1, + .needed = spapr_drc_unplug_requested_needed, + .fields = (VMStateField []) { + VMSTATE_BOOL(unplug_requested, SpaprDrc), + VMSTATE_END_OF_LIST() + } +}; + +static bool spapr_drc_needed(void *opaque) +{ + SpaprDrc *drc = opaque; + SpaprDrcClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc); + + /* + * If no dev is plugged in there is no need to migrate the DRC state + * nor to reset the DRC at CAS. + */ + if (!drc->dev) { + return false; + } + + /* + * We need to reset the DRC at CAS or to migrate the DRC state if it's + * not equal to the expected long-term state, which is the same as the + * coldplugged initial state, or if an unplug request is pending. + */ + return drc->state != drck->ready_state || + spapr_drc_unplug_requested(drc); +} + +static const VMStateDescription vmstate_spapr_drc = { + .name = "spapr_drc", + .version_id = 1, + .minimum_version_id = 1, + .needed = spapr_drc_needed, + .fields = (VMStateField []) { + VMSTATE_UINT32(state, SpaprDrc), + VMSTATE_END_OF_LIST() + }, + .subsections = (const VMStateDescription * []) { + &vmstate_spapr_drc_unplug_requested, + NULL + } +}; + +static void drc_realize(DeviceState *d, Error **errp) +{ + SpaprDrc *drc = SPAPR_DR_CONNECTOR(d); + Object *root_container; + gchar *link_name; + const char *child_name; + + trace_spapr_drc_realize(spapr_drc_index(drc)); + /* NOTE: we do this as part of realize/unrealize due to the fact + * that the guest will communicate with the DRC via RTAS calls + * referencing the global DRC index. 
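+     * Guest-visible lookups resolve through a QOM alias keyed on the hex
+     * index, so (assuming the CPU type shift of 1 from spapr_drc.h) a CPU
+     * DRC with id 5 has index 0x10000005 and is reachable roughly as:
+     *
+     *     object_resolve_path("/dr-connector/10000005", NULL);
+     *
+     * which is precisely the path spapr_drc_by_index() builds below.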
By unlinking the DRC + * from DRC_CONTAINER_PATH/ we effectively make it + * inaccessible by the guest, since lookups rely on this path + * existing in the composition tree + */ + root_container = container_get(object_get_root(), DRC_CONTAINER_PATH); + link_name = g_strdup_printf("%x", spapr_drc_index(drc)); + child_name = object_get_canonical_path_component(OBJECT(drc)); + trace_spapr_drc_realize_child(spapr_drc_index(drc), child_name); + object_property_add_alias(root_container, link_name, + drc->owner, child_name); + g_free(link_name); + vmstate_register(VMSTATE_IF(drc), spapr_drc_index(drc), &vmstate_spapr_drc, + drc); + trace_spapr_drc_realize_complete(spapr_drc_index(drc)); +} + +static void drc_unrealize(DeviceState *d) +{ + SpaprDrc *drc = SPAPR_DR_CONNECTOR(d); + Object *root_container; + gchar *name; + + trace_spapr_drc_unrealize(spapr_drc_index(drc)); + vmstate_unregister(VMSTATE_IF(drc), &vmstate_spapr_drc, drc); + root_container = container_get(object_get_root(), DRC_CONTAINER_PATH); + name = g_strdup_printf("%x", spapr_drc_index(drc)); + object_property_del(root_container, name); + g_free(name); +} + +SpaprDrc *spapr_dr_connector_new(Object *owner, const char *type, + uint32_t id) +{ + SpaprDrc *drc = SPAPR_DR_CONNECTOR(object_new(type)); + char *prop_name; + + drc->id = id; + drc->owner = owner; + prop_name = g_strdup_printf("dr-connector[%"PRIu32"]", + spapr_drc_index(drc)); + object_property_add_child(owner, prop_name, OBJECT(drc)); + object_unref(OBJECT(drc)); + qdev_realize(DEVICE(drc), NULL, NULL); + g_free(prop_name); + + return drc; +} + +static void spapr_dr_connector_instance_init(Object *obj) +{ + SpaprDrc *drc = SPAPR_DR_CONNECTOR(obj); + SpaprDrcClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc); + + object_property_add_uint32_ptr(obj, "id", &drc->id, OBJ_PROP_FLAG_READ); + object_property_add(obj, "index", "uint32", prop_get_index, + NULL, NULL, NULL); + object_property_add(obj, "fdt", "struct", prop_get_fdt, + NULL, NULL, NULL); + drc->state = drck->empty_state; +} + +static void spapr_dr_connector_class_init(ObjectClass *k, void *data) +{ + DeviceClass *dk = DEVICE_CLASS(k); + + dk->realize = drc_realize; + dk->unrealize = drc_unrealize; + /* + * Reason: DR connector needs to be wired to either the machine or to a + * PHB in spapr_dr_connector_new(). 
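+     * Keeping user_creatable false below blocks -device/device_add for
+     * this type; machine code instantiates connectors itself, e.g.
+     * roughly (illustrative call only, the actual call sites live in
+     * spapr.c and spapr_pci.c):
+     *
+     *     spapr_dr_connector_new(OBJECT(machine), TYPE_SPAPR_DRC_CPU, id);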
+ */ + dk->user_creatable = false; +} + +static bool drc_physical_needed(void *opaque) +{ + SpaprDrcPhysical *drcp = (SpaprDrcPhysical *)opaque; + SpaprDrc *drc = SPAPR_DR_CONNECTOR(drcp); + + if ((drc->dev && (drcp->dr_indicator == SPAPR_DR_INDICATOR_ACTIVE)) + || (!drc->dev && (drcp->dr_indicator == SPAPR_DR_INDICATOR_INACTIVE))) { + return false; + } + return true; +} + +static const VMStateDescription vmstate_spapr_drc_physical = { + .name = "spapr_drc/physical", + .version_id = 1, + .minimum_version_id = 1, + .needed = drc_physical_needed, + .fields = (VMStateField []) { + VMSTATE_UINT32(dr_indicator, SpaprDrcPhysical), + VMSTATE_END_OF_LIST() + } +}; + +static void drc_physical_reset(void *opaque) +{ + SpaprDrc *drc = SPAPR_DR_CONNECTOR(opaque); + SpaprDrcPhysical *drcp = SPAPR_DRC_PHYSICAL(drc); + + if (drc->dev) { + drcp->dr_indicator = SPAPR_DR_INDICATOR_ACTIVE; + } else { + drcp->dr_indicator = SPAPR_DR_INDICATOR_INACTIVE; + } +} + +static void realize_physical(DeviceState *d, Error **errp) +{ + SpaprDrcPhysical *drcp = SPAPR_DRC_PHYSICAL(d); + Error *local_err = NULL; + + drc_realize(d, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + + vmstate_register(VMSTATE_IF(drcp), + spapr_drc_index(SPAPR_DR_CONNECTOR(drcp)), + &vmstate_spapr_drc_physical, drcp); + qemu_register_reset(drc_physical_reset, drcp); +} + +static void unrealize_physical(DeviceState *d) +{ + SpaprDrcPhysical *drcp = SPAPR_DRC_PHYSICAL(d); + + drc_unrealize(d); + vmstate_unregister(VMSTATE_IF(drcp), &vmstate_spapr_drc_physical, drcp); + qemu_unregister_reset(drc_physical_reset, drcp); +} + +static void spapr_drc_physical_class_init(ObjectClass *k, void *data) +{ + DeviceClass *dk = DEVICE_CLASS(k); + SpaprDrcClass *drck = SPAPR_DR_CONNECTOR_CLASS(k); + + dk->realize = realize_physical; + dk->unrealize = unrealize_physical; + drck->dr_entity_sense = physical_entity_sense; + drck->isolate = drc_isolate_physical; + drck->unisolate = drc_unisolate_physical; + drck->ready_state = SPAPR_DRC_STATE_PHYSICAL_CONFIGURED; + drck->empty_state = SPAPR_DRC_STATE_PHYSICAL_POWERON; +} + +static void spapr_drc_logical_class_init(ObjectClass *k, void *data) +{ + SpaprDrcClass *drck = SPAPR_DR_CONNECTOR_CLASS(k); + + drck->dr_entity_sense = logical_entity_sense; + drck->isolate = drc_isolate_logical; + drck->unisolate = drc_unisolate_logical; + drck->ready_state = SPAPR_DRC_STATE_LOGICAL_CONFIGURED; + drck->empty_state = SPAPR_DRC_STATE_LOGICAL_UNUSABLE; +} + +static void spapr_drc_cpu_class_init(ObjectClass *k, void *data) +{ + SpaprDrcClass *drck = SPAPR_DR_CONNECTOR_CLASS(k); + + drck->typeshift = SPAPR_DR_CONNECTOR_TYPE_SHIFT_CPU; + drck->typename = "CPU"; + drck->drc_name_prefix = "CPU "; + drck->release = spapr_core_release; + drck->dt_populate = spapr_core_dt_populate; +} + +static void spapr_drc_pci_class_init(ObjectClass *k, void *data) +{ + SpaprDrcClass *drck = SPAPR_DR_CONNECTOR_CLASS(k); + + drck->typeshift = SPAPR_DR_CONNECTOR_TYPE_SHIFT_PCI; + drck->typename = "28"; + drck->drc_name_prefix = "C"; + drck->release = spapr_phb_remove_pci_device_cb; + drck->dt_populate = spapr_pci_dt_populate; +} + +static void spapr_drc_lmb_class_init(ObjectClass *k, void *data) +{ + SpaprDrcClass *drck = SPAPR_DR_CONNECTOR_CLASS(k); + + drck->typeshift = SPAPR_DR_CONNECTOR_TYPE_SHIFT_LMB; + drck->typename = "MEM"; + drck->drc_name_prefix = "LMB "; + drck->release = spapr_lmb_release; + drck->dt_populate = spapr_lmb_dt_populate; +} + +static void spapr_drc_phb_class_init(ObjectClass *k, void *data) +{ + 
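+    /*
+     * Like the CPU/LMB/PMEM variants above, PHB connectors are logical
+     * DRCs: the typename set here ends up in "ibm,drc-types" and the name
+     * prefix in "ibm,drc-names" via spapr_dt_drc() and spapr_drc_name().
+     */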
SpaprDrcClass *drck = SPAPR_DR_CONNECTOR_CLASS(k); + + drck->typeshift = SPAPR_DR_CONNECTOR_TYPE_SHIFT_PHB; + drck->typename = "PHB"; + drck->drc_name_prefix = "PHB "; + drck->release = spapr_phb_release; + drck->dt_populate = spapr_phb_dt_populate; +} + +static void spapr_drc_pmem_class_init(ObjectClass *k, void *data) +{ + SpaprDrcClass *drck = SPAPR_DR_CONNECTOR_CLASS(k); + + drck->typeshift = SPAPR_DR_CONNECTOR_TYPE_SHIFT_PMEM; + drck->typename = "PMEM"; + drck->drc_name_prefix = "PMEM "; + drck->release = NULL; + drck->dt_populate = spapr_pmem_dt_populate; +} + +static const TypeInfo spapr_dr_connector_info = { + .name = TYPE_SPAPR_DR_CONNECTOR, + .parent = TYPE_DEVICE, + .instance_size = sizeof(SpaprDrc), + .instance_init = spapr_dr_connector_instance_init, + .class_size = sizeof(SpaprDrcClass), + .class_init = spapr_dr_connector_class_init, + .abstract = true, +}; + +static const TypeInfo spapr_drc_physical_info = { + .name = TYPE_SPAPR_DRC_PHYSICAL, + .parent = TYPE_SPAPR_DR_CONNECTOR, + .instance_size = sizeof(SpaprDrcPhysical), + .class_init = spapr_drc_physical_class_init, + .abstract = true, +}; + +static const TypeInfo spapr_drc_logical_info = { + .name = TYPE_SPAPR_DRC_LOGICAL, + .parent = TYPE_SPAPR_DR_CONNECTOR, + .class_init = spapr_drc_logical_class_init, + .abstract = true, +}; + +static const TypeInfo spapr_drc_cpu_info = { + .name = TYPE_SPAPR_DRC_CPU, + .parent = TYPE_SPAPR_DRC_LOGICAL, + .class_init = spapr_drc_cpu_class_init, +}; + +static const TypeInfo spapr_drc_pci_info = { + .name = TYPE_SPAPR_DRC_PCI, + .parent = TYPE_SPAPR_DRC_PHYSICAL, + .class_init = spapr_drc_pci_class_init, +}; + +static const TypeInfo spapr_drc_lmb_info = { + .name = TYPE_SPAPR_DRC_LMB, + .parent = TYPE_SPAPR_DRC_LOGICAL, + .class_init = spapr_drc_lmb_class_init, +}; + +static const TypeInfo spapr_drc_phb_info = { + .name = TYPE_SPAPR_DRC_PHB, + .parent = TYPE_SPAPR_DRC_LOGICAL, + .instance_size = sizeof(SpaprDrc), + .class_init = spapr_drc_phb_class_init, +}; + +static const TypeInfo spapr_drc_pmem_info = { + .name = TYPE_SPAPR_DRC_PMEM, + .parent = TYPE_SPAPR_DRC_LOGICAL, + .class_init = spapr_drc_pmem_class_init, +}; + +/* helper functions for external users */ + +SpaprDrc *spapr_drc_by_index(uint32_t index) +{ + Object *obj; + gchar *name; + + name = g_strdup_printf("%s/%x", DRC_CONTAINER_PATH, index); + obj = object_resolve_path(name, NULL); + g_free(name); + + return !obj ? NULL : SPAPR_DR_CONNECTOR(obj); +} + +SpaprDrc *spapr_drc_by_id(const char *type, uint32_t id) +{ + SpaprDrcClass *drck + = SPAPR_DR_CONNECTOR_CLASS(object_class_by_name(type)); + + return spapr_drc_by_index(drck->typeshift << DRC_INDEX_TYPE_SHIFT + | (id & DRC_INDEX_ID_MASK)); +} + +/** + * spapr_dt_drc + * + * @fdt: libfdt device tree + * @path: path in the DT to generate properties + * @owner: parent Object/DeviceState for which to generate DRC + * descriptions for + * @drc_type_mask: mask of SpaprDrcType values corresponding + * to the types of DRCs to generate entries for + * + * generate OF properties to describe DRC topology/indices to guests + * + * as documented in PAPR+ v2.1, 13.5.2 + */ +int spapr_dt_drc(void *fdt, int offset, Object *owner, uint32_t drc_type_mask) +{ + Object *root_container; + ObjectProperty *prop; + ObjectPropertyIterator iter; + uint32_t drc_count = 0; + GArray *drc_indexes, *drc_power_domains; + GString *drc_names, *drc_types; + int ret; + + /* + * This should really be only called once per node since it overwrites + * the OF properties if they already exist. 
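+     * Schematically, the four properties written below look like this
+     * (layout sketch only; the contents depend on the DRCs present):
+     *
+     *     ibm,drc-indexes       = <count, idx0, idx1, ...>         (u32)
+     *     ibm,drc-power-domains = <count, -1, -1, ...>             (u32)
+     *     ibm,drc-names         = <count, "CPU 0", "CPU 8", ...>   (strz)
+     *     ibm,drc-types         = <count, "CPU", "CPU", ...>       (strz)
+     *
+     * with the leading element count patched in after the iteration.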
+ */ + g_assert(!fdt_get_property(fdt, offset, "ibm,drc-indexes", NULL)); + + /* the first entry of each properties is a 32-bit integer encoding + * the number of elements in the array. we won't know this until + * we complete the iteration through all the matching DRCs, but + * reserve the space now and set the offsets accordingly so we + * can fill them in later. + */ + drc_indexes = g_array_new(false, true, sizeof(uint32_t)); + drc_indexes = g_array_set_size(drc_indexes, 1); + drc_power_domains = g_array_new(false, true, sizeof(uint32_t)); + drc_power_domains = g_array_set_size(drc_power_domains, 1); + drc_names = g_string_set_size(g_string_new(NULL), sizeof(uint32_t)); + drc_types = g_string_set_size(g_string_new(NULL), sizeof(uint32_t)); + + /* aliases for all DRConnector objects will be rooted in QOM + * composition tree at DRC_CONTAINER_PATH + */ + root_container = container_get(object_get_root(), DRC_CONTAINER_PATH); + + object_property_iter_init(&iter, root_container); + while ((prop = object_property_iter_next(&iter))) { + Object *obj; + SpaprDrc *drc; + SpaprDrcClass *drck; + char *drc_name = NULL; + uint32_t drc_index, drc_power_domain; + + if (!strstart(prop->type, "link<", NULL)) { + continue; + } + + obj = object_property_get_link(root_container, prop->name, + &error_abort); + drc = SPAPR_DR_CONNECTOR(obj); + drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc); + + if (owner && (drc->owner != owner)) { + continue; + } + + if ((spapr_drc_type(drc) & drc_type_mask) == 0) { + continue; + } + + drc_count++; + + /* ibm,drc-indexes */ + drc_index = cpu_to_be32(spapr_drc_index(drc)); + g_array_append_val(drc_indexes, drc_index); + + /* ibm,drc-power-domains */ + drc_power_domain = cpu_to_be32(-1); + g_array_append_val(drc_power_domains, drc_power_domain); + + /* ibm,drc-names */ + drc_name = spapr_drc_name(drc); + drc_names = g_string_append(drc_names, drc_name); + drc_names = g_string_insert_len(drc_names, -1, "\0", 1); + g_free(drc_name); + + /* ibm,drc-types */ + drc_types = g_string_append(drc_types, drck->typename); + drc_types = g_string_insert_len(drc_types, -1, "\0", 1); + } + + /* now write the drc count into the space we reserved at the + * beginning of the arrays previously + */ + *(uint32_t *)drc_indexes->data = cpu_to_be32(drc_count); + *(uint32_t *)drc_power_domains->data = cpu_to_be32(drc_count); + *(uint32_t *)drc_names->str = cpu_to_be32(drc_count); + *(uint32_t *)drc_types->str = cpu_to_be32(drc_count); + + ret = fdt_setprop(fdt, offset, "ibm,drc-indexes", + drc_indexes->data, + drc_indexes->len * sizeof(uint32_t)); + if (ret) { + error_report("Couldn't create ibm,drc-indexes property"); + goto out; + } + + ret = fdt_setprop(fdt, offset, "ibm,drc-power-domains", + drc_power_domains->data, + drc_power_domains->len * sizeof(uint32_t)); + if (ret) { + error_report("Couldn't finalize ibm,drc-power-domains property"); + goto out; + } + + ret = fdt_setprop(fdt, offset, "ibm,drc-names", + drc_names->str, drc_names->len); + if (ret) { + error_report("Couldn't finalize ibm,drc-names property"); + goto out; + } + + ret = fdt_setprop(fdt, offset, "ibm,drc-types", + drc_types->str, drc_types->len); + if (ret) { + error_report("Couldn't finalize ibm,drc-types property"); + goto out; + } + +out: + g_array_free(drc_indexes, true); + g_array_free(drc_power_domains, true); + g_string_free(drc_names, true); + g_string_free(drc_types, true); + + return ret; +} + +void spapr_drc_reset_all(SpaprMachineState *spapr) +{ + Object *drc_container; + ObjectProperty *prop; + ObjectPropertyIterator 
iter; + + drc_container = container_get(object_get_root(), DRC_CONTAINER_PATH); +restart: + object_property_iter_init(&iter, drc_container); + while ((prop = object_property_iter_next(&iter))) { + SpaprDrc *drc; + + if (!strstart(prop->type, "link<", NULL)) { + continue; + } + drc = SPAPR_DR_CONNECTOR(object_property_get_link(drc_container, + prop->name, + &error_abort)); + + /* + * This will complete any pending plug/unplug requests. + * In case of a unplugged PHB or PCI bridge, this will + * cause some DRCs to be destroyed and thus potentially + * invalidate the iterator. + */ + if (spapr_drc_reset(drc)) { + goto restart; + } + } +} + +/* + * RTAS calls + */ + +static uint32_t rtas_set_isolation_state(uint32_t idx, uint32_t state) +{ + SpaprDrc *drc = spapr_drc_by_index(idx); + SpaprDrcClass *drck; + + if (!drc) { + return RTAS_OUT_NO_SUCH_INDICATOR; + } + + trace_spapr_drc_set_isolation_state(spapr_drc_index(drc), state); + + drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc); + + switch (state) { + case SPAPR_DR_ISOLATION_STATE_ISOLATED: + return drck->isolate(drc); + + case SPAPR_DR_ISOLATION_STATE_UNISOLATED: + return drck->unisolate(drc); + + default: + return RTAS_OUT_PARAM_ERROR; + } +} + +static uint32_t rtas_set_allocation_state(uint32_t idx, uint32_t state) +{ + SpaprDrc *drc = spapr_drc_by_index(idx); + + if (!drc || !object_dynamic_cast(OBJECT(drc), TYPE_SPAPR_DRC_LOGICAL)) { + return RTAS_OUT_NO_SUCH_INDICATOR; + } + + trace_spapr_drc_set_allocation_state(spapr_drc_index(drc), state); + + switch (state) { + case SPAPR_DR_ALLOCATION_STATE_USABLE: + return drc_set_usable(drc); + + case SPAPR_DR_ALLOCATION_STATE_UNUSABLE: + return drc_set_unusable(drc); + + default: + return RTAS_OUT_PARAM_ERROR; + } +} + +static uint32_t rtas_set_dr_indicator(uint32_t idx, uint32_t state) +{ + SpaprDrc *drc = spapr_drc_by_index(idx); + + if (!drc || !object_dynamic_cast(OBJECT(drc), TYPE_SPAPR_DRC_PHYSICAL)) { + return RTAS_OUT_NO_SUCH_INDICATOR; + } + if ((state != SPAPR_DR_INDICATOR_INACTIVE) + && (state != SPAPR_DR_INDICATOR_ACTIVE) + && (state != SPAPR_DR_INDICATOR_IDENTIFY) + && (state != SPAPR_DR_INDICATOR_ACTION)) { + return RTAS_OUT_PARAM_ERROR; /* bad state parameter */ + } + + trace_spapr_drc_set_dr_indicator(idx, state); + SPAPR_DRC_PHYSICAL(drc)->dr_indicator = state; + return RTAS_OUT_SUCCESS; +} + +static void rtas_set_indicator(PowerPCCPU *cpu, SpaprMachineState *spapr, + uint32_t token, + uint32_t nargs, target_ulong args, + uint32_t nret, target_ulong rets) +{ + uint32_t type, idx, state; + uint32_t ret = RTAS_OUT_SUCCESS; + + if (nargs != 3 || nret != 1) { + ret = RTAS_OUT_PARAM_ERROR; + goto out; + } + + type = rtas_ld(args, 0); + idx = rtas_ld(args, 1); + state = rtas_ld(args, 2); + + switch (type) { + case RTAS_SENSOR_TYPE_ISOLATION_STATE: + ret = rtas_set_isolation_state(idx, state); + break; + case RTAS_SENSOR_TYPE_DR: + ret = rtas_set_dr_indicator(idx, state); + break; + case RTAS_SENSOR_TYPE_ALLOCATION_STATE: + ret = rtas_set_allocation_state(idx, state); + break; + default: + ret = RTAS_OUT_NOT_SUPPORTED; + } + +out: + rtas_st(rets, 0, ret); +} + +static void rtas_get_sensor_state(PowerPCCPU *cpu, SpaprMachineState *spapr, + uint32_t token, uint32_t nargs, + target_ulong args, uint32_t nret, + target_ulong rets) +{ + uint32_t sensor_type; + uint32_t sensor_index; + uint32_t sensor_state = 0; + SpaprDrc *drc; + SpaprDrcClass *drck; + uint32_t ret = RTAS_OUT_SUCCESS; + + if (nargs != 2 || nret != 2) { + ret = RTAS_OUT_PARAM_ERROR; + goto out; + } + + sensor_type = rtas_ld(args, 
0); + sensor_index = rtas_ld(args, 1); + + if (sensor_type != RTAS_SENSOR_TYPE_ENTITY_SENSE) { + /* currently only DR-related sensors are implemented */ + trace_spapr_rtas_get_sensor_state_not_supported(sensor_index, + sensor_type); + ret = RTAS_OUT_NOT_SUPPORTED; + goto out; + } + + drc = spapr_drc_by_index(sensor_index); + if (!drc) { + trace_spapr_rtas_get_sensor_state_invalid(sensor_index); + ret = RTAS_OUT_PARAM_ERROR; + goto out; + } + drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc); + sensor_state = drck->dr_entity_sense(drc); + +out: + rtas_st(rets, 0, ret); + rtas_st(rets, 1, sensor_state); +} + +/* configure-connector work area offsets, int32_t units for field + * indexes, bytes for field offset/len values. + * + * as documented by PAPR+ v2.7, 13.5.3.5 + */ +#define CC_IDX_NODE_NAME_OFFSET 2 +#define CC_IDX_PROP_NAME_OFFSET 2 +#define CC_IDX_PROP_LEN 3 +#define CC_IDX_PROP_DATA_OFFSET 4 +#define CC_VAL_DATA_OFFSET ((CC_IDX_PROP_DATA_OFFSET + 1) * 4) +#define CC_WA_LEN 4096 + +static void configure_connector_st(target_ulong addr, target_ulong offset, + const void *buf, size_t len) +{ + cpu_physical_memory_write(ppc64_phys_to_real(addr + offset), + buf, MIN(len, CC_WA_LEN - offset)); +} + +static void rtas_ibm_configure_connector(PowerPCCPU *cpu, + SpaprMachineState *spapr, + uint32_t token, uint32_t nargs, + target_ulong args, uint32_t nret, + target_ulong rets) +{ + uint64_t wa_addr; + uint64_t wa_offset; + uint32_t drc_index; + SpaprDrc *drc; + SpaprDrcClass *drck; + SpaprDRCCResponse resp = SPAPR_DR_CC_RESPONSE_CONTINUE; + int rc; + + if (nargs != 2 || nret != 1) { + rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); + return; + } + + wa_addr = ((uint64_t)rtas_ld(args, 1) << 32) | rtas_ld(args, 0); + + drc_index = rtas_ld(wa_addr, 0); + drc = spapr_drc_by_index(drc_index); + if (!drc) { + trace_spapr_rtas_ibm_configure_connector_invalid(drc_index); + rc = RTAS_OUT_PARAM_ERROR; + goto out; + } + + if ((drc->state != SPAPR_DRC_STATE_LOGICAL_UNISOLATE) + && (drc->state != SPAPR_DRC_STATE_PHYSICAL_UNISOLATE) + && (drc->state != SPAPR_DRC_STATE_LOGICAL_CONFIGURED) + && (drc->state != SPAPR_DRC_STATE_PHYSICAL_CONFIGURED)) { + /* + * Need to unisolate the device before configuring + * or it should already be in configured state to + * allow configure-connector be called repeatedly. + */ + rc = SPAPR_DR_CC_RESPONSE_NOT_CONFIGURABLE; + goto out; + } + + drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc); + + /* + * This indicates that the kernel is reconfiguring a LMB due to + * a failed hotunplug. Rollback the DIMM unplug process. 
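+     * (same recovery path as drc_unisolate_logical() above: a guest that
+     * declines the unplug leaves unplug_requested set, and its next
+     * configure-connector call on that LMB is the cue to roll back.)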
+ */ + if (spapr_drc_type(drc) == SPAPR_DR_CONNECTOR_TYPE_LMB && + drc->unplug_requested) { + spapr_memory_unplug_rollback(spapr, drc->dev); + } + + if (!drc->fdt) { + void *fdt; + int fdt_size; + + fdt = create_device_tree(&fdt_size); + + if (drck->dt_populate(drc, spapr, fdt, &drc->fdt_start_offset, + NULL)) { + g_free(fdt); + rc = SPAPR_DR_CC_RESPONSE_ERROR; + goto out; + } + + drc->fdt = fdt; + drc->ccs_offset = drc->fdt_start_offset; + drc->ccs_depth = 0; + } + + do { + uint32_t tag; + const char *name; + const struct fdt_property *prop; + int fdt_offset_next, prop_len; + + tag = fdt_next_tag(drc->fdt, drc->ccs_offset, &fdt_offset_next); + + switch (tag) { + case FDT_BEGIN_NODE: + drc->ccs_depth++; + name = fdt_get_name(drc->fdt, drc->ccs_offset, NULL); + + /* provide the name of the next OF node */ + wa_offset = CC_VAL_DATA_OFFSET; + rtas_st(wa_addr, CC_IDX_NODE_NAME_OFFSET, wa_offset); + configure_connector_st(wa_addr, wa_offset, name, strlen(name) + 1); + resp = SPAPR_DR_CC_RESPONSE_NEXT_CHILD; + break; + case FDT_END_NODE: + drc->ccs_depth--; + if (drc->ccs_depth == 0) { + uint32_t drc_index = spapr_drc_index(drc); + + /* done sending the device tree, move to configured state */ + trace_spapr_drc_set_configured(drc_index); + drc->state = drck->ready_state; + /* + * Ensure that we are able to send the FDT fragment + * again via configure-connector call if the guest requests. + */ + drc->ccs_offset = drc->fdt_start_offset; + drc->ccs_depth = 0; + fdt_offset_next = drc->fdt_start_offset; + resp = SPAPR_DR_CC_RESPONSE_SUCCESS; + } else { + resp = SPAPR_DR_CC_RESPONSE_PREV_PARENT; + } + break; + case FDT_PROP: + prop = fdt_get_property_by_offset(drc->fdt, drc->ccs_offset, + &prop_len); + name = fdt_string(drc->fdt, fdt32_to_cpu(prop->nameoff)); + + /* provide the name of the next OF property */ + wa_offset = CC_VAL_DATA_OFFSET; + rtas_st(wa_addr, CC_IDX_PROP_NAME_OFFSET, wa_offset); + configure_connector_st(wa_addr, wa_offset, name, strlen(name) + 1); + + /* provide the length and value of the OF property. 
data gets
+             * placed immediately after the NULL terminator of the OF
+             * property's name string
+             */
+            wa_offset += strlen(name) + 1;
+            rtas_st(wa_addr, CC_IDX_PROP_LEN, prop_len);
+            rtas_st(wa_addr, CC_IDX_PROP_DATA_OFFSET, wa_offset);
+            configure_connector_st(wa_addr, wa_offset, prop->data, prop_len);
+            resp = SPAPR_DR_CC_RESPONSE_NEXT_PROPERTY;
+            break;
+        case FDT_END:
+            resp = SPAPR_DR_CC_RESPONSE_ERROR;
+            /* fall through: resp != CONTINUE also terminates the loop */
+        default:
+            /* keep seeking for an actionable tag */
+            break;
+        }
+        if (drc->ccs_offset >= 0) {
+            drc->ccs_offset = fdt_offset_next;
+        }
+    } while (resp == SPAPR_DR_CC_RESPONSE_CONTINUE);
+
+    rc = resp;
+out:
+    rtas_st(rets, 0, rc);
+}
+
+static void spapr_drc_register_types(void)
+{
+    type_register_static(&spapr_dr_connector_info);
+    type_register_static(&spapr_drc_physical_info);
+    type_register_static(&spapr_drc_logical_info);
+    type_register_static(&spapr_drc_cpu_info);
+    type_register_static(&spapr_drc_pci_info);
+    type_register_static(&spapr_drc_lmb_info);
+    type_register_static(&spapr_drc_phb_info);
+    type_register_static(&spapr_drc_pmem_info);
+
+    spapr_rtas_register(RTAS_SET_INDICATOR, "set-indicator",
+                        rtas_set_indicator);
+    spapr_rtas_register(RTAS_GET_SENSOR_STATE, "get-sensor-state",
+                        rtas_get_sensor_state);
+    spapr_rtas_register(RTAS_IBM_CONFIGURE_CONNECTOR, "ibm,configure-connector",
+                        rtas_ibm_configure_connector);
+}
+type_init(spapr_drc_register_types)
diff --git a/hw/ppc/spapr_events.c b/hw/ppc/spapr_events.c
new file mode 100644
index 000000000..630e86282
--- /dev/null
+++ b/hw/ppc/spapr_events.c
@@ -0,0 +1,1082 @@
+/*
+ * QEMU PowerPC pSeries Logical Partition (aka sPAPR) hardware System Emulator
+ *
+ * RTAS events handling
+ *
+ * Copyright (c) 2012 David Gibson, IBM Corporation.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "sysemu/device_tree.h"
+#include "sysemu/runstate.h"
+
+#include "hw/ppc/fdt.h"
+#include "hw/ppc/spapr.h"
+#include "hw/ppc/spapr_vio.h"
+#include "hw/pci/pci.h"
+#include "hw/irq.h"
+#include "hw/pci-host/spapr.h"
+#include "hw/ppc/spapr_drc.h"
+#include "qemu/help_option.h"
+#include "qemu/bcd.h"
+#include "qemu/main-loop.h"
+#include "hw/ppc/spapr_ovec.h"
+#include <libfdt.h> /* fdt_add_subnode()/fdt_setprop() helpers used below */
+#include "migration/blocker.h"
+
+#define RTAS_LOG_VERSION_MASK 0xff000000
+#define RTAS_LOG_VERSION_6 0x06000000
+#define RTAS_LOG_SEVERITY_MASK 0x00e00000
+#define RTAS_LOG_SEVERITY_ALREADY_REPORTED 0x00c00000
+#define RTAS_LOG_SEVERITY_FATAL 0x00a00000
+#define RTAS_LOG_SEVERITY_ERROR 0x00800000
+#define RTAS_LOG_SEVERITY_ERROR_SYNC 0x00600000
+#define RTAS_LOG_SEVERITY_WARNING 0x00400000
+#define RTAS_LOG_SEVERITY_EVENT 0x00200000
+#define RTAS_LOG_SEVERITY_NO_ERROR 0x00000000
+#define RTAS_LOG_DISPOSITION_MASK 0x00180000
+#define RTAS_LOG_DISPOSITION_FULLY_RECOVERED 0x00000000
+#define RTAS_LOG_DISPOSITION_LIMITED_RECOVERY 0x00080000
+#define RTAS_LOG_DISPOSITION_NOT_RECOVERED 0x00100000
+#define RTAS_LOG_OPTIONAL_PART_PRESENT 0x00040000
+#define RTAS_LOG_INITIATOR_MASK 0x0000f000
+#define RTAS_LOG_INITIATOR_UNKNOWN 0x00000000
+#define RTAS_LOG_INITIATOR_CPU 0x00001000
+#define RTAS_LOG_INITIATOR_PCI 0x00002000
+#define RTAS_LOG_INITIATOR_MEMORY 0x00004000
+#define RTAS_LOG_INITIATOR_HOTPLUG 0x00006000
+#define RTAS_LOG_TARGET_MASK 0x00000f00
+#define RTAS_LOG_TARGET_UNKNOWN 0x00000000
+#define RTAS_LOG_TARGET_CPU 0x00000100
+#define RTAS_LOG_TARGET_PCI 0x00000200
+#define RTAS_LOG_TARGET_MEMORY 0x00000400
+#define RTAS_LOG_TARGET_HOTPLUG 0x00000600
+#define RTAS_LOG_TYPE_MASK 0x000000ff
+#define RTAS_LOG_TYPE_OTHER 0x00000000
+#define RTAS_LOG_TYPE_RETRY 0x00000001
+#define RTAS_LOG_TYPE_TCE_ERR 0x00000002
+#define RTAS_LOG_TYPE_INTERN_DEV_FAIL 0x00000003
+#define RTAS_LOG_TYPE_TIMEOUT 0x00000004
+#define RTAS_LOG_TYPE_DATA_PARITY 0x00000005
+#define RTAS_LOG_TYPE_ADDR_PARITY 0x00000006
+#define RTAS_LOG_TYPE_CACHE_PARITY 0x00000007
+#define RTAS_LOG_TYPE_ADDR_INVALID 0x00000008
+#define RTAS_LOG_TYPE_ECC_UNCORR 0x00000009
+#define RTAS_LOG_TYPE_ECC_CORR 0x0000000a
+#define RTAS_LOG_TYPE_EPOW 0x00000040
+#define RTAS_LOG_TYPE_HOTPLUG 0x000000e5
+
+struct rtas_error_log {
+    uint32_t summary;
+    uint32_t extended_length;
+} QEMU_PACKED;
+
+struct rtas_event_log_v6 {
+    uint8_t b0;
+#define RTAS_LOG_V6_B0_VALID 0x80
+#define RTAS_LOG_V6_B0_UNRECOVERABLE_ERROR 0x40
+#define RTAS_LOG_V6_B0_RECOVERABLE_ERROR 0x20
+#define RTAS_LOG_V6_B0_DEGRADED_OPERATION 0x10
+#define RTAS_LOG_V6_B0_PREDICTIVE_ERROR 0x08
+#define RTAS_LOG_V6_B0_NEW_LOG 0x04
+#define RTAS_LOG_V6_B0_BIGENDIAN 0x02
+    uint8_t _resv1;
+    uint8_t b2;
+#define RTAS_LOG_V6_B2_POWERPC_FORMAT 0x80
+#define RTAS_LOG_V6_B2_LOG_FORMAT_MASK 0x0f
+#define RTAS_LOG_V6_B2_LOG_FORMAT_PLATFORM_EVENT 0x0e
+    uint8_t _resv2[9];
+    uint32_t company;
+#define RTAS_LOG_V6_COMPANY_IBM 0x49424d00 /* IBM */
+} QEMU_PACKED;
+
+struct rtas_event_log_v6_section_header {
+    uint16_t section_id;
+    uint16_t section_length;
+    uint8_t section_version;
+    uint8_t section_subtype;
+    uint16_t creator_component_id;
+} QEMU_PACKED;
+
+struct rtas_event_log_v6_maina {
+#define RTAS_LOG_V6_SECTION_ID_MAINA 0x5048 /* PH */
+    struct rtas_event_log_v6_section_header hdr;
+    uint32_t creation_date; /* BCD: YYYYMMDD */
+    uint32_t creation_time; /* BCD: HHMMSS00 */
+    uint8_t _platform1[8];
+    char creator_id;
+    uint8_t _resv1[2];
+    uint8_t
section_count; + uint8_t _resv2[4]; + uint8_t _platform2[8]; + uint32_t plid; + uint8_t _platform3[4]; +} QEMU_PACKED; + +struct rtas_event_log_v6_mainb { +#define RTAS_LOG_V6_SECTION_ID_MAINB 0x5548 /* UH */ + struct rtas_event_log_v6_section_header hdr; + uint8_t subsystem_id; + uint8_t _platform1; + uint8_t event_severity; + uint8_t event_subtype; + uint8_t _platform2[4]; + uint8_t _resv1[2]; + uint16_t action_flags; + uint8_t _resv2[4]; +} QEMU_PACKED; + +struct rtas_event_log_v6_epow { +#define RTAS_LOG_V6_SECTION_ID_EPOW 0x4550 /* EP */ + struct rtas_event_log_v6_section_header hdr; + uint8_t sensor_value; +#define RTAS_LOG_V6_EPOW_ACTION_RESET 0 +#define RTAS_LOG_V6_EPOW_ACTION_WARN_COOLING 1 +#define RTAS_LOG_V6_EPOW_ACTION_WARN_POWER 2 +#define RTAS_LOG_V6_EPOW_ACTION_SYSTEM_SHUTDOWN 3 +#define RTAS_LOG_V6_EPOW_ACTION_SYSTEM_HALT 4 +#define RTAS_LOG_V6_EPOW_ACTION_MAIN_ENCLOSURE 5 +#define RTAS_LOG_V6_EPOW_ACTION_POWER_OFF 7 + uint8_t event_modifier; +#define RTAS_LOG_V6_EPOW_MODIFIER_NORMAL 1 +#define RTAS_LOG_V6_EPOW_MODIFIER_ON_UPS 2 +#define RTAS_LOG_V6_EPOW_MODIFIER_CRITICAL 3 +#define RTAS_LOG_V6_EPOW_MODIFIER_TEMPERATURE 4 + uint8_t extended_modifier; +#define RTAS_LOG_V6_EPOW_XMODIFIER_SYSTEM_WIDE 0 +#define RTAS_LOG_V6_EPOW_XMODIFIER_PARTITION_SPECIFIC 1 + uint8_t _resv; + uint64_t reason_code; +} QEMU_PACKED; + +struct epow_extended_log { + struct rtas_event_log_v6 v6hdr; + struct rtas_event_log_v6_maina maina; + struct rtas_event_log_v6_mainb mainb; + struct rtas_event_log_v6_epow epow; +} QEMU_PACKED; + +union drc_identifier { + uint32_t index; + uint32_t count; + struct { + uint32_t count; + uint32_t index; + } count_indexed; + char name[1]; +} QEMU_PACKED; + +struct rtas_event_log_v6_hp { +#define RTAS_LOG_V6_SECTION_ID_HOTPLUG 0x4850 /* HP */ + struct rtas_event_log_v6_section_header hdr; + uint8_t hotplug_type; +#define RTAS_LOG_V6_HP_TYPE_CPU 1 +#define RTAS_LOG_V6_HP_TYPE_MEMORY 2 +#define RTAS_LOG_V6_HP_TYPE_SLOT 3 +#define RTAS_LOG_V6_HP_TYPE_PHB 4 +#define RTAS_LOG_V6_HP_TYPE_PCI 5 +#define RTAS_LOG_V6_HP_TYPE_PMEM 6 + uint8_t hotplug_action; +#define RTAS_LOG_V6_HP_ACTION_ADD 1 +#define RTAS_LOG_V6_HP_ACTION_REMOVE 2 + uint8_t hotplug_identifier; +#define RTAS_LOG_V6_HP_ID_DRC_NAME 1 +#define RTAS_LOG_V6_HP_ID_DRC_INDEX 2 +#define RTAS_LOG_V6_HP_ID_DRC_COUNT 3 +#define RTAS_LOG_V6_HP_ID_DRC_COUNT_INDEXED 4 + uint8_t reserved; + union drc_identifier drc_id; +} QEMU_PACKED; + +struct hp_extended_log { + struct rtas_event_log_v6 v6hdr; + struct rtas_event_log_v6_maina maina; + struct rtas_event_log_v6_mainb mainb; + struct rtas_event_log_v6_hp hp; +} QEMU_PACKED; + +struct rtas_event_log_v6_mc { +#define RTAS_LOG_V6_SECTION_ID_MC 0x4D43 /* MC */ + struct rtas_event_log_v6_section_header hdr; + uint32_t fru_id; + uint32_t proc_id; + uint8_t error_type; +#define RTAS_LOG_V6_MC_TYPE_UE 0 +#define RTAS_LOG_V6_MC_TYPE_SLB 1 +#define RTAS_LOG_V6_MC_TYPE_ERAT 2 +#define RTAS_LOG_V6_MC_TYPE_TLB 4 +#define RTAS_LOG_V6_MC_TYPE_D_CACHE 5 +#define RTAS_LOG_V6_MC_TYPE_I_CACHE 7 + uint8_t sub_err_type; +#define RTAS_LOG_V6_MC_UE_INDETERMINATE 0 +#define RTAS_LOG_V6_MC_UE_IFETCH 1 +#define RTAS_LOG_V6_MC_UE_PAGE_TABLE_WALK_IFETCH 2 +#define RTAS_LOG_V6_MC_UE_LOAD_STORE 3 +#define RTAS_LOG_V6_MC_UE_PAGE_TABLE_WALK_LOAD_STORE 4 +#define RTAS_LOG_V6_MC_SLB_PARITY 0 +#define RTAS_LOG_V6_MC_SLB_MULTIHIT 1 +#define RTAS_LOG_V6_MC_SLB_INDETERMINATE 2 +#define RTAS_LOG_V6_MC_ERAT_PARITY 1 +#define RTAS_LOG_V6_MC_ERAT_MULTIHIT 2 +#define RTAS_LOG_V6_MC_ERAT_INDETERMINATE 3 +#define 
RTAS_LOG_V6_MC_TLB_PARITY 1 +#define RTAS_LOG_V6_MC_TLB_MULTIHIT 2 +#define RTAS_LOG_V6_MC_TLB_INDETERMINATE 3 +/* + * Per PAPR, + * For UE error type, set bit 1 of sub_err_type to indicate effective addr is + * provided. For other error types (SLB/ERAT/TLB), set bit 0 to indicate + * same. + */ +#define RTAS_LOG_V6_MC_UE_EA_ADDR_PROVIDED 0x40 +#define RTAS_LOG_V6_MC_EA_ADDR_PROVIDED 0x80 + uint8_t reserved_1[6]; + uint64_t effective_address; + uint64_t logical_address; +} QEMU_PACKED; + +struct mc_extended_log { + struct rtas_event_log_v6 v6hdr; + struct rtas_event_log_v6_mc mc; +} QEMU_PACKED; + +struct MC_ierror_table { + unsigned long srr1_mask; + unsigned long srr1_value; + bool nip_valid; /* nip is a valid indicator of faulting address */ + uint8_t error_type; + uint8_t error_subtype; + unsigned int initiator; + unsigned int severity; +}; + +static const struct MC_ierror_table mc_ierror_table[] = { +{ 0x00000000081c0000, 0x0000000000040000, true, + RTAS_LOG_V6_MC_TYPE_UE, RTAS_LOG_V6_MC_UE_IFETCH, + RTAS_LOG_INITIATOR_CPU, RTAS_LOG_SEVERITY_ERROR_SYNC, }, +{ 0x00000000081c0000, 0x0000000000080000, true, + RTAS_LOG_V6_MC_TYPE_SLB, RTAS_LOG_V6_MC_SLB_PARITY, + RTAS_LOG_INITIATOR_CPU, RTAS_LOG_SEVERITY_ERROR_SYNC, }, +{ 0x00000000081c0000, 0x00000000000c0000, true, + RTAS_LOG_V6_MC_TYPE_SLB, RTAS_LOG_V6_MC_SLB_MULTIHIT, + RTAS_LOG_INITIATOR_CPU, RTAS_LOG_SEVERITY_ERROR_SYNC, }, +{ 0x00000000081c0000, 0x0000000000100000, true, + RTAS_LOG_V6_MC_TYPE_ERAT, RTAS_LOG_V6_MC_ERAT_MULTIHIT, + RTAS_LOG_INITIATOR_CPU, RTAS_LOG_SEVERITY_ERROR_SYNC, }, +{ 0x00000000081c0000, 0x0000000000140000, true, + RTAS_LOG_V6_MC_TYPE_TLB, RTAS_LOG_V6_MC_TLB_MULTIHIT, + RTAS_LOG_INITIATOR_CPU, RTAS_LOG_SEVERITY_ERROR_SYNC, }, +{ 0x00000000081c0000, 0x0000000000180000, true, + RTAS_LOG_V6_MC_TYPE_UE, RTAS_LOG_V6_MC_UE_PAGE_TABLE_WALK_IFETCH, + RTAS_LOG_INITIATOR_CPU, RTAS_LOG_SEVERITY_ERROR_SYNC, } }; + +struct MC_derror_table { + unsigned long dsisr_value; + bool dar_valid; /* dar is a valid indicator of faulting address */ + uint8_t error_type; + uint8_t error_subtype; + unsigned int initiator; + unsigned int severity; +}; + +static const struct MC_derror_table mc_derror_table[] = { +{ 0x00008000, false, + RTAS_LOG_V6_MC_TYPE_UE, RTAS_LOG_V6_MC_UE_LOAD_STORE, + RTAS_LOG_INITIATOR_CPU, RTAS_LOG_SEVERITY_ERROR_SYNC, }, +{ 0x00004000, true, + RTAS_LOG_V6_MC_TYPE_UE, RTAS_LOG_V6_MC_UE_PAGE_TABLE_WALK_LOAD_STORE, + RTAS_LOG_INITIATOR_CPU, RTAS_LOG_SEVERITY_ERROR_SYNC, }, +{ 0x00000800, true, + RTAS_LOG_V6_MC_TYPE_ERAT, RTAS_LOG_V6_MC_ERAT_MULTIHIT, + RTAS_LOG_INITIATOR_CPU, RTAS_LOG_SEVERITY_ERROR_SYNC, }, +{ 0x00000400, true, + RTAS_LOG_V6_MC_TYPE_TLB, RTAS_LOG_V6_MC_TLB_MULTIHIT, + RTAS_LOG_INITIATOR_CPU, RTAS_LOG_SEVERITY_ERROR_SYNC, }, +{ 0x00000080, true, + RTAS_LOG_V6_MC_TYPE_SLB, RTAS_LOG_V6_MC_SLB_MULTIHIT, /* Before PARITY */ + RTAS_LOG_INITIATOR_CPU, RTAS_LOG_SEVERITY_ERROR_SYNC, }, +{ 0x00000100, true, + RTAS_LOG_V6_MC_TYPE_SLB, RTAS_LOG_V6_MC_SLB_PARITY, + RTAS_LOG_INITIATOR_CPU, RTAS_LOG_SEVERITY_ERROR_SYNC, } }; + +#define SRR1_MC_LOADSTORE(srr1) ((srr1) & PPC_BIT(42)) + +typedef enum EventClass { + EVENT_CLASS_INTERNAL_ERRORS = 0, + EVENT_CLASS_EPOW = 1, + EVENT_CLASS_RESERVED = 2, + EVENT_CLASS_HOT_PLUG = 3, + EVENT_CLASS_IO = 4, + EVENT_CLASS_MAX +} EventClassIndex; +#define EVENT_CLASS_MASK(index) (1 << (31 - index)) + +static const char * const event_names[EVENT_CLASS_MAX] = { + [EVENT_CLASS_INTERNAL_ERRORS] = "internal-errors", + [EVENT_CLASS_EPOW] = "epow-events", + [EVENT_CLASS_HOT_PLUG] = 
"hot-plug-events", + [EVENT_CLASS_IO] = "ibm,io-events", +}; + +struct SpaprEventSource { + int irq; + uint32_t mask; + bool enabled; +}; + +static SpaprEventSource *spapr_event_sources_new(void) +{ + return g_new0(SpaprEventSource, EVENT_CLASS_MAX); +} + +static void spapr_event_sources_register(SpaprEventSource *event_sources, + EventClassIndex index, int irq) +{ + /* we only support 1 irq per event class at the moment */ + g_assert(event_sources); + g_assert(!event_sources[index].enabled); + event_sources[index].irq = irq; + event_sources[index].mask = EVENT_CLASS_MASK(index); + event_sources[index].enabled = true; +} + +static const SpaprEventSource * +spapr_event_sources_get_source(SpaprEventSource *event_sources, + EventClassIndex index) +{ + g_assert(index < EVENT_CLASS_MAX); + g_assert(event_sources); + + return &event_sources[index]; +} + +void spapr_dt_events(SpaprMachineState *spapr, void *fdt) +{ + uint32_t irq_ranges[EVENT_CLASS_MAX * 2]; + int i, count = 0, event_sources; + SpaprEventSource *events = spapr->event_sources; + + g_assert(events); + + _FDT(event_sources = fdt_add_subnode(fdt, 0, "event-sources")); + + for (i = 0, count = 0; i < EVENT_CLASS_MAX; i++) { + int node_offset; + uint32_t interrupts[2]; + const SpaprEventSource *source = + spapr_event_sources_get_source(events, i); + const char *source_name = event_names[i]; + + if (!source->enabled) { + continue; + } + + spapr_dt_irq(interrupts, source->irq, false); + + _FDT(node_offset = fdt_add_subnode(fdt, event_sources, source_name)); + _FDT(fdt_setprop(fdt, node_offset, "interrupts", interrupts, + sizeof(interrupts))); + + irq_ranges[count++] = interrupts[0]; + irq_ranges[count++] = cpu_to_be32(1); + } + + _FDT((fdt_setprop(fdt, event_sources, "interrupt-controller", NULL, 0))); + _FDT((fdt_setprop_cell(fdt, event_sources, "#interrupt-cells", 2))); + _FDT((fdt_setprop(fdt, event_sources, "interrupt-ranges", + irq_ranges, count * sizeof(uint32_t)))); +} + +static const SpaprEventSource * +rtas_event_log_to_source(SpaprMachineState *spapr, int log_type) +{ + const SpaprEventSource *source; + + g_assert(spapr->event_sources); + + switch (log_type) { + case RTAS_LOG_TYPE_HOTPLUG: + source = spapr_event_sources_get_source(spapr->event_sources, + EVENT_CLASS_HOT_PLUG); + if (spapr_ovec_test(spapr->ov5_cas, OV5_HP_EVT)) { + g_assert(source->enabled); + break; + } + /* fall through back to epow for legacy hotplug interrupt source */ + case RTAS_LOG_TYPE_EPOW: + source = spapr_event_sources_get_source(spapr->event_sources, + EVENT_CLASS_EPOW); + break; + default: + source = NULL; + } + + return source; +} + +static int rtas_event_log_to_irq(SpaprMachineState *spapr, int log_type) +{ + const SpaprEventSource *source; + + source = rtas_event_log_to_source(spapr, log_type); + g_assert(source); + g_assert(source->enabled); + + return source->irq; +} + +static uint32_t spapr_event_log_entry_type(SpaprEventLogEntry *entry) +{ + return entry->summary & RTAS_LOG_TYPE_MASK; +} + +static void rtas_event_log_queue(SpaprMachineState *spapr, + SpaprEventLogEntry *entry) +{ + QTAILQ_INSERT_TAIL(&spapr->pending_events, entry, next); +} + +static SpaprEventLogEntry *rtas_event_log_dequeue(SpaprMachineState *spapr, + uint32_t event_mask) +{ + SpaprEventLogEntry *entry = NULL; + + QTAILQ_FOREACH(entry, &spapr->pending_events, next) { + const SpaprEventSource *source = + rtas_event_log_to_source(spapr, + spapr_event_log_entry_type(entry)); + + g_assert(source); + if (source->mask & event_mask) { + break; + } + } + + if (entry) { + 
QTAILQ_REMOVE(&spapr->pending_events, entry, next); + } + + return entry; +} + +static bool rtas_event_log_contains(SpaprMachineState *spapr, uint32_t event_mask) +{ + SpaprEventLogEntry *entry = NULL; + + QTAILQ_FOREACH(entry, &spapr->pending_events, next) { + const SpaprEventSource *source = + rtas_event_log_to_source(spapr, + spapr_event_log_entry_type(entry)); + + if (source->mask & event_mask) { + return true; + } + } + + return false; +} + +static uint32_t next_plid; + +static void spapr_init_v6hdr(struct rtas_event_log_v6 *v6hdr) +{ + v6hdr->b0 = RTAS_LOG_V6_B0_VALID | RTAS_LOG_V6_B0_NEW_LOG + | RTAS_LOG_V6_B0_BIGENDIAN; + v6hdr->b2 = RTAS_LOG_V6_B2_POWERPC_FORMAT + | RTAS_LOG_V6_B2_LOG_FORMAT_PLATFORM_EVENT; + v6hdr->company = cpu_to_be32(RTAS_LOG_V6_COMPANY_IBM); +} + +static void spapr_init_maina(SpaprMachineState *spapr, + struct rtas_event_log_v6_maina *maina, + int section_count) +{ + struct tm tm; + int year; + + maina->hdr.section_id = cpu_to_be16(RTAS_LOG_V6_SECTION_ID_MAINA); + maina->hdr.section_length = cpu_to_be16(sizeof(*maina)); + /* FIXME: section version, subtype and creator id? */ + spapr_rtc_read(&spapr->rtc, &tm, NULL); + year = tm.tm_year + 1900; + maina->creation_date = cpu_to_be32((to_bcd(year / 100) << 24) + | (to_bcd(year % 100) << 16) + | (to_bcd(tm.tm_mon + 1) << 8) + | to_bcd(tm.tm_mday)); + maina->creation_time = cpu_to_be32((to_bcd(tm.tm_hour) << 24) + | (to_bcd(tm.tm_min) << 16) + | (to_bcd(tm.tm_sec) << 8)); + maina->creator_id = 'H'; /* Hypervisor */ + maina->section_count = section_count; + maina->plid = next_plid++; +} + +static void spapr_powerdown_req(Notifier *n, void *opaque) +{ + SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine()); + SpaprEventLogEntry *entry; + struct rtas_event_log_v6 *v6hdr; + struct rtas_event_log_v6_maina *maina; + struct rtas_event_log_v6_mainb *mainb; + struct rtas_event_log_v6_epow *epow; + struct epow_extended_log *new_epow; + + entry = g_new(SpaprEventLogEntry, 1); + new_epow = g_malloc0(sizeof(*new_epow)); + entry->extended_log = new_epow; + + v6hdr = &new_epow->v6hdr; + maina = &new_epow->maina; + mainb = &new_epow->mainb; + epow = &new_epow->epow; + + entry->summary = RTAS_LOG_VERSION_6 + | RTAS_LOG_SEVERITY_EVENT + | RTAS_LOG_DISPOSITION_NOT_RECOVERED + | RTAS_LOG_OPTIONAL_PART_PRESENT + | RTAS_LOG_TYPE_EPOW; + entry->extended_length = sizeof(*new_epow); + + spapr_init_v6hdr(v6hdr); + spapr_init_maina(spapr, maina, 3 /* Main-A, Main-B and EPOW */); + + mainb->hdr.section_id = cpu_to_be16(RTAS_LOG_V6_SECTION_ID_MAINB); + mainb->hdr.section_length = cpu_to_be16(sizeof(*mainb)); + /* FIXME: section version, subtype and creator id? */ + mainb->subsystem_id = 0xa0; /* External environment */ + mainb->event_severity = 0x00; /* Informational / non-error */ + mainb->event_subtype = 0xd0; /* Normal shutdown */ + + epow->hdr.section_id = cpu_to_be16(RTAS_LOG_V6_SECTION_ID_EPOW); + epow->hdr.section_length = cpu_to_be16(sizeof(*epow)); + epow->hdr.section_version = 2; /* includes extended modifier */ + /* FIXME: section subtype and creator id? 
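+     * The triple set just below (SYSTEM_SHUTDOWN sensor value, NORMAL
+     * event modifier, PARTITION_SPECIFIC extended modifier) encodes an
+     * orderly, partition-scoped power-off request in the EPOW section
+     * layout sketched above, which guests handle by initiating shutdown.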
*/ + epow->sensor_value = RTAS_LOG_V6_EPOW_ACTION_SYSTEM_SHUTDOWN; + epow->event_modifier = RTAS_LOG_V6_EPOW_MODIFIER_NORMAL; + epow->extended_modifier = RTAS_LOG_V6_EPOW_XMODIFIER_PARTITION_SPECIFIC; + + rtas_event_log_queue(spapr, entry); + + qemu_irq_pulse(spapr_qirq(spapr, + rtas_event_log_to_irq(spapr, RTAS_LOG_TYPE_EPOW))); +} + +static void spapr_hotplug_req_event(uint8_t hp_id, uint8_t hp_action, + SpaprDrcType drc_type, + union drc_identifier *drc_id) +{ + SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine()); + SpaprEventLogEntry *entry; + struct hp_extended_log *new_hp; + struct rtas_event_log_v6 *v6hdr; + struct rtas_event_log_v6_maina *maina; + struct rtas_event_log_v6_mainb *mainb; + struct rtas_event_log_v6_hp *hp; + + entry = g_new(SpaprEventLogEntry, 1); + new_hp = g_malloc0(sizeof(struct hp_extended_log)); + entry->extended_log = new_hp; + + v6hdr = &new_hp->v6hdr; + maina = &new_hp->maina; + mainb = &new_hp->mainb; + hp = &new_hp->hp; + + entry->summary = RTAS_LOG_VERSION_6 + | RTAS_LOG_SEVERITY_EVENT + | RTAS_LOG_DISPOSITION_NOT_RECOVERED + | RTAS_LOG_OPTIONAL_PART_PRESENT + | RTAS_LOG_INITIATOR_HOTPLUG + | RTAS_LOG_TYPE_HOTPLUG; + entry->extended_length = sizeof(*new_hp); + + spapr_init_v6hdr(v6hdr); + spapr_init_maina(spapr, maina, 3 /* Main-A, Main-B, HP */); + + mainb->hdr.section_id = cpu_to_be16(RTAS_LOG_V6_SECTION_ID_MAINB); + mainb->hdr.section_length = cpu_to_be16(sizeof(*mainb)); + mainb->subsystem_id = 0x80; /* External environment */ + mainb->event_severity = 0x00; /* Informational / non-error */ + mainb->event_subtype = 0x00; /* Normal shutdown */ + + hp->hdr.section_id = cpu_to_be16(RTAS_LOG_V6_SECTION_ID_HOTPLUG); + hp->hdr.section_length = cpu_to_be16(sizeof(*hp)); + hp->hdr.section_version = 1; /* includes extended modifier */ + hp->hotplug_action = hp_action; + hp->hotplug_identifier = hp_id; + + switch (drc_type) { + case SPAPR_DR_CONNECTOR_TYPE_PCI: + hp->hotplug_type = RTAS_LOG_V6_HP_TYPE_PCI; + break; + case SPAPR_DR_CONNECTOR_TYPE_LMB: + hp->hotplug_type = RTAS_LOG_V6_HP_TYPE_MEMORY; + break; + case SPAPR_DR_CONNECTOR_TYPE_CPU: + hp->hotplug_type = RTAS_LOG_V6_HP_TYPE_CPU; + break; + case SPAPR_DR_CONNECTOR_TYPE_PHB: + hp->hotplug_type = RTAS_LOG_V6_HP_TYPE_PHB; + break; + case SPAPR_DR_CONNECTOR_TYPE_PMEM: + hp->hotplug_type = RTAS_LOG_V6_HP_TYPE_PMEM; + break; + default: + /* we shouldn't be signaling hotplug events for resources + * that don't support them + */ + g_assert(false); + return; + } + + if (hp_id == RTAS_LOG_V6_HP_ID_DRC_COUNT) { + hp->drc_id.count = cpu_to_be32(drc_id->count); + } else if (hp_id == RTAS_LOG_V6_HP_ID_DRC_INDEX) { + hp->drc_id.index = cpu_to_be32(drc_id->index); + } else if (hp_id == RTAS_LOG_V6_HP_ID_DRC_COUNT_INDEXED) { + /* we should not be using count_indexed value unless the guest + * supports dedicated hotplug event source + */ + g_assert(spapr_memory_hot_unplug_supported(spapr)); + hp->drc_id.count_indexed.count = + cpu_to_be32(drc_id->count_indexed.count); + hp->drc_id.count_indexed.index = + cpu_to_be32(drc_id->count_indexed.index); + } + + rtas_event_log_queue(spapr, entry); + + qemu_irq_pulse(spapr_qirq(spapr, + rtas_event_log_to_irq(spapr, RTAS_LOG_TYPE_HOTPLUG))); +} + +void spapr_hotplug_req_add_by_index(SpaprDrc *drc) +{ + SpaprDrcType drc_type = spapr_drc_type(drc); + union drc_identifier drc_id; + + drc_id.index = spapr_drc_index(drc); + spapr_hotplug_req_event(RTAS_LOG_V6_HP_ID_DRC_INDEX, + RTAS_LOG_V6_HP_ACTION_ADD, drc_type, &drc_id); +} + +void spapr_hotplug_req_remove_by_index(SpaprDrc 
*drc) +{ + SpaprDrcType drc_type = spapr_drc_type(drc); + union drc_identifier drc_id; + + drc_id.index = spapr_drc_index(drc); + spapr_hotplug_req_event(RTAS_LOG_V6_HP_ID_DRC_INDEX, + RTAS_LOG_V6_HP_ACTION_REMOVE, drc_type, &drc_id); +} + +void spapr_hotplug_req_add_by_count(SpaprDrcType drc_type, + uint32_t count) +{ + union drc_identifier drc_id; + + drc_id.count = count; + spapr_hotplug_req_event(RTAS_LOG_V6_HP_ID_DRC_COUNT, + RTAS_LOG_V6_HP_ACTION_ADD, drc_type, &drc_id); +} + +void spapr_hotplug_req_remove_by_count(SpaprDrcType drc_type, + uint32_t count) +{ + union drc_identifier drc_id; + + drc_id.count = count; + spapr_hotplug_req_event(RTAS_LOG_V6_HP_ID_DRC_COUNT, + RTAS_LOG_V6_HP_ACTION_REMOVE, drc_type, &drc_id); +} + +void spapr_hotplug_req_add_by_count_indexed(SpaprDrcType drc_type, + uint32_t count, uint32_t index) +{ + union drc_identifier drc_id; + + drc_id.count_indexed.count = count; + drc_id.count_indexed.index = index; + spapr_hotplug_req_event(RTAS_LOG_V6_HP_ID_DRC_COUNT_INDEXED, + RTAS_LOG_V6_HP_ACTION_ADD, drc_type, &drc_id); +} + +void spapr_hotplug_req_remove_by_count_indexed(SpaprDrcType drc_type, + uint32_t count, uint32_t index) +{ + union drc_identifier drc_id; + + drc_id.count_indexed.count = count; + drc_id.count_indexed.index = index; + spapr_hotplug_req_event(RTAS_LOG_V6_HP_ID_DRC_COUNT_INDEXED, + RTAS_LOG_V6_HP_ACTION_REMOVE, drc_type, &drc_id); +} + +static void spapr_mc_set_ea_provided_flag(struct mc_extended_log *ext_elog) +{ + switch (ext_elog->mc.error_type) { + case RTAS_LOG_V6_MC_TYPE_UE: + ext_elog->mc.sub_err_type |= RTAS_LOG_V6_MC_UE_EA_ADDR_PROVIDED; + break; + case RTAS_LOG_V6_MC_TYPE_SLB: + case RTAS_LOG_V6_MC_TYPE_ERAT: + case RTAS_LOG_V6_MC_TYPE_TLB: + ext_elog->mc.sub_err_type |= RTAS_LOG_V6_MC_EA_ADDR_PROVIDED; + break; + default: + break; + } +} + +static uint32_t spapr_mce_get_elog_type(PowerPCCPU *cpu, bool recovered, + struct mc_extended_log *ext_elog) +{ + int i; + CPUPPCState *env = &cpu->env; + uint32_t summary; + uint64_t dsisr = env->spr[SPR_DSISR]; + + summary = RTAS_LOG_VERSION_6 | RTAS_LOG_OPTIONAL_PART_PRESENT; + if (recovered) { + summary |= RTAS_LOG_DISPOSITION_FULLY_RECOVERED; + } else { + summary |= RTAS_LOG_DISPOSITION_NOT_RECOVERED; + } + + if (SRR1_MC_LOADSTORE(env->spr[SPR_SRR1])) { + for (i = 0; i < ARRAY_SIZE(mc_derror_table); i++) { + if (!(dsisr & mc_derror_table[i].dsisr_value)) { + continue; + } + + ext_elog->mc.error_type = mc_derror_table[i].error_type; + ext_elog->mc.sub_err_type = mc_derror_table[i].error_subtype; + if (mc_derror_table[i].dar_valid) { + ext_elog->mc.effective_address = cpu_to_be64(env->spr[SPR_DAR]); + spapr_mc_set_ea_provided_flag(ext_elog); + } + + summary |= mc_derror_table[i].initiator + | mc_derror_table[i].severity; + + return summary; + } + } else { + for (i = 0; i < ARRAY_SIZE(mc_ierror_table); i++) { + if ((env->spr[SPR_SRR1] & mc_ierror_table[i].srr1_mask) != + mc_ierror_table[i].srr1_value) { + continue; + } + + ext_elog->mc.error_type = mc_ierror_table[i].error_type; + ext_elog->mc.sub_err_type = mc_ierror_table[i].error_subtype; + if (mc_ierror_table[i].nip_valid) { + ext_elog->mc.effective_address = cpu_to_be64(env->nip); + spapr_mc_set_ea_provided_flag(ext_elog); + } + + summary |= mc_ierror_table[i].initiator + | mc_ierror_table[i].severity; + + return summary; + } + } + + summary |= RTAS_LOG_INITIATOR_CPU; + return summary; +} + +static void spapr_mce_dispatch_elog(SpaprMachineState *spapr, PowerPCCPU *cpu, + bool recovered) +{ + CPUState *cs = CPU(cpu); + CPUPPCState *env = 
&cpu->env; + uint64_t rtas_addr; + struct rtas_error_log log; + struct mc_extended_log *ext_elog; + uint32_t summary; + + ext_elog = g_malloc0(sizeof(*ext_elog)); + summary = spapr_mce_get_elog_type(cpu, recovered, ext_elog); + + log.summary = cpu_to_be32(summary); + log.extended_length = cpu_to_be32(sizeof(*ext_elog)); + + spapr_init_v6hdr(&ext_elog->v6hdr); + ext_elog->mc.hdr.section_id = cpu_to_be16(RTAS_LOG_V6_SECTION_ID_MC); + ext_elog->mc.hdr.section_length = + cpu_to_be16(sizeof(struct rtas_event_log_v6_mc)); + ext_elog->mc.hdr.section_version = 1; + + /* get rtas addr from fdt */ + rtas_addr = spapr_get_rtas_addr(); + if (!rtas_addr) { + if (!recovered) { + error_report( +"FWNMI: Unable to deliver machine check to guest: rtas_addr not found."); + qemu_system_guest_panicked(NULL); + } else { + warn_report( +"FWNMI: Unable to deliver machine check to guest: rtas_addr not found. " +"Machine check recovered."); + } + g_free(ext_elog); + return; + } + + /* + * By taking the interlock, we assume that the MCE will be + * delivered to the guest. CAUTION: don't add anything that could + * prevent the MCE to be delivered after this line, otherwise the + * guest won't be able to release the interlock and ultimately + * hang/crash? + */ + spapr->fwnmi_machine_check_interlock = cpu->vcpu_id; + + stq_be_phys(&address_space_memory, rtas_addr + RTAS_ERROR_LOG_OFFSET, + env->gpr[3]); + cpu_physical_memory_write(rtas_addr + RTAS_ERROR_LOG_OFFSET + + sizeof(env->gpr[3]), &log, sizeof(log)); + cpu_physical_memory_write(rtas_addr + RTAS_ERROR_LOG_OFFSET + + sizeof(env->gpr[3]) + sizeof(log), ext_elog, + sizeof(*ext_elog)); + g_free(ext_elog); + + env->gpr[3] = rtas_addr + RTAS_ERROR_LOG_OFFSET; + + ppc_cpu_do_fwnmi_machine_check(cs, spapr->fwnmi_machine_check_addr); +} + +void spapr_mce_req_event(PowerPCCPU *cpu, bool recovered) +{ + SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine()); + CPUState *cs = CPU(cpu); + int ret; + + if (spapr->fwnmi_machine_check_addr == -1) { + /* Non-FWNMI case, deliver it like an architected CPU interrupt. */ + cs->exception_index = POWERPC_EXCP_MCHECK; + ppc_cpu_do_interrupt(cs); + return; + } + + /* Wait for FWNMI interlock. */ + while (spapr->fwnmi_machine_check_interlock != -1) { + /* + * Check whether the same CPU got machine check error + * while still handling the mc error (i.e., before + * that CPU called "ibm,nmi-interlock") + */ + if (spapr->fwnmi_machine_check_interlock == cpu->vcpu_id) { + if (!recovered) { + error_report( +"FWNMI: Unable to deliver machine check to guest: nested machine check."); + qemu_system_guest_panicked(NULL); + } else { + warn_report( +"FWNMI: Unable to deliver machine check to guest: nested machine check. " +"Machine check recovered."); + } + return; + } + qemu_cond_wait_iothread(&spapr->fwnmi_machine_check_interlock_cond); + if (spapr->fwnmi_machine_check_addr == -1) { + /* + * If the machine was reset while waiting for the interlock, + * abort the delivery. The machine check applies to a context + * that no longer exists, so it wouldn't make sense to deliver + * it now. + */ + return; + } + } + + /* + * Try to block migration while FWNMI is being handled, so the + * machine check handler runs where the information passed to it + * actually makes sense. This shouldn't actually block migration, + * only delay it slightly, assuming migration is retried. If the + * attempt to block fails, carry on. Unfortunately, it always + * fails when running with -only-migrate. 
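
For reference, the three guest-memory writes in spapr_mce_dispatch_elog() above produce the following layout at rtas_addr + RTAS_ERROR_LOG_OFFSET (offsets follow directly from the code; the block base is what ends up in the guest's r3):

    /*
     * +0x00                saved r3 (8 bytes, big-endian)
     * +0x08                struct rtas_error_log (summary, extended_length)
     * +0x08 + sizeof(log)  struct mc_extended_log (v6 header + MC section)
     */
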
A proper interface to + * delay migration completion for a bit could avoid that. + */ + ret = migrate_add_blocker(spapr->fwnmi_migration_blocker, NULL); + if (ret == -EBUSY) { + warn_report("Received a fwnmi while migration was in progress"); + } + + spapr_mce_dispatch_elog(spapr, cpu, recovered); +} + +static void check_exception(PowerPCCPU *cpu, SpaprMachineState *spapr, + uint32_t token, uint32_t nargs, + target_ulong args, + uint32_t nret, target_ulong rets) +{ + uint32_t mask, buf, len, event_len; + SpaprEventLogEntry *event; + struct rtas_error_log header; + int i; + + if ((nargs < 6) || (nargs > 7) || nret != 1) { + rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); + return; + } + + mask = rtas_ld(args, 2); + buf = rtas_ld(args, 4); + len = rtas_ld(args, 5); + + event = rtas_event_log_dequeue(spapr, mask); + if (!event) { + goto out_no_events; + } + + event_len = event->extended_length + sizeof(header); + + if (event_len < len) { + len = event_len; + } + + header.summary = cpu_to_be32(event->summary); + header.extended_length = cpu_to_be32(event->extended_length); + cpu_physical_memory_write(buf, &header, sizeof(header)); + cpu_physical_memory_write(buf + sizeof(header), event->extended_log, + event->extended_length); + rtas_st(rets, 0, RTAS_OUT_SUCCESS); + g_free(event->extended_log); + g_free(event); + + /* according to PAPR+, the IRQ must be left asserted, or re-asserted, if + * there are still pending events to be fetched via check-exception. We + * do the latter here, since our code relies on edge-triggered + * interrupts. + */ + for (i = 0; i < EVENT_CLASS_MAX; i++) { + if (rtas_event_log_contains(spapr, EVENT_CLASS_MASK(i))) { + const SpaprEventSource *source = + spapr_event_sources_get_source(spapr->event_sources, i); + + g_assert(source->enabled); + qemu_irq_pulse(spapr_qirq(spapr, source->irq)); + } + } + + return; + +out_no_events: + rtas_st(rets, 0, RTAS_OUT_NO_ERRORS_FOUND); +} + +static void event_scan(PowerPCCPU *cpu, SpaprMachineState *spapr, + uint32_t token, uint32_t nargs, + target_ulong args, + uint32_t nret, target_ulong rets) +{ + int i; + if (nargs != 4 || nret != 1) { + rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); + return; + } + + for (i = 0; i < EVENT_CLASS_MAX; i++) { + if (rtas_event_log_contains(spapr, EVENT_CLASS_MASK(i))) { + const SpaprEventSource *source = + spapr_event_sources_get_source(spapr->event_sources, i); + + g_assert(source->enabled); + qemu_irq_pulse(spapr_qirq(spapr, source->irq)); + } + } + + rtas_st(rets, 0, RTAS_OUT_NO_ERRORS_FOUND); +} + +void spapr_clear_pending_events(SpaprMachineState *spapr) +{ + SpaprEventLogEntry *entry = NULL, *next_entry; + + QTAILQ_FOREACH_SAFE(entry, &spapr->pending_events, next, next_entry) { + QTAILQ_REMOVE(&spapr->pending_events, entry, next); + g_free(entry->extended_log); + g_free(entry); + } +} + +void spapr_clear_pending_hotplug_events(SpaprMachineState *spapr) +{ + SpaprEventLogEntry *entry = NULL, *next_entry; + + QTAILQ_FOREACH_SAFE(entry, &spapr->pending_events, next, next_entry) { + if (spapr_event_log_entry_type(entry) == RTAS_LOG_TYPE_HOTPLUG) { + QTAILQ_REMOVE(&spapr->pending_events, entry, next); + g_free(entry->extended_log); + g_free(entry); + } + } +} + +void spapr_events_init(SpaprMachineState *spapr) +{ + int epow_irq = SPAPR_IRQ_EPOW; + + if (SPAPR_MACHINE_GET_CLASS(spapr)->legacy_irq_allocation) { + epow_irq = spapr_irq_findone(spapr, &error_fatal); + } + + spapr_irq_claim(spapr, epow_irq, false, &error_fatal); + + QTAILQ_INIT(&spapr->pending_events); + + spapr->event_sources = 
spapr_event_sources_new(); + + spapr_event_sources_register(spapr->event_sources, EVENT_CLASS_EPOW, + epow_irq); + + /* NOTE: if machine supports modern/dedicated hotplug event source, + * we add it to the device-tree unconditionally. This means we may + * have cases where the source is enabled in QEMU, but unused by the + * guest because it does not support modern hotplug events, so we + * take care to rely on checking for negotiation of OV5_HP_EVT option + * before attempting to use it to signal events, rather than simply + * checking that it's enabled. + */ + if (spapr->use_hotplug_event_source) { + int hp_irq = SPAPR_IRQ_HOTPLUG; + + if (SPAPR_MACHINE_GET_CLASS(spapr)->legacy_irq_allocation) { + hp_irq = spapr_irq_findone(spapr, &error_fatal); + } + + spapr_irq_claim(spapr, hp_irq, false, &error_fatal); + + spapr_event_sources_register(spapr->event_sources, EVENT_CLASS_HOT_PLUG, + hp_irq); + } + + spapr->epow_notifier.notify = spapr_powerdown_req; + qemu_register_powerdown_notifier(&spapr->epow_notifier); + spapr_rtas_register(RTAS_CHECK_EXCEPTION, "check-exception", + check_exception); + spapr_rtas_register(RTAS_EVENT_SCAN, "event-scan", event_scan); +} diff --git a/hw/ppc/spapr_hcall.c b/hw/ppc/spapr_hcall.c new file mode 100644 index 000000000..222c1b6bb --- /dev/null +++ b/hw/ppc/spapr_hcall.c @@ -0,0 +1,1557 @@ +#include "qemu/osdep.h" +#include "qemu/cutils.h" +#include "qapi/error.h" +#include "sysemu/hw_accel.h" +#include "sysemu/runstate.h" +#include "qemu/log.h" +#include "qemu/main-loop.h" +#include "qemu/module.h" +#include "qemu/error-report.h" +#include "exec/exec-all.h" +#include "helper_regs.h" +#include "hw/ppc/spapr.h" +#include "hw/ppc/spapr_cpu_core.h" +#include "mmu-hash64.h" +#include "cpu-models.h" +#include "trace.h" +#include "kvm_ppc.h" +#include "hw/ppc/fdt.h" +#include "hw/ppc/spapr_ovec.h" +#include "hw/ppc/spapr_numa.h" +#include "mmu-book3s-v3.h" +#include "hw/mem/memory-device.h" + +bool is_ram_address(SpaprMachineState *spapr, hwaddr addr) +{ + MachineState *machine = MACHINE(spapr); + DeviceMemoryState *dms = machine->device_memory; + + if (addr < machine->ram_size) { + return true; + } + if ((addr >= dms->base) + && ((addr - dms->base) < memory_region_size(&dms->mr))) { + return true; + } + + return false; +} + +/* Convert a return code from the KVM ioctl()s implementing resize HPT + * into a PAPR hypercall return code */ +static target_ulong resize_hpt_convert_rc(int ret) +{ + if (ret >= 100000) { + return H_LONG_BUSY_ORDER_100_SEC; + } else if (ret >= 10000) { + return H_LONG_BUSY_ORDER_10_SEC; + } else if (ret >= 1000) { + return H_LONG_BUSY_ORDER_1_SEC; + } else if (ret >= 100) { + return H_LONG_BUSY_ORDER_100_MSEC; + } else if (ret >= 10) { + return H_LONG_BUSY_ORDER_10_MSEC; + } else if (ret > 0) { + return H_LONG_BUSY_ORDER_1_MSEC; + } + + switch (ret) { + case 0: + return H_SUCCESS; + case -EPERM: + return H_AUTHORITY; + case -EINVAL: + return H_PARAMETER; + case -ENXIO: + return H_CLOSED; + case -ENOSPC: + return H_PTEG_FULL; + case -EBUSY: + return H_BUSY; + case -ENOMEM: + return H_NO_MEM; + default: + return H_HARDWARE; + } +} + +static target_ulong h_resize_hpt_prepare(PowerPCCPU *cpu, + SpaprMachineState *spapr, + target_ulong opcode, + target_ulong *args) +{ + target_ulong flags = args[0]; + int shift = args[1]; + uint64_t current_ram_size; + int rc; + + if (spapr->resize_hpt == SPAPR_RESIZE_HPT_DISABLED) { + return H_AUTHORITY; + } + + if (!spapr->htab_shift) { + /* Radix guest, no HPT */ + return H_NOT_AVAILABLE; + } + + 
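
resize_hpt_convert_rc() above appears to treat a positive kernel return as a suggested retry delay in milliseconds and buckets it into PAPR's H_LONG_BUSY_* bands by decimal magnitude. A few worked conversions:

    resize_hpt_convert_rc(12345);    /* >= 10000      -> H_LONG_BUSY_ORDER_10_SEC */
    resize_hpt_convert_rc(7);        /* > 0 and < 10  -> H_LONG_BUSY_ORDER_1_MSEC */
    resize_hpt_convert_rc(-EBUSY);   /* errno mapping -> H_BUSY                   */
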
trace_spapr_h_resize_hpt_prepare(flags, shift); + + if (flags != 0) { + return H_PARAMETER; + } + + if (shift && ((shift < 18) || (shift > 46))) { + return H_PARAMETER; + } + + current_ram_size = MACHINE(spapr)->ram_size + get_plugged_memory_size(); + + /* We only allow the guest to allocate an HPT one order above what + * we'd normally give them (to stop a small guest claiming a huge + * chunk of resources in the HPT */ + if (shift > (spapr_hpt_shift_for_ramsize(current_ram_size) + 1)) { + return H_RESOURCE; + } + + rc = kvmppc_resize_hpt_prepare(cpu, flags, shift); + if (rc != -ENOSYS) { + return resize_hpt_convert_rc(rc); + } + + if (kvm_enabled()) { + return H_HARDWARE; + } + + return softmmu_resize_hpt_prepare(cpu, spapr, shift); +} + +static void do_push_sregs_to_kvm_pr(CPUState *cs, run_on_cpu_data data) +{ + int ret; + + cpu_synchronize_state(cs); + + ret = kvmppc_put_books_sregs(POWERPC_CPU(cs)); + if (ret < 0) { + error_report("failed to push sregs to KVM: %s", strerror(-ret)); + exit(1); + } +} + +void push_sregs_to_kvm_pr(SpaprMachineState *spapr) +{ + CPUState *cs; + + /* + * This is a hack for the benefit of KVM PR - it abuses the SDR1 + * slot in kvm_sregs to communicate the userspace address of the + * HPT + */ + if (!kvm_enabled() || !spapr->htab) { + return; + } + + CPU_FOREACH(cs) { + run_on_cpu(cs, do_push_sregs_to_kvm_pr, RUN_ON_CPU_NULL); + } +} + +static target_ulong h_resize_hpt_commit(PowerPCCPU *cpu, + SpaprMachineState *spapr, + target_ulong opcode, + target_ulong *args) +{ + target_ulong flags = args[0]; + target_ulong shift = args[1]; + int rc; + + if (spapr->resize_hpt == SPAPR_RESIZE_HPT_DISABLED) { + return H_AUTHORITY; + } + + if (!spapr->htab_shift) { + /* Radix guest, no HPT */ + return H_NOT_AVAILABLE; + } + + trace_spapr_h_resize_hpt_commit(flags, shift); + + rc = kvmppc_resize_hpt_commit(cpu, flags, shift); + if (rc != -ENOSYS) { + rc = resize_hpt_convert_rc(rc); + if (rc == H_SUCCESS) { + /* Need to set the new htab_shift in the machine state */ + spapr->htab_shift = shift; + } + return rc; + } + + if (kvm_enabled()) { + return H_HARDWARE; + } + + return softmmu_resize_hpt_commit(cpu, spapr, flags, shift); +} + + + +static target_ulong h_set_sprg0(PowerPCCPU *cpu, SpaprMachineState *spapr, + target_ulong opcode, target_ulong *args) +{ + cpu_synchronize_state(CPU(cpu)); + cpu->env.spr[SPR_SPRG0] = args[0]; + + return H_SUCCESS; +} + +static target_ulong h_set_dabr(PowerPCCPU *cpu, SpaprMachineState *spapr, + target_ulong opcode, target_ulong *args) +{ + if (!ppc_has_spr(cpu, SPR_DABR)) { + return H_HARDWARE; /* DABR register not available */ + } + cpu_synchronize_state(CPU(cpu)); + + if (ppc_has_spr(cpu, SPR_DABRX)) { + cpu->env.spr[SPR_DABRX] = 0x3; /* Use Problem and Privileged state */ + } else if (!(args[0] & 0x4)) { /* Breakpoint Translation set? 
*/ + return H_RESERVED_DABR; + } + + cpu->env.spr[SPR_DABR] = args[0]; + return H_SUCCESS; +} + +static target_ulong h_set_xdabr(PowerPCCPU *cpu, SpaprMachineState *spapr, + target_ulong opcode, target_ulong *args) +{ + target_ulong dabrx = args[1]; + + if (!ppc_has_spr(cpu, SPR_DABR) || !ppc_has_spr(cpu, SPR_DABRX)) { + return H_HARDWARE; + } + + if ((dabrx & ~0xfULL) != 0 || (dabrx & H_DABRX_HYPERVISOR) != 0 + || (dabrx & (H_DABRX_KERNEL | H_DABRX_USER)) == 0) { + return H_PARAMETER; + } + + cpu_synchronize_state(CPU(cpu)); + cpu->env.spr[SPR_DABRX] = dabrx; + cpu->env.spr[SPR_DABR] = args[0]; + + return H_SUCCESS; +} + +static target_ulong h_page_init(PowerPCCPU *cpu, SpaprMachineState *spapr, + target_ulong opcode, target_ulong *args) +{ + target_ulong flags = args[0]; + hwaddr dst = args[1]; + hwaddr src = args[2]; + hwaddr len = TARGET_PAGE_SIZE; + uint8_t *pdst, *psrc; + target_long ret = H_SUCCESS; + + if (flags & ~(H_ICACHE_SYNCHRONIZE | H_ICACHE_INVALIDATE + | H_COPY_PAGE | H_ZERO_PAGE)) { + qemu_log_mask(LOG_UNIMP, "h_page_init: Bad flags (" TARGET_FMT_lx "\n", + flags); + return H_PARAMETER; + } + + /* Map-in destination */ + if (!is_ram_address(spapr, dst) || (dst & ~TARGET_PAGE_MASK) != 0) { + return H_PARAMETER; + } + pdst = cpu_physical_memory_map(dst, &len, true); + if (!pdst || len != TARGET_PAGE_SIZE) { + return H_PARAMETER; + } + + if (flags & H_COPY_PAGE) { + /* Map-in source, copy to destination, and unmap source again */ + if (!is_ram_address(spapr, src) || (src & ~TARGET_PAGE_MASK) != 0) { + ret = H_PARAMETER; + goto unmap_out; + } + psrc = cpu_physical_memory_map(src, &len, false); + if (!psrc || len != TARGET_PAGE_SIZE) { + ret = H_PARAMETER; + goto unmap_out; + } + memcpy(pdst, psrc, len); + cpu_physical_memory_unmap(psrc, len, 0, len); + } else if (flags & H_ZERO_PAGE) { + memset(pdst, 0, len); /* Just clear the destination page */ + } + + if (kvm_enabled() && (flags & H_ICACHE_SYNCHRONIZE) != 0) { + kvmppc_dcbst_range(cpu, pdst, len); + } + if (flags & (H_ICACHE_SYNCHRONIZE | H_ICACHE_INVALIDATE)) { + if (kvm_enabled()) { + kvmppc_icbi_range(cpu, pdst, len); + } else { + tb_flush(CPU(cpu)); + } + } + +unmap_out: + cpu_physical_memory_unmap(pdst, TARGET_PAGE_SIZE, 1, len); + return ret; +} + +#define FLAGS_REGISTER_VPA 0x0000200000000000ULL +#define FLAGS_REGISTER_DTL 0x0000400000000000ULL +#define FLAGS_REGISTER_SLBSHADOW 0x0000600000000000ULL +#define FLAGS_DEREGISTER_VPA 0x0000a00000000000ULL +#define FLAGS_DEREGISTER_DTL 0x0000c00000000000ULL +#define FLAGS_DEREGISTER_SLBSHADOW 0x0000e00000000000ULL + +static target_ulong register_vpa(PowerPCCPU *cpu, target_ulong vpa) +{ + CPUState *cs = CPU(cpu); + CPUPPCState *env = &cpu->env; + SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu); + uint16_t size; + uint8_t tmp; + + if (vpa == 0) { + hcall_dprintf("Can't cope with registering a VPA at logical 0\n"); + return H_HARDWARE; + } + + if (vpa % env->dcache_line_size) { + return H_PARAMETER; + } + /* FIXME: bounds check the address */ + + size = lduw_be_phys(cs->as, vpa + 0x4); + + if (size < VPA_MIN_SIZE) { + return H_PARAMETER; + } + + /* VPA is not allowed to cross a page boundary */ + if ((vpa / 4096) != ((vpa + size - 1) / 4096)) { + return H_PARAMETER; + } + + spapr_cpu->vpa_addr = vpa; + + tmp = ldub_phys(cs->as, spapr_cpu->vpa_addr + VPA_SHARED_PROC_OFFSET); + tmp |= VPA_SHARED_PROC_VAL; + stb_phys(cs->as, spapr_cpu->vpa_addr + VPA_SHARED_PROC_OFFSET, tmp); + + return H_SUCCESS; +} + +static target_ulong deregister_vpa(PowerPCCPU *cpu, target_ulong vpa) +{ + 
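
register_vpa() above enforces four PAPR constraints on the guest-supplied address: non-zero, cache-line aligned, at least VPA_MIN_SIZE bytes, and no 4 KiB page crossing. Condensed into a predicate (an illustrative helper, not part of the patch):

    static bool vpa_ok(uint64_t vpa, uint16_t size, unsigned dcache_line)
    {
        return vpa != 0                                   /* no VPA at logical 0 */
            && (vpa % dcache_line) == 0                   /* cache-line aligned  */
            && size >= VPA_MIN_SIZE
            && (vpa / 4096) == ((vpa + size - 1) / 4096); /* single 4K page      */
    }
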
SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu); + + if (spapr_cpu->slb_shadow_addr) { + return H_RESOURCE; + } + + if (spapr_cpu->dtl_addr) { + return H_RESOURCE; + } + + spapr_cpu->vpa_addr = 0; + return H_SUCCESS; +} + +static target_ulong register_slb_shadow(PowerPCCPU *cpu, target_ulong addr) +{ + SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu); + uint32_t size; + + if (addr == 0) { + hcall_dprintf("Can't cope with SLB shadow at logical 0\n"); + return H_HARDWARE; + } + + size = ldl_be_phys(CPU(cpu)->as, addr + 0x4); + if (size < 0x8) { + return H_PARAMETER; + } + + if ((addr / 4096) != ((addr + size - 1) / 4096)) { + return H_PARAMETER; + } + + if (!spapr_cpu->vpa_addr) { + return H_RESOURCE; + } + + spapr_cpu->slb_shadow_addr = addr; + spapr_cpu->slb_shadow_size = size; + + return H_SUCCESS; +} + +static target_ulong deregister_slb_shadow(PowerPCCPU *cpu, target_ulong addr) +{ + SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu); + + spapr_cpu->slb_shadow_addr = 0; + spapr_cpu->slb_shadow_size = 0; + return H_SUCCESS; +} + +static target_ulong register_dtl(PowerPCCPU *cpu, target_ulong addr) +{ + SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu); + uint32_t size; + + if (addr == 0) { + hcall_dprintf("Can't cope with DTL at logical 0\n"); + return H_HARDWARE; + } + + size = ldl_be_phys(CPU(cpu)->as, addr + 0x4); + + if (size < 48) { + return H_PARAMETER; + } + + if (!spapr_cpu->vpa_addr) { + return H_RESOURCE; + } + + spapr_cpu->dtl_addr = addr; + spapr_cpu->dtl_size = size; + + return H_SUCCESS; +} + +static target_ulong deregister_dtl(PowerPCCPU *cpu, target_ulong addr) +{ + SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu); + + spapr_cpu->dtl_addr = 0; + spapr_cpu->dtl_size = 0; + + return H_SUCCESS; +} + +static target_ulong h_register_vpa(PowerPCCPU *cpu, SpaprMachineState *spapr, + target_ulong opcode, target_ulong *args) +{ + target_ulong flags = args[0]; + target_ulong procno = args[1]; + target_ulong vpa = args[2]; + target_ulong ret = H_PARAMETER; + PowerPCCPU *tcpu; + + tcpu = spapr_find_cpu(procno); + if (!tcpu) { + return H_PARAMETER; + } + + switch (flags) { + case FLAGS_REGISTER_VPA: + ret = register_vpa(tcpu, vpa); + break; + + case FLAGS_DEREGISTER_VPA: + ret = deregister_vpa(tcpu, vpa); + break; + + case FLAGS_REGISTER_SLBSHADOW: + ret = register_slb_shadow(tcpu, vpa); + break; + + case FLAGS_DEREGISTER_SLBSHADOW: + ret = deregister_slb_shadow(tcpu, vpa); + break; + + case FLAGS_REGISTER_DTL: + ret = register_dtl(tcpu, vpa); + break; + + case FLAGS_DEREGISTER_DTL: + ret = deregister_dtl(tcpu, vpa); + break; + } + + return ret; +} + +static target_ulong h_cede(PowerPCCPU *cpu, SpaprMachineState *spapr, + target_ulong opcode, target_ulong *args) +{ + CPUPPCState *env = &cpu->env; + CPUState *cs = CPU(cpu); + SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu); + + env->msr |= (1ULL << MSR_EE); + hreg_compute_hflags(env); + + if (spapr_cpu->prod) { + spapr_cpu->prod = false; + return H_SUCCESS; + } + + if (!cpu_has_work(cs)) { + cs->halted = 1; + cs->exception_index = EXCP_HLT; + cs->exit_request = 1; + } + + return H_SUCCESS; +} + +/* + * Confer to self, aka join. Cede could use the same pattern as well, if + * EXCP_HLT can be changed to EXCP_HALTED.
+ */ +static target_ulong h_confer_self(PowerPCCPU *cpu) +{ + CPUState *cs = CPU(cpu); + SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu); + + if (spapr_cpu->prod) { + spapr_cpu->prod = false; + return H_SUCCESS; + } + cs->halted = 1; + cs->exception_index = EXCP_HALTED; + cs->exit_request = 1; + + return H_SUCCESS; +} + +static target_ulong h_join(PowerPCCPU *cpu, SpaprMachineState *spapr, + target_ulong opcode, target_ulong *args) +{ + CPUPPCState *env = &cpu->env; + CPUState *cs; + bool last_unjoined = true; + + if (env->msr & (1ULL << MSR_EE)) { + return H_BAD_MODE; + } + + /* + * Must not join the last CPU running. Interestingly, no such restriction + * for H_CONFER-to-self, but that is probably not intended to be used + * when H_JOIN is available. + */ + CPU_FOREACH(cs) { + PowerPCCPU *c = POWERPC_CPU(cs); + CPUPPCState *e = &c->env; + if (c == cpu) { + continue; + } + + /* Don't have a way to indicate joined, so use halted && MSR[EE]=0 */ + if (!cs->halted || (e->msr & (1ULL << MSR_EE))) { + last_unjoined = false; + break; + } + } + if (last_unjoined) { + return H_CONTINUE; + } + + return h_confer_self(cpu); +} + +static target_ulong h_confer(PowerPCCPU *cpu, SpaprMachineState *spapr, + target_ulong opcode, target_ulong *args) +{ + target_long target = args[0]; + uint32_t dispatch = args[1]; + CPUState *cs = CPU(cpu); + SpaprCpuState *spapr_cpu; + + /* + * -1 means confer to all other CPUs without dispatch counter check, + * otherwise it's a targeted confer. + */ + if (target != -1) { + PowerPCCPU *target_cpu = spapr_find_cpu(target); + uint32_t target_dispatch; + + if (!target_cpu) { + return H_PARAMETER; + } + + /* + * target == self is a special case, we wait until prodded, without + * dispatch counter check. + */ + if (cpu == target_cpu) { + return h_confer_self(cpu); + } + + spapr_cpu = spapr_cpu_state(target_cpu); + if (!spapr_cpu->vpa_addr || ((dispatch & 1) == 0)) { + return H_SUCCESS; + } + + target_dispatch = ldl_be_phys(cs->as, + spapr_cpu->vpa_addr + VPA_DISPATCH_COUNTER); + if (target_dispatch != dispatch) { + return H_SUCCESS; + } + + /* + * The targeted confer does not do anything special beyond yielding + * the current vCPU, but even this should be better than nothing. + * At least for single-threaded tcg, it gives the target a chance to + * run before we run again. Multi-threaded tcg does not really do + * anything with EXCP_YIELD yet. 
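
The dispatch check in h_confer() above relies on the PAPR convention that a vCPU's dispatch counter in its VPA is incremented at every dispatch boundary, so an odd value means the vCPU is currently preempted. A worked reading (the sampled value is illustrative, and vpa_addr stands for the target's spapr_cpu->vpa_addr):

    uint32_t seen = 0x1235;   /* guest sampled the lock holder's counter */
    if ((seen & 1) &&
        ldl_be_phys(cs->as, vpa_addr + VPA_DISPATCH_COUNTER) == seen) {
        /* holder is still preempted: yielding via EXCP_YIELD is useful */
    }
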
+ */ + } + + cs->exception_index = EXCP_YIELD; + cs->exit_request = 1; + cpu_loop_exit(cs); + + return H_SUCCESS; +} + +static target_ulong h_prod(PowerPCCPU *cpu, SpaprMachineState *spapr, + target_ulong opcode, target_ulong *args) +{ + target_long target = args[0]; + PowerPCCPU *tcpu; + CPUState *cs; + SpaprCpuState *spapr_cpu; + + tcpu = spapr_find_cpu(target); + cs = CPU(tcpu); + if (!cs) { + return H_PARAMETER; + } + + spapr_cpu = spapr_cpu_state(tcpu); + spapr_cpu->prod = true; + cs->halted = 0; + qemu_cpu_kick(cs); + + return H_SUCCESS; +} + +static target_ulong h_rtas(PowerPCCPU *cpu, SpaprMachineState *spapr, + target_ulong opcode, target_ulong *args) +{ + target_ulong rtas_r3 = args[0]; + uint32_t token = rtas_ld(rtas_r3, 0); + uint32_t nargs = rtas_ld(rtas_r3, 1); + uint32_t nret = rtas_ld(rtas_r3, 2); + + return spapr_rtas_call(cpu, spapr, token, nargs, rtas_r3 + 12, + nret, rtas_r3 + 12 + 4*nargs); +} + +static target_ulong h_logical_load(PowerPCCPU *cpu, SpaprMachineState *spapr, + target_ulong opcode, target_ulong *args) +{ + CPUState *cs = CPU(cpu); + target_ulong size = args[0]; + target_ulong addr = args[1]; + + switch (size) { + case 1: + args[0] = ldub_phys(cs->as, addr); + return H_SUCCESS; + case 2: + args[0] = lduw_phys(cs->as, addr); + return H_SUCCESS; + case 4: + args[0] = ldl_phys(cs->as, addr); + return H_SUCCESS; + case 8: + args[0] = ldq_phys(cs->as, addr); + return H_SUCCESS; + } + return H_PARAMETER; +} + +static target_ulong h_logical_store(PowerPCCPU *cpu, SpaprMachineState *spapr, + target_ulong opcode, target_ulong *args) +{ + CPUState *cs = CPU(cpu); + + target_ulong size = args[0]; + target_ulong addr = args[1]; + target_ulong val = args[2]; + + switch (size) { + case 1: + stb_phys(cs->as, addr, val); + return H_SUCCESS; + case 2: + stw_phys(cs->as, addr, val); + return H_SUCCESS; + case 4: + stl_phys(cs->as, addr, val); + return H_SUCCESS; + case 8: + stq_phys(cs->as, addr, val); + return H_SUCCESS; + } + return H_PARAMETER; +} + +static target_ulong h_logical_memop(PowerPCCPU *cpu, SpaprMachineState *spapr, + target_ulong opcode, target_ulong *args) +{ + CPUState *cs = CPU(cpu); + + target_ulong dst = args[0]; /* Destination address */ + target_ulong src = args[1]; /* Source address */ + target_ulong esize = args[2]; /* Element size (0=1,1=2,2=4,3=8) */ + target_ulong count = args[3]; /* Element count */ + target_ulong op = args[4]; /* 0 = copy, 1 = invert */ + uint64_t tmp; + unsigned int mask = (1 << esize) - 1; + int step = 1 << esize; + + if (count > 0x80000000) { + return H_PARAMETER; + } + + if ((dst & mask) || (src & mask) || (op > 1)) { + return H_PARAMETER; + } + + if (dst >= src && dst < (src + (count << esize))) { + dst = dst + ((count - 1) << esize); + src = src + ((count - 1) << esize); + step = -step; + } + + while (count--) { + switch (esize) { + case 0: + tmp = ldub_phys(cs->as, src); + break; + case 1: + tmp = lduw_phys(cs->as, src); + break; + case 2: + tmp = ldl_phys(cs->as, src); + break; + case 3: + tmp = ldq_phys(cs->as, src); + break; + default: + return H_PARAMETER; + } + if (op == 1) { + tmp = ~tmp; + } + switch (esize) { + case 0: + stb_phys(cs->as, dst, tmp); + break; + case 1: + stw_phys(cs->as, dst, tmp); + break; + case 2: + stl_phys(cs->as, dst, tmp); + break; + case 3: + stq_phys(cs->as, dst, tmp); + break; + } + dst = dst + step; + src = src + step; + } + + return H_SUCCESS; +} + +static target_ulong h_logical_icbi(PowerPCCPU *cpu, SpaprMachineState *spapr, + target_ulong opcode, target_ulong *args) +{ + /* Nothing to 
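
h_logical_memop() above handles overlapping ranges the way memmove() does: if the destination starts inside the source window, it copies from the last element backwards. Worked example with esize = 2 (4-byte elements) and count = 4:

    /* src = 0x1000, dst = 0x1004, source window = [0x1000, 0x1010)
     * dst overlaps, so: dst -> 0x1010, src -> 0x100c, step = -4
     * copies: 0x100c->0x1010, 0x1008->0x100c, 0x1004->0x1008, 0x1000->0x1004
     */
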
do on emulation, KVM will trap this in the kernel */ + return H_SUCCESS; +} + +static target_ulong h_logical_dcbf(PowerPCCPU *cpu, SpaprMachineState *spapr, + target_ulong opcode, target_ulong *args) +{ + /* Nothing to do on emulation, KVM will trap this in the kernel */ + return H_SUCCESS; +} + +static target_ulong h_set_mode_resource_le(PowerPCCPU *cpu, + SpaprMachineState *spapr, + target_ulong mflags, + target_ulong value1, + target_ulong value2) +{ + if (value1) { + return H_P3; + } + if (value2) { + return H_P4; + } + + switch (mflags) { + case H_SET_MODE_ENDIAN_BIG: + spapr_set_all_lpcrs(0, LPCR_ILE); + spapr_pci_switch_vga(spapr, true); + return H_SUCCESS; + + case H_SET_MODE_ENDIAN_LITTLE: + spapr_set_all_lpcrs(LPCR_ILE, LPCR_ILE); + spapr_pci_switch_vga(spapr, false); + return H_SUCCESS; + } + + return H_UNSUPPORTED_FLAG; +} + +static target_ulong h_set_mode_resource_addr_trans_mode(PowerPCCPU *cpu, + target_ulong mflags, + target_ulong value1, + target_ulong value2) +{ + PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu); + + if (!(pcc->insns_flags2 & PPC2_ISA207S)) { + return H_P2; + } + if (value1) { + return H_P3; + } + if (value2) { + return H_P4; + } + + if (mflags == 1) { + /* AIL=1 is reserved in POWER8/POWER9/POWER10 */ + return H_UNSUPPORTED_FLAG; + } + + if (mflags == 2 && (pcc->insns_flags2 & PPC2_ISA310)) { + /* AIL=2 is reserved in POWER10 (ISA v3.1) */ + return H_UNSUPPORTED_FLAG; + } + + spapr_set_all_lpcrs(mflags << LPCR_AIL_SHIFT, LPCR_AIL); + + return H_SUCCESS; +} + +static target_ulong h_set_mode(PowerPCCPU *cpu, SpaprMachineState *spapr, + target_ulong opcode, target_ulong *args) +{ + target_ulong resource = args[1]; + target_ulong ret = H_P2; + + switch (resource) { + case H_SET_MODE_RESOURCE_LE: + ret = h_set_mode_resource_le(cpu, spapr, args[0], args[2], args[3]); + break; + case H_SET_MODE_RESOURCE_ADDR_TRANS_MODE: + ret = h_set_mode_resource_addr_trans_mode(cpu, args[0], + args[2], args[3]); + break; + } + + return ret; +} + +static target_ulong h_clean_slb(PowerPCCPU *cpu, SpaprMachineState *spapr, + target_ulong opcode, target_ulong *args) +{ + qemu_log_mask(LOG_UNIMP, "Unimplemented SPAPR hcall 0x"TARGET_FMT_lx"%s\n", + opcode, " (H_CLEAN_SLB)"); + return H_FUNCTION; +} + +static target_ulong h_invalidate_pid(PowerPCCPU *cpu, SpaprMachineState *spapr, + target_ulong opcode, target_ulong *args) +{ + qemu_log_mask(LOG_UNIMP, "Unimplemented SPAPR hcall 0x"TARGET_FMT_lx"%s\n", + opcode, " (H_INVALIDATE_PID)"); + return H_FUNCTION; +} + +static void spapr_check_setup_free_hpt(SpaprMachineState *spapr, + uint64_t patbe_old, uint64_t patbe_new) +{ + /* + * We have 4 Options: + * HASH->HASH || RADIX->RADIX || NOTHING->RADIX : Do Nothing + * HASH->RADIX : Free HPT + * RADIX->HASH : Allocate HPT + * NOTHING->HASH : Allocate HPT + * Note: NOTHING implies the case where we said the guest could choose + * later and so assumed radix and now it's called H_REG_PROC_TBL + */ + + if ((patbe_old & PATE1_GR) == (patbe_new & PATE1_GR)) { + /* We assume RADIX, so this catches all the "Do Nothing" cases */ + } else if (!(patbe_old & PATE1_GR)) { + /* HASH->RADIX : Free HPT */ + spapr_free_hpt(spapr); + } else if (!(patbe_new & PATE1_GR)) { + /* RADIX->HASH || NOTHING->HASH : Allocate HPT */ + spapr_setup_hpt(spapr); + } + return; +} + +#define FLAGS_MASK 0x01FULL +#define FLAG_MODIFY 0x10 +#define FLAG_REGISTER 0x08 +#define FLAG_RADIX 0x04 +#define FLAG_HASH_PROC_TBL 0x02 +#define FLAG_GTSE 0x01 + +static target_ulong h_register_process_table(PowerPCCPU *cpu, + 
SpaprMachineState *spapr, + target_ulong opcode, + target_ulong *args) +{ + target_ulong flags = args[0]; + target_ulong proc_tbl = args[1]; + target_ulong page_size = args[2]; + target_ulong table_size = args[3]; + target_ulong update_lpcr = 0; + uint64_t cproc; + + if (flags & ~FLAGS_MASK) { /* Check no reserved bits are set */ + return H_PARAMETER; + } + if (flags & FLAG_MODIFY) { + if (flags & FLAG_REGISTER) { + if (flags & FLAG_RADIX) { /* Register new RADIX process table */ + if (proc_tbl & 0xfff || proc_tbl >> 60) { + return H_P2; + } else if (page_size) { + return H_P3; + } else if (table_size > 24) { + return H_P4; + } + cproc = PATE1_GR | proc_tbl | table_size; + } else { /* Register new HPT process table */ + if (flags & FLAG_HASH_PROC_TBL) { /* Hash with Segment Tables */ + /* TODO - Not Supported */ + /* Technically caused by flag bits => H_PARAMETER */ + return H_PARAMETER; + } else { /* Hash with SLB */ + if (proc_tbl >> 38) { + return H_P2; + } else if (page_size & ~0x7) { + return H_P3; + } else if (table_size > 24) { + return H_P4; + } + } + cproc = (proc_tbl << 25) | page_size << 5 | table_size; + } + + } else { /* Deregister current process table */ + /* + * Set to benign value: (current GR) | 0. This allows + * deregistration in KVM to succeed even if the radix bit + * in flags doesn't match the radix bit in the old PATE. + */ + cproc = spapr->patb_entry & PATE1_GR; + } + } else { /* Maintain current registration */ + if (!(flags & FLAG_RADIX) != !(spapr->patb_entry & PATE1_GR)) { + /* Technically caused by flag bits => H_PARAMETER */ + return H_PARAMETER; /* Existing Process Table Mismatch */ + } + cproc = spapr->patb_entry; + } + + /* Check if we need to setup OR free the hpt */ + spapr_check_setup_free_hpt(spapr, spapr->patb_entry, cproc); + + spapr->patb_entry = cproc; /* Save new process table */ + + /* Update the UPRT, HR and GTSE bits in the LPCR for all cpus */ + if (flags & FLAG_RADIX) /* Radix must use process tables, also set HR */ + update_lpcr |= (LPCR_UPRT | LPCR_HR); + else if (flags & FLAG_HASH_PROC_TBL) /* Hash with process tables */ + update_lpcr |= LPCR_UPRT; + if (flags & FLAG_GTSE) /* Guest translation shootdown enable */ + update_lpcr |= LPCR_GTSE; + + spapr_set_all_lpcrs(update_lpcr, LPCR_UPRT | LPCR_HR | LPCR_GTSE); + + if (kvm_enabled()) { + return kvmppc_configure_v3_mmu(cpu, flags & FLAG_RADIX, + flags & FLAG_GTSE, cproc); + } + return H_SUCCESS; +} + +#define H_SIGNAL_SYS_RESET_ALL -1 +#define H_SIGNAL_SYS_RESET_ALLBUTSELF -2 + +static target_ulong h_signal_sys_reset(PowerPCCPU *cpu, + SpaprMachineState *spapr, + target_ulong opcode, target_ulong *args) +{ + target_long target = args[0]; + CPUState *cs; + + if (target < 0) { + /* Broadcast */ + if (target < H_SIGNAL_SYS_RESET_ALLBUTSELF) { + return H_PARAMETER; + } + + CPU_FOREACH(cs) { + PowerPCCPU *c = POWERPC_CPU(cs); + + if (target == H_SIGNAL_SYS_RESET_ALLBUTSELF) { + if (c == cpu) { + continue; + } + } + run_on_cpu(cs, spapr_do_system_reset_on_cpu, RUN_ON_CPU_NULL); + } + return H_SUCCESS; + + } else { + /* Unicast */ + cs = CPU(spapr_find_cpu(target)); + if (cs) { + run_on_cpu(cs, spapr_do_system_reset_on_cpu, RUN_ON_CPU_NULL); + return H_SUCCESS; + } + return H_PARAMETER; + } +} + +/* Returns either a logical PVR or zero if none was found */ +static uint32_t cas_check_pvr(PowerPCCPU *cpu, uint32_t max_compat, + target_ulong *addr, bool *raw_mode_supported) +{ + bool explicit_match = false; /* Matched the CPU's real PVR */ + uint32_t best_compat = 0; + int i; + + /* + * We scan the 
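
With the #defines above, a guest registering a fresh radix process table with guest translation shootdown enabled would pass, for example (a worked decoding; the values follow from the masks):

    flags = FLAG_MODIFY | FLAG_REGISTER | FLAG_RADIX | FLAG_GTSE;
          /*    0x10    |     0x08      |    0x04    |   0x01  = 0x1d */
    /* and the entry saved in spapr->patb_entry becomes: */
    cproc = PATE1_GR | proc_tbl | table_size;
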
supplied table of PVRs looking for two things + * 1. Is our real CPU PVR in the list? + * 2. What's the "best" listed logical PVR + */ + for (i = 0; i < 512; ++i) { + uint32_t pvr, pvr_mask; + + pvr_mask = ldl_be_phys(&address_space_memory, *addr); + pvr = ldl_be_phys(&address_space_memory, *addr + 4); + *addr += 8; + + if (~pvr_mask & pvr) { + break; /* Terminator record */ + } + + if ((cpu->env.spr[SPR_PVR] & pvr_mask) == (pvr & pvr_mask)) { + explicit_match = true; + } else { + if (ppc_check_compat(cpu, pvr, best_compat, max_compat)) { + best_compat = pvr; + } + } + } + + *raw_mode_supported = explicit_match; + + /* Parsing finished */ + trace_spapr_cas_pvr(cpu->compat_pvr, explicit_match, best_compat); + + return best_compat; +} + +static +target_ulong do_client_architecture_support(PowerPCCPU *cpu, + SpaprMachineState *spapr, + target_ulong vec, + target_ulong fdt_bufsize) +{ + target_ulong ov_table; /* Working address in data buffer */ + uint32_t cas_pvr; + SpaprOptionVector *ov1_guest, *ov5_guest; + bool guest_radix; + bool raw_mode_supported = false; + bool guest_xive; + CPUState *cs; + void *fdt; + uint32_t max_compat = spapr->max_compat_pvr; + + /* CAS is supposed to be called early when only the boot vCPU is active. */ + CPU_FOREACH(cs) { + if (cs == CPU(cpu)) { + continue; + } + if (!cs->halted) { + warn_report("guest has multiple active vCPUs at CAS, which is not allowed"); + return H_MULTI_THREADS_ACTIVE; + } + } + + cas_pvr = cas_check_pvr(cpu, max_compat, &vec, &raw_mode_supported); + if (!cas_pvr && (!raw_mode_supported || max_compat)) { + /* + * We couldn't find a suitable compatibility mode, and either + * the guest doesn't support "raw" mode for this CPU, or "raw" + * mode is disabled because a maximum compat mode is set. + */ + error_report("Couldn't negotiate a suitable PVR during CAS"); + return H_HARDWARE; + } + + /* Update CPUs */ + if (cpu->compat_pvr != cas_pvr) { + Error *local_err = NULL; + + if (ppc_set_compat_all(cas_pvr, &local_err) < 0) { + /* We fail to set compat mode (likely because running with KVM PR), + * but maybe we can fallback to raw mode if the guest supports it. + */ + if (!raw_mode_supported) { + error_report_err(local_err); + return H_HARDWARE; + } + error_free(local_err); + } + } + + /* For the future use: here @ov_table points to the first option vector */ + ov_table = vec; + + ov1_guest = spapr_ovec_parse_vector(ov_table, 1); + if (!ov1_guest) { + warn_report("guest didn't provide option vector 1"); + return H_PARAMETER; + } + ov5_guest = spapr_ovec_parse_vector(ov_table, 5); + if (!ov5_guest) { + spapr_ovec_cleanup(ov1_guest); + warn_report("guest didn't provide option vector 5"); + return H_PARAMETER; + } + if (spapr_ovec_test(ov5_guest, OV5_MMU_BOTH)) { + error_report("guest requested hash and radix MMU, which is invalid."); + exit(EXIT_FAILURE); + } + if (spapr_ovec_test(ov5_guest, OV5_XIVE_BOTH)) { + error_report("guest requested an invalid interrupt mode"); + exit(EXIT_FAILURE); + } + + guest_radix = spapr_ovec_test(ov5_guest, OV5_MMU_RADIX_300); + + guest_xive = spapr_ovec_test(ov5_guest, OV5_XIVE_EXPLOIT); + + /* + * HPT resizing is a bit of a special case, because when enabled + * we assume an HPT guest will support it until it says it + * doesn't, instead of assuming it won't support it until it says + * it does. Strictly speaking that approach could break for + * guests which don't make a CAS call, but those are so old we + * don't care about them. 
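
The PVR table scanned by cas_check_pvr() above is a flat array of big-endian (mask, value) pairs ending in an entry whose value has bits outside its mask. An illustrative list a POWER9 guest might supply (the logical PVR constant is an assumption here, not taken from the patch):

    uint32_t pvr_list[][2] = {
        { 0xffff0000, 0x004e0000 },   /* raw POWER9 PVR family          */
        { 0xffffffff, 0x0f000005 },   /* logical PVR for ISA v3.00      */
        { 0x00000000, 0xffffffff },   /* terminator: ~mask & value != 0 */
    };
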
Without that assumption we'd have to + * make at least a temporary allocation of an HPT sized for max + * memory, which could be impossibly difficult under KVM HV if + * maxram is large. + */ + if (!guest_radix && !spapr_ovec_test(ov5_guest, OV5_HPT_RESIZE)) { + int maxshift = spapr_hpt_shift_for_ramsize(MACHINE(spapr)->maxram_size); + + if (spapr->resize_hpt == SPAPR_RESIZE_HPT_REQUIRED) { + error_report( + "h_client_architecture_support: Guest doesn't support HPT resizing, but resize-hpt=required"); + exit(1); + } + + if (spapr->htab_shift < maxshift) { + /* Guest doesn't know about HPT resizing, so we + * pre-emptively resize for the maximum permitted RAM. At + * the point this is called, nothing should have been + * entered into the existing HPT */ + spapr_reallocate_hpt(spapr, maxshift, &error_fatal); + push_sregs_to_kvm_pr(spapr); + } + } + + /* NOTE: there are actually a number of ov5 bits where input from the + * guest is always zero, and the platform/QEMU enables them independently + * of guest input. To model these properly we'd want some sort of mask, + * but since they only currently apply to memory migration as defined + * by LoPAPR 1.1, 14.5.4.8, which QEMU doesn't implement, we don't need + * to worry about this for now. + */ + + /* full range of negotiated ov5 capabilities */ + spapr_ovec_intersect(spapr->ov5_cas, spapr->ov5, ov5_guest); + spapr_ovec_cleanup(ov5_guest); + + spapr_check_mmu_mode(guest_radix); + + spapr->cas_pre_isa3_guest = !spapr_ovec_test(ov1_guest, OV1_PPC_3_00); + spapr_ovec_cleanup(ov1_guest); + + /* + * Check for NUMA affinity conditions now that we know which NUMA + * affinity the guest will use. + */ + spapr_numa_associativity_check(spapr); + + /* + * Ensure the guest asks for an interrupt mode we support; + * otherwise terminate the boot. + */ + if (guest_xive) { + if (!spapr->irq->xive) { + error_report( +"Guest requested unavailable interrupt mode (XIVE), try the ic-mode=xive or ic-mode=dual machine property"); + exit(EXIT_FAILURE); + } + } else { + if (!spapr->irq->xics) { + error_report( +"Guest requested unavailable interrupt mode (XICS), either don't set the ic-mode machine property or try ic-mode=xics or ic-mode=dual"); + exit(EXIT_FAILURE); + } + } + + spapr_irq_update_active_intc(spapr); + + /* + * Process all pending hot-plug/unplug requests now. An updated full + * rendered FDT will be returned to the guest. + */ + spapr_drc_reset_all(spapr); + spapr_clear_pending_hotplug_events(spapr); + + /* + * If spapr_machine_reset() did not set up a HPT but one is necessary + * (because the guest isn't going to use radix) then set it up here. 
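
The spapr_ovec_intersect() call above makes the negotiated set a plain bitwise intersection: a capability lands in spapr->ov5_cas only if the platform advertised it in spapr->ov5 and the guest requested it in ov5_guest. Schematically:

    /* ov5_cas = ov5 & ov5_guest; e.g. if the platform offers OV5_HP_EVT
     * and OV5_HPT_RESIZE but the guest asks only for OV5_HP_EVT, then
     * ov5_cas contains OV5_HP_EVT and not OV5_HPT_RESIZE.
     */
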
+ */ + if ((spapr->patb_entry & PATE1_GR) && !guest_radix) { + /* legacy hash or new hash: */ + spapr_setup_hpt(spapr); + } + + fdt = spapr_build_fdt(spapr, spapr->vof != NULL, fdt_bufsize); + g_free(spapr->fdt_blob); + spapr->fdt_size = fdt_totalsize(fdt); + spapr->fdt_initial_size = spapr->fdt_size; + spapr->fdt_blob = fdt; + + return H_SUCCESS; +} + +static target_ulong h_client_architecture_support(PowerPCCPU *cpu, + SpaprMachineState *spapr, + target_ulong opcode, + target_ulong *args) +{ + target_ulong vec = ppc64_phys_to_real(args[0]); + target_ulong fdt_buf = args[1]; + target_ulong fdt_bufsize = args[2]; + target_ulong ret; + SpaprDeviceTreeUpdateHeader hdr = { .version_id = 1 }; + + if (fdt_bufsize < sizeof(hdr)) { + error_report("SLOF provided insufficient CAS buffer " + TARGET_FMT_lu " (min: %zu)", fdt_bufsize, sizeof(hdr)); + exit(EXIT_FAILURE); + } + + fdt_bufsize -= sizeof(hdr); + + ret = do_client_architecture_support(cpu, spapr, vec, fdt_bufsize); + if (ret == H_SUCCESS) { + _FDT((fdt_pack(spapr->fdt_blob))); + spapr->fdt_size = fdt_totalsize(spapr->fdt_blob); + spapr->fdt_initial_size = spapr->fdt_size; + + cpu_physical_memory_write(fdt_buf, &hdr, sizeof(hdr)); + cpu_physical_memory_write(fdt_buf + sizeof(hdr), spapr->fdt_blob, + spapr->fdt_size); + trace_spapr_cas_continue(spapr->fdt_size + sizeof(hdr)); + } + + return ret; +} + +target_ulong spapr_vof_client_architecture_support(MachineState *ms, + CPUState *cs, + target_ulong ovec_addr) +{ + SpaprMachineState *spapr = SPAPR_MACHINE(ms); + + target_ulong ret = do_client_architecture_support(POWERPC_CPU(cs), spapr, + ovec_addr, FDT_MAX_SIZE); + + /* + * This adds stdout and generates phandles for boottime and CAS FDTs. + * It is alright to update the FDT here as do_client_architecture_support() + * does not pack it. 
+ */ + spapr_vof_client_dt_finalize(spapr, spapr->fdt_blob); + + return ret; +} + +static target_ulong h_get_cpu_characteristics(PowerPCCPU *cpu, + SpaprMachineState *spapr, + target_ulong opcode, + target_ulong *args) +{ + uint64_t characteristics = H_CPU_CHAR_HON_BRANCH_HINTS & + ~H_CPU_CHAR_THR_RECONF_TRIG; + uint64_t behaviour = H_CPU_BEHAV_FAVOUR_SECURITY; + uint8_t safe_cache = spapr_get_cap(spapr, SPAPR_CAP_CFPC); + uint8_t safe_bounds_check = spapr_get_cap(spapr, SPAPR_CAP_SBBC); + uint8_t safe_indirect_branch = spapr_get_cap(spapr, SPAPR_CAP_IBS); + uint8_t count_cache_flush_assist = spapr_get_cap(spapr, + SPAPR_CAP_CCF_ASSIST); + + switch (safe_cache) { + case SPAPR_CAP_WORKAROUND: + characteristics |= H_CPU_CHAR_L1D_FLUSH_ORI30; + characteristics |= H_CPU_CHAR_L1D_FLUSH_TRIG2; + characteristics |= H_CPU_CHAR_L1D_THREAD_PRIV; + behaviour |= H_CPU_BEHAV_L1D_FLUSH_PR; + break; + case SPAPR_CAP_FIXED: + behaviour |= H_CPU_BEHAV_NO_L1D_FLUSH_ENTRY; + behaviour |= H_CPU_BEHAV_NO_L1D_FLUSH_UACCESS; + break; + default: /* broken */ + assert(safe_cache == SPAPR_CAP_BROKEN); + behaviour |= H_CPU_BEHAV_L1D_FLUSH_PR; + break; + } + + switch (safe_bounds_check) { + case SPAPR_CAP_WORKAROUND: + characteristics |= H_CPU_CHAR_SPEC_BAR_ORI31; + behaviour |= H_CPU_BEHAV_BNDS_CHK_SPEC_BAR; + break; + case SPAPR_CAP_FIXED: + break; + default: /* broken */ + assert(safe_bounds_check == SPAPR_CAP_BROKEN); + behaviour |= H_CPU_BEHAV_BNDS_CHK_SPEC_BAR; + break; + } + + switch (safe_indirect_branch) { + case SPAPR_CAP_FIXED_NA: + break; + case SPAPR_CAP_FIXED_CCD: + characteristics |= H_CPU_CHAR_CACHE_COUNT_DIS; + break; + case SPAPR_CAP_FIXED_IBS: + characteristics |= H_CPU_CHAR_BCCTRL_SERIALISED; + break; + case SPAPR_CAP_WORKAROUND: + behaviour |= H_CPU_BEHAV_FLUSH_COUNT_CACHE; + if (count_cache_flush_assist) { + characteristics |= H_CPU_CHAR_BCCTR_FLUSH_ASSIST; + } + break; + default: /* broken */ + assert(safe_indirect_branch == SPAPR_CAP_BROKEN); + break; + } + + args[0] = characteristics; + args[1] = behaviour; + return H_SUCCESS; +} + +static target_ulong h_update_dt(PowerPCCPU *cpu, SpaprMachineState *spapr, + target_ulong opcode, target_ulong *args) +{ + target_ulong dt = ppc64_phys_to_real(args[0]); + struct fdt_header hdr = { 0 }; + unsigned cb; + SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr); + void *fdt; + + cpu_physical_memory_read(dt, &hdr, sizeof(hdr)); + cb = fdt32_to_cpu(hdr.totalsize); + + if (!smc->update_dt_enabled) { + return H_SUCCESS; + } + + /* Check that the fdt did not grow out of proportion */ + if (cb > spapr->fdt_initial_size * 2) { + trace_spapr_update_dt_failed_size(spapr->fdt_initial_size, cb, + fdt32_to_cpu(hdr.magic)); + return H_PARAMETER; + } + + fdt = g_malloc0(cb); + cpu_physical_memory_read(dt, fdt, cb); + + /* Check the fdt consistency */ + if (fdt_check_full(fdt, cb)) { + trace_spapr_update_dt_failed_check(spapr->fdt_initial_size, cb, + fdt32_to_cpu(hdr.magic)); + return H_PARAMETER; + } + + g_free(spapr->fdt_blob); + spapr->fdt_size = cb; + spapr->fdt_blob = fdt; + trace_spapr_update_dt(cb); + + return H_SUCCESS; +} + +static spapr_hcall_fn papr_hypercall_table[(MAX_HCALL_OPCODE / 4) + 1]; +static spapr_hcall_fn kvmppc_hypercall_table[KVMPPC_HCALL_MAX - KVMPPC_HCALL_BASE + 1]; +static spapr_hcall_fn svm_hypercall_table[(SVM_HCALL_MAX - SVM_HCALL_BASE) / 4 + 1]; + +void spapr_register_hypercall(target_ulong opcode, spapr_hcall_fn fn) +{ + spapr_hcall_fn *slot; + + if (opcode <= MAX_HCALL_OPCODE) { + assert((opcode & 0x3) == 0); + + slot = 
&papr_hypercall_table[opcode / 4]; + } else if (opcode >= SVM_HCALL_BASE && opcode <= SVM_HCALL_MAX) { + /* we only have SVM-related hcall numbers assigned in multiples of 4 */ + assert((opcode & 0x3) == 0); + + slot = &svm_hypercall_table[(opcode - SVM_HCALL_BASE) / 4]; + } else { + assert((opcode >= KVMPPC_HCALL_BASE) && (opcode <= KVMPPC_HCALL_MAX)); + + slot = &kvmppc_hypercall_table[opcode - KVMPPC_HCALL_BASE]; + } + + assert(!(*slot)); + *slot = fn; +} + +target_ulong spapr_hypercall(PowerPCCPU *cpu, target_ulong opcode, + target_ulong *args) +{ + SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine()); + + if ((opcode <= MAX_HCALL_OPCODE) + && ((opcode & 0x3) == 0)) { + spapr_hcall_fn fn = papr_hypercall_table[opcode / 4]; + + if (fn) { + return fn(cpu, spapr, opcode, args); + } + } else if ((opcode >= SVM_HCALL_BASE) && + (opcode <= SVM_HCALL_MAX)) { + spapr_hcall_fn fn = svm_hypercall_table[(opcode - SVM_HCALL_BASE) / 4]; + + if (fn) { + return fn(cpu, spapr, opcode, args); + } + } else if ((opcode >= KVMPPC_HCALL_BASE) && + (opcode <= KVMPPC_HCALL_MAX)) { + spapr_hcall_fn fn = kvmppc_hypercall_table[opcode - KVMPPC_HCALL_BASE]; + + if (fn) { + return fn(cpu, spapr, opcode, args); + } + } + + qemu_log_mask(LOG_UNIMP, "Unimplemented SPAPR hcall 0x" TARGET_FMT_lx "\n", + opcode); + return H_FUNCTION; +} + +#ifndef CONFIG_TCG +static target_ulong h_softmmu(PowerPCCPU *cpu, SpaprMachineState *spapr, + target_ulong opcode, target_ulong *args) +{ + g_assert_not_reached(); +} + +static void hypercall_register_softmmu(void) +{ + /* hcall-pft */ + spapr_register_hypercall(H_ENTER, h_softmmu); + spapr_register_hypercall(H_REMOVE, h_softmmu); + spapr_register_hypercall(H_PROTECT, h_softmmu); + spapr_register_hypercall(H_READ, h_softmmu); + + /* hcall-bulk */ + spapr_register_hypercall(H_BULK_REMOVE, h_softmmu); +} +#else +static void hypercall_register_softmmu(void) +{ + /* DO NOTHING */ +} +#endif + +static void hypercall_register_types(void) +{ + hypercall_register_softmmu(); + + /* hcall-hpt-resize */ + spapr_register_hypercall(H_RESIZE_HPT_PREPARE, h_resize_hpt_prepare); + spapr_register_hypercall(H_RESIZE_HPT_COMMIT, h_resize_hpt_commit); + + /* hcall-splpar */ + spapr_register_hypercall(H_REGISTER_VPA, h_register_vpa); + spapr_register_hypercall(H_CEDE, h_cede); + spapr_register_hypercall(H_CONFER, h_confer); + spapr_register_hypercall(H_PROD, h_prod); + + /* hcall-join */ + spapr_register_hypercall(H_JOIN, h_join); + + spapr_register_hypercall(H_SIGNAL_SYS_RESET, h_signal_sys_reset); + + /* processor register resource access h-calls */ + spapr_register_hypercall(H_SET_SPRG0, h_set_sprg0); + spapr_register_hypercall(H_SET_DABR, h_set_dabr); + spapr_register_hypercall(H_SET_XDABR, h_set_xdabr); + spapr_register_hypercall(H_PAGE_INIT, h_page_init); + spapr_register_hypercall(H_SET_MODE, h_set_mode); + + /* In Memory Table MMU h-calls */ + spapr_register_hypercall(H_CLEAN_SLB, h_clean_slb); + spapr_register_hypercall(H_INVALIDATE_PID, h_invalidate_pid); + spapr_register_hypercall(H_REGISTER_PROC_TBL, h_register_process_table); + + /* hcall-get-cpu-characteristics */ + spapr_register_hypercall(H_GET_CPU_CHARACTERISTICS, + h_get_cpu_characteristics); + + /* "debugger" hcalls (also used by SLOF). Note: We do -not- differentiate + * here between the "CI" and the "CACHE" variants, they will use whatever + * mapping attributes qemu is using.
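
The three dispatch tables above exploit the PAPR numbering: ordinary opcodes come in multiples of 4 and index as opcode / 4, while the KVM-private range is dense and indexes directly. For instance (assuming the usual sPAPR header value H_CEDE = 0xE0):

    /* H_CEDE = 0xE0 -> papr_hypercall_table[0xE0 / 4], i.e. slot 56     */
    /* KVMPPC_H_RTAS -> kvmppc_hypercall_table[KVMPPC_H_RTAS
                                               - KVMPPC_HCALL_BASE]      */
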
When using KVM, the kernel will + * enforce the attributes more strongly + */ + spapr_register_hypercall(H_LOGICAL_CI_LOAD, h_logical_load); + spapr_register_hypercall(H_LOGICAL_CI_STORE, h_logical_store); + spapr_register_hypercall(H_LOGICAL_CACHE_LOAD, h_logical_load); + spapr_register_hypercall(H_LOGICAL_CACHE_STORE, h_logical_store); + spapr_register_hypercall(H_LOGICAL_ICBI, h_logical_icbi); + spapr_register_hypercall(H_LOGICAL_DCBF, h_logical_dcbf); + spapr_register_hypercall(KVMPPC_H_LOGICAL_MEMOP, h_logical_memop); + + /* qemu/KVM-PPC specific hcalls */ + spapr_register_hypercall(KVMPPC_H_RTAS, h_rtas); + + /* ibm,client-architecture-support support */ + spapr_register_hypercall(KVMPPC_H_CAS, h_client_architecture_support); + + spapr_register_hypercall(KVMPPC_H_UPDATE_DT, h_update_dt); +} + +type_init(hypercall_register_types) diff --git a/hw/ppc/spapr_iommu.c b/hw/ppc/spapr_iommu.c new file mode 100644 index 000000000..db0107185 --- /dev/null +++ b/hw/ppc/spapr_iommu.c @@ -0,0 +1,718 @@ +/* + * QEMU sPAPR IOMMU (TCE) code + * + * Copyright (c) 2010 David Gibson, IBM Corporation + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ + +#include "qemu/osdep.h" +#include "qemu/error-report.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "sysemu/kvm.h" +#include "kvm_ppc.h" +#include "migration/vmstate.h" +#include "sysemu/dma.h" +#include "trace.h" + +#include "hw/ppc/spapr.h" +#include "hw/ppc/spapr_vio.h" + +#include + +enum SpaprTceAccess { + SPAPR_TCE_FAULT = 0, + SPAPR_TCE_RO = 1, + SPAPR_TCE_WO = 2, + SPAPR_TCE_RW = 3, +}; + +#define IOMMU_PAGE_SIZE(shift) (1ULL << (shift)) +#define IOMMU_PAGE_MASK(shift) (~(IOMMU_PAGE_SIZE(shift) - 1)) + +static QLIST_HEAD(, SpaprTceTable) spapr_tce_tables; + +SpaprTceTable *spapr_tce_find_by_liobn(target_ulong liobn) +{ + SpaprTceTable *tcet; + + if (liobn & 0xFFFFFFFF00000000ULL) { + hcall_dprintf("Request for out-of-bounds LIOBN 0x" TARGET_FMT_lx "\n", + liobn); + return NULL; + } + + QLIST_FOREACH(tcet, &spapr_tce_tables, list) { + if (tcet->liobn == (uint32_t)liobn) { + return tcet; + } + } + + return NULL; +} + +static IOMMUAccessFlags spapr_tce_iommu_access_flags(uint64_t tce) +{ + switch (tce & SPAPR_TCE_RW) { + case SPAPR_TCE_FAULT: + return IOMMU_NONE; + case SPAPR_TCE_RO: + return IOMMU_RO; + case SPAPR_TCE_WO: + return IOMMU_WO; + default: /* SPAPR_TCE_RW */ + return IOMMU_RW; + } +} + +static uint64_t *spapr_tce_alloc_table(uint32_t liobn, + uint32_t page_shift, + uint64_t bus_offset, + uint32_t nb_table, + int *fd, + bool need_vfio) +{ + uint64_t *table = NULL; + + if (kvm_enabled()) { + table = kvmppc_create_spapr_tce(liobn, page_shift, bus_offset, nb_table, + fd, need_vfio); + } + + if (!table) { + *fd = -1; + table = g_new0(uint64_t, nb_table); + } + + trace_spapr_iommu_new_table(liobn, table, *fd); + + return table; +} + +static void spapr_tce_free_table(uint64_t *table, int fd, uint32_t nb_table) +{ + if (!kvm_enabled() 
|| + (kvmppc_remove_spapr_tce(table, fd, nb_table) != 0)) { + g_free(table); + } +} + +/* Called from RCU critical section */ +static IOMMUTLBEntry spapr_tce_translate_iommu(IOMMUMemoryRegion *iommu, + hwaddr addr, + IOMMUAccessFlags flag, + int iommu_idx) +{ + SpaprTceTable *tcet = container_of(iommu, SpaprTceTable, iommu); + uint64_t tce; + IOMMUTLBEntry ret = { + .target_as = &address_space_memory, + .iova = 0, + .translated_addr = 0, + .addr_mask = ~(hwaddr)0, + .perm = IOMMU_NONE, + }; + + if ((addr >> tcet->page_shift) < tcet->nb_table) { + /* Check if we are in bounds */ + hwaddr page_mask = IOMMU_PAGE_MASK(tcet->page_shift); + + tce = tcet->table[addr >> tcet->page_shift]; + ret.iova = addr & page_mask; + ret.translated_addr = tce & page_mask; + ret.addr_mask = ~page_mask; + ret.perm = spapr_tce_iommu_access_flags(tce); + } + trace_spapr_iommu_xlate(tcet->liobn, addr, ret.translated_addr, ret.perm, + ret.addr_mask); + + return ret; +} + +static void spapr_tce_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n) +{ + MemoryRegion *mr = MEMORY_REGION(iommu_mr); + IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr); + hwaddr addr, granularity; + IOMMUTLBEntry iotlb; + SpaprTceTable *tcet = container_of(iommu_mr, SpaprTceTable, iommu); + + if (tcet->skipping_replay) { + return; + } + + granularity = memory_region_iommu_get_min_page_size(iommu_mr); + + for (addr = 0; addr < memory_region_size(mr); addr += granularity) { + iotlb = imrc->translate(iommu_mr, addr, IOMMU_NONE, n->iommu_idx); + if (iotlb.perm != IOMMU_NONE) { + n->notify(n, &iotlb); + } + + /* + * if (2^64 - MR size) < granularity, it's possible to get an + * infinite loop here. This should catch such a wraparound. + */ + if ((addr + granularity) < addr) { + break; + } + } +} + +static int spapr_tce_table_pre_save(void *opaque) +{ + SpaprTceTable *tcet = SPAPR_TCE_TABLE(opaque); + + tcet->mig_table = tcet->table; + tcet->mig_nb_table = tcet->nb_table; + + trace_spapr_iommu_pre_save(tcet->liobn, tcet->mig_nb_table, + tcet->bus_offset, tcet->page_shift); + + return 0; +} + +static uint64_t spapr_tce_get_min_page_size(IOMMUMemoryRegion *iommu) +{ + SpaprTceTable *tcet = container_of(iommu, SpaprTceTable, iommu); + + return 1ULL << tcet->page_shift; +} + +static int spapr_tce_get_attr(IOMMUMemoryRegion *iommu, + enum IOMMUMemoryRegionAttr attr, void *data) +{ + SpaprTceTable *tcet = container_of(iommu, SpaprTceTable, iommu); + + if (attr == IOMMU_ATTR_SPAPR_TCE_FD && kvmppc_has_cap_spapr_vfio()) { + *(int *) data = tcet->fd; + return 0; + } + + return -EINVAL; +} + +static int spapr_tce_notify_flag_changed(IOMMUMemoryRegion *iommu, + IOMMUNotifierFlag old, + IOMMUNotifierFlag new, + Error **errp) +{ + struct SpaprTceTable *tbl = container_of(iommu, SpaprTceTable, iommu); + + if (new & IOMMU_NOTIFIER_DEVIOTLB_UNMAP) { + error_setg(errp, "spapr_tce does not support dev-iotlb yet"); + return -EINVAL; + } + + if (old == IOMMU_NOTIFIER_NONE && new != IOMMU_NOTIFIER_NONE) { + spapr_tce_set_need_vfio(tbl, true); + } else if (old != IOMMU_NOTIFIER_NONE && new == IOMMU_NOTIFIER_NONE) { + spapr_tce_set_need_vfio(tbl, false); + } + return 0; +} + +static int spapr_tce_table_post_load(void *opaque, int version_id) +{ + SpaprTceTable *tcet = SPAPR_TCE_TABLE(opaque); + uint32_t old_nb_table = tcet->nb_table; + uint64_t old_bus_offset = tcet->bus_offset; + uint32_t old_page_shift = tcet->page_shift; + + if (tcet->vdev) { + spapr_vio_set_bypass(tcet->vdev, tcet->bypass); + } + + if (tcet->mig_nb_table != tcet->nb_table) { +
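
To make the shift-and-mask arithmetic in spapr_tce_translate_iommu() above concrete, assume page_shift = 12 and tcet->table[5] = 0x80001003 (an illustrative entry):

    /* addr = 0x5123:
     *   index           = 0x5123 >> 12           = 5
     *   page_mask       = ~(hwaddr)0xfff
     *   ret.iova        = 0x5123 & page_mask     = 0x5000
     *   ret.translated  = 0x80001003 & page_mask = 0x80001000
     *   ret.perm        = low bits 0b11 = SPAPR_TCE_RW -> IOMMU_RW
     */
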
spapr_tce_table_disable(tcet); + } + + if (tcet->mig_nb_table) { + if (!tcet->nb_table) { + spapr_tce_table_enable(tcet, old_page_shift, old_bus_offset, + tcet->mig_nb_table); + } + + memcpy(tcet->table, tcet->mig_table, + tcet->nb_table * sizeof(tcet->table[0])); + + free(tcet->mig_table); + tcet->mig_table = NULL; + } + + trace_spapr_iommu_post_load(tcet->liobn, old_nb_table, tcet->nb_table, + tcet->bus_offset, tcet->page_shift); + + return 0; +} + +static bool spapr_tce_table_ex_needed(void *opaque) +{ + SpaprTceTable *tcet = opaque; + + return tcet->bus_offset || tcet->page_shift != 0xC; +} + +static const VMStateDescription vmstate_spapr_tce_table_ex = { + .name = "spapr_iommu_ex", + .version_id = 1, + .minimum_version_id = 1, + .needed = spapr_tce_table_ex_needed, + .fields = (VMStateField[]) { + VMSTATE_UINT64(bus_offset, SpaprTceTable), + VMSTATE_UINT32(page_shift, SpaprTceTable), + VMSTATE_END_OF_LIST() + }, +}; + +static const VMStateDescription vmstate_spapr_tce_table = { + .name = "spapr_iommu", + .version_id = 2, + .minimum_version_id = 2, + .pre_save = spapr_tce_table_pre_save, + .post_load = spapr_tce_table_post_load, + .fields = (VMStateField []) { + /* Sanity check */ + VMSTATE_UINT32_EQUAL(liobn, SpaprTceTable, NULL), + + /* IOMMU state */ + VMSTATE_UINT32(mig_nb_table, SpaprTceTable), + VMSTATE_BOOL(bypass, SpaprTceTable), + VMSTATE_VARRAY_UINT32_ALLOC(mig_table, SpaprTceTable, mig_nb_table, 0, + vmstate_info_uint64, uint64_t), + + VMSTATE_END_OF_LIST() + }, + .subsections = (const VMStateDescription*[]) { + &vmstate_spapr_tce_table_ex, + NULL + } +}; + +static void spapr_tce_table_realize(DeviceState *dev, Error **errp) +{ + SpaprTceTable *tcet = SPAPR_TCE_TABLE(dev); + Object *tcetobj = OBJECT(tcet); + gchar *tmp; + + tcet->fd = -1; + tcet->need_vfio = false; + tmp = g_strdup_printf("tce-root-%x", tcet->liobn); + memory_region_init(&tcet->root, tcetobj, tmp, UINT64_MAX); + g_free(tmp); + + tmp = g_strdup_printf("tce-iommu-%x", tcet->liobn); + memory_region_init_iommu(&tcet->iommu, sizeof(tcet->iommu), + TYPE_SPAPR_IOMMU_MEMORY_REGION, + tcetobj, tmp, 0); + g_free(tmp); + + QLIST_INSERT_HEAD(&spapr_tce_tables, tcet, list); + + vmstate_register(VMSTATE_IF(tcet), tcet->liobn, &vmstate_spapr_tce_table, + tcet); +} + +void spapr_tce_set_need_vfio(SpaprTceTable *tcet, bool need_vfio) +{ + size_t table_size = tcet->nb_table * sizeof(uint64_t); + uint64_t *oldtable; + int newfd = -1; + + g_assert(need_vfio != tcet->need_vfio); + + tcet->need_vfio = need_vfio; + + if (!need_vfio || (tcet->fd != -1 && kvmppc_has_cap_spapr_vfio())) { + return; + } + + oldtable = tcet->table; + + tcet->table = spapr_tce_alloc_table(tcet->liobn, + tcet->page_shift, + tcet->bus_offset, + tcet->nb_table, + &newfd, + need_vfio); + memcpy(tcet->table, oldtable, table_size); + + spapr_tce_free_table(oldtable, tcet->fd, tcet->nb_table); + + tcet->fd = newfd; +} + +SpaprTceTable *spapr_tce_new_table(DeviceState *owner, uint32_t liobn) +{ + SpaprTceTable *tcet; + gchar *tmp; + + if (spapr_tce_find_by_liobn(liobn)) { + error_report("Attempted to create TCE table with duplicate" + " LIOBN 0x%x", liobn); + return NULL; + } + + tcet = SPAPR_TCE_TABLE(object_new(TYPE_SPAPR_TCE_TABLE)); + tcet->liobn = liobn; + + tmp = g_strdup_printf("tce-table-%x", liobn); + object_property_add_child(OBJECT(owner), tmp, OBJECT(tcet)); + g_free(tmp); + object_unref(OBJECT(tcet)); + + qdev_realize(DEVICE(tcet), NULL, NULL); + + return tcet; +} + +void spapr_tce_table_enable(SpaprTceTable *tcet, + uint32_t page_shift, uint64_t 
bus_offset, + uint32_t nb_table) +{ + if (tcet->nb_table) { + warn_report("trying to enable already enabled TCE table"); + return; + } + + tcet->bus_offset = bus_offset; + tcet->page_shift = page_shift; + tcet->nb_table = nb_table; + tcet->table = spapr_tce_alloc_table(tcet->liobn, + tcet->page_shift, + tcet->bus_offset, + tcet->nb_table, + &tcet->fd, + tcet->need_vfio); + + memory_region_set_size(MEMORY_REGION(&tcet->iommu), + (uint64_t)tcet->nb_table << tcet->page_shift); + memory_region_add_subregion(&tcet->root, tcet->bus_offset, + MEMORY_REGION(&tcet->iommu)); +} + +void spapr_tce_table_disable(SpaprTceTable *tcet) +{ + if (!tcet->nb_table) { + return; + } + + memory_region_del_subregion(&tcet->root, MEMORY_REGION(&tcet->iommu)); + memory_region_set_size(MEMORY_REGION(&tcet->iommu), 0); + + spapr_tce_free_table(tcet->table, tcet->fd, tcet->nb_table); + tcet->fd = -1; + tcet->table = NULL; + tcet->bus_offset = 0; + tcet->page_shift = 0; + tcet->nb_table = 0; +} + +static void spapr_tce_table_unrealize(DeviceState *dev) +{ + SpaprTceTable *tcet = SPAPR_TCE_TABLE(dev); + + vmstate_unregister(VMSTATE_IF(tcet), &vmstate_spapr_tce_table, tcet); + + QLIST_REMOVE(tcet, list); + + spapr_tce_table_disable(tcet); +} + +MemoryRegion *spapr_tce_get_iommu(SpaprTceTable *tcet) +{ + return &tcet->root; +} + +static void spapr_tce_reset(DeviceState *dev) +{ + SpaprTceTable *tcet = SPAPR_TCE_TABLE(dev); + size_t table_size = tcet->nb_table * sizeof(uint64_t); + + if (tcet->nb_table) { + memset(tcet->table, 0, table_size); + } +} + +static target_ulong put_tce_emu(SpaprTceTable *tcet, target_ulong ioba, + target_ulong tce) +{ + IOMMUTLBEvent event; + hwaddr page_mask = IOMMU_PAGE_MASK(tcet->page_shift); + unsigned long index = (ioba - tcet->bus_offset) >> tcet->page_shift; + + if (index >= tcet->nb_table) { + hcall_dprintf("spapr_vio_put_tce on out-of-bounds IOBA 0x" + TARGET_FMT_lx "\n", ioba); + return H_PARAMETER; + } + + tcet->table[index] = tce; + + event.entry.target_as = &address_space_memory, + event.entry.iova = (ioba - tcet->bus_offset) & page_mask; + event.entry.translated_addr = tce & page_mask; + event.entry.addr_mask = ~page_mask; + event.entry.perm = spapr_tce_iommu_access_flags(tce); + event.type = event.entry.perm ? IOMMU_NOTIFIER_MAP : IOMMU_NOTIFIER_UNMAP; + memory_region_notify_iommu(&tcet->iommu, 0, event); + + return H_SUCCESS; +} + +static target_ulong h_put_tce_indirect(PowerPCCPU *cpu, + SpaprMachineState *spapr, + target_ulong opcode, target_ulong *args) +{ + int i; + target_ulong liobn = args[0]; + target_ulong ioba = args[1]; + target_ulong ioba1 = ioba; + target_ulong tce_list = args[2]; + target_ulong npages = args[3]; + target_ulong ret = H_PARAMETER, tce = 0; + SpaprTceTable *tcet = spapr_tce_find_by_liobn(liobn); + CPUState *cs = CPU(cpu); + hwaddr page_mask, page_size; + + if (!tcet) { + return H_PARAMETER; + } + + if ((npages > 512) || (tce_list & SPAPR_TCE_PAGE_MASK)) { + return H_PARAMETER; + } + + page_mask = IOMMU_PAGE_MASK(tcet->page_shift); + page_size = IOMMU_PAGE_SIZE(tcet->page_shift); + ioba &= page_mask; + + for (i = 0; i < npages; ++i, ioba += page_size) { + tce = ldq_be_phys(cs->as, tce_list + i * sizeof(target_ulong)); + + ret = put_tce_emu(tcet, ioba, tce); + if (ret) { + break; + } + } + + /* Trace last successful or the first problematic entry */ + i = i ? 
(i - 1) : 0; + if (SPAPR_IS_PCI_LIOBN(liobn)) { + trace_spapr_iommu_pci_indirect(liobn, ioba1, tce_list, i, tce, ret); + } else { + trace_spapr_iommu_indirect(liobn, ioba1, tce_list, i, tce, ret); + } + return ret; +} + +static target_ulong h_stuff_tce(PowerPCCPU *cpu, SpaprMachineState *spapr, + target_ulong opcode, target_ulong *args) +{ + int i; + target_ulong liobn = args[0]; + target_ulong ioba = args[1]; + target_ulong tce_value = args[2]; + target_ulong npages = args[3]; + target_ulong ret = H_PARAMETER; + SpaprTceTable *tcet = spapr_tce_find_by_liobn(liobn); + hwaddr page_mask, page_size; + + if (!tcet) { + return H_PARAMETER; + } + + if (npages > tcet->nb_table) { + return H_PARAMETER; + } + + page_mask = IOMMU_PAGE_MASK(tcet->page_shift); + page_size = IOMMU_PAGE_SIZE(tcet->page_shift); + ioba &= page_mask; + + for (i = 0; i < npages; ++i, ioba += page_size) { + ret = put_tce_emu(tcet, ioba, tce_value); + if (ret) { + break; + } + } + if (SPAPR_IS_PCI_LIOBN(liobn)) { + trace_spapr_iommu_pci_stuff(liobn, ioba, tce_value, npages, ret); + } else { + trace_spapr_iommu_stuff(liobn, ioba, tce_value, npages, ret); + } + + return ret; +} + +static target_ulong h_put_tce(PowerPCCPU *cpu, SpaprMachineState *spapr, + target_ulong opcode, target_ulong *args) +{ + target_ulong liobn = args[0]; + target_ulong ioba = args[1]; + target_ulong tce = args[2]; + target_ulong ret = H_PARAMETER; + SpaprTceTable *tcet = spapr_tce_find_by_liobn(liobn); + + if (tcet) { + hwaddr page_mask = IOMMU_PAGE_MASK(tcet->page_shift); + + ioba &= page_mask; + + ret = put_tce_emu(tcet, ioba, tce); + } + if (SPAPR_IS_PCI_LIOBN(liobn)) { + trace_spapr_iommu_pci_put(liobn, ioba, tce, ret); + } else { + trace_spapr_iommu_put(liobn, ioba, tce, ret); + } + + return ret; +} + +static target_ulong get_tce_emu(SpaprTceTable *tcet, target_ulong ioba, + target_ulong *tce) +{ + unsigned long index = (ioba - tcet->bus_offset) >> tcet->page_shift; + + if (index >= tcet->nb_table) { + hcall_dprintf("spapr_iommu_get_tce on out-of-bounds IOBA 0x" + TARGET_FMT_lx "\n", ioba); + return H_PARAMETER; + } + + *tce = tcet->table[index]; + + return H_SUCCESS; +} + +static target_ulong h_get_tce(PowerPCCPU *cpu, SpaprMachineState *spapr, + target_ulong opcode, target_ulong *args) +{ + target_ulong liobn = args[0]; + target_ulong ioba = args[1]; + target_ulong tce = 0; + target_ulong ret = H_PARAMETER; + SpaprTceTable *tcet = spapr_tce_find_by_liobn(liobn); + + if (tcet) { + hwaddr page_mask = IOMMU_PAGE_MASK(tcet->page_shift); + + ioba &= page_mask; + + ret = get_tce_emu(tcet, ioba, &tce); + if (!ret) { + args[0] = tce; + } + } + if (SPAPR_IS_PCI_LIOBN(liobn)) { + trace_spapr_iommu_pci_get(liobn, ioba, ret, tce); + } else { + trace_spapr_iommu_get(liobn, ioba, ret, tce); + } + + return ret; +} + +int spapr_dma_dt(void *fdt, int node_off, const char *propname, + uint32_t liobn, uint64_t window, uint32_t size) +{ + uint32_t dma_prop[5]; + int ret; + + dma_prop[0] = cpu_to_be32(liobn); + dma_prop[1] = cpu_to_be32(window >> 32); + dma_prop[2] = cpu_to_be32(window & 0xFFFFFFFF); + dma_prop[3] = 0; /* window size is 32 bits */ + dma_prop[4] = cpu_to_be32(size); + + ret = fdt_setprop_cell(fdt, node_off, "ibm,#dma-address-cells", 2); + if (ret < 0) { + return ret; + } + + ret = fdt_setprop_cell(fdt, node_off, "ibm,#dma-size-cells", 2); + if (ret < 0) { + return ret; + } + + ret = fdt_setprop(fdt, node_off, propname, dma_prop, sizeof(dma_prop)); + if (ret < 0) { + return ret; + } + + return 0; +} + +int spapr_tcet_dma_dt(void *fdt, int node_off, 
const char *propname, + SpaprTceTable *tcet) +{ + if (!tcet) { + return 0; + } + + return spapr_dma_dt(fdt, node_off, propname, + tcet->liobn, 0, tcet->nb_table << tcet->page_shift); +} + +static void spapr_tce_table_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + dc->realize = spapr_tce_table_realize; + dc->reset = spapr_tce_reset; + dc->unrealize = spapr_tce_table_unrealize; + /* Reason: This is just an internal device for handling the hypercalls */ + dc->user_creatable = false; + + QLIST_INIT(&spapr_tce_tables); + + /* hcall-tce */ + spapr_register_hypercall(H_PUT_TCE, h_put_tce); + spapr_register_hypercall(H_GET_TCE, h_get_tce); + spapr_register_hypercall(H_PUT_TCE_INDIRECT, h_put_tce_indirect); + spapr_register_hypercall(H_STUFF_TCE, h_stuff_tce); +} + +static TypeInfo spapr_tce_table_info = { + .name = TYPE_SPAPR_TCE_TABLE, + .parent = TYPE_DEVICE, + .instance_size = sizeof(SpaprTceTable), + .class_init = spapr_tce_table_class_init, +}; + +static void spapr_iommu_memory_region_class_init(ObjectClass *klass, void *data) +{ + IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass); + + imrc->translate = spapr_tce_translate_iommu; + imrc->replay = spapr_tce_replay; + imrc->get_min_page_size = spapr_tce_get_min_page_size; + imrc->notify_flag_changed = spapr_tce_notify_flag_changed; + imrc->get_attr = spapr_tce_get_attr; +} + +static const TypeInfo spapr_iommu_memory_region_info = { + .parent = TYPE_IOMMU_MEMORY_REGION, + .name = TYPE_SPAPR_IOMMU_MEMORY_REGION, + .class_init = spapr_iommu_memory_region_class_init, +}; + +static void register_types(void) +{ + type_register_static(&spapr_tce_table_info); + type_register_static(&spapr_iommu_memory_region_info); +} + +type_init(register_types); diff --git a/hw/ppc/spapr_irq.c b/hw/ppc/spapr_irq.c new file mode 100644 index 000000000..a0d1e1298 --- /dev/null +++ b/hw/ppc/spapr_irq.c @@ -0,0 +1,599 @@ +/* + * QEMU PowerPC sPAPR IRQ interface + * + * Copyright (c) 2018, IBM Corporation. + * + * This code is licensed under the GPL version 2 or later. See the + * COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "qemu/error-report.h" +#include "qapi/error.h" +#include "hw/irq.h" +#include "hw/ppc/spapr.h" +#include "hw/ppc/spapr_cpu_core.h" +#include "hw/ppc/spapr_xive.h" +#include "hw/ppc/xics.h" +#include "hw/ppc/xics_spapr.h" +#include "hw/qdev-properties.h" +#include "cpu-models.h" +#include "sysemu/kvm.h" + +#include "trace.h" + +static const TypeInfo spapr_intc_info = { + .name = TYPE_SPAPR_INTC, + .parent = TYPE_INTERFACE, + .class_size = sizeof(SpaprInterruptControllerClass), +}; + +static void spapr_irq_msi_init(SpaprMachineState *spapr) +{ + if (SPAPR_MACHINE_GET_CLASS(spapr)->legacy_irq_allocation) { + /* Legacy mode doesn't use this allocator */ + return; + } + + spapr->irq_map_nr = spapr_irq_nr_msis(spapr); + spapr->irq_map = bitmap_new(spapr->irq_map_nr); +} + +int spapr_irq_msi_alloc(SpaprMachineState *spapr, uint32_t num, bool align, + Error **errp) +{ + int irq; + + /* + * The 'align_mask' parameter of bitmap_find_next_zero_area() + * should be one less than a power of 2; 0 means no + * alignment. 
Adapt the 'align' value of the former allocator
+     * to fit the requirements of bitmap_find_next_zero_area()
+     */
+    align -= 1;
+
+    irq = bitmap_find_next_zero_area(spapr->irq_map, spapr->irq_map_nr, 0, num,
+                                     align);
+    if (irq == spapr->irq_map_nr) {
+        error_setg(errp, "can't find a free %d-IRQ block", num);
+        return -1;
+    }
+
+    bitmap_set(spapr->irq_map, irq, num);
+
+    return irq + SPAPR_IRQ_MSI;
+}
+
+void spapr_irq_msi_free(SpaprMachineState *spapr, int irq, uint32_t num)
+{
+    bitmap_clear(spapr->irq_map, irq - SPAPR_IRQ_MSI, num);
+}
+
+int spapr_irq_init_kvm(SpaprInterruptControllerInitKvm fn,
+                       SpaprInterruptController *intc,
+                       uint32_t nr_servers,
+                       Error **errp)
+{
+    Error *local_err = NULL;
+
+    if (kvm_enabled() && kvm_kernel_irqchip_allowed()) {
+        if (fn(intc, nr_servers, &local_err) < 0) {
+            if (kvm_kernel_irqchip_required()) {
+                error_prepend(&local_err,
+                              "kernel_irqchip requested but unavailable: ");
+                error_propagate(errp, local_err);
+                return -1;
+            }
+
+            /*
+             * We failed to initialize the KVM device, fall back to
+             * emulated mode
+             */
+            error_prepend(&local_err,
+                          "kernel_irqchip allowed but unavailable: ");
+            error_append_hint(&local_err,
+                              "Falling back to kernel-irqchip=off\n");
+            warn_report_err(local_err);
+        }
+    }
+
+    return 0;
+}
+
+/*
+ * XICS IRQ backend.
+ */
+
+SpaprIrq spapr_irq_xics = {
+    .xics = true,
+    .xive = false,
+};
+
+/*
+ * XIVE IRQ backend.
+ */
+
+SpaprIrq spapr_irq_xive = {
+    .xics = false,
+    .xive = true,
+};
+
+/*
+ * Dual XIVE and XICS IRQ backend.
+ *
+ * Both interrupt mode objects, XIVE and XICS, are created, but the
+ * machine starts in legacy interrupt mode (XICS). The mode can be
+ * changed by the CAS negotiation process and, in that case, the new
+ * mode is activated after an extra machine reset.
+ */
+
+/*
+ * Define values in sync with the XIVE and XICS backends
+ */
+SpaprIrq spapr_irq_dual = {
+    .xics = true,
+    .xive = true,
+};
+
+
+static int spapr_irq_check(SpaprMachineState *spapr, Error **errp)
+{
+    ERRP_GUARD();
+    MachineState *machine = MACHINE(spapr);
+
+    /*
+     * Sanity checks on non-P9 machines. On these, XIVE is not
+     * advertised, see spapr_dt_ov5_platform_support()
+     */
+    if (!ppc_type_check_compat(machine->cpu_type, CPU_POWERPC_LOGICAL_3_00,
+                               0, spapr->max_compat_pvr)) {
+        /*
+         * If the 'dual' interrupt mode is selected, force XICS as CAS
+         * negotiation is useless.
+         */
+        if (spapr->irq == &spapr_irq_dual) {
+            spapr->irq = &spapr_irq_xics;
+            return 0;
+        }
+
+        /*
+         * Using only XIVE on a non-P9 machine is a bogus setup. We have two
+         * scenarios to take into account because of the compat mode:
+         *
+         * 1. POWER7/8 machines should fail to init later on when creating
+         *    the XIVE interrupt presenters because a POWER9 exception
+         *    model is required.
+         *
+         * 2. POWER9 machines using the POWER8 compat mode won't fail and
+         *    will let the OS boot with a partial XIVE setup: DT
+         *    properties but no hcalls.
+         *
+         * To cover both and not confuse the OS, add an early failure in
+         * QEMU.
+         */
+        if (!spapr->irq->xics) {
+            error_setg(errp, "XIVE-only machines require a POWER9 CPU");
+            return -1;
+        }
+    }
+
+    /*
+     * On a POWER9 host, some older KVM XICS devices cannot be destroyed and
+     * re-created. The same happens with KVM nested guests. Detect that early
+     * to avoid QEMU exiting later when the guest reboots.
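+     *
+     * An affected invocation looks like this (illustrative):
+     *
+     *   qemu-system-ppc64 -machine pseries,ic-mode=dual,kernel-irqchip=on
+     *
+     * on a POWER9 host with one of those older KVM XICS devices.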
+ */ + if (kvm_enabled() && + spapr->irq == &spapr_irq_dual && + kvm_kernel_irqchip_required() && + xics_kvm_has_broken_disconnect()) { + error_setg(errp, + "KVM is incompatible with ic-mode=dual,kernel-irqchip=on"); + error_append_hint(errp, + "This can happen with an old KVM or in a KVM nested guest.\n"); + error_append_hint(errp, + "Try without kernel-irqchip or with kernel-irqchip=off.\n"); + return -1; + } + + return 0; +} + +/* + * sPAPR IRQ frontend routines for devices + */ +#define ALL_INTCS(spapr_) \ + { SPAPR_INTC((spapr_)->ics), SPAPR_INTC((spapr_)->xive), } + +int spapr_irq_cpu_intc_create(SpaprMachineState *spapr, + PowerPCCPU *cpu, Error **errp) +{ + SpaprInterruptController *intcs[] = ALL_INTCS(spapr); + int i; + int rc; + + for (i = 0; i < ARRAY_SIZE(intcs); i++) { + SpaprInterruptController *intc = intcs[i]; + if (intc) { + SpaprInterruptControllerClass *sicc = SPAPR_INTC_GET_CLASS(intc); + rc = sicc->cpu_intc_create(intc, cpu, errp); + if (rc < 0) { + return rc; + } + } + } + + return 0; +} + +void spapr_irq_cpu_intc_reset(SpaprMachineState *spapr, PowerPCCPU *cpu) +{ + SpaprInterruptController *intcs[] = ALL_INTCS(spapr); + int i; + + for (i = 0; i < ARRAY_SIZE(intcs); i++) { + SpaprInterruptController *intc = intcs[i]; + if (intc) { + SpaprInterruptControllerClass *sicc = SPAPR_INTC_GET_CLASS(intc); + sicc->cpu_intc_reset(intc, cpu); + } + } +} + +void spapr_irq_cpu_intc_destroy(SpaprMachineState *spapr, PowerPCCPU *cpu) +{ + SpaprInterruptController *intcs[] = ALL_INTCS(spapr); + int i; + + for (i = 0; i < ARRAY_SIZE(intcs); i++) { + SpaprInterruptController *intc = intcs[i]; + if (intc) { + SpaprInterruptControllerClass *sicc = SPAPR_INTC_GET_CLASS(intc); + sicc->cpu_intc_destroy(intc, cpu); + } + } +} + +static void spapr_set_irq(void *opaque, int irq, int level) +{ + SpaprMachineState *spapr = SPAPR_MACHINE(opaque); + SpaprInterruptControllerClass *sicc + = SPAPR_INTC_GET_CLASS(spapr->active_intc); + + sicc->set_irq(spapr->active_intc, irq, level); +} + +void spapr_irq_print_info(SpaprMachineState *spapr, Monitor *mon) +{ + SpaprInterruptControllerClass *sicc + = SPAPR_INTC_GET_CLASS(spapr->active_intc); + + sicc->print_info(spapr->active_intc, mon); +} + +void spapr_irq_dt(SpaprMachineState *spapr, uint32_t nr_servers, + void *fdt, uint32_t phandle) +{ + SpaprInterruptControllerClass *sicc + = SPAPR_INTC_GET_CLASS(spapr->active_intc); + + sicc->dt(spapr->active_intc, nr_servers, fdt, phandle); +} + +uint32_t spapr_irq_nr_msis(SpaprMachineState *spapr) +{ + SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr); + + if (smc->legacy_irq_allocation) { + return smc->nr_xirqs; + } else { + return SPAPR_XIRQ_BASE + smc->nr_xirqs - SPAPR_IRQ_MSI; + } +} + +void spapr_irq_init(SpaprMachineState *spapr, Error **errp) +{ + SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr); + + if (kvm_enabled() && kvm_kernel_irqchip_split()) { + error_setg(errp, "kernel_irqchip split mode not supported on pseries"); + return; + } + + if (spapr_irq_check(spapr, errp) < 0) { + return; + } + + /* Initialize the MSI IRQ allocator. 
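It is a no-op on machine
+     * types that use the legacy IRQ allocation scheme, which hands out
+     * MSIs with spapr_irq_find() instead.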
*/ + spapr_irq_msi_init(spapr); + + if (spapr->irq->xics) { + Object *obj; + + obj = object_new(TYPE_ICS_SPAPR); + + object_property_add_child(OBJECT(spapr), "ics", obj); + object_property_set_link(obj, ICS_PROP_XICS, OBJECT(spapr), + &error_abort); + object_property_set_int(obj, "nr-irqs", smc->nr_xirqs, &error_abort); + if (!qdev_realize(DEVICE(obj), NULL, errp)) { + return; + } + + spapr->ics = ICS_SPAPR(obj); + } + + if (spapr->irq->xive) { + uint32_t nr_servers = spapr_max_server_number(spapr); + DeviceState *dev; + int i; + + dev = qdev_new(TYPE_SPAPR_XIVE); + qdev_prop_set_uint32(dev, "nr-irqs", smc->nr_xirqs + SPAPR_XIRQ_BASE); + /* + * 8 XIVE END structures per CPU. One for each available + * priority + */ + qdev_prop_set_uint32(dev, "nr-ends", nr_servers << 3); + object_property_set_link(OBJECT(dev), "xive-fabric", OBJECT(spapr), + &error_abort); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + + spapr->xive = SPAPR_XIVE(dev); + + /* Enable the CPU IPIs */ + for (i = 0; i < nr_servers; ++i) { + SpaprInterruptControllerClass *sicc + = SPAPR_INTC_GET_CLASS(spapr->xive); + + if (sicc->claim_irq(SPAPR_INTC(spapr->xive), SPAPR_IRQ_IPI + i, + false, errp) < 0) { + return; + } + } + + spapr_xive_hcall_init(spapr); + } + + spapr->qirqs = qemu_allocate_irqs(spapr_set_irq, spapr, + smc->nr_xirqs + SPAPR_XIRQ_BASE); + + /* + * Mostly we don't actually need this until reset, except that not + * having this set up can cause VFIO devices to issue a + * false-positive warning during realize(), because they don't yet + * have an in-kernel irq chip. + */ + spapr_irq_update_active_intc(spapr); +} + +int spapr_irq_claim(SpaprMachineState *spapr, int irq, bool lsi, Error **errp) +{ + SpaprInterruptController *intcs[] = ALL_INTCS(spapr); + int i; + SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr); + int rc; + + assert(irq >= SPAPR_XIRQ_BASE); + assert(irq < (smc->nr_xirqs + SPAPR_XIRQ_BASE)); + + for (i = 0; i < ARRAY_SIZE(intcs); i++) { + SpaprInterruptController *intc = intcs[i]; + if (intc) { + SpaprInterruptControllerClass *sicc = SPAPR_INTC_GET_CLASS(intc); + rc = sicc->claim_irq(intc, irq, lsi, errp); + if (rc < 0) { + return rc; + } + } + } + + return 0; +} + +void spapr_irq_free(SpaprMachineState *spapr, int irq, int num) +{ + SpaprInterruptController *intcs[] = ALL_INTCS(spapr); + int i, j; + SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr); + + assert(irq >= SPAPR_XIRQ_BASE); + assert((irq + num) <= (smc->nr_xirqs + SPAPR_XIRQ_BASE)); + + for (i = irq; i < (irq + num); i++) { + for (j = 0; j < ARRAY_SIZE(intcs); j++) { + SpaprInterruptController *intc = intcs[j]; + + if (intc) { + SpaprInterruptControllerClass *sicc + = SPAPR_INTC_GET_CLASS(intc); + sicc->free_irq(intc, i); + } + } + } +} + +qemu_irq spapr_qirq(SpaprMachineState *spapr, int irq) +{ + SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr); + + /* + * This interface is basically for VIO and PHB devices to find the + * right qemu_irq to manipulate, so we only allow access to the + * external irqs for now. Currently anything which needs to + * access the IPIs most naturally gets there via the guest side + * interfaces, we can change this if we need to in future. 
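+     *
+     * Typical use from a device model (illustrative):
+     * qemu_set_irq(spapr_qirq(spapr, irq), 1) raises the line, and
+     * passing level 0 lowers it.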
+ */ + assert(irq >= SPAPR_XIRQ_BASE); + assert(irq < (smc->nr_xirqs + SPAPR_XIRQ_BASE)); + + if (spapr->ics) { + assert(ics_valid_irq(spapr->ics, irq)); + } + if (spapr->xive) { + assert(irq < spapr->xive->nr_irqs); + assert(xive_eas_is_valid(&spapr->xive->eat[irq])); + } + + return spapr->qirqs[irq]; +} + +int spapr_irq_post_load(SpaprMachineState *spapr, int version_id) +{ + SpaprInterruptControllerClass *sicc; + + spapr_irq_update_active_intc(spapr); + sicc = SPAPR_INTC_GET_CLASS(spapr->active_intc); + return sicc->post_load(spapr->active_intc, version_id); +} + +void spapr_irq_reset(SpaprMachineState *spapr, Error **errp) +{ + assert(!spapr->irq_map || bitmap_empty(spapr->irq_map, spapr->irq_map_nr)); + + spapr_irq_update_active_intc(spapr); +} + +int spapr_irq_get_phandle(SpaprMachineState *spapr, void *fdt, Error **errp) +{ + const char *nodename = "interrupt-controller"; + int offset, phandle; + + offset = fdt_subnode_offset(fdt, 0, nodename); + if (offset < 0) { + error_setg(errp, "Can't find node \"%s\": %s", + nodename, fdt_strerror(offset)); + return -1; + } + + phandle = fdt_get_phandle(fdt, offset); + if (!phandle) { + error_setg(errp, "Can't get phandle of node \"%s\"", nodename); + return -1; + } + + return phandle; +} + +static void set_active_intc(SpaprMachineState *spapr, + SpaprInterruptController *new_intc) +{ + SpaprInterruptControllerClass *sicc; + uint32_t nr_servers = spapr_max_server_number(spapr); + + assert(new_intc); + + if (new_intc == spapr->active_intc) { + /* Nothing to do */ + return; + } + + if (spapr->active_intc) { + sicc = SPAPR_INTC_GET_CLASS(spapr->active_intc); + if (sicc->deactivate) { + sicc->deactivate(spapr->active_intc); + } + } + + sicc = SPAPR_INTC_GET_CLASS(new_intc); + if (sicc->activate) { + sicc->activate(new_intc, nr_servers, &error_fatal); + } + + spapr->active_intc = new_intc; + + /* + * We've changed the kernel irqchip, let VFIO devices know they + * need to readjust. + */ + kvm_irqchip_change_notify(); +} + +void spapr_irq_update_active_intc(SpaprMachineState *spapr) +{ + SpaprInterruptController *new_intc; + + if (!spapr->ics) { + /* + * XXX before we run CAS, ov5_cas is initialized empty, which + * indicates XICS, even if we have ic-mode=xive. TODO: clean + * up the CAS path so that we have a clearer way of handling + * this. + */ + new_intc = SPAPR_INTC(spapr->xive); + } else if (spapr->ov5_cas + && spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) { + new_intc = SPAPR_INTC(spapr->xive); + } else { + new_intc = SPAPR_INTC(spapr->ics); + } + + set_active_intc(spapr, new_intc); +} + +/* + * XICS legacy routines - to deprecate one day + */ + +static int ics_find_free_block(ICSState *ics, int num, int alignnum) +{ + int first, i; + + for (first = 0; first < ics->nr_irqs; first += alignnum) { + if (num > (ics->nr_irqs - first)) { + return -1; + } + for (i = first; i < first + num; ++i) { + if (!ics_irq_free(ics, i)) { + break; + } + } + if (i == (first + num)) { + return first; + } + } + + return -1; +} + +int spapr_irq_find(SpaprMachineState *spapr, int num, bool align, Error **errp) +{ + ICSState *ics = spapr->ics; + int first = -1; + + assert(ics); + + /* + * MSIMesage::data is used for storing VIRQ so + * it has to be aligned to num to support multiple + * MSI vectors. MSI-X is not affected by this. + * The hint is used for the first IRQ, the rest should + * be allocated continuously. 
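+     *
+     * Worked example (illustrative): for an aligned 4-vector MSI
+     * request, ics_find_free_block(ics, 4, 4) only probes block starts
+     * that are multiples of 4, so the returned IRQ is ics->offset plus
+     * such a multiple.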
+ */ + if (align) { + assert((num == 1) || (num == 2) || (num == 4) || + (num == 8) || (num == 16) || (num == 32)); + first = ics_find_free_block(ics, num, num); + } else { + first = ics_find_free_block(ics, num, 1); + } + + if (first < 0) { + error_setg(errp, "can't find a free %d-IRQ block", num); + return -1; + } + + return first + ics->offset; +} + +SpaprIrq spapr_irq_xics_legacy = { + .xics = true, + .xive = false, +}; + +static void spapr_irq_register_types(void) +{ + type_register_static(&spapr_intc_info); +} + +type_init(spapr_irq_register_types) diff --git a/hw/ppc/spapr_numa.c b/hw/ppc/spapr_numa.c new file mode 100644 index 000000000..e9ef7e764 --- /dev/null +++ b/hw/ppc/spapr_numa.c @@ -0,0 +1,697 @@ +/* + * QEMU PowerPC pSeries Logical Partition NUMA associativity handling + * + * Copyright IBM Corp. 2020 + * + * Authors: + * Daniel Henrique Barboza + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "qemu-common.h" +#include "hw/ppc/spapr_numa.h" +#include "hw/pci-host/spapr.h" +#include "hw/ppc/fdt.h" + +/* Moved from hw/ppc/spapr_pci_nvlink2.c */ +#define SPAPR_GPU_NUMA_ID (cpu_to_be32(1)) + +/* + * Retrieves max_dist_ref_points of the current NUMA affinity. + */ +static int get_max_dist_ref_points(SpaprMachineState *spapr) +{ + if (spapr_ovec_test(spapr->ov5_cas, OV5_FORM2_AFFINITY)) { + return FORM2_DIST_REF_POINTS; + } + + return FORM1_DIST_REF_POINTS; +} + +/* + * Retrieves numa_assoc_size of the current NUMA affinity. + */ +static int get_numa_assoc_size(SpaprMachineState *spapr) +{ + if (spapr_ovec_test(spapr->ov5_cas, OV5_FORM2_AFFINITY)) { + return FORM2_NUMA_ASSOC_SIZE; + } + + return FORM1_NUMA_ASSOC_SIZE; +} + +/* + * Retrieves vcpu_assoc_size of the current NUMA affinity. + * + * vcpu_assoc_size is the size of ibm,associativity array + * for CPUs, which has an extra element (vcpu_id) in the end. + */ +static int get_vcpu_assoc_size(SpaprMachineState *spapr) +{ + return get_numa_assoc_size(spapr) + 1; +} + +/* + * Retrieves the ibm,associativity array of NUMA node 'node_id' + * for the current NUMA affinity. + */ +static const uint32_t *get_associativity(SpaprMachineState *spapr, int node_id) +{ + if (spapr_ovec_test(spapr->ov5_cas, OV5_FORM2_AFFINITY)) { + return spapr->FORM2_assoc_array[node_id]; + } + return spapr->FORM1_assoc_array[node_id]; +} + +/* + * Wrapper that returns node distance from ms->numa_state->nodes + * after handling edge cases where the distance might be absent. + */ +static int get_numa_distance(MachineState *ms, int src, int dst) +{ + NodeInfo *numa_info = ms->numa_state->nodes; + int ret = numa_info[src].distance[dst]; + + if (ret != 0) { + return ret; + } + + /* + * In case QEMU adds a default NUMA single node when the user + * did not add any, or where the user did not supply distances, + * the distance will be absent (zero). Return local/remote + * distance in this case. + */ + if (src == dst) { + return NUMA_DISTANCE_MIN; + } + + return NUMA_DISTANCE_DEFAULT; +} + +static bool spapr_numa_is_symmetrical(MachineState *ms) +{ + int nb_numa_nodes = ms->numa_state->num_nodes; + int src, dst; + + for (src = 0; src < nb_numa_nodes; src++) { + for (dst = src; dst < nb_numa_nodes; dst++) { + if (get_numa_distance(ms, src, dst) != + get_numa_distance(ms, dst, src)) { + return false; + } + } + } + + return true; +} + +/* + * NVLink2-connected GPU RAM needs to be placed on a separate NUMA node. 
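+ * (Illustrative: with two user-defined NUMA nodes the first GPU is
+ * assigned NUMA id 2; with none configured, it gets id 1.)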
+ * We assign a new numa ID per GPU in spapr_pci_collect_nvgpu() which is
+ * called from vPHB reset handler so we initialize the counter here.
+ * If no NUMA is configured from the QEMU side, we start from 1 as GPU RAM
+ * must be equally distant from any other node.
+ * The final value of spapr->gpu_numa_id is going to be written to
+ * max-associativity-domains in spapr_build_fdt().
+ */
+unsigned int spapr_numa_initial_nvgpu_numa_id(MachineState *machine)
+{
+    return MAX(1, machine->numa_state->num_nodes);
+}
+
+/*
+ * This function will translate the user distances into
+ * what the kernel understands as possible values: 10
+ * (local distance), 20, 40, 80 and 160, and return the equivalent
+ * NUMA level for each. Current heuristic is:
+ *  - local distance (10) returns numa_level = 0x4, meaning there is
+ *    no rounding for local distance
+ *  - distances between 11 and 30 inclusive -> rounded to 20,
+ *    numa_level = 0x3
+ *  - distances between 31 and 60 inclusive -> rounded to 40,
+ *    numa_level = 0x2
+ *  - distances between 61 and 120 inclusive -> rounded to 80,
+ *    numa_level = 0x1
+ *  - everything above 120 returns numa_level = 0 to indicate that
+ *    there is no match. This will be calculated as distance = 160
+ *    by the kernel (as of v5.9)
+ */
+static uint8_t spapr_numa_get_numa_level(uint8_t distance)
+{
+    if (distance == 10) {
+        return 0x4;
+    } else if (distance > 10 && distance <= 30) {
+        return 0x3;
+    } else if (distance > 30 && distance <= 60) {
+        return 0x2;
+    } else if (distance > 60 && distance <= 120) {
+        return 0x1;
+    }
+
+    return 0;
+}
+
+static void spapr_numa_define_FORM1_domains(SpaprMachineState *spapr)
+{
+    MachineState *ms = MACHINE(spapr);
+    int nb_numa_nodes = ms->numa_state->num_nodes;
+    int src, dst, i, j;
+
+    /*
+     * Fill all associativity domains of non-zero NUMA nodes with
+     * node_id. This is required because the default value (0) is
+     * considered a match with associativity domains of node 0.
+     */
+    for (i = 1; i < nb_numa_nodes; i++) {
+        for (j = 1; j < FORM1_DIST_REF_POINTS; j++) {
+            spapr->FORM1_assoc_array[i][j] = cpu_to_be32(i);
+        }
+    }
+
+    for (src = 0; src < nb_numa_nodes; src++) {
+        for (dst = src; dst < nb_numa_nodes; dst++) {
+            /*
+             * This is how the associativity domain between A and B
+             * is calculated:
+             *
+             * - get the distance D between them
+             * - get the corresponding NUMA level 'n_level' for D
+             * - all associativity arrays were initialized with their own
+             *   numa_ids, and we're calculating the distance in node_id
+             *   ascending order, starting from node id 0 (the first node
+             *   retrieved by numa_state). This will have a cascade effect in
+             *   the algorithm because the associativity domains that node 0
+             *   defines will be carried over to other nodes, and node 1
+             *   associativities will be carried over after taking node 0
+             *   associativities into account, and so on. This happens because
+             *   we'll assign assoc_src as the associativity domain of dst
+             *   as well, for all NUMA levels beyond and including n_level.
+             *
+             * The PPC kernel expects the associativity domains of node 0 to
+             * always be 0, and this algorithm guarantees that by default.
+             */
+            uint8_t distance = get_numa_distance(ms, src, dst);
+            uint8_t n_level = spapr_numa_get_numa_level(distance);
+            uint32_t assoc_src;
+
+            /*
+             * n_level = 0 means that the distance is greater than our last
+             * rounded value (120). In this case there is no NUMA level match
+             * between src and dst and we can skip the remainder of the loop.
+ * + * The Linux kernel will assume that the distance between src and + * dst, in this case of no match, is 10 (local distance) doubled + * for each NUMA it didn't match. We have FORM1_DIST_REF_POINTS + * levels (4), so this gives us 10*2*2*2*2 = 160. + * + * This logic can be seen in the Linux kernel source code, as of + * v5.9, in arch/powerpc/mm/numa.c, function __node_distance(). + */ + if (n_level == 0) { + continue; + } + + /* + * We must assign all assoc_src to dst, starting from n_level + * and going up to 0x1. + */ + for (i = n_level; i > 0; i--) { + assoc_src = spapr->FORM1_assoc_array[src][i]; + spapr->FORM1_assoc_array[dst][i] = assoc_src; + } + } + } + +} + +static void spapr_numa_FORM1_affinity_check(MachineState *machine) +{ + int i; + + /* + * Check we don't have a memory-less/cpu-less NUMA node + * Firmware relies on the existing memory/cpu topology to provide the + * NUMA topology to the kernel. + * And the linux kernel needs to know the NUMA topology at start + * to be able to hotplug CPUs later. + */ + if (machine->numa_state->num_nodes) { + for (i = 0; i < machine->numa_state->num_nodes; ++i) { + /* check for memory-less node */ + if (machine->numa_state->nodes[i].node_mem == 0) { + CPUState *cs; + int found = 0; + /* check for cpu-less node */ + CPU_FOREACH(cs) { + PowerPCCPU *cpu = POWERPC_CPU(cs); + if (cpu->node_id == i) { + found = 1; + break; + } + } + /* memory-less and cpu-less node */ + if (!found) { + error_report( +"Memory-less/cpu-less nodes are not supported with FORM1 NUMA (node %d)", i); + exit(EXIT_FAILURE); + } + } + } + } + + if (!spapr_numa_is_symmetrical(machine)) { + error_report( +"Asymmetrical NUMA topologies aren't supported in the pSeries machine using FORM1 NUMA"); + exit(EXIT_FAILURE); + } +} + +/* + * Set NUMA machine state data based on FORM1 affinity semantics. + */ +static void spapr_numa_FORM1_affinity_init(SpaprMachineState *spapr, + MachineState *machine) +{ + SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr); + int nb_numa_nodes = machine->numa_state->num_nodes; + int i, j, max_nodes_with_gpus; + + /* + * For all associativity arrays: first position is the size, + * position FORM1_DIST_REF_POINTS is always the numa_id, + * represented by the index 'i'. + * + * This will break on sparse NUMA setups, when/if QEMU starts + * to support it, because there will be no more guarantee that + * 'i' will be a valid node_id set by the user. + */ + for (i = 0; i < nb_numa_nodes; i++) { + spapr->FORM1_assoc_array[i][0] = cpu_to_be32(FORM1_DIST_REF_POINTS); + spapr->FORM1_assoc_array[i][FORM1_DIST_REF_POINTS] = cpu_to_be32(i); + } + + /* + * Initialize NVLink GPU associativity arrays. We know that + * the first GPU will take the first available NUMA id, and + * we'll have a maximum of NVGPU_MAX_NUM GPUs in the machine. + * At this point we're not sure if there are GPUs or not, but + * let's initialize the associativity arrays and allow NVLink + * GPUs to be handled like regular NUMA nodes later on. + */ + max_nodes_with_gpus = nb_numa_nodes + NVGPU_MAX_NUM; + + for (i = nb_numa_nodes; i < max_nodes_with_gpus; i++) { + spapr->FORM1_assoc_array[i][0] = cpu_to_be32(FORM1_DIST_REF_POINTS); + + for (j = 1; j < FORM1_DIST_REF_POINTS; j++) { + uint32_t gpu_assoc = smc->pre_5_1_assoc_refpoints ? + SPAPR_GPU_NUMA_ID : cpu_to_be32(i); + spapr->FORM1_assoc_array[i][j] = gpu_assoc; + } + + spapr->FORM1_assoc_array[i][FORM1_DIST_REF_POINTS] = cpu_to_be32(i); + } + + /* + * Guests pseries-5.1 and older uses zeroed associativity domains, + * i.e. 
no domain definition based on NUMA distance input. + * + * Same thing with guests that have only one NUMA node. + */ + if (smc->pre_5_2_numa_associativity || + machine->numa_state->num_nodes <= 1) { + return; + } + + spapr_numa_define_FORM1_domains(spapr); +} + +/* + * Init NUMA FORM2 machine state data + */ +static void spapr_numa_FORM2_affinity_init(SpaprMachineState *spapr) +{ + int i; + + /* + * For all resources but CPUs, FORM2 associativity arrays will + * be a size 2 array with the following format: + * + * ibm,associativity = {1, numa_id} + * + * CPUs will write an additional 'vcpu_id' on top of the arrays + * being initialized here. 'numa_id' is represented by the + * index 'i' of the loop. + * + * Given that this initialization is also valid for GPU associativity + * arrays, handle everything in one single step by populating the + * arrays up to NUMA_NODES_MAX_NUM. + */ + for (i = 0; i < NUMA_NODES_MAX_NUM; i++) { + spapr->FORM2_assoc_array[i][0] = cpu_to_be32(1); + spapr->FORM2_assoc_array[i][1] = cpu_to_be32(i); + } +} + +void spapr_numa_associativity_init(SpaprMachineState *spapr, + MachineState *machine) +{ + spapr_numa_FORM1_affinity_init(spapr, machine); + spapr_numa_FORM2_affinity_init(spapr); +} + +void spapr_numa_associativity_check(SpaprMachineState *spapr) +{ + /* + * FORM2 does not have any restrictions we need to handle + * at CAS time, for now. + */ + if (spapr_ovec_test(spapr->ov5_cas, OV5_FORM2_AFFINITY)) { + return; + } + + spapr_numa_FORM1_affinity_check(MACHINE(spapr)); +} + +void spapr_numa_write_associativity_dt(SpaprMachineState *spapr, void *fdt, + int offset, int nodeid) +{ + const uint32_t *associativity = get_associativity(spapr, nodeid); + + _FDT((fdt_setprop(fdt, offset, "ibm,associativity", + associativity, + get_numa_assoc_size(spapr) * sizeof(uint32_t)))); +} + +static uint32_t *spapr_numa_get_vcpu_assoc(SpaprMachineState *spapr, + PowerPCCPU *cpu) +{ + const uint32_t *associativity = get_associativity(spapr, cpu->node_id); + int max_distance_ref_points = get_max_dist_ref_points(spapr); + int vcpu_assoc_size = get_vcpu_assoc_size(spapr); + uint32_t *vcpu_assoc = g_new(uint32_t, vcpu_assoc_size); + int index = spapr_get_vcpu_id(cpu); + + /* + * VCPUs have an extra 'cpu_id' value in ibm,associativity + * compared to other resources. Increment the size at index + * 0, put cpu_id last, then copy the remaining associativity + * domains. + */ + vcpu_assoc[0] = cpu_to_be32(max_distance_ref_points + 1); + vcpu_assoc[vcpu_assoc_size - 1] = cpu_to_be32(index); + memcpy(vcpu_assoc + 1, associativity + 1, + (vcpu_assoc_size - 2) * sizeof(uint32_t)); + + return vcpu_assoc; +} + +int spapr_numa_fixup_cpu_dt(SpaprMachineState *spapr, void *fdt, + int offset, PowerPCCPU *cpu) +{ + g_autofree uint32_t *vcpu_assoc = NULL; + int vcpu_assoc_size = get_vcpu_assoc_size(spapr); + + vcpu_assoc = spapr_numa_get_vcpu_assoc(spapr, cpu); + + /* Advertise NUMA via ibm,associativity */ + return fdt_setprop(fdt, offset, "ibm,associativity", vcpu_assoc, + vcpu_assoc_size * sizeof(uint32_t)); +} + + +int spapr_numa_write_assoc_lookup_arrays(SpaprMachineState *spapr, void *fdt, + int offset) +{ + MachineState *machine = MACHINE(spapr); + int max_distance_ref_points = get_max_dist_ref_points(spapr); + int nb_numa_nodes = machine->numa_state->num_nodes; + int nr_nodes = nb_numa_nodes ? 
nb_numa_nodes : 1; + uint32_t *int_buf, *cur_index, buf_len; + int ret, i; + + /* ibm,associativity-lookup-arrays */ + buf_len = (nr_nodes * max_distance_ref_points + 2) * sizeof(uint32_t); + cur_index = int_buf = g_malloc0(buf_len); + int_buf[0] = cpu_to_be32(nr_nodes); + /* Number of entries per associativity list */ + int_buf[1] = cpu_to_be32(max_distance_ref_points); + cur_index += 2; + for (i = 0; i < nr_nodes; i++) { + /* + * For the lookup-array we use the ibm,associativity array of the + * current NUMA affinity, without the first element (size). + */ + const uint32_t *associativity = get_associativity(spapr, i); + memcpy(cur_index, ++associativity, + sizeof(uint32_t) * max_distance_ref_points); + cur_index += max_distance_ref_points; + } + ret = fdt_setprop(fdt, offset, "ibm,associativity-lookup-arrays", int_buf, + (cur_index - int_buf) * sizeof(uint32_t)); + g_free(int_buf); + + return ret; +} + +static void spapr_numa_FORM1_write_rtas_dt(SpaprMachineState *spapr, + void *fdt, int rtas) +{ + MachineState *ms = MACHINE(spapr); + SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr); + uint32_t number_nvgpus_nodes = spapr->gpu_numa_id - + spapr_numa_initial_nvgpu_numa_id(ms); + uint32_t refpoints[] = { + cpu_to_be32(0x4), + cpu_to_be32(0x3), + cpu_to_be32(0x2), + cpu_to_be32(0x1), + }; + uint32_t nr_refpoints = ARRAY_SIZE(refpoints); + uint32_t maxdomain = ms->numa_state->num_nodes + number_nvgpus_nodes; + uint32_t maxdomains[] = { + cpu_to_be32(4), + cpu_to_be32(maxdomain), + cpu_to_be32(maxdomain), + cpu_to_be32(maxdomain), + cpu_to_be32(maxdomain) + }; + + if (smc->pre_5_2_numa_associativity || + ms->numa_state->num_nodes <= 1) { + uint32_t legacy_refpoints[] = { + cpu_to_be32(0x4), + cpu_to_be32(0x4), + cpu_to_be32(0x2), + }; + uint32_t legacy_maxdomain = spapr->gpu_numa_id > 1 ? 1 : 0; + uint32_t legacy_maxdomains[] = { + cpu_to_be32(4), + cpu_to_be32(legacy_maxdomain), + cpu_to_be32(legacy_maxdomain), + cpu_to_be32(legacy_maxdomain), + cpu_to_be32(spapr->gpu_numa_id), + }; + + G_STATIC_ASSERT(sizeof(legacy_refpoints) <= sizeof(refpoints)); + G_STATIC_ASSERT(sizeof(legacy_maxdomains) <= sizeof(maxdomains)); + + nr_refpoints = 3; + + memcpy(refpoints, legacy_refpoints, sizeof(legacy_refpoints)); + memcpy(maxdomains, legacy_maxdomains, sizeof(legacy_maxdomains)); + + /* pseries-5.0 and older reference-points array is {0x4, 0x4} */ + if (smc->pre_5_1_assoc_refpoints) { + nr_refpoints = 2; + } + } + + _FDT(fdt_setprop(fdt, rtas, "ibm,associativity-reference-points", + refpoints, nr_refpoints * sizeof(refpoints[0]))); + + _FDT(fdt_setprop(fdt, rtas, "ibm,max-associativity-domains", + maxdomains, sizeof(maxdomains))); +} + +static void spapr_numa_FORM2_write_rtas_tables(SpaprMachineState *spapr, + void *fdt, int rtas) +{ + MachineState *ms = MACHINE(spapr); + int nb_numa_nodes = ms->numa_state->num_nodes; + int distance_table_entries = nb_numa_nodes * nb_numa_nodes; + g_autofree uint32_t *lookup_index_table = NULL; + g_autofree uint8_t *distance_table = NULL; + int src, dst, i, distance_table_size; + + /* + * ibm,numa-lookup-index-table: array with length and a + * list of NUMA ids present in the guest. 
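+     * For a 3-node guest the property would read (illustrative):
+     *
+     *   ibm,numa-lookup-index-table = <3 0 1 2>;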
+ */ + lookup_index_table = g_new0(uint32_t, nb_numa_nodes + 1); + lookup_index_table[0] = cpu_to_be32(nb_numa_nodes); + + for (i = 0; i < nb_numa_nodes; i++) { + lookup_index_table[i + 1] = cpu_to_be32(i); + } + + _FDT(fdt_setprop(fdt, rtas, "ibm,numa-lookup-index-table", + lookup_index_table, + (nb_numa_nodes + 1) * sizeof(uint32_t))); + + /* + * ibm,numa-distance-table: contains all node distances. First + * element is the size of the table as uint32, followed up + * by all the uint8 distances from the first NUMA node, then all + * distances from the second NUMA node and so on. + * + * ibm,numa-lookup-index-table is used by guest to navigate this + * array because NUMA ids can be sparse (node 0 is the first, + * node 8 is the second ...). + */ + distance_table_size = distance_table_entries * sizeof(uint8_t) + + sizeof(uint32_t); + distance_table = g_new0(uint8_t, distance_table_size); + stl_be_p(distance_table, distance_table_entries); + + /* Skip the uint32_t array length at the start */ + i = sizeof(uint32_t); + + for (src = 0; src < nb_numa_nodes; src++) { + for (dst = 0; dst < nb_numa_nodes; dst++) { + distance_table[i++] = get_numa_distance(ms, src, dst); + } + } + + _FDT(fdt_setprop(fdt, rtas, "ibm,numa-distance-table", + distance_table, distance_table_size)); +} + +/* + * This helper could be compressed in a single function with + * FORM1 logic since we're setting the same DT values, with the + * difference being a call to spapr_numa_FORM2_write_rtas_tables() + * in the end. The separation was made to avoid clogging FORM1 code + * which already has to deal with compat modes from previous + * QEMU machine types. + */ +static void spapr_numa_FORM2_write_rtas_dt(SpaprMachineState *spapr, + void *fdt, int rtas) +{ + MachineState *ms = MACHINE(spapr); + uint32_t number_nvgpus_nodes = spapr->gpu_numa_id - + spapr_numa_initial_nvgpu_numa_id(ms); + + /* + * In FORM2, ibm,associativity-reference-points will point to + * the element in the ibm,associativity array that contains the + * primary domain index (for FORM2, the first element). + * + * This value (in our case, the numa-id) is then used as an index + * to retrieve all other attributes of the node (distance, + * bandwidth, latency) via ibm,numa-lookup-index-table and other + * ibm,numa-*-table properties. + */ + uint32_t refpoints[] = { cpu_to_be32(1) }; + + uint32_t maxdomain = ms->numa_state->num_nodes + number_nvgpus_nodes; + uint32_t maxdomains[] = { cpu_to_be32(1), cpu_to_be32(maxdomain) }; + + _FDT(fdt_setprop(fdt, rtas, "ibm,associativity-reference-points", + refpoints, sizeof(refpoints))); + + _FDT(fdt_setprop(fdt, rtas, "ibm,max-associativity-domains", + maxdomains, sizeof(maxdomains))); + + spapr_numa_FORM2_write_rtas_tables(spapr, fdt, rtas); +} + +/* + * Helper that writes ibm,associativity-reference-points and + * max-associativity-domains in the RTAS pointed by @rtas + * in the DT @fdt. 
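+ *
+ * The FORM2 encoding is written when the guest negotiated
+ * OV5_FORM2_AFFINITY during CAS; otherwise FORM1 is used.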
+ */ +void spapr_numa_write_rtas_dt(SpaprMachineState *spapr, void *fdt, int rtas) +{ + if (spapr_ovec_test(spapr->ov5_cas, OV5_FORM2_AFFINITY)) { + spapr_numa_FORM2_write_rtas_dt(spapr, fdt, rtas); + return; + } + + spapr_numa_FORM1_write_rtas_dt(spapr, fdt, rtas); +} + +static target_ulong h_home_node_associativity(PowerPCCPU *cpu, + SpaprMachineState *spapr, + target_ulong opcode, + target_ulong *args) +{ + g_autofree uint32_t *vcpu_assoc = NULL; + target_ulong flags = args[0]; + target_ulong procno = args[1]; + PowerPCCPU *tcpu; + int idx, assoc_idx; + int vcpu_assoc_size = get_vcpu_assoc_size(spapr); + + /* only support procno from H_REGISTER_VPA */ + if (flags != 0x1) { + return H_FUNCTION; + } + + tcpu = spapr_find_cpu(procno); + if (tcpu == NULL) { + return H_P2; + } + + /* + * Given that we want to be flexible with the sizes and indexes, + * we must consider that there is a hard limit of how many + * associativities domain we can fit in R4 up to R9, which would be + * 12 associativity domains for vcpus. Assert and bail if that's + * not the case. + */ + g_assert((vcpu_assoc_size - 1) <= 12); + + vcpu_assoc = spapr_numa_get_vcpu_assoc(spapr, tcpu); + /* assoc_idx starts at 1 to skip associativity size */ + assoc_idx = 1; + +#define ASSOCIATIVITY(a, b) (((uint64_t)(a) << 32) | \ + ((uint64_t)(b) & 0xffffffff)) + + for (idx = 0; idx < 6; idx++) { + int32_t a, b; + + /* + * vcpu_assoc[] will contain the associativity domains for tcpu, + * including tcpu->node_id and procno, meaning that we don't + * need to use these variables here. + * + * We'll read 2 values at a time to fill up the ASSOCIATIVITY() + * macro. The ternary will fill the remaining registers with -1 + * after we went through vcpu_assoc[]. + */ + a = assoc_idx < vcpu_assoc_size ? + be32_to_cpu(vcpu_assoc[assoc_idx++]) : -1; + b = assoc_idx < vcpu_assoc_size ? + be32_to_cpu(vcpu_assoc[assoc_idx++]) : -1; + + args[idx] = ASSOCIATIVITY(a, b); + } +#undef ASSOCIATIVITY + + return H_SUCCESS; +} + +static void spapr_numa_register_types(void) +{ + /* Virtual Processor Home Node */ + spapr_register_hypercall(H_HOME_NODE_ASSOCIATIVITY, + h_home_node_associativity); +} + +type_init(spapr_numa_register_types) diff --git a/hw/ppc/spapr_nvdimm.c b/hw/ppc/spapr_nvdimm.c new file mode 100644 index 000000000..91de1052f --- /dev/null +++ b/hw/ppc/spapr_nvdimm.c @@ -0,0 +1,528 @@ +/* + * QEMU PAPR Storage Class Memory Interfaces + * + * Copyright (c) 2019-2020, IBM Corporation. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "hw/ppc/spapr_drc.h" +#include "hw/ppc/spapr_nvdimm.h" +#include "hw/mem/nvdimm.h" +#include "qemu/nvdimm-utils.h" +#include "hw/ppc/fdt.h" +#include "qemu/range.h" +#include "hw/ppc/spapr_numa.h" + +/* DIMM health bitmap bitmap indicators. Taken from kernel's papr_scm.c */ +/* SCM device is unable to persist memory contents */ +#define PAPR_PMEM_UNARMED PPC_BIT(0) + +/* + * The nvdimm size should be aligned to SCM block size. + * The SCM block size should be aligned to SPAPR_MEMORY_BLOCK_SIZE + * in order to have SCM regions not to overlap with dimm memory regions. + * The SCM devices can have variable block sizes. For now, fixing the + * block size to the minimum value. + */ +#define SPAPR_MINIMUM_SCM_BLOCK_SIZE SPAPR_MEMORY_BLOCK_SIZE + +/* Have an explicit check for alignment */ +QEMU_BUILD_BUG_ON(SPAPR_MINIMUM_SCM_BLOCK_SIZE % SPAPR_MEMORY_BLOCK_SIZE); + +bool spapr_nvdimm_validate(HotplugHandler *hotplug_dev, NVDIMMDevice *nvdimm, + uint64_t size, Error **errp) +{ + const MachineClass *mc = MACHINE_GET_CLASS(hotplug_dev); + const MachineState *ms = MACHINE(hotplug_dev); + g_autofree char *uuidstr = NULL; + QemuUUID uuid; + int ret; + + if (!mc->nvdimm_supported) { + error_setg(errp, "NVDIMM hotplug not supported for this machine"); + return false; + } + + if (!ms->nvdimms_state->is_enabled) { + error_setg(errp, "nvdimm device found but 'nvdimm=off' was set"); + return false; + } + + if (object_property_get_int(OBJECT(nvdimm), NVDIMM_LABEL_SIZE_PROP, + &error_abort) == 0) { + error_setg(errp, "PAPR requires NVDIMM devices to have label-size set"); + return false; + } + + if (size % SPAPR_MINIMUM_SCM_BLOCK_SIZE) { + error_setg(errp, "PAPR requires NVDIMM memory size (excluding label)" + " to be a multiple of %" PRIu64 "MB", + SPAPR_MINIMUM_SCM_BLOCK_SIZE / MiB); + return false; + } + + uuidstr = object_property_get_str(OBJECT(nvdimm), NVDIMM_UUID_PROP, + &error_abort); + ret = qemu_uuid_parse(uuidstr, &uuid); + g_assert(!ret); + + if (qemu_uuid_is_null(&uuid)) { + error_setg(errp, "NVDIMM device requires the uuid to be set"); + return false; + } + + return true; +} + + +void spapr_add_nvdimm(DeviceState *dev, uint64_t slot) +{ + SpaprDrc *drc; + bool hotplugged = spapr_drc_hotplugged(dev); + + drc = spapr_drc_by_id(TYPE_SPAPR_DRC_PMEM, slot); + g_assert(drc); + + /* + * pc_dimm_get_free_slot() provided a free slot at pre-plug. The + * corresponding DRC is thus assumed to be attachable. 
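+     * On hot plug (as opposed to cold plug at machine start) the guest
+     * is additionally notified via spapr_hotplug_req_add_by_index().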
+ */ + spapr_drc_attach(drc, dev); + + if (hotplugged) { + spapr_hotplug_req_add_by_index(drc); + } +} + +static int spapr_dt_nvdimm(SpaprMachineState *spapr, void *fdt, + int parent_offset, NVDIMMDevice *nvdimm) +{ + int child_offset; + char *buf; + SpaprDrc *drc; + uint32_t drc_idx; + uint32_t node = object_property_get_uint(OBJECT(nvdimm), PC_DIMM_NODE_PROP, + &error_abort); + uint64_t slot = object_property_get_uint(OBJECT(nvdimm), PC_DIMM_SLOT_PROP, + &error_abort); + uint64_t lsize = nvdimm->label_size; + uint64_t size = object_property_get_int(OBJECT(nvdimm), PC_DIMM_SIZE_PROP, + NULL); + + drc = spapr_drc_by_id(TYPE_SPAPR_DRC_PMEM, slot); + g_assert(drc); + + drc_idx = spapr_drc_index(drc); + + buf = g_strdup_printf("ibm,pmemory@%x", drc_idx); + child_offset = fdt_add_subnode(fdt, parent_offset, buf); + g_free(buf); + + _FDT(child_offset); + + _FDT((fdt_setprop_cell(fdt, child_offset, "reg", drc_idx))); + _FDT((fdt_setprop_string(fdt, child_offset, "compatible", "ibm,pmemory"))); + _FDT((fdt_setprop_string(fdt, child_offset, "device_type", "ibm,pmemory"))); + + spapr_numa_write_associativity_dt(spapr, fdt, child_offset, node); + + buf = qemu_uuid_unparse_strdup(&nvdimm->uuid); + _FDT((fdt_setprop_string(fdt, child_offset, "ibm,unit-guid", buf))); + g_free(buf); + + _FDT((fdt_setprop_cell(fdt, child_offset, "ibm,my-drc-index", drc_idx))); + + _FDT((fdt_setprop_u64(fdt, child_offset, "ibm,block-size", + SPAPR_MINIMUM_SCM_BLOCK_SIZE))); + _FDT((fdt_setprop_u64(fdt, child_offset, "ibm,number-of-blocks", + size / SPAPR_MINIMUM_SCM_BLOCK_SIZE))); + _FDT((fdt_setprop_cell(fdt, child_offset, "ibm,metadata-size", lsize))); + + _FDT((fdt_setprop_string(fdt, child_offset, "ibm,pmem-application", + "operating-system"))); + _FDT(fdt_setprop(fdt, child_offset, "ibm,cache-flush-required", NULL, 0)); + + return child_offset; +} + +int spapr_pmem_dt_populate(SpaprDrc *drc, SpaprMachineState *spapr, + void *fdt, int *fdt_start_offset, Error **errp) +{ + NVDIMMDevice *nvdimm = NVDIMM(drc->dev); + + *fdt_start_offset = spapr_dt_nvdimm(spapr, fdt, 0, nvdimm); + + return 0; +} + +void spapr_dt_persistent_memory(SpaprMachineState *spapr, void *fdt) +{ + int offset = fdt_subnode_offset(fdt, 0, "ibm,persistent-memory"); + GSList *iter, *nvdimms = nvdimm_get_device_list(); + + if (offset < 0) { + offset = fdt_add_subnode(fdt, 0, "ibm,persistent-memory"); + _FDT(offset); + _FDT((fdt_setprop_cell(fdt, offset, "#address-cells", 0x1))); + _FDT((fdt_setprop_cell(fdt, offset, "#size-cells", 0x0))); + _FDT((fdt_setprop_string(fdt, offset, "device_type", + "ibm,persistent-memory"))); + } + + /* Create DT entries for cold plugged NVDIMM devices */ + for (iter = nvdimms; iter; iter = iter->next) { + NVDIMMDevice *nvdimm = iter->data; + + spapr_dt_nvdimm(spapr, fdt, offset, nvdimm); + } + g_slist_free(nvdimms); + + return; +} + +static target_ulong h_scm_read_metadata(PowerPCCPU *cpu, + SpaprMachineState *spapr, + target_ulong opcode, + target_ulong *args) +{ + uint32_t drc_index = args[0]; + uint64_t offset = args[1]; + uint64_t len = args[2]; + SpaprDrc *drc = spapr_drc_by_index(drc_index); + NVDIMMDevice *nvdimm; + NVDIMMClass *ddc; + uint64_t data = 0; + uint8_t buf[8] = { 0 }; + + if (!drc || !drc->dev || + spapr_drc_type(drc) != SPAPR_DR_CONNECTOR_TYPE_PMEM) { + return H_PARAMETER; + } + + if (len != 1 && len != 2 && + len != 4 && len != 8) { + return H_P3; + } + + nvdimm = NVDIMM(drc->dev); + if ((offset + len < offset) || + (nvdimm->label_size < len + offset)) { + return H_P2; + } + + ddc = 
NVDIMM_GET_CLASS(nvdimm); + ddc->read_label_data(nvdimm, buf, len, offset); + + switch (len) { + case 1: + data = ldub_p(buf); + break; + case 2: + data = lduw_be_p(buf); + break; + case 4: + data = ldl_be_p(buf); + break; + case 8: + data = ldq_be_p(buf); + break; + default: + g_assert_not_reached(); + } + + args[0] = data; + + return H_SUCCESS; +} + +static target_ulong h_scm_write_metadata(PowerPCCPU *cpu, + SpaprMachineState *spapr, + target_ulong opcode, + target_ulong *args) +{ + uint32_t drc_index = args[0]; + uint64_t offset = args[1]; + uint64_t data = args[2]; + uint64_t len = args[3]; + SpaprDrc *drc = spapr_drc_by_index(drc_index); + NVDIMMDevice *nvdimm; + NVDIMMClass *ddc; + uint8_t buf[8] = { 0 }; + + if (!drc || !drc->dev || + spapr_drc_type(drc) != SPAPR_DR_CONNECTOR_TYPE_PMEM) { + return H_PARAMETER; + } + + if (len != 1 && len != 2 && + len != 4 && len != 8) { + return H_P4; + } + + nvdimm = NVDIMM(drc->dev); + if ((offset + len < offset) || + (nvdimm->label_size < len + offset)) { + return H_P2; + } + + switch (len) { + case 1: + if (data & 0xffffffffffffff00) { + return H_P2; + } + stb_p(buf, data); + break; + case 2: + if (data & 0xffffffffffff0000) { + return H_P2; + } + stw_be_p(buf, data); + break; + case 4: + if (data & 0xffffffff00000000) { + return H_P2; + } + stl_be_p(buf, data); + break; + case 8: + stq_be_p(buf, data); + break; + default: + g_assert_not_reached(); + } + + ddc = NVDIMM_GET_CLASS(nvdimm); + ddc->write_label_data(nvdimm, buf, len, offset); + + return H_SUCCESS; +} + +static target_ulong h_scm_bind_mem(PowerPCCPU *cpu, SpaprMachineState *spapr, + target_ulong opcode, target_ulong *args) +{ + uint32_t drc_index = args[0]; + uint64_t starting_idx = args[1]; + uint64_t no_of_scm_blocks_to_bind = args[2]; + uint64_t target_logical_mem_addr = args[3]; + uint64_t continue_token = args[4]; + uint64_t size; + uint64_t total_no_of_scm_blocks; + SpaprDrc *drc = spapr_drc_by_index(drc_index); + hwaddr addr; + NVDIMMDevice *nvdimm; + + if (!drc || !drc->dev || + spapr_drc_type(drc) != SPAPR_DR_CONNECTOR_TYPE_PMEM) { + return H_PARAMETER; + } + + /* + * Currently continue token should be zero qemu has already bound + * everything and this hcall doesnt return H_BUSY. + */ + if (continue_token > 0) { + return H_P5; + } + + /* Currently qemu assigns the address. 
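A guest passing anything
+     * other than the all-ones "let the hypervisor choose" value is
+     * therefore answered with H_OVERLAP.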
*/ + if (target_logical_mem_addr != 0xffffffffffffffff) { + return H_OVERLAP; + } + + nvdimm = NVDIMM(drc->dev); + + size = object_property_get_uint(OBJECT(nvdimm), + PC_DIMM_SIZE_PROP, &error_abort); + + total_no_of_scm_blocks = size / SPAPR_MINIMUM_SCM_BLOCK_SIZE; + + if (starting_idx > total_no_of_scm_blocks) { + return H_P2; + } + + if (((starting_idx + no_of_scm_blocks_to_bind) < starting_idx) || + ((starting_idx + no_of_scm_blocks_to_bind) > total_no_of_scm_blocks)) { + return H_P3; + } + + addr = object_property_get_uint(OBJECT(nvdimm), + PC_DIMM_ADDR_PROP, &error_abort); + + addr += starting_idx * SPAPR_MINIMUM_SCM_BLOCK_SIZE; + + /* Already bound, Return target logical address in R5 */ + args[1] = addr; + args[2] = no_of_scm_blocks_to_bind; + + return H_SUCCESS; +} + +static target_ulong h_scm_unbind_mem(PowerPCCPU *cpu, SpaprMachineState *spapr, + target_ulong opcode, target_ulong *args) +{ + uint32_t drc_index = args[0]; + uint64_t starting_scm_logical_addr = args[1]; + uint64_t no_of_scm_blocks_to_unbind = args[2]; + uint64_t continue_token = args[3]; + uint64_t size_to_unbind; + Range blockrange = range_empty; + Range nvdimmrange = range_empty; + SpaprDrc *drc = spapr_drc_by_index(drc_index); + NVDIMMDevice *nvdimm; + uint64_t size, addr; + + if (!drc || !drc->dev || + spapr_drc_type(drc) != SPAPR_DR_CONNECTOR_TYPE_PMEM) { + return H_PARAMETER; + } + + /* continue_token should be zero as this hcall doesn't return H_BUSY. */ + if (continue_token > 0) { + return H_P4; + } + + /* Check if starting_scm_logical_addr is block aligned */ + if (!QEMU_IS_ALIGNED(starting_scm_logical_addr, + SPAPR_MINIMUM_SCM_BLOCK_SIZE)) { + return H_P2; + } + + size_to_unbind = no_of_scm_blocks_to_unbind * SPAPR_MINIMUM_SCM_BLOCK_SIZE; + if (no_of_scm_blocks_to_unbind == 0 || no_of_scm_blocks_to_unbind != + size_to_unbind / SPAPR_MINIMUM_SCM_BLOCK_SIZE) { + return H_P3; + } + + nvdimm = NVDIMM(drc->dev); + size = object_property_get_int(OBJECT(nvdimm), PC_DIMM_SIZE_PROP, + &error_abort); + addr = object_property_get_int(OBJECT(nvdimm), PC_DIMM_ADDR_PROP, + &error_abort); + + range_init_nofail(&nvdimmrange, addr, size); + range_init_nofail(&blockrange, starting_scm_logical_addr, size_to_unbind); + + if (!range_contains_range(&nvdimmrange, &blockrange)) { + return H_P3; + } + + args[1] = no_of_scm_blocks_to_unbind; + + /* let unplug take care of actual unbind */ + return H_SUCCESS; +} + +#define H_UNBIND_SCOPE_ALL 0x1 +#define H_UNBIND_SCOPE_DRC 0x2 + +static target_ulong h_scm_unbind_all(PowerPCCPU *cpu, SpaprMachineState *spapr, + target_ulong opcode, target_ulong *args) +{ + uint64_t target_scope = args[0]; + uint32_t drc_index = args[1]; + uint64_t continue_token = args[2]; + NVDIMMDevice *nvdimm; + uint64_t size; + uint64_t no_of_scm_blocks_unbound = 0; + + /* continue_token should be zero as this hcall doesn't return H_BUSY. 
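+ * There is no multi-call sequence to resume here, so a non-zero token + * can only be a guest error.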
*/ + if (continue_token > 0) { + return H_P4; + } + + if (target_scope == H_UNBIND_SCOPE_DRC) { + SpaprDrc *drc = spapr_drc_by_index(drc_index); + + if (!drc || !drc->dev || + spapr_drc_type(drc) != SPAPR_DR_CONNECTOR_TYPE_PMEM) { + return H_P2; + } + + nvdimm = NVDIMM(drc->dev); + size = object_property_get_int(OBJECT(nvdimm), PC_DIMM_SIZE_PROP, + &error_abort); + + no_of_scm_blocks_unbound = size / SPAPR_MINIMUM_SCM_BLOCK_SIZE; + } else if (target_scope == H_UNBIND_SCOPE_ALL) { + GSList *list, *nvdimms; + + nvdimms = nvdimm_get_device_list(); + for (list = nvdimms; list; list = list->next) { + nvdimm = list->data; + size = object_property_get_int(OBJECT(nvdimm), PC_DIMM_SIZE_PROP, + &error_abort); + + no_of_scm_blocks_unbound += size / SPAPR_MINIMUM_SCM_BLOCK_SIZE; + } + g_slist_free(nvdimms); + } else { + return H_PARAMETER; + } + + args[1] = no_of_scm_blocks_unbound; + + /* let unplug take care of actual unbind */ + return H_SUCCESS; +} + +static target_ulong h_scm_health(PowerPCCPU *cpu, SpaprMachineState *spapr, + target_ulong opcode, target_ulong *args) +{ + + NVDIMMDevice *nvdimm; + uint64_t hbitmap = 0; + uint32_t drc_index = args[0]; + SpaprDrc *drc = spapr_drc_by_index(drc_index); + const uint64_t hbitmap_mask = PAPR_PMEM_UNARMED; + + + /* Ensure that the DRC is valid, is a valid PMEM DIMM and is plugged in */ + if (!drc || !drc->dev || + spapr_drc_type(drc) != SPAPR_DR_CONNECTOR_TYPE_PMEM) { + return H_PARAMETER; + } + + nvdimm = NVDIMM(drc->dev); + + /* Check if the nvdimm is unarmed and report its status via the health bitmap */ + if (object_property_get_bool(OBJECT(nvdimm), NVDIMM_UNARMED_PROP, NULL)) { + hbitmap |= PAPR_PMEM_UNARMED; + } + + /* Update the out args with health bitmap/mask */ + args[0] = hbitmap; + args[1] = hbitmap_mask; + + return H_SUCCESS; +} + +static void spapr_scm_register_types(void) +{ + /* qemu/scm specific hcalls */ + spapr_register_hypercall(H_SCM_READ_METADATA, h_scm_read_metadata); + spapr_register_hypercall(H_SCM_WRITE_METADATA, h_scm_write_metadata); + spapr_register_hypercall(H_SCM_BIND_MEM, h_scm_bind_mem); + spapr_register_hypercall(H_SCM_UNBIND_MEM, h_scm_unbind_mem); + spapr_register_hypercall(H_SCM_UNBIND_ALL, h_scm_unbind_all); + spapr_register_hypercall(H_SCM_HEALTH, h_scm_health); +} + +type_init(spapr_scm_register_types) diff --git a/hw/ppc/spapr_ovec.c b/hw/ppc/spapr_ovec.c new file mode 100644 index 000000000..b2567caa5 --- /dev/null +++ b/hw/ppc/spapr_ovec.c @@ -0,0 +1,241 @@ +/* + * QEMU SPAPR Architecture Option Vector Helper Functions + * + * Copyright IBM Corp. 2016 + * + * Authors: + * Bharata B Rao + * Michael Roth + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
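+ * + * Option vectors are the bit vectors negotiated with the guest through + * ibm,client-architecture-support; the helpers below keep them in a + * private bitmap so callers need not hard-code their size.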
+ */ + +#include "qemu/osdep.h" +#include "hw/ppc/spapr_ovec.h" +#include "migration/vmstate.h" +#include "qemu/bitmap.h" +#include "exec/address-spaces.h" +#include "qemu/error-report.h" +#include "trace.h" +#include <libfdt.h> + +#define OV_MAXBYTES 256 /* not including length byte */ +#define OV_MAXBITS (OV_MAXBYTES * BITS_PER_BYTE) + +/* we *could* work with bitmaps directly, but handling the bitmap privately + * allows us to more safely make assumptions about the bitmap size and + * simplify the calling code somewhat + */ +struct SpaprOptionVector { + unsigned long *bitmap; + int32_t bitmap_size; /* only used for migration */ +}; + +const VMStateDescription vmstate_spapr_ovec = { + .name = "spapr_option_vector", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_BITMAP(bitmap, SpaprOptionVector, 1, bitmap_size), + VMSTATE_END_OF_LIST() + } +}; + +SpaprOptionVector *spapr_ovec_new(void) +{ + SpaprOptionVector *ov; + + ov = g_new0(SpaprOptionVector, 1); + ov->bitmap = bitmap_new(OV_MAXBITS); + ov->bitmap_size = OV_MAXBITS; + + return ov; +} + +SpaprOptionVector *spapr_ovec_clone(SpaprOptionVector *ov_orig) +{ + SpaprOptionVector *ov; + + g_assert(ov_orig); + + ov = spapr_ovec_new(); + bitmap_copy(ov->bitmap, ov_orig->bitmap, OV_MAXBITS); + + return ov; +} + +void spapr_ovec_intersect(SpaprOptionVector *ov, + SpaprOptionVector *ov1, + SpaprOptionVector *ov2) +{ + g_assert(ov); + g_assert(ov1); + g_assert(ov2); + + bitmap_and(ov->bitmap, ov1->bitmap, ov2->bitmap, OV_MAXBITS); +} + +/* returns true if ov1's bits are a subset of ov2's bits */ +bool spapr_ovec_subset(SpaprOptionVector *ov1, SpaprOptionVector *ov2) +{ + unsigned long *tmp = bitmap_new(OV_MAXBITS); + bool result; + + g_assert(ov1); + g_assert(ov2); + + bitmap_andnot(tmp, ov1->bitmap, ov2->bitmap, OV_MAXBITS); + result = bitmap_empty(tmp, OV_MAXBITS); + + g_free(tmp); + + return result; +} + +void spapr_ovec_cleanup(SpaprOptionVector *ov) +{ + if (ov) { + g_free(ov->bitmap); + g_free(ov); + } +} + +void spapr_ovec_set(SpaprOptionVector *ov, long bitnr) +{ + g_assert(ov); + g_assert(bitnr < OV_MAXBITS); + + set_bit(bitnr, ov->bitmap); +} + +void spapr_ovec_clear(SpaprOptionVector *ov, long bitnr) +{ + g_assert(ov); + g_assert(bitnr < OV_MAXBITS); + + clear_bit(bitnr, ov->bitmap); +} + +bool spapr_ovec_test(SpaprOptionVector *ov, long bitnr) +{ + g_assert(ov); + g_assert(bitnr < OV_MAXBITS); + + return test_bit(bitnr, ov->bitmap) ? 
true : false; +} + +bool spapr_ovec_empty(SpaprOptionVector *ov) +{ + g_assert(ov); + + return bitmap_empty(ov->bitmap, OV_MAXBITS); +} + +static void guest_byte_to_bitmap(uint8_t entry, unsigned long *bitmap, + long bitmap_offset) +{ + int i; + + for (i = 0; i < BITS_PER_BYTE; i++) { + if (entry & (1 << (BITS_PER_BYTE - 1 - i))) { + bitmap_set(bitmap, bitmap_offset + i, 1); + } + } +} + +static uint8_t guest_byte_from_bitmap(unsigned long *bitmap, long bitmap_offset) +{ + uint8_t entry = 0; + int i; + + for (i = 0; i < BITS_PER_BYTE; i++) { + if (test_bit(bitmap_offset + i, bitmap)) { + entry |= (1 << (BITS_PER_BYTE - 1 - i)); + } + } + + return entry; +} + +static target_ulong vector_addr(target_ulong table_addr, int vector) +{ + uint16_t vector_count, vector_len; + int i; + + vector_count = ldub_phys(&address_space_memory, table_addr) + 1; + if (vector > vector_count) { + return 0; + } + table_addr++; /* skip nr option vectors */ + + for (i = 0; i < vector - 1; i++) { + vector_len = ldub_phys(&address_space_memory, table_addr) + 1; + table_addr += vector_len + 1; /* bit-vector + length byte */ + } + return table_addr; +} + +SpaprOptionVector *spapr_ovec_parse_vector(target_ulong table_addr, int vector) +{ + SpaprOptionVector *ov; + target_ulong addr; + uint16_t vector_len; + int i; + + g_assert(table_addr); + g_assert(vector >= 1); /* vector numbering starts at 1 */ + + addr = vector_addr(table_addr, vector); + if (!addr) { + /* specified vector isn't present */ + return NULL; + } + + vector_len = ldub_phys(&address_space_memory, addr++) + 1; + g_assert(vector_len <= OV_MAXBYTES); + ov = spapr_ovec_new(); + + for (i = 0; i < vector_len; i++) { + uint8_t entry = ldub_phys(&address_space_memory, addr + i); + if (entry) { + trace_spapr_ovec_parse_vector(vector, i + 1, vector_len, entry); + guest_byte_to_bitmap(entry, ov->bitmap, i * BITS_PER_BYTE); + } + } + + return ov; +} + +int spapr_dt_ovec(void *fdt, int fdt_offset, + SpaprOptionVector *ov, const char *name) +{ + uint8_t vec[OV_MAXBYTES + 1]; + uint16_t vec_len; + unsigned long lastbit; + int i; + + g_assert(ov); + + lastbit = find_last_bit(ov->bitmap, OV_MAXBITS); + /* if no bits are set, include at least 1 byte of the vector so we can + * still encode this in the device tree while abiding by the same + * encoding/sizing expected in ibm,client-architecture-support + */ + vec_len = (lastbit == OV_MAXBITS) ? 1 : lastbit / BITS_PER_BYTE + 1; + g_assert(vec_len <= OV_MAXBYTES); + /* guest expects vector len encoded as vec_len - 1, since the length byte + * is assumed and not included, and the first byte of the vector + * is assumed as well + */ + vec[0] = vec_len - 1; + + for (i = 1; i < vec_len + 1; i++) { + vec[i] = guest_byte_from_bitmap(ov->bitmap, (i - 1) * BITS_PER_BYTE); + if (vec[i]) { + trace_spapr_ovec_populate_dt(i, vec_len, vec[i]); + } + } + + return fdt_setprop(fdt, fdt_offset, name, vec, vec_len + 1); +} diff --git a/hw/ppc/spapr_pci.c b/hw/ppc/spapr_pci.c new file mode 100644 index 000000000..5bfd4aa9e --- /dev/null +++ b/hw/ppc/spapr_pci.c @@ -0,0 +1,2530 @@ +/* + * QEMU sPAPR PCI host originated from Uninorth PCI host + * + * Copyright (c) 2011 Alexey Kardashevskiy, IBM Corporation. + * Copyright (C) 2011 David Gibson, IBM Corporation. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "hw/irq.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "hw/pci/pci.h" +#include "hw/pci/msi.h" +#include "hw/pci/msix.h" +#include "hw/pci/pci_host.h" +#include "hw/ppc/spapr.h" +#include "hw/pci-host/spapr.h" +#include "exec/ram_addr.h" +#include <libfdt.h> +#include "trace.h" +#include "qemu/error-report.h" +#include "qemu/module.h" +#include "qapi/qmp/qerror.h" +#include "hw/ppc/fdt.h" +#include "hw/pci/pci_bridge.h" +#include "hw/pci/pci_bus.h" +#include "hw/pci/pci_ids.h" +#include "hw/ppc/spapr_drc.h" +#include "hw/qdev-properties.h" +#include "sysemu/device_tree.h" +#include "sysemu/kvm.h" +#include "sysemu/hostmem.h" +#include "sysemu/numa.h" +#include "hw/ppc/spapr_numa.h" +#include "qemu/log.h" + +/* Copied from the kernel arch/powerpc/platforms/pseries/msi.c */ +#define RTAS_QUERY_FN 0 +#define RTAS_CHANGE_FN 1 +#define RTAS_RESET_FN 2 +#define RTAS_CHANGE_MSI_FN 3 +#define RTAS_CHANGE_MSIX_FN 4 + +/* Interrupt types to return on RTAS_CHANGE_* */ +#define RTAS_TYPE_MSI 1 +#define RTAS_TYPE_MSIX 2 + +SpaprPhbState *spapr_pci_find_phb(SpaprMachineState *spapr, uint64_t buid) +{ + SpaprPhbState *sphb; + + QLIST_FOREACH(sphb, &spapr->phbs, list) { + if (sphb->buid != buid) { + continue; + } + return sphb; + } + + return NULL; +} + +PCIDevice *spapr_pci_find_dev(SpaprMachineState *spapr, uint64_t buid, + uint32_t config_addr) +{ + SpaprPhbState *sphb = spapr_pci_find_phb(spapr, buid); + PCIHostState *phb = PCI_HOST_BRIDGE(sphb); + int bus_num = (config_addr >> 16) & 0xFF; + int devfn = (config_addr >> 8) & 0xFF; + + if (!phb) { + return NULL; + } + + return pci_find_device(phb->bus, bus_num, devfn); +} + +static uint32_t rtas_pci_cfgaddr(uint32_t arg) +{ + /* This handles the encoding of extended config space addresses */ + return ((arg >> 20) & 0xf00) | (arg & 0xff); +} + +static void finish_read_pci_config(SpaprMachineState *spapr, uint64_t buid, + uint32_t addr, uint32_t size, + target_ulong rets) +{ + PCIDevice *pci_dev; + uint32_t val; + + if ((size != 1) && (size != 2) && (size != 4)) { + /* access must be 1, 2 or 4 bytes */ + rtas_st(rets, 0, RTAS_OUT_HW_ERROR); + return; + } + + pci_dev = spapr_pci_find_dev(spapr, buid, addr); + addr = rtas_pci_cfgaddr(addr); + + if (!pci_dev || (addr % size) || (addr >= pci_config_size(pci_dev))) { + /* Access must be to a valid device, within bounds and + * naturally aligned */ + 
rtas_st(rets, 0, RTAS_OUT_HW_ERROR); + return; + } + + val = pci_host_config_read_common(pci_dev, addr, + pci_config_size(pci_dev), size); + + rtas_st(rets, 0, RTAS_OUT_SUCCESS); + rtas_st(rets, 1, val); +} + +static void rtas_ibm_read_pci_config(PowerPCCPU *cpu, SpaprMachineState *spapr, + uint32_t token, uint32_t nargs, + target_ulong args, + uint32_t nret, target_ulong rets) +{ + uint64_t buid; + uint32_t size, addr; + + if ((nargs != 4) || (nret != 2)) { + rtas_st(rets, 0, RTAS_OUT_HW_ERROR); + return; + } + + buid = rtas_ldq(args, 1); + size = rtas_ld(args, 3); + addr = rtas_ld(args, 0); + + finish_read_pci_config(spapr, buid, addr, size, rets); +} + +static void rtas_read_pci_config(PowerPCCPU *cpu, SpaprMachineState *spapr, + uint32_t token, uint32_t nargs, + target_ulong args, + uint32_t nret, target_ulong rets) +{ + uint32_t size, addr; + + if ((nargs != 2) || (nret != 2)) { + rtas_st(rets, 0, RTAS_OUT_HW_ERROR); + return; + } + + size = rtas_ld(args, 1); + addr = rtas_ld(args, 0); + + finish_read_pci_config(spapr, 0, addr, size, rets); +} + +static void finish_write_pci_config(SpaprMachineState *spapr, uint64_t buid, + uint32_t addr, uint32_t size, + uint32_t val, target_ulong rets) +{ + PCIDevice *pci_dev; + + if ((size != 1) && (size != 2) && (size != 4)) { + /* access must be 1, 2 or 4 bytes */ + rtas_st(rets, 0, RTAS_OUT_HW_ERROR); + return; + } + + pci_dev = spapr_pci_find_dev(spapr, buid, addr); + addr = rtas_pci_cfgaddr(addr); + + if (!pci_dev || (addr % size) || (addr >= pci_config_size(pci_dev))) { + /* Access must be to a valid device, within bounds and + * naturally aligned */ + rtas_st(rets, 0, RTAS_OUT_HW_ERROR); + return; + } + + pci_host_config_write_common(pci_dev, addr, pci_config_size(pci_dev), + val, size); + + rtas_st(rets, 0, RTAS_OUT_SUCCESS); +} + +static void rtas_ibm_write_pci_config(PowerPCCPU *cpu, SpaprMachineState *spapr, + uint32_t token, uint32_t nargs, + target_ulong args, + uint32_t nret, target_ulong rets) +{ + uint64_t buid; + uint32_t val, size, addr; + + if ((nargs != 5) || (nret != 1)) { + rtas_st(rets, 0, RTAS_OUT_HW_ERROR); + return; + } + + buid = rtas_ldq(args, 1); + val = rtas_ld(args, 4); + size = rtas_ld(args, 3); + addr = rtas_ld(args, 0); + + finish_write_pci_config(spapr, buid, addr, size, val, rets); +} + +static void rtas_write_pci_config(PowerPCCPU *cpu, SpaprMachineState *spapr, + uint32_t token, uint32_t nargs, + target_ulong args, + uint32_t nret, target_ulong rets) +{ + uint32_t val, size, addr; + + if ((nargs != 3) || (nret != 1)) { + rtas_st(rets, 0, RTAS_OUT_HW_ERROR); + return; + } + + + val = rtas_ld(args, 2); + size = rtas_ld(args, 1); + addr = rtas_ld(args, 0); + + finish_write_pci_config(spapr, 0, addr, size, val, rets); +} + +/* + * Set MSI/MSIX message data. + * This is required for msi_notify()/msix_notify() which + * will write at the addresses via spapr_msi_write(). + * + * If hwaddr == 0, all entries will have .data == first_irq i.e. + * table will be reset. 
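+ * + * For example (hypothetical values): with addr != 0, first_irq == 20 and + * req_num == 3, the three MSI-X entries get .data 20, 21 and 22.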
+ */ +static void spapr_msi_setmsg(PCIDevice *pdev, hwaddr addr, bool msix, + unsigned first_irq, unsigned req_num) +{ + unsigned i; + MSIMessage msg = { .address = addr, .data = first_irq }; + + if (!msix) { + msi_set_message(pdev, msg); + trace_spapr_pci_msi_setup(pdev->name, 0, msg.address); + return; + } + + for (i = 0; i < req_num; ++i) { + msix_set_message(pdev, i, msg); + trace_spapr_pci_msi_setup(pdev->name, i, msg.address); + if (addr) { + ++msg.data; + } + } +} + +static void rtas_ibm_change_msi(PowerPCCPU *cpu, SpaprMachineState *spapr, + uint32_t token, uint32_t nargs, + target_ulong args, uint32_t nret, + target_ulong rets) +{ + SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr); + uint32_t config_addr = rtas_ld(args, 0); + uint64_t buid = rtas_ldq(args, 1); + unsigned int func = rtas_ld(args, 3); + unsigned int req_num = rtas_ld(args, 4); /* 0 == remove all */ + unsigned int seq_num = rtas_ld(args, 5); + unsigned int ret_intr_type; + unsigned int irq, max_irqs = 0; + SpaprPhbState *phb = NULL; + PCIDevice *pdev = NULL; + SpaprPciMsi *msi; + int *config_addr_key; + Error *err = NULL; + int i; + + /* Find SpaprPhbState */ + phb = spapr_pci_find_phb(spapr, buid); + if (phb) { + pdev = spapr_pci_find_dev(spapr, buid, config_addr); + } + if (!phb || !pdev) { + rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); + return; + } + + switch (func) { + case RTAS_CHANGE_FN: + if (msi_present(pdev)) { + ret_intr_type = RTAS_TYPE_MSI; + } else if (msix_present(pdev)) { + ret_intr_type = RTAS_TYPE_MSIX; + } else { + rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); + return; + } + break; + case RTAS_CHANGE_MSI_FN: + if (msi_present(pdev)) { + ret_intr_type = RTAS_TYPE_MSI; + } else { + rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); + return; + } + break; + case RTAS_CHANGE_MSIX_FN: + if (msix_present(pdev)) { + ret_intr_type = RTAS_TYPE_MSIX; + } else { + rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); + return; + } + break; + default: + error_report("rtas_ibm_change_msi(%u) is not implemented", func); + rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); + return; + } + + msi = (SpaprPciMsi *) g_hash_table_lookup(phb->msi, &config_addr); + + /* Releasing MSIs */ + if (!req_num) { + if (!msi) { + trace_spapr_pci_msi("Releasing wrong config", config_addr); + rtas_st(rets, 0, RTAS_OUT_HW_ERROR); + return; + } + + if (msi_present(pdev)) { + spapr_msi_setmsg(pdev, 0, false, 0, 0); + } + if (msix_present(pdev)) { + spapr_msi_setmsg(pdev, 0, true, 0, 0); + } + g_hash_table_remove(phb->msi, &config_addr); + + trace_spapr_pci_msi("Released MSIs", config_addr); + rtas_st(rets, 0, RTAS_OUT_SUCCESS); + rtas_st(rets, 1, 0); + return; + } + + /* Enabling MSI */ + + /* Check if the device supports as many IRQs as requested */ + if (ret_intr_type == RTAS_TYPE_MSI) { + max_irqs = msi_nr_vectors_allocated(pdev); + } else if (ret_intr_type == RTAS_TYPE_MSIX) { + max_irqs = pdev->msix_entries_nr; + } + if (!max_irqs) { + error_report("Requested interrupt type %d is not enabled for device %x", + ret_intr_type, config_addr); + rtas_st(rets, 0, -1); /* Hardware error */ + return; + } + /* Correct the number if the guest asked for too many */ + if (req_num > max_irqs) { + trace_spapr_pci_msi_retry(config_addr, req_num, max_irqs); + req_num = max_irqs; + irq = 0; /* to avoid misleading trace */ + goto out; + } + + /* Allocate MSIs */ + if (smc->legacy_irq_allocation) { + irq = spapr_irq_find(spapr, req_num, ret_intr_type == RTAS_TYPE_MSI, + &err); + } else { + irq = spapr_irq_msi_alloc(spapr, req_num, + ret_intr_type == RTAS_TYPE_MSI, &err); + } + if (err) 
{ + error_reportf_err(err, "Can't allocate MSIs for device %x: ", + config_addr); + rtas_st(rets, 0, RTAS_OUT_HW_ERROR); + return; + } + + for (i = 0; i < req_num; i++) { + spapr_irq_claim(spapr, irq + i, false, &err); + if (err) { + if (i) { + spapr_irq_free(spapr, irq, i); + } + if (!smc->legacy_irq_allocation) { + spapr_irq_msi_free(spapr, irq, req_num); + } + error_reportf_err(err, "Can't allocate MSIs for device %x: ", + config_addr); + rtas_st(rets, 0, RTAS_OUT_HW_ERROR); + return; + } + } + + /* Release previous MSIs */ + if (msi) { + g_hash_table_remove(phb->msi, &config_addr); + } + + /* Setup MSI/MSIX vectors in the device (via cfgspace or MSIX BAR) */ + spapr_msi_setmsg(pdev, SPAPR_PCI_MSI_WINDOW, ret_intr_type == RTAS_TYPE_MSIX, + irq, req_num); + + /* Add MSI device to cache */ + msi = g_new(SpaprPciMsi, 1); + msi->first_irq = irq; + msi->num = req_num; + config_addr_key = g_new(int, 1); + *config_addr_key = config_addr; + g_hash_table_insert(phb->msi, config_addr_key, msi); + +out: + rtas_st(rets, 0, RTAS_OUT_SUCCESS); + rtas_st(rets, 1, req_num); + rtas_st(rets, 2, ++seq_num); + if (nret > 3) { + rtas_st(rets, 3, ret_intr_type); + } + + trace_spapr_pci_rtas_ibm_change_msi(config_addr, func, req_num, irq); +} + +static void rtas_ibm_query_interrupt_source_number(PowerPCCPU *cpu, + SpaprMachineState *spapr, + uint32_t token, + uint32_t nargs, + target_ulong args, + uint32_t nret, + target_ulong rets) +{ + uint32_t config_addr = rtas_ld(args, 0); + uint64_t buid = rtas_ldq(args, 1); + unsigned int intr_src_num = -1, ioa_intr_num = rtas_ld(args, 3); + SpaprPhbState *phb = NULL; + PCIDevice *pdev = NULL; + SpaprPciMsi *msi; + + /* Find SpaprPhbState */ + phb = spapr_pci_find_phb(spapr, buid); + if (phb) { + pdev = spapr_pci_find_dev(spapr, buid, config_addr); + } + if (!phb || !pdev) { + rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); + return; + } + + /* Find device descriptor and start IRQ */ + msi = (SpaprPciMsi *) g_hash_table_lookup(phb->msi, &config_addr); + if (!msi || !msi->first_irq || !msi->num || (ioa_intr_num >= msi->num)) { + trace_spapr_pci_msi("Failed to return vector", config_addr); + rtas_st(rets, 0, RTAS_OUT_HW_ERROR); + return; + } + intr_src_num = msi->first_irq + ioa_intr_num; + trace_spapr_pci_rtas_ibm_query_interrupt_source_number(ioa_intr_num, + intr_src_num); + + rtas_st(rets, 0, RTAS_OUT_SUCCESS); + rtas_st(rets, 1, intr_src_num); + rtas_st(rets, 2, 1);/* 0 == level; 1 == edge */ +} + +static void rtas_ibm_set_eeh_option(PowerPCCPU *cpu, + SpaprMachineState *spapr, + uint32_t token, uint32_t nargs, + target_ulong args, uint32_t nret, + target_ulong rets) +{ + SpaprPhbState *sphb; + uint32_t addr, option; + uint64_t buid; + int ret; + + if ((nargs != 4) || (nret != 1)) { + goto param_error_exit; + } + + buid = rtas_ldq(args, 1); + addr = rtas_ld(args, 0); + option = rtas_ld(args, 3); + + sphb = spapr_pci_find_phb(spapr, buid); + if (!sphb) { + goto param_error_exit; + } + + if (!spapr_phb_eeh_available(sphb)) { + goto param_error_exit; + } + + ret = spapr_phb_vfio_eeh_set_option(sphb, addr, option); + rtas_st(rets, 0, ret); + return; + +param_error_exit: + rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); +} + +static void rtas_ibm_get_config_addr_info2(PowerPCCPU *cpu, + SpaprMachineState *spapr, + uint32_t token, uint32_t nargs, + target_ulong args, uint32_t nret, + target_ulong rets) +{ + SpaprPhbState *sphb; + PCIDevice *pdev; + uint32_t addr, option; + uint64_t buid; + + if ((nargs != 4) || (nret != 2)) { + goto param_error_exit; + } + + buid = rtas_ldq(args, 1); + 
sphb = spapr_pci_find_phb(spapr, buid); + if (!sphb) { + goto param_error_exit; + } + + if (!spapr_phb_eeh_available(sphb)) { + goto param_error_exit; + } + + /* + * We always have PE address of form "00BB0001". "BB" + * represents the bus number of PE's primary bus. + */ + option = rtas_ld(args, 3); + switch (option) { + case RTAS_GET_PE_ADDR: + addr = rtas_ld(args, 0); + pdev = spapr_pci_find_dev(spapr, buid, addr); + if (!pdev) { + goto param_error_exit; + } + + rtas_st(rets, 1, (pci_bus_num(pci_get_bus(pdev)) << 16) + 1); + break; + case RTAS_GET_PE_MODE: + rtas_st(rets, 1, RTAS_PE_MODE_SHARED); + break; + default: + goto param_error_exit; + } + + rtas_st(rets, 0, RTAS_OUT_SUCCESS); + return; + +param_error_exit: + rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); +} + +static void rtas_ibm_read_slot_reset_state2(PowerPCCPU *cpu, + SpaprMachineState *spapr, + uint32_t token, uint32_t nargs, + target_ulong args, uint32_t nret, + target_ulong rets) +{ + SpaprPhbState *sphb; + uint64_t buid; + int state, ret; + + if ((nargs != 3) || (nret != 4 && nret != 5)) { + goto param_error_exit; + } + + buid = rtas_ldq(args, 1); + sphb = spapr_pci_find_phb(spapr, buid); + if (!sphb) { + goto param_error_exit; + } + + if (!spapr_phb_eeh_available(sphb)) { + goto param_error_exit; + } + + ret = spapr_phb_vfio_eeh_get_state(sphb, &state); + rtas_st(rets, 0, ret); + if (ret != RTAS_OUT_SUCCESS) { + return; + } + + rtas_st(rets, 1, state); + rtas_st(rets, 2, RTAS_EEH_SUPPORT); + rtas_st(rets, 3, RTAS_EEH_PE_UNAVAIL_INFO); + if (nret >= 5) { + rtas_st(rets, 4, RTAS_EEH_PE_RECOVER_INFO); + } + return; + +param_error_exit: + rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); +} + +static void rtas_ibm_set_slot_reset(PowerPCCPU *cpu, + SpaprMachineState *spapr, + uint32_t token, uint32_t nargs, + target_ulong args, uint32_t nret, + target_ulong rets) +{ + SpaprPhbState *sphb; + uint32_t option; + uint64_t buid; + int ret; + + if ((nargs != 4) || (nret != 1)) { + goto param_error_exit; + } + + buid = rtas_ldq(args, 1); + option = rtas_ld(args, 3); + sphb = spapr_pci_find_phb(spapr, buid); + if (!sphb) { + goto param_error_exit; + } + + if (!spapr_phb_eeh_available(sphb)) { + goto param_error_exit; + } + + ret = spapr_phb_vfio_eeh_reset(sphb, option); + rtas_st(rets, 0, ret); + return; + +param_error_exit: + rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); +} + +static void rtas_ibm_configure_pe(PowerPCCPU *cpu, + SpaprMachineState *spapr, + uint32_t token, uint32_t nargs, + target_ulong args, uint32_t nret, + target_ulong rets) +{ + SpaprPhbState *sphb; + uint64_t buid; + int ret; + + if ((nargs != 3) || (nret != 1)) { + goto param_error_exit; + } + + buid = rtas_ldq(args, 1); + sphb = spapr_pci_find_phb(spapr, buid); + if (!sphb) { + goto param_error_exit; + } + + if (!spapr_phb_eeh_available(sphb)) { + goto param_error_exit; + } + + ret = spapr_phb_vfio_eeh_configure(sphb); + rtas_st(rets, 0, ret); + return; + +param_error_exit: + rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); +} + +/* To support it later */ +static void rtas_ibm_slot_error_detail(PowerPCCPU *cpu, + SpaprMachineState *spapr, + uint32_t token, uint32_t nargs, + target_ulong args, uint32_t nret, + target_ulong rets) +{ + SpaprPhbState *sphb; + int option; + uint64_t buid; + + if ((nargs != 8) || (nret != 1)) { + goto param_error_exit; + } + + buid = rtas_ldq(args, 1); + sphb = spapr_pci_find_phb(spapr, buid); + if (!sphb) { + goto param_error_exit; + } + + if (!spapr_phb_eeh_available(sphb)) { + goto param_error_exit; + } + + option = rtas_ld(args, 7); + switch (option) { + case 
RTAS_SLOT_TEMP_ERR_LOG: + case RTAS_SLOT_PERM_ERR_LOG: + break; + default: + goto param_error_exit; + } + + /* We don't have error log yet */ + rtas_st(rets, 0, RTAS_OUT_NO_ERRORS_FOUND); + return; + +param_error_exit: + rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); +} + +static void pci_spapr_set_irq(void *opaque, int irq_num, int level) +{ + /* + * Here we use the number returned by pci_swizzle_map_irq_fn to find a + * corresponding qemu_irq. + */ + SpaprPhbState *phb = opaque; + SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine()); + + trace_spapr_pci_lsi_set(phb->dtbusname, irq_num, phb->lsi_table[irq_num].irq); + qemu_set_irq(spapr_qirq(spapr, phb->lsi_table[irq_num].irq), level); +} + +static PCIINTxRoute spapr_route_intx_pin_to_irq(void *opaque, int pin) +{ + SpaprPhbState *sphb = SPAPR_PCI_HOST_BRIDGE(opaque); + PCIINTxRoute route; + + route.mode = PCI_INTX_ENABLED; + route.irq = sphb->lsi_table[pin].irq; + + return route; +} + +static uint64_t spapr_msi_read(void *opaque, hwaddr addr, unsigned size) +{ + qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid access\n", __func__); + return 0; +} + +/* + * MSI/MSIX memory region implementation. + * The handler handles both MSI and MSIX. + * The vector number is encoded in least bits in data. + */ +static void spapr_msi_write(void *opaque, hwaddr addr, + uint64_t data, unsigned size) +{ + SpaprMachineState *spapr = opaque; + uint32_t irq = data; + + trace_spapr_pci_msi_write(addr, data, irq); + + qemu_irq_pulse(spapr_qirq(spapr, irq)); +} + +static const MemoryRegionOps spapr_msi_ops = { + /* + * .read result is undefined by PCI spec. + * define .read method to avoid assert failure in memory_region_init_io + */ + .read = spapr_msi_read, + .write = spapr_msi_write, + .endianness = DEVICE_LITTLE_ENDIAN +}; + +/* + * PHB PCI device + */ +static AddressSpace *spapr_pci_dma_iommu(PCIBus *bus, void *opaque, int devfn) +{ + SpaprPhbState *phb = opaque; + + return &phb->iommu_as; +} + +static char *spapr_phb_vfio_get_loc_code(SpaprPhbState *sphb, PCIDevice *pdev) +{ + g_autofree char *path = NULL; + g_autofree char *host = NULL; + g_autofree char *devspec = NULL; + char *buf = NULL; + + /* Get the PCI VFIO host id */ + host = object_property_get_str(OBJECT(pdev), "host", NULL); + if (!host) { + return NULL; + } + + /* Construct the path of the file that will give us the DT location */ + path = g_strdup_printf("/sys/bus/pci/devices/%s/devspec", host); + if (!g_file_get_contents(path, &devspec, NULL, NULL)) { + return NULL; + } + + /* Construct and read from host device tree the loc-code */ + path = g_strdup_printf("/proc/device-tree%s/ibm,loc-code", devspec); + if (!g_file_get_contents(path, &buf, NULL, NULL)) { + return NULL; + } + return buf; +} + +static char *spapr_phb_get_loc_code(SpaprPhbState *sphb, PCIDevice *pdev) +{ + char *buf; + const char *devtype = "qemu"; + uint32_t busnr = pci_bus_num(PCI_BUS(qdev_get_parent_bus(DEVICE(pdev)))); + + if (object_dynamic_cast(OBJECT(pdev), "vfio-pci")) { + buf = spapr_phb_vfio_get_loc_code(sphb, pdev); + if (buf) { + return buf; + } + devtype = "vfio"; + } + /* + * For emulated devices and VFIO-failure case, make up + * the loc-code. 
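+ * The made-up form is "<devtype>_<name>:<index>:<bus>:<slot>.<fn>", e.g. + * "qemu_virtio-net-pci:0000:00:05.0" (illustrative values only).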
+ */ + buf = g_strdup_printf("%s_%s:%04x:%02x:%02x.%x", + devtype, pdev->name, sphb->index, busnr, + PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); + return buf; +} + +/* Macros to operate with address in OF binding to PCI */ +#define b_x(x, p, l) (((x) & ((1<<(l))-1)) << (p)) +#define b_n(x) b_x((x), 31, 1) /* 0 if relocatable */ +#define b_p(x) b_x((x), 30, 1) /* 1 if prefetchable */ +#define b_t(x) b_x((x), 29, 1) /* 1 if the address is aliased */ +#define b_ss(x) b_x((x), 24, 2) /* the space code */ +#define b_bbbbbbbb(x) b_x((x), 16, 8) /* bus number */ +#define b_ddddd(x) b_x((x), 11, 5) /* device number */ +#define b_fff(x) b_x((x), 8, 3) /* function number */ +#define b_rrrrrrrr(x) b_x((x), 0, 8) /* register number */ + +/* for 'reg' OF properties */ +#define RESOURCE_CELLS_SIZE 2 +#define RESOURCE_CELLS_ADDRESS 3 + +typedef struct ResourceFields { + uint32_t phys_hi; + uint32_t phys_mid; + uint32_t phys_lo; + uint32_t size_hi; + uint32_t size_lo; +} QEMU_PACKED ResourceFields; + +typedef struct ResourceProps { + ResourceFields reg[8]; + uint32_t reg_len; +} ResourceProps; + +/* fill in the 'reg' OF properties for + * a PCI device. 'reg' describes resource requirements for a + * device's IO/MEM regions. + * + * the property is an array of ('phys-addr', 'size') pairs describing + * the addressable regions of the PCI device, where 'phys-addr' is a + * RESOURCE_CELLS_ADDRESS-tuple of 32-bit integers corresponding to + * (phys.hi, phys.mid, phys.lo), and 'size' is a + * RESOURCE_CELLS_SIZE-tuple corresponding to (size.hi, size.lo). + * + * phys.hi = 0xYYXXXXZZ, where: + * 0xYY = npt000ss + * ||| | + * ||| +-- space code + * ||| | + * ||| + 00 if configuration space + * ||| + 01 if IO region, + * ||| + 10 if 32-bit MEM region + * ||| + 11 if 64-bit MEM region + * ||| + * ||+------ for non-relocatable IO: 1 if aliased + * || for relocatable IO: 1 if below 64KB + * || for MEM: 1 if below 1MB + * |+------- 1 if region is prefetchable + * +-------- 1 if region is non-relocatable + * 0xXXXX = bbbbbbbb dddddfff, encoding bus, slot, and function + * bits respectively + * 0xZZ = rrrrrrrr, the register number of the BAR corresponding + * to the region + * + * phys.mid and phys.lo correspond respectively to the hi/lo portions + * of the actual address of the region. + * + * note also that addresses defined in this property are, at least + * for PAPR guests, relative to the PHBs IO/MEM windows, and + * correspond directly to the addresses in the BARs. + * + * in accordance with PCI Bus Binding to Open Firmware, + * IEEE Std 1275-1994, section 4.1.1, as implemented by PAPR+ v2.7, + * Appendix C. 
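+ * + * Worked example (hypothetical device): a relocatable, non-prefetchable + * 32-bit MEM BAR at register 0x10 of bus 0, slot 5, function 1 encodes + * phys.hi = 0x02002910 (ss = 0b10, bbbbbbbb = 0x00, dddddfff = 0b00101001, + * rrrrrrrr = 0x10).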
+ */ +static void populate_resource_props(PCIDevice *d, ResourceProps *rp) +{ + int bus_num = pci_bus_num(PCI_BUS(qdev_get_parent_bus(DEVICE(d)))); + uint32_t dev_id = (b_bbbbbbbb(bus_num) | + b_ddddd(PCI_SLOT(d->devfn)) | + b_fff(PCI_FUNC(d->devfn))); + ResourceFields *reg; + int i, reg_idx = 0; + + /* config space region */ + reg = &rp->reg[reg_idx++]; + reg->phys_hi = cpu_to_be32(dev_id); + reg->phys_mid = 0; + reg->phys_lo = 0; + reg->size_hi = 0; + reg->size_lo = 0; + + for (i = 0; i < PCI_NUM_REGIONS; i++) { + if (!d->io_regions[i].size) { + continue; + } + + reg = &rp->reg[reg_idx++]; + + reg->phys_hi = cpu_to_be32(dev_id | b_rrrrrrrr(pci_bar(d, i))); + if (d->io_regions[i].type & PCI_BASE_ADDRESS_SPACE_IO) { + reg->phys_hi |= cpu_to_be32(b_ss(1)); + } else if (d->io_regions[i].type & PCI_BASE_ADDRESS_MEM_TYPE_64) { + reg->phys_hi |= cpu_to_be32(b_ss(3)); + } else { + reg->phys_hi |= cpu_to_be32(b_ss(2)); + } + reg->phys_mid = 0; + reg->phys_lo = 0; + reg->size_hi = cpu_to_be32(d->io_regions[i].size >> 32); + reg->size_lo = cpu_to_be32(d->io_regions[i].size); + } + + rp->reg_len = reg_idx * sizeof(ResourceFields); +} + +typedef struct PCIClass PCIClass; +typedef struct PCISubClass PCISubClass; +typedef struct PCIIFace PCIIFace; + +struct PCIIFace { + int iface; + const char *name; +}; + +struct PCISubClass { + int subclass; + const char *name; + const PCIIFace *iface; +}; + +struct PCIClass { + const char *name; + const PCISubClass *subc; +}; + +static const PCISubClass undef_subclass[] = { + { PCI_CLASS_NOT_DEFINED_VGA, "display", NULL }, + { 0xFF, NULL, NULL }, +}; + +static const PCISubClass mass_subclass[] = { + { PCI_CLASS_STORAGE_SCSI, "scsi", NULL }, + { PCI_CLASS_STORAGE_IDE, "ide", NULL }, + { PCI_CLASS_STORAGE_FLOPPY, "fdc", NULL }, + { PCI_CLASS_STORAGE_IPI, "ipi", NULL }, + { PCI_CLASS_STORAGE_RAID, "raid", NULL }, + { PCI_CLASS_STORAGE_ATA, "ata", NULL }, + { PCI_CLASS_STORAGE_SATA, "sata", NULL }, + { PCI_CLASS_STORAGE_SAS, "sas", NULL }, + { 0xFF, NULL, NULL }, +}; + +static const PCISubClass net_subclass[] = { + { PCI_CLASS_NETWORK_ETHERNET, "ethernet", NULL }, + { PCI_CLASS_NETWORK_TOKEN_RING, "token-ring", NULL }, + { PCI_CLASS_NETWORK_FDDI, "fddi", NULL }, + { PCI_CLASS_NETWORK_ATM, "atm", NULL }, + { PCI_CLASS_NETWORK_ISDN, "isdn", NULL }, + { PCI_CLASS_NETWORK_WORLDFIP, "worldfip", NULL }, + { PCI_CLASS_NETWORK_PICMG214, "picmg", NULL }, + { 0xFF, NULL, NULL }, +}; + +static const PCISubClass displ_subclass[] = { + { PCI_CLASS_DISPLAY_VGA, "vga", NULL }, + { PCI_CLASS_DISPLAY_XGA, "xga", NULL }, + { PCI_CLASS_DISPLAY_3D, "3d-controller", NULL }, + { 0xFF, NULL, NULL }, +}; + +static const PCISubClass media_subclass[] = { + { PCI_CLASS_MULTIMEDIA_VIDEO, "video", NULL }, + { PCI_CLASS_MULTIMEDIA_AUDIO, "sound", NULL }, + { PCI_CLASS_MULTIMEDIA_PHONE, "telephony", NULL }, + { 0xFF, NULL, NULL }, +}; + +static const PCISubClass mem_subclass[] = { + { PCI_CLASS_MEMORY_RAM, "memory", NULL }, + { PCI_CLASS_MEMORY_FLASH, "flash", NULL }, + { 0xFF, NULL, NULL }, +}; + +static const PCISubClass bridg_subclass[] = { + { PCI_CLASS_BRIDGE_HOST, "host", NULL }, + { PCI_CLASS_BRIDGE_ISA, "isa", NULL }, + { PCI_CLASS_BRIDGE_EISA, "eisa", NULL }, + { PCI_CLASS_BRIDGE_MC, "mca", NULL }, + { PCI_CLASS_BRIDGE_PCI, "pci", NULL }, + { PCI_CLASS_BRIDGE_PCMCIA, "pcmcia", NULL }, + { PCI_CLASS_BRIDGE_NUBUS, "nubus", NULL }, + { PCI_CLASS_BRIDGE_CARDBUS, "cardbus", NULL }, + { PCI_CLASS_BRIDGE_RACEWAY, "raceway", NULL }, + { PCI_CLASS_BRIDGE_PCI_SEMITP, "semi-transparent-pci", NULL }, + 
{ PCI_CLASS_BRIDGE_IB_PCI, "infiniband", NULL }, + { 0xFF, NULL, NULL }, +}; + +static const PCISubClass comm_subclass[] = { + { PCI_CLASS_COMMUNICATION_SERIAL, "serial", NULL }, + { PCI_CLASS_COMMUNICATION_PARALLEL, "parallel", NULL }, + { PCI_CLASS_COMMUNICATION_MULTISERIAL, "multiport-serial", NULL }, + { PCI_CLASS_COMMUNICATION_MODEM, "modem", NULL }, + { PCI_CLASS_COMMUNICATION_GPIB, "gpib", NULL }, + { PCI_CLASS_COMMUNICATION_SC, "smart-card", NULL }, + { 0xFF, NULL, NULL, }, +}; + +static const PCIIFace pic_iface[] = { + { PCI_CLASS_SYSTEM_PIC_IOAPIC, "io-apic" }, + { PCI_CLASS_SYSTEM_PIC_IOXAPIC, "io-xapic" }, + { 0xFF, NULL }, +}; + +static const PCISubClass sys_subclass[] = { + { PCI_CLASS_SYSTEM_PIC, "interrupt-controller", pic_iface }, + { PCI_CLASS_SYSTEM_DMA, "dma-controller", NULL }, + { PCI_CLASS_SYSTEM_TIMER, "timer", NULL }, + { PCI_CLASS_SYSTEM_RTC, "rtc", NULL }, + { PCI_CLASS_SYSTEM_PCI_HOTPLUG, "hot-plug-controller", NULL }, + { PCI_CLASS_SYSTEM_SDHCI, "sd-host-controller", NULL }, + { 0xFF, NULL, NULL }, +}; + +static const PCISubClass inp_subclass[] = { + { PCI_CLASS_INPUT_KEYBOARD, "keyboard", NULL }, + { PCI_CLASS_INPUT_PEN, "pen", NULL }, + { PCI_CLASS_INPUT_MOUSE, "mouse", NULL }, + { PCI_CLASS_INPUT_SCANNER, "scanner", NULL }, + { PCI_CLASS_INPUT_GAMEPORT, "gameport", NULL }, + { 0xFF, NULL, NULL }, +}; + +static const PCISubClass dock_subclass[] = { + { PCI_CLASS_DOCKING_GENERIC, "dock", NULL }, + { 0xFF, NULL, NULL }, +}; + +static const PCISubClass cpu_subclass[] = { + { PCI_CLASS_PROCESSOR_PENTIUM, "pentium", NULL }, + { PCI_CLASS_PROCESSOR_POWERPC, "powerpc", NULL }, + { PCI_CLASS_PROCESSOR_MIPS, "mips", NULL }, + { PCI_CLASS_PROCESSOR_CO, "co-processor", NULL }, + { 0xFF, NULL, NULL }, +}; + +static const PCIIFace usb_iface[] = { + { PCI_CLASS_SERIAL_USB_UHCI, "usb-uhci" }, + { PCI_CLASS_SERIAL_USB_OHCI, "usb-ohci", }, + { PCI_CLASS_SERIAL_USB_EHCI, "usb-ehci" }, + { PCI_CLASS_SERIAL_USB_XHCI, "usb-xhci" }, + { PCI_CLASS_SERIAL_USB_UNKNOWN, "usb-unknown" }, + { PCI_CLASS_SERIAL_USB_DEVICE, "usb-device" }, + { 0xFF, NULL }, +}; + +static const PCISubClass ser_subclass[] = { + { PCI_CLASS_SERIAL_FIREWIRE, "firewire", NULL }, + { PCI_CLASS_SERIAL_ACCESS, "access-bus", NULL }, + { PCI_CLASS_SERIAL_SSA, "ssa", NULL }, + { PCI_CLASS_SERIAL_USB, "usb", usb_iface }, + { PCI_CLASS_SERIAL_FIBER, "fibre-channel", NULL }, + { PCI_CLASS_SERIAL_SMBUS, "smb", NULL }, + { PCI_CLASS_SERIAL_IB, "infiniband", NULL }, + { PCI_CLASS_SERIAL_IPMI, "ipmi", NULL }, + { PCI_CLASS_SERIAL_SERCOS, "sercos", NULL }, + { PCI_CLASS_SERIAL_CANBUS, "canbus", NULL }, + { 0xFF, NULL, NULL }, +}; + +static const PCISubClass wrl_subclass[] = { + { PCI_CLASS_WIRELESS_IRDA, "irda", NULL }, + { PCI_CLASS_WIRELESS_CIR, "consumer-ir", NULL }, + { PCI_CLASS_WIRELESS_RF_CONTROLLER, "rf-controller", NULL }, + { PCI_CLASS_WIRELESS_BLUETOOTH, "bluetooth", NULL }, + { PCI_CLASS_WIRELESS_BROADBAND, "broadband", NULL }, + { 0xFF, NULL, NULL }, +}; + +static const PCISubClass sat_subclass[] = { + { PCI_CLASS_SATELLITE_TV, "satellite-tv", NULL }, + { PCI_CLASS_SATELLITE_AUDIO, "satellite-audio", NULL }, + { PCI_CLASS_SATELLITE_VOICE, "satellite-voice", NULL }, + { PCI_CLASS_SATELLITE_DATA, "satellite-data", NULL }, + { 0xFF, NULL, NULL }, +}; + +static const PCISubClass crypt_subclass[] = { + { PCI_CLASS_CRYPT_NETWORK, "network-encryption", NULL }, + { PCI_CLASS_CRYPT_ENTERTAINMENT, + "entertainment-encryption", NULL }, + { 0xFF, NULL, NULL }, +}; + +static const PCISubClass spc_subclass[] = { + { 
PCI_CLASS_SP_DPIO, "dpio", NULL }, + { PCI_CLASS_SP_PERF, "counter", NULL }, + { PCI_CLASS_SP_SYNCH, "measurement", NULL }, + { PCI_CLASS_SP_MANAGEMENT, "management-card", NULL }, + { 0xFF, NULL, NULL }, +}; + +static const PCIClass pci_classes[] = { + { "legacy-device", undef_subclass }, + { "mass-storage", mass_subclass }, + { "network", net_subclass }, + { "display", displ_subclass, }, + { "multimedia-device", media_subclass }, + { "memory-controller", mem_subclass }, + { "unknown-bridge", bridg_subclass }, + { "communication-controller", comm_subclass}, + { "system-peripheral", sys_subclass }, + { "input-controller", inp_subclass }, + { "docking-station", dock_subclass }, + { "cpu", cpu_subclass }, + { "serial-bus", ser_subclass }, + { "wireless-controller", wrl_subclass }, + { "intelligent-io", NULL }, + { "satellite-device", sat_subclass }, + { "encryption", crypt_subclass }, + { "data-processing-controller", spc_subclass }, +}; + +static const char *dt_name_from_class(uint8_t class, uint8_t subclass, + uint8_t iface) +{ + const PCIClass *pclass; + const PCISubClass *psubclass; + const PCIIFace *piface; + const char *name; + + if (class >= ARRAY_SIZE(pci_classes)) { + return "pci"; + } + + pclass = pci_classes + class; + name = pclass->name; + + if (pclass->subc == NULL) { + return name; + } + + psubclass = pclass->subc; + while ((psubclass->subclass & 0xff) != 0xff) { + if ((psubclass->subclass & 0xff) == subclass) { + name = psubclass->name; + break; + } + psubclass++; + } + + piface = psubclass->iface; + if (piface == NULL) { + return name; + } + while ((piface->iface & 0xff) != 0xff) { + if ((piface->iface & 0xff) == iface) { + name = piface->name; + break; + } + piface++; + } + + return name; +} + +/* + * DRC helper functions + */ + +static uint32_t drc_id_from_devfn(SpaprPhbState *phb, + uint8_t chassis, int32_t devfn) +{ + return (phb->index << 16) | (chassis << 8) | devfn; +} + +static SpaprDrc *drc_from_devfn(SpaprPhbState *phb, + uint8_t chassis, int32_t devfn) +{ + return spapr_drc_by_id(TYPE_SPAPR_DRC_PCI, + drc_id_from_devfn(phb, chassis, devfn)); +} + +static uint8_t chassis_from_bus(PCIBus *bus) +{ + if (pci_bus_is_root(bus)) { + return 0; + } else { + PCIDevice *bridge = pci_bridge_get_device(bus); + + return object_property_get_uint(OBJECT(bridge), "chassis_nr", + &error_abort); + } +} + +static SpaprDrc *drc_from_dev(SpaprPhbState *phb, PCIDevice *dev) +{ + uint8_t chassis = chassis_from_bus(pci_get_bus(dev)); + + return drc_from_devfn(phb, chassis, dev->devfn); +} + +static void add_drcs(SpaprPhbState *phb, PCIBus *bus) +{ + Object *owner; + int i; + uint8_t chassis; + + if (!phb->dr_enabled) { + return; + } + + chassis = chassis_from_bus(bus); + + if (pci_bus_is_root(bus)) { + owner = OBJECT(phb); + } else { + owner = OBJECT(pci_bridge_get_device(bus)); + } + + for (i = 0; i < PCI_SLOT_MAX * PCI_FUNC_MAX; i++) { + spapr_dr_connector_new(owner, TYPE_SPAPR_DRC_PCI, + drc_id_from_devfn(phb, chassis, i)); + } +} + +static void remove_drcs(SpaprPhbState *phb, PCIBus *bus) +{ + int i; + uint8_t chassis; + + if (!phb->dr_enabled) { + return; + } + + chassis = chassis_from_bus(bus); + + for (i = PCI_SLOT_MAX * PCI_FUNC_MAX - 1; i >= 0; i--) { + SpaprDrc *drc = drc_from_devfn(phb, chassis, i); + + if (drc) { + object_unparent(OBJECT(drc)); + } + } +} + +typedef struct PciWalkFdt { + void *fdt; + int offset; + SpaprPhbState *sphb; + int err; +} PciWalkFdt; + +static int spapr_dt_pci_device(SpaprPhbState *sphb, PCIDevice *dev, + void *fdt, int parent_offset); + +static void 
spapr_dt_pci_device_cb(PCIBus *bus, PCIDevice *pdev, + void *opaque) +{ + PciWalkFdt *p = opaque; + int err; + + if (p->err) { + /* Something's already broken, don't keep going */ + return; + } + + err = spapr_dt_pci_device(p->sphb, pdev, p->fdt, p->offset); + if (err < 0) { + p->err = err; + } +} + +/* Augment PCI device node with bridge specific information */ +static int spapr_dt_pci_bus(SpaprPhbState *sphb, PCIBus *bus, + void *fdt, int offset) +{ + Object *owner; + PciWalkFdt cbinfo = { + .fdt = fdt, + .offset = offset, + .sphb = sphb, + .err = 0, + }; + int ret; + + _FDT(fdt_setprop_cell(fdt, offset, "#address-cells", + RESOURCE_CELLS_ADDRESS)); + _FDT(fdt_setprop_cell(fdt, offset, "#size-cells", + RESOURCE_CELLS_SIZE)); + + assert(bus); + pci_for_each_device_under_bus_reverse(bus, spapr_dt_pci_device_cb, &cbinfo); + if (cbinfo.err) { + return cbinfo.err; + } + + if (pci_bus_is_root(bus)) { + owner = OBJECT(sphb); + } else { + owner = OBJECT(pci_bridge_get_device(bus)); + } + + ret = spapr_dt_drc(fdt, offset, owner, + SPAPR_DR_CONNECTOR_TYPE_PCI); + if (ret) { + return ret; + } + + return offset; +} + +char *spapr_pci_fw_dev_name(PCIDevice *dev) +{ + const gchar *basename; + int slot = PCI_SLOT(dev->devfn); + int func = PCI_FUNC(dev->devfn); + uint32_t ccode = pci_default_read_config(dev, PCI_CLASS_PROG, 3); + + basename = dt_name_from_class((ccode >> 16) & 0xff, (ccode >> 8) & 0xff, + ccode & 0xff); + + if (func != 0) { + return g_strdup_printf("%s@%x,%x", basename, slot, func); + } else { + return g_strdup_printf("%s@%x", basename, slot); + } +} + +/* create OF node for pci device and required OF DT properties */ +static int spapr_dt_pci_device(SpaprPhbState *sphb, PCIDevice *dev, + void *fdt, int parent_offset) +{ + int offset; + g_autofree gchar *nodename = spapr_pci_fw_dev_name(dev); + PCIDeviceClass *pc = PCI_DEVICE_GET_CLASS(dev); + ResourceProps rp; + SpaprDrc *drc = drc_from_dev(sphb, dev); + uint32_t vendor_id = pci_default_read_config(dev, PCI_VENDOR_ID, 2); + uint32_t device_id = pci_default_read_config(dev, PCI_DEVICE_ID, 2); + uint32_t revision_id = pci_default_read_config(dev, PCI_REVISION_ID, 1); + uint32_t ccode = pci_default_read_config(dev, PCI_CLASS_PROG, 3); + uint32_t irq_pin = pci_default_read_config(dev, PCI_INTERRUPT_PIN, 1); + uint32_t subsystem_id = pci_default_read_config(dev, PCI_SUBSYSTEM_ID, 2); + uint32_t subsystem_vendor_id = + pci_default_read_config(dev, PCI_SUBSYSTEM_VENDOR_ID, 2); + uint32_t cache_line_size = + pci_default_read_config(dev, PCI_CACHE_LINE_SIZE, 1); + uint32_t pci_status = pci_default_read_config(dev, PCI_STATUS, 2); + gchar *loc_code; + + _FDT(offset = fdt_add_subnode(fdt, parent_offset, nodename)); + + /* in accordance with PAPR+ v2.7 13.6.3, Table 181 */ + _FDT(fdt_setprop_cell(fdt, offset, "vendor-id", vendor_id)); + _FDT(fdt_setprop_cell(fdt, offset, "device-id", device_id)); + _FDT(fdt_setprop_cell(fdt, offset, "revision-id", revision_id)); + + _FDT(fdt_setprop_cell(fdt, offset, "class-code", ccode)); + if (irq_pin) { + _FDT(fdt_setprop_cell(fdt, offset, "interrupts", irq_pin)); + } + + if (subsystem_id) { + _FDT(fdt_setprop_cell(fdt, offset, "subsystem-id", subsystem_id)); + } + + if (subsystem_vendor_id) { + _FDT(fdt_setprop_cell(fdt, offset, "subsystem-vendor-id", + subsystem_vendor_id)); + } + + _FDT(fdt_setprop_cell(fdt, offset, "cache-line-size", cache_line_size)); + + + /* the following fdt cells are masked off the pci status register */ + _FDT(fdt_setprop_cell(fdt, offset, "devsel-speed", + PCI_STATUS_DEVSEL_MASK & 
pci_status)); + + if (pci_status & PCI_STATUS_FAST_BACK) { + _FDT(fdt_setprop(fdt, offset, "fast-back-to-back", NULL, 0)); + } + if (pci_status & PCI_STATUS_66MHZ) { + _FDT(fdt_setprop(fdt, offset, "66mhz-capable", NULL, 0)); + } + if (pci_status & PCI_STATUS_UDF) { + _FDT(fdt_setprop(fdt, offset, "udf-supported", NULL, 0)); + } + + loc_code = spapr_phb_get_loc_code(sphb, dev); + _FDT(fdt_setprop_string(fdt, offset, "ibm,loc-code", loc_code)); + g_free(loc_code); + + if (drc) { + _FDT(fdt_setprop_cell(fdt, offset, "ibm,my-drc-index", + spapr_drc_index(drc))); + } + + if (msi_present(dev)) { + uint32_t max_msi = msi_nr_vectors_allocated(dev); + if (max_msi) { + _FDT(fdt_setprop_cell(fdt, offset, "ibm,req#msi", max_msi)); + } + } + if (msix_present(dev)) { + uint32_t max_msix = dev->msix_entries_nr; + if (max_msix) { + _FDT(fdt_setprop_cell(fdt, offset, "ibm,req#msi-x", max_msix)); + } + } + + populate_resource_props(dev, &rp); + _FDT(fdt_setprop(fdt, offset, "reg", (uint8_t *)rp.reg, rp.reg_len)); + + if (sphb->pcie_ecs && pci_is_express(dev)) { + _FDT(fdt_setprop_cell(fdt, offset, "ibm,pci-config-space-type", 0x1)); + } + + spapr_phb_nvgpu_populate_pcidev_dt(dev, fdt, offset, sphb); + + if (!pc->is_bridge) { + /* Properties only for non-bridges */ + uint32_t min_grant = pci_default_read_config(dev, PCI_MIN_GNT, 1); + uint32_t max_latency = pci_default_read_config(dev, PCI_MAX_LAT, 1); + _FDT(fdt_setprop_cell(fdt, offset, "min-grant", min_grant)); + _FDT(fdt_setprop_cell(fdt, offset, "max-latency", max_latency)); + return offset; + } else { + PCIBus *sec_bus = pci_bridge_get_sec_bus(PCI_BRIDGE(dev)); + + return spapr_dt_pci_bus(sphb, sec_bus, fdt, offset); + } +} + +/* Callback to be called during DRC release. */ +void spapr_phb_remove_pci_device_cb(DeviceState *dev) +{ + HotplugHandler *hotplug_ctrl = qdev_get_hotplug_handler(dev); + + hotplug_handler_unplug(hotplug_ctrl, dev, &error_abort); + object_unparent(OBJECT(dev)); +} + +int spapr_pci_dt_populate(SpaprDrc *drc, SpaprMachineState *spapr, + void *fdt, int *fdt_start_offset, Error **errp) +{ + HotplugHandler *plug_handler = qdev_get_hotplug_handler(drc->dev); + SpaprPhbState *sphb = SPAPR_PCI_HOST_BRIDGE(plug_handler); + PCIDevice *pdev = PCI_DEVICE(drc->dev); + + *fdt_start_offset = spapr_dt_pci_device(sphb, pdev, fdt, 0); + return 0; +} + +static void spapr_pci_bridge_plug(SpaprPhbState *phb, + PCIBridge *bridge) +{ + PCIBus *bus = pci_bridge_get_sec_bus(bridge); + + add_drcs(phb, bus); +} + +/* Returns non-zero if the value of "chassis_nr" is already in use */ +static int check_chassis_nr(Object *obj, void *opaque) +{ + int new_chassis_nr = + object_property_get_uint(opaque, "chassis_nr", &error_abort); + int chassis_nr = + object_property_get_uint(obj, "chassis_nr", NULL); + + if (!object_dynamic_cast(obj, TYPE_PCI_BRIDGE)) { + return 0; + } + + /* Skip unsupported bridge types */ + if (!chassis_nr) { + return 0; + } + + /* Skip self */ + if (obj == opaque) { + return 0; + } + + return chassis_nr == new_chassis_nr; +} + +static bool bridge_has_valid_chassis_nr(Object *bridge, Error **errp) +{ + int chassis_nr = + object_property_get_uint(bridge, "chassis_nr", NULL); + + /* + * slotid_cap_init() already ensures that "chassis_nr" isn't null for + * standard PCI bridges, so this really tells if "chassis_nr" is present + * or not. 
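+ * Note that chassis_nr is also what drc_id_from_devfn() folds into the + * DRC index for the devices behind the bridge.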
+ */ + if (!chassis_nr) { + error_setg(errp, "PCI Bridge lacks a \"chassis_nr\" property"); + error_append_hint(errp, "Try -device pci-bridge instead.\n"); + return false; + } + + /* We want unique values for "chassis_nr" */ + if (object_child_foreach_recursive(object_get_root(), check_chassis_nr, + bridge)) { + error_setg(errp, "Bridge chassis %d already in use", chassis_nr); + return false; + } + + return true; +} + +static void spapr_pci_pre_plug(HotplugHandler *plug_handler, + DeviceState *plugged_dev, Error **errp) +{ + SpaprPhbState *phb = SPAPR_PCI_HOST_BRIDGE(DEVICE(plug_handler)); + PCIDevice *pdev = PCI_DEVICE(plugged_dev); + PCIDeviceClass *pc = PCI_DEVICE_GET_CLASS(plugged_dev); + SpaprDrc *drc = drc_from_dev(phb, pdev); + PCIBus *bus = PCI_BUS(qdev_get_parent_bus(DEVICE(pdev))); + uint32_t slotnr = PCI_SLOT(pdev->devfn); + + if (!phb->dr_enabled) { + /* if this is a hotplug operation initiated by the user + * we need to let them know it's not enabled + */ + if (plugged_dev->hotplugged) { + error_setg(errp, QERR_BUS_NO_HOTPLUG, + object_get_typename(OBJECT(phb))); + return; + } + } + + if (pc->is_bridge) { + if (!bridge_has_valid_chassis_nr(OBJECT(plugged_dev), errp)) { + return; + } + } + + /* Following the QEMU convention used for PCIe multifunction + * hotplug, we do not allow functions to be hotplugged to a + * slot that already has function 0 present + */ + if (plugged_dev->hotplugged && bus->devices[PCI_DEVFN(slotnr, 0)] && + PCI_FUNC(pdev->devfn) != 0) { + error_setg(errp, "PCI: slot %d function 0 already occupied by %s," + " additional functions can no longer be exposed to guest.", + slotnr, bus->devices[PCI_DEVFN(slotnr, 0)]->name); + } + + if (drc && drc->dev) { + error_setg(errp, "PCI: slot %d already occupied by %s", slotnr, + pci_get_function_0(PCI_DEVICE(drc->dev))->name); + return; + } +} + +static void spapr_pci_plug(HotplugHandler *plug_handler, + DeviceState *plugged_dev, Error **errp) +{ + SpaprPhbState *phb = SPAPR_PCI_HOST_BRIDGE(DEVICE(plug_handler)); + PCIDevice *pdev = PCI_DEVICE(plugged_dev); + PCIDeviceClass *pc = PCI_DEVICE_GET_CLASS(plugged_dev); + SpaprDrc *drc = drc_from_dev(phb, pdev); + uint32_t slotnr = PCI_SLOT(pdev->devfn); + + /* + * If DR is disabled we don't need to do anything in the case of + * hotplug or coldplug callbacks. + */ + if (!phb->dr_enabled) { + return; + } + + g_assert(drc); + + if (pc->is_bridge) { + spapr_pci_bridge_plug(phb, PCI_BRIDGE(plugged_dev)); + } + + /* spapr_pci_pre_plug() already checked the DRC is attachable */ + spapr_drc_attach(drc, DEVICE(pdev)); + + /* If this is function 0, signal hotplug for all the device functions. + * Otherwise defer sending the hotplug event. 
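+ * This mirrors the convention enforced in spapr_pci_pre_plug(): function + * 0 is plugged last, so it is the one that announces the whole slot.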
+ */ + if (!spapr_drc_hotplugged(plugged_dev)) { + spapr_drc_reset(drc); + } else if (PCI_FUNC(pdev->devfn) == 0) { + int i; + uint8_t chassis = chassis_from_bus(pci_get_bus(pdev)); + + for (i = 0; i < 8; i++) { + SpaprDrc *func_drc; + SpaprDrcClass *func_drck; + SpaprDREntitySense state; + + func_drc = drc_from_devfn(phb, chassis, PCI_DEVFN(slotnr, i)); + func_drck = SPAPR_DR_CONNECTOR_GET_CLASS(func_drc); + state = func_drck->dr_entity_sense(func_drc); + + if (state == SPAPR_DR_ENTITY_SENSE_PRESENT) { + spapr_hotplug_req_add_by_index(func_drc); + } + } + } +} + +static void spapr_pci_bridge_unplug(SpaprPhbState *phb, + PCIBridge *bridge) +{ + PCIBus *bus = pci_bridge_get_sec_bus(bridge); + + remove_drcs(phb, bus); +} + +static void spapr_pci_unplug(HotplugHandler *plug_handler, + DeviceState *plugged_dev, Error **errp) +{ + PCIDeviceClass *pc = PCI_DEVICE_GET_CLASS(plugged_dev); + SpaprPhbState *phb = SPAPR_PCI_HOST_BRIDGE(DEVICE(plug_handler)); + + /* some guest versions do not wait for completion of a device + * cleanup (generally done asynchronously by the kernel) before + * signaling to QEMU that the device is safe to remove, but instead + * sleep for some 'safe' period of time. Unfortunately on a busy host + * this sleep isn't guaranteed to be long enough, resulting in + * bad things like IRQ lines being left asserted during final + * device removal. To deal with this we call reset just prior + * to finalizing the device, which will put the device back into + * an 'idle' state, as the device cleanup code expects. + */ + pci_device_reset(PCI_DEVICE(plugged_dev)); + + if (pc->is_bridge) { + spapr_pci_bridge_unplug(phb, PCI_BRIDGE(plugged_dev)); + return; + } + + qdev_unrealize(plugged_dev); +} + +static void spapr_pci_unplug_request(HotplugHandler *plug_handler, + DeviceState *plugged_dev, Error **errp) +{ + SpaprPhbState *phb = SPAPR_PCI_HOST_BRIDGE(DEVICE(plug_handler)); + PCIDevice *pdev = PCI_DEVICE(plugged_dev); + SpaprDrc *drc = drc_from_dev(phb, pdev); + + if (!phb->dr_enabled) { + error_setg(errp, QERR_BUS_NO_HOTPLUG, + object_get_typename(OBJECT(phb))); + return; + } + + g_assert(drc); + g_assert(drc->dev == plugged_dev); + + if (!spapr_drc_unplug_requested(drc)) { + PCIDeviceClass *pc = PCI_DEVICE_GET_CLASS(plugged_dev); + uint32_t slotnr = PCI_SLOT(pdev->devfn); + SpaprDrc *func_drc; + SpaprDrcClass *func_drck; + SpaprDREntitySense state; + int i; + uint8_t chassis = chassis_from_bus(pci_get_bus(pdev)); + + if (pc->is_bridge) { + error_setg(errp, "PCI: Hot unplug of PCI bridges not supported"); + return; + } + if (object_property_get_uint(OBJECT(pdev), "nvlink2-tgt", NULL)) { + error_setg(errp, "PCI: Cannot unplug NVLink2 devices"); + return; + } + + /* ensure any other present functions are pending unplug */ + if (PCI_FUNC(pdev->devfn) == 0) { + for (i = 1; i < 8; i++) { + func_drc = drc_from_devfn(phb, chassis, PCI_DEVFN(slotnr, i)); + func_drck = SPAPR_DR_CONNECTOR_GET_CLASS(func_drc); + state = func_drck->dr_entity_sense(func_drc); + if (state == SPAPR_DR_ENTITY_SENSE_PRESENT + && !spapr_drc_unplug_requested(func_drc)) { + /* + * Attempting to remove function 0 of a multifunction + * device will cascade into removing all child + * functions, even if their unplug wasn't requested + * beforehand. + */ + spapr_drc_unplug_request(func_drc); + } + } + } + + spapr_drc_unplug_request(drc); + + /* if this isn't func 0, defer unplug event. 
otherwise signal removal + * for all present functions + */ + if (PCI_FUNC(pdev->devfn) == 0) { + for (i = 7; i >= 0; i--) { + func_drc = drc_from_devfn(phb, chassis, PCI_DEVFN(slotnr, i)); + func_drck = SPAPR_DR_CONNECTOR_GET_CLASS(func_drc); + state = func_drck->dr_entity_sense(func_drc); + if (state == SPAPR_DR_ENTITY_SENSE_PRESENT) { + spapr_hotplug_req_remove_by_index(func_drc); + } + } + } + } else { + error_setg(errp, + "PCI device unplug already in progress for device %s", + drc->dev->id); + } +} + +static void spapr_phb_finalizefn(Object *obj) +{ + SpaprPhbState *sphb = SPAPR_PCI_HOST_BRIDGE(obj); + + g_free(sphb->dtbusname); + sphb->dtbusname = NULL; +} + +static void spapr_phb_unrealize(DeviceState *dev) +{ + SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine()); + SysBusDevice *s = SYS_BUS_DEVICE(dev); + PCIHostState *phb = PCI_HOST_BRIDGE(s); + SpaprPhbState *sphb = SPAPR_PCI_HOST_BRIDGE(phb); + SpaprTceTable *tcet; + int i; + const unsigned windows_supported = spapr_phb_windows_supported(sphb); + + spapr_phb_nvgpu_free(sphb); + + if (sphb->msi) { + g_hash_table_unref(sphb->msi); + sphb->msi = NULL; + } + + /* + * Remove IO/MMIO subregions and aliases, rest should get cleaned + * via PHB's unrealize->object_finalize + */ + for (i = windows_supported - 1; i >= 0; i--) { + tcet = spapr_tce_find_by_liobn(sphb->dma_liobn[i]); + if (tcet) { + memory_region_del_subregion(&sphb->iommu_root, + spapr_tce_get_iommu(tcet)); + } + } + + remove_drcs(sphb, phb->bus); + + for (i = PCI_NUM_PINS - 1; i >= 0; i--) { + if (sphb->lsi_table[i].irq) { + spapr_irq_free(spapr, sphb->lsi_table[i].irq, 1); + sphb->lsi_table[i].irq = 0; + } + } + + QLIST_REMOVE(sphb, list); + + memory_region_del_subregion(&sphb->iommu_root, &sphb->msiwindow); + + /* + * An attached PCI device may have memory listeners, eg. VFIO PCI. We have + * unmapped all sections. Remove the listeners now, before destroying the + * address space. + */ + address_space_remove_listeners(&sphb->iommu_as); + address_space_destroy(&sphb->iommu_as); + + qbus_set_hotplug_handler(BUS(phb->bus), NULL); + pci_unregister_root_bus(phb->bus); + + memory_region_del_subregion(get_system_memory(), &sphb->iowindow); + if (sphb->mem64_win_pciaddr != (hwaddr)-1) { + memory_region_del_subregion(get_system_memory(), &sphb->mem64window); + } + memory_region_del_subregion(get_system_memory(), &sphb->mem32window); +} + +static void spapr_phb_destroy_msi(gpointer opaque) +{ + SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine()); + SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr); + SpaprPciMsi *msi = opaque; + + if (!smc->legacy_irq_allocation) { + spapr_irq_msi_free(spapr, msi->first_irq, msi->num); + } + spapr_irq_free(spapr, msi->first_irq, msi->num); + g_free(msi); +} + +static void spapr_phb_realize(DeviceState *dev, Error **errp) +{ + ERRP_GUARD(); + /* We don't use SPAPR_MACHINE() in order to exit gracefully if the user + * tries to add a sPAPR PHB to a non-pseries machine. + */ + SpaprMachineState *spapr = + (SpaprMachineState *) object_dynamic_cast(qdev_get_machine(), + TYPE_SPAPR_MACHINE); + SpaprMachineClass *smc = spapr ? 
SPAPR_MACHINE_GET_CLASS(spapr) : NULL; + SysBusDevice *s = SYS_BUS_DEVICE(dev); + SpaprPhbState *sphb = SPAPR_PCI_HOST_BRIDGE(s); + PCIHostState *phb = PCI_HOST_BRIDGE(s); + MachineState *ms = MACHINE(spapr); + char *namebuf; + int i; + PCIBus *bus; + uint64_t msi_window_size = 4096; + SpaprTceTable *tcet; + const unsigned windows_supported = spapr_phb_windows_supported(sphb); + + if (!spapr) { + error_setg(errp, TYPE_SPAPR_PCI_HOST_BRIDGE " needs a pseries machine"); + return; + } + + assert(sphb->index != (uint32_t)-1); /* checked in spapr_phb_pre_plug() */ + + if (sphb->mem64_win_size != 0) { + if (sphb->mem_win_size > SPAPR_PCI_MEM32_WIN_SIZE) { + error_setg(errp, "32-bit memory window of size 0x%"HWADDR_PRIx + " (max 2 GiB)", sphb->mem_win_size); + return; + } + + /* 64-bit window defaults to identity mapping */ + sphb->mem64_win_pciaddr = sphb->mem64_win_addr; + } else if (sphb->mem_win_size > SPAPR_PCI_MEM32_WIN_SIZE) { + /* + * For compatibility with old configuration, if no 64-bit MMIO + * window is specified, but the ordinary (32-bit) memory + * window is specified as > 2GiB, we treat it as a 2GiB 32-bit + * window, with a 64-bit MMIO window following on immediately + * afterwards + */ + sphb->mem64_win_size = sphb->mem_win_size - SPAPR_PCI_MEM32_WIN_SIZE; + sphb->mem64_win_addr = sphb->mem_win_addr + SPAPR_PCI_MEM32_WIN_SIZE; + sphb->mem64_win_pciaddr = + SPAPR_PCI_MEM_WIN_BUS_OFFSET + SPAPR_PCI_MEM32_WIN_SIZE; + sphb->mem_win_size = SPAPR_PCI_MEM32_WIN_SIZE; + } + + if (spapr_pci_find_phb(spapr, sphb->buid)) { + SpaprPhbState *s; + + error_setg(errp, "PCI host bridges must have unique indexes"); + error_append_hint(errp, "The following indexes are already in use:"); + QLIST_FOREACH(s, &spapr->phbs, list) { + error_append_hint(errp, " %d", s->index); + } + error_append_hint(errp, "\nTry another value for the index property\n"); + return; + } + + if (sphb->numa_node != -1 && + (sphb->numa_node >= MAX_NODES || + !ms->numa_state->nodes[sphb->numa_node].present)) { + error_setg(errp, "Invalid NUMA node ID for PCI host bridge"); + return; + } + + sphb->dtbusname = g_strdup_printf("pci@%" PRIx64, sphb->buid); + + /* Initialize memory regions */ + namebuf = g_strdup_printf("%s.mmio", sphb->dtbusname); + memory_region_init(&sphb->memspace, OBJECT(sphb), namebuf, UINT64_MAX); + g_free(namebuf); + + namebuf = g_strdup_printf("%s.mmio32-alias", sphb->dtbusname); + memory_region_init_alias(&sphb->mem32window, OBJECT(sphb), + namebuf, &sphb->memspace, + SPAPR_PCI_MEM_WIN_BUS_OFFSET, sphb->mem_win_size); + g_free(namebuf); + memory_region_add_subregion(get_system_memory(), sphb->mem_win_addr, + &sphb->mem32window); + + if (sphb->mem64_win_size != 0) { + namebuf = g_strdup_printf("%s.mmio64-alias", sphb->dtbusname); + memory_region_init_alias(&sphb->mem64window, OBJECT(sphb), + namebuf, &sphb->memspace, + sphb->mem64_win_pciaddr, sphb->mem64_win_size); + g_free(namebuf); + + memory_region_add_subregion(get_system_memory(), + sphb->mem64_win_addr, + &sphb->mem64window); + } + + /* Initialize IO regions */ + namebuf = g_strdup_printf("%s.io", sphb->dtbusname); + memory_region_init(&sphb->iospace, OBJECT(sphb), + namebuf, SPAPR_PCI_IO_WIN_SIZE); + g_free(namebuf); + + namebuf = g_strdup_printf("%s.io-alias", sphb->dtbusname); + memory_region_init_alias(&sphb->iowindow, OBJECT(sphb), namebuf, + &sphb->iospace, 0, SPAPR_PCI_IO_WIN_SIZE); + g_free(namebuf); + memory_region_add_subregion(get_system_memory(), sphb->io_win_addr, + &sphb->iowindow); + + bus = pci_register_root_bus(dev, NULL, + 
pci_spapr_set_irq, pci_swizzle_map_irq_fn, sphb,
+                                &sphb->memspace, &sphb->iospace,
+                                PCI_DEVFN(0, 0), PCI_NUM_PINS,
+                                TYPE_PCI_BUS);
+
+    /*
+     * Despite resembling a vanilla PCI bus in most ways, the PAPR
+     * para-virtualized PCI bus *does* permit PCI-E extended config
+     * space access
+     */
+    if (sphb->pcie_ecs) {
+        bus->flags |= PCI_BUS_EXTENDED_CONFIG_SPACE;
+    }
+    phb->bus = bus;
+    qbus_set_hotplug_handler(BUS(phb->bus), OBJECT(sphb));
+
+    /*
+     * Initialize PHB address space.
+     * By default there will be at least one subregion for default
+     * 32bit DMA window.
+     * Later the guest might want to create another DMA window
+     * which will become another memory subregion.
+     */
+    namebuf = g_strdup_printf("%s.iommu-root", sphb->dtbusname);
+    memory_region_init(&sphb->iommu_root, OBJECT(sphb),
+                       namebuf, UINT64_MAX);
+    g_free(namebuf);
+    address_space_init(&sphb->iommu_as, &sphb->iommu_root,
+                       sphb->dtbusname);
+
+    /*
+     * As MSI/MSIX interrupts are triggered by writes to the MSI/MSIX
+     * vectors, we need to allocate some memory to catch those writes
+     * coming from msi_notify()/msix_notify().
+     * As MSIMessage:addr is going to be the same and MSIMessage:data
+     * is going to be a VIRQ number, 4 bytes of the MSI MR will only
+     * be used.
+     *
+     * For KVM we want to ensure that this memory is a full page so that
+     * our memory slot is of page size granularity.
+     */
+    if (kvm_enabled()) {
+        msi_window_size = qemu_real_host_page_size;
+    }
+
+    memory_region_init_io(&sphb->msiwindow, OBJECT(sphb), &spapr_msi_ops, spapr,
+                          "msi", msi_window_size);
+    memory_region_add_subregion(&sphb->iommu_root, SPAPR_PCI_MSI_WINDOW,
+                                &sphb->msiwindow);
+
+    pci_setup_iommu(bus, spapr_pci_dma_iommu, sphb);
+
+    pci_bus_set_route_irq_fn(bus, spapr_route_intx_pin_to_irq);
+
+    QLIST_INSERT_HEAD(&spapr->phbs, sphb, list);
+
+    /* Initialize the LSI table */
+    for (i = 0; i < PCI_NUM_PINS; i++) {
+        int irq = SPAPR_IRQ_PCI_LSI + sphb->index * PCI_NUM_PINS + i;
+
+        if (smc->legacy_irq_allocation) {
+            irq = spapr_irq_findone(spapr, errp);
+            if (irq < 0) {
+                error_prepend(errp, "can't allocate LSIs: ");
+                /*
+                 * Older machines will never support PHB hotplug, i.e. this
+                 * is an init-only path and QEMU will terminate. No need to
+                 * rollback.
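+                 * (With the default allocation above the LSI number is
+                 * deterministic instead: SPAPR_IRQ_PCI_LSI +
+                 * index * PCI_NUM_PINS + pin, so e.g. PHB index 1, pin INTC#
+                 * maps to SPAPR_IRQ_PCI_LSI + 6.)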
+ */ + return; + } + } + + if (spapr_irq_claim(spapr, irq, true, errp) < 0) { + error_prepend(errp, "can't allocate LSIs: "); + goto unrealize; + } + + sphb->lsi_table[i].irq = irq; + } + + /* allocate connectors for child PCI devices */ + add_drcs(sphb, phb->bus); + + /* DMA setup */ + for (i = 0; i < windows_supported; ++i) { + tcet = spapr_tce_new_table(DEVICE(sphb), sphb->dma_liobn[i]); + if (!tcet) { + error_setg(errp, "Creating window#%d failed for %s", + i, sphb->dtbusname); + goto unrealize; + } + memory_region_add_subregion(&sphb->iommu_root, 0, + spapr_tce_get_iommu(tcet)); + } + + sphb->msi = g_hash_table_new_full(g_int_hash, g_int_equal, g_free, + spapr_phb_destroy_msi); + return; + +unrealize: + spapr_phb_unrealize(dev); +} + +static int spapr_phb_children_reset(Object *child, void *opaque) +{ + DeviceState *dev = (DeviceState *) object_dynamic_cast(child, TYPE_DEVICE); + + if (dev) { + device_legacy_reset(dev); + } + + return 0; +} + +void spapr_phb_dma_reset(SpaprPhbState *sphb) +{ + int i; + SpaprTceTable *tcet; + + for (i = 0; i < SPAPR_PCI_DMA_MAX_WINDOWS; ++i) { + tcet = spapr_tce_find_by_liobn(sphb->dma_liobn[i]); + + if (tcet && tcet->nb_table) { + spapr_tce_table_disable(tcet); + } + } + + /* Register default 32bit DMA window */ + tcet = spapr_tce_find_by_liobn(sphb->dma_liobn[0]); + spapr_tce_table_enable(tcet, SPAPR_TCE_PAGE_SHIFT, sphb->dma_win_addr, + sphb->dma_win_size >> SPAPR_TCE_PAGE_SHIFT); +} + +static void spapr_phb_reset(DeviceState *qdev) +{ + SpaprPhbState *sphb = SPAPR_PCI_HOST_BRIDGE(qdev); + Error *err = NULL; + + spapr_phb_dma_reset(sphb); + spapr_phb_nvgpu_free(sphb); + spapr_phb_nvgpu_setup(sphb, &err); + if (err) { + error_report_err(err); + } + + /* Reset the IOMMU state */ + object_child_foreach(OBJECT(qdev), spapr_phb_children_reset, NULL); + + if (spapr_phb_eeh_available(SPAPR_PCI_HOST_BRIDGE(qdev))) { + spapr_phb_vfio_reset(qdev); + } + + g_hash_table_remove_all(sphb->msi); +} + +static Property spapr_phb_properties[] = { + DEFINE_PROP_UINT32("index", SpaprPhbState, index, -1), + DEFINE_PROP_UINT64("mem_win_size", SpaprPhbState, mem_win_size, + SPAPR_PCI_MEM32_WIN_SIZE), + DEFINE_PROP_UINT64("mem64_win_size", SpaprPhbState, mem64_win_size, + SPAPR_PCI_MEM64_WIN_SIZE), + DEFINE_PROP_UINT64("io_win_size", SpaprPhbState, io_win_size, + SPAPR_PCI_IO_WIN_SIZE), + DEFINE_PROP_BOOL("dynamic-reconfiguration", SpaprPhbState, dr_enabled, + true), + /* Default DMA window is 0..1GB */ + DEFINE_PROP_UINT64("dma_win_addr", SpaprPhbState, dma_win_addr, 0), + DEFINE_PROP_UINT64("dma_win_size", SpaprPhbState, dma_win_size, 0x40000000), + DEFINE_PROP_UINT64("dma64_win_addr", SpaprPhbState, dma64_win_addr, + 0x800000000000000ULL), + DEFINE_PROP_BOOL("ddw", SpaprPhbState, ddw_enabled, true), + DEFINE_PROP_UINT64("pgsz", SpaprPhbState, page_size_mask, + (1ULL << 12) | (1ULL << 16) + | (1ULL << 21) | (1ULL << 24)), + DEFINE_PROP_UINT32("numa_node", SpaprPhbState, numa_node, -1), + DEFINE_PROP_BOOL("pre-2.8-migration", SpaprPhbState, + pre_2_8_migration, false), + DEFINE_PROP_BOOL("pcie-extended-configuration-space", SpaprPhbState, + pcie_ecs, true), + DEFINE_PROP_UINT64("gpa", SpaprPhbState, nv2_gpa_win_addr, 0), + DEFINE_PROP_UINT64("atsd", SpaprPhbState, nv2_atsd_win_addr, 0), + DEFINE_PROP_BOOL("pre-5.1-associativity", SpaprPhbState, + pre_5_1_assoc, false), + DEFINE_PROP_END_OF_LIST(), +}; + +static const VMStateDescription vmstate_spapr_pci_lsi = { + .name = "spapr_pci/lsi", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + 
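+        /* _EQUAL: the incoming LSI IRQ number must match the one already
+         * set up on the destination, otherwise the migration fails. */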
VMSTATE_UINT32_EQUAL(irq, SpaprPciLsi, NULL), + + VMSTATE_END_OF_LIST() + }, +}; + +static const VMStateDescription vmstate_spapr_pci_msi = { + .name = "spapr_pci/msi", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField []) { + VMSTATE_UINT32(key, SpaprPciMsiMig), + VMSTATE_UINT32(value.first_irq, SpaprPciMsiMig), + VMSTATE_UINT32(value.num, SpaprPciMsiMig), + VMSTATE_END_OF_LIST() + }, +}; + +static int spapr_pci_pre_save(void *opaque) +{ + SpaprPhbState *sphb = opaque; + GHashTableIter iter; + gpointer key, value; + int i; + + if (sphb->pre_2_8_migration) { + sphb->mig_liobn = sphb->dma_liobn[0]; + sphb->mig_mem_win_addr = sphb->mem_win_addr; + sphb->mig_mem_win_size = sphb->mem_win_size; + sphb->mig_io_win_addr = sphb->io_win_addr; + sphb->mig_io_win_size = sphb->io_win_size; + + if ((sphb->mem64_win_size != 0) + && (sphb->mem64_win_addr + == (sphb->mem_win_addr + sphb->mem_win_size))) { + sphb->mig_mem_win_size += sphb->mem64_win_size; + } + } + + g_free(sphb->msi_devs); + sphb->msi_devs = NULL; + sphb->msi_devs_num = g_hash_table_size(sphb->msi); + if (!sphb->msi_devs_num) { + return 0; + } + sphb->msi_devs = g_new(SpaprPciMsiMig, sphb->msi_devs_num); + + g_hash_table_iter_init(&iter, sphb->msi); + for (i = 0; g_hash_table_iter_next(&iter, &key, &value); ++i) { + sphb->msi_devs[i].key = *(uint32_t *) key; + sphb->msi_devs[i].value = *(SpaprPciMsi *) value; + } + + return 0; +} + +static int spapr_pci_post_save(void *opaque) +{ + SpaprPhbState *sphb = opaque; + + g_free(sphb->msi_devs); + sphb->msi_devs = NULL; + sphb->msi_devs_num = 0; + return 0; +} + +static int spapr_pci_post_load(void *opaque, int version_id) +{ + SpaprPhbState *sphb = opaque; + gpointer key, value; + int i; + + for (i = 0; i < sphb->msi_devs_num; ++i) { + key = g_memdup(&sphb->msi_devs[i].key, + sizeof(sphb->msi_devs[i].key)); + value = g_memdup(&sphb->msi_devs[i].value, + sizeof(sphb->msi_devs[i].value)); + g_hash_table_insert(sphb->msi, key, value); + } + g_free(sphb->msi_devs); + sphb->msi_devs = NULL; + sphb->msi_devs_num = 0; + + return 0; +} + +static bool pre_2_8_migration(void *opaque, int version_id) +{ + SpaprPhbState *sphb = opaque; + + return sphb->pre_2_8_migration; +} + +static const VMStateDescription vmstate_spapr_pci = { + .name = "spapr_pci", + .version_id = 2, + .minimum_version_id = 2, + .pre_save = spapr_pci_pre_save, + .post_save = spapr_pci_post_save, + .post_load = spapr_pci_post_load, + .fields = (VMStateField[]) { + VMSTATE_UINT64_EQUAL(buid, SpaprPhbState, NULL), + VMSTATE_UINT32_TEST(mig_liobn, SpaprPhbState, pre_2_8_migration), + VMSTATE_UINT64_TEST(mig_mem_win_addr, SpaprPhbState, pre_2_8_migration), + VMSTATE_UINT64_TEST(mig_mem_win_size, SpaprPhbState, pre_2_8_migration), + VMSTATE_UINT64_TEST(mig_io_win_addr, SpaprPhbState, pre_2_8_migration), + VMSTATE_UINT64_TEST(mig_io_win_size, SpaprPhbState, pre_2_8_migration), + VMSTATE_STRUCT_ARRAY(lsi_table, SpaprPhbState, PCI_NUM_PINS, 0, + vmstate_spapr_pci_lsi, SpaprPciLsi), + VMSTATE_INT32(msi_devs_num, SpaprPhbState), + VMSTATE_STRUCT_VARRAY_ALLOC(msi_devs, SpaprPhbState, msi_devs_num, 0, + vmstate_spapr_pci_msi, SpaprPciMsiMig), + VMSTATE_END_OF_LIST() + }, +}; + +static const char *spapr_phb_root_bus_path(PCIHostState *host_bridge, + PCIBus *rootbus) +{ + SpaprPhbState *sphb = SPAPR_PCI_HOST_BRIDGE(host_bridge); + + return sphb->dtbusname; +} + +static void spapr_phb_class_init(ObjectClass *klass, void *data) +{ + PCIHostBridgeClass *hc = PCI_HOST_BRIDGE_CLASS(klass); + DeviceClass *dc = DEVICE_CLASS(klass); + 
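+    /*
+     * The PHB acts as the hotplug handler for its own root bus (wired up
+     * in spapr_phb_realize() via qbus_set_hotplug_handler()): pre_plug
+     * validates, plug attaches the DRC and signals the guest,
+     * unplug_request marks the DRC for removal, and unplug finalizes the
+     * device once the guest has released it.
+     */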
HotplugHandlerClass *hp = HOTPLUG_HANDLER_CLASS(klass); + + hc->root_bus_path = spapr_phb_root_bus_path; + dc->realize = spapr_phb_realize; + dc->unrealize = spapr_phb_unrealize; + device_class_set_props(dc, spapr_phb_properties); + dc->reset = spapr_phb_reset; + dc->vmsd = &vmstate_spapr_pci; + /* Supported by TYPE_SPAPR_MACHINE */ + dc->user_creatable = true; + set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); + hp->pre_plug = spapr_pci_pre_plug; + hp->plug = spapr_pci_plug; + hp->unplug = spapr_pci_unplug; + hp->unplug_request = spapr_pci_unplug_request; +} + +static const TypeInfo spapr_phb_info = { + .name = TYPE_SPAPR_PCI_HOST_BRIDGE, + .parent = TYPE_PCI_HOST_BRIDGE, + .instance_size = sizeof(SpaprPhbState), + .instance_finalize = spapr_phb_finalizefn, + .class_init = spapr_phb_class_init, + .interfaces = (InterfaceInfo[]) { + { TYPE_HOTPLUG_HANDLER }, + { } + } +}; + +static void spapr_phb_pci_enumerate_bridge(PCIBus *bus, PCIDevice *pdev, + void *opaque) +{ + unsigned int *bus_no = opaque; + PCIBus *sec_bus = NULL; + + if ((pci_default_read_config(pdev, PCI_HEADER_TYPE, 1) != + PCI_HEADER_TYPE_BRIDGE)) { + return; + } + + (*bus_no)++; + pci_default_write_config(pdev, PCI_PRIMARY_BUS, pci_dev_bus_num(pdev), 1); + pci_default_write_config(pdev, PCI_SECONDARY_BUS, *bus_no, 1); + pci_default_write_config(pdev, PCI_SUBORDINATE_BUS, *bus_no, 1); + + sec_bus = pci_bridge_get_sec_bus(PCI_BRIDGE(pdev)); + if (!sec_bus) { + return; + } + + pci_for_each_device_under_bus(sec_bus, spapr_phb_pci_enumerate_bridge, + bus_no); + pci_default_write_config(pdev, PCI_SUBORDINATE_BUS, *bus_no, 1); +} + +static void spapr_phb_pci_enumerate(SpaprPhbState *phb) +{ + PCIBus *bus = PCI_HOST_BRIDGE(phb)->bus; + unsigned int bus_no = 0; + + pci_for_each_device_under_bus(bus, spapr_phb_pci_enumerate_bridge, + &bus_no); + +} + +int spapr_dt_phb(SpaprMachineState *spapr, SpaprPhbState *phb, + uint32_t intc_phandle, void *fdt, int *node_offset) +{ + int bus_off, i, j, ret; + uint32_t bus_range[] = { cpu_to_be32(0), cpu_to_be32(0xff) }; + struct { + uint32_t hi; + uint64_t child; + uint64_t parent; + uint64_t size; + } QEMU_PACKED ranges[] = { + { + cpu_to_be32(b_ss(1)), cpu_to_be64(0), + cpu_to_be64(phb->io_win_addr), + cpu_to_be64(memory_region_size(&phb->iospace)), + }, + { + cpu_to_be32(b_ss(2)), cpu_to_be64(SPAPR_PCI_MEM_WIN_BUS_OFFSET), + cpu_to_be64(phb->mem_win_addr), + cpu_to_be64(phb->mem_win_size), + }, + { + cpu_to_be32(b_ss(3)), cpu_to_be64(phb->mem64_win_pciaddr), + cpu_to_be64(phb->mem64_win_addr), + cpu_to_be64(phb->mem64_win_size), + }, + }; + const unsigned sizeof_ranges = + (phb->mem64_win_size ? 
3 : 2) * sizeof(ranges[0]);
+    uint64_t bus_reg[] = { cpu_to_be64(phb->buid), 0 };
+    uint32_t interrupt_map_mask[] = {
+        cpu_to_be32(b_ddddd(-1)|b_fff(0)), 0x0, 0x0, cpu_to_be32(-1)};
+    uint32_t interrupt_map[PCI_SLOT_MAX * PCI_NUM_PINS][7];
+    uint32_t ddw_applicable[] = {
+        cpu_to_be32(RTAS_IBM_QUERY_PE_DMA_WINDOW),
+        cpu_to_be32(RTAS_IBM_CREATE_PE_DMA_WINDOW),
+        cpu_to_be32(RTAS_IBM_REMOVE_PE_DMA_WINDOW)
+    };
+    uint32_t ddw_extensions[] = {
+        cpu_to_be32(1),
+        cpu_to_be32(RTAS_IBM_RESET_PE_DMA_WINDOW)
+    };
+    SpaprTceTable *tcet;
+    SpaprDrc *drc;
+    Error *err = NULL;
+
+    /* Start populating the FDT */
+    _FDT(bus_off = fdt_add_subnode(fdt, 0, phb->dtbusname));
+    if (node_offset) {
+        *node_offset = bus_off;
+    }
+
+    /* Write PHB properties */
+    _FDT(fdt_setprop_string(fdt, bus_off, "device_type", "pci"));
+    _FDT(fdt_setprop_string(fdt, bus_off, "compatible", "IBM,Logical_PHB"));
+    _FDT(fdt_setprop_cell(fdt, bus_off, "#interrupt-cells", 0x1));
+    _FDT(fdt_setprop(fdt, bus_off, "used-by-rtas", NULL, 0));
+    _FDT(fdt_setprop(fdt, bus_off, "bus-range", &bus_range, sizeof(bus_range)));
+    _FDT(fdt_setprop(fdt, bus_off, "ranges", &ranges, sizeof_ranges));
+    _FDT(fdt_setprop(fdt, bus_off, "reg", &bus_reg, sizeof(bus_reg)));
+    _FDT(fdt_setprop_cell(fdt, bus_off, "ibm,pci-config-space-type", 0x1));
+    _FDT(fdt_setprop_cell(fdt, bus_off, "ibm,pe-total-#msi",
+                          spapr_irq_nr_msis(spapr)));
+
+    /* Dynamic DMA window */
+    if (phb->ddw_enabled) {
+        _FDT(fdt_setprop(fdt, bus_off, "ibm,ddw-applicable", &ddw_applicable,
+                         sizeof(ddw_applicable)));
+        _FDT(fdt_setprop(fdt, bus_off, "ibm,ddw-extensions",
+                         &ddw_extensions, sizeof(ddw_extensions)));
+    }
+
+    /* Advertise NUMA via ibm,associativity */
+    if (phb->numa_node != -1) {
+        spapr_numa_write_associativity_dt(spapr, fdt, bus_off, phb->numa_node);
+    }
+
+    /* Build the interrupt-map, this must match what is done
+     * in pci_swizzle_map_irq_fn
+     */
+    _FDT(fdt_setprop(fdt, bus_off, "interrupt-map-mask",
+                     &interrupt_map_mask, sizeof(interrupt_map_mask)));
+    for (i = 0; i < PCI_SLOT_MAX; i++) {
+        for (j = 0; j < PCI_NUM_PINS; j++) {
+            uint32_t *irqmap = interrupt_map[i*PCI_NUM_PINS + j];
+            int lsi_num = pci_swizzle(i, j);
+
+            irqmap[0] = cpu_to_be32(b_ddddd(i)|b_fff(0));
+            irqmap[1] = 0;
+            irqmap[2] = 0;
+            irqmap[3] = cpu_to_be32(j+1);
+            irqmap[4] = cpu_to_be32(intc_phandle);
+            spapr_dt_irq(&irqmap[5], phb->lsi_table[lsi_num].irq, true);
+        }
+    }
+    /* Write interrupt map */
+    _FDT(fdt_setprop(fdt, bus_off, "interrupt-map", &interrupt_map,
+                     sizeof(interrupt_map)));
+
+    tcet = spapr_tce_find_by_liobn(phb->dma_liobn[0]);
+    if (!tcet) {
+        return -1;
+    }
+    spapr_dma_dt(fdt, bus_off, "ibm,dma-window",
+                 tcet->liobn, tcet->bus_offset,
+                 tcet->nb_table << tcet->page_shift);
+
+    drc = spapr_drc_by_id(TYPE_SPAPR_DRC_PHB, phb->index);
+    if (drc) {
+        uint32_t drc_index = cpu_to_be32(spapr_drc_index(drc));
+
+        _FDT(fdt_setprop(fdt, bus_off, "ibm,my-drc-index", &drc_index,
+                         sizeof(drc_index)));
+    }
+
+    /* Walk the bridges and program the bus numbers */
+    spapr_phb_pci_enumerate(phb);
+    _FDT(fdt_setprop_cell(fdt, bus_off, "qemu,phb-enumerated", 0x1));
+
+    /* Walk the bridge and subordinate buses */
+    ret = spapr_dt_pci_bus(phb, PCI_HOST_BRIDGE(phb)->bus, fdt, bus_off);
+    if (ret < 0) {
+        return ret;
+    }
+
+    spapr_phb_nvgpu_populate_dt(phb, fdt, bus_off, &err);
+    if (err) {
+        error_report_err(err);
+    }
+    spapr_phb_nvgpu_ram_populate_dt(phb, fdt);
+
+    return 0;
+}
+
+void spapr_pci_rtas_init(void)
+{
+    spapr_rtas_register(RTAS_READ_PCI_CONFIG, "read-pci-config",
rtas_read_pci_config); + spapr_rtas_register(RTAS_WRITE_PCI_CONFIG, "write-pci-config", + rtas_write_pci_config); + spapr_rtas_register(RTAS_IBM_READ_PCI_CONFIG, "ibm,read-pci-config", + rtas_ibm_read_pci_config); + spapr_rtas_register(RTAS_IBM_WRITE_PCI_CONFIG, "ibm,write-pci-config", + rtas_ibm_write_pci_config); + if (msi_nonbroken) { + spapr_rtas_register(RTAS_IBM_QUERY_INTERRUPT_SOURCE_NUMBER, + "ibm,query-interrupt-source-number", + rtas_ibm_query_interrupt_source_number); + spapr_rtas_register(RTAS_IBM_CHANGE_MSI, "ibm,change-msi", + rtas_ibm_change_msi); + } + + spapr_rtas_register(RTAS_IBM_SET_EEH_OPTION, + "ibm,set-eeh-option", + rtas_ibm_set_eeh_option); + spapr_rtas_register(RTAS_IBM_GET_CONFIG_ADDR_INFO2, + "ibm,get-config-addr-info2", + rtas_ibm_get_config_addr_info2); + spapr_rtas_register(RTAS_IBM_READ_SLOT_RESET_STATE2, + "ibm,read-slot-reset-state2", + rtas_ibm_read_slot_reset_state2); + spapr_rtas_register(RTAS_IBM_SET_SLOT_RESET, + "ibm,set-slot-reset", + rtas_ibm_set_slot_reset); + spapr_rtas_register(RTAS_IBM_CONFIGURE_PE, + "ibm,configure-pe", + rtas_ibm_configure_pe); + spapr_rtas_register(RTAS_IBM_SLOT_ERROR_DETAIL, + "ibm,slot-error-detail", + rtas_ibm_slot_error_detail); +} + +static void spapr_pci_register_types(void) +{ + type_register_static(&spapr_phb_info); +} + +type_init(spapr_pci_register_types) + +static int spapr_switch_one_vga(DeviceState *dev, void *opaque) +{ + bool be = *(bool *)opaque; + + if (object_dynamic_cast(OBJECT(dev), "VGA") + || object_dynamic_cast(OBJECT(dev), "secondary-vga") + || object_dynamic_cast(OBJECT(dev), "bochs-display") + || object_dynamic_cast(OBJECT(dev), "virtio-vga")) { + object_property_set_bool(OBJECT(dev), "big-endian-framebuffer", be, + &error_abort); + } + return 0; +} + +void spapr_pci_switch_vga(SpaprMachineState *spapr, bool big_endian) +{ + SpaprPhbState *sphb; + + /* + * For backward compatibility with existing guests, we switch + * the endianness of the VGA controller when changing the guest + * interrupt mode + */ + QLIST_FOREACH(sphb, &spapr->phbs, list) { + BusState *bus = &PCI_HOST_BRIDGE(sphb)->bus->qbus; + qbus_walk_children(bus, spapr_switch_one_vga, NULL, NULL, NULL, + &big_endian); + } +} diff --git a/hw/ppc/spapr_pci_nvlink2.c b/hw/ppc/spapr_pci_nvlink2.c new file mode 100644 index 000000000..7fb0cf4d0 --- /dev/null +++ b/hw/ppc/spapr_pci_nvlink2.c @@ -0,0 +1,445 @@ +/* + * QEMU sPAPR PCI for NVLink2 pass through + * + * Copyright (c) 2019 Alexey Kardashevskiy, IBM Corporation. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu-common.h" +#include "hw/pci/pci.h" +#include "hw/pci-host/spapr.h" +#include "hw/ppc/spapr_numa.h" +#include "qemu/error-report.h" +#include "hw/ppc/fdt.h" +#include "hw/pci/pci_bridge.h" + +#define PHANDLE_PCIDEV(phb, pdev) (0x12000000 | \ + (((phb)->index) << 16) | ((pdev)->devfn)) +#define PHANDLE_GPURAM(phb, n) (0x110000FF | ((n) << 8) | \ + (((phb)->index) << 16)) +#define PHANDLE_NVLINK(phb, gn, nn) (0x00130000 | (((phb)->index) << 8) | \ + ((gn) << 4) | (nn)) + +typedef struct SpaprPhbPciNvGpuSlot { + uint64_t tgt; + uint64_t gpa; + unsigned numa_id; + PCIDevice *gpdev; + int linknum; + struct { + uint64_t atsd_gpa; + PCIDevice *npdev; + uint32_t link_speed; + } links[NVGPU_MAX_LINKS]; +} SpaprPhbPciNvGpuSlot; + +struct SpaprPhbPciNvGpuConfig { + uint64_t nv2_ram_current; + uint64_t nv2_atsd_current; + int num; /* number of non empty (i.e. tgt!=0) entries in slots[] */ + SpaprPhbPciNvGpuSlot slots[NVGPU_MAX_NUM]; + Error *err; +}; + +static SpaprPhbPciNvGpuSlot * +spapr_nvgpu_get_slot(SpaprPhbPciNvGpuConfig *nvgpus, uint64_t tgt) +{ + int i; + + /* Search for partially collected "slot" */ + for (i = 0; i < nvgpus->num; ++i) { + if (nvgpus->slots[i].tgt == tgt) { + return &nvgpus->slots[i]; + } + } + + if (nvgpus->num == ARRAY_SIZE(nvgpus->slots)) { + return NULL; + } + + i = nvgpus->num; + nvgpus->slots[i].tgt = tgt; + ++nvgpus->num; + + return &nvgpus->slots[i]; +} + +static void spapr_pci_collect_nvgpu(SpaprPhbPciNvGpuConfig *nvgpus, + PCIDevice *pdev, uint64_t tgt, + MemoryRegion *mr, Error **errp) +{ + MachineState *machine = MACHINE(qdev_get_machine()); + SpaprMachineState *spapr = SPAPR_MACHINE(machine); + SpaprPhbPciNvGpuSlot *nvslot = spapr_nvgpu_get_slot(nvgpus, tgt); + + if (!nvslot) { + error_setg(errp, "Found too many GPUs per vPHB"); + return; + } + g_assert(!nvslot->gpdev); + nvslot->gpdev = pdev; + + nvslot->gpa = nvgpus->nv2_ram_current; + nvgpus->nv2_ram_current += memory_region_size(mr); + nvslot->numa_id = spapr->gpu_numa_id; + ++spapr->gpu_numa_id; +} + +static void spapr_pci_collect_nvnpu(SpaprPhbPciNvGpuConfig *nvgpus, + PCIDevice *pdev, uint64_t tgt, + MemoryRegion *mr, Error **errp) +{ + SpaprPhbPciNvGpuSlot *nvslot = spapr_nvgpu_get_slot(nvgpus, tgt); + int j; + + if (!nvslot) { + error_setg(errp, "Found too many NVLink bridges per vPHB"); + return; + } + + j = nvslot->linknum; + if (j == ARRAY_SIZE(nvslot->links)) { + error_setg(errp, "Found too many NVLink bridges per GPU"); + return; + } + ++nvslot->linknum; + + g_assert(!nvslot->links[j].npdev); + nvslot->links[j].npdev = pdev; + nvslot->links[j].atsd_gpa = nvgpus->nv2_atsd_current; + nvgpus->nv2_atsd_current += memory_region_size(mr); + nvslot->links[j].link_speed = + object_property_get_uint(OBJECT(pdev), "nvlink2-link-speed", NULL); +} + +static void spapr_phb_pci_collect_nvgpu(PCIBus *bus, PCIDevice *pdev, + void *opaque) +{ + PCIBus *sec_bus; + Object *po = OBJECT(pdev); + uint64_t tgt = object_property_get_uint(po, "nvlink2-tgt", NULL); + + if (tgt) { + Error *local_err = NULL; + SpaprPhbPciNvGpuConfig *nvgpus = opaque; + Object *mr_gpu = object_property_get_link(po, "nvlink2-mr[0]", NULL); + Object *mr_npu = object_property_get_link(po, "nvlink2-atsd-mr[0]", 
+ NULL); + + g_assert(mr_gpu || mr_npu); + if (mr_gpu) { + spapr_pci_collect_nvgpu(nvgpus, pdev, tgt, MEMORY_REGION(mr_gpu), + &local_err); + } else { + spapr_pci_collect_nvnpu(nvgpus, pdev, tgt, MEMORY_REGION(mr_npu), + &local_err); + } + error_propagate(&nvgpus->err, local_err); + } + if ((pci_default_read_config(pdev, PCI_HEADER_TYPE, 1) != + PCI_HEADER_TYPE_BRIDGE)) { + return; + } + + sec_bus = pci_bridge_get_sec_bus(PCI_BRIDGE(pdev)); + if (!sec_bus) { + return; + } + + pci_for_each_device_under_bus(sec_bus, spapr_phb_pci_collect_nvgpu, opaque); +} + +void spapr_phb_nvgpu_setup(SpaprPhbState *sphb, Error **errp) +{ + int i, j, valid_gpu_num; + PCIBus *bus; + + /* Search for GPUs and NPUs */ + if (!sphb->nv2_gpa_win_addr || !sphb->nv2_atsd_win_addr) { + return; + } + + sphb->nvgpus = g_new0(SpaprPhbPciNvGpuConfig, 1); + sphb->nvgpus->nv2_ram_current = sphb->nv2_gpa_win_addr; + sphb->nvgpus->nv2_atsd_current = sphb->nv2_atsd_win_addr; + + bus = PCI_HOST_BRIDGE(sphb)->bus; + pci_for_each_device_under_bus(bus, spapr_phb_pci_collect_nvgpu, + sphb->nvgpus); + + if (sphb->nvgpus->err) { + error_propagate(errp, sphb->nvgpus->err); + sphb->nvgpus->err = NULL; + goto cleanup_exit; + } + + /* Add found GPU RAM and ATSD MRs if found */ + for (i = 0, valid_gpu_num = 0; i < sphb->nvgpus->num; ++i) { + Object *nvmrobj; + SpaprPhbPciNvGpuSlot *nvslot = &sphb->nvgpus->slots[i]; + + if (!nvslot->gpdev) { + continue; + } + nvmrobj = object_property_get_link(OBJECT(nvslot->gpdev), + "nvlink2-mr[0]", NULL); + /* ATSD is pointless without GPU RAM MR so skip those */ + if (!nvmrobj) { + continue; + } + + ++valid_gpu_num; + memory_region_add_subregion(get_system_memory(), nvslot->gpa, + MEMORY_REGION(nvmrobj)); + + for (j = 0; j < nvslot->linknum; ++j) { + Object *atsdmrobj; + + atsdmrobj = object_property_get_link(OBJECT(nvslot->links[j].npdev), + "nvlink2-atsd-mr[0]", NULL); + if (!atsdmrobj) { + continue; + } + memory_region_add_subregion(get_system_memory(), + nvslot->links[j].atsd_gpa, + MEMORY_REGION(atsdmrobj)); + } + } + + if (valid_gpu_num) { + return; + } + /* We did not find any interesting GPU */ +cleanup_exit: + g_free(sphb->nvgpus); + sphb->nvgpus = NULL; +} + +void spapr_phb_nvgpu_free(SpaprPhbState *sphb) +{ + int i, j; + + if (!sphb->nvgpus) { + return; + } + + for (i = 0; i < sphb->nvgpus->num; ++i) { + SpaprPhbPciNvGpuSlot *nvslot = &sphb->nvgpus->slots[i]; + Object *nv_mrobj = object_property_get_link(OBJECT(nvslot->gpdev), + "nvlink2-mr[0]", NULL); + + if (nv_mrobj) { + memory_region_del_subregion(get_system_memory(), + MEMORY_REGION(nv_mrobj)); + } + for (j = 0; j < nvslot->linknum; ++j) { + PCIDevice *npdev = nvslot->links[j].npdev; + Object *atsd_mrobj; + atsd_mrobj = object_property_get_link(OBJECT(npdev), + "nvlink2-atsd-mr[0]", NULL); + if (atsd_mrobj) { + memory_region_del_subregion(get_system_memory(), + MEMORY_REGION(atsd_mrobj)); + } + } + } + g_free(sphb->nvgpus); + sphb->nvgpus = NULL; +} + +void spapr_phb_nvgpu_populate_dt(SpaprPhbState *sphb, void *fdt, int bus_off, + Error **errp) +{ + int i, j, atsdnum = 0; + uint64_t atsd[8]; /* The existing limitation of known guests */ + + if (!sphb->nvgpus) { + return; + } + + for (i = 0; (i < sphb->nvgpus->num) && (atsdnum < ARRAY_SIZE(atsd)); ++i) { + SpaprPhbPciNvGpuSlot *nvslot = &sphb->nvgpus->slots[i]; + + if (!nvslot->gpdev) { + continue; + } + for (j = 0; j < nvslot->linknum; ++j) { + if (!nvslot->links[j].atsd_gpa) { + continue; + } + + if (atsdnum == ARRAY_SIZE(atsd)) { + error_report("Only %"PRIuPTR" ATSD registers 
supported", + ARRAY_SIZE(atsd)); + break; + } + atsd[atsdnum] = cpu_to_be64(nvslot->links[j].atsd_gpa); + ++atsdnum; + } + } + + if (!atsdnum) { + error_setg(errp, "No ATSD registers found"); + return; + } + + if (!spapr_phb_eeh_available(sphb)) { + /* + * ibm,mmio-atsd contains ATSD registers; these belong to an NPU PHB + * which we do not emulate as a separate device. Instead we put + * ibm,mmio-atsd to the vPHB with GPU and make sure that we do not + * put GPUs from different IOMMU groups to the same vPHB to ensure + * that the guest will use ATSDs from the corresponding NPU. + */ + error_setg(errp, "ATSD requires separate vPHB per GPU IOMMU group"); + return; + } + + _FDT((fdt_setprop(fdt, bus_off, "ibm,mmio-atsd", atsd, + atsdnum * sizeof(atsd[0])))); +} + +void spapr_phb_nvgpu_ram_populate_dt(SpaprPhbState *sphb, void *fdt) +{ + int i, j, linkidx, npuoff; + char *npuname; + + if (!sphb->nvgpus) { + return; + } + + npuname = g_strdup_printf("npuphb%d", sphb->index); + npuoff = fdt_add_subnode(fdt, 0, npuname); + _FDT(npuoff); + _FDT(fdt_setprop_cell(fdt, npuoff, "#address-cells", 1)); + _FDT(fdt_setprop_cell(fdt, npuoff, "#size-cells", 0)); + /* Advertise NPU as POWER9 so the guest can enable NPU2 contexts */ + _FDT((fdt_setprop_string(fdt, npuoff, "compatible", "ibm,power9-npu"))); + g_free(npuname); + + for (i = 0, linkidx = 0; i < sphb->nvgpus->num; ++i) { + for (j = 0; j < sphb->nvgpus->slots[i].linknum; ++j) { + char *linkname = g_strdup_printf("link@%d", linkidx); + int off = fdt_add_subnode(fdt, npuoff, linkname); + + _FDT(off); + /* _FDT((fdt_setprop_cell(fdt, off, "reg", linkidx))); */ + _FDT((fdt_setprop_string(fdt, off, "compatible", + "ibm,npu-link"))); + _FDT((fdt_setprop_cell(fdt, off, "phandle", + PHANDLE_NVLINK(sphb, i, j)))); + _FDT((fdt_setprop_cell(fdt, off, "ibm,npu-link-index", linkidx))); + g_free(linkname); + ++linkidx; + } + } + + /* Add memory nodes for GPU RAM and mark them unusable */ + for (i = 0; i < sphb->nvgpus->num; ++i) { + SpaprPhbPciNvGpuSlot *nvslot = &sphb->nvgpus->slots[i]; + Object *nv_mrobj = object_property_get_link(OBJECT(nvslot->gpdev), + "nvlink2-mr[0]", + &error_abort); + uint64_t size = object_property_get_uint(nv_mrobj, "size", NULL); + uint64_t mem_reg[2] = { cpu_to_be64(nvslot->gpa), cpu_to_be64(size) }; + char *mem_name = g_strdup_printf("memory@%"PRIx64, nvslot->gpa); + int off = fdt_add_subnode(fdt, 0, mem_name); + + _FDT(off); + _FDT((fdt_setprop_string(fdt, off, "device_type", "memory"))); + _FDT((fdt_setprop(fdt, off, "reg", mem_reg, sizeof(mem_reg)))); + + spapr_numa_write_associativity_dt(SPAPR_MACHINE(qdev_get_machine()), + fdt, off, nvslot->numa_id); + + _FDT((fdt_setprop_string(fdt, off, "compatible", + "ibm,coherent-device-memory"))); + + mem_reg[1] = cpu_to_be64(0); + _FDT((fdt_setprop(fdt, off, "linux,usable-memory", mem_reg, + sizeof(mem_reg)))); + _FDT((fdt_setprop_cell(fdt, off, "phandle", + PHANDLE_GPURAM(sphb, i)))); + g_free(mem_name); + } + +} + +void spapr_phb_nvgpu_populate_pcidev_dt(PCIDevice *dev, void *fdt, int offset, + SpaprPhbState *sphb) +{ + int i, j; + + if (!sphb->nvgpus) { + return; + } + + for (i = 0; i < sphb->nvgpus->num; ++i) { + SpaprPhbPciNvGpuSlot *nvslot = &sphb->nvgpus->slots[i]; + + /* Skip "slot" without attached GPU */ + if (!nvslot->gpdev) { + continue; + } + if (dev == nvslot->gpdev) { + uint32_t npus[nvslot->linknum]; + + for (j = 0; j < nvslot->linknum; ++j) { + PCIDevice *npdev = nvslot->links[j].npdev; + + npus[j] = cpu_to_be32(PHANDLE_PCIDEV(sphb, npdev)); + } + _FDT(fdt_setprop(fdt, 
offset, "ibm,npu", npus, + j * sizeof(npus[0]))); + _FDT((fdt_setprop_cell(fdt, offset, "phandle", + PHANDLE_PCIDEV(sphb, dev)))); + continue; + } + + for (j = 0; j < nvslot->linknum; ++j) { + if (dev != nvslot->links[j].npdev) { + continue; + } + + _FDT((fdt_setprop_cell(fdt, offset, "phandle", + PHANDLE_PCIDEV(sphb, dev)))); + _FDT(fdt_setprop_cell(fdt, offset, "ibm,gpu", + PHANDLE_PCIDEV(sphb, nvslot->gpdev))); + _FDT((fdt_setprop_cell(fdt, offset, "ibm,nvlink", + PHANDLE_NVLINK(sphb, i, j)))); + /* + * If we ever want to emulate GPU RAM at the same location as on + * the host - here is the encoding GPA->TGT: + * + * gta = ((sphb->nv2_gpa >> 42) & 0x1) << 42; + * gta |= ((sphb->nv2_gpa >> 45) & 0x3) << 43; + * gta |= ((sphb->nv2_gpa >> 49) & 0x3) << 45; + * gta |= sphb->nv2_gpa & ((1UL << 43) - 1); + */ + _FDT(fdt_setprop_cell(fdt, offset, "memory-region", + PHANDLE_GPURAM(sphb, i))); + _FDT(fdt_setprop_u64(fdt, offset, "ibm,device-tgt-addr", + nvslot->tgt)); + _FDT(fdt_setprop_cell(fdt, offset, "ibm,nvlink-speed", + nvslot->links[j].link_speed)); + } + } +} diff --git a/hw/ppc/spapr_pci_vfio.c b/hw/ppc/spapr_pci_vfio.c new file mode 100644 index 000000000..2a76b4e0b --- /dev/null +++ b/hw/ppc/spapr_pci_vfio.c @@ -0,0 +1,217 @@ +/* + * QEMU sPAPR PCI host for VFIO + * + * Copyright (c) 2011-2014 Alexey Kardashevskiy, IBM Corporation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +#include "qemu/osdep.h" +#include +#include "hw/ppc/spapr.h" +#include "hw/pci-host/spapr.h" +#include "hw/pci/msix.h" +#include "hw/vfio/vfio.h" +#include "qemu/error-report.h" + +bool spapr_phb_eeh_available(SpaprPhbState *sphb) +{ + return vfio_eeh_as_ok(&sphb->iommu_as); +} + +static void spapr_phb_vfio_eeh_reenable(SpaprPhbState *sphb) +{ + vfio_eeh_as_op(&sphb->iommu_as, VFIO_EEH_PE_ENABLE); +} + +void spapr_phb_vfio_reset(DeviceState *qdev) +{ + /* + * The PE might be in frozen state. To reenable the EEH + * functionality on it will clean the frozen state, which + * ensures that the contained PCI devices will work properly + * after reboot. + */ + spapr_phb_vfio_eeh_reenable(SPAPR_PCI_HOST_BRIDGE(qdev)); +} + +static void spapr_eeh_pci_find_device(PCIBus *bus, PCIDevice *pdev, + void *opaque) +{ + bool *found = opaque; + + if (object_dynamic_cast(OBJECT(pdev), "vfio-pci")) { + *found = true; + } +} + +int spapr_phb_vfio_eeh_set_option(SpaprPhbState *sphb, + unsigned int addr, int option) +{ + uint32_t op; + int ret; + + switch (option) { + case RTAS_EEH_DISABLE: + op = VFIO_EEH_PE_DISABLE; + break; + case RTAS_EEH_ENABLE: { + PCIHostState *phb; + bool found = false; + + /* + * The EEH functionality is enabled per sphb level instead of + * per PCI device. We have already identified this specific sphb + * based on buid passed as argument to ibm,set-eeh-option rtas + * call. Now we just need to check the validity of the PCI + * pass-through devices (vfio-pci) under this sphb bus. 
+         * We have already validated that all the devices under this sphb
+         * are from the same iommu group (within the same PE) before coming
+         * here.
+         *
+         * Prior to Linux commit 98ba956f6a389 ("powerpc/pseries/eeh:
+         * Rework device EEH PE determination") the kernel would call
+         * eeh-set-option for each device in the PE using the device's
+         * config_address as the argument rather than the PE address.
+         * Hence, rejecting a supplied config_addr merely because it does
+         * not match this PHB would break kernel versions v5.9 and older:
+         * if we return an error from eeh-set-option when the argument
+         * isn't a valid PE address, those kernels interpret that as EEH
+         * not being supported.
+         */
+        phb = PCI_HOST_BRIDGE(sphb);
+        pci_for_each_device(phb->bus, (addr >> 16) & 0xFF,
+                            spapr_eeh_pci_find_device, &found);
+
+        if (!found) {
+            return RTAS_OUT_PARAM_ERROR;
+        }
+
+        op = VFIO_EEH_PE_ENABLE;
+        break;
+    }
+    case RTAS_EEH_THAW_IO:
+        op = VFIO_EEH_PE_UNFREEZE_IO;
+        break;
+    case RTAS_EEH_THAW_DMA:
+        op = VFIO_EEH_PE_UNFREEZE_DMA;
+        break;
+    default:
+        return RTAS_OUT_PARAM_ERROR;
+    }
+
+    ret = vfio_eeh_as_op(&sphb->iommu_as, op);
+    if (ret < 0) {
+        return RTAS_OUT_HW_ERROR;
+    }
+
+    return RTAS_OUT_SUCCESS;
+}
+
+int spapr_phb_vfio_eeh_get_state(SpaprPhbState *sphb, int *state)
+{
+    int ret;
+
+    ret = vfio_eeh_as_op(&sphb->iommu_as, VFIO_EEH_PE_GET_STATE);
+    if (ret < 0) {
+        return RTAS_OUT_PARAM_ERROR;
+    }
+
+    *state = ret;
+    return RTAS_OUT_SUCCESS;
+}
+
+static void spapr_phb_vfio_eeh_clear_dev_msix(PCIBus *bus,
+                                              PCIDevice *pdev,
+                                              void *opaque)
+{
+    /* Check if the device is a VFIO PCI device */
+    if (!object_dynamic_cast(OBJECT(pdev), "vfio-pci")) {
+        return;
+    }
+
+    /*
+     * The MSIx table will be cleaned out by reset. We need to
+     * disable it so that it can be reenabled properly. Also,
+     * the cached MSIx table should be cleared as it's not
+     * reflecting the contents in hardware.
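+     * (Clearing PCI_MSIX_FLAGS_ENABLE below is the same config-space
+     * write a guest driver performs when tearing down MSI-X;
+     * msix_reset() then drops the cached table state.)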
+     */
+    if (msix_enabled(pdev)) {
+        uint16_t flags;
+
+        flags = pci_host_config_read_common(pdev,
+                                            pdev->msix_cap + PCI_MSIX_FLAGS,
+                                            pci_config_size(pdev), 2);
+        flags &= ~PCI_MSIX_FLAGS_ENABLE;
+        pci_host_config_write_common(pdev,
+                                     pdev->msix_cap + PCI_MSIX_FLAGS,
+                                     pci_config_size(pdev), flags, 2);
+    }
+
+    msix_reset(pdev);
+}
+
+static void spapr_phb_vfio_eeh_clear_bus_msix(PCIBus *bus, void *opaque)
+{
+    pci_for_each_device_under_bus(bus, spapr_phb_vfio_eeh_clear_dev_msix,
+                                  NULL);
+}
+
+static void spapr_phb_vfio_eeh_pre_reset(SpaprPhbState *sphb)
+{
+    PCIHostState *phb = PCI_HOST_BRIDGE(sphb);
+
+    pci_for_each_bus(phb->bus, spapr_phb_vfio_eeh_clear_bus_msix, NULL);
+}
+
+int spapr_phb_vfio_eeh_reset(SpaprPhbState *sphb, int option)
+{
+    uint32_t op;
+    int ret;
+
+    switch (option) {
+    case RTAS_SLOT_RESET_DEACTIVATE:
+        op = VFIO_EEH_PE_RESET_DEACTIVATE;
+        break;
+    case RTAS_SLOT_RESET_HOT:
+        spapr_phb_vfio_eeh_pre_reset(sphb);
+        op = VFIO_EEH_PE_RESET_HOT;
+        break;
+    case RTAS_SLOT_RESET_FUNDAMENTAL:
+        spapr_phb_vfio_eeh_pre_reset(sphb);
+        op = VFIO_EEH_PE_RESET_FUNDAMENTAL;
+        break;
+    default:
+        return RTAS_OUT_PARAM_ERROR;
+    }
+
+    ret = vfio_eeh_as_op(&sphb->iommu_as, op);
+    if (ret < 0) {
+        return RTAS_OUT_HW_ERROR;
+    }
+
+    return RTAS_OUT_SUCCESS;
+}
+
+int spapr_phb_vfio_eeh_configure(SpaprPhbState *sphb)
+{
+    int ret;
+
+    ret = vfio_eeh_as_op(&sphb->iommu_as, VFIO_EEH_PE_CONFIGURE);
+    if (ret < 0) {
+        return RTAS_OUT_PARAM_ERROR;
+    }
+
+    return RTAS_OUT_SUCCESS;
+}
diff --git a/hw/ppc/spapr_rng.c b/hw/ppc/spapr_rng.c
new file mode 100644
index 000000000..df5c4b968
--- /dev/null
+++ b/hw/ppc/spapr_rng.c
@@ -0,0 +1,162 @@
+/*
+ * QEMU sPAPR random number generator "device" for H_RANDOM hypercall
+ *
+ * Copyright 2015 Thomas Huth, Red Hat Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/error-report.h" +#include "qemu/main-loop.h" +#include "qemu/module.h" +#include "sysemu/device_tree.h" +#include "sysemu/rng.h" +#include "hw/ppc/spapr.h" +#include "hw/qdev-properties.h" +#include "kvm_ppc.h" +#include "qom/object.h" + +OBJECT_DECLARE_SIMPLE_TYPE(SpaprRngState, SPAPR_RNG) + +struct SpaprRngState { + /*< private >*/ + DeviceState ds; + RngBackend *backend; + bool use_kvm; +}; + +struct HRandomData { + QemuSemaphore sem; + union { + uint64_t v64; + uint8_t v8[8]; + } val; + int received; +}; +typedef struct HRandomData HRandomData; + +/* Callback function for the RngBackend */ +static void random_recv(void *dest, const void *src, size_t size) +{ + HRandomData *hrdp = dest; + + if (src && size > 0) { + assert(size + hrdp->received <= sizeof(hrdp->val.v8)); + memcpy(&hrdp->val.v8[hrdp->received], src, size); + hrdp->received += size; + } + + qemu_sem_post(&hrdp->sem); +} + +/* Handler for the H_RANDOM hypercall */ +static target_ulong h_random(PowerPCCPU *cpu, SpaprMachineState *spapr, + target_ulong opcode, target_ulong *args) +{ + SpaprRngState *rngstate; + HRandomData hrdata; + + rngstate = SPAPR_RNG(object_resolve_path_type("", TYPE_SPAPR_RNG, NULL)); + + if (!rngstate || !rngstate->backend) { + return H_HARDWARE; + } + + qemu_sem_init(&hrdata.sem, 0); + hrdata.val.v64 = 0; + hrdata.received = 0; + + while (hrdata.received < 8) { + rng_backend_request_entropy(rngstate->backend, 8 - hrdata.received, + random_recv, &hrdata); + qemu_mutex_unlock_iothread(); + qemu_sem_wait(&hrdata.sem); + qemu_mutex_lock_iothread(); + } + + qemu_sem_destroy(&hrdata.sem); + args[0] = hrdata.val.v64; + + return H_SUCCESS; +} + +static void spapr_rng_instance_init(Object *obj) +{ + if (object_resolve_path_type("", TYPE_SPAPR_RNG, NULL) != NULL) { + error_report("spapr-rng can not be instantiated twice!"); + return; + } + + object_property_set_description(obj, "rng", + "ID of the random number generator backend"); +} + +static void spapr_rng_realize(DeviceState *dev, Error **errp) +{ + + SpaprRngState *rngstate = SPAPR_RNG(dev); + + if (rngstate->use_kvm) { + if (kvmppc_enable_hwrng() == 0) { + return; + } + /* + * If user specified both, use-kvm and a backend, we fall back to + * the backend now. If not, provide an appropriate error message. 
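+     *
+     * Typical usage (illustrative command lines, not mandated by this
+     * code): back the device with an entropy source via
+     *   -object rng-random,filename=/dev/urandom,id=rng0 \
+     *   -device spapr-rng,rng=rng0
+     * or, on a POWER host whose kernel exposes a hardware RNG, just
+     *   -device spapr-rng,use-kvm=true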
+ */ + if (!rngstate->backend) { + error_setg(errp, "Could not initialize in-kernel H_RANDOM call!"); + return; + } + } + + if (rngstate->backend) { + spapr_register_hypercall(H_RANDOM, h_random); + } else { + error_setg(errp, "spapr-rng needs an RNG backend!"); + } +} + +static Property spapr_rng_properties[] = { + DEFINE_PROP_BOOL("use-kvm", SpaprRngState, use_kvm, false), + DEFINE_PROP_LINK("rng", SpaprRngState, backend, TYPE_RNG_BACKEND, + RngBackend *), + DEFINE_PROP_END_OF_LIST(), +}; + +static void spapr_rng_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + + dc->realize = spapr_rng_realize; + set_bit(DEVICE_CATEGORY_MISC, dc->categories); + device_class_set_props(dc, spapr_rng_properties); + dc->hotpluggable = false; +} + +static const TypeInfo spapr_rng_info = { + .name = TYPE_SPAPR_RNG, + .parent = TYPE_DEVICE, + .instance_size = sizeof(SpaprRngState), + .instance_init = spapr_rng_instance_init, + .class_init = spapr_rng_class_init, +}; + +static void spapr_rng_register_type(void) +{ + type_register_static(&spapr_rng_info); +} +type_init(spapr_rng_register_type) diff --git a/hw/ppc/spapr_rtas.c b/hw/ppc/spapr_rtas.c new file mode 100644 index 000000000..b476382ae --- /dev/null +++ b/hw/ppc/spapr_rtas.c @@ -0,0 +1,636 @@ +/* + * QEMU PowerPC pSeries Logical Partition (aka sPAPR) hardware System Emulator + * + * Hypercall based emulated RTAS + * + * Copyright (c) 2010-2011 David Gibson, IBM Corporation. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/log.h"
+#include "qemu/error-report.h"
+#include "sysemu/sysemu.h"
+#include "sysemu/device_tree.h"
+#include "sysemu/cpus.h"
+#include "sysemu/hw_accel.h"
+#include "sysemu/runstate.h"
+#include "kvm_ppc.h"
+
+#include "hw/ppc/spapr.h"
+#include "hw/ppc/spapr_vio.h"
+#include "hw/ppc/spapr_rtas.h"
+#include "hw/ppc/spapr_cpu_core.h"
+#include "hw/ppc/ppc.h"
+
+#include <libfdt.h>
+#include "hw/ppc/spapr_drc.h"
+#include "qemu/cutils.h"
+#include "trace.h"
+#include "hw/ppc/fdt.h"
+#include "target/ppc/mmu-hash64.h"
+#include "target/ppc/mmu-book3s-v3.h"
+#include "migration/blocker.h"
+#include "helper_regs.h"
+
+static void rtas_display_character(PowerPCCPU *cpu, SpaprMachineState *spapr,
+                                   uint32_t token, uint32_t nargs,
+                                   target_ulong args,
+                                   uint32_t nret, target_ulong rets)
+{
+    uint8_t c = rtas_ld(args, 0);
+    SpaprVioDevice *sdev = vty_lookup(spapr, 0);
+
+    if (!sdev) {
+        rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
+    } else {
+        vty_putchars(sdev, &c, sizeof(c));
+        rtas_st(rets, 0, RTAS_OUT_SUCCESS);
+    }
+}
+
+static void rtas_power_off(PowerPCCPU *cpu, SpaprMachineState *spapr,
+                           uint32_t token, uint32_t nargs, target_ulong args,
+                           uint32_t nret, target_ulong rets)
+{
+    if (nargs != 2 || nret != 1) {
+        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
+        return;
+    }
+    qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
+    cpu_stop_current();
+    rtas_st(rets, 0, RTAS_OUT_SUCCESS);
+}
+
+static void rtas_system_reboot(PowerPCCPU *cpu, SpaprMachineState *spapr,
+                               uint32_t token, uint32_t nargs,
+                               target_ulong args,
+                               uint32_t nret, target_ulong rets)
+{
+    if (nargs != 0 || nret != 1) {
+        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
+        return;
+    }
+    qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
+    rtas_st(rets, 0, RTAS_OUT_SUCCESS);
+}
+
+static void rtas_query_cpu_stopped_state(PowerPCCPU *cpu_,
+                                         SpaprMachineState *spapr,
+                                         uint32_t token, uint32_t nargs,
+                                         target_ulong args,
+                                         uint32_t nret, target_ulong rets)
+{
+    target_ulong id;
+    PowerPCCPU *cpu;
+
+    if (nargs != 1 || nret != 2) {
+        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
+        return;
+    }
+
+    id = rtas_ld(args, 0);
+    cpu = spapr_find_cpu(id);
+    if (cpu != NULL) {
+        if (CPU(cpu)->halted) {
+            rtas_st(rets, 1, 0);
+        } else {
+            rtas_st(rets, 1, 2);
+        }
+
+        rtas_st(rets, 0, RTAS_OUT_SUCCESS);
+        return;
+    }
+
+    /* Didn't find a matching cpu */
+    rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
+}
+
+static void rtas_start_cpu(PowerPCCPU *callcpu, SpaprMachineState *spapr,
+                           uint32_t token, uint32_t nargs,
+                           target_ulong args,
+                           uint32_t nret, target_ulong rets)
+{
+    target_ulong id, start, r3;
+    PowerPCCPU *newcpu;
+    CPUPPCState *env;
+    target_ulong lpcr;
+    target_ulong caller_lpcr;
+
+    if (nargs != 3 || nret != 1) {
+        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
+        return;
+    }
+
+    id = rtas_ld(args, 0);
+    start = rtas_ld(args, 1);
+    r3 = rtas_ld(args, 2);
+
+    newcpu = spapr_find_cpu(id);
+    if (!newcpu) {
+        /* Didn't find a matching cpu */
+        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
+        return;
+    }
+
+    env = &newcpu->env;
+
+    if (!CPU(newcpu)->halted) {
+        rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
+        return;
+    }
+
+    cpu_synchronize_state(CPU(newcpu));
+
+    env->msr = (1ULL << MSR_SF) | (1ULL << MSR_ME);
+    hreg_compute_hflags(env);
+
+    caller_lpcr = callcpu->env.spr[SPR_LPCR];
+    lpcr = env->spr[SPR_LPCR];
+
+    /* Set ILE the same way */
+    lpcr = (lpcr & ~LPCR_ILE) | (caller_lpcr & LPCR_ILE);
+
+    /* Set AIL the same way */
+    lpcr = (lpcr & ~LPCR_AIL) | (caller_lpcr & LPCR_AIL);
+
+    if (env->mmu_model == POWERPC_MMU_3_00) {
+        /*
+         * New
cpus are expected to start in the same radix/hash mode + * as the existing CPUs + */ + if (ppc64_v3_radix(callcpu)) { + lpcr |= LPCR_UPRT | LPCR_GTSE | LPCR_HR; + } else { + lpcr &= ~(LPCR_UPRT | LPCR_GTSE | LPCR_HR); + } + env->spr[SPR_PSSCR] &= ~PSSCR_EC; + } + ppc_store_lpcr(newcpu, lpcr); + + /* + * Set the timebase offset of the new CPU to that of the invoking + * CPU. This helps hotplugged CPU to have the correct timebase + * offset. + */ + newcpu->env.tb_env->tb_offset = callcpu->env.tb_env->tb_offset; + + spapr_cpu_set_entry_state(newcpu, start, 0, r3, 0); + + qemu_cpu_kick(CPU(newcpu)); + + rtas_st(rets, 0, RTAS_OUT_SUCCESS); +} + +static void rtas_stop_self(PowerPCCPU *cpu, SpaprMachineState *spapr, + uint32_t token, uint32_t nargs, + target_ulong args, + uint32_t nret, target_ulong rets) +{ + CPUState *cs = CPU(cpu); + CPUPPCState *env = &cpu->env; + PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu); + + /* Disable Power-saving mode Exit Cause exceptions for the CPU. + * This could deliver an interrupt on a dying CPU and crash the + * guest. + * For the same reason, set PSSCR_EC. + */ + ppc_store_lpcr(cpu, env->spr[SPR_LPCR] & ~pcc->lpcr_pm); + env->spr[SPR_PSSCR] |= PSSCR_EC; + cs->halted = 1; + kvmppc_set_reg_ppc_online(cpu, 0); + qemu_cpu_kick(cs); +} + +static void rtas_ibm_suspend_me(PowerPCCPU *cpu, SpaprMachineState *spapr, + uint32_t token, uint32_t nargs, + target_ulong args, + uint32_t nret, target_ulong rets) +{ + CPUState *cs; + + if (nargs != 0 || nret != 1) { + rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); + return; + } + + CPU_FOREACH(cs) { + PowerPCCPU *c = POWERPC_CPU(cs); + CPUPPCState *e = &c->env; + if (c == cpu) { + continue; + } + + /* See h_join */ + if (!cs->halted || (e->msr & (1ULL << MSR_EE))) { + rtas_st(rets, 0, H_MULTI_THREADS_ACTIVE); + return; + } + } + + qemu_system_suspend_request(); + rtas_st(rets, 0, RTAS_OUT_SUCCESS); +} + +static inline int sysparm_st(target_ulong addr, target_ulong len, + const void *val, uint16_t vallen) +{ + hwaddr phys = ppc64_phys_to_real(addr); + + if (len < 2) { + return RTAS_OUT_SYSPARM_PARAM_ERROR; + } + stw_be_phys(&address_space_memory, phys, vallen); + cpu_physical_memory_write(phys + 2, val, MIN(len - 2, vallen)); + return RTAS_OUT_SUCCESS; +} + +static void rtas_ibm_get_system_parameter(PowerPCCPU *cpu, + SpaprMachineState *spapr, + uint32_t token, uint32_t nargs, + target_ulong args, + uint32_t nret, target_ulong rets) +{ + PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu); + MachineState *ms = MACHINE(spapr); + target_ulong parameter = rtas_ld(args, 0); + target_ulong buffer = rtas_ld(args, 1); + target_ulong length = rtas_ld(args, 2); + target_ulong ret; + + switch (parameter) { + case RTAS_SYSPARM_SPLPAR_CHARACTERISTICS: { + char *param_val = g_strdup_printf("MaxEntCap=%d," + "DesMem=%" PRIu64 "," + "DesProcs=%d," + "MaxPlatProcs=%d", + ms->smp.max_cpus, + ms->ram_size / MiB, + ms->smp.cpus, + ms->smp.max_cpus); + if (pcc->n_host_threads > 0) { + char *hostthr_val, *old = param_val; + + /* + * Add HostThrs property. This property is not present in PAPR but + * is expected by some guests to communicate the number of physical + * host threads per core on the system so that they can scale + * information which varies based on the thread configuration. 
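+             * For instance (values purely illustrative): with max_cpus=8,
+             * 4096 MiB of RAM, 4 online vCPUs and 8 host threads per core,
+             * the guest reads back
+             *   MaxEntCap=8,DesMem=4096,DesProcs=4,MaxPlatProcs=8,HostThrs=8
+             * and sysparm_st() stores it prefixed with a 2-byte big-endian
+             * length field.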
+             */
+            hostthr_val = g_strdup_printf(",HostThrs=%d", pcc->n_host_threads);
+            param_val = g_strconcat(param_val, hostthr_val, NULL);
+            g_free(hostthr_val);
+            g_free(old);
+        }
+        ret = sysparm_st(buffer, length, param_val, strlen(param_val) + 1);
+        g_free(param_val);
+        break;
+    }
+    case RTAS_SYSPARM_DIAGNOSTICS_RUN_MODE: {
+        uint8_t param_val = DIAGNOSTICS_RUN_MODE_DISABLED;
+
+        ret = sysparm_st(buffer, length, &param_val, sizeof(param_val));
+        break;
+    }
+    case RTAS_SYSPARM_UUID:
+        ret = sysparm_st(buffer, length, (unsigned char *)&qemu_uuid,
+                         (qemu_uuid_set ? 16 : 0));
+        break;
+    default:
+        ret = RTAS_OUT_NOT_SUPPORTED;
+    }
+
+    rtas_st(rets, 0, ret);
+}
+
+static void rtas_ibm_set_system_parameter(PowerPCCPU *cpu,
+                                          SpaprMachineState *spapr,
+                                          uint32_t token, uint32_t nargs,
+                                          target_ulong args,
+                                          uint32_t nret, target_ulong rets)
+{
+    target_ulong parameter = rtas_ld(args, 0);
+    target_ulong ret = RTAS_OUT_NOT_SUPPORTED;
+
+    switch (parameter) {
+    case RTAS_SYSPARM_SPLPAR_CHARACTERISTICS:
+    case RTAS_SYSPARM_DIAGNOSTICS_RUN_MODE:
+    case RTAS_SYSPARM_UUID:
+        ret = RTAS_OUT_NOT_AUTHORIZED;
+        break;
+    }
+
+    rtas_st(rets, 0, ret);
+}
+
+static void rtas_ibm_os_term(PowerPCCPU *cpu,
+                             SpaprMachineState *spapr,
+                             uint32_t token, uint32_t nargs,
+                             target_ulong args,
+                             uint32_t nret, target_ulong rets)
+{
+    target_ulong msgaddr = rtas_ld(args, 0);
+    char msg[512];
+
+    cpu_physical_memory_read(msgaddr, msg, sizeof(msg) - 1);
+    msg[sizeof(msg) - 1] = 0;
+
+    error_report("OS terminated: %s", msg);
+    qemu_system_guest_panicked(NULL);
+
+    rtas_st(rets, 0, RTAS_OUT_SUCCESS);
+}
+
+static void rtas_set_power_level(PowerPCCPU *cpu, SpaprMachineState *spapr,
+                                 uint32_t token, uint32_t nargs,
+                                 target_ulong args, uint32_t nret,
+                                 target_ulong rets)
+{
+    int32_t power_domain;
+
+    if (nargs != 2 || nret != 2) {
+        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
+        return;
+    }
+
+    /* we currently only use a single, "live insert" powerdomain for
+     * hotplugged/dlpar'd resources, so the power is always live/full (100)
+     */
+    power_domain = rtas_ld(args, 0);
+    if (power_domain != -1) {
+        rtas_st(rets, 0, RTAS_OUT_NOT_SUPPORTED);
+        return;
+    }
+
+    rtas_st(rets, 0, RTAS_OUT_SUCCESS);
+    rtas_st(rets, 1, 100);
+}
+
+static void rtas_get_power_level(PowerPCCPU *cpu, SpaprMachineState *spapr,
+                                 uint32_t token, uint32_t nargs,
+                                 target_ulong args, uint32_t nret,
+                                 target_ulong rets)
+{
+    int32_t power_domain;
+
+    if (nargs != 1 || nret != 2) {
+        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
+        return;
+    }
+
+    /* we currently only use a single, "live insert" powerdomain for
+     * hotplugged/dlpar'd resources, so the power is always live/full (100)
+     */
+    power_domain = rtas_ld(args, 0);
+    if (power_domain != -1) {
+        rtas_st(rets, 0, RTAS_OUT_NOT_SUPPORTED);
+        return;
+    }
+
+    rtas_st(rets, 0, RTAS_OUT_SUCCESS);
+    rtas_st(rets, 1, 100);
+}
+
+static void rtas_ibm_nmi_register(PowerPCCPU *cpu,
+                                  SpaprMachineState *spapr,
+                                  uint32_t token, uint32_t nargs,
+                                  target_ulong args,
+                                  uint32_t nret, target_ulong rets)
+{
+    hwaddr rtas_addr;
+    target_ulong sreset_addr, mce_addr;
+
+    if (spapr_get_cap(spapr, SPAPR_CAP_FWNMI) == SPAPR_CAP_OFF) {
+        rtas_st(rets, 0, RTAS_OUT_NOT_SUPPORTED);
+        return;
+    }
+
+    rtas_addr = spapr_get_rtas_addr();
+    if (!rtas_addr) {
+        rtas_st(rets, 0, RTAS_OUT_NOT_SUPPORTED);
+        return;
+    }
+
+    sreset_addr = rtas_ld(args, 0);
+    mce_addr = rtas_ld(args, 1);
+
+    /* PAPR requires these are in the first 32M of memory and within RMA */
+    if (sreset_addr >= 32 * MiB || sreset_addr >= spapr->rma_size ||
+        mce_addr >= 32 * MiB ||
mce_addr >= spapr->rma_size) { + rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); + return; + } + + if (kvm_enabled()) { + if (kvmppc_set_fwnmi(cpu) < 0) { + rtas_st(rets, 0, RTAS_OUT_NOT_SUPPORTED); + return; + } + } + + spapr->fwnmi_system_reset_addr = sreset_addr; + spapr->fwnmi_machine_check_addr = mce_addr; + + rtas_st(rets, 0, RTAS_OUT_SUCCESS); +} + +static void rtas_ibm_nmi_interlock(PowerPCCPU *cpu, + SpaprMachineState *spapr, + uint32_t token, uint32_t nargs, + target_ulong args, + uint32_t nret, target_ulong rets) +{ + if (spapr_get_cap(spapr, SPAPR_CAP_FWNMI) == SPAPR_CAP_OFF) { + rtas_st(rets, 0, RTAS_OUT_NOT_SUPPORTED); + return; + } + + if (spapr->fwnmi_machine_check_addr == -1) { + qemu_log_mask(LOG_GUEST_ERROR, +"FWNMI: ibm,nmi-interlock RTAS called with FWNMI not registered.\n"); + + /* NMI register not called */ + rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); + return; + } + + if (spapr->fwnmi_machine_check_interlock != cpu->vcpu_id) { + /* + * The vCPU that hit the NMI should invoke "ibm,nmi-interlock". + * This should be PARAM_ERROR, but Linux calls "ibm,nmi-interlock" + * for system reset interrupts, despite them not being interlocked. + * PowerVM silently ignores this and returns success here. Returning + * failure causes Linux to print the error "FWNMI: nmi-interlock + * failed: -3"; although there are no other apparent ill effects, this + * is a regression for the user when enabling FWNMI. So for now, match + * PowerVM. When most Linux clients are fixed, this could be + * changed. + */ + rtas_st(rets, 0, RTAS_OUT_SUCCESS); + return; + } + + /* + * vCPU issuing "ibm,nmi-interlock" is done with NMI handling, + * hence unset fwnmi_machine_check_interlock. + */ + spapr->fwnmi_machine_check_interlock = -1; + qemu_cond_signal(&spapr->fwnmi_machine_check_interlock_cond); + rtas_st(rets, 0, RTAS_OUT_SUCCESS); + migrate_del_blocker(spapr->fwnmi_migration_blocker); +} + +static struct rtas_call { + const char *name; + spapr_rtas_fn fn; +} rtas_table[RTAS_TOKEN_MAX - RTAS_TOKEN_BASE]; + +target_ulong spapr_rtas_call(PowerPCCPU *cpu, SpaprMachineState *spapr, + uint32_t token, uint32_t nargs, target_ulong args, + uint32_t nret, target_ulong rets) +{ + if ((token >= RTAS_TOKEN_BASE) && (token < RTAS_TOKEN_MAX)) { + struct rtas_call *call = rtas_table + (token - RTAS_TOKEN_BASE); + + if (call->fn) { + call->fn(cpu, spapr, token, nargs, args, nret, rets); + return H_SUCCESS; + } + } + + /* HACK: Some Linux early debug code uses RTAS display-character, + * but assumes the token value is 0xa (which it is on some real + * machines) without looking it up in the device tree.
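 + * A sketch of the guest-side sequence this caters for (illustrative, + * not quoted from Linux): the early debug putchar fills an RTAS + * argument block with token = 0xa, nargs = 1, nret = 1, args[0] = ch + * and enters RTAS directly, instead of looking up the + * "display-character" token under /rtas.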
This + * special case makes this work */ + if (token == 0xa) { + rtas_display_character(cpu, spapr, 0xa, nargs, args, nret, rets); + return H_SUCCESS; + } + + hcall_dprintf("Unknown RTAS token 0x%x\n", token); + rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); + return H_PARAMETER; +} + +uint64_t qtest_rtas_call(char *cmd, uint32_t nargs, uint64_t args, + uint32_t nret, uint64_t rets) +{ + int token; + + for (token = 0; token < RTAS_TOKEN_MAX - RTAS_TOKEN_BASE; token++) { + if (strcmp(cmd, rtas_table[token].name) == 0) { + SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine()); + PowerPCCPU *cpu = POWERPC_CPU(first_cpu); + + rtas_table[token].fn(cpu, spapr, token + RTAS_TOKEN_BASE, + nargs, args, nret, rets); + return H_SUCCESS; + } + } + return H_PARAMETER; +} + +void spapr_rtas_register(int token, const char *name, spapr_rtas_fn fn) +{ + assert((token >= RTAS_TOKEN_BASE) && (token < RTAS_TOKEN_MAX)); + + token -= RTAS_TOKEN_BASE; + + assert(!name || !rtas_table[token].name); + + rtas_table[token].name = name; + rtas_table[token].fn = fn; +} + +void spapr_dt_rtas_tokens(void *fdt, int rtas) +{ + int i; + + for (i = 0; i < RTAS_TOKEN_MAX - RTAS_TOKEN_BASE; i++) { + struct rtas_call *call = &rtas_table[i]; + + if (!call->name) { + continue; + } + + _FDT(fdt_setprop_cell(fdt, rtas, call->name, i + RTAS_TOKEN_BASE)); + } +} + +hwaddr spapr_get_rtas_addr(void) +{ + SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine()); + int rtas_node; + const fdt32_t *rtas_data; + void *fdt = spapr->fdt_blob; + + /* fetch rtas addr from fdt */ + rtas_node = fdt_path_offset(fdt, "/rtas"); + if (rtas_node < 0) { + return 0; + } + + rtas_data = fdt_getprop(fdt, rtas_node, "linux,rtas-base", NULL); + if (!rtas_data) { + return 0; + } + + /* + * We assume that the OS called RTAS instantiate-rtas, but some other + * OS might call RTAS instantiate-rtas-64 instead. This fine as of now + * as SLOF only supports 32-bit variant. 
+ */ + return (hwaddr)fdt32_to_cpu(*rtas_data); +} + +static void core_rtas_register_types(void) +{ + spapr_rtas_register(RTAS_DISPLAY_CHARACTER, "display-character", + rtas_display_character); + spapr_rtas_register(RTAS_POWER_OFF, "power-off", rtas_power_off); + spapr_rtas_register(RTAS_SYSTEM_REBOOT, "system-reboot", + rtas_system_reboot); + spapr_rtas_register(RTAS_QUERY_CPU_STOPPED_STATE, "query-cpu-stopped-state", + rtas_query_cpu_stopped_state); + spapr_rtas_register(RTAS_START_CPU, "start-cpu", rtas_start_cpu); + spapr_rtas_register(RTAS_STOP_SELF, "stop-self", rtas_stop_self); + spapr_rtas_register(RTAS_IBM_SUSPEND_ME, "ibm,suspend-me", + rtas_ibm_suspend_me); + spapr_rtas_register(RTAS_IBM_GET_SYSTEM_PARAMETER, + "ibm,get-system-parameter", + rtas_ibm_get_system_parameter); + spapr_rtas_register(RTAS_IBM_SET_SYSTEM_PARAMETER, + "ibm,set-system-parameter", + rtas_ibm_set_system_parameter); + spapr_rtas_register(RTAS_IBM_OS_TERM, "ibm,os-term", + rtas_ibm_os_term); + spapr_rtas_register(RTAS_SET_POWER_LEVEL, "set-power-level", + rtas_set_power_level); + spapr_rtas_register(RTAS_GET_POWER_LEVEL, "get-power-level", + rtas_get_power_level); + spapr_rtas_register(RTAS_IBM_NMI_REGISTER, "ibm,nmi-register", + rtas_ibm_nmi_register); + spapr_rtas_register(RTAS_IBM_NMI_INTERLOCK, "ibm,nmi-interlock", + rtas_ibm_nmi_interlock); +} + +type_init(core_rtas_register_types) diff --git a/hw/ppc/spapr_rtas_ddw.c b/hw/ppc/spapr_rtas_ddw.c new file mode 100644 index 000000000..3e826e130 --- /dev/null +++ b/hw/ppc/spapr_rtas_ddw.c @@ -0,0 +1,291 @@ +/* + * QEMU sPAPR Dynamic DMA windows support + * + * Copyright (c) 2015 Alexey Kardashevskiy, IBM Corporation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */ + +#include "qemu/osdep.h" +#include "qemu/error-report.h" +#include "qemu/module.h" +#include "hw/ppc/spapr.h" +#include "hw/pci-host/spapr.h" +#include "trace.h" + +static int spapr_phb_get_active_win_num_cb(Object *child, void *opaque) +{ + SpaprTceTable *tcet; + + tcet = (SpaprTceTable *) object_dynamic_cast(child, TYPE_SPAPR_TCE_TABLE); + if (tcet && tcet->nb_table) { + ++*(unsigned *)opaque; + } + return 0; +} + +static unsigned spapr_phb_get_active_win_num(SpaprPhbState *sphb) +{ + unsigned ret = 0; + + object_child_foreach(OBJECT(sphb), spapr_phb_get_active_win_num_cb, &ret); + + return ret; +} + +static int spapr_phb_get_free_liobn_cb(Object *child, void *opaque) +{ + SpaprTceTable *tcet; + + tcet = (SpaprTceTable *) object_dynamic_cast(child, TYPE_SPAPR_TCE_TABLE); + if (tcet && !tcet->nb_table) { + *(uint32_t *)opaque = tcet->liobn; + return 1; + } + return 0; +} + +static unsigned spapr_phb_get_free_liobn(SpaprPhbState *sphb) +{ + uint32_t liobn = 0; + + object_child_foreach(OBJECT(sphb), spapr_phb_get_free_liobn_cb, &liobn); + + return liobn; +} + +static uint32_t spapr_page_mask_to_query_mask(uint64_t page_mask) +{ + int i; + uint32_t mask = 0; + const struct { int shift; uint32_t mask; } masks[] = { + { 12, RTAS_DDW_PGSIZE_4K }, + { 16, RTAS_DDW_PGSIZE_64K }, + { 24, RTAS_DDW_PGSIZE_16M }, + { 25, RTAS_DDW_PGSIZE_32M }, + { 26, RTAS_DDW_PGSIZE_64M }, + { 27, RTAS_DDW_PGSIZE_128M }, + { 28, RTAS_DDW_PGSIZE_256M }, + { 34, RTAS_DDW_PGSIZE_16G }, + }; + + for (i = 0; i < ARRAY_SIZE(masks); ++i) { + if (page_mask & (1ULL << masks[i].shift)) { + mask |= masks[i].mask; + } + } + + return mask; +} + +static void rtas_ibm_query_pe_dma_window(PowerPCCPU *cpu, + SpaprMachineState *spapr, + uint32_t token, uint32_t nargs, + target_ulong args, + uint32_t nret, target_ulong rets) +{ + SpaprPhbState *sphb; + uint64_t buid; + uint32_t avail, addr, pgmask = 0; + + if ((nargs != 3) || (nret != 5)) { + goto param_error_exit; + } + + buid = ((uint64_t)rtas_ld(args, 1) << 32) | rtas_ld(args, 2); + addr = rtas_ld(args, 0); + sphb = spapr_pci_find_phb(spapr, buid); + if (!sphb || !sphb->ddw_enabled) { + goto param_error_exit; + } + + /* Translate page mask to LoPAPR format */ + pgmask = spapr_page_mask_to_query_mask(sphb->page_size_mask); + + avail = SPAPR_PCI_DMA_MAX_WINDOWS - spapr_phb_get_active_win_num(sphb); + + rtas_st(rets, 0, RTAS_OUT_SUCCESS); + rtas_st(rets, 1, avail); + rtas_st(rets, 2, 0x80000000); /* The largest window we can possibly have */ + rtas_st(rets, 3, pgmask); + rtas_st(rets, 4, 0); /* DMA migration mask, not supported */ + + trace_spapr_iommu_ddw_query(buid, addr, avail, 0x80000000, pgmask); + return; + +param_error_exit: + rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); +} + +static void rtas_ibm_create_pe_dma_window(PowerPCCPU *cpu, + SpaprMachineState *spapr, + uint32_t token, uint32_t nargs, + target_ulong args, + uint32_t nret, target_ulong rets) +{ + SpaprPhbState *sphb; + SpaprTceTable *tcet = NULL; + uint32_t addr, page_shift, window_shift, liobn; + uint64_t buid, win_addr; + int windows; + + if ((nargs != 5) || (nret != 4)) { + goto param_error_exit; + } + + buid = ((uint64_t)rtas_ld(args, 1) << 32) | rtas_ld(args, 2); + addr = rtas_ld(args, 0); + sphb = spapr_pci_find_phb(spapr, buid); + if (!sphb || !sphb->ddw_enabled) { + goto param_error_exit; + } + + page_shift = rtas_ld(args, 3); + window_shift = rtas_ld(args, 4); + liobn = spapr_phb_get_free_liobn(sphb); + windows = spapr_phb_get_active_win_num(sphb); + + if (!(sphb->page_size_mask & (1ULL << page_shift)) || + 
(window_shift < page_shift)) { + goto param_error_exit; + } + + if (!liobn || !sphb->ddw_enabled || windows == SPAPR_PCI_DMA_MAX_WINDOWS) { + goto hw_error_exit; + } + + tcet = spapr_tce_find_by_liobn(liobn); + if (!tcet) { + goto hw_error_exit; + } + + win_addr = (windows == 0) ? sphb->dma_win_addr : sphb->dma64_win_addr; + /* + * We have just created a window, we know for a fact that it is empty, + * use a hack to avoid iterating over the table as it is quite possible + * to have billions of TCEs, all empty. + * Note that we cannot delay this to the first H_PUT_TCE as this hcall is + * most likely to be handled in KVM so QEMU just does not know if it + * happened. + */ + tcet->skipping_replay = true; + spapr_tce_table_enable(tcet, page_shift, win_addr, + 1ULL << (window_shift - page_shift)); + tcet->skipping_replay = false; + if (!tcet->nb_table) { + goto hw_error_exit; + } + + trace_spapr_iommu_ddw_create(buid, addr, 1ULL << page_shift, + 1ULL << window_shift, tcet->bus_offset, liobn); + + rtas_st(rets, 0, RTAS_OUT_SUCCESS); + rtas_st(rets, 1, liobn); + rtas_st(rets, 2, tcet->bus_offset >> 32); + rtas_st(rets, 3, tcet->bus_offset & ((uint32_t) -1)); + + return; + +hw_error_exit: + rtas_st(rets, 0, RTAS_OUT_HW_ERROR); + return; + +param_error_exit: + rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); +} + +static void rtas_ibm_remove_pe_dma_window(PowerPCCPU *cpu, + SpaprMachineState *spapr, + uint32_t token, uint32_t nargs, + target_ulong args, + uint32_t nret, target_ulong rets) +{ + SpaprPhbState *sphb; + SpaprTceTable *tcet; + uint32_t liobn; + + if ((nargs != 1) || (nret != 1)) { + goto param_error_exit; + } + + liobn = rtas_ld(args, 0); + tcet = spapr_tce_find_by_liobn(liobn); + if (!tcet) { + goto param_error_exit; + } + + sphb = SPAPR_PCI_HOST_BRIDGE(OBJECT(tcet)->parent); + if (!sphb || !sphb->ddw_enabled || !tcet->nb_table) { + goto param_error_exit; + } + + spapr_tce_table_disable(tcet); + trace_spapr_iommu_ddw_remove(liobn); + + rtas_st(rets, 0, RTAS_OUT_SUCCESS); + return; + +param_error_exit: + rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); +} + +static void rtas_ibm_reset_pe_dma_window(PowerPCCPU *cpu, + SpaprMachineState *spapr, + uint32_t token, uint32_t nargs, + target_ulong args, + uint32_t nret, target_ulong rets) +{ + SpaprPhbState *sphb; + uint64_t buid; + uint32_t addr; + + if ((nargs != 3) || (nret != 1)) { + goto param_error_exit; + } + + buid = ((uint64_t)rtas_ld(args, 1) << 32) | rtas_ld(args, 2); + addr = rtas_ld(args, 0); + sphb = spapr_pci_find_phb(spapr, buid); + if (!sphb || !sphb->ddw_enabled) { + goto param_error_exit; + } + + spapr_phb_dma_reset(sphb); + trace_spapr_iommu_ddw_reset(buid, addr); + + rtas_st(rets, 0, RTAS_OUT_SUCCESS); + + return; + +param_error_exit: + rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); +} + +static void spapr_rtas_ddw_init(void) +{ + spapr_rtas_register(RTAS_IBM_QUERY_PE_DMA_WINDOW, + "ibm,query-pe-dma-window", + rtas_ibm_query_pe_dma_window); + spapr_rtas_register(RTAS_IBM_CREATE_PE_DMA_WINDOW, + "ibm,create-pe-dma-window", + rtas_ibm_create_pe_dma_window); + spapr_rtas_register(RTAS_IBM_REMOVE_PE_DMA_WINDOW, + "ibm,remove-pe-dma-window", + rtas_ibm_remove_pe_dma_window); + spapr_rtas_register(RTAS_IBM_RESET_PE_DMA_WINDOW, + "ibm,reset-pe-dma-window", + rtas_ibm_reset_pe_dma_window); +} + +type_init(spapr_rtas_ddw_init) diff --git a/hw/ppc/spapr_rtc.c b/hw/ppc/spapr_rtc.c new file mode 100644 index 000000000..fba4dfca3 --- /dev/null +++ b/hw/ppc/spapr_rtc.c @@ -0,0 +1,190 @@ +/* + * QEMU PowerPC pSeries Logical Partition (aka sPAPR) hardware
System Emulator + * + * RTAS Real Time Clock + * + * Copyright (c) 2010-2011 David Gibson, IBM Corporation. + * Copyright 2014 David Gibson, Red Hat. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "qemu-common.h" +#include "qemu/timer.h" +#include "sysemu/sysemu.h" +#include "hw/ppc/spapr.h" +#include "migration/vmstate.h" +#include "qapi/error.h" +#include "qapi/qapi-events-misc-target.h" +#include "qemu/cutils.h" +#include "qemu/module.h" + +void spapr_rtc_read(SpaprRtcState *rtc, struct tm *tm, uint32_t *ns) +{ + int64_t host_ns = qemu_clock_get_ns(rtc_clock); + int64_t guest_ns; + time_t guest_s; + + assert(rtc); + + guest_ns = host_ns + rtc->ns_offset; + guest_s = guest_ns / NANOSECONDS_PER_SECOND; + + if (tm) { + gmtime_r(&guest_s, tm); + } + if (ns) { + *ns = guest_ns; + } +} + +int spapr_rtc_import_offset(SpaprRtcState *rtc, int64_t legacy_offset) +{ + if (!rtc) { + return -ENODEV; + } + + rtc->ns_offset = legacy_offset * NANOSECONDS_PER_SECOND; + + return 0; +} + +static void rtas_get_time_of_day(PowerPCCPU *cpu, SpaprMachineState *spapr, + uint32_t token, uint32_t nargs, + target_ulong args, + uint32_t nret, target_ulong rets) +{ + struct tm tm; + uint32_t ns; + + if ((nargs != 0) || (nret != 8)) { + rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); + return; + } + + spapr_rtc_read(&spapr->rtc, &tm, &ns); + + rtas_st(rets, 0, RTAS_OUT_SUCCESS); + rtas_st(rets, 1, tm.tm_year + 1900); + rtas_st(rets, 2, tm.tm_mon + 1); + rtas_st(rets, 3, tm.tm_mday); + rtas_st(rets, 4, tm.tm_hour); + rtas_st(rets, 5, tm.tm_min); + rtas_st(rets, 6, tm.tm_sec); + rtas_st(rets, 7, ns); +} + +static void rtas_set_time_of_day(PowerPCCPU *cpu, SpaprMachineState *spapr, + uint32_t token, uint32_t nargs, + target_ulong args, + uint32_t nret, target_ulong rets) +{ + SpaprRtcState *rtc = &spapr->rtc; + struct tm tm; + time_t new_s; + int64_t host_ns; + + if ((nargs != 7) || (nret != 1)) { + rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); + return; + } + + tm.tm_year = rtas_ld(args, 0) - 1900; + tm.tm_mon = rtas_ld(args, 1) - 1; + tm.tm_mday = rtas_ld(args, 2); + tm.tm_hour = rtas_ld(args, 3); + tm.tm_min = rtas_ld(args, 4); + tm.tm_sec = rtas_ld(args, 5); + + new_s = mktimegm(&tm); + if (new_s == -1) { + rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); + return; + } + + /* Generate a monitor event for the change */ + qapi_event_send_rtc_change(qemu_timedate_diff(&tm)); + + host_ns = qemu_clock_get_ns(rtc_clock); + + rtc->ns_offset = (new_s * NANOSECONDS_PER_SECOND) - 
host_ns; + + rtas_st(rets, 0, RTAS_OUT_SUCCESS); +} + +static void spapr_rtc_qom_date(Object *obj, struct tm *current_tm, Error **errp) +{ + spapr_rtc_read(SPAPR_RTC(obj), current_tm, NULL); +} + +static void spapr_rtc_realize(DeviceState *dev, Error **errp) +{ + SpaprRtcState *rtc = SPAPR_RTC(dev); + struct tm tm; + time_t host_s; + int64_t rtc_ns; + + /* Initialize the RTAS RTC from host time */ + + qemu_get_timedate(&tm, 0); + host_s = mktimegm(&tm); + rtc_ns = qemu_clock_get_ns(rtc_clock); + rtc->ns_offset = host_s * NANOSECONDS_PER_SECOND - rtc_ns; + + object_property_add_tm(OBJECT(rtc), "date", spapr_rtc_qom_date); +} + +static const VMStateDescription vmstate_spapr_rtc = { + .name = "spapr/rtc", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_INT64(ns_offset, SpaprRtcState), + VMSTATE_END_OF_LIST() + }, +}; + +static void spapr_rtc_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + + dc->realize = spapr_rtc_realize; + dc->vmsd = &vmstate_spapr_rtc; + /* Reason: This is an internal device only for handling the hypercalls */ + dc->user_creatable = false; + + spapr_rtas_register(RTAS_GET_TIME_OF_DAY, "get-time-of-day", + rtas_get_time_of_day); + spapr_rtas_register(RTAS_SET_TIME_OF_DAY, "set-time-of-day", + rtas_set_time_of_day); +} + +static const TypeInfo spapr_rtc_info = { + .name = TYPE_SPAPR_RTC, + .parent = TYPE_DEVICE, + .instance_size = sizeof(SpaprRtcState), + .class_init = spapr_rtc_class_init, +}; + +static void spapr_rtc_register_types(void) +{ + type_register_static(&spapr_rtc_info); +} +type_init(spapr_rtc_register_types) diff --git a/hw/ppc/spapr_softmmu.c b/hw/ppc/spapr_softmmu.c new file mode 100644 index 000000000..4ee03c83e --- /dev/null +++ b/hw/ppc/spapr_softmmu.c @@ -0,0 +1,612 @@ +#include "qemu/osdep.h" +#include "qemu/cutils.h" +#include "cpu.h" +#include "helper_regs.h" +#include "hw/ppc/spapr.h" +#include "mmu-hash64.h" +#include "mmu-book3s-v3.h" + +static inline bool valid_ptex(PowerPCCPU *cpu, target_ulong ptex) +{ + /* + * hash value/pteg group index is normalized by HPT mask + */ + if (((ptex & ~7ULL) / HPTES_PER_GROUP) & ~ppc_hash64_hpt_mask(cpu)) { + return false; + } + return true; +} + +static target_ulong h_enter(PowerPCCPU *cpu, SpaprMachineState *spapr, + target_ulong opcode, target_ulong *args) +{ + target_ulong flags = args[0]; + target_ulong ptex = args[1]; + target_ulong pteh = args[2]; + target_ulong ptel = args[3]; + unsigned apshift; + target_ulong raddr; + target_ulong slot; + const ppc_hash_pte64_t *hptes; + + apshift = ppc_hash64_hpte_page_shift_noslb(cpu, pteh, ptel); + if (!apshift) { + /* Bad page size encoding */ + return H_PARAMETER; + } + + raddr = (ptel & HPTE64_R_RPN) & ~((1ULL << apshift) - 1); + + if (is_ram_address(spapr, raddr)) { + /* Regular RAM - should have WIMG=0010 */ + if ((ptel & HPTE64_R_WIMG) != HPTE64_R_M) { + return H_PARAMETER; + } + } else { + target_ulong wimg_flags; + /* Looks like an IO address */ + /* FIXME: What WIMG combinations could be sensible for IO? + * For now we allow WIMG=010x, but are there others? */ + /* FIXME: Should we check against registered IO addresses? 
*/ + wimg_flags = (ptel & (HPTE64_R_W | HPTE64_R_I | HPTE64_R_M)); + + if (wimg_flags != HPTE64_R_I && + wimg_flags != (HPTE64_R_I | HPTE64_R_M)) { + return H_PARAMETER; + } + } + + pteh &= ~0x60ULL; + + if (!valid_ptex(cpu, ptex)) { + return H_PARAMETER; + } + + slot = ptex & 7ULL; + ptex = ptex & ~7ULL; + + if (likely((flags & H_EXACT) == 0)) { + hptes = ppc_hash64_map_hptes(cpu, ptex, HPTES_PER_GROUP); + for (slot = 0; slot < 8; slot++) { + if (!(ppc_hash64_hpte0(cpu, hptes, slot) & HPTE64_V_VALID)) { + break; + } + } + ppc_hash64_unmap_hptes(cpu, hptes, ptex, HPTES_PER_GROUP); + if (slot == 8) { + return H_PTEG_FULL; + } + } else { + hptes = ppc_hash64_map_hptes(cpu, ptex + slot, 1); + if (ppc_hash64_hpte0(cpu, hptes, 0) & HPTE64_V_VALID) { + ppc_hash64_unmap_hptes(cpu, hptes, ptex + slot, 1); + return H_PTEG_FULL; + } + ppc_hash64_unmap_hptes(cpu, hptes, ptex, 1); + } + + spapr_store_hpte(cpu, ptex + slot, pteh | HPTE64_V_HPTE_DIRTY, ptel); + + args[0] = ptex + slot; + return H_SUCCESS; +} + +typedef enum { + REMOVE_SUCCESS = 0, + REMOVE_NOT_FOUND = 1, + REMOVE_PARM = 2, + REMOVE_HW = 3, +} RemoveResult; + +static RemoveResult remove_hpte(PowerPCCPU *cpu + , target_ulong ptex, + target_ulong avpn, + target_ulong flags, + target_ulong *vp, target_ulong *rp) +{ + const ppc_hash_pte64_t *hptes; + target_ulong v, r; + + if (!valid_ptex(cpu, ptex)) { + return REMOVE_PARM; + } + + hptes = ppc_hash64_map_hptes(cpu, ptex, 1); + v = ppc_hash64_hpte0(cpu, hptes, 0); + r = ppc_hash64_hpte1(cpu, hptes, 0); + ppc_hash64_unmap_hptes(cpu, hptes, ptex, 1); + + if ((v & HPTE64_V_VALID) == 0 || + ((flags & H_AVPN) && (v & ~0x7fULL) != avpn) || + ((flags & H_ANDCOND) && (v & avpn) != 0)) { + return REMOVE_NOT_FOUND; + } + *vp = v; + *rp = r; + spapr_store_hpte(cpu, ptex, HPTE64_V_HPTE_DIRTY, 0); + ppc_hash64_tlb_flush_hpte(cpu, ptex, v, r); + return REMOVE_SUCCESS; +} + +static target_ulong h_remove(PowerPCCPU *cpu, SpaprMachineState *spapr, + target_ulong opcode, target_ulong *args) +{ + CPUPPCState *env = &cpu->env; + target_ulong flags = args[0]; + target_ulong ptex = args[1]; + target_ulong avpn = args[2]; + RemoveResult ret; + + ret = remove_hpte(cpu, ptex, avpn, flags, + &args[0], &args[1]); + + switch (ret) { + case REMOVE_SUCCESS: + check_tlb_flush(env, true); + return H_SUCCESS; + + case REMOVE_NOT_FOUND: + return H_NOT_FOUND; + + case REMOVE_PARM: + return H_PARAMETER; + + case REMOVE_HW: + return H_HARDWARE; + } + + g_assert_not_reached(); +} + +#define H_BULK_REMOVE_TYPE 0xc000000000000000ULL +#define H_BULK_REMOVE_REQUEST 0x4000000000000000ULL +#define H_BULK_REMOVE_RESPONSE 0x8000000000000000ULL +#define H_BULK_REMOVE_END 0xc000000000000000ULL +#define H_BULK_REMOVE_CODE 0x3000000000000000ULL +#define H_BULK_REMOVE_SUCCESS 0x0000000000000000ULL +#define H_BULK_REMOVE_NOT_FOUND 0x1000000000000000ULL +#define H_BULK_REMOVE_PARM 0x2000000000000000ULL +#define H_BULK_REMOVE_HW 0x3000000000000000ULL +#define H_BULK_REMOVE_RC 0x0c00000000000000ULL +#define H_BULK_REMOVE_FLAGS 0x0300000000000000ULL +#define H_BULK_REMOVE_ABSOLUTE 0x0000000000000000ULL +#define H_BULK_REMOVE_ANDCOND 0x0100000000000000ULL +#define H_BULK_REMOVE_AVPN 0x0200000000000000ULL +#define H_BULK_REMOVE_PTEX 0x00ffffffffffffffULL + +#define H_BULK_REMOVE_MAX_BATCH 4 + +static target_ulong h_bulk_remove(PowerPCCPU *cpu, SpaprMachineState *spapr, + target_ulong opcode, target_ulong *args) +{ + CPUPPCState *env = &cpu->env; + int i; + target_ulong rc = H_SUCCESS; + + for (i = 0; i < H_BULK_REMOVE_MAX_BATCH; i++) { + 
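+ /* + * Each request is a pair of doublewords: tsh carries the type, + * flags and PTE index, tsl carries the AVPN to match (see the + * H_BULK_REMOVE_* masks above). + */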
target_ulong *tsh = &args[i*2]; + target_ulong tsl = args[i*2 + 1]; + target_ulong v, r, ret; + + if ((*tsh & H_BULK_REMOVE_TYPE) == H_BULK_REMOVE_END) { + break; + } else if ((*tsh & H_BULK_REMOVE_TYPE) != H_BULK_REMOVE_REQUEST) { + return H_PARAMETER; + } + + *tsh &= H_BULK_REMOVE_PTEX | H_BULK_REMOVE_FLAGS; + *tsh |= H_BULK_REMOVE_RESPONSE; + + if ((*tsh & H_BULK_REMOVE_ANDCOND) && (*tsh & H_BULK_REMOVE_AVPN)) { + *tsh |= H_BULK_REMOVE_PARM; + return H_PARAMETER; + } + + ret = remove_hpte(cpu, *tsh & H_BULK_REMOVE_PTEX, tsl, + (*tsh & H_BULK_REMOVE_FLAGS) >> 26, + &v, &r); + + *tsh |= ret << 60; + + switch (ret) { + case REMOVE_SUCCESS: + *tsh |= (r & (HPTE64_R_C | HPTE64_R_R)) << 43; + break; + + case REMOVE_PARM: + rc = H_PARAMETER; + goto exit; + + case REMOVE_HW: + rc = H_HARDWARE; + goto exit; + } + } + exit: + check_tlb_flush(env, true); + + return rc; +} + +static target_ulong h_protect(PowerPCCPU *cpu, SpaprMachineState *spapr, + target_ulong opcode, target_ulong *args) +{ + CPUPPCState *env = &cpu->env; + target_ulong flags = args[0]; + target_ulong ptex = args[1]; + target_ulong avpn = args[2]; + const ppc_hash_pte64_t *hptes; + target_ulong v, r; + + if (!valid_ptex(cpu, ptex)) { + return H_PARAMETER; + } + + hptes = ppc_hash64_map_hptes(cpu, ptex, 1); + v = ppc_hash64_hpte0(cpu, hptes, 0); + r = ppc_hash64_hpte1(cpu, hptes, 0); + ppc_hash64_unmap_hptes(cpu, hptes, ptex, 1); + + if ((v & HPTE64_V_VALID) == 0 || + ((flags & H_AVPN) && (v & ~0x7fULL) != avpn)) { + return H_NOT_FOUND; + } + + r &= ~(HPTE64_R_PP0 | HPTE64_R_PP | HPTE64_R_N | + HPTE64_R_KEY_HI | HPTE64_R_KEY_LO); + r |= (flags << 55) & HPTE64_R_PP0; + r |= (flags << 48) & HPTE64_R_KEY_HI; + r |= flags & (HPTE64_R_PP | HPTE64_R_N | HPTE64_R_KEY_LO); + spapr_store_hpte(cpu, ptex, + (v & ~HPTE64_V_VALID) | HPTE64_V_HPTE_DIRTY, 0); + ppc_hash64_tlb_flush_hpte(cpu, ptex, v, r); + /* Flush the tlb */ + check_tlb_flush(env, true); + /* Don't need a memory barrier, due to qemu's global lock */ + spapr_store_hpte(cpu, ptex, v | HPTE64_V_HPTE_DIRTY, r); + return H_SUCCESS; +} + +static target_ulong h_read(PowerPCCPU *cpu, SpaprMachineState *spapr, + target_ulong opcode, target_ulong *args) +{ + target_ulong flags = args[0]; + target_ulong ptex = args[1]; + int i, ridx, n_entries = 1; + const ppc_hash_pte64_t *hptes; + + if (!valid_ptex(cpu, ptex)) { + return H_PARAMETER; + } + + if (flags & H_READ_4) { + /* Clear the two low order bits */ + ptex &= ~(3ULL); + n_entries = 4; + } + + hptes = ppc_hash64_map_hptes(cpu, ptex, n_entries); + for (i = 0, ridx = 0; i < n_entries; i++) { + args[ridx++] = ppc_hash64_hpte0(cpu, hptes, i); + args[ridx++] = ppc_hash64_hpte1(cpu, hptes, i); + } + ppc_hash64_unmap_hptes(cpu, hptes, ptex, n_entries); + + return H_SUCCESS; +} + +struct SpaprPendingHpt { + /* These fields are read-only after initialization */ + int shift; + QemuThread thread; + + /* These fields are protected by the BQL */ + bool complete; + + /* These fields are private to the preparation thread if + * !complete, otherwise protected by the BQL */ + int ret; + void *hpt; +}; + +static void free_pending_hpt(SpaprPendingHpt *pending) +{ + if (pending->hpt) { + qemu_vfree(pending->hpt); + } + + g_free(pending); +} + +static void *hpt_prepare_thread(void *opaque) +{ + SpaprPendingHpt *pending = opaque; + size_t size = 1ULL << pending->shift; + + pending->hpt = qemu_try_memalign(size, size); + if (pending->hpt) { + memset(pending->hpt, 0, size); + pending->ret = H_SUCCESS; + } else { + pending->ret = H_NO_MEM; + } + + 
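+ /* + * Reacquire the BQL before touching machine state: either publish the + * prepared HPT via pending->complete, or, if cancel_hpt_prepare() has + * already dropped the machine's reference to us, free the table + * ourselves since nobody else will. + */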
qemu_mutex_lock_iothread(); + + if (SPAPR_MACHINE(qdev_get_machine())->pending_hpt == pending) { + /* Ready to go */ + pending->complete = true; + } else { + /* We've been cancelled, clean ourselves up */ + free_pending_hpt(pending); + } + + qemu_mutex_unlock_iothread(); + return NULL; +} + +/* Must be called with BQL held */ +static void cancel_hpt_prepare(SpaprMachineState *spapr) +{ + SpaprPendingHpt *pending = spapr->pending_hpt; + + /* Let the thread know it's cancelled */ + spapr->pending_hpt = NULL; + + if (!pending) { + /* Nothing to do */ + return; + } + + if (!pending->complete) { + /* thread will clean itself up */ + return; + } + + free_pending_hpt(pending); +} + +target_ulong softmmu_resize_hpt_prepare(PowerPCCPU *cpu, + SpaprMachineState *spapr, + target_ulong shift) +{ + SpaprPendingHpt *pending = spapr->pending_hpt; + + if (pending) { + /* something already in progress */ + if (pending->shift == shift) { + /* and it's suitable */ + if (pending->complete) { + return pending->ret; + } else { + return H_LONG_BUSY_ORDER_100_MSEC; + } + } + + /* not suitable, cancel and replace */ + cancel_hpt_prepare(spapr); + } + + if (!shift) { + /* nothing to do */ + return H_SUCCESS; + } + + /* start new prepare */ + + pending = g_new0(SpaprPendingHpt, 1); + pending->shift = shift; + pending->ret = H_HARDWARE; + + qemu_thread_create(&pending->thread, "sPAPR HPT prepare", + hpt_prepare_thread, pending, QEMU_THREAD_DETACHED); + + spapr->pending_hpt = pending; + + /* In theory we could estimate the time more accurately based on + * the new size, but there's not much point */ + return H_LONG_BUSY_ORDER_100_MSEC; +} + +static uint64_t new_hpte_load0(void *htab, uint64_t pteg, int slot) +{ + uint8_t *addr = htab; + + addr += pteg * HASH_PTEG_SIZE_64; + addr += slot * HASH_PTE_SIZE_64; + return ldq_p(addr); +} + +static void new_hpte_store(void *htab, uint64_t pteg, int slot, + uint64_t pte0, uint64_t pte1) +{ + uint8_t *addr = htab; + + addr += pteg * HASH_PTEG_SIZE_64; + addr += slot * HASH_PTE_SIZE_64; + + stq_p(addr, pte0); + stq_p(addr + HPTE64_DW1, pte1); +} + +static int rehash_hpte(PowerPCCPU *cpu, + const ppc_hash_pte64_t *hptes, + void *old_hpt, uint64_t oldsize, + void *new_hpt, uint64_t newsize, + uint64_t pteg, int slot) +{ + uint64_t old_hash_mask = (oldsize >> 7) - 1; + uint64_t new_hash_mask = (newsize >> 7) - 1; + target_ulong pte0 = ppc_hash64_hpte0(cpu, hptes, slot); + target_ulong pte1; + uint64_t avpn; + unsigned base_pg_shift; + uint64_t hash, new_pteg, replace_pte0; + + if (!(pte0 & HPTE64_V_VALID) || !(pte0 & HPTE64_V_BOLTED)) { + return H_SUCCESS; + } + + pte1 = ppc_hash64_hpte1(cpu, hptes, slot); + + base_pg_shift = ppc_hash64_hpte_page_shift_noslb(cpu, pte0, pte1); + assert(base_pg_shift); /* H_ENTER shouldn't allow a bad encoding */ + avpn = HPTE64_V_AVPN_VAL(pte0) & ~(((1ULL << base_pg_shift) - 1) >> 23); + + if (pte0 & HPTE64_V_SECONDARY) { + pteg = ~pteg; + } + + if ((pte0 & HPTE64_V_SSIZE) == HPTE64_V_SSIZE_256M) { + uint64_t offset, vsid; + + /* We only have 28 - 23 bits of offset in avpn */ + offset = (avpn & 0x1f) << 23; + vsid = avpn >> 5; + /* We can find more bits from the pteg value */ + if (base_pg_shift < 23) { + offset |= ((vsid ^ pteg) & old_hash_mask) << base_pg_shift; + } + + hash = vsid ^ (offset >> base_pg_shift); + } else if ((pte0 & HPTE64_V_SSIZE) == HPTE64_V_SSIZE_1T) { + uint64_t offset, vsid; + + /* We only have 40 - 23 bits of seg_off in avpn */ + offset = (avpn & 0x1ffff) << 23; + vsid = avpn >> 17; + if (base_pg_shift < 23) { + offset |= 
((vsid ^ (vsid << 25) ^ pteg) & old_hash_mask) + << base_pg_shift; + } + + hash = vsid ^ (vsid << 25) ^ (offset >> base_pg_shift); + } else { + error_report("rehash_pte: Bad segment size in HPTE"); + return H_HARDWARE; + } + + new_pteg = hash & new_hash_mask; + if (pte0 & HPTE64_V_SECONDARY) { + assert(~pteg == (hash & old_hash_mask)); + new_pteg = ~new_pteg; + } else { + assert(pteg == (hash & old_hash_mask)); + } + assert((oldsize != newsize) || (pteg == new_pteg)); + replace_pte0 = new_hpte_load0(new_hpt, new_pteg, slot); + /* + * Strictly speaking, we don't need all these tests, since we only + * ever rehash bolted HPTEs. We might in future handle non-bolted + * HPTEs, though so make the logic correct for those cases as + * well. + */ + if (replace_pte0 & HPTE64_V_VALID) { + assert(newsize < oldsize); + if (replace_pte0 & HPTE64_V_BOLTED) { + if (pte0 & HPTE64_V_BOLTED) { + /* Bolted collision, nothing we can do */ + return H_PTEG_FULL; + } else { + /* Discard this hpte */ + return H_SUCCESS; + } + } + } + + new_hpte_store(new_hpt, new_pteg, slot, pte0, pte1); + return H_SUCCESS; +} + +static int rehash_hpt(PowerPCCPU *cpu, + void *old_hpt, uint64_t oldsize, + void *new_hpt, uint64_t newsize) +{ + uint64_t n_ptegs = oldsize >> 7; + uint64_t pteg; + int slot; + int rc; + + for (pteg = 0; pteg < n_ptegs; pteg++) { + hwaddr ptex = pteg * HPTES_PER_GROUP; + const ppc_hash_pte64_t *hptes + = ppc_hash64_map_hptes(cpu, ptex, HPTES_PER_GROUP); + + if (!hptes) { + return H_HARDWARE; + } + + for (slot = 0; slot < HPTES_PER_GROUP; slot++) { + rc = rehash_hpte(cpu, hptes, old_hpt, oldsize, new_hpt, newsize, + pteg, slot); + if (rc != H_SUCCESS) { + ppc_hash64_unmap_hptes(cpu, hptes, ptex, HPTES_PER_GROUP); + return rc; + } + } + ppc_hash64_unmap_hptes(cpu, hptes, ptex, HPTES_PER_GROUP); + } + + return H_SUCCESS; +} + +target_ulong softmmu_resize_hpt_commit(PowerPCCPU *cpu, + SpaprMachineState *spapr, + target_ulong flags, + target_ulong shift) +{ + SpaprPendingHpt *pending = spapr->pending_hpt; + int rc; + size_t newsize; + + if (flags != 0) { + return H_PARAMETER; + } + + if (!pending || (pending->shift != shift)) { + /* no matching prepare */ + return H_CLOSED; + } + + if (!pending->complete) { + /* prepare has not completed */ + return H_BUSY; + } + + /* Shouldn't have got past PREPARE without an HPT */ + g_assert(spapr->htab_shift); + + newsize = 1ULL << pending->shift; + rc = rehash_hpt(cpu, spapr->htab, HTAB_SIZE(spapr), + pending->hpt, newsize); + if (rc == H_SUCCESS) { + qemu_vfree(spapr->htab); + spapr->htab = pending->hpt; + spapr->htab_shift = pending->shift; + + push_sregs_to_kvm_pr(spapr); + + pending->hpt = NULL; /* so it's not free()d */ + } + + /* Clean up */ + spapr->pending_hpt = NULL; + free_pending_hpt(pending); + + return rc; +} + +static void hypercall_register_types(void) +{ + /* hcall-pft */ + spapr_register_hypercall(H_ENTER, h_enter); + spapr_register_hypercall(H_REMOVE, h_remove); + spapr_register_hypercall(H_PROTECT, h_protect); + spapr_register_hypercall(H_READ, h_read); + + /* hcall-bulk */ + spapr_register_hypercall(H_BULK_REMOVE, h_bulk_remove); + +} + +type_init(hypercall_register_types) diff --git a/hw/ppc/spapr_tpm_proxy.c b/hw/ppc/spapr_tpm_proxy.c new file mode 100644 index 000000000..245408674 --- /dev/null +++ b/hw/ppc/spapr_tpm_proxy.c @@ -0,0 +1,177 @@ +/* + * SPAPR TPM Proxy/Hypercall + * + * Copyright IBM Corp. 2019 + * + * Authors: + * Michael Roth + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. 
+ * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "qemu-common.h" +#include "qapi/error.h" +#include "qemu/error-report.h" +#include "sysemu/reset.h" +#include "hw/ppc/spapr.h" +#include "hw/qdev-properties.h" +#include "trace.h" + +#define TPM_SPAPR_BUFSIZE 4096 + +enum { + TPM_COMM_OP_EXECUTE = 1, + TPM_COMM_OP_CLOSE_SESSION = 2, +}; + +static void spapr_tpm_proxy_reset(void *opaque) +{ + SpaprTpmProxy *tpm_proxy = SPAPR_TPM_PROXY(opaque); + + if (tpm_proxy->host_fd != -1) { + close(tpm_proxy->host_fd); + tpm_proxy->host_fd = -1; + } +} + +static ssize_t tpm_execute(SpaprTpmProxy *tpm_proxy, target_ulong *args) +{ + uint64_t data_in = ppc64_phys_to_real(args[1]); + target_ulong data_in_size = args[2]; + uint64_t data_out = ppc64_phys_to_real(args[3]); + target_ulong data_out_size = args[4]; + uint8_t buf_in[TPM_SPAPR_BUFSIZE]; + uint8_t buf_out[TPM_SPAPR_BUFSIZE]; + ssize_t ret; + + trace_spapr_tpm_execute(data_in, data_in_size, data_out, data_out_size); + + if (data_in_size > TPM_SPAPR_BUFSIZE) { + error_report("invalid TPM input buffer size: " TARGET_FMT_lu, + data_in_size); + return H_P3; + } + + if (data_out_size < TPM_SPAPR_BUFSIZE) { + error_report("invalid TPM output buffer size: " TARGET_FMT_lu, + data_out_size); + return H_P5; + } + + if (tpm_proxy->host_fd == -1) { + tpm_proxy->host_fd = open(tpm_proxy->host_path, O_RDWR); + if (tpm_proxy->host_fd == -1) { + error_report("failed to open TPM device %s: %d", + tpm_proxy->host_path, errno); + return H_RESOURCE; + } + } + + cpu_physical_memory_read(data_in, buf_in, data_in_size); + + do { + ret = write(tpm_proxy->host_fd, buf_in, data_in_size); + if (ret > 0) { + data_in_size -= ret; + } + } while ((ret >= 0 && data_in_size > 0) || (ret == -1 && errno == EINTR)); + + if (ret == -1) { + error_report("failed to write to TPM device %s: %d", + tpm_proxy->host_path, errno); + return H_RESOURCE; + } + + do { + ret = read(tpm_proxy->host_fd, buf_out, data_out_size); + } while (ret == 0 || (ret == -1 && errno == EINTR)); + + if (ret == -1) { + error_report("failed to read from TPM device %s: %d", + tpm_proxy->host_path, errno); + return H_RESOURCE; + } + + cpu_physical_memory_write(data_out, buf_out, ret); + args[0] = ret; + + return H_SUCCESS; +} + +static target_ulong h_tpm_comm(PowerPCCPU *cpu, + SpaprMachineState *spapr, + target_ulong opcode, + target_ulong *args) +{ + target_ulong op = args[0]; + SpaprTpmProxy *tpm_proxy = spapr->tpm_proxy; + + if (!tpm_proxy) { + error_report("TPM proxy not available"); + return H_FUNCTION; + } + + trace_spapr_h_tpm_comm(tpm_proxy->host_path, op); + + switch (op) { + case TPM_COMM_OP_EXECUTE: + return tpm_execute(tpm_proxy, args); + case TPM_COMM_OP_CLOSE_SESSION: + spapr_tpm_proxy_reset(tpm_proxy); + return H_SUCCESS; + default: + return H_PARAMETER; + } +} + +static void spapr_tpm_proxy_realize(DeviceState *d, Error **errp) +{ + SpaprTpmProxy *tpm_proxy = SPAPR_TPM_PROXY(d); + + if (tpm_proxy->host_path == NULL) { + error_setg(errp, "must specify 'host-path' option for device"); + return; + } + + tpm_proxy->host_fd = -1; + qemu_register_reset(spapr_tpm_proxy_reset, tpm_proxy); +} + +static void spapr_tpm_proxy_unrealize(DeviceState *d) +{ + SpaprTpmProxy *tpm_proxy = SPAPR_TPM_PROXY(d); + + qemu_unregister_reset(spapr_tpm_proxy_reset, tpm_proxy); +} + +static Property spapr_tpm_proxy_properties[] = { + DEFINE_PROP_STRING("host-path", SpaprTpmProxy, host_path), + DEFINE_PROP_END_OF_LIST(), +}; + +static void spapr_tpm_proxy_class_init(ObjectClass *k, 
void *data) +{ + DeviceClass *dk = DEVICE_CLASS(k); + + dk->realize = spapr_tpm_proxy_realize; + dk->unrealize = spapr_tpm_proxy_unrealize; + dk->user_creatable = true; + device_class_set_props(dk, spapr_tpm_proxy_properties); +} + +static const TypeInfo spapr_tpm_proxy_info = { + .name = TYPE_SPAPR_TPM_PROXY, + .parent = TYPE_DEVICE, + .instance_size = sizeof(SpaprTpmProxy), + .class_init = spapr_tpm_proxy_class_init, +}; + +static void spapr_tpm_proxy_register_types(void) +{ + type_register_static(&spapr_tpm_proxy_info); + spapr_register_hypercall(SVM_H_TPM_COMM, h_tpm_comm); +} + +type_init(spapr_tpm_proxy_register_types) diff --git a/hw/ppc/spapr_vio.c b/hw/ppc/spapr_vio.c new file mode 100644 index 000000000..b975ed29c --- /dev/null +++ b/hw/ppc/spapr_vio.c @@ -0,0 +1,741 @@ +/* + * QEMU sPAPR VIO code + * + * Copyright (c) 2010 David Gibson, IBM Corporation + * Based on the s390 virtio bus code: + * Copyright (c) 2009 Alexander Graf + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ + +#include "qemu/osdep.h" +#include "qemu/error-report.h" +#include "qapi/error.h" +#include "qapi/visitor.h" +#include "qemu/log.h" +#include "hw/loader.h" +#include "elf.h" +#include "hw/sysbus.h" +#include "sysemu/kvm.h" +#include "sysemu/device_tree.h" +#include "kvm_ppc.h" +#include "migration/vmstate.h" + +#include "hw/ppc/spapr.h" +#include "hw/ppc/spapr_vio.h" +#include "hw/ppc/fdt.h" +#include "trace.h" + +#include <libfdt.h> + +#define SPAPR_VIO_REG_BASE 0x71000000 + +static char *spapr_vio_get_dev_name(DeviceState *qdev) +{ + SpaprVioDevice *dev = VIO_SPAPR_DEVICE(qdev); + SpaprVioDeviceClass *pc = VIO_SPAPR_DEVICE_GET_CLASS(dev); + + /* Device tree style name device@reg */ + return g_strdup_printf("%s@%x", pc->dt_name, dev->reg); +} + +static void spapr_vio_bus_class_init(ObjectClass *klass, void *data) +{ + BusClass *k = BUS_CLASS(klass); + + k->get_dev_path = spapr_vio_get_dev_name; + k->get_fw_dev_path = spapr_vio_get_dev_name; +} + +static const TypeInfo spapr_vio_bus_info = { + .name = TYPE_SPAPR_VIO_BUS, + .parent = TYPE_BUS, + .class_init = spapr_vio_bus_class_init, + .instance_size = sizeof(SpaprVioBus), +}; + +SpaprVioDevice *spapr_vio_find_by_reg(SpaprVioBus *bus, uint32_t reg) +{ + BusChild *kid; + SpaprVioDevice *dev = NULL; + + QTAILQ_FOREACH(kid, &bus->bus.children, sibling) { + dev = (SpaprVioDevice *)kid->child; + if (dev->reg == reg) { + return dev; + } + } + + return NULL; +} + +static int vio_make_devnode(SpaprVioDevice *dev, + void *fdt) +{ + SpaprVioDeviceClass *pc = VIO_SPAPR_DEVICE_GET_CLASS(dev); + int vdevice_off, node_off, ret; + char *dt_name; + const char *dt_compatible; + + vdevice_off = fdt_path_offset(fdt, "/vdevice"); + if (vdevice_off < 0) { + return vdevice_off; + } + + dt_name = spapr_vio_get_dev_name(DEVICE(dev)); + node_off = fdt_add_subnode(fdt, vdevice_off, dt_name); + g_free(dt_name); + if (node_off < 0) { + return node_off; + } + + ret = fdt_setprop_cell(fdt,
node_off, "reg", dev->reg); + if (ret < 0) { + return ret; + } + + if (pc->dt_type) { + ret = fdt_setprop_string(fdt, node_off, "device_type", + pc->dt_type); + if (ret < 0) { + return ret; + } + } + + if (pc->get_dt_compatible) { + dt_compatible = pc->get_dt_compatible(dev); + } else { + dt_compatible = pc->dt_compatible; + } + + if (dt_compatible) { + ret = fdt_setprop_string(fdt, node_off, "compatible", + dt_compatible); + if (ret < 0) { + return ret; + } + } + + if (dev->irq) { + uint32_t ints_prop[2]; + + spapr_dt_irq(ints_prop, dev->irq, false); + ret = fdt_setprop(fdt, node_off, "interrupts", ints_prop, + sizeof(ints_prop)); + if (ret < 0) { + return ret; + } + } + + ret = spapr_tcet_dma_dt(fdt, node_off, "ibm,my-dma-window", dev->tcet); + if (ret < 0) { + return ret; + } + + if (pc->devnode) { + ret = (pc->devnode)(dev, fdt, node_off); + if (ret < 0) { + return ret; + } + } + + return node_off; +} + +/* + * CRQ handling + */ +static target_ulong h_reg_crq(PowerPCCPU *cpu, SpaprMachineState *spapr, + target_ulong opcode, target_ulong *args) +{ + target_ulong reg = args[0]; + target_ulong queue_addr = args[1]; + target_ulong queue_len = args[2]; + SpaprVioDevice *dev = spapr_vio_find_by_reg(spapr->vio_bus, reg); + + if (!dev) { + hcall_dprintf("Unit 0x" TARGET_FMT_lx " does not exist\n", reg); + return H_PARAMETER; + } + + /* We can't grok a queue size bigger than 256M for now */ + if (queue_len < 0x1000 || queue_len > 0x10000000) { + hcall_dprintf("Queue size too small or too big (0x" TARGET_FMT_lx + ")\n", queue_len); + return H_PARAMETER; + } + + /* Check queue alignment */ + if (queue_addr & 0xfff) { + hcall_dprintf("Queue not aligned (0x" TARGET_FMT_lx ")\n", queue_addr); + return H_PARAMETER; + } + + /* Check if device supports CRQs */ + if (!dev->crq.SendFunc) { + hcall_dprintf("Device does not support CRQ\n"); + return H_NOT_FOUND; + } + + /* Already a queue ? 
*/ + if (dev->crq.qsize) { + hcall_dprintf("CRQ already registered\n"); + return H_RESOURCE; + } + dev->crq.qladdr = queue_addr; + dev->crq.qsize = queue_len; + dev->crq.qnext = 0; + + trace_spapr_vio_h_reg_crq(reg, queue_addr, queue_len); + return H_SUCCESS; +} + +static target_ulong free_crq(SpaprVioDevice *dev) +{ + dev->crq.qladdr = 0; + dev->crq.qsize = 0; + dev->crq.qnext = 0; + + trace_spapr_vio_free_crq(dev->reg); + + return H_SUCCESS; +} + +static target_ulong h_free_crq(PowerPCCPU *cpu, SpaprMachineState *spapr, + target_ulong opcode, target_ulong *args) +{ + target_ulong reg = args[0]; + SpaprVioDevice *dev = spapr_vio_find_by_reg(spapr->vio_bus, reg); + + if (!dev) { + hcall_dprintf("Unit 0x" TARGET_FMT_lx " does not exist\n", reg); + return H_PARAMETER; + } + + return free_crq(dev); +} + +static target_ulong h_send_crq(PowerPCCPU *cpu, SpaprMachineState *spapr, + target_ulong opcode, target_ulong *args) +{ + target_ulong reg = args[0]; + target_ulong msg_hi = args[1]; + target_ulong msg_lo = args[2]; + SpaprVioDevice *dev = spapr_vio_find_by_reg(spapr->vio_bus, reg); + uint64_t crq_mangle[2]; + + if (!dev) { + hcall_dprintf("Unit 0x" TARGET_FMT_lx " does not exist\n", reg); + return H_PARAMETER; + } + crq_mangle[0] = cpu_to_be64(msg_hi); + crq_mangle[1] = cpu_to_be64(msg_lo); + + if (dev->crq.SendFunc) { + return dev->crq.SendFunc(dev, (uint8_t *)crq_mangle); + } + + return H_HARDWARE; +} + +static target_ulong h_enable_crq(PowerPCCPU *cpu, SpaprMachineState *spapr, + target_ulong opcode, target_ulong *args) +{ + target_ulong reg = args[0]; + SpaprVioDevice *dev = spapr_vio_find_by_reg(spapr->vio_bus, reg); + + if (!dev) { + hcall_dprintf("Unit 0x" TARGET_FMT_lx " does not exist\n", reg); + return H_PARAMETER; + } + + return 0; +} + +/* Returns negative error, 0 success, or positive: queue full */ +int spapr_vio_send_crq(SpaprVioDevice *dev, uint8_t *crq) +{ + int rc; + uint8_t byte; + + if (!dev->crq.qsize) { + error_report("spapr_vio_send_crq on uninitialized queue"); + return -1; + } + + /* Maybe do a fast path for KVM just writing to the pages */ + rc = spapr_vio_dma_read(dev, dev->crq.qladdr + dev->crq.qnext, &byte, 1); + if (rc) { + return rc; + } + if (byte != 0) { + return 1; + } + + rc = spapr_vio_dma_write(dev, dev->crq.qladdr + dev->crq.qnext + 8, + &crq[8], 8); + if (rc) { + return rc; + } + + kvmppc_eieio(); + + rc = spapr_vio_dma_write(dev, dev->crq.qladdr + dev->crq.qnext, crq, 8); + if (rc) { + return rc; + } + + dev->crq.qnext = (dev->crq.qnext + 16) % dev->crq.qsize; + + if (dev->signal_state & 1) { + spapr_vio_irq_pulse(dev); + } + + return 0; +} + +/* "quiesce" handling */ + +static void spapr_vio_quiesce_one(SpaprVioDevice *dev) +{ + if (dev->tcet) { + device_cold_reset(DEVICE(dev->tcet)); + } + free_crq(dev); +} + +void spapr_vio_set_bypass(SpaprVioDevice *dev, bool bypass) +{ + if (!dev->tcet) { + return; + } + + memory_region_set_enabled(&dev->mrbypass, bypass); + memory_region_set_enabled(spapr_tce_get_iommu(dev->tcet), !bypass); + + dev->tcet->bypass = bypass; +} + +static void rtas_set_tce_bypass(PowerPCCPU *cpu, SpaprMachineState *spapr, + uint32_t token, + uint32_t nargs, target_ulong args, + uint32_t nret, target_ulong rets) +{ + SpaprVioBus *bus = spapr->vio_bus; + SpaprVioDevice *dev; + uint32_t unit, enable; + + if (nargs != 2) { + rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); + return; + } + unit = rtas_ld(args, 0); + enable = rtas_ld(args, 1); + dev = spapr_vio_find_by_reg(bus, unit); + if (!dev) { + rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); +
return; + } + + if (!dev->tcet) { + rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); + return; + } + + spapr_vio_set_bypass(dev, !!enable); + + rtas_st(rets, 0, RTAS_OUT_SUCCESS); +} + +static void rtas_quiesce(PowerPCCPU *cpu, SpaprMachineState *spapr, + uint32_t token, + uint32_t nargs, target_ulong args, + uint32_t nret, target_ulong rets) +{ + SpaprVioBus *bus = spapr->vio_bus; + BusChild *kid; + SpaprVioDevice *dev = NULL; + + if (nargs != 0) { + rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); + return; + } + + QTAILQ_FOREACH(kid, &bus->bus.children, sibling) { + dev = (SpaprVioDevice *)kid->child; + spapr_vio_quiesce_one(dev); + } + + rtas_st(rets, 0, RTAS_OUT_SUCCESS); +} + +static SpaprVioDevice *reg_conflict(SpaprVioDevice *dev) +{ + SpaprVioBus *bus = SPAPR_VIO_BUS(dev->qdev.parent_bus); + BusChild *kid; + SpaprVioDevice *other; + + /* + * Check for a device other than the given one which is already + * using the requested address. We have to open code this because + * the given dev might already be in the list. + */ + QTAILQ_FOREACH(kid, &bus->bus.children, sibling) { + other = VIO_SPAPR_DEVICE(kid->child); + + if (other != dev && other->reg == dev->reg) { + return other; + } + } + + return 0; +} + +static void spapr_vio_busdev_reset(DeviceState *qdev) +{ + SpaprVioDevice *dev = VIO_SPAPR_DEVICE(qdev); + SpaprVioDeviceClass *pc = VIO_SPAPR_DEVICE_GET_CLASS(dev); + + /* Shut down the request queue and TCEs if necessary */ + spapr_vio_quiesce_one(dev); + + dev->signal_state = 0; + + spapr_vio_set_bypass(dev, false); + if (pc->reset) { + pc->reset(dev); + } +} + +/* + * The register property of a VIO device is defined in libvirt using + * 0x1000 as a base register number plus a 0x1000 increment. For the + * VIO tty device, the base number is changed to 0x30000000. QEMU uses + * a base register number of 0x71000000 and then a simple increment. + * + * The formula below tries to compute a unique index number from the + * register value that will be used to define the IRQ number of the + * VIO device. + * + * A maximum of 256 VIO devices is covered. Collisions are possible + * but they will be detected when the IRQ is claimed. + */ +static inline uint32_t spapr_vio_reg_to_irq(uint32_t reg) +{ + uint32_t irq; + + if (reg >= SPAPR_VIO_REG_BASE) { + /* + * VIO device register values when allocated by QEMU. For + * these, we simply mask the high bits to fit the overall + * range: [0x00 - 0xff]. + * + * The nvram VIO device (reg=0x71000000) is a static device of + * the pseries machine and so is always allocated by QEMU. Its + * IRQ number is 0x0. + */ + irq = reg & 0xff; + + } else if (reg >= 0x30000000) { + /* + * VIO tty devices register values, when allocated by libvirt, + * are mapped in range [0xf0 - 0xff], gives us a maximum of 16 + * vtys. + */ + irq = 0xf0 | ((reg >> 12) & 0xf); + + } else { + /* + * Other VIO devices register values, when allocated by + * libvirt, should be mapped in range [0x00 - 0xef]. Conflicts + * will be detected when IRQ is claimed. + */ + irq = (reg >> 12) & 0xff; + } + + return SPAPR_IRQ_VIO | irq; +} + +static void spapr_vio_busdev_realize(DeviceState *qdev, Error **errp) +{ + SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine()); + SpaprVioDevice *dev = (SpaprVioDevice *)qdev; + SpaprVioDeviceClass *pc = VIO_SPAPR_DEVICE_GET_CLASS(dev); + char *id; + + if (dev->reg != -1) { + /* + * Explicitly assigned address, just verify that no-one else + * is using it.
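 + * (For instance, two devices both given reg=0x30000000 on the + * command line would be rejected here with an address conflict.)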
We have to open code this + * rather than using spapr_vio_find_by_reg() because sdev + * itself is already in the list. + */ + SpaprVioDevice *other = reg_conflict(dev); + + if (other) { + error_setg(errp, "%s and %s devices conflict at address %#x", + object_get_typename(OBJECT(qdev)), + object_get_typename(OBJECT(&other->qdev)), + dev->reg); + return; + } + } else { + /* Need to assign an address */ + SpaprVioBus *bus = SPAPR_VIO_BUS(dev->qdev.parent_bus); + + do { + dev->reg = bus->next_reg++; + } while (reg_conflict(dev)); + } + + /* Don't overwrite ids assigned on the command line */ + if (!dev->qdev.id) { + id = spapr_vio_get_dev_name(DEVICE(dev)); + dev->qdev.id = id; + } + + dev->irq = spapr_vio_reg_to_irq(dev->reg); + + if (SPAPR_MACHINE_GET_CLASS(spapr)->legacy_irq_allocation) { + int irq = spapr_irq_findone(spapr, errp); + + if (irq < 0) { + return; + } + dev->irq = irq; + } + + if (spapr_irq_claim(spapr, dev->irq, false, errp) < 0) { + return; + } + + if (pc->rtce_window_size) { + uint32_t liobn = SPAPR_VIO_LIOBN(dev->reg); + + memory_region_init(&dev->mrroot, OBJECT(dev), "iommu-spapr-root", + MACHINE(spapr)->ram_size); + memory_region_init_alias(&dev->mrbypass, OBJECT(dev), + "iommu-spapr-bypass", get_system_memory(), + 0, MACHINE(spapr)->ram_size); + memory_region_add_subregion_overlap(&dev->mrroot, 0, &dev->mrbypass, 1); + address_space_init(&dev->as, &dev->mrroot, qdev->id); + + dev->tcet = spapr_tce_new_table(qdev, liobn); + spapr_tce_table_enable(dev->tcet, SPAPR_TCE_PAGE_SHIFT, 0, + pc->rtce_window_size >> SPAPR_TCE_PAGE_SHIFT); + dev->tcet->vdev = dev; + memory_region_add_subregion_overlap(&dev->mrroot, 0, + spapr_tce_get_iommu(dev->tcet), 2); + } + + pc->realize(dev, errp); +} + +static target_ulong h_vio_signal(PowerPCCPU *cpu, SpaprMachineState *spapr, + target_ulong opcode, + target_ulong *args) +{ + target_ulong reg = args[0]; + target_ulong mode = args[1]; + SpaprVioDevice *dev = spapr_vio_find_by_reg(spapr->vio_bus, reg); + SpaprVioDeviceClass *pc; + + if (!dev) { + return H_PARAMETER; + } + + pc = VIO_SPAPR_DEVICE_GET_CLASS(dev); + + if (mode & ~pc->signal_mask) { + return H_PARAMETER; + } + + dev->signal_state = mode; + + return H_SUCCESS; +} + +SpaprVioBus *spapr_vio_bus_init(void) +{ + SpaprVioBus *bus; + BusState *qbus; + DeviceState *dev; + + /* Create bridge device */ + dev = qdev_new(TYPE_SPAPR_VIO_BRIDGE); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + + /* Create bus on bridge device */ + qbus = qbus_new(TYPE_SPAPR_VIO_BUS, dev, "spapr-vio"); + bus = SPAPR_VIO_BUS(qbus); + bus->next_reg = SPAPR_VIO_REG_BASE; + + /* hcall-vio */ + spapr_register_hypercall(H_VIO_SIGNAL, h_vio_signal); + + /* hcall-crq */ + spapr_register_hypercall(H_REG_CRQ, h_reg_crq); + spapr_register_hypercall(H_FREE_CRQ, h_free_crq); + spapr_register_hypercall(H_SEND_CRQ, h_send_crq); + spapr_register_hypercall(H_ENABLE_CRQ, h_enable_crq); + + /* RTAS calls */ + spapr_rtas_register(RTAS_IBM_SET_TCE_BYPASS, "ibm,set-tce-bypass", + rtas_set_tce_bypass); + spapr_rtas_register(RTAS_QUIESCE, "quiesce", rtas_quiesce); + + return bus; +} + +static void spapr_vio_bridge_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->fw_name = "vdevice"; +} + +static const TypeInfo spapr_vio_bridge_info = { + .name = TYPE_SPAPR_VIO_BRIDGE, + .parent = TYPE_SYS_BUS_DEVICE, + .class_init = spapr_vio_bridge_class_init, +}; + +const VMStateDescription vmstate_spapr_vio = { + .name = "spapr_vio", + .version_id = 1, + .minimum_version_id = 1, + 
.fields = (VMStateField[]) { + /* Sanity check */ + VMSTATE_UINT32_EQUAL(reg, SpaprVioDevice, NULL), + VMSTATE_UINT32_EQUAL(irq, SpaprVioDevice, NULL), + + /* General VIO device state */ + VMSTATE_UINT64(signal_state, SpaprVioDevice), + VMSTATE_UINT64(crq.qladdr, SpaprVioDevice), + VMSTATE_UINT32(crq.qsize, SpaprVioDevice), + VMSTATE_UINT32(crq.qnext, SpaprVioDevice), + + VMSTATE_END_OF_LIST() + }, +}; + +static void vio_spapr_device_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *k = DEVICE_CLASS(klass); + k->realize = spapr_vio_busdev_realize; + k->reset = spapr_vio_busdev_reset; + k->bus_type = TYPE_SPAPR_VIO_BUS; +} + +static const TypeInfo spapr_vio_type_info = { + .name = TYPE_VIO_SPAPR_DEVICE, + .parent = TYPE_DEVICE, + .instance_size = sizeof(SpaprVioDevice), + .abstract = true, + .class_size = sizeof(SpaprVioDeviceClass), + .class_init = vio_spapr_device_class_init, +}; + +static void spapr_vio_register_types(void) +{ + type_register_static(&spapr_vio_bus_info); + type_register_static(&spapr_vio_bridge_info); + type_register_static(&spapr_vio_type_info); +} + +type_init(spapr_vio_register_types) + +static int compare_reg(const void *p1, const void *p2) +{ + SpaprVioDevice const *dev1, *dev2; + + dev1 = (SpaprVioDevice *)*(DeviceState **)p1; + dev2 = (SpaprVioDevice *)*(DeviceState **)p2; + + if (dev1->reg < dev2->reg) { + return -1; + } + if (dev1->reg == dev2->reg) { + return 0; + } + + /* dev1->reg > dev2->reg */ + return 1; +} + +void spapr_dt_vdevice(SpaprVioBus *bus, void *fdt) +{ + DeviceState *qdev, **qdevs; + BusChild *kid; + int i, num, ret = 0; + int node; + + _FDT(node = fdt_add_subnode(fdt, 0, "vdevice")); + + _FDT(fdt_setprop_string(fdt, node, "device_type", "vdevice")); + _FDT(fdt_setprop_string(fdt, node, "compatible", "IBM,vdevice")); + _FDT(fdt_setprop_cell(fdt, node, "#address-cells", 1)); + _FDT(fdt_setprop_cell(fdt, node, "#size-cells", 0)); + _FDT(fdt_setprop_cell(fdt, node, "#interrupt-cells", 2)); + _FDT(fdt_setprop(fdt, node, "interrupt-controller", NULL, 0)); + + /* Count qdevs on the bus list */ + num = 0; + QTAILQ_FOREACH(kid, &bus->bus.children, sibling) { + num++; + } + + /* Copy out into an array of pointers */ + qdevs = g_new(DeviceState *, num); + num = 0; + QTAILQ_FOREACH(kid, &bus->bus.children, sibling) { + qdevs[num++] = kid->child; + } + + /* Sort the array */ + qsort(qdevs, num, sizeof(qdev), compare_reg); + + /* Hack alert. Give the devices to libfdt in reverse order, we happen + * to know that will mean they are in forward order in the tree. 
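 + * This relies on fdt_add_subnode() splicing each new child in ahead + * of its existing siblings (observed libfdt behaviour rather than a + * documented guarantee), so inserting devices N-1..0 leaves them at + * 0..N-1 in the final tree.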
*/ + for (i = num - 1; i >= 0; i--) { + SpaprVioDevice *dev = (SpaprVioDevice *)(qdevs[i]); + SpaprVioDeviceClass *vdc = VIO_SPAPR_DEVICE_GET_CLASS(dev); + + ret = vio_make_devnode(dev, fdt); + if (ret < 0) { + error_report("Couldn't create device node /vdevice/%s@%"PRIx32, + vdc->dt_name, dev->reg); + exit(1); + } + } + + g_free(qdevs); +} + +gchar *spapr_vio_stdout_path(SpaprVioBus *bus) +{ + SpaprVioDevice *dev; + char *name, *path; + + dev = spapr_vty_get_default(bus); + if (!dev) { + return NULL; + } + + name = spapr_vio_get_dev_name(DEVICE(dev)); + path = g_strdup_printf("/vdevice/%s", name); + + g_free(name); + return path; +} diff --git a/hw/ppc/spapr_vof.c b/hw/ppc/spapr_vof.c new file mode 100644 index 000000000..40ce8fe00 --- /dev/null +++ b/hw/ppc/spapr_vof.c @@ -0,0 +1,167 @@ +/* + * SPAPR machine hooks to Virtual Open Firmware, + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ +#include "qemu/osdep.h" +#include "qemu-common.h" +#include "qapi/error.h" +#include "hw/ppc/spapr.h" +#include "hw/ppc/spapr_vio.h" +#include "hw/ppc/spapr_cpu_core.h" +#include "hw/ppc/fdt.h" +#include "hw/ppc/vof.h" +#include "sysemu/sysemu.h" +#include "qom/qom-qobject.h" +#include "trace.h" + +target_ulong spapr_h_vof_client(PowerPCCPU *cpu, SpaprMachineState *spapr, + target_ulong opcode, target_ulong *_args) +{ + int ret = vof_client_call(MACHINE(spapr), spapr->vof, spapr->fdt_blob, + ppc64_phys_to_real(_args[0])); + + if (ret) { + return H_PARAMETER; + } + return H_SUCCESS; +} + +void spapr_vof_client_dt_finalize(SpaprMachineState *spapr, void *fdt) +{ + char *stdout_path = spapr_vio_stdout_path(spapr->vio_bus); + + vof_build_dt(fdt, spapr->vof); + + if (spapr->vof->bootargs) { + int chosen; + + _FDT(chosen = fdt_path_offset(fdt, "/chosen")); + /* + * If the client did not change "bootargs", spapr_dt_chosen() must have + * stored machine->kernel_cmdline in it before getting here. + */ + _FDT(fdt_setprop_string(fdt, chosen, "bootargs", spapr->vof->bootargs)); + } + + /* + * SLOF-less setup requires an open instance of stdout for early + * kernel printk. By now all phandles are settled so we can open + * the default serial console. + */ + if (stdout_path) { + _FDT(vof_client_open_store(fdt, spapr->vof, "/chosen", "stdout", + stdout_path)); + } +} + +void spapr_vof_reset(SpaprMachineState *spapr, void *fdt, Error **errp) +{ + target_ulong stack_ptr; + Vof *vof = spapr->vof; + PowerPCCPU *first_ppc_cpu = POWERPC_CPU(first_cpu); + + vof_init(vof, spapr->rma_size, errp); + + stack_ptr = vof_claim(vof, 0, VOF_STACK_SIZE, VOF_STACK_SIZE); + if (stack_ptr == -1) { + error_setg(errp, "Memory allocation for stack failed"); + return; + } + /* Stack grows downwards plus reserve space for the minimum stack frame */ + stack_ptr += VOF_STACK_SIZE - 0x20; + + if (spapr->kernel_size && + vof_claim(vof, spapr->kernel_addr, spapr->kernel_size, 0) == -1) { + error_setg(errp, "Memory for kernel is in use"); + return; + } + + if (spapr->initrd_size && + vof_claim(vof, spapr->initrd_base, spapr->initrd_size, 0) == -1) { + error_setg(errp, "Memory for initramdisk is in use"); + return; + } + + spapr_vof_client_dt_finalize(spapr, fdt); + + spapr_cpu_set_entry_state(first_ppc_cpu, SPAPR_ENTRY_POINT, + stack_ptr, spapr->initrd_base, + spapr->initrd_size); + /* VOF is 32bit BE so enforce MSR here */ + first_ppc_cpu->env.msr &= ~((1ULL << MSR_SF) | (1ULL << MSR_LE)); + + /* + * At this point the expected allocation map is: + * + * 0..c38 - the initial firmware + * 8000..10000 - stack + * 400000.. - kernel + * 3ea0000.. 
- initramdisk + * + * We skip writing FDT as nothing expects it; OF client interface is + * going to be used for reading the device tree. + */ +} + +void spapr_vof_quiesce(MachineState *ms) +{ + SpaprMachineState *spapr = SPAPR_MACHINE(ms); + + spapr->fdt_size = fdt_totalsize(spapr->fdt_blob); + spapr->fdt_initial_size = spapr->fdt_size; +} + +bool spapr_vof_setprop(MachineState *ms, const char *path, const char *propname, + void *val, int vallen) +{ + SpaprMachineState *spapr = SPAPR_MACHINE(ms); + + /* + * We only allow changing properties which we know how to update in QEMU + * OR + * the ones which we know that they need to survive during "quiesce". + */ + + if (strcmp(path, "/rtas") == 0) { + if (strcmp(propname, "linux,rtas-base") == 0 || + strcmp(propname, "linux,rtas-entry") == 0) { + /* These need to survive quiesce so let them store in the FDT */ + return true; + } + } + + if (strcmp(path, "/chosen") == 0) { + if (strcmp(propname, "bootargs") == 0) { + Vof *vof = spapr->vof; + + g_free(vof->bootargs); + vof->bootargs = g_strndup(val, vallen); + return true; + } + if (strcmp(propname, "linux,initrd-start") == 0) { + if (vallen == sizeof(uint32_t)) { + spapr->initrd_base = ldl_be_p(val); + return true; + } + if (vallen == sizeof(uint64_t)) { + spapr->initrd_base = ldq_be_p(val); + return true; + } + return false; + } + if (strcmp(propname, "linux,initrd-end") == 0) { + if (vallen == sizeof(uint32_t)) { + spapr->initrd_size = ldl_be_p(val) - spapr->initrd_base; + return true; + } + if (vallen == sizeof(uint64_t)) { + spapr->initrd_size = ldq_be_p(val) - spapr->initrd_base; + return true; + } + return false; + } + } + + return true; +} diff --git a/hw/ppc/trace-events b/hw/ppc/trace-events new file mode 100644 index 000000000..3bf43fa34 --- /dev/null +++ b/hw/ppc/trace-events @@ -0,0 +1,143 @@ +# See docs/devel/tracing.rst for syntax documentation. 
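+# Each line below declares one trace point: name(typed args) "printf-style format".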
+ +# spapr_pci.c +spapr_pci_msi(const char *msg, uint32_t ca) "%s (cfg=0x%x)" +spapr_pci_msi_setup(const char *name, unsigned vector, uint64_t addr) "dev\"%s\" vector %u, addr=0x%"PRIx64 +spapr_pci_rtas_ibm_change_msi(unsigned cfg, unsigned func, unsigned req, unsigned first) "cfgaddr 0x%x func %u, requested %u, first irq %u" +spapr_pci_rtas_ibm_query_interrupt_source_number(unsigned ioa, unsigned intr) "queries for #%u, IRQ%u" +spapr_pci_msi_write(uint64_t addr, uint64_t data, uint32_t dt_irq) "@0x%"PRIx64"<=0x%"PRIx64" IRQ %u" +spapr_pci_lsi_set(const char *busname, int pin, uint32_t irq) "%s PIN%d IRQ %u" +spapr_pci_msi_retry(unsigned config_addr, unsigned req_num, unsigned max_irqs) "Guest device at 0x%x asked %u, have only %u" + +# spapr_hcall.c +spapr_cas_continue(unsigned long n) "Copy changes to the guest: %ld bytes" +spapr_cas_pvr(uint32_t cur_pvr, bool explicit_match, uint32_t new_pvr) "current=0x%x, explicit_match=%u, new=0x%x" +spapr_h_resize_hpt_prepare(uint64_t flags, uint64_t shift) "flags=0x%"PRIx64", shift=%"PRIu64 +spapr_h_resize_hpt_commit(uint64_t flags, uint64_t shift) "flags=0x%"PRIx64", shift=%"PRIu64 +spapr_update_dt(unsigned cb) "New blob %u bytes" +spapr_update_dt_failed_size(unsigned cbold, unsigned cbnew, unsigned magic) "Old blob %u bytes, new blob %u bytes, magic 0x%x" +spapr_update_dt_failed_check(unsigned cbold, unsigned cbnew, unsigned magic) "Old blob %u bytes, new blob %u bytes, magic 0x%x" + +# spapr_tpm_proxy.c +spapr_h_tpm_comm(const char *device_path, uint64_t operation) "tpm_device_path=%s operation=0x%"PRIx64 +spapr_tpm_execute(uint64_t data_in, uint64_t data_in_sz, uint64_t data_out, uint64_t data_out_sz) "data_in=0x%"PRIx64", data_in_sz=%"PRIu64", data_out=0x%"PRIx64", data_out_sz=%"PRIu64 + +# spapr_iommu.c +spapr_iommu_put(uint64_t liobn, uint64_t ioba, uint64_t tce, uint64_t ret) "liobn=0x%"PRIx64" ioba=0x%"PRIx64" tce=0x%"PRIx64" ret=%"PRId64 +spapr_iommu_get(uint64_t liobn, uint64_t ioba, uint64_t ret, uint64_t tce) "liobn=0x%"PRIx64" ioba=0x%"PRIx64" ret=%"PRId64" tce=0x%"PRIx64 +spapr_iommu_indirect(uint64_t liobn, uint64_t ioba, uint64_t tce, uint64_t iobaN, uint64_t tceN, uint64_t ret) "liobn=0x%"PRIx64" ioba=0x%"PRIx64" tcelist=0x%"PRIx64" iobaN=0x%"PRIx64" tceN=0x%"PRIx64" ret=%"PRId64 +spapr_iommu_stuff(uint64_t liobn, uint64_t ioba, uint64_t tce_value, uint64_t npages, uint64_t ret) "liobn=0x%"PRIx64" ioba=0x%"PRIx64" tcevalue=0x%"PRIx64" npages=%"PRId64" ret=%"PRId64 +spapr_iommu_pci_put(uint64_t liobn, uint64_t ioba, uint64_t tce, uint64_t ret) "liobn=0x%"PRIx64" ioba=0x%"PRIx64" tce=0x%"PRIx64" ret=%"PRId64 +spapr_iommu_pci_get(uint64_t liobn, uint64_t ioba, uint64_t ret, uint64_t tce) "liobn=0x%"PRIx64" ioba=0x%"PRIx64" ret=%"PRId64" tce=0x%"PRIx64 +spapr_iommu_pci_indirect(uint64_t liobn, uint64_t ioba, uint64_t tce, uint64_t iobaN, uint64_t tceN, uint64_t ret) "liobn=0x%"PRIx64" ioba=0x%"PRIx64" tcelist=0x%"PRIx64" iobaN=0x%"PRIx64" tceN=0x%"PRIx64" ret=%"PRId64 +spapr_iommu_pci_stuff(uint64_t liobn, uint64_t ioba, uint64_t tce_value, uint64_t npages, uint64_t ret) "liobn=0x%"PRIx64" ioba=0x%"PRIx64" tcevalue=0x%"PRIx64" npages=%"PRId64" ret=%"PRId64 +spapr_iommu_xlate(uint64_t liobn, uint64_t ioba, uint64_t tce, unsigned perm, unsigned pgsize) "liobn=0x%"PRIx64" 0x%"PRIx64" -> 0x%"PRIx64" perm=%u mask=0x%x" +spapr_iommu_new_table(uint64_t liobn, void *table, int fd) "liobn=0x%"PRIx64" table=%p fd=%d" +spapr_iommu_pre_save(uint64_t liobn, uint32_t nb, uint64_t offs, uint32_t ps) "liobn=%"PRIx64" %"PRIx32" bus_offset=0x%"PRIx64" 
ps=%"PRIu32 +spapr_iommu_post_load(uint64_t liobn, uint32_t pre_nb, uint32_t post_nb, uint64_t offs, uint32_t ps) "liobn=%"PRIx64" %"PRIx32" => 0x%"PRIx32" bus_offset=0x%"PRIx64" ps=%"PRIu32 + +# spapr_rtas_ddw.c +spapr_iommu_ddw_query(uint64_t buid, uint32_t cfgaddr, unsigned wa, uint64_t win_size, uint32_t pgmask) "buid=0x%"PRIx64" addr=0x%"PRIx32", %u windows available, max window size=0x%"PRIx64", mask=0x%"PRIx32 +spapr_iommu_ddw_create(uint64_t buid, uint32_t cfgaddr, uint64_t pg_size, uint64_t req_size, uint64_t start, uint32_t liobn) "buid=0x%"PRIx64" addr=0x%"PRIx32", page size=0x%"PRIx64", requested=0x%"PRIx64", start addr=0x%"PRIx64", liobn=0x%"PRIx32 +spapr_iommu_ddw_remove(uint32_t liobn) "liobn=0x%"PRIx32 +spapr_iommu_ddw_reset(uint64_t buid, uint32_t cfgaddr) "buid=0x%"PRIx64" addr=0x%"PRIx32 + +# spapr_drc.c +spapr_drc_set_isolation_state(uint32_t index, int state) "drc: 0x%"PRIx32", state: 0x%"PRIx32 +spapr_drc_set_isolation_state_finalizing(uint32_t index) "drc: 0x%"PRIx32 +spapr_drc_set_dr_indicator(uint32_t index, int state) "drc: 0x%"PRIx32", state: 0x%x" +spapr_drc_set_allocation_state(uint32_t index, int state) "drc: 0x%"PRIx32", state: 0x%x" +spapr_drc_set_allocation_state_finalizing(uint32_t index) "drc: 0x%"PRIx32 +spapr_drc_set_configured(uint32_t index) "drc: 0x%"PRIx32 +spapr_drc_attach(uint32_t index) "drc: 0x%"PRIx32 +spapr_drc_unplug_request(uint32_t index) "drc: 0x%"PRIx32 +spapr_drc_awaiting_quiesce(uint32_t index) "drc: 0x%"PRIx32 +spapr_drc_reset(uint32_t index) "drc: 0x%"PRIx32 +spapr_drc_realize(uint32_t index) "drc: 0x%"PRIx32 +spapr_drc_realize_child(uint32_t index, const char *childname) "drc: 0x%"PRIx32", child name: %s" +spapr_drc_realize_complete(uint32_t index) "drc: 0x%"PRIx32 +spapr_drc_unrealize(uint32_t index) "drc: 0x%"PRIx32 + +# spapr_ovec.c +spapr_ovec_parse_vector(int vector, int byte, uint16_t vec_len, uint8_t entry) "read guest vector %2d, byte %3d / %3d: 0x%.2x" +spapr_ovec_populate_dt(int byte, uint16_t vec_len, uint8_t entry) "encoding guest vector byte %3d / %3d: 0x%.2x" + +# spapr_drc.c +spapr_rtas_get_sensor_state_not_supported(uint32_t index, uint32_t type) "sensor index: 0x%"PRIx32", type: %"PRIu32 +spapr_rtas_get_sensor_state_invalid(uint32_t index) "sensor index: 0x%"PRIx32 +spapr_rtas_ibm_configure_connector_invalid(uint32_t index) "DRC index: 0x%"PRIx32 + +# spapr_vio.c +spapr_vio_h_reg_crq(uint64_t reg, uint64_t queue_addr, uint64_t queue_len) "CRQ for dev 0x%" PRIx64 " registered at 0x%" PRIx64 "/0x%" PRIx64 +spapr_vio_free_crq(uint32_t reg) "CRQ for dev 0x%" PRIx32 " freed" + +# vof.c +vof_error_str_truncated(const char *s, int len) "%s truncated to %d" +vof_error_param(const char *method, int nargscheck, int nretcheck, int nargs, int nret) "%s takes/returns %d/%d, not %d/%d" +vof_error_unknown_service(const char *service, int nargs, int nret) "\"%s\" args=%d rets=%d" +vof_error_unknown_method(const char *method) "\"%s\"" +vof_error_unknown_ihandle_close(uint32_t ih) "ih=0x%x" +vof_error_unknown_path(const char *path) "\"%s\"" +vof_error_write(uint32_t ih) "ih=0x%x" +vof_finddevice(const char *path, uint32_t ph) "\"%s\" => ph=0x%x" +vof_claim(uint32_t virt, uint32_t size, uint32_t align, uint32_t ret) "virt=0x%x size=0x%x align=0x%x => 0x%x" +vof_release(uint32_t virt, uint32_t size, uint32_t ret) "virt=0x%x size=0x%x => 0x%x" +vof_method(uint32_t ihandle, const char *method, uint32_t param, uint32_t ret, uint32_t ret2) "ih=0x%x \"%s\"(0x%x) => 0x%x 0x%x" +vof_getprop(uint32_t ph, const char *prop, uint32_t ret, const 
char *val) "ph=0x%x \"%s\" => len=%d [%s]" +vof_getproplen(uint32_t ph, const char *prop, uint32_t ret) "ph=0x%x \"%s\" => len=%d" +vof_setprop(uint32_t ph, const char *prop, const char *val, uint32_t vallen, uint32_t ret) "ph=0x%x \"%s\" [%s] len=%d => ret=%d" +vof_open(const char *path, uint32_t ph, uint32_t ih) "%s ph=0x%x => ih=0x%x" +vof_interpret(const char *cmd, uint32_t param1, uint32_t param2, uint32_t ret, uint32_t ret2) "[%s] 0x%x 0x%x => 0x%x 0x%x" +vof_package_to_path(uint32_t ph, const char *tmp, int ret) "ph=0x%x => %s len=%d" +vof_instance_to_path(uint32_t ih, uint32_t ph, const char *tmp, int ret) "ih=0x%x ph=0x%x => %s len=%d" +vof_instance_to_package(uint32_t ih, uint32_t ph) "ih=0x%x => ph=0x%x" +vof_write(uint32_t ih, unsigned cb, const char *msg) "ih=0x%x [%u] \"%s\"" +vof_avail(uint64_t start, uint64_t end, uint64_t size) "0x%"PRIx64"..0x%"PRIx64" size=0x%"PRIx64 +vof_claimed(uint64_t start, uint64_t end, uint64_t size) "0x%"PRIx64"..0x%"PRIx64" size=0x%"PRIx64 + +# ppc.c +ppc_tb_adjust(uint64_t offs1, uint64_t offs2, int64_t diff, int64_t seconds) "adjusted from 0x%"PRIx64" to 0x%"PRIx64", diff %"PRId64" (%"PRId64"s)" +ppc_tb_load(uint64_t tb) "tb 0x%016" PRIx64 +ppc_tb_store(uint64_t tb, uint64_t offset) "tb 0x%016" PRIx64 " offset 0x%08" PRIx64 + +ppc_decr_load(uint64_t tb) "decr 0x%016" PRIx64 +ppc_decr_excp(const char *action) "%s decrementer" +ppc_decr_store(uint32_t nr_bits, uint64_t decr, uint64_t value) "%d-bit 0x%016" PRIx64 " => 0x%016" PRIx64 + +ppc4xx_fit(uint32_t ir, uint64_t tcr, uint64_t tsr) "ir %d TCR 0x%" PRIx64 " TSR 0x%" PRIx64 +ppc4xx_pit_stop(void) "" +ppc4xx_pit_start(uint64_t reload) "PIT 0x%016" PRIx64 +ppc4xx_pit(uint32_t ar, uint32_t ir, uint64_t tcr, uint64_t tsr, uint64_t reload) "ar %d ir %d TCR 0x%" PRIx64 " TSR 0x%" PRIx64 " PIT 0x%016" PRIx64 +ppc4xx_wdt(uint64_t tcr, uint64_t tsr) "TCR 0x%" PRIx64 " TSR 0x%" PRIx64 +ppc40x_store_pit(uint64_t value) "val 0x%" PRIx64 +ppc40x_set_tb_clk(uint32_t value) "new frequency %" PRIu32 +ppc40x_timers_init(uint32_t value) "frequency %" PRIu32 + +ppc_irq_set(void *env, uint32_t pin, uint32_t level) "env [%p] pin %d level %d" +ppc_irq_set_exit(void *env, uint32_t n_IRQ, uint32_t level, uint32_t pending, uint32_t request) "env [%p] n_IRQ %d level %d => pending 0x%08" PRIx32 " req 0x%08" PRIx32 +ppc_irq_set_state(const char *name, uint32_t level) "\"%s\" level %d" +ppc_irq_reset(const char *name) "%s" +ppc_irq_cpu(const char *action) "%s" + +# prep_systemio.c +prep_systemio_read(uint32_t addr, uint32_t val) "read addr=0x%x val=0x%x" +prep_systemio_write(uint32_t addr, uint32_t val) "write addr=0x%x val=0x%x" + +# rs6000_mc.c +rs6000mc_id_read(uint32_t addr, uint32_t val) "read addr=0x%x val=0x%x" +rs6000mc_presence_read(uint32_t addr, uint32_t val) "read addr=0x%x val=0x%x" +rs6000mc_size_read(uint32_t addr, uint32_t val) "read addr=0x%x val=0x%x" +rs6000mc_size_write(uint32_t addr, uint32_t val) "write addr=0x%x val=0x%x" +rs6000mc_parity_read(uint32_t addr, uint32_t val) "read addr=0x%x val=0x%x" + +# ppc4xx_pci.c +ppc4xx_pci_map_irq(int32_t devfn, int irq_num, int slot) "devfn 0x%x irq %d -> %d" +ppc4xx_pci_set_irq(int irq_num) "PCI irq %d" + +# ppc440_pcix.c +ppc440_pcix_map_irq(int32_t devfn, int irq_num, int slot) "devfn 0x%x irq %d -> %d" +ppc440_pcix_set_irq(int irq_num) "PCI irq %d" +ppc440_pcix_update_pim(int idx, uint64_t size, uint64_t la) "Added window %d of size=0x%" PRIx64 " to CPU=0x%" PRIx64 +ppc440_pcix_update_pom(int idx, uint32_t size, uint64_t la, uint64_t pcia) "Added window %d 
of size=0x%x from CPU=0x%" PRIx64 " to PCI=0x%" PRIx64 +ppc440_pcix_reg_read(uint64_t addr, uint32_t val) "addr 0x%" PRIx64 " = 0x%" PRIx32 +ppc440_pcix_reg_write(uint64_t addr, uint32_t val, uint32_t size) "addr 0x%" PRIx64 " = 0x%" PRIx32 " size 0x%" PRIx32 diff --git a/hw/ppc/trace.h b/hw/ppc/trace.h new file mode 100644 index 000000000..87c4198e6 --- /dev/null +++ b/hw/ppc/trace.h @@ -0,0 +1 @@ +#include "trace/trace-hw_ppc.h" diff --git a/hw/ppc/virtex_ml507.c b/hw/ppc/virtex_ml507.c new file mode 100644 index 000000000..9c575403b --- /dev/null +++ b/hw/ppc/virtex_ml507.c @@ -0,0 +1,316 @@ +/* + * Model of Xilinx Virtex5 ML507 PPC-440 refdesign. + * + * Copyright (c) 2010 Edgar E. Iglesias. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "qemu-common.h" +#include "qemu/datadir.h" +#include "qemu/units.h" +#include "cpu.h" +#include "hw/sysbus.h" +#include "hw/char/serial.h" +#include "hw/block/flash.h" +#include "sysemu/sysemu.h" +#include "sysemu/reset.h" +#include "hw/boards.h" +#include "sysemu/device_tree.h" +#include "hw/loader.h" +#include "elf.h" +#include "qapi/error.h" +#include "qemu/error-report.h" +#include "qemu/option.h" + +#include "hw/intc/ppc-uic.h" +#include "hw/ppc/ppc.h" +#include "hw/ppc/ppc4xx.h" +#include "hw/qdev-properties.h" +#include "ppc405.h" + +#define EPAPR_MAGIC (0x45504150) +#define FLASH_SIZE (16 * MiB) + +#define INTC_BASEADDR 0x81800000 +#define UART16550_BASEADDR 0x83e01003 +#define TIMER_BASEADDR 0x83c00000 +#define PFLASH_BASEADDR 0xfc000000 + +#define TIMER_IRQ 3 +#define UART16550_IRQ 9 + +static struct boot_info +{ + uint32_t bootstrap_pc; + uint32_t cmdline; + uint32_t fdt; + uint32_t ima_size; + void *vfdt; +} boot_info; + +/* Create reset TLB entries for BookE, spanning the 32bit addr space. 
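+ * Two 2 GiB entries map 0..0xffffffff one to one, so the kernel can run + * before it sets up its own page tables.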
*/ +static void mmubooke_create_initial_mapping(CPUPPCState *env, + target_ulong va, + hwaddr pa) +{ + ppcemb_tlb_t *tlb = &env->tlb.tlbe[0]; + + tlb->attr = 0; + tlb->prot = PAGE_VALID | ((PAGE_READ | PAGE_WRITE | PAGE_EXEC) << 4); + tlb->size = 1U << 31; /* up to 0x80000000 */ + tlb->EPN = va & TARGET_PAGE_MASK; + tlb->RPN = pa & TARGET_PAGE_MASK; + tlb->PID = 0; + + tlb = &env->tlb.tlbe[1]; + tlb->attr = 0; + tlb->prot = PAGE_VALID | ((PAGE_READ | PAGE_WRITE | PAGE_EXEC) << 4); + tlb->size = 1U << 31; /* up to 0xffffffff */ + tlb->EPN = 0x80000000 & TARGET_PAGE_MASK; + tlb->RPN = 0x80000000 & TARGET_PAGE_MASK; + tlb->PID = 0; +} + +static PowerPCCPU *ppc440_init_xilinx(const char *cpu_type, uint32_t sysclk) +{ + PowerPCCPU *cpu; + CPUPPCState *env; + DeviceState *uicdev; + SysBusDevice *uicsbd; + + cpu = POWERPC_CPU(cpu_create(cpu_type)); + env = &cpu->env; + + ppc_booke_timers_init(cpu, sysclk, 0/* no flags */); + + ppc_dcr_init(env, NULL, NULL); + + /* interrupt controller */ + uicdev = qdev_new(TYPE_PPC_UIC); + uicsbd = SYS_BUS_DEVICE(uicdev); + + object_property_set_link(OBJECT(uicdev), "cpu", OBJECT(cpu), + &error_fatal); + sysbus_realize_and_unref(uicsbd, &error_fatal); + + sysbus_connect_irq(uicsbd, PPCUIC_OUTPUT_INT, + ((qemu_irq *)env->irq_inputs)[PPC40x_INPUT_INT]); + sysbus_connect_irq(uicsbd, PPCUIC_OUTPUT_CINT, + ((qemu_irq *)env->irq_inputs)[PPC40x_INPUT_CINT]); + + /* This board doesn't wire anything up to the inputs of the UIC. */ + return cpu; +} + +static void main_cpu_reset(void *opaque) +{ + PowerPCCPU *cpu = opaque; + CPUPPCState *env = &cpu->env; + struct boot_info *bi = env->load_info; + + cpu_reset(CPU(cpu)); + /* Linux Kernel Parameters (passing device tree): + * r3: pointer to the fdt + * r4: 0 + * r5: 0 + * r6: epapr magic + * r7: size of IMA in bytes + * r8: 0 + * r9: 0 + */ + env->gpr[1] = (16 * MiB) - 8; + /* Provide a device-tree. */ + env->gpr[3] = bi->fdt; + env->nip = bi->bootstrap_pc; + + /* Create a mapping for the kernel. */ + mmubooke_create_initial_mapping(env, 0, 0); + env->gpr[6] = tswap32(EPAPR_MAGIC); + env->gpr[7] = bi->ima_size; +} + +#define BINARY_DEVICE_TREE_FILE "virtex-ml507.dtb" +static int xilinx_load_device_tree(hwaddr addr, + uint32_t ramsize, + hwaddr initrd_base, + hwaddr initrd_size, + const char *kernel_cmdline) +{ + char *path; + int fdt_size; + void *fdt = NULL; + int r; + const char *dtb_filename; + + dtb_filename = current_machine->dtb; + if (dtb_filename) { + fdt = load_device_tree(dtb_filename, &fdt_size); + if (!fdt) { + error_report("Error while loading device tree file '%s'", + dtb_filename); + } + } else { + /* Try the local "ppc.dtb" override. 
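+ * (falling back to the virtex-ml507.dtb shipped with QEMU if absent)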
 */ + fdt = load_device_tree("ppc.dtb", &fdt_size); + if (!fdt) { + path = qemu_find_file(QEMU_FILE_TYPE_BIOS, BINARY_DEVICE_TREE_FILE); + if (path) { + fdt = load_device_tree(path, &fdt_size); + g_free(path); + } + } + } + if (!fdt) { + return 0; + } + + r = qemu_fdt_setprop_cell(fdt, "/chosen", "linux,initrd-start", + initrd_base); + if (r < 0) { + error_report("couldn't set /chosen/linux,initrd-start"); + } + + r = qemu_fdt_setprop_cell(fdt, "/chosen", "linux,initrd-end", + (initrd_base + initrd_size)); + if (r < 0) { + error_report("couldn't set /chosen/linux,initrd-end"); + } + + r = qemu_fdt_setprop_string(fdt, "/chosen", "bootargs", kernel_cmdline); + if (r < 0) { + fprintf(stderr, "couldn't set /chosen/bootargs\n"); + } + cpu_physical_memory_write(addr, fdt, fdt_size); + g_free(fdt); + return fdt_size; +} + +static void virtex_init(MachineState *machine) +{ + const char *kernel_filename = machine->kernel_filename; + const char *kernel_cmdline = machine->kernel_cmdline; + hwaddr initrd_base = 0; + int initrd_size = 0; + MemoryRegion *address_space_mem = get_system_memory(); + DeviceState *dev; + PowerPCCPU *cpu; + CPUPPCState *env; + hwaddr ram_base = 0; + DriveInfo *dinfo; + qemu_irq irq[32], *cpu_irq; + int kernel_size; + int i; + + /* init CPUs */ + cpu = ppc440_init_xilinx(machine->cpu_type, 400000000); + env = &cpu->env; + + if (env->mmu_model != POWERPC_MMU_BOOKE) { + error_report("MMU model %i not supported by this machine", + env->mmu_model); + exit(1); + } + + qemu_register_reset(main_cpu_reset, cpu); + + memory_region_add_subregion(address_space_mem, ram_base, machine->ram); + + dinfo = drive_get(IF_PFLASH, 0, 0); + pflash_cfi01_register(PFLASH_BASEADDR, "virtex.flash", FLASH_SIZE, + dinfo ? blk_by_legacy_dinfo(dinfo) : NULL, + 64 * KiB, 1, 0x89, 0x18, 0x0000, 0x0, 1); + + cpu_irq = (qemu_irq *) &env->irq_inputs[PPC40x_INPUT_INT]; + dev = qdev_new("xlnx.xps-intc"); + qdev_prop_set_uint32(dev, "kind-of-intr", 0); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, INTC_BASEADDR); + sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, cpu_irq[0]); + for (i = 0; i < 32; i++) { + irq[i] = qdev_get_gpio_in(dev, i); + } + + serial_mm_init(address_space_mem, UART16550_BASEADDR, 2, irq[UART16550_IRQ], + 115200, serial_hd(0), DEVICE_LITTLE_ENDIAN); + + /* 2 timers at irq 3 @ 62 MHz. */ + dev = qdev_new("xlnx.xps-timer"); + qdev_prop_set_uint32(dev, "one-timer-only", 0); + qdev_prop_set_uint32(dev, "clock-frequency", 62 * 1000000); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, TIMER_BASEADDR); + sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, irq[TIMER_IRQ]); + + if (kernel_filename) { + uint64_t entry, high; + hwaddr boot_offset; + + /* Boot a kernel ELF binary. */ + kernel_size = load_elf(kernel_filename, NULL, NULL, NULL, + &entry, NULL, &high, NULL, 1, PPC_ELF_MACHINE, + 0, 0); + boot_info.bootstrap_pc = entry & 0x00ffffff; + + if (kernel_size < 0) { + boot_offset = 0x1200000; + /* If loading the ELF failed, try a raw image. */ + kernel_size = load_image_targphys(kernel_filename, + boot_offset, + machine->ram_size); + boot_info.bootstrap_pc = boot_offset; + high = boot_info.bootstrap_pc + kernel_size + 8192; + } + + boot_info.ima_size = kernel_size; + + /* Load initrd. 
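+ * It goes right above the kernel image, rounded up to a 4 byte boundary.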
 */ + if (machine->initrd_filename) { + initrd_base = high = ROUND_UP(high, 4); + initrd_size = load_image_targphys(machine->initrd_filename, + high, machine->ram_size - high); + + if (initrd_size < 0) { + error_report("couldn't load ram disk '%s'", + machine->initrd_filename); + exit(1); + } + high = ROUND_UP(high + initrd_size, 4); + } + + /* Provide a device-tree. */ + boot_info.fdt = high + (8192 * 2); + boot_info.fdt &= ~8191; + + xilinx_load_device_tree(boot_info.fdt, machine->ram_size, + initrd_base, initrd_size, + kernel_cmdline); + } + env->load_info = &boot_info; +} + +static void virtex_machine_init(MachineClass *mc) +{ + mc->desc = "Xilinx Virtex ML507 reference design"; + mc->init = virtex_init; + mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("440-xilinx"); + mc->default_ram_id = "ram"; +} + +DEFINE_MACHINE("virtex-ml507", virtex_machine_init) diff --git a/hw/ppc/vof.c b/hw/ppc/vof.c new file mode 100644 index 000000000..73adc44ec --- /dev/null +++ b/hw/ppc/vof.c @@ -0,0 +1,1062 @@ +/* + * QEMU PowerPC Virtual Open Firmware. + * + * This implements the client interface from OpenFirmware IEEE1275 on the QEMU + * side, leaving only a very basic firmware in the VM. + * + * Copyright (c) 2021 IBM Corporation. + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "qemu-common.h" +#include "qemu/timer.h" +#include "qemu/range.h" +#include "qemu/units.h" +#include "qemu/log.h" +#include "qapi/error.h" +#include "exec/ram_addr.h" +#include "exec/address-spaces.h" +#include "hw/ppc/vof.h" +#include "hw/ppc/fdt.h" +#include "sysemu/runstate.h" +#include "qom/qom-qobject.h" +#include "trace.h" + +#include <libfdt.h> + +/* + * OF 1275 "nextprop" description suggests it is 32 bytes max, but + * LoPAPR defines "ibm,query-interrupt-source-number" which is 33 chars long. 
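+ * Use 64 bytes to cover both with headroom.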
+ */ +#define OF_PROPNAME_LEN_MAX 64 + +#define VOF_MAX_PATH 256 +#define VOF_MAX_SETPROPLEN 2048 +#define VOF_MAX_METHODLEN 256 +#define VOF_MAX_FORTHCODE 256 +#define VOF_VTY_BUF_SIZE 256 + +typedef struct { + uint64_t start; + uint64_t size; +} OfClaimed; + +typedef struct { + char *path; /* the path used to open the instance */ + uint32_t phandle; +} OfInstance; + +static int readstr(hwaddr pa, char *buf, int size) +{ + if (VOF_MEM_READ(pa, buf, size) != MEMTX_OK) { + return -1; + } + if (strnlen(buf, size) == size) { + buf[size - 1] = '\0'; + trace_vof_error_str_truncated(buf, size); + return -1; + } + return 0; +} + +static bool cmpservice(const char *s, unsigned nargs, unsigned nret, + const char *s1, unsigned nargscheck, unsigned nretcheck) +{ + if (strcmp(s, s1)) { + return false; + } + if ((nargscheck && (nargs != nargscheck)) || + (nretcheck && (nret != nretcheck))) { + trace_vof_error_param(s, nargscheck, nretcheck, nargs, nret); + return false; + } + + return true; +} + +static void prop_format(char *tval, int tlen, const void *prop, int len) +{ + int i; + const unsigned char *c; + char *t; + const char bin[] = "..."; + + for (i = 0, c = prop; i < len; ++i, ++c) { + if (*c == '\0' && i == len - 1) { + strncpy(tval, prop, tlen - 1); + return; + } + if (*c < 0x20 || *c >= 0x80) { + break; + } + } + + for (i = 0, c = prop, t = tval; i < len; ++i, ++c) { + if (t >= tval + tlen - sizeof(bin) - 1 - 2 - 1) { + strcpy(t, bin); + return; + } + if (i && i % 4 == 0 && i != len - 1) { + strcat(t, " "); + ++t; + } + t += sprintf(t, "%02X", *c & 0xFF); + } +} + +static int get_path(const void *fdt, int offset, char *buf, int len) +{ + int ret; + + ret = fdt_get_path(fdt, offset, buf, len - 1); + if (ret < 0) { + return ret; + } + + buf[len - 1] = '\0'; + + return strlen(buf) + 1; +} + +static int phandle_to_path(const void *fdt, uint32_t ph, char *buf, int len) +{ + int ret; + + ret = fdt_node_offset_by_phandle(fdt, ph); + if (ret < 0) { + return ret; + } + + return get_path(fdt, ret, buf, len); +} + +static int path_offset(const void *fdt, const char *path) +{ + g_autofree char *p = NULL; + char *at; + + /* + * https://www.devicetree.org/open-firmware/bindings/ppc/release/ppc-2_1.html#HDR16 + * + * "Conversion from numeric representation to text representation shall use + * the lower case forms of the hexadecimal digits in the range a..f, + * suppressing leading zeros". + */ + p = g_strdup(path); + for (at = strchr(p, '@'); at && *at; ) { + if (*at == '/') { + at = strchr(at, '@'); + } else { + *at = tolower(*at); + ++at; + } + } + + return fdt_path_offset(fdt, p); +} + +static uint32_t vof_finddevice(const void *fdt, uint32_t nodeaddr) +{ + char fullnode[VOF_MAX_PATH]; + uint32_t ret = PROM_ERROR; + int offset; + + if (readstr(nodeaddr, fullnode, sizeof(fullnode))) { + return (uint32_t) ret; + } + + offset = path_offset(fdt, fullnode); + if (offset >= 0) { + ret = fdt_get_phandle(fdt, offset); + } + trace_vof_finddevice(fullnode, ret); + return ret; +} + +static const void *getprop(const void *fdt, int nodeoff, const char *propname, + int *proplen, bool *write0) +{ + const char *unit, *prop; + const void *ret = fdt_getprop(fdt, nodeoff, propname, proplen); + + if (ret) { + if (write0) { + *write0 = false; + } + return ret; + } + + if (strcmp(propname, "name")) { + return NULL; + } + /* + * We return a value for "name" from path if queried but property does not + * exist. @proplen does not include the unit part in this case. 
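+ * (e.g. querying "name" on /memory@0 yields "memory")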
+ */ + prop = fdt_get_name(fdt, nodeoff, proplen); + if (!prop) { + *proplen = 0; + return NULL; + } + + unit = memchr(prop, '@', *proplen); + if (unit) { + *proplen = unit - prop; + } + *proplen += 1; + + /* + * Since it might be cut at "@" and there will be no trailing zero + * in the prop buffer, tell the caller to write zero at the end. + */ + if (write0) { + *write0 = true; + } + return prop; +} + +static uint32_t vof_getprop(const void *fdt, uint32_t nodeph, uint32_t pname, + uint32_t valaddr, uint32_t vallen) +{ + char propname[OF_PROPNAME_LEN_MAX + 1]; + uint32_t ret = 0; + int proplen = 0; + const void *prop; + char trval[64] = ""; + int nodeoff = fdt_node_offset_by_phandle(fdt, nodeph); + bool write0; + + if (nodeoff < 0) { + return PROM_ERROR; + } + if (readstr(pname, propname, sizeof(propname))) { + return PROM_ERROR; + } + prop = getprop(fdt, nodeoff, propname, &proplen, &write0); + if (prop) { + const char zero = 0; + int cb = MIN(proplen, vallen); + + if (VOF_MEM_WRITE(valaddr, prop, cb) != MEMTX_OK || + /* if that was "name" with a unit address, overwrite '@' with '0' */ + (write0 && + cb == proplen && + VOF_MEM_WRITE(valaddr + cb - 1, &zero, 1) != MEMTX_OK)) { + ret = PROM_ERROR; + } else { + /* + * OF1275 says: + * "Size is either the actual size of the property, or -1 if name + * does not exist", hence returning proplen instead of cb. + */ + ret = proplen; + /* Do not format a value if tracepoint is silent, for performance */ + if (trace_event_get_state(TRACE_VOF_GETPROP) && + qemu_loglevel_mask(LOG_TRACE)) { + prop_format(trval, sizeof(trval), prop, ret); + } + } + } else { + ret = PROM_ERROR; + } + trace_vof_getprop(nodeph, propname, ret, trval); + + return ret; +} + +static uint32_t vof_getproplen(const void *fdt, uint32_t nodeph, uint32_t pname) +{ + char propname[OF_PROPNAME_LEN_MAX + 1]; + uint32_t ret = 0; + int proplen = 0; + const void *prop; + int nodeoff = fdt_node_offset_by_phandle(fdt, nodeph); + + if (nodeoff < 0) { + return PROM_ERROR; + } + if (readstr(pname, propname, sizeof(propname))) { + return PROM_ERROR; + } + prop = getprop(fdt, nodeoff, propname, &proplen, NULL); + if (prop) { + ret = proplen; + } else { + ret = PROM_ERROR; + } + trace_vof_getproplen(nodeph, propname, ret); + + return ret; +} + +static uint32_t vof_setprop(MachineState *ms, void *fdt, Vof *vof, + uint32_t nodeph, uint32_t pname, + uint32_t valaddr, uint32_t vallen) +{ + char propname[OF_PROPNAME_LEN_MAX + 1]; + uint32_t ret = PROM_ERROR; + int offset, rc; + char trval[64] = ""; + char nodepath[VOF_MAX_PATH] = ""; + Object *vmo = object_dynamic_cast(OBJECT(ms), TYPE_VOF_MACHINE_IF); + VofMachineIfClass *vmc; + g_autofree char *val = NULL; + + if (vallen > VOF_MAX_SETPROPLEN) { + goto trace_exit; + } + if (readstr(pname, propname, sizeof(propname))) { + goto trace_exit; + } + offset = fdt_node_offset_by_phandle(fdt, nodeph); + if (offset < 0) { + goto trace_exit; + } + rc = get_path(fdt, offset, nodepath, sizeof(nodepath)); + if (rc <= 0) { + goto trace_exit; + } + + val = g_malloc0(vallen); + if (VOF_MEM_READ(valaddr, val, vallen) != MEMTX_OK) { + goto trace_exit; + } + + if (!vmo) { + goto trace_exit; + } + + vmc = VOF_MACHINE_GET_CLASS(vmo); + if (!vmc->setprop || !vmc->setprop(ms, nodepath, propname, val, vallen)) { + goto trace_exit; + } + + rc = fdt_setprop(fdt, offset, propname, val, vallen); + if (rc) { + goto trace_exit; + } + + if (trace_event_get_state(TRACE_VOF_SETPROP) && + qemu_loglevel_mask(LOG_TRACE)) { + prop_format(trval, sizeof(trval), val, vallen); + } + ret = 
vallen; + +trace_exit: + trace_vof_setprop(nodeph, propname, trval, vallen, ret); + + return ret; +} + +static uint32_t vof_nextprop(const void *fdt, uint32_t phandle, + uint32_t prevaddr, uint32_t nameaddr) +{ + int offset, nodeoff = fdt_node_offset_by_phandle(fdt, phandle); + char prev[OF_PROPNAME_LEN_MAX + 1]; + const char *tmp; + + if (readstr(prevaddr, prev, sizeof(prev))) { + return PROM_ERROR; + } + + fdt_for_each_property_offset(offset, fdt, nodeoff) { + if (!fdt_getprop_by_offset(fdt, offset, &tmp, NULL)) { + return 0; + } + if (prev[0] == '\0' || strcmp(prev, tmp) == 0) { + if (prev[0] != '\0') { + offset = fdt_next_property_offset(fdt, offset); + if (offset < 0) { + return 0; + } + } + if (!fdt_getprop_by_offset(fdt, offset, &tmp, NULL)) { + return 0; + } + + if (VOF_MEM_WRITE(nameaddr, tmp, strlen(tmp) + 1) != MEMTX_OK) { + return PROM_ERROR; + } + return 1; + } + } + + return 0; +} + +static uint32_t vof_peer(const void *fdt, uint32_t phandle) +{ + uint32_t ret = 0; + int rc; + + if (phandle == 0) { + rc = fdt_path_offset(fdt, "/"); + } else { + rc = fdt_next_subnode(fdt, fdt_node_offset_by_phandle(fdt, phandle)); + } + + if (rc >= 0) { + ret = fdt_get_phandle(fdt, rc); + } + + return ret; +} + +static uint32_t vof_child(const void *fdt, uint32_t phandle) +{ + uint32_t ret = 0; + int rc = fdt_first_subnode(fdt, fdt_node_offset_by_phandle(fdt, phandle)); + + if (rc >= 0) { + ret = fdt_get_phandle(fdt, rc); + } + + return ret; +} + +static uint32_t vof_parent(const void *fdt, uint32_t phandle) +{ + uint32_t ret = 0; + int rc = fdt_parent_offset(fdt, fdt_node_offset_by_phandle(fdt, phandle)); + + if (rc >= 0) { + ret = fdt_get_phandle(fdt, rc); + } + + return ret; +} + +static uint32_t vof_do_open(void *fdt, Vof *vof, int offset, const char *path) +{ + uint32_t ret = PROM_ERROR; + OfInstance *inst = NULL; + + if (vof->of_instance_last == 0xFFFFFFFF) { + /* We do not recycle ihandles yet */ + goto trace_exit; + } + + inst = g_new0(OfInstance, 1); + inst->phandle = fdt_get_phandle(fdt, offset); + g_assert(inst->phandle); + ++vof->of_instance_last; + + inst->path = g_strdup(path); + g_hash_table_insert(vof->of_instances, + GINT_TO_POINTER(vof->of_instance_last), + inst); + ret = vof->of_instance_last; + +trace_exit: + trace_vof_open(path, inst ? inst->phandle : 0, ret); + + return ret; +} + +uint32_t vof_client_open_store(void *fdt, Vof *vof, const char *nodename, + const char *prop, const char *path) +{ + int offset, node = fdt_path_offset(fdt, nodename); + uint32_t inst; + + offset = fdt_path_offset(fdt, path); + if (offset < 0) { + trace_vof_error_unknown_path(path); + return PROM_ERROR; + } + + inst = vof_do_open(fdt, vof, offset, path); + + return fdt_setprop_cell(fdt, node, prop, inst) >= 0 ? 
0 : PROM_ERROR; +} + +static uint32_t vof_open(void *fdt, Vof *vof, uint32_t pathaddr) +{ + char path[VOF_MAX_PATH]; + int offset; + + if (readstr(pathaddr, path, sizeof(path))) { + return PROM_ERROR; + } + + offset = path_offset(fdt, path); + if (offset < 0) { + trace_vof_error_unknown_path(path); + return PROM_ERROR; + } + + return vof_do_open(fdt, vof, offset, path); +} + +static void vof_close(Vof *vof, uint32_t ihandle) +{ + if (!g_hash_table_remove(vof->of_instances, GINT_TO_POINTER(ihandle))) { + trace_vof_error_unknown_ihandle_close(ihandle); + } +} + +static uint32_t vof_instance_to_package(Vof *vof, uint32_t ihandle) +{ + gpointer instp = g_hash_table_lookup(vof->of_instances, + GINT_TO_POINTER(ihandle)); + uint32_t ret = PROM_ERROR; + + if (instp) { + ret = ((OfInstance *)instp)->phandle; + } + trace_vof_instance_to_package(ihandle, ret); + + return ret; +} + +static uint32_t vof_package_to_path(const void *fdt, uint32_t phandle, + uint32_t buf, uint32_t len) +{ + int rc; + char tmp[VOF_MAX_PATH] = ""; + + rc = phandle_to_path(fdt, phandle, tmp, sizeof(tmp)); + if (rc > 0) { + if (VOF_MEM_WRITE(buf, tmp, rc) != MEMTX_OK) { + rc = -1; + } + } + + trace_vof_package_to_path(phandle, tmp, rc); + + return rc > 0 ? (uint32_t)rc : PROM_ERROR; +} + +static uint32_t vof_instance_to_path(void *fdt, Vof *vof, uint32_t ihandle, + uint32_t buf, uint32_t len) +{ + int rc = -1; + uint32_t phandle = vof_instance_to_package(vof, ihandle); + char tmp[VOF_MAX_PATH] = ""; + + if (phandle != -1) { + rc = phandle_to_path(fdt, phandle, tmp, sizeof(tmp)); + if (rc > 0) { + if (VOF_MEM_WRITE(buf, tmp, rc) != MEMTX_OK) { + rc = -1; + } + } + } + trace_vof_instance_to_path(ihandle, phandle, tmp, rc); + + return rc > 0 ? (uint32_t)rc : PROM_ERROR; +} + +static uint32_t vof_write(Vof *vof, uint32_t ihandle, uint32_t buf, + uint32_t len) +{ + char tmp[VOF_VTY_BUF_SIZE]; + unsigned cb; + OfInstance *inst = (OfInstance *) + g_hash_table_lookup(vof->of_instances, GINT_TO_POINTER(ihandle)); + + if (!inst) { + trace_vof_error_write(ihandle); + return PROM_ERROR; + } + + for ( ; len > 0; len -= cb) { + cb = MIN(len, sizeof(tmp) - 1); + if (VOF_MEM_READ(buf, tmp, cb) != MEMTX_OK) { + return PROM_ERROR; + } + + /* FIXME: there is no backend(s) yet so just call a trace */ + if (trace_event_get_state(TRACE_VOF_WRITE) && + qemu_loglevel_mask(LOG_TRACE)) { + tmp[cb] = '\0'; + trace_vof_write(ihandle, cb, tmp); + } + } + + return len; +} + +static void vof_claimed_dump(GArray *claimed) +{ + int i; + OfClaimed c; + + if (trace_event_get_state(TRACE_VOF_CLAIMED) && + qemu_loglevel_mask(LOG_TRACE)) { + + for (i = 0; i < claimed->len; ++i) { + c = g_array_index(claimed, OfClaimed, i); + trace_vof_claimed(c.start, c.start + c.size, c.size); + } + } +} + +static bool vof_claim_avail(GArray *claimed, uint64_t virt, uint64_t size) +{ + int i; + OfClaimed c; + + for (i = 0; i < claimed->len; ++i) { + c = g_array_index(claimed, OfClaimed, i); + if (ranges_overlap(c.start, c.size, virt, size)) { + return false; + } + } + + return true; +} + +static void vof_claim_add(GArray *claimed, uint64_t virt, uint64_t size) +{ + OfClaimed newclaim; + + newclaim.start = virt; + newclaim.size = size; + g_array_append_val(claimed, newclaim); +} + +static gint of_claimed_compare_func(gconstpointer a, gconstpointer b) +{ + return ((OfClaimed *)a)->start - ((OfClaimed *)b)->start; +} + +static void vof_dt_memory_available(void *fdt, GArray *claimed, uint64_t base) +{ + int i, n, offset, proplen = 0, sc, ac; + target_ulong mem0_end; + const uint8_t 
*mem0_reg; + g_autofree uint8_t *avail = NULL; + uint8_t *availcur; + + if (!fdt || !claimed) { + return; + } + + offset = fdt_path_offset(fdt, "/"); + _FDT(offset); + ac = fdt_address_cells(fdt, offset); + g_assert(ac == 1 || ac == 2); + sc = fdt_size_cells(fdt, offset); + g_assert(sc == 1 || sc == 2); + + offset = fdt_path_offset(fdt, "/memory@0"); + _FDT(offset); + + mem0_reg = fdt_getprop(fdt, offset, "reg", &proplen); + g_assert(mem0_reg && proplen == sizeof(uint32_t) * (ac + sc)); + if (sc == 2) { + mem0_end = be64_to_cpu(*(uint64_t *)(mem0_reg + sizeof(uint32_t) * ac)); + } else { + mem0_end = be32_to_cpu(*(uint32_t *)(mem0_reg + sizeof(uint32_t) * ac)); + } + + g_array_sort(claimed, of_claimed_compare_func); + vof_claimed_dump(claimed); + + /* + * VOF resides in the first page so we do not need to check if there is + * available memory before the first claimed block + */ + g_assert(claimed->len && (g_array_index(claimed, OfClaimed, 0).start == 0)); + + avail = g_malloc0(sizeof(uint32_t) * (ac + sc) * claimed->len); + for (i = 0, n = 0, availcur = avail; i < claimed->len; ++i) { + OfClaimed c = g_array_index(claimed, OfClaimed, i); + uint64_t start, size; + + start = c.start + c.size; + if (i < claimed->len - 1) { + OfClaimed cn = g_array_index(claimed, OfClaimed, i + 1); + + size = cn.start - start; + } else { + size = mem0_end - start; + } + + if (ac == 2) { + *(uint64_t *) availcur = cpu_to_be64(start); + } else { + *(uint32_t *) availcur = cpu_to_be32(start); + } + availcur += sizeof(uint32_t) * ac; + if (sc == 2) { + *(uint64_t *) availcur = cpu_to_be64(size); + } else { + *(uint32_t *) availcur = cpu_to_be32(size); + } + availcur += sizeof(uint32_t) * sc; + + if (size) { + trace_vof_avail(c.start + c.size, c.start + c.size + size, size); + ++n; + } + } + _FDT((fdt_setprop(fdt, offset, "available", avail, availcur - avail))); +} + +/* + * OF1275: + * "Allocates size bytes of memory. If align is zero, the allocated range + * begins at the virtual address virt. Otherwise, an aligned address is + * automatically chosen and the input argument virt is ignored". + * + * In other words, exactly one of @virt and @align is non-zero. 
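+ * E.g. claim(0x4000, 0x1000, 0) reserves exactly that range if it is free, + * while claim(0, 0x1000, 0x1000) picks the next free 4 KiB aligned range.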
+ */ +uint64_t vof_claim(Vof *vof, uint64_t virt, uint64_t size, + uint64_t align) +{ + uint64_t ret; + + if (size == 0) { + ret = -1; + } else if (align == 0) { + if (!vof_claim_avail(vof->claimed, virt, size)) { + ret = -1; + } else { + ret = virt; + } + } else { + vof->claimed_base = QEMU_ALIGN_UP(vof->claimed_base, align); + while (1) { + if (vof->claimed_base >= vof->top_addr) { + error_report("Out of RMA memory for the OF client"); + return -1; + } + if (vof_claim_avail(vof->claimed, vof->claimed_base, size)) { + break; + } + vof->claimed_base += size; + } + ret = vof->claimed_base; + } + + if (ret != -1) { + vof->claimed_base = MAX(vof->claimed_base, ret + size); + vof_claim_add(vof->claimed, ret, size); + } + trace_vof_claim(virt, size, align, ret); + + return ret; +} + +static uint32_t vof_release(Vof *vof, uint64_t virt, uint64_t size) +{ + uint32_t ret = PROM_ERROR; + int i; + GArray *claimed = vof->claimed; + OfClaimed c; + + for (i = 0; i < claimed->len; ++i) { + c = g_array_index(claimed, OfClaimed, i); + if (c.start == virt && c.size == size) { + g_array_remove_index(claimed, i); + ret = 0; + break; + } + } + + trace_vof_release(virt, size, ret); + + return ret; +} + +static void vof_instantiate_rtas(Error **errp) +{ + error_setg(errp, "The firmware should have instantiated RTAS"); +} + +static uint32_t vof_call_method(MachineState *ms, Vof *vof, uint32_t methodaddr, + uint32_t ihandle, uint32_t param1, + uint32_t param2, uint32_t param3, + uint32_t param4, uint32_t *ret2) +{ + uint32_t ret = PROM_ERROR; + char method[VOF_MAX_METHODLEN] = ""; + OfInstance *inst; + + if (!ihandle) { + goto trace_exit; + } + + inst = (OfInstance *)g_hash_table_lookup(vof->of_instances, + GINT_TO_POINTER(ihandle)); + if (!inst) { + goto trace_exit; + } + + if (readstr(methodaddr, method, sizeof(method))) { + goto trace_exit; + } + + if (strcmp(inst->path, "/") == 0) { + if (strcmp(method, "ibm,client-architecture-support") == 0) { + Object *vmo = object_dynamic_cast(OBJECT(ms), TYPE_VOF_MACHINE_IF); + + if (vmo) { + VofMachineIfClass *vmc = VOF_MACHINE_GET_CLASS(vmo); + + g_assert(vmc->client_architecture_support); + ret = (uint32_t)vmc->client_architecture_support(ms, first_cpu, + param1); + } + + *ret2 = 0; + } + } else if (strcmp(inst->path, "/rtas") == 0) { + if (strcmp(method, "instantiate-rtas") == 0) { + vof_instantiate_rtas(&error_fatal); + ret = 0; + *ret2 = param1; /* rtas-base */ + } + } else { + trace_vof_error_unknown_method(method); + } + +trace_exit: + trace_vof_method(ihandle, method, param1, ret, *ret2); + + return ret; +} + +static uint32_t vof_call_interpret(uint32_t cmdaddr, uint32_t param1, + uint32_t param2, uint32_t *ret2) +{ + uint32_t ret = PROM_ERROR; + char cmd[VOF_MAX_FORTHCODE] = ""; + + /* No interpret implemented so just call a trace */ + readstr(cmdaddr, cmd, sizeof(cmd)); + trace_vof_interpret(cmd, param1, param2, ret, *ret2); + + return ret; +} + +static void vof_quiesce(MachineState *ms, void *fdt, Vof *vof) +{ + Object *vmo = object_dynamic_cast(OBJECT(ms), TYPE_VOF_MACHINE_IF); + /* After "quiesce", no change is expected to the FDT, pack FDT to ensure */ + int rc = fdt_pack(fdt); + + assert(rc == 0); + + if (vmo) { + VofMachineIfClass *vmc = VOF_MACHINE_GET_CLASS(vmo); + + if (vmc->quiesce) { + vmc->quiesce(ms); + } + } + + vof_claimed_dump(vof->claimed); +} + +static uint32_t vof_client_handle(MachineState *ms, void *fdt, Vof *vof, + const char *service, + uint32_t *args, unsigned nargs, + uint32_t *rets, unsigned nrets) +{ + uint32_t ret = 0; + + /* @nrets 
includes the value which this function returns */ +#define cmpserv(s, a, r) \ + cmpservice(service, nargs, nrets, (s), (a), (r)) + + if (cmpserv("finddevice", 1, 1)) { + ret = vof_finddevice(fdt, args[0]); + } else if (cmpserv("getprop", 4, 1)) { + ret = vof_getprop(fdt, args[0], args[1], args[2], args[3]); + } else if (cmpserv("getproplen", 2, 1)) { + ret = vof_getproplen(fdt, args[0], args[1]); + } else if (cmpserv("setprop", 4, 1)) { + ret = vof_setprop(ms, fdt, vof, args[0], args[1], args[2], args[3]); + } else if (cmpserv("nextprop", 3, 1)) { + ret = vof_nextprop(fdt, args[0], args[1], args[2]); + } else if (cmpserv("peer", 1, 1)) { + ret = vof_peer(fdt, args[0]); + } else if (cmpserv("child", 1, 1)) { + ret = vof_child(fdt, args[0]); + } else if (cmpserv("parent", 1, 1)) { + ret = vof_parent(fdt, args[0]); + } else if (cmpserv("open", 1, 1)) { + ret = vof_open(fdt, vof, args[0]); + } else if (cmpserv("close", 1, 0)) { + vof_close(vof, args[0]); + } else if (cmpserv("instance-to-package", 1, 1)) { + ret = vof_instance_to_package(vof, args[0]); + } else if (cmpserv("package-to-path", 3, 1)) { + ret = vof_package_to_path(fdt, args[0], args[1], args[2]); + } else if (cmpserv("instance-to-path", 3, 1)) { + ret = vof_instance_to_path(fdt, vof, args[0], args[1], args[2]); + } else if (cmpserv("write", 3, 1)) { + ret = vof_write(vof, args[0], args[1], args[2]); + } else if (cmpserv("claim", 3, 1)) { + uint64_t ret64 = vof_claim(vof, args[0], args[1], args[2]); + + if (ret64 < 0x100000000UL) { + vof_dt_memory_available(fdt, vof->claimed, vof->claimed_base); + ret = (uint32_t)ret64; + } else { + if (ret64 != -1) { + vof_release(vof, ret, args[1]); + } + ret = PROM_ERROR; + } + } else if (cmpserv("release", 2, 0)) { + ret = vof_release(vof, args[0], args[1]); + if (ret != PROM_ERROR) { + vof_dt_memory_available(fdt, vof->claimed, vof->claimed_base); + } + } else if (cmpserv("call-method", 0, 0)) { + ret = vof_call_method(ms, vof, args[0], args[1], args[2], args[3], + args[4], args[5], rets); + } else if (cmpserv("interpret", 0, 0)) { + ret = vof_call_interpret(args[0], args[1], args[2], rets); + } else if (cmpserv("milliseconds", 0, 1)) { + ret = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL); + } else if (cmpserv("quiesce", 0, 0)) { + vof_quiesce(ms, fdt, vof); + } else if (cmpserv("exit", 0, 0)) { + error_report("Stopped as the VM requested \"exit\""); + vm_stop(RUN_STATE_PAUSED); + } else { + trace_vof_error_unknown_service(service, nargs, nrets); + ret = -1; + } + +#undef cmpserv + + return ret; +} + +/* Defined as Big Endian */ +struct prom_args { + uint32_t service; + uint32_t nargs; + uint32_t nret; + uint32_t args[10]; +} QEMU_PACKED; + +int vof_client_call(MachineState *ms, Vof *vof, void *fdt, + target_ulong args_real) +{ + struct prom_args args_be; + uint32_t args[ARRAY_SIZE(args_be.args)]; + uint32_t rets[ARRAY_SIZE(args_be.args)] = { 0 }, ret; + char service[64]; + unsigned nargs, nret, i; + + if (VOF_MEM_READ(args_real, &args_be, sizeof(args_be)) != MEMTX_OK) { + return -EINVAL; + } + nargs = be32_to_cpu(args_be.nargs); + if (nargs >= ARRAY_SIZE(args_be.args)) { + return -EINVAL; + } + + if (VOF_MEM_READ(be32_to_cpu(args_be.service), service, sizeof(service)) != + MEMTX_OK) { + return -EINVAL; + } + if (strnlen(service, sizeof(service)) == sizeof(service)) { + /* Too long service name */ + return -EINVAL; + } + + for (i = 0; i < nargs; ++i) { + args[i] = be32_to_cpu(args_be.args[i]); + } + + nret = be32_to_cpu(args_be.nret); + if (nret > ARRAY_SIZE(args_be.args) - nargs) { + return 
-EINVAL; + } + ret = vof_client_handle(ms, fdt, vof, service, args, nargs, rets, nret); + if (!nret) { + return 0; + } + + /* @nrets includes the value which this function returns */ + args_be.args[nargs] = cpu_to_be32(ret); + for (i = 1; i < nret; ++i) { + args_be.args[nargs + i] = cpu_to_be32(rets[i - 1]); + } + + if (VOF_MEM_WRITE(args_real + offsetof(struct prom_args, args[nargs]), + args_be.args + nargs, sizeof(args_be.args[0]) * nret) != + MEMTX_OK) { + return -EINVAL; + } + + return 0; +} + +static void vof_instance_free(gpointer data) +{ + OfInstance *inst = (OfInstance *)data; + + g_free(inst->path); + g_free(inst); +} + +void vof_init(Vof *vof, uint64_t top_addr, Error **errp) +{ + vof_cleanup(vof); + + vof->of_instances = g_hash_table_new_full(g_direct_hash, g_direct_equal, + NULL, vof_instance_free); + vof->claimed = g_array_new(false, false, sizeof(OfClaimed)); + + /* Keep allocations in 32bit as CLI ABI can only return cells==32bit */ + vof->top_addr = MIN(top_addr, 4 * GiB); + if (vof_claim(vof, 0, vof->fw_size, 0) == -1) { + error_setg(errp, "Memory for firmware is in use"); + } +} + +void vof_cleanup(Vof *vof) +{ + if (vof->claimed) { + g_array_unref(vof->claimed); + } + if (vof->of_instances) { + g_hash_table_unref(vof->of_instances); + } + vof->claimed = NULL; + vof->of_instances = NULL; +} + +void vof_build_dt(void *fdt, Vof *vof) +{ + uint32_t phandle = fdt_get_max_phandle(fdt); + int offset, proplen = 0; + const void *prop; + + /* Assign phandles to nodes without predefined phandles (like XICS/XIVE) */ + for (offset = fdt_next_node(fdt, -1, NULL); + offset >= 0; + offset = fdt_next_node(fdt, offset, NULL)) { + prop = fdt_getprop(fdt, offset, "phandle", &proplen); + if (prop) { + continue; + } + ++phandle; + _FDT(fdt_setprop_cell(fdt, offset, "phandle", phandle)); + } + + vof_dt_memory_available(fdt, vof->claimed, vof->claimed_base); +} + +static const TypeInfo vof_machine_if_info = { + .name = TYPE_VOF_MACHINE_IF, + .parent = TYPE_INTERFACE, + .class_size = sizeof(VofMachineIfClass), +}; + +static void vof_machine_if_register_types(void) +{ + type_register_static(&vof_machine_if_info); +} +type_init(vof_machine_if_register_types) diff --git a/hw/rdma/Kconfig b/hw/rdma/Kconfig new file mode 100644 index 000000000..8e2211288 --- /dev/null +++ b/hw/rdma/Kconfig @@ -0,0 +1,3 @@ +config VMW_PVRDMA + default y if PCI_DEVICES + depends on PVRDMA && PCI && MSI_NONBROKEN diff --git a/hw/rdma/meson.build b/hw/rdma/meson.build new file mode 100644 index 000000000..7325f40c3 --- /dev/null +++ b/hw/rdma/meson.build @@ -0,0 +1,10 @@ +specific_ss.add(when: 'CONFIG_VMW_PVRDMA', if_true: files( + 'rdma.c', + 'rdma_backend.c', + 'rdma_rm.c', + 'rdma_utils.c', + 'vmw/pvrdma_cmd.c', + 'vmw/pvrdma_dev_ring.c', + 'vmw/pvrdma_main.c', + 'vmw/pvrdma_qp_ops.c', +)) diff --git a/hw/rdma/rdma.c b/hw/rdma/rdma.c new file mode 100644 index 000000000..7bec0d0d2 --- /dev/null +++ b/hw/rdma/rdma.c @@ -0,0 +1,30 @@ +/* + * RDMA device interface + * + * Copyright (C) 2018 Oracle + * Copyright (C) 2018 Red Hat Inc + * + * Authors: + * Yuval Shaia + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
 + * + */ + +#include "qemu/osdep.h" +#include "hw/rdma/rdma.h" +#include "qemu/module.h" + +static const TypeInfo rdma_hmp_info = { + .name = INTERFACE_RDMA_PROVIDER, + .parent = TYPE_INTERFACE, + .class_size = sizeof(RdmaProviderClass), +}; + +static void rdma_register_types(void) +{ + type_register_static(&rdma_hmp_info); +} + +type_init(rdma_register_types) diff --git a/hw/rdma/rdma_backend.c b/hw/rdma/rdma_backend.c new file mode 100644 index 000000000..6dcdfbbbe --- /dev/null +++ b/hw/rdma/rdma_backend.c @@ -0,0 +1,1401 @@ +/* + * QEMU paravirtual RDMA - Generic RDMA backend + * + * Copyright (C) 2018 Oracle + * Copyright (C) 2018 Red Hat Inc + * + * Authors: + * Yuval Shaia + * Marcel Apfelbaum + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +#include "qemu/osdep.h" +#include "qapi/qapi-events-rdma.h" + +#include <infiniband/verbs.h> + +#include "contrib/rdmacm-mux/rdmacm-mux.h" +#include "trace.h" +#include "rdma_utils.h" +#include "rdma_rm.h" +#include "rdma_backend.h" + +#define THR_NAME_LEN 16 +#define THR_POLL_TO 5000 + +#define MAD_HDR_SIZE sizeof(struct ibv_grh) + +typedef struct BackendCtx { + void *up_ctx; + struct ibv_sge sge; /* Used to save MAD recv buffer */ + RdmaBackendQP *backend_qp; /* To maintain recv buffers */ + RdmaBackendSRQ *backend_srq; +} BackendCtx; + +struct backend_umad { + struct ib_user_mad hdr; + char mad[RDMA_MAX_PRIVATE_DATA]; +}; + +static void (*comp_handler)(void *ctx, struct ibv_wc *wc); + +static void dummy_comp_handler(void *ctx, struct ibv_wc *wc) +{ + rdma_error_report("No completion handler is registered"); +} + +static inline void complete_work(enum ibv_wc_status status, uint32_t vendor_err, + void *ctx) +{ + struct ibv_wc wc = {}; + + wc.status = status; + wc.vendor_err = vendor_err; + + comp_handler(ctx, &wc); +} + +static void free_cqe_ctx(gpointer data, gpointer user_data) +{ + BackendCtx *bctx; + RdmaDeviceResources *rdma_dev_res = user_data; + unsigned long cqe_ctx_id = GPOINTER_TO_INT(data); + + bctx = rdma_rm_get_cqe_ctx(rdma_dev_res, cqe_ctx_id); + if (bctx) { + rdma_rm_dealloc_cqe_ctx(rdma_dev_res, cqe_ctx_id); + qatomic_dec(&rdma_dev_res->stats.missing_cqe); + } + g_free(bctx); +} + +static void clean_recv_mads(RdmaBackendDev *backend_dev) +{ + unsigned long cqe_ctx_id; + + do { + cqe_ctx_id = rdma_protected_gqueue_pop_int64(&backend_dev-> + recv_mads_list); + if (cqe_ctx_id != -ENOENT) { + qatomic_inc(&backend_dev->rdma_dev_res->stats.missing_cqe); + free_cqe_ctx(GINT_TO_POINTER(cqe_ctx_id), + backend_dev->rdma_dev_res); + } + } while (cqe_ctx_id != -ENOENT); +} + +static int rdma_poll_cq(RdmaDeviceResources *rdma_dev_res, struct ibv_cq *ibcq) +{ + int i, ne, total_ne = 0; + BackendCtx *bctx; + struct ibv_wc wc[2]; + RdmaProtectedGSList *cqe_ctx_list; + + WITH_QEMU_LOCK_GUARD(&rdma_dev_res->lock) { + do { + ne = ibv_poll_cq(ibcq, ARRAY_SIZE(wc), wc); + + trace_rdma_poll_cq(ne, ibcq); + + for (i = 0; i < ne; i++) { + bctx = rdma_rm_get_cqe_ctx(rdma_dev_res, wc[i].wr_id); + if (unlikely(!bctx)) { + rdma_error_report("No matching ctx for req %"PRId64, + wc[i].wr_id); + continue; + } + + comp_handler(bctx->up_ctx, &wc[i]); + + if (bctx->backend_qp) { + cqe_ctx_list = &bctx->backend_qp->cqe_ctx_list; + } else { + cqe_ctx_list = &bctx->backend_srq->cqe_ctx_list; + } + + rdma_protected_gslist_remove_int32(cqe_ctx_list, wc[i].wr_id); + rdma_rm_dealloc_cqe_ctx(rdma_dev_res, wc[i].wr_id); + g_free(bctx); + } + total_ne += ne; + } while (ne > 0); + 
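+ /* The completions drained above are no longer outstanding */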
qatomic_sub(&rdma_dev_res->stats.missing_cqe, total_ne); + } + + if (ne < 0) { + rdma_error_report("ibv_poll_cq fail, rc=%d, errno=%d", ne, errno); + } + + rdma_dev_res->stats.completions += total_ne; + + return total_ne; +} + +static void *comp_handler_thread(void *arg) +{ + RdmaBackendDev *backend_dev = (RdmaBackendDev *)arg; + int rc; + struct ibv_cq *ev_cq; + void *ev_ctx; + int flags; + GPollFD pfds[1]; + + /* Change to non-blocking mode */ + flags = fcntl(backend_dev->channel->fd, F_GETFL); + rc = fcntl(backend_dev->channel->fd, F_SETFL, flags | O_NONBLOCK); + if (rc < 0) { + rdma_error_report("Failed to change backend channel FD to non-blocking"); + return NULL; + } + + pfds[0].fd = backend_dev->channel->fd; + pfds[0].events = G_IO_IN | G_IO_HUP | G_IO_ERR; + + backend_dev->comp_thread.is_running = true; + + while (backend_dev->comp_thread.run) { + do { + rc = qemu_poll_ns(pfds, 1, THR_POLL_TO * (int64_t)SCALE_MS); + if (!rc) { + backend_dev->rdma_dev_res->stats.poll_cq_ppoll_to++; + } + } while (!rc && backend_dev->comp_thread.run); + + if (backend_dev->comp_thread.run) { + rc = ibv_get_cq_event(backend_dev->channel, &ev_cq, &ev_ctx); + if (unlikely(rc)) { + rdma_error_report("ibv_get_cq_event fail, rc=%d, errno=%d", rc, + errno); + continue; + } + + rc = ibv_req_notify_cq(ev_cq, 0); + if (unlikely(rc)) { + rdma_error_report("ibv_req_notify_cq fail, rc=%d, errno=%d", rc, + errno); + } + + backend_dev->rdma_dev_res->stats.poll_cq_from_bk++; + rdma_poll_cq(backend_dev->rdma_dev_res, ev_cq); + + ibv_ack_cq_events(ev_cq, 1); + } + } + + backend_dev->comp_thread.is_running = false; + + qemu_thread_exit(0); + + return NULL; +} + +static inline void disable_rdmacm_mux_async(RdmaBackendDev *backend_dev) +{ + qatomic_set(&backend_dev->rdmacm_mux.can_receive, 0); +} + +static inline void enable_rdmacm_mux_async(RdmaBackendDev *backend_dev) +{ + qatomic_set(&backend_dev->rdmacm_mux.can_receive, sizeof(RdmaCmMuxMsg)); +} + +static inline int rdmacm_mux_can_process_async(RdmaBackendDev *backend_dev) +{ + return qatomic_read(&backend_dev->rdmacm_mux.can_receive); +} + +static int rdmacm_mux_check_op_status(CharBackend *mad_chr_be) +{ + RdmaCmMuxMsg msg = {}; + int ret; + + ret = qemu_chr_fe_read_all(mad_chr_be, (uint8_t *)&msg, sizeof(msg)); + if (ret != sizeof(msg)) { + rdma_error_report("Got invalid message from mux: size %d, expecting %d", + ret, (int)sizeof(msg)); + return -EIO; + } + + trace_rdmacm_mux_check_op_status(msg.hdr.msg_type, msg.hdr.op_code, + msg.hdr.err_code); + + if (msg.hdr.msg_type != RDMACM_MUX_MSG_TYPE_RESP) { + rdma_error_report("Got invalid message type %d", msg.hdr.msg_type); + return -EIO; + } + + if (msg.hdr.err_code != RDMACM_MUX_ERR_CODE_OK) { + rdma_error_report("Operation failed in mux, error code %d", + msg.hdr.err_code); + return -EIO; + } + + return 0; +} + +static int rdmacm_mux_send(RdmaBackendDev *backend_dev, RdmaCmMuxMsg *msg) +{ + int rc = 0; + + msg->hdr.msg_type = RDMACM_MUX_MSG_TYPE_REQ; + trace_rdmacm_mux("send", msg->hdr.msg_type, msg->hdr.op_code); + disable_rdmacm_mux_async(backend_dev); + rc = qemu_chr_fe_write(backend_dev->rdmacm_mux.chr_be, + (const uint8_t *)msg, sizeof(*msg)); + if (rc != sizeof(*msg)) { + enable_rdmacm_mux_async(backend_dev); + rdma_error_report("Failed to send request to rdmacm_mux (rc=%d)", rc); + return -EIO; + } + + rc = rdmacm_mux_check_op_status(backend_dev->rdmacm_mux.chr_be); + if (rc) { + rdma_error_report("Failed to execute rdmacm_mux request %d (rc=%d)", + msg->hdr.op_code, rc); + } + + 
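+ /* Response status checked; re-enable async messages from the mux */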
enable_rdmacm_mux_async(backend_dev); + + return 0; +} + +static void stop_backend_thread(RdmaBackendThread *thread) +{ + thread->run = false; + while (thread->is_running) { + sleep(THR_POLL_TO / SCALE_US / 2); + } +} + +static void start_comp_thread(RdmaBackendDev *backend_dev) +{ + char thread_name[THR_NAME_LEN] = {}; + + stop_backend_thread(&backend_dev->comp_thread); + + snprintf(thread_name, sizeof(thread_name), "rdma_comp_%s", + ibv_get_device_name(backend_dev->ib_dev)); + backend_dev->comp_thread.run = true; + qemu_thread_create(&backend_dev->comp_thread.thread, thread_name, + comp_handler_thread, backend_dev, QEMU_THREAD_DETACHED); +} + +void rdma_backend_register_comp_handler(void (*handler)(void *ctx, + struct ibv_wc *wc)) +{ + comp_handler = handler; +} + +void rdma_backend_unregister_comp_handler(void) +{ + rdma_backend_register_comp_handler(dummy_comp_handler); +} + +int rdma_backend_query_port(RdmaBackendDev *backend_dev, + struct ibv_port_attr *port_attr) +{ + int rc; + + rc = ibv_query_port(backend_dev->context, backend_dev->port_num, port_attr); + if (rc) { + rdma_error_report("ibv_query_port fail, rc=%d, errno=%d", rc, errno); + return -EIO; + } + + return 0; +} + +void rdma_backend_poll_cq(RdmaDeviceResources *rdma_dev_res, RdmaBackendCQ *cq) +{ + int polled; + + rdma_dev_res->stats.poll_cq_from_guest++; + polled = rdma_poll_cq(rdma_dev_res, cq->ibcq); + if (!polled) { + rdma_dev_res->stats.poll_cq_from_guest_empty++; + } +} + +static GHashTable *ah_hash; + +static struct ibv_ah *create_ah(RdmaBackendDev *backend_dev, struct ibv_pd *pd, + uint8_t sgid_idx, union ibv_gid *dgid) +{ + GBytes *ah_key = g_bytes_new(dgid, sizeof(*dgid)); + struct ibv_ah *ah = g_hash_table_lookup(ah_hash, ah_key); + + if (ah) { + trace_rdma_create_ah_cache_hit(be64_to_cpu(dgid->global.subnet_prefix), + be64_to_cpu(dgid->global.interface_id)); + g_bytes_unref(ah_key); + } else { + struct ibv_ah_attr ah_attr = { + .is_global = 1, + .port_num = backend_dev->port_num, + .grh.hop_limit = 1, + }; + + ah_attr.grh.dgid = *dgid; + ah_attr.grh.sgid_index = sgid_idx; + + ah = ibv_create_ah(pd, &ah_attr); + if (ah) { + g_hash_table_insert(ah_hash, ah_key, ah); + } else { + g_bytes_unref(ah_key); + rdma_error_report("Failed to create AH for gid <0x%" PRIx64", 0x%"PRIx64">", + be64_to_cpu(dgid->global.subnet_prefix), + be64_to_cpu(dgid->global.interface_id)); + } + + trace_rdma_create_ah_cache_miss(be64_to_cpu(dgid->global.subnet_prefix), + be64_to_cpu(dgid->global.interface_id)); + } + + return ah; +} + +static void destroy_ah_hash_key(gpointer data) +{ + g_bytes_unref(data); +} + +static void destroy_ah_hast_data(gpointer data) +{ + struct ibv_ah *ah = data; + + ibv_destroy_ah(ah); +} + +static void ah_cache_init(void) +{ + ah_hash = g_hash_table_new_full(g_bytes_hash, g_bytes_equal, + destroy_ah_hash_key, destroy_ah_hast_data); +} + +#ifdef LEGACY_RDMA_REG_MR +static int build_host_sge_array(RdmaDeviceResources *rdma_dev_res, + struct ibv_sge *sge, uint8_t num_sge, + uint64_t *total_length) +{ + RdmaRmMR *mr; + int idx; + + for (idx = 0; idx < num_sge; idx++) { + mr = rdma_rm_get_mr(rdma_dev_res, sge[idx].lkey); + if (unlikely(!mr)) { + rdma_error_report("Invalid lkey 0x%x", sge[idx].lkey); + return VENDOR_ERR_INVLKEY | sge[idx].lkey; + } + + sge[idx].addr = (uintptr_t)mr->virt + sge[idx].addr - mr->start; + sge[idx].lkey = rdma_backend_mr_lkey(&mr->backend_mr); + + *total_length += sge[idx].length; + } + + return 0; +} +#else +static inline int build_host_sge_array(RdmaDeviceResources *rdma_dev_res, + 
struct ibv_sge *sge, uint8_t num_sge, + uint64_t *total_length) +{ + int idx; + + for (idx = 0; idx < num_sge; idx++) { + *total_length += sge[idx].length; + } + return 0; +} +#endif + +static void trace_mad_message(const char *title, char *buf, int len) +{ + int i; + char *b = g_malloc0(len * 3 + 1); + char b1[4]; + + for (i = 0; i < len; i++) { + sprintf(b1, "%.2X ", buf[i] & 0x000000FF); + strcat(b, b1); + } + + trace_rdma_mad_message(title, len, b); + + g_free(b); +} + +static int mad_send(RdmaBackendDev *backend_dev, uint8_t sgid_idx, + union ibv_gid *sgid, struct ibv_sge *sge, uint32_t num_sge) +{ + RdmaCmMuxMsg msg = {}; + char *hdr, *data; + int ret; + + if (num_sge != 2) { + return -EINVAL; + } + + msg.hdr.op_code = RDMACM_MUX_OP_CODE_MAD; + memcpy(msg.hdr.sgid.raw, sgid->raw, sizeof(msg.hdr.sgid)); + + msg.umad_len = sge[0].length + sge[1].length; + + if (msg.umad_len > sizeof(msg.umad.mad)) { + return -ENOMEM; + } + + msg.umad.hdr.addr.qpn = htobe32(1); + msg.umad.hdr.addr.grh_present = 1; + msg.umad.hdr.addr.gid_index = sgid_idx; + memcpy(msg.umad.hdr.addr.gid, sgid->raw, sizeof(msg.umad.hdr.addr.gid)); + msg.umad.hdr.addr.hop_limit = 0xFF; + + hdr = rdma_pci_dma_map(backend_dev->dev, sge[0].addr, sge[0].length); + if (!hdr) { + return -ENOMEM; + } + data = rdma_pci_dma_map(backend_dev->dev, sge[1].addr, sge[1].length); + if (!data) { + rdma_pci_dma_unmap(backend_dev->dev, hdr, sge[0].length); + return -ENOMEM; + } + + memcpy(&msg.umad.mad[0], hdr, sge[0].length); + memcpy(&msg.umad.mad[sge[0].length], data, sge[1].length); + + rdma_pci_dma_unmap(backend_dev->dev, data, sge[1].length); + rdma_pci_dma_unmap(backend_dev->dev, hdr, sge[0].length); + + trace_mad_message("send", msg.umad.mad, msg.umad_len); + + ret = rdmacm_mux_send(backend_dev, &msg); + if (ret) { + rdma_error_report("Failed to send MAD to rdma_umadmux (%d)", ret); + return -EIO; + } + + return 0; +} + +void rdma_backend_post_send(RdmaBackendDev *backend_dev, + RdmaBackendQP *qp, uint8_t qp_type, + struct ibv_sge *sge, uint32_t num_sge, + uint8_t sgid_idx, union ibv_gid *sgid, + union ibv_gid *dgid, uint32_t dqpn, uint32_t dqkey, + void *ctx) +{ + BackendCtx *bctx; + uint32_t bctx_id; + int rc; + struct ibv_send_wr wr = {}, *bad_wr; + + if (!qp->ibqp) { /* This field is not initialized for QP0 and QP1 */ + if (qp_type == IBV_QPT_SMI) { + rdma_error_report("Got QP0 request"); + complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_QP0, ctx); + } else if (qp_type == IBV_QPT_GSI) { + rc = mad_send(backend_dev, sgid_idx, sgid, sge, num_sge); + if (rc) { + complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_MAD_SEND, ctx); + backend_dev->rdma_dev_res->stats.mad_tx_err++; + } else { + complete_work(IBV_WC_SUCCESS, 0, ctx); + backend_dev->rdma_dev_res->stats.mad_tx++; + } + } + return; + } + + bctx = g_malloc0(sizeof(*bctx)); + bctx->up_ctx = ctx; + bctx->backend_qp = qp; + + rc = rdma_rm_alloc_cqe_ctx(backend_dev->rdma_dev_res, &bctx_id, bctx); + if (unlikely(rc)) { + complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_NOMEM, ctx); + goto err_free_bctx; + } + + rdma_protected_gslist_append_int32(&qp->cqe_ctx_list, bctx_id); + + rc = build_host_sge_array(backend_dev->rdma_dev_res, sge, num_sge, + &backend_dev->rdma_dev_res->stats.tx_len); + if (rc) { + complete_work(IBV_WC_GENERAL_ERR, rc, ctx); + goto err_dealloc_cqe_ctx; + } + + if (qp_type == IBV_QPT_UD) { + wr.wr.ud.ah = create_ah(backend_dev, qp->ibpd, sgid_idx, dgid); + if (!wr.wr.ud.ah) { + complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_FAIL_BACKEND, ctx); + goto err_dealloc_cqe_ctx; + } + 
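+ /* create_ah() caches one address handle per destination GID in ah_hash, so repeated UD sends to the same peer reuse the ibv_ah instead of creating a new one per WQE. */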
wr.wr.ud.remote_qpn = dqpn; + wr.wr.ud.remote_qkey = dqkey; + } + + wr.num_sge = num_sge; + wr.opcode = IBV_WR_SEND; + wr.send_flags = IBV_SEND_SIGNALED; + wr.sg_list = sge; + wr.wr_id = bctx_id; + + rc = ibv_post_send(qp->ibqp, &wr, &bad_wr); + if (rc) { + rdma_error_report("ibv_post_send fail, qpn=0x%x, rc=%d, errno=%d", + qp->ibqp->qp_num, rc, errno); + complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_FAIL_BACKEND, ctx); + goto err_dealloc_cqe_ctx; + } + + qatomic_inc(&backend_dev->rdma_dev_res->stats.missing_cqe); + backend_dev->rdma_dev_res->stats.tx++; + + return; + +err_dealloc_cqe_ctx: + backend_dev->rdma_dev_res->stats.tx_err++; + rdma_rm_dealloc_cqe_ctx(backend_dev->rdma_dev_res, bctx_id); + +err_free_bctx: + g_free(bctx); +} + +static unsigned int save_mad_recv_buffer(RdmaBackendDev *backend_dev, + struct ibv_sge *sge, uint32_t num_sge, + void *ctx) +{ + BackendCtx *bctx; + int rc; + uint32_t bctx_id; + + if (num_sge != 1) { + rdma_error_report("Invalid num_sge (%d), expecting 1", num_sge); + return VENDOR_ERR_INV_NUM_SGE; + } + + if (sge[0].length < RDMA_MAX_PRIVATE_DATA + sizeof(struct ibv_grh)) { + rdma_error_report("Too small buffer for MAD"); + return VENDOR_ERR_INV_MAD_BUFF; + } + + bctx = g_malloc0(sizeof(*bctx)); + + rc = rdma_rm_alloc_cqe_ctx(backend_dev->rdma_dev_res, &bctx_id, bctx); + if (unlikely(rc)) { + g_free(bctx); + return VENDOR_ERR_NOMEM; + } + + bctx->up_ctx = ctx; + bctx->sge = *sge; + + rdma_protected_gqueue_append_int64(&backend_dev->recv_mads_list, bctx_id); + + return 0; +} + +void rdma_backend_post_recv(RdmaBackendDev *backend_dev, + RdmaBackendQP *qp, uint8_t qp_type, + struct ibv_sge *sge, uint32_t num_sge, void *ctx) +{ + BackendCtx *bctx; + uint32_t bctx_id; + int rc; + struct ibv_recv_wr wr = {}, *bad_wr; + + if (!qp->ibqp) { /* This field does not get initialized for QP0 and QP1 */ + if (qp_type == IBV_QPT_SMI) { + rdma_error_report("Got QP0 request"); + complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_QP0, ctx); + } + if (qp_type == IBV_QPT_GSI) { + rc = save_mad_recv_buffer(backend_dev, sge, num_sge, ctx); + if (rc) { + complete_work(IBV_WC_GENERAL_ERR, rc, ctx); + backend_dev->rdma_dev_res->stats.mad_rx_bufs_err++; + } else { + backend_dev->rdma_dev_res->stats.mad_rx_bufs++; + } + } + return; + } + + bctx = g_malloc0(sizeof(*bctx)); + bctx->up_ctx = ctx; + bctx->backend_qp = qp; + + rc = rdma_rm_alloc_cqe_ctx(backend_dev->rdma_dev_res, &bctx_id, bctx); + if (unlikely(rc)) { + complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_NOMEM, ctx); + goto err_free_bctx; + } + + rdma_protected_gslist_append_int32(&qp->cqe_ctx_list, bctx_id); + + rc = build_host_sge_array(backend_dev->rdma_dev_res, sge, num_sge, + &backend_dev->rdma_dev_res->stats.rx_bufs_len); + if (rc) { + complete_work(IBV_WC_GENERAL_ERR, rc, ctx); + goto err_dealloc_cqe_ctx; + } + + wr.num_sge = num_sge; + wr.sg_list = sge; + wr.wr_id = bctx_id; + rc = ibv_post_recv(qp->ibqp, &wr, &bad_wr); + if (rc) { + rdma_error_report("ibv_post_recv fail, qpn=0x%x, rc=%d, errno=%d", + qp->ibqp->qp_num, rc, errno); + complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_FAIL_BACKEND, ctx); + goto err_dealloc_cqe_ctx; + } + + qatomic_inc(&backend_dev->rdma_dev_res->stats.missing_cqe); + backend_dev->rdma_dev_res->stats.rx_bufs++; + + return; + +err_dealloc_cqe_ctx: + backend_dev->rdma_dev_res->stats.rx_bufs_err++; + rdma_rm_dealloc_cqe_ctx(backend_dev->rdma_dev_res, bctx_id); + +err_free_bctx: + g_free(bctx); +} + +void rdma_backend_post_srq_recv(RdmaBackendDev *backend_dev, + RdmaBackendSRQ *srq, struct ibv_sge *sge, + 
uint32_t num_sge, void *ctx) +{ + BackendCtx *bctx; + uint32_t bctx_id; + int rc; + struct ibv_recv_wr wr = {}, *bad_wr; + + bctx = g_malloc0(sizeof(*bctx)); + bctx->up_ctx = ctx; + bctx->backend_srq = srq; + + rc = rdma_rm_alloc_cqe_ctx(backend_dev->rdma_dev_res, &bctx_id, bctx); + if (unlikely(rc)) { + complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_NOMEM, ctx); + goto err_free_bctx; + } + + rdma_protected_gslist_append_int32(&srq->cqe_ctx_list, bctx_id); + + rc = build_host_sge_array(backend_dev->rdma_dev_res, sge, num_sge, + &backend_dev->rdma_dev_res->stats.rx_bufs_len); + if (rc) { + complete_work(IBV_WC_GENERAL_ERR, rc, ctx); + goto err_dealloc_cqe_ctx; + } + + wr.num_sge = num_sge; + wr.sg_list = sge; + wr.wr_id = bctx_id; + rc = ibv_post_srq_recv(srq->ibsrq, &wr, &bad_wr); + if (rc) { + rdma_error_report("ibv_post_srq_recv fail, srqn=0x%x, rc=%d, errno=%d", + srq->ibsrq->handle, rc, errno); + complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_FAIL_BACKEND, ctx); + goto err_dealloc_cqe_ctx; + } + + qatomic_inc(&backend_dev->rdma_dev_res->stats.missing_cqe); + backend_dev->rdma_dev_res->stats.rx_bufs++; + backend_dev->rdma_dev_res->stats.rx_srq++; + + return; + +err_dealloc_cqe_ctx: + backend_dev->rdma_dev_res->stats.rx_bufs_err++; + rdma_rm_dealloc_cqe_ctx(backend_dev->rdma_dev_res, bctx_id); + +err_free_bctx: + g_free(bctx); +} + +int rdma_backend_create_pd(RdmaBackendDev *backend_dev, RdmaBackendPD *pd) +{ + pd->ibpd = ibv_alloc_pd(backend_dev->context); + + if (!pd->ibpd) { + rdma_error_report("ibv_alloc_pd fail, errno=%d", errno); + return -EIO; + } + + return 0; +} + +void rdma_backend_destroy_pd(RdmaBackendPD *pd) +{ + if (pd->ibpd) { + ibv_dealloc_pd(pd->ibpd); + } +} + +int rdma_backend_create_mr(RdmaBackendMR *mr, RdmaBackendPD *pd, void *addr, + size_t length, uint64_t guest_start, int access) +{ +#ifdef LEGACY_RDMA_REG_MR + mr->ibmr = ibv_reg_mr(pd->ibpd, addr, length, access); +#else + mr->ibmr = ibv_reg_mr_iova(pd->ibpd, addr, length, guest_start, access); +#endif + if (!mr->ibmr) { + rdma_error_report("ibv_reg_mr fail, errno=%d", errno); + return -EIO; + } + + mr->ibpd = pd->ibpd; + + return 0; +} + +void rdma_backend_destroy_mr(RdmaBackendMR *mr) +{ + if (mr->ibmr) { + ibv_dereg_mr(mr->ibmr); + } +} + +int rdma_backend_create_cq(RdmaBackendDev *backend_dev, RdmaBackendCQ *cq, + int cqe) +{ + int rc; + + cq->ibcq = ibv_create_cq(backend_dev->context, cqe + 1, NULL, + backend_dev->channel, 0); + if (!cq->ibcq) { + rdma_error_report("ibv_create_cq fail, errno=%d", errno); + return -EIO; + } + + rc = ibv_req_notify_cq(cq->ibcq, 0); + if (rc) { + rdma_warn_report("ibv_req_notify_cq fail, rc=%d, errno=%d", rc, errno); + } + + cq->backend_dev = backend_dev; + + return 0; +} + +void rdma_backend_destroy_cq(RdmaBackendCQ *cq) +{ + if (cq->ibcq) { + ibv_destroy_cq(cq->ibcq); + } +} + +int rdma_backend_create_qp(RdmaBackendQP *qp, uint8_t qp_type, + RdmaBackendPD *pd, RdmaBackendCQ *scq, + RdmaBackendCQ *rcq, RdmaBackendSRQ *srq, + uint32_t max_send_wr, uint32_t max_recv_wr, + uint32_t max_send_sge, uint32_t max_recv_sge) +{ + struct ibv_qp_init_attr attr = {}; + + qp->ibqp = 0; + + switch (qp_type) { + case IBV_QPT_GSI: + return 0; + + case IBV_QPT_RC: + /* fall through */ + case IBV_QPT_UD: + /* do nothing */ + break; + + default: + rdma_error_report("Unsupported QP type %d", qp_type); + return -EIO; + } + + attr.qp_type = qp_type; + attr.send_cq = scq->ibcq; + attr.recv_cq = rcq->ibcq; + attr.cap.max_send_wr = max_send_wr; + attr.cap.max_recv_wr = max_recv_wr; + attr.cap.max_send_sge 
= max_send_sge; + attr.cap.max_recv_sge = max_recv_sge; + if (srq) { + attr.srq = srq->ibsrq; + } + + qp->ibqp = ibv_create_qp(pd->ibpd, &attr); + if (!qp->ibqp) { + rdma_error_report("ibv_create_qp fail, errno=%d", errno); + return -EIO; + } + + rdma_protected_gslist_init(&qp->cqe_ctx_list); + + qp->ibpd = pd->ibpd; + + /* TODO: Query QP to get max_inline_data and save it to be used in send */ + + return 0; +} + +int rdma_backend_qp_state_init(RdmaBackendDev *backend_dev, RdmaBackendQP *qp, + uint8_t qp_type, uint32_t qkey) +{ + struct ibv_qp_attr attr = {}; + int rc, attr_mask; + + attr_mask = IBV_QP_STATE | IBV_QP_PKEY_INDEX | IBV_QP_PORT; + attr.qp_state = IBV_QPS_INIT; + attr.pkey_index = 0; + attr.port_num = backend_dev->port_num; + + switch (qp_type) { + case IBV_QPT_RC: + attr_mask |= IBV_QP_ACCESS_FLAGS; + trace_rdma_backend_rc_qp_state_init(qp->ibqp->qp_num); + break; + + case IBV_QPT_UD: + attr.qkey = qkey; + attr_mask |= IBV_QP_QKEY; + trace_rdma_backend_ud_qp_state_init(qp->ibqp->qp_num, qkey); + break; + + default: + rdma_error_report("Unsupported QP type %d", qp_type); + return -EIO; + } + + rc = ibv_modify_qp(qp->ibqp, &attr, attr_mask); + if (rc) { + rdma_error_report("ibv_modify_qp fail, rc=%d, errno=%d", rc, errno); + return -EIO; + } + + return 0; +} + +int rdma_backend_qp_state_rtr(RdmaBackendDev *backend_dev, RdmaBackendQP *qp, + uint8_t qp_type, uint8_t sgid_idx, + union ibv_gid *dgid, uint32_t dqpn, + uint32_t rq_psn, uint32_t qkey, bool use_qkey) +{ + struct ibv_qp_attr attr = {}; + union ibv_gid ibv_gid = { + .global.interface_id = dgid->global.interface_id, + .global.subnet_prefix = dgid->global.subnet_prefix + }; + int rc, attr_mask; + + attr.qp_state = IBV_QPS_RTR; + attr_mask = IBV_QP_STATE; + + qp->sgid_idx = sgid_idx; + + switch (qp_type) { + case IBV_QPT_RC: + attr.path_mtu = IBV_MTU_1024; + attr.dest_qp_num = dqpn; + attr.max_dest_rd_atomic = 1; + attr.min_rnr_timer = 12; + attr.ah_attr.port_num = backend_dev->port_num; + attr.ah_attr.is_global = 1; + attr.ah_attr.grh.hop_limit = 1; + attr.ah_attr.grh.dgid = ibv_gid; + attr.ah_attr.grh.sgid_index = qp->sgid_idx; + attr.rq_psn = rq_psn; + + attr_mask |= IBV_QP_AV | IBV_QP_PATH_MTU | IBV_QP_DEST_QPN | + IBV_QP_RQ_PSN | IBV_QP_MAX_DEST_RD_ATOMIC | + IBV_QP_MIN_RNR_TIMER; + + trace_rdma_backend_rc_qp_state_rtr(qp->ibqp->qp_num, + be64_to_cpu(ibv_gid.global. + subnet_prefix), + be64_to_cpu(ibv_gid.global. + interface_id), + qp->sgid_idx, dqpn, rq_psn); + break; + + case IBV_QPT_UD: + if (use_qkey) { + attr.qkey = qkey; + attr_mask |= IBV_QP_QKEY; + } + trace_rdma_backend_ud_qp_state_rtr(qp->ibqp->qp_num, use_qkey ? 
qkey : + 0); + break; + } + + rc = ibv_modify_qp(qp->ibqp, &attr, attr_mask); + if (rc) { + rdma_error_report("ibv_modify_qp fail, rc=%d, errno=%d", rc, errno); + return -EIO; + } + + return 0; +} + +int rdma_backend_qp_state_rts(RdmaBackendQP *qp, uint8_t qp_type, + uint32_t sq_psn, uint32_t qkey, bool use_qkey) +{ + struct ibv_qp_attr attr = {}; + int rc, attr_mask; + + attr.qp_state = IBV_QPS_RTS; + attr.sq_psn = sq_psn; + attr_mask = IBV_QP_STATE | IBV_QP_SQ_PSN; + + switch (qp_type) { + case IBV_QPT_RC: + attr.timeout = 14; + attr.retry_cnt = 7; + attr.rnr_retry = 7; + attr.max_rd_atomic = 1; + + attr_mask |= IBV_QP_TIMEOUT | IBV_QP_RETRY_CNT | IBV_QP_RNR_RETRY | + IBV_QP_MAX_QP_RD_ATOMIC; + trace_rdma_backend_rc_qp_state_rts(qp->ibqp->qp_num, sq_psn); + break; + + case IBV_QPT_UD: + if (use_qkey) { + attr.qkey = qkey; + attr_mask |= IBV_QP_QKEY; + } + trace_rdma_backend_ud_qp_state_rts(qp->ibqp->qp_num, sq_psn, + use_qkey ? qkey : 0); + break; + } + + rc = ibv_modify_qp(qp->ibqp, &attr, attr_mask); + if (rc) { + rdma_error_report("ibv_modify_qp fail, rc=%d, errno=%d", rc, errno); + return -EIO; + } + + return 0; +} + +int rdma_backend_query_qp(RdmaBackendQP *qp, struct ibv_qp_attr *attr, + int attr_mask, struct ibv_qp_init_attr *init_attr) +{ + if (!qp->ibqp) { + attr->qp_state = IBV_QPS_RTS; + return 0; + } + + return ibv_query_qp(qp->ibqp, attr, attr_mask, init_attr); +} + +void rdma_backend_destroy_qp(RdmaBackendQP *qp, RdmaDeviceResources *dev_res) +{ + if (qp->ibqp) { + ibv_destroy_qp(qp->ibqp); + } + g_slist_foreach(qp->cqe_ctx_list.list, free_cqe_ctx, dev_res); + rdma_protected_gslist_destroy(&qp->cqe_ctx_list); +} + +int rdma_backend_create_srq(RdmaBackendSRQ *srq, RdmaBackendPD *pd, + uint32_t max_wr, uint32_t max_sge, + uint32_t srq_limit) +{ + struct ibv_srq_init_attr srq_init_attr = {}; + + srq_init_attr.attr.max_wr = max_wr; + srq_init_attr.attr.max_sge = max_sge; + srq_init_attr.attr.srq_limit = srq_limit; + + srq->ibsrq = ibv_create_srq(pd->ibpd, &srq_init_attr); + if (!srq->ibsrq) { + rdma_error_report("ibv_create_srq failed, errno=%d", errno); + return -EIO; + } + + rdma_protected_gslist_init(&srq->cqe_ctx_list); + + return 0; +} + +int rdma_backend_query_srq(RdmaBackendSRQ *srq, struct ibv_srq_attr *srq_attr) +{ + if (!srq->ibsrq) { + return -EINVAL; + } + + return ibv_query_srq(srq->ibsrq, srq_attr); +} + +int rdma_backend_modify_srq(RdmaBackendSRQ *srq, struct ibv_srq_attr *srq_attr, + int srq_attr_mask) +{ + if (!srq->ibsrq) { + return -EINVAL; + } + + return ibv_modify_srq(srq->ibsrq, srq_attr, srq_attr_mask); +} + +void rdma_backend_destroy_srq(RdmaBackendSRQ *srq, RdmaDeviceResources *dev_res) +{ + if (srq->ibsrq) { + ibv_destroy_srq(srq->ibsrq); + } + g_slist_foreach(srq->cqe_ctx_list.list, free_cqe_ctx, dev_res); + rdma_protected_gslist_destroy(&srq->cqe_ctx_list); +} + +#define CHK_ATTR(req, dev, member, fmt) ({ \ + trace_rdma_check_dev_attr(#member, dev.member, req->member); \ + if (req->member > dev.member) { \ + rdma_warn_report("%s = "fmt" is higher than host device capability "fmt, \ + #member, req->member, dev.member); \ + req->member = dev.member; \ + } \ +}) + +static int init_device_caps(RdmaBackendDev *backend_dev, + struct ibv_device_attr *dev_attr) +{ + struct ibv_device_attr bk_dev_attr; + int rc; + + rc = ibv_query_device(backend_dev->context, &bk_dev_attr); + if (rc) { + rdma_error_report("ibv_query_device fail, rc=%d, errno=%d", rc, errno); + return -EIO; + } + + dev_attr->max_sge = MAX_SGE; + dev_attr->max_srq_sge = MAX_SGE; + + 
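+ /* CHK_ATTR clamps each requested attribute to the host device capability; for max_qp it expands roughly to: if (dev_attr->max_qp > bk_dev_attr.max_qp) dev_attr->max_qp = bk_dev_attr.max_qp; (plus a trace and a warning). */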
CHK_ATTR(dev_attr, bk_dev_attr, max_mr_size, "%" PRId64); + CHK_ATTR(dev_attr, bk_dev_attr, max_qp, "%d"); + CHK_ATTR(dev_attr, bk_dev_attr, max_sge, "%d"); + CHK_ATTR(dev_attr, bk_dev_attr, max_cq, "%d"); + CHK_ATTR(dev_attr, bk_dev_attr, max_mr, "%d"); + CHK_ATTR(dev_attr, bk_dev_attr, max_pd, "%d"); + CHK_ATTR(dev_attr, bk_dev_attr, max_qp_rd_atom, "%d"); + CHK_ATTR(dev_attr, bk_dev_attr, max_qp_init_rd_atom, "%d"); + CHK_ATTR(dev_attr, bk_dev_attr, max_ah, "%d"); + CHK_ATTR(dev_attr, bk_dev_attr, max_srq, "%d"); + + return 0; +} + +static inline void build_mad_hdr(struct ibv_grh *grh, union ibv_gid *sgid, + union ibv_gid *my_gid, int paylen) +{ + grh->paylen = htons(paylen); + grh->sgid = *sgid; + grh->dgid = *my_gid; +} + +static void process_incoming_mad_req(RdmaBackendDev *backend_dev, + RdmaCmMuxMsg *msg) +{ + unsigned long cqe_ctx_id; + BackendCtx *bctx; + char *mad; + + trace_mad_message("recv", msg->umad.mad, msg->umad_len); + + cqe_ctx_id = rdma_protected_gqueue_pop_int64(&backend_dev->recv_mads_list); + if (cqe_ctx_id == -ENOENT) { + rdma_warn_report("No more free MADs buffers, waiting for a while"); + sleep(THR_POLL_TO); + return; + } + + bctx = rdma_rm_get_cqe_ctx(backend_dev->rdma_dev_res, cqe_ctx_id); + if (unlikely(!bctx)) { + rdma_error_report("No matching ctx for req %ld", cqe_ctx_id); + backend_dev->rdma_dev_res->stats.mad_rx_err++; + return; + } + + mad = rdma_pci_dma_map(backend_dev->dev, bctx->sge.addr, + bctx->sge.length); + if (!mad || bctx->sge.length < msg->umad_len + MAD_HDR_SIZE) { + backend_dev->rdma_dev_res->stats.mad_rx_err++; + complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_INV_MAD_BUFF, + bctx->up_ctx); + } else { + struct ibv_wc wc = {}; + memset(mad, 0, bctx->sge.length); + build_mad_hdr((struct ibv_grh *)mad, + (union ibv_gid *)&msg->umad.hdr.addr.gid, &msg->hdr.sgid, + msg->umad_len); + memcpy(&mad[MAD_HDR_SIZE], msg->umad.mad, msg->umad_len); + rdma_pci_dma_unmap(backend_dev->dev, mad, bctx->sge.length); + + wc.byte_len = msg->umad_len; + wc.status = IBV_WC_SUCCESS; + wc.wc_flags = IBV_WC_GRH; + backend_dev->rdma_dev_res->stats.mad_rx++; + comp_handler(bctx->up_ctx, &wc); + } + + g_free(bctx); + rdma_rm_dealloc_cqe_ctx(backend_dev->rdma_dev_res, cqe_ctx_id); +} + +static inline int rdmacm_mux_can_receive(void *opaque) +{ + RdmaBackendDev *backend_dev = (RdmaBackendDev *)opaque; + + return rdmacm_mux_can_process_async(backend_dev); +} + +static void rdmacm_mux_read(void *opaque, const uint8_t *buf, int size) +{ + RdmaBackendDev *backend_dev = (RdmaBackendDev *)opaque; + RdmaCmMuxMsg *msg = (RdmaCmMuxMsg *)buf; + + trace_rdmacm_mux("read", msg->hdr.msg_type, msg->hdr.op_code); + + if (msg->hdr.msg_type != RDMACM_MUX_MSG_TYPE_REQ && + msg->hdr.op_code != RDMACM_MUX_OP_CODE_MAD) { + rdma_error_report("Error: Not a MAD request, skipping"); + return; + } + process_incoming_mad_req(backend_dev, msg); +} + +static int mad_init(RdmaBackendDev *backend_dev, CharBackend *mad_chr_be) +{ + int ret; + + backend_dev->rdmacm_mux.chr_be = mad_chr_be; + + ret = qemu_chr_fe_backend_connected(backend_dev->rdmacm_mux.chr_be); + if (!ret) { + rdma_error_report("Missing chardev for MAD multiplexer"); + return -EIO; + } + + rdma_protected_gqueue_init(&backend_dev->recv_mads_list); + + enable_rdmacm_mux_async(backend_dev); + + qemu_chr_fe_set_handlers(backend_dev->rdmacm_mux.chr_be, + rdmacm_mux_can_receive, rdmacm_mux_read, NULL, + NULL, backend_dev, NULL, true); + + return 0; +} + +static void mad_stop(RdmaBackendDev *backend_dev) +{ + clean_recv_mads(backend_dev); +} + 
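Every work completion the backend produces flows through the single function pointer installed by rdma_backend_register_comp_handler(). As an illustration only (not part of this patch), a minimal frontend-side consumer might look like the sketch below; ExampleCompCtx is a hypothetical context type standing in for the richer per-CQE state the real PVRDMA frontend passes as ctx:

/* Hypothetical sketch: a frontend completion handler. The ctx pointer is
 * whatever the frontend handed to rdma_backend_post_send()/_post_recv();
 * here it is assumed to be a heap-allocated ExampleCompCtx. */
typedef struct ExampleCompCtx {
    uint32_t cq_handle;   /* guest-visible CQ this completion belongs to */
} ExampleCompCtx;

static void example_comp_handler(void *ctx, struct ibv_wc *wc)
{
    ExampleCompCtx *comp = ctx;

    if (wc->status != IBV_WC_SUCCESS) {
        /* wc->vendor_err carries the VENDOR_ERR_* codes that
         * complete_work() fills in on backend-side failures. */
        rdma_error_report("CQ %u: completion failed, status=%d, vendor_err=0x%x",
                          comp->cq_handle, wc->status, wc->vendor_err);
    }

    /* ...here the frontend would push a CQE into the guest ring for
     * comp->cq_handle and notify the guest... */

    g_free(comp);
}

/* Registered once at device init, before rdma_backend_start():
 *
 *     rdma_backend_register_comp_handler(example_comp_handler);
 */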
+static void mad_fini(RdmaBackendDev *backend_dev) +{ + disable_rdmacm_mux_async(backend_dev); + qemu_chr_fe_disconnect(backend_dev->rdmacm_mux.chr_be); + rdma_protected_gqueue_destroy(&backend_dev->recv_mads_list); +} + +int rdma_backend_get_gid_index(RdmaBackendDev *backend_dev, + union ibv_gid *gid) +{ + union ibv_gid sgid; + int ret; + int i = 0; + + do { + ret = ibv_query_gid(backend_dev->context, backend_dev->port_num, i, + &sgid); + i++; + } while (!ret && (memcmp(&sgid, gid, sizeof(*gid)))); + + trace_rdma_backend_get_gid_index(be64_to_cpu(gid->global.subnet_prefix), + be64_to_cpu(gid->global.interface_id), + i - 1); + + return ret ? ret : i - 1; +} + +int rdma_backend_add_gid(RdmaBackendDev *backend_dev, const char *ifname, + union ibv_gid *gid) +{ + RdmaCmMuxMsg msg = {}; + int ret; + + trace_rdma_backend_gid_change("add", be64_to_cpu(gid->global.subnet_prefix), + be64_to_cpu(gid->global.interface_id)); + + msg.hdr.op_code = RDMACM_MUX_OP_CODE_REG; + memcpy(msg.hdr.sgid.raw, gid->raw, sizeof(msg.hdr.sgid)); + + ret = rdmacm_mux_send(backend_dev, &msg); + if (ret) { + rdma_error_report("Failed to register GID to rdma_umadmux (%d)", ret); + return -EIO; + } + + qapi_event_send_rdma_gid_status_changed(ifname, true, + gid->global.subnet_prefix, + gid->global.interface_id); + + return ret; +} + +int rdma_backend_del_gid(RdmaBackendDev *backend_dev, const char *ifname, + union ibv_gid *gid) +{ + RdmaCmMuxMsg msg = {}; + int ret; + + trace_rdma_backend_gid_change("del", be64_to_cpu(gid->global.subnet_prefix), + be64_to_cpu(gid->global.interface_id)); + + msg.hdr.op_code = RDMACM_MUX_OP_CODE_UNREG; + memcpy(msg.hdr.sgid.raw, gid->raw, sizeof(msg.hdr.sgid)); + + ret = rdmacm_mux_send(backend_dev, &msg); + if (ret) { + rdma_error_report("Failed to unregister GID from rdma_umadmux (%d)", + ret); + return -EIO; + } + + qapi_event_send_rdma_gid_status_changed(ifname, false, + gid->global.subnet_prefix, + gid->global.interface_id); + + return 0; +} + +int rdma_backend_init(RdmaBackendDev *backend_dev, PCIDevice *pdev, + RdmaDeviceResources *rdma_dev_res, + const char *backend_device_name, uint8_t port_num, + struct ibv_device_attr *dev_attr, CharBackend *mad_chr_be) +{ + int i; + int ret = 0; + int num_ibv_devices; + struct ibv_device **dev_list; + + memset(backend_dev, 0, sizeof(*backend_dev)); + + backend_dev->dev = pdev; + backend_dev->port_num = port_num; + backend_dev->rdma_dev_res = rdma_dev_res; + + rdma_backend_register_comp_handler(dummy_comp_handler); + + dev_list = ibv_get_device_list(&num_ibv_devices); + if (!dev_list) { + rdma_error_report("Failed to get IB devices list"); + return -EIO; + } + + if (num_ibv_devices == 0) { + rdma_error_report("No IB devices were found"); + ret = -ENXIO; + goto out_free_dev_list; + } + + if (backend_device_name) { + for (i = 0; dev_list[i]; ++i) { + if (!strcmp(ibv_get_device_name(dev_list[i]), + backend_device_name)) { + break; + } + } + + backend_dev->ib_dev = dev_list[i]; + if (!backend_dev->ib_dev) { + rdma_error_report("Failed to find IB device %s", + backend_device_name); + ret = -EIO; + goto out_free_dev_list; + } + } else { + backend_dev->ib_dev = *dev_list; + } + + rdma_info_report("uverb device %s", backend_dev->ib_dev->dev_name); + + backend_dev->context = ibv_open_device(backend_dev->ib_dev); + if (!backend_dev->context) { + rdma_error_report("Failed to open IB device %s", + ibv_get_device_name(backend_dev->ib_dev)); + ret = -EIO; + goto out; + } + + backend_dev->channel = ibv_create_comp_channel(backend_dev->context); + if 
(!backend_dev->channel) { + rdma_error_report("Failed to create IB communication channel"); + ret = -EIO; + goto out_close_device; + } + + ret = init_device_caps(backend_dev, dev_attr); + if (ret) { + rdma_error_report("Failed to initialize device capabilities"); + ret = -EIO; + goto out_destroy_comm_channel; + } + + + ret = mad_init(backend_dev, mad_chr_be); + if (ret) { + rdma_error_report("Failed to initialize mad"); + ret = -EIO; + goto out_destroy_comm_channel; + } + + backend_dev->comp_thread.run = false; + backend_dev->comp_thread.is_running = false; + + ah_cache_init(); + + goto out_free_dev_list; + +out_destroy_comm_channel: + ibv_destroy_comp_channel(backend_dev->channel); + +out_close_device: + ibv_close_device(backend_dev->context); + +out_free_dev_list: + ibv_free_device_list(dev_list); + +out: + return ret; +} + + +void rdma_backend_start(RdmaBackendDev *backend_dev) +{ + start_comp_thread(backend_dev); +} + +void rdma_backend_stop(RdmaBackendDev *backend_dev) +{ + mad_stop(backend_dev); + stop_backend_thread(&backend_dev->comp_thread); +} + +void rdma_backend_fini(RdmaBackendDev *backend_dev) +{ + mad_fini(backend_dev); + g_hash_table_destroy(ah_hash); + ibv_destroy_comp_channel(backend_dev->channel); + ibv_close_device(backend_dev->context); +} diff --git a/hw/rdma/rdma_backend.h b/hw/rdma/rdma_backend.h new file mode 100644 index 000000000..225af481e --- /dev/null +++ b/hw/rdma/rdma_backend.h @@ -0,0 +1,129 @@ +/* + * RDMA device: Definitions of Backend Device functions + * + * Copyright (C) 2018 Oracle + * Copyright (C) 2018 Red Hat Inc + * + * Authors: + * Yuval Shaia + * Marcel Apfelbaum + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +#ifndef RDMA_BACKEND_H +#define RDMA_BACKEND_H + +#include "qapi/error.h" +#include "chardev/char-fe.h" + +#include "rdma_rm_defs.h" +#include "rdma_backend_defs.h" + +/* Vendor Errors */ +#define VENDOR_ERR_FAIL_BACKEND 0x201 +#define VENDOR_ERR_TOO_MANY_SGES 0x202 +#define VENDOR_ERR_NOMEM 0x203 +#define VENDOR_ERR_QP0 0x204 +#define VENDOR_ERR_INV_NUM_SGE 0x205 +#define VENDOR_ERR_MAD_SEND 0x206 +#define VENDOR_ERR_INVLKEY 0x207 +#define VENDOR_ERR_MR_SMALL 0x208 +#define VENDOR_ERR_INV_MAD_BUFF 0x209 +#define VENDOR_ERR_INV_GID_IDX 0x210 + +/* Add definition for QP0 and QP1 as there is no userspace enums for them */ +enum ibv_special_qp_type { + IBV_QPT_SMI = 0, + IBV_QPT_GSI = 1, +}; + +static inline uint32_t rdma_backend_qpn(const RdmaBackendQP *qp) +{ + return qp->ibqp ? qp->ibqp->qp_num : 1; +} + +static inline uint32_t rdma_backend_mr_lkey(const RdmaBackendMR *mr) +{ + return mr->ibmr ? mr->ibmr->lkey : 0; +} + +static inline uint32_t rdma_backend_mr_rkey(const RdmaBackendMR *mr) +{ + return mr->ibmr ? 
mr->ibmr->rkey : 0; +} + +int rdma_backend_init(RdmaBackendDev *backend_dev, PCIDevice *pdev, + RdmaDeviceResources *rdma_dev_res, + const char *backend_device_name, uint8_t port_num, + struct ibv_device_attr *dev_attr, + CharBackend *mad_chr_be); +void rdma_backend_fini(RdmaBackendDev *backend_dev); +int rdma_backend_add_gid(RdmaBackendDev *backend_dev, const char *ifname, + union ibv_gid *gid); +int rdma_backend_del_gid(RdmaBackendDev *backend_dev, const char *ifname, + union ibv_gid *gid); +int rdma_backend_get_gid_index(RdmaBackendDev *backend_dev, + union ibv_gid *gid); +void rdma_backend_start(RdmaBackendDev *backend_dev); +void rdma_backend_stop(RdmaBackendDev *backend_dev); +void rdma_backend_register_comp_handler(void (*handler)(void *ctx, + struct ibv_wc *wc)); +void rdma_backend_unregister_comp_handler(void); + +int rdma_backend_query_port(RdmaBackendDev *backend_dev, + struct ibv_port_attr *port_attr); +int rdma_backend_create_pd(RdmaBackendDev *backend_dev, RdmaBackendPD *pd); +void rdma_backend_destroy_pd(RdmaBackendPD *pd); + +int rdma_backend_create_mr(RdmaBackendMR *mr, RdmaBackendPD *pd, void *addr, + size_t length, uint64_t guest_start, int access); +void rdma_backend_destroy_mr(RdmaBackendMR *mr); + +int rdma_backend_create_cq(RdmaBackendDev *backend_dev, RdmaBackendCQ *cq, + int cqe); +void rdma_backend_destroy_cq(RdmaBackendCQ *cq); +void rdma_backend_poll_cq(RdmaDeviceResources *rdma_dev_res, RdmaBackendCQ *cq); + +int rdma_backend_create_qp(RdmaBackendQP *qp, uint8_t qp_type, + RdmaBackendPD *pd, RdmaBackendCQ *scq, + RdmaBackendCQ *rcq, RdmaBackendSRQ *srq, + uint32_t max_send_wr, uint32_t max_recv_wr, + uint32_t max_send_sge, uint32_t max_recv_sge); +int rdma_backend_qp_state_init(RdmaBackendDev *backend_dev, RdmaBackendQP *qp, + uint8_t qp_type, uint32_t qkey); +int rdma_backend_qp_state_rtr(RdmaBackendDev *backend_dev, RdmaBackendQP *qp, + uint8_t qp_type, uint8_t sgid_idx, + union ibv_gid *dgid, uint32_t dqpn, + uint32_t rq_psn, uint32_t qkey, bool use_qkey); +int rdma_backend_qp_state_rts(RdmaBackendQP *qp, uint8_t qp_type, + uint32_t sq_psn, uint32_t qkey, bool use_qkey); +int rdma_backend_query_qp(RdmaBackendQP *qp, struct ibv_qp_attr *attr, + int attr_mask, struct ibv_qp_init_attr *init_attr); +void rdma_backend_destroy_qp(RdmaBackendQP *qp, RdmaDeviceResources *dev_res); + +void rdma_backend_post_send(RdmaBackendDev *backend_dev, + RdmaBackendQP *qp, uint8_t qp_type, + struct ibv_sge *sge, uint32_t num_sge, + uint8_t sgid_idx, union ibv_gid *sgid, + union ibv_gid *dgid, uint32_t dqpn, uint32_t dqkey, + void *ctx); +void rdma_backend_post_recv(RdmaBackendDev *backend_dev, + RdmaBackendQP *qp, uint8_t qp_type, + struct ibv_sge *sge, uint32_t num_sge, void *ctx); + +int rdma_backend_create_srq(RdmaBackendSRQ *srq, RdmaBackendPD *pd, + uint32_t max_wr, uint32_t max_sge, + uint32_t srq_limit); +int rdma_backend_query_srq(RdmaBackendSRQ *srq, struct ibv_srq_attr *srq_attr); +int rdma_backend_modify_srq(RdmaBackendSRQ *srq, struct ibv_srq_attr *srq_attr, + int srq_attr_mask); +void rdma_backend_destroy_srq(RdmaBackendSRQ *srq, + RdmaDeviceResources *dev_res); +void rdma_backend_post_srq_recv(RdmaBackendDev *backend_dev, + RdmaBackendSRQ *srq, struct ibv_sge *sge, + uint32_t num_sge, void *ctx); + +#endif diff --git a/hw/rdma/rdma_backend_defs.h b/hw/rdma/rdma_backend_defs.h new file mode 100644 index 000000000..4e6c0ad69 --- /dev/null +++ b/hw/rdma/rdma_backend_defs.h @@ -0,0 +1,76 @@ +/* + * RDMA device: Definitions of Backend Device structures + * + * Copyright 
(C) 2018 Oracle + * Copyright (C) 2018 Red Hat Inc + * + * Authors: + * Yuval Shaia <yuval.shaia@oracle.com> + * Marcel Apfelbaum <marcel@redhat.com> + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +#ifndef RDMA_BACKEND_DEFS_H +#define RDMA_BACKEND_DEFS_H + +#include "qemu/thread.h" +#include "chardev/char-fe.h" +#include <infiniband/verbs.h> +#include "contrib/rdmacm-mux/rdmacm-mux.h" +#include "rdma_utils.h" + +typedef struct RdmaDeviceResources RdmaDeviceResources; + +typedef struct RdmaBackendThread { + QemuThread thread; + bool run; /* Set by thread manager to let thread know it should exit */ + bool is_running; /* Set by the thread to report its status */ +} RdmaBackendThread; + +typedef struct RdmaCmMux { + CharBackend *chr_be; + int can_receive; +} RdmaCmMux; + +typedef struct RdmaBackendDev { + RdmaBackendThread comp_thread; + PCIDevice *dev; + RdmaDeviceResources *rdma_dev_res; + struct ibv_device *ib_dev; + struct ibv_context *context; + struct ibv_comp_channel *channel; + uint8_t port_num; + RdmaProtectedGQueue recv_mads_list; + RdmaCmMux rdmacm_mux; +} RdmaBackendDev; + +typedef struct RdmaBackendPD { + struct ibv_pd *ibpd; +} RdmaBackendPD; + +typedef struct RdmaBackendMR { + struct ibv_pd *ibpd; + struct ibv_mr *ibmr; +} RdmaBackendMR; + +typedef struct RdmaBackendCQ { + RdmaBackendDev *backend_dev; + struct ibv_cq *ibcq; +} RdmaBackendCQ; + +typedef struct RdmaBackendQP { + struct ibv_pd *ibpd; + struct ibv_qp *ibqp; + uint8_t sgid_idx; + RdmaProtectedGSList cqe_ctx_list; +} RdmaBackendQP; + +typedef struct RdmaBackendSRQ { + struct ibv_srq *ibsrq; + RdmaProtectedGSList cqe_ctx_list; +} RdmaBackendSRQ; + +#endif diff --git a/hw/rdma/rdma_rm.c b/hw/rdma/rdma_rm.c new file mode 100644 index 000000000..cfd85de3e --- /dev/null +++ b/hw/rdma/rdma_rm.c @@ -0,0 +1,816 @@ +/* + * QEMU paravirtual RDMA - Resource Manager Implementation + * + * Copyright (C) 2018 Oracle + * Copyright (C) 2018 Red Hat Inc + * + * Authors: + * Yuval Shaia <yuval.shaia@oracle.com> + * Marcel Apfelbaum <marcel@redhat.com> + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory.
+ * + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "cpu.h" +#include "monitor/monitor.h" + +#include "trace.h" +#include "rdma_utils.h" +#include "rdma_backend.h" +#include "rdma_rm.h" + +/* Page directory and page tables */ +#define PG_DIR_SZ { TARGET_PAGE_SIZE / sizeof(__u64) } +#define PG_TBL_SZ { TARGET_PAGE_SIZE / sizeof(__u64) } + +void rdma_format_device_counters(RdmaDeviceResources *dev_res, GString *buf) +{ + g_string_append_printf(buf, "\ttx : %" PRId64 "\n", + dev_res->stats.tx); + g_string_append_printf(buf, "\ttx_len : %" PRId64 "\n", + dev_res->stats.tx_len); + g_string_append_printf(buf, "\ttx_err : %" PRId64 "\n", + dev_res->stats.tx_err); + g_string_append_printf(buf, "\trx_bufs : %" PRId64 "\n", + dev_res->stats.rx_bufs); + g_string_append_printf(buf, "\trx_srq : %" PRId64 "\n", + dev_res->stats.rx_srq); + g_string_append_printf(buf, "\trx_bufs_len : %" PRId64 "\n", + dev_res->stats.rx_bufs_len); + g_string_append_printf(buf, "\trx_bufs_err : %" PRId64 "\n", + dev_res->stats.rx_bufs_err); + g_string_append_printf(buf, "\tcomps : %" PRId64 "\n", + dev_res->stats.completions); + g_string_append_printf(buf, "\tmissing_comps : %" PRId32 "\n", + dev_res->stats.missing_cqe); + g_string_append_printf(buf, "\tpoll_cq (bk) : %" PRId64 "\n", + dev_res->stats.poll_cq_from_bk); + g_string_append_printf(buf, "\tpoll_cq_ppoll_to : %" PRId64 "\n", + dev_res->stats.poll_cq_ppoll_to); + g_string_append_printf(buf, "\tpoll_cq (fe) : %" PRId64 "\n", + dev_res->stats.poll_cq_from_guest); + g_string_append_printf(buf, "\tpoll_cq_empty : %" PRId64 "\n", + dev_res->stats.poll_cq_from_guest_empty); + g_string_append_printf(buf, "\tmad_tx : %" PRId64 "\n", + dev_res->stats.mad_tx); + g_string_append_printf(buf, "\tmad_tx_err : %" PRId64 "\n", + dev_res->stats.mad_tx_err); + g_string_append_printf(buf, "\tmad_rx : %" PRId64 "\n", + dev_res->stats.mad_rx); + g_string_append_printf(buf, "\tmad_rx_err : %" PRId64 "\n", + dev_res->stats.mad_rx_err); + g_string_append_printf(buf, "\tmad_rx_bufs : %" PRId64 "\n", + dev_res->stats.mad_rx_bufs); + g_string_append_printf(buf, "\tmad_rx_bufs_err : %" PRId64 "\n", + dev_res->stats.mad_rx_bufs_err); + g_string_append_printf(buf, "\tPDs : %" PRId32 "\n", + dev_res->pd_tbl.used); + g_string_append_printf(buf, "\tMRs : %" PRId32 "\n", + dev_res->mr_tbl.used); + g_string_append_printf(buf, "\tUCs : %" PRId32 "\n", + dev_res->uc_tbl.used); + g_string_append_printf(buf, "\tQPs : %" PRId32 "\n", + dev_res->qp_tbl.used); + g_string_append_printf(buf, "\tCQs : %" PRId32 "\n", + dev_res->cq_tbl.used); + g_string_append_printf(buf, "\tCEQ_CTXs : %" PRId32 "\n", + dev_res->cqe_ctx_tbl.used); +} + +static inline void res_tbl_init(const char *name, RdmaRmResTbl *tbl, + uint32_t tbl_sz, uint32_t res_sz) +{ + tbl->tbl = g_malloc(tbl_sz * res_sz); + + strncpy(tbl->name, name, MAX_RM_TBL_NAME); + tbl->name[MAX_RM_TBL_NAME - 1] = 0; + + tbl->bitmap = bitmap_new(tbl_sz); + tbl->tbl_sz = tbl_sz; + tbl->res_sz = res_sz; + tbl->used = 0; + qemu_mutex_init(&tbl->lock); +} + +static inline void res_tbl_free(RdmaRmResTbl *tbl) +{ + if (!tbl->bitmap) { + return; + } + qemu_mutex_destroy(&tbl->lock); + g_free(tbl->tbl); + g_free(tbl->bitmap); +} + +static inline void *rdma_res_tbl_get(RdmaRmResTbl *tbl, uint32_t handle) +{ + trace_rdma_res_tbl_get(tbl->name, handle); + + if ((handle < tbl->tbl_sz) && (test_bit(handle, tbl->bitmap))) { + return tbl->tbl + handle * tbl->res_sz; + } else { + rdma_error_report("Table %s, invalid handle %d", tbl->name, handle); + return 
NULL; + } +} + +static inline void *rdma_res_tbl_alloc(RdmaRmResTbl *tbl, uint32_t *handle) +{ + qemu_mutex_lock(&tbl->lock); + + *handle = find_first_zero_bit(tbl->bitmap, tbl->tbl_sz); + if (*handle > tbl->tbl_sz) { + rdma_error_report("Table %s, failed to allocate, bitmap is full", + tbl->name); + qemu_mutex_unlock(&tbl->lock); + return NULL; + } + + set_bit(*handle, tbl->bitmap); + + tbl->used++; + + qemu_mutex_unlock(&tbl->lock); + + memset(tbl->tbl + *handle * tbl->res_sz, 0, tbl->res_sz); + + trace_rdma_res_tbl_alloc(tbl->name, *handle); + + return tbl->tbl + *handle * tbl->res_sz; +} + +static inline void rdma_res_tbl_dealloc(RdmaRmResTbl *tbl, uint32_t handle) +{ + trace_rdma_res_tbl_dealloc(tbl->name, handle); + + QEMU_LOCK_GUARD(&tbl->lock); + + if (handle < tbl->tbl_sz) { + clear_bit(handle, tbl->bitmap); + tbl->used--; + } + +} + +int rdma_rm_alloc_pd(RdmaDeviceResources *dev_res, RdmaBackendDev *backend_dev, + uint32_t *pd_handle, uint32_t ctx_handle) +{ + RdmaRmPD *pd; + int ret = -ENOMEM; + + pd = rdma_res_tbl_alloc(&dev_res->pd_tbl, pd_handle); + if (!pd) { + goto out; + } + + ret = rdma_backend_create_pd(backend_dev, &pd->backend_pd); + if (ret) { + ret = -EIO; + goto out_tbl_dealloc; + } + + pd->ctx_handle = ctx_handle; + + return 0; + +out_tbl_dealloc: + rdma_res_tbl_dealloc(&dev_res->pd_tbl, *pd_handle); + +out: + return ret; +} + +RdmaRmPD *rdma_rm_get_pd(RdmaDeviceResources *dev_res, uint32_t pd_handle) +{ + return rdma_res_tbl_get(&dev_res->pd_tbl, pd_handle); +} + +void rdma_rm_dealloc_pd(RdmaDeviceResources *dev_res, uint32_t pd_handle) +{ + RdmaRmPD *pd = rdma_rm_get_pd(dev_res, pd_handle); + + if (pd) { + rdma_backend_destroy_pd(&pd->backend_pd); + rdma_res_tbl_dealloc(&dev_res->pd_tbl, pd_handle); + } +} + +int rdma_rm_alloc_mr(RdmaDeviceResources *dev_res, uint32_t pd_handle, + uint64_t guest_start, uint64_t guest_length, + void *host_virt, int access_flags, uint32_t *mr_handle, + uint32_t *lkey, uint32_t *rkey) +{ + RdmaRmMR *mr; + int ret = 0; + RdmaRmPD *pd; + + pd = rdma_rm_get_pd(dev_res, pd_handle); + if (!pd) { + return -EINVAL; + } + + mr = rdma_res_tbl_alloc(&dev_res->mr_tbl, mr_handle); + if (!mr) { + return -ENOMEM; + } + trace_rdma_rm_alloc_mr(*mr_handle, host_virt, guest_start, guest_length, + access_flags); + + if (host_virt) { + mr->virt = host_virt; + mr->start = guest_start; + mr->length = guest_length; + mr->virt += (mr->start & (TARGET_PAGE_SIZE - 1)); + + ret = rdma_backend_create_mr(&mr->backend_mr, &pd->backend_pd, mr->virt, + mr->length, guest_start, access_flags); + if (ret) { + ret = -EIO; + goto out_dealloc_mr; + } +#ifdef LEGACY_RDMA_REG_MR + /* We keep mr_handle in lkey so send and recv get get mr ptr */ + *lkey = *mr_handle; +#else + *lkey = rdma_backend_mr_lkey(&mr->backend_mr); +#endif + } + + *rkey = -1; + + mr->pd_handle = pd_handle; + + return 0; + +out_dealloc_mr: + rdma_res_tbl_dealloc(&dev_res->mr_tbl, *mr_handle); + + return ret; +} + +RdmaRmMR *rdma_rm_get_mr(RdmaDeviceResources *dev_res, uint32_t mr_handle) +{ + return rdma_res_tbl_get(&dev_res->mr_tbl, mr_handle); +} + +void rdma_rm_dealloc_mr(RdmaDeviceResources *dev_res, uint32_t mr_handle) +{ + RdmaRmMR *mr = rdma_rm_get_mr(dev_res, mr_handle); + + if (mr) { + rdma_backend_destroy_mr(&mr->backend_mr); + trace_rdma_rm_dealloc_mr(mr_handle, mr->start); + if (mr->start) { + mr->virt -= (mr->start & (TARGET_PAGE_SIZE - 1)); + munmap(mr->virt, mr->length); + } + rdma_res_tbl_dealloc(&dev_res->mr_tbl, mr_handle); + } +} + +int rdma_rm_alloc_uc(RdmaDeviceResources *dev_res, 
uint32_t pfn, + uint32_t *uc_handle) +{ + RdmaRmUC *uc; + + /* TODO: Need to make sure pfn is between bar start address and + * bsd+RDMA_BAR2_UAR_SIZE + if (pfn > RDMA_BAR2_UAR_SIZE) { + rdma_error_report("pfn out of range (%d > %d)", pfn, + RDMA_BAR2_UAR_SIZE); + return -ENOMEM; + } + */ + + uc = rdma_res_tbl_alloc(&dev_res->uc_tbl, uc_handle); + if (!uc) { + return -ENOMEM; + } + + return 0; +} + +RdmaRmUC *rdma_rm_get_uc(RdmaDeviceResources *dev_res, uint32_t uc_handle) +{ + return rdma_res_tbl_get(&dev_res->uc_tbl, uc_handle); +} + +void rdma_rm_dealloc_uc(RdmaDeviceResources *dev_res, uint32_t uc_handle) +{ + RdmaRmUC *uc = rdma_rm_get_uc(dev_res, uc_handle); + + if (uc) { + rdma_res_tbl_dealloc(&dev_res->uc_tbl, uc_handle); + } +} + +RdmaRmCQ *rdma_rm_get_cq(RdmaDeviceResources *dev_res, uint32_t cq_handle) +{ + return rdma_res_tbl_get(&dev_res->cq_tbl, cq_handle); +} + +int rdma_rm_alloc_cq(RdmaDeviceResources *dev_res, RdmaBackendDev *backend_dev, + uint32_t cqe, uint32_t *cq_handle, void *opaque) +{ + int rc; + RdmaRmCQ *cq; + + cq = rdma_res_tbl_alloc(&dev_res->cq_tbl, cq_handle); + if (!cq) { + return -ENOMEM; + } + + cq->opaque = opaque; + cq->notify = CNT_CLEAR; + + rc = rdma_backend_create_cq(backend_dev, &cq->backend_cq, cqe); + if (rc) { + rc = -EIO; + goto out_dealloc_cq; + } + + return 0; + +out_dealloc_cq: + rdma_rm_dealloc_cq(dev_res, *cq_handle); + + return rc; +} + +void rdma_rm_req_notify_cq(RdmaDeviceResources *dev_res, uint32_t cq_handle, + bool notify) +{ + RdmaRmCQ *cq; + + cq = rdma_rm_get_cq(dev_res, cq_handle); + if (!cq) { + return; + } + + if (cq->notify != CNT_SET) { + cq->notify = notify ? CNT_ARM : CNT_CLEAR; + } +} + +void rdma_rm_dealloc_cq(RdmaDeviceResources *dev_res, uint32_t cq_handle) +{ + RdmaRmCQ *cq; + + cq = rdma_rm_get_cq(dev_res, cq_handle); + if (!cq) { + return; + } + + rdma_backend_destroy_cq(&cq->backend_cq); + + rdma_res_tbl_dealloc(&dev_res->cq_tbl, cq_handle); +} + +RdmaRmQP *rdma_rm_get_qp(RdmaDeviceResources *dev_res, uint32_t qpn) +{ + GBytes *key = g_bytes_new(&qpn, sizeof(qpn)); + + RdmaRmQP *qp = g_hash_table_lookup(dev_res->qp_hash, key); + + g_bytes_unref(key); + + if (!qp) { + rdma_error_report("Invalid QP handle %d", qpn); + } + + return qp; +} + +int rdma_rm_alloc_qp(RdmaDeviceResources *dev_res, uint32_t pd_handle, + uint8_t qp_type, uint32_t max_send_wr, + uint32_t max_send_sge, uint32_t send_cq_handle, + uint32_t max_recv_wr, uint32_t max_recv_sge, + uint32_t recv_cq_handle, void *opaque, uint32_t *qpn, + uint8_t is_srq, uint32_t srq_handle) +{ + int rc; + RdmaRmQP *qp; + RdmaRmCQ *scq, *rcq; + RdmaRmPD *pd; + RdmaRmSRQ *srq = NULL; + uint32_t rm_qpn; + + pd = rdma_rm_get_pd(dev_res, pd_handle); + if (!pd) { + return -EINVAL; + } + + scq = rdma_rm_get_cq(dev_res, send_cq_handle); + rcq = rdma_rm_get_cq(dev_res, recv_cq_handle); + + if (!scq || !rcq) { + rdma_error_report("Invalid send_cqn or recv_cqn (%d, %d)", + send_cq_handle, recv_cq_handle); + return -EINVAL; + } + + if (is_srq) { + srq = rdma_rm_get_srq(dev_res, srq_handle); + if (!srq) { + rdma_error_report("Invalid srqn %d", srq_handle); + return -EINVAL; + } + + srq->recv_cq_handle = recv_cq_handle; + } + + if (qp_type == IBV_QPT_GSI) { + scq->notify = CNT_SET; + rcq->notify = CNT_SET; + } + + qp = rdma_res_tbl_alloc(&dev_res->qp_tbl, &rm_qpn); + if (!qp) { + return -ENOMEM; + } + + qp->qpn = rm_qpn; + qp->qp_state = IBV_QPS_RESET; + qp->qp_type = qp_type; + qp->send_cq_handle = send_cq_handle; + qp->recv_cq_handle = recv_cq_handle; + qp->opaque = opaque; + 
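+ /* opaque is an uninterpreted frontend cookie kept with the QP; the resource manager stores it but never dereferences it. */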
qp->is_srq = is_srq; + + rc = rdma_backend_create_qp(&qp->backend_qp, qp_type, &pd->backend_pd, + &scq->backend_cq, &rcq->backend_cq, + is_srq ? &srq->backend_srq : NULL, + max_send_wr, max_recv_wr, max_send_sge, + max_recv_sge); + + if (rc) { + rc = -EIO; + goto out_dealloc_qp; + } + + *qpn = rdma_backend_qpn(&qp->backend_qp); + trace_rdma_rm_alloc_qp(rm_qpn, *qpn, qp_type); + g_hash_table_insert(dev_res->qp_hash, g_bytes_new(qpn, sizeof(*qpn)), qp); + + return 0; + +out_dealloc_qp: + rdma_res_tbl_dealloc(&dev_res->qp_tbl, qp->qpn); + + return rc; +} + +int rdma_rm_modify_qp(RdmaDeviceResources *dev_res, RdmaBackendDev *backend_dev, + uint32_t qp_handle, uint32_t attr_mask, uint8_t sgid_idx, + union ibv_gid *dgid, uint32_t dqpn, + enum ibv_qp_state qp_state, uint32_t qkey, + uint32_t rq_psn, uint32_t sq_psn) +{ + RdmaRmQP *qp; + int ret; + + qp = rdma_rm_get_qp(dev_res, qp_handle); + if (!qp) { + return -EINVAL; + } + + if (qp->qp_type == IBV_QPT_SMI) { + rdma_error_report("Got QP0 request"); + return -EPERM; + } else if (qp->qp_type == IBV_QPT_GSI) { + return 0; + } + + trace_rdma_rm_modify_qp(qp_handle, attr_mask, qp_state, sgid_idx); + + if (attr_mask & IBV_QP_STATE) { + qp->qp_state = qp_state; + + if (qp->qp_state == IBV_QPS_INIT) { + ret = rdma_backend_qp_state_init(backend_dev, &qp->backend_qp, + qp->qp_type, qkey); + if (ret) { + return -EIO; + } + } + + if (qp->qp_state == IBV_QPS_RTR) { + /* Get backend gid index */ + sgid_idx = rdma_rm_get_backend_gid_index(dev_res, backend_dev, + sgid_idx); + if (sgid_idx <= 0) { /* TODO check also less than bk.max_sgid */ + rdma_error_report("Failed to get bk sgid_idx for sgid_idx %d", + sgid_idx); + return -EIO; + } + + ret = rdma_backend_qp_state_rtr(backend_dev, &qp->backend_qp, + qp->qp_type, sgid_idx, dgid, dqpn, + rq_psn, qkey, + attr_mask & IBV_QP_QKEY); + if (ret) { + return -EIO; + } + } + + if (qp->qp_state == IBV_QPS_RTS) { + ret = rdma_backend_qp_state_rts(&qp->backend_qp, qp->qp_type, + sq_psn, qkey, + attr_mask & IBV_QP_QKEY); + if (ret) { + return -EIO; + } + } + } + + return 0; +} + +int rdma_rm_query_qp(RdmaDeviceResources *dev_res, RdmaBackendDev *backend_dev, + uint32_t qp_handle, struct ibv_qp_attr *attr, + int attr_mask, struct ibv_qp_init_attr *init_attr) +{ + RdmaRmQP *qp; + + qp = rdma_rm_get_qp(dev_res, qp_handle); + if (!qp) { + return -EINVAL; + } + + return rdma_backend_query_qp(&qp->backend_qp, attr, attr_mask, init_attr); +} + +void rdma_rm_dealloc_qp(RdmaDeviceResources *dev_res, uint32_t qp_handle) +{ + RdmaRmQP *qp; + GBytes *key; + + key = g_bytes_new(&qp_handle, sizeof(qp_handle)); + qp = g_hash_table_lookup(dev_res->qp_hash, key); + g_hash_table_remove(dev_res->qp_hash, key); + g_bytes_unref(key); + + if (!qp) { + return; + } + + rdma_backend_destroy_qp(&qp->backend_qp, dev_res); + + rdma_res_tbl_dealloc(&dev_res->qp_tbl, qp->qpn); +} + +RdmaRmSRQ *rdma_rm_get_srq(RdmaDeviceResources *dev_res, uint32_t srq_handle) +{ + return rdma_res_tbl_get(&dev_res->srq_tbl, srq_handle); +} + +int rdma_rm_alloc_srq(RdmaDeviceResources *dev_res, uint32_t pd_handle, + uint32_t max_wr, uint32_t max_sge, uint32_t srq_limit, + uint32_t *srq_handle, void *opaque) +{ + RdmaRmSRQ *srq; + RdmaRmPD *pd; + int rc; + + pd = rdma_rm_get_pd(dev_res, pd_handle); + if (!pd) { + return -EINVAL; + } + + srq = rdma_res_tbl_alloc(&dev_res->srq_tbl, srq_handle); + if (!srq) { + return -ENOMEM; + } + + rc = rdma_backend_create_srq(&srq->backend_srq, &pd->backend_pd, + max_wr, max_sge, srq_limit); + if (rc) { + rc = -EIO; + goto 
out_dealloc_srq; + } + + srq->opaque = opaque; + + return 0; + +out_dealloc_srq: + rdma_res_tbl_dealloc(&dev_res->srq_tbl, *srq_handle); + + return rc; +} + +int rdma_rm_query_srq(RdmaDeviceResources *dev_res, uint32_t srq_handle, + struct ibv_srq_attr *srq_attr) +{ + RdmaRmSRQ *srq; + + srq = rdma_rm_get_srq(dev_res, srq_handle); + if (!srq) { + return -EINVAL; + } + + return rdma_backend_query_srq(&srq->backend_srq, srq_attr); +} + +int rdma_rm_modify_srq(RdmaDeviceResources *dev_res, uint32_t srq_handle, + struct ibv_srq_attr *srq_attr, int srq_attr_mask) +{ + RdmaRmSRQ *srq; + + srq = rdma_rm_get_srq(dev_res, srq_handle); + if (!srq) { + return -EINVAL; + } + + if ((srq_attr_mask & IBV_SRQ_LIMIT) && + (srq_attr->srq_limit == 0)) { + return -EINVAL; + } + + if ((srq_attr_mask & IBV_SRQ_MAX_WR) && + (srq_attr->max_wr == 0)) { + return -EINVAL; + } + + return rdma_backend_modify_srq(&srq->backend_srq, srq_attr, + srq_attr_mask); +} + +void rdma_rm_dealloc_srq(RdmaDeviceResources *dev_res, uint32_t srq_handle) +{ + RdmaRmSRQ *srq; + + srq = rdma_rm_get_srq(dev_res, srq_handle); + if (!srq) { + return; + } + + rdma_backend_destroy_srq(&srq->backend_srq, dev_res); + rdma_res_tbl_dealloc(&dev_res->srq_tbl, srq_handle); +} + +void *rdma_rm_get_cqe_ctx(RdmaDeviceResources *dev_res, uint32_t cqe_ctx_id) +{ + void **cqe_ctx; + + cqe_ctx = rdma_res_tbl_get(&dev_res->cqe_ctx_tbl, cqe_ctx_id); + if (!cqe_ctx) { + return NULL; + } + + return *cqe_ctx; +} + +int rdma_rm_alloc_cqe_ctx(RdmaDeviceResources *dev_res, uint32_t *cqe_ctx_id, + void *ctx) +{ + void **cqe_ctx; + + cqe_ctx = rdma_res_tbl_alloc(&dev_res->cqe_ctx_tbl, cqe_ctx_id); + if (!cqe_ctx) { + return -ENOMEM; + } + + *cqe_ctx = ctx; + + return 0; +} + +void rdma_rm_dealloc_cqe_ctx(RdmaDeviceResources *dev_res, uint32_t cqe_ctx_id) +{ + rdma_res_tbl_dealloc(&dev_res->cqe_ctx_tbl, cqe_ctx_id); +} + +int rdma_rm_add_gid(RdmaDeviceResources *dev_res, RdmaBackendDev *backend_dev, + const char *ifname, union ibv_gid *gid, int gid_idx) +{ + int rc; + + rc = rdma_backend_add_gid(backend_dev, ifname, gid); + if (rc) { + return -EINVAL; + } + + memcpy(&dev_res->port.gid_tbl[gid_idx].gid, gid, sizeof(*gid)); + + return 0; +} + +int rdma_rm_del_gid(RdmaDeviceResources *dev_res, RdmaBackendDev *backend_dev, + const char *ifname, int gid_idx) +{ + int rc; + + if (!dev_res->port.gid_tbl[gid_idx].gid.global.interface_id) { + return 0; + } + + rc = rdma_backend_del_gid(backend_dev, ifname, + &dev_res->port.gid_tbl[gid_idx].gid); + if (rc) { + return -EINVAL; + } + + memset(dev_res->port.gid_tbl[gid_idx].gid.raw, 0, + sizeof(dev_res->port.gid_tbl[gid_idx].gid)); + dev_res->port.gid_tbl[gid_idx].backend_gid_index = -1; + + return 0; +} + +int rdma_rm_get_backend_gid_index(RdmaDeviceResources *dev_res, + RdmaBackendDev *backend_dev, int sgid_idx) +{ + if (unlikely(sgid_idx < 0 || sgid_idx >= MAX_PORT_GIDS)) { + rdma_error_report("Got invalid sgid_idx %d", sgid_idx); + return -EINVAL; + } + + if (unlikely(dev_res->port.gid_tbl[sgid_idx].backend_gid_index == -1)) { + dev_res->port.gid_tbl[sgid_idx].backend_gid_index = + rdma_backend_get_gid_index(backend_dev, + &dev_res->port.gid_tbl[sgid_idx].gid); + } + + return dev_res->port.gid_tbl[sgid_idx].backend_gid_index; +} + +static void destroy_qp_hash_key(gpointer data) +{ + g_bytes_unref(data); +} + +static void init_ports(RdmaDeviceResources *dev_res) +{ + int i; + + memset(&dev_res->port, 0, sizeof(dev_res->port)); + + dev_res->port.state = IBV_PORT_DOWN; + for (i = 0; i < MAX_PORT_GIDS; i++) { + 
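+ /* -1 marks the backend GID index as unresolved; rdma_rm_get_backend_gid_index() resolves and caches it on first use. */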
dev_res->port.gid_tbl[i].backend_gid_index = -1; + } +} + +static void fini_ports(RdmaDeviceResources *dev_res, + RdmaBackendDev *backend_dev, const char *ifname) +{ + int i; + + dev_res->port.state = IBV_PORT_DOWN; + for (i = 0; i < MAX_PORT_GIDS; i++) { + rdma_rm_del_gid(dev_res, backend_dev, ifname, i); + } +} + +int rdma_rm_init(RdmaDeviceResources *dev_res, struct ibv_device_attr *dev_attr) +{ + dev_res->qp_hash = g_hash_table_new_full(g_bytes_hash, g_bytes_equal, + destroy_qp_hash_key, NULL); + if (!dev_res->qp_hash) { + return -ENOMEM; + } + + res_tbl_init("PD", &dev_res->pd_tbl, dev_attr->max_pd, sizeof(RdmaRmPD)); + res_tbl_init("CQ", &dev_res->cq_tbl, dev_attr->max_cq, sizeof(RdmaRmCQ)); + res_tbl_init("MR", &dev_res->mr_tbl, dev_attr->max_mr, sizeof(RdmaRmMR)); + res_tbl_init("QP", &dev_res->qp_tbl, dev_attr->max_qp, sizeof(RdmaRmQP)); + res_tbl_init("CQE_CTX", &dev_res->cqe_ctx_tbl, dev_attr->max_qp * + dev_attr->max_qp_wr, sizeof(void *)); + res_tbl_init("UC", &dev_res->uc_tbl, MAX_UCS, sizeof(RdmaRmUC)); + res_tbl_init("SRQ", &dev_res->srq_tbl, dev_attr->max_srq, + sizeof(RdmaRmSRQ)); + + init_ports(dev_res); + + qemu_mutex_init(&dev_res->lock); + + memset(&dev_res->stats, 0, sizeof(dev_res->stats)); + qatomic_set(&dev_res->stats.missing_cqe, 0); + + return 0; +} + +void rdma_rm_fini(RdmaDeviceResources *dev_res, RdmaBackendDev *backend_dev, + const char *ifname) +{ + qemu_mutex_destroy(&dev_res->lock); + + fini_ports(dev_res, backend_dev, ifname); + + res_tbl_free(&dev_res->srq_tbl); + res_tbl_free(&dev_res->uc_tbl); + res_tbl_free(&dev_res->cqe_ctx_tbl); + res_tbl_free(&dev_res->qp_tbl); + res_tbl_free(&dev_res->mr_tbl); + res_tbl_free(&dev_res->cq_tbl); + res_tbl_free(&dev_res->pd_tbl); + + if (dev_res->qp_hash) { + g_hash_table_destroy(dev_res->qp_hash); + } +} diff --git a/hw/rdma/rdma_rm.h b/hw/rdma/rdma_rm.h new file mode 100644 index 000000000..d69a91779 --- /dev/null +++ b/hw/rdma/rdma_rm.h @@ -0,0 +1,97 @@ +/* + * RDMA device: Definitions of Resource Manager functions + * + * Copyright (C) 2018 Oracle + * Copyright (C) 2018 Red Hat Inc + * + * Authors: + * Yuval Shaia + * Marcel Apfelbaum + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ * + */ + +#ifndef RDMA_RM_H +#define RDMA_RM_H + +#include "qapi/error.h" +#include "rdma_backend_defs.h" +#include "rdma_rm_defs.h" + +int rdma_rm_init(RdmaDeviceResources *dev_res, + struct ibv_device_attr *dev_attr); +void rdma_rm_fini(RdmaDeviceResources *dev_res, RdmaBackendDev *backend_dev, + const char *ifname); + +int rdma_rm_alloc_pd(RdmaDeviceResources *dev_res, RdmaBackendDev *backend_dev, + uint32_t *pd_handle, uint32_t ctx_handle); +RdmaRmPD *rdma_rm_get_pd(RdmaDeviceResources *dev_res, uint32_t pd_handle); +void rdma_rm_dealloc_pd(RdmaDeviceResources *dev_res, uint32_t pd_handle); + +int rdma_rm_alloc_mr(RdmaDeviceResources *dev_res, uint32_t pd_handle, + uint64_t guest_start, uint64_t guest_length, + void *host_virt, int access_flags, uint32_t *mr_handle, + uint32_t *lkey, uint32_t *rkey); +RdmaRmMR *rdma_rm_get_mr(RdmaDeviceResources *dev_res, uint32_t mr_handle); +void rdma_rm_dealloc_mr(RdmaDeviceResources *dev_res, uint32_t mr_handle); + +int rdma_rm_alloc_uc(RdmaDeviceResources *dev_res, uint32_t pfn, + uint32_t *uc_handle); +RdmaRmUC *rdma_rm_get_uc(RdmaDeviceResources *dev_res, uint32_t uc_handle); +void rdma_rm_dealloc_uc(RdmaDeviceResources *dev_res, uint32_t uc_handle); + +int rdma_rm_alloc_cq(RdmaDeviceResources *dev_res, RdmaBackendDev *backend_dev, + uint32_t cqe, uint32_t *cq_handle, void *opaque); +RdmaRmCQ *rdma_rm_get_cq(RdmaDeviceResources *dev_res, uint32_t cq_handle); +void rdma_rm_req_notify_cq(RdmaDeviceResources *dev_res, uint32_t cq_handle, + bool notify); +void rdma_rm_dealloc_cq(RdmaDeviceResources *dev_res, uint32_t cq_handle); + +int rdma_rm_alloc_qp(RdmaDeviceResources *dev_res, uint32_t pd_handle, + uint8_t qp_type, uint32_t max_send_wr, + uint32_t max_send_sge, uint32_t send_cq_handle, + uint32_t max_recv_wr, uint32_t max_recv_sge, + uint32_t recv_cq_handle, void *opaque, uint32_t *qpn, + uint8_t is_srq, uint32_t srq_handle); +RdmaRmQP *rdma_rm_get_qp(RdmaDeviceResources *dev_res, uint32_t qpn); +int rdma_rm_modify_qp(RdmaDeviceResources *dev_res, RdmaBackendDev *backend_dev, + uint32_t qp_handle, uint32_t attr_mask, uint8_t sgid_idx, + union ibv_gid *dgid, uint32_t dqpn, + enum ibv_qp_state qp_state, uint32_t qkey, + uint32_t rq_psn, uint32_t sq_psn); +int rdma_rm_query_qp(RdmaDeviceResources *dev_res, RdmaBackendDev *backend_dev, + uint32_t qp_handle, struct ibv_qp_attr *attr, + int attr_mask, struct ibv_qp_init_attr *init_attr); +void rdma_rm_dealloc_qp(RdmaDeviceResources *dev_res, uint32_t qp_handle); + +RdmaRmSRQ *rdma_rm_get_srq(RdmaDeviceResources *dev_res, uint32_t srq_handle); +int rdma_rm_alloc_srq(RdmaDeviceResources *dev_res, uint32_t pd_handle, + uint32_t max_wr, uint32_t max_sge, uint32_t srq_limit, + uint32_t *srq_handle, void *opaque); +int rdma_rm_query_srq(RdmaDeviceResources *dev_res, uint32_t srq_handle, + struct ibv_srq_attr *srq_attr); +int rdma_rm_modify_srq(RdmaDeviceResources *dev_res, uint32_t srq_handle, + struct ibv_srq_attr *srq_attr, int srq_attr_mask); +void rdma_rm_dealloc_srq(RdmaDeviceResources *dev_res, uint32_t srq_handle); + +int rdma_rm_alloc_cqe_ctx(RdmaDeviceResources *dev_res, uint32_t *cqe_ctx_id, + void *ctx); +void *rdma_rm_get_cqe_ctx(RdmaDeviceResources *dev_res, uint32_t cqe_ctx_id); +void rdma_rm_dealloc_cqe_ctx(RdmaDeviceResources *dev_res, uint32_t cqe_ctx_id); + +int rdma_rm_add_gid(RdmaDeviceResources *dev_res, RdmaBackendDev *backend_dev, + const char *ifname, union ibv_gid *gid, int gid_idx); +int rdma_rm_del_gid(RdmaDeviceResources *dev_res, RdmaBackendDev *backend_dev, + const char 
*ifname, int gid_idx); +int rdma_rm_get_backend_gid_index(RdmaDeviceResources *dev_res, + RdmaBackendDev *backend_dev, int sgid_idx); +static inline union ibv_gid *rdma_rm_get_gid(RdmaDeviceResources *dev_res, + int sgid_idx) +{ + return &dev_res->port.gid_tbl[sgid_idx].gid; +} +void rdma_format_device_counters(RdmaDeviceResources *dev_res, GString *buf); + +#endif diff --git a/hw/rdma/rdma_rm_defs.h b/hw/rdma/rdma_rm_defs.h new file mode 100644 index 000000000..534f2f74d --- /dev/null +++ b/hw/rdma/rdma_rm_defs.h @@ -0,0 +1,146 @@ +/* + * RDMA device: Definitions of Resource Manager structures + * + * Copyright (C) 2018 Oracle + * Copyright (C) 2018 Red Hat Inc + * + * Authors: + * Yuval Shaia + * Marcel Apfelbaum + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +#ifndef RDMA_RM_DEFS_H +#define RDMA_RM_DEFS_H + +#include "rdma_backend_defs.h" + +#define MAX_PORTS 1 /* Do not change - we support only one port */ +#define MAX_PORT_GIDS 255 +#define MAX_GIDS MAX_PORT_GIDS +#define MAX_PORT_PKEYS 1 +#define MAX_PKEYS MAX_PORT_PKEYS +#define MAX_UCS 512 +#define MAX_MR_SIZE (1UL << 27) +#define MAX_QP 1024 +#define MAX_SGE 4 +#define MAX_CQ 2048 +#define MAX_MR 1024 +#define MAX_PD 1024 +#define MAX_QP_RD_ATOM 16 +#define MAX_QP_INIT_RD_ATOM 16 +#define MAX_AH 64 +#define MAX_SRQ 512 + +#define MAX_RM_TBL_NAME 16 +#define MAX_CONSEQ_EMPTY_POLL_CQ 4096 /* considered as error above this */ + +typedef struct RdmaRmResTbl { + char name[MAX_RM_TBL_NAME]; + QemuMutex lock; + unsigned long *bitmap; + size_t tbl_sz; + size_t res_sz; + void *tbl; + uint32_t used; /* number of used entries in the table */ +} RdmaRmResTbl; + +typedef struct RdmaRmPD { + RdmaBackendPD backend_pd; + uint32_t ctx_handle; +} RdmaRmPD; + +typedef enum CQNotificationType { + CNT_CLEAR, + CNT_ARM, + CNT_SET, +} CQNotificationType; + +typedef struct RdmaRmCQ { + RdmaBackendCQ backend_cq; + void *opaque; + CQNotificationType notify; +} RdmaRmCQ; + +/* MR (DMA region) */ +typedef struct RdmaRmMR { + RdmaBackendMR backend_mr; + void *virt; + uint64_t start; + size_t length; + uint32_t pd_handle; + uint32_t lkey; + uint32_t rkey; +} RdmaRmMR; + +typedef struct RdmaRmUC { + uint64_t uc_handle; +} RdmaRmUC; + +typedef struct RdmaRmQP { + RdmaBackendQP backend_qp; + void *opaque; + uint32_t qp_type; + uint32_t qpn; + uint32_t send_cq_handle; + uint32_t recv_cq_handle; + enum ibv_qp_state qp_state; + uint8_t is_srq; +} RdmaRmQP; + +typedef struct RdmaRmSRQ { + RdmaBackendSRQ backend_srq; + uint32_t recv_cq_handle; + void *opaque; +} RdmaRmSRQ; + +typedef struct RdmaRmGid { + union ibv_gid gid; + int backend_gid_index; +} RdmaRmGid; + +typedef struct RdmaRmPort { + RdmaRmGid gid_tbl[MAX_PORT_GIDS]; + enum ibv_port_state state; +} RdmaRmPort; + +typedef struct RdmaRmStats { + uint64_t tx; + uint64_t tx_len; + uint64_t tx_err; + uint64_t rx_bufs; + uint64_t rx_bufs_len; + uint64_t rx_bufs_err; + uint64_t rx_srq; + uint64_t completions; + uint64_t mad_tx; + uint64_t mad_tx_err; + uint64_t mad_rx; + uint64_t mad_rx_err; + uint64_t mad_rx_bufs; + uint64_t mad_rx_bufs_err; + uint64_t poll_cq_from_bk; + uint64_t poll_cq_from_guest; + uint64_t poll_cq_from_guest_empty; + uint64_t poll_cq_ppoll_to; + uint32_t missing_cqe; +} RdmaRmStats; + +struct RdmaDeviceResources { + RdmaRmPort port; + RdmaRmResTbl pd_tbl; + RdmaRmResTbl mr_tbl; + RdmaRmResTbl uc_tbl; + RdmaRmResTbl qp_tbl; + RdmaRmResTbl cq_tbl; + RdmaRmResTbl cqe_ctx_tbl; + RdmaRmResTbl srq_tbl; 
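+ /* The tables above are fixed-size, bitmap-allocated handle tables, each protected by its own lock (see RdmaRmResTbl); qp_hash below maps backend (real) QP numbers to the emulated QPs stored in qp_tbl. */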
+ GHashTable *qp_hash; /* Keeps mapping between real and emulated */ + QemuMutex lock; + RdmaRmStats stats; +}; + +#endif diff --git a/hw/rdma/rdma_utils.c b/hw/rdma/rdma_utils.c new file mode 100644 index 000000000..98df58f68 --- /dev/null +++ b/hw/rdma/rdma_utils.c @@ -0,0 +1,125 @@ +/* + * QEMU paravirtual RDMA - Generic RDMA backend + * + * Copyright (C) 2018 Oracle + * Copyright (C) 2018 Red Hat Inc + * + * Authors: + * Yuval Shaia + * Marcel Apfelbaum + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +#include "qemu/osdep.h" +#include "trace.h" +#include "rdma_utils.h" + +void *rdma_pci_dma_map(PCIDevice *dev, dma_addr_t addr, dma_addr_t plen) +{ + void *p; + hwaddr len = plen; + + if (!addr) { + rdma_error_report("addr is NULL"); + return NULL; + } + + p = pci_dma_map(dev, addr, &len, DMA_DIRECTION_TO_DEVICE); + if (!p) { + rdma_error_report("pci_dma_map fail, addr=0x%"PRIx64", len=%"PRId64, + addr, len); + return NULL; + } + + if (len != plen) { + rdma_pci_dma_unmap(dev, p, len); + return NULL; + } + + trace_rdma_pci_dma_map(addr, p, len); + + return p; +} + +void rdma_pci_dma_unmap(PCIDevice *dev, void *buffer, dma_addr_t len) +{ + trace_rdma_pci_dma_unmap(buffer); + if (buffer) { + pci_dma_unmap(dev, buffer, len, DMA_DIRECTION_TO_DEVICE, 0); + } +} + +void rdma_protected_gqueue_init(RdmaProtectedGQueue *list) +{ + qemu_mutex_init(&list->lock); + list->list = g_queue_new(); +} + +void rdma_protected_gqueue_destroy(RdmaProtectedGQueue *list) +{ + if (list->list) { + g_queue_free_full(list->list, g_free); + qemu_mutex_destroy(&list->lock); + list->list = NULL; + } +} + +void rdma_protected_gqueue_append_int64(RdmaProtectedGQueue *list, + int64_t value) +{ + qemu_mutex_lock(&list->lock); + g_queue_push_tail(list->list, g_memdup(&value, sizeof(value))); + qemu_mutex_unlock(&list->lock); +} + +int64_t rdma_protected_gqueue_pop_int64(RdmaProtectedGQueue *list) +{ + int64_t *valp; + int64_t val; + + qemu_mutex_lock(&list->lock); + + valp = g_queue_pop_head(list->list); + qemu_mutex_unlock(&list->lock); + + if (!valp) { + return -ENOENT; + } + + val = *valp; + g_free(valp); + return val; +} + +void rdma_protected_gslist_init(RdmaProtectedGSList *list) +{ + qemu_mutex_init(&list->lock); +} + +void rdma_protected_gslist_destroy(RdmaProtectedGSList *list) +{ + if (list->list) { + g_slist_free(list->list); + qemu_mutex_destroy(&list->lock); + list->list = NULL; + } +} + +void rdma_protected_gslist_append_int32(RdmaProtectedGSList *list, + int32_t value) +{ + qemu_mutex_lock(&list->lock); + list->list = g_slist_prepend(list->list, GINT_TO_POINTER(value)); + qemu_mutex_unlock(&list->lock); +} + +void rdma_protected_gslist_remove_int32(RdmaProtectedGSList *list, + int32_t value) +{ + qemu_mutex_lock(&list->lock); + list->list = g_slist_remove(list->list, GINT_TO_POINTER(value)); + qemu_mutex_unlock(&list->lock); +} diff --git a/hw/rdma/rdma_utils.h b/hw/rdma/rdma_utils.h new file mode 100644 index 000000000..9fd0efd94 --- /dev/null +++ b/hw/rdma/rdma_utils.h @@ -0,0 +1,64 @@ +/* + * RDMA device: Debug utilities + * + * Copyright (C) 2018 Oracle + * Copyright (C) 2018 Red Hat Inc + * + * + * Authors: + * Yuval Shaia + * Marcel Apfelbaum + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ * + */ + +#ifndef RDMA_UTILS_H +#define RDMA_UTILS_H + +#include "qemu/error-report.h" +#include "hw/pci/pci.h" +#include "sysemu/dma.h" + +#define rdma_error_report(fmt, ...) \ + error_report("%s: " fmt, "rdma", ## __VA_ARGS__) +#define rdma_warn_report(fmt, ...) \ + warn_report("%s: " fmt, "rdma", ## __VA_ARGS__) +#define rdma_info_report(fmt, ...) \ + info_report("%s: " fmt, "rdma", ## __VA_ARGS__) + +typedef struct RdmaProtectedGQueue { + QemuMutex lock; + GQueue *list; +} RdmaProtectedGQueue; + +typedef struct RdmaProtectedGSList { + QemuMutex lock; + GSList *list; +} RdmaProtectedGSList; + +void *rdma_pci_dma_map(PCIDevice *dev, dma_addr_t addr, dma_addr_t plen); +void rdma_pci_dma_unmap(PCIDevice *dev, void *buffer, dma_addr_t len); +void rdma_protected_gqueue_init(RdmaProtectedGQueue *list); +void rdma_protected_gqueue_destroy(RdmaProtectedGQueue *list); +void rdma_protected_gqueue_append_int64(RdmaProtectedGQueue *list, + int64_t value); +int64_t rdma_protected_gqueue_pop_int64(RdmaProtectedGQueue *list); +void rdma_protected_gslist_init(RdmaProtectedGSList *list); +void rdma_protected_gslist_destroy(RdmaProtectedGSList *list); +void rdma_protected_gslist_append_int32(RdmaProtectedGSList *list, + int32_t value); +void rdma_protected_gslist_remove_int32(RdmaProtectedGSList *list, + int32_t value); + +static inline void addrconf_addr_eui48(uint8_t *eui, const char *addr) +{ + memcpy(eui, addr, 3); + eui[3] = 0xFF; + eui[4] = 0xFE; + memcpy(eui + 5, addr + 3, 3); + eui[0] ^= 2; +} + +#endif diff --git a/hw/rdma/trace-events b/hw/rdma/trace-events new file mode 100644 index 000000000..9accb1497 --- /dev/null +++ b/hw/rdma/trace-events @@ -0,0 +1,31 @@ +# See docs/devel/tracing.rst for syntax documentation. + +# rdma_backend.c +rdma_check_dev_attr(const char *name, int max_bk, int max_fe) "%s: be=%d, fe=%d" +rdma_create_ah_cache_hit(uint64_t subnet, uint64_t if_id) "subnet=0x%"PRIx64",if_id=0x%"PRIx64 +rdma_create_ah_cache_miss(uint64_t subnet, uint64_t if_id) "subnet=0x%"PRIx64",if_id=0x%"PRIx64 +rdma_poll_cq(int ne, void *ibcq) "Got %d completion(s) from cq %p" +rdmacm_mux(const char *title, int msg_type, int op_code) "%s: msg_type=%d, op_code=%d" +rdmacm_mux_check_op_status(int msg_type, int op_code, int err_code) "resp: msg_type=%d, op_code=%d, err_code=%d" +rdma_mad_message(const char *title, int len, char *data) "mad %s (%d): %s" +rdma_backend_rc_qp_state_init(uint32_t qpn) "RC QP 0x%x switch to INIT" +rdma_backend_ud_qp_state_init(uint32_t qpn, uint32_t qkey) "UD QP 0x%x switch to INIT, qkey=0x%x" +rdma_backend_rc_qp_state_rtr(uint32_t qpn, uint64_t subnet, uint64_t ifid, uint8_t sgid_idx, uint32_t dqpn, uint32_t rq_psn) "RC QP 0x%x switch to RTR, subnet = 0x%"PRIx64", ifid = 0x%"PRIx64 ", sgid_idx=%d, dqpn=0x%x, rq_psn=0x%x" +rdma_backend_ud_qp_state_rtr(uint32_t qpn, uint32_t qkey) "UD QP 0x%x switch to RTR, qkey=0x%x" +rdma_backend_rc_qp_state_rts(uint32_t qpn, uint32_t sq_psn) "RC QP 0x%x switch to RTS, sq_psn=0x%x, " +rdma_backend_ud_qp_state_rts(uint32_t qpn, uint32_t sq_psn, uint32_t qkey) "UD QP 0x%x switch to RTS, sq_psn=0x%x, qkey=0x%x" +rdma_backend_get_gid_index(uint64_t subnet, uint64_t ifid, int gid_idx) "subnet=0x%"PRIx64", ifid=0x%"PRIx64 ", gid_idx=%d" +rdma_backend_gid_change(const char *op, uint64_t subnet, uint64_t ifid) "%s subnet=0x%"PRIx64", ifid=0x%"PRIx64 + +# rdma_rm.c +rdma_res_tbl_get(char *name, uint32_t handle) "tbl %s, handle %d" +rdma_res_tbl_alloc(char *name, uint32_t handle) "tbl %s, handle %d" +rdma_res_tbl_dealloc(char *name, uint32_t 
handle) "tbl %s, handle %d" +rdma_rm_alloc_mr(uint32_t mr_handle, void *host_virt, uint64_t guest_start, uint64_t guest_length, int access_flags) "mr_handle=%d, host_virt=%p, guest_start=0x%"PRIx64", length=%" PRId64", access_flags=0x%x" +rdma_rm_dealloc_mr(uint32_t mr_handle, uint64_t guest_start) "mr_handle=%d, guest_start=0x%"PRIx64 +rdma_rm_alloc_qp(uint32_t rm_qpn, uint32_t backend_qpn, uint8_t qp_type) "rm_qpn=%d, backend_qpn=0x%x, qp_type=%d" +rdma_rm_modify_qp(uint32_t qpn, uint32_t attr_mask, int qp_state, uint8_t sgid_idx) "qpn=0x%x, attr_mask=0x%x, qp_state=%d, sgid_idx=%d" + +# rdma_utils.c +rdma_pci_dma_map(uint64_t addr, void *vaddr, uint64_t len) "0x%"PRIx64" -> %p (len=%" PRId64")" +rdma_pci_dma_unmap(void *vaddr) "%p" diff --git a/hw/rdma/trace.h b/hw/rdma/trace.h new file mode 100644 index 000000000..b3fa8ebc5 --- /dev/null +++ b/hw/rdma/trace.h @@ -0,0 +1 @@ +#include "trace/trace-hw_rdma.h" diff --git a/hw/rdma/vmw/pvrdma.h b/hw/rdma/vmw/pvrdma.h new file mode 100644 index 000000000..d08965d3e --- /dev/null +++ b/hw/rdma/vmw/pvrdma.h @@ -0,0 +1,144 @@ +/* + * QEMU VMWARE paravirtual RDMA device definitions + * + * Copyright (C) 2018 Oracle + * Copyright (C) 2018 Red Hat Inc + * + * Authors: + * Yuval Shaia + * Marcel Apfelbaum + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +#ifndef PVRDMA_PVRDMA_H +#define PVRDMA_PVRDMA_H + +#include "qemu/units.h" +#include "qemu/notify.h" +#include "hw/pci/pci.h" +#include "hw/pci/msix.h" +#include "chardev/char-fe.h" +#include "hw/net/vmxnet3_defs.h" + +#include "../rdma_backend_defs.h" +#include "../rdma_rm_defs.h" + +#include "standard-headers/drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h" +#include "pvrdma_dev_ring.h" +#include "qom/object.h" + +/* BARs */ +#define RDMA_MSIX_BAR_IDX 0 +#define RDMA_REG_BAR_IDX 1 +#define RDMA_UAR_BAR_IDX 2 +#define RDMA_BAR0_MSIX_SIZE (16 * KiB) +#define RDMA_BAR1_REGS_SIZE 64 +#define RDMA_BAR2_UAR_SIZE (0x1000 * MAX_UCS) /* each uc gets page */ + +/* MSIX */ +#define RDMA_MAX_INTRS 3 +#define RDMA_MSIX_TABLE 0x0000 +#define RDMA_MSIX_PBA 0x2000 + +/* Interrupts Vectors */ +#define INTR_VEC_CMD_RING 0 +#define INTR_VEC_CMD_ASYNC_EVENTS 1 +#define INTR_VEC_CMD_COMPLETION_Q 2 + +/* HW attributes */ +#define PVRDMA_HW_NAME "pvrdma" +#define PVRDMA_HW_VERSION 17 +#define PVRDMA_FW_VERSION 14 + +/* Some defaults */ +#define PVRDMA_PKEY 0xFFFF + +typedef struct DSRInfo { + dma_addr_t dma; + struct pvrdma_device_shared_region *dsr; + + union pvrdma_cmd_req *req; + union pvrdma_cmd_resp *rsp; + + PvrdmaRingState *async_ring_state; + PvrdmaRing async; + + PvrdmaRingState *cq_ring_state; + PvrdmaRing cq; +} DSRInfo; + +typedef struct PVRDMADevStats { + uint64_t commands; + uint64_t regs_reads; + uint64_t regs_writes; + uint64_t uar_writes; + uint64_t interrupts; +} PVRDMADevStats; + +struct PVRDMADev { + PCIDevice parent_obj; + MemoryRegion msix; + MemoryRegion regs; + uint32_t regs_data[RDMA_BAR1_REGS_SIZE]; + MemoryRegion uar; + uint32_t uar_data[RDMA_BAR2_UAR_SIZE]; + DSRInfo dsr_info; + int interrupt_mask; + struct ibv_device_attr dev_attr; + uint64_t node_guid; + char *backend_eth_device_name; + char *backend_device_name; + uint8_t backend_port_num; + RdmaBackendDev backend_dev; + RdmaDeviceResources rdma_dev_res; + CharBackend mad_chr; + VMXNET3State *func0; + Notifier shutdown_notifier; + PVRDMADevStats stats; +}; +typedef struct PVRDMADev PVRDMADev; +DECLARE_INSTANCE_CHECKER(PVRDMADev, PVRDMA_DEV, 
+ PVRDMA_HW_NAME) + +static inline int get_reg_val(PVRDMADev *dev, hwaddr addr, uint32_t *val) +{ + int idx = addr >> 2; + + if (idx >= RDMA_BAR1_REGS_SIZE) { + return -EINVAL; + } + + *val = dev->regs_data[idx]; + + return 0; +} + +static inline int set_reg_val(PVRDMADev *dev, hwaddr addr, uint32_t val) +{ + int idx = addr >> 2; + + if (idx >= RDMA_BAR1_REGS_SIZE) { + return -EINVAL; + } + + dev->regs_data[idx] = val; + + return 0; +} + +static inline void post_interrupt(PVRDMADev *dev, unsigned vector) +{ + PCIDevice *pci_dev = PCI_DEVICE(dev); + + if (likely(!dev->interrupt_mask)) { + dev->stats.interrupts++; + msix_notify(pci_dev, vector); + } +} + +int pvrdma_exec_cmd(PVRDMADev *dev); + +#endif diff --git a/hw/rdma/vmw/pvrdma_cmd.c b/hw/rdma/vmw/pvrdma_cmd.c new file mode 100644 index 000000000..da7ddfa54 --- /dev/null +++ b/hw/rdma/vmw/pvrdma_cmd.c @@ -0,0 +1,825 @@ +/* + * QEMU paravirtual RDMA - Command channel + * + * Copyright (C) 2018 Oracle + * Copyright (C) 2018 Red Hat Inc + * + * Authors: + * Yuval Shaia + * Marcel Apfelbaum + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "hw/pci/pci.h" +#include "hw/pci/pci_ids.h" + +#include "../rdma_backend.h" +#include "../rdma_rm.h" +#include "../rdma_utils.h" + +#include "trace.h" +#include "pvrdma.h" +#include "standard-headers/rdma/vmw_pvrdma-abi.h" + +static void *pvrdma_map_to_pdir(PCIDevice *pdev, uint64_t pdir_dma, + uint32_t nchunks, size_t length) +{ + uint64_t *dir, *tbl; + int tbl_idx, dir_idx, addr_idx; + void *host_virt = NULL, *curr_page; + + if (!nchunks) { + rdma_error_report("Got nchunks=0"); + return NULL; + } + + length = ROUND_UP(length, TARGET_PAGE_SIZE); + if (nchunks * TARGET_PAGE_SIZE != length) { + rdma_error_report("Invalid nchunks/length (%u, %lu)", nchunks, + (unsigned long)length); + return NULL; + } + + dir = rdma_pci_dma_map(pdev, pdir_dma, TARGET_PAGE_SIZE); + if (!dir) { + rdma_error_report("Failed to map to page directory"); + return NULL; + } + + tbl = rdma_pci_dma_map(pdev, dir[0], TARGET_PAGE_SIZE); + if (!tbl) { + rdma_error_report("Failed to map to page table 0"); + goto out_unmap_dir; + } + + curr_page = rdma_pci_dma_map(pdev, (dma_addr_t)tbl[0], TARGET_PAGE_SIZE); + if (!curr_page) { + rdma_error_report("Failed to map the page 0"); + goto out_unmap_tbl; + } + + host_virt = mremap(curr_page, 0, length, MREMAP_MAYMOVE); + if (host_virt == MAP_FAILED) { + host_virt = NULL; + rdma_error_report("Failed to remap memory for host_virt"); + goto out_unmap_tbl; + } + trace_pvrdma_map_to_pdir_host_virt(curr_page, host_virt); + + rdma_pci_dma_unmap(pdev, curr_page, TARGET_PAGE_SIZE); + + dir_idx = 0; + tbl_idx = 1; + addr_idx = 1; + while (addr_idx < nchunks) { + if (tbl_idx == TARGET_PAGE_SIZE / sizeof(uint64_t)) { + tbl_idx = 0; + dir_idx++; + rdma_pci_dma_unmap(pdev, tbl, TARGET_PAGE_SIZE); + tbl = rdma_pci_dma_map(pdev, dir[dir_idx], TARGET_PAGE_SIZE); + if (!tbl) { + rdma_error_report("Failed to map to page table %d", dir_idx); + goto out_unmap_host_virt; + } + } + + curr_page = rdma_pci_dma_map(pdev, (dma_addr_t)tbl[tbl_idx], + TARGET_PAGE_SIZE); + if (!curr_page) { + rdma_error_report("Failed to map to page %d, dir %d", tbl_idx, + dir_idx); + goto out_unmap_host_virt; + } + + mremap(curr_page, 0, TARGET_PAGE_SIZE, MREMAP_MAYMOVE | MREMAP_FIXED, + host_virt + TARGET_PAGE_SIZE * addr_idx); + + trace_pvrdma_map_to_pdir_next_page(addr_idx, curr_page, 
host_virt + + TARGET_PAGE_SIZE * addr_idx); + + rdma_pci_dma_unmap(pdev, curr_page, TARGET_PAGE_SIZE); + + addr_idx++; + + tbl_idx++; + } + + goto out_unmap_tbl; + +out_unmap_host_virt: + munmap(host_virt, length); + host_virt = NULL; + +out_unmap_tbl: + rdma_pci_dma_unmap(pdev, tbl, TARGET_PAGE_SIZE); + +out_unmap_dir: + rdma_pci_dma_unmap(pdev, dir, TARGET_PAGE_SIZE); + + return host_virt; +} + +static int query_port(PVRDMADev *dev, union pvrdma_cmd_req *req, + union pvrdma_cmd_resp *rsp) +{ + struct pvrdma_cmd_query_port *cmd = &req->query_port; + struct pvrdma_cmd_query_port_resp *resp = &rsp->query_port_resp; + struct pvrdma_port_attr attrs = {}; + + if (cmd->port_num > MAX_PORTS) { + return -EINVAL; + } + + if (rdma_backend_query_port(&dev->backend_dev, + (struct ibv_port_attr *)&attrs)) { + return -ENOMEM; + } + + memset(resp, 0, sizeof(*resp)); + + resp->attrs.state = dev->func0->device_active ? attrs.state : + PVRDMA_PORT_DOWN; + resp->attrs.max_mtu = attrs.max_mtu; + resp->attrs.active_mtu = attrs.active_mtu; + resp->attrs.phys_state = attrs.phys_state; + resp->attrs.gid_tbl_len = MIN(MAX_PORT_GIDS, attrs.gid_tbl_len); + resp->attrs.max_msg_sz = 1024; + resp->attrs.pkey_tbl_len = MIN(MAX_PORT_PKEYS, attrs.pkey_tbl_len); + resp->attrs.active_width = 1; + resp->attrs.active_speed = 1; + + return 0; +} + +static int query_pkey(PVRDMADev *dev, union pvrdma_cmd_req *req, + union pvrdma_cmd_resp *rsp) +{ + struct pvrdma_cmd_query_pkey *cmd = &req->query_pkey; + struct pvrdma_cmd_query_pkey_resp *resp = &rsp->query_pkey_resp; + + if (cmd->port_num > MAX_PORTS) { + return -EINVAL; + } + + if (cmd->index > MAX_PKEYS) { + return -EINVAL; + } + + memset(resp, 0, sizeof(*resp)); + + resp->pkey = PVRDMA_PKEY; + + return 0; +} + +static int create_pd(PVRDMADev *dev, union pvrdma_cmd_req *req, + union pvrdma_cmd_resp *rsp) +{ + struct pvrdma_cmd_create_pd *cmd = &req->create_pd; + struct pvrdma_cmd_create_pd_resp *resp = &rsp->create_pd_resp; + int rc; + + memset(resp, 0, sizeof(*resp)); + rc = rdma_rm_alloc_pd(&dev->rdma_dev_res, &dev->backend_dev, + &resp->pd_handle, cmd->ctx_handle); + + return rc; +} + +static int destroy_pd(PVRDMADev *dev, union pvrdma_cmd_req *req, + union pvrdma_cmd_resp *rsp) +{ + struct pvrdma_cmd_destroy_pd *cmd = &req->destroy_pd; + + rdma_rm_dealloc_pd(&dev->rdma_dev_res, cmd->pd_handle); + + return 0; +} + +static int create_mr(PVRDMADev *dev, union pvrdma_cmd_req *req, + union pvrdma_cmd_resp *rsp) +{ + struct pvrdma_cmd_create_mr *cmd = &req->create_mr; + struct pvrdma_cmd_create_mr_resp *resp = &rsp->create_mr_resp; + PCIDevice *pci_dev = PCI_DEVICE(dev); + void *host_virt = NULL; + int rc = 0; + + memset(resp, 0, sizeof(*resp)); + + if (!(cmd->flags & PVRDMA_MR_FLAG_DMA)) { + host_virt = pvrdma_map_to_pdir(pci_dev, cmd->pdir_dma, cmd->nchunks, + cmd->length); + if (!host_virt) { + rdma_error_report("Failed to map to pdir"); + return -EINVAL; + } + } + + rc = rdma_rm_alloc_mr(&dev->rdma_dev_res, cmd->pd_handle, cmd->start, + cmd->length, host_virt, cmd->access_flags, + &resp->mr_handle, &resp->lkey, &resp->rkey); + if (rc && host_virt) { + munmap(host_virt, cmd->length); + } + + return rc; +} + +static int destroy_mr(PVRDMADev *dev, union pvrdma_cmd_req *req, + union pvrdma_cmd_resp *rsp) +{ + struct pvrdma_cmd_destroy_mr *cmd = &req->destroy_mr; + + rdma_rm_dealloc_mr(&dev->rdma_dev_res, cmd->mr_handle); + + return 0; +} + +static int create_cq_ring(PCIDevice *pci_dev , PvrdmaRing **ring, + uint64_t pdir_dma, uint32_t nchunks, uint32_t cqe) +{ + uint64_t *dir = 
NULL, *tbl = NULL; + PvrdmaRing *r; + int rc = -EINVAL; + char ring_name[MAX_RING_NAME_SZ]; + + if (!nchunks || nchunks > PVRDMA_MAX_FAST_REG_PAGES) { + rdma_error_report("Got invalid nchunks: %d", nchunks); + return rc; + } + + dir = rdma_pci_dma_map(pci_dev, pdir_dma, TARGET_PAGE_SIZE); + if (!dir) { + rdma_error_report("Failed to map to CQ page directory"); + goto out; + } + + tbl = rdma_pci_dma_map(pci_dev, dir[0], TARGET_PAGE_SIZE); + if (!tbl) { + rdma_error_report("Failed to map to CQ page table"); + goto out; + } + + r = g_malloc(sizeof(*r)); + *ring = r; + + r->ring_state = (PvrdmaRingState *) + rdma_pci_dma_map(pci_dev, tbl[0], TARGET_PAGE_SIZE); + + if (!r->ring_state) { + rdma_error_report("Failed to map to CQ ring state"); + goto out_free_ring; + } + + sprintf(ring_name, "cq_ring_%" PRIx64, pdir_dma); + rc = pvrdma_ring_init(r, ring_name, pci_dev, &r->ring_state[1], + cqe, sizeof(struct pvrdma_cqe), + /* first page is ring state */ + (dma_addr_t *)&tbl[1], nchunks - 1); + if (rc) { + goto out_unmap_ring_state; + } + + goto out; + +out_unmap_ring_state: + /* ring_state was in slot 1, not 0 so need to jump back */ + rdma_pci_dma_unmap(pci_dev, --r->ring_state, TARGET_PAGE_SIZE); + +out_free_ring: + g_free(r); + +out: + rdma_pci_dma_unmap(pci_dev, tbl, TARGET_PAGE_SIZE); + rdma_pci_dma_unmap(pci_dev, dir, TARGET_PAGE_SIZE); + + return rc; +} + +static void destroy_cq_ring(PvrdmaRing *ring) +{ + pvrdma_ring_free(ring); + /* ring_state was in slot 1, not 0 so need to jump back */ + rdma_pci_dma_unmap(ring->dev, --ring->ring_state, TARGET_PAGE_SIZE); + g_free(ring); +} + +static int create_cq(PVRDMADev *dev, union pvrdma_cmd_req *req, + union pvrdma_cmd_resp *rsp) +{ + struct pvrdma_cmd_create_cq *cmd = &req->create_cq; + struct pvrdma_cmd_create_cq_resp *resp = &rsp->create_cq_resp; + PvrdmaRing *ring = NULL; + int rc; + + memset(resp, 0, sizeof(*resp)); + + resp->cqe = cmd->cqe; + + rc = create_cq_ring(PCI_DEVICE(dev), &ring, cmd->pdir_dma, cmd->nchunks, + cmd->cqe); + if (rc) { + return rc; + } + + rc = rdma_rm_alloc_cq(&dev->rdma_dev_res, &dev->backend_dev, cmd->cqe, + &resp->cq_handle, ring); + if (rc) { + destroy_cq_ring(ring); + } + + resp->cqe = cmd->cqe; + + return rc; +} + +static int destroy_cq(PVRDMADev *dev, union pvrdma_cmd_req *req, + union pvrdma_cmd_resp *rsp) +{ + struct pvrdma_cmd_destroy_cq *cmd = &req->destroy_cq; + RdmaRmCQ *cq; + PvrdmaRing *ring; + + cq = rdma_rm_get_cq(&dev->rdma_dev_res, cmd->cq_handle); + if (!cq) { + rdma_error_report("Got invalid CQ handle"); + return -EINVAL; + } + + ring = (PvrdmaRing *)cq->opaque; + destroy_cq_ring(ring); + + rdma_rm_dealloc_cq(&dev->rdma_dev_res, cmd->cq_handle); + + return 0; +} + +static int create_qp_rings(PCIDevice *pci_dev, uint64_t pdir_dma, + PvrdmaRing **rings, uint32_t scqe, uint32_t smax_sge, + uint32_t spages, uint32_t rcqe, uint32_t rmax_sge, + uint32_t rpages, uint8_t is_srq) +{ + uint64_t *dir = NULL, *tbl = NULL; + PvrdmaRing *sr, *rr; + int rc = -EINVAL; + char ring_name[MAX_RING_NAME_SZ]; + uint32_t wqe_sz; + + if (!spages || spages > PVRDMA_MAX_FAST_REG_PAGES) { + rdma_error_report("Got invalid send page count for QP ring: %d", + spages); + return rc; + } + + if (!is_srq && (!rpages || rpages > PVRDMA_MAX_FAST_REG_PAGES)) { + rdma_error_report("Got invalid recv page count for QP ring: %d", + rpages); + return rc; + } + + dir = rdma_pci_dma_map(pci_dev, pdir_dma, TARGET_PAGE_SIZE); + if (!dir) { + rdma_error_report("Failed to map to QP page directory"); + goto out; + } + + tbl = 
rdma_pci_dma_map(pci_dev, dir[0], TARGET_PAGE_SIZE); + if (!tbl) { + rdma_error_report("Failed to map to QP page table"); + goto out; + } + + if (!is_srq) { + sr = g_malloc(2 * sizeof(*rr)); + rr = &sr[1]; + } else { + sr = g_malloc(sizeof(*sr)); + } + + *rings = sr; + + /* Create send ring */ + sr->ring_state = (PvrdmaRingState *) + rdma_pci_dma_map(pci_dev, tbl[0], TARGET_PAGE_SIZE); + if (!sr->ring_state) { + rdma_error_report("Failed to map to QP ring state"); + goto out_free_sr_mem; + } + + wqe_sz = pow2ceil(sizeof(struct pvrdma_sq_wqe_hdr) + + sizeof(struct pvrdma_sge) * smax_sge - 1); + + sprintf(ring_name, "qp_sring_%" PRIx64, pdir_dma); + rc = pvrdma_ring_init(sr, ring_name, pci_dev, sr->ring_state, + scqe, wqe_sz, (dma_addr_t *)&tbl[1], spages); + if (rc) { + goto out_unmap_ring_state; + } + + if (!is_srq) { + /* Create recv ring */ + rr->ring_state = &sr->ring_state[1]; + wqe_sz = pow2ceil(sizeof(struct pvrdma_rq_wqe_hdr) + + sizeof(struct pvrdma_sge) * rmax_sge - 1); + sprintf(ring_name, "qp_rring_%" PRIx64, pdir_dma); + rc = pvrdma_ring_init(rr, ring_name, pci_dev, rr->ring_state, + rcqe, wqe_sz, (dma_addr_t *)&tbl[1 + spages], + rpages); + if (rc) { + goto out_free_sr; + } + } + + goto out; + +out_free_sr: + pvrdma_ring_free(sr); + +out_unmap_ring_state: + rdma_pci_dma_unmap(pci_dev, sr->ring_state, TARGET_PAGE_SIZE); + +out_free_sr_mem: + g_free(sr); + +out: + rdma_pci_dma_unmap(pci_dev, tbl, TARGET_PAGE_SIZE); + rdma_pci_dma_unmap(pci_dev, dir, TARGET_PAGE_SIZE); + + return rc; +} + +static void destroy_qp_rings(PvrdmaRing *ring, uint8_t is_srq) +{ + pvrdma_ring_free(&ring[0]); + if (!is_srq) { + pvrdma_ring_free(&ring[1]); + } + + rdma_pci_dma_unmap(ring->dev, ring->ring_state, TARGET_PAGE_SIZE); + g_free(ring); +} + +static int create_qp(PVRDMADev *dev, union pvrdma_cmd_req *req, + union pvrdma_cmd_resp *rsp) +{ + struct pvrdma_cmd_create_qp *cmd = &req->create_qp; + struct pvrdma_cmd_create_qp_resp *resp = &rsp->create_qp_resp; + PvrdmaRing *rings = NULL; + int rc; + + memset(resp, 0, sizeof(*resp)); + + rc = create_qp_rings(PCI_DEVICE(dev), cmd->pdir_dma, &rings, + cmd->max_send_wr, cmd->max_send_sge, cmd->send_chunks, + cmd->max_recv_wr, cmd->max_recv_sge, + cmd->total_chunks - cmd->send_chunks - 1, cmd->is_srq); + if (rc) { + return rc; + } + + rc = rdma_rm_alloc_qp(&dev->rdma_dev_res, cmd->pd_handle, cmd->qp_type, + cmd->max_send_wr, cmd->max_send_sge, + cmd->send_cq_handle, cmd->max_recv_wr, + cmd->max_recv_sge, cmd->recv_cq_handle, rings, + &resp->qpn, cmd->is_srq, cmd->srq_handle); + if (rc) { + destroy_qp_rings(rings, cmd->is_srq); + return rc; + } + + resp->max_send_wr = cmd->max_send_wr; + resp->max_recv_wr = cmd->max_recv_wr; + resp->max_send_sge = cmd->max_send_sge; + resp->max_recv_sge = cmd->max_recv_sge; + resp->max_inline_data = cmd->max_inline_data; + + return 0; +} + +static int modify_qp(PVRDMADev *dev, union pvrdma_cmd_req *req, + union pvrdma_cmd_resp *rsp) +{ + struct pvrdma_cmd_modify_qp *cmd = &req->modify_qp; + int rc; + + /* No need to verify sgid_index since it is u8 */ + + rc = rdma_rm_modify_qp(&dev->rdma_dev_res, &dev->backend_dev, + cmd->qp_handle, cmd->attr_mask, + cmd->attrs.ah_attr.grh.sgid_index, + (union ibv_gid *)&cmd->attrs.ah_attr.grh.dgid, + cmd->attrs.dest_qp_num, + (enum ibv_qp_state)cmd->attrs.qp_state, + cmd->attrs.qkey, cmd->attrs.rq_psn, + cmd->attrs.sq_psn); + + return rc; +} + +static int query_qp(PVRDMADev *dev, union pvrdma_cmd_req *req, + union pvrdma_cmd_resp *rsp) +{ + struct pvrdma_cmd_query_qp *cmd = &req->query_qp; 
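+ /* Note: resp->attrs is handed to the resource manager below as a struct ibv_qp_attr; like the other handlers here, this relies on the pvrdma and libibverbs attribute layouts being compatible. */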
+ struct pvrdma_cmd_query_qp_resp *resp = &rsp->query_qp_resp; + struct ibv_qp_init_attr init_attr; + int rc; + + memset(resp, 0, sizeof(*resp)); + + rc = rdma_rm_query_qp(&dev->rdma_dev_res, &dev->backend_dev, cmd->qp_handle, + (struct ibv_qp_attr *)&resp->attrs, cmd->attr_mask, + &init_attr); + + return rc; +} + +static int destroy_qp(PVRDMADev *dev, union pvrdma_cmd_req *req, + union pvrdma_cmd_resp *rsp) +{ + struct pvrdma_cmd_destroy_qp *cmd = &req->destroy_qp; + RdmaRmQP *qp; + PvrdmaRing *ring; + + qp = rdma_rm_get_qp(&dev->rdma_dev_res, cmd->qp_handle); + if (!qp) { + return -EINVAL; + } + + ring = (PvrdmaRing *)qp->opaque; + destroy_qp_rings(ring, qp->is_srq); + rdma_rm_dealloc_qp(&dev->rdma_dev_res, cmd->qp_handle); + + return 0; +} + +static int create_bind(PVRDMADev *dev, union pvrdma_cmd_req *req, + union pvrdma_cmd_resp *rsp) +{ + struct pvrdma_cmd_create_bind *cmd = &req->create_bind; + int rc; + union ibv_gid *gid = (union ibv_gid *)&cmd->new_gid; + + if (cmd->index >= MAX_PORT_GIDS) { + return -EINVAL; + } + + rc = rdma_rm_add_gid(&dev->rdma_dev_res, &dev->backend_dev, + dev->backend_eth_device_name, gid, cmd->index); + + return rc; +} + +static int destroy_bind(PVRDMADev *dev, union pvrdma_cmd_req *req, + union pvrdma_cmd_resp *rsp) +{ + int rc; + + struct pvrdma_cmd_destroy_bind *cmd = &req->destroy_bind; + + if (cmd->index >= MAX_PORT_GIDS) { + return -EINVAL; + } + + rc = rdma_rm_del_gid(&dev->rdma_dev_res, &dev->backend_dev, + dev->backend_eth_device_name, cmd->index); + + return rc; +} + +static int create_uc(PVRDMADev *dev, union pvrdma_cmd_req *req, + union pvrdma_cmd_resp *rsp) +{ + struct pvrdma_cmd_create_uc *cmd = &req->create_uc; + struct pvrdma_cmd_create_uc_resp *resp = &rsp->create_uc_resp; + int rc; + + memset(resp, 0, sizeof(*resp)); + rc = rdma_rm_alloc_uc(&dev->rdma_dev_res, cmd->pfn, &resp->ctx_handle); + + return rc; +} + +static int destroy_uc(PVRDMADev *dev, union pvrdma_cmd_req *req, + union pvrdma_cmd_resp *rsp) +{ + struct pvrdma_cmd_destroy_uc *cmd = &req->destroy_uc; + + rdma_rm_dealloc_uc(&dev->rdma_dev_res, cmd->ctx_handle); + + return 0; +} + +static int create_srq_ring(PCIDevice *pci_dev, PvrdmaRing **ring, + uint64_t pdir_dma, uint32_t max_wr, + uint32_t max_sge, uint32_t nchunks) +{ + uint64_t *dir = NULL, *tbl = NULL; + PvrdmaRing *r; + int rc = -EINVAL; + char ring_name[MAX_RING_NAME_SZ]; + uint32_t wqe_sz; + + if (!nchunks || nchunks > PVRDMA_MAX_FAST_REG_PAGES) { + rdma_error_report("Got invalid page count for SRQ ring: %d", + nchunks); + return rc; + } + + dir = rdma_pci_dma_map(pci_dev, pdir_dma, TARGET_PAGE_SIZE); + if (!dir) { + rdma_error_report("Failed to map to SRQ page directory"); + goto out; + } + + tbl = rdma_pci_dma_map(pci_dev, dir[0], TARGET_PAGE_SIZE); + if (!tbl) { + rdma_error_report("Failed to map to SRQ page table"); + goto out; + } + + r = g_malloc(sizeof(*r)); + *ring = r; + + r->ring_state = (PvrdmaRingState *) + rdma_pci_dma_map(pci_dev, tbl[0], TARGET_PAGE_SIZE); + if (!r->ring_state) { + rdma_error_report("Failed to map to SRQ ring state"); + goto out_free_ring_mem; + } + + wqe_sz = pow2ceil(sizeof(struct pvrdma_rq_wqe_hdr) + + sizeof(struct pvrdma_sge) * max_sge - 1); + sprintf(ring_name, "srq_ring_%" PRIx64, pdir_dma); + rc = pvrdma_ring_init(r, ring_name, pci_dev, &r->ring_state[1], max_wr, + wqe_sz, (dma_addr_t *)&tbl[1], nchunks - 1); + if (rc) { + goto out_unmap_ring_state; + } + + goto out; + +out_unmap_ring_state: + rdma_pci_dma_unmap(pci_dev, r->ring_state, TARGET_PAGE_SIZE); + +out_free_ring_mem: +
g_free(r); + +out: + rdma_pci_dma_unmap(pci_dev, tbl, TARGET_PAGE_SIZE); + rdma_pci_dma_unmap(pci_dev, dir, TARGET_PAGE_SIZE); + + return rc; +} + +static void destroy_srq_ring(PvrdmaRing *ring) +{ + pvrdma_ring_free(ring); + rdma_pci_dma_unmap(ring->dev, ring->ring_state, TARGET_PAGE_SIZE); + g_free(ring); +} + +static int create_srq(PVRDMADev *dev, union pvrdma_cmd_req *req, + union pvrdma_cmd_resp *rsp) +{ + struct pvrdma_cmd_create_srq *cmd = &req->create_srq; + struct pvrdma_cmd_create_srq_resp *resp = &rsp->create_srq_resp; + PvrdmaRing *ring = NULL; + int rc; + + memset(resp, 0, sizeof(*resp)); + + rc = create_srq_ring(PCI_DEVICE(dev), &ring, cmd->pdir_dma, + cmd->attrs.max_wr, cmd->attrs.max_sge, + cmd->nchunks); + if (rc) { + return rc; + } + + rc = rdma_rm_alloc_srq(&dev->rdma_dev_res, cmd->pd_handle, + cmd->attrs.max_wr, cmd->attrs.max_sge, + cmd->attrs.srq_limit, &resp->srqn, ring); + if (rc) { + destroy_srq_ring(ring); + return rc; + } + + return 0; +} + +static int query_srq(PVRDMADev *dev, union pvrdma_cmd_req *req, + union pvrdma_cmd_resp *rsp) +{ + struct pvrdma_cmd_query_srq *cmd = &req->query_srq; + struct pvrdma_cmd_query_srq_resp *resp = &rsp->query_srq_resp; + + memset(resp, 0, sizeof(*resp)); + + return rdma_rm_query_srq(&dev->rdma_dev_res, cmd->srq_handle, + (struct ibv_srq_attr *)&resp->attrs); +} + +static int modify_srq(PVRDMADev *dev, union pvrdma_cmd_req *req, + union pvrdma_cmd_resp *rsp) +{ + struct pvrdma_cmd_modify_srq *cmd = &req->modify_srq; + + /* Only support SRQ limit */ + if (!(cmd->attr_mask & IBV_SRQ_LIMIT) || + (cmd->attr_mask & IBV_SRQ_MAX_WR)) { + return -EINVAL; + } + + return rdma_rm_modify_srq(&dev->rdma_dev_res, cmd->srq_handle, + (struct ibv_srq_attr *)&cmd->attrs, + cmd->attr_mask); +} + +static int destroy_srq(PVRDMADev *dev, union pvrdma_cmd_req *req, + union pvrdma_cmd_resp *rsp) +{ + struct pvrdma_cmd_destroy_srq *cmd = &req->destroy_srq; + RdmaRmSRQ *srq; + PvrdmaRing *ring; + + srq = rdma_rm_get_srq(&dev->rdma_dev_res, cmd->srq_handle); + if (!srq) { + return -EINVAL; + } + + ring = (PvrdmaRing *)srq->opaque; + destroy_srq_ring(ring); + rdma_rm_dealloc_srq(&dev->rdma_dev_res, cmd->srq_handle); + + return 0; +} + +struct cmd_handler { + uint32_t cmd; + uint32_t ack; + int (*exec)(PVRDMADev *dev, union pvrdma_cmd_req *req, + union pvrdma_cmd_resp *rsp); +}; + +static struct cmd_handler cmd_handlers[] = { + {PVRDMA_CMD_QUERY_PORT, PVRDMA_CMD_QUERY_PORT_RESP, query_port}, + {PVRDMA_CMD_QUERY_PKEY, PVRDMA_CMD_QUERY_PKEY_RESP, query_pkey}, + {PVRDMA_CMD_CREATE_PD, PVRDMA_CMD_CREATE_PD_RESP, create_pd}, + {PVRDMA_CMD_DESTROY_PD, PVRDMA_CMD_DESTROY_PD_RESP_NOOP, destroy_pd}, + {PVRDMA_CMD_CREATE_MR, PVRDMA_CMD_CREATE_MR_RESP, create_mr}, + {PVRDMA_CMD_DESTROY_MR, PVRDMA_CMD_DESTROY_MR_RESP_NOOP, destroy_mr}, + {PVRDMA_CMD_CREATE_CQ, PVRDMA_CMD_CREATE_CQ_RESP, create_cq}, + {PVRDMA_CMD_RESIZE_CQ, PVRDMA_CMD_RESIZE_CQ_RESP, NULL}, + {PVRDMA_CMD_DESTROY_CQ, PVRDMA_CMD_DESTROY_CQ_RESP_NOOP, destroy_cq}, + {PVRDMA_CMD_CREATE_QP, PVRDMA_CMD_CREATE_QP_RESP, create_qp}, + {PVRDMA_CMD_MODIFY_QP, PVRDMA_CMD_MODIFY_QP_RESP, modify_qp}, + {PVRDMA_CMD_QUERY_QP, PVRDMA_CMD_QUERY_QP_RESP, query_qp}, + {PVRDMA_CMD_DESTROY_QP, PVRDMA_CMD_DESTROY_QP_RESP, destroy_qp}, + {PVRDMA_CMD_CREATE_UC, PVRDMA_CMD_CREATE_UC_RESP, create_uc}, + {PVRDMA_CMD_DESTROY_UC, PVRDMA_CMD_DESTROY_UC_RESP_NOOP, destroy_uc}, + {PVRDMA_CMD_CREATE_BIND, PVRDMA_CMD_CREATE_BIND_RESP_NOOP, create_bind}, + {PVRDMA_CMD_DESTROY_BIND, PVRDMA_CMD_DESTROY_BIND_RESP_NOOP, destroy_bind},
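+ /* SRQ commands; PVRDMA_CMD_RESIZE_CQ above has a NULL handler and is reported as not implemented by pvrdma_exec_cmd(). */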
+ {PVRDMA_CMD_CREATE_SRQ, PVRDMA_CMD_CREATE_SRQ_RESP, create_srq}, + {PVRDMA_CMD_QUERY_SRQ, PVRDMA_CMD_QUERY_SRQ_RESP, query_srq}, + {PVRDMA_CMD_MODIFY_SRQ, PVRDMA_CMD_MODIFY_SRQ_RESP, modify_srq}, + {PVRDMA_CMD_DESTROY_SRQ, PVRDMA_CMD_DESTROY_SRQ_RESP, destroy_srq}, +}; + +int pvrdma_exec_cmd(PVRDMADev *dev) +{ + int err = 0xFFFF; + DSRInfo *dsr_info; + + dsr_info = &dev->dsr_info; + + if (dsr_info->req->hdr.cmd >= sizeof(cmd_handlers) / + sizeof(struct cmd_handler)) { + rdma_error_report("Unsupported command"); + goto out; + } + + if (!cmd_handlers[dsr_info->req->hdr.cmd].exec) { + rdma_error_report("Unsupported command (not implemented yet)"); + goto out; + } + + err = cmd_handlers[dsr_info->req->hdr.cmd].exec(dev, dsr_info->req, + dsr_info->rsp); + dsr_info->rsp->hdr.response = dsr_info->req->hdr.response; + dsr_info->rsp->hdr.ack = cmd_handlers[dsr_info->req->hdr.cmd].ack; + dsr_info->rsp->hdr.err = err < 0 ? -err : 0; + + trace_pvrdma_exec_cmd(dsr_info->req->hdr.cmd, dsr_info->rsp->hdr.err); + + dev->stats.commands++; + +out: + set_reg_val(dev, PVRDMA_REG_ERR, err); + post_interrupt(dev, INTR_VEC_CMD_RING); + + return (err == 0) ? 0 : -EINVAL; +} diff --git a/hw/rdma/vmw/pvrdma_dev_ring.c b/hw/rdma/vmw/pvrdma_dev_ring.c new file mode 100644 index 000000000..42130667a --- /dev/null +++ b/hw/rdma/vmw/pvrdma_dev_ring.c @@ -0,0 +1,142 @@ +/* + * QEMU paravirtual RDMA - Device rings + * + * Copyright (C) 2018 Oracle + * Copyright (C) 2018 Red Hat Inc + * + * Authors: + * Yuval Shaia + * Marcel Apfelbaum + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +#include "qemu/osdep.h" +#include "qemu/cutils.h" +#include "hw/pci/pci.h" +#include "cpu.h" + +#include "trace.h" + +#include "../rdma_utils.h" +#include "pvrdma_dev_ring.h" + +int pvrdma_ring_init(PvrdmaRing *ring, const char *name, PCIDevice *dev, + PvrdmaRingState *ring_state, uint32_t max_elems, + size_t elem_sz, dma_addr_t *tbl, uint32_t npages) +{ + int i; + int rc = 0; + + pstrcpy(ring->name, MAX_RING_NAME_SZ, name); + ring->dev = dev; + ring->ring_state = ring_state; + ring->max_elems = max_elems; + ring->elem_sz = elem_sz; + /* TODO: Decide whether the driver-initialized ring state should be reset here: + qatomic_set(&ring->ring_state->prod_tail, 0); + qatomic_set(&ring->ring_state->cons_head, 0); + */ + ring->npages = npages; + ring->pages = g_malloc0(npages * sizeof(void *)); + + for (i = 0; i < npages; i++) { + if (!tbl[i]) { + rdma_error_report("npages=%d but tbl[%d] is NULL", npages, i); + continue; + } + + ring->pages[i] = rdma_pci_dma_map(dev, tbl[i], TARGET_PAGE_SIZE); + if (!ring->pages[i]) { + rc = -ENOMEM; + rdma_error_report("Failed to map to page %d in ring %s", i, name); + goto out_free; + } + memset(ring->pages[i], 0, TARGET_PAGE_SIZE); + } + + goto out; + +out_free: + while (i--) { + rdma_pci_dma_unmap(dev, ring->pages[i], TARGET_PAGE_SIZE); + } + g_free(ring->pages); + +out: + return rc; +} + +void *pvrdma_ring_next_elem_read(PvrdmaRing *ring) +{ + unsigned int idx, offset; + const uint32_t tail = qatomic_read(&ring->ring_state->prod_tail); + const uint32_t head = qatomic_read(&ring->ring_state->cons_head); + + if (tail & ~((ring->max_elems << 1) - 1) || + head & ~((ring->max_elems << 1) - 1) || + tail == head) { + trace_pvrdma_ring_next_elem_read_no_data(ring->name); + return NULL; + } + + idx = head & (ring->max_elems - 1); + offset = idx * ring->elem_sz; + return ring->pages[offset / TARGET_PAGE_SIZE] +
(offset % TARGET_PAGE_SIZE); +} + +void pvrdma_ring_read_inc(PvrdmaRing *ring) +{ + uint32_t idx = qatomic_read(&ring->ring_state->cons_head); + + idx = (idx + 1) & ((ring->max_elems << 1) - 1); + qatomic_set(&ring->ring_state->cons_head, idx); +} + +void *pvrdma_ring_next_elem_write(PvrdmaRing *ring) +{ + unsigned int idx, offset; + const uint32_t tail = qatomic_read(&ring->ring_state->prod_tail); + const uint32_t head = qatomic_read(&ring->ring_state->cons_head); + + if (tail & ~((ring->max_elems << 1) - 1) || + head & ~((ring->max_elems << 1) - 1) || + tail == (head ^ ring->max_elems)) { + rdma_error_report("CQ is full"); + return NULL; + } + + idx = tail & (ring->max_elems - 1); + offset = idx * ring->elem_sz; + return ring->pages[offset / TARGET_PAGE_SIZE] + (offset % TARGET_PAGE_SIZE); +} + +void pvrdma_ring_write_inc(PvrdmaRing *ring) +{ + uint32_t idx = qatomic_read(&ring->ring_state->prod_tail); + + idx = (idx + 1) & ((ring->max_elems << 1) - 1); + qatomic_set(&ring->ring_state->prod_tail, idx); +} + +void pvrdma_ring_free(PvrdmaRing *ring) +{ + if (!ring) { + return; + } + + if (!ring->pages) { + return; + } + + while (ring->npages--) { + rdma_pci_dma_unmap(ring->dev, ring->pages[ring->npages], + TARGET_PAGE_SIZE); + } + + g_free(ring->pages); + ring->pages = NULL; +} diff --git a/hw/rdma/vmw/pvrdma_dev_ring.h b/hw/rdma/vmw/pvrdma_dev_ring.h new file mode 100644 index 000000000..d231588ce --- /dev/null +++ b/hw/rdma/vmw/pvrdma_dev_ring.h @@ -0,0 +1,46 @@ +/* + * QEMU VMWARE paravirtual RDMA ring utilities + * + * Copyright (C) 2018 Oracle + * Copyright (C) 2018 Red Hat Inc + * + * Authors: + * Yuval Shaia + * Marcel Apfelbaum + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +#ifndef PVRDMA_DEV_RING_H +#define PVRDMA_DEV_RING_H + + +#define MAX_RING_NAME_SZ 32 + +typedef struct PvrdmaRingState { + int prod_tail; /* producer tail */ + int cons_head; /* consumer head */ +} PvrdmaRingState; + +typedef struct PvrdmaRing { + char name[MAX_RING_NAME_SZ]; + PCIDevice *dev; + uint32_t max_elems; + size_t elem_sz; + PvrdmaRingState *ring_state; /* used only for unmap */ + int npages; + void **pages; +} PvrdmaRing; + +int pvrdma_ring_init(PvrdmaRing *ring, const char *name, PCIDevice *dev, + PvrdmaRingState *ring_state, uint32_t max_elems, + size_t elem_sz, dma_addr_t *tbl, uint32_t npages); +void *pvrdma_ring_next_elem_read(PvrdmaRing *ring); +void pvrdma_ring_read_inc(PvrdmaRing *ring); +void *pvrdma_ring_next_elem_write(PvrdmaRing *ring); +void pvrdma_ring_write_inc(PvrdmaRing *ring); +void pvrdma_ring_free(PvrdmaRing *ring); + +#endif diff --git a/hw/rdma/vmw/pvrdma_main.c b/hw/rdma/vmw/pvrdma_main.c new file mode 100644 index 000000000..91206dbb8 --- /dev/null +++ b/hw/rdma/vmw/pvrdma_main.c @@ -0,0 +1,723 @@ +/* + * QEMU paravirtual RDMA + * + * Copyright (C) 2018 Oracle + * Copyright (C) 2018 Red Hat Inc + * + * Authors: + * Yuval Shaia + * Marcel Apfelbaum + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ * + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/module.h" +#include "hw/pci/pci.h" +#include "hw/pci/pci_ids.h" +#include "hw/pci/msi.h" +#include "hw/pci/msix.h" +#include "hw/qdev-properties.h" +#include "hw/qdev-properties-system.h" +#include "cpu.h" +#include "trace.h" +#include "monitor/monitor.h" +#include "hw/rdma/rdma.h" + +#include "../rdma_rm.h" +#include "../rdma_backend.h" +#include "../rdma_utils.h" + +#include <infiniband/verbs.h> +#include "pvrdma.h" +#include "standard-headers/rdma/vmw_pvrdma-abi.h" +#include "sysemu/runstate.h" +#include "standard-headers/drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h" +#include "pvrdma_qp_ops.h" + +static Property pvrdma_dev_properties[] = { + DEFINE_PROP_STRING("netdev", PVRDMADev, backend_eth_device_name), + DEFINE_PROP_STRING("ibdev", PVRDMADev, backend_device_name), + DEFINE_PROP_UINT8("ibport", PVRDMADev, backend_port_num, 1), + DEFINE_PROP_UINT64("dev-caps-max-mr-size", PVRDMADev, dev_attr.max_mr_size, + MAX_MR_SIZE), + DEFINE_PROP_INT32("dev-caps-max-qp", PVRDMADev, dev_attr.max_qp, MAX_QP), + DEFINE_PROP_INT32("dev-caps-max-cq", PVRDMADev, dev_attr.max_cq, MAX_CQ), + DEFINE_PROP_INT32("dev-caps-max-mr", PVRDMADev, dev_attr.max_mr, MAX_MR), + DEFINE_PROP_INT32("dev-caps-max-pd", PVRDMADev, dev_attr.max_pd, MAX_PD), + DEFINE_PROP_INT32("dev-caps-qp-rd-atom", PVRDMADev, dev_attr.max_qp_rd_atom, + MAX_QP_RD_ATOM), + DEFINE_PROP_INT32("dev-caps-max-qp-init-rd-atom", PVRDMADev, + dev_attr.max_qp_init_rd_atom, MAX_QP_INIT_RD_ATOM), + DEFINE_PROP_INT32("dev-caps-max-ah", PVRDMADev, dev_attr.max_ah, MAX_AH), + DEFINE_PROP_INT32("dev-caps-max-srq", PVRDMADev, dev_attr.max_srq, MAX_SRQ), + DEFINE_PROP_CHR("mad-chardev", PVRDMADev, mad_chr), + DEFINE_PROP_END_OF_LIST(), +}; + +static void pvrdma_format_statistics(RdmaProvider *obj, GString *buf) +{ + PVRDMADev *dev = PVRDMA_DEV(obj); + PCIDevice *pdev = PCI_DEVICE(dev); + + g_string_append_printf(buf, "%s, %x.%x\n", + pdev->name, PCI_SLOT(pdev->devfn), + PCI_FUNC(pdev->devfn)); + g_string_append_printf(buf, "\tcommands : %" PRId64 "\n", + dev->stats.commands); + g_string_append_printf(buf, "\tregs_reads : %" PRId64 "\n", + dev->stats.regs_reads); + g_string_append_printf(buf, "\tregs_writes : %" PRId64 "\n", + dev->stats.regs_writes); + g_string_append_printf(buf, "\tuar_writes : %" PRId64 "\n", + dev->stats.uar_writes); + g_string_append_printf(buf, "\tinterrupts : %" PRId64 "\n", + dev->stats.interrupts); + rdma_format_device_counters(&dev->rdma_dev_res, buf); +} + +static void free_dev_ring(PCIDevice *pci_dev, PvrdmaRing *ring, + void *ring_state) +{ + pvrdma_ring_free(ring); + rdma_pci_dma_unmap(pci_dev, ring_state, TARGET_PAGE_SIZE); +} + +static int init_dev_ring(PvrdmaRing *ring, PvrdmaRingState **ring_state, + const char *name, PCIDevice *pci_dev, + dma_addr_t dir_addr, uint32_t num_pages) +{ + uint64_t *dir, *tbl; + int rc = 0; + + if (!num_pages) { + rdma_error_report("Ring pages count must be strictly positive"); + return -EINVAL; + } + + dir = rdma_pci_dma_map(pci_dev, dir_addr, TARGET_PAGE_SIZE); + if (!dir) { + rdma_error_report("Failed to map to page directory (ring %s)", name); + rc = -ENOMEM; + goto out; + } + tbl = rdma_pci_dma_map(pci_dev, dir[0], TARGET_PAGE_SIZE); + if (!tbl) { + rdma_error_report("Failed to map to page table (ring %s)", name); + rc = -ENOMEM; + goto out_free_dir; + } + + *ring_state = rdma_pci_dma_map(pci_dev, tbl[0], TARGET_PAGE_SIZE); + if (!*ring_state) { + rdma_error_report("Failed to map to ring state (ring %s)", name); + rc = -ENOMEM; +
goto out_free_tbl; + } + /* RX ring is the second */ + (*ring_state)++; + rc = pvrdma_ring_init(ring, name, pci_dev, + (PvrdmaRingState *)*ring_state, + (num_pages - 1) * TARGET_PAGE_SIZE / + sizeof(struct pvrdma_cqne), + sizeof(struct pvrdma_cqne), + (dma_addr_t *)&tbl[1], (dma_addr_t)num_pages - 1); + if (rc) { + rc = -ENOMEM; + goto out_free_ring_state; + } + + goto out_free_tbl; + +out_free_ring_state: + rdma_pci_dma_unmap(pci_dev, *ring_state, TARGET_PAGE_SIZE); + +out_free_tbl: + rdma_pci_dma_unmap(pci_dev, tbl, TARGET_PAGE_SIZE); + +out_free_dir: + rdma_pci_dma_unmap(pci_dev, dir, TARGET_PAGE_SIZE); + +out: + return rc; +} + +static void free_dsr(PVRDMADev *dev) +{ + PCIDevice *pci_dev = PCI_DEVICE(dev); + + if (!dev->dsr_info.dsr) { + return; + } + + free_dev_ring(pci_dev, &dev->dsr_info.async, + dev->dsr_info.async_ring_state); + + free_dev_ring(pci_dev, &dev->dsr_info.cq, dev->dsr_info.cq_ring_state); + + rdma_pci_dma_unmap(pci_dev, dev->dsr_info.req, + sizeof(union pvrdma_cmd_req)); + + rdma_pci_dma_unmap(pci_dev, dev->dsr_info.rsp, + sizeof(union pvrdma_cmd_resp)); + + rdma_pci_dma_unmap(pci_dev, dev->dsr_info.dsr, + sizeof(struct pvrdma_device_shared_region)); + + dev->dsr_info.dsr = NULL; +} + +static int load_dsr(PVRDMADev *dev) +{ + int rc = 0; + PCIDevice *pci_dev = PCI_DEVICE(dev); + DSRInfo *dsr_info; + struct pvrdma_device_shared_region *dsr; + + free_dsr(dev); + + /* Map to DSR */ + dev->dsr_info.dsr = rdma_pci_dma_map(pci_dev, dev->dsr_info.dma, + sizeof(struct pvrdma_device_shared_region)); + if (!dev->dsr_info.dsr) { + rdma_error_report("Failed to map to DSR"); + rc = -ENOMEM; + goto out; + } + + /* Shortcuts */ + dsr_info = &dev->dsr_info; + dsr = dsr_info->dsr; + + /* Map to command slot */ + dsr_info->req = rdma_pci_dma_map(pci_dev, dsr->cmd_slot_dma, + sizeof(union pvrdma_cmd_req)); + if (!dsr_info->req) { + rdma_error_report("Failed to map to command slot address"); + rc = -ENOMEM; + goto out_free_dsr; + } + + /* Map to response slot */ + dsr_info->rsp = rdma_pci_dma_map(pci_dev, dsr->resp_slot_dma, + sizeof(union pvrdma_cmd_resp)); + if (!dsr_info->rsp) { + rdma_error_report("Failed to map to response slot address"); + rc = -ENOMEM; + goto out_free_req; + } + + /* Map to CQ notification ring */ + rc = init_dev_ring(&dsr_info->cq, &dsr_info->cq_ring_state, "dev_cq", + pci_dev, dsr->cq_ring_pages.pdir_dma, + dsr->cq_ring_pages.num_pages); + if (rc) { + rc = -ENOMEM; + goto out_free_rsp; + } + + /* Map to event notification ring */ + rc = init_dev_ring(&dsr_info->async, &dsr_info->async_ring_state, + "dev_async", pci_dev, dsr->async_ring_pages.pdir_dma, + dsr->async_ring_pages.num_pages); + if (rc) { + rc = -ENOMEM; + goto out_free_rsp; + } + + goto out; + +out_free_rsp: + rdma_pci_dma_unmap(pci_dev, dsr_info->rsp, sizeof(union pvrdma_cmd_resp)); + +out_free_req: + rdma_pci_dma_unmap(pci_dev, dsr_info->req, sizeof(union pvrdma_cmd_req)); + +out_free_dsr: + rdma_pci_dma_unmap(pci_dev, dsr_info->dsr, + sizeof(struct pvrdma_device_shared_region)); + dsr_info->dsr = NULL; + +out: + return rc; +} + +static void init_dsr_dev_caps(PVRDMADev *dev) +{ + struct pvrdma_device_shared_region *dsr; + + if (dev->dsr_info.dsr == NULL) { + rdma_error_report("Can't initialize DSR"); + return; + } + + dsr = dev->dsr_info.dsr; + dsr->caps.fw_ver = PVRDMA_FW_VERSION; + dsr->caps.mode = PVRDMA_DEVICE_MODE_ROCE; + dsr->caps.gid_types |= PVRDMA_GID_TYPE_FLAG_ROCE_V1; + dsr->caps.max_uar = RDMA_BAR2_UAR_SIZE; + dsr->caps.max_mr_size = dev->dev_attr.max_mr_size; + dsr->caps.max_qp =
dev->dev_attr.max_qp; + dsr->caps.max_qp_wr = dev->dev_attr.max_qp_wr; + dsr->caps.max_sge = dev->dev_attr.max_sge; + dsr->caps.max_cq = dev->dev_attr.max_cq; + dsr->caps.max_cqe = dev->dev_attr.max_cqe; + dsr->caps.max_mr = dev->dev_attr.max_mr; + dsr->caps.max_pd = dev->dev_attr.max_pd; + dsr->caps.max_ah = dev->dev_attr.max_ah; + dsr->caps.max_srq = dev->dev_attr.max_srq; + dsr->caps.max_srq_wr = dev->dev_attr.max_srq_wr; + dsr->caps.max_srq_sge = dev->dev_attr.max_srq_sge; + dsr->caps.gid_tbl_len = MAX_GIDS; + dsr->caps.sys_image_guid = 0; + dsr->caps.node_guid = dev->node_guid; + dsr->caps.phys_port_cnt = MAX_PORTS; + dsr->caps.max_pkeys = MAX_PKEYS; +} + +static void uninit_msix(PCIDevice *pdev, int used_vectors) +{ + PVRDMADev *dev = PVRDMA_DEV(pdev); + int i; + + for (i = 0; i < used_vectors; i++) { + msix_vector_unuse(pdev, i); + } + + msix_uninit(pdev, &dev->msix, &dev->msix); +} + +static int init_msix(PCIDevice *pdev) +{ + PVRDMADev *dev = PVRDMA_DEV(pdev); + int i; + int rc; + + rc = msix_init(pdev, RDMA_MAX_INTRS, &dev->msix, RDMA_MSIX_BAR_IDX, + RDMA_MSIX_TABLE, &dev->msix, RDMA_MSIX_BAR_IDX, + RDMA_MSIX_PBA, 0, NULL); + + if (rc < 0) { + rdma_error_report("Failed to initialize MSI-X"); + return rc; + } + + for (i = 0; i < RDMA_MAX_INTRS; i++) { + rc = msix_vector_use(PCI_DEVICE(dev), i); + if (rc < 0) { + rdma_error_report("Failed to mark MSI-X vector %d", i); + uninit_msix(pdev, i); + return rc; + } + } + + return 0; +} + +static void pvrdma_fini(PCIDevice *pdev) +{ + PVRDMADev *dev = PVRDMA_DEV(pdev); + + notifier_remove(&dev->shutdown_notifier); + + pvrdma_qp_ops_fini(); + + rdma_backend_stop(&dev->backend_dev); + + rdma_rm_fini(&dev->rdma_dev_res, &dev->backend_dev, + dev->backend_eth_device_name); + + rdma_backend_fini(&dev->backend_dev); + + free_dsr(dev); + + if (msix_enabled(pdev)) { + uninit_msix(pdev, RDMA_MAX_INTRS); + } + + rdma_info_report("Device %s %x.%x is down", pdev->name, + PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); +} + +static void pvrdma_stop(PVRDMADev *dev) +{ + rdma_backend_stop(&dev->backend_dev); +} + +static void pvrdma_start(PVRDMADev *dev) +{ + rdma_backend_start(&dev->backend_dev); +} + +static void activate_device(PVRDMADev *dev) +{ + pvrdma_start(dev); + set_reg_val(dev, PVRDMA_REG_ERR, 0); +} + +static int unquiesce_device(PVRDMADev *dev) +{ + return 0; +} + +static void reset_device(PVRDMADev *dev) +{ + pvrdma_stop(dev); +} + +static uint64_t pvrdma_regs_read(void *opaque, hwaddr addr, unsigned size) +{ + PVRDMADev *dev = opaque; + uint32_t val; + + dev->stats.regs_reads++; + + if (get_reg_val(dev, addr, &val)) { + rdma_error_report("Failed to read REG value from address 0x%x", + (uint32_t)addr); + return -EINVAL; + } + + trace_pvrdma_regs_read(addr, val); + + return val; +} + +static void pvrdma_regs_write(void *opaque, hwaddr addr, uint64_t val, + unsigned size) +{ + PVRDMADev *dev = opaque; + + dev->stats.regs_writes++; + + if (set_reg_val(dev, addr, val)) { + rdma_error_report("Failed to set REG value, addr=0x%"PRIx64 ", val=0x%"PRIx64, + addr, val); + return; + } + + switch (addr) { + case PVRDMA_REG_DSRLOW: + trace_pvrdma_regs_write(addr, val, "DSRLOW", ""); + dev->dsr_info.dma = val; + break; + case PVRDMA_REG_DSRHIGH: + trace_pvrdma_regs_write(addr, val, "DSRHIGH", ""); + dev->dsr_info.dma |= val << 32; + load_dsr(dev); + init_dsr_dev_caps(dev); + break; + case PVRDMA_REG_CTL: + switch (val) { + case PVRDMA_DEVICE_CTL_ACTIVATE: + trace_pvrdma_regs_write(addr, val, "CTL", "ACTIVATE"); + activate_device(dev); + break; + case
PVRDMA_DEVICE_CTL_UNQUIESCE: + trace_pvrdma_regs_write(addr, val, "CTL", "UNQUIESCE"); + unquiesce_device(dev); + break; + case PVRDMA_DEVICE_CTL_RESET: + trace_pvrdma_regs_write(addr, val, "CTL", "RESET"); + reset_device(dev); + break; + } + break; + case PVRDMA_REG_IMR: + trace_pvrdma_regs_write(addr, val, "INTR_MASK", ""); + dev->interrupt_mask = val; + break; + case PVRDMA_REG_REQUEST: + if (val == 0) { + trace_pvrdma_regs_write(addr, val, "REQUEST", ""); + pvrdma_exec_cmd(dev); + } + break; + default: + break; + } +} + +static const MemoryRegionOps regs_ops = { + .read = pvrdma_regs_read, + .write = pvrdma_regs_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .impl = { + .min_access_size = sizeof(uint32_t), + .max_access_size = sizeof(uint32_t), + }, +}; + +static uint64_t pvrdma_uar_read(void *opaque, hwaddr addr, unsigned size) +{ + return 0xffffffff; +} + +static void pvrdma_uar_write(void *opaque, hwaddr addr, uint64_t val, + unsigned size) +{ + PVRDMADev *dev = opaque; + + dev->stats.uar_writes++; + + switch (addr & 0xFFF) { /* Mask with 0xFFF as each UC gets a page */ + case PVRDMA_UAR_QP_OFFSET: + if (val & PVRDMA_UAR_QP_SEND) { + trace_pvrdma_uar_write(addr, val, "QP", "SEND", + val & PVRDMA_UAR_HANDLE_MASK, 0); + pvrdma_qp_send(dev, val & PVRDMA_UAR_HANDLE_MASK); + } + if (val & PVRDMA_UAR_QP_RECV) { + trace_pvrdma_uar_write(addr, val, "QP", "RECV", + val & PVRDMA_UAR_HANDLE_MASK, 0); + pvrdma_qp_recv(dev, val & PVRDMA_UAR_HANDLE_MASK); + } + break; + case PVRDMA_UAR_CQ_OFFSET: + if (val & PVRDMA_UAR_CQ_ARM) { + trace_pvrdma_uar_write(addr, val, "CQ", "ARM", + val & PVRDMA_UAR_HANDLE_MASK, + !!(val & PVRDMA_UAR_CQ_ARM_SOL)); + rdma_rm_req_notify_cq(&dev->rdma_dev_res, + val & PVRDMA_UAR_HANDLE_MASK, + !!(val & PVRDMA_UAR_CQ_ARM_SOL)); + } + if (val & PVRDMA_UAR_CQ_ARM_SOL) { + trace_pvrdma_uar_write(addr, val, "CQ", "ARMSOL - not supported", 0, + 0); + } + if (val & PVRDMA_UAR_CQ_POLL) { + trace_pvrdma_uar_write(addr, val, "CQ", "POLL", + val & PVRDMA_UAR_HANDLE_MASK, 0); + pvrdma_cq_poll(&dev->rdma_dev_res, val & PVRDMA_UAR_HANDLE_MASK); + } + break; + case PVRDMA_UAR_SRQ_OFFSET: + if (val & PVRDMA_UAR_SRQ_RECV) { + trace_pvrdma_uar_write(addr, val, "QP", "SRQ", + val & PVRDMA_UAR_HANDLE_MASK, 0); + pvrdma_srq_recv(dev, val & PVRDMA_UAR_HANDLE_MASK); + } + break; + default: + rdma_error_report("Unsupported command, addr=0x%"PRIx64", val=0x%"PRIx64, + addr, val); + break; + } +} + +static const MemoryRegionOps uar_ops = { + .read = pvrdma_uar_read, + .write = pvrdma_uar_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .impl = { + .min_access_size = sizeof(uint32_t), + .max_access_size = sizeof(uint32_t), + }, +}; + +static void init_pci_config(PCIDevice *pdev) +{ + pdev->config[PCI_INTERRUPT_PIN] = 1; +} + +static void init_bars(PCIDevice *pdev) +{ + PVRDMADev *dev = PVRDMA_DEV(pdev); + + /* BAR 0 - MSI-X */ + memory_region_init(&dev->msix, OBJECT(dev), "pvrdma-msix", + RDMA_BAR0_MSIX_SIZE); + pci_register_bar(pdev, RDMA_MSIX_BAR_IDX, PCI_BASE_ADDRESS_SPACE_MEMORY, + &dev->msix); + + /* BAR 1 - Registers */ + memset(&dev->regs_data, 0, sizeof(dev->regs_data)); + memory_region_init_io(&dev->regs, OBJECT(dev), &regs_ops, dev, + "pvrdma-regs", sizeof(dev->regs_data)); + pci_register_bar(pdev, RDMA_REG_BAR_IDX, PCI_BASE_ADDRESS_SPACE_MEMORY, + &dev->regs); + + /* BAR 2 - UAR */ + memset(&dev->uar_data, 0, sizeof(dev->uar_data)); + memory_region_init_io(&dev->uar, OBJECT(dev), &uar_ops, dev, "rdma-uar", + sizeof(dev->uar_data)); + pci_register_bar(pdev, RDMA_UAR_BAR_IDX,
PCI_BASE_ADDRESS_SPACE_MEMORY, + &dev->uar); +} + +static void init_regs(PCIDevice *pdev) +{ + PVRDMADev *dev = PVRDMA_DEV(pdev); + + set_reg_val(dev, PVRDMA_REG_VERSION, PVRDMA_HW_VERSION); + set_reg_val(dev, PVRDMA_REG_ERR, 0xFFFF); +} + +static void init_dev_caps(PVRDMADev *dev) +{ + size_t pg_tbl_bytes = TARGET_PAGE_SIZE * + (TARGET_PAGE_SIZE / sizeof(uint64_t)); + size_t wr_sz = MAX(sizeof(struct pvrdma_sq_wqe_hdr), + sizeof(struct pvrdma_rq_wqe_hdr)); + + dev->dev_attr.max_qp_wr = pg_tbl_bytes / + (wr_sz + sizeof(struct pvrdma_sge) * + dev->dev_attr.max_sge) - TARGET_PAGE_SIZE; + /* First page is ring state ^^^^ */ + + dev->dev_attr.max_cqe = pg_tbl_bytes / sizeof(struct pvrdma_cqe) - + TARGET_PAGE_SIZE; /* First page is ring state */ + + dev->dev_attr.max_srq_wr = pg_tbl_bytes / + ((sizeof(struct pvrdma_rq_wqe_hdr) + + sizeof(struct pvrdma_sge)) * + dev->dev_attr.max_sge) - TARGET_PAGE_SIZE; +} + +static int pvrdma_check_ram_shared(Object *obj, void *opaque) +{ + bool *shared = opaque; + + if (object_dynamic_cast(obj, "memory-backend-ram")) { + *shared = object_property_get_bool(obj, "share", NULL); + } + + return 0; +} + +static void pvrdma_shutdown_notifier(Notifier *n, void *opaque) +{ + PVRDMADev *dev = container_of(n, PVRDMADev, shutdown_notifier); + PCIDevice *pci_dev = PCI_DEVICE(dev); + + pvrdma_fini(pci_dev); +} + +static void pvrdma_realize(PCIDevice *pdev, Error **errp) +{ + int rc = 0; + PVRDMADev *dev = PVRDMA_DEV(pdev); + Object *memdev_root; + bool ram_shared = false; + PCIDevice *func0; + + rdma_info_report("Initializing device %s %x.%x", pdev->name, + PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); + + if (TARGET_PAGE_SIZE != qemu_real_host_page_size) { + error_setg(errp, "Target page size must be the same as host page size"); + return; + } + + func0 = pci_get_function_0(pdev); + /* Break if not vmxnet3 device in slot 0 */ + if (strcmp(object_get_typename(OBJECT(func0)), TYPE_VMXNET3)) { + error_setg(errp, "Device on %x.0 must be %s", PCI_SLOT(pdev->devfn), + TYPE_VMXNET3); + return; + } + dev->func0 = VMXNET3(func0); + + addrconf_addr_eui48((unsigned char *)&dev->node_guid, + (const char *)&dev->func0->conf.macaddr.a); + + memdev_root = object_resolve_path("/objects", NULL); + if (memdev_root) { + object_child_foreach(memdev_root, pvrdma_check_ram_shared, &ram_shared); + } + if (!ram_shared) { + error_setg(errp, "Only shared memory backed ram is supported"); + return; + } + + dev->dsr_info.dsr = NULL; + + init_pci_config(pdev); + + init_bars(pdev); + + init_regs(pdev); + + rc = init_msix(pdev); + if (rc) { + goto out; + } + + rc = rdma_backend_init(&dev->backend_dev, pdev, &dev->rdma_dev_res, + dev->backend_device_name, dev->backend_port_num, + &dev->dev_attr, &dev->mad_chr); + if (rc) { + goto out; + } + + init_dev_caps(dev); + + rc = rdma_rm_init(&dev->rdma_dev_res, &dev->dev_attr); + if (rc) { + goto out; + } + + rc = pvrdma_qp_ops_init(); + if (rc) { + goto out; + } + + memset(&dev->stats, 0, sizeof(dev->stats)); + + dev->shutdown_notifier.notify = pvrdma_shutdown_notifier; + qemu_register_shutdown_notifier(&dev->shutdown_notifier); + +#ifdef LEGACY_RDMA_REG_MR + rdma_info_report("Using legacy reg_mr"); +#else + rdma_info_report("Using iova reg_mr"); +#endif + +out: + if (rc) { + pvrdma_fini(pdev); + error_append_hint(errp, "Device failed to load\n"); + } +} + +static void pvrdma_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + RdmaProviderClass *ir = 
RDMA_PROVIDER_CLASS(klass); + + k->realize = pvrdma_realize; + k->vendor_id = PCI_VENDOR_ID_VMWARE; + k->device_id = PCI_DEVICE_ID_VMWARE_PVRDMA; + k->revision = 0x00; + k->class_id = PCI_CLASS_NETWORK_OTHER; + + dc->desc = "RDMA Device"; + device_class_set_props(dc, pvrdma_dev_properties); + set_bit(DEVICE_CATEGORY_NETWORK, dc->categories); + + ir->format_statistics = pvrdma_format_statistics; +} + +static const TypeInfo pvrdma_info = { + .name = PVRDMA_HW_NAME, + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(PVRDMADev), + .class_init = pvrdma_class_init, + .interfaces = (InterfaceInfo[]) { + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { INTERFACE_RDMA_PROVIDER }, + { } + } +}; + +static void register_types(void) +{ + type_register_static(&pvrdma_info); +} + +type_init(register_types) diff --git a/hw/rdma/vmw/pvrdma_qp_ops.c b/hw/rdma/vmw/pvrdma_qp_ops.c new file mode 100644 index 000000000..8050287a6 --- /dev/null +++ b/hw/rdma/vmw/pvrdma_qp_ops.c @@ -0,0 +1,298 @@ +/* + * QEMU paravirtual RDMA - QP implementation + * + * Copyright (C) 2018 Oracle + * Copyright (C) 2018 Red Hat Inc + * + * Authors: + * Yuval Shaia + * Marcel Apfelbaum + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +#include "qemu/osdep.h" + +#include "../rdma_utils.h" +#include "../rdma_rm.h" +#include "../rdma_backend.h" + +#include "trace.h" + +#include "pvrdma.h" +#include "standard-headers/rdma/vmw_pvrdma-abi.h" +#include "pvrdma_qp_ops.h" + +typedef struct CompHandlerCtx { + PVRDMADev *dev; + uint32_t cq_handle; + struct pvrdma_cqe cqe; +} CompHandlerCtx; + +/* Send Queue WQE */ +typedef struct PvrdmaSqWqe { + struct pvrdma_sq_wqe_hdr hdr; + struct pvrdma_sge sge[]; +} PvrdmaSqWqe; + +/* Recv Queue WQE */ +typedef struct PvrdmaRqWqe { + struct pvrdma_rq_wqe_hdr hdr; + struct pvrdma_sge sge[]; +} PvrdmaRqWqe; + +/* + * 1. Put CQE on send CQ ring + * 2. Put CQ number on dsr completion ring + * 3. Interrupt host + */ +static int pvrdma_post_cqe(PVRDMADev *dev, uint32_t cq_handle, + struct pvrdma_cqe *cqe, struct ibv_wc *wc) +{ + struct pvrdma_cqe *cqe1; + struct pvrdma_cqne *cqne; + PvrdmaRing *ring; + RdmaRmCQ *cq = rdma_rm_get_cq(&dev->rdma_dev_res, cq_handle); + + if (unlikely(!cq)) { + return -EINVAL; + } + + ring = (PvrdmaRing *)cq->opaque; + + /* Step #1: Put CQE on CQ ring */ + cqe1 = pvrdma_ring_next_elem_write(ring); + if (unlikely(!cqe1)) { + return -EINVAL; + } + + memset(cqe1, 0, sizeof(*cqe1)); + cqe1->wr_id = cqe->wr_id; + cqe1->qp = cqe->qp ? 
cqe->qp : wc->qp_num; + cqe1->opcode = cqe->opcode; + cqe1->status = wc->status; + cqe1->byte_len = wc->byte_len; + cqe1->src_qp = wc->src_qp; + cqe1->wc_flags = wc->wc_flags; + cqe1->vendor_err = wc->vendor_err; + + trace_pvrdma_post_cqe(cq_handle, cq->notify, cqe1->wr_id, cqe1->qp, + cqe1->opcode, cqe1->status, cqe1->byte_len, + cqe1->src_qp, cqe1->wc_flags, cqe1->vendor_err); + + pvrdma_ring_write_inc(ring); + + /* Step #2: Put CQ number on dsr completion ring */ + cqne = pvrdma_ring_next_elem_write(&dev->dsr_info.cq); + if (unlikely(!cqne)) { + return -EINVAL; + } + + cqne->info = cq_handle; + pvrdma_ring_write_inc(&dev->dsr_info.cq); + + if (cq->notify != CNT_CLEAR) { + if (cq->notify == CNT_ARM) { + cq->notify = CNT_CLEAR; + } + post_interrupt(dev, INTR_VEC_CMD_COMPLETION_Q); + } + + return 0; +} + +static void pvrdma_qp_ops_comp_handler(void *ctx, struct ibv_wc *wc) +{ + CompHandlerCtx *comp_ctx = (CompHandlerCtx *)ctx; + + pvrdma_post_cqe(comp_ctx->dev, comp_ctx->cq_handle, &comp_ctx->cqe, wc); + + g_free(ctx); +} + +static void complete_with_error(uint32_t vendor_err, void *ctx) +{ + struct ibv_wc wc = {}; + + wc.status = IBV_WC_GENERAL_ERR; + wc.vendor_err = vendor_err; + + pvrdma_qp_ops_comp_handler(ctx, &wc); +} + +void pvrdma_qp_ops_fini(void) +{ + rdma_backend_unregister_comp_handler(); +} + +int pvrdma_qp_ops_init(void) +{ + rdma_backend_register_comp_handler(pvrdma_qp_ops_comp_handler); + + return 0; +} + +void pvrdma_qp_send(PVRDMADev *dev, uint32_t qp_handle) +{ + RdmaRmQP *qp; + PvrdmaSqWqe *wqe; + PvrdmaRing *ring; + int sgid_idx; + union ibv_gid *sgid; + + qp = rdma_rm_get_qp(&dev->rdma_dev_res, qp_handle); + if (unlikely(!qp)) { + return; + } + + ring = (PvrdmaRing *)qp->opaque; + + wqe = (struct PvrdmaSqWqe *)pvrdma_ring_next_elem_read(ring); + while (wqe) { + CompHandlerCtx *comp_ctx; + + /* Prepare CQE */ + comp_ctx = g_malloc(sizeof(CompHandlerCtx)); + comp_ctx->dev = dev; + comp_ctx->cq_handle = qp->send_cq_handle; + comp_ctx->cqe.wr_id = wqe->hdr.wr_id; + comp_ctx->cqe.qp = qp_handle; + comp_ctx->cqe.opcode = IBV_WC_SEND; + + sgid = rdma_rm_get_gid(&dev->rdma_dev_res, wqe->hdr.wr.ud.av.gid_index); + if (!sgid) { + rdma_error_report("Failed to get gid for idx %d", + wqe->hdr.wr.ud.av.gid_index); + complete_with_error(VENDOR_ERR_INV_GID_IDX, comp_ctx); + continue; + } + + sgid_idx = rdma_rm_get_backend_gid_index(&dev->rdma_dev_res, + &dev->backend_dev, + wqe->hdr.wr.ud.av.gid_index); + if (sgid_idx <= 0) { + rdma_error_report("Failed to get bk sgid_idx for sgid_idx %d", + wqe->hdr.wr.ud.av.gid_index); + complete_with_error(VENDOR_ERR_INV_GID_IDX, comp_ctx); + continue; + } + + if (wqe->hdr.num_sge > dev->dev_attr.max_sge) { + rdma_error_report("Invalid num_sge=%d (max %d)", wqe->hdr.num_sge, + dev->dev_attr.max_sge); + complete_with_error(VENDOR_ERR_INV_NUM_SGE, comp_ctx); + continue; + } + + rdma_backend_post_send(&dev->backend_dev, &qp->backend_qp, qp->qp_type, + (struct ibv_sge *)&wqe->sge[0], wqe->hdr.num_sge, + sgid_idx, sgid, + (union ibv_gid *)wqe->hdr.wr.ud.av.dgid, + wqe->hdr.wr.ud.remote_qpn, + wqe->hdr.wr.ud.remote_qkey, comp_ctx); + + pvrdma_ring_read_inc(ring); + + wqe = pvrdma_ring_next_elem_read(ring); + } +} + +void pvrdma_qp_recv(PVRDMADev *dev, uint32_t qp_handle) +{ + RdmaRmQP *qp; + PvrdmaRqWqe *wqe; + PvrdmaRing *ring; + + qp = rdma_rm_get_qp(&dev->rdma_dev_res, qp_handle); + if (unlikely(!qp)) { + return; + } + + ring = &((PvrdmaRing *)qp->opaque)[1]; + + wqe = (struct PvrdmaRqWqe *)pvrdma_ring_next_elem_read(ring); + while (wqe) { + 
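+        /*
+         * Mirror of the send path: each receive WR gets a CompHandlerCtx
+         * that pvrdma_qp_ops_comp_handler() later turns into a CQE on the
+         * receive CQ.
+         */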
CompHandlerCtx *comp_ctx; + + /* Prepare CQE */ + comp_ctx = g_malloc(sizeof(CompHandlerCtx)); + comp_ctx->dev = dev; + comp_ctx->cq_handle = qp->recv_cq_handle; + comp_ctx->cqe.wr_id = wqe->hdr.wr_id; + comp_ctx->cqe.qp = qp_handle; + comp_ctx->cqe.opcode = IBV_WC_RECV; + + if (wqe->hdr.num_sge > dev->dev_attr.max_sge) { + rdma_error_report("Invalid num_sge=%d (max %d)", wqe->hdr.num_sge, + dev->dev_attr.max_sge); + complete_with_error(VENDOR_ERR_INV_NUM_SGE, comp_ctx); + continue; + } + + rdma_backend_post_recv(&dev->backend_dev, &qp->backend_qp, qp->qp_type, + (struct ibv_sge *)&wqe->sge[0], wqe->hdr.num_sge, + comp_ctx); + + pvrdma_ring_read_inc(ring); + + wqe = pvrdma_ring_next_elem_read(ring); + } +} + +void pvrdma_srq_recv(PVRDMADev *dev, uint32_t srq_handle) +{ + RdmaRmSRQ *srq; + PvrdmaRqWqe *wqe; + PvrdmaRing *ring; + + srq = rdma_rm_get_srq(&dev->rdma_dev_res, srq_handle); + if (unlikely(!srq)) { + return; + } + + ring = (PvrdmaRing *)srq->opaque; + + wqe = (struct PvrdmaRqWqe *)pvrdma_ring_next_elem_read(ring); + while (wqe) { + CompHandlerCtx *comp_ctx; + + /* Prepare CQE */ + comp_ctx = g_malloc(sizeof(CompHandlerCtx)); + comp_ctx->dev = dev; + comp_ctx->cq_handle = srq->recv_cq_handle; + comp_ctx->cqe.wr_id = wqe->hdr.wr_id; + comp_ctx->cqe.qp = 0; + comp_ctx->cqe.opcode = IBV_WC_RECV; + + if (wqe->hdr.num_sge > dev->dev_attr.max_sge) { + rdma_error_report("Invalid num_sge=%d (max %d)", wqe->hdr.num_sge, + dev->dev_attr.max_sge); + complete_with_error(VENDOR_ERR_INV_NUM_SGE, comp_ctx); + continue; + } + + rdma_backend_post_srq_recv(&dev->backend_dev, &srq->backend_srq, + (struct ibv_sge *)&wqe->sge[0], + wqe->hdr.num_sge, + comp_ctx); + + pvrdma_ring_read_inc(ring); + + wqe = pvrdma_ring_next_elem_read(ring); + } + +} + +void pvrdma_cq_poll(RdmaDeviceResources *dev_res, uint32_t cq_handle) +{ + RdmaRmCQ *cq; + + cq = rdma_rm_get_cq(dev_res, cq_handle); + if (!cq) { + return; + } + + rdma_backend_poll_cq(dev_res, &cq->backend_cq); +} diff --git a/hw/rdma/vmw/pvrdma_qp_ops.h b/hw/rdma/vmw/pvrdma_qp_ops.h new file mode 100644 index 000000000..bf2b15c5c --- /dev/null +++ b/hw/rdma/vmw/pvrdma_qp_ops.h @@ -0,0 +1,28 @@ +/* + * QEMU VMWARE paravirtual RDMA QP Operations + * + * Copyright (C) 2018 Oracle + * Copyright (C) 2018 Red Hat Inc + * + * Authors: + * Yuval Shaia + * Marcel Apfelbaum + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +#ifndef PVRDMA_QP_OPS_H +#define PVRDMA_QP_OPS_H + +#include "pvrdma.h" + +int pvrdma_qp_ops_init(void); +void pvrdma_qp_ops_fini(void); +void pvrdma_qp_send(PVRDMADev *dev, uint32_t qp_handle); +void pvrdma_qp_recv(PVRDMADev *dev, uint32_t qp_handle); +void pvrdma_srq_recv(PVRDMADev *dev, uint32_t srq_handle); +void pvrdma_cq_poll(RdmaDeviceResources *dev_res, uint32_t cq_handle); + +#endif diff --git a/hw/rdma/vmw/trace-events b/hw/rdma/vmw/trace-events new file mode 100644 index 000000000..a6c77e1e1 --- /dev/null +++ b/hw/rdma/vmw/trace-events @@ -0,0 +1,17 @@ +# See docs/devel/tracing.rst for syntax documentation. 
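+#
+# Each declaration below generates a trace_<name>() helper with the same
+# signature; e.g. the UAR doorbell path in pvrdma_main.c reports a send
+# doorbell with: trace_pvrdma_uar_write(addr, val, "QP", "SEND", handle, 0);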
+ +# pvrdma_main.c +pvrdma_regs_read(uint64_t addr, uint64_t val) "pvrdma.regs[0x%"PRIx64"]=0x%"PRIx64 +pvrdma_regs_write(uint64_t addr, uint64_t val, const char *reg_name, const char *val_name) "pvrdma.regs[0x%"PRIx64"]=0x%"PRIx64" (%s %s)" +pvrdma_uar_write(uint64_t addr, uint64_t val, const char *reg_name, const char *val_name, int val1, int val2) "uar[0x%"PRIx64"]=0x%"PRIx64" (cls=%s, op=%s, obj=%d, val=%d)" + +# pvrdma_cmd.c +pvrdma_map_to_pdir_host_virt(void *vfirst, void *vremaped) "mremap %p -> %p" +pvrdma_map_to_pdir_next_page(int page_idx, void *vnext, void *vremaped) "mremap [%d] %p -> %p" +pvrdma_exec_cmd(int cmd, int err) "cmd=%d, err=%d" + +# pvrdma_dev_ring.c +pvrdma_ring_next_elem_read_no_data(char *ring_name) "pvrdma_ring %s is empty" + +# pvrdma_qp_ops.c +pvrdma_post_cqe(uint32_t cq_handle, int notify, uint64_t wr_id, uint64_t qpn, uint32_t op_code, uint32_t status, uint32_t byte_len, uint32_t src_qp, uint32_t wc_flags, uint32_t vendor_err) "cq_handle=%d, notify=%d, wr_id=0x%"PRIx64", qpn=0x%"PRIx64", opcode=%d, status=%d, byte_len=%d, src_qp=%d, wc_flags=%d, vendor_err=%d" diff --git a/hw/rdma/vmw/trace.h b/hw/rdma/vmw/trace.h new file mode 100644 index 000000000..3ebc9fb7a --- /dev/null +++ b/hw/rdma/vmw/trace.h @@ -0,0 +1 @@ +#include "trace/trace-hw_rdma_vmw.h" diff --git a/hw/remote/Kconfig b/hw/remote/Kconfig new file mode 100644 index 000000000..08c16e235 --- /dev/null +++ b/hw/remote/Kconfig @@ -0,0 +1,4 @@ +config MULTIPROCESS + bool + depends on PCI && PCI_EXPRESS && KVM + select REMOTE_PCIHOST diff --git a/hw/remote/iohub.c b/hw/remote/iohub.c new file mode 100644 index 000000000..547d597f0 --- /dev/null +++ b/hw/remote/iohub.c @@ -0,0 +1,118 @@ +/* + * Remote IO Hub + * + * Copyright © 2018, 2021 Oracle and/or its affiliates. + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
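+ *
+ * The IO hub stands in for the guest interrupt controller on behalf of
+ * remote devices: INTx levels are tracked per PIRQ and forwarded to the
+ * proxy through irqfd/resamplefd event notifiers.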
+ * + */ + +#include "qemu/osdep.h" +#include "qemu-common.h" + +#include "hw/pci/pci.h" +#include "hw/pci/pci_ids.h" +#include "hw/pci/pci_bus.h" +#include "qemu/thread.h" +#include "hw/remote/machine.h" +#include "hw/remote/iohub.h" +#include "qemu/main-loop.h" + +void remote_iohub_init(RemoteIOHubState *iohub) +{ + int pirq; + + memset(&iohub->irqfds, 0, sizeof(iohub->irqfds)); + memset(&iohub->resamplefds, 0, sizeof(iohub->resamplefds)); + + for (pirq = 0; pirq < REMOTE_IOHUB_NB_PIRQS; pirq++) { + qemu_mutex_init(&iohub->irq_level_lock[pirq]); + iohub->irq_level[pirq] = 0; + event_notifier_init_fd(&iohub->irqfds[pirq], -1); + event_notifier_init_fd(&iohub->resamplefds[pirq], -1); + } +} + +void remote_iohub_finalize(RemoteIOHubState *iohub) +{ + int pirq; + + for (pirq = 0; pirq < REMOTE_IOHUB_NB_PIRQS; pirq++) { + qemu_set_fd_handler(event_notifier_get_fd(&iohub->resamplefds[pirq]), + NULL, NULL, NULL); + event_notifier_cleanup(&iohub->irqfds[pirq]); + event_notifier_cleanup(&iohub->resamplefds[pirq]); + qemu_mutex_destroy(&iohub->irq_level_lock[pirq]); + } +} + +int remote_iohub_map_irq(PCIDevice *pci_dev, int intx) +{ + return pci_dev->devfn; +} + +void remote_iohub_set_irq(void *opaque, int pirq, int level) +{ + RemoteIOHubState *iohub = opaque; + + assert(pirq >= 0); + assert(pirq < PCI_DEVFN_MAX); + + QEMU_LOCK_GUARD(&iohub->irq_level_lock[pirq]); + + if (level) { + if (++iohub->irq_level[pirq] == 1) { + event_notifier_set(&iohub->irqfds[pirq]); + } + } else if (iohub->irq_level[pirq] > 0) { + iohub->irq_level[pirq]--; + } +} + +static void intr_resample_handler(void *opaque) +{ + ResampleToken *token = opaque; + RemoteIOHubState *iohub = token->iohub; + int pirq, s; + + pirq = token->pirq; + + s = event_notifier_test_and_clear(&iohub->resamplefds[pirq]); + + assert(s >= 0); + + QEMU_LOCK_GUARD(&iohub->irq_level_lock[pirq]); + + if (iohub->irq_level[pirq]) { + event_notifier_set(&iohub->irqfds[pirq]); + } +} + +void process_set_irqfd_msg(PCIDevice *pci_dev, MPQemuMsg *msg) +{ + RemoteMachineState *machine = REMOTE_MACHINE(current_machine); + RemoteIOHubState *iohub = &machine->iohub; + int pirq, intx; + + intx = pci_get_byte(pci_dev->config + PCI_INTERRUPT_PIN) - 1; + + pirq = remote_iohub_map_irq(pci_dev, intx); + + if (event_notifier_get_fd(&iohub->irqfds[pirq]) != -1) { + qemu_set_fd_handler(event_notifier_get_fd(&iohub->resamplefds[pirq]), + NULL, NULL, NULL); + event_notifier_cleanup(&iohub->irqfds[pirq]); + event_notifier_cleanup(&iohub->resamplefds[pirq]); + memset(&iohub->token[pirq], 0, sizeof(ResampleToken)); + } + + event_notifier_init_fd(&iohub->irqfds[pirq], msg->fds[0]); + event_notifier_init_fd(&iohub->resamplefds[pirq], msg->fds[1]); + + iohub->token[pirq].iohub = iohub; + iohub->token[pirq].pirq = pirq; + + qemu_set_fd_handler(msg->fds[1], intr_resample_handler, NULL, + &iohub->token[pirq]); +} diff --git a/hw/remote/machine.c b/hw/remote/machine.c new file mode 100644 index 000000000..952105eab --- /dev/null +++ b/hw/remote/machine.c @@ -0,0 +1,79 @@ +/* + * Machine for remote device + * + * This machine type is used by the remote device process in multi-process + * QEMU. QEMU device models depend on parent busses, interrupt controllers, + * memory regions, etc. The remote machine type offers this environment so + * that QEMU device models can be used as remote devices. + * + * Copyright © 2018, 2021 Oracle and/or its affiliates. + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
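+ *
+ * Illustrative invocation (device, ids and fd value are examples only;
+ * see the multi-process documentation for the full flow):
+ *
+ *   qemu-system-x86_64 -machine x-remote \
+ *       -device lsi53c895a,id=lsi0 \
+ *       -object x-remote-object,id=robj0,devid=lsi0,fd=4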
+ * + */ + +#include "qemu/osdep.h" +#include "qemu-common.h" + +#include "hw/remote/machine.h" +#include "exec/memory.h" +#include "qapi/error.h" +#include "hw/pci/pci_host.h" +#include "hw/remote/iohub.h" + +static void remote_machine_init(MachineState *machine) +{ + MemoryRegion *system_memory, *system_io, *pci_memory; + RemoteMachineState *s = REMOTE_MACHINE(machine); + RemotePCIHost *rem_host; + PCIHostState *pci_host; + + system_memory = get_system_memory(); + system_io = get_system_io(); + + pci_memory = g_new(MemoryRegion, 1); + memory_region_init(pci_memory, NULL, "pci", UINT64_MAX); + + rem_host = REMOTE_PCIHOST(qdev_new(TYPE_REMOTE_PCIHOST)); + + rem_host->mr_pci_mem = pci_memory; + rem_host->mr_sys_mem = system_memory; + rem_host->mr_sys_io = system_io; + + s->host = rem_host; + + object_property_add_child(OBJECT(s), "remote-pcihost", OBJECT(rem_host)); + memory_region_add_subregion_overlap(system_memory, 0x0, pci_memory, -1); + + qdev_realize(DEVICE(rem_host), sysbus_get_default(), &error_fatal); + + pci_host = PCI_HOST_BRIDGE(rem_host); + + remote_iohub_init(&s->iohub); + + pci_bus_irqs(pci_host->bus, remote_iohub_set_irq, remote_iohub_map_irq, + &s->iohub, REMOTE_IOHUB_NB_PIRQS); +} + +static void remote_machine_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + + mc->init = remote_machine_init; + mc->desc = "Experimental remote machine"; +} + +static const TypeInfo remote_machine = { + .name = TYPE_REMOTE_MACHINE, + .parent = TYPE_MACHINE, + .instance_size = sizeof(RemoteMachineState), + .class_init = remote_machine_class_init, +}; + +static void remote_machine_register_types(void) +{ + type_register_static(&remote_machine); +} + +type_init(remote_machine_register_types); diff --git a/hw/remote/memory.c b/hw/remote/memory.c new file mode 100644 index 000000000..6e21ab1a4 --- /dev/null +++ b/hw/remote/memory.c @@ -0,0 +1,63 @@ +/* + * Memory manager for remote device + * + * Copyright © 2018, 2021 Oracle and/or its affiliates. + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
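+ *
+ * On every SYNC_SYSMEM message the remote process tears down its RAM
+ * subregions and remaps the fd-backed regions supplied by the proxy at
+ * the guest physical addresses carried in the message.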
+ * + */ + +#include "qemu/osdep.h" +#include "qemu-common.h" + +#include "hw/remote/memory.h" +#include "exec/ram_addr.h" +#include "qapi/error.h" + +static void remote_sysmem_reset(void) +{ + MemoryRegion *sysmem, *subregion, *next; + + sysmem = get_system_memory(); + + QTAILQ_FOREACH_SAFE(subregion, &sysmem->subregions, subregions_link, next) { + if (subregion->ram) { + memory_region_del_subregion(sysmem, subregion); + object_unparent(OBJECT(subregion)); + } + } +} + +void remote_sysmem_reconfig(MPQemuMsg *msg, Error **errp) +{ + ERRP_GUARD(); + SyncSysmemMsg *sysmem_info = &msg->data.sync_sysmem; + MemoryRegion *sysmem, *subregion; + static unsigned int suffix; + int region; + + sysmem = get_system_memory(); + + remote_sysmem_reset(); + + for (region = 0; region < msg->num_fds; region++, suffix++) { + g_autofree char *name = g_strdup_printf("remote-mem-%u", suffix); + subregion = g_new(MemoryRegion, 1); + memory_region_init_ram_from_fd(subregion, NULL, + name, sysmem_info->sizes[region], + RAM_SHARED, msg->fds[region], + sysmem_info->offsets[region], + errp); + + if (*errp) { + g_free(subregion); + remote_sysmem_reset(); + return; + } + + memory_region_add_subregion(sysmem, sysmem_info->gpas[region], + subregion); + + } +} diff --git a/hw/remote/meson.build b/hw/remote/meson.build new file mode 100644 index 000000000..e6a557424 --- /dev/null +++ b/hw/remote/meson.build @@ -0,0 +1,13 @@ +remote_ss = ss.source_set() + +remote_ss.add(when: 'CONFIG_MULTIPROCESS', if_true: files('machine.c')) +remote_ss.add(when: 'CONFIG_MULTIPROCESS', if_true: files('mpqemu-link.c')) +remote_ss.add(when: 'CONFIG_MULTIPROCESS', if_true: files('message.c')) +remote_ss.add(when: 'CONFIG_MULTIPROCESS', if_true: files('remote-obj.c')) +remote_ss.add(when: 'CONFIG_MULTIPROCESS', if_true: files('proxy.c')) +remote_ss.add(when: 'CONFIG_MULTIPROCESS', if_true: files('iohub.c')) + +specific_ss.add(when: 'CONFIG_MULTIPROCESS', if_true: files('memory.c')) +specific_ss.add(when: 'CONFIG_MULTIPROCESS', if_true: files('proxy-memory-listener.c')) + +softmmu_ss.add_all(when: 'CONFIG_MULTIPROCESS', if_true: remote_ss) diff --git a/hw/remote/message.c b/hw/remote/message.c new file mode 100644 index 000000000..11d729845 --- /dev/null +++ b/hw/remote/message.c @@ -0,0 +1,230 @@ +/* + * Copyright © 2020, 2021 Oracle and/or its affiliates. + * + * This work is licensed under the terms of the GNU GPL-v2, version 2 or later. + * + * See the COPYING file in the top-level directory. 
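+ *
+ * Message loop of the remote process: each MPQemuMsg from the proxy is
+ * dispatched to a PCI config, BAR, sysmem-sync, irqfd or reset handler,
+ * and an MPQEMU_CMD_RET reply carries the result back on the channel.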
+ * + */ + +#include "qemu/osdep.h" +#include "qemu-common.h" + +#include "hw/remote/machine.h" +#include "io/channel.h" +#include "hw/remote/mpqemu-link.h" +#include "qapi/error.h" +#include "sysemu/runstate.h" +#include "hw/pci/pci.h" +#include "exec/memattrs.h" +#include "hw/remote/memory.h" +#include "hw/remote/iohub.h" +#include "sysemu/reset.h" + +static void process_config_write(QIOChannel *ioc, PCIDevice *dev, + MPQemuMsg *msg, Error **errp); +static void process_config_read(QIOChannel *ioc, PCIDevice *dev, + MPQemuMsg *msg, Error **errp); +static void process_bar_write(QIOChannel *ioc, MPQemuMsg *msg, Error **errp); +static void process_bar_read(QIOChannel *ioc, MPQemuMsg *msg, Error **errp); +static void process_device_reset_msg(QIOChannel *ioc, PCIDevice *dev, + Error **errp); + +void coroutine_fn mpqemu_remote_msg_loop_co(void *data) +{ + g_autofree RemoteCommDev *com = (RemoteCommDev *)data; + PCIDevice *pci_dev = NULL; + Error *local_err = NULL; + + assert(com->ioc); + + pci_dev = com->dev; + for (; !local_err;) { + MPQemuMsg msg = {0}; + + if (!mpqemu_msg_recv(&msg, com->ioc, &local_err)) { + break; + } + + if (!mpqemu_msg_valid(&msg)) { + error_setg(&local_err, "Received invalid message from proxy" + "in remote process pid="FMT_pid"", + getpid()); + break; + } + + switch (msg.cmd) { + case MPQEMU_CMD_PCI_CFGWRITE: + process_config_write(com->ioc, pci_dev, &msg, &local_err); + break; + case MPQEMU_CMD_PCI_CFGREAD: + process_config_read(com->ioc, pci_dev, &msg, &local_err); + break; + case MPQEMU_CMD_BAR_WRITE: + process_bar_write(com->ioc, &msg, &local_err); + break; + case MPQEMU_CMD_BAR_READ: + process_bar_read(com->ioc, &msg, &local_err); + break; + case MPQEMU_CMD_SYNC_SYSMEM: + remote_sysmem_reconfig(&msg, &local_err); + break; + case MPQEMU_CMD_SET_IRQFD: + process_set_irqfd_msg(pci_dev, &msg); + break; + case MPQEMU_CMD_DEVICE_RESET: + process_device_reset_msg(com->ioc, pci_dev, &local_err); + break; + default: + error_setg(&local_err, + "Unknown command (%d) received for device %s" + " (pid="FMT_pid")", + msg.cmd, DEVICE(pci_dev)->id, getpid()); + } + } + + if (local_err) { + error_report_err(local_err); + qemu_system_shutdown_request(SHUTDOWN_CAUSE_HOST_ERROR); + } else { + qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN); + } +} + +static void process_config_write(QIOChannel *ioc, PCIDevice *dev, + MPQemuMsg *msg, Error **errp) +{ + ERRP_GUARD(); + PciConfDataMsg *conf = (PciConfDataMsg *)&msg->data.pci_conf_data; + MPQemuMsg ret = { 0 }; + + if ((conf->addr + sizeof(conf->val)) > pci_config_size(dev)) { + error_setg(errp, "Bad address for PCI config write, pid "FMT_pid".", + getpid()); + ret.data.u64 = UINT64_MAX; + } else { + pci_default_write_config(dev, conf->addr, conf->val, conf->len); + } + + ret.cmd = MPQEMU_CMD_RET; + ret.size = sizeof(ret.data.u64); + + if (!mpqemu_msg_send(&ret, ioc, NULL)) { + error_prepend(errp, "Error returning code to proxy, pid "FMT_pid": ", + getpid()); + } +} + +static void process_config_read(QIOChannel *ioc, PCIDevice *dev, + MPQemuMsg *msg, Error **errp) +{ + ERRP_GUARD(); + PciConfDataMsg *conf = (PciConfDataMsg *)&msg->data.pci_conf_data; + MPQemuMsg ret = { 0 }; + + if ((conf->addr + sizeof(conf->val)) > pci_config_size(dev)) { + error_setg(errp, "Bad address for PCI config read, pid "FMT_pid".", + getpid()); + ret.data.u64 = UINT64_MAX; + } else { + ret.data.u64 = pci_default_read_config(dev, conf->addr, conf->len); + } + + ret.cmd = MPQEMU_CMD_RET; + ret.size = sizeof(ret.data.u64); + + if 
(!mpqemu_msg_send(&ret, ioc, NULL)) { + error_prepend(errp, "Error returning code to proxy, pid "FMT_pid": ", + getpid()); + } +} + +static void process_bar_write(QIOChannel *ioc, MPQemuMsg *msg, Error **errp) +{ + ERRP_GUARD(); + BarAccessMsg *bar_access = &msg->data.bar_access; + AddressSpace *as = + bar_access->memory ? &address_space_memory : &address_space_io; + MPQemuMsg ret = { 0 }; + MemTxResult res; + uint64_t val; + + if (!is_power_of_2(bar_access->size) || + (bar_access->size > sizeof(uint64_t))) { + ret.data.u64 = UINT64_MAX; + goto fail; + } + + val = cpu_to_le64(bar_access->val); + + res = address_space_rw(as, bar_access->addr, MEMTXATTRS_UNSPECIFIED, + (void *)&val, bar_access->size, true); + + if (res != MEMTX_OK) { + error_setg(errp, "Bad address %"PRIx64" for mem write, pid "FMT_pid".", + bar_access->addr, getpid()); + ret.data.u64 = -1; + } + +fail: + ret.cmd = MPQEMU_CMD_RET; + ret.size = sizeof(ret.data.u64); + + if (!mpqemu_msg_send(&ret, ioc, NULL)) { + error_prepend(errp, "Error returning code to proxy, pid "FMT_pid": ", + getpid()); + } +} + +static void process_bar_read(QIOChannel *ioc, MPQemuMsg *msg, Error **errp) +{ + ERRP_GUARD(); + BarAccessMsg *bar_access = &msg->data.bar_access; + MPQemuMsg ret = { 0 }; + AddressSpace *as; + MemTxResult res; + uint64_t val = 0; + + as = bar_access->memory ? &address_space_memory : &address_space_io; + + if (!is_power_of_2(bar_access->size) || + (bar_access->size > sizeof(uint64_t))) { + val = UINT64_MAX; + goto fail; + } + + res = address_space_rw(as, bar_access->addr, MEMTXATTRS_UNSPECIFIED, + (void *)&val, bar_access->size, false); + + if (res != MEMTX_OK) { + error_setg(errp, "Bad address %"PRIx64" for mem read, pid "FMT_pid".", + bar_access->addr, getpid()); + val = UINT64_MAX; + } + +fail: + ret.cmd = MPQEMU_CMD_RET; + ret.data.u64 = le64_to_cpu(val); + ret.size = sizeof(ret.data.u64); + + if (!mpqemu_msg_send(&ret, ioc, NULL)) { + error_prepend(errp, "Error returning code to proxy, pid "FMT_pid": ", + getpid()); + } +} + +static void process_device_reset_msg(QIOChannel *ioc, PCIDevice *dev, + Error **errp) +{ + DeviceClass *dc = DEVICE_GET_CLASS(dev); + DeviceState *s = DEVICE(dev); + MPQemuMsg ret = { 0 }; + + if (dc->reset) { + dc->reset(s); + } + + ret.cmd = MPQEMU_CMD_RET; + + mpqemu_msg_send(&ret, ioc, errp); +} diff --git a/hw/remote/mpqemu-link.c b/hw/remote/mpqemu-link.c new file mode 100644 index 000000000..7e841820e --- /dev/null +++ b/hw/remote/mpqemu-link.c @@ -0,0 +1,264 @@ +/* + * Communication channel between QEMU and remote device process + * + * Copyright © 2018, 2021 Oracle and/or its affiliates. + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +#include "qemu/osdep.h" +#include "qemu-common.h" + +#include "qemu/module.h" +#include "hw/remote/mpqemu-link.h" +#include "qapi/error.h" +#include "qemu/iov.h" +#include "qemu/error-report.h" +#include "qemu/main-loop.h" +#include "io/channel.h" +#include "sysemu/iothread.h" +#include "trace.h" + +/* + * Send message over the ioc QIOChannel. + * This function is safe to call from: + * - main loop in co-routine context. Will block the main loop if not in + * co-routine context; + * - vCPU thread with no co-routine context and if the channel is not part + * of the main loop handling; + * - IOThread within co-routine context, outside of co-routine context + * will block IOThread; + * Returns true if no errors were encountered, false otherwise. 
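+ *
+ * Wire format: the fixed-size header (MPQEMU_MSG_HDR_SIZE bytes) followed
+ * by msg->size payload bytes; file descriptors travel as ancillary data on
+ * the socket. A minimal caller sketch (error handling elided; ioc and err
+ * as in the callers below):
+ *
+ *   MPQemuMsg msg = { .cmd = MPQEMU_CMD_DEVICE_RESET, .size = 0 };
+ *   if (!mpqemu_msg_send(&msg, ioc, &err)) {
+ *       error_report_err(err);
+ *   }
+ *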
+ */
+bool mpqemu_msg_send(MPQemuMsg *msg, QIOChannel *ioc, Error **errp)
+{
+    bool iolock = qemu_mutex_iothread_locked();
+    bool iothread = qemu_in_iothread();
+    struct iovec send[2] = {};
+    int *fds = NULL;
+    size_t nfds = 0;
+    bool ret = false;
+
+    send[0].iov_base = msg;
+    send[0].iov_len = MPQEMU_MSG_HDR_SIZE;
+
+    send[1].iov_base = (void *)&msg->data;
+    send[1].iov_len = msg->size;
+
+    if (msg->num_fds) {
+        nfds = msg->num_fds;
+        fds = msg->fds;
+    }
+
+    /*
+     * Don't use in an IOThread outside of coroutine context, as
+     * it will block the IOThread.
+     */
+    assert(qemu_in_coroutine() || !iothread);
+
+    /*
+     * Skip unlocking/locking the iothread lock when the IOThread is running
+     * in coroutine context. Coroutine context is asserted above
+     * for the IOThread case.
+     * Also skip lock handling while in a coroutine in the main context.
+     */
+    if (iolock && !iothread && !qemu_in_coroutine()) {
+        qemu_mutex_unlock_iothread();
+    }
+
+    if (!qio_channel_writev_full_all(ioc, send, G_N_ELEMENTS(send),
+                                     fds, nfds, errp)) {
+        ret = true;
+    } else {
+        trace_mpqemu_send_io_error(msg->cmd, msg->size, nfds);
+    }
+
+    if (iolock && !iothread && !qemu_in_coroutine()) {
+        /* See the comment above on why locking is skipped here. */
+        qemu_mutex_lock_iothread();
+    }
+
+    return ret;
+}
+
+/*
+ * Read a message from the ioc QIOChannel.
+ * This function is safe to call from:
+ * - the main loop in coroutine context; it will block the main loop if
+ *   not in coroutine context;
+ * - a vCPU thread with no coroutine context, if the channel is not part
+ *   of the main loop handling;
+ * - an IOThread within coroutine context; outside of coroutine context
+ *   it will block the IOThread.
+ */
+static ssize_t mpqemu_read(QIOChannel *ioc, void *buf, size_t len, int **fds,
+                           size_t *nfds, Error **errp)
+{
+    struct iovec iov = { .iov_base = buf, .iov_len = len };
+    bool iolock = qemu_mutex_iothread_locked();
+    bool iothread = qemu_in_iothread();
+    int ret = -1;
+
+    /*
+     * Don't use in an IOThread outside of coroutine context, as
+     * it will block the IOThread.
+     */
+    assert(qemu_in_coroutine() || !iothread);
+
+    if (iolock && !iothread && !qemu_in_coroutine()) {
+        qemu_mutex_unlock_iothread();
+    }
+
+    ret = qio_channel_readv_full_all_eof(ioc, &iov, 1, fds, nfds, errp);
+
+    if (iolock && !iothread && !qemu_in_coroutine()) {
+        qemu_mutex_lock_iothread();
+    }
+
+    return (ret <= 0) ?
ret : iov.iov_len; +} + +bool mpqemu_msg_recv(MPQemuMsg *msg, QIOChannel *ioc, Error **errp) +{ + ERRP_GUARD(); + g_autofree int *fds = NULL; + size_t nfds = 0; + ssize_t len; + bool ret = false; + + len = mpqemu_read(ioc, msg, MPQEMU_MSG_HDR_SIZE, &fds, &nfds, errp); + if (len <= 0) { + goto fail; + } else if (len != MPQEMU_MSG_HDR_SIZE) { + error_setg(errp, "Message header corrupted"); + goto fail; + } + + if (msg->size > sizeof(msg->data)) { + error_setg(errp, "Invalid size for message"); + goto fail; + } + + if (!msg->size) { + goto copy_fds; + } + + len = mpqemu_read(ioc, &msg->data, msg->size, NULL, NULL, errp); + if (len <= 0) { + goto fail; + } + if (len != msg->size) { + error_setg(errp, "Unable to read full message"); + goto fail; + } + +copy_fds: + msg->num_fds = nfds; + if (nfds > G_N_ELEMENTS(msg->fds)) { + error_setg(errp, + "Overflow error: received %zu fds, more than max of %d fds", + nfds, REMOTE_MAX_FDS); + goto fail; + } + if (nfds) { + memcpy(msg->fds, fds, nfds * sizeof(int)); + } + + ret = true; + +fail: + if (*errp) { + trace_mpqemu_recv_io_error(msg->cmd, msg->size, nfds); + } + while (*errp && nfds) { + close(fds[nfds - 1]); + nfds--; + } + + return ret; +} + +/* + * Send msg and wait for a reply with command code RET_MSG. + * Returns the message received of size u64 or UINT64_MAX + * on error. + * Called from VCPU thread in non-coroutine context. + * Used by the Proxy object to communicate to remote processes. + */ +uint64_t mpqemu_msg_send_and_await_reply(MPQemuMsg *msg, PCIProxyDev *pdev, + Error **errp) +{ + MPQemuMsg msg_reply = {0}; + uint64_t ret = UINT64_MAX; + + assert(!qemu_in_coroutine()); + + QEMU_LOCK_GUARD(&pdev->io_mutex); + if (!mpqemu_msg_send(msg, pdev->ioc, errp)) { + return ret; + } + + if (!mpqemu_msg_recv(&msg_reply, pdev->ioc, errp)) { + return ret; + } + + if (!mpqemu_msg_valid(&msg_reply) || msg_reply.cmd != MPQEMU_CMD_RET) { + error_setg(errp, "ERROR: Invalid reply received for command %d", + msg->cmd); + return ret; + } + + return msg_reply.data.u64; +} + +bool mpqemu_msg_valid(MPQemuMsg *msg) +{ + if (msg->cmd >= MPQEMU_CMD_MAX || msg->cmd < 0) { + return false; + } + + /* Verify FDs. */ + if (msg->num_fds >= REMOTE_MAX_FDS) { + return false; + } + + if (msg->num_fds > 0) { + for (int i = 0; i < msg->num_fds; i++) { + if (fcntl(msg->fds[i], F_GETFL) == -1) { + return false; + } + } + } + + /* Verify message specific fields. */ + switch (msg->cmd) { + case MPQEMU_CMD_SYNC_SYSMEM: + if (msg->num_fds == 0 || msg->size != sizeof(SyncSysmemMsg)) { + return false; + } + break; + case MPQEMU_CMD_PCI_CFGWRITE: + case MPQEMU_CMD_PCI_CFGREAD: + if (msg->size != sizeof(PciConfDataMsg)) { + return false; + } + break; + case MPQEMU_CMD_BAR_WRITE: + case MPQEMU_CMD_BAR_READ: + if ((msg->size != sizeof(BarAccessMsg)) || (msg->num_fds != 0)) { + return false; + } + break; + case MPQEMU_CMD_SET_IRQFD: + if (msg->size || (msg->num_fds != 2)) { + return false; + } + break; + default: + break; + } + + return true; +} diff --git a/hw/remote/proxy-memory-listener.c b/hw/remote/proxy-memory-listener.c new file mode 100644 index 000000000..882c9b485 --- /dev/null +++ b/hw/remote/proxy-memory-listener.c @@ -0,0 +1,226 @@ +/* + * Copyright © 2018, 2021 Oracle and/or its affiliates. + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
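+ *
+ * Proxy-side listener: collects RAM MemoryRegionSections, merges adjacent
+ * fd-backed sections, and ships the resulting <fd, gpa, size, offset>
+ * tuples to the remote process in a single SYNC_SYSMEM message.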
+ * + */ + +#include "qemu/osdep.h" +#include "qemu-common.h" + +#include "qemu/compiler.h" +#include "qemu/int128.h" +#include "qemu/range.h" +#include "exec/memory.h" +#include "exec/cpu-common.h" +#include "exec/ram_addr.h" +#include "qapi/error.h" +#include "hw/remote/mpqemu-link.h" +#include "hw/remote/proxy-memory-listener.h" + +/* + * TODO: get_fd_from_hostaddr(), proxy_mrs_can_merge() and + * proxy_memory_listener_commit() defined below perform tasks similar to the + * functions defined in vhost-user.c. These functions are good candidates + * for refactoring. + * + */ + +static void proxy_memory_listener_reset(MemoryListener *listener) +{ + ProxyMemoryListener *proxy_listener = container_of(listener, + ProxyMemoryListener, + listener); + int mrs; + + for (mrs = 0; mrs < proxy_listener->n_mr_sections; mrs++) { + memory_region_unref(proxy_listener->mr_sections[mrs].mr); + } + + g_free(proxy_listener->mr_sections); + proxy_listener->mr_sections = NULL; + proxy_listener->n_mr_sections = 0; +} + +static int get_fd_from_hostaddr(uint64_t host, ram_addr_t *offset) +{ + MemoryRegion *mr; + ram_addr_t off; + + /** + * Assumes that the host address is a valid address as it's + * coming from the MemoryListener system. In the case host + * address is not valid, the following call would return + * the default subregion of "system_memory" region, and + * not NULL. So it's not possible to check for NULL here. + */ + mr = memory_region_from_host((void *)(uintptr_t)host, &off); + + if (offset) { + *offset = off; + } + + return memory_region_get_fd(mr); +} + +static bool proxy_mrs_can_merge(uint64_t host, uint64_t prev_host, size_t size) +{ + if (((prev_host + size) != host)) { + return false; + } + + if (get_fd_from_hostaddr(host, NULL) != + get_fd_from_hostaddr(prev_host, NULL)) { + return false; + } + + return true; +} + +static bool try_merge(ProxyMemoryListener *proxy_listener, + MemoryRegionSection *section) +{ + uint64_t mrs_size, mrs_gpa, mrs_page; + MemoryRegionSection *prev_sec; + bool merged = false; + uintptr_t mrs_host; + RAMBlock *mrs_rb; + + if (!proxy_listener->n_mr_sections) { + return false; + } + + mrs_rb = section->mr->ram_block; + mrs_page = (uint64_t)qemu_ram_pagesize(mrs_rb); + mrs_size = int128_get64(section->size); + mrs_gpa = section->offset_within_address_space; + mrs_host = (uintptr_t)memory_region_get_ram_ptr(section->mr) + + section->offset_within_region; + + if (get_fd_from_hostaddr(mrs_host, NULL) < 0) { + return true; + } + + mrs_host = mrs_host & ~(mrs_page - 1); + mrs_gpa = mrs_gpa & ~(mrs_page - 1); + mrs_size = ROUND_UP(mrs_size, mrs_page); + + prev_sec = proxy_listener->mr_sections + + (proxy_listener->n_mr_sections - 1); + uint64_t prev_gpa_start = prev_sec->offset_within_address_space; + uint64_t prev_size = int128_get64(prev_sec->size); + uint64_t prev_gpa_end = range_get_last(prev_gpa_start, prev_size); + uint64_t prev_host_start = + (uintptr_t)memory_region_get_ram_ptr(prev_sec->mr) + + prev_sec->offset_within_region; + uint64_t prev_host_end = range_get_last(prev_host_start, prev_size); + + if (mrs_gpa <= (prev_gpa_end + 1)) { + g_assert(mrs_gpa > prev_gpa_start); + + if ((section->mr == prev_sec->mr) && + proxy_mrs_can_merge(mrs_host, prev_host_start, + (mrs_gpa - prev_gpa_start))) { + uint64_t max_end = MAX(prev_host_end, mrs_host + mrs_size); + merged = true; + prev_sec->offset_within_address_space = + MIN(prev_gpa_start, mrs_gpa); + prev_sec->offset_within_region = + MIN(prev_host_start, mrs_host) - + (uintptr_t)memory_region_get_ram_ptr(prev_sec->mr); 
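+            /*
+             * The merged section must cover both host ranges, so its new
+             * size runs from the lower host start to the higher host end.
+             */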
+ prev_sec->size = int128_make64(max_end - MIN(prev_host_start, + mrs_host)); + } + } + + return merged; +} + +static void proxy_memory_listener_region_addnop(MemoryListener *listener, + MemoryRegionSection *section) +{ + ProxyMemoryListener *proxy_listener = container_of(listener, + ProxyMemoryListener, + listener); + + if (!memory_region_is_ram(section->mr) || + memory_region_is_rom(section->mr)) { + return; + } + + if (try_merge(proxy_listener, section)) { + return; + } + + ++proxy_listener->n_mr_sections; + proxy_listener->mr_sections = g_renew(MemoryRegionSection, + proxy_listener->mr_sections, + proxy_listener->n_mr_sections); + proxy_listener->mr_sections[proxy_listener->n_mr_sections - 1] = *section; + proxy_listener->mr_sections[proxy_listener->n_mr_sections - 1].fv = NULL; + memory_region_ref(section->mr); +} + +static void proxy_memory_listener_commit(MemoryListener *listener) +{ + ProxyMemoryListener *proxy_listener = container_of(listener, + ProxyMemoryListener, + listener); + MPQemuMsg msg; + MemoryRegionSection *section; + ram_addr_t offset; + uintptr_t host_addr; + int region; + Error *local_err = NULL; + + memset(&msg, 0, sizeof(MPQemuMsg)); + + msg.cmd = MPQEMU_CMD_SYNC_SYSMEM; + msg.num_fds = proxy_listener->n_mr_sections; + msg.size = sizeof(SyncSysmemMsg); + if (msg.num_fds > REMOTE_MAX_FDS) { + error_report("Number of fds is more than %d", REMOTE_MAX_FDS); + return; + } + + for (region = 0; region < proxy_listener->n_mr_sections; region++) { + section = &proxy_listener->mr_sections[region]; + msg.data.sync_sysmem.gpas[region] = + section->offset_within_address_space; + msg.data.sync_sysmem.sizes[region] = int128_get64(section->size); + host_addr = (uintptr_t)memory_region_get_ram_ptr(section->mr) + + section->offset_within_region; + msg.fds[region] = get_fd_from_hostaddr(host_addr, &offset); + msg.data.sync_sysmem.offsets[region] = offset; + } + if (!mpqemu_msg_send(&msg, proxy_listener->ioc, &local_err)) { + error_report_err(local_err); + } +} + +void proxy_memory_listener_deconfigure(ProxyMemoryListener *proxy_listener) +{ + memory_listener_unregister(&proxy_listener->listener); + + proxy_memory_listener_reset(&proxy_listener->listener); +} + +void proxy_memory_listener_configure(ProxyMemoryListener *proxy_listener, + QIOChannel *ioc) +{ + proxy_listener->n_mr_sections = 0; + proxy_listener->mr_sections = NULL; + + proxy_listener->ioc = ioc; + + proxy_listener->listener.begin = proxy_memory_listener_reset; + proxy_listener->listener.commit = proxy_memory_listener_commit; + proxy_listener->listener.region_add = proxy_memory_listener_region_addnop; + proxy_listener->listener.region_nop = proxy_memory_listener_region_addnop; + proxy_listener->listener.priority = 10; + proxy_listener->listener.name = "proxy"; + + memory_listener_register(&proxy_listener->listener, + &address_space_memory); +} diff --git a/hw/remote/proxy.c b/hw/remote/proxy.c new file mode 100644 index 000000000..bad164299 --- /dev/null +++ b/hw/remote/proxy.c @@ -0,0 +1,387 @@ +/* + * Copyright © 2018, 2021 Oracle and/or its affiliates. + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
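+ *
+ * The x-pci-proxy-dev stub lives in the main QEMU process: PCI config and
+ * BAR accesses are forwarded to the remote device over the mpqemu channel,
+ * while INTx interrupts come back through KVM irqfd event notifiers.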
+ * + */ + +#include "qemu/osdep.h" +#include "qemu-common.h" + +#include "hw/remote/proxy.h" +#include "hw/pci/pci.h" +#include "qapi/error.h" +#include "io/channel-util.h" +#include "hw/qdev-properties.h" +#include "monitor/monitor.h" +#include "migration/blocker.h" +#include "qemu/sockets.h" +#include "hw/remote/mpqemu-link.h" +#include "qemu/error-report.h" +#include "hw/remote/proxy-memory-listener.h" +#include "qom/object.h" +#include "qemu/event_notifier.h" +#include "sysemu/kvm.h" +#include "util/event_notifier-posix.c" + +static void probe_pci_info(PCIDevice *dev, Error **errp); +static void proxy_device_reset(DeviceState *dev); + +static void proxy_intx_update(PCIDevice *pci_dev) +{ + PCIProxyDev *dev = PCI_PROXY_DEV(pci_dev); + PCIINTxRoute route; + int pin = pci_get_byte(pci_dev->config + PCI_INTERRUPT_PIN) - 1; + + if (dev->virq != -1) { + kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, &dev->intr, dev->virq); + dev->virq = -1; + } + + route = pci_device_route_intx_to_irq(pci_dev, pin); + + dev->virq = route.irq; + + if (dev->virq != -1) { + kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, &dev->intr, + &dev->resample, dev->virq); + } +} + +static void setup_irqfd(PCIProxyDev *dev) +{ + PCIDevice *pci_dev = PCI_DEVICE(dev); + MPQemuMsg msg; + Error *local_err = NULL; + + event_notifier_init(&dev->intr, 0); + event_notifier_init(&dev->resample, 0); + + memset(&msg, 0, sizeof(MPQemuMsg)); + msg.cmd = MPQEMU_CMD_SET_IRQFD; + msg.num_fds = 2; + msg.fds[0] = event_notifier_get_fd(&dev->intr); + msg.fds[1] = event_notifier_get_fd(&dev->resample); + msg.size = 0; + + if (!mpqemu_msg_send(&msg, dev->ioc, &local_err)) { + error_report_err(local_err); + } + + dev->virq = -1; + + proxy_intx_update(pci_dev); + + pci_device_set_intx_routing_notifier(pci_dev, proxy_intx_update); +} + +static void pci_proxy_dev_realize(PCIDevice *device, Error **errp) +{ + ERRP_GUARD(); + PCIProxyDev *dev = PCI_PROXY_DEV(device); + uint8_t *pci_conf = device->config; + int fd; + + if (!dev->fd) { + error_setg(errp, "fd parameter not specified for %s", + DEVICE(device)->id); + return; + } + + fd = monitor_fd_param(monitor_cur(), dev->fd, errp); + if (fd == -1) { + error_prepend(errp, "proxy: unable to parse fd %s: ", dev->fd); + return; + } + + if (!fd_is_socket(fd)) { + error_setg(errp, "proxy: fd %d is not a socket", fd); + close(fd); + return; + } + + dev->ioc = qio_channel_new_fd(fd, errp); + if (!dev->ioc) { + close(fd); + return; + } + + error_setg(&dev->migration_blocker, "%s does not support migration", + TYPE_PCI_PROXY_DEV); + if (migrate_add_blocker(dev->migration_blocker, errp) < 0) { + error_free(dev->migration_blocker); + object_unref(dev->ioc); + return; + } + + qemu_mutex_init(&dev->io_mutex); + qio_channel_set_blocking(dev->ioc, true, NULL); + + pci_conf[PCI_LATENCY_TIMER] = 0xff; + pci_conf[PCI_INTERRUPT_PIN] = 0x01; + + proxy_memory_listener_configure(&dev->proxy_listener, dev->ioc); + + setup_irqfd(dev); + + probe_pci_info(PCI_DEVICE(dev), errp); +} + +static void pci_proxy_dev_exit(PCIDevice *pdev) +{ + PCIProxyDev *dev = PCI_PROXY_DEV(pdev); + + if (dev->ioc) { + qio_channel_close(dev->ioc, NULL); + } + + migrate_del_blocker(dev->migration_blocker); + + error_free(dev->migration_blocker); + + proxy_memory_listener_deconfigure(&dev->proxy_listener); + + event_notifier_cleanup(&dev->intr); + event_notifier_cleanup(&dev->resample); +} + +static void config_op_send(PCIProxyDev *pdev, uint32_t addr, uint32_t *val, + int len, unsigned int op) +{ + MPQemuMsg msg = { 0 }; + uint64_t ret = -EINVAL; 
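+    /* mpqemu_msg_send_and_await_reply() returns UINT64_MAX on failure */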
+ Error *local_err = NULL; + + msg.cmd = op; + msg.data.pci_conf_data.addr = addr; + msg.data.pci_conf_data.val = (op == MPQEMU_CMD_PCI_CFGWRITE) ? *val : 0; + msg.data.pci_conf_data.len = len; + msg.size = sizeof(PciConfDataMsg); + + ret = mpqemu_msg_send_and_await_reply(&msg, pdev, &local_err); + if (local_err) { + error_report_err(local_err); + } + + if (ret == UINT64_MAX) { + error_report("Failed to perform PCI config %s operation", + (op == MPQEMU_CMD_PCI_CFGREAD) ? "READ" : "WRITE"); + } + + if (op == MPQEMU_CMD_PCI_CFGREAD) { + *val = (uint32_t)ret; + } +} + +static uint32_t pci_proxy_read_config(PCIDevice *d, uint32_t addr, int len) +{ + uint32_t val; + + config_op_send(PCI_PROXY_DEV(d), addr, &val, len, MPQEMU_CMD_PCI_CFGREAD); + + return val; +} + +static void pci_proxy_write_config(PCIDevice *d, uint32_t addr, uint32_t val, + int len) +{ + /* + * Some of the functions access the copy of remote device's PCI config + * space which is cached in the proxy device. Therefore, maintain + * it updated. + */ + pci_default_write_config(d, addr, val, len); + + config_op_send(PCI_PROXY_DEV(d), addr, &val, len, MPQEMU_CMD_PCI_CFGWRITE); +} + +static Property proxy_properties[] = { + DEFINE_PROP_STRING("fd", PCIProxyDev, fd), + DEFINE_PROP_END_OF_LIST(), +}; + +static void pci_proxy_dev_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + + k->realize = pci_proxy_dev_realize; + k->exit = pci_proxy_dev_exit; + k->config_read = pci_proxy_read_config; + k->config_write = pci_proxy_write_config; + + dc->reset = proxy_device_reset; + + device_class_set_props(dc, proxy_properties); +} + +static const TypeInfo pci_proxy_dev_type_info = { + .name = TYPE_PCI_PROXY_DEV, + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(PCIProxyDev), + .class_init = pci_proxy_dev_class_init, + .interfaces = (InterfaceInfo[]) { + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { }, + }, +}; + +static void pci_proxy_dev_register_types(void) +{ + type_register_static(&pci_proxy_dev_type_info); +} + +type_init(pci_proxy_dev_register_types) + +static void send_bar_access_msg(PCIProxyDev *pdev, MemoryRegion *mr, + bool write, hwaddr addr, uint64_t *val, + unsigned size, bool memory) +{ + MPQemuMsg msg = { 0 }; + long ret = -EINVAL; + Error *local_err = NULL; + + msg.size = sizeof(BarAccessMsg); + msg.data.bar_access.addr = mr->addr + addr; + msg.data.bar_access.size = size; + msg.data.bar_access.memory = memory; + + if (write) { + msg.cmd = MPQEMU_CMD_BAR_WRITE; + msg.data.bar_access.val = *val; + } else { + msg.cmd = MPQEMU_CMD_BAR_READ; + } + + ret = mpqemu_msg_send_and_await_reply(&msg, pdev, &local_err); + if (local_err) { + error_report_err(local_err); + } + + if (!write) { + *val = ret; + } +} + +static void proxy_bar_write(void *opaque, hwaddr addr, uint64_t val, + unsigned size) +{ + ProxyMemoryRegion *pmr = opaque; + + send_bar_access_msg(pmr->dev, &pmr->mr, true, addr, &val, size, + pmr->memory); +} + +static uint64_t proxy_bar_read(void *opaque, hwaddr addr, unsigned size) +{ + ProxyMemoryRegion *pmr = opaque; + uint64_t val; + + send_bar_access_msg(pmr->dev, &pmr->mr, false, addr, &val, size, + pmr->memory); + + return val; +} + +const MemoryRegionOps proxy_mr_ops = { + .read = proxy_bar_read, + .write = proxy_bar_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .impl = { + .min_access_size = 1, + .max_access_size = 8, + }, +}; + +static void probe_pci_info(PCIDevice *dev, Error **errp) +{ + PCIDeviceClass *pc = 
PCI_DEVICE_GET_CLASS(dev); + uint32_t orig_val, new_val, base_class, val; + PCIProxyDev *pdev = PCI_PROXY_DEV(dev); + DeviceClass *dc = DEVICE_CLASS(pc); + uint8_t type; + int i, size; + + config_op_send(pdev, PCI_VENDOR_ID, &val, 2, MPQEMU_CMD_PCI_CFGREAD); + pc->vendor_id = (uint16_t)val; + + config_op_send(pdev, PCI_DEVICE_ID, &val, 2, MPQEMU_CMD_PCI_CFGREAD); + pc->device_id = (uint16_t)val; + + config_op_send(pdev, PCI_CLASS_DEVICE, &val, 2, MPQEMU_CMD_PCI_CFGREAD); + pc->class_id = (uint16_t)val; + + config_op_send(pdev, PCI_SUBSYSTEM_ID, &val, 2, MPQEMU_CMD_PCI_CFGREAD); + pc->subsystem_id = (uint16_t)val; + + base_class = pc->class_id >> 4; + switch (base_class) { + case PCI_BASE_CLASS_BRIDGE: + set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); + break; + case PCI_BASE_CLASS_STORAGE: + set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); + break; + case PCI_BASE_CLASS_NETWORK: + case PCI_BASE_CLASS_WIRELESS: + set_bit(DEVICE_CATEGORY_NETWORK, dc->categories); + break; + case PCI_BASE_CLASS_INPUT: + set_bit(DEVICE_CATEGORY_INPUT, dc->categories); + break; + case PCI_BASE_CLASS_DISPLAY: + set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories); + break; + case PCI_BASE_CLASS_PROCESSOR: + set_bit(DEVICE_CATEGORY_CPU, dc->categories); + break; + default: + set_bit(DEVICE_CATEGORY_MISC, dc->categories); + break; + } + + for (i = 0; i < PCI_NUM_REGIONS; i++) { + config_op_send(pdev, PCI_BASE_ADDRESS_0 + (4 * i), &orig_val, 4, + MPQEMU_CMD_PCI_CFGREAD); + new_val = 0xffffffff; + config_op_send(pdev, PCI_BASE_ADDRESS_0 + (4 * i), &new_val, 4, + MPQEMU_CMD_PCI_CFGWRITE); + config_op_send(pdev, PCI_BASE_ADDRESS_0 + (4 * i), &new_val, 4, + MPQEMU_CMD_PCI_CFGREAD); + size = (~(new_val & 0xFFFFFFF0)) + 1; + config_op_send(pdev, PCI_BASE_ADDRESS_0 + (4 * i), &orig_val, 4, + MPQEMU_CMD_PCI_CFGWRITE); + type = (new_val & 0x1) ? + PCI_BASE_ADDRESS_SPACE_IO : PCI_BASE_ADDRESS_SPACE_MEMORY; + + if (size) { + g_autofree char *name = g_strdup_printf("bar-region-%d", i); + pdev->region[i].dev = pdev; + pdev->region[i].present = true; + if (type == PCI_BASE_ADDRESS_SPACE_MEMORY) { + pdev->region[i].memory = true; + } + memory_region_init_io(&pdev->region[i].mr, OBJECT(pdev), + &proxy_mr_ops, &pdev->region[i], + name, size); + pci_register_bar(dev, i, type, &pdev->region[i].mr); + } + } +} + +static void proxy_device_reset(DeviceState *dev) +{ + PCIProxyDev *pdev = PCI_PROXY_DEV(dev); + MPQemuMsg msg = { 0 }; + Error *local_err = NULL; + + msg.cmd = MPQEMU_CMD_DEVICE_RESET; + msg.size = 0; + + mpqemu_msg_send_and_await_reply(&msg, pdev, &local_err); + if (local_err) { + error_report_err(local_err); + } + +} diff --git a/hw/remote/remote-obj.c b/hw/remote/remote-obj.c new file mode 100644 index 000000000..4f2125421 --- /dev/null +++ b/hw/remote/remote-obj.c @@ -0,0 +1,203 @@ +/* + * Copyright © 2020, 2021 Oracle and/or its affiliates. + * + * This work is licensed under the terms of the GNU GPL-v2, version 2 or later. + * + * See the COPYING file in the top-level directory. 
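+ *
+ * An x-remote-object ties one realized PCI device (selected by devid) to a
+ * socket fd; once machine init completes it starts the mpqemu message loop
+ * for that device in a coroutine.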
+ * + */ + +#include "qemu/osdep.h" +#include "qemu-common.h" + +#include "qemu/error-report.h" +#include "qemu/notify.h" +#include "qom/object_interfaces.h" +#include "hw/qdev-core.h" +#include "io/channel.h" +#include "hw/qdev-core.h" +#include "hw/remote/machine.h" +#include "io/channel-util.h" +#include "qapi/error.h" +#include "sysemu/sysemu.h" +#include "hw/pci/pci.h" +#include "qemu/sockets.h" +#include "monitor/monitor.h" + +#define TYPE_REMOTE_OBJECT "x-remote-object" +OBJECT_DECLARE_TYPE(RemoteObject, RemoteObjectClass, REMOTE_OBJECT) + +struct RemoteObjectClass { + ObjectClass parent_class; + + unsigned int nr_devs; + unsigned int max_devs; +}; + +struct RemoteObject { + /* private */ + Object parent; + + Notifier machine_done; + + int32_t fd; + char *devid; + + QIOChannel *ioc; + + DeviceState *dev; + DeviceListener listener; +}; + +static void remote_object_set_fd(Object *obj, const char *str, Error **errp) +{ + RemoteObject *o = REMOTE_OBJECT(obj); + int fd = -1; + + fd = monitor_fd_param(monitor_cur(), str, errp); + if (fd == -1) { + error_prepend(errp, "Could not parse remote object fd %s:", str); + return; + } + + if (!fd_is_socket(fd)) { + error_setg(errp, "File descriptor '%s' is not a socket", str); + close(fd); + return; + } + + o->fd = fd; +} + +static void remote_object_set_devid(Object *obj, const char *str, Error **errp) +{ + RemoteObject *o = REMOTE_OBJECT(obj); + + g_free(o->devid); + + o->devid = g_strdup(str); +} + +static void remote_object_unrealize_listener(DeviceListener *listener, + DeviceState *dev) +{ + RemoteObject *o = container_of(listener, RemoteObject, listener); + + if (o->dev == dev) { + object_unref(OBJECT(o)); + } +} + +static void remote_object_machine_done(Notifier *notifier, void *data) +{ + RemoteObject *o = container_of(notifier, RemoteObject, machine_done); + DeviceState *dev = NULL; + QIOChannel *ioc = NULL; + Coroutine *co = NULL; + RemoteCommDev *comdev = NULL; + Error *err = NULL; + + dev = qdev_find_recursive(sysbus_get_default(), o->devid); + if (!dev || !object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) { + error_report("%s is not a PCI device", o->devid); + return; + } + + ioc = qio_channel_new_fd(o->fd, &err); + if (!ioc) { + error_report_err(err); + return; + } + qio_channel_set_blocking(ioc, false, NULL); + + o->dev = dev; + + o->listener.unrealize = remote_object_unrealize_listener; + device_listener_register(&o->listener); + + /* co-routine should free this. 
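+     * (ownership of comdev passes to mpqemu_remote_msg_loop_co, which is
+     * why it is declared g_autofree there.)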
*/ + comdev = g_new0(RemoteCommDev, 1); + *comdev = (RemoteCommDev) { + .ioc = ioc, + .dev = PCI_DEVICE(dev), + }; + + co = qemu_coroutine_create(mpqemu_remote_msg_loop_co, comdev); + qemu_coroutine_enter(co); +} + +static void remote_object_init(Object *obj) +{ + RemoteObjectClass *k = REMOTE_OBJECT_GET_CLASS(obj); + RemoteObject *o = REMOTE_OBJECT(obj); + + if (k->nr_devs >= k->max_devs) { + error_report("Reached maximum number of devices: %u", k->max_devs); + return; + } + + o->ioc = NULL; + o->fd = -1; + o->devid = NULL; + + k->nr_devs++; + + o->machine_done.notify = remote_object_machine_done; + qemu_add_machine_init_done_notifier(&o->machine_done); +} + +static void remote_object_finalize(Object *obj) +{ + RemoteObjectClass *k = REMOTE_OBJECT_GET_CLASS(obj); + RemoteObject *o = REMOTE_OBJECT(obj); + + device_listener_unregister(&o->listener); + + if (o->ioc) { + qio_channel_shutdown(o->ioc, QIO_CHANNEL_SHUTDOWN_BOTH, NULL); + qio_channel_close(o->ioc, NULL); + } + + object_unref(OBJECT(o->ioc)); + + k->nr_devs--; + g_free(o->devid); +} + +static void remote_object_class_init(ObjectClass *klass, void *data) +{ + RemoteObjectClass *k = REMOTE_OBJECT_CLASS(klass); + + /* + * Limit number of supported devices to 1. This is done to avoid devices + * from one VM accessing the RAM of another VM. This is done until we + * start using separate address spaces for individual devices. + */ + k->max_devs = 1; + k->nr_devs = 0; + + object_class_property_add_str(klass, "fd", NULL, remote_object_set_fd); + object_class_property_add_str(klass, "devid", NULL, + remote_object_set_devid); +} + +static const TypeInfo remote_object_info = { + .name = TYPE_REMOTE_OBJECT, + .parent = TYPE_OBJECT, + .instance_size = sizeof(RemoteObject), + .instance_init = remote_object_init, + .instance_finalize = remote_object_finalize, + .class_size = sizeof(RemoteObjectClass), + .class_init = remote_object_class_init, + .interfaces = (InterfaceInfo[]) { + { TYPE_USER_CREATABLE }, + { } + } +}; + +static void register_types(void) +{ + type_register_static(&remote_object_info); +} + +type_init(register_types); diff --git a/hw/remote/trace-events b/hw/remote/trace-events new file mode 100644 index 000000000..0b23974f9 --- /dev/null +++ b/hw/remote/trace-events @@ -0,0 +1,4 @@ +# multi-process trace events + +mpqemu_send_io_error(int cmd, int size, int nfds) "send command %d size %d, %d file descriptors to remote process" +mpqemu_recv_io_error(int cmd, int size, int nfds) "failed to receive %d size %d, %d file descriptors to remote process" diff --git a/hw/remote/trace.h b/hw/remote/trace.h new file mode 100644 index 000000000..5d5e3ac72 --- /dev/null +++ b/hw/remote/trace.h @@ -0,0 +1 @@ +#include "trace/trace-hw_remote.h" diff --git a/hw/riscv/Kconfig b/hw/riscv/Kconfig new file mode 100644 index 000000000..d2d869aaa --- /dev/null +++ b/hw/riscv/Kconfig @@ -0,0 +1,83 @@ +config RISCV_NUMA + bool + +config IBEX + bool + +config MICROCHIP_PFSOC + bool + select CADENCE_SDHCI + select MCHP_PFSOC_DMC + select MCHP_PFSOC_IOSCB + select MCHP_PFSOC_MMUART + select MCHP_PFSOC_SYSREG + select MSI_NONBROKEN + select RISCV_ACLINT + select SIFIVE_PDMA + select SIFIVE_PLIC + select UNIMP + +config OPENTITAN + bool + select IBEX + select UNIMP + +config SHAKTI_C + bool + select UNIMP + select SHAKTI_UART + select RISCV_ACLINT + select SIFIVE_PLIC + +config RISCV_VIRT + bool + imply PCI_DEVICES + imply VIRTIO_VGA + imply TEST_DEVICES + select RISCV_NUMA + select GOLDFISH_RTC + select MSI_NONBROKEN + select PCI + select 
PCI_EXPRESS_GENERIC_BRIDGE + select PFLASH_CFI01 + select SERIAL + select RISCV_ACLINT + select SIFIVE_PLIC + select SIFIVE_TEST + select VIRTIO_MMIO + select FW_CFG_DMA + +config SIFIVE_E + bool + select MSI_NONBROKEN + select RISCV_ACLINT + select SIFIVE_GPIO + select SIFIVE_PLIC + select SIFIVE_UART + select SIFIVE_E_PRCI + select UNIMP + +config SIFIVE_U + bool + select CADENCE + select MSI_NONBROKEN + select RISCV_ACLINT + select SIFIVE_GPIO + select SIFIVE_PDMA + select SIFIVE_PLIC + select SIFIVE_SPI + select SIFIVE_UART + select SIFIVE_U_OTP + select SIFIVE_U_PRCI + select SIFIVE_PWM + select SSI_M25P80 + select SSI_SD + select UNIMP + +config SPIKE + bool + select RISCV_NUMA + select HTIF + select MSI_NONBROKEN + select RISCV_ACLINT + select SIFIVE_PLIC diff --git a/hw/riscv/boot.c b/hw/riscv/boot.c new file mode 100644 index 000000000..519fa455a --- /dev/null +++ b/hw/riscv/boot.c @@ -0,0 +1,319 @@ +/* + * QEMU RISC-V Boot Helper + * + * Copyright (c) 2017 SiFive, Inc. + * Copyright (c) 2019 Alistair Francis + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#include "qemu/osdep.h" +#include "qemu-common.h" +#include "qemu/datadir.h" +#include "qemu/units.h" +#include "qemu/error-report.h" +#include "exec/cpu-defs.h" +#include "hw/boards.h" +#include "hw/loader.h" +#include "hw/riscv/boot.h" +#include "hw/riscv/boot_opensbi.h" +#include "elf.h" +#include "sysemu/device_tree.h" +#include "sysemu/qtest.h" + +#include + +bool riscv_is_32bit(RISCVHartArrayState *harts) +{ + return harts->harts[0].env.misa_mxl_max == MXL_RV32; +} + +/* + * Return the per-socket PLIC hart topology configuration string + * (caller must free with g_free()) + */ +char *riscv_plic_hart_config_string(int hart_count) +{ + g_autofree const char **vals = g_new(const char *, hart_count + 1); + int i; + + for (i = 0; i < hart_count; i++) { + CPUState *cs = qemu_get_cpu(i); + CPURISCVState *env = &RISCV_CPU(cs)->env; + + if (riscv_has_ext(env, RVS)) { + vals[i] = "MS"; + } else { + vals[i] = "M"; + } + } + vals[i] = NULL; + + /* g_strjoinv() obliges us to cast away const here */ + return g_strjoinv(",", (char **)vals); +} + +target_ulong riscv_calc_kernel_start_addr(RISCVHartArrayState *harts, + target_ulong firmware_end_addr) { + if (riscv_is_32bit(harts)) { + return QEMU_ALIGN_UP(firmware_end_addr, 4 * MiB); + } else { + return QEMU_ALIGN_UP(firmware_end_addr, 2 * MiB); + } +} + +target_ulong riscv_find_and_load_firmware(MachineState *machine, + const char *default_machine_firmware, + hwaddr firmware_load_addr, + symbol_fn_t sym_cb) +{ + char *firmware_filename = NULL; + target_ulong firmware_end_addr = firmware_load_addr; + + if ((!machine->firmware) || (!strcmp(machine->firmware, "default"))) { + /* + * The user didn't specify -bios, or has specified "-bios default". + * That means we are going to load the OpenSBI binary included in + * the QEMU source. 
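+     * ("-bios none" skips firmware loading entirely; any other value is
+     * treated as the name or path of a firmware image.)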
+ */ + firmware_filename = riscv_find_firmware(default_machine_firmware); + } else if (strcmp(machine->firmware, "none")) { + firmware_filename = riscv_find_firmware(machine->firmware); + } + + if (firmware_filename) { + /* If not "none", load the firmware */ + firmware_end_addr = riscv_load_firmware(firmware_filename, + firmware_load_addr, sym_cb); + g_free(firmware_filename); + } + + return firmware_end_addr; +} + +char *riscv_find_firmware(const char *firmware_filename) +{ + char *filename; + + filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, firmware_filename); + if (filename == NULL) { + if (!qtest_enabled()) { + /* + * We only ship plain binary BIOS images in the QEMU source. + * The Spike machine uses an ELF image as its default BIOS, so + * a qtest run would otherwise complain about the missing + * binary; suppress the error report when testing. + */ + error_report("Unable to load the RISC-V firmware \"%s\"", + firmware_filename); + exit(1); + } + } + + return filename; +} + +target_ulong riscv_load_firmware(const char *firmware_filename, + hwaddr firmware_load_addr, + symbol_fn_t sym_cb) +{ + uint64_t firmware_entry, firmware_size, firmware_end; + + if (load_elf_ram_sym(firmware_filename, NULL, NULL, NULL, + &firmware_entry, NULL, &firmware_end, NULL, + 0, EM_RISCV, 1, 0, NULL, true, sym_cb) > 0) { + return firmware_end; + } + + firmware_size = load_image_targphys_as(firmware_filename, + firmware_load_addr, + current_machine->ram_size, NULL); + + if (firmware_size > 0) { + return firmware_load_addr + firmware_size; + } + + error_report("could not load firmware '%s'", firmware_filename); + exit(1); +} + +target_ulong riscv_load_kernel(const char *kernel_filename, + target_ulong kernel_start_addr, + symbol_fn_t sym_cb) +{ + uint64_t kernel_entry; + + if (load_elf_ram_sym(kernel_filename, NULL, NULL, NULL, + &kernel_entry, NULL, NULL, NULL, 0, + EM_RISCV, 1, 0, NULL, true, sym_cb) > 0) { + return kernel_entry; + } + + if (load_uimage_as(kernel_filename, &kernel_entry, NULL, NULL, + NULL, NULL, NULL) > 0) { + return kernel_entry; + } + + if (load_image_targphys_as(kernel_filename, kernel_start_addr, + current_machine->ram_size, NULL) > 0) { + return kernel_start_addr; + } + + error_report("could not load kernel '%s'", kernel_filename); + exit(1); +} + +hwaddr riscv_load_initrd(const char *filename, uint64_t mem_size, + uint64_t kernel_entry, hwaddr *start) +{ + int size; + + /* + * We want to put the initrd far enough into RAM that when the + * kernel is uncompressed it will not clobber the initrd. However + * on boards without much RAM we must ensure that we still leave + * enough room for a decent-sized initrd, and on boards with large + * amounts of RAM we must avoid the initrd being so far up in RAM + * that it is outside lowmem and inaccessible to the kernel. + * So for boards with less than 256MB of RAM we put the initrd + * halfway into RAM, and for boards with 256MB of RAM or more we put + * the initrd at 128MB. 
+ */ + *start = kernel_entry + MIN(mem_size / 2, 128 * MiB); + + size = load_ramdisk(filename, *start, mem_size - *start); + if (size == -1) { + size = load_image_targphys(filename, *start, mem_size - *start); + if (size == -1) { + error_report("could not load ramdisk '%s'", filename); + exit(1); + } + } + + return *start + size; +} + +uint32_t riscv_load_fdt(hwaddr dram_base, uint64_t mem_size, void *fdt) +{ + uint32_t temp, fdt_addr; + hwaddr dram_end = dram_base + mem_size; + int ret, fdtsize = fdt_totalsize(fdt); + + if (fdtsize <= 0) { + error_report("invalid device-tree"); + exit(1); + } + + /* + * Put the FDT as high as possible, so that the kernel/initrd will not + * overwrite its contents, while still keeping it addressable by 32-bit + * systems. Thus, place it at a 16 MiB-aligned address below the end of + * DRAM or below 3 GiB, whichever is lower. + */ + temp = MIN(dram_end, 3072 * MiB); + fdt_addr = QEMU_ALIGN_DOWN(temp - fdtsize, 16 * MiB); + + ret = fdt_pack(fdt); + /* Should only fail if we've built a corrupted tree */ + g_assert(ret == 0); + /* copy in the device tree */ + qemu_fdt_dumpdtb(fdt, fdtsize); + + rom_add_blob_fixed_as("fdt", fdt, fdtsize, fdt_addr, + &address_space_memory); + + return fdt_addr; +} + +void riscv_rom_copy_firmware_info(MachineState *machine, hwaddr rom_base, + hwaddr rom_size, uint32_t reset_vec_size, + uint64_t kernel_entry) +{ + struct fw_dynamic_info dinfo; + size_t dinfo_len; + + if (sizeof(dinfo.magic) == 4) { + dinfo.magic = cpu_to_le32(FW_DYNAMIC_INFO_MAGIC_VALUE); + dinfo.version = cpu_to_le32(FW_DYNAMIC_INFO_VERSION); + dinfo.next_mode = cpu_to_le32(FW_DYNAMIC_INFO_NEXT_MODE_S); + dinfo.next_addr = cpu_to_le32(kernel_entry); + } else { + dinfo.magic = cpu_to_le64(FW_DYNAMIC_INFO_MAGIC_VALUE); + dinfo.version = cpu_to_le64(FW_DYNAMIC_INFO_VERSION); + dinfo.next_mode = cpu_to_le64(FW_DYNAMIC_INFO_NEXT_MODE_S); + dinfo.next_addr = cpu_to_le64(kernel_entry); + } + dinfo.options = 0; + dinfo.boot_hart = 0; + dinfo_len = sizeof(dinfo); + + /* + * Copy the dynamic firmware info. This information is specific to + * OpenSBI, but it doesn't break other firmware as long as it doesn't + * expect any particular value in the "a2" register. 
+ */ + if (dinfo_len > (rom_size - reset_vec_size)) { + error_report("not enough space to store dynamic firmware info"); + exit(1); + } + + rom_add_blob_fixed_as("mrom.finfo", &dinfo, dinfo_len, + rom_base + reset_vec_size, + &address_space_memory); +} + +void riscv_setup_rom_reset_vec(MachineState *machine, RISCVHartArrayState *harts, + hwaddr start_addr, + hwaddr rom_base, hwaddr rom_size, + uint64_t kernel_entry, + uint32_t fdt_load_addr, void *fdt) +{ + int i; + uint32_t start_addr_hi32 = 0x00000000; + + if (!riscv_is_32bit(harts)) { + start_addr_hi32 = start_addr >> 32; + } + /* reset vector */ + uint32_t reset_vec[10] = { + 0x00000297, /* 1: auipc t0, %pcrel_hi(fw_dyn) */ + 0x02828613, /* addi a2, t0, %pcrel_lo(1b) */ + 0xf1402573, /* csrr a0, mhartid */ + 0, + 0, + 0x00028067, /* jr t0 */ + start_addr, /* start: .dword */ + start_addr_hi32, + fdt_load_addr, /* fdt_laddr: .dword */ + 0x00000000, + /* fw_dyn: */ + }; + if (riscv_is_32bit(harts)) { + reset_vec[3] = 0x0202a583; /* lw a1, 32(t0) */ + reset_vec[4] = 0x0182a283; /* lw t0, 24(t0) */ + } else { + reset_vec[3] = 0x0202b583; /* ld a1, 32(t0) */ + reset_vec[4] = 0x0182b283; /* ld t0, 24(t0) */ + } + + /* copy in the reset vector in little-endian byte order */ + for (i = 0; i < ARRAY_SIZE(reset_vec); i++) { + reset_vec[i] = cpu_to_le32(reset_vec[i]); + } + rom_add_blob_fixed_as("mrom.reset", reset_vec, sizeof(reset_vec), + rom_base, &address_space_memory); + riscv_rom_copy_firmware_info(machine, rom_base, rom_size, sizeof(reset_vec), + kernel_entry); +} diff --git a/hw/riscv/meson.build b/hw/riscv/meson.build new file mode 100644 index 000000000..ab6cae57e --- /dev/null +++ b/hw/riscv/meson.build @@ -0,0 +1,13 @@ +riscv_ss = ss.source_set() +riscv_ss.add(files('boot.c'), fdt) +riscv_ss.add(when: 'CONFIG_RISCV_NUMA', if_true: files('numa.c')) +riscv_ss.add(files('riscv_hart.c')) +riscv_ss.add(when: 'CONFIG_OPENTITAN', if_true: files('opentitan.c')) +riscv_ss.add(when: 'CONFIG_RISCV_VIRT', if_true: files('virt.c')) +riscv_ss.add(when: 'CONFIG_SHAKTI_C', if_true: files('shakti_c.c')) +riscv_ss.add(when: 'CONFIG_SIFIVE_E', if_true: files('sifive_e.c')) +riscv_ss.add(when: 'CONFIG_SIFIVE_U', if_true: files('sifive_u.c')) +riscv_ss.add(when: 'CONFIG_SPIKE', if_true: files('spike.c')) +riscv_ss.add(when: 'CONFIG_MICROCHIP_PFSOC', if_true: files('microchip_pfsoc.c')) + +hw_arch += {'riscv': riscv_ss} diff --git a/hw/riscv/microchip_pfsoc.c b/hw/riscv/microchip_pfsoc.c new file mode 100644 index 000000000..57d779fb5 --- /dev/null +++ b/hw/riscv/microchip_pfsoc.c @@ -0,0 +1,624 @@ +/* + * QEMU RISC-V Board Compatible with Microchip PolarFire SoC Icicle Kit + * + * Copyright (c) 2020 Wind River Systems, Inc. + * + * Author: + * Bin Meng + * + * Provides a board compatible with the Microchip PolarFire SoC Icicle Kit + * + * 0) CLINT (Core Level Interruptor) + * 1) PLIC (Platform Level Interrupt Controller) + * 2) eNVM (Embedded Non-Volatile Memory) + * 3) MMUARTs (Multi-Mode UART) + * 4) Cadence eMMC/SDHC controller and an SD card connected to it + * 5) SiFive Platform DMA (Direct Memory Access Controller) + * 6) GEM (Gigabit Ethernet MAC Controller) + * 7) DMC (DDR Memory Controller) + * 8) IOSCB modules + * + * This board currently generates a device tree dynamically, describing at + * least two harts and up to five harts. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#include "qemu/osdep.h" +#include "qemu/error-report.h" +#include "qemu/units.h" +#include "qemu/cutils.h" +#include "qapi/error.h" +#include "hw/boards.h" +#include "hw/loader.h" +#include "hw/sysbus.h" +#include "chardev/char.h" +#include "hw/cpu/cluster.h" +#include "target/riscv/cpu.h" +#include "hw/misc/unimp.h" +#include "hw/riscv/boot.h" +#include "hw/riscv/riscv_hart.h" +#include "hw/riscv/microchip_pfsoc.h" +#include "hw/intc/riscv_aclint.h" +#include "hw/intc/sifive_plic.h" +#include "sysemu/device_tree.h" +#include "sysemu/sysemu.h" + +/* + * The BIOS image used by this machine is called Hart Software Services (HSS). + * See https://github.com/polarfire-soc/hart-software-services + */ +#define BIOS_FILENAME "hss.bin" +#define RESET_VECTOR 0x20220000 + +/* CLINT timebase frequency */ +#define CLINT_TIMEBASE_FREQ 1000000 + +/* GEM version */ +#define GEM_REVISION 0x0107010c + +/* + * The complete description of the whole PolarFire SoC memory map is scattered + * in different documents. There are several places to look at for memory maps: + * + * 1 Chapter 11 "MSS Memory Map", in the doc "UG0880: PolarFire SoC FPGA + * Microprocessor Subsystem (MSS) User Guide", which can be downloaded from + * https://www.microsemi.com/document-portal/doc_download/ + * 1244570-ug0880-polarfire-soc-fpga-microprocessor-subsystem-mss-user-guide, + * describes the whole picture of the PolarFire SoC memory map. 
+ * + * 2 A zip file for the PolarFire SoC memory map, which can be downloaded from + * https://www.microsemi.com/document-portal/doc_download/ + * 1244581-polarfire-soc-register-map, contains the following two major parts: + * - Register Map/PF_SoC_RegMap_V1_1/pfsoc_regmap.htm + * describes the complete integrated peripherals memory map + * - Register Map/PF_SoC_RegMap_V1_1/MPFS250T/mpfs250t_ioscb_memmap_dri.htm + * describes the complete IOSCB modules memory maps + */ +static const MemMapEntry microchip_pfsoc_memmap[] = { + [MICROCHIP_PFSOC_RSVD0] = { 0x0, 0x100 }, + [MICROCHIP_PFSOC_DEBUG] = { 0x100, 0xf00 }, + [MICROCHIP_PFSOC_E51_DTIM] = { 0x1000000, 0x2000 }, + [MICROCHIP_PFSOC_BUSERR_UNIT0] = { 0x1700000, 0x1000 }, + [MICROCHIP_PFSOC_BUSERR_UNIT1] = { 0x1701000, 0x1000 }, + [MICROCHIP_PFSOC_BUSERR_UNIT2] = { 0x1702000, 0x1000 }, + [MICROCHIP_PFSOC_BUSERR_UNIT3] = { 0x1703000, 0x1000 }, + [MICROCHIP_PFSOC_BUSERR_UNIT4] = { 0x1704000, 0x1000 }, + [MICROCHIP_PFSOC_CLINT] = { 0x2000000, 0x10000 }, + [MICROCHIP_PFSOC_L2CC] = { 0x2010000, 0x1000 }, + [MICROCHIP_PFSOC_DMA] = { 0x3000000, 0x100000 }, + [MICROCHIP_PFSOC_L2LIM] = { 0x8000000, 0x2000000 }, + [MICROCHIP_PFSOC_PLIC] = { 0xc000000, 0x4000000 }, + [MICROCHIP_PFSOC_MMUART0] = { 0x20000000, 0x1000 }, + [MICROCHIP_PFSOC_SYSREG] = { 0x20002000, 0x2000 }, + [MICROCHIP_PFSOC_MPUCFG] = { 0x20005000, 0x1000 }, + [MICROCHIP_PFSOC_DDR_SGMII_PHY] = { 0x20007000, 0x1000 }, + [MICROCHIP_PFSOC_EMMC_SD] = { 0x20008000, 0x1000 }, + [MICROCHIP_PFSOC_DDR_CFG] = { 0x20080000, 0x40000 }, + [MICROCHIP_PFSOC_MMUART1] = { 0x20100000, 0x1000 }, + [MICROCHIP_PFSOC_MMUART2] = { 0x20102000, 0x1000 }, + [MICROCHIP_PFSOC_MMUART3] = { 0x20104000, 0x1000 }, + [MICROCHIP_PFSOC_MMUART4] = { 0x20106000, 0x1000 }, + [MICROCHIP_PFSOC_SPI0] = { 0x20108000, 0x1000 }, + [MICROCHIP_PFSOC_SPI1] = { 0x20109000, 0x1000 }, + [MICROCHIP_PFSOC_I2C1] = { 0x2010b000, 0x1000 }, + [MICROCHIP_PFSOC_GEM0] = { 0x20110000, 0x2000 }, + [MICROCHIP_PFSOC_GEM1] = { 0x20112000, 0x2000 }, + [MICROCHIP_PFSOC_GPIO0] = { 0x20120000, 0x1000 }, + [MICROCHIP_PFSOC_GPIO1] = { 0x20121000, 0x1000 }, + [MICROCHIP_PFSOC_GPIO2] = { 0x20122000, 0x1000 }, + [MICROCHIP_PFSOC_ENVM_CFG] = { 0x20200000, 0x1000 }, + [MICROCHIP_PFSOC_ENVM_DATA] = { 0x20220000, 0x20000 }, + [MICROCHIP_PFSOC_QSPI_XIP] = { 0x21000000, 0x1000000 }, + [MICROCHIP_PFSOC_IOSCB] = { 0x30000000, 0x10000000 }, + [MICROCHIP_PFSOC_EMMC_SD_MUX] = { 0x4f000000, 0x4 }, + [MICROCHIP_PFSOC_DRAM_LO] = { 0x80000000, 0x40000000 }, + [MICROCHIP_PFSOC_DRAM_LO_ALIAS] = { 0xc0000000, 0x40000000 }, + [MICROCHIP_PFSOC_DRAM_HI] = { 0x1000000000, 0x0 }, + [MICROCHIP_PFSOC_DRAM_HI_ALIAS] = { 0x1400000000, 0x0 }, +}; + +static void microchip_pfsoc_soc_instance_init(Object *obj) +{ + MachineState *ms = MACHINE(qdev_get_machine()); + MicrochipPFSoCState *s = MICROCHIP_PFSOC(obj); + + object_initialize_child(obj, "e-cluster", &s->e_cluster, TYPE_CPU_CLUSTER); + qdev_prop_set_uint32(DEVICE(&s->e_cluster), "cluster-id", 0); + + object_initialize_child(OBJECT(&s->e_cluster), "e-cpus", &s->e_cpus, + TYPE_RISCV_HART_ARRAY); + qdev_prop_set_uint32(DEVICE(&s->e_cpus), "num-harts", 1); + qdev_prop_set_uint32(DEVICE(&s->e_cpus), "hartid-base", 0); + qdev_prop_set_string(DEVICE(&s->e_cpus), "cpu-type", + TYPE_RISCV_CPU_SIFIVE_E51); + qdev_prop_set_uint64(DEVICE(&s->e_cpus), "resetvec", RESET_VECTOR); + + object_initialize_child(obj, "u-cluster", &s->u_cluster, TYPE_CPU_CLUSTER); + qdev_prop_set_uint32(DEVICE(&s->u_cluster), "cluster-id", 1); + +
object_initialize_child(OBJECT(&s->u_cluster), "u-cpus", &s->u_cpus, + TYPE_RISCV_HART_ARRAY); + qdev_prop_set_uint32(DEVICE(&s->u_cpus), "num-harts", ms->smp.cpus - 1); + qdev_prop_set_uint32(DEVICE(&s->u_cpus), "hartid-base", 1); + qdev_prop_set_string(DEVICE(&s->u_cpus), "cpu-type", + TYPE_RISCV_CPU_SIFIVE_U54); + qdev_prop_set_uint64(DEVICE(&s->u_cpus), "resetvec", RESET_VECTOR); + + object_initialize_child(obj, "dma-controller", &s->dma, + TYPE_SIFIVE_PDMA); + + object_initialize_child(obj, "sysreg", &s->sysreg, + TYPE_MCHP_PFSOC_SYSREG); + + object_initialize_child(obj, "ddr-sgmii-phy", &s->ddr_sgmii_phy, + TYPE_MCHP_PFSOC_DDR_SGMII_PHY); + object_initialize_child(obj, "ddr-cfg", &s->ddr_cfg, + TYPE_MCHP_PFSOC_DDR_CFG); + + object_initialize_child(obj, "gem0", &s->gem0, TYPE_CADENCE_GEM); + object_initialize_child(obj, "gem1", &s->gem1, TYPE_CADENCE_GEM); + + object_initialize_child(obj, "sd-controller", &s->sdhci, + TYPE_CADENCE_SDHCI); + + object_initialize_child(obj, "ioscb", &s->ioscb, TYPE_MCHP_PFSOC_IOSCB); +} + +static void microchip_pfsoc_soc_realize(DeviceState *dev, Error **errp) +{ + MachineState *ms = MACHINE(qdev_get_machine()); + MicrochipPFSoCState *s = MICROCHIP_PFSOC(dev); + const MemMapEntry *memmap = microchip_pfsoc_memmap; + MemoryRegion *system_memory = get_system_memory(); + MemoryRegion *rsvd0_mem = g_new(MemoryRegion, 1); + MemoryRegion *e51_dtim_mem = g_new(MemoryRegion, 1); + MemoryRegion *l2lim_mem = g_new(MemoryRegion, 1); + MemoryRegion *envm_data = g_new(MemoryRegion, 1); + MemoryRegion *qspi_xip_mem = g_new(MemoryRegion, 1); + char *plic_hart_config; + NICInfo *nd; + int i; + + sysbus_realize(SYS_BUS_DEVICE(&s->e_cpus), &error_abort); + sysbus_realize(SYS_BUS_DEVICE(&s->u_cpus), &error_abort); + /* + * The cluster must be realized after the RISC-V hart array container, + * as the container's CPU object is only created on realize, and the + * CPU must exist and have been parented into the cluster before the + * cluster is realized. 
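+ * + * The resulting realize order is therefore (sketch): + * sysbus_realize(hart array) -> instantiates the RISCVCPU children + * qdev_realize(cpu cluster) -> walks the already-created CPUs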
+ */ + qdev_realize(DEVICE(&s->e_cluster), NULL, &error_abort); + qdev_realize(DEVICE(&s->u_cluster), NULL, &error_abort); + + /* Reserved Memory at address 0 */ + memory_region_init_ram(rsvd0_mem, NULL, "microchip.pfsoc.rsvd0_mem", + memmap[MICROCHIP_PFSOC_RSVD0].size, &error_fatal); + memory_region_add_subregion(system_memory, + memmap[MICROCHIP_PFSOC_RSVD0].base, + rsvd0_mem); + + /* E51 DTIM */ + memory_region_init_ram(e51_dtim_mem, NULL, "microchip.pfsoc.e51_dtim_mem", + memmap[MICROCHIP_PFSOC_E51_DTIM].size, &error_fatal); + memory_region_add_subregion(system_memory, + memmap[MICROCHIP_PFSOC_E51_DTIM].base, + e51_dtim_mem); + + /* Bus Error Units */ + create_unimplemented_device("microchip.pfsoc.buserr_unit0_mem", + memmap[MICROCHIP_PFSOC_BUSERR_UNIT0].base, + memmap[MICROCHIP_PFSOC_BUSERR_UNIT0].size); + create_unimplemented_device("microchip.pfsoc.buserr_unit1_mem", + memmap[MICROCHIP_PFSOC_BUSERR_UNIT1].base, + memmap[MICROCHIP_PFSOC_BUSERR_UNIT1].size); + create_unimplemented_device("microchip.pfsoc.buserr_unit2_mem", + memmap[MICROCHIP_PFSOC_BUSERR_UNIT2].base, + memmap[MICROCHIP_PFSOC_BUSERR_UNIT2].size); + create_unimplemented_device("microchip.pfsoc.buserr_unit3_mem", + memmap[MICROCHIP_PFSOC_BUSERR_UNIT3].base, + memmap[MICROCHIP_PFSOC_BUSERR_UNIT3].size); + create_unimplemented_device("microchip.pfsoc.buserr_unit4_mem", + memmap[MICROCHIP_PFSOC_BUSERR_UNIT4].base, + memmap[MICROCHIP_PFSOC_BUSERR_UNIT4].size); + + /* CLINT */ + riscv_aclint_swi_create(memmap[MICROCHIP_PFSOC_CLINT].base, + 0, ms->smp.cpus, false); + riscv_aclint_mtimer_create( + memmap[MICROCHIP_PFSOC_CLINT].base + RISCV_ACLINT_SWI_SIZE, + RISCV_ACLINT_DEFAULT_MTIMER_SIZE, 0, ms->smp.cpus, + RISCV_ACLINT_DEFAULT_MTIMECMP, RISCV_ACLINT_DEFAULT_MTIME, + CLINT_TIMEBASE_FREQ, false); + + /* L2 cache controller */ + create_unimplemented_device("microchip.pfsoc.l2cc", + memmap[MICROCHIP_PFSOC_L2CC].base, memmap[MICROCHIP_PFSOC_L2CC].size); + + /* + * Add L2-LIM at reset size. + * This should be reduced in size as the L2 Cache Controller WayEnable + * register is incremented. Unfortunately there is no nice (or any) way + * to handle reducing or blocking out the L2 LIM while still allowing it + * to be returned to fully enabled after a reset. For the time being, just + * leave it enabled all the time. This won't break anything, but will be + * too generous to misbehaving guests. 
+ */ + memory_region_init_ram(l2lim_mem, NULL, "microchip.pfsoc.l2lim", + memmap[MICROCHIP_PFSOC_L2LIM].size, &error_fatal); + memory_region_add_subregion(system_memory, + memmap[MICROCHIP_PFSOC_L2LIM].base, + l2lim_mem); + + /* create PLIC hart topology configuration string */ + plic_hart_config = riscv_plic_hart_config_string(ms->smp.cpus); + + /* PLIC */ + s->plic = sifive_plic_create(memmap[MICROCHIP_PFSOC_PLIC].base, + plic_hart_config, ms->smp.cpus, 0, + MICROCHIP_PFSOC_PLIC_NUM_SOURCES, + MICROCHIP_PFSOC_PLIC_NUM_PRIORITIES, + MICROCHIP_PFSOC_PLIC_PRIORITY_BASE, + MICROCHIP_PFSOC_PLIC_PENDING_BASE, + MICROCHIP_PFSOC_PLIC_ENABLE_BASE, + MICROCHIP_PFSOC_PLIC_ENABLE_STRIDE, + MICROCHIP_PFSOC_PLIC_CONTEXT_BASE, + MICROCHIP_PFSOC_PLIC_CONTEXT_STRIDE, + memmap[MICROCHIP_PFSOC_PLIC].size); + g_free(plic_hart_config); + + /* DMA */ + sysbus_realize(SYS_BUS_DEVICE(&s->dma), errp); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->dma), 0, + memmap[MICROCHIP_PFSOC_DMA].base); + for (i = 0; i < SIFIVE_PDMA_IRQS; i++) { + sysbus_connect_irq(SYS_BUS_DEVICE(&s->dma), i, + qdev_get_gpio_in(DEVICE(s->plic), + MICROCHIP_PFSOC_DMA_IRQ0 + i)); + } + + /* SYSREG */ + sysbus_realize(SYS_BUS_DEVICE(&s->sysreg), errp); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->sysreg), 0, + memmap[MICROCHIP_PFSOC_SYSREG].base); + + /* MPUCFG */ + create_unimplemented_device("microchip.pfsoc.mpucfg", + memmap[MICROCHIP_PFSOC_MPUCFG].base, + memmap[MICROCHIP_PFSOC_MPUCFG].size); + + /* DDR SGMII PHY */ + sysbus_realize(SYS_BUS_DEVICE(&s->ddr_sgmii_phy), errp); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->ddr_sgmii_phy), 0, + memmap[MICROCHIP_PFSOC_DDR_SGMII_PHY].base); + + /* DDR CFG */ + sysbus_realize(SYS_BUS_DEVICE(&s->ddr_cfg), errp); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->ddr_cfg), 0, + memmap[MICROCHIP_PFSOC_DDR_CFG].base); + + /* SDHCI */ + sysbus_realize(SYS_BUS_DEVICE(&s->sdhci), errp); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->sdhci), 0, + memmap[MICROCHIP_PFSOC_EMMC_SD].base); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->sdhci), 0, + qdev_get_gpio_in(DEVICE(s->plic), MICROCHIP_PFSOC_EMMC_SD_IRQ)); + + /* MMUARTs */ + s->serial0 = mchp_pfsoc_mmuart_create(system_memory, + memmap[MICROCHIP_PFSOC_MMUART0].base, + qdev_get_gpio_in(DEVICE(s->plic), MICROCHIP_PFSOC_MMUART0_IRQ), + serial_hd(0)); + s->serial1 = mchp_pfsoc_mmuart_create(system_memory, + memmap[MICROCHIP_PFSOC_MMUART1].base, + qdev_get_gpio_in(DEVICE(s->plic), MICROCHIP_PFSOC_MMUART1_IRQ), + serial_hd(1)); + s->serial2 = mchp_pfsoc_mmuart_create(system_memory, + memmap[MICROCHIP_PFSOC_MMUART2].base, + qdev_get_gpio_in(DEVICE(s->plic), MICROCHIP_PFSOC_MMUART2_IRQ), + serial_hd(2)); + s->serial3 = mchp_pfsoc_mmuart_create(system_memory, + memmap[MICROCHIP_PFSOC_MMUART3].base, + qdev_get_gpio_in(DEVICE(s->plic), MICROCHIP_PFSOC_MMUART3_IRQ), + serial_hd(3)); + s->serial4 = mchp_pfsoc_mmuart_create(system_memory, + memmap[MICROCHIP_PFSOC_MMUART4].base, + qdev_get_gpio_in(DEVICE(s->plic), MICROCHIP_PFSOC_MMUART4_IRQ), + serial_hd(4)); + + /* SPI */ + create_unimplemented_device("microchip.pfsoc.spi0", + memmap[MICROCHIP_PFSOC_SPI0].base, + memmap[MICROCHIP_PFSOC_SPI0].size); + create_unimplemented_device("microchip.pfsoc.spi1", + memmap[MICROCHIP_PFSOC_SPI1].base, + memmap[MICROCHIP_PFSOC_SPI1].size); + + /* I2C1 */ + create_unimplemented_device("microchip.pfsoc.i2c1", + memmap[MICROCHIP_PFSOC_I2C1].base, + memmap[MICROCHIP_PFSOC_I2C1].size); + + /* GEMs */ + + nd = &nd_table[0]; + if (nd->used) { + qemu_check_nic_model(nd, TYPE_CADENCE_GEM); + qdev_set_nic_properties(DEVICE(&s->gem0), nd); + } + 
nd = &nd_table[1]; + if (nd->used) { + qemu_check_nic_model(nd, TYPE_CADENCE_GEM); + qdev_set_nic_properties(DEVICE(&s->gem1), nd); + } + + object_property_set_int(OBJECT(&s->gem0), "revision", GEM_REVISION, errp); + object_property_set_int(OBJECT(&s->gem0), "phy-addr", 8, errp); + sysbus_realize(SYS_BUS_DEVICE(&s->gem0), errp); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->gem0), 0, + memmap[MICROCHIP_PFSOC_GEM0].base); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->gem0), 0, + qdev_get_gpio_in(DEVICE(s->plic), MICROCHIP_PFSOC_GEM0_IRQ)); + + object_property_set_int(OBJECT(&s->gem1), "revision", GEM_REVISION, errp); + object_property_set_int(OBJECT(&s->gem1), "phy-addr", 9, errp); + sysbus_realize(SYS_BUS_DEVICE(&s->gem1), errp); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->gem1), 0, + memmap[MICROCHIP_PFSOC_GEM1].base); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->gem1), 0, + qdev_get_gpio_in(DEVICE(s->plic), MICROCHIP_PFSOC_GEM1_IRQ)); + + /* GPIOs */ + create_unimplemented_device("microchip.pfsoc.gpio0", + memmap[MICROCHIP_PFSOC_GPIO0].base, + memmap[MICROCHIP_PFSOC_GPIO0].size); + create_unimplemented_device("microchip.pfsoc.gpio1", + memmap[MICROCHIP_PFSOC_GPIO1].base, + memmap[MICROCHIP_PFSOC_GPIO1].size); + create_unimplemented_device("microchip.pfsoc.gpio2", + memmap[MICROCHIP_PFSOC_GPIO2].base, + memmap[MICROCHIP_PFSOC_GPIO2].size); + + /* eNVM */ + memory_region_init_rom(envm_data, OBJECT(dev), "microchip.pfsoc.envm.data", + memmap[MICROCHIP_PFSOC_ENVM_DATA].size, + &error_fatal); + memory_region_add_subregion(system_memory, + memmap[MICROCHIP_PFSOC_ENVM_DATA].base, + envm_data); + + /* IOSCB */ + sysbus_realize(SYS_BUS_DEVICE(&s->ioscb), errp); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->ioscb), 0, + memmap[MICROCHIP_PFSOC_IOSCB].base); + + /* eMMC/SD mux */ + create_unimplemented_device("microchip.pfsoc.emmc_sd_mux", + memmap[MICROCHIP_PFSOC_EMMC_SD_MUX].base, + memmap[MICROCHIP_PFSOC_EMMC_SD_MUX].size); + + /* QSPI Flash */ + memory_region_init_rom(qspi_xip_mem, OBJECT(dev), + "microchip.pfsoc.qspi_xip", + memmap[MICROCHIP_PFSOC_QSPI_XIP].size, + &error_fatal); + memory_region_add_subregion(system_memory, + memmap[MICROCHIP_PFSOC_QSPI_XIP].base, + qspi_xip_mem); +} + +static void microchip_pfsoc_soc_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + + dc->realize = microchip_pfsoc_soc_realize; + /* Reason: Uses serial_hds in realize function, thus can't be used twice */ + dc->user_creatable = false; +} + +static const TypeInfo microchip_pfsoc_soc_type_info = { + .name = TYPE_MICROCHIP_PFSOC, + .parent = TYPE_DEVICE, + .instance_size = sizeof(MicrochipPFSoCState), + .instance_init = microchip_pfsoc_soc_instance_init, + .class_init = microchip_pfsoc_soc_class_init, +}; + +static void microchip_pfsoc_soc_register_types(void) +{ + type_register_static(&microchip_pfsoc_soc_type_info); +} + +type_init(microchip_pfsoc_soc_register_types) + +static void microchip_icicle_kit_machine_init(MachineState *machine) +{ + MachineClass *mc = MACHINE_GET_CLASS(machine); + const MemMapEntry *memmap = microchip_pfsoc_memmap; + MicrochipIcicleKitState *s = MICROCHIP_ICICLE_KIT_MACHINE(machine); + MemoryRegion *system_memory = get_system_memory(); + MemoryRegion *mem_low = g_new(MemoryRegion, 1); + MemoryRegion *mem_low_alias = g_new(MemoryRegion, 1); + MemoryRegion *mem_high = g_new(MemoryRegion, 1); + MemoryRegion *mem_high_alias = g_new(MemoryRegion, 1); + uint64_t mem_low_size, mem_high_size; + hwaddr firmware_load_addr; + const char *firmware_name; + bool kernel_as_payload = false; + target_ulong 
firmware_end_addr, kernel_start_addr; + uint64_t kernel_entry; + uint32_t fdt_load_addr; + DriveInfo *dinfo = drive_get_next(IF_SD); + + /* Sanity check on RAM size */ + if (machine->ram_size < mc->default_ram_size) { + char *sz = size_to_str(mc->default_ram_size); + error_report("Invalid RAM size, should be at least %s", sz); + g_free(sz); + exit(EXIT_FAILURE); + } + + /* Initialize SoC */ + object_initialize_child(OBJECT(machine), "soc", &s->soc, + TYPE_MICROCHIP_PFSOC); + qdev_realize(DEVICE(&s->soc), NULL, &error_abort); + + /* Split RAM into low and high regions using aliases to machine->ram */ + mem_low_size = memmap[MICROCHIP_PFSOC_DRAM_LO].size; + mem_high_size = machine->ram_size - mem_low_size; + memory_region_init_alias(mem_low, NULL, + "microchip.icicle.kit.ram_low", machine->ram, + 0, mem_low_size); + memory_region_init_alias(mem_high, NULL, + "microchip.icicle.kit.ram_high", machine->ram, + mem_low_size, mem_high_size); + + /* Register RAM */ + memory_region_add_subregion(system_memory, + memmap[MICROCHIP_PFSOC_DRAM_LO].base, + mem_low); + memory_region_add_subregion(system_memory, + memmap[MICROCHIP_PFSOC_DRAM_HI].base, + mem_high); + + /* Create aliases for the low and high RAM regions */ + memory_region_init_alias(mem_low_alias, NULL, + "microchip.icicle.kit.ram_low.alias", + mem_low, 0, mem_low_size); + memory_region_add_subregion(system_memory, + memmap[MICROCHIP_PFSOC_DRAM_LO_ALIAS].base, + mem_low_alias); + memory_region_init_alias(mem_high_alias, NULL, + "microchip.icicle.kit.ram_high.alias", + mem_high, 0, mem_high_size); + memory_region_add_subregion(system_memory, + memmap[MICROCHIP_PFSOC_DRAM_HI_ALIAS].base, + mem_high_alias); + + /* Attach an SD card */ + if (dinfo) { + CadenceSDHCIState *sdhci = &(s->soc.sdhci); + DeviceState *card = qdev_new(TYPE_SD_CARD); + + qdev_prop_set_drive_err(card, "drive", blk_by_legacy_dinfo(dinfo), + &error_fatal); + qdev_realize_and_unref(card, sdhci->bus, &error_fatal); + } + + /* + * We use the following table to select which payload we execute. + * + * -bios | -kernel | payload + * -------+------------+-------- + * N | N | HSS + * Y | don't care | HSS + * N | Y | kernel + * + * This preserves backwards compatibility with how -bios used to be + * exposed to users, while still allowing direct kernel booting as well. + * + * When -kernel is used for direct boot, -dtb must be present to provide + * a valid device tree for the board, as we don't generate a device tree. 
+ */ + + if (machine->kernel_filename && machine->dtb) { + int fdt_size; + machine->fdt = load_device_tree(machine->dtb, &fdt_size); + if (!machine->fdt) { + error_report("load_device_tree() failed"); + exit(1); + } + + firmware_name = RISCV64_BIOS_BIN; + firmware_load_addr = memmap[MICROCHIP_PFSOC_DRAM_LO].base; + kernel_as_payload = true; + } + + if (!kernel_as_payload) { + firmware_name = BIOS_FILENAME; + firmware_load_addr = RESET_VECTOR; + } + + /* Load the firmware */ + firmware_end_addr = riscv_find_and_load_firmware(machine, firmware_name, + firmware_load_addr, NULL); + + if (kernel_as_payload) { + kernel_start_addr = riscv_calc_kernel_start_addr(&s->soc.u_cpus, + firmware_end_addr); + + kernel_entry = riscv_load_kernel(machine->kernel_filename, + kernel_start_addr, NULL); + + if (machine->initrd_filename) { + hwaddr start; + hwaddr end = riscv_load_initrd(machine->initrd_filename, + machine->ram_size, kernel_entry, + &start); + qemu_fdt_setprop_cell(machine->fdt, "/chosen", + "linux,initrd-start", start); + qemu_fdt_setprop_cell(machine->fdt, "/chosen", + "linux,initrd-end", end); + } + + if (machine->kernel_cmdline) { + qemu_fdt_setprop_string(machine->fdt, "/chosen", + "bootargs", machine->kernel_cmdline); + } + + /* Compute the fdt load address in dram */ + fdt_load_addr = riscv_load_fdt(memmap[MICROCHIP_PFSOC_DRAM_LO].base, + machine->ram_size, machine->fdt); + /* Load the reset vector */ + riscv_setup_rom_reset_vec(machine, &s->soc.u_cpus, firmware_load_addr, + memmap[MICROCHIP_PFSOC_ENVM_DATA].base, + memmap[MICROCHIP_PFSOC_ENVM_DATA].size, + kernel_entry, fdt_load_addr, machine->fdt); + } +} + +static void microchip_icicle_kit_machine_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + + mc->desc = "Microchip PolarFire SoC Icicle Kit"; + mc->init = microchip_icicle_kit_machine_init; + mc->max_cpus = MICROCHIP_PFSOC_MANAGEMENT_CPU_COUNT + + MICROCHIP_PFSOC_COMPUTE_CPU_COUNT; + mc->min_cpus = MICROCHIP_PFSOC_MANAGEMENT_CPU_COUNT + 1; + mc->default_cpus = mc->min_cpus; + mc->default_ram_id = "microchip.icicle.kit.ram"; + + /* + * Map 513 MiB high memory, the minimum required high memory size, because + * HSS will run a memory test against the high memory address range + * regardless of physical memory installed. + * + * See memory_tests() in mss_ddr.c in the HSS source code. + */ + mc->default_ram_size = 1537 * MiB; +} + +static const TypeInfo microchip_icicle_kit_machine_typeinfo = { + .name = MACHINE_TYPE_NAME("microchip-icicle-kit"), + .parent = TYPE_MACHINE, + .class_init = microchip_icicle_kit_machine_class_init, + .instance_size = sizeof(MicrochipIcicleKitState), +}; + +static void microchip_icicle_kit_machine_init_register_types(void) +{ + type_register_static(&microchip_icicle_kit_machine_typeinfo); +} + +type_init(microchip_icicle_kit_machine_init_register_types) diff --git a/hw/riscv/numa.c b/hw/riscv/numa.c new file mode 100644 index 000000000..7fe92d402 --- /dev/null +++ b/hw/riscv/numa.c @@ -0,0 +1,241 @@ +/* + * QEMU RISC-V NUMA Helper + * + * Copyright (c) 2020 Western Digital Corporation or its affiliates. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "qemu/error-report.h" +#include "qapi/error.h" +#include "hw/boards.h" +#include "hw/qdev-properties.h" +#include "hw/riscv/numa.h" +#include "sysemu/device_tree.h" + +static bool numa_enabled(const MachineState *ms) +{ + return (ms->numa_state && ms->numa_state->num_nodes) ? true : false; +} + +int riscv_socket_count(const MachineState *ms) +{ + return (numa_enabled(ms)) ? ms->numa_state->num_nodes : 1; +} + +int riscv_socket_first_hartid(const MachineState *ms, int socket_id) +{ + int i, first_hartid = ms->smp.cpus; + + if (!numa_enabled(ms)) { + return (!socket_id) ? 0 : -1; + } + + for (i = 0; i < ms->smp.cpus; i++) { + if (ms->possible_cpus->cpus[i].props.node_id != socket_id) { + continue; + } + if (i < first_hartid) { + first_hartid = i; + } + } + + return (first_hartid < ms->smp.cpus) ? first_hartid : -1; +} + +int riscv_socket_last_hartid(const MachineState *ms, int socket_id) +{ + int i, last_hartid = -1; + + if (!numa_enabled(ms)) { + return (!socket_id) ? ms->smp.cpus - 1 : -1; + } + + for (i = 0; i < ms->smp.cpus; i++) { + if (ms->possible_cpus->cpus[i].props.node_id != socket_id) { + continue; + } + if (i > last_hartid) { + last_hartid = i; + } + } + + return (last_hartid < ms->smp.cpus) ? last_hartid : -1; +} + +int riscv_socket_hart_count(const MachineState *ms, int socket_id) +{ + int first_hartid, last_hartid; + + if (!numa_enabled(ms)) { + return (!socket_id) ? ms->smp.cpus : -1; + } + + first_hartid = riscv_socket_first_hartid(ms, socket_id); + if (first_hartid < 0) { + return -1; + } + + last_hartid = riscv_socket_last_hartid(ms, socket_id); + if (last_hartid < 0) { + return -1; + } + + if (first_hartid > last_hartid) { + return -1; + } + + return last_hartid - first_hartid + 1; +} + +bool riscv_socket_check_hartids(const MachineState *ms, int socket_id) +{ + int i, first_hartid, last_hartid; + + if (!numa_enabled(ms)) { + return (!socket_id) ? true : false; + } + + first_hartid = riscv_socket_first_hartid(ms, socket_id); + if (first_hartid < 0) { + return false; + } + + last_hartid = riscv_socket_last_hartid(ms, socket_id); + if (last_hartid < 0) { + return false; + } + + for (i = first_hartid; i <= last_hartid; i++) { + if (ms->possible_cpus->cpus[i].props.node_id != socket_id) { + return false; + } + } + + return true; +} + +uint64_t riscv_socket_mem_offset(const MachineState *ms, int socket_id) +{ + int i; + uint64_t mem_offset = 0; + + if (!numa_enabled(ms)) { + return 0; + } + + for (i = 0; i < ms->numa_state->num_nodes; i++) { + if (i == socket_id) { + break; + } + mem_offset += ms->numa_state->nodes[i].node_mem; + } + + return (i == socket_id) ? mem_offset : 0; +} + +uint64_t riscv_socket_mem_size(const MachineState *ms, int socket_id) +{ + if (!numa_enabled(ms)) { + return (!socket_id) ? ms->ram_size : 0; + } + + return (socket_id < ms->numa_state->num_nodes) ? 
+ ms->numa_state->nodes[socket_id].node_mem : 0; +} + +void riscv_socket_fdt_write_id(const MachineState *ms, void *fdt, + const char *node_name, int socket_id) +{ + if (numa_enabled(ms)) { + qemu_fdt_setprop_cell(fdt, node_name, "numa-node-id", socket_id); + } +} + +void riscv_socket_fdt_write_distance_matrix(const MachineState *ms, void *fdt) +{ + int i, j, idx; + uint32_t *dist_matrix, dist_matrix_size; + + if (numa_enabled(ms) && ms->numa_state->have_numa_distance) { + dist_matrix_size = riscv_socket_count(ms) * riscv_socket_count(ms); + dist_matrix_size *= (3 * sizeof(uint32_t)); + dist_matrix = g_malloc0(dist_matrix_size); + + for (i = 0; i < riscv_socket_count(ms); i++) { + for (j = 0; j < riscv_socket_count(ms); j++) { + idx = (i * riscv_socket_count(ms) + j) * 3; + dist_matrix[idx + 0] = cpu_to_be32(i); + dist_matrix[idx + 1] = cpu_to_be32(j); + dist_matrix[idx + 2] = + cpu_to_be32(ms->numa_state->nodes[i].distance[j]); + } + } + + qemu_fdt_add_subnode(fdt, "/distance-map"); + qemu_fdt_setprop_string(fdt, "/distance-map", "compatible", + "numa-distance-map-v1"); + qemu_fdt_setprop(fdt, "/distance-map", "distance-matrix", + dist_matrix, dist_matrix_size); + g_free(dist_matrix); + } +} + +CpuInstanceProperties +riscv_numa_cpu_index_to_props(MachineState *ms, unsigned cpu_index) +{ + MachineClass *mc = MACHINE_GET_CLASS(ms); + const CPUArchIdList *possible_cpus = mc->possible_cpu_arch_ids(ms); + + assert(cpu_index < possible_cpus->len); + return possible_cpus->cpus[cpu_index].props; +} + +int64_t riscv_numa_get_default_cpu_node_id(const MachineState *ms, int idx) +{ + int64_t nidx = 0; + + if (ms->numa_state->num_nodes) { + nidx = idx / (ms->smp.cpus / ms->numa_state->num_nodes); + if (ms->numa_state->num_nodes <= nidx) { + nidx = ms->numa_state->num_nodes - 1; + } + } + + return nidx; +} + +const CPUArchIdList *riscv_numa_possible_cpu_arch_ids(MachineState *ms) +{ + int n; + unsigned int max_cpus = ms->smp.max_cpus; + + if (ms->possible_cpus) { + assert(ms->possible_cpus->len == max_cpus); + return ms->possible_cpus; + } + + ms->possible_cpus = g_malloc0(sizeof(CPUArchIdList) + + sizeof(CPUArchId) * max_cpus); + ms->possible_cpus->len = max_cpus; + for (n = 0; n < ms->possible_cpus->len; n++) { + ms->possible_cpus->cpus[n].type = ms->cpu_type; + ms->possible_cpus->cpus[n].arch_id = n; + ms->possible_cpus->cpus[n].props.has_core_id = true; + ms->possible_cpus->cpus[n].props.core_id = n; + } + + return ms->possible_cpus; +} diff --git a/hw/riscv/opentitan.c b/hw/riscv/opentitan.c new file mode 100644 index 000000000..c531450b9 --- /dev/null +++ b/hw/riscv/opentitan.c @@ -0,0 +1,284 @@ +/* + * QEMU RISC-V Board Compatible with OpenTitan FPGA platform + * + * Copyright (c) 2020 Western Digital + * + * Provides a board compatible with the OpenTitan FPGA platform: + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . 
+ */ + +#include "qemu/osdep.h" +#include "qemu/cutils.h" +#include "hw/riscv/opentitan.h" +#include "qapi/error.h" +#include "hw/boards.h" +#include "hw/misc/unimp.h" +#include "hw/riscv/boot.h" +#include "qemu/units.h" +#include "sysemu/sysemu.h" + +static const MemMapEntry ibex_memmap[] = { + [IBEX_DEV_ROM] = { 0x00008000, 16 * KiB }, + [IBEX_DEV_RAM] = { 0x10000000, 0x10000 }, + [IBEX_DEV_FLASH] = { 0x20000000, 0x80000 }, + [IBEX_DEV_UART] = { 0x40000000, 0x1000 }, + [IBEX_DEV_GPIO] = { 0x40040000, 0x1000 }, + [IBEX_DEV_SPI] = { 0x40050000, 0x1000 }, + [IBEX_DEV_I2C] = { 0x40080000, 0x1000 }, + [IBEX_DEV_PATTGEN] = { 0x400e0000, 0x1000 }, + [IBEX_DEV_TIMER] = { 0x40100000, 0x1000 }, + [IBEX_DEV_SENSOR_CTRL] = { 0x40110000, 0x1000 }, + [IBEX_DEV_OTP_CTRL] = { 0x40130000, 0x4000 }, + [IBEX_DEV_USBDEV] = { 0x40150000, 0x1000 }, + [IBEX_DEV_PWRMGR] = { 0x40400000, 0x1000 }, + [IBEX_DEV_RSTMGR] = { 0x40410000, 0x1000 }, + [IBEX_DEV_CLKMGR] = { 0x40420000, 0x1000 }, + [IBEX_DEV_PINMUX] = { 0x40460000, 0x1000 }, + [IBEX_DEV_PADCTRL] = { 0x40470000, 0x1000 }, + [IBEX_DEV_FLASH_CTRL] = { 0x41000000, 0x1000 }, + [IBEX_DEV_AES] = { 0x41100000, 0x1000 }, + [IBEX_DEV_HMAC] = { 0x41110000, 0x1000 }, + [IBEX_DEV_KMAC] = { 0x41120000, 0x1000 }, + [IBEX_DEV_OTBN] = { 0x41130000, 0x10000 }, + [IBEX_DEV_KEYMGR] = { 0x41140000, 0x1000 }, + [IBEX_DEV_CSRNG] = { 0x41150000, 0x1000 }, + [IBEX_DEV_ENTROPY] = { 0x41160000, 0x1000 }, + [IBEX_DEV_EDNO] = { 0x41170000, 0x1000 }, + [IBEX_DEV_EDN1] = { 0x41180000, 0x1000 }, + [IBEX_DEV_ALERT_HANDLER] = { 0x411b0000, 0x1000 }, + [IBEX_DEV_NMI_GEN] = { 0x411c0000, 0x1000 }, + [IBEX_DEV_PERI] = { 0x411f0000, 0x10000 }, + [IBEX_DEV_PLIC] = { 0x48000000, 0x4005000 }, + [IBEX_DEV_FLASH_VIRTUAL] = { 0x80000000, 0x80000 }, +}; + +static void opentitan_board_init(MachineState *machine) +{ + MachineClass *mc = MACHINE_GET_CLASS(machine); + const MemMapEntry *memmap = ibex_memmap; + OpenTitanState *s = g_new0(OpenTitanState, 1); + MemoryRegion *sys_mem = get_system_memory(); + + if (machine->ram_size != mc->default_ram_size) { + char *sz = size_to_str(mc->default_ram_size); + error_report("Invalid RAM size, should be %s", sz); + g_free(sz); + exit(EXIT_FAILURE); + } + + /* Initialize SoC */ + object_initialize_child(OBJECT(machine), "soc", &s->soc, + TYPE_RISCV_IBEX_SOC); + qdev_realize(DEVICE(&s->soc), NULL, &error_abort); + + memory_region_add_subregion(sys_mem, + memmap[IBEX_DEV_RAM].base, machine->ram); + + if (machine->firmware) { + riscv_load_firmware(machine->firmware, memmap[IBEX_DEV_RAM].base, NULL); + } + + if (machine->kernel_filename) { + riscv_load_kernel(machine->kernel_filename, + memmap[IBEX_DEV_RAM].base, NULL); + } +} + +static void opentitan_machine_init(MachineClass *mc) +{ + mc->desc = "RISC-V Board compatible with OpenTitan"; + mc->init = opentitan_board_init; + mc->max_cpus = 1; + mc->default_cpu_type = TYPE_RISCV_CPU_IBEX; + mc->default_ram_id = "riscv.lowrisc.ibex.ram"; + mc->default_ram_size = ibex_memmap[IBEX_DEV_RAM].size; +} + +DEFINE_MACHINE("opentitan", opentitan_machine_init) + +static void lowrisc_ibex_soc_init(Object *obj) +{ + LowRISCIbexSoCState *s = RISCV_IBEX_SOC(obj); + + object_initialize_child(obj, "cpus", &s->cpus, TYPE_RISCV_HART_ARRAY); + + object_initialize_child(obj, "plic", &s->plic, TYPE_SIFIVE_PLIC); + + object_initialize_child(obj, "uart", &s->uart, TYPE_IBEX_UART); + + object_initialize_child(obj, "timer", &s->timer, TYPE_IBEX_TIMER); +} + +static void lowrisc_ibex_soc_realize(DeviceState *dev_soc, Error **errp) +{ + const 
MemMapEntry *memmap = ibex_memmap; + MachineState *ms = MACHINE(qdev_get_machine()); + LowRISCIbexSoCState *s = RISCV_IBEX_SOC(dev_soc); + MemoryRegion *sys_mem = get_system_memory(); + int i; + + object_property_set_str(OBJECT(&s->cpus), "cpu-type", ms->cpu_type, + &error_abort); + object_property_set_int(OBJECT(&s->cpus), "num-harts", ms->smp.cpus, + &error_abort); + object_property_set_int(OBJECT(&s->cpus), "resetvec", 0x8080, &error_abort); + sysbus_realize(SYS_BUS_DEVICE(&s->cpus), &error_abort); + + /* Boot ROM */ + memory_region_init_rom(&s->rom, OBJECT(dev_soc), "riscv.lowrisc.ibex.rom", + memmap[IBEX_DEV_ROM].size, &error_fatal); + memory_region_add_subregion(sys_mem, + memmap[IBEX_DEV_ROM].base, &s->rom); + + /* Flash memory */ + memory_region_init_rom(&s->flash_mem, OBJECT(dev_soc), "riscv.lowrisc.ibex.flash", + memmap[IBEX_DEV_FLASH].size, &error_fatal); + memory_region_init_alias(&s->flash_alias, OBJECT(dev_soc), + "riscv.lowrisc.ibex.flash_virtual", &s->flash_mem, 0, + memmap[IBEX_DEV_FLASH_VIRTUAL].size); + memory_region_add_subregion(sys_mem, memmap[IBEX_DEV_FLASH].base, + &s->flash_mem); + memory_region_add_subregion(sys_mem, memmap[IBEX_DEV_FLASH_VIRTUAL].base, + &s->flash_alias); + + /* PLIC */ + qdev_prop_set_string(DEVICE(&s->plic), "hart-config", "M"); + qdev_prop_set_uint32(DEVICE(&s->plic), "hartid-base", 0); + qdev_prop_set_uint32(DEVICE(&s->plic), "num-sources", 180); + qdev_prop_set_uint32(DEVICE(&s->plic), "num-priorities", 3); + qdev_prop_set_uint32(DEVICE(&s->plic), "priority-base", 0x00); + qdev_prop_set_uint32(DEVICE(&s->plic), "pending-base", 0x1000); + qdev_prop_set_uint32(DEVICE(&s->plic), "enable-base", 0x2000); + qdev_prop_set_uint32(DEVICE(&s->plic), "enable-stride", 0x18); + qdev_prop_set_uint32(DEVICE(&s->plic), "context-base", 0x200000); + qdev_prop_set_uint32(DEVICE(&s->plic), "context-stride", 8); + qdev_prop_set_uint32(DEVICE(&s->plic), "aperture-size", memmap[IBEX_DEV_PLIC].size); + + if (!sysbus_realize(SYS_BUS_DEVICE(&s->plic), errp)) { + return; + } + sysbus_mmio_map(SYS_BUS_DEVICE(&s->plic), 0, memmap[IBEX_DEV_PLIC].base); + + for (i = 0; i < ms->smp.cpus; i++) { + CPUState *cpu = qemu_get_cpu(i); + + qdev_connect_gpio_out(DEVICE(&s->plic), ms->smp.cpus + i, + qdev_get_gpio_in(DEVICE(cpu), IRQ_M_EXT)); + } + + /* UART */ + qdev_prop_set_chr(DEVICE(&(s->uart)), "chardev", serial_hd(0)); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->uart), errp)) { + return; + } + sysbus_mmio_map(SYS_BUS_DEVICE(&s->uart), 0, memmap[IBEX_DEV_UART].base); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->uart), + 0, qdev_get_gpio_in(DEVICE(&s->plic), + IBEX_UART0_TX_WATERMARK_IRQ)); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->uart), + 1, qdev_get_gpio_in(DEVICE(&s->plic), + IBEX_UART0_RX_WATERMARK_IRQ)); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->uart), + 2, qdev_get_gpio_in(DEVICE(&s->plic), + IBEX_UART0_TX_EMPTY_IRQ)); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->uart), + 3, qdev_get_gpio_in(DEVICE(&s->plic), + IBEX_UART0_RX_OVERFLOW_IRQ)); + + if (!sysbus_realize(SYS_BUS_DEVICE(&s->timer), errp)) { + return; + } + sysbus_mmio_map(SYS_BUS_DEVICE(&s->timer), 0, memmap[IBEX_DEV_TIMER].base); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->timer), + 0, qdev_get_gpio_in(DEVICE(&s->plic), + IBEX_TIMER_TIMEREXPIRED0_0)); + qdev_connect_gpio_out(DEVICE(&s->timer), 0, + qdev_get_gpio_in(DEVICE(qemu_get_cpu(0)), + IRQ_M_TIMER)); + + create_unimplemented_device("riscv.lowrisc.ibex.gpio", + memmap[IBEX_DEV_GPIO].base, memmap[IBEX_DEV_GPIO].size); + create_unimplemented_device("riscv.lowrisc.ibex.spi", + 
memmap[IBEX_DEV_SPI].base, memmap[IBEX_DEV_SPI].size); + create_unimplemented_device("riscv.lowrisc.ibex.i2c", + memmap[IBEX_DEV_I2C].base, memmap[IBEX_DEV_I2C].size); + create_unimplemented_device("riscv.lowrisc.ibex.pattgen", + memmap[IBEX_DEV_PATTGEN].base, memmap[IBEX_DEV_PATTGEN].size); + create_unimplemented_device("riscv.lowrisc.ibex.sensor_ctrl", + memmap[IBEX_DEV_SENSOR_CTRL].base, memmap[IBEX_DEV_SENSOR_CTRL].size); + create_unimplemented_device("riscv.lowrisc.ibex.otp_ctrl", + memmap[IBEX_DEV_OTP_CTRL].base, memmap[IBEX_DEV_OTP_CTRL].size); + create_unimplemented_device("riscv.lowrisc.ibex.pwrmgr", + memmap[IBEX_DEV_PWRMGR].base, memmap[IBEX_DEV_PWRMGR].size); + create_unimplemented_device("riscv.lowrisc.ibex.rstmgr", + memmap[IBEX_DEV_RSTMGR].base, memmap[IBEX_DEV_RSTMGR].size); + create_unimplemented_device("riscv.lowrisc.ibex.clkmgr", + memmap[IBEX_DEV_CLKMGR].base, memmap[IBEX_DEV_CLKMGR].size); + create_unimplemented_device("riscv.lowrisc.ibex.pinmux", + memmap[IBEX_DEV_PINMUX].base, memmap[IBEX_DEV_PINMUX].size); + create_unimplemented_device("riscv.lowrisc.ibex.padctrl", + memmap[IBEX_DEV_PADCTRL].base, memmap[IBEX_DEV_PADCTRL].size); + create_unimplemented_device("riscv.lowrisc.ibex.usbdev", + memmap[IBEX_DEV_USBDEV].base, memmap[IBEX_DEV_USBDEV].size); + create_unimplemented_device("riscv.lowrisc.ibex.flash_ctrl", + memmap[IBEX_DEV_FLASH_CTRL].base, memmap[IBEX_DEV_FLASH_CTRL].size); + create_unimplemented_device("riscv.lowrisc.ibex.aes", + memmap[IBEX_DEV_AES].base, memmap[IBEX_DEV_AES].size); + create_unimplemented_device("riscv.lowrisc.ibex.hmac", + memmap[IBEX_DEV_HMAC].base, memmap[IBEX_DEV_HMAC].size); + create_unimplemented_device("riscv.lowrisc.ibex.kmac", + memmap[IBEX_DEV_KMAC].base, memmap[IBEX_DEV_KMAC].size); + create_unimplemented_device("riscv.lowrisc.ibex.keymgr", + memmap[IBEX_DEV_KEYMGR].base, memmap[IBEX_DEV_KEYMGR].size); + create_unimplemented_device("riscv.lowrisc.ibex.csrng", + memmap[IBEX_DEV_CSRNG].base, memmap[IBEX_DEV_CSRNG].size); + create_unimplemented_device("riscv.lowrisc.ibex.entropy", + memmap[IBEX_DEV_ENTROPY].base, memmap[IBEX_DEV_ENTROPY].size); + create_unimplemented_device("riscv.lowrisc.ibex.edn0", + memmap[IBEX_DEV_EDNO].base, memmap[IBEX_DEV_EDNO].size); + create_unimplemented_device("riscv.lowrisc.ibex.edn1", + memmap[IBEX_DEV_EDN1].base, memmap[IBEX_DEV_EDN1].size); + create_unimplemented_device("riscv.lowrisc.ibex.alert_handler", + memmap[IBEX_DEV_ALERT_HANDLER].base, memmap[IBEX_DEV_ALERT_HANDLER].size); + create_unimplemented_device("riscv.lowrisc.ibex.nmi_gen", + memmap[IBEX_DEV_NMI_GEN].base, memmap[IBEX_DEV_NMI_GEN].size); + create_unimplemented_device("riscv.lowrisc.ibex.otbn", + memmap[IBEX_DEV_OTBN].base, memmap[IBEX_DEV_OTBN].size); + create_unimplemented_device("riscv.lowrisc.ibex.peri", + memmap[IBEX_DEV_PERI].base, memmap[IBEX_DEV_PERI].size); +} + +static void lowrisc_ibex_soc_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + + dc->realize = lowrisc_ibex_soc_realize; + /* Reason: Uses serial_hds in realize function, thus can't be used twice */ + dc->user_creatable = false; +} + +static const TypeInfo lowrisc_ibex_soc_type_info = { + .name = TYPE_RISCV_IBEX_SOC, + .parent = TYPE_DEVICE, + .instance_size = sizeof(LowRISCIbexSoCState), + .instance_init = lowrisc_ibex_soc_init, + .class_init = lowrisc_ibex_soc_class_init, +}; + +static void lowrisc_ibex_soc_register_types(void) +{ + type_register_static(&lowrisc_ibex_soc_type_info); +} + +type_init(lowrisc_ibex_soc_register_types) diff 
--git a/hw/riscv/riscv_hart.c b/hw/riscv/riscv_hart.c new file mode 100644 index 000000000..613ea2aaa --- /dev/null +++ b/hw/riscv/riscv_hart.c @@ -0,0 +1,89 @@ +/* + * QEMU RISCV Hart Array + * + * Copyright (c) 2017 SiFive, Inc. + * + * Holds the state of a homogeneous array of RISC-V harts + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/module.h" +#include "sysemu/reset.h" +#include "hw/sysbus.h" +#include "target/riscv/cpu.h" +#include "hw/qdev-properties.h" +#include "hw/riscv/riscv_hart.h" + +static Property riscv_harts_props[] = { + DEFINE_PROP_UINT32("num-harts", RISCVHartArrayState, num_harts, 1), + DEFINE_PROP_UINT32("hartid-base", RISCVHartArrayState, hartid_base, 0), + DEFINE_PROP_STRING("cpu-type", RISCVHartArrayState, cpu_type), + DEFINE_PROP_UINT64("resetvec", RISCVHartArrayState, resetvec, + DEFAULT_RSTVEC), + DEFINE_PROP_END_OF_LIST(), +}; + +static void riscv_harts_cpu_reset(void *opaque) +{ + RISCVCPU *cpu = opaque; + cpu_reset(CPU(cpu)); +} + +static bool riscv_hart_realize(RISCVHartArrayState *s, int idx, + char *cpu_type, Error **errp) +{ + object_initialize_child(OBJECT(s), "harts[*]", &s->harts[idx], cpu_type); + qdev_prop_set_uint64(DEVICE(&s->harts[idx]), "resetvec", s->resetvec); + s->harts[idx].env.mhartid = s->hartid_base + idx; + qemu_register_reset(riscv_harts_cpu_reset, &s->harts[idx]); + return qdev_realize(DEVICE(&s->harts[idx]), NULL, errp); +} + +static void riscv_harts_realize(DeviceState *dev, Error **errp) +{ + RISCVHartArrayState *s = RISCV_HART_ARRAY(dev); + int n; + + s->harts = g_new0(RISCVCPU, s->num_harts); + + for (n = 0; n < s->num_harts; n++) { + if (!riscv_hart_realize(s, n, s->cpu_type, errp)) { + return; + } + } +} + +static void riscv_harts_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + device_class_set_props(dc, riscv_harts_props); + dc->realize = riscv_harts_realize; +} + +static const TypeInfo riscv_harts_info = { + .name = TYPE_RISCV_HART_ARRAY, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(RISCVHartArrayState), + .class_init = riscv_harts_class_init, +}; + +static void riscv_harts_register_types(void) +{ + type_register_static(&riscv_harts_info); +} + +type_init(riscv_harts_register_types) diff --git a/hw/riscv/shakti_c.c b/hw/riscv/shakti_c.c new file mode 100644 index 000000000..90e2cf609 --- /dev/null +++ b/hw/riscv/shakti_c.c @@ -0,0 +1,190 @@ +/* + * Shakti C-class SoC emulation + * + * Copyright (c) 2021 Vijai Kumar K + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#include "qemu/osdep.h" +#include "hw/boards.h" +#include "hw/riscv/shakti_c.h" +#include "qapi/error.h" +#include "hw/intc/sifive_plic.h" +#include "hw/intc/riscv_aclint.h" +#include "sysemu/sysemu.h" +#include "hw/qdev-properties.h" +#include "exec/address-spaces.h" +#include "hw/riscv/boot.h" + + +static const struct MemmapEntry { + hwaddr base; + hwaddr size; +} shakti_c_memmap[] = { + [SHAKTI_C_ROM] = { 0x00001000, 0x2000 }, + [SHAKTI_C_RAM] = { 0x80000000, 0x0 }, + [SHAKTI_C_UART] = { 0x00011300, 0x00040 }, + [SHAKTI_C_GPIO] = { 0x020d0000, 0x00100 }, + [SHAKTI_C_PLIC] = { 0x0c000000, 0x20000 }, + [SHAKTI_C_CLINT] = { 0x02000000, 0xc0000 }, + [SHAKTI_C_I2C] = { 0x20c00000, 0x00100 }, +}; + +static void shakti_c_machine_state_init(MachineState *mstate) +{ + ShaktiCMachineState *sms = RISCV_SHAKTI_MACHINE(mstate); + MemoryRegion *system_memory = get_system_memory(); + + /* Allow only Shakti C CPU for this platform */ + if (strcmp(mstate->cpu_type, TYPE_RISCV_CPU_SHAKTI_C) != 0) { + error_report("This board can only be used with Shakti C CPU"); + exit(1); + } + + /* Initialize SoC */ + object_initialize_child(OBJECT(mstate), "soc", &sms->soc, + TYPE_RISCV_SHAKTI_SOC); + qdev_realize(DEVICE(&sms->soc), NULL, &error_abort); + + /* register RAM */ + memory_region_add_subregion(system_memory, + shakti_c_memmap[SHAKTI_C_RAM].base, + mstate->ram); + + /* ROM reset vector */ + riscv_setup_rom_reset_vec(mstate, &sms->soc.cpus, + shakti_c_memmap[SHAKTI_C_RAM].base, + shakti_c_memmap[SHAKTI_C_ROM].base, + shakti_c_memmap[SHAKTI_C_ROM].size, 0, 0, + NULL); + if (mstate->firmware) { + riscv_load_firmware(mstate->firmware, + shakti_c_memmap[SHAKTI_C_RAM].base, + NULL); + } +} + +static void shakti_c_machine_instance_init(Object *obj) +{ +} + +static void shakti_c_machine_class_init(ObjectClass *klass, void *data) +{ + MachineClass *mc = MACHINE_CLASS(klass); + mc->desc = "RISC-V Board compatible with Shakti SDK"; + mc->init = shakti_c_machine_state_init; + mc->default_cpu_type = TYPE_RISCV_CPU_SHAKTI_C; + mc->default_ram_id = "riscv.shakti.c.ram"; +} + +static const TypeInfo shakti_c_machine_type_info = { + .name = TYPE_RISCV_SHAKTI_MACHINE, + .parent = TYPE_MACHINE, + .class_init = shakti_c_machine_class_init, + .instance_init = shakti_c_machine_instance_init, + .instance_size = sizeof(ShaktiCMachineState), +}; + +static void shakti_c_machine_type_info_register(void) +{ + type_register_static(&shakti_c_machine_type_info); +} +type_init(shakti_c_machine_type_info_register) + +static void shakti_c_soc_state_realize(DeviceState *dev, Error **errp) +{ + MachineState *ms = MACHINE(qdev_get_machine()); + ShaktiCSoCState *sss = RISCV_SHAKTI_SOC(dev); + MemoryRegion *system_memory = get_system_memory(); + + sysbus_realize(SYS_BUS_DEVICE(&sss->cpus), &error_abort); + + sss->plic = sifive_plic_create(shakti_c_memmap[SHAKTI_C_PLIC].base, + (char *)SHAKTI_C_PLIC_HART_CONFIG, ms->smp.cpus, 0, + SHAKTI_C_PLIC_NUM_SOURCES, + SHAKTI_C_PLIC_NUM_PRIORITIES, + SHAKTI_C_PLIC_PRIORITY_BASE, + SHAKTI_C_PLIC_PENDING_BASE, + SHAKTI_C_PLIC_ENABLE_BASE, + SHAKTI_C_PLIC_ENABLE_STRIDE, + SHAKTI_C_PLIC_CONTEXT_BASE, + SHAKTI_C_PLIC_CONTEXT_STRIDE, + shakti_c_memmap[SHAKTI_C_PLIC].size); + + riscv_aclint_swi_create(shakti_c_memmap[SHAKTI_C_CLINT].base, + 0, 1, false); + riscv_aclint_mtimer_create(shakti_c_memmap[SHAKTI_C_CLINT].base + + 
RISCV_ACLINT_SWI_SIZE, + RISCV_ACLINT_DEFAULT_MTIMER_SIZE, 0, 1, + RISCV_ACLINT_DEFAULT_MTIMECMP, RISCV_ACLINT_DEFAULT_MTIME, + RISCV_ACLINT_DEFAULT_TIMEBASE_FREQ, false); + + qdev_prop_set_chr(DEVICE(&(sss->uart)), "chardev", serial_hd(0)); + if (!sysbus_realize(SYS_BUS_DEVICE(&sss->uart), errp)) { + return; + } + sysbus_mmio_map(SYS_BUS_DEVICE(&sss->uart), 0, + shakti_c_memmap[SHAKTI_C_UART].base); + + /* ROM */ + memory_region_init_rom(&sss->rom, OBJECT(dev), "riscv.shakti.c.rom", + shakti_c_memmap[SHAKTI_C_ROM].size, &error_fatal); + memory_region_add_subregion(system_memory, + shakti_c_memmap[SHAKTI_C_ROM].base, &sss->rom); +} + +static void shakti_c_soc_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + dc->realize = shakti_c_soc_state_realize; + /* + * Reasons: + * - Creates CPUs in riscv_hart_realize(), and can create unintended + * CPUs + * - Uses serial_hds in realize function, thus can't be used twice + */ + dc->user_creatable = false; +} + +static void shakti_c_soc_instance_init(Object *obj) +{ + ShaktiCSoCState *sss = RISCV_SHAKTI_SOC(obj); + + object_initialize_child(obj, "cpus", &sss->cpus, TYPE_RISCV_HART_ARRAY); + object_initialize_child(obj, "uart", &sss->uart, TYPE_SHAKTI_UART); + + /* + * The CPU type is fixed; passing it from the command line is not yet + * supported, so set it here in instance_init. Once that is supported, + * ms->cpu_type should be used instead of TYPE_RISCV_CPU_SHAKTI_C. + */ + object_property_set_str(OBJECT(&sss->cpus), "cpu-type", + TYPE_RISCV_CPU_SHAKTI_C, &error_abort); + object_property_set_int(OBJECT(&sss->cpus), "num-harts", 1, + &error_abort); +} + +static const TypeInfo shakti_c_type_info = { + .name = TYPE_RISCV_SHAKTI_SOC, + .parent = TYPE_DEVICE, + .class_init = shakti_c_soc_class_init, + .instance_init = shakti_c_soc_instance_init, + .instance_size = sizeof(ShaktiCSoCState), +}; + +static void shakti_c_type_info_register(void) +{ + type_register_static(&shakti_c_type_info); +} +type_init(shakti_c_type_info_register) diff --git a/hw/riscv/sifive_e.c b/hw/riscv/sifive_e.c new file mode 100644 index 000000000..9b206407a --- /dev/null +++ b/hw/riscv/sifive_e.c @@ -0,0 +1,294 @@ +/* + * QEMU RISC-V Board Compatible with SiFive Freedom E SDK + * + * Copyright (c) 2017 SiFive, Inc. + * + * Provides a board compatible with the SiFive Freedom E SDK: + * + * 0) UART + * 1) CLINT (Core Level Interruptor) + * 2) PLIC (Platform Level Interrupt Controller) + * 3) PRCI (Power, Reset, Clock, Interrupt) + * 4) Registers emulated as RAM: AON, GPIO, QSPI, PWM + * 5) Flash memory emulated as RAM + * + * The Mask ROM reset vector jumps to the flash payload at 0x2040_0000. + * The OTP ROM and Flash boot code will be emulated in a future version. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . 
+ */ + +#include "qemu/osdep.h" +#include "qemu/cutils.h" +#include "qemu/error-report.h" +#include "qapi/error.h" +#include "hw/boards.h" +#include "hw/loader.h" +#include "hw/sysbus.h" +#include "hw/char/serial.h" +#include "hw/misc/unimp.h" +#include "target/riscv/cpu.h" +#include "hw/riscv/riscv_hart.h" +#include "hw/riscv/sifive_e.h" +#include "hw/riscv/boot.h" +#include "hw/char/sifive_uart.h" +#include "hw/intc/riscv_aclint.h" +#include "hw/intc/sifive_plic.h" +#include "hw/misc/sifive_e_prci.h" +#include "chardev/char.h" +#include "sysemu/sysemu.h" + +static const MemMapEntry sifive_e_memmap[] = { + [SIFIVE_E_DEV_DEBUG] = { 0x0, 0x1000 }, + [SIFIVE_E_DEV_MROM] = { 0x1000, 0x2000 }, + [SIFIVE_E_DEV_OTP] = { 0x20000, 0x2000 }, + [SIFIVE_E_DEV_CLINT] = { 0x2000000, 0x10000 }, + [SIFIVE_E_DEV_PLIC] = { 0xc000000, 0x4000000 }, + [SIFIVE_E_DEV_AON] = { 0x10000000, 0x8000 }, + [SIFIVE_E_DEV_PRCI] = { 0x10008000, 0x8000 }, + [SIFIVE_E_DEV_OTP_CTRL] = { 0x10010000, 0x1000 }, + [SIFIVE_E_DEV_GPIO0] = { 0x10012000, 0x1000 }, + [SIFIVE_E_DEV_UART0] = { 0x10013000, 0x1000 }, + [SIFIVE_E_DEV_QSPI0] = { 0x10014000, 0x1000 }, + [SIFIVE_E_DEV_PWM0] = { 0x10015000, 0x1000 }, + [SIFIVE_E_DEV_UART1] = { 0x10023000, 0x1000 }, + [SIFIVE_E_DEV_QSPI1] = { 0x10024000, 0x1000 }, + [SIFIVE_E_DEV_PWM1] = { 0x10025000, 0x1000 }, + [SIFIVE_E_DEV_QSPI2] = { 0x10034000, 0x1000 }, + [SIFIVE_E_DEV_PWM2] = { 0x10035000, 0x1000 }, + [SIFIVE_E_DEV_XIP] = { 0x20000000, 0x20000000 }, + [SIFIVE_E_DEV_DTIM] = { 0x80000000, 0x4000 } +}; + +static void sifive_e_machine_init(MachineState *machine) +{ + MachineClass *mc = MACHINE_GET_CLASS(machine); + const MemMapEntry *memmap = sifive_e_memmap; + + SiFiveEState *s = RISCV_E_MACHINE(machine); + MemoryRegion *sys_mem = get_system_memory(); + int i; + + if (machine->ram_size != mc->default_ram_size) { + char *sz = size_to_str(mc->default_ram_size); + error_report("Invalid RAM size, should be %s", sz); + g_free(sz); + exit(EXIT_FAILURE); + } + + /* Initialize SoC */ + object_initialize_child(OBJECT(machine), "soc", &s->soc, TYPE_RISCV_E_SOC); + qdev_realize(DEVICE(&s->soc), NULL, &error_abort); + + /* Data Tightly Integrated Memory */ + memory_region_add_subregion(sys_mem, + memmap[SIFIVE_E_DEV_DTIM].base, machine->ram); + + /* Mask ROM reset vector */ + uint32_t reset_vec[4]; + + if (s->revb) { + reset_vec[1] = 0x200102b7; /* 0x1004: lui t0,0x20010 */ + } else { + reset_vec[1] = 0x204002b7; /* 0x1004: lui t0,0x20400 */ + } + reset_vec[2] = 0x00028067; /* 0x1008: jr t0 */ + + reset_vec[0] = reset_vec[3] = 0; + + /* copy in the reset vector in little_endian byte order */ + for (i = 0; i < sizeof(reset_vec) >> 2; i++) { + reset_vec[i] = cpu_to_le32(reset_vec[i]); + } + rom_add_blob_fixed_as("mrom.reset", reset_vec, sizeof(reset_vec), + memmap[SIFIVE_E_DEV_MROM].base, &address_space_memory); + + if (machine->kernel_filename) { + riscv_load_kernel(machine->kernel_filename, + memmap[SIFIVE_E_DEV_DTIM].base, NULL); + } +} + +static bool sifive_e_machine_get_revb(Object *obj, Error **errp) +{ + SiFiveEState *s = RISCV_E_MACHINE(obj); + + return s->revb; +} + +static void sifive_e_machine_set_revb(Object *obj, bool value, Error **errp) +{ + SiFiveEState *s = RISCV_E_MACHINE(obj); + + s->revb = value; +} + +static void sifive_e_machine_instance_init(Object *obj) +{ + SiFiveEState *s = RISCV_E_MACHINE(obj); + + s->revb = false; +} + +static void sifive_e_machine_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + + mc->desc = "RISC-V Board compatible with 
SiFive E SDK"; + mc->init = sifive_e_machine_init; + mc->max_cpus = 1; + mc->default_cpu_type = SIFIVE_E_CPU; + mc->default_ram_id = "riscv.sifive.e.ram"; + mc->default_ram_size = sifive_e_memmap[SIFIVE_E_DEV_DTIM].size; + + object_class_property_add_bool(oc, "revb", sifive_e_machine_get_revb, + sifive_e_machine_set_revb); + object_class_property_set_description(oc, "revb", + "Set on to tell QEMU that it should model " + "the revB HiFive1 board"); +} + +static const TypeInfo sifive_e_machine_typeinfo = { + .name = MACHINE_TYPE_NAME("sifive_e"), + .parent = TYPE_MACHINE, + .class_init = sifive_e_machine_class_init, + .instance_init = sifive_e_machine_instance_init, + .instance_size = sizeof(SiFiveEState), +}; + +static void sifive_e_machine_init_register_types(void) +{ + type_register_static(&sifive_e_machine_typeinfo); +} + +type_init(sifive_e_machine_init_register_types) + +static void sifive_e_soc_init(Object *obj) +{ + MachineState *ms = MACHINE(qdev_get_machine()); + SiFiveESoCState *s = RISCV_E_SOC(obj); + + object_initialize_child(obj, "cpus", &s->cpus, TYPE_RISCV_HART_ARRAY); + object_property_set_int(OBJECT(&s->cpus), "num-harts", ms->smp.cpus, + &error_abort); + object_property_set_int(OBJECT(&s->cpus), "resetvec", 0x1004, &error_abort); + object_initialize_child(obj, "riscv.sifive.e.gpio0", &s->gpio, + TYPE_SIFIVE_GPIO); +} + +static void sifive_e_soc_realize(DeviceState *dev, Error **errp) +{ + MachineState *ms = MACHINE(qdev_get_machine()); + const MemMapEntry *memmap = sifive_e_memmap; + SiFiveESoCState *s = RISCV_E_SOC(dev); + MemoryRegion *sys_mem = get_system_memory(); + + object_property_set_str(OBJECT(&s->cpus), "cpu-type", ms->cpu_type, + &error_abort); + sysbus_realize(SYS_BUS_DEVICE(&s->cpus), &error_abort); + + /* Mask ROM */ + memory_region_init_rom(&s->mask_rom, OBJECT(dev), "riscv.sifive.e.mrom", + memmap[SIFIVE_E_DEV_MROM].size, &error_fatal); + memory_region_add_subregion(sys_mem, + memmap[SIFIVE_E_DEV_MROM].base, &s->mask_rom); + + /* MMIO */ + s->plic = sifive_plic_create(memmap[SIFIVE_E_DEV_PLIC].base, + (char *)SIFIVE_E_PLIC_HART_CONFIG, ms->smp.cpus, 0, + SIFIVE_E_PLIC_NUM_SOURCES, + SIFIVE_E_PLIC_NUM_PRIORITIES, + SIFIVE_E_PLIC_PRIORITY_BASE, + SIFIVE_E_PLIC_PENDING_BASE, + SIFIVE_E_PLIC_ENABLE_BASE, + SIFIVE_E_PLIC_ENABLE_STRIDE, + SIFIVE_E_PLIC_CONTEXT_BASE, + SIFIVE_E_PLIC_CONTEXT_STRIDE, + memmap[SIFIVE_E_DEV_PLIC].size); + riscv_aclint_swi_create(memmap[SIFIVE_E_DEV_CLINT].base, + 0, ms->smp.cpus, false); + riscv_aclint_mtimer_create(memmap[SIFIVE_E_DEV_CLINT].base + + RISCV_ACLINT_SWI_SIZE, + RISCV_ACLINT_DEFAULT_MTIMER_SIZE, 0, ms->smp.cpus, + RISCV_ACLINT_DEFAULT_MTIMECMP, RISCV_ACLINT_DEFAULT_MTIME, + RISCV_ACLINT_DEFAULT_TIMEBASE_FREQ, false); + create_unimplemented_device("riscv.sifive.e.aon", + memmap[SIFIVE_E_DEV_AON].base, memmap[SIFIVE_E_DEV_AON].size); + sifive_e_prci_create(memmap[SIFIVE_E_DEV_PRCI].base); + + /* GPIO */ + + if (!sysbus_realize(SYS_BUS_DEVICE(&s->gpio), errp)) { + return; + } + + /* Map GPIO registers */ + sysbus_mmio_map(SYS_BUS_DEVICE(&s->gpio), 0, memmap[SIFIVE_E_DEV_GPIO0].base); + + /* Pass all GPIOs to the SOC layer so they are available to the board */ + qdev_pass_gpios(DEVICE(&s->gpio), dev, NULL); + + /* Connect GPIO interrupts to the PLIC */ + for (int i = 0; i < 32; i++) { + sysbus_connect_irq(SYS_BUS_DEVICE(&s->gpio), i, + qdev_get_gpio_in(DEVICE(s->plic), + SIFIVE_E_GPIO0_IRQ0 + i)); + } + + sifive_uart_create(sys_mem, memmap[SIFIVE_E_DEV_UART0].base, + serial_hd(0), qdev_get_gpio_in(DEVICE(s->plic), 
SIFIVE_E_UART0_IRQ)); + create_unimplemented_device("riscv.sifive.e.qspi0", + memmap[SIFIVE_E_DEV_QSPI0].base, memmap[SIFIVE_E_DEV_QSPI0].size); + create_unimplemented_device("riscv.sifive.e.pwm0", + memmap[SIFIVE_E_DEV_PWM0].base, memmap[SIFIVE_E_DEV_PWM0].size); + sifive_uart_create(sys_mem, memmap[SIFIVE_E_DEV_UART1].base, + serial_hd(1), qdev_get_gpio_in(DEVICE(s->plic), SIFIVE_E_UART1_IRQ)); + create_unimplemented_device("riscv.sifive.e.qspi1", + memmap[SIFIVE_E_DEV_QSPI1].base, memmap[SIFIVE_E_DEV_QSPI1].size); + create_unimplemented_device("riscv.sifive.e.pwm1", + memmap[SIFIVE_E_DEV_PWM1].base, memmap[SIFIVE_E_DEV_PWM1].size); + create_unimplemented_device("riscv.sifive.e.qspi2", + memmap[SIFIVE_E_DEV_QSPI2].base, memmap[SIFIVE_E_DEV_QSPI2].size); + create_unimplemented_device("riscv.sifive.e.pwm2", + memmap[SIFIVE_E_DEV_PWM2].base, memmap[SIFIVE_E_DEV_PWM2].size); + + /* Flash memory */ + memory_region_init_rom(&s->xip_mem, OBJECT(dev), "riscv.sifive.e.xip", + memmap[SIFIVE_E_DEV_XIP].size, &error_fatal); + memory_region_add_subregion(sys_mem, memmap[SIFIVE_E_DEV_XIP].base, + &s->xip_mem); +} + +static void sifive_e_soc_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + + dc->realize = sifive_e_soc_realize; + /* Reason: Uses serial_hds in realize function, thus can't be used twice */ + dc->user_creatable = false; +} + +static const TypeInfo sifive_e_soc_type_info = { + .name = TYPE_RISCV_E_SOC, + .parent = TYPE_DEVICE, + .instance_size = sizeof(SiFiveESoCState), + .instance_init = sifive_e_soc_init, + .class_init = sifive_e_soc_class_init, +}; + +static void sifive_e_soc_register_types(void) +{ + type_register_static(&sifive_e_soc_type_info); +} + +type_init(sifive_e_soc_register_types) diff --git a/hw/riscv/sifive_u.c b/hw/riscv/sifive_u.c new file mode 100644 index 000000000..589ae72a5 --- /dev/null +++ b/hw/riscv/sifive_u.c @@ -0,0 +1,999 @@ +/* + * QEMU RISC-V Board Compatible with SiFive Freedom U SDK + * + * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu + * Copyright (c) 2017 SiFive, Inc. + * Copyright (c) 2019 Bin Meng + * + * Provides a board compatible with the SiFive Freedom U SDK: + * + * 0) UART + * 1) CLINT (Core Level Interruptor) + * 2) PLIC (Platform Level Interrupt Controller) + * 3) PRCI (Power, Reset, Clock, Interrupt) + * 4) GPIO (General Purpose Input/Output Controller) + * 5) OTP (One-Time Programmable) memory with stored serial number + * 6) GEM (Gigabit Ethernet Controller) and management block + * 7) DMA (Direct Memory Access Controller) + * 8) SPI0 connected to an SPI flash + * 9) SPI2 connected to an SD card + * 10) PWM0 and PWM1 + * + * This board currently generates devicetree dynamically that indicates at least + * two harts and up to five harts. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . 
+ */ + +#include "qemu/osdep.h" +#include "qemu/error-report.h" +#include "qapi/error.h" +#include "qapi/visitor.h" +#include "hw/boards.h" +#include "hw/irq.h" +#include "hw/loader.h" +#include "hw/sysbus.h" +#include "hw/char/serial.h" +#include "hw/cpu/cluster.h" +#include "hw/misc/unimp.h" +#include "hw/ssi/ssi.h" +#include "target/riscv/cpu.h" +#include "hw/riscv/riscv_hart.h" +#include "hw/riscv/sifive_u.h" +#include "hw/riscv/boot.h" +#include "hw/char/sifive_uart.h" +#include "hw/intc/riscv_aclint.h" +#include "hw/intc/sifive_plic.h" +#include "chardev/char.h" +#include "net/eth.h" +#include "sysemu/device_tree.h" +#include "sysemu/runstate.h" +#include "sysemu/sysemu.h" + +#include <libfdt.h> + +/* CLINT timebase frequency */ +#define CLINT_TIMEBASE_FREQ 1000000 + +static const MemMapEntry sifive_u_memmap[] = { + [SIFIVE_U_DEV_DEBUG] = { 0x0, 0x100 }, + [SIFIVE_U_DEV_MROM] = { 0x1000, 0xf000 }, + [SIFIVE_U_DEV_CLINT] = { 0x2000000, 0x10000 }, + [SIFIVE_U_DEV_L2CC] = { 0x2010000, 0x1000 }, + [SIFIVE_U_DEV_PDMA] = { 0x3000000, 0x100000 }, + [SIFIVE_U_DEV_L2LIM] = { 0x8000000, 0x2000000 }, + [SIFIVE_U_DEV_PLIC] = { 0xc000000, 0x4000000 }, + [SIFIVE_U_DEV_PRCI] = { 0x10000000, 0x1000 }, + [SIFIVE_U_DEV_UART0] = { 0x10010000, 0x1000 }, + [SIFIVE_U_DEV_UART1] = { 0x10011000, 0x1000 }, + [SIFIVE_U_DEV_PWM0] = { 0x10020000, 0x1000 }, + [SIFIVE_U_DEV_PWM1] = { 0x10021000, 0x1000 }, + [SIFIVE_U_DEV_QSPI0] = { 0x10040000, 0x1000 }, + [SIFIVE_U_DEV_QSPI2] = { 0x10050000, 0x1000 }, + [SIFIVE_U_DEV_GPIO] = { 0x10060000, 0x1000 }, + [SIFIVE_U_DEV_OTP] = { 0x10070000, 0x1000 }, + [SIFIVE_U_DEV_GEM] = { 0x10090000, 0x2000 }, + [SIFIVE_U_DEV_GEM_MGMT] = { 0x100a0000, 0x1000 }, + [SIFIVE_U_DEV_DMC] = { 0x100b0000, 0x10000 }, + [SIFIVE_U_DEV_FLASH0] = { 0x20000000, 0x10000000 }, + [SIFIVE_U_DEV_DRAM] = { 0x80000000, 0x0 }, +}; + +#define OTP_SERIAL 1 +#define GEM_REVISION 0x10070109 + +static void create_fdt(SiFiveUState *s, const MemMapEntry *memmap, + uint64_t mem_size, const char *cmdline, bool is_32_bit) +{ + MachineState *ms = MACHINE(qdev_get_machine()); + void *fdt; + int cpu; + uint32_t *cells; + char *nodename; + uint32_t plic_phandle, prci_phandle, gpio_phandle, phandle = 1; + uint32_t hfclk_phandle, rtcclk_phandle, phy_phandle; + static const char * const ethclk_names[2] = { "pclk", "hclk" }; + static const char * const clint_compat[2] = { + "sifive,clint0", "riscv,clint0" + }; + static const char * const plic_compat[2] = { + "sifive,plic-1.0.0", "riscv,plic0" + }; + + if (ms->dtb) { + fdt = s->fdt = load_device_tree(ms->dtb, &s->fdt_size); + if (!fdt) { + error_report("load_device_tree() failed"); + exit(1); + } + goto update_bootargs; + } else { + fdt = s->fdt = create_device_tree(&s->fdt_size); + if (!fdt) { + error_report("create_device_tree() failed"); + exit(1); + } + } + + qemu_fdt_setprop_string(fdt, "/", "model", "SiFive HiFive Unleashed A00"); + qemu_fdt_setprop_string(fdt, "/", "compatible", + "sifive,hifive-unleashed-a00"); + qemu_fdt_setprop_cell(fdt, "/", "#size-cells", 0x2); + qemu_fdt_setprop_cell(fdt, "/", "#address-cells", 0x2); + + qemu_fdt_add_subnode(fdt, "/soc"); + qemu_fdt_setprop(fdt, "/soc", "ranges", NULL, 0); + qemu_fdt_setprop_string(fdt, "/soc", "compatible", "simple-bus"); + qemu_fdt_setprop_cell(fdt, "/soc", "#size-cells", 0x2); + qemu_fdt_setprop_cell(fdt, "/soc", "#address-cells", 0x2); + + hfclk_phandle = phandle++; + nodename = g_strdup_printf("/hfclk"); + qemu_fdt_add_subnode(fdt, nodename); + qemu_fdt_setprop_cell(fdt, nodename, "phandle", hfclk_phandle); +
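/* + * The hfclk and rtcclk board inputs are modelled as fixed-rate clocks. + * The phandle allocated just above is how consumer nodes reference this + * clock (e.g. the PRCI node's "clocks" property below). Together with the + * properties set next, the generated node is roughly equivalent to this + * DTS fragment: + * + * hfclk { + * #clock-cells = <0>; + * compatible = "fixed-clock"; + * clock-frequency = <SIFIVE_U_HFCLK_FREQ>; + * clock-output-names = "hfclk"; + * }; + */ +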
qemu_fdt_setprop_string(fdt, nodename, "clock-output-names", "hfclk"); + qemu_fdt_setprop_cell(fdt, nodename, "clock-frequency", + SIFIVE_U_HFCLK_FREQ); + qemu_fdt_setprop_string(fdt, nodename, "compatible", "fixed-clock"); + qemu_fdt_setprop_cell(fdt, nodename, "#clock-cells", 0x0); + g_free(nodename); + + rtcclk_phandle = phandle++; + nodename = g_strdup_printf("/rtcclk"); + qemu_fdt_add_subnode(fdt, nodename); + qemu_fdt_setprop_cell(fdt, nodename, "phandle", rtcclk_phandle); + qemu_fdt_setprop_string(fdt, nodename, "clock-output-names", "rtcclk"); + qemu_fdt_setprop_cell(fdt, nodename, "clock-frequency", + SIFIVE_U_RTCCLK_FREQ); + qemu_fdt_setprop_string(fdt, nodename, "compatible", "fixed-clock"); + qemu_fdt_setprop_cell(fdt, nodename, "#clock-cells", 0x0); + g_free(nodename); + + nodename = g_strdup_printf("/memory@%lx", + (long)memmap[SIFIVE_U_DEV_DRAM].base); + qemu_fdt_add_subnode(fdt, nodename); + qemu_fdt_setprop_cells(fdt, nodename, "reg", + memmap[SIFIVE_U_DEV_DRAM].base >> 32, memmap[SIFIVE_U_DEV_DRAM].base, + mem_size >> 32, mem_size); + qemu_fdt_setprop_string(fdt, nodename, "device_type", "memory"); + g_free(nodename); + + qemu_fdt_add_subnode(fdt, "/cpus"); + qemu_fdt_setprop_cell(fdt, "/cpus", "timebase-frequency", + CLINT_TIMEBASE_FREQ); + qemu_fdt_setprop_cell(fdt, "/cpus", "#size-cells", 0x0); + qemu_fdt_setprop_cell(fdt, "/cpus", "#address-cells", 0x1); + + for (cpu = ms->smp.cpus - 1; cpu >= 0; cpu--) { + int cpu_phandle = phandle++; + nodename = g_strdup_printf("/cpus/cpu@%d", cpu); + char *intc = g_strdup_printf("/cpus/cpu@%d/interrupt-controller", cpu); + char *isa; + qemu_fdt_add_subnode(fdt, nodename); + /* cpu 0 is the management hart that does not have mmu */ + if (cpu != 0) { + if (is_32_bit) { + qemu_fdt_setprop_string(fdt, nodename, "mmu-type", "riscv,sv32"); + } else { + qemu_fdt_setprop_string(fdt, nodename, "mmu-type", "riscv,sv48"); + } + isa = riscv_isa_string(&s->soc.u_cpus.harts[cpu - 1]); + } else { + isa = riscv_isa_string(&s->soc.e_cpus.harts[0]); + } + qemu_fdt_setprop_string(fdt, nodename, "riscv,isa", isa); + qemu_fdt_setprop_string(fdt, nodename, "compatible", "riscv"); + qemu_fdt_setprop_string(fdt, nodename, "status", "okay"); + qemu_fdt_setprop_cell(fdt, nodename, "reg", cpu); + qemu_fdt_setprop_string(fdt, nodename, "device_type", "cpu"); + qemu_fdt_add_subnode(fdt, intc); + qemu_fdt_setprop_cell(fdt, intc, "phandle", cpu_phandle); + qemu_fdt_setprop_string(fdt, intc, "compatible", "riscv,cpu-intc"); + qemu_fdt_setprop(fdt, intc, "interrupt-controller", NULL, 0); + qemu_fdt_setprop_cell(fdt, intc, "#interrupt-cells", 1); + g_free(isa); + g_free(intc); + g_free(nodename); + } + + cells = g_new0(uint32_t, ms->smp.cpus * 4); + for (cpu = 0; cpu < ms->smp.cpus; cpu++) { + nodename = + g_strdup_printf("/cpus/cpu@%d/interrupt-controller", cpu); + uint32_t intc_phandle = qemu_fdt_get_phandle(fdt, nodename); + cells[cpu * 4 + 0] = cpu_to_be32(intc_phandle); + cells[cpu * 4 + 1] = cpu_to_be32(IRQ_M_SOFT); + cells[cpu * 4 + 2] = cpu_to_be32(intc_phandle); + cells[cpu * 4 + 3] = cpu_to_be32(IRQ_M_TIMER); + g_free(nodename); + } + nodename = g_strdup_printf("/soc/clint@%lx", + (long)memmap[SIFIVE_U_DEV_CLINT].base); + qemu_fdt_add_subnode(fdt, nodename); + qemu_fdt_setprop_string_array(fdt, nodename, "compatible", + (char **)&clint_compat, ARRAY_SIZE(clint_compat)); + qemu_fdt_setprop_cells(fdt, nodename, "reg", + 0x0, memmap[SIFIVE_U_DEV_CLINT].base, + 0x0, memmap[SIFIVE_U_DEV_CLINT].size); + qemu_fdt_setprop(fdt, nodename, "interrupts-extended", + 
cells, ms->smp.cpus * sizeof(uint32_t) * 4); + g_free(cells); + g_free(nodename); + + nodename = g_strdup_printf("/soc/otp@%lx", + (long)memmap[SIFIVE_U_DEV_OTP].base); + qemu_fdt_add_subnode(fdt, nodename); + qemu_fdt_setprop_cell(fdt, nodename, "fuse-count", SIFIVE_U_OTP_REG_SIZE); + qemu_fdt_setprop_cells(fdt, nodename, "reg", + 0x0, memmap[SIFIVE_U_DEV_OTP].base, + 0x0, memmap[SIFIVE_U_DEV_OTP].size); + qemu_fdt_setprop_string(fdt, nodename, "compatible", + "sifive,fu540-c000-otp"); + g_free(nodename); + + prci_phandle = phandle++; + nodename = g_strdup_printf("/soc/clock-controller@%lx", + (long)memmap[SIFIVE_U_DEV_PRCI].base); + qemu_fdt_add_subnode(fdt, nodename); + qemu_fdt_setprop_cell(fdt, nodename, "phandle", prci_phandle); + qemu_fdt_setprop_cell(fdt, nodename, "#clock-cells", 0x1); + qemu_fdt_setprop_cells(fdt, nodename, "clocks", + hfclk_phandle, rtcclk_phandle); + qemu_fdt_setprop_cells(fdt, nodename, "reg", + 0x0, memmap[SIFIVE_U_DEV_PRCI].base, + 0x0, memmap[SIFIVE_U_DEV_PRCI].size); + qemu_fdt_setprop_string(fdt, nodename, "compatible", + "sifive,fu540-c000-prci"); + g_free(nodename); + + plic_phandle = phandle++; + cells = g_new0(uint32_t, ms->smp.cpus * 4 - 2); + for (cpu = 0; cpu < ms->smp.cpus; cpu++) { + nodename = + g_strdup_printf("/cpus/cpu@%d/interrupt-controller", cpu); + uint32_t intc_phandle = qemu_fdt_get_phandle(fdt, nodename); + /* cpu 0 is the management hart that does not have S-mode */ + if (cpu == 0) { + cells[0] = cpu_to_be32(intc_phandle); + cells[1] = cpu_to_be32(IRQ_M_EXT); + } else { + cells[cpu * 4 - 2] = cpu_to_be32(intc_phandle); + cells[cpu * 4 - 1] = cpu_to_be32(IRQ_M_EXT); + cells[cpu * 4 + 0] = cpu_to_be32(intc_phandle); + cells[cpu * 4 + 1] = cpu_to_be32(IRQ_S_EXT); + } + g_free(nodename); + } + nodename = g_strdup_printf("/soc/interrupt-controller@%lx", + (long)memmap[SIFIVE_U_DEV_PLIC].base); + qemu_fdt_add_subnode(fdt, nodename); + qemu_fdt_setprop_cell(fdt, nodename, "#interrupt-cells", 1); + qemu_fdt_setprop_string_array(fdt, nodename, "compatible", + (char **)&plic_compat, ARRAY_SIZE(plic_compat)); + qemu_fdt_setprop(fdt, nodename, "interrupt-controller", NULL, 0); + qemu_fdt_setprop(fdt, nodename, "interrupts-extended", + cells, (ms->smp.cpus * 4 - 2) * sizeof(uint32_t)); + qemu_fdt_setprop_cells(fdt, nodename, "reg", + 0x0, memmap[SIFIVE_U_DEV_PLIC].base, + 0x0, memmap[SIFIVE_U_DEV_PLIC].size); + qemu_fdt_setprop_cell(fdt, nodename, "riscv,ndev", 0x35); + qemu_fdt_setprop_cell(fdt, nodename, "phandle", plic_phandle); + plic_phandle = qemu_fdt_get_phandle(fdt, nodename); + g_free(cells); + g_free(nodename); + + gpio_phandle = phandle++; + nodename = g_strdup_printf("/soc/gpio@%lx", + (long)memmap[SIFIVE_U_DEV_GPIO].base); + qemu_fdt_add_subnode(fdt, nodename); + qemu_fdt_setprop_cell(fdt, nodename, "phandle", gpio_phandle); + qemu_fdt_setprop_cells(fdt, nodename, "clocks", + prci_phandle, PRCI_CLK_TLCLK); + qemu_fdt_setprop_cell(fdt, nodename, "#interrupt-cells", 2); + qemu_fdt_setprop(fdt, nodename, "interrupt-controller", NULL, 0); + qemu_fdt_setprop_cell(fdt, nodename, "#gpio-cells", 2); + qemu_fdt_setprop(fdt, nodename, "gpio-controller", NULL, 0); + qemu_fdt_setprop_cells(fdt, nodename, "reg", + 0x0, memmap[SIFIVE_U_DEV_GPIO].base, + 0x0, memmap[SIFIVE_U_DEV_GPIO].size); + qemu_fdt_setprop_cells(fdt, nodename, "interrupts", SIFIVE_U_GPIO_IRQ0, + SIFIVE_U_GPIO_IRQ1, SIFIVE_U_GPIO_IRQ2, SIFIVE_U_GPIO_IRQ3, + SIFIVE_U_GPIO_IRQ4, SIFIVE_U_GPIO_IRQ5, SIFIVE_U_GPIO_IRQ6, + SIFIVE_U_GPIO_IRQ7, SIFIVE_U_GPIO_IRQ8, SIFIVE_U_GPIO_IRQ9, + 
SIFIVE_U_GPIO_IRQ10, SIFIVE_U_GPIO_IRQ11, SIFIVE_U_GPIO_IRQ12, + SIFIVE_U_GPIO_IRQ13, SIFIVE_U_GPIO_IRQ14, SIFIVE_U_GPIO_IRQ15); + qemu_fdt_setprop_cell(fdt, nodename, "interrupt-parent", plic_phandle); + qemu_fdt_setprop_string(fdt, nodename, "compatible", "sifive,gpio0"); + g_free(nodename); + + nodename = g_strdup_printf("/gpio-restart"); + qemu_fdt_add_subnode(fdt, nodename); + qemu_fdt_setprop_cells(fdt, nodename, "gpios", gpio_phandle, 10, 1); + qemu_fdt_setprop_string(fdt, nodename, "compatible", "gpio-restart"); + g_free(nodename); + + nodename = g_strdup_printf("/soc/dma@%lx", + (long)memmap[SIFIVE_U_DEV_PDMA].base); + qemu_fdt_add_subnode(fdt, nodename); + qemu_fdt_setprop_cell(fdt, nodename, "#dma-cells", 1); + qemu_fdt_setprop_cells(fdt, nodename, "interrupts", + SIFIVE_U_PDMA_IRQ0, SIFIVE_U_PDMA_IRQ1, SIFIVE_U_PDMA_IRQ2, + SIFIVE_U_PDMA_IRQ3, SIFIVE_U_PDMA_IRQ4, SIFIVE_U_PDMA_IRQ5, + SIFIVE_U_PDMA_IRQ6, SIFIVE_U_PDMA_IRQ7); + qemu_fdt_setprop_cell(fdt, nodename, "interrupt-parent", plic_phandle); + qemu_fdt_setprop_cells(fdt, nodename, "reg", + 0x0, memmap[SIFIVE_U_DEV_PDMA].base, + 0x0, memmap[SIFIVE_U_DEV_PDMA].size); + qemu_fdt_setprop_string(fdt, nodename, "compatible", + "sifive,fu540-c000-pdma"); + g_free(nodename); + + nodename = g_strdup_printf("/soc/cache-controller@%lx", + (long)memmap[SIFIVE_U_DEV_L2CC].base); + qemu_fdt_add_subnode(fdt, nodename); + qemu_fdt_setprop_cells(fdt, nodename, "reg", + 0x0, memmap[SIFIVE_U_DEV_L2CC].base, + 0x0, memmap[SIFIVE_U_DEV_L2CC].size); + qemu_fdt_setprop_cells(fdt, nodename, "interrupts", + SIFIVE_U_L2CC_IRQ0, SIFIVE_U_L2CC_IRQ1, SIFIVE_U_L2CC_IRQ2); + qemu_fdt_setprop_cell(fdt, nodename, "interrupt-parent", plic_phandle); + qemu_fdt_setprop(fdt, nodename, "cache-unified", NULL, 0); + qemu_fdt_setprop_cell(fdt, nodename, "cache-size", 2097152); + qemu_fdt_setprop_cell(fdt, nodename, "cache-sets", 1024); + qemu_fdt_setprop_cell(fdt, nodename, "cache-level", 2); + qemu_fdt_setprop_cell(fdt, nodename, "cache-block-size", 64); + qemu_fdt_setprop_string(fdt, nodename, "compatible", + "sifive,fu540-c000-ccache"); + g_free(nodename); + + nodename = g_strdup_printf("/soc/spi@%lx", + (long)memmap[SIFIVE_U_DEV_QSPI2].base); + qemu_fdt_add_subnode(fdt, nodename); + qemu_fdt_setprop_cell(fdt, nodename, "#size-cells", 0); + qemu_fdt_setprop_cell(fdt, nodename, "#address-cells", 1); + qemu_fdt_setprop_cells(fdt, nodename, "clocks", + prci_phandle, PRCI_CLK_TLCLK); + qemu_fdt_setprop_cell(fdt, nodename, "interrupts", SIFIVE_U_QSPI2_IRQ); + qemu_fdt_setprop_cell(fdt, nodename, "interrupt-parent", plic_phandle); + qemu_fdt_setprop_cells(fdt, nodename, "reg", + 0x0, memmap[SIFIVE_U_DEV_QSPI2].base, + 0x0, memmap[SIFIVE_U_DEV_QSPI2].size); + qemu_fdt_setprop_string(fdt, nodename, "compatible", "sifive,spi0"); + g_free(nodename); + + nodename = g_strdup_printf("/soc/spi@%lx/mmc@0", + (long)memmap[SIFIVE_U_DEV_QSPI2].base); + qemu_fdt_add_subnode(fdt, nodename); + qemu_fdt_setprop(fdt, nodename, "disable-wp", NULL, 0); + qemu_fdt_setprop_cells(fdt, nodename, "voltage-ranges", 3300, 3300); + qemu_fdt_setprop_cell(fdt, nodename, "spi-max-frequency", 20000000); + qemu_fdt_setprop_cell(fdt, nodename, "reg", 0); + qemu_fdt_setprop_string(fdt, nodename, "compatible", "mmc-spi-slot"); + g_free(nodename); + + nodename = g_strdup_printf("/soc/spi@%lx", + (long)memmap[SIFIVE_U_DEV_QSPI0].base); + qemu_fdt_add_subnode(fdt, nodename); + qemu_fdt_setprop_cell(fdt, nodename, "#size-cells", 0); + qemu_fdt_setprop_cell(fdt, nodename, "#address-cells", 1); + 
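/* + * As with the QSPI2 node above, #address-cells = <1> and #size-cells = <0> + * mean that children of this SPI controller are addressed by chip-select + * number rather than by an MMIO range: the flash@0 child added below + * carries reg = <0>, i.e. chip select 0. + */ +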
qemu_fdt_setprop_cells(fdt, nodename, "clocks", + prci_phandle, PRCI_CLK_TLCLK); + qemu_fdt_setprop_cell(fdt, nodename, "interrupts", SIFIVE_U_QSPI0_IRQ); + qemu_fdt_setprop_cell(fdt, nodename, "interrupt-parent", plic_phandle); + qemu_fdt_setprop_cells(fdt, nodename, "reg", + 0x0, memmap[SIFIVE_U_DEV_QSPI0].base, + 0x0, memmap[SIFIVE_U_DEV_QSPI0].size); + qemu_fdt_setprop_string(fdt, nodename, "compatible", "sifive,spi0"); + g_free(nodename); + + nodename = g_strdup_printf("/soc/spi@%lx/flash@0", + (long)memmap[SIFIVE_U_DEV_QSPI0].base); + qemu_fdt_add_subnode(fdt, nodename); + qemu_fdt_setprop_cell(fdt, nodename, "spi-rx-bus-width", 4); + qemu_fdt_setprop_cell(fdt, nodename, "spi-tx-bus-width", 4); + qemu_fdt_setprop(fdt, nodename, "m25p,fast-read", NULL, 0); + qemu_fdt_setprop_cell(fdt, nodename, "spi-max-frequency", 50000000); + qemu_fdt_setprop_cell(fdt, nodename, "reg", 0); + qemu_fdt_setprop_string(fdt, nodename, "compatible", "jedec,spi-nor"); + g_free(nodename); + + phy_phandle = phandle++; + nodename = g_strdup_printf("/soc/ethernet@%lx", + (long)memmap[SIFIVE_U_DEV_GEM].base); + qemu_fdt_add_subnode(fdt, nodename); + qemu_fdt_setprop_string(fdt, nodename, "compatible", + "sifive,fu540-c000-gem"); + qemu_fdt_setprop_cells(fdt, nodename, "reg", + 0x0, memmap[SIFIVE_U_DEV_GEM].base, + 0x0, memmap[SIFIVE_U_DEV_GEM].size, + 0x0, memmap[SIFIVE_U_DEV_GEM_MGMT].base, + 0x0, memmap[SIFIVE_U_DEV_GEM_MGMT].size); + qemu_fdt_setprop_string(fdt, nodename, "reg-names", "control"); + qemu_fdt_setprop_string(fdt, nodename, "phy-mode", "gmii"); + qemu_fdt_setprop_cell(fdt, nodename, "phy-handle", phy_phandle); + qemu_fdt_setprop_cell(fdt, nodename, "interrupt-parent", plic_phandle); + qemu_fdt_setprop_cell(fdt, nodename, "interrupts", SIFIVE_U_GEM_IRQ); + qemu_fdt_setprop_cells(fdt, nodename, "clocks", + prci_phandle, PRCI_CLK_GEMGXLPLL, prci_phandle, PRCI_CLK_GEMGXLPLL); + qemu_fdt_setprop_string_array(fdt, nodename, "clock-names", + (char **)ðclk_names, ARRAY_SIZE(ethclk_names)); + qemu_fdt_setprop(fdt, nodename, "local-mac-address", + s->soc.gem.conf.macaddr.a, ETH_ALEN); + qemu_fdt_setprop_cell(fdt, nodename, "#address-cells", 1); + qemu_fdt_setprop_cell(fdt, nodename, "#size-cells", 0); + + qemu_fdt_add_subnode(fdt, "/aliases"); + qemu_fdt_setprop_string(fdt, "/aliases", "ethernet0", nodename); + + g_free(nodename); + + nodename = g_strdup_printf("/soc/ethernet@%lx/ethernet-phy@0", + (long)memmap[SIFIVE_U_DEV_GEM].base); + qemu_fdt_add_subnode(fdt, nodename); + qemu_fdt_setprop_cell(fdt, nodename, "phandle", phy_phandle); + qemu_fdt_setprop_cell(fdt, nodename, "reg", 0x0); + g_free(nodename); + + nodename = g_strdup_printf("/soc/pwm@%lx", + (long)memmap[SIFIVE_U_DEV_PWM0].base); + qemu_fdt_add_subnode(fdt, nodename); + qemu_fdt_setprop_string(fdt, nodename, "compatible", "sifive,pwm0"); + qemu_fdt_setprop_cells(fdt, nodename, "reg", + 0x0, memmap[SIFIVE_U_DEV_PWM0].base, + 0x0, memmap[SIFIVE_U_DEV_PWM0].size); + qemu_fdt_setprop_cell(fdt, nodename, "interrupt-parent", plic_phandle); + qemu_fdt_setprop_cells(fdt, nodename, "interrupts", + SIFIVE_U_PWM0_IRQ0, SIFIVE_U_PWM0_IRQ1, + SIFIVE_U_PWM0_IRQ2, SIFIVE_U_PWM0_IRQ3); + qemu_fdt_setprop_cells(fdt, nodename, "clocks", + prci_phandle, PRCI_CLK_TLCLK); + qemu_fdt_setprop_cell(fdt, nodename, "#pwm-cells", 0); + g_free(nodename); + + nodename = g_strdup_printf("/soc/pwm@%lx", + (long)memmap[SIFIVE_U_DEV_PWM1].base); + qemu_fdt_add_subnode(fdt, nodename); + qemu_fdt_setprop_string(fdt, nodename, "compatible", "sifive,pwm0"); + 
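/* + * The second PWM instance mirrors the pwm@10020000 node above; only the + * MMIO window and the four interrupt lines differ. The "sifive,pwm0" + * compatible string names the PWM IP block revision rather than the + * instance, so both nodes share it. + */ +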
qemu_fdt_setprop_cells(fdt, nodename, "reg", + 0x0, memmap[SIFIVE_U_DEV_PWM1].base, + 0x0, memmap[SIFIVE_U_DEV_PWM1].size); + qemu_fdt_setprop_cell(fdt, nodename, "interrupt-parent", plic_phandle); + qemu_fdt_setprop_cells(fdt, nodename, "interrupts", + SIFIVE_U_PWM1_IRQ0, SIFIVE_U_PWM1_IRQ1, + SIFIVE_U_PWM1_IRQ2, SIFIVE_U_PWM1_IRQ3); + qemu_fdt_setprop_cells(fdt, nodename, "clocks", + prci_phandle, PRCI_CLK_TLCLK); + qemu_fdt_setprop_cell(fdt, nodename, "#pwm-cells", 0); + g_free(nodename); + + nodename = g_strdup_printf("/soc/serial@%lx", + (long)memmap[SIFIVE_U_DEV_UART1].base); + qemu_fdt_add_subnode(fdt, nodename); + qemu_fdt_setprop_string(fdt, nodename, "compatible", "sifive,uart0"); + qemu_fdt_setprop_cells(fdt, nodename, "reg", + 0x0, memmap[SIFIVE_U_DEV_UART1].base, + 0x0, memmap[SIFIVE_U_DEV_UART1].size); + qemu_fdt_setprop_cells(fdt, nodename, "clocks", + prci_phandle, PRCI_CLK_TLCLK); + qemu_fdt_setprop_cell(fdt, nodename, "interrupt-parent", plic_phandle); + qemu_fdt_setprop_cell(fdt, nodename, "interrupts", SIFIVE_U_UART1_IRQ); + + qemu_fdt_setprop_string(fdt, "/aliases", "serial1", nodename); + g_free(nodename); + + nodename = g_strdup_printf("/soc/serial@%lx", + (long)memmap[SIFIVE_U_DEV_UART0].base); + qemu_fdt_add_subnode(fdt, nodename); + qemu_fdt_setprop_string(fdt, nodename, "compatible", "sifive,uart0"); + qemu_fdt_setprop_cells(fdt, nodename, "reg", + 0x0, memmap[SIFIVE_U_DEV_UART0].base, + 0x0, memmap[SIFIVE_U_DEV_UART0].size); + qemu_fdt_setprop_cells(fdt, nodename, "clocks", + prci_phandle, PRCI_CLK_TLCLK); + qemu_fdt_setprop_cell(fdt, nodename, "interrupt-parent", plic_phandle); + qemu_fdt_setprop_cell(fdt, nodename, "interrupts", SIFIVE_U_UART0_IRQ); + + qemu_fdt_add_subnode(fdt, "/chosen"); + qemu_fdt_setprop_string(fdt, "/chosen", "stdout-path", nodename); + qemu_fdt_setprop_string(fdt, "/aliases", "serial0", nodename); + + g_free(nodename); + +update_bootargs: + if (cmdline) { + qemu_fdt_setprop_string(fdt, "/chosen", "bootargs", cmdline); + } +} + +static void sifive_u_machine_reset(void *opaque, int n, int level) +{ + /* gpio pin active low triggers reset */ + if (!level) { + qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); + } +} + +static void sifive_u_machine_init(MachineState *machine) +{ + const MemMapEntry *memmap = sifive_u_memmap; + SiFiveUState *s = RISCV_U_MACHINE(machine); + MemoryRegion *system_memory = get_system_memory(); + MemoryRegion *flash0 = g_new(MemoryRegion, 1); + target_ulong start_addr = memmap[SIFIVE_U_DEV_DRAM].base; + target_ulong firmware_end_addr, kernel_start_addr; + uint32_t start_addr_hi32 = 0x00000000; + int i; + uint32_t fdt_load_addr; + uint64_t kernel_entry; + DriveInfo *dinfo; + DeviceState *flash_dev, *sd_dev; + qemu_irq flash_cs, sd_cs; + + /* Initialize SoC */ + object_initialize_child(OBJECT(machine), "soc", &s->soc, TYPE_RISCV_U_SOC); + object_property_set_uint(OBJECT(&s->soc), "serial", s->serial, + &error_abort); + object_property_set_str(OBJECT(&s->soc), "cpu-type", machine->cpu_type, + &error_abort); + qdev_realize(DEVICE(&s->soc), NULL, &error_abort); + + /* register RAM */ + memory_region_add_subregion(system_memory, memmap[SIFIVE_U_DEV_DRAM].base, + machine->ram); + + /* register QSPI0 Flash */ + memory_region_init_ram(flash0, NULL, "riscv.sifive.u.flash0", + memmap[SIFIVE_U_DEV_FLASH0].size, &error_fatal); + memory_region_add_subregion(system_memory, memmap[SIFIVE_U_DEV_FLASH0].base, + flash0); + + /* register gpio-restart */ + qdev_connect_gpio_out(DEVICE(&(s->soc.gpio)), 10, + 
qemu_allocate_irq(sifive_u_machine_reset, NULL, 0)); + + /* create device tree */ + create_fdt(s, memmap, machine->ram_size, machine->kernel_cmdline, + riscv_is_32bit(&s->soc.u_cpus)); + + if (s->start_in_flash) { + /* + * If the start_in_flash property is given, assign s->msel to a value + * that represents booting from QSPI0 memory-mapped flash. + * + * This also means that when both start_in_flash and msel properties + * are given, start_in_flash takes precedence over msel. + * + * Note this is to keep backward compatibility and not break existing + * users of the start_in_flash property. + */ + s->msel = MSEL_MEMMAP_QSPI0_FLASH; + } + + switch (s->msel) { + case MSEL_MEMMAP_QSPI0_FLASH: + start_addr = memmap[SIFIVE_U_DEV_FLASH0].base; + break; + case MSEL_L2LIM_QSPI0_FLASH: + case MSEL_L2LIM_QSPI2_SD: + start_addr = memmap[SIFIVE_U_DEV_L2LIM].base; + break; + default: + start_addr = memmap[SIFIVE_U_DEV_DRAM].base; + break; + } + + if (riscv_is_32bit(&s->soc.u_cpus)) { + firmware_end_addr = riscv_find_and_load_firmware(machine, + RISCV32_BIOS_BIN, start_addr, NULL); + } else { + firmware_end_addr = riscv_find_and_load_firmware(machine, + RISCV64_BIOS_BIN, start_addr, NULL); + } + + if (machine->kernel_filename) { + kernel_start_addr = riscv_calc_kernel_start_addr(&s->soc.u_cpus, + firmware_end_addr); + + kernel_entry = riscv_load_kernel(machine->kernel_filename, + kernel_start_addr, NULL); + + if (machine->initrd_filename) { + hwaddr start; + hwaddr end = riscv_load_initrd(machine->initrd_filename, + machine->ram_size, kernel_entry, + &start); + qemu_fdt_setprop_cell(s->fdt, "/chosen", + "linux,initrd-start", start); + qemu_fdt_setprop_cell(s->fdt, "/chosen", "linux,initrd-end", + end); + } + } else { + /* + * If dynamic firmware is used, it doesn't know where the next mode + * starts if the kernel argument is not set.
+ */ + kernel_entry = 0; + } + + /* Compute the fdt load address in dram */ + fdt_load_addr = riscv_load_fdt(memmap[SIFIVE_U_DEV_DRAM].base, + machine->ram_size, s->fdt); + if (!riscv_is_32bit(&s->soc.u_cpus)) { + start_addr_hi32 = (uint64_t)start_addr >> 32; + } + + /* reset vector */ + uint32_t reset_vec[12] = { + s->msel, /* MSEL pin state */ + 0x00000297, /* 1: auipc t0, %pcrel_hi(fw_dyn) */ + 0x02c28613, /* addi a2, t0, %pcrel_lo(1b) */ + 0xf1402573, /* csrr a0, mhartid */ + 0, + 0, + 0x00028067, /* jr t0 */ + start_addr, /* start: .dword */ + start_addr_hi32, + fdt_load_addr, /* fdt_laddr: .dword */ + 0x00000000, + 0x00000000, + /* fw_dyn: */ + }; + if (riscv_is_32bit(&s->soc.u_cpus)) { + reset_vec[4] = 0x0202a583; /* lw a1, 32(t0) */ + reset_vec[5] = 0x0182a283; /* lw t0, 24(t0) */ + } else { + reset_vec[4] = 0x0202b583; /* ld a1, 32(t0) */ + reset_vec[5] = 0x0182b283; /* ld t0, 24(t0) */ + } + + + /* copy in the reset vector in little_endian byte order */ + for (i = 0; i < ARRAY_SIZE(reset_vec); i++) { + reset_vec[i] = cpu_to_le32(reset_vec[i]); + } + rom_add_blob_fixed_as("mrom.reset", reset_vec, sizeof(reset_vec), + memmap[SIFIVE_U_DEV_MROM].base, &address_space_memory); + + riscv_rom_copy_firmware_info(machine, memmap[SIFIVE_U_DEV_MROM].base, + memmap[SIFIVE_U_DEV_MROM].size, + sizeof(reset_vec), kernel_entry); + + /* Connect an SPI flash to SPI0 */ + flash_dev = qdev_new("is25wp256"); + dinfo = drive_get_next(IF_MTD); + if (dinfo) { + qdev_prop_set_drive_err(flash_dev, "drive", + blk_by_legacy_dinfo(dinfo), + &error_fatal); + } + qdev_realize_and_unref(flash_dev, BUS(s->soc.spi0.spi), &error_fatal); + + flash_cs = qdev_get_gpio_in_named(flash_dev, SSI_GPIO_CS, 0); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->soc.spi0), 1, flash_cs); + + /* Connect an SD card to SPI2 */ + sd_dev = ssi_create_peripheral(s->soc.spi2.spi, "ssi-sd"); + + sd_cs = qdev_get_gpio_in_named(sd_dev, SSI_GPIO_CS, 0); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->soc.spi2), 1, sd_cs); +} + +static bool sifive_u_machine_get_start_in_flash(Object *obj, Error **errp) +{ + SiFiveUState *s = RISCV_U_MACHINE(obj); + + return s->start_in_flash; +} + +static void sifive_u_machine_set_start_in_flash(Object *obj, bool value, Error **errp) +{ + SiFiveUState *s = RISCV_U_MACHINE(obj); + + s->start_in_flash = value; +} + +static void sifive_u_machine_get_uint32_prop(Object *obj, Visitor *v, + const char *name, void *opaque, + Error **errp) +{ + visit_type_uint32(v, name, (uint32_t *)opaque, errp); +} + +static void sifive_u_machine_set_uint32_prop(Object *obj, Visitor *v, + const char *name, void *opaque, + Error **errp) +{ + visit_type_uint32(v, name, (uint32_t *)opaque, errp); +} + +static void sifive_u_machine_instance_init(Object *obj) +{ + SiFiveUState *s = RISCV_U_MACHINE(obj); + + s->start_in_flash = false; + s->msel = 0; + object_property_add(obj, "msel", "uint32", + sifive_u_machine_get_uint32_prop, + sifive_u_machine_set_uint32_prop, NULL, &s->msel); + object_property_set_description(obj, "msel", + "Mode Select (MSEL[3:0]) pin state"); + + s->serial = OTP_SERIAL; + object_property_add(obj, "serial", "uint32", + sifive_u_machine_get_uint32_prop, + sifive_u_machine_set_uint32_prop, NULL, &s->serial); + object_property_set_description(obj, "serial", "Board serial number"); +} + +static void sifive_u_machine_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + + mc->desc = "RISC-V Board compatible with SiFive U SDK"; + mc->init = sifive_u_machine_init; + mc->max_cpus = 
SIFIVE_U_MANAGEMENT_CPU_COUNT + SIFIVE_U_COMPUTE_CPU_COUNT; + mc->min_cpus = SIFIVE_U_MANAGEMENT_CPU_COUNT + 1; + mc->default_cpu_type = SIFIVE_U_CPU; + mc->default_cpus = mc->min_cpus; + mc->default_ram_id = "riscv.sifive.u.ram"; + + object_class_property_add_bool(oc, "start-in-flash", + sifive_u_machine_get_start_in_flash, + sifive_u_machine_set_start_in_flash); + object_class_property_set_description(oc, "start-in-flash", + "Set on to tell QEMU's ROM to jump to " + "flash. Otherwise QEMU will jump to DRAM " + "or L2LIM depending on the msel value"); +} + +static const TypeInfo sifive_u_machine_typeinfo = { + .name = MACHINE_TYPE_NAME("sifive_u"), + .parent = TYPE_MACHINE, + .class_init = sifive_u_machine_class_init, + .instance_init = sifive_u_machine_instance_init, + .instance_size = sizeof(SiFiveUState), +}; + +static void sifive_u_machine_init_register_types(void) +{ + type_register_static(&sifive_u_machine_typeinfo); +} + +type_init(sifive_u_machine_init_register_types) + +static void sifive_u_soc_instance_init(Object *obj) +{ + SiFiveUSoCState *s = RISCV_U_SOC(obj); + + object_initialize_child(obj, "e-cluster", &s->e_cluster, TYPE_CPU_CLUSTER); + qdev_prop_set_uint32(DEVICE(&s->e_cluster), "cluster-id", 0); + + object_initialize_child(OBJECT(&s->e_cluster), "e-cpus", &s->e_cpus, + TYPE_RISCV_HART_ARRAY); + qdev_prop_set_uint32(DEVICE(&s->e_cpus), "num-harts", 1); + qdev_prop_set_uint32(DEVICE(&s->e_cpus), "hartid-base", 0); + qdev_prop_set_string(DEVICE(&s->e_cpus), "cpu-type", SIFIVE_E_CPU); + qdev_prop_set_uint64(DEVICE(&s->e_cpus), "resetvec", 0x1004); + + object_initialize_child(obj, "u-cluster", &s->u_cluster, TYPE_CPU_CLUSTER); + qdev_prop_set_uint32(DEVICE(&s->u_cluster), "cluster-id", 1); + + object_initialize_child(OBJECT(&s->u_cluster), "u-cpus", &s->u_cpus, + TYPE_RISCV_HART_ARRAY); + + object_initialize_child(obj, "prci", &s->prci, TYPE_SIFIVE_U_PRCI); + object_initialize_child(obj, "otp", &s->otp, TYPE_SIFIVE_U_OTP); + object_initialize_child(obj, "gem", &s->gem, TYPE_CADENCE_GEM); + object_initialize_child(obj, "gpio", &s->gpio, TYPE_SIFIVE_GPIO); + object_initialize_child(obj, "pdma", &s->dma, TYPE_SIFIVE_PDMA); + object_initialize_child(obj, "spi0", &s->spi0, TYPE_SIFIVE_SPI); + object_initialize_child(obj, "spi2", &s->spi2, TYPE_SIFIVE_SPI); + object_initialize_child(obj, "pwm0", &s->pwm[0], TYPE_SIFIVE_PWM); + object_initialize_child(obj, "pwm1", &s->pwm[1], TYPE_SIFIVE_PWM); +} + +static void sifive_u_soc_realize(DeviceState *dev, Error **errp) +{ + MachineState *ms = MACHINE(qdev_get_machine()); + SiFiveUSoCState *s = RISCV_U_SOC(dev); + const MemMapEntry *memmap = sifive_u_memmap; + MemoryRegion *system_memory = get_system_memory(); + MemoryRegion *mask_rom = g_new(MemoryRegion, 1); + MemoryRegion *l2lim_mem = g_new(MemoryRegion, 1); + char *plic_hart_config; + int i, j; + NICInfo *nd = &nd_table[0]; + + qdev_prop_set_uint32(DEVICE(&s->u_cpus), "num-harts", ms->smp.cpus - 1); + qdev_prop_set_uint32(DEVICE(&s->u_cpus), "hartid-base", 1); + qdev_prop_set_string(DEVICE(&s->u_cpus), "cpu-type", s->cpu_type); + qdev_prop_set_uint64(DEVICE(&s->u_cpus), "resetvec", 0x1004); + + sysbus_realize(SYS_BUS_DEVICE(&s->e_cpus), &error_abort); + sysbus_realize(SYS_BUS_DEVICE(&s->u_cpus), &error_abort); + /* + * The cluster must be realized after the RISC-V hart array container, + * as the container's CPU object is only created on realize, and the + * CPU must exist and have been parented into the cluster before the + * cluster is realized. 
+ */ + qdev_realize(DEVICE(&s->e_cluster), NULL, &error_abort); + qdev_realize(DEVICE(&s->u_cluster), NULL, &error_abort); + + /* boot rom */ + memory_region_init_rom(mask_rom, OBJECT(dev), "riscv.sifive.u.mrom", + memmap[SIFIVE_U_DEV_MROM].size, &error_fatal); + memory_region_add_subregion(system_memory, memmap[SIFIVE_U_DEV_MROM].base, + mask_rom); + + /* + * Add L2-LIM at reset size. + * This should be reduced in size as the L2 Cache Controller WayEnable + * register is incremented. Unfortunately I don't see a nice (or any) way + * to handle reducing or blocking out the L2 LIM while still allowing it + * to be returned to fully enabled after a reset. For the time being, just + * leave it enabled all the time. This won't break anything, but will be + * too generous to misbehaving guests. + */ + memory_region_init_ram(l2lim_mem, NULL, "riscv.sifive.u.l2lim", + memmap[SIFIVE_U_DEV_L2LIM].size, &error_fatal); + memory_region_add_subregion(system_memory, memmap[SIFIVE_U_DEV_L2LIM].base, + l2lim_mem); + + /* create PLIC hart topology configuration string */ + plic_hart_config = riscv_plic_hart_config_string(ms->smp.cpus); + + /* MMIO */ + s->plic = sifive_plic_create(memmap[SIFIVE_U_DEV_PLIC].base, + plic_hart_config, ms->smp.cpus, 0, + SIFIVE_U_PLIC_NUM_SOURCES, + SIFIVE_U_PLIC_NUM_PRIORITIES, + SIFIVE_U_PLIC_PRIORITY_BASE, + SIFIVE_U_PLIC_PENDING_BASE, + SIFIVE_U_PLIC_ENABLE_BASE, + SIFIVE_U_PLIC_ENABLE_STRIDE, + SIFIVE_U_PLIC_CONTEXT_BASE, + SIFIVE_U_PLIC_CONTEXT_STRIDE, + memmap[SIFIVE_U_DEV_PLIC].size); + g_free(plic_hart_config); + sifive_uart_create(system_memory, memmap[SIFIVE_U_DEV_UART0].base, + serial_hd(0), qdev_get_gpio_in(DEVICE(s->plic), SIFIVE_U_UART0_IRQ)); + sifive_uart_create(system_memory, memmap[SIFIVE_U_DEV_UART1].base, + serial_hd(1), qdev_get_gpio_in(DEVICE(s->plic), SIFIVE_U_UART1_IRQ)); + riscv_aclint_swi_create(memmap[SIFIVE_U_DEV_CLINT].base, 0, + ms->smp.cpus, false); + riscv_aclint_mtimer_create(memmap[SIFIVE_U_DEV_CLINT].base + + RISCV_ACLINT_SWI_SIZE, + RISCV_ACLINT_DEFAULT_MTIMER_SIZE, 0, ms->smp.cpus, + RISCV_ACLINT_DEFAULT_MTIMECMP, RISCV_ACLINT_DEFAULT_MTIME, + CLINT_TIMEBASE_FREQ, false); + + if (!sysbus_realize(SYS_BUS_DEVICE(&s->prci), errp)) { + return; + } + sysbus_mmio_map(SYS_BUS_DEVICE(&s->prci), 0, memmap[SIFIVE_U_DEV_PRCI].base); + + qdev_prop_set_uint32(DEVICE(&s->gpio), "ngpio", 16); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->gpio), errp)) { + return; + } + sysbus_mmio_map(SYS_BUS_DEVICE(&s->gpio), 0, memmap[SIFIVE_U_DEV_GPIO].base); + + /* Pass all GPIOs to the SOC layer so they are available to the board */ + qdev_pass_gpios(DEVICE(&s->gpio), dev, NULL); + + /* Connect GPIO interrupts to the PLIC */ + for (i = 0; i < 16; i++) { + sysbus_connect_irq(SYS_BUS_DEVICE(&s->gpio), i, + qdev_get_gpio_in(DEVICE(s->plic), + SIFIVE_U_GPIO_IRQ0 + i)); + } + + /* PDMA */ + sysbus_realize(SYS_BUS_DEVICE(&s->dma), errp); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->dma), 0, memmap[SIFIVE_U_DEV_PDMA].base); + + /* Connect PDMA interrupts to the PLIC */ + for (i = 0; i < SIFIVE_PDMA_IRQS; i++) { + sysbus_connect_irq(SYS_BUS_DEVICE(&s->dma), i, + qdev_get_gpio_in(DEVICE(s->plic), + SIFIVE_U_PDMA_IRQ0 + i)); + } + + qdev_prop_set_uint32(DEVICE(&s->otp), "serial", s->serial); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->otp), errp)) { + return; + } + sysbus_mmio_map(SYS_BUS_DEVICE(&s->otp), 0, memmap[SIFIVE_U_DEV_OTP].base); + + /* FIXME use qdev NIC properties instead of nd_table[] */ + if (nd->used) { + qemu_check_nic_model(nd, TYPE_CADENCE_GEM); +
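/* + * Legacy -net configuration path: qemu_check_nic_model() has validated + * the requested NIC model against the Cadence GEM, and + * qdev_set_nic_properties() below copies the MAC address and backend + * netdev from nd_table[0] onto the GEM device before it is realized. + */ +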
qdev_set_nic_properties(DEVICE(&s->gem), nd); + } + object_property_set_int(OBJECT(&s->gem), "revision", GEM_REVISION, + &error_abort); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->gem), errp)) { + return; + } + sysbus_mmio_map(SYS_BUS_DEVICE(&s->gem), 0, memmap[SIFIVE_U_DEV_GEM].base); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->gem), 0, + qdev_get_gpio_in(DEVICE(s->plic), SIFIVE_U_GEM_IRQ)); + + /* PWM */ + for (i = 0; i < 2; i++) { + if (!sysbus_realize(SYS_BUS_DEVICE(&s->pwm[i]), errp)) { + return; + } + sysbus_mmio_map(SYS_BUS_DEVICE(&s->pwm[i]), 0, + memmap[SIFIVE_U_DEV_PWM0].base + (0x1000 * i)); + + /* Connect PWM interrupts to the PLIC */ + for (j = 0; j < SIFIVE_PWM_IRQS; j++) { + sysbus_connect_irq(SYS_BUS_DEVICE(&s->pwm[i]), j, + qdev_get_gpio_in(DEVICE(s->plic), + SIFIVE_U_PWM0_IRQ0 + (i * 4) + j)); + } + } + + create_unimplemented_device("riscv.sifive.u.gem-mgmt", + memmap[SIFIVE_U_DEV_GEM_MGMT].base, memmap[SIFIVE_U_DEV_GEM_MGMT].size); + + create_unimplemented_device("riscv.sifive.u.dmc", + memmap[SIFIVE_U_DEV_DMC].base, memmap[SIFIVE_U_DEV_DMC].size); + + create_unimplemented_device("riscv.sifive.u.l2cc", + memmap[SIFIVE_U_DEV_L2CC].base, memmap[SIFIVE_U_DEV_L2CC].size); + + sysbus_realize(SYS_BUS_DEVICE(&s->spi0), errp); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->spi0), 0, + memmap[SIFIVE_U_DEV_QSPI0].base); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->spi0), 0, + qdev_get_gpio_in(DEVICE(s->plic), SIFIVE_U_QSPI0_IRQ)); + sysbus_realize(SYS_BUS_DEVICE(&s->spi2), errp); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->spi2), 0, + memmap[SIFIVE_U_DEV_QSPI2].base); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->spi2), 0, + qdev_get_gpio_in(DEVICE(s->plic), SIFIVE_U_QSPI2_IRQ)); +} + +static Property sifive_u_soc_props[] = { + DEFINE_PROP_UINT32("serial", SiFiveUSoCState, serial, OTP_SERIAL), + DEFINE_PROP_STRING("cpu-type", SiFiveUSoCState, cpu_type), + DEFINE_PROP_END_OF_LIST() +}; + +static void sifive_u_soc_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + + device_class_set_props(dc, sifive_u_soc_props); + dc->realize = sifive_u_soc_realize; + /* Reason: Uses serial_hds in realize function, thus can't be used twice */ + dc->user_creatable = false; +} + +static const TypeInfo sifive_u_soc_type_info = { + .name = TYPE_RISCV_U_SOC, + .parent = TYPE_DEVICE, + .instance_size = sizeof(SiFiveUSoCState), + .instance_init = sifive_u_soc_instance_init, + .class_init = sifive_u_soc_class_init, +}; + +static void sifive_u_soc_register_types(void) +{ + type_register_static(&sifive_u_soc_type_info); +} + +type_init(sifive_u_soc_register_types) diff --git a/hw/riscv/spike.c b/hw/riscv/spike.c new file mode 100644 index 000000000..288d69cd9 --- /dev/null +++ b/hw/riscv/spike.c @@ -0,0 +1,342 @@ +/* + * QEMU RISC-V Spike Board + * + * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu + * Copyright (c) 2017-2018 SiFive, Inc. + * + * This provides a RISC-V Board with the following devices: + * + * 0) HTIF Console and Poweroff + * 1) CLINT (Timer and IPI) + * 2) PLIC (Platform Level Interrupt Controller) + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#include "qemu/osdep.h" +#include "qemu/error-report.h" +#include "qapi/error.h" +#include "hw/boards.h" +#include "hw/loader.h" +#include "hw/sysbus.h" +#include "target/riscv/cpu.h" +#include "hw/riscv/riscv_hart.h" +#include "hw/riscv/spike.h" +#include "hw/riscv/boot.h" +#include "hw/riscv/numa.h" +#include "hw/char/riscv_htif.h" +#include "hw/intc/riscv_aclint.h" +#include "chardev/char.h" +#include "sysemu/device_tree.h" +#include "sysemu/sysemu.h" + +static const MemMapEntry spike_memmap[] = { + [SPIKE_MROM] = { 0x1000, 0xf000 }, + [SPIKE_CLINT] = { 0x2000000, 0x10000 }, + [SPIKE_DRAM] = { 0x80000000, 0x0 }, +}; + +static void create_fdt(SpikeState *s, const MemMapEntry *memmap, + uint64_t mem_size, const char *cmdline, bool is_32_bit) +{ + void *fdt; + uint64_t addr, size; + unsigned long clint_addr; + int cpu, socket; + MachineState *mc = MACHINE(s); + uint32_t *clint_cells; + uint32_t cpu_phandle, intc_phandle, phandle = 1; + char *name, *mem_name, *clint_name, *clust_name; + char *core_name, *cpu_name, *intc_name; + static const char * const clint_compat[2] = { + "sifive,clint0", "riscv,clint0" + }; + + fdt = s->fdt = create_device_tree(&s->fdt_size); + if (!fdt) { + error_report("create_device_tree() failed"); + exit(1); + } + + qemu_fdt_setprop_string(fdt, "/", "model", "ucbbar,spike-bare,qemu"); + qemu_fdt_setprop_string(fdt, "/", "compatible", "ucbbar,spike-bare-dev"); + qemu_fdt_setprop_cell(fdt, "/", "#size-cells", 0x2); + qemu_fdt_setprop_cell(fdt, "/", "#address-cells", 0x2); + + qemu_fdt_add_subnode(fdt, "/htif"); + qemu_fdt_setprop_string(fdt, "/htif", "compatible", "ucb,htif0"); + + qemu_fdt_add_subnode(fdt, "/soc"); + qemu_fdt_setprop(fdt, "/soc", "ranges", NULL, 0); + qemu_fdt_setprop_string(fdt, "/soc", "compatible", "simple-bus"); + qemu_fdt_setprop_cell(fdt, "/soc", "#size-cells", 0x2); + qemu_fdt_setprop_cell(fdt, "/soc", "#address-cells", 0x2); + + qemu_fdt_add_subnode(fdt, "/cpus"); + qemu_fdt_setprop_cell(fdt, "/cpus", "timebase-frequency", + RISCV_ACLINT_DEFAULT_TIMEBASE_FREQ); + qemu_fdt_setprop_cell(fdt, "/cpus", "#size-cells", 0x0); + qemu_fdt_setprop_cell(fdt, "/cpus", "#address-cells", 0x1); + qemu_fdt_add_subnode(fdt, "/cpus/cpu-map"); + + for (socket = (riscv_socket_count(mc) - 1); socket >= 0; socket--) { + clust_name = g_strdup_printf("/cpus/cpu-map/cluster%d", socket); + qemu_fdt_add_subnode(fdt, clust_name); + + clint_cells = g_new0(uint32_t, s->soc[socket].num_harts * 4); + + for (cpu = s->soc[socket].num_harts - 1; cpu >= 0; cpu--) { + cpu_phandle = phandle++; + + cpu_name = g_strdup_printf("/cpus/cpu@%d", + s->soc[socket].hartid_base + cpu); + qemu_fdt_add_subnode(fdt, cpu_name); + if (is_32_bit) { + qemu_fdt_setprop_string(fdt, cpu_name, "mmu-type", "riscv,sv32"); + } else { + qemu_fdt_setprop_string(fdt, cpu_name, "mmu-type", "riscv,sv48"); + } + name = riscv_isa_string(&s->soc[socket].harts[cpu]); + qemu_fdt_setprop_string(fdt, cpu_name, "riscv,isa", name); + g_free(name); + qemu_fdt_setprop_string(fdt, cpu_name, "compatible", "riscv"); + qemu_fdt_setprop_string(fdt, cpu_name, "status", "okay"); + qemu_fdt_setprop_cell(fdt, cpu_name, "reg", + s->soc[socket].hartid_base + cpu); + qemu_fdt_setprop_string(fdt, cpu_name, "device_type", "cpu"); + riscv_socket_fdt_write_id(mc, fdt, cpu_name, socket); + qemu_fdt_setprop_cell(fdt, cpu_name, "phandle", cpu_phandle); + + intc_name = g_strdup_printf("%s/interrupt-controller", 
cpu_name); + qemu_fdt_add_subnode(fdt, intc_name); + intc_phandle = phandle++; + qemu_fdt_setprop_cell(fdt, intc_name, "phandle", intc_phandle); + qemu_fdt_setprop_string(fdt, intc_name, "compatible", + "riscv,cpu-intc"); + qemu_fdt_setprop(fdt, intc_name, "interrupt-controller", NULL, 0); + qemu_fdt_setprop_cell(fdt, intc_name, "#interrupt-cells", 1); + + clint_cells[cpu * 4 + 0] = cpu_to_be32(intc_phandle); + clint_cells[cpu * 4 + 1] = cpu_to_be32(IRQ_M_SOFT); + clint_cells[cpu * 4 + 2] = cpu_to_be32(intc_phandle); + clint_cells[cpu * 4 + 3] = cpu_to_be32(IRQ_M_TIMER); + + core_name = g_strdup_printf("%s/core%d", clust_name, cpu); + qemu_fdt_add_subnode(fdt, core_name); + qemu_fdt_setprop_cell(fdt, core_name, "cpu", cpu_phandle); + + g_free(core_name); + g_free(intc_name); + g_free(cpu_name); + } + + addr = memmap[SPIKE_DRAM].base + riscv_socket_mem_offset(mc, socket); + size = riscv_socket_mem_size(mc, socket); + mem_name = g_strdup_printf("/memory@%lx", (long)addr); + qemu_fdt_add_subnode(fdt, mem_name); + qemu_fdt_setprop_cells(fdt, mem_name, "reg", + addr >> 32, addr, size >> 32, size); + qemu_fdt_setprop_string(fdt, mem_name, "device_type", "memory"); + riscv_socket_fdt_write_id(mc, fdt, mem_name, socket); + g_free(mem_name); + + clint_addr = memmap[SPIKE_CLINT].base + + (memmap[SPIKE_CLINT].size * socket); + clint_name = g_strdup_printf("/soc/clint@%lx", clint_addr); + qemu_fdt_add_subnode(fdt, clint_name); + qemu_fdt_setprop_string_array(fdt, clint_name, "compatible", + (char **)&clint_compat, ARRAY_SIZE(clint_compat)); + qemu_fdt_setprop_cells(fdt, clint_name, "reg", + 0x0, clint_addr, 0x0, memmap[SPIKE_CLINT].size); + qemu_fdt_setprop(fdt, clint_name, "interrupts-extended", + clint_cells, s->soc[socket].num_harts * sizeof(uint32_t) * 4); + riscv_socket_fdt_write_id(mc, fdt, clint_name, socket); + + g_free(clint_name); + g_free(clint_cells); + g_free(clust_name); + } + + riscv_socket_fdt_write_distance_matrix(mc, fdt); + + if (cmdline) { + qemu_fdt_add_subnode(fdt, "/chosen"); + qemu_fdt_setprop_string(fdt, "/chosen", "bootargs", cmdline); + } +} + +static void spike_board_init(MachineState *machine) +{ + const MemMapEntry *memmap = spike_memmap; + SpikeState *s = SPIKE_MACHINE(machine); + MemoryRegion *system_memory = get_system_memory(); + MemoryRegion *mask_rom = g_new(MemoryRegion, 1); + target_ulong firmware_end_addr, kernel_start_addr; + uint32_t fdt_load_addr; + uint64_t kernel_entry; + char *soc_name; + int i, base_hartid, hart_count; + + /* Check socket count limit */ + if (SPIKE_SOCKETS_MAX < riscv_socket_count(machine)) { + error_report("number of sockets/nodes should be less than %d", + SPIKE_SOCKETS_MAX); + exit(1); + } + + /* Initialize sockets */ + for (i = 0; i < riscv_socket_count(machine); i++) { + if (!riscv_socket_check_hartids(machine, i)) { + error_report("discontinuous hartids in socket%d", i); + exit(1); + } + + base_hartid = riscv_socket_first_hartid(machine, i); + if (base_hartid < 0) { + error_report("can't find hartid base for socket%d", i); + exit(1); + } + + hart_count = riscv_socket_hart_count(machine, i); + if (hart_count < 0) { + error_report("can't find hart count for socket%d", i); + exit(1); + } + + soc_name = g_strdup_printf("soc%d", i); + object_initialize_child(OBJECT(machine), soc_name, &s->soc[i], + TYPE_RISCV_HART_ARRAY); + g_free(soc_name); + object_property_set_str(OBJECT(&s->soc[i]), "cpu-type", + machine->cpu_type, &error_abort); + object_property_set_int(OBJECT(&s->soc[i]), "hartid-base", + base_hartid, &error_abort); + 
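/* + * Each socket is modelled as its own RISCV_HART_ARRAY: hartid-base is + * the first global hartid in the socket and num-harts (set below) is the + * number of contiguous harts it contains, matching the + * riscv_socket_check_hartids() check above. + */ +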
object_property_set_int(OBJECT(&s->soc[i]), "num-harts", + hart_count, &error_abort); + sysbus_realize(SYS_BUS_DEVICE(&s->soc[i]), &error_abort); + + /* Core Local Interruptor (timer and IPI) for each socket */ + riscv_aclint_swi_create( + memmap[SPIKE_CLINT].base + i * memmap[SPIKE_CLINT].size, + base_hartid, hart_count, false); + riscv_aclint_mtimer_create( + memmap[SPIKE_CLINT].base + i * memmap[SPIKE_CLINT].size + + RISCV_ACLINT_SWI_SIZE, + RISCV_ACLINT_DEFAULT_MTIMER_SIZE, base_hartid, hart_count, + RISCV_ACLINT_DEFAULT_MTIMECMP, RISCV_ACLINT_DEFAULT_MTIME, + RISCV_ACLINT_DEFAULT_TIMEBASE_FREQ, false); + } + + /* register system main memory (actual RAM) */ + memory_region_add_subregion(system_memory, memmap[SPIKE_DRAM].base, + machine->ram); + + /* create device tree */ + create_fdt(s, memmap, machine->ram_size, machine->kernel_cmdline, + riscv_is_32bit(&s->soc[0])); + + /* boot rom */ + memory_region_init_rom(mask_rom, NULL, "riscv.spike.mrom", + memmap[SPIKE_MROM].size, &error_fatal); + memory_region_add_subregion(system_memory, memmap[SPIKE_MROM].base, + mask_rom); + + /* + * Unlike other RISC-V machines that use plain binary BIOS images, + * keeping ELF files here is intentional: BIN files don't work for the + * Spike machine because HTIF emulation depends on ELF parsing. + */ + if (riscv_is_32bit(&s->soc[0])) { + firmware_end_addr = riscv_find_and_load_firmware(machine, + RISCV32_BIOS_ELF, memmap[SPIKE_DRAM].base, + htif_symbol_callback); + } else { + firmware_end_addr = riscv_find_and_load_firmware(machine, + RISCV64_BIOS_ELF, memmap[SPIKE_DRAM].base, + htif_symbol_callback); + } + + if (machine->kernel_filename) { + kernel_start_addr = riscv_calc_kernel_start_addr(&s->soc[0], + firmware_end_addr); + + kernel_entry = riscv_load_kernel(machine->kernel_filename, + kernel_start_addr, + htif_symbol_callback); + + if (machine->initrd_filename) { + hwaddr start; + hwaddr end = riscv_load_initrd(machine->initrd_filename, + machine->ram_size, kernel_entry, + &start); + qemu_fdt_setprop_cell(s->fdt, "/chosen", + "linux,initrd-start", start); + qemu_fdt_setprop_cell(s->fdt, "/chosen", "linux,initrd-end", + end); + } + } else { + /* + * If dynamic firmware is used, it doesn't know where the next mode + * starts if the kernel argument is not set.
+ */ + kernel_entry = 0; + } + + /* Compute the fdt load address in dram */ + fdt_load_addr = riscv_load_fdt(memmap[SPIKE_DRAM].base, + machine->ram_size, s->fdt); + /* load the reset vector */ + riscv_setup_rom_reset_vec(machine, &s->soc[0], memmap[SPIKE_DRAM].base, + memmap[SPIKE_MROM].base, + memmap[SPIKE_MROM].size, kernel_entry, + fdt_load_addr, s->fdt); + + /* initialize HTIF using symbols found in load_kernel */ + htif_mm_init(system_memory, mask_rom, + &s->soc[0].harts[0].env, serial_hd(0)); +} + +static void spike_machine_instance_init(Object *obj) +{ +} + +static void spike_machine_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + + mc->desc = "RISC-V Spike board"; + mc->init = spike_board_init; + mc->max_cpus = SPIKE_CPUS_MAX; + mc->is_default = true; + mc->default_cpu_type = TYPE_RISCV_CPU_BASE; + mc->possible_cpu_arch_ids = riscv_numa_possible_cpu_arch_ids; + mc->cpu_index_to_instance_props = riscv_numa_cpu_index_to_props; + mc->get_default_cpu_node_id = riscv_numa_get_default_cpu_node_id; + mc->numa_mem_supported = true; + mc->default_ram_id = "riscv.spike.ram"; +} + +static const TypeInfo spike_machine_typeinfo = { + .name = MACHINE_TYPE_NAME("spike"), + .parent = TYPE_MACHINE, + .class_init = spike_machine_class_init, + .instance_init = spike_machine_instance_init, + .instance_size = sizeof(SpikeState), +}; + +static void spike_machine_init_register_types(void) +{ + type_register_static(&spike_machine_typeinfo); +} + +type_init(spike_machine_init_register_types) diff --git a/hw/riscv/virt.c b/hw/riscv/virt.c new file mode 100644 index 000000000..3af074148 --- /dev/null +++ b/hw/riscv/virt.c @@ -0,0 +1,1038 @@ +/* + * QEMU RISC-V VirtIO Board + * + * Copyright (c) 2017 SiFive, Inc. + * + * RISC-V machine with 16550a UART and VirtIO MMIO + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . 
+ */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "qemu/error-report.h" +#include "qapi/error.h" +#include "hw/boards.h" +#include "hw/loader.h" +#include "hw/sysbus.h" +#include "hw/qdev-properties.h" +#include "hw/char/serial.h" +#include "target/riscv/cpu.h" +#include "hw/riscv/riscv_hart.h" +#include "hw/riscv/virt.h" +#include "hw/riscv/boot.h" +#include "hw/riscv/numa.h" +#include "hw/intc/riscv_aclint.h" +#include "hw/intc/sifive_plic.h" +#include "hw/misc/sifive_test.h" +#include "chardev/char.h" +#include "sysemu/device_tree.h" +#include "sysemu/sysemu.h" +#include "hw/pci/pci.h" +#include "hw/pci-host/gpex.h" +#include "hw/display/ramfb.h" + +static const MemMapEntry virt_memmap[] = { + [VIRT_DEBUG] = { 0x0, 0x100 }, + [VIRT_MROM] = { 0x1000, 0xf000 }, + [VIRT_TEST] = { 0x100000, 0x1000 }, + [VIRT_RTC] = { 0x101000, 0x1000 }, + [VIRT_CLINT] = { 0x2000000, 0x10000 }, + [VIRT_ACLINT_SSWI] = { 0x2F00000, 0x4000 }, + [VIRT_PCIE_PIO] = { 0x3000000, 0x10000 }, + [VIRT_PLIC] = { 0xc000000, VIRT_PLIC_SIZE(VIRT_CPUS_MAX * 2) }, + [VIRT_UART0] = { 0x10000000, 0x100 }, + [VIRT_VIRTIO] = { 0x10001000, 0x1000 }, + [VIRT_FW_CFG] = { 0x10100000, 0x18 }, + [VIRT_FLASH] = { 0x20000000, 0x4000000 }, + [VIRT_PCIE_ECAM] = { 0x30000000, 0x10000000 }, + [VIRT_PCIE_MMIO] = { 0x40000000, 0x40000000 }, + [VIRT_DRAM] = { 0x80000000, 0x0 }, +}; + +/* PCIe high mmio is fixed for RV32 */ +#define VIRT32_HIGH_PCIE_MMIO_BASE 0x300000000ULL +#define VIRT32_HIGH_PCIE_MMIO_SIZE (4 * GiB) + +/* PCIe high mmio for RV64, size is fixed but base depends on top of RAM */ +#define VIRT64_HIGH_PCIE_MMIO_SIZE (16 * GiB) + +static MemMapEntry virt_high_pcie_memmap; + +#define VIRT_FLASH_SECTOR_SIZE (256 * KiB) + +static PFlashCFI01 *virt_flash_create1(RISCVVirtState *s, + const char *name, + const char *alias_prop_name) +{ + /* + * Create a single flash device. We use the same parameters as + * the flash devices on the ARM virt board. 
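+ * + * Each device covers half of the 64 MiB VIRT_FLASH window, i.e. 32 MiB; + * with 256 KiB sectors, virt_flash_map1() below derives + * num-blocks = 32 MiB / 256 KiB = 128.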
+ */ + DeviceState *dev = qdev_new(TYPE_PFLASH_CFI01); + + qdev_prop_set_uint64(dev, "sector-length", VIRT_FLASH_SECTOR_SIZE); + qdev_prop_set_uint8(dev, "width", 4); + qdev_prop_set_uint8(dev, "device-width", 2); + qdev_prop_set_bit(dev, "big-endian", false); + qdev_prop_set_uint16(dev, "id0", 0x89); + qdev_prop_set_uint16(dev, "id1", 0x18); + qdev_prop_set_uint16(dev, "id2", 0x00); + qdev_prop_set_uint16(dev, "id3", 0x00); + qdev_prop_set_string(dev, "name", name); + + object_property_add_child(OBJECT(s), name, OBJECT(dev)); + object_property_add_alias(OBJECT(s), alias_prop_name, + OBJECT(dev), "drive"); + + return PFLASH_CFI01(dev); +} + +static void virt_flash_create(RISCVVirtState *s) +{ + s->flash[0] = virt_flash_create1(s, "virt.flash0", "pflash0"); + s->flash[1] = virt_flash_create1(s, "virt.flash1", "pflash1"); +} + +static void virt_flash_map1(PFlashCFI01 *flash, + hwaddr base, hwaddr size, + MemoryRegion *sysmem) +{ + DeviceState *dev = DEVICE(flash); + + assert(QEMU_IS_ALIGNED(size, VIRT_FLASH_SECTOR_SIZE)); + assert(size / VIRT_FLASH_SECTOR_SIZE <= UINT32_MAX); + qdev_prop_set_uint32(dev, "num-blocks", size / VIRT_FLASH_SECTOR_SIZE); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + + memory_region_add_subregion(sysmem, base, + sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), + 0)); +} + +static void virt_flash_map(RISCVVirtState *s, + MemoryRegion *sysmem) +{ + hwaddr flashsize = virt_memmap[VIRT_FLASH].size / 2; + hwaddr flashbase = virt_memmap[VIRT_FLASH].base; + + virt_flash_map1(s->flash[0], flashbase, flashsize, + sysmem); + virt_flash_map1(s->flash[1], flashbase + flashsize, flashsize, + sysmem); +} + +static void create_pcie_irq_map(void *fdt, char *nodename, + uint32_t plic_phandle) +{ + int pin, dev; + uint32_t + full_irq_map[GPEX_NUM_IRQS * GPEX_NUM_IRQS * FDT_INT_MAP_WIDTH] = {}; + uint32_t *irq_map = full_irq_map; + + /* This code creates a standard swizzle of interrupts such that + * each device's first interrupt is based on its PCI_SLOT number. + * (See pci_swizzle_map_irq_fn().) + * + * We only need one entry per interrupt in the table (not one per + * possible slot), since the interrupt-map-mask allows the table + * to wrap around for any number of devices. + */ + for (dev = 0; dev < GPEX_NUM_IRQS; dev++) { + int devfn = dev * 0x8; + + for (pin = 0; pin < GPEX_NUM_IRQS; pin++) { + int irq_nr = PCIE_IRQ + ((pin + PCI_SLOT(devfn)) % GPEX_NUM_IRQS); + int i = 0; + + irq_map[i] = cpu_to_be32(devfn << 8); + + i += FDT_PCI_ADDR_CELLS; + irq_map[i] = cpu_to_be32(pin + 1); + + i += FDT_PCI_INT_CELLS; + irq_map[i++] = cpu_to_be32(plic_phandle); + + i += FDT_PLIC_ADDR_CELLS; + irq_map[i] = cpu_to_be32(irq_nr); + + irq_map += FDT_INT_MAP_WIDTH; + } + } + + qemu_fdt_setprop(fdt, nodename, "interrupt-map", + full_irq_map, sizeof(full_irq_map)); + + qemu_fdt_setprop_cells(fdt, nodename, "interrupt-map-mask", + 0x1800, 0, 0, 0x7); +} + +static void create_fdt_socket_cpus(RISCVVirtState *s, int socket, + char *clust_name, uint32_t *phandle, + bool is_32_bit, uint32_t *intc_phandles) +{ + int cpu; + uint32_t cpu_phandle; + MachineState *mc = MACHINE(s); + char *name, *cpu_name, *core_name, *intc_name; + + for (cpu = s->soc[socket].num_harts - 1; cpu >= 0; cpu--) { + cpu_phandle = (*phandle)++; + + cpu_name = g_strdup_printf("/cpus/cpu@%d", + s->soc[socket].hartid_base + cpu); + qemu_fdt_add_subnode(mc->fdt, cpu_name); + qemu_fdt_setprop_string(mc->fdt, cpu_name, "mmu-type", + (is_32_bit) ?
"riscv,sv32" : "riscv,sv48"); + name = riscv_isa_string(&s->soc[socket].harts[cpu]); + qemu_fdt_setprop_string(mc->fdt, cpu_name, "riscv,isa", name); + g_free(name); + qemu_fdt_setprop_string(mc->fdt, cpu_name, "compatible", "riscv"); + qemu_fdt_setprop_string(mc->fdt, cpu_name, "status", "okay"); + qemu_fdt_setprop_cell(mc->fdt, cpu_name, "reg", + s->soc[socket].hartid_base + cpu); + qemu_fdt_setprop_string(mc->fdt, cpu_name, "device_type", "cpu"); + riscv_socket_fdt_write_id(mc, mc->fdt, cpu_name, socket); + qemu_fdt_setprop_cell(mc->fdt, cpu_name, "phandle", cpu_phandle); + + intc_phandles[cpu] = (*phandle)++; + + intc_name = g_strdup_printf("%s/interrupt-controller", cpu_name); + qemu_fdt_add_subnode(mc->fdt, intc_name); + qemu_fdt_setprop_cell(mc->fdt, intc_name, "phandle", + intc_phandles[cpu]); + qemu_fdt_setprop_string(mc->fdt, intc_name, "compatible", + "riscv,cpu-intc"); + qemu_fdt_setprop(mc->fdt, intc_name, "interrupt-controller", NULL, 0); + qemu_fdt_setprop_cell(mc->fdt, intc_name, "#interrupt-cells", 1); + + core_name = g_strdup_printf("%s/core%d", clust_name, cpu); + qemu_fdt_add_subnode(mc->fdt, core_name); + qemu_fdt_setprop_cell(mc->fdt, core_name, "cpu", cpu_phandle); + + g_free(core_name); + g_free(intc_name); + g_free(cpu_name); + } +} + +static void create_fdt_socket_memory(RISCVVirtState *s, + const MemMapEntry *memmap, int socket) +{ + char *mem_name; + uint64_t addr, size; + MachineState *mc = MACHINE(s); + + addr = memmap[VIRT_DRAM].base + riscv_socket_mem_offset(mc, socket); + size = riscv_socket_mem_size(mc, socket); + mem_name = g_strdup_printf("/memory@%lx", (long)addr); + qemu_fdt_add_subnode(mc->fdt, mem_name); + qemu_fdt_setprop_cells(mc->fdt, mem_name, "reg", + addr >> 32, addr, size >> 32, size); + qemu_fdt_setprop_string(mc->fdt, mem_name, "device_type", "memory"); + riscv_socket_fdt_write_id(mc, mc->fdt, mem_name, socket); + g_free(mem_name); +} + +static void create_fdt_socket_clint(RISCVVirtState *s, + const MemMapEntry *memmap, int socket, + uint32_t *intc_phandles) +{ + int cpu; + char *clint_name; + uint32_t *clint_cells; + unsigned long clint_addr; + MachineState *mc = MACHINE(s); + static const char * const clint_compat[2] = { + "sifive,clint0", "riscv,clint0" + }; + + clint_cells = g_new0(uint32_t, s->soc[socket].num_harts * 4); + + for (cpu = 0; cpu < s->soc[socket].num_harts; cpu++) { + clint_cells[cpu * 4 + 0] = cpu_to_be32(intc_phandles[cpu]); + clint_cells[cpu * 4 + 1] = cpu_to_be32(IRQ_M_SOFT); + clint_cells[cpu * 4 + 2] = cpu_to_be32(intc_phandles[cpu]); + clint_cells[cpu * 4 + 3] = cpu_to_be32(IRQ_M_TIMER); + } + + clint_addr = memmap[VIRT_CLINT].base + (memmap[VIRT_CLINT].size * socket); + clint_name = g_strdup_printf("/soc/clint@%lx", clint_addr); + qemu_fdt_add_subnode(mc->fdt, clint_name); + qemu_fdt_setprop_string_array(mc->fdt, clint_name, "compatible", + (char **)&clint_compat, + ARRAY_SIZE(clint_compat)); + qemu_fdt_setprop_cells(mc->fdt, clint_name, "reg", + 0x0, clint_addr, 0x0, memmap[VIRT_CLINT].size); + qemu_fdt_setprop(mc->fdt, clint_name, "interrupts-extended", + clint_cells, s->soc[socket].num_harts * sizeof(uint32_t) * 4); + riscv_socket_fdt_write_id(mc, mc->fdt, clint_name, socket); + g_free(clint_name); + + g_free(clint_cells); +} + +static void create_fdt_socket_aclint(RISCVVirtState *s, + const MemMapEntry *memmap, int socket, + uint32_t *intc_phandles) +{ + int cpu; + char *name; + unsigned long addr; + uint32_t aclint_cells_size; + uint32_t *aclint_mswi_cells; + uint32_t *aclint_sswi_cells; + uint32_t 
*aclint_mtimer_cells; + MachineState *mc = MACHINE(s); + + aclint_mswi_cells = g_new0(uint32_t, s->soc[socket].num_harts * 2); + aclint_mtimer_cells = g_new0(uint32_t, s->soc[socket].num_harts * 2); + aclint_sswi_cells = g_new0(uint32_t, s->soc[socket].num_harts * 2); + + for (cpu = 0; cpu < s->soc[socket].num_harts; cpu++) { + aclint_mswi_cells[cpu * 2 + 0] = cpu_to_be32(intc_phandles[cpu]); + aclint_mswi_cells[cpu * 2 + 1] = cpu_to_be32(IRQ_M_SOFT); + aclint_mtimer_cells[cpu * 2 + 0] = cpu_to_be32(intc_phandles[cpu]); + aclint_mtimer_cells[cpu * 2 + 1] = cpu_to_be32(IRQ_M_TIMER); + aclint_sswi_cells[cpu * 2 + 0] = cpu_to_be32(intc_phandles[cpu]); + aclint_sswi_cells[cpu * 2 + 1] = cpu_to_be32(IRQ_S_SOFT); + } + aclint_cells_size = s->soc[socket].num_harts * sizeof(uint32_t) * 2; + + addr = memmap[VIRT_CLINT].base + (memmap[VIRT_CLINT].size * socket); + name = g_strdup_printf("/soc/mswi@%lx", addr); + qemu_fdt_add_subnode(mc->fdt, name); + qemu_fdt_setprop_string(mc->fdt, name, "compatible", "riscv,aclint-mswi"); + qemu_fdt_setprop_cells(mc->fdt, name, "reg", + 0x0, addr, 0x0, RISCV_ACLINT_SWI_SIZE); + qemu_fdt_setprop(mc->fdt, name, "interrupts-extended", + aclint_mswi_cells, aclint_cells_size); + qemu_fdt_setprop(mc->fdt, name, "interrupt-controller", NULL, 0); + qemu_fdt_setprop_cell(mc->fdt, name, "#interrupt-cells", 0); + riscv_socket_fdt_write_id(mc, mc->fdt, name, socket); + g_free(name); + + addr = memmap[VIRT_CLINT].base + RISCV_ACLINT_SWI_SIZE + + (memmap[VIRT_CLINT].size * socket); + name = g_strdup_printf("/soc/mtimer@%lx", addr); + qemu_fdt_add_subnode(mc->fdt, name); + qemu_fdt_setprop_string(mc->fdt, name, "compatible", + "riscv,aclint-mtimer"); + qemu_fdt_setprop_cells(mc->fdt, name, "reg", + 0x0, addr + RISCV_ACLINT_DEFAULT_MTIME, + 0x0, memmap[VIRT_CLINT].size - RISCV_ACLINT_SWI_SIZE - + RISCV_ACLINT_DEFAULT_MTIME, + 0x0, addr + RISCV_ACLINT_DEFAULT_MTIMECMP, + 0x0, RISCV_ACLINT_DEFAULT_MTIME); + qemu_fdt_setprop(mc->fdt, name, "interrupts-extended", + aclint_mtimer_cells, aclint_cells_size); + riscv_socket_fdt_write_id(mc, mc->fdt, name, socket); + g_free(name); + + addr = memmap[VIRT_ACLINT_SSWI].base + + (memmap[VIRT_ACLINT_SSWI].size * socket); + name = g_strdup_printf("/soc/sswi@%lx", addr); + qemu_fdt_add_subnode(mc->fdt, name); + qemu_fdt_setprop_string(mc->fdt, name, "compatible", "riscv,aclint-sswi"); + qemu_fdt_setprop_cells(mc->fdt, name, "reg", + 0x0, addr, 0x0, memmap[VIRT_ACLINT_SSWI].size); + qemu_fdt_setprop(mc->fdt, name, "interrupts-extended", + aclint_sswi_cells, aclint_cells_size); + qemu_fdt_setprop(mc->fdt, name, "interrupt-controller", NULL, 0); + qemu_fdt_setprop_cell(mc->fdt, name, "#interrupt-cells", 0); + riscv_socket_fdt_write_id(mc, mc->fdt, name, socket); + g_free(name); + + g_free(aclint_mswi_cells); + g_free(aclint_mtimer_cells); + g_free(aclint_sswi_cells); +} + +static void create_fdt_socket_plic(RISCVVirtState *s, + const MemMapEntry *memmap, int socket, + uint32_t *phandle, uint32_t *intc_phandles, + uint32_t *plic_phandles) +{ + int cpu; + char *plic_name; + uint32_t *plic_cells; + unsigned long plic_addr; + MachineState *mc = MACHINE(s); + static const char * const plic_compat[2] = { + "sifive,plic-1.0.0", "riscv,plic0" + }; + + plic_cells = g_new0(uint32_t, s->soc[socket].num_harts * 4); + + for (cpu = 0; cpu < s->soc[socket].num_harts; cpu++) { + plic_cells[cpu * 4 + 0] = cpu_to_be32(intc_phandles[cpu]); + plic_cells[cpu * 4 + 1] = cpu_to_be32(IRQ_M_EXT); + plic_cells[cpu * 4 + 2] = cpu_to_be32(intc_phandles[cpu]); + plic_cells[cpu 
* 4 + 3] = cpu_to_be32(IRQ_S_EXT); + } + + plic_phandles[socket] = (*phandle)++; + plic_addr = memmap[VIRT_PLIC].base + (memmap[VIRT_PLIC].size * socket); + plic_name = g_strdup_printf("/soc/plic@%lx", plic_addr); + qemu_fdt_add_subnode(mc->fdt, plic_name); + qemu_fdt_setprop_cell(mc->fdt, plic_name, + "#address-cells", FDT_PLIC_ADDR_CELLS); + qemu_fdt_setprop_cell(mc->fdt, plic_name, + "#interrupt-cells", FDT_PLIC_INT_CELLS); + qemu_fdt_setprop_string_array(mc->fdt, plic_name, "compatible", + (char **)&plic_compat, + ARRAY_SIZE(plic_compat)); + qemu_fdt_setprop(mc->fdt, plic_name, "interrupt-controller", NULL, 0); + qemu_fdt_setprop(mc->fdt, plic_name, "interrupts-extended", + plic_cells, s->soc[socket].num_harts * sizeof(uint32_t) * 4); + qemu_fdt_setprop_cells(mc->fdt, plic_name, "reg", + 0x0, plic_addr, 0x0, memmap[VIRT_PLIC].size); + qemu_fdt_setprop_cell(mc->fdt, plic_name, "riscv,ndev", VIRTIO_NDEV); + riscv_socket_fdt_write_id(mc, mc->fdt, plic_name, socket); + qemu_fdt_setprop_cell(mc->fdt, plic_name, "phandle", + plic_phandles[socket]); + g_free(plic_name); + + g_free(plic_cells); +} + +static void create_fdt_sockets(RISCVVirtState *s, const MemMapEntry *memmap, + bool is_32_bit, uint32_t *phandle, + uint32_t *irq_mmio_phandle, + uint32_t *irq_pcie_phandle, + uint32_t *irq_virtio_phandle) +{ + int socket; + char *clust_name; + uint32_t *intc_phandles; + MachineState *mc = MACHINE(s); + uint32_t xplic_phandles[MAX_NODES]; + + qemu_fdt_add_subnode(mc->fdt, "/cpus"); + qemu_fdt_setprop_cell(mc->fdt, "/cpus", "timebase-frequency", + RISCV_ACLINT_DEFAULT_TIMEBASE_FREQ); + qemu_fdt_setprop_cell(mc->fdt, "/cpus", "#size-cells", 0x0); + qemu_fdt_setprop_cell(mc->fdt, "/cpus", "#address-cells", 0x1); + qemu_fdt_add_subnode(mc->fdt, "/cpus/cpu-map"); + + for (socket = (riscv_socket_count(mc) - 1); socket >= 0; socket--) { + clust_name = g_strdup_printf("/cpus/cpu-map/cluster%d", socket); + qemu_fdt_add_subnode(mc->fdt, clust_name); + + intc_phandles = g_new0(uint32_t, s->soc[socket].num_harts); + + create_fdt_socket_cpus(s, socket, clust_name, phandle, + is_32_bit, intc_phandles); + + create_fdt_socket_memory(s, memmap, socket); + + if (s->have_aclint) { + create_fdt_socket_aclint(s, memmap, socket, intc_phandles); + } else { + create_fdt_socket_clint(s, memmap, socket, intc_phandles); + } + + create_fdt_socket_plic(s, memmap, socket, phandle, + intc_phandles, xplic_phandles); + + g_free(intc_phandles); + g_free(clust_name); + } + + for (socket = 0; socket < riscv_socket_count(mc); socket++) { + if (socket == 0) { + *irq_mmio_phandle = xplic_phandles[socket]; + *irq_virtio_phandle = xplic_phandles[socket]; + *irq_pcie_phandle = xplic_phandles[socket]; + } + if (socket == 1) { + *irq_virtio_phandle = xplic_phandles[socket]; + *irq_pcie_phandle = xplic_phandles[socket]; + } + if (socket == 2) { + *irq_pcie_phandle = xplic_phandles[socket]; + } + } + + riscv_socket_fdt_write_distance_matrix(mc, mc->fdt); +} + +static void create_fdt_virtio(RISCVVirtState *s, const MemMapEntry *memmap, + uint32_t irq_virtio_phandle) +{ + int i; + char *name; + MachineState *mc = MACHINE(s); + + for (i = 0; i < VIRTIO_COUNT; i++) { + name = g_strdup_printf("/soc/virtio_mmio@%lx", + (long)(memmap[VIRT_VIRTIO].base + i * memmap[VIRT_VIRTIO].size)); + qemu_fdt_add_subnode(mc->fdt, name); + qemu_fdt_setprop_string(mc->fdt, name, "compatible", "virtio,mmio"); + qemu_fdt_setprop_cells(mc->fdt, name, "reg", + 0x0, memmap[VIRT_VIRTIO].base + i * memmap[VIRT_VIRTIO].size, + 0x0, memmap[VIRT_VIRTIO].size); + 
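+ /* + * Transport i sits at VIRT_VIRTIO base + i * 0x1000 and raises + * VIRTIO_IRQ + i, matching the sysbus wiring done later in + * virt_machine_init(). + */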
qemu_fdt_setprop_cell(mc->fdt, name, "interrupt-parent", + irq_virtio_phandle); + qemu_fdt_setprop_cell(mc->fdt, name, "interrupts", VIRTIO_IRQ + i); + g_free(name); + } +} + +static void create_fdt_pcie(RISCVVirtState *s, const MemMapEntry *memmap, + uint32_t irq_pcie_phandle) +{ + char *name; + MachineState *mc = MACHINE(s); + + name = g_strdup_printf("/soc/pci@%lx", + (long) memmap[VIRT_PCIE_ECAM].base); + qemu_fdt_add_subnode(mc->fdt, name); + qemu_fdt_setprop_cell(mc->fdt, name, "#address-cells", + FDT_PCI_ADDR_CELLS); + qemu_fdt_setprop_cell(mc->fdt, name, "#interrupt-cells", + FDT_PCI_INT_CELLS); + qemu_fdt_setprop_cell(mc->fdt, name, "#size-cells", 0x2); + qemu_fdt_setprop_string(mc->fdt, name, "compatible", + "pci-host-ecam-generic"); + qemu_fdt_setprop_string(mc->fdt, name, "device_type", "pci"); + qemu_fdt_setprop_cell(mc->fdt, name, "linux,pci-domain", 0); + qemu_fdt_setprop_cells(mc->fdt, name, "bus-range", 0, + memmap[VIRT_PCIE_ECAM].size / PCIE_MMCFG_SIZE_MIN - 1); + qemu_fdt_setprop(mc->fdt, name, "dma-coherent", NULL, 0); + qemu_fdt_setprop_cells(mc->fdt, name, "reg", 0, + memmap[VIRT_PCIE_ECAM].base, 0, memmap[VIRT_PCIE_ECAM].size); + qemu_fdt_setprop_sized_cells(mc->fdt, name, "ranges", + 1, FDT_PCI_RANGE_IOPORT, 2, 0, + 2, memmap[VIRT_PCIE_PIO].base, 2, memmap[VIRT_PCIE_PIO].size, + 1, FDT_PCI_RANGE_MMIO, + 2, memmap[VIRT_PCIE_MMIO].base, + 2, memmap[VIRT_PCIE_MMIO].base, 2, memmap[VIRT_PCIE_MMIO].size, + 1, FDT_PCI_RANGE_MMIO_64BIT, + 2, virt_high_pcie_memmap.base, + 2, virt_high_pcie_memmap.base, 2, virt_high_pcie_memmap.size); + + create_pcie_irq_map(mc->fdt, name, irq_pcie_phandle); + g_free(name); +} + +static void create_fdt_reset(RISCVVirtState *s, const MemMapEntry *memmap, + uint32_t *phandle) +{ + char *name; + uint32_t test_phandle; + MachineState *mc = MACHINE(s); + + test_phandle = (*phandle)++; + name = g_strdup_printf("/soc/test@%lx", + (long)memmap[VIRT_TEST].base); + qemu_fdt_add_subnode(mc->fdt, name); + { + static const char * const compat[3] = { + "sifive,test1", "sifive,test0", "syscon" + }; + qemu_fdt_setprop_string_array(mc->fdt, name, "compatible", + (char **)&compat, ARRAY_SIZE(compat)); + } + qemu_fdt_setprop_cells(mc->fdt, name, "reg", + 0x0, memmap[VIRT_TEST].base, 0x0, memmap[VIRT_TEST].size); + qemu_fdt_setprop_cell(mc->fdt, name, "phandle", test_phandle); + test_phandle = qemu_fdt_get_phandle(mc->fdt, name); + g_free(name); + + name = g_strdup_printf("/soc/reboot"); + qemu_fdt_add_subnode(mc->fdt, name); + qemu_fdt_setprop_string(mc->fdt, name, "compatible", "syscon-reboot"); + qemu_fdt_setprop_cell(mc->fdt, name, "regmap", test_phandle); + qemu_fdt_setprop_cell(mc->fdt, name, "offset", 0x0); + qemu_fdt_setprop_cell(mc->fdt, name, "value", FINISHER_RESET); + g_free(name); + + name = g_strdup_printf("/soc/poweroff"); + qemu_fdt_add_subnode(mc->fdt, name); + qemu_fdt_setprop_string(mc->fdt, name, "compatible", "syscon-poweroff"); + qemu_fdt_setprop_cell(mc->fdt, name, "regmap", test_phandle); + qemu_fdt_setprop_cell(mc->fdt, name, "offset", 0x0); + qemu_fdt_setprop_cell(mc->fdt, name, "value", FINISHER_PASS); + g_free(name); +} + +static void create_fdt_uart(RISCVVirtState *s, const MemMapEntry *memmap, + uint32_t irq_mmio_phandle) +{ + char *name; + MachineState *mc = MACHINE(s); + + name = g_strdup_printf("/soc/uart@%lx", (long)memmap[VIRT_UART0].base); + qemu_fdt_add_subnode(mc->fdt, name); + qemu_fdt_setprop_string(mc->fdt, name, "compatible", "ns16550a"); + qemu_fdt_setprop_cells(mc->fdt, name, "reg", + 0x0, memmap[VIRT_UART0].base, + 
0x0, memmap[VIRT_UART0].size); + qemu_fdt_setprop_cell(mc->fdt, name, "clock-frequency", 3686400); + qemu_fdt_setprop_cell(mc->fdt, name, "interrupt-parent", irq_mmio_phandle); + qemu_fdt_setprop_cell(mc->fdt, name, "interrupts", UART0_IRQ); + + qemu_fdt_add_subnode(mc->fdt, "/chosen"); + qemu_fdt_setprop_string(mc->fdt, "/chosen", "stdout-path", name); + g_free(name); +} + +static void create_fdt_rtc(RISCVVirtState *s, const MemMapEntry *memmap, + uint32_t irq_mmio_phandle) +{ + char *name; + MachineState *mc = MACHINE(s); + + name = g_strdup_printf("/soc/rtc@%lx", (long)memmap[VIRT_RTC].base); + qemu_fdt_add_subnode(mc->fdt, name); + qemu_fdt_setprop_string(mc->fdt, name, "compatible", + "google,goldfish-rtc"); + qemu_fdt_setprop_cells(mc->fdt, name, "reg", + 0x0, memmap[VIRT_RTC].base, 0x0, memmap[VIRT_RTC].size); + qemu_fdt_setprop_cell(mc->fdt, name, "interrupt-parent", + irq_mmio_phandle); + qemu_fdt_setprop_cell(mc->fdt, name, "interrupts", RTC_IRQ); + g_free(name); +} + +static void create_fdt_flash(RISCVVirtState *s, const MemMapEntry *memmap) +{ + char *name; + MachineState *mc = MACHINE(s); + hwaddr flashsize = virt_memmap[VIRT_FLASH].size / 2; + hwaddr flashbase = virt_memmap[VIRT_FLASH].base; + + name = g_strdup_printf("/flash@%" PRIx64, flashbase); + qemu_fdt_add_subnode(mc->fdt, name); + qemu_fdt_setprop_string(mc->fdt, name, "compatible", "cfi-flash"); + qemu_fdt_setprop_sized_cells(mc->fdt, name, "reg", + 2, flashbase, 2, flashsize, + 2, flashbase + flashsize, 2, flashsize); + qemu_fdt_setprop_cell(mc->fdt, name, "bank-width", 4); + g_free(name); +} + +static void create_fdt(RISCVVirtState *s, const MemMapEntry *memmap, + uint64_t mem_size, const char *cmdline, bool is_32_bit) +{ + MachineState *mc = MACHINE(s); + uint32_t phandle = 1, irq_mmio_phandle = 1; + uint32_t irq_pcie_phandle = 1, irq_virtio_phandle = 1; + + if (mc->dtb) { + mc->fdt = load_device_tree(mc->dtb, &s->fdt_size); + if (!mc->fdt) { + error_report("load_device_tree() failed"); + exit(1); + } + goto update_bootargs; + } else { + mc->fdt = create_device_tree(&s->fdt_size); + if (!mc->fdt) { + error_report("create_device_tree() failed"); + exit(1); + } + } + + qemu_fdt_setprop_string(mc->fdt, "/", "model", "riscv-virtio,qemu"); + qemu_fdt_setprop_string(mc->fdt, "/", "compatible", "riscv-virtio"); + qemu_fdt_setprop_cell(mc->fdt, "/", "#size-cells", 0x2); + qemu_fdt_setprop_cell(mc->fdt, "/", "#address-cells", 0x2); + + qemu_fdt_add_subnode(mc->fdt, "/soc"); + qemu_fdt_setprop(mc->fdt, "/soc", "ranges", NULL, 0); + qemu_fdt_setprop_string(mc->fdt, "/soc", "compatible", "simple-bus"); + qemu_fdt_setprop_cell(mc->fdt, "/soc", "#size-cells", 0x2); + qemu_fdt_setprop_cell(mc->fdt, "/soc", "#address-cells", 0x2); + + create_fdt_sockets(s, memmap, is_32_bit, &phandle, + &irq_mmio_phandle, &irq_pcie_phandle, &irq_virtio_phandle); + + create_fdt_virtio(s, memmap, irq_virtio_phandle); + + create_fdt_pcie(s, memmap, irq_pcie_phandle); + + create_fdt_reset(s, memmap, &phandle); + + create_fdt_uart(s, memmap, irq_mmio_phandle); + + create_fdt_rtc(s, memmap, irq_mmio_phandle); + + create_fdt_flash(s, memmap); + +update_bootargs: + if (cmdline) { + qemu_fdt_setprop_string(mc->fdt, "/chosen", "bootargs", cmdline); + } +} + +static inline DeviceState *gpex_pcie_init(MemoryRegion *sys_mem, + hwaddr ecam_base, hwaddr ecam_size, + hwaddr mmio_base, hwaddr mmio_size, + hwaddr high_mmio_base, + hwaddr high_mmio_size, + hwaddr pio_base, + DeviceState *plic) +{ + DeviceState *dev; + MemoryRegion *ecam_alias, *ecam_reg; + 
MemoryRegion *mmio_alias, *high_mmio_alias, *mmio_reg; + qemu_irq irq; + int i; + + dev = qdev_new(TYPE_GPEX_HOST); + + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + + ecam_alias = g_new0(MemoryRegion, 1); + ecam_reg = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 0); + memory_region_init_alias(ecam_alias, OBJECT(dev), "pcie-ecam", + ecam_reg, 0, ecam_size); + memory_region_add_subregion(get_system_memory(), ecam_base, ecam_alias); + + mmio_alias = g_new0(MemoryRegion, 1); + mmio_reg = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 1); + memory_region_init_alias(mmio_alias, OBJECT(dev), "pcie-mmio", + mmio_reg, mmio_base, mmio_size); + memory_region_add_subregion(get_system_memory(), mmio_base, mmio_alias); + + /* Map high MMIO space */ + high_mmio_alias = g_new0(MemoryRegion, 1); + memory_region_init_alias(high_mmio_alias, OBJECT(dev), "pcie-mmio-high", + mmio_reg, high_mmio_base, high_mmio_size); + memory_region_add_subregion(get_system_memory(), high_mmio_base, + high_mmio_alias); + + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 2, pio_base); + + for (i = 0; i < GPEX_NUM_IRQS; i++) { + irq = qdev_get_gpio_in(plic, PCIE_IRQ + i); + + sysbus_connect_irq(SYS_BUS_DEVICE(dev), i, irq); + gpex_set_irq_num(GPEX_HOST(dev), i, PCIE_IRQ + i); + } + + return dev; +} + +static FWCfgState *create_fw_cfg(const MachineState *mc) +{ + hwaddr base = virt_memmap[VIRT_FW_CFG].base; + hwaddr size = virt_memmap[VIRT_FW_CFG].size; + FWCfgState *fw_cfg; + char *nodename; + + fw_cfg = fw_cfg_init_mem_wide(base + 8, base, 8, base + 16, + &address_space_memory); + fw_cfg_add_i16(fw_cfg, FW_CFG_NB_CPUS, (uint16_t)mc->smp.cpus); + + nodename = g_strdup_printf("/fw-cfg@%" PRIx64, base); + qemu_fdt_add_subnode(mc->fdt, nodename); + qemu_fdt_setprop_string(mc->fdt, nodename, + "compatible", "qemu,fw-cfg-mmio"); + qemu_fdt_setprop_sized_cells(mc->fdt, nodename, "reg", + 2, base, 2, size); + qemu_fdt_setprop(mc->fdt, nodename, "dma-coherent", NULL, 0); + g_free(nodename); + return fw_cfg; +} + +static void virt_machine_init(MachineState *machine) +{ + const MemMapEntry *memmap = virt_memmap; + RISCVVirtState *s = RISCV_VIRT_MACHINE(machine); + MemoryRegion *system_memory = get_system_memory(); + MemoryRegion *mask_rom = g_new(MemoryRegion, 1); + char *plic_hart_config, *soc_name; + target_ulong start_addr = memmap[VIRT_DRAM].base; + target_ulong firmware_end_addr, kernel_start_addr; + uint32_t fdt_load_addr; + uint64_t kernel_entry; + DeviceState *mmio_plic, *virtio_plic, *pcie_plic; + int i, base_hartid, hart_count; + + /* Check socket count limit */ + if (VIRT_SOCKETS_MAX < riscv_socket_count(machine)) { + error_report("number of sockets/nodes should be less than %d", + VIRT_SOCKETS_MAX); + exit(1); + } + + /* Initialize sockets */ + mmio_plic = virtio_plic = pcie_plic = NULL; + for (i = 0; i < riscv_socket_count(machine); i++) { + if (!riscv_socket_check_hartids(machine, i)) { + error_report("discontinuous hartids in socket%d", i); + exit(1); + } + + base_hartid = riscv_socket_first_hartid(machine, i); + if (base_hartid < 0) { + error_report("can't find hartid base for socket%d", i); + exit(1); + } + + hart_count = riscv_socket_hart_count(machine, i); + if (hart_count < 0) { + error_report("can't find hart count for socket%d", i); + exit(1); + } + + soc_name = g_strdup_printf("soc%d", i); + object_initialize_child(OBJECT(machine), soc_name, &s->soc[i], + TYPE_RISCV_HART_ARRAY); + g_free(soc_name); + object_property_set_str(OBJECT(&s->soc[i]), "cpu-type", + machine->cpu_type, &error_abort); + 
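+ /* + * hartid-base and num-harts (set below) give this socket a + * contiguous hartid range; riscv_socket_check_hartids() above has + * already rejected discontinuous layouts. + */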
object_property_set_int(OBJECT(&s->soc[i]), "hartid-base", + base_hartid, &error_abort); + object_property_set_int(OBJECT(&s->soc[i]), "num-harts", + hart_count, &error_abort); + sysbus_realize(SYS_BUS_DEVICE(&s->soc[i]), &error_abort); + + /* Per-socket CLINT */ + riscv_aclint_swi_create( + memmap[VIRT_CLINT].base + i * memmap[VIRT_CLINT].size, + base_hartid, hart_count, false); + riscv_aclint_mtimer_create( + memmap[VIRT_CLINT].base + i * memmap[VIRT_CLINT].size + + RISCV_ACLINT_SWI_SIZE, + RISCV_ACLINT_DEFAULT_MTIMER_SIZE, base_hartid, hart_count, + RISCV_ACLINT_DEFAULT_MTIMECMP, RISCV_ACLINT_DEFAULT_MTIME, + RISCV_ACLINT_DEFAULT_TIMEBASE_FREQ, true); + + /* Per-socket ACLINT SSWI */ + if (s->have_aclint) { + riscv_aclint_swi_create( + memmap[VIRT_ACLINT_SSWI].base + + i * memmap[VIRT_ACLINT_SSWI].size, + base_hartid, hart_count, true); + } + + /* Per-socket PLIC hart topology configuration string */ + plic_hart_config = riscv_plic_hart_config_string(hart_count); + + /* Per-socket PLIC */ + s->plic[i] = sifive_plic_create( + memmap[VIRT_PLIC].base + i * memmap[VIRT_PLIC].size, + plic_hart_config, hart_count, base_hartid, + VIRT_PLIC_NUM_SOURCES, + VIRT_PLIC_NUM_PRIORITIES, + VIRT_PLIC_PRIORITY_BASE, + VIRT_PLIC_PENDING_BASE, + VIRT_PLIC_ENABLE_BASE, + VIRT_PLIC_ENABLE_STRIDE, + VIRT_PLIC_CONTEXT_BASE, + VIRT_PLIC_CONTEXT_STRIDE, + memmap[VIRT_PLIC].size); + g_free(plic_hart_config); + + /* Try to use a different PLIC instance for each device type */ + if (i == 0) { + mmio_plic = s->plic[i]; + virtio_plic = s->plic[i]; + pcie_plic = s->plic[i]; + } + if (i == 1) { + virtio_plic = s->plic[i]; + pcie_plic = s->plic[i]; + } + if (i == 2) { + pcie_plic = s->plic[i]; + } + } + + if (riscv_is_32bit(&s->soc[0])) { +#if HOST_LONG_BITS == 64 + /* limit RAM size in a 32-bit system */ + if (machine->ram_size > 10 * GiB) { + machine->ram_size = 10 * GiB; + error_report("Limiting RAM size to 10 GiB"); + } +#endif + virt_high_pcie_memmap.base = VIRT32_HIGH_PCIE_MMIO_BASE; + virt_high_pcie_memmap.size = VIRT32_HIGH_PCIE_MMIO_SIZE; + } else { + virt_high_pcie_memmap.size = VIRT64_HIGH_PCIE_MMIO_SIZE; + virt_high_pcie_memmap.base = memmap[VIRT_DRAM].base + machine->ram_size; + virt_high_pcie_memmap.base = + ROUND_UP(virt_high_pcie_memmap.base, virt_high_pcie_memmap.size); + } + + /* register system main memory (actual RAM) */ + memory_region_add_subregion(system_memory, memmap[VIRT_DRAM].base, + machine->ram); + + /* create device tree */ + create_fdt(s, memmap, machine->ram_size, machine->kernel_cmdline, + riscv_is_32bit(&s->soc[0])); + + /* boot rom */ + memory_region_init_rom(mask_rom, NULL, "riscv_virt_board.mrom", + memmap[VIRT_MROM].size, &error_fatal); + memory_region_add_subregion(system_memory, memmap[VIRT_MROM].base, + mask_rom); + + if (riscv_is_32bit(&s->soc[0])) { + firmware_end_addr = riscv_find_and_load_firmware(machine, + RISCV32_BIOS_BIN, start_addr, NULL); + } else { + firmware_end_addr = riscv_find_and_load_firmware(machine, + RISCV64_BIOS_BIN, start_addr, NULL); + } + + if (machine->kernel_filename) { + kernel_start_addr = riscv_calc_kernel_start_addr(&s->soc[0], + firmware_end_addr); + + kernel_entry = riscv_load_kernel(machine->kernel_filename, + kernel_start_addr, NULL); + + if (machine->initrd_filename) { + hwaddr start; + hwaddr end = riscv_load_initrd(machine->initrd_filename, + machine->ram_size, kernel_entry, + &start); + qemu_fdt_setprop_cell(machine->fdt, "/chosen", + "linux,initrd-start", start); + qemu_fdt_setprop_cell(machine->fdt, "/chosen", "linux,initrd-end", + end); + } + 
} else { + /* + * If dynamic firmware is used, it has no way of knowing the entry + * point of the next boot stage when no kernel argument is given. + */ + kernel_entry = 0; + } + + if (drive_get(IF_PFLASH, 0, 0)) { + /* + * Pflash was supplied, so overwrite the address we jump to after + * reset with the base of the flash. + */ + start_addr = virt_memmap[VIRT_FLASH].base; + } + + /* + * Init fw_cfg. Must be done before riscv_load_fdt, otherwise the device + * tree cannot be altered and we get FDT_ERR_NOSPACE. + */ + s->fw_cfg = create_fw_cfg(machine); + rom_set_fw(s->fw_cfg); + + /* Compute the fdt load address in dram */ + fdt_load_addr = riscv_load_fdt(memmap[VIRT_DRAM].base, + machine->ram_size, machine->fdt); + /* load the reset vector */ + riscv_setup_rom_reset_vec(machine, &s->soc[0], start_addr, + virt_memmap[VIRT_MROM].base, + virt_memmap[VIRT_MROM].size, kernel_entry, + fdt_load_addr, machine->fdt); + + /* SiFive Test MMIO device */ + sifive_test_create(memmap[VIRT_TEST].base); + + /* VirtIO MMIO devices */ + for (i = 0; i < VIRTIO_COUNT; i++) { + sysbus_create_simple("virtio-mmio", + memmap[VIRT_VIRTIO].base + i * memmap[VIRT_VIRTIO].size, + qdev_get_gpio_in(DEVICE(virtio_plic), VIRTIO_IRQ + i)); + } + + gpex_pcie_init(system_memory, + memmap[VIRT_PCIE_ECAM].base, + memmap[VIRT_PCIE_ECAM].size, + memmap[VIRT_PCIE_MMIO].base, + memmap[VIRT_PCIE_MMIO].size, + virt_high_pcie_memmap.base, + virt_high_pcie_memmap.size, + memmap[VIRT_PCIE_PIO].base, + DEVICE(pcie_plic)); + + serial_mm_init(system_memory, memmap[VIRT_UART0].base, + 0, qdev_get_gpio_in(DEVICE(mmio_plic), UART0_IRQ), 399193, + serial_hd(0), DEVICE_LITTLE_ENDIAN); + + sysbus_create_simple("goldfish_rtc", memmap[VIRT_RTC].base, + qdev_get_gpio_in(DEVICE(mmio_plic), RTC_IRQ)); + + virt_flash_create(s); + + for (i = 0; i < ARRAY_SIZE(s->flash); i++) { + /* Map legacy -drive if=pflash to machine properties */ + pflash_cfi01_legacy_drive(s->flash[i], + drive_get(IF_PFLASH, 0, i)); + } + virt_flash_map(s, system_memory); +} + +static void virt_machine_instance_init(Object *obj) +{ +} + +static bool virt_get_aclint(Object *obj, Error **errp) +{ + MachineState *ms = MACHINE(obj); + RISCVVirtState *s = RISCV_VIRT_MACHINE(ms); + + return s->have_aclint; +} + +static void virt_set_aclint(Object *obj, bool value, Error **errp) +{ + MachineState *ms = MACHINE(obj); + RISCVVirtState *s = RISCV_VIRT_MACHINE(ms); + + s->have_aclint = value; +} + +static void virt_machine_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + + mc->desc = "RISC-V VirtIO board"; + mc->init = virt_machine_init; + mc->max_cpus = VIRT_CPUS_MAX; + mc->default_cpu_type = TYPE_RISCV_CPU_BASE; + mc->pci_allow_0_address = true; + mc->possible_cpu_arch_ids = riscv_numa_possible_cpu_arch_ids; + mc->cpu_index_to_instance_props = riscv_numa_cpu_index_to_props; + mc->get_default_cpu_node_id = riscv_numa_get_default_cpu_node_id; + mc->numa_mem_supported = true; + mc->default_ram_id = "riscv_virt_board.ram"; + + machine_class_allow_dynamic_sysbus_dev(mc, TYPE_RAMFB_DEVICE); + + object_class_property_add_bool(oc, "aclint", virt_get_aclint, + virt_set_aclint); + object_class_property_set_description(oc, "aclint", + "Set on/off to enable/disable " + "emulating ACLINT devices"); +} + +static const TypeInfo virt_machine_typeinfo = { + .name = MACHINE_TYPE_NAME("virt"), + .parent = TYPE_MACHINE, + .class_init = virt_machine_class_init, + .instance_init = virt_machine_instance_init, + .instance_size = sizeof(RISCVVirtState), +}; + +static void
virt_machine_init_register_types(void) +{ + type_register_static(&virt_machine_typeinfo); +} + +type_init(virt_machine_init_register_types) diff --git a/hw/rtc/Kconfig b/hw/rtc/Kconfig new file mode 100644 index 000000000..f06e133b8 --- /dev/null +++ b/hw/rtc/Kconfig @@ -0,0 +1,27 @@ +config DS1338 + bool + depends on I2C + +config M41T80 + bool + depends on I2C + +config M48T59 + bool + +config PL031 + bool + +config TWL92230 + bool + depends on I2C + +config MC146818RTC + depends on ISA_BUS + bool + +config SUN4V_RTC + bool + +config GOLDFISH_RTC + bool diff --git a/hw/rtc/allwinner-rtc.c b/hw/rtc/allwinner-rtc.c new file mode 100644 index 000000000..5606a51d5 --- /dev/null +++ b/hw/rtc/allwinner-rtc.c @@ -0,0 +1,411 @@ +/* + * Allwinner Real Time Clock emulation + * + * Copyright (C) 2019 Niek Linnenbank + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "qemu-common.h" +#include "hw/qdev-properties.h" +#include "hw/rtc/allwinner-rtc.h" +#include "trace.h" + +/* RTC registers */ +enum { + REG_LOSC = 1, /* Low Oscillator Control */ + REG_YYMMDD, /* RTC Year-Month-Day */ + REG_HHMMSS, /* RTC Hour-Minute-Second */ + REG_ALARM1_WKHHMMSS, /* Alarm1 Week Hour-Minute-Second */ + REG_ALARM1_EN, /* Alarm1 Enable */ + REG_ALARM1_IRQ_EN, /* Alarm1 IRQ Enable */ + REG_ALARM1_IRQ_STA, /* Alarm1 IRQ Status */ + REG_GP0, /* General Purpose Register 0 */ + REG_GP1, /* General Purpose Register 1 */ + REG_GP2, /* General Purpose Register 2 */ + REG_GP3, /* General Purpose Register 3 */ + + /* sun4i registers */ + REG_ALARM1_DDHHMMSS, /* Alarm1 Day Hour-Minute-Second */ + REG_CPUCFG, /* CPU Configuration Register */ + + /* sun6i registers */ + REG_LOSC_AUTOSTA, /* LOSC Auto Switch Status */ + REG_INT_OSC_PRE, /* Internal OSC Clock Prescaler */ + REG_ALARM0_COUNTER, /* Alarm0 Counter */ + REG_ALARM0_CUR_VLU, /* Alarm0 Counter Current Value */ + REG_ALARM0_ENABLE, /* Alarm0 Enable */ + REG_ALARM0_IRQ_EN, /* Alarm0 IRQ Enable */ + REG_ALARM0_IRQ_STA, /* Alarm0 IRQ Status */ + REG_ALARM_CONFIG, /* Alarm Config */ + REG_LOSC_OUT_GATING, /* LOSC Output Gating Register */ + REG_GP4, /* General Purpose Register 4 */ + REG_GP5, /* General Purpose Register 5 */ + REG_GP6, /* General Purpose Register 6 */ + REG_GP7, /* General Purpose Register 7 */ + REG_RTC_DBG, /* RTC Debug Register */ + REG_GPL_HOLD_OUT, /* GPL Hold Output Register */ + REG_VDD_RTC, /* VDD RTC Regulate Register */ + REG_IC_CHARA, /* IC Characteristics Register */ +}; + +/* RTC register flags */ +enum { + REG_LOSC_YMD = (1 << 7), + REG_LOSC_HMS = (1 << 8), +}; + +/* RTC sun4i register map (offset to name) */ +const uint8_t allwinner_rtc_sun4i_regmap[] = { + [0x0000] = REG_LOSC, + [0x0004] = REG_YYMMDD, + [0x0008] = REG_HHMMSS, + [0x000C] = REG_ALARM1_DDHHMMSS, + [0x0010] = REG_ALARM1_WKHHMMSS, + [0x0014] = 
REG_ALARM1_EN, + [0x0018] = REG_ALARM1_IRQ_EN, + [0x001C] = REG_ALARM1_IRQ_STA, + [0x0020] = REG_GP0, + [0x0024] = REG_GP1, + [0x0028] = REG_GP2, + [0x002C] = REG_GP3, + [0x003C] = REG_CPUCFG, +}; + +/* RTC sun6i register map (offset to name) */ +const uint8_t allwinner_rtc_sun6i_regmap[] = { + [0x0000] = REG_LOSC, + [0x0004] = REG_LOSC_AUTOSTA, + [0x0008] = REG_INT_OSC_PRE, + [0x0010] = REG_YYMMDD, + [0x0014] = REG_HHMMSS, + [0x0020] = REG_ALARM0_COUNTER, + [0x0024] = REG_ALARM0_CUR_VLU, + [0x0028] = REG_ALARM0_ENABLE, + [0x002C] = REG_ALARM0_IRQ_EN, + [0x0030] = REG_ALARM0_IRQ_STA, + [0x0040] = REG_ALARM1_WKHHMMSS, + [0x0044] = REG_ALARM1_EN, + [0x0048] = REG_ALARM1_IRQ_EN, + [0x004C] = REG_ALARM1_IRQ_STA, + [0x0050] = REG_ALARM_CONFIG, + [0x0060] = REG_LOSC_OUT_GATING, + [0x0100] = REG_GP0, + [0x0104] = REG_GP1, + [0x0108] = REG_GP2, + [0x010C] = REG_GP3, + [0x0110] = REG_GP4, + [0x0114] = REG_GP5, + [0x0118] = REG_GP6, + [0x011C] = REG_GP7, + [0x0170] = REG_RTC_DBG, + [0x0180] = REG_GPL_HOLD_OUT, + [0x0190] = REG_VDD_RTC, + [0x01F0] = REG_IC_CHARA, +}; + +static bool allwinner_rtc_sun4i_read(AwRtcState *s, uint32_t offset) +{ + /* no sun4i specific registers currently implemented */ + return false; +} + +static bool allwinner_rtc_sun4i_write(AwRtcState *s, uint32_t offset, + uint32_t data) +{ + /* no sun4i specific registers currently implemented */ + return false; +} + +static bool allwinner_rtc_sun6i_read(AwRtcState *s, uint32_t offset) +{ + const AwRtcClass *c = AW_RTC_GET_CLASS(s); + + switch (c->regmap[offset]) { + case REG_GP4: /* General Purpose Register 4 */ + case REG_GP5: /* General Purpose Register 5 */ + case REG_GP6: /* General Purpose Register 6 */ + case REG_GP7: /* General Purpose Register 7 */ + return true; + default: + break; + } + return false; +} + +static bool allwinner_rtc_sun6i_write(AwRtcState *s, uint32_t offset, + uint32_t data) +{ + const AwRtcClass *c = AW_RTC_GET_CLASS(s); + + switch (c->regmap[offset]) { + case REG_GP4: /* General Purpose Register 4 */ + case REG_GP5: /* General Purpose Register 5 */ + case REG_GP6: /* General Purpose Register 6 */ + case REG_GP7: /* General Purpose Register 7 */ + return true; + default: + break; + } + return false; +} + +static uint64_t allwinner_rtc_read(void *opaque, hwaddr offset, + unsigned size) +{ + AwRtcState *s = AW_RTC(opaque); + const AwRtcClass *c = AW_RTC_GET_CLASS(s); + uint64_t val = 0; + + if (offset >= c->regmap_size) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: out-of-bounds offset 0x%04x\n", + __func__, (uint32_t)offset); + return 0; + } + + if (!c->regmap[offset]) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid register 0x%04x\n", + __func__, (uint32_t)offset); + return 0; + } + + switch (c->regmap[offset]) { + case REG_LOSC: /* Low Oscillator Control */ + val = s->regs[REG_LOSC]; + s->regs[REG_LOSC] &= ~(REG_LOSC_YMD | REG_LOSC_HMS); + break; + case REG_YYMMDD: /* RTC Year-Month-Day */ + case REG_HHMMSS: /* RTC Hour-Minute-Second */ + case REG_GP0: /* General Purpose Register 0 */ + case REG_GP1: /* General Purpose Register 1 */ + case REG_GP2: /* General Purpose Register 2 */ + case REG_GP3: /* General Purpose Register 3 */ + val = s->regs[c->regmap[offset]]; + break; + default: + if (!c->read(s, offset)) { + qemu_log_mask(LOG_UNIMP, "%s: unimplemented register 0x%04x\n", + __func__, (uint32_t)offset); + } + val = s->regs[c->regmap[offset]]; + break; + } + + trace_allwinner_rtc_read(offset, val); + return val; +} + +static void allwinner_rtc_write(void *opaque, hwaddr offset, + uint64_t val, unsigned 
size) +{ + AwRtcState *s = AW_RTC(opaque); + const AwRtcClass *c = AW_RTC_GET_CLASS(s); + + if (offset >= c->regmap_size) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: out-of-bounds offset 0x%04x\n", + __func__, (uint32_t)offset); + return; + } + + if (!c->regmap[offset]) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid register 0x%04x\n", + __func__, (uint32_t)offset); + return; + } + + trace_allwinner_rtc_write(offset, val); + + switch (c->regmap[offset]) { + case REG_YYMMDD: /* RTC Year-Month-Day */ + s->regs[REG_YYMMDD] = val; + s->regs[REG_LOSC] |= REG_LOSC_YMD; + break; + case REG_HHMMSS: /* RTC Hour-Minute-Second */ + s->regs[REG_HHMMSS] = val; + s->regs[REG_LOSC] |= REG_LOSC_HMS; + break; + case REG_GP0: /* General Purpose Register 0 */ + case REG_GP1: /* General Purpose Register 1 */ + case REG_GP2: /* General Purpose Register 2 */ + case REG_GP3: /* General Purpose Register 3 */ + s->regs[c->regmap[offset]] = val; + break; + default: + if (!c->write(s, offset, val)) { + qemu_log_mask(LOG_UNIMP, "%s: unimplemented register 0x%04x\n", + __func__, (uint32_t)offset); + } + break; + } +} + +static const MemoryRegionOps allwinner_rtc_ops = { + .read = allwinner_rtc_read, + .write = allwinner_rtc_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + }, + .impl.min_access_size = 4, +}; + +static void allwinner_rtc_reset(DeviceState *dev) +{ + AwRtcState *s = AW_RTC(dev); + struct tm now; + + /* Clear registers */ + memset(s->regs, 0, sizeof(s->regs)); + + /* Get current datetime */ + qemu_get_timedate(&now, 0); + + /* Set RTC with current datetime */ + if (s->base_year > 1900) { + s->regs[REG_YYMMDD] = ((now.tm_year + 1900 - s->base_year) << 16) | + ((now.tm_mon + 1) << 8) | + now.tm_mday; + s->regs[REG_HHMMSS] = (((now.tm_wday + 6) % 7) << 29) | + (now.tm_hour << 16) | + (now.tm_min << 8) | + now.tm_sec; + } +} + +static void allwinner_rtc_init(Object *obj) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + AwRtcState *s = AW_RTC(obj); + + /* Memory mapping */ + memory_region_init_io(&s->iomem, OBJECT(s), &allwinner_rtc_ops, s, + TYPE_AW_RTC, 1 * KiB); + sysbus_init_mmio(sbd, &s->iomem); +} + +static const VMStateDescription allwinner_rtc_vmstate = { + .name = "allwinner-rtc", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32_ARRAY(regs, AwRtcState, AW_RTC_REGS_NUM), + VMSTATE_END_OF_LIST() + } +}; + +static Property allwinner_rtc_properties[] = { + DEFINE_PROP_INT32("base-year", AwRtcState, base_year, 0), + DEFINE_PROP_END_OF_LIST(), +}; + +static void allwinner_rtc_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = allwinner_rtc_reset; + dc->vmsd = &allwinner_rtc_vmstate; + device_class_set_props(dc, allwinner_rtc_properties); +} + +static void allwinner_rtc_sun4i_init(Object *obj) +{ + AwRtcState *s = AW_RTC(obj); + s->base_year = 2010; +} + +static void allwinner_rtc_sun4i_class_init(ObjectClass *klass, void *data) +{ + AwRtcClass *arc = AW_RTC_CLASS(klass); + + arc->regmap = allwinner_rtc_sun4i_regmap; + arc->regmap_size = sizeof(allwinner_rtc_sun4i_regmap); + arc->read = allwinner_rtc_sun4i_read; + arc->write = allwinner_rtc_sun4i_write; +} + +static void allwinner_rtc_sun6i_init(Object *obj) +{ + AwRtcState *s = AW_RTC(obj); + s->base_year = 1970; +} + +static void allwinner_rtc_sun6i_class_init(ObjectClass *klass, void *data) +{ + AwRtcClass *arc = AW_RTC_CLASS(klass); + + arc->regmap = allwinner_rtc_sun6i_regmap; + arc->regmap_size = 
sizeof(allwinner_rtc_sun6i_regmap); + arc->read = allwinner_rtc_sun6i_read; + arc->write = allwinner_rtc_sun6i_write; +} + +static void allwinner_rtc_sun7i_init(Object *obj) +{ + AwRtcState *s = AW_RTC(obj); + s->base_year = 1970; +} + +static void allwinner_rtc_sun7i_class_init(ObjectClass *klass, void *data) +{ + AwRtcClass *arc = AW_RTC_CLASS(klass); + allwinner_rtc_sun4i_class_init(klass, arc); +} + +static const TypeInfo allwinner_rtc_info = { + .name = TYPE_AW_RTC, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_init = allwinner_rtc_init, + .instance_size = sizeof(AwRtcState), + .class_init = allwinner_rtc_class_init, + .class_size = sizeof(AwRtcClass), + .abstract = true, +}; + +static const TypeInfo allwinner_rtc_sun4i_info = { + .name = TYPE_AW_RTC_SUN4I, + .parent = TYPE_AW_RTC, + .class_init = allwinner_rtc_sun4i_class_init, + .instance_init = allwinner_rtc_sun4i_init, +}; + +static const TypeInfo allwinner_rtc_sun6i_info = { + .name = TYPE_AW_RTC_SUN6I, + .parent = TYPE_AW_RTC, + .class_init = allwinner_rtc_sun6i_class_init, + .instance_init = allwinner_rtc_sun6i_init, +}; + +static const TypeInfo allwinner_rtc_sun7i_info = { + .name = TYPE_AW_RTC_SUN7I, + .parent = TYPE_AW_RTC, + .class_init = allwinner_rtc_sun7i_class_init, + .instance_init = allwinner_rtc_sun7i_init, +}; + +static void allwinner_rtc_register(void) +{ + type_register_static(&allwinner_rtc_info); + type_register_static(&allwinner_rtc_sun4i_info); + type_register_static(&allwinner_rtc_sun6i_info); + type_register_static(&allwinner_rtc_sun7i_info); +} + +type_init(allwinner_rtc_register) diff --git a/hw/rtc/aspeed_rtc.c b/hw/rtc/aspeed_rtc.c new file mode 100644 index 000000000..3ca118355 --- /dev/null +++ b/hw/rtc/aspeed_rtc.c @@ -0,0 +1,181 @@ +/* + * ASPEED Real Time Clock + * Joel Stanley + * + * Copyright 2019 IBM Corp + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "qemu-common.h" +#include "hw/rtc/aspeed_rtc.h" +#include "migration/vmstate.h" +#include "qemu/log.h" +#include "qemu/timer.h" + +#include "trace.h" + +#define COUNTER1 (0x00 / 4) +#define COUNTER2 (0x04 / 4) +#define ALARM (0x08 / 4) +#define CONTROL (0x10 / 4) +#define ALARM_STATUS (0x14 / 4) + +#define RTC_UNLOCKED BIT(1) +#define RTC_ENABLED BIT(0) + +static void aspeed_rtc_calc_offset(AspeedRtcState *rtc) +{ + struct tm tm; + uint32_t year, cent; + uint32_t reg1 = rtc->reg[COUNTER1]; + uint32_t reg2 = rtc->reg[COUNTER2]; + + tm.tm_mday = (reg1 >> 24) & 0x1f; + tm.tm_hour = (reg1 >> 16) & 0x1f; + tm.tm_min = (reg1 >> 8) & 0x3f; + tm.tm_sec = (reg1 >> 0) & 0x3f; + + cent = (reg2 >> 16) & 0x1f; + year = (reg2 >> 8) & 0x7f; + tm.tm_mon = ((reg2 >> 0) & 0x0f) - 1; + tm.tm_year = year + (cent * 100) - 1900; + + rtc->offset = qemu_timedate_diff(&tm); +} + +static uint32_t aspeed_rtc_get_counter(AspeedRtcState *rtc, int r) +{ + uint32_t year, cent; + struct tm now; + + qemu_get_timedate(&now, rtc->offset); + + switch (r) { + case COUNTER1: + return (now.tm_mday << 24) | (now.tm_hour << 16) | + (now.tm_min << 8) | now.tm_sec; + case COUNTER2: + cent = (now.tm_year + 1900) / 100; + year = now.tm_year % 100; + return ((cent & 0x1f) << 16) | ((year & 0x7f) << 8) | + ((now.tm_mon + 1) & 0xf); + default: + g_assert_not_reached(); + } +} + +static uint64_t aspeed_rtc_read(void *opaque, hwaddr addr, + unsigned size) +{ + AspeedRtcState *rtc = opaque; + uint64_t val; + uint32_t r = addr >> 2; + + switch (r) { + case COUNTER1: + case COUNTER2: + if (rtc->reg[CONTROL] & RTC_ENABLED) { + rtc->reg[r] = 
aspeed_rtc_get_counter(rtc, r); + } + /* fall through */ + case CONTROL: + val = rtc->reg[r]; + break; + case ALARM: + case ALARM_STATUS: + default: + qemu_log_mask(LOG_UNIMP, "%s: 0x%" HWADDR_PRIx "\n", __func__, addr); + return 0; + } + + trace_aspeed_rtc_read(addr, val); + + return val; +} + +static void aspeed_rtc_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + AspeedRtcState *rtc = opaque; + uint32_t r = addr >> 2; + + switch (r) { + case COUNTER1: + case COUNTER2: + if (!(rtc->reg[CONTROL] & RTC_UNLOCKED)) { + break; + } + /* fall through */ + case CONTROL: + rtc->reg[r] = val; + aspeed_rtc_calc_offset(rtc); + break; + case ALARM: + case ALARM_STATUS: + default: + qemu_log_mask(LOG_UNIMP, "%s: 0x%" HWADDR_PRIx "\n", __func__, addr); + break; + } + trace_aspeed_rtc_write(addr, val); +} + +static void aspeed_rtc_reset(DeviceState *d) +{ + AspeedRtcState *rtc = ASPEED_RTC(d); + + rtc->offset = 0; + memset(rtc->reg, 0, sizeof(rtc->reg)); +} + +static const MemoryRegionOps aspeed_rtc_ops = { + .read = aspeed_rtc_read, + .write = aspeed_rtc_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static const VMStateDescription vmstate_aspeed_rtc = { + .name = TYPE_ASPEED_RTC, + .version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32_ARRAY(reg, AspeedRtcState, 0x18), + VMSTATE_INT32(offset, AspeedRtcState), + VMSTATE_INT32(offset, AspeedRtcState), + VMSTATE_END_OF_LIST() + } +}; + +static void aspeed_rtc_realize(DeviceState *dev, Error **errp) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + AspeedRtcState *s = ASPEED_RTC(dev); + + sysbus_init_irq(sbd, &s->irq); + + memory_region_init_io(&s->iomem, OBJECT(s), &aspeed_rtc_ops, s, + "aspeed-rtc", 0x18ULL); + sysbus_init_mmio(sbd, &s->iomem); +} + +static void aspeed_rtc_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = aspeed_rtc_realize; + dc->vmsd = &vmstate_aspeed_rtc; + dc->reset = aspeed_rtc_reset; +} + +static const TypeInfo aspeed_rtc_info = { + .name = TYPE_ASPEED_RTC, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(AspeedRtcState), + .class_init = aspeed_rtc_class_init, +}; + +static void aspeed_rtc_register_types(void) +{ + type_register_static(&aspeed_rtc_info); +} + +type_init(aspeed_rtc_register_types) diff --git a/hw/rtc/ds1338.c b/hw/rtc/ds1338.c new file mode 100644 index 000000000..bc5ce1a9f --- /dev/null +++ b/hw/rtc/ds1338.c @@ -0,0 +1,242 @@ +/* + * MAXIM DS1338 I2C RTC+NVRAM + * + * Copyright (c) 2009 CodeSourcery. + * Written by Paul Brook + * + * This code is licensed under the GNU GPL v2. + * + * Contributions after 2012-01-13 are licensed under the terms of the + * GNU GPL, version 2 or (at your option) any later version. + */ + +#include "qemu/osdep.h" +#include "qemu-common.h" +#include "hw/i2c/i2c.h" +#include "migration/vmstate.h" +#include "qemu/bcd.h" +#include "qemu/module.h" +#include "qom/object.h" + +/* Size of NVRAM including both the user-accessible area and the + * secondary register area. 
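+ * (Per the DS1338 datasheet: 8 clock/control registers at 0x00..0x07 + * followed by 56 bytes of user RAM, 64 bytes in total.)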
+ */ +#define NVRAM_SIZE 64 + +/* Flags definitions */ +#define SECONDS_CH 0x80 +#define HOURS_12 0x40 +#define HOURS_PM 0x20 +#define CTRL_OSF 0x20 + +#define TYPE_DS1338 "ds1338" +OBJECT_DECLARE_SIMPLE_TYPE(DS1338State, DS1338) + +struct DS1338State { + I2CSlave parent_obj; + + int64_t offset; + uint8_t wday_offset; + uint8_t nvram[NVRAM_SIZE]; + int32_t ptr; + bool addr_byte; +}; + +static const VMStateDescription vmstate_ds1338 = { + .name = "ds1338", + .version_id = 2, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_I2C_SLAVE(parent_obj, DS1338State), + VMSTATE_INT64(offset, DS1338State), + VMSTATE_UINT8_V(wday_offset, DS1338State, 2), + VMSTATE_UINT8_ARRAY(nvram, DS1338State, NVRAM_SIZE), + VMSTATE_INT32(ptr, DS1338State), + VMSTATE_BOOL(addr_byte, DS1338State), + VMSTATE_END_OF_LIST() + } +}; + +static void capture_current_time(DS1338State *s) +{ + /* Capture the current time into the secondary registers + * which will be actually read by the data transfer operation. + */ + struct tm now; + qemu_get_timedate(&now, s->offset); + s->nvram[0] = to_bcd(now.tm_sec); + s->nvram[1] = to_bcd(now.tm_min); + if (s->nvram[2] & HOURS_12) { + int tmp = now.tm_hour; + if (tmp % 12 == 0) { + tmp += 12; + } + if (tmp <= 12) { + s->nvram[2] = HOURS_12 | to_bcd(tmp); + } else { + s->nvram[2] = HOURS_12 | HOURS_PM | to_bcd(tmp - 12); + } + } else { + s->nvram[2] = to_bcd(now.tm_hour); + } + s->nvram[3] = (now.tm_wday + s->wday_offset) % 7 + 1; + s->nvram[4] = to_bcd(now.tm_mday); + s->nvram[5] = to_bcd(now.tm_mon + 1); + s->nvram[6] = to_bcd(now.tm_year - 100); +} + +static void inc_regptr(DS1338State *s) +{ + /* The register pointer wraps around after 0x3F; wraparound + * causes the current time/date to be retransferred into + * the secondary registers. + */ + s->ptr = (s->ptr + 1) & (NVRAM_SIZE - 1); + if (!s->ptr) { + capture_current_time(s); + } +} + +static int ds1338_event(I2CSlave *i2c, enum i2c_event event) +{ + DS1338State *s = DS1338(i2c); + + switch (event) { + case I2C_START_RECV: + /* In h/w, capture happens on any START condition, not just a + * START_RECV, but there is no need to actually capture on + * START_SEND, because the guest can't get at that data + * without going through a START_RECV which would overwrite it. + */ + capture_current_time(s); + break; + case I2C_START_SEND: + s->addr_byte = true; + break; + default: + break; + } + + return 0; +} + +static uint8_t ds1338_recv(I2CSlave *i2c) +{ + DS1338State *s = DS1338(i2c); + uint8_t res; + + res = s->nvram[s->ptr]; + inc_regptr(s); + return res; +} + +static int ds1338_send(I2CSlave *i2c, uint8_t data) +{ + DS1338State *s = DS1338(i2c); + + if (s->addr_byte) { + s->ptr = data & (NVRAM_SIZE - 1); + s->addr_byte = false; + return 0; + } + if (s->ptr < 7) { + /* Time register. */ + struct tm now; + qemu_get_timedate(&now, s->offset); + switch(s->ptr) { + case 0: + /* TODO: Implement CH (stop) bit. */ + now.tm_sec = from_bcd(data & 0x7f); + break; + case 1: + now.tm_min = from_bcd(data & 0x7f); + break; + case 2: + if (data & HOURS_12) { + int tmp = from_bcd(data & (HOURS_PM - 1)); + if (data & HOURS_PM) { + tmp += 12; + } + if (tmp % 12 == 0) { + tmp -= 12; + } + now.tm_hour = tmp; + } else { + now.tm_hour = from_bcd(data & (HOURS_12 - 1)); + } + break; + case 3: + { + /* The day field is supposed to contain a value in + the range 1-7. Otherwise behavior is undefined. 
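+ * Only the offset between the guest-visible weekday and the host's + * tm_wday is stored here; capture_current_time() applies the same + * offset when the clock is read back.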
+ */ + int user_wday = (data & 7) - 1; + s->wday_offset = (user_wday - now.tm_wday + 7) % 7; + } + break; + case 4: + now.tm_mday = from_bcd(data & 0x3f); + break; + case 5: + now.tm_mon = from_bcd(data & 0x1f) - 1; + break; + case 6: + now.tm_year = from_bcd(data) + 100; + break; + } + s->offset = qemu_timedate_diff(&now); + } else if (s->ptr == 7) { + /* Control register. */ + + /* Ensure bits 2, 3 and 6 will read back as zero. */ + data &= 0xB3; + + /* Attempting to write the OSF flag to logic 1 leaves the + value unchanged. */ + data = (data & ~CTRL_OSF) | (data & s->nvram[s->ptr] & CTRL_OSF); + + s->nvram[s->ptr] = data; + } else { + s->nvram[s->ptr] = data; + } + inc_regptr(s); + return 0; +} + +static void ds1338_reset(DeviceState *dev) +{ + DS1338State *s = DS1338(dev); + + /* The clock is running and synchronized with the host */ + s->offset = 0; + s->wday_offset = 0; + memset(s->nvram, 0, NVRAM_SIZE); + s->ptr = 0; + s->addr_byte = false; +} + +static void ds1338_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + I2CSlaveClass *k = I2C_SLAVE_CLASS(klass); + + k->event = ds1338_event; + k->recv = ds1338_recv; + k->send = ds1338_send; + dc->reset = ds1338_reset; + dc->vmsd = &vmstate_ds1338; +} + +static const TypeInfo ds1338_info = { + .name = TYPE_DS1338, + .parent = TYPE_I2C_SLAVE, + .instance_size = sizeof(DS1338State), + .class_init = ds1338_class_init, +}; + +static void ds1338_register_types(void) +{ + type_register_static(&ds1338_info); +} + +type_init(ds1338_register_types) diff --git a/hw/rtc/exynos4210_rtc.c b/hw/rtc/exynos4210_rtc.c new file mode 100644 index 000000000..45c0a951c --- /dev/null +++ b/hw/rtc/exynos4210_rtc.c @@ -0,0 +1,617 @@ +/* + * Samsung exynos4210 Real Time Clock + * + * Copyright (c) 2012 Samsung Electronics Co., Ltd. + * Ogurtsov Oleg + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . + * + */ + +/* Description: + * Register RTCCON: + * CLKSEL Bit[1] not used + * CLKOUTEN Bit[9] not used + */ + +#include "qemu/osdep.h" +#include "qemu-common.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "qemu/timer.h" +#include "qemu/bcd.h" +#include "hw/ptimer.h" + +#include "hw/irq.h" + +#include "hw/arm/exynos4210.h" +#include "qom/object.h" + +#define DEBUG_RTC 0 + +#if DEBUG_RTC +#define DPRINTF(fmt, ...) \ + do { fprintf(stdout, "RTC: [%24s:%5d] " fmt, __func__, __LINE__, \ + ## __VA_ARGS__); } while (0) +#else +#define DPRINTF(fmt, ...) 
do {} while (0) +#endif + +#define EXYNOS4210_RTC_REG_MEM_SIZE 0x0100 + +#define INTP 0x0030 +#define RTCCON 0x0040 +#define TICCNT 0x0044 +#define RTCALM 0x0050 +#define ALMSEC 0x0054 +#define ALMMIN 0x0058 +#define ALMHOUR 0x005C +#define ALMDAY 0x0060 +#define ALMMON 0x0064 +#define ALMYEAR 0x0068 +#define BCDSEC 0x0070 +#define BCDMIN 0x0074 +#define BCDHOUR 0x0078 +#define BCDDAY 0x007C +#define BCDDAYWEEK 0x0080 +#define BCDMON 0x0084 +#define BCDYEAR 0x0088 +#define CURTICNT 0x0090 + +#define TICK_TIMER_ENABLE 0x0100 +#define TICNT_THRESHOLD 2 + + +#define RTC_ENABLE 0x0001 + +#define INTP_TICK_ENABLE 0x0001 +#define INTP_ALM_ENABLE 0x0002 + +#define ALARM_INT_ENABLE 0x0040 + +#define RTC_BASE_FREQ 32768 + +#define TYPE_EXYNOS4210_RTC "exynos4210.rtc" +OBJECT_DECLARE_SIMPLE_TYPE(Exynos4210RTCState, EXYNOS4210_RTC) + +struct Exynos4210RTCState { + SysBusDevice parent_obj; + + MemoryRegion iomem; + + /* registers */ + uint32_t reg_intp; + uint32_t reg_rtccon; + uint32_t reg_ticcnt; + uint32_t reg_rtcalm; + uint32_t reg_almsec; + uint32_t reg_almmin; + uint32_t reg_almhour; + uint32_t reg_almday; + uint32_t reg_almmon; + uint32_t reg_almyear; + uint32_t reg_curticcnt; + + ptimer_state *ptimer; /* tick timer */ + ptimer_state *ptimer_1Hz; /* clock timer */ + uint32_t freq; + + qemu_irq tick_irq; /* Time Tick Generator irq */ + qemu_irq alm_irq; /* alarm irq */ + + struct tm current_tm; /* current time */ +}; + +#define TICCKSEL(value) ((value & (0x0F << 4)) >> 4) + +/*** VMState ***/ +static const VMStateDescription vmstate_exynos4210_rtc_state = { + .name = "exynos4210.rtc", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(reg_intp, Exynos4210RTCState), + VMSTATE_UINT32(reg_rtccon, Exynos4210RTCState), + VMSTATE_UINT32(reg_ticcnt, Exynos4210RTCState), + VMSTATE_UINT32(reg_rtcalm, Exynos4210RTCState), + VMSTATE_UINT32(reg_almsec, Exynos4210RTCState), + VMSTATE_UINT32(reg_almmin, Exynos4210RTCState), + VMSTATE_UINT32(reg_almhour, Exynos4210RTCState), + VMSTATE_UINT32(reg_almday, Exynos4210RTCState), + VMSTATE_UINT32(reg_almmon, Exynos4210RTCState), + VMSTATE_UINT32(reg_almyear, Exynos4210RTCState), + VMSTATE_UINT32(reg_curticcnt, Exynos4210RTCState), + VMSTATE_PTIMER(ptimer, Exynos4210RTCState), + VMSTATE_PTIMER(ptimer_1Hz, Exynos4210RTCState), + VMSTATE_UINT32(freq, Exynos4210RTCState), + VMSTATE_INT32(current_tm.tm_sec, Exynos4210RTCState), + VMSTATE_INT32(current_tm.tm_min, Exynos4210RTCState), + VMSTATE_INT32(current_tm.tm_hour, Exynos4210RTCState), + VMSTATE_INT32(current_tm.tm_wday, Exynos4210RTCState), + VMSTATE_INT32(current_tm.tm_mday, Exynos4210RTCState), + VMSTATE_INT32(current_tm.tm_mon, Exynos4210RTCState), + VMSTATE_INT32(current_tm.tm_year, Exynos4210RTCState), + VMSTATE_END_OF_LIST() + } +}; + +#define BCD3DIGITS(x) \ + ((uint32_t)to_bcd((uint8_t)(x % 100)) + \ + ((uint32_t)to_bcd((uint8_t)((x % 1000) / 100)) << 8)) + +static void check_alarm_raise(Exynos4210RTCState *s) +{ + unsigned int alarm_raise = 0; + struct tm stm = s->current_tm; + + if ((s->reg_rtcalm & 0x01) && + (to_bcd((uint8_t)stm.tm_sec) == (uint8_t)s->reg_almsec)) { + alarm_raise = 1; + } + if ((s->reg_rtcalm & 0x02) && + (to_bcd((uint8_t)stm.tm_min) == (uint8_t)s->reg_almmin)) { + alarm_raise = 1; + } + if ((s->reg_rtcalm & 0x04) && + (to_bcd((uint8_t)stm.tm_hour) == (uint8_t)s->reg_almhour)) { + alarm_raise = 1; + } + if ((s->reg_rtcalm & 0x08) && + (to_bcd((uint8_t)stm.tm_mday) == (uint8_t)s->reg_almday)) { + alarm_raise = 1; + } + if ((s->reg_rtcalm & 0x10) 
&&
+        (to_bcd((uint8_t)stm.tm_mon) == (uint8_t)s->reg_almmon)) {
+        alarm_raise = 1;
+    }
+    if ((s->reg_rtcalm & 0x20) &&
+        (BCD3DIGITS(stm.tm_year) == s->reg_almyear)) {
+        alarm_raise = 1;
+    }
+
+    if (alarm_raise) {
+        DPRINTF("ALARM IRQ\n");
+        /* set irq status */
+        s->reg_intp |= INTP_ALM_ENABLE;
+        qemu_irq_raise(s->alm_irq);
+    }
+}
+
+/*
+ * RTC update frequency
+ * Parameters:
+ *   reg_value - the current RTCCON register or its new value
+ * Must be called within a ptimer_transaction_begin/commit block for s->ptimer.
+ */
+static void exynos4210_rtc_update_freq(Exynos4210RTCState *s,
+                                       uint32_t reg_value)
+{
+    uint32_t freq;
+
+    freq = s->freq;
+    /* set frequency for the time generator */
+    s->freq = RTC_BASE_FREQ / (1 << TICCKSEL(reg_value));
+
+    if (freq != s->freq) {
+        ptimer_set_freq(s->ptimer, s->freq);
+        DPRINTF("freq=%dHz\n", s->freq);
+    }
+}
+
+/* month is between 0 and 11. */
+static int get_days_in_month(int month, int year)
+{
+    static const int days_tab[12] = {
+        31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
+    };
+    int d;
+    if ((unsigned)month >= 12) {
+        return 31;
+    }
+    d = days_tab[month];
+    if (month == 1) {
+        if ((year % 4) == 0 && ((year % 100) != 0 || (year % 400) == 0)) {
+            d++;
+        }
+    }
+    return d;
+}
+
+/* update 'tm' to the next second */
+static void rtc_next_second(struct tm *tm)
+{
+    int days_in_month;
+
+    tm->tm_sec++;
+    if ((unsigned)tm->tm_sec >= 60) {
+        tm->tm_sec = 0;
+        tm->tm_min++;
+        if ((unsigned)tm->tm_min >= 60) {
+            tm->tm_min = 0;
+            tm->tm_hour++;
+            if ((unsigned)tm->tm_hour >= 24) {
+                tm->tm_hour = 0;
+                /* next day */
+                tm->tm_wday++;
+                if ((unsigned)tm->tm_wday >= 7) {
+                    tm->tm_wday = 0;
+                }
+                days_in_month = get_days_in_month(tm->tm_mon,
+                                                  tm->tm_year + 1900);
+                tm->tm_mday++;
+                if (tm->tm_mday < 1) {
+                    tm->tm_mday = 1;
+                } else if (tm->tm_mday > days_in_month) {
+                    tm->tm_mday = 1;
+                    tm->tm_mon++;
+                    if (tm->tm_mon >= 12) {
+                        tm->tm_mon = 0;
+                        tm->tm_year++;
+                    }
+                }
+            }
+        }
+    }
+}
+
+/*
+ * tick handler
+ */
+static void exynos4210_rtc_tick(void *opaque)
+{
+    Exynos4210RTCState *s = (Exynos4210RTCState *)opaque;
+
+    DPRINTF("TICK IRQ\n");
+    /* set irq status */
+    s->reg_intp |= INTP_TICK_ENABLE;
+    /* raise IRQ */
+    qemu_irq_raise(s->tick_irq);
+
+    /* restart timer */
+    ptimer_set_count(s->ptimer, s->reg_ticcnt);
+    ptimer_run(s->ptimer, 1);
+}
+
+/*
+ * 1Hz clock handler
+ */
+static void exynos4210_rtc_1Hz_tick(void *opaque)
+{
+    Exynos4210RTCState *s = (Exynos4210RTCState *)opaque;
+
+    rtc_next_second(&s->current_tm);
+    /* DPRINTF("1Hz tick\n"); */
+
+    /* raise IRQ */
+    if (s->reg_rtcalm & ALARM_INT_ENABLE) {
+        check_alarm_raise(s);
+    }
+
+    ptimer_set_count(s->ptimer_1Hz, RTC_BASE_FREQ);
+    ptimer_run(s->ptimer_1Hz, 1);
+}
+
+/*
+ * RTC Read
+ */
+static uint64_t exynos4210_rtc_read(void *opaque, hwaddr offset,
+                                    unsigned size)
+{
+    uint32_t value = 0;
+    Exynos4210RTCState *s = (Exynos4210RTCState *)opaque;
+
+    switch (offset) {
+    case INTP:
+        value = s->reg_intp;
+        break;
+    case RTCCON:
+        value = s->reg_rtccon;
+        break;
+    case TICCNT:
+        value = s->reg_ticcnt;
+        break;
+    case RTCALM:
+        value = s->reg_rtcalm;
+        break;
+    case ALMSEC:
+        value = s->reg_almsec;
+        break;
+    case ALMMIN:
+        value = s->reg_almmin;
+        break;
+    case ALMHOUR:
+        value = s->reg_almhour;
+        break;
+    case ALMDAY:
+        value = s->reg_almday;
+        break;
+    case ALMMON:
+        value = s->reg_almmon;
+        break;
+    case ALMYEAR:
+        value = s->reg_almyear;
+        break;
+
+    case BCDSEC:
+        value = (uint32_t)to_bcd((uint8_t)s->current_tm.tm_sec);
+        break;
+    case BCDMIN:
+        value =
(uint32_t)to_bcd((uint8_t)s->current_tm.tm_min); + break; + case BCDHOUR: + value = (uint32_t)to_bcd((uint8_t)s->current_tm.tm_hour); + break; + case BCDDAYWEEK: + value = (uint32_t)to_bcd((uint8_t)s->current_tm.tm_wday); + break; + case BCDDAY: + value = (uint32_t)to_bcd((uint8_t)s->current_tm.tm_mday); + break; + case BCDMON: + value = (uint32_t)to_bcd((uint8_t)s->current_tm.tm_mon + 1); + break; + case BCDYEAR: + value = BCD3DIGITS(s->current_tm.tm_year); + break; + + case CURTICNT: + s->reg_curticcnt = ptimer_get_count(s->ptimer); + value = s->reg_curticcnt; + break; + + default: + qemu_log_mask(LOG_GUEST_ERROR, + "exynos4210.rtc: bad read offset " TARGET_FMT_plx, + offset); + break; + } + return value; +} + +/* + * RTC Write + */ +static void exynos4210_rtc_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + Exynos4210RTCState *s = (Exynos4210RTCState *)opaque; + + switch (offset) { + case INTP: + if (value & INTP_ALM_ENABLE) { + qemu_irq_lower(s->alm_irq); + s->reg_intp &= (~INTP_ALM_ENABLE); + } + if (value & INTP_TICK_ENABLE) { + qemu_irq_lower(s->tick_irq); + s->reg_intp &= (~INTP_TICK_ENABLE); + } + break; + case RTCCON: + ptimer_transaction_begin(s->ptimer_1Hz); + ptimer_transaction_begin(s->ptimer); + if (value & RTC_ENABLE) { + exynos4210_rtc_update_freq(s, value); + } + if ((value & RTC_ENABLE) > (s->reg_rtccon & RTC_ENABLE)) { + /* clock timer */ + ptimer_set_count(s->ptimer_1Hz, RTC_BASE_FREQ); + ptimer_run(s->ptimer_1Hz, 1); + DPRINTF("run clock timer\n"); + } + if ((value & RTC_ENABLE) < (s->reg_rtccon & RTC_ENABLE)) { + /* tick timer */ + ptimer_stop(s->ptimer); + /* clock timer */ + ptimer_stop(s->ptimer_1Hz); + DPRINTF("stop all timers\n"); + } + if (value & RTC_ENABLE) { + if ((value & TICK_TIMER_ENABLE) > + (s->reg_rtccon & TICK_TIMER_ENABLE) && + (s->reg_ticcnt)) { + ptimer_set_count(s->ptimer, s->reg_ticcnt); + ptimer_run(s->ptimer, 1); + DPRINTF("run tick timer\n"); + } + if ((value & TICK_TIMER_ENABLE) < + (s->reg_rtccon & TICK_TIMER_ENABLE)) { + ptimer_stop(s->ptimer); + } + } + ptimer_transaction_commit(s->ptimer_1Hz); + ptimer_transaction_commit(s->ptimer); + s->reg_rtccon = value; + break; + case TICCNT: + if (value > TICNT_THRESHOLD) { + s->reg_ticcnt = value; + } else { + qemu_log_mask(LOG_GUEST_ERROR, + "exynos4210.rtc: bad TICNT value %u", + (uint32_t)value); + } + break; + + case RTCALM: + s->reg_rtcalm = value; + break; + case ALMSEC: + s->reg_almsec = (value & 0x7f); + break; + case ALMMIN: + s->reg_almmin = (value & 0x7f); + break; + case ALMHOUR: + s->reg_almhour = (value & 0x3f); + break; + case ALMDAY: + s->reg_almday = (value & 0x3f); + break; + case ALMMON: + s->reg_almmon = (value & 0x1f); + break; + case ALMYEAR: + s->reg_almyear = (value & 0x0fff); + break; + + case BCDSEC: + if (s->reg_rtccon & RTC_ENABLE) { + s->current_tm.tm_sec = (int)from_bcd((uint8_t)value); + } + break; + case BCDMIN: + if (s->reg_rtccon & RTC_ENABLE) { + s->current_tm.tm_min = (int)from_bcd((uint8_t)value); + } + break; + case BCDHOUR: + if (s->reg_rtccon & RTC_ENABLE) { + s->current_tm.tm_hour = (int)from_bcd((uint8_t)value); + } + break; + case BCDDAYWEEK: + if (s->reg_rtccon & RTC_ENABLE) { + s->current_tm.tm_wday = (int)from_bcd((uint8_t)value); + } + break; + case BCDDAY: + if (s->reg_rtccon & RTC_ENABLE) { + s->current_tm.tm_mday = (int)from_bcd((uint8_t)value); + } + break; + case BCDMON: + if (s->reg_rtccon & RTC_ENABLE) { + s->current_tm.tm_mon = (int)from_bcd((uint8_t)value) - 1; + } + break; + case BCDYEAR: + if (s->reg_rtccon & 
RTC_ENABLE) { + /* 3 digits */ + s->current_tm.tm_year = (int)from_bcd((uint8_t)value) + + (int)from_bcd((uint8_t)((value >> 8) & 0x0f)) * 100; + } + break; + + default: + qemu_log_mask(LOG_GUEST_ERROR, + "exynos4210.rtc: bad write offset " TARGET_FMT_plx, + offset); + break; + + } +} + +/* + * Set default values to timer fields and registers + */ +static void exynos4210_rtc_reset(DeviceState *d) +{ + Exynos4210RTCState *s = EXYNOS4210_RTC(d); + + qemu_get_timedate(&s->current_tm, 0); + + DPRINTF("Get time from host: %d-%d-%d %2d:%02d:%02d\n", + s->current_tm.tm_year, s->current_tm.tm_mon, s->current_tm.tm_mday, + s->current_tm.tm_hour, s->current_tm.tm_min, s->current_tm.tm_sec); + + s->reg_intp = 0; + s->reg_rtccon = 0; + s->reg_ticcnt = 0; + s->reg_rtcalm = 0; + s->reg_almsec = 0; + s->reg_almmin = 0; + s->reg_almhour = 0; + s->reg_almday = 0; + s->reg_almmon = 0; + s->reg_almyear = 0; + + s->reg_curticcnt = 0; + + ptimer_transaction_begin(s->ptimer); + exynos4210_rtc_update_freq(s, s->reg_rtccon); + ptimer_stop(s->ptimer); + ptimer_transaction_commit(s->ptimer); + ptimer_transaction_begin(s->ptimer_1Hz); + ptimer_stop(s->ptimer_1Hz); + ptimer_transaction_commit(s->ptimer_1Hz); +} + +static const MemoryRegionOps exynos4210_rtc_ops = { + .read = exynos4210_rtc_read, + .write = exynos4210_rtc_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +/* + * RTC timer initialization + */ +static void exynos4210_rtc_init(Object *obj) +{ + Exynos4210RTCState *s = EXYNOS4210_RTC(obj); + SysBusDevice *dev = SYS_BUS_DEVICE(obj); + + s->ptimer = ptimer_init(exynos4210_rtc_tick, s, PTIMER_POLICY_DEFAULT); + ptimer_transaction_begin(s->ptimer); + ptimer_set_freq(s->ptimer, RTC_BASE_FREQ); + exynos4210_rtc_update_freq(s, 0); + ptimer_transaction_commit(s->ptimer); + + s->ptimer_1Hz = ptimer_init(exynos4210_rtc_1Hz_tick, + s, PTIMER_POLICY_DEFAULT); + ptimer_transaction_begin(s->ptimer_1Hz); + ptimer_set_freq(s->ptimer_1Hz, RTC_BASE_FREQ); + ptimer_transaction_commit(s->ptimer_1Hz); + + sysbus_init_irq(dev, &s->alm_irq); + sysbus_init_irq(dev, &s->tick_irq); + + memory_region_init_io(&s->iomem, obj, &exynos4210_rtc_ops, s, + "exynos4210-rtc", EXYNOS4210_RTC_REG_MEM_SIZE); + sysbus_init_mmio(dev, &s->iomem); +} + +static void exynos4210_rtc_finalize(Object *obj) +{ + Exynos4210RTCState *s = EXYNOS4210_RTC(obj); + + ptimer_free(s->ptimer); + ptimer_free(s->ptimer_1Hz); +} + +static void exynos4210_rtc_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = exynos4210_rtc_reset; + dc->vmsd = &vmstate_exynos4210_rtc_state; +} + +static const TypeInfo exynos4210_rtc_info = { + .name = TYPE_EXYNOS4210_RTC, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(Exynos4210RTCState), + .instance_init = exynos4210_rtc_init, + .instance_finalize = exynos4210_rtc_finalize, + .class_init = exynos4210_rtc_class_init, +}; + +static void exynos4210_rtc_register_types(void) +{ + type_register_static(&exynos4210_rtc_info); +} + +type_init(exynos4210_rtc_register_types) diff --git a/hw/rtc/goldfish_rtc.c b/hw/rtc/goldfish_rtc.c new file mode 100644 index 000000000..e07ff0164 --- /dev/null +++ b/hw/rtc/goldfish_rtc.c @@ -0,0 +1,298 @@ +/* + * Goldfish virtual platform RTC + * + * Copyright (C) 2019 Western Digital Corporation or its affiliates. 
+ * + * For more details on Google Goldfish virtual platform refer: + * https://android.googlesource.com/platform/external/qemu/+/refs/heads/emu-2.0-release/docs/GOLDFISH-VIRTUAL-HARDWARE.TXT + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#include "qemu/osdep.h" +#include "qemu-common.h" +#include "hw/rtc/goldfish_rtc.h" +#include "migration/vmstate.h" +#include "hw/irq.h" +#include "hw/qdev-properties.h" +#include "hw/sysbus.h" +#include "qemu/bitops.h" +#include "qemu/timer.h" +#include "sysemu/sysemu.h" +#include "qemu/cutils.h" +#include "qemu/log.h" + +#include "trace.h" + +#define RTC_TIME_LOW 0x00 +#define RTC_TIME_HIGH 0x04 +#define RTC_ALARM_LOW 0x08 +#define RTC_ALARM_HIGH 0x0c +#define RTC_IRQ_ENABLED 0x10 +#define RTC_CLEAR_ALARM 0x14 +#define RTC_ALARM_STATUS 0x18 +#define RTC_CLEAR_INTERRUPT 0x1c + +static void goldfish_rtc_update(GoldfishRTCState *s) +{ + qemu_set_irq(s->irq, (s->irq_pending & s->irq_enabled) ? 1 : 0); +} + +static void goldfish_rtc_interrupt(void *opaque) +{ + GoldfishRTCState *s = (GoldfishRTCState *)opaque; + + s->alarm_running = 0; + s->irq_pending = 1; + goldfish_rtc_update(s); +} + +static uint64_t goldfish_rtc_get_count(GoldfishRTCState *s) +{ + return s->tick_offset + (uint64_t)qemu_clock_get_ns(rtc_clock); +} + +static void goldfish_rtc_clear_alarm(GoldfishRTCState *s) +{ + timer_del(s->timer); + s->alarm_running = 0; +} + +static void goldfish_rtc_set_alarm(GoldfishRTCState *s) +{ + uint64_t ticks = goldfish_rtc_get_count(s); + uint64_t event = s->alarm_next; + + if (event <= ticks) { + goldfish_rtc_clear_alarm(s); + goldfish_rtc_interrupt(s); + } else { + /* + * We should be setting timer expiry to: + * qemu_clock_get_ns(rtc_clock) + (event - ticks) + * but this is equivalent to: + * event - s->tick_offset + */ + timer_mod(s->timer, event - s->tick_offset); + s->alarm_running = 1; + } +} + +static uint64_t goldfish_rtc_read(void *opaque, hwaddr offset, + unsigned size) +{ + GoldfishRTCState *s = opaque; + uint64_t r = 0; + + /* + * From the documentation linked at the top of the file: + * + * To read the value, the kernel must perform an IO_READ(TIME_LOW), which + * returns an unsigned 32-bit value, before an IO_READ(TIME_HIGH), which + * returns a signed 32-bit value, corresponding to the higher half of the + * full value. 
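+ *
+ * So a guest reads the 64-bit nanosecond counter roughly as follows
+ * (a sketch of the access pattern, not code from this file; readl()
+ * stands in for whatever MMIO accessor the guest kernel uses):
+ *
+ *   uint64_t lo = readl(base + RTC_TIME_LOW);
+ *   uint64_t hi = readl(base + RTC_TIME_HIGH);
+ *   uint64_t now_ns = (hi << 32) | lo;
+ *
+ * The TIME_LOW read latches the high half into s->time_high below, so
+ * the pair stays consistent even if the clock advances between the two
+ * accesses.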
+ */
+    switch (offset) {
+    case RTC_TIME_LOW:
+        r = goldfish_rtc_get_count(s);
+        s->time_high = r >> 32;
+        r &= 0xffffffff;
+        break;
+    case RTC_TIME_HIGH:
+        r = s->time_high;
+        break;
+    case RTC_ALARM_LOW:
+        r = s->alarm_next & 0xffffffff;
+        break;
+    case RTC_ALARM_HIGH:
+        r = s->alarm_next >> 32;
+        break;
+    case RTC_IRQ_ENABLED:
+        r = s->irq_enabled;
+        break;
+    case RTC_ALARM_STATUS:
+        r = s->alarm_running;
+        break;
+    default:
+        qemu_log_mask(LOG_GUEST_ERROR,
+                      "%s: offset 0x%x is UNIMP.\n", __func__, (uint32_t)offset);
+        break;
+    }
+
+    trace_goldfish_rtc_read(offset, r);
+
+    return r;
+}
+
+static void goldfish_rtc_write(void *opaque, hwaddr offset,
+                               uint64_t value, unsigned size)
+{
+    GoldfishRTCState *s = opaque;
+    uint64_t current_tick, new_tick;
+
+    switch (offset) {
+    case RTC_TIME_LOW:
+        current_tick = goldfish_rtc_get_count(s);
+        new_tick = deposit64(current_tick, 0, 32, value);
+        s->tick_offset += new_tick - current_tick;
+        break;
+    case RTC_TIME_HIGH:
+        current_tick = goldfish_rtc_get_count(s);
+        new_tick = deposit64(current_tick, 32, 32, value);
+        s->tick_offset += new_tick - current_tick;
+        break;
+    case RTC_ALARM_LOW:
+        s->alarm_next = deposit64(s->alarm_next, 0, 32, value);
+        goldfish_rtc_set_alarm(s);
+        break;
+    case RTC_ALARM_HIGH:
+        s->alarm_next = deposit64(s->alarm_next, 32, 32, value);
+        break;
+    case RTC_IRQ_ENABLED:
+        s->irq_enabled = (uint32_t)(value & 0x1);
+        goldfish_rtc_update(s);
+        break;
+    case RTC_CLEAR_ALARM:
+        goldfish_rtc_clear_alarm(s);
+        break;
+    case RTC_CLEAR_INTERRUPT:
+        s->irq_pending = 0;
+        goldfish_rtc_update(s);
+        break;
+    default:
+        qemu_log_mask(LOG_GUEST_ERROR,
+                      "%s: offset 0x%x is UNIMP.\n", __func__, (uint32_t)offset);
+        break;
+    }
+
+    trace_goldfish_rtc_write(offset, value);
+}
+
+static int goldfish_rtc_pre_save(void *opaque)
+{
+    uint64_t delta;
+    GoldfishRTCState *s = opaque;
+
+    /*
+     * We want to migrate this offset, which sounds straightforward.
+     * Unfortunately, we cannot directly pass tick_offset because
+     * rtc_clock on the destination host might not be the same as on
+     * the source host.
+     *
+     * To tackle this, we pass tick_offset relative to vm_clock from
+     * the source host and make it relative to rtc_clock at the
+     * destination host.
+     */
+    delta = qemu_clock_get_ns(rtc_clock) -
+            qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
+    s->tick_offset_vmstate = s->tick_offset + delta;
+
+    return 0;
+}
+
+static int goldfish_rtc_post_load(void *opaque, int version_id)
+{
+    uint64_t delta;
+    GoldfishRTCState *s = opaque;
+
+    /*
+     * We extract tick_offset from tick_offset_vmstate by doing
+     * the reverse math compared to the pre_save() function.
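+     *
+     * Sketch of the arithmetic across the migration:
+     *   pre_save:  tick_offset_vmstate = tick_offset + (rtc - virtual)
+     *   post_load: tick_offset = tick_offset_vmstate - (rtc' - virtual')
+     * so the value is effectively anchored to QEMU_CLOCK_VIRTUAL, which
+     * is migrated, instead of to the host-dependent rtc_clock.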
+ */ + delta = qemu_clock_get_ns(rtc_clock) - + qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + s->tick_offset = s->tick_offset_vmstate - delta; + + goldfish_rtc_set_alarm(s); + + return 0; +} + +static const MemoryRegionOps goldfish_rtc_ops = { + .read = goldfish_rtc_read, + .write = goldfish_rtc_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4 + } +}; + +static const VMStateDescription goldfish_rtc_vmstate = { + .name = TYPE_GOLDFISH_RTC, + .version_id = 2, + .pre_save = goldfish_rtc_pre_save, + .post_load = goldfish_rtc_post_load, + .fields = (VMStateField[]) { + VMSTATE_UINT64(tick_offset_vmstate, GoldfishRTCState), + VMSTATE_UINT64(alarm_next, GoldfishRTCState), + VMSTATE_UINT32(alarm_running, GoldfishRTCState), + VMSTATE_UINT32(irq_pending, GoldfishRTCState), + VMSTATE_UINT32(irq_enabled, GoldfishRTCState), + VMSTATE_UINT32(time_high, GoldfishRTCState), + VMSTATE_END_OF_LIST() + } +}; + +static void goldfish_rtc_reset(DeviceState *dev) +{ + GoldfishRTCState *s = GOLDFISH_RTC(dev); + struct tm tm; + + timer_del(s->timer); + + qemu_get_timedate(&tm, 0); + s->tick_offset = mktimegm(&tm); + s->tick_offset *= NANOSECONDS_PER_SECOND; + s->tick_offset -= qemu_clock_get_ns(rtc_clock); + s->tick_offset_vmstate = 0; + s->alarm_next = 0; + s->alarm_running = 0; + s->irq_pending = 0; + s->irq_enabled = 0; +} + +static void goldfish_rtc_realize(DeviceState *d, Error **errp) +{ + SysBusDevice *dev = SYS_BUS_DEVICE(d); + GoldfishRTCState *s = GOLDFISH_RTC(d); + + memory_region_init_io(&s->iomem, OBJECT(s), &goldfish_rtc_ops, s, + "goldfish_rtc", 0x24); + sysbus_init_mmio(dev, &s->iomem); + + sysbus_init_irq(dev, &s->irq); + + s->timer = timer_new_ns(rtc_clock, goldfish_rtc_interrupt, s); +} + +static void goldfish_rtc_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = goldfish_rtc_realize; + dc->reset = goldfish_rtc_reset; + dc->vmsd = &goldfish_rtc_vmstate; +} + +static const TypeInfo goldfish_rtc_info = { + .name = TYPE_GOLDFISH_RTC, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(GoldfishRTCState), + .class_init = goldfish_rtc_class_init, +}; + +static void goldfish_rtc_register_types(void) +{ + type_register_static(&goldfish_rtc_info); +} + +type_init(goldfish_rtc_register_types) diff --git a/hw/rtc/m41t80.c b/hw/rtc/m41t80.c new file mode 100644 index 000000000..396d110ba --- /dev/null +++ b/hw/rtc/m41t80.c @@ -0,0 +1,120 @@ +/* + * M41T80 serial rtc emulation + * + * Copyright (c) 2018 BALATON Zoltan + * + * This work is licensed under the GNU GPL license version 2 or later. 
+ * + */ + +#include "qemu/osdep.h" +#include "qemu-common.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "qemu/timer.h" +#include "qemu/bcd.h" +#include "hw/i2c/i2c.h" +#include "qom/object.h" + +#define TYPE_M41T80 "m41t80" +OBJECT_DECLARE_SIMPLE_TYPE(M41t80State, M41T80) + +struct M41t80State { + I2CSlave parent_obj; + int8_t addr; +}; + +static void m41t80_realize(DeviceState *dev, Error **errp) +{ + M41t80State *s = M41T80(dev); + + s->addr = -1; +} + +static int m41t80_send(I2CSlave *i2c, uint8_t data) +{ + M41t80State *s = M41T80(i2c); + + if (s->addr < 0) { + s->addr = data; + } else { + s->addr++; + } + return 0; +} + +static uint8_t m41t80_recv(I2CSlave *i2c) +{ + M41t80State *s = M41T80(i2c); + struct tm now; + qemu_timeval tv; + + if (s->addr < 0) { + s->addr = 0; + } + if (s->addr >= 1 && s->addr <= 7) { + qemu_get_timedate(&now, -1); + } + switch (s->addr++) { + case 0: + qemu_gettimeofday(&tv); + return to_bcd(tv.tv_usec / 10000); + case 1: + return to_bcd(now.tm_sec); + case 2: + return to_bcd(now.tm_min); + case 3: + return to_bcd(now.tm_hour); + case 4: + return to_bcd(now.tm_wday); + case 5: + return to_bcd(now.tm_mday); + case 6: + return to_bcd(now.tm_mon + 1); + case 7: + return to_bcd(now.tm_year % 100); + case 8 ... 19: + qemu_log_mask(LOG_UNIMP, "%s: unimplemented register: %d\n", + __func__, s->addr - 1); + return 0; + default: + qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid register: %d\n", + __func__, s->addr - 1); + return 0; + } +} + +static int m41t80_event(I2CSlave *i2c, enum i2c_event event) +{ + M41t80State *s = M41T80(i2c); + + if (event == I2C_START_SEND) { + s->addr = -1; + } + return 0; +} + +static void m41t80_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + I2CSlaveClass *sc = I2C_SLAVE_CLASS(klass); + + dc->realize = m41t80_realize; + sc->send = m41t80_send; + sc->recv = m41t80_recv; + sc->event = m41t80_event; +} + +static const TypeInfo m41t80_info = { + .name = TYPE_M41T80, + .parent = TYPE_I2C_SLAVE, + .instance_size = sizeof(M41t80State), + .class_init = m41t80_class_init, +}; + +static void m41t80_register_types(void) +{ + type_register_static(&m41t80_info); +} + +type_init(m41t80_register_types) diff --git a/hw/rtc/m48t59-internal.h b/hw/rtc/m48t59-internal.h new file mode 100644 index 000000000..cd648241e --- /dev/null +++ b/hw/rtc/m48t59-internal.h @@ -0,0 +1,75 @@ +/* + * QEMU M48T59 and M48T08 NVRAM emulation (common header) + * + * Copyright (c) 2003-2005, 2007 Jocelyn Mayer + * Copyright (c) 2013 Hervé Poussineau + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#ifndef HW_M48T59_INTERNAL_H +#define HW_M48T59_INTERNAL_H + +/* + * The M48T02, M48T08 and M48T59 chips are very similar. The newer '59 has + * alarm and a watchdog timer and related control registers. In the + * PPC platform there is also a nvram lock function. + */ + +typedef struct M48txxInfo { + const char *bus_name; + uint32_t model; /* 2 = m48t02, 8 = m48t08, 59 = m48t59 */ + uint32_t size; +} M48txxInfo; + +typedef struct M48t59State { + /* Hardware parameters */ + qemu_irq IRQ; + MemoryRegion iomem; + uint32_t size; + int32_t base_year; + /* RTC management */ + time_t time_offset; + time_t stop_time; + /* Alarm & watchdog */ + struct tm alarm; + QEMUTimer *alrm_timer; + QEMUTimer *wd_timer; + /* NVRAM storage */ + uint8_t *buffer; + /* Model parameters */ + uint32_t model; /* 2 = m48t02, 8 = m48t08, 59 = m48t59 */ + /* NVRAM storage */ + uint16_t addr; + uint8_t lock; +} M48t59State; + +uint32_t m48t59_read(M48t59State *NVRAM, uint32_t addr); +void m48t59_write(M48t59State *NVRAM, uint32_t addr, uint32_t val); +void m48t59_reset_common(M48t59State *NVRAM); +void m48t59_realize_common(M48t59State *s, Error **errp); + +static inline void m48t59_toggle_lock(M48t59State *NVRAM, int lock) +{ + NVRAM->lock ^= 1 << lock; +} + +extern const MemoryRegionOps m48t59_io_ops; + +#endif /* HW_M48T59_INTERNAL_H */ diff --git a/hw/rtc/m48t59-isa.c b/hw/rtc/m48t59-isa.c new file mode 100644 index 000000000..dc21fb10a --- /dev/null +++ b/hw/rtc/m48t59-isa.c @@ -0,0 +1,161 @@ +/* + * QEMU M48T59 and M48T08 NVRAM emulation (ISA bus interface) + * + * Copyright (c) 2003-2005, 2007 Jocelyn Mayer + * Copyright (c) 2013 Hervé Poussineau + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include "hw/isa/isa.h" +#include "hw/qdev-properties.h" +#include "hw/rtc/m48t59.h" +#include "m48t59-internal.h" +#include "qapi/error.h" +#include "qemu/module.h" +#include "qom/object.h" + +#define TYPE_M48TXX_ISA "isa-m48txx" +typedef struct M48txxISADeviceClass M48txxISADeviceClass; +typedef struct M48txxISAState M48txxISAState; +DECLARE_OBJ_CHECKERS(M48txxISAState, M48txxISADeviceClass, + M48TXX_ISA, TYPE_M48TXX_ISA) + +struct M48txxISAState { + ISADevice parent_obj; + M48t59State state; + uint32_t io_base; + MemoryRegion io; +}; + +struct M48txxISADeviceClass { + ISADeviceClass parent_class; + M48txxInfo info; +}; + +static M48txxInfo m48txx_isa_info[] = { + { + .bus_name = "isa-m48t59", + .model = 59, + .size = 0x2000, + } +}; + +static uint32_t m48txx_isa_read(Nvram *obj, uint32_t addr) +{ + M48txxISAState *d = M48TXX_ISA(obj); + return m48t59_read(&d->state, addr); +} + +static void m48txx_isa_write(Nvram *obj, uint32_t addr, uint32_t val) +{ + M48txxISAState *d = M48TXX_ISA(obj); + m48t59_write(&d->state, addr, val); +} + +static void m48txx_isa_toggle_lock(Nvram *obj, int lock) +{ + M48txxISAState *d = M48TXX_ISA(obj); + m48t59_toggle_lock(&d->state, lock); +} + +static Property m48t59_isa_properties[] = { + DEFINE_PROP_INT32("base-year", M48txxISAState, state.base_year, 0), + DEFINE_PROP_UINT32("iobase", M48txxISAState, io_base, 0x74), + DEFINE_PROP_END_OF_LIST(), +}; + +static void m48t59_reset_isa(DeviceState *d) +{ + M48txxISAState *isa = M48TXX_ISA(d); + M48t59State *NVRAM = &isa->state; + + m48t59_reset_common(NVRAM); +} + +static void m48t59_isa_realize(DeviceState *dev, Error **errp) +{ + M48txxISADeviceClass *u = M48TXX_ISA_GET_CLASS(dev); + ISADevice *isadev = ISA_DEVICE(dev); + M48txxISAState *d = M48TXX_ISA(dev); + M48t59State *s = &d->state; + + s->model = u->info.model; + s->size = u->info.size; + isa_init_irq(isadev, &s->IRQ, 8); + m48t59_realize_common(s, errp); + memory_region_init_io(&d->io, OBJECT(dev), &m48t59_io_ops, s, "m48t59", 4); + if (d->io_base != 0) { + isa_register_ioport(isadev, &d->io, d->io_base); + } +} + +static void m48txx_isa_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + NvramClass *nc = NVRAM_CLASS(klass); + + dc->realize = m48t59_isa_realize; + dc->reset = m48t59_reset_isa; + device_class_set_props(dc, m48t59_isa_properties); + nc->read = m48txx_isa_read; + nc->write = m48txx_isa_write; + nc->toggle_lock = m48txx_isa_toggle_lock; +} + +static void m48txx_isa_concrete_class_init(ObjectClass *klass, void *data) +{ + M48txxISADeviceClass *u = M48TXX_ISA_CLASS(klass); + M48txxInfo *info = data; + + u->info = *info; +} + +static const TypeInfo m48txx_isa_type_info = { + .name = TYPE_M48TXX_ISA, + .parent = TYPE_ISA_DEVICE, + .instance_size = sizeof(M48txxISAState), + .abstract = true, + .class_init = m48txx_isa_class_init, + .interfaces = (InterfaceInfo[]) { + { TYPE_NVRAM }, + { } + } +}; + +static void m48t59_isa_register_types(void) +{ + TypeInfo isa_type_info = { + .parent = TYPE_M48TXX_ISA, + .class_size = sizeof(M48txxISADeviceClass), + .class_init = m48txx_isa_concrete_class_init, + }; + int i; + + type_register_static(&m48txx_isa_type_info); + + for (i = 0; i < ARRAY_SIZE(m48txx_isa_info); i++) { + isa_type_info.name = m48txx_isa_info[i].bus_name; + isa_type_info.class_data = &m48txx_isa_info[i]; + type_register(&isa_type_info); + } +} + +type_init(m48t59_isa_register_types) diff --git a/hw/rtc/m48t59.c b/hw/rtc/m48t59.c new file mode 100644 index 
000000000..690f4e071 --- /dev/null +++ b/hw/rtc/m48t59.c @@ -0,0 +1,686 @@ +/* + * QEMU M48T59 and M48T08 NVRAM emulation for PPC PREP and Sparc platforms + * + * Copyright (c) 2003-2005, 2007, 2017 Jocelyn Mayer + * Copyright (c) 2013 Hervé Poussineau + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "qemu-common.h" +#include "hw/irq.h" +#include "hw/qdev-properties.h" +#include "hw/rtc/m48t59.h" +#include "qemu/timer.h" +#include "sysemu/runstate.h" +#include "sysemu/sysemu.h" +#include "hw/sysbus.h" +#include "qapi/error.h" +#include "qemu/bcd.h" +#include "qemu/module.h" +#include "trace.h" + +#include "m48t59-internal.h" +#include "migration/vmstate.h" +#include "qom/object.h" + +#define TYPE_M48TXX_SYS_BUS "sysbus-m48txx" +typedef struct M48txxSysBusDeviceClass M48txxSysBusDeviceClass; +typedef struct M48txxSysBusState M48txxSysBusState; +DECLARE_OBJ_CHECKERS(M48txxSysBusState, M48txxSysBusDeviceClass, + M48TXX_SYS_BUS, TYPE_M48TXX_SYS_BUS) + +/* + * Chipset docs: + * http://www.st.com/stonline/products/literature/ds/2410/m48t02.pdf + * http://www.st.com/stonline/products/literature/ds/2411/m48t08.pdf + * http://www.st.com/stonline/products/literature/od/7001/m48t59y.pdf + */ + +struct M48txxSysBusState { + SysBusDevice parent_obj; + M48t59State state; + MemoryRegion io; +}; + +struct M48txxSysBusDeviceClass { + SysBusDeviceClass parent_class; + M48txxInfo info; +}; + +static M48txxInfo m48txx_sysbus_info[] = { + { + .bus_name = "sysbus-m48t02", + .model = 2, + .size = 0x800, + },{ + .bus_name = "sysbus-m48t08", + .model = 8, + .size = 0x2000, + },{ + .bus_name = "sysbus-m48t59", + .model = 59, + .size = 0x2000, + } +}; + + +/* Fake timer functions */ + +/* Alarm management */ +static void alarm_cb (void *opaque) +{ + struct tm tm; + uint64_t next_time; + M48t59State *NVRAM = opaque; + + qemu_set_irq(NVRAM->IRQ, 1); + if ((NVRAM->buffer[0x1FF5] & 0x80) == 0 && + (NVRAM->buffer[0x1FF4] & 0x80) == 0 && + (NVRAM->buffer[0x1FF3] & 0x80) == 0 && + (NVRAM->buffer[0x1FF2] & 0x80) == 0) { + /* Repeat once a month */ + qemu_get_timedate(&tm, NVRAM->time_offset); + tm.tm_mon++; + if (tm.tm_mon == 13) { + tm.tm_mon = 1; + tm.tm_year++; + } + next_time = qemu_timedate_diff(&tm) - NVRAM->time_offset; + } else if ((NVRAM->buffer[0x1FF5] & 0x80) != 0 && + (NVRAM->buffer[0x1FF4] & 0x80) == 0 && + (NVRAM->buffer[0x1FF3] & 0x80) == 0 && + (NVRAM->buffer[0x1FF2] & 0x80) == 0) { + /* Repeat once a day */ + next_time = 24 * 60 * 60; + } 
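+    /*
+     * The 0x80 bits tested in this chain act as "don't care" masks on
+     * the alarm date/hours/minutes/seconds registers (0x1FF5 down to
+     * 0x1FF2): each additional mask shortens the repeat interval, so
+     * the branches decode monthly, daily, hourly, per-minute and
+     * per-second alarms in turn.
+     */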
else if ((NVRAM->buffer[0x1FF5] & 0x80) != 0 && + (NVRAM->buffer[0x1FF4] & 0x80) != 0 && + (NVRAM->buffer[0x1FF3] & 0x80) == 0 && + (NVRAM->buffer[0x1FF2] & 0x80) == 0) { + /* Repeat once an hour */ + next_time = 60 * 60; + } else if ((NVRAM->buffer[0x1FF5] & 0x80) != 0 && + (NVRAM->buffer[0x1FF4] & 0x80) != 0 && + (NVRAM->buffer[0x1FF3] & 0x80) != 0 && + (NVRAM->buffer[0x1FF2] & 0x80) == 0) { + /* Repeat once a minute */ + next_time = 60; + } else { + /* Repeat once a second */ + next_time = 1; + } + timer_mod(NVRAM->alrm_timer, qemu_clock_get_ns(rtc_clock) + + next_time * 1000); + qemu_set_irq(NVRAM->IRQ, 0); +} + +static void set_alarm(M48t59State *NVRAM) +{ + int diff; + if (NVRAM->alrm_timer != NULL) { + timer_del(NVRAM->alrm_timer); + diff = qemu_timedate_diff(&NVRAM->alarm) - NVRAM->time_offset; + if (diff > 0) + timer_mod(NVRAM->alrm_timer, diff * 1000); + } +} + +/* RTC management helpers */ +static inline void get_time(M48t59State *NVRAM, struct tm *tm) +{ + qemu_get_timedate(tm, NVRAM->time_offset); +} + +static void set_time(M48t59State *NVRAM, struct tm *tm) +{ + NVRAM->time_offset = qemu_timedate_diff(tm); + set_alarm(NVRAM); +} + +/* Watchdog management */ +static void watchdog_cb (void *opaque) +{ + M48t59State *NVRAM = opaque; + + NVRAM->buffer[0x1FF0] |= 0x80; + if (NVRAM->buffer[0x1FF7] & 0x80) { + NVRAM->buffer[0x1FF7] = 0x00; + NVRAM->buffer[0x1FFC] &= ~0x40; + /* May it be a hw CPU Reset instead ? */ + qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); + } else { + qemu_set_irq(NVRAM->IRQ, 1); + qemu_set_irq(NVRAM->IRQ, 0); + } +} + +static void set_up_watchdog(M48t59State *NVRAM, uint8_t value) +{ + uint64_t interval; /* in 1/16 seconds */ + + NVRAM->buffer[0x1FF0] &= ~0x80; + if (NVRAM->wd_timer != NULL) { + timer_del(NVRAM->wd_timer); + if (value != 0) { + interval = (1 << (2 * (value & 0x03))) * ((value >> 2) & 0x1F); + timer_mod(NVRAM->wd_timer, ((uint64_t)time(NULL) * 1000) + + ((interval * 1000) >> 4)); + } + } +} + +/* Direct access to NVRAM */ +void m48t59_write(M48t59State *NVRAM, uint32_t addr, uint32_t val) +{ + struct tm tm; + int tmp; + + trace_m48txx_nvram_mem_write(addr, val); + + /* check for NVRAM access */ + if ((NVRAM->model == 2 && addr < 0x7f8) || + (NVRAM->model == 8 && addr < 0x1ff8) || + (NVRAM->model == 59 && addr < 0x1ff0)) { + goto do_write; + } + + /* TOD access */ + switch (addr) { + case 0x1FF0: + /* flags register : read-only */ + break; + case 0x1FF1: + /* unused */ + break; + case 0x1FF2: + /* alarm seconds */ + tmp = from_bcd(val & 0x7F); + if (tmp >= 0 && tmp <= 59) { + NVRAM->alarm.tm_sec = tmp; + NVRAM->buffer[0x1FF2] = val; + set_alarm(NVRAM); + } + break; + case 0x1FF3: + /* alarm minutes */ + tmp = from_bcd(val & 0x7F); + if (tmp >= 0 && tmp <= 59) { + NVRAM->alarm.tm_min = tmp; + NVRAM->buffer[0x1FF3] = val; + set_alarm(NVRAM); + } + break; + case 0x1FF4: + /* alarm hours */ + tmp = from_bcd(val & 0x3F); + if (tmp >= 0 && tmp <= 23) { + NVRAM->alarm.tm_hour = tmp; + NVRAM->buffer[0x1FF4] = val; + set_alarm(NVRAM); + } + break; + case 0x1FF5: + /* alarm date */ + tmp = from_bcd(val & 0x3F); + if (tmp != 0) { + NVRAM->alarm.tm_mday = tmp; + NVRAM->buffer[0x1FF5] = val; + set_alarm(NVRAM); + } + break; + case 0x1FF6: + /* interrupts */ + NVRAM->buffer[0x1FF6] = val; + break; + case 0x1FF7: + /* watchdog */ + NVRAM->buffer[0x1FF7] = val; + set_up_watchdog(NVRAM, val); + break; + case 0x1FF8: + case 0x07F8: + /* control */ + NVRAM->buffer[addr] = (val & ~0xA0) | 0x90; + break; + case 0x1FF9: + case 0x07F9: + /* seconds (BCD) */ 
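+        /*
+         * Bit 0x80 of this register is the stop (ST) oscillator bit:
+         * setting it records the moment in stop_time, and clearing it
+         * folds the stopped interval back into time_offset, so the
+         * emulated clock resumes from the value it showed when it was
+         * halted (see the val & 0x80 handling below).
+         */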
+ tmp = from_bcd(val & 0x7F); + if (tmp >= 0 && tmp <= 59) { + get_time(NVRAM, &tm); + tm.tm_sec = tmp; + set_time(NVRAM, &tm); + } + if ((val & 0x80) ^ (NVRAM->buffer[addr] & 0x80)) { + if (val & 0x80) { + NVRAM->stop_time = time(NULL); + } else { + NVRAM->time_offset += NVRAM->stop_time - time(NULL); + NVRAM->stop_time = 0; + } + } + NVRAM->buffer[addr] = val & 0x80; + break; + case 0x1FFA: + case 0x07FA: + /* minutes (BCD) */ + tmp = from_bcd(val & 0x7F); + if (tmp >= 0 && tmp <= 59) { + get_time(NVRAM, &tm); + tm.tm_min = tmp; + set_time(NVRAM, &tm); + } + break; + case 0x1FFB: + case 0x07FB: + /* hours (BCD) */ + tmp = from_bcd(val & 0x3F); + if (tmp >= 0 && tmp <= 23) { + get_time(NVRAM, &tm); + tm.tm_hour = tmp; + set_time(NVRAM, &tm); + } + break; + case 0x1FFC: + case 0x07FC: + /* day of the week / century */ + tmp = from_bcd(val & 0x07); + get_time(NVRAM, &tm); + tm.tm_wday = tmp; + set_time(NVRAM, &tm); + NVRAM->buffer[addr] = val & 0x40; + break; + case 0x1FFD: + case 0x07FD: + /* date (BCD) */ + tmp = from_bcd(val & 0x3F); + if (tmp != 0) { + get_time(NVRAM, &tm); + tm.tm_mday = tmp; + set_time(NVRAM, &tm); + } + break; + case 0x1FFE: + case 0x07FE: + /* month */ + tmp = from_bcd(val & 0x1F); + if (tmp >= 1 && tmp <= 12) { + get_time(NVRAM, &tm); + tm.tm_mon = tmp - 1; + set_time(NVRAM, &tm); + } + break; + case 0x1FFF: + case 0x07FF: + /* year */ + tmp = from_bcd(val); + if (tmp >= 0 && tmp <= 99) { + get_time(NVRAM, &tm); + tm.tm_year = from_bcd(val) + NVRAM->base_year - 1900; + set_time(NVRAM, &tm); + } + break; + default: + /* Check lock registers state */ + if (addr >= 0x20 && addr <= 0x2F && (NVRAM->lock & 1)) + break; + if (addr >= 0x30 && addr <= 0x3F && (NVRAM->lock & 2)) + break; + do_write: + if (addr < NVRAM->size) { + NVRAM->buffer[addr] = val & 0xFF; + } + break; + } +} + +uint32_t m48t59_read(M48t59State *NVRAM, uint32_t addr) +{ + struct tm tm; + uint32_t retval = 0xFF; + + /* check for NVRAM access */ + if ((NVRAM->model == 2 && addr < 0x078f) || + (NVRAM->model == 8 && addr < 0x1ff8) || + (NVRAM->model == 59 && addr < 0x1ff0)) { + goto do_read; + } + + /* TOD access */ + switch (addr) { + case 0x1FF0: + /* flags register */ + goto do_read; + case 0x1FF1: + /* unused */ + retval = 0; + break; + case 0x1FF2: + /* alarm seconds */ + goto do_read; + case 0x1FF3: + /* alarm minutes */ + goto do_read; + case 0x1FF4: + /* alarm hours */ + goto do_read; + case 0x1FF5: + /* alarm date */ + goto do_read; + case 0x1FF6: + /* interrupts */ + goto do_read; + case 0x1FF7: + /* A read resets the watchdog */ + set_up_watchdog(NVRAM, NVRAM->buffer[0x1FF7]); + goto do_read; + case 0x1FF8: + case 0x07F8: + /* control */ + goto do_read; + case 0x1FF9: + case 0x07F9: + /* seconds (BCD) */ + get_time(NVRAM, &tm); + retval = (NVRAM->buffer[addr] & 0x80) | to_bcd(tm.tm_sec); + break; + case 0x1FFA: + case 0x07FA: + /* minutes (BCD) */ + get_time(NVRAM, &tm); + retval = to_bcd(tm.tm_min); + break; + case 0x1FFB: + case 0x07FB: + /* hours (BCD) */ + get_time(NVRAM, &tm); + retval = to_bcd(tm.tm_hour); + break; + case 0x1FFC: + case 0x07FC: + /* day of the week / century */ + get_time(NVRAM, &tm); + retval = NVRAM->buffer[addr] | tm.tm_wday; + break; + case 0x1FFD: + case 0x07FD: + /* date */ + get_time(NVRAM, &tm); + retval = to_bcd(tm.tm_mday); + break; + case 0x1FFE: + case 0x07FE: + /* month */ + get_time(NVRAM, &tm); + retval = to_bcd(tm.tm_mon + 1); + break; + case 0x1FFF: + case 0x07FF: + /* year */ + get_time(NVRAM, &tm); + retval = to_bcd((tm.tm_year + 1900 - NVRAM->base_year) 
% 100); + break; + default: + /* Check lock registers state */ + if (addr >= 0x20 && addr <= 0x2F && (NVRAM->lock & 1)) + break; + if (addr >= 0x30 && addr <= 0x3F && (NVRAM->lock & 2)) + break; + do_read: + if (addr < NVRAM->size) { + retval = NVRAM->buffer[addr]; + } + break; + } + trace_m48txx_nvram_mem_read(addr, retval); + + return retval; +} + +/* IO access to NVRAM */ +static void NVRAM_writeb(void *opaque, hwaddr addr, uint64_t val, + unsigned size) +{ + M48t59State *NVRAM = opaque; + + trace_m48txx_nvram_io_write(addr, val); + switch (addr) { + case 0: + NVRAM->addr &= ~0x00FF; + NVRAM->addr |= val; + break; + case 1: + NVRAM->addr &= ~0xFF00; + NVRAM->addr |= val << 8; + break; + case 3: + m48t59_write(NVRAM, NVRAM->addr, val); + NVRAM->addr = 0x0000; + break; + default: + break; + } +} + +static uint64_t NVRAM_readb(void *opaque, hwaddr addr, unsigned size) +{ + M48t59State *NVRAM = opaque; + uint32_t retval; + + switch (addr) { + case 3: + retval = m48t59_read(NVRAM, NVRAM->addr); + break; + default: + retval = -1; + break; + } + trace_m48txx_nvram_io_read(addr, retval); + + return retval; +} + +static uint64_t nvram_read(void *opaque, hwaddr addr, unsigned size) +{ + M48t59State *NVRAM = opaque; + + return m48t59_read(NVRAM, addr); +} + +static void nvram_write(void *opaque, hwaddr addr, uint64_t value, + unsigned size) +{ + M48t59State *NVRAM = opaque; + + return m48t59_write(NVRAM, addr, value); +} + +static const MemoryRegionOps nvram_ops = { + .read = nvram_read, + .write = nvram_write, + .impl.min_access_size = 1, + .impl.max_access_size = 1, + .valid.min_access_size = 1, + .valid.max_access_size = 4, + .endianness = DEVICE_BIG_ENDIAN, +}; + +static const VMStateDescription vmstate_m48t59 = { + .name = "m48t59", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT8(lock, M48t59State), + VMSTATE_UINT16(addr, M48t59State), + VMSTATE_VBUFFER_UINT32(buffer, M48t59State, 0, NULL, size), + VMSTATE_END_OF_LIST() + } +}; + +void m48t59_reset_common(M48t59State *NVRAM) +{ + NVRAM->addr = 0; + NVRAM->lock = 0; + if (NVRAM->alrm_timer != NULL) + timer_del(NVRAM->alrm_timer); + + if (NVRAM->wd_timer != NULL) + timer_del(NVRAM->wd_timer); +} + +static void m48t59_reset_sysbus(DeviceState *d) +{ + M48txxSysBusState *sys = M48TXX_SYS_BUS(d); + M48t59State *NVRAM = &sys->state; + + m48t59_reset_common(NVRAM); +} + +const MemoryRegionOps m48t59_io_ops = { + .read = NVRAM_readb, + .write = NVRAM_writeb, + .impl = { + .min_access_size = 1, + .max_access_size = 1, + }, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +void m48t59_realize_common(M48t59State *s, Error **errp) +{ + s->buffer = g_malloc0(s->size); + if (s->model == 59) { + s->alrm_timer = timer_new_ns(rtc_clock, &alarm_cb, s); + s->wd_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &watchdog_cb, s); + } + qemu_get_timedate(&s->alarm, 0); +} + +static void m48t59_init1(Object *obj) +{ + M48txxSysBusDeviceClass *u = M48TXX_SYS_BUS_GET_CLASS(obj); + M48txxSysBusState *d = M48TXX_SYS_BUS(obj); + SysBusDevice *dev = SYS_BUS_DEVICE(obj); + M48t59State *s = &d->state; + + s->model = u->info.model; + s->size = u->info.size; + sysbus_init_irq(dev, &s->IRQ); + + memory_region_init_io(&s->iomem, obj, &nvram_ops, s, "m48t59.nvram", + s->size); + memory_region_init_io(&d->io, obj, &m48t59_io_ops, s, "m48t59", 4); +} + +static void m48t59_realize(DeviceState *dev, Error **errp) +{ + M48txxSysBusState *d = M48TXX_SYS_BUS(dev); + M48t59State *s = &d->state; + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + + 
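+    /*
+     * Two MMIO views are exported for the board to map: the first is
+     * the directly addressed NVRAM/TOD array (nvram_ops), the second
+     * is the 4-byte indexed window (m48t59_io_ops) in which bytes 0/1
+     * latch the address and byte 3 carries the data, as implemented by
+     * NVRAM_readb/NVRAM_writeb above.
+     */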
sysbus_init_mmio(sbd, &s->iomem); + sysbus_init_mmio(sbd, &d->io); + m48t59_realize_common(s, errp); +} + +static uint32_t m48txx_sysbus_read(Nvram *obj, uint32_t addr) +{ + M48txxSysBusState *d = M48TXX_SYS_BUS(obj); + return m48t59_read(&d->state, addr); +} + +static void m48txx_sysbus_write(Nvram *obj, uint32_t addr, uint32_t val) +{ + M48txxSysBusState *d = M48TXX_SYS_BUS(obj); + m48t59_write(&d->state, addr, val); +} + +static void m48txx_sysbus_toggle_lock(Nvram *obj, int lock) +{ + M48txxSysBusState *d = M48TXX_SYS_BUS(obj); + m48t59_toggle_lock(&d->state, lock); +} + +static Property m48t59_sysbus_properties[] = { + DEFINE_PROP_INT32("base-year", M48txxSysBusState, state.base_year, 0), + DEFINE_PROP_END_OF_LIST(), +}; + +static void m48txx_sysbus_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + NvramClass *nc = NVRAM_CLASS(klass); + + dc->realize = m48t59_realize; + dc->reset = m48t59_reset_sysbus; + device_class_set_props(dc, m48t59_sysbus_properties); + dc->vmsd = &vmstate_m48t59; + nc->read = m48txx_sysbus_read; + nc->write = m48txx_sysbus_write; + nc->toggle_lock = m48txx_sysbus_toggle_lock; +} + +static void m48txx_sysbus_concrete_class_init(ObjectClass *klass, void *data) +{ + M48txxSysBusDeviceClass *u = M48TXX_SYS_BUS_CLASS(klass); + M48txxInfo *info = data; + + u->info = *info; +} + +static const TypeInfo nvram_info = { + .name = TYPE_NVRAM, + .parent = TYPE_INTERFACE, + .class_size = sizeof(NvramClass), +}; + +static const TypeInfo m48txx_sysbus_type_info = { + .name = TYPE_M48TXX_SYS_BUS, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(M48txxSysBusState), + .instance_init = m48t59_init1, + .abstract = true, + .class_init = m48txx_sysbus_class_init, + .interfaces = (InterfaceInfo[]) { + { TYPE_NVRAM }, + { } + } +}; + +static void m48t59_register_types(void) +{ + TypeInfo sysbus_type_info = { + .parent = TYPE_M48TXX_SYS_BUS, + .class_size = sizeof(M48txxSysBusDeviceClass), + .class_init = m48txx_sysbus_concrete_class_init, + }; + int i; + + type_register_static(&nvram_info); + type_register_static(&m48txx_sysbus_type_info); + + for (i = 0; i < ARRAY_SIZE(m48txx_sysbus_info); i++) { + sysbus_type_info.name = m48txx_sysbus_info[i].bus_name; + sysbus_type_info.class_data = &m48txx_sysbus_info[i]; + type_register(&sysbus_type_info); + } +} + +type_init(m48t59_register_types) diff --git a/hw/rtc/mc146818rtc.c b/hw/rtc/mc146818rtc.c new file mode 100644 index 000000000..4fbafddb2 --- /dev/null +++ b/hw/rtc/mc146818rtc.c @@ -0,0 +1,1059 @@ +/* + * QEMU MC146818 RTC emulation + * + * Copyright (c) 2003-2004 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "qemu-common.h" +#include "qemu/cutils.h" +#include "qemu/module.h" +#include "qemu/bcd.h" +#include "hw/acpi/aml-build.h" +#include "hw/irq.h" +#include "hw/qdev-properties.h" +#include "hw/qdev-properties-system.h" +#include "qemu/timer.h" +#include "sysemu/sysemu.h" +#include "sysemu/replay.h" +#include "sysemu/reset.h" +#include "sysemu/runstate.h" +#include "hw/rtc/mc146818rtc.h" +#include "hw/rtc/mc146818rtc_regs.h" +#include "migration/vmstate.h" +#include "qapi/error.h" +#include "qapi/qapi-events-misc-target.h" +#include "qapi/visitor.h" +#include "hw/rtc/mc146818rtc_regs.h" + +#ifdef TARGET_I386 +#include "qapi/qapi-commands-misc-target.h" +#include "hw/i386/apic.h" +#endif + +//#define DEBUG_CMOS +//#define DEBUG_COALESCED + +#ifdef DEBUG_CMOS +# define CMOS_DPRINTF(format, ...) printf(format, ## __VA_ARGS__) +#else +# define CMOS_DPRINTF(format, ...) do { } while (0) +#endif + +#ifdef DEBUG_COALESCED +# define DPRINTF_C(format, ...) printf(format, ## __VA_ARGS__) +#else +# define DPRINTF_C(format, ...) do { } while (0) +#endif + +#define SEC_PER_MIN 60 +#define MIN_PER_HOUR 60 +#define SEC_PER_HOUR 3600 +#define HOUR_PER_DAY 24 +#define SEC_PER_DAY 86400 + +#define RTC_REINJECT_ON_ACK_COUNT 20 +#define RTC_CLOCK_RATE 32768 +#define UIP_HOLD_LENGTH (8 * NANOSECONDS_PER_SECOND / 32768) + +static void rtc_set_time(RTCState *s); +static void rtc_update_time(RTCState *s); +static void rtc_set_cmos(RTCState *s, const struct tm *tm); +static inline int rtc_from_bcd(RTCState *s, int a); +static uint64_t get_next_alarm(RTCState *s); + +static inline bool rtc_running(RTCState *s) +{ + return (!(s->cmos_data[RTC_REG_B] & REG_B_SET) && + (s->cmos_data[RTC_REG_A] & 0x70) <= 0x20); +} + +static uint64_t get_guest_rtc_ns(RTCState *s) +{ + uint64_t guest_clock = qemu_clock_get_ns(rtc_clock); + + return s->base_rtc * NANOSECONDS_PER_SECOND + + guest_clock - s->last_update + s->offset; +} + +static void rtc_coalesced_timer_update(RTCState *s) +{ + if (s->irq_coalesced == 0) { + timer_del(s->coalesced_timer); + } else { + /* divide each RTC interval to 2 - 8 smaller intervals */ + int c = MIN(s->irq_coalesced, 7) + 1; + int64_t next_clock = qemu_clock_get_ns(rtc_clock) + + periodic_clock_to_ns(s->period / c); + timer_mod(s->coalesced_timer, next_clock); + } +} + +static QLIST_HEAD(, RTCState) rtc_devices = + QLIST_HEAD_INITIALIZER(rtc_devices); + +#ifdef TARGET_I386 +void qmp_rtc_reset_reinjection(Error **errp) +{ + RTCState *s; + + QLIST_FOREACH(s, &rtc_devices, link) { + s->irq_coalesced = 0; + } +} + +static bool rtc_policy_slew_deliver_irq(RTCState *s) +{ + apic_reset_irq_delivered(); + qemu_irq_raise(s->irq); + return apic_get_irq_delivered(); +} + +static void rtc_coalesced_timer(void *opaque) +{ + RTCState *s = opaque; + + if (s->irq_coalesced != 0) { + s->cmos_data[RTC_REG_C] |= 0xc0; + DPRINTF_C("cmos: injecting from timer\n"); + if (rtc_policy_slew_deliver_irq(s)) { + s->irq_coalesced--; + DPRINTF_C("cmos: coalesced irqs decreased to %d\n", + s->irq_coalesced); + } + } + + rtc_coalesced_timer_update(s); +} +#else +static bool rtc_policy_slew_deliver_irq(RTCState *s) +{ + assert(0); + return false; +} +#endif + +static uint32_t rtc_periodic_clock_ticks(RTCState *s) +{ + int 
period_code;
+
+    if (!(s->cmos_data[RTC_REG_B] & REG_B_PIE)) {
+        return 0;
+    }
+
+    period_code = s->cmos_data[RTC_REG_A] & 0x0f;
+
+    return periodic_period_to_clock(period_code);
+}
+
+/*
+ * handle the periodic timer. @period_change indicates that the update
+ * is just due to a period adjustment.
+ */
+static void
+periodic_timer_update(RTCState *s, int64_t current_time, uint32_t old_period, bool period_change)
+{
+    uint32_t period;
+    int64_t cur_clock, next_irq_clock, lost_clock = 0;
+
+    period = rtc_periodic_clock_ticks(s);
+    s->period = period;
+
+    if (!period) {
+        s->irq_coalesced = 0;
+        timer_del(s->periodic_timer);
+        return;
+    }
+
+    /* compute the 32 kHz clock */
+    cur_clock =
+        muldiv64(current_time, RTC_CLOCK_RATE, NANOSECONDS_PER_SECOND);
+
+    /*
+     * if the periodic timer's update is due to period re-configuration,
+     * we should count the clock since the last interrupt.
+     */
+    if (old_period && period_change) {
+        int64_t last_periodic_clock, next_periodic_clock;
+
+        next_periodic_clock = muldiv64(s->next_periodic_time,
+                                RTC_CLOCK_RATE, NANOSECONDS_PER_SECOND);
+        last_periodic_clock = next_periodic_clock - old_period;
+        lost_clock = cur_clock - last_periodic_clock;
+        assert(lost_clock >= 0);
+    }
+
+    /*
+     * s->irq_coalesced can change for two reasons:
+     *
+     * a) if one or more periodic timer interrupts have been lost,
+     *    lost_clock will be more than a period.
+     *
+     * b) when the period is reconfigured, we expect the OS to treat
+     *    delayed ticks as if they had the new period.  So, when switching
+     *    from a shorter to a longer period, scale down the missing ticks,
+     *    because the OS will treat past delayed ticks as longer
+     *    (leftovers are put back into lost_clock).  When switching
+     *    to a shorter period, scale up the missing ticks since the
+     *    OS handler will treat past delayed ticks as shorter.
+     */
+    if (s->lost_tick_policy == LOST_TICK_POLICY_SLEW) {
+        uint32_t old_irq_coalesced = s->irq_coalesced;
+
+        lost_clock += old_irq_coalesced * old_period;
+        s->irq_coalesced = lost_clock / s->period;
+        lost_clock %= s->period;
+        if (old_irq_coalesced != s->irq_coalesced ||
+            old_period != s->period) {
+            DPRINTF_C("cmos: coalesced irqs scaled from %d to %d, "
+                      "period scaled from %d to %d\n", old_irq_coalesced,
+                      s->irq_coalesced, old_period, s->period);
+            rtc_coalesced_timer_update(s);
+        }
+    } else {
+        /*
+         * there is no way to compensate for lost interrupts if
+         * LOST_TICK_POLICY_SLEW is not used; we should make time
+         * progress anyway.
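+         * Clamping lost_clock to a single period below therefore just
+         * drops any surplus ticks.  For the slew branch above, an
+         * illustrative rescale: with old_period == 512, a new period of
+         * 1024 and irq_coalesced == 3, lost_clock becomes 3 * 512 ==
+         * 1536, which rescales to irq_coalesced == 1 with 512 ticks
+         * carried over in lost_clock.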
+ */ + lost_clock = MIN(lost_clock, period); + } + + assert(lost_clock >= 0 && lost_clock <= period); + + next_irq_clock = cur_clock + period - lost_clock; + s->next_periodic_time = periodic_clock_to_ns(next_irq_clock) + 1; + timer_mod(s->periodic_timer, s->next_periodic_time); +} + +static void rtc_periodic_timer(void *opaque) +{ + RTCState *s = opaque; + + periodic_timer_update(s, s->next_periodic_time, s->period, false); + s->cmos_data[RTC_REG_C] |= REG_C_PF; + if (s->cmos_data[RTC_REG_B] & REG_B_PIE) { + s->cmos_data[RTC_REG_C] |= REG_C_IRQF; + if (s->lost_tick_policy == LOST_TICK_POLICY_SLEW) { + if (s->irq_reinject_on_ack_count >= RTC_REINJECT_ON_ACK_COUNT) + s->irq_reinject_on_ack_count = 0; + if (!rtc_policy_slew_deliver_irq(s)) { + s->irq_coalesced++; + rtc_coalesced_timer_update(s); + DPRINTF_C("cmos: coalesced irqs increased to %d\n", + s->irq_coalesced); + } + } else + qemu_irq_raise(s->irq); + } +} + +/* handle update-ended timer */ +static void check_update_timer(RTCState *s) +{ + uint64_t next_update_time; + uint64_t guest_nsec; + int next_alarm_sec; + + /* From the data sheet: "Holding the dividers in reset prevents + * interrupts from operating, while setting the SET bit allows" + * them to occur. + */ + if ((s->cmos_data[RTC_REG_A] & 0x60) == 0x60) { + assert((s->cmos_data[RTC_REG_A] & REG_A_UIP) == 0); + timer_del(s->update_timer); + return; + } + + guest_nsec = get_guest_rtc_ns(s) % NANOSECONDS_PER_SECOND; + next_update_time = qemu_clock_get_ns(rtc_clock) + + NANOSECONDS_PER_SECOND - guest_nsec; + + /* Compute time of next alarm. One second is already accounted + * for in next_update_time. + */ + next_alarm_sec = get_next_alarm(s); + s->next_alarm_time = next_update_time + + (next_alarm_sec - 1) * NANOSECONDS_PER_SECOND; + + /* If update_in_progress latched the UIP bit, we must keep the timer + * programmed to the next second, so that UIP is cleared. Otherwise, + * if UF is already set, we might be able to optimize. + */ + if (!(s->cmos_data[RTC_REG_A] & REG_A_UIP) && + (s->cmos_data[RTC_REG_C] & REG_C_UF)) { + /* If AF cannot change (i.e. either it is set already, or + * SET=1 and then the time is not updated), nothing to do. + */ + if ((s->cmos_data[RTC_REG_B] & REG_B_SET) || + (s->cmos_data[RTC_REG_C] & REG_C_AF)) { + timer_del(s->update_timer); + return; + } + + /* UF is set, but AF is clear. Program the timer to target + * the alarm time. */ + next_update_time = s->next_alarm_time; + } + if (next_update_time != timer_expire_time_ns(s->update_timer)) { + timer_mod(s->update_timer, next_update_time); + } +} + +static inline uint8_t convert_hour(RTCState *s, uint8_t hour) +{ + if (!(s->cmos_data[RTC_REG_B] & REG_B_24H)) { + hour %= 12; + if (s->cmos_data[RTC_HOURS] & 0x80) { + hour += 12; + } + } + return hour; +} + +static uint64_t get_next_alarm(RTCState *s) +{ + int32_t alarm_sec, alarm_min, alarm_hour, cur_hour, cur_min, cur_sec; + int32_t hour, min, sec; + + rtc_update_time(s); + + alarm_sec = rtc_from_bcd(s, s->cmos_data[RTC_SECONDS_ALARM]); + alarm_min = rtc_from_bcd(s, s->cmos_data[RTC_MINUTES_ALARM]); + alarm_hour = rtc_from_bcd(s, s->cmos_data[RTC_HOURS_ALARM]); + alarm_hour = alarm_hour == -1 ? 
-1 : convert_hour(s, alarm_hour); + + cur_sec = rtc_from_bcd(s, s->cmos_data[RTC_SECONDS]); + cur_min = rtc_from_bcd(s, s->cmos_data[RTC_MINUTES]); + cur_hour = rtc_from_bcd(s, s->cmos_data[RTC_HOURS]); + cur_hour = convert_hour(s, cur_hour); + + if (alarm_hour == -1) { + alarm_hour = cur_hour; + if (alarm_min == -1) { + alarm_min = cur_min; + if (alarm_sec == -1) { + alarm_sec = cur_sec + 1; + } else if (cur_sec > alarm_sec) { + alarm_min++; + } + } else if (cur_min == alarm_min) { + if (alarm_sec == -1) { + alarm_sec = cur_sec + 1; + } else { + if (cur_sec > alarm_sec) { + alarm_hour++; + } + } + if (alarm_sec == SEC_PER_MIN) { + /* wrap to next hour, minutes is not in don't care mode */ + alarm_sec = 0; + alarm_hour++; + } + } else if (cur_min > alarm_min) { + alarm_hour++; + } + } else if (cur_hour == alarm_hour) { + if (alarm_min == -1) { + alarm_min = cur_min; + if (alarm_sec == -1) { + alarm_sec = cur_sec + 1; + } else if (cur_sec > alarm_sec) { + alarm_min++; + } + + if (alarm_sec == SEC_PER_MIN) { + alarm_sec = 0; + alarm_min++; + } + /* wrap to next day, hour is not in don't care mode */ + alarm_min %= MIN_PER_HOUR; + } else if (cur_min == alarm_min) { + if (alarm_sec == -1) { + alarm_sec = cur_sec + 1; + } + /* wrap to next day, hours+minutes not in don't care mode */ + alarm_sec %= SEC_PER_MIN; + } + } + + /* values that are still don't care fire at the next min/sec */ + if (alarm_min == -1) { + alarm_min = 0; + } + if (alarm_sec == -1) { + alarm_sec = 0; + } + + /* keep values in range */ + if (alarm_sec == SEC_PER_MIN) { + alarm_sec = 0; + alarm_min++; + } + if (alarm_min == MIN_PER_HOUR) { + alarm_min = 0; + alarm_hour++; + } + alarm_hour %= HOUR_PER_DAY; + + hour = alarm_hour - cur_hour; + min = hour * MIN_PER_HOUR + alarm_min - cur_min; + sec = min * SEC_PER_MIN + alarm_sec - cur_sec; + return sec <= 0 ? sec + SEC_PER_DAY : sec; +} + +static void rtc_update_timer(void *opaque) +{ + RTCState *s = opaque; + int32_t irqs = REG_C_UF; + int32_t new_irqs; + + assert((s->cmos_data[RTC_REG_A] & 0x60) != 0x60); + + /* UIP might have been latched, update time and clear it. 
*/ + rtc_update_time(s); + s->cmos_data[RTC_REG_A] &= ~REG_A_UIP; + + if (qemu_clock_get_ns(rtc_clock) >= s->next_alarm_time) { + irqs |= REG_C_AF; + if (s->cmos_data[RTC_REG_B] & REG_B_AIE) { + qemu_system_wakeup_request(QEMU_WAKEUP_REASON_RTC, NULL); + } + } + + new_irqs = irqs & ~s->cmos_data[RTC_REG_C]; + s->cmos_data[RTC_REG_C] |= irqs; + if ((new_irqs & s->cmos_data[RTC_REG_B]) != 0) { + s->cmos_data[RTC_REG_C] |= REG_C_IRQF; + qemu_irq_raise(s->irq); + } + check_update_timer(s); +} + +static void cmos_ioport_write(void *opaque, hwaddr addr, + uint64_t data, unsigned size) +{ + RTCState *s = opaque; + uint32_t old_period; + bool update_periodic_timer; + + if ((addr & 1) == 0) { + s->cmos_index = data & 0x7f; + } else { + CMOS_DPRINTF("cmos: write index=0x%02x val=0x%02" PRIx64 "\n", + s->cmos_index, data); + switch(s->cmos_index) { + case RTC_SECONDS_ALARM: + case RTC_MINUTES_ALARM: + case RTC_HOURS_ALARM: + s->cmos_data[s->cmos_index] = data; + check_update_timer(s); + break; + case RTC_IBM_PS2_CENTURY_BYTE: + s->cmos_index = RTC_CENTURY; + /* fall through */ + case RTC_CENTURY: + case RTC_SECONDS: + case RTC_MINUTES: + case RTC_HOURS: + case RTC_DAY_OF_WEEK: + case RTC_DAY_OF_MONTH: + case RTC_MONTH: + case RTC_YEAR: + s->cmos_data[s->cmos_index] = data; + /* if in set mode, do not update the time */ + if (rtc_running(s)) { + rtc_set_time(s); + check_update_timer(s); + } + break; + case RTC_REG_A: + update_periodic_timer = (s->cmos_data[RTC_REG_A] ^ data) & 0x0f; + old_period = rtc_periodic_clock_ticks(s); + + if ((data & 0x60) == 0x60) { + if (rtc_running(s)) { + rtc_update_time(s); + } + /* What happens to UIP when divider reset is enabled is + * unclear from the datasheet. Shouldn't matter much + * though. + */ + s->cmos_data[RTC_REG_A] &= ~REG_A_UIP; + } else if (((s->cmos_data[RTC_REG_A] & 0x60) == 0x60) && + (data & 0x70) <= 0x20) { + /* when the divider reset is removed, the first update cycle + * begins one-half second later*/ + if (!(s->cmos_data[RTC_REG_B] & REG_B_SET)) { + s->offset = 500000000; + rtc_set_time(s); + } + s->cmos_data[RTC_REG_A] &= ~REG_A_UIP; + } + /* UIP bit is read only */ + s->cmos_data[RTC_REG_A] = (data & ~REG_A_UIP) | + (s->cmos_data[RTC_REG_A] & REG_A_UIP); + + if (update_periodic_timer) { + periodic_timer_update(s, qemu_clock_get_ns(rtc_clock), + old_period, true); + } + + check_update_timer(s); + break; + case RTC_REG_B: + update_periodic_timer = (s->cmos_data[RTC_REG_B] ^ data) + & REG_B_PIE; + old_period = rtc_periodic_clock_ticks(s); + + if (data & REG_B_SET) { + /* update cmos to when the rtc was stopping */ + if (rtc_running(s)) { + rtc_update_time(s); + } + /* set mode: reset UIP mode */ + s->cmos_data[RTC_REG_A] &= ~REG_A_UIP; + data &= ~REG_B_UIE; + } else { + /* if disabling set mode, update the time */ + if ((s->cmos_data[RTC_REG_B] & REG_B_SET) && + (s->cmos_data[RTC_REG_A] & 0x70) <= 0x20) { + s->offset = get_guest_rtc_ns(s) % NANOSECONDS_PER_SECOND; + rtc_set_time(s); + } + } + /* if an interrupt flag is already set when the interrupt + * becomes enabled, raise an interrupt immediately. 
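+ * This works because the PIE/AIE/UIE enable bits in register B share
+ * their bit positions (6/5/4) with the PF/AF/UF flag bits in register
+ * C, so the new register B value can be ANDed directly with REG_C.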
*/ + if (data & s->cmos_data[RTC_REG_C] & REG_C_MASK) { + s->cmos_data[RTC_REG_C] |= REG_C_IRQF; + qemu_irq_raise(s->irq); + } else { + s->cmos_data[RTC_REG_C] &= ~REG_C_IRQF; + qemu_irq_lower(s->irq); + } + s->cmos_data[RTC_REG_B] = data; + + if (update_periodic_timer) { + periodic_timer_update(s, qemu_clock_get_ns(rtc_clock), + old_period, true); + } + + check_update_timer(s); + break; + case RTC_REG_C: + case RTC_REG_D: + /* cannot write to them */ + break; + default: + s->cmos_data[s->cmos_index] = data; + break; + } + } +} + +static inline int rtc_to_bcd(RTCState *s, int a) +{ + if (s->cmos_data[RTC_REG_B] & REG_B_DM) { + return a; + } else { + return ((a / 10) << 4) | (a % 10); + } +} + +static inline int rtc_from_bcd(RTCState *s, int a) +{ + if ((a & 0xc0) == 0xc0) { + return -1; + } + if (s->cmos_data[RTC_REG_B] & REG_B_DM) { + return a; + } else { + return ((a >> 4) * 10) + (a & 0x0f); + } +} + +static void rtc_get_time(RTCState *s, struct tm *tm) +{ + tm->tm_sec = rtc_from_bcd(s, s->cmos_data[RTC_SECONDS]); + tm->tm_min = rtc_from_bcd(s, s->cmos_data[RTC_MINUTES]); + tm->tm_hour = rtc_from_bcd(s, s->cmos_data[RTC_HOURS] & 0x7f); + if (!(s->cmos_data[RTC_REG_B] & REG_B_24H)) { + tm->tm_hour %= 12; + if (s->cmos_data[RTC_HOURS] & 0x80) { + tm->tm_hour += 12; + } + } + tm->tm_wday = rtc_from_bcd(s, s->cmos_data[RTC_DAY_OF_WEEK]) - 1; + tm->tm_mday = rtc_from_bcd(s, s->cmos_data[RTC_DAY_OF_MONTH]); + tm->tm_mon = rtc_from_bcd(s, s->cmos_data[RTC_MONTH]) - 1; + tm->tm_year = + rtc_from_bcd(s, s->cmos_data[RTC_YEAR]) + s->base_year + + rtc_from_bcd(s, s->cmos_data[RTC_CENTURY]) * 100 - 1900; +} + +static void rtc_set_time(RTCState *s) +{ + struct tm tm; + + rtc_get_time(s, &tm); + s->base_rtc = mktimegm(&tm); + s->last_update = qemu_clock_get_ns(rtc_clock); + + qapi_event_send_rtc_change(qemu_timedate_diff(&tm)); +} + +static void rtc_set_cmos(RTCState *s, const struct tm *tm) +{ + int year; + + s->cmos_data[RTC_SECONDS] = rtc_to_bcd(s, tm->tm_sec); + s->cmos_data[RTC_MINUTES] = rtc_to_bcd(s, tm->tm_min); + if (s->cmos_data[RTC_REG_B] & REG_B_24H) { + /* 24 hour format */ + s->cmos_data[RTC_HOURS] = rtc_to_bcd(s, tm->tm_hour); + } else { + /* 12 hour format */ + int h = (tm->tm_hour % 12) ? tm->tm_hour % 12 : 12; + s->cmos_data[RTC_HOURS] = rtc_to_bcd(s, h); + if (tm->tm_hour >= 12) + s->cmos_data[RTC_HOURS] |= 0x80; + } + s->cmos_data[RTC_DAY_OF_WEEK] = rtc_to_bcd(s, tm->tm_wday + 1); + s->cmos_data[RTC_DAY_OF_MONTH] = rtc_to_bcd(s, tm->tm_mday); + s->cmos_data[RTC_MONTH] = rtc_to_bcd(s, tm->tm_mon + 1); + year = tm->tm_year + 1900 - s->base_year; + s->cmos_data[RTC_YEAR] = rtc_to_bcd(s, year % 100); + s->cmos_data[RTC_CENTURY] = rtc_to_bcd(s, year / 100); +} + +static void rtc_update_time(RTCState *s) +{ + struct tm ret; + time_t guest_sec; + int64_t guest_nsec; + + guest_nsec = get_guest_rtc_ns(s); + guest_sec = guest_nsec / NANOSECONDS_PER_SECOND; + gmtime_r(&guest_sec, &ret); + + /* Is SET flag of Register B disabled? */ + if ((s->cmos_data[RTC_REG_B] & REG_B_SET) == 0) { + rtc_set_cmos(s, &ret); + } +} + +static int update_in_progress(RTCState *s) +{ + int64_t guest_nsec; + + if (!rtc_running(s)) { + return 0; + } + if (timer_pending(s->update_timer)) { + int64_t next_update_time = timer_expire_time_ns(s->update_timer); + /* Latch UIP until the timer expires. 
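+ * We report UIP as set once we are within UIP_HOLD_LENGTH (roughly
+ * the 244us for which the real chip asserts UIP before an update) of
+ * the pending update.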
*/ + if (qemu_clock_get_ns(rtc_clock) >= + (next_update_time - UIP_HOLD_LENGTH)) { + s->cmos_data[RTC_REG_A] |= REG_A_UIP; + return 1; + } + } + + guest_nsec = get_guest_rtc_ns(s); + /* UIP bit will be set at last 244us of every second. */ + if ((guest_nsec % NANOSECONDS_PER_SECOND) >= + (NANOSECONDS_PER_SECOND - UIP_HOLD_LENGTH)) { + return 1; + } + return 0; +} + +static uint64_t cmos_ioport_read(void *opaque, hwaddr addr, + unsigned size) +{ + RTCState *s = opaque; + int ret; + if ((addr & 1) == 0) { + return 0xff; + } else { + switch(s->cmos_index) { + case RTC_IBM_PS2_CENTURY_BYTE: + s->cmos_index = RTC_CENTURY; + /* fall through */ + case RTC_CENTURY: + case RTC_SECONDS: + case RTC_MINUTES: + case RTC_HOURS: + case RTC_DAY_OF_WEEK: + case RTC_DAY_OF_MONTH: + case RTC_MONTH: + case RTC_YEAR: + /* if not in set mode, calibrate cmos before + * reading*/ + if (rtc_running(s)) { + rtc_update_time(s); + } + ret = s->cmos_data[s->cmos_index]; + break; + case RTC_REG_A: + ret = s->cmos_data[s->cmos_index]; + if (update_in_progress(s)) { + ret |= REG_A_UIP; + } + break; + case RTC_REG_C: + ret = s->cmos_data[s->cmos_index]; + qemu_irq_lower(s->irq); + s->cmos_data[RTC_REG_C] = 0x00; + if (ret & (REG_C_UF | REG_C_AF)) { + check_update_timer(s); + } + + if(s->irq_coalesced && + (s->cmos_data[RTC_REG_B] & REG_B_PIE) && + s->irq_reinject_on_ack_count < RTC_REINJECT_ON_ACK_COUNT) { + s->irq_reinject_on_ack_count++; + s->cmos_data[RTC_REG_C] |= REG_C_IRQF | REG_C_PF; + DPRINTF_C("cmos: injecting on ack\n"); + if (rtc_policy_slew_deliver_irq(s)) { + s->irq_coalesced--; + DPRINTF_C("cmos: coalesced irqs decreased to %d\n", + s->irq_coalesced); + } + } + break; + default: + ret = s->cmos_data[s->cmos_index]; + break; + } + CMOS_DPRINTF("cmos: read index=0x%02x val=0x%02x\n", + s->cmos_index, ret); + return ret; + } +} + +void rtc_set_memory(ISADevice *dev, int addr, int val) +{ + RTCState *s = MC146818_RTC(dev); + if (addr >= 0 && addr <= 127) + s->cmos_data[addr] = val; +} + +int rtc_get_memory(ISADevice *dev, int addr) +{ + RTCState *s = MC146818_RTC(dev); + assert(addr >= 0 && addr <= 127); + return s->cmos_data[addr]; +} + +static void rtc_set_date_from_host(ISADevice *dev) +{ + RTCState *s = MC146818_RTC(dev); + struct tm tm; + + qemu_get_timedate(&tm, 0); + + s->base_rtc = mktimegm(&tm); + s->last_update = qemu_clock_get_ns(rtc_clock); + s->offset = 0; + + /* set the CMOS date */ + rtc_set_cmos(s, &tm); +} + +static int rtc_pre_save(void *opaque) +{ + RTCState *s = opaque; + + rtc_update_time(s); + + return 0; +} + +static int rtc_post_load(void *opaque, int version_id) +{ + RTCState *s = opaque; + + if (version_id <= 2 || rtc_clock == QEMU_CLOCK_REALTIME) { + rtc_set_time(s); + s->offset = 0; + check_update_timer(s); + } + s->period = rtc_periodic_clock_ticks(s); + + /* The periodic timer is deterministic in record/replay mode, + * so there is no need to update it after loading the vmstate. + * Reading RTC here would misalign record and replay. 
+ */ + if (replay_mode == REPLAY_MODE_NONE) { + uint64_t now = qemu_clock_get_ns(rtc_clock); + if (now < s->next_periodic_time || + now > (s->next_periodic_time + get_max_clock_jump())) { + periodic_timer_update(s, qemu_clock_get_ns(rtc_clock), s->period, false); + } + } + + if (version_id >= 2) { + if (s->lost_tick_policy == LOST_TICK_POLICY_SLEW) { + rtc_coalesced_timer_update(s); + } + } + return 0; +} + +static bool rtc_irq_reinject_on_ack_count_needed(void *opaque) +{ + RTCState *s = (RTCState *)opaque; + return s->irq_reinject_on_ack_count != 0; +} + +static const VMStateDescription vmstate_rtc_irq_reinject_on_ack_count = { + .name = "mc146818rtc/irq_reinject_on_ack_count", + .version_id = 1, + .minimum_version_id = 1, + .needed = rtc_irq_reinject_on_ack_count_needed, + .fields = (VMStateField[]) { + VMSTATE_UINT16(irq_reinject_on_ack_count, RTCState), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_rtc = { + .name = "mc146818rtc", + .version_id = 3, + .minimum_version_id = 1, + .pre_save = rtc_pre_save, + .post_load = rtc_post_load, + .fields = (VMStateField[]) { + VMSTATE_BUFFER(cmos_data, RTCState), + VMSTATE_UINT8(cmos_index, RTCState), + VMSTATE_UNUSED(7*4), + VMSTATE_TIMER_PTR(periodic_timer, RTCState), + VMSTATE_INT64(next_periodic_time, RTCState), + VMSTATE_UNUSED(3*8), + VMSTATE_UINT32_V(irq_coalesced, RTCState, 2), + VMSTATE_UINT32_V(period, RTCState, 2), + VMSTATE_UINT64_V(base_rtc, RTCState, 3), + VMSTATE_UINT64_V(last_update, RTCState, 3), + VMSTATE_INT64_V(offset, RTCState, 3), + VMSTATE_TIMER_PTR_V(update_timer, RTCState, 3), + VMSTATE_UINT64_V(next_alarm_time, RTCState, 3), + VMSTATE_END_OF_LIST() + }, + .subsections = (const VMStateDescription*[]) { + &vmstate_rtc_irq_reinject_on_ack_count, + NULL + } +}; + +/* set CMOS shutdown status register (index 0xF) as S3_resume(0xFE) + BIOS will read it and start S3 resume at POST Entry */ +static void rtc_notify_suspend(Notifier *notifier, void *data) +{ + RTCState *s = container_of(notifier, RTCState, suspend_notifier); + rtc_set_memory(ISA_DEVICE(s), 0xF, 0xFE); +} + +static const MemoryRegionOps cmos_ops = { + .read = cmos_ioport_read, + .write = cmos_ioport_write, + .impl = { + .min_access_size = 1, + .max_access_size = 1, + }, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static void rtc_get_date(Object *obj, struct tm *current_tm, Error **errp) +{ + RTCState *s = MC146818_RTC(obj); + + rtc_update_time(s); + rtc_get_time(s, current_tm); +} + +static void rtc_realizefn(DeviceState *dev, Error **errp) +{ + ISADevice *isadev = ISA_DEVICE(dev); + RTCState *s = MC146818_RTC(dev); + + s->cmos_data[RTC_REG_A] = 0x26; + s->cmos_data[RTC_REG_B] = 0x02; + s->cmos_data[RTC_REG_C] = 0x00; + s->cmos_data[RTC_REG_D] = 0x80; + + /* This is for historical reasons. The default base year qdev property + * was set to 2000 for most machine types before the century byte was + * implemented. + * + * This if statement means that the century byte will be always 0 + * (at least until 2079...) for base_year = 1980, but will be set + * correctly for base_year = 2000. 
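+ * For example, with base_year forced to 0 a guest date of 2023 is
+ * stored by rtc_set_cmos() as RTC_YEAR = 23 and RTC_CENTURY = 20,
+ * whereas base_year = 1980 would yield RTC_YEAR = 43, RTC_CENTURY = 0.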
+ */
+    if (s->base_year == 2000) {
+        s->base_year = 0;
+    }
+
+    rtc_set_date_from_host(isadev);
+
+    switch (s->lost_tick_policy) {
+#ifdef TARGET_I386
+    case LOST_TICK_POLICY_SLEW:
+        s->coalesced_timer =
+            timer_new_ns(rtc_clock, rtc_coalesced_timer, s);
+        break;
+#endif
+    case LOST_TICK_POLICY_DISCARD:
+        break;
+    default:
+        error_setg(errp, "Invalid lost tick policy");
+        return;
+    }
+
+    s->periodic_timer = timer_new_ns(rtc_clock, rtc_periodic_timer, s);
+    s->update_timer = timer_new_ns(rtc_clock, rtc_update_timer, s);
+    check_update_timer(s);
+
+    s->suspend_notifier.notify = rtc_notify_suspend;
+    qemu_register_suspend_notifier(&s->suspend_notifier);
+
+    memory_region_init_io(&s->io, OBJECT(s), &cmos_ops, s, "rtc", 2);
+    isa_register_ioport(isadev, &s->io, RTC_ISA_BASE);
+
+    /* register the RTC 0x70 index port for coalesced pio */
+    memory_region_set_flush_coalesced(&s->io);
+    memory_region_init_io(&s->coalesced_io, OBJECT(s), &cmos_ops,
+                          s, "rtc-index", 1);
+    memory_region_add_subregion(&s->io, 0, &s->coalesced_io);
+    memory_region_add_coalescing(&s->coalesced_io, 0, 1);
+
+    qdev_set_legacy_instance_id(dev, RTC_ISA_BASE, 3);
+
+    object_property_add_tm(OBJECT(s), "date", rtc_get_date);
+
+    qdev_init_gpio_out(dev, &s->irq, 1);
+    QLIST_INSERT_HEAD(&rtc_devices, s, link);
+}
+
+ISADevice *mc146818_rtc_init(ISABus *bus, int base_year, qemu_irq intercept_irq)
+{
+    DeviceState *dev;
+    ISADevice *isadev;
+
+    isadev = isa_new(TYPE_MC146818_RTC);
+    dev = DEVICE(isadev);
+    qdev_prop_set_int32(dev, "base_year", base_year);
+    isa_realize_and_unref(isadev, bus, &error_fatal);
+    if (intercept_irq) {
+        qdev_connect_gpio_out(dev, 0, intercept_irq);
+    } else {
+        isa_connect_gpio_out(isadev, 0, RTC_ISA_IRQ);
+    }
+
+    object_property_add_alias(qdev_get_machine(), "rtc-time", OBJECT(isadev),
+                              "date");
+
+    return isadev;
+}
+
+static Property mc146818rtc_properties[] = {
+    DEFINE_PROP_INT32("base_year", RTCState, base_year, 1980),
+    DEFINE_PROP_LOSTTICKPOLICY("lost_tick_policy", RTCState,
+                               lost_tick_policy, LOST_TICK_POLICY_DISCARD),
+    DEFINE_PROP_END_OF_LIST(),
+};
+
+static void rtc_reset_enter(Object *obj, ResetType type)
+{
+    RTCState *s = MC146818_RTC(obj);
+
+    /*
+     * A guest that suspends itself writes 0xfe to CMOS index 0x0f (see
+     * rtc_notify_suspend()).  Preserve that value across reset so the
+     * BIOS starts an S3 resume; clear anything else.
+     */
+    if (s->cmos_data[0x0f] != 0xfe) {
+        s->cmos_data[0x0f] = 0x00;
+    }
+
+    s->cmos_data[RTC_REG_B] &= ~(REG_B_PIE | REG_B_AIE | REG_B_SQWE);
+    s->cmos_data[RTC_REG_C] &= ~(REG_C_UF | REG_C_IRQF | REG_C_PF | REG_C_AF);
+    check_update_timer(s);
+
+    if (s->lost_tick_policy == LOST_TICK_POLICY_SLEW) {
+        s->irq_coalesced = 0;
+        s->irq_reinject_on_ack_count = 0;
+    }
+}
+
+static void rtc_reset_hold(Object *obj)
+{
+    RTCState *s = MC146818_RTC(obj);
+
+    qemu_irq_lower(s->irq);
+}
+
+static void rtc_build_aml(ISADevice *isadev, Aml *scope)
+{
+    Aml *dev;
+    Aml *crs;
+
+    /*
+     * Reserving 8 io ports here, following what physical hardware
+     * does, even though QEMU only responds to the first two ports.
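+     * (That is ports 0x70..0x77; only the index/data pair at
+     * 0x70/0x71 is actually implemented.)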
+ */ + crs = aml_resource_template(); + aml_append(crs, aml_io(AML_DECODE16, RTC_ISA_BASE, RTC_ISA_BASE, + 0x01, 0x08)); + aml_append(crs, aml_irq_no_flags(RTC_ISA_IRQ)); + + dev = aml_device("RTC"); + aml_append(dev, aml_name_decl("_HID", aml_eisaid("PNP0B00"))); + aml_append(dev, aml_name_decl("_CRS", crs)); + + aml_append(scope, dev); +} + +static void rtc_class_initfn(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + ResettableClass *rc = RESETTABLE_CLASS(klass); + ISADeviceClass *isa = ISA_DEVICE_CLASS(klass); + + dc->realize = rtc_realizefn; + dc->vmsd = &vmstate_rtc; + rc->phases.enter = rtc_reset_enter; + rc->phases.hold = rtc_reset_hold; + isa->build_aml = rtc_build_aml; + device_class_set_props(dc, mc146818rtc_properties); + set_bit(DEVICE_CATEGORY_MISC, dc->categories); +} + +static const TypeInfo mc146818rtc_info = { + .name = TYPE_MC146818_RTC, + .parent = TYPE_ISA_DEVICE, + .instance_size = sizeof(RTCState), + .class_init = rtc_class_initfn, +}; + +static void mc146818rtc_register_types(void) +{ + type_register_static(&mc146818rtc_info); +} + +type_init(mc146818rtc_register_types) diff --git a/hw/rtc/meson.build b/hw/rtc/meson.build new file mode 100644 index 000000000..8fd8d8f9a --- /dev/null +++ b/hw/rtc/meson.build @@ -0,0 +1,16 @@ + +softmmu_ss.add(when: 'CONFIG_DS1338', if_true: files('ds1338.c')) +softmmu_ss.add(when: 'CONFIG_M41T80', if_true: files('m41t80.c')) +softmmu_ss.add(when: 'CONFIG_M48T59', if_true: files('m48t59.c')) +specific_ss.add(when: 'CONFIG_PL031', if_true: files('pl031.c')) +softmmu_ss.add(when: 'CONFIG_TWL92230', if_true: files('twl92230.c')) +softmmu_ss.add(when: ['CONFIG_ISA_BUS', 'CONFIG_M48T59'], if_true: files('m48t59-isa.c')) +softmmu_ss.add(when: 'CONFIG_XLNX_ZYNQMP', if_true: files('xlnx-zynqmp-rtc.c')) + +softmmu_ss.add(when: 'CONFIG_EXYNOS4', if_true: files('exynos4210_rtc.c')) +softmmu_ss.add(when: 'CONFIG_SUN4V_RTC', if_true: files('sun4v-rtc.c')) +softmmu_ss.add(when: 'CONFIG_ASPEED_SOC', if_true: files('aspeed_rtc.c')) +softmmu_ss.add(when: 'CONFIG_GOLDFISH_RTC', if_true: files('goldfish_rtc.c')) +softmmu_ss.add(when: 'CONFIG_ALLWINNER_H3', if_true: files('allwinner-rtc.c')) + +specific_ss.add(when: 'CONFIG_MC146818RTC', if_true: files('mc146818rtc.c')) diff --git a/hw/rtc/pl031.c b/hw/rtc/pl031.c new file mode 100644 index 000000000..e7ced90b0 --- /dev/null +++ b/hw/rtc/pl031.c @@ -0,0 +1,356 @@ +/* + * ARM AMBA PrimeCell PL031 RTC + * + * Copyright (c) 2007 CodeSourcery + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Contributions after 2012-01-13 are licensed under the terms of the + * GNU GPL, version 2 or (at your option) any later version. 
+ */ + +#include "qemu/osdep.h" +#include "qemu-common.h" +#include "hw/rtc/pl031.h" +#include "migration/vmstate.h" +#include "hw/irq.h" +#include "hw/qdev-properties.h" +#include "hw/sysbus.h" +#include "qemu/timer.h" +#include "sysemu/sysemu.h" +#include "qemu/cutils.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "trace.h" +#include "qapi/qapi-events-misc-target.h" + +#define RTC_DR 0x00 /* Data read register */ +#define RTC_MR 0x04 /* Match register */ +#define RTC_LR 0x08 /* Data load register */ +#define RTC_CR 0x0c /* Control register */ +#define RTC_IMSC 0x10 /* Interrupt mask and set register */ +#define RTC_RIS 0x14 /* Raw interrupt status register */ +#define RTC_MIS 0x18 /* Masked interrupt status register */ +#define RTC_ICR 0x1c /* Interrupt clear register */ + +static const unsigned char pl031_id[] = { + 0x31, 0x10, 0x14, 0x00, /* Device ID */ + 0x0d, 0xf0, 0x05, 0xb1 /* Cell ID */ +}; + +static void pl031_update(PL031State *s) +{ + uint32_t flags = s->is & s->im; + + trace_pl031_irq_state(flags); + qemu_set_irq(s->irq, flags); +} + +static void pl031_interrupt(void * opaque) +{ + PL031State *s = (PL031State *)opaque; + + s->is = 1; + trace_pl031_alarm_raised(); + pl031_update(s); +} + +static uint32_t pl031_get_count(PL031State *s) +{ + int64_t now = qemu_clock_get_ns(rtc_clock); + return s->tick_offset + now / NANOSECONDS_PER_SECOND; +} + +static void pl031_set_alarm(PL031State *s) +{ + uint32_t ticks; + + /* The timer wraps around. This subtraction also wraps in the same way, + and gives correct results when alarm < now_ticks. */ + ticks = s->mr - pl031_get_count(s); + trace_pl031_set_alarm(ticks); + if (ticks == 0) { + timer_del(s->timer); + pl031_interrupt(s); + } else { + int64_t now = qemu_clock_get_ns(rtc_clock); + timer_mod(s->timer, now + (int64_t)ticks * NANOSECONDS_PER_SECOND); + } +} + +static uint64_t pl031_read(void *opaque, hwaddr offset, + unsigned size) +{ + PL031State *s = (PL031State *)opaque; + uint64_t r; + + switch (offset) { + case RTC_DR: + r = pl031_get_count(s); + break; + case RTC_MR: + r = s->mr; + break; + case RTC_IMSC: + r = s->im; + break; + case RTC_RIS: + r = s->is; + break; + case RTC_LR: + r = s->lr; + break; + case RTC_CR: + /* RTC is permanently enabled. */ + r = 1; + break; + case RTC_MIS: + r = s->is & s->im; + break; + case 0xfe0 ... 0xfff: + r = pl031_id[(offset - 0xfe0) >> 2]; + break; + case RTC_ICR: + qemu_log_mask(LOG_GUEST_ERROR, + "pl031: read of write-only register at offset 0x%x\n", + (int)offset); + r = 0; + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "pl031_read: Bad offset 0x%x\n", (int)offset); + r = 0; + break; + } + + trace_pl031_read(offset, r); + return r; +} + +static void pl031_write(void * opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + PL031State *s = (PL031State *)opaque; + + trace_pl031_write(offset, value); + + switch (offset) { + case RTC_LR: { + struct tm tm; + + s->tick_offset += value - pl031_get_count(s); + + qemu_get_timedate(&tm, s->tick_offset); + qapi_event_send_rtc_change(qemu_timedate_diff(&tm)); + + pl031_set_alarm(s); + break; + } + case RTC_MR: + s->mr = value; + pl031_set_alarm(s); + break; + case RTC_IMSC: + s->im = value & 1; + pl031_update(s); + break; + case RTC_ICR: + s->is &= ~value; + pl031_update(s); + break; + case RTC_CR: + /* Written value is ignored. 
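+       RTC_CR reads back as 1 (the RTC is permanently enabled), so
+       there is no control state to update here.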
*/ + break; + + case RTC_DR: + case RTC_MIS: + case RTC_RIS: + qemu_log_mask(LOG_GUEST_ERROR, + "pl031: write to read-only register at offset 0x%x\n", + (int)offset); + break; + + default: + qemu_log_mask(LOG_GUEST_ERROR, + "pl031_write: Bad offset 0x%x\n", (int)offset); + break; + } +} + +static const MemoryRegionOps pl031_ops = { + .read = pl031_read, + .write = pl031_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void pl031_init(Object *obj) +{ + PL031State *s = PL031(obj); + SysBusDevice *dev = SYS_BUS_DEVICE(obj); + struct tm tm; + + memory_region_init_io(&s->iomem, obj, &pl031_ops, s, "pl031", 0x1000); + sysbus_init_mmio(dev, &s->iomem); + + sysbus_init_irq(dev, &s->irq); + qemu_get_timedate(&tm, 0); + s->tick_offset = mktimegm(&tm) - + qemu_clock_get_ns(rtc_clock) / NANOSECONDS_PER_SECOND; + + s->timer = timer_new_ns(rtc_clock, pl031_interrupt, s); +} + +static void pl031_finalize(Object *obj) +{ + PL031State *s = PL031(obj); + + timer_free(s->timer); +} + +static int pl031_pre_save(void *opaque) +{ + PL031State *s = opaque; + + /* + * The PL031 device model code uses the tick_offset field, which is + * the offset between what the guest RTC should read and what the + * QEMU rtc_clock reads: + * guest_rtc = rtc_clock + tick_offset + * and so + * tick_offset = guest_rtc - rtc_clock + * + * We want to migrate this offset, which sounds straightforward. + * Unfortunately older versions of QEMU migrated a conversion of this + * offset into an offset from the vm_clock. (This was in turn an + * attempt to be compatible with even older QEMU versions, but it + * has incorrect behaviour if the rtc_clock is not the same as the + * vm_clock.) So we put the actual tick_offset into a migration + * subsection, and the backwards-compatible time-relative-to-vm_clock + * in the main migration state. + * + * Calculate base time relative to QEMU_CLOCK_VIRTUAL: + */ + int64_t delta = qemu_clock_get_ns(rtc_clock) - qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + s->tick_offset_vmstate = s->tick_offset + delta / NANOSECONDS_PER_SECOND; + + return 0; +} + +static int pl031_pre_load(void *opaque) +{ + PL031State *s = opaque; + + s->tick_offset_migrated = false; + return 0; +} + +static int pl031_post_load(void *opaque, int version_id) +{ + PL031State *s = opaque; + + /* + * If we got the tick_offset subsection, then we can just use + * the value in that. Otherwise the source is an older QEMU and + * has given us the offset from the vm_clock; convert it back to + * an offset from the rtc_clock. This will cause time to incorrectly + * go backwards compared to the host RTC, but this is unavoidable. 
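+ * (When rtc_clock is QEMU_CLOCK_VIRTUAL the delta computed below is
+ * zero and the round trip through tick_offset_vmstate is exact.)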
+ */ + + if (!s->tick_offset_migrated) { + int64_t delta = qemu_clock_get_ns(rtc_clock) - + qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + s->tick_offset = s->tick_offset_vmstate - + delta / NANOSECONDS_PER_SECOND; + } + pl031_set_alarm(s); + return 0; +} + +static int pl031_tick_offset_post_load(void *opaque, int version_id) +{ + PL031State *s = opaque; + + s->tick_offset_migrated = true; + return 0; +} + +static bool pl031_tick_offset_needed(void *opaque) +{ + PL031State *s = opaque; + + return s->migrate_tick_offset; +} + +static const VMStateDescription vmstate_pl031_tick_offset = { + .name = "pl031/tick-offset", + .version_id = 1, + .minimum_version_id = 1, + .needed = pl031_tick_offset_needed, + .post_load = pl031_tick_offset_post_load, + .fields = (VMStateField[]) { + VMSTATE_UINT32(tick_offset, PL031State), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_pl031 = { + .name = "pl031", + .version_id = 1, + .minimum_version_id = 1, + .pre_save = pl031_pre_save, + .pre_load = pl031_pre_load, + .post_load = pl031_post_load, + .fields = (VMStateField[]) { + VMSTATE_UINT32(tick_offset_vmstate, PL031State), + VMSTATE_UINT32(mr, PL031State), + VMSTATE_UINT32(lr, PL031State), + VMSTATE_UINT32(cr, PL031State), + VMSTATE_UINT32(im, PL031State), + VMSTATE_UINT32(is, PL031State), + VMSTATE_END_OF_LIST() + }, + .subsections = (const VMStateDescription*[]) { + &vmstate_pl031_tick_offset, + NULL + } +}; + +static Property pl031_properties[] = { + /* + * True to correctly migrate the tick offset of the RTC. False to + * obtain backward migration compatibility with older QEMU versions, + * at the expense of the guest RTC going backwards compared with the + * host RTC when the VM is saved/restored if using -rtc host. + * (Even if set to 'true' older QEMU can migrate forward to newer QEMU; + * 'false' also permits newer QEMU to migrate to older QEMU.) + */ + DEFINE_PROP_BOOL("migrate-tick-offset", + PL031State, migrate_tick_offset, true), + DEFINE_PROP_END_OF_LIST() +}; + +static void pl031_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->vmsd = &vmstate_pl031; + device_class_set_props(dc, pl031_properties); +} + +static const TypeInfo pl031_info = { + .name = TYPE_PL031, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(PL031State), + .instance_init = pl031_init, + .instance_finalize = pl031_finalize, + .class_init = pl031_class_init, +}; + +static void pl031_register_types(void) +{ + type_register_static(&pl031_info); +} + +type_init(pl031_register_types) diff --git a/hw/rtc/sun4v-rtc.c b/hw/rtc/sun4v-rtc.c new file mode 100644 index 000000000..e037acd1b --- /dev/null +++ b/hw/rtc/sun4v-rtc.c @@ -0,0 +1,97 @@ +/* + * QEMU sun4v Real Time Clock device + * + * The sun4v_rtc device (sun4v tod clock) + * + * Copyright (c) 2016 Artyom Tarasenko + * + * This code is licensed under the GNU GPL v3 or (at your option) any later + * version. 
+ */ + +#include "qemu/osdep.h" +#include "hw/sysbus.h" +#include "qapi/error.h" +#include "qemu/module.h" +#include "qemu/timer.h" +#include "hw/rtc/sun4v-rtc.h" +#include "trace.h" +#include "qom/object.h" + + +#define TYPE_SUN4V_RTC "sun4v_rtc" +OBJECT_DECLARE_SIMPLE_TYPE(Sun4vRtc, SUN4V_RTC) + +struct Sun4vRtc { + SysBusDevice parent_obj; + + MemoryRegion iomem; +}; + +static uint64_t sun4v_rtc_read(void *opaque, hwaddr addr, + unsigned size) +{ + uint64_t val = get_clock_realtime() / NANOSECONDS_PER_SECOND; + if (!(addr & 4ULL)) { + /* accessing the high 32 bits */ + val >>= 32; + } + trace_sun4v_rtc_read(addr, val); + return val; +} + +static void sun4v_rtc_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + trace_sun4v_rtc_write(addr, val); +} + +static const MemoryRegionOps sun4v_rtc_ops = { + .read = sun4v_rtc_read, + .write = sun4v_rtc_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +void sun4v_rtc_init(hwaddr addr) +{ + DeviceState *dev; + SysBusDevice *s; + + dev = qdev_new(TYPE_SUN4V_RTC); + s = SYS_BUS_DEVICE(dev); + + sysbus_realize_and_unref(s, &error_fatal); + + sysbus_mmio_map(s, 0, addr); +} + +static void sun4v_rtc_realize(DeviceState *dev, Error **errp) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + Sun4vRtc *s = SUN4V_RTC(dev); + + memory_region_init_io(&s->iomem, OBJECT(s), &sun4v_rtc_ops, s, + "sun4v-rtc", 0x08ULL); + sysbus_init_mmio(sbd, &s->iomem); +} + +static void sun4v_rtc_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = sun4v_rtc_realize; +} + +static const TypeInfo sun4v_rtc_info = { + .name = TYPE_SUN4V_RTC, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(Sun4vRtc), + .class_init = sun4v_rtc_class_init, +}; + +static void sun4v_rtc_register_types(void) +{ + type_register_static(&sun4v_rtc_info); +} + +type_init(sun4v_rtc_register_types) diff --git a/hw/rtc/trace-events b/hw/rtc/trace-events new file mode 100644 index 000000000..ebb311a5b --- /dev/null +++ b/hw/rtc/trace-events @@ -0,0 +1,33 @@ +# See docs/devel/tracing.rst for syntax documentation. 
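+# Each line declares one trace point: <name>(<typed args>) "<format string>".
+# A hypothetical extra probe for an RTC model would look like:
+# my_rtc_alarm_fired(uint64_t now) "alarm fired at %" PRIu64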
+
+# allwinner-rtc.c
+allwinner_rtc_read(uint64_t addr, uint64_t value) "addr 0x%" PRIx64 " value 0x%" PRIx64
+allwinner_rtc_write(uint64_t addr, uint64_t value) "addr 0x%" PRIx64 " value 0x%" PRIx64
+
+# sun4v-rtc.c
+sun4v_rtc_read(uint64_t addr, uint64_t value) "read: addr 0x%" PRIx64 " value 0x%" PRIx64
+sun4v_rtc_write(uint64_t addr, uint64_t value) "write: addr 0x%" PRIx64 " value 0x%" PRIx64
+
+# xlnx-zynqmp-rtc.c
+xlnx_zynqmp_rtc_gettime(int year, int month, int day, int hour, int min, int sec) "Get time from host: %d-%d-%d %2d:%02d:%02d"
+
+# pl031.c
+pl031_irq_state(int level) "irq state %d"
+pl031_read(uint32_t addr, uint32_t value) "addr 0x%08x value 0x%08x"
+pl031_write(uint32_t addr, uint32_t value) "addr 0x%08x value 0x%08x"
+pl031_alarm_raised(void) "alarm raised"
+pl031_set_alarm(uint32_t ticks) "alarm set for %u ticks"
+
+# aspeed_rtc.c
+aspeed_rtc_read(uint64_t addr, uint64_t value) "addr 0x%02" PRIx64 " value 0x%08" PRIx64
+aspeed_rtc_write(uint64_t addr, uint64_t value) "addr 0x%02" PRIx64 " value 0x%08" PRIx64
+
+# m48t59.c
+m48txx_nvram_io_read(uint64_t addr, uint64_t value) "io read addr:0x%04" PRIx64 " value:0x%02" PRIx64
+m48txx_nvram_io_write(uint64_t addr, uint64_t value) "io write addr:0x%04" PRIx64 " value:0x%02" PRIx64
+m48txx_nvram_mem_read(uint32_t addr, uint32_t value) "mem read addr:0x%04x value:0x%02x"
+m48txx_nvram_mem_write(uint32_t addr, uint32_t value) "mem write addr:0x%04x value:0x%02x"
+
+# goldfish_rtc.c
+goldfish_rtc_read(uint64_t addr, uint64_t value) "addr 0x%02" PRIx64 " value 0x%08" PRIx64
+goldfish_rtc_write(uint64_t addr, uint64_t value) "addr 0x%02" PRIx64 " value 0x%08" PRIx64
diff --git a/hw/rtc/trace.h b/hw/rtc/trace.h
new file mode 100644
index 000000000..cfd5d6ee6
--- /dev/null
+++ b/hw/rtc/trace.h
@@ -0,0 +1 @@
+#include "trace/trace-hw_rtc.h"
diff --git a/hw/rtc/twl92230.c b/hw/rtc/twl92230.c
new file mode 100644
index 000000000..0922df5ad
--- /dev/null
+++ b/hw/rtc/twl92230.c
@@ -0,0 +1,882 @@
+/*
+ * TI TWL92230C energy-management companion device for the OMAP24xx.
+ * Aka. Menelaus (N4200 MENELAUS1_V2.2)
+ *
+ * Copyright (C) 2008 Nokia Corporation
+ * Written by Andrzej Zaborowski
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 or
+ * (at your option) version 3 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */ + +#include "qemu/osdep.h" +#include "qemu-common.h" +#include "qemu/timer.h" +#include "hw/i2c/i2c.h" +#include "hw/irq.h" +#include "migration/qemu-file-types.h" +#include "migration/vmstate.h" +#include "sysemu/sysemu.h" +#include "qemu/bcd.h" +#include "qemu/module.h" +#include "qom/object.h" + +#define VERBOSE 1 + +#define TYPE_TWL92230 "twl92230" +OBJECT_DECLARE_SIMPLE_TYPE(MenelausState, TWL92230) + +struct MenelausState { + I2CSlave parent_obj; + + int firstbyte; + uint8_t reg; + + uint8_t vcore[5]; + uint8_t dcdc[3]; + uint8_t ldo[8]; + uint8_t sleep[2]; + uint8_t osc; + uint8_t detect; + uint16_t mask; + uint16_t status; + uint8_t dir; + uint8_t inputs; + uint8_t outputs; + uint8_t bbsms; + uint8_t pull[4]; + uint8_t mmc_ctrl[3]; + uint8_t mmc_debounce; + struct { + uint8_t ctrl; + uint16_t comp; + QEMUTimer *hz_tm; + int64_t next; + struct tm tm; + struct tm new; + struct tm alm; + int sec_offset; + int alm_sec; + int next_comp; + } rtc; + uint16_t rtc_next_vmstate; + qemu_irq out[4]; + uint8_t pwrbtn_state; +}; + +static inline void menelaus_update(MenelausState *s) +{ + qemu_set_irq(s->out[3], s->status & ~s->mask); +} + +static inline void menelaus_rtc_start(MenelausState *s) +{ + s->rtc.next += qemu_clock_get_ms(rtc_clock); + timer_mod(s->rtc.hz_tm, s->rtc.next); +} + +static inline void menelaus_rtc_stop(MenelausState *s) +{ + timer_del(s->rtc.hz_tm); + s->rtc.next -= qemu_clock_get_ms(rtc_clock); + if (s->rtc.next < 1) + s->rtc.next = 1; +} + +static void menelaus_rtc_update(MenelausState *s) +{ + qemu_get_timedate(&s->rtc.tm, s->rtc.sec_offset); +} + +static void menelaus_alm_update(MenelausState *s) +{ + if ((s->rtc.ctrl & 3) == 3) + s->rtc.alm_sec = qemu_timedate_diff(&s->rtc.alm) - s->rtc.sec_offset; +} + +static void menelaus_rtc_hz(void *opaque) +{ + MenelausState *s = (MenelausState *) opaque; + + s->rtc.next_comp --; + s->rtc.alm_sec --; + s->rtc.next += 1000; + timer_mod(s->rtc.hz_tm, s->rtc.next); + if ((s->rtc.ctrl >> 3) & 3) { /* EVERY */ + menelaus_rtc_update(s); + if (((s->rtc.ctrl >> 3) & 3) == 1 && !s->rtc.tm.tm_sec) + s->status |= 1 << 8; /* RTCTMR */ + else if (((s->rtc.ctrl >> 3) & 3) == 2 && !s->rtc.tm.tm_min) + s->status |= 1 << 8; /* RTCTMR */ + else if (!s->rtc.tm.tm_hour) + s->status |= 1 << 8; /* RTCTMR */ + } else + s->status |= 1 << 8; /* RTCTMR */ + if ((s->rtc.ctrl >> 1) & 1) { /* RTC_AL_EN */ + if (s->rtc.alm_sec == 0) + s->status |= 1 << 9; /* RTCALM */ + /* TODO: wake-up */ + } + if (s->rtc.next_comp <= 0) { + s->rtc.next -= muldiv64((int16_t) s->rtc.comp, 1000, 0x8000); + s->rtc.next_comp = 3600; + } + menelaus_update(s); +} + +static void menelaus_reset(I2CSlave *i2c) +{ + MenelausState *s = TWL92230(i2c); + + s->reg = 0x00; + + s->vcore[0] = 0x0c; /* XXX: X-loader needs 0x8c? check! 
*/ + s->vcore[1] = 0x05; + s->vcore[2] = 0x02; + s->vcore[3] = 0x0c; + s->vcore[4] = 0x03; + s->dcdc[0] = 0x33; /* Depends on wiring */ + s->dcdc[1] = 0x03; + s->dcdc[2] = 0x00; + s->ldo[0] = 0x95; + s->ldo[1] = 0x7e; + s->ldo[2] = 0x00; + s->ldo[3] = 0x00; /* Depends on wiring */ + s->ldo[4] = 0x03; /* Depends on wiring */ + s->ldo[5] = 0x00; + s->ldo[6] = 0x00; + s->ldo[7] = 0x00; + s->sleep[0] = 0x00; + s->sleep[1] = 0x00; + s->osc = 0x01; + s->detect = 0x09; + s->mask = 0x0fff; + s->status = 0; + s->dir = 0x07; + s->outputs = 0x00; + s->bbsms = 0x00; + s->pull[0] = 0x00; + s->pull[1] = 0x00; + s->pull[2] = 0x00; + s->pull[3] = 0x00; + s->mmc_ctrl[0] = 0x03; + s->mmc_ctrl[1] = 0xc0; + s->mmc_ctrl[2] = 0x00; + s->mmc_debounce = 0x05; + + if (s->rtc.ctrl & 1) + menelaus_rtc_stop(s); + s->rtc.ctrl = 0x00; + s->rtc.comp = 0x0000; + s->rtc.next = 1000; + s->rtc.sec_offset = 0; + s->rtc.next_comp = 1800; + s->rtc.alm_sec = 1800; + s->rtc.alm.tm_sec = 0x00; + s->rtc.alm.tm_min = 0x00; + s->rtc.alm.tm_hour = 0x00; + s->rtc.alm.tm_mday = 0x01; + s->rtc.alm.tm_mon = 0x00; + s->rtc.alm.tm_year = 2004; + menelaus_update(s); +} + +static void menelaus_gpio_set(void *opaque, int line, int level) +{ + MenelausState *s = (MenelausState *) opaque; + + if (line < 3) { + /* No interrupt generated */ + s->inputs &= ~(1 << line); + s->inputs |= level << line; + return; + } + + if (!s->pwrbtn_state && level) { + s->status |= 1 << 11; /* PSHBTN */ + menelaus_update(s); + } + s->pwrbtn_state = level; +} + +#define MENELAUS_REV 0x01 +#define MENELAUS_VCORE_CTRL1 0x02 +#define MENELAUS_VCORE_CTRL2 0x03 +#define MENELAUS_VCORE_CTRL3 0x04 +#define MENELAUS_VCORE_CTRL4 0x05 +#define MENELAUS_VCORE_CTRL5 0x06 +#define MENELAUS_DCDC_CTRL1 0x07 +#define MENELAUS_DCDC_CTRL2 0x08 +#define MENELAUS_DCDC_CTRL3 0x09 +#define MENELAUS_LDO_CTRL1 0x0a +#define MENELAUS_LDO_CTRL2 0x0b +#define MENELAUS_LDO_CTRL3 0x0c +#define MENELAUS_LDO_CTRL4 0x0d +#define MENELAUS_LDO_CTRL5 0x0e +#define MENELAUS_LDO_CTRL6 0x0f +#define MENELAUS_LDO_CTRL7 0x10 +#define MENELAUS_LDO_CTRL8 0x11 +#define MENELAUS_SLEEP_CTRL1 0x12 +#define MENELAUS_SLEEP_CTRL2 0x13 +#define MENELAUS_DEVICE_OFF 0x14 +#define MENELAUS_OSC_CTRL 0x15 +#define MENELAUS_DETECT_CTRL 0x16 +#define MENELAUS_INT_MASK1 0x17 +#define MENELAUS_INT_MASK2 0x18 +#define MENELAUS_INT_STATUS1 0x19 +#define MENELAUS_INT_STATUS2 0x1a +#define MENELAUS_INT_ACK1 0x1b +#define MENELAUS_INT_ACK2 0x1c +#define MENELAUS_GPIO_CTRL 0x1d +#define MENELAUS_GPIO_IN 0x1e +#define MENELAUS_GPIO_OUT 0x1f +#define MENELAUS_BBSMS 0x20 +#define MENELAUS_RTC_CTRL 0x21 +#define MENELAUS_RTC_UPDATE 0x22 +#define MENELAUS_RTC_SEC 0x23 +#define MENELAUS_RTC_MIN 0x24 +#define MENELAUS_RTC_HR 0x25 +#define MENELAUS_RTC_DAY 0x26 +#define MENELAUS_RTC_MON 0x27 +#define MENELAUS_RTC_YR 0x28 +#define MENELAUS_RTC_WKDAY 0x29 +#define MENELAUS_RTC_AL_SEC 0x2a +#define MENELAUS_RTC_AL_MIN 0x2b +#define MENELAUS_RTC_AL_HR 0x2c +#define MENELAUS_RTC_AL_DAY 0x2d +#define MENELAUS_RTC_AL_MON 0x2e +#define MENELAUS_RTC_AL_YR 0x2f +#define MENELAUS_RTC_COMP_MSB 0x30 +#define MENELAUS_RTC_COMP_LSB 0x31 +#define MENELAUS_S1_PULL_EN 0x32 +#define MENELAUS_S1_PULL_DIR 0x33 +#define MENELAUS_S2_PULL_EN 0x34 +#define MENELAUS_S2_PULL_DIR 0x35 +#define MENELAUS_MCT_CTRL1 0x36 +#define MENELAUS_MCT_CTRL2 0x37 +#define MENELAUS_MCT_CTRL3 0x38 +#define MENELAUS_MCT_PIN_ST 0x39 +#define MENELAUS_DEBOUNCE1 0x3a + +static uint8_t menelaus_read(void *opaque, uint8_t addr) +{ + MenelausState *s = (MenelausState *) opaque; + + 
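+    /*
+     * Most registers read back their shadow state directly; the RTC
+     * time registers first refresh s->rtc.tm from the host clock via
+     * menelaus_rtc_update().
+     */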
switch (addr) { + case MENELAUS_REV: + return 0x22; + + case MENELAUS_VCORE_CTRL1 ... MENELAUS_VCORE_CTRL5: + return s->vcore[addr - MENELAUS_VCORE_CTRL1]; + + case MENELAUS_DCDC_CTRL1 ... MENELAUS_DCDC_CTRL3: + return s->dcdc[addr - MENELAUS_DCDC_CTRL1]; + + case MENELAUS_LDO_CTRL1 ... MENELAUS_LDO_CTRL8: + return s->ldo[addr - MENELAUS_LDO_CTRL1]; + + case MENELAUS_SLEEP_CTRL1: + case MENELAUS_SLEEP_CTRL2: + return s->sleep[addr - MENELAUS_SLEEP_CTRL1]; + + case MENELAUS_DEVICE_OFF: + return 0; + + case MENELAUS_OSC_CTRL: + return s->osc | (1 << 7); /* CLK32K_GOOD */ + + case MENELAUS_DETECT_CTRL: + return s->detect; + + case MENELAUS_INT_MASK1: + return (s->mask >> 0) & 0xff; + case MENELAUS_INT_MASK2: + return (s->mask >> 8) & 0xff; + + case MENELAUS_INT_STATUS1: + return (s->status >> 0) & 0xff; + case MENELAUS_INT_STATUS2: + return (s->status >> 8) & 0xff; + + case MENELAUS_INT_ACK1: + case MENELAUS_INT_ACK2: + return 0; + + case MENELAUS_GPIO_CTRL: + return s->dir; + case MENELAUS_GPIO_IN: + return s->inputs | (~s->dir & s->outputs); + case MENELAUS_GPIO_OUT: + return s->outputs; + + case MENELAUS_BBSMS: + return s->bbsms; + + case MENELAUS_RTC_CTRL: + return s->rtc.ctrl; + case MENELAUS_RTC_UPDATE: + return 0x00; + case MENELAUS_RTC_SEC: + menelaus_rtc_update(s); + return to_bcd(s->rtc.tm.tm_sec); + case MENELAUS_RTC_MIN: + menelaus_rtc_update(s); + return to_bcd(s->rtc.tm.tm_min); + case MENELAUS_RTC_HR: + menelaus_rtc_update(s); + if ((s->rtc.ctrl >> 2) & 1) /* MODE12_n24 */ + return to_bcd((s->rtc.tm.tm_hour % 12) + 1) | + (!!(s->rtc.tm.tm_hour >= 12) << 7); /* PM_nAM */ + else + return to_bcd(s->rtc.tm.tm_hour); + case MENELAUS_RTC_DAY: + menelaus_rtc_update(s); + return to_bcd(s->rtc.tm.tm_mday); + case MENELAUS_RTC_MON: + menelaus_rtc_update(s); + return to_bcd(s->rtc.tm.tm_mon + 1); + case MENELAUS_RTC_YR: + menelaus_rtc_update(s); + return to_bcd(s->rtc.tm.tm_year - 2000); + case MENELAUS_RTC_WKDAY: + menelaus_rtc_update(s); + return to_bcd(s->rtc.tm.tm_wday); + case MENELAUS_RTC_AL_SEC: + return to_bcd(s->rtc.alm.tm_sec); + case MENELAUS_RTC_AL_MIN: + return to_bcd(s->rtc.alm.tm_min); + case MENELAUS_RTC_AL_HR: + if ((s->rtc.ctrl >> 2) & 1) /* MODE12_n24 */ + return to_bcd((s->rtc.alm.tm_hour % 12) + 1) | + (!!(s->rtc.alm.tm_hour >= 12) << 7);/* AL_PM_nAM */ + else + return to_bcd(s->rtc.alm.tm_hour); + case MENELAUS_RTC_AL_DAY: + return to_bcd(s->rtc.alm.tm_mday); + case MENELAUS_RTC_AL_MON: + return to_bcd(s->rtc.alm.tm_mon + 1); + case MENELAUS_RTC_AL_YR: + return to_bcd(s->rtc.alm.tm_year - 2000); + case MENELAUS_RTC_COMP_MSB: + return (s->rtc.comp >> 8) & 0xff; + case MENELAUS_RTC_COMP_LSB: + return (s->rtc.comp >> 0) & 0xff; + + case MENELAUS_S1_PULL_EN: + return s->pull[0]; + case MENELAUS_S1_PULL_DIR: + return s->pull[1]; + case MENELAUS_S2_PULL_EN: + return s->pull[2]; + case MENELAUS_S2_PULL_DIR: + return s->pull[3]; + + case MENELAUS_MCT_CTRL1 ... 
MENELAUS_MCT_CTRL3: + return s->mmc_ctrl[addr - MENELAUS_MCT_CTRL1]; + case MENELAUS_MCT_PIN_ST: + /* TODO: return the real Card Detect */ + return 0; + case MENELAUS_DEBOUNCE1: + return s->mmc_debounce; + + default: +#ifdef VERBOSE + printf("%s: unknown register %02x\n", __func__, addr); +#endif + break; + } + return 0; +} + +static void menelaus_write(void *opaque, uint8_t addr, uint8_t value) +{ + MenelausState *s = (MenelausState *) opaque; + int line; + struct tm tm; + + switch (addr) { + case MENELAUS_VCORE_CTRL1: + s->vcore[0] = (value & 0xe) | MIN(value & 0x1f, 0x12); + break; + case MENELAUS_VCORE_CTRL2: + s->vcore[1] = value; + break; + case MENELAUS_VCORE_CTRL3: + s->vcore[2] = MIN(value & 0x1f, 0x12); + break; + case MENELAUS_VCORE_CTRL4: + s->vcore[3] = MIN(value & 0x1f, 0x12); + break; + case MENELAUS_VCORE_CTRL5: + s->vcore[4] = value & 3; + /* XXX + * auto set to 3 on M_Active, nRESWARM + * auto set to 0 on M_WaitOn, M_Backup + */ + break; + + case MENELAUS_DCDC_CTRL1: + s->dcdc[0] = value & 0x3f; + break; + case MENELAUS_DCDC_CTRL2: + s->dcdc[1] = value & 0x07; + /* XXX + * auto set to 3 on M_Active, nRESWARM + * auto set to 0 on M_WaitOn, M_Backup + */ + break; + case MENELAUS_DCDC_CTRL3: + s->dcdc[2] = value & 0x07; + break; + + case MENELAUS_LDO_CTRL1: + s->ldo[0] = value; + break; + case MENELAUS_LDO_CTRL2: + s->ldo[1] = value & 0x7f; + /* XXX + * auto set to 0x7e on M_WaitOn, M_Backup + */ + break; + case MENELAUS_LDO_CTRL3: + s->ldo[2] = value & 3; + /* XXX + * auto set to 3 on M_Active, nRESWARM + * auto set to 0 on M_WaitOn, M_Backup + */ + break; + case MENELAUS_LDO_CTRL4: + s->ldo[3] = value & 3; + /* XXX + * auto set to 3 on M_Active, nRESWARM + * auto set to 0 on M_WaitOn, M_Backup + */ + break; + case MENELAUS_LDO_CTRL5: + s->ldo[4] = value & 3; + /* XXX + * auto set to 3 on M_Active, nRESWARM + * auto set to 0 on M_WaitOn, M_Backup + */ + break; + case MENELAUS_LDO_CTRL6: + s->ldo[5] = value & 3; + break; + case MENELAUS_LDO_CTRL7: + s->ldo[6] = value & 3; + break; + case MENELAUS_LDO_CTRL8: + s->ldo[7] = value & 3; + break; + + case MENELAUS_SLEEP_CTRL1: + case MENELAUS_SLEEP_CTRL2: + s->sleep[addr - MENELAUS_SLEEP_CTRL1] = value; + break; + + case MENELAUS_DEVICE_OFF: + if (value & 1) { + menelaus_reset(I2C_SLAVE(s)); + } + break; + + case MENELAUS_OSC_CTRL: + s->osc = value & 7; + break; + + case MENELAUS_DETECT_CTRL: + s->detect = value & 0x7f; + break; + + case MENELAUS_INT_MASK1: + s->mask &= 0xf00; + s->mask |= value << 0; + menelaus_update(s); + break; + case MENELAUS_INT_MASK2: + s->mask &= 0x0ff; + s->mask |= value << 8; + menelaus_update(s); + break; + + case MENELAUS_INT_ACK1: + s->status &= ~(((uint16_t) value) << 0); + menelaus_update(s); + break; + case MENELAUS_INT_ACK2: + s->status &= ~(((uint16_t) value) << 8); + menelaus_update(s); + break; + + case MENELAUS_GPIO_CTRL: + for (line = 0; line < 3; line ++) { + if (((s->dir ^ value) >> line) & 1) { + qemu_set_irq(s->out[line], + ((s->outputs & ~s->dir) >> line) & 1); + } + } + s->dir = value & 0x67; + break; + case MENELAUS_GPIO_OUT: + for (line = 0; line < 3; line ++) { + if ((((s->outputs ^ value) & ~s->dir) >> line) & 1) { + qemu_set_irq(s->out[line], (s->outputs >> line) & 1); + } + } + s->outputs = value & 0x07; + break; + + case MENELAUS_BBSMS: + s->bbsms = 0x0d; + break; + + case MENELAUS_RTC_CTRL: + if ((s->rtc.ctrl ^ value) & 1) { /* RTC_EN */ + if (value & 1) + menelaus_rtc_start(s); + else + menelaus_rtc_stop(s); + } + s->rtc.ctrl = value & 0x1f; + menelaus_alm_update(s); + break; + 
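+    /*
+     * RTC_UPDATE commits the time staged in s->rtc.new into the
+     * running clock; the low nibble selects what is copied (1=sec,
+     * 2=min, 3=hour, 4=mday, 5=mon, 6=year, 7=wday, 8=everything,
+     * 0=nothing).
+     */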
case MENELAUS_RTC_UPDATE: + menelaus_rtc_update(s); + memcpy(&tm, &s->rtc.tm, sizeof(tm)); + switch (value & 0xf) { + case 0: + break; + case 1: + tm.tm_sec = s->rtc.new.tm_sec; + break; + case 2: + tm.tm_min = s->rtc.new.tm_min; + break; + case 3: + if (s->rtc.new.tm_hour > 23) + goto rtc_badness; + tm.tm_hour = s->rtc.new.tm_hour; + break; + case 4: + if (s->rtc.new.tm_mday < 1) + goto rtc_badness; + /* TODO check range */ + tm.tm_mday = s->rtc.new.tm_mday; + break; + case 5: + if (s->rtc.new.tm_mon < 0 || s->rtc.new.tm_mon > 11) + goto rtc_badness; + tm.tm_mon = s->rtc.new.tm_mon; + break; + case 6: + tm.tm_year = s->rtc.new.tm_year; + break; + case 7: + /* TODO set .tm_mday instead */ + tm.tm_wday = s->rtc.new.tm_wday; + break; + case 8: + if (s->rtc.new.tm_hour > 23) + goto rtc_badness; + if (s->rtc.new.tm_mday < 1) + goto rtc_badness; + if (s->rtc.new.tm_mon < 0 || s->rtc.new.tm_mon > 11) + goto rtc_badness; + tm.tm_sec = s->rtc.new.tm_sec; + tm.tm_min = s->rtc.new.tm_min; + tm.tm_hour = s->rtc.new.tm_hour; + tm.tm_mday = s->rtc.new.tm_mday; + tm.tm_mon = s->rtc.new.tm_mon; + tm.tm_year = s->rtc.new.tm_year; + break; + rtc_badness: + default: + fprintf(stderr, "%s: bad RTC_UPDATE value %02x\n", + __func__, value); + s->status |= 1 << 10; /* RTCERR */ + menelaus_update(s); + } + s->rtc.sec_offset = qemu_timedate_diff(&tm); + break; + case MENELAUS_RTC_SEC: + s->rtc.tm.tm_sec = from_bcd(value & 0x7f); + break; + case MENELAUS_RTC_MIN: + s->rtc.tm.tm_min = from_bcd(value & 0x7f); + break; + case MENELAUS_RTC_HR: + s->rtc.tm.tm_hour = (s->rtc.ctrl & (1 << 2)) ? /* MODE12_n24 */ + MIN(from_bcd(value & 0x3f), 12) + ((value >> 7) ? 11 : -1) : + from_bcd(value & 0x3f); + break; + case MENELAUS_RTC_DAY: + s->rtc.tm.tm_mday = from_bcd(value); + break; + case MENELAUS_RTC_MON: + s->rtc.tm.tm_mon = MAX(1, from_bcd(value)) - 1; + break; + case MENELAUS_RTC_YR: + s->rtc.tm.tm_year = 2000 + from_bcd(value); + break; + case MENELAUS_RTC_WKDAY: + s->rtc.tm.tm_mday = from_bcd(value); + break; + case MENELAUS_RTC_AL_SEC: + s->rtc.alm.tm_sec = from_bcd(value & 0x7f); + menelaus_alm_update(s); + break; + case MENELAUS_RTC_AL_MIN: + s->rtc.alm.tm_min = from_bcd(value & 0x7f); + menelaus_alm_update(s); + break; + case MENELAUS_RTC_AL_HR: + s->rtc.alm.tm_hour = (s->rtc.ctrl & (1 << 2)) ? /* MODE12_n24 */ + MIN(from_bcd(value & 0x3f), 12) + ((value >> 7) ? 
11 : -1) : + from_bcd(value & 0x3f); + menelaus_alm_update(s); + break; + case MENELAUS_RTC_AL_DAY: + s->rtc.alm.tm_mday = from_bcd(value); + menelaus_alm_update(s); + break; + case MENELAUS_RTC_AL_MON: + s->rtc.alm.tm_mon = MAX(1, from_bcd(value)) - 1; + menelaus_alm_update(s); + break; + case MENELAUS_RTC_AL_YR: + s->rtc.alm.tm_year = 2000 + from_bcd(value); + menelaus_alm_update(s); + break; + case MENELAUS_RTC_COMP_MSB: + s->rtc.comp &= 0xff; + s->rtc.comp |= value << 8; + break; + case MENELAUS_RTC_COMP_LSB: + s->rtc.comp &= 0xff << 8; + s->rtc.comp |= value; + break; + + case MENELAUS_S1_PULL_EN: + s->pull[0] = value; + break; + case MENELAUS_S1_PULL_DIR: + s->pull[1] = value & 0x1f; + break; + case MENELAUS_S2_PULL_EN: + s->pull[2] = value; + break; + case MENELAUS_S2_PULL_DIR: + s->pull[3] = value & 0x1f; + break; + + case MENELAUS_MCT_CTRL1: + s->mmc_ctrl[0] = value & 0x7f; + break; + case MENELAUS_MCT_CTRL2: + s->mmc_ctrl[1] = value; + /* TODO update Card Detect interrupts */ + break; + case MENELAUS_MCT_CTRL3: + s->mmc_ctrl[2] = value & 0xf; + break; + case MENELAUS_DEBOUNCE1: + s->mmc_debounce = value & 0x3f; + break; + + default: +#ifdef VERBOSE + printf("%s: unknown register %02x\n", __func__, addr); +#endif + break; + } +} + +static int menelaus_event(I2CSlave *i2c, enum i2c_event event) +{ + MenelausState *s = TWL92230(i2c); + + if (event == I2C_START_SEND) + s->firstbyte = 1; + + return 0; +} + +static int menelaus_tx(I2CSlave *i2c, uint8_t data) +{ + MenelausState *s = TWL92230(i2c); + + /* Interpret register address byte */ + if (s->firstbyte) { + s->reg = data; + s->firstbyte = 0; + } else + menelaus_write(s, s->reg ++, data); + + return 0; +} + +static uint8_t menelaus_rx(I2CSlave *i2c) +{ + MenelausState *s = TWL92230(i2c); + + return menelaus_read(s, s->reg ++); +} + +/* Save restore 32 bit int as uint16_t + This is a Big hack, but it is how the old state did it. 
+   The alternative would be to break compatibility in the state format,
+   or to stop using struct tm.
+ */
+
+static int get_int32_as_uint16(QEMUFile *f, void *pv, size_t size,
+                               const VMStateField *field)
+{
+    int *v = pv;
+    *v = qemu_get_be16(f);
+    return 0;
+}
+
+static int put_int32_as_uint16(QEMUFile *f, void *pv, size_t size,
+                               const VMStateField *field, JSONWriter *vmdesc)
+{
+    int *v = pv;
+    qemu_put_be16(f, *v);
+
+    return 0;
+}
+
+static const VMStateInfo vmstate_hack_int32_as_uint16 = {
+    .name = "int32_as_uint16",
+    .get = get_int32_as_uint16,
+    .put = put_int32_as_uint16,
+};
+
+#define VMSTATE_UINT16_HACK(_f, _s) \
+    VMSTATE_SINGLE(_f, _s, 0, vmstate_hack_int32_as_uint16, int32_t)
+
+static const VMStateDescription vmstate_menelaus_tm = {
+    .name = "menelaus_tm",
+    .version_id = 0,
+    .minimum_version_id = 0,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT16_HACK(tm_sec, struct tm),
+        VMSTATE_UINT16_HACK(tm_min, struct tm),
+        VMSTATE_UINT16_HACK(tm_hour, struct tm),
+        VMSTATE_UINT16_HACK(tm_mday, struct tm),
+        VMSTATE_UINT16_HACK(tm_mon, struct tm),
+        VMSTATE_UINT16_HACK(tm_year, struct tm),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+static int menelaus_pre_save(void *opaque)
+{
+    MenelausState *s = opaque;
+    /* Should be <= 1000 */
+    s->rtc_next_vmstate = s->rtc.next - qemu_clock_get_ms(rtc_clock);
+
+    return 0;
+}
+
+static int menelaus_post_load(void *opaque, int version_id)
+{
+    MenelausState *s = opaque;
+
+    if (s->rtc.ctrl & 1) /* RTC_EN */
+        menelaus_rtc_stop(s);
+
+    s->rtc.next = s->rtc_next_vmstate;
+
+    menelaus_alm_update(s);
+    menelaus_update(s);
+    if (s->rtc.ctrl & 1) /* RTC_EN */
+        menelaus_rtc_start(s);
+    return 0;
+}
+
+static const VMStateDescription vmstate_menelaus = {
+    .name = "menelaus",
+    .version_id = 0,
+    .minimum_version_id = 0,
+    .pre_save = menelaus_pre_save,
+    .post_load = menelaus_post_load,
+    .fields = (VMStateField[]) {
+        VMSTATE_INT32(firstbyte, MenelausState),
+        VMSTATE_UINT8(reg, MenelausState),
+        VMSTATE_UINT8_ARRAY(vcore, MenelausState, 5),
+        VMSTATE_UINT8_ARRAY(dcdc, MenelausState, 3),
+        VMSTATE_UINT8_ARRAY(ldo, MenelausState, 8),
+        VMSTATE_UINT8_ARRAY(sleep, MenelausState, 2),
+        VMSTATE_UINT8(osc, MenelausState),
+        VMSTATE_UINT8(detect, MenelausState),
+        VMSTATE_UINT16(mask, MenelausState),
+        VMSTATE_UINT16(status, MenelausState),
+        VMSTATE_UINT8(dir, MenelausState),
+        VMSTATE_UINT8(inputs, MenelausState),
+        VMSTATE_UINT8(outputs, MenelausState),
+        VMSTATE_UINT8(bbsms, MenelausState),
+        VMSTATE_UINT8_ARRAY(pull, MenelausState, 4),
+        VMSTATE_UINT8_ARRAY(mmc_ctrl, MenelausState, 3),
+        VMSTATE_UINT8(mmc_debounce, MenelausState),
+        VMSTATE_UINT8(rtc.ctrl, MenelausState),
+        VMSTATE_UINT16(rtc.comp, MenelausState),
+        VMSTATE_UINT16(rtc_next_vmstate, MenelausState),
+        VMSTATE_STRUCT(rtc.new, MenelausState, 0, vmstate_menelaus_tm,
+                       struct tm),
+        VMSTATE_STRUCT(rtc.alm, MenelausState, 0, vmstate_menelaus_tm,
+                       struct tm),
+        VMSTATE_UINT8(pwrbtn_state, MenelausState),
+        VMSTATE_I2C_SLAVE(parent_obj, MenelausState),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+static void twl92230_realize(DeviceState *dev, Error **errp)
+{
+    MenelausState *s = TWL92230(dev);
+
+    s->rtc.hz_tm = timer_new_ms(rtc_clock, menelaus_rtc_hz, s);
+    /* Three output pins plus one interrupt pin. */
+    qdev_init_gpio_out(dev, s->out, 4);
+
+    /* Three input pins plus one power-button pin.
*/ + qdev_init_gpio_in(dev, menelaus_gpio_set, 4); + + menelaus_reset(I2C_SLAVE(dev)); +} + +static void twl92230_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + I2CSlaveClass *sc = I2C_SLAVE_CLASS(klass); + + dc->realize = twl92230_realize; + sc->event = menelaus_event; + sc->recv = menelaus_rx; + sc->send = menelaus_tx; + dc->vmsd = &vmstate_menelaus; +} + +static const TypeInfo twl92230_info = { + .name = TYPE_TWL92230, + .parent = TYPE_I2C_SLAVE, + .instance_size = sizeof(MenelausState), + .class_init = twl92230_class_init, +}; + +static void twl92230_register_types(void) +{ + type_register_static(&twl92230_info); +} + +type_init(twl92230_register_types) diff --git a/hw/rtc/xlnx-zynqmp-rtc.c b/hw/rtc/xlnx-zynqmp-rtc.c new file mode 100644 index 000000000..2bcd14d77 --- /dev/null +++ b/hw/rtc/xlnx-zynqmp-rtc.c @@ -0,0 +1,275 @@ +/* + * QEMU model of the Xilinx ZynqMP Real Time Clock (RTC). + * + * Copyright (c) 2017 Xilinx Inc. + * + * Written-by: Alistair Francis + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include "qemu-common.h" +#include "hw/sysbus.h" +#include "hw/register.h" +#include "qemu/bitops.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "hw/irq.h" +#include "qemu/cutils.h" +#include "sysemu/sysemu.h" +#include "trace.h" +#include "hw/rtc/xlnx-zynqmp-rtc.h" +#include "migration/vmstate.h" + +#ifndef XLNX_ZYNQMP_RTC_ERR_DEBUG +#define XLNX_ZYNQMP_RTC_ERR_DEBUG 0 +#endif + +static void rtc_int_update_irq(XlnxZynqMPRTC *s) +{ + bool pending = s->regs[R_RTC_INT_STATUS] & ~s->regs[R_RTC_INT_MASK]; + qemu_set_irq(s->irq_rtc_int, pending); +} + +static void addr_error_int_update_irq(XlnxZynqMPRTC *s) +{ + bool pending = s->regs[R_ADDR_ERROR] & ~s->regs[R_ADDR_ERROR_INT_MASK]; + qemu_set_irq(s->irq_addr_error_int, pending); +} + +static uint32_t rtc_get_count(XlnxZynqMPRTC *s) +{ + int64_t now = qemu_clock_get_ns(rtc_clock); + return s->tick_offset + now / NANOSECONDS_PER_SECOND; +} + +static uint64_t current_time_postr(RegisterInfo *reg, uint64_t val64) +{ + XlnxZynqMPRTC *s = XLNX_ZYNQMP_RTC(reg->opaque); + + return rtc_get_count(s); +} + +static void rtc_int_status_postw(RegisterInfo *reg, uint64_t val64) +{ + XlnxZynqMPRTC *s = XLNX_ZYNQMP_RTC(reg->opaque); + rtc_int_update_irq(s); +} + +static uint64_t rtc_int_en_prew(RegisterInfo *reg, uint64_t val64) +{ + XlnxZynqMPRTC *s = XLNX_ZYNQMP_RTC(reg->opaque); + + s->regs[R_RTC_INT_MASK] &= (uint32_t) ~val64; + rtc_int_update_irq(s); + return 0; +} + +static uint64_t rtc_int_dis_prew(RegisterInfo *reg, uint64_t val64) +{ + XlnxZynqMPRTC *s = XLNX_ZYNQMP_RTC(reg->opaque); + + s->regs[R_RTC_INT_MASK] |= (uint32_t) val64; + rtc_int_update_irq(s); + return 0; +} + +static void addr_error_postw(RegisterInfo *reg, uint64_t val64) +{ + XlnxZynqMPRTC *s = XLNX_ZYNQMP_RTC(reg->opaque); + addr_error_int_update_irq(s); +} + +static uint64_t addr_error_int_en_prew(RegisterInfo *reg, uint64_t val64) +{ + XlnxZynqMPRTC *s = XLNX_ZYNQMP_RTC(reg->opaque); + + s->regs[R_ADDR_ERROR_INT_MASK] &= (uint32_t) ~val64; + addr_error_int_update_irq(s); + return 0; +} + +static uint64_t addr_error_int_dis_prew(RegisterInfo *reg, uint64_t val64) +{ + XlnxZynqMPRTC *s = XLNX_ZYNQMP_RTC(reg->opaque); + + s->regs[R_ADDR_ERROR_INT_MASK] |= (uint32_t) val64; + addr_error_int_update_irq(s); + return 0; +} + +static const RegisterAccessInfo rtc_regs_info[] = { + { .name = "SET_TIME_WRITE", .addr = A_SET_TIME_WRITE, + .unimp = MAKE_64BIT_MASK(0, 32), + },{ .name = "SET_TIME_READ", .addr = A_SET_TIME_READ, + .ro = 0xffffffff, + .post_read = current_time_postr, + },{ .name = "CALIB_WRITE", .addr = A_CALIB_WRITE, + .unimp = MAKE_64BIT_MASK(0, 32), + },{ .name = "CALIB_READ", .addr = A_CALIB_READ, + .ro = 0x1fffff, + },{ .name = "CURRENT_TIME", .addr = A_CURRENT_TIME, + .ro = 0xffffffff, + .post_read = current_time_postr, + },{ .name = "CURRENT_TICK", .addr = A_CURRENT_TICK, + .ro = 0xffff, + },{ .name = "ALARM", .addr = A_ALARM, + },{ .name = "RTC_INT_STATUS", .addr = A_RTC_INT_STATUS, + .w1c = 0x3, + .post_write = rtc_int_status_postw, + },{ .name = "RTC_INT_MASK", .addr = A_RTC_INT_MASK, + .reset = 0x3, + .ro = 0x3, + },{ .name = "RTC_INT_EN", .addr = A_RTC_INT_EN, + .pre_write = rtc_int_en_prew, + },{ .name = "RTC_INT_DIS", .addr = A_RTC_INT_DIS, + .pre_write = rtc_int_dis_prew, + },{ .name = "ADDR_ERROR", .addr = A_ADDR_ERROR, + .w1c = 0x1, + .post_write = addr_error_postw, + },{ .name = "ADDR_ERROR_INT_MASK", .addr = A_ADDR_ERROR_INT_MASK, + .reset = 0x1, + .ro = 0x1, + },{ .name = "ADDR_ERROR_INT_EN", .addr = 
A_ADDR_ERROR_INT_EN,
+        .pre_write = addr_error_int_en_prew,
+    },{ .name = "ADDR_ERROR_INT_DIS", .addr = A_ADDR_ERROR_INT_DIS,
+        .pre_write = addr_error_int_dis_prew,
+    },{ .name = "CONTROL", .addr = A_CONTROL,
+        .reset = 0x1000000,
+        .rsvd = 0x70fffffe,
+    },{ .name = "SAFETY_CHK", .addr = A_SAFETY_CHK,
+    }
+};
+
+static void rtc_reset(DeviceState *dev)
+{
+    XlnxZynqMPRTC *s = XLNX_ZYNQMP_RTC(dev);
+    unsigned int i;
+
+    for (i = 0; i < ARRAY_SIZE(s->regs_info); ++i) {
+        register_reset(&s->regs_info[i]);
+    }
+
+    rtc_int_update_irq(s);
+    addr_error_int_update_irq(s);
+}
+
+static const MemoryRegionOps rtc_ops = {
+    .read = register_read_memory,
+    .write = register_write_memory,
+    .endianness = DEVICE_LITTLE_ENDIAN,
+    .valid = {
+        .min_access_size = 4,
+        .max_access_size = 4,
+    },
+};
+
+static void rtc_init(Object *obj)
+{
+    XlnxZynqMPRTC *s = XLNX_ZYNQMP_RTC(obj);
+    SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
+    RegisterInfoArray *reg_array;
+    struct tm current_tm;
+
+    memory_region_init(&s->iomem, obj, TYPE_XLNX_ZYNQMP_RTC,
+                       XLNX_ZYNQMP_RTC_R_MAX * 4);
+    reg_array =
+        register_init_block32(DEVICE(obj), rtc_regs_info,
+                              ARRAY_SIZE(rtc_regs_info),
+                              s->regs_info, s->regs,
+                              &rtc_ops,
+                              XLNX_ZYNQMP_RTC_ERR_DEBUG,
+                              XLNX_ZYNQMP_RTC_R_MAX * 4);
+    memory_region_add_subregion(&s->iomem,
+                                0x0,
+                                &reg_array->mem);
+    sysbus_init_mmio(sbd, &s->iomem);
+    sysbus_init_irq(sbd, &s->irq_rtc_int);
+    sysbus_init_irq(sbd, &s->irq_addr_error_int);
+
+    qemu_get_timedate(&current_tm, 0);
+    s->tick_offset = mktimegm(&current_tm) -
+        qemu_clock_get_ns(rtc_clock) / NANOSECONDS_PER_SECOND;
+
+    trace_xlnx_zynqmp_rtc_gettime(current_tm.tm_year, current_tm.tm_mon,
+                                  current_tm.tm_mday, current_tm.tm_hour,
+                                  current_tm.tm_min, current_tm.tm_sec);
+}
+
+static int rtc_pre_save(void *opaque)
+{
+    XlnxZynqMPRTC *s = opaque;
+    int64_t now = qemu_clock_get_ns(rtc_clock) / NANOSECONDS_PER_SECOND;
+
+    /* Add the time at migration */
+    s->tick_offset = s->tick_offset + now;
+
+    return 0;
+}
+
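+/*
+ * Offset arithmetic across save/load: the guest-visible time is always
+ * tick_offset + now. Example: if the guest clock reads 1000 s when
+ * pre_save runs at host time 600 s, tick_offset becomes 1000 s (the
+ * absolute guest time). If post_load then runs at host time 650 s,
+ * tick_offset becomes 350 s, so the guest clock resumes at
+ * 350 + 650 = 1000 s: the downtime is not added to the guest clock.
+ */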
+static int rtc_post_load(void *opaque, int version_id)
+{
+    XlnxZynqMPRTC *s = opaque;
+    int64_t now = qemu_clock_get_ns(rtc_clock) / NANOSECONDS_PER_SECOND;
+
+    /*
+     * Subtract the current time after migration. Combined with the
+     * pre_save action, this results in the time that the guest was
+     * stopped being subtracted from the offset.
+     */
+    s->tick_offset = s->tick_offset - now;
+
+    return 0;
+}
+
+static const VMStateDescription vmstate_rtc = {
+    .name = TYPE_XLNX_ZYNQMP_RTC,
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .pre_save = rtc_pre_save,
+    .post_load = rtc_post_load,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT32_ARRAY(regs, XlnxZynqMPRTC, XLNX_ZYNQMP_RTC_R_MAX),
+        VMSTATE_UINT32(tick_offset, XlnxZynqMPRTC),
+        VMSTATE_END_OF_LIST(),
+    }
+};
+
+static void rtc_class_init(ObjectClass *klass, void *data)
+{
+    DeviceClass *dc = DEVICE_CLASS(klass);
+
+    dc->reset = rtc_reset;
+    dc->vmsd = &vmstate_rtc;
+}
+
+static const TypeInfo rtc_info = {
+    .name = TYPE_XLNX_ZYNQMP_RTC,
+    .parent = TYPE_SYS_BUS_DEVICE,
+    .instance_size = sizeof(XlnxZynqMPRTC),
+    .class_init = rtc_class_init,
+    .instance_init = rtc_init,
+};
+
+static void rtc_register_types(void)
+{
+    type_register_static(&rtc_info);
+}
+
+type_init(rtc_register_types)
diff --git a/hw/rx/Kconfig b/hw/rx/Kconfig
new file mode 100644
index 000000000..2b297c5a6
--- /dev/null
+++ b/hw/rx/Kconfig
@@ -0,0 +1,10 @@
+config RX62N_MCU
+    bool
+    select RX_ICU
+    select RENESAS_TMR
+    select RENESAS_CMT
+    select RENESAS_SCI
+
+config RX_GDBSIM
+    bool
+    select RX62N_MCU
diff --git a/hw/rx/meson.build b/hw/rx/meson.build
new file mode 100644
index 000000000..d223512a7
--- /dev/null
+++ b/hw/rx/meson.build
@@ -0,0 +1,5 @@
+rx_ss = ss.source_set()
+rx_ss.add(when: 'CONFIG_RX_GDBSIM', if_true: files('rx-gdbsim.c'))
+rx_ss.add(when: 'CONFIG_RX62N_MCU', if_true: files('rx62n.c'))
+
+hw_arch += {'rx': rx_ss}
diff --git a/hw/rx/rx-gdbsim.c b/hw/rx/rx-gdbsim.c
new file mode 100644
index 000000000..75d1fec6c
--- /dev/null
+++ b/hw/rx/rx-gdbsim.c
@@ -0,0 +1,203 @@
+/*
+ * RX QEMU GDB simulator
+ *
+ * Copyright (c) 2019 Yoshinori Sato
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/cutils.h"
+#include "qemu/error-report.h"
+#include "qapi/error.h"
+#include "qemu-common.h"
+#include "hw/loader.h"
+#include "hw/rx/rx62n.h"
+#include "sysemu/qtest.h"
+#include "sysemu/device_tree.h"
+#include "hw/boards.h"
+#include "qom/object.h"
+
+/* Same address as the GDB integrated simulator */
+#define SDRAM_BASE  EXT_CS_BASE
+
+struct RxGdbSimMachineClass {
+    /*< private >*/
+    MachineClass parent_class;
+    /*< public >*/
+    const char *mcu_name;
+    uint32_t xtal_freq_hz;
+};
+typedef struct RxGdbSimMachineClass RxGdbSimMachineClass;
+
+struct RxGdbSimMachineState {
+    /*< private >*/
+    MachineState parent_obj;
+    /*< public >*/
+    RX62NState mcu;
+};
+typedef struct RxGdbSimMachineState RxGdbSimMachineState;
+
+#define TYPE_RX_GDBSIM_MACHINE MACHINE_TYPE_NAME("rx62n-common")
+
+DECLARE_OBJ_CHECKERS(RxGdbSimMachineState, RxGdbSimMachineClass,
+                     RX_GDBSIM_MACHINE, TYPE_RX_GDBSIM_MACHINE)
+
+
+static void rx_load_image(RXCPU *cpu, const char *filename,
+                          uint32_t start, uint32_t size)
+{
+    static uint32_t extable[32];
+    long kernel_size;
+    int i;
+
+    kernel_size = load_image_targphys(filename, start, size);
+    if (kernel_size < 0) {
+        fprintf(stderr, "qemu: could not load kernel '%s'\n", filename);
+        exit(1);
+    }
+    cpu->env.pc = start;
+
+    /* Set up the exception trap trampoline. */
+    /* The Linux kernel only works in little-endian mode. */
+    for (i = 0; i < ARRAY_SIZE(extable); i++) {
+        extable[i] = cpu_to_le32(0x10 + i * 4);
+    }
+    rom_add_blob_fixed("extable", extable, sizeof(extable), VECTOR_TABLE_BASE);
+}
+
+static void rx_gdbsim_init(MachineState *machine)
+{
+    MachineClass *mc = MACHINE_GET_CLASS(machine);
+    RxGdbSimMachineState *s = RX_GDBSIM_MACHINE(machine);
+    RxGdbSimMachineClass *rxc = RX_GDBSIM_MACHINE_GET_CLASS(machine);
+    MemoryRegion *sysmem = get_system_memory();
+    const char *kernel_filename = machine->kernel_filename;
+    const char *dtb_filename = machine->dtb;
+
+    if (machine->ram_size < mc->default_ram_size) {
+        char *sz = size_to_str(mc->default_ram_size);
+        error_report("Invalid RAM size, must be at least %s", sz);
+        g_free(sz);
+        exit(1);
+    }
+
+    /* Allocate memory space */
+    memory_region_add_subregion(sysmem, SDRAM_BASE, machine->ram);
+
+    /* Initialize MCU */
+    object_initialize_child(OBJECT(machine), "mcu", &s->mcu, rxc->mcu_name);
+    object_property_set_link(OBJECT(&s->mcu), "main-bus", OBJECT(sysmem),
+                             &error_abort);
+    object_property_set_uint(OBJECT(&s->mcu), "xtal-frequency-hz",
+                             rxc->xtal_freq_hz, &error_abort);
+    object_property_set_bool(OBJECT(&s->mcu), "load-kernel",
+                             kernel_filename != NULL, &error_abort);
+
+    if (!kernel_filename) {
+        if (machine->firmware) {
+            rom_add_file_fixed(machine->firmware, RX62N_CFLASH_BASE, 0);
+        } else if (!qtest_enabled()) {
+            error_report("No bios or kernel specified");
+            exit(1);
+        }
+    }
+
+    qdev_realize(DEVICE(&s->mcu), NULL, &error_abort);
+
+    /* Load kernel and dtb */
+    if (kernel_filename) {
+        ram_addr_t kernel_offset;
+
+        /*
+         * The kernel image is loaded into
+         * the latter half of the SDRAM space.
+         */
+        kernel_offset = machine->ram_size / 2;
+        rx_load_image(RX_CPU(first_cpu), kernel_filename,
+                      SDRAM_BASE + kernel_offset, kernel_offset);
+        if (dtb_filename) {
+            ram_addr_t dtb_offset;
+            int dtb_size;
+            g_autofree void *dtb = load_device_tree(dtb_filename, &dtb_size);
+
+            if (dtb == NULL) {
+                error_report("Couldn't open dtb file %s", dtb_filename);
+                exit(1);
+            }
+            if (machine->kernel_cmdline &&
+                qemu_fdt_setprop_string(dtb, "/chosen", "bootargs",
+                                        machine->kernel_cmdline) < 0) {
+                error_report("Couldn't set /chosen/bootargs");
+                exit(1);
+            }
+            /* DTB is located at the end of SDRAM space. */
+            dtb_offset = machine->ram_size - dtb_size;
+            rom_add_blob_fixed("dtb", dtb, dtb_size,
+                               SDRAM_BASE + dtb_offset);
+            /* Set dtb address to R1 */
+            RX_CPU(first_cpu)->env.regs[1] = SDRAM_BASE + dtb_offset;
+        }
+    }
+}
+
+static void rx_gdbsim_class_init(ObjectClass *oc, void *data)
+{
+    MachineClass *mc = MACHINE_CLASS(oc);
+
+    mc->init = rx_gdbsim_init;
+    mc->default_cpu_type = TYPE_RX62N_CPU;
+    mc->default_ram_size = 16 * MiB;
+    mc->default_ram_id = "ext-sdram";
+}
+
+static void rx62n7_class_init(ObjectClass *oc, void *data)
+{
+    RxGdbSimMachineClass *rxc = RX_GDBSIM_MACHINE_CLASS(oc);
+    MachineClass *mc = MACHINE_CLASS(oc);
+
+    rxc->mcu_name = TYPE_R5F562N7_MCU;
+    rxc->xtal_freq_hz = 12 * 1000 * 1000;
+    mc->desc = "gdb simulator (R5F562N7 MCU and external RAM)";
+}
+
+static void rx62n8_class_init(ObjectClass *oc, void *data)
+{
+    RxGdbSimMachineClass *rxc = RX_GDBSIM_MACHINE_CLASS(oc);
+    MachineClass *mc = MACHINE_CLASS(oc);
+
+    rxc->mcu_name = TYPE_R5F562N8_MCU;
+    rxc->xtal_freq_hz = 12 * 1000 * 1000;
+    mc->desc = "gdb simulator (R5F562N8 MCU and external RAM)";
+}
+
+static const TypeInfo rx_gdbsim_types[] = {
+    {
+        .name = MACHINE_TYPE_NAME("gdbsim-r5f562n7"),
+        .parent = TYPE_RX_GDBSIM_MACHINE,
+        .class_init = rx62n7_class_init,
+    }, {
+        .name = MACHINE_TYPE_NAME("gdbsim-r5f562n8"),
+        .parent = TYPE_RX_GDBSIM_MACHINE,
+        .class_init = rx62n8_class_init,
+    }, {
+        .name = TYPE_RX_GDBSIM_MACHINE,
+        .parent = TYPE_MACHINE,
+        .instance_size = sizeof(RxGdbSimMachineState),
+        .class_size = sizeof(RxGdbSimMachineClass),
+        .class_init = rx_gdbsim_class_init,
+        .abstract = true,
+    }
+};
+
+DEFINE_TYPES(rx_gdbsim_types)
diff --git a/hw/rx/rx62n.c b/hw/rx/rx62n.c
new file mode 100644
index 000000000..fa5add9f9
--- /dev/null
+++ b/hw/rx/rx62n.c
@@ -0,0 +1,311 @@
+/*
+ * RX62N Microcontroller
+ *
+ * Datasheet: RX62N Group, RX621 Group User's Manual: Hardware
+ *            (Rev.1.40 R01UH0033EJ0140)
+ *
+ * Copyright (c) 2019 Yoshinori Sato
+ * Copyright (c) 2020 Philippe Mathieu-Daudé
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "qemu/error-report.h"
+#include "hw/rx/rx62n.h"
+#include "hw/loader.h"
+#include "hw/sysbus.h"
+#include "hw/qdev-properties.h"
+#include "sysemu/sysemu.h"
+#include "qom/object.h"
+
+/*
+ * RX62N Internal Memory
+ */
+#define RX62N_IRAM_BASE    0x00000000
+#define RX62N_DFLASH_BASE  0x00100000
+#define RX62N_CFLASH_BASE  0xfff80000
+
+/*
+ * RX62N Peripheral Addresses
+ * See the user's manual, section 5
+ */
+#define RX62N_ICU_BASE  0x00087000
+#define RX62N_TMR_BASE  0x00088200
+#define RX62N_CMT_BASE  0x00088000
+#define RX62N_SCI_BASE  0x00088240
+
+/*
+ * RX62N Peripheral IRQs
+ * See the user's manual, section 11
+ */
+#define RX62N_TMR_IRQ   174
+#define RX62N_CMT_IRQ   28
+#define RX62N_SCI_IRQ   214
+
+#define RX62N_XTAL_MIN_HZ  (8 * 1000 * 1000)
+#define RX62N_XTAL_MAX_HZ (14 * 1000 * 1000)
+#define RX62N_PCLK_MAX_HZ (50 * 1000 * 1000)
+
+struct RX62NClass {
+    /*< private >*/
+    DeviceClass parent_class;
+    /*< public >*/
+    const char *name;
+    uint64_t ram_size;
+    uint64_t rom_flash_size;
+    uint64_t data_flash_size;
+};
+typedef struct RX62NClass RX62NClass;
+
+DECLARE_CLASS_CHECKERS(RX62NClass, RX62N_MCU,
+                       TYPE_RX62N_MCU)
+
+/*
+ * IRQ -> IPR mapping table
+ * 0x00 - 0x91: IPR no (IPR00 to IPR91)
+ * 0xff: IPR not assigned
+ * See "11.3.1 Interrupt Vector Table" in the hardware manual.
+ */
+static const uint8_t ipr_table[NR_IRQS] = {
+    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 15 */
+    0x00, 0xff, 0xff, 0xff, 0xff, 0x01, 0xff, 0x02,
+    0xff, 0xff, 0xff, 0x03, 0x04, 0x05, 0x06, 0x07, /* 31 */
+    0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+    0x10, 0x11, 0x12, 0x13, 0x14, 0x14, 0x14, 0x14, /* 47 */
+    0x15, 0x15, 0x15, 0x15, 0xff, 0xff, 0xff, 0xff,
+    0x18, 0x18, 0x18, 0x18, 0x18, 0x1d, 0x1e, 0x1f, /* 63 */
+    0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
+    0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 79 */
+    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+    0xff, 0xff, 0x3a, 0x3b, 0x3c, 0xff, 0xff, 0xff, /* 95 */
+    0x40, 0xff, 0x44, 0x45, 0xff, 0xff, 0x48, 0xff,
+    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 111 */
+    0xff, 0xff, 0x51, 0x51, 0x51, 0x51, 0x52, 0x52,
+    0x52, 0x53, 0x53, 0x54, 0x54, 0x55, 0x55, 0x56, /* 127 */
+    0x56, 0x57, 0x57, 0x57, 0x57, 0x58, 0x59, 0x59,
+    0x59, 0x59, 0x5a, 0x5b, 0x5b, 0x5b, 0x5c, 0x5c, /* 143 */
+    0x5c, 0x5c, 0x5d, 0x5d, 0x5d, 0x5e, 0x5e, 0x5f,
+    0x5f, 0x60, 0x60, 0x61, 0x61, 0x62, 0x62, 0x62, /* 159 */
+    0x62, 0x63, 0x64, 0x64, 0x64, 0x64, 0x65, 0x66,
+    0x66, 0x66, 0x67, 0x67, 0x67, 0x67, 0x68, 0x68, /* 175 */
+    0x68, 0x69, 0x69, 0x69, 0x6a, 0x6a, 0x6a, 0x6b,
+    0x6b, 0x6b, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 191 */
+    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x70, 0x71,
+    0x72, 0x73, 0x74, 0x75, 0xff, 0xff, 0xff, 0xff, /* 207 */
+    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x80, 0x80,
+    0x80, 0x80, 0x81, 0x81, 0x81, 0x81, 0x82, 0x82, /* 223 */
+    0x82, 0x82, 0x83, 0x83, 0x83, 0x83, 0xff, 0xff,
+    0xff, 0xff, 0x85, 0x85, 0x85, 0x85, 0x86, 0x86, /* 239 */
+    0x86, 0x86, 0xff, 0xff, 0xff, 0xff, 0x88, 0x89,
+    0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, 0x90, 0x91, /* 255 */
+};
+
+/*
+ * Level-triggered IRQ list.
+ * IRQs not listed here are edge-triggered.
+ * See "11.3.1 Interrupt Vector Table" in the hardware manual.
+ */
+static const uint8_t levelirq[] = {
+     16,  21,  32,  44,  47,  48,  51,  64,  65,  66,
+     67,  68,  69,  70,  71,  72,  73,  74,  75,  76,
+     77,  78,  79,  90,  91, 170, 171, 172, 173, 214,
+    217, 218, 221, 222, 225, 226, 229, 234, 237, 238,
+    241, 246, 249, 250, 253,
+};
+
+static void register_icu(RX62NState *s)
+{
+    int i;
+    SysBusDevice *icu;
+
+    object_initialize_child(OBJECT(s), "icu", &s->icu, TYPE_RX_ICU);
+    icu = SYS_BUS_DEVICE(&s->icu);
+    qdev_prop_set_uint32(DEVICE(icu), "len-ipr-map", NR_IRQS);
+    for (i = 0; i < NR_IRQS; i++) {
+        char propname[32];
+        snprintf(propname, sizeof(propname), "ipr-map[%d]", i);
+        qdev_prop_set_uint32(DEVICE(icu), propname, ipr_table[i]);
+    }
+    qdev_prop_set_uint32(DEVICE(icu), "len-trigger-level",
+                         ARRAY_SIZE(levelirq));
+    for (i = 0; i < ARRAY_SIZE(levelirq); i++) {
+        char propname[32];
+        snprintf(propname, sizeof(propname), "trigger-level[%d]", i);
+        qdev_prop_set_uint32(DEVICE(icu), propname, levelirq[i]);
+    }
+
+    for (i = 0; i < NR_IRQS; i++) {
+        s->irq[i] = qdev_get_gpio_in(DEVICE(icu), i);
+    }
+    sysbus_realize(icu, &error_abort);
+    sysbus_connect_irq(icu, 0, qdev_get_gpio_in(DEVICE(&s->cpu), RX_CPU_IRQ));
+    sysbus_connect_irq(icu, 1, qdev_get_gpio_in(DEVICE(&s->cpu), RX_CPU_FIR));
+    sysbus_connect_irq(icu, 2, s->irq[SWI]);
+    sysbus_mmio_map(SYS_BUS_DEVICE(icu), 0, RX62N_ICU_BASE);
+}
+
+static void register_tmr(RX62NState *s, int unit)
+{
+    SysBusDevice *tmr;
+    int i, irqbase;
+
+    object_initialize_child(OBJECT(s), "tmr[*]",
+                            &s->tmr[unit], TYPE_RENESAS_TMR);
+    tmr = SYS_BUS_DEVICE(&s->tmr[unit]);
+    qdev_prop_set_uint64(DEVICE(tmr), "input-freq", s->pclk_freq_hz);
+    sysbus_realize(tmr, &error_abort);
+
+    irqbase = RX62N_TMR_IRQ + TMR_NR_IRQ * unit;
+    for (i = 0; i < TMR_NR_IRQ; i++) {
+        sysbus_connect_irq(tmr, i, s->irq[irqbase + i]);
+    }
+    sysbus_mmio_map(tmr, 0, RX62N_TMR_BASE + unit * 0x10);
+}
+
+static void register_cmt(RX62NState *s, int unit)
+{
+    SysBusDevice *cmt;
+    int i, irqbase;
+
+    object_initialize_child(OBJECT(s), "cmt[*]",
+                            &s->cmt[unit], TYPE_RENESAS_CMT);
+    cmt = SYS_BUS_DEVICE(&s->cmt[unit]);
+    qdev_prop_set_uint64(DEVICE(cmt), "input-freq", s->pclk_freq_hz);
+    sysbus_realize(cmt, &error_abort);
+
+    irqbase = RX62N_CMT_IRQ + CMT_NR_IRQ * unit;
+    for (i = 0; i < CMT_NR_IRQ; i++) {
+        sysbus_connect_irq(cmt, i, s->irq[irqbase + i]);
+    }
+    sysbus_mmio_map(cmt, 0, RX62N_CMT_BASE + unit * 0x10);
+}
+
+static void register_sci(RX62NState *s, int unit)
+{
+    SysBusDevice *sci;
+    int i, irqbase;
+
+    object_initialize_child(OBJECT(s), "sci[*]",
+                            &s->sci[unit], TYPE_RENESAS_SCI);
+    sci = SYS_BUS_DEVICE(&s->sci[unit]);
+    qdev_prop_set_chr(DEVICE(sci), "chardev", serial_hd(unit));
+    qdev_prop_set_uint64(DEVICE(sci), "input-freq", s->pclk_freq_hz);
+    sysbus_realize(sci, &error_abort);
+
+    irqbase = RX62N_SCI_IRQ + SCI_NR_IRQ * unit;
+    for (i = 0; i < SCI_NR_IRQ; i++) {
+        sysbus_connect_irq(sci, i, s->irq[irqbase + i]);
+    }
+    sysbus_mmio_map(sci, 0, RX62N_SCI_BASE + unit * 0x08);
+}
+
+static void rx62n_realize(DeviceState *dev, Error **errp)
+{
+    RX62NState *s = RX62N_MCU(dev);
+    RX62NClass *rxc = RX62N_MCU_GET_CLASS(dev);
+
+    if (s->xtal_freq_hz == 0) {
+        error_setg(errp, "\"xtal-frequency-hz\" property must be provided.");
+        return;
+    }
+    /* XTAL range: 8-14 MHz */
+    if (s->xtal_freq_hz < RX62N_XTAL_MIN_HZ
+        || s->xtal_freq_hz > RX62N_XTAL_MAX_HZ) {
+        error_setg(errp, "\"xtal-frequency-hz\" property is out of range.");
+        return;
+    }
+    /* Use a 4x fixed multiplier */
+    s->pclk_freq_hz = 4 * s->xtal_freq_hz;
+    /* PCLK range: 8-50 MHz */
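+    /*
+     * Note: with the fixed 4x multiplier, any crystal faster than
+     * 12.5 MHz (e.g. the allowed maximum of 14 MHz, giving 56 MHz)
+     * exceeds the 50 MHz PCLK limit and trips the assertion below.
+     * The gdbsim machines above use a 12 MHz crystal (48 MHz PCLK).
+     */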
+    assert(s->pclk_freq_hz <= RX62N_PCLK_MAX_HZ);
+
+    memory_region_init_ram(&s->iram, OBJECT(dev), "iram",
+                           rxc->ram_size, &error_abort);
+    memory_region_add_subregion(s->sysmem, RX62N_IRAM_BASE, &s->iram);
+    memory_region_init_rom(&s->d_flash, OBJECT(dev), "flash-data",
+                           rxc->data_flash_size, &error_abort);
+    memory_region_add_subregion(s->sysmem, RX62N_DFLASH_BASE, &s->d_flash);
+    memory_region_init_rom(&s->c_flash, OBJECT(dev), "flash-code",
+                           rxc->rom_flash_size, &error_abort);
+    memory_region_add_subregion(s->sysmem, RX62N_CFLASH_BASE, &s->c_flash);
+
+    /* Initialize CPU */
+    object_initialize_child(OBJECT(s), "cpu", &s->cpu, TYPE_RX62N_CPU);
+    qdev_realize(DEVICE(&s->cpu), NULL, &error_abort);
+
+    register_icu(s);
+    s->cpu.env.ack = qdev_get_gpio_in_named(DEVICE(&s->icu), "ack", 0);
+    register_tmr(s, 0);
+    register_tmr(s, 1);
+    register_cmt(s, 0);
+    register_cmt(s, 1);
+    register_sci(s, 0);
+}
+
+static Property rx62n_properties[] = {
+    DEFINE_PROP_LINK("main-bus", RX62NState, sysmem, TYPE_MEMORY_REGION,
+                     MemoryRegion *),
+    DEFINE_PROP_BOOL("load-kernel", RX62NState, kernel, false),
+    DEFINE_PROP_UINT32("xtal-frequency-hz", RX62NState, xtal_freq_hz, 0),
+    DEFINE_PROP_END_OF_LIST(),
+};
+
+static void rx62n_class_init(ObjectClass *klass, void *data)
+{
+    DeviceClass *dc = DEVICE_CLASS(klass);
+
+    dc->realize = rx62n_realize;
+    device_class_set_props(dc, rx62n_properties);
+}
+
+static void r5f562n7_class_init(ObjectClass *oc, void *data)
+{
+    RX62NClass *rxc = RX62N_MCU_CLASS(oc);
+
+    rxc->ram_size = 64 * KiB;
+    rxc->rom_flash_size = 384 * KiB;
+    rxc->data_flash_size = 32 * KiB;
+}
+
+static void r5f562n8_class_init(ObjectClass *oc, void *data)
+{
+    RX62NClass *rxc = RX62N_MCU_CLASS(oc);
+
+    rxc->ram_size = 96 * KiB;
+    rxc->rom_flash_size = 512 * KiB;
+    rxc->data_flash_size = 32 * KiB;
+}
+
+static const TypeInfo rx62n_types[] = {
+    {
+        .name = TYPE_R5F562N7_MCU,
+        .parent = TYPE_RX62N_MCU,
+        .class_init = r5f562n7_class_init,
+    }, {
+        .name = TYPE_R5F562N8_MCU,
+        .parent = TYPE_RX62N_MCU,
+        .class_init = r5f562n8_class_init,
+    }, {
+        .name = TYPE_RX62N_MCU,
+        .parent = TYPE_DEVICE,
+        .instance_size = sizeof(RX62NState),
+        .class_size = sizeof(RX62NClass),
+        .class_init = rx62n_class_init,
+        .abstract = true,
+    }
+};
+
+DEFINE_TYPES(rx62n_types)
diff --git a/hw/s390x/3270-ccw.c b/hw/s390x/3270-ccw.c
new file mode 100644
index 000000000..69e6783ad
--- /dev/null
+++ b/hw/s390x/3270-ccw.c
@@ -0,0 +1,181 @@
+/*
+ * Emulated ccw-attached 3270 implementation
+ *
+ * Copyright 2017 IBM Corp.
+ * Author(s): Yang Chen
+ *            Jing Liu
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or (at
+ * your option) any later version. See the COPYING file in the top-level
+ * directory.
+ */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/module.h" +#include "hw/s390x/css.h" +#include "hw/s390x/css-bridge.h" +#include "hw/qdev-properties.h" +#include "hw/s390x/3270-ccw.h" + +/* Handle READ ccw commands from guest */ +static int handle_payload_3270_read(EmulatedCcw3270Device *dev, CCW1 *ccw) +{ + EmulatedCcw3270Class *ck = EMULATED_CCW_3270_GET_CLASS(dev); + CcwDevice *ccw_dev = CCW_DEVICE(dev); + int len; + + if (!ccw->cda) { + return -EFAULT; + } + + len = ck->read_payload_3270(dev); + if (len < 0) { + return len; + } + ccw_dev->sch->curr_status.scsw.count = ccw->count - len; + + return 0; +} + +/* Handle WRITE ccw commands to write data to client */ +static int handle_payload_3270_write(EmulatedCcw3270Device *dev, CCW1 *ccw) +{ + EmulatedCcw3270Class *ck = EMULATED_CCW_3270_GET_CLASS(dev); + CcwDevice *ccw_dev = CCW_DEVICE(dev); + int len; + + if (!ccw->cda) { + return -EFAULT; + } + + len = ck->write_payload_3270(dev, ccw->cmd_code); + + if (len <= 0) { + return len ? len : -EIO; + } + + ccw_dev->sch->curr_status.scsw.count = ccw->count - len; + return 0; +} + +static int emulated_ccw_3270_cb(SubchDev *sch, CCW1 ccw) +{ + int rc = 0; + EmulatedCcw3270Device *dev = sch->driver_data; + + switch (ccw.cmd_code) { + case TC_WRITESF: + case TC_WRITE: + case TC_EWRITE: + case TC_EWRITEA: + rc = handle_payload_3270_write(dev, &ccw); + break; + case TC_RDBUF: + case TC_READMOD: + rc = handle_payload_3270_read(dev, &ccw); + break; + default: + rc = -ENOSYS; + break; + } + + if (rc == -EIO) { + /* I/O error, specific devices generate specific conditions */ + SCHIB *schib = &sch->curr_status; + + sch->curr_status.scsw.dstat = SCSW_DSTAT_UNIT_CHECK; + sch->sense_data[0] = 0x40; /* intervention-req */ + schib->scsw.ctrl &= ~SCSW_ACTL_START_PEND; + schib->scsw.ctrl &= ~SCSW_CTRL_MASK_STCTL; + schib->scsw.ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY | + SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND; + } + + return rc; +} + +static void emulated_ccw_3270_realize(DeviceState *ds, Error **errp) +{ + uint16_t chpid; + EmulatedCcw3270Device *dev = EMULATED_CCW_3270(ds); + EmulatedCcw3270Class *ck = EMULATED_CCW_3270_GET_CLASS(dev); + CcwDevice *cdev = CCW_DEVICE(ds); + CCWDeviceClass *cdk = CCW_DEVICE_GET_CLASS(cdev); + SubchDev *sch; + Error *err = NULL; + + sch = css_create_sch(cdev->devno, errp); + if (!sch) { + return; + } + + if (!ck->init) { + goto out_err; + } + + sch->driver_data = dev; + cdev->sch = sch; + chpid = css_find_free_chpid(sch->cssid); + + if (chpid > MAX_CHPID) { + error_setg(&err, "No available chpid to use."); + goto out_err; + } + + sch->id.reserved = 0xff; + sch->id.cu_type = EMULATED_CCW_3270_CU_TYPE; + css_sch_build_virtual_schib(sch, (uint8_t)chpid, + EMULATED_CCW_3270_CHPID_TYPE); + sch->do_subchannel_work = do_subchannel_work_virtual; + sch->ccw_cb = emulated_ccw_3270_cb; + sch->irb_cb = build_irb_virtual; + + ck->init(dev, &err); + if (err) { + goto out_err; + } + + cdk->realize(cdev, &err); + if (err) { + goto out_err; + } + + return; + +out_err: + error_propagate(errp, err); + css_subch_assign(sch->cssid, sch->ssid, sch->schid, sch->devno, NULL); + cdev->sch = NULL; + g_free(sch); +} + +static Property emulated_ccw_3270_properties[] = { + DEFINE_PROP_END_OF_LIST(), +}; + +static void emulated_ccw_3270_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + device_class_set_props(dc, emulated_ccw_3270_properties); + dc->realize = emulated_ccw_3270_realize; + dc->hotpluggable = false; + 
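+    /* The 3270 is a terminal device, so classify it as a display device. */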
set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories); +} + +static const TypeInfo emulated_ccw_3270_info = { + .name = TYPE_EMULATED_CCW_3270, + .parent = TYPE_CCW_DEVICE, + .instance_size = sizeof(EmulatedCcw3270Device), + .class_init = emulated_ccw_3270_class_init, + .class_size = sizeof(EmulatedCcw3270Class), + .abstract = true, +}; + +static void emulated_ccw_register(void) +{ + type_register_static(&emulated_ccw_3270_info); +} + +type_init(emulated_ccw_register) diff --git a/hw/s390x/Kconfig b/hw/s390x/Kconfig new file mode 100644 index 000000000..5e7d8a2ba --- /dev/null +++ b/hw/s390x/Kconfig @@ -0,0 +1,12 @@ +config S390_CCW_VIRTIO + bool + imply VIRTIO_PCI + imply TERMINAL3270 + imply VFIO_AP + imply VFIO_CCW + imply WDT_DIAG288 + select PCI + select S390_FLIC + select SCLPCONSOLE + select VIRTIO_CCW + select MSI_NONBROKEN diff --git a/hw/s390x/ap-bridge.c b/hw/s390x/ap-bridge.c new file mode 100644 index 000000000..ef8fa2b15 --- /dev/null +++ b/hw/s390x/ap-bridge.c @@ -0,0 +1,90 @@ +/* + * ap bridge + * + * Copyright 2018 IBM Corp. + * + * This work is licensed under the terms of the GNU GPL, version 2 or (at + * your option) any later version. See the COPYING file in the top-level + * directory. + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "hw/sysbus.h" +#include "qemu/bitops.h" +#include "qemu/module.h" +#include "hw/s390x/ap-bridge.h" +#include "cpu.h" + +static char *ap_bus_get_dev_path(DeviceState *dev) +{ + /* at most one */ + return g_strdup_printf("/1"); +} + +static void ap_bus_class_init(ObjectClass *oc, void *data) +{ + BusClass *k = BUS_CLASS(oc); + + k->get_dev_path = ap_bus_get_dev_path; + /* More than one ap device does not make sense */ + k->max_dev = 1; +} + +static const TypeInfo ap_bus_info = { + .name = TYPE_AP_BUS, + .parent = TYPE_BUS, + .instance_size = 0, + .class_init = ap_bus_class_init, +}; + +void s390_init_ap(void) +{ + DeviceState *dev; + BusState *bus; + + /* If no AP instructions then no need for AP bridge */ + if (!s390_has_feat(S390_FEAT_AP)) { + return; + } + + /* Create bridge device */ + dev = qdev_new(TYPE_AP_BRIDGE); + object_property_add_child(qdev_get_machine(), TYPE_AP_BRIDGE, + OBJECT(dev)); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + + /* Create bus on bridge device */ + bus = qbus_new(TYPE_AP_BUS, dev, TYPE_AP_BUS); + + /* Enable hotplugging */ + qbus_set_hotplug_handler(bus, OBJECT(dev)); + } + +static void ap_bridge_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(oc); + + hc->unplug = qdev_simple_device_unplug_cb; + set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); +} + +static const TypeInfo ap_bridge_info = { + .name = TYPE_AP_BRIDGE, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = 0, + .class_init = ap_bridge_class_init, + .interfaces = (InterfaceInfo[]) { + { TYPE_HOTPLUG_HANDLER }, + { } + } +}; + +static void ap_register(void) +{ + type_register_static(&ap_bridge_info); + type_register_static(&ap_bus_info); +} + +type_init(ap_register) diff --git a/hw/s390x/ap-device.c b/hw/s390x/ap-device.c new file mode 100644 index 000000000..237d1f19c --- /dev/null +++ b/hw/s390x/ap-device.c @@ -0,0 +1,37 @@ +/* + * Adjunct Processor (AP) matrix device + * + * Copyright 2018 IBM Corp. + * + * This work is licensed under the terms of the GNU GPL, version 2 or (at + * your option) any later version. See the COPYING file in the top-level + * directory. 
+ */ +#include "qemu/osdep.h" +#include "qemu/module.h" +#include "qapi/error.h" +#include "hw/s390x/ap-device.h" + +static void ap_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->desc = "AP device class"; + dc->hotpluggable = false; +} + +static const TypeInfo ap_device_info = { + .name = TYPE_AP_DEVICE, + .parent = TYPE_DEVICE, + .instance_size = sizeof(APDevice), + .class_size = sizeof(DeviceClass), + .class_init = ap_class_init, + .abstract = true, +}; + +static void ap_device_register(void) +{ + type_register_static(&ap_device_info); +} + +type_init(ap_device_register) diff --git a/hw/s390x/ccw-device.c b/hw/s390x/ccw-device.c new file mode 100644 index 000000000..95f269ab4 --- /dev/null +++ b/hw/s390x/ccw-device.c @@ -0,0 +1,89 @@ +/* + * Common device infrastructure for devices in the virtual css + * + * Copyright 2016 IBM Corp. + * Author(s): Jing Liu + * + * This work is licensed under the terms of the GNU GPL, version 2 or (at + * your option) any later version. See the COPYING file in the top-level + * directory. + */ + +#include "qemu/osdep.h" +#include "ccw-device.h" +#include "hw/qdev-properties.h" +#include "qemu/module.h" + +static void ccw_device_refill_ids(CcwDevice *dev) +{ + SubchDev *sch = dev->sch; + + assert(sch); + + dev->dev_id.cssid = sch->cssid; + dev->dev_id.ssid = sch->ssid; + dev->dev_id.devid = sch->devno; + dev->dev_id.valid = true; + + dev->subch_id.cssid = sch->cssid; + dev->subch_id.ssid = sch->ssid; + dev->subch_id.devid = sch->schid; + dev->subch_id.valid = true; +} + +static void ccw_device_realize(CcwDevice *dev, Error **errp) +{ + ccw_device_refill_ids(dev); +} + +static Property ccw_device_properties[] = { + DEFINE_PROP_CSS_DEV_ID("devno", CcwDevice, devno), + DEFINE_PROP_CSS_DEV_ID_RO("dev_id", CcwDevice, dev_id), + DEFINE_PROP_CSS_DEV_ID_RO("subch_id", CcwDevice, subch_id), + DEFINE_PROP_END_OF_LIST(), +}; + +static void ccw_device_reset(DeviceState *d) +{ + CcwDevice *ccw_dev = CCW_DEVICE(d); + + css_reset_sch(ccw_dev->sch); +} + +static void ccw_device_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + CCWDeviceClass *k = CCW_DEVICE_CLASS(klass); + + k->realize = ccw_device_realize; + k->refill_ids = ccw_device_refill_ids; + device_class_set_props(dc, ccw_device_properties); + dc->reset = ccw_device_reset; + dc->bus_type = TYPE_VIRTUAL_CSS_BUS; +} + +const VMStateDescription vmstate_ccw_dev = { + .name = "s390_ccw_dev", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_STRUCT_POINTER(sch, CcwDevice, vmstate_subch_dev, SubchDev), + VMSTATE_END_OF_LIST() + } +}; + +static const TypeInfo ccw_device_info = { + .name = TYPE_CCW_DEVICE, + .parent = TYPE_DEVICE, + .instance_size = sizeof(CcwDevice), + .class_size = sizeof(CCWDeviceClass), + .class_init = ccw_device_class_init, + .abstract = true, +}; + +static void ccw_device_register(void) +{ + type_register_static(&ccw_device_info); +} + +type_init(ccw_device_register) diff --git a/hw/s390x/ccw-device.h b/hw/s390x/ccw-device.h new file mode 100644 index 000000000..6dff95225 --- /dev/null +++ b/hw/s390x/ccw-device.h @@ -0,0 +1,52 @@ +/* + * Common device infrastructure for devices in the virtual css + * + * Copyright 2016 IBM Corp. + * Author(s): Jing Liu + * + * This work is licensed under the terms of the GNU GPL, version 2 or (at + * your option) any later version. See the COPYING file in the top-level + * directory. 
+ */
+
+#ifndef HW_S390X_CCW_DEVICE_H
+#define HW_S390X_CCW_DEVICE_H
+#include "qom/object.h"
+#include "hw/qdev-core.h"
+#include "hw/s390x/css.h"
+#include "hw/s390x/css-bridge.h"
+
+struct CcwDevice {
+    DeviceState parent_obj;
+    SubchDev *sch;
+    /* <cssid>.<ssid>.<device number> */
+    /* The user-set busid of the virtual ccw device. */
+    CssDevId devno;
+    /* The actual busid of the virtual ccw device. */
+    CssDevId dev_id;
+    /* The actual busid of the virtual subchannel. */
+    CssDevId subch_id;
+};
+typedef struct CcwDevice CcwDevice;
+
+extern const VMStateDescription vmstate_ccw_dev;
+#define VMSTATE_CCW_DEVICE(_field, _state)                     \
+    VMSTATE_STRUCT(_field, _state, 1, vmstate_ccw_dev, CcwDevice)
+
+struct CCWDeviceClass {
+    DeviceClass parent_class;
+    void (*unplug)(HotplugHandler *, DeviceState *, Error **);
+    void (*realize)(CcwDevice *, Error **);
+    void (*refill_ids)(CcwDevice *);
+};
+
+static inline CcwDevice *to_ccw_dev_fast(DeviceState *d)
+{
+    return container_of(d, CcwDevice, parent_obj);
+}
+
+#define TYPE_CCW_DEVICE "ccw-device"
+
+OBJECT_DECLARE_TYPE(CcwDevice, CCWDeviceClass, CCW_DEVICE)
+
+#endif
diff --git a/hw/s390x/css-bridge.c b/hw/s390x/css-bridge.c
new file mode 100644
index 000000000..4017081d4
--- /dev/null
+++ b/hw/s390x/css-bridge.c
@@ -0,0 +1,166 @@
+/*
+ * css bridge implementation
+ *
+ * Copyright 2012,2016 IBM Corp.
+ * Author(s): Cornelia Huck
+ *            Pierre Morel
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or (at
+ * your option) any later version. See the COPYING file in the top-level
+ * directory.
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "hw/hotplug.h"
+#include "hw/qdev-properties.h"
+#include "hw/sysbus.h"
+#include "qemu/bitops.h"
+#include "qemu/module.h"
+#include "hw/s390x/css.h"
+#include "ccw-device.h"
+#include "hw/s390x/css-bridge.h"
+
+/*
+ * Invoke device-specific unplug handler, disable the subchannel
+ * (including sending a channel report to the guest) and remove the
+ * device from the virtual css bus.
+ */
+static void ccw_device_unplug(HotplugHandler *hotplug_dev,
+                              DeviceState *dev, Error **errp)
+{
+    CcwDevice *ccw_dev = CCW_DEVICE(dev);
+    CCWDeviceClass *k = CCW_DEVICE_GET_CLASS(ccw_dev);
+    SubchDev *sch = ccw_dev->sch;
+    Error *err = NULL;
+
+    if (k->unplug) {
+        k->unplug(hotplug_dev, dev, &err);
+        if (err) {
+            error_propagate(errp, err);
+            return;
+        }
+    }
+
+    /*
+     * We should arrive here only for device_del, since we don't support
+     * direct hot(un)plug of channels.
+     */
+    assert(sch != NULL);
+    /* Subchannel is now disabled and no longer valid. */
+    sch->curr_status.pmcw.flags &= ~(PMCW_FLAGS_MASK_ENA |
+                                     PMCW_FLAGS_MASK_DNV);
+
+    css_generate_sch_crws(sch->cssid, sch->ssid, sch->schid, 1, 0);
+
+    qdev_unrealize(dev);
+}
+
+static void virtual_css_bus_reset(BusState *qbus)
+{
+    /* This should actually be modelled via the generic css */
+    css_reset();
+}
+
+static char *virtual_css_bus_get_dev_path(DeviceState *dev)
+{
+    CcwDevice *ccw_dev = CCW_DEVICE(dev);
+    SubchDev *sch = ccw_dev->sch;
+    VirtualCssBridge *bridge =
+        VIRTUAL_CSS_BRIDGE(qdev_get_parent_bus(dev)->parent);
+
+    /*
+     * We can't provide a dev path for backward compatibility on
+     * older machines, as it is visible in the migration stream.
+     */
+    return bridge->css_dev_path ?
+ g_strdup_printf("/%02x.%1x.%04x", sch->cssid, sch->ssid, sch->devno) : + NULL; +} + +static void virtual_css_bus_class_init(ObjectClass *klass, void *data) +{ + BusClass *k = BUS_CLASS(klass); + + k->reset = virtual_css_bus_reset; + k->get_dev_path = virtual_css_bus_get_dev_path; +} + +static const TypeInfo virtual_css_bus_info = { + .name = TYPE_VIRTUAL_CSS_BUS, + .parent = TYPE_BUS, + .instance_size = sizeof(VirtualCssBus), + .class_init = virtual_css_bus_class_init, +}; + +VirtualCssBus *virtual_css_bus_init(void) +{ + VirtualCssBus *cbus; + BusState *bus; + DeviceState *dev; + + /* Create bridge device */ + dev = qdev_new(TYPE_VIRTUAL_CSS_BRIDGE); + object_property_add_child(qdev_get_machine(), TYPE_VIRTUAL_CSS_BRIDGE, + OBJECT(dev)); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + + /* Create bus on bridge device */ + bus = qbus_new(TYPE_VIRTUAL_CSS_BUS, dev, "virtual-css"); + cbus = VIRTUAL_CSS_BUS(bus); + + /* Enable hotplugging */ + qbus_set_hotplug_handler(bus, OBJECT(dev)); + + css_register_io_adapters(CSS_IO_ADAPTER_VIRTIO, true, false, + 0, &error_abort); + + return cbus; + } + +/***************** Virtual-css Bus Bridge Device ********************/ + +static Property virtual_css_bridge_properties[] = { + DEFINE_PROP_BOOL("css_dev_path", VirtualCssBridge, css_dev_path, + true), + DEFINE_PROP_END_OF_LIST(), +}; + +static bool prop_get_true(Object *obj, Error **errp) +{ + return true; +} + +static void virtual_css_bridge_class_init(ObjectClass *klass, void *data) +{ + HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(klass); + DeviceClass *dc = DEVICE_CLASS(klass); + + hc->unplug = ccw_device_unplug; + set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); + device_class_set_props(dc, virtual_css_bridge_properties); + object_class_property_add_bool(klass, "cssid-unrestricted", + prop_get_true, NULL); + object_class_property_set_description(klass, "cssid-unrestricted", + "A css device can use any cssid, regardless whether virtual" + " or not (read only, always true)"); +} + +static const TypeInfo virtual_css_bridge_info = { + .name = TYPE_VIRTUAL_CSS_BRIDGE, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(VirtualCssBridge), + .class_init = virtual_css_bridge_class_init, + .interfaces = (InterfaceInfo[]) { + { TYPE_HOTPLUG_HANDLER }, + { } + } +}; + +static void virtual_css_register(void) +{ + type_register_static(&virtual_css_bridge_info); + type_register_static(&virtual_css_bus_info); +} + +type_init(virtual_css_register) diff --git a/hw/s390x/css.c b/hw/s390x/css.c new file mode 100644 index 000000000..7d9523f81 --- /dev/null +++ b/hw/s390x/css.c @@ -0,0 +1,2673 @@ +/* + * Channel subsystem base support. + * + * Copyright 2012 IBM Corp. + * Author(s): Cornelia Huck + * + * This work is licensed under the terms of the GNU GPL, version 2 or (at + * your option) any later version. See the COPYING file in the top-level + * directory. 
+ */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qapi/visitor.h" +#include "qemu/bitops.h" +#include "qemu/error-report.h" +#include "exec/address-spaces.h" +#include "hw/s390x/ioinst.h" +#include "hw/qdev-properties.h" +#include "hw/s390x/css.h" +#include "trace.h" +#include "hw/s390x/s390_flic.h" +#include "hw/s390x/s390-virtio-ccw.h" +#include "hw/s390x/s390-ccw.h" + +typedef struct CrwContainer { + CRW crw; + QTAILQ_ENTRY(CrwContainer) sibling; +} CrwContainer; + +static const VMStateDescription vmstate_crw = { + .name = "s390_crw", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT16(flags, CRW), + VMSTATE_UINT16(rsid, CRW), + VMSTATE_END_OF_LIST() + }, +}; + +static const VMStateDescription vmstate_crw_container = { + .name = "s390_crw_container", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_STRUCT(crw, CrwContainer, 0, vmstate_crw, CRW), + VMSTATE_END_OF_LIST() + }, +}; + +typedef struct ChpInfo { + uint8_t in_use; + uint8_t type; + uint8_t is_virtual; +} ChpInfo; + +static const VMStateDescription vmstate_chp_info = { + .name = "s390_chp_info", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT8(in_use, ChpInfo), + VMSTATE_UINT8(type, ChpInfo), + VMSTATE_UINT8(is_virtual, ChpInfo), + VMSTATE_END_OF_LIST() + } +}; + +typedef struct SubchSet { + SubchDev *sch[MAX_SCHID + 1]; + unsigned long schids_used[BITS_TO_LONGS(MAX_SCHID + 1)]; + unsigned long devnos_used[BITS_TO_LONGS(MAX_SCHID + 1)]; +} SubchSet; + +static const VMStateDescription vmstate_scsw = { + .name = "s390_scsw", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT16(flags, SCSW), + VMSTATE_UINT16(ctrl, SCSW), + VMSTATE_UINT32(cpa, SCSW), + VMSTATE_UINT8(dstat, SCSW), + VMSTATE_UINT8(cstat, SCSW), + VMSTATE_UINT16(count, SCSW), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_pmcw = { + .name = "s390_pmcw", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(intparm, PMCW), + VMSTATE_UINT16(flags, PMCW), + VMSTATE_UINT16(devno, PMCW), + VMSTATE_UINT8(lpm, PMCW), + VMSTATE_UINT8(pnom, PMCW), + VMSTATE_UINT8(lpum, PMCW), + VMSTATE_UINT8(pim, PMCW), + VMSTATE_UINT16(mbi, PMCW), + VMSTATE_UINT8(pom, PMCW), + VMSTATE_UINT8(pam, PMCW), + VMSTATE_UINT8_ARRAY(chpid, PMCW, 8), + VMSTATE_UINT32(chars, PMCW), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_schib = { + .name = "s390_schib", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_STRUCT(pmcw, SCHIB, 0, vmstate_pmcw, PMCW), + VMSTATE_STRUCT(scsw, SCHIB, 0, vmstate_scsw, SCSW), + VMSTATE_UINT64(mba, SCHIB), + VMSTATE_UINT8_ARRAY(mda, SCHIB, 4), + VMSTATE_END_OF_LIST() + } +}; + + +static const VMStateDescription vmstate_ccw1 = { + .name = "s390_ccw1", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT8(cmd_code, CCW1), + VMSTATE_UINT8(flags, CCW1), + VMSTATE_UINT16(count, CCW1), + VMSTATE_UINT32(cda, CCW1), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_ciw = { + .name = "s390_ciw", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT8(type, CIW), + VMSTATE_UINT8(command, CIW), + VMSTATE_UINT16(count, CIW), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_sense_id = { + .name = "s390_sense_id", + .version_id = 1, + .minimum_version_id 
= 1, + .fields = (VMStateField[]) { + VMSTATE_UINT8(reserved, SenseId), + VMSTATE_UINT16(cu_type, SenseId), + VMSTATE_UINT8(cu_model, SenseId), + VMSTATE_UINT16(dev_type, SenseId), + VMSTATE_UINT8(dev_model, SenseId), + VMSTATE_UINT8(unused, SenseId), + VMSTATE_STRUCT_ARRAY(ciw, SenseId, MAX_CIWS, 0, vmstate_ciw, CIW), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_orb = { + .name = "s390_orb", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(intparm, ORB), + VMSTATE_UINT16(ctrl0, ORB), + VMSTATE_UINT8(lpm, ORB), + VMSTATE_UINT8(ctrl1, ORB), + VMSTATE_UINT32(cpa, ORB), + VMSTATE_END_OF_LIST() + } +}; + +static bool vmstate_schdev_orb_needed(void *opaque) +{ + return css_migration_enabled(); +} + +static const VMStateDescription vmstate_schdev_orb = { + .name = "s390_subch_dev/orb", + .version_id = 1, + .minimum_version_id = 1, + .needed = vmstate_schdev_orb_needed, + .fields = (VMStateField[]) { + VMSTATE_STRUCT(orb, SubchDev, 1, vmstate_orb, ORB), + VMSTATE_END_OF_LIST() + } +}; + +static int subch_dev_post_load(void *opaque, int version_id); +static int subch_dev_pre_save(void *opaque); + +const char err_hint_devno[] = "Devno mismatch, tried to load wrong section!" + " Likely reason: some sequences of plug and unplug can break" + " migration for machine versions prior to 2.7 (known design flaw)."; + +const VMStateDescription vmstate_subch_dev = { + .name = "s390_subch_dev", + .version_id = 1, + .minimum_version_id = 1, + .post_load = subch_dev_post_load, + .pre_save = subch_dev_pre_save, + .fields = (VMStateField[]) { + VMSTATE_UINT8_EQUAL(cssid, SubchDev, "Bug!"), + VMSTATE_UINT8_EQUAL(ssid, SubchDev, "Bug!"), + VMSTATE_UINT16(migrated_schid, SubchDev), + VMSTATE_UINT16_EQUAL(devno, SubchDev, err_hint_devno), + VMSTATE_BOOL(thinint_active, SubchDev), + VMSTATE_STRUCT(curr_status, SubchDev, 0, vmstate_schib, SCHIB), + VMSTATE_UINT8_ARRAY(sense_data, SubchDev, 32), + VMSTATE_UINT64(channel_prog, SubchDev), + VMSTATE_STRUCT(last_cmd, SubchDev, 0, vmstate_ccw1, CCW1), + VMSTATE_BOOL(last_cmd_valid, SubchDev), + VMSTATE_STRUCT(id, SubchDev, 0, vmstate_sense_id, SenseId), + VMSTATE_BOOL(ccw_fmt_1, SubchDev), + VMSTATE_UINT8(ccw_no_data_cnt, SubchDev), + VMSTATE_END_OF_LIST() + }, + .subsections = (const VMStateDescription * []) { + &vmstate_schdev_orb, + NULL + } +}; + +typedef struct IndAddrPtrTmp { + IndAddr **parent; + uint64_t addr; + int32_t len; +} IndAddrPtrTmp; + +static int post_load_ind_addr(void *opaque, int version_id) +{ + IndAddrPtrTmp *ptmp = opaque; + IndAddr **ind_addr = ptmp->parent; + + if (ptmp->len != 0) { + *ind_addr = get_indicator(ptmp->addr, ptmp->len); + } else { + *ind_addr = NULL; + } + return 0; +} + +static int pre_save_ind_addr(void *opaque) +{ + IndAddrPtrTmp *ptmp = opaque; + IndAddr *ind_addr = *(ptmp->parent); + + if (ind_addr != NULL) { + ptmp->len = ind_addr->len; + ptmp->addr = ind_addr->addr; + } else { + ptmp->len = 0; + ptmp->addr = 0L; + } + + return 0; +} + +const VMStateDescription vmstate_ind_addr_tmp = { + .name = "s390_ind_addr_tmp", + .pre_save = pre_save_ind_addr, + .post_load = post_load_ind_addr, + + .fields = (VMStateField[]) { + VMSTATE_INT32(len, IndAddrPtrTmp), + VMSTATE_UINT64(addr, IndAddrPtrTmp), + VMSTATE_END_OF_LIST() + } +}; + +const VMStateDescription vmstate_ind_addr = { + .name = "s390_ind_addr_tmp", + .fields = (VMStateField[]) { + VMSTATE_WITH_TMP(IndAddr*, IndAddrPtrTmp, vmstate_ind_addr_tmp), + VMSTATE_END_OF_LIST() + } +}; + +typedef struct CssImage { + 
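+    /* One SubchSet per subchannel set id (ssid 0..MAX_SSID). */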
SubchSet *sch_set[MAX_SSID + 1]; + ChpInfo chpids[MAX_CHPID + 1]; +} CssImage; + +static const VMStateDescription vmstate_css_img = { + .name = "s390_css_img", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + /* Subchannel sets have no relevant state. */ + VMSTATE_STRUCT_ARRAY(chpids, CssImage, MAX_CHPID + 1, 0, + vmstate_chp_info, ChpInfo), + VMSTATE_END_OF_LIST() + } + +}; + +typedef struct IoAdapter { + uint32_t id; + uint8_t type; + uint8_t isc; + uint8_t flags; +} IoAdapter; + +typedef struct ChannelSubSys { + QTAILQ_HEAD(, CrwContainer) pending_crws; + bool sei_pending; + bool do_crw_mchk; + bool crws_lost; + uint8_t max_cssid; + uint8_t max_ssid; + bool chnmon_active; + uint64_t chnmon_area; + CssImage *css[MAX_CSSID + 1]; + uint8_t default_cssid; + /* don't migrate, see css_register_io_adapters */ + IoAdapter *io_adapters[CSS_IO_ADAPTER_TYPE_NUMS][MAX_ISC + 1]; + /* don't migrate, see get_indicator and IndAddrPtrTmp */ + QTAILQ_HEAD(, IndAddr) indicator_addresses; +} ChannelSubSys; + +static const VMStateDescription vmstate_css = { + .name = "s390_css", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_QTAILQ_V(pending_crws, ChannelSubSys, 1, vmstate_crw_container, + CrwContainer, sibling), + VMSTATE_BOOL(sei_pending, ChannelSubSys), + VMSTATE_BOOL(do_crw_mchk, ChannelSubSys), + VMSTATE_BOOL(crws_lost, ChannelSubSys), + /* These were kind of migrated by virtio */ + VMSTATE_UINT8(max_cssid, ChannelSubSys), + VMSTATE_UINT8(max_ssid, ChannelSubSys), + VMSTATE_BOOL(chnmon_active, ChannelSubSys), + VMSTATE_UINT64(chnmon_area, ChannelSubSys), + VMSTATE_ARRAY_OF_POINTER_TO_STRUCT(css, ChannelSubSys, MAX_CSSID + 1, + 0, vmstate_css_img, CssImage), + VMSTATE_UINT8(default_cssid, ChannelSubSys), + VMSTATE_END_OF_LIST() + } +}; + +static ChannelSubSys channel_subsys = { + .pending_crws = QTAILQ_HEAD_INITIALIZER(channel_subsys.pending_crws), + .do_crw_mchk = true, + .sei_pending = false, + .crws_lost = false, + .chnmon_active = false, + .indicator_addresses = + QTAILQ_HEAD_INITIALIZER(channel_subsys.indicator_addresses), +}; + +static int subch_dev_pre_save(void *opaque) +{ + SubchDev *s = opaque; + + /* Prepare remote_schid for save */ + s->migrated_schid = s->schid; + + return 0; +} + +static int subch_dev_post_load(void *opaque, int version_id) +{ + + SubchDev *s = opaque; + + /* Re-assign the subchannel to remote_schid if necessary */ + if (s->migrated_schid != s->schid) { + if (css_find_subch(true, s->cssid, s->ssid, s->schid) == s) { + /* + * Cleanup the slot before moving to s->migrated_schid provided + * it still belongs to us, i.e. it was not changed by previous + * invocation of this function. + */ + css_subch_assign(s->cssid, s->ssid, s->schid, s->devno, NULL); + } + /* It's OK to re-assign without a prior de-assign. */ + s->schid = s->migrated_schid; + css_subch_assign(s->cssid, s->ssid, s->schid, s->devno, s); + } + + if (css_migration_enabled()) { + /* No compat voodoo to do ;) */ + return 0; + } + /* + * Hack alert. If we don't migrate the channel subsystem status + * we still need to find out if the guest enabled mss/mcss-e. + * If the subchannel is enabled, it certainly was able to access it, + * so adjust the max_ssid/max_cssid values for relevant ssid/cssid + * values. This is not watertight, but better than nothing. 
+ */ + if (s->curr_status.pmcw.flags & PMCW_FLAGS_MASK_ENA) { + if (s->ssid) { + channel_subsys.max_ssid = MAX_SSID; + } + if (s->cssid != channel_subsys.default_cssid) { + channel_subsys.max_cssid = MAX_CSSID; + } + } + return 0; +} + +void css_register_vmstate(void) +{ + vmstate_register(NULL, 0, &vmstate_css, &channel_subsys); +} + +IndAddr *get_indicator(hwaddr ind_addr, int len) +{ + IndAddr *indicator; + + QTAILQ_FOREACH(indicator, &channel_subsys.indicator_addresses, sibling) { + if (indicator->addr == ind_addr) { + indicator->refcnt++; + return indicator; + } + } + indicator = g_new0(IndAddr, 1); + indicator->addr = ind_addr; + indicator->len = len; + indicator->refcnt = 1; + QTAILQ_INSERT_TAIL(&channel_subsys.indicator_addresses, + indicator, sibling); + return indicator; +} + +static int s390_io_adapter_map(AdapterInfo *adapter, uint64_t map_addr, + bool do_map) +{ + S390FLICState *fs = s390_get_flic(); + S390FLICStateClass *fsc = s390_get_flic_class(fs); + + return fsc->io_adapter_map(fs, adapter->adapter_id, map_addr, do_map); +} + +void release_indicator(AdapterInfo *adapter, IndAddr *indicator) +{ + assert(indicator->refcnt > 0); + indicator->refcnt--; + if (indicator->refcnt > 0) { + return; + } + QTAILQ_REMOVE(&channel_subsys.indicator_addresses, indicator, sibling); + if (indicator->map) { + s390_io_adapter_map(adapter, indicator->map, false); + } + g_free(indicator); +} + +int map_indicator(AdapterInfo *adapter, IndAddr *indicator) +{ + int ret; + + if (indicator->map) { + return 0; /* already mapped is not an error */ + } + indicator->map = indicator->addr; + ret = s390_io_adapter_map(adapter, indicator->map, true); + if ((ret != 0) && (ret != -ENOSYS)) { + goto out_err; + } + return 0; + +out_err: + indicator->map = 0; + return ret; +} + +int css_create_css_image(uint8_t cssid, bool default_image) +{ + trace_css_new_image(cssid, default_image ? "(default)" : ""); + /* 255 is reserved */ + if (cssid == 255) { + return -EINVAL; + } + if (channel_subsys.css[cssid]) { + return -EBUSY; + } + channel_subsys.css[cssid] = g_new0(CssImage, 1); + if (default_image) { + channel_subsys.default_cssid = cssid; + } + return 0; +} + +uint32_t css_get_adapter_id(CssIoAdapterType type, uint8_t isc) +{ + if (type >= CSS_IO_ADAPTER_TYPE_NUMS || isc > MAX_ISC || + !channel_subsys.io_adapters[type][isc]) { + return -1; + } + + return channel_subsys.io_adapters[type][isc]->id; +} + +/** + * css_register_io_adapters: Register I/O adapters per ISC during init + * + * @swap: an indication if byte swap is needed. + * @maskable: an indication if the adapter is subject to the mask operation. + * @flags: further characteristics of the adapter. + * e.g. suppressible, an indication if the adapter is subject to AIS. + * @errp: location to store error information. + */ +void css_register_io_adapters(CssIoAdapterType type, bool swap, bool maskable, + uint8_t flags, Error **errp) +{ + uint32_t id; + int ret, isc; + IoAdapter *adapter; + S390FLICState *fs = s390_get_flic(); + S390FLICStateClass *fsc = s390_get_flic_class(fs); + + /* + * Disallow multiple registrations for the same device type. + * Report an error if registering for an already registered type. 
+ */ + if (channel_subsys.io_adapters[type][0]) { + error_setg(errp, "Adapters for type %d already registered", type); + } + + for (isc = 0; isc <= MAX_ISC; isc++) { + id = (type << 3) | isc; + ret = fsc->register_io_adapter(fs, id, isc, swap, maskable, flags); + if (ret == 0) { + adapter = g_new0(IoAdapter, 1); + adapter->id = id; + adapter->isc = isc; + adapter->type = type; + adapter->flags = flags; + channel_subsys.io_adapters[type][isc] = adapter; + } else { + error_setg_errno(errp, -ret, "Unexpected error %d when " + "registering adapter %d", ret, id); + break; + } + } + + /* + * No need to free registered adapters in kvm: kvm will clean up + * when the machine goes away. + */ + if (ret) { + for (isc--; isc >= 0; isc--) { + g_free(channel_subsys.io_adapters[type][isc]); + channel_subsys.io_adapters[type][isc] = NULL; + } + } + +} + +static void css_clear_io_interrupt(uint16_t subchannel_id, + uint16_t subchannel_nr) +{ + Error *err = NULL; + static bool no_clear_irq; + S390FLICState *fs = s390_get_flic(); + S390FLICStateClass *fsc = s390_get_flic_class(fs); + int r; + + if (unlikely(no_clear_irq)) { + return; + } + r = fsc->clear_io_irq(fs, subchannel_id, subchannel_nr); + switch (r) { + case 0: + break; + case -ENOSYS: + no_clear_irq = true; + /* + * Ignore unavailability, as the user can't do anything + * about it anyway. + */ + break; + default: + error_setg_errno(&err, -r, "unexpected error condition"); + error_propagate(&error_abort, err); + } +} + +static inline uint16_t css_do_build_subchannel_id(uint8_t cssid, uint8_t ssid) +{ + if (channel_subsys.max_cssid > 0) { + return (cssid << 8) | (1 << 3) | (ssid << 1) | 1; + } + return (ssid << 1) | 1; +} + +uint16_t css_build_subchannel_id(SubchDev *sch) +{ + return css_do_build_subchannel_id(sch->cssid, sch->ssid); +} + +void css_inject_io_interrupt(SubchDev *sch) +{ + uint8_t isc = (sch->curr_status.pmcw.flags & PMCW_FLAGS_MASK_ISC) >> 11; + + trace_css_io_interrupt(sch->cssid, sch->ssid, sch->schid, + sch->curr_status.pmcw.intparm, isc, ""); + s390_io_interrupt(css_build_subchannel_id(sch), + sch->schid, + sch->curr_status.pmcw.intparm, + isc << 27); +} + +void css_conditional_io_interrupt(SubchDev *sch) +{ + /* + * If the subchannel is not enabled, it is not made status pending + * (see PoP p. 16-17, "Status Control"). + */ + if (!(sch->curr_status.pmcw.flags & PMCW_FLAGS_MASK_ENA)) { + return; + } + + /* + * If the subchannel is not currently status pending, make it pending + * with alert status. + */ + if (!(sch->curr_status.scsw.ctrl & SCSW_STCTL_STATUS_PEND)) { + uint8_t isc = (sch->curr_status.pmcw.flags & PMCW_FLAGS_MASK_ISC) >> 11; + + trace_css_io_interrupt(sch->cssid, sch->ssid, sch->schid, + sch->curr_status.pmcw.intparm, isc, + "(unsolicited)"); + sch->curr_status.scsw.ctrl &= ~SCSW_CTRL_MASK_STCTL; + sch->curr_status.scsw.ctrl |= + SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND; + /* Inject an I/O interrupt. */ + s390_io_interrupt(css_build_subchannel_id(sch), + sch->schid, + sch->curr_status.pmcw.intparm, + isc << 27); + } +} + +int css_do_sic(CPUS390XState *env, uint8_t isc, uint16_t mode) +{ + S390FLICState *fs = s390_get_flic(); + S390FLICStateClass *fsc = s390_get_flic_class(fs); + int r; + + if (env->psw.mask & PSW_MASK_PSTATE) { + r = -PGM_PRIVILEGED; + goto out; + } + + trace_css_do_sic(mode, isc); + switch (mode) { + case SIC_IRQ_MODE_ALL: + case SIC_IRQ_MODE_SINGLE: + break; + default: + r = -PGM_OPERAND; + goto out; + } + + r = fsc->modify_ais_mode(fs, isc, mode) ? 
-PGM_OPERATION : 0; +out: + return r; +} + +void css_adapter_interrupt(CssIoAdapterType type, uint8_t isc) +{ + S390FLICState *fs = s390_get_flic(); + S390FLICStateClass *fsc = s390_get_flic_class(fs); + uint32_t io_int_word = (isc << 27) | IO_INT_WORD_AI; + IoAdapter *adapter = channel_subsys.io_adapters[type][isc]; + + if (!adapter) { + return; + } + + trace_css_adapter_interrupt(isc); + if (fs->ais_supported) { + if (fsc->inject_airq(fs, type, isc, adapter->flags)) { + error_report("Failed to inject airq with AIS supported"); + exit(1); + } + } else { + s390_io_interrupt(0, 0, 0, io_int_word); + } +} + +static void sch_handle_clear_func(SubchDev *sch) +{ + SCHIB *schib = &sch->curr_status; + int path; + + /* Path management: In our simple css, we always choose the only path. */ + path = 0x80; + + /* Reset values prior to 'issuing the clear signal'. */ + schib->pmcw.lpum = 0; + schib->pmcw.pom = 0xff; + schib->scsw.flags &= ~SCSW_FLAGS_MASK_PNO; + + /* We always 'attempt to issue the clear signal', and we always succeed. */ + sch->channel_prog = 0x0; + sch->last_cmd_valid = false; + schib->scsw.ctrl &= ~SCSW_ACTL_CLEAR_PEND; + schib->scsw.ctrl |= SCSW_STCTL_STATUS_PEND; + + schib->scsw.dstat = 0; + schib->scsw.cstat = 0; + schib->pmcw.lpum = path; + +} + +static void sch_handle_halt_func(SubchDev *sch) +{ + SCHIB *schib = &sch->curr_status; + hwaddr curr_ccw = sch->channel_prog; + int path; + + /* Path management: In our simple css, we always choose the only path. */ + path = 0x80; + + /* We always 'attempt to issue the halt signal', and we always succeed. */ + sch->channel_prog = 0x0; + sch->last_cmd_valid = false; + schib->scsw.ctrl &= ~SCSW_ACTL_HALT_PEND; + schib->scsw.ctrl |= SCSW_STCTL_STATUS_PEND; + + if ((schib->scsw.ctrl & (SCSW_ACTL_SUBCH_ACTIVE | + SCSW_ACTL_DEVICE_ACTIVE)) || + !((schib->scsw.ctrl & SCSW_ACTL_START_PEND) || + (schib->scsw.ctrl & SCSW_ACTL_SUSP))) { + schib->scsw.dstat = SCSW_DSTAT_DEVICE_END; + } + if ((schib->scsw.ctrl & (SCSW_ACTL_SUBCH_ACTIVE | + SCSW_ACTL_DEVICE_ACTIVE)) || + (schib->scsw.ctrl & SCSW_ACTL_SUSP)) { + schib->scsw.cpa = curr_ccw + 8; + } + schib->scsw.cstat = 0; + schib->pmcw.lpum = path; + +} + +/* + * As the SenseId struct cannot be packed (would cause unaligned accesses), we + * have to copy the individual fields to an unstructured area using the correct + * layout (see SA22-7204-01 "Common I/O-Device Commands"). 
+ */ +static void copy_sense_id_to_guest(uint8_t *dest, SenseId *src) +{ + int i; + + dest[0] = src->reserved; + stw_be_p(dest + 1, src->cu_type); + dest[3] = src->cu_model; + stw_be_p(dest + 4, src->dev_type); + dest[6] = src->dev_model; + dest[7] = src->unused; + for (i = 0; i < ARRAY_SIZE(src->ciw); i++) { + dest[8 + i * 4] = src->ciw[i].type; + dest[9 + i * 4] = src->ciw[i].command; + stw_be_p(dest + 10 + i * 4, src->ciw[i].count); + } +} + +static CCW1 copy_ccw_from_guest(hwaddr addr, bool fmt1) +{ + CCW0 tmp0; + CCW1 tmp1; + CCW1 ret; + + if (fmt1) { + cpu_physical_memory_read(addr, &tmp1, sizeof(tmp1)); + ret.cmd_code = tmp1.cmd_code; + ret.flags = tmp1.flags; + ret.count = be16_to_cpu(tmp1.count); + ret.cda = be32_to_cpu(tmp1.cda); + } else { + cpu_physical_memory_read(addr, &tmp0, sizeof(tmp0)); + if ((tmp0.cmd_code & 0x0f) == CCW_CMD_TIC) { + ret.cmd_code = CCW_CMD_TIC; + ret.flags = 0; + ret.count = 0; + } else { + ret.cmd_code = tmp0.cmd_code; + ret.flags = tmp0.flags; + ret.count = be16_to_cpu(tmp0.count); + } + ret.cda = be16_to_cpu(tmp0.cda1) | (tmp0.cda0 << 16); + } + return ret; +} +/** + * If out of bounds marks the stream broken. If broken returns -EINVAL, + * otherwise the requested length (may be zero) + */ +static inline int cds_check_len(CcwDataStream *cds, int len) +{ + if (cds->at_byte + len > cds->count) { + cds->flags |= CDS_F_STREAM_BROKEN; + } + return cds->flags & CDS_F_STREAM_BROKEN ? -EINVAL : len; +} + +static inline bool cds_ccw_addrs_ok(hwaddr addr, int len, bool ccw_fmt1) +{ + return (addr + len) < (ccw_fmt1 ? (1UL << 31) : (1UL << 24)); +} + +static int ccw_dstream_rw_noflags(CcwDataStream *cds, void *buff, int len, + CcwDataStreamOp op) +{ + int ret; + + ret = cds_check_len(cds, len); + if (ret <= 0) { + return ret; + } + if (!cds_ccw_addrs_ok(cds->cda, len, cds->flags & CDS_F_FMT)) { + return -EINVAL; /* channel program check */ + } + if (op == CDS_OP_A) { + goto incr; + } + if (!cds->do_skip) { + ret = address_space_rw(&address_space_memory, cds->cda, + MEMTXATTRS_UNSPECIFIED, buff, len, op); + } else { + ret = MEMTX_OK; + } + if (ret != MEMTX_OK) { + cds->flags |= CDS_F_STREAM_BROKEN; + return -EINVAL; + } +incr: + cds->at_byte += len; + cds->cda += len; + return 0; +} + +/* returns values between 1 and bsz, where bsz is a power of 2 */ +static inline uint16_t ida_continuous_left(hwaddr cda, uint64_t bsz) +{ + return bsz - (cda & (bsz - 1)); +} + +static inline uint64_t ccw_ida_block_size(uint8_t flags) +{ + if ((flags & CDS_F_C64) && !(flags & CDS_F_I2K)) { + return 1ULL << 12; + } + return 1ULL << 11; +} + +static inline int ida_read_next_idaw(CcwDataStream *cds) +{ + union {uint64_t fmt2; uint32_t fmt1; } idaw; + int ret; + hwaddr idaw_addr; + bool idaw_fmt2 = cds->flags & CDS_F_C64; + bool ccw_fmt1 = cds->flags & CDS_F_FMT; + + if (idaw_fmt2) { + idaw_addr = cds->cda_orig + sizeof(idaw.fmt2) * cds->at_idaw; + if (idaw_addr & 0x07 || !cds_ccw_addrs_ok(idaw_addr, 0, ccw_fmt1)) { + return -EINVAL; /* channel program check */ + } + ret = address_space_read(&address_space_memory, idaw_addr, + MEMTXATTRS_UNSPECIFIED, &idaw.fmt2, + sizeof(idaw.fmt2)); + cds->cda = be64_to_cpu(idaw.fmt2); + } else { + idaw_addr = cds->cda_orig + sizeof(idaw.fmt1) * cds->at_idaw; + if (idaw_addr & 0x03 || !cds_ccw_addrs_ok(idaw_addr, 0, ccw_fmt1)) { + return -EINVAL; /* channel program check */ + } + ret = address_space_read(&address_space_memory, idaw_addr, + MEMTXATTRS_UNSPECIFIED, &idaw.fmt1, + sizeof(idaw.fmt1)); + cds->cda = be64_to_cpu(idaw.fmt1); + if (cds->cda & 
0x80000000) { + return -EINVAL; /* channel program check */ + } + } + ++(cds->at_idaw); + if (ret != MEMTX_OK) { + /* assume inaccessible address */ + return -EINVAL; /* channel program check */ + } + return 0; +} + +static int ccw_dstream_rw_ida(CcwDataStream *cds, void *buff, int len, + CcwDataStreamOp op) +{ + uint64_t bsz = ccw_ida_block_size(cds->flags); + int ret = 0; + uint16_t cont_left, iter_len; + + ret = cds_check_len(cds, len); + if (ret <= 0) { + return ret; + } + if (!cds->at_idaw) { + /* read first idaw */ + ret = ida_read_next_idaw(cds); + if (ret) { + goto err; + } + cont_left = ida_continuous_left(cds->cda, bsz); + } else { + cont_left = ida_continuous_left(cds->cda, bsz); + if (cont_left == bsz) { + ret = ida_read_next_idaw(cds); + if (ret) { + goto err; + } + if (cds->cda & (bsz - 1)) { + ret = -EINVAL; /* channel program check */ + goto err; + } + } + } + do { + iter_len = MIN(len, cont_left); + if (op != CDS_OP_A) { + if (!cds->do_skip) { + ret = address_space_rw(&address_space_memory, cds->cda, + MEMTXATTRS_UNSPECIFIED, buff, iter_len, + op); + } else { + ret = MEMTX_OK; + } + if (ret != MEMTX_OK) { + /* assume inaccessible address */ + ret = -EINVAL; /* channel program check */ + goto err; + } + } + cds->at_byte += iter_len; + cds->cda += iter_len; + len -= iter_len; + if (!len) { + break; + } + ret = ida_read_next_idaw(cds); + if (ret) { + goto err; + } + cont_left = bsz; + } while (true); + return ret; +err: + cds->flags |= CDS_F_STREAM_BROKEN; + return ret; +} + +void ccw_dstream_init(CcwDataStream *cds, CCW1 const *ccw, ORB const *orb) +{ + /* + * We don't support MIDA (an optional facility) yet and we + * catch this earlier. Just for expressing the precondition. + */ + g_assert(!(orb->ctrl1 & ORB_CTRL1_MASK_MIDAW)); + cds->flags = (orb->ctrl0 & ORB_CTRL0_MASK_I2K ? CDS_F_I2K : 0) | + (orb->ctrl0 & ORB_CTRL0_MASK_C64 ? CDS_F_C64 : 0) | + (orb->ctrl0 & ORB_CTRL0_MASK_FMT ? CDS_F_FMT : 0) | + (ccw->flags & CCW_FLAG_IDA ? CDS_F_IDA : 0); + + cds->count = ccw->count; + cds->cda_orig = ccw->cda; + /* skip is only effective for read, read backwards, or sense commands */ + cds->do_skip = (ccw->flags & CCW_FLAG_SKIP) && + ((ccw->cmd_code & 0x0f) == CCW_CMD_BASIC_SENSE || + (ccw->cmd_code & 0x03) == 0x02 /* read */ || + (ccw->cmd_code & 0x0f) == 0x0c /* read backwards */); + ccw_dstream_rewind(cds); + if (!(cds->flags & CDS_F_IDA)) { + cds->op_handler = ccw_dstream_rw_noflags; + } else { + cds->op_handler = ccw_dstream_rw_ida; + } +} + +static int css_interpret_ccw(SubchDev *sch, hwaddr ccw_addr, + bool suspend_allowed) +{ + int ret; + bool check_len; + int len; + CCW1 ccw; + + if (!ccw_addr) { + return -EINVAL; /* channel-program check */ + } + /* Check doubleword aligned and 31 or 24 (fmt 0) bit addressable. */ + if (ccw_addr & (sch->ccw_fmt_1 ? 0x80000007 : 0xff000007)) { + return -EINVAL; + } + + /* Translate everything to format-1 ccws - the information is the same. */ + ccw = copy_ccw_from_guest(ccw_addr, sch->ccw_fmt_1); + + /* Check for invalid command codes. */ + if ((ccw.cmd_code & 0x0f) == 0) { + return -EINVAL; + } + if (((ccw.cmd_code & 0x0f) == CCW_CMD_TIC) && + ((ccw.cmd_code & 0xf0) != 0)) { + return -EINVAL; + } + if (!sch->ccw_fmt_1 && (ccw.count == 0) && + (ccw.cmd_code != CCW_CMD_TIC)) { + return -EINVAL; + } + + /* We don't support MIDA. */ + if (ccw.flags & CCW_FLAG_MIDA) { + return -EINVAL; + } + + if (ccw.flags & CCW_FLAG_SUSPEND) { + return suspend_allowed ? 
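+               /*
+                * Suspension requested by this CCW is only honoured if the
+                * ORB allowed it (or the program was resumed via rsch);
+                * otherwise it is a channel-program check.
+                */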
-EINPROGRESS : -EINVAL; + } + + check_len = !((ccw.flags & CCW_FLAG_SLI) && !(ccw.flags & CCW_FLAG_DC)); + + if (!ccw.cda) { + if (sch->ccw_no_data_cnt == 255) { + return -EINVAL; + } + sch->ccw_no_data_cnt++; + } + + /* Look at the command. */ + ccw_dstream_init(&sch->cds, &ccw, &(sch->orb)); + switch (ccw.cmd_code) { + case CCW_CMD_NOOP: + /* Nothing to do. */ + ret = 0; + break; + case CCW_CMD_BASIC_SENSE: + if (check_len) { + if (ccw.count != sizeof(sch->sense_data)) { + ret = -EINVAL; + break; + } + } + len = MIN(ccw.count, sizeof(sch->sense_data)); + ret = ccw_dstream_write_buf(&sch->cds, sch->sense_data, len); + sch->curr_status.scsw.count = ccw_dstream_residual_count(&sch->cds); + if (!ret) { + memset(sch->sense_data, 0, sizeof(sch->sense_data)); + } + break; + case CCW_CMD_SENSE_ID: + { + /* According to SA22-7204-01, Sense-ID can store up to 256 bytes */ + uint8_t sense_id[256]; + + copy_sense_id_to_guest(sense_id, &sch->id); + /* Sense ID information is device specific. */ + if (check_len) { + if (ccw.count != sizeof(sense_id)) { + ret = -EINVAL; + break; + } + } + len = MIN(ccw.count, sizeof(sense_id)); + /* + * Only indicate 0xff in the first sense byte if we actually + * have enough place to store at least bytes 0-3. + */ + if (len >= 4) { + sense_id[0] = 0xff; + } else { + sense_id[0] = 0; + } + ret = ccw_dstream_write_buf(&sch->cds, sense_id, len); + if (!ret) { + sch->curr_status.scsw.count = ccw_dstream_residual_count(&sch->cds); + } + break; + } + case CCW_CMD_TIC: + if (sch->last_cmd_valid && (sch->last_cmd.cmd_code == CCW_CMD_TIC)) { + ret = -EINVAL; + break; + } + if (ccw.flags || ccw.count) { + /* We have already sanitized these if converted from fmt 0. */ + ret = -EINVAL; + break; + } + sch->channel_prog = ccw.cda; + ret = -EAGAIN; + break; + default: + if (sch->ccw_cb) { + /* Handle device specific commands. */ + ret = sch->ccw_cb(sch, ccw); + } else { + ret = -ENOSYS; + } + break; + } + sch->last_cmd = ccw; + sch->last_cmd_valid = true; + if (ret == 0) { + if (ccw.flags & CCW_FLAG_CC) { + sch->channel_prog += 8; + ret = -EAGAIN; + } + } + + return ret; +} + +static void sch_handle_start_func_virtual(SubchDev *sch) +{ + SCHIB *schib = &sch->curr_status; + int path; + int ret; + bool suspend_allowed; + + /* Path management: In our simple css, we always choose the only path. */ + path = 0x80; + + if (!(schib->scsw.ctrl & SCSW_ACTL_SUSP)) { + /* Start Function triggered via ssch, i.e. we have an ORB */ + ORB *orb = &sch->orb; + schib->scsw.cstat = 0; + schib->scsw.dstat = 0; + /* Look at the orb and try to execute the channel program. */ + schib->pmcw.intparm = orb->intparm; + if (!(orb->lpm & path)) { + /* Generate a deferred cc 3 condition. */ + schib->scsw.flags |= SCSW_FLAGS_MASK_CC; + schib->scsw.ctrl &= ~SCSW_CTRL_MASK_STCTL; + schib->scsw.ctrl |= (SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND); + return; + } + sch->ccw_fmt_1 = !!(orb->ctrl0 & ORB_CTRL0_MASK_FMT); + schib->scsw.flags |= (sch->ccw_fmt_1) ? SCSW_FLAGS_MASK_FMT : 0; + sch->ccw_no_data_cnt = 0; + suspend_allowed = !!(orb->ctrl0 & ORB_CTRL0_MASK_SPND); + } else { + /* Start Function resumed via rsch */ + schib->scsw.ctrl &= ~(SCSW_ACTL_SUSP | SCSW_ACTL_RESUME_PEND); + /* The channel program had been suspended before. 
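+         * Since it got suspended, the original ORB must have permitted
+         * suspension, so it stays allowed for the rest of the program.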
*/ + suspend_allowed = true; + } + sch->last_cmd_valid = false; + do { + ret = css_interpret_ccw(sch, sch->channel_prog, suspend_allowed); + switch (ret) { + case -EAGAIN: + /* ccw chain, continue processing */ + break; + case 0: + /* success */ + schib->scsw.ctrl &= ~SCSW_ACTL_START_PEND; + schib->scsw.ctrl &= ~SCSW_CTRL_MASK_STCTL; + schib->scsw.ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY | + SCSW_STCTL_STATUS_PEND; + schib->scsw.dstat = SCSW_DSTAT_CHANNEL_END | SCSW_DSTAT_DEVICE_END; + schib->scsw.cpa = sch->channel_prog + 8; + break; + case -EIO: + /* I/O errors, status depends on specific devices */ + break; + case -ENOSYS: + /* unsupported command, generate unit check (command reject) */ + schib->scsw.ctrl &= ~SCSW_ACTL_START_PEND; + schib->scsw.dstat = SCSW_DSTAT_UNIT_CHECK; + /* Set sense bit 0 in ecw0. */ + sch->sense_data[0] = 0x80; + schib->scsw.ctrl &= ~SCSW_CTRL_MASK_STCTL; + schib->scsw.ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY | + SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND; + schib->scsw.cpa = sch->channel_prog + 8; + break; + case -EINPROGRESS: + /* channel program has been suspended */ + schib->scsw.ctrl &= ~SCSW_ACTL_START_PEND; + schib->scsw.ctrl |= SCSW_ACTL_SUSP; + break; + default: + /* error, generate channel program check */ + schib->scsw.ctrl &= ~SCSW_ACTL_START_PEND; + schib->scsw.cstat = SCSW_CSTAT_PROG_CHECK; + schib->scsw.ctrl &= ~SCSW_CTRL_MASK_STCTL; + schib->scsw.ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY | + SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND; + schib->scsw.cpa = sch->channel_prog + 8; + break; + } + } while (ret == -EAGAIN); + +} + +static IOInstEnding sch_handle_halt_func_passthrough(SubchDev *sch) +{ + int ret; + + ret = s390_ccw_halt(sch); + if (ret == -ENOSYS) { + sch_handle_halt_func(sch); + return IOINST_CC_EXPECTED; + } + /* + * Some conditions may have been detected prior to starting the halt + * function; map them to the correct cc. + * Note that we map both -ENODEV and -EACCES to cc 3 (there's not really + * anything else we can do.) + */ + switch (ret) { + case -EBUSY: + return IOINST_CC_BUSY; + case -ENODEV: + case -EACCES: + return IOINST_CC_NOT_OPERATIONAL; + default: + return IOINST_CC_EXPECTED; + } +} + +static IOInstEnding sch_handle_clear_func_passthrough(SubchDev *sch) +{ + int ret; + + ret = s390_ccw_clear(sch); + if (ret == -ENOSYS) { + sch_handle_clear_func(sch); + return IOINST_CC_EXPECTED; + } + /* + * Some conditions may have been detected prior to starting the clear + * function; map them to the correct cc. + * Note that we map both -ENODEV and -EACCES to cc 3 (there's not really + * anything else we can do.) + */ + switch (ret) { + case -ENODEV: + case -EACCES: + return IOINST_CC_NOT_OPERATIONAL; + default: + return IOINST_CC_EXPECTED; + } +} + +static IOInstEnding sch_handle_start_func_passthrough(SubchDev *sch) +{ + SCHIB *schib = &sch->curr_status; + ORB *orb = &sch->orb; + if (!(schib->scsw.ctrl & SCSW_ACTL_SUSP)) { + assert(orb != NULL); + schib->pmcw.intparm = orb->intparm; + } + return s390_ccw_cmd_request(sch); +} + +/* + * On real machines, this would run asynchronously to the main vcpus. + * We might want to make some parts of the ssch handling (interpreting + * read/writes) asynchronous later on if we start supporting more than + * our current very simple devices. 
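+ * As it stands, the whole channel program is interpreted and the I/O
+ * interrupt queued before the issuing instruction completes.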
+ */ +IOInstEnding do_subchannel_work_virtual(SubchDev *sch) +{ + SCHIB *schib = &sch->curr_status; + + if (schib->scsw.ctrl & SCSW_FCTL_CLEAR_FUNC) { + sch_handle_clear_func(sch); + } else if (schib->scsw.ctrl & SCSW_FCTL_HALT_FUNC) { + sch_handle_halt_func(sch); + } else if (schib->scsw.ctrl & SCSW_FCTL_START_FUNC) { + /* Triggered by both ssch and rsch. */ + sch_handle_start_func_virtual(sch); + } + css_inject_io_interrupt(sch); + /* inst must succeed if this func is called */ + return IOINST_CC_EXPECTED; +} + +IOInstEnding do_subchannel_work_passthrough(SubchDev *sch) +{ + SCHIB *schib = &sch->curr_status; + + if (schib->scsw.ctrl & SCSW_FCTL_CLEAR_FUNC) { + return sch_handle_clear_func_passthrough(sch); + } else if (schib->scsw.ctrl & SCSW_FCTL_HALT_FUNC) { + return sch_handle_halt_func_passthrough(sch); + } else if (schib->scsw.ctrl & SCSW_FCTL_START_FUNC) { + return sch_handle_start_func_passthrough(sch); + } + return IOINST_CC_EXPECTED; +} + +static IOInstEnding do_subchannel_work(SubchDev *sch) +{ + if (!sch->do_subchannel_work) { + return IOINST_CC_STATUS_PRESENT; + } + g_assert(sch->curr_status.scsw.ctrl & SCSW_CTRL_MASK_FCTL); + return sch->do_subchannel_work(sch); +} + +static void copy_pmcw_to_guest(PMCW *dest, const PMCW *src) +{ + int i; + + dest->intparm = cpu_to_be32(src->intparm); + dest->flags = cpu_to_be16(src->flags); + dest->devno = cpu_to_be16(src->devno); + dest->lpm = src->lpm; + dest->pnom = src->pnom; + dest->lpum = src->lpum; + dest->pim = src->pim; + dest->mbi = cpu_to_be16(src->mbi); + dest->pom = src->pom; + dest->pam = src->pam; + for (i = 0; i < ARRAY_SIZE(dest->chpid); i++) { + dest->chpid[i] = src->chpid[i]; + } + dest->chars = cpu_to_be32(src->chars); +} + +void copy_scsw_to_guest(SCSW *dest, const SCSW *src) +{ + dest->flags = cpu_to_be16(src->flags); + dest->ctrl = cpu_to_be16(src->ctrl); + dest->cpa = cpu_to_be32(src->cpa); + dest->dstat = src->dstat; + dest->cstat = src->cstat; + dest->count = cpu_to_be16(src->count); +} + +static void copy_schib_to_guest(SCHIB *dest, const SCHIB *src) +{ + int i; + /* + * We copy the PMCW and SCSW in and out of local variables to + * avoid taking the address of members of a packed struct. + */ + PMCW src_pmcw, dest_pmcw; + SCSW src_scsw, dest_scsw; + + src_pmcw = src->pmcw; + copy_pmcw_to_guest(&dest_pmcw, &src_pmcw); + dest->pmcw = dest_pmcw; + src_scsw = src->scsw; + copy_scsw_to_guest(&dest_scsw, &src_scsw); + dest->scsw = dest_scsw; + dest->mba = cpu_to_be64(src->mba); + for (i = 0; i < ARRAY_SIZE(dest->mda); i++) { + dest->mda[i] = src->mda[i]; + } +} + +void copy_esw_to_guest(ESW *dest, const ESW *src) +{ + dest->word0 = cpu_to_be32(src->word0); + dest->erw = cpu_to_be32(src->erw); + dest->word2 = cpu_to_be64(src->word2); + dest->word4 = cpu_to_be32(src->word4); +} + +IOInstEnding css_do_stsch(SubchDev *sch, SCHIB *schib) +{ + int ret; + + /* + * For some subchannels, we may want to update parts of + * the schib (e.g., update path masks from the host device + * for passthrough subchannels). + */ + ret = s390_ccw_store(sch); + + /* Use current status. 
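+     * (possibly just refreshed by s390_ccw_store() above).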
*/ + copy_schib_to_guest(schib, &sch->curr_status); + return ret; +} + +static void copy_pmcw_from_guest(PMCW *dest, const PMCW *src) +{ + int i; + + dest->intparm = be32_to_cpu(src->intparm); + dest->flags = be16_to_cpu(src->flags); + dest->devno = be16_to_cpu(src->devno); + dest->lpm = src->lpm; + dest->pnom = src->pnom; + dest->lpum = src->lpum; + dest->pim = src->pim; + dest->mbi = be16_to_cpu(src->mbi); + dest->pom = src->pom; + dest->pam = src->pam; + for (i = 0; i < ARRAY_SIZE(dest->chpid); i++) { + dest->chpid[i] = src->chpid[i]; + } + dest->chars = be32_to_cpu(src->chars); +} + +static void copy_scsw_from_guest(SCSW *dest, const SCSW *src) +{ + dest->flags = be16_to_cpu(src->flags); + dest->ctrl = be16_to_cpu(src->ctrl); + dest->cpa = be32_to_cpu(src->cpa); + dest->dstat = src->dstat; + dest->cstat = src->cstat; + dest->count = be16_to_cpu(src->count); +} + +static void copy_schib_from_guest(SCHIB *dest, const SCHIB *src) +{ + int i; + /* + * We copy the PMCW and SCSW in and out of local variables to + * avoid taking the address of members of a packed struct. + */ + PMCW src_pmcw, dest_pmcw; + SCSW src_scsw, dest_scsw; + + src_pmcw = src->pmcw; + copy_pmcw_from_guest(&dest_pmcw, &src_pmcw); + dest->pmcw = dest_pmcw; + src_scsw = src->scsw; + copy_scsw_from_guest(&dest_scsw, &src_scsw); + dest->scsw = dest_scsw; + dest->mba = be64_to_cpu(src->mba); + for (i = 0; i < ARRAY_SIZE(dest->mda); i++) { + dest->mda[i] = src->mda[i]; + } +} + +IOInstEnding css_do_msch(SubchDev *sch, const SCHIB *orig_schib) +{ + SCHIB *schib = &sch->curr_status; + uint16_t oldflags; + SCHIB schib_copy; + + if (!(schib->pmcw.flags & PMCW_FLAGS_MASK_DNV)) { + return IOINST_CC_EXPECTED; + } + + if (schib->scsw.ctrl & SCSW_STCTL_STATUS_PEND) { + return IOINST_CC_STATUS_PRESENT; + } + + if (schib->scsw.ctrl & + (SCSW_FCTL_START_FUNC|SCSW_FCTL_HALT_FUNC|SCSW_FCTL_CLEAR_FUNC)) { + return IOINST_CC_BUSY; + } + + copy_schib_from_guest(&schib_copy, orig_schib); + /* Only update the program-modifiable fields. */ + schib->pmcw.intparm = schib_copy.pmcw.intparm; + oldflags = schib->pmcw.flags; + schib->pmcw.flags &= ~(PMCW_FLAGS_MASK_ISC | PMCW_FLAGS_MASK_ENA | + PMCW_FLAGS_MASK_LM | PMCW_FLAGS_MASK_MME | + PMCW_FLAGS_MASK_MP); + schib->pmcw.flags |= schib_copy.pmcw.flags & + (PMCW_FLAGS_MASK_ISC | PMCW_FLAGS_MASK_ENA | + PMCW_FLAGS_MASK_LM | PMCW_FLAGS_MASK_MME | + PMCW_FLAGS_MASK_MP); + schib->pmcw.lpm = schib_copy.pmcw.lpm; + schib->pmcw.mbi = schib_copy.pmcw.mbi; + schib->pmcw.pom = schib_copy.pmcw.pom; + schib->pmcw.chars &= ~(PMCW_CHARS_MASK_MBFC | PMCW_CHARS_MASK_CSENSE); + schib->pmcw.chars |= schib_copy.pmcw.chars & + (PMCW_CHARS_MASK_MBFC | PMCW_CHARS_MASK_CSENSE); + schib->mba = schib_copy.mba; + + /* Has the channel been disabled? 
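+     * I.e., did the msch above turn the enabled bit from 1 to 0?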
*/ + if (sch->disable_cb && (oldflags & PMCW_FLAGS_MASK_ENA) != 0 + && (schib->pmcw.flags & PMCW_FLAGS_MASK_ENA) == 0) { + sch->disable_cb(sch); + } + return IOINST_CC_EXPECTED; +} + +IOInstEnding css_do_xsch(SubchDev *sch) +{ + SCHIB *schib = &sch->curr_status; + + if (~(schib->pmcw.flags) & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA)) { + return IOINST_CC_NOT_OPERATIONAL; + } + + if (schib->scsw.ctrl & SCSW_CTRL_MASK_STCTL) { + return IOINST_CC_STATUS_PRESENT; + } + + if (!(schib->scsw.ctrl & SCSW_CTRL_MASK_FCTL) || + ((schib->scsw.ctrl & SCSW_CTRL_MASK_FCTL) != SCSW_FCTL_START_FUNC) || + (!(schib->scsw.ctrl & + (SCSW_ACTL_RESUME_PEND | SCSW_ACTL_START_PEND | SCSW_ACTL_SUSP))) || + (schib->scsw.ctrl & SCSW_ACTL_SUBCH_ACTIVE)) { + return IOINST_CC_BUSY; + } + + /* Cancel the current operation. */ + schib->scsw.ctrl &= ~(SCSW_FCTL_START_FUNC | + SCSW_ACTL_RESUME_PEND | + SCSW_ACTL_START_PEND | + SCSW_ACTL_SUSP); + sch->channel_prog = 0x0; + sch->last_cmd_valid = false; + schib->scsw.dstat = 0; + schib->scsw.cstat = 0; + return IOINST_CC_EXPECTED; +} + +IOInstEnding css_do_csch(SubchDev *sch) +{ + SCHIB *schib = &sch->curr_status; + + if (~(schib->pmcw.flags) & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA)) { + return IOINST_CC_NOT_OPERATIONAL; + } + + /* Trigger the clear function. */ + schib->scsw.ctrl &= ~(SCSW_CTRL_MASK_FCTL | SCSW_CTRL_MASK_ACTL); + schib->scsw.ctrl |= SCSW_FCTL_CLEAR_FUNC | SCSW_ACTL_CLEAR_PEND; + + return do_subchannel_work(sch); +} + +IOInstEnding css_do_hsch(SubchDev *sch) +{ + SCHIB *schib = &sch->curr_status; + + if (~(schib->pmcw.flags) & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA)) { + return IOINST_CC_NOT_OPERATIONAL; + } + + if (((schib->scsw.ctrl & SCSW_CTRL_MASK_STCTL) == SCSW_STCTL_STATUS_PEND) || + (schib->scsw.ctrl & (SCSW_STCTL_PRIMARY | + SCSW_STCTL_SECONDARY | + SCSW_STCTL_ALERT))) { + return IOINST_CC_STATUS_PRESENT; + } + + if (schib->scsw.ctrl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) { + return IOINST_CC_BUSY; + } + + /* Trigger the halt function. */ + schib->scsw.ctrl |= SCSW_FCTL_HALT_FUNC; + schib->scsw.ctrl &= ~SCSW_FCTL_START_FUNC; + if (((schib->scsw.ctrl & SCSW_CTRL_MASK_ACTL) == + (SCSW_ACTL_SUBCH_ACTIVE | SCSW_ACTL_DEVICE_ACTIVE)) && + ((schib->scsw.ctrl & SCSW_CTRL_MASK_STCTL) == + SCSW_STCTL_INTERMEDIATE)) { + schib->scsw.ctrl &= ~SCSW_STCTL_STATUS_PEND; + } + schib->scsw.ctrl |= SCSW_ACTL_HALT_PEND; + + return do_subchannel_work(sch); +} + +static void css_update_chnmon(SubchDev *sch) +{ + if (!(sch->curr_status.pmcw.flags & PMCW_FLAGS_MASK_MME)) { + /* Not active. */ + return; + } + /* The counter is conveniently located at the beginning of the struct. */ + if (sch->curr_status.pmcw.chars & PMCW_CHARS_MASK_MBFC) { + /* Format 1, per-subchannel area. */ + uint32_t count; + + count = address_space_ldl(&address_space_memory, + sch->curr_status.mba, + MEMTXATTRS_UNSPECIFIED, + NULL); + count++; + address_space_stl(&address_space_memory, sch->curr_status.mba, count, + MEMTXATTRS_UNSPECIFIED, NULL); + } else { + /* Format 0, global area. 
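+         * The measurement-block index selects a 32-byte entry in the
+         * guest-supplied global area, hence the shift by 5 below
+         * (e.g. mbi 2 addresses chnmon_area + 0x40).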
*/ + uint32_t offset; + uint16_t count; + + offset = sch->curr_status.pmcw.mbi << 5; + count = address_space_lduw(&address_space_memory, + channel_subsys.chnmon_area + offset, + MEMTXATTRS_UNSPECIFIED, + NULL); + count++; + address_space_stw(&address_space_memory, + channel_subsys.chnmon_area + offset, count, + MEMTXATTRS_UNSPECIFIED, NULL); + } +} + +IOInstEnding css_do_ssch(SubchDev *sch, ORB *orb) +{ + SCHIB *schib = &sch->curr_status; + + if (~(schib->pmcw.flags) & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA)) { + return IOINST_CC_NOT_OPERATIONAL; + } + + if (schib->scsw.ctrl & SCSW_STCTL_STATUS_PEND) { + return IOINST_CC_STATUS_PRESENT; + } + + if (schib->scsw.ctrl & (SCSW_FCTL_START_FUNC | + SCSW_FCTL_HALT_FUNC | + SCSW_FCTL_CLEAR_FUNC)) { + return IOINST_CC_BUSY; + } + + /* If monitoring is active, update counter. */ + if (channel_subsys.chnmon_active) { + css_update_chnmon(sch); + } + sch->orb = *orb; + sch->channel_prog = orb->cpa; + /* Trigger the start function. */ + schib->scsw.ctrl |= (SCSW_FCTL_START_FUNC | SCSW_ACTL_START_PEND); + schib->scsw.flags &= ~SCSW_FLAGS_MASK_PNO; + + return do_subchannel_work(sch); +} + +static void copy_irb_to_guest(IRB *dest, const IRB *src, const PMCW *pmcw, + int *irb_len) +{ + int i; + uint16_t stctl = src->scsw.ctrl & SCSW_CTRL_MASK_STCTL; + uint16_t actl = src->scsw.ctrl & SCSW_CTRL_MASK_ACTL; + + copy_scsw_to_guest(&dest->scsw, &src->scsw); + + copy_esw_to_guest(&dest->esw, &src->esw); + + for (i = 0; i < ARRAY_SIZE(dest->ecw); i++) { + dest->ecw[i] = cpu_to_be32(src->ecw[i]); + } + *irb_len = sizeof(*dest) - sizeof(dest->emw); + + /* extended measurements enabled? */ + if ((src->scsw.flags & SCSW_FLAGS_MASK_ESWF) || + !(pmcw->flags & PMCW_FLAGS_MASK_TF) || + !(pmcw->chars & PMCW_CHARS_MASK_XMWME)) { + return; + } + /* extended measurements pending? */ + if (!(stctl & SCSW_STCTL_STATUS_PEND)) { + return; + } + if ((stctl & SCSW_STCTL_PRIMARY) || + (stctl == SCSW_STCTL_SECONDARY) || + ((stctl & SCSW_STCTL_INTERMEDIATE) && (actl & SCSW_ACTL_SUSP))) { + for (i = 0; i < ARRAY_SIZE(dest->emw); i++) { + dest->emw[i] = cpu_to_be32(src->emw[i]); + } + } + *irb_len = sizeof(*dest); +} + +static void build_irb_sense_data(SubchDev *sch, IRB *irb) +{ + int i; + + /* Attention: sense_data is already BE! */ + memcpy(irb->ecw, sch->sense_data, sizeof(sch->sense_data)); + for (i = 0; i < ARRAY_SIZE(irb->ecw); i++) { + irb->ecw[i] = be32_to_cpu(irb->ecw[i]); + } +} + +void build_irb_passthrough(SubchDev *sch, IRB *irb) +{ + /* Copy ESW from hardware */ + irb->esw = sch->esw; + + /* + * If (irb->esw.erw & ESW_ERW_SENSE) is true, then the contents + * of the ECW is sense data. If false, then it is model-dependent + * information. Either way, copy it into the IRB for the guest to + * read/decide what to do with. + */ + build_irb_sense_data(sch, irb); +} + +void build_irb_virtual(SubchDev *sch, IRB *irb) +{ + SCHIB *schib = &sch->curr_status; + uint16_t stctl = schib->scsw.ctrl & SCSW_CTRL_MASK_STCTL; + + if (stctl & SCSW_STCTL_STATUS_PEND) { + if (schib->scsw.cstat & (SCSW_CSTAT_DATA_CHECK | + SCSW_CSTAT_CHN_CTRL_CHK | + SCSW_CSTAT_INTF_CTRL_CHK)) { + irb->scsw.flags |= SCSW_FLAGS_MASK_ESWF; + irb->esw.word0 = 0x04804000; + } else { + irb->esw.word0 = 0x00800000; + } + /* If a unit check is pending, copy sense data. 
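+         * The ERW set up below also flags that concurrent sense is
+         * present, together with its byte count.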
*/ + if ((schib->scsw.dstat & SCSW_DSTAT_UNIT_CHECK) && + (schib->pmcw.chars & PMCW_CHARS_MASK_CSENSE)) { + irb->scsw.flags |= SCSW_FLAGS_MASK_ESWF | SCSW_FLAGS_MASK_ECTL; + build_irb_sense_data(sch, irb); + irb->esw.erw = ESW_ERW_SENSE | (sizeof(sch->sense_data) << 8); + } + } +} + +int css_do_tsch_get_irb(SubchDev *sch, IRB *target_irb, int *irb_len) +{ + SCHIB *schib = &sch->curr_status; + PMCW p; + uint16_t stctl; + IRB irb; + + if (~(schib->pmcw.flags) & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA)) { + return 3; + } + + stctl = schib->scsw.ctrl & SCSW_CTRL_MASK_STCTL; + + /* Prepare the irb for the guest. */ + memset(&irb, 0, sizeof(IRB)); + + /* Copy scsw from current status. */ + irb.scsw = schib->scsw; + + /* Build other IRB data, if necessary */ + if (sch->irb_cb) { + sch->irb_cb(sch, &irb); + } + + /* Store the irb to the guest. */ + p = schib->pmcw; + copy_irb_to_guest(target_irb, &irb, &p, irb_len); + + return ((stctl & SCSW_STCTL_STATUS_PEND) == 0); +} + +void css_do_tsch_update_subch(SubchDev *sch) +{ + SCHIB *schib = &sch->curr_status; + uint16_t stctl; + uint16_t fctl; + uint16_t actl; + + stctl = schib->scsw.ctrl & SCSW_CTRL_MASK_STCTL; + fctl = schib->scsw.ctrl & SCSW_CTRL_MASK_FCTL; + actl = schib->scsw.ctrl & SCSW_CTRL_MASK_ACTL; + + /* Clear conditions on subchannel, if applicable. */ + if (stctl & SCSW_STCTL_STATUS_PEND) { + schib->scsw.ctrl &= ~SCSW_CTRL_MASK_STCTL; + if ((stctl != (SCSW_STCTL_INTERMEDIATE | SCSW_STCTL_STATUS_PEND)) || + ((fctl & SCSW_FCTL_HALT_FUNC) && + (actl & SCSW_ACTL_SUSP))) { + schib->scsw.ctrl &= ~SCSW_CTRL_MASK_FCTL; + } + if (stctl != (SCSW_STCTL_INTERMEDIATE | SCSW_STCTL_STATUS_PEND)) { + schib->scsw.flags &= ~SCSW_FLAGS_MASK_PNO; + schib->scsw.ctrl &= ~(SCSW_ACTL_RESUME_PEND | + SCSW_ACTL_START_PEND | + SCSW_ACTL_HALT_PEND | + SCSW_ACTL_CLEAR_PEND | + SCSW_ACTL_SUSP); + } else { + if ((actl & SCSW_ACTL_SUSP) && + (fctl & SCSW_FCTL_START_FUNC)) { + schib->scsw.flags &= ~SCSW_FLAGS_MASK_PNO; + if (fctl & SCSW_FCTL_HALT_FUNC) { + schib->scsw.ctrl &= ~(SCSW_ACTL_RESUME_PEND | + SCSW_ACTL_START_PEND | + SCSW_ACTL_HALT_PEND | + SCSW_ACTL_CLEAR_PEND | + SCSW_ACTL_SUSP); + } else { + schib->scsw.ctrl &= ~SCSW_ACTL_RESUME_PEND; + } + } + } + /* Clear pending sense data. */ + if (schib->pmcw.chars & PMCW_CHARS_MASK_CSENSE) { + memset(sch->sense_data, 0 , sizeof(sch->sense_data)); + } + } +} + +static void copy_crw_to_guest(CRW *dest, const CRW *src) +{ + dest->flags = cpu_to_be16(src->flags); + dest->rsid = cpu_to_be16(src->rsid); +} + +int css_do_stcrw(CRW *crw) +{ + CrwContainer *crw_cont; + int ret; + + crw_cont = QTAILQ_FIRST(&channel_subsys.pending_crws); + if (crw_cont) { + QTAILQ_REMOVE(&channel_subsys.pending_crws, crw_cont, sibling); + copy_crw_to_guest(crw, &crw_cont->crw); + g_free(crw_cont); + ret = 0; + } else { + /* List was empty, turn crw machine checks on again. 
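+         * The next CRW queued will then raise a machine check instead
+         * of waiting to be polled via stcrw.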
*/ + memset(crw, 0, sizeof(*crw)); + channel_subsys.do_crw_mchk = true; + ret = 1; + } + + return ret; +} + +static void copy_crw_from_guest(CRW *dest, const CRW *src) +{ + dest->flags = be16_to_cpu(src->flags); + dest->rsid = be16_to_cpu(src->rsid); +} + +void css_undo_stcrw(CRW *crw) +{ + CrwContainer *crw_cont; + + crw_cont = g_try_new0(CrwContainer, 1); + if (!crw_cont) { + channel_subsys.crws_lost = true; + return; + } + copy_crw_from_guest(&crw_cont->crw, crw); + + QTAILQ_INSERT_HEAD(&channel_subsys.pending_crws, crw_cont, sibling); +} + +int css_collect_chp_desc(int m, uint8_t cssid, uint8_t f_chpid, uint8_t l_chpid, + int rfmt, void *buf) +{ + int i, desc_size; + uint32_t words[8]; + uint32_t chpid_type_word; + CssImage *css; + + if (!m && !cssid) { + css = channel_subsys.css[channel_subsys.default_cssid]; + } else { + css = channel_subsys.css[cssid]; + } + if (!css) { + return 0; + } + desc_size = 0; + for (i = f_chpid; i <= l_chpid; i++) { + if (css->chpids[i].in_use) { + chpid_type_word = 0x80000000 | (css->chpids[i].type << 8) | i; + if (rfmt == 0) { + words[0] = cpu_to_be32(chpid_type_word); + words[1] = 0; + memcpy(buf + desc_size, words, 8); + desc_size += 8; + } else if (rfmt == 1) { + words[0] = cpu_to_be32(chpid_type_word); + words[1] = 0; + words[2] = 0; + words[3] = 0; + words[4] = 0; + words[5] = 0; + words[6] = 0; + words[7] = 0; + memcpy(buf + desc_size, words, 32); + desc_size += 32; + } + } + } + return desc_size; +} + +void css_do_schm(uint8_t mbk, int update, int dct, uint64_t mbo) +{ + /* dct is currently ignored (not really meaningful for our devices) */ + /* TODO: Don't ignore mbk. */ + if (update && !channel_subsys.chnmon_active) { + /* Enable measuring. */ + channel_subsys.chnmon_area = mbo; + channel_subsys.chnmon_active = true; + } + if (!update && channel_subsys.chnmon_active) { + /* Disable measuring. */ + channel_subsys.chnmon_area = 0; + channel_subsys.chnmon_active = false; + } +} + +IOInstEnding css_do_rsch(SubchDev *sch) +{ + SCHIB *schib = &sch->curr_status; + + if (~(schib->pmcw.flags) & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA)) { + return IOINST_CC_NOT_OPERATIONAL; + } + + if (schib->scsw.ctrl & SCSW_STCTL_STATUS_PEND) { + return IOINST_CC_STATUS_PRESENT; + } + + if (((schib->scsw.ctrl & SCSW_CTRL_MASK_FCTL) != SCSW_FCTL_START_FUNC) || + (schib->scsw.ctrl & SCSW_ACTL_RESUME_PEND) || + (!(schib->scsw.ctrl & SCSW_ACTL_SUSP))) { + return IOINST_CC_BUSY; + } + + /* If monitoring is active, update counter. */ + if (channel_subsys.chnmon_active) { + css_update_chnmon(sch); + } + + schib->scsw.ctrl |= SCSW_ACTL_RESUME_PEND; + return do_subchannel_work(sch); +} + +int css_do_rchp(uint8_t cssid, uint8_t chpid) +{ + uint8_t real_cssid; + + if (cssid > channel_subsys.max_cssid) { + return -EINVAL; + } + if (channel_subsys.max_cssid == 0) { + real_cssid = channel_subsys.default_cssid; + } else { + real_cssid = cssid; + } + if (!channel_subsys.css[real_cssid]) { + return -EINVAL; + } + + if (!channel_subsys.css[real_cssid]->chpids[chpid].in_use) { + return -ENODEV; + } + + if (!channel_subsys.css[real_cssid]->chpids[chpid].is_virtual) { + fprintf(stderr, + "rchp unsupported for non-virtual chpid %x.%02x!\n", + real_cssid, chpid); + return -ENODEV; + } + + /* We don't really use a channel path, so we're done here. */ + css_queue_crw(CRW_RSC_CHP, CRW_ERC_INIT, 1, + channel_subsys.max_cssid > 0 ? 
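+                  /* chain a second CRW carrying the cssid iff MCSS-E is on */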
1 : 0, chpid); + if (channel_subsys.max_cssid > 0) { + css_queue_crw(CRW_RSC_CHP, CRW_ERC_INIT, 1, 0, real_cssid << 8); + } + return 0; +} + +bool css_schid_final(int m, uint8_t cssid, uint8_t ssid, uint16_t schid) +{ + SubchSet *set; + uint8_t real_cssid; + + real_cssid = (!m && (cssid == 0)) ? channel_subsys.default_cssid : cssid; + if (ssid > MAX_SSID || + !channel_subsys.css[real_cssid] || + !channel_subsys.css[real_cssid]->sch_set[ssid]) { + return true; + } + set = channel_subsys.css[real_cssid]->sch_set[ssid]; + return schid > find_last_bit(set->schids_used, + (MAX_SCHID + 1) / sizeof(unsigned long)); +} + +unsigned int css_find_free_chpid(uint8_t cssid) +{ + CssImage *css = channel_subsys.css[cssid]; + unsigned int chpid; + + if (!css) { + return MAX_CHPID + 1; + } + + for (chpid = 0; chpid <= MAX_CHPID; chpid++) { + /* skip reserved chpid */ + if (chpid == VIRTIO_CCW_CHPID) { + continue; + } + if (!css->chpids[chpid].in_use) { + return chpid; + } + } + return MAX_CHPID + 1; +} + +static int css_add_chpid(uint8_t cssid, uint8_t chpid, uint8_t type, + bool is_virt) +{ + CssImage *css; + + trace_css_chpid_add(cssid, chpid, type); + css = channel_subsys.css[cssid]; + if (!css) { + return -EINVAL; + } + if (css->chpids[chpid].in_use) { + return -EEXIST; + } + css->chpids[chpid].in_use = 1; + css->chpids[chpid].type = type; + css->chpids[chpid].is_virtual = is_virt; + + css_generate_chp_crws(cssid, chpid); + + return 0; +} + +void css_sch_build_virtual_schib(SubchDev *sch, uint8_t chpid, uint8_t type) +{ + SCHIB *schib = &sch->curr_status; + int i; + CssImage *css = channel_subsys.css[sch->cssid]; + + assert(css != NULL); + memset(&schib->pmcw, 0, sizeof(PMCW)); + schib->pmcw.flags |= PMCW_FLAGS_MASK_DNV; + schib->pmcw.devno = sch->devno; + /* single path */ + schib->pmcw.pim = 0x80; + schib->pmcw.pom = 0xff; + schib->pmcw.pam = 0x80; + schib->pmcw.chpid[0] = chpid; + if (!css->chpids[chpid].in_use) { + css_add_chpid(sch->cssid, chpid, type, true); + } + + memset(&schib->scsw, 0, sizeof(SCSW)); + schib->mba = 0; + for (i = 0; i < ARRAY_SIZE(schib->mda); i++) { + schib->mda[i] = 0; + } +} + +SubchDev *css_find_subch(uint8_t m, uint8_t cssid, uint8_t ssid, uint16_t schid) +{ + uint8_t real_cssid; + + real_cssid = (!m && (cssid == 0)) ? channel_subsys.default_cssid : cssid; + + if (!channel_subsys.css[real_cssid]) { + return NULL; + } + + if (!channel_subsys.css[real_cssid]->sch_set[ssid]) { + return NULL; + } + + return channel_subsys.css[real_cssid]->sch_set[ssid]->sch[schid]; +} + +/** + * Return free device number in subchannel set. + * + * Return index of the first free device number in the subchannel set + * identified by @p cssid and @p ssid, beginning the search at @p + * start and wrapping around at MAX_DEVNO. Return a value exceeding + * MAX_SCHID if there are no free device numbers in the subchannel + * set. + */ +static uint32_t css_find_free_devno(uint8_t cssid, uint8_t ssid, + uint16_t start) +{ + uint32_t round; + + for (round = 0; round <= MAX_DEVNO; round++) { + uint16_t devno = (start + round) % MAX_DEVNO; + + if (!css_devno_used(cssid, ssid, devno)) { + return devno; + } + } + return MAX_DEVNO + 1; +} + +/** + * Return first free subchannel (id) in subchannel set. + * + * Return index of the first free subchannel in the subchannel set + * identified by @p cssid and @p ssid, if there is any. Return a value + * exceeding MAX_SCHID if there are no free subchannels in the + * subchannel set. 
+ */ +static uint32_t css_find_free_subch(uint8_t cssid, uint8_t ssid) +{ + uint32_t schid; + + for (schid = 0; schid <= MAX_SCHID; schid++) { + if (!css_find_subch(1, cssid, ssid, schid)) { + return schid; + } + } + return MAX_SCHID + 1; +} + +/** + * Return first free subchannel (id) in subchannel set for a device number + * + * Verify the device number @p devno is not used yet in the subchannel + * set identified by @p cssid and @p ssid. Set @p schid to the index + * of the first free subchannel in the subchannel set, if there is + * any. Return true if everything succeeded and false otherwise. + */ +static bool css_find_free_subch_for_devno(uint8_t cssid, uint8_t ssid, + uint16_t devno, uint16_t *schid, + Error **errp) +{ + uint32_t free_schid; + + assert(schid); + if (css_devno_used(cssid, ssid, devno)) { + error_setg(errp, "Device %x.%x.%04x already exists", + cssid, ssid, devno); + return false; + } + free_schid = css_find_free_subch(cssid, ssid); + if (free_schid > MAX_SCHID) { + error_setg(errp, "No free subchannel found for %x.%x.%04x", + cssid, ssid, devno); + return false; + } + *schid = free_schid; + return true; +} + +/** + * Return first free subchannel (id) and device number + * + * Locate the first free subchannel and first free device number in + * any of the subchannel sets of the channel subsystem identified by + * @p cssid. Return false if no free subchannel / device number could + * be found. Otherwise set @p ssid, @p devno and @p schid to identify + * the available subchannel and device number and return true. + * + * May modify @p ssid, @p devno and / or @p schid even if no free + * subchannel / device number could be found. + */ +static bool css_find_free_subch_and_devno(uint8_t cssid, uint8_t *ssid, + uint16_t *devno, uint16_t *schid, + Error **errp) +{ + uint32_t free_schid, free_devno; + + assert(ssid && devno && schid); + for (*ssid = 0; *ssid <= MAX_SSID; (*ssid)++) { + free_schid = css_find_free_subch(cssid, *ssid); + if (free_schid > MAX_SCHID) { + continue; + } + free_devno = css_find_free_devno(cssid, *ssid, free_schid); + if (free_devno > MAX_DEVNO) { + continue; + } + *schid = free_schid; + *devno = free_devno; + return true; + } + error_setg(errp, "Virtual channel subsystem is full!"); + return false; +} + +bool css_subch_visible(SubchDev *sch) +{ + if (sch->ssid > channel_subsys.max_ssid) { + return false; + } + + if (sch->cssid != channel_subsys.default_cssid) { + return (channel_subsys.max_cssid > 0); + } + + return true; +} + +bool css_present(uint8_t cssid) +{ + return (channel_subsys.css[cssid] != NULL); +} + +bool css_devno_used(uint8_t cssid, uint8_t ssid, uint16_t devno) +{ + if (!channel_subsys.css[cssid]) { + return false; + } + if (!channel_subsys.css[cssid]->sch_set[ssid]) { + return false; + } + + return !!test_bit(devno, + channel_subsys.css[cssid]->sch_set[ssid]->devnos_used); +} + +void css_subch_assign(uint8_t cssid, uint8_t ssid, uint16_t schid, + uint16_t devno, SubchDev *sch) +{ + CssImage *css; + SubchSet *s_set; + + trace_css_assign_subch(sch ? 
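+                          /* a NULL sch means the slot is being cleared */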
"assign" : "deassign", cssid, ssid, schid, + devno); + if (!channel_subsys.css[cssid]) { + fprintf(stderr, + "Suspicious call to %s (%x.%x.%04x) for non-existing css!\n", + __func__, cssid, ssid, schid); + return; + } + css = channel_subsys.css[cssid]; + + if (!css->sch_set[ssid]) { + css->sch_set[ssid] = g_new0(SubchSet, 1); + } + s_set = css->sch_set[ssid]; + + s_set->sch[schid] = sch; + if (sch) { + set_bit(schid, s_set->schids_used); + set_bit(devno, s_set->devnos_used); + } else { + clear_bit(schid, s_set->schids_used); + clear_bit(devno, s_set->devnos_used); + } +} + +void css_crw_add_to_queue(CRW crw) +{ + CrwContainer *crw_cont; + + trace_css_crw((crw.flags & CRW_FLAGS_MASK_RSC) >> 8, + crw.flags & CRW_FLAGS_MASK_ERC, + crw.rsid, + (crw.flags & CRW_FLAGS_MASK_C) ? "(chained)" : ""); + + /* TODO: Maybe use a static crw pool? */ + crw_cont = g_try_new0(CrwContainer, 1); + if (!crw_cont) { + channel_subsys.crws_lost = true; + return; + } + + crw_cont->crw = crw; + + QTAILQ_INSERT_TAIL(&channel_subsys.pending_crws, crw_cont, sibling); + + if (channel_subsys.do_crw_mchk) { + channel_subsys.do_crw_mchk = false; + /* Inject crw pending machine check. */ + s390_crw_mchk(); + } +} + +void css_queue_crw(uint8_t rsc, uint8_t erc, int solicited, + int chain, uint16_t rsid) +{ + CRW crw; + + crw.flags = (rsc << 8) | erc; + if (solicited) { + crw.flags |= CRW_FLAGS_MASK_S; + } + if (chain) { + crw.flags |= CRW_FLAGS_MASK_C; + } + crw.rsid = rsid; + if (channel_subsys.crws_lost) { + crw.flags |= CRW_FLAGS_MASK_R; + channel_subsys.crws_lost = false; + } + + css_crw_add_to_queue(crw); +} + +void css_generate_sch_crws(uint8_t cssid, uint8_t ssid, uint16_t schid, + int hotplugged, int add) +{ + uint8_t guest_cssid; + bool chain_crw; + + if (add && !hotplugged) { + return; + } + if (channel_subsys.max_cssid == 0) { + /* Default cssid shows up as 0. */ + guest_cssid = (cssid == channel_subsys.default_cssid) ? 0 : cssid; + } else { + /* Show real cssid to the guest. */ + guest_cssid = cssid; + } + /* + * Only notify for higher subchannel sets/channel subsystems if the + * guest has enabled it. + */ + if ((ssid > channel_subsys.max_ssid) || + (guest_cssid > channel_subsys.max_cssid) || + ((channel_subsys.max_cssid == 0) && + (cssid != channel_subsys.default_cssid))) { + return; + } + chain_crw = (channel_subsys.max_ssid > 0) || + (channel_subsys.max_cssid > 0); + css_queue_crw(CRW_RSC_SUBCH, CRW_ERC_IPI, 0, chain_crw ? 
1 : 0, schid);
+    if (chain_crw) {
+        css_queue_crw(CRW_RSC_SUBCH, CRW_ERC_IPI, 0, 0,
+                      (guest_cssid << 8) | (ssid << 4));
+    }
+    /* CRW_ERC_IPI --> clear pending interrupts */
+    css_clear_io_interrupt(css_do_build_subchannel_id(cssid, ssid), schid);
+}
+
+void css_generate_chp_crws(uint8_t cssid, uint8_t chpid)
+{
+    /* TODO */
+}
+
+void css_generate_css_crws(uint8_t cssid)
+{
+    if (!channel_subsys.sei_pending) {
+        css_queue_crw(CRW_RSC_CSS, CRW_ERC_EVENT, 0, 0, cssid);
+    }
+    channel_subsys.sei_pending = true;
+}
+
+void css_clear_sei_pending(void)
+{
+    channel_subsys.sei_pending = false;
+}
+
+int css_enable_mcsse(void)
+{
+    trace_css_enable_facility("mcsse");
+    channel_subsys.max_cssid = MAX_CSSID;
+    return 0;
+}
+
+int css_enable_mss(void)
+{
+    trace_css_enable_facility("mss");
+    channel_subsys.max_ssid = MAX_SSID;
+    return 0;
+}
+
+void css_reset_sch(SubchDev *sch)
+{
+    SCHIB *schib = &sch->curr_status;
+
+    if ((schib->pmcw.flags & PMCW_FLAGS_MASK_ENA) != 0 && sch->disable_cb) {
+        sch->disable_cb(sch);
+    }
+
+    schib->pmcw.intparm = 0;
+    schib->pmcw.flags &= ~(PMCW_FLAGS_MASK_ISC | PMCW_FLAGS_MASK_ENA |
+                           PMCW_FLAGS_MASK_LM | PMCW_FLAGS_MASK_MME |
+                           PMCW_FLAGS_MASK_MP | PMCW_FLAGS_MASK_TF);
+    schib->pmcw.flags |= PMCW_FLAGS_MASK_DNV;
+    schib->pmcw.devno = sch->devno;
+    schib->pmcw.pim = 0x80;
+    schib->pmcw.lpm = schib->pmcw.pim;
+    schib->pmcw.pnom = 0;
+    schib->pmcw.lpum = 0;
+    schib->pmcw.mbi = 0;
+    schib->pmcw.pom = 0xff;
+    schib->pmcw.pam = 0x80;
+    schib->pmcw.chars &= ~(PMCW_CHARS_MASK_MBFC | PMCW_CHARS_MASK_XMWME |
+                           PMCW_CHARS_MASK_CSENSE);
+
+    memset(&schib->scsw, 0, sizeof(schib->scsw));
+    schib->mba = 0;
+
+    sch->channel_prog = 0x0;
+    sch->last_cmd_valid = false;
+    sch->thinint_active = false;
+}
+
+void css_reset(void)
+{
+    CrwContainer *crw_cont;
+
+    /* Clean up monitoring. */
+    channel_subsys.chnmon_active = false;
+    channel_subsys.chnmon_area = 0;
+
+    /* Clear pending CRWs. */
+    while ((crw_cont = QTAILQ_FIRST(&channel_subsys.pending_crws))) {
+        QTAILQ_REMOVE(&channel_subsys.pending_crws, crw_cont, sibling);
+        g_free(crw_cont);
+    }
+    channel_subsys.sei_pending = false;
+    channel_subsys.do_crw_mchk = true;
+    channel_subsys.crws_lost = false;
+
+    /* Reset maximum ids. */
+    channel_subsys.max_cssid = 0;
+    channel_subsys.max_ssid = 0;
+}
+
+static void get_css_devid(Object *obj, Visitor *v, const char *name,
+                          void *opaque, Error **errp)
+{
+    Property *prop = opaque;
+    CssDevId *dev_id = object_field_prop_ptr(obj, prop);
+    char buffer[] = "xx.x.xxxx";
+    char *p = buffer;
+    int r;
+
+    if (dev_id->valid) {
+
+        r = snprintf(buffer, sizeof(buffer), "%02x.%1x.%04x", dev_id->cssid,
+                     dev_id->ssid, dev_id->devid);
+        assert(r == sizeof(buffer) - 1);
+
+        /* drop leading zero */
+        if (dev_id->cssid <= 0xf) {
+            p++;
+        }
+    } else {
+        snprintf(buffer, sizeof(buffer), "<unset>");
+    }
+
+    visit_type_str(v, name, &p, errp);
+}
+
+/*
+ * parse <cssid>.<ssid>.<devid>
and assert valid range for cssid/ssid + */ +static void set_css_devid(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + Property *prop = opaque; + CssDevId *dev_id = object_field_prop_ptr(obj, prop); + char *str; + int num, n1, n2; + unsigned int cssid, ssid, devid; + + if (!visit_type_str(v, name, &str, errp)) { + return; + } + + num = sscanf(str, "%2x.%1x%n.%4x%n", &cssid, &ssid, &n1, &devid, &n2); + if (num != 3 || (n2 - n1) != 5 || strlen(str) != n2) { + error_set_from_qdev_prop_error(errp, EINVAL, obj, name, str); + goto out; + } + if ((cssid > MAX_CSSID) || (ssid > MAX_SSID)) { + error_setg(errp, "Invalid cssid or ssid: cssid %x, ssid %x", + cssid, ssid); + goto out; + } + + dev_id->cssid = cssid; + dev_id->ssid = ssid; + dev_id->devid = devid; + dev_id->valid = true; + +out: + g_free(str); +} + +const PropertyInfo css_devid_propinfo = { + .name = "str", + .description = "Identifier of an I/O device in the channel " + "subsystem, example: fe.1.23ab", + .get = get_css_devid, + .set = set_css_devid, +}; + +const PropertyInfo css_devid_ro_propinfo = { + .name = "str", + .description = "Read-only identifier of an I/O device in the channel " + "subsystem, example: fe.1.23ab", + .get = get_css_devid, +}; + +SubchDev *css_create_sch(CssDevId bus_id, Error **errp) +{ + uint16_t schid = 0; + SubchDev *sch; + + if (bus_id.valid) { + if (!channel_subsys.css[bus_id.cssid]) { + css_create_css_image(bus_id.cssid, false); + } + + if (!css_find_free_subch_for_devno(bus_id.cssid, bus_id.ssid, + bus_id.devid, &schid, errp)) { + return NULL; + } + } else { + for (bus_id.cssid = channel_subsys.default_cssid;;) { + if (!channel_subsys.css[bus_id.cssid]) { + css_create_css_image(bus_id.cssid, false); + } + + if (css_find_free_subch_and_devno(bus_id.cssid, &bus_id.ssid, + &bus_id.devid, &schid, + NULL)) { + break; + } + bus_id.cssid = (bus_id.cssid + 1) % MAX_CSSID; + if (bus_id.cssid == channel_subsys.default_cssid) { + error_setg(errp, "Virtual channel subsystem is full!"); + return NULL; + } + } + } + + sch = g_new0(SubchDev, 1); + sch->cssid = bus_id.cssid; + sch->ssid = bus_id.ssid; + sch->devno = bus_id.devid; + sch->schid = schid; + css_subch_assign(sch->cssid, sch->ssid, schid, sch->devno, sch); + return sch; +} + +static int css_sch_get_chpids(SubchDev *sch, CssDevId *dev_id) +{ + char *fid_path; + FILE *fd; + uint32_t chpid[8]; + int i; + SCHIB *schib = &sch->curr_status; + + fid_path = g_strdup_printf("/sys/bus/css/devices/%x.%x.%04x/chpids", + dev_id->cssid, dev_id->ssid, dev_id->devid); + fd = fopen(fid_path, "r"); + if (fd == NULL) { + error_report("%s: open %s failed", __func__, fid_path); + g_free(fid_path); + return -EINVAL; + } + + if (fscanf(fd, "%x %x %x %x %x %x %x %x", + &chpid[0], &chpid[1], &chpid[2], &chpid[3], + &chpid[4], &chpid[5], &chpid[6], &chpid[7]) != 8) { + fclose(fd); + g_free(fid_path); + return -EINVAL; + } + + for (i = 0; i < ARRAY_SIZE(schib->pmcw.chpid); i++) { + schib->pmcw.chpid[i] = chpid[i]; + } + + fclose(fd); + g_free(fid_path); + + return 0; +} + +static int css_sch_get_path_masks(SubchDev *sch, CssDevId *dev_id) +{ + char *fid_path; + FILE *fd; + uint32_t pim, pam, pom; + SCHIB *schib = &sch->curr_status; + + fid_path = g_strdup_printf("/sys/bus/css/devices/%x.%x.%04x/pimpampom", + dev_id->cssid, dev_id->ssid, dev_id->devid); + fd = fopen(fid_path, "r"); + if (fd == NULL) { + error_report("%s: open %s failed", __func__, fid_path); + g_free(fid_path); + return -EINVAL; + } + + if (fscanf(fd, "%x %x %x", &pim, &pam, &pom) != 3) { + 
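+        /* unexpected pimpampom sysfs content */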
fclose(fd); + g_free(fid_path); + return -EINVAL; + } + + schib->pmcw.pim = pim; + schib->pmcw.pam = pam; + schib->pmcw.pom = pom; + fclose(fd); + g_free(fid_path); + + return 0; +} + +static int css_sch_get_chpid_type(uint8_t chpid, uint32_t *type, + CssDevId *dev_id) +{ + char *fid_path; + FILE *fd; + + fid_path = g_strdup_printf("/sys/devices/css%x/chp0.%02x/type", + dev_id->cssid, chpid); + fd = fopen(fid_path, "r"); + if (fd == NULL) { + error_report("%s: open %s failed", __func__, fid_path); + g_free(fid_path); + return -EINVAL; + } + + if (fscanf(fd, "%x", type) != 1) { + fclose(fd); + g_free(fid_path); + return -EINVAL; + } + + fclose(fd); + g_free(fid_path); + + return 0; +} + +/* + * We currently retrieve the real device information from sysfs to build the + * guest subchannel information block without considering the migration feature. + * We need to revisit this problem when we want to add migration support. + */ +int css_sch_build_schib(SubchDev *sch, CssDevId *dev_id) +{ + CssImage *css = channel_subsys.css[sch->cssid]; + SCHIB *schib = &sch->curr_status; + uint32_t type; + int i, ret; + + assert(css != NULL); + memset(&schib->pmcw, 0, sizeof(PMCW)); + schib->pmcw.flags |= PMCW_FLAGS_MASK_DNV; + /* We are dealing with I/O subchannels only. */ + schib->pmcw.devno = sch->devno; + + /* Grab path mask from sysfs. */ + ret = css_sch_get_path_masks(sch, dev_id); + if (ret) { + return ret; + } + + /* Grab chpids from sysfs. */ + ret = css_sch_get_chpids(sch, dev_id); + if (ret) { + return ret; + } + + /* Build chpid type. */ + for (i = 0; i < ARRAY_SIZE(schib->pmcw.chpid); i++) { + if (schib->pmcw.chpid[i] && !css->chpids[schib->pmcw.chpid[i]].in_use) { + ret = css_sch_get_chpid_type(schib->pmcw.chpid[i], &type, dev_id); + if (ret) { + return ret; + } + css_add_chpid(sch->cssid, schib->pmcw.chpid[i], type, false); + } + } + + memset(&schib->scsw, 0, sizeof(SCSW)); + schib->mba = 0; + for (i = 0; i < ARRAY_SIZE(schib->mda); i++) { + schib->mda[i] = 0; + } + + return 0; +} diff --git a/hw/s390x/event-facility.c b/hw/s390x/event-facility.c new file mode 100644 index 000000000..6fa47b889 --- /dev/null +++ b/hw/s390x/event-facility.c @@ -0,0 +1,539 @@ +/* + * SCLP + * Event Facility + * handles SCLP event types + * - Signal Quiesce - system power down + * - ASCII Console Data - VT220 read and write + * + * Copyright IBM, Corp. 2012 + * + * Authors: + * Heinz Graalfs + * + * This work is licensed under the terms of the GNU GPL, version 2 or (at your + * option) any later version. See the COPYING file in the top-level directory. + * + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/module.h" + +#include "hw/s390x/sclp.h" +#include "migration/vmstate.h" +#include "hw/s390x/event-facility.h" + +typedef struct SCLPEventsBus { + BusState qbus; +} SCLPEventsBus; + +/* we need to save 32 bit chunks for compatibility */ +#ifdef HOST_WORDS_BIGENDIAN +#define RECV_MASK_LOWER 1 +#define RECV_MASK_UPPER 0 +#else /* little endian host */ +#define RECV_MASK_LOWER 0 +#define RECV_MASK_UPPER 1 +#endif + +struct SCLPEventFacility { + SysBusDevice parent_obj; + SCLPEventsBus sbus; + SCLPEvent quiesce, cpu_hotplug; + /* guest's receive mask */ + union { + uint32_t receive_mask_pieces[2]; + sccb_mask_t receive_mask; + }; + /* + * when false, we keep the same broken, backwards compatible behaviour as + * before, allowing only masks of size exactly 4; when true, we implement + * the architecture correctly, allowing all valid mask sizes. Needed for + * migration toward older versions. 
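+ * Concretely, with this set to false write_event_mask() rejects any
+ * WRITE EVENT MASK request whose mask length is not exactly 4 bytes.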
+ */ + bool allow_all_mask_sizes; + /* length of the receive mask */ + uint16_t mask_length; +}; + +/* return true if any child has event pending set */ +static bool event_pending(SCLPEventFacility *ef) +{ + BusChild *kid; + SCLPEvent *event; + SCLPEventClass *event_class; + + QTAILQ_FOREACH(kid, &ef->sbus.qbus.children, sibling) { + DeviceState *qdev = kid->child; + event = DO_UPCAST(SCLPEvent, qdev, qdev); + event_class = SCLP_EVENT_GET_CLASS(event); + if (event->event_pending && + event_class->get_send_mask() & ef->receive_mask) { + return true; + } + } + return false; +} + +static sccb_mask_t get_host_send_mask(SCLPEventFacility *ef) +{ + sccb_mask_t mask; + BusChild *kid; + SCLPEventClass *child; + + mask = 0; + + QTAILQ_FOREACH(kid, &ef->sbus.qbus.children, sibling) { + DeviceState *qdev = kid->child; + child = SCLP_EVENT_GET_CLASS((SCLPEvent *) qdev); + mask |= child->get_send_mask(); + } + return mask; +} + +static sccb_mask_t get_host_receive_mask(SCLPEventFacility *ef) +{ + sccb_mask_t mask; + BusChild *kid; + SCLPEventClass *child; + + mask = 0; + + QTAILQ_FOREACH(kid, &ef->sbus.qbus.children, sibling) { + DeviceState *qdev = kid->child; + child = SCLP_EVENT_GET_CLASS((SCLPEvent *) qdev); + mask |= child->get_receive_mask(); + } + return mask; +} + +static uint16_t write_event_length_check(SCCB *sccb) +{ + int slen; + unsigned elen = 0; + EventBufferHeader *event; + WriteEventData *wed = (WriteEventData *) sccb; + + event = (EventBufferHeader *) &wed->ebh; + for (slen = sccb_data_len(sccb); slen > 0; slen -= elen) { + elen = be16_to_cpu(event->length); + if (elen < sizeof(*event) || elen > slen) { + return SCLP_RC_EVENT_BUFFER_SYNTAX_ERROR; + } + event = (void *) event + elen; + } + if (slen) { + return SCLP_RC_INCONSISTENT_LENGTHS; + } + return SCLP_RC_NORMAL_COMPLETION; +} + +static uint16_t handle_write_event_buf(SCLPEventFacility *ef, + EventBufferHeader *event_buf, SCCB *sccb) +{ + uint16_t rc; + BusChild *kid; + SCLPEvent *event; + SCLPEventClass *ec; + + rc = SCLP_RC_INVALID_FUNCTION; + + QTAILQ_FOREACH(kid, &ef->sbus.qbus.children, sibling) { + DeviceState *qdev = kid->child; + event = (SCLPEvent *) qdev; + ec = SCLP_EVENT_GET_CLASS(event); + + if (ec->write_event_data && + ec->can_handle_event(event_buf->type)) { + rc = ec->write_event_data(event, event_buf); + break; + } + } + return rc; +} + +static uint16_t handle_sccb_write_events(SCLPEventFacility *ef, SCCB *sccb) +{ + uint16_t rc; + int slen; + unsigned elen = 0; + EventBufferHeader *event_buf; + WriteEventData *wed = (WriteEventData *) sccb; + + event_buf = &wed->ebh; + rc = SCLP_RC_NORMAL_COMPLETION; + + /* loop over all contained event buffers */ + for (slen = sccb_data_len(sccb); slen > 0; slen -= elen) { + elen = be16_to_cpu(event_buf->length); + + /* in case of a previous error mark all trailing buffers + * as not accepted */ + if (rc != SCLP_RC_NORMAL_COMPLETION) { + event_buf->flags &= ~(SCLP_EVENT_BUFFER_ACCEPTED); + } else { + rc = handle_write_event_buf(ef, event_buf, sccb); + } + event_buf = (void *) event_buf + elen; + } + return rc; +} + +static void write_event_data(SCLPEventFacility *ef, SCCB *sccb) +{ + if (sccb->h.function_code != SCLP_FC_NORMAL_WRITE) { + sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_FUNCTION); + return; + } + if (be16_to_cpu(sccb->h.length) < 8) { + sccb->h.response_code = cpu_to_be16(SCLP_RC_INSUFFICIENT_SCCB_LENGTH); + return; + } + /* first do a sanity check of the write events */ + sccb->h.response_code = cpu_to_be16(write_event_length_check(sccb)); + + /* if no early 
error, then execute */ + if (sccb->h.response_code == be16_to_cpu(SCLP_RC_NORMAL_COMPLETION)) { + sccb->h.response_code = + cpu_to_be16(handle_sccb_write_events(ef, sccb)); + } +} + +static uint16_t handle_sccb_read_events(SCLPEventFacility *ef, SCCB *sccb, + sccb_mask_t mask) +{ + uint16_t rc; + int slen; + unsigned elen; + BusChild *kid; + SCLPEvent *event; + SCLPEventClass *ec; + EventBufferHeader *event_buf; + ReadEventData *red = (ReadEventData *) sccb; + + event_buf = &red->ebh; + event_buf->length = 0; + slen = sccb_data_len(sccb); + + rc = SCLP_RC_NO_EVENT_BUFFERS_STORED; + + QTAILQ_FOREACH(kid, &ef->sbus.qbus.children, sibling) { + DeviceState *qdev = kid->child; + event = (SCLPEvent *) qdev; + ec = SCLP_EVENT_GET_CLASS(event); + + if (mask & ec->get_send_mask()) { + if (ec->read_event_data(event, event_buf, &slen)) { + elen = be16_to_cpu(event_buf->length); + event_buf = (EventBufferHeader *) ((char *)event_buf + elen); + rc = SCLP_RC_NORMAL_COMPLETION; + } + } + } + + if (sccb->h.control_mask[2] & SCLP_VARIABLE_LENGTH_RESPONSE) { + /* architecture suggests to reset variable-length-response bit */ + sccb->h.control_mask[2] &= ~SCLP_VARIABLE_LENGTH_RESPONSE; + /* with a new length value */ + sccb->h.length = cpu_to_be16(SCCB_SIZE - slen); + } + return rc; +} + +/* copy up to src_len bytes and fill the rest of dst with zeroes */ +static void copy_mask(uint8_t *dst, uint8_t *src, uint16_t dst_len, + uint16_t src_len) +{ + int i; + + for (i = 0; i < dst_len; i++) { + dst[i] = i < src_len ? src[i] : 0; + } +} + +static void read_event_data(SCLPEventFacility *ef, SCCB *sccb) +{ + sccb_mask_t sclp_active_selection_mask; + sccb_mask_t sclp_cp_receive_mask; + + ReadEventData *red = (ReadEventData *) sccb; + + if (be16_to_cpu(sccb->h.length) != SCCB_SIZE) { + sccb->h.response_code = cpu_to_be16(SCLP_RC_INSUFFICIENT_SCCB_LENGTH); + return; + } + + switch (sccb->h.function_code) { + case SCLP_UNCONDITIONAL_READ: + sccb->h.response_code = cpu_to_be16( + handle_sccb_read_events(ef, sccb, ef->receive_mask)); + break; + case SCLP_SELECTIVE_READ: + /* get active selection mask */ + sclp_cp_receive_mask = ef->receive_mask; + + copy_mask((uint8_t *)&sclp_active_selection_mask, (uint8_t *)&red->mask, + sizeof(sclp_active_selection_mask), ef->mask_length); + sclp_active_selection_mask = be64_to_cpu(sclp_active_selection_mask); + if (!sclp_cp_receive_mask || + (sclp_active_selection_mask & ~sclp_cp_receive_mask)) { + sccb->h.response_code = + cpu_to_be16(SCLP_RC_INVALID_SELECTION_MASK); + } else { + sccb->h.response_code = cpu_to_be16( + handle_sccb_read_events(ef, sccb, sclp_active_selection_mask)); + } + break; + default: + sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_FUNCTION); + } +} + +static void write_event_mask(SCLPEventFacility *ef, SCCB *sccb) +{ + WriteEventMask *we_mask = (WriteEventMask *) sccb; + uint16_t mask_length = be16_to_cpu(we_mask->mask_length); + sccb_mask_t tmp_mask; + + if (!mask_length || (mask_length > SCLP_EVENT_MASK_LEN_MAX) || + ((mask_length != 4) && !ef->allow_all_mask_sizes)) { + sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_MASK_LENGTH); + return; + } + + /* + * Note: We currently only support masks up to 8 byte length; + * the remainder is filled up with zeroes. Older Linux + * kernels use a 4 byte mask length, newer ones can use both + * 8 or 4 depending on what is available on the host. 
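+     * Since the mask is held internally as a host-endian 64 bit value,
+     * a 4 byte guest mask ends up in its most significant half.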
+ */ + + /* keep track of the guest's capability masks */ + copy_mask((uint8_t *)&tmp_mask, WEM_CP_RECEIVE_MASK(we_mask, mask_length), + sizeof(tmp_mask), mask_length); + ef->receive_mask = be64_to_cpu(tmp_mask); + + /* return the SCLP's capability masks to the guest */ + tmp_mask = cpu_to_be64(get_host_receive_mask(ef)); + copy_mask(WEM_RECEIVE_MASK(we_mask, mask_length), (uint8_t *)&tmp_mask, + mask_length, sizeof(tmp_mask)); + tmp_mask = cpu_to_be64(get_host_send_mask(ef)); + copy_mask(WEM_SEND_MASK(we_mask, mask_length), (uint8_t *)&tmp_mask, + mask_length, sizeof(tmp_mask)); + + sccb->h.response_code = cpu_to_be16(SCLP_RC_NORMAL_COMPLETION); + ef->mask_length = mask_length; +} + +/* qemu object creation and initialization functions */ + +#define TYPE_SCLP_EVENTS_BUS "s390-sclp-events-bus" + +static const TypeInfo sclp_events_bus_info = { + .name = TYPE_SCLP_EVENTS_BUS, + .parent = TYPE_BUS, +}; + +static void command_handler(SCLPEventFacility *ef, SCCB *sccb, uint64_t code) +{ + switch (code & SCLP_CMD_CODE_MASK) { + case SCLP_CMD_READ_EVENT_DATA: + read_event_data(ef, sccb); + break; + case SCLP_CMD_WRITE_EVENT_DATA: + write_event_data(ef, sccb); + break; + case SCLP_CMD_WRITE_EVENT_MASK: + write_event_mask(ef, sccb); + break; + } +} + +static bool vmstate_event_facility_mask64_needed(void *opaque) +{ + SCLPEventFacility *ef = opaque; + + return (ef->receive_mask & 0xFFFFFFFF) != 0; +} + +static bool vmstate_event_facility_mask_length_needed(void *opaque) +{ + SCLPEventFacility *ef = opaque; + + return ef->allow_all_mask_sizes; +} + +static const VMStateDescription vmstate_event_facility_mask64 = { + .name = "vmstate-event-facility/mask64", + .version_id = 0, + .minimum_version_id = 0, + .needed = vmstate_event_facility_mask64_needed, + .fields = (VMStateField[]) { + VMSTATE_UINT32(receive_mask_pieces[RECV_MASK_LOWER], SCLPEventFacility), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_event_facility_mask_length = { + .name = "vmstate-event-facility/mask_length", + .version_id = 0, + .minimum_version_id = 0, + .needed = vmstate_event_facility_mask_length_needed, + .fields = (VMStateField[]) { + VMSTATE_UINT16(mask_length, SCLPEventFacility), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_event_facility = { + .name = "vmstate-event-facility", + .version_id = 0, + .minimum_version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_UINT32(receive_mask_pieces[RECV_MASK_UPPER], SCLPEventFacility), + VMSTATE_END_OF_LIST() + }, + .subsections = (const VMStateDescription * []) { + &vmstate_event_facility_mask64, + &vmstate_event_facility_mask_length, + NULL + } +}; + +static void sclp_event_set_allow_all_mask_sizes(Object *obj, bool value, + Error **errp) +{ + SCLPEventFacility *ef = (SCLPEventFacility *)obj; + + ef->allow_all_mask_sizes = value; +} + +static bool sclp_event_get_allow_all_mask_sizes(Object *obj, Error **errp) +{ + SCLPEventFacility *ef = (SCLPEventFacility *)obj; + + return ef->allow_all_mask_sizes; +} + +static void init_event_facility(Object *obj) +{ + SCLPEventFacility *event_facility = EVENT_FACILITY(obj); + DeviceState *sdev = DEVICE(obj); + + event_facility->mask_length = 4; + event_facility->allow_all_mask_sizes = true; + object_property_add_bool(obj, "allow_all_mask_sizes", + sclp_event_get_allow_all_mask_sizes, + sclp_event_set_allow_all_mask_sizes); + + /* Spawn a new bus for SCLP events */ + qbus_init(&event_facility->sbus, sizeof(event_facility->sbus), + TYPE_SCLP_EVENTS_BUS, sdev, NULL); + + 
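+    /*
+     * Both built-in events below are realized onto this bus in
+     * realize_event_facility().
+     */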
object_initialize_child(obj, TYPE_SCLP_QUIESCE, + &event_facility->quiesce, + TYPE_SCLP_QUIESCE); + + object_initialize_child(obj, TYPE_SCLP_CPU_HOTPLUG, + &event_facility->cpu_hotplug, + TYPE_SCLP_CPU_HOTPLUG); +} + +static void realize_event_facility(DeviceState *dev, Error **errp) +{ + SCLPEventFacility *event_facility = EVENT_FACILITY(dev); + + if (!qdev_realize(DEVICE(&event_facility->quiesce), + BUS(&event_facility->sbus), errp)) { + return; + } + if (!qdev_realize(DEVICE(&event_facility->cpu_hotplug), + BUS(&event_facility->sbus), errp)) { + qdev_unrealize(DEVICE(&event_facility->quiesce)); + return; + } +} + +static void reset_event_facility(DeviceState *dev) +{ + SCLPEventFacility *sdev = EVENT_FACILITY(dev); + + sdev->receive_mask = 0; +} + +static void init_event_facility_class(ObjectClass *klass, void *data) +{ + SysBusDeviceClass *sbdc = SYS_BUS_DEVICE_CLASS(klass); + DeviceClass *dc = DEVICE_CLASS(sbdc); + SCLPEventFacilityClass *k = EVENT_FACILITY_CLASS(dc); + + dc->realize = realize_event_facility; + dc->reset = reset_event_facility; + dc->vmsd = &vmstate_event_facility; + set_bit(DEVICE_CATEGORY_MISC, dc->categories); + k->command_handler = command_handler; + k->event_pending = event_pending; +} + +static const TypeInfo sclp_event_facility_info = { + .name = TYPE_SCLP_EVENT_FACILITY, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_init = init_event_facility, + .instance_size = sizeof(SCLPEventFacility), + .class_init = init_event_facility_class, + .class_size = sizeof(SCLPEventFacilityClass), +}; + +static void event_realize(DeviceState *qdev, Error **errp) +{ + SCLPEvent *event = SCLP_EVENT(qdev); + SCLPEventClass *child = SCLP_EVENT_GET_CLASS(event); + + if (child->init) { + int rc = child->init(event); + if (rc < 0) { + error_setg(errp, "SCLP event initialization failed."); + return; + } + } +} + +static void event_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->bus_type = TYPE_SCLP_EVENTS_BUS; + dc->realize = event_realize; +} + +static const TypeInfo sclp_event_type_info = { + .name = TYPE_SCLP_EVENT, + .parent = TYPE_DEVICE, + .instance_size = sizeof(SCLPEvent), + .class_init = event_class_init, + .class_size = sizeof(SCLPEventClass), + .abstract = true, +}; + +static void register_types(void) +{ + type_register_static(&sclp_events_bus_info); + type_register_static(&sclp_event_facility_info); + type_register_static(&sclp_event_type_info); +} + +type_init(register_types) + +BusState *sclp_get_event_facility_bus(void) +{ + Object *busobj; + SCLPEventsBus *sbus; + + busobj = object_resolve_path_type("", TYPE_SCLP_EVENTS_BUS, NULL); + sbus = OBJECT_CHECK(SCLPEventsBus, busobj, TYPE_SCLP_EVENTS_BUS); + if (!sbus) { + return NULL; + } + + return &sbus->qbus; +} diff --git a/hw/s390x/ipl.c b/hw/s390x/ipl.c new file mode 100644 index 000000000..7ddca0127 --- /dev/null +++ b/hw/s390x/ipl.c @@ -0,0 +1,777 @@ +/* + * bootloader support + * + * Copyright IBM, Corp. 2012, 2020 + * + * Authors: + * Christian Borntraeger + * Janosch Frank + * + * This work is licensed under the terms of the GNU GPL, version 2 or (at your + * option) any later version. See the COPYING file in the top-level directory. 
+ * + */ + +#include "qemu/osdep.h" +#include "qemu-common.h" +#include "qemu/datadir.h" +#include "qapi/error.h" +#include "sysemu/reset.h" +#include "sysemu/runstate.h" +#include "sysemu/tcg.h" +#include "elf.h" +#include "hw/loader.h" +#include "hw/qdev-properties.h" +#include "hw/boards.h" +#include "hw/s390x/virtio-ccw.h" +#include "hw/s390x/vfio-ccw.h" +#include "hw/s390x/css.h" +#include "hw/s390x/ebcdic.h" +#include "hw/s390x/pv.h" +#include "ipl.h" +#include "qemu/error-report.h" +#include "qemu/config-file.h" +#include "qemu/cutils.h" +#include "qemu/option.h" +#include "exec/exec-all.h" + +#define KERN_IMAGE_START 0x010000UL +#define LINUX_MAGIC_ADDR 0x010008UL +#define KERN_PARM_AREA 0x010480UL +#define KERN_PARM_AREA_SIZE 0x000380UL +#define INITRD_START 0x800000UL +#define INITRD_PARM_START 0x010408UL +#define PARMFILE_START 0x001000UL +#define ZIPL_IMAGE_START 0x009000UL +#define IPL_PSW_MASK (PSW_MASK_32 | PSW_MASK_64) + +static bool iplb_extended_needed(void *opaque) +{ + S390IPLState *ipl = S390_IPL(object_resolve_path(TYPE_S390_IPL, NULL)); + + return ipl->iplbext_migration; +} + +static const VMStateDescription vmstate_iplb_extended = { + .name = "ipl/iplb_extended", + .version_id = 0, + .minimum_version_id = 0, + .needed = iplb_extended_needed, + .fields = (VMStateField[]) { + VMSTATE_UINT8_ARRAY(reserved_ext, IplParameterBlock, 4096 - 200), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_iplb = { + .name = "ipl/iplb", + .version_id = 0, + .minimum_version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_UINT8_ARRAY(reserved1, IplParameterBlock, 110), + VMSTATE_UINT16(devno, IplParameterBlock), + VMSTATE_UINT8_ARRAY(reserved2, IplParameterBlock, 88), + VMSTATE_END_OF_LIST() + }, + .subsections = (const VMStateDescription*[]) { + &vmstate_iplb_extended, + NULL + } +}; + +static const VMStateDescription vmstate_ipl = { + .name = "ipl", + .version_id = 0, + .minimum_version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_UINT64(compat_start_addr, S390IPLState), + VMSTATE_UINT64(compat_bios_start_addr, S390IPLState), + VMSTATE_STRUCT(iplb, S390IPLState, 0, vmstate_iplb, IplParameterBlock), + VMSTATE_BOOL(iplb_valid, S390IPLState), + VMSTATE_UINT8(cssid, S390IPLState), + VMSTATE_UINT8(ssid, S390IPLState), + VMSTATE_UINT16(devno, S390IPLState), + VMSTATE_END_OF_LIST() + } +}; + +static S390IPLState *get_ipl_device(void) +{ + return S390_IPL(object_resolve_path_type("", TYPE_S390_IPL, NULL)); +} + +static uint64_t bios_translate_addr(void *opaque, uint64_t srcaddr) +{ + uint64_t dstaddr = *(uint64_t *) opaque; + /* + * Assuming that our s390-ccw.img was linked for starting at address 0, + * we can simply add the destination address for the final location + */ + return srcaddr + dstaddr; +} + +static void s390_ipl_realize(DeviceState *dev, Error **errp) +{ + MachineState *ms = MACHINE(qdev_get_machine()); + S390IPLState *ipl = S390_IPL(dev); + uint32_t *ipl_psw; + uint64_t pentry; + char *magic; + int kernel_size; + + int bios_size; + char *bios_filename; + + /* + * Always load the bios if it was enforced, + * even if an external kernel has been defined. 
+ */ + if (!ipl->kernel || ipl->enforce_bios) { + uint64_t fwbase = (MIN(ms->ram_size, 0x80000000U) - 0x200000) & ~0xffffUL; + + bios_filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, ipl->firmware); + if (bios_filename == NULL) { + error_setg(errp, "could not find stage1 bootloader"); + return; + } + + bios_size = load_elf(bios_filename, NULL, + bios_translate_addr, &fwbase, + &ipl->bios_start_addr, NULL, NULL, NULL, 1, + EM_S390, 0, 0); + if (bios_size > 0) { + /* Adjust ELF start address to final location */ + ipl->bios_start_addr += fwbase; + } else { + /* Try to load non-ELF file */ + bios_size = load_image_targphys(bios_filename, ZIPL_IMAGE_START, + 4096); + ipl->bios_start_addr = ZIPL_IMAGE_START; + } + g_free(bios_filename); + + if (bios_size == -1) { + error_setg(errp, "could not load bootloader '%s'", ipl->firmware); + return; + } + + /* default boot target is the bios */ + ipl->start_addr = ipl->bios_start_addr; + } + + if (ipl->kernel) { + kernel_size = load_elf(ipl->kernel, NULL, NULL, NULL, + &pentry, NULL, + NULL, NULL, 1, EM_S390, 0, 0); + if (kernel_size < 0) { + kernel_size = load_image_targphys(ipl->kernel, 0, ms->ram_size); + if (kernel_size < 0) { + error_setg(errp, "could not load kernel '%s'", ipl->kernel); + return; + } + /* if this is Linux use KERN_IMAGE_START */ + magic = rom_ptr(LINUX_MAGIC_ADDR, 6); + if (magic && !memcmp(magic, "S390EP", 6)) { + pentry = KERN_IMAGE_START; + } else { + /* if not Linux load the address of the (short) IPL PSW */ + ipl_psw = rom_ptr(4, 4); + if (ipl_psw) { + pentry = be32_to_cpu(*ipl_psw) & PSW_MASK_SHORT_ADDR; + } else { + error_setg(errp, "Could not get IPL PSW"); + return; + } + } + } + /* + * Is it a Linux kernel (starting at 0x10000)? If yes, we fill in the + * kernel parameters here as well. Note: For old kernels (up to 3.2) + * we can not rely on the ELF entry point - it was 0x800 (the SALIPL + * loader) and it won't work. For this case we force it to 0x10000, too. + */ + if (pentry == KERN_IMAGE_START || pentry == 0x800) { + size_t cmdline_size = strlen(ipl->cmdline) + 1; + char *parm_area = rom_ptr(KERN_PARM_AREA, cmdline_size); + + ipl->start_addr = KERN_IMAGE_START; + /* Overwrite parameters in the kernel image, which are "rom" */ + if (parm_area) { + if (cmdline_size > KERN_PARM_AREA_SIZE) { + error_setg(errp, + "kernel command line exceeds maximum size: %zu > %lu", + cmdline_size, KERN_PARM_AREA_SIZE); + return; + } + + strcpy(parm_area, ipl->cmdline); + } + } else { + ipl->start_addr = pentry; + } + + if (ipl->initrd) { + ram_addr_t initrd_offset; + int initrd_size; + uint64_t *romptr; + + initrd_offset = INITRD_START; + while (kernel_size + 0x100000 > initrd_offset) { + initrd_offset += 0x100000; + } + initrd_size = load_image_targphys(ipl->initrd, initrd_offset, + ms->ram_size - initrd_offset); + if (initrd_size == -1) { + error_setg(errp, "could not load initrd '%s'", ipl->initrd); + return; + } + + /* + * we have to overwrite values in the kernel image, + * which are "rom" + */ + romptr = rom_ptr(INITRD_PARM_START, 16); + if (romptr) { + stq_p(romptr, initrd_offset); + stq_p(romptr + 1, initrd_size); + } + } + } + /* + * Don't ever use the migrated values, they could come from a different + * BIOS and therefore don't work. But still migrate the values, so + * QEMUs relying on it don't break. 
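+ * (QEMU itself only ever reads start_addr and bios_start_addr; the
+ * compat_ copies exist purely for the migration stream.)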
+ */ + ipl->compat_start_addr = ipl->start_addr; + ipl->compat_bios_start_addr = ipl->bios_start_addr; + /* + * Because this Device is not on any bus in the qbus tree (it is + * not a sysbus device and it's not on some other bus like a PCI + * bus) it will not be automatically reset by the 'reset the + * sysbus' hook registered by vl.c like most devices. So we must + * manually register a reset hook for it. + * TODO: there should be a better way to do this. + */ + qemu_register_reset(resettable_cold_reset_fn, dev); +} + +static Property s390_ipl_properties[] = { + DEFINE_PROP_STRING("kernel", S390IPLState, kernel), + DEFINE_PROP_STRING("initrd", S390IPLState, initrd), + DEFINE_PROP_STRING("cmdline", S390IPLState, cmdline), + DEFINE_PROP_STRING("firmware", S390IPLState, firmware), + DEFINE_PROP_STRING("netboot_fw", S390IPLState, netboot_fw), + DEFINE_PROP_BOOL("enforce_bios", S390IPLState, enforce_bios, false), + DEFINE_PROP_BOOL("iplbext_migration", S390IPLState, iplbext_migration, + true), + DEFINE_PROP_END_OF_LIST(), +}; + +static void s390_ipl_set_boot_menu(S390IPLState *ipl) +{ + QemuOptsList *plist = qemu_find_opts("boot-opts"); + QemuOpts *opts = QTAILQ_FIRST(&plist->head); + const char *tmp; + unsigned long splash_time = 0; + + if (!get_boot_device(0)) { + if (boot_menu) { + error_report("boot menu requires a bootindex to be specified for " + "the IPL device"); + } + return; + } + + switch (ipl->iplb.pbt) { + case S390_IPL_TYPE_CCW: + /* In the absence of -boot menu, use zipl parameters */ + if (!qemu_opt_get(opts, "menu")) { + ipl->qipl.qipl_flags |= QIPL_FLAG_BM_OPTS_ZIPL; + return; + } + break; + case S390_IPL_TYPE_QEMU_SCSI: + break; + default: + if (boot_menu) { + error_report("boot menu is not supported for this device type"); + } + return; + } + + if (!boot_menu) { + return; + } + + ipl->qipl.qipl_flags |= QIPL_FLAG_BM_OPTS_CMD; + + tmp = qemu_opt_get(opts, "splash-time"); + + if (tmp && qemu_strtoul(tmp, NULL, 10, &splash_time)) { + error_report("splash-time is invalid, forcing it to 0"); + ipl->qipl.boot_menu_timeout = 0; + return; + } + + if (splash_time > 0xffffffff) { + error_report("splash-time is too large, forcing it to max value"); + ipl->qipl.boot_menu_timeout = 0xffffffff; + return; + } + + ipl->qipl.boot_menu_timeout = cpu_to_be32(splash_time); +} + +#define CCW_DEVTYPE_NONE 0x00 +#define CCW_DEVTYPE_VIRTIO 0x01 +#define CCW_DEVTYPE_VIRTIO_NET 0x02 +#define CCW_DEVTYPE_SCSI 0x03 +#define CCW_DEVTYPE_VFIO 0x04 + +static CcwDevice *s390_get_ccw_device(DeviceState *dev_st, int *devtype) +{ + CcwDevice *ccw_dev = NULL; + int tmp_dt = CCW_DEVTYPE_NONE; + + if (dev_st) { + VirtIONet *virtio_net_dev = (VirtIONet *) + object_dynamic_cast(OBJECT(dev_st), TYPE_VIRTIO_NET); + VirtioCcwDevice *virtio_ccw_dev = (VirtioCcwDevice *) + object_dynamic_cast(OBJECT(qdev_get_parent_bus(dev_st)->parent), + TYPE_VIRTIO_CCW_DEVICE); + VFIOCCWDevice *vfio_ccw_dev = (VFIOCCWDevice *) + object_dynamic_cast(OBJECT(dev_st), TYPE_VFIO_CCW); + + if (virtio_ccw_dev) { + ccw_dev = CCW_DEVICE(virtio_ccw_dev); + if (virtio_net_dev) { + tmp_dt = CCW_DEVTYPE_VIRTIO_NET; + } else { + tmp_dt = CCW_DEVTYPE_VIRTIO; + } + } else if (vfio_ccw_dev) { + ccw_dev = CCW_DEVICE(vfio_ccw_dev); + tmp_dt = CCW_DEVTYPE_VFIO; + } else { + SCSIDevice *sd = (SCSIDevice *) + object_dynamic_cast(OBJECT(dev_st), + TYPE_SCSI_DEVICE); + if (sd) { + SCSIBus *bus = scsi_bus_from_device(sd); + VirtIOSCSI *vdev = container_of(bus, VirtIOSCSI, bus); + VirtIOSCSICcw *scsi_ccw = container_of(vdev, VirtIOSCSICcw, + vdev); + + 
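+                /*
+                 * The SCSI device sits behind a virtio-scsi-ccw
+                 * controller; that controller's CCW proxy is the
+                 * device we can actually boot from.
+                 */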
ccw_dev = (CcwDevice *)object_dynamic_cast(OBJECT(scsi_ccw), + TYPE_CCW_DEVICE); + tmp_dt = CCW_DEVTYPE_SCSI; + } + } + } + if (devtype) { + *devtype = tmp_dt; + } + return ccw_dev; +} + +static bool s390_gen_initial_iplb(S390IPLState *ipl) +{ + DeviceState *dev_st; + CcwDevice *ccw_dev = NULL; + SCSIDevice *sd; + int devtype; + + dev_st = get_boot_device(0); + if (dev_st) { + ccw_dev = s390_get_ccw_device(dev_st, &devtype); + } + + /* + * Currently allow IPL only from CCW devices. + */ + if (ccw_dev) { + switch (devtype) { + case CCW_DEVTYPE_SCSI: + sd = SCSI_DEVICE(dev_st); + ipl->iplb.len = cpu_to_be32(S390_IPLB_MIN_QEMU_SCSI_LEN); + ipl->iplb.blk0_len = + cpu_to_be32(S390_IPLB_MIN_QEMU_SCSI_LEN - S390_IPLB_HEADER_LEN); + ipl->iplb.pbt = S390_IPL_TYPE_QEMU_SCSI; + ipl->iplb.scsi.lun = cpu_to_be32(sd->lun); + ipl->iplb.scsi.target = cpu_to_be16(sd->id); + ipl->iplb.scsi.channel = cpu_to_be16(sd->channel); + ipl->iplb.scsi.devno = cpu_to_be16(ccw_dev->sch->devno); + ipl->iplb.scsi.ssid = ccw_dev->sch->ssid & 3; + break; + case CCW_DEVTYPE_VFIO: + ipl->iplb.len = cpu_to_be32(S390_IPLB_MIN_CCW_LEN); + ipl->iplb.pbt = S390_IPL_TYPE_CCW; + ipl->iplb.ccw.devno = cpu_to_be16(ccw_dev->sch->devno); + ipl->iplb.ccw.ssid = ccw_dev->sch->ssid & 3; + break; + case CCW_DEVTYPE_VIRTIO_NET: + ipl->netboot = true; + /* Fall through to CCW_DEVTYPE_VIRTIO case */ + case CCW_DEVTYPE_VIRTIO: + ipl->iplb.len = cpu_to_be32(S390_IPLB_MIN_CCW_LEN); + ipl->iplb.blk0_len = + cpu_to_be32(S390_IPLB_MIN_CCW_LEN - S390_IPLB_HEADER_LEN); + ipl->iplb.pbt = S390_IPL_TYPE_CCW; + ipl->iplb.ccw.devno = cpu_to_be16(ccw_dev->sch->devno); + ipl->iplb.ccw.ssid = ccw_dev->sch->ssid & 3; + break; + } + + if (!s390_ipl_set_loadparm(ipl->iplb.loadparm)) { + ipl->iplb.flags |= DIAG308_FLAGS_LP_VALID; + } + + return true; + } + + return false; +} + +int s390_ipl_set_loadparm(uint8_t *loadparm) +{ + MachineState *machine = MACHINE(qdev_get_machine()); + char *lp = object_property_get_str(OBJECT(machine), "loadparm", NULL); + + if (lp) { + int i; + + /* lp is an uppercase string without leading/embedded spaces */ + for (i = 0; i < 8 && lp[i]; i++) { + loadparm[i] = ascii2ebcdic[(uint8_t) lp[i]]; + } + + if (i < 8) { + memset(loadparm + i, 0x40, 8 - i); /* fill with EBCDIC spaces */ + } + + g_free(lp); + return 0; + } + + return -1; +} + +static int load_netboot_image(Error **errp) +{ + MachineState *ms = MACHINE(qdev_get_machine()); + S390IPLState *ipl = get_ipl_device(); + char *netboot_filename; + MemoryRegion *sysmem = get_system_memory(); + MemoryRegion *mr = NULL; + void *ram_ptr = NULL; + int img_size = -1; + + mr = memory_region_find(sysmem, 0, 1).mr; + if (!mr) { + error_setg(errp, "Failed to find memory region at address 0"); + return -1; + } + + ram_ptr = memory_region_get_ram_ptr(mr); + if (!ram_ptr) { + error_setg(errp, "No RAM found"); + goto unref_mr; + } + + netboot_filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, ipl->netboot_fw); + if (netboot_filename == NULL) { + error_setg(errp, "Could not find network bootloader '%s'", + ipl->netboot_fw); + goto unref_mr; + } + + img_size = load_elf_ram(netboot_filename, NULL, NULL, NULL, + &ipl->start_addr, + NULL, NULL, NULL, 1, EM_S390, 0, 0, NULL, + false); + + if (img_size < 0) { + img_size = load_image_size(netboot_filename, ram_ptr, ms->ram_size); + ipl->start_addr = KERN_IMAGE_START; + } + + if (img_size < 0) { + error_setg(errp, "Failed to load network bootloader"); + } + + g_free(netboot_filename); + +unref_mr: + memory_region_unref(mr); + return img_size; +} + +static bool 
is_virtio_ccw_device_of_type(IplParameterBlock *iplb, + int virtio_id) +{ + uint8_t cssid; + uint8_t ssid; + uint16_t devno; + uint16_t schid; + SubchDev *sch = NULL; + + if (iplb->pbt != S390_IPL_TYPE_CCW) { + return false; + } + + devno = be16_to_cpu(iplb->ccw.devno); + ssid = iplb->ccw.ssid & 3; + + for (schid = 0; schid < MAX_SCHID; schid++) { + for (cssid = 0; cssid < MAX_CSSID; cssid++) { + sch = css_find_subch(1, cssid, ssid, schid); + + if (sch && sch->devno == devno) { + return sch->id.cu_model == virtio_id; + } + } + } + return false; +} + +static bool is_virtio_net_device(IplParameterBlock *iplb) +{ + return is_virtio_ccw_device_of_type(iplb, VIRTIO_ID_NET); +} + +static bool is_virtio_scsi_device(IplParameterBlock *iplb) +{ + return is_virtio_ccw_device_of_type(iplb, VIRTIO_ID_SCSI); +} + +static void update_machine_ipl_properties(IplParameterBlock *iplb) +{ + Object *machine = qdev_get_machine(); + Error *err = NULL; + + /* Sync loadparm */ + if (iplb->flags & DIAG308_FLAGS_LP_VALID) { + uint8_t *ebcdic_loadparm = iplb->loadparm; + char ascii_loadparm[9]; + int i; + + for (i = 0; i < 8 && ebcdic_loadparm[i]; i++) { + ascii_loadparm[i] = ebcdic2ascii[(uint8_t) ebcdic_loadparm[i]]; + } + ascii_loadparm[i] = 0; + object_property_set_str(machine, "loadparm", ascii_loadparm, &err); + } else { + object_property_set_str(machine, "loadparm", "", &err); + } + if (err) { + warn_report_err(err); + } +} + +void s390_ipl_update_diag308(IplParameterBlock *iplb) +{ + S390IPLState *ipl = get_ipl_device(); + + /* + * The IPLB set and retrieved by subcodes 8/9 is completely + * separate from the one managed via subcodes 5/6. + */ + if (iplb->pbt == S390_IPL_TYPE_PV) { + ipl->iplb_pv = *iplb; + ipl->iplb_valid_pv = true; + } else { + ipl->iplb = *iplb; + ipl->iplb_valid = true; + } + ipl->netboot = is_virtio_net_device(iplb); + update_machine_ipl_properties(iplb); +} + +IplParameterBlock *s390_ipl_get_iplb_pv(void) +{ + S390IPLState *ipl = get_ipl_device(); + + if (!ipl->iplb_valid_pv) { + return NULL; + } + return &ipl->iplb_pv; +} + +IplParameterBlock *s390_ipl_get_iplb(void) +{ + S390IPLState *ipl = get_ipl_device(); + + if (!ipl->iplb_valid) { + return NULL; + } + return &ipl->iplb; +} + +void s390_ipl_reset_request(CPUState *cs, enum s390_reset reset_type) +{ + S390IPLState *ipl = get_ipl_device(); + + if (reset_type == S390_RESET_EXTERNAL || reset_type == S390_RESET_REIPL) { + /* use CPU 0 for full resets */ + ipl->reset_cpu_index = 0; + } else { + ipl->reset_cpu_index = cs->cpu_index; + } + ipl->reset_type = reset_type; + + if (reset_type == S390_RESET_REIPL && + ipl->iplb_valid && + !ipl->netboot && + ipl->iplb.pbt == S390_IPL_TYPE_CCW && + is_virtio_scsi_device(&ipl->iplb)) { + CcwDevice *ccw_dev = s390_get_ccw_device(get_boot_device(0), NULL); + + if (ccw_dev && + cpu_to_be16(ccw_dev->sch->devno) == ipl->iplb.ccw.devno && + (ccw_dev->sch->ssid & 3) == ipl->iplb.ccw.ssid) { + /* + * this is the original boot device's SCSI + * so restore IPL parameter info from it + */ + ipl->iplb_valid = s390_gen_initial_iplb(ipl); + } + } + if (reset_type == S390_RESET_MODIFIED_CLEAR || + reset_type == S390_RESET_LOAD_NORMAL || + reset_type == S390_RESET_PV) { + /* ignore -no-reboot, send no event */ + qemu_system_reset_request(SHUTDOWN_CAUSE_SUBSYSTEM_RESET); + } else { + qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); + } + /* as this is triggered by a CPU, make sure to exit the loop */ + if (tcg_enabled()) { + cpu_loop_exit(cs); + } +} + +void s390_ipl_get_reset_request(CPUState **cs, enum 
s390_reset *reset_type) +{ + S390IPLState *ipl = get_ipl_device(); + + *cs = qemu_get_cpu(ipl->reset_cpu_index); + if (!*cs) { + /* use any CPU */ + *cs = first_cpu; + } + *reset_type = ipl->reset_type; +} + +void s390_ipl_clear_reset_request(void) +{ + S390IPLState *ipl = get_ipl_device(); + + ipl->reset_type = S390_RESET_EXTERNAL; + /* use CPU 0 for full resets */ + ipl->reset_cpu_index = 0; +} + +static void s390_ipl_prepare_qipl(S390CPU *cpu) +{ + S390IPLState *ipl = get_ipl_device(); + uint8_t *addr; + uint64_t len = 4096; + + addr = cpu_physical_memory_map(cpu->env.psa, &len, true); + if (!addr || len < QIPL_ADDRESS + sizeof(QemuIplParameters)) { + error_report("Cannot set QEMU IPL parameters"); + return; + } + memcpy(addr + QIPL_ADDRESS, &ipl->qipl, sizeof(QemuIplParameters)); + cpu_physical_memory_unmap(addr, len, 1, len); +} + +int s390_ipl_prepare_pv_header(void) +{ + IplParameterBlock *ipib = s390_ipl_get_iplb_pv(); + IPLBlockPV *ipib_pv = &ipib->pv; + void *hdr = g_malloc(ipib_pv->pv_header_len); + int rc; + + cpu_physical_memory_read(ipib_pv->pv_header_addr, hdr, + ipib_pv->pv_header_len); + rc = s390_pv_set_sec_parms((uintptr_t)hdr, + ipib_pv->pv_header_len); + g_free(hdr); + return rc; +} + +int s390_ipl_pv_unpack(void) +{ + IplParameterBlock *ipib = s390_ipl_get_iplb_pv(); + IPLBlockPV *ipib_pv = &ipib->pv; + int i, rc = 0; + + for (i = 0; i < ipib_pv->num_comp; i++) { + rc = s390_pv_unpack(ipib_pv->components[i].addr, + TARGET_PAGE_ALIGN(ipib_pv->components[i].size), + ipib_pv->components[i].tweak_pref); + if (rc) { + break; + } + } + return rc; +} + +void s390_ipl_prepare_cpu(S390CPU *cpu) +{ + S390IPLState *ipl = get_ipl_device(); + + cpu->env.psw.addr = ipl->start_addr; + cpu->env.psw.mask = IPL_PSW_MASK; + + if (!ipl->kernel || ipl->iplb_valid) { + cpu->env.psw.addr = ipl->bios_start_addr; + if (!ipl->iplb_valid) { + ipl->iplb_valid = s390_gen_initial_iplb(ipl); + } + } + if (ipl->netboot) { + load_netboot_image(&error_fatal); + ipl->qipl.netboot_start_addr = cpu_to_be64(ipl->start_addr); + } + s390_ipl_set_boot_menu(ipl); + s390_ipl_prepare_qipl(cpu); +} + +static void s390_ipl_reset(DeviceState *dev) +{ + S390IPLState *ipl = S390_IPL(dev); + + if (ipl->reset_type != S390_RESET_REIPL) { + ipl->iplb_valid = false; + memset(&ipl->iplb, 0, sizeof(IplParameterBlock)); + } +} + +static void s390_ipl_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = s390_ipl_realize; + device_class_set_props(dc, s390_ipl_properties); + dc->reset = s390_ipl_reset; + dc->vmsd = &vmstate_ipl; + set_bit(DEVICE_CATEGORY_MISC, dc->categories); + /* Reason: Loads the ROMs and thus can only be used one time - internally */ + dc->user_creatable = false; +} + +static const TypeInfo s390_ipl_info = { + .class_init = s390_ipl_class_init, + .parent = TYPE_DEVICE, + .name = TYPE_S390_IPL, + .instance_size = sizeof(S390IPLState), +}; + +static void s390_ipl_register_types(void) +{ + type_register_static(&s390_ipl_info); +} + +type_init(s390_ipl_register_types) diff --git a/hw/s390x/ipl.h b/hw/s390x/ipl.h new file mode 100644 index 000000000..dfc6dfd89 --- /dev/null +++ b/hw/s390x/ipl.h @@ -0,0 +1,289 @@ +/* + * s390 IPL device + * + * Copyright 2015, 2020 IBM Corp. + * Author(s): Zhang Fan + * Janosch Frank + * + * This work is licensed under the terms of the GNU GPL, version 2 or (at + * your option) any later version. See the COPYING file in the top-level + * directory. 
+ *
+ */
+
+#ifndef HW_S390_IPL_H
+#define HW_S390_IPL_H
+
+#include "cpu.h"
+#include "exec/address-spaces.h"
+#include "hw/qdev-core.h"
+#include "qom/object.h"
+
+struct IPLBlockPVComp {
+    uint64_t tweak_pref;
+    uint64_t addr;
+    uint64_t size;
+} QEMU_PACKED;
+typedef struct IPLBlockPVComp IPLBlockPVComp;
+
+struct IPLBlockPV {
+    uint8_t  reserved18[87];    /* 0x18 */
+    uint8_t  version;           /* 0x6f */
+    uint32_t reserved70;        /* 0x70 */
+    uint32_t num_comp;          /* 0x74 */
+    uint64_t pv_header_addr;    /* 0x78 */
+    uint64_t pv_header_len;     /* 0x80 */
+    struct IPLBlockPVComp components[0];
+} QEMU_PACKED;
+typedef struct IPLBlockPV IPLBlockPV;
+
+struct IplBlockCcw {
+    uint8_t  reserved0[85];
+    uint8_t  ssid;
+    uint16_t devno;
+    uint8_t  vm_flags;
+    uint8_t  reserved3[3];
+    uint32_t vm_parm_len;
+    uint8_t  nss_name[8];
+    uint8_t  vm_parm[64];
+    uint8_t  reserved4[8];
+} QEMU_PACKED;
+typedef struct IplBlockCcw IplBlockCcw;
+
+struct IplBlockFcp {
+    uint8_t  reserved1[305 - 1];
+    uint8_t  opt;
+    uint8_t  reserved2[3];
+    uint16_t reserved3;
+    uint16_t devno;
+    uint8_t  reserved4[4];
+    uint64_t wwpn;
+    uint64_t lun;
+    uint32_t bootprog;
+    uint8_t  reserved5[12];
+    uint64_t br_lba;
+    uint32_t scp_data_len;
+    uint8_t  reserved6[260];
+    uint8_t  scp_data[0];
+} QEMU_PACKED;
+typedef struct IplBlockFcp IplBlockFcp;
+
+struct IplBlockQemuScsi {
+    uint32_t lun;
+    uint16_t target;
+    uint16_t channel;
+    uint8_t  reserved0[77];
+    uint8_t  ssid;
+    uint16_t devno;
+} QEMU_PACKED;
+typedef struct IplBlockQemuScsi IplBlockQemuScsi;
+
+#define DIAG308_FLAGS_LP_VALID 0x80
+
+union IplParameterBlock {
+    struct {
+        uint32_t len;
+        uint8_t  reserved0[3];
+        uint8_t  version;
+        uint32_t blk0_len;
+        uint8_t  pbt;
+        uint8_t  flags;
+        uint16_t reserved01;
+        uint8_t  loadparm[8];
+        union {
+            IplBlockCcw ccw;
+            IplBlockFcp fcp;
+            IPLBlockPV pv;
+            IplBlockQemuScsi scsi;
+        };
+    } QEMU_PACKED;
+    struct {
+        uint8_t  reserved1[110];
+        uint16_t devno;
+        uint8_t  reserved2[88];
+        uint8_t  reserved_ext[4096 - 200];
+    } QEMU_PACKED;
+} QEMU_PACKED;
+typedef union IplParameterBlock IplParameterBlock;
+
+int s390_ipl_set_loadparm(uint8_t *loadparm);
+void s390_ipl_update_diag308(IplParameterBlock *iplb);
+int s390_ipl_prepare_pv_header(void);
+int s390_ipl_pv_unpack(void);
+void s390_ipl_prepare_cpu(S390CPU *cpu);
+IplParameterBlock *s390_ipl_get_iplb(void);
+IplParameterBlock *s390_ipl_get_iplb_pv(void);
+
+enum s390_reset {
+    /* default is a reset not triggered by a CPU e.g. issued by QMP */
+    S390_RESET_EXTERNAL = 0,
+    S390_RESET_REIPL,
+    S390_RESET_MODIFIED_CLEAR,
+    S390_RESET_LOAD_NORMAL,
+    S390_RESET_PV,
+};
+void s390_ipl_reset_request(CPUState *cs, enum s390_reset reset_type);
+void s390_ipl_get_reset_request(CPUState **cs, enum s390_reset *reset_type);
+void s390_ipl_clear_reset_request(void);
+
+#define QIPL_ADDRESS  0xcc
+
+/* Boot Menu flags */
+#define QIPL_FLAG_BM_OPTS_CMD   0x80
+#define QIPL_FLAG_BM_OPTS_ZIPL  0x40
+
+/*
+ * The QEMU IPL Parameters will be stored at absolute address
+ * 204 (0xcc) which means it is 32-bit word aligned but not
+ * double-word aligned.
+ * Placement of data fields in this area must account for
+ * their alignment needs. E.g., netboot_start_addr must
+ * have an offset of 4 + n * 8 bytes within the struct in order
+ * to keep it double-word aligned.
+ * The total size of the struct must never exceed 28 bytes.
+ * This definition must be kept in sync with the definition
+ * in pc-bios/s390-ccw/iplb.h.
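+ * As a cross-check, the fields below add up to exactly 28 bytes:
+ * 1 (qipl_flags) + 3 (reserved1) + 8 (netboot_start_addr) +
+ * 4 (boot_menu_timeout) + 12 (reserved2), with netboot_start_addr
+ * at offset 4 as required.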
+ */ +struct QemuIplParameters { + uint8_t qipl_flags; + uint8_t reserved1[3]; + uint64_t netboot_start_addr; + uint32_t boot_menu_timeout; + uint8_t reserved2[12]; +} QEMU_PACKED; +typedef struct QemuIplParameters QemuIplParameters; + +#define TYPE_S390_IPL "s390-ipl" +OBJECT_DECLARE_SIMPLE_TYPE(S390IPLState, S390_IPL) + +struct S390IPLState { + /*< private >*/ + DeviceState parent_obj; + IplParameterBlock iplb; + IplParameterBlock iplb_pv; + QemuIplParameters qipl; + uint64_t start_addr; + uint64_t compat_start_addr; + uint64_t bios_start_addr; + uint64_t compat_bios_start_addr; + bool enforce_bios; + bool iplb_valid; + bool iplb_valid_pv; + bool netboot; + /* reset related properties don't have to be migrated or reset */ + enum s390_reset reset_type; + int reset_cpu_index; + + /*< public >*/ + char *kernel; + char *initrd; + char *cmdline; + char *firmware; + char *netboot_fw; + uint8_t cssid; + uint8_t ssid; + uint16_t devno; + bool iplbext_migration; +}; +QEMU_BUILD_BUG_MSG(offsetof(S390IPLState, iplb) & 3, "alignment of iplb wrong"); + +#define DIAG_308_RC_OK 0x0001 +#define DIAG_308_RC_NO_CONF 0x0102 +#define DIAG_308_RC_INVALID 0x0402 +#define DIAG_308_RC_NO_PV_CONF 0x0902 +#define DIAG_308_RC_INVAL_FOR_PV 0x0a02 + +#define DIAG308_RESET_MOD_CLR 0 +#define DIAG308_RESET_LOAD_NORM 1 +#define DIAG308_LOAD_CLEAR 3 +#define DIAG308_LOAD_NORMAL_DUMP 4 +#define DIAG308_SET 5 +#define DIAG308_STORE 6 +#define DIAG308_PV_SET 8 +#define DIAG308_PV_STORE 9 +#define DIAG308_PV_START 10 + +#define S390_IPL_TYPE_FCP 0x00 +#define S390_IPL_TYPE_CCW 0x02 +#define S390_IPL_TYPE_PV 0x05 +#define S390_IPL_TYPE_QEMU_SCSI 0xff + +#define S390_IPLB_HEADER_LEN 8 +#define S390_IPLB_MIN_PV_LEN 148 +#define S390_IPLB_MIN_CCW_LEN 200 +#define S390_IPLB_MIN_FCP_LEN 384 +#define S390_IPLB_MIN_QEMU_SCSI_LEN 200 + +static inline bool iplb_valid_len(IplParameterBlock *iplb) +{ + return be32_to_cpu(iplb->len) <= sizeof(IplParameterBlock); +} + +static inline bool ipl_valid_pv_components(IplParameterBlock *iplb) +{ + IPLBlockPV *ipib_pv = &iplb->pv; + int i; + + if (ipib_pv->num_comp == 0) { + return false; + } + + for (i = 0; i < ipib_pv->num_comp; i++) { + /* Addr must be 4k aligned */ + if (ipib_pv->components[i].addr & ~TARGET_PAGE_MASK) { + return false; + } + + /* Tweak prefix is monotonically increasing with each component */ + if (i < ipib_pv->num_comp - 1 && + ipib_pv->components[i].tweak_pref >= + ipib_pv->components[i + 1].tweak_pref) { + return false; + } + } + return true; +} + +static inline bool ipl_valid_pv_header(IplParameterBlock *iplb) +{ + IPLBlockPV *ipib_pv = &iplb->pv; + + if (ipib_pv->pv_header_len > 2 * TARGET_PAGE_SIZE) { + return false; + } + + if (!address_space_access_valid(&address_space_memory, + ipib_pv->pv_header_addr, + ipib_pv->pv_header_len, + false, + MEMTXATTRS_UNSPECIFIED)) { + return false; + } + + return true; +} + +static inline bool iplb_valid_pv(IplParameterBlock *iplb) +{ + if (iplb->pbt != S390_IPL_TYPE_PV || + be32_to_cpu(iplb->len) < S390_IPLB_MIN_PV_LEN) { + return false; + } + if (!ipl_valid_pv_header(iplb)) { + return false; + } + return ipl_valid_pv_components(iplb); +} + +static inline bool iplb_valid(IplParameterBlock *iplb) +{ + switch (iplb->pbt) { + case S390_IPL_TYPE_FCP: + return be32_to_cpu(iplb->len) >= S390_IPLB_MIN_FCP_LEN; + case S390_IPL_TYPE_CCW: + return be32_to_cpu(iplb->len) >= S390_IPLB_MIN_CCW_LEN; + default: + return false; + } +} + +#endif diff --git a/hw/s390x/meson.build b/hw/s390x/meson.build new file mode 100644 index 000000000..28484256e 
--- /dev/null
+++ b/hw/s390x/meson.build
@@ -0,0 +1,58 @@
+s390x_ss = ss.source_set()
+s390x_ss.add(files(
+  'ap-bridge.c',
+  'ap-device.c',
+  'ccw-device.c',
+  'css-bridge.c',
+  'css.c',
+  'event-facility.c',
+  'ipl.c',
+  's390-ccw.c',
+  's390-pci-bus.c',
+  's390-pci-inst.c',
+  's390-skeys.c',
+  's390-stattrib.c',
+  's390-virtio-hcall.c',
+  'sclp.c',
+  'sclpcpu.c',
+  'sclpquiesce.c',
+  'tod.c',
+))
+s390x_ss.add(when: 'CONFIG_KVM', if_true: files(
+  'tod-kvm.c',
+  's390-skeys-kvm.c',
+  's390-stattrib-kvm.c',
+  'pv.c',
+))
+s390x_ss.add(when: 'CONFIG_TCG', if_true: files(
+  'tod-tcg.c',
+))
+s390x_ss.add(when: 'CONFIG_S390_CCW_VIRTIO', if_true: files('s390-virtio-ccw.c'))
+s390x_ss.add(when: 'CONFIG_TERMINAL3270', if_true: files('3270-ccw.c'))
+s390x_ss.add(when: 'CONFIG_VFIO', if_true: files('s390-pci-vfio.c'))
+
+virtio_ss = ss.source_set()
+virtio_ss.add(files('virtio-ccw.c'))
+virtio_ss.add(when: 'CONFIG_VIRTIO_BALLOON', if_true: files('virtio-ccw-balloon.c'))
+virtio_ss.add(when: 'CONFIG_VIRTIO_BLK', if_true: files('virtio-ccw-blk.c'))
+virtio_ss.add(when: 'CONFIG_VIRTIO_CRYPTO', if_true: files('virtio-ccw-crypto.c'))
+virtio_ss.add(when: 'CONFIG_VIRTIO_INPUT', if_true: files('virtio-ccw-input.c'))
+virtio_ss.add(when: 'CONFIG_VIRTIO_NET', if_true: files('virtio-ccw-net.c'))
+virtio_ss.add(when: 'CONFIG_VIRTIO_RNG', if_true: files('virtio-ccw-rng.c'))
+virtio_ss.add(when: 'CONFIG_VIRTIO_SCSI', if_true: files('virtio-ccw-scsi.c'))
+virtio_ss.add(when: 'CONFIG_VIRTIO_SERIAL', if_true: files('virtio-ccw-serial.c'))
+if have_virtfs
+  virtio_ss.add(when: 'CONFIG_VIRTIO_9P', if_true: files('virtio-ccw-9p.c'))
+endif
+virtio_ss.add(when: 'CONFIG_VHOST_VSOCK', if_true: files('vhost-vsock-ccw.c'))
+virtio_ss.add(when: 'CONFIG_VHOST_USER_FS', if_true: files('vhost-user-fs-ccw.c'))
+s390x_ss.add_all(when: 'CONFIG_VIRTIO_CCW', if_true: virtio_ss)
+
+hw_arch += {'s390x': s390x_ss}
+
+hw_s390x_modules = {}
+virtio_gpu_ccw_ss = ss.source_set()
+virtio_gpu_ccw_ss.add(when: ['CONFIG_VIRTIO_GPU', 'CONFIG_VIRTIO_CCW'],
+                      if_true: [files('virtio-ccw-gpu.c'), pixman])
+hw_s390x_modules += {'virtio-gpu-ccw': virtio_gpu_ccw_ss}
+modules += {'hw-s390x': hw_s390x_modules}
diff --git a/hw/s390x/pv.c b/hw/s390x/pv.c
new file mode 100644
index 000000000..401b63d6c
--- /dev/null
+++ b/hw/s390x/pv.c
@@ -0,0 +1,174 @@
+/*
+ * Protected Virtualization functions
+ *
+ * Copyright IBM Corp. 2020
+ * Author(s):
+ *  Janosch Frank
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or (at
+ * your option) any later version. See the COPYING file in the top-level
+ * directory.
+ */
+#include "qemu/osdep.h"
+
+#include <linux/kvm.h>
+
+#include "qapi/error.h"
+#include "qemu/error-report.h"
+#include "sysemu/kvm.h"
+#include "qom/object_interfaces.h"
+#include "exec/confidential-guest-support.h"
+#include "hw/s390x/ipl.h"
+#include "hw/s390x/pv.h"
+
+static int __s390_pv_cmd(uint32_t cmd, const char *cmdname, void *data)
+{
+    struct kvm_pv_cmd pv_cmd = {
+        .cmd = cmd,
+        .data = (uint64_t)data,
+    };
+    int rc;
+
+    do {
+        rc = kvm_vm_ioctl(kvm_state, KVM_S390_PV_COMMAND, &pv_cmd);
+    } while (rc == -EINTR);
+
+    if (rc) {
+        error_report("KVM PV command %d (%s) failed: header rc %x rrc %x "
+                     "IOCTL rc: %d", cmd, cmdname, pv_cmd.rc, pv_cmd.rrc,
+                     rc);
+    }
+    return rc;
+}
+
+/*
+ * This macro lets us pass the command as a string to the function so
+ * we can print it on an error.
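+ * For example, s390_pv_cmd(KVM_PV_ENABLE, NULL) expands to
+ * __s390_pv_cmd(KVM_PV_ENABLE, "KVM_PV_ENABLE", NULL), so a failure
+ * is logged with the symbolic command name rather than just its
+ * number.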
+ */ +#define s390_pv_cmd(cmd, data) __s390_pv_cmd(cmd, #cmd, data); +#define s390_pv_cmd_exit(cmd, data) \ +{ \ + int rc; \ + \ + rc = __s390_pv_cmd(cmd, #cmd, data);\ + if (rc) { \ + exit(1); \ + } \ +} + +int s390_pv_vm_enable(void) +{ + return s390_pv_cmd(KVM_PV_ENABLE, NULL); +} + +void s390_pv_vm_disable(void) +{ + s390_pv_cmd_exit(KVM_PV_DISABLE, NULL); +} + +int s390_pv_set_sec_parms(uint64_t origin, uint64_t length) +{ + struct kvm_s390_pv_sec_parm args = { + .origin = origin, + .length = length, + }; + + return s390_pv_cmd(KVM_PV_SET_SEC_PARMS, &args); +} + +/* + * Called for each component in the SE type IPL parameter block 0. + */ +int s390_pv_unpack(uint64_t addr, uint64_t size, uint64_t tweak) +{ + struct kvm_s390_pv_unp args = { + .addr = addr, + .size = size, + .tweak = tweak, + }; + + return s390_pv_cmd(KVM_PV_UNPACK, &args); +} + +void s390_pv_prep_reset(void) +{ + s390_pv_cmd_exit(KVM_PV_PREP_RESET, NULL); +} + +int s390_pv_verify(void) +{ + return s390_pv_cmd(KVM_PV_VERIFY, NULL); +} + +void s390_pv_unshare(void) +{ + s390_pv_cmd_exit(KVM_PV_UNSHARE_ALL, NULL); +} + +void s390_pv_inject_reset_error(CPUState *cs) +{ + int r1 = (cs->kvm_run->s390_sieic.ipa & 0x00f0) >> 4; + CPUS390XState *env = &S390_CPU(cs)->env; + + /* Report that we are unable to enter protected mode */ + env->regs[r1 + 1] = DIAG_308_RC_INVAL_FOR_PV; +} + +#define TYPE_S390_PV_GUEST "s390-pv-guest" +OBJECT_DECLARE_SIMPLE_TYPE(S390PVGuest, S390_PV_GUEST) + +/** + * S390PVGuest: + * + * The S390PVGuest object is basically a dummy used to tell the + * confidential guest support system to use s390's PV mechanism. + * + * # $QEMU \ + * -object s390-pv-guest,id=pv0 \ + * -machine ...,confidential-guest-support=pv0 + */ +struct S390PVGuest { + ConfidentialGuestSupport parent_obj; +}; + +typedef struct S390PVGuestClass S390PVGuestClass; + +struct S390PVGuestClass { + ConfidentialGuestSupportClass parent_class; +}; + +int s390_pv_kvm_init(ConfidentialGuestSupport *cgs, Error **errp) +{ + if (!object_dynamic_cast(OBJECT(cgs), TYPE_S390_PV_GUEST)) { + return 0; + } + + if (!s390_has_feat(S390_FEAT_UNPACK)) { + error_setg(errp, + "CPU model does not support Protected Virtualization"); + return -1; + } + + cgs->ready = true; + + return 0; +} + +OBJECT_DEFINE_TYPE_WITH_INTERFACES(S390PVGuest, + s390_pv_guest, + S390_PV_GUEST, + CONFIDENTIAL_GUEST_SUPPORT, + { TYPE_USER_CREATABLE }, + { NULL }) + +static void s390_pv_guest_class_init(ObjectClass *oc, void *data) +{ +} + +static void s390_pv_guest_init(Object *obj) +{ +} + +static void s390_pv_guest_finalize(Object *obj) +{ +} diff --git a/hw/s390x/s390-ccw.c b/hw/s390x/s390-ccw.c new file mode 100644 index 000000000..2fc8bb9c2 --- /dev/null +++ b/hw/s390x/s390-ccw.c @@ -0,0 +1,201 @@ +/* + * s390 CCW Assignment Support + * + * Copyright 2017 IBM Corp + * Author(s): Dong Jia Shi + * Xiao Feng Ren + * Pierre Morel + * + * This work is licensed under the terms of the GNU GPL, version 2 + * or (at your option) any later version. See the COPYING file in the + * top-level directory. 
+ */
+
+#include "qemu/osdep.h"
+#include <libgen.h>
+#include "qapi/error.h"
+#include "qemu/module.h"
+#include "hw/s390x/css.h"
+#include "hw/s390x/css-bridge.h"
+#include "hw/s390x/s390-ccw.h"
+#include "sysemu/sysemu.h"
+
+IOInstEnding s390_ccw_cmd_request(SubchDev *sch)
+{
+    S390CCWDeviceClass *cdc = S390_CCW_DEVICE_GET_CLASS(sch->driver_data);
+
+    if (!cdc->handle_request) {
+        return IOINST_CC_STATUS_PRESENT;
+    }
+    return cdc->handle_request(sch);
+}
+
+int s390_ccw_halt(SubchDev *sch)
+{
+    S390CCWDeviceClass *cdc = S390_CCW_DEVICE_GET_CLASS(sch->driver_data);
+
+    if (!cdc->handle_halt) {
+        return -ENOSYS;
+    }
+    return cdc->handle_halt(sch);
+}
+
+int s390_ccw_clear(SubchDev *sch)
+{
+    S390CCWDeviceClass *cdc = S390_CCW_DEVICE_GET_CLASS(sch->driver_data);
+
+    if (!cdc->handle_clear) {
+        return -ENOSYS;
+    }
+    return cdc->handle_clear(sch);
+}
+
+IOInstEnding s390_ccw_store(SubchDev *sch)
+{
+    S390CCWDeviceClass *cdc = NULL;
+    int ret = IOINST_CC_EXPECTED;
+
+    /*
+     * This code is called for both virtual and passthrough devices,
+     * but only applies to the latter. This ugly check makes that
+     * distinction for us.
+     */
+    if (object_dynamic_cast(OBJECT(sch->driver_data), TYPE_S390_CCW)) {
+        cdc = S390_CCW_DEVICE_GET_CLASS(sch->driver_data);
+    }
+
+    if (cdc && cdc->handle_store) {
+        ret = cdc->handle_store(sch);
+    }
+
+    return ret;
+}
+
+static void s390_ccw_get_dev_info(S390CCWDevice *cdev,
+                                  char *sysfsdev,
+                                  Error **errp)
+{
+    unsigned int cssid, ssid, devid;
+    char dev_path[PATH_MAX] = {0}, *tmp;
+
+    if (!sysfsdev) {
+        error_setg(errp, "No host device provided");
+        error_append_hint(errp,
+                          "Use -device vfio-ccw,sysfsdev=PATH_TO_DEVICE\n");
+        return;
+    }
+
+    if (!realpath(sysfsdev, dev_path)) {
+        error_setg_errno(errp, errno, "Host device '%s' not found", sysfsdev);
+        return;
+    }
+
+    cdev->mdevid = g_path_get_basename(dev_path);
+
+    tmp = basename(dirname(dev_path));
+    if (sscanf(tmp, "%2x.%1x.%4x", &cssid, &ssid, &devid) != 3) {
+        error_setg_errno(errp, errno, "Failed to read %s", tmp);
+        return;
+    }
+
+    cdev->hostid.cssid = cssid;
+    cdev->hostid.ssid = ssid;
+    cdev->hostid.devid = devid;
+    cdev->hostid.valid = true;
+}
+
+static void s390_ccw_realize(S390CCWDevice *cdev, char *sysfsdev, Error **errp)
+{
+    CcwDevice *ccw_dev = CCW_DEVICE(cdev);
+    CCWDeviceClass *ck = CCW_DEVICE_GET_CLASS(ccw_dev);
+    DeviceState *parent = DEVICE(ccw_dev);
+    SubchDev *sch;
+    int ret;
+    Error *err = NULL;
+
+    s390_ccw_get_dev_info(cdev, sysfsdev, &err);
+    if (err) {
+        goto out_err_propagate;
+    }
+
+    sch = css_create_sch(ccw_dev->devno, &err);
+    if (!sch) {
+        goto out_mdevid_free;
+    }
+    sch->driver_data = cdev;
+    sch->do_subchannel_work = do_subchannel_work_passthrough;
+    sch->irb_cb = build_irb_passthrough;
+
+    ccw_dev->sch = sch;
+    ret = css_sch_build_schib(sch, &cdev->hostid);
+    if (ret) {
+        error_setg_errno(&err, -ret, "%s: Failed to build initial schib",
+                         __func__);
+        goto out_err;
+    }
+
+    ck->realize(ccw_dev, &err);
+    if (err) {
+        goto out_err;
+    }
+
+    css_generate_sch_crws(sch->cssid, sch->ssid, sch->schid,
+                          parent->hotplugged, 1);
+    return;
+
+out_err:
+    css_subch_assign(sch->cssid, sch->ssid, sch->schid, sch->devno, NULL);
+    ccw_dev->sch = NULL;
+    g_free(sch);
+out_mdevid_free:
+    g_free(cdev->mdevid);
+out_err_propagate:
+    error_propagate(errp, err);
+}
+
+static void s390_ccw_unrealize(S390CCWDevice *cdev)
+{
+    CcwDevice *ccw_dev = CCW_DEVICE(cdev);
+    SubchDev *sch = ccw_dev->sch;
+
+    if (sch) {
+        css_subch_assign(sch->cssid, sch->ssid, sch->schid, sch->devno, NULL);
+        g_free(sch);
+        ccw_dev->sch =
NULL; + } + + g_free(cdev->mdevid); +} + +static void s390_ccw_instance_init(Object *obj) +{ + S390CCWDevice *dev = S390_CCW_DEVICE(obj); + + device_add_bootindex_property(obj, &dev->bootindex, "bootindex", + "/disk@0,0", DEVICE(obj)); +} + +static void s390_ccw_class_init(ObjectClass *klass, void *data) +{ + S390CCWDeviceClass *cdc = S390_CCW_DEVICE_CLASS(klass); + + cdc->realize = s390_ccw_realize; + cdc->unrealize = s390_ccw_unrealize; +} + +static const TypeInfo s390_ccw_info = { + .name = TYPE_S390_CCW, + .parent = TYPE_CCW_DEVICE, + .instance_init = s390_ccw_instance_init, + .instance_size = sizeof(S390CCWDevice), + .class_size = sizeof(S390CCWDeviceClass), + .class_init = s390_ccw_class_init, + .abstract = true, +}; + +static void register_s390_ccw_type(void) +{ + type_register_static(&s390_ccw_info); +} + +type_init(register_s390_ccw_type) diff --git a/hw/s390x/s390-pci-bus.c b/hw/s390x/s390-pci-bus.c new file mode 100644 index 000000000..1b51a7283 --- /dev/null +++ b/hw/s390x/s390-pci-bus.c @@ -0,0 +1,1423 @@ +/* + * s390 PCI BUS + * + * Copyright 2014 IBM Corp. + * Author(s): Frank Blaschka + * Hong Bo Li + * Yi Min Zhao + * + * This work is licensed under the terms of the GNU GPL, version 2 or (at + * your option) any later version. See the COPYING file in the top-level + * directory. + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qapi/visitor.h" +#include "hw/s390x/s390-pci-bus.h" +#include "hw/s390x/s390-pci-inst.h" +#include "hw/s390x/s390-pci-vfio.h" +#include "hw/pci/pci_bus.h" +#include "hw/qdev-properties.h" +#include "hw/pci/pci_bridge.h" +#include "hw/pci/msi.h" +#include "qemu/error-report.h" +#include "qemu/module.h" + +#ifndef DEBUG_S390PCI_BUS +#define DEBUG_S390PCI_BUS 0 +#endif + +#define DPRINTF(fmt, ...) \ + do { \ + if (DEBUG_S390PCI_BUS) { \ + fprintf(stderr, "S390pci-bus: " fmt, ## __VA_ARGS__); \ + } \ + } while (0) + +S390pciState *s390_get_phb(void) +{ + static S390pciState *phb; + + if (!phb) { + phb = S390_PCI_HOST_BRIDGE( + object_resolve_path(TYPE_S390_PCI_HOST_BRIDGE, NULL)); + assert(phb != NULL); + } + + return phb; +} + +int pci_chsc_sei_nt2_get_event(void *res) +{ + ChscSeiNt2Res *nt2_res = (ChscSeiNt2Res *)res; + PciCcdfAvail *accdf; + PciCcdfErr *eccdf; + int rc = 1; + SeiContainer *sei_cont; + S390pciState *s = s390_get_phb(); + + sei_cont = QTAILQ_FIRST(&s->pending_sei); + if (sei_cont) { + QTAILQ_REMOVE(&s->pending_sei, sei_cont, link); + nt2_res->nt = 2; + nt2_res->cc = sei_cont->cc; + nt2_res->length = cpu_to_be16(sizeof(ChscSeiNt2Res)); + switch (sei_cont->cc) { + case 1: /* error event */ + eccdf = (PciCcdfErr *)nt2_res->ccdf; + eccdf->fid = cpu_to_be32(sei_cont->fid); + eccdf->fh = cpu_to_be32(sei_cont->fh); + eccdf->e = cpu_to_be32(sei_cont->e); + eccdf->faddr = cpu_to_be64(sei_cont->faddr); + eccdf->pec = cpu_to_be16(sei_cont->pec); + break; + case 2: /* availability event */ + accdf = (PciCcdfAvail *)nt2_res->ccdf; + accdf->fid = cpu_to_be32(sei_cont->fid); + accdf->fh = cpu_to_be32(sei_cont->fh); + accdf->pec = cpu_to_be16(sei_cont->pec); + break; + default: + abort(); + } + g_free(sei_cont); + rc = 0; + } + + return rc; +} + +int pci_chsc_sei_nt2_have_event(void) +{ + S390pciState *s = s390_get_phb(); + + return !QTAILQ_EMPTY(&s->pending_sei); +} + +S390PCIBusDevice *s390_pci_find_next_avail_dev(S390pciState *s, + S390PCIBusDevice *pbdev) +{ + S390PCIBusDevice *ret = pbdev ? 
QTAILQ_NEXT(pbdev, link) : + QTAILQ_FIRST(&s->zpci_devs); + + while (ret && ret->state == ZPCI_FS_RESERVED) { + ret = QTAILQ_NEXT(ret, link); + } + + return ret; +} + +S390PCIBusDevice *s390_pci_find_dev_by_fid(S390pciState *s, uint32_t fid) +{ + S390PCIBusDevice *pbdev; + + QTAILQ_FOREACH(pbdev, &s->zpci_devs, link) { + if (pbdev->fid == fid) { + return pbdev; + } + } + + return NULL; +} + +void s390_pci_sclp_configure(SCCB *sccb) +{ + IoaCfgSccb *psccb = (IoaCfgSccb *)sccb; + S390PCIBusDevice *pbdev = s390_pci_find_dev_by_fid(s390_get_phb(), + be32_to_cpu(psccb->aid)); + uint16_t rc; + + if (!pbdev) { + DPRINTF("sclp config no dev found\n"); + rc = SCLP_RC_ADAPTER_ID_NOT_RECOGNIZED; + goto out; + } + + switch (pbdev->state) { + case ZPCI_FS_RESERVED: + rc = SCLP_RC_ADAPTER_IN_RESERVED_STATE; + break; + case ZPCI_FS_STANDBY: + pbdev->state = ZPCI_FS_DISABLED; + rc = SCLP_RC_NORMAL_COMPLETION; + break; + default: + rc = SCLP_RC_NO_ACTION_REQUIRED; + } +out: + psccb->header.response_code = cpu_to_be16(rc); +} + +static void s390_pci_perform_unplug(S390PCIBusDevice *pbdev) +{ + HotplugHandler *hotplug_ctrl; + + /* Unplug the PCI device */ + if (pbdev->pdev) { + DeviceState *pdev = DEVICE(pbdev->pdev); + + hotplug_ctrl = qdev_get_hotplug_handler(pdev); + hotplug_handler_unplug(hotplug_ctrl, pdev, &error_abort); + object_unparent(OBJECT(pdev)); + } + + /* Unplug the zPCI device */ + hotplug_ctrl = qdev_get_hotplug_handler(DEVICE(pbdev)); + hotplug_handler_unplug(hotplug_ctrl, DEVICE(pbdev), &error_abort); + object_unparent(OBJECT(pbdev)); +} + +void s390_pci_sclp_deconfigure(SCCB *sccb) +{ + IoaCfgSccb *psccb = (IoaCfgSccb *)sccb; + S390PCIBusDevice *pbdev = s390_pci_find_dev_by_fid(s390_get_phb(), + be32_to_cpu(psccb->aid)); + uint16_t rc; + + if (!pbdev) { + DPRINTF("sclp deconfig no dev found\n"); + rc = SCLP_RC_ADAPTER_ID_NOT_RECOGNIZED; + goto out; + } + + switch (pbdev->state) { + case ZPCI_FS_RESERVED: + rc = SCLP_RC_ADAPTER_IN_RESERVED_STATE; + break; + case ZPCI_FS_STANDBY: + rc = SCLP_RC_NO_ACTION_REQUIRED; + break; + default: + if (pbdev->summary_ind) { + pci_dereg_irqs(pbdev); + } + if (pbdev->iommu->enabled) { + pci_dereg_ioat(pbdev->iommu); + } + pbdev->state = ZPCI_FS_STANDBY; + rc = SCLP_RC_NORMAL_COMPLETION; + + if (pbdev->unplug_requested) { + s390_pci_perform_unplug(pbdev); + } + } +out: + psccb->header.response_code = cpu_to_be16(rc); +} + +static S390PCIBusDevice *s390_pci_find_dev_by_uid(S390pciState *s, uint16_t uid) +{ + S390PCIBusDevice *pbdev; + + QTAILQ_FOREACH(pbdev, &s->zpci_devs, link) { + if (pbdev->uid == uid) { + return pbdev; + } + } + + return NULL; +} + +S390PCIBusDevice *s390_pci_find_dev_by_target(S390pciState *s, + const char *target) +{ + S390PCIBusDevice *pbdev; + + if (!target) { + return NULL; + } + + QTAILQ_FOREACH(pbdev, &s->zpci_devs, link) { + if (!strcmp(pbdev->target, target)) { + return pbdev; + } + } + + return NULL; +} + +static S390PCIBusDevice *s390_pci_find_dev_by_pci(S390pciState *s, + PCIDevice *pci_dev) +{ + S390PCIBusDevice *pbdev; + + if (!pci_dev) { + return NULL; + } + + QTAILQ_FOREACH(pbdev, &s->zpci_devs, link) { + if (pbdev->pdev == pci_dev) { + return pbdev; + } + } + + return NULL; +} + +S390PCIBusDevice *s390_pci_find_dev_by_idx(S390pciState *s, uint32_t idx) +{ + return g_hash_table_lookup(s->zpci_table, &idx); +} + +S390PCIBusDevice *s390_pci_find_dev_by_fh(S390pciState *s, uint32_t fh) +{ + uint32_t idx = FH_MASK_INDEX & fh; + S390PCIBusDevice *pbdev = s390_pci_find_dev_by_idx(s, idx); + + if (pbdev && pbdev->fh == fh) { + 
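+        /* the full handle, including its non-index bits, must match */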
return pbdev; + } + + return NULL; +} + +static void s390_pci_generate_event(uint8_t cc, uint16_t pec, uint32_t fh, + uint32_t fid, uint64_t faddr, uint32_t e) +{ + SeiContainer *sei_cont; + S390pciState *s = s390_get_phb(); + + sei_cont = g_new0(SeiContainer, 1); + sei_cont->fh = fh; + sei_cont->fid = fid; + sei_cont->cc = cc; + sei_cont->pec = pec; + sei_cont->faddr = faddr; + sei_cont->e = e; + + QTAILQ_INSERT_TAIL(&s->pending_sei, sei_cont, link); + css_generate_css_crws(0); +} + +static void s390_pci_generate_plug_event(uint16_t pec, uint32_t fh, + uint32_t fid) +{ + s390_pci_generate_event(2, pec, fh, fid, 0, 0); +} + +void s390_pci_generate_error_event(uint16_t pec, uint32_t fh, uint32_t fid, + uint64_t faddr, uint32_t e) +{ + s390_pci_generate_event(1, pec, fh, fid, faddr, e); +} + +static void s390_pci_set_irq(void *opaque, int irq, int level) +{ + /* nothing to do */ +} + +static int s390_pci_map_irq(PCIDevice *pci_dev, int irq_num) +{ + /* nothing to do */ + return 0; +} + +static uint64_t s390_pci_get_table_origin(uint64_t iota) +{ + return iota & ~ZPCI_IOTA_RTTO_FLAG; +} + +static unsigned int calc_rtx(dma_addr_t ptr) +{ + return ((unsigned long) ptr >> ZPCI_RT_SHIFT) & ZPCI_INDEX_MASK; +} + +static unsigned int calc_sx(dma_addr_t ptr) +{ + return ((unsigned long) ptr >> ZPCI_ST_SHIFT) & ZPCI_INDEX_MASK; +} + +static unsigned int calc_px(dma_addr_t ptr) +{ + return ((unsigned long) ptr >> TARGET_PAGE_BITS) & ZPCI_PT_MASK; +} + +static uint64_t get_rt_sto(uint64_t entry) +{ + return ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_RTX) + ? (entry & ZPCI_RTE_ADDR_MASK) + : 0; +} + +static uint64_t get_st_pto(uint64_t entry) +{ + return ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_SX) + ? (entry & ZPCI_STE_ADDR_MASK) + : 0; +} + +static bool rt_entry_isvalid(uint64_t entry) +{ + return (entry & ZPCI_TABLE_VALID_MASK) == ZPCI_TABLE_VALID; +} + +static bool pt_entry_isvalid(uint64_t entry) +{ + return (entry & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID; +} + +static bool entry_isprotected(uint64_t entry) +{ + return (entry & ZPCI_TABLE_PROT_MASK) == ZPCI_TABLE_PROTECTED; +} + +/* ett is expected table type, -1 page table, 0 segment table, 1 region table */ +static uint64_t get_table_index(uint64_t iova, int8_t ett) +{ + switch (ett) { + case ZPCI_ETT_PT: + return calc_px(iova); + case ZPCI_ETT_ST: + return calc_sx(iova); + case ZPCI_ETT_RT: + return calc_rtx(iova); + } + + return -1; +} + +static bool entry_isvalid(uint64_t entry, int8_t ett) +{ + switch (ett) { + case ZPCI_ETT_PT: + return pt_entry_isvalid(entry); + case ZPCI_ETT_ST: + case ZPCI_ETT_RT: + return rt_entry_isvalid(entry); + } + + return false; +} + +/* Return true if address translation is done */ +static bool translate_iscomplete(uint64_t entry, int8_t ett) +{ + switch (ett) { + case 0: + return (entry & ZPCI_TABLE_FC) ? 
true : false; + case 1: + return false; + } + + return true; +} + +static uint64_t get_frame_size(int8_t ett) +{ + switch (ett) { + case ZPCI_ETT_PT: + return 1ULL << 12; + case ZPCI_ETT_ST: + return 1ULL << 20; + case ZPCI_ETT_RT: + return 1ULL << 31; + } + + return 0; +} + +static uint64_t get_next_table_origin(uint64_t entry, int8_t ett) +{ + switch (ett) { + case ZPCI_ETT_PT: + return entry & ZPCI_PTE_ADDR_MASK; + case ZPCI_ETT_ST: + return get_st_pto(entry); + case ZPCI_ETT_RT: + return get_rt_sto(entry); + } + + return 0; +} + +/** + * table_translate: do translation within one table and return the following + * table origin + * + * @entry: the entry being translated, the result is stored in this. + * @to: the address of table origin. + * @ett: expected table type, 1 region table, 0 segment table and -1 page table. + * @error: error code + */ +static uint64_t table_translate(S390IOTLBEntry *entry, uint64_t to, int8_t ett, + uint16_t *error) +{ + uint64_t tx, te, nto = 0; + uint16_t err = 0; + + tx = get_table_index(entry->iova, ett); + te = address_space_ldq(&address_space_memory, to + tx * sizeof(uint64_t), + MEMTXATTRS_UNSPECIFIED, NULL); + + if (!te) { + err = ERR_EVENT_INVALTE; + goto out; + } + + if (!entry_isvalid(te, ett)) { + entry->perm &= IOMMU_NONE; + goto out; + } + + if (ett == ZPCI_ETT_RT && ((te & ZPCI_TABLE_LEN_RTX) != ZPCI_TABLE_LEN_RTX + || te & ZPCI_TABLE_OFFSET_MASK)) { + err = ERR_EVENT_INVALTL; + goto out; + } + + nto = get_next_table_origin(te, ett); + if (!nto) { + err = ERR_EVENT_TT; + goto out; + } + + if (entry_isprotected(te)) { + entry->perm &= IOMMU_RO; + } else { + entry->perm &= IOMMU_RW; + } + + if (translate_iscomplete(te, ett)) { + switch (ett) { + case ZPCI_ETT_PT: + entry->translated_addr = te & ZPCI_PTE_ADDR_MASK; + break; + case ZPCI_ETT_ST: + entry->translated_addr = (te & ZPCI_SFAA_MASK) | + (entry->iova & ~ZPCI_SFAA_MASK); + break; + } + nto = 0; + } +out: + if (err) { + entry->perm = IOMMU_NONE; + *error = err; + } + entry->len = get_frame_size(ett); + return nto; +} + +uint16_t s390_guest_io_table_walk(uint64_t g_iota, hwaddr addr, + S390IOTLBEntry *entry) +{ + uint64_t to = s390_pci_get_table_origin(g_iota); + int8_t ett = 1; + uint16_t error = 0; + + entry->iova = addr & TARGET_PAGE_MASK; + entry->translated_addr = 0; + entry->perm = IOMMU_RW; + + if (entry_isprotected(g_iota)) { + entry->perm &= IOMMU_RO; + } + + while (to) { + to = table_translate(entry, to, ett--, &error); + } + + return error; +} + +static IOMMUTLBEntry s390_translate_iommu(IOMMUMemoryRegion *mr, hwaddr addr, + IOMMUAccessFlags flag, int iommu_idx) +{ + S390PCIIOMMU *iommu = container_of(mr, S390PCIIOMMU, iommu_mr); + S390IOTLBEntry *entry; + uint64_t iova = addr & TARGET_PAGE_MASK; + uint16_t error = 0; + IOMMUTLBEntry ret = { + .target_as = &address_space_memory, + .iova = 0, + .translated_addr = 0, + .addr_mask = ~(hwaddr)0, + .perm = IOMMU_NONE, + }; + + switch (iommu->pbdev->state) { + case ZPCI_FS_ENABLED: + case ZPCI_FS_BLOCKED: + if (!iommu->enabled) { + return ret; + } + break; + default: + return ret; + } + + DPRINTF("iommu trans addr 0x%" PRIx64 "\n", addr); + + if (addr < iommu->pba || addr > iommu->pal) { + error = ERR_EVENT_OORANGE; + goto err; + } + + entry = g_hash_table_lookup(iommu->iotlb, &iova); + if (entry) { + ret.iova = entry->iova; + ret.translated_addr = entry->translated_addr; + ret.addr_mask = entry->len - 1; + ret.perm = entry->perm; + } else { + ret.iova = iova; + ret.addr_mask = ~TARGET_PAGE_MASK; + ret.perm = IOMMU_NONE; + } + + if (flag 
!= IOMMU_NONE && !(flag & ret.perm)) { + error = ERR_EVENT_TPROTE; + } +err: + if (error) { + iommu->pbdev->state = ZPCI_FS_ERROR; + s390_pci_generate_error_event(error, iommu->pbdev->fh, + iommu->pbdev->fid, addr, 0); + } + return ret; +} + +static void s390_pci_iommu_replay(IOMMUMemoryRegion *iommu, + IOMMUNotifier *notifier) +{ + /* It's impossible to plug a pci device on s390x that already has iommu + * mappings which need to be replayed, that is due to the "one iommu per + * zpci device" construct. But when we support migration of vfio-pci + * devices in future, we need to revisit this. + */ + return; +} + +static S390PCIIOMMU *s390_pci_get_iommu(S390pciState *s, PCIBus *bus, + int devfn) +{ + uint64_t key = (uintptr_t)bus; + S390PCIIOMMUTable *table = g_hash_table_lookup(s->iommu_table, &key); + S390PCIIOMMU *iommu; + + if (!table) { + table = g_new0(S390PCIIOMMUTable, 1); + table->key = key; + g_hash_table_insert(s->iommu_table, &table->key, table); + } + + iommu = table->iommu[PCI_SLOT(devfn)]; + if (!iommu) { + iommu = S390_PCI_IOMMU(object_new(TYPE_S390_PCI_IOMMU)); + + char *mr_name = g_strdup_printf("iommu-root-%02x:%02x.%01x", + pci_bus_num(bus), + PCI_SLOT(devfn), + PCI_FUNC(devfn)); + char *as_name = g_strdup_printf("iommu-pci-%02x:%02x.%01x", + pci_bus_num(bus), + PCI_SLOT(devfn), + PCI_FUNC(devfn)); + memory_region_init(&iommu->mr, OBJECT(iommu), mr_name, UINT64_MAX); + address_space_init(&iommu->as, &iommu->mr, as_name); + iommu->iotlb = g_hash_table_new_full(g_int64_hash, g_int64_equal, + NULL, g_free); + table->iommu[PCI_SLOT(devfn)] = iommu; + + g_free(mr_name); + g_free(as_name); + } + + return iommu; +} + +static AddressSpace *s390_pci_dma_iommu(PCIBus *bus, void *opaque, int devfn) +{ + S390pciState *s = opaque; + S390PCIIOMMU *iommu = s390_pci_get_iommu(s, bus, devfn); + + return &iommu->as; +} + +static uint8_t set_ind_atomic(uint64_t ind_loc, uint8_t to_be_set) +{ + uint8_t expected, actual; + hwaddr len = 1; + /* avoid multiple fetches */ + uint8_t volatile *ind_addr; + + ind_addr = cpu_physical_memory_map(ind_loc, &len, true); + if (!ind_addr) { + s390_pci_generate_error_event(ERR_EVENT_AIRERR, 0, 0, 0, 0); + return -1; + } + actual = *ind_addr; + do { + expected = actual; + actual = qatomic_cmpxchg(ind_addr, expected, expected | to_be_set); + } while (actual != expected); + cpu_physical_memory_unmap((void *)ind_addr, len, 1, len); + + return actual; +} + +static void s390_msi_ctrl_write(void *opaque, hwaddr addr, uint64_t data, + unsigned int size) +{ + S390PCIBusDevice *pbdev = opaque; + uint32_t vec = data & ZPCI_MSI_VEC_MASK; + uint64_t ind_bit; + uint32_t sum_bit; + + assert(pbdev); + DPRINTF("write_msix data 0x%" PRIx64 " idx %d vec 0x%x\n", data, + pbdev->idx, vec); + + if (pbdev->state != ZPCI_FS_ENABLED) { + return; + } + + ind_bit = pbdev->routes.adapter.ind_offset; + sum_bit = pbdev->routes.adapter.summary_offset; + + set_ind_atomic(pbdev->routes.adapter.ind_addr + (ind_bit + vec) / 8, + 0x80 >> ((ind_bit + vec) % 8)); + if (!set_ind_atomic(pbdev->routes.adapter.summary_addr + sum_bit / 8, + 0x80 >> (sum_bit % 8))) { + css_adapter_interrupt(CSS_IO_ADAPTER_PCI, pbdev->isc); + } +} + +static uint64_t s390_msi_ctrl_read(void *opaque, hwaddr addr, unsigned size) +{ + return 0xffffffff; +} + +static const MemoryRegionOps s390_msi_ctrl_ops = { + .write = s390_msi_ctrl_write, + .read = s390_msi_ctrl_read, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +void s390_pci_iommu_enable(S390PCIIOMMU *iommu) +{ + /* + * The iommu region is initialized against a 
0-mapped address space, + * so the smallest IOMMU region we can define runs from 0 to the end + * of the PCI address space. + */ + char *name = g_strdup_printf("iommu-s390-%04x", iommu->pbdev->uid); + memory_region_init_iommu(&iommu->iommu_mr, sizeof(iommu->iommu_mr), + TYPE_S390_IOMMU_MEMORY_REGION, OBJECT(&iommu->mr), + name, iommu->pal + 1); + iommu->enabled = true; + memory_region_add_subregion(&iommu->mr, 0, MEMORY_REGION(&iommu->iommu_mr)); + g_free(name); +} + +void s390_pci_iommu_disable(S390PCIIOMMU *iommu) +{ + iommu->enabled = false; + g_hash_table_remove_all(iommu->iotlb); + memory_region_del_subregion(&iommu->mr, MEMORY_REGION(&iommu->iommu_mr)); + object_unparent(OBJECT(&iommu->iommu_mr)); +} + +static void s390_pci_iommu_free(S390pciState *s, PCIBus *bus, int32_t devfn) +{ + uint64_t key = (uintptr_t)bus; + S390PCIIOMMUTable *table = g_hash_table_lookup(s->iommu_table, &key); + S390PCIIOMMU *iommu = table ? table->iommu[PCI_SLOT(devfn)] : NULL; + + if (!table || !iommu) { + return; + } + + table->iommu[PCI_SLOT(devfn)] = NULL; + g_hash_table_destroy(iommu->iotlb); + /* + * An attached PCI device may have memory listeners, eg. VFIO PCI. + * The associated subregion will already have been unmapped in + * s390_pci_iommu_disable in response to the guest deconfigure request. + * Remove the listeners now before destroying the address space. + */ + address_space_remove_listeners(&iommu->as); + address_space_destroy(&iommu->as); + object_unparent(OBJECT(&iommu->mr)); + object_unparent(OBJECT(iommu)); + object_unref(OBJECT(iommu)); +} + +S390PCIGroup *s390_group_create(int id) +{ + S390PCIGroup *group; + S390pciState *s = s390_get_phb(); + + group = g_new0(S390PCIGroup, 1); + group->id = id; + QTAILQ_INSERT_TAIL(&s->zpci_groups, group, link); + return group; +} + +S390PCIGroup *s390_group_find(int id) +{ + S390PCIGroup *group; + S390pciState *s = s390_get_phb(); + + QTAILQ_FOREACH(group, &s->zpci_groups, link) { + if (group->id == id) { + return group; + } + } + return NULL; +} + +static void s390_pci_init_default_group(void) +{ + S390PCIGroup *group; + ClpRspQueryPciGrp *resgrp; + + group = s390_group_create(ZPCI_DEFAULT_FN_GRP); + resgrp = &group->zpci_group; + resgrp->fr = 1; + resgrp->dasm = 0; + resgrp->msia = ZPCI_MSI_ADDR; + resgrp->mui = DEFAULT_MUI; + resgrp->i = 128; + resgrp->maxstbl = 128; + resgrp->version = 0; +} + +static void set_pbdev_info(S390PCIBusDevice *pbdev) +{ + pbdev->zpci_fn.sdma = ZPCI_SDMA_ADDR; + pbdev->zpci_fn.edma = ZPCI_EDMA_ADDR; + pbdev->zpci_fn.pchid = 0; + pbdev->zpci_fn.pfgid = ZPCI_DEFAULT_FN_GRP; + pbdev->zpci_fn.fid = pbdev->fid; + pbdev->zpci_fn.uid = pbdev->uid; + pbdev->pci_group = s390_group_find(ZPCI_DEFAULT_FN_GRP); +} + +static void s390_pcihost_realize(DeviceState *dev, Error **errp) +{ + PCIBus *b; + BusState *bus; + PCIHostState *phb = PCI_HOST_BRIDGE(dev); + S390pciState *s = S390_PCI_HOST_BRIDGE(dev); + + DPRINTF("host_init\n"); + + b = pci_register_root_bus(dev, NULL, s390_pci_set_irq, s390_pci_map_irq, + NULL, get_system_memory(), get_system_io(), 0, + 64, TYPE_PCI_BUS); + pci_setup_iommu(b, s390_pci_dma_iommu, s); + + bus = BUS(b); + qbus_set_hotplug_handler(bus, OBJECT(dev)); + phb->bus = b; + + s->bus = S390_PCI_BUS(qbus_new(TYPE_S390_PCI_BUS, dev, NULL)); + qbus_set_hotplug_handler(BUS(s->bus), OBJECT(dev)); + + s->iommu_table = g_hash_table_new_full(g_int64_hash, g_int64_equal, + NULL, g_free); + s->zpci_table = g_hash_table_new_full(g_int_hash, g_int_equal, NULL, NULL); + s->bus_no = 0; + QTAILQ_INIT(&s->pending_sei); + 
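/*
+     * Lookup structure sketch (informational): iommu_table maps a PCIBus
+     * pointer to its per-slot IOMMU array, zpci_table maps a device index
+     * back to its S390PCIBusDevice, and the tail queues below track pending
+     * error events, zpci devices, vfio DMA limits and PCI groups.
+     */
+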
QTAILQ_INIT(&s->zpci_devs); + QTAILQ_INIT(&s->zpci_dma_limit); + QTAILQ_INIT(&s->zpci_groups); + + s390_pci_init_default_group(); + css_register_io_adapters(CSS_IO_ADAPTER_PCI, true, false, + S390_ADAPTER_SUPPRESSIBLE, errp); +} + +static void s390_pcihost_unrealize(DeviceState *dev) +{ + S390PCIGroup *group; + S390pciState *s = S390_PCI_HOST_BRIDGE(dev); + + while (!QTAILQ_EMPTY(&s->zpci_groups)) { + group = QTAILQ_FIRST(&s->zpci_groups); + QTAILQ_REMOVE(&s->zpci_groups, group, link); + } +} + +static int s390_pci_msix_init(S390PCIBusDevice *pbdev) +{ + char *name; + uint8_t pos; + uint16_t ctrl; + uint32_t table, pba; + + pos = pci_find_capability(pbdev->pdev, PCI_CAP_ID_MSIX); + if (!pos) { + return -1; + } + + ctrl = pci_host_config_read_common(pbdev->pdev, pos + PCI_MSIX_FLAGS, + pci_config_size(pbdev->pdev), sizeof(ctrl)); + table = pci_host_config_read_common(pbdev->pdev, pos + PCI_MSIX_TABLE, + pci_config_size(pbdev->pdev), sizeof(table)); + pba = pci_host_config_read_common(pbdev->pdev, pos + PCI_MSIX_PBA, + pci_config_size(pbdev->pdev), sizeof(pba)); + + pbdev->msix.table_bar = table & PCI_MSIX_FLAGS_BIRMASK; + pbdev->msix.table_offset = table & ~PCI_MSIX_FLAGS_BIRMASK; + pbdev->msix.pba_bar = pba & PCI_MSIX_FLAGS_BIRMASK; + pbdev->msix.pba_offset = pba & ~PCI_MSIX_FLAGS_BIRMASK; + pbdev->msix.entries = (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1; + + name = g_strdup_printf("msix-s390-%04x", pbdev->uid); + memory_region_init_io(&pbdev->msix_notify_mr, OBJECT(pbdev), + &s390_msi_ctrl_ops, pbdev, name, TARGET_PAGE_SIZE); + memory_region_add_subregion(&pbdev->iommu->mr, + pbdev->pci_group->zpci_group.msia, + &pbdev->msix_notify_mr); + g_free(name); + + return 0; +} + +static void s390_pci_msix_free(S390PCIBusDevice *pbdev) +{ + memory_region_del_subregion(&pbdev->iommu->mr, &pbdev->msix_notify_mr); + object_unparent(OBJECT(&pbdev->msix_notify_mr)); +} + +static S390PCIBusDevice *s390_pci_device_new(S390pciState *s, + const char *target, Error **errp) +{ + Error *local_err = NULL; + DeviceState *dev; + + dev = qdev_try_new(TYPE_S390_PCI_DEVICE); + if (!dev) { + error_setg(errp, "zPCI device could not be created"); + return NULL; + } + + if (!object_property_set_str(OBJECT(dev), "target", target, &local_err)) { + object_unparent(OBJECT(dev)); + error_propagate_prepend(errp, local_err, + "zPCI device could not be created: "); + return NULL; + } + if (!qdev_realize_and_unref(dev, BUS(s->bus), &local_err)) { + object_unparent(OBJECT(dev)); + error_propagate_prepend(errp, local_err, + "zPCI device could not be created: "); + return NULL; + } + + return S390_PCI_DEVICE(dev); +} + +static bool s390_pci_alloc_idx(S390pciState *s, S390PCIBusDevice *pbdev) +{ + uint32_t idx; + + idx = s->next_idx; + while (s390_pci_find_dev_by_idx(s, idx)) { + idx = (idx + 1) & FH_MASK_INDEX; + if (idx == s->next_idx) { + return false; + } + } + + pbdev->idx = idx; + return true; +} + +static void s390_pcihost_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev, + Error **errp) +{ + S390pciState *s = S390_PCI_HOST_BRIDGE(hotplug_dev); + + if (!s390_has_feat(S390_FEAT_ZPCI)) { + warn_report("Plugging a PCI/zPCI device without the 'zpci' CPU " + "feature enabled; the guest will not be able to see/use " + "this device"); + } + + if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) { + PCIDevice *pdev = PCI_DEVICE(dev); + + if (pdev->cap_present & QEMU_PCI_CAP_MULTIFUNCTION) { + error_setg(errp, "multifunction not supported in s390"); + return; + } + } else if (object_dynamic_cast(OBJECT(dev), TYPE_S390_PCI_DEVICE)) { 
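+        /*
+         * A zpci proxy device is being plugged: reserve a free function
+         * handle index now; the plug handler below commits it and
+         * publishes the device in zpci_table and zpci_devs.
+         */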
+ S390PCIBusDevice *pbdev = S390_PCI_DEVICE(dev); + + if (!s390_pci_alloc_idx(s, pbdev)) { + error_setg(errp, "no slot for plugging zpci device"); + return; + } + } +} + +static void s390_pci_update_subordinate(PCIDevice *dev, uint32_t nr) +{ + uint32_t old_nr; + + pci_default_write_config(dev, PCI_SUBORDINATE_BUS, nr, 1); + while (!pci_bus_is_root(pci_get_bus(dev))) { + dev = pci_get_bus(dev)->parent_dev; + + old_nr = pci_default_read_config(dev, PCI_SUBORDINATE_BUS, 1); + if (old_nr < nr) { + pci_default_write_config(dev, PCI_SUBORDINATE_BUS, nr, 1); + } + } +} + +static void s390_pcihost_plug(HotplugHandler *hotplug_dev, DeviceState *dev, + Error **errp) +{ + S390pciState *s = S390_PCI_HOST_BRIDGE(hotplug_dev); + PCIDevice *pdev = NULL; + S390PCIBusDevice *pbdev = NULL; + + if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_BRIDGE)) { + PCIBridge *pb = PCI_BRIDGE(dev); + + pdev = PCI_DEVICE(dev); + pci_bridge_map_irq(pb, dev->id, s390_pci_map_irq); + pci_setup_iommu(&pb->sec_bus, s390_pci_dma_iommu, s); + + qbus_set_hotplug_handler(BUS(&pb->sec_bus), OBJECT(s)); + + if (dev->hotplugged) { + pci_default_write_config(pdev, PCI_PRIMARY_BUS, + pci_dev_bus_num(pdev), 1); + s->bus_no += 1; + pci_default_write_config(pdev, PCI_SECONDARY_BUS, s->bus_no, 1); + + s390_pci_update_subordinate(pdev, s->bus_no); + } + } else if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) { + pdev = PCI_DEVICE(dev); + + if (!dev->id) { + /* In the case the PCI device does not define an id */ + /* we generate one based on the PCI address */ + dev->id = g_strdup_printf("auto_%02x:%02x.%01x", + pci_dev_bus_num(pdev), + PCI_SLOT(pdev->devfn), + PCI_FUNC(pdev->devfn)); + } + + pbdev = s390_pci_find_dev_by_target(s, dev->id); + if (!pbdev) { + pbdev = s390_pci_device_new(s, dev->id, errp); + if (!pbdev) { + return; + } + } + + pbdev->pdev = pdev; + pbdev->iommu = s390_pci_get_iommu(s, pci_get_bus(pdev), pdev->devfn); + pbdev->iommu->pbdev = pbdev; + pbdev->state = ZPCI_FS_DISABLED; + set_pbdev_info(pbdev); + + if (object_dynamic_cast(OBJECT(dev), "vfio-pci")) { + pbdev->fh |= FH_SHM_VFIO; + pbdev->iommu->dma_limit = s390_pci_start_dma_count(s, pbdev); + /* Fill in CLP information passed via the vfio region */ + s390_pci_get_clp_info(pbdev); + } else { + pbdev->fh |= FH_SHM_EMUL; + } + + if (s390_pci_msix_init(pbdev)) { + error_setg(errp, "MSI-X support is mandatory " + "in the S390 architecture"); + return; + } + + if (dev->hotplugged) { + s390_pci_generate_plug_event(HP_EVENT_TO_CONFIGURED , + pbdev->fh, pbdev->fid); + } + } else if (object_dynamic_cast(OBJECT(dev), TYPE_S390_PCI_DEVICE)) { + pbdev = S390_PCI_DEVICE(dev); + + /* the allocated idx is actually getting used */ + s->next_idx = (pbdev->idx + 1) & FH_MASK_INDEX; + pbdev->fh = pbdev->idx; + QTAILQ_INSERT_TAIL(&s->zpci_devs, pbdev, link); + g_hash_table_insert(s->zpci_table, &pbdev->idx, pbdev); + } else { + g_assert_not_reached(); + } +} + +static void s390_pcihost_unplug(HotplugHandler *hotplug_dev, DeviceState *dev, + Error **errp) +{ + S390pciState *s = S390_PCI_HOST_BRIDGE(hotplug_dev); + S390PCIBusDevice *pbdev = NULL; + + if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) { + PCIDevice *pci_dev = PCI_DEVICE(dev); + PCIBus *bus; + int32_t devfn; + + pbdev = s390_pci_find_dev_by_pci(s, PCI_DEVICE(dev)); + g_assert(pbdev); + + s390_pci_generate_plug_event(HP_EVENT_STANDBY_TO_RESERVED, + pbdev->fh, pbdev->fid); + bus = pci_get_bus(pci_dev); + devfn = pci_dev->devfn; + qdev_unrealize(dev); + + s390_pci_msix_free(pbdev); + s390_pci_iommu_free(s, bus, 
devfn); + pbdev->pdev = NULL; + pbdev->state = ZPCI_FS_RESERVED; + } else if (object_dynamic_cast(OBJECT(dev), TYPE_S390_PCI_DEVICE)) { + pbdev = S390_PCI_DEVICE(dev); + pbdev->fid = 0; + QTAILQ_REMOVE(&s->zpci_devs, pbdev, link); + g_hash_table_remove(s->zpci_table, &pbdev->idx); + if (pbdev->iommu->dma_limit) { + s390_pci_end_dma_count(s, pbdev->iommu->dma_limit); + } + qdev_unrealize(dev); + } +} + +static void s390_pcihost_unplug_request(HotplugHandler *hotplug_dev, + DeviceState *dev, + Error **errp) +{ + S390pciState *s = S390_PCI_HOST_BRIDGE(hotplug_dev); + S390PCIBusDevice *pbdev; + + if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_BRIDGE)) { + error_setg(errp, "PCI bridge hot unplug currently not supported"); + } else if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) { + /* + * Redirect the unplug request to the zPCI device and remember that + * we've checked the PCI device already (to prevent endless recursion). + */ + pbdev = s390_pci_find_dev_by_pci(s, PCI_DEVICE(dev)); + g_assert(pbdev); + pbdev->pci_unplug_request_processed = true; + qdev_unplug(DEVICE(pbdev), errp); + } else if (object_dynamic_cast(OBJECT(dev), TYPE_S390_PCI_DEVICE)) { + pbdev = S390_PCI_DEVICE(dev); + + /* + * If unplug was initially requested for the zPCI device, we + * first have to redirect to the PCI device, which will in return + * redirect back to us after performing its checks (if the request + * is not blocked, e.g. because it's a PCI bridge). + */ + if (pbdev->pdev && !pbdev->pci_unplug_request_processed) { + qdev_unplug(DEVICE(pbdev->pdev), errp); + return; + } + pbdev->pci_unplug_request_processed = false; + + switch (pbdev->state) { + case ZPCI_FS_STANDBY: + case ZPCI_FS_RESERVED: + s390_pci_perform_unplug(pbdev); + break; + default: + /* + * Allow to send multiple requests, e.g. if the guest crashed + * before releasing the device, we would not be able to send + * another request to the same VM (e.g. fresh OS). + */ + pbdev->unplug_requested = true; + s390_pci_generate_plug_event(HP_EVENT_DECONFIGURE_REQUEST, + pbdev->fh, pbdev->fid); + } + } else { + g_assert_not_reached(); + } +} + +static void s390_pci_enumerate_bridge(PCIBus *bus, PCIDevice *pdev, + void *opaque) +{ + S390pciState *s = opaque; + PCIBus *sec_bus = NULL; + + if ((pci_default_read_config(pdev, PCI_HEADER_TYPE, 1) != + PCI_HEADER_TYPE_BRIDGE)) { + return; + } + + (s->bus_no)++; + pci_default_write_config(pdev, PCI_PRIMARY_BUS, pci_dev_bus_num(pdev), 1); + pci_default_write_config(pdev, PCI_SECONDARY_BUS, s->bus_no, 1); + pci_default_write_config(pdev, PCI_SUBORDINATE_BUS, s->bus_no, 1); + + sec_bus = pci_bridge_get_sec_bus(PCI_BRIDGE(pdev)); + if (!sec_bus) { + return; + } + + /* Assign numbers to all child bridges. The last is the highest number. */ + pci_for_each_device_under_bus(sec_bus, s390_pci_enumerate_bridge, s); + pci_default_write_config(pdev, PCI_SUBORDINATE_BUS, s->bus_no, 1); +} + +static void s390_pcihost_reset(DeviceState *dev) +{ + S390pciState *s = S390_PCI_HOST_BRIDGE(dev); + PCIBus *bus = s->parent_obj.bus; + S390PCIBusDevice *pbdev, *next; + + /* Process all pending unplug requests */ + QTAILQ_FOREACH_SAFE(pbdev, &s->zpci_devs, link, next) { + if (pbdev->unplug_requested) { + if (pbdev->summary_ind) { + pci_dereg_irqs(pbdev); + } + if (pbdev->iommu->enabled) { + pci_dereg_ioat(pbdev->iommu); + } + pbdev->state = ZPCI_FS_STANDBY; + s390_pci_perform_unplug(pbdev); + } + } + + /* + * When resetting a PCI bridge, the assigned numbers are set to 0. 
So + * on every system reset, we also have to reassign numbers. + */ + s->bus_no = 0; + pci_for_each_device_under_bus(bus, s390_pci_enumerate_bridge, s); +} + +static void s390_pcihost_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(klass); + + dc->reset = s390_pcihost_reset; + dc->realize = s390_pcihost_realize; + dc->unrealize = s390_pcihost_unrealize; + hc->pre_plug = s390_pcihost_pre_plug; + hc->plug = s390_pcihost_plug; + hc->unplug_request = s390_pcihost_unplug_request; + hc->unplug = s390_pcihost_unplug; + msi_nonbroken = true; +} + +static const TypeInfo s390_pcihost_info = { + .name = TYPE_S390_PCI_HOST_BRIDGE, + .parent = TYPE_PCI_HOST_BRIDGE, + .instance_size = sizeof(S390pciState), + .class_init = s390_pcihost_class_init, + .interfaces = (InterfaceInfo[]) { + { TYPE_HOTPLUG_HANDLER }, + { } + } +}; + +static const TypeInfo s390_pcibus_info = { + .name = TYPE_S390_PCI_BUS, + .parent = TYPE_BUS, + .instance_size = sizeof(S390PCIBus), +}; + +static uint16_t s390_pci_generate_uid(S390pciState *s) +{ + uint16_t uid = 0; + + do { + uid++; + if (!s390_pci_find_dev_by_uid(s, uid)) { + return uid; + } + } while (uid < ZPCI_MAX_UID); + + return UID_UNDEFINED; +} + +static uint32_t s390_pci_generate_fid(S390pciState *s, Error **errp) +{ + uint32_t fid = 0; + + do { + if (!s390_pci_find_dev_by_fid(s, fid)) { + return fid; + } + } while (fid++ != ZPCI_MAX_FID); + + error_setg(errp, "no free fid could be found"); + return 0; +} + +static void s390_pci_device_realize(DeviceState *dev, Error **errp) +{ + S390PCIBusDevice *zpci = S390_PCI_DEVICE(dev); + S390pciState *s = s390_get_phb(); + + if (!zpci->target) { + error_setg(errp, "target must be defined"); + return; + } + + if (s390_pci_find_dev_by_target(s, zpci->target)) { + error_setg(errp, "target %s already has an associated zpci device", + zpci->target); + return; + } + + if (zpci->uid == UID_UNDEFINED) { + zpci->uid = s390_pci_generate_uid(s); + if (!zpci->uid) { + error_setg(errp, "no free uid could be found"); + return; + } + } else if (s390_pci_find_dev_by_uid(s, zpci->uid)) { + error_setg(errp, "uid %u already in use", zpci->uid); + return; + } + + if (!zpci->fid_defined) { + Error *local_error = NULL; + + zpci->fid = s390_pci_generate_fid(s, &local_error); + if (local_error) { + error_propagate(errp, local_error); + return; + } + } else if (s390_pci_find_dev_by_fid(s, zpci->fid)) { + error_setg(errp, "fid %u already in use", zpci->fid); + return; + } + + zpci->state = ZPCI_FS_RESERVED; + zpci->fmb.format = ZPCI_FMB_FORMAT; +} + +static void s390_pci_device_reset(DeviceState *dev) +{ + S390PCIBusDevice *pbdev = S390_PCI_DEVICE(dev); + + switch (pbdev->state) { + case ZPCI_FS_RESERVED: + return; + case ZPCI_FS_STANDBY: + break; + default: + pbdev->fh &= ~FH_MASK_ENABLE; + pbdev->state = ZPCI_FS_DISABLED; + break; + } + + if (pbdev->summary_ind) { + pci_dereg_irqs(pbdev); + } + if (pbdev->iommu->enabled) { + pci_dereg_ioat(pbdev->iommu); + } + + fmb_timer_free(pbdev); +} + +static void s390_pci_get_fid(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + Property *prop = opaque; + uint32_t *ptr = object_field_prop_ptr(obj, prop); + + visit_type_uint32(v, name, ptr, errp); +} + +static void s390_pci_set_fid(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + S390PCIBusDevice *zpci = S390_PCI_DEVICE(obj); + Property *prop = opaque; + uint32_t *ptr = object_field_prop_ptr(obj, prop); + + if 
(!visit_type_uint32(v, name, ptr, errp)) { + return; + } + zpci->fid_defined = true; +} + +static const PropertyInfo s390_pci_fid_propinfo = { + .name = "zpci_fid", + .get = s390_pci_get_fid, + .set = s390_pci_set_fid, +}; + +#define DEFINE_PROP_S390_PCI_FID(_n, _s, _f) \ + DEFINE_PROP(_n, _s, _f, s390_pci_fid_propinfo, uint32_t) + +static Property s390_pci_device_properties[] = { + DEFINE_PROP_UINT16("uid", S390PCIBusDevice, uid, UID_UNDEFINED), + DEFINE_PROP_S390_PCI_FID("fid", S390PCIBusDevice, fid), + DEFINE_PROP_STRING("target", S390PCIBusDevice, target), + DEFINE_PROP_END_OF_LIST(), +}; + +static const VMStateDescription s390_pci_device_vmstate = { + .name = TYPE_S390_PCI_DEVICE, + /* + * TODO: add state handling here, so migration works at least with + * emulated pci devices on s390x + */ + .unmigratable = 1, +}; + +static void s390_pci_device_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->desc = "zpci device"; + set_bit(DEVICE_CATEGORY_MISC, dc->categories); + dc->reset = s390_pci_device_reset; + dc->bus_type = TYPE_S390_PCI_BUS; + dc->realize = s390_pci_device_realize; + device_class_set_props(dc, s390_pci_device_properties); + dc->vmsd = &s390_pci_device_vmstate; +} + +static const TypeInfo s390_pci_device_info = { + .name = TYPE_S390_PCI_DEVICE, + .parent = TYPE_DEVICE, + .instance_size = sizeof(S390PCIBusDevice), + .class_init = s390_pci_device_class_init, +}; + +static TypeInfo s390_pci_iommu_info = { + .name = TYPE_S390_PCI_IOMMU, + .parent = TYPE_OBJECT, + .instance_size = sizeof(S390PCIIOMMU), +}; + +static void s390_iommu_memory_region_class_init(ObjectClass *klass, void *data) +{ + IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass); + + imrc->translate = s390_translate_iommu; + imrc->replay = s390_pci_iommu_replay; +} + +static const TypeInfo s390_iommu_memory_region_info = { + .parent = TYPE_IOMMU_MEMORY_REGION, + .name = TYPE_S390_IOMMU_MEMORY_REGION, + .class_init = s390_iommu_memory_region_class_init, +}; + +static void s390_pci_register_types(void) +{ + type_register_static(&s390_pcihost_info); + type_register_static(&s390_pcibus_info); + type_register_static(&s390_pci_device_info); + type_register_static(&s390_pci_iommu_info); + type_register_static(&s390_iommu_memory_region_info); +} + +type_init(s390_pci_register_types) diff --git a/hw/s390x/s390-pci-inst.c b/hw/s390x/s390-pci-inst.c new file mode 100644 index 000000000..1c8ad9117 --- /dev/null +++ b/hw/s390x/s390-pci-inst.c @@ -0,0 +1,1310 @@ +/* + * s390 PCI instructions + * + * Copyright 2014 IBM Corp. + * Author(s): Frank Blaschka + * Hong Bo Li + * Yi Min Zhao + * + * This work is licensed under the terms of the GNU GPL, version 2 or (at + * your option) any later version. See the COPYING file in the top-level + * directory. + */ + +#include "qemu/osdep.h" +#include "exec/memop.h" +#include "exec/memory-internal.h" +#include "qemu/error-report.h" +#include "sysemu/hw_accel.h" +#include "hw/s390x/s390-pci-inst.h" +#include "hw/s390x/s390-pci-bus.h" +#include "hw/s390x/tod.h" + +#ifndef DEBUG_S390PCI_INST +#define DEBUG_S390PCI_INST 0 +#endif + +#define DPRINTF(fmt, ...) 
\ + do { \ + if (DEBUG_S390PCI_INST) { \ + fprintf(stderr, "s390pci-inst: " fmt, ## __VA_ARGS__); \ + } \ + } while (0) + +static inline void inc_dma_avail(S390PCIIOMMU *iommu) +{ + if (iommu->dma_limit) { + iommu->dma_limit->avail++; + } +} + +static inline void dec_dma_avail(S390PCIIOMMU *iommu) +{ + if (iommu->dma_limit) { + iommu->dma_limit->avail--; + } +} + +static void s390_set_status_code(CPUS390XState *env, + uint8_t r, uint64_t status_code) +{ + env->regs[r] &= ~0xff000000ULL; + env->regs[r] |= (status_code & 0xff) << 24; +} + +static int list_pci(ClpReqRspListPci *rrb, uint8_t *cc) +{ + S390PCIBusDevice *pbdev = NULL; + S390pciState *s = s390_get_phb(); + uint32_t res_code, initial_l2, g_l2; + int rc, i; + uint64_t resume_token; + + rc = 0; + if (lduw_p(&rrb->request.hdr.len) != 32) { + res_code = CLP_RC_LEN; + rc = -EINVAL; + goto out; + } + + if ((ldl_p(&rrb->request.fmt) & CLP_MASK_FMT) != 0) { + res_code = CLP_RC_FMT; + rc = -EINVAL; + goto out; + } + + if ((ldl_p(&rrb->request.fmt) & ~CLP_MASK_FMT) != 0 || + ldq_p(&rrb->request.reserved1) != 0) { + res_code = CLP_RC_RESNOT0; + rc = -EINVAL; + goto out; + } + + resume_token = ldq_p(&rrb->request.resume_token); + + if (resume_token) { + pbdev = s390_pci_find_dev_by_idx(s, resume_token); + if (!pbdev) { + res_code = CLP_RC_LISTPCI_BADRT; + rc = -EINVAL; + goto out; + } + } else { + pbdev = s390_pci_find_next_avail_dev(s, NULL); + } + + if (lduw_p(&rrb->response.hdr.len) < 48) { + res_code = CLP_RC_8K; + rc = -EINVAL; + goto out; + } + + initial_l2 = lduw_p(&rrb->response.hdr.len); + if ((initial_l2 - LIST_PCI_HDR_LEN) % sizeof(ClpFhListEntry) + != 0) { + res_code = CLP_RC_LEN; + rc = -EINVAL; + *cc = 3; + goto out; + } + + stl_p(&rrb->response.fmt, 0); + stq_p(&rrb->response.reserved1, 0); + stl_p(&rrb->response.mdd, FH_MASK_SHM); + stw_p(&rrb->response.max_fn, PCI_MAX_FUNCTIONS); + rrb->response.flags = UID_CHECKING_ENABLED; + rrb->response.entry_size = sizeof(ClpFhListEntry); + + i = 0; + g_l2 = LIST_PCI_HDR_LEN; + while (g_l2 < initial_l2 && pbdev) { + stw_p(&rrb->response.fh_list[i].device_id, + pci_get_word(pbdev->pdev->config + PCI_DEVICE_ID)); + stw_p(&rrb->response.fh_list[i].vendor_id, + pci_get_word(pbdev->pdev->config + PCI_VENDOR_ID)); + /* Ignore RESERVED devices. */ + stl_p(&rrb->response.fh_list[i].config, + pbdev->state == ZPCI_FS_STANDBY ? 0 : 1 << 31); + stl_p(&rrb->response.fh_list[i].fid, pbdev->fid); + stl_p(&rrb->response.fh_list[i].fh, pbdev->fh); + + g_l2 += sizeof(ClpFhListEntry); + /* Add endian check for DPRINTF? 
*/ + DPRINTF("g_l2 %d vendor id 0x%x device id 0x%x fid 0x%x fh 0x%x\n", + g_l2, + lduw_p(&rrb->response.fh_list[i].vendor_id), + lduw_p(&rrb->response.fh_list[i].device_id), + ldl_p(&rrb->response.fh_list[i].fid), + ldl_p(&rrb->response.fh_list[i].fh)); + pbdev = s390_pci_find_next_avail_dev(s, pbdev); + i++; + } + + if (!pbdev) { + resume_token = 0; + } else { + resume_token = pbdev->fh & FH_MASK_INDEX; + } + stq_p(&rrb->response.resume_token, resume_token); + stw_p(&rrb->response.hdr.len, g_l2); + stw_p(&rrb->response.hdr.rsp, CLP_RC_OK); +out: + if (rc) { + DPRINTF("list pci failed rc 0x%x\n", rc); + stw_p(&rrb->response.hdr.rsp, res_code); + } + return rc; +} + +int clp_service_call(S390CPU *cpu, uint8_t r2, uintptr_t ra) +{ + ClpReqHdr *reqh; + ClpRspHdr *resh; + S390PCIBusDevice *pbdev; + uint32_t req_len; + uint32_t res_len; + uint8_t buffer[4096 * 2]; + uint8_t cc = 0; + CPUS390XState *env = &cpu->env; + S390pciState *s = s390_get_phb(); + int i; + + if (env->psw.mask & PSW_MASK_PSTATE) { + s390_program_interrupt(env, PGM_PRIVILEGED, ra); + return 0; + } + + if (s390_cpu_virt_mem_read(cpu, env->regs[r2], r2, buffer, sizeof(*reqh))) { + s390_cpu_virt_mem_handle_exc(cpu, ra); + return 0; + } + reqh = (ClpReqHdr *)buffer; + req_len = lduw_p(&reqh->len); + if (req_len < 16 || req_len > 8184 || (req_len % 8 != 0)) { + s390_program_interrupt(env, PGM_OPERAND, ra); + return 0; + } + + if (s390_cpu_virt_mem_read(cpu, env->regs[r2], r2, buffer, + req_len + sizeof(*resh))) { + s390_cpu_virt_mem_handle_exc(cpu, ra); + return 0; + } + resh = (ClpRspHdr *)(buffer + req_len); + res_len = lduw_p(&resh->len); + if (res_len < 8 || res_len > 8176 || (res_len % 8 != 0)) { + s390_program_interrupt(env, PGM_OPERAND, ra); + return 0; + } + if ((req_len + res_len) > 8192) { + s390_program_interrupt(env, PGM_OPERAND, ra); + return 0; + } + + if (s390_cpu_virt_mem_read(cpu, env->regs[r2], r2, buffer, + req_len + res_len)) { + s390_cpu_virt_mem_handle_exc(cpu, ra); + return 0; + } + + if (req_len != 32) { + stw_p(&resh->rsp, CLP_RC_LEN); + goto out; + } + + switch (lduw_p(&reqh->cmd)) { + case CLP_LIST_PCI: { + ClpReqRspListPci *rrb = (ClpReqRspListPci *)buffer; + list_pci(rrb, &cc); + break; + } + case CLP_SET_PCI_FN: { + ClpReqSetPci *reqsetpci = (ClpReqSetPci *)reqh; + ClpRspSetPci *ressetpci = (ClpRspSetPci *)resh; + + pbdev = s390_pci_find_dev_by_fh(s, ldl_p(&reqsetpci->fh)); + if (!pbdev) { + stw_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_FH); + goto out; + } + + switch (reqsetpci->oc) { + case CLP_SET_ENABLE_PCI_FN: + switch (reqsetpci->ndas) { + case 0: + stw_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_DMAAS); + goto out; + case 1: + break; + default: + stw_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_RES); + goto out; + } + + if (pbdev->fh & FH_MASK_ENABLE) { + stw_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_FHOP); + goto out; + } + + pbdev->fh |= FH_MASK_ENABLE; + pbdev->state = ZPCI_FS_ENABLED; + stl_p(&ressetpci->fh, pbdev->fh); + stw_p(&ressetpci->hdr.rsp, CLP_RC_OK); + break; + case CLP_SET_DISABLE_PCI_FN: + if (!(pbdev->fh & FH_MASK_ENABLE)) { + stw_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_FHOP); + goto out; + } + device_legacy_reset(DEVICE(pbdev)); + pbdev->fh &= ~FH_MASK_ENABLE; + pbdev->state = ZPCI_FS_DISABLED; + stl_p(&ressetpci->fh, pbdev->fh); + stw_p(&ressetpci->hdr.rsp, CLP_RC_OK); + break; + default: + DPRINTF("unknown set pci command\n"); + stw_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_FHOP); + break; + } + break; + } + case CLP_QUERY_PCI_FN: { + ClpReqQueryPci *reqquery = (ClpReqQueryPci *)reqh; + 
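/*
+         * Query response sketch: everything below echoes the attributes
+         * cached in pbdev->zpci_fn (the emulation defaults from
+         * set_pbdev_info() or values read from vfio) plus the current
+         * BAR values; bar_size is reported as log2 of the region size.
+         */
+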
ClpRspQueryPci *resquery = (ClpRspQueryPci *)resh;
+
+        pbdev = s390_pci_find_dev_by_fh(s, ldl_p(&reqquery->fh));
+        if (!pbdev) {
+            DPRINTF("query pci no pci dev\n");
+            stw_p(&resquery->hdr.rsp, CLP_RC_SETPCIFN_FH);
+            goto out;
+        }
+
+        stq_p(&resquery->sdma, pbdev->zpci_fn.sdma);
+        stq_p(&resquery->edma, pbdev->zpci_fn.edma);
+        stw_p(&resquery->pchid, pbdev->zpci_fn.pchid);
+        stw_p(&resquery->vfn, pbdev->zpci_fn.vfn);
+        resquery->flags = pbdev->zpci_fn.flags;
+        resquery->pfgid = pbdev->zpci_fn.pfgid;
+        resquery->pft = pbdev->zpci_fn.pft;
+        resquery->fmbl = pbdev->zpci_fn.fmbl;
+        stl_p(&resquery->fid, pbdev->zpci_fn.fid);
+        stl_p(&resquery->uid, pbdev->zpci_fn.uid);
+        memcpy(resquery->pfip, pbdev->zpci_fn.pfip, CLP_PFIP_NR_SEGMENTS);
+        memcpy(resquery->util_str, pbdev->zpci_fn.util_str, CLP_UTIL_STR_LEN);
+
+        for (i = 0; i < PCI_BAR_COUNT; i++) {
+            uint32_t data = pci_get_long(pbdev->pdev->config +
+                                         PCI_BASE_ADDRESS_0 + (i * 4));
+
+            stl_p(&resquery->bar[i], data);
+            resquery->bar_size[i] = pbdev->pdev->io_regions[i].size ?
+                                    ctz64(pbdev->pdev->io_regions[i].size) : 0;
+            DPRINTF("bar %d addr 0x%x size 0x%" PRIx64 " barsize 0x%x\n", i,
+                    ldl_p(&resquery->bar[i]),
+                    pbdev->pdev->io_regions[i].size,
+                    resquery->bar_size[i]);
+        }
+
+        stw_p(&resquery->hdr.rsp, CLP_RC_OK);
+        break;
+    }
+    case CLP_QUERY_PCI_FNGRP: {
+        ClpRspQueryPciGrp *resgrp = (ClpRspQueryPciGrp *)resh;
+
+        ClpReqQueryPciGrp *reqgrp = (ClpReqQueryPciGrp *)reqh;
+        S390PCIGroup *group;
+
+        group = s390_group_find(reqgrp->g);
+        if (!group) {
+            /*
+             * We do not allow access to unknown groups; the group must
+             * have been obtained with a vfio device.
+             */
+            stw_p(&resgrp->hdr.rsp, CLP_RC_QUERYPCIFG_PFGID);
+            goto out;
+        }
+        resgrp->fr = group->zpci_group.fr;
+        stq_p(&resgrp->dasm, group->zpci_group.dasm);
+        stq_p(&resgrp->msia, group->zpci_group.msia);
+        stw_p(&resgrp->mui, group->zpci_group.mui);
+        stw_p(&resgrp->i, group->zpci_group.i);
+        stw_p(&resgrp->maxstbl, group->zpci_group.maxstbl);
+        resgrp->version = group->zpci_group.version;
+        stw_p(&resgrp->hdr.rsp, CLP_RC_OK);
+        break;
+    }
+    default:
+        DPRINTF("unknown clp command\n");
+        stw_p(&resh->rsp, CLP_RC_CMD);
+        break;
+    }
+
+out:
+    if (s390_cpu_virt_mem_write(cpu, env->regs[r2], r2, buffer,
+                                req_len + res_len)) {
+        s390_cpu_virt_mem_handle_exc(cpu, ra);
+        return 0;
+    }
+    setcc(cpu, cc);
+    return 0;
+}
+
+/**
+ * Swap data contained in s390x big endian registers to little endian
+ * PCI bars.
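+ * For example, a 4-byte value 0x12345678 taken from a register is stored
+ * as 0x78563412 before being handed to the little-endian PCI space.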
+ * + * @ptr: a pointer to a uint64_t data field + * @len: the length of the valid data, must be 1,2,4 or 8 + */ +static int zpci_endian_swap(uint64_t *ptr, uint8_t len) +{ + uint64_t data = *ptr; + + switch (len) { + case 1: + break; + case 2: + data = bswap16(data); + break; + case 4: + data = bswap32(data); + break; + case 8: + data = bswap64(data); + break; + default: + return -EINVAL; + } + *ptr = data; + return 0; +} + +static MemoryRegion *s390_get_subregion(MemoryRegion *mr, uint64_t offset, + uint8_t len) +{ + MemoryRegion *subregion; + uint64_t subregion_size; + + QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) { + subregion_size = int128_get64(subregion->size); + if ((offset >= subregion->addr) && + (offset + len) <= (subregion->addr + subregion_size)) { + mr = subregion; + break; + } + } + return mr; +} + +static MemTxResult zpci_read_bar(S390PCIBusDevice *pbdev, uint8_t pcias, + uint64_t offset, uint64_t *data, uint8_t len) +{ + MemoryRegion *mr; + + mr = pbdev->pdev->io_regions[pcias].memory; + mr = s390_get_subregion(mr, offset, len); + offset -= mr->addr; + return memory_region_dispatch_read(mr, offset, data, + size_memop(len) | MO_BE, + MEMTXATTRS_UNSPECIFIED); +} + +int pcilg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2, uintptr_t ra) +{ + CPUS390XState *env = &cpu->env; + S390PCIBusDevice *pbdev; + uint64_t offset; + uint64_t data; + MemTxResult result; + uint8_t len; + uint32_t fh; + uint8_t pcias; + + if (env->psw.mask & PSW_MASK_PSTATE) { + s390_program_interrupt(env, PGM_PRIVILEGED, ra); + return 0; + } + + if (r2 & 0x1) { + s390_program_interrupt(env, PGM_SPECIFICATION, ra); + return 0; + } + + fh = env->regs[r2] >> 32; + pcias = (env->regs[r2] >> 16) & 0xf; + len = env->regs[r2] & 0xf; + offset = env->regs[r2 + 1]; + + if (!(fh & FH_MASK_ENABLE)) { + setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE); + return 0; + } + + pbdev = s390_pci_find_dev_by_fh(s390_get_phb(), fh); + if (!pbdev) { + DPRINTF("pcilg no pci dev\n"); + setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE); + return 0; + } + + switch (pbdev->state) { + case ZPCI_FS_PERMANENT_ERROR: + case ZPCI_FS_ERROR: + setcc(cpu, ZPCI_PCI_LS_ERR); + s390_set_status_code(env, r2, ZPCI_PCI_ST_BLOCKED); + return 0; + default: + break; + } + + switch (pcias) { + case ZPCI_IO_BAR_MIN...ZPCI_IO_BAR_MAX: + if (!len || (len > (8 - (offset & 0x7)))) { + s390_program_interrupt(env, PGM_OPERAND, ra); + return 0; + } + result = zpci_read_bar(pbdev, pcias, offset, &data, len); + if (result != MEMTX_OK) { + s390_program_interrupt(env, PGM_OPERAND, ra); + return 0; + } + break; + case ZPCI_CONFIG_BAR: + if (!len || (len > (4 - (offset & 0x3))) || len == 3) { + s390_program_interrupt(env, PGM_OPERAND, ra); + return 0; + } + data = pci_host_config_read_common( + pbdev->pdev, offset, pci_config_size(pbdev->pdev), len); + + if (zpci_endian_swap(&data, len)) { + s390_program_interrupt(env, PGM_OPERAND, ra); + return 0; + } + break; + default: + DPRINTF("pcilg invalid space\n"); + setcc(cpu, ZPCI_PCI_LS_ERR); + s390_set_status_code(env, r2, ZPCI_PCI_ST_INVAL_AS); + return 0; + } + + pbdev->fmb.counter[ZPCI_FMB_CNT_LD]++; + + env->regs[r1] = data; + setcc(cpu, ZPCI_PCI_LS_OK); + return 0; +} + +static MemTxResult zpci_write_bar(S390PCIBusDevice *pbdev, uint8_t pcias, + uint64_t offset, uint64_t data, uint8_t len) +{ + MemoryRegion *mr; + + mr = pbdev->pdev->io_regions[pcias].memory; + mr = s390_get_subregion(mr, offset, len); + offset -= mr->addr; + return memory_region_dispatch_write(mr, offset, data, + size_memop(len) | MO_BE, + 
MEMTXATTRS_UNSPECIFIED); +} + +int pcistg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2, uintptr_t ra) +{ + CPUS390XState *env = &cpu->env; + uint64_t offset, data; + S390PCIBusDevice *pbdev; + MemTxResult result; + uint8_t len; + uint32_t fh; + uint8_t pcias; + + if (env->psw.mask & PSW_MASK_PSTATE) { + s390_program_interrupt(env, PGM_PRIVILEGED, ra); + return 0; + } + + if (r2 & 0x1) { + s390_program_interrupt(env, PGM_SPECIFICATION, ra); + return 0; + } + + fh = env->regs[r2] >> 32; + pcias = (env->regs[r2] >> 16) & 0xf; + len = env->regs[r2] & 0xf; + offset = env->regs[r2 + 1]; + data = env->regs[r1]; + + if (!(fh & FH_MASK_ENABLE)) { + setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE); + return 0; + } + + pbdev = s390_pci_find_dev_by_fh(s390_get_phb(), fh); + if (!pbdev) { + DPRINTF("pcistg no pci dev\n"); + setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE); + return 0; + } + + switch (pbdev->state) { + /* ZPCI_FS_RESERVED, ZPCI_FS_STANDBY and ZPCI_FS_DISABLED + * are already covered by the FH_MASK_ENABLE check above + */ + case ZPCI_FS_PERMANENT_ERROR: + case ZPCI_FS_ERROR: + setcc(cpu, ZPCI_PCI_LS_ERR); + s390_set_status_code(env, r2, ZPCI_PCI_ST_BLOCKED); + return 0; + default: + break; + } + + switch (pcias) { + /* A ZPCI PCI card may use any BAR from BAR 0 to BAR 5 */ + case ZPCI_IO_BAR_MIN...ZPCI_IO_BAR_MAX: + /* Check length: + * A length of 0 is invalid and length should not cross a double word + */ + if (!len || (len > (8 - (offset & 0x7)))) { + s390_program_interrupt(env, PGM_OPERAND, ra); + return 0; + } + + result = zpci_write_bar(pbdev, pcias, offset, data, len); + if (result != MEMTX_OK) { + s390_program_interrupt(env, PGM_OPERAND, ra); + return 0; + } + break; + case ZPCI_CONFIG_BAR: + /* ZPCI uses the pseudo BAR number 15 as configuration space */ + /* possible access lengths are 1,2,4 and must not cross a word */ + if (!len || (len > (4 - (offset & 0x3))) || len == 3) { + s390_program_interrupt(env, PGM_OPERAND, ra); + return 0; + } + /* len = 1,2,4 so we do not need to test */ + zpci_endian_swap(&data, len); + pci_host_config_write_common(pbdev->pdev, offset, + pci_config_size(pbdev->pdev), + data, len); + break; + default: + DPRINTF("pcistg invalid space\n"); + setcc(cpu, ZPCI_PCI_LS_ERR); + s390_set_status_code(env, r2, ZPCI_PCI_ST_INVAL_AS); + return 0; + } + + pbdev->fmb.counter[ZPCI_FMB_CNT_ST]++; + + setcc(cpu, ZPCI_PCI_LS_OK); + return 0; +} + +static uint32_t s390_pci_update_iotlb(S390PCIIOMMU *iommu, + S390IOTLBEntry *entry) +{ + S390IOTLBEntry *cache = g_hash_table_lookup(iommu->iotlb, &entry->iova); + IOMMUTLBEvent event = { + .type = entry->perm ? 
IOMMU_NOTIFIER_MAP : IOMMU_NOTIFIER_UNMAP, + .entry = { + .target_as = &address_space_memory, + .iova = entry->iova, + .translated_addr = entry->translated_addr, + .perm = entry->perm, + .addr_mask = ~TARGET_PAGE_MASK, + }, + }; + + if (event.type == IOMMU_NOTIFIER_UNMAP) { + if (!cache) { + goto out; + } + g_hash_table_remove(iommu->iotlb, &entry->iova); + inc_dma_avail(iommu); + } else { + if (cache) { + if (cache->perm == entry->perm && + cache->translated_addr == entry->translated_addr) { + goto out; + } + + event.type = IOMMU_NOTIFIER_UNMAP; + event.entry.perm = IOMMU_NONE; + memory_region_notify_iommu(&iommu->iommu_mr, 0, event); + event.type = IOMMU_NOTIFIER_MAP; + event.entry.perm = entry->perm; + } + + cache = g_new(S390IOTLBEntry, 1); + cache->iova = entry->iova; + cache->translated_addr = entry->translated_addr; + cache->len = TARGET_PAGE_SIZE; + cache->perm = entry->perm; + g_hash_table_replace(iommu->iotlb, &cache->iova, cache); + dec_dma_avail(iommu); + } + + memory_region_notify_iommu(&iommu->iommu_mr, 0, event); + +out: + return iommu->dma_limit ? iommu->dma_limit->avail : 1; +} + +int rpcit_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2, uintptr_t ra) +{ + CPUS390XState *env = &cpu->env; + uint32_t fh; + uint16_t error = 0; + S390PCIBusDevice *pbdev; + S390PCIIOMMU *iommu; + S390IOTLBEntry entry; + hwaddr start, end; + uint32_t dma_avail; + + if (env->psw.mask & PSW_MASK_PSTATE) { + s390_program_interrupt(env, PGM_PRIVILEGED, ra); + return 0; + } + + if (r2 & 0x1) { + s390_program_interrupt(env, PGM_SPECIFICATION, ra); + return 0; + } + + fh = env->regs[r1] >> 32; + start = env->regs[r2]; + end = start + env->regs[r2 + 1]; + + pbdev = s390_pci_find_dev_by_fh(s390_get_phb(), fh); + if (!pbdev) { + DPRINTF("rpcit no pci dev\n"); + setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE); + return 0; + } + + switch (pbdev->state) { + case ZPCI_FS_RESERVED: + case ZPCI_FS_STANDBY: + case ZPCI_FS_DISABLED: + case ZPCI_FS_PERMANENT_ERROR: + setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE); + return 0; + case ZPCI_FS_ERROR: + setcc(cpu, ZPCI_PCI_LS_ERR); + s390_set_status_code(env, r1, ZPCI_MOD_ST_ERROR_RECOVER); + return 0; + default: + break; + } + + iommu = pbdev->iommu; + if (iommu->dma_limit) { + dma_avail = iommu->dma_limit->avail; + } else { + dma_avail = 1; + } + if (!iommu->g_iota) { + error = ERR_EVENT_INVALAS; + goto err; + } + + if (end < iommu->pba || start > iommu->pal) { + error = ERR_EVENT_OORANGE; + goto err; + } + + while (start < end) { + error = s390_guest_io_table_walk(iommu->g_iota, start, &entry); + if (error) { + break; + } + + start += entry.len; + while (entry.iova < start && entry.iova < end && + (dma_avail > 0 || entry.perm == IOMMU_NONE)) { + dma_avail = s390_pci_update_iotlb(iommu, &entry); + entry.iova += TARGET_PAGE_SIZE; + entry.translated_addr += TARGET_PAGE_SIZE; + } + } +err: + if (error) { + pbdev->state = ZPCI_FS_ERROR; + setcc(cpu, ZPCI_PCI_LS_ERR); + s390_set_status_code(env, r1, ZPCI_PCI_ST_FUNC_IN_ERR); + s390_pci_generate_error_event(error, pbdev->fh, pbdev->fid, start, 0); + } else { + pbdev->fmb.counter[ZPCI_FMB_CNT_RPCIT]++; + if (dma_avail > 0) { + setcc(cpu, ZPCI_PCI_LS_OK); + } else { + /* vfio DMA mappings are exhausted, trigger a RPCIT */ + setcc(cpu, ZPCI_PCI_LS_ERR); + s390_set_status_code(env, r1, ZPCI_RPCIT_ST_INSUFF_RES); + } + } + return 0; +} + +int pcistb_service_call(S390CPU *cpu, uint8_t r1, uint8_t r3, uint64_t gaddr, + uint8_t ar, uintptr_t ra) +{ + CPUS390XState *env = &cpu->env; + S390PCIBusDevice *pbdev; + MemoryRegion *mr; + MemTxResult 
result; + uint64_t offset; + int i; + uint32_t fh; + uint8_t pcias; + uint16_t len; + uint8_t buffer[128]; + + if (env->psw.mask & PSW_MASK_PSTATE) { + s390_program_interrupt(env, PGM_PRIVILEGED, ra); + return 0; + } + + fh = env->regs[r1] >> 32; + pcias = (env->regs[r1] >> 16) & 0xf; + len = env->regs[r1] & 0x1fff; + offset = env->regs[r3]; + + if (!(fh & FH_MASK_ENABLE)) { + setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE); + return 0; + } + + pbdev = s390_pci_find_dev_by_fh(s390_get_phb(), fh); + if (!pbdev) { + DPRINTF("pcistb no pci dev fh 0x%x\n", fh); + setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE); + return 0; + } + + switch (pbdev->state) { + case ZPCI_FS_PERMANENT_ERROR: + case ZPCI_FS_ERROR: + setcc(cpu, ZPCI_PCI_LS_ERR); + s390_set_status_code(env, r1, ZPCI_PCI_ST_BLOCKED); + return 0; + default: + break; + } + + if (pcias > ZPCI_IO_BAR_MAX) { + DPRINTF("pcistb invalid space\n"); + setcc(cpu, ZPCI_PCI_LS_ERR); + s390_set_status_code(env, r1, ZPCI_PCI_ST_INVAL_AS); + return 0; + } + + /* Verify the address, offset and length */ + /* offset must be a multiple of 8 */ + if (offset % 8) { + goto specification_error; + } + /* Length must be greater than 8, a multiple of 8 */ + /* and not greater than maxstbl */ + if ((len <= 8) || (len % 8) || + (len > pbdev->pci_group->zpci_group.maxstbl)) { + goto specification_error; + } + /* Do not cross a 4K-byte boundary */ + if (((offset & 0xfff) + len) > 0x1000) { + goto specification_error; + } + /* Guest address must be double word aligned */ + if (gaddr & 0x07UL) { + goto specification_error; + } + + mr = pbdev->pdev->io_regions[pcias].memory; + mr = s390_get_subregion(mr, offset, len); + offset -= mr->addr; + + for (i = 0; i < len; i += 8) { + if (!memory_region_access_valid(mr, offset + i, 8, true, + MEMTXATTRS_UNSPECIFIED)) { + s390_program_interrupt(env, PGM_OPERAND, ra); + return 0; + } + } + + if (s390_cpu_virt_mem_read(cpu, gaddr, ar, buffer, len)) { + s390_cpu_virt_mem_handle_exc(cpu, ra); + return 0; + } + + for (i = 0; i < len / 8; i++) { + result = memory_region_dispatch_write(mr, offset + i * 8, + ldq_p(buffer + i * 8), + MO_64, MEMTXATTRS_UNSPECIFIED); + if (result != MEMTX_OK) { + s390_program_interrupt(env, PGM_OPERAND, ra); + return 0; + } + } + + pbdev->fmb.counter[ZPCI_FMB_CNT_STB]++; + + setcc(cpu, ZPCI_PCI_LS_OK); + return 0; + +specification_error: + s390_program_interrupt(env, PGM_SPECIFICATION, ra); + return 0; +} + +static int reg_irqs(CPUS390XState *env, S390PCIBusDevice *pbdev, ZpciFib fib) +{ + int ret, len; + uint8_t isc = FIB_DATA_ISC(ldl_p(&fib.data)); + + pbdev->routes.adapter.adapter_id = css_get_adapter_id( + CSS_IO_ADAPTER_PCI, isc); + pbdev->summary_ind = get_indicator(ldq_p(&fib.aisb), sizeof(uint64_t)); + len = BITS_TO_LONGS(FIB_DATA_NOI(ldl_p(&fib.data))) * sizeof(unsigned long); + pbdev->indicator = get_indicator(ldq_p(&fib.aibv), len); + + ret = map_indicator(&pbdev->routes.adapter, pbdev->summary_ind); + if (ret) { + goto out; + } + + ret = map_indicator(&pbdev->routes.adapter, pbdev->indicator); + if (ret) { + goto out; + } + + pbdev->routes.adapter.summary_addr = ldq_p(&fib.aisb); + pbdev->routes.adapter.summary_offset = FIB_DATA_AISBO(ldl_p(&fib.data)); + pbdev->routes.adapter.ind_addr = ldq_p(&fib.aibv); + pbdev->routes.adapter.ind_offset = FIB_DATA_AIBVO(ldl_p(&fib.data)); + pbdev->isc = isc; + pbdev->noi = FIB_DATA_NOI(ldl_p(&fib.data)); + pbdev->sum = FIB_DATA_SUM(ldl_p(&fib.data)); + + DPRINTF("reg_irqs adapter id %d\n", pbdev->routes.adapter.adapter_id); + return 0; +out: + 
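/*
+     * Error unwind: release whatever was set up above.  The indicator
+     * helpers are assumed to tolerate an indicator that was never
+     * successfully mapped.
+     */
+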
release_indicator(&pbdev->routes.adapter, pbdev->summary_ind); + release_indicator(&pbdev->routes.adapter, pbdev->indicator); + pbdev->summary_ind = NULL; + pbdev->indicator = NULL; + return ret; +} + +int pci_dereg_irqs(S390PCIBusDevice *pbdev) +{ + release_indicator(&pbdev->routes.adapter, pbdev->summary_ind); + release_indicator(&pbdev->routes.adapter, pbdev->indicator); + + pbdev->summary_ind = NULL; + pbdev->indicator = NULL; + pbdev->routes.adapter.summary_addr = 0; + pbdev->routes.adapter.summary_offset = 0; + pbdev->routes.adapter.ind_addr = 0; + pbdev->routes.adapter.ind_offset = 0; + pbdev->isc = 0; + pbdev->noi = 0; + pbdev->sum = 0; + + DPRINTF("dereg_irqs adapter id %d\n", pbdev->routes.adapter.adapter_id); + return 0; +} + +static int reg_ioat(CPUS390XState *env, S390PCIIOMMU *iommu, ZpciFib fib, + uintptr_t ra) +{ + uint64_t pba = ldq_p(&fib.pba); + uint64_t pal = ldq_p(&fib.pal); + uint64_t g_iota = ldq_p(&fib.iota); + uint8_t dt = (g_iota >> 2) & 0x7; + uint8_t t = (g_iota >> 11) & 0x1; + + pba &= ~0xfff; + pal |= 0xfff; + if (pba > pal || pba < ZPCI_SDMA_ADDR || pal > ZPCI_EDMA_ADDR) { + s390_program_interrupt(env, PGM_OPERAND, ra); + return -EINVAL; + } + + /* currently we only support designation type 1 with translation */ + if (!(dt == ZPCI_IOTA_RTTO && t)) { + error_report("unsupported ioat dt %d t %d", dt, t); + s390_program_interrupt(env, PGM_OPERAND, ra); + return -EINVAL; + } + + iommu->pba = pba; + iommu->pal = pal; + iommu->g_iota = g_iota; + + s390_pci_iommu_enable(iommu); + + return 0; +} + +void pci_dereg_ioat(S390PCIIOMMU *iommu) +{ + s390_pci_iommu_disable(iommu); + iommu->pba = 0; + iommu->pal = 0; + iommu->g_iota = 0; +} + +void fmb_timer_free(S390PCIBusDevice *pbdev) +{ + if (pbdev->fmb_timer) { + timer_free(pbdev->fmb_timer); + pbdev->fmb_timer = NULL; + } + pbdev->fmb_addr = 0; + memset(&pbdev->fmb, 0, sizeof(ZpciFmb)); +} + +static int fmb_do_update(S390PCIBusDevice *pbdev, int offset, uint64_t val, + int len) +{ + MemTxResult ret; + uint64_t dst = pbdev->fmb_addr + offset; + + switch (len) { + case 8: + address_space_stq_be(&address_space_memory, dst, val, + MEMTXATTRS_UNSPECIFIED, + &ret); + break; + case 4: + address_space_stl_be(&address_space_memory, dst, val, + MEMTXATTRS_UNSPECIFIED, + &ret); + break; + case 2: + address_space_stw_be(&address_space_memory, dst, val, + MEMTXATTRS_UNSPECIFIED, + &ret); + break; + case 1: + address_space_stb(&address_space_memory, dst, val, + MEMTXATTRS_UNSPECIFIED, + &ret); + break; + default: + ret = MEMTX_ERROR; + break; + } + if (ret != MEMTX_OK) { + s390_pci_generate_error_event(ERR_EVENT_FMBA, pbdev->fh, pbdev->fid, + pbdev->fmb_addr, 0); + fmb_timer_free(pbdev); + } + + return ret; +} + +static void fmb_update(void *opaque) +{ + S390PCIBusDevice *pbdev = opaque; + int64_t t = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL); + int i; + + /* Update U bit */ + pbdev->fmb.last_update *= 2; + pbdev->fmb.last_update |= UPDATE_U_BIT; + if (fmb_do_update(pbdev, offsetof(ZpciFmb, last_update), + pbdev->fmb.last_update, + sizeof(pbdev->fmb.last_update))) { + return; + } + + /* Update FMB sample count */ + if (fmb_do_update(pbdev, offsetof(ZpciFmb, sample), + pbdev->fmb.sample++, + sizeof(pbdev->fmb.sample))) { + return; + } + + /* Update FMB counters */ + for (i = 0; i < ZPCI_FMB_CNT_MAX; i++) { + if (fmb_do_update(pbdev, offsetof(ZpciFmb, counter[i]), + pbdev->fmb.counter[i], + sizeof(pbdev->fmb.counter[0]))) { + return; + } + } + + /* Clear U bit and update the time */ + pbdev->fmb.last_update = 
time2tod(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL)); + pbdev->fmb.last_update *= 2; + if (fmb_do_update(pbdev, offsetof(ZpciFmb, last_update), + pbdev->fmb.last_update, + sizeof(pbdev->fmb.last_update))) { + return; + } + timer_mod(pbdev->fmb_timer, t + DEFAULT_MUI); +} + +int mpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar, + uintptr_t ra) +{ + CPUS390XState *env = &cpu->env; + uint8_t oc, dmaas; + uint32_t fh; + ZpciFib fib; + S390PCIBusDevice *pbdev; + uint64_t cc = ZPCI_PCI_LS_OK; + + if (env->psw.mask & PSW_MASK_PSTATE) { + s390_program_interrupt(env, PGM_PRIVILEGED, ra); + return 0; + } + + oc = env->regs[r1] & 0xff; + dmaas = (env->regs[r1] >> 16) & 0xff; + fh = env->regs[r1] >> 32; + + if (fiba & 0x7) { + s390_program_interrupt(env, PGM_SPECIFICATION, ra); + return 0; + } + + pbdev = s390_pci_find_dev_by_fh(s390_get_phb(), fh); + if (!pbdev) { + DPRINTF("mpcifc no pci dev fh 0x%x\n", fh); + setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE); + return 0; + } + + switch (pbdev->state) { + case ZPCI_FS_RESERVED: + case ZPCI_FS_STANDBY: + case ZPCI_FS_DISABLED: + case ZPCI_FS_PERMANENT_ERROR: + setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE); + return 0; + default: + break; + } + + if (s390_cpu_virt_mem_read(cpu, fiba, ar, (uint8_t *)&fib, sizeof(fib))) { + s390_cpu_virt_mem_handle_exc(cpu, ra); + return 0; + } + + if (fib.fmt != 0) { + s390_program_interrupt(env, PGM_OPERAND, ra); + return 0; + } + + switch (oc) { + case ZPCI_MOD_FC_REG_INT: + if (pbdev->summary_ind) { + cc = ZPCI_PCI_LS_ERR; + s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE); + } else if (reg_irqs(env, pbdev, fib)) { + cc = ZPCI_PCI_LS_ERR; + s390_set_status_code(env, r1, ZPCI_MOD_ST_RES_NOT_AVAIL); + } + break; + case ZPCI_MOD_FC_DEREG_INT: + if (!pbdev->summary_ind) { + cc = ZPCI_PCI_LS_ERR; + s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE); + } else { + pci_dereg_irqs(pbdev); + } + break; + case ZPCI_MOD_FC_REG_IOAT: + if (dmaas != 0) { + cc = ZPCI_PCI_LS_ERR; + s390_set_status_code(env, r1, ZPCI_MOD_ST_DMAAS_INVAL); + } else if (pbdev->iommu->enabled) { + cc = ZPCI_PCI_LS_ERR; + s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE); + } else if (reg_ioat(env, pbdev->iommu, fib, ra)) { + cc = ZPCI_PCI_LS_ERR; + s390_set_status_code(env, r1, ZPCI_MOD_ST_INSUF_RES); + } + break; + case ZPCI_MOD_FC_DEREG_IOAT: + if (dmaas != 0) { + cc = ZPCI_PCI_LS_ERR; + s390_set_status_code(env, r1, ZPCI_MOD_ST_DMAAS_INVAL); + } else if (!pbdev->iommu->enabled) { + cc = ZPCI_PCI_LS_ERR; + s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE); + } else { + pci_dereg_ioat(pbdev->iommu); + } + break; + case ZPCI_MOD_FC_REREG_IOAT: + if (dmaas != 0) { + cc = ZPCI_PCI_LS_ERR; + s390_set_status_code(env, r1, ZPCI_MOD_ST_DMAAS_INVAL); + } else if (!pbdev->iommu->enabled) { + cc = ZPCI_PCI_LS_ERR; + s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE); + } else { + pci_dereg_ioat(pbdev->iommu); + if (reg_ioat(env, pbdev->iommu, fib, ra)) { + cc = ZPCI_PCI_LS_ERR; + s390_set_status_code(env, r1, ZPCI_MOD_ST_INSUF_RES); + } + } + break; + case ZPCI_MOD_FC_RESET_ERROR: + switch (pbdev->state) { + case ZPCI_FS_BLOCKED: + case ZPCI_FS_ERROR: + pbdev->state = ZPCI_FS_ENABLED; + break; + default: + cc = ZPCI_PCI_LS_ERR; + s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE); + } + break; + case ZPCI_MOD_FC_RESET_BLOCK: + switch (pbdev->state) { + case ZPCI_FS_ERROR: + pbdev->state = ZPCI_FS_BLOCKED; + break; + default: + cc = ZPCI_PCI_LS_ERR; + s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE); + } + break; + case ZPCI_MOD_FC_SET_MEASURE: { + 
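/*
+         * FMB control sketch: a zero fmb_addr stops measurement; an
+         * address with FMBK_MASK bits set is rejected with an FMBPRO
+         * error event; otherwise a QEMU_CLOCK_VIRTUAL timer is (re)armed
+         * to flush pbdev->fmb out to guest memory every DEFAULT_MUI
+         * milliseconds via fmb_update().
+         */
+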
uint64_t fmb_addr = ldq_p(&fib.fmb_addr); + + if (fmb_addr & FMBK_MASK) { + cc = ZPCI_PCI_LS_ERR; + s390_pci_generate_error_event(ERR_EVENT_FMBPRO, pbdev->fh, + pbdev->fid, fmb_addr, 0); + fmb_timer_free(pbdev); + break; + } + + if (!fmb_addr) { + /* Stop updating FMB. */ + fmb_timer_free(pbdev); + break; + } + + if (!pbdev->fmb_timer) { + pbdev->fmb_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL, + fmb_update, pbdev); + } else if (timer_pending(pbdev->fmb_timer)) { + /* Remove pending timer to update FMB address. */ + timer_del(pbdev->fmb_timer); + } + pbdev->fmb_addr = fmb_addr; + timer_mod(pbdev->fmb_timer, + qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + DEFAULT_MUI); + break; + } + default: + s390_program_interrupt(&cpu->env, PGM_OPERAND, ra); + cc = ZPCI_PCI_LS_ERR; + } + + setcc(cpu, cc); + return 0; +} + +int stpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar, + uintptr_t ra) +{ + CPUS390XState *env = &cpu->env; + uint8_t dmaas; + uint32_t fh; + ZpciFib fib; + S390PCIBusDevice *pbdev; + uint32_t data; + uint64_t cc = ZPCI_PCI_LS_OK; + + if (env->psw.mask & PSW_MASK_PSTATE) { + s390_program_interrupt(env, PGM_PRIVILEGED, ra); + return 0; + } + + fh = env->regs[r1] >> 32; + dmaas = (env->regs[r1] >> 16) & 0xff; + + if (dmaas) { + setcc(cpu, ZPCI_PCI_LS_ERR); + s390_set_status_code(env, r1, ZPCI_STPCIFC_ST_INVAL_DMAAS); + return 0; + } + + if (fiba & 0x7) { + s390_program_interrupt(env, PGM_SPECIFICATION, ra); + return 0; + } + + pbdev = s390_pci_find_dev_by_idx(s390_get_phb(), fh & FH_MASK_INDEX); + if (!pbdev) { + setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE); + return 0; + } + + memset(&fib, 0, sizeof(fib)); + + switch (pbdev->state) { + case ZPCI_FS_RESERVED: + case ZPCI_FS_STANDBY: + setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE); + return 0; + case ZPCI_FS_DISABLED: + if (fh & FH_MASK_ENABLE) { + setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE); + return 0; + } + goto out; + /* BLOCKED bit is set to one coincident with the setting of ERROR bit. + * FH Enabled bit is set to one in states of ENABLED, BLOCKED or ERROR. */ + case ZPCI_FS_ERROR: + fib.fc |= 0x20; + /* fallthrough */ + case ZPCI_FS_BLOCKED: + fib.fc |= 0x40; + /* fallthrough */ + case ZPCI_FS_ENABLED: + fib.fc |= 0x80; + if (pbdev->iommu->enabled) { + fib.fc |= 0x10; + } + if (!(fh & FH_MASK_ENABLE)) { + env->regs[r1] |= 1ULL << 63; + } + break; + case ZPCI_FS_PERMANENT_ERROR: + setcc(cpu, ZPCI_PCI_LS_ERR); + s390_set_status_code(env, r1, ZPCI_STPCIFC_ST_PERM_ERROR); + return 0; + } + + stq_p(&fib.pba, pbdev->iommu->pba); + stq_p(&fib.pal, pbdev->iommu->pal); + stq_p(&fib.iota, pbdev->iommu->g_iota); + stq_p(&fib.aibv, pbdev->routes.adapter.ind_addr); + stq_p(&fib.aisb, pbdev->routes.adapter.summary_addr); + stq_p(&fib.fmb_addr, pbdev->fmb_addr); + + data = ((uint32_t)pbdev->isc << 28) | ((uint32_t)pbdev->noi << 16) | + ((uint32_t)pbdev->routes.adapter.ind_offset << 8) | + ((uint32_t)pbdev->sum << 7) | pbdev->routes.adapter.summary_offset; + stl_p(&fib.data, data); + +out: + if (s390_cpu_virt_mem_write(cpu, fiba, ar, (uint8_t *)&fib, sizeof(fib))) { + s390_cpu_virt_mem_handle_exc(cpu, ra); + return 0; + } + + setcc(cpu, cc); + return 0; +} diff --git a/hw/s390x/s390-pci-vfio.c b/hw/s390x/s390-pci-vfio.c new file mode 100644 index 000000000..2a153fa8c --- /dev/null +++ b/hw/s390x/s390-pci-vfio.c @@ -0,0 +1,274 @@ +/* + * s390 vfio-pci interfaces + * + * Copyright 2020 IBM Corp. + * Author(s): Matthew Rosato + * + * This work is licensed under the terms of the GNU GPL, version 2 or (at + * your option) any later version. 
See the COPYING file in the top-level
+ * directory.
+ */
+
+#include "qemu/osdep.h"
+
+#include <sys/ioctl.h>
+#include <linux/vfio.h>
+#include <linux/vfio_zdev.h>
+
+#include "trace.h"
+#include "hw/s390x/s390-pci-bus.h"
+#include "hw/s390x/s390-pci-clp.h"
+#include "hw/s390x/s390-pci-vfio.h"
+#include "hw/vfio/pci.h"
+#include "hw/vfio/vfio-common.h"
+
+/*
+ * Get the current DMA available count from vfio. Returns true if vfio is
+ * limiting DMA requests, false otherwise. The current available count read
+ * from vfio is returned in avail.
+ */
+bool s390_pci_update_dma_avail(int fd, unsigned int *avail)
+{
+    uint32_t argsz = sizeof(struct vfio_iommu_type1_info);
+    g_autofree struct vfio_iommu_type1_info *info = g_malloc0(argsz);
+
+    assert(avail);
+
+    /*
+     * If the specified argsz is not large enough to contain all capabilities
+     * it will be updated upon return from the ioctl. Retry until we have
+     * a big enough buffer to hold the entire capability chain.
+     */
+retry:
+    info->argsz = argsz;
+
+    if (ioctl(fd, VFIO_IOMMU_GET_INFO, info)) {
+        return false;
+    }
+
+    if (info->argsz > argsz) {
+        argsz = info->argsz;
+        info = g_realloc(info, argsz);
+        goto retry;
+    }
+
+    /* If the capability exists, update with the current value */
+    return vfio_get_info_dma_avail(info, avail);
+}
+
+S390PCIDMACount *s390_pci_start_dma_count(S390pciState *s,
+                                          S390PCIBusDevice *pbdev)
+{
+    S390PCIDMACount *cnt;
+    uint32_t avail;
+    VFIOPCIDevice *vpdev = container_of(pbdev->pdev, VFIOPCIDevice, pdev);
+    int id;
+
+    assert(vpdev);
+
+    id = vpdev->vbasedev.group->container->fd;
+
+    if (!s390_pci_update_dma_avail(id, &avail)) {
+        return NULL;
+    }
+
+    QTAILQ_FOREACH(cnt, &s->zpci_dma_limit, link) {
+        if (cnt->id == id) {
+            cnt->users++;
+            return cnt;
+        }
+    }
+
+    cnt = g_new0(S390PCIDMACount, 1);
+    cnt->id = id;
+    cnt->users = 1;
+    cnt->avail = avail;
+    QTAILQ_INSERT_TAIL(&s->zpci_dma_limit, cnt, link);
+    return cnt;
+}
+
+void s390_pci_end_dma_count(S390pciState *s, S390PCIDMACount *cnt)
+{
+    assert(cnt);
+
+    cnt->users--;
+    if (cnt->users == 0) {
+        QTAILQ_REMOVE(&s->zpci_dma_limit, cnt, link);
+    }
+}
+
+static void s390_pci_read_base(S390PCIBusDevice *pbdev,
+                               struct vfio_device_info *info)
+{
+    struct vfio_info_cap_header *hdr;
+    struct vfio_device_info_cap_zpci_base *cap;
+    VFIOPCIDevice *vpci = container_of(pbdev->pdev, VFIOPCIDevice, pdev);
+
+    hdr = vfio_get_device_info_cap(info, VFIO_DEVICE_INFO_CAP_ZPCI_BASE);
+
+    /* If capability not provided, just leave the defaults in place */
+    if (hdr == NULL) {
+        trace_s390_pci_clp_cap(vpci->vbasedev.name,
+                               VFIO_DEVICE_INFO_CAP_ZPCI_BASE);
+        return;
+    }
+    cap = (void *) hdr;
+
+    pbdev->zpci_fn.sdma = cap->start_dma;
+    pbdev->zpci_fn.edma = cap->end_dma;
+    pbdev->zpci_fn.pchid = cap->pchid;
+    pbdev->zpci_fn.vfn = cap->vfn;
+    pbdev->zpci_fn.pfgid = cap->gid;
+    /* The following values remain 0 until we support other FMB formats */
+    pbdev->zpci_fn.fmbl = 0;
+    pbdev->zpci_fn.pft = 0;
+}
+
+static void s390_pci_read_group(S390PCIBusDevice *pbdev,
+                                struct vfio_device_info *info)
+{
+    struct vfio_info_cap_header *hdr;
+    struct vfio_device_info_cap_zpci_group *cap;
+    ClpRspQueryPciGrp *resgrp;
+    VFIOPCIDevice *vpci = container_of(pbdev->pdev, VFIOPCIDevice, pdev);
+
+    hdr = vfio_get_device_info_cap(info, VFIO_DEVICE_INFO_CAP_ZPCI_GROUP);
+
+    /* If capability not provided, just use the default group */
+    if (hdr == NULL) {
+        trace_s390_pci_clp_cap(vpci->vbasedev.name,
+                               VFIO_DEVICE_INFO_CAP_ZPCI_GROUP);
+        pbdev->zpci_fn.pfgid = ZPCI_DEFAULT_FN_GRP;
+        pbdev->pci_group = s390_group_find(ZPCI_DEFAULT_FN_GRP);
+        return;
+
} + cap = (void *) hdr; + + /* See if the PCI group is already defined, create if not */ + pbdev->pci_group = s390_group_find(pbdev->zpci_fn.pfgid); + + if (!pbdev->pci_group) { + pbdev->pci_group = s390_group_create(pbdev->zpci_fn.pfgid); + + resgrp = &pbdev->pci_group->zpci_group; + if (cap->flags & VFIO_DEVICE_INFO_ZPCI_FLAG_REFRESH) { + resgrp->fr = 1; + } + resgrp->dasm = cap->dasm; + resgrp->msia = cap->msi_addr; + resgrp->mui = cap->mui; + resgrp->i = cap->noi; + resgrp->maxstbl = cap->maxstbl; + resgrp->version = cap->version; + } +} + +static void s390_pci_read_util(S390PCIBusDevice *pbdev, + struct vfio_device_info *info) +{ + struct vfio_info_cap_header *hdr; + struct vfio_device_info_cap_zpci_util *cap; + VFIOPCIDevice *vpci = container_of(pbdev->pdev, VFIOPCIDevice, pdev); + + hdr = vfio_get_device_info_cap(info, VFIO_DEVICE_INFO_CAP_ZPCI_UTIL); + + /* If capability not provided, just leave the defaults in place */ + if (hdr == NULL) { + trace_s390_pci_clp_cap(vpci->vbasedev.name, + VFIO_DEVICE_INFO_CAP_ZPCI_UTIL); + return; + } + cap = (void *) hdr; + + if (cap->size > CLP_UTIL_STR_LEN) { + trace_s390_pci_clp_cap_size(vpci->vbasedev.name, cap->size, + VFIO_DEVICE_INFO_CAP_ZPCI_UTIL); + return; + } + + pbdev->zpci_fn.flags |= CLP_RSP_QPCI_MASK_UTIL; + memcpy(pbdev->zpci_fn.util_str, cap->util_str, CLP_UTIL_STR_LEN); +} + +static void s390_pci_read_pfip(S390PCIBusDevice *pbdev, + struct vfio_device_info *info) +{ + struct vfio_info_cap_header *hdr; + struct vfio_device_info_cap_zpci_pfip *cap; + VFIOPCIDevice *vpci = container_of(pbdev->pdev, VFIOPCIDevice, pdev); + + hdr = vfio_get_device_info_cap(info, VFIO_DEVICE_INFO_CAP_ZPCI_PFIP); + + /* If capability not provided, just leave the defaults in place */ + if (hdr == NULL) { + trace_s390_pci_clp_cap(vpci->vbasedev.name, + VFIO_DEVICE_INFO_CAP_ZPCI_PFIP); + return; + } + cap = (void *) hdr; + + if (cap->size > CLP_PFIP_NR_SEGMENTS) { + trace_s390_pci_clp_cap_size(vpci->vbasedev.name, cap->size, + VFIO_DEVICE_INFO_CAP_ZPCI_PFIP); + return; + } + + memcpy(pbdev->zpci_fn.pfip, cap->pfip, CLP_PFIP_NR_SEGMENTS); +} + +/* + * This function will issue the VFIO_DEVICE_GET_INFO ioctl and look for + * capabilities that contain information about CLP features provided by the + * underlying host. + * On entry, defaults have already been placed into the guest CLP response + * buffers. On exit, defaults will have been overwritten for any CLP features + * found in the capability chain; defaults will remain for any CLP features not + * found in the chain. + */ +void s390_pci_get_clp_info(S390PCIBusDevice *pbdev) +{ + g_autofree struct vfio_device_info *info = NULL; + VFIOPCIDevice *vfio_pci; + uint32_t argsz; + int fd; + + argsz = sizeof(*info); + info = g_malloc0(argsz); + + vfio_pci = container_of(pbdev->pdev, VFIOPCIDevice, pdev); + fd = vfio_pci->vbasedev.fd; + + /* + * If the specified argsz is not large enough to contain all capabilities + * it will be updated upon return from the ioctl. Retry until we have + * a big enough buffer to hold the entire capability chain. On error, + * just exit and rely on CLP defaults. + */ +retry: + info->argsz = argsz; + + if (ioctl(fd, VFIO_DEVICE_GET_INFO, info)) { + trace_s390_pci_clp_dev_info(vfio_pci->vbasedev.name); + return; + } + + if (info->argsz > argsz) { + argsz = info->argsz; + info = g_realloc(info, argsz); + goto retry; + } + + /* + * Find the CLP features provided and fill in the guest CLP responses. 
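+ * Each helper below fills in one slice of the response (base attributes,
+ * function group, utility string, path information) and leaves the
+ * defaults untouched when vfio does not expose the matching capability.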
+ * Always call s390_pci_read_base first as information from this could + * determine which function group is used in s390_pci_read_group. + * For any feature not found, the default values will remain in the CLP + * response. + */ + s390_pci_read_base(pbdev, info); + s390_pci_read_group(pbdev, info); + s390_pci_read_util(pbdev, info); + s390_pci_read_pfip(pbdev, info); + + return; +} diff --git a/hw/s390x/s390-skeys-kvm.c b/hw/s390x/s390-skeys-kvm.c new file mode 100644 index 000000000..3ff9d94b8 --- /dev/null +++ b/hw/s390x/s390-skeys-kvm.c @@ -0,0 +1,81 @@ +/* + * s390 storage key device + * + * Copyright 2015 IBM Corp. + * Author(s): Jason J. Herne + * + * This work is licensed under the terms of the GNU GPL, version 2 or (at + * your option) any later version. See the COPYING file in the top-level + * directory. + */ + +#include "qemu/osdep.h" +#include "hw/s390x/storage-keys.h" +#include "sysemu/kvm.h" +#include "qemu/error-report.h" +#include "qemu/module.h" + +static bool kvm_s390_skeys_are_enabled(S390SKeysState *ss) +{ + S390SKeysClass *skeyclass = S390_SKEYS_GET_CLASS(ss); + uint8_t single_key; + int r; + + r = skeyclass->get_skeys(ss, 0, 1, &single_key); + if (r != 0 && r != KVM_S390_GET_SKEYS_NONE) { + error_report("S390_GET_KEYS error %d", r); + } + return (r == 0); +} + +static int kvm_s390_skeys_get(S390SKeysState *ss, uint64_t start_gfn, + uint64_t count, uint8_t *keys) +{ + struct kvm_s390_skeys args = { + .start_gfn = start_gfn, + .count = count, + .skeydata_addr = (__u64)keys + }; + + return kvm_vm_ioctl(kvm_state, KVM_S390_GET_SKEYS, &args); +} + +static int kvm_s390_skeys_set(S390SKeysState *ss, uint64_t start_gfn, + uint64_t count, uint8_t *keys) +{ + struct kvm_s390_skeys args = { + .start_gfn = start_gfn, + .count = count, + .skeydata_addr = (__u64)keys + }; + + return kvm_vm_ioctl(kvm_state, KVM_S390_SET_SKEYS, &args); +} + +static void kvm_s390_skeys_class_init(ObjectClass *oc, void *data) +{ + S390SKeysClass *skeyclass = S390_SKEYS_CLASS(oc); + DeviceClass *dc = DEVICE_CLASS(oc); + + skeyclass->skeys_are_enabled = kvm_s390_skeys_are_enabled; + skeyclass->get_skeys = kvm_s390_skeys_get; + skeyclass->set_skeys = kvm_s390_skeys_set; + + /* Reason: Internal device (only one skeys device for the whole memory) */ + dc->user_creatable = false; +} + +static const TypeInfo kvm_s390_skeys_info = { + .name = TYPE_KVM_S390_SKEYS, + .parent = TYPE_S390_SKEYS, + .instance_size = sizeof(S390SKeysState), + .class_init = kvm_s390_skeys_class_init, + .class_size = sizeof(S390SKeysClass), +}; + +static void kvm_s390_skeys_register_types(void) +{ + type_register_static(&kvm_s390_skeys_info); +} + +type_init(kvm_s390_skeys_register_types) diff --git a/hw/s390x/s390-skeys.c b/hw/s390x/s390-skeys.c new file mode 100644 index 000000000..5024faf41 --- /dev/null +++ b/hw/s390x/s390-skeys.c @@ -0,0 +1,499 @@ +/* + * s390 storage key device + * + * Copyright 2015 IBM Corp. + * Author(s): Jason J. Herne + * + * This work is licensed under the terms of the GNU GPL, version 2 or (at + * your option) any later version. See the COPYING file in the top-level + * directory. 
+ */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "hw/boards.h" +#include "hw/s390x/storage-keys.h" +#include "qapi/error.h" +#include "qapi/qapi-commands-misc-target.h" +#include "qapi/qmp/qdict.h" +#include "qemu/error-report.h" +#include "sysemu/memory_mapping.h" +#include "exec/address-spaces.h" +#include "sysemu/kvm.h" +#include "migration/qemu-file-types.h" +#include "migration/register.h" + +#define S390_SKEYS_BUFFER_SIZE (128 * KiB) /* Room for 128k storage keys */ +#define S390_SKEYS_SAVE_FLAG_EOS 0x01 +#define S390_SKEYS_SAVE_FLAG_SKEYS 0x02 +#define S390_SKEYS_SAVE_FLAG_ERROR 0x04 + +S390SKeysState *s390_get_skeys_device(void) +{ + S390SKeysState *ss; + + ss = S390_SKEYS(object_resolve_path_type("", TYPE_S390_SKEYS, NULL)); + assert(ss); + return ss; +} + +void s390_skeys_init(void) +{ + Object *obj; + + if (kvm_enabled()) { + obj = object_new(TYPE_KVM_S390_SKEYS); + } else { + obj = object_new(TYPE_QEMU_S390_SKEYS); + } + object_property_add_child(qdev_get_machine(), TYPE_S390_SKEYS, + obj); + object_unref(obj); + + qdev_realize(DEVICE(obj), NULL, &error_fatal); +} + +static void write_keys(FILE *f, uint8_t *keys, uint64_t startgfn, + uint64_t count, Error **errp) +{ + uint64_t curpage = startgfn; + uint64_t maxpage = curpage + count - 1; + + for (; curpage <= maxpage; curpage++) { + uint8_t acc = (*keys & 0xF0) >> 4; + int fp = (*keys & 0x08); + int ref = (*keys & 0x04); + int ch = (*keys & 0x02); + int res = (*keys & 0x01); + + fprintf(f, "page=%03" PRIx64 ": key(%d) => ACC=%X, FP=%d, REF=%d," + " ch=%d, reserved=%d\n", + curpage, *keys, acc, fp, ref, ch, res); + keys++; + } +} + +void hmp_info_skeys(Monitor *mon, const QDict *qdict) +{ + S390SKeysState *ss = s390_get_skeys_device(); + S390SKeysClass *skeyclass = S390_SKEYS_GET_CLASS(ss); + uint64_t addr = qdict_get_int(qdict, "addr"); + uint8_t key; + int r; + + /* Quick check to see if guest is using storage keys */ + if (!skeyclass->skeys_are_enabled(ss)) { + monitor_printf(mon, "Error: This guest is not using storage keys\n"); + return; + } + + if (!address_space_access_valid(&address_space_memory, + addr & TARGET_PAGE_MASK, TARGET_PAGE_SIZE, + false, MEMTXATTRS_UNSPECIFIED)) { + monitor_printf(mon, "Error: The given address is not valid\n"); + return; + } + + r = skeyclass->get_skeys(ss, addr / TARGET_PAGE_SIZE, 1, &key); + if (r < 0) { + monitor_printf(mon, "Error: %s\n", strerror(-r)); + return; + } + + monitor_printf(mon, " key: 0x%X\n", key); +} + +void hmp_dump_skeys(Monitor *mon, const QDict *qdict) +{ + const char *filename = qdict_get_str(qdict, "filename"); + Error *err = NULL; + + qmp_dump_skeys(filename, &err); + if (err) { + error_report_err(err); + } +} + +void qmp_dump_skeys(const char *filename, Error **errp) +{ + S390SKeysState *ss = s390_get_skeys_device(); + S390SKeysClass *skeyclass = S390_SKEYS_GET_CLASS(ss); + GuestPhysBlockList guest_phys_blocks; + GuestPhysBlock *block; + uint64_t pages, gfn; + Error *lerr = NULL; + uint8_t *buf; + int ret; + int fd; + FILE *f; + + /* Quick check to see if guest is using storage keys */ + if (!skeyclass->skeys_are_enabled(ss)) { + error_setg(errp, "This guest is not using storage keys - " + "nothing to dump"); + return; + } + + fd = qemu_open_old(filename, O_WRONLY | O_CREAT | O_TRUNC, 0600); + if (fd < 0) { + error_setg_file_open(errp, errno, filename); + return; + } + f = fdopen(fd, "wb"); + if (!f) { + close(fd); + error_setg_file_open(errp, errno, filename); + return; + } + + buf = g_try_malloc(S390_SKEYS_BUFFER_SIZE); + if (!buf) { + 
error_setg(errp, "Could not allocate memory"); + goto out; + } + + assert(qemu_mutex_iothread_locked()); + guest_phys_blocks_init(&guest_phys_blocks); + guest_phys_blocks_append(&guest_phys_blocks); + + QTAILQ_FOREACH(block, &guest_phys_blocks.head, next) { + assert(QEMU_IS_ALIGNED(block->target_start, TARGET_PAGE_SIZE)); + assert(QEMU_IS_ALIGNED(block->target_end, TARGET_PAGE_SIZE)); + + gfn = block->target_start / TARGET_PAGE_SIZE; + pages = (block->target_end - block->target_start) / TARGET_PAGE_SIZE; + + while (pages) { + const uint64_t cur_pages = MIN(pages, S390_SKEYS_BUFFER_SIZE); + + ret = skeyclass->get_skeys(ss, gfn, cur_pages, buf); + if (ret < 0) { + error_setg_errno(errp, -ret, "get_keys error"); + goto out_free; + } + + /* write keys to stream */ + write_keys(f, buf, gfn, cur_pages, &lerr); + if (lerr) { + goto out_free; + } + + gfn += cur_pages; + pages -= cur_pages; + } + } + +out_free: + guest_phys_blocks_free(&guest_phys_blocks); + error_propagate(errp, lerr); + g_free(buf); +out: + fclose(f); +} + +static bool qemu_s390_skeys_are_enabled(S390SKeysState *ss) +{ + QEMUS390SKeysState *skeys = QEMU_S390_SKEYS(ss); + + /* Lockless check is sufficient. */ + return !!skeys->keydata; +} + +static bool qemu_s390_enable_skeys(S390SKeysState *ss) +{ + QEMUS390SKeysState *skeys = QEMU_S390_SKEYS(ss); + static gsize initialized; + + if (likely(skeys->keydata)) { + return true; + } + + /* + * TODO: Modern Linux doesn't use storage keys unless running KVM guests + * that use storage keys. Therefore, we keep it simple for now. + * + * 1) We should initialize to "referenced+changed" for an initial + * over-indication. Let's avoid touching megabytes of data for now and + * assume that any sane user will issue a storage key instruction before + * actually relying on this data. + * 2) Relying on ram_size and allocating a big array is ugly. We should + * allocate and manage storage key data per RAMBlock or optimally using + * some sparse data structure. + * 3) We only ever have a single S390SKeysState, so relying on + * g_once_init_enter() is good enough. 
+ */ + if (g_once_init_enter(&initialized)) { + MachineState *machine = MACHINE(qdev_get_machine()); + + skeys->key_count = machine->ram_size / TARGET_PAGE_SIZE; + skeys->keydata = g_malloc0(skeys->key_count); + g_once_init_leave(&initialized, 1); + } + return false; +} + +static int qemu_s390_skeys_set(S390SKeysState *ss, uint64_t start_gfn, + uint64_t count, uint8_t *keys) +{ + QEMUS390SKeysState *skeydev = QEMU_S390_SKEYS(ss); + int i; + + /* Check for uint64 overflow and access beyond end of key data */ + if (unlikely(!skeydev->keydata || start_gfn + count > skeydev->key_count || + start_gfn + count < count)) { + error_report("Error: Setting storage keys for pages with unallocated " + "storage key memory: gfn=%" PRIx64 " count=%" PRId64, + start_gfn, count); + return -EINVAL; + } + + for (i = 0; i < count; i++) { + skeydev->keydata[start_gfn + i] = keys[i]; + } + return 0; +} + +static int qemu_s390_skeys_get(S390SKeysState *ss, uint64_t start_gfn, + uint64_t count, uint8_t *keys) +{ + QEMUS390SKeysState *skeydev = QEMU_S390_SKEYS(ss); + int i; + + /* Check for uint64 overflow and access beyond end of key data */ + if (unlikely(!skeydev->keydata || start_gfn + count > skeydev->key_count || + start_gfn + count < count)) { + error_report("Error: Getting storage keys for pages with unallocated " + "storage key memory: gfn=%" PRIx64 " count=%" PRId64, + start_gfn, count); + return -EINVAL; + } + + for (i = 0; i < count; i++) { + keys[i] = skeydev->keydata[start_gfn + i]; + } + return 0; +} + +static void qemu_s390_skeys_class_init(ObjectClass *oc, void *data) +{ + S390SKeysClass *skeyclass = S390_SKEYS_CLASS(oc); + DeviceClass *dc = DEVICE_CLASS(oc); + + skeyclass->skeys_are_enabled = qemu_s390_skeys_are_enabled; + skeyclass->enable_skeys = qemu_s390_enable_skeys; + skeyclass->get_skeys = qemu_s390_skeys_get; + skeyclass->set_skeys = qemu_s390_skeys_set; + + /* Reason: Internal device (only one skeys device for the whole memory) */ + dc->user_creatable = false; +} + +static const TypeInfo qemu_s390_skeys_info = { + .name = TYPE_QEMU_S390_SKEYS, + .parent = TYPE_S390_SKEYS, + .instance_size = sizeof(QEMUS390SKeysState), + .class_init = qemu_s390_skeys_class_init, + .class_size = sizeof(S390SKeysClass), +}; + +static void s390_storage_keys_save(QEMUFile *f, void *opaque) +{ + S390SKeysState *ss = S390_SKEYS(opaque); + S390SKeysClass *skeyclass = S390_SKEYS_GET_CLASS(ss); + GuestPhysBlockList guest_phys_blocks; + GuestPhysBlock *block; + uint64_t pages, gfn; + int error = 0; + uint8_t *buf; + + if (!skeyclass->skeys_are_enabled(ss)) { + goto end_stream; + } + + buf = g_try_malloc(S390_SKEYS_BUFFER_SIZE); + if (!buf) { + error_report("storage key save could not allocate memory"); + goto end_stream; + } + + guest_phys_blocks_init(&guest_phys_blocks); + guest_phys_blocks_append(&guest_phys_blocks); + + /* Send each contiguous physical memory range separately. 
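Each range is framed as a be64 start address tagged with S390_SKEYS_SAVE_FLAG_SKEYS plus a be64 page count, followed by one key byte per page; s390_storage_keys_load() consumes exactly this framing. 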
*/ + QTAILQ_FOREACH(block, &guest_phys_blocks.head, next) { + assert(QEMU_IS_ALIGNED(block->target_start, TARGET_PAGE_SIZE)); + assert(QEMU_IS_ALIGNED(block->target_end, TARGET_PAGE_SIZE)); + + gfn = block->target_start / TARGET_PAGE_SIZE; + pages = (block->target_end - block->target_start) / TARGET_PAGE_SIZE; + qemu_put_be64(f, block->target_start | S390_SKEYS_SAVE_FLAG_SKEYS); + qemu_put_be64(f, pages); + + while (pages) { + const uint64_t cur_pages = MIN(pages, S390_SKEYS_BUFFER_SIZE); + + if (!error) { + error = skeyclass->get_skeys(ss, gfn, cur_pages, buf); + if (error) { + /* + * Create a valid stream with all 0x00 and indicate + * S390_SKEYS_SAVE_FLAG_ERROR to the destination. + */ + error_report("S390_GET_KEYS error %d", error); + memset(buf, 0, S390_SKEYS_BUFFER_SIZE); + } + } + + qemu_put_buffer(f, buf, cur_pages); + gfn += cur_pages; + pages -= cur_pages; + } + + if (error) { + break; + } + } + + guest_phys_blocks_free(&guest_phys_blocks); + g_free(buf); +end_stream: + if (error) { + qemu_put_be64(f, S390_SKEYS_SAVE_FLAG_ERROR); + } else { + qemu_put_be64(f, S390_SKEYS_SAVE_FLAG_EOS); + } +} + +static int s390_storage_keys_load(QEMUFile *f, void *opaque, int version_id) +{ + S390SKeysState *ss = S390_SKEYS(opaque); + S390SKeysClass *skeyclass = S390_SKEYS_GET_CLASS(ss); + int ret = 0; + + /* + * Make sure to lazy-enable if required to be done explicitly. No need to + * flush any TLB as the VM is not running yet. + */ + if (skeyclass->enable_skeys) { + skeyclass->enable_skeys(ss); + } + + while (!ret) { + ram_addr_t addr; + int flags; + + addr = qemu_get_be64(f); + flags = addr & ~TARGET_PAGE_MASK; + addr &= TARGET_PAGE_MASK; + + switch (flags) { + case S390_SKEYS_SAVE_FLAG_SKEYS: { + const uint64_t total_count = qemu_get_be64(f); + uint64_t handled_count = 0, cur_count; + uint64_t cur_gfn = addr / TARGET_PAGE_SIZE; + uint8_t *buf = g_try_malloc(S390_SKEYS_BUFFER_SIZE); + + if (!buf) { + error_report("storage key load could not allocate memory"); + ret = -ENOMEM; + break; + } + + while (handled_count < total_count) { + cur_count = MIN(total_count - handled_count, + S390_SKEYS_BUFFER_SIZE); + qemu_get_buffer(f, buf, cur_count); + + ret = skeyclass->set_skeys(ss, cur_gfn, cur_count, buf); + if (ret < 0) { + error_report("S390_SET_KEYS error %d", ret); + break; + } + handled_count += cur_count; + cur_gfn += cur_count; + } + g_free(buf); + break; + } + case S390_SKEYS_SAVE_FLAG_ERROR: { + error_report("Storage key data is incomplete"); + ret = -EINVAL; + break; + } + case S390_SKEYS_SAVE_FLAG_EOS: + /* normal exit */ + return 0; + default: + error_report("Unexpected storage key flag data: %#x", flags); + ret = -EINVAL; + } + } + + return ret; +} + +static inline bool s390_skeys_get_migration_enabled(Object *obj, Error **errp) +{ + S390SKeysState *ss = S390_SKEYS(obj); + + return ss->migration_enabled; +} + +static SaveVMHandlers savevm_s390_storage_keys = { + .save_state = s390_storage_keys_save, + .load_state = s390_storage_keys_load, +}; + +static inline void s390_skeys_set_migration_enabled(Object *obj, bool value, + Error **errp) +{ + S390SKeysState *ss = S390_SKEYS(obj); + + /* Prevent double registration of savevm handler */ + if (ss->migration_enabled == value) { + return; + } + + ss->migration_enabled = value; + + if (ss->migration_enabled) { + register_savevm_live(TYPE_S390_SKEYS, 0, 1, + &savevm_s390_storage_keys, ss); + } else { + unregister_savevm(VMSTATE_IF(ss), TYPE_S390_SKEYS, ss); + } +} + +static void s390_skeys_instance_init(Object *obj) +{ + 
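/* "migration-enabled" defaults to true (set below); its setter registers or unregisters the savevm handler, so storage keys are migrated unless the property is explicitly turned off. */ + 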
object_property_add_bool(obj, "migration-enabled", + s390_skeys_get_migration_enabled, + s390_skeys_set_migration_enabled); + object_property_set_bool(obj, "migration-enabled", true, NULL); +} + +static void s390_skeys_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + + dc->hotpluggable = false; + set_bit(DEVICE_CATEGORY_MISC, dc->categories); +} + +static const TypeInfo s390_skeys_info = { + .name = TYPE_S390_SKEYS, + .parent = TYPE_DEVICE, + .instance_init = s390_skeys_instance_init, + .instance_size = sizeof(S390SKeysState), + .class_init = s390_skeys_class_init, + .class_size = sizeof(S390SKeysClass), + .abstract = true, +}; + +static void qemu_s390_skeys_register_types(void) +{ + type_register_static(&s390_skeys_info); + type_register_static(&qemu_s390_skeys_info); +} + +type_init(qemu_s390_skeys_register_types) diff --git a/hw/s390x/s390-stattrib-kvm.c b/hw/s390x/s390-stattrib-kvm.c new file mode 100644 index 000000000..24cd01382 --- /dev/null +++ b/hw/s390x/s390-stattrib-kvm.c @@ -0,0 +1,195 @@ +/* + * s390 storage attributes device -- KVM object + * + * Copyright 2016 IBM Corp. + * Author(s): Claudio Imbrenda + * + * This work is licensed under the terms of the GNU GPL, version 2 or (at + * your option) any later version. See the COPYING file in the top-level + * directory. + */ + +#include "qemu/osdep.h" +#include "hw/boards.h" +#include "migration/qemu-file.h" +#include "hw/s390x/storage-attributes.h" +#include "qemu/error-report.h" +#include "sysemu/kvm.h" +#include "exec/ram_addr.h" +#include "kvm/kvm_s390x.h" + +Object *kvm_s390_stattrib_create(void) +{ + if (kvm_enabled() && + kvm_check_extension(kvm_state, KVM_CAP_S390_CMMA_MIGRATION)) { + return object_new(TYPE_KVM_S390_STATTRIB); + } + return NULL; +} + +static void kvm_s390_stattrib_instance_init(Object *obj) +{ + KVMS390StAttribState *sas = KVM_S390_STATTRIB(obj); + + sas->still_dirty = 0; +} + +static int kvm_s390_stattrib_read_helper(S390StAttribState *sa, + uint64_t *start_gfn, + uint32_t count, + uint8_t *values, + uint32_t flags) +{ + KVMS390StAttribState *sas = KVM_S390_STATTRIB(sa); + int r; + struct kvm_s390_cmma_log clog = { + .values = (uint64_t)values, + .start_gfn = *start_gfn, + .count = count, + .flags = flags, + }; + + r = kvm_vm_ioctl(kvm_state, KVM_S390_GET_CMMA_BITS, &clog); + if (r < 0) { + error_report("KVM_S390_GET_CMMA_BITS failed: %s", strerror(-r)); + return r; + } + + *start_gfn = clog.start_gfn; + sas->still_dirty = clog.remaining; + return clog.count; +} + +static int kvm_s390_stattrib_get_stattr(S390StAttribState *sa, + uint64_t *start_gfn, + uint32_t count, + uint8_t *values) +{ + return kvm_s390_stattrib_read_helper(sa, start_gfn, count, values, 0); +} + +static int kvm_s390_stattrib_peek_stattr(S390StAttribState *sa, + uint64_t start_gfn, + uint32_t count, + uint8_t *values) +{ + return kvm_s390_stattrib_read_helper(sa, &start_gfn, count, values, + KVM_S390_CMMA_PEEK); +} + +static int kvm_s390_stattrib_set_stattr(S390StAttribState *sa, + uint64_t start_gfn, + uint32_t count, + uint8_t *values) +{ + KVMS390StAttribState *sas = KVM_S390_STATTRIB(sa); + MachineState *machine = MACHINE(qdev_get_machine()); + unsigned long max = machine->ram_size / TARGET_PAGE_SIZE; + + if (start_gfn + count > max) { + error_report("Out of memory bounds when setting storage attributes"); + return -1; + } + if (!sas->incoming_buffer) { + sas->incoming_buffer = g_malloc0(max); + } + + memcpy(sas->incoming_buffer + start_gfn, values, count); + + return 0; +} + +static void 
kvm_s390_stattrib_synchronize(S390StAttribState *sa) +{ + KVMS390StAttribState *sas = KVM_S390_STATTRIB(sa); + MachineState *machine = MACHINE(qdev_get_machine()); + unsigned long max = machine->ram_size / TARGET_PAGE_SIZE; + /* We do not need to reach the maximum buffer size allowed */ + unsigned long cx, len = KVM_S390_SKEYS_MAX / 2; + int r; + struct kvm_s390_cmma_log clog = { + .flags = 0, + .mask = ~0ULL, + }; + + if (sas->incoming_buffer) { + for (cx = 0; cx + len <= max; cx += len) { + clog.start_gfn = cx; + clog.count = len; + clog.values = (uint64_t)(sas->incoming_buffer + cx); + r = kvm_vm_ioctl(kvm_state, KVM_S390_SET_CMMA_BITS, &clog); + if (r) { + error_report("KVM_S390_SET_CMMA_BITS failed: %s", strerror(-r)); + return; + } + } + if (cx < max) { + clog.start_gfn = cx; + clog.count = max - cx; + clog.values = (uint64_t)(sas->incoming_buffer + cx); + r = kvm_vm_ioctl(kvm_state, KVM_S390_SET_CMMA_BITS, &clog); + if (r) { + error_report("KVM_S390_SET_CMMA_BITS failed: %s", strerror(-r)); + } + } + g_free(sas->incoming_buffer); + sas->incoming_buffer = NULL; + } +} + +static int kvm_s390_stattrib_set_migrationmode(S390StAttribState *sa, bool val) +{ + struct kvm_device_attr attr = { + .group = KVM_S390_VM_MIGRATION, + .attr = val, + .addr = 0, + }; + return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr); +} + +static long long kvm_s390_stattrib_get_dirtycount(S390StAttribState *sa) +{ + KVMS390StAttribState *sas = KVM_S390_STATTRIB(sa); + uint8_t val[8]; + + kvm_s390_stattrib_peek_stattr(sa, 0, 1, val); + return sas->still_dirty; +} + +static int kvm_s390_stattrib_get_active(S390StAttribState *sa) +{ + return kvm_s390_cmma_active() && sa->migration_enabled; +} + +static void kvm_s390_stattrib_class_init(ObjectClass *oc, void *data) +{ + S390StAttribClass *sac = S390_STATTRIB_CLASS(oc); + DeviceClass *dc = DEVICE_CLASS(oc); + + sac->get_stattr = kvm_s390_stattrib_get_stattr; + sac->peek_stattr = kvm_s390_stattrib_peek_stattr; + sac->set_stattr = kvm_s390_stattrib_set_stattr; + sac->set_migrationmode = kvm_s390_stattrib_set_migrationmode; + sac->get_dirtycount = kvm_s390_stattrib_get_dirtycount; + sac->synchronize = kvm_s390_stattrib_synchronize; + sac->get_active = kvm_s390_stattrib_get_active; + + /* Reason: Can only be instantiated one time (internally) */ + dc->user_creatable = false; +} + +static const TypeInfo kvm_s390_stattrib_info = { + .name = TYPE_KVM_S390_STATTRIB, + .parent = TYPE_S390_STATTRIB, + .instance_init = kvm_s390_stattrib_instance_init, + .instance_size = sizeof(KVMS390StAttribState), + .class_init = kvm_s390_stattrib_class_init, + .class_size = sizeof(S390StAttribClass), +}; + +static void kvm_s390_stattrib_register_types(void) +{ + type_register_static(&kvm_s390_stattrib_info); +} + +type_init(kvm_s390_stattrib_register_types) diff --git a/hw/s390x/s390-stattrib.c b/hw/s390x/s390-stattrib.c new file mode 100644 index 000000000..9eda1c3b2 --- /dev/null +++ b/hw/s390x/s390-stattrib.c @@ -0,0 +1,410 @@ +/* + * s390 storage attributes device + * + * Copyright 2016 IBM Corp. + * Author(s): Claudio Imbrenda + * + * This work is licensed under the terms of the GNU GPL, version 2 or (at + * your option) any later version. See the COPYING file in the top-level + * directory. 
+ */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "migration/qemu-file.h" +#include "migration/register.h" +#include "hw/s390x/storage-attributes.h" +#include "qemu/error-report.h" +#include "exec/ram_addr.h" +#include "qapi/error.h" +#include "qapi/qmp/qdict.h" + +/* 512KiB cover 2GB of guest memory */ +#define CMMA_BLOCK_SIZE (512 * KiB) + +#define STATTR_FLAG_EOS 0x01ULL +#define STATTR_FLAG_MORE 0x02ULL +#define STATTR_FLAG_ERROR 0x04ULL +#define STATTR_FLAG_DONE 0x08ULL + +static S390StAttribState *s390_get_stattrib_device(void) +{ + S390StAttribState *sas; + + sas = S390_STATTRIB(object_resolve_path_type("", TYPE_S390_STATTRIB, NULL)); + assert(sas); + return sas; +} + +void s390_stattrib_init(void) +{ + Object *obj; + + obj = kvm_s390_stattrib_create(); + if (!obj) { + obj = object_new(TYPE_QEMU_S390_STATTRIB); + } + + object_property_add_child(qdev_get_machine(), TYPE_S390_STATTRIB, + obj); + object_unref(obj); + + qdev_realize(DEVICE(obj), NULL, &error_fatal); +} + +/* Console commands: */ + +void hmp_migrationmode(Monitor *mon, const QDict *qdict) +{ + S390StAttribState *sas = s390_get_stattrib_device(); + S390StAttribClass *sac = S390_STATTRIB_GET_CLASS(sas); + uint64_t what = qdict_get_int(qdict, "mode"); + int r; + + r = sac->set_migrationmode(sas, what); + if (r < 0) { + monitor_printf(mon, "Error: %s", strerror(-r)); + } +} + +void hmp_info_cmma(Monitor *mon, const QDict *qdict) +{ + S390StAttribState *sas = s390_get_stattrib_device(); + S390StAttribClass *sac = S390_STATTRIB_GET_CLASS(sas); + uint64_t addr = qdict_get_int(qdict, "addr"); + uint64_t buflen = qdict_get_try_int(qdict, "count", 8); + uint8_t *vals; + int cx, len; + + vals = g_try_malloc(buflen); + if (!vals) { + monitor_printf(mon, "Error: %s\n", strerror(errno)); + return; + } + + len = sac->peek_stattr(sas, addr / TARGET_PAGE_SIZE, buflen, vals); + if (len < 0) { + monitor_printf(mon, "Error: %s", strerror(-len)); + goto out; + } + + monitor_printf(mon, " CMMA attributes, " + "pages %" PRIu64 "+%d (0x%" PRIx64 "):\n", + addr / TARGET_PAGE_SIZE, len, addr & ~TARGET_PAGE_MASK); + for (cx = 0; cx < len; cx++) { + if (cx % 8 == 7) { + monitor_printf(mon, "%02x\n", vals[cx]); + } else { + monitor_printf(mon, "%02x", vals[cx]); + } + } + monitor_printf(mon, "\n"); + +out: + g_free(vals); +} + +/* Migration support: */ + +static int cmma_load(QEMUFile *f, void *opaque, int version_id) +{ + S390StAttribState *sas = S390_STATTRIB(opaque); + S390StAttribClass *sac = S390_STATTRIB_GET_CLASS(sas); + uint64_t count, cur_gfn; + int flags, ret = 0; + ram_addr_t addr; + uint8_t *buf; + + while (!ret) { + addr = qemu_get_be64(f); + flags = addr & ~TARGET_PAGE_MASK; + addr &= TARGET_PAGE_MASK; + + switch (flags) { + case STATTR_FLAG_MORE: { + cur_gfn = addr / TARGET_PAGE_SIZE; + count = qemu_get_be64(f); + buf = g_try_malloc(count); + if (!buf) { + error_report("cmma_load could not allocate memory"); + ret = -ENOMEM; + break; + } + + qemu_get_buffer(f, buf, count); + ret = sac->set_stattr(sas, cur_gfn, count, buf); + if (ret < 0) { + error_report("Error %d while setting storage attributes", ret); + } + g_free(buf); + break; + } + case STATTR_FLAG_ERROR: { + error_report("Storage attributes data is incomplete"); + ret = -EINVAL; + break; + } + case STATTR_FLAG_DONE: + /* This is after the last pre-copied value has been sent, nothing + * more will be sent after this. Pre-copy has finished, and we + * are done flushing all the remaining values. Now the target + * system is about to take over. 
We synchronize the buffer to + * apply the actual correct values where needed. + */ + sac->synchronize(sas); + break; + case STATTR_FLAG_EOS: + /* Normal exit */ + return 0; + default: + error_report("Unexpected storage attribute flag data: %#x", flags); + ret = -EINVAL; + } + } + + return ret; +} + +static int cmma_save_setup(QEMUFile *f, void *opaque) +{ + S390StAttribState *sas = S390_STATTRIB(opaque); + S390StAttribClass *sac = S390_STATTRIB_GET_CLASS(sas); + int res; + /* + * Signal that we want to start a migration, thus needing PGSTE dirty + * tracking. + */ + res = sac->set_migrationmode(sas, 1); + if (res) { + return res; + } + qemu_put_be64(f, STATTR_FLAG_EOS); + return 0; +} + +static void cmma_save_pending(QEMUFile *f, void *opaque, uint64_t max_size, + uint64_t *res_precopy_only, + uint64_t *res_compatible, + uint64_t *res_postcopy_only) +{ + S390StAttribState *sas = S390_STATTRIB(opaque); + S390StAttribClass *sac = S390_STATTRIB_GET_CLASS(sas); + long long res = sac->get_dirtycount(sas); + + if (res >= 0) { + *res_precopy_only += res; + } +} + +static int cmma_save(QEMUFile *f, void *opaque, int final) +{ + S390StAttribState *sas = S390_STATTRIB(opaque); + S390StAttribClass *sac = S390_STATTRIB_GET_CLASS(sas); + uint8_t *buf; + int r, cx, reallen = 0, ret = 0; + uint32_t buflen = CMMA_BLOCK_SIZE; + uint64_t start_gfn = sas->migration_cur_gfn; + + buf = g_try_malloc(buflen); + if (!buf) { + error_report("Could not allocate memory to save storage attributes"); + return -ENOMEM; + } + + while (final ? 1 : qemu_file_rate_limit(f) == 0) { + reallen = sac->get_stattr(sas, &start_gfn, buflen, buf); + if (reallen < 0) { + g_free(buf); + return reallen; + } + + ret = 1; + if (!reallen) { + break; + } + qemu_put_be64(f, (start_gfn << TARGET_PAGE_BITS) | STATTR_FLAG_MORE); + qemu_put_be64(f, reallen); + for (cx = 0; cx < reallen; cx++) { + qemu_put_byte(f, buf[cx]); + } + if (!sac->get_dirtycount(sas)) { + break; + } + } + + sas->migration_cur_gfn = start_gfn + reallen; + g_free(buf); + if (final) { + qemu_put_be64(f, STATTR_FLAG_DONE); + } + qemu_put_be64(f, STATTR_FLAG_EOS); + + r = qemu_file_get_error(f); + if (r < 0) { + return r; + } + + return ret; +} + +static int cmma_save_iterate(QEMUFile *f, void *opaque) +{ + return cmma_save(f, opaque, 0); +} + +static int cmma_save_complete(QEMUFile *f, void *opaque) +{ + return cmma_save(f, opaque, 1); +} + +static void cmma_save_cleanup(void *opaque) +{ + S390StAttribState *sas = S390_STATTRIB(opaque); + S390StAttribClass *sac = S390_STATTRIB_GET_CLASS(sas); + sac->set_migrationmode(sas, 0); +} + +static bool cmma_active(void *opaque) +{ + S390StAttribState *sas = S390_STATTRIB(opaque); + S390StAttribClass *sac = S390_STATTRIB_GET_CLASS(sas); + return sac->get_active(sas); +} + +/* QEMU object: */ + +static void qemu_s390_stattrib_instance_init(Object *obj) +{ +} + +static int qemu_s390_peek_stattr_stub(S390StAttribState *sa, uint64_t start_gfn, + uint32_t count, uint8_t *values) +{ + return 0; +} +static void qemu_s390_synchronize_stub(S390StAttribState *sa) +{ +} +static int qemu_s390_get_stattr_stub(S390StAttribState *sa, uint64_t *start_gfn, + uint32_t count, uint8_t *values) +{ + return 0; +} +static long long qemu_s390_get_dirtycount_stub(S390StAttribState *sa) +{ + return 0; +} +static int qemu_s390_set_migrationmode_stub(S390StAttribState *sa, bool value) +{ + return 0; +} + +static int qemu_s390_get_active(S390StAttribState *sa) +{ + return sa->migration_enabled; +} + +static void qemu_s390_stattrib_class_init(ObjectClass *oc, 
void *data) +{ + S390StAttribClass *sa_cl = S390_STATTRIB_CLASS(oc); + DeviceClass *dc = DEVICE_CLASS(oc); + + sa_cl->synchronize = qemu_s390_synchronize_stub; + sa_cl->get_stattr = qemu_s390_get_stattr_stub; + sa_cl->set_stattr = qemu_s390_peek_stattr_stub; + sa_cl->peek_stattr = qemu_s390_peek_stattr_stub; + sa_cl->set_migrationmode = qemu_s390_set_migrationmode_stub; + sa_cl->get_dirtycount = qemu_s390_get_dirtycount_stub; + sa_cl->get_active = qemu_s390_get_active; + + /* Reason: Can only be instantiated one time (internally) */ + dc->user_creatable = false; +} + +static const TypeInfo qemu_s390_stattrib_info = { + .name = TYPE_QEMU_S390_STATTRIB, + .parent = TYPE_S390_STATTRIB, + .instance_init = qemu_s390_stattrib_instance_init, + .instance_size = sizeof(QEMUS390StAttribState), + .class_init = qemu_s390_stattrib_class_init, + .class_size = sizeof(S390StAttribClass), +}; + +/* Generic abstract object: */ + +static void s390_stattrib_realize(DeviceState *dev, Error **errp) +{ + bool ambiguous = false; + + object_resolve_path_type("", TYPE_S390_STATTRIB, &ambiguous); + if (ambiguous) { + error_setg(errp, "storage_attributes device already exists"); + } +} + +static void s390_stattrib_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + + dc->hotpluggable = false; + set_bit(DEVICE_CATEGORY_MISC, dc->categories); + dc->realize = s390_stattrib_realize; +} + +static inline bool s390_stattrib_get_migration_enabled(Object *obj, + Error **errp) +{ + S390StAttribState *s = S390_STATTRIB(obj); + + return s->migration_enabled; +} + +static inline void s390_stattrib_set_migration_enabled(Object *obj, bool value, + Error **errp) +{ + S390StAttribState *s = S390_STATTRIB(obj); + + s->migration_enabled = value; +} + +static SaveVMHandlers savevm_s390_stattrib_handlers = { + .save_setup = cmma_save_setup, + .save_live_iterate = cmma_save_iterate, + .save_live_complete_precopy = cmma_save_complete, + .save_live_pending = cmma_save_pending, + .save_cleanup = cmma_save_cleanup, + .load_state = cmma_load, + .is_active = cmma_active, +}; + +static void s390_stattrib_instance_init(Object *obj) +{ + S390StAttribState *sas = S390_STATTRIB(obj); + + register_savevm_live(TYPE_S390_STATTRIB, 0, 0, + &savevm_s390_stattrib_handlers, sas); + + object_property_add_bool(obj, "migration-enabled", + s390_stattrib_get_migration_enabled, + s390_stattrib_set_migration_enabled); + object_property_set_bool(obj, "migration-enabled", true, NULL); + sas->migration_cur_gfn = 0; +} + +static const TypeInfo s390_stattrib_info = { + .name = TYPE_S390_STATTRIB, + .parent = TYPE_DEVICE, + .instance_init = s390_stattrib_instance_init, + .instance_size = sizeof(S390StAttribState), + .class_init = s390_stattrib_class_init, + .class_size = sizeof(S390StAttribClass), + .abstract = true, +}; + +static void s390_stattrib_register_types(void) +{ + type_register_static(&s390_stattrib_info); + type_register_static(&qemu_s390_stattrib_info); +} + +type_init(s390_stattrib_register_types) diff --git a/hw/s390x/s390-virtio-ccw.c b/hw/s390x/s390-virtio-ccw.c new file mode 100644 index 000000000..653587ea6 --- /dev/null +++ b/hw/s390x/s390-virtio-ccw.c @@ -0,0 +1,1109 @@ +/* + * virtio ccw machine + * + * Copyright 2012, 2020 IBM Corp. + * Copyright (c) 2009 Alexander Graf + * Author(s): Cornelia Huck + * Janosch Frank + * + * This work is licensed under the terms of the GNU GPL, version 2 or (at + * your option) any later version. See the COPYING file in the top-level + * directory. 
+ */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "exec/ram_addr.h" +#include "hw/s390x/s390-virtio-hcall.h" +#include "hw/s390x/sclp.h" +#include "hw/s390x/s390_flic.h" +#include "hw/s390x/ioinst.h" +#include "hw/s390x/css.h" +#include "virtio-ccw.h" +#include "qemu/config-file.h" +#include "qemu/ctype.h" +#include "qemu/error-report.h" +#include "qemu/option.h" +#include "qemu/qemu-print.h" +#include "hw/s390x/s390-pci-bus.h" +#include "sysemu/reset.h" +#include "hw/s390x/storage-keys.h" +#include "hw/s390x/storage-attributes.h" +#include "hw/s390x/event-facility.h" +#include "ipl.h" +#include "hw/s390x/s390-virtio-ccw.h" +#include "hw/s390x/css-bridge.h" +#include "hw/s390x/ap-bridge.h" +#include "migration/register.h" +#include "cpu_models.h" +#include "hw/nmi.h" +#include "hw/qdev-properties.h" +#include "hw/s390x/tod.h" +#include "sysemu/sysemu.h" +#include "hw/s390x/pv.h" +#include "migration/blocker.h" + +static Error *pv_mig_blocker; + +S390CPU *s390_cpu_addr2state(uint16_t cpu_addr) +{ + static MachineState *ms; + + if (!ms) { + ms = MACHINE(qdev_get_machine()); + g_assert(ms->possible_cpus); + } + + /* CPU address corresponds to the core_id and the index */ + if (cpu_addr >= ms->possible_cpus->len) { + return NULL; + } + return S390_CPU(ms->possible_cpus->cpus[cpu_addr].cpu); +} + +static S390CPU *s390x_new_cpu(const char *typename, uint32_t core_id, + Error **errp) +{ + S390CPU *cpu = S390_CPU(object_new(typename)); + S390CPU *ret = NULL; + + if (!object_property_set_int(OBJECT(cpu), "core-id", core_id, errp)) { + goto out; + } + if (!qdev_realize(DEVICE(cpu), NULL, errp)) { + goto out; + } + ret = cpu; + +out: + object_unref(OBJECT(cpu)); + return ret; +} + +static void s390_init_cpus(MachineState *machine) +{ + MachineClass *mc = MACHINE_GET_CLASS(machine); + int i; + + /* initialize possible_cpus */ + mc->possible_cpu_arch_ids(machine); + + for (i = 0; i < machine->smp.cpus; i++) { + s390x_new_cpu(machine->cpu_type, i, &error_fatal); + } +} + +static const char *const reset_dev_types[] = { + TYPE_VIRTUAL_CSS_BRIDGE, + "s390-sclp-event-facility", + "s390-flic", + "diag288", + TYPE_S390_PCI_HOST_BRIDGE, +}; + +static void subsystem_reset(void) +{ + DeviceState *dev; + int i; + + for (i = 0; i < ARRAY_SIZE(reset_dev_types); i++) { + dev = DEVICE(object_resolve_path_type("", reset_dev_types[i], NULL)); + if (dev) { + qdev_reset_all(dev); + } + } +} + +static int virtio_ccw_hcall_notify(const uint64_t *args) +{ + uint64_t subch_id = args[0]; + uint64_t queue = args[1]; + SubchDev *sch; + int cssid, ssid, schid, m; + + if (ioinst_disassemble_sch_ident(subch_id, &m, &cssid, &ssid, &schid)) { + return -EINVAL; + } + sch = css_find_subch(m, cssid, ssid, schid); + if (!sch || !css_subch_visible(sch)) { + return -EINVAL; + } + if (queue >= VIRTIO_QUEUE_MAX) { + return -EINVAL; + } + virtio_queue_notify(virtio_ccw_get_vdev(sch), queue); + return 0; + +} + +static int virtio_ccw_hcall_early_printk(const uint64_t *args) +{ + uint64_t mem = args[0]; + MachineState *ms = MACHINE(qdev_get_machine()); + + if (mem < ms->ram_size) { + /* Early printk */ + return 0; + } + return -EINVAL; +} + +static void virtio_ccw_register_hcalls(void) +{ + s390_register_virtio_hypercall(KVM_S390_VIRTIO_CCW_NOTIFY, + virtio_ccw_hcall_notify); + /* Tolerate early printk. 
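Older guest kernels issue the legacy KVM_S390_VIRTIO_NOTIFY hypercall for their early console; virtio_ccw_hcall_early_printk() above merely checks that the address lies within guest RAM, so such calls succeed without doing anything. 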
*/ + s390_register_virtio_hypercall(KVM_S390_VIRTIO_NOTIFY, + virtio_ccw_hcall_early_printk); +} + +static void s390_memory_init(MemoryRegion *ram) +{ + MemoryRegion *sysmem = get_system_memory(); + + /* allocate RAM for core */ + memory_region_add_subregion(sysmem, 0, ram); + + /* + * Configure the maximum page size. As no memory devices were created + * yet, this is the page size of initial memory only. + */ + s390_set_max_pagesize(qemu_maxrampagesize(), &error_fatal); + /* Initialize storage key device */ + s390_skeys_init(); + /* Initialize storage attributes device */ + s390_stattrib_init(); +} + +static void s390_init_ipl_dev(const char *kernel_filename, + const char *kernel_cmdline, + const char *initrd_filename, const char *firmware, + const char *netboot_fw, bool enforce_bios) +{ + Object *new = object_new(TYPE_S390_IPL); + DeviceState *dev = DEVICE(new); + char *netboot_fw_prop; + + if (kernel_filename) { + qdev_prop_set_string(dev, "kernel", kernel_filename); + } + if (initrd_filename) { + qdev_prop_set_string(dev, "initrd", initrd_filename); + } + qdev_prop_set_string(dev, "cmdline", kernel_cmdline); + qdev_prop_set_string(dev, "firmware", firmware); + qdev_prop_set_bit(dev, "enforce_bios", enforce_bios); + netboot_fw_prop = object_property_get_str(new, "netboot_fw", &error_abort); + if (!strlen(netboot_fw_prop)) { + qdev_prop_set_string(dev, "netboot_fw", netboot_fw); + } + g_free(netboot_fw_prop); + object_property_add_child(qdev_get_machine(), TYPE_S390_IPL, + new); + object_unref(new); + qdev_realize(dev, NULL, &error_fatal); +} + +static void s390_create_virtio_net(BusState *bus, const char *name) +{ + int i; + + for (i = 0; i < nb_nics; i++) { + NICInfo *nd = &nd_table[i]; + DeviceState *dev; + + if (!nd->model) { + nd->model = g_strdup("virtio"); + } + + qemu_check_nic_model(nd, "virtio"); + + dev = qdev_new(name); + qdev_set_nic_properties(dev, nd); + qdev_realize_and_unref(dev, bus, &error_fatal); + } +} + +static void s390_create_sclpconsole(const char *type, Chardev *chardev) +{ + DeviceState *dev; + + dev = qdev_new(type); + qdev_prop_set_chr(dev, "chardev", chardev); + qdev_realize_and_unref(dev, sclp_get_event_facility_bus(), &error_fatal); +} + +static void ccw_init(MachineState *machine) +{ + int ret; + VirtualCssBus *css_bus; + DeviceState *dev; + + s390_sclp_init(); + /* init memory + setup max page size. Required for the CPU model */ + s390_memory_init(machine->ram); + + /* init CPUs (incl. 
CPU model) early so s390_has_feature() works */ + s390_init_cpus(machine); + + /* Need CPU model to be determined before we can set up PV */ + s390_pv_init(machine->cgs, &error_fatal); + + s390_flic_init(); + + /* init the SIGP facility */ + s390_init_sigp(); + + /* create AP bridge and bus(es) */ + s390_init_ap(); + + /* get a BUS */ + css_bus = virtual_css_bus_init(); + s390_init_ipl_dev(machine->kernel_filename, machine->kernel_cmdline, + machine->initrd_filename, + machine->firmware ?: "s390-ccw.img", + "s390-netboot.img", true); + + dev = qdev_new(TYPE_S390_PCI_HOST_BRIDGE); + object_property_add_child(qdev_get_machine(), TYPE_S390_PCI_HOST_BRIDGE, + OBJECT(dev)); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + + /* register hypercalls */ + virtio_ccw_register_hcalls(); + + s390_enable_css_support(s390_cpu_addr2state(0)); + + ret = css_create_css_image(VIRTUAL_CSSID, true); + + assert(ret == 0); + if (css_migration_enabled()) { + css_register_vmstate(); + } + + /* Create VirtIO network adapters */ + s390_create_virtio_net(BUS(css_bus), "virtio-net-ccw"); + + /* init consoles */ + if (serial_hd(0)) { + s390_create_sclpconsole("sclpconsole", serial_hd(0)); + } + if (serial_hd(1)) { + s390_create_sclpconsole("sclplmconsole", serial_hd(1)); + } + + /* init the TOD clock */ + s390_init_tod(); +} + +static void s390_cpu_plug(HotplugHandler *hotplug_dev, + DeviceState *dev, Error **errp) +{ + MachineState *ms = MACHINE(hotplug_dev); + S390CPU *cpu = S390_CPU(dev); + + g_assert(!ms->possible_cpus->cpus[cpu->env.core_id].cpu); + ms->possible_cpus->cpus[cpu->env.core_id].cpu = OBJECT(dev); + + if (dev->hotplugged) { + raise_irq_cpu_hotplug(); + } +} + +static inline void s390_do_cpu_ipl(CPUState *cs, run_on_cpu_data arg) +{ + S390CPU *cpu = S390_CPU(cs); + + s390_ipl_prepare_cpu(cpu); + s390_cpu_set_state(S390_CPU_STATE_OPERATING, cpu); +} + +static void s390_machine_unprotect(S390CcwMachineState *ms) +{ + s390_pv_vm_disable(); + ms->pv = false; + migrate_del_blocker(pv_mig_blocker); + error_free_or_abort(&pv_mig_blocker); + ram_block_discard_disable(false); +} + +static int s390_machine_protect(S390CcwMachineState *ms) +{ + Error *local_err = NULL; + int rc; + + /* + * Discarding of memory in RAM blocks does not work as expected with + * protected VMs. Sharing and unsharing pages would be required. Disable + * it for now, until we have a solution to make at least Linux + * guests either support it (e.g., virtio-balloon) or fail gracefully. 
+ */ + rc = ram_block_discard_disable(true); + if (rc) { + error_report("protected VMs: cannot disable RAM discard"); + return rc; + } + + error_setg(&pv_mig_blocker, + "protected VMs are currently not migratable."); + rc = migrate_add_blocker(pv_mig_blocker, &local_err); + if (rc) { + ram_block_discard_disable(false); + error_report_err(local_err); + error_free_or_abort(&pv_mig_blocker); + return rc; + } + + /* Create SE VM */ + rc = s390_pv_vm_enable(); + if (rc) { + ram_block_discard_disable(false); + migrate_del_blocker(pv_mig_blocker); + error_free_or_abort(&pv_mig_blocker); + return rc; + } + + ms->pv = true; + + /* Set SE header and unpack */ + rc = s390_ipl_prepare_pv_header(); + if (rc) { + goto out_err; + } + + /* Decrypt image */ + rc = s390_ipl_pv_unpack(); + if (rc) { + goto out_err; + } + + /* Verify integrity */ + rc = s390_pv_verify(); + if (rc) { + goto out_err; + } + return rc; + +out_err: + s390_machine_unprotect(ms); + return rc; +} + +static void s390_pv_prepare_reset(S390CcwMachineState *ms) +{ + CPUState *cs; + + if (!s390_is_pv()) { + return; + } + /* Unsharing requires all cpus to be stopped */ + CPU_FOREACH(cs) { + s390_cpu_set_state(S390_CPU_STATE_STOPPED, S390_CPU(cs)); + } + s390_pv_unshare(); + s390_pv_prep_reset(); +} + +static void s390_machine_reset(MachineState *machine) +{ + S390CcwMachineState *ms = S390_CCW_MACHINE(machine); + enum s390_reset reset_type; + CPUState *cs, *t; + S390CPU *cpu; + + /* get the reset parameters, reset them once done */ + s390_ipl_get_reset_request(&cs, &reset_type); + + /* all CPUs are paused and synchronized at this point */ + s390_cmma_reset(); + + cpu = S390_CPU(cs); + + switch (reset_type) { + case S390_RESET_EXTERNAL: + case S390_RESET_REIPL: + if (s390_is_pv()) { + s390_machine_unprotect(ms); + } + + qemu_devices_reset(); + s390_crypto_reset(); + + /* configure and start the ipl CPU only */ + run_on_cpu(cs, s390_do_cpu_ipl, RUN_ON_CPU_NULL); + break; + case S390_RESET_MODIFIED_CLEAR: + /* + * Subsystem reset needs to be done before we unshare memory + * and lose access to VIRTIO structures in guest memory. + */ + subsystem_reset(); + s390_crypto_reset(); + s390_pv_prepare_reset(ms); + CPU_FOREACH(t) { + run_on_cpu(t, s390_do_cpu_full_reset, RUN_ON_CPU_NULL); + } + run_on_cpu(cs, s390_do_cpu_load_normal, RUN_ON_CPU_NULL); + break; + case S390_RESET_LOAD_NORMAL: + /* + * Subsystem reset needs to be done before we unshare memory + * and lose access to VIRTIO structures in guest memory. + */ + subsystem_reset(); + s390_pv_prepare_reset(ms); + CPU_FOREACH(t) { + if (t == cs) { + continue; + } + run_on_cpu(t, s390_do_cpu_reset, RUN_ON_CPU_NULL); + } + run_on_cpu(cs, s390_do_cpu_initial_reset, RUN_ON_CPU_NULL); + run_on_cpu(cs, s390_do_cpu_load_normal, RUN_ON_CPU_NULL); + break; + case S390_RESET_PV: /* Subcode 10 */ + subsystem_reset(); + s390_crypto_reset(); + + CPU_FOREACH(t) { + if (t == cs) { + continue; + } + run_on_cpu(t, s390_do_cpu_full_reset, RUN_ON_CPU_NULL); + } + run_on_cpu(cs, s390_do_cpu_reset, RUN_ON_CPU_NULL); + + if (s390_machine_protect(ms)) { + s390_pv_inject_reset_error(cs); + /* + * Continue after the diag308 so the guest knows something + * went wrong. 
+ */ + s390_cpu_set_state(S390_CPU_STATE_OPERATING, cpu); + return; + } + + run_on_cpu(cs, s390_do_cpu_load_normal, RUN_ON_CPU_NULL); + break; + default: + g_assert_not_reached(); + } + + CPU_FOREACH(t) { + run_on_cpu(t, s390_do_cpu_set_diag318, RUN_ON_CPU_HOST_ULONG(0)); + } + s390_ipl_clear_reset_request(); +} + +static void s390_machine_device_plug(HotplugHandler *hotplug_dev, + DeviceState *dev, Error **errp) +{ + if (object_dynamic_cast(OBJECT(dev), TYPE_CPU)) { + s390_cpu_plug(hotplug_dev, dev, errp); + } +} + +static void s390_machine_device_unplug_request(HotplugHandler *hotplug_dev, + DeviceState *dev, Error **errp) +{ + if (object_dynamic_cast(OBJECT(dev), TYPE_CPU)) { + error_setg(errp, "CPU hot unplug not supported on this machine"); + return; + } +} + +static CpuInstanceProperties s390_cpu_index_to_props(MachineState *ms, + unsigned cpu_index) +{ + MachineClass *mc = MACHINE_GET_CLASS(ms); + const CPUArchIdList *possible_cpus = mc->possible_cpu_arch_ids(ms); + + assert(cpu_index < possible_cpus->len); + return possible_cpus->cpus[cpu_index].props; +} + +static const CPUArchIdList *s390_possible_cpu_arch_ids(MachineState *ms) +{ + int i; + unsigned int max_cpus = ms->smp.max_cpus; + + if (ms->possible_cpus) { + g_assert(ms->possible_cpus && ms->possible_cpus->len == max_cpus); + return ms->possible_cpus; + } + + ms->possible_cpus = g_malloc0(sizeof(CPUArchIdList) + + sizeof(CPUArchId) * max_cpus); + ms->possible_cpus->len = max_cpus; + for (i = 0; i < ms->possible_cpus->len; i++) { + ms->possible_cpus->cpus[i].type = ms->cpu_type; + ms->possible_cpus->cpus[i].vcpus_count = 1; + ms->possible_cpus->cpus[i].arch_id = i; + ms->possible_cpus->cpus[i].props.has_core_id = true; + ms->possible_cpus->cpus[i].props.core_id = i; + } + + return ms->possible_cpus; +} + +static HotplugHandler *s390_get_hotplug_handler(MachineState *machine, + DeviceState *dev) +{ + if (object_dynamic_cast(OBJECT(dev), TYPE_CPU)) { + return HOTPLUG_HANDLER(machine); + } + return NULL; +} + +static void s390_nmi(NMIState *n, int cpu_index, Error **errp) +{ + CPUState *cs = qemu_get_cpu(cpu_index); + + s390_cpu_restart(S390_CPU(cs)); +} + +static ram_addr_t s390_fixup_ram_size(ram_addr_t sz) +{ + /* same logic as in sclp.c */ + int increment_size = 20; + ram_addr_t newsz; + + while ((sz >> increment_size) > MAX_STORAGE_INCREMENTS) { + increment_size++; + } + newsz = sz >> increment_size << increment_size; + + if (sz != newsz) { + qemu_printf("Ram size %" PRIu64 "MB was fixed up to %" PRIu64 + "MB to match machine restrictions. 
Consider updating " + "the guest definition.\n", (uint64_t) (sz / MiB), + (uint64_t) (newsz / MiB)); + } + return newsz; +} + +static void ccw_machine_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + NMIClass *nc = NMI_CLASS(oc); + HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(oc); + S390CcwMachineClass *s390mc = S390_CCW_MACHINE_CLASS(mc); + + s390mc->ri_allowed = true; + s390mc->cpu_model_allowed = true; + s390mc->css_migration_enabled = true; + s390mc->hpage_1m_allowed = true; + mc->init = ccw_init; + mc->reset = s390_machine_reset; + mc->block_default_type = IF_VIRTIO; + mc->no_cdrom = 1; + mc->no_floppy = 1; + mc->no_parallel = 1; + mc->no_sdcard = 1; + mc->max_cpus = S390_MAX_CPUS; + mc->has_hotpluggable_cpus = true; + assert(!mc->get_hotplug_handler); + mc->get_hotplug_handler = s390_get_hotplug_handler; + mc->cpu_index_to_instance_props = s390_cpu_index_to_props; + mc->possible_cpu_arch_ids = s390_possible_cpu_arch_ids; + /* it is overridden with 'host' cpu *in kvm_arch_init* */ + mc->default_cpu_type = S390_CPU_TYPE_NAME("qemu"); + hc->plug = s390_machine_device_plug; + hc->unplug_request = s390_machine_device_unplug_request; + nc->nmi_monitor_handler = s390_nmi; + mc->default_ram_id = "s390.ram"; +} + +static inline bool machine_get_aes_key_wrap(Object *obj, Error **errp) +{ + S390CcwMachineState *ms = S390_CCW_MACHINE(obj); + + return ms->aes_key_wrap; +} + +static inline void machine_set_aes_key_wrap(Object *obj, bool value, + Error **errp) +{ + S390CcwMachineState *ms = S390_CCW_MACHINE(obj); + + ms->aes_key_wrap = value; +} + +static inline bool machine_get_dea_key_wrap(Object *obj, Error **errp) +{ + S390CcwMachineState *ms = S390_CCW_MACHINE(obj); + + return ms->dea_key_wrap; +} + +static inline void machine_set_dea_key_wrap(Object *obj, bool value, + Error **errp) +{ + S390CcwMachineState *ms = S390_CCW_MACHINE(obj); + + ms->dea_key_wrap = value; +} + +static S390CcwMachineClass *current_mc; + +/* + * Get the class of the s390-ccw-virtio machine that is currently in use. + * Note: libvirt is using the "none" machine to probe for the features of the + * host CPU, so in case this is called with the "none" machine, the function + * returns the TYPE_S390_CCW_MACHINE base class. In this base class, all the + * various "*_allowed" variables are enabled, so that the *_allowed() wrappers + * below return the correct default value for the "none" machine. + * + * Attention! Do *not* add additional new wrappers for CPU features (e.g. like + * the ri_allowed() wrapper) via this mechanism anymore. CPU features should + * be handled via the CPU models, i.e. checking with cpu_model_allowed() during + * CPU initialization and s390_has_feat() later should be sufficient. + */ +static S390CcwMachineClass *get_machine_class(void) +{ + if (unlikely(!current_mc)) { + /* + * No s390 ccw machine was instantiated, we are likely to + * be called for the 'none' machine. The properties will + * have their after-initialization values. 
+ */ + current_mc = S390_CCW_MACHINE_CLASS( + object_class_by_name(TYPE_S390_CCW_MACHINE)); + } + return current_mc; +} + +bool ri_allowed(void) +{ + return get_machine_class()->ri_allowed; +} + +bool cpu_model_allowed(void) +{ + return get_machine_class()->cpu_model_allowed; +} + +bool hpage_1m_allowed(void) +{ + return get_machine_class()->hpage_1m_allowed; +} + +static char *machine_get_loadparm(Object *obj, Error **errp) +{ + S390CcwMachineState *ms = S390_CCW_MACHINE(obj); + + /* make a NUL-terminated string */ + return g_strndup((char *) ms->loadparm, sizeof(ms->loadparm)); +} + +static void machine_set_loadparm(Object *obj, const char *val, Error **errp) +{ + S390CcwMachineState *ms = S390_CCW_MACHINE(obj); + int i; + + for (i = 0; i < sizeof(ms->loadparm) && val[i]; i++) { + uint8_t c = qemu_toupper(val[i]); /* mimic HMC */ + + if (('A' <= c && c <= 'Z') || ('0' <= c && c <= '9') || (c == '.') || + (c == ' ')) { + ms->loadparm[i] = c; + } else { + error_setg(errp, "LOADPARM: invalid character '%c' (ASCII 0x%02x)", + c, c); + return; + } + } + + for (; i < sizeof(ms->loadparm); i++) { + ms->loadparm[i] = ' '; /* pad right with spaces */ + } +} +static inline void s390_machine_initfn(Object *obj) +{ + object_property_add_bool(obj, "aes-key-wrap", + machine_get_aes_key_wrap, + machine_set_aes_key_wrap); + object_property_set_description(obj, "aes-key-wrap", + "enable/disable AES key wrapping using the CPACF wrapping key"); + object_property_set_bool(obj, "aes-key-wrap", true, NULL); + + object_property_add_bool(obj, "dea-key-wrap", + machine_get_dea_key_wrap, + machine_set_dea_key_wrap); + object_property_set_description(obj, "dea-key-wrap", + "enable/disable DEA key wrapping using the CPACF wrapping key"); + object_property_set_bool(obj, "dea-key-wrap", true, NULL); + object_property_add_str(obj, "loadparm", + machine_get_loadparm, machine_set_loadparm); + object_property_set_description(obj, "loadparm", + "Up to 8 chars in set of [A-Za-z0-9. 
] (lower case chars converted" + " to upper case) to pass to machine loader, boot manager," + " and guest kernel"); +} + +static const TypeInfo ccw_machine_info = { + .name = TYPE_S390_CCW_MACHINE, + .parent = TYPE_MACHINE, + .abstract = true, + .instance_size = sizeof(S390CcwMachineState), + .instance_init = s390_machine_initfn, + .class_size = sizeof(S390CcwMachineClass), + .class_init = ccw_machine_class_init, + .interfaces = (InterfaceInfo[]) { + { TYPE_NMI }, + { TYPE_HOTPLUG_HANDLER}, + { } + }, +}; + +bool css_migration_enabled(void) +{ + return get_machine_class()->css_migration_enabled; +} + +#define DEFINE_CCW_MACHINE(suffix, verstr, latest) \ + static void ccw_machine_##suffix##_class_init(ObjectClass *oc, \ + void *data) \ + { \ + MachineClass *mc = MACHINE_CLASS(oc); \ + ccw_machine_##suffix##_class_options(mc); \ + mc->desc = "VirtIO-ccw based S390 machine v" verstr; \ + if (latest) { \ + mc->alias = "s390-ccw-virtio"; \ + mc->is_default = true; \ + } \ + } \ + static void ccw_machine_##suffix##_instance_init(Object *obj) \ + { \ + MachineState *machine = MACHINE(obj); \ + current_mc = S390_CCW_MACHINE_CLASS(MACHINE_GET_CLASS(machine)); \ + ccw_machine_##suffix##_instance_options(machine); \ + } \ + static const TypeInfo ccw_machine_##suffix##_info = { \ + .name = MACHINE_TYPE_NAME("s390-ccw-virtio-" verstr), \ + .parent = TYPE_S390_CCW_MACHINE, \ + .class_init = ccw_machine_##suffix##_class_init, \ + .instance_init = ccw_machine_##suffix##_instance_init, \ + }; \ + static void ccw_machine_register_##suffix(void) \ + { \ + type_register_static(&ccw_machine_##suffix##_info); \ + } \ + type_init(ccw_machine_register_##suffix) + +static void ccw_machine_6_2_instance_options(MachineState *machine) +{ +} + +static void ccw_machine_6_2_class_options(MachineClass *mc) +{ +} +DEFINE_CCW_MACHINE(6_2, "6.2", true); + +static void ccw_machine_6_1_instance_options(MachineState *machine) +{ + ccw_machine_6_2_instance_options(machine); + s390_cpudef_featoff_greater(16, 1, S390_FEAT_NNPA); + s390_cpudef_featoff_greater(16, 1, S390_FEAT_VECTOR_PACKED_DECIMAL_ENH2); + s390_cpudef_featoff_greater(16, 1, S390_FEAT_BEAR_ENH); + s390_cpudef_featoff_greater(16, 1, S390_FEAT_RDP); + s390_cpudef_featoff_greater(16, 1, S390_FEAT_PAI); +} + +static void ccw_machine_6_1_class_options(MachineClass *mc) +{ + ccw_machine_6_2_class_options(mc); + compat_props_add(mc->compat_props, hw_compat_6_1, hw_compat_6_1_len); + mc->smp_props.prefer_sockets = true; +} +DEFINE_CCW_MACHINE(6_1, "6.1", false); + +static void ccw_machine_6_0_instance_options(MachineState *machine) +{ + static const S390FeatInit qemu_cpu_feat = { S390_FEAT_LIST_QEMU_V6_0 }; + + ccw_machine_6_1_instance_options(machine); + s390_set_qemu_cpu_model(0x2964, 13, 2, qemu_cpu_feat); +} + +static void ccw_machine_6_0_class_options(MachineClass *mc) +{ + ccw_machine_6_1_class_options(mc); + compat_props_add(mc->compat_props, hw_compat_6_0, hw_compat_6_0_len); +} +DEFINE_CCW_MACHINE(6_0, "6.0", false); + +static void ccw_machine_5_2_instance_options(MachineState *machine) +{ + ccw_machine_6_0_instance_options(machine); +} + +static void ccw_machine_5_2_class_options(MachineClass *mc) +{ + ccw_machine_6_0_class_options(mc); + compat_props_add(mc->compat_props, hw_compat_5_2, hw_compat_5_2_len); +} +DEFINE_CCW_MACHINE(5_2, "5.2", false); + +static void ccw_machine_5_1_instance_options(MachineState *machine) +{ + ccw_machine_5_2_instance_options(machine); +} + +static void ccw_machine_5_1_class_options(MachineClass *mc) +{ + 
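/* Versioned machine classes chain upward: each one first inherits the next-newer release's options, then layers its own compat properties on top. */ + 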
ccw_machine_5_2_class_options(mc); + compat_props_add(mc->compat_props, hw_compat_5_1, hw_compat_5_1_len); +} +DEFINE_CCW_MACHINE(5_1, "5.1", false); + +static void ccw_machine_5_0_instance_options(MachineState *machine) +{ + ccw_machine_5_1_instance_options(machine); +} + +static void ccw_machine_5_0_class_options(MachineClass *mc) +{ + ccw_machine_5_1_class_options(mc); + compat_props_add(mc->compat_props, hw_compat_5_0, hw_compat_5_0_len); +} +DEFINE_CCW_MACHINE(5_0, "5.0", false); + +static void ccw_machine_4_2_instance_options(MachineState *machine) +{ + ccw_machine_5_0_instance_options(machine); +} + +static void ccw_machine_4_2_class_options(MachineClass *mc) +{ + ccw_machine_5_0_class_options(mc); + mc->fixup_ram_size = s390_fixup_ram_size; + compat_props_add(mc->compat_props, hw_compat_4_2, hw_compat_4_2_len); +} +DEFINE_CCW_MACHINE(4_2, "4.2", false); + +static void ccw_machine_4_1_instance_options(MachineState *machine) +{ + static const S390FeatInit qemu_cpu_feat = { S390_FEAT_LIST_QEMU_V4_1 }; + ccw_machine_4_2_instance_options(machine); + s390_set_qemu_cpu_model(0x2964, 13, 2, qemu_cpu_feat); +} + +static void ccw_machine_4_1_class_options(MachineClass *mc) +{ + ccw_machine_4_2_class_options(mc); + compat_props_add(mc->compat_props, hw_compat_4_1, hw_compat_4_1_len); +} +DEFINE_CCW_MACHINE(4_1, "4.1", false); + +static void ccw_machine_4_0_instance_options(MachineState *machine) +{ + static const S390FeatInit qemu_cpu_feat = { S390_FEAT_LIST_QEMU_V4_0 }; + ccw_machine_4_1_instance_options(machine); + s390_set_qemu_cpu_model(0x2827, 12, 2, qemu_cpu_feat); +} + +static void ccw_machine_4_0_class_options(MachineClass *mc) +{ + ccw_machine_4_1_class_options(mc); + compat_props_add(mc->compat_props, hw_compat_4_0, hw_compat_4_0_len); +} +DEFINE_CCW_MACHINE(4_0, "4.0", false); + +static void ccw_machine_3_1_instance_options(MachineState *machine) +{ + static const S390FeatInit qemu_cpu_feat = { S390_FEAT_LIST_QEMU_V3_1 }; + ccw_machine_4_0_instance_options(machine); + s390_cpudef_featoff_greater(14, 1, S390_FEAT_MULTIPLE_EPOCH); + s390_cpudef_group_featoff_greater(14, 1, S390_FEAT_GROUP_MULTIPLE_EPOCH_PTFF); + s390_set_qemu_cpu_model(0x2827, 12, 2, qemu_cpu_feat); +} + +static void ccw_machine_3_1_class_options(MachineClass *mc) +{ + ccw_machine_4_0_class_options(mc); + compat_props_add(mc->compat_props, hw_compat_3_1, hw_compat_3_1_len); +} +DEFINE_CCW_MACHINE(3_1, "3.1", false); + +static void ccw_machine_3_0_instance_options(MachineState *machine) +{ + ccw_machine_3_1_instance_options(machine); +} + +static void ccw_machine_3_0_class_options(MachineClass *mc) +{ + S390CcwMachineClass *s390mc = S390_CCW_MACHINE_CLASS(mc); + + s390mc->hpage_1m_allowed = false; + ccw_machine_3_1_class_options(mc); + compat_props_add(mc->compat_props, hw_compat_3_0, hw_compat_3_0_len); +} +DEFINE_CCW_MACHINE(3_0, "3.0", false); + +static void ccw_machine_2_12_instance_options(MachineState *machine) +{ + ccw_machine_3_0_instance_options(machine); + s390_cpudef_featoff_greater(11, 1, S390_FEAT_PPA15); + s390_cpudef_featoff_greater(11, 1, S390_FEAT_BPB); +} + +static void ccw_machine_2_12_class_options(MachineClass *mc) +{ + ccw_machine_3_0_class_options(mc); + compat_props_add(mc->compat_props, hw_compat_2_12, hw_compat_2_12_len); +} +DEFINE_CCW_MACHINE(2_12, "2.12", false); + +static void ccw_machine_2_11_instance_options(MachineState *machine) +{ + static const S390FeatInit qemu_cpu_feat = { S390_FEAT_LIST_QEMU_V2_11 }; + ccw_machine_2_12_instance_options(machine); + + /* before 2.12 we emulated the 
very first z900 */ + s390_set_qemu_cpu_model(0x2064, 7, 1, qemu_cpu_feat); +} + +static void ccw_machine_2_11_class_options(MachineClass *mc) +{ + static GlobalProperty compat[] = { + { TYPE_SCLP_EVENT_FACILITY, "allow_all_mask_sizes", "off", }, + }; + + ccw_machine_2_12_class_options(mc); + compat_props_add(mc->compat_props, hw_compat_2_11, hw_compat_2_11_len); + compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat)); +} +DEFINE_CCW_MACHINE(2_11, "2.11", false); + +static void ccw_machine_2_10_instance_options(MachineState *machine) +{ + ccw_machine_2_11_instance_options(machine); +} + +static void ccw_machine_2_10_class_options(MachineClass *mc) +{ + ccw_machine_2_11_class_options(mc); + compat_props_add(mc->compat_props, hw_compat_2_10, hw_compat_2_10_len); +} +DEFINE_CCW_MACHINE(2_10, "2.10", false); + +static void ccw_machine_2_9_instance_options(MachineState *machine) +{ + ccw_machine_2_10_instance_options(machine); + s390_cpudef_featoff_greater(12, 1, S390_FEAT_ESOP); + s390_cpudef_featoff_greater(12, 1, S390_FEAT_SIDE_EFFECT_ACCESS_ESOP2); + s390_cpudef_featoff_greater(12, 1, S390_FEAT_ZPCI); + s390_cpudef_featoff_greater(12, 1, S390_FEAT_ADAPTER_INT_SUPPRESSION); + s390_cpudef_featoff_greater(12, 1, S390_FEAT_ADAPTER_EVENT_NOTIFICATION); +} + +static void ccw_machine_2_9_class_options(MachineClass *mc) +{ + S390CcwMachineClass *s390mc = S390_CCW_MACHINE_CLASS(mc); + static GlobalProperty compat[] = { + { TYPE_S390_STATTRIB, "migration-enabled", "off", }, + }; + + ccw_machine_2_10_class_options(mc); + compat_props_add(mc->compat_props, hw_compat_2_9, hw_compat_2_9_len); + compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat)); + s390mc->css_migration_enabled = false; +} +DEFINE_CCW_MACHINE(2_9, "2.9", false); + +static void ccw_machine_2_8_instance_options(MachineState *machine) +{ + ccw_machine_2_9_instance_options(machine); +} + +static void ccw_machine_2_8_class_options(MachineClass *mc) +{ + static GlobalProperty compat[] = { + { TYPE_S390_FLIC_COMMON, "adapter_routes_max_batch", "64", }, + }; + + ccw_machine_2_9_class_options(mc); + compat_props_add(mc->compat_props, hw_compat_2_8, hw_compat_2_8_len); + compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat)); +} +DEFINE_CCW_MACHINE(2_8, "2.8", false); + +static void ccw_machine_2_7_instance_options(MachineState *machine) +{ + ccw_machine_2_8_instance_options(machine); +} + +static void ccw_machine_2_7_class_options(MachineClass *mc) +{ + S390CcwMachineClass *s390mc = S390_CCW_MACHINE_CLASS(mc); + + s390mc->cpu_model_allowed = false; + ccw_machine_2_8_class_options(mc); + compat_props_add(mc->compat_props, hw_compat_2_7, hw_compat_2_7_len); +} +DEFINE_CCW_MACHINE(2_7, "2.7", false); + +static void ccw_machine_2_6_instance_options(MachineState *machine) +{ + ccw_machine_2_7_instance_options(machine); +} + +static void ccw_machine_2_6_class_options(MachineClass *mc) +{ + S390CcwMachineClass *s390mc = S390_CCW_MACHINE_CLASS(mc); + static GlobalProperty compat[] = { + { TYPE_S390_IPL, "iplbext_migration", "off", }, + { TYPE_VIRTUAL_CSS_BRIDGE, "css_dev_path", "off", }, + }; + + s390mc->ri_allowed = false; + ccw_machine_2_7_class_options(mc); + compat_props_add(mc->compat_props, hw_compat_2_6, hw_compat_2_6_len); + compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat)); +} +DEFINE_CCW_MACHINE(2_6, "2.6", false); + +static void ccw_machine_2_5_instance_options(MachineState *machine) +{ + ccw_machine_2_6_instance_options(machine); +} + +static void ccw_machine_2_5_class_options(MachineClass *mc) 
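/*
 * (Like every versioned machine above, 2.5 first inherits the 2.6 options
 * and then appends its own hw_compat_2_5 properties, so each older machine
 * type accumulates the complete chain of compatibility tweaks; the
 * DEFINE_CCW_MACHINE macro used throughout, defined earlier in this file,
 * generates the QOM boilerplate that wires these hooks into a machine
 * class.)
 */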
+{ + ccw_machine_2_6_class_options(mc); + compat_props_add(mc->compat_props, hw_compat_2_5, hw_compat_2_5_len); +} +DEFINE_CCW_MACHINE(2_5, "2.5", false); + +static void ccw_machine_2_4_instance_options(MachineState *machine) +{ + ccw_machine_2_5_instance_options(machine); +} + +static void ccw_machine_2_4_class_options(MachineClass *mc) +{ + static GlobalProperty compat[] = { + { TYPE_S390_SKEYS, "migration-enabled", "off", }, + { "virtio-blk-ccw", "max_revision", "0", }, + { "virtio-balloon-ccw", "max_revision", "0", }, + { "virtio-serial-ccw", "max_revision", "0", }, + { "virtio-9p-ccw", "max_revision", "0", }, + { "virtio-rng-ccw", "max_revision", "0", }, + { "virtio-net-ccw", "max_revision", "0", }, + { "virtio-scsi-ccw", "max_revision", "0", }, + { "vhost-scsi-ccw", "max_revision", "0", }, + }; + + ccw_machine_2_5_class_options(mc); + compat_props_add(mc->compat_props, hw_compat_2_4, hw_compat_2_4_len); + compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat)); +} +DEFINE_CCW_MACHINE(2_4, "2.4", false); + +static void ccw_machine_register_types(void) +{ + type_register_static(&ccw_machine_info); +} + +type_init(ccw_machine_register_types) diff --git a/hw/s390x/s390-virtio-hcall.c b/hw/s390x/s390-virtio-hcall.c new file mode 100644 index 000000000..ec7cf8beb --- /dev/null +++ b/hw/s390x/s390-virtio-hcall.c @@ -0,0 +1,41 @@ +/* + * Support for virtio hypercalls on s390 + * + * Copyright 2012 IBM Corp. + * Author(s): Cornelia Huck + * + * This work is licensed under the terms of the GNU GPL, version 2 or (at + * your option) any later version. See the COPYING file in the top-level + * directory. + */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "hw/s390x/s390-virtio-hcall.h" + +#define MAX_DIAG_SUBCODES 255 + +static s390_virtio_fn s390_diag500_table[MAX_DIAG_SUBCODES]; + +void s390_register_virtio_hypercall(uint64_t code, s390_virtio_fn fn) +{ + assert(code < MAX_DIAG_SUBCODES); + assert(!s390_diag500_table[code]); + + s390_diag500_table[code] = fn; +} + +int s390_virtio_hypercall(CPUS390XState *env) +{ + s390_virtio_fn fn; + + if (env->regs[1] < MAX_DIAG_SUBCODES) { + fn = s390_diag500_table[env->regs[1]]; + if (fn) { + env->regs[2] = fn(&env->regs[2]); + return 0; + } + } + + return -EINVAL; +} diff --git a/hw/s390x/s390-virtio-hcall.h b/hw/s390x/s390-virtio-hcall.h new file mode 100644 index 000000000..9800c4b35 --- /dev/null +++ b/hw/s390x/s390-virtio-hcall.h @@ -0,0 +1,23 @@ +/* + * Support for virtio hypercalls on s390x + * + * Copyright IBM Corp. 2012, 2017 + * Author(s): Cornelia Huck + * + * This work is licensed under the terms of the GNU GPL, version 2 or (at + * your option) any later version. See the COPYING file in the top-level + * directory. + */ + +#ifndef HW_S390_VIRTIO_HCALL_H +#define HW_S390_VIRTIO_HCALL_H + +#include "standard-headers/asm-s390/virtio-ccw.h" + +/* The only thing that we need from the old kvm_virtio.h file */ +#define KVM_S390_VIRTIO_NOTIFY 0 + +typedef int (*s390_virtio_fn)(const uint64_t *args); +void s390_register_virtio_hypercall(uint64_t code, s390_virtio_fn fn); +int s390_virtio_hypercall(CPUS390XState *env); +#endif /* HW_S390_VIRTIO_HCALL_H */ diff --git a/hw/s390x/sclp.c b/hw/s390x/sclp.c new file mode 100644 index 000000000..89c30a8a9 --- /dev/null +++ b/hw/s390x/sclp.c @@ -0,0 +1,476 @@ +/* + * SCLP Support + * + * Copyright IBM, Corp. 2012 + * + * Authors: + * Christian Borntraeger + * Heinz Graalfs + * + * This work is licensed under the terms of the GNU GPL, version 2 or (at your + * option) any later version. 
See the COPYING file in the top-level directory. + * + */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "qapi/error.h" +#include "hw/boards.h" +#include "hw/s390x/sclp.h" +#include "hw/s390x/event-facility.h" +#include "hw/s390x/s390-pci-bus.h" +#include "hw/s390x/ipl.h" + +static inline SCLPDevice *get_sclp_device(void) +{ + static SCLPDevice *sclp; + + if (!sclp) { + sclp = SCLP(object_resolve_path_type("", TYPE_SCLP, NULL)); + } + return sclp; +} + +static inline bool sclp_command_code_valid(uint32_t code) +{ + switch (code & SCLP_CMD_CODE_MASK) { + case SCLP_CMDW_READ_SCP_INFO: + case SCLP_CMDW_READ_SCP_INFO_FORCED: + case SCLP_CMDW_READ_CPU_INFO: + case SCLP_CMDW_CONFIGURE_IOA: + case SCLP_CMDW_DECONFIGURE_IOA: + case SCLP_CMD_READ_EVENT_DATA: + case SCLP_CMD_WRITE_EVENT_DATA: + case SCLP_CMD_WRITE_EVENT_MASK: + return true; + } + return false; +} + +static bool sccb_verify_boundary(uint64_t sccb_addr, uint16_t sccb_len, + uint32_t code) +{ + uint64_t sccb_max_addr = sccb_addr + sccb_len - 1; + uint64_t sccb_boundary = (sccb_addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE; + + switch (code & SCLP_CMD_CODE_MASK) { + case SCLP_CMDW_READ_SCP_INFO: + case SCLP_CMDW_READ_SCP_INFO_FORCED: + case SCLP_CMDW_READ_CPU_INFO: + /* + * An extended-length SCCB is only allowed for Read SCP/CPU Info and + * is allowed to exceed the 4k boundary. The respective commands will + * set the length field to the required length if an insufficient + * SCCB length is provided. + */ + if (s390_has_feat(S390_FEAT_EXTENDED_LENGTH_SCCB)) { + return true; + } + /* fallthrough */ + default: + if (sccb_max_addr < sccb_boundary) { + return true; + } + } + + return false; +} + +static void prepare_cpu_entries(MachineState *ms, CPUEntry *entry, int *count) +{ + uint8_t features[SCCB_CPU_FEATURE_LEN] = { 0 }; + int i; + + s390_get_feat_block(S390_FEAT_TYPE_SCLP_CPU, features); + for (i = 0, *count = 0; i < ms->possible_cpus->len; i++) { + if (!ms->possible_cpus->cpus[i].cpu) { + continue; + } + entry[*count].address = ms->possible_cpus->cpus[i].arch_id; + entry[*count].type = 0; + memcpy(entry[*count].features, features, sizeof(features)); + (*count)++; + } +} + +#define SCCB_REQ_LEN(s, max_cpus) (sizeof(s) + max_cpus * sizeof(CPUEntry)) + +static inline bool ext_len_sccb_supported(SCCBHeader header) +{ + return s390_has_feat(S390_FEAT_EXTENDED_LENGTH_SCCB) && + header.control_mask[2] & SCLP_VARIABLE_LENGTH_RESPONSE; +} + +/* Provide information about the configuration, CPUs and storage */ +static void read_SCP_info(SCLPDevice *sclp, SCCB *sccb) +{ + ReadInfo *read_info = (ReadInfo *) sccb; + MachineState *machine = MACHINE(qdev_get_machine()); + int cpu_count; + int rnsize, rnmax; + IplParameterBlock *ipib = s390_ipl_get_iplb(); + int required_len = SCCB_REQ_LEN(ReadInfo, machine->possible_cpus->len); + int offset_cpu = s390_has_feat(S390_FEAT_EXTENDED_LENGTH_SCCB) ? 
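/* with the extended-length-SCCB facility, CPU entries start right at
   ReadInfo's own entries field; without it the legacy fixed offset applies */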
+ offsetof(ReadInfo, entries) : + SCLP_READ_SCP_INFO_FIXED_CPU_OFFSET; + CPUEntry *entries_start = (void *)sccb + offset_cpu; + + if (be16_to_cpu(sccb->h.length) < required_len) { + if (ext_len_sccb_supported(sccb->h)) { + sccb->h.length = cpu_to_be16(required_len); + } + sccb->h.response_code = cpu_to_be16(SCLP_RC_INSUFFICIENT_SCCB_LENGTH); + return; + } + + /* CPU information */ + prepare_cpu_entries(machine, entries_start, &cpu_count); + read_info->entries_cpu = cpu_to_be16(cpu_count); + read_info->offset_cpu = cpu_to_be16(offset_cpu); + read_info->highest_cpu = cpu_to_be16(machine->smp.max_cpus - 1); + + read_info->ibc_val = cpu_to_be32(s390_get_ibc_val()); + + /* Configuration Characteristic (Extension) */ + s390_get_feat_block(S390_FEAT_TYPE_SCLP_CONF_CHAR, + read_info->conf_char); + s390_get_feat_block(S390_FEAT_TYPE_SCLP_CONF_CHAR_EXT, + read_info->conf_char_ext); + + if (s390_has_feat(S390_FEAT_EXTENDED_LENGTH_SCCB)) { + s390_get_feat_block(S390_FEAT_TYPE_SCLP_FAC134, + &read_info->fac134); + } + + read_info->facilities = cpu_to_be64(SCLP_HAS_CPU_INFO | + SCLP_HAS_IOA_RECONFIG); + + read_info->mha_pow = s390_get_mha_pow(); + read_info->hmfai = cpu_to_be32(s390_get_hmfai()); + + rnsize = 1 << (sclp->increment_size - 20); + if (rnsize <= 128) { + read_info->rnsize = rnsize; + } else { + read_info->rnsize = 0; + read_info->rnsize2 = cpu_to_be32(rnsize); + } + + /* we don't support standby memory, maxram_size is never exposed */ + rnmax = machine->ram_size >> sclp->increment_size; + if (rnmax < 0x10000) { + read_info->rnmax = cpu_to_be16(rnmax); + } else { + read_info->rnmax = cpu_to_be16(0); + read_info->rnmax2 = cpu_to_be64(rnmax); + } + + if (ipib && ipib->flags & DIAG308_FLAGS_LP_VALID) { + memcpy(&read_info->loadparm, &ipib->loadparm, + sizeof(read_info->loadparm)); + } else { + s390_ipl_set_loadparm(read_info->loadparm); + } + + sccb->h.response_code = cpu_to_be16(SCLP_RC_NORMAL_READ_COMPLETION); +} + +/* Provide information about the CPU */ +static void sclp_read_cpu_info(SCLPDevice *sclp, SCCB *sccb) +{ + MachineState *machine = MACHINE(qdev_get_machine()); + ReadCpuInfo *cpu_info = (ReadCpuInfo *) sccb; + int cpu_count; + int required_len = SCCB_REQ_LEN(ReadCpuInfo, machine->possible_cpus->len); + + if (be16_to_cpu(sccb->h.length) < required_len) { + if (ext_len_sccb_supported(sccb->h)) { + sccb->h.length = cpu_to_be16(required_len); + } + sccb->h.response_code = cpu_to_be16(SCLP_RC_INSUFFICIENT_SCCB_LENGTH); + return; + } + + prepare_cpu_entries(machine, cpu_info->entries, &cpu_count); + cpu_info->nr_configured = cpu_to_be16(cpu_count); + cpu_info->offset_configured = cpu_to_be16(offsetof(ReadCpuInfo, entries)); + cpu_info->nr_standby = cpu_to_be16(0); + + /* The standby offset is 16-byte for each CPU */ + cpu_info->offset_standby = cpu_to_be16(cpu_info->offset_configured + + cpu_info->nr_configured*sizeof(CPUEntry)); + + + sccb->h.response_code = cpu_to_be16(SCLP_RC_NORMAL_READ_COMPLETION); +} + +static void sclp_configure_io_adapter(SCLPDevice *sclp, SCCB *sccb, + bool configure) +{ + int rc; + + if (be16_to_cpu(sccb->h.length) < 16) { + rc = SCLP_RC_INSUFFICIENT_SCCB_LENGTH; + goto out_err; + } + + switch (((IoaCfgSccb *)sccb)->atype) { + case SCLP_RECONFIG_PCI_ATYPE: + if (s390_has_feat(S390_FEAT_ZPCI)) { + if (configure) { + s390_pci_sclp_configure(sccb); + } else { + s390_pci_sclp_deconfigure(sccb); + } + return; + } + /* fallthrough */ + default: + rc = SCLP_RC_ADAPTER_TYPE_NOT_RECOGNIZED; + } + + out_err: + sccb->h.response_code = cpu_to_be16(rc); +} + +static void 
sclp_execute(SCLPDevice *sclp, SCCB *sccb, uint32_t code) +{ + SCLPDeviceClass *sclp_c = SCLP_GET_CLASS(sclp); + SCLPEventFacility *ef = sclp->event_facility; + SCLPEventFacilityClass *efc = EVENT_FACILITY_GET_CLASS(ef); + + switch (code & SCLP_CMD_CODE_MASK) { + case SCLP_CMDW_READ_SCP_INFO: + case SCLP_CMDW_READ_SCP_INFO_FORCED: + sclp_c->read_SCP_info(sclp, sccb); + break; + case SCLP_CMDW_READ_CPU_INFO: + sclp_c->read_cpu_info(sclp, sccb); + break; + case SCLP_CMDW_CONFIGURE_IOA: + sclp_configure_io_adapter(sclp, sccb, true); + break; + case SCLP_CMDW_DECONFIGURE_IOA: + sclp_configure_io_adapter(sclp, sccb, false); + break; + default: + efc->command_handler(ef, sccb, code); + break; + } +} + +/* + * We only need the address to have something valid for the + * service_interrupt call. + */ +#define SCLP_PV_DUMMY_ADDR 0x4000 +int sclp_service_call_protected(CPUS390XState *env, uint64_t sccb, + uint32_t code) +{ + SCLPDevice *sclp = get_sclp_device(); + SCLPDeviceClass *sclp_c = SCLP_GET_CLASS(sclp); + SCCBHeader header; + g_autofree SCCB *work_sccb = NULL; + + s390_cpu_pv_mem_read(env_archcpu(env), 0, &header, sizeof(SCCBHeader)); + + work_sccb = g_malloc0(be16_to_cpu(header.length)); + s390_cpu_pv_mem_read(env_archcpu(env), 0, work_sccb, + be16_to_cpu(header.length)); + + if (!sclp_command_code_valid(code)) { + work_sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_SCLP_COMMAND); + goto out_write; + } + + sclp_c->execute(sclp, work_sccb, code); +out_write: + s390_cpu_pv_mem_write(env_archcpu(env), 0, work_sccb, + be16_to_cpu(work_sccb->h.length)); + sclp_c->service_interrupt(sclp, SCLP_PV_DUMMY_ADDR); + return 0; +} + +int sclp_service_call(CPUS390XState *env, uint64_t sccb, uint32_t code) +{ + SCLPDevice *sclp = get_sclp_device(); + SCLPDeviceClass *sclp_c = SCLP_GET_CLASS(sclp); + SCCBHeader header; + g_autofree SCCB *work_sccb = NULL; + + /* first some basic checks on program checks */ + if (env->psw.mask & PSW_MASK_PSTATE) { + return -PGM_PRIVILEGED; + } + if (cpu_physical_memory_is_io(sccb)) { + return -PGM_ADDRESSING; + } + if ((sccb & ~0x1fffUL) == 0 || (sccb & ~0x1fffUL) == env->psa + || (sccb & ~0x7ffffff8UL) != 0) { + return -PGM_SPECIFICATION; + } + + /* the header contains the actual length of the sccb */ + cpu_physical_memory_read(sccb, &header, sizeof(SCCBHeader)); + + /* Valid sccb sizes */ + if (be16_to_cpu(header.length) < sizeof(SCCBHeader)) { + return -PGM_SPECIFICATION; + } + + /* + * we want to work on a private copy of the sccb, to prevent guests + * from playing dirty tricks by modifying the memory content after + * the host has checked the values + */ + work_sccb = g_malloc0(be16_to_cpu(header.length)); + cpu_physical_memory_read(sccb, work_sccb, be16_to_cpu(header.length)); + + if (!sclp_command_code_valid(code)) { + work_sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_SCLP_COMMAND); + goto out_write; + } + + if (!sccb_verify_boundary(sccb, be16_to_cpu(work_sccb->h.length), code)) { + work_sccb->h.response_code = cpu_to_be16(SCLP_RC_SCCB_BOUNDARY_VIOLATION); + goto out_write; + } + + sclp_c->execute(sclp, work_sccb, code); +out_write: + cpu_physical_memory_write(sccb, work_sccb, + be16_to_cpu(work_sccb->h.length)); + + sclp_c->service_interrupt(sclp, sccb); + + return 0; +} + +static void service_interrupt(SCLPDevice *sclp, uint32_t sccb) +{ + SCLPEventFacility *ef = sclp->event_facility; + SCLPEventFacilityClass *efc = EVENT_FACILITY_GET_CLASS(ef); + + uint32_t param = sccb & ~3; + + /* Indicate whether an event is still pending */ + param |= 
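/* the SCCB address is 8-byte aligned (enforced in sclp_service_call), so
   its low bits are free to carry status: bit 0 of the external-interrupt
   parameter flags a still-pending event */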
efc->event_pending(ef) ? 1 : 0; + + if (!param) { + /* No need to send an interrupt, there's nothing to be notified about */ + return; + } + s390_sclp_extint(param); +} + +void sclp_service_interrupt(uint32_t sccb) +{ + SCLPDevice *sclp = get_sclp_device(); + SCLPDeviceClass *sclp_c = SCLP_GET_CLASS(sclp); + + sclp_c->service_interrupt(sclp, sccb); +} + +/* qemu object creation and initialization functions */ + +void s390_sclp_init(void) +{ + Object *new = object_new(TYPE_SCLP); + + object_property_add_child(qdev_get_machine(), TYPE_SCLP, new); + object_unref(new); + qdev_realize(DEVICE(new), NULL, &error_fatal); +} + +static void sclp_realize(DeviceState *dev, Error **errp) +{ + MachineState *machine = MACHINE(qdev_get_machine()); + SCLPDevice *sclp = SCLP(dev); + uint64_t hw_limit; + int ret; + + /* + * qdev_device_add searches the sysbus for TYPE_SCLP_EVENTS_BUS. As long + * as we can't find a fitting bus via the qom tree, we have to add the + * event facility to the sysbus, so e.g. a sclp console can be created. + */ + if (!sysbus_realize(SYS_BUS_DEVICE(sclp->event_facility), errp)) { + return; + } + + ret = s390_set_memory_limit(machine->maxram_size, &hw_limit); + if (ret == -E2BIG) { + error_setg(errp, "host supports a maximum of %" PRIu64 " GB", + hw_limit / GiB); + } else if (ret) { + error_setg(errp, "setting the guest size failed"); + } +} + +static void sclp_memory_init(SCLPDevice *sclp) +{ + MachineState *machine = MACHINE(qdev_get_machine()); + MachineClass *machine_class = MACHINE_GET_CLASS(qdev_get_machine()); + ram_addr_t initial_mem = machine->ram_size; + int increment_size = 20; + + /* The storage increment size is a multiple of 1M and is a power of 2. + * For some machine types, the number of storage increments must be + * MAX_STORAGE_INCREMENTS or fewer. + * The variable 'increment_size' is an exponent of 2 that can be + * used to calculate the size (in bytes) of an increment. 
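* As a worked example (assuming MAX_STORAGE_INCREMENTS is 1020): an 8 GiB
* guest settles on increment_size = 24, because 8 GiB >> 23 is 1024
* increments, still too many, while 8 GiB >> 24 is 512; storage is then
* reported to the guest in 16 MiB units. Note the loop below only runs for
* machine types that set fixup_ram_size.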
*/ + while (machine_class->fixup_ram_size != NULL && + (initial_mem >> increment_size) > MAX_STORAGE_INCREMENTS) { + increment_size++; + } + sclp->increment_size = increment_size; +} + +static void sclp_init(Object *obj) +{ + SCLPDevice *sclp = SCLP(obj); + Object *new; + + new = object_new(TYPE_SCLP_EVENT_FACILITY); + object_property_add_child(obj, TYPE_SCLP_EVENT_FACILITY, new); + object_unref(new); + sclp->event_facility = EVENT_FACILITY(new); + + sclp_memory_init(sclp); +} + +static void sclp_class_init(ObjectClass *oc, void *data) +{ + SCLPDeviceClass *sc = SCLP_CLASS(oc); + DeviceClass *dc = DEVICE_CLASS(oc); + + dc->desc = "SCLP (Service-Call Logical Processor)"; + dc->realize = sclp_realize; + dc->hotpluggable = false; + set_bit(DEVICE_CATEGORY_MISC, dc->categories); + /* + * Reason: Creates TYPE_SCLP_EVENT_FACILITY in sclp_init + * which is a non-pluggable sysbus device + */ + dc->user_creatable = false; + + sc->read_SCP_info = read_SCP_info; + sc->read_cpu_info = sclp_read_cpu_info; + sc->execute = sclp_execute; + sc->service_interrupt = service_interrupt; +} + +static TypeInfo sclp_info = { + .name = TYPE_SCLP, + .parent = TYPE_DEVICE, + .instance_init = sclp_init, + .instance_size = sizeof(SCLPDevice), + .class_init = sclp_class_init, + .class_size = sizeof(SCLPDeviceClass), +}; + +static void register_types(void) +{ + type_register_static(&sclp_info); +} +type_init(register_types); diff --git a/hw/s390x/sclpcpu.c b/hw/s390x/sclpcpu.c new file mode 100644 index 000000000..f2b1a4b03 --- /dev/null +++ b/hw/s390x/sclpcpu.c @@ -0,0 +1,106 @@ +/* + * SCLP event type + * Signal CPU - Trigger SCLP interrupt for system CPU configure or + * de-configure + * + * Copyright IBM, Corp. 2013 + * + * Authors: + * Thang Pham + * + * This work is licensed under the terms of the GNU GPL, version 2 or (at your + * option) any later version. See the COPYING file in the top-level directory. 
+ * + */ + +#include "qemu/osdep.h" +#include "hw/s390x/sclp.h" +#include "qemu/module.h" +#include "hw/s390x/event-facility.h" +#include "sysemu/cpus.h" + +typedef struct ConfigMgtData { + EventBufferHeader ebh; + uint8_t reserved; + uint8_t event_qualifier; +} QEMU_PACKED ConfigMgtData; + +#define EVENT_QUAL_CPU_CHANGE 1 + +void raise_irq_cpu_hotplug(void) +{ + Object *obj = object_resolve_path_type("", TYPE_SCLP_CPU_HOTPLUG, NULL); + + SCLP_EVENT(obj)->event_pending = true; + + /* Trigger SCLP read operation */ + sclp_service_interrupt(0); +} + +static sccb_mask_t send_mask(void) +{ + return SCLP_EVENT_MASK_CONFIG_MGT_DATA; +} + +static sccb_mask_t receive_mask(void) +{ + return 0; +} + +static int read_event_data(SCLPEvent *event, EventBufferHeader *evt_buf_hdr, + int *slen) +{ + ConfigMgtData *cdata = (ConfigMgtData *) evt_buf_hdr; + if (*slen < sizeof(ConfigMgtData)) { + return 0; + } + + /* Event is no longer pending */ + if (!event->event_pending) { + return 0; + } + event->event_pending = false; + + /* Event header data */ + cdata->ebh.length = cpu_to_be16(sizeof(ConfigMgtData)); + cdata->ebh.type = SCLP_EVENT_CONFIG_MGT_DATA; + cdata->ebh.flags |= SCLP_EVENT_BUFFER_ACCEPTED; + + /* Trigger a rescan of CPUs by setting event qualifier */ + cdata->event_qualifier = EVENT_QUAL_CPU_CHANGE; + *slen -= sizeof(ConfigMgtData); + + return 1; +} + +static void cpu_class_init(ObjectClass *oc, void *data) +{ + SCLPEventClass *k = SCLP_EVENT_CLASS(oc); + DeviceClass *dc = DEVICE_CLASS(oc); + + k->get_send_mask = send_mask; + k->get_receive_mask = receive_mask; + k->read_event_data = read_event_data; + set_bit(DEVICE_CATEGORY_MISC, dc->categories); + /* + * Reason: raise_irq_cpu_hotplug() depends on an unique + * TYPE_SCLP_CPU_HOTPLUG device, which is already created + * by the sclp event facility + */ + dc->user_creatable = false; +} + +static const TypeInfo sclp_cpu_info = { + .name = TYPE_SCLP_CPU_HOTPLUG, + .parent = TYPE_SCLP_EVENT, + .instance_size = sizeof(SCLPEvent), + .class_init = cpu_class_init, + .class_size = sizeof(SCLPEventClass), +}; + +static void sclp_cpu_register_types(void) +{ + type_register_static(&sclp_cpu_info); +} + +type_init(sclp_cpu_register_types) diff --git a/hw/s390x/sclpquiesce.c b/hw/s390x/sclpquiesce.c new file mode 100644 index 000000000..ce07b1688 --- /dev/null +++ b/hw/s390x/sclpquiesce.c @@ -0,0 +1,150 @@ +/* + * SCLP event type + * Signal Quiesce - trigger system powerdown request + * + * Copyright IBM, Corp. 2012 + * + * Authors: + * Heinz Graalfs + * + * This work is licensed under the terms of the GNU GPL, version 2 or (at your + * option) any later version. See the COPYING file in the top-level directory. 
+ * + */ + +#include "qemu/osdep.h" +#include "hw/s390x/sclp.h" +#include "migration/vmstate.h" +#include "qemu/module.h" +#include "sysemu/runstate.h" +#include "hw/s390x/event-facility.h" + +typedef struct SignalQuiesce { + EventBufferHeader ebh; + uint16_t timeout; + uint8_t unit; +} QEMU_PACKED SignalQuiesce; + +static bool can_handle_event(uint8_t type) +{ + return type == SCLP_EVENT_SIGNAL_QUIESCE; +} + +static sccb_mask_t send_mask(void) +{ + return SCLP_EVENT_MASK_SIGNAL_QUIESCE; +} + +static sccb_mask_t receive_mask(void) +{ + return 0; +} + +static int read_event_data(SCLPEvent *event, EventBufferHeader *evt_buf_hdr, + int *slen) +{ + SignalQuiesce *sq = (SignalQuiesce *) evt_buf_hdr; + + if (*slen < sizeof(SignalQuiesce)) { + return 0; + } + + if (!event->event_pending) { + return 0; + } + event->event_pending = false; + + sq->ebh.length = cpu_to_be16(sizeof(SignalQuiesce)); + sq->ebh.type = SCLP_EVENT_SIGNAL_QUIESCE; + sq->ebh.flags |= SCLP_EVENT_BUFFER_ACCEPTED; + /* + * system_powerdown does not have a timeout. Fortunately the + * timeout value is currently ignored by Linux, anyway + */ + sq->timeout = cpu_to_be16(0); + sq->unit = cpu_to_be16(0); + *slen -= sizeof(SignalQuiesce); + + return 1; +} + +static const VMStateDescription vmstate_sclpquiesce = { + .name = TYPE_SCLP_QUIESCE, + .version_id = 0, + .minimum_version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_BOOL(event_pending, SCLPEvent), + VMSTATE_END_OF_LIST() + } +}; + +typedef struct QuiesceNotifier QuiesceNotifier; + +static struct QuiesceNotifier { + Notifier notifier; + SCLPEvent *event; +} qn; + +static void quiesce_powerdown_req(Notifier *n, void *opaque) +{ + QuiesceNotifier *qn = container_of(n, QuiesceNotifier, notifier); + SCLPEvent *event = qn->event; + + event->event_pending = true; + /* trigger SCLP read operation */ + sclp_service_interrupt(0); +} + +static int quiesce_init(SCLPEvent *event) +{ + qn.notifier.notify = quiesce_powerdown_req; + qn.event = event; + + qemu_register_powerdown_notifier(&qn.notifier); + + return 0; +} + +static void quiesce_reset(DeviceState *dev) +{ + SCLPEvent *event = SCLP_EVENT(dev); + + event->event_pending = false; +} + +static void quiesce_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + SCLPEventClass *k = SCLP_EVENT_CLASS(klass); + + dc->reset = quiesce_reset; + dc->vmsd = &vmstate_sclpquiesce; + set_bit(DEVICE_CATEGORY_MISC, dc->categories); + /* + * Reason: This is just an internal device - the notifier should + * not be registered multiple times in quiesce_init() + */ + dc->user_creatable = false; + + k->init = quiesce_init; + k->get_send_mask = send_mask; + k->get_receive_mask = receive_mask; + k->can_handle_event = can_handle_event; + k->read_event_data = read_event_data; + k->write_event_data = NULL; +} + +static const TypeInfo sclp_quiesce_info = { + .name = TYPE_SCLP_QUIESCE, + .parent = TYPE_SCLP_EVENT, + .instance_size = sizeof(SCLPEvent), + .class_init = quiesce_class_init, + .class_size = sizeof(SCLPEventClass), +}; + +static void register_types(void) +{ + type_register_static(&sclp_quiesce_info); +} + +type_init(register_types) diff --git a/hw/s390x/tod-kvm.c b/hw/s390x/tod-kvm.c new file mode 100644 index 000000000..ec855811a --- /dev/null +++ b/hw/s390x/tod-kvm.c @@ -0,0 +1,163 @@ +/* + * TOD (Time Of Day) clock - KVM implementation + * + * Copyright 2018 Red Hat, Inc. + * Author(s): David Hildenbrand + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. 
+ * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/module.h" +#include "sysemu/runstate.h" +#include "hw/s390x/tod.h" +#include "kvm/kvm_s390x.h" + +static void kvm_s390_get_tod_raw(S390TOD *tod, Error **errp) +{ + int r; + + r = kvm_s390_get_clock_ext(&tod->high, &tod->low); + if (r == -ENXIO) { + r = kvm_s390_get_clock(&tod->high, &tod->low); + } + if (r) { + error_setg(errp, "Unable to get KVM guest TOD clock: %s", + strerror(-r)); + } +} + +static void kvm_s390_tod_get(const S390TODState *td, S390TOD *tod, Error **errp) +{ + if (td->stopped) { + *tod = td->base; + return; + } + + kvm_s390_get_tod_raw(tod, errp); +} + +static void kvm_s390_set_tod_raw(const S390TOD *tod, Error **errp) +{ + int r; + + r = kvm_s390_set_clock_ext(tod->high, tod->low); + if (r == -ENXIO) { + r = kvm_s390_set_clock(tod->high, tod->low); + } + if (r) { + error_setg(errp, "Unable to set KVM guest TOD clock: %s", + strerror(-r)); + } +} + +static void kvm_s390_tod_set(S390TODState *td, const S390TOD *tod, Error **errp) +{ + Error *local_err = NULL; + + /* + * Somebody (e.g. migration) set the TOD. We'll store it into KVM to + * properly detect errors now but take a look at the runstate to decide + * whether really to keep the tod running. E.g. during migration, this + * is the point where we want to stop the initially running TOD to fire + * it back up when actually starting the migrated guest. + */ + kvm_s390_set_tod_raw(tod, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + + if (runstate_is_running()) { + td->stopped = false; + } else { + td->stopped = true; + td->base = *tod; + } +} + +static void kvm_s390_tod_vm_state_change(void *opaque, bool running, + RunState state) +{ + S390TODState *td = opaque; + Error *local_err = NULL; + + if (running && td->stopped) { + /* Set the old TOD when running the VM - start the TOD clock. */ + kvm_s390_set_tod_raw(&td->base, &local_err); + if (local_err) { + warn_report_err(local_err); + } + /* Treat errors like the TOD was running all the time. */ + td->stopped = false; + } else if (!running && !td->stopped) { + /* Store the TOD when stopping the VM - stop the TOD clock. */ + kvm_s390_get_tod_raw(&td->base, &local_err); + if (local_err) { + /* Keep the TOD running in case we could not back it up. */ + warn_report_err(local_err); + } else { + td->stopped = true; + } + } +} + +static void kvm_s390_tod_realize(DeviceState *dev, Error **errp) +{ + S390TODState *td = S390_TOD(dev); + S390TODClass *tdc = S390_TOD_GET_CLASS(td); + Error *local_err = NULL; + + tdc->parent_realize(dev, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + + /* + * We need to know when the VM gets started/stopped to start/stop the TOD. + * As we can never have more than one TOD instance (and that will never be + * removed), registering here and never unregistering is good enough. + */ + qemu_add_vm_change_state_handler(kvm_s390_tod_vm_state_change, td); +} + +static void kvm_s390_tod_class_init(ObjectClass *oc, void *data) +{ + S390TODClass *tdc = S390_TOD_CLASS(oc); + + device_class_set_parent_realize(DEVICE_CLASS(oc), kvm_s390_tod_realize, + &tdc->parent_realize); + tdc->get = kvm_s390_tod_get; + tdc->set = kvm_s390_tod_set; +} + +static void kvm_s390_tod_init(Object *obj) +{ + S390TODState *td = S390_TOD(obj); + + /* + * The TOD is initially running (value stored in KVM). 
Avoid needless + * loading/storing of the TOD when starting a simple VM, so let it + * run although the (never started) VM is stopped. For migration, we + * will properly set the TOD later. + */ + td->stopped = false; +} + +static TypeInfo kvm_s390_tod_info = { + .name = TYPE_KVM_S390_TOD, + .parent = TYPE_S390_TOD, + .instance_size = sizeof(S390TODState), + .instance_init = kvm_s390_tod_init, + .class_init = kvm_s390_tod_class_init, + .class_size = sizeof(S390TODClass), +}; + +static void register_types(void) +{ + type_register_static(&kvm_s390_tod_info); +} +type_init(register_types); diff --git a/hw/s390x/tod-tcg.c b/hw/s390x/tod-tcg.c new file mode 100644 index 000000000..9bb94ff72 --- /dev/null +++ b/hw/s390x/tod-tcg.c @@ -0,0 +1,89 @@ +/* + * TOD (Time Of Day) clock - TCG implementation + * + * Copyright 2018 Red Hat, Inc. + * Author(s): David Hildenbrand + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "qemu-common.h" +#include "qapi/error.h" +#include "hw/s390x/tod.h" +#include "qemu/timer.h" +#include "qemu/cutils.h" +#include "qemu/module.h" +#include "cpu.h" +#include "tcg/tcg_s390x.h" + +static void qemu_s390_tod_get(const S390TODState *td, S390TOD *tod, + Error **errp) +{ + *tod = td->base; + + tod->low += time2tod(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL)); + if (tod->low < td->base.low) { + tod->high++; + } +} + +static void qemu_s390_tod_set(S390TODState *td, const S390TOD *tod, + Error **errp) +{ + CPUState *cpu; + + td->base = *tod; + + td->base.low -= time2tod(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL)); + if (td->base.low > tod->low) { + td->base.high--; + } + + /* + * The TOD has been changed and we have to recalculate the CKC values + * for all CPUs. We do this asynchronously, as "SET CLOCK should be + * issued only while all other activity on all CPUs .. has been + * suspended". + */ + CPU_FOREACH(cpu) { + async_run_on_cpu(cpu, tcg_s390_tod_updated, RUN_ON_CPU_NULL); + } +} + +static void qemu_s390_tod_class_init(ObjectClass *oc, void *data) +{ + S390TODClass *tdc = S390_TOD_CLASS(oc); + + tdc->get = qemu_s390_tod_get; + tdc->set = qemu_s390_tod_set; +} + +static void qemu_s390_tod_init(Object *obj) +{ + S390TODState *td = S390_TOD(obj); + struct tm tm; + + qemu_get_timedate(&tm, 0); + td->base.high = 0; + td->base.low = TOD_UNIX_EPOCH + (time2tod(mktimegm(&tm)) * 1000000000ULL); + if (td->base.low < TOD_UNIX_EPOCH) { + td->base.high += 1; + } +} + +static TypeInfo qemu_s390_tod_info = { + .name = TYPE_QEMU_S390_TOD, + .parent = TYPE_S390_TOD, + .instance_size = sizeof(S390TODState), + .instance_init = qemu_s390_tod_init, + .class_init = qemu_s390_tod_class_init, + .class_size = sizeof(S390TODClass), +}; + +static void register_types(void) +{ + type_register_static(&qemu_s390_tod_info); +} +type_init(register_types); diff --git a/hw/s390x/tod.c b/hw/s390x/tod.c new file mode 100644 index 000000000..fd5a36bf2 --- /dev/null +++ b/hw/s390x/tod.c @@ -0,0 +1,139 @@ +/* + * TOD (Time Of Day) clock + * + * Copyright 2018 Red Hat, Inc. + * Author(s): David Hildenbrand + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ */ + +#include "qemu/osdep.h" +#include "hw/s390x/tod.h" +#include "qapi/error.h" +#include "qemu/error-report.h" +#include "qemu/module.h" +#include "sysemu/kvm.h" +#include "sysemu/tcg.h" +#include "sysemu/qtest.h" +#include "migration/qemu-file-types.h" +#include "migration/register.h" + +void s390_init_tod(void) +{ + Object *obj; + + if (kvm_enabled()) { + obj = object_new(TYPE_KVM_S390_TOD); + } else if (tcg_enabled()) { + obj = object_new(TYPE_QEMU_S390_TOD); + } else if (qtest_enabled()) { + return; + } else { + error_report("current accelerator not handled in s390_init_tod!"); + abort(); + } + object_property_add_child(qdev_get_machine(), TYPE_S390_TOD, obj); + object_unref(obj); + + qdev_realize(DEVICE(obj), NULL, &error_fatal); +} + +S390TODState *s390_get_todstate(void) +{ + static S390TODState *ts; + + if (!ts) { + ts = S390_TOD(object_resolve_path_type("", TYPE_S390_TOD, NULL)); + } + + return ts; +} + +#define S390_TOD_CLOCK_VALUE_MISSING 0x00 +#define S390_TOD_CLOCK_VALUE_PRESENT 0x01 + +static void s390_tod_save(QEMUFile *f, void *opaque) +{ + S390TODState *td = opaque; + S390TODClass *tdc = S390_TOD_GET_CLASS(td); + Error *err = NULL; + S390TOD tod; + + tdc->get(td, &tod, &err); + if (err) { + warn_report_err(err); + error_printf("Guest clock will not be migrated " + "which could cause the guest to hang."); + qemu_put_byte(f, S390_TOD_CLOCK_VALUE_MISSING); + return; + } + + qemu_put_byte(f, S390_TOD_CLOCK_VALUE_PRESENT); + qemu_put_byte(f, tod.high); + qemu_put_be64(f, tod.low); +} + +static int s390_tod_load(QEMUFile *f, void *opaque, int version_id) +{ + S390TODState *td = opaque; + S390TODClass *tdc = S390_TOD_GET_CLASS(td); + Error *err = NULL; + S390TOD tod; + + if (qemu_get_byte(f) == S390_TOD_CLOCK_VALUE_MISSING) { + warn_report("Guest clock was not migrated. This could " + "cause the guest to hang."); + return 0; + } + + tod.high = qemu_get_byte(f); + tod.low = qemu_get_be64(f); + + tdc->set(td, &tod, &err); + if (err) { + error_report_err(err); + return -1; + } + return 0; +} + +static SaveVMHandlers savevm_tod = { + .save_state = s390_tod_save, + .load_state = s390_tod_load, +}; + +static void s390_tod_realize(DeviceState *dev, Error **errp) +{ + S390TODState *td = S390_TOD(dev); + + /* Legacy migration interface */ + register_savevm_live("todclock", 0, 1, &savevm_tod, td); +} + +static void s390_tod_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + + dc->desc = "TOD (Time Of Day) Clock"; + dc->realize = s390_tod_realize; + set_bit(DEVICE_CATEGORY_MISC, dc->categories); + + /* We only have one TOD clock in the system attached to the machine */ + dc->user_creatable = false; +} + +static TypeInfo s390_tod_info = { + .name = TYPE_S390_TOD, + .parent = TYPE_DEVICE, + .instance_size = sizeof(S390TODState), + .class_init = s390_tod_class_init, + .class_size = sizeof(S390TODClass), + .abstract = true, +}; + +static void register_types(void) +{ + type_register_static(&s390_tod_info); +} +type_init(register_types); diff --git a/hw/s390x/trace-events b/hw/s390x/trace-events new file mode 100644 index 000000000..8b9213eab --- /dev/null +++ b/hw/s390x/trace-events @@ -0,0 +1,21 @@ +# See docs/devel/tracing.rst for syntax documentation. 
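# (Each entry below declares a trace point and its printf-style format
# string; QEMU's tracetool generates a matching trace_<name>() helper that
# the code in this patch calls. An illustrative call site only, not part of
# this file:
#
#     trace_css_new_image(cssid, default_css ? "(default)" : "");
#
# Whether anything is actually printed depends on the trace backend
# selected at configure time.)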
+ +# css.c +css_enable_facility(const char *facility) "CSS: enable %s" +css_crw(uint8_t rsc, uint8_t erc, uint16_t rsid, const char *chained) "CSS: queueing crw: rsc=0x%x, erc=0x%x, rsid=0x%x %s" +css_chpid_add(uint8_t cssid, uint8_t chpid, uint8_t type) "CSS: add chpid %x.%02x (type 0x%02x)" +css_new_image(uint8_t cssid, const char *default_cssid) "CSS: add css image 0x%02x %s" +css_assign_subch(const char *do_assign, uint8_t cssid, uint8_t ssid, uint16_t schid, uint16_t devno) "CSS: %s %x.%x.%04x (devno 0x%04x)" +css_io_interrupt(int cssid, int ssid, int schid, uint32_t intparm, uint8_t isc, const char *conditional) "CSS: I/O interrupt on sch %x.%x.%04x (intparm 0x%08x, isc 0x%x) %s" +css_adapter_interrupt(uint8_t isc) "CSS: adapter I/O interrupt (isc 0x%x)" +css_do_sic(uint16_t mode, uint8_t isc) "CSS: set interruption mode 0x%x on isc 0x%x" + +# virtio-ccw.c +virtio_ccw_interpret_ccw(int cssid, int ssid, int schid, int cmd_code) "VIRTIO-CCW: %x.%x.%04x: interpret command 0x%x" +virtio_ccw_new_device(int cssid, int ssid, int schid, int devno, const char *devno_mode) "VIRTIO-CCW: add subchannel %x.%x.%04x, devno 0x%04x (%s)" +virtio_ccw_set_ind(uint64_t ind_loc, uint8_t ind_old, uint8_t ind_new) "VIRTIO-CCW: indicator at %" PRIu64 ": 0x%x->0x%x" + +# s390-pci-vfio.c +s390_pci_clp_cap(const char *id, uint32_t cap) "PCI: %s: missing expected CLP capability %u" +s390_pci_clp_cap_size(const char *id, uint32_t size, uint32_t cap) "PCI: %s: bad size (%u) for CLP capability %u" +s390_pci_clp_dev_info(const char *id) "PCI: %s: cannot read vfio device info" diff --git a/hw/s390x/trace.h b/hw/s390x/trace.h new file mode 100644 index 000000000..d6568b364 --- /dev/null +++ b/hw/s390x/trace.h @@ -0,0 +1 @@ +#include "trace/trace-hw_s390x.h" diff --git a/hw/s390x/vhost-user-fs-ccw.c b/hw/s390x/vhost-user-fs-ccw.c new file mode 100644 index 000000000..6c6f26929 --- /dev/null +++ b/hw/s390x/vhost-user-fs-ccw.c @@ -0,0 +1,75 @@ +/* + * virtio ccw vhost-user-fs implementation + * + * Copyright 2020 IBM Corp. + * + * This work is licensed under the terms of the GNU GPL, version 2 or (at + * your option) any later version. See the COPYING file in the top-level + * directory. 
+ */ +#include "qemu/osdep.h" +#include "hw/qdev-properties.h" +#include "qapi/error.h" +#include "hw/virtio/vhost-user-fs.h" +#include "virtio-ccw.h" + +typedef struct VHostUserFSCcw { + VirtioCcwDevice parent_obj; + VHostUserFS vdev; +} VHostUserFSCcw; + +#define TYPE_VHOST_USER_FS_CCW "vhost-user-fs-ccw" +#define VHOST_USER_FS_CCW(obj) \ + OBJECT_CHECK(VHostUserFSCcw, (obj), TYPE_VHOST_USER_FS_CCW) + + +static Property vhost_user_fs_ccw_properties[] = { + DEFINE_PROP_BIT("ioeventfd", VirtioCcwDevice, flags, + VIRTIO_CCW_FLAG_USE_IOEVENTFD_BIT, true), + DEFINE_PROP_UINT32("max_revision", VirtioCcwDevice, max_rev, + VIRTIO_CCW_MAX_REV), + DEFINE_PROP_END_OF_LIST(), +}; + +static void vhost_user_fs_ccw_realize(VirtioCcwDevice *ccw_dev, Error **errp) +{ + VHostUserFSCcw *dev = VHOST_USER_FS_CCW(ccw_dev); + DeviceState *vdev = DEVICE(&dev->vdev); + + qdev_realize(vdev, BUS(&ccw_dev->bus), errp); +} + +static void vhost_user_fs_ccw_instance_init(Object *obj) +{ + VHostUserFSCcw *dev = VHOST_USER_FS_CCW(obj); + VirtioCcwDevice *ccw_dev = VIRTIO_CCW_DEVICE(obj); + + ccw_dev->force_revision_1 = true; + virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev), + TYPE_VHOST_USER_FS); +} + +static void vhost_user_fs_ccw_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + VirtIOCCWDeviceClass *k = VIRTIO_CCW_DEVICE_CLASS(klass); + + k->realize = vhost_user_fs_ccw_realize; + device_class_set_props(dc, vhost_user_fs_ccw_properties); + set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); +} + +static const TypeInfo vhost_user_fs_ccw = { + .name = TYPE_VHOST_USER_FS_CCW, + .parent = TYPE_VIRTIO_CCW_DEVICE, + .instance_size = sizeof(VHostUserFSCcw), + .instance_init = vhost_user_fs_ccw_instance_init, + .class_init = vhost_user_fs_ccw_class_init, +}; + +static void vhost_user_fs_ccw_register(void) +{ + type_register_static(&vhost_user_fs_ccw); +} + +type_init(vhost_user_fs_ccw_register) diff --git a/hw/s390x/vhost-vsock-ccw.c b/hw/s390x/vhost-vsock-ccw.c new file mode 100644 index 000000000..246416a8f --- /dev/null +++ b/hw/s390x/vhost-vsock-ccw.c @@ -0,0 +1,73 @@ +/* + * vhost vsock ccw implementation + * + * This work is licensed under the terms of the GNU GPL, version 2 or (at + * your option) any later version. See the COPYING file in the top-level + * directory. 
+ */ + +#include "qemu/osdep.h" +#include "hw/qdev-properties.h" +#include "hw/virtio/virtio.h" +#include "qapi/error.h" +#include "qemu/module.h" +#include "virtio-ccw.h" + +static Property vhost_vsock_ccw_properties[] = { + DEFINE_PROP_UINT32("max_revision", VirtioCcwDevice, max_rev, + VIRTIO_CCW_MAX_REV), + DEFINE_PROP_END_OF_LIST(), +}; + +static void vhost_vsock_ccw_realize(VirtioCcwDevice *ccw_dev, Error **errp) +{ + VHostVSockCCWState *dev = VHOST_VSOCK_CCW(ccw_dev); + DeviceState *vdev = DEVICE(&dev->vdev); + + qdev_realize(vdev, BUS(&ccw_dev->bus), errp); +} + +static void vhost_vsock_ccw_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + VirtIOCCWDeviceClass *k = VIRTIO_CCW_DEVICE_CLASS(klass); + + k->realize = vhost_vsock_ccw_realize; + set_bit(DEVICE_CATEGORY_MISC, dc->categories); + device_class_set_props(dc, vhost_vsock_ccw_properties); +} + +static void vhost_vsock_ccw_instance_init(Object *obj) +{ + VHostVSockCCWState *dev = VHOST_VSOCK_CCW(obj); + VirtioCcwDevice *ccw_dev = VIRTIO_CCW_DEVICE(obj); + VirtIODevice *virtio_dev; + + virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev), + TYPE_VHOST_VSOCK); + + virtio_dev = VIRTIO_DEVICE(&dev->vdev); + + /* + * To avoid migration issues, we force virtio version 1 only when + * legacy check is enabled in the new machine types (>= 5.1). + */ + if (!virtio_legacy_check_disabled(virtio_dev)) { + ccw_dev->force_revision_1 = true; + } +} + +static const TypeInfo vhost_vsock_ccw_info = { + .name = TYPE_VHOST_VSOCK_CCW, + .parent = TYPE_VIRTIO_CCW_DEVICE, + .instance_size = sizeof(VHostVSockCCWState), + .instance_init = vhost_vsock_ccw_instance_init, + .class_init = vhost_vsock_ccw_class_init, +}; + +static void vhost_vsock_ccw_register(void) +{ + type_register_static(&vhost_vsock_ccw_info); +} + +type_init(vhost_vsock_ccw_register) diff --git a/hw/s390x/virtio-ccw-9p.c b/hw/s390x/virtio-ccw-9p.c new file mode 100644 index 000000000..88c8884fc --- /dev/null +++ b/hw/s390x/virtio-ccw-9p.c @@ -0,0 +1,66 @@ +/* + * virtio ccw 9p implementation + * + * Copyright 2012, 2015 IBM Corp. + * Author(s): Pierre Morel + * + * This work is licensed under the terms of the GNU GPL, version 2 or (at + * your option) any later version. See the COPYING file in the top-level + * directory. 
+ */ + +#include "qemu/osdep.h" +#include "hw/qdev-properties.h" +#include "hw/virtio/virtio.h" +#include "qapi/error.h" +#include "qemu/module.h" +#include "virtio-ccw.h" + +static void virtio_ccw_9p_realize(VirtioCcwDevice *ccw_dev, Error **errp) +{ + V9fsCCWState *dev = VIRTIO_9P_CCW(ccw_dev); + DeviceState *vdev = DEVICE(&dev->vdev); + + qdev_realize(vdev, BUS(&ccw_dev->bus), errp); +} + +static void virtio_ccw_9p_instance_init(Object *obj) +{ + V9fsCCWState *dev = VIRTIO_9P_CCW(obj); + + virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev), + TYPE_VIRTIO_9P); +} + +static Property virtio_ccw_9p_properties[] = { + DEFINE_PROP_BIT("ioeventfd", VirtioCcwDevice, flags, + VIRTIO_CCW_FLAG_USE_IOEVENTFD_BIT, true), + DEFINE_PROP_UINT32("max_revision", VirtioCcwDevice, max_rev, + VIRTIO_CCW_MAX_REV), + DEFINE_PROP_END_OF_LIST(), +}; + +static void virtio_ccw_9p_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + VirtIOCCWDeviceClass *k = VIRTIO_CCW_DEVICE_CLASS(klass); + + k->realize = virtio_ccw_9p_realize; + device_class_set_props(dc, virtio_ccw_9p_properties); + set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); +} + +static const TypeInfo virtio_ccw_9p_info = { + .name = TYPE_VIRTIO_9P_CCW, + .parent = TYPE_VIRTIO_CCW_DEVICE, + .instance_size = sizeof(V9fsCCWState), + .instance_init = virtio_ccw_9p_instance_init, + .class_init = virtio_ccw_9p_class_init, +}; + +static void virtio_ccw_9p_register(void) +{ + type_register_static(&virtio_ccw_9p_info); +} + +type_init(virtio_ccw_9p_register) diff --git a/hw/s390x/virtio-ccw-balloon.c b/hw/s390x/virtio-ccw-balloon.c new file mode 100644 index 000000000..4c7631a43 --- /dev/null +++ b/hw/s390x/virtio-ccw-balloon.c @@ -0,0 +1,71 @@ +/* + * virtio ccw balloon implementation + * + * Copyright 2012, 2015 IBM Corp. + * Author(s): Cornelia Huck + * + * This work is licensed under the terms of the GNU GPL, version 2 or (at + * your option) any later version. See the COPYING file in the top-level + * directory. 
+ */ + +#include "qemu/osdep.h" +#include "hw/qdev-properties.h" +#include "hw/virtio/virtio.h" +#include "qapi/error.h" +#include "qemu/module.h" +#include "virtio-ccw.h" + +static void virtio_ccw_balloon_realize(VirtioCcwDevice *ccw_dev, Error **errp) +{ + VirtIOBalloonCcw *dev = VIRTIO_BALLOON_CCW(ccw_dev); + DeviceState *vdev = DEVICE(&dev->vdev); + + qdev_realize(vdev, BUS(&ccw_dev->bus), errp); +} + +static void virtio_ccw_balloon_instance_init(Object *obj) +{ + VirtIOBalloonCcw *dev = VIRTIO_BALLOON_CCW(obj); + + virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev), + TYPE_VIRTIO_BALLOON); + object_property_add_alias(obj, "guest-stats", OBJECT(&dev->vdev), + "guest-stats"); + object_property_add_alias(obj, "guest-stats-polling-interval", + OBJECT(&dev->vdev), + "guest-stats-polling-interval"); +} + +static Property virtio_ccw_balloon_properties[] = { + DEFINE_PROP_BIT("ioeventfd", VirtioCcwDevice, flags, + VIRTIO_CCW_FLAG_USE_IOEVENTFD_BIT, true), + DEFINE_PROP_UINT32("max_revision", VirtioCcwDevice, max_rev, + VIRTIO_CCW_MAX_REV), + DEFINE_PROP_END_OF_LIST(), +}; + +static void virtio_ccw_balloon_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + VirtIOCCWDeviceClass *k = VIRTIO_CCW_DEVICE_CLASS(klass); + + k->realize = virtio_ccw_balloon_realize; + device_class_set_props(dc, virtio_ccw_balloon_properties); + set_bit(DEVICE_CATEGORY_MISC, dc->categories); +} + +static const TypeInfo virtio_ccw_balloon = { + .name = TYPE_VIRTIO_BALLOON_CCW, + .parent = TYPE_VIRTIO_CCW_DEVICE, + .instance_size = sizeof(VirtIOBalloonCcw), + .instance_init = virtio_ccw_balloon_instance_init, + .class_init = virtio_ccw_balloon_class_init, +}; + +static void virtio_ccw_balloon_register(void) +{ + type_register_static(&virtio_ccw_balloon); +} + +type_init(virtio_ccw_balloon_register) diff --git a/hw/s390x/virtio-ccw-blk.c b/hw/s390x/virtio-ccw-blk.c new file mode 100644 index 000000000..2294ce1ce --- /dev/null +++ b/hw/s390x/virtio-ccw-blk.c @@ -0,0 +1,68 @@ +/* + * virtio ccw block implementation + * + * Copyright 2012, 2015 IBM Corp. + * Author(s): Cornelia Huck + * + * This work is licensed under the terms of the GNU GPL, version 2 or (at + * your option) any later version. See the COPYING file in the top-level + * directory. 
+ */ + +#include "qemu/osdep.h" +#include "hw/qdev-properties.h" +#include "hw/virtio/virtio.h" +#include "qapi/error.h" +#include "qemu/module.h" +#include "virtio-ccw.h" + +static void virtio_ccw_blk_realize(VirtioCcwDevice *ccw_dev, Error **errp) +{ + VirtIOBlkCcw *dev = VIRTIO_BLK_CCW(ccw_dev); + DeviceState *vdev = DEVICE(&dev->vdev); + + qdev_realize(vdev, BUS(&ccw_dev->bus), errp); +} + +static void virtio_ccw_blk_instance_init(Object *obj) +{ + VirtIOBlkCcw *dev = VIRTIO_BLK_CCW(obj); + + virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev), + TYPE_VIRTIO_BLK); + object_property_add_alias(obj, "bootindex", OBJECT(&dev->vdev), + "bootindex"); +} + +static Property virtio_ccw_blk_properties[] = { + DEFINE_PROP_BIT("ioeventfd", VirtioCcwDevice, flags, + VIRTIO_CCW_FLAG_USE_IOEVENTFD_BIT, true), + DEFINE_PROP_UINT32("max_revision", VirtioCcwDevice, max_rev, + VIRTIO_CCW_MAX_REV), + DEFINE_PROP_END_OF_LIST(), +}; + +static void virtio_ccw_blk_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + VirtIOCCWDeviceClass *k = VIRTIO_CCW_DEVICE_CLASS(klass); + + k->realize = virtio_ccw_blk_realize; + device_class_set_props(dc, virtio_ccw_blk_properties); + set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); +} + +static const TypeInfo virtio_ccw_blk = { + .name = TYPE_VIRTIO_BLK_CCW, + .parent = TYPE_VIRTIO_CCW_DEVICE, + .instance_size = sizeof(VirtIOBlkCcw), + .instance_init = virtio_ccw_blk_instance_init, + .class_init = virtio_ccw_blk_class_init, +}; + +static void virtio_ccw_blk_register(void) +{ + type_register_static(&virtio_ccw_blk); +} + +type_init(virtio_ccw_blk_register) diff --git a/hw/s390x/virtio-ccw-crypto.c b/hw/s390x/virtio-ccw-crypto.c new file mode 100644 index 000000000..358c74fb4 --- /dev/null +++ b/hw/s390x/virtio-ccw-crypto.c @@ -0,0 +1,69 @@ +/* + * virtio ccw crypto implementation + * + * Copyright 2012, 2015 IBM Corp. + * + * This work is licensed under the terms of the GNU GPL, version 2 or (at + * your option) any later version. See the COPYING file in the top-level + * directory. 
+ */ + +#include "qemu/osdep.h" +#include "hw/qdev-properties.h" +#include "hw/virtio/virtio.h" +#include "qapi/error.h" +#include "qemu/module.h" +#include "virtio-ccw.h" + +static void virtio_ccw_crypto_realize(VirtioCcwDevice *ccw_dev, Error **errp) +{ + VirtIOCryptoCcw *dev = VIRTIO_CRYPTO_CCW(ccw_dev); + DeviceState *vdev = DEVICE(&dev->vdev); + + if (!qdev_realize(vdev, BUS(&ccw_dev->bus), errp)) { + return; + } +} + +static void virtio_ccw_crypto_instance_init(Object *obj) +{ + VirtIOCryptoCcw *dev = VIRTIO_CRYPTO_CCW(obj); + VirtioCcwDevice *ccw_dev = VIRTIO_CCW_DEVICE(obj); + + ccw_dev->force_revision_1 = true; + virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev), + TYPE_VIRTIO_CRYPTO); +} + +static Property virtio_ccw_crypto_properties[] = { + DEFINE_PROP_BIT("ioeventfd", VirtioCcwDevice, flags, + VIRTIO_CCW_FLAG_USE_IOEVENTFD_BIT, true), + DEFINE_PROP_UINT32("max_revision", VirtioCcwDevice, max_rev, + VIRTIO_CCW_MAX_REV), + DEFINE_PROP_END_OF_LIST(), +}; + +static void virtio_ccw_crypto_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + VirtIOCCWDeviceClass *k = VIRTIO_CCW_DEVICE_CLASS(klass); + + k->realize = virtio_ccw_crypto_realize; + device_class_set_props(dc, virtio_ccw_crypto_properties); + set_bit(DEVICE_CATEGORY_MISC, dc->categories); +} + +static const TypeInfo virtio_ccw_crypto = { + .name = TYPE_VIRTIO_CRYPTO_CCW, + .parent = TYPE_VIRTIO_CCW_DEVICE, + .instance_size = sizeof(VirtIOCryptoCcw), + .instance_init = virtio_ccw_crypto_instance_init, + .class_init = virtio_ccw_crypto_class_init, +}; + +static void virtio_ccw_crypto_register(void) +{ + type_register_static(&virtio_ccw_crypto); +} + +type_init(virtio_ccw_crypto_register) diff --git a/hw/s390x/virtio-ccw-gpu.c b/hw/s390x/virtio-ccw-gpu.c new file mode 100644 index 000000000..5868a2a07 --- /dev/null +++ b/hw/s390x/virtio-ccw-gpu.c @@ -0,0 +1,73 @@ +/* + * virtio ccw gpu implementation + * + * Copyright 2012, 2015 IBM Corp. + * + * This work is licensed under the terms of the GNU GPL, version 2 or (at + * your option) any later version. See the COPYING file in the top-level + * directory. 
+ */ + +#include "qemu/osdep.h" +#include "hw/qdev-properties.h" +#include "hw/virtio/virtio.h" +#include "qapi/error.h" +#include "qemu/module.h" +#include "virtio-ccw.h" + +static void virtio_ccw_gpu_realize(VirtioCcwDevice *ccw_dev, Error **errp) +{ + VirtIOGPUCcw *dev = VIRTIO_GPU_CCW(ccw_dev); + DeviceState *vdev = DEVICE(&dev->vdev); + + qdev_realize(vdev, BUS(&ccw_dev->bus), errp); +} + +static void virtio_ccw_gpu_instance_init(Object *obj) +{ + VirtIOGPUCcw *dev = VIRTIO_GPU_CCW(obj); + VirtioCcwDevice *ccw_dev = VIRTIO_CCW_DEVICE(obj); + + ccw_dev->force_revision_1 = true; + virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev), + TYPE_VIRTIO_GPU); +} + +static Property virtio_ccw_gpu_properties[] = { + DEFINE_PROP_BIT("ioeventfd", VirtioCcwDevice, flags, + VIRTIO_CCW_FLAG_USE_IOEVENTFD_BIT, true), + DEFINE_PROP_UINT32("max_revision", VirtioCcwDevice, max_rev, + VIRTIO_CCW_MAX_REV), + DEFINE_PROP_END_OF_LIST(), +}; + +static void virtio_ccw_gpu_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + VirtIOCCWDeviceClass *k = VIRTIO_CCW_DEVICE_CLASS(klass); + + k->realize = virtio_ccw_gpu_realize; + device_class_set_props(dc, virtio_ccw_gpu_properties); + dc->hotpluggable = false; + set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories); +} + +static const TypeInfo virtio_ccw_gpu = { + .name = TYPE_VIRTIO_GPU_CCW, + .parent = TYPE_VIRTIO_CCW_DEVICE, + .instance_size = sizeof(VirtIOGPUCcw), + .instance_init = virtio_ccw_gpu_instance_init, + .class_init = virtio_ccw_gpu_class_init, +}; +module_obj(TYPE_VIRTIO_GPU_CCW); + +static void virtio_ccw_gpu_register(void) +{ + if (have_virtio_ccw) { + type_register_static(&virtio_ccw_gpu); + } +} + +type_init(virtio_ccw_gpu_register) + +module_arch("s390x"); diff --git a/hw/s390x/virtio-ccw-input.c b/hw/s390x/virtio-ccw-input.c new file mode 100644 index 000000000..83136fbba --- /dev/null +++ b/hw/s390x/virtio-ccw-input.c @@ -0,0 +1,119 @@ +/* + * virtio ccw input implementation + * + * Copyright 2012, 2015 IBM Corp. + * + * This work is licensed under the terms of the GNU GPL, version 2 or (at + * your option) any later version. See the COPYING file in the top-level + * directory. 
+ */ + +#include "qemu/osdep.h" +#include "hw/qdev-properties.h" +#include "hw/virtio/virtio.h" +#include "qapi/error.h" +#include "qemu/module.h" +#include "virtio-ccw.h" + +static void virtio_ccw_input_realize(VirtioCcwDevice *ccw_dev, Error **errp) +{ + VirtIOInputCcw *dev = VIRTIO_INPUT_CCW(ccw_dev); + DeviceState *vdev = DEVICE(&dev->vdev); + + qdev_realize(vdev, BUS(&ccw_dev->bus), errp); +} + +static Property virtio_ccw_input_properties[] = { + DEFINE_PROP_BIT("ioeventfd", VirtioCcwDevice, flags, + VIRTIO_CCW_FLAG_USE_IOEVENTFD_BIT, true), + DEFINE_PROP_UINT32("max_revision", VirtioCcwDevice, max_rev, + VIRTIO_CCW_MAX_REV), + DEFINE_PROP_END_OF_LIST(), +}; + +static void virtio_ccw_input_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + VirtIOCCWDeviceClass *k = VIRTIO_CCW_DEVICE_CLASS(klass); + + k->realize = virtio_ccw_input_realize; + device_class_set_props(dc, virtio_ccw_input_properties); + set_bit(DEVICE_CATEGORY_INPUT, dc->categories); +} + +static void virtio_ccw_keyboard_instance_init(Object *obj) +{ + VirtIOInputHIDCcw *dev = VIRTIO_INPUT_HID_CCW(obj); + VirtioCcwDevice *ccw_dev = VIRTIO_CCW_DEVICE(obj); + + ccw_dev->force_revision_1 = true; + virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev), + TYPE_VIRTIO_KEYBOARD); +} + +static void virtio_ccw_mouse_instance_init(Object *obj) +{ + VirtIOInputHIDCcw *dev = VIRTIO_INPUT_HID_CCW(obj); + VirtioCcwDevice *ccw_dev = VIRTIO_CCW_DEVICE(obj); + + ccw_dev->force_revision_1 = true; + virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev), + TYPE_VIRTIO_MOUSE); +} + +static void virtio_ccw_tablet_instance_init(Object *obj) +{ + VirtIOInputHIDCcw *dev = VIRTIO_INPUT_HID_CCW(obj); + VirtioCcwDevice *ccw_dev = VIRTIO_CCW_DEVICE(obj); + + ccw_dev->force_revision_1 = true; + virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev), + TYPE_VIRTIO_TABLET); +} + +static const TypeInfo virtio_ccw_input = { + .name = TYPE_VIRTIO_INPUT_CCW, + .parent = TYPE_VIRTIO_CCW_DEVICE, + .instance_size = sizeof(VirtIOInputCcw), + .class_init = virtio_ccw_input_class_init, + .abstract = true, +}; + +static const TypeInfo virtio_ccw_input_hid = { + .name = TYPE_VIRTIO_INPUT_HID_CCW, + .parent = TYPE_VIRTIO_INPUT_CCW, + .instance_size = sizeof(VirtIOInputHIDCcw), + .abstract = true, +}; + +static const TypeInfo virtio_ccw_keyboard = { + .name = TYPE_VIRTIO_KEYBOARD_CCW, + .parent = TYPE_VIRTIO_INPUT_HID_CCW, + .instance_size = sizeof(VirtIOInputHIDCcw), + .instance_init = virtio_ccw_keyboard_instance_init, +}; + +static const TypeInfo virtio_ccw_mouse = { + .name = TYPE_VIRTIO_MOUSE_CCW, + .parent = TYPE_VIRTIO_INPUT_HID_CCW, + .instance_size = sizeof(VirtIOInputHIDCcw), + .instance_init = virtio_ccw_mouse_instance_init, +}; + +static const TypeInfo virtio_ccw_tablet = { + .name = TYPE_VIRTIO_TABLET_CCW, + .parent = TYPE_VIRTIO_INPUT_HID_CCW, + .instance_size = sizeof(VirtIOInputHIDCcw), + .instance_init = virtio_ccw_tablet_instance_init, +}; + +static void virtio_ccw_input_register(void) +{ + type_register_static(&virtio_ccw_input); + type_register_static(&virtio_ccw_input_hid); + type_register_static(&virtio_ccw_keyboard); + type_register_static(&virtio_ccw_mouse); + type_register_static(&virtio_ccw_tablet); +} + +type_init(virtio_ccw_input_register) diff --git a/hw/s390x/virtio-ccw-net.c b/hw/s390x/virtio-ccw-net.c new file mode 100644 index 000000000..3860d4e6e --- /dev/null +++ b/hw/s390x/virtio-ccw-net.c @@ -0,0 +1,71 @@ +/* + * virtio ccw net implementation + * + * Copyright 2012, 
2015 IBM Corp. + * Author(s): Cornelia Huck + * + * This work is licensed under the terms of the GNU GPL, version 2 or (at + * your option) any later version. See the COPYING file in the top-level + * directory. + */ + +#include "qemu/osdep.h" +#include "hw/qdev-properties.h" +#include "hw/virtio/virtio.h" +#include "qapi/error.h" +#include "qemu/module.h" +#include "virtio-ccw.h" + +static void virtio_ccw_net_realize(VirtioCcwDevice *ccw_dev, Error **errp) +{ + DeviceState *qdev = DEVICE(ccw_dev); + VirtIONetCcw *dev = VIRTIO_NET_CCW(ccw_dev); + DeviceState *vdev = DEVICE(&dev->vdev); + + virtio_net_set_netclient_name(&dev->vdev, qdev->id, + object_get_typename(OBJECT(qdev))); + qdev_realize(vdev, BUS(&ccw_dev->bus), errp); +} + +static void virtio_ccw_net_instance_init(Object *obj) +{ + VirtIONetCcw *dev = VIRTIO_NET_CCW(obj); + + virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev), + TYPE_VIRTIO_NET); + object_property_add_alias(obj, "bootindex", OBJECT(&dev->vdev), + "bootindex"); +} + +static Property virtio_ccw_net_properties[] = { + DEFINE_PROP_BIT("ioeventfd", VirtioCcwDevice, flags, + VIRTIO_CCW_FLAG_USE_IOEVENTFD_BIT, true), + DEFINE_PROP_UINT32("max_revision", VirtioCcwDevice, max_rev, + VIRTIO_CCW_MAX_REV), + DEFINE_PROP_END_OF_LIST(), +}; + +static void virtio_ccw_net_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + VirtIOCCWDeviceClass *k = VIRTIO_CCW_DEVICE_CLASS(klass); + + k->realize = virtio_ccw_net_realize; + device_class_set_props(dc, virtio_ccw_net_properties); + set_bit(DEVICE_CATEGORY_NETWORK, dc->categories); +} + +static const TypeInfo virtio_ccw_net = { + .name = TYPE_VIRTIO_NET_CCW, + .parent = TYPE_VIRTIO_CCW_DEVICE, + .instance_size = sizeof(VirtIONetCcw), + .instance_init = virtio_ccw_net_instance_init, + .class_init = virtio_ccw_net_class_init, +}; + +static void virtio_ccw_net_register(void) +{ + type_register_static(&virtio_ccw_net); +} + +type_init(virtio_ccw_net_register) diff --git a/hw/s390x/virtio-ccw-rng.c b/hw/s390x/virtio-ccw-rng.c new file mode 100644 index 000000000..2e3a9da5e --- /dev/null +++ b/hw/s390x/virtio-ccw-rng.c @@ -0,0 +1,68 @@ +/* + * virtio ccw random number generator implementation + * + * Copyright 2012, 2015 IBM Corp. + * Author(s): Cornelia Huck + * + * This work is licensed under the terms of the GNU GPL, version 2 or (at + * your option) any later version. See the COPYING file in the top-level + * directory. 
+ */ + +#include "qemu/osdep.h" +#include "hw/qdev-properties.h" +#include "hw/virtio/virtio.h" +#include "qapi/error.h" +#include "qemu/module.h" +#include "virtio-ccw.h" + +static void virtio_ccw_rng_realize(VirtioCcwDevice *ccw_dev, Error **errp) +{ + VirtIORNGCcw *dev = VIRTIO_RNG_CCW(ccw_dev); + DeviceState *vdev = DEVICE(&dev->vdev); + + if (!qdev_realize(vdev, BUS(&ccw_dev->bus), errp)) { + return; + } +} + +static void virtio_ccw_rng_instance_init(Object *obj) +{ + VirtIORNGCcw *dev = VIRTIO_RNG_CCW(obj); + + virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev), + TYPE_VIRTIO_RNG); +} + +static Property virtio_ccw_rng_properties[] = { + DEFINE_PROP_BIT("ioeventfd", VirtioCcwDevice, flags, + VIRTIO_CCW_FLAG_USE_IOEVENTFD_BIT, true), + DEFINE_PROP_UINT32("max_revision", VirtioCcwDevice, max_rev, + VIRTIO_CCW_MAX_REV), + DEFINE_PROP_END_OF_LIST(), +}; + +static void virtio_ccw_rng_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + VirtIOCCWDeviceClass *k = VIRTIO_CCW_DEVICE_CLASS(klass); + + k->realize = virtio_ccw_rng_realize; + device_class_set_props(dc, virtio_ccw_rng_properties); + set_bit(DEVICE_CATEGORY_MISC, dc->categories); +} + +static const TypeInfo virtio_ccw_rng = { + .name = TYPE_VIRTIO_RNG_CCW, + .parent = TYPE_VIRTIO_CCW_DEVICE, + .instance_size = sizeof(VirtIORNGCcw), + .instance_init = virtio_ccw_rng_instance_init, + .class_init = virtio_ccw_rng_class_init, +}; + +static void virtio_ccw_rng_register(void) +{ + type_register_static(&virtio_ccw_rng); +} + +type_init(virtio_ccw_rng_register) diff --git a/hw/s390x/virtio-ccw-scsi.c b/hw/s390x/virtio-ccw-scsi.c new file mode 100644 index 000000000..6e4beef70 --- /dev/null +++ b/hw/s390x/virtio-ccw-scsi.c @@ -0,0 +1,125 @@ +/* + * virtio ccw scsi implementation + * + * Copyright 2012, 2015 IBM Corp. + * Author(s): Cornelia Huck + * + * This work is licensed under the terms of the GNU GPL, version 2 or (at + * your option) any later version. See the COPYING file in the top-level + * directory. + */ + +#include "qemu/osdep.h" +#include "hw/qdev-properties.h" +#include "hw/virtio/virtio.h" +#include "qapi/error.h" +#include "qemu/module.h" +#include "virtio-ccw.h" + +static void virtio_ccw_scsi_realize(VirtioCcwDevice *ccw_dev, Error **errp) +{ + VirtIOSCSICcw *dev = VIRTIO_SCSI_CCW(ccw_dev); + DeviceState *vdev = DEVICE(&dev->vdev); + DeviceState *qdev = DEVICE(ccw_dev); + char *bus_name; + + /* + * For command line compatibility, this sets the virtio-scsi-device bus + * name as before. 
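+     * For example, with "-device virtio-scsi-ccw,id=scsi0" the child bus
+     * keeps the name "scsi0.0", so existing command lines that attach
+     * disks via "-device scsi-hd,bus=scsi0.0,..." continue to work.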
+ */ + if (qdev->id) { + bus_name = g_strdup_printf("%s.0", qdev->id); + virtio_device_set_child_bus_name(VIRTIO_DEVICE(vdev), bus_name); + g_free(bus_name); + } + + qdev_realize(vdev, BUS(&ccw_dev->bus), errp); +} + +static void virtio_ccw_scsi_instance_init(Object *obj) +{ + VirtIOSCSICcw *dev = VIRTIO_SCSI_CCW(obj); + + virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev), + TYPE_VIRTIO_SCSI); +} + +static Property virtio_ccw_scsi_properties[] = { + DEFINE_PROP_BIT("ioeventfd", VirtioCcwDevice, flags, + VIRTIO_CCW_FLAG_USE_IOEVENTFD_BIT, true), + DEFINE_PROP_UINT32("max_revision", VirtioCcwDevice, max_rev, + VIRTIO_CCW_MAX_REV), + DEFINE_PROP_END_OF_LIST(), +}; + +static void virtio_ccw_scsi_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + VirtIOCCWDeviceClass *k = VIRTIO_CCW_DEVICE_CLASS(klass); + + k->realize = virtio_ccw_scsi_realize; + device_class_set_props(dc, virtio_ccw_scsi_properties); + set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); +} + +static const TypeInfo virtio_ccw_scsi = { + .name = TYPE_VIRTIO_SCSI_CCW, + .parent = TYPE_VIRTIO_CCW_DEVICE, + .instance_size = sizeof(VirtIOSCSICcw), + .instance_init = virtio_ccw_scsi_instance_init, + .class_init = virtio_ccw_scsi_class_init, +}; + +#ifdef CONFIG_VHOST_SCSI + +static void vhost_ccw_scsi_realize(VirtioCcwDevice *ccw_dev, Error **errp) +{ + VHostSCSICcw *dev = VHOST_SCSI_CCW(ccw_dev); + DeviceState *vdev = DEVICE(&dev->vdev); + + qdev_realize(vdev, BUS(&ccw_dev->bus), errp); +} + +static void vhost_ccw_scsi_instance_init(Object *obj) +{ + VHostSCSICcw *dev = VHOST_SCSI_CCW(obj); + + virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev), + TYPE_VHOST_SCSI); +} + +static Property vhost_ccw_scsi_properties[] = { + DEFINE_PROP_UINT32("max_revision", VirtioCcwDevice, max_rev, + VIRTIO_CCW_MAX_REV), + DEFINE_PROP_END_OF_LIST(), +}; + +static void vhost_ccw_scsi_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + VirtIOCCWDeviceClass *k = VIRTIO_CCW_DEVICE_CLASS(klass); + + k->realize = vhost_ccw_scsi_realize; + device_class_set_props(dc, vhost_ccw_scsi_properties); + set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); +} + +static const TypeInfo vhost_ccw_scsi = { + .name = TYPE_VHOST_SCSI_CCW, + .parent = TYPE_VIRTIO_CCW_DEVICE, + .instance_size = sizeof(VHostSCSICcw), + .instance_init = vhost_ccw_scsi_instance_init, + .class_init = vhost_ccw_scsi_class_init, +}; + +#endif + +static void virtio_ccw_scsi_register(void) +{ + type_register_static(&virtio_ccw_scsi); +#ifdef CONFIG_VHOST_SCSI + type_register_static(&vhost_ccw_scsi); +#endif +} + +type_init(virtio_ccw_scsi_register) diff --git a/hw/s390x/virtio-ccw-serial.c b/hw/s390x/virtio-ccw-serial.c new file mode 100644 index 000000000..61958228d --- /dev/null +++ b/hw/s390x/virtio-ccw-serial.c @@ -0,0 +1,79 @@ +/* + * virtio ccw serial implementation + * + * Copyright 2012, 2015 IBM Corp. + * Author(s): Cornelia Huck + * + * This work is licensed under the terms of the GNU GPL, version 2 or (at + * your option) any later version. See the COPYING file in the top-level + * directory. 
+ */ + +#include "qemu/osdep.h" +#include "hw/virtio/virtio.h" +#include "qemu/module.h" +#include "hw/qdev-properties.h" +#include "hw/virtio/virtio-serial.h" +#include "virtio-ccw.h" + +static void virtio_ccw_serial_realize(VirtioCcwDevice *ccw_dev, Error **errp) +{ + VirtioSerialCcw *dev = VIRTIO_SERIAL_CCW(ccw_dev); + DeviceState *vdev = DEVICE(&dev->vdev); + DeviceState *proxy = DEVICE(ccw_dev); + char *bus_name; + + /* + * For command line compatibility, this sets the virtio-serial-device bus + * name as before. + */ + if (proxy->id) { + bus_name = g_strdup_printf("%s.0", proxy->id); + virtio_device_set_child_bus_name(VIRTIO_DEVICE(vdev), bus_name); + g_free(bus_name); + } + + qdev_realize(vdev, BUS(&ccw_dev->bus), errp); +} + + +static void virtio_ccw_serial_instance_init(Object *obj) +{ + VirtioSerialCcw *dev = VIRTIO_SERIAL_CCW(obj); + + virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev), + TYPE_VIRTIO_SERIAL); +} + +static Property virtio_ccw_serial_properties[] = { + DEFINE_PROP_BIT("ioeventfd", VirtioCcwDevice, flags, + VIRTIO_CCW_FLAG_USE_IOEVENTFD_BIT, true), + DEFINE_PROP_UINT32("max_revision", VirtioCcwDevice, max_rev, + VIRTIO_CCW_MAX_REV), + DEFINE_PROP_END_OF_LIST(), +}; + +static void virtio_ccw_serial_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + VirtIOCCWDeviceClass *k = VIRTIO_CCW_DEVICE_CLASS(klass); + + k->realize = virtio_ccw_serial_realize; + device_class_set_props(dc, virtio_ccw_serial_properties); + set_bit(DEVICE_CATEGORY_INPUT, dc->categories); +} + +static const TypeInfo virtio_ccw_serial = { + .name = TYPE_VIRTIO_SERIAL_CCW, + .parent = TYPE_VIRTIO_CCW_DEVICE, + .instance_size = sizeof(VirtioSerialCcw), + .instance_init = virtio_ccw_serial_instance_init, + .class_init = virtio_ccw_serial_class_init, +}; + +static void virtio_ccw_serial_register(void) +{ + type_register_static(&virtio_ccw_serial); +} + +type_init(virtio_ccw_serial_register) diff --git a/hw/s390x/virtio-ccw.c b/hw/s390x/virtio-ccw.c new file mode 100644 index 000000000..c845a92c3 --- /dev/null +++ b/hw/s390x/virtio-ccw.c @@ -0,0 +1,1302 @@ +/* + * virtio ccw target implementation + * + * Copyright 2012,2015 IBM Corp. + * Author(s): Cornelia Huck + * Pierre Morel + * + * This work is licensed under the terms of the GNU GPL, version 2 or (at + * your option) any later version. See the COPYING file in the top-level + * directory. 
+ */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "sysemu/kvm.h" +#include "net/net.h" +#include "hw/virtio/virtio.h" +#include "migration/qemu-file-types.h" +#include "hw/virtio/virtio-net.h" +#include "qemu/bitops.h" +#include "qemu/error-report.h" +#include "qemu/module.h" +#include "hw/virtio/virtio-access.h" +#include "hw/virtio/virtio-bus.h" +#include "hw/s390x/adapter.h" +#include "hw/s390x/s390_flic.h" + +#include "hw/s390x/ioinst.h" +#include "hw/s390x/css.h" +#include "virtio-ccw.h" +#include "trace.h" +#include "hw/s390x/css-bridge.h" +#include "hw/s390x/s390-virtio-ccw.h" +#include "sysemu/replay.h" + +#define NR_CLASSIC_INDICATOR_BITS 64 + +bool have_virtio_ccw = true; + +static int virtio_ccw_dev_post_load(void *opaque, int version_id) +{ + VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(opaque); + CcwDevice *ccw_dev = CCW_DEVICE(dev); + CCWDeviceClass *ck = CCW_DEVICE_GET_CLASS(ccw_dev); + + ccw_dev->sch->driver_data = dev; + if (ccw_dev->sch->thinint_active) { + dev->routes.adapter.adapter_id = css_get_adapter_id( + CSS_IO_ADAPTER_VIRTIO, + dev->thinint_isc); + } + /* Re-fill subch_id after loading the subchannel states.*/ + if (ck->refill_ids) { + ck->refill_ids(ccw_dev); + } + return 0; +} + +typedef struct VirtioCcwDeviceTmp { + VirtioCcwDevice *parent; + uint16_t config_vector; +} VirtioCcwDeviceTmp; + +static int virtio_ccw_dev_tmp_pre_save(void *opaque) +{ + VirtioCcwDeviceTmp *tmp = opaque; + VirtioCcwDevice *dev = tmp->parent; + VirtIODevice *vdev = virtio_bus_get_device(&dev->bus); + + tmp->config_vector = vdev->config_vector; + + return 0; +} + +static int virtio_ccw_dev_tmp_post_load(void *opaque, int version_id) +{ + VirtioCcwDeviceTmp *tmp = opaque; + VirtioCcwDevice *dev = tmp->parent; + VirtIODevice *vdev = virtio_bus_get_device(&dev->bus); + + vdev->config_vector = tmp->config_vector; + return 0; +} + +const VMStateDescription vmstate_virtio_ccw_dev_tmp = { + .name = "s390_virtio_ccw_dev_tmp", + .pre_save = virtio_ccw_dev_tmp_pre_save, + .post_load = virtio_ccw_dev_tmp_post_load, + .fields = (VMStateField[]) { + VMSTATE_UINT16(config_vector, VirtioCcwDeviceTmp), + VMSTATE_END_OF_LIST() + } +}; + +const VMStateDescription vmstate_virtio_ccw_dev = { + .name = "s390_virtio_ccw_dev", + .version_id = 1, + .minimum_version_id = 1, + .post_load = virtio_ccw_dev_post_load, + .fields = (VMStateField[]) { + VMSTATE_CCW_DEVICE(parent_obj, VirtioCcwDevice), + VMSTATE_PTR_TO_IND_ADDR(indicators, VirtioCcwDevice), + VMSTATE_PTR_TO_IND_ADDR(indicators2, VirtioCcwDevice), + VMSTATE_PTR_TO_IND_ADDR(summary_indicator, VirtioCcwDevice), + /* + * Ugly hack because VirtIODevice does not migrate itself. + * This also makes legacy via vmstate_save_state possible. 
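+         * VirtioCcwDeviceTmp only funnels vdev->config_vector through
+         * its pre_save/post_load hooks (see above), so this section
+         * migrates just that one field of the proxied VirtIODevice.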
+ */ + VMSTATE_WITH_TMP(VirtioCcwDevice, VirtioCcwDeviceTmp, + vmstate_virtio_ccw_dev_tmp), + VMSTATE_STRUCT(routes, VirtioCcwDevice, 1, vmstate_adapter_routes, + AdapterRoutes), + VMSTATE_UINT8(thinint_isc, VirtioCcwDevice), + VMSTATE_INT32(revision, VirtioCcwDevice), + VMSTATE_END_OF_LIST() + } +}; + +static void virtio_ccw_bus_new(VirtioBusState *bus, size_t bus_size, + VirtioCcwDevice *dev); + +VirtIODevice *virtio_ccw_get_vdev(SubchDev *sch) +{ + VirtIODevice *vdev = NULL; + VirtioCcwDevice *dev = sch->driver_data; + + if (dev) { + vdev = virtio_bus_get_device(&dev->bus); + } + return vdev; +} + +static void virtio_ccw_start_ioeventfd(VirtioCcwDevice *dev) +{ + virtio_bus_start_ioeventfd(&dev->bus); +} + +static void virtio_ccw_stop_ioeventfd(VirtioCcwDevice *dev) +{ + virtio_bus_stop_ioeventfd(&dev->bus); +} + +static bool virtio_ccw_ioeventfd_enabled(DeviceState *d) +{ + VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d); + + return (dev->flags & VIRTIO_CCW_FLAG_USE_IOEVENTFD) != 0; +} + +static int virtio_ccw_ioeventfd_assign(DeviceState *d, EventNotifier *notifier, + int n, bool assign) +{ + VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d); + CcwDevice *ccw_dev = CCW_DEVICE(dev); + SubchDev *sch = ccw_dev->sch; + uint32_t sch_id = (css_build_subchannel_id(sch) << 16) | sch->schid; + + return s390_assign_subch_ioeventfd(notifier, sch_id, n, assign); +} + +/* Communication blocks used by several channel commands. */ +typedef struct VqInfoBlockLegacy { + uint64_t queue; + uint32_t align; + uint16_t index; + uint16_t num; +} QEMU_PACKED VqInfoBlockLegacy; + +typedef struct VqInfoBlock { + uint64_t desc; + uint32_t res0; + uint16_t index; + uint16_t num; + uint64_t avail; + uint64_t used; +} QEMU_PACKED VqInfoBlock; + +typedef struct VqConfigBlock { + uint16_t index; + uint16_t num_max; +} QEMU_PACKED VqConfigBlock; + +typedef struct VirtioFeatDesc { + uint32_t features; + uint8_t index; +} QEMU_PACKED VirtioFeatDesc; + +typedef struct VirtioThinintInfo { + hwaddr summary_indicator; + hwaddr device_indicator; + uint64_t ind_bit; + uint8_t isc; +} QEMU_PACKED VirtioThinintInfo; + +typedef struct VirtioRevInfo { + uint16_t revision; + uint16_t length; + uint8_t data[]; +} QEMU_PACKED VirtioRevInfo; + +/* Specify where the virtqueues for the subchannel are in guest memory. */ +static int virtio_ccw_set_vqs(SubchDev *sch, VqInfoBlock *info, + VqInfoBlockLegacy *linfo) +{ + VirtIODevice *vdev = virtio_ccw_get_vdev(sch); + uint16_t index = info ? info->index : linfo->index; + uint16_t num = info ? info->num : linfo->num; + uint64_t desc = info ? info->desc : linfo->queue; + + if (index >= VIRTIO_QUEUE_MAX) { + return -EINVAL; + } + + /* Current code in virtio.c relies on 4K alignment. */ + if (linfo && desc && (linfo->align != 4096)) { + return -EINVAL; + } + + if (!vdev) { + return -EINVAL; + } + + if (info) { + virtio_queue_set_rings(vdev, index, desc, info->avail, info->used); + } else { + virtio_queue_set_addr(vdev, index, desc); + } + if (!desc) { + virtio_queue_set_vector(vdev, index, VIRTIO_NO_VECTOR); + } else { + if (info) { + /* virtio-1 allows changing the ring size. */ + if (virtio_queue_get_max_num(vdev, index) < num) { + /* Fail if we exceed the maximum number. */ + return -EINVAL; + } + virtio_queue_set_num(vdev, index, num); + } else if (virtio_queue_get_num(vdev, index) > num) { + /* Fail if we don't have a big enough queue. */ + return -EINVAL; + } + /* We ignore possible increased num for legacy for compatibility. 
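+         * That is, a legacy driver passing a num larger than the current
+         * queue size is tolerated here; only a smaller num was rejected
+         * just above.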
*/ + virtio_queue_set_vector(vdev, index, index); + } + /* tell notify handler in case of config change */ + vdev->config_vector = VIRTIO_QUEUE_MAX; + return 0; +} + +static void virtio_ccw_reset_virtio(VirtioCcwDevice *dev, VirtIODevice *vdev) +{ + CcwDevice *ccw_dev = CCW_DEVICE(dev); + + virtio_ccw_stop_ioeventfd(dev); + virtio_reset(vdev); + if (dev->indicators) { + release_indicator(&dev->routes.adapter, dev->indicators); + dev->indicators = NULL; + } + if (dev->indicators2) { + release_indicator(&dev->routes.adapter, dev->indicators2); + dev->indicators2 = NULL; + } + if (dev->summary_indicator) { + release_indicator(&dev->routes.adapter, dev->summary_indicator); + dev->summary_indicator = NULL; + } + ccw_dev->sch->thinint_active = false; +} + +static int virtio_ccw_handle_set_vq(SubchDev *sch, CCW1 ccw, bool check_len, + bool is_legacy) +{ + int ret; + VqInfoBlock info; + VqInfoBlockLegacy linfo; + size_t info_len = is_legacy ? sizeof(linfo) : sizeof(info); + + if (check_len) { + if (ccw.count != info_len) { + return -EINVAL; + } + } else if (ccw.count < info_len) { + /* Can't execute command. */ + return -EINVAL; + } + if (!ccw.cda) { + return -EFAULT; + } + if (is_legacy) { + ret = ccw_dstream_read(&sch->cds, linfo); + if (ret) { + return ret; + } + linfo.queue = be64_to_cpu(linfo.queue); + linfo.align = be32_to_cpu(linfo.align); + linfo.index = be16_to_cpu(linfo.index); + linfo.num = be16_to_cpu(linfo.num); + ret = virtio_ccw_set_vqs(sch, NULL, &linfo); + } else { + ret = ccw_dstream_read(&sch->cds, info); + if (ret) { + return ret; + } + info.desc = be64_to_cpu(info.desc); + info.index = be16_to_cpu(info.index); + info.num = be16_to_cpu(info.num); + info.avail = be64_to_cpu(info.avail); + info.used = be64_to_cpu(info.used); + ret = virtio_ccw_set_vqs(sch, &info, NULL); + } + sch->curr_status.scsw.count = 0; + return ret; +} + +static int virtio_ccw_cb(SubchDev *sch, CCW1 ccw) +{ + int ret; + VirtioRevInfo revinfo; + uint8_t status; + VirtioFeatDesc features; + hwaddr indicators; + VqConfigBlock vq_config; + VirtioCcwDevice *dev = sch->driver_data; + VirtIODevice *vdev = virtio_ccw_get_vdev(sch); + bool check_len; + int len; + VirtioThinintInfo thinint; + + if (!dev) { + return -EINVAL; + } + + trace_virtio_ccw_interpret_ccw(sch->cssid, sch->ssid, sch->schid, + ccw.cmd_code); + check_len = !((ccw.flags & CCW_FLAG_SLI) && !(ccw.flags & CCW_FLAG_DC)); + + if (dev->revision < 0 && ccw.cmd_code != CCW_CMD_SET_VIRTIO_REV) { + if (dev->force_revision_1) { + /* + * virtio-1 drivers must start with negotiating to a revision >= 1, + * so post a command reject for all other commands + */ + return -ENOSYS; + } else { + /* + * If the driver issues any command that is not SET_VIRTIO_REV, + * we'll have to operate the device in legacy mode. + */ + dev->revision = 0; + } + } + + /* Look at the command. */ + switch (ccw.cmd_code) { + case CCW_CMD_SET_VQ: + ret = virtio_ccw_handle_set_vq(sch, ccw, check_len, dev->revision < 1); + break; + case CCW_CMD_VDEV_RESET: + virtio_ccw_reset_virtio(dev, vdev); + ret = 0; + break; + case CCW_CMD_READ_FEAT: + if (check_len) { + if (ccw.count != sizeof(features)) { + ret = -EINVAL; + break; + } + } else if (ccw.count < sizeof(features)) { + /* Can't execute command. 
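+             * Even with SLI set in the CCW flags, the driver must supply
+             * at least sizeof(features) bytes for READ_FEAT.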
*/ + ret = -EINVAL; + break; + } + if (!ccw.cda) { + ret = -EFAULT; + } else { + VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev); + + ccw_dstream_advance(&sch->cds, sizeof(features.features)); + ret = ccw_dstream_read(&sch->cds, features.index); + if (ret) { + break; + } + if (features.index == 0) { + if (dev->revision >= 1) { + /* Don't offer legacy features for modern devices. */ + features.features = (uint32_t) + (vdev->host_features & ~vdc->legacy_features); + } else { + features.features = (uint32_t)vdev->host_features; + } + } else if ((features.index == 1) && (dev->revision >= 1)) { + /* + * Only offer feature bits beyond 31 if the guest has + * negotiated at least revision 1. + */ + features.features = (uint32_t)(vdev->host_features >> 32); + } else { + /* Return zeroes if the guest supports more feature bits. */ + features.features = 0; + } + ccw_dstream_rewind(&sch->cds); + features.features = cpu_to_le32(features.features); + ret = ccw_dstream_write(&sch->cds, features.features); + if (!ret) { + sch->curr_status.scsw.count = ccw.count - sizeof(features); + } + } + break; + case CCW_CMD_WRITE_FEAT: + if (check_len) { + if (ccw.count != sizeof(features)) { + ret = -EINVAL; + break; + } + } else if (ccw.count < sizeof(features)) { + /* Can't execute command. */ + ret = -EINVAL; + break; + } + if (!ccw.cda) { + ret = -EFAULT; + } else { + ret = ccw_dstream_read(&sch->cds, features); + if (ret) { + break; + } + features.features = le32_to_cpu(features.features); + if (features.index == 0) { + virtio_set_features(vdev, + (vdev->guest_features & 0xffffffff00000000ULL) | + features.features); + } else if ((features.index == 1) && (dev->revision >= 1)) { + /* + * If the guest did not negotiate at least revision 1, + * we did not offer it any feature bits beyond 31. Such a + * guest passing us any bit here is therefore buggy. + */ + virtio_set_features(vdev, + (vdev->guest_features & 0x00000000ffffffffULL) | + ((uint64_t)features.features << 32)); + } else { + /* + * If the guest supports more feature bits, assert that it + * passes us zeroes for those we don't support. + */ + if (features.features) { + qemu_log_mask(LOG_GUEST_ERROR, + "Guest bug: features[%i]=%x (expected 0)", + features.index, features.features); + /* XXX: do a unit check here? */ + } + } + sch->curr_status.scsw.count = ccw.count - sizeof(features); + ret = 0; + } + break; + case CCW_CMD_READ_CONF: + if (check_len) { + if (ccw.count > vdev->config_len) { + ret = -EINVAL; + break; + } + } + len = MIN(ccw.count, vdev->config_len); + if (!ccw.cda) { + ret = -EFAULT; + } else { + virtio_bus_get_vdev_config(&dev->bus, vdev->config); + ret = ccw_dstream_write_buf(&sch->cds, vdev->config, len); + if (ret) { + sch->curr_status.scsw.count = ccw.count - len; + } + } + break; + case CCW_CMD_WRITE_CONF: + if (check_len) { + if (ccw.count > vdev->config_len) { + ret = -EINVAL; + break; + } + } + len = MIN(ccw.count, vdev->config_len); + if (!ccw.cda) { + ret = -EFAULT; + } else { + ret = ccw_dstream_read_buf(&sch->cds, vdev->config, len); + if (!ret) { + virtio_bus_set_vdev_config(&dev->bus, vdev->config); + sch->curr_status.scsw.count = ccw.count - len; + } + } + break; + case CCW_CMD_READ_STATUS: + if (check_len) { + if (ccw.count != sizeof(status)) { + ret = -EINVAL; + break; + } + } else if (ccw.count < sizeof(status)) { + /* Can't execute command. 
*/ + ret = -EINVAL; + break; + } + if (!ccw.cda) { + ret = -EFAULT; + } else { + address_space_stb(&address_space_memory, ccw.cda, vdev->status, + MEMTXATTRS_UNSPECIFIED, NULL); + sch->curr_status.scsw.count = ccw.count - sizeof(vdev->status); + ret = 0; + } + break; + case CCW_CMD_WRITE_STATUS: + if (check_len) { + if (ccw.count != sizeof(status)) { + ret = -EINVAL; + break; + } + } else if (ccw.count < sizeof(status)) { + /* Can't execute command. */ + ret = -EINVAL; + break; + } + if (!ccw.cda) { + ret = -EFAULT; + } else { + ret = ccw_dstream_read(&sch->cds, status); + if (ret) { + break; + } + if (!(status & VIRTIO_CONFIG_S_DRIVER_OK)) { + virtio_ccw_stop_ioeventfd(dev); + } + if (virtio_set_status(vdev, status) == 0) { + if (vdev->status == 0) { + virtio_ccw_reset_virtio(dev, vdev); + } + if (status & VIRTIO_CONFIG_S_DRIVER_OK) { + virtio_ccw_start_ioeventfd(dev); + } + sch->curr_status.scsw.count = ccw.count - sizeof(status); + ret = 0; + } else { + /* Trigger a command reject. */ + ret = -ENOSYS; + } + } + break; + case CCW_CMD_SET_IND: + if (check_len) { + if (ccw.count != sizeof(indicators)) { + ret = -EINVAL; + break; + } + } else if (ccw.count < sizeof(indicators)) { + /* Can't execute command. */ + ret = -EINVAL; + break; + } + if (sch->thinint_active) { + /* Trigger a command reject. */ + ret = -ENOSYS; + break; + } + if (virtio_get_num_queues(vdev) > NR_CLASSIC_INDICATOR_BITS) { + /* More queues than indicator bits --> trigger a reject */ + ret = -ENOSYS; + break; + } + if (!ccw.cda) { + ret = -EFAULT; + } else { + ret = ccw_dstream_read(&sch->cds, indicators); + if (ret) { + break; + } + indicators = be64_to_cpu(indicators); + dev->indicators = get_indicator(indicators, sizeof(uint64_t)); + sch->curr_status.scsw.count = ccw.count - sizeof(indicators); + ret = 0; + } + break; + case CCW_CMD_SET_CONF_IND: + if (check_len) { + if (ccw.count != sizeof(indicators)) { + ret = -EINVAL; + break; + } + } else if (ccw.count < sizeof(indicators)) { + /* Can't execute command. */ + ret = -EINVAL; + break; + } + if (!ccw.cda) { + ret = -EFAULT; + } else { + ret = ccw_dstream_read(&sch->cds, indicators); + if (ret) { + break; + } + indicators = be64_to_cpu(indicators); + dev->indicators2 = get_indicator(indicators, sizeof(uint64_t)); + sch->curr_status.scsw.count = ccw.count - sizeof(indicators); + ret = 0; + } + break; + case CCW_CMD_READ_VQ_CONF: + if (check_len) { + if (ccw.count != sizeof(vq_config)) { + ret = -EINVAL; + break; + } + } else if (ccw.count < sizeof(vq_config)) { + /* Can't execute command. */ + ret = -EINVAL; + break; + } + if (!ccw.cda) { + ret = -EFAULT; + } else { + ret = ccw_dstream_read(&sch->cds, vq_config.index); + if (ret) { + break; + } + vq_config.index = be16_to_cpu(vq_config.index); + if (vq_config.index >= VIRTIO_QUEUE_MAX) { + ret = -EINVAL; + break; + } + vq_config.num_max = virtio_queue_get_num(vdev, + vq_config.index); + vq_config.num_max = cpu_to_be16(vq_config.num_max); + ret = ccw_dstream_write(&sch->cds, vq_config.num_max); + if (!ret) { + sch->curr_status.scsw.count = ccw.count - sizeof(vq_config); + } + } + break; + case CCW_CMD_SET_IND_ADAPTER: + if (check_len) { + if (ccw.count != sizeof(thinint)) { + ret = -EINVAL; + break; + } + } else if (ccw.count < sizeof(thinint)) { + /* Can't execute command. */ + ret = -EINVAL; + break; + } + if (!ccw.cda) { + ret = -EFAULT; + } else if (dev->indicators && !sch->thinint_active) { + /* Trigger a command reject. 
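+             * Classic indicators have already been registered for this
+             * device, so switching it to adapter interrupts is refused.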
*/ + ret = -ENOSYS; + } else { + if (ccw_dstream_read(&sch->cds, thinint)) { + ret = -EFAULT; + } else { + thinint.ind_bit = be64_to_cpu(thinint.ind_bit); + thinint.summary_indicator = + be64_to_cpu(thinint.summary_indicator); + thinint.device_indicator = + be64_to_cpu(thinint.device_indicator); + + dev->summary_indicator = + get_indicator(thinint.summary_indicator, sizeof(uint8_t)); + dev->indicators = + get_indicator(thinint.device_indicator, + thinint.ind_bit / 8 + 1); + dev->thinint_isc = thinint.isc; + dev->routes.adapter.ind_offset = thinint.ind_bit; + dev->routes.adapter.summary_offset = 7; + dev->routes.adapter.adapter_id = css_get_adapter_id( + CSS_IO_ADAPTER_VIRTIO, + dev->thinint_isc); + sch->thinint_active = ((dev->indicators != NULL) && + (dev->summary_indicator != NULL)); + sch->curr_status.scsw.count = ccw.count - sizeof(thinint); + ret = 0; + } + } + break; + case CCW_CMD_SET_VIRTIO_REV: + len = sizeof(revinfo); + if (ccw.count < len) { + ret = -EINVAL; + break; + } + if (!ccw.cda) { + ret = -EFAULT; + break; + } + ret = ccw_dstream_read_buf(&sch->cds, &revinfo, 4); + if (ret < 0) { + break; + } + revinfo.revision = be16_to_cpu(revinfo.revision); + revinfo.length = be16_to_cpu(revinfo.length); + if (ccw.count < len + revinfo.length || + (check_len && ccw.count > len + revinfo.length)) { + ret = -EINVAL; + break; + } + /* + * Once we start to support revisions with additional data, we'll + * need to fetch it here. Nothing to do for now, though. + */ + if (dev->revision >= 0 || + revinfo.revision > virtio_ccw_rev_max(dev) || + (dev->force_revision_1 && !revinfo.revision)) { + ret = -ENOSYS; + break; + } + ret = 0; + dev->revision = revinfo.revision; + break; + default: + ret = -ENOSYS; + break; + } + return ret; +} + +static void virtio_sch_disable_cb(SubchDev *sch) +{ + VirtioCcwDevice *dev = sch->driver_data; + + dev->revision = -1; +} + +static void virtio_ccw_device_realize(VirtioCcwDevice *dev, Error **errp) +{ + VirtIOCCWDeviceClass *k = VIRTIO_CCW_DEVICE_GET_CLASS(dev); + CcwDevice *ccw_dev = CCW_DEVICE(dev); + CCWDeviceClass *ck = CCW_DEVICE_GET_CLASS(ccw_dev); + SubchDev *sch; + Error *err = NULL; + int i; + + sch = css_create_sch(ccw_dev->devno, errp); + if (!sch) { + return; + } + if (!virtio_ccw_rev_max(dev) && dev->force_revision_1) { + error_setg(&err, "Invalid value of property max_rev " + "(is %d expected >= 1)", virtio_ccw_rev_max(dev)); + goto out_err; + } + + sch->driver_data = dev; + sch->ccw_cb = virtio_ccw_cb; + sch->disable_cb = virtio_sch_disable_cb; + sch->id.reserved = 0xff; + sch->id.cu_type = VIRTIO_CCW_CU_TYPE; + sch->do_subchannel_work = do_subchannel_work_virtual; + sch->irb_cb = build_irb_virtual; + ccw_dev->sch = sch; + dev->indicators = NULL; + dev->revision = -1; + for (i = 0; i < ADAPTER_ROUTES_MAX_GSI; i++) { + dev->routes.gsi[i] = -1; + } + css_sch_build_virtual_schib(sch, 0, VIRTIO_CCW_CHPID_TYPE); + + trace_virtio_ccw_new_device( + sch->cssid, sch->ssid, sch->schid, sch->devno, + ccw_dev->devno.valid ? 
"user-configured" : "auto-configured"); + + if (kvm_enabled() && !kvm_eventfds_enabled()) { + dev->flags &= ~VIRTIO_CCW_FLAG_USE_IOEVENTFD; + } + + /* fd-based ioevents can't be synchronized in record/replay */ + if (replay_mode != REPLAY_MODE_NONE) { + dev->flags &= ~VIRTIO_CCW_FLAG_USE_IOEVENTFD; + } + + if (k->realize) { + k->realize(dev, &err); + if (err) { + goto out_err; + } + } + + ck->realize(ccw_dev, &err); + if (err) { + goto out_err; + } + + return; + +out_err: + error_propagate(errp, err); + css_subch_assign(sch->cssid, sch->ssid, sch->schid, sch->devno, NULL); + ccw_dev->sch = NULL; + g_free(sch); +} + +static void virtio_ccw_device_unrealize(VirtioCcwDevice *dev) +{ + VirtIOCCWDeviceClass *dc = VIRTIO_CCW_DEVICE_GET_CLASS(dev); + CcwDevice *ccw_dev = CCW_DEVICE(dev); + SubchDev *sch = ccw_dev->sch; + + if (dc->unrealize) { + dc->unrealize(dev); + } + + if (sch) { + css_subch_assign(sch->cssid, sch->ssid, sch->schid, sch->devno, NULL); + g_free(sch); + ccw_dev->sch = NULL; + } + if (dev->indicators) { + release_indicator(&dev->routes.adapter, dev->indicators); + dev->indicators = NULL; + } +} + +/* DeviceState to VirtioCcwDevice. Note: used on datapath, + * be careful and test performance if you change this. + */ +static inline VirtioCcwDevice *to_virtio_ccw_dev_fast(DeviceState *d) +{ + CcwDevice *ccw_dev = to_ccw_dev_fast(d); + + return container_of(ccw_dev, VirtioCcwDevice, parent_obj); +} + +static uint8_t virtio_set_ind_atomic(SubchDev *sch, uint64_t ind_loc, + uint8_t to_be_set) +{ + uint8_t expected, actual; + hwaddr len = 1; + /* avoid multiple fetches */ + uint8_t volatile *ind_addr; + + ind_addr = cpu_physical_memory_map(ind_loc, &len, true); + if (!ind_addr) { + error_report("%s(%x.%x.%04x): unable to access indicator", + __func__, sch->cssid, sch->ssid, sch->schid); + return -1; + } + actual = *ind_addr; + do { + expected = actual; + actual = qatomic_cmpxchg(ind_addr, expected, expected | to_be_set); + } while (actual != expected); + trace_virtio_ccw_set_ind(ind_loc, actual, actual | to_be_set); + cpu_physical_memory_unmap((void *)ind_addr, len, 1, len); + + return actual; +} + +static void virtio_ccw_notify(DeviceState *d, uint16_t vector) +{ + VirtioCcwDevice *dev = to_virtio_ccw_dev_fast(d); + CcwDevice *ccw_dev = to_ccw_dev_fast(d); + SubchDev *sch = ccw_dev->sch; + uint64_t indicators; + + if (vector == VIRTIO_NO_VECTOR) { + return; + } + /* + * vector < VIRTIO_QUEUE_MAX: notification for a virtqueue + * vector == VIRTIO_QUEUE_MAX: configuration change notification + * bits beyond that are unused and should never be notified for + */ + assert(vector <= VIRTIO_QUEUE_MAX); + + if (vector < VIRTIO_QUEUE_MAX) { + if (!dev->indicators) { + return; + } + if (sch->thinint_active) { + /* + * In the adapter interrupt case, indicators points to a + * memory area that may be (way) larger than 64 bit and + * ind_bit indicates the start of the indicators in a big + * endian notation. 
+ */ + uint64_t ind_bit = dev->routes.adapter.ind_offset; + + virtio_set_ind_atomic(sch, dev->indicators->addr + + (ind_bit + vector) / 8, + 0x80 >> ((ind_bit + vector) % 8)); + if (!virtio_set_ind_atomic(sch, dev->summary_indicator->addr, + 0x01)) { + css_adapter_interrupt(CSS_IO_ADAPTER_VIRTIO, dev->thinint_isc); + } + } else { + assert(vector < NR_CLASSIC_INDICATOR_BITS); + indicators = address_space_ldq(&address_space_memory, + dev->indicators->addr, + MEMTXATTRS_UNSPECIFIED, + NULL); + indicators |= 1ULL << vector; + address_space_stq(&address_space_memory, dev->indicators->addr, + indicators, MEMTXATTRS_UNSPECIFIED, NULL); + css_conditional_io_interrupt(sch); + } + } else { + if (!dev->indicators2) { + return; + } + indicators = address_space_ldq(&address_space_memory, + dev->indicators2->addr, + MEMTXATTRS_UNSPECIFIED, + NULL); + indicators |= 1ULL; + address_space_stq(&address_space_memory, dev->indicators2->addr, + indicators, MEMTXATTRS_UNSPECIFIED, NULL); + css_conditional_io_interrupt(sch); + } +} + +static void virtio_ccw_reset(DeviceState *d) +{ + VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d); + VirtIODevice *vdev = virtio_bus_get_device(&dev->bus); + VirtIOCCWDeviceClass *vdc = VIRTIO_CCW_DEVICE_GET_CLASS(dev); + + virtio_ccw_reset_virtio(dev, vdev); + if (vdc->parent_reset) { + vdc->parent_reset(d); + } +} + +static void virtio_ccw_vmstate_change(DeviceState *d, bool running) +{ + VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d); + + if (running) { + virtio_ccw_start_ioeventfd(dev); + } else { + virtio_ccw_stop_ioeventfd(dev); + } +} + +static bool virtio_ccw_query_guest_notifiers(DeviceState *d) +{ + CcwDevice *dev = CCW_DEVICE(d); + + return !!(dev->sch->curr_status.pmcw.flags & PMCW_FLAGS_MASK_ENA); +} + +static int virtio_ccw_get_mappings(VirtioCcwDevice *dev) +{ + int r; + CcwDevice *ccw_dev = CCW_DEVICE(dev); + + if (!ccw_dev->sch->thinint_active) { + return -EINVAL; + } + + r = map_indicator(&dev->routes.adapter, dev->summary_indicator); + if (r) { + return r; + } + r = map_indicator(&dev->routes.adapter, dev->indicators); + if (r) { + return r; + } + dev->routes.adapter.summary_addr = dev->summary_indicator->map; + dev->routes.adapter.ind_addr = dev->indicators->map; + + return 0; +} + +static int virtio_ccw_setup_irqroutes(VirtioCcwDevice *dev, int nvqs) +{ + int i; + VirtIODevice *vdev = virtio_bus_get_device(&dev->bus); + int ret; + S390FLICState *fs = s390_get_flic(); + S390FLICStateClass *fsc = s390_get_flic_class(fs); + + ret = virtio_ccw_get_mappings(dev); + if (ret) { + return ret; + } + for (i = 0; i < nvqs; i++) { + if (!virtio_queue_get_num(vdev, i)) { + break; + } + } + dev->routes.num_routes = i; + return fsc->add_adapter_routes(fs, &dev->routes); +} + +static void virtio_ccw_release_irqroutes(VirtioCcwDevice *dev, int nvqs) +{ + S390FLICState *fs = s390_get_flic(); + S390FLICStateClass *fsc = s390_get_flic_class(fs); + + fsc->release_adapter_routes(fs, &dev->routes); +} + +static int virtio_ccw_add_irqfd(VirtioCcwDevice *dev, int n) +{ + VirtIODevice *vdev = virtio_bus_get_device(&dev->bus); + VirtQueue *vq = virtio_get_queue(vdev, n); + EventNotifier *notifier = virtio_queue_get_guest_notifier(vq); + + return kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, notifier, NULL, + dev->routes.gsi[n]); +} + +static void virtio_ccw_remove_irqfd(VirtioCcwDevice *dev, int n) +{ + VirtIODevice *vdev = virtio_bus_get_device(&dev->bus); + VirtQueue *vq = virtio_get_queue(vdev, n); + EventNotifier *notifier = virtio_queue_get_guest_notifier(vq); + int ret; + + ret = 
kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, notifier, + dev->routes.gsi[n]); + assert(ret == 0); +} + +static int virtio_ccw_set_guest_notifier(VirtioCcwDevice *dev, int n, + bool assign, bool with_irqfd) +{ + VirtIODevice *vdev = virtio_bus_get_device(&dev->bus); + VirtQueue *vq = virtio_get_queue(vdev, n); + EventNotifier *notifier = virtio_queue_get_guest_notifier(vq); + VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); + + if (assign) { + int r = event_notifier_init(notifier, 0); + + if (r < 0) { + return r; + } + virtio_queue_set_guest_notifier_fd_handler(vq, true, with_irqfd); + if (with_irqfd) { + r = virtio_ccw_add_irqfd(dev, n); + if (r) { + virtio_queue_set_guest_notifier_fd_handler(vq, false, + with_irqfd); + return r; + } + } + /* + * We do not support individual masking for channel devices, so we + * need to manually trigger any guest masking callbacks here. + */ + if (k->guest_notifier_mask && vdev->use_guest_notifier_mask) { + k->guest_notifier_mask(vdev, n, false); + } + /* get lost events and re-inject */ + if (k->guest_notifier_pending && + k->guest_notifier_pending(vdev, n)) { + event_notifier_set(notifier); + } + } else { + if (k->guest_notifier_mask && vdev->use_guest_notifier_mask) { + k->guest_notifier_mask(vdev, n, true); + } + if (with_irqfd) { + virtio_ccw_remove_irqfd(dev, n); + } + virtio_queue_set_guest_notifier_fd_handler(vq, false, with_irqfd); + event_notifier_cleanup(notifier); + } + return 0; +} + +static int virtio_ccw_set_guest_notifiers(DeviceState *d, int nvqs, + bool assigned) +{ + VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d); + VirtIODevice *vdev = virtio_bus_get_device(&dev->bus); + CcwDevice *ccw_dev = CCW_DEVICE(d); + bool with_irqfd = ccw_dev->sch->thinint_active && kvm_irqfds_enabled(); + int r, n; + + if (with_irqfd && assigned) { + /* irq routes need to be set up before assigning irqfds */ + r = virtio_ccw_setup_irqroutes(dev, nvqs); + if (r < 0) { + goto irqroute_error; + } + } + for (n = 0; n < nvqs; n++) { + if (!virtio_queue_get_num(vdev, n)) { + break; + } + r = virtio_ccw_set_guest_notifier(dev, n, assigned, with_irqfd); + if (r < 0) { + goto assign_error; + } + } + if (with_irqfd && !assigned) { + /* release irq routes after irqfds have been released */ + virtio_ccw_release_irqroutes(dev, nvqs); + } + return 0; + +assign_error: + while (--n >= 0) { + virtio_ccw_set_guest_notifier(dev, n, !assigned, false); + } +irqroute_error: + if (with_irqfd && assigned) { + virtio_ccw_release_irqroutes(dev, nvqs); + } + return r; +} + +static void virtio_ccw_save_queue(DeviceState *d, int n, QEMUFile *f) +{ + VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d); + VirtIODevice *vdev = virtio_bus_get_device(&dev->bus); + + qemu_put_be16(f, virtio_queue_vector(vdev, n)); +} + +static int virtio_ccw_load_queue(DeviceState *d, int n, QEMUFile *f) +{ + VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d); + VirtIODevice *vdev = virtio_bus_get_device(&dev->bus); + uint16_t vector; + + qemu_get_be16s(f, &vector); + virtio_queue_set_vector(vdev, n , vector); + + return 0; +} + +static void virtio_ccw_save_config(DeviceState *d, QEMUFile *f) +{ + VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d); + vmstate_save_state(f, &vmstate_virtio_ccw_dev, dev, NULL); +} + +static int virtio_ccw_load_config(DeviceState *d, QEMUFile *f) +{ + VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d); + return vmstate_load_state(f, &vmstate_virtio_ccw_dev, dev, 1); +} + +static void virtio_ccw_pre_plugged(DeviceState *d, Error **errp) +{ + VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d); + VirtIODevice 
*vdev = virtio_bus_get_device(&dev->bus); + + if (dev->max_rev >= 1) { + virtio_add_feature(&vdev->host_features, VIRTIO_F_VERSION_1); + } +} + +/* This is called by virtio-bus just after the device is plugged. */ +static void virtio_ccw_device_plugged(DeviceState *d, Error **errp) +{ + VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d); + VirtIODevice *vdev = virtio_bus_get_device(&dev->bus); + CcwDevice *ccw_dev = CCW_DEVICE(d); + SubchDev *sch = ccw_dev->sch; + int n = virtio_get_num_queues(vdev); + S390FLICState *flic = s390_get_flic(); + + if (!virtio_has_feature(vdev->host_features, VIRTIO_F_VERSION_1)) { + dev->max_rev = 0; + } + + if (!virtio_ccw_rev_max(dev) && !virtio_legacy_allowed(vdev)) { + /* + * To avoid migration issues, we allow legacy mode when legacy + * check is disabled in the old machine types (< 5.1). + */ + if (virtio_legacy_check_disabled(vdev)) { + warn_report("device requires revision >= 1, but for backward " + "compatibility max_revision=0 is allowed"); + } else { + error_setg(errp, "Invalid value of property max_rev " + "(is %d expected >= 1)", virtio_ccw_rev_max(dev)); + return; + } + } + + if (virtio_get_num_queues(vdev) > VIRTIO_QUEUE_MAX) { + error_setg(errp, "The number of virtqueues %d " + "exceeds virtio limit %d", n, + VIRTIO_QUEUE_MAX); + return; + } + if (virtio_get_num_queues(vdev) > flic->adapter_routes_max_batch) { + error_setg(errp, "The number of virtqueues %d " + "exceeds flic adapter route limit %d", n, + flic->adapter_routes_max_batch); + return; + } + + sch->id.cu_model = virtio_bus_get_vdev_id(&dev->bus); + + + css_generate_sch_crws(sch->cssid, sch->ssid, sch->schid, + d->hotplugged, 1); +} + +static void virtio_ccw_device_unplugged(DeviceState *d) +{ + VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d); + + virtio_ccw_stop_ioeventfd(dev); +} +/**************** Virtio-ccw Bus Device Descriptions *******************/ + +static void virtio_ccw_busdev_realize(DeviceState *dev, Error **errp) +{ + VirtioCcwDevice *_dev = (VirtioCcwDevice *)dev; + + virtio_ccw_bus_new(&_dev->bus, sizeof(_dev->bus), _dev); + virtio_ccw_device_realize(_dev, errp); +} + +static void virtio_ccw_busdev_unrealize(DeviceState *dev) +{ + VirtioCcwDevice *_dev = (VirtioCcwDevice *)dev; + + virtio_ccw_device_unrealize(_dev); +} + +static void virtio_ccw_busdev_unplug(HotplugHandler *hotplug_dev, + DeviceState *dev, Error **errp) +{ + VirtioCcwDevice *_dev = to_virtio_ccw_dev_fast(dev); + + virtio_ccw_stop_ioeventfd(_dev); +} + +static void virtio_ccw_device_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + CCWDeviceClass *k = CCW_DEVICE_CLASS(dc); + VirtIOCCWDeviceClass *vdc = VIRTIO_CCW_DEVICE_CLASS(klass); + + k->unplug = virtio_ccw_busdev_unplug; + dc->realize = virtio_ccw_busdev_realize; + dc->unrealize = virtio_ccw_busdev_unrealize; + device_class_set_parent_reset(dc, virtio_ccw_reset, &vdc->parent_reset); +} + +static const TypeInfo virtio_ccw_device_info = { + .name = TYPE_VIRTIO_CCW_DEVICE, + .parent = TYPE_CCW_DEVICE, + .instance_size = sizeof(VirtioCcwDevice), + .class_init = virtio_ccw_device_class_init, + .class_size = sizeof(VirtIOCCWDeviceClass), + .abstract = true, +}; + +/* virtio-ccw-bus */ + +static void virtio_ccw_bus_new(VirtioBusState *bus, size_t bus_size, + VirtioCcwDevice *dev) +{ + DeviceState *qdev = DEVICE(dev); + char virtio_bus_name[] = "virtio-bus"; + + qbus_init(bus, bus_size, TYPE_VIRTIO_CCW_BUS, qdev, virtio_bus_name); +} + +static void virtio_ccw_bus_class_init(ObjectClass *klass, void *data) +{ + VirtioBusClass *k 
= VIRTIO_BUS_CLASS(klass); + BusClass *bus_class = BUS_CLASS(klass); + + bus_class->max_dev = 1; + k->notify = virtio_ccw_notify; + k->vmstate_change = virtio_ccw_vmstate_change; + k->query_guest_notifiers = virtio_ccw_query_guest_notifiers; + k->set_guest_notifiers = virtio_ccw_set_guest_notifiers; + k->save_queue = virtio_ccw_save_queue; + k->load_queue = virtio_ccw_load_queue; + k->save_config = virtio_ccw_save_config; + k->load_config = virtio_ccw_load_config; + k->pre_plugged = virtio_ccw_pre_plugged; + k->device_plugged = virtio_ccw_device_plugged; + k->device_unplugged = virtio_ccw_device_unplugged; + k->ioeventfd_enabled = virtio_ccw_ioeventfd_enabled; + k->ioeventfd_assign = virtio_ccw_ioeventfd_assign; +} + +static const TypeInfo virtio_ccw_bus_info = { + .name = TYPE_VIRTIO_CCW_BUS, + .parent = TYPE_VIRTIO_BUS, + .instance_size = sizeof(VirtioCcwBusState), + .class_size = sizeof(VirtioCcwBusClass), + .class_init = virtio_ccw_bus_class_init, +}; + +static void virtio_ccw_register(void) +{ + type_register_static(&virtio_ccw_bus_info); + type_register_static(&virtio_ccw_device_info); +} + +type_init(virtio_ccw_register) diff --git a/hw/s390x/virtio-ccw.h b/hw/s390x/virtio-ccw.h new file mode 100644 index 000000000..0168232e3 --- /dev/null +++ b/hw/s390x/virtio-ccw.h @@ -0,0 +1,242 @@ +/* + * virtio ccw target definitions + * + * Copyright 2012,2015 IBM Corp. + * Author(s): Cornelia Huck + * Pierre Morel + * + * This work is licensed under the terms of the GNU GPL, version 2 or (at + * your option) any later version. See the COPYING file in the top-level + * directory. + */ + +#ifndef HW_S390X_VIRTIO_CCW_H +#define HW_S390X_VIRTIO_CCW_H + +#include "hw/virtio/virtio-blk.h" +#include "hw/virtio/virtio-net.h" +#include "hw/virtio/virtio-serial.h" +#include "hw/virtio/virtio-scsi.h" +#include "qom/object.h" +#ifdef CONFIG_VHOST_SCSI +#include "hw/virtio/vhost-scsi.h" +#endif +#include "hw/virtio/virtio-balloon.h" +#include "hw/virtio/virtio-rng.h" +#include "hw/virtio/virtio-crypto.h" +#include "hw/virtio/virtio-bus.h" +#ifdef CONFIG_VHOST_VSOCK +#include "hw/virtio/vhost-vsock.h" +#endif /* CONFIG_VHOST_VSOCK */ +#include "hw/virtio/virtio-gpu.h" +#include "hw/virtio/virtio-input.h" + +#include "hw/s390x/s390_flic.h" +#include "hw/s390x/css.h" +#include "ccw-device.h" +#include "hw/s390x/css-bridge.h" + +#define VIRTIO_CCW_CU_TYPE 0x3832 +#define VIRTIO_CCW_CHPID_TYPE 0x32 + +#define CCW_CMD_SET_VQ 0x13 +#define CCW_CMD_VDEV_RESET 0x33 +#define CCW_CMD_READ_FEAT 0x12 +#define CCW_CMD_WRITE_FEAT 0x11 +#define CCW_CMD_READ_CONF 0x22 +#define CCW_CMD_WRITE_CONF 0x21 +#define CCW_CMD_WRITE_STATUS 0x31 +#define CCW_CMD_SET_IND 0x43 +#define CCW_CMD_SET_CONF_IND 0x53 +#define CCW_CMD_READ_VQ_CONF 0x32 +#define CCW_CMD_READ_STATUS 0x72 +#define CCW_CMD_SET_IND_ADAPTER 0x73 +#define CCW_CMD_SET_VIRTIO_REV 0x83 + +#define TYPE_VIRTIO_CCW_DEVICE "virtio-ccw-device" +OBJECT_DECLARE_TYPE(VirtioCcwDevice, VirtIOCCWDeviceClass, VIRTIO_CCW_DEVICE) + +typedef struct VirtioBusState VirtioCcwBusState; +typedef struct VirtioBusClass VirtioCcwBusClass; + +#define TYPE_VIRTIO_CCW_BUS "virtio-ccw-bus" +DECLARE_OBJ_CHECKERS(VirtioCcwBusState, VirtioCcwBusClass, + VIRTIO_CCW_BUS, TYPE_VIRTIO_CCW_BUS) + +/* + * modules can reference this symbol to avoid being loaded + * into system emulators without ccw support + */ +extern bool have_virtio_ccw; + +struct VirtIOCCWDeviceClass { + CCWDeviceClass parent_class; + void (*realize)(VirtioCcwDevice *dev, Error **errp); + void (*unrealize)(VirtioCcwDevice *dev); + 
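+    /* set via device_class_set_parent_reset(); invoked from virtio_ccw_reset() */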
void (*parent_reset)(DeviceState *dev); +}; + +/* Performance improves when virtqueue kick processing is decoupled from the + * vcpu thread using ioeventfd for some devices. */ +#define VIRTIO_CCW_FLAG_USE_IOEVENTFD_BIT 1 +#define VIRTIO_CCW_FLAG_USE_IOEVENTFD (1 << VIRTIO_CCW_FLAG_USE_IOEVENTFD_BIT) + +struct VirtioCcwDevice { + CcwDevice parent_obj; + int revision; + uint32_t max_rev; + VirtioBusState bus; + uint32_t flags; + uint8_t thinint_isc; + AdapterRoutes routes; + /* Guest provided values: */ + IndAddr *indicators; + IndAddr *indicators2; + IndAddr *summary_indicator; + uint64_t ind_bit; + bool force_revision_1; +}; + +/* The maximum virtio revision we support. */ +#define VIRTIO_CCW_MAX_REV 2 +static inline int virtio_ccw_rev_max(VirtioCcwDevice *dev) +{ + return dev->max_rev; +} + +/* virtio-scsi-ccw */ + +#define TYPE_VIRTIO_SCSI_CCW "virtio-scsi-ccw" +OBJECT_DECLARE_SIMPLE_TYPE(VirtIOSCSICcw, VIRTIO_SCSI_CCW) + +struct VirtIOSCSICcw { + VirtioCcwDevice parent_obj; + VirtIOSCSI vdev; +}; + +#ifdef CONFIG_VHOST_SCSI +/* vhost-scsi-ccw */ + +#define TYPE_VHOST_SCSI_CCW "vhost-scsi-ccw" +OBJECT_DECLARE_SIMPLE_TYPE(VHostSCSICcw, VHOST_SCSI_CCW) + +struct VHostSCSICcw { + VirtioCcwDevice parent_obj; + VHostSCSI vdev; +}; +#endif + +/* virtio-blk-ccw */ + +#define TYPE_VIRTIO_BLK_CCW "virtio-blk-ccw" +OBJECT_DECLARE_SIMPLE_TYPE(VirtIOBlkCcw, VIRTIO_BLK_CCW) + +struct VirtIOBlkCcw { + VirtioCcwDevice parent_obj; + VirtIOBlock vdev; +}; + +/* virtio-balloon-ccw */ + +#define TYPE_VIRTIO_BALLOON_CCW "virtio-balloon-ccw" +OBJECT_DECLARE_SIMPLE_TYPE(VirtIOBalloonCcw, VIRTIO_BALLOON_CCW) + +struct VirtIOBalloonCcw { + VirtioCcwDevice parent_obj; + VirtIOBalloon vdev; +}; + +/* virtio-serial-ccw */ + +#define TYPE_VIRTIO_SERIAL_CCW "virtio-serial-ccw" +OBJECT_DECLARE_SIMPLE_TYPE(VirtioSerialCcw, VIRTIO_SERIAL_CCW) + +struct VirtioSerialCcw { + VirtioCcwDevice parent_obj; + VirtIOSerial vdev; +}; + +/* virtio-net-ccw */ + +#define TYPE_VIRTIO_NET_CCW "virtio-net-ccw" +OBJECT_DECLARE_SIMPLE_TYPE(VirtIONetCcw, VIRTIO_NET_CCW) + +struct VirtIONetCcw { + VirtioCcwDevice parent_obj; + VirtIONet vdev; +}; + +/* virtio-rng-ccw */ + +#define TYPE_VIRTIO_RNG_CCW "virtio-rng-ccw" +OBJECT_DECLARE_SIMPLE_TYPE(VirtIORNGCcw, VIRTIO_RNG_CCW) + +struct VirtIORNGCcw { + VirtioCcwDevice parent_obj; + VirtIORNG vdev; +}; + +/* virtio-crypto-ccw */ + +#define TYPE_VIRTIO_CRYPTO_CCW "virtio-crypto-ccw" +OBJECT_DECLARE_SIMPLE_TYPE(VirtIOCryptoCcw, VIRTIO_CRYPTO_CCW) + +struct VirtIOCryptoCcw { + VirtioCcwDevice parent_obj; + VirtIOCrypto vdev; +}; + +VirtIODevice *virtio_ccw_get_vdev(SubchDev *sch); + +#ifdef CONFIG_VIRTFS +#include "hw/9pfs/virtio-9p.h" + +#define TYPE_VIRTIO_9P_CCW "virtio-9p-ccw" +OBJECT_DECLARE_SIMPLE_TYPE(V9fsCCWState, VIRTIO_9P_CCW) + +struct V9fsCCWState { + VirtioCcwDevice parent_obj; + V9fsVirtioState vdev; +}; + +#endif /* CONFIG_VIRTFS */ + +#ifdef CONFIG_VHOST_VSOCK +#define TYPE_VHOST_VSOCK_CCW "vhost-vsock-ccw" +OBJECT_DECLARE_SIMPLE_TYPE(VHostVSockCCWState, VHOST_VSOCK_CCW) + +struct VHostVSockCCWState { + VirtioCcwDevice parent_obj; + VHostVSock vdev; +}; + +#endif /* CONFIG_VHOST_VSOCK */ + +#define TYPE_VIRTIO_GPU_CCW "virtio-gpu-ccw" +OBJECT_DECLARE_SIMPLE_TYPE(VirtIOGPUCcw, VIRTIO_GPU_CCW) + +struct VirtIOGPUCcw { + VirtioCcwDevice parent_obj; + VirtIOGPU vdev; +}; + +#define TYPE_VIRTIO_INPUT_CCW "virtio-input-ccw" +OBJECT_DECLARE_SIMPLE_TYPE(VirtIOInputCcw, VIRTIO_INPUT_CCW) + +struct VirtIOInputCcw { + VirtioCcwDevice parent_obj; + VirtIOInput vdev; +}; + +#define 
TYPE_VIRTIO_INPUT_HID_CCW "virtio-input-hid-ccw" +#define TYPE_VIRTIO_KEYBOARD_CCW "virtio-keyboard-ccw" +#define TYPE_VIRTIO_MOUSE_CCW "virtio-mouse-ccw" +#define TYPE_VIRTIO_TABLET_CCW "virtio-tablet-ccw" +OBJECT_DECLARE_SIMPLE_TYPE(VirtIOInputHIDCcw, VIRTIO_INPUT_HID_CCW) + +struct VirtIOInputHIDCcw { + VirtioCcwDevice parent_obj; + VirtIOInputHID vdev; +}; + +#endif diff --git a/hw/scsi/Kconfig b/hw/scsi/Kconfig new file mode 100644 index 000000000..77d397c94 --- /dev/null +++ b/hw/scsi/Kconfig @@ -0,0 +1,55 @@ +config SCSI + bool + +config LSI_SCSI_PCI + bool + default y if PCI_DEVICES + depends on PCI + select SCSI + +config MPTSAS_SCSI_PCI + bool + default y if PCI_DEVICES + depends on PCI + select SCSI + +config MEGASAS_SCSI_PCI + bool + default y if PCI_DEVICES + depends on PCI + select SCSI + +config VMW_PVSCSI_SCSI_PCI + bool + default y if PCI_DEVICES + depends on PCI + select SCSI + +config ESP + bool + select SCSI + +config ESP_PCI + bool + default y if PCI_DEVICES + depends on PCI + select ESP + select NMC93XX_EEPROM + +config SPAPR_VSCSI + bool + default y + depends on PSERIES + select SCSI + +config VIRTIO_SCSI + bool + default y + depends on VIRTIO + select SCSI + +config VHOST_USER_SCSI + bool + # Only PCI devices are provided for now + default y if VIRTIO_PCI + depends on VIRTIO && VHOST_USER && LINUX diff --git a/hw/scsi/emulation.c b/hw/scsi/emulation.c new file mode 100644 index 000000000..06d62f3c3 --- /dev/null +++ b/hw/scsi/emulation.c @@ -0,0 +1,42 @@ +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "qemu/bswap.h" +#include "hw/scsi/emulation.h" + +int scsi_emulate_block_limits(uint8_t *outbuf, const SCSIBlockLimits *bl) +{ + /* required VPD size with unmap support */ + memset(outbuf, 0, 0x3c); + + outbuf[0] = bl->wsnz; /* wsnz */ + + if (bl->max_io_sectors) { + /* optimal transfer length granularity. This field and the optimal + * transfer length can't be greater than maximum transfer length. 
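+         * Both values are therefore clamped against max_io_sectors with
+         * MIN() below.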
+ */ + stw_be_p(outbuf + 2, MIN(bl->min_io_size, bl->max_io_sectors)); + + /* maximum transfer length */ + stl_be_p(outbuf + 4, bl->max_io_sectors); + + /* optimal transfer length */ + stl_be_p(outbuf + 8, MIN(bl->opt_io_size, bl->max_io_sectors)); + } else { + stw_be_p(outbuf + 2, bl->min_io_size); + stl_be_p(outbuf + 8, bl->opt_io_size); + } + + /* max unmap LBA count */ + stl_be_p(outbuf + 16, bl->max_unmap_sectors); + + /* max unmap descriptors */ + stl_be_p(outbuf + 20, bl->max_unmap_descr); + + /* optimal unmap granularity; alignment is zero */ + stl_be_p(outbuf + 24, bl->unmap_sectors); + + /* max write same size, make it the same as maximum transfer length */ + stl_be_p(outbuf + 36, bl->max_io_sectors); + + return 0x3c; +} diff --git a/hw/scsi/esp-pci.c b/hw/scsi/esp-pci.c new file mode 100644 index 000000000..dac054aee --- /dev/null +++ b/hw/scsi/esp-pci.c @@ -0,0 +1,564 @@ +/* + * QEMU ESP/NCR53C9x emulation + * + * Copyright (c) 2005-2006 Fabrice Bellard + * Copyright (c) 2012 Herve Poussineau + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include "hw/pci/pci.h" +#include "hw/irq.h" +#include "hw/nvram/eeprom93xx.h" +#include "hw/scsi/esp.h" +#include "migration/vmstate.h" +#include "trace.h" +#include "qapi/error.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "qom/object.h" + +#define TYPE_AM53C974_DEVICE "am53c974" + +typedef struct PCIESPState PCIESPState; +DECLARE_INSTANCE_CHECKER(PCIESPState, PCI_ESP, + TYPE_AM53C974_DEVICE) + +#define DMA_CMD 0x0 +#define DMA_STC 0x1 +#define DMA_SPA 0x2 +#define DMA_WBC 0x3 +#define DMA_WAC 0x4 +#define DMA_STAT 0x5 +#define DMA_SMDLA 0x6 +#define DMA_WMAC 0x7 + +#define DMA_CMD_MASK 0x03 +#define DMA_CMD_DIAG 0x04 +#define DMA_CMD_MDL 0x10 +#define DMA_CMD_INTE_P 0x20 +#define DMA_CMD_INTE_D 0x40 +#define DMA_CMD_DIR 0x80 + +#define DMA_STAT_PWDN 0x01 +#define DMA_STAT_ERROR 0x02 +#define DMA_STAT_ABORT 0x04 +#define DMA_STAT_DONE 0x08 +#define DMA_STAT_SCSIINT 0x10 +#define DMA_STAT_BCMBLT 0x20 + +#define SBAC_STATUS (1 << 24) + +struct PCIESPState { + /*< private >*/ + PCIDevice parent_obj; + /*< public >*/ + + MemoryRegion io; + uint32_t dma_regs[8]; + uint32_t sbac; + ESPState esp; +}; + +static void esp_pci_handle_idle(PCIESPState *pci, uint32_t val) +{ + ESPState *s = ESP(&pci->esp); + + trace_esp_pci_dma_idle(val); + esp_dma_enable(s, 0, 0); +} + +static void esp_pci_handle_blast(PCIESPState *pci, uint32_t val) +{ + trace_esp_pci_dma_blast(val); + qemu_log_mask(LOG_UNIMP, "am53c974: cmd BLAST not implemented\n"); +} + +static void esp_pci_handle_abort(PCIESPState *pci, uint32_t val) +{ + ESPState *s = ESP(&pci->esp); + + trace_esp_pci_dma_abort(val); + if (s->current_req) { + scsi_req_cancel(s->current_req); + } +} + +static void esp_pci_handle_start(PCIESPState *pci, uint32_t val) +{ + ESPState *s = ESP(&pci->esp); + + trace_esp_pci_dma_start(val); + + pci->dma_regs[DMA_WBC] = pci->dma_regs[DMA_STC]; + pci->dma_regs[DMA_WAC] = pci->dma_regs[DMA_SPA]; + pci->dma_regs[DMA_WMAC] = pci->dma_regs[DMA_SMDLA]; + + pci->dma_regs[DMA_STAT] &= ~(DMA_STAT_BCMBLT | DMA_STAT_SCSIINT + | DMA_STAT_DONE | DMA_STAT_ABORT + | DMA_STAT_ERROR | DMA_STAT_PWDN); + + esp_dma_enable(s, 0, 1); +} + +static void esp_pci_dma_write(PCIESPState *pci, uint32_t saddr, uint32_t val) +{ + trace_esp_pci_dma_write(saddr, pci->dma_regs[saddr], val); + switch (saddr) { + case DMA_CMD: + pci->dma_regs[saddr] = val; + switch (val & DMA_CMD_MASK) { + case 0x0: /* IDLE */ + esp_pci_handle_idle(pci, val); + break; + case 0x1: /* BLAST */ + esp_pci_handle_blast(pci, val); + break; + case 0x2: /* ABORT */ + esp_pci_handle_abort(pci, val); + break; + case 0x3: /* START */ + esp_pci_handle_start(pci, val); + break; + default: /* can't happen */ + abort(); + } + break; + case DMA_STC: + case DMA_SPA: + case DMA_SMDLA: + pci->dma_regs[saddr] = val; + break; + case DMA_STAT: + if (pci->sbac & SBAC_STATUS) { + /* clear some bits on write */ + uint32_t mask = DMA_STAT_ERROR | DMA_STAT_ABORT | DMA_STAT_DONE; + pci->dma_regs[DMA_STAT] &= ~(val & mask); + } + break; + default: + trace_esp_pci_error_invalid_write_dma(val, saddr); + return; + } +} + +static uint32_t esp_pci_dma_read(PCIESPState *pci, uint32_t saddr) +{ + ESPState *s = ESP(&pci->esp); + uint32_t val; + + val = pci->dma_regs[saddr]; + if (saddr == DMA_STAT) { + if (s->rregs[ESP_RSTAT] & STAT_INT) { + val |= DMA_STAT_SCSIINT; + } + if (!(pci->sbac & SBAC_STATUS)) { + pci->dma_regs[DMA_STAT] &= ~(DMA_STAT_ERROR | DMA_STAT_ABORT | + DMA_STAT_DONE); + } + } + + trace_esp_pci_dma_read(saddr, val); + return val; +} + 
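+/*
+ * Note on the MMIO window below: the chip's registers are modelled as
+ * 32-bit words.  esp_pci_io_write() widens narrow or misaligned writes
+ * by reading the current word, merging the written bytes in at the
+ * computed shift, and issuing one aligned 4-byte store.  Illustrative
+ * example: a 2-byte write at an aligned offset replaces the low
+ * half-word of the register and leaves the upper half untouched.
+ */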
+static void esp_pci_io_write(void *opaque, hwaddr addr,
+                             uint64_t val, unsigned int size)
+{
+    PCIESPState *pci = opaque;
+    ESPState *s = ESP(&pci->esp);
+
+    if (size < 4 || addr & 3) {
+        /* need to upgrade request: we only support 4-byte accesses */
+        uint32_t current = 0, mask;
+        int shift;
+
+        if (addr < 0x40) {
+            current = s->wregs[addr >> 2];
+        } else if (addr < 0x60) {
+            current = pci->dma_regs[(addr - 0x40) >> 2];
+        } else if (addr < 0x74) {
+            current = pci->sbac;
+        }
+
+        shift = (4 - size) * 8;
+        mask = (~(uint32_t)0 << shift) >> shift;
+
+        shift = ((4 - (addr & 3)) & 3) * 8;
+        val <<= shift;
+        val |= current & ~(mask << shift);
+        addr &= ~3;
+        size = 4;
+    }
+    g_assert(size >= 4);
+
+    if (addr < 0x40) {
+        /* SCSI core reg */
+        esp_reg_write(s, addr >> 2, val);
+    } else if (addr < 0x60) {
+        /* PCI DMA CCB */
+        esp_pci_dma_write(pci, (addr - 0x40) >> 2, val);
+    } else if (addr == 0x70) {
+        /* DMA SCSI Bus and control */
+        trace_esp_pci_sbac_write(pci->sbac, val);
+        pci->sbac = val;
+    } else {
+        trace_esp_pci_error_invalid_write((int)addr);
+    }
+}
+
+static uint64_t esp_pci_io_read(void *opaque, hwaddr addr,
+                                unsigned int size)
+{
+    PCIESPState *pci = opaque;
+    ESPState *s = ESP(&pci->esp);
+    uint32_t ret;
+
+    if (addr < 0x40) {
+        /* SCSI core reg */
+        ret = esp_reg_read(s, addr >> 2);
+    } else if (addr < 0x60) {
+        /* PCI DMA CCB */
+        ret = esp_pci_dma_read(pci, (addr - 0x40) >> 2);
+    } else if (addr == 0x70) {
+        /* DMA SCSI Bus and control */
+        trace_esp_pci_sbac_read(pci->sbac);
+        ret = pci->sbac;
+    } else {
+        /* Invalid region */
+        trace_esp_pci_error_invalid_read((int)addr);
+        ret = 0;
+    }
+
+    /* give only requested data */
+    ret >>= (addr & 3) * 8;
+    ret &= ~(~(uint64_t)0 << (8 * size));
+
+    return ret;
+}
+
+static void esp_pci_dma_memory_rw(PCIESPState *pci, uint8_t *buf, int len,
+                                  DMADirection dir)
+{
+    dma_addr_t addr;
+    DMADirection expected_dir;
+
+    if (pci->dma_regs[DMA_CMD] & DMA_CMD_DIR) {
+        expected_dir = DMA_DIRECTION_FROM_DEVICE;
+    } else {
+        expected_dir = DMA_DIRECTION_TO_DEVICE;
+    }
+
+    if (dir != expected_dir) {
+        trace_esp_pci_error_invalid_dma_direction();
+        return;
+    }
+
+    /* MDL mode is requested via the MDL bit in the command register */
+    if (pci->dma_regs[DMA_CMD] & DMA_CMD_MDL) {
+        qemu_log_mask(LOG_UNIMP, "am53c974: MDL transfer not implemented\n");
+    }
+
+    addr = pci->dma_regs[DMA_SPA];
+    if (pci->dma_regs[DMA_WBC] < len) {
+        len = pci->dma_regs[DMA_WBC];
+    }
+
+    pci_dma_rw(PCI_DEVICE(pci), addr, buf, len, dir);
+
+    /* update status registers */
+    pci->dma_regs[DMA_WBC] -= len;
+    pci->dma_regs[DMA_WAC] += len;
+    if (pci->dma_regs[DMA_WBC] == 0) {
+        pci->dma_regs[DMA_STAT] |= DMA_STAT_DONE;
+    }
+}
+
+static void esp_pci_dma_memory_read(void *opaque, uint8_t *buf, int len)
+{
+    PCIESPState *pci = opaque;
+    esp_pci_dma_memory_rw(pci, buf, len, DMA_DIRECTION_TO_DEVICE);
+}
+
+static void esp_pci_dma_memory_write(void *opaque, uint8_t *buf, int len)
+{
+    PCIESPState *pci = opaque;
+    esp_pci_dma_memory_rw(pci, buf, len, DMA_DIRECTION_FROM_DEVICE);
+}
+
+static const MemoryRegionOps esp_pci_io_ops = {
+    .read = esp_pci_io_read,
+    .write = esp_pci_io_write,
+    .endianness = DEVICE_LITTLE_ENDIAN,
+    .impl = {
+        .min_access_size = 1,
+        .max_access_size = 4,
+    },
+};
+
+static void esp_pci_hard_reset(DeviceState *dev)
+{
+    PCIESPState *pci = PCI_ESP(dev);
+    ESPState *s = ESP(&pci->esp);
+
+    esp_hard_reset(s);
+    pci->dma_regs[DMA_CMD] &= ~(DMA_CMD_DIR | DMA_CMD_INTE_D | DMA_CMD_INTE_P
+                              | DMA_CMD_MDL | DMA_CMD_DIAG | DMA_CMD_MASK);
+    pci->dma_regs[DMA_WBC] &= ~0xffff;
+    pci->dma_regs[DMA_WAC] = 0xffffffff;
+
pci->dma_regs[DMA_STAT] &= ~(DMA_STAT_BCMBLT | DMA_STAT_SCSIINT + | DMA_STAT_DONE | DMA_STAT_ABORT + | DMA_STAT_ERROR); + pci->dma_regs[DMA_WMAC] = 0xfffffffd; +} + +static const VMStateDescription vmstate_esp_pci_scsi = { + .name = "pciespscsi", + .version_id = 2, + .minimum_version_id = 1, + .pre_save = esp_pre_save, + .fields = (VMStateField[]) { + VMSTATE_PCI_DEVICE(parent_obj, PCIESPState), + VMSTATE_BUFFER_UNSAFE(dma_regs, PCIESPState, 0, 8 * sizeof(uint32_t)), + VMSTATE_UINT8_V(esp.mig_version_id, PCIESPState, 2), + VMSTATE_STRUCT(esp, PCIESPState, 0, vmstate_esp, ESPState), + VMSTATE_END_OF_LIST() + } +}; + +static void esp_pci_command_complete(SCSIRequest *req, size_t resid) +{ + ESPState *s = req->hba_private; + PCIESPState *pci = container_of(s, PCIESPState, esp); + + esp_command_complete(req, resid); + pci->dma_regs[DMA_WBC] = 0; + pci->dma_regs[DMA_STAT] |= DMA_STAT_DONE; +} + +static const struct SCSIBusInfo esp_pci_scsi_info = { + .tcq = false, + .max_target = ESP_MAX_DEVS, + .max_lun = 7, + + .transfer_data = esp_transfer_data, + .complete = esp_pci_command_complete, + .cancel = esp_request_cancelled, +}; + +static void esp_pci_scsi_realize(PCIDevice *dev, Error **errp) +{ + PCIESPState *pci = PCI_ESP(dev); + DeviceState *d = DEVICE(dev); + ESPState *s = ESP(&pci->esp); + uint8_t *pci_conf; + + if (!qdev_realize(DEVICE(s), NULL, errp)) { + return; + } + + pci_conf = dev->config; + + /* Interrupt pin A */ + pci_conf[PCI_INTERRUPT_PIN] = 0x01; + + s->dma_memory_read = esp_pci_dma_memory_read; + s->dma_memory_write = esp_pci_dma_memory_write; + s->dma_opaque = pci; + s->chip_id = TCHI_AM53C974; + memory_region_init_io(&pci->io, OBJECT(pci), &esp_pci_io_ops, pci, + "esp-io", 0x80); + + pci_register_bar(dev, 0, PCI_BASE_ADDRESS_SPACE_IO, &pci->io); + s->irq = pci_allocate_irq(dev); + + scsi_bus_init(&s->bus, sizeof(s->bus), d, &esp_pci_scsi_info); +} + +static void esp_pci_scsi_exit(PCIDevice *d) +{ + PCIESPState *pci = PCI_ESP(d); + ESPState *s = ESP(&pci->esp); + + qemu_free_irq(s->irq); +} + +static void esp_pci_init(Object *obj) +{ + PCIESPState *pci = PCI_ESP(obj); + + object_initialize_child(obj, "esp", &pci->esp, TYPE_ESP); +} + +static void esp_pci_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + + k->realize = esp_pci_scsi_realize; + k->exit = esp_pci_scsi_exit; + k->vendor_id = PCI_VENDOR_ID_AMD; + k->device_id = PCI_DEVICE_ID_AMD_SCSI; + k->revision = 0x10; + k->class_id = PCI_CLASS_STORAGE_SCSI; + set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); + dc->desc = "AMD Am53c974 PCscsi-PCI SCSI adapter"; + dc->reset = esp_pci_hard_reset; + dc->vmsd = &vmstate_esp_pci_scsi; +} + +static const TypeInfo esp_pci_info = { + .name = TYPE_AM53C974_DEVICE, + .parent = TYPE_PCI_DEVICE, + .instance_init = esp_pci_init, + .instance_size = sizeof(PCIESPState), + .class_init = esp_pci_class_init, + .interfaces = (InterfaceInfo[]) { + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { }, + }, +}; + +struct DC390State { + PCIESPState pci; + eeprom_t *eeprom; +}; +typedef struct DC390State DC390State; + +#define TYPE_DC390_DEVICE "dc390" +DECLARE_INSTANCE_CHECKER(DC390State, DC390, + TYPE_DC390_DEVICE) + +#define EE_ADAPT_SCSI_ID 64 +#define EE_MODE2 65 +#define EE_DELAY 66 +#define EE_TAG_CMD_NUM 67 +#define EE_ADAPT_OPTIONS 68 +#define EE_BOOT_SCSI_ID 69 +#define EE_BOOT_SCSI_LUN 70 +#define EE_CHKSUM1 126 +#define EE_CHKSUM2 127 + +#define EE_ADAPT_OPTION_F6_F8_AT_BOOT 0x01 +#define 
EE_ADAPT_OPTION_BOOT_FROM_CDROM 0x02 +#define EE_ADAPT_OPTION_INT13 0x04 +#define EE_ADAPT_OPTION_SCAM_SUPPORT 0x08 + + +static uint32_t dc390_read_config(PCIDevice *dev, uint32_t addr, int l) +{ + DC390State *pci = DC390(dev); + uint32_t val; + + val = pci_default_read_config(dev, addr, l); + + if (addr == 0x00 && l == 1) { + /* First byte of address space is AND-ed with EEPROM DO line */ + if (!eeprom93xx_read(pci->eeprom)) { + val &= ~0xff; + } + } + + return val; +} + +static void dc390_write_config(PCIDevice *dev, + uint32_t addr, uint32_t val, int l) +{ + DC390State *pci = DC390(dev); + if (addr == 0x80) { + /* EEPROM write */ + int eesk = val & 0x80 ? 1 : 0; + int eedi = val & 0x40 ? 1 : 0; + eeprom93xx_write(pci->eeprom, 1, eesk, eedi); + } else if (addr == 0xc0) { + /* EEPROM CS low */ + eeprom93xx_write(pci->eeprom, 0, 0, 0); + } else { + pci_default_write_config(dev, addr, val, l); + } +} + +static void dc390_scsi_realize(PCIDevice *dev, Error **errp) +{ + DC390State *pci = DC390(dev); + Error *err = NULL; + uint8_t *contents; + uint16_t chksum = 0; + int i; + + /* init base class */ + esp_pci_scsi_realize(dev, &err); + if (err) { + error_propagate(errp, err); + return; + } + + /* EEPROM */ + pci->eeprom = eeprom93xx_new(DEVICE(dev), 64); + + /* set default eeprom values */ + contents = (uint8_t *)eeprom93xx_data(pci->eeprom); + + for (i = 0; i < 16; i++) { + contents[i * 2] = 0x57; + contents[i * 2 + 1] = 0x00; + } + contents[EE_ADAPT_SCSI_ID] = 7; + contents[EE_MODE2] = 0x0f; + contents[EE_TAG_CMD_NUM] = 0x04; + contents[EE_ADAPT_OPTIONS] = EE_ADAPT_OPTION_F6_F8_AT_BOOT + | EE_ADAPT_OPTION_BOOT_FROM_CDROM + | EE_ADAPT_OPTION_INT13; + + /* update eeprom checksum */ + for (i = 0; i < EE_CHKSUM1; i += 2) { + chksum += contents[i] + (((uint16_t)contents[i + 1]) << 8); + } + chksum = 0x1234 - chksum; + contents[EE_CHKSUM1] = chksum & 0xff; + contents[EE_CHKSUM2] = chksum >> 8; +} + +static void dc390_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + + k->realize = dc390_scsi_realize; + k->config_read = dc390_read_config; + k->config_write = dc390_write_config; + set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); + dc->desc = "Tekram DC-390 SCSI adapter"; +} + +static const TypeInfo dc390_info = { + .name = TYPE_DC390_DEVICE, + .parent = TYPE_AM53C974_DEVICE, + .instance_size = sizeof(DC390State), + .class_init = dc390_class_init, +}; + +static void esp_pci_register_types(void) +{ + type_register_static(&esp_pci_info); + type_register_static(&dc390_info); +} + +type_init(esp_pci_register_types) diff --git a/hw/scsi/esp.c b/hw/scsi/esp.c new file mode 100644 index 000000000..58d0edbd5 --- /dev/null +++ b/hw/scsi/esp.c @@ -0,0 +1,1440 @@ +/* + * QEMU ESP/NCR53C9x emulation + * + * Copyright (c) 2005-2006 Fabrice Bellard + * Copyright (c) 2012 Herve Poussineau + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "hw/irq.h" +#include "hw/scsi/esp.h" +#include "trace.h" +#include "qemu/log.h" +#include "qemu/module.h" + +/* + * On Sparc32, this is the ESP (NCR53C90) part of chip STP2000 (Master I/O), + * also produced as NCR89C100. See + * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR89C100.txt + * and + * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR53C9X.txt + * + * On Macintosh Quadra it is a NCR53C96. + */ + +static void esp_raise_irq(ESPState *s) +{ + if (!(s->rregs[ESP_RSTAT] & STAT_INT)) { + s->rregs[ESP_RSTAT] |= STAT_INT; + qemu_irq_raise(s->irq); + trace_esp_raise_irq(); + } +} + +static void esp_lower_irq(ESPState *s) +{ + if (s->rregs[ESP_RSTAT] & STAT_INT) { + s->rregs[ESP_RSTAT] &= ~STAT_INT; + qemu_irq_lower(s->irq); + trace_esp_lower_irq(); + } +} + +static void esp_raise_drq(ESPState *s) +{ + qemu_irq_raise(s->irq_data); + trace_esp_raise_drq(); +} + +static void esp_lower_drq(ESPState *s) +{ + qemu_irq_lower(s->irq_data); + trace_esp_lower_drq(); +} + +void esp_dma_enable(ESPState *s, int irq, int level) +{ + if (level) { + s->dma_enabled = 1; + trace_esp_dma_enable(); + if (s->dma_cb) { + s->dma_cb(s); + s->dma_cb = NULL; + } + } else { + trace_esp_dma_disable(); + s->dma_enabled = 0; + } +} + +void esp_request_cancelled(SCSIRequest *req) +{ + ESPState *s = req->hba_private; + + if (req == s->current_req) { + scsi_req_unref(s->current_req); + s->current_req = NULL; + s->current_dev = NULL; + s->async_len = 0; + } +} + +static void esp_fifo_push(Fifo8 *fifo, uint8_t val) +{ + if (fifo8_num_used(fifo) == fifo->capacity) { + trace_esp_error_fifo_overrun(); + return; + } + + fifo8_push(fifo, val); +} + +static uint8_t esp_fifo_pop(Fifo8 *fifo) +{ + if (fifo8_is_empty(fifo)) { + return 0; + } + + return fifo8_pop(fifo); +} + +static uint32_t esp_fifo_pop_buf(Fifo8 *fifo, uint8_t *dest, int maxlen) +{ + const uint8_t *buf; + uint32_t n; + + if (maxlen == 0) { + return 0; + } + + buf = fifo8_pop_buf(fifo, maxlen, &n); + if (dest) { + memcpy(dest, buf, n); + } + + return n; +} + +static uint32_t esp_get_tc(ESPState *s) +{ + uint32_t dmalen; + + dmalen = s->rregs[ESP_TCLO]; + dmalen |= s->rregs[ESP_TCMID] << 8; + dmalen |= s->rregs[ESP_TCHI] << 16; + + return dmalen; +} + +static void esp_set_tc(ESPState *s, uint32_t dmalen) +{ + s->rregs[ESP_TCLO] = dmalen; + s->rregs[ESP_TCMID] = dmalen >> 8; + s->rregs[ESP_TCHI] = dmalen >> 16; +} + +static uint32_t esp_get_stc(ESPState *s) +{ + uint32_t dmalen; + + dmalen = s->wregs[ESP_TCLO]; + dmalen |= s->wregs[ESP_TCMID] << 8; + dmalen |= s->wregs[ESP_TCHI] << 16; + + return dmalen; +} + +static uint8_t esp_pdma_read(ESPState *s) +{ + uint8_t val; + + if (s->do_cmd) { + val = esp_fifo_pop(&s->cmdfifo); + } else { + val = esp_fifo_pop(&s->fifo); + } + + return val; +} + +static void esp_pdma_write(ESPState *s, uint8_t val) +{ + uint32_t dmalen = esp_get_tc(s); + + if (dmalen == 0) { + return; + } + + if (s->do_cmd) { + 
esp_fifo_push(&s->cmdfifo, val); + } else { + esp_fifo_push(&s->fifo, val); + } + + dmalen--; + esp_set_tc(s, dmalen); +} + +static int esp_select(ESPState *s) +{ + int target; + + target = s->wregs[ESP_WBUSID] & BUSID_DID; + + s->ti_size = 0; + fifo8_reset(&s->fifo); + + s->current_dev = scsi_device_find(&s->bus, 0, target, 0); + if (!s->current_dev) { + /* No such drive */ + s->rregs[ESP_RSTAT] = 0; + s->rregs[ESP_RINTR] = INTR_DC; + s->rregs[ESP_RSEQ] = SEQ_0; + esp_raise_irq(s); + return -1; + } + + /* + * Note that we deliberately don't raise the IRQ here: this will be done + * either in do_command_phase() for DATA OUT transfers or by the deferred + * IRQ mechanism in esp_transfer_data() for DATA IN transfers + */ + s->rregs[ESP_RINTR] |= INTR_FC; + s->rregs[ESP_RSEQ] = SEQ_CD; + return 0; +} + +static uint32_t get_cmd(ESPState *s, uint32_t maxlen) +{ + uint8_t buf[ESP_CMDFIFO_SZ]; + uint32_t dmalen, n; + int target; + + if (s->current_req) { + /* Started a new command before the old one finished. Cancel it. */ + scsi_req_cancel(s->current_req); + } + + target = s->wregs[ESP_WBUSID] & BUSID_DID; + if (s->dma) { + dmalen = MIN(esp_get_tc(s), maxlen); + if (dmalen == 0) { + return 0; + } + if (s->dma_memory_read) { + s->dma_memory_read(s->dma_opaque, buf, dmalen); + dmalen = MIN(fifo8_num_free(&s->cmdfifo), dmalen); + fifo8_push_all(&s->cmdfifo, buf, dmalen); + } else { + if (esp_select(s) < 0) { + fifo8_reset(&s->cmdfifo); + return -1; + } + esp_raise_drq(s); + fifo8_reset(&s->cmdfifo); + return 0; + } + } else { + dmalen = MIN(fifo8_num_used(&s->fifo), maxlen); + if (dmalen == 0) { + return 0; + } + n = esp_fifo_pop_buf(&s->fifo, buf, dmalen); + n = MIN(fifo8_num_free(&s->cmdfifo), n); + fifo8_push_all(&s->cmdfifo, buf, n); + } + trace_esp_get_cmd(dmalen, target); + + if (esp_select(s) < 0) { + fifo8_reset(&s->cmdfifo); + return -1; + } + return dmalen; +} + +static void do_command_phase(ESPState *s) +{ + uint32_t cmdlen; + int32_t datalen; + SCSIDevice *current_lun; + uint8_t buf[ESP_CMDFIFO_SZ]; + + trace_esp_do_command_phase(s->lun); + cmdlen = fifo8_num_used(&s->cmdfifo); + if (!cmdlen || !s->current_dev) { + return; + } + esp_fifo_pop_buf(&s->cmdfifo, buf, cmdlen); + + current_lun = scsi_device_find(&s->bus, 0, s->current_dev->id, s->lun); + s->current_req = scsi_req_new(current_lun, 0, s->lun, buf, s); + datalen = scsi_req_enqueue(s->current_req); + s->ti_size = datalen; + fifo8_reset(&s->cmdfifo); + if (datalen != 0) { + s->rregs[ESP_RSTAT] = STAT_TC; + s->rregs[ESP_RSEQ] = SEQ_CD; + s->ti_cmd = 0; + esp_set_tc(s, 0); + if (datalen > 0) { + /* + * Switch to DATA IN phase but wait until initial data xfer is + * complete before raising the command completion interrupt + */ + s->data_in_ready = false; + s->rregs[ESP_RSTAT] |= STAT_DI; + } else { + s->rregs[ESP_RSTAT] |= STAT_DO; + s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC; + esp_raise_irq(s); + esp_lower_drq(s); + } + scsi_req_continue(s->current_req); + return; + } +} + +static void do_message_phase(ESPState *s) +{ + if (s->cmdfifo_cdb_offset) { + uint8_t message = esp_fifo_pop(&s->cmdfifo); + + trace_esp_do_identify(message); + s->lun = message & 7; + s->cmdfifo_cdb_offset--; + } + + /* Ignore extended messages for now */ + if (s->cmdfifo_cdb_offset) { + int len = MIN(s->cmdfifo_cdb_offset, fifo8_num_used(&s->cmdfifo)); + esp_fifo_pop_buf(&s->cmdfifo, NULL, len); + s->cmdfifo_cdb_offset = 0; + } +} + +static void do_cmd(ESPState *s) +{ + do_message_phase(s); + assert(s->cmdfifo_cdb_offset == 0); + do_command_phase(s); +} + 
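+/*
+ * Illustrative sketch only (not compiled): after a selection with ATN the
+ * cmdfifo carries one IDENTIFY message byte followed by the CDB, which is
+ * why the SELATN handlers below set cmdfifo_cdb_offset to 1 before calling
+ * do_cmd(). The helper name and the example CDB are hypothetical.
+ */
+#if 0
+static void example_fill_cmdfifo(ESPState *s)
+{
+    fifo8_push(&s->cmdfifo, 0x80 | 3);    /* IDENTIFY message, LUN 3 */
+    fifo8_push(&s->cmdfifo, 0x00);        /* CDB[0]: TEST UNIT READY */
+    fifo8_push(&s->cmdfifo, 0x00);        /* CDB[1..5] all zero */
+    fifo8_push(&s->cmdfifo, 0x00);
+    fifo8_push(&s->cmdfifo, 0x00);
+    fifo8_push(&s->cmdfifo, 0x00);
+    fifo8_push(&s->cmdfifo, 0x00);
+    s->cmdfifo_cdb_offset = 1;    /* one message byte precedes the CDB */
+}
+#endif
+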
+static void satn_pdma_cb(ESPState *s) +{ + if (!esp_get_tc(s) && !fifo8_is_empty(&s->cmdfifo)) { + s->cmdfifo_cdb_offset = 1; + s->do_cmd = 0; + do_cmd(s); + } +} + +static void handle_satn(ESPState *s) +{ + int32_t cmdlen; + + if (s->dma && !s->dma_enabled) { + s->dma_cb = handle_satn; + return; + } + s->pdma_cb = satn_pdma_cb; + cmdlen = get_cmd(s, ESP_CMDFIFO_SZ); + if (cmdlen > 0) { + s->cmdfifo_cdb_offset = 1; + s->do_cmd = 0; + do_cmd(s); + } else if (cmdlen == 0) { + s->do_cmd = 1; + /* Target present, but no cmd yet - switch to command phase */ + s->rregs[ESP_RSEQ] = SEQ_CD; + s->rregs[ESP_RSTAT] = STAT_CD; + } +} + +static void s_without_satn_pdma_cb(ESPState *s) +{ + if (!esp_get_tc(s) && !fifo8_is_empty(&s->cmdfifo)) { + s->cmdfifo_cdb_offset = 0; + s->do_cmd = 0; + do_cmd(s); + } +} + +static void handle_s_without_atn(ESPState *s) +{ + int32_t cmdlen; + + if (s->dma && !s->dma_enabled) { + s->dma_cb = handle_s_without_atn; + return; + } + s->pdma_cb = s_without_satn_pdma_cb; + cmdlen = get_cmd(s, ESP_CMDFIFO_SZ); + if (cmdlen > 0) { + s->cmdfifo_cdb_offset = 0; + s->do_cmd = 0; + do_cmd(s); + } else if (cmdlen == 0) { + s->do_cmd = 1; + /* Target present, but no cmd yet - switch to command phase */ + s->rregs[ESP_RSEQ] = SEQ_CD; + s->rregs[ESP_RSTAT] = STAT_CD; + } +} + +static void satn_stop_pdma_cb(ESPState *s) +{ + if (!esp_get_tc(s) && !fifo8_is_empty(&s->cmdfifo)) { + trace_esp_handle_satn_stop(fifo8_num_used(&s->cmdfifo)); + s->do_cmd = 1; + s->cmdfifo_cdb_offset = 1; + s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD; + s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC; + s->rregs[ESP_RSEQ] = SEQ_CD; + esp_raise_irq(s); + } +} + +static void handle_satn_stop(ESPState *s) +{ + int32_t cmdlen; + + if (s->dma && !s->dma_enabled) { + s->dma_cb = handle_satn_stop; + return; + } + s->pdma_cb = satn_stop_pdma_cb; + cmdlen = get_cmd(s, 1); + if (cmdlen > 0) { + trace_esp_handle_satn_stop(fifo8_num_used(&s->cmdfifo)); + s->do_cmd = 1; + s->cmdfifo_cdb_offset = 1; + s->rregs[ESP_RSTAT] = STAT_MO; + s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC; + s->rregs[ESP_RSEQ] = SEQ_MO; + esp_raise_irq(s); + } else if (cmdlen == 0) { + s->do_cmd = 1; + /* Target present, switch to message out phase */ + s->rregs[ESP_RSEQ] = SEQ_MO; + s->rregs[ESP_RSTAT] = STAT_MO; + } +} + +static void write_response_pdma_cb(ESPState *s) +{ + s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST; + s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC; + s->rregs[ESP_RSEQ] = SEQ_CD; + esp_raise_irq(s); +} + +static void write_response(ESPState *s) +{ + uint8_t buf[2]; + + trace_esp_write_response(s->status); + + buf[0] = s->status; + buf[1] = 0; + + if (s->dma) { + if (s->dma_memory_write) { + s->dma_memory_write(s->dma_opaque, buf, 2); + s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST; + s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC; + s->rregs[ESP_RSEQ] = SEQ_CD; + } else { + s->pdma_cb = write_response_pdma_cb; + esp_raise_drq(s); + return; + } + } else { + fifo8_reset(&s->fifo); + fifo8_push_all(&s->fifo, buf, 2); + s->rregs[ESP_RFLAGS] = 2; + } + esp_raise_irq(s); +} + +static void esp_dma_done(ESPState *s) +{ + s->rregs[ESP_RSTAT] |= STAT_TC; + s->rregs[ESP_RINTR] |= INTR_BS; + s->rregs[ESP_RFLAGS] = 0; + esp_set_tc(s, 0); + esp_raise_irq(s); +} + +static void do_dma_pdma_cb(ESPState *s) +{ + int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO); + int len; + uint32_t n; + + if (s->do_cmd) { + /* Ensure we have received complete command after SATN and stop */ + if (esp_get_tc(s) || fifo8_is_empty(&s->cmdfifo)) { + return; + } + + s->ti_size = 0; + if 
((s->rregs[ESP_RSTAT] & 7) == STAT_CD) {
+            /* No command received */
+            if (s->cmdfifo_cdb_offset == fifo8_num_used(&s->cmdfifo)) {
+                return;
+            }
+
+            /* Command has been received */
+            s->do_cmd = 0;
+            do_cmd(s);
+        } else {
+            /*
+             * Extra message out bytes received: update cmdfifo_cdb_offset
+             * and then switch to command phase
+             */
+            s->cmdfifo_cdb_offset = fifo8_num_used(&s->cmdfifo);
+            s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
+            s->rregs[ESP_RSEQ] = SEQ_CD;
+            s->rregs[ESP_RINTR] |= INTR_BS;
+            esp_raise_irq(s);
+        }
+        return;
+    }
+
+    if (!s->current_req) {
+        return;
+    }
+
+    if (to_device) {
+        /* Copy FIFO data to device */
+        len = MIN(s->async_len, ESP_FIFO_SZ);
+        len = MIN(len, fifo8_num_used(&s->fifo));
+        n = esp_fifo_pop_buf(&s->fifo, s->async_buf, len);
+        s->async_buf += n;
+        s->async_len -= n;
+        s->ti_size += n;
+
+        if (n < len) {
+            /* Unaligned accesses can cause FIFO wraparound */
+            len = len - n;
+            n = esp_fifo_pop_buf(&s->fifo, s->async_buf, len);
+            s->async_buf += n;
+            s->async_len -= n;
+            s->ti_size += n;
+        }
+
+        if (s->async_len == 0) {
+            scsi_req_continue(s->current_req);
+            return;
+        }
+
+        if (esp_get_tc(s) == 0) {
+            esp_lower_drq(s);
+            esp_dma_done(s);
+        }
+
+        return;
+    } else {
+        if (s->async_len == 0) {
+            /* Defer until the scsi layer has completed */
+            scsi_req_continue(s->current_req);
+            s->data_in_ready = false;
+            return;
+        }
+
+        if (esp_get_tc(s) != 0) {
+            /* Copy device data to FIFO */
+            len = MIN(s->async_len, esp_get_tc(s));
+            len = MIN(len, fifo8_num_free(&s->fifo));
+            fifo8_push_all(&s->fifo, s->async_buf, len);
+            s->async_buf += len;
+            s->async_len -= len;
+            s->ti_size -= len;
+            esp_set_tc(s, esp_get_tc(s) - len);
+
+            if (esp_get_tc(s) == 0) {
+                /* Indicate transfer to FIFO is complete */
+                s->rregs[ESP_RSTAT] |= STAT_TC;
+            }
+            return;
+        }
+
+        /* Partially filled a scsi buffer. Complete immediately. */
+        esp_lower_drq(s);
+        esp_dma_done(s);
+    }
+}
+
+static void esp_do_dma(ESPState *s)
+{
+    uint32_t len, cmdlen;
+    int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);
+    uint8_t buf[ESP_CMDFIFO_SZ];
+
+    len = esp_get_tc(s);
+    if (s->do_cmd) {
+        /*
+         * handle_ti() case: esp_do_dma() is only entered with do_cmd set
+         * from handle_ti()
+         */
+        cmdlen = fifo8_num_used(&s->cmdfifo);
+        trace_esp_do_dma(cmdlen, len);
+        if (s->dma_memory_read) {
+            len = MIN(len, fifo8_num_free(&s->cmdfifo));
+            s->dma_memory_read(s->dma_opaque, buf, len);
+            fifo8_push_all(&s->cmdfifo, buf, len);
+        } else {
+            s->pdma_cb = do_dma_pdma_cb;
+            esp_raise_drq(s);
+            return;
+        }
+        trace_esp_handle_ti_cmd(cmdlen);
+        s->ti_size = 0;
+        if ((s->rregs[ESP_RSTAT] & 7) == STAT_CD) {
+            /* No command received */
+            if (s->cmdfifo_cdb_offset == fifo8_num_used(&s->cmdfifo)) {
+                return;
+            }
+
+            /* Command has been received */
+            s->do_cmd = 0;
+            do_cmd(s);
+        } else {
+            /*
+             * Extra message out bytes received: update cmdfifo_cdb_offset
+             * and then switch to command phase
+             */
+            s->cmdfifo_cdb_offset = fifo8_num_used(&s->cmdfifo);
+            s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
+            s->rregs[ESP_RSEQ] = SEQ_CD;
+            s->rregs[ESP_RINTR] |= INTR_BS;
+            esp_raise_irq(s);
+        }
+        return;
+    }
+    if (!s->current_req) {
+        return;
+    }
+    if (s->async_len == 0) {
+        /* Defer until data is available. */
+        return;
+    }
+    if (len > s->async_len) {
+        len = s->async_len;
+    }
+    if (to_device) {
+        if (s->dma_memory_read) {
+            s->dma_memory_read(s->dma_opaque, s->async_buf, len);
+        } else {
+            s->pdma_cb = do_dma_pdma_cb;
+            esp_raise_drq(s);
+            return;
+        }
+    } else {
+        if (s->dma_memory_write) {
+            s->dma_memory_write(s->dma_opaque, s->async_buf, len);
+        } else {
+            /* Adjust TC for any leftover data in the FIFO */
+            if (!fifo8_is_empty(&s->fifo)) {
+                esp_set_tc(s, esp_get_tc(s) - fifo8_num_used(&s->fifo));
+            }
+
+            /* Copy device data to FIFO */
+            len = MIN(len, fifo8_num_free(&s->fifo));
+            fifo8_push_all(&s->fifo, s->async_buf, len);
+            s->async_buf += len;
+            s->async_len -= len;
+            s->ti_size -= len;
+
+            /*
+             * MacOS toolbox uses a TI length of 16 bytes for all commands, so
+             * commands shorter than this must be padded accordingly
+             */
+            if (len < esp_get_tc(s) && esp_get_tc(s) <= ESP_FIFO_SZ) {
+                while (fifo8_num_used(&s->fifo) < ESP_FIFO_SZ) {
+                    esp_fifo_push(&s->fifo, 0);
+                    len++;
+                }
+            }
+
+            esp_set_tc(s, esp_get_tc(s) - len);
+            s->pdma_cb = do_dma_pdma_cb;
+            esp_raise_drq(s);
+
+            /* Indicate transfer to FIFO is complete */
+            s->rregs[ESP_RSTAT] |= STAT_TC;
+            return;
+        }
+    }
+    esp_set_tc(s, esp_get_tc(s) - len);
+    s->async_buf += len;
+    s->async_len -= len;
+    if (to_device) {
+        s->ti_size += len;
+    } else {
+        s->ti_size -= len;
+    }
+    if (s->async_len == 0) {
+        scsi_req_continue(s->current_req);
+        /*
+         * If there is still data to be read from the device then
+         * complete the DMA operation immediately.  Otherwise defer
+         * until the scsi layer has completed.
+         */
+        if (to_device || esp_get_tc(s) != 0 || s->ti_size == 0) {
+            return;
+        }
+    }
+
+    /* Partially filled a scsi buffer. Complete immediately. */
+    esp_dma_done(s);
+    esp_lower_drq(s);
+}
+
+static void esp_do_nodma(ESPState *s)
+{
+    int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);
+    uint32_t cmdlen;
+    int len;
+
+    if (s->do_cmd) {
+        cmdlen = fifo8_num_used(&s->cmdfifo);
+        trace_esp_handle_ti_cmd(cmdlen);
+        s->ti_size = 0;
+        if ((s->rregs[ESP_RSTAT] & 7) == STAT_CD) {
+            /* No command received */
+            if (s->cmdfifo_cdb_offset == fifo8_num_used(&s->cmdfifo)) {
+                return;
+            }
+
+            /* Command has been received */
+            s->do_cmd = 0;
+            do_cmd(s);
+        } else {
+            /*
+             * Extra message out bytes received: update cmdfifo_cdb_offset
+             * and then switch to command phase
+             */
+            s->cmdfifo_cdb_offset = fifo8_num_used(&s->cmdfifo);
+            s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
+            s->rregs[ESP_RSEQ] = SEQ_CD;
+            s->rregs[ESP_RINTR] |= INTR_BS;
+            esp_raise_irq(s);
+        }
+        return;
+    }
+
+    if (!s->current_req) {
+        return;
+    }
+
+    if (s->async_len == 0) {
+        /* Defer until data is available.
*/ + return; + } + + if (to_device) { + len = MIN(fifo8_num_used(&s->fifo), ESP_FIFO_SZ); + esp_fifo_pop_buf(&s->fifo, s->async_buf, len); + s->async_buf += len; + s->async_len -= len; + s->ti_size += len; + } else { + if (fifo8_is_empty(&s->fifo)) { + fifo8_push(&s->fifo, s->async_buf[0]); + s->async_buf++; + s->async_len--; + s->ti_size--; + } + } + + if (s->async_len == 0) { + scsi_req_continue(s->current_req); + return; + } + + s->rregs[ESP_RINTR] |= INTR_BS; + esp_raise_irq(s); +} + +void esp_command_complete(SCSIRequest *req, size_t resid) +{ + ESPState *s = req->hba_private; + int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO); + + trace_esp_command_complete(); + + /* + * Non-DMA transfers from the target will leave the last byte in + * the FIFO so don't reset ti_size in this case + */ + if (s->dma || to_device) { + if (s->ti_size != 0) { + trace_esp_command_complete_unexpected(); + } + s->ti_size = 0; + } + + s->async_len = 0; + if (req->status) { + trace_esp_command_complete_fail(); + } + s->status = req->status; + + /* + * If the transfer is finished, switch to status phase. For non-DMA + * transfers from the target the last byte is still in the FIFO + */ + if (s->ti_size == 0) { + s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST; + esp_dma_done(s); + esp_lower_drq(s); + } + + if (s->current_req) { + scsi_req_unref(s->current_req); + s->current_req = NULL; + s->current_dev = NULL; + } +} + +void esp_transfer_data(SCSIRequest *req, uint32_t len) +{ + ESPState *s = req->hba_private; + int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO); + uint32_t dmalen = esp_get_tc(s); + + assert(!s->do_cmd); + trace_esp_transfer_data(dmalen, s->ti_size); + s->async_len = len; + s->async_buf = scsi_req_get_buf(req); + + if (!to_device && !s->data_in_ready) { + /* + * Initial incoming data xfer is complete so raise command + * completion interrupt + */ + s->data_in_ready = true; + s->rregs[ESP_RSTAT] |= STAT_TC; + s->rregs[ESP_RINTR] |= INTR_BS; + esp_raise_irq(s); + } + + if (s->ti_cmd == 0) { + /* + * Always perform the initial transfer upon reception of the next TI + * command to ensure the DMA/non-DMA status of the command is correct. + * It is not possible to use s->dma directly in the section below as + * some OSs send non-DMA NOP commands after a DMA transfer. Hence if the + * async data transfer is delayed then s->dma is set incorrectly. + */ + return; + } + + if (s->ti_cmd == (CMD_TI | CMD_DMA)) { + if (dmalen) { + esp_do_dma(s); + } else if (s->ti_size <= 0) { + /* + * If this was the last part of a DMA transfer then the + * completion interrupt is deferred to here. 
+ */ + esp_dma_done(s); + esp_lower_drq(s); + } + } else if (s->ti_cmd == CMD_TI) { + esp_do_nodma(s); + } +} + +static void handle_ti(ESPState *s) +{ + uint32_t dmalen; + + if (s->dma && !s->dma_enabled) { + s->dma_cb = handle_ti; + return; + } + + s->ti_cmd = s->rregs[ESP_CMD]; + if (s->dma) { + dmalen = esp_get_tc(s); + trace_esp_handle_ti(dmalen); + s->rregs[ESP_RSTAT] &= ~STAT_TC; + esp_do_dma(s); + } else { + trace_esp_handle_ti(s->ti_size); + esp_do_nodma(s); + } +} + +void esp_hard_reset(ESPState *s) +{ + memset(s->rregs, 0, ESP_REGS); + memset(s->wregs, 0, ESP_REGS); + s->tchi_written = 0; + s->ti_size = 0; + s->async_len = 0; + fifo8_reset(&s->fifo); + fifo8_reset(&s->cmdfifo); + s->dma = 0; + s->do_cmd = 0; + s->dma_cb = NULL; + + s->rregs[ESP_CFG1] = 7; +} + +static void esp_soft_reset(ESPState *s) +{ + qemu_irq_lower(s->irq); + qemu_irq_lower(s->irq_data); + esp_hard_reset(s); +} + +static void parent_esp_reset(ESPState *s, int irq, int level) +{ + if (level) { + esp_soft_reset(s); + } +} + +uint64_t esp_reg_read(ESPState *s, uint32_t saddr) +{ + uint32_t val; + + switch (saddr) { + case ESP_FIFO: + if (s->dma_memory_read && s->dma_memory_write && + (s->rregs[ESP_RSTAT] & STAT_PIO_MASK) == 0) { + /* Data out. */ + qemu_log_mask(LOG_UNIMP, "esp: PIO data read not implemented\n"); + s->rregs[ESP_FIFO] = 0; + } else { + if ((s->rregs[ESP_RSTAT] & 0x7) == STAT_DI) { + if (s->ti_size) { + esp_do_nodma(s); + } else { + /* + * The last byte of a non-DMA transfer has been read out + * of the FIFO so switch to status phase + */ + s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST; + } + } + s->rregs[ESP_FIFO] = esp_fifo_pop(&s->fifo); + } + val = s->rregs[ESP_FIFO]; + break; + case ESP_RINTR: + /* + * Clear sequence step, interrupt register and all status bits + * except TC + */ + val = s->rregs[ESP_RINTR]; + s->rregs[ESP_RINTR] = 0; + s->rregs[ESP_RSTAT] &= ~STAT_TC; + /* + * According to the datasheet ESP_RSEQ should be cleared, but as the + * emulation currently defers information transfers to the next TI + * command leave it for now so that pedantic guests such as the old + * Linux 2.6 driver see the correct flags before the next SCSI phase + * transition. + * + * s->rregs[ESP_RSEQ] = SEQ_0; + */ + esp_lower_irq(s); + break; + case ESP_TCHI: + /* Return the unique id if the value has never been written */ + if (!s->tchi_written) { + val = s->chip_id; + } else { + val = s->rregs[saddr]; + } + break; + case ESP_RFLAGS: + /* Bottom 5 bits indicate number of bytes in FIFO */ + val = fifo8_num_used(&s->fifo); + break; + default: + val = s->rregs[saddr]; + break; + } + + trace_esp_mem_readb(saddr, val); + return val; +} + +void esp_reg_write(ESPState *s, uint32_t saddr, uint64_t val) +{ + trace_esp_mem_writeb(saddr, s->wregs[saddr], val); + switch (saddr) { + case ESP_TCHI: + s->tchi_written = true; + /* fall through */ + case ESP_TCLO: + case ESP_TCMID: + s->rregs[ESP_RSTAT] &= ~STAT_TC; + break; + case ESP_FIFO: + if (s->do_cmd) { + esp_fifo_push(&s->cmdfifo, val); + + /* + * If any unexpected message out/command phase data is + * transferred using non-DMA, raise the interrupt + */ + if (s->rregs[ESP_CMD] == CMD_TI) { + s->rregs[ESP_RINTR] |= INTR_BS; + esp_raise_irq(s); + } + } else { + esp_fifo_push(&s->fifo, val); + } + break; + case ESP_CMD: + s->rregs[saddr] = val; + if (val & CMD_DMA) { + s->dma = 1; + /* Reload DMA counter. 
*/ + if (esp_get_stc(s) == 0) { + esp_set_tc(s, 0x10000); + } else { + esp_set_tc(s, esp_get_stc(s)); + } + } else { + s->dma = 0; + } + switch (val & CMD_CMD) { + case CMD_NOP: + trace_esp_mem_writeb_cmd_nop(val); + break; + case CMD_FLUSH: + trace_esp_mem_writeb_cmd_flush(val); + fifo8_reset(&s->fifo); + break; + case CMD_RESET: + trace_esp_mem_writeb_cmd_reset(val); + esp_soft_reset(s); + break; + case CMD_BUSRESET: + trace_esp_mem_writeb_cmd_bus_reset(val); + if (!(s->wregs[ESP_CFG1] & CFG1_RESREPT)) { + s->rregs[ESP_RINTR] |= INTR_RST; + esp_raise_irq(s); + } + break; + case CMD_TI: + trace_esp_mem_writeb_cmd_ti(val); + handle_ti(s); + break; + case CMD_ICCS: + trace_esp_mem_writeb_cmd_iccs(val); + write_response(s); + s->rregs[ESP_RINTR] |= INTR_FC; + s->rregs[ESP_RSTAT] |= STAT_MI; + break; + case CMD_MSGACC: + trace_esp_mem_writeb_cmd_msgacc(val); + s->rregs[ESP_RINTR] |= INTR_DC; + s->rregs[ESP_RSEQ] = 0; + s->rregs[ESP_RFLAGS] = 0; + esp_raise_irq(s); + break; + case CMD_PAD: + trace_esp_mem_writeb_cmd_pad(val); + s->rregs[ESP_RSTAT] = STAT_TC; + s->rregs[ESP_RINTR] |= INTR_FC; + s->rregs[ESP_RSEQ] = 0; + break; + case CMD_SATN: + trace_esp_mem_writeb_cmd_satn(val); + break; + case CMD_RSTATN: + trace_esp_mem_writeb_cmd_rstatn(val); + break; + case CMD_SEL: + trace_esp_mem_writeb_cmd_sel(val); + handle_s_without_atn(s); + break; + case CMD_SELATN: + trace_esp_mem_writeb_cmd_selatn(val); + handle_satn(s); + break; + case CMD_SELATNS: + trace_esp_mem_writeb_cmd_selatns(val); + handle_satn_stop(s); + break; + case CMD_ENSEL: + trace_esp_mem_writeb_cmd_ensel(val); + s->rregs[ESP_RINTR] = 0; + break; + case CMD_DISSEL: + trace_esp_mem_writeb_cmd_dissel(val); + s->rregs[ESP_RINTR] = 0; + esp_raise_irq(s); + break; + default: + trace_esp_error_unhandled_command(val); + break; + } + break; + case ESP_WBUSID ... ESP_WSYNO: + break; + case ESP_CFG1: + case ESP_CFG2: case ESP_CFG3: + case ESP_RES3: case ESP_RES4: + s->rregs[saddr] = val; + break; + case ESP_WCCF ... 
ESP_WTEST: + break; + default: + trace_esp_error_invalid_write(val, saddr); + return; + } + s->wregs[saddr] = val; +} + +static bool esp_mem_accepts(void *opaque, hwaddr addr, + unsigned size, bool is_write, + MemTxAttrs attrs) +{ + return (size == 1) || (is_write && size == 4); +} + +static bool esp_is_before_version_5(void *opaque, int version_id) +{ + ESPState *s = ESP(opaque); + + version_id = MIN(version_id, s->mig_version_id); + return version_id < 5; +} + +static bool esp_is_version_5(void *opaque, int version_id) +{ + ESPState *s = ESP(opaque); + + version_id = MIN(version_id, s->mig_version_id); + return version_id >= 5; +} + +static bool esp_is_version_6(void *opaque, int version_id) +{ + ESPState *s = ESP(opaque); + + version_id = MIN(version_id, s->mig_version_id); + return version_id >= 6; +} + +int esp_pre_save(void *opaque) +{ + ESPState *s = ESP(object_resolve_path_component( + OBJECT(opaque), "esp")); + + s->mig_version_id = vmstate_esp.version_id; + return 0; +} + +static int esp_post_load(void *opaque, int version_id) +{ + ESPState *s = ESP(opaque); + int len, i; + + version_id = MIN(version_id, s->mig_version_id); + + if (version_id < 5) { + esp_set_tc(s, s->mig_dma_left); + + /* Migrate ti_buf to fifo */ + len = s->mig_ti_wptr - s->mig_ti_rptr; + for (i = 0; i < len; i++) { + fifo8_push(&s->fifo, s->mig_ti_buf[i]); + } + + /* Migrate cmdbuf to cmdfifo */ + for (i = 0; i < s->mig_cmdlen; i++) { + fifo8_push(&s->cmdfifo, s->mig_cmdbuf[i]); + } + } + + s->mig_version_id = vmstate_esp.version_id; + return 0; +} + +const VMStateDescription vmstate_esp = { + .name = "esp", + .version_id = 6, + .minimum_version_id = 3, + .post_load = esp_post_load, + .fields = (VMStateField[]) { + VMSTATE_BUFFER(rregs, ESPState), + VMSTATE_BUFFER(wregs, ESPState), + VMSTATE_INT32(ti_size, ESPState), + VMSTATE_UINT32_TEST(mig_ti_rptr, ESPState, esp_is_before_version_5), + VMSTATE_UINT32_TEST(mig_ti_wptr, ESPState, esp_is_before_version_5), + VMSTATE_BUFFER_TEST(mig_ti_buf, ESPState, esp_is_before_version_5), + VMSTATE_UINT32(status, ESPState), + VMSTATE_UINT32_TEST(mig_deferred_status, ESPState, + esp_is_before_version_5), + VMSTATE_BOOL_TEST(mig_deferred_complete, ESPState, + esp_is_before_version_5), + VMSTATE_UINT32(dma, ESPState), + VMSTATE_STATIC_BUFFER(mig_cmdbuf, ESPState, 0, + esp_is_before_version_5, 0, 16), + VMSTATE_STATIC_BUFFER(mig_cmdbuf, ESPState, 4, + esp_is_before_version_5, 16, + sizeof(typeof_field(ESPState, mig_cmdbuf))), + VMSTATE_UINT32_TEST(mig_cmdlen, ESPState, esp_is_before_version_5), + VMSTATE_UINT32(do_cmd, ESPState), + VMSTATE_UINT32_TEST(mig_dma_left, ESPState, esp_is_before_version_5), + VMSTATE_BOOL_TEST(data_in_ready, ESPState, esp_is_version_5), + VMSTATE_UINT8_TEST(cmdfifo_cdb_offset, ESPState, esp_is_version_5), + VMSTATE_FIFO8_TEST(fifo, ESPState, esp_is_version_5), + VMSTATE_FIFO8_TEST(cmdfifo, ESPState, esp_is_version_5), + VMSTATE_UINT8_TEST(ti_cmd, ESPState, esp_is_version_5), + VMSTATE_UINT8_TEST(lun, ESPState, esp_is_version_6), + VMSTATE_END_OF_LIST() + }, +}; + +static void sysbus_esp_mem_write(void *opaque, hwaddr addr, + uint64_t val, unsigned int size) +{ + SysBusESPState *sysbus = opaque; + ESPState *s = ESP(&sysbus->esp); + uint32_t saddr; + + saddr = addr >> sysbus->it_shift; + esp_reg_write(s, saddr, val); +} + +static uint64_t sysbus_esp_mem_read(void *opaque, hwaddr addr, + unsigned int size) +{ + SysBusESPState *sysbus = opaque; + ESPState *s = ESP(&sysbus->esp); + uint32_t saddr; + + saddr = addr >> sysbus->it_shift; + return 
esp_reg_read(s, saddr); +} + +static const MemoryRegionOps sysbus_esp_mem_ops = { + .read = sysbus_esp_mem_read, + .write = sysbus_esp_mem_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid.accepts = esp_mem_accepts, +}; + +static void sysbus_esp_pdma_write(void *opaque, hwaddr addr, + uint64_t val, unsigned int size) +{ + SysBusESPState *sysbus = opaque; + ESPState *s = ESP(&sysbus->esp); + + trace_esp_pdma_write(size); + + switch (size) { + case 1: + esp_pdma_write(s, val); + break; + case 2: + esp_pdma_write(s, val >> 8); + esp_pdma_write(s, val); + break; + } + s->pdma_cb(s); +} + +static uint64_t sysbus_esp_pdma_read(void *opaque, hwaddr addr, + unsigned int size) +{ + SysBusESPState *sysbus = opaque; + ESPState *s = ESP(&sysbus->esp); + uint64_t val = 0; + + trace_esp_pdma_read(size); + + switch (size) { + case 1: + val = esp_pdma_read(s); + break; + case 2: + val = esp_pdma_read(s); + val = (val << 8) | esp_pdma_read(s); + break; + } + if (fifo8_num_used(&s->fifo) < 2) { + s->pdma_cb(s); + } + return val; +} + +static const MemoryRegionOps sysbus_esp_pdma_ops = { + .read = sysbus_esp_pdma_read, + .write = sysbus_esp_pdma_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid.min_access_size = 1, + .valid.max_access_size = 4, + .impl.min_access_size = 1, + .impl.max_access_size = 2, +}; + +static const struct SCSIBusInfo esp_scsi_info = { + .tcq = false, + .max_target = ESP_MAX_DEVS, + .max_lun = 7, + + .transfer_data = esp_transfer_data, + .complete = esp_command_complete, + .cancel = esp_request_cancelled +}; + +static void sysbus_esp_gpio_demux(void *opaque, int irq, int level) +{ + SysBusESPState *sysbus = SYSBUS_ESP(opaque); + ESPState *s = ESP(&sysbus->esp); + + switch (irq) { + case 0: + parent_esp_reset(s, irq, level); + break; + case 1: + esp_dma_enable(opaque, irq, level); + break; + } +} + +static void sysbus_esp_realize(DeviceState *dev, Error **errp) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + SysBusESPState *sysbus = SYSBUS_ESP(dev); + ESPState *s = ESP(&sysbus->esp); + + if (!qdev_realize(DEVICE(s), NULL, errp)) { + return; + } + + sysbus_init_irq(sbd, &s->irq); + sysbus_init_irq(sbd, &s->irq_data); + assert(sysbus->it_shift != -1); + + s->chip_id = TCHI_FAS100A; + memory_region_init_io(&sysbus->iomem, OBJECT(sysbus), &sysbus_esp_mem_ops, + sysbus, "esp-regs", ESP_REGS << sysbus->it_shift); + sysbus_init_mmio(sbd, &sysbus->iomem); + memory_region_init_io(&sysbus->pdma, OBJECT(sysbus), &sysbus_esp_pdma_ops, + sysbus, "esp-pdma", 4); + sysbus_init_mmio(sbd, &sysbus->pdma); + + qdev_init_gpio_in(dev, sysbus_esp_gpio_demux, 2); + + scsi_bus_init(&s->bus, sizeof(s->bus), dev, &esp_scsi_info); +} + +static void sysbus_esp_hard_reset(DeviceState *dev) +{ + SysBusESPState *sysbus = SYSBUS_ESP(dev); + ESPState *s = ESP(&sysbus->esp); + + esp_hard_reset(s); +} + +static void sysbus_esp_init(Object *obj) +{ + SysBusESPState *sysbus = SYSBUS_ESP(obj); + + object_initialize_child(obj, "esp", &sysbus->esp, TYPE_ESP); +} + +static const VMStateDescription vmstate_sysbus_esp_scsi = { + .name = "sysbusespscsi", + .version_id = 2, + .minimum_version_id = 1, + .pre_save = esp_pre_save, + .fields = (VMStateField[]) { + VMSTATE_UINT8_V(esp.mig_version_id, SysBusESPState, 2), + VMSTATE_STRUCT(esp, SysBusESPState, 0, vmstate_esp, ESPState), + VMSTATE_END_OF_LIST() + } +}; + +static void sysbus_esp_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = sysbus_esp_realize; + dc->reset = sysbus_esp_hard_reset; + dc->vmsd = 
&vmstate_sysbus_esp_scsi; + set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); +} + +static const TypeInfo sysbus_esp_info = { + .name = TYPE_SYSBUS_ESP, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_init = sysbus_esp_init, + .instance_size = sizeof(SysBusESPState), + .class_init = sysbus_esp_class_init, +}; + +static void esp_finalize(Object *obj) +{ + ESPState *s = ESP(obj); + + fifo8_destroy(&s->fifo); + fifo8_destroy(&s->cmdfifo); +} + +static void esp_init(Object *obj) +{ + ESPState *s = ESP(obj); + + fifo8_create(&s->fifo, ESP_FIFO_SZ); + fifo8_create(&s->cmdfifo, ESP_CMDFIFO_SZ); +} + +static void esp_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + /* internal device for sysbusesp/pciespscsi, not user-creatable */ + dc->user_creatable = false; + set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); +} + +static const TypeInfo esp_info = { + .name = TYPE_ESP, + .parent = TYPE_DEVICE, + .instance_init = esp_init, + .instance_finalize = esp_finalize, + .instance_size = sizeof(ESPState), + .class_init = esp_class_init, +}; + +static void esp_register_types(void) +{ + type_register_static(&sysbus_esp_info); + type_register_static(&esp_info); +} + +type_init(esp_register_types) diff --git a/hw/scsi/lsi53c895a.c b/hw/scsi/lsi53c895a.c new file mode 100644 index 000000000..85e907a78 --- /dev/null +++ b/hw/scsi/lsi53c895a.c @@ -0,0 +1,2375 @@ +/* + * QEMU LSI53C895A SCSI Host Bus Adapter emulation + * + * Copyright (c) 2006 CodeSourcery. + * Written by Paul Brook + * + * This code is licensed under the LGPL. + */ + +/* Note: + * LSI53C810 emulation is incorrect, in the sense that it supports + * features added in later evolutions. This should not be a problem, + * as well-behaved operating systems will not try to use them. 
+ */ + +#include "qemu/osdep.h" + +#include "hw/irq.h" +#include "hw/pci/pci.h" +#include "hw/scsi/scsi.h" +#include "migration/vmstate.h" +#include "sysemu/dma.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "trace.h" +#include "qom/object.h" + +static const char *names[] = { + "SCNTL0", "SCNTL1", "SCNTL2", "SCNTL3", "SCID", "SXFER", "SDID", "GPREG", + "SFBR", "SOCL", "SSID", "SBCL", "DSTAT", "SSTAT0", "SSTAT1", "SSTAT2", + "DSA0", "DSA1", "DSA2", "DSA3", "ISTAT", "0x15", "0x16", "0x17", + "CTEST0", "CTEST1", "CTEST2", "CTEST3", "TEMP0", "TEMP1", "TEMP2", "TEMP3", + "DFIFO", "CTEST4", "CTEST5", "CTEST6", "DBC0", "DBC1", "DBC2", "DCMD", + "DNAD0", "DNAD1", "DNAD2", "DNAD3", "DSP0", "DSP1", "DSP2", "DSP3", + "DSPS0", "DSPS1", "DSPS2", "DSPS3", "SCRATCHA0", "SCRATCHA1", "SCRATCHA2", "SCRATCHA3", + "DMODE", "DIEN", "SBR", "DCNTL", "ADDER0", "ADDER1", "ADDER2", "ADDER3", + "SIEN0", "SIEN1", "SIST0", "SIST1", "SLPAR", "0x45", "MACNTL", "GPCNTL", + "STIME0", "STIME1", "RESPID", "0x4b", "STEST0", "STEST1", "STEST2", "STEST3", + "SIDL", "0x51", "0x52", "0x53", "SODL", "0x55", "0x56", "0x57", + "SBDL", "0x59", "0x5a", "0x5b", "SCRATCHB0", "SCRATCHB1", "SCRATCHB2", "SCRATCHB3", +}; + +#define LSI_MAX_DEVS 7 + +#define LSI_SCNTL0_TRG 0x01 +#define LSI_SCNTL0_AAP 0x02 +#define LSI_SCNTL0_EPC 0x08 +#define LSI_SCNTL0_WATN 0x10 +#define LSI_SCNTL0_START 0x20 + +#define LSI_SCNTL1_SST 0x01 +#define LSI_SCNTL1_IARB 0x02 +#define LSI_SCNTL1_AESP 0x04 +#define LSI_SCNTL1_RST 0x08 +#define LSI_SCNTL1_CON 0x10 +#define LSI_SCNTL1_DHP 0x20 +#define LSI_SCNTL1_ADB 0x40 +#define LSI_SCNTL1_EXC 0x80 + +#define LSI_SCNTL2_WSR 0x01 +#define LSI_SCNTL2_VUE0 0x02 +#define LSI_SCNTL2_VUE1 0x04 +#define LSI_SCNTL2_WSS 0x08 +#define LSI_SCNTL2_SLPHBEN 0x10 +#define LSI_SCNTL2_SLPMD 0x20 +#define LSI_SCNTL2_CHM 0x40 +#define LSI_SCNTL2_SDU 0x80 + +#define LSI_ISTAT0_DIP 0x01 +#define LSI_ISTAT0_SIP 0x02 +#define LSI_ISTAT0_INTF 0x04 +#define LSI_ISTAT0_CON 0x08 +#define LSI_ISTAT0_SEM 0x10 +#define LSI_ISTAT0_SIGP 0x20 +#define LSI_ISTAT0_SRST 0x40 +#define LSI_ISTAT0_ABRT 0x80 + +#define LSI_ISTAT1_SI 0x01 +#define LSI_ISTAT1_SRUN 0x02 +#define LSI_ISTAT1_FLSH 0x04 + +#define LSI_SSTAT0_SDP0 0x01 +#define LSI_SSTAT0_RST 0x02 +#define LSI_SSTAT0_WOA 0x04 +#define LSI_SSTAT0_LOA 0x08 +#define LSI_SSTAT0_AIP 0x10 +#define LSI_SSTAT0_OLF 0x20 +#define LSI_SSTAT0_ORF 0x40 +#define LSI_SSTAT0_ILF 0x80 + +#define LSI_SIST0_PAR 0x01 +#define LSI_SIST0_RST 0x02 +#define LSI_SIST0_UDC 0x04 +#define LSI_SIST0_SGE 0x08 +#define LSI_SIST0_RSL 0x10 +#define LSI_SIST0_SEL 0x20 +#define LSI_SIST0_CMP 0x40 +#define LSI_SIST0_MA 0x80 + +#define LSI_SIST1_HTH 0x01 +#define LSI_SIST1_GEN 0x02 +#define LSI_SIST1_STO 0x04 +#define LSI_SIST1_SBMC 0x10 + +#define LSI_SOCL_IO 0x01 +#define LSI_SOCL_CD 0x02 +#define LSI_SOCL_MSG 0x04 +#define LSI_SOCL_ATN 0x08 +#define LSI_SOCL_SEL 0x10 +#define LSI_SOCL_BSY 0x20 +#define LSI_SOCL_ACK 0x40 +#define LSI_SOCL_REQ 0x80 + +#define LSI_DSTAT_IID 0x01 +#define LSI_DSTAT_SIR 0x04 +#define LSI_DSTAT_SSI 0x08 +#define LSI_DSTAT_ABRT 0x10 +#define LSI_DSTAT_BF 0x20 +#define LSI_DSTAT_MDPE 0x40 +#define LSI_DSTAT_DFE 0x80 + +#define LSI_DCNTL_COM 0x01 +#define LSI_DCNTL_IRQD 0x02 +#define LSI_DCNTL_STD 0x04 +#define LSI_DCNTL_IRQM 0x08 +#define LSI_DCNTL_SSM 0x10 +#define LSI_DCNTL_PFEN 0x20 +#define LSI_DCNTL_PFF 0x40 +#define LSI_DCNTL_CLSE 0x80 + +#define LSI_DMODE_MAN 0x01 +#define LSI_DMODE_BOF 0x02 +#define LSI_DMODE_ERMP 0x04 +#define LSI_DMODE_ERL 0x08 +#define LSI_DMODE_DIOM 0x10 
+#define LSI_DMODE_SIOM 0x20
+
+#define LSI_CTEST2_DACK 0x01
+#define LSI_CTEST2_DREQ 0x02
+#define LSI_CTEST2_TEOP 0x04
+#define LSI_CTEST2_PCICIE 0x08
+#define LSI_CTEST2_CM 0x10
+#define LSI_CTEST2_CIO 0x20
+#define LSI_CTEST2_SIGP 0x40
+#define LSI_CTEST2_DDIR 0x80
+
+#define LSI_CTEST5_BL2 0x04
+#define LSI_CTEST5_DDIR 0x08
+#define LSI_CTEST5_MASR 0x10
+#define LSI_CTEST5_DFSN 0x20
+#define LSI_CTEST5_BBCK 0x40
+#define LSI_CTEST5_ADCK 0x80
+
+#define LSI_CCNTL0_DILS 0x01
+#define LSI_CCNTL0_DISFC 0x10
+#define LSI_CCNTL0_ENNDJ 0x20
+#define LSI_CCNTL0_PMJCTL 0x40
+#define LSI_CCNTL0_ENPMJ 0x80
+
+#define LSI_CCNTL1_EN64DBMV 0x01
+#define LSI_CCNTL1_EN64TIBMV 0x02
+#define LSI_CCNTL1_64TIMOD 0x04
+#define LSI_CCNTL1_DDAC 0x08
+#define LSI_CCNTL1_ZMOD 0x80
+
+#define LSI_SBCL_ATN 0x08
+#define LSI_SBCL_BSY 0x20
+#define LSI_SBCL_ACK 0x40
+#define LSI_SBCL_REQ 0x80
+
+/* Enable Response to Reselection */
+#define LSI_SCID_RRE 0x60
+
+#define LSI_CCNTL1_40BIT (LSI_CCNTL1_EN64TIBMV|LSI_CCNTL1_64TIMOD)
+
+#define PHASE_DO 0
+#define PHASE_DI 1
+#define PHASE_CMD 2
+#define PHASE_ST 3
+#define PHASE_MO 6
+#define PHASE_MI 7
+#define PHASE_MASK 7
+
+/* Maximum length of MSG IN data. */
+#define LSI_MAX_MSGIN_LEN 8
+
+/* Flag set if this is a tagged command. */
+#define LSI_TAG_VALID (1 << 16)
+
+/* Maximum instructions to process. */
+#define LSI_MAX_INSN 10000
+
+typedef struct lsi_request {
+    SCSIRequest *req;
+    uint32_t tag;
+    uint32_t dma_len;
+    uint8_t *dma_buf;
+    uint32_t pending;
+    int out;
+    QTAILQ_ENTRY(lsi_request) next;
+} lsi_request;
+
+enum {
+    LSI_NOWAIT, /* SCRIPTS are running or stopped */
+    LSI_WAIT_RESELECT, /* Wait Reselect instruction has been issued */
+    LSI_DMA_SCRIPTS, /* processing DMA from lsi_execute_script */
+    LSI_DMA_IN_PROGRESS, /* DMA operation is in progress */
+};
+
+enum {
+    LSI_MSG_ACTION_COMMAND = 0,
+    LSI_MSG_ACTION_DISCONNECT = 1,
+    LSI_MSG_ACTION_DOUT = 2,
+    LSI_MSG_ACTION_DIN = 3,
+};
+
+struct LSIState {
+    /*< private >*/
+    PCIDevice parent_obj;
+    /*< public >*/
+
+    qemu_irq ext_irq;
+    MemoryRegion mmio_io;
+    MemoryRegion ram_io;
+    MemoryRegion io_io;
+    AddressSpace pci_io_as;
+
+    int carry; /* ??? Should this be a visible register somewhere? */
+    int status;
+    int msg_action;
+    int msg_len;
+    uint8_t msg[LSI_MAX_MSGIN_LEN];
+    int waiting;
+    SCSIBus bus;
+    int current_lun;
+    /* The tag is a combination of the device ID and the SCSI tag.
*/ + uint32_t select_tag; + int command_complete; + QTAILQ_HEAD(, lsi_request) queue; + lsi_request *current; + + uint32_t dsa; + uint32_t temp; + uint32_t dnad; + uint32_t dbc; + uint8_t istat0; + uint8_t istat1; + uint8_t dcmd; + uint8_t dstat; + uint8_t dien; + uint8_t sist0; + uint8_t sist1; + uint8_t sien0; + uint8_t sien1; + uint8_t mbox0; + uint8_t mbox1; + uint8_t dfifo; + uint8_t ctest2; + uint8_t ctest3; + uint8_t ctest4; + uint8_t ctest5; + uint8_t ccntl0; + uint8_t ccntl1; + uint32_t dsp; + uint32_t dsps; + uint8_t dmode; + uint8_t dcntl; + uint8_t scntl0; + uint8_t scntl1; + uint8_t scntl2; + uint8_t scntl3; + uint8_t sstat0; + uint8_t sstat1; + uint8_t scid; + uint8_t sxfer; + uint8_t socl; + uint8_t sdid; + uint8_t ssid; + uint8_t sfbr; + uint8_t sbcl; + uint8_t stest1; + uint8_t stest2; + uint8_t stest3; + uint8_t sidl; + uint8_t stime0; + uint8_t respid0; + uint8_t respid1; + uint32_t mmrs; + uint32_t mmws; + uint32_t sfs; + uint32_t drs; + uint32_t sbms; + uint32_t dbms; + uint32_t dnad64; + uint32_t pmjad1; + uint32_t pmjad2; + uint32_t rbc; + uint32_t ua; + uint32_t ia; + uint32_t sbc; + uint32_t csbc; + uint32_t scratch[18]; /* SCRATCHA-SCRATCHR */ + uint8_t sbr; + uint32_t adder; + + uint8_t script_ram[2048 * sizeof(uint32_t)]; +}; + +#define TYPE_LSI53C810 "lsi53c810" +#define TYPE_LSI53C895A "lsi53c895a" + +OBJECT_DECLARE_SIMPLE_TYPE(LSIState, LSI53C895A) + +static const char *scsi_phases[] = { + "DOUT", + "DIN", + "CMD", + "STATUS", + "RSVOUT", + "RSVIN", + "MSGOUT", + "MSGIN" +}; + +static const char *scsi_phase_name(int phase) +{ + return scsi_phases[phase & PHASE_MASK]; +} + +static inline int lsi_irq_on_rsl(LSIState *s) +{ + return (s->sien0 & LSI_SIST0_RSL) && (s->scid & LSI_SCID_RRE); +} + +static lsi_request *get_pending_req(LSIState *s) +{ + lsi_request *p; + + QTAILQ_FOREACH(p, &s->queue, next) { + if (p->pending) { + return p; + } + } + return NULL; +} + +static void lsi_soft_reset(LSIState *s) +{ + trace_lsi_reset(); + s->carry = 0; + + s->msg_action = LSI_MSG_ACTION_COMMAND; + s->msg_len = 0; + s->waiting = LSI_NOWAIT; + s->dsa = 0; + s->dnad = 0; + s->dbc = 0; + s->temp = 0; + memset(s->scratch, 0, sizeof(s->scratch)); + s->istat0 = 0; + s->istat1 = 0; + s->dcmd = 0x40; + s->dstat = 0; + s->dien = 0; + s->sist0 = 0; + s->sist1 = 0; + s->sien0 = 0; + s->sien1 = 0; + s->mbox0 = 0; + s->mbox1 = 0; + s->dfifo = 0; + s->ctest2 = LSI_CTEST2_DACK; + s->ctest3 = 0; + s->ctest4 = 0; + s->ctest5 = 0; + s->ccntl0 = 0; + s->ccntl1 = 0; + s->dsp = 0; + s->dsps = 0; + s->dmode = 0; + s->dcntl = 0; + s->scntl0 = 0xc0; + s->scntl1 = 0; + s->scntl2 = 0; + s->scntl3 = 0; + s->sstat0 = 0; + s->sstat1 = 0; + s->scid = 7; + s->sxfer = 0; + s->socl = 0; + s->sdid = 0; + s->ssid = 0; + s->sbcl = 0; + s->stest1 = 0; + s->stest2 = 0; + s->stest3 = 0; + s->sidl = 0; + s->stime0 = 0; + s->respid0 = 0x80; + s->respid1 = 0; + s->mmrs = 0; + s->mmws = 0; + s->sfs = 0; + s->drs = 0; + s->sbms = 0; + s->dbms = 0; + s->dnad64 = 0; + s->pmjad1 = 0; + s->pmjad2 = 0; + s->rbc = 0; + s->ua = 0; + s->ia = 0; + s->sbc = 0; + s->csbc = 0; + s->sbr = 0; + assert(QTAILQ_EMPTY(&s->queue)); + assert(!s->current); +} + +static int lsi_dma_40bit(LSIState *s) +{ + if ((s->ccntl1 & LSI_CCNTL1_40BIT) == LSI_CCNTL1_40BIT) + return 1; + return 0; +} + +static int lsi_dma_ti64bit(LSIState *s) +{ + if ((s->ccntl1 & LSI_CCNTL1_EN64TIBMV) == LSI_CCNTL1_EN64TIBMV) + return 1; + return 0; +} + +static int lsi_dma_64bit(LSIState *s) +{ + if ((s->ccntl1 & LSI_CCNTL1_EN64DBMV) == LSI_CCNTL1_EN64DBMV) + return 
1;
+    return 0;
+}
+
+static uint8_t lsi_reg_readb(LSIState *s, int offset);
+static void lsi_reg_writeb(LSIState *s, int offset, uint8_t val);
+static void lsi_execute_script(LSIState *s);
+static void lsi_reselect(LSIState *s, lsi_request *p);
+
+static inline void lsi_mem_read(LSIState *s, dma_addr_t addr,
+                                void *buf, dma_addr_t len)
+{
+    if (s->dmode & LSI_DMODE_SIOM) {
+        address_space_read(&s->pci_io_as, addr, MEMTXATTRS_UNSPECIFIED,
+                           buf, len);
+    } else {
+        pci_dma_read(PCI_DEVICE(s), addr, buf, len);
+    }
+}
+
+static inline void lsi_mem_write(LSIState *s, dma_addr_t addr,
+                                 const void *buf, dma_addr_t len)
+{
+    if (s->dmode & LSI_DMODE_DIOM) {
+        address_space_write(&s->pci_io_as, addr, MEMTXATTRS_UNSPECIFIED,
+                            buf, len);
+    } else {
+        pci_dma_write(PCI_DEVICE(s), addr, buf, len);
+    }
+}
+
+static inline uint32_t read_dword(LSIState *s, uint32_t addr)
+{
+    uint32_t buf;
+
+    pci_dma_read(PCI_DEVICE(s), addr, &buf, 4);
+    return cpu_to_le32(buf);
+}
+
+static void lsi_stop_script(LSIState *s)
+{
+    s->istat1 &= ~LSI_ISTAT1_SRUN;
+}
+
+static void lsi_set_irq(LSIState *s, int level)
+{
+    PCIDevice *d = PCI_DEVICE(s);
+
+    if (s->ext_irq) {
+        qemu_set_irq(s->ext_irq, level);
+    } else {
+        pci_set_irq(d, level);
+    }
+}
+
+static void lsi_update_irq(LSIState *s)
+{
+    int level;
+    static int last_level;
+
+    /* It's unclear whether the DIP/SIP bits should be cleared when the
+       Interrupt Status Registers are cleared or when istat0 is read.
+       We currently do the former, which seems to work. */
+    level = 0;
+    if (s->dstat) {
+        if (s->dstat & s->dien)
+            level = 1;
+        s->istat0 |= LSI_ISTAT0_DIP;
+    } else {
+        s->istat0 &= ~LSI_ISTAT0_DIP;
+    }
+
+    if (s->sist0 || s->sist1) {
+        if ((s->sist0 & s->sien0) || (s->sist1 & s->sien1))
+            level = 1;
+        s->istat0 |= LSI_ISTAT0_SIP;
+    } else {
+        s->istat0 &= ~LSI_ISTAT0_SIP;
+    }
+    if (s->istat0 & LSI_ISTAT0_INTF)
+        level = 1;
+
+    if (level != last_level) {
+        trace_lsi_update_irq(level, s->dstat, s->sist1, s->sist0);
+        last_level = level;
+    }
+    lsi_set_irq(s, level);
+
+    if (!s->current && !level && lsi_irq_on_rsl(s) && !(s->scntl1 & LSI_SCNTL1_CON)) {
+        lsi_request *p;
+
+        trace_lsi_update_irq_disconnected();
+        p = get_pending_req(s);
+        if (p) {
+            lsi_reselect(s, p);
+        }
+    }
+}
+
+/* Stop SCRIPTS execution and raise a SCSI interrupt. */
+static void lsi_script_scsi_interrupt(LSIState *s, int stat0, int stat1)
+{
+    uint32_t mask0;
+    uint32_t mask1;
+
+    trace_lsi_script_scsi_interrupt(stat1, stat0, s->sist1, s->sist0);
+    s->sist0 |= stat0;
+    s->sist1 |= stat1;
+    /* Stop processor on fatal or unmasked interrupt.  As a special hack
+       we don't stop processing when raising STO.  Instead continue
+       execution and stop at the next insn that accesses the SCSI bus. */
+    mask0 = s->sien0 | ~(LSI_SIST0_CMP | LSI_SIST0_SEL | LSI_SIST0_RSL);
+    mask1 = s->sien1 | ~(LSI_SIST1_GEN | LSI_SIST1_HTH);
+    mask1 &= ~LSI_SIST1_STO;
+    if (s->sist0 & mask0 || s->sist1 & mask1) {
+        lsi_stop_script(s);
+    }
+    lsi_update_irq(s);
+}
+
+/* Stop SCRIPTS execution and raise a DMA interrupt. */
+static void lsi_script_dma_interrupt(LSIState *s, int stat)
+{
+    trace_lsi_script_dma_interrupt(stat, s->dstat);
+    s->dstat |= stat;
+    lsi_update_irq(s);
+    lsi_stop_script(s);
+}
+
+static inline void lsi_set_phase(LSIState *s, int phase)
+{
+    s->sbcl &= ~PHASE_MASK;
+    s->sbcl |= phase | LSI_SBCL_REQ;
+    s->sstat1 = (s->sstat1 & ~PHASE_MASK) | phase;
+}
+
+static void lsi_bad_phase(LSIState *s, int out, int new_phase)
+{
+    /* Trigger a phase mismatch.
*/ + if (s->ccntl0 & LSI_CCNTL0_ENPMJ) { + if ((s->ccntl0 & LSI_CCNTL0_PMJCTL)) { + s->dsp = out ? s->pmjad1 : s->pmjad2; + } else { + s->dsp = (s->scntl2 & LSI_SCNTL2_WSR ? s->pmjad2 : s->pmjad1); + } + trace_lsi_bad_phase_jump(s->dsp); + } else { + trace_lsi_bad_phase_interrupt(); + lsi_script_scsi_interrupt(s, LSI_SIST0_MA, 0); + lsi_stop_script(s); + } + lsi_set_phase(s, new_phase); +} + + +/* Resume SCRIPTS execution after a DMA operation. */ +static void lsi_resume_script(LSIState *s) +{ + if (s->waiting != 2) { + s->waiting = LSI_NOWAIT; + lsi_execute_script(s); + } else { + s->waiting = LSI_NOWAIT; + } +} + +static void lsi_disconnect(LSIState *s) +{ + s->scntl1 &= ~LSI_SCNTL1_CON; + s->sstat1 &= ~PHASE_MASK; + s->sbcl = 0; +} + +static void lsi_bad_selection(LSIState *s, uint32_t id) +{ + trace_lsi_bad_selection(id); + lsi_script_scsi_interrupt(s, 0, LSI_SIST1_STO); + lsi_disconnect(s); +} + +/* Initiate a SCSI layer data transfer. */ +static void lsi_do_dma(LSIState *s, int out) +{ + uint32_t count; + dma_addr_t addr; + SCSIDevice *dev; + + assert(s->current); + if (!s->current->dma_len) { + /* Wait until data is available. */ + trace_lsi_do_dma_unavailable(); + return; + } + + dev = s->current->req->dev; + assert(dev); + + count = s->dbc; + if (count > s->current->dma_len) + count = s->current->dma_len; + + addr = s->dnad; + /* both 40 and Table Indirect 64-bit DMAs store upper bits in dnad64 */ + if (lsi_dma_40bit(s) || lsi_dma_ti64bit(s)) + addr |= ((uint64_t)s->dnad64 << 32); + else if (s->dbms) + addr |= ((uint64_t)s->dbms << 32); + else if (s->sbms) + addr |= ((uint64_t)s->sbms << 32); + + trace_lsi_do_dma(addr, count); + s->csbc += count; + s->dnad += count; + s->dbc -= count; + if (s->current->dma_buf == NULL) { + s->current->dma_buf = scsi_req_get_buf(s->current->req); + } + /* ??? Set SFBR to first data byte. */ + if (out) { + lsi_mem_read(s, addr, s->current->dma_buf, count); + } else { + lsi_mem_write(s, addr, s->current->dma_buf, count); + } + s->current->dma_len -= count; + if (s->current->dma_len == 0) { + s->current->dma_buf = NULL; + scsi_req_continue(s->current->req); + } else { + s->current->dma_buf += count; + lsi_resume_script(s); + } +} + + +/* Add a command to the queue. */ +static void lsi_queue_command(LSIState *s) +{ + lsi_request *p = s->current; + + trace_lsi_queue_command(p->tag); + assert(s->current != NULL); + assert(s->current->dma_len == 0); + QTAILQ_INSERT_TAIL(&s->queue, s->current, next); + s->current = NULL; + + p->pending = 0; + p->out = (s->sstat1 & PHASE_MASK) == PHASE_DO; +} + +/* Queue a byte for a MSG IN phase. */ +static void lsi_add_msg_byte(LSIState *s, uint8_t data) +{ + if (s->msg_len >= LSI_MAX_MSGIN_LEN) { + trace_lsi_add_msg_byte_error(); + } else { + trace_lsi_add_msg_byte(data); + s->msg[s->msg_len++] = data; + } +} + +/* Perform reselection to continue a command. */ +static void lsi_reselect(LSIState *s, lsi_request *p) +{ + int id; + + assert(s->current == NULL); + QTAILQ_REMOVE(&s->queue, p, next); + s->current = p; + + id = (p->tag >> 8) & 0xf; + s->ssid = id | 0x80; + /* LSI53C700 Family Compatibility, see LSI53C895A 4-73 */ + if (!(s->dcntl & LSI_DCNTL_COM)) { + s->sfbr = 1 << (id & 0x7); + } + trace_lsi_reselect(id); + s->scntl1 |= LSI_SCNTL1_CON; + lsi_set_phase(s, PHASE_MI); + s->msg_action = p->out ? 
LSI_MSG_ACTION_DOUT : LSI_MSG_ACTION_DIN; + s->current->dma_len = p->pending; + lsi_add_msg_byte(s, 0x80); + if (s->current->tag & LSI_TAG_VALID) { + lsi_add_msg_byte(s, 0x20); + lsi_add_msg_byte(s, p->tag & 0xff); + } + + if (lsi_irq_on_rsl(s)) { + lsi_script_scsi_interrupt(s, LSI_SIST0_RSL, 0); + } +} + +static lsi_request *lsi_find_by_tag(LSIState *s, uint32_t tag) +{ + lsi_request *p; + + QTAILQ_FOREACH(p, &s->queue, next) { + if (p->tag == tag) { + return p; + } + } + + return NULL; +} + +static void lsi_request_free(LSIState *s, lsi_request *p) +{ + if (p == s->current) { + s->current = NULL; + } else { + QTAILQ_REMOVE(&s->queue, p, next); + } + g_free(p); +} + +static void lsi_request_cancelled(SCSIRequest *req) +{ + LSIState *s = LSI53C895A(req->bus->qbus.parent); + lsi_request *p = req->hba_private; + + req->hba_private = NULL; + lsi_request_free(s, p); + scsi_req_unref(req); +} + +/* Record that data is available for a queued command. Returns zero if + the device was reselected, nonzero if the IO is deferred. */ +static int lsi_queue_req(LSIState *s, SCSIRequest *req, uint32_t len) +{ + lsi_request *p = req->hba_private; + + if (p->pending) { + trace_lsi_queue_req_error(p); + } + p->pending = len; + /* Reselect if waiting for it, or if reselection triggers an IRQ + and the bus is free. + Since no interrupt stacking is implemented in the emulation, it + is also required that there are no pending interrupts waiting + for service from the device driver. */ + if (s->waiting == LSI_WAIT_RESELECT || + (lsi_irq_on_rsl(s) && !(s->scntl1 & LSI_SCNTL1_CON) && + !(s->istat0 & (LSI_ISTAT0_SIP | LSI_ISTAT0_DIP)))) { + /* Reselect device. */ + lsi_reselect(s, p); + return 0; + } else { + trace_lsi_queue_req(p->tag); + p->pending = len; + return 1; + } +} + + /* Callback to indicate that the SCSI layer has completed a command. */ +static void lsi_command_complete(SCSIRequest *req, size_t resid) +{ + LSIState *s = LSI53C895A(req->bus->qbus.parent); + int out; + + out = (s->sstat1 & PHASE_MASK) == PHASE_DO; + trace_lsi_command_complete(req->status); + s->status = req->status; + s->command_complete = 2; + if (s->waiting && s->dbc != 0) { + /* Raise phase mismatch for short transfers. */ + lsi_bad_phase(s, out, PHASE_ST); + } else { + lsi_set_phase(s, PHASE_ST); + } + + if (req->hba_private == s->current) { + req->hba_private = NULL; + lsi_request_free(s, s->current); + scsi_req_unref(req); + } + lsi_resume_script(s); +} + + /* Callback to indicate that the SCSI layer has completed a transfer. 
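If the controller is waiting for a reselection, or the request is + not the currently connected one, the data is queued via + lsi_queue_req() and delivered after reselection; otherwise the block + move that was paused on this request resumes, either by continuing + the DMA or by restarting SCRIPTS execution. 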
*/ +static void lsi_transfer_data(SCSIRequest *req, uint32_t len) +{ + LSIState *s = LSI53C895A(req->bus->qbus.parent); + int out; + + assert(req->hba_private); + if (s->waiting == LSI_WAIT_RESELECT || req->hba_private != s->current || + (lsi_irq_on_rsl(s) && !(s->scntl1 & LSI_SCNTL1_CON))) { + if (lsi_queue_req(s, req, len)) { + return; + } + } + + out = (s->sstat1 & PHASE_MASK) == PHASE_DO; + + /* host adapter (re)connected */ + trace_lsi_transfer_data(req->tag, len); + s->current->dma_len = len; + s->command_complete = 1; + if (s->waiting) { + if (s->waiting == LSI_WAIT_RESELECT || s->dbc == 0) { + lsi_resume_script(s); + } else { + lsi_do_dma(s, out); + } + } +} + +static void lsi_do_command(LSIState *s) +{ + SCSIDevice *dev; + uint8_t buf[16]; + uint32_t id; + int n; + + trace_lsi_do_command(s->dbc); + if (s->dbc > 16) + s->dbc = 16; + pci_dma_read(PCI_DEVICE(s), s->dnad, buf, s->dbc); + s->sfbr = buf[0]; + s->command_complete = 0; + + id = (s->select_tag >> 8) & 0xf; + dev = scsi_device_find(&s->bus, 0, id, s->current_lun); + if (!dev) { + lsi_bad_selection(s, id); + return; + } + + assert(s->current == NULL); + s->current = g_new0(lsi_request, 1); + s->current->tag = s->select_tag; + s->current->req = scsi_req_new(dev, s->current->tag, s->current_lun, buf, + s->current); + + n = scsi_req_enqueue(s->current->req); + if (n) { + if (n > 0) { + lsi_set_phase(s, PHASE_DI); + } else if (n < 0) { + lsi_set_phase(s, PHASE_DO); + } + scsi_req_continue(s->current->req); + } + if (!s->command_complete) { + if (n) { + /* Command did not complete immediately so disconnect. */ + lsi_add_msg_byte(s, 2); /* SAVE DATA POINTER */ + lsi_add_msg_byte(s, 4); /* DISCONNECT */ + /* wait data */ + lsi_set_phase(s, PHASE_MI); + s->msg_action = LSI_MSG_ACTION_DISCONNECT; + lsi_queue_command(s); + } else { + /* wait command complete */ + lsi_set_phase(s, PHASE_DI); + } + } +} + +static void lsi_do_status(LSIState *s) +{ + uint8_t status; + trace_lsi_do_status(s->dbc, s->status); + if (s->dbc != 1) { + trace_lsi_do_status_error(); + } + s->dbc = 1; + status = s->status; + s->sfbr = status; + pci_dma_write(PCI_DEVICE(s), s->dnad, &status, 1); + lsi_set_phase(s, PHASE_MI); + s->msg_action = LSI_MSG_ACTION_DISCONNECT; + lsi_add_msg_byte(s, 0); /* COMMAND COMPLETE */ +} + +static void lsi_do_msgin(LSIState *s) +{ + uint8_t len; + trace_lsi_do_msgin(s->dbc, s->msg_len); + s->sfbr = s->msg[0]; + len = s->msg_len; + assert(len > 0 && len <= LSI_MAX_MSGIN_LEN); + if (len > s->dbc) + len = s->dbc; + pci_dma_write(PCI_DEVICE(s), s->dnad, s->msg, len); + /* Linux drivers rely on the last byte being in the SIDL. */ + s->sidl = s->msg[len - 1]; + s->msg_len -= len; + if (s->msg_len) { + memmove(s->msg, s->msg + len, s->msg_len); + } else { + /* ??? Check if ATN (not yet implemented) is asserted and maybe + switch to PHASE_MO. */ + switch (s->msg_action) { + case LSI_MSG_ACTION_COMMAND: + lsi_set_phase(s, PHASE_CMD); + break; + case LSI_MSG_ACTION_DISCONNECT: + lsi_disconnect(s); + break; + case LSI_MSG_ACTION_DOUT: + lsi_set_phase(s, PHASE_DO); + break; + case LSI_MSG_ACTION_DIN: + lsi_set_phase(s, PHASE_DI); + break; + default: + abort(); + } + } +} + +/* Read the next byte during a MSGOUT phase. */ +static uint8_t lsi_get_msgbyte(LSIState *s) +{ + uint8_t data; + pci_dma_read(PCI_DEVICE(s), s->dnad, &data, 1); + s->dnad++; + s->dbc--; + return data; +} + +/* Skip the next n bytes during a MSGOUT phase. 
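Message bytes are fetched from guest memory at DNAD with DBC as the + remaining count, so skipping just advances DNAD and shrinks DBC + without issuing a DMA read. 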
*/ +static void lsi_skip_msgbytes(LSIState *s, unsigned int n) +{ + s->dnad += n; + s->dbc -= n; +} + +static void lsi_do_msgout(LSIState *s) +{ + uint8_t msg; + int len; + uint32_t current_tag; + lsi_request *current_req, *p, *p_next; + + if (s->current) { + current_tag = s->current->tag; + current_req = s->current; + } else { + current_tag = s->select_tag; + current_req = lsi_find_by_tag(s, current_tag); + } + + trace_lsi_do_msgout(s->dbc); + while (s->dbc) { + msg = lsi_get_msgbyte(s); + s->sfbr = msg; + + switch (msg) { + case 0x04: + trace_lsi_do_msgout_disconnect(); + lsi_disconnect(s); + break; + case 0x08: + trace_lsi_do_msgout_noop(); + lsi_set_phase(s, PHASE_CMD); + break; + case 0x01: + len = lsi_get_msgbyte(s); + msg = lsi_get_msgbyte(s); + (void)len; /* avoid a warning about unused variable*/ + trace_lsi_do_msgout_extended(msg, len); + switch (msg) { + case 1: + trace_lsi_do_msgout_ignored("SDTR"); + lsi_skip_msgbytes(s, 2); + break; + case 3: + trace_lsi_do_msgout_ignored("WDTR"); + lsi_skip_msgbytes(s, 1); + break; + case 4: + trace_lsi_do_msgout_ignored("PPR"); + lsi_skip_msgbytes(s, 5); + break; + default: + goto bad; + } + break; + case 0x20: /* SIMPLE queue */ + s->select_tag |= lsi_get_msgbyte(s) | LSI_TAG_VALID; + trace_lsi_do_msgout_simplequeue(s->select_tag & 0xff); + break; + case 0x21: /* HEAD of queue */ + qemu_log_mask(LOG_UNIMP, "lsi_scsi: HEAD queue not implemented\n"); + s->select_tag |= lsi_get_msgbyte(s) | LSI_TAG_VALID; + break; + case 0x22: /* ORDERED queue */ + qemu_log_mask(LOG_UNIMP, + "lsi_scsi: ORDERED queue not implemented\n"); + s->select_tag |= lsi_get_msgbyte(s) | LSI_TAG_VALID; + break; + case 0x0d: + /* The ABORT TAG message clears the current I/O process only. */ + trace_lsi_do_msgout_abort(current_tag); + if (current_req) { + scsi_req_cancel(current_req->req); + } + lsi_disconnect(s); + break; + case 0x06: + case 0x0e: + case 0x0c: + /* The ABORT message clears all I/O processes for the selecting + initiator on the specified logical unit of the target. */ + if (msg == 0x06) { + trace_lsi_do_msgout_abort(current_tag); + } + /* The CLEAR QUEUE message clears all I/O processes for all + initiators on the specified logical unit of the target. */ + if (msg == 0x0e) { + trace_lsi_do_msgout_clearqueue(current_tag); + } + /* The BUS DEVICE RESET message clears all I/O processes for all + initiators on all logical units of the target. */ + if (msg == 0x0c) { + trace_lsi_do_msgout_busdevicereset(current_tag); + } + + /* clear the current I/O process */ + if (s->current) { + scsi_req_cancel(s->current->req); + } + + /* As the current implemented devices scsi_disk and scsi_generic + only support one LUN, we don't need to keep track of LUNs. + Clearing I/O processes for other initiators could be possible + for scsi_generic by sending a SG_SCSI_RESET to the /dev/sgX + device, but this is currently not implemented (and seems not + to be really necessary). 
So let's simply clear all queued + commands for the current device: */ + QTAILQ_FOREACH_SAFE(p, &s->queue, next, p_next) { + if ((p->tag & 0x0000ff00) == (current_tag & 0x0000ff00)) { + scsi_req_cancel(p->req); + } + } + + lsi_disconnect(s); + break; + default: + if ((msg & 0x80) == 0) { + goto bad; + } + s->current_lun = msg & 7; + trace_lsi_do_msgout_select(s->current_lun); + lsi_set_phase(s, PHASE_CMD); + break; + } + } + return; +bad: + qemu_log_mask(LOG_UNIMP, "Unimplemented message 0x%02x\n", msg); + lsi_set_phase(s, PHASE_MI); + lsi_add_msg_byte(s, 7); /* MESSAGE REJECT */ + s->msg_action = LSI_MSG_ACTION_COMMAND; +} + +#define LSI_BUF_SIZE 4096 +static void lsi_memcpy(LSIState *s, uint32_t dest, uint32_t src, int count) +{ + int n; + uint8_t buf[LSI_BUF_SIZE]; + + trace_lsi_memcpy(dest, src, count); + while (count) { + n = (count > LSI_BUF_SIZE) ? LSI_BUF_SIZE : count; + lsi_mem_read(s, src, buf, n); + lsi_mem_write(s, dest, buf, n); + src += n; + dest += n; + count -= n; + } +} + +static void lsi_wait_reselect(LSIState *s) +{ + lsi_request *p; + + trace_lsi_wait_reselect(); + + if (s->current) { + return; + } + p = get_pending_req(s); + if (p) { + lsi_reselect(s, p); + } + if (s->current == NULL) { + s->waiting = LSI_WAIT_RESELECT; + } +} + +static void lsi_execute_script(LSIState *s) +{ + PCIDevice *pci_dev = PCI_DEVICE(s); + uint32_t insn; + uint32_t addr, addr_high; + int opcode; + int insn_processed = 0; + + s->istat1 |= LSI_ISTAT1_SRUN; +again: + if (++insn_processed > LSI_MAX_INSN) { + /* Some Windows drivers make the device spin waiting for a memory + location to change. If we have executed a lot of code, then + assume this is the case and force an unexpected device disconnect. + This is apparently sufficient to beat the drivers into submission. + */ + if (!(s->sien0 & LSI_SIST0_UDC)) { + qemu_log_mask(LOG_GUEST_ERROR, + "lsi_scsi: inf. loop with UDC masked"); + } + lsi_script_scsi_interrupt(s, LSI_SIST0_UDC, 0); + lsi_disconnect(s); + trace_lsi_execute_script_stop(); + return; + } + insn = read_dword(s, s->dsp); + if (!insn) { + /* If we receive an empty opcode, increment the DSP by 4 bytes + instead of 8 and execute the next opcode at that location */ + s->dsp += 4; + goto again; + } + addr = read_dword(s, s->dsp + 4); + addr_high = 0; + trace_lsi_execute_script(s->dsp, insn, addr); + s->dsps = addr; + s->dcmd = insn >> 24; + s->dsp += 8; + switch (insn >> 30) { + case 0: /* Block move. */ + if (s->sist1 & LSI_SIST1_STO) { + trace_lsi_execute_script_blockmove_delayed(); + lsi_stop_script(s); + break; + } + s->dbc = insn & 0xffffff; + s->rbc = s->dbc; + /* ??? Set ESA. */ + s->ia = s->dsp - 8; + if (insn & (1 << 29)) { + /* Indirect addressing. */ + addr = read_dword(s, addr); + } else if (insn & (1 << 28)) { + uint32_t buf[2]; + int32_t offset; + /* Table indirect addressing. */ + + /* 32-bit Table indirect */ + offset = sextract32(addr, 0, 24); + pci_dma_read(pci_dev, s->dsa + offset, buf, 8); + /* byte count is stored in bits 0:23 only */ + s->dbc = cpu_to_le32(buf[0]) & 0xffffff; + s->rbc = s->dbc; + addr = cpu_to_le32(buf[1]); + + /* 40-bit DMA, upper addr bits [39:32] stored in first DWORD of + * table, bits [31:24] */ + if (lsi_dma_40bit(s)) + addr_high = cpu_to_le32(buf[0]) >> 24; + else if (lsi_dma_ti64bit(s)) { + int selector = (cpu_to_le32(buf[0]) >> 24) & 0x1f; + switch (selector) { + case 0 ... 
0x0f: + /* offset index into scratch registers since + * TI64 mode can use registers C to R */ + addr_high = s->scratch[2 + selector]; + break; + case 0x10: + addr_high = s->mmrs; + break; + case 0x11: + addr_high = s->mmws; + break; + case 0x12: + addr_high = s->sfs; + break; + case 0x13: + addr_high = s->drs; + break; + case 0x14: + addr_high = s->sbms; + break; + case 0x15: + addr_high = s->dbms; + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "lsi_scsi: Illegal selector specified (0x%x > 0x15) " + "for 64-bit DMA block move", selector); + break; + } + } + } else if (lsi_dma_64bit(s)) { + /* fetch a 3rd dword if 64-bit direct move is enabled and + only if we're not doing table indirect or indirect addressing */ + s->dbms = read_dword(s, s->dsp); + s->dsp += 4; + s->ia = s->dsp - 12; + } + if ((s->sstat1 & PHASE_MASK) != ((insn >> 24) & 7)) { + trace_lsi_execute_script_blockmove_badphase( + scsi_phase_name(s->sstat1), + scsi_phase_name(insn >> 24)); + lsi_script_scsi_interrupt(s, LSI_SIST0_MA, 0); + break; + } + s->dnad = addr; + s->dnad64 = addr_high; + switch (s->sstat1 & 0x7) { + case PHASE_DO: + s->waiting = LSI_DMA_SCRIPTS; + lsi_do_dma(s, 1); + if (s->waiting) + s->waiting = LSI_DMA_IN_PROGRESS; + break; + case PHASE_DI: + s->waiting = LSI_DMA_SCRIPTS; + lsi_do_dma(s, 0); + if (s->waiting) + s->waiting = LSI_DMA_IN_PROGRESS; + break; + case PHASE_CMD: + lsi_do_command(s); + break; + case PHASE_ST: + lsi_do_status(s); + break; + case PHASE_MO: + lsi_do_msgout(s); + break; + case PHASE_MI: + lsi_do_msgin(s); + break; + default: + qemu_log_mask(LOG_UNIMP, "lsi_scsi: Unimplemented phase %s\n", + scsi_phase_name(s->sstat1)); + } + s->dfifo = s->dbc & 0xff; + s->ctest5 = (s->ctest5 & 0xfc) | ((s->dbc >> 8) & 3); + s->sbc = s->dbc; + s->rbc -= s->dbc; + s->ua = addr + s->dbc; + break; + + case 1: /* IO or Read/Write instruction. */ + opcode = (insn >> 27) & 7; + if (opcode < 5) { + uint32_t id; + + if (insn & (1 << 25)) { + id = read_dword(s, s->dsa + sextract32(insn, 0, 24)); + } else { + id = insn; + } + id = (id >> 16) & 0xf; + if (insn & (1 << 26)) { + addr = s->dsp + sextract32(addr, 0, 24); + } + s->dnad = addr; + switch (opcode) { + case 0: /* Select */ + s->sdid = id; + if (s->scntl1 & LSI_SCNTL1_CON) { + trace_lsi_execute_script_io_alreadyreselected(); + s->dsp = s->dnad; + break; + } + s->sstat0 |= LSI_SSTAT0_WOA; + s->scntl1 &= ~LSI_SCNTL1_IARB; + if (!scsi_device_find(&s->bus, 0, id, 0)) { + lsi_bad_selection(s, id); + break; + } + trace_lsi_execute_script_io_selected(id, + insn & (1 << 3) ? " ATN" : ""); + /* ??? Linux drivers complain when this is set. Maybe + it only applies in low-level mode (unimplemented). + lsi_script_scsi_interrupt(s, LSI_SIST0_CMP, 0); */ + s->select_tag = id << 8; + s->scntl1 |= LSI_SCNTL1_CON; + if (insn & (1 << 3)) { + s->socl |= LSI_SOCL_ATN; + s->sbcl |= LSI_SBCL_ATN; + } + s->sbcl |= LSI_SBCL_BSY; + lsi_set_phase(s, PHASE_MO); + s->waiting = LSI_NOWAIT; + break; + case 1: /* Disconnect */ + trace_lsi_execute_script_io_disconnect(); + s->scntl1 &= ~LSI_SCNTL1_CON; + /* FIXME: this is not entirely correct; the target need not ask + * for reselection until it has to send data, while here we force a + * reselection as soon as the bus is free. The correct flow would + * reselect before lsi_transfer_data and disconnect as soon as + * DMA ends. 
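+ * Doing that would mean deferring the reselection until + * lsi_transfer_data() reports pending data for the queued request, + * instead of reselecting as soon as the bus goes free below. 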
+ */ + if (!s->current) { + lsi_request *p = get_pending_req(s); + if (p) { + lsi_reselect(s, p); + } + } + break; + case 2: /* Wait Reselect */ + if (s->istat0 & LSI_ISTAT0_SIGP) { + s->dsp = s->dnad; + } else if (!lsi_irq_on_rsl(s)) { + lsi_wait_reselect(s); + } + break; + case 3: /* Set */ + trace_lsi_execute_script_io_set( + insn & (1 << 3) ? " ATN" : "", + insn & (1 << 6) ? " ACK" : "", + insn & (1 << 9) ? " TM" : "", + insn & (1 << 10) ? " CC" : ""); + if (insn & (1 << 3)) { + s->socl |= LSI_SOCL_ATN; + s->sbcl |= LSI_SBCL_ATN; + lsi_set_phase(s, PHASE_MO); + } + + if (insn & (1 << 6)) { + s->sbcl |= LSI_SBCL_ACK; + } + + if (insn & (1 << 9)) { + qemu_log_mask(LOG_UNIMP, + "lsi_scsi: Target mode not implemented\n"); + } + if (insn & (1 << 10)) + s->carry = 1; + break; + case 4: /* Clear */ + trace_lsi_execute_script_io_clear( + insn & (1 << 3) ? " ATN" : "", + insn & (1 << 6) ? " ACK" : "", + insn & (1 << 9) ? " TM" : "", + insn & (1 << 10) ? " CC" : ""); + if (insn & (1 << 3)) { + s->socl &= ~LSI_SOCL_ATN; + s->sbcl &= ~LSI_SBCL_ATN; + } + + if (insn & (1 << 6)) { + s->sbcl &= ~LSI_SBCL_ACK; + } + + if (insn & (1 << 10)) + s->carry = 0; + break; + } + } else { + uint8_t op0; + uint8_t op1; + uint8_t data8; + int reg; + int operator; + + static const char *opcode_names[3] = + {"Write", "Read", "Read-Modify-Write"}; + static const char *operator_names[8] = + {"MOV", "SHL", "OR", "XOR", "AND", "SHR", "ADD", "ADC"}; + + reg = ((insn >> 16) & 0x7f) | (insn & 0x80); + data8 = (insn >> 8) & 0xff; + opcode = (insn >> 27) & 7; + operator = (insn >> 24) & 7; + trace_lsi_execute_script_io_opcode( + opcode_names[opcode - 5], reg, + operator_names[operator], data8, s->sfbr, + (insn & (1 << 23)) ? " SFBR" : ""); + op0 = op1 = 0; + switch (opcode) { + case 5: /* From SFBR */ + op0 = s->sfbr; + op1 = data8; + break; + case 6: /* To SFBR */ + if (operator) + op0 = lsi_reg_readb(s, reg); + op1 = data8; + break; + case 7: /* Read-modify-write */ + if (operator) + op0 = lsi_reg_readb(s, reg); + if (insn & (1 << 23)) { + op1 = s->sfbr; + } else { + op1 = data8; + } + break; + } + + switch (operator) { + case 0: /* move */ + op0 = op1; + break; + case 1: /* Shift left */ + op1 = op0 >> 7; + op0 = (op0 << 1) | s->carry; + s->carry = op1; + break; + case 2: /* OR */ + op0 |= op1; + break; + case 3: /* XOR */ + op0 ^= op1; + break; + case 4: /* AND */ + op0 &= op1; + break; + case 5: /* SHR */ + op1 = op0 & 1; + op0 = (op0 >> 1) | (s->carry << 7); + s->carry = op1; + break; + case 6: /* ADD */ + op0 += op1; + s->carry = op0 < op1; + break; + case 7: /* ADC */ + op0 += op1 + s->carry; + if (s->carry) + s->carry = op0 <= op1; + else + s->carry = op0 < op1; + break; + } + + switch (opcode) { + case 5: /* From SFBR */ + case 7: /* Read-modify-write */ + lsi_reg_writeb(s, reg, op0); + break; + case 6: /* To SFBR */ + s->sfbr = op0; + break; + } + } + break; + + case 2: /* Transfer Control. */ + { + int cond; + int jmp; + + if ((insn & 0x002e0000) == 0) { + trace_lsi_execute_script_tc_nop(); + break; + } + if (s->sist1 & LSI_SIST1_STO) { + trace_lsi_execute_script_tc_delayedselect_timeout(); + lsi_stop_script(s); + break; + } + cond = jmp = (insn & (1 << 19)) != 0; + if (cond == jmp && (insn & (1 << 21))) { + trace_lsi_execute_script_tc_compc(s->carry == jmp); + cond = s->carry != 0; + } + if (cond == jmp && (insn & (1 << 17))) { + trace_lsi_execute_script_tc_compp(scsi_phase_name(s->sstat1), + jmp ? 
'=' : '!', scsi_phase_name(insn >> 24)); + cond = (s->sstat1 & PHASE_MASK) == ((insn >> 24) & 7); + } + if (cond == jmp && (insn & (1 << 18))) { + uint8_t mask; + + mask = (~insn >> 8) & 0xff; + trace_lsi_execute_script_tc_compd( + s->sfbr, mask, jmp ? '=' : '!', insn & mask); + cond = (s->sfbr & mask) == (insn & mask); + } + if (cond == jmp) { + if (insn & (1 << 23)) { + /* Relative address. */ + addr = s->dsp + sextract32(addr, 0, 24); + } + switch ((insn >> 27) & 7) { + case 0: /* Jump */ + trace_lsi_execute_script_tc_jump(addr); + s->adder = addr; + s->dsp = addr; + break; + case 1: /* Call */ + trace_lsi_execute_script_tc_call(addr); + s->temp = s->dsp; + s->dsp = addr; + break; + case 2: /* Return */ + trace_lsi_execute_script_tc_return(s->temp); + s->dsp = s->temp; + break; + case 3: /* Interrupt */ + trace_lsi_execute_script_tc_interrupt(s->dsps); + if ((insn & (1 << 20)) != 0) { + s->istat0 |= LSI_ISTAT0_INTF; + lsi_update_irq(s); + } else { + lsi_script_dma_interrupt(s, LSI_DSTAT_SIR); + } + break; + default: + trace_lsi_execute_script_tc_illegal(); + lsi_script_dma_interrupt(s, LSI_DSTAT_IID); + break; + } + } else { + trace_lsi_execute_script_tc_cc_failed(); + } + } + break; + + case 3: + if ((insn & (1 << 29)) == 0) { + /* Memory move. */ + uint32_t dest; + /* ??? The docs imply the destination address is loaded into + the TEMP register. However the Linux drivers rely on + the value being preserved. */ + dest = read_dword(s, s->dsp); + s->dsp += 4; + lsi_memcpy(s, dest, addr, insn & 0xffffff); + } else { + uint8_t data[7]; + int reg; + int n; + int i; + + if (insn & (1 << 28)) { + addr = s->dsa + sextract32(addr, 0, 24); + } + n = (insn & 7); + reg = (insn >> 16) & 0xff; + if (insn & (1 << 24)) { + pci_dma_read(pci_dev, addr, data, n); + trace_lsi_execute_script_mm_load(reg, n, addr, *(int *)data); + for (i = 0; i < n; i++) { + lsi_reg_writeb(s, reg + i, data[i]); + } + } else { + trace_lsi_execute_script_mm_store(reg, n, addr); + for (i = 0; i < n; i++) { + data[i] = lsi_reg_readb(s, reg + i); + } + pci_dma_write(pci_dev, addr, data, n); + } + } + } + if (s->istat1 & LSI_ISTAT1_SRUN && s->waiting == LSI_NOWAIT) { + if (s->dcntl & LSI_DCNTL_SSM) { + lsi_script_dma_interrupt(s, LSI_DSTAT_SSI); + } else { + goto again; + } + } + trace_lsi_execute_script_stop(); +} + +static uint8_t lsi_reg_readb(LSIState *s, int offset) +{ + uint8_t ret; + +#define CASE_GET_REG24(name, addr) \ + case addr: ret = s->name & 0xff; break; \ + case addr + 1: ret = (s->name >> 8) & 0xff; break; \ + case addr + 2: ret = (s->name >> 16) & 0xff; break; + +#define CASE_GET_REG32(name, addr) \ + case addr: ret = s->name & 0xff; break; \ + case addr + 1: ret = (s->name >> 8) & 0xff; break; \ + case addr + 2: ret = (s->name >> 16) & 0xff; break; \ + case addr + 3: ret = (s->name >> 24) & 0xff; break; + + switch (offset) { + case 0x00: /* SCNTL0 */ + ret = s->scntl0; + break; + case 0x01: /* SCNTL1 */ + ret = s->scntl1; + break; + case 0x02: /* SCNTL2 */ + ret = s->scntl2; + break; + case 0x03: /* SCNTL3 */ + ret = s->scntl3; + break; + case 0x04: /* SCID */ + ret = s->scid; + break; + case 0x05: /* SXFER */ + ret = s->sxfer; + break; + case 0x06: /* SDID */ + ret = s->sdid; + break; + case 0x07: /* GPREG0 */ + ret = 0x7f; + break; + case 0x08: /* Revision ID */ + ret = 0x00; + break; + case 0x09: /* SOCL */ + ret = s->socl; + break; + case 0xa: /* SSID */ + ret = s->ssid; + break; + case 0xb: /* SBCL */ + ret = s->sbcl; + break; + case 0xc: /* DSTAT */ + ret = s->dstat | LSI_DSTAT_DFE; + if ((s->istat0 & 
LSI_ISTAT0_INTF) == 0) + s->dstat = 0; + lsi_update_irq(s); + break; + case 0x0d: /* SSTAT0 */ + ret = s->sstat0; + break; + case 0x0e: /* SSTAT1 */ + ret = s->sstat1; + break; + case 0x0f: /* SSTAT2 */ + ret = s->scntl1 & LSI_SCNTL1_CON ? 0 : 2; + break; + CASE_GET_REG32(dsa, 0x10) + case 0x14: /* ISTAT0 */ + ret = s->istat0; + break; + case 0x15: /* ISTAT1 */ + ret = s->istat1; + break; + case 0x16: /* MBOX0 */ + ret = s->mbox0; + break; + case 0x17: /* MBOX1 */ + ret = s->mbox1; + break; + case 0x18: /* CTEST0 */ + ret = 0xff; + break; + case 0x19: /* CTEST1 */ + ret = 0; + break; + case 0x1a: /* CTEST2 */ + ret = s->ctest2 | LSI_CTEST2_DACK | LSI_CTEST2_CM; + if (s->istat0 & LSI_ISTAT0_SIGP) { + s->istat0 &= ~LSI_ISTAT0_SIGP; + ret |= LSI_CTEST2_SIGP; + } + break; + case 0x1b: /* CTEST3 */ + ret = s->ctest3; + break; + CASE_GET_REG32(temp, 0x1c) + case 0x20: /* DFIFO */ + ret = s->dfifo; + break; + case 0x21: /* CTEST4 */ + ret = s->ctest4; + break; + case 0x22: /* CTEST5 */ + ret = s->ctest5; + break; + case 0x23: /* CTEST6 */ + ret = 0; + break; + CASE_GET_REG24(dbc, 0x24) + case 0x27: /* DCMD */ + ret = s->dcmd; + break; + CASE_GET_REG32(dnad, 0x28) + CASE_GET_REG32(dsp, 0x2c) + CASE_GET_REG32(dsps, 0x30) + CASE_GET_REG32(scratch[0], 0x34) + case 0x38: /* DMODE */ + ret = s->dmode; + break; + case 0x39: /* DIEN */ + ret = s->dien; + break; + case 0x3a: /* SBR */ + ret = s->sbr; + break; + case 0x3b: /* DCNTL */ + ret = s->dcntl; + break; + /* ADDER Output (Debug of relative jump address) */ + CASE_GET_REG32(adder, 0x3c) + case 0x40: /* SIEN0 */ + ret = s->sien0; + break; + case 0x41: /* SIEN1 */ + ret = s->sien1; + break; + case 0x42: /* SIST0 */ + ret = s->sist0; + s->sist0 = 0; + lsi_update_irq(s); + break; + case 0x43: /* SIST1 */ + ret = s->sist1; + s->sist1 = 0; + lsi_update_irq(s); + break; + case 0x46: /* MACNTL */ + ret = 0x0f; + break; + case 0x47: /* GPCNTL0 */ + ret = 0x0f; + break; + case 0x48: /* STIME0 */ + ret = s->stime0; + break; + case 0x4a: /* RESPID0 */ + ret = s->respid0; + break; + case 0x4b: /* RESPID1 */ + ret = s->respid1; + break; + case 0x4d: /* STEST1 */ + ret = s->stest1; + break; + case 0x4e: /* STEST2 */ + ret = s->stest2; + break; + case 0x4f: /* STEST3 */ + ret = s->stest3; + break; + case 0x50: /* SIDL */ + /* This is needed by the linux drivers. We currently only update it + during the MSG IN phase. */ + ret = s->sidl; + break; + case 0x52: /* STEST4 */ + ret = 0xe0; + break; + case 0x56: /* CCNTL0 */ + ret = s->ccntl0; + break; + case 0x57: /* CCNTL1 */ + ret = s->ccntl1; + break; + case 0x58: /* SBDL */ + /* Some drivers peek at the data bus during the MSG IN phase. */ + if ((s->sstat1 & PHASE_MASK) == PHASE_MI) { + assert(s->msg_len > 0); + return s->msg[0]; + } + ret = 0; + break; + case 0x59: /* SBDL high */ + ret = 0; + break; + CASE_GET_REG32(mmrs, 0xa0) + CASE_GET_REG32(mmws, 0xa4) + CASE_GET_REG32(sfs, 0xa8) + CASE_GET_REG32(drs, 0xac) + CASE_GET_REG32(sbms, 0xb0) + CASE_GET_REG32(dbms, 0xb4) + CASE_GET_REG32(dnad64, 0xb8) + CASE_GET_REG32(pmjad1, 0xc0) + CASE_GET_REG32(pmjad2, 0xc4) + CASE_GET_REG32(rbc, 0xc8) + CASE_GET_REG32(ua, 0xcc) + CASE_GET_REG32(ia, 0xd4) + CASE_GET_REG32(sbc, 0xd8) + CASE_GET_REG32(csbc, 0xdc) + case 0x5c ... 0x9f: + { + int n; + int shift; + n = (offset - 0x58) >> 2; + shift = (offset & 3) * 8; + ret = (s->scratch[n] >> shift) & 0xff; + break; + } + default: + { + qemu_log_mask(LOG_GUEST_ERROR, + "lsi_scsi: invalid read from reg %s %x\n", + offset < ARRAY_SIZE(names) ? 
names[offset] : "???", + offset); + ret = 0xff; + break; + } + } +#undef CASE_GET_REG24 +#undef CASE_GET_REG32 + + trace_lsi_reg_read(offset < ARRAY_SIZE(names) ? names[offset] : "???", + offset, ret); + + return ret; +} + +static void lsi_reg_writeb(LSIState *s, int offset, uint8_t val) +{ +#define CASE_SET_REG24(name, addr) \ + case addr : s->name &= 0xffffff00; s->name |= val; break; \ + case addr + 1: s->name &= 0xffff00ff; s->name |= val << 8; break; \ + case addr + 2: s->name &= 0xff00ffff; s->name |= val << 16; break; + +#define CASE_SET_REG32(name, addr) \ + case addr : s->name &= 0xffffff00; s->name |= val; break; \ + case addr + 1: s->name &= 0xffff00ff; s->name |= val << 8; break; \ + case addr + 2: s->name &= 0xff00ffff; s->name |= val << 16; break; \ + case addr + 3: s->name &= 0x00ffffff; s->name |= val << 24; break; + + trace_lsi_reg_write(offset < ARRAY_SIZE(names) ? names[offset] : "???", + offset, val); + + switch (offset) { + case 0x00: /* SCNTL0 */ + s->scntl0 = val; + if (val & LSI_SCNTL0_START) { + qemu_log_mask(LOG_UNIMP, + "lsi_scsi: Start sequence not implemented\n"); + } + break; + case 0x01: /* SCNTL1 */ + s->scntl1 = val & ~LSI_SCNTL1_SST; + if (val & LSI_SCNTL1_IARB) { + qemu_log_mask(LOG_UNIMP, + "lsi_scsi: Immediate Arbitration not implemented\n"); + } + if (val & LSI_SCNTL1_RST) { + if (!(s->sstat0 & LSI_SSTAT0_RST)) { + qbus_reset_all(BUS(&s->bus)); + s->sstat0 |= LSI_SSTAT0_RST; + lsi_script_scsi_interrupt(s, LSI_SIST0_RST, 0); + } + } else { + s->sstat0 &= ~LSI_SSTAT0_RST; + } + break; + case 0x02: /* SCNTL2 */ + val &= ~(LSI_SCNTL2_WSR | LSI_SCNTL2_WSS); + s->scntl2 = val; + break; + case 0x03: /* SCNTL3 */ + s->scntl3 = val; + break; + case 0x04: /* SCID */ + s->scid = val; + break; + case 0x05: /* SXFER */ + s->sxfer = val; + break; + case 0x06: /* SDID */ + if ((s->ssid & 0x80) && (val & 0xf) != (s->ssid & 0xf)) { + qemu_log_mask(LOG_GUEST_ERROR, + "lsi_scsi: Destination ID does not match SSID\n"); + } + s->sdid = val & 0xf; + break; + case 0x07: /* GPREG0 */ + break; + case 0x08: /* SFBR */ + /* The CPU is not allowed to write to this register. However the + SCRIPTS register move instructions are. */ + s->sfbr = val; + break; + case 0x0a: case 0x0b: + /* OpenServer writes to these readonly registers on startup */ + return; + case 0x0c: case 0x0d: case 0x0e: case 0x0f: + /* Linux writes to these readonly registers on startup. 
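These are the DSTAT/SSTAT0..SSTAT2 status registers (0x0c..0x0f); + ignore the write instead of logging a guest error. 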
*/ + return; + CASE_SET_REG32(dsa, 0x10) + case 0x14: /* ISTAT0 */ + s->istat0 = (s->istat0 & 0x0f) | (val & 0xf0); + if (val & LSI_ISTAT0_ABRT) { + lsi_script_dma_interrupt(s, LSI_DSTAT_ABRT); + } + if (val & LSI_ISTAT0_INTF) { + s->istat0 &= ~LSI_ISTAT0_INTF; + lsi_update_irq(s); + } + if (s->waiting == LSI_WAIT_RESELECT && val & LSI_ISTAT0_SIGP) { + trace_lsi_awoken(); + s->waiting = LSI_NOWAIT; + s->dsp = s->dnad; + lsi_execute_script(s); + } + if (val & LSI_ISTAT0_SRST) { + qdev_reset_all(DEVICE(s)); + } + break; + case 0x16: /* MBOX0 */ + s->mbox0 = val; + break; + case 0x17: /* MBOX1 */ + s->mbox1 = val; + break; + case 0x18: /* CTEST0 */ + /* nothing to do */ + break; + case 0x1a: /* CTEST2 */ + s->ctest2 = val & LSI_CTEST2_PCICIE; + break; + case 0x1b: /* CTEST3 */ + s->ctest3 = val & 0x0f; + break; + CASE_SET_REG32(temp, 0x1c) + case 0x21: /* CTEST4 */ + if (val & 7) { + qemu_log_mask(LOG_UNIMP, + "lsi_scsi: Unimplemented CTEST4-FBL 0x%x\n", val); + } + s->ctest4 = val; + break; + case 0x22: /* CTEST5 */ + if (val & (LSI_CTEST5_ADCK | LSI_CTEST5_BBCK)) { + qemu_log_mask(LOG_UNIMP, + "lsi_scsi: CTEST5 DMA increment not implemented\n"); + } + s->ctest5 = val; + break; + CASE_SET_REG24(dbc, 0x24) + CASE_SET_REG32(dnad, 0x28) + case 0x2c: /* DSP[0:7] */ + s->dsp &= 0xffffff00; + s->dsp |= val; + break; + case 0x2d: /* DSP[8:15] */ + s->dsp &= 0xffff00ff; + s->dsp |= val << 8; + break; + case 0x2e: /* DSP[16:23] */ + s->dsp &= 0xff00ffff; + s->dsp |= val << 16; + break; + case 0x2f: /* DSP[24:31] */ + s->dsp &= 0x00ffffff; + s->dsp |= val << 24; + /* + * FIXME: if s->waiting != LSI_NOWAIT, this will only execute one + * instruction. Is this correct? + */ + if ((s->dmode & LSI_DMODE_MAN) == 0 + && (s->istat1 & LSI_ISTAT1_SRUN) == 0) + lsi_execute_script(s); + break; + CASE_SET_REG32(dsps, 0x30) + CASE_SET_REG32(scratch[0], 0x34) + case 0x38: /* DMODE */ + s->dmode = val; + break; + case 0x39: /* DIEN */ + s->dien = val; + lsi_update_irq(s); + break; + case 0x3a: /* SBR */ + s->sbr = val; + break; + case 0x3b: /* DCNTL */ + s->dcntl = val & ~(LSI_DCNTL_PFF | LSI_DCNTL_STD); + /* + * FIXME: if s->waiting != LSI_NOWAIT, this will only execute one + * instruction. Is this correct? + */ + if ((val & LSI_DCNTL_STD) && (s->istat1 & LSI_ISTAT1_SRUN) == 0) + lsi_execute_script(s); + break; + case 0x40: /* SIEN0 */ + s->sien0 = val; + lsi_update_irq(s); + break; + case 0x41: /* SIEN1 */ + s->sien1 = val; + lsi_update_irq(s); + break; + case 0x47: /* GPCNTL0 */ + break; + case 0x48: /* STIME0 */ + s->stime0 = val; + break; + case 0x49: /* STIME1 */ + if (val & 0xf) { + qemu_log_mask(LOG_UNIMP, + "lsi_scsi: General purpose timer not implemented\n"); + /* ??? Raising the interrupt immediately seems to be sufficient + to keep the FreeBSD driver happy. 
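A real chip would raise SIST1.GEN only once the programmed interval + expires; the timer itself is not modelled, so the interrupt fires + immediately. 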
*/ + lsi_script_scsi_interrupt(s, 0, LSI_SIST1_GEN); + } + break; + case 0x4a: /* RESPID0 */ + s->respid0 = val; + break; + case 0x4b: /* RESPID1 */ + s->respid1 = val; + break; + case 0x4d: /* STEST1 */ + s->stest1 = val; + break; + case 0x4e: /* STEST2 */ + if (val & 1) { + qemu_log_mask(LOG_UNIMP, + "lsi_scsi: Low level mode not implemented\n"); + } + s->stest2 = val; + break; + case 0x4f: /* STEST3 */ + if (val & 0x41) { + qemu_log_mask(LOG_UNIMP, + "lsi_scsi: SCSI FIFO test mode not implemented\n"); + } + s->stest3 = val; + break; + case 0x56: /* CCNTL0 */ + s->ccntl0 = val; + break; + case 0x57: /* CCNTL1 */ + s->ccntl1 = val; + break; + CASE_SET_REG32(mmrs, 0xa0) + CASE_SET_REG32(mmws, 0xa4) + CASE_SET_REG32(sfs, 0xa8) + CASE_SET_REG32(drs, 0xac) + CASE_SET_REG32(sbms, 0xb0) + CASE_SET_REG32(dbms, 0xb4) + CASE_SET_REG32(dnad64, 0xb8) + CASE_SET_REG32(pmjad1, 0xc0) + CASE_SET_REG32(pmjad2, 0xc4) + CASE_SET_REG32(rbc, 0xc8) + CASE_SET_REG32(ua, 0xcc) + CASE_SET_REG32(ia, 0xd4) + CASE_SET_REG32(sbc, 0xd8) + CASE_SET_REG32(csbc, 0xdc) + default: + if (offset >= 0x5c && offset < 0xa0) { + int n; + int shift; + n = (offset - 0x58) >> 2; + shift = (offset & 3) * 8; + s->scratch[n] = deposit32(s->scratch[n], shift, 8, val); + } else { + qemu_log_mask(LOG_GUEST_ERROR, + "lsi_scsi: invalid write to reg %s %x (0x%02x)\n", + offset < ARRAY_SIZE(names) ? names[offset] : "???", + offset, val); + } + } +#undef CASE_SET_REG24 +#undef CASE_SET_REG32 +} + +static void lsi_mmio_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + LSIState *s = opaque; + + lsi_reg_writeb(s, addr & 0xff, val); +} + +static uint64_t lsi_mmio_read(void *opaque, hwaddr addr, + unsigned size) +{ + LSIState *s = opaque; + return lsi_reg_readb(s, addr & 0xff); +} + +static const MemoryRegionOps lsi_mmio_ops = { + .read = lsi_mmio_read, + .write = lsi_mmio_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .impl = { + .min_access_size = 1, + .max_access_size = 1, + }, +}; + +static void lsi_ram_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + LSIState *s = opaque; + stn_le_p(s->script_ram + addr, size, val); +} + +static uint64_t lsi_ram_read(void *opaque, hwaddr addr, + unsigned size) +{ + LSIState *s = opaque; + return ldn_le_p(s->script_ram + addr, size); +} + +static const MemoryRegionOps lsi_ram_ops = { + .read = lsi_ram_read, + .write = lsi_ram_write, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static uint64_t lsi_io_read(void *opaque, hwaddr addr, + unsigned size) +{ + LSIState *s = opaque; + return lsi_reg_readb(s, addr & 0xff); +} + +static void lsi_io_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + LSIState *s = opaque; + lsi_reg_writeb(s, addr & 0xff, val); +} + +static const MemoryRegionOps lsi_io_ops = { + .read = lsi_io_read, + .write = lsi_io_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .impl = { + .min_access_size = 1, + .max_access_size = 1, + }, +}; + +static void lsi_scsi_reset(DeviceState *dev) +{ + LSIState *s = LSI53C895A(dev); + + lsi_soft_reset(s); +} + +static int lsi_pre_save(void *opaque) +{ + LSIState *s = opaque; + + if (s->current) { + assert(s->current->dma_buf == NULL); + assert(s->current->dma_len == 0); + } + assert(QTAILQ_EMPTY(&s->queue)); + + return 0; +} + +static int lsi_post_load(void *opaque, int version_id) +{ + LSIState *s = opaque; + + if (s->msg_len < 0 || s->msg_len > LSI_MAX_MSGIN_LEN) { + return -EINVAL; + } + + return 0; +} + +static const VMStateDescription vmstate_lsi_scsi = { + .name = "lsiscsi", + .version_id = 1, + 
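/* incoming state as old as stream version 0 is still accepted */ + 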
.minimum_version_id = 0, + .pre_save = lsi_pre_save, + .post_load = lsi_post_load, + .fields = (VMStateField[]) { + VMSTATE_PCI_DEVICE(parent_obj, LSIState), + + VMSTATE_INT32(carry, LSIState), + VMSTATE_INT32(status, LSIState), + VMSTATE_INT32(msg_action, LSIState), + VMSTATE_INT32(msg_len, LSIState), + VMSTATE_BUFFER(msg, LSIState), + VMSTATE_INT32(waiting, LSIState), + + VMSTATE_UINT32(dsa, LSIState), + VMSTATE_UINT32(temp, LSIState), + VMSTATE_UINT32(dnad, LSIState), + VMSTATE_UINT32(dbc, LSIState), + VMSTATE_UINT8(istat0, LSIState), + VMSTATE_UINT8(istat1, LSIState), + VMSTATE_UINT8(dcmd, LSIState), + VMSTATE_UINT8(dstat, LSIState), + VMSTATE_UINT8(dien, LSIState), + VMSTATE_UINT8(sist0, LSIState), + VMSTATE_UINT8(sist1, LSIState), + VMSTATE_UINT8(sien0, LSIState), + VMSTATE_UINT8(sien1, LSIState), + VMSTATE_UINT8(mbox0, LSIState), + VMSTATE_UINT8(mbox1, LSIState), + VMSTATE_UINT8(dfifo, LSIState), + VMSTATE_UINT8(ctest2, LSIState), + VMSTATE_UINT8(ctest3, LSIState), + VMSTATE_UINT8(ctest4, LSIState), + VMSTATE_UINT8(ctest5, LSIState), + VMSTATE_UINT8(ccntl0, LSIState), + VMSTATE_UINT8(ccntl1, LSIState), + VMSTATE_UINT32(dsp, LSIState), + VMSTATE_UINT32(dsps, LSIState), + VMSTATE_UINT8(dmode, LSIState), + VMSTATE_UINT8(dcntl, LSIState), + VMSTATE_UINT8(scntl0, LSIState), + VMSTATE_UINT8(scntl1, LSIState), + VMSTATE_UINT8(scntl2, LSIState), + VMSTATE_UINT8(scntl3, LSIState), + VMSTATE_UINT8(sstat0, LSIState), + VMSTATE_UINT8(sstat1, LSIState), + VMSTATE_UINT8(scid, LSIState), + VMSTATE_UINT8(sxfer, LSIState), + VMSTATE_UINT8(socl, LSIState), + VMSTATE_UINT8(sdid, LSIState), + VMSTATE_UINT8(ssid, LSIState), + VMSTATE_UINT8(sfbr, LSIState), + VMSTATE_UINT8(stest1, LSIState), + VMSTATE_UINT8(stest2, LSIState), + VMSTATE_UINT8(stest3, LSIState), + VMSTATE_UINT8(sidl, LSIState), + VMSTATE_UINT8(stime0, LSIState), + VMSTATE_UINT8(respid0, LSIState), + VMSTATE_UINT8(respid1, LSIState), + VMSTATE_UINT8_V(sbcl, LSIState, 1), + VMSTATE_UINT32(mmrs, LSIState), + VMSTATE_UINT32(mmws, LSIState), + VMSTATE_UINT32(sfs, LSIState), + VMSTATE_UINT32(drs, LSIState), + VMSTATE_UINT32(sbms, LSIState), + VMSTATE_UINT32(dbms, LSIState), + VMSTATE_UINT32(dnad64, LSIState), + VMSTATE_UINT32(pmjad1, LSIState), + VMSTATE_UINT32(pmjad2, LSIState), + VMSTATE_UINT32(rbc, LSIState), + VMSTATE_UINT32(ua, LSIState), + VMSTATE_UINT32(ia, LSIState), + VMSTATE_UINT32(sbc, LSIState), + VMSTATE_UINT32(csbc, LSIState), + VMSTATE_BUFFER_UNSAFE(scratch, LSIState, 0, 18 * sizeof(uint32_t)), + VMSTATE_UINT8(sbr, LSIState), + + VMSTATE_BUFFER_UNSAFE(script_ram, LSIState, 0, 8192), + VMSTATE_END_OF_LIST() + } +}; + +static const struct SCSIBusInfo lsi_scsi_info = { + .tcq = true, + .max_target = LSI_MAX_DEVS, + .max_lun = 0, /* LUN support is buggy */ + + .transfer_data = lsi_transfer_data, + .complete = lsi_command_complete, + .cancel = lsi_request_cancelled +}; + +static void lsi_scsi_realize(PCIDevice *dev, Error **errp) +{ + LSIState *s = LSI53C895A(dev); + DeviceState *d = DEVICE(dev); + uint8_t *pci_conf; + + pci_conf = dev->config; + + /* PCI latency timer = 255 */ + pci_conf[PCI_LATENCY_TIMER] = 0xff; + /* Interrupt pin A */ + pci_conf[PCI_INTERRUPT_PIN] = 0x01; + + memory_region_init_io(&s->mmio_io, OBJECT(s), &lsi_mmio_ops, s, + "lsi-mmio", 0x400); + memory_region_init_io(&s->ram_io, OBJECT(s), &lsi_ram_ops, s, + "lsi-ram", 0x2000); + memory_region_init_io(&s->io_io, OBJECT(s), &lsi_io_ops, s, + "lsi-io", 256); + + address_space_init(&s->pci_io_as, pci_address_space_io(dev), "lsi-pci-io"); + qdev_init_gpio_out(d, 
&s->ext_irq, 1); + + pci_register_bar(dev, 0, PCI_BASE_ADDRESS_SPACE_IO, &s->io_io); + pci_register_bar(dev, 1, PCI_BASE_ADDRESS_SPACE_MEMORY, &s->mmio_io); + pci_register_bar(dev, 2, PCI_BASE_ADDRESS_SPACE_MEMORY, &s->ram_io); + QTAILQ_INIT(&s->queue); + + scsi_bus_init(&s->bus, sizeof(s->bus), d, &lsi_scsi_info); +} + +static void lsi_scsi_exit(PCIDevice *dev) +{ + LSIState *s = LSI53C895A(dev); + + address_space_destroy(&s->pci_io_as); +} + +static void lsi_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + + k->realize = lsi_scsi_realize; + k->exit = lsi_scsi_exit; + k->vendor_id = PCI_VENDOR_ID_LSI_LOGIC; + k->device_id = PCI_DEVICE_ID_LSI_53C895A; + k->class_id = PCI_CLASS_STORAGE_SCSI; + k->subsystem_id = 0x1000; + dc->reset = lsi_scsi_reset; + dc->vmsd = &vmstate_lsi_scsi; + set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); +} + +static const TypeInfo lsi_info = { + .name = TYPE_LSI53C895A, + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(LSIState), + .class_init = lsi_class_init, + .interfaces = (InterfaceInfo[]) { + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { }, + }, +}; + +static void lsi53c810_class_init(ObjectClass *klass, void *data) +{ + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + + k->device_id = PCI_DEVICE_ID_LSI_53C810; +} + +static TypeInfo lsi53c810_info = { + .name = TYPE_LSI53C810, + .parent = TYPE_LSI53C895A, + .class_init = lsi53c810_class_init, +}; + +static void lsi53c895a_register_types(void) +{ + type_register_static(&lsi_info); + type_register_static(&lsi53c810_info); +} + +type_init(lsi53c895a_register_types) + +void lsi53c8xx_handle_legacy_cmdline(DeviceState *lsi_dev) +{ + LSIState *s = LSI53C895A(lsi_dev); + + scsi_bus_legacy_handle_cmdline(&s->bus); +} diff --git a/hw/scsi/megasas.c b/hw/scsi/megasas.c new file mode 100644 index 000000000..4ff51221d --- /dev/null +++ b/hw/scsi/megasas.c @@ -0,0 +1,2554 @@ +/* + * QEMU MegaRAID SAS 8708EM2 Host Bus Adapter emulation + * Based on the Linux driver code at drivers/scsi/megaraid + * + * Copyright (c) 2009-2012 Hannes Reinecke, SUSE Labs + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. 
+ */ + +#include "qemu/osdep.h" +#include "qemu-common.h" +#include "hw/pci/pci.h" +#include "hw/qdev-properties.h" +#include "sysemu/dma.h" +#include "sysemu/block-backend.h" +#include "hw/pci/msi.h" +#include "hw/pci/msix.h" +#include "qemu/iov.h" +#include "qemu/module.h" +#include "hw/scsi/scsi.h" +#include "scsi/constants.h" +#include "trace.h" +#include "qapi/error.h" +#include "mfi.h" +#include "migration/vmstate.h" +#include "qom/object.h" + +#define MEGASAS_VERSION_GEN1 "1.70" +#define MEGASAS_VERSION_GEN2 "1.80" +#define MEGASAS_MAX_FRAMES 2048 /* Firmware limit at 65535 */ +#define MEGASAS_DEFAULT_FRAMES 1000 /* Windows requires this */ +#define MEGASAS_GEN2_DEFAULT_FRAMES 1008 /* Windows requires this */ +#define MEGASAS_MAX_SGE 128 /* Firmware limit */ +#define MEGASAS_DEFAULT_SGE 80 +#define MEGASAS_MAX_SECTORS 0xFFFF /* No real limit */ +#define MEGASAS_MAX_ARRAYS 128 + +#define MEGASAS_HBA_SERIAL "QEMU123456" +#define NAA_LOCALLY_ASSIGNED_ID 0x3ULL +#define IEEE_COMPANY_LOCALLY_ASSIGNED 0x525400 + +#define MEGASAS_FLAG_USE_JBOD 0 +#define MEGASAS_MASK_USE_JBOD (1 << MEGASAS_FLAG_USE_JBOD) +#define MEGASAS_FLAG_USE_QUEUE64 1 +#define MEGASAS_MASK_USE_QUEUE64 (1 << MEGASAS_FLAG_USE_QUEUE64) + +typedef struct MegasasCmd { + uint32_t index; + uint16_t flags; + uint16_t count; + uint64_t context; + + hwaddr pa; + hwaddr pa_size; + uint32_t dcmd_opcode; + union mfi_frame *frame; + SCSIRequest *req; + QEMUSGList qsg; + void *iov_buf; + size_t iov_size; + size_t iov_offset; + struct MegasasState *state; +} MegasasCmd; + +struct MegasasState { + /*< private >*/ + PCIDevice parent_obj; + /*< public >*/ + + MemoryRegion mmio_io; + MemoryRegion port_io; + MemoryRegion queue_io; + uint32_t frame_hi; + + uint32_t fw_state; + uint32_t fw_sge; + uint32_t fw_cmds; + uint32_t flags; + uint32_t fw_luns; + uint32_t intr_mask; + uint32_t doorbell; + uint32_t busy; + uint32_t diag; + uint32_t adp_reset; + OnOffAuto msi; + OnOffAuto msix; + + MegasasCmd *event_cmd; + uint16_t event_locale; + int event_class; + uint32_t event_count; + uint32_t shutdown_event; + uint32_t boot_event; + + uint64_t sas_addr; + char *hba_serial; + + uint64_t reply_queue_pa; + void *reply_queue; + uint16_t reply_queue_len; + uint16_t reply_queue_head; + uint16_t reply_queue_tail; + uint64_t consumer_pa; + uint64_t producer_pa; + + MegasasCmd frames[MEGASAS_MAX_FRAMES]; + DECLARE_BITMAP(frame_map, MEGASAS_MAX_FRAMES); + SCSIBus bus; +}; +typedef struct MegasasState MegasasState; + +struct MegasasBaseClass { + PCIDeviceClass parent_class; + const char *product_name; + const char *product_version; + int mmio_bar; + int ioport_bar; + int osts; +}; +typedef struct MegasasBaseClass MegasasBaseClass; + +#define TYPE_MEGASAS_BASE "megasas-base" +#define TYPE_MEGASAS_GEN1 "megasas" +#define TYPE_MEGASAS_GEN2 "megasas-gen2" + +DECLARE_OBJ_CHECKERS(MegasasState, MegasasBaseClass, + MEGASAS, TYPE_MEGASAS_BASE) + + +#define MEGASAS_INTR_DISABLED_MASK 0xFFFFFFFF + +static bool megasas_intr_enabled(MegasasState *s) +{ + if ((s->intr_mask & MEGASAS_INTR_DISABLED_MASK) != + MEGASAS_INTR_DISABLED_MASK) { + return true; + } + return false; +} + +static bool megasas_use_queue64(MegasasState *s) +{ + return s->flags & MEGASAS_MASK_USE_QUEUE64; +} + +static bool megasas_use_msix(MegasasState *s) +{ + return s->msix != ON_OFF_AUTO_OFF; +} + +static bool megasas_is_jbod(MegasasState *s) +{ + return s->flags & MEGASAS_MASK_USE_JBOD; +} + +static void megasas_frame_set_cmd_status(MegasasState *s, + unsigned long frame, uint8_t v) +{ + PCIDevice 
*pci = &s->parent_obj; + stb_pci_dma(pci, frame + offsetof(struct mfi_frame_header, cmd_status), v); +} + +static void megasas_frame_set_scsi_status(MegasasState *s, + unsigned long frame, uint8_t v) +{ + PCIDevice *pci = &s->parent_obj; + stb_pci_dma(pci, frame + offsetof(struct mfi_frame_header, scsi_status), v); +} + +static inline const char *mfi_frame_desc(unsigned int cmd) +{ + static const char *mfi_frame_descs[] = { + "MFI init", "LD Read", "LD Write", "LD SCSI", "PD SCSI", + "MFI Doorbell", "MFI Abort", "MFI SMP", "MFI Stop" + }; + + if (cmd < ARRAY_SIZE(mfi_frame_descs)) { + return mfi_frame_descs[cmd]; + } + + return "Unknown"; +} + +/* + * Context is considered opaque, but the HBA firmware is running + * in little endian mode. So convert it to little endian, too. + */ +static uint64_t megasas_frame_get_context(MegasasState *s, + unsigned long frame) +{ + PCIDevice *pci = &s->parent_obj; + return ldq_le_pci_dma(pci, frame + offsetof(struct mfi_frame_header, context)); +} + +static bool megasas_frame_is_ieee_sgl(MegasasCmd *cmd) +{ + return cmd->flags & MFI_FRAME_IEEE_SGL; +} + +static bool megasas_frame_is_sgl64(MegasasCmd *cmd) +{ + return cmd->flags & MFI_FRAME_SGL64; +} + +static bool megasas_frame_is_sense64(MegasasCmd *cmd) +{ + return cmd->flags & MFI_FRAME_SENSE64; +} + +static uint64_t megasas_sgl_get_addr(MegasasCmd *cmd, + union mfi_sgl *sgl) +{ + uint64_t addr; + + if (megasas_frame_is_ieee_sgl(cmd)) { + addr = le64_to_cpu(sgl->sg_skinny->addr); + } else if (megasas_frame_is_sgl64(cmd)) { + addr = le64_to_cpu(sgl->sg64->addr); + } else { + addr = le32_to_cpu(sgl->sg32->addr); + } + return addr; +} + +static uint32_t megasas_sgl_get_len(MegasasCmd *cmd, + union mfi_sgl *sgl) +{ + uint32_t len; + + if (megasas_frame_is_ieee_sgl(cmd)) { + len = le32_to_cpu(sgl->sg_skinny->len); + } else if (megasas_frame_is_sgl64(cmd)) { + len = le32_to_cpu(sgl->sg64->len); + } else { + len = le32_to_cpu(sgl->sg32->len); + } + return len; +} + +static union mfi_sgl *megasas_sgl_next(MegasasCmd *cmd, + union mfi_sgl *sgl) +{ + uint8_t *next = (uint8_t *)sgl; + + if (megasas_frame_is_ieee_sgl(cmd)) { + next += sizeof(struct mfi_sg_skinny); + } else if (megasas_frame_is_sgl64(cmd)) { + next += sizeof(struct mfi_sg64); + } else { + next += sizeof(struct mfi_sg32); + } + + if (next >= (uint8_t *)cmd->frame + cmd->pa_size) { + return NULL; + } + return (union mfi_sgl *)next; +} + +static void megasas_soft_reset(MegasasState *s); + +static int megasas_map_sgl(MegasasState *s, MegasasCmd *cmd, union mfi_sgl *sgl) +{ + int i; + int iov_count = 0; + size_t iov_size = 0; + + cmd->flags = le16_to_cpu(cmd->frame->header.flags); + iov_count = cmd->frame->header.sge_count; + if (!iov_count || iov_count > MEGASAS_MAX_SGE) { + trace_megasas_iovec_sgl_overflow(cmd->index, iov_count, + MEGASAS_MAX_SGE); + return -1; + } + pci_dma_sglist_init(&cmd->qsg, PCI_DEVICE(s), iov_count); + for (i = 0; i < iov_count; i++) { + dma_addr_t iov_pa, iov_size_p; + + if (!sgl) { + trace_megasas_iovec_sgl_underflow(cmd->index, i); + goto unmap; + } + iov_pa = megasas_sgl_get_addr(cmd, sgl); + iov_size_p = megasas_sgl_get_len(cmd, sgl); + if (!iov_pa || !iov_size_p) { + trace_megasas_iovec_sgl_invalid(cmd->index, i, + iov_pa, iov_size_p); + goto unmap; + } + qemu_sglist_add(&cmd->qsg, iov_pa, iov_size_p); + sgl = megasas_sgl_next(cmd, sgl); + iov_size += (size_t)iov_size_p; + } + if (cmd->iov_size > iov_size) { + trace_megasas_iovec_overflow(cmd->index, iov_size, cmd->iov_size); + } else if (cmd->iov_size < iov_size) { + 
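/* the scatter-gather list supplies more data than the frame advertised */ + 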
trace_megasas_iovec_underflow(cmd->index, iov_size, cmd->iov_size); + } + cmd->iov_offset = 0; + return 0; +unmap: + qemu_sglist_destroy(&cmd->qsg); + return -1; +} + +/* + * passthrough sense and io sense are at the same offset + */ +static int megasas_build_sense(MegasasCmd *cmd, uint8_t *sense_ptr, + uint8_t sense_len) +{ + PCIDevice *pcid = PCI_DEVICE(cmd->state); + uint32_t pa_hi = 0, pa_lo; + hwaddr pa; + int frame_sense_len; + + frame_sense_len = cmd->frame->header.sense_len; + if (sense_len > frame_sense_len) { + sense_len = frame_sense_len; + } + if (sense_len) { + pa_lo = le32_to_cpu(cmd->frame->pass.sense_addr_lo); + if (megasas_frame_is_sense64(cmd)) { + pa_hi = le32_to_cpu(cmd->frame->pass.sense_addr_hi); + } + pa = ((uint64_t) pa_hi << 32) | pa_lo; + pci_dma_write(pcid, pa, sense_ptr, sense_len); + cmd->frame->header.sense_len = sense_len; + } + return sense_len; +} + +static void megasas_write_sense(MegasasCmd *cmd, SCSISense sense) +{ + uint8_t sense_buf[SCSI_SENSE_BUF_SIZE]; + uint8_t sense_len = 18; + + memset(sense_buf, 0, sense_len); + sense_buf[0] = 0xf0; + sense_buf[2] = sense.key; + sense_buf[7] = 10; + sense_buf[12] = sense.asc; + sense_buf[13] = sense.ascq; + megasas_build_sense(cmd, sense_buf, sense_len); +} + +static void megasas_copy_sense(MegasasCmd *cmd) +{ + uint8_t sense_buf[SCSI_SENSE_BUF_SIZE]; + uint8_t sense_len; + + sense_len = scsi_req_get_sense(cmd->req, sense_buf, + SCSI_SENSE_BUF_SIZE); + megasas_build_sense(cmd, sense_buf, sense_len); +} + +/* + * Format an INQUIRY CDB + */ +static int megasas_setup_inquiry(uint8_t *cdb, int pg, int len) +{ + memset(cdb, 0, 6); + cdb[0] = INQUIRY; + if (pg > 0) { + cdb[1] = 0x1; + cdb[2] = pg; + } + cdb[3] = (len >> 8) & 0xff; + cdb[4] = (len & 0xff); + return len; +} + +/* + * Encode lba and len into a READ_16/WRITE_16 CDB + */ +static void megasas_encode_lba(uint8_t *cdb, uint64_t lba, + uint32_t len, bool is_write) +{ + memset(cdb, 0x0, 16); + if (is_write) { + cdb[0] = WRITE_16; + } else { + cdb[0] = READ_16; + } + cdb[2] = (lba >> 56) & 0xff; + cdb[3] = (lba >> 48) & 0xff; + cdb[4] = (lba >> 40) & 0xff; + cdb[5] = (lba >> 32) & 0xff; + cdb[6] = (lba >> 24) & 0xff; + cdb[7] = (lba >> 16) & 0xff; + cdb[8] = (lba >> 8) & 0xff; + cdb[9] = (lba) & 0xff; + cdb[10] = (len >> 24) & 0xff; + cdb[11] = (len >> 16) & 0xff; + cdb[12] = (len >> 8) & 0xff; + cdb[13] = (len) & 0xff; +} + +/* + * Utility functions + */ +static uint64_t megasas_fw_time(void) +{ + struct tm curtime; + + qemu_get_timedate(&curtime, 0); + return ((uint64_t)curtime.tm_sec & 0xff) << 48 | + ((uint64_t)curtime.tm_min & 0xff) << 40 | + ((uint64_t)curtime.tm_hour & 0xff) << 32 | + ((uint64_t)curtime.tm_mday & 0xff) << 24 | + ((uint64_t)curtime.tm_mon & 0xff) << 16 | + ((uint64_t)(curtime.tm_year + 1900) & 0xffff); +} + +/* + * Default disk sata address + * 0x1221 is the magic number as + * present in real hardware, + * so use it here, too. 
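+ * The id lands in bits 24..31, giving addresses of the form + * 0x1221000000000000 | ((uint64_t)id << 24). 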
+ */ +static uint64_t megasas_get_sata_addr(uint16_t id) +{ + uint64_t addr = (0x1221ULL << 48); + return addr | ((uint64_t)id << 24); +} + +/* + * Frame handling + */ +static int megasas_next_index(MegasasState *s, int index, int limit) +{ + index++; + if (index == limit) { + index = 0; + } + return index; +} + +static MegasasCmd *megasas_lookup_frame(MegasasState *s, + hwaddr frame) +{ + MegasasCmd *cmd = NULL; + int num = 0, index; + + index = s->reply_queue_head; + + while (num < s->fw_cmds && index < MEGASAS_MAX_FRAMES) { + if (s->frames[index].pa && s->frames[index].pa == frame) { + cmd = &s->frames[index]; + break; + } + index = megasas_next_index(s, index, s->fw_cmds); + num++; + } + + return cmd; +} + +static void megasas_unmap_frame(MegasasState *s, MegasasCmd *cmd) +{ + PCIDevice *p = PCI_DEVICE(s); + + if (cmd->pa_size) { + pci_dma_unmap(p, cmd->frame, cmd->pa_size, 0, 0); + } + cmd->frame = NULL; + cmd->pa = 0; + cmd->pa_size = 0; + qemu_sglist_destroy(&cmd->qsg); + clear_bit(cmd->index, s->frame_map); +} + +/* + * This absolutely needs to be locked if + * qemu ever goes multithreaded. + */ +static MegasasCmd *megasas_enqueue_frame(MegasasState *s, + hwaddr frame, uint64_t context, int count) +{ + PCIDevice *pcid = PCI_DEVICE(s); + MegasasCmd *cmd = NULL; + int frame_size = MEGASAS_MAX_SGE * sizeof(union mfi_sgl); + hwaddr frame_size_p = frame_size; + unsigned long index; + + index = 0; + while (index < s->fw_cmds) { + index = find_next_zero_bit(s->frame_map, s->fw_cmds, index); + if (!s->frames[index].pa) + break; + /* Busy frame found */ + trace_megasas_qf_mapped(index); + } + if (index >= s->fw_cmds) { + /* All frames busy */ + trace_megasas_qf_busy(frame); + return NULL; + } + cmd = &s->frames[index]; + set_bit(index, s->frame_map); + trace_megasas_qf_new(index, frame); + + cmd->pa = frame; + /* Map all possible frames */ + cmd->frame = pci_dma_map(pcid, frame, &frame_size_p, 0); + if (!cmd->frame || frame_size_p != frame_size) { + trace_megasas_qf_map_failed(cmd->index, (unsigned long)frame); + if (cmd->frame) { + megasas_unmap_frame(s, cmd); + } + s->event_count++; + return NULL; + } + cmd->pa_size = frame_size_p; + cmd->context = context; + if (!megasas_use_queue64(s)) { + cmd->context &= (uint64_t)0xFFFFFFFF; + } + cmd->count = count; + cmd->dcmd_opcode = -1; + s->busy++; + + if (s->consumer_pa) { + s->reply_queue_tail = ldl_le_pci_dma(pcid, s->consumer_pa); + } + trace_megasas_qf_enqueue(cmd->index, cmd->count, cmd->context, + s->reply_queue_head, s->reply_queue_tail, s->busy); + + return cmd; +} + +static void megasas_complete_frame(MegasasState *s, uint64_t context) +{ + PCIDevice *pci_dev = PCI_DEVICE(s); + int tail, queue_offset; + + /* Decrement busy count */ + s->busy--; + if (s->reply_queue_pa) { + /* + * Put command on the reply queue. + * Context is opaque, but emulation is running in + * little endian. So convert it. 
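+ * Whether an entry is 32 or 64 bits wide depends on + * MFI_QUEUE_FLAG_CONTEXT64, negotiated by the guest at init time, + * so the store width below follows megasas_use_queue64(). 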
+ */ + if (megasas_use_queue64(s)) { + queue_offset = s->reply_queue_head * sizeof(uint64_t); + stq_le_pci_dma(pci_dev, s->reply_queue_pa + queue_offset, context); + } else { + queue_offset = s->reply_queue_head * sizeof(uint32_t); + stl_le_pci_dma(pci_dev, s->reply_queue_pa + queue_offset, context); + } + s->reply_queue_tail = ldl_le_pci_dma(pci_dev, s->consumer_pa); + trace_megasas_qf_complete(context, s->reply_queue_head, + s->reply_queue_tail, s->busy); + } + + if (megasas_intr_enabled(s)) { + /* Update reply queue pointer */ + s->reply_queue_tail = ldl_le_pci_dma(pci_dev, s->consumer_pa); + tail = s->reply_queue_head; + s->reply_queue_head = megasas_next_index(s, tail, s->fw_cmds); + trace_megasas_qf_update(s->reply_queue_head, s->reply_queue_tail, + s->busy); + stl_le_pci_dma(pci_dev, s->producer_pa, s->reply_queue_head); + /* Notify HBA */ + if (msix_enabled(pci_dev)) { + trace_megasas_msix_raise(0); + msix_notify(pci_dev, 0); + } else if (msi_enabled(pci_dev)) { + trace_megasas_msi_raise(0); + msi_notify(pci_dev, 0); + } else { + s->doorbell++; + if (s->doorbell == 1) { + trace_megasas_irq_raise(); + pci_irq_assert(pci_dev); + } + } + } else { + trace_megasas_qf_complete_noirq(context); + } +} + +static void megasas_complete_command(MegasasCmd *cmd) +{ + cmd->iov_size = 0; + cmd->iov_offset = 0; + + cmd->req->hba_private = NULL; + scsi_req_unref(cmd->req); + cmd->req = NULL; + + megasas_unmap_frame(cmd->state, cmd); + megasas_complete_frame(cmd->state, cmd->context); +} + +static void megasas_reset_frames(MegasasState *s) +{ + int i; + MegasasCmd *cmd; + + for (i = 0; i < s->fw_cmds; i++) { + cmd = &s->frames[i]; + if (cmd->pa) { + megasas_unmap_frame(s, cmd); + } + } + bitmap_zero(s->frame_map, MEGASAS_MAX_FRAMES); +} + +static void megasas_abort_command(MegasasCmd *cmd) +{ + /* Never abort internal commands. 
*/ + if (cmd->dcmd_opcode != -1) { + return; + } + if (cmd->req != NULL) { + scsi_req_cancel(cmd->req); + } +} + +static int megasas_init_firmware(MegasasState *s, MegasasCmd *cmd) +{ + PCIDevice *pcid = PCI_DEVICE(s); + uint32_t pa_hi, pa_lo; + hwaddr iq_pa, initq_size = sizeof(struct mfi_init_qinfo); + struct mfi_init_qinfo *initq = NULL; + uint32_t flags; + int ret = MFI_STAT_OK; + + if (s->reply_queue_pa) { + trace_megasas_initq_mapped(s->reply_queue_pa); + goto out; + } + pa_lo = le32_to_cpu(cmd->frame->init.qinfo_new_addr_lo); + pa_hi = le32_to_cpu(cmd->frame->init.qinfo_new_addr_hi); + iq_pa = (((uint64_t) pa_hi << 32) | pa_lo); + trace_megasas_init_firmware((uint64_t)iq_pa); + initq = pci_dma_map(pcid, iq_pa, &initq_size, 0); + if (!initq || initq_size != sizeof(*initq)) { + trace_megasas_initq_map_failed(cmd->index); + s->event_count++; + ret = MFI_STAT_MEMORY_NOT_AVAILABLE; + goto out; + } + s->reply_queue_len = le32_to_cpu(initq->rq_entries) & 0xFFFF; + if (s->reply_queue_len > s->fw_cmds) { + trace_megasas_initq_mismatch(s->reply_queue_len, s->fw_cmds); + s->event_count++; + ret = MFI_STAT_INVALID_PARAMETER; + goto out; + } + pa_lo = le32_to_cpu(initq->rq_addr_lo); + pa_hi = le32_to_cpu(initq->rq_addr_hi); + s->reply_queue_pa = ((uint64_t) pa_hi << 32) | pa_lo; + pa_lo = le32_to_cpu(initq->ci_addr_lo); + pa_hi = le32_to_cpu(initq->ci_addr_hi); + s->consumer_pa = ((uint64_t) pa_hi << 32) | pa_lo; + pa_lo = le32_to_cpu(initq->pi_addr_lo); + pa_hi = le32_to_cpu(initq->pi_addr_hi); + s->producer_pa = ((uint64_t) pa_hi << 32) | pa_lo; + s->reply_queue_head = ldl_le_pci_dma(pcid, s->producer_pa); + s->reply_queue_head %= MEGASAS_MAX_FRAMES; + s->reply_queue_tail = ldl_le_pci_dma(pcid, s->consumer_pa); + s->reply_queue_tail %= MEGASAS_MAX_FRAMES; + flags = le32_to_cpu(initq->flags); + if (flags & MFI_QUEUE_FLAG_CONTEXT64) { + s->flags |= MEGASAS_MASK_USE_QUEUE64; + } + trace_megasas_init_queue((unsigned long)s->reply_queue_pa, + s->reply_queue_len, s->reply_queue_head, + s->reply_queue_tail, flags); + megasas_reset_frames(s); + s->fw_state = MFI_FWSTATE_OPERATIONAL; +out: + if (initq) { + pci_dma_unmap(pcid, initq, initq_size, 0, 0); + } + return ret; +} + +static int megasas_map_dcmd(MegasasState *s, MegasasCmd *cmd) +{ + dma_addr_t iov_pa, iov_size; + int iov_count; + + cmd->flags = le16_to_cpu(cmd->frame->header.flags); + iov_count = cmd->frame->header.sge_count; + if (!iov_count) { + trace_megasas_dcmd_zero_sge(cmd->index); + cmd->iov_size = 0; + return 0; + } else if (iov_count > 1) { + trace_megasas_dcmd_invalid_sge(cmd->index, iov_count); + cmd->iov_size = 0; + return -EINVAL; + } + iov_pa = megasas_sgl_get_addr(cmd, &cmd->frame->dcmd.sgl); + iov_size = megasas_sgl_get_len(cmd, &cmd->frame->dcmd.sgl); + pci_dma_sglist_init(&cmd->qsg, PCI_DEVICE(s), 1); + qemu_sglist_add(&cmd->qsg, iov_pa, iov_size); + cmd->iov_size = iov_size; + return 0; +} + +static void megasas_finish_dcmd(MegasasCmd *cmd, uint32_t iov_size) +{ + trace_megasas_finish_dcmd(cmd->index, iov_size); + + if (iov_size > cmd->iov_size) { + if (megasas_frame_is_ieee_sgl(cmd)) { + cmd->frame->dcmd.sgl.sg_skinny->len = cpu_to_le32(iov_size); + } else if (megasas_frame_is_sgl64(cmd)) { + cmd->frame->dcmd.sgl.sg64->len = cpu_to_le32(iov_size); + } else { + cmd->frame->dcmd.sgl.sg32->len = cpu_to_le32(iov_size); + } + } +} + +static int megasas_ctrl_get_info(MegasasState *s, MegasasCmd *cmd) +{ + PCIDevice *pci_dev = PCI_DEVICE(s); + PCIDeviceClass *pci_class = PCI_DEVICE_GET_CLASS(pci_dev); + MegasasBaseClass *base_class 
= MEGASAS_GET_CLASS(s); + struct mfi_ctrl_info info; + size_t dcmd_size = sizeof(info); + BusChild *kid; + int num_pd_disks = 0; + + memset(&info, 0x0, dcmd_size); + if (cmd->iov_size < dcmd_size) { + trace_megasas_dcmd_invalid_xfer_len(cmd->index, cmd->iov_size, + dcmd_size); + return MFI_STAT_INVALID_PARAMETER; + } + + info.pci.vendor = cpu_to_le16(pci_class->vendor_id); + info.pci.device = cpu_to_le16(pci_class->device_id); + info.pci.subvendor = cpu_to_le16(pci_class->subsystem_vendor_id); + info.pci.subdevice = cpu_to_le16(pci_class->subsystem_id); + + /* + * For some reason the firmware supports only up to + * 8 device ports, despite supporting a far larger + * number of physical devices. So just display the + * first 8 devices in the device port list, + * independent of how many logical devices are + * actually present. + */ + info.host.type = MFI_INFO_HOST_PCIE; + info.device.type = MFI_INFO_DEV_SAS3G; + info.device.port_count = 8; + QTAILQ_FOREACH(kid, &s->bus.qbus.children, sibling) { + SCSIDevice *sdev = SCSI_DEVICE(kid->child); + uint16_t pd_id; + + if (num_pd_disks < 8) { + pd_id = ((sdev->id & 0xFF) << 8) | (sdev->lun & 0xFF); + info.device.port_addr[num_pd_disks] = + cpu_to_le64(megasas_get_sata_addr(pd_id)); + } + num_pd_disks++; + } + + memcpy(info.product_name, base_class->product_name, 24); + snprintf(info.serial_number, 32, "%s", s->hba_serial); + snprintf(info.package_version, 0x60, "%s-QEMU", qemu_hw_version()); + memcpy(info.image_component[0].name, "APP", 3); + snprintf(info.image_component[0].version, 10, "%s-QEMU", + base_class->product_version); + memcpy(info.image_component[0].build_date, "Apr 1 2014", 11); + memcpy(info.image_component[0].build_time, "12:34:56", 8); + info.image_component_count = 1; + if (pci_dev->has_rom) { + uint8_t biosver[32]; + uint8_t *ptr; + + ptr = memory_region_get_ram_ptr(&pci_dev->rom); + memcpy(biosver, ptr + 0x41, 31); + biosver[31] = 0; + memcpy(info.image_component[1].name, "BIOS", 4); + memcpy(info.image_component[1].version, biosver, + strlen((const char *)biosver)); + info.image_component_count++; + } + info.current_fw_time = cpu_to_le32(megasas_fw_time()); + info.max_arms = 32; + info.max_spans = 8; + info.max_arrays = MEGASAS_MAX_ARRAYS; + info.max_lds = MFI_MAX_LD; + info.max_cmds = cpu_to_le16(s->fw_cmds); + info.max_sg_elements = cpu_to_le16(s->fw_sge); + info.max_request_size = cpu_to_le32(MEGASAS_MAX_SECTORS); + if (!megasas_is_jbod(s)) { + info.lds_present = cpu_to_le16(num_pd_disks); + } + info.pd_present = cpu_to_le16(num_pd_disks); + info.pd_disks_present = cpu_to_le16(num_pd_disks); + info.hw_present = cpu_to_le32(MFI_INFO_HW_NVRAM | + MFI_INFO_HW_MEM | + MFI_INFO_HW_FLASH); + info.memory_size = cpu_to_le16(512); + info.nvram_size = cpu_to_le16(32); + info.flash_size = cpu_to_le16(16); + info.raid_levels = cpu_to_le32(MFI_INFO_RAID_0); + info.adapter_ops = cpu_to_le32(MFI_INFO_AOPS_RBLD_RATE | + MFI_INFO_AOPS_SELF_DIAGNOSTIC | + MFI_INFO_AOPS_MIXED_ARRAY); + info.ld_ops = cpu_to_le32(MFI_INFO_LDOPS_DISK_CACHE_POLICY | + MFI_INFO_LDOPS_ACCESS_POLICY | + MFI_INFO_LDOPS_IO_POLICY | + MFI_INFO_LDOPS_WRITE_POLICY | + MFI_INFO_LDOPS_READ_POLICY); + info.max_strips_per_io = cpu_to_le16(s->fw_sge); + info.stripe_sz_ops.min = 3; + info.stripe_sz_ops.max = ctz32(MEGASAS_MAX_SECTORS + 1); + info.properties.pred_fail_poll_interval = cpu_to_le16(300); + info.properties.intr_throttle_cnt = cpu_to_le16(16); + info.properties.intr_throttle_timeout = cpu_to_le16(50); + info.properties.rebuild_rate = 30; +
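+ /*
+ * The *_rate fields are percentages of controller
+ * bandwidth. The emulation never runs any background
+ * jobs, so these are merely plausible MegaRAID-style
+ * defaults reported back to the guest.
+ */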
info.properties.patrol_read_rate = 30; + info.properties.bgi_rate = 30; + info.properties.cc_rate = 30; + info.properties.recon_rate = 30; + info.properties.cache_flush_interval = 4; + info.properties.spinup_drv_cnt = 2; + info.properties.spinup_delay = 6; + info.properties.ecc_bucket_size = 15; + info.properties.ecc_bucket_leak_rate = cpu_to_le16(1440); + info.properties.expose_encl_devices = 1; + info.properties.OnOffProperties = cpu_to_le32(MFI_CTRL_PROP_EnableJBOD); + info.pd_ops = cpu_to_le32(MFI_INFO_PDOPS_FORCE_ONLINE | + MFI_INFO_PDOPS_FORCE_OFFLINE); + info.pd_mix_support = cpu_to_le32(MFI_INFO_PDMIX_SAS | + MFI_INFO_PDMIX_SATA | + MFI_INFO_PDMIX_LD); + + cmd->iov_size -= dma_buf_read((uint8_t *)&info, dcmd_size, &cmd->qsg); + return MFI_STAT_OK; +} + +static int megasas_mfc_get_defaults(MegasasState *s, MegasasCmd *cmd) +{ + struct mfi_defaults info; + size_t dcmd_size = sizeof(struct mfi_defaults); + + memset(&info, 0x0, dcmd_size); + if (cmd->iov_size < dcmd_size) { + trace_megasas_dcmd_invalid_xfer_len(cmd->index, cmd->iov_size, + dcmd_size); + return MFI_STAT_INVALID_PARAMETER; + } + + info.sas_addr = cpu_to_le64(s->sas_addr); + info.stripe_size = 3; + info.flush_time = 4; + info.background_rate = 30; + info.allow_mix_in_enclosure = 1; + info.allow_mix_in_ld = 1; + info.direct_pd_mapping = 1; + /* Enable for BIOS support */ + info.bios_enumerate_lds = 1; + info.disable_ctrl_r = 1; + info.expose_enclosure_devices = 1; + info.disable_preboot_cli = 1; + info.cluster_disable = 1; + + cmd->iov_size -= dma_buf_read((uint8_t *)&info, dcmd_size, &cmd->qsg); + return MFI_STAT_OK; +} + +static int megasas_dcmd_get_bios_info(MegasasState *s, MegasasCmd *cmd) +{ + struct mfi_bios_data info; + size_t dcmd_size = sizeof(info); + + memset(&info, 0x0, dcmd_size); + if (cmd->iov_size < dcmd_size) { + trace_megasas_dcmd_invalid_xfer_len(cmd->index, cmd->iov_size, + dcmd_size); + return MFI_STAT_INVALID_PARAMETER; + } + info.continue_on_error = 1; + info.verbose = 1; + if (megasas_is_jbod(s)) { + info.expose_all_drives = 1; + } + + cmd->iov_size -= dma_buf_read((uint8_t *)&info, dcmd_size, &cmd->qsg); + return MFI_STAT_OK; +} + +static int megasas_dcmd_get_fw_time(MegasasState *s, MegasasCmd *cmd) +{ + uint64_t fw_time; + size_t dcmd_size = sizeof(fw_time); + + fw_time = cpu_to_le64(megasas_fw_time()); + + cmd->iov_size -= dma_buf_read((uint8_t *)&fw_time, dcmd_size, &cmd->qsg); + return MFI_STAT_OK; +} + +static int megasas_dcmd_set_fw_time(MegasasState *s, MegasasCmd *cmd) +{ + uint64_t fw_time; + + /* This is a dummy; setting of firmware time is not allowed */ + memcpy(&fw_time, cmd->frame->dcmd.mbox, sizeof(fw_time)); + + trace_megasas_dcmd_set_fw_time(cmd->index, fw_time); + fw_time = cpu_to_le64(megasas_fw_time()); + return MFI_STAT_OK; +} + +static int megasas_event_info(MegasasState *s, MegasasCmd *cmd) +{ + struct mfi_evt_log_state info; + size_t dcmd_size = sizeof(info); + + memset(&info, 0, dcmd_size); + + info.newest_seq_num = cpu_to_le32(s->event_count); + info.shutdown_seq_num = cpu_to_le32(s->shutdown_event); + info.boot_seq_num = cpu_to_le32(s->boot_event); + + cmd->iov_size -= dma_buf_read((uint8_t *)&info, dcmd_size, &cmd->qsg); + return MFI_STAT_OK; +} + +static int megasas_event_wait(MegasasState *s, MegasasCmd *cmd) +{ + union mfi_evt event; + + if (cmd->iov_size < sizeof(struct mfi_evt_detail)) { + trace_megasas_dcmd_invalid_xfer_len(cmd->index, cmd->iov_size, + sizeof(struct mfi_evt_detail)); + return MFI_STAT_INVALID_PARAMETER; + } + s->event_count = 
cpu_to_le32(cmd->frame->dcmd.mbox[0]); + event.word = cpu_to_le32(cmd->frame->dcmd.mbox[4]); + s->event_locale = event.members.locale; + s->event_class = event.members.class; + s->event_cmd = cmd; + /* Decrease busy count; event frame doesn't count here */ + s->busy--; + cmd->iov_size = sizeof(struct mfi_evt_detail); + return MFI_STAT_INVALID_STATUS; +} + +static int megasas_dcmd_pd_get_list(MegasasState *s, MegasasCmd *cmd) +{ + struct mfi_pd_list info; + size_t dcmd_size = sizeof(info); + BusChild *kid; + uint32_t offset, dcmd_limit, num_pd_disks = 0, max_pd_disks; + + memset(&info, 0, dcmd_size); + offset = 8; + dcmd_limit = offset + sizeof(struct mfi_pd_address); + if (cmd->iov_size < dcmd_limit) { + trace_megasas_dcmd_invalid_xfer_len(cmd->index, cmd->iov_size, + dcmd_limit); + return MFI_STAT_INVALID_PARAMETER; + } + + max_pd_disks = (cmd->iov_size - offset) / sizeof(struct mfi_pd_address); + if (max_pd_disks > MFI_MAX_SYS_PDS) { + max_pd_disks = MFI_MAX_SYS_PDS; + } + QTAILQ_FOREACH(kid, &s->bus.qbus.children, sibling) { + SCSIDevice *sdev = SCSI_DEVICE(kid->child); + uint16_t pd_id; + + if (num_pd_disks >= max_pd_disks) + break; + + pd_id = ((sdev->id & 0xFF) << 8) | (sdev->lun & 0xFF); + info.addr[num_pd_disks].device_id = cpu_to_le16(pd_id); + info.addr[num_pd_disks].encl_device_id = 0xFFFF; + info.addr[num_pd_disks].encl_index = 0; + info.addr[num_pd_disks].slot_number = sdev->id & 0xFF; + info.addr[num_pd_disks].scsi_dev_type = sdev->type; + info.addr[num_pd_disks].connect_port_bitmap = 0x1; + info.addr[num_pd_disks].sas_addr[0] = + cpu_to_le64(megasas_get_sata_addr(pd_id)); + num_pd_disks++; + offset += sizeof(struct mfi_pd_address); + } + trace_megasas_dcmd_pd_get_list(cmd->index, num_pd_disks, + max_pd_disks, offset); + + info.size = cpu_to_le32(offset); + info.count = cpu_to_le32(num_pd_disks); + + cmd->iov_size -= dma_buf_read((uint8_t *)&info, offset, &cmd->qsg); + return MFI_STAT_OK; +} + +static int megasas_dcmd_pd_list_query(MegasasState *s, MegasasCmd *cmd) +{ + uint16_t flags; + + /* mbox0 contains flags */ + flags = le16_to_cpu(cmd->frame->dcmd.mbox[0]); + trace_megasas_dcmd_pd_list_query(cmd->index, flags); + if (flags == MR_PD_QUERY_TYPE_ALL || + megasas_is_jbod(s)) { + return megasas_dcmd_pd_get_list(s, cmd); + } + + return MFI_STAT_OK; +} + +static int megasas_pd_get_info_submit(SCSIDevice *sdev, int lun, + MegasasCmd *cmd) +{ + struct mfi_pd_info *info = cmd->iov_buf; + size_t dcmd_size = sizeof(struct mfi_pd_info); + uint64_t pd_size; + uint16_t pd_id = ((sdev->id & 0xFF) << 8) | (lun & 0xFF); + uint8_t cmdbuf[6]; + size_t len, resid; + + if (!cmd->iov_buf) { + cmd->iov_buf = g_malloc0(dcmd_size); + info = cmd->iov_buf; + info->inquiry_data[0] = 0x7f; /* Force PQual 0x3, PType 0x1f */ + info->vpd_page83[0] = 0x7f; + megasas_setup_inquiry(cmdbuf, 0, sizeof(info->inquiry_data)); + cmd->req = scsi_req_new(sdev, cmd->index, lun, cmdbuf, cmd); + if (!cmd->req) { + trace_megasas_dcmd_req_alloc_failed(cmd->index, + "PD get info std inquiry"); + g_free(cmd->iov_buf); + cmd->iov_buf = NULL; + return MFI_STAT_FLASH_ALLOC_FAIL; + } + trace_megasas_dcmd_internal_submit(cmd->index, + "PD get info std inquiry", lun); + len = scsi_req_enqueue(cmd->req); + if (len > 0) { + cmd->iov_size = len; + scsi_req_continue(cmd->req); + } + return MFI_STAT_INVALID_STATUS; + } else if (info->inquiry_data[0] != 0x7f && info->vpd_page83[0] == 0x7f) { + megasas_setup_inquiry(cmdbuf, 0x83, sizeof(info->vpd_page83)); + cmd->req = scsi_req_new(sdev, cmd->index, lun, cmdbuf, cmd); + if 
(!cmd->req) { + trace_megasas_dcmd_req_alloc_failed(cmd->index, + "PD get info vpd inquiry"); + return MFI_STAT_FLASH_ALLOC_FAIL; + } + trace_megasas_dcmd_internal_submit(cmd->index, + "PD get info vpd inquiry", lun); + len = scsi_req_enqueue(cmd->req); + if (len > 0) { + cmd->iov_size = len; + scsi_req_continue(cmd->req); + } + return MFI_STAT_INVALID_STATUS; + } + /* Finished, set FW state */ + if ((info->inquiry_data[0] >> 5) == 0) { + if (megasas_is_jbod(cmd->state)) { + info->fw_state = cpu_to_le16(MFI_PD_STATE_SYSTEM); + } else { + info->fw_state = cpu_to_le16(MFI_PD_STATE_ONLINE); + } + } else { + info->fw_state = cpu_to_le16(MFI_PD_STATE_OFFLINE); + } + + info->ref.v.device_id = cpu_to_le16(pd_id); + info->state.ddf.pd_type = cpu_to_le16(MFI_PD_DDF_TYPE_IN_VD| + MFI_PD_DDF_TYPE_INTF_SAS); + blk_get_geometry(sdev->conf.blk, &pd_size); + info->raw_size = cpu_to_le64(pd_size); + info->non_coerced_size = cpu_to_le64(pd_size); + info->coerced_size = cpu_to_le64(pd_size); + info->encl_device_id = 0xFFFF; + info->slot_number = (sdev->id & 0xFF); + info->path_info.count = 1; + info->path_info.sas_addr[0] = + cpu_to_le64(megasas_get_sata_addr(pd_id)); + info->connected_port_bitmap = 0x1; + info->device_speed = 1; + info->link_speed = 1; + resid = dma_buf_read(cmd->iov_buf, dcmd_size, &cmd->qsg); + g_free(cmd->iov_buf); + cmd->iov_size = dcmd_size - resid; + cmd->iov_buf = NULL; + return MFI_STAT_OK; +} + +static int megasas_dcmd_pd_get_info(MegasasState *s, MegasasCmd *cmd) +{ + size_t dcmd_size = sizeof(struct mfi_pd_info); + uint16_t pd_id; + uint8_t target_id, lun_id; + SCSIDevice *sdev = NULL; + int retval = MFI_STAT_DEVICE_NOT_FOUND; + + if (cmd->iov_size < dcmd_size) { + return MFI_STAT_INVALID_PARAMETER; + } + + /* mbox0 has the ID */ + pd_id = le16_to_cpu(cmd->frame->dcmd.mbox[0]); + target_id = (pd_id >> 8) & 0xFF; + lun_id = pd_id & 0xFF; + sdev = scsi_device_find(&s->bus, 0, target_id, lun_id); + trace_megasas_dcmd_pd_get_info(cmd->index, pd_id); + + if (sdev) { + /* Submit inquiry */ + retval = megasas_pd_get_info_submit(sdev, pd_id, cmd); + } + + return retval; +} + +static int megasas_dcmd_ld_get_list(MegasasState *s, MegasasCmd *cmd) +{ + struct mfi_ld_list info; + size_t dcmd_size = sizeof(info), resid; + uint32_t num_ld_disks = 0, max_ld_disks; + uint64_t ld_size; + BusChild *kid; + + memset(&info, 0, dcmd_size); + if (cmd->iov_size > dcmd_size) { + trace_megasas_dcmd_invalid_xfer_len(cmd->index, cmd->iov_size, + dcmd_size); + return MFI_STAT_INVALID_PARAMETER; + } + + max_ld_disks = (cmd->iov_size - 8) / 16; + if (megasas_is_jbod(s)) { + max_ld_disks = 0; + } + if (max_ld_disks > MFI_MAX_LD) { + max_ld_disks = MFI_MAX_LD; + } + QTAILQ_FOREACH(kid, &s->bus.qbus.children, sibling) { + SCSIDevice *sdev = SCSI_DEVICE(kid->child); + + if (num_ld_disks >= max_ld_disks) { + break; + } + /* Logical device size is in blocks */ + blk_get_geometry(sdev->conf.blk, &ld_size); + info.ld_list[num_ld_disks].ld.v.target_id = sdev->id; + info.ld_list[num_ld_disks].state = MFI_LD_STATE_OPTIMAL; + info.ld_list[num_ld_disks].size = cpu_to_le64(ld_size); + num_ld_disks++; + } + info.ld_count = cpu_to_le32(num_ld_disks); + trace_megasas_dcmd_ld_get_list(cmd->index, num_ld_disks, max_ld_disks); + + resid = dma_buf_read((uint8_t *)&info, dcmd_size, &cmd->qsg); + cmd->iov_size = dcmd_size - resid; + return MFI_STAT_OK; +} + +static int megasas_dcmd_ld_list_query(MegasasState *s, MegasasCmd *cmd) +{ + uint16_t flags; + struct mfi_ld_targetid_list info; + size_t dcmd_size = sizeof(info), resid; + 
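+ /*
+ * The reply is variable-length: a 32-bit size, a 32-bit
+ * count and (apparently) 3 pad bytes, followed by one
+ * target-id byte per logical drive; hence dcmd_size is
+ * recomputed below as sizeof(uint32_t) * 2 + 3 plus one
+ * byte per disk reported.
+ */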
uint32_t num_ld_disks = 0, max_ld_disks = s->fw_luns; + BusChild *kid; + + /* mbox0 contains flags */ + flags = le16_to_cpu(cmd->frame->dcmd.mbox[0]); + trace_megasas_dcmd_ld_list_query(cmd->index, flags); + if (flags != MR_LD_QUERY_TYPE_ALL && + flags != MR_LD_QUERY_TYPE_EXPOSED_TO_HOST) { + max_ld_disks = 0; + } + + memset(&info, 0, dcmd_size); + if (cmd->iov_size < 12) { + trace_megasas_dcmd_invalid_xfer_len(cmd->index, cmd->iov_size, + dcmd_size); + return MFI_STAT_INVALID_PARAMETER; + } + dcmd_size = sizeof(uint32_t) * 2 + 3; + max_ld_disks = cmd->iov_size - dcmd_size; + if (megasas_is_jbod(s)) { + max_ld_disks = 0; + } + if (max_ld_disks > MFI_MAX_LD) { + max_ld_disks = MFI_MAX_LD; + } + QTAILQ_FOREACH(kid, &s->bus.qbus.children, sibling) { + SCSIDevice *sdev = SCSI_DEVICE(kid->child); + + if (num_ld_disks >= max_ld_disks) { + break; + } + info.targetid[num_ld_disks] = sdev->lun; + num_ld_disks++; + dcmd_size++; + } + info.ld_count = cpu_to_le32(num_ld_disks); + info.size = dcmd_size; + trace_megasas_dcmd_ld_get_list(cmd->index, num_ld_disks, max_ld_disks); + + resid = dma_buf_read((uint8_t *)&info, dcmd_size, &cmd->qsg); + cmd->iov_size = dcmd_size - resid; + return MFI_STAT_OK; +} + +static int megasas_ld_get_info_submit(SCSIDevice *sdev, int lun, + MegasasCmd *cmd) +{ + struct mfi_ld_info *info = cmd->iov_buf; + size_t dcmd_size = sizeof(struct mfi_ld_info); + uint8_t cdb[6]; + ssize_t len, resid; + uint16_t sdev_id = ((sdev->id & 0xFF) << 8) | (lun & 0xFF); + uint64_t ld_size; + + if (!cmd->iov_buf) { + cmd->iov_buf = g_malloc0(dcmd_size); + info = cmd->iov_buf; + megasas_setup_inquiry(cdb, 0x83, sizeof(info->vpd_page83)); + cmd->req = scsi_req_new(sdev, cmd->index, lun, cdb, cmd); + if (!cmd->req) { + trace_megasas_dcmd_req_alloc_failed(cmd->index, + "LD get info vpd inquiry"); + g_free(cmd->iov_buf); + cmd->iov_buf = NULL; + return MFI_STAT_FLASH_ALLOC_FAIL; + } + trace_megasas_dcmd_internal_submit(cmd->index, + "LD get info vpd inquiry", lun); + len = scsi_req_enqueue(cmd->req); + if (len > 0) { + cmd->iov_size = len; + scsi_req_continue(cmd->req); + } + return MFI_STAT_INVALID_STATUS; + } + + info->ld_config.params.state = MFI_LD_STATE_OPTIMAL; + info->ld_config.properties.ld.v.target_id = lun; + info->ld_config.params.stripe_size = 3; + info->ld_config.params.num_drives = 1; + info->ld_config.params.is_consistent = 1; + /* Logical device size is in blocks */ + blk_get_geometry(sdev->conf.blk, &ld_size); + info->size = cpu_to_le64(ld_size); + memset(info->ld_config.span, 0, sizeof(info->ld_config.span)); + info->ld_config.span[0].start_block = 0; + info->ld_config.span[0].num_blocks = info->size; + info->ld_config.span[0].array_ref = cpu_to_le16(sdev_id); + + resid = dma_buf_read(cmd->iov_buf, dcmd_size, &cmd->qsg); + g_free(cmd->iov_buf); + cmd->iov_size = dcmd_size - resid; + cmd->iov_buf = NULL; + return MFI_STAT_OK; +} + +static int megasas_dcmd_ld_get_info(MegasasState *s, MegasasCmd *cmd) +{ + struct mfi_ld_info info; + size_t dcmd_size = sizeof(info); + uint16_t ld_id; + uint32_t max_ld_disks = s->fw_luns; + SCSIDevice *sdev = NULL; + int retval = MFI_STAT_DEVICE_NOT_FOUND; + + if (cmd->iov_size < dcmd_size) { + return MFI_STAT_INVALID_PARAMETER; + } + + /* mbox0 has the ID */ + ld_id = le16_to_cpu(cmd->frame->dcmd.mbox[0]); + trace_megasas_dcmd_ld_get_info(cmd->index, ld_id); + + if (megasas_is_jbod(s)) { + return MFI_STAT_DEVICE_NOT_FOUND; + } + + if (ld_id < max_ld_disks) { + sdev = scsi_device_find(&s->bus, 0, ld_id, 0); + } + + if (sdev) { + retval = 
megasas_ld_get_info_submit(sdev, ld_id, cmd); + } + + return retval; +} + +static int megasas_dcmd_cfg_read(MegasasState *s, MegasasCmd *cmd) +{ + uint8_t data[4096] = { 0 }; + struct mfi_config_data *info; + int num_pd_disks = 0, array_offset, ld_offset; + BusChild *kid; + + if (cmd->iov_size > 4096) { + return MFI_STAT_INVALID_PARAMETER; + } + + QTAILQ_FOREACH(kid, &s->bus.qbus.children, sibling) { + num_pd_disks++; + } + info = (struct mfi_config_data *)&data; + /* + * Array mapping: + * - One array per SCSI device + * - One logical drive per SCSI device + * spanning the entire device + */ + info->array_count = num_pd_disks; + info->array_size = sizeof(struct mfi_array) * num_pd_disks; + info->log_drv_count = num_pd_disks; + info->log_drv_size = sizeof(struct mfi_ld_config) * num_pd_disks; + info->spares_count = 0; + info->spares_size = sizeof(struct mfi_spare); + info->size = sizeof(struct mfi_config_data) + info->array_size + + info->log_drv_size; + if (info->size > 4096) { + return MFI_STAT_INVALID_PARAMETER; + } + + array_offset = sizeof(struct mfi_config_data); + ld_offset = array_offset + sizeof(struct mfi_array) * num_pd_disks; + + QTAILQ_FOREACH(kid, &s->bus.qbus.children, sibling) { + SCSIDevice *sdev = SCSI_DEVICE(kid->child); + uint16_t sdev_id = ((sdev->id & 0xFF) << 8) | (sdev->lun & 0xFF); + struct mfi_array *array; + struct mfi_ld_config *ld; + uint64_t pd_size; + int i; + + array = (struct mfi_array *)(data + array_offset); + blk_get_geometry(sdev->conf.blk, &pd_size); + array->size = cpu_to_le64(pd_size); + array->num_drives = 1; + array->array_ref = cpu_to_le16(sdev_id); + array->pd[0].ref.v.device_id = cpu_to_le16(sdev_id); + array->pd[0].ref.v.seq_num = 0; + array->pd[0].fw_state = MFI_PD_STATE_ONLINE; + array->pd[0].encl.pd = 0xFF; + array->pd[0].encl.slot = (sdev->id & 0xFF); + for (i = 1; i < MFI_MAX_ROW_SIZE; i++) { + array->pd[i].ref.v.device_id = 0xFFFF; + array->pd[i].ref.v.seq_num = 0; + array->pd[i].fw_state = MFI_PD_STATE_UNCONFIGURED_GOOD; + array->pd[i].encl.pd = 0xFF; + array->pd[i].encl.slot = 0xFF; + } + array_offset += sizeof(struct mfi_array); + ld = (struct mfi_ld_config *)(data + ld_offset); + memset(ld, 0, sizeof(struct mfi_ld_config)); + ld->properties.ld.v.target_id = sdev->id; + ld->properties.default_cache_policy = MR_LD_CACHE_READ_AHEAD | + MR_LD_CACHE_READ_ADAPTIVE; + ld->properties.current_cache_policy = MR_LD_CACHE_READ_AHEAD | + MR_LD_CACHE_READ_ADAPTIVE; + ld->params.state = MFI_LD_STATE_OPTIMAL; + ld->params.stripe_size = 3; + ld->params.num_drives = 1; + ld->params.span_depth = 1; + ld->params.is_consistent = 1; + ld->span[0].start_block = 0; + ld->span[0].num_blocks = cpu_to_le64(pd_size); + ld->span[0].array_ref = cpu_to_le16(sdev_id); + ld_offset += sizeof(struct mfi_ld_config); + } + + cmd->iov_size -= dma_buf_read((uint8_t *)data, info->size, &cmd->qsg); + return MFI_STAT_OK; +} + +static int megasas_dcmd_get_properties(MegasasState *s, MegasasCmd *cmd) +{ + struct mfi_ctrl_props info; + size_t dcmd_size = sizeof(info); + + memset(&info, 0x0, dcmd_size); + if (cmd->iov_size < dcmd_size) { + trace_megasas_dcmd_invalid_xfer_len(cmd->index, cmd->iov_size, + dcmd_size); + return MFI_STAT_INVALID_PARAMETER; + } + info.pred_fail_poll_interval = cpu_to_le16(300); + info.intr_throttle_cnt = cpu_to_le16(16); + info.intr_throttle_timeout = cpu_to_le16(50); + info.rebuild_rate = 30; + info.patrol_read_rate = 30; + info.bgi_rate = 30; + info.cc_rate = 30; + info.recon_rate = 30; + info.cache_flush_interval = 4; + info.spinup_drv_cnt = 2; + 
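+ /*
+ * These values mirror the info.properties block filled
+ * in by megasas_ctrl_get_info(), so CTRL_GET_PROPERTIES
+ * and CTRL_GET_INFO report the same configuration.
+ */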
info.spinup_delay = 6; + info.ecc_bucket_size = 15; + info.ecc_bucket_leak_rate = cpu_to_le16(1440); + info.expose_encl_devices = 1; + + cmd->iov_size -= dma_buf_read((uint8_t *)&info, dcmd_size, &cmd->qsg); + return MFI_STAT_OK; +} + +static int megasas_cache_flush(MegasasState *s, MegasasCmd *cmd) +{ + blk_drain_all(); + return MFI_STAT_OK; +} + +static int megasas_ctrl_shutdown(MegasasState *s, MegasasCmd *cmd) +{ + s->fw_state = MFI_FWSTATE_READY; + return MFI_STAT_OK; +} + +/* Some implementations use CLUSTER RESET LD to simulate a device reset */ +static int megasas_cluster_reset_ld(MegasasState *s, MegasasCmd *cmd) +{ + uint16_t target_id; + int i; + + /* mbox0 contains the device index */ + target_id = le16_to_cpu(cmd->frame->dcmd.mbox[0]); + trace_megasas_dcmd_reset_ld(cmd->index, target_id); + for (i = 0; i < s->fw_cmds; i++) { + MegasasCmd *tmp_cmd = &s->frames[i]; + if (tmp_cmd->req && tmp_cmd->req->dev->id == target_id) { + SCSIDevice *d = tmp_cmd->req->dev; + qdev_reset_all(&d->qdev); + } + } + return MFI_STAT_OK; +} + +static int megasas_dcmd_set_properties(MegasasState *s, MegasasCmd *cmd) +{ + struct mfi_ctrl_props info; + size_t dcmd_size = sizeof(info); + + if (cmd->iov_size < dcmd_size) { + trace_megasas_dcmd_invalid_xfer_len(cmd->index, cmd->iov_size, + dcmd_size); + return MFI_STAT_INVALID_PARAMETER; + } + dma_buf_write((uint8_t *)&info, dcmd_size, &cmd->qsg); + trace_megasas_dcmd_unsupported(cmd->index, cmd->iov_size); + return MFI_STAT_OK; +} + +static int megasas_dcmd_dummy(MegasasState *s, MegasasCmd *cmd) +{ + trace_megasas_dcmd_dummy(cmd->index, cmd->iov_size); + return MFI_STAT_OK; +} + +static const struct dcmd_cmd_tbl_t { + int opcode; + const char *desc; + int (*func)(MegasasState *s, MegasasCmd *cmd); +} dcmd_cmd_tbl[] = { + { MFI_DCMD_CTRL_MFI_HOST_MEM_ALLOC, "CTRL_HOST_MEM_ALLOC", + megasas_dcmd_dummy }, + { MFI_DCMD_CTRL_GET_INFO, "CTRL_GET_INFO", + megasas_ctrl_get_info }, + { MFI_DCMD_CTRL_GET_PROPERTIES, "CTRL_GET_PROPERTIES", + megasas_dcmd_get_properties }, + { MFI_DCMD_CTRL_SET_PROPERTIES, "CTRL_SET_PROPERTIES", + megasas_dcmd_set_properties }, + { MFI_DCMD_CTRL_ALARM_GET, "CTRL_ALARM_GET", + megasas_dcmd_dummy }, + { MFI_DCMD_CTRL_ALARM_ENABLE, "CTRL_ALARM_ENABLE", + megasas_dcmd_dummy }, + { MFI_DCMD_CTRL_ALARM_DISABLE, "CTRL_ALARM_DISABLE", + megasas_dcmd_dummy }, + { MFI_DCMD_CTRL_ALARM_SILENCE, "CTRL_ALARM_SILENCE", + megasas_dcmd_dummy }, + { MFI_DCMD_CTRL_ALARM_TEST, "CTRL_ALARM_TEST", + megasas_dcmd_dummy }, + { MFI_DCMD_CTRL_EVENT_GETINFO, "CTRL_EVENT_GETINFO", + megasas_event_info }, + { MFI_DCMD_CTRL_EVENT_GET, "CTRL_EVENT_GET", + megasas_dcmd_dummy }, + { MFI_DCMD_CTRL_EVENT_WAIT, "CTRL_EVENT_WAIT", + megasas_event_wait }, + { MFI_DCMD_CTRL_SHUTDOWN, "CTRL_SHUTDOWN", + megasas_ctrl_shutdown }, + { MFI_DCMD_HIBERNATE_STANDBY, "CTRL_STANDBY", + megasas_dcmd_dummy }, + { MFI_DCMD_CTRL_GET_TIME, "CTRL_GET_TIME", + megasas_dcmd_get_fw_time }, + { MFI_DCMD_CTRL_SET_TIME, "CTRL_SET_TIME", + megasas_dcmd_set_fw_time }, + { MFI_DCMD_CTRL_BIOS_DATA_GET, "CTRL_BIOS_DATA_GET", + megasas_dcmd_get_bios_info }, + { MFI_DCMD_CTRL_FACTORY_DEFAULTS, "CTRL_FACTORY_DEFAULTS", + megasas_dcmd_dummy }, + { MFI_DCMD_CTRL_MFC_DEFAULTS_GET, "CTRL_MFC_DEFAULTS_GET", + megasas_mfc_get_defaults }, + { MFI_DCMD_CTRL_MFC_DEFAULTS_SET, "CTRL_MFC_DEFAULTS_SET", + megasas_dcmd_dummy }, + { MFI_DCMD_CTRL_CACHE_FLUSH, "CTRL_CACHE_FLUSH", + megasas_cache_flush }, + { MFI_DCMD_PD_GET_LIST, "PD_GET_LIST", + megasas_dcmd_pd_get_list }, + { MFI_DCMD_PD_LIST_QUERY, 
"PD_LIST_QUERY", + megasas_dcmd_pd_list_query }, + { MFI_DCMD_PD_GET_INFO, "PD_GET_INFO", + megasas_dcmd_pd_get_info }, + { MFI_DCMD_PD_STATE_SET, "PD_STATE_SET", + megasas_dcmd_dummy }, + { MFI_DCMD_PD_REBUILD, "PD_REBUILD", + megasas_dcmd_dummy }, + { MFI_DCMD_PD_BLINK, "PD_BLINK", + megasas_dcmd_dummy }, + { MFI_DCMD_PD_UNBLINK, "PD_UNBLINK", + megasas_dcmd_dummy }, + { MFI_DCMD_LD_GET_LIST, "LD_GET_LIST", + megasas_dcmd_ld_get_list}, + { MFI_DCMD_LD_LIST_QUERY, "LD_LIST_QUERY", + megasas_dcmd_ld_list_query }, + { MFI_DCMD_LD_GET_INFO, "LD_GET_INFO", + megasas_dcmd_ld_get_info }, + { MFI_DCMD_LD_GET_PROP, "LD_GET_PROP", + megasas_dcmd_dummy }, + { MFI_DCMD_LD_SET_PROP, "LD_SET_PROP", + megasas_dcmd_dummy }, + { MFI_DCMD_LD_DELETE, "LD_DELETE", + megasas_dcmd_dummy }, + { MFI_DCMD_CFG_READ, "CFG_READ", + megasas_dcmd_cfg_read }, + { MFI_DCMD_CFG_ADD, "CFG_ADD", + megasas_dcmd_dummy }, + { MFI_DCMD_CFG_CLEAR, "CFG_CLEAR", + megasas_dcmd_dummy }, + { MFI_DCMD_CFG_FOREIGN_READ, "CFG_FOREIGN_READ", + megasas_dcmd_dummy }, + { MFI_DCMD_CFG_FOREIGN_IMPORT, "CFG_FOREIGN_IMPORT", + megasas_dcmd_dummy }, + { MFI_DCMD_BBU_STATUS, "BBU_STATUS", + megasas_dcmd_dummy }, + { MFI_DCMD_BBU_CAPACITY_INFO, "BBU_CAPACITY_INFO", + megasas_dcmd_dummy }, + { MFI_DCMD_BBU_DESIGN_INFO, "BBU_DESIGN_INFO", + megasas_dcmd_dummy }, + { MFI_DCMD_BBU_PROP_GET, "BBU_PROP_GET", + megasas_dcmd_dummy }, + { MFI_DCMD_CLUSTER, "CLUSTER", + megasas_dcmd_dummy }, + { MFI_DCMD_CLUSTER_RESET_ALL, "CLUSTER_RESET_ALL", + megasas_dcmd_dummy }, + { MFI_DCMD_CLUSTER_RESET_LD, "CLUSTER_RESET_LD", + megasas_cluster_reset_ld }, + { -1, NULL, NULL } +}; + +static int megasas_handle_dcmd(MegasasState *s, MegasasCmd *cmd) +{ + int retval = 0; + size_t len; + const struct dcmd_cmd_tbl_t *cmdptr = dcmd_cmd_tbl; + + cmd->dcmd_opcode = le32_to_cpu(cmd->frame->dcmd.opcode); + trace_megasas_handle_dcmd(cmd->index, cmd->dcmd_opcode); + if (megasas_map_dcmd(s, cmd) < 0) { + return MFI_STAT_MEMORY_NOT_AVAILABLE; + } + while (cmdptr->opcode != -1 && cmdptr->opcode != cmd->dcmd_opcode) { + cmdptr++; + } + len = cmd->iov_size; + if (cmdptr->opcode == -1) { + trace_megasas_dcmd_unhandled(cmd->index, cmd->dcmd_opcode, len); + retval = megasas_dcmd_dummy(s, cmd); + } else { + trace_megasas_dcmd_enter(cmd->index, cmdptr->desc, len); + retval = cmdptr->func(s, cmd); + } + if (retval != MFI_STAT_INVALID_STATUS) { + megasas_finish_dcmd(cmd, len); + } + return retval; +} + +static int megasas_finish_internal_dcmd(MegasasCmd *cmd, + SCSIRequest *req, size_t resid) +{ + int retval = MFI_STAT_OK; + int lun = req->lun; + + trace_megasas_dcmd_internal_finish(cmd->index, cmd->dcmd_opcode, lun); + cmd->iov_size -= resid; + switch (cmd->dcmd_opcode) { + case MFI_DCMD_PD_GET_INFO: + retval = megasas_pd_get_info_submit(req->dev, lun, cmd); + break; + case MFI_DCMD_LD_GET_INFO: + retval = megasas_ld_get_info_submit(req->dev, lun, cmd); + break; + default: + trace_megasas_dcmd_internal_invalid(cmd->index, cmd->dcmd_opcode); + retval = MFI_STAT_INVALID_DCMD; + break; + } + if (retval != MFI_STAT_INVALID_STATUS) { + megasas_finish_dcmd(cmd, cmd->iov_size); + } + return retval; +} + +static int megasas_enqueue_req(MegasasCmd *cmd, bool is_write) +{ + int len; + + len = scsi_req_enqueue(cmd->req); + if (len < 0) { + len = -len; + } + if (len > 0) { + if (len > cmd->iov_size) { + if (is_write) { + trace_megasas_iov_write_overflow(cmd->index, len, + cmd->iov_size); + } else { + trace_megasas_iov_read_overflow(cmd->index, len, + cmd->iov_size); + } + } + if (len < 
cmd->iov_size) { + if (is_write) { + trace_megasas_iov_write_underflow(cmd->index, len, + cmd->iov_size); + } else { + trace_megasas_iov_read_underflow(cmd->index, len, + cmd->iov_size); + } + cmd->iov_size = len; + } + scsi_req_continue(cmd->req); + } + return len; +} + +static int megasas_handle_scsi(MegasasState *s, MegasasCmd *cmd, + int frame_cmd) +{ + uint8_t *cdb; + int target_id, lun_id, cdb_len; + bool is_write; + struct SCSIDevice *sdev = NULL; + bool is_logical = (frame_cmd == MFI_CMD_LD_SCSI_IO); + + cdb = cmd->frame->pass.cdb; + target_id = cmd->frame->header.target_id; + lun_id = cmd->frame->header.lun_id; + cdb_len = cmd->frame->header.cdb_len; + + if (is_logical) { + if (target_id >= MFI_MAX_LD || lun_id != 0) { + trace_megasas_scsi_target_not_present( + mfi_frame_desc(frame_cmd), is_logical, target_id, lun_id); + return MFI_STAT_DEVICE_NOT_FOUND; + } + } + sdev = scsi_device_find(&s->bus, 0, target_id, lun_id); + + cmd->iov_size = le32_to_cpu(cmd->frame->header.data_len); + trace_megasas_handle_scsi(mfi_frame_desc(frame_cmd), is_logical, + target_id, lun_id, sdev, cmd->iov_size); + + if (!sdev || (megasas_is_jbod(s) && is_logical)) { + trace_megasas_scsi_target_not_present( + mfi_frame_desc(frame_cmd), is_logical, target_id, lun_id); + return MFI_STAT_DEVICE_NOT_FOUND; + } + + if (cdb_len > 16) { + trace_megasas_scsi_invalid_cdb_len( + mfi_frame_desc(frame_cmd), is_logical, + target_id, lun_id, cdb_len); + megasas_write_sense(cmd, SENSE_CODE(INVALID_OPCODE)); + cmd->frame->header.scsi_status = CHECK_CONDITION; + s->event_count++; + return MFI_STAT_SCSI_DONE_WITH_ERROR; + } + + if (megasas_map_sgl(s, cmd, &cmd->frame->pass.sgl)) { + megasas_write_sense(cmd, SENSE_CODE(TARGET_FAILURE)); + cmd->frame->header.scsi_status = CHECK_CONDITION; + s->event_count++; + return MFI_STAT_SCSI_DONE_WITH_ERROR; + } + + cmd->req = scsi_req_new(sdev, cmd->index, lun_id, cdb, cmd); + if (!cmd->req) { + trace_megasas_scsi_req_alloc_failed( + mfi_frame_desc(frame_cmd), target_id, lun_id); + megasas_write_sense(cmd, SENSE_CODE(NO_SENSE)); + cmd->frame->header.scsi_status = BUSY; + s->event_count++; + return MFI_STAT_SCSI_DONE_WITH_ERROR; + } + + is_write = (cmd->req->cmd.mode == SCSI_XFER_TO_DEV); + if (cmd->iov_size) { + if (is_write) { + trace_megasas_scsi_write_start(cmd->index, cmd->iov_size); + } else { + trace_megasas_scsi_read_start(cmd->index, cmd->iov_size); + } + } else { + trace_megasas_scsi_nodata(cmd->index); + } + megasas_enqueue_req(cmd, is_write); + return MFI_STAT_INVALID_STATUS; +} + +static int megasas_handle_io(MegasasState *s, MegasasCmd *cmd, int frame_cmd) +{ + uint32_t lba_count, lba_start_hi, lba_start_lo; + uint64_t lba_start; + bool is_write = (frame_cmd == MFI_CMD_LD_WRITE); + uint8_t cdb[16]; + int len; + struct SCSIDevice *sdev = NULL; + int target_id, lun_id, cdb_len; + + lba_count = le32_to_cpu(cmd->frame->io.header.data_len); + lba_start_lo = le32_to_cpu(cmd->frame->io.lba_lo); + lba_start_hi = le32_to_cpu(cmd->frame->io.lba_hi); + lba_start = ((uint64_t)lba_start_hi << 32) | lba_start_lo; + + target_id = cmd->frame->header.target_id; + lun_id = cmd->frame->header.lun_id; + cdb_len = cmd->frame->header.cdb_len; + + if (target_id < MFI_MAX_LD && lun_id == 0) { + sdev = scsi_device_find(&s->bus, 0, target_id, lun_id); + } + + trace_megasas_handle_io(cmd->index, + mfi_frame_desc(frame_cmd), target_id, lun_id, + (unsigned long)lba_start, (unsigned long)lba_count); + if (!sdev) { + trace_megasas_io_target_not_present(cmd->index, + mfi_frame_desc(frame_cmd), target_id, 
lun_id); + return MFI_STAT_DEVICE_NOT_FOUND; + } + + if (cdb_len > 16) { + trace_megasas_scsi_invalid_cdb_len( + mfi_frame_desc(frame_cmd), 1, target_id, lun_id, cdb_len); + megasas_write_sense(cmd, SENSE_CODE(INVALID_OPCODE)); + cmd->frame->header.scsi_status = CHECK_CONDITION; + s->event_count++; + return MFI_STAT_SCSI_DONE_WITH_ERROR; + } + + cmd->iov_size = lba_count * sdev->blocksize; + if (megasas_map_sgl(s, cmd, &cmd->frame->io.sgl)) { + megasas_write_sense(cmd, SENSE_CODE(TARGET_FAILURE)); + cmd->frame->header.scsi_status = CHECK_CONDITION; + s->event_count++; + return MFI_STAT_SCSI_DONE_WITH_ERROR; + } + + megasas_encode_lba(cdb, lba_start, lba_count, is_write); + cmd->req = scsi_req_new(sdev, cmd->index, + lun_id, cdb, cmd); + if (!cmd->req) { + trace_megasas_scsi_req_alloc_failed( + mfi_frame_desc(frame_cmd), target_id, lun_id); + megasas_write_sense(cmd, SENSE_CODE(NO_SENSE)); + cmd->frame->header.scsi_status = BUSY; + s->event_count++; + return MFI_STAT_SCSI_DONE_WITH_ERROR; + } + len = megasas_enqueue_req(cmd, is_write); + if (len > 0) { + if (is_write) { + trace_megasas_io_write_start(cmd->index, lba_start, lba_count, len); + } else { + trace_megasas_io_read_start(cmd->index, lba_start, lba_count, len); + } + } + return MFI_STAT_INVALID_STATUS; +} + +static QEMUSGList *megasas_get_sg_list(SCSIRequest *req) +{ + MegasasCmd *cmd = req->hba_private; + + if (cmd->dcmd_opcode != -1) { + return NULL; + } else { + return &cmd->qsg; + } +} + +static void megasas_xfer_complete(SCSIRequest *req, uint32_t len) +{ + MegasasCmd *cmd = req->hba_private; + uint8_t *buf; + + trace_megasas_io_complete(cmd->index, len); + + if (cmd->dcmd_opcode != MFI_DCMD_PD_GET_INFO && + cmd->dcmd_opcode != MFI_DCMD_LD_GET_INFO) { + /* Regular requests transfer their data via cmd->qsg */ + scsi_req_continue(req); + return; + } + + buf = scsi_req_get_buf(req); + if (cmd->dcmd_opcode == MFI_DCMD_PD_GET_INFO && cmd->iov_buf) { + struct mfi_pd_info *info = cmd->iov_buf; + + if (info->inquiry_data[0] == 0x7f) { + memset(info->inquiry_data, 0, sizeof(info->inquiry_data)); + memcpy(info->inquiry_data, buf, len); + } else if (info->vpd_page83[0] == 0x7f) { + memset(info->vpd_page83, 0, sizeof(info->vpd_page83)); + memcpy(info->vpd_page83, buf, len); + } + scsi_req_continue(req); + } else if (cmd->dcmd_opcode == MFI_DCMD_LD_GET_INFO) { + struct mfi_ld_info *info = cmd->iov_buf; + + if (cmd->iov_buf) { + memcpy(info->vpd_page83, buf, sizeof(info->vpd_page83)); + scsi_req_continue(req); + } + } +} + +static void megasas_command_complete(SCSIRequest *req, size_t resid) +{ + MegasasCmd *cmd = req->hba_private; + uint8_t cmd_status = MFI_STAT_OK; + + trace_megasas_command_complete(cmd->index, req->status, resid); + + if (req->io_canceled) { + return; + } + + if (cmd->dcmd_opcode != -1) { + /* + * Internal command complete + */ + cmd_status = megasas_finish_internal_dcmd(cmd, req, resid); + if (cmd_status == MFI_STAT_INVALID_STATUS) { + return; + } + } else { + trace_megasas_scsi_complete(cmd->index, req->status, + cmd->iov_size, req->cmd.xfer); + if (req->status != GOOD) { + cmd_status = MFI_STAT_SCSI_DONE_WITH_ERROR; + } + if (req->status == CHECK_CONDITION) { + megasas_copy_sense(cmd); + } + + cmd->frame->header.scsi_status = req->status; + } + cmd->frame->header.cmd_status = cmd_status; + megasas_complete_command(cmd); +} + +static void megasas_command_cancelled(SCSIRequest *req) +{ + MegasasCmd *cmd = req->hba_private; + + if (!cmd) { + return; + } + cmd->frame->header.cmd_status = MFI_STAT_SCSI_IO_FAILED; + megasas_complete_command(cmd); +} + +static int megasas_handle_abort(MegasasState *s, MegasasCmd *cmd) +{ + uint64_t abort_ctx =
le64_to_cpu(cmd->frame->abort.abort_context); + hwaddr abort_addr, addr_hi, addr_lo; + MegasasCmd *abort_cmd; + + addr_hi = le32_to_cpu(cmd->frame->abort.abort_mfi_addr_hi); + addr_lo = le32_to_cpu(cmd->frame->abort.abort_mfi_addr_lo); + abort_addr = ((uint64_t)addr_hi << 32) | addr_lo; + + abort_cmd = megasas_lookup_frame(s, abort_addr); + if (!abort_cmd) { + trace_megasas_abort_no_cmd(cmd->index, abort_ctx); + s->event_count++; + return MFI_STAT_OK; + } + if (!megasas_use_queue64(s)) { + abort_ctx &= (uint64_t)0xFFFFFFFF; + } + if (abort_cmd->context != abort_ctx) { + trace_megasas_abort_invalid_context(cmd->index, abort_cmd->context, + abort_cmd->index); + s->event_count++; + return MFI_STAT_ABORT_NOT_POSSIBLE; + } + trace_megasas_abort_frame(cmd->index, abort_cmd->index); + megasas_abort_command(abort_cmd); + if (!s->event_cmd || abort_cmd != s->event_cmd) { + s->event_cmd = NULL; + } + s->event_count++; + return MFI_STAT_OK; +} + +static void megasas_handle_frame(MegasasState *s, uint64_t frame_addr, + uint32_t frame_count) +{ + uint8_t frame_status = MFI_STAT_INVALID_CMD; + uint64_t frame_context; + int frame_cmd; + MegasasCmd *cmd; + + /* + * Always read 64bit context, top bits will be + * masked out if required in megasas_enqueue_frame() + */ + frame_context = megasas_frame_get_context(s, frame_addr); + + cmd = megasas_enqueue_frame(s, frame_addr, frame_context, frame_count); + if (!cmd) { + /* reply queue full */ + trace_megasas_frame_busy(frame_addr); + megasas_frame_set_scsi_status(s, frame_addr, BUSY); + megasas_frame_set_cmd_status(s, frame_addr, MFI_STAT_SCSI_DONE_WITH_ERROR); + megasas_complete_frame(s, frame_context); + s->event_count++; + return; + } + frame_cmd = cmd->frame->header.frame_cmd; + switch (frame_cmd) { + case MFI_CMD_INIT: + frame_status = megasas_init_firmware(s, cmd); + break; + case MFI_CMD_DCMD: + frame_status = megasas_handle_dcmd(s, cmd); + break; + case MFI_CMD_ABORT: + frame_status = megasas_handle_abort(s, cmd); + break; + case MFI_CMD_PD_SCSI_IO: + case MFI_CMD_LD_SCSI_IO: + frame_status = megasas_handle_scsi(s, cmd, frame_cmd); + break; + case MFI_CMD_LD_READ: + case MFI_CMD_LD_WRITE: + frame_status = megasas_handle_io(s, cmd, frame_cmd); + break; + default: + trace_megasas_unhandled_frame_cmd(cmd->index, frame_cmd); + s->event_count++; + break; + } + if (frame_status != MFI_STAT_INVALID_STATUS) { + if (cmd->frame) { + cmd->frame->header.cmd_status = frame_status; + } else { + megasas_frame_set_cmd_status(s, frame_addr, frame_status); + } + megasas_unmap_frame(s, cmd); + megasas_complete_frame(s, cmd->context); + } +} + +static uint64_t megasas_mmio_read(void *opaque, hwaddr addr, + unsigned size) +{ + MegasasState *s = opaque; + PCIDevice *pci_dev = PCI_DEVICE(s); + MegasasBaseClass *base_class = MEGASAS_GET_CLASS(s); + uint32_t retval = 0; + + switch (addr) { + case MFI_IDB: + retval = 0; + trace_megasas_mmio_readl("MFI_IDB", retval); + break; + case MFI_OMSG0: + case MFI_OSP0: + retval = (msix_present(pci_dev) ? MFI_FWSTATE_MSIX_SUPPORTED : 0) | + (s->fw_state & MFI_FWSTATE_MASK) | + ((s->fw_sge & 0xff) << 16) | + (s->fw_cmds & 0xFFFF); + trace_megasas_mmio_readl(addr == MFI_OMSG0 ? "MFI_OMSG0" : "MFI_OSP0", + retval); + break; + case MFI_OSTS: + if (megasas_intr_enabled(s) && s->doorbell) { + retval = base_class->osts; + } + trace_megasas_mmio_readl("MFI_OSTS", retval); + break; + case MFI_OMSK: + retval = s->intr_mask; + trace_megasas_mmio_readl("MFI_OMSK", retval); + break; + case MFI_ODCR0: + retval = s->doorbell ? 
1 : 0; + trace_megasas_mmio_readl("MFI_ODCR0", retval); + break; + case MFI_DIAG: + retval = s->diag; + trace_megasas_mmio_readl("MFI_DIAG", retval); + break; + case MFI_OSP1: + retval = 15; + trace_megasas_mmio_readl("MFI_OSP1", retval); + break; + default: + trace_megasas_mmio_invalid_readl(addr); + break; + } + return retval; +} + +static int adp_reset_seq[] = {0x00, 0x04, 0x0b, 0x02, 0x07, 0x0d}; + +static void megasas_mmio_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + MegasasState *s = opaque; + PCIDevice *pci_dev = PCI_DEVICE(s); + uint64_t frame_addr; + uint32_t frame_count; + int i; + + switch (addr) { + case MFI_IDB: + trace_megasas_mmio_writel("MFI_IDB", val); + if (val & MFI_FWINIT_ABORT) { + /* Abort all pending cmds */ + for (i = 0; i < s->fw_cmds; i++) { + megasas_abort_command(&s->frames[i]); + } + } + if (val & MFI_FWINIT_READY) { + /* move to FW READY */ + megasas_soft_reset(s); + } + if (val & MFI_FWINIT_MFIMODE) { + /* discard MFIs */ + } + if (val & MFI_FWINIT_STOP_ADP) { + /* Terminal error, stop processing */ + s->fw_state = MFI_FWSTATE_FAULT; + } + break; + case MFI_OMSK: + trace_megasas_mmio_writel("MFI_OMSK", val); + s->intr_mask = val; + if (!megasas_intr_enabled(s) && + !msi_enabled(pci_dev) && + !msix_enabled(pci_dev)) { + trace_megasas_irq_lower(); + pci_irq_deassert(pci_dev); + } + if (megasas_intr_enabled(s)) { + if (msix_enabled(pci_dev)) { + trace_megasas_msix_enabled(0); + } else if (msi_enabled(pci_dev)) { + trace_megasas_msi_enabled(0); + } else { + trace_megasas_intr_enabled(); + } + } else { + trace_megasas_intr_disabled(); + megasas_soft_reset(s); + } + break; + case MFI_ODCR0: + trace_megasas_mmio_writel("MFI_ODCR0", val); + s->doorbell = 0; + if (megasas_intr_enabled(s)) { + if (!msix_enabled(pci_dev) && !msi_enabled(pci_dev)) { + trace_megasas_irq_lower(); + pci_irq_deassert(pci_dev); + } + } + break; + case MFI_IQPH: + trace_megasas_mmio_writel("MFI_IQPH", val); + /* Received high 32 bits of a 64 bit MFI frame address */ + s->frame_hi = val; + break; + case MFI_IQPL: + trace_megasas_mmio_writel("MFI_IQPL", val); + /* Received low 32 bits of a 64 bit MFI frame address */ + /* Fallthrough */ + case MFI_IQP: + if (addr == MFI_IQP) { + trace_megasas_mmio_writel("MFI_IQP", val); + /* Received 64 bit MFI frame address */ + s->frame_hi = 0; + } + frame_addr = (val & ~0x1F); + /* Add possible 64 bit offset */ + frame_addr |= ((uint64_t)s->frame_hi << 32); + s->frame_hi = 0; + frame_count = (val >> 1) & 0xF; + megasas_handle_frame(s, frame_addr, frame_count); + break; + case MFI_SEQ: + trace_megasas_mmio_writel("MFI_SEQ", val); + /* Magic sequence to start ADP reset */ + if (adp_reset_seq[s->adp_reset++] == val) { + if (s->adp_reset == 6) { + s->adp_reset = 0; + s->diag = MFI_DIAG_WRITE_ENABLE; + } + } else { + s->adp_reset = 0; + s->diag = 0; + } + break; + case MFI_DIAG: + trace_megasas_mmio_writel("MFI_DIAG", val); + /* ADP reset */ + if ((s->diag & MFI_DIAG_WRITE_ENABLE) && + (val & MFI_DIAG_RESET_ADP)) { + s->diag |= MFI_DIAG_RESET_ADP; + megasas_soft_reset(s); + s->adp_reset = 0; + s->diag = 0; + } + break; + default: + trace_megasas_mmio_invalid_writel(addr, val); + break; + } +} + +static const MemoryRegionOps megasas_mmio_ops = { + .read = megasas_mmio_read, + .write = megasas_mmio_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .impl = { + .min_access_size = 8, + .max_access_size = 8, + } +}; + +static uint64_t megasas_port_read(void *opaque, hwaddr addr, + unsigned size) +{ + return megasas_mmio_read(opaque, addr & 0xff, 
size); +} + +static void megasas_port_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + megasas_mmio_write(opaque, addr & 0xff, val, size); +} + +static const MemoryRegionOps megasas_port_ops = { + .read = megasas_port_read, + .write = megasas_port_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .impl = { + .min_access_size = 4, + .max_access_size = 4, + } +}; + +static uint64_t megasas_queue_read(void *opaque, hwaddr addr, + unsigned size) +{ + return 0; +} + +static void megasas_queue_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + return; +} + +static const MemoryRegionOps megasas_queue_ops = { + .read = megasas_queue_read, + .write = megasas_queue_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .impl = { + .min_access_size = 8, + .max_access_size = 8, + } +}; + +static void megasas_soft_reset(MegasasState *s) +{ + int i; + MegasasCmd *cmd; + + trace_megasas_reset(s->fw_state); + for (i = 0; i < s->fw_cmds; i++) { + cmd = &s->frames[i]; + megasas_abort_command(cmd); + } + if (s->fw_state == MFI_FWSTATE_READY) { + BusChild *kid; + + /* + * The EFI firmware doesn't handle UA, + * so we need to clear the Power On/Reset UA + * after the initial reset. + */ + QTAILQ_FOREACH(kid, &s->bus.qbus.children, sibling) { + SCSIDevice *sdev = SCSI_DEVICE(kid->child); + + sdev->unit_attention = SENSE_CODE(NO_SENSE); + scsi_device_unit_attention_reported(sdev); + } + } + megasas_reset_frames(s); + s->reply_queue_len = s->fw_cmds; + s->reply_queue_pa = 0; + s->consumer_pa = 0; + s->producer_pa = 0; + s->fw_state = MFI_FWSTATE_READY; + s->doorbell = 0; + s->intr_mask = MEGASAS_INTR_DISABLED_MASK; + s->frame_hi = 0; + s->flags &= ~MEGASAS_MASK_USE_QUEUE64; + s->event_count++; + s->boot_event = s->event_count; +} + +static void megasas_scsi_reset(DeviceState *dev) +{ + MegasasState *s = MEGASAS(dev); + + megasas_soft_reset(s); +} + +static const VMStateDescription vmstate_megasas_gen1 = { + .name = "megasas", + .version_id = 0, + .minimum_version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_PCI_DEVICE(parent_obj, MegasasState), + VMSTATE_MSIX(parent_obj, MegasasState), + + VMSTATE_UINT32(fw_state, MegasasState), + VMSTATE_UINT32(intr_mask, MegasasState), + VMSTATE_UINT32(doorbell, MegasasState), + VMSTATE_UINT64(reply_queue_pa, MegasasState), + VMSTATE_UINT64(consumer_pa, MegasasState), + VMSTATE_UINT64(producer_pa, MegasasState), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_megasas_gen2 = { + .name = "megasas-gen2", + .version_id = 0, + .minimum_version_id = 0, + .minimum_version_id_old = 0, + .fields = (VMStateField[]) { + VMSTATE_PCI_DEVICE(parent_obj, MegasasState), + VMSTATE_MSIX(parent_obj, MegasasState), + + VMSTATE_UINT32(fw_state, MegasasState), + VMSTATE_UINT32(intr_mask, MegasasState), + VMSTATE_UINT32(doorbell, MegasasState), + VMSTATE_UINT64(reply_queue_pa, MegasasState), + VMSTATE_UINT64(consumer_pa, MegasasState), + VMSTATE_UINT64(producer_pa, MegasasState), + VMSTATE_END_OF_LIST() + } +}; + +static void megasas_scsi_uninit(PCIDevice *d) +{ + MegasasState *s = MEGASAS(d); + + if (megasas_use_msix(s)) { + msix_uninit(d, &s->mmio_io, &s->mmio_io); + } + msi_uninit(d); +} + +static const struct SCSIBusInfo megasas_scsi_info = { + .tcq = true, + .max_target = MFI_MAX_LD, + .max_lun = 255, + + .transfer_data = megasas_xfer_complete, + .get_sg_list = megasas_get_sg_list, + .complete = megasas_command_complete, + .cancel = megasas_command_cancelled, +}; + +static void megasas_scsi_realize(PCIDevice *dev, Error **errp) +{ + 
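+ /*
+ * Realize order: fix up PCI config space, negotiate MSI
+ * and MSI-X (falling back to legacy INTx), register the
+ * MMIO/port/queue BARs, clamp the user-visible max_sge
+ * and max_cmds properties to supported limits, and
+ * finally create the SCSI bus. E.g. (assuming the usual
+ * "megasas" type name) "-device megasas,max_cmds=4096"
+ * is silently clamped to MEGASAS_MAX_FRAMES below.
+ */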
MegasasState *s = MEGASAS(dev); + MegasasBaseClass *b = MEGASAS_GET_CLASS(s); + uint8_t *pci_conf; + int i, bar_type; + Error *err = NULL; + int ret; + + pci_conf = dev->config; + + /* PCI latency timer = 0 */ + pci_conf[PCI_LATENCY_TIMER] = 0; + /* Interrupt pin 1 */ + pci_conf[PCI_INTERRUPT_PIN] = 0x01; + + if (s->msi != ON_OFF_AUTO_OFF) { + ret = msi_init(dev, 0x50, 1, true, false, &err); + /* Any error other than -ENOTSUP(board's MSI support is broken) + * is a programming error */ + assert(!ret || ret == -ENOTSUP); + if (ret && s->msi == ON_OFF_AUTO_ON) { + /* Can't satisfy user's explicit msi=on request, fail */ + error_append_hint(&err, "You have to use msi=auto (default) or " + "msi=off with this machine type.\n"); + error_propagate(errp, err); + return; + } else if (ret) { + /* With msi=auto, we fall back to MSI off silently */ + s->msi = ON_OFF_AUTO_OFF; + error_free(err); + } + } + + memory_region_init_io(&s->mmio_io, OBJECT(s), &megasas_mmio_ops, s, + "megasas-mmio", 0x4000); + memory_region_init_io(&s->port_io, OBJECT(s), &megasas_port_ops, s, + "megasas-io", 256); + memory_region_init_io(&s->queue_io, OBJECT(s), &megasas_queue_ops, s, + "megasas-queue", 0x40000); + + if (megasas_use_msix(s) && + msix_init(dev, 15, &s->mmio_io, b->mmio_bar, 0x2000, + &s->mmio_io, b->mmio_bar, 0x3800, 0x68, NULL)) { + /* TODO: check msix_init's error, and should fail on msix=on */ + s->msix = ON_OFF_AUTO_OFF; + } + + if (pci_is_express(dev)) { + pcie_endpoint_cap_init(dev, 0xa0); + } + + bar_type = PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_64; + pci_register_bar(dev, b->ioport_bar, + PCI_BASE_ADDRESS_SPACE_IO, &s->port_io); + pci_register_bar(dev, b->mmio_bar, bar_type, &s->mmio_io); + pci_register_bar(dev, 3, bar_type, &s->queue_io); + + if (megasas_use_msix(s)) { + msix_vector_use(dev, 0); + } + + s->fw_state = MFI_FWSTATE_READY; + if (!s->sas_addr) { + s->sas_addr = ((NAA_LOCALLY_ASSIGNED_ID << 24) | + IEEE_COMPANY_LOCALLY_ASSIGNED) << 36; + s->sas_addr |= pci_dev_bus_num(dev) << 16; + s->sas_addr |= PCI_SLOT(dev->devfn) << 8; + s->sas_addr |= PCI_FUNC(dev->devfn); + } + if (!s->hba_serial) { + s->hba_serial = g_strdup(MEGASAS_HBA_SERIAL); + } + if (s->fw_sge >= MEGASAS_MAX_SGE - MFI_PASS_FRAME_SIZE) { + s->fw_sge = MEGASAS_MAX_SGE - MFI_PASS_FRAME_SIZE; + } else if (s->fw_sge >= 128 - MFI_PASS_FRAME_SIZE) { + s->fw_sge = 128 - MFI_PASS_FRAME_SIZE; + } else { + s->fw_sge = 64 - MFI_PASS_FRAME_SIZE; + } + if (s->fw_cmds > MEGASAS_MAX_FRAMES) { + s->fw_cmds = MEGASAS_MAX_FRAMES; + } + trace_megasas_init(s->fw_sge, s->fw_cmds, + megasas_is_jbod(s) ? 
"jbod" : "raid"); + + if (megasas_is_jbod(s)) { + s->fw_luns = MFI_MAX_SYS_PDS; + } else { + s->fw_luns = MFI_MAX_LD; + } + s->producer_pa = 0; + s->consumer_pa = 0; + for (i = 0; i < s->fw_cmds; i++) { + s->frames[i].index = i; + s->frames[i].context = -1; + s->frames[i].pa = 0; + s->frames[i].state = s; + } + + scsi_bus_init(&s->bus, sizeof(s->bus), DEVICE(dev), &megasas_scsi_info); +} + +static Property megasas_properties_gen1[] = { + DEFINE_PROP_UINT32("max_sge", MegasasState, fw_sge, + MEGASAS_DEFAULT_SGE), + DEFINE_PROP_UINT32("max_cmds", MegasasState, fw_cmds, + MEGASAS_DEFAULT_FRAMES), + DEFINE_PROP_STRING("hba_serial", MegasasState, hba_serial), + DEFINE_PROP_UINT64("sas_address", MegasasState, sas_addr, 0), + DEFINE_PROP_ON_OFF_AUTO("msi", MegasasState, msi, ON_OFF_AUTO_AUTO), + DEFINE_PROP_ON_OFF_AUTO("msix", MegasasState, msix, ON_OFF_AUTO_AUTO), + DEFINE_PROP_BIT("use_jbod", MegasasState, flags, + MEGASAS_FLAG_USE_JBOD, false), + DEFINE_PROP_END_OF_LIST(), +}; + +static Property megasas_properties_gen2[] = { + DEFINE_PROP_UINT32("max_sge", MegasasState, fw_sge, + MEGASAS_DEFAULT_SGE), + DEFINE_PROP_UINT32("max_cmds", MegasasState, fw_cmds, + MEGASAS_GEN2_DEFAULT_FRAMES), + DEFINE_PROP_STRING("hba_serial", MegasasState, hba_serial), + DEFINE_PROP_UINT64("sas_address", MegasasState, sas_addr, 0), + DEFINE_PROP_ON_OFF_AUTO("msi", MegasasState, msi, ON_OFF_AUTO_AUTO), + DEFINE_PROP_ON_OFF_AUTO("msix", MegasasState, msix, ON_OFF_AUTO_AUTO), + DEFINE_PROP_BIT("use_jbod", MegasasState, flags, + MEGASAS_FLAG_USE_JBOD, false), + DEFINE_PROP_END_OF_LIST(), +}; + +typedef struct MegasasInfo { + const char *name; + const char *desc; + const char *product_name; + const char *product_version; + uint16_t device_id; + uint16_t subsystem_id; + int ioport_bar; + int mmio_bar; + int osts; + const VMStateDescription *vmsd; + Property *props; + InterfaceInfo *interfaces; +} MegasasInfo; + +static struct MegasasInfo megasas_devices[] = { + { + .name = TYPE_MEGASAS_GEN1, + .desc = "LSI MegaRAID SAS 1078", + .product_name = "LSI MegaRAID SAS 8708EM2", + .product_version = MEGASAS_VERSION_GEN1, + .device_id = PCI_DEVICE_ID_LSI_SAS1078, + .subsystem_id = 0x1013, + .ioport_bar = 2, + .mmio_bar = 0, + .osts = MFI_1078_RM | 1, + .vmsd = &vmstate_megasas_gen1, + .props = megasas_properties_gen1, + .interfaces = (InterfaceInfo[]) { + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { }, + }, + },{ + .name = TYPE_MEGASAS_GEN2, + .desc = "LSI MegaRAID SAS 2108", + .product_name = "LSI MegaRAID SAS 9260-8i", + .product_version = MEGASAS_VERSION_GEN2, + .device_id = PCI_DEVICE_ID_LSI_SAS0079, + .subsystem_id = 0x9261, + .ioport_bar = 0, + .mmio_bar = 1, + .osts = MFI_GEN2_RM, + .vmsd = &vmstate_megasas_gen2, + .props = megasas_properties_gen2, + .interfaces = (InterfaceInfo[]) { + { INTERFACE_PCIE_DEVICE }, + { } + }, + } +}; + +static void megasas_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + PCIDeviceClass *pc = PCI_DEVICE_CLASS(oc); + MegasasBaseClass *e = MEGASAS_CLASS(oc); + const MegasasInfo *info = data; + + pc->realize = megasas_scsi_realize; + pc->exit = megasas_scsi_uninit; + pc->vendor_id = PCI_VENDOR_ID_LSI_LOGIC; + pc->device_id = info->device_id; + pc->subsystem_vendor_id = PCI_VENDOR_ID_LSI_LOGIC; + pc->subsystem_id = info->subsystem_id; + pc->class_id = PCI_CLASS_STORAGE_RAID; + e->mmio_bar = info->mmio_bar; + e->ioport_bar = info->ioport_bar; + e->osts = info->osts; + e->product_name = info->product_name; + e->product_version = info->product_version; + 
device_class_set_props(dc, info->props); + dc->reset = megasas_scsi_reset; + dc->vmsd = info->vmsd; + set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); + dc->desc = info->desc; +} + +static const TypeInfo megasas_info = { + .name = TYPE_MEGASAS_BASE, + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(MegasasState), + .class_size = sizeof(MegasasBaseClass), + .abstract = true, +}; + +static void megasas_register_types(void) +{ + int i; + + type_register_static(&megasas_info); + for (i = 0; i < ARRAY_SIZE(megasas_devices); i++) { + const MegasasInfo *info = &megasas_devices[i]; + TypeInfo type_info = {}; + + type_info.name = info->name; + type_info.parent = TYPE_MEGASAS_BASE; + type_info.class_data = (void *)info; + type_info.class_init = megasas_class_init; + type_info.interfaces = info->interfaces; + + type_register(&type_info); + } +} + +type_init(megasas_register_types) diff --git a/hw/scsi/meson.build b/hw/scsi/meson.build new file mode 100644 index 000000000..923a34f34 --- /dev/null +++ b/hw/scsi/meson.build @@ -0,0 +1,26 @@ +scsi_ss = ss.source_set() +scsi_ss.add(files( + 'emulation.c', + 'scsi-bus.c', + 'scsi-disk.c', + 'scsi-generic.c', +)) +scsi_ss.add(when: 'CONFIG_ESP', if_true: files('esp.c')) +scsi_ss.add(when: 'CONFIG_ESP_PCI', if_true: files('esp-pci.c')) +scsi_ss.add(when: 'CONFIG_LSI_SCSI_PCI', if_true: files('lsi53c895a.c')) +scsi_ss.add(when: 'CONFIG_MEGASAS_SCSI_PCI', if_true: files('megasas.c')) +scsi_ss.add(when: 'CONFIG_MPTSAS_SCSI_PCI', if_true: files('mptsas.c', 'mptconfig.c', 'mptendian.c')) +scsi_ss.add(when: 'CONFIG_VMW_PVSCSI_SCSI_PCI', if_true: files('vmw_pvscsi.c')) +softmmu_ss.add_all(when: 'CONFIG_SCSI', if_true: scsi_ss) + +specific_scsi_ss = ss.source_set() + +virtio_scsi_ss = ss.source_set() +virtio_scsi_ss.add(files('virtio-scsi.c', 'virtio-scsi-dataplane.c')) +virtio_scsi_ss.add(when: 'CONFIG_VHOST_SCSI', if_true: files('vhost-scsi-common.c', 'vhost-scsi.c')) +virtio_scsi_ss.add(when: 'CONFIG_VHOST_USER_SCSI', if_true: files('vhost-scsi-common.c', 'vhost-user-scsi.c')) +specific_scsi_ss.add_all(when: 'CONFIG_VIRTIO_SCSI', if_true: virtio_scsi_ss) + +specific_scsi_ss.add(when: 'CONFIG_SPAPR_VSCSI', if_true: files('spapr_vscsi.c')) + +specific_ss.add_all(when: 'CONFIG_SCSI', if_true: specific_scsi_ss) diff --git a/hw/scsi/mfi.h b/hw/scsi/mfi.h new file mode 100644 index 000000000..e67a5c0b4 --- /dev/null +++ b/hw/scsi/mfi.h @@ -0,0 +1,1272 @@ +/* + * NetBSD header file, copied from + * http://gitorious.org/freebsd/freebsd/blobs/HEAD/sys/dev/mfi/mfireg.h + */ +/*- + * Copyright (c) 2006 IronPort Systems + * Copyright (c) 2007 LSI Corp. + * Copyright (c) 2007 Rajesh Prabhakaran. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#ifndef SCSI_MFI_H +#define SCSI_MFI_H + +/* + * MegaRAID SAS MFI firmware definitions + */ + +/* + * Start with the register set. All registers are 32 bits wide. + * The usual Intel IOP style setup. + */ +#define MFI_IMSG0 0x10 /* Inbound message 0 */ +#define MFI_IMSG1 0x14 /* Inbound message 1 */ +#define MFI_OMSG0 0x18 /* Outbound message 0 */ +#define MFI_OMSG1 0x1c /* Outbound message 1 */ +#define MFI_IDB 0x20 /* Inbound doorbell */ +#define MFI_ISTS 0x24 /* Inbound interrupt status */ +#define MFI_IMSK 0x28 /* Inbound interrupt mask */ +#define MFI_ODB 0x2c /* Outbound doorbell */ +#define MFI_OSTS 0x30 /* Outbound interrupt status */ +#define MFI_OMSK 0x34 /* Outbound interrupt mask */ +#define MFI_IQP 0x40 /* Inbound queue port */ +#define MFI_OQP 0x44 /* Outbound queue port */ + +/* + * 1078-specific registers + */ +#define MFI_ODR0 0x9c /* outbound doorbell register0 */ +#define MFI_ODCR0 0xa0 /* outbound doorbell clear register0 */ +#define MFI_OSP0 0xb0 /* outbound scratch pad0 */ +#define MFI_OSP1 0xb4 /* outbound scratch pad1 */ +#define MFI_IQPL 0xc0 /* Inbound queue port (low bytes) */ +#define MFI_IQPH 0xc4 /* Inbound queue port (high bytes) */ +#define MFI_DIAG 0xf8 /* Host diag */ +#define MFI_SEQ 0xfc /* Sequencer offset */ +#define MFI_1078_EIM 0x80000004 /* 1078 enable interrupt mask */ +#define MFI_RMI 0x2 /* reply message interrupt */ +#define MFI_1078_RM 0x80000000 /* reply 1078 message interrupt */ +#define MFI_ODC 0x4 /* outbound doorbell change interrupt */ + +/* + * gen2 specific changes + */ +#define MFI_GEN2_EIM 0x00000005 /* gen2 enable interrupt mask */ +#define MFI_GEN2_RM 0x00000001 /* reply gen2 message interrupt */ + +/* + * skinny specific changes + */ +#define MFI_SKINNY_IDB 0x00 /* Inbound doorbell is at 0x00 for skinny */ +#define MFI_SKINNY_RM 0x00000001 /* reply skinny message interrupt */ + +/* Bits for MFI_OSTS */ +#define MFI_OSTS_INTR_VALID 0x00000002 + +/* + * Firmware state values. Found in OMSG0 during initialization. + */ +#define MFI_FWSTATE_MASK 0xf0000000 +#define MFI_FWSTATE_UNDEFINED 0x00000000 +#define MFI_FWSTATE_BB_INIT 0x10000000 +#define MFI_FWSTATE_FW_INIT 0x40000000 +#define MFI_FWSTATE_WAIT_HANDSHAKE 0x60000000 +#define MFI_FWSTATE_FW_INIT_2 0x70000000 +#define MFI_FWSTATE_DEVICE_SCAN 0x80000000 +#define MFI_FWSTATE_BOOT_MSG_PENDING 0x90000000 +#define MFI_FWSTATE_FLUSH_CACHE 0xa0000000 +#define MFI_FWSTATE_READY 0xb0000000 +#define MFI_FWSTATE_OPERATIONAL 0xc0000000 +#define MFI_FWSTATE_FAULT 0xf0000000 +#define MFI_FWSTATE_MAXSGL_MASK 0x00ff0000 +#define MFI_FWSTATE_MAXCMD_MASK 0x0000ffff +#define MFI_FWSTATE_MSIX_SUPPORTED 0x04000000 +#define MFI_FWSTATE_HOSTMEMREQD_MASK 0x08000000 + +/* + * Control bits to drive the card to ready state. These go into the IDB + * register.
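The firmware-state word above multiplexes the state, the maximum scatter-gather length, and the maximum command count into one register. A minimal decoding sketch, assuming osp0 holds the value read back from MFI_OMSG0/MFI_OSP0:

    /* Illustrative only: splitting the firmware state word. */
    uint32_t state    = osp0 & MFI_FWSTATE_MASK;           /* e.g. MFI_FWSTATE_READY */
    uint32_t max_sge  = (osp0 & MFI_FWSTATE_MAXSGL_MASK) >> 16;
    uint32_t max_cmds = osp0 & MFI_FWSTATE_MAXCMD_MASK;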
+ */ +#define MFI_FWINIT_ABORT 0x00000001 /* Abort all pending commands */ +#define MFI_FWINIT_READY 0x00000002 /* Move from operational to ready */ +#define MFI_FWINIT_MFIMODE 0x00000004 /* unknown */ +#define MFI_FWINIT_CLEAR_HANDSHAKE 0x00000008 /* Respond to WAIT_HANDSHAKE */ +#define MFI_FWINIT_HOTPLUG 0x00000010 +#define MFI_FWINIT_STOP_ADP 0x00000020 /* Move to operational, stop */ +#define MFI_FWINIT_ADP_RESET 0x00000040 /* Reset ADP */ + +/* + * Control bits for the DIAG register + */ +#define MFI_DIAG_WRITE_ENABLE 0x00000080 +#define MFI_DIAG_RESET_ADP 0x00000004 + +/* MFI Commands */ +typedef enum { + MFI_CMD_INIT = 0x00, + MFI_CMD_LD_READ, + MFI_CMD_LD_WRITE, + MFI_CMD_LD_SCSI_IO, + MFI_CMD_PD_SCSI_IO, + MFI_CMD_DCMD, + MFI_CMD_ABORT, + MFI_CMD_SMP, + MFI_CMD_STP +} mfi_cmd_t; + +/* Direct commands */ +typedef enum { + MFI_DCMD_CTRL_MFI_HOST_MEM_ALLOC = 0x0100e100, + MFI_DCMD_CTRL_GET_INFO = 0x01010000, + MFI_DCMD_CTRL_GET_PROPERTIES = 0x01020100, + MFI_DCMD_CTRL_SET_PROPERTIES = 0x01020200, + MFI_DCMD_CTRL_ALARM = 0x01030000, + MFI_DCMD_CTRL_ALARM_GET = 0x01030100, + MFI_DCMD_CTRL_ALARM_ENABLE = 0x01030200, + MFI_DCMD_CTRL_ALARM_DISABLE = 0x01030300, + MFI_DCMD_CTRL_ALARM_SILENCE = 0x01030400, + MFI_DCMD_CTRL_ALARM_TEST = 0x01030500, + MFI_DCMD_CTRL_EVENT_GETINFO = 0x01040100, + MFI_DCMD_CTRL_EVENT_CLEAR = 0x01040200, + MFI_DCMD_CTRL_EVENT_GET = 0x01040300, + MFI_DCMD_CTRL_EVENT_COUNT = 0x01040400, + MFI_DCMD_CTRL_EVENT_WAIT = 0x01040500, + MFI_DCMD_CTRL_SHUTDOWN = 0x01050000, + MFI_DCMD_HIBERNATE_STANDBY = 0x01060000, + MFI_DCMD_CTRL_GET_TIME = 0x01080101, + MFI_DCMD_CTRL_SET_TIME = 0x01080102, + MFI_DCMD_CTRL_BIOS_DATA_GET = 0x010c0100, + MFI_DCMD_CTRL_BIOS_DATA_SET = 0x010c0200, + MFI_DCMD_CTRL_FACTORY_DEFAULTS = 0x010d0000, + MFI_DCMD_CTRL_MFC_DEFAULTS_GET = 0x010e0201, + MFI_DCMD_CTRL_MFC_DEFAULTS_SET = 0x010e0202, + MFI_DCMD_CTRL_CACHE_FLUSH = 0x01101000, + MFI_DCMD_PD_GET_LIST = 0x02010000, + MFI_DCMD_PD_LIST_QUERY = 0x02010100, + MFI_DCMD_PD_GET_INFO = 0x02020000, + MFI_DCMD_PD_STATE_SET = 0x02030100, + MFI_DCMD_PD_REBUILD = 0x02040100, + MFI_DCMD_PD_BLINK = 0x02070100, + MFI_DCMD_PD_UNBLINK = 0x02070200, + MFI_DCMD_LD_GET_LIST = 0x03010000, + MFI_DCMD_LD_LIST_QUERY = 0x03010100, + MFI_DCMD_LD_GET_INFO = 0x03020000, + MFI_DCMD_LD_GET_PROP = 0x03030000, + MFI_DCMD_LD_SET_PROP = 0x03040000, + MFI_DCMD_LD_DELETE = 0x03090000, + MFI_DCMD_CFG_READ = 0x04010000, + MFI_DCMD_CFG_ADD = 0x04020000, + MFI_DCMD_CFG_CLEAR = 0x04030000, + MFI_DCMD_CFG_FOREIGN_READ = 0x04060100, + MFI_DCMD_CFG_FOREIGN_IMPORT = 0x04060400, + MFI_DCMD_BBU_STATUS = 0x05010000, + MFI_DCMD_BBU_CAPACITY_INFO = 0x05020000, + MFI_DCMD_BBU_DESIGN_INFO = 0x05030000, + MFI_DCMD_BBU_PROP_GET = 0x05050100, + MFI_DCMD_CLUSTER = 0x08000000, + MFI_DCMD_CLUSTER_RESET_ALL = 0x08010100, + MFI_DCMD_CLUSTER_RESET_LD = 0x08010200 +} mfi_dcmd_t; + +/* Modifiers for MFI_DCMD_CTRL_FLUSHCACHE */ +#define MFI_FLUSHCACHE_CTRL 0x01 +#define MFI_FLUSHCACHE_DISK 0x02 + +/* Modifiers for MFI_DCMD_CTRL_SHUTDOWN */ +#define MFI_SHUTDOWN_SPINDOWN 0x01 + +/* + * MFI Frame flags + */ +typedef enum { + MFI_FRAME_DONT_POST_IN_REPLY_QUEUE = 0x0001, + MFI_FRAME_SGL64 = 0x0002, + MFI_FRAME_SENSE64 = 0x0004, + MFI_FRAME_DIR_WRITE = 0x0008, + MFI_FRAME_DIR_READ = 0x0010, + MFI_FRAME_IEEE_SGL = 0x0020, +} mfi_frame_flags; + +/* MFI Status codes */ +typedef enum { + MFI_STAT_OK = 0x00, + MFI_STAT_INVALID_CMD, + MFI_STAT_INVALID_DCMD, + MFI_STAT_INVALID_PARAMETER, + MFI_STAT_INVALID_SEQUENCE_NUMBER, + MFI_STAT_ABORT_NOT_POSSIBLE, + 
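The DCMD opcodes above read naturally a byte at a time; the grouping (0x01 controller, 0x02 physical disk, 0x03 logical disk, 0x04 config, ...) is an informal interpretation, not a field layout the header defines. A small sketch:

    /* Illustrative only: e.g. MFI_DCMD_CTRL_EVENT_GET == 0x01040300. */
    uint32_t opcode = MFI_DCMD_CTRL_EVENT_GET;
    uint8_t group   = opcode >> 24;          /* 0x01: controller */
    uint8_t subsys  = (opcode >> 16) & 0xff; /* 0x04: event log */
    uint8_t action  = (opcode >> 8) & 0xff;  /* 0x03: get */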
MFI_STAT_APP_HOST_CODE_NOT_FOUND, + MFI_STAT_APP_IN_USE, + MFI_STAT_APP_NOT_INITIALIZED, + MFI_STAT_ARRAY_INDEX_INVALID, + MFI_STAT_ARRAY_ROW_NOT_EMPTY, + MFI_STAT_CONFIG_RESOURCE_CONFLICT, + MFI_STAT_DEVICE_NOT_FOUND, + MFI_STAT_DRIVE_TOO_SMALL, + MFI_STAT_FLASH_ALLOC_FAIL, + MFI_STAT_FLASH_BUSY, + MFI_STAT_FLASH_ERROR = 0x10, + MFI_STAT_FLASH_IMAGE_BAD, + MFI_STAT_FLASH_IMAGE_INCOMPLETE, + MFI_STAT_FLASH_NOT_OPEN, + MFI_STAT_FLASH_NOT_STARTED, + MFI_STAT_FLUSH_FAILED, + MFI_STAT_HOST_CODE_NOT_FOUNT, + MFI_STAT_LD_CC_IN_PROGRESS, + MFI_STAT_LD_INIT_IN_PROGRESS, + MFI_STAT_LD_LBA_OUT_OF_RANGE, + MFI_STAT_LD_MAX_CONFIGURED, + MFI_STAT_LD_NOT_OPTIMAL, + MFI_STAT_LD_RBLD_IN_PROGRESS, + MFI_STAT_LD_RECON_IN_PROGRESS, + MFI_STAT_LD_WRONG_RAID_LEVEL, + MFI_STAT_MAX_SPARES_EXCEEDED, + MFI_STAT_MEMORY_NOT_AVAILABLE = 0x20, + MFI_STAT_MFC_HW_ERROR, + MFI_STAT_NO_HW_PRESENT, + MFI_STAT_NOT_FOUND, + MFI_STAT_NOT_IN_ENCL, + MFI_STAT_PD_CLEAR_IN_PROGRESS, + MFI_STAT_PD_TYPE_WRONG, + MFI_STAT_PR_DISABLED, + MFI_STAT_ROW_INDEX_INVALID, + MFI_STAT_SAS_CONFIG_INVALID_ACTION, + MFI_STAT_SAS_CONFIG_INVALID_DATA, + MFI_STAT_SAS_CONFIG_INVALID_PAGE, + MFI_STAT_SAS_CONFIG_INVALID_TYPE, + MFI_STAT_SCSI_DONE_WITH_ERROR, + MFI_STAT_SCSI_IO_FAILED, + MFI_STAT_SCSI_RESERVATION_CONFLICT, + MFI_STAT_SHUTDOWN_FAILED = 0x30, + MFI_STAT_TIME_NOT_SET, + MFI_STAT_WRONG_STATE, + MFI_STAT_LD_OFFLINE, + MFI_STAT_PEER_NOTIFICATION_REJECTED, + MFI_STAT_PEER_NOTIFICATION_FAILED, + MFI_STAT_RESERVATION_IN_PROGRESS, + MFI_STAT_I2C_ERRORS_DETECTED, + MFI_STAT_PCI_ERRORS_DETECTED, + MFI_STAT_DIAG_FAILED, + MFI_STAT_BOOT_MSG_PENDING, + MFI_STAT_FOREIGN_CONFIG_INCOMPLETE, + MFI_STAT_INVALID_SGL, + MFI_STAT_UNSUPPORTED_HW, + MFI_STAT_CC_SCHEDULE_DISABLED, + MFI_STAT_PD_COPYBACK_IN_PROGRESS, + MFI_STAT_MULTIPLE_PDS_IN_ARRAY = 0x40, + MFI_STAT_FW_DOWNLOAD_ERROR, + MFI_STAT_FEATURE_SECURITY_NOT_ENABLED, + MFI_STAT_LOCK_KEY_ALREADY_EXISTS, + MFI_STAT_LOCK_KEY_BACKUP_NOT_ALLOWED, + MFI_STAT_LOCK_KEY_VERIFY_NOT_ALLOWED, + MFI_STAT_LOCK_KEY_VERIFY_FAILED, + MFI_STAT_LOCK_KEY_REKEY_NOT_ALLOWED, + MFI_STAT_LOCK_KEY_INVALID, + MFI_STAT_LOCK_KEY_ESCROW_INVALID, + MFI_STAT_LOCK_KEY_BACKUP_REQUIRED, + MFI_STAT_SECURE_LD_EXISTS, + MFI_STAT_LD_SECURE_NOT_ALLOWED, + MFI_STAT_REPROVISION_NOT_ALLOWED, + MFI_STAT_PD_SECURITY_TYPE_WRONG, + MFI_STAT_LD_ENCRYPTION_TYPE_INVALID, + MFI_STAT_CONFIG_FDE_NON_FDE_MIX_NOT_ALLOWED = 0x50, + MFI_STAT_CONFIG_LD_ENCRYPTION_TYPE_MIX_NOT_ALLOWED, + MFI_STAT_SECRET_KEY_NOT_ALLOWED, + MFI_STAT_PD_HW_ERRORS_DETECTED, + MFI_STAT_LD_CACHE_PINNED, + MFI_STAT_POWER_STATE_SET_IN_PROGRESS, + MFI_STAT_POWER_STATE_SET_BUSY, + MFI_STAT_POWER_STATE_WRONG, + MFI_STAT_PR_NO_AVAILABLE_PD_FOUND, + MFI_STAT_CTRL_RESET_REQUIRED, + MFI_STAT_LOCK_KEY_EKM_NO_BOOT_AGENT, + MFI_STAT_SNAP_NO_SPACE, + MFI_STAT_SNAP_PARTIAL_FAILURE, + MFI_STAT_UPGRADE_KEY_INCOMPATIBLE, + MFI_STAT_PFK_INCOMPATIBLE, + MFI_STAT_PD_MAX_UNCONFIGURED, + MFI_STAT_IO_METRICS_DISABLED = 0x60, + MFI_STAT_AEC_NOT_STOPPED, + MFI_STAT_PI_TYPE_WRONG, + MFI_STAT_LD_PD_PI_INCOMPATIBLE, + MFI_STAT_PI_NOT_ENABLED, + MFI_STAT_LD_BLOCK_SIZE_MISMATCH, + MFI_STAT_INVALID_STATUS = 0xFF +} mfi_status_t; + +/* Event classes */ +typedef enum { + MFI_EVT_CLASS_DEBUG = -2, + MFI_EVT_CLASS_PROGRESS = -1, + MFI_EVT_CLASS_INFO = 0, + MFI_EVT_CLASS_WARNING = 1, + MFI_EVT_CLASS_CRITICAL = 2, + MFI_EVT_CLASS_FATAL = 3, + MFI_EVT_CLASS_DEAD = 4 +} mfi_evt_class_t; + +/* Event locales */ +typedef enum { + MFI_EVT_LOCALE_LD = 0x0001, + MFI_EVT_LOCALE_PD = 0x0002, + MFI_EVT_LOCALE_ENCL = 0x0004, + 
MFI_EVT_LOCALE_BBU = 0x0008, + MFI_EVT_LOCALE_SAS = 0x0010, + MFI_EVT_LOCALE_CTRL = 0x0020, + MFI_EVT_LOCALE_CONFIG = 0x0040, + MFI_EVT_LOCALE_CLUSTER = 0x0080, + MFI_EVT_LOCALE_ALL = 0xffff +} mfi_evt_locale_t; + +/* Event args */ +typedef enum { + MR_EVT_ARGS_NONE = 0x00, + MR_EVT_ARGS_CDB_SENSE, + MR_EVT_ARGS_LD, + MR_EVT_ARGS_LD_COUNT, + MR_EVT_ARGS_LD_LBA, + MR_EVT_ARGS_LD_OWNER, + MR_EVT_ARGS_LD_LBA_PD_LBA, + MR_EVT_ARGS_LD_PROG, + MR_EVT_ARGS_LD_STATE, + MR_EVT_ARGS_LD_STRIP, + MR_EVT_ARGS_PD, + MR_EVT_ARGS_PD_ERR, + MR_EVT_ARGS_PD_LBA, + MR_EVT_ARGS_PD_LBA_LD, + MR_EVT_ARGS_PD_PROG, + MR_EVT_ARGS_PD_STATE, + MR_EVT_ARGS_PCI, + MR_EVT_ARGS_RATE, + MR_EVT_ARGS_STR, + MR_EVT_ARGS_TIME, + MR_EVT_ARGS_ECC, + MR_EVT_ARGS_LD_PROP, + MR_EVT_ARGS_PD_SPARE, + MR_EVT_ARGS_PD_INDEX, + MR_EVT_ARGS_DIAG_PASS, + MR_EVT_ARGS_DIAG_FAIL, + MR_EVT_ARGS_PD_LBA_LBA, + MR_EVT_ARGS_PORT_PHY, + MR_EVT_ARGS_PD_MISSING, + MR_EVT_ARGS_PD_ADDRESS, + MR_EVT_ARGS_BITMAP, + MR_EVT_ARGS_CONNECTOR, + MR_EVT_ARGS_PD_PD, + MR_EVT_ARGS_PD_FRU, + MR_EVT_ARGS_PD_PATHINFO, + MR_EVT_ARGS_PD_POWER_STATE, + MR_EVT_ARGS_GENERIC, +} mfi_evt_args; + +/* Event codes */ +#define MR_EVT_CFG_CLEARED 0x0004 +#define MR_EVT_CTRL_SHUTDOWN 0x002a +#define MR_EVT_LD_STATE_CHANGE 0x0051 +#define MR_EVT_PD_INSERTED 0x005b +#define MR_EVT_PD_REMOVED 0x0070 +#define MR_EVT_PD_STATE_CHANGED 0x0072 +#define MR_EVT_LD_CREATED 0x008a +#define MR_EVT_LD_DELETED 0x008b +#define MR_EVT_FOREIGN_CFG_IMPORTED 0x00db +#define MR_EVT_LD_OFFLINE 0x00fc +#define MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED 0x0152 + +typedef enum { + MR_LD_CACHE_WRITE_BACK = 0x01, + MR_LD_CACHE_WRITE_ADAPTIVE = 0x02, + MR_LD_CACHE_READ_AHEAD = 0x04, + MR_LD_CACHE_READ_ADAPTIVE = 0x08, + MR_LD_CACHE_WRITE_CACHE_BAD_BBU = 0x10, + MR_LD_CACHE_ALLOW_WRITE_CACHE = 0x20, + MR_LD_CACHE_ALLOW_READ_CACHE = 0x40 +} mfi_ld_cache; + +typedef enum { + MR_PD_CACHE_UNCHANGED = 0, + MR_PD_CACHE_ENABLE = 1, + MR_PD_CACHE_DISABLE = 2 +} mfi_pd_cache; + +typedef enum { + MR_PD_QUERY_TYPE_ALL = 0, + MR_PD_QUERY_TYPE_STATE = 1, + MR_PD_QUERY_TYPE_POWER_STATE = 2, + MR_PD_QUERY_TYPE_MEDIA_TYPE = 3, + MR_PD_QUERY_TYPE_SPEED = 4, + MR_PD_QUERY_TYPE_EXPOSED_TO_HOST = 5, /* query for system drives */ +} mfi_pd_query_type; + +typedef enum { + MR_LD_QUERY_TYPE_ALL = 0, + MR_LD_QUERY_TYPE_EXPOSED_TO_HOST = 1, + MR_LD_QUERY_TYPE_USED_TGT_IDS = 2, + MR_LD_QUERY_TYPE_CLUSTER_ACCESS = 3, + MR_LD_QUERY_TYPE_CLUSTER_LOCALE = 4, +} mfi_ld_query_type; + +/* + * Other properties and definitions + */ +#define MFI_MAX_PD_CHANNELS 2 +#define MFI_MAX_LD_CHANNELS 2 +#define MFI_MAX_CHANNELS (MFI_MAX_PD_CHANNELS + MFI_MAX_LD_CHANNELS) +#define MFI_MAX_CHANNEL_DEVS 128 +#define MFI_DEFAULT_ID -1 +#define MFI_MAX_LUN 8 +#define MFI_MAX_LD 64 + +#define MFI_FRAME_SIZE 64 +#define MFI_MBOX_SIZE 12 + +/* Firmware flashing can take 40s */ +#define MFI_POLL_TIMEOUT_SECS 50 + +/* Allow for speedier math calculations */ +#define MFI_SECTOR_LEN 512 + +/* Scatter Gather elements */ +struct mfi_sg32 { + uint32_t addr; + uint32_t len; +} QEMU_PACKED; + +struct mfi_sg64 { + uint64_t addr; + uint32_t len; +} QEMU_PACKED; + +struct mfi_sg_skinny { + uint64_t addr; + uint32_t len; + uint32_t flag; +} QEMU_PACKED; + +union mfi_sgl { + struct mfi_sg32 sg32[1]; + struct mfi_sg64 sg64[1]; + struct mfi_sg_skinny sg_skinny[1]; +} QEMU_PACKED; + +/* Message frames.
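Which arm of union mfi_sgl a frame carries is signalled by flags in the frame header (MFI_FRAME_SGL64, MFI_FRAME_IEEE_SGL above). A sketch of walking a 64-bit list, where sgl and sge_count are placeholders taken from a received frame:

    /* Illustrative only: the [1] arrays above are the usual trailing-array
     * idiom, so indexing past the first element is intentional. */
    for (int i = 0; i < sge_count; i++) {
        uint64_t addr = sgl->sg64[i].addr;
        uint32_t len  = sgl->sg64[i].len;
        /* map addr/len for DMA here */
    }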
All messages have a common header */ +struct mfi_frame_header { + uint8_t frame_cmd; + uint8_t sense_len; + uint8_t cmd_status; + uint8_t scsi_status; + uint8_t target_id; + uint8_t lun_id; + uint8_t cdb_len; + uint8_t sge_count; + uint64_t context; + uint16_t flags; + uint16_t timeout; + uint32_t data_len; +} QEMU_PACKED; + +struct mfi_init_frame { + struct mfi_frame_header header; + uint32_t qinfo_new_addr_lo; + uint32_t qinfo_new_addr_hi; + uint32_t qinfo_old_addr_lo; + uint32_t qinfo_old_addr_hi; + uint32_t reserved[6]; +}; + +#define MFI_IO_FRAME_SIZE 40 +struct mfi_io_frame { + struct mfi_frame_header header; + uint32_t sense_addr_lo; + uint32_t sense_addr_hi; + uint32_t lba_lo; + uint32_t lba_hi; + union mfi_sgl sgl; +} QEMU_PACKED; + +#define MFI_PASS_FRAME_SIZE 48 +struct mfi_pass_frame { + struct mfi_frame_header header; + uint32_t sense_addr_lo; + uint32_t sense_addr_hi; + uint8_t cdb[16]; + union mfi_sgl sgl; +} QEMU_PACKED; + +#define MFI_DCMD_FRAME_SIZE 40 +struct mfi_dcmd_frame { + struct mfi_frame_header header; + uint32_t opcode; + uint8_t mbox[MFI_MBOX_SIZE]; + union mfi_sgl sgl; +} QEMU_PACKED; + +struct mfi_abort_frame { + struct mfi_frame_header header; + uint64_t abort_context; + uint32_t abort_mfi_addr_lo; + uint32_t abort_mfi_addr_hi; + uint32_t reserved1[6]; +} QEMU_PACKED; + +struct mfi_smp_frame { + struct mfi_frame_header header; + uint64_t sas_addr; + union { + struct mfi_sg32 sg32[2]; + struct mfi_sg64 sg64[2]; + } sgl; +} QEMU_PACKED; + +struct mfi_stp_frame { + struct mfi_frame_header header; + uint16_t fis[10]; + uint32_t stp_flags; + union { + struct mfi_sg32 sg32[2]; + struct mfi_sg64 sg64[2]; + } sgl; +} QEMU_PACKED; + +union mfi_frame { + struct mfi_frame_header header; + struct mfi_init_frame init; + struct mfi_io_frame io; + struct mfi_pass_frame pass; + struct mfi_dcmd_frame dcmd; + struct mfi_abort_frame abort; + struct mfi_smp_frame smp; + struct mfi_stp_frame stp; + uint64_t raw[8]; + uint8_t bytes[MFI_FRAME_SIZE]; +}; + +#define MFI_SENSE_LEN 128 +struct mfi_sense { + uint8_t data[MFI_SENSE_LEN]; +}; + +#define MFI_QUEUE_FLAG_CONTEXT64 0x00000002 + +/* The queue init structure that is passed with the init message */ +struct mfi_init_qinfo { + uint32_t flags; + uint32_t rq_entries; + uint32_t rq_addr_lo; + uint32_t rq_addr_hi; + uint32_t pi_addr_lo; + uint32_t pi_addr_hi; + uint32_t ci_addr_lo; + uint32_t ci_addr_hi; +} QEMU_PACKED; + +/* Controller properties */ +struct mfi_ctrl_props { + uint16_t seq_num; + uint16_t pred_fail_poll_interval; + uint16_t intr_throttle_cnt; + uint16_t intr_throttle_timeout; + uint8_t rebuild_rate; + uint8_t patrol_read_rate; + uint8_t bgi_rate; + uint8_t cc_rate; + uint8_t recon_rate; + uint8_t cache_flush_interval; + uint8_t spinup_drv_cnt; + uint8_t spinup_delay; + uint8_t cluster_enable; + uint8_t coercion_mode; + uint8_t alarm_enable; + uint8_t disable_auto_rebuild; + uint8_t disable_battery_warn; + uint8_t ecc_bucket_size; + uint16_t ecc_bucket_leak_rate; + uint8_t restore_hotspare_on_insertion; + uint8_t expose_encl_devices; + uint8_t maintainPdFailHistory; + uint8_t disallowHostRequestReordering; + uint8_t abortCCOnError; + uint8_t loadBalanceMode; + uint8_t disableAutoDetectBackplane; + uint8_t snapVDSpace; + uint32_t OnOffProperties; +/* set TRUE to disable copyBack (0=copyback enabled) */ +#define MFI_CTRL_PROP_CopyBackDisabled (1 << 0) +#define MFI_CTRL_PROP_SMARTerEnabled (1 << 1) +#define MFI_CTRL_PROP_PRCorrectUnconfiguredAreas (1 << 2) +#define MFI_CTRL_PROP_UseFdeOnly (1 << 3) +#define 
MFI_CTRL_PROP_DisableNCQ (1 << 4) +#define MFI_CTRL_PROP_SSDSMARTerEnabled (1 << 5) +#define MFI_CTRL_PROP_SSDPatrolReadEnabled (1 << 6) +#define MFI_CTRL_PROP_EnableSpinDownUnconfigured (1 << 7) +#define MFI_CTRL_PROP_AutoEnhancedImport (1 << 8) +#define MFI_CTRL_PROP_EnableSecretKeyControl (1 << 9) +#define MFI_CTRL_PROP_DisableOnlineCtrlReset (1 << 10) +#define MFI_CTRL_PROP_AllowBootWithPinnedCache (1 << 11) +#define MFI_CTRL_PROP_DisableSpinDownHS (1 << 12) +#define MFI_CTRL_PROP_EnableJBOD (1 << 13) + + uint8_t autoSnapVDSpace; /* % of source LD to be + * reserved for auto snapshot + * in snapshot repository, for + * metadata and user data + * 1=5%, 2=10%, 3=15% and so on + */ + uint8_t viewSpace; /* snapshot writeable VIEWs + * capacity as a % of source LD + * capacity. 0=READ only + * 1=5%, 2=10%, 3=15% and so on + */ + uint16_t spinDownTime; /* # of idle minutes before device + * is spun down (0=use FW defaults) + */ + uint8_t reserved[24]; +} QEMU_PACKED; + +/* PCI information about the card. */ +struct mfi_info_pci { + uint16_t vendor; + uint16_t device; + uint16_t subvendor; + uint16_t subdevice; + uint8_t reserved[24]; +} QEMU_PACKED; + +/* Host (front end) interface information */ +struct mfi_info_host { + uint8_t type; +#define MFI_INFO_HOST_PCIX 0x01 +#define MFI_INFO_HOST_PCIE 0x02 +#define MFI_INFO_HOST_ISCSI 0x04 +#define MFI_INFO_HOST_SAS3G 0x08 + uint8_t reserved[6]; + uint8_t port_count; + uint64_t port_addr[8]; +} QEMU_PACKED; + +/* Device (back end) interface information */ +struct mfi_info_device { + uint8_t type; +#define MFI_INFO_DEV_SPI 0x01 +#define MFI_INFO_DEV_SAS3G 0x02 +#define MFI_INFO_DEV_SATA1 0x04 +#define MFI_INFO_DEV_SATA3G 0x08 +#define MFI_INFO_DEV_PCIE 0x10 + uint8_t reserved[6]; + uint8_t port_count; + uint64_t port_addr[8]; +} QEMU_PACKED; + +/* Firmware component information */ +struct mfi_info_component { + char name[8]; + char version[32]; + char build_date[16]; + char build_time[16]; +} QEMU_PACKED; + +/* Controller default settings */ +struct mfi_defaults { + uint64_t sas_addr; + uint8_t phy_polarity; + uint8_t background_rate; + uint8_t stripe_size; + uint8_t flush_time; + uint8_t write_back; + uint8_t read_ahead; + uint8_t cache_when_bbu_bad; + uint8_t cached_io; + uint8_t smart_mode; + uint8_t alarm_disable; + uint8_t coercion; + uint8_t zrc_config; + uint8_t dirty_led_shows_drive_activity; + uint8_t bios_continue_on_error; + uint8_t spindown_mode; + uint8_t allowed_device_types; + uint8_t allow_mix_in_enclosure; + uint8_t allow_mix_in_ld; + uint8_t allow_sata_in_cluster; + uint8_t max_chained_enclosures; + uint8_t disable_ctrl_r; + uint8_t enable_web_bios; + uint8_t phy_polarity_split; + uint8_t direct_pd_mapping; + uint8_t bios_enumerate_lds; + uint8_t restored_hot_spare_on_insertion; + uint8_t expose_enclosure_devices; + uint8_t maintain_pd_fail_history; + uint8_t disable_puncture; + uint8_t zero_based_enumeration; + uint8_t disable_preboot_cli; + uint8_t show_drive_led_on_activity; + uint8_t cluster_disable; + uint8_t sas_disable; + uint8_t auto_detect_backplane; + uint8_t fde_only; + uint8_t delay_during_post; + uint8_t resv[19]; +} QEMU_PACKED; + +/* Controller default settings */ +struct mfi_bios_data { + uint16_t boot_target_id; + uint8_t do_not_int_13; + uint8_t continue_on_error; + uint8_t verbose; + uint8_t geometry; + uint8_t expose_all_drives; + uint8_t reserved[56]; + uint8_t check_sum; +} QEMU_PACKED; + +/* SAS (?) controller info, returned from MFI_DCMD_CTRL_GETINFO. 
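OnOffProperties above is a plain bit-field that drivers and management tools test bit by bit. A one-line sketch, assuming props points at a fetched struct mfi_ctrl_props:

    /* Illustrative only: querying a controller property bit. */
    bool jbod = !!(props->OnOffProperties & MFI_CTRL_PROP_EnableJBOD);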
*/ +struct mfi_ctrl_info { + struct mfi_info_pci pci; + struct mfi_info_host host; + struct mfi_info_device device; + + /* Firmware components that are present and active. */ + uint32_t image_check_word; + uint32_t image_component_count; + struct mfi_info_component image_component[8]; + + /* Firmware components that have been flashed but are inactive */ + uint32_t pending_image_component_count; + struct mfi_info_component pending_image_component[8]; + + uint8_t max_arms; + uint8_t max_spans; + uint8_t max_arrays; + uint8_t max_lds; + char product_name[80]; + char serial_number[32]; + uint32_t hw_present; +#define MFI_INFO_HW_BBU 0x01 +#define MFI_INFO_HW_ALARM 0x02 +#define MFI_INFO_HW_NVRAM 0x04 +#define MFI_INFO_HW_UART 0x08 +#define MFI_INFO_HW_MEM 0x10 +#define MFI_INFO_HW_FLASH 0x20 + uint32_t current_fw_time; + uint16_t max_cmds; + uint16_t max_sg_elements; + uint32_t max_request_size; + uint16_t lds_present; + uint16_t lds_degraded; + uint16_t lds_offline; + uint16_t pd_present; + uint16_t pd_disks_present; + uint16_t pd_disks_pred_failure; + uint16_t pd_disks_failed; + uint16_t nvram_size; + uint16_t memory_size; + uint16_t flash_size; + uint16_t ram_correctable_errors; + uint16_t ram_uncorrectable_errors; + uint8_t cluster_allowed; + uint8_t cluster_active; + uint16_t max_strips_per_io; + + uint32_t raid_levels; +#define MFI_INFO_RAID_0 0x01 +#define MFI_INFO_RAID_1 0x02 +#define MFI_INFO_RAID_5 0x04 +#define MFI_INFO_RAID_1E 0x08 +#define MFI_INFO_RAID_6 0x10 + + uint32_t adapter_ops; +#define MFI_INFO_AOPS_RBLD_RATE 0x0001 +#define MFI_INFO_AOPS_CC_RATE 0x0002 +#define MFI_INFO_AOPS_BGI_RATE 0x0004 +#define MFI_INFO_AOPS_RECON_RATE 0x0008 +#define MFI_INFO_AOPS_PATROL_RATE 0x0010 +#define MFI_INFO_AOPS_ALARM_CONTROL 0x0020 +#define MFI_INFO_AOPS_CLUSTER_SUPPORTED 0x0040 +#define MFI_INFO_AOPS_BBU 0x0080 +#define MFI_INFO_AOPS_SPANNING_ALLOWED 0x0100 +#define MFI_INFO_AOPS_DEDICATED_SPARES 0x0200 +#define MFI_INFO_AOPS_REVERTIBLE_SPARES 0x0400 +#define MFI_INFO_AOPS_FOREIGN_IMPORT 0x0800 +#define MFI_INFO_AOPS_SELF_DIAGNOSTIC 0x1000 +#define MFI_INFO_AOPS_MIXED_ARRAY 0x2000 +#define MFI_INFO_AOPS_GLOBAL_SPARES 0x4000 + + uint32_t ld_ops; +#define MFI_INFO_LDOPS_READ_POLICY 0x01 +#define MFI_INFO_LDOPS_WRITE_POLICY 0x02 +#define MFI_INFO_LDOPS_IO_POLICY 0x04 +#define MFI_INFO_LDOPS_ACCESS_POLICY 0x08 +#define MFI_INFO_LDOPS_DISK_CACHE_POLICY 0x10 + + struct { + uint8_t min; + uint8_t max; + uint8_t reserved[2]; + } QEMU_PACKED stripe_sz_ops; + + uint32_t pd_ops; +#define MFI_INFO_PDOPS_FORCE_ONLINE 0x01 +#define MFI_INFO_PDOPS_FORCE_OFFLINE 0x02 +#define MFI_INFO_PDOPS_FORCE_REBUILD 0x04 + + uint32_t pd_mix_support; +#define MFI_INFO_PDMIX_SAS 0x01 +#define MFI_INFO_PDMIX_SATA 0x02 +#define MFI_INFO_PDMIX_ENCL 0x04 +#define MFI_INFO_PDMIX_LD 0x08 +#define MFI_INFO_PDMIX_SATA_CLUSTER 0x10 + + uint8_t ecc_bucket_count; + uint8_t reserved2[11]; + struct mfi_ctrl_props properties; + char package_version[0x60]; + uint8_t pad[0x800 - 0x6a0]; +} QEMU_PACKED; + +/* keep track of an event. */ +union mfi_evt { + struct { + uint16_t locale; + uint8_t reserved; + int8_t class; + } members; + uint32_t word; +} QEMU_PACKED; + +/* event log state. 
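union mfi_evt above packs the event filter's locale mask and class threshold into a single 32-bit word, which is what the event DCMDs carry in their mailbox. A sketch of building the usual "all locales, informational and up" filter:

    /* Illustrative only: filter word for MFI_DCMD_CTRL_EVENT_WAIT. */
    union mfi_evt filter = {
        .members.locale = MFI_EVT_LOCALE_ALL,
        .members.class  = MFI_EVT_CLASS_INFO,
    };
    uint32_t mbox_word = filter.word;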
*/ +struct mfi_evt_log_state { + uint32_t newest_seq_num; + uint32_t oldest_seq_num; + uint32_t clear_seq_num; + uint32_t shutdown_seq_num; + uint32_t boot_seq_num; +} QEMU_PACKED; + +struct mfi_progress { + uint16_t progress; + uint16_t elapsed_seconds; +} QEMU_PACKED; + +struct mfi_evt_ld { + uint16_t target_id; + uint8_t ld_index; + uint8_t reserved; +} QEMU_PACKED; + +struct mfi_evt_pd { + uint16_t device_id; + uint8_t enclosure_index; + uint8_t slot_number; +} QEMU_PACKED; + +/* event detail, returned from MFI_DCMD_CTRL_EVENT_WAIT. */ +struct mfi_evt_detail { + uint32_t seq; + uint32_t time; + uint32_t code; + union mfi_evt class; + uint8_t arg_type; + uint8_t reserved1[15]; + + union { + struct { + struct mfi_evt_pd pd; + uint8_t cdb_len; + uint8_t sense_len; + uint8_t reserved[2]; + uint8_t cdb[16]; + uint8_t sense[64]; + } cdb_sense; + + struct mfi_evt_ld ld; + + struct { + struct mfi_evt_ld ld; + uint64_t count; + } ld_count; + + struct { + uint64_t lba; + struct mfi_evt_ld ld; + } ld_lba; + + struct { + struct mfi_evt_ld ld; + uint32_t pre_owner; + uint32_t new_owner; + } ld_owner; + + struct { + uint64_t ld_lba; + uint64_t pd_lba; + struct mfi_evt_ld ld; + struct mfi_evt_pd pd; + } ld_lba_pd_lba; + + struct { + struct mfi_evt_ld ld; + struct mfi_progress prog; + } ld_prog; + + struct { + struct mfi_evt_ld ld; + uint32_t prev_state; + uint32_t new_state; + } ld_state; + + struct { + uint64_t strip; + struct mfi_evt_ld ld; + } ld_strip; + + struct mfi_evt_pd pd; + + struct { + struct mfi_evt_pd pd; + uint32_t err; + } pd_err; + + struct { + uint64_t lba; + struct mfi_evt_pd pd; + } pd_lba; + + struct { + uint64_t lba; + struct mfi_evt_pd pd; + struct mfi_evt_ld ld; + } pd_lba_ld; + + struct { + struct mfi_evt_pd pd; + struct mfi_progress prog; + } pd_prog; + + struct { + struct mfi_evt_pd ld; + uint32_t prev_state; + uint32_t new_state; + } pd_state; + + struct { + uint16_t venderId; + uint16_t deviceId; + uint16_t subVenderId; + uint16_t subDeviceId; + } pci; + + uint32_t rate; + + char str[96]; + + struct { + uint32_t rtc; + uint16_t elapsedSeconds; + } time; + + struct { + uint32_t ecar; + uint32_t elog; + char str[64]; + } ecc; + + uint8_t b[96]; + uint16_t s[48]; + uint32_t w[24]; + uint64_t d[12]; + } args; + + char description[128]; +} QEMU_PACKED; + +struct mfi_evt_list { + uint32_t count; + uint32_t reserved; + struct mfi_evt_detail event[1]; +} QEMU_PACKED; + +union mfi_pd_ref { + struct { + uint16_t device_id; + uint16_t seq_num; + } v; + uint32_t ref; +} QEMU_PACKED; + +union mfi_pd_ddf_type { + struct { + uint16_t pd_type; +#define MFI_PD_DDF_TYPE_FORCED_PD_GUID (1 << 0) +#define MFI_PD_DDF_TYPE_IN_VD (1 << 1) +#define MFI_PD_DDF_TYPE_IS_GLOBAL_SPARE (1 << 2) +#define MFI_PD_DDF_TYPE_IS_SPARE (1 << 3) +#define MFI_PD_DDF_TYPE_IS_FOREIGN (1 << 4) +#define MFI_PD_DDF_TYPE_INTF_SPI (1 << 12) +#define MFI_PD_DDF_TYPE_INTF_SAS (1 << 13) +#define MFI_PD_DDF_TYPE_INTF_SATA1 (1 << 14) +#define MFI_PD_DDF_TYPE_INTF_SATA3G (1 << 15) + uint16_t reserved; + } ddf; + struct { + uint32_t reserved; + } non_disk; + uint32_t type; +} QEMU_PACKED; + +struct mfi_pd_progress { + uint32_t active; +#define PD_PROGRESS_ACTIVE_REBUILD (1 << 0) +#define PD_PROGRESS_ACTIVE_PATROL (1 << 1) +#define PD_PROGRESS_ACTIVE_CLEAR (1 << 2) + struct mfi_progress rbld; + struct mfi_progress patrol; + struct mfi_progress clear; + struct mfi_progress reserved[4]; +} QEMU_PACKED; + +struct mfi_pd_info { + union mfi_pd_ref ref; + uint8_t inquiry_data[96]; + uint8_t vpd_page83[64]; + uint8_t not_supported; + 
uint8_t scsi_dev_type; + uint8_t connected_port_bitmap; + uint8_t device_speed; + uint32_t media_err_count; + uint32_t other_err_count; + uint32_t pred_fail_count; + uint32_t last_pred_fail_event_seq_num; + uint16_t fw_state; + uint8_t disable_for_removal; + uint8_t link_speed; + union mfi_pd_ddf_type state; + struct { + uint8_t count; + uint8_t is_path_broken; + uint8_t reserved[6]; + uint64_t sas_addr[4]; + } path_info; + uint64_t raw_size; + uint64_t non_coerced_size; + uint64_t coerced_size; + uint16_t encl_device_id; + uint8_t encl_index; + uint8_t slot_number; + struct mfi_pd_progress prog_info; + uint8_t bad_block_table_full; + uint8_t unusable_in_current_config; + uint8_t vpd_page83_ext[64]; + uint8_t reserved[512-358]; +} QEMU_PACKED; + +struct mfi_pd_address { + uint16_t device_id; + uint16_t encl_device_id; + uint8_t encl_index; + uint8_t slot_number; + uint8_t scsi_dev_type; + uint8_t connect_port_bitmap; + uint64_t sas_addr[2]; +} QEMU_PACKED; + +#define MFI_MAX_SYS_PDS 240 +struct mfi_pd_list { + uint32_t size; + uint32_t count; + struct mfi_pd_address addr[MFI_MAX_SYS_PDS]; +} QEMU_PACKED; + +union mfi_ld_ref { + struct { + uint8_t target_id; + uint8_t reserved; + uint16_t seq; + } v; + uint32_t ref; +} QEMU_PACKED; + +struct mfi_ld_list { + uint32_t ld_count; + uint32_t reserved1; + struct { + union mfi_ld_ref ld; + uint8_t state; + uint8_t reserved2[3]; + uint64_t size; + } ld_list[MFI_MAX_LD]; +} QEMU_PACKED; + +struct mfi_ld_targetid_list { + uint32_t size; + uint32_t ld_count; + uint8_t pad[3]; + uint8_t targetid[MFI_MAX_LD]; +} QEMU_PACKED; + +enum mfi_ld_access { + MFI_LD_ACCESS_RW = 0, + MFI_LD_ACCSSS_RO = 2, + MFI_LD_ACCESS_BLOCKED = 3, +}; +#define MFI_LD_ACCESS_MASK 3 + +enum mfi_ld_state { + MFI_LD_STATE_OFFLINE = 0, + MFI_LD_STATE_PARTIALLY_DEGRADED = 1, + MFI_LD_STATE_DEGRADED = 2, + MFI_LD_STATE_OPTIMAL = 3 +}; + +enum mfi_syspd_state { + MFI_PD_STATE_UNCONFIGURED_GOOD = 0x00, + MFI_PD_STATE_UNCONFIGURED_BAD = 0x01, + MFI_PD_STATE_HOT_SPARE = 0x02, + MFI_PD_STATE_OFFLINE = 0x10, + MFI_PD_STATE_FAILED = 0x11, + MFI_PD_STATE_REBUILD = 0x14, + MFI_PD_STATE_ONLINE = 0x18, + MFI_PD_STATE_COPYBACK = 0x20, + MFI_PD_STATE_SYSTEM = 0x40 +}; + +struct mfi_ld_props { + union mfi_ld_ref ld; + char name[16]; + uint8_t default_cache_policy; + uint8_t access_policy; + uint8_t disk_cache_policy; + uint8_t current_cache_policy; + uint8_t no_bgi; + uint8_t reserved[7]; +} QEMU_PACKED; + +struct mfi_ld_params { + uint8_t primary_raid_level; + uint8_t raid_level_qualifier; + uint8_t secondary_raid_level; + uint8_t stripe_size; + uint8_t num_drives; + uint8_t span_depth; + uint8_t state; + uint8_t init_state; + uint8_t is_consistent; + uint8_t reserved[23]; +} QEMU_PACKED; + +struct mfi_ld_progress { + uint32_t active; +#define MFI_LD_PROGRESS_CC (1<<0) +#define MFI_LD_PROGRESS_BGI (1<<1) +#define MFI_LD_PROGRESS_FGI (1<<2) +#define MFI_LD_PORGRESS_RECON (1<<3) + struct mfi_progress cc; + struct mfi_progress bgi; + struct mfi_progress fgi; + struct mfi_progress recon; + struct mfi_progress reserved[4]; +} QEMU_PACKED; + +struct mfi_span { + uint64_t start_block; + uint64_t num_blocks; + uint16_t array_ref; + uint8_t reserved[6]; +} QEMU_PACKED; + +#define MFI_MAX_SPAN_DEPTH 8 +struct mfi_ld_config { + struct mfi_ld_props properties; + struct mfi_ld_params params; + struct mfi_span span[MFI_MAX_SPAN_DEPTH]; +} QEMU_PACKED; + +struct mfi_ld_info { + struct mfi_ld_config ld_config; + uint64_t size; + struct mfi_ld_progress progress; + uint16_t cluster_owner; + uint8_t 
reconstruct_active; + uint8_t reserved1[1]; + uint8_t vpd_page83[64]; + uint8_t reserved2[16]; +} QEMU_PACKED; + +union mfi_spare_type { + uint8_t flags; +#define MFI_SPARE_IS_DEDICATED (1 << 0) +#define MFI_SPARE_IS_REVERTABLE (1 << 1) +#define MFI_SPARE_IS_ENCL_AFFINITY (1 << 2) + uint8_t type; +} QEMU_PACKED; + +#define MFI_MAX_ARRAYS 16 +struct mfi_spare { + union mfi_pd_ref ref; + union mfi_spare_type spare_type; + uint8_t reserved[2]; + uint8_t array_count; + uint16_t array_refd[MFI_MAX_ARRAYS]; +} QEMU_PACKED; + +#define MFI_MAX_ROW_SIZE 32 +struct mfi_array { + uint64_t size; + uint8_t num_drives; + uint8_t reserved; + uint16_t array_ref; + uint8_t pad[20]; + struct { + union mfi_pd_ref ref; + uint16_t fw_state; /* enum mfi_syspd_state */ + struct { + uint8_t pd; + uint8_t slot; + } encl; + } pd[MFI_MAX_ROW_SIZE]; +} QEMU_PACKED; + +struct mfi_config_data { + uint32_t size; + uint16_t array_count; + uint16_t array_size; + uint16_t log_drv_count; + uint16_t log_drv_size; + uint16_t spares_count; + uint16_t spares_size; + uint8_t reserved[16]; + /* + struct mfi_array array[]; + struct mfi_ld_config ld[]; + struct mfi_spare spare[]; + */ +} QEMU_PACKED; + +#define MFI_SCSI_MAX_TARGETS 128 +#define MFI_SCSI_MAX_LUNS 8 +#define MFI_SCSI_INITIATOR_ID 255 +#define MFI_SCSI_MAX_CMDS 8 +#define MFI_SCSI_MAX_CDB_LEN 16 + +#endif /* SCSI_MFI_H */ diff --git a/hw/scsi/mpi.h b/hw/scsi/mpi.h new file mode 100644 index 000000000..0568e1950 --- /dev/null +++ b/hw/scsi/mpi.h @@ -0,0 +1,1153 @@ +/*- + * Based on FreeBSD sys/dev/mpt/mpilib headers. + * + * Copyright (c) 2000-2010, LSI Logic Corporation and its contributors. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon including + * a substantially similar Disclaimer requirement for further binary + * redistribution. + * 3. Neither the name of the LSI Logic Corporation nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT + * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ +#ifndef MPI_H +#define MPI_H + +enum { + MPI_FUNCTION_SCSI_IO_REQUEST = 0x00, + MPI_FUNCTION_SCSI_TASK_MGMT = 0x01, + MPI_FUNCTION_IOC_INIT = 0x02, + MPI_FUNCTION_IOC_FACTS = 0x03, + MPI_FUNCTION_CONFIG = 0x04, + MPI_FUNCTION_PORT_FACTS = 0x05, + MPI_FUNCTION_PORT_ENABLE = 0x06, + MPI_FUNCTION_EVENT_NOTIFICATION = 0x07, + MPI_FUNCTION_EVENT_ACK = 0x08, + MPI_FUNCTION_FW_DOWNLOAD = 0x09, + MPI_FUNCTION_TARGET_CMD_BUFFER_POST = 0x0A, + MPI_FUNCTION_TARGET_ASSIST = 0x0B, + MPI_FUNCTION_TARGET_STATUS_SEND = 0x0C, + MPI_FUNCTION_TARGET_MODE_ABORT = 0x0D, + MPI_FUNCTION_FC_LINK_SRVC_BUF_POST = 0x0E, + MPI_FUNCTION_FC_LINK_SRVC_RSP = 0x0F, + MPI_FUNCTION_FC_EX_LINK_SRVC_SEND = 0x10, + MPI_FUNCTION_FC_ABORT = 0x11, + MPI_FUNCTION_FW_UPLOAD = 0x12, + MPI_FUNCTION_FC_COMMON_TRANSPORT_SEND = 0x13, + MPI_FUNCTION_FC_PRIMITIVE_SEND = 0x14, + + MPI_FUNCTION_RAID_ACTION = 0x15, + MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH = 0x16, + + MPI_FUNCTION_TOOLBOX = 0x17, + + MPI_FUNCTION_SCSI_ENCLOSURE_PROCESSOR = 0x18, + + MPI_FUNCTION_MAILBOX = 0x19, + + MPI_FUNCTION_SMP_PASSTHROUGH = 0x1A, + MPI_FUNCTION_SAS_IO_UNIT_CONTROL = 0x1B, + MPI_FUNCTION_SATA_PASSTHROUGH = 0x1C, + + MPI_FUNCTION_DIAG_BUFFER_POST = 0x1D, + MPI_FUNCTION_DIAG_RELEASE = 0x1E, + + MPI_FUNCTION_SCSI_IO_32 = 0x1F, + + MPI_FUNCTION_LAN_SEND = 0x20, + MPI_FUNCTION_LAN_RECEIVE = 0x21, + MPI_FUNCTION_LAN_RESET = 0x22, + + MPI_FUNCTION_TARGET_ASSIST_EXTENDED = 0x23, + MPI_FUNCTION_TARGET_CMD_BUF_BASE_POST = 0x24, + MPI_FUNCTION_TARGET_CMD_BUF_LIST_POST = 0x25, + + MPI_FUNCTION_INBAND_BUFFER_POST = 0x28, + MPI_FUNCTION_INBAND_SEND = 0x29, + MPI_FUNCTION_INBAND_RSP = 0x2A, + MPI_FUNCTION_INBAND_ABORT = 0x2B, + + MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET = 0x40, + MPI_FUNCTION_IO_UNIT_RESET = 0x41, + MPI_FUNCTION_HANDSHAKE = 0x42, + MPI_FUNCTION_REPLY_FRAME_REMOVAL = 0x43, + MPI_FUNCTION_HOST_PAGEBUF_ACCESS_CONTROL = 0x44, +}; + +/****************************************************************************/ +/* Registers */ +/****************************************************************************/ + +enum { + MPI_IOC_STATE_RESET = 0x00000000, + MPI_IOC_STATE_READY = 0x10000000, + MPI_IOC_STATE_OPERATIONAL = 0x20000000, + MPI_IOC_STATE_FAULT = 0x40000000, + + MPI_DOORBELL_OFFSET = 0x00000000, + MPI_DOORBELL_ACTIVE = 0x08000000, /* DoorbellUsed */ + MPI_DOORBELL_WHO_INIT_MASK = 0x07000000, + MPI_DOORBELL_WHO_INIT_SHIFT = 24, + MPI_DOORBELL_FUNCTION_MASK = 0xFF000000, + MPI_DOORBELL_FUNCTION_SHIFT = 24, + MPI_DOORBELL_ADD_DWORDS_MASK = 0x00FF0000, + MPI_DOORBELL_ADD_DWORDS_SHIFT = 16, + MPI_DOORBELL_DATA_MASK = 0x0000FFFF, + MPI_DOORBELL_FUNCTION_SPECIFIC_MASK = 0x0000FFFF, + + MPI_DB_HPBAC_VALUE_MASK = 0x0000F000, + MPI_DB_HPBAC_ENABLE_ACCESS = 0x01, + MPI_DB_HPBAC_DISABLE_ACCESS = 0x02, + MPI_DB_HPBAC_FREE_BUFFER = 0x03, + + MPI_WRITE_SEQUENCE_OFFSET = 0x00000004, + MPI_WRSEQ_KEY_VALUE_MASK = 0x0000000F, + MPI_WRSEQ_1ST_KEY_VALUE = 0x04, + MPI_WRSEQ_2ND_KEY_VALUE = 0x0B, + MPI_WRSEQ_3RD_KEY_VALUE = 0x02, + MPI_WRSEQ_4TH_KEY_VALUE = 0x07, + MPI_WRSEQ_5TH_KEY_VALUE = 0x0D, + + MPI_DIAGNOSTIC_OFFSET = 0x00000008, + MPI_DIAG_CLEAR_FLASH_BAD_SIG = 0x00000400, + MPI_DIAG_PREVENT_IOC_BOOT = 0x00000200, + MPI_DIAG_DRWE = 0x00000080, + MPI_DIAG_FLASH_BAD_SIG = 0x00000040, + MPI_DIAG_RESET_HISTORY = 0x00000020, + MPI_DIAG_RW_ENABLE = 0x00000010, + MPI_DIAG_RESET_ADAPTER = 0x00000004, + MPI_DIAG_DISABLE_ARM = 0x00000002, + MPI_DIAG_MEM_ENABLE = 0x00000001, + + MPI_TEST_BASE_ADDRESS_OFFSET = 0x0000000C, + + MPI_DIAG_RW_DATA_OFFSET = 0x00000010, + + 
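The doorbell masks above describe how a handshake request is packed into a single register write: the function code goes in the top byte and the message size, in 32-bit words, below it. A minimal sketch, with msg_dwords as a placeholder:

    /* Illustrative only: first dword of a doorbell handshake. */
    uint32_t db = (MPI_FUNCTION_HANDSHAKE << MPI_DOORBELL_FUNCTION_SHIFT) |
                  (msg_dwords << MPI_DOORBELL_ADD_DWORDS_SHIFT);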
MPI_DIAG_RW_ADDRESS_OFFSET = 0x00000014, + + MPI_HOST_INTERRUPT_STATUS_OFFSET = 0x00000030, + MPI_HIS_IOP_DOORBELL_STATUS = 0x80000000, + MPI_HIS_REPLY_MESSAGE_INTERRUPT = 0x00000008, + MPI_HIS_DOORBELL_INTERRUPT = 0x00000001, + + MPI_HOST_INTERRUPT_MASK_OFFSET = 0x00000034, + MPI_HIM_RIM = 0x00000008, + MPI_HIM_DIM = 0x00000001, + + MPI_REQUEST_QUEUE_OFFSET = 0x00000040, + MPI_REQUEST_POST_FIFO_OFFSET = 0x00000040, + + MPI_REPLY_QUEUE_OFFSET = 0x00000044, + MPI_REPLY_POST_FIFO_OFFSET = 0x00000044, + MPI_REPLY_FREE_FIFO_OFFSET = 0x00000044, + + MPI_HI_PRI_REQUEST_QUEUE_OFFSET = 0x00000048, +}; + +#define MPI_ADDRESS_REPLY_A_BIT 0x80000000 + +/****************************************************************************/ +/* Scatter/gather elements */ +/****************************************************************************/ + +typedef struct MPISGEntry { + uint32_t FlagsLength; + union + { + uint32_t Address32; + uint64_t Address64; + } u; +} QEMU_PACKED MPISGEntry; + +/* Flags field bit definitions */ + +enum { + MPI_SGE_FLAGS_LAST_ELEMENT = 0x80000000, + MPI_SGE_FLAGS_END_OF_BUFFER = 0x40000000, + MPI_SGE_FLAGS_ELEMENT_TYPE_MASK = 0x30000000, + MPI_SGE_FLAGS_LOCAL_ADDRESS = 0x08000000, + MPI_SGE_FLAGS_DIRECTION = 0x04000000, + MPI_SGE_FLAGS_64_BIT_ADDRESSING = 0x02000000, + MPI_SGE_FLAGS_END_OF_LIST = 0x01000000, + + MPI_SGE_LENGTH_MASK = 0x00FFFFFF, + MPI_SGE_CHAIN_LENGTH_MASK = 0x0000FFFF, + + MPI_SGE_FLAGS_TRANSACTION_ELEMENT = 0x00000000, + MPI_SGE_FLAGS_SIMPLE_ELEMENT = 0x10000000, + MPI_SGE_FLAGS_CHAIN_ELEMENT = 0x30000000, + + /* Direction */ + + MPI_SGE_FLAGS_IOC_TO_HOST = 0x00000000, + MPI_SGE_FLAGS_HOST_TO_IOC = 0x04000000, + + MPI_SGE_CHAIN_OFFSET_MASK = 0x00FF0000, +}; + +#define MPI_SGE_CHAIN_OFFSET_SHIFT 16 + +/****************************************************************************/ +/* Standard message request header for all request messages */ +/****************************************************************************/ + +typedef struct MPIRequestHeader { + uint8_t Reserved[2]; /* function specific */ + uint8_t ChainOffset; + uint8_t Function; + uint8_t Reserved1[3]; /* function specific */ + uint8_t MsgFlags; + uint32_t MsgContext; +} QEMU_PACKED MPIRequestHeader; + + +typedef struct MPIDefaultReply { + uint8_t Reserved[2]; /* function specific */ + uint8_t MsgLength; + uint8_t Function; + uint8_t Reserved1[3]; /* function specific */ + uint8_t MsgFlags; + uint32_t MsgContext; + uint8_t Reserved2[2]; /* function specific */ + uint16_t IOCStatus; + uint32_t IOCLogInfo; +} QEMU_PACKED MPIDefaultReply; + +/* MsgFlags definition for all replies */ + +#define MPI_MSGFLAGS_CONTINUATION_REPLY (0x80) + +enum { + + /************************************************************************/ + /* Common IOCStatus values for all replies */ + /************************************************************************/ + + MPI_IOCSTATUS_SUCCESS = 0x0000, + MPI_IOCSTATUS_INVALID_FUNCTION = 0x0001, + MPI_IOCSTATUS_BUSY = 0x0002, + MPI_IOCSTATUS_INVALID_SGL = 0x0003, + MPI_IOCSTATUS_INTERNAL_ERROR = 0x0004, + MPI_IOCSTATUS_RESERVED = 0x0005, + MPI_IOCSTATUS_INSUFFICIENT_RESOURCES = 0x0006, + MPI_IOCSTATUS_INVALID_FIELD = 0x0007, + MPI_IOCSTATUS_INVALID_STATE = 0x0008, + MPI_IOCSTATUS_OP_STATE_NOT_SUPPORTED = 0x0009, + + /************************************************************************/ + /* Config IOCStatus values */ + /************************************************************************/ + + MPI_IOCSTATUS_CONFIG_INVALID_ACTION = 0x0020, + 
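MPISGEntry above packs the element type, direction flags, and a 24-bit length into FlagsLength. A sketch of the final simple element of a request, with dma_addr and len as placeholders:

    /* Illustrative only: terminal 64-bit simple SGE. */
    MPISGEntry sge = {
        .FlagsLength = MPI_SGE_FLAGS_SIMPLE_ELEMENT |
                       MPI_SGE_FLAGS_64_BIT_ADDRESSING |
                       MPI_SGE_FLAGS_LAST_ELEMENT |
                       MPI_SGE_FLAGS_END_OF_BUFFER |
                       MPI_SGE_FLAGS_END_OF_LIST |
                       (len & MPI_SGE_LENGTH_MASK),
        .u.Address64 = dma_addr,
    };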
MPI_IOCSTATUS_CONFIG_INVALID_TYPE = 0x0021, + MPI_IOCSTATUS_CONFIG_INVALID_PAGE = 0x0022, + MPI_IOCSTATUS_CONFIG_INVALID_DATA = 0x0023, + MPI_IOCSTATUS_CONFIG_NO_DEFAULTS = 0x0024, + MPI_IOCSTATUS_CONFIG_CANT_COMMIT = 0x0025, + + /************************************************************************/ + /* SCSIIO Reply = SPI & FCP, initiator values */ + /************************************************************************/ + + MPI_IOCSTATUS_SCSI_RECOVERED_ERROR = 0x0040, + MPI_IOCSTATUS_SCSI_INVALID_BUS = 0x0041, + MPI_IOCSTATUS_SCSI_INVALID_TARGETID = 0x0042, + MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE = 0x0043, + MPI_IOCSTATUS_SCSI_DATA_OVERRUN = 0x0044, + MPI_IOCSTATUS_SCSI_DATA_UNDERRUN = 0x0045, + MPI_IOCSTATUS_SCSI_IO_DATA_ERROR = 0x0046, + MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR = 0x0047, + MPI_IOCSTATUS_SCSI_TASK_TERMINATED = 0x0048, + MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH = 0x0049, + MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED = 0x004A, + MPI_IOCSTATUS_SCSI_IOC_TERMINATED = 0x004B, + MPI_IOCSTATUS_SCSI_EXT_TERMINATED = 0x004C, + + /************************************************************************/ + /* For use by SCSI Initiator and SCSI Target end-to-end data protection*/ + /************************************************************************/ + + MPI_IOCSTATUS_EEDP_GUARD_ERROR = 0x004D, + MPI_IOCSTATUS_EEDP_REF_TAG_ERROR = 0x004E, + MPI_IOCSTATUS_EEDP_APP_TAG_ERROR = 0x004F, + + /************************************************************************/ + /* SCSI Target values */ + /************************************************************************/ + + MPI_IOCSTATUS_TARGET_PRIORITY_IO = 0x0060, + MPI_IOCSTATUS_TARGET_INVALID_PORT = 0x0061, + MPI_IOCSTATUS_TARGET_INVALID_IO_INDEX = 0x0062, + MPI_IOCSTATUS_TARGET_ABORTED = 0x0063, + MPI_IOCSTATUS_TARGET_NO_CONN_RETRYABLE = 0x0064, + MPI_IOCSTATUS_TARGET_NO_CONNECTION = 0x0065, + MPI_IOCSTATUS_TARGET_XFER_COUNT_MISMATCH = 0x006A, + MPI_IOCSTATUS_TARGET_STS_DATA_NOT_SENT = 0x006B, + MPI_IOCSTATUS_TARGET_DATA_OFFSET_ERROR = 0x006D, + MPI_IOCSTATUS_TARGET_TOO_MUCH_WRITE_DATA = 0x006E, + MPI_IOCSTATUS_TARGET_IU_TOO_SHORT = 0x006F, + MPI_IOCSTATUS_TARGET_ACK_NAK_TIMEOUT = 0x0070, + MPI_IOCSTATUS_TARGET_NAK_RECEIVED = 0x0071, + + /************************************************************************/ + /* Fibre Channel Direct Access values */ + /************************************************************************/ + + MPI_IOCSTATUS_FC_ABORTED = 0x0066, + MPI_IOCSTATUS_FC_RX_ID_INVALID = 0x0067, + MPI_IOCSTATUS_FC_DID_INVALID = 0x0068, + MPI_IOCSTATUS_FC_NODE_LOGGED_OUT = 0x0069, + MPI_IOCSTATUS_FC_EXCHANGE_CANCELED = 0x006C, + + /************************************************************************/ + /* LAN values */ + /************************************************************************/ + + MPI_IOCSTATUS_LAN_DEVICE_NOT_FOUND = 0x0080, + MPI_IOCSTATUS_LAN_DEVICE_FAILURE = 0x0081, + MPI_IOCSTATUS_LAN_TRANSMIT_ERROR = 0x0082, + MPI_IOCSTATUS_LAN_TRANSMIT_ABORTED = 0x0083, + MPI_IOCSTATUS_LAN_RECEIVE_ERROR = 0x0084, + MPI_IOCSTATUS_LAN_RECEIVE_ABORTED = 0x0085, + MPI_IOCSTATUS_LAN_PARTIAL_PACKET = 0x0086, + MPI_IOCSTATUS_LAN_CANCELED = 0x0087, + + /************************************************************************/ + /* Serial Attached SCSI values */ + /************************************************************************/ + + MPI_IOCSTATUS_SAS_SMP_REQUEST_FAILED = 0x0090, + MPI_IOCSTATUS_SAS_SMP_DATA_OVERRUN = 0x0091, + + /************************************************************************/ + /* Inband values 
*/ + /************************************************************************/ + + MPI_IOCSTATUS_INBAND_ABORTED = 0x0098, + MPI_IOCSTATUS_INBAND_NO_CONNECTION = 0x0099, + + /************************************************************************/ + /* Diagnostic Tools values */ + /************************************************************************/ + + MPI_IOCSTATUS_DIAGNOSTIC_RELEASED = 0x00A0, + + /************************************************************************/ + /* IOCStatus flag to indicate that log info is available */ + /************************************************************************/ + + MPI_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE = 0x8000, + MPI_IOCSTATUS_MASK = 0x7FFF, + + /************************************************************************/ + /* LogInfo Types */ + /************************************************************************/ + + MPI_IOCLOGINFO_TYPE_MASK = 0xF0000000, + MPI_IOCLOGINFO_TYPE_SHIFT = 28, + MPI_IOCLOGINFO_TYPE_NONE = 0x0, + MPI_IOCLOGINFO_TYPE_SCSI = 0x1, + MPI_IOCLOGINFO_TYPE_FC = 0x2, + MPI_IOCLOGINFO_TYPE_SAS = 0x3, + MPI_IOCLOGINFO_TYPE_ISCSI = 0x4, + MPI_IOCLOGINFO_LOG_DATA_MASK = 0x0FFFFFFF, +}; + +/****************************************************************************/ +/* SCSI IO messages and associated structures */ +/****************************************************************************/ + +typedef struct MPIMsgSCSIIORequest { + uint8_t TargetID; /* 00h */ + uint8_t Bus; /* 01h */ + uint8_t ChainOffset; /* 02h */ + uint8_t Function; /* 03h */ + uint8_t CDBLength; /* 04h */ + uint8_t SenseBufferLength; /* 05h */ + uint8_t Reserved; /* 06h */ + uint8_t MsgFlags; /* 07h */ + uint32_t MsgContext; /* 08h */ + uint8_t LUN[8]; /* 0Ch */ + uint32_t Control; /* 14h */ + uint8_t CDB[16]; /* 18h */ + uint32_t DataLength; /* 28h */ + uint32_t SenseBufferLowAddr; /* 2Ch */ +} QEMU_PACKED MPIMsgSCSIIORequest; + +/* SCSI IO MsgFlags bits */ + +#define MPI_SCSIIO_MSGFLGS_SENSE_WIDTH (0x01) +#define MPI_SCSIIO_MSGFLGS_SENSE_WIDTH_32 (0x00) +#define MPI_SCSIIO_MSGFLGS_SENSE_WIDTH_64 (0x01) + +#define MPI_SCSIIO_MSGFLGS_SENSE_LOCATION (0x02) +#define MPI_SCSIIO_MSGFLGS_SENSE_LOC_HOST (0x00) +#define MPI_SCSIIO_MSGFLGS_SENSE_LOC_IOC (0x02) + +#define MPI_SCSIIO_MSGFLGS_CMD_DETERMINES_DATA_DIR (0x04) + +/* SCSI IO LUN fields */ + +#define MPI_SCSIIO_LUN_FIRST_LEVEL_ADDRESSING (0x0000FFFF) +#define MPI_SCSIIO_LUN_SECOND_LEVEL_ADDRESSING (0xFFFF0000) +#define MPI_SCSIIO_LUN_THIRD_LEVEL_ADDRESSING (0x0000FFFF) +#define MPI_SCSIIO_LUN_FOURTH_LEVEL_ADDRESSING (0xFFFF0000) +#define MPI_SCSIIO_LUN_LEVEL_1_WORD (0xFF00) +#define MPI_SCSIIO_LUN_LEVEL_1_DWORD (0x0000FF00) + +/* SCSI IO Control bits */ + +#define MPI_SCSIIO_CONTROL_DATADIRECTION_MASK (0x03000000) +#define MPI_SCSIIO_CONTROL_NODATATRANSFER (0x00000000) +#define MPI_SCSIIO_CONTROL_WRITE (0x01000000) +#define MPI_SCSIIO_CONTROL_READ (0x02000000) + +#define MPI_SCSIIO_CONTROL_ADDCDBLEN_MASK (0x3C000000) +#define MPI_SCSIIO_CONTROL_ADDCDBLEN_SHIFT (26) + +#define MPI_SCSIIO_CONTROL_TASKATTRIBUTE_MASK (0x00000700) +#define MPI_SCSIIO_CONTROL_SIMPLEQ (0x00000000) +#define MPI_SCSIIO_CONTROL_HEADOFQ (0x00000100) +#define MPI_SCSIIO_CONTROL_ORDEREDQ (0x00000200) +#define MPI_SCSIIO_CONTROL_ACAQ (0x00000400) +#define MPI_SCSIIO_CONTROL_UNTAGGED (0x00000500) +#define MPI_SCSIIO_CONTROL_NO_DISCONNECT (0x00000700) + +#define MPI_SCSIIO_CONTROL_TASKMANAGE_MASK (0x00FF0000) +#define MPI_SCSIIO_CONTROL_OBSOLETE (0x00800000) +#define MPI_SCSIIO_CONTROL_CLEAR_ACA_RSV (0x00400000) +#define 
MPI_SCSIIO_CONTROL_TARGET_RESET (0x00200000) +#define MPI_SCSIIO_CONTROL_LUN_RESET_RSV (0x00100000) +#define MPI_SCSIIO_CONTROL_RESERVED (0x00080000) +#define MPI_SCSIIO_CONTROL_CLR_TASK_SET_RSV (0x00040000) +#define MPI_SCSIIO_CONTROL_ABORT_TASK_SET (0x00020000) +#define MPI_SCSIIO_CONTROL_RESERVED2 (0x00010000) + +/* SCSI IO reply structure */ +typedef struct MPIMsgSCSIIOReply +{ + uint8_t TargetID; /* 00h */ + uint8_t Bus; /* 01h */ + uint8_t MsgLength; /* 02h */ + uint8_t Function; /* 03h */ + uint8_t CDBLength; /* 04h */ + uint8_t SenseBufferLength; /* 05h */ + uint8_t Reserved; /* 06h */ + uint8_t MsgFlags; /* 07h */ + uint32_t MsgContext; /* 08h */ + uint8_t SCSIStatus; /* 0Ch */ + uint8_t SCSIState; /* 0Dh */ + uint16_t IOCStatus; /* 0Eh */ + uint32_t IOCLogInfo; /* 10h */ + uint32_t TransferCount; /* 14h */ + uint32_t SenseCount; /* 18h */ + uint32_t ResponseInfo; /* 1Ch */ + uint16_t TaskTag; /* 20h */ + uint16_t Reserved1; /* 22h */ +} QEMU_PACKED MPIMsgSCSIIOReply; + +/* SCSI IO Reply SCSIStatus values (SAM-2 status codes) */ + +#define MPI_SCSI_STATUS_SUCCESS (0x00) +#define MPI_SCSI_STATUS_CHECK_CONDITION (0x02) +#define MPI_SCSI_STATUS_CONDITION_MET (0x04) +#define MPI_SCSI_STATUS_BUSY (0x08) +#define MPI_SCSI_STATUS_INTERMEDIATE (0x10) +#define MPI_SCSI_STATUS_INTERMEDIATE_CONDMET (0x14) +#define MPI_SCSI_STATUS_RESERVATION_CONFLICT (0x18) +#define MPI_SCSI_STATUS_COMMAND_TERMINATED (0x22) +#define MPI_SCSI_STATUS_TASK_SET_FULL (0x28) +#define MPI_SCSI_STATUS_ACA_ACTIVE (0x30) + +#define MPI_SCSI_STATUS_FCPEXT_DEVICE_LOGGED_OUT (0x80) +#define MPI_SCSI_STATUS_FCPEXT_NO_LINK (0x81) +#define MPI_SCSI_STATUS_FCPEXT_UNASSIGNED (0x82) + + +/* SCSI IO Reply SCSIState values */ + +#define MPI_SCSI_STATE_AUTOSENSE_VALID (0x01) +#define MPI_SCSI_STATE_AUTOSENSE_FAILED (0x02) +#define MPI_SCSI_STATE_NO_SCSI_STATUS (0x04) +#define MPI_SCSI_STATE_TERMINATED (0x08) +#define MPI_SCSI_STATE_RESPONSE_INFO_VALID (0x10) +#define MPI_SCSI_STATE_QUEUE_TAG_REJECTED (0x20) + +/* SCSI IO Reply ResponseInfo values */ +/* (FCP-1 RSP_CODE values and SPI-3 Packetized Failure codes) */ + +#define MPI_SCSI_RSP_INFO_FUNCTION_COMPLETE (0x00000000) +#define MPI_SCSI_RSP_INFO_FCP_BURST_LEN_ERROR (0x01000000) +#define MPI_SCSI_RSP_INFO_CMND_FIELDS_INVALID (0x02000000) +#define MPI_SCSI_RSP_INFO_FCP_DATA_RO_ERROR (0x03000000) +#define MPI_SCSI_RSP_INFO_TASK_MGMT_UNSUPPORTED (0x04000000) +#define MPI_SCSI_RSP_INFO_TASK_MGMT_FAILED (0x05000000) +#define MPI_SCSI_RSP_INFO_SPI_LQ_INVALID_TYPE (0x06000000) + +#define MPI_SCSI_TASKTAG_UNKNOWN (0xFFFF) + + +/****************************************************************************/ +/* SCSI Task Management messages */ +/****************************************************************************/ + +typedef struct MPIMsgSCSITaskMgmt { + uint8_t TargetID; /* 00h */ + uint8_t Bus; /* 01h */ + uint8_t ChainOffset; /* 02h */ + uint8_t Function; /* 03h */ + uint8_t Reserved; /* 04h */ + uint8_t TaskType; /* 05h */ + uint8_t Reserved1; /* 06h */ + uint8_t MsgFlags; /* 07h */ + uint32_t MsgContext; /* 08h */ + uint8_t LUN[8]; /* 0Ch */ + uint32_t Reserved2[7]; /* 14h */ + uint32_t TaskMsgContext; /* 30h */ +} QEMU_PACKED MPIMsgSCSITaskMgmt; + +enum { + /* TaskType values */ + + MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK = 0x01, + MPI_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET = 0x02, + MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET = 0x03, + MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS = 0x04, + MPI_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET = 0x05, + MPI_SCSITASKMGMT_TASKTYPE_CLEAR_TASK_SET = 0x06, 
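The SCSIState flags above qualify a reply's status byte; drivers check them before trusting SCSIStatus or the sense buffer. A minimal sketch, assuming reply points at an MPIMsgSCSIIOReply:

    /* Illustrative only: consuming a SCSI IO reply. */
    if (reply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_VALID) {
        /* reply->SenseCount bytes of sense data were written */
    } else if (reply->SCSIState & MPI_SCSI_STATE_NO_SCSI_STATUS) {
        /* SCSIStatus is not valid; fall back to IOCStatus */
    }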
+ MPI_SCSITASKMGMT_TASKTYPE_QUERY_TASK = 0x07, + MPI_SCSITASKMGMT_TASKTYPE_CLR_ACA = 0x08, + + /* MsgFlags bits */ + + MPI_SCSITASKMGMT_MSGFLAGS_DO_NOT_SEND_TASK_IU = 0x01, + + MPI_SCSITASKMGMT_MSGFLAGS_TARGET_RESET_OPTION = 0x00, + MPI_SCSITASKMGMT_MSGFLAGS_LIP_RESET_OPTION = 0x02, + MPI_SCSITASKMGMT_MSGFLAGS_LIPRESET_RESET_OPTION = 0x04, + + MPI_SCSITASKMGMT_MSGFLAGS_SOFT_RESET_OPTION = 0x08, +}; + +/* SCSI Task Management Reply */ +typedef struct MPIMsgSCSITaskMgmtReply { + uint8_t TargetID; /* 00h */ + uint8_t Bus; /* 01h */ + uint8_t MsgLength; /* 02h */ + uint8_t Function; /* 03h */ + uint8_t ResponseCode; /* 04h */ + uint8_t TaskType; /* 05h */ + uint8_t Reserved1; /* 06h */ + uint8_t MsgFlags; /* 07h */ + uint32_t MsgContext; /* 08h */ + uint8_t Reserved2[2]; /* 0Ch */ + uint16_t IOCStatus; /* 0Eh */ + uint32_t IOCLogInfo; /* 10h */ + uint32_t TerminationCount; /* 14h */ +} QEMU_PACKED MPIMsgSCSITaskMgmtReply; + +/* ResponseCode values */ +enum { + MPI_SCSITASKMGMT_RSP_TM_COMPLETE = 0x00, + MPI_SCSITASKMGMT_RSP_INVALID_FRAME = 0x02, + MPI_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED = 0x04, + MPI_SCSITASKMGMT_RSP_TM_FAILED = 0x05, + MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED = 0x08, + MPI_SCSITASKMGMT_RSP_TM_INVALID_LUN = 0x09, + MPI_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC = 0x80, +}; + +/****************************************************************************/ +/* IOCInit message */ +/****************************************************************************/ + +typedef struct MPIMsgIOCInit { + uint8_t WhoInit; /* 00h */ + uint8_t Reserved; /* 01h */ + uint8_t ChainOffset; /* 02h */ + uint8_t Function; /* 03h */ + uint8_t Flags; /* 04h */ + uint8_t MaxDevices; /* 05h */ + uint8_t MaxBuses; /* 06h */ + uint8_t MsgFlags; /* 07h */ + uint32_t MsgContext; /* 08h */ + uint16_t ReplyFrameSize; /* 0Ch */ + uint8_t Reserved1[2]; /* 0Eh */ + uint32_t HostMfaHighAddr; /* 10h */ + uint32_t SenseBufferHighAddr; /* 14h */ + uint32_t ReplyFifoHostSignalingAddr; /* 18h */ + MPISGEntry HostPageBufferSGE; /* 1Ch */ + uint16_t MsgVersion; /* 28h */ + uint16_t HeaderVersion; /* 2Ah */ +} QEMU_PACKED MPIMsgIOCInit; + +enum { + /* WhoInit values */ + + MPI_WHOINIT_NO_ONE = 0x00, + MPI_WHOINIT_SYSTEM_BIOS = 0x01, + MPI_WHOINIT_ROM_BIOS = 0x02, + MPI_WHOINIT_PCI_PEER = 0x03, + MPI_WHOINIT_HOST_DRIVER = 0x04, + MPI_WHOINIT_MANUFACTURER = 0x05, + + /* Flags values */ + + MPI_IOCINIT_FLAGS_HOST_PAGE_BUFFER_PERSISTENT = 0x04, + MPI_IOCINIT_FLAGS_REPLY_FIFO_HOST_SIGNAL = 0x02, + MPI_IOCINIT_FLAGS_DISCARD_FW_IMAGE = 0x01, + + /* MsgVersion */ + + MPI_IOCINIT_MSGVERSION_MAJOR_MASK = 0xFF00, + MPI_IOCINIT_MSGVERSION_MAJOR_SHIFT = 8, + MPI_IOCINIT_MSGVERSION_MINOR_MASK = 0x00FF, + MPI_IOCINIT_MSGVERSION_MINOR_SHIFT = 0, + + /* HeaderVersion */ + + MPI_IOCINIT_HEADERVERSION_UNIT_MASK = 0xFF00, + MPI_IOCINIT_HEADERVERSION_UNIT_SHIFT = 8, + MPI_IOCINIT_HEADERVERSION_DEV_MASK = 0x00FF, + MPI_IOCINIT_HEADERVERSION_DEV_SHIFT = 0, +}; + +typedef struct MPIMsgIOCInitReply { + uint8_t WhoInit; /* 00h */ + uint8_t Reserved; /* 01h */ + uint8_t MsgLength; /* 02h */ + uint8_t Function; /* 03h */ + uint8_t Flags; /* 04h */ + uint8_t MaxDevices; /* 05h */ + uint8_t MaxBuses; /* 06h */ + uint8_t MsgFlags; /* 07h */ + uint32_t MsgContext; /* 08h */ + uint16_t Reserved2; /* 0Ch */ + uint16_t IOCStatus; /* 0Eh */ + uint32_t IOCLogInfo; /* 10h */ +} QEMU_PACKED MPIMsgIOCInitReply; + + + +/****************************************************************************/ +/* IOC Facts message */ 
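MsgVersion in MPIMsgIOCInit above encodes the MPI major/minor revision in one 16-bit field, per the masks just defined. For example, revision 1.5 (values illustrative):

    /* Illustrative only: MPI message version 1.5. */
    uint16_t msg_version = (1 << MPI_IOCINIT_MSGVERSION_MAJOR_SHIFT) |
                           (5 << MPI_IOCINIT_MSGVERSION_MINOR_SHIFT);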
+/****************************************************************************/ + +typedef struct MPIMsgIOCFacts { + uint8_t Reserved[2]; /* 00h */ + uint8_t ChainOffset; /* 02h */ + uint8_t Function; /* 03h */ + uint8_t Reserved1[3]; /* 04h */ + uint8_t MsgFlags; /* 07h */ + uint32_t MsgContext; /* 08h */ +} QEMU_PACKED MPIMsgIOCFacts; + +/* IOC Facts Reply */ +typedef struct MPIMsgIOCFactsReply { + uint16_t MsgVersion; /* 00h */ + uint8_t MsgLength; /* 02h */ + uint8_t Function; /* 03h */ + uint16_t HeaderVersion; /* 04h */ + uint8_t IOCNumber; /* 06h */ + uint8_t MsgFlags; /* 07h */ + uint32_t MsgContext; /* 08h */ + uint16_t IOCExceptions; /* 0Ch */ + uint16_t IOCStatus; /* 0Eh */ + uint32_t IOCLogInfo; /* 10h */ + uint8_t MaxChainDepth; /* 14h */ + uint8_t WhoInit; /* 15h */ + uint8_t BlockSize; /* 16h */ + uint8_t Flags; /* 17h */ + uint16_t ReplyQueueDepth; /* 18h */ + uint16_t RequestFrameSize; /* 1Ah */ + uint16_t Reserved_0101_FWVersion; /* 1Ch */ /* obsolete 16-bit FWVersion */ + uint16_t ProductID; /* 1Eh */ + uint32_t CurrentHostMfaHighAddr; /* 20h */ + uint16_t GlobalCredits; /* 24h */ + uint8_t NumberOfPorts; /* 26h */ + uint8_t EventState; /* 27h */ + uint32_t CurrentSenseBufferHighAddr; /* 28h */ + uint16_t CurReplyFrameSize; /* 2Ch */ + uint8_t MaxDevices; /* 2Eh */ + uint8_t MaxBuses; /* 2Fh */ + uint32_t FWImageSize; /* 30h */ + uint32_t IOCCapabilities; /* 34h */ + uint8_t FWVersionDev; /* 38h */ + uint8_t FWVersionUnit; /* 39h */ + uint8_t FWVersionMinor; /* 3Ah */ + uint8_t FWVersionMajor; /* 3Bh */ + uint16_t HighPriorityQueueDepth; /* 3Ch */ + uint16_t Reserved2; /* 3Eh */ + MPISGEntry HostPageBufferSGE; /* 40h */ + uint32_t ReplyFifoHostSignalingAddr; /* 4Ch */ +} QEMU_PACKED MPIMsgIOCFactsReply; + +enum { + MPI_IOCFACTS_MSGVERSION_MAJOR_MASK = 0xFF00, + MPI_IOCFACTS_MSGVERSION_MAJOR_SHIFT = 8, + MPI_IOCFACTS_MSGVERSION_MINOR_MASK = 0x00FF, + MPI_IOCFACTS_MSGVERSION_MINOR_SHIFT = 0, + + MPI_IOCFACTS_HDRVERSION_UNIT_MASK = 0xFF00, + MPI_IOCFACTS_HDRVERSION_UNIT_SHIFT = 8, + MPI_IOCFACTS_HDRVERSION_DEV_MASK = 0x00FF, + MPI_IOCFACTS_HDRVERSION_DEV_SHIFT = 0, + + MPI_IOCFACTS_EXCEPT_CONFIG_CHECKSUM_FAIL = 0x0001, + MPI_IOCFACTS_EXCEPT_RAID_CONFIG_INVALID = 0x0002, + MPI_IOCFACTS_EXCEPT_FW_CHECKSUM_FAIL = 0x0004, + MPI_IOCFACTS_EXCEPT_PERSISTENT_TABLE_FULL = 0x0008, + MPI_IOCFACTS_EXCEPT_METADATA_UNSUPPORTED = 0x0010, + + MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT = 0x01, + MPI_IOCFACTS_FLAGS_REPLY_FIFO_HOST_SIGNAL = 0x02, + MPI_IOCFACTS_FLAGS_HOST_PAGE_BUFFER_PERSISTENT = 0x04, + + MPI_IOCFACTS_EVENTSTATE_DISABLED = 0x00, + MPI_IOCFACTS_EVENTSTATE_ENABLED = 0x01, + + MPI_IOCFACTS_CAPABILITY_HIGH_PRI_Q = 0x00000001, + MPI_IOCFACTS_CAPABILITY_REPLY_HOST_SIGNAL = 0x00000002, + MPI_IOCFACTS_CAPABILITY_QUEUE_FULL_HANDLING = 0x00000004, + MPI_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER = 0x00000008, + MPI_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER = 0x00000010, + MPI_IOCFACTS_CAPABILITY_EXTENDED_BUFFER = 0x00000020, + MPI_IOCFACTS_CAPABILITY_EEDP = 0x00000040, + MPI_IOCFACTS_CAPABILITY_BIDIRECTIONAL = 0x00000080, + MPI_IOCFACTS_CAPABILITY_MULTICAST = 0x00000100, + MPI_IOCFACTS_CAPABILITY_SCSIIO32 = 0x00000200, + MPI_IOCFACTS_CAPABILITY_NO_SCSIIO16 = 0x00000400, + MPI_IOCFACTS_CAPABILITY_TLR = 0x00000800, +}; + +/****************************************************************************/ +/* Port Facts message and Reply */ +/****************************************************************************/ + +typedef struct MPIMsgPortFacts { + uint8_t Reserved[2]; /* 00h */ + uint8_t ChainOffset; 
/* 02h */ + uint8_t Function; /* 03h */ + uint8_t Reserved1[2]; /* 04h */ + uint8_t PortNumber; /* 06h */ + uint8_t MsgFlags; /* 07h */ + uint32_t MsgContext; /* 08h */ +} QEMU_PACKED MPIMsgPortFacts; + +typedef struct MPIMsgPortFactsReply { + uint16_t Reserved; /* 00h */ + uint8_t MsgLength; /* 02h */ + uint8_t Function; /* 03h */ + uint16_t Reserved1; /* 04h */ + uint8_t PortNumber; /* 06h */ + uint8_t MsgFlags; /* 07h */ + uint32_t MsgContext; /* 08h */ + uint16_t Reserved2; /* 0Ch */ + uint16_t IOCStatus; /* 0Eh */ + uint32_t IOCLogInfo; /* 10h */ + uint8_t Reserved3; /* 14h */ + uint8_t PortType; /* 15h */ + uint16_t MaxDevices; /* 16h */ + uint16_t PortSCSIID; /* 18h */ + uint16_t ProtocolFlags; /* 1Ah */ + uint16_t MaxPostedCmdBuffers; /* 1Ch */ + uint16_t MaxPersistentIDs; /* 1Eh */ + uint16_t MaxLanBuckets; /* 20h */ + uint8_t MaxInitiators; /* 22h */ + uint8_t Reserved4; /* 23h */ + uint32_t Reserved5; /* 24h */ +} QEMU_PACKED MPIMsgPortFactsReply; + + +enum { + /* PortType values */ + MPI_PORTFACTS_PORTTYPE_INACTIVE = 0x00, + MPI_PORTFACTS_PORTTYPE_SCSI = 0x01, + MPI_PORTFACTS_PORTTYPE_FC = 0x10, + MPI_PORTFACTS_PORTTYPE_ISCSI = 0x20, + MPI_PORTFACTS_PORTTYPE_SAS = 0x30, + + /* ProtocolFlags values */ + MPI_PORTFACTS_PROTOCOL_LOGBUSADDR = 0x01, + MPI_PORTFACTS_PROTOCOL_LAN = 0x02, + MPI_PORTFACTS_PROTOCOL_TARGET = 0x04, + MPI_PORTFACTS_PROTOCOL_INITIATOR = 0x08, +}; + + +/****************************************************************************/ +/* Port Enable Message */ +/****************************************************************************/ + +typedef struct MPIMsgPortEnable { + uint8_t Reserved[2]; /* 00h */ + uint8_t ChainOffset; /* 02h */ + uint8_t Function; /* 03h */ + uint8_t Reserved1[2]; /* 04h */ + uint8_t PortNumber; /* 06h */ + uint8_t MsgFlags; /* 07h */ + uint32_t MsgContext; /* 08h */ +} QEMU_PACKED MPIMsgPortEnable; + +typedef struct MPIMsgPortEnableReply { + uint8_t Reserved[2]; /* 00h */ + uint8_t MsgLength; /* 02h */ + uint8_t Function; /* 03h */ + uint8_t Reserved1[2]; /* 04h */ + uint8_t PortNumber; /* 06h */ + uint8_t MsgFlags; /* 07h */ + uint32_t MsgContext; /* 08h */ + uint16_t Reserved2; /* 0Ch */ + uint16_t IOCStatus; /* 0Eh */ + uint32_t IOCLogInfo; /* 10h */ +} QEMU_PACKED MPIMsgPortEnableReply; + +/****************************************************************************/ +/* Event Notification messages */ +/****************************************************************************/ + +typedef struct MPIMsgEventNotify { + uint8_t Switch; /* 00h */ + uint8_t Reserved; /* 01h */ + uint8_t ChainOffset; /* 02h */ + uint8_t Function; /* 03h */ + uint8_t Reserved1[3]; /* 04h */ + uint8_t MsgFlags; /* 07h */ + uint32_t MsgContext; /* 08h */ +} QEMU_PACKED MPIMsgEventNotify; + +/* Event Notification Reply */ + +typedef struct MPIMsgEventNotifyReply { + uint16_t EventDataLength; /* 00h */ + uint8_t MsgLength; /* 02h */ + uint8_t Function; /* 03h */ + uint8_t Reserved1[2]; /* 04h */ + uint8_t AckRequired; /* 06h */ + uint8_t MsgFlags; /* 07h */ + uint32_t MsgContext; /* 08h */ + uint8_t Reserved2[2]; /* 0Ch */ + uint16_t IOCStatus; /* 0Eh */ + uint32_t IOCLogInfo; /* 10h */ + uint32_t Event; /* 14h */ + uint32_t EventContext; /* 18h */ + uint32_t Data[1]; /* 1Ch */ +} QEMU_PACKED MPIMsgEventNotifyReply; + +/* Event Acknowledge */ + +typedef struct MPIMsgEventAck { + uint8_t Reserved[2]; /* 00h */ + uint8_t ChainOffset; /* 02h */ + uint8_t Function; /* 03h */ + uint8_t Reserved1[3]; /* 04h */ + uint8_t MsgFlags; /* 07h */ + uint32_t 
MsgContext; /* 08h */ + uint32_t Event; /* 0Ch */ + uint32_t EventContext; /* 10h */ +} QEMU_PACKED MPIMsgEventAck; + +typedef struct MPIMsgEventAckReply { + uint8_t Reserved[2]; /* 00h */ + uint8_t MsgLength; /* 02h */ + uint8_t Function; /* 03h */ + uint8_t Reserved1[3]; /* 04h */ + uint8_t MsgFlags; /* 07h */ + uint32_t MsgContext; /* 08h */ + uint16_t Reserved2; /* 0Ch */ + uint16_t IOCStatus; /* 0Eh */ + uint32_t IOCLogInfo; /* 10h */ +} QEMU_PACKED MPIMsgEventAckReply; + +enum { + /* Switch */ + + MPI_EVENT_NOTIFICATION_SWITCH_OFF = 0x00, + MPI_EVENT_NOTIFICATION_SWITCH_ON = 0x01, + + /* Event */ + + MPI_EVENT_NONE = 0x00000000, + MPI_EVENT_LOG_DATA = 0x00000001, + MPI_EVENT_STATE_CHANGE = 0x00000002, + MPI_EVENT_UNIT_ATTENTION = 0x00000003, + MPI_EVENT_IOC_BUS_RESET = 0x00000004, + MPI_EVENT_EXT_BUS_RESET = 0x00000005, + MPI_EVENT_RESCAN = 0x00000006, + MPI_EVENT_LINK_STATUS_CHANGE = 0x00000007, + MPI_EVENT_LOOP_STATE_CHANGE = 0x00000008, + MPI_EVENT_LOGOUT = 0x00000009, + MPI_EVENT_EVENT_CHANGE = 0x0000000A, + MPI_EVENT_INTEGRATED_RAID = 0x0000000B, + MPI_EVENT_SCSI_DEVICE_STATUS_CHANGE = 0x0000000C, + MPI_EVENT_ON_BUS_TIMER_EXPIRED = 0x0000000D, + MPI_EVENT_QUEUE_FULL = 0x0000000E, + MPI_EVENT_SAS_DEVICE_STATUS_CHANGE = 0x0000000F, + MPI_EVENT_SAS_SES = 0x00000010, + MPI_EVENT_PERSISTENT_TABLE_FULL = 0x00000011, + MPI_EVENT_SAS_PHY_LINK_STATUS = 0x00000012, + MPI_EVENT_SAS_DISCOVERY_ERROR = 0x00000013, + MPI_EVENT_IR_RESYNC_UPDATE = 0x00000014, + MPI_EVENT_IR2 = 0x00000015, + MPI_EVENT_SAS_DISCOVERY = 0x00000016, + MPI_EVENT_SAS_BROADCAST_PRIMITIVE = 0x00000017, + MPI_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE = 0x00000018, + MPI_EVENT_SAS_INIT_TABLE_OVERFLOW = 0x00000019, + MPI_EVENT_SAS_SMP_ERROR = 0x0000001A, + MPI_EVENT_SAS_EXPANDER_STATUS_CHANGE = 0x0000001B, + MPI_EVENT_LOG_ENTRY_ADDED = 0x00000021, + + /* AckRequired field values */ + + MPI_EVENT_NOTIFICATION_ACK_NOT_REQUIRED = 0x00, + MPI_EVENT_NOTIFICATION_ACK_REQUIRED = 0x01, +}; + +/**************************************************************************** +* Config Request Message +****************************************************************************/ + +typedef struct MPIMsgConfig { + uint8_t Action; /* 00h */ + uint8_t Reserved; /* 01h */ + uint8_t ChainOffset; /* 02h */ + uint8_t Function; /* 03h */ + uint16_t ExtPageLength; /* 04h */ + uint8_t ExtPageType; /* 06h */ + uint8_t MsgFlags; /* 07h */ + uint32_t MsgContext; /* 08h */ + uint8_t Reserved2[8]; /* 0Ch */ + uint8_t PageVersion; /* 14h */ + uint8_t PageLength; /* 15h */ + uint8_t PageNumber; /* 16h */ + uint8_t PageType; /* 17h */ + uint32_t PageAddress; /* 18h */ + MPISGEntry PageBufferSGE; /* 1Ch */ +} QEMU_PACKED MPIMsgConfig; + +/* Action field values */ + +enum { + MPI_CONFIG_ACTION_PAGE_HEADER = 0x00, + MPI_CONFIG_ACTION_PAGE_READ_CURRENT = 0x01, + MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT = 0x02, + MPI_CONFIG_ACTION_PAGE_DEFAULT = 0x03, + MPI_CONFIG_ACTION_PAGE_WRITE_NVRAM = 0x04, + MPI_CONFIG_ACTION_PAGE_READ_DEFAULT = 0x05, + MPI_CONFIG_ACTION_PAGE_READ_NVRAM = 0x06, +}; + + +/* Config Reply Message */ +typedef struct MPIMsgConfigReply { + uint8_t Action; /* 00h */ + uint8_t Reserved; /* 01h */ + uint8_t MsgLength; /* 02h */ + uint8_t Function; /* 03h */ + uint16_t ExtPageLength; /* 04h */ + uint8_t ExtPageType; /* 06h */ + uint8_t MsgFlags; /* 07h */ + uint32_t MsgContext; /* 08h */ + uint8_t Reserved2[2]; /* 0Ch */ + uint16_t IOCStatus; /* 0Eh */ + uint32_t IOCLogInfo; /* 10h */ + uint8_t PageVersion; /* 14h */ + uint8_t PageLength; /* 15h */ + uint8_t 
PageNumber; /* 16h */ + uint8_t PageType; /* 17h */ +} QEMU_PACKED MPIMsgConfigReply; + +enum { + /* PageAddress field values */ + MPI_CONFIG_PAGEATTR_READ_ONLY = 0x00, + MPI_CONFIG_PAGEATTR_CHANGEABLE = 0x10, + MPI_CONFIG_PAGEATTR_PERSISTENT = 0x20, + MPI_CONFIG_PAGEATTR_RO_PERSISTENT = 0x30, + MPI_CONFIG_PAGEATTR_MASK = 0xF0, + + MPI_CONFIG_PAGETYPE_IO_UNIT = 0x00, + MPI_CONFIG_PAGETYPE_IOC = 0x01, + MPI_CONFIG_PAGETYPE_BIOS = 0x02, + MPI_CONFIG_PAGETYPE_SCSI_PORT = 0x03, + MPI_CONFIG_PAGETYPE_SCSI_DEVICE = 0x04, + MPI_CONFIG_PAGETYPE_FC_PORT = 0x05, + MPI_CONFIG_PAGETYPE_FC_DEVICE = 0x06, + MPI_CONFIG_PAGETYPE_LAN = 0x07, + MPI_CONFIG_PAGETYPE_RAID_VOLUME = 0x08, + MPI_CONFIG_PAGETYPE_MANUFACTURING = 0x09, + MPI_CONFIG_PAGETYPE_RAID_PHYSDISK = 0x0A, + MPI_CONFIG_PAGETYPE_INBAND = 0x0B, + MPI_CONFIG_PAGETYPE_EXTENDED = 0x0F, + MPI_CONFIG_PAGETYPE_MASK = 0x0F, + + MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT = 0x10, + MPI_CONFIG_EXTPAGETYPE_SAS_EXPANDER = 0x11, + MPI_CONFIG_EXTPAGETYPE_SAS_DEVICE = 0x12, + MPI_CONFIG_EXTPAGETYPE_SAS_PHY = 0x13, + MPI_CONFIG_EXTPAGETYPE_LOG = 0x14, + MPI_CONFIG_EXTPAGETYPE_ENCLOSURE = 0x15, + + MPI_SCSI_PORT_PGAD_PORT_MASK = 0x000000FF, + + MPI_SCSI_DEVICE_FORM_MASK = 0xF0000000, + MPI_SCSI_DEVICE_FORM_BUS_TID = 0x00000000, + MPI_SCSI_DEVICE_TARGET_ID_MASK = 0x000000FF, + MPI_SCSI_DEVICE_TARGET_ID_SHIFT = 0, + MPI_SCSI_DEVICE_BUS_MASK = 0x0000FF00, + MPI_SCSI_DEVICE_BUS_SHIFT = 8, + MPI_SCSI_DEVICE_FORM_TARGET_MODE = 0x10000000, + MPI_SCSI_DEVICE_TM_RESPOND_ID_MASK = 0x000000FF, + MPI_SCSI_DEVICE_TM_RESPOND_ID_SHIFT = 0, + MPI_SCSI_DEVICE_TM_BUS_MASK = 0x0000FF00, + MPI_SCSI_DEVICE_TM_BUS_SHIFT = 8, + MPI_SCSI_DEVICE_TM_INIT_ID_MASK = 0x00FF0000, + MPI_SCSI_DEVICE_TM_INIT_ID_SHIFT = 16, + + MPI_FC_PORT_PGAD_PORT_MASK = 0xF0000000, + MPI_FC_PORT_PGAD_PORT_SHIFT = 28, + MPI_FC_PORT_PGAD_FORM_MASK = 0x0F000000, + MPI_FC_PORT_PGAD_FORM_INDEX = 0x01000000, + MPI_FC_PORT_PGAD_INDEX_MASK = 0x0000FFFF, + MPI_FC_PORT_PGAD_INDEX_SHIFT = 0, + + MPI_FC_DEVICE_PGAD_PORT_MASK = 0xF0000000, + MPI_FC_DEVICE_PGAD_PORT_SHIFT = 28, + MPI_FC_DEVICE_PGAD_FORM_MASK = 0x0F000000, + MPI_FC_DEVICE_PGAD_FORM_NEXT_DID = 0x00000000, + MPI_FC_DEVICE_PGAD_ND_PORT_MASK = 0xF0000000, + MPI_FC_DEVICE_PGAD_ND_PORT_SHIFT = 28, + MPI_FC_DEVICE_PGAD_ND_DID_MASK = 0x00FFFFFF, + MPI_FC_DEVICE_PGAD_ND_DID_SHIFT = 0, + MPI_FC_DEVICE_PGAD_FORM_BUS_TID = 0x01000000, + MPI_FC_DEVICE_PGAD_BT_BUS_MASK = 0x0000FF00, + MPI_FC_DEVICE_PGAD_BT_BUS_SHIFT = 8, + MPI_FC_DEVICE_PGAD_BT_TID_MASK = 0x000000FF, + MPI_FC_DEVICE_PGAD_BT_TID_SHIFT = 0, + + MPI_PHYSDISK_PGAD_PHYSDISKNUM_MASK = 0x000000FF, + MPI_PHYSDISK_PGAD_PHYSDISKNUM_SHIFT = 0, + + MPI_SAS_EXPAND_PGAD_FORM_MASK = 0xF0000000, + MPI_SAS_EXPAND_PGAD_FORM_SHIFT = 28, + MPI_SAS_EXPAND_PGAD_FORM_GET_NEXT_HANDLE = 0x00000000, + MPI_SAS_EXPAND_PGAD_FORM_HANDLE_PHY_NUM = 0x00000001, + MPI_SAS_EXPAND_PGAD_FORM_HANDLE = 0x00000002, + MPI_SAS_EXPAND_PGAD_GNH_MASK_HANDLE = 0x0000FFFF, + MPI_SAS_EXPAND_PGAD_GNH_SHIFT_HANDLE = 0, + MPI_SAS_EXPAND_PGAD_HPN_MASK_PHY = 0x00FF0000, + MPI_SAS_EXPAND_PGAD_HPN_SHIFT_PHY = 16, + MPI_SAS_EXPAND_PGAD_HPN_MASK_HANDLE = 0x0000FFFF, + MPI_SAS_EXPAND_PGAD_HPN_SHIFT_HANDLE = 0, + MPI_SAS_EXPAND_PGAD_H_MASK_HANDLE = 0x0000FFFF, + MPI_SAS_EXPAND_PGAD_H_SHIFT_HANDLE = 0, + + MPI_SAS_DEVICE_PGAD_FORM_MASK = 0xF0000000, + MPI_SAS_DEVICE_PGAD_FORM_SHIFT = 28, + MPI_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE = 0x00000000, + MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID = 0x00000001, + MPI_SAS_DEVICE_PGAD_FORM_HANDLE = 0x00000002, + 
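/* e.g. PageAddress 0x10000005 selects the BUS_TARGET_ID form: bus 0, target 5 */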
MPI_SAS_DEVICE_PGAD_GNH_HANDLE_MASK = 0x0000FFFF, + MPI_SAS_DEVICE_PGAD_GNH_HANDLE_SHIFT = 0, + MPI_SAS_DEVICE_PGAD_BT_BUS_MASK = 0x0000FF00, + MPI_SAS_DEVICE_PGAD_BT_BUS_SHIFT = 8, + MPI_SAS_DEVICE_PGAD_BT_TID_MASK = 0x000000FF, + MPI_SAS_DEVICE_PGAD_BT_TID_SHIFT = 0, + MPI_SAS_DEVICE_PGAD_H_HANDLE_MASK = 0x0000FFFF, + MPI_SAS_DEVICE_PGAD_H_HANDLE_SHIFT = 0, + + MPI_SAS_PHY_PGAD_FORM_MASK = 0xF0000000, + MPI_SAS_PHY_PGAD_FORM_SHIFT = 28, + MPI_SAS_PHY_PGAD_FORM_PHY_NUMBER = 0x0, + MPI_SAS_PHY_PGAD_FORM_PHY_TBL_INDEX = 0x1, + MPI_SAS_PHY_PGAD_PHY_NUMBER_MASK = 0x000000FF, + MPI_SAS_PHY_PGAD_PHY_NUMBER_SHIFT = 0, + MPI_SAS_PHY_PGAD_PHY_TBL_INDEX_MASK = 0x0000FFFF, + MPI_SAS_PHY_PGAD_PHY_TBL_INDEX_SHIFT = 0, + + MPI_SAS_ENCLOS_PGAD_FORM_MASK = 0xF0000000, + MPI_SAS_ENCLOS_PGAD_FORM_SHIFT = 28, + MPI_SAS_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE = 0x00000000, + MPI_SAS_ENCLOS_PGAD_FORM_HANDLE = 0x00000001, + MPI_SAS_ENCLOS_PGAD_GNH_HANDLE_MASK = 0x0000FFFF, + MPI_SAS_ENCLOS_PGAD_GNH_HANDLE_SHIFT = 0, + MPI_SAS_ENCLOS_PGAD_H_HANDLE_MASK = 0x0000FFFF, + MPI_SAS_ENCLOS_PGAD_H_HANDLE_SHIFT = 0, +}; + +/* Too many structs and definitions... see mptconfig.c for the few + * that are used. + */ + +/****************************************************************************/ +/* Firmware Upload message and associated structures */ +/****************************************************************************/ + +enum { + /* defines for using the ProductId field */ + MPI_FW_HEADER_PID_TYPE_MASK = 0xF000, + MPI_FW_HEADER_PID_TYPE_SCSI = 0x0000, + MPI_FW_HEADER_PID_TYPE_FC = 0x1000, + MPI_FW_HEADER_PID_TYPE_SAS = 0x2000, + + MPI_FW_HEADER_PID_PROD_MASK = 0x0F00, + MPI_FW_HEADER_PID_PROD_INITIATOR_SCSI = 0x0100, + MPI_FW_HEADER_PID_PROD_TARGET_INITIATOR_SCSI = 0x0200, + MPI_FW_HEADER_PID_PROD_TARGET_SCSI = 0x0300, + MPI_FW_HEADER_PID_PROD_IM_SCSI = 0x0400, + MPI_FW_HEADER_PID_PROD_IS_SCSI = 0x0500, + MPI_FW_HEADER_PID_PROD_CTX_SCSI = 0x0600, + MPI_FW_HEADER_PID_PROD_IR_SCSI = 0x0700, + + MPI_FW_HEADER_PID_FAMILY_MASK = 0x00FF, + + /* SCSI */ + MPI_FW_HEADER_PID_FAMILY_1030A0_SCSI = 0x0001, + MPI_FW_HEADER_PID_FAMILY_1030B0_SCSI = 0x0002, + MPI_FW_HEADER_PID_FAMILY_1030B1_SCSI = 0x0003, + MPI_FW_HEADER_PID_FAMILY_1030C0_SCSI = 0x0004, + MPI_FW_HEADER_PID_FAMILY_1020A0_SCSI = 0x0005, + MPI_FW_HEADER_PID_FAMILY_1020B0_SCSI = 0x0006, + MPI_FW_HEADER_PID_FAMILY_1020B1_SCSI = 0x0007, + MPI_FW_HEADER_PID_FAMILY_1020C0_SCSI = 0x0008, + MPI_FW_HEADER_PID_FAMILY_1035A0_SCSI = 0x0009, + MPI_FW_HEADER_PID_FAMILY_1035B0_SCSI = 0x000A, + MPI_FW_HEADER_PID_FAMILY_1030TA0_SCSI = 0x000B, + MPI_FW_HEADER_PID_FAMILY_1020TA0_SCSI = 0x000C, + + /* Fibre Channel */ + MPI_FW_HEADER_PID_FAMILY_909_FC = 0x0000, + MPI_FW_HEADER_PID_FAMILY_919_FC = 0x0001, /* 919 and 929 */ + MPI_FW_HEADER_PID_FAMILY_919X_FC = 0x0002, /* 919X and 929X */ + MPI_FW_HEADER_PID_FAMILY_919XL_FC = 0x0003, /* 919XL and 929XL */ + MPI_FW_HEADER_PID_FAMILY_939X_FC = 0x0004, /* 939X and 949X */ + MPI_FW_HEADER_PID_FAMILY_959_FC = 0x0005, + MPI_FW_HEADER_PID_FAMILY_949E_FC = 0x0006, + + /* SAS */ + MPI_FW_HEADER_PID_FAMILY_1064_SAS = 0x0001, + MPI_FW_HEADER_PID_FAMILY_1068_SAS = 0x0002, + MPI_FW_HEADER_PID_FAMILY_1078_SAS = 0x0003, + MPI_FW_HEADER_PID_FAMILY_106xE_SAS = 0x0004, /* 1068E, 1066E, and 1064E */ +}; + +#endif diff --git a/hw/scsi/mptconfig.c b/hw/scsi/mptconfig.c new file mode 100644 index 000000000..19d01f39f --- /dev/null +++ b/hw/scsi/mptconfig.c @@ -0,0 +1,904 @@ +/* + * QEMU LSI SAS1068 Host Bus Adapter emulation - configuration pages + * + * 
Copyright (c) 2016 Red Hat, Inc. + * + * Author: Paolo Bonzini + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + */ +#include "qemu/osdep.h" +#include "hw/pci/pci.h" +#include "hw/scsi/scsi.h" + +#include "mptsas.h" +#include "mpi.h" +#include "trace.h" + +/* Generic functions for marshaling and unmarshaling. */ + +#define repl1(x) x +#define repl2(x) x x +#define repl3(x) x x x +#define repl4(x) x x x x +#define repl5(x) x x x x x +#define repl6(x) x x x x x x +#define repl7(x) x x x x x x x +#define repl8(x) x x x x x x x x + +#define repl(n, x) glue(repl, n)(x) + +typedef union PackValue { + uint64_t ll; + char *str; +} PackValue; + +static size_t vfill(uint8_t *data, size_t size, const char *fmt, va_list ap) +{ + size_t ofs; + PackValue val; + const char *p; + + ofs = 0; + p = fmt; + while (*p) { + memset(&val, 0, sizeof(val)); + switch (*p) { + case '*': + p++; + break; + case 'b': + case 'w': + case 'l': + val.ll = va_arg(ap, int); + break; + case 'q': + val.ll = va_arg(ap, int64_t); + break; + case 's': + val.str = va_arg(ap, void *); + break; + } + switch (*p++) { + case 'b': + if (data) { + stb_p(data + ofs, val.ll); + } + ofs++; + break; + case 'w': + if (data) { + stw_le_p(data + ofs, val.ll); + } + ofs += 2; + break; + case 'l': + if (data) { + stl_le_p(data + ofs, val.ll); + } + ofs += 4; + break; + case 'q': + if (data) { + stq_le_p(data + ofs, val.ll); + } + ofs += 8; + break; + case 's': + { + int cnt = atoi(p); + if (data) { + if (val.str) { + strncpy((void *)data + ofs, val.str, cnt); + } else { + memset((void *)data + ofs, 0, cnt); + } + } + ofs += cnt; + break; + } + } + } + + return ofs; +} + +static size_t vpack(uint8_t **p_data, const char *fmt, va_list ap1) +{ + size_t size = 0; + uint8_t *data = NULL; + + if (p_data) { + va_list ap2; + + va_copy(ap2, ap1); + size = vfill(NULL, 0, fmt, ap2); + *p_data = data = g_malloc(size); + va_end(ap2); + } + return vfill(data, size, fmt, ap1); +} + +static size_t fill(uint8_t *data, size_t size, const char *fmt, ...) +{ + va_list ap; + size_t ret; + + va_start(ap, fmt); + ret = vfill(data, size, fmt, ap); + va_end(ap); + + return ret; +} + +/* Functions to build the page header and fill in the length, always used + * through the macros. + */ + +#define MPTSAS_CONFIG_PACK(number, type, version, fmt, ...) \ + mptsas_config_pack(data, "b*bbb" fmt, version, number, type, \ + ## __VA_ARGS__) + +static size_t mptsas_config_pack(uint8_t **data, const char *fmt, ...) +{ + va_list ap; + size_t ret; + + va_start(ap, fmt); + ret = vpack(data, fmt, ap); + va_end(ap); + + if (data) { + assert(ret / 4 < 256 && (ret % 4) == 0); + stb_p(*data + 1, ret / 4); + } + return ret; +} + +#define MPTSAS_CONFIG_PACK_EXT(number, type, version, fmt, ...) \ + mptsas_config_pack_ext(data, "b*bbb*wb*b" fmt, version, number, \ + MPI_CONFIG_PAGETYPE_EXTENDED, type, ## __VA_ARGS__) + +static size_t mptsas_config_pack_ext(uint8_t **data, const char *fmt, ...) 
+{ + va_list ap; + size_t ret; + + va_start(ap, fmt); + ret = vpack(data, fmt, ap); + va_end(ap); + + if (data) { + assert(ret < 65536 && (ret % 4) == 0); + stw_le_p(*data + 4, ret / 4); + } + return ret; +} + +/* Manufacturing pages */ + +static +size_t mptsas_config_manufacturing_0(MPTSASState *s, uint8_t **data, int address) +{ + return MPTSAS_CONFIG_PACK(0, MPI_CONFIG_PAGETYPE_MANUFACTURING, 0x00, + "s16s8s16s16s16", + "QEMU MPT Fusion", + "2.5", + "QEMU MPT Fusion", + "QEMU", + "0000111122223333"); +} + +static +size_t mptsas_config_manufacturing_1(MPTSASState *s, uint8_t **data, int address) +{ + /* VPD - all zeros */ + return MPTSAS_CONFIG_PACK(1, MPI_CONFIG_PAGETYPE_MANUFACTURING, 0x00, + "*s256"); +} + +static +size_t mptsas_config_manufacturing_2(MPTSASState *s, uint8_t **data, int address) +{ + PCIDeviceClass *pcic = PCI_DEVICE_GET_CLASS(s); + return MPTSAS_CONFIG_PACK(2, MPI_CONFIG_PAGETYPE_MANUFACTURING, 0x00, + "wb*b*l", + pcic->device_id, pcic->revision); +} + +static +size_t mptsas_config_manufacturing_3(MPTSASState *s, uint8_t **data, int address) +{ + PCIDeviceClass *pcic = PCI_DEVICE_GET_CLASS(s); + return MPTSAS_CONFIG_PACK(3, MPI_CONFIG_PAGETYPE_MANUFACTURING, 0x00, + "wb*b*l", + pcic->device_id, pcic->revision); +} + +static +size_t mptsas_config_manufacturing_4(MPTSASState *s, uint8_t **data, int address) +{ + /* All zeros */ + return MPTSAS_CONFIG_PACK(4, MPI_CONFIG_PAGETYPE_MANUFACTURING, 0x05, + "*l*b*b*b*b*b*b*w*s56*l*l*l*l*l*l" + "*b*b*w*b*b*w*l*l"); +} + +static +size_t mptsas_config_manufacturing_5(MPTSASState *s, uint8_t **data, int address) +{ + return MPTSAS_CONFIG_PACK(5, MPI_CONFIG_PAGETYPE_MANUFACTURING, 0x02, + "q*b*b*w*l*l", s->sas_addr); +} + +static +size_t mptsas_config_manufacturing_6(MPTSASState *s, uint8_t **data, int address) +{ + return MPTSAS_CONFIG_PACK(6, MPI_CONFIG_PAGETYPE_MANUFACTURING, 0x00, + "*l"); +} + +static +size_t mptsas_config_manufacturing_7(MPTSASState *s, uint8_t **data, int address) +{ + return MPTSAS_CONFIG_PACK(7, MPI_CONFIG_PAGETYPE_MANUFACTURING, 0x00, + "*l*l*l*s16*b*b*w", MPTSAS_NUM_PORTS); +} + +static +size_t mptsas_config_manufacturing_8(MPTSASState *s, uint8_t **data, int address) +{ + return MPTSAS_CONFIG_PACK(8, MPI_CONFIG_PAGETYPE_MANUFACTURING, 0x00, + "*l"); +} + +static +size_t mptsas_config_manufacturing_9(MPTSASState *s, uint8_t **data, int address) +{ + return MPTSAS_CONFIG_PACK(9, MPI_CONFIG_PAGETYPE_MANUFACTURING, 0x00, + "*l"); +} + +static +size_t mptsas_config_manufacturing_10(MPTSASState *s, uint8_t **data, int address) +{ + return MPTSAS_CONFIG_PACK(10, MPI_CONFIG_PAGETYPE_MANUFACTURING, 0x00, + "*l"); +} + +/* I/O unit pages */ + +static +size_t mptsas_config_io_unit_0(MPTSASState *s, uint8_t **data, int address) +{ + PCIDevice *pci = PCI_DEVICE(s); + uint64_t unique_value = 0x53504D554D4551LL; /* "QEMUMPTx" */ + + unique_value |= (uint64_t)pci->devfn << 56; + return MPTSAS_CONFIG_PACK(0, MPI_CONFIG_PAGETYPE_IO_UNIT, 0x00, + "q", unique_value); +} + +static +size_t mptsas_config_io_unit_1(MPTSASState *s, uint8_t **data, int address) +{ + return MPTSAS_CONFIG_PACK(1, MPI_CONFIG_PAGETYPE_IO_UNIT, 0x02, "l", + 0x41 /* single function, RAID disabled */ ); +} + +static +size_t mptsas_config_io_unit_2(MPTSASState *s, uint8_t **data, int address) +{ + PCIDevice *pci = PCI_DEVICE(s); + uint8_t devfn = pci->devfn; + return MPTSAS_CONFIG_PACK(2, MPI_CONFIG_PAGETYPE_IO_UNIT, 0x02, + "llbbw*b*b*w*b*b*w*b*b*w*l", + 0, 0x100, 0 /* pci bus? 
*/, devfn, 0); +} + +static +size_t mptsas_config_io_unit_3(MPTSASState *s, uint8_t **data, int address) +{ + return MPTSAS_CONFIG_PACK(3, MPI_CONFIG_PAGETYPE_IO_UNIT, 0x01, + "*b*b*w*l"); +} + +static +size_t mptsas_config_io_unit_4(MPTSASState *s, uint8_t **data, int address) +{ + return MPTSAS_CONFIG_PACK(4, MPI_CONFIG_PAGETYPE_IO_UNIT, 0x00, "*l*l*q"); +} + +/* I/O controller pages */ + +static +size_t mptsas_config_ioc_0(MPTSASState *s, uint8_t **data, int address) +{ + PCIDeviceClass *pcic = PCI_DEVICE_GET_CLASS(s); + + return MPTSAS_CONFIG_PACK(0, MPI_CONFIG_PAGETYPE_IOC, 0x01, + "*l*lwwb*b*b*blww", + pcic->vendor_id, pcic->device_id, pcic->revision, + pcic->class_id, pcic->subsystem_vendor_id, + pcic->subsystem_id); +} + +static +size_t mptsas_config_ioc_1(MPTSASState *s, uint8_t **data, int address) +{ + return MPTSAS_CONFIG_PACK(1, MPI_CONFIG_PAGETYPE_IOC, 0x03, + "*l*l*b*b*b*b"); +} + +static +size_t mptsas_config_ioc_2(MPTSASState *s, uint8_t **data, int address) +{ + return MPTSAS_CONFIG_PACK(2, MPI_CONFIG_PAGETYPE_IOC, 0x04, + "*l*b*b*b*b"); +} + +static +size_t mptsas_config_ioc_3(MPTSASState *s, uint8_t **data, int address) +{ + return MPTSAS_CONFIG_PACK(3, MPI_CONFIG_PAGETYPE_IOC, 0x00, + "*b*b*w"); +} + +static +size_t mptsas_config_ioc_4(MPTSASState *s, uint8_t **data, int address) +{ + return MPTSAS_CONFIG_PACK(4, MPI_CONFIG_PAGETYPE_IOC, 0x00, + "*b*b*w"); +} + +static +size_t mptsas_config_ioc_5(MPTSASState *s, uint8_t **data, int address) +{ + return MPTSAS_CONFIG_PACK(5, MPI_CONFIG_PAGETYPE_IOC, 0x00, + "*l*b*b*w"); +} + +static +size_t mptsas_config_ioc_6(MPTSASState *s, uint8_t **data, int address) +{ + return MPTSAS_CONFIG_PACK(6, MPI_CONFIG_PAGETYPE_IOC, 0x01, + "*l*b*b*b*b*b*b*b*b*b*b*w*l*l*l*l*b*b*w" + "*w*w*w*w*l*l*l"); +} + +/* SAS I/O unit pages (extended) */ + +#define MPTSAS_CONFIG_SAS_IO_UNIT_0_SIZE 16 + +#define MPI_SAS_IOUNIT0_RATE_FAILED_SPEED_NEGOTIATION 0x02 +#define MPI_SAS_IOUNIT0_RATE_1_5 0x08 +#define MPI_SAS_IOUNIT0_RATE_3_0 0x09 + +#define MPI_SAS_DEVICE_INFO_NO_DEVICE 0x00000000 +#define MPI_SAS_DEVICE_INFO_END_DEVICE 0x00000001 +#define MPI_SAS_DEVICE_INFO_SSP_TARGET 0x00000400 + +#define MPI_SAS_DEVICE0_ASTATUS_NO_ERRORS 0x00 + +#define MPI_SAS_DEVICE0_FLAGS_DEVICE_PRESENT 0x0001 +#define MPI_SAS_DEVICE0_FLAGS_DEVICE_MAPPED 0x0002 +#define MPI_SAS_DEVICE0_FLAGS_MAPPING_PERSISTENT 0x0004 + + + +static SCSIDevice *mptsas_phy_get_device(MPTSASState *s, int i, + int *phy_handle, int *dev_handle) +{ + SCSIDevice *d = scsi_device_find(&s->bus, 0, i, 0); + + if (phy_handle) { + *phy_handle = i + 1; + } + if (dev_handle) { + *dev_handle = d ? i + 1 + MPTSAS_NUM_PORTS : 0; + } + return d; +} + +static +size_t mptsas_config_sas_io_unit_0(MPTSASState *s, uint8_t **data, int address) +{ + size_t size = MPTSAS_CONFIG_PACK_EXT(0, MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT, 0x04, + "*w*wb*b*w" + repl(MPTSAS_NUM_PORTS, "*s16"), + MPTSAS_NUM_PORTS); + + if (data) { + size_t ofs = size - MPTSAS_NUM_PORTS * MPTSAS_CONFIG_SAS_IO_UNIT_0_SIZE; + int i; + + for (i = 0; i < MPTSAS_NUM_PORTS; i++) { + int phy_handle, dev_handle; + SCSIDevice *dev = mptsas_phy_get_device(s, i, &phy_handle, &dev_handle); + + fill(*data + ofs, MPTSAS_CONFIG_SAS_IO_UNIT_0_SIZE, + "bbbblwwl", i, 0, 0, + (dev + ? MPI_SAS_IOUNIT0_RATE_3_0 + : MPI_SAS_IOUNIT0_RATE_FAILED_SPEED_NEGOTIATION), + (dev + ? 
MPI_SAS_DEVICE_INFO_END_DEVICE | MPI_SAS_DEVICE_INFO_SSP_TARGET + : MPI_SAS_DEVICE_INFO_NO_DEVICE), + dev_handle, + dev_handle, + 0); + ofs += MPTSAS_CONFIG_SAS_IO_UNIT_0_SIZE; + } + assert(ofs == size); + } + return size; +} + +#define MPTSAS_CONFIG_SAS_IO_UNIT_1_SIZE 12 + +static +size_t mptsas_config_sas_io_unit_1(MPTSASState *s, uint8_t **data, int address) +{ + size_t size = MPTSAS_CONFIG_PACK_EXT(1, MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT, 0x07, + "*w*w*w*wb*b*b*b" + repl(MPTSAS_NUM_PORTS, "*s12"), + MPTSAS_NUM_PORTS); + + if (data) { + size_t ofs = size - MPTSAS_NUM_PORTS * MPTSAS_CONFIG_SAS_IO_UNIT_1_SIZE; + int i; + + for (i = 0; i < MPTSAS_NUM_PORTS; i++) { + SCSIDevice *dev = mptsas_phy_get_device(s, i, NULL, NULL); + fill(*data + ofs, MPTSAS_CONFIG_SAS_IO_UNIT_1_SIZE, + "bbbblww", i, 0, 0, + (MPI_SAS_IOUNIT0_RATE_3_0 << 4) | MPI_SAS_IOUNIT0_RATE_1_5, + (dev + ? MPI_SAS_DEVICE_INFO_END_DEVICE | MPI_SAS_DEVICE_INFO_SSP_TARGET + : MPI_SAS_DEVICE_INFO_NO_DEVICE), + 0, 0); + ofs += MPTSAS_CONFIG_SAS_IO_UNIT_1_SIZE; + } + assert(ofs == size); + } + return size; +} + +static +size_t mptsas_config_sas_io_unit_2(MPTSASState *s, uint8_t **data, int address) +{ + return MPTSAS_CONFIG_PACK_EXT(2, MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT, 0x06, + "*b*b*w*w*w*b*b*w"); +} + +static +size_t mptsas_config_sas_io_unit_3(MPTSASState *s, uint8_t **data, int address) +{ + return MPTSAS_CONFIG_PACK_EXT(3, MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT, 0x06, + "*l*l*l*l*l*l*l*l*l"); +} + +/* SAS PHY pages (extended) */ + +static int mptsas_phy_addr_get(MPTSASState *s, int address) +{ + int i; + if ((address >> MPI_SAS_PHY_PGAD_FORM_SHIFT) == 0) { + i = address & 255; + } else if ((address >> MPI_SAS_PHY_PGAD_FORM_SHIFT) == 1) { + i = address & 65535; + } else { + return -EINVAL; + } + + if (i >= MPTSAS_NUM_PORTS) { + return -EINVAL; + } + + return i; +} + +static +size_t mptsas_config_phy_0(MPTSASState *s, uint8_t **data, int address) +{ + int phy_handle = -1; + int dev_handle = -1; + int i = mptsas_phy_addr_get(s, address); + SCSIDevice *dev; + + if (i < 0) { + trace_mptsas_config_sas_phy(s, address, i, phy_handle, dev_handle, 0); + return i; + } + + dev = mptsas_phy_get_device(s, i, &phy_handle, &dev_handle); + trace_mptsas_config_sas_phy(s, address, i, phy_handle, dev_handle, 0); + + return MPTSAS_CONFIG_PACK_EXT(0, MPI_CONFIG_EXTPAGETYPE_SAS_PHY, 0x01, + "w*wqwb*blbb*b*b*l", + dev_handle, s->sas_addr, dev_handle, i, + (dev + ? MPI_SAS_DEVICE_INFO_END_DEVICE /* | MPI_SAS_DEVICE_INFO_SSP_TARGET?? 
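(SAS IO unit pages 0 and 1 do advertise SSP_TARGET for attached devices) 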
*/ + : MPI_SAS_DEVICE_INFO_NO_DEVICE), + (MPI_SAS_IOUNIT0_RATE_3_0 << 4) | MPI_SAS_IOUNIT0_RATE_1_5, + (MPI_SAS_IOUNIT0_RATE_3_0 << 4) | MPI_SAS_IOUNIT0_RATE_1_5); +} + +static +size_t mptsas_config_phy_1(MPTSASState *s, uint8_t **data, int address) +{ + int phy_handle = -1; + int dev_handle = -1; + int i = mptsas_phy_addr_get(s, address); + + if (i < 0) { + trace_mptsas_config_sas_phy(s, address, i, phy_handle, dev_handle, 1); + return i; + } + + (void) mptsas_phy_get_device(s, i, &phy_handle, &dev_handle); + trace_mptsas_config_sas_phy(s, address, i, phy_handle, dev_handle, 1); + + return MPTSAS_CONFIG_PACK_EXT(1, MPI_CONFIG_EXTPAGETYPE_SAS_PHY, 0x01, + "*l*l*l*l*l"); +} + +/* SAS device pages (extended) */ + +static int mptsas_device_addr_get(MPTSASState *s, int address) +{ + uint32_t handle, i; + uint32_t form = address >> MPI_SAS_PHY_PGAD_FORM_SHIFT; + if (form == MPI_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE) { + handle = address & MPI_SAS_DEVICE_PGAD_GNH_HANDLE_MASK; + do { + if (handle == 65535) { + handle = MPTSAS_NUM_PORTS + 1; + } else { + ++handle; + } + i = handle - 1 - MPTSAS_NUM_PORTS; + } while (i < MPTSAS_NUM_PORTS && !scsi_device_find(&s->bus, 0, i, 0)); + + } else if (form == MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID) { + if (address & MPI_SAS_DEVICE_PGAD_BT_BUS_MASK) { + return -EINVAL; + } + i = address & MPI_SAS_DEVICE_PGAD_BT_TID_MASK; + + } else if (form == MPI_SAS_DEVICE_PGAD_FORM_HANDLE) { + handle = address & MPI_SAS_DEVICE_PGAD_H_HANDLE_MASK; + i = handle - 1 - MPTSAS_NUM_PORTS; + + } else { + return -EINVAL; + } + + if (i >= MPTSAS_NUM_PORTS) { + return -EINVAL; + } + + return i; +} + +static +size_t mptsas_config_sas_device_0(MPTSASState *s, uint8_t **data, int address) +{ + int phy_handle = -1; + int dev_handle = -1; + int i = mptsas_device_addr_get(s, address); + SCSIDevice *dev = mptsas_phy_get_device(s, i, &phy_handle, &dev_handle); + + trace_mptsas_config_sas_device(s, address, i, phy_handle, dev_handle, 0); + if (!dev) { + return -ENOENT; + } + + return MPTSAS_CONFIG_PACK_EXT(0, MPI_CONFIG_EXTPAGETYPE_SAS_DEVICE, 0x05, + "*w*wqwbbwbblwb*b", + dev->wwn, phy_handle, i, + MPI_SAS_DEVICE0_ASTATUS_NO_ERRORS, + dev_handle, i, 0, + MPI_SAS_DEVICE_INFO_END_DEVICE | MPI_SAS_DEVICE_INFO_SSP_TARGET, + (MPI_SAS_DEVICE0_FLAGS_DEVICE_PRESENT | + MPI_SAS_DEVICE0_FLAGS_DEVICE_MAPPED | + MPI_SAS_DEVICE0_FLAGS_MAPPING_PERSISTENT), i); +} + +static +size_t mptsas_config_sas_device_1(MPTSASState *s, uint8_t **data, int address) +{ + int phy_handle = -1; + int dev_handle = -1; + int i = mptsas_device_addr_get(s, address); + SCSIDevice *dev = mptsas_phy_get_device(s, i, &phy_handle, &dev_handle); + + trace_mptsas_config_sas_device(s, address, i, phy_handle, dev_handle, 1); + if (!dev) { + return -ENOENT; + } + + return MPTSAS_CONFIG_PACK_EXT(1, MPI_CONFIG_EXTPAGETYPE_SAS_DEVICE, 0x00, + "*lq*lwbb*s20", + dev->wwn, dev_handle, i, 0); +} + +static +size_t mptsas_config_sas_device_2(MPTSASState *s, uint8_t **data, int address) +{ + int phy_handle = -1; + int dev_handle = -1; + int i = mptsas_device_addr_get(s, address); + SCSIDevice *dev = mptsas_phy_get_device(s, i, &phy_handle, &dev_handle); + + trace_mptsas_config_sas_device(s, address, i, phy_handle, dev_handle, 2); + if (!dev) { + return -ENOENT; + } + + return MPTSAS_CONFIG_PACK_EXT(2, MPI_CONFIG_EXTPAGETYPE_SAS_DEVICE, 0x01, + "ql", dev->wwn, 0); +} + +typedef struct MPTSASConfigPage { + uint8_t number; + uint8_t type; + size_t (*mpt_config_build)(MPTSASState *s, uint8_t **data, int address); +} MPTSASConfigPage; + +static const 
MPTSASConfigPage mptsas_config_pages[] = { + { + 0, MPI_CONFIG_PAGETYPE_MANUFACTURING, + mptsas_config_manufacturing_0, + }, { + 1, MPI_CONFIG_PAGETYPE_MANUFACTURING, + mptsas_config_manufacturing_1, + }, { + 2, MPI_CONFIG_PAGETYPE_MANUFACTURING, + mptsas_config_manufacturing_2, + }, { + 3, MPI_CONFIG_PAGETYPE_MANUFACTURING, + mptsas_config_manufacturing_3, + }, { + 4, MPI_CONFIG_PAGETYPE_MANUFACTURING, + mptsas_config_manufacturing_4, + }, { + 5, MPI_CONFIG_PAGETYPE_MANUFACTURING, + mptsas_config_manufacturing_5, + }, { + 6, MPI_CONFIG_PAGETYPE_MANUFACTURING, + mptsas_config_manufacturing_6, + }, { + 7, MPI_CONFIG_PAGETYPE_MANUFACTURING, + mptsas_config_manufacturing_7, + }, { + 8, MPI_CONFIG_PAGETYPE_MANUFACTURING, + mptsas_config_manufacturing_8, + }, { + 9, MPI_CONFIG_PAGETYPE_MANUFACTURING, + mptsas_config_manufacturing_9, + }, { + 10, MPI_CONFIG_PAGETYPE_MANUFACTURING, + mptsas_config_manufacturing_10, + }, { + 0, MPI_CONFIG_PAGETYPE_IO_UNIT, + mptsas_config_io_unit_0, + }, { + 1, MPI_CONFIG_PAGETYPE_IO_UNIT, + mptsas_config_io_unit_1, + }, { + 2, MPI_CONFIG_PAGETYPE_IO_UNIT, + mptsas_config_io_unit_2, + }, { + 3, MPI_CONFIG_PAGETYPE_IO_UNIT, + mptsas_config_io_unit_3, + }, { + 4, MPI_CONFIG_PAGETYPE_IO_UNIT, + mptsas_config_io_unit_4, + }, { + 0, MPI_CONFIG_PAGETYPE_IOC, + mptsas_config_ioc_0, + }, { + 1, MPI_CONFIG_PAGETYPE_IOC, + mptsas_config_ioc_1, + }, { + 2, MPI_CONFIG_PAGETYPE_IOC, + mptsas_config_ioc_2, + }, { + 3, MPI_CONFIG_PAGETYPE_IOC, + mptsas_config_ioc_3, + }, { + 4, MPI_CONFIG_PAGETYPE_IOC, + mptsas_config_ioc_4, + }, { + 5, MPI_CONFIG_PAGETYPE_IOC, + mptsas_config_ioc_5, + }, { + 6, MPI_CONFIG_PAGETYPE_IOC, + mptsas_config_ioc_6, + }, { + 0, MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT, + mptsas_config_sas_io_unit_0, + }, { + 1, MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT, + mptsas_config_sas_io_unit_1, + }, { + 2, MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT, + mptsas_config_sas_io_unit_2, + }, { + 3, MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT, + mptsas_config_sas_io_unit_3, + }, { + 0, MPI_CONFIG_EXTPAGETYPE_SAS_PHY, + mptsas_config_phy_0, + }, { + 1, MPI_CONFIG_EXTPAGETYPE_SAS_PHY, + mptsas_config_phy_1, + }, { + 0, MPI_CONFIG_EXTPAGETYPE_SAS_DEVICE, + mptsas_config_sas_device_0, + }, { + 1, MPI_CONFIG_EXTPAGETYPE_SAS_DEVICE, + mptsas_config_sas_device_1, + }, { + 2, MPI_CONFIG_EXTPAGETYPE_SAS_DEVICE, + mptsas_config_sas_device_2, + } +}; + +static const MPTSASConfigPage *mptsas_find_config_page(int type, int number) +{ + const MPTSASConfigPage *page; + int i; + + for (i = 0; i < ARRAY_SIZE(mptsas_config_pages); i++) { + page = &mptsas_config_pages[i]; + if (page->type == type && page->number == number) { + return page; + } + } + + return NULL; +} + +void mptsas_process_config(MPTSASState *s, MPIMsgConfig *req) +{ + PCIDevice *pci = PCI_DEVICE(s); + + MPIMsgConfigReply reply; + const MPTSASConfigPage *page; + size_t length; + uint8_t type; + uint8_t *data = NULL; + uint32_t flags_and_length; + uint32_t dmalen; + uint64_t pa; + + mptsas_fix_config_endianness(req); + + QEMU_BUILD_BUG_ON(sizeof(s->doorbell_msg) < sizeof(*req)); + QEMU_BUILD_BUG_ON(sizeof(s->doorbell_reply) < sizeof(reply)); + + /* Copy common bits from the request into the reply. 
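The PageLength/ExtPageLength fields are overwritten at the done label below, once the page has actually been built. 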
*/ + memset(&reply, 0, sizeof(reply)); + reply.Action = req->Action; + reply.Function = req->Function; + reply.MsgContext = req->MsgContext; + reply.MsgLength = sizeof(reply) / 4; + reply.PageType = req->PageType; + reply.PageNumber = req->PageNumber; + reply.PageLength = req->PageLength; + reply.PageVersion = req->PageVersion; + + type = req->PageType & MPI_CONFIG_PAGETYPE_MASK; + if (type == MPI_CONFIG_PAGETYPE_EXTENDED) { + type = req->ExtPageType; + if (type <= MPI_CONFIG_PAGETYPE_MASK) { + reply.IOCStatus = MPI_IOCSTATUS_CONFIG_INVALID_TYPE; + goto out; + } + + reply.ExtPageType = req->ExtPageType; + } + + page = mptsas_find_config_page(type, req->PageNumber); + + switch(req->Action) { + case MPI_CONFIG_ACTION_PAGE_DEFAULT: + case MPI_CONFIG_ACTION_PAGE_HEADER: + case MPI_CONFIG_ACTION_PAGE_READ_NVRAM: + case MPI_CONFIG_ACTION_PAGE_READ_CURRENT: + case MPI_CONFIG_ACTION_PAGE_READ_DEFAULT: + case MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT: + case MPI_CONFIG_ACTION_PAGE_WRITE_NVRAM: + break; + + default: + reply.IOCStatus = MPI_IOCSTATUS_CONFIG_INVALID_ACTION; + goto out; + } + + if (!page) { + page = mptsas_find_config_page(type, 1); + if (page) { + reply.IOCStatus = MPI_IOCSTATUS_CONFIG_INVALID_PAGE; + } else { + reply.IOCStatus = MPI_IOCSTATUS_CONFIG_INVALID_TYPE; + } + goto out; + } + + if (req->Action == MPI_CONFIG_ACTION_PAGE_DEFAULT || + req->Action == MPI_CONFIG_ACTION_PAGE_HEADER) { + length = page->mpt_config_build(s, NULL, req->PageAddress); + if ((ssize_t)length < 0) { + reply.IOCStatus = MPI_IOCSTATUS_CONFIG_INVALID_PAGE; + goto out; + } else { + goto done; + } + } + + if (req->Action == MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT || + req->Action == MPI_CONFIG_ACTION_PAGE_WRITE_NVRAM) { + length = page->mpt_config_build(s, NULL, req->PageAddress); + if ((ssize_t)length < 0) { + reply.IOCStatus = MPI_IOCSTATUS_CONFIG_INVALID_PAGE; + } else { + reply.IOCStatus = MPI_IOCSTATUS_CONFIG_CANT_COMMIT; + } + goto out; + } + + flags_and_length = req->PageBufferSGE.FlagsLength; + dmalen = flags_and_length & MPI_SGE_LENGTH_MASK; + if (dmalen == 0) { + length = page->mpt_config_build(s, NULL, req->PageAddress); + if ((ssize_t)length < 0) { + reply.IOCStatus = MPI_IOCSTATUS_CONFIG_INVALID_PAGE; + goto out; + } else { + goto done; + } + } + + if (flags_and_length & MPI_SGE_FLAGS_64_BIT_ADDRESSING) { + pa = req->PageBufferSGE.u.Address64; + } else { + pa = req->PageBufferSGE.u.Address32; + } + + /* Only read actions left. */ + length = page->mpt_config_build(s, &data, req->PageAddress); + if ((ssize_t)length < 0) { + reply.IOCStatus = MPI_IOCSTATUS_CONFIG_INVALID_PAGE; + goto out; + } else { + assert(data[2] == page->number); + pci_dma_write(pci, pa, data, MIN(length, dmalen)); + goto done; + } + + abort(); + +done: + if (type > MPI_CONFIG_PAGETYPE_MASK) { + reply.ExtPageLength = length / 4; + reply.ExtPageType = req->ExtPageType; + } else { + reply.PageLength = length / 4; + } + +out: + mptsas_fix_config_reply_endianness(&reply); + mptsas_reply(s, (MPIDefaultReply *)&reply); + g_free(data); +} diff --git a/hw/scsi/mptendian.c b/hw/scsi/mptendian.c new file mode 100644 index 000000000..0d5abb4b6 --- /dev/null +++ b/hw/scsi/mptendian.c @@ -0,0 +1,205 @@ +/* + * QEMU LSI SAS1068 Host Bus Adapter emulation + * Endianness conversion for MPI data structures + * + * Copyright (c) 2016 Red Hat, Inc. 
+ * + * Authors: Paolo Bonzini + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ + +#include "qemu/osdep.h" +#include "hw/pci/pci.h" +#include "sysemu/dma.h" +#include "hw/pci/msi.h" +#include "qemu/iov.h" +#include "hw/scsi/scsi.h" +#include "scsi/constants.h" +#include "trace.h" + +#include "mptsas.h" +#include "mpi.h" + +static void mptsas_fix_sgentry_endianness(MPISGEntry *sge) +{ + sge->FlagsLength = le32_to_cpu(sge->FlagsLength); + if (sge->FlagsLength & MPI_SGE_FLAGS_64_BIT_ADDRESSING) { + sge->u.Address64 = le64_to_cpu(sge->u.Address64); + } else { + sge->u.Address32 = le32_to_cpu(sge->u.Address32); + } +} + +static void mptsas_fix_sgentry_endianness_reply(MPISGEntry *sge) +{ + if (sge->FlagsLength & MPI_SGE_FLAGS_64_BIT_ADDRESSING) { + sge->u.Address64 = cpu_to_le64(sge->u.Address64); + } else { + sge->u.Address32 = cpu_to_le32(sge->u.Address32); + } + sge->FlagsLength = cpu_to_le32(sge->FlagsLength); +} + +void mptsas_fix_scsi_io_endianness(MPIMsgSCSIIORequest *req) +{ + req->MsgContext = le32_to_cpu(req->MsgContext); + req->Control = le32_to_cpu(req->Control); + req->DataLength = le32_to_cpu(req->DataLength); + req->SenseBufferLowAddr = le32_to_cpu(req->SenseBufferLowAddr); +} + +void mptsas_fix_scsi_io_reply_endianness(MPIMsgSCSIIOReply *reply) +{ + reply->MsgContext = cpu_to_le32(reply->MsgContext); + reply->IOCStatus = cpu_to_le16(reply->IOCStatus); + reply->IOCLogInfo = cpu_to_le32(reply->IOCLogInfo); + reply->TransferCount = cpu_to_le32(reply->TransferCount); + reply->SenseCount = cpu_to_le32(reply->SenseCount); + reply->ResponseInfo = cpu_to_le32(reply->ResponseInfo); + reply->TaskTag = cpu_to_le16(reply->TaskTag); +} + +void mptsas_fix_scsi_task_mgmt_endianness(MPIMsgSCSITaskMgmt *req) +{ + req->MsgContext = le32_to_cpu(req->MsgContext); + req->TaskMsgContext = le32_to_cpu(req->TaskMsgContext); +} + +void mptsas_fix_scsi_task_mgmt_reply_endianness(MPIMsgSCSITaskMgmtReply *reply) +{ + reply->MsgContext = cpu_to_le32(reply->MsgContext); + reply->IOCStatus = cpu_to_le16(reply->IOCStatus); + reply->IOCLogInfo = cpu_to_le32(reply->IOCLogInfo); + reply->TerminationCount = cpu_to_le32(reply->TerminationCount); +} + +void mptsas_fix_ioc_init_endianness(MPIMsgIOCInit *req) +{ + req->MsgContext = le32_to_cpu(req->MsgContext); + req->ReplyFrameSize = le16_to_cpu(req->ReplyFrameSize); + req->HostMfaHighAddr = le32_to_cpu(req->HostMfaHighAddr); + req->SenseBufferHighAddr = le32_to_cpu(req->SenseBufferHighAddr); + req->ReplyFifoHostSignalingAddr = + le32_to_cpu(req->ReplyFifoHostSignalingAddr); + mptsas_fix_sgentry_endianness(&req->HostPageBufferSGE); + req->MsgVersion = le16_to_cpu(req->MsgVersion); + req->HeaderVersion = le16_to_cpu(req->HeaderVersion); +} + +void mptsas_fix_ioc_init_reply_endianness(MPIMsgIOCInitReply *reply) +{ + reply->MsgContext = cpu_to_le32(reply->MsgContext); + reply->IOCStatus = cpu_to_le16(reply->IOCStatus); + reply->IOCLogInfo = cpu_to_le32(reply->IOCLogInfo); 
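+ /* the remaining fields are single bytes or reserved, nothing to swap */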
+} + +void mptsas_fix_ioc_facts_endianness(MPIMsgIOCFacts *req) +{ + req->MsgContext = le32_to_cpu(req->MsgContext); +} + +void mptsas_fix_ioc_facts_reply_endianness(MPIMsgIOCFactsReply *reply) +{ + reply->MsgVersion = cpu_to_le16(reply->MsgVersion); + reply->HeaderVersion = cpu_to_le16(reply->HeaderVersion); + reply->MsgContext = cpu_to_le32(reply->MsgContext); + reply->IOCExceptions = cpu_to_le16(reply->IOCExceptions); + reply->IOCStatus = cpu_to_le16(reply->IOCStatus); + reply->IOCLogInfo = cpu_to_le32(reply->IOCLogInfo); + reply->ReplyQueueDepth = cpu_to_le16(reply->ReplyQueueDepth); + reply->RequestFrameSize = cpu_to_le16(reply->RequestFrameSize); + reply->ProductID = cpu_to_le16(reply->ProductID); + reply->CurrentHostMfaHighAddr = cpu_to_le32(reply->CurrentHostMfaHighAddr); + reply->GlobalCredits = cpu_to_le16(reply->GlobalCredits); + reply->CurrentSenseBufferHighAddr = + cpu_to_le32(reply->CurrentSenseBufferHighAddr); + reply->CurReplyFrameSize = cpu_to_le16(reply->CurReplyFrameSize); + reply->FWImageSize = cpu_to_le32(reply->FWImageSize); + reply->IOCCapabilities = cpu_to_le32(reply->IOCCapabilities); + reply->HighPriorityQueueDepth = cpu_to_le16(reply->HighPriorityQueueDepth); + mptsas_fix_sgentry_endianness_reply(&reply->HostPageBufferSGE); + reply->ReplyFifoHostSignalingAddr = + cpu_to_le32(reply->ReplyFifoHostSignalingAddr); +} + +void mptsas_fix_config_endianness(MPIMsgConfig *req) +{ + req->ExtPageLength = le16_to_cpu(req->ExtPageLength); + req->MsgContext = le32_to_cpu(req->MsgContext); + req->PageAddress = le32_to_cpu(req->PageAddress); + mptsas_fix_sgentry_endianness(&req->PageBufferSGE); +} + +void mptsas_fix_config_reply_endianness(MPIMsgConfigReply *reply) +{ + reply->ExtPageLength = cpu_to_le16(reply->ExtPageLength); + reply->MsgContext = cpu_to_le32(reply->MsgContext); + reply->IOCStatus = cpu_to_le16(reply->IOCStatus); + reply->IOCLogInfo = cpu_to_le32(reply->IOCLogInfo); +} + +void mptsas_fix_port_facts_endianness(MPIMsgPortFacts *req) +{ + req->MsgContext = le32_to_cpu(req->MsgContext); +} + +void mptsas_fix_port_facts_reply_endianness(MPIMsgPortFactsReply *reply) +{ + reply->MsgContext = cpu_to_le32(reply->MsgContext); + reply->IOCStatus = cpu_to_le16(reply->IOCStatus); + reply->IOCLogInfo = cpu_to_le32(reply->IOCLogInfo); + reply->MaxDevices = cpu_to_le16(reply->MaxDevices); + reply->PortSCSIID = cpu_to_le16(reply->PortSCSIID); + reply->ProtocolFlags = cpu_to_le16(reply->ProtocolFlags); + reply->MaxPostedCmdBuffers = cpu_to_le16(reply->MaxPostedCmdBuffers); + reply->MaxPersistentIDs = cpu_to_le16(reply->MaxPersistentIDs); + reply->MaxLanBuckets = cpu_to_le16(reply->MaxLanBuckets); +} + +void mptsas_fix_port_enable_endianness(MPIMsgPortEnable *req) +{ + req->MsgContext = le32_to_cpu(req->MsgContext); +} + +void mptsas_fix_port_enable_reply_endianness(MPIMsgPortEnableReply *reply) +{ + reply->MsgContext = cpu_to_le32(reply->MsgContext); + reply->IOCStatus = cpu_to_le16(reply->IOCStatus); + reply->IOCLogInfo = cpu_to_le32(reply->IOCLogInfo); +} + +void mptsas_fix_event_notification_endianness(MPIMsgEventNotify *req) +{ + req->MsgContext = le32_to_cpu(req->MsgContext); +} + +void mptsas_fix_event_notification_reply_endianness(MPIMsgEventNotifyReply *reply) +{ + int length = reply->EventDataLength; + int i; + + reply->EventDataLength = cpu_to_le16(reply->EventDataLength); + reply->MsgContext = cpu_to_le32(reply->MsgContext); + reply->IOCStatus = cpu_to_le16(reply->IOCStatus); + reply->IOCLogInfo = cpu_to_le32(reply->IOCLogInfo); + reply->Event = 
cpu_to_le32(reply->Event); + reply->EventContext = cpu_to_le32(reply->EventContext); + + /* Really depends on the event kind. This will do for now. */ + for (i = 0; i < length; i++) { + reply->Data[i] = cpu_to_le32(reply->Data[i]); + } +} + diff --git a/hw/scsi/mptsas.c b/hw/scsi/mptsas.c new file mode 100644 index 000000000..f6c776554 --- /dev/null +++ b/hw/scsi/mptsas.c @@ -0,0 +1,1447 @@ +/* + * QEMU LSI SAS1068 Host Bus Adapter emulation + * Based on the QEMU Megaraid emulator + * + * Copyright (c) 2009-2012 Hannes Reinecke, SUSE Labs + * Copyright (c) 2012 Verizon, Inc. + * Copyright (c) 2016 Red Hat, Inc. + * + * Authors: Don Slutz, Paolo Bonzini + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ + +#include "qemu/osdep.h" +#include "hw/pci/pci.h" +#include "hw/qdev-properties.h" +#include "sysemu/dma.h" +#include "hw/pci/msi.h" +#include "qemu/iov.h" +#include "qemu/main-loop.h" +#include "qemu/module.h" +#include "hw/scsi/scsi.h" +#include "scsi/constants.h" +#include "trace.h" +#include "qapi/error.h" +#include "mptsas.h" +#include "migration/qemu-file-types.h" +#include "migration/vmstate.h" +#include "mpi.h" + +#define NAA_LOCALLY_ASSIGNED_ID 0x3ULL +#define IEEE_COMPANY_LOCALLY_ASSIGNED 0x525400 + +#define MPTSAS1068_PRODUCT_ID \ + (MPI_FW_HEADER_PID_FAMILY_1068_SAS | \ + MPI_FW_HEADER_PID_PROD_INITIATOR_SCSI | \ + MPI_FW_HEADER_PID_TYPE_SAS) + +struct MPTSASRequest { + MPIMsgSCSIIORequest scsi_io; + SCSIRequest *sreq; + QEMUSGList qsg; + MPTSASState *dev; + + QTAILQ_ENTRY(MPTSASRequest) next; +}; + +static void mptsas_update_interrupt(MPTSASState *s) +{ + PCIDevice *pci = (PCIDevice *) s; + uint32_t state = s->intr_status & ~(s->intr_mask | MPI_HIS_IOP_DOORBELL_STATUS); + + if (msi_enabled(pci)) { + if (state) { + trace_mptsas_irq_msi(s); + msi_notify(pci, 0); + } + } + + trace_mptsas_irq_intx(s, !!state); + pci_set_irq(pci, !!state); +} + +static void mptsas_set_fault(MPTSASState *s, uint32_t code) +{ + if ((s->state & MPI_IOC_STATE_FAULT) == 0) { + s->state = MPI_IOC_STATE_FAULT | code; + } +} + +#define MPTSAS_FIFO_INVALID(s, name) \ + ((s)->name##_head > ARRAY_SIZE((s)->name) || \ + (s)->name##_tail > ARRAY_SIZE((s)->name)) + +#define MPTSAS_FIFO_EMPTY(s, name) \ + ((s)->name##_head == (s)->name##_tail) + +#define MPTSAS_FIFO_FULL(s, name) \ + ((s)->name##_head == ((s)->name##_tail + 1) % ARRAY_SIZE((s)->name)) + +#define MPTSAS_FIFO_GET(s, name) ({ \ + uint32_t _val = (s)->name[(s)->name##_head++]; \ + (s)->name##_head %= ARRAY_SIZE((s)->name); \ + _val; \ +}) + +#define MPTSAS_FIFO_PUT(s, name, val) do { \ + (s)->name[(s)->name##_tail++] = (val); \ + (s)->name##_tail %= ARRAY_SIZE((s)->name); \ +} while(0) + +static void mptsas_post_reply(MPTSASState *s, MPIDefaultReply *reply) +{ + PCIDevice *pci = (PCIDevice *) s; + uint32_t addr_lo; + + if (MPTSAS_FIFO_EMPTY(s, reply_free) || MPTSAS_FIFO_FULL(s, reply_post)) { + mptsas_set_fault(s, MPI_IOCSTATUS_INSUFFICIENT_RESOURCES); + 
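/* no free reply frame, or the reply post queue is full: drop the reply */ 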
return; + } + + addr_lo = MPTSAS_FIFO_GET(s, reply_free); + + pci_dma_write(pci, addr_lo | s->host_mfa_high_addr, reply, + MIN(s->reply_frame_size, 4 * reply->MsgLength)); + + MPTSAS_FIFO_PUT(s, reply_post, MPI_ADDRESS_REPLY_A_BIT | (addr_lo >> 1)); + + s->intr_status |= MPI_HIS_REPLY_MESSAGE_INTERRUPT; + if (s->doorbell_state == DOORBELL_WRITE) { + s->doorbell_state = DOORBELL_NONE; + s->intr_status |= MPI_HIS_DOORBELL_INTERRUPT; + } + mptsas_update_interrupt(s); +} + +void mptsas_reply(MPTSASState *s, MPIDefaultReply *reply) +{ + if (s->doorbell_state == DOORBELL_WRITE) { + /* The reply is sent out in 16 bit chunks, while the size + * in the reply is in 32 bit units. + */ + s->doorbell_state = DOORBELL_READ; + s->doorbell_reply_idx = 0; + s->doorbell_reply_size = reply->MsgLength * 2; + memcpy(s->doorbell_reply, reply, s->doorbell_reply_size * 2); + s->intr_status |= MPI_HIS_DOORBELL_INTERRUPT; + mptsas_update_interrupt(s); + } else { + mptsas_post_reply(s, reply); + } +} + +static void mptsas_turbo_reply(MPTSASState *s, uint32_t msgctx) +{ + if (MPTSAS_FIFO_FULL(s, reply_post)) { + mptsas_set_fault(s, MPI_IOCSTATUS_INSUFFICIENT_RESOURCES); + return; + } + + /* The reply is just the message context ID (bit 31 = clear). */ + MPTSAS_FIFO_PUT(s, reply_post, msgctx); + + s->intr_status |= MPI_HIS_REPLY_MESSAGE_INTERRUPT; + mptsas_update_interrupt(s); +} + +#define MPTSAS_MAX_REQUEST_SIZE 52 + +static const int mpi_request_sizes[] = { + [MPI_FUNCTION_SCSI_IO_REQUEST] = sizeof(MPIMsgSCSIIORequest), + [MPI_FUNCTION_SCSI_TASK_MGMT] = sizeof(MPIMsgSCSITaskMgmt), + [MPI_FUNCTION_IOC_INIT] = sizeof(MPIMsgIOCInit), + [MPI_FUNCTION_IOC_FACTS] = sizeof(MPIMsgIOCFacts), + [MPI_FUNCTION_CONFIG] = sizeof(MPIMsgConfig), + [MPI_FUNCTION_PORT_FACTS] = sizeof(MPIMsgPortFacts), + [MPI_FUNCTION_PORT_ENABLE] = sizeof(MPIMsgPortEnable), + [MPI_FUNCTION_EVENT_NOTIFICATION] = sizeof(MPIMsgEventNotify), +}; + +static dma_addr_t mptsas_ld_sg_base(MPTSASState *s, uint32_t flags_and_length, + dma_addr_t *sgaddr) +{ + PCIDevice *pci = (PCIDevice *) s; + dma_addr_t addr; + + if (flags_and_length & MPI_SGE_FLAGS_64_BIT_ADDRESSING) { + addr = ldq_le_pci_dma(pci, *sgaddr + 4); + *sgaddr += 12; + } else { + addr = ldl_le_pci_dma(pci, *sgaddr + 4); + *sgaddr += 8; + } + return addr; +} + +static int mptsas_build_sgl(MPTSASState *s, MPTSASRequest *req, hwaddr addr) +{ + PCIDevice *pci = (PCIDevice *) s; + hwaddr next_chain_addr; + uint32_t left; + hwaddr sgaddr; + uint32_t chain_offset; + + chain_offset = req->scsi_io.ChainOffset; + next_chain_addr = addr + chain_offset * sizeof(uint32_t); + sgaddr = addr + sizeof(MPIMsgSCSIIORequest); + pci_dma_sglist_init(&req->qsg, pci, 4); + left = req->scsi_io.DataLength; + + for(;;) { + dma_addr_t addr, len; + uint32_t flags_and_length; + + flags_and_length = ldl_le_pci_dma(pci, sgaddr); + len = flags_and_length & MPI_SGE_LENGTH_MASK; + if ((flags_and_length & MPI_SGE_FLAGS_ELEMENT_TYPE_MASK) + != MPI_SGE_FLAGS_SIMPLE_ELEMENT || + (!len && + !(flags_and_length & MPI_SGE_FLAGS_END_OF_LIST) && + !(flags_and_length & MPI_SGE_FLAGS_END_OF_BUFFER))) { + return MPI_IOCSTATUS_INVALID_SGL; + } + + len = MIN(len, left); + if (!len) { + /* We reached the desired transfer length, ignore extra + * elements of the s/g list. 
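+ * (either the residual length reached zero, or a zero-length + * end-of-list/end-of-buffer element was seen)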
+ */ + break; + } + + addr = mptsas_ld_sg_base(s, flags_and_length, &sgaddr); + qemu_sglist_add(&req->qsg, addr, len); + left -= len; + + if (flags_and_length & MPI_SGE_FLAGS_END_OF_LIST) { + break; + } + + if (flags_and_length & MPI_SGE_FLAGS_LAST_ELEMENT) { + if (!chain_offset) { + break; + } + + flags_and_length = ldl_le_pci_dma(pci, next_chain_addr); + if ((flags_and_length & MPI_SGE_FLAGS_ELEMENT_TYPE_MASK) + != MPI_SGE_FLAGS_CHAIN_ELEMENT) { + return MPI_IOCSTATUS_INVALID_SGL; + } + + sgaddr = mptsas_ld_sg_base(s, flags_and_length, &next_chain_addr); + chain_offset = + (flags_and_length & MPI_SGE_CHAIN_OFFSET_MASK) >> MPI_SGE_CHAIN_OFFSET_SHIFT; + next_chain_addr = sgaddr + chain_offset * sizeof(uint32_t); + } + } + return 0; +} + +static void mptsas_free_request(MPTSASRequest *req) +{ + if (req->sreq != NULL) { + req->sreq->hba_private = NULL; + scsi_req_unref(req->sreq); + req->sreq = NULL; + } + qemu_sglist_destroy(&req->qsg); + g_free(req); +} + +static int mptsas_scsi_device_find(MPTSASState *s, int bus, int target, + uint8_t *lun, SCSIDevice **sdev) +{ + if (bus != 0) { + return MPI_IOCSTATUS_SCSI_INVALID_BUS; + } + + if (target >= s->max_devices) { + return MPI_IOCSTATUS_SCSI_INVALID_TARGETID; + } + + *sdev = scsi_device_find(&s->bus, bus, target, lun[1]); + if (!*sdev) { + return MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE; + } + + return 0; +} + +static int mptsas_process_scsi_io_request(MPTSASState *s, + MPIMsgSCSIIORequest *scsi_io, + hwaddr addr) +{ + MPTSASRequest *req; + MPIMsgSCSIIOReply reply; + SCSIDevice *sdev; + int status; + + mptsas_fix_scsi_io_endianness(scsi_io); + + trace_mptsas_process_scsi_io_request(s, scsi_io->Bus, scsi_io->TargetID, + scsi_io->LUN[1], scsi_io->DataLength); + + status = mptsas_scsi_device_find(s, scsi_io->Bus, scsi_io->TargetID, + scsi_io->LUN, &sdev); + if (status) { + goto bad; + } + + req = g_new0(MPTSASRequest, 1); + req->scsi_io = *scsi_io; + req->dev = s; + + status = mptsas_build_sgl(s, req, addr); + if (status) { + goto free_bad; + } + + if (req->qsg.size < scsi_io->DataLength) { + trace_mptsas_sgl_overflow(s, scsi_io->MsgContext, scsi_io->DataLength, + req->qsg.size); + status = MPI_IOCSTATUS_INVALID_SGL; + goto free_bad; + } + + req->sreq = scsi_req_new(sdev, scsi_io->MsgContext, + scsi_io->LUN[1], scsi_io->CDB, req); + + if (req->sreq->cmd.xfer > scsi_io->DataLength) { + goto overrun; + } + switch (scsi_io->Control & MPI_SCSIIO_CONTROL_DATADIRECTION_MASK) { + case MPI_SCSIIO_CONTROL_NODATATRANSFER: + if (req->sreq->cmd.mode != SCSI_XFER_NONE) { + goto overrun; + } + break; + + case MPI_SCSIIO_CONTROL_WRITE: + if (req->sreq->cmd.mode != SCSI_XFER_TO_DEV) { + goto overrun; + } + break; + + case MPI_SCSIIO_CONTROL_READ: + if (req->sreq->cmd.mode != SCSI_XFER_FROM_DEV) { + goto overrun; + } + break; + } + + if (scsi_req_enqueue(req->sreq)) { + scsi_req_continue(req->sreq); + } + return 0; + +overrun: + trace_mptsas_scsi_overflow(s, scsi_io->MsgContext, req->sreq->cmd.xfer, + scsi_io->DataLength); + status = MPI_IOCSTATUS_SCSI_DATA_OVERRUN; +free_bad: + mptsas_free_request(req); +bad: + memset(&reply, 0, sizeof(reply)); + reply.TargetID = scsi_io->TargetID; + reply.Bus = scsi_io->Bus; + reply.MsgLength = sizeof(reply) / 4; + reply.Function = scsi_io->Function; + reply.CDBLength = scsi_io->CDBLength; + reply.SenseBufferLength = scsi_io->SenseBufferLength; + reply.MsgContext = scsi_io->MsgContext; + reply.SCSIState = MPI_SCSI_STATE_NO_SCSI_STATUS; + reply.IOCStatus = status; + + mptsas_fix_scsi_io_reply_endianness(&reply); + mptsas_reply(s, 
(MPIDefaultReply *)&reply); + + return 0; +} + +typedef struct { + Notifier notifier; + MPTSASState *s; + MPIMsgSCSITaskMgmtReply *reply; +} MPTSASCancelNotifier; + +static void mptsas_cancel_notify(Notifier *notifier, void *data) +{ + MPTSASCancelNotifier *n = container_of(notifier, + MPTSASCancelNotifier, + notifier); + + /* Abusing IOCLogInfo to store the expected number of requests... */ + if (++n->reply->TerminationCount == n->reply->IOCLogInfo) { + n->reply->IOCLogInfo = 0; + mptsas_fix_scsi_task_mgmt_reply_endianness(n->reply); + mptsas_post_reply(n->s, (MPIDefaultReply *)n->reply); + g_free(n->reply); + } + g_free(n); +} + +static void mptsas_process_scsi_task_mgmt(MPTSASState *s, MPIMsgSCSITaskMgmt *req) +{ + MPIMsgSCSITaskMgmtReply reply; + MPIMsgSCSITaskMgmtReply *reply_async; + int status, count; + SCSIDevice *sdev; + SCSIRequest *r, *next; + BusChild *kid; + + mptsas_fix_scsi_task_mgmt_endianness(req); + + QEMU_BUILD_BUG_ON(MPTSAS_MAX_REQUEST_SIZE < sizeof(*req)); + QEMU_BUILD_BUG_ON(sizeof(s->doorbell_msg) < sizeof(*req)); + QEMU_BUILD_BUG_ON(sizeof(s->doorbell_reply) < sizeof(reply)); + + memset(&reply, 0, sizeof(reply)); + reply.TargetID = req->TargetID; + reply.Bus = req->Bus; + reply.MsgLength = sizeof(reply) / 4; + reply.Function = req->Function; + reply.TaskType = req->TaskType; + reply.MsgContext = req->MsgContext; + + switch (req->TaskType) { + case MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK: + case MPI_SCSITASKMGMT_TASKTYPE_QUERY_TASK: + status = mptsas_scsi_device_find(s, req->Bus, req->TargetID, + req->LUN, &sdev); + if (status) { + reply.IOCStatus = status; + goto out; + } + if (sdev->lun != req->LUN[1]) { + reply.ResponseCode = MPI_SCSITASKMGMT_RSP_TM_INVALID_LUN; + goto out; + } + + QTAILQ_FOREACH_SAFE(r, &sdev->requests, next, next) { + MPTSASRequest *cmd_req = r->hba_private; + if (cmd_req && cmd_req->scsi_io.MsgContext == req->TaskMsgContext) { + break; + } + } + if (r) { + /* + * Assert that the request has not been completed yet, we + * check for it in the loop above. + */ + assert(r->hba_private); + if (req->TaskType == MPI_SCSITASKMGMT_TASKTYPE_QUERY_TASK) { + /* "If the specified command is present in the task set, then + * return a service response set to FUNCTION SUCCEEDED". 
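+ * QUERY TASK therefore completes synchronously; only ABORT TASK + * goes through the asynchronous cancellation notifier below.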
+ */ + reply.ResponseCode = MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED; + } else { + MPTSASCancelNotifier *notifier; + + reply_async = g_memdup(&reply, sizeof(MPIMsgSCSITaskMgmtReply)); + reply_async->IOCLogInfo = INT_MAX; + + count = 1; + notifier = g_new(MPTSASCancelNotifier, 1); + notifier->s = s; + notifier->reply = reply_async; + notifier->notifier.notify = mptsas_cancel_notify; + scsi_req_cancel_async(r, &notifier->notifier); + goto reply_maybe_async; + } + } + break; + + case MPI_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET: + case MPI_SCSITASKMGMT_TASKTYPE_CLEAR_TASK_SET: + status = mptsas_scsi_device_find(s, req->Bus, req->TargetID, + req->LUN, &sdev); + if (status) { + reply.IOCStatus = status; + goto out; + } + if (sdev->lun != req->LUN[1]) { + reply.ResponseCode = MPI_SCSITASKMGMT_RSP_TM_INVALID_LUN; + goto out; + } + + reply_async = g_memdup(&reply, sizeof(MPIMsgSCSITaskMgmtReply)); + reply_async->IOCLogInfo = INT_MAX; + + count = 0; + QTAILQ_FOREACH_SAFE(r, &sdev->requests, next, next) { + if (r->hba_private) { + MPTSASCancelNotifier *notifier; + + count++; + notifier = g_new(MPTSASCancelNotifier, 1); + notifier->s = s; + notifier->reply = reply_async; + notifier->notifier.notify = mptsas_cancel_notify; + scsi_req_cancel_async(r, &notifier->notifier); + } + } + +reply_maybe_async: + if (reply_async->TerminationCount < count) { + reply_async->IOCLogInfo = count; + return; + } + g_free(reply_async); + reply.TerminationCount = count; + break; + + case MPI_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET: + status = mptsas_scsi_device_find(s, req->Bus, req->TargetID, + req->LUN, &sdev); + if (status) { + reply.IOCStatus = status; + goto out; + } + if (sdev->lun != req->LUN[1]) { + reply.ResponseCode = MPI_SCSITASKMGMT_RSP_TM_INVALID_LUN; + goto out; + } + qdev_reset_all(&sdev->qdev); + break; + + case MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET: + if (req->Bus != 0) { + reply.IOCStatus = MPI_IOCSTATUS_SCSI_INVALID_BUS; + goto out; + } + if (req->TargetID > s->max_devices) { + reply.IOCStatus = MPI_IOCSTATUS_SCSI_INVALID_TARGETID; + goto out; + } + + QTAILQ_FOREACH(kid, &s->bus.qbus.children, sibling) { + sdev = SCSI_DEVICE(kid->child); + if (sdev->channel == 0 && sdev->id == req->TargetID) { + qdev_reset_all(kid->child); + } + } + break; + + case MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS: + qbus_reset_all(BUS(&s->bus)); + break; + + default: + reply.ResponseCode = MPI_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED; + break; + } + +out: + mptsas_fix_scsi_task_mgmt_reply_endianness(&reply); + mptsas_post_reply(s, (MPIDefaultReply *)&reply); +} + +static void mptsas_process_ioc_init(MPTSASState *s, MPIMsgIOCInit *req) +{ + MPIMsgIOCInitReply reply; + + mptsas_fix_ioc_init_endianness(req); + + QEMU_BUILD_BUG_ON(MPTSAS_MAX_REQUEST_SIZE < sizeof(*req)); + QEMU_BUILD_BUG_ON(sizeof(s->doorbell_msg) < sizeof(*req)); + QEMU_BUILD_BUG_ON(sizeof(s->doorbell_reply) < sizeof(reply)); + + s->who_init = req->WhoInit; + s->reply_frame_size = req->ReplyFrameSize; + s->max_buses = req->MaxBuses; + s->max_devices = req->MaxDevices ?
req->MaxDevices : 256; + s->host_mfa_high_addr = (hwaddr)req->HostMfaHighAddr << 32; + s->sense_buffer_high_addr = (hwaddr)req->SenseBufferHighAddr << 32; + + if (s->state == MPI_IOC_STATE_READY) { + s->state = MPI_IOC_STATE_OPERATIONAL; + } + + memset(&reply, 0, sizeof(reply)); + reply.WhoInit = s->who_init; + reply.MsgLength = sizeof(reply) / 4; + reply.Function = req->Function; + reply.MaxDevices = s->max_devices; + reply.MaxBuses = s->max_buses; + reply.MsgContext = req->MsgContext; + + mptsas_fix_ioc_init_reply_endianness(&reply); + mptsas_reply(s, (MPIDefaultReply *)&reply); +} + +static void mptsas_process_ioc_facts(MPTSASState *s, + MPIMsgIOCFacts *req) +{ + MPIMsgIOCFactsReply reply; + + mptsas_fix_ioc_facts_endianness(req); + + QEMU_BUILD_BUG_ON(MPTSAS_MAX_REQUEST_SIZE < sizeof(*req)); + QEMU_BUILD_BUG_ON(sizeof(s->doorbell_msg) < sizeof(*req)); + QEMU_BUILD_BUG_ON(sizeof(s->doorbell_reply) < sizeof(reply)); + + memset(&reply, 0, sizeof(reply)); + reply.MsgVersion = 0x0105; + reply.MsgLength = sizeof(reply) / 4; + reply.Function = req->Function; + reply.MsgContext = req->MsgContext; + reply.MaxChainDepth = MPTSAS_MAXIMUM_CHAIN_DEPTH; + reply.WhoInit = s->who_init; + reply.BlockSize = MPTSAS_MAX_REQUEST_SIZE / sizeof(uint32_t); + reply.ReplyQueueDepth = ARRAY_SIZE(s->reply_post) - 1; + QEMU_BUILD_BUG_ON(ARRAY_SIZE(s->reply_post) != ARRAY_SIZE(s->reply_free)); + + reply.RequestFrameSize = 128; + reply.ProductID = MPTSAS1068_PRODUCT_ID; + reply.CurrentHostMfaHighAddr = s->host_mfa_high_addr >> 32; + reply.GlobalCredits = ARRAY_SIZE(s->request_post) - 1; + reply.NumberOfPorts = MPTSAS_NUM_PORTS; + reply.CurrentSenseBufferHighAddr = s->sense_buffer_high_addr >> 32; + reply.CurReplyFrameSize = s->reply_frame_size; + reply.MaxDevices = s->max_devices; + reply.MaxBuses = s->max_buses; + reply.FWVersionDev = 0; + reply.FWVersionUnit = 0x92; + reply.FWVersionMinor = 0x32; + reply.FWVersionMajor = 0x1; + + mptsas_fix_ioc_facts_reply_endianness(&reply); + mptsas_reply(s, (MPIDefaultReply *)&reply); +} + +static void mptsas_process_port_facts(MPTSASState *s, + MPIMsgPortFacts *req) +{ + MPIMsgPortFactsReply reply; + + mptsas_fix_port_facts_endianness(req); + + QEMU_BUILD_BUG_ON(MPTSAS_MAX_REQUEST_SIZE < sizeof(*req)); + QEMU_BUILD_BUG_ON(sizeof(s->doorbell_msg) < sizeof(*req)); + QEMU_BUILD_BUG_ON(sizeof(s->doorbell_reply) < sizeof(reply)); + + memset(&reply, 0, sizeof(reply)); + reply.MsgLength = sizeof(reply) / 4; + reply.Function = req->Function; + reply.PortNumber = req->PortNumber; + reply.MsgContext = req->MsgContext; + + if (req->PortNumber < MPTSAS_NUM_PORTS) { + reply.PortType = MPI_PORTFACTS_PORTTYPE_SAS; + reply.MaxDevices = MPTSAS_NUM_PORTS; + reply.PortSCSIID = MPTSAS_NUM_PORTS; + reply.ProtocolFlags = MPI_PORTFACTS_PROTOCOL_LOGBUSADDR | MPI_PORTFACTS_PROTOCOL_INITIATOR; + } + + mptsas_fix_port_facts_reply_endianness(&reply); + mptsas_reply(s, (MPIDefaultReply *)&reply); +} + +static void mptsas_process_port_enable(MPTSASState *s, + MPIMsgPortEnable *req) +{ + MPIMsgPortEnableReply reply; + + mptsas_fix_port_enable_endianness(req); + + QEMU_BUILD_BUG_ON(MPTSAS_MAX_REQUEST_SIZE < sizeof(*req)); + QEMU_BUILD_BUG_ON(sizeof(s->doorbell_msg) < sizeof(*req)); + QEMU_BUILD_BUG_ON(sizeof(s->doorbell_reply) < sizeof(reply)); + + memset(&reply, 0, sizeof(reply)); + reply.MsgLength = sizeof(reply) / 4; + reply.PortNumber = req->PortNumber; + reply.Function = req->Function; + reply.MsgContext = req->MsgContext; + + mptsas_fix_port_enable_reply_endianness(&reply); + mptsas_reply(s, 
(MPIDefaultReply *)&reply); +} + +static void mptsas_process_event_notification(MPTSASState *s, + MPIMsgEventNotify *req) +{ + MPIMsgEventNotifyReply reply; + + mptsas_fix_event_notification_endianness(req); + + QEMU_BUILD_BUG_ON(MPTSAS_MAX_REQUEST_SIZE < sizeof(*req)); + QEMU_BUILD_BUG_ON(sizeof(s->doorbell_msg) < sizeof(*req)); + QEMU_BUILD_BUG_ON(sizeof(s->doorbell_reply) < sizeof(reply)); + + /* Don't even bother storing whether event notification is enabled, + * since it is not accessible. + */ + + memset(&reply, 0, sizeof(reply)); + reply.EventDataLength = sizeof(reply.Data) / 4; + reply.MsgLength = sizeof(reply) / 4; + reply.Function = req->Function; + + /* This is set because events are sent through the reply FIFOs. */ + reply.MsgFlags = MPI_MSGFLAGS_CONTINUATION_REPLY; + + reply.MsgContext = req->MsgContext; + reply.Event = MPI_EVENT_EVENT_CHANGE; + reply.Data[0] = !!req->Switch; + + mptsas_fix_event_notification_reply_endianness(&reply); + mptsas_reply(s, (MPIDefaultReply *)&reply); +} + +static void mptsas_process_message(MPTSASState *s, MPIRequestHeader *req) +{ + trace_mptsas_process_message(s, req->Function, req->MsgContext); + switch (req->Function) { + case MPI_FUNCTION_SCSI_TASK_MGMT: + mptsas_process_scsi_task_mgmt(s, (MPIMsgSCSITaskMgmt *)req); + break; + + case MPI_FUNCTION_IOC_INIT: + mptsas_process_ioc_init(s, (MPIMsgIOCInit *)req); + break; + + case MPI_FUNCTION_IOC_FACTS: + mptsas_process_ioc_facts(s, (MPIMsgIOCFacts *)req); + break; + + case MPI_FUNCTION_PORT_FACTS: + mptsas_process_port_facts(s, (MPIMsgPortFacts *)req); + break; + + case MPI_FUNCTION_PORT_ENABLE: + mptsas_process_port_enable(s, (MPIMsgPortEnable *)req); + break; + + case MPI_FUNCTION_EVENT_NOTIFICATION: + mptsas_process_event_notification(s, (MPIMsgEventNotify *)req); + break; + + case MPI_FUNCTION_CONFIG: + mptsas_process_config(s, (MPIMsgConfig *)req); + break; + + default: + trace_mptsas_unhandled_cmd(s, req->Function, 0); + mptsas_set_fault(s, MPI_IOCSTATUS_INVALID_FUNCTION); + break; + } +} + +static void mptsas_fetch_request(MPTSASState *s) +{ + PCIDevice *pci = (PCIDevice *) s; + char req[MPTSAS_MAX_REQUEST_SIZE]; + MPIRequestHeader *hdr = (MPIRequestHeader *)req; + hwaddr addr; + int size; + + /* Read the message header from the guest first. */ + addr = s->host_mfa_high_addr | MPTSAS_FIFO_GET(s, request_post); + pci_dma_read(pci, addr, req, sizeof(*hdr)); + + if (hdr->Function < ARRAY_SIZE(mpi_request_sizes) && + mpi_request_sizes[hdr->Function]) { + /* Read the rest of the request based on the type. Do not + * reread everything, as that could cause a TOC/TOU mismatch + * and leak data from the QEMU stack. + */ + size = mpi_request_sizes[hdr->Function]; + assert(size <= MPTSAS_MAX_REQUEST_SIZE); + pci_dma_read(pci, addr + sizeof(*hdr), &req[sizeof(*hdr)], + size - sizeof(*hdr)); + } + + if (hdr->Function == MPI_FUNCTION_SCSI_IO_REQUEST) { + /* SCSI I/O requests are separate from mptsas_process_message + * because they cannot be sent through the doorbell yet. 
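+ * They also need the request frame's guest physical address so that + * mptsas_build_sgl can locate the scatter/gather list.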
+ */ + mptsas_process_scsi_io_request(s, (MPIMsgSCSIIORequest *)req, addr); + } else { + mptsas_process_message(s, (MPIRequestHeader *)req); + } +} + +static void mptsas_fetch_requests(void *opaque) +{ + MPTSASState *s = opaque; + + if (s->state != MPI_IOC_STATE_OPERATIONAL) { + mptsas_set_fault(s, MPI_IOCSTATUS_INVALID_STATE); + return; + } + while (!MPTSAS_FIFO_EMPTY(s, request_post)) { + mptsas_fetch_request(s); + } +} + +static void mptsas_soft_reset(MPTSASState *s) +{ + uint32_t save_mask; + + trace_mptsas_reset(s); + + /* Temporarily disable interrupts */ + save_mask = s->intr_mask; + s->intr_mask = MPI_HIM_DIM | MPI_HIM_RIM; + mptsas_update_interrupt(s); + + qbus_reset_all(BUS(&s->bus)); + s->intr_status = 0; + s->intr_mask = save_mask; + + s->reply_free_tail = 0; + s->reply_free_head = 0; + s->reply_post_tail = 0; + s->reply_post_head = 0; + s->request_post_tail = 0; + s->request_post_head = 0; + qemu_bh_cancel(s->request_bh); + + s->state = MPI_IOC_STATE_READY; +} + +static uint32_t mptsas_doorbell_read(MPTSASState *s) +{ + uint32_t ret; + + ret = (s->who_init << MPI_DOORBELL_WHO_INIT_SHIFT) & MPI_DOORBELL_WHO_INIT_MASK; + ret |= s->state; + switch (s->doorbell_state) { + case DOORBELL_NONE: + break; + + case DOORBELL_WRITE: + ret |= MPI_DOORBELL_ACTIVE; + break; + + case DOORBELL_READ: + /* Get rid of the IOC fault code. */ + ret &= ~MPI_DOORBELL_DATA_MASK; + + assert(s->intr_status & MPI_HIS_DOORBELL_INTERRUPT); + assert(s->doorbell_reply_idx <= s->doorbell_reply_size); + + ret |= MPI_DOORBELL_ACTIVE; + if (s->doorbell_reply_idx < s->doorbell_reply_size) { + /* For more information about this endian switch, see the + * commit message for commit 36b62ae ("fw_cfg: fix endianness in + * fw_cfg_data_mem_read() / _write()", 2015-01-16). + */ + ret |= le16_to_cpu(s->doorbell_reply[s->doorbell_reply_idx++]); + } + break; + + default: + abort(); + } + + return ret; +} + +static void mptsas_doorbell_write(MPTSASState *s, uint32_t val) +{ + if (s->doorbell_state == DOORBELL_WRITE) { + if (s->doorbell_idx < s->doorbell_cnt) { + /* For more information about this endian switch, see the + * commit message for commit 36b62ae ("fw_cfg: fix endianness in + * fw_cfg_data_mem_read() / _write()", 2015-01-16). + */ + s->doorbell_msg[s->doorbell_idx++] = cpu_to_le32(val); + if (s->doorbell_idx == s->doorbell_cnt) { + mptsas_process_message(s, (MPIRequestHeader *)s->doorbell_msg); + } + } + return; + } + + switch ((val & MPI_DOORBELL_FUNCTION_MASK) >> MPI_DOORBELL_FUNCTION_SHIFT) { + case MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET: + mptsas_soft_reset(s); + break; + case MPI_FUNCTION_IO_UNIT_RESET: + break; + case MPI_FUNCTION_HANDSHAKE: + s->doorbell_state = DOORBELL_WRITE; + s->doorbell_idx = 0; + s->doorbell_cnt = (val & MPI_DOORBELL_ADD_DWORDS_MASK) + >> MPI_DOORBELL_ADD_DWORDS_SHIFT; + s->intr_status |= MPI_HIS_DOORBELL_INTERRUPT; + mptsas_update_interrupt(s); + break; + default: + trace_mptsas_unhandled_doorbell_cmd(s, val); + break; + } +} + +static void mptsas_write_sequence_write(MPTSASState *s, uint32_t val) +{ + /* If the diagnostic register is enabled, any write to this register + * will disable it. Otherwise, the guest has to do a magic five-write + * sequence. 
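+ * The keys are MPI_WRSEQ_1ST_KEY_VALUE through MPI_WRSEQ_5TH_KEY_VALUE, + * matched one per write below; any wrong key restarts the sequence.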
+ */ + if (s->diagnostic & MPI_DIAG_DRWE) { + goto disable; + } + + switch (s->diagnostic_idx) { + case 0: + if ((val & MPI_WRSEQ_KEY_VALUE_MASK) != MPI_WRSEQ_1ST_KEY_VALUE) { + goto disable; + } + break; + case 1: + if ((val & MPI_WRSEQ_KEY_VALUE_MASK) != MPI_WRSEQ_2ND_KEY_VALUE) { + goto disable; + } + break; + case 2: + if ((val & MPI_WRSEQ_KEY_VALUE_MASK) != MPI_WRSEQ_3RD_KEY_VALUE) { + goto disable; + } + break; + case 3: + if ((val & MPI_WRSEQ_KEY_VALUE_MASK) != MPI_WRSEQ_4TH_KEY_VALUE) { + goto disable; + } + break; + case 4: + if ((val & MPI_WRSEQ_KEY_VALUE_MASK) != MPI_WRSEQ_5TH_KEY_VALUE) { + goto disable; + } + /* Prepare Spaceball One for departure, and change the + * combination on my luggage! + */ + s->diagnostic |= MPI_DIAG_DRWE; + break; + } + s->diagnostic_idx++; + return; + +disable: + s->diagnostic &= ~MPI_DIAG_DRWE; + s->diagnostic_idx = 0; +} + +static int mptsas_hard_reset(MPTSASState *s) +{ + mptsas_soft_reset(s); + + s->intr_mask = MPI_HIM_DIM | MPI_HIM_RIM; + + s->host_mfa_high_addr = 0; + s->sense_buffer_high_addr = 0; + s->reply_frame_size = 0; + s->max_devices = MPTSAS_NUM_PORTS; + s->max_buses = 1; + + return 0; +} + +static void mptsas_interrupt_status_write(MPTSASState *s) +{ + switch (s->doorbell_state) { + case DOORBELL_NONE: + case DOORBELL_WRITE: + s->intr_status &= ~MPI_HIS_DOORBELL_INTERRUPT; + break; + + case DOORBELL_READ: + /* The reply can be read continuously, so leave the interrupt up. */ + assert(s->intr_status & MPI_HIS_DOORBELL_INTERRUPT); + if (s->doorbell_reply_idx == s->doorbell_reply_size) { + s->doorbell_state = DOORBELL_NONE; + } + break; + + default: + abort(); + } + mptsas_update_interrupt(s); +} + +static uint32_t mptsas_reply_post_read(MPTSASState *s) +{ + uint32_t ret; + + if (!MPTSAS_FIFO_EMPTY(s, reply_post)) { + ret = MPTSAS_FIFO_GET(s, reply_post); + } else { + ret = -1; + s->intr_status &= ~MPI_HIS_REPLY_MESSAGE_INTERRUPT; + mptsas_update_interrupt(s); + } + + return ret; +} + +static uint64_t mptsas_mmio_read(void *opaque, hwaddr addr, + unsigned size) +{ + MPTSASState *s = opaque; + uint32_t ret = 0; + + switch (addr & ~3) { + case MPI_DOORBELL_OFFSET: + ret = mptsas_doorbell_read(s); + break; + + case MPI_DIAGNOSTIC_OFFSET: + ret = s->diagnostic; + break; + + case MPI_HOST_INTERRUPT_STATUS_OFFSET: + ret = s->intr_status; + break; + + case MPI_HOST_INTERRUPT_MASK_OFFSET: + ret = s->intr_mask; + break; + + case MPI_REPLY_POST_FIFO_OFFSET: + ret = mptsas_reply_post_read(s); + break; + + default: + trace_mptsas_mmio_unhandled_read(s, addr); + break; + } + trace_mptsas_mmio_read(s, addr, ret); + return ret; +} + +static void mptsas_mmio_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + MPTSASState *s = opaque; + + trace_mptsas_mmio_write(s, addr, val); + switch (addr) { + case MPI_DOORBELL_OFFSET: + mptsas_doorbell_write(s, val); + break; + + case MPI_WRITE_SEQUENCE_OFFSET: + mptsas_write_sequence_write(s, val); + break; + + case MPI_DIAGNOSTIC_OFFSET: + if (val & MPI_DIAG_RESET_ADAPTER) { + mptsas_hard_reset(s); + } + break; + + case MPI_HOST_INTERRUPT_STATUS_OFFSET: + mptsas_interrupt_status_write(s); + break; + + case MPI_HOST_INTERRUPT_MASK_OFFSET: + s->intr_mask = val & (MPI_HIM_RIM | MPI_HIM_DIM); + mptsas_update_interrupt(s); + break; + + case MPI_REQUEST_POST_FIFO_OFFSET: + if (MPTSAS_FIFO_FULL(s, request_post)) { + mptsas_set_fault(s, MPI_IOCSTATUS_INSUFFICIENT_RESOURCES); + } else { + MPTSAS_FIFO_PUT(s, request_post, val & ~0x03); + qemu_bh_schedule(s->request_bh); + } + break; + + case 
MPI_REPLY_FREE_FIFO_OFFSET: + if (MPTSAS_FIFO_FULL(s, reply_free)) { + mptsas_set_fault(s, MPI_IOCSTATUS_INSUFFICIENT_RESOURCES); + } else { + MPTSAS_FIFO_PUT(s, reply_free, val); + } + break; + + default: + trace_mptsas_mmio_unhandled_write(s, addr, val); + break; + } +} + +static const MemoryRegionOps mptsas_mmio_ops = { + .read = mptsas_mmio_read, + .write = mptsas_mmio_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .impl = { + .min_access_size = 4, + .max_access_size = 4, + } +}; + +static const MemoryRegionOps mptsas_port_ops = { + .read = mptsas_mmio_read, + .write = mptsas_mmio_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .impl = { + .min_access_size = 4, + .max_access_size = 4, + } +}; + +static uint64_t mptsas_diag_read(void *opaque, hwaddr addr, + unsigned size) +{ + MPTSASState *s = opaque; + trace_mptsas_diag_read(s, addr, 0); + return 0; +} + +static void mptsas_diag_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + MPTSASState *s = opaque; + trace_mptsas_diag_write(s, addr, val); +} + +static const MemoryRegionOps mptsas_diag_ops = { + .read = mptsas_diag_read, + .write = mptsas_diag_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .impl = { + .min_access_size = 4, + .max_access_size = 4, + } +}; + +static QEMUSGList *mptsas_get_sg_list(SCSIRequest *sreq) +{ + MPTSASRequest *req = sreq->hba_private; + + return &req->qsg; +} + +static void mptsas_command_complete(SCSIRequest *sreq, + size_t resid) +{ + MPTSASRequest *req = sreq->hba_private; + MPTSASState *s = req->dev; + uint8_t sense_buf[SCSI_SENSE_BUF_SIZE]; + uint8_t sense_len; + + hwaddr sense_buffer_addr = req->dev->sense_buffer_high_addr | + req->scsi_io.SenseBufferLowAddr; + + trace_mptsas_command_complete(s, req->scsi_io.MsgContext, + sreq->status, resid); + + sense_len = scsi_req_get_sense(sreq, sense_buf, SCSI_SENSE_BUF_SIZE); + if (sense_len > 0) { + pci_dma_write(PCI_DEVICE(s), sense_buffer_addr, sense_buf, + MIN(req->scsi_io.SenseBufferLength, sense_len)); + } + + if (sreq->status != GOOD || resid || + req->dev->doorbell_state == DOORBELL_WRITE) { + MPIMsgSCSIIOReply reply; + + memset(&reply, 0, sizeof(reply)); + reply.TargetID = req->scsi_io.TargetID; + reply.Bus = req->scsi_io.Bus; + reply.MsgLength = sizeof(reply) / 4; + reply.Function = req->scsi_io.Function; + reply.CDBLength = req->scsi_io.CDBLength; + reply.SenseBufferLength = req->scsi_io.SenseBufferLength; + reply.MsgFlags = req->scsi_io.MsgFlags; + reply.MsgContext = req->scsi_io.MsgContext; + reply.SCSIStatus = sreq->status; + if (sreq->status == GOOD) { + reply.TransferCount = req->scsi_io.DataLength - resid; + if (resid) { + reply.IOCStatus = MPI_IOCSTATUS_SCSI_DATA_UNDERRUN; + } + } else { + reply.SCSIState = MPI_SCSI_STATE_AUTOSENSE_VALID; + reply.SenseCount = sense_len; + reply.IOCStatus = MPI_IOCSTATUS_SCSI_DATA_UNDERRUN; + } + + mptsas_fix_scsi_io_reply_endianness(&reply); + mptsas_post_reply(req->dev, (MPIDefaultReply *)&reply); + } else { + mptsas_turbo_reply(req->dev, req->scsi_io.MsgContext); + } + + mptsas_free_request(req); +} + +static void mptsas_request_cancelled(SCSIRequest *sreq) +{ + MPTSASRequest *req = sreq->hba_private; + MPIMsgSCSIIOReply reply; + + memset(&reply, 0, sizeof(reply)); + reply.TargetID = req->scsi_io.TargetID; + reply.Bus = req->scsi_io.Bus; + reply.MsgLength = sizeof(reply) / 4; + reply.Function = req->scsi_io.Function; + reply.CDBLength = req->scsi_io.CDBLength; + reply.SenseBufferLength = req->scsi_io.SenseBufferLength; + reply.MsgFlags = req->scsi_io.MsgFlags; + reply.MsgContext = 
req->scsi_io.MsgContext; + reply.SCSIState = MPI_SCSI_STATE_NO_SCSI_STATUS; + reply.IOCStatus = MPI_IOCSTATUS_SCSI_TASK_TERMINATED; + + mptsas_fix_scsi_io_reply_endianness(&reply); + mptsas_post_reply(req->dev, (MPIDefaultReply *)&reply); + mptsas_free_request(req); +} + +static void mptsas_save_request(QEMUFile *f, SCSIRequest *sreq) +{ + MPTSASRequest *req = sreq->hba_private; + int i; + + qemu_put_buffer(f, (unsigned char *)&req->scsi_io, sizeof(req->scsi_io)); + qemu_put_be32(f, req->qsg.nsg); + for (i = 0; i < req->qsg.nsg; i++) { + qemu_put_be64(f, req->qsg.sg[i].base); + qemu_put_be64(f, req->qsg.sg[i].len); + } +} + +static void *mptsas_load_request(QEMUFile *f, SCSIRequest *sreq) +{ + SCSIBus *bus = sreq->bus; + MPTSASState *s = container_of(bus, MPTSASState, bus); + PCIDevice *pci = PCI_DEVICE(s); + MPTSASRequest *req; + int i, n; + + req = g_new(MPTSASRequest, 1); + qemu_get_buffer(f, (unsigned char *)&req->scsi_io, sizeof(req->scsi_io)); + + n = qemu_get_be32(f); + /* TODO: add a way for SCSIBusInfo's load_request to fail, + * and fail migration instead of asserting here. + * This is just one thing (there are probably more) that must be + * fixed before we can allow NDEBUG compilation. + */ + assert(n >= 0); + + pci_dma_sglist_init(&req->qsg, pci, n); + for (i = 0; i < n; i++) { + uint64_t base = qemu_get_be64(f); + uint64_t len = qemu_get_be64(f); + qemu_sglist_add(&req->qsg, base, len); + } + + scsi_req_ref(sreq); + req->sreq = sreq; + req->dev = s; + + return req; +} + +static const struct SCSIBusInfo mptsas_scsi_info = { + .tcq = true, + .max_target = MPTSAS_NUM_PORTS, + .max_lun = 1, + + .get_sg_list = mptsas_get_sg_list, + .complete = mptsas_command_complete, + .cancel = mptsas_request_cancelled, + .save_request = mptsas_save_request, + .load_request = mptsas_load_request, +}; + +static void mptsas_scsi_realize(PCIDevice *dev, Error **errp) +{ + MPTSASState *s = MPT_SAS(dev); + Error *err = NULL; + int ret; + + dev->config[PCI_LATENCY_TIMER] = 0; + dev->config[PCI_INTERRUPT_PIN] = 0x01; + + if (s->msi != ON_OFF_AUTO_OFF) { + ret = msi_init(dev, 0, 1, true, false, &err); + /* Any error other than -ENOTSUP(board's MSI support is broken) + * is a programming error */ + assert(!ret || ret == -ENOTSUP); + if (ret && s->msi == ON_OFF_AUTO_ON) { + /* Can't satisfy user's explicit msi=on request, fail */ + error_append_hint(&err, "You have to use msi=auto (default) or " + "msi=off with this machine type.\n"); + error_propagate(errp, err); + return; + } + assert(!err || s->msi == ON_OFF_AUTO_AUTO); + /* With msi=auto, we fall back to MSI off silently */ + error_free(err); + + /* Only used for migration. 
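+ * msi_in_use is carried in vmstate_mptsas (VMSTATE_BOOL below), so the + * destination of a migration knows whether MSI was active.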
*/ + s->msi_in_use = (ret == 0); + } + + memory_region_init_io(&s->mmio_io, OBJECT(s), &mptsas_mmio_ops, s, + "mptsas-mmio", 0x4000); + memory_region_init_io(&s->port_io, OBJECT(s), &mptsas_port_ops, s, + "mptsas-io", 256); + memory_region_init_io(&s->diag_io, OBJECT(s), &mptsas_diag_ops, s, + "mptsas-diag", 0x10000); + + pci_register_bar(dev, 0, PCI_BASE_ADDRESS_SPACE_IO, &s->port_io); + pci_register_bar(dev, 1, PCI_BASE_ADDRESS_SPACE_MEMORY | + PCI_BASE_ADDRESS_MEM_TYPE_32, &s->mmio_io); + pci_register_bar(dev, 2, PCI_BASE_ADDRESS_SPACE_MEMORY | + PCI_BASE_ADDRESS_MEM_TYPE_32, &s->diag_io); + + if (!s->sas_addr) { + s->sas_addr = ((NAA_LOCALLY_ASSIGNED_ID << 24) | + IEEE_COMPANY_LOCALLY_ASSIGNED) << 36; + s->sas_addr |= (pci_dev_bus_num(dev) << 16); + s->sas_addr |= (PCI_SLOT(dev->devfn) << 8); + s->sas_addr |= PCI_FUNC(dev->devfn); + } + s->max_devices = MPTSAS_NUM_PORTS; + + s->request_bh = qemu_bh_new(mptsas_fetch_requests, s); + + scsi_bus_init(&s->bus, sizeof(s->bus), &dev->qdev, &mptsas_scsi_info); +} + +static void mptsas_scsi_uninit(PCIDevice *dev) +{ + MPTSASState *s = MPT_SAS(dev); + + qemu_bh_delete(s->request_bh); + msi_uninit(dev); +} + +static void mptsas_reset(DeviceState *dev) +{ + MPTSASState *s = MPT_SAS(dev); + + mptsas_hard_reset(s); +} + +static int mptsas_post_load(void *opaque, int version_id) +{ + MPTSASState *s = opaque; + + if (s->doorbell_idx > s->doorbell_cnt || + s->doorbell_cnt > ARRAY_SIZE(s->doorbell_msg) || + s->doorbell_reply_idx > s->doorbell_reply_size || + s->doorbell_reply_size > ARRAY_SIZE(s->doorbell_reply) || + MPTSAS_FIFO_INVALID(s, request_post) || + MPTSAS_FIFO_INVALID(s, reply_post) || + MPTSAS_FIFO_INVALID(s, reply_free) || + s->diagnostic_idx > 4) { + return -EINVAL; + } + + return 0; +} + +static const VMStateDescription vmstate_mptsas = { + .name = "mptsas", + .version_id = 0, + .minimum_version_id = 0, + .minimum_version_id_old = 0, + .post_load = mptsas_post_load, + .fields = (VMStateField[]) { + VMSTATE_PCI_DEVICE(dev, MPTSASState), + VMSTATE_BOOL(msi_in_use, MPTSASState), + VMSTATE_UINT32(state, MPTSASState), + VMSTATE_UINT8(who_init, MPTSASState), + VMSTATE_UINT8(doorbell_state, MPTSASState), + VMSTATE_UINT32_ARRAY(doorbell_msg, MPTSASState, 256), + VMSTATE_INT32(doorbell_idx, MPTSASState), + VMSTATE_INT32(doorbell_cnt, MPTSASState), + + VMSTATE_UINT16_ARRAY(doorbell_reply, MPTSASState, 256), + VMSTATE_INT32(doorbell_reply_idx, MPTSASState), + VMSTATE_INT32(doorbell_reply_size, MPTSASState), + + VMSTATE_UINT32(diagnostic, MPTSASState), + VMSTATE_UINT8(diagnostic_idx, MPTSASState), + + VMSTATE_UINT32(intr_status, MPTSASState), + VMSTATE_UINT32(intr_mask, MPTSASState), + + VMSTATE_UINT32_ARRAY(request_post, MPTSASState, + MPTSAS_REQUEST_QUEUE_DEPTH + 1), + VMSTATE_UINT16(request_post_head, MPTSASState), + VMSTATE_UINT16(request_post_tail, MPTSASState), + + VMSTATE_UINT32_ARRAY(reply_post, MPTSASState, + MPTSAS_REPLY_QUEUE_DEPTH + 1), + VMSTATE_UINT16(reply_post_head, MPTSASState), + VMSTATE_UINT16(reply_post_tail, MPTSASState), + + VMSTATE_UINT32_ARRAY(reply_free, MPTSASState, + MPTSAS_REPLY_QUEUE_DEPTH + 1), + VMSTATE_UINT16(reply_free_head, MPTSASState), + VMSTATE_UINT16(reply_free_tail, MPTSASState), + + VMSTATE_UINT16(max_buses, MPTSASState), + VMSTATE_UINT16(max_devices, MPTSASState), + VMSTATE_UINT16(reply_frame_size, MPTSASState), + VMSTATE_UINT64(host_mfa_high_addr, MPTSASState), + VMSTATE_UINT64(sense_buffer_high_addr, MPTSASState), + VMSTATE_END_OF_LIST() + } +}; + +static Property mptsas_properties[] = { + 
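/* A zero sas_address (the default) is replaced at realize time with a + * locally-assigned WWN built from the PCI bus, slot and function. */ +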
DEFINE_PROP_UINT64("sas_address", MPTSASState, sas_addr, 0), + /* TODO: test MSI support under Windows */ + DEFINE_PROP_ON_OFF_AUTO("msi", MPTSASState, msi, ON_OFF_AUTO_AUTO), + DEFINE_PROP_END_OF_LIST(), +}; + +static void mptsas1068_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + PCIDeviceClass *pc = PCI_DEVICE_CLASS(oc); + + pc->realize = mptsas_scsi_realize; + pc->exit = mptsas_scsi_uninit; + pc->romfile = 0; + pc->vendor_id = PCI_VENDOR_ID_LSI_LOGIC; + pc->device_id = PCI_DEVICE_ID_LSI_SAS1068; + pc->subsystem_vendor_id = PCI_VENDOR_ID_LSI_LOGIC; + pc->subsystem_id = 0x8000; + pc->class_id = PCI_CLASS_STORAGE_SCSI; + device_class_set_props(dc, mptsas_properties); + dc->reset = mptsas_reset; + dc->vmsd = &vmstate_mptsas; + dc->desc = "LSI SAS 1068"; + set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); +} + +static const TypeInfo mptsas_info = { + .name = TYPE_MPTSAS1068, + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(MPTSASState), + .class_init = mptsas1068_class_init, + .interfaces = (InterfaceInfo[]) { + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { }, + }, +}; + +static void mptsas_register_types(void) +{ + type_register(&mptsas_info); +} + +type_init(mptsas_register_types) diff --git a/hw/scsi/mptsas.h b/hw/scsi/mptsas.h new file mode 100644 index 000000000..c046497db --- /dev/null +++ b/hw/scsi/mptsas.h @@ -0,0 +1,105 @@ +#ifndef MPTSAS_H +#define MPTSAS_H + +#include "mpi.h" +#include "qom/object.h" + +#define MPTSAS_NUM_PORTS 8 +#define MPTSAS_MAX_FRAMES 2048 /* Firmware limit at 65535 */ + +#define MPTSAS_REQUEST_QUEUE_DEPTH 128 +#define MPTSAS_REPLY_QUEUE_DEPTH 128 + +#define MPTSAS_MAXIMUM_CHAIN_DEPTH 0x22 + +typedef struct MPTSASRequest MPTSASRequest; + +#define TYPE_MPTSAS1068 "mptsas1068" +typedef struct MPTSASState MPTSASState; +DECLARE_INSTANCE_CHECKER(MPTSASState, MPT_SAS, + TYPE_MPTSAS1068) + +enum { + DOORBELL_NONE, + DOORBELL_WRITE, + DOORBELL_READ +}; + +struct MPTSASState { + PCIDevice dev; + MemoryRegion mmio_io; + MemoryRegion port_io; + MemoryRegion diag_io; + QEMUBH *request_bh; + + /* properties */ + OnOffAuto msi; + uint64_t sas_addr; + + bool msi_in_use; + + /* Doorbell register */ + uint32_t state; + uint8_t who_init; + uint8_t doorbell_state; + + /* Buffer for requests that are sent through the doorbell register. 
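+ * The handshake delivers one 32-bit word per doorbell write until + * doorbell_cnt words have arrived; see mptsas_doorbell_write.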
*/ + uint32_t doorbell_msg[256]; + int doorbell_idx; + int doorbell_cnt; + + uint16_t doorbell_reply[256]; + int doorbell_reply_idx; + int doorbell_reply_size; + + /* Other registers */ + uint8_t diagnostic_idx; + uint32_t diagnostic; + uint32_t intr_mask; + uint32_t intr_status; + + /* Request queues */ + uint32_t request_post[MPTSAS_REQUEST_QUEUE_DEPTH + 1]; + uint16_t request_post_head; + uint16_t request_post_tail; + + uint32_t reply_post[MPTSAS_REPLY_QUEUE_DEPTH + 1]; + uint16_t reply_post_head; + uint16_t reply_post_tail; + + uint32_t reply_free[MPTSAS_REPLY_QUEUE_DEPTH + 1]; + uint16_t reply_free_head; + uint16_t reply_free_tail; + + /* IOC Facts */ + hwaddr host_mfa_high_addr; + hwaddr sense_buffer_high_addr; + uint16_t max_devices; + uint16_t max_buses; + uint16_t reply_frame_size; + + SCSIBus bus; +}; + +void mptsas_fix_scsi_io_endianness(MPIMsgSCSIIORequest *req); +void mptsas_fix_scsi_io_reply_endianness(MPIMsgSCSIIOReply *reply); +void mptsas_fix_scsi_task_mgmt_endianness(MPIMsgSCSITaskMgmt *req); +void mptsas_fix_scsi_task_mgmt_reply_endianness(MPIMsgSCSITaskMgmtReply *reply); +void mptsas_fix_ioc_init_endianness(MPIMsgIOCInit *req); +void mptsas_fix_ioc_init_reply_endianness(MPIMsgIOCInitReply *reply); +void mptsas_fix_ioc_facts_endianness(MPIMsgIOCFacts *req); +void mptsas_fix_ioc_facts_reply_endianness(MPIMsgIOCFactsReply *reply); +void mptsas_fix_config_endianness(MPIMsgConfig *req); +void mptsas_fix_config_reply_endianness(MPIMsgConfigReply *reply); +void mptsas_fix_port_facts_endianness(MPIMsgPortFacts *req); +void mptsas_fix_port_facts_reply_endianness(MPIMsgPortFactsReply *reply); +void mptsas_fix_port_enable_endianness(MPIMsgPortEnable *req); +void mptsas_fix_port_enable_reply_endianness(MPIMsgPortEnableReply *reply); +void mptsas_fix_event_notification_endianness(MPIMsgEventNotify *req); +void mptsas_fix_event_notification_reply_endianness(MPIMsgEventNotifyReply *reply); + +void mptsas_reply(MPTSASState *s, MPIDefaultReply *reply); + +void mptsas_process_config(MPTSASState *s, MPIMsgConfig *req); + +#endif /* MPTSAS_H */ diff --git a/hw/scsi/scsi-bus.c b/hw/scsi/scsi-bus.c new file mode 100644 index 000000000..77325d8cc --- /dev/null +++ b/hw/scsi/scsi-bus.c @@ -0,0 +1,1855 @@ +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/error-report.h" +#include "qemu/module.h" +#include "qemu/option.h" +#include "hw/qdev-properties.h" +#include "hw/scsi/scsi.h" +#include "migration/qemu-file-types.h" +#include "migration/vmstate.h" +#include "scsi/constants.h" +#include "sysemu/block-backend.h" +#include "sysemu/blockdev.h" +#include "sysemu/sysemu.h" +#include "sysemu/runstate.h" +#include "trace.h" +#include "sysemu/dma.h" +#include "qemu/cutils.h" + +static char *scsibus_get_dev_path(DeviceState *dev); +static char *scsibus_get_fw_dev_path(DeviceState *dev); +static void scsi_req_dequeue(SCSIRequest *req); +static uint8_t *scsi_target_alloc_buf(SCSIRequest *req, size_t len); +static void scsi_target_free_buf(SCSIRequest *req); + +static int next_scsi_bus; + +static SCSIDevice *do_scsi_device_find(SCSIBus *bus, + int channel, int id, int lun, + bool include_unrealized) +{ + BusChild *kid; + SCSIDevice *retval = NULL; + + QTAILQ_FOREACH_RCU(kid, &bus->qbus.children, sibling) { + DeviceState *qdev = kid->child; + SCSIDevice *dev = SCSI_DEVICE(qdev); + + if (dev->channel == channel && dev->id == id) { + if (dev->lun == lun) { + retval = dev; + break; + } + + /* + * If we don't find exact match (channel/bus/lun), + * we will return the first device which matches 
channel/bus + */ + + if (!retval) { + retval = dev; + } + } + } + + /* + * This function might run on the IO thread and we might race against + * main thread hot-plugging the device. + * We assume that as soon as .realized is set to true we can let + * the user access the device. + */ + + if (retval && !include_unrealized && + !qatomic_load_acquire(&retval->qdev.realized)) { + retval = NULL; + } + + return retval; +} + +SCSIDevice *scsi_device_find(SCSIBus *bus, int channel, int id, int lun) +{ + RCU_READ_LOCK_GUARD(); + return do_scsi_device_find(bus, channel, id, lun, false); +} + +SCSIDevice *scsi_device_get(SCSIBus *bus, int channel, int id, int lun) +{ + SCSIDevice *d; + RCU_READ_LOCK_GUARD(); + d = do_scsi_device_find(bus, channel, id, lun, false); + if (d) { + object_ref(d); + } + return d; +} + +static void scsi_device_realize(SCSIDevice *s, Error **errp) +{ + SCSIDeviceClass *sc = SCSI_DEVICE_GET_CLASS(s); + if (sc->realize) { + sc->realize(s, errp); + } +} + +static void scsi_device_unrealize(SCSIDevice *s) +{ + SCSIDeviceClass *sc = SCSI_DEVICE_GET_CLASS(s); + if (sc->unrealize) { + sc->unrealize(s); + } +} + +int scsi_bus_parse_cdb(SCSIDevice *dev, SCSICommand *cmd, uint8_t *buf, + void *hba_private) +{ + SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, dev->qdev.parent_bus); + int rc; + + assert(cmd->len == 0); + rc = scsi_req_parse_cdb(dev, cmd, buf); + if (bus->info->parse_cdb) { + rc = bus->info->parse_cdb(dev, cmd, buf, hba_private); + } + return rc; +} + +static SCSIRequest *scsi_device_alloc_req(SCSIDevice *s, uint32_t tag, uint32_t lun, + uint8_t *buf, void *hba_private) +{ + SCSIDeviceClass *sc = SCSI_DEVICE_GET_CLASS(s); + if (sc->alloc_req) { + return sc->alloc_req(s, tag, lun, buf, hba_private); + } + + return NULL; +} + +void scsi_device_unit_attention_reported(SCSIDevice *s) +{ + SCSIDeviceClass *sc = SCSI_DEVICE_GET_CLASS(s); + if (sc->unit_attention_reported) { + sc->unit_attention_reported(s); + } +} + +/* Create a scsi bus, and attach devices to it. */ +void scsi_bus_init_named(SCSIBus *bus, size_t bus_size, DeviceState *host, + const SCSIBusInfo *info, const char *bus_name) +{ + qbus_init(bus, bus_size, TYPE_SCSI_BUS, host, bus_name); + bus->busnr = next_scsi_bus++; + bus->info = info; + qbus_set_bus_hotplug_handler(BUS(bus)); +} + +static void scsi_dma_restart_bh(void *opaque) +{ + SCSIDevice *s = opaque; + SCSIRequest *req, *next; + + qemu_bh_delete(s->bh); + s->bh = NULL; + + aio_context_acquire(blk_get_aio_context(s->conf.blk)); + QTAILQ_FOREACH_SAFE(req, &s->requests, next, next) { + scsi_req_ref(req); + if (req->retry) { + req->retry = false; + switch (req->cmd.mode) { + case SCSI_XFER_FROM_DEV: + case SCSI_XFER_TO_DEV: + scsi_req_continue(req); + break; + case SCSI_XFER_NONE: + scsi_req_dequeue(req); + scsi_req_enqueue(req); + break; + } + } + scsi_req_unref(req); + } + aio_context_release(blk_get_aio_context(s->conf.blk)); + /* Drop the reference that was acquired in scsi_dma_restart_cb */ + object_unref(OBJECT(s)); +} + +void scsi_req_retry(SCSIRequest *req) +{ + /* No need to save a reference, because scsi_dma_restart_bh just + * looks at the request list. 
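+ * The request itself stays alive through the reference taken by + * scsi_req_enqueue_internal when it was queued.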
*/ + req->retry = true; +} + +static void scsi_dma_restart_cb(void *opaque, bool running, RunState state) +{ + SCSIDevice *s = opaque; + + if (!running) { + return; + } + if (!s->bh) { + AioContext *ctx = blk_get_aio_context(s->conf.blk); + /* The reference is dropped in scsi_dma_restart_bh.*/ + object_ref(OBJECT(s)); + s->bh = aio_bh_new(ctx, scsi_dma_restart_bh, s); + qemu_bh_schedule(s->bh); + } +} + +static bool scsi_bus_is_address_free(SCSIBus *bus, + int channel, int target, int lun, + SCSIDevice **p_dev) +{ + SCSIDevice *d; + + RCU_READ_LOCK_GUARD(); + d = do_scsi_device_find(bus, channel, target, lun, true); + if (d && d->lun == lun) { + if (p_dev) { + *p_dev = d; + } + return false; + } + if (p_dev) { + *p_dev = NULL; + } + return true; +} + +static bool scsi_bus_check_address(BusState *qbus, DeviceState *qdev, Error **errp) +{ + SCSIDevice *dev = SCSI_DEVICE(qdev); + SCSIBus *bus = SCSI_BUS(qbus); + + if (dev->channel > bus->info->max_channel) { + error_setg(errp, "bad scsi channel id: %d", dev->channel); + return false; + } + if (dev->id != -1 && dev->id > bus->info->max_target) { + error_setg(errp, "bad scsi device id: %d", dev->id); + return false; + } + if (dev->lun != -1 && dev->lun > bus->info->max_lun) { + error_setg(errp, "bad scsi device lun: %d", dev->lun); + return false; + } + + if (dev->id != -1 && dev->lun != -1) { + SCSIDevice *d; + if (!scsi_bus_is_address_free(bus, dev->channel, dev->id, dev->lun, &d)) { + error_setg(errp, "lun already used by '%s'", d->qdev.id); + return false; + } + } + + return true; +} + +static void scsi_qdev_realize(DeviceState *qdev, Error **errp) +{ + SCSIDevice *dev = SCSI_DEVICE(qdev); + SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, dev->qdev.parent_bus); + bool is_free; + Error *local_err = NULL; + + if (dev->id == -1) { + int id = -1; + if (dev->lun == -1) { + dev->lun = 0; + } + do { + is_free = scsi_bus_is_address_free(bus, dev->channel, ++id, dev->lun, NULL); + } while (!is_free && id < bus->info->max_target); + if (!is_free) { + error_setg(errp, "no free target"); + return; + } + dev->id = id; + } else if (dev->lun == -1) { + int lun = -1; + do { + is_free = scsi_bus_is_address_free(bus, dev->channel, dev->id, ++lun, NULL); + } while (!is_free && lun < bus->info->max_lun); + if (!is_free) { + error_setg(errp, "no free lun"); + return; + } + dev->lun = lun; + } + + QTAILQ_INIT(&dev->requests); + scsi_device_realize(dev, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + dev->vmsentry = qdev_add_vm_change_state_handler(DEVICE(dev), + scsi_dma_restart_cb, dev); +} + +static void scsi_qdev_unrealize(DeviceState *qdev) +{ + SCSIDevice *dev = SCSI_DEVICE(qdev); + + if (dev->vmsentry) { + qemu_del_vm_change_state_handler(dev->vmsentry); + } + + scsi_device_purge_requests(dev, SENSE_CODE(NO_SENSE)); + + scsi_device_unrealize(dev); + + blockdev_mark_auto_del(dev->conf.blk); +} + +/* handle legacy '-drive if=scsi,...' 
cmd line args */ +SCSIDevice *scsi_bus_legacy_add_drive(SCSIBus *bus, BlockBackend *blk, + int unit, bool removable, int bootindex, + bool share_rw, + BlockdevOnError rerror, + BlockdevOnError werror, + const char *serial, Error **errp) +{ + const char *driver; + char *name; + DeviceState *dev; + DriveInfo *dinfo; + + if (blk_is_sg(blk)) { + driver = "scsi-generic"; + } else { + dinfo = blk_legacy_dinfo(blk); + if (dinfo && dinfo->media_cd) { + driver = "scsi-cd"; + } else { + driver = "scsi-hd"; + } + } + dev = qdev_new(driver); + name = g_strdup_printf("legacy[%d]", unit); + object_property_add_child(OBJECT(bus), name, OBJECT(dev)); + g_free(name); + + qdev_prop_set_uint32(dev, "scsi-id", unit); + if (bootindex >= 0) { + object_property_set_int(OBJECT(dev), "bootindex", bootindex, + &error_abort); + } + if (object_property_find(OBJECT(dev), "removable")) { + qdev_prop_set_bit(dev, "removable", removable); + } + if (serial && object_property_find(OBJECT(dev), "serial")) { + qdev_prop_set_string(dev, "serial", serial); + } + if (!qdev_prop_set_drive_err(dev, "drive", blk, errp)) { + object_unparent(OBJECT(dev)); + return NULL; + } + if (!object_property_set_bool(OBJECT(dev), "share-rw", share_rw, errp)) { + object_unparent(OBJECT(dev)); + return NULL; + } + + qdev_prop_set_enum(dev, "rerror", rerror); + qdev_prop_set_enum(dev, "werror", werror); + + if (!qdev_realize_and_unref(dev, &bus->qbus, errp)) { + object_unparent(OBJECT(dev)); + return NULL; + } + return SCSI_DEVICE(dev); +} + +void scsi_bus_legacy_handle_cmdline(SCSIBus *bus) +{ + Location loc; + DriveInfo *dinfo; + int unit; + + loc_push_none(&loc); + for (unit = 0; unit <= bus->info->max_target; unit++) { + dinfo = drive_get(IF_SCSI, bus->busnr, unit); + if (dinfo == NULL) { + continue; + } + qemu_opts_loc_restore(dinfo->opts); + scsi_bus_legacy_add_drive(bus, blk_by_legacy_dinfo(dinfo), + unit, false, -1, false, + BLOCKDEV_ON_ERROR_AUTO, + BLOCKDEV_ON_ERROR_AUTO, + NULL, &error_fatal); + } + loc_pop(&loc); +} + +static int32_t scsi_invalid_field(SCSIRequest *req, uint8_t *buf) +{ + scsi_req_build_sense(req, SENSE_CODE(INVALID_FIELD)); + scsi_req_complete(req, CHECK_CONDITION); + return 0; +} + +static const struct SCSIReqOps reqops_invalid_field = { + .size = sizeof(SCSIRequest), + .send_command = scsi_invalid_field +}; + +/* SCSIReqOps implementation for invalid commands. */ + +static int32_t scsi_invalid_command(SCSIRequest *req, uint8_t *buf) +{ + scsi_req_build_sense(req, SENSE_CODE(INVALID_OPCODE)); + scsi_req_complete(req, CHECK_CONDITION); + return 0; +} + +static const struct SCSIReqOps reqops_invalid_opcode = { + .size = sizeof(SCSIRequest), + .send_command = scsi_invalid_command +}; + +/* SCSIReqOps implementation for unit attention conditions. */ + +static int32_t scsi_unit_attention(SCSIRequest *req, uint8_t *buf) +{ + if (req->dev->unit_attention.key == UNIT_ATTENTION) { + scsi_req_build_sense(req, req->dev->unit_attention); + } else if (req->bus->unit_attention.key == UNIT_ATTENTION) { + scsi_req_build_sense(req, req->bus->unit_attention); + } + scsi_req_complete(req, CHECK_CONDITION); + return 0; +} + +static const struct SCSIReqOps reqops_unit_attention = { + .size = sizeof(SCSIRequest), + .send_command = scsi_unit_attention +}; + +/* SCSIReqOps implementation for REPORT LUNS and for commands sent to + an invalid LUN. 
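+ These commands are emulated by the bus itself and never reach a + SCSIDevice.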
*/ + +typedef struct SCSITargetReq SCSITargetReq; + +struct SCSITargetReq { + SCSIRequest req; + int len; + uint8_t *buf; + int buf_len; +}; + +static void store_lun(uint8_t *outbuf, int lun) +{ + if (lun < 256) { + /* Simple logical unit addressing method*/ + outbuf[0] = 0; + outbuf[1] = lun; + } else { + /* Flat space addressing method */ + outbuf[0] = 0x40 | (lun >> 8); + outbuf[1] = (lun & 255); + } +} + +static bool scsi_target_emulate_report_luns(SCSITargetReq *r) +{ + BusChild *kid; + int channel, id; + uint8_t tmp[8] = {0}; + int len = 0; + GByteArray *buf; + + if (r->req.cmd.xfer < 16) { + return false; + } + if (r->req.cmd.buf[2] > 2) { + return false; + } + + /* reserve space for 63 LUNs*/ + buf = g_byte_array_sized_new(512); + + channel = r->req.dev->channel; + id = r->req.dev->id; + + /* add size (will be updated later to correct value */ + g_byte_array_append(buf, tmp, 8); + len += 8; + + /* add LUN0 */ + g_byte_array_append(buf, tmp, 8); + len += 8; + + WITH_RCU_READ_LOCK_GUARD() { + QTAILQ_FOREACH_RCU(kid, &r->req.bus->qbus.children, sibling) { + DeviceState *qdev = kid->child; + SCSIDevice *dev = SCSI_DEVICE(qdev); + + if (dev->channel == channel && dev->id == id && dev->lun != 0) { + store_lun(tmp, dev->lun); + g_byte_array_append(buf, tmp, 8); + len += 8; + } + } + } + + r->buf_len = len; + r->buf = g_byte_array_free(buf, FALSE); + r->len = MIN(len, r->req.cmd.xfer & ~7); + + /* store the LUN list length */ + stl_be_p(&r->buf[0], len - 8); + return true; +} + +static bool scsi_target_emulate_inquiry(SCSITargetReq *r) +{ + assert(r->req.dev->lun != r->req.lun); + + scsi_target_alloc_buf(&r->req, SCSI_INQUIRY_LEN); + + if (r->req.cmd.buf[1] & 0x2) { + /* Command support data - optional, not implemented */ + return false; + } + + if (r->req.cmd.buf[1] & 0x1) { + /* Vital product data */ + uint8_t page_code = r->req.cmd.buf[2]; + r->buf[r->len++] = page_code ; /* this page */ + r->buf[r->len++] = 0x00; + + switch (page_code) { + case 0x00: /* Supported page codes, mandatory */ + { + int pages; + pages = r->len++; + r->buf[r->len++] = 0x00; /* list of supported pages (this page) */ + r->buf[pages] = r->len - pages - 1; /* number of pages */ + break; + } + default: + return false; + } + /* done with EVPD */ + assert(r->len < r->buf_len); + r->len = MIN(r->req.cmd.xfer, r->len); + return true; + } + + /* Standard INQUIRY data */ + if (r->req.cmd.buf[2] != 0) { + return false; + } + + /* PAGE CODE == 0 */ + r->len = MIN(r->req.cmd.xfer, SCSI_INQUIRY_LEN); + memset(r->buf, 0, r->len); + if (r->req.lun != 0) { + r->buf[0] = TYPE_NO_LUN; + } else { + r->buf[0] = TYPE_NOT_PRESENT | TYPE_INACTIVE; + r->buf[2] = 5; /* Version */ + r->buf[3] = 2 | 0x10; /* HiSup, response data format */ + r->buf[4] = r->len - 5; /* Additional Length = (Len - 1) - 4 */ + r->buf[7] = 0x10 | (r->req.bus->info->tcq ? 0x02 : 0); /* Sync, TCQ. 
*/ + memcpy(&r->buf[8], "QEMU ", 8); + memcpy(&r->buf[16], "QEMU TARGET ", 16); + pstrcpy((char *) &r->buf[32], 4, qemu_hw_version()); + } + return true; +} + +static size_t scsi_sense_len(SCSIRequest *req) +{ + if (req->dev->type == TYPE_SCANNER) + return SCSI_SENSE_LEN_SCANNER; + else + return SCSI_SENSE_LEN; +} + +static int32_t scsi_target_send_command(SCSIRequest *req, uint8_t *buf) +{ + SCSITargetReq *r = DO_UPCAST(SCSITargetReq, req, req); + int fixed_sense = (req->cmd.buf[1] & 1) == 0; + + if (req->lun != 0 && + buf[0] != INQUIRY && buf[0] != REQUEST_SENSE) { + scsi_req_build_sense(req, SENSE_CODE(LUN_NOT_SUPPORTED)); + scsi_req_complete(req, CHECK_CONDITION); + return 0; + } + switch (buf[0]) { + case REPORT_LUNS: + if (!scsi_target_emulate_report_luns(r)) { + goto illegal_request; + } + break; + case INQUIRY: + if (!scsi_target_emulate_inquiry(r)) { + goto illegal_request; + } + break; + case REQUEST_SENSE: + scsi_target_alloc_buf(&r->req, scsi_sense_len(req)); + if (req->lun != 0) { + const struct SCSISense sense = SENSE_CODE(LUN_NOT_SUPPORTED); + + r->len = scsi_build_sense_buf(r->buf, req->cmd.xfer, + sense, fixed_sense); + } else { + r->len = scsi_device_get_sense(r->req.dev, r->buf, + MIN(req->cmd.xfer, r->buf_len), + fixed_sense); + } + if (r->req.dev->sense_is_ua) { + scsi_device_unit_attention_reported(req->dev); + r->req.dev->sense_len = 0; + r->req.dev->sense_is_ua = false; + } + break; + case TEST_UNIT_READY: + break; + default: + scsi_req_build_sense(req, SENSE_CODE(INVALID_OPCODE)); + scsi_req_complete(req, CHECK_CONDITION); + return 0; + illegal_request: + scsi_req_build_sense(req, SENSE_CODE(INVALID_FIELD)); + scsi_req_complete(req, CHECK_CONDITION); + return 0; + } + + if (!r->len) { + scsi_req_complete(req, GOOD); + } + return r->len; +} + +static void scsi_target_read_data(SCSIRequest *req) +{ + SCSITargetReq *r = DO_UPCAST(SCSITargetReq, req, req); + uint32_t n; + + n = r->len; + if (n > 0) { + r->len = 0; + scsi_req_data(&r->req, n); + } else { + scsi_req_complete(&r->req, GOOD); + } +} + +static uint8_t *scsi_target_get_buf(SCSIRequest *req) +{ + SCSITargetReq *r = DO_UPCAST(SCSITargetReq, req, req); + + return r->buf; +} + +static uint8_t *scsi_target_alloc_buf(SCSIRequest *req, size_t len) +{ + SCSITargetReq *r = DO_UPCAST(SCSITargetReq, req, req); + + r->buf = g_malloc(len); + r->buf_len = len; + + return r->buf; +} + +static void scsi_target_free_buf(SCSIRequest *req) +{ + SCSITargetReq *r = DO_UPCAST(SCSITargetReq, req, req); + + g_free(r->buf); +} + +static const struct SCSIReqOps reqops_target_command = { + .size = sizeof(SCSITargetReq), + .send_command = scsi_target_send_command, + .read_data = scsi_target_read_data, + .get_buf = scsi_target_get_buf, + .free_req = scsi_target_free_buf, +}; + + +SCSIRequest *scsi_req_alloc(const SCSIReqOps *reqops, SCSIDevice *d, + uint32_t tag, uint32_t lun, void *hba_private) +{ + SCSIRequest *req; + SCSIBus *bus = scsi_bus_from_device(d); + BusState *qbus = BUS(bus); + const int memset_off = offsetof(SCSIRequest, sense) + + sizeof(req->sense); + + req = g_malloc(reqops->size); + memset((uint8_t *)req + memset_off, 0, reqops->size - memset_off); + req->refcount = 1; + req->bus = bus; + req->dev = d; + req->tag = tag; + req->lun = lun; + req->hba_private = hba_private; + req->status = -1; + req->host_status = -1; + req->ops = reqops; + object_ref(OBJECT(d)); + object_ref(OBJECT(qbus->parent)); + notifier_list_init(&req->cancel_notifiers); + trace_scsi_req_alloc(req->dev->id, req->lun, req->tag); + return req; +} + 
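+/* Pick how a freshly parsed CDB is serviced: pending unit attention + * conditions and target-level commands (REPORT LUNS, REQUEST SENSE with + * sense data pending, or any command for an unimplemented LUN) use the + * bus-internal reqops; unparseable CDBs and oversized transfers fail with + * the invalid-opcode/invalid-field reqops; everything else is handed to + * the device's own alloc_req. + */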
+SCSIRequest *scsi_req_new(SCSIDevice *d, uint32_t tag, uint32_t lun, + uint8_t *buf, void *hba_private) +{ + SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, d->qdev.parent_bus); + const SCSIReqOps *ops; + SCSIDeviceClass *sc = SCSI_DEVICE_GET_CLASS(d); + SCSIRequest *req; + SCSICommand cmd = { .len = 0 }; + int ret; + + if ((d->unit_attention.key == UNIT_ATTENTION || + bus->unit_attention.key == UNIT_ATTENTION) && + (buf[0] != INQUIRY && + buf[0] != REPORT_LUNS && + buf[0] != GET_CONFIGURATION && + buf[0] != GET_EVENT_STATUS_NOTIFICATION && + + /* + * If we already have a pending unit attention condition, + * report this one before triggering another one. + */ + !(buf[0] == REQUEST_SENSE && d->sense_is_ua))) { + ops = &reqops_unit_attention; + } else if (lun != d->lun || + buf[0] == REPORT_LUNS || + (buf[0] == REQUEST_SENSE && d->sense_len)) { + ops = &reqops_target_command; + } else { + ops = NULL; + } + + if (ops != NULL || !sc->parse_cdb) { + ret = scsi_req_parse_cdb(d, &cmd, buf); + } else { + ret = sc->parse_cdb(d, &cmd, buf, hba_private); + } + + if (ret != 0) { + trace_scsi_req_parse_bad(d->id, lun, tag, buf[0]); + req = scsi_req_alloc(&reqops_invalid_opcode, d, tag, lun, hba_private); + } else { + assert(cmd.len != 0); + trace_scsi_req_parsed(d->id, lun, tag, buf[0], + cmd.mode, cmd.xfer); + if (cmd.lba != -1) { + trace_scsi_req_parsed_lba(d->id, lun, tag, buf[0], + cmd.lba); + } + + if (cmd.xfer > INT32_MAX) { + req = scsi_req_alloc(&reqops_invalid_field, d, tag, lun, hba_private); + } else if (ops) { + req = scsi_req_alloc(ops, d, tag, lun, hba_private); + } else { + req = scsi_device_alloc_req(d, tag, lun, buf, hba_private); + } + } + + req->cmd = cmd; + req->resid = req->cmd.xfer; + + switch (buf[0]) { + case INQUIRY: + trace_scsi_inquiry(d->id, lun, tag, cmd.buf[1], cmd.buf[2]); + break; + case TEST_UNIT_READY: + trace_scsi_test_unit_ready(d->id, lun, tag); + break; + case REPORT_LUNS: + trace_scsi_report_luns(d->id, lun, tag); + break; + case REQUEST_SENSE: + trace_scsi_request_sense(d->id, lun, tag); + break; + default: + break; + } + + return req; +} + +uint8_t *scsi_req_get_buf(SCSIRequest *req) +{ + return req->ops->get_buf(req); +} + +static void scsi_clear_unit_attention(SCSIRequest *req) +{ + SCSISense *ua; + if (req->dev->unit_attention.key != UNIT_ATTENTION && + req->bus->unit_attention.key != UNIT_ATTENTION) { + return; + } + + /* + * If an INQUIRY command enters the enabled command state, + * the device server shall [not] clear any unit attention condition; + * See also MMC-6, paragraphs 6.5 and 6.6.2. + */ + if (req->cmd.buf[0] == INQUIRY || + req->cmd.buf[0] == GET_CONFIGURATION || + req->cmd.buf[0] == GET_EVENT_STATUS_NOTIFICATION) { + return; + } + + if (req->dev->unit_attention.key == UNIT_ATTENTION) { + ua = &req->dev->unit_attention; + } else { + ua = &req->bus->unit_attention; + } + + /* + * If a REPORT LUNS command enters the enabled command state, [...] + * the device server shall clear any pending unit attention condition + * with an additional sense code of REPORTED LUNS DATA HAS CHANGED. 
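+ * Hence REPORT LUNS clears only a REPORTED LUNS DATA HAS CHANGED + * condition; any other pending unit attention survives it.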
+ */ + if (req->cmd.buf[0] == REPORT_LUNS && + !(ua->asc == SENSE_CODE(REPORTED_LUNS_CHANGED).asc && + ua->ascq == SENSE_CODE(REPORTED_LUNS_CHANGED).ascq)) { + return; + } + + *ua = SENSE_CODE(NO_SENSE); +} + +int scsi_req_get_sense(SCSIRequest *req, uint8_t *buf, int len) +{ + int ret; + + assert(len >= 14); + if (!req->sense_len) { + return 0; + } + + ret = scsi_convert_sense(req->sense, req->sense_len, buf, len, true); + + /* + * FIXME: clearing unit attention conditions upon autosense should be done + * only if the UA_INTLCK_CTRL field in the Control mode page is set to 00b + * (SAM-5, 5.14). + * + * We assume UA_INTLCK_CTRL to be 00b for HBAs that support autosense, and + * 10b for HBAs that do not support it (do not call scsi_req_get_sense). + * Here we handle unit attention clearing for UA_INTLCK_CTRL == 00b. + */ + if (req->dev->sense_is_ua) { + scsi_device_unit_attention_reported(req->dev); + req->dev->sense_len = 0; + req->dev->sense_is_ua = false; + } + return ret; +} + +int scsi_device_get_sense(SCSIDevice *dev, uint8_t *buf, int len, bool fixed) +{ + return scsi_convert_sense(dev->sense, dev->sense_len, buf, len, fixed); +} + +void scsi_req_build_sense(SCSIRequest *req, SCSISense sense) +{ + trace_scsi_req_build_sense(req->dev->id, req->lun, req->tag, + sense.key, sense.asc, sense.ascq); + req->sense_len = scsi_build_sense(req->sense, sense); +} + +static void scsi_req_enqueue_internal(SCSIRequest *req) +{ + assert(!req->enqueued); + scsi_req_ref(req); + if (req->bus->info->get_sg_list) { + req->sg = req->bus->info->get_sg_list(req); + } else { + req->sg = NULL; + } + req->enqueued = true; + QTAILQ_INSERT_TAIL(&req->dev->requests, req, next); +} + +int32_t scsi_req_enqueue(SCSIRequest *req) +{ + int32_t rc; + + assert(!req->retry); + scsi_req_enqueue_internal(req); + scsi_req_ref(req); + rc = req->ops->send_command(req, req->cmd.buf); + scsi_req_unref(req); + return rc; +} + +static void scsi_req_dequeue(SCSIRequest *req) +{ + trace_scsi_req_dequeue(req->dev->id, req->lun, req->tag); + req->retry = false; + if (req->enqueued) { + QTAILQ_REMOVE(&req->dev->requests, req, next); + req->enqueued = false; + scsi_req_unref(req); + } +} + +static int scsi_get_performance_length(int num_desc, int type, int data_type) +{ + /* MMC-6, paragraph 6.7. */ + switch (type) { + case 0: + if ((data_type & 3) == 0) { + /* Each descriptor is as in Table 295 - Nominal performance. */ + return 16 * num_desc + 8; + } else { + /* Each descriptor is as in Table 296 - Exceptions. */ + return 6 * num_desc + 8; + } + case 1: + case 4: + case 5: + return 8 * num_desc + 8; + case 2: + return 2048 * num_desc + 8; + case 3: + return 16 * num_desc + 8; + default: + return 8; + } +} + +static int ata_passthrough_xfer_unit(SCSIDevice *dev, uint8_t *buf) +{ + int byte_block = (buf[2] >> 2) & 0x1; + int type = (buf[2] >> 4) & 0x1; + int xfer_unit; + + if (byte_block) { + if (type) { + xfer_unit = dev->blocksize; + } else { + xfer_unit = 512; + } + } else { + xfer_unit = 1; + } + + return xfer_unit; +} + +static int ata_passthrough_12_xfer(SCSIDevice *dev, uint8_t *buf) +{ + int length = buf[2] & 0x3; + int xfer; + int unit = ata_passthrough_xfer_unit(dev, buf); + + switch (length) { + case 0: + case 3: /* USB-specific. 
*/ + default: + xfer = 0; + break; + case 1: + xfer = buf[3]; + break; + case 2: + xfer = buf[4]; + break; + } + + return xfer * unit; +} + +static int ata_passthrough_16_xfer(SCSIDevice *dev, uint8_t *buf) +{ + int extend = buf[1] & 0x1; + int length = buf[2] & 0x3; + int xfer; + int unit = ata_passthrough_xfer_unit(dev, buf); + + switch (length) { + case 0: + case 3: /* USB-specific. */ + default: + xfer = 0; + break; + case 1: + xfer = buf[4]; + xfer |= (extend ? buf[3] << 8 : 0); + break; + case 2: + xfer = buf[6]; + xfer |= (extend ? buf[5] << 8 : 0); + break; + } + + return xfer * unit; +} + +static int scsi_req_xfer(SCSICommand *cmd, SCSIDevice *dev, uint8_t *buf) +{ + cmd->xfer = scsi_cdb_xfer(buf); + switch (buf[0]) { + case TEST_UNIT_READY: + case REWIND: + case START_STOP: + case SET_CAPACITY: + case WRITE_FILEMARKS: + case WRITE_FILEMARKS_16: + case SPACE: + case RESERVE: + case RELEASE: + case ERASE: + case ALLOW_MEDIUM_REMOVAL: + case SEEK_10: + case SYNCHRONIZE_CACHE: + case SYNCHRONIZE_CACHE_16: + case LOCATE_16: + case LOCK_UNLOCK_CACHE: + case SET_CD_SPEED: + case SET_LIMITS: + case WRITE_LONG_10: + case UPDATE_BLOCK: + case RESERVE_TRACK: + case SET_READ_AHEAD: + case PRE_FETCH: + case PRE_FETCH_16: + case ALLOW_OVERWRITE: + cmd->xfer = 0; + break; + case VERIFY_10: + case VERIFY_12: + case VERIFY_16: + if ((buf[1] & 2) == 0) { + cmd->xfer = 0; + } else if ((buf[1] & 4) != 0) { + cmd->xfer = 1; + } + cmd->xfer *= dev->blocksize; + break; + case MODE_SENSE: + break; + case WRITE_SAME_10: + case WRITE_SAME_16: + cmd->xfer = buf[1] & 1 ? 0 : dev->blocksize; + break; + case READ_CAPACITY_10: + cmd->xfer = 8; + break; + case READ_BLOCK_LIMITS: + cmd->xfer = 6; + break; + case SEND_VOLUME_TAG: + /* GPCMD_SET_STREAMING from multimedia commands. */ + if (dev->type == TYPE_ROM) { + cmd->xfer = buf[10] | (buf[9] << 8); + } else { + cmd->xfer = buf[9] | (buf[8] << 8); + } + break; + case WRITE_6: + /* length 0 means 256 blocks */ + if (cmd->xfer == 0) { + cmd->xfer = 256; + } + /* fall through */ + case WRITE_10: + case WRITE_VERIFY_10: + case WRITE_12: + case WRITE_VERIFY_12: + case WRITE_16: + case WRITE_VERIFY_16: + cmd->xfer *= dev->blocksize; + break; + case READ_6: + case READ_REVERSE: + /* length 0 means 256 blocks */ + if (cmd->xfer == 0) { + cmd->xfer = 256; + } + /* fall through */ + case READ_10: + case READ_12: + case READ_16: + cmd->xfer *= dev->blocksize; + break; + case FORMAT_UNIT: + /* MMC mandates the parameter list to be 12-bytes long. Parameters + * for block devices are restricted to the header right now. */ + if (dev->type == TYPE_ROM && (buf[1] & 16)) { + cmd->xfer = 12; + } else { + cmd->xfer = (buf[1] & 16) == 0 ? 0 : (buf[1] & 32 ? 8 : 4); + } + break; + case INQUIRY: + case RECEIVE_DIAGNOSTIC: + case SEND_DIAGNOSTIC: + cmd->xfer = buf[4] | (buf[3] << 8); + break; + case READ_CD: + case READ_BUFFER: + case WRITE_BUFFER: + case SEND_CUE_SHEET: + cmd->xfer = buf[8] | (buf[7] << 8) | (buf[6] << 16); + break; + case PERSISTENT_RESERVE_OUT: + cmd->xfer = ldl_be_p(&buf[5]) & 0xffffffffULL; + break; + case ERASE_12: + if (dev->type == TYPE_ROM) { + /* MMC command GET PERFORMANCE. 
*/ + cmd->xfer = scsi_get_performance_length(buf[9] | (buf[8] << 8), + buf[10], buf[1] & 0x1f); + } + break; + case MECHANISM_STATUS: + case READ_DVD_STRUCTURE: + case SEND_DVD_STRUCTURE: + case MAINTENANCE_OUT: + case MAINTENANCE_IN: + if (dev->type == TYPE_ROM) { + /* GPCMD_REPORT_KEY and GPCMD_SEND_KEY from multi media commands */ + cmd->xfer = buf[9] | (buf[8] << 8); + } + break; + case ATA_PASSTHROUGH_12: + if (dev->type == TYPE_ROM) { + /* BLANK command of MMC */ + cmd->xfer = 0; + } else { + cmd->xfer = ata_passthrough_12_xfer(dev, buf); + } + break; + case ATA_PASSTHROUGH_16: + cmd->xfer = ata_passthrough_16_xfer(dev, buf); + break; + } + return 0; +} + +static int scsi_req_stream_xfer(SCSICommand *cmd, SCSIDevice *dev, uint8_t *buf) +{ + switch (buf[0]) { + /* stream commands */ + case ERASE_12: + case ERASE_16: + cmd->xfer = 0; + break; + case READ_6: + case READ_REVERSE: + case RECOVER_BUFFERED_DATA: + case WRITE_6: + cmd->xfer = buf[4] | (buf[3] << 8) | (buf[2] << 16); + if (buf[1] & 0x01) { /* fixed */ + cmd->xfer *= dev->blocksize; + } + break; + case READ_16: + case READ_REVERSE_16: + case VERIFY_16: + case WRITE_16: + cmd->xfer = buf[14] | (buf[13] << 8) | (buf[12] << 16); + if (buf[1] & 0x01) { /* fixed */ + cmd->xfer *= dev->blocksize; + } + break; + case REWIND: + case LOAD_UNLOAD: + cmd->xfer = 0; + break; + case SPACE_16: + cmd->xfer = buf[13] | (buf[12] << 8); + break; + case READ_POSITION: + switch (buf[1] & 0x1f) /* operation code */ { + case SHORT_FORM_BLOCK_ID: + case SHORT_FORM_VENDOR_SPECIFIC: + cmd->xfer = 20; + break; + case LONG_FORM: + cmd->xfer = 32; + break; + case EXTENDED_FORM: + cmd->xfer = buf[8] | (buf[7] << 8); + break; + default: + return -1; + } + + break; + case FORMAT_UNIT: + cmd->xfer = buf[4] | (buf[3] << 8); + break; + /* generic commands */ + default: + return scsi_req_xfer(cmd, dev, buf); + } + return 0; +} + +static int scsi_req_medium_changer_xfer(SCSICommand *cmd, SCSIDevice *dev, uint8_t *buf) +{ + switch (buf[0]) { + /* medium changer commands */ + case EXCHANGE_MEDIUM: + case INITIALIZE_ELEMENT_STATUS: + case INITIALIZE_ELEMENT_STATUS_WITH_RANGE: + case MOVE_MEDIUM: + case POSITION_TO_ELEMENT: + cmd->xfer = 0; + break; + case READ_ELEMENT_STATUS: + cmd->xfer = buf[9] | (buf[8] << 8) | (buf[7] << 16); + break; + + /* generic commands */ + default: + return scsi_req_xfer(cmd, dev, buf); + } + return 0; +} + +static int scsi_req_scanner_length(SCSICommand *cmd, SCSIDevice *dev, uint8_t *buf) +{ + switch (buf[0]) { + /* Scanner commands */ + case OBJECT_POSITION: + cmd->xfer = 0; + break; + case SCAN: + cmd->xfer = buf[4]; + break; + case READ_10: + case SEND: + case GET_WINDOW: + case SET_WINDOW: + cmd->xfer = buf[8] | (buf[7] << 8) | (buf[6] << 16); + break; + default: + /* GET_DATA_BUFFER_STATUS xfer handled by scsi_req_xfer */ + return scsi_req_xfer(cmd, dev, buf); + } + + return 0; +} + +static void scsi_cmd_xfer_mode(SCSICommand *cmd) +{ + if (!cmd->xfer) { + cmd->mode = SCSI_XFER_NONE; + return; + } + switch (cmd->buf[0]) { + case WRITE_6: + case WRITE_10: + case WRITE_VERIFY_10: + case WRITE_12: + case WRITE_VERIFY_12: + case WRITE_16: + case WRITE_VERIFY_16: + case VERIFY_10: + case VERIFY_12: + case VERIFY_16: + case COPY: + case COPY_VERIFY: + case COMPARE: + case CHANGE_DEFINITION: + case LOG_SELECT: + case MODE_SELECT: + case MODE_SELECT_10: + case SEND_DIAGNOSTIC: + case WRITE_BUFFER: + case FORMAT_UNIT: + case REASSIGN_BLOCKS: + case SEARCH_EQUAL: + case SEARCH_HIGH: + case SEARCH_LOW: + case UPDATE_BLOCK: + case 
WRITE_LONG_10: + case WRITE_SAME_10: + case WRITE_SAME_16: + case UNMAP: + case SEARCH_HIGH_12: + case SEARCH_EQUAL_12: + case SEARCH_LOW_12: + case MEDIUM_SCAN: + case SEND_VOLUME_TAG: + case SEND_CUE_SHEET: + case SEND_DVD_STRUCTURE: + case PERSISTENT_RESERVE_OUT: + case MAINTENANCE_OUT: + case SET_WINDOW: + case SCAN: + /* SCAN conflicts with START_STOP. START_STOP has cmd->xfer set to 0 for + * non-scanner devices, so we only get here for SCAN and not for START_STOP. + */ + cmd->mode = SCSI_XFER_TO_DEV; + break; + case ATA_PASSTHROUGH_12: + case ATA_PASSTHROUGH_16: + /* T_DIR */ + cmd->mode = (cmd->buf[2] & 0x8) ? + SCSI_XFER_FROM_DEV : SCSI_XFER_TO_DEV; + break; + default: + cmd->mode = SCSI_XFER_FROM_DEV; + break; + } +} + +int scsi_req_parse_cdb(SCSIDevice *dev, SCSICommand *cmd, uint8_t *buf) +{ + int rc; + int len; + + cmd->lba = -1; + len = scsi_cdb_length(buf); + if (len < 0) { + return -1; + } + + cmd->len = len; + switch (dev->type) { + case TYPE_TAPE: + rc = scsi_req_stream_xfer(cmd, dev, buf); + break; + case TYPE_MEDIUM_CHANGER: + rc = scsi_req_medium_changer_xfer(cmd, dev, buf); + break; + case TYPE_SCANNER: + rc = scsi_req_scanner_length(cmd, dev, buf); + break; + default: + rc = scsi_req_xfer(cmd, dev, buf); + break; + } + + if (rc != 0) + return rc; + + memcpy(cmd->buf, buf, cmd->len); + scsi_cmd_xfer_mode(cmd); + cmd->lba = scsi_cmd_lba(cmd); + return 0; +} + +void scsi_device_report_change(SCSIDevice *dev, SCSISense sense) +{ + SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, dev->qdev.parent_bus); + + scsi_device_set_ua(dev, sense); + if (bus->info->change) { + bus->info->change(bus, dev, sense); + } +} + +SCSIRequest *scsi_req_ref(SCSIRequest *req) +{ + assert(req->refcount > 0); + req->refcount++; + return req; +} + +void scsi_req_unref(SCSIRequest *req) +{ + assert(req->refcount > 0); + if (--req->refcount == 0) { + BusState *qbus = req->dev->qdev.parent_bus; + SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, qbus); + + if (bus->info->free_request && req->hba_private) { + bus->info->free_request(bus, req->hba_private); + } + if (req->ops->free_req) { + req->ops->free_req(req); + } + object_unref(OBJECT(req->dev)); + object_unref(OBJECT(qbus->parent)); + g_free(req); + } +} + +/* Tell the device that we finished processing this chunk of I/O. It + will start the next chunk or complete the command. */ +void scsi_req_continue(SCSIRequest *req) +{ + if (req->io_canceled) { + trace_scsi_req_continue_canceled(req->dev->id, req->lun, req->tag); + return; + } + trace_scsi_req_continue(req->dev->id, req->lun, req->tag); + if (req->cmd.mode == SCSI_XFER_TO_DEV) { + req->ops->write_data(req); + } else { + req->ops->read_data(req); + } +} + +/* Called by the devices when data is ready for the HBA. The HBA should + start a DMA operation to read or fill the device's data buffer. + Once it completes, calling scsi_req_continue will restart I/O. */ +void scsi_req_data(SCSIRequest *req, int len) +{ + uint8_t *buf; + if (req->io_canceled) { + trace_scsi_req_data_canceled(req->dev->id, req->lun, req->tag, len); + return; + } + trace_scsi_req_data(req->dev->id, req->lun, req->tag, len); + assert(req->cmd.mode != SCSI_XFER_NONE); + if (!req->sg) { + req->resid -= len; + req->bus->info->transfer_data(req, len); + return; + } + + /* If the device calls scsi_req_data and the HBA specified a + * scatter/gather list, the transfer has to happen in a single + * step. 
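+ * The dma_started flag asserted below enforces that: the whole
+ * scatter/gather list is consumed by one dma_buf_read()/dma_buf_write()
+ * call, with the residual count left in req->resid.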
*/ + assert(!req->dma_started); + req->dma_started = true; + + buf = scsi_req_get_buf(req); + if (req->cmd.mode == SCSI_XFER_FROM_DEV) { + req->resid = dma_buf_read(buf, len, req->sg); + } else { + req->resid = dma_buf_write(buf, len, req->sg); + } + scsi_req_continue(req); +} + +void scsi_req_print(SCSIRequest *req) +{ + FILE *fp = stderr; + int i; + + fprintf(fp, "[%s id=%d] %s", + req->dev->qdev.parent_bus->name, + req->dev->id, + scsi_command_name(req->cmd.buf[0])); + for (i = 1; i < req->cmd.len; i++) { + fprintf(fp, " 0x%02x", req->cmd.buf[i]); + } + switch (req->cmd.mode) { + case SCSI_XFER_NONE: + fprintf(fp, " - none\n"); + break; + case SCSI_XFER_FROM_DEV: + fprintf(fp, " - from-dev len=%zd\n", req->cmd.xfer); + break; + case SCSI_XFER_TO_DEV: + fprintf(fp, " - to-dev len=%zd\n", req->cmd.xfer); + break; + default: + fprintf(fp, " - Oops\n"); + break; + } +} + +void scsi_req_complete_failed(SCSIRequest *req, int host_status) +{ + SCSISense sense; + int status; + + assert(req->status == -1 && req->host_status == -1); + assert(req->ops != &reqops_unit_attention); + + if (!req->bus->info->fail) { + status = scsi_sense_from_host_status(req->host_status, &sense); + if (status == CHECK_CONDITION) { + scsi_req_build_sense(req, sense); + } + scsi_req_complete(req, status); + return; + } + + req->host_status = host_status; + scsi_req_ref(req); + scsi_req_dequeue(req); + req->bus->info->fail(req); + + /* Cancelled requests might end up being completed instead of cancelled */ + notifier_list_notify(&req->cancel_notifiers, req); + scsi_req_unref(req); +} + +void scsi_req_complete(SCSIRequest *req, int status) +{ + assert(req->status == -1 && req->host_status == -1); + req->status = status; + req->host_status = SCSI_HOST_OK; + + assert(req->sense_len <= sizeof(req->sense)); + if (status == GOOD) { + req->sense_len = 0; + } + + if (req->sense_len) { + memcpy(req->dev->sense, req->sense, req->sense_len); + req->dev->sense_len = req->sense_len; + req->dev->sense_is_ua = (req->ops == &reqops_unit_attention); + } else { + req->dev->sense_len = 0; + req->dev->sense_is_ua = false; + } + + /* + * Unit attention state is now stored in the device's sense buffer + * if the HBA didn't do autosense. Clear the pending unit attention + * flags. + */ + scsi_clear_unit_attention(req); + + scsi_req_ref(req); + scsi_req_dequeue(req); + req->bus->info->complete(req, req->resid); + + /* Cancelled requests might end up being completed instead of cancelled */ + notifier_list_notify(&req->cancel_notifiers, req); + scsi_req_unref(req); +} + +/* Called by the devices when the request is canceled. */ +void scsi_req_cancel_complete(SCSIRequest *req) +{ + assert(req->io_canceled); + if (req->bus->info->cancel) { + req->bus->info->cancel(req); + } + notifier_list_notify(&req->cancel_notifiers, req); + scsi_req_unref(req); +} + +/* Cancel @req asynchronously. @notifier is added to @req's cancellation + * notifier list, and the bus will be notified when the request's + * cancellation is completed. + */ +void scsi_req_cancel_async(SCSIRequest *req, Notifier *notifier) +{ + trace_scsi_req_cancel(req->dev->id, req->lun, req->tag); + if (notifier) { + notifier_list_add(&req->cancel_notifiers, notifier); + } + if (req->io_canceled) { + /* A blk_aio_cancel_async is pending; when it finishes, + * scsi_req_cancel_complete will be called and will + * call the notifier we just added. Just wait for that. + */ + assert(req->aiocb); + return; + } + /* Dropped in scsi_req_cancel_complete.
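+ * If an AIO operation is still in flight, blk_aio_cancel_async() below
+ * cancels it and scsi_req_cancel_complete() runs from its completion
+ * path; otherwise it is called directly.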
*/ + scsi_req_ref(req); + scsi_req_dequeue(req); + req->io_canceled = true; + if (req->aiocb) { + blk_aio_cancel_async(req->aiocb); + } else { + scsi_req_cancel_complete(req); + } +} + +void scsi_req_cancel(SCSIRequest *req) +{ + trace_scsi_req_cancel(req->dev->id, req->lun, req->tag); + if (!req->enqueued) { + return; + } + assert(!req->io_canceled); + /* Dropped in scsi_req_cancel_complete. */ + scsi_req_ref(req); + scsi_req_dequeue(req); + req->io_canceled = true; + if (req->aiocb) { + blk_aio_cancel(req->aiocb); + } else { + scsi_req_cancel_complete(req); + } +} + +static int scsi_ua_precedence(SCSISense sense) +{ + if (sense.key != UNIT_ATTENTION) { + return INT_MAX; + } + if (sense.asc == 0x29 && sense.ascq == 0x04) { + /* DEVICE INTERNAL RESET goes with POWER ON OCCURRED */ + return 1; + } else if (sense.asc == 0x3F && sense.ascq == 0x01) { + /* MICROCODE HAS BEEN CHANGED goes with SCSI BUS RESET OCCURRED */ + return 2; + } else if (sense.asc == 0x29 && (sense.ascq == 0x05 || sense.ascq == 0x06)) { + /* These two go with "all others". */ + ; + } else if (sense.asc == 0x29 && sense.ascq <= 0x07) { + /* POWER ON, RESET OR BUS DEVICE RESET OCCURRED = 0 + * POWER ON OCCURRED = 1 + * SCSI BUS RESET OCCURRED = 2 + * BUS DEVICE RESET FUNCTION OCCURRED = 3 + * I_T NEXUS LOSS OCCURRED = 7 + */ + return sense.ascq; + } else if (sense.asc == 0x2F && sense.ascq == 0x01) { + /* COMMANDS CLEARED BY POWER LOSS NOTIFICATION */ + return 8; + } + return (sense.asc << 8) | sense.ascq; +} + +void scsi_device_set_ua(SCSIDevice *sdev, SCSISense sense) +{ + int prec1, prec2; + if (sense.key != UNIT_ATTENTION) { + return; + } + trace_scsi_device_set_ua(sdev->id, sdev->lun, sense.key, + sense.asc, sense.ascq); + + /* + * Override a pre-existing unit attention condition, except for a more + * important reset condition. + */ + prec1 = scsi_ua_precedence(sdev->unit_attention); + prec2 = scsi_ua_precedence(sense); + if (prec2 < prec1) { + sdev->unit_attention = sense; + } +} + +void scsi_device_purge_requests(SCSIDevice *sdev, SCSISense sense) +{ + SCSIRequest *req; + + aio_context_acquire(blk_get_aio_context(sdev->conf.blk)); + while (!QTAILQ_EMPTY(&sdev->requests)) { + req = QTAILQ_FIRST(&sdev->requests); + scsi_req_cancel_async(req, NULL); + } + blk_drain(sdev->conf.blk); + aio_context_release(blk_get_aio_context(sdev->conf.blk)); + scsi_device_set_ua(sdev, sense); +} + +static char *scsibus_get_dev_path(DeviceState *dev) +{ + SCSIDevice *d = SCSI_DEVICE(dev); + DeviceState *hba = dev->parent_bus->parent; + char *id; + char *path; + + id = qdev_get_dev_path(hba); + if (id) { + path = g_strdup_printf("%s/%d:%d:%d", id, d->channel, d->id, d->lun); + } else { + path = g_strdup_printf("%d:%d:%d", d->channel, d->id, d->lun); + } + g_free(id); + return path; +} + +static char *scsibus_get_fw_dev_path(DeviceState *dev) +{ + SCSIDevice *d = SCSI_DEVICE(dev); + return g_strdup_printf("channel@%x/%s@%x,%x", d->channel, + qdev_fw_name(dev), d->id, d->lun); +} + +/* SCSI request list. For simplicity, pv points to the whole device */ + +static int put_scsi_requests(QEMUFile *f, void *pv, size_t size, + const VMStateField *field, JSONWriter *vmdesc) +{ + SCSIDevice *s = pv; + SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, s->qdev.parent_bus); + SCSIRequest *req; + + QTAILQ_FOREACH(req, &s->requests, next) { + assert(!req->io_canceled); + assert(req->status == -1 && req->host_status == -1); + assert(req->enqueued); + + qemu_put_sbyte(f, req->retry ? 
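/* marker byte: 1 = request pending retry, 2 = request in flight;
 * the 0 byte written after the loop terminates the list on load */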
1 : 2); + qemu_put_buffer(f, req->cmd.buf, sizeof(req->cmd.buf)); + qemu_put_be32s(f, &req->tag); + qemu_put_be32s(f, &req->lun); + if (bus->info->save_request) { + bus->info->save_request(f, req); + } + if (req->ops->save_request) { + req->ops->save_request(f, req); + } + } + qemu_put_sbyte(f, 0); + + return 0; +} + +static int get_scsi_requests(QEMUFile *f, void *pv, size_t size, + const VMStateField *field) +{ + SCSIDevice *s = pv; + SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, s->qdev.parent_bus); + int8_t sbyte; + + while ((sbyte = qemu_get_sbyte(f)) > 0) { + uint8_t buf[SCSI_CMD_BUF_SIZE]; + uint32_t tag; + uint32_t lun; + SCSIRequest *req; + + qemu_get_buffer(f, buf, sizeof(buf)); + qemu_get_be32s(f, &tag); + qemu_get_be32s(f, &lun); + req = scsi_req_new(s, tag, lun, buf, NULL); + req->retry = (sbyte == 1); + if (bus->info->load_request) { + req->hba_private = bus->info->load_request(f, req); + } + if (req->ops->load_request) { + req->ops->load_request(f, req); + } + + /* Just restart it later. */ + scsi_req_enqueue_internal(req); + + /* At this point, the request will be kept alive by the reference + * added by scsi_req_enqueue_internal, so we can release our reference. + * The HBA of course will add its own reference in the load_request + * callback if it needs to hold on the SCSIRequest. + */ + scsi_req_unref(req); + } + + return 0; +} + +static const VMStateInfo vmstate_info_scsi_requests = { + .name = "scsi-requests", + .get = get_scsi_requests, + .put = put_scsi_requests, +}; + +static bool scsi_sense_state_needed(void *opaque) +{ + SCSIDevice *s = opaque; + + return s->sense_len > SCSI_SENSE_BUF_SIZE_OLD; +} + +static const VMStateDescription vmstate_scsi_sense_state = { + .name = "SCSIDevice/sense", + .version_id = 1, + .minimum_version_id = 1, + .needed = scsi_sense_state_needed, + .fields = (VMStateField[]) { + VMSTATE_UINT8_SUB_ARRAY(sense, SCSIDevice, + SCSI_SENSE_BUF_SIZE_OLD, + SCSI_SENSE_BUF_SIZE - SCSI_SENSE_BUF_SIZE_OLD), + VMSTATE_END_OF_LIST() + } +}; + +const VMStateDescription vmstate_scsi_device = { + .name = "SCSIDevice", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT8(unit_attention.key, SCSIDevice), + VMSTATE_UINT8(unit_attention.asc, SCSIDevice), + VMSTATE_UINT8(unit_attention.ascq, SCSIDevice), + VMSTATE_BOOL(sense_is_ua, SCSIDevice), + VMSTATE_UINT8_SUB_ARRAY(sense, SCSIDevice, 0, SCSI_SENSE_BUF_SIZE_OLD), + VMSTATE_UINT32(sense_len, SCSIDevice), + { + .name = "requests", + .version_id = 0, + .field_exists = NULL, + .size = 0, /* ouch */ + .info = &vmstate_info_scsi_requests, + .flags = VMS_SINGLE, + .offset = 0, + }, + VMSTATE_END_OF_LIST() + }, + .subsections = (const VMStateDescription*[]) { + &vmstate_scsi_sense_state, + NULL + } +}; + +static Property scsi_props[] = { + DEFINE_PROP_UINT32("channel", SCSIDevice, channel, 0), + DEFINE_PROP_UINT32("scsi-id", SCSIDevice, id, -1), + DEFINE_PROP_UINT32("lun", SCSIDevice, lun, -1), + DEFINE_PROP_END_OF_LIST(), +}; + +static void scsi_device_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *k = DEVICE_CLASS(klass); + set_bit(DEVICE_CATEGORY_STORAGE, k->categories); + k->bus_type = TYPE_SCSI_BUS; + k->realize = scsi_qdev_realize; + k->unrealize = scsi_qdev_unrealize; + device_class_set_props(k, scsi_props); +} + +static void scsi_dev_instance_init(Object *obj) +{ + DeviceState *dev = DEVICE(obj); + SCSIDevice *s = SCSI_DEVICE(dev); + + device_add_bootindex_property(obj, &s->conf.bootindex, + "bootindex", NULL, + &s->qdev); +} + +static const TypeInfo 
scsi_device_type_info = { + .name = TYPE_SCSI_DEVICE, + .parent = TYPE_DEVICE, + .instance_size = sizeof(SCSIDevice), + .abstract = true, + .class_size = sizeof(SCSIDeviceClass), + .class_init = scsi_device_class_init, + .instance_init = scsi_dev_instance_init, +}; + +static void scsi_bus_class_init(ObjectClass *klass, void *data) +{ + BusClass *k = BUS_CLASS(klass); + HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(klass); + + k->get_dev_path = scsibus_get_dev_path; + k->get_fw_dev_path = scsibus_get_fw_dev_path; + k->check_address = scsi_bus_check_address; + hc->unplug = qdev_simple_device_unplug_cb; +} + +static const TypeInfo scsi_bus_info = { + .name = TYPE_SCSI_BUS, + .parent = TYPE_BUS, + .instance_size = sizeof(SCSIBus), + .class_init = scsi_bus_class_init, + .interfaces = (InterfaceInfo[]) { + { TYPE_HOTPLUG_HANDLER }, + { } + } +}; + +static void scsi_register_types(void) +{ + type_register_static(&scsi_bus_info); + type_register_static(&scsi_device_type_info); +} + +type_init(scsi_register_types) diff --git a/hw/scsi/scsi-disk.c b/hw/scsi/scsi-disk.c new file mode 100644 index 000000000..d4914178e --- /dev/null +++ b/hw/scsi/scsi-disk.c @@ -0,0 +1,3160 @@ +/* + * SCSI Device emulation + * + * Copyright (c) 2006 CodeSourcery. + * Based on code by Fabrice Bellard + * + * Written by Paul Brook + * Modifications: + * 2009-Dec-12 Artyom Tarasenko : implemented standard inquiry for the case + * when the allocation length of the CDB is smaller + * than 36. + * 2009-Oct-13 Artyom Tarasenko : implemented the block descriptor in the + * MODE SENSE response. + * + * This code is licensed under the LGPL. + * + * Note that this file only handles the SCSI architecture model and device + * commands. Emulation of interface/link layer protocols is handled by + * the host adapter emulator. + */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "qapi/error.h" +#include "qemu/error-report.h" +#include "qemu/main-loop.h" +#include "qemu/module.h" +#include "hw/scsi/scsi.h" +#include "migration/qemu-file-types.h" +#include "migration/vmstate.h" +#include "hw/scsi/emulation.h" +#include "scsi/constants.h" +#include "sysemu/block-backend.h" +#include "sysemu/blockdev.h" +#include "hw/block/block.h" +#include "hw/qdev-properties.h" +#include "hw/qdev-properties-system.h" +#include "sysemu/dma.h" +#include "sysemu/sysemu.h" +#include "qemu/cutils.h" +#include "trace.h" +#include "qom/object.h" + +#ifdef __linux +#include <scsi/sg.h> +#endif + +#define SCSI_WRITE_SAME_MAX (512 * KiB) +#define SCSI_DMA_BUF_SIZE (128 * KiB) +#define SCSI_MAX_INQUIRY_LEN 256 +#define SCSI_MAX_MODE_LEN 256 + +#define DEFAULT_DISCARD_GRANULARITY (4 * KiB) +#define DEFAULT_MAX_UNMAP_SIZE (1 * GiB) +#define DEFAULT_MAX_IO_SIZE INT_MAX /* 2 GB - 1 block */ + +#define TYPE_SCSI_DISK_BASE "scsi-disk-base" + +OBJECT_DECLARE_TYPE(SCSIDiskState, SCSIDiskClass, SCSI_DISK_BASE) + +struct SCSIDiskClass { + SCSIDeviceClass parent_class; + DMAIOFunc *dma_readv; + DMAIOFunc *dma_writev; + bool (*need_fua_emulation)(SCSICommand *cmd); + void (*update_sense)(SCSIRequest *r); +}; + +typedef struct SCSIDiskReq { + SCSIRequest req; + /* Both sector and sector_count are in terms of BDRV_SECTOR_SIZE bytes.
*/ + uint64_t sector; + uint32_t sector_count; + uint32_t buflen; + bool started; + bool need_fua_emulation; + struct iovec iov; + QEMUIOVector qiov; + BlockAcctCookie acct; +} SCSIDiskReq; + +#define SCSI_DISK_F_REMOVABLE 0 +#define SCSI_DISK_F_DPOFUA 1 +#define SCSI_DISK_F_NO_REMOVABLE_DEVOPS 2 + +struct SCSIDiskState { + SCSIDevice qdev; + uint32_t features; + bool media_changed; + bool media_event; + bool eject_request; + uint16_t port_index; + uint64_t max_unmap_size; + uint64_t max_io_size; + QEMUBH *bh; + char *version; + char *serial; + char *vendor; + char *product; + char *device_id; + bool tray_open; + bool tray_locked; + /* + * 0x0000 - rotation rate not reported + * 0x0001 - non-rotating medium (SSD) + * 0x0002-0x0400 - reserved + * 0x0401-0xfffe - rotations per minute + * 0xffff - reserved + */ + uint16_t rotation_rate; +}; + +static void scsi_free_request(SCSIRequest *req) +{ + SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); + + qemu_vfree(r->iov.iov_base); +} + +/* Helper function for command completion with sense. */ +static void scsi_check_condition(SCSIDiskReq *r, SCSISense sense) +{ + trace_scsi_disk_check_condition(r->req.tag, sense.key, sense.asc, + sense.ascq); + scsi_req_build_sense(&r->req, sense); + scsi_req_complete(&r->req, CHECK_CONDITION); +} + +static void scsi_init_iovec(SCSIDiskReq *r, size_t size) +{ + SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); + + if (!r->iov.iov_base) { + r->buflen = size; + r->iov.iov_base = blk_blockalign(s->qdev.conf.blk, r->buflen); + } + r->iov.iov_len = MIN(r->sector_count * BDRV_SECTOR_SIZE, r->buflen); + qemu_iovec_init_external(&r->qiov, &r->iov, 1); +} + +static void scsi_disk_save_request(QEMUFile *f, SCSIRequest *req) +{ + SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); + + qemu_put_be64s(f, &r->sector); + qemu_put_be32s(f, &r->sector_count); + qemu_put_be32s(f, &r->buflen); + if (r->buflen) { + if (r->req.cmd.mode == SCSI_XFER_TO_DEV) { + qemu_put_buffer(f, r->iov.iov_base, r->iov.iov_len); + } else if (!req->retry) { + uint32_t len = r->iov.iov_len; + qemu_put_be32s(f, &len); + qemu_put_buffer(f, r->iov.iov_base, r->iov.iov_len); + } + } +} + +static void scsi_disk_load_request(QEMUFile *f, SCSIRequest *req) +{ + SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); + + qemu_get_be64s(f, &r->sector); + qemu_get_be32s(f, &r->sector_count); + qemu_get_be32s(f, &r->buflen); + if (r->buflen) { + scsi_init_iovec(r, r->buflen); + if (r->req.cmd.mode == SCSI_XFER_TO_DEV) { + qemu_get_buffer(f, r->iov.iov_base, r->iov.iov_len); + } else if (!r->req.retry) { + uint32_t len; + qemu_get_be32s(f, &len); + r->iov.iov_len = len; + assert(r->iov.iov_len <= r->buflen); + qemu_get_buffer(f, r->iov.iov_base, r->iov.iov_len); + } + } + + qemu_iovec_init_external(&r->qiov, &r->iov, 1); +} + +/* + * scsi_handle_rw_error has two return values. False means that the error + * must be ignored, true means that the error has been processed and the + * caller should not do anything else for this request. Note that + * scsi_handle_rw_error always manages its reference counts, independent + * of the return value.
+ */ +static bool scsi_handle_rw_error(SCSIDiskReq *r, int ret, bool acct_failed) +{ + bool is_read = (r->req.cmd.mode == SCSI_XFER_FROM_DEV); + SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); + SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s)); + SCSISense sense = SENSE_CODE(NO_SENSE); + int error = 0; + bool req_has_sense = false; + BlockErrorAction action; + int status; + + if (ret < 0) { + status = scsi_sense_from_errno(-ret, &sense); + error = -ret; + } else { + /* A passthrough command has completed with nonzero status. */ + status = ret; + if (status == CHECK_CONDITION) { + req_has_sense = true; + error = scsi_sense_buf_to_errno(r->req.sense, sizeof(r->req.sense)); + } else { + error = EINVAL; + } + } + + /* + * Check whether the error has to be handled by the guest or should + * rather follow the rerror=/werror= settings. Guest-handled errors + * are usually retried immediately, so do not post them to QMP and + * do not account them as failed I/O. + */ + if (req_has_sense && + scsi_sense_buf_is_guest_recoverable(r->req.sense, sizeof(r->req.sense))) { + action = BLOCK_ERROR_ACTION_REPORT; + acct_failed = false; + } else { + action = blk_get_error_action(s->qdev.conf.blk, is_read, error); + blk_error_action(s->qdev.conf.blk, action, is_read, error); + } + + switch (action) { + case BLOCK_ERROR_ACTION_REPORT: + if (acct_failed) { + block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct); + } + if (req_has_sense) { + sdc->update_sense(&r->req); + } else if (status == CHECK_CONDITION) { + scsi_req_build_sense(&r->req, sense); + } + scsi_req_complete(&r->req, status); + return true; + + case BLOCK_ERROR_ACTION_IGNORE: + return false; + + case BLOCK_ERROR_ACTION_STOP: + scsi_req_retry(&r->req); + return true; + + default: + g_assert_not_reached(); + } +} + +static bool scsi_disk_req_check_error(SCSIDiskReq *r, int ret, bool acct_failed) +{ + if (r->req.io_canceled) { + scsi_req_cancel_complete(&r->req); + return true; + } + + if (ret < 0) { + return scsi_handle_rw_error(r, ret, acct_failed); + } + + return false; +} + +static void scsi_aio_complete(void *opaque, int ret) +{ + SCSIDiskReq *r = (SCSIDiskReq *)opaque; + SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); + + assert(r->req.aiocb != NULL); + r->req.aiocb = NULL; + aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk)); + if (scsi_disk_req_check_error(r, ret, true)) { + goto done; + } + + block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct); + scsi_req_complete(&r->req, GOOD); + +done: + aio_context_release(blk_get_aio_context(s->qdev.conf.blk)); + scsi_req_unref(&r->req); +} + +static bool scsi_is_cmd_fua(SCSICommand *cmd) +{ + switch (cmd->buf[0]) { + case READ_10: + case READ_12: + case READ_16: + case WRITE_10: + case WRITE_12: + case WRITE_16: + return (cmd->buf[1] & 8) != 0; + + case VERIFY_10: + case VERIFY_12: + case VERIFY_16: + case WRITE_VERIFY_10: + case WRITE_VERIFY_12: + case WRITE_VERIFY_16: + return true; + + case READ_6: + case WRITE_6: + default: + return false; + } +} + +static void scsi_write_do_fua(SCSIDiskReq *r) +{ + SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); + + assert(r->req.aiocb == NULL); + assert(!r->req.io_canceled); + + if (r->need_fua_emulation) { + block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0, + BLOCK_ACCT_FLUSH); + r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r); + return; + } + + scsi_req_complete(&r->req, GOOD); + scsi_req_unref(&r->req); +} + +static void 
scsi_dma_complete_noio(SCSIDiskReq *r, int ret) +{ + assert(r->req.aiocb == NULL); + if (scsi_disk_req_check_error(r, ret, false)) { + goto done; + } + + r->sector += r->sector_count; + r->sector_count = 0; + if (r->req.cmd.mode == SCSI_XFER_TO_DEV) { + scsi_write_do_fua(r); + return; + } else { + scsi_req_complete(&r->req, GOOD); + } + +done: + scsi_req_unref(&r->req); +} + +static void scsi_dma_complete(void *opaque, int ret) +{ + SCSIDiskReq *r = (SCSIDiskReq *)opaque; + SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); + + assert(r->req.aiocb != NULL); + r->req.aiocb = NULL; + + aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk)); + if (ret < 0) { + block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct); + } else { + block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct); + } + scsi_dma_complete_noio(r, ret); + aio_context_release(blk_get_aio_context(s->qdev.conf.blk)); +} + +static void scsi_read_complete_noio(SCSIDiskReq *r, int ret) +{ + uint32_t n; + + assert(r->req.aiocb == NULL); + if (scsi_disk_req_check_error(r, ret, false)) { + goto done; + } + + n = r->qiov.size / BDRV_SECTOR_SIZE; + r->sector += n; + r->sector_count -= n; + scsi_req_data(&r->req, r->qiov.size); + +done: + scsi_req_unref(&r->req); +} + +static void scsi_read_complete(void *opaque, int ret) +{ + SCSIDiskReq *r = (SCSIDiskReq *)opaque; + SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); + + assert(r->req.aiocb != NULL); + r->req.aiocb = NULL; + + aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk)); + if (ret < 0) { + block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct); + } else { + block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct); + trace_scsi_disk_read_complete(r->req.tag, r->qiov.size); + } + scsi_read_complete_noio(r, ret); + aio_context_release(blk_get_aio_context(s->qdev.conf.blk)); +} + +/* Actually issue a read to the block device. */ +static void scsi_do_read(SCSIDiskReq *r, int ret) +{ + SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); + SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s)); + + assert (r->req.aiocb == NULL); + if (scsi_disk_req_check_error(r, ret, false)) { + goto done; + } + + /* The request is used as the AIO opaque value, so add a ref. */ + scsi_req_ref(&r->req); + + if (r->req.sg) { + dma_acct_start(s->qdev.conf.blk, &r->acct, r->req.sg, BLOCK_ACCT_READ); + r->req.resid -= r->req.sg->size; + r->req.aiocb = dma_blk_io(blk_get_aio_context(s->qdev.conf.blk), + r->req.sg, r->sector << BDRV_SECTOR_BITS, + BDRV_SECTOR_SIZE, + sdc->dma_readv, r, scsi_dma_complete, r, + DMA_DIRECTION_FROM_DEVICE); + } else { + scsi_init_iovec(r, SCSI_DMA_BUF_SIZE); + block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, + r->qiov.size, BLOCK_ACCT_READ); + r->req.aiocb = sdc->dma_readv(r->sector << BDRV_SECTOR_BITS, &r->qiov, + scsi_read_complete, r, r); + } + +done: + scsi_req_unref(&r->req); +} + +static void scsi_do_read_cb(void *opaque, int ret) +{ + SCSIDiskReq *r = (SCSIDiskReq *)opaque; + SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); + + assert (r->req.aiocb != NULL); + r->req.aiocb = NULL; + + aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk)); + if (ret < 0) { + block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct); + } else { + block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct); + } + scsi_do_read(opaque, ret); + aio_context_release(blk_get_aio_context(s->qdev.conf.blk)); +} + +/* Read more data from scsi device into buffer. 
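+ * The first chunk of a read that needs FUA emulation is preceded by a
+ * flush (completing via scsi_do_read_cb); later chunks go straight to
+ * scsi_do_read().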
*/ +static void scsi_read_data(SCSIRequest *req) +{ + SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); + SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); + bool first; + + trace_scsi_disk_read_data_count(r->sector_count); + if (r->sector_count == 0) { + /* This also clears the sense buffer for REQUEST SENSE. */ + scsi_req_complete(&r->req, GOOD); + return; + } + + /* No data transfer may already be in progress */ + assert(r->req.aiocb == NULL); + + /* The request is used as the AIO opaque value, so add a ref. */ + scsi_req_ref(&r->req); + if (r->req.cmd.mode == SCSI_XFER_TO_DEV) { + trace_scsi_disk_read_data_invalid(); + scsi_read_complete_noio(r, -EINVAL); + return; + } + + if (!blk_is_available(req->dev->conf.blk)) { + scsi_read_complete_noio(r, -ENOMEDIUM); + return; + } + + first = !r->started; + r->started = true; + if (first && r->need_fua_emulation) { + block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0, + BLOCK_ACCT_FLUSH); + r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_do_read_cb, r); + } else { + scsi_do_read(r, 0); + } +} + +static void scsi_write_complete_noio(SCSIDiskReq *r, int ret) +{ + uint32_t n; + + assert (r->req.aiocb == NULL); + if (scsi_disk_req_check_error(r, ret, false)) { + goto done; + } + + n = r->qiov.size / BDRV_SECTOR_SIZE; + r->sector += n; + r->sector_count -= n; + if (r->sector_count == 0) { + scsi_write_do_fua(r); + return; + } else { + scsi_init_iovec(r, SCSI_DMA_BUF_SIZE); + trace_scsi_disk_write_complete_noio(r->req.tag, r->qiov.size); + scsi_req_data(&r->req, r->qiov.size); + } + +done: + scsi_req_unref(&r->req); +} + +static void scsi_write_complete(void * opaque, int ret) +{ + SCSIDiskReq *r = (SCSIDiskReq *)opaque; + SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); + + assert (r->req.aiocb != NULL); + r->req.aiocb = NULL; + + aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk)); + if (ret < 0) { + block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct); + } else { + block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct); + } + scsi_write_complete_noio(r, ret); + aio_context_release(blk_get_aio_context(s->qdev.conf.blk)); +} + +static void scsi_write_data(SCSIRequest *req) +{ + SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); + SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); + SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s)); + + /* No data transfer may already be in progress */ + assert(r->req.aiocb == NULL); + + /* The request is used as the AIO opaque value, so add a ref. */ + scsi_req_ref(&r->req); + if (r->req.cmd.mode != SCSI_XFER_TO_DEV) { + trace_scsi_disk_write_data_invalid(); + scsi_write_complete_noio(r, -EINVAL); + return; + } + + if (!r->req.sg && !r->qiov.size) { + /* Called for the first time. Ask the driver to send us more data. 
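+ * With an empty qiov, scsi_write_complete_noio(r, 0) below leaves the
+ * sector counters untouched and just primes the next chunk through
+ * scsi_init_iovec() and scsi_req_data().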
*/ + r->started = true; + scsi_write_complete_noio(r, 0); + return; + } + if (!blk_is_available(req->dev->conf.blk)) { + scsi_write_complete_noio(r, -ENOMEDIUM); + return; + } + + if (r->req.cmd.buf[0] == VERIFY_10 || r->req.cmd.buf[0] == VERIFY_12 || + r->req.cmd.buf[0] == VERIFY_16) { + if (r->req.sg) { + scsi_dma_complete_noio(r, 0); + } else { + scsi_write_complete_noio(r, 0); + } + return; + } + + if (r->req.sg) { + dma_acct_start(s->qdev.conf.blk, &r->acct, r->req.sg, BLOCK_ACCT_WRITE); + r->req.resid -= r->req.sg->size; + r->req.aiocb = dma_blk_io(blk_get_aio_context(s->qdev.conf.blk), + r->req.sg, r->sector << BDRV_SECTOR_BITS, + BDRV_SECTOR_SIZE, + sdc->dma_writev, r, scsi_dma_complete, r, + DMA_DIRECTION_TO_DEVICE); + } else { + block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, + r->qiov.size, BLOCK_ACCT_WRITE); + r->req.aiocb = sdc->dma_writev(r->sector << BDRV_SECTOR_BITS, &r->qiov, + scsi_write_complete, r, r); + } +} + +/* Return a pointer to the data buffer. */ +static uint8_t *scsi_get_buf(SCSIRequest *req) +{ + SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); + + return (uint8_t *)r->iov.iov_base; +} + +static int scsi_disk_emulate_vpd_page(SCSIRequest *req, uint8_t *outbuf) +{ + SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); + uint8_t page_code = req->cmd.buf[2]; + int start, buflen = 0; + + outbuf[buflen++] = s->qdev.type & 0x1f; + outbuf[buflen++] = page_code; + outbuf[buflen++] = 0x00; + outbuf[buflen++] = 0x00; + start = buflen; + + switch (page_code) { + case 0x00: /* Supported page codes, mandatory */ + { + trace_scsi_disk_emulate_vpd_page_00(req->cmd.xfer); + outbuf[buflen++] = 0x00; /* list of supported pages (this page) */ + if (s->serial) { + outbuf[buflen++] = 0x80; /* unit serial number */ + } + outbuf[buflen++] = 0x83; /* device identification */ + if (s->qdev.type == TYPE_DISK) { + outbuf[buflen++] = 0xb0; /* block limits */ + outbuf[buflen++] = 0xb1; /* block device characteristics */ + outbuf[buflen++] = 0xb2; /* thin provisioning */ + } + break; + } + case 0x80: /* Device serial number, optional */ + { + int l; + + if (!s->serial) { + trace_scsi_disk_emulate_vpd_page_80_not_supported(); + return -1; + } + + l = strlen(s->serial); + if (l > 36) { + l = 36; + } + + trace_scsi_disk_emulate_vpd_page_80(req->cmd.xfer); + memcpy(outbuf + buflen, s->serial, l); + buflen += l; + break; + } + + case 0x83: /* Device identification page, mandatory */ + { + int id_len = s->device_id ? 
MIN(strlen(s->device_id), 255 - 8) : 0; + + trace_scsi_disk_emulate_vpd_page_83(req->cmd.xfer); + + if (id_len) { + outbuf[buflen++] = 0x2; /* ASCII */ + outbuf[buflen++] = 0; /* not officially assigned */ + outbuf[buflen++] = 0; /* reserved */ + outbuf[buflen++] = id_len; /* length of data following */ + memcpy(outbuf + buflen, s->device_id, id_len); + buflen += id_len; + } + + if (s->qdev.wwn) { + outbuf[buflen++] = 0x1; /* Binary */ + outbuf[buflen++] = 0x3; /* NAA */ + outbuf[buflen++] = 0; /* reserved */ + outbuf[buflen++] = 8; + stq_be_p(&outbuf[buflen], s->qdev.wwn); + buflen += 8; + } + + if (s->qdev.port_wwn) { + outbuf[buflen++] = 0x61; /* SAS / Binary */ + outbuf[buflen++] = 0x93; /* PIV / Target port / NAA */ + outbuf[buflen++] = 0; /* reserved */ + outbuf[buflen++] = 8; + stq_be_p(&outbuf[buflen], s->qdev.port_wwn); + buflen += 8; + } + + if (s->port_index) { + outbuf[buflen++] = 0x61; /* SAS / Binary */ + + /* PIV/Target port/relative target port */ + outbuf[buflen++] = 0x94; + + outbuf[buflen++] = 0; /* reserved */ + outbuf[buflen++] = 4; + stw_be_p(&outbuf[buflen + 2], s->port_index); + buflen += 4; + } + break; + } + case 0xb0: /* block limits */ + { + SCSIBlockLimits bl = {}; + + if (s->qdev.type == TYPE_ROM) { + trace_scsi_disk_emulate_vpd_page_b0_not_supported(); + return -1; + } + bl.wsnz = 1; + bl.unmap_sectors = + s->qdev.conf.discard_granularity / s->qdev.blocksize; + bl.min_io_size = + s->qdev.conf.min_io_size / s->qdev.blocksize; + bl.opt_io_size = + s->qdev.conf.opt_io_size / s->qdev.blocksize; + bl.max_unmap_sectors = + s->max_unmap_size / s->qdev.blocksize; + bl.max_io_sectors = + s->max_io_size / s->qdev.blocksize; + /* 255 descriptors fit in 4 KiB with an 8-byte header */ + bl.max_unmap_descr = 255; + + if (s->qdev.type == TYPE_DISK) { + int max_transfer_blk = blk_get_max_transfer(s->qdev.conf.blk); + int max_io_sectors_blk = + max_transfer_blk / s->qdev.blocksize; + + bl.max_io_sectors = + MIN_NON_ZERO(max_io_sectors_blk, bl.max_io_sectors); + } + buflen += scsi_emulate_block_limits(outbuf + buflen, &bl); + break; + } + case 0xb1: /* block device characteristics */ + { + buflen = 0x40; + outbuf[4] = (s->rotation_rate >> 8) & 0xff; + outbuf[5] = s->rotation_rate & 0xff; + outbuf[6] = 0; /* PRODUCT TYPE */ + outbuf[7] = 0; /* WABEREQ | WACEREQ | NOMINAL FORM FACTOR */ + outbuf[8] = 0; /* VBULS */ + break; + } + case 0xb2: /* thin provisioning */ + { + buflen = 8; + outbuf[4] = 0; + outbuf[5] = 0xe0; /* unmap & write_same 10/16 all supported */ + outbuf[6] = s->qdev.conf.discard_granularity ? 2 : 1; + outbuf[7] = 0; + break; + } + default: + return -1; + } + /* done with EVPD */ + assert(buflen - start <= 255); + outbuf[start - 1] = buflen - start; + return buflen; +} + +static int scsi_disk_emulate_inquiry(SCSIRequest *req, uint8_t *outbuf) +{ + SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); + int buflen = 0; + + if (req->cmd.buf[1] & 0x1) { + /* Vital product data */ + return scsi_disk_emulate_vpd_page(req, outbuf); + } + + /* Standard INQUIRY data */ + if (req->cmd.buf[2] != 0) { + return -1; + } + + /* PAGE CODE == 0 */ + buflen = req->cmd.xfer; + if (buflen > SCSI_MAX_INQUIRY_LEN) { + buflen = SCSI_MAX_INQUIRY_LEN; + } + + outbuf[0] = s->qdev.type & 0x1f; + outbuf[1] = (s->features & (1 << SCSI_DISK_F_REMOVABLE)) ? 
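/* RMB bit: mark the medium as removable */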
0x80 : 0; + + strpadcpy((char *) &outbuf[16], 16, s->product, ' '); + strpadcpy((char *) &outbuf[8], 8, s->vendor, ' '); + + memset(&outbuf[32], 0, 4); + memcpy(&outbuf[32], s->version, MIN(4, strlen(s->version))); + /* + * We claim conformance to SPC-3, which is required for guests + * to ask for modern features like READ CAPACITY(16) or the + * block characteristics VPD page by default. Not all of SPC-3 + * is actually implemented, but we're good enough. + */ + outbuf[2] = s->qdev.default_scsi_version; + outbuf[3] = 2 | 0x10; /* Format 2, HiSup */ + + if (buflen > 36) { + outbuf[4] = buflen - 5; /* Additional Length = (Len - 1) - 4 */ + } else { + /* If the allocation length of CDB is too small, + the additional length is not adjusted */ + outbuf[4] = 36 - 5; + } + + /* Sync data transfer and TCQ. */ + outbuf[7] = 0x10 | (req->bus->info->tcq ? 0x02 : 0); + return buflen; +} + +static inline bool media_is_dvd(SCSIDiskState *s) +{ + uint64_t nb_sectors; + if (s->qdev.type != TYPE_ROM) { + return false; + } + if (!blk_is_available(s->qdev.conf.blk)) { + return false; + } + blk_get_geometry(s->qdev.conf.blk, &nb_sectors); + return nb_sectors > CD_MAX_SECTORS; +} + +static inline bool media_is_cd(SCSIDiskState *s) +{ + uint64_t nb_sectors; + if (s->qdev.type != TYPE_ROM) { + return false; + } + if (!blk_is_available(s->qdev.conf.blk)) { + return false; + } + blk_get_geometry(s->qdev.conf.blk, &nb_sectors); + return nb_sectors <= CD_MAX_SECTORS; +} + +static int scsi_read_disc_information(SCSIDiskState *s, SCSIDiskReq *r, + uint8_t *outbuf) +{ + uint8_t type = r->req.cmd.buf[1] & 7; + + if (s->qdev.type != TYPE_ROM) { + return -1; + } + + /* Types 1/2 are only defined for Blu-Ray. */ + if (type != 0) { + scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); + return -1; + } + + memset(outbuf, 0, 34); + outbuf[1] = 32; + outbuf[2] = 0xe; /* last session complete, disc finalized */ + outbuf[3] = 1; /* first track on disc */ + outbuf[4] = 1; /* # of sessions */ + outbuf[5] = 1; /* first track of last session */ + outbuf[6] = 1; /* last track of last session */ + outbuf[7] = 0x20; /* unrestricted use */ + outbuf[8] = 0x00; /* CD-ROM or DVD-ROM */ + /* 9-10-11: most significant byte corresponding bytes 4-5-6 */ + /* 12-23: not meaningful for CD-ROM or DVD-ROM */ + /* 24-31: disc bar code */ + /* 32: disc application code */ + /* 33: number of OPC tables */ + + return 34; +} + +static int scsi_read_dvd_structure(SCSIDiskState *s, SCSIDiskReq *r, + uint8_t *outbuf) +{ + static const int rds_caps_size[5] = { + [0] = 2048 + 4, + [1] = 4 + 4, + [3] = 188 + 4, + [4] = 2048 + 4, + }; + + uint8_t media = r->req.cmd.buf[1]; + uint8_t layer = r->req.cmd.buf[6]; + uint8_t format = r->req.cmd.buf[7]; + int size = -1; + + if (s->qdev.type != TYPE_ROM) { + return -1; + } + if (media != 0) { + scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); + return -1; + } + + if (format != 0xff) { + if (!blk_is_available(s->qdev.conf.blk)) { + scsi_check_condition(r, SENSE_CODE(NO_MEDIUM)); + return -1; + } + if (media_is_cd(s)) { + scsi_check_condition(r, SENSE_CODE(INCOMPATIBLE_FORMAT)); + return -1; + } + if (format >= ARRAY_SIZE(rds_caps_size)) { + return -1; + } + size = rds_caps_size[format]; + memset(outbuf, 0, size); + } + + switch (format) { + case 0x00: { + /* Physical format information */ + uint64_t nb_sectors; + if (layer != 0) { + goto fail; + } + blk_get_geometry(s->qdev.conf.blk, &nb_sectors); + + outbuf[4] = 1; /* DVD-ROM, part version 1 */ + outbuf[5] = 0xf; /* 120mm disc, minimum rate unspecified */ + 
outbuf[6] = 1; /* one layer, read-only (per MMC-2 spec) */ + outbuf[7] = 0; /* default densities */ + + stl_be_p(&outbuf[12], (nb_sectors >> 2) - 1); /* end sector */ + stl_be_p(&outbuf[16], (nb_sectors >> 2) - 1); /* l0 end sector */ + break; + } + + case 0x01: /* DVD copyright information, all zeros */ + break; + + case 0x03: /* BCA information - invalid field for no BCA info */ + return -1; + + case 0x04: /* DVD disc manufacturing information, all zeros */ + break; + + case 0xff: { /* List capabilities */ + int i; + size = 4; + for (i = 0; i < ARRAY_SIZE(rds_caps_size); i++) { + if (!rds_caps_size[i]) { + continue; + } + outbuf[size] = i; + outbuf[size + 1] = 0x40; /* Not writable, readable */ + stw_be_p(&outbuf[size + 2], rds_caps_size[i]); + size += 4; + } + break; + } + + default: + return -1; + } + + /* Size of buffer, not including 2 byte size field */ + stw_be_p(outbuf, size - 2); + return size; + +fail: + return -1; +} + +static int scsi_event_status_media(SCSIDiskState *s, uint8_t *outbuf) +{ + uint8_t event_code, media_status; + + media_status = 0; + if (s->tray_open) { + media_status = MS_TRAY_OPEN; + } else if (blk_is_inserted(s->qdev.conf.blk)) { + media_status = MS_MEDIA_PRESENT; + } + + /* Event notification descriptor */ + event_code = MEC_NO_CHANGE; + if (media_status != MS_TRAY_OPEN) { + if (s->media_event) { + event_code = MEC_NEW_MEDIA; + s->media_event = false; + } else if (s->eject_request) { + event_code = MEC_EJECT_REQUESTED; + s->eject_request = false; + } + } + + outbuf[0] = event_code; + outbuf[1] = media_status; + + /* These fields are reserved, just clear them. */ + outbuf[2] = 0; + outbuf[3] = 0; + return 4; +} + +static int scsi_get_event_status_notification(SCSIDiskState *s, SCSIDiskReq *r, + uint8_t *outbuf) +{ + int size; + uint8_t *buf = r->req.cmd.buf; + uint8_t notification_class_request = buf[4]; + if (s->qdev.type != TYPE_ROM) { + return -1; + } + if ((buf[1] & 1) == 0) { + /* asynchronous */ + return -1; + } + + size = 4; + outbuf[0] = outbuf[1] = 0; + outbuf[3] = 1 << GESN_MEDIA; /* supported events */ + if (notification_class_request & (1 << GESN_MEDIA)) { + outbuf[2] = GESN_MEDIA; + size += scsi_event_status_media(s, &outbuf[size]); + } else { + outbuf[2] = 0x80; + } + stw_be_p(outbuf, size - 4); + return size; +} + +static int scsi_get_configuration(SCSIDiskState *s, uint8_t *outbuf) +{ + int current; + + if (s->qdev.type != TYPE_ROM) { + return -1; + } + + if (media_is_dvd(s)) { + current = MMC_PROFILE_DVD_ROM; + } else if (media_is_cd(s)) { + current = MMC_PROFILE_CD_ROM; + } else { + current = MMC_PROFILE_NONE; + } + + memset(outbuf, 0, 40); + stl_be_p(&outbuf[0], 36); /* Bytes after the data length field */ + stw_be_p(&outbuf[6], current); + /* outbuf[8] - outbuf[19]: Feature 0 - Profile list */ + outbuf[10] = 0x03; /* persistent, current */ + outbuf[11] = 8; /* two profiles */ + stw_be_p(&outbuf[12], MMC_PROFILE_DVD_ROM); + outbuf[14] = (current == MMC_PROFILE_DVD_ROM); + stw_be_p(&outbuf[16], MMC_PROFILE_CD_ROM); + outbuf[18] = (current == MMC_PROFILE_CD_ROM); + /* outbuf[20] - outbuf[31]: Feature 1 - Core feature */ + stw_be_p(&outbuf[20], 1); + outbuf[22] = 0x08 | 0x03; /* version 2, persistent, current */ + outbuf[23] = 8; + stl_be_p(&outbuf[24], 1); /* SCSI */ + outbuf[28] = 1; /* DBE = 1, mandatory */ + /* outbuf[32] - outbuf[39]: Feature 3 - Removable media feature */ + stw_be_p(&outbuf[32], 3); + outbuf[34] = 0x08 | 0x03; /* version 2, persistent, current */ + outbuf[35] = 4; + outbuf[36] = 0x39; /* tray, load=1, eject=1, unlocked 
at powerup, lock=1 */ + /* TODO: Random readable, CD read, DVD read, drive serial number, + power management */ + return 40; +} + +static int scsi_emulate_mechanism_status(SCSIDiskState *s, uint8_t *outbuf) +{ + if (s->qdev.type != TYPE_ROM) { + return -1; + } + memset(outbuf, 0, 8); + outbuf[5] = 1; /* CD-ROM */ + return 8; +} + +static int mode_sense_page(SCSIDiskState *s, int page, uint8_t **p_outbuf, + int page_control) +{ + static const int mode_sense_valid[0x3f] = { + [MODE_PAGE_HD_GEOMETRY] = (1 << TYPE_DISK), + [MODE_PAGE_FLEXIBLE_DISK_GEOMETRY] = (1 << TYPE_DISK), + [MODE_PAGE_CACHING] = (1 << TYPE_DISK) | (1 << TYPE_ROM), + [MODE_PAGE_R_W_ERROR] = (1 << TYPE_DISK) | (1 << TYPE_ROM), + [MODE_PAGE_AUDIO_CTL] = (1 << TYPE_ROM), + [MODE_PAGE_CAPABILITIES] = (1 << TYPE_ROM), + }; + + uint8_t *p = *p_outbuf + 2; + int length; + + assert(page < ARRAY_SIZE(mode_sense_valid)); + if ((mode_sense_valid[page] & (1 << s->qdev.type)) == 0) { + return -1; + } + + /* + * If Changeable Values are requested, a mask denoting those mode parameters + * that are changeable shall be returned. As we currently don't support + * parameter changes via MODE_SELECT, all bits are returned set to zero. + * The buffer was already memset to zero by the caller of this function. + * + * The offsets here are off by two compared to the descriptions in the + * SCSI specs, because those include a 2-byte header. This is unfortunate, + * but it is done so that offsets are consistent within our implementation + * of MODE SENSE and MODE SELECT. MODE SELECT has to deal with both + * 2-byte and 4-byte headers. + */ + switch (page) { + case MODE_PAGE_HD_GEOMETRY: + length = 0x16; + if (page_control == 1) { /* Changeable Values */ + break; + } + /* if a geometry hint is available, use it */ + p[0] = (s->qdev.conf.cyls >> 16) & 0xff; + p[1] = (s->qdev.conf.cyls >> 8) & 0xff; + p[2] = s->qdev.conf.cyls & 0xff; + p[3] = s->qdev.conf.heads & 0xff; + /* Write precomp start cylinder, disabled */ + p[4] = (s->qdev.conf.cyls >> 16) & 0xff; + p[5] = (s->qdev.conf.cyls >> 8) & 0xff; + p[6] = s->qdev.conf.cyls & 0xff; + /* Reduced current start cylinder, disabled */ + p[7] = (s->qdev.conf.cyls >> 16) & 0xff; + p[8] = (s->qdev.conf.cyls >> 8) & 0xff; + p[9] = s->qdev.conf.cyls & 0xff; + /* Device step rate [ns], 200ns */ + p[10] = 0; + p[11] = 200; + /* Landing zone cylinder */ + p[12] = 0xff; + p[13] = 0xff; + p[14] = 0xff; + /* Medium rotation rate [rpm], 5400 rpm */ + p[18] = (5400 >> 8) & 0xff; + p[19] = 5400 & 0xff; + break; + + case MODE_PAGE_FLEXIBLE_DISK_GEOMETRY: + length = 0x1e; + if (page_control == 1) { /* Changeable Values */ + break; + } + /* Transfer rate [kbit/s], 5Mbit/s */ + p[0] = 5000 >> 8; + p[1] = 5000 & 0xff; + /* if a geometry hint is available, use it */ + p[2] = s->qdev.conf.heads & 0xff; + p[3] = s->qdev.conf.secs & 0xff; + p[4] = s->qdev.blocksize >> 8; + p[6] = (s->qdev.conf.cyls >> 8) & 0xff; + p[7] = s->qdev.conf.cyls & 0xff; + /* Write precomp start cylinder, disabled */ + p[8] = (s->qdev.conf.cyls >> 8) & 0xff; + p[9] = s->qdev.conf.cyls & 0xff; + /* Reduced current start cylinder, disabled */ + p[10] = (s->qdev.conf.cyls >> 8) & 0xff; + p[11] = s->qdev.conf.cyls & 0xff; + /* Device step rate [100us], 100us */ + p[12] = 0; + p[13] = 1; + /* Device step pulse width [us], 1us */ + p[14] = 1; + /* Device head settle delay [100us], 100us */ + p[15] = 0; + p[16] = 1; + /* Motor on delay [0.1s], 0.1s */ + p[17] = 1; + /* Motor off delay [0.1s], 0.1s */ + p[18] = 1; + /* Medium rotation rate [rpm], 5400 rpm */ +
p[26] = (5400 >> 8) & 0xff; + p[27] = 5400 & 0xff; + break; + + case MODE_PAGE_CACHING: + length = 0x12; + if (page_control == 1 || /* Changeable Values */ + blk_enable_write_cache(s->qdev.conf.blk)) { + p[0] = 4; /* WCE */ + } + break; + + case MODE_PAGE_R_W_ERROR: + length = 10; + if (page_control == 1) { /* Changeable Values */ + break; + } + p[0] = 0x80; /* Automatic Write Reallocation Enabled */ + if (s->qdev.type == TYPE_ROM) { + p[1] = 0x20; /* Read Retry Count */ + } + break; + + case MODE_PAGE_AUDIO_CTL: + length = 14; + break; + + case MODE_PAGE_CAPABILITIES: + length = 0x14; + if (page_control == 1) { /* Changeable Values */ + break; + } + + p[0] = 0x3b; /* CD-R & CD-RW read */ + p[1] = 0; /* Writing not supported */ + p[2] = 0x7f; /* Audio, composite, digital out, + mode 2 form 1&2, multi session */ + p[3] = 0xff; /* CD DA, DA accurate, RW supported, + RW corrected, C2 errors, ISRC, + UPC, Bar code */ + p[4] = 0x2d | (s->tray_locked ? 2 : 0); + /* Locking supported, jumper present, eject, tray */ + p[5] = 0; /* no volume & mute control, no + changer */ + p[6] = (50 * 176) >> 8; /* 50x read speed */ + p[7] = (50 * 176) & 0xff; + p[8] = 2 >> 8; /* Two volume levels */ + p[9] = 2 & 0xff; + p[10] = 2048 >> 8; /* 2M buffer */ + p[11] = 2048 & 0xff; + p[12] = (16 * 176) >> 8; /* 16x read speed current */ + p[13] = (16 * 176) & 0xff; + p[16] = (16 * 176) >> 8; /* 16x write speed */ + p[17] = (16 * 176) & 0xff; + p[18] = (16 * 176) >> 8; /* 16x write speed current */ + p[19] = (16 * 176) & 0xff; + break; + + default: + return -1; + } + + assert(length < 256); + (*p_outbuf)[0] = page; + (*p_outbuf)[1] = length; + *p_outbuf += length + 2; + return length + 2; +} + +static int scsi_disk_emulate_mode_sense(SCSIDiskReq *r, uint8_t *outbuf) +{ + SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); + uint64_t nb_sectors; + bool dbd; + int page, buflen, ret, page_control; + uint8_t *p; + uint8_t dev_specific_param; + + dbd = (r->req.cmd.buf[1] & 0x8) != 0; + page = r->req.cmd.buf[2] & 0x3f; + page_control = (r->req.cmd.buf[2] & 0xc0) >> 6; + + trace_scsi_disk_emulate_mode_sense((r->req.cmd.buf[0] == MODE_SENSE) ? 6 : + 10, page, r->req.cmd.xfer, page_control); + memset(outbuf, 0, r->req.cmd.xfer); + p = outbuf; + + if (s->qdev.type == TYPE_DISK) { + dev_specific_param = s->features & (1 << SCSI_DISK_F_DPOFUA) ? 0x10 : 0; + if (!blk_is_writable(s->qdev.conf.blk)) { + dev_specific_param |= 0x80; /* Readonly. */ + } + } else { + /* MMC prescribes that CD/DVD drives have no block descriptors, + * and defines no device-specific parameter. */ + dev_specific_param = 0x00; + dbd = true; + } + + if (r->req.cmd.buf[0] == MODE_SENSE) { + p[1] = 0; /* Default media type. */ + p[2] = dev_specific_param; + p[3] = 0; /* Block descriptor length. */ + p += 4; + } else { /* MODE_SENSE_10 */ + p[2] = 0; /* Default media type. */ + p[3] = dev_specific_param; + p[6] = p[7] = 0; /* Block descriptor length. 
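+ * (rewritten to 8 further down when a block descriptor is appended).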
*/ + p += 8; + } + + blk_get_geometry(s->qdev.conf.blk, &nb_sectors); + if (!dbd && nb_sectors) { + if (r->req.cmd.buf[0] == MODE_SENSE) { + outbuf[3] = 8; /* Block descriptor length */ + } else { /* MODE_SENSE_10 */ + outbuf[7] = 8; /* Block descriptor length */ + } + nb_sectors /= (s->qdev.blocksize / BDRV_SECTOR_SIZE); + if (nb_sectors > 0xffffff) { + nb_sectors = 0; + } + p[0] = 0; /* media density code */ + p[1] = (nb_sectors >> 16) & 0xff; + p[2] = (nb_sectors >> 8) & 0xff; + p[3] = nb_sectors & 0xff; + p[4] = 0; /* reserved */ + p[5] = 0; /* bytes 5-7 are the sector size in bytes */ + p[6] = s->qdev.blocksize >> 8; + p[7] = 0; + p += 8; + } + + if (page_control == 3) { + /* Saved Values */ + scsi_check_condition(r, SENSE_CODE(SAVING_PARAMS_NOT_SUPPORTED)); + return -1; + } + + if (page == 0x3f) { + for (page = 0; page <= 0x3e; page++) { + mode_sense_page(s, page, &p, page_control); + } + } else { + ret = mode_sense_page(s, page, &p, page_control); + if (ret == -1) { + return -1; + } + } + + buflen = p - outbuf; + /* + * The mode data length field specifies the length in bytes of the + * following data that is available to be transferred. The mode data + * length does not include itself. + */ + if (r->req.cmd.buf[0] == MODE_SENSE) { + outbuf[0] = buflen - 1; + } else { /* MODE_SENSE_10 */ + outbuf[0] = ((buflen - 2) >> 8) & 0xff; + outbuf[1] = (buflen - 2) & 0xff; + } + return buflen; +} + +static int scsi_disk_emulate_read_toc(SCSIRequest *req, uint8_t *outbuf) +{ + SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); + int start_track, format, msf, toclen; + uint64_t nb_sectors; + + msf = req->cmd.buf[1] & 2; + format = req->cmd.buf[2] & 0xf; + start_track = req->cmd.buf[6]; + blk_get_geometry(s->qdev.conf.blk, &nb_sectors); + trace_scsi_disk_emulate_read_toc(start_track, format, msf >> 1); + nb_sectors /= s->qdev.blocksize / BDRV_SECTOR_SIZE; + switch (format) { + case 0: + toclen = cdrom_read_toc(nb_sectors, outbuf, msf, start_track); + break; + case 1: + /* multi session : only a single session defined */ + toclen = 12; + memset(outbuf, 0, 12); + outbuf[1] = 0x0a; + outbuf[2] = 0x01; + outbuf[3] = 0x01; + break; + case 2: + toclen = cdrom_read_toc_raw(nb_sectors, outbuf, msf, start_track); + break; + default: + return -1; + } + return toclen; +} + +static int scsi_disk_emulate_start_stop(SCSIDiskReq *r) +{ + SCSIRequest *req = &r->req; + SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); + bool start = req->cmd.buf[4] & 1; + bool loej = req->cmd.buf[4] & 2; /* load on start, eject on !start */ + int pwrcnd = req->cmd.buf[4] & 0xf0; + + if (pwrcnd) { + /* eject/load only happens for power condition == 0 */ + return 0; + } + + if ((s->features & (1 << SCSI_DISK_F_REMOVABLE)) && loej) { + if (!start && !s->tray_open && s->tray_locked) { + scsi_check_condition(r, + blk_is_inserted(s->qdev.conf.blk) + ? SENSE_CODE(ILLEGAL_REQ_REMOVAL_PREVENTED) + : SENSE_CODE(NOT_READY_REMOVAL_PREVENTED)); + return -1; + } + + if (s->tray_open != !start) { + blk_eject(s->qdev.conf.blk, !start); + s->tray_open = !start; + } + } + return 0; +} + +static void scsi_disk_emulate_read_data(SCSIRequest *req) +{ + SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); + int buflen = r->iov.iov_len; + + if (buflen) { + trace_scsi_disk_emulate_read_data(buflen); + r->iov.iov_len = 0; + r->started = true; + scsi_req_data(&r->req, buflen); + return; + } + + /* This also clears the sense buffer for REQUEST SENSE. 
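+ * scsi_req_complete() drops the stored sense whenever the status is
+ * GOOD, so a subsequent REQUEST SENSE cannot see stale data.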
*/ + scsi_req_complete(&r->req, GOOD); +} + +static int scsi_disk_check_mode_select(SCSIDiskState *s, int page, + uint8_t *inbuf, int inlen) +{ + uint8_t mode_current[SCSI_MAX_MODE_LEN]; + uint8_t mode_changeable[SCSI_MAX_MODE_LEN]; + uint8_t *p; + int len, expected_len, changeable_len, i; + + /* The input buffer does not include the page header, so it is + * off by 2 bytes. + */ + expected_len = inlen + 2; + if (expected_len > SCSI_MAX_MODE_LEN) { + return -1; + } + + /* MODE_PAGE_ALLS is only valid for MODE SENSE commands */ + if (page == MODE_PAGE_ALLS) { + return -1; + } + + p = mode_current; + memset(mode_current, 0, inlen + 2); + len = mode_sense_page(s, page, &p, 0); + if (len < 0 || len != expected_len) { + return -1; + } + + p = mode_changeable; + memset(mode_changeable, 0, inlen + 2); + changeable_len = mode_sense_page(s, page, &p, 1); + assert(changeable_len == len); + + /* Check that unchangeable bits are the same as what MODE SENSE + * would return. + */ + for (i = 2; i < len; i++) { + if (((mode_current[i] ^ inbuf[i - 2]) & ~mode_changeable[i]) != 0) { + return -1; + } + } + return 0; +} + +static void scsi_disk_apply_mode_select(SCSIDiskState *s, int page, uint8_t *p) +{ + switch (page) { + case MODE_PAGE_CACHING: + blk_set_enable_write_cache(s->qdev.conf.blk, (p[0] & 4) != 0); + break; + + default: + break; + } +} + +static int mode_select_pages(SCSIDiskReq *r, uint8_t *p, int len, bool change) +{ + SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); + + while (len > 0) { + int page, subpage, page_len; + + /* Parse both possible formats for the mode page headers. */ + page = p[0] & 0x3f; + if (p[0] & 0x40) { + if (len < 4) { + goto invalid_param_len; + } + subpage = p[1]; + page_len = lduw_be_p(&p[2]); + p += 4; + len -= 4; + } else { + if (len < 2) { + goto invalid_param_len; + } + subpage = 0; + page_len = p[1]; + p += 2; + len -= 2; + } + + if (subpage) { + goto invalid_param; + } + if (page_len > len) { + goto invalid_param_len; + } + + if (!change) { + if (scsi_disk_check_mode_select(s, page, p, page_len) < 0) { + goto invalid_param; + } + } else { + scsi_disk_apply_mode_select(s, page, p); + } + + p += page_len; + len -= page_len; + } + return 0; + +invalid_param: + scsi_check_condition(r, SENSE_CODE(INVALID_PARAM)); + return -1; + +invalid_param_len: + scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN)); + return -1; +} + +static void scsi_disk_emulate_mode_select(SCSIDiskReq *r, uint8_t *inbuf) +{ + SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); + uint8_t *p = inbuf; + int cmd = r->req.cmd.buf[0]; + int len = r->req.cmd.xfer; + int hdr_len = (cmd == MODE_SELECT ? 4 : 8); + int bd_len; + int pass; + + /* We only support PF=1, SP=0. */ + if ((r->req.cmd.buf[1] & 0x11) != 0x10) { + goto invalid_field; + } + + if (len < hdr_len) { + goto invalid_param_len; + } + + bd_len = (cmd == MODE_SELECT ? p[3] : lduw_be_p(&p[6])); + len -= hdr_len; + p += hdr_len; + if (len < bd_len) { + goto invalid_param_len; + } + if (bd_len != 0 && bd_len != 8) { + goto invalid_param; + } + + len -= bd_len; + p += bd_len; + + /* Ensure no change is made if there is an error! */ + for (pass = 0; pass < 2; pass++) { + if (mode_select_pages(r, p, len, pass == 1) < 0) { + assert(pass == 0); + return; + } + } + if (!blk_enable_write_cache(s->qdev.conf.blk)) { + /* The request is used as the AIO opaque value, so add a ref. 
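+ * The matching unref is in scsi_aio_complete(), once the flush issued
+ * below has finished.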
*/ + scsi_req_ref(&r->req); + block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0, + BLOCK_ACCT_FLUSH); + r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r); + return; + } + + scsi_req_complete(&r->req, GOOD); + return; + +invalid_param: + scsi_check_condition(r, SENSE_CODE(INVALID_PARAM)); + return; + +invalid_param_len: + scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN)); + return; + +invalid_field: + scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); +} + +/* sector_num and nb_sectors expected to be in qdev blocksize */ +static inline bool check_lba_range(SCSIDiskState *s, + uint64_t sector_num, uint32_t nb_sectors) +{ + /* + * The first line tests that no overflow happens when computing the last + * sector. The second line tests that the last accessed sector is in + * range. + * + * Careful, the computations should not underflow for nb_sectors == 0, + * and a 0-block read to the first LBA beyond the end of device is + * valid. + */ + return (sector_num <= sector_num + nb_sectors && + sector_num + nb_sectors <= s->qdev.max_lba + 1); +} + +typedef struct UnmapCBData { + SCSIDiskReq *r; + uint8_t *inbuf; + int count; +} UnmapCBData; + +static void scsi_unmap_complete(void *opaque, int ret); + +static void scsi_unmap_complete_noio(UnmapCBData *data, int ret) +{ + SCSIDiskReq *r = data->r; + SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); + + assert(r->req.aiocb == NULL); + + if (data->count > 0) { + uint64_t sector_num = ldq_be_p(&data->inbuf[0]); + uint32_t nb_sectors = ldl_be_p(&data->inbuf[8]) & 0xffffffffULL; + r->sector = sector_num * (s->qdev.blocksize / BDRV_SECTOR_SIZE); + r->sector_count = nb_sectors * (s->qdev.blocksize / BDRV_SECTOR_SIZE); + + if (!check_lba_range(s, sector_num, nb_sectors)) { + block_acct_invalid(blk_get_stats(s->qdev.conf.blk), + BLOCK_ACCT_UNMAP); + scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE)); + goto done; + } + + block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, + r->sector_count * BDRV_SECTOR_SIZE, + BLOCK_ACCT_UNMAP); + + r->req.aiocb = blk_aio_pdiscard(s->qdev.conf.blk, + r->sector * BDRV_SECTOR_SIZE, + r->sector_count * BDRV_SECTOR_SIZE, + scsi_unmap_complete, data); + data->count--; + data->inbuf += 16; + return; + } + + scsi_req_complete(&r->req, GOOD); + +done: + scsi_req_unref(&r->req); + g_free(data); +} + +static void scsi_unmap_complete(void *opaque, int ret) +{ + UnmapCBData *data = opaque; + SCSIDiskReq *r = data->r; + SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); + + assert(r->req.aiocb != NULL); + r->req.aiocb = NULL; + + aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk)); + if (scsi_disk_req_check_error(r, ret, true)) { + scsi_req_unref(&r->req); + g_free(data); + } else { + block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct); + scsi_unmap_complete_noio(data, ret); + } + aio_context_release(blk_get_aio_context(s->qdev.conf.blk)); +} + +static void scsi_disk_emulate_unmap(SCSIDiskReq *r, uint8_t *inbuf) +{ + SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); + uint8_t *p = inbuf; + int len = r->req.cmd.xfer; + UnmapCBData *data; + + /* Reject ANCHOR=1. 
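+ * Anchored logical blocks are an optional thin-provisioning feature
+ * that this emulation does not claim to support.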
*/ + if (r->req.cmd.buf[1] & 0x1) { + goto invalid_field; + } + + if (len < 8) { + goto invalid_param_len; + } + if (len < lduw_be_p(&p[0]) + 2) { + goto invalid_param_len; + } + if (len < lduw_be_p(&p[2]) + 8) { + goto invalid_param_len; + } + if (lduw_be_p(&p[2]) & 15) { + goto invalid_param_len; + } + + if (!blk_is_writable(s->qdev.conf.blk)) { + block_acct_invalid(blk_get_stats(s->qdev.conf.blk), BLOCK_ACCT_UNMAP); + scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED)); + return; + } + + data = g_new0(UnmapCBData, 1); + data->r = r; + data->inbuf = &p[8]; + data->count = lduw_be_p(&p[2]) >> 4; + + /* The matching unref is in scsi_unmap_complete, before data is freed. */ + scsi_req_ref(&r->req); + scsi_unmap_complete_noio(data, 0); + return; + +invalid_param_len: + block_acct_invalid(blk_get_stats(s->qdev.conf.blk), BLOCK_ACCT_UNMAP); + scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN)); + return; + +invalid_field: + block_acct_invalid(blk_get_stats(s->qdev.conf.blk), BLOCK_ACCT_UNMAP); + scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); +} + +typedef struct WriteSameCBData { + SCSIDiskReq *r; + int64_t sector; + int nb_sectors; + QEMUIOVector qiov; + struct iovec iov; +} WriteSameCBData; + +static void scsi_write_same_complete(void *opaque, int ret) +{ + WriteSameCBData *data = opaque; + SCSIDiskReq *r = data->r; + SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); + + assert(r->req.aiocb != NULL); + r->req.aiocb = NULL; + aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk)); + if (scsi_disk_req_check_error(r, ret, true)) { + goto done; + } + + block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct); + + data->nb_sectors -= data->iov.iov_len / BDRV_SECTOR_SIZE; + data->sector += data->iov.iov_len / BDRV_SECTOR_SIZE; + data->iov.iov_len = MIN(data->nb_sectors * BDRV_SECTOR_SIZE, + data->iov.iov_len); + if (data->iov.iov_len) { + block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, + data->iov.iov_len, BLOCK_ACCT_WRITE); + /* Reinitialize qiov, to handle unaligned WRITE SAME request + * where final qiov may need smaller size */ + qemu_iovec_init_external(&data->qiov, &data->iov, 1); + r->req.aiocb = blk_aio_pwritev(s->qdev.conf.blk, + data->sector << BDRV_SECTOR_BITS, + &data->qiov, 0, + scsi_write_same_complete, data); + aio_context_release(blk_get_aio_context(s->qdev.conf.blk)); + return; + } + + scsi_req_complete(&r->req, GOOD); + +done: + scsi_req_unref(&r->req); + qemu_vfree(data->iov.iov_base); + g_free(data); + aio_context_release(blk_get_aio_context(s->qdev.conf.blk)); +} + +static void scsi_disk_emulate_write_same(SCSIDiskReq *r, uint8_t *inbuf) +{ + SCSIRequest *req = &r->req; + SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); + uint32_t nb_sectors = scsi_data_cdb_xfer(r->req.cmd.buf); + WriteSameCBData *data; + uint8_t *buf; + int i; + + /* Fail if PBDATA=1 or LBDATA=1 or ANCHOR=1. */ + if (nb_sectors == 0 || (req->cmd.buf[1] & 0x16)) { + scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); + return; + } + + if (!blk_is_writable(s->qdev.conf.blk)) { + scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED)); + return; + } + if (!check_lba_range(s, r->req.cmd.lba, nb_sectors)) { + scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE)); + return; + } + + if ((req->cmd.buf[1] & 0x1) || buffer_is_zero(inbuf, s->qdev.blocksize)) { + int flags = (req->cmd.buf[1] & 0x8) ? BDRV_REQ_MAY_UNMAP : 0; + + /* The request is used as the AIO opaque value, so add a ref. 
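+ * scsi_aio_complete() drops the reference again once the zero-write
+ * finishes.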
*/ + scsi_req_ref(&r->req); + block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, + nb_sectors * s->qdev.blocksize, + BLOCK_ACCT_WRITE); + r->req.aiocb = blk_aio_pwrite_zeroes(s->qdev.conf.blk, + r->req.cmd.lba * s->qdev.blocksize, + nb_sectors * s->qdev.blocksize, + flags, scsi_aio_complete, r); + return; + } + + data = g_new0(WriteSameCBData, 1); + data->r = r; + data->sector = r->req.cmd.lba * (s->qdev.blocksize / BDRV_SECTOR_SIZE); + data->nb_sectors = nb_sectors * (s->qdev.blocksize / BDRV_SECTOR_SIZE); + data->iov.iov_len = MIN(data->nb_sectors * BDRV_SECTOR_SIZE, + SCSI_WRITE_SAME_MAX); + data->iov.iov_base = buf = blk_blockalign(s->qdev.conf.blk, + data->iov.iov_len); + qemu_iovec_init_external(&data->qiov, &data->iov, 1); + + for (i = 0; i < data->iov.iov_len; i += s->qdev.blocksize) { + memcpy(&buf[i], inbuf, s->qdev.blocksize); + } + + scsi_req_ref(&r->req); + block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, + data->iov.iov_len, BLOCK_ACCT_WRITE); + r->req.aiocb = blk_aio_pwritev(s->qdev.conf.blk, + data->sector << BDRV_SECTOR_BITS, + &data->qiov, 0, + scsi_write_same_complete, data); +} + +static void scsi_disk_emulate_write_data(SCSIRequest *req) +{ + SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); + + if (r->iov.iov_len) { + int buflen = r->iov.iov_len; + trace_scsi_disk_emulate_write_data(buflen); + r->iov.iov_len = 0; + scsi_req_data(&r->req, buflen); + return; + } + + switch (req->cmd.buf[0]) { + case MODE_SELECT: + case MODE_SELECT_10: + /* This also clears the sense buffer for REQUEST SENSE. */ + scsi_disk_emulate_mode_select(r, r->iov.iov_base); + break; + + case UNMAP: + scsi_disk_emulate_unmap(r, r->iov.iov_base); + break; + + case VERIFY_10: + case VERIFY_12: + case VERIFY_16: + if (r->req.status == -1) { + scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); + } + break; + + case WRITE_SAME_10: + case WRITE_SAME_16: + scsi_disk_emulate_write_same(r, r->iov.iov_base); + break; + + default: + abort(); + } +} + +static int32_t scsi_disk_emulate_command(SCSIRequest *req, uint8_t *buf) +{ + SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); + SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); + uint64_t nb_sectors; + uint8_t *outbuf; + int buflen; + + switch (req->cmd.buf[0]) { + case INQUIRY: + case MODE_SENSE: + case MODE_SENSE_10: + case RESERVE: + case RESERVE_10: + case RELEASE: + case RELEASE_10: + case START_STOP: + case ALLOW_MEDIUM_REMOVAL: + case GET_CONFIGURATION: + case GET_EVENT_STATUS_NOTIFICATION: + case MECHANISM_STATUS: + case REQUEST_SENSE: + break; + + default: + if (!blk_is_available(s->qdev.conf.blk)) { + scsi_check_condition(r, SENSE_CODE(NO_MEDIUM)); + return 0; + } + break; + } + + /* + * FIXME: we shouldn't return anything bigger than 4k, but the code + * requires the buffer to be as big as req->cmd.xfer in several + * places. So, do not allow CDBs with a very large ALLOCATION + * LENGTH. The real fix would be to modify scsi_read_data and + * dma_buf_read, so that they return data beyond the buflen + * as all zeros. 
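+ * Capping the ALLOCATION LENGTH at 64k also bounds the bounce buffer
+ * allocated below.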
+ */ + if (req->cmd.xfer > 65536) { + goto illegal_request; + } + r->buflen = MAX(4096, req->cmd.xfer); + + if (!r->iov.iov_base) { + r->iov.iov_base = blk_blockalign(s->qdev.conf.blk, r->buflen); + } + + outbuf = r->iov.iov_base; + memset(outbuf, 0, r->buflen); + switch (req->cmd.buf[0]) { + case TEST_UNIT_READY: + assert(blk_is_available(s->qdev.conf.blk)); + break; + case INQUIRY: + buflen = scsi_disk_emulate_inquiry(req, outbuf); + if (buflen < 0) { + goto illegal_request; + } + break; + case MODE_SENSE: + case MODE_SENSE_10: + buflen = scsi_disk_emulate_mode_sense(r, outbuf); + if (buflen < 0) { + goto illegal_request; + } + break; + case READ_TOC: + buflen = scsi_disk_emulate_read_toc(req, outbuf); + if (buflen < 0) { + goto illegal_request; + } + break; + case RESERVE: + if (req->cmd.buf[1] & 1) { + goto illegal_request; + } + break; + case RESERVE_10: + if (req->cmd.buf[1] & 3) { + goto illegal_request; + } + break; + case RELEASE: + if (req->cmd.buf[1] & 1) { + goto illegal_request; + } + break; + case RELEASE_10: + if (req->cmd.buf[1] & 3) { + goto illegal_request; + } + break; + case START_STOP: + if (scsi_disk_emulate_start_stop(r) < 0) { + return 0; + } + break; + case ALLOW_MEDIUM_REMOVAL: + s->tray_locked = req->cmd.buf[4] & 1; + blk_lock_medium(s->qdev.conf.blk, req->cmd.buf[4] & 1); + break; + case READ_CAPACITY_10: + /* The normal LEN field for this command is zero. */ + memset(outbuf, 0, 8); + blk_get_geometry(s->qdev.conf.blk, &nb_sectors); + if (!nb_sectors) { + scsi_check_condition(r, SENSE_CODE(LUN_NOT_READY)); + return 0; + } + if ((req->cmd.buf[8] & 1) == 0 && req->cmd.lba) { + goto illegal_request; + } + nb_sectors /= s->qdev.blocksize / BDRV_SECTOR_SIZE; + /* Returned value is the address of the last sector. */ + nb_sectors--; + /* Remember the new size for read/write sanity checking. */ + s->qdev.max_lba = nb_sectors; + /* Clip to 2TB, instead of returning capacity modulo 2TB. */ + if (nb_sectors > UINT32_MAX) { + nb_sectors = UINT32_MAX; + } + outbuf[0] = (nb_sectors >> 24) & 0xff; + outbuf[1] = (nb_sectors >> 16) & 0xff; + outbuf[2] = (nb_sectors >> 8) & 0xff; + outbuf[3] = nb_sectors & 0xff; + outbuf[4] = 0; + outbuf[5] = 0; + outbuf[6] = s->qdev.blocksize >> 8; + outbuf[7] = 0; + break; + case REQUEST_SENSE: + /* Just return "NO SENSE". */ + buflen = scsi_convert_sense(NULL, 0, outbuf, r->buflen, + (req->cmd.buf[1] & 1) == 0); + if (buflen < 0) { + goto illegal_request; + } + break; + case MECHANISM_STATUS: + buflen = scsi_emulate_mechanism_status(s, outbuf); + if (buflen < 0) { + goto illegal_request; + } + break; + case GET_CONFIGURATION: + buflen = scsi_get_configuration(s, outbuf); + if (buflen < 0) { + goto illegal_request; + } + break; + case GET_EVENT_STATUS_NOTIFICATION: + buflen = scsi_get_event_status_notification(s, r, outbuf); + if (buflen < 0) { + goto illegal_request; + } + break; + case READ_DISC_INFORMATION: + buflen = scsi_read_disc_information(s, r, outbuf); + if (buflen < 0) { + goto illegal_request; + } + break; + case READ_DVD_STRUCTURE: + buflen = scsi_read_dvd_structure(s, r, outbuf); + if (buflen < 0) { + goto illegal_request; + } + break; + case SERVICE_ACTION_IN_16: + /* Service Action In subcommands. 
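+ * Only SAI_READ_CAPACITY_16 is implemented; any other service action
+ * falls through to the unsupported-command trace below.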
*/ + if ((req->cmd.buf[1] & 31) == SAI_READ_CAPACITY_16) { + trace_scsi_disk_emulate_command_SAI_16(); + memset(outbuf, 0, req->cmd.xfer); + blk_get_geometry(s->qdev.conf.blk, &nb_sectors); + if (!nb_sectors) { + scsi_check_condition(r, SENSE_CODE(LUN_NOT_READY)); + return 0; + } + if ((req->cmd.buf[14] & 1) == 0 && req->cmd.lba) { + goto illegal_request; + } + nb_sectors /= s->qdev.blocksize / BDRV_SECTOR_SIZE; + /* Returned value is the address of the last sector. */ + nb_sectors--; + /* Remember the new size for read/write sanity checking. */ + s->qdev.max_lba = nb_sectors; + outbuf[0] = (nb_sectors >> 56) & 0xff; + outbuf[1] = (nb_sectors >> 48) & 0xff; + outbuf[2] = (nb_sectors >> 40) & 0xff; + outbuf[3] = (nb_sectors >> 32) & 0xff; + outbuf[4] = (nb_sectors >> 24) & 0xff; + outbuf[5] = (nb_sectors >> 16) & 0xff; + outbuf[6] = (nb_sectors >> 8) & 0xff; + outbuf[7] = nb_sectors & 0xff; + outbuf[8] = 0; + outbuf[9] = 0; + outbuf[10] = s->qdev.blocksize >> 8; + outbuf[11] = 0; + outbuf[12] = 0; + outbuf[13] = get_physical_block_exp(&s->qdev.conf); + + /* set TPE bit if the format supports discard */ + if (s->qdev.conf.discard_granularity) { + outbuf[14] = 0x80; + } + + /* Protection, exponent and lowest lba field left blank. */ + break; + } + trace_scsi_disk_emulate_command_SAI_unsupported(); + goto illegal_request; + case SYNCHRONIZE_CACHE: + /* The request is used as the AIO opaque value, so add a ref. */ + scsi_req_ref(&r->req); + block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0, + BLOCK_ACCT_FLUSH); + r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r); + return 0; + case SEEK_10: + trace_scsi_disk_emulate_command_SEEK_10(r->req.cmd.lba); + if (r->req.cmd.lba > s->qdev.max_lba) { + goto illegal_lba; + } + break; + case MODE_SELECT: + trace_scsi_disk_emulate_command_MODE_SELECT(r->req.cmd.xfer); + break; + case MODE_SELECT_10: + trace_scsi_disk_emulate_command_MODE_SELECT_10(r->req.cmd.xfer); + break; + case UNMAP: + trace_scsi_disk_emulate_command_UNMAP(r->req.cmd.xfer); + break; + case VERIFY_10: + case VERIFY_12: + case VERIFY_16: + trace_scsi_disk_emulate_command_VERIFY((req->cmd.buf[1] >> 1) & 3); + if (req->cmd.buf[1] & 6) { + goto illegal_request; + } + break; + case WRITE_SAME_10: + case WRITE_SAME_16: + trace_scsi_disk_emulate_command_WRITE_SAME( + req->cmd.buf[0] == WRITE_SAME_10 ? 10 : 16, r->req.cmd.xfer); + break; + default: + trace_scsi_disk_emulate_command_UNKNOWN(buf[0], + scsi_command_name(buf[0])); + scsi_check_condition(r, SENSE_CODE(INVALID_OPCODE)); + return 0; + } + assert(!r->req.aiocb); + r->iov.iov_len = MIN(r->buflen, req->cmd.xfer); + if (r->iov.iov_len == 0) { + scsi_req_complete(&r->req, GOOD); + } + if (r->req.cmd.mode == SCSI_XFER_TO_DEV) { + assert(r->iov.iov_len == req->cmd.xfer); + return -r->iov.iov_len; + } else { + return r->iov.iov_len; + } + +illegal_request: + if (r->req.status == -1) { + scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); + } + return 0; + +illegal_lba: + scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE)); + return 0; +} + +/* Execute a scsi command. Returns the length of the data expected by the + command. This will be positive for data transfers from the device + (eg. disk reads), negative for transfers to the device (eg. disk writes), + and zero if the command does not transfer any data. 
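+ For example, a READ(10) of 8 logical blocks on a 512-byte-block disk
+ returns 4096, while the equivalent WRITE(10) returns -4096.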
*/ + +static int32_t scsi_disk_dma_command(SCSIRequest *req, uint8_t *buf) +{ + SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); + SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); + SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s)); + uint32_t len; + uint8_t command; + + command = buf[0]; + + if (!blk_is_available(s->qdev.conf.blk)) { + scsi_check_condition(r, SENSE_CODE(NO_MEDIUM)); + return 0; + } + + len = scsi_data_cdb_xfer(r->req.cmd.buf); + switch (command) { + case READ_6: + case READ_10: + case READ_12: + case READ_16: + trace_scsi_disk_dma_command_READ(r->req.cmd.lba, len); + /* Protection information is not supported. For SCSI versions 2 and + * older (as determined by snooping the guest's INQUIRY commands), + * there is no RD/WR/VRPROTECT, so skip this check in these versions. + */ + if (s->qdev.scsi_version > 2 && (r->req.cmd.buf[1] & 0xe0)) { + goto illegal_request; + } + if (!check_lba_range(s, r->req.cmd.lba, len)) { + goto illegal_lba; + } + r->sector = r->req.cmd.lba * (s->qdev.blocksize / BDRV_SECTOR_SIZE); + r->sector_count = len * (s->qdev.blocksize / BDRV_SECTOR_SIZE); + break; + case WRITE_6: + case WRITE_10: + case WRITE_12: + case WRITE_16: + case WRITE_VERIFY_10: + case WRITE_VERIFY_12: + case WRITE_VERIFY_16: + if (!blk_is_writable(s->qdev.conf.blk)) { + scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED)); + return 0; + } + trace_scsi_disk_dma_command_WRITE( + (command & 0xe) == 0xe ? "And Verify " : "", + r->req.cmd.lba, len); + /* fall through */ + case VERIFY_10: + case VERIFY_12: + case VERIFY_16: + /* We get here only for BYTCHK == 0x01 and only for scsi-block. + * As far as DMA is concerned, we can treat it the same as a write; + * scsi_block_do_sgio will send VERIFY commands. + */ + if (s->qdev.scsi_version > 2 && (r->req.cmd.buf[1] & 0xe0)) { + goto illegal_request; + } + if (!check_lba_range(s, r->req.cmd.lba, len)) { + goto illegal_lba; + } + r->sector = r->req.cmd.lba * (s->qdev.blocksize / BDRV_SECTOR_SIZE); + r->sector_count = len * (s->qdev.blocksize / BDRV_SECTOR_SIZE); + break; + default: + abort(); + illegal_request: + scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); + return 0; + illegal_lba: + scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE)); + return 0; + } + r->need_fua_emulation = sdc->need_fua_emulation(&r->req.cmd); + if (r->sector_count == 0) { + scsi_req_complete(&r->req, GOOD); + } + assert(r->iov.iov_len == 0); + if (r->req.cmd.mode == SCSI_XFER_TO_DEV) { + return -r->sector_count * BDRV_SECTOR_SIZE; + } else { + return r->sector_count * BDRV_SECTOR_SIZE; + } +} + +static void scsi_disk_reset(DeviceState *dev) +{ + SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev.qdev, dev); + uint64_t nb_sectors; + + scsi_device_purge_requests(&s->qdev, SENSE_CODE(RESET)); + + blk_get_geometry(s->qdev.conf.blk, &nb_sectors); + nb_sectors /= s->qdev.blocksize / BDRV_SECTOR_SIZE; + if (nb_sectors) { + nb_sectors--; + } + s->qdev.max_lba = nb_sectors; + /* reset tray statuses */ + s->tray_locked = 0; + s->tray_open = 0; + + s->qdev.scsi_version = s->qdev.default_scsi_version; +} + +static void scsi_disk_resize_cb(void *opaque) +{ + SCSIDiskState *s = opaque; + + /* SPC lists this sense code as available only for + * direct-access devices. 
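+ * scsi-cd therefore skips the report; media changes on optical drives
+ * are signalled through the tray and media event callbacks instead.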
+ */ + if (s->qdev.type == TYPE_DISK) { + scsi_device_report_change(&s->qdev, SENSE_CODE(CAPACITY_CHANGED)); + } +} + +static void scsi_cd_change_media_cb(void *opaque, bool load, Error **errp) +{ + SCSIDiskState *s = opaque; + + /* + * When a CD gets changed, we have to report an ejected state and + * then a loaded state to guests so that they detect tray + * open/close and media change events. Guests that do not use + * GET_EVENT_STATUS_NOTIFICATION to detect such tray open/close + * states rely on this behavior. + * + * media_changed governs the state machine used for unit attention + * report. media_event is used by GET EVENT STATUS NOTIFICATION. + */ + s->media_changed = load; + s->tray_open = !load; + scsi_device_set_ua(&s->qdev, SENSE_CODE(UNIT_ATTENTION_NO_MEDIUM)); + s->media_event = true; + s->eject_request = false; +} + +static void scsi_cd_eject_request_cb(void *opaque, bool force) +{ + SCSIDiskState *s = opaque; + + s->eject_request = true; + if (force) { + s->tray_locked = false; + } +} + +static bool scsi_cd_is_tray_open(void *opaque) +{ + return ((SCSIDiskState *)opaque)->tray_open; +} + +static bool scsi_cd_is_medium_locked(void *opaque) +{ + return ((SCSIDiskState *)opaque)->tray_locked; +} + +static const BlockDevOps scsi_disk_removable_block_ops = { + .change_media_cb = scsi_cd_change_media_cb, + .eject_request_cb = scsi_cd_eject_request_cb, + .is_tray_open = scsi_cd_is_tray_open, + .is_medium_locked = scsi_cd_is_medium_locked, + + .resize_cb = scsi_disk_resize_cb, +}; + +static const BlockDevOps scsi_disk_block_ops = { + .resize_cb = scsi_disk_resize_cb, +}; + +static void scsi_disk_unit_attention_reported(SCSIDevice *dev) +{ + SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev); + if (s->media_changed) { + s->media_changed = false; + scsi_device_set_ua(&s->qdev, SENSE_CODE(MEDIUM_CHANGED)); + } +} + +static void scsi_realize(SCSIDevice *dev, Error **errp) +{ + SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev); + bool read_only; + + if (!s->qdev.conf.blk) { + error_setg(errp, "drive property not set"); + return; + } + + if (!(s->features & (1 << SCSI_DISK_F_REMOVABLE)) && + !blk_is_inserted(s->qdev.conf.blk)) { + error_setg(errp, "Device needs media, but drive is empty"); + return; + } + + if (!blkconf_blocksizes(&s->qdev.conf, errp)) { + return; + } + + if (blk_get_aio_context(s->qdev.conf.blk) != qemu_get_aio_context() && + !s->qdev.hba_supports_iothread) + { + error_setg(errp, "HBA does not support iothreads"); + return; + } + + if (dev->type == TYPE_DISK) { + if (!blkconf_geometry(&dev->conf, NULL, 65535, 255, 255, errp)) { + return; + } + } + + read_only = !blk_supports_write_perm(s->qdev.conf.blk); + if (dev->type == TYPE_ROM) { + read_only = true; + } + + if (!blkconf_apply_backend_options(&dev->conf, read_only, + dev->type == TYPE_DISK, errp)) { + return; + } + + if (s->qdev.conf.discard_granularity == -1) { + s->qdev.conf.discard_granularity = + MAX(s->qdev.conf.logical_block_size, DEFAULT_DISCARD_GRANULARITY); + } + + if (!s->version) { + s->version = g_strdup(qemu_hw_version()); + } + if (!s->vendor) { + s->vendor = g_strdup("QEMU"); + } + if (!s->device_id) { + if (s->serial) { + s->device_id = g_strdup_printf("%.20s", s->serial); + } else { + const char *str = blk_name(s->qdev.conf.blk); + if (str && *str) { + s->device_id = g_strdup(str); + } + } + } + + if (blk_is_sg(s->qdev.conf.blk)) { + error_setg(errp, "unwanted /dev/sg*"); + return; + } + + if ((s->features & (1 << SCSI_DISK_F_REMOVABLE)) && + !(s->features & (1 << 
SCSI_DISK_F_NO_REMOVABLE_DEVOPS))) { + blk_set_dev_ops(s->qdev.conf.blk, &scsi_disk_removable_block_ops, s); + } else { + blk_set_dev_ops(s->qdev.conf.blk, &scsi_disk_block_ops, s); + } + blk_set_guest_block_size(s->qdev.conf.blk, s->qdev.blocksize); + + blk_iostatus_enable(s->qdev.conf.blk); + + add_boot_device_lchs(&dev->qdev, NULL, + dev->conf.lcyls, + dev->conf.lheads, + dev->conf.lsecs); +} + +static void scsi_unrealize(SCSIDevice *dev) +{ + del_boot_device_lchs(&dev->qdev, NULL); +} + +static void scsi_hd_realize(SCSIDevice *dev, Error **errp) +{ + SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev); + AioContext *ctx = NULL; + /* can happen for devices without drive. The error message for missing + * backend will be issued in scsi_realize + */ + if (s->qdev.conf.blk) { + ctx = blk_get_aio_context(s->qdev.conf.blk); + aio_context_acquire(ctx); + if (!blkconf_blocksizes(&s->qdev.conf, errp)) { + goto out; + } + } + s->qdev.blocksize = s->qdev.conf.logical_block_size; + s->qdev.type = TYPE_DISK; + if (!s->product) { + s->product = g_strdup("QEMU HARDDISK"); + } + scsi_realize(&s->qdev, errp); +out: + if (ctx) { + aio_context_release(ctx); + } +} + +static void scsi_cd_realize(SCSIDevice *dev, Error **errp) +{ + SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev); + AioContext *ctx; + int ret; + + if (!dev->conf.blk) { + /* Anonymous BlockBackend for an empty drive. As we put it into + * dev->conf, qdev takes care of detaching on unplug. */ + dev->conf.blk = blk_new(qemu_get_aio_context(), 0, BLK_PERM_ALL); + ret = blk_attach_dev(dev->conf.blk, &dev->qdev); + assert(ret == 0); + } + + ctx = blk_get_aio_context(dev->conf.blk); + aio_context_acquire(ctx); + s->qdev.blocksize = 2048; + s->qdev.type = TYPE_ROM; + s->features |= 1 << SCSI_DISK_F_REMOVABLE; + if (!s->product) { + s->product = g_strdup("QEMU CD-ROM"); + } + scsi_realize(&s->qdev, errp); + aio_context_release(ctx); +} + + +static const SCSIReqOps scsi_disk_emulate_reqops = { + .size = sizeof(SCSIDiskReq), + .free_req = scsi_free_request, + .send_command = scsi_disk_emulate_command, + .read_data = scsi_disk_emulate_read_data, + .write_data = scsi_disk_emulate_write_data, + .get_buf = scsi_get_buf, +}; + +static const SCSIReqOps scsi_disk_dma_reqops = { + .size = sizeof(SCSIDiskReq), + .free_req = scsi_free_request, + .send_command = scsi_disk_dma_command, + .read_data = scsi_read_data, + .write_data = scsi_write_data, + .get_buf = scsi_get_buf, + .load_request = scsi_disk_load_request, + .save_request = scsi_disk_save_request, +}; + +static const SCSIReqOps *const scsi_disk_reqops_dispatch[256] = { + [TEST_UNIT_READY] = &scsi_disk_emulate_reqops, + [INQUIRY] = &scsi_disk_emulate_reqops, + [MODE_SENSE] = &scsi_disk_emulate_reqops, + [MODE_SENSE_10] = &scsi_disk_emulate_reqops, + [START_STOP] = &scsi_disk_emulate_reqops, + [ALLOW_MEDIUM_REMOVAL] = &scsi_disk_emulate_reqops, + [READ_CAPACITY_10] = &scsi_disk_emulate_reqops, + [READ_TOC] = &scsi_disk_emulate_reqops, + [READ_DVD_STRUCTURE] = &scsi_disk_emulate_reqops, + [READ_DISC_INFORMATION] = &scsi_disk_emulate_reqops, + [GET_CONFIGURATION] = &scsi_disk_emulate_reqops, + [GET_EVENT_STATUS_NOTIFICATION] = &scsi_disk_emulate_reqops, + [MECHANISM_STATUS] = &scsi_disk_emulate_reqops, + [SERVICE_ACTION_IN_16] = &scsi_disk_emulate_reqops, + [REQUEST_SENSE] = &scsi_disk_emulate_reqops, + [SYNCHRONIZE_CACHE] = &scsi_disk_emulate_reqops, + [SEEK_10] = &scsi_disk_emulate_reqops, + [MODE_SELECT] = &scsi_disk_emulate_reqops, + [MODE_SELECT_10] = &scsi_disk_emulate_reqops, + [UNMAP] = 
&scsi_disk_emulate_reqops, + [WRITE_SAME_10] = &scsi_disk_emulate_reqops, + [WRITE_SAME_16] = &scsi_disk_emulate_reqops, + [VERIFY_10] = &scsi_disk_emulate_reqops, + [VERIFY_12] = &scsi_disk_emulate_reqops, + [VERIFY_16] = &scsi_disk_emulate_reqops, + + [READ_6] = &scsi_disk_dma_reqops, + [READ_10] = &scsi_disk_dma_reqops, + [READ_12] = &scsi_disk_dma_reqops, + [READ_16] = &scsi_disk_dma_reqops, + [WRITE_6] = &scsi_disk_dma_reqops, + [WRITE_10] = &scsi_disk_dma_reqops, + [WRITE_12] = &scsi_disk_dma_reqops, + [WRITE_16] = &scsi_disk_dma_reqops, + [WRITE_VERIFY_10] = &scsi_disk_dma_reqops, + [WRITE_VERIFY_12] = &scsi_disk_dma_reqops, + [WRITE_VERIFY_16] = &scsi_disk_dma_reqops, +}; + +static void scsi_disk_new_request_dump(uint32_t lun, uint32_t tag, uint8_t *buf) +{ + int i; + int len = scsi_cdb_length(buf); + char *line_buffer, *p; + + assert(len > 0 && len <= 16); + line_buffer = g_malloc(len * 5 + 1); + + for (i = 0, p = line_buffer; i < len; i++) { + p += sprintf(p, " 0x%02x", buf[i]); + } + trace_scsi_disk_new_request(lun, tag, line_buffer); + + g_free(line_buffer); +} + +static SCSIRequest *scsi_new_request(SCSIDevice *d, uint32_t tag, uint32_t lun, + uint8_t *buf, void *hba_private) +{ + SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d); + SCSIRequest *req; + const SCSIReqOps *ops; + uint8_t command; + + command = buf[0]; + ops = scsi_disk_reqops_dispatch[command]; + if (!ops) { + ops = &scsi_disk_emulate_reqops; + } + req = scsi_req_alloc(ops, &s->qdev, tag, lun, hba_private); + + if (trace_event_get_state_backends(TRACE_SCSI_DISK_NEW_REQUEST)) { + scsi_disk_new_request_dump(lun, tag, buf); + } + + return req; +} + +#ifdef __linux__ +static int get_device_type(SCSIDiskState *s) +{ + uint8_t cmd[16]; + uint8_t buf[36]; + int ret; + + memset(cmd, 0, sizeof(cmd)); + memset(buf, 0, sizeof(buf)); + cmd[0] = INQUIRY; + cmd[4] = sizeof(buf); + + ret = scsi_SG_IO_FROM_DEV(s->qdev.conf.blk, cmd, sizeof(cmd), + buf, sizeof(buf), s->qdev.io_timeout); + if (ret < 0) { + return -1; + } + s->qdev.type = buf[0]; + if (buf[1] & 0x80) { + s->features |= 1 << SCSI_DISK_F_REMOVABLE; + } + return 0; +} + +static void scsi_block_realize(SCSIDevice *dev, Error **errp) +{ + SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev); + AioContext *ctx; + int sg_version; + int rc; + + if (!s->qdev.conf.blk) { + error_setg(errp, "drive property not set"); + return; + } + + if (s->rotation_rate) { + error_report_once("rotation_rate is specified for scsi-block but is " + "not implemented. This option is deprecated and will " + "be removed in a future version"); + } + + ctx = blk_get_aio_context(s->qdev.conf.blk); + aio_context_acquire(ctx); + + /* check we are using a driver managing SG_IO (version 3 and after) */ + rc = blk_ioctl(s->qdev.conf.blk, SG_GET_VERSION_NUM, &sg_version); + if (rc < 0) { + error_setg_errno(errp, -rc, "cannot get SG_IO version number"); + if (rc != -EPERM) { + error_append_hint(errp, "Is this a SCSI device?\n"); + } + goto out; + } + if (sg_version < 30000) { + error_setg(errp, "scsi generic interface too old"); + goto out; + } + + /* get device type from INQUIRY data */ + rc = get_device_type(s); + if (rc < 0) { + error_setg(errp, "INQUIRY failed"); + goto out; + } + + /* Make a guess for the block size, we'll fix it when the guest sends + * READ CAPACITY. If they don't, they likely would assume these sizes + * anyway. (TODO: check in /sys). 
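+ * 2048 bytes is the MMC logical block size for optical media, 512
+ * bytes the traditional default for direct-access devices.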
+ */ + if (s->qdev.type == TYPE_ROM || s->qdev.type == TYPE_WORM) { + s->qdev.blocksize = 2048; + } else { + s->qdev.blocksize = 512; + } + + /* Make the scsi-block device not removable by using the HMP and QMP + * eject commands. + */ + s->features |= (1 << SCSI_DISK_F_NO_REMOVABLE_DEVOPS); + + scsi_realize(&s->qdev, errp); + scsi_generic_read_device_inquiry(&s->qdev); + +out: + aio_context_release(ctx); +} + +typedef struct SCSIBlockReq { + SCSIDiskReq req; + sg_io_hdr_t io_header; + + /* Selected bytes of the original CDB, copied into our own CDB. */ + uint8_t cmd, cdb1, group_number; + + /* CDB passed to SG_IO. */ + uint8_t cdb[16]; + BlockCompletionFunc *cb; + void *cb_opaque; +} SCSIBlockReq; + +static void scsi_block_sgio_complete(void *opaque, int ret) +{ + SCSIBlockReq *req = (SCSIBlockReq *)opaque; + SCSIDiskReq *r = &req->req; + SCSIDevice *s = r->req.dev; + sg_io_hdr_t *io_hdr = &req->io_header; + + if (ret == 0) { + if (io_hdr->host_status != SCSI_HOST_OK) { + scsi_req_complete_failed(&r->req, io_hdr->host_status); + scsi_req_unref(&r->req); + return; + } + + if (io_hdr->driver_status & SG_ERR_DRIVER_TIMEOUT) { + ret = BUSY; + } else { + ret = io_hdr->status; + } + + if (ret > 0) { + aio_context_acquire(blk_get_aio_context(s->conf.blk)); + if (scsi_handle_rw_error(r, ret, true)) { + aio_context_release(blk_get_aio_context(s->conf.blk)); + scsi_req_unref(&r->req); + return; + } + aio_context_release(blk_get_aio_context(s->conf.blk)); + + /* Ignore error. */ + ret = 0; + } + } + + req->cb(req->cb_opaque, ret); +} + +static BlockAIOCB *scsi_block_do_sgio(SCSIBlockReq *req, + int64_t offset, QEMUIOVector *iov, + int direction, + BlockCompletionFunc *cb, void *opaque) +{ + sg_io_hdr_t *io_header = &req->io_header; + SCSIDiskReq *r = &req->req; + SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); + int nb_logical_blocks; + uint64_t lba; + BlockAIOCB *aiocb; + + /* This is not supported yet. It can only happen if the guest does + * reads and writes that are not aligned to one logical sector + * _and_ cover multiple MemoryRegions. + */ + assert(offset % s->qdev.blocksize == 0); + assert(iov->size % s->qdev.blocksize == 0); + + io_header->interface_id = 'S'; + + /* The data transfer comes from the QEMUIOVector. */ + io_header->dxfer_direction = direction; + io_header->dxfer_len = iov->size; + io_header->dxferp = (void *)iov->iov; + io_header->iovec_count = iov->niov; + assert(io_header->iovec_count == iov->niov); /* no overflow! */ + + /* Build a new CDB with the LBA and length patched in, in case + * DMA helpers split the transfer into multiple segments. Do not + * build a CDB smaller than what the guest wanted, and only build + * a larger one if strictly necessary. 
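+ * For example, a fragment of a guest READ(10) is reissued as another
+ * READ(10) with the fragment's LBA in bytes 2-5 and its block count
+ * in bytes 7-8 of the rebuilt CDB.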
+ */ + io_header->cmdp = req->cdb; + lba = offset / s->qdev.blocksize; + nb_logical_blocks = io_header->dxfer_len / s->qdev.blocksize; + + if ((req->cmd >> 5) == 0 && lba <= 0x1ffff) { + /* 6-byte CDB */ + stl_be_p(&req->cdb[0], lba | (req->cmd << 24)); + req->cdb[4] = nb_logical_blocks; + req->cdb[5] = 0; + io_header->cmd_len = 6; + } else if ((req->cmd >> 5) <= 1 && lba <= 0xffffffffULL) { + /* 10-byte CDB */ + req->cdb[0] = (req->cmd & 0x1f) | 0x20; + req->cdb[1] = req->cdb1; + stl_be_p(&req->cdb[2], lba); + req->cdb[6] = req->group_number; + stw_be_p(&req->cdb[7], nb_logical_blocks); + req->cdb[9] = 0; + io_header->cmd_len = 10; + } else if ((req->cmd >> 5) != 4 && lba <= 0xffffffffULL) { + /* 12-byte CDB */ + req->cdb[0] = (req->cmd & 0x1f) | 0xA0; + req->cdb[1] = req->cdb1; + stl_be_p(&req->cdb[2], lba); + stl_be_p(&req->cdb[6], nb_logical_blocks); + req->cdb[10] = req->group_number; + req->cdb[11] = 0; + io_header->cmd_len = 12; + } else { + /* 16-byte CDB */ + req->cdb[0] = (req->cmd & 0x1f) | 0x80; + req->cdb[1] = req->cdb1; + stq_be_p(&req->cdb[2], lba); + stl_be_p(&req->cdb[10], nb_logical_blocks); + req->cdb[14] = req->group_number; + req->cdb[15] = 0; + io_header->cmd_len = 16; + } + + /* The rest is as in scsi-generic.c. */ + io_header->mx_sb_len = sizeof(r->req.sense); + io_header->sbp = r->req.sense; + io_header->timeout = s->qdev.io_timeout * 1000; + io_header->usr_ptr = r; + io_header->flags |= SG_FLAG_DIRECT_IO; + req->cb = cb; + req->cb_opaque = opaque; + trace_scsi_disk_aio_sgio_command(r->req.tag, req->cdb[0], lba, + nb_logical_blocks, io_header->timeout); + aiocb = blk_aio_ioctl(s->qdev.conf.blk, SG_IO, io_header, scsi_block_sgio_complete, req); + assert(aiocb != NULL); + return aiocb; +} + +static bool scsi_block_no_fua(SCSICommand *cmd) +{ + return false; +} + +static BlockAIOCB *scsi_block_dma_readv(int64_t offset, + QEMUIOVector *iov, + BlockCompletionFunc *cb, void *cb_opaque, + void *opaque) +{ + SCSIBlockReq *r = opaque; + return scsi_block_do_sgio(r, offset, iov, + SG_DXFER_FROM_DEV, cb, cb_opaque); +} + +static BlockAIOCB *scsi_block_dma_writev(int64_t offset, + QEMUIOVector *iov, + BlockCompletionFunc *cb, void *cb_opaque, + void *opaque) +{ + SCSIBlockReq *r = opaque; + return scsi_block_do_sgio(r, offset, iov, + SG_DXFER_TO_DEV, cb, cb_opaque); +} + +static bool scsi_block_is_passthrough(SCSIDiskState *s, uint8_t *buf) +{ + switch (buf[0]) { + case VERIFY_10: + case VERIFY_12: + case VERIFY_16: + /* Check if BYTCHK == 0x01 (data-out buffer contains data + * for the number of logical blocks specified in the length + * field). For other modes, do not use scatter/gather operation. + */ + if ((buf[1] & 6) == 2) { + return false; + } + break; + + case READ_6: + case READ_10: + case READ_12: + case READ_16: + case WRITE_6: + case WRITE_10: + case WRITE_12: + case WRITE_16: + case WRITE_VERIFY_10: + case WRITE_VERIFY_12: + case WRITE_VERIFY_16: + /* MMC writing cannot be done via DMA helpers, because it sometimes + * involves writing beyond the maximum LBA or to negative LBA (lead-in). + * We might use scsi_block_dma_reqops as long as no writing commands are + * seen, but performance usually isn't paramount on optical media. So, + * just make scsi-block operate the same as scsi-generic for them. 
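+ * (scsi_block_new_request then hands such CDBs to scsi_generic_req_ops
+ * unchanged.)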
+ */ + if (s->qdev.type != TYPE_ROM) { + return false; + } + break; + + default: + break; + } + + return true; +} + + +static int32_t scsi_block_dma_command(SCSIRequest *req, uint8_t *buf) +{ + SCSIBlockReq *r = (SCSIBlockReq *)req; + SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); + + r->cmd = req->cmd.buf[0]; + switch (r->cmd >> 5) { + case 0: + /* 6-byte CDB. */ + r->cdb1 = r->group_number = 0; + break; + case 1: + /* 10-byte CDB. */ + r->cdb1 = req->cmd.buf[1]; + r->group_number = req->cmd.buf[6]; + break; + case 4: + /* 12-byte CDB. */ + r->cdb1 = req->cmd.buf[1]; + r->group_number = req->cmd.buf[10]; + break; + case 5: + /* 16-byte CDB. */ + r->cdb1 = req->cmd.buf[1]; + r->group_number = req->cmd.buf[14]; + break; + default: + abort(); + } + + /* Protection information is not supported. For SCSI versions 2 and + * older (as determined by snooping the guest's INQUIRY commands), + * there is no RD/WR/VRPROTECT, so skip this check in these versions. + */ + if (s->qdev.scsi_version > 2 && (req->cmd.buf[1] & 0xe0)) { + scsi_check_condition(&r->req, SENSE_CODE(INVALID_FIELD)); + return 0; + } + + return scsi_disk_dma_command(req, buf); +} + +static const SCSIReqOps scsi_block_dma_reqops = { + .size = sizeof(SCSIBlockReq), + .free_req = scsi_free_request, + .send_command = scsi_block_dma_command, + .read_data = scsi_read_data, + .write_data = scsi_write_data, + .get_buf = scsi_get_buf, + .load_request = scsi_disk_load_request, + .save_request = scsi_disk_save_request, +}; + +static SCSIRequest *scsi_block_new_request(SCSIDevice *d, uint32_t tag, + uint32_t lun, uint8_t *buf, + void *hba_private) +{ + SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d); + + if (scsi_block_is_passthrough(s, buf)) { + return scsi_req_alloc(&scsi_generic_req_ops, &s->qdev, tag, lun, + hba_private); + } else { + return scsi_req_alloc(&scsi_block_dma_reqops, &s->qdev, tag, lun, + hba_private); + } +} + +static int scsi_block_parse_cdb(SCSIDevice *d, SCSICommand *cmd, + uint8_t *buf, void *hba_private) +{ + SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d); + + if (scsi_block_is_passthrough(s, buf)) { + return scsi_bus_parse_cdb(&s->qdev, cmd, buf, hba_private); + } else { + return scsi_req_parse_cdb(&s->qdev, cmd, buf); + } +} + +static void scsi_block_update_sense(SCSIRequest *req) +{ + SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); + SCSIBlockReq *br = DO_UPCAST(SCSIBlockReq, req, r); + r->req.sense_len = MIN(br->io_header.sb_len_wr, sizeof(r->req.sense)); +} +#endif + +static +BlockAIOCB *scsi_dma_readv(int64_t offset, QEMUIOVector *iov, + BlockCompletionFunc *cb, void *cb_opaque, + void *opaque) +{ + SCSIDiskReq *r = opaque; + SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); + return blk_aio_preadv(s->qdev.conf.blk, offset, iov, 0, cb, cb_opaque); +} + +static +BlockAIOCB *scsi_dma_writev(int64_t offset, QEMUIOVector *iov, + BlockCompletionFunc *cb, void *cb_opaque, + void *opaque) +{ + SCSIDiskReq *r = opaque; + SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); + return blk_aio_pwritev(s->qdev.conf.blk, offset, iov, 0, cb, cb_opaque); +} + +static void scsi_disk_base_class_initfn(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + SCSIDiskClass *sdc = SCSI_DISK_BASE_CLASS(klass); + + dc->fw_name = "disk"; + dc->reset = scsi_disk_reset; + sdc->dma_readv = scsi_dma_readv; + sdc->dma_writev = scsi_dma_writev; + sdc->need_fua_emulation = scsi_is_cmd_fua; +} + +static const TypeInfo scsi_disk_base_info = { + .name = TYPE_SCSI_DISK_BASE, + 
.parent = TYPE_SCSI_DEVICE, + .class_init = scsi_disk_base_class_initfn, + .instance_size = sizeof(SCSIDiskState), + .class_size = sizeof(SCSIDiskClass), + .abstract = true, +}; + +#define DEFINE_SCSI_DISK_PROPERTIES() \ + DEFINE_PROP_DRIVE_IOTHREAD("drive", SCSIDiskState, qdev.conf.blk), \ + DEFINE_BLOCK_PROPERTIES_BASE(SCSIDiskState, qdev.conf), \ + DEFINE_BLOCK_ERROR_PROPERTIES(SCSIDiskState, qdev.conf), \ + DEFINE_PROP_STRING("ver", SCSIDiskState, version), \ + DEFINE_PROP_STRING("serial", SCSIDiskState, serial), \ + DEFINE_PROP_STRING("vendor", SCSIDiskState, vendor), \ + DEFINE_PROP_STRING("product", SCSIDiskState, product), \ + DEFINE_PROP_STRING("device_id", SCSIDiskState, device_id) + + +static Property scsi_hd_properties[] = { + DEFINE_SCSI_DISK_PROPERTIES(), + DEFINE_PROP_BIT("removable", SCSIDiskState, features, + SCSI_DISK_F_REMOVABLE, false), + DEFINE_PROP_BIT("dpofua", SCSIDiskState, features, + SCSI_DISK_F_DPOFUA, false), + DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0), + DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0), + DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0), + DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size, + DEFAULT_MAX_UNMAP_SIZE), + DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size, + DEFAULT_MAX_IO_SIZE), + DEFINE_PROP_UINT16("rotation_rate", SCSIDiskState, rotation_rate, 0), + DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version, + 5), + DEFINE_BLOCK_CHS_PROPERTIES(SCSIDiskState, qdev.conf), + DEFINE_PROP_END_OF_LIST(), +}; + +static const VMStateDescription vmstate_scsi_disk_state = { + .name = "scsi-disk", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_SCSI_DEVICE(qdev, SCSIDiskState), + VMSTATE_BOOL(media_changed, SCSIDiskState), + VMSTATE_BOOL(media_event, SCSIDiskState), + VMSTATE_BOOL(eject_request, SCSIDiskState), + VMSTATE_BOOL(tray_open, SCSIDiskState), + VMSTATE_BOOL(tray_locked, SCSIDiskState), + VMSTATE_END_OF_LIST() + } +}; + +static void scsi_hd_class_initfn(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass); + + sc->realize = scsi_hd_realize; + sc->unrealize = scsi_unrealize; + sc->alloc_req = scsi_new_request; + sc->unit_attention_reported = scsi_disk_unit_attention_reported; + dc->desc = "virtual SCSI disk"; + device_class_set_props(dc, scsi_hd_properties); + dc->vmsd = &vmstate_scsi_disk_state; +} + +static const TypeInfo scsi_hd_info = { + .name = "scsi-hd", + .parent = TYPE_SCSI_DISK_BASE, + .class_init = scsi_hd_class_initfn, +}; + +static Property scsi_cd_properties[] = { + DEFINE_SCSI_DISK_PROPERTIES(), + DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0), + DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0), + DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0), + DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size, + DEFAULT_MAX_IO_SIZE), + DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version, + 5), + DEFINE_PROP_END_OF_LIST(), +}; + +static void scsi_cd_class_initfn(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass); + + sc->realize = scsi_cd_realize; + sc->alloc_req = scsi_new_request; + sc->unit_attention_reported = scsi_disk_unit_attention_reported; + dc->desc = "virtual SCSI CD-ROM"; + device_class_set_props(dc, scsi_cd_properties); + dc->vmsd = &vmstate_scsi_disk_state; +} + +static const 
TypeInfo scsi_cd_info = { + .name = "scsi-cd", + .parent = TYPE_SCSI_DISK_BASE, + .class_init = scsi_cd_class_initfn, +}; + +#ifdef __linux__ +static Property scsi_block_properties[] = { + DEFINE_BLOCK_ERROR_PROPERTIES(SCSIDiskState, qdev.conf), + DEFINE_PROP_DRIVE("drive", SCSIDiskState, qdev.conf.blk), + DEFINE_PROP_BOOL("share-rw", SCSIDiskState, qdev.conf.share_rw, false), + DEFINE_PROP_UINT16("rotation_rate", SCSIDiskState, rotation_rate, 0), + DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size, + DEFAULT_MAX_UNMAP_SIZE), + DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size, + DEFAULT_MAX_IO_SIZE), + DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version, + -1), + DEFINE_PROP_UINT32("io_timeout", SCSIDiskState, qdev.io_timeout, + DEFAULT_IO_TIMEOUT), + DEFINE_PROP_END_OF_LIST(), +}; + +static void scsi_block_class_initfn(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass); + SCSIDiskClass *sdc = SCSI_DISK_BASE_CLASS(klass); + + sc->realize = scsi_block_realize; + sc->alloc_req = scsi_block_new_request; + sc->parse_cdb = scsi_block_parse_cdb; + sdc->dma_readv = scsi_block_dma_readv; + sdc->dma_writev = scsi_block_dma_writev; + sdc->update_sense = scsi_block_update_sense; + sdc->need_fua_emulation = scsi_block_no_fua; + dc->desc = "SCSI block device passthrough"; + device_class_set_props(dc, scsi_block_properties); + dc->vmsd = &vmstate_scsi_disk_state; +} + +static const TypeInfo scsi_block_info = { + .name = "scsi-block", + .parent = TYPE_SCSI_DISK_BASE, + .class_init = scsi_block_class_initfn, +}; +#endif + +static void scsi_disk_register_types(void) +{ + type_register_static(&scsi_disk_base_info); + type_register_static(&scsi_hd_info); + type_register_static(&scsi_cd_info); +#ifdef __linux__ + type_register_static(&scsi_block_info); +#endif +} + +type_init(scsi_disk_register_types) diff --git a/hw/scsi/scsi-generic.c b/hw/scsi/scsi-generic.c new file mode 100644 index 000000000..0306ccc7b --- /dev/null +++ b/hw/scsi/scsi-generic.c @@ -0,0 +1,822 @@ +/* + * Generic SCSI Device support + * + * Copyright (c) 2007 Bull S.A.S. + * Based on code by Paul Brook + * Based on code by Fabrice Bellard + * + * Written by Laurent Vivier + * + * This code is licensed under the LGPL. 
+ * + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/ctype.h" +#include "qemu/error-report.h" +#include "qemu/module.h" +#include "hw/scsi/scsi.h" +#include "migration/qemu-file-types.h" +#include "hw/qdev-properties.h" +#include "hw/qdev-properties-system.h" +#include "hw/scsi/emulation.h" +#include "sysemu/block-backend.h" +#include "trace.h" + +#ifdef __linux__ + +#include <scsi/sg.h> +#include "scsi/constants.h" + +#ifndef MAX_UINT +#define MAX_UINT ((unsigned int)-1) +#endif + +typedef struct SCSIGenericReq { + SCSIRequest req; + uint8_t *buf; + int buflen; + int len; + sg_io_hdr_t io_header; +} SCSIGenericReq; + +static void scsi_generic_save_request(QEMUFile *f, SCSIRequest *req) +{ + SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req); + + qemu_put_sbe32s(f, &r->buflen); + if (r->buflen && r->req.cmd.mode == SCSI_XFER_TO_DEV) { + assert(!r->req.sg); + qemu_put_buffer(f, r->buf, r->req.cmd.xfer); + } +} + +static void scsi_generic_load_request(QEMUFile *f, SCSIRequest *req) +{ + SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req); + + qemu_get_sbe32s(f, &r->buflen); + if (r->buflen && r->req.cmd.mode == SCSI_XFER_TO_DEV) { + assert(!r->req.sg); + qemu_get_buffer(f, r->buf, r->req.cmd.xfer); + } +} + +static void scsi_free_request(SCSIRequest *req) +{ + SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req); + + g_free(r->buf); +} + +/* Helper function for command completion. */ +static void scsi_command_complete_noio(SCSIGenericReq *r, int ret) +{ + int status; + SCSISense sense; + sg_io_hdr_t *io_hdr = &r->io_header; + + assert(r->req.aiocb == NULL); + + if (r->req.io_canceled) { + scsi_req_cancel_complete(&r->req); + goto done; + } + if (ret < 0) { + status = scsi_sense_from_errno(-ret, &sense); + if (status == CHECK_CONDITION) { + scsi_req_build_sense(&r->req, sense); + } + } else if (io_hdr->host_status != SCSI_HOST_OK) { + scsi_req_complete_failed(&r->req, io_hdr->host_status); + goto done; + } else if (io_hdr->driver_status & SG_ERR_DRIVER_TIMEOUT) { + status = BUSY; + } else { + status = io_hdr->status; + if (io_hdr->driver_status & SG_ERR_DRIVER_SENSE) { + r->req.sense_len = io_hdr->sb_len_wr; + } + } + trace_scsi_generic_command_complete_noio(r, r->req.tag, status); + + scsi_req_complete(&r->req, status); +done: + scsi_req_unref(&r->req); +} + +static void scsi_command_complete(void *opaque, int ret) +{ + SCSIGenericReq *r = (SCSIGenericReq *)opaque; + SCSIDevice *s = r->req.dev; + + assert(r->req.aiocb != NULL); + r->req.aiocb = NULL; + + aio_context_acquire(blk_get_aio_context(s->conf.blk)); + scsi_command_complete_noio(r, ret); + aio_context_release(blk_get_aio_context(s->conf.blk)); +} + +static int execute_command(BlockBackend *blk, + SCSIGenericReq *r, int direction, + BlockCompletionFunc *complete) +{ + SCSIDevice *s = r->req.dev; + + r->io_header.interface_id = 'S'; + r->io_header.dxfer_direction = direction; + r->io_header.dxferp = r->buf; + r->io_header.dxfer_len = r->buflen; + r->io_header.cmdp = r->req.cmd.buf; + r->io_header.cmd_len = r->req.cmd.len; + r->io_header.mx_sb_len = sizeof(r->req.sense); + r->io_header.sbp = r->req.sense; + r->io_header.timeout = s->io_timeout * 1000; + r->io_header.usr_ptr = r; + r->io_header.flags |= SG_FLAG_DIRECT_IO; + + trace_scsi_generic_aio_sgio_command(r->req.tag, r->req.cmd.buf[0], + r->io_header.timeout); + r->req.aiocb = blk_aio_ioctl(blk, SG_IO, &r->io_header, complete, r); + if (r->req.aiocb == NULL) { + return -EIO; + } + + return 0; +} + +static int scsi_handle_inquiry_reply(SCSIGenericReq *r, 
SCSIDevice *s, int len) +{ + uint8_t page, page_idx; + + /* + * EVPD set to zero returns the standard INQUIRY data. + * + * Check if scsi_version is unset (-1) to avoid re-defining it + * each time an INQUIRY with standard data is received. + * scsi_version is initialized with -1 in scsi_generic_reset + * and scsi_disk_reset, making sure that we'll set the + * scsi_version after a reset. If the version field of the + * INQUIRY response somehow changes after a guest reboot, + * we'll be able to keep track of it. + * + * On SCSI-2 and older, first 3 bits of byte 2 is the + * ANSI-approved version, while on later versions the + * whole byte 2 contains the version. Check if we're dealing + * with a newer version and, in that case, assign the + * whole byte. + */ + if (s->scsi_version == -1 && !(r->req.cmd.buf[1] & 0x01)) { + s->scsi_version = r->buf[2] & 0x07; + if (s->scsi_version > 2) { + s->scsi_version = r->buf[2]; + } + } + + if ((s->type == TYPE_DISK || s->type == TYPE_ZBC) && + (r->req.cmd.buf[1] & 0x01)) { + page = r->req.cmd.buf[2]; + if (page == 0xb0) { + uint64_t max_transfer = blk_get_max_hw_transfer(s->conf.blk); + uint32_t max_iov = blk_get_max_hw_iov(s->conf.blk); + + assert(max_transfer); + max_transfer = MIN_NON_ZERO(max_transfer, max_iov * qemu_real_host_page_size) + / s->blocksize; + stl_be_p(&r->buf[8], max_transfer); + /* Also take care of the opt xfer len. */ + stl_be_p(&r->buf[12], + MIN_NON_ZERO(max_transfer, ldl_be_p(&r->buf[12]))); + } else if (s->needs_vpd_bl_emulation && page == 0x00 && r->buflen >= 4) { + /* + * Now we're capable of supplying the VPD Block Limits + * response if the hardware can't. Add it in the INQUIRY + * Supported VPD pages response in case we are using the + * emulation for this device. + * + * This way, the guest kernel will be aware of the support + * and will use it to properly set up the SCSI device. + * + * VPD page numbers must be sorted, so insert 0xb0 at the + * right place with an in-place insert. When the while loop + * begins the device response is at r[0] to r[page_idx - 1]. + */ + page_idx = lduw_be_p(r->buf + 2) + 4; + page_idx = MIN(page_idx, r->buflen); + while (page_idx > 4 && r->buf[page_idx - 1] >= 0xb0) { + if (page_idx < r->buflen) { + r->buf[page_idx] = r->buf[page_idx - 1]; + } + page_idx--; + } + if (page_idx < r->buflen) { + r->buf[page_idx] = 0xb0; + } + stw_be_p(r->buf + 2, lduw_be_p(r->buf + 2) + 1); + + if (len < r->buflen) { + len++; + } + } + } + return len; +} + +static int scsi_generic_emulate_block_limits(SCSIGenericReq *r, SCSIDevice *s) +{ + int len; + uint8_t buf[64]; + + SCSIBlockLimits bl = { + .max_io_sectors = blk_get_max_transfer(s->conf.blk) / s->blocksize + }; + + memset(r->buf, 0, r->buflen); + stb_p(buf, s->type); + stb_p(buf + 1, 0xb0); + len = scsi_emulate_block_limits(buf + 4, &bl); + assert(len <= sizeof(buf) - 4); + stw_be_p(buf + 2, len); + + memcpy(r->buf, buf, MIN(r->buflen, len + 4)); + + r->io_header.sb_len_wr = 0; + + /* + * We have valid contents in the reply buffer but the + * io_header can report a sense error coming from + * the hardware in scsi_command_complete_noio. Clean + * up the io_header to avoid reporting it. 
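+ * scsi_command_complete_noio() inspects these fields when the request
+ * finally completes, so the stale error from the failed hardware
+ * INQUIRY must not leak through.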
+ */ + r->io_header.driver_status = 0; + r->io_header.status = 0; + + return r->buflen; +} + +static void scsi_read_complete(void * opaque, int ret) +{ + SCSIGenericReq *r = (SCSIGenericReq *)opaque; + SCSIDevice *s = r->req.dev; + int len; + + assert(r->req.aiocb != NULL); + r->req.aiocb = NULL; + + aio_context_acquire(blk_get_aio_context(s->conf.blk)); + + if (ret || r->req.io_canceled) { + scsi_command_complete_noio(r, ret); + goto done; + } + + len = r->io_header.dxfer_len - r->io_header.resid; + trace_scsi_generic_read_complete(r->req.tag, len); + + r->len = -1; + + if (r->io_header.driver_status & SG_ERR_DRIVER_SENSE) { + SCSISense sense = + scsi_parse_sense_buf(r->req.sense, r->io_header.sb_len_wr); + + /* + * Check if this is a VPD Block Limits request that + * resulted in sense error but would need emulation. + * In this case, emulate a valid VPD response. + */ + if (sense.key == ILLEGAL_REQUEST && + s->needs_vpd_bl_emulation && + r->req.cmd.buf[0] == INQUIRY && + (r->req.cmd.buf[1] & 0x01) && + r->req.cmd.buf[2] == 0xb0) { + len = scsi_generic_emulate_block_limits(r, s); + /* + * It's okay to jump to req_complete: no need to + * let scsi_handle_inquiry_reply handle an + * INQUIRY VPD BL request we created manually. + */ + } + if (sense.key) { + goto req_complete; + } + } + + if (r->io_header.host_status != SCSI_HOST_OK || + (r->io_header.driver_status & SG_ERR_DRIVER_TIMEOUT) || + r->io_header.status != GOOD || + len == 0) { + scsi_command_complete_noio(r, 0); + goto done; + } + + /* Snoop READ CAPACITY output to set the blocksize. */ + if (r->req.cmd.buf[0] == READ_CAPACITY_10 && + (ldl_be_p(&r->buf[0]) != 0xffffffffU || s->max_lba == 0)) { + s->blocksize = ldl_be_p(&r->buf[4]); + s->max_lba = ldl_be_p(&r->buf[0]) & 0xffffffffULL; + } else if (r->req.cmd.buf[0] == SERVICE_ACTION_IN_16 && + (r->req.cmd.buf[1] & 31) == SAI_READ_CAPACITY_16) { + s->blocksize = ldl_be_p(&r->buf[8]); + s->max_lba = ldq_be_p(&r->buf[0]); + } + blk_set_guest_block_size(s->conf.blk, s->blocksize); + + /* + * Patch MODE SENSE device specific parameters if the BDS is opened + * readonly. + */ + if ((s->type == TYPE_DISK || s->type == TYPE_TAPE || s->type == TYPE_ZBC) && + !blk_is_writable(s->conf.blk) && + (r->req.cmd.buf[0] == MODE_SENSE || + r->req.cmd.buf[0] == MODE_SENSE_10) && + (r->req.cmd.buf[1] & 0x8) == 0) { + if (r->req.cmd.buf[0] == MODE_SENSE) { + r->buf[2] |= 0x80; + } else { + r->buf[3] |= 0x80; + } + } + if (r->req.cmd.buf[0] == INQUIRY) { + len = scsi_handle_inquiry_reply(r, s, len); + } + +req_complete: + scsi_req_data(&r->req, len); + scsi_req_unref(&r->req); + +done: + aio_context_release(blk_get_aio_context(s->conf.blk)); +} + +/* Read more data from scsi device into buffer. */ +static void scsi_read_data(SCSIRequest *req) +{ + SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req); + SCSIDevice *s = r->req.dev; + int ret; + + trace_scsi_generic_read_data(req->tag); + + /* The request is used as the AIO opaque value, so add a ref. 
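+ * The reference is dropped again by the completion path, either in
+ * scsi_read_complete or in scsi_command_complete_noio below.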
*/ + scsi_req_ref(&r->req); + if (r->len == -1) { + scsi_command_complete_noio(r, 0); + return; + } + + ret = execute_command(s->conf.blk, r, SG_DXFER_FROM_DEV, + scsi_read_complete); + if (ret < 0) { + scsi_command_complete_noio(r, ret); + } +} + +static void scsi_write_complete(void * opaque, int ret) +{ + SCSIGenericReq *r = (SCSIGenericReq *)opaque; + SCSIDevice *s = r->req.dev; + + trace_scsi_generic_write_complete(ret); + + assert(r->req.aiocb != NULL); + r->req.aiocb = NULL; + + aio_context_acquire(blk_get_aio_context(s->conf.blk)); + + if (ret || r->req.io_canceled) { + scsi_command_complete_noio(r, ret); + goto done; + } + + if (r->req.cmd.buf[0] == MODE_SELECT && r->req.cmd.buf[4] == 12 && + s->type == TYPE_TAPE) { + s->blocksize = (r->buf[9] << 16) | (r->buf[10] << 8) | r->buf[11]; + trace_scsi_generic_write_complete_blocksize(s->blocksize); + } + + scsi_command_complete_noio(r, ret); + +done: + aio_context_release(blk_get_aio_context(s->conf.blk)); +} + +/* Write data to a scsi device. + The transfer may complete asynchronously. */ +static void scsi_write_data(SCSIRequest *req) +{ + SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req); + SCSIDevice *s = r->req.dev; + int ret; + + trace_scsi_generic_write_data(req->tag); + if (r->len == 0) { + r->len = r->buflen; + scsi_req_data(&r->req, r->len); + return; + } + + /* The request is used as the AIO opaque value, so add a ref. */ + scsi_req_ref(&r->req); + ret = execute_command(s->conf.blk, r, SG_DXFER_TO_DEV, scsi_write_complete); + if (ret < 0) { + scsi_command_complete_noio(r, ret); + } +} + +/* Return a pointer to the data buffer. */ +static uint8_t *scsi_get_buf(SCSIRequest *req) +{ + SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req); + + return r->buf; +} + +static void scsi_generic_command_dump(uint8_t *cmd, int len) +{ + int i; + char *line_buffer, *p; + + line_buffer = g_malloc(len * 5 + 1); + + for (i = 0, p = line_buffer; i < len; i++) { + p += sprintf(p, " 0x%02x", cmd[i]); + } + trace_scsi_generic_send_command(line_buffer); + + g_free(line_buffer); +} + +/* Execute a scsi command. Returns the length of the data expected by the + command. This will be positive for data transfers from the device + (eg. disk reads), negative for transfers to the device (eg. disk writes), + and zero if the command does not transfer any data. */ + +static int32_t scsi_send_command(SCSIRequest *req, uint8_t *cmd) +{ + SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req); + SCSIDevice *s = r->req.dev; + int ret; + + if (trace_event_get_state_backends(TRACE_SCSI_GENERIC_SEND_COMMAND)) { + scsi_generic_command_dump(cmd, r->req.cmd.len); + } + + if (r->req.cmd.xfer == 0) { + g_free(r->buf); + r->buflen = 0; + r->buf = NULL; + /* The request is used as the AIO opaque value, so add a ref. 
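+ * scsi_command_complete() drops it once the SG_DXFER_NONE request
+ * finishes.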
*/ + scsi_req_ref(&r->req); + ret = execute_command(s->conf.blk, r, SG_DXFER_NONE, + scsi_command_complete); + if (ret < 0) { + scsi_command_complete_noio(r, ret); + return 0; + } + return 0; + } + + if (r->buflen != r->req.cmd.xfer) { + g_free(r->buf); + r->buf = g_malloc(r->req.cmd.xfer); + r->buflen = r->req.cmd.xfer; + } + + memset(r->buf, 0, r->buflen); + r->len = r->req.cmd.xfer; + if (r->req.cmd.mode == SCSI_XFER_TO_DEV) { + r->len = 0; + return -r->req.cmd.xfer; + } else { + return r->req.cmd.xfer; + } +} + +static int read_naa_id(const uint8_t *p, uint64_t *p_wwn) +{ + int i; + + if ((p[1] & 0xF) == 3) { + /* NAA designator type */ + if (p[3] != 8) { + return -EINVAL; + } + *p_wwn = ldq_be_p(p + 4); + return 0; + } + + if ((p[1] & 0xF) == 8) { + /* SCSI name string designator type */ + if (p[3] < 20 || memcmp(&p[4], "naa.", 4)) { + return -EINVAL; + } + if (p[3] > 20 && p[24] != ',') { + return -EINVAL; + } + *p_wwn = 0; + for (i = 8; i < 24; i++) { + char c = qemu_toupper(p[i]); + c -= (c >= '0' && c <= '9' ? '0' : 'A' - 10); + *p_wwn = (*p_wwn << 4) | c; + } + return 0; + } + + return -EINVAL; +} + +int scsi_SG_IO_FROM_DEV(BlockBackend *blk, uint8_t *cmd, uint8_t cmd_size, + uint8_t *buf, uint8_t buf_size, uint32_t timeout) +{ + sg_io_hdr_t io_header; + uint8_t sensebuf[8]; + int ret; + + memset(&io_header, 0, sizeof(io_header)); + io_header.interface_id = 'S'; + io_header.dxfer_direction = SG_DXFER_FROM_DEV; + io_header.dxfer_len = buf_size; + io_header.dxferp = buf; + io_header.cmdp = cmd; + io_header.cmd_len = cmd_size; + io_header.mx_sb_len = sizeof(sensebuf); + io_header.sbp = sensebuf; + io_header.timeout = timeout * 1000; + + trace_scsi_generic_ioctl_sgio_command(cmd[0], io_header.timeout); + ret = blk_ioctl(blk, SG_IO, &io_header); + if (ret < 0 || io_header.status || + io_header.driver_status || io_header.host_status) { + trace_scsi_generic_ioctl_sgio_done(cmd[0], ret, io_header.status, + io_header.host_status); + return -1; + } + return 0; +} + +/* + * Executes an INQUIRY request with EVPD set to retrieve the + * available VPD pages of the device. If the device does + * not support the Block Limits page (page 0xb0), set + * the needs_vpd_bl_emulation flag for future use. + */ +static void scsi_generic_set_vpd_bl_emulation(SCSIDevice *s) +{ + uint8_t cmd[6]; + uint8_t buf[250]; + uint8_t page_len; + int ret, i; + + memset(cmd, 0, sizeof(cmd)); + memset(buf, 0, sizeof(buf)); + cmd[0] = INQUIRY; + cmd[1] = 1; + cmd[2] = 0x00; + cmd[4] = sizeof(buf); + + ret = scsi_SG_IO_FROM_DEV(s->conf.blk, cmd, sizeof(cmd), + buf, sizeof(buf), s->io_timeout); + if (ret < 0) { + /* + * Do not assume anything if we can't retrieve the + * INQUIRY response to assert the VPD Block Limits + * support. 
+ */ + s->needs_vpd_bl_emulation = false; + return; + } + + page_len = buf[3]; + for (i = 4; i < MIN(sizeof(buf), page_len + 4); i++) { + if (buf[i] == 0xb0) { + s->needs_vpd_bl_emulation = false; + return; + } + } + s->needs_vpd_bl_emulation = true; +}
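Both VPD probes in this file (page 0x00 above, page 0x83 below) issue the same 6-byte INQUIRY CDB with the EVPD bit set. A self-contained sketch of that CDB, for reference (illustrative helper, not part of the patch):

    #include <stdint.h>
    #include <string.h>

    static void build_inquiry_evpd_cdb(uint8_t cdb[6], uint8_t page,
                                       uint8_t alloc_len)
    {
        memset(cdb, 0, 6);
        cdb[0] = 0x12;      /* INQUIRY */
        cdb[1] = 0x01;      /* EVPD=1: return a vital product data page */
        cdb[2] = page;      /* 0x00 supported pages, 0x83 device id,
                               0xb0 block limits */
        cdb[4] = alloc_len; /* allocation length */
    }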
+ +static void scsi_generic_read_device_identification(SCSIDevice *s) +{ + uint8_t cmd[6]; + uint8_t buf[250]; + int ret; + int i, len; + + memset(cmd, 0, sizeof(cmd)); + memset(buf, 0, sizeof(buf)); + cmd[0] = INQUIRY; + cmd[1] = 1; + cmd[2] = 0x83; + cmd[4] = sizeof(buf); + + ret = scsi_SG_IO_FROM_DEV(s->conf.blk, cmd, sizeof(cmd), + buf, sizeof(buf), s->io_timeout); + if (ret < 0) { + return; + } + + len = MIN((buf[2] << 8) | buf[3], sizeof(buf) - 4); + for (i = 0; i + 3 <= len; ) { + const uint8_t *p = &buf[i + 4]; + uint64_t wwn; + + if (i + (p[3] + 4) > len) { + break; + } + + if ((p[1] & 0x10) == 0) { + /* Associated with the logical unit */ + if (read_naa_id(p, &wwn) == 0) { + s->wwn = wwn; + } + } else if ((p[1] & 0x10) == 0x10) { + /* Associated with the target port */ + if (read_naa_id(p, &wwn) == 0) { + s->port_wwn = wwn; + } + } + + i += p[3] + 4; + } +} + +void scsi_generic_read_device_inquiry(SCSIDevice *s) +{ + scsi_generic_read_device_identification(s); + if (s->type == TYPE_DISK || s->type == TYPE_ZBC) { + scsi_generic_set_vpd_bl_emulation(s); + } else { + s->needs_vpd_bl_emulation = false; + } +} + +static int get_stream_blocksize(BlockBackend *blk) +{ + uint8_t cmd[6]; + uint8_t buf[12]; + int ret; + + memset(cmd, 0, sizeof(cmd)); + memset(buf, 0, sizeof(buf)); + cmd[0] = MODE_SENSE; + cmd[4] = sizeof(buf); + + ret = scsi_SG_IO_FROM_DEV(blk, cmd, sizeof(cmd), buf, sizeof(buf), 6); + if (ret < 0) { + return -1; + } + + return (buf[9] << 16) | (buf[10] << 8) | buf[11]; +} + +static void scsi_generic_reset(DeviceState *dev) +{ + SCSIDevice *s = SCSI_DEVICE(dev); + + s->scsi_version = s->default_scsi_version; + scsi_device_purge_requests(s, SENSE_CODE(RESET)); +} + +static void scsi_generic_realize(SCSIDevice *s, Error **errp) +{ + int rc; + int sg_version; + struct sg_scsi_id scsiid; + + if (!s->conf.blk) { + error_setg(errp, "drive property not set"); + return; + } + + if (blk_get_on_error(s->conf.blk, 0) != BLOCKDEV_ON_ERROR_ENOSPC && + blk_get_on_error(s->conf.blk, 0) != BLOCKDEV_ON_ERROR_REPORT) { + error_setg(errp, "Device doesn't support drive option werror"); + return; + } + if (blk_get_on_error(s->conf.blk, 1) != BLOCKDEV_ON_ERROR_REPORT) { + error_setg(errp, "Device doesn't support drive option rerror"); + return; + } + + /* check we are using a driver managing SG_IO (version 3 and after) */ + rc = blk_ioctl(s->conf.blk, SG_GET_VERSION_NUM, &sg_version); + if (rc < 0) { + error_setg_errno(errp, -rc, "cannot get SG_IO version number"); + if (rc != -EPERM) { + error_append_hint(errp, "Is this a SCSI device?\n"); + } + return; + } + if (sg_version < 30000) { + error_setg(errp, "scsi generic interface too old"); + return; + } + + /* get LUN of the /dev/sg? */ + if (blk_ioctl(s->conf.blk, SG_GET_SCSI_ID, &scsiid)) { + error_setg(errp, "SG_GET_SCSI_ID ioctl failed"); + return; + } + if (!blkconf_apply_backend_options(&s->conf, + !blk_supports_write_perm(s->conf.blk), + true, errp)) { + return; + } + + /* define device state */ + s->type = scsiid.scsi_type; + trace_scsi_generic_realize_type(s->type); + + switch (s->type) { + case TYPE_TAPE: + s->blocksize = get_stream_blocksize(s->conf.blk); + if (s->blocksize == -1) { + s->blocksize = 0; + } + break; + + /* Make a guess for block devices, we'll fix it when the guest sends + * READ CAPACITY. If they don't, they likely would assume these sizes + * anyway. (TODO: they could also send MODE SENSE). + */ + case TYPE_ROM: + case TYPE_WORM: + s->blocksize = 2048; + break; + default: + s->blocksize = 512; + break; + } + + trace_scsi_generic_realize_blocksize(s->blocksize); + + /* Only used by scsi-block, but initialize it nevertheless to be clean. */ + s->default_scsi_version = -1; + s->io_timeout = DEFAULT_IO_TIMEOUT; + scsi_generic_read_device_inquiry(s); +} + +const SCSIReqOps scsi_generic_req_ops = { + .size = sizeof(SCSIGenericReq), + .free_req = scsi_free_request, + .send_command = scsi_send_command, + .read_data = scsi_read_data, + .write_data = scsi_write_data, + .get_buf = scsi_get_buf, + .load_request = scsi_generic_load_request, + .save_request = scsi_generic_save_request, +}; + +static SCSIRequest *scsi_new_request(SCSIDevice *d, uint32_t tag, uint32_t lun, + uint8_t *buf, void *hba_private) +{ + return scsi_req_alloc(&scsi_generic_req_ops, d, tag, lun, hba_private); +} + +static Property scsi_generic_properties[] = { + DEFINE_PROP_DRIVE("drive", SCSIDevice, conf.blk), + DEFINE_PROP_BOOL("share-rw", SCSIDevice, conf.share_rw, false), + DEFINE_PROP_UINT32("io_timeout", SCSIDevice, io_timeout, + DEFAULT_IO_TIMEOUT), + DEFINE_PROP_END_OF_LIST(), +}; + +static int scsi_generic_parse_cdb(SCSIDevice *dev, SCSICommand *cmd, + uint8_t *buf, void *hba_private) +{ + return scsi_bus_parse_cdb(dev, cmd, buf, hba_private); +} + +static void scsi_generic_class_initfn(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass); + + sc->realize = scsi_generic_realize; + sc->alloc_req = scsi_new_request; + sc->parse_cdb = scsi_generic_parse_cdb; + dc->fw_name = "disk"; + dc->desc = "pass through generic scsi device (/dev/sg*)"; + dc->reset = scsi_generic_reset; + device_class_set_props(dc, scsi_generic_properties); + dc->vmsd = &vmstate_scsi_device; +} + +static const TypeInfo scsi_generic_info = { + .name = "scsi-generic", + .parent = TYPE_SCSI_DEVICE, + .instance_size = sizeof(SCSIDevice), + .class_init = scsi_generic_class_initfn, +}; + +static void scsi_generic_register_types(void) +{ + type_register_static(&scsi_generic_info); +} + +type_init(scsi_generic_register_types) + +#endif /* __linux__ */
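For context, scsi-generic attaches a host /dev/sg* node to a guest SCSI bus via SG_IO pass-through. A typical invocation (controller choice, node names and the /dev/sg2 path are placeholders) looks like:

    qemu-system-x86_64 ... \
        -device virtio-scsi-pci,id=scsi0 \
        -blockdev node-name=sg0,driver=host_device,filename=/dev/sg2 \
        -device scsi-generic,drive=sg0,bus=scsi0.0

The io_timeout property defined above is in seconds; scsi_SG_IO_FROM_DEV() converts it to the milliseconds expected by the sg driver.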
diff --git a/hw/scsi/spapr_vscsi.c b/hw/scsi/spapr_vscsi.c new file mode 100644 index 000000000..a07a8e152 --- /dev/null +++ b/hw/scsi/spapr_vscsi.c @@ -0,0 +1,1300 @@ +/* + * QEMU PowerPC pSeries Logical Partition (aka sPAPR) hardware System Emulator + * + * PAPR Virtual SCSI, aka ibmvscsi + * + * Copyright (c) 2010,2011 Benjamin Herrenschmidt, IBM Corporation. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + * + * TODO: + * + * - Cleanups :-) + * - Sort out better how to assign devices to VSCSI instances + * - Fix residual counts + * - Add indirect descriptors support + * - Maybe do autosense (PAPR seems to mandate it, linux doesn't care) + */ + +#include "qemu/osdep.h" +#include "qemu/module.h" +#include "hw/scsi/scsi.h" +#include "migration/vmstate.h" +#include "scsi/constants.h" +#include "srp.h" +#include "hw/ppc/spapr.h" +#include "hw/ppc/spapr_vio.h" +#include "hw/qdev-properties.h" +#include "viosrp.h" +#include "trace.h" + +#include <libfdt.h> +#include "qom/object.h" + +/* + * Virtual SCSI device + */ + +/* Random numbers */ +#define VSCSI_MAX_SECTORS 4096 +#define VSCSI_REQ_LIMIT 24 + +/* Maximum size of an IU payload */ +#define SRP_MAX_IU_DATA_LEN (SRP_MAX_IU_LEN - sizeof(union srp_iu)) +#define SRP_RSP_SENSE_DATA_LEN 18 + +#define SRP_REPORT_LUNS_WLUN 0xc10100000000000ULL + +typedef union vscsi_crq { + struct viosrp_crq s; + uint8_t raw[16]; +} vscsi_crq; + +typedef struct vscsi_req { + vscsi_crq crq; + uint8_t viosrp_iu_buf[SRP_MAX_IU_LEN]; + + /* SCSI request tracking */ + SCSIRequest *sreq; + uint32_t qtag; /* qemu tag != srp tag */ + bool active; + bool writing; + bool dma_error; + uint32_t data_len; + uint32_t senselen; + uint8_t sense[SCSI_SENSE_BUF_SIZE]; + + /* RDMA related bits */ + uint8_t dma_fmt; + uint16_t local_desc; + uint16_t total_desc; + uint16_t cdb_offset; + uint16_t cur_desc_num; + uint16_t cur_desc_offset; +} vscsi_req; + +#define TYPE_VIO_SPAPR_VSCSI_DEVICE "spapr-vscsi" +OBJECT_DECLARE_SIMPLE_TYPE(VSCSIState, VIO_SPAPR_VSCSI_DEVICE) + +struct VSCSIState { + SpaprVioDevice vdev; + SCSIBus bus; + vscsi_req reqs[VSCSI_REQ_LIMIT]; +}; + +static union viosrp_iu *req_iu(vscsi_req *req) +{ + return (union viosrp_iu *)req->viosrp_iu_buf; +} + +static struct vscsi_req *vscsi_get_req(VSCSIState *s) +{ + vscsi_req *req; + int i; + + for (i = 0; i < VSCSI_REQ_LIMIT; i++) { + req = &s->reqs[i]; + if (!req->active) { + memset(req, 0, sizeof(*req)); + req->qtag = i; + req->active = 1; + return req; + } + } + return NULL; +} + +static struct vscsi_req *vscsi_find_req(VSCSIState *s, uint64_t srp_tag) +{ + vscsi_req *req; + int i; + + for (i = 0; i < VSCSI_REQ_LIMIT; i++) { + req = &s->reqs[i]; + if (req_iu(req)->srp.cmd.tag == srp_tag) { + return req; + } + } + return NULL; +} + +static void vscsi_put_req(vscsi_req *req) +{ + if (req->sreq != NULL) { + scsi_req_unref(req->sreq); + } + req->sreq = NULL; +
req->active = 0; +} + +static SCSIDevice *vscsi_device_find(SCSIBus *bus, uint64_t srp_lun, int *lun) +{ + int channel = 0, id = 0; + +retry: + switch (srp_lun >> 62) { + case 0: + if ((srp_lun >> 56) != 0) { + channel = (srp_lun >> 56) & 0x3f; + id = (srp_lun >> 48) & 0xff; + srp_lun <<= 16; + goto retry; + } + *lun = (srp_lun >> 48) & 0xff; + break; + + case 1: + *lun = (srp_lun >> 48) & 0x3fff; + break; + case 2: + channel = (srp_lun >> 53) & 0x7; + id = (srp_lun >> 56) & 0x3f; + *lun = (srp_lun >> 48) & 0x1f; + break; + case 3: + *lun = -1; + return NULL; + default: + abort(); + } + + return scsi_device_find(bus, channel, id, *lun); +} + +static int vscsi_send_iu(VSCSIState *s, vscsi_req *req, + uint64_t length, uint8_t format) +{ + long rc, rc1; + + assert(length <= SRP_MAX_IU_LEN); + + /* First copy the SRP */ + rc = spapr_vio_dma_write(&s->vdev, req->crq.s.IU_data_ptr, + &req->viosrp_iu_buf, length); + if (rc) { + fprintf(stderr, "vscsi_send_iu: DMA write failure !\n"); + } + + req->crq.s.valid = 0x80; + req->crq.s.format = format; + req->crq.s.reserved = 0x00; + req->crq.s.timeout = cpu_to_be16(0x0000); + req->crq.s.IU_length = cpu_to_be16(length); + req->crq.s.IU_data_ptr = req_iu(req)->srp.rsp.tag; /* right byte order */ + + if (rc == 0) { + req->crq.s.status = VIOSRP_OK; + } else { + req->crq.s.status = VIOSRP_ADAPTER_FAIL; + } + + rc1 = spapr_vio_send_crq(&s->vdev, req->crq.raw); + if (rc1) { + fprintf(stderr, "vscsi_send_iu: Error sending response\n"); + return rc1; + } + + return rc; +} + +static void vscsi_makeup_sense(VSCSIState *s, vscsi_req *req, + uint8_t key, uint8_t asc, uint8_t ascq) +{ + req->senselen = SRP_RSP_SENSE_DATA_LEN; + + /* Valid bit and 'current errors' */ + req->sense[0] = (0x1 << 7 | 0x70); + /* Sense key */ + req->sense[2] = key; + /* Additional sense length */ + req->sense[7] = 0xa; /* 10 bytes */ + /* Additional sense code */ + req->sense[12] = asc; + req->sense[13] = ascq; +} + +static int vscsi_send_rsp(VSCSIState *s, vscsi_req *req, + uint8_t status, int32_t res_in, int32_t res_out) +{ + union viosrp_iu *iu = req_iu(req); + uint64_t tag = iu->srp.rsp.tag; + int total_len = sizeof(iu->srp.rsp); + uint8_t sol_not = iu->srp.cmd.sol_not; + + trace_spapr_vscsi_send_rsp(status, res_in, res_out); + + memset(iu, 0, sizeof(struct srp_rsp)); + iu->srp.rsp.opcode = SRP_RSP; + iu->srp.rsp.req_lim_delta = cpu_to_be32(1); + iu->srp.rsp.tag = tag; + + /* Handle residuals */ + if (res_in < 0) { + iu->srp.rsp.flags |= SRP_RSP_FLAG_DIUNDER; + res_in = -res_in; + } else if (res_in) { + iu->srp.rsp.flags |= SRP_RSP_FLAG_DIOVER; + } + if (res_out < 0) { + iu->srp.rsp.flags |= SRP_RSP_FLAG_DOUNDER; + res_out = -res_out; + } else if (res_out) { + iu->srp.rsp.flags |= SRP_RSP_FLAG_DOOVER; + } + iu->srp.rsp.data_in_res_cnt = cpu_to_be32(res_in); + iu->srp.rsp.data_out_res_cnt = cpu_to_be32(res_out); + + /* We don't do response data */ + /* iu->srp.rsp.flags &= ~SRP_RSP_FLAG_RSPVALID; */ + iu->srp.rsp.resp_data_len = cpu_to_be32(0); + + /* Handle success vs. 
failure */ + iu->srp.rsp.status = status; + if (status) { + iu->srp.rsp.sol_not = (sol_not & 0x04) >> 2; + if (req->senselen) { + int sense_data_len = MIN(req->senselen, SRP_MAX_IU_DATA_LEN); + + iu->srp.rsp.flags |= SRP_RSP_FLAG_SNSVALID; + iu->srp.rsp.sense_data_len = cpu_to_be32(sense_data_len); + memcpy(iu->srp.rsp.data, req->sense, sense_data_len); + total_len += sense_data_len; + } + } else { + iu->srp.rsp.sol_not = (sol_not & 0x02) >> 1; + } + + vscsi_send_iu(s, req, total_len, VIOSRP_SRP_FORMAT); + return 0; +} + +static inline struct srp_direct_buf vscsi_swap_desc(struct srp_direct_buf desc) +{ + desc.va = be64_to_cpu(desc.va); + desc.len = be32_to_cpu(desc.len); + return desc; +} + +static int vscsi_fetch_desc(VSCSIState *s, struct vscsi_req *req, + unsigned n, unsigned buf_offset, + struct srp_direct_buf *ret) +{ + struct srp_cmd *cmd = &req_iu(req)->srp.cmd; + + switch (req->dma_fmt) { + case SRP_NO_DATA_DESC: { + trace_spapr_vscsi_fetch_desc_no_data(); + return 0; + } + case SRP_DATA_DESC_DIRECT: { + memcpy(ret, cmd->add_data + req->cdb_offset, sizeof(*ret)); + assert(req->cur_desc_num == 0); + trace_spapr_vscsi_fetch_desc_direct(); + break; + } + case SRP_DATA_DESC_INDIRECT: { + struct srp_indirect_buf *tmp = (struct srp_indirect_buf *) + (cmd->add_data + req->cdb_offset); + if (n < req->local_desc) { + *ret = tmp->desc_list[n]; + trace_spapr_vscsi_fetch_desc_indirect(req->qtag, n, + req->local_desc); + } else if (n < req->total_desc) { + int rc; + struct srp_direct_buf tbl_desc = vscsi_swap_desc(tmp->table_desc); + unsigned desc_offset = n * sizeof(struct srp_direct_buf); + + if (desc_offset >= tbl_desc.len) { + trace_spapr_vscsi_fetch_desc_out_of_range(n, desc_offset); + return -1; + } + rc = spapr_vio_dma_read(&s->vdev, tbl_desc.va + desc_offset, + ret, sizeof(struct srp_direct_buf)); + if (rc) { + trace_spapr_vscsi_fetch_desc_dma_read_error(rc); + return -1; + } + trace_spapr_vscsi_fetch_desc_indirect_seg_ext(req->qtag, n, + req->total_desc, + tbl_desc.va, + tbl_desc.len); + } else { + trace_spapr_vscsi_fetch_desc_out_of_desc(); + return 0; + } + break; + } + default: + fprintf(stderr, "VSCSI: Unknown format %x\n", req->dma_fmt); + return -1; + } + + *ret = vscsi_swap_desc(*ret); + if (buf_offset > ret->len) { + trace_spapr_vscsi_fetch_desc_out_of_desc_boundary(buf_offset, + req->cur_desc_num, + ret->len); + return -1; + } + ret->va += buf_offset; + ret->len -= buf_offset; + + trace_spapr_vscsi_fetch_desc_done(req->cur_desc_num, req->cur_desc_offset, + ret->va, ret->len); + + return ret->len ? 1 : 0; +} + +static int vscsi_srp_direct_data(VSCSIState *s, vscsi_req *req, + uint8_t *buf, uint32_t len) +{ + struct srp_direct_buf md; + uint32_t llen; + int rc = 0; + + rc = vscsi_fetch_desc(s, req, req->cur_desc_num, req->cur_desc_offset, &md); + if (rc < 0) { + return -1; + } else if (rc == 0) { + return 0; + } + + llen = MIN(len, md.len); + if (llen) { + if (req->writing) { /* writing = to device = reading from memory */ + rc = spapr_vio_dma_read(&s->vdev, md.va, buf, llen); + } else { + rc = spapr_vio_dma_write(&s->vdev, md.va, buf, llen); + } + } + + if (rc) { + return -1; + } + req->cur_desc_offset += llen; + + return llen; +} + +static int vscsi_srp_indirect_data(VSCSIState *s, vscsi_req *req, + uint8_t *buf, uint32_t len) +{ + struct srp_direct_buf md; + int rc = 0; + uint32_t llen, total = 0; + + trace_spapr_vscsi_srp_indirect_data(len); + + /* While we have data ... 
*/ + while (len) { + rc = vscsi_fetch_desc(s, req, req->cur_desc_num, req->cur_desc_offset, &md); + if (rc < 0) { + return -1; + } else if (rc == 0) { + break; + } + + /* Perform transfer */ + llen = MIN(len, md.len); + if (req->writing) { /* writing = to device = reading from memory */ + rc = spapr_vio_dma_read(&s->vdev, md.va, buf, llen); + } else { + rc = spapr_vio_dma_write(&s->vdev, md.va, buf, llen); + } + if (rc) { + trace_spapr_vscsi_srp_indirect_data_rw(req->writing, rc); + break; + } + trace_spapr_vscsi_srp_indirect_data_buf(buf[0], buf[1], buf[2], buf[3]); + + len -= llen; + buf += llen; + + total += llen; + + /* Update current position in the current descriptor */ + req->cur_desc_offset += llen; + if (md.len == llen) { + /* Go to the next descriptor if the current one finished */ + ++req->cur_desc_num; + req->cur_desc_offset = 0; + } + } + + return rc ? -1 : total; +} + +static int vscsi_srp_transfer_data(VSCSIState *s, vscsi_req *req, + int writing, uint8_t *buf, uint32_t len) +{ + int err = 0; + + switch (req->dma_fmt) { + case SRP_NO_DATA_DESC: + trace_spapr_vscsi_srp_transfer_data(len); + break; + case SRP_DATA_DESC_DIRECT: + err = vscsi_srp_direct_data(s, req, buf, len); + break; + case SRP_DATA_DESC_INDIRECT: + err = vscsi_srp_indirect_data(s, req, buf, len); + break; + } + return err; +} + +/* Bits from linux srp */ +static int data_out_desc_size(struct srp_cmd *cmd) +{ + int size = 0; + uint8_t fmt = cmd->buf_fmt >> 4; + + switch (fmt) { + case SRP_NO_DATA_DESC: + break; + case SRP_DATA_DESC_DIRECT: + size = sizeof(struct srp_direct_buf); + break; + case SRP_DATA_DESC_INDIRECT: + size = sizeof(struct srp_indirect_buf) + + sizeof(struct srp_direct_buf)*cmd->data_out_desc_cnt; + break; + default: + break; + } + return size; +} + +static int vscsi_preprocess_desc(vscsi_req *req) +{ + struct srp_cmd *cmd = &req_iu(req)->srp.cmd; + + req->cdb_offset = cmd->add_cdb_len & ~3; + + if (req->writing) { + req->dma_fmt = cmd->buf_fmt >> 4; + } else { + req->cdb_offset += data_out_desc_size(cmd); + req->dma_fmt = cmd->buf_fmt & ((1U << 4) - 1); + } + + switch (req->dma_fmt) { + case SRP_NO_DATA_DESC: + break; + case SRP_DATA_DESC_DIRECT: + req->total_desc = req->local_desc = 1; + break; + case SRP_DATA_DESC_INDIRECT: { + struct srp_indirect_buf *ind_tmp = (struct srp_indirect_buf *) + (cmd->add_data + req->cdb_offset); + + req->total_desc = be32_to_cpu(ind_tmp->table_desc.len) / + sizeof(struct srp_direct_buf); + req->local_desc = req->writing ? cmd->data_out_desc_cnt : + cmd->data_in_desc_cnt; + break; + } + default: + fprintf(stderr, + "vscsi_preprocess_desc: Unknown format %x\n", req->dma_fmt); + return -1; + } + + return 0; +} + +/* Callback to indicate that the SCSI layer has completed a transfer. */ +static void vscsi_transfer_data(SCSIRequest *sreq, uint32_t len) +{ + VSCSIState *s = VIO_SPAPR_VSCSI_DEVICE(sreq->bus->qbus.parent); + vscsi_req *req = sreq->hba_private; + uint8_t *buf; + int rc = 0; + + trace_spapr_vscsi_transfer_data(sreq->tag, len, req); + if (req == NULL) { + fprintf(stderr, "VSCSI: Can't find request for tag 0x%x\n", sreq->tag); + return; + } + + if (len) { + buf = scsi_req_get_buf(sreq); + rc = vscsi_srp_transfer_data(s, req, req->writing, buf, len); + } + if (rc < 0) { + fprintf(stderr, "VSCSI: RDMA error rc=%d!\n", rc); + req->dma_error = true; + scsi_req_cancel(req->sreq); + return; + } + + /* Start next chunk */ + req->data_len -= rc; + scsi_req_continue(sreq); +} + +/* Callback to indicate that the SCSI layer has completed a command.
*/ +static void vscsi_command_complete(SCSIRequest *sreq, size_t resid) +{ + VSCSIState *s = VIO_SPAPR_VSCSI_DEVICE(sreq->bus->qbus.parent); + vscsi_req *req = sreq->hba_private; + int32_t res_in = 0, res_out = 0; + + trace_spapr_vscsi_command_complete(sreq->tag, sreq->status, req); + if (req == NULL) { + fprintf(stderr, "VSCSI: Can't find request for tag 0x%x\n", sreq->tag); + return; + } + + if (sreq->status == CHECK_CONDITION) { + req->senselen = scsi_req_get_sense(req->sreq, req->sense, + sizeof(req->sense)); + trace_spapr_vscsi_command_complete_sense_data1(req->senselen, + req->sense[0], req->sense[1], req->sense[2], req->sense[3], + req->sense[4], req->sense[5], req->sense[6], req->sense[7]); + trace_spapr_vscsi_command_complete_sense_data2( + req->sense[8], req->sense[9], req->sense[10], req->sense[11], + req->sense[12], req->sense[13], req->sense[14], req->sense[15]); + } + + trace_spapr_vscsi_command_complete_status(sreq->status); + if (sreq->status == 0) { + /* We handle overflows, not underflows for normal commands, + * but hopefully nobody cares + */ + if (req->writing) { + res_out = req->data_len; + } else { + res_in = req->data_len; + } + } + vscsi_send_rsp(s, req, sreq->status, res_in, res_out); + vscsi_put_req(req); +} + +static void vscsi_request_cancelled(SCSIRequest *sreq) +{ + vscsi_req *req = sreq->hba_private; + + if (req->dma_error) { + VSCSIState *s = VIO_SPAPR_VSCSI_DEVICE(sreq->bus->qbus.parent); + + vscsi_makeup_sense(s, req, HARDWARE_ERROR, 0, 0); + vscsi_send_rsp(s, req, CHECK_CONDITION, 0, 0); + } + vscsi_put_req(req); +} + +static const VMStateDescription vmstate_spapr_vscsi_req = { + .name = "spapr_vscsi_req", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_BUFFER(crq.raw, vscsi_req), + VMSTATE_BUFFER(viosrp_iu_buf, vscsi_req), + VMSTATE_UINT32(qtag, vscsi_req), + VMSTATE_BOOL(active, vscsi_req), + VMSTATE_UINT32(data_len, vscsi_req), + VMSTATE_BOOL(writing, vscsi_req), + VMSTATE_UINT32(senselen, vscsi_req), + VMSTATE_BUFFER(sense, vscsi_req), + VMSTATE_UINT8(dma_fmt, vscsi_req), + VMSTATE_UINT16(local_desc, vscsi_req), + VMSTATE_UINT16(total_desc, vscsi_req), + VMSTATE_UINT16(cdb_offset, vscsi_req), + /*Restart SCSI request from the beginning for now */ + /*VMSTATE_UINT16(cur_desc_num, vscsi_req), + VMSTATE_UINT16(cur_desc_offset, vscsi_req),*/ + VMSTATE_END_OF_LIST() + }, +}; + +static void vscsi_save_request(QEMUFile *f, SCSIRequest *sreq) +{ + vscsi_req *req = sreq->hba_private; + assert(req->active); + + vmstate_save_state(f, &vmstate_spapr_vscsi_req, req, NULL); + + trace_spapr_vscsi_save_request(req->qtag, req->cur_desc_num, + req->cur_desc_offset); +} + +static void *vscsi_load_request(QEMUFile *f, SCSIRequest *sreq) +{ + SCSIBus *bus = sreq->bus; + VSCSIState *s = VIO_SPAPR_VSCSI_DEVICE(bus->qbus.parent); + vscsi_req *req; + int rc; + + assert(sreq->tag < VSCSI_REQ_LIMIT); + req = &s->reqs[sreq->tag]; + assert(!req->active); + + memset(req, 0, sizeof(*req)); + rc = vmstate_load_state(f, &vmstate_spapr_vscsi_req, req, 1); + if (rc) { + fprintf(stderr, "VSCSI: failed loading request tag#%u\n", sreq->tag); + return NULL; + } + assert(req->active); + + req->sreq = scsi_req_ref(sreq); + + trace_spapr_vscsi_load_request(req->qtag, req->cur_desc_num, + req->cur_desc_offset); + + return req; +} + +static void vscsi_process_login(VSCSIState *s, vscsi_req *req) +{ + union viosrp_iu *iu = req_iu(req); + struct srp_login_rsp *rsp = &iu->srp.login_rsp; + uint64_t tag = iu->srp.rsp.tag; + + trace_spapr_vscsi_process_login(); 
+ + /* TODO handle case that requested size is wrong and + * buffer format is wrong + */ + memset(iu, 0, sizeof(struct srp_login_rsp)); + rsp->opcode = SRP_LOGIN_RSP; + /* Don't advertise quite as many requests as we support to + * keep room for management stuff etc... + */ + rsp->req_lim_delta = cpu_to_be32(VSCSI_REQ_LIMIT-2); + rsp->tag = tag; + rsp->max_it_iu_len = cpu_to_be32(SRP_MAX_IU_LEN); + rsp->max_ti_iu_len = cpu_to_be32(SRP_MAX_IU_LEN); + /* direct and indirect */ + rsp->buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT | SRP_BUF_FORMAT_INDIRECT); + + vscsi_send_iu(s, req, sizeof(*rsp), VIOSRP_SRP_FORMAT); +} + +static void vscsi_inquiry_no_target(VSCSIState *s, vscsi_req *req) +{ + uint8_t *cdb = req_iu(req)->srp.cmd.cdb; + uint8_t resp_data[36]; + int rc, len, alen; + + /* We don't do EVPD. Also check that page_code is 0 */ + if ((cdb[1] & 0x01) || cdb[2] != 0) { + /* Send INVALID FIELD IN CDB */ + vscsi_makeup_sense(s, req, ILLEGAL_REQUEST, 0x24, 0); + vscsi_send_rsp(s, req, CHECK_CONDITION, 0, 0); + return; + } + alen = cdb[3]; + alen = (alen << 8) | cdb[4]; + len = MIN(alen, 36); + + /* Fake up inquiry using PQ=3 */ + memset(resp_data, 0, 36); + resp_data[0] = 0x7f; /* Not capable of supporting a device here */ + resp_data[2] = 0x06; /* SPS-4 */ + resp_data[3] = 0x02; /* Resp data format */ + resp_data[4] = 36 - 5; /* Additional length */ + resp_data[7] = 0x10; /* Sync transfers */ + memcpy(&resp_data[16], "QEMU EMPTY ", 16); + memcpy(&resp_data[8], "QEMU ", 8); + + req->writing = 0; + vscsi_preprocess_desc(req); + rc = vscsi_srp_transfer_data(s, req, 0, resp_data, len); + if (rc < 0) { + vscsi_makeup_sense(s, req, HARDWARE_ERROR, 0, 0); + vscsi_send_rsp(s, req, CHECK_CONDITION, 0, 0); + } else { + vscsi_send_rsp(s, req, 0, 36 - rc, 0); + } +}
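vscsi_inquiry_no_target() above hand-crafts a 36-byte standard INQUIRY response with peripheral qualifier 3 ("no device at this LUN"). The byte-0 encoding it relies on, as a small sketch (the helper name is illustrative only):

    #include <stdint.h>

    /* Byte 0 of standard INQUIRY data: qualifier in bits 7-5, device type
       in bits 4-0. 0x7f = qualifier 011b + type 0x1f, i.e. the target is
       not capable of supporting a device at this LUN. */
    static uint8_t inquiry_byte0(uint8_t qualifier, uint8_t type)
    {
        return (uint8_t)(((qualifier & 0x7) << 5) | (type & 0x1f));
    }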
+ +static void vscsi_report_luns(VSCSIState *s, vscsi_req *req) +{ + BusChild *kid; + int i, len, n, rc; + uint8_t *resp_data; + bool found_lun0; + + n = 0; + found_lun0 = false; + QTAILQ_FOREACH(kid, &s->bus.qbus.children, sibling) { + SCSIDevice *dev = SCSI_DEVICE(kid->child); + + n += 8; + if (dev->channel == 0 && dev->id == 0 && dev->lun == 0) { + found_lun0 = true; + } + } + if (!found_lun0) { + n += 8; + } + len = n+8; + + resp_data = g_malloc0(len); + stl_be_p(resp_data, n); + i = found_lun0 ? 8 : 16; + QTAILQ_FOREACH(kid, &s->bus.qbus.children, sibling) { + DeviceState *qdev = kid->child; + SCSIDevice *dev = SCSI_DEVICE(qdev); + + if (dev->id == 0 && dev->channel == 0) { + resp_data[i] = 0; /* Use simple LUN for 0 (SAM5 4.7.7.1) */ + } else { + resp_data[i] = (2 << 6); /* Otherwise LUN addressing (4.7.7.4) */ + } + resp_data[i] |= dev->id; + resp_data[i+1] = (dev->channel << 5); + resp_data[i+1] |= dev->lun; + i += 8; + } + + vscsi_preprocess_desc(req); + rc = vscsi_srp_transfer_data(s, req, 0, resp_data, len); + g_free(resp_data); + if (rc < 0) { + vscsi_makeup_sense(s, req, HARDWARE_ERROR, 0, 0); + vscsi_send_rsp(s, req, CHECK_CONDITION, 0, 0); + } else { + vscsi_send_rsp(s, req, 0, len - rc, 0); + } +} + +static int vscsi_queue_cmd(VSCSIState *s, vscsi_req *req) +{ + union srp_iu *srp = &req_iu(req)->srp; + SCSIDevice *sdev; + int n, lun; + + if ((srp->cmd.lun == 0 || be64_to_cpu(srp->cmd.lun) == SRP_REPORT_LUNS_WLUN) + && srp->cmd.cdb[0] == REPORT_LUNS) { + vscsi_report_luns(s, req); + return 0; + } + + sdev = vscsi_device_find(&s->bus, be64_to_cpu(srp->cmd.lun), &lun); + if (!sdev) { + trace_spapr_vscsi_queue_cmd_no_drive(be64_to_cpu(srp->cmd.lun)); + if (srp->cmd.cdb[0] == INQUIRY) { + vscsi_inquiry_no_target(s, req); + } else { + vscsi_makeup_sense(s, req, ILLEGAL_REQUEST, 0x24, 0x00); + vscsi_send_rsp(s, req, CHECK_CONDITION, 0, 0); + } + return 1; + } + + req->sreq = scsi_req_new(sdev, req->qtag, lun, srp->cmd.cdb, req); + n = scsi_req_enqueue(req->sreq); + + trace_spapr_vscsi_queue_cmd(req->qtag, srp->cmd.cdb[0], + scsi_command_name(srp->cmd.cdb[0]), lun, n); + + if (n) { + /* Transfer direction must be set before preprocessing the + * descriptors + */ + req->writing = (n < 1); + + /* Preprocess RDMA descriptors */ + vscsi_preprocess_desc(req); + + /* Get transfer direction and initiate transfer */ + if (n > 0) { + req->data_len = n; + } else if (n < 0) { + req->data_len = -n; + } + scsi_req_continue(req->sreq); + } + /* Don't touch req here, it may have been recycled already */ + + return 0; +}
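The LUN that vscsi_queue_cmd() passes to vscsi_device_find() is a 64-bit SRP LUN whose top two bits select the SAM addressing method (00b peripheral, 01b flat space, 10b logical unit). A sketch of the flat-space encoding that case 1 of vscsi_device_find() decodes (illustrative only):

    #include <stdint.h>

    /* SAM flat-space addressing: method 01b in bits 63-62, the LUN value
       in bits 61-48 of the (already byte-swapped) 64-bit SRP LUN. */
    static uint64_t srp_flat_lun(uint16_t lun)
    {
        return (1ULL << 62) | ((uint64_t)(lun & 0x3fff) << 48);
    }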
+ +static int vscsi_process_tsk_mgmt(VSCSIState *s, vscsi_req *req) +{ + union viosrp_iu *iu = req_iu(req); + vscsi_req *tmpreq; + int i, lun = 0, resp = SRP_TSK_MGMT_COMPLETE; + SCSIDevice *d; + uint64_t tag = iu->srp.rsp.tag; + uint8_t sol_not = iu->srp.cmd.sol_not; + + trace_spapr_vscsi_process_tsk_mgmt(iu->srp.tsk_mgmt.tsk_mgmt_func); + d = vscsi_device_find(&s->bus, + be64_to_cpu(req_iu(req)->srp.tsk_mgmt.lun), &lun); + if (!d) { + resp = SRP_TSK_MGMT_FIELDS_INVALID; + } else { + switch (iu->srp.tsk_mgmt.tsk_mgmt_func) { + case SRP_TSK_ABORT_TASK: + if (d->lun != lun) { + resp = SRP_TSK_MGMT_FIELDS_INVALID; + break; + } + + tmpreq = vscsi_find_req(s, req_iu(req)->srp.tsk_mgmt.task_tag); + if (tmpreq && tmpreq->sreq) { + assert(tmpreq->sreq->hba_private); + scsi_req_cancel(tmpreq->sreq); + } + break; + + case SRP_TSK_LUN_RESET: + if (d->lun != lun) { + resp = SRP_TSK_MGMT_FIELDS_INVALID; + break; + } + + qdev_reset_all(&d->qdev); + break; + + case SRP_TSK_ABORT_TASK_SET: + case SRP_TSK_CLEAR_TASK_SET: + if (d->lun != lun) { + resp = SRP_TSK_MGMT_FIELDS_INVALID; + break; + } + + for (i = 0; i < VSCSI_REQ_LIMIT; i++) { + tmpreq = &s->reqs[i]; + if (req_iu(tmpreq)->srp.cmd.lun + != req_iu(req)->srp.tsk_mgmt.lun) { + continue; + } + if (!tmpreq->active || !tmpreq->sreq) { + continue; + } + assert(tmpreq->sreq->hba_private); + scsi_req_cancel(tmpreq->sreq); + } + break; + + case SRP_TSK_CLEAR_ACA: + resp = SRP_TSK_MGMT_NOT_SUPPORTED; + break; + + default: + resp = SRP_TSK_MGMT_FIELDS_INVALID; + break; + } + } + + /* Compose the response here */ + QEMU_BUILD_BUG_ON(SRP_MAX_IU_DATA_LEN < 4); + memset(iu, 0, sizeof(struct srp_rsp) + 4); + iu->srp.rsp.opcode = SRP_RSP; + iu->srp.rsp.req_lim_delta = cpu_to_be32(1); + iu->srp.rsp.tag = tag; + iu->srp.rsp.flags |= SRP_RSP_FLAG_RSPVALID; + iu->srp.rsp.resp_data_len = cpu_to_be32(4); + if (resp) { + iu->srp.rsp.sol_not = (sol_not & 0x04) >> 2; + } else { + iu->srp.rsp.sol_not = (sol_not & 0x02) >> 1; + } + + iu->srp.rsp.status = GOOD; + iu->srp.rsp.data[3] = resp; + + vscsi_send_iu(s, req, sizeof(iu->srp.rsp) + 4, VIOSRP_SRP_FORMAT); + + return 1; +} + +static int vscsi_handle_srp_req(VSCSIState *s, vscsi_req *req) +{ + union srp_iu *srp = &req_iu(req)->srp; + int done = 1; + uint8_t opcode = srp->rsp.opcode; + + switch (opcode) { + case SRP_LOGIN_REQ: + vscsi_process_login(s, req); + break; + case SRP_TSK_MGMT: + done = vscsi_process_tsk_mgmt(s, req); + break; + case SRP_CMD: + done = vscsi_queue_cmd(s, req); + break; + case SRP_LOGIN_RSP: + case SRP_I_LOGOUT: + case SRP_T_LOGOUT: + case SRP_RSP: + case SRP_CRED_REQ: + case SRP_CRED_RSP: + case SRP_AER_REQ: + case SRP_AER_RSP: + fprintf(stderr, "VSCSI: Unsupported opcode %02x\n", opcode); + break; + default: + fprintf(stderr, "VSCSI: Unknown type %02x\n", opcode); + } + + return done; +} + +static int vscsi_send_adapter_info(VSCSIState *s, vscsi_req *req) +{ + struct viosrp_adapter_info *sinfo; + struct mad_adapter_info_data info; + int rc; + + sinfo = &req_iu(req)->mad.adapter_info; + +#if 0 /* What for ? */ + rc = spapr_vio_dma_read(&s->vdev, be64_to_cpu(sinfo->buffer), + &info, be16_to_cpu(sinfo->common.length)); + if (rc) { + fprintf(stderr, "vscsi_send_adapter_info: DMA read failure !\n"); + } +#endif + memset(&info, 0, sizeof(info)); + strcpy(info.srp_version, SRP_VERSION); + memcpy(info.partition_name, "qemu", sizeof("qemu")); + info.partition_number = cpu_to_be32(0); + info.mad_version = cpu_to_be32(1); + info.os_type = cpu_to_be32(2); + info.port_max_txu[0] = cpu_to_be32(VSCSI_MAX_SECTORS << 9); + + rc = spapr_vio_dma_write(&s->vdev, be64_to_cpu(sinfo->buffer), + &info, be16_to_cpu(sinfo->common.length)); + if (rc) { + fprintf(stderr, "vscsi_send_adapter_info: DMA write failure !\n"); + } + + sinfo->common.status = rc ? cpu_to_be32(1) : 0; + + return vscsi_send_iu(s, req, sizeof(*sinfo), VIOSRP_MAD_FORMAT); +}
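Both MAD handlers here follow the same shape: pull the guest buffer address and length out of the request, DMA the response into it, then report status through common.status. vscsi_send_capabilities() below additionally clamps the guest-supplied length before touching its local struct; a distilled version of that defensive pattern (dma_write is an illustrative stand-in for spapr_vio_dma_write, not a real API):

    #include <stddef.h>
    #include <stdint.h>

    static int mad_write_response(uint64_t guest_buf, size_t guest_len,
                                  const void *resp, size_t resp_len,
                                  int (*dma_write)(uint64_t, const void *,
                                                   size_t))
    {
        /* Never copy more bytes than we actually have locally. */
        size_t len = guest_len < resp_len ? guest_len : resp_len;
        return dma_write(guest_buf, resp, len);
    }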
+ +static int vscsi_send_capabilities(VSCSIState *s, vscsi_req *req) +{ + struct viosrp_capabilities *vcap; + struct capabilities cap = { }; + uint16_t len, req_len; + uint64_t buffer; + int rc; + + vcap = &req_iu(req)->mad.capabilities; + req_len = len = be16_to_cpu(vcap->common.length); + buffer = be64_to_cpu(vcap->buffer); + if (len > sizeof(cap)) { + fprintf(stderr, "vscsi_send_capabilities: capabilities size mismatch !\n"); + + /* + * Just read and populate the structure that is known. + * Zero rest of the structure. + */ + len = sizeof(cap); + } + rc = spapr_vio_dma_read(&s->vdev, buffer, &cap, len); + if (rc) { + fprintf(stderr, "vscsi_send_capabilities: DMA read failure !\n"); + } + + /* + * Current implementation does not support any migration or + * reservation capabilities. Construct the response telling the + * guest not to use them. + */ + cap.flags = 0; + cap.migration.ecl = 0; + cap.reserve.type = 0; + cap.migration.common.server_support = 0; + cap.reserve.common.server_support = 0; + + rc = spapr_vio_dma_write(&s->vdev, buffer, &cap, len); + if (rc) { + fprintf(stderr, "vscsi_send_capabilities: DMA write failure !\n"); + } + if (req_len > len) { + /* + * Being paranoid, let's not worry about the error code + * here. Actual write of the cap is done above. + */ + spapr_vio_dma_set(&s->vdev, (buffer + len), 0, (req_len - len)); + } + vcap->common.status = rc ? cpu_to_be32(1) : 0; + return vscsi_send_iu(s, req, sizeof(*vcap), VIOSRP_MAD_FORMAT); +} + +static int vscsi_handle_mad_req(VSCSIState *s, vscsi_req *req) +{ + union mad_iu *mad = &req_iu(req)->mad; + bool request_handled = false; + uint64_t retlen = 0; + + switch (be32_to_cpu(mad->empty_iu.common.type)) { + case VIOSRP_EMPTY_IU_TYPE: + fprintf(stderr, "Unsupported EMPTY MAD IU\n"); + retlen = sizeof(mad->empty_iu); + break; + case VIOSRP_ERROR_LOG_TYPE: + fprintf(stderr, "Unsupported ERROR LOG MAD IU\n"); + retlen = sizeof(mad->error_log); + break; + case VIOSRP_ADAPTER_INFO_TYPE: + vscsi_send_adapter_info(s, req); + request_handled = true; + break; + case VIOSRP_HOST_CONFIG_TYPE: + retlen = sizeof(mad->host_config); + break; + case VIOSRP_CAPABILITIES_TYPE: + vscsi_send_capabilities(s, req); + request_handled = true; + break; + default: + fprintf(stderr, "VSCSI: Unknown MAD type %02x\n", + be32_to_cpu(mad->empty_iu.common.type)); + /* + * PAPR+ says that "The length field is set to the length + * of the data structure(s) used in the command". + * As we did not recognize the request type, put zero there. + */ + retlen = 0; + } + + if (!request_handled) { + mad->empty_iu.common.status = cpu_to_be16(VIOSRP_MAD_NOT_SUPPORTED); + vscsi_send_iu(s, req, retlen, VIOSRP_MAD_FORMAT); + } + + return 1; +}
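The payload path below consumes 16-byte CRQ entries. For orientation, these are the fields vscsi_got_payload() and vscsi_do_crq() touch, per the viosrp definitions this file assumes (sketch only; all multi-byte fields are big-endian on the wire):

    #include <stdint.h>

    struct crq_sketch {
        uint8_t  valid;       /* 0x80 payload, 0xc0 init exchange,
                                 0xff link event */
        uint8_t  format;      /* VIOSRP_SRP_FORMAT or VIOSRP_MAD_FORMAT */
        uint8_t  reserved;
        uint8_t  status;      /* e.g. VIOSRP_OK / VIOSRP_ADAPTER_FAIL
                                 in responses */
        uint16_t timeout;     /* seconds */
        uint16_t iu_length;   /* bytes of the IU at iu_data_ptr */
        uint64_t iu_data_ptr; /* guest physical address of the IU */
    };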
+ +static void vscsi_got_payload(VSCSIState *s, vscsi_crq *crq) +{ + vscsi_req *req; + int done; + + req = vscsi_get_req(s); + if (req == NULL) { + fprintf(stderr, "VSCSI: Failed to get a request !\n"); + return; + } + + /* We only support a limited number of descriptors, we know + * the ibmvscsi driver uses up to 10 max, so it should fit + * in our 256-byte IUs. If not we'll have to increase the size + * of the structure. + */ + if (crq->s.IU_length > SRP_MAX_IU_LEN) { + fprintf(stderr, "VSCSI: SRP IU too long (%d bytes) !\n", + crq->s.IU_length); + vscsi_put_req(req); + return; + } + + /* XXX Handle failure differently ? */ + if (spapr_vio_dma_read(&s->vdev, crq->s.IU_data_ptr, &req->viosrp_iu_buf, + crq->s.IU_length)) { + fprintf(stderr, "vscsi_got_payload: DMA read failure !\n"); + vscsi_put_req(req); + return; + } + memcpy(&req->crq, crq, sizeof(vscsi_crq)); + + if (crq->s.format == VIOSRP_MAD_FORMAT) { + done = vscsi_handle_mad_req(s, req); + } else { + done = vscsi_handle_srp_req(s, req); + } + + if (done) { + vscsi_put_req(req); + } +} + + +static int vscsi_do_crq(struct SpaprVioDevice *dev, uint8_t *crq_data) +{ + VSCSIState *s = VIO_SPAPR_VSCSI_DEVICE(dev); + vscsi_crq crq; + + memcpy(crq.raw, crq_data, 16); + crq.s.timeout = be16_to_cpu(crq.s.timeout); + crq.s.IU_length = be16_to_cpu(crq.s.IU_length); + crq.s.IU_data_ptr = be64_to_cpu(crq.s.IU_data_ptr); + + trace_spapr_vscsi_do_crq(crq.raw[0], crq.raw[1]); + + switch (crq.s.valid) { + case 0xc0: /* Init command/response */ + + /* Respond to initialization request */ + if (crq.s.format == 0x01) { + memset(crq.raw, 0, 16); + crq.s.valid = 0xc0; + crq.s.format = 0x02; + spapr_vio_send_crq(dev, crq.raw); + } + + /* Note that in hotplug cases, we might get a 0x02 + * as a result of us emitting the init request + */ + + break; + case 0xff: /* Link event */ + + /* Not handled for now */ + + break; + case 0x80: /* Payloads */ + switch (crq.s.format) { + case VIOSRP_SRP_FORMAT: /* AKA VSCSI request */ + case VIOSRP_MAD_FORMAT: /* AKA VSCSI response */ + vscsi_got_payload(s, &crq); + break; + case VIOSRP_OS400_FORMAT: + case VIOSRP_AIX_FORMAT: + case VIOSRP_LINUX_FORMAT: + case VIOSRP_INLINE_FORMAT: + fprintf(stderr, "vscsi_do_crq: Unsupported payload format %02x\n", + crq.s.format); + break; + default: + fprintf(stderr, "vscsi_do_crq: Unknown payload format %02x\n", + crq.s.format); + } + break; + default: + fprintf(stderr, "vscsi_do_crq: unknown CRQ %02x %02x ...\n", + crq.raw[0], crq.raw[1]); + } + + return 0; +} + +static const struct SCSIBusInfo vscsi_scsi_info = { + .tcq = true, + .max_channel = 7, /* logical unit addressing format */ + .max_target = 63, + .max_lun = 31, + + .transfer_data = vscsi_transfer_data, + .complete = vscsi_command_complete, + .cancel = vscsi_request_cancelled, + .save_request = vscsi_save_request, + .load_request = vscsi_load_request, +}; + +static void spapr_vscsi_reset(SpaprVioDevice *dev) +{ + VSCSIState *s = VIO_SPAPR_VSCSI_DEVICE(dev); + int i; + + memset(s->reqs, 0, sizeof(s->reqs)); + for (i = 0; i < VSCSI_REQ_LIMIT; i++) { + s->reqs[i].qtag = i; + } +} + +static void spapr_vscsi_realize(SpaprVioDevice *dev, Error **errp) +{ + VSCSIState *s = VIO_SPAPR_VSCSI_DEVICE(dev); + + dev->crq.SendFunc = vscsi_do_crq; + + scsi_bus_init(&s->bus, sizeof(s->bus), DEVICE(dev), &vscsi_scsi_info); + + /* ibmvscsi SCSI bus does not allow hotplug.
*/ + qbus_set_hotplug_handler(BUS(&s->bus), NULL); +} + +void spapr_vscsi_create(SpaprVioBus *bus) +{ + DeviceState *dev; + + dev = qdev_new("spapr-vscsi"); + + qdev_realize_and_unref(dev, &bus->bus, &error_fatal); + scsi_bus_legacy_handle_cmdline(&VIO_SPAPR_VSCSI_DEVICE(dev)->bus); +} + +static int spapr_vscsi_devnode(SpaprVioDevice *dev, void *fdt, int node_off) +{ + int ret; + + ret = fdt_setprop_cell(fdt, node_off, "#address-cells", 2); + if (ret < 0) { + return ret; + } + + ret = fdt_setprop_cell(fdt, node_off, "#size-cells", 0); + if (ret < 0) { + return ret; + } + + return 0; +} + +static Property spapr_vscsi_properties[] = { + DEFINE_SPAPR_PROPERTIES(VSCSIState, vdev), + DEFINE_PROP_END_OF_LIST(), +}; + +static const VMStateDescription vmstate_spapr_vscsi = { + .name = "spapr_vscsi", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_SPAPR_VIO(vdev, VSCSIState), + /* VSCSI state */ + /* ???? */ + + VMSTATE_END_OF_LIST() + }, +}; + +static void spapr_vscsi_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + SpaprVioDeviceClass *k = VIO_SPAPR_DEVICE_CLASS(klass); + + k->realize = spapr_vscsi_realize; + k->reset = spapr_vscsi_reset; + k->devnode = spapr_vscsi_devnode; + k->dt_name = "v-scsi"; + k->dt_type = "vscsi"; + k->dt_compatible = "IBM,v-scsi"; + k->signal_mask = 0x00000001; + set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); + device_class_set_props(dc, spapr_vscsi_properties); + k->rtce_window_size = 0x10000000; + dc->vmsd = &vmstate_spapr_vscsi; +} + +static const TypeInfo spapr_vscsi_info = { + .name = TYPE_VIO_SPAPR_VSCSI_DEVICE, + .parent = TYPE_VIO_SPAPR_DEVICE, + .instance_size = sizeof(VSCSIState), + .class_init = spapr_vscsi_class_init, +}; + +static void spapr_vscsi_register_types(void) +{ + type_register_static(&spapr_vscsi_info); +} + +type_init(spapr_vscsi_register_types) diff --git a/hw/scsi/srp.h b/hw/scsi/srp.h new file mode 100644 index 000000000..d27f31d2d --- /dev/null +++ b/hw/scsi/srp.h @@ -0,0 +1,247 @@ +/* + * Copyright (c) 2005 Cisco Systems. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + */ + +#ifndef SCSI_SRP_H +#define SCSI_SRP_H + +/* + * Structures and constants for the SCSI RDMA Protocol (SRP) as + * defined by the INCITS T10 committee. This file was written using + * draft Revision 16a of the SRP standard. + */ + +enum { + + SRP_LOGIN_REQ = 0x00, + SRP_TSK_MGMT = 0x01, + SRP_CMD = 0x02, + SRP_I_LOGOUT = 0x03, + SRP_LOGIN_RSP = 0xc0, + SRP_RSP = 0xc1, + SRP_LOGIN_REJ = 0xc2, + SRP_T_LOGOUT = 0x80, + SRP_CRED_REQ = 0x81, + SRP_AER_REQ = 0x82, + SRP_CRED_RSP = 0x41, + SRP_AER_RSP = 0x42 +}; + +enum { + SRP_BUF_FORMAT_DIRECT = 1 << 1, + SRP_BUF_FORMAT_INDIRECT = 1 << 2 +}; + +enum { + SRP_NO_DATA_DESC = 0, + SRP_DATA_DESC_DIRECT = 1, + SRP_DATA_DESC_INDIRECT = 2 +}; + +enum { + SRP_TSK_ABORT_TASK = 0x01, + SRP_TSK_ABORT_TASK_SET = 0x02, + SRP_TSK_CLEAR_TASK_SET = 0x04, + SRP_TSK_LUN_RESET = 0x08, + SRP_TSK_CLEAR_ACA = 0x40 +}; + +enum srp_login_rej_reason { + SRP_LOGIN_REJ_UNABLE_ESTABLISH_CHANNEL = 0x00010000, + SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES = 0x00010001, + SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE = 0x00010002, + SRP_LOGIN_REJ_UNABLE_ASSOCIATE_CHANNEL = 0x00010003, + SRP_LOGIN_REJ_UNSUPPORTED_DESCRIPTOR_FMT = 0x00010004, + SRP_LOGIN_REJ_MULTI_CHANNEL_UNSUPPORTED = 0x00010005, + SRP_LOGIN_REJ_CHANNEL_LIMIT_REACHED = 0x00010006 +}; + +enum { + SRP_REV10_IB_IO_CLASS = 0xff00, + SRP_REV16A_IB_IO_CLASS = 0x0100 +}; + +enum { + SRP_TSK_MGMT_COMPLETE = 0x00, + SRP_TSK_MGMT_FIELDS_INVALID = 0x02, + SRP_TSK_MGMT_NOT_SUPPORTED = 0x04, + SRP_TSK_MGMT_FAILED = 0x05 +}; + +struct srp_direct_buf { + uint64_t va; + uint32_t key; + uint32_t len; +}; + +/* + * We need the packed attribute because the SRP spec puts the list of + * descriptors at an offset of 20, which is not aligned to the size of + * struct srp_direct_buf. The whole structure must be packed to avoid + * having the 20-byte structure padded to 24 bytes on 64-bit architectures. + */ +struct srp_indirect_buf { + struct srp_direct_buf table_desc; + uint32_t len; + struct srp_direct_buf desc_list[0]; +} QEMU_PACKED; + +enum { + SRP_MULTICHAN_SINGLE = 0, + SRP_MULTICHAN_MULTI = 1 +}; + +struct srp_login_req { + uint8_t opcode; + uint8_t reserved1[7]; + uint64_t tag; + uint32_t req_it_iu_len; + uint8_t reserved2[4]; + uint16_t req_buf_fmt; + uint8_t req_flags; + uint8_t reserved3[5]; + uint8_t initiator_port_id[16]; + uint8_t target_port_id[16]; +}; + +/* + * The SRP spec defines the size of the LOGIN_RSP structure to be 52 + * bytes, so it needs to be packed to avoid having it padded to 56 + * bytes on 64-bit architectures. + */ +struct srp_login_rsp { + uint8_t opcode; + uint8_t reserved1[3]; + uint32_t req_lim_delta; + uint64_t tag; + uint32_t max_it_iu_len; + uint32_t max_ti_iu_len; + uint16_t buf_fmt; + uint8_t rsp_flags; + uint8_t reserved2[25]; +} QEMU_PACKED; + +struct srp_login_rej { + uint8_t opcode; + uint8_t reserved1[3]; + uint32_t reason; + uint64_t tag; + uint8_t reserved2[8]; + uint16_t buf_fmt; + uint8_t reserved3[6]; +}; + +struct srp_i_logout { + uint8_t opcode; + uint8_t reserved[7]; + uint64_t tag; +}; + +struct srp_t_logout { + uint8_t opcode; + uint8_t sol_not; + uint8_t reserved[2]; + uint32_t reason; + uint64_t tag; +}; + +/* + * We need the packed attribute because the SRP spec only aligns the + * 8-byte LUN field to 4 bytes. 
+ */ +struct srp_tsk_mgmt { + uint8_t opcode; + uint8_t sol_not; + uint8_t reserved1[6]; + uint64_t tag; + uint8_t reserved2[4]; + uint64_t lun; + uint8_t reserved3[2]; + uint8_t tsk_mgmt_func; + uint8_t reserved4; + uint64_t task_tag; + uint8_t reserved5[8]; +} QEMU_PACKED; + +/* + * We need the packed attribute because the SRP spec only aligns the + * 8-byte LUN field to 4 bytes. + */ +struct srp_cmd { + uint8_t opcode; + uint8_t sol_not; + uint8_t reserved1[3]; + uint8_t buf_fmt; + uint8_t data_out_desc_cnt; + uint8_t data_in_desc_cnt; + uint64_t tag; + uint8_t reserved2[4]; + uint64_t lun; + uint8_t reserved3; + uint8_t task_attr; + uint8_t reserved4; + uint8_t add_cdb_len; + uint8_t cdb[16]; + uint8_t add_data[0]; +} QEMU_PACKED; + +enum { + SRP_RSP_FLAG_RSPVALID = 1 << 0, + SRP_RSP_FLAG_SNSVALID = 1 << 1, + SRP_RSP_FLAG_DOOVER = 1 << 2, + SRP_RSP_FLAG_DOUNDER = 1 << 3, + SRP_RSP_FLAG_DIOVER = 1 << 4, + SRP_RSP_FLAG_DIUNDER = 1 << 5 +}; + +/* + * The SRP spec defines the size of the RSP structure to be 36 bytes, + * so it needs to be packed to avoid having it padded to 40 bytes on + * 64-bit architectures. + */ +struct srp_rsp { + uint8_t opcode; + uint8_t sol_not; + uint8_t reserved1[2]; + uint32_t req_lim_delta; + uint64_t tag; + uint8_t reserved2[2]; + uint8_t flags; + uint8_t status; + uint32_t data_out_res_cnt; + uint32_t data_in_res_cnt; + uint32_t sense_data_len; + uint32_t resp_data_len; + uint8_t data[0]; +} QEMU_PACKED; + +#endif /* SCSI_SRP_H */ diff --git a/hw/scsi/trace-events b/hw/scsi/trace-events new file mode 100644 index 000000000..92d5b40f8 --- /dev/null +++ b/hw/scsi/trace-events @@ -0,0 +1,354 @@ +# See docs/devel/tracing.rst for syntax documentation. + +# scsi-bus.c +scsi_req_alloc(int target, int lun, int tag) "target %d lun %d tag %d" +scsi_req_cancel(int target, int lun, int tag) "target %d lun %d tag %d" +scsi_req_data(int target, int lun, int tag, int len) "target %d lun %d tag %d len %d" +scsi_req_data_canceled(int target, int lun, int tag, int len) "target %d lun %d tag %d len %d" +scsi_req_dequeue(int target, int lun, int tag) "target %d lun %d tag %d" +scsi_req_continue(int target, int lun, int tag) "target %d lun %d tag %d" +scsi_req_continue_canceled(int target, int lun, int tag) "target %d lun %d tag %d" +scsi_req_parsed(int target, int lun, int tag, int cmd, int mode, int xfer) "target %d lun %d tag %d command %d dir %d length %d" +scsi_req_parsed_lba(int target, int lun, int tag, int cmd, uint64_t lba) "target %d lun %d tag %d command %d lba %"PRIu64 +scsi_req_parse_bad(int target, int lun, int tag, int cmd) "target %d lun %d tag %d command %d" +scsi_req_build_sense(int target, int lun, int tag, int key, int asc, int ascq) "target %d lun %d tag %d key 0x%02x asc 0x%02x ascq 0x%02x" +scsi_device_set_ua(int target, int lun, int key, int asc, int ascq) "target %d lun %d key 0x%02x asc 0x%02x ascq 0x%02x" +scsi_report_luns(int target, int lun, int tag) "target %d lun %d tag %d" +scsi_inquiry(int target, int lun, int tag, int cdb1, int cdb2) "target %d lun %d tag %d page 0x%02x/0x%02x" +scsi_test_unit_ready(int target, int lun, int tag) "target %d lun %d tag %d" +scsi_request_sense(int target, int lun, int tag) "target %d lun %d tag %d" + +# mptsas.c +mptsas_command_complete(void *dev, uint32_t ctx, uint32_t status, uint32_t resid) "dev %p context 0x%08x status 0x%x resid %d" +mptsas_diag_read(void *dev, uint32_t addr, uint32_t val) "dev %p addr 0x%08x value 0x%08x" +mptsas_diag_write(void *dev, uint32_t addr, uint32_t val) "dev %p addr 0x%08x value 
0x%08x" +mptsas_irq_intx(void *dev, int level) "dev %p level %d" +mptsas_irq_msi(void *dev) "dev %p " +mptsas_mmio_read(void *dev, uint32_t addr, uint32_t val) "dev %p addr 0x%08x value 0x%x" +mptsas_mmio_unhandled_read(void *dev, uint32_t addr) "dev %p addr 0x%08x" +mptsas_mmio_unhandled_write(void *dev, uint32_t addr, uint32_t val) "dev %p addr 0x%08x value 0x%x" +mptsas_mmio_write(void *dev, uint32_t addr, uint32_t val) "dev %p addr 0x%08x value 0x%x" +mptsas_process_message(void *dev, int msg, uint32_t ctx) "dev %p cmd %d context 0x%08x" +mptsas_process_scsi_io_request(void *dev, int bus, int target, int lun, uint64_t len) "dev %p dev %d:%d:%d length %"PRIu64"" +mptsas_reset(void *dev) "dev %p " +mptsas_scsi_overflow(void *dev, uint32_t ctx, uint64_t req, uint64_t found) "dev %p context 0x%08x: %"PRIu64"/%"PRIu64"" +mptsas_sgl_overflow(void *dev, uint32_t ctx, uint64_t req, uint64_t found) "dev %p context 0x%08x: %"PRIu64"/%"PRIu64"" +mptsas_unhandled_cmd(void *dev, uint32_t ctx, uint8_t msg_cmd) "dev %p context 0x%08x: Unhandled cmd 0x%x" +mptsas_unhandled_doorbell_cmd(void *dev, int cmd) "dev %p value 0x%08x" + +# mptconfig.c +mptsas_config_sas_device(void *dev, int address, int port, int phy_handle, int dev_handle, int page) "dev %p address %d (port %d, handles: phy %d dev %d) page %d" +mptsas_config_sas_phy(void *dev, int address, int port, int phy_handle, int dev_handle, int page) "dev %p address %d (port %d, handles: phy %d dev %d) page %d" + +# megasas.c +megasas_init_firmware(uint64_t pa) "pa 0x%" PRIx64 " " +megasas_init_queue(uint64_t queue_pa, int queue_len, uint64_t head, uint64_t tail, uint32_t flags) "queue at 0x%" PRIx64 " len %d head 0x%" PRIx64 " tail 0x%" PRIx64 " flags 0x%x" +megasas_initq_map_failed(int frame) "scmd %d: failed to map queue" +megasas_initq_mapped(uint64_t pa) "queue already mapped at 0x%" PRIx64 +megasas_initq_mismatch(int queue_len, int fw_cmds) "queue size %d max fw cmds %d" +megasas_qf_mapped(unsigned int index) "skip mapped frame 0x%x" +megasas_qf_new(unsigned int index, uint64_t frame) "frame 0x%x addr 0x%" PRIx64 +megasas_qf_busy(unsigned long pa) "all frames busy for frame 0x%lx" +megasas_qf_enqueue(unsigned int index, unsigned int count, uint64_t context, unsigned int head, unsigned int tail, int busy) "frame 0x%x count %d context 0x%" PRIx64 " head 0x%x tail 0x%x busy %d" +megasas_qf_update(unsigned int head, unsigned int tail, unsigned int busy) "head 0x%x tail 0x%x busy %d" +megasas_qf_map_failed(int cmd, unsigned long frame) "scmd %d: frame %lu" +megasas_qf_complete_noirq(uint64_t context) "context 0x%" PRIx64 " " +megasas_qf_complete(uint64_t context, unsigned int head, unsigned int tail, int busy) "context 0x%" PRIx64 " head 0x%x tail 0x%x busy %d" +megasas_frame_busy(uint64_t addr) "frame 0x%" PRIx64 " busy" +megasas_unhandled_frame_cmd(int cmd, uint8_t frame_cmd) "scmd %d: MFI cmd 0x%x" +megasas_handle_scsi(const char *frame, int bus, int dev, int lun, void *sdev, unsigned long size) "%s dev %x/%x/%x sdev %p xfer %lu" +megasas_scsi_target_not_present(const char *frame, int bus, int dev, int lun) "%s dev %x/%x/%x" +megasas_scsi_invalid_cdb_len(const char *frame, int bus, int dev, int lun, int len) "%s dev %x/%x/%x invalid cdb len %d" +megasas_iov_read_overflow(int cmd, int bytes, int len) "scmd %d: %d/%d bytes" +megasas_iov_write_overflow(int cmd, int bytes, int len) "scmd %d: %d/%d bytes" +megasas_iov_read_underflow(int cmd, int bytes, int len) "scmd %d: %d/%d bytes" +megasas_iov_write_underflow(int cmd, int bytes, int len) "scmd %d: 
%d/%d bytes" +megasas_scsi_req_alloc_failed(const char *frame, int dev, int lun) "%s dev %x/%x" +megasas_scsi_read_start(int cmd, int len) "scmd %d: transfer %d bytes of data" +megasas_scsi_write_start(int cmd, int len) "scmd %d: transfer %d bytes of data" +megasas_scsi_nodata(int cmd) "scmd %d: no data to be transferred" +megasas_scsi_complete(int cmd, uint32_t status, int len, int xfer) "scmd %d: status 0x%x, len %u/%u" +megasas_command_complete(int cmd, uint32_t status, uint32_t resid) "scmd %d: status 0x%x, residual %d" +megasas_handle_io(int cmd, const char *frame, int dev, int lun, unsigned long lba, unsigned long count) "scmd %d: %s dev %x/%x lba 0x%lx count %lu" +megasas_io_target_not_present(int cmd, const char *frame, int dev, int lun) "scmd %d: %s dev 1/%x/%x LUN not present" +megasas_io_read_start(int cmd, unsigned long lba, unsigned long count, unsigned long len) "scmd %d: start LBA 0x%lx %lu blocks (%lu bytes)" +megasas_io_write_start(int cmd, unsigned long lba, unsigned long count, unsigned long len) "scmd %d: start LBA 0x%lx %lu blocks (%lu bytes)" +megasas_io_complete(int cmd, uint32_t len) "scmd %d: %d bytes" +megasas_iovec_sgl_overflow(int cmd, int index, int limit) "scmd %d: iovec count %d limit %d" +megasas_iovec_sgl_underflow(int cmd, int index) "scmd %d: iovec count %d" +megasas_iovec_sgl_invalid(int cmd, int index, uint64_t pa, uint32_t len) "scmd %d: element %d pa 0x%" PRIx64 " len %u" +megasas_iovec_overflow(int cmd, int len, int limit) "scmd %d: len %d limit %d" +megasas_iovec_underflow(int cmd, int len, int limit) "scmd %d: len %d limit %d" +megasas_handle_dcmd(int cmd, int opcode) "scmd %d: MFI DCMD opcode 0x%x" +megasas_finish_dcmd(int cmd, int size) "scmd %d: MFI DCMD wrote %d bytes" +megasas_dcmd_req_alloc_failed(int cmd, const char *desc) "scmd %d: %s" +megasas_dcmd_internal_submit(int cmd, const char *desc, int dev) "scmd %d: %s to dev %d" +megasas_dcmd_internal_finish(int cmd, int opcode, int lun) "scmd %d: cmd 0x%x lun %d" +megasas_dcmd_internal_invalid(int cmd, int opcode) "scmd %d: DCMD 0x%x" +megasas_dcmd_unhandled(int cmd, int opcode, int len) "scmd %d: opcode 0x%x, len %d" +megasas_dcmd_zero_sge(int cmd) "scmd %d: zero DCMD sge count" +megasas_dcmd_invalid_sge(int cmd, int count) "scmd %d: DCMD sge count %d" +megasas_dcmd_invalid_xfer_len(int cmd, unsigned long size, unsigned long max) "scmd %d: xfer len %ld, max %ld" +megasas_dcmd_enter(int cmd, const char *dcmd, int len) "scmd %d: DCMD %s len %d" +megasas_dcmd_dummy(int cmd, unsigned long size) "scmd %d: xfer len %ld" +megasas_dcmd_set_fw_time(int cmd, unsigned long time) "scmd %d: Set FW time 0x%lx" +megasas_dcmd_pd_get_list(int cmd, int num, int max, int offset) "scmd %d: DCMD PD get list: %d / %d PDs, size %d" +megasas_dcmd_ld_get_list(int cmd, int num, int max) "scmd %d: DCMD LD get list: found %d / %d LDs" +megasas_dcmd_ld_get_info(int cmd, int ld_id) "scmd %d: dev %d" +megasas_dcmd_ld_list_query(int cmd, int flags) "scmd %d: query flags 0x%x" +megasas_dcmd_pd_get_info(int cmd, int pd_id) "scmd %d: dev %d" +megasas_dcmd_pd_list_query(int cmd, int flags) "scmd %d: query flags 0x%x" +megasas_dcmd_reset_ld(int cmd, int target_id) "scmd %d: dev %d" +megasas_dcmd_unsupported(int cmd, unsigned long size) "scmd %d: set properties len %ld" +megasas_abort_frame(int cmd, int abort_cmd) "scmd %d: frame 0x%x" +megasas_abort_no_cmd(int cmd, uint64_t context) "scmd %d: no active command for frame context 0x%" PRIx64 +megasas_abort_invalid_context(int cmd, uint64_t context, int abort_cmd) "scmd %d: invalid 
frame context 0x%" PRIx64 " for abort frame 0x%x" +megasas_reset(int fw_state) "firmware state 0x%x" +megasas_init(int sges, int cmds, const char *mode) "Using %d sges, %d cmds, %s mode" +megasas_msix_raise(int vector) "vector %d" +megasas_msi_raise(int vector) "vector %d" +megasas_irq_lower(void) "INTx" +megasas_irq_raise(void) "INTx" +megasas_intr_enabled(void) "Interrupts enabled" +megasas_intr_disabled(void) "Interrupts disabled" +megasas_msix_enabled(int vector) "vector %d" +megasas_msi_enabled(int vector) "vector %d" +megasas_mmio_readl(const char *reg, uint32_t val) "reg %s: 0x%x" +megasas_mmio_invalid_readl(unsigned long addr) "addr 0x%lx" +megasas_mmio_writel(const char *reg, uint32_t val) "reg %s: 0x%x" +megasas_mmio_invalid_writel(uint32_t addr, uint32_t val) "addr 0x%x: 0x%x" + +# vmw_pvscsi.c +pvscsi_ring_init_data(uint32_t txr_len_log2, uint32_t rxr_len_log2) "TX/RX rings logarithms set to %d/%d" +pvscsi_ring_init_msg(uint32_t len_log2) "MSG ring logarithm set to %d" +pvscsi_ring_flush_cmp(uint64_t filled_cmp_ptr) "new production counter of completion ring is 0x%"PRIx64 +pvscsi_ring_flush_msg(uint64_t filled_cmp_ptr) "new production counter of message ring is 0x%"PRIx64 +pvscsi_update_irq_level(bool raise, uint64_t mask, uint64_t status) "interrupt level set to %d (MASK: 0x%"PRIx64", STATUS: 0x%"PRIx64")" +pvscsi_update_irq_msi(void) "sending MSI notification" +pvscsi_cmp_ring_put(unsigned long addr) "got completion descriptor 0x%lx" +pvscsi_msg_ring_put(unsigned long addr) "got message descriptor 0x%lx" +pvscsi_complete_request(uint64_t context, uint64_t len, uint8_t sense_key) "completion: ctx: 0x%"PRIx64", len: 0x%"PRIx64", sense key: %u" +pvscsi_get_sg_list(int nsg, size_t size) "get SG list: depth: %u, size: %zu" +pvscsi_get_next_sg_elem(uint32_t flags) "unknown flags in SG element (val: 0x%x)" +pvscsi_command_complete_not_found(uint32_t tag) "can't find request for tag 0x%x" +pvscsi_command_complete_data_run(void) "not all data required for command transferred" +pvscsi_command_complete_sense_len(int len) "sense information length is %d bytes" +pvscsi_convert_sglist(uint64_t context, unsigned long addr, uint32_t resid) "element: ctx: 0x%"PRIx64" addr: 0x%lx, len: %ul" +pvscsi_process_req_descr(uint8_t cmd, uint64_t ctx) "SCSI cmd 0x%x, ctx: 0x%"PRIx64 +pvscsi_process_req_descr_unknown_device(void) "command directed to unknown device rejected" +pvscsi_process_req_descr_invalid_dir(void) "command with invalid transfer direction rejected" +pvscsi_process_io(unsigned long addr) "got descriptor 0x%lx" +pvscsi_on_cmd_noimpl(const char* cmd) "unimplemented command %s ignored" +pvscsi_on_cmd_reset_dev(uint32_t tgt, int lun, void* dev) "PVSCSI_CMD_RESET_DEVICE[target %u lun %d (dev 0x%p)]" +pvscsi_on_cmd_arrived(const char* cmd) "command %s arrived" +pvscsi_on_cmd_abort(uint64_t ctx, uint32_t tgt) "command PVSCSI_CMD_ABORT_CMD for ctx 0x%"PRIx64", target %u" +pvscsi_on_cmd_unknown(uint64_t cmd_id) "unknown command 0x%"PRIx64 +pvscsi_on_cmd_unknown_data(uint32_t data) "data for unknown command 0x:0x%x" +pvscsi_io_write(const char* cmd, uint64_t val) "%s write: 0x%"PRIx64 +pvscsi_io_write_unknown(unsigned long addr, unsigned sz, uint64_t val) "unknown write address: 0x%lx size: %u bytes value: 0x%"PRIx64 +pvscsi_io_read(const char* cmd, uint64_t status) "%s read: 0x%"PRIx64 +pvscsi_io_read_unknown(unsigned long addr, unsigned sz) "unknown read address: 0x%lx size: %u bytes" +pvscsi_init_msi_fail(int res) "failed to initialize MSI, error %d" +pvscsi_state(const char* state) 
"starting %s ..." +pvscsi_tx_rings_ppn(const char* label, uint64_t ppn) "%s page: 0x%"PRIx64 +pvscsi_tx_rings_num_pages(const char* label, uint32_t num) "Number of %s pages: %u" + +# esp.c +esp_error_fifo_overrun(void) "FIFO overrun" +esp_error_unhandled_command(uint32_t val) "unhandled command (0x%2.2x)" +esp_error_invalid_write(uint32_t val, uint32_t addr) "invalid write of 0x%02x at [0x%x]" +esp_raise_irq(void) "Raise IRQ" +esp_lower_irq(void) "Lower IRQ" +esp_raise_drq(void) "Raise DREQ" +esp_lower_drq(void) "Lower DREQ" +esp_dma_enable(void) "Raise enable" +esp_dma_disable(void) "Lower enable" +esp_pdma_read(int size) "pDMA read %u bytes" +esp_pdma_write(int size) "pDMA write %u bytes" +esp_get_cmd(uint32_t dmalen, int target) "len %d target %d" +esp_do_command_phase(uint8_t busid) "busid 0x%x" +esp_do_identify(uint8_t byte) "0x%x" +esp_handle_satn_stop(uint32_t cmdlen) "cmdlen %d" +esp_write_response(uint32_t status) "Transfer status (status=%d)" +esp_do_dma(uint32_t cmdlen, uint32_t len) "command len %d + %d" +esp_command_complete(void) "SCSI Command complete" +esp_command_complete_deferred(void) "SCSI Command complete deferred" +esp_command_complete_unexpected(void) "SCSI command completed unexpectedly" +esp_command_complete_fail(void) "Command failed" +esp_transfer_data(uint32_t dma_left, int32_t ti_size) "transfer %d/%d" +esp_handle_ti(uint32_t minlen) "Transfer Information len %d" +esp_handle_ti_cmd(uint32_t cmdlen) "command len %d" +esp_mem_readb(uint32_t saddr, uint8_t reg) "reg[%d]: 0x%2.2x" +esp_mem_writeb(uint32_t saddr, uint8_t reg, uint32_t val) "reg[%d]: 0x%2.2x -> 0x%2.2x" +esp_mem_writeb_cmd_nop(uint32_t val) "NOP (0x%2.2x)" +esp_mem_writeb_cmd_flush(uint32_t val) "Flush FIFO (0x%2.2x)" +esp_mem_writeb_cmd_reset(uint32_t val) "Chip reset (0x%2.2x)" +esp_mem_writeb_cmd_bus_reset(uint32_t val) "Bus reset (0x%2.2x)" +esp_mem_writeb_cmd_iccs(uint32_t val) "Initiator Command Complete Sequence (0x%2.2x)" +esp_mem_writeb_cmd_msgacc(uint32_t val) "Message Accepted (0x%2.2x)" +esp_mem_writeb_cmd_pad(uint32_t val) "Transfer padding (0x%2.2x)" +esp_mem_writeb_cmd_satn(uint32_t val) "Set ATN (0x%2.2x)" +esp_mem_writeb_cmd_rstatn(uint32_t val) "Reset ATN (0x%2.2x)" +esp_mem_writeb_cmd_sel(uint32_t val) "Select without ATN (0x%2.2x)" +esp_mem_writeb_cmd_selatn(uint32_t val) "Select with ATN (0x%2.2x)" +esp_mem_writeb_cmd_selatns(uint32_t val) "Select with ATN & stop (0x%2.2x)" +esp_mem_writeb_cmd_ensel(uint32_t val) "Enable selection (0x%2.2x)" +esp_mem_writeb_cmd_dissel(uint32_t val) "Disable selection (0x%2.2x)" +esp_mem_writeb_cmd_ti(uint32_t val) "Transfer Information (0x%2.2x)" + +# esp-pci.c +esp_pci_error_invalid_dma_direction(void) "invalid DMA transfer direction" +esp_pci_error_invalid_read(uint32_t reg) "read access outside bounds (reg 0x%x)" +esp_pci_error_invalid_write(uint32_t reg) "write access outside bounds (reg 0x%x)" +esp_pci_error_invalid_write_dma(uint32_t val, uint32_t addr) "invalid write of 0x%02x at [0x%x]" +esp_pci_dma_read(uint32_t saddr, uint32_t reg) "reg[%d]: 0x%8.8x" +esp_pci_dma_write(uint32_t saddr, uint32_t reg, uint32_t val) "reg[%d]: 0x%8.8x -> 0x%8.8x" +esp_pci_dma_idle(uint32_t val) "IDLE (0x%.8x)" +esp_pci_dma_blast(uint32_t val) "BLAST (0x%.8x)" +esp_pci_dma_abort(uint32_t val) "ABORT (0x%.8x)" +esp_pci_dma_start(uint32_t val) "START (0x%.8x)" +esp_pci_sbac_read(uint32_t reg) "sbac: 0x%8.8x" +esp_pci_sbac_write(uint32_t reg, uint32_t val) "sbac: 0x%8.8x -> 0x%8.8x" + +# spapr_vscsi.c +spapr_vscsi_send_rsp(uint8_t status, int32_t res_in, int32_t 
res_out) "status: 0x%x, res_in: %"PRId32", res_out: %"PRId32 +spapr_vscsi_fetch_desc_no_data(void) "no data descriptor" +spapr_vscsi_fetch_desc_direct(void) "direct segment" +spapr_vscsi_fetch_desc_indirect(uint32_t qtag, unsigned desc, unsigned local_desc) "indirect segment local tag=0x%"PRIx32" desc#%u/%u" +spapr_vscsi_fetch_desc_out_of_range(unsigned desc, unsigned desc_offset) "#%u is ouf of range (%u bytes)" +spapr_vscsi_fetch_desc_dma_read_error(int rc) "spapr_vio_dma_read -> %d reading ext_desc" +spapr_vscsi_fetch_desc_indirect_seg_ext(uint32_t qtag, unsigned n, unsigned desc, uint64_t va, uint32_t len) "indirect segment ext. tag=0x%"PRIx32" desc#%u/%u { va=0x%"PRIx64" len=0x%"PRIx32" }" +spapr_vscsi_fetch_desc_out_of_desc(void) "Out of descriptors !" +spapr_vscsi_fetch_desc_out_of_desc_boundary(unsigned offset, unsigned desc, uint32_t len) " offset=0x%x is out of a descriptor #%u boundary=0x%"PRIx32 +spapr_vscsi_fetch_desc_done(unsigned desc_num, unsigned desc_offset, uint64_t va, uint32_t len) " cur=%u offs=0x%x ret { va=0x%"PRIx64" len=0x%"PRIx32" }" +spapr_vscsi_srp_indirect_data(uint32_t len) "indirect segment 0x%"PRIx32" bytes" +spapr_vscsi_srp_indirect_data_rw(int writing, int rc) "spapr_vio_dma_r/w(%d) -> %d" +spapr_vscsi_srp_indirect_data_buf(unsigned a, unsigned b, unsigned c, unsigned d) " data: %02x %02x %02x %02x..." +spapr_vscsi_srp_transfer_data(uint32_t len) "no data desc transfer, skipping 0x%"PRIx32" bytes" +spapr_vscsi_transfer_data(uint32_t tag, uint32_t len, void *req) "SCSI xfer complete tag=0x%"PRIx32" len=0x%"PRIx32", req=%p" +spapr_vscsi_command_complete(uint32_t tag, uint32_t status, void *req) "SCSI cmd complete, tag=0x%"PRIx32" status=0x%"PRIx32", req=%p" +spapr_vscsi_command_complete_sense_data1(uint32_t len, unsigned s0, unsigned s1, unsigned s2, unsigned s3, unsigned s4, unsigned s5, unsigned s6, unsigned s7) "Sense data, %d bytes: %02x %02x %02x %02x %02x %02x %02x %02x" +spapr_vscsi_command_complete_sense_data2(unsigned s8, unsigned s9, unsigned s10, unsigned s11, unsigned s12, unsigned s13, unsigned s14, unsigned s15) " %02x %02x %02x %02x %02x %02x %02x %02x" +spapr_vscsi_command_complete_status(uint32_t status) "Command complete err=%"PRIu32 +spapr_vscsi_save_request(uint32_t qtag, unsigned desc, unsigned offset) "saving tag=%"PRIu32", current desc#%u, offset=0x%x" +spapr_vscsi_load_request(uint32_t qtag, unsigned desc, unsigned offset) "restoring tag=%"PRIu32", current desc#%u, offset=0x%x" +spapr_vscsi_process_login(void) "Got login, sending response !" +spapr_vscsi_process_tsk_mgmt(uint8_t func) "tsk_mgmt_func 0x%02x" +spapr_vscsi_queue_cmd_no_drive(uint64_t lun) "Command for lun 0x%08" PRIx64 " with no drive" +spapr_vscsi_queue_cmd(uint32_t qtag, unsigned cdb, const char *cmd, int lun, int ret) "Queued command tag 0x%"PRIx32" CMD 0x%x=%s LUN %d ret: %d" +spapr_vscsi_do_crq(unsigned c0, unsigned c1) "crq: %02x %02x ..." 
+ +# lsi53c895a.c +lsi_reset(void) "Reset" +lsi_update_irq(int level, uint8_t dstat, uint8_t sist1, uint8_t sist0) "Update IRQ level %d dstat 0x%02x sist 0x%02x0x%02x" +lsi_update_irq_disconnected(void) "Handled IRQs & disconnected, looking for pending processes" +lsi_script_scsi_interrupt(uint8_t stat1, uint8_t stat0, uint8_t sist1, uint8_t sist0) "SCSI Interrupt 0x%02x0x%02x prev 0x%02x0x%02x" +lsi_script_dma_interrupt(uint8_t stat, uint8_t dstat) "DMA Interrupt 0x%x prev 0x%x" +lsi_bad_phase_jump(uint32_t dsp) "Data phase mismatch jump to 0x%"PRIX32 +lsi_bad_phase_interrupt(void) "Phase mismatch interrupt" +lsi_bad_selection(uint32_t id) "Selected absent target %"PRIu32 +lsi_do_dma_unavailable(void) "DMA no data available" +lsi_do_dma(uint64_t addr, int len) "DMA addr=0x%"PRIx64" len=%d" +lsi_queue_command(uint32_t tag) "Queueing tag=0x%"PRId32 +lsi_add_msg_byte_error(void) "MSG IN data too long" +lsi_add_msg_byte(uint8_t data) "MSG IN 0x%02x" +lsi_reselect(int id) "Reselected target %d" +lsi_queue_req_error(void *p) "Multiple IO pending for request %p" +lsi_queue_req(uint32_t tag) "Queueing IO tag=0x%"PRIx32 +lsi_command_complete(uint32_t status) "Command complete status=%"PRId32 +lsi_transfer_data(uint32_t tag, uint32_t len) "Data ready tag=0x%"PRIx32" len=%"PRId32 +lsi_do_command(uint32_t dbc) "Send command len=%"PRId32 +lsi_do_status(uint32_t dbc, uint8_t status) "Get status len=%"PRId32" status=%d" +lsi_do_status_error(void) "Bad Status move" +lsi_do_msgin(uint32_t dbc, int len) "Message in len=%"PRId32" %d" +lsi_do_msgout(uint32_t dbc) "MSG out len=%"PRId32 +lsi_do_msgout_disconnect(void) "MSG: Disconnect" +lsi_do_msgout_noop(void) "MSG: No Operation" +lsi_do_msgout_extended(uint8_t msg, uint8_t len) "Extended message 0x%x (len %d)" +lsi_do_msgout_ignored(const char *msg) "%s (ignored)" +lsi_do_msgout_simplequeue(uint8_t select_tag) "SIMPLE queue tag=0x%x" +lsi_do_msgout_abort(uint32_t tag) "MSG: ABORT TAG tag=0x%"PRId32 +lsi_do_msgout_clearqueue(uint32_t tag) "MSG: CLEAR QUEUE tag=0x%"PRIx32 +lsi_do_msgout_busdevicereset(uint32_t tag) "MSG: BUS DEVICE RESET tag=0x%"PRIx32 +lsi_do_msgout_select(int id) "Select LUN %d" +lsi_memcpy(uint32_t dest, uint32_t src, int count) "memcpy dest 0x%"PRIx32" src 0x%"PRIx32" count %d" +lsi_wait_reselect(void) "Wait Reselect" +lsi_execute_script(uint32_t dsp, uint32_t insn, uint32_t addr) "SCRIPTS dsp=0x%"PRIx32" opcode 0x%"PRIx32" arg 0x%"PRIx32 +lsi_execute_script_blockmove_delayed(void) "Delayed select timeout" +lsi_execute_script_blockmove_badphase(const char *phase, const char *expected) "Wrong phase got %s expected %s" +lsi_execute_script_io_alreadyreselected(void) "Already reselected, jumping to alternative address" +lsi_execute_script_io_selected(uint8_t id, const char *atn) "Selected target %d%s" +lsi_execute_script_io_disconnect(void) "Wait Disconnect" +lsi_execute_script_io_set(const char *atn, const char *ack, const char *tm, const char *cc) "Set%s%s%s%s" +lsi_execute_script_io_clear(const char *atn, const char *ack, const char *tm, const char *cc) "Clear%s%s%s%s" +lsi_execute_script_io_opcode(const char *opcode, int reg, const char *opname, uint8_t data8, uint32_t sfbr, const char *ssfbr) "%s reg 0x%x %s data8=0x%02x sfbr=0x%02x%s" +lsi_execute_script_tc_nop(void) "NOP" +lsi_execute_script_tc_delayedselect_timeout(void) "Delayed select timeout" +lsi_execute_script_tc_compc(int result) "Compare carry %d" +lsi_execute_script_tc_compp(const char *phase, char op, const char *insn_phase) "Compare phase %s %c= %s" 
+lsi_execute_script_tc_compd(uint32_t sfbr, uint8_t mask, char op, int result) "Compare data 0x%"PRIx32" & 0x%x %c= 0x%x" +lsi_execute_script_tc_jump(uint32_t addr) "Jump to 0x%"PRIx32 +lsi_execute_script_tc_call(uint32_t addr) "Call 0x%"PRIx32 +lsi_execute_script_tc_return(uint32_t addr) "Return to 0x%"PRIx32 +lsi_execute_script_tc_interrupt(uint32_t addr) "Interrupt 0x%"PRIx32 +lsi_execute_script_tc_illegal(void) "Illegal transfer control" +lsi_execute_script_tc_cc_failed(void) "Control condition failed" +lsi_execute_script_mm_load(int reg, int n, uint32_t addr, int data) "Load reg 0x%x size %d addr 0x%"PRIx32" = 0x%08x" +lsi_execute_script_mm_store(int reg, int n, uint32_t addr) "Store reg 0x%x size %d addr 0x%"PRIx32 +lsi_execute_script_stop(void) "SCRIPTS execution stopped" +lsi_awoken(void) "Woken by SIGP" +lsi_reg_read(const char *name, int offset, uint8_t ret) "Read reg %s 0x%x = 0x%02x" +lsi_reg_write(const char *name, int offset, uint8_t val) "Write reg %s 0x%x = 0x%02x" + +# virtio-scsi.c +virtio_scsi_cmd_req(int lun, uint32_t tag, uint8_t cmd) "virtio_scsi_cmd_req lun=%u tag=0x%x cmd=0x%x" +virtio_scsi_cmd_resp(int lun, uint32_t tag, int response, uint8_t status) "virtio_scsi_cmd_resp lun=%u tag=0x%x response=%d status=0x%x" +virtio_scsi_tmf_req(int lun, uint32_t tag, int subtype) "virtio_scsi_tmf_req lun=%u tag=0x%x subtype=%d" +virtio_scsi_tmf_resp(int lun, uint32_t tag, int response) "virtio_scsi_tmf_resp lun=%u tag=0x%x response=%d" +virtio_scsi_an_req(int lun, uint32_t event_requested) "virtio_scsi_an_req lun=%u event_requested=0x%x" +virtio_scsi_an_resp(int lun, int response) "virtio_scsi_an_resp lun=%u response=%d" +virtio_scsi_event(int lun, int event, int reason) "virtio_scsi_event lun=%u event=%d reason=%d" + +# scsi-disk.c +scsi_disk_check_condition(uint32_t tag, uint8_t key, uint8_t asc, uint8_t ascq) "Command complete tag=0x%x sense=%d/%d/%d" +scsi_disk_read_complete(uint32_t tag, size_t size) "Data ready tag=0x%x len=%zd" +scsi_disk_read_data_count(uint32_t sector_count) "Read sector_count=%d" +scsi_disk_read_data_invalid(void) "Data transfer direction invalid" +scsi_disk_write_complete_noio(uint32_t tag, size_t size) "Write complete tag=0x%x more=%zd" +scsi_disk_write_data_invalid(void) "Data transfer direction invalid" +scsi_disk_emulate_vpd_page_00(size_t xfer) "Inquiry EVPD[Supported pages] buffer size %zd" +scsi_disk_emulate_vpd_page_80_not_supported(void) "Inquiry EVPD[Serial number] not supported" +scsi_disk_emulate_vpd_page_80(size_t xfer) "Inquiry EVPD[Serial number] buffer size %zd" +scsi_disk_emulate_vpd_page_83(size_t xfer) "Inquiry EVPD[Device identification] buffer size %zd" +scsi_disk_emulate_vpd_page_b0_not_supported(void) "Inquiry EVPD[Block limits] not supported for CDROM" +scsi_disk_emulate_mode_sense(int cmd, int page, size_t xfer, int control) "Mode Sense(%d) (page %d, xfer %zd, page_control %d)" +scsi_disk_emulate_read_toc(int start_track, int format, int msf) "Read TOC (track %d format %d msf %d)" +scsi_disk_emulate_read_data(int buflen) "Read buf_len=%d" +scsi_disk_emulate_write_data(int buflen) "Write buf_len=%d" +scsi_disk_emulate_command_SAI_16(void) "SAI READ CAPACITY(16)" +scsi_disk_emulate_command_SAI_unsupported(void) "Unsupported Service Action In" +scsi_disk_emulate_command_SEEK_10(uint64_t lba) "Seek(10) (sector %" PRId64 ")" +scsi_disk_emulate_command_MODE_SELECT(size_t xfer) "Mode Select(6) (len %zd)" +scsi_disk_emulate_command_MODE_SELECT_10(size_t xfer) "Mode Select(10) (len %zd)" +scsi_disk_emulate_command_UNMAP(size_t
xfer) "Unmap (len %zd)" +scsi_disk_emulate_command_VERIFY(int bytchk) "Verify (bytchk %d)" +scsi_disk_emulate_command_WRITE_SAME(int cmd, size_t xfer) "WRITE SAME %d (len %zd)" +scsi_disk_emulate_command_UNKNOWN(int cmd, const char *name) "Unknown SCSI command (0x%2.2x=%s)" +scsi_disk_dma_command_READ(uint64_t lba, uint32_t len) "Read (sector %" PRId64 ", count %u)" +scsi_disk_dma_command_WRITE(const char *cmd, uint64_t lba, int len) "Write %s(sector %" PRId64 ", count %u)" +scsi_disk_new_request(uint32_t lun, uint32_t tag, const char *line) "Command: lun=%d tag=0x%x data=%s" +scsi_disk_aio_sgio_command(uint32_t tag, uint8_t cmd, uint64_t lba, int len, uint32_t timeout) "disk aio sgio: tag=0x%x cmd=0x%x (sector %" PRId64 ", count %d) timeout=%u" + +# scsi-generic.c +scsi_generic_command_complete_noio(void *req, uint32_t tag, int statuc) "Command complete %p tag=0x%x status=%d" +scsi_generic_read_complete(uint32_t tag, int len) "Data ready tag=0x%x len=%d" +scsi_generic_read_data(uint32_t tag) "scsi_read_data tag=0x%x" +scsi_generic_write_complete(int ret) "scsi_write_complete() ret = %d" +scsi_generic_write_complete_blocksize(int blocksize) "block size %d" +scsi_generic_write_data(uint32_t tag) "scsi_write_data tag=0x%x" +scsi_generic_send_command(const char *line) "Command: data=%s" +scsi_generic_realize_type(int type) "device type %d" +scsi_generic_realize_blocksize(int blocksize) "block size %d" +scsi_generic_aio_sgio_command(uint32_t tag, uint8_t cmd, uint32_t timeout) "generic aio sgio: tag=0x%x cmd=0x%x timeout=%u" +scsi_generic_ioctl_sgio_command(uint8_t cmd, uint32_t timeout) "generic ioctl sgio: cmd=0x%x timeout=%u" +scsi_generic_ioctl_sgio_done(uint8_t cmd, int ret, uint8_t status, uint8_t host_status) "generic ioctl sgio: cmd=0x%x ret=%d status=0x%x host_status=0x%x" diff --git a/hw/scsi/trace.h b/hw/scsi/trace.h new file mode 100644 index 000000000..4ce267358 --- /dev/null +++ b/hw/scsi/trace.h @@ -0,0 +1 @@ +#include "trace/trace-hw_scsi.h" diff --git a/hw/scsi/vhost-scsi-common.c b/hw/scsi/vhost-scsi-common.c new file mode 100644 index 000000000..767f827e5 --- /dev/null +++ b/hw/scsi/vhost-scsi-common.c @@ -0,0 +1,171 @@ +/* + * vhost-scsi-common + * + * Copyright (c) 2016 Nutanix Inc. All rights reserved. + * + * Author: + * Felipe Franciosi + * + * This work is largely based on the "vhost-scsi" implementation by: + * Stefan Hajnoczi + * Nicholas Bellinger + * + * This work is licensed under the terms of the GNU LGPL, version 2 or later. + * See the COPYING.LIB file in the top-level directory. 
+ * + */ + +#include "qemu/osdep.h" +#include "qemu/error-report.h" +#include "qemu/module.h" +#include "hw/virtio/vhost.h" +#include "hw/virtio/vhost-scsi-common.h" +#include "hw/virtio/virtio-scsi.h" +#include "hw/virtio/virtio-bus.h" +#include "hw/virtio/virtio-access.h" +#include "hw/fw-path-provider.h" + +int vhost_scsi_common_start(VHostSCSICommon *vsc) +{ + int ret, i; + VirtIODevice *vdev = VIRTIO_DEVICE(vsc); + BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev))); + VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); + + VirtIOSCSICommon *vs = (VirtIOSCSICommon *)vsc; + + if (!k->set_guest_notifiers) { + error_report("binding does not support guest notifiers"); + return -ENOSYS; + } + + ret = vhost_dev_enable_notifiers(&vsc->dev, vdev); + if (ret < 0) { + return ret; + } + + ret = k->set_guest_notifiers(qbus->parent, vsc->dev.nvqs, true); + if (ret < 0) { + error_report("Error binding guest notifier"); + goto err_host_notifiers; + } + + vsc->dev.acked_features = vdev->guest_features; + + assert(vsc->inflight == NULL); + vsc->inflight = g_new0(struct vhost_inflight, 1); + ret = vhost_dev_get_inflight(&vsc->dev, + vs->conf.virtqueue_size, + vsc->inflight); + if (ret < 0) { + error_report("Error get inflight: %d", -ret); + goto err_guest_notifiers; + } + + ret = vhost_dev_set_inflight(&vsc->dev, vsc->inflight); + if (ret < 0) { + error_report("Error set inflight: %d", -ret); + goto err_guest_notifiers; + } + + ret = vhost_dev_start(&vsc->dev, vdev); + if (ret < 0) { + error_report("Error start vhost dev"); + goto err_guest_notifiers; + } + + /* guest_notifier_mask/pending not used yet, so just unmask + * everything here. virtio-pci will do the right thing by + * enabling/disabling irqfd. + */ + for (i = 0; i < vsc->dev.nvqs; i++) { + vhost_virtqueue_mask(&vsc->dev, vdev, vsc->dev.vq_index + i, false); + } + + return ret; + +err_guest_notifiers: + g_free(vsc->inflight); + vsc->inflight = NULL; + + k->set_guest_notifiers(qbus->parent, vsc->dev.nvqs, false); +err_host_notifiers: + vhost_dev_disable_notifiers(&vsc->dev, vdev); + return ret; +} + +void vhost_scsi_common_stop(VHostSCSICommon *vsc) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(vsc); + BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev))); + VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); + int ret = 0; + + vhost_dev_stop(&vsc->dev, vdev); + + if (k->set_guest_notifiers) { + ret = k->set_guest_notifiers(qbus->parent, vsc->dev.nvqs, false); + if (ret < 0) { + error_report("vhost guest notifier cleanup failed: %d", ret); + } + } + assert(ret >= 0); + + if (vsc->inflight) { + vhost_dev_free_inflight(vsc->inflight); + vsc->inflight = NULL; + } + + vhost_dev_disable_notifiers(&vsc->dev, vdev); +} + +uint64_t vhost_scsi_common_get_features(VirtIODevice *vdev, uint64_t features, + Error **errp) +{ + VHostSCSICommon *vsc = VHOST_SCSI_COMMON(vdev); + + /* Turn on predefined features supported by this device */ + features |= vsc->host_features; + + return vhost_get_features(&vsc->dev, vsc->feature_bits, features); +} + +void vhost_scsi_common_set_config(VirtIODevice *vdev, const uint8_t *config) +{ + VirtIOSCSIConfig *scsiconf = (VirtIOSCSIConfig *)config; + VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(vdev); + + if ((uint32_t)virtio_ldl_p(vdev, &scsiconf->sense_size) != vs->sense_size || + (uint32_t)virtio_ldl_p(vdev, &scsiconf->cdb_size) != vs->cdb_size) { + error_report("vhost-scsi does not support changing the sense data and " + "CDB sizes"); + exit(1); + } +} + +/* + * Implementation of an interface to adjust firmware path + * for the 
bootindex property handling. + */ +char *vhost_scsi_common_get_fw_dev_path(FWPathProvider *p, BusState *bus, + DeviceState *dev) +{ + VHostSCSICommon *vsc = VHOST_SCSI_COMMON(dev); + /* format: /channel@channel/vhost-scsi@target,lun */ + return g_strdup_printf("/channel@%x/%s@%x,%x", vsc->channel, + qdev_fw_name(dev), vsc->target, vsc->lun); +} + +static const TypeInfo vhost_scsi_common_info = { + .name = TYPE_VHOST_SCSI_COMMON, + .parent = TYPE_VIRTIO_SCSI_COMMON, + .instance_size = sizeof(VHostSCSICommon), + .abstract = true, +}; + +static void virtio_register_types(void) +{ + type_register_static(&vhost_scsi_common_info); +} + +type_init(virtio_register_types) diff --git a/hw/scsi/vhost-scsi.c b/hw/scsi/vhost-scsi.c new file mode 100644 index 000000000..039caf261 --- /dev/null +++ b/hw/scsi/vhost-scsi.c @@ -0,0 +1,331 @@ +/* + * vhost_scsi host device + * + * Copyright IBM, Corp. 2011 + * + * Authors: + * Stefan Hajnoczi + * + * Changes for QEMU mainline + tcm_vhost kernel upstream: + * Nicholas Bellinger + * + * This work is licensed under the terms of the GNU LGPL, version 2 or later. + * See the COPYING.LIB file in the top-level directory. + * + */ + +#include "qemu/osdep.h" +#include <linux/vhost.h> +#include <sys/ioctl.h> +#include "qapi/error.h" +#include "qemu/error-report.h" +#include "qemu/module.h" +#include "monitor/monitor.h" +#include "migration/blocker.h" +#include "hw/virtio/vhost-scsi.h" +#include "hw/virtio/vhost.h" +#include "hw/virtio/virtio-scsi.h" +#include "hw/virtio/virtio-bus.h" +#include "hw/virtio/virtio-access.h" +#include "hw/fw-path-provider.h" +#include "hw/qdev-properties.h" +#include "qemu/cutils.h" +#include "sysemu/sysemu.h" + +/* Features supported by host kernel. */ +static const int kernel_feature_bits[] = { + VIRTIO_F_NOTIFY_ON_EMPTY, + VIRTIO_RING_F_INDIRECT_DESC, + VIRTIO_RING_F_EVENT_IDX, + VIRTIO_SCSI_F_HOTPLUG, + VHOST_INVALID_FEATURE_BIT +}; + +static int vhost_scsi_set_endpoint(VHostSCSI *s) +{ + VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s); + VHostSCSICommon *vsc = VHOST_SCSI_COMMON(s); + const VhostOps *vhost_ops = vsc->dev.vhost_ops; + struct vhost_scsi_target backend; + int ret; + + memset(&backend, 0, sizeof(backend)); + pstrcpy(backend.vhost_wwpn, sizeof(backend.vhost_wwpn), vs->conf.wwpn); + ret = vhost_ops->vhost_scsi_set_endpoint(&vsc->dev, &backend); + if (ret < 0) { + return -errno; + } + return 0; +} + +static void vhost_scsi_clear_endpoint(VHostSCSI *s) +{ + VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s); + VHostSCSICommon *vsc = VHOST_SCSI_COMMON(s); + struct vhost_scsi_target backend; + const VhostOps *vhost_ops = vsc->dev.vhost_ops; + + memset(&backend, 0, sizeof(backend)); + pstrcpy(backend.vhost_wwpn, sizeof(backend.vhost_wwpn), vs->conf.wwpn); + vhost_ops->vhost_scsi_clear_endpoint(&vsc->dev, &backend); +} + +static int vhost_scsi_start(VHostSCSI *s) +{ + int ret, abi_version; + VHostSCSICommon *vsc = VHOST_SCSI_COMMON(s); + const VhostOps *vhost_ops = vsc->dev.vhost_ops; + + ret = vhost_ops->vhost_scsi_get_abi_version(&vsc->dev, &abi_version); + if (ret < 0) { + return -errno; + } + if (abi_version > VHOST_SCSI_ABI_VERSION) { + error_report("vhost-scsi: The running tcm_vhost kernel abi_version:" + " %d is greater than vhost_scsi userspace supports: %d," + " please upgrade your version of QEMU", abi_version, + VHOST_SCSI_ABI_VERSION); + return -ENOSYS; + } + + ret = vhost_scsi_common_start(vsc); + if (ret < 0) { + return ret; + } + + ret = vhost_scsi_set_endpoint(s); + if (ret < 0) { + error_report("Error setting vhost-scsi endpoint"); +
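/* Unwind vhost_scsi_common_start(): stop the vhost device and release the guest/host notifiers before propagating the error. */ +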
vhost_scsi_common_stop(vsc); + } + + return ret; +} + +static void vhost_scsi_stop(VHostSCSI *s) +{ + VHostSCSICommon *vsc = VHOST_SCSI_COMMON(s); + + vhost_scsi_clear_endpoint(s); + vhost_scsi_common_stop(vsc); +} + +static void vhost_scsi_set_status(VirtIODevice *vdev, uint8_t val) +{ + VHostSCSI *s = VHOST_SCSI(vdev); + VHostSCSICommon *vsc = VHOST_SCSI_COMMON(s); + bool start = (val & VIRTIO_CONFIG_S_DRIVER_OK); + + if (!vdev->vm_running) { + start = false; + } + + if (vsc->dev.started == start) { + return; + } + + if (start) { + int ret; + + ret = vhost_scsi_start(s); + if (ret < 0) { + error_report("unable to start vhost-scsi: %s", strerror(-ret)); + exit(1); + } + } else { + vhost_scsi_stop(s); + } +} + +static void vhost_dummy_handle_output(VirtIODevice *vdev, VirtQueue *vq) +{ +} + +static int vhost_scsi_pre_save(void *opaque) +{ + VHostSCSICommon *vsc = opaque; + + /* At this point, backend must be stopped, otherwise + * it might keep writing to memory. */ + assert(!vsc->dev.started); + + return 0; +} + +static const VMStateDescription vmstate_virtio_vhost_scsi = { + .name = "virtio-vhost_scsi", + .minimum_version_id = 1, + .version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_VIRTIO_DEVICE, + VMSTATE_END_OF_LIST() + }, + .pre_save = vhost_scsi_pre_save, +}; + +static void vhost_scsi_realize(DeviceState *dev, Error **errp) +{ + VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(dev); + VHostSCSICommon *vsc = VHOST_SCSI_COMMON(dev); + Error *err = NULL; + int vhostfd = -1; + int ret; + + if (!vs->conf.wwpn) { + error_setg(errp, "vhost-scsi: missing wwpn"); + return; + } + + if (vs->conf.vhostfd) { + vhostfd = monitor_fd_param(monitor_cur(), vs->conf.vhostfd, errp); + if (vhostfd == -1) { + error_prepend(errp, "vhost-scsi: unable to parse vhostfd: "); + return; + } + } else { + vhostfd = open("/dev/vhost-scsi", O_RDWR); + if (vhostfd < 0) { + error_setg(errp, "vhost-scsi: open vhost char device failed: %s", + strerror(errno)); + return; + } + } + + virtio_scsi_common_realize(dev, + vhost_dummy_handle_output, + vhost_dummy_handle_output, + vhost_dummy_handle_output, + &err); + if (err != NULL) { + error_propagate(errp, err); + goto close_fd; + } + + if (!vsc->migratable) { + error_setg(&vsc->migration_blocker, + "vhost-scsi does not support migration in all cases. 
" + "When external environment supports it (Orchestrator migrates " + "target SCSI device state or use shared storage over network), " + "set 'migratable' property to true to enable migration."); + if (migrate_add_blocker(vsc->migration_blocker, errp) < 0) { + goto free_virtio; + } + } + + vsc->dev.nvqs = VHOST_SCSI_VQ_NUM_FIXED + vs->conf.num_queues; + vsc->dev.vqs = g_new0(struct vhost_virtqueue, vsc->dev.nvqs); + vsc->dev.vq_index = 0; + vsc->dev.backend_features = 0; + + ret = vhost_dev_init(&vsc->dev, (void *)(uintptr_t)vhostfd, + VHOST_BACKEND_TYPE_KERNEL, 0, errp); + if (ret < 0) { + goto free_vqs; + } + + /* At present, channel and lun both are 0 for bootable vhost-scsi disk */ + vsc->channel = 0; + vsc->lun = 0; + /* Note: we can also get the minimum tpgt from kernel */ + vsc->target = vs->conf.boot_tpgt; + + return; + + free_vqs: + g_free(vsc->dev.vqs); + if (!vsc->migratable) { + migrate_del_blocker(vsc->migration_blocker); + } + free_virtio: + error_free(vsc->migration_blocker); + virtio_scsi_common_unrealize(dev); + close_fd: + close(vhostfd); + return; +} + +static void vhost_scsi_unrealize(DeviceState *dev) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(dev); + VHostSCSICommon *vsc = VHOST_SCSI_COMMON(dev); + struct vhost_virtqueue *vqs = vsc->dev.vqs; + + if (!vsc->migratable) { + migrate_del_blocker(vsc->migration_blocker); + error_free(vsc->migration_blocker); + } + + /* This will stop vhost backend. */ + vhost_scsi_set_status(vdev, 0); + + vhost_dev_cleanup(&vsc->dev); + g_free(vqs); + + virtio_scsi_common_unrealize(dev); +} + +static Property vhost_scsi_properties[] = { + DEFINE_PROP_STRING("vhostfd", VirtIOSCSICommon, conf.vhostfd), + DEFINE_PROP_STRING("wwpn", VirtIOSCSICommon, conf.wwpn), + DEFINE_PROP_UINT32("boot_tpgt", VirtIOSCSICommon, conf.boot_tpgt, 0), + DEFINE_PROP_UINT32("num_queues", VirtIOSCSICommon, conf.num_queues, + VIRTIO_SCSI_AUTO_NUM_QUEUES), + DEFINE_PROP_UINT32("virtqueue_size", VirtIOSCSICommon, conf.virtqueue_size, + 128), + DEFINE_PROP_BOOL("seg_max_adjust", VirtIOSCSICommon, conf.seg_max_adjust, + true), + DEFINE_PROP_UINT32("max_sectors", VirtIOSCSICommon, conf.max_sectors, + 0xFFFF), + DEFINE_PROP_UINT32("cmd_per_lun", VirtIOSCSICommon, conf.cmd_per_lun, 128), + DEFINE_PROP_BIT64("t10_pi", VHostSCSICommon, host_features, + VIRTIO_SCSI_F_T10_PI, + false), + DEFINE_PROP_BOOL("migratable", VHostSCSICommon, migratable, false), + DEFINE_PROP_END_OF_LIST(), +}; + +static void vhost_scsi_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass); + FWPathProviderClass *fwc = FW_PATH_PROVIDER_CLASS(klass); + + device_class_set_props(dc, vhost_scsi_properties); + dc->vmsd = &vmstate_virtio_vhost_scsi; + set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); + vdc->realize = vhost_scsi_realize; + vdc->unrealize = vhost_scsi_unrealize; + vdc->get_features = vhost_scsi_common_get_features; + vdc->set_config = vhost_scsi_common_set_config; + vdc->set_status = vhost_scsi_set_status; + fwc->get_dev_path = vhost_scsi_common_get_fw_dev_path; +} + +static void vhost_scsi_instance_init(Object *obj) +{ + VHostSCSICommon *vsc = VHOST_SCSI_COMMON(obj); + + vsc->feature_bits = kernel_feature_bits; + + device_add_bootindex_property(obj, &vsc->bootindex, "bootindex", NULL, + DEVICE(vsc)); +} + +static const TypeInfo vhost_scsi_info = { + .name = TYPE_VHOST_SCSI, + .parent = TYPE_VHOST_SCSI_COMMON, + .instance_size = sizeof(VHostSCSI), + .class_init = vhost_scsi_class_init, + .instance_init = 
vhost_scsi_instance_init, + .interfaces = (InterfaceInfo[]) { + { TYPE_FW_PATH_PROVIDER }, + { } + }, +}; + +static void virtio_register_types(void) +{ + type_register_static(&vhost_scsi_info); +} + +type_init(virtio_register_types) diff --git a/hw/scsi/vhost-user-scsi.c b/hw/scsi/vhost-user-scsi.c new file mode 100644 index 000000000..1b2f7eed9 --- /dev/null +++ b/hw/scsi/vhost-user-scsi.c @@ -0,0 +1,239 @@ +/* + * vhost-user-scsi host device + * + * Copyright (c) 2016 Nutanix Inc. All rights reserved. + * + * Author: + * Felipe Franciosi + * + * This work is largely based on the "vhost-scsi" implementation by: + * Stefan Hajnoczi + * Nicholas Bellinger + * + * This work is licensed under the terms of the GNU LGPL, version 2 or later. + * See the COPYING.LIB file in the top-level directory. + * + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/error-report.h" +#include "hw/fw-path-provider.h" +#include "hw/qdev-core.h" +#include "hw/qdev-properties.h" +#include "hw/qdev-properties-system.h" +#include "hw/virtio/vhost.h" +#include "hw/virtio/vhost-backend.h" +#include "hw/virtio/vhost-user-scsi.h" +#include "hw/virtio/virtio.h" +#include "hw/virtio/virtio-access.h" +#include "chardev/char-fe.h" +#include "sysemu/sysemu.h" + +/* Features supported by the host application */ +static const int user_feature_bits[] = { + VIRTIO_F_NOTIFY_ON_EMPTY, + VIRTIO_RING_F_INDIRECT_DESC, + VIRTIO_RING_F_EVENT_IDX, + VIRTIO_SCSI_F_HOTPLUG, + VHOST_INVALID_FEATURE_BIT +}; + +enum VhostUserProtocolFeature { + VHOST_USER_PROTOCOL_F_RESET_DEVICE = 13, +}; + +static void vhost_user_scsi_set_status(VirtIODevice *vdev, uint8_t status) +{ + VHostUserSCSI *s = (VHostUserSCSI *)vdev; + VHostSCSICommon *vsc = VHOST_SCSI_COMMON(s); + bool start = (status & VIRTIO_CONFIG_S_DRIVER_OK) && vdev->vm_running; + + if (vsc->dev.started == start) { + return; + } + + if (start) { + int ret; + + ret = vhost_scsi_common_start(vsc); + if (ret < 0) { + error_report("unable to start vhost-user-scsi: %s", strerror(-ret)); + exit(1); + } + } else { + vhost_scsi_common_stop(vsc); + } +} + +static void vhost_user_scsi_reset(VirtIODevice *vdev) +{ + VHostSCSICommon *vsc = VHOST_SCSI_COMMON(vdev); + struct vhost_dev *dev = &vsc->dev; + + /* + * Historically, reset was not implemented so only reset devices + * that are expecting it. 
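+ * Support is advertised through the VHOST_USER_PROTOCOL_F_RESET_DEVICE protocol feature bit; a backend that never acknowledged it is assumed not to handle a reset request.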
+ */ + if (!virtio_has_feature(dev->protocol_features, + VHOST_USER_PROTOCOL_F_RESET_DEVICE)) { + return; + } + + if (dev->vhost_ops->vhost_reset_device) { + dev->vhost_ops->vhost_reset_device(dev); + } +} + +static void vhost_dummy_handle_output(VirtIODevice *vdev, VirtQueue *vq) +{ +} + +static void vhost_user_scsi_realize(DeviceState *dev, Error **errp) +{ + VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(dev); + VHostUserSCSI *s = VHOST_USER_SCSI(dev); + VHostSCSICommon *vsc = VHOST_SCSI_COMMON(s); + struct vhost_virtqueue *vqs = NULL; + Error *err = NULL; + int ret; + + if (!vs->conf.chardev.chr) { + error_setg(errp, "vhost-user-scsi: missing chardev"); + return; + } + + virtio_scsi_common_realize(dev, vhost_dummy_handle_output, + vhost_dummy_handle_output, + vhost_dummy_handle_output, &err); + if (err != NULL) { + error_propagate(errp, err); + return; + } + + if (!vhost_user_init(&s->vhost_user, &vs->conf.chardev, errp)) { + goto free_virtio; + } + + vsc->dev.nvqs = VIRTIO_SCSI_VQ_NUM_FIXED + vs->conf.num_queues; + vsc->dev.vqs = g_new0(struct vhost_virtqueue, vsc->dev.nvqs); + vsc->dev.vq_index = 0; + vsc->dev.backend_features = 0; + vqs = vsc->dev.vqs; + + ret = vhost_dev_init(&vsc->dev, &s->vhost_user, + VHOST_BACKEND_TYPE_USER, 0, errp); + if (ret < 0) { + goto free_vhost; + } + + /* Channel and lun both are 0 for bootable vhost-user-scsi disk */ + vsc->channel = 0; + vsc->lun = 0; + vsc->target = vs->conf.boot_tpgt; + + return; + +free_vhost: + vhost_user_cleanup(&s->vhost_user); + g_free(vqs); +free_virtio: + virtio_scsi_common_unrealize(dev); +} + +static void vhost_user_scsi_unrealize(DeviceState *dev) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(dev); + VHostUserSCSI *s = VHOST_USER_SCSI(dev); + VHostSCSICommon *vsc = VHOST_SCSI_COMMON(s); + struct vhost_virtqueue *vqs = vsc->dev.vqs; + + /* This will stop the vhost backend. 
*/ + vhost_user_scsi_set_status(vdev, 0); + + vhost_dev_cleanup(&vsc->dev); + g_free(vqs); + + virtio_scsi_common_unrealize(dev); + vhost_user_cleanup(&s->vhost_user); +} + +static Property vhost_user_scsi_properties[] = { + DEFINE_PROP_CHR("chardev", VirtIOSCSICommon, conf.chardev), + DEFINE_PROP_UINT32("boot_tpgt", VirtIOSCSICommon, conf.boot_tpgt, 0), + DEFINE_PROP_UINT32("num_queues", VirtIOSCSICommon, conf.num_queues, + VIRTIO_SCSI_AUTO_NUM_QUEUES), + DEFINE_PROP_UINT32("virtqueue_size", VirtIOSCSICommon, conf.virtqueue_size, + 128), + DEFINE_PROP_UINT32("max_sectors", VirtIOSCSICommon, conf.max_sectors, + 0xFFFF), + DEFINE_PROP_UINT32("cmd_per_lun", VirtIOSCSICommon, conf.cmd_per_lun, 128), + DEFINE_PROP_BIT64("hotplug", VHostSCSICommon, host_features, + VIRTIO_SCSI_F_HOTPLUG, + true), + DEFINE_PROP_BIT64("param_change", VHostSCSICommon, host_features, + VIRTIO_SCSI_F_CHANGE, + true), + DEFINE_PROP_BIT64("t10_pi", VHostSCSICommon, host_features, + VIRTIO_SCSI_F_T10_PI, + false), + DEFINE_PROP_END_OF_LIST(), +}; + +static const VMStateDescription vmstate_vhost_scsi = { + .name = "virtio-scsi", + .minimum_version_id = 1, + .version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_VIRTIO_DEVICE, + VMSTATE_END_OF_LIST() + }, +}; + +static void vhost_user_scsi_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass); + FWPathProviderClass *fwc = FW_PATH_PROVIDER_CLASS(klass); + + device_class_set_props(dc, vhost_user_scsi_properties); + dc->vmsd = &vmstate_vhost_scsi; + set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); + vdc->realize = vhost_user_scsi_realize; + vdc->unrealize = vhost_user_scsi_unrealize; + vdc->get_features = vhost_scsi_common_get_features; + vdc->set_config = vhost_scsi_common_set_config; + vdc->set_status = vhost_user_scsi_set_status; + vdc->reset = vhost_user_scsi_reset; + fwc->get_dev_path = vhost_scsi_common_get_fw_dev_path; +} + +static void vhost_user_scsi_instance_init(Object *obj) +{ + VHostSCSICommon *vsc = VHOST_SCSI_COMMON(obj); + + vsc->feature_bits = user_feature_bits; + + /* Add the bootindex property for this object */ + device_add_bootindex_property(obj, &vsc->bootindex, "bootindex", NULL, + DEVICE(vsc)); +} + +static const TypeInfo vhost_user_scsi_info = { + .name = TYPE_VHOST_USER_SCSI, + .parent = TYPE_VHOST_SCSI_COMMON, + .instance_size = sizeof(VHostUserSCSI), + .class_init = vhost_user_scsi_class_init, + .instance_init = vhost_user_scsi_instance_init, + .interfaces = (InterfaceInfo[]) { + { TYPE_FW_PATH_PROVIDER }, + { } + }, +}; + +static void virtio_register_types(void) +{ + type_register_static(&vhost_user_scsi_info); +} + +type_init(virtio_register_types) diff --git a/hw/scsi/viosrp.h b/hw/scsi/viosrp.h new file mode 100644 index 000000000..e5f9768e8 --- /dev/null +++ b/hw/scsi/viosrp.h @@ -0,0 +1,217 @@ +/*****************************************************************************/ +/* srp.h -- SCSI RDMA Protocol definitions */ +/* */ +/* Written By: Colin Devilbis, IBM Corporation */ +/* */ +/* Copyright (C) 2003 IBM Corporation */ +/* */ +/* This program is free software; you can redistribute it and/or modify */ +/* it under the terms of the GNU General Public License as published by */ +/* the Free Software Foundation; either version 2 of the License, or */ +/* (at your option) any later version. 
*/ +/* */ +/* This program is distributed in the hope that it will be useful, */ +/* but WITHOUT ANY WARRANTY; without even the implied warranty of */ +/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */ +/* GNU General Public License for more details. */ +/* */ +/* You should have received a copy of the GNU General Public License */ +/* along with this program; if not, write to the Free Software */ +/* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ +/* */ +/* */ +/* This file contains structures and definitions for IBM RPA (RS/6000 */ +/* platform architecture) implementation of the SRP (SCSI RDMA Protocol) */ +/* standard. SRP is used on IBM iSeries and pSeries platforms to send SCSI */ +/* commands between logical partitions. */ +/* */ +/* SRP Information Units (IUs) are sent on a "Command/Response Queue" (CRQ) */ +/* between partitions. The definitions in this file are architected, */ +/* and cannot be changed without breaking compatibility with other versions */ +/* of Linux and other operating systems (AIX, OS/400) that talk this protocol*/ +/* between logical partitions */ +/*****************************************************************************/ +#ifndef PPC_VIOSRP_H +#define PPC_VIOSRP_H + +#include "hw/scsi/srp.h" + +#define SRP_VERSION "16.a" +#define SRP_MAX_IU_LEN 256 +#define SRP_MAX_LOC_LEN 32 + +union srp_iu { + struct srp_login_req login_req; + struct srp_login_rsp login_rsp; + struct srp_login_rej login_rej; + struct srp_i_logout i_logout; + struct srp_t_logout t_logout; + struct srp_tsk_mgmt tsk_mgmt; + struct srp_cmd cmd; + struct srp_rsp rsp; +}; + +enum viosrp_crq_formats { + VIOSRP_SRP_FORMAT = 0x01, + VIOSRP_MAD_FORMAT = 0x02, + VIOSRP_OS400_FORMAT = 0x03, + VIOSRP_AIX_FORMAT = 0x04, + VIOSRP_LINUX_FORMAT = 0x06, + VIOSRP_INLINE_FORMAT = 0x07 +}; + +enum viosrp_crq_status { + VIOSRP_OK = 0x0, + VIOSRP_NONRECOVERABLE_ERR = 0x1, + VIOSRP_VIOLATES_MAX_XFER = 0x2, + VIOSRP_PARTNER_PANIC = 0x3, + VIOSRP_DEVICE_BUSY = 0x8, + VIOSRP_ADAPTER_FAIL = 0x10, + VIOSRP_OK2 = 0x99, +}; + +struct viosrp_crq { + uint8_t valid; /* used by RPA */ + uint8_t format; /* SCSI vs out-of-band */ + uint8_t reserved; + uint8_t status; /* non-scsi failure? (e.g. DMA failure) */ + uint16_t timeout; /* in seconds */ + uint16_t IU_length; /* in bytes */ + uint64_t IU_data_ptr; /* the TCE for transferring data */ +}; + +/* MADs are Management requests above and beyond the IUs defined in the SRP + * standard. + */ +enum viosrp_mad_types { + VIOSRP_EMPTY_IU_TYPE = 0x01, + VIOSRP_ERROR_LOG_TYPE = 0x02, + VIOSRP_ADAPTER_INFO_TYPE = 0x03, + VIOSRP_HOST_CONFIG_TYPE = 0x04, + VIOSRP_CAPABILITIES_TYPE = 0x05, + VIOSRP_ENABLE_FAST_FAIL = 0x08, +}; + +enum viosrp_mad_status { + VIOSRP_MAD_SUCCESS = 0x00, + VIOSRP_MAD_NOT_SUPPORTED = 0xF1, + VIOSRP_MAD_FAILED = 0xF7, +}; + +enum viosrp_capability_type { + MIGRATION_CAPABILITIES = 0x01, + RESERVATION_CAPABILITIES = 0x02, +}; + +enum viosrp_capability_support { + SERVER_DOES_NOT_SUPPORTS_CAP = 0x0, + SERVER_SUPPORTS_CAP = 0x01, + SERVER_CAP_DATA = 0x02, +}; + +enum viosrp_reserve_type { + CLIENT_RESERVE_SCSI_2 = 0x01, +}; + +enum viosrp_capability_flag { + CLIENT_MIGRATED = 0x01, + CLIENT_RECONNECT = 0x02, + CAP_LIST_SUPPORTED = 0x04, + CAP_LIST_DATA = 0x08, +}; + +/* + * Common MAD header + */ +struct mad_common { + uint32_t type; + uint16_t status; + uint16_t length; + uint64_t tag; +}; + +/* + * All SRP (and MAD) requests normally flow from the + * client to the server. 
There is no way for the server to send + an asynchronous message back to the client. The Empty IU is used + to hang out a meaningless request to the server so that it can respond + asynchronously with something like a SCSI AER + */ +struct viosrp_empty_iu { + struct mad_common common; + uint64_t buffer; + uint32_t port; +}; + +struct viosrp_error_log { + struct mad_common common; + uint64_t buffer; +}; + +struct viosrp_adapter_info { + struct mad_common common; + uint64_t buffer; +}; + +struct viosrp_host_config { + struct mad_common common; + uint64_t buffer; +}; + +struct viosrp_fast_fail { + struct mad_common common; +}; + +struct viosrp_capabilities { + struct mad_common common; + uint64_t buffer; +}; + +struct mad_capability_common { + uint32_t cap_type; + uint16_t length; + uint16_t server_support; +}; + +struct mad_reserve_cap { + struct mad_capability_common common; + uint32_t type; +}; + +struct mad_migration_cap { + struct mad_capability_common common; + uint32_t ecl; +}; + +struct capabilities { + uint32_t flags; + char name[SRP_MAX_LOC_LEN]; + char loc[SRP_MAX_LOC_LEN]; + struct mad_migration_cap migration; + struct mad_reserve_cap reserve; +}; + +union mad_iu { + struct viosrp_empty_iu empty_iu; + struct viosrp_error_log error_log; + struct viosrp_adapter_info adapter_info; + struct viosrp_host_config host_config; + struct viosrp_fast_fail fast_fail; + struct viosrp_capabilities capabilities; +}; + +union viosrp_iu { + union srp_iu srp; + union mad_iu mad; +}; + +struct mad_adapter_info_data { + char srp_version[8]; + char partition_name[96]; + uint32_t partition_number; + uint32_t mad_version; + uint32_t os_type; + uint32_t port_max_txu[8]; /* per-port maximum transfer */ +}; + +#endif diff --git a/hw/scsi/virtio-scsi-dataplane.c b/hw/scsi/virtio-scsi-dataplane.c new file mode 100644 index 000000000..18eb824c9 --- /dev/null +++ b/hw/scsi/virtio-scsi-dataplane.c @@ -0,0 +1,273 @@ +/* + * Virtio SCSI dataplane + * + * Copyright Red Hat, Inc. 2014 + * + * Authors: + * Fam Zheng + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory.
+ * + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "hw/virtio/virtio-scsi.h" +#include "qemu/error-report.h" +#include "sysemu/block-backend.h" +#include "hw/scsi/scsi.h" +#include "scsi/constants.h" +#include "hw/virtio/virtio-bus.h" +#include "hw/virtio/virtio-access.h" + +/* Context: QEMU global mutex held */ +void virtio_scsi_dataplane_setup(VirtIOSCSI *s, Error **errp) +{ + VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s); + VirtIODevice *vdev = VIRTIO_DEVICE(s); + BusState *qbus = qdev_get_parent_bus(DEVICE(vdev)); + VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); + + if (vs->conf.iothread) { + if (!k->set_guest_notifiers || !k->ioeventfd_assign) { + error_setg(errp, + "device is incompatible with iothread " + "(transport does not support notifiers)"); + return; + } + if (!virtio_device_ioeventfd_enabled(vdev)) { + error_setg(errp, "ioeventfd is required for iothread"); + return; + } + s->ctx = iothread_get_aio_context(vs->conf.iothread); + } else { + if (!virtio_device_ioeventfd_enabled(vdev)) { + return; + } + s->ctx = qemu_get_aio_context(); + } +} + +static bool virtio_scsi_data_plane_handle_cmd(VirtIODevice *vdev, + VirtQueue *vq) +{ + bool progress = false; + VirtIOSCSI *s = VIRTIO_SCSI(vdev); + + virtio_scsi_acquire(s); + if (!s->dataplane_fenced) { + assert(s->ctx && s->dataplane_started); + progress = virtio_scsi_handle_cmd_vq(s, vq); + } + virtio_scsi_release(s); + return progress; +} + +static bool virtio_scsi_data_plane_handle_ctrl(VirtIODevice *vdev, + VirtQueue *vq) +{ + bool progress = false; + VirtIOSCSI *s = VIRTIO_SCSI(vdev); + + virtio_scsi_acquire(s); + if (!s->dataplane_fenced) { + assert(s->ctx && s->dataplane_started); + progress = virtio_scsi_handle_ctrl_vq(s, vq); + } + virtio_scsi_release(s); + return progress; +} + +static bool virtio_scsi_data_plane_handle_event(VirtIODevice *vdev, + VirtQueue *vq) +{ + bool progress = false; + VirtIOSCSI *s = VIRTIO_SCSI(vdev); + + virtio_scsi_acquire(s); + if (!s->dataplane_fenced) { + assert(s->ctx && s->dataplane_started); + progress = virtio_scsi_handle_event_vq(s, vq); + } + virtio_scsi_release(s); + return progress; +} + +static int virtio_scsi_set_host_notifier(VirtIOSCSI *s, VirtQueue *vq, int n) +{ + BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(s))); + int rc; + + /* Set up virtqueue notify */ + rc = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), n, true); + if (rc != 0) { + fprintf(stderr, "virtio-scsi: Failed to set host notifier (%d)\n", + rc); + s->dataplane_fenced = true; + return rc; + } + + return 0; +} + +/* Context: BH in IOThread */ +static void virtio_scsi_dataplane_stop_bh(void *opaque) +{ + VirtIOSCSI *s = opaque; + VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s); + int i; + + virtio_queue_aio_set_host_notifier_handler(vs->ctrl_vq, s->ctx, NULL); + virtio_queue_aio_set_host_notifier_handler(vs->event_vq, s->ctx, NULL); + for (i = 0; i < vs->conf.num_queues; i++) { + virtio_queue_aio_set_host_notifier_handler(vs->cmd_vqs[i], s->ctx, NULL); + } +} + +/* Context: QEMU global mutex held */ +int virtio_scsi_dataplane_start(VirtIODevice *vdev) +{ + int i; + int rc; + int vq_init_count = 0; + BusState *qbus = qdev_get_parent_bus(DEVICE(vdev)); + VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); + VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(vdev); + VirtIOSCSI *s = VIRTIO_SCSI(vdev); + + if (s->dataplane_started || + s->dataplane_starting || + s->dataplane_fenced) { + return 0; + } + + s->dataplane_starting = true; + + /* Set up guest notifier (irq) */ + rc = k->set_guest_notifiers(qbus->parent, 
vs->conf.num_queues + 2, true); + if (rc != 0) { + error_report("virtio-scsi: Failed to set guest notifiers (%d), " + "ensure -accel kvm is set.", rc); + goto fail_guest_notifiers; + } + + /* + * Batch all the host notifiers in a single transaction to avoid + * quadratic time complexity in address_space_update_ioeventfds(). + */ + memory_region_transaction_begin(); + + rc = virtio_scsi_set_host_notifier(s, vs->ctrl_vq, 0); + if (rc != 0) { + goto fail_host_notifiers; + } + + vq_init_count++; + rc = virtio_scsi_set_host_notifier(s, vs->event_vq, 1); + if (rc != 0) { + goto fail_host_notifiers; + } + + vq_init_count++; + + for (i = 0; i < vs->conf.num_queues; i++) { + rc = virtio_scsi_set_host_notifier(s, vs->cmd_vqs[i], i + 2); + if (rc) { + goto fail_host_notifiers; + } + vq_init_count++; + } + + memory_region_transaction_commit(); + + aio_context_acquire(s->ctx); + virtio_queue_aio_set_host_notifier_handler(vs->ctrl_vq, s->ctx, + virtio_scsi_data_plane_handle_ctrl); + virtio_queue_aio_set_host_notifier_handler(vs->event_vq, s->ctx, + virtio_scsi_data_plane_handle_event); + + for (i = 0; i < vs->conf.num_queues; i++) { + virtio_queue_aio_set_host_notifier_handler(vs->cmd_vqs[i], s->ctx, + virtio_scsi_data_plane_handle_cmd); + } + + s->dataplane_starting = false; + s->dataplane_started = true; + aio_context_release(s->ctx); + return 0; + +fail_host_notifiers: + for (i = 0; i < vq_init_count; i++) { + virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), i, false); + } + + /* + * The transaction expects the ioeventfds to be open when it + * commits. Do it now, before the cleanup loop. + */ + memory_region_transaction_commit(); + + for (i = 0; i < vq_init_count; i++) { + virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), i); + } + k->set_guest_notifiers(qbus->parent, vs->conf.num_queues + 2, false); +fail_guest_notifiers: + s->dataplane_fenced = true; + s->dataplane_starting = false; + s->dataplane_started = true; + return -ENOSYS; +} + +/* Context: QEMU global mutex held */ +void virtio_scsi_dataplane_stop(VirtIODevice *vdev) +{ + BusState *qbus = qdev_get_parent_bus(DEVICE(vdev)); + VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); + VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(vdev); + VirtIOSCSI *s = VIRTIO_SCSI(vdev); + int i; + + if (!s->dataplane_started || s->dataplane_stopping) { + return; + } + + /* Better luck next time. */ + if (s->dataplane_fenced) { + s->dataplane_fenced = false; + s->dataplane_started = false; + return; + } + s->dataplane_stopping = true; + + aio_context_acquire(s->ctx); + aio_wait_bh_oneshot(s->ctx, virtio_scsi_dataplane_stop_bh, s); + aio_context_release(s->ctx); + + blk_drain_all(); /* ensure there are no in-flight requests */ + + /* + * Batch all the host notifiers in a single transaction to avoid + * quadratic time complexity in address_space_update_ioeventfds(). + */ + memory_region_transaction_begin(); + + for (i = 0; i < vs->conf.num_queues + 2; i++) { + virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), i, false); + } + + /* + * The transaction expects the ioeventfds to be open when it + * commits. Do it now, before the cleanup loop. 
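+ * virtio_bus_cleanup_host_notifier() closes the ioeventfd, which is why the cleanup loop runs only after the commit below.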
+ */ + memory_region_transaction_commit(); + + for (i = 0; i < vs->conf.num_queues + 2; i++) { + virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), i); + } + + /* Clean up guest notifier (irq) */ + k->set_guest_notifiers(qbus->parent, vs->conf.num_queues + 2, false); + s->dataplane_stopping = false; + s->dataplane_started = false; +} diff --git a/hw/scsi/virtio-scsi.c b/hw/scsi/virtio-scsi.c new file mode 100644 index 000000000..51fd09522 --- /dev/null +++ b/hw/scsi/virtio-scsi.c @@ -0,0 +1,1138 @@ +/* + * Virtio SCSI HBA + * + * Copyright IBM, Corp. 2010 + * Copyright Red Hat, Inc. 2011 + * + * Authors: + * Stefan Hajnoczi + * Paolo Bonzini + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "standard-headers/linux/virtio_ids.h" +#include "hw/virtio/virtio-scsi.h" +#include "migration/qemu-file-types.h" +#include "qemu/error-report.h" +#include "qemu/iov.h" +#include "qemu/module.h" +#include "sysemu/block-backend.h" +#include "hw/qdev-properties.h" +#include "hw/scsi/scsi.h" +#include "scsi/constants.h" +#include "hw/virtio/virtio-bus.h" +#include "hw/virtio/virtio-access.h" +#include "trace.h" + +static inline int virtio_scsi_get_lun(uint8_t *lun) +{ + return ((lun[2] << 8) | lun[3]) & 0x3FFF; +} + +static inline SCSIDevice *virtio_scsi_device_get(VirtIOSCSI *s, uint8_t *lun) +{ + if (lun[0] != 1) { + return NULL; + } + if (lun[2] != 0 && !(lun[2] >= 0x40 && lun[2] < 0x80)) { + return NULL; + } + return scsi_device_get(&s->bus, 0, lun[1], virtio_scsi_get_lun(lun)); +} + +void virtio_scsi_init_req(VirtIOSCSI *s, VirtQueue *vq, VirtIOSCSIReq *req) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(s); + const size_t zero_skip = + offsetof(VirtIOSCSIReq, resp_iov) + sizeof(req->resp_iov); + + req->vq = vq; + req->dev = s; + qemu_sglist_init(&req->qsgl, DEVICE(s), 8, vdev->dma_as); + qemu_iovec_init(&req->resp_iov, 1); + memset((uint8_t *)req + zero_skip, 0, sizeof(*req) - zero_skip); +} + +void virtio_scsi_free_req(VirtIOSCSIReq *req) +{ + qemu_iovec_destroy(&req->resp_iov); + qemu_sglist_destroy(&req->qsgl); + g_free(req); +} + +static void virtio_scsi_complete_req(VirtIOSCSIReq *req) +{ + VirtIOSCSI *s = req->dev; + VirtQueue *vq = req->vq; + VirtIODevice *vdev = VIRTIO_DEVICE(s); + + qemu_iovec_from_buf(&req->resp_iov, 0, &req->resp, req->resp_size); + virtqueue_push(vq, &req->elem, req->qsgl.size + req->resp_iov.size); + if (s->dataplane_started && !s->dataplane_fenced) { + virtio_notify_irqfd(vdev, vq); + } else { + virtio_notify(vdev, vq); + } + + if (req->sreq) { + req->sreq->hba_private = NULL; + scsi_req_unref(req->sreq); + } + virtio_scsi_free_req(req); +} + +static void virtio_scsi_bad_req(VirtIOSCSIReq *req) +{ + virtio_error(VIRTIO_DEVICE(req->dev), "wrong size for virtio-scsi headers"); + virtqueue_detach_element(req->vq, &req->elem, 0); + virtio_scsi_free_req(req); +} + +static size_t qemu_sgl_concat(VirtIOSCSIReq *req, struct iovec *iov, + hwaddr *addr, int num, size_t skip) +{ + QEMUSGList *qsgl = &req->qsgl; + size_t copied = 0; + + while (num) { + if (skip >= iov->iov_len) { + skip -= iov->iov_len; + } else { + qemu_sglist_add(qsgl, *addr + skip, iov->iov_len - skip); + copied += iov->iov_len - skip; + skip = 0; + } + iov++; + addr++; + num--; + } + + assert(skip == 0); + return copied; +} + +static int virtio_scsi_parse_req(VirtIOSCSIReq *req, + unsigned req_size, unsigned resp_size) +{ + VirtIODevice *vdev = (VirtIODevice *) 
req->dev; + size_t in_size, out_size; + + if (iov_to_buf(req->elem.out_sg, req->elem.out_num, 0, + &req->req, req_size) < req_size) { + return -EINVAL; + } + + if (qemu_iovec_concat_iov(&req->resp_iov, + req->elem.in_sg, req->elem.in_num, 0, + resp_size) < resp_size) { + return -EINVAL; + } + + req->resp_size = resp_size; + + /* Old BIOSes left some padding by mistake after the req_size/resp_size. + * As a workaround, always consider the first buffer as the virtio-scsi + * request/response, making the payload start at the second element + * of the iovec. + * + * The actual length of the response header, stored in req->resp_size, + * does not change. + * + * TODO: always disable this workaround for virtio 1.0 devices. + */ + if (!virtio_vdev_has_feature(vdev, VIRTIO_F_ANY_LAYOUT)) { + if (req->elem.out_num) { + req_size = req->elem.out_sg[0].iov_len; + } + if (req->elem.in_num) { + resp_size = req->elem.in_sg[0].iov_len; + } + } + + out_size = qemu_sgl_concat(req, req->elem.out_sg, + &req->elem.out_addr[0], req->elem.out_num, + req_size); + in_size = qemu_sgl_concat(req, req->elem.in_sg, + &req->elem.in_addr[0], req->elem.in_num, + resp_size); + + if (out_size && in_size) { + return -ENOTSUP; + } + + if (out_size) { + req->mode = SCSI_XFER_TO_DEV; + } else if (in_size) { + req->mode = SCSI_XFER_FROM_DEV; + } + + return 0; +} + +static VirtIOSCSIReq *virtio_scsi_pop_req(VirtIOSCSI *s, VirtQueue *vq) +{ + VirtIOSCSICommon *vs = (VirtIOSCSICommon *)s; + VirtIOSCSIReq *req; + + req = virtqueue_pop(vq, sizeof(VirtIOSCSIReq) + vs->cdb_size); + if (!req) { + return NULL; + } + virtio_scsi_init_req(s, vq, req); + return req; +} + +static void virtio_scsi_save_request(QEMUFile *f, SCSIRequest *sreq) +{ + VirtIOSCSIReq *req = sreq->hba_private; + VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(req->dev); + VirtIODevice *vdev = VIRTIO_DEVICE(req->dev); + uint32_t n = virtio_get_queue_index(req->vq) - VIRTIO_SCSI_VQ_NUM_FIXED; + + assert(n < vs->conf.num_queues); + qemu_put_be32s(f, &n); + qemu_put_virtqueue_element(vdev, f, &req->elem); +} + +static void *virtio_scsi_load_request(QEMUFile *f, SCSIRequest *sreq) +{ + SCSIBus *bus = sreq->bus; + VirtIOSCSI *s = container_of(bus, VirtIOSCSI, bus); + VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s); + VirtIODevice *vdev = VIRTIO_DEVICE(s); + VirtIOSCSIReq *req; + uint32_t n; + + qemu_get_be32s(f, &n); + assert(n < vs->conf.num_queues); + req = qemu_get_virtqueue_element(vdev, f, + sizeof(VirtIOSCSIReq) + vs->cdb_size); + virtio_scsi_init_req(s, vs->cmd_vqs[n], req); + + if (virtio_scsi_parse_req(req, sizeof(VirtIOSCSICmdReq) + vs->cdb_size, + sizeof(VirtIOSCSICmdResp) + vs->sense_size) < 0) { + error_report("invalid SCSI request migration data"); + exit(1); + } + + scsi_req_ref(sreq); + req->sreq = sreq; + if (req->sreq->cmd.mode != SCSI_XFER_NONE) { + assert(req->sreq->cmd.mode == req->mode); + } + return req; +} + +typedef struct { + Notifier notifier; + VirtIOSCSIReq *tmf_req; +} VirtIOSCSICancelNotifier; + +static void virtio_scsi_cancel_notify(Notifier *notifier, void *data) +{ + VirtIOSCSICancelNotifier *n = container_of(notifier, + VirtIOSCSICancelNotifier, + notifier); + + if (--n->tmf_req->remaining == 0) { + VirtIOSCSIReq *req = n->tmf_req; + + trace_virtio_scsi_tmf_resp(virtio_scsi_get_lun(req->req.tmf.lun), + req->req.tmf.tag, req->resp.tmf.response); + virtio_scsi_complete_req(req); + } + g_free(n); +} + +static inline void virtio_scsi_ctx_check(VirtIOSCSI *s, SCSIDevice *d) +{ + if (s->dataplane_started && d && blk_is_available(d->conf.blk)) { + 
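+ /* With dataplane active, this device must be driven from the IOThread's AioContext; a BlockBackend still bound to another context would indicate a bug. */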
assert(blk_get_aio_context(d->conf.blk) == s->ctx); + } +} + +/* Return 0 if the request is ready to be completed and return to guest; + * -EINPROGRESS if the request is submitted and will be completed later, in the + * case of async cancellation. */ +static int virtio_scsi_do_tmf(VirtIOSCSI *s, VirtIOSCSIReq *req) +{ + SCSIDevice *d = virtio_scsi_device_get(s, req->req.tmf.lun); + SCSIRequest *r, *next; + BusChild *kid; + int target; + int ret = 0; + + virtio_scsi_ctx_check(s, d); + /* Here VIRTIO_SCSI_S_OK means "FUNCTION COMPLETE". */ + req->resp.tmf.response = VIRTIO_SCSI_S_OK; + + /* + * req->req.tmf has the QEMU_PACKED attribute. Don't use virtio_tswap32s() + * to avoid compiler errors. + */ + req->req.tmf.subtype = + virtio_tswap32(VIRTIO_DEVICE(s), req->req.tmf.subtype); + + trace_virtio_scsi_tmf_req(virtio_scsi_get_lun(req->req.tmf.lun), + req->req.tmf.tag, req->req.tmf.subtype); + + switch (req->req.tmf.subtype) { + case VIRTIO_SCSI_T_TMF_ABORT_TASK: + case VIRTIO_SCSI_T_TMF_QUERY_TASK: + if (!d) { + goto fail; + } + if (d->lun != virtio_scsi_get_lun(req->req.tmf.lun)) { + goto incorrect_lun; + } + QTAILQ_FOREACH_SAFE(r, &d->requests, next, next) { + VirtIOSCSIReq *cmd_req = r->hba_private; + if (cmd_req && cmd_req->req.cmd.tag == req->req.tmf.tag) { + break; + } + } + if (r) { + /* + * Assert that the request has not been completed yet, we + * check for it in the loop above. + */ + assert(r->hba_private); + if (req->req.tmf.subtype == VIRTIO_SCSI_T_TMF_QUERY_TASK) { + /* "If the specified command is present in the task set, then + * return a service response set to FUNCTION SUCCEEDED". + */ + req->resp.tmf.response = VIRTIO_SCSI_S_FUNCTION_SUCCEEDED; + } else { + VirtIOSCSICancelNotifier *notifier; + + req->remaining = 1; + notifier = g_new(VirtIOSCSICancelNotifier, 1); + notifier->tmf_req = req; + notifier->notifier.notify = virtio_scsi_cancel_notify; + scsi_req_cancel_async(r, ¬ifier->notifier); + ret = -EINPROGRESS; + } + } + break; + + case VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET: + if (!d) { + goto fail; + } + if (d->lun != virtio_scsi_get_lun(req->req.tmf.lun)) { + goto incorrect_lun; + } + s->resetting++; + qdev_reset_all(&d->qdev); + s->resetting--; + break; + + case VIRTIO_SCSI_T_TMF_ABORT_TASK_SET: + case VIRTIO_SCSI_T_TMF_CLEAR_TASK_SET: + case VIRTIO_SCSI_T_TMF_QUERY_TASK_SET: + if (!d) { + goto fail; + } + if (d->lun != virtio_scsi_get_lun(req->req.tmf.lun)) { + goto incorrect_lun; + } + + /* Add 1 to "remaining" until virtio_scsi_do_tmf returns. + * This way, if the bus starts calling back to the notifiers + * even before we finish the loop, virtio_scsi_cancel_notify + * will not complete the TMF too early. + */ + req->remaining = 1; + QTAILQ_FOREACH_SAFE(r, &d->requests, next, next) { + if (r->hba_private) { + if (req->req.tmf.subtype == VIRTIO_SCSI_T_TMF_QUERY_TASK_SET) { + /* "If there is any command present in the task set, then + * return a service response set to FUNCTION SUCCEEDED". 
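+                 *
+                 * Illustrative sketch of the "remaining" counter used by the
+                 * cancelling branches of this switch: it starts at 1, each
+                 * scsi_req_cancel_async() adds 1, and each
+                 * virtio_scsi_cancel_notify() subtracts 1. With three
+                 * outstanding commands it climbs 1 -> 4, drops to 3 when
+                 * this function returns -EINPROGRESS, and the TMF is
+                 * completed by the notifier that brings it to 0.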
+ */ + req->resp.tmf.response = VIRTIO_SCSI_S_FUNCTION_SUCCEEDED; + break; + } else { + VirtIOSCSICancelNotifier *notifier; + + req->remaining++; + notifier = g_new(VirtIOSCSICancelNotifier, 1); + notifier->notifier.notify = virtio_scsi_cancel_notify; + notifier->tmf_req = req; + scsi_req_cancel_async(r, ¬ifier->notifier); + } + } + } + if (--req->remaining > 0) { + ret = -EINPROGRESS; + } + break; + + case VIRTIO_SCSI_T_TMF_I_T_NEXUS_RESET: + target = req->req.tmf.lun[1]; + s->resetting++; + + rcu_read_lock(); + QTAILQ_FOREACH_RCU(kid, &s->bus.qbus.children, sibling) { + SCSIDevice *d1 = SCSI_DEVICE(kid->child); + if (d1->channel == 0 && d1->id == target) { + qdev_reset_all(&d1->qdev); + } + } + rcu_read_unlock(); + + s->resetting--; + break; + + case VIRTIO_SCSI_T_TMF_CLEAR_ACA: + default: + req->resp.tmf.response = VIRTIO_SCSI_S_FUNCTION_REJECTED; + break; + } + + object_unref(OBJECT(d)); + return ret; + +incorrect_lun: + req->resp.tmf.response = VIRTIO_SCSI_S_INCORRECT_LUN; + object_unref(OBJECT(d)); + return ret; + +fail: + req->resp.tmf.response = VIRTIO_SCSI_S_BAD_TARGET; + object_unref(OBJECT(d)); + return ret; +} + +static void virtio_scsi_handle_ctrl_req(VirtIOSCSI *s, VirtIOSCSIReq *req) +{ + VirtIODevice *vdev = (VirtIODevice *)s; + uint32_t type; + int r = 0; + + if (iov_to_buf(req->elem.out_sg, req->elem.out_num, 0, + &type, sizeof(type)) < sizeof(type)) { + virtio_scsi_bad_req(req); + return; + } + + virtio_tswap32s(vdev, &type); + if (type == VIRTIO_SCSI_T_TMF) { + if (virtio_scsi_parse_req(req, sizeof(VirtIOSCSICtrlTMFReq), + sizeof(VirtIOSCSICtrlTMFResp)) < 0) { + virtio_scsi_bad_req(req); + return; + } else { + r = virtio_scsi_do_tmf(s, req); + } + + } else if (type == VIRTIO_SCSI_T_AN_QUERY || + type == VIRTIO_SCSI_T_AN_SUBSCRIBE) { + if (virtio_scsi_parse_req(req, sizeof(VirtIOSCSICtrlANReq), + sizeof(VirtIOSCSICtrlANResp)) < 0) { + virtio_scsi_bad_req(req); + return; + } else { + req->req.an.event_requested = + virtio_tswap32(VIRTIO_DEVICE(s), req->req.an.event_requested); + trace_virtio_scsi_an_req(virtio_scsi_get_lun(req->req.an.lun), + req->req.an.event_requested); + req->resp.an.event_actual = 0; + req->resp.an.response = VIRTIO_SCSI_S_OK; + } + } + if (r == 0) { + if (type == VIRTIO_SCSI_T_TMF) + trace_virtio_scsi_tmf_resp(virtio_scsi_get_lun(req->req.tmf.lun), + req->req.tmf.tag, + req->resp.tmf.response); + else if (type == VIRTIO_SCSI_T_AN_QUERY || + type == VIRTIO_SCSI_T_AN_SUBSCRIBE) + trace_virtio_scsi_an_resp(virtio_scsi_get_lun(req->req.an.lun), + req->resp.an.response); + virtio_scsi_complete_req(req); + } else { + assert(r == -EINPROGRESS); + } +} + +bool virtio_scsi_handle_ctrl_vq(VirtIOSCSI *s, VirtQueue *vq) +{ + VirtIOSCSIReq *req; + bool progress = false; + + while ((req = virtio_scsi_pop_req(s, vq))) { + progress = true; + virtio_scsi_handle_ctrl_req(s, req); + } + return progress; +} + +static void virtio_scsi_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq) +{ + VirtIOSCSI *s = (VirtIOSCSI *)vdev; + + if (s->ctx) { + virtio_device_start_ioeventfd(vdev); + if (!s->dataplane_fenced) { + return; + } + } + virtio_scsi_acquire(s); + virtio_scsi_handle_ctrl_vq(s, vq); + virtio_scsi_release(s); +} + +static void virtio_scsi_complete_cmd_req(VirtIOSCSIReq *req) +{ + trace_virtio_scsi_cmd_resp(virtio_scsi_get_lun(req->req.cmd.lun), + req->req.cmd.tag, + req->resp.cmd.response, + req->resp.cmd.status); + /* Sense data is not in req->resp and is copied separately + * in virtio_scsi_command_complete. 
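+     *
+     * Illustrative layout of resp_iov after a CHECK CONDITION:
+     *
+     *   [VirtIOSCSICmdResp header][sense bytes...]
+     *
+     * virtio_scsi_command_complete() writes the sense bytes directly into
+     * resp_iov at offset sizeof(req->resp.cmd), so only the fixed-size
+     * header still needs to be copied out of req->resp here.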
+ */ + req->resp_size = sizeof(VirtIOSCSICmdResp); + virtio_scsi_complete_req(req); +} + +static void virtio_scsi_command_failed(SCSIRequest *r) +{ + VirtIOSCSIReq *req = r->hba_private; + + if (r->io_canceled) { + return; + } + + req->resp.cmd.status = GOOD; + switch (r->host_status) { + case SCSI_HOST_NO_LUN: + req->resp.cmd.response = VIRTIO_SCSI_S_INCORRECT_LUN; + break; + case SCSI_HOST_BUSY: + req->resp.cmd.response = VIRTIO_SCSI_S_BUSY; + break; + case SCSI_HOST_TIME_OUT: + case SCSI_HOST_ABORTED: + req->resp.cmd.response = VIRTIO_SCSI_S_ABORTED; + break; + case SCSI_HOST_BAD_RESPONSE: + req->resp.cmd.response = VIRTIO_SCSI_S_BAD_TARGET; + break; + case SCSI_HOST_RESET: + req->resp.cmd.response = VIRTIO_SCSI_S_RESET; + break; + case SCSI_HOST_TRANSPORT_DISRUPTED: + req->resp.cmd.response = VIRTIO_SCSI_S_TRANSPORT_FAILURE; + break; + case SCSI_HOST_TARGET_FAILURE: + req->resp.cmd.response = VIRTIO_SCSI_S_TARGET_FAILURE; + break; + case SCSI_HOST_RESERVATION_ERROR: + req->resp.cmd.response = VIRTIO_SCSI_S_NEXUS_FAILURE; + break; + case SCSI_HOST_ALLOCATION_FAILURE: + case SCSI_HOST_MEDIUM_ERROR: + case SCSI_HOST_ERROR: + default: + req->resp.cmd.response = VIRTIO_SCSI_S_FAILURE; + break; + } + virtio_scsi_complete_cmd_req(req); +} + +static void virtio_scsi_command_complete(SCSIRequest *r, size_t resid) +{ + VirtIOSCSIReq *req = r->hba_private; + uint8_t sense[SCSI_SENSE_BUF_SIZE]; + uint32_t sense_len; + VirtIODevice *vdev = VIRTIO_DEVICE(req->dev); + + if (r->io_canceled) { + return; + } + + req->resp.cmd.response = VIRTIO_SCSI_S_OK; + req->resp.cmd.status = r->status; + if (req->resp.cmd.status == GOOD) { + req->resp.cmd.resid = virtio_tswap32(vdev, resid); + } else { + req->resp.cmd.resid = 0; + sense_len = scsi_req_get_sense(r, sense, sizeof(sense)); + sense_len = MIN(sense_len, req->resp_iov.size - sizeof(req->resp.cmd)); + qemu_iovec_from_buf(&req->resp_iov, sizeof(req->resp.cmd), + sense, sense_len); + req->resp.cmd.sense_len = virtio_tswap32(vdev, sense_len); + } + virtio_scsi_complete_cmd_req(req); +} + +static int virtio_scsi_parse_cdb(SCSIDevice *dev, SCSICommand *cmd, + uint8_t *buf, void *hba_private) +{ + VirtIOSCSIReq *req = hba_private; + + if (cmd->len == 0) { + cmd->len = MIN(VIRTIO_SCSI_CDB_DEFAULT_SIZE, SCSI_CMD_BUF_SIZE); + memcpy(cmd->buf, buf, cmd->len); + } + + /* Extract the direction and mode directly from the request, for + * host device passthrough. 
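+     *
+     * For example (illustrative): a passthrough WRITE arrives with only
+     * out_sg payload, so req->mode is SCSI_XFER_TO_DEV and req->qsgl.size
+     * is the byte count the guest intends to send; both are taken at face
+     * value rather than re-derived from the CDB.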
+ */ + cmd->xfer = req->qsgl.size; + cmd->mode = req->mode; + return 0; +} + +static QEMUSGList *virtio_scsi_get_sg_list(SCSIRequest *r) +{ + VirtIOSCSIReq *req = r->hba_private; + + return &req->qsgl; +} + +static void virtio_scsi_request_cancelled(SCSIRequest *r) +{ + VirtIOSCSIReq *req = r->hba_private; + + if (!req) { + return; + } + if (req->dev->resetting) { + req->resp.cmd.response = VIRTIO_SCSI_S_RESET; + } else { + req->resp.cmd.response = VIRTIO_SCSI_S_ABORTED; + } + virtio_scsi_complete_cmd_req(req); +} + +static void virtio_scsi_fail_cmd_req(VirtIOSCSIReq *req) +{ + req->resp.cmd.response = VIRTIO_SCSI_S_FAILURE; + virtio_scsi_complete_cmd_req(req); +} + +static int virtio_scsi_handle_cmd_req_prepare(VirtIOSCSI *s, VirtIOSCSIReq *req) +{ + VirtIOSCSICommon *vs = &s->parent_obj; + SCSIDevice *d; + int rc; + + rc = virtio_scsi_parse_req(req, sizeof(VirtIOSCSICmdReq) + vs->cdb_size, + sizeof(VirtIOSCSICmdResp) + vs->sense_size); + if (rc < 0) { + if (rc == -ENOTSUP) { + virtio_scsi_fail_cmd_req(req); + return -ENOTSUP; + } else { + virtio_scsi_bad_req(req); + return -EINVAL; + } + } + trace_virtio_scsi_cmd_req(virtio_scsi_get_lun(req->req.cmd.lun), + req->req.cmd.tag, req->req.cmd.cdb[0]); + + d = virtio_scsi_device_get(s, req->req.cmd.lun); + if (!d) { + req->resp.cmd.response = VIRTIO_SCSI_S_BAD_TARGET; + virtio_scsi_complete_cmd_req(req); + return -ENOENT; + } + virtio_scsi_ctx_check(s, d); + req->sreq = scsi_req_new(d, req->req.cmd.tag, + virtio_scsi_get_lun(req->req.cmd.lun), + req->req.cmd.cdb, req); + + if (req->sreq->cmd.mode != SCSI_XFER_NONE + && (req->sreq->cmd.mode != req->mode || + req->sreq->cmd.xfer > req->qsgl.size)) { + req->resp.cmd.response = VIRTIO_SCSI_S_OVERRUN; + virtio_scsi_complete_cmd_req(req); + object_unref(OBJECT(d)); + return -ENOBUFS; + } + scsi_req_ref(req->sreq); + blk_io_plug(d->conf.blk); + object_unref(OBJECT(d)); + return 0; +} + +static void virtio_scsi_handle_cmd_req_submit(VirtIOSCSI *s, VirtIOSCSIReq *req) +{ + SCSIRequest *sreq = req->sreq; + if (scsi_req_enqueue(sreq)) { + scsi_req_continue(sreq); + } + blk_io_unplug(sreq->dev->conf.blk); + scsi_req_unref(sreq); +} + +bool virtio_scsi_handle_cmd_vq(VirtIOSCSI *s, VirtQueue *vq) +{ + VirtIOSCSIReq *req, *next; + int ret = 0; + bool suppress_notifications = virtio_queue_get_notification(vq); + bool progress = false; + + QTAILQ_HEAD(, VirtIOSCSIReq) reqs = QTAILQ_HEAD_INITIALIZER(reqs); + + do { + if (suppress_notifications) { + virtio_queue_set_notification(vq, 0); + } + + while ((req = virtio_scsi_pop_req(s, vq))) { + progress = true; + ret = virtio_scsi_handle_cmd_req_prepare(s, req); + if (!ret) { + QTAILQ_INSERT_TAIL(&reqs, req, next); + } else if (ret == -EINVAL) { + /* The device is broken and shouldn't process any request */ + while (!QTAILQ_EMPTY(&reqs)) { + req = QTAILQ_FIRST(&reqs); + QTAILQ_REMOVE(&reqs, req, next); + blk_io_unplug(req->sreq->dev->conf.blk); + scsi_req_unref(req->sreq); + virtqueue_detach_element(req->vq, &req->elem, 0); + virtio_scsi_free_req(req); + } + } + } + + if (suppress_notifications) { + virtio_queue_set_notification(vq, 1); + } + } while (ret != -EINVAL && !virtio_queue_empty(vq)); + + QTAILQ_FOREACH_SAFE(req, &reqs, next, next) { + virtio_scsi_handle_cmd_req_submit(s, req); + } + return progress; +} + +static void virtio_scsi_handle_cmd(VirtIODevice *vdev, VirtQueue *vq) +{ + /* use non-QOM casts in the data path */ + VirtIOSCSI *s = (VirtIOSCSI *)vdev; + + if (s->ctx) { + virtio_device_start_ioeventfd(vdev); + if (!s->dataplane_fenced) { + return; + } 
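+        /*
+         * Reaching this point means the dataplane is fenced: fall through
+         * and process the virtqueue in the main loop context below.
+         */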
+ } + virtio_scsi_acquire(s); + virtio_scsi_handle_cmd_vq(s, vq); + virtio_scsi_release(s); +} + +static void virtio_scsi_get_config(VirtIODevice *vdev, + uint8_t *config) +{ + VirtIOSCSIConfig *scsiconf = (VirtIOSCSIConfig *)config; + VirtIOSCSICommon *s = VIRTIO_SCSI_COMMON(vdev); + + virtio_stl_p(vdev, &scsiconf->num_queues, s->conf.num_queues); + virtio_stl_p(vdev, &scsiconf->seg_max, + s->conf.seg_max_adjust ? s->conf.virtqueue_size - 2 : 128 - 2); + virtio_stl_p(vdev, &scsiconf->max_sectors, s->conf.max_sectors); + virtio_stl_p(vdev, &scsiconf->cmd_per_lun, s->conf.cmd_per_lun); + virtio_stl_p(vdev, &scsiconf->event_info_size, sizeof(VirtIOSCSIEvent)); + virtio_stl_p(vdev, &scsiconf->sense_size, s->sense_size); + virtio_stl_p(vdev, &scsiconf->cdb_size, s->cdb_size); + virtio_stw_p(vdev, &scsiconf->max_channel, VIRTIO_SCSI_MAX_CHANNEL); + virtio_stw_p(vdev, &scsiconf->max_target, VIRTIO_SCSI_MAX_TARGET); + virtio_stl_p(vdev, &scsiconf->max_lun, VIRTIO_SCSI_MAX_LUN); +} + +static void virtio_scsi_set_config(VirtIODevice *vdev, + const uint8_t *config) +{ + VirtIOSCSIConfig *scsiconf = (VirtIOSCSIConfig *)config; + VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(vdev); + + if ((uint32_t) virtio_ldl_p(vdev, &scsiconf->sense_size) >= 65536 || + (uint32_t) virtio_ldl_p(vdev, &scsiconf->cdb_size) >= 256) { + virtio_error(vdev, + "bad data written to virtio-scsi configuration space"); + return; + } + + vs->sense_size = virtio_ldl_p(vdev, &scsiconf->sense_size); + vs->cdb_size = virtio_ldl_p(vdev, &scsiconf->cdb_size); +} + +static uint64_t virtio_scsi_get_features(VirtIODevice *vdev, + uint64_t requested_features, + Error **errp) +{ + VirtIOSCSI *s = VIRTIO_SCSI(vdev); + + /* Firstly sync all virtio-scsi possible supported features */ + requested_features |= s->host_features; + return requested_features; +} + +static void virtio_scsi_reset(VirtIODevice *vdev) +{ + VirtIOSCSI *s = VIRTIO_SCSI(vdev); + VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(vdev); + + assert(!s->dataplane_started); + s->resetting++; + qbus_reset_all(BUS(&s->bus)); + s->resetting--; + + vs->sense_size = VIRTIO_SCSI_SENSE_DEFAULT_SIZE; + vs->cdb_size = VIRTIO_SCSI_CDB_DEFAULT_SIZE; + s->events_dropped = false; +} + +void virtio_scsi_push_event(VirtIOSCSI *s, SCSIDevice *dev, + uint32_t event, uint32_t reason) +{ + VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s); + VirtIOSCSIReq *req; + VirtIOSCSIEvent *evt; + VirtIODevice *vdev = VIRTIO_DEVICE(s); + + if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) { + return; + } + + req = virtio_scsi_pop_req(s, vs->event_vq); + if (!req) { + s->events_dropped = true; + return; + } + + if (s->events_dropped) { + event |= VIRTIO_SCSI_T_EVENTS_MISSED; + s->events_dropped = false; + } + + if (virtio_scsi_parse_req(req, 0, sizeof(VirtIOSCSIEvent))) { + virtio_scsi_bad_req(req); + return; + } + + evt = &req->resp.event; + memset(evt, 0, sizeof(VirtIOSCSIEvent)); + evt->event = virtio_tswap32(vdev, event); + evt->reason = virtio_tswap32(vdev, reason); + if (!dev) { + assert(event == VIRTIO_SCSI_T_EVENTS_MISSED); + } else { + evt->lun[0] = 1; + evt->lun[1] = dev->id; + + /* Linux wants us to keep the same encoding we use for REPORT LUNS. 
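+         * For example (illustrative): LUN 5 is encoded as lun[2] = 0,
+         * lun[3] = 5, while LUN 300 (0x12C) uses the flat-space form
+         * lun[2] = 0x41, lun[3] = 0x2C; virtio_scsi_get_lun() undoes this
+         * by masking the pair with 0x3FFF.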
*/ + if (dev->lun >= 256) { + evt->lun[2] = (dev->lun >> 8) | 0x40; + } + evt->lun[3] = dev->lun & 0xFF; + } + trace_virtio_scsi_event(virtio_scsi_get_lun(evt->lun), event, reason); + + virtio_scsi_complete_req(req); +} + +bool virtio_scsi_handle_event_vq(VirtIOSCSI *s, VirtQueue *vq) +{ + if (s->events_dropped) { + virtio_scsi_push_event(s, NULL, VIRTIO_SCSI_T_NO_EVENT, 0); + return true; + } + return false; +} + +static void virtio_scsi_handle_event(VirtIODevice *vdev, VirtQueue *vq) +{ + VirtIOSCSI *s = VIRTIO_SCSI(vdev); + + if (s->ctx) { + virtio_device_start_ioeventfd(vdev); + if (!s->dataplane_fenced) { + return; + } + } + virtio_scsi_acquire(s); + virtio_scsi_handle_event_vq(s, vq); + virtio_scsi_release(s); +} + +static void virtio_scsi_change(SCSIBus *bus, SCSIDevice *dev, SCSISense sense) +{ + VirtIOSCSI *s = container_of(bus, VirtIOSCSI, bus); + VirtIODevice *vdev = VIRTIO_DEVICE(s); + + if (virtio_vdev_has_feature(vdev, VIRTIO_SCSI_F_CHANGE) && + dev->type != TYPE_ROM) { + virtio_scsi_acquire(s); + virtio_scsi_push_event(s, dev, VIRTIO_SCSI_T_PARAM_CHANGE, + sense.asc | (sense.ascq << 8)); + virtio_scsi_release(s); + } +} + +static void virtio_scsi_pre_hotplug(HotplugHandler *hotplug_dev, + DeviceState *dev, Error **errp) +{ + SCSIDevice *sd = SCSI_DEVICE(dev); + sd->hba_supports_iothread = true; +} + +static void virtio_scsi_hotplug(HotplugHandler *hotplug_dev, DeviceState *dev, + Error **errp) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(hotplug_dev); + VirtIOSCSI *s = VIRTIO_SCSI(vdev); + SCSIDevice *sd = SCSI_DEVICE(dev); + AioContext *old_context; + int ret; + + if (s->ctx && !s->dataplane_fenced) { + if (blk_op_is_blocked(sd->conf.blk, BLOCK_OP_TYPE_DATAPLANE, errp)) { + return; + } + old_context = blk_get_aio_context(sd->conf.blk); + aio_context_acquire(old_context); + ret = blk_set_aio_context(sd->conf.blk, s->ctx, errp); + aio_context_release(old_context); + if (ret < 0) { + return; + } + } + + if (virtio_vdev_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) { + virtio_scsi_acquire(s); + virtio_scsi_push_event(s, sd, + VIRTIO_SCSI_T_TRANSPORT_RESET, + VIRTIO_SCSI_EVT_RESET_RESCAN); + virtio_scsi_release(s); + } +} + +static void virtio_scsi_hotunplug(HotplugHandler *hotplug_dev, DeviceState *dev, + Error **errp) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(hotplug_dev); + VirtIOSCSI *s = VIRTIO_SCSI(vdev); + SCSIDevice *sd = SCSI_DEVICE(dev); + AioContext *ctx = s->ctx ?: qemu_get_aio_context(); + + if (virtio_vdev_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) { + virtio_scsi_acquire(s); + virtio_scsi_push_event(s, sd, + VIRTIO_SCSI_T_TRANSPORT_RESET, + VIRTIO_SCSI_EVT_RESET_REMOVED); + virtio_scsi_release(s); + } + + aio_disable_external(ctx); + qdev_simple_device_unplug_cb(hotplug_dev, dev, errp); + aio_enable_external(ctx); + + if (s->ctx) { + virtio_scsi_acquire(s); + /* If other users keep the BlockBackend in the iothread, that's ok */ + blk_set_aio_context(sd->conf.blk, qemu_get_aio_context(), NULL); + virtio_scsi_release(s); + } +} + +static struct SCSIBusInfo virtio_scsi_scsi_info = { + .tcq = true, + .max_channel = VIRTIO_SCSI_MAX_CHANNEL, + .max_target = VIRTIO_SCSI_MAX_TARGET, + .max_lun = VIRTIO_SCSI_MAX_LUN, + + .complete = virtio_scsi_command_complete, + .fail = virtio_scsi_command_failed, + .cancel = virtio_scsi_request_cancelled, + .change = virtio_scsi_change, + .parse_cdb = virtio_scsi_parse_cdb, + .get_sg_list = virtio_scsi_get_sg_list, + .save_request = virtio_scsi_save_request, + .load_request = virtio_scsi_load_request, +}; + +void 
virtio_scsi_common_realize(DeviceState *dev, + VirtIOHandleOutput ctrl, + VirtIOHandleOutput evt, + VirtIOHandleOutput cmd, + Error **errp) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(dev); + VirtIOSCSICommon *s = VIRTIO_SCSI_COMMON(dev); + int i; + + virtio_init(vdev, "virtio-scsi", VIRTIO_ID_SCSI, + sizeof(VirtIOSCSIConfig)); + + if (s->conf.num_queues == VIRTIO_SCSI_AUTO_NUM_QUEUES) { + s->conf.num_queues = 1; + } + if (s->conf.num_queues == 0 || + s->conf.num_queues > VIRTIO_QUEUE_MAX - VIRTIO_SCSI_VQ_NUM_FIXED) { + error_setg(errp, "Invalid number of queues (= %" PRIu32 "), " + "must be a positive integer less than %d.", + s->conf.num_queues, + VIRTIO_QUEUE_MAX - VIRTIO_SCSI_VQ_NUM_FIXED); + virtio_cleanup(vdev); + return; + } + if (s->conf.virtqueue_size <= 2) { + error_setg(errp, "invalid virtqueue_size property (= %" PRIu32 "), " + "must be > 2", s->conf.virtqueue_size); + return; + } + s->cmd_vqs = g_new0(VirtQueue *, s->conf.num_queues); + s->sense_size = VIRTIO_SCSI_SENSE_DEFAULT_SIZE; + s->cdb_size = VIRTIO_SCSI_CDB_DEFAULT_SIZE; + + s->ctrl_vq = virtio_add_queue(vdev, s->conf.virtqueue_size, ctrl); + s->event_vq = virtio_add_queue(vdev, s->conf.virtqueue_size, evt); + for (i = 0; i < s->conf.num_queues; i++) { + s->cmd_vqs[i] = virtio_add_queue(vdev, s->conf.virtqueue_size, cmd); + } +} + +static void virtio_scsi_device_realize(DeviceState *dev, Error **errp) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(dev); + VirtIOSCSI *s = VIRTIO_SCSI(dev); + Error *err = NULL; + + virtio_scsi_common_realize(dev, + virtio_scsi_handle_ctrl, + virtio_scsi_handle_event, + virtio_scsi_handle_cmd, + &err); + if (err != NULL) { + error_propagate(errp, err); + return; + } + + scsi_bus_init_named(&s->bus, sizeof(s->bus), dev, + &virtio_scsi_scsi_info, vdev->bus_name); + /* override default SCSI bus hotplug-handler, with virtio-scsi's one */ + qbus_set_hotplug_handler(BUS(&s->bus), OBJECT(dev)); + + virtio_scsi_dataplane_setup(s, errp); +} + +void virtio_scsi_common_unrealize(DeviceState *dev) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(dev); + VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(dev); + int i; + + virtio_delete_queue(vs->ctrl_vq); + virtio_delete_queue(vs->event_vq); + for (i = 0; i < vs->conf.num_queues; i++) { + virtio_delete_queue(vs->cmd_vqs[i]); + } + g_free(vs->cmd_vqs); + virtio_cleanup(vdev); +} + +static void virtio_scsi_device_unrealize(DeviceState *dev) +{ + VirtIOSCSI *s = VIRTIO_SCSI(dev); + + qbus_set_hotplug_handler(BUS(&s->bus), NULL); + virtio_scsi_common_unrealize(dev); +} + +static Property virtio_scsi_properties[] = { + DEFINE_PROP_UINT32("num_queues", VirtIOSCSI, parent_obj.conf.num_queues, + VIRTIO_SCSI_AUTO_NUM_QUEUES), + DEFINE_PROP_UINT32("virtqueue_size", VirtIOSCSI, + parent_obj.conf.virtqueue_size, 256), + DEFINE_PROP_BOOL("seg_max_adjust", VirtIOSCSI, + parent_obj.conf.seg_max_adjust, true), + DEFINE_PROP_UINT32("max_sectors", VirtIOSCSI, parent_obj.conf.max_sectors, + 0xFFFF), + DEFINE_PROP_UINT32("cmd_per_lun", VirtIOSCSI, parent_obj.conf.cmd_per_lun, + 128), + DEFINE_PROP_BIT("hotplug", VirtIOSCSI, host_features, + VIRTIO_SCSI_F_HOTPLUG, true), + DEFINE_PROP_BIT("param_change", VirtIOSCSI, host_features, + VIRTIO_SCSI_F_CHANGE, true), + DEFINE_PROP_LINK("iothread", VirtIOSCSI, parent_obj.conf.iothread, + TYPE_IOTHREAD, IOThread *), + DEFINE_PROP_END_OF_LIST(), +}; + +static const VMStateDescription vmstate_virtio_scsi = { + .name = "virtio-scsi", + .minimum_version_id = 1, + .version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_VIRTIO_DEVICE, + VMSTATE_END_OF_LIST() + 
},
+};
+
+static void virtio_scsi_common_class_init(ObjectClass *klass, void *data)
+{
+    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
+    DeviceClass *dc = DEVICE_CLASS(klass);
+
+    vdc->get_config = virtio_scsi_get_config;
+    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
+}
+
+static void virtio_scsi_class_init(ObjectClass *klass, void *data)
+{
+    DeviceClass *dc = DEVICE_CLASS(klass);
+    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
+    HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(klass);
+
+    device_class_set_props(dc, virtio_scsi_properties);
+    dc->vmsd = &vmstate_virtio_scsi;
+    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
+    vdc->realize = virtio_scsi_device_realize;
+    vdc->unrealize = virtio_scsi_device_unrealize;
+    vdc->set_config = virtio_scsi_set_config;
+    vdc->get_features = virtio_scsi_get_features;
+    vdc->reset = virtio_scsi_reset;
+    vdc->start_ioeventfd = virtio_scsi_dataplane_start;
+    vdc->stop_ioeventfd = virtio_scsi_dataplane_stop;
+    hc->pre_plug = virtio_scsi_pre_hotplug;
+    hc->plug = virtio_scsi_hotplug;
+    hc->unplug = virtio_scsi_hotunplug;
+}
+
+static const TypeInfo virtio_scsi_common_info = {
+    .name = TYPE_VIRTIO_SCSI_COMMON,
+    .parent = TYPE_VIRTIO_DEVICE,
+    .instance_size = sizeof(VirtIOSCSICommon),
+    .abstract = true,
+    .class_init = virtio_scsi_common_class_init,
+};
+
+static const TypeInfo virtio_scsi_info = {
+    .name = TYPE_VIRTIO_SCSI,
+    .parent = TYPE_VIRTIO_SCSI_COMMON,
+    .instance_size = sizeof(VirtIOSCSI),
+    .class_init = virtio_scsi_class_init,
+    .interfaces = (InterfaceInfo[]) {
+        { TYPE_HOTPLUG_HANDLER },
+        { }
+    }
+};
+
+static void virtio_register_types(void)
+{
+    type_register_static(&virtio_scsi_common_info);
+    type_register_static(&virtio_scsi_info);
+}
+
+type_init(virtio_register_types)
diff --git a/hw/scsi/vmw_pvscsi.c b/hw/scsi/vmw_pvscsi.c
new file mode 100644
index 000000000..cd76bd67a
--- /dev/null
+++ b/hw/scsi/vmw_pvscsi.c
@@ -0,0 +1,1357 @@
+/*
+ * QEMU VMWARE PVSCSI paravirtual SCSI bus
+ *
+ * Copyright (c) 2012 Ravello Systems LTD (http://ravellosystems.com)
+ *
+ * Developed by Daynix Computing LTD (http://www.daynix.com)
+ *
+ * Based on implementation by Paolo Bonzini
+ * http://lists.gnu.org/archive/html/qemu-devel/2011-08/msg00729.html
+ *
+ * Authors:
+ * Paolo Bonzini
+ * Dmitry Fleytman
+ * Yan Vugenfirer
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.
+ * See the COPYING file in the top-level directory.
+ *
+ * NOTE about MSI-X:
+ * MSI-X support has been removed for the moment because it leads Windows OS
+ * to crash on startup. The crash happens because the Windows driver requires
+ * the MSI-X shared memory to be part of the same BAR used for the rings
+ * state registers, etc. This is not supported by the QEMU infrastructure,
+ * so a separate BAR was created for MSI-X purposes, and the Windows driver
+ * fails to deal with two BARs.
+ * + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/main-loop.h" +#include "qemu/module.h" +#include "hw/scsi/scsi.h" +#include "migration/vmstate.h" +#include "scsi/constants.h" +#include "hw/pci/msi.h" +#include "hw/qdev-properties.h" +#include "vmw_pvscsi.h" +#include "trace.h" +#include "qom/object.h" + + +#define PVSCSI_USE_64BIT (true) +#define PVSCSI_PER_VECTOR_MASK (false) + +#define PVSCSI_MAX_DEVS (64) +#define PVSCSI_MSIX_NUM_VECTORS (1) + +#define PVSCSI_MAX_SG_ELEM 2048 + +#define PVSCSI_MAX_CMD_DATA_WORDS \ + (sizeof(PVSCSICmdDescSetupRings)/sizeof(uint32_t)) + +#define RS_GET_FIELD(m, field) \ + (ldl_le_pci_dma(&container_of(m, PVSCSIState, rings)->parent_obj, \ + (m)->rs_pa + offsetof(struct PVSCSIRingsState, field))) +#define RS_SET_FIELD(m, field, val) \ + (stl_le_pci_dma(&container_of(m, PVSCSIState, rings)->parent_obj, \ + (m)->rs_pa + offsetof(struct PVSCSIRingsState, field), val)) + +struct PVSCSIClass { + PCIDeviceClass parent_class; + DeviceRealize parent_dc_realize; +}; + +#define TYPE_PVSCSI "pvscsi" +OBJECT_DECLARE_TYPE(PVSCSIState, PVSCSIClass, PVSCSI) + + +/* Compatibility flags for migration */ +#define PVSCSI_COMPAT_OLD_PCI_CONFIGURATION_BIT 0 +#define PVSCSI_COMPAT_OLD_PCI_CONFIGURATION \ + (1 << PVSCSI_COMPAT_OLD_PCI_CONFIGURATION_BIT) +#define PVSCSI_COMPAT_DISABLE_PCIE_BIT 1 +#define PVSCSI_COMPAT_DISABLE_PCIE \ + (1 << PVSCSI_COMPAT_DISABLE_PCIE_BIT) + +#define PVSCSI_USE_OLD_PCI_CONFIGURATION(s) \ + ((s)->compat_flags & PVSCSI_COMPAT_OLD_PCI_CONFIGURATION) +#define PVSCSI_MSI_OFFSET(s) \ + (PVSCSI_USE_OLD_PCI_CONFIGURATION(s) ? 0x50 : 0x7c) +#define PVSCSI_EXP_EP_OFFSET (0x40) + +typedef struct PVSCSIRingInfo { + uint64_t rs_pa; + uint32_t txr_len_mask; + uint32_t rxr_len_mask; + uint32_t msg_len_mask; + uint64_t req_ring_pages_pa[PVSCSI_SETUP_RINGS_MAX_NUM_PAGES]; + uint64_t cmp_ring_pages_pa[PVSCSI_SETUP_RINGS_MAX_NUM_PAGES]; + uint64_t msg_ring_pages_pa[PVSCSI_SETUP_MSG_RING_MAX_NUM_PAGES]; + uint64_t consumed_ptr; + uint64_t filled_cmp_ptr; + uint64_t filled_msg_ptr; +} PVSCSIRingInfo; + +typedef struct PVSCSISGState { + hwaddr elemAddr; + hwaddr dataAddr; + uint32_t resid; +} PVSCSISGState; + +typedef QTAILQ_HEAD(, PVSCSIRequest) PVSCSIRequestList; + +struct PVSCSIState { + PCIDevice parent_obj; + MemoryRegion io_space; + SCSIBus bus; + QEMUBH *completion_worker; + PVSCSIRequestList pending_queue; + PVSCSIRequestList completion_queue; + + uint64_t reg_interrupt_status; /* Interrupt status register value */ + uint64_t reg_interrupt_enabled; /* Interrupt mask register value */ + uint64_t reg_command_status; /* Command status register value */ + + /* Command data adoption mechanism */ + uint64_t curr_cmd; /* Last command arrived */ + uint32_t curr_cmd_data_cntr; /* Amount of data for last command */ + + /* Collector for current command data */ + uint32_t curr_cmd_data[PVSCSI_MAX_CMD_DATA_WORDS]; + + uint8_t rings_info_valid; /* Whether data rings initialized */ + uint8_t msg_ring_info_valid; /* Whether message ring initialized */ + uint8_t use_msg; /* Whether to use message ring */ + + uint8_t msi_used; /* For migration compatibility */ + PVSCSIRingInfo rings; /* Data transfer rings manager */ + uint32_t resetting; /* Reset in progress */ + + uint32_t compat_flags; +}; + +typedef struct PVSCSIRequest { + SCSIRequest *sreq; + PVSCSIState *dev; + uint8_t sense_key; + uint8_t completed; + int lun; + QEMUSGList sgl; + PVSCSISGState sg; + struct PVSCSIRingReqDesc req; + struct PVSCSIRingCmpDesc cmp; + QTAILQ_ENTRY(PVSCSIRequest) 
next; +} PVSCSIRequest; + +/* Integer binary logarithm */ +static int +pvscsi_log2(uint32_t input) +{ + int log = 0; + assert(input > 0); + while (input >> ++log) { + } + return log; +} + +static void +pvscsi_ring_init_data(PVSCSIRingInfo *m, PVSCSICmdDescSetupRings *ri) +{ + int i; + uint32_t txr_len_log2, rxr_len_log2; + uint32_t req_ring_size, cmp_ring_size; + m->rs_pa = ri->ringsStatePPN << VMW_PAGE_SHIFT; + + req_ring_size = ri->reqRingNumPages * PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE; + cmp_ring_size = ri->cmpRingNumPages * PVSCSI_MAX_NUM_CMP_ENTRIES_PER_PAGE; + txr_len_log2 = pvscsi_log2(req_ring_size - 1); + rxr_len_log2 = pvscsi_log2(cmp_ring_size - 1); + + m->txr_len_mask = MASK(txr_len_log2); + m->rxr_len_mask = MASK(rxr_len_log2); + + m->consumed_ptr = 0; + m->filled_cmp_ptr = 0; + + for (i = 0; i < ri->reqRingNumPages; i++) { + m->req_ring_pages_pa[i] = ri->reqRingPPNs[i] << VMW_PAGE_SHIFT; + } + + for (i = 0; i < ri->cmpRingNumPages; i++) { + m->cmp_ring_pages_pa[i] = ri->cmpRingPPNs[i] << VMW_PAGE_SHIFT; + } + + RS_SET_FIELD(m, reqProdIdx, 0); + RS_SET_FIELD(m, reqConsIdx, 0); + RS_SET_FIELD(m, reqNumEntriesLog2, txr_len_log2); + + RS_SET_FIELD(m, cmpProdIdx, 0); + RS_SET_FIELD(m, cmpConsIdx, 0); + RS_SET_FIELD(m, cmpNumEntriesLog2, rxr_len_log2); + + trace_pvscsi_ring_init_data(txr_len_log2, rxr_len_log2); + + /* Flush ring state page changes */ + smp_wmb(); +} + +static int +pvscsi_ring_init_msg(PVSCSIRingInfo *m, PVSCSICmdDescSetupMsgRing *ri) +{ + int i; + uint32_t len_log2; + uint32_t ring_size; + + if (!ri->numPages || ri->numPages > PVSCSI_SETUP_MSG_RING_MAX_NUM_PAGES) { + return -1; + } + ring_size = ri->numPages * PVSCSI_MAX_NUM_MSG_ENTRIES_PER_PAGE; + len_log2 = pvscsi_log2(ring_size - 1); + + m->msg_len_mask = MASK(len_log2); + + m->filled_msg_ptr = 0; + + for (i = 0; i < ri->numPages; i++) { + m->msg_ring_pages_pa[i] = ri->ringPPNs[i] << VMW_PAGE_SHIFT; + } + + RS_SET_FIELD(m, msgProdIdx, 0); + RS_SET_FIELD(m, msgConsIdx, 0); + RS_SET_FIELD(m, msgNumEntriesLog2, len_log2); + + trace_pvscsi_ring_init_msg(len_log2); + + /* Flush ring state page changes */ + smp_wmb(); + + return 0; +} + +static void +pvscsi_ring_cleanup(PVSCSIRingInfo *mgr) +{ + mgr->rs_pa = 0; + mgr->txr_len_mask = 0; + mgr->rxr_len_mask = 0; + mgr->msg_len_mask = 0; + mgr->consumed_ptr = 0; + mgr->filled_cmp_ptr = 0; + mgr->filled_msg_ptr = 0; + memset(mgr->req_ring_pages_pa, 0, sizeof(mgr->req_ring_pages_pa)); + memset(mgr->cmp_ring_pages_pa, 0, sizeof(mgr->cmp_ring_pages_pa)); + memset(mgr->msg_ring_pages_pa, 0, sizeof(mgr->msg_ring_pages_pa)); +} + +static hwaddr +pvscsi_ring_pop_req_descr(PVSCSIRingInfo *mgr) +{ + uint32_t ready_ptr = RS_GET_FIELD(mgr, reqProdIdx); + uint32_t ring_size = PVSCSI_MAX_NUM_PAGES_REQ_RING + * PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE; + + if (ready_ptr != mgr->consumed_ptr + && ready_ptr - mgr->consumed_ptr < ring_size) { + uint32_t next_ready_ptr = + mgr->consumed_ptr++ & mgr->txr_len_mask; + uint32_t next_ready_page = + next_ready_ptr / PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE; + uint32_t inpage_idx = + next_ready_ptr % PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE; + + return mgr->req_ring_pages_pa[next_ready_page] + + inpage_idx * sizeof(PVSCSIRingReqDesc); + } else { + return 0; + } +} + +static void +pvscsi_ring_flush_req(PVSCSIRingInfo *mgr) +{ + RS_SET_FIELD(mgr, reqConsIdx, mgr->consumed_ptr); +} + +static hwaddr +pvscsi_ring_pop_cmp_descr(PVSCSIRingInfo *mgr) +{ + /* + * According to Linux driver code it explicitly verifies that number + * of requests being processed by device 
is less than the size of
+ * the completion queue, so the device may omit the completion-queue
+ * overflow check. We assume that this is true for other (Windows)
+ * drivers as well.
+ */
+
+    uint32_t free_cmp_ptr =
+        mgr->filled_cmp_ptr++ & mgr->rxr_len_mask;
+    uint32_t free_cmp_page =
+        free_cmp_ptr / PVSCSI_MAX_NUM_CMP_ENTRIES_PER_PAGE;
+    uint32_t inpage_idx =
+        free_cmp_ptr % PVSCSI_MAX_NUM_CMP_ENTRIES_PER_PAGE;
+    return mgr->cmp_ring_pages_pa[free_cmp_page] +
+           inpage_idx * sizeof(PVSCSIRingCmpDesc);
+}
+
+static hwaddr
+pvscsi_ring_pop_msg_descr(PVSCSIRingInfo *mgr)
+{
+    uint32_t free_msg_ptr =
+        mgr->filled_msg_ptr++ & mgr->msg_len_mask;
+    uint32_t free_msg_page =
+        free_msg_ptr / PVSCSI_MAX_NUM_MSG_ENTRIES_PER_PAGE;
+    uint32_t inpage_idx =
+        free_msg_ptr % PVSCSI_MAX_NUM_MSG_ENTRIES_PER_PAGE;
+    return mgr->msg_ring_pages_pa[free_msg_page] +
+           inpage_idx * sizeof(PVSCSIRingMsgDesc);
+}
+
+static void
+pvscsi_ring_flush_cmp(PVSCSIRingInfo *mgr)
+{
+    /* Flush descriptor changes */
+    smp_wmb();
+
+    trace_pvscsi_ring_flush_cmp(mgr->filled_cmp_ptr);
+
+    RS_SET_FIELD(mgr, cmpProdIdx, mgr->filled_cmp_ptr);
+}
+
+static bool
+pvscsi_ring_msg_has_room(PVSCSIRingInfo *mgr)
+{
+    uint32_t prodIdx = RS_GET_FIELD(mgr, msgProdIdx);
+    uint32_t consIdx = RS_GET_FIELD(mgr, msgConsIdx);
+
+    return (prodIdx - consIdx) < (mgr->msg_len_mask + 1);
+}
+
+static void
+pvscsi_ring_flush_msg(PVSCSIRingInfo *mgr)
+{
+    /* Flush descriptor changes */
+    smp_wmb();
+
+    trace_pvscsi_ring_flush_msg(mgr->filled_msg_ptr);
+
+    RS_SET_FIELD(mgr, msgProdIdx, mgr->filled_msg_ptr);
+}
+
+static void
+pvscsi_reset_state(PVSCSIState *s)
+{
+    s->curr_cmd = PVSCSI_CMD_FIRST;
+    s->curr_cmd_data_cntr = 0;
+    s->reg_command_status = PVSCSI_COMMAND_PROCESSING_SUCCEEDED;
+    s->reg_interrupt_status = 0;
+    pvscsi_ring_cleanup(&s->rings);
+    s->rings_info_valid = FALSE;
+    s->msg_ring_info_valid = FALSE;
+    QTAILQ_INIT(&s->pending_queue);
+    QTAILQ_INIT(&s->completion_queue);
+}
+
+static void
+pvscsi_update_irq_status(PVSCSIState *s)
+{
+    PCIDevice *d = PCI_DEVICE(s);
+    bool should_raise = s->reg_interrupt_enabled & s->reg_interrupt_status;
+
+    trace_pvscsi_update_irq_level(should_raise, s->reg_interrupt_enabled,
+                                  s->reg_interrupt_status);
+
+    if (msi_enabled(d)) {
+        if (should_raise) {
+            trace_pvscsi_update_irq_msi();
+            msi_notify(d, PVSCSI_VECTOR_COMPLETION);
+        }
+        return;
+    }
+
+    pci_set_irq(d, !!should_raise);
+}
+
+static void
+pvscsi_raise_completion_interrupt(PVSCSIState *s)
+{
+    s->reg_interrupt_status |= PVSCSI_INTR_CMPL_0;
+
+    /* Memory barrier to flush interrupt status register changes */
+    smp_wmb();
+
+    pvscsi_update_irq_status(s);
+}
+
+static void
+pvscsi_raise_message_interrupt(PVSCSIState *s)
+{
+    s->reg_interrupt_status |= PVSCSI_INTR_MSG_0;
+
+    /* Memory barrier to flush interrupt status register changes */
+    smp_wmb();
+
+    pvscsi_update_irq_status(s);
+}
+
+static void
+pvscsi_cmp_ring_put(PVSCSIState *s, struct PVSCSIRingCmpDesc *cmp_desc)
+{
+    hwaddr cmp_descr_pa;
+
+    cmp_descr_pa = pvscsi_ring_pop_cmp_descr(&s->rings);
+    trace_pvscsi_cmp_ring_put(cmp_descr_pa);
+    cpu_physical_memory_write(cmp_descr_pa, cmp_desc, sizeof(*cmp_desc));
+}
+
+static void
+pvscsi_msg_ring_put(PVSCSIState *s, struct PVSCSIRingMsgDesc *msg_desc)
+{
+    hwaddr msg_descr_pa;
+
+    msg_descr_pa = pvscsi_ring_pop_msg_descr(&s->rings);
+    trace_pvscsi_msg_ring_put(msg_descr_pa);
+    cpu_physical_memory_write(msg_descr_pa, msg_desc, sizeof(*msg_desc));
+}
+
+static void
+pvscsi_process_completion_queue(void *opaque)
+{
+    PVSCSIState *s = opaque;
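+    /*
+     * Editorial sketch of the completion flow: requests are queued on
+     * completion_queue by pvscsi_complete_request() and drained here in a
+     * bottom half, so a burst of completions is written back with a single
+     * ring flush and a single completion interrupt.
+     */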
+    PVSCSIRequest *pvscsi_req;
+    bool has_completed = false;
+
+    while (!QTAILQ_EMPTY(&s->completion_queue)) {
+        pvscsi_req = QTAILQ_FIRST(&s->completion_queue);
+        QTAILQ_REMOVE(&s->completion_queue, pvscsi_req, next);
+        pvscsi_cmp_ring_put(s, &pvscsi_req->cmp);
+        g_free(pvscsi_req);
+        has_completed = true;
+    }
+
+    if (has_completed) {
+        pvscsi_ring_flush_cmp(&s->rings);
+        pvscsi_raise_completion_interrupt(s);
+    }
+}
+
+static void
+pvscsi_reset_adapter(PVSCSIState *s)
+{
+    s->resetting++;
+    qbus_reset_all(BUS(&s->bus));
+    s->resetting--;
+    pvscsi_process_completion_queue(s);
+    assert(QTAILQ_EMPTY(&s->pending_queue));
+    pvscsi_reset_state(s);
+}
+
+static void
+pvscsi_schedule_completion_processing(PVSCSIState *s)
+{
+    /* Try putting more complete requests on the ring. */
+    if (!QTAILQ_EMPTY(&s->completion_queue)) {
+        qemu_bh_schedule(s->completion_worker);
+    }
+}
+
+static void
+pvscsi_complete_request(PVSCSIState *s, PVSCSIRequest *r)
+{
+    assert(!r->completed);
+
+    trace_pvscsi_complete_request(r->cmp.context, r->cmp.dataLen,
+                                  r->sense_key);
+    if (r->sreq != NULL) {
+        scsi_req_unref(r->sreq);
+        r->sreq = NULL;
+    }
+    r->completed = 1;
+    QTAILQ_REMOVE(&s->pending_queue, r, next);
+    QTAILQ_INSERT_TAIL(&s->completion_queue, r, next);
+    pvscsi_schedule_completion_processing(s);
+}
+
+static QEMUSGList *pvscsi_get_sg_list(SCSIRequest *r)
+{
+    PVSCSIRequest *req = r->hba_private;
+
+    trace_pvscsi_get_sg_list(req->sgl.nsg, req->sgl.size);
+
+    return &req->sgl;
+}
+
+static void
+pvscsi_get_next_sg_elem(PVSCSISGState *sg)
+{
+    struct PVSCSISGElement elem;
+
+    cpu_physical_memory_read(sg->elemAddr, &elem, sizeof(elem));
+    if ((elem.flags & ~PVSCSI_KNOWN_FLAGS) != 0) {
+        /*
+         * The PVSCSI_SGE_FLAG_CHAIN_ELEMENT flag is described in the
+         * header file, but its value is unknown. The flag requires
+         * additional processing, so a trace is emitted here to catch
+         * it some day and implement it properly.
+         */
+        trace_pvscsi_get_next_sg_elem(elem.flags);
+    }
+
+    sg->elemAddr += sizeof(elem);
+    sg->dataAddr = elem.addr;
+    sg->resid = elem.length;
+}
+
+static void
+pvscsi_write_sense(PVSCSIRequest *r, uint8_t *sense, int len)
+{
+    r->cmp.senseLen = MIN(r->req.senseLen, len);
+    r->sense_key = sense[(sense[0] & 2) ? 1 : 2];
+    cpu_physical_memory_write(r->req.senseAddr, sense, r->cmp.senseLen);
+}
+
+static void
+pvscsi_command_failed(SCSIRequest *req)
+{
+    PVSCSIRequest *pvscsi_req = req->hba_private;
+    PVSCSIState *s;
+
+    if (!pvscsi_req) {
+        trace_pvscsi_command_complete_not_found(req->tag);
+        return;
+    }
+    s = pvscsi_req->dev;
+
+    switch (req->host_status) {
+    case SCSI_HOST_NO_LUN:
+        pvscsi_req->cmp.hostStatus = BTSTAT_LUNMISMATCH;
+        break;
+    case SCSI_HOST_BUSY:
+        pvscsi_req->cmp.hostStatus = BTSTAT_ABORTQUEUE;
+        break;
+    case SCSI_HOST_TIME_OUT:
+    case SCSI_HOST_ABORTED:
+        pvscsi_req->cmp.hostStatus = BTSTAT_SENTRST;
+        break;
+    case SCSI_HOST_BAD_RESPONSE:
+        pvscsi_req->cmp.hostStatus = BTSTAT_SELTIMEO;
+        break;
+    case SCSI_HOST_RESET:
+        pvscsi_req->cmp.hostStatus = BTSTAT_BUSRESET;
+        break;
+    default:
+        pvscsi_req->cmp.hostStatus = BTSTAT_HASOFTWARE;
+        break;
+    }
+    pvscsi_req->cmp.scsiStatus = GOOD;
+    qemu_sglist_destroy(&pvscsi_req->sgl);
+    pvscsi_complete_request(s, pvscsi_req);
+}
+
+static void
+pvscsi_command_complete(SCSIRequest *req, size_t resid)
+{
+    PVSCSIRequest *pvscsi_req = req->hba_private;
+    PVSCSIState *s;
+
+    if (!pvscsi_req) {
+        trace_pvscsi_command_complete_not_found(req->tag);
+        return;
+    }
+    s = pvscsi_req->dev;
+
+    if (resid) {
+        /* Short transfer.
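+         * Example (illustrative): a 4 KiB READ that transfers only 512
+         * bytes leaves resid = 3584 and is reported to the guest as
+         * BTSTAT_DATARUN (data overrun/underrun).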
*/ + trace_pvscsi_command_complete_data_run(); + pvscsi_req->cmp.hostStatus = BTSTAT_DATARUN; + } + + pvscsi_req->cmp.scsiStatus = req->status; + if (pvscsi_req->cmp.scsiStatus == CHECK_CONDITION) { + uint8_t sense[SCSI_SENSE_BUF_SIZE]; + int sense_len = + scsi_req_get_sense(pvscsi_req->sreq, sense, sizeof(sense)); + + trace_pvscsi_command_complete_sense_len(sense_len); + pvscsi_write_sense(pvscsi_req, sense, sense_len); + } + qemu_sglist_destroy(&pvscsi_req->sgl); + pvscsi_complete_request(s, pvscsi_req); +} + +static void +pvscsi_send_msg(PVSCSIState *s, SCSIDevice *dev, uint32_t msg_type) +{ + if (s->msg_ring_info_valid && pvscsi_ring_msg_has_room(&s->rings)) { + PVSCSIMsgDescDevStatusChanged msg = {0}; + + msg.type = msg_type; + msg.bus = dev->channel; + msg.target = dev->id; + msg.lun[1] = dev->lun; + + pvscsi_msg_ring_put(s, (PVSCSIRingMsgDesc *)&msg); + pvscsi_ring_flush_msg(&s->rings); + pvscsi_raise_message_interrupt(s); + } +} + +static void +pvscsi_hotplug(HotplugHandler *hotplug_dev, DeviceState *dev, Error **errp) +{ + PVSCSIState *s = PVSCSI(hotplug_dev); + + pvscsi_send_msg(s, SCSI_DEVICE(dev), PVSCSI_MSG_DEV_ADDED); +} + +static void +pvscsi_hot_unplug(HotplugHandler *hotplug_dev, DeviceState *dev, Error **errp) +{ + PVSCSIState *s = PVSCSI(hotplug_dev); + + pvscsi_send_msg(s, SCSI_DEVICE(dev), PVSCSI_MSG_DEV_REMOVED); + qdev_simple_device_unplug_cb(hotplug_dev, dev, errp); +} + +static void +pvscsi_request_cancelled(SCSIRequest *req) +{ + PVSCSIRequest *pvscsi_req = req->hba_private; + PVSCSIState *s = pvscsi_req->dev; + + if (pvscsi_req->completed) { + return; + } + + if (pvscsi_req->dev->resetting) { + pvscsi_req->cmp.hostStatus = BTSTAT_BUSRESET; + } else { + pvscsi_req->cmp.hostStatus = BTSTAT_ABORTQUEUE; + } + + pvscsi_complete_request(s, pvscsi_req); +} + +static SCSIDevice* +pvscsi_device_find(PVSCSIState *s, int channel, int target, + uint8_t *requested_lun, uint8_t *target_lun) +{ + if (requested_lun[0] || requested_lun[2] || requested_lun[3] || + requested_lun[4] || requested_lun[5] || requested_lun[6] || + requested_lun[7] || (target > PVSCSI_MAX_DEVS)) { + return NULL; + } else { + *target_lun = requested_lun[1]; + return scsi_device_find(&s->bus, channel, target, *target_lun); + } +} + +static PVSCSIRequest * +pvscsi_queue_pending_descriptor(PVSCSIState *s, SCSIDevice **d, + struct PVSCSIRingReqDesc *descr) +{ + PVSCSIRequest *pvscsi_req; + uint8_t lun; + + pvscsi_req = g_malloc0(sizeof(*pvscsi_req)); + pvscsi_req->dev = s; + pvscsi_req->req = *descr; + pvscsi_req->cmp.context = pvscsi_req->req.context; + QTAILQ_INSERT_TAIL(&s->pending_queue, pvscsi_req, next); + + *d = pvscsi_device_find(s, descr->bus, descr->target, descr->lun, &lun); + if (*d) { + pvscsi_req->lun = lun; + } + + return pvscsi_req; +} + +static void +pvscsi_convert_sglist(PVSCSIRequest *r) +{ + uint32_t chunk_size, elmcnt = 0; + uint64_t data_length = r->req.dataLen; + PVSCSISGState sg = r->sg; + while (data_length && elmcnt < PVSCSI_MAX_SG_ELEM) { + while (!sg.resid && elmcnt++ < PVSCSI_MAX_SG_ELEM) { + pvscsi_get_next_sg_elem(&sg); + trace_pvscsi_convert_sglist(r->req.context, r->sg.dataAddr, + r->sg.resid); + } + chunk_size = MIN(data_length, sg.resid); + if (chunk_size) { + qemu_sglist_add(&r->sgl, sg.dataAddr, chunk_size); + } + + sg.dataAddr += chunk_size; + data_length -= chunk_size; + sg.resid -= chunk_size; + } +} + +static void +pvscsi_build_sglist(PVSCSIState *s, PVSCSIRequest *r) +{ + PCIDevice *d = PCI_DEVICE(s); + + pci_dma_sglist_init(&r->sgl, d, 1); + if (r->req.flags & 
PVSCSI_FLAG_CMD_WITH_SG_LIST) { + pvscsi_convert_sglist(r); + } else { + qemu_sglist_add(&r->sgl, r->req.dataAddr, r->req.dataLen); + } +} + +static void +pvscsi_process_request_descriptor(PVSCSIState *s, + struct PVSCSIRingReqDesc *descr) +{ + SCSIDevice *d; + PVSCSIRequest *r = pvscsi_queue_pending_descriptor(s, &d, descr); + int64_t n; + + trace_pvscsi_process_req_descr(descr->cdb[0], descr->context); + + if (!d) { + r->cmp.hostStatus = BTSTAT_SELTIMEO; + trace_pvscsi_process_req_descr_unknown_device(); + pvscsi_complete_request(s, r); + return; + } + + if (descr->flags & PVSCSI_FLAG_CMD_WITH_SG_LIST) { + r->sg.elemAddr = descr->dataAddr; + } + + r->sreq = scsi_req_new(d, descr->context, r->lun, descr->cdb, r); + if (r->sreq->cmd.mode == SCSI_XFER_FROM_DEV && + (descr->flags & PVSCSI_FLAG_CMD_DIR_TODEVICE)) { + r->cmp.hostStatus = BTSTAT_BADMSG; + trace_pvscsi_process_req_descr_invalid_dir(); + scsi_req_cancel(r->sreq); + return; + } + if (r->sreq->cmd.mode == SCSI_XFER_TO_DEV && + (descr->flags & PVSCSI_FLAG_CMD_DIR_TOHOST)) { + r->cmp.hostStatus = BTSTAT_BADMSG; + trace_pvscsi_process_req_descr_invalid_dir(); + scsi_req_cancel(r->sreq); + return; + } + + pvscsi_build_sglist(s, r); + n = scsi_req_enqueue(r->sreq); + + if (n) { + scsi_req_continue(r->sreq); + } +} + +static void +pvscsi_process_io(PVSCSIState *s) +{ + PVSCSIRingReqDesc descr; + hwaddr next_descr_pa; + + if (!s->rings_info_valid) { + return; + } + + while ((next_descr_pa = pvscsi_ring_pop_req_descr(&s->rings)) != 0) { + + /* Only read after production index verification */ + smp_rmb(); + + trace_pvscsi_process_io(next_descr_pa); + cpu_physical_memory_read(next_descr_pa, &descr, sizeof(descr)); + pvscsi_process_request_descriptor(s, &descr); + } + + pvscsi_ring_flush_req(&s->rings); +} + +static void +pvscsi_dbg_dump_tx_rings_config(PVSCSICmdDescSetupRings *rc) +{ + int i; + trace_pvscsi_tx_rings_ppn("Rings State", rc->ringsStatePPN); + + trace_pvscsi_tx_rings_num_pages("Request Ring", rc->reqRingNumPages); + for (i = 0; i < rc->reqRingNumPages; i++) { + trace_pvscsi_tx_rings_ppn("Request Ring", rc->reqRingPPNs[i]); + } + + trace_pvscsi_tx_rings_num_pages("Confirm Ring", rc->cmpRingNumPages); + for (i = 0; i < rc->cmpRingNumPages; i++) { + trace_pvscsi_tx_rings_ppn("Confirm Ring", rc->cmpRingPPNs[i]); + } +} + +static uint64_t +pvscsi_on_cmd_config(PVSCSIState *s) +{ + trace_pvscsi_on_cmd_noimpl("PVSCSI_CMD_CONFIG"); + return PVSCSI_COMMAND_PROCESSING_FAILED; +} + +static uint64_t +pvscsi_on_cmd_unplug(PVSCSIState *s) +{ + trace_pvscsi_on_cmd_noimpl("PVSCSI_CMD_DEVICE_UNPLUG"); + return PVSCSI_COMMAND_PROCESSING_FAILED; +} + +static uint64_t +pvscsi_on_issue_scsi(PVSCSIState *s) +{ + trace_pvscsi_on_cmd_noimpl("PVSCSI_CMD_ISSUE_SCSI"); + return PVSCSI_COMMAND_PROCESSING_FAILED; +} + +static uint64_t +pvscsi_on_cmd_setup_rings(PVSCSIState *s) +{ + PVSCSICmdDescSetupRings *rc = + (PVSCSICmdDescSetupRings *) s->curr_cmd_data; + + trace_pvscsi_on_cmd_arrived("PVSCSI_CMD_SETUP_RINGS"); + + if (!rc->reqRingNumPages + || rc->reqRingNumPages > PVSCSI_SETUP_RINGS_MAX_NUM_PAGES + || !rc->cmpRingNumPages + || rc->cmpRingNumPages > PVSCSI_SETUP_RINGS_MAX_NUM_PAGES) { + return PVSCSI_COMMAND_PROCESSING_FAILED; + } + + pvscsi_dbg_dump_tx_rings_config(rc); + pvscsi_ring_init_data(&s->rings, rc); + + s->rings_info_valid = TRUE; + return PVSCSI_COMMAND_PROCESSING_SUCCEEDED; +} + +static uint64_t +pvscsi_on_cmd_abort(PVSCSIState *s) +{ + PVSCSICmdDescAbortCmd *cmd = (PVSCSICmdDescAbortCmd *) s->curr_cmd_data; + PVSCSIRequest *r, *next; + + 
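+    /*
+     * Editorial note: the abort identifies its victim by the guest-supplied
+     * context cookie; a matching pending request is cancelled and completes
+     * through pvscsi_request_cancelled() with hostStatus BTSTAT_ABORTQUEUE.
+     */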
trace_pvscsi_on_cmd_abort(cmd->context, cmd->target); + + QTAILQ_FOREACH_SAFE(r, &s->pending_queue, next, next) { + if (r->req.context == cmd->context) { + break; + } + } + if (r) { + assert(!r->completed); + r->cmp.hostStatus = BTSTAT_ABORTQUEUE; + scsi_req_cancel(r->sreq); + } + + return PVSCSI_COMMAND_PROCESSING_SUCCEEDED; +} + +static uint64_t +pvscsi_on_cmd_unknown(PVSCSIState *s) +{ + trace_pvscsi_on_cmd_unknown_data(s->curr_cmd_data[0]); + return PVSCSI_COMMAND_PROCESSING_FAILED; +} + +static uint64_t +pvscsi_on_cmd_reset_device(PVSCSIState *s) +{ + uint8_t target_lun = 0; + struct PVSCSICmdDescResetDevice *cmd = + (struct PVSCSICmdDescResetDevice *) s->curr_cmd_data; + SCSIDevice *sdev; + + sdev = pvscsi_device_find(s, 0, cmd->target, cmd->lun, &target_lun); + + trace_pvscsi_on_cmd_reset_dev(cmd->target, (int) target_lun, sdev); + + if (sdev != NULL) { + s->resetting++; + device_legacy_reset(&sdev->qdev); + s->resetting--; + return PVSCSI_COMMAND_PROCESSING_SUCCEEDED; + } + + return PVSCSI_COMMAND_PROCESSING_FAILED; +} + +static uint64_t +pvscsi_on_cmd_reset_bus(PVSCSIState *s) +{ + trace_pvscsi_on_cmd_arrived("PVSCSI_CMD_RESET_BUS"); + + s->resetting++; + qbus_reset_all(BUS(&s->bus)); + s->resetting--; + return PVSCSI_COMMAND_PROCESSING_SUCCEEDED; +} + +static uint64_t +pvscsi_on_cmd_setup_msg_ring(PVSCSIState *s) +{ + PVSCSICmdDescSetupMsgRing *rc = + (PVSCSICmdDescSetupMsgRing *) s->curr_cmd_data; + + trace_pvscsi_on_cmd_arrived("PVSCSI_CMD_SETUP_MSG_RING"); + + if (!s->use_msg) { + return PVSCSI_COMMAND_PROCESSING_FAILED; + } + + if (s->rings_info_valid) { + if (pvscsi_ring_init_msg(&s->rings, rc) < 0) { + return PVSCSI_COMMAND_PROCESSING_FAILED; + } + s->msg_ring_info_valid = TRUE; + } + return sizeof(PVSCSICmdDescSetupMsgRing) / sizeof(uint32_t); +} + +static uint64_t +pvscsi_on_cmd_adapter_reset(PVSCSIState *s) +{ + trace_pvscsi_on_cmd_arrived("PVSCSI_CMD_ADAPTER_RESET"); + + pvscsi_reset_adapter(s); + return PVSCSI_COMMAND_PROCESSING_SUCCEEDED; +} + +static const struct { + int data_size; + uint64_t (*handler_fn)(PVSCSIState *s); +} pvscsi_commands[] = { + [PVSCSI_CMD_FIRST] = { + .data_size = 0, + .handler_fn = pvscsi_on_cmd_unknown, + }, + + /* Not implemented, data size defined based on what arrives on windows */ + [PVSCSI_CMD_CONFIG] = { + .data_size = 6 * sizeof(uint32_t), + .handler_fn = pvscsi_on_cmd_config, + }, + + /* Command not implemented, data size is unknown */ + [PVSCSI_CMD_ISSUE_SCSI] = { + .data_size = 0, + .handler_fn = pvscsi_on_issue_scsi, + }, + + /* Command not implemented, data size is unknown */ + [PVSCSI_CMD_DEVICE_UNPLUG] = { + .data_size = 0, + .handler_fn = pvscsi_on_cmd_unplug, + }, + + [PVSCSI_CMD_SETUP_RINGS] = { + .data_size = sizeof(PVSCSICmdDescSetupRings), + .handler_fn = pvscsi_on_cmd_setup_rings, + }, + + [PVSCSI_CMD_RESET_DEVICE] = { + .data_size = sizeof(struct PVSCSICmdDescResetDevice), + .handler_fn = pvscsi_on_cmd_reset_device, + }, + + [PVSCSI_CMD_RESET_BUS] = { + .data_size = 0, + .handler_fn = pvscsi_on_cmd_reset_bus, + }, + + [PVSCSI_CMD_SETUP_MSG_RING] = { + .data_size = sizeof(PVSCSICmdDescSetupMsgRing), + .handler_fn = pvscsi_on_cmd_setup_msg_ring, + }, + + [PVSCSI_CMD_ADAPTER_RESET] = { + .data_size = 0, + .handler_fn = pvscsi_on_cmd_adapter_reset, + }, + + [PVSCSI_CMD_ABORT_CMD] = { + .data_size = sizeof(struct PVSCSICmdDescAbortCmd), + .handler_fn = pvscsi_on_cmd_abort, + }, +}; + +static void +pvscsi_do_command_processing(PVSCSIState *s) +{ + size_t bytes_arrived = s->curr_cmd_data_cntr * sizeof(uint32_t); + + 
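+    /*
+     * Worked example (illustrative): PVSCSI_CMD_SETUP_MSG_RING declares
+     * data_size = sizeof(PVSCSICmdDescSetupMsgRing), so its handler only
+     * runs once the guest has streamed that many bytes, one 32-bit word at
+     * a time, into the COMMAND_DATA register; until then the command status
+     * register reads PVSCSI_COMMAND_NOT_ENOUGH_DATA.
+     */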
assert(s->curr_cmd < PVSCSI_CMD_LAST); + if (bytes_arrived >= pvscsi_commands[s->curr_cmd].data_size) { + s->reg_command_status = pvscsi_commands[s->curr_cmd].handler_fn(s); + s->curr_cmd = PVSCSI_CMD_FIRST; + s->curr_cmd_data_cntr = 0; + } +} + +static void +pvscsi_on_command_data(PVSCSIState *s, uint32_t value) +{ + size_t bytes_arrived = s->curr_cmd_data_cntr * sizeof(uint32_t); + + assert(bytes_arrived < sizeof(s->curr_cmd_data)); + s->curr_cmd_data[s->curr_cmd_data_cntr++] = value; + + pvscsi_do_command_processing(s); +} + +static void +pvscsi_on_command(PVSCSIState *s, uint64_t cmd_id) +{ + if ((cmd_id > PVSCSI_CMD_FIRST) && (cmd_id < PVSCSI_CMD_LAST)) { + s->curr_cmd = cmd_id; + } else { + s->curr_cmd = PVSCSI_CMD_FIRST; + trace_pvscsi_on_cmd_unknown(cmd_id); + } + + s->curr_cmd_data_cntr = 0; + s->reg_command_status = PVSCSI_COMMAND_NOT_ENOUGH_DATA; + + pvscsi_do_command_processing(s); +} + +static void +pvscsi_io_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + PVSCSIState *s = opaque; + + switch (addr) { + case PVSCSI_REG_OFFSET_COMMAND: + pvscsi_on_command(s, val); + break; + + case PVSCSI_REG_OFFSET_COMMAND_DATA: + pvscsi_on_command_data(s, (uint32_t) val); + break; + + case PVSCSI_REG_OFFSET_INTR_STATUS: + trace_pvscsi_io_write("PVSCSI_REG_OFFSET_INTR_STATUS", val); + s->reg_interrupt_status &= ~val; + pvscsi_update_irq_status(s); + pvscsi_schedule_completion_processing(s); + break; + + case PVSCSI_REG_OFFSET_INTR_MASK: + trace_pvscsi_io_write("PVSCSI_REG_OFFSET_INTR_MASK", val); + s->reg_interrupt_enabled = val; + pvscsi_update_irq_status(s); + break; + + case PVSCSI_REG_OFFSET_KICK_NON_RW_IO: + trace_pvscsi_io_write("PVSCSI_REG_OFFSET_KICK_NON_RW_IO", val); + pvscsi_process_io(s); + break; + + case PVSCSI_REG_OFFSET_KICK_RW_IO: + trace_pvscsi_io_write("PVSCSI_REG_OFFSET_KICK_RW_IO", val); + pvscsi_process_io(s); + break; + + case PVSCSI_REG_OFFSET_DEBUG: + trace_pvscsi_io_write("PVSCSI_REG_OFFSET_DEBUG", val); + break; + + default: + trace_pvscsi_io_write_unknown(addr, size, val); + break; + } + +} + +static uint64_t +pvscsi_io_read(void *opaque, hwaddr addr, unsigned size) +{ + PVSCSIState *s = opaque; + + switch (addr) { + case PVSCSI_REG_OFFSET_INTR_STATUS: + trace_pvscsi_io_read("PVSCSI_REG_OFFSET_INTR_STATUS", + s->reg_interrupt_status); + return s->reg_interrupt_status; + + case PVSCSI_REG_OFFSET_INTR_MASK: + trace_pvscsi_io_read("PVSCSI_REG_OFFSET_INTR_MASK", + s->reg_interrupt_status); + return s->reg_interrupt_enabled; + + case PVSCSI_REG_OFFSET_COMMAND_STATUS: + trace_pvscsi_io_read("PVSCSI_REG_OFFSET_COMMAND_STATUS", + s->reg_interrupt_status); + return s->reg_command_status; + + default: + trace_pvscsi_io_read_unknown(addr, size); + return 0; + } +} + + +static void +pvscsi_init_msi(PVSCSIState *s) +{ + int res; + PCIDevice *d = PCI_DEVICE(s); + + res = msi_init(d, PVSCSI_MSI_OFFSET(s), PVSCSI_MSIX_NUM_VECTORS, + PVSCSI_USE_64BIT, PVSCSI_PER_VECTOR_MASK, NULL); + if (res < 0) { + trace_pvscsi_init_msi_fail(res); + s->msi_used = false; + } else { + s->msi_used = true; + } +} + +static void +pvscsi_cleanup_msi(PVSCSIState *s) +{ + PCIDevice *d = PCI_DEVICE(s); + + msi_uninit(d); +} + +static const MemoryRegionOps pvscsi_ops = { + .read = pvscsi_io_read, + .write = pvscsi_io_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .impl = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +static const struct SCSIBusInfo pvscsi_scsi_info = { + .tcq = true, + .max_target = PVSCSI_MAX_DEVS, + .max_channel = 0, + .max_lun = 0, + + .get_sg_list = 
pvscsi_get_sg_list, + .complete = pvscsi_command_complete, + .cancel = pvscsi_request_cancelled, + .fail = pvscsi_command_failed, +}; + +static void +pvscsi_realizefn(PCIDevice *pci_dev, Error **errp) +{ + PVSCSIState *s = PVSCSI(pci_dev); + + trace_pvscsi_state("init"); + + /* PCI subsystem ID, subsystem vendor ID, revision */ + if (PVSCSI_USE_OLD_PCI_CONFIGURATION(s)) { + pci_set_word(pci_dev->config + PCI_SUBSYSTEM_ID, 0x1000); + } else { + pci_set_word(pci_dev->config + PCI_SUBSYSTEM_VENDOR_ID, + PCI_VENDOR_ID_VMWARE); + pci_set_word(pci_dev->config + PCI_SUBSYSTEM_ID, + PCI_DEVICE_ID_VMWARE_PVSCSI); + pci_config_set_revision(pci_dev->config, 0x2); + } + + /* PCI latency timer = 255 */ + pci_dev->config[PCI_LATENCY_TIMER] = 0xff; + + /* Interrupt pin A */ + pci_config_set_interrupt_pin(pci_dev->config, 1); + + memory_region_init_io(&s->io_space, OBJECT(s), &pvscsi_ops, s, + "pvscsi-io", PVSCSI_MEM_SPACE_SIZE); + pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &s->io_space); + + pvscsi_init_msi(s); + + if (pci_is_express(pci_dev) && pci_bus_is_express(pci_get_bus(pci_dev))) { + pcie_endpoint_cap_init(pci_dev, PVSCSI_EXP_EP_OFFSET); + } + + s->completion_worker = qemu_bh_new(pvscsi_process_completion_queue, s); + + scsi_bus_init(&s->bus, sizeof(s->bus), DEVICE(pci_dev), &pvscsi_scsi_info); + /* override default SCSI bus hotplug-handler, with pvscsi's one */ + qbus_set_hotplug_handler(BUS(&s->bus), OBJECT(s)); + pvscsi_reset_state(s); +} + +static void +pvscsi_uninit(PCIDevice *pci_dev) +{ + PVSCSIState *s = PVSCSI(pci_dev); + + trace_pvscsi_state("uninit"); + qemu_bh_delete(s->completion_worker); + + pvscsi_cleanup_msi(s); +} + +static void +pvscsi_reset(DeviceState *dev) +{ + PCIDevice *d = PCI_DEVICE(dev); + PVSCSIState *s = PVSCSI(d); + + trace_pvscsi_state("reset"); + pvscsi_reset_adapter(s); +} + +static int +pvscsi_pre_save(void *opaque) +{ + PVSCSIState *s = (PVSCSIState *) opaque; + + trace_pvscsi_state("presave"); + + assert(QTAILQ_EMPTY(&s->pending_queue)); + assert(QTAILQ_EMPTY(&s->completion_queue)); + + return 0; +} + +static int +pvscsi_post_load(void *opaque, int version_id) +{ + trace_pvscsi_state("postload"); + return 0; +} + +static bool pvscsi_vmstate_need_pcie_device(void *opaque) +{ + PVSCSIState *s = PVSCSI(opaque); + + return !(s->compat_flags & PVSCSI_COMPAT_DISABLE_PCIE); +} + +static bool pvscsi_vmstate_test_pci_device(void *opaque, int version_id) +{ + return !pvscsi_vmstate_need_pcie_device(opaque); +} + +static const VMStateDescription vmstate_pvscsi_pcie_device = { + .name = "pvscsi/pcie", + .needed = pvscsi_vmstate_need_pcie_device, + .fields = (VMStateField[]) { + VMSTATE_PCI_DEVICE(parent_obj, PVSCSIState), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_pvscsi = { + .name = "pvscsi", + .version_id = 0, + .minimum_version_id = 0, + .pre_save = pvscsi_pre_save, + .post_load = pvscsi_post_load, + .fields = (VMStateField[]) { + VMSTATE_STRUCT_TEST(parent_obj, PVSCSIState, + pvscsi_vmstate_test_pci_device, 0, + vmstate_pci_device, PCIDevice), + VMSTATE_UINT8(msi_used, PVSCSIState), + VMSTATE_UINT32(resetting, PVSCSIState), + VMSTATE_UINT64(reg_interrupt_status, PVSCSIState), + VMSTATE_UINT64(reg_interrupt_enabled, PVSCSIState), + VMSTATE_UINT64(reg_command_status, PVSCSIState), + VMSTATE_UINT64(curr_cmd, PVSCSIState), + VMSTATE_UINT32(curr_cmd_data_cntr, PVSCSIState), + VMSTATE_UINT32_ARRAY(curr_cmd_data, PVSCSIState, + ARRAY_SIZE(((PVSCSIState *)NULL)->curr_cmd_data)), + VMSTATE_UINT8(rings_info_valid, PVSCSIState), + 
VMSTATE_UINT8(msg_ring_info_valid, PVSCSIState), + VMSTATE_UINT8(use_msg, PVSCSIState), + + VMSTATE_UINT64(rings.rs_pa, PVSCSIState), + VMSTATE_UINT32(rings.txr_len_mask, PVSCSIState), + VMSTATE_UINT32(rings.rxr_len_mask, PVSCSIState), + VMSTATE_UINT64_ARRAY(rings.req_ring_pages_pa, PVSCSIState, + PVSCSI_SETUP_RINGS_MAX_NUM_PAGES), + VMSTATE_UINT64_ARRAY(rings.cmp_ring_pages_pa, PVSCSIState, + PVSCSI_SETUP_RINGS_MAX_NUM_PAGES), + VMSTATE_UINT64(rings.consumed_ptr, PVSCSIState), + VMSTATE_UINT64(rings.filled_cmp_ptr, PVSCSIState), + + VMSTATE_END_OF_LIST() + }, + .subsections = (const VMStateDescription*[]) { + &vmstate_pvscsi_pcie_device, + NULL + } +}; + +static Property pvscsi_properties[] = { + DEFINE_PROP_UINT8("use_msg", PVSCSIState, use_msg, 1), + DEFINE_PROP_BIT("x-old-pci-configuration", PVSCSIState, compat_flags, + PVSCSI_COMPAT_OLD_PCI_CONFIGURATION_BIT, false), + DEFINE_PROP_BIT("x-disable-pcie", PVSCSIState, compat_flags, + PVSCSI_COMPAT_DISABLE_PCIE_BIT, false), + DEFINE_PROP_END_OF_LIST(), +}; + +static void pvscsi_realize(DeviceState *qdev, Error **errp) +{ + PVSCSIClass *pvs_c = PVSCSI_GET_CLASS(qdev); + PCIDevice *pci_dev = PCI_DEVICE(qdev); + PVSCSIState *s = PVSCSI(qdev); + + if (!(s->compat_flags & PVSCSI_COMPAT_DISABLE_PCIE)) { + pci_dev->cap_present |= QEMU_PCI_CAP_EXPRESS; + } + + pvs_c->parent_dc_realize(qdev, errp); +} + +static void pvscsi_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + PVSCSIClass *pvs_k = PVSCSI_CLASS(klass); + HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(klass); + + k->realize = pvscsi_realizefn; + k->exit = pvscsi_uninit; + k->vendor_id = PCI_VENDOR_ID_VMWARE; + k->device_id = PCI_DEVICE_ID_VMWARE_PVSCSI; + k->class_id = PCI_CLASS_STORAGE_SCSI; + k->subsystem_id = 0x1000; + device_class_set_parent_realize(dc, pvscsi_realize, + &pvs_k->parent_dc_realize); + dc->reset = pvscsi_reset; + dc->vmsd = &vmstate_pvscsi; + device_class_set_props(dc, pvscsi_properties); + set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); + hc->unplug = pvscsi_hot_unplug; + hc->plug = pvscsi_hotplug; +} + +static const TypeInfo pvscsi_info = { + .name = TYPE_PVSCSI, + .parent = TYPE_PCI_DEVICE, + .class_size = sizeof(PVSCSIClass), + .instance_size = sizeof(PVSCSIState), + .class_init = pvscsi_class_init, + .interfaces = (InterfaceInfo[]) { + { TYPE_HOTPLUG_HANDLER }, + { INTERFACE_PCIE_DEVICE }, + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { } + } +}; + +static void +pvscsi_register_types(void) +{ + type_register_static(&pvscsi_info); +} + +type_init(pvscsi_register_types); diff --git a/hw/scsi/vmw_pvscsi.h b/hw/scsi/vmw_pvscsi.h new file mode 100644 index 000000000..17fcf6627 --- /dev/null +++ b/hw/scsi/vmw_pvscsi.h @@ -0,0 +1,434 @@ +/* + * VMware PVSCSI header file + * + * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; version 2 of the License and no later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for more + * details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Maintained by: Arvind Kumar + * + */ + +#ifndef VMW_PVSCSI_H +#define VMW_PVSCSI_H + +#define VMW_PAGE_SIZE (4096) +#define VMW_PAGE_SHIFT (12) + +#define MASK(n) ((1 << (n)) - 1) /* make an n-bit mask */ + +/* + * host adapter status/error codes + */ +enum HostBusAdapterStatus { + BTSTAT_SUCCESS = 0x00, /* CCB complete normally with no errors */ + BTSTAT_LINKED_COMMAND_COMPLETED = 0x0a, + BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG = 0x0b, + BTSTAT_DATA_UNDERRUN = 0x0c, + BTSTAT_SELTIMEO = 0x11, /* SCSI selection timeout */ + BTSTAT_DATARUN = 0x12, /* data overrun/underrun */ + BTSTAT_BUSFREE = 0x13, /* unexpected bus free */ + BTSTAT_INVPHASE = 0x14, /* invalid bus phase or sequence */ + /* requested by target */ + BTSTAT_LUNMISMATCH = 0x17, /* linked CCB has different LUN */ + /* from first CCB */ + BTSTAT_SENSFAILED = 0x1b, /* auto request sense failed */ + BTSTAT_TAGREJECT = 0x1c, /* SCSI II tagged queueing message */ + /* rejected by target */ + BTSTAT_BADMSG = 0x1d, /* unsupported message received by */ + /* the host adapter */ + BTSTAT_HAHARDWARE = 0x20, /* host adapter hardware failed */ + BTSTAT_NORESPONSE = 0x21, /* target did not respond to SCSI ATN, */ + /* sent a SCSI RST */ + BTSTAT_SENTRST = 0x22, /* host adapter asserted a SCSI RST */ + BTSTAT_RECVRST = 0x23, /* other SCSI devices asserted a SCSI RST */ + BTSTAT_DISCONNECT = 0x24, /* target device reconnected improperly */ + /* (w/o tag) */ + BTSTAT_BUSRESET = 0x25, /* host adapter issued BUS device reset */ + BTSTAT_ABORTQUEUE = 0x26, /* abort queue generated */ + BTSTAT_HASOFTWARE = 0x27, /* host adapter software error */ + BTSTAT_HATIMEOUT = 0x30, /* host adapter hardware timeout error */ + BTSTAT_SCSIPARITY = 0x34, /* SCSI parity error detected */ +}; + +/* + * Register offsets. + * + * These registers are accessible both via i/o space and mm i/o. + */ + +enum PVSCSIRegOffset { + PVSCSI_REG_OFFSET_COMMAND = 0x0, + PVSCSI_REG_OFFSET_COMMAND_DATA = 0x4, + PVSCSI_REG_OFFSET_COMMAND_STATUS = 0x8, + PVSCSI_REG_OFFSET_LAST_STS_0 = 0x100, + PVSCSI_REG_OFFSET_LAST_STS_1 = 0x104, + PVSCSI_REG_OFFSET_LAST_STS_2 = 0x108, + PVSCSI_REG_OFFSET_LAST_STS_3 = 0x10c, + PVSCSI_REG_OFFSET_INTR_STATUS = 0x100c, + PVSCSI_REG_OFFSET_INTR_MASK = 0x2010, + PVSCSI_REG_OFFSET_KICK_NON_RW_IO = 0x3014, + PVSCSI_REG_OFFSET_DEBUG = 0x3018, + PVSCSI_REG_OFFSET_KICK_RW_IO = 0x4018, +}; + +/* + * Virtual h/w commands. + */ + +enum PVSCSICommands { + PVSCSI_CMD_FIRST = 0, /* has to be first */ + + PVSCSI_CMD_ADAPTER_RESET = 1, + PVSCSI_CMD_ISSUE_SCSI = 2, + PVSCSI_CMD_SETUP_RINGS = 3, + PVSCSI_CMD_RESET_BUS = 4, + PVSCSI_CMD_RESET_DEVICE = 5, + PVSCSI_CMD_ABORT_CMD = 6, + PVSCSI_CMD_CONFIG = 7, + PVSCSI_CMD_SETUP_MSG_RING = 8, + PVSCSI_CMD_DEVICE_UNPLUG = 9, + + PVSCSI_CMD_LAST = 10 /* has to be last */ +}; + +#define PVSCSI_COMMAND_PROCESSING_SUCCEEDED (0) +#define PVSCSI_COMMAND_PROCESSING_FAILED (-1) +#define PVSCSI_COMMAND_NOT_ENOUGH_DATA (-2) + +/* + * Command descriptor for PVSCSI_CMD_RESET_DEVICE -- + */ + +struct PVSCSICmdDescResetDevice { + uint32_t target; + uint8_t lun[8]; +} QEMU_PACKED; + +typedef struct PVSCSICmdDescResetDevice PVSCSICmdDescResetDevice; + +/* + * Command descriptor for PVSCSI_CMD_ABORT_CMD -- + * + * - currently does not support specifying the LUN. + * - pad should be 0. 
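+ *
+ * Illustrative only (an assumption about typical driver usage, not part
+ * of the original interface notes): with a hypothetical MMIO helper
+ * reg_write(), a guest would issue the abort through the COMMAND /
+ * COMMAND_DATA protocol implemented by the device model:
+ *
+ *     PVSCSICmdDescAbortCmd d = { .context = ctx, .target = tgt };
+ *     const uint32_t *w = (const uint32_t *)&d;
+ *     reg_write(PVSCSI_REG_OFFSET_COMMAND, PVSCSI_CMD_ABORT_CMD);
+ *     for (size_t i = 0; i < sizeof(d) / sizeof(uint32_t); i++) {
+ *         reg_write(PVSCSI_REG_OFFSET_COMMAND_DATA, w[i]);
+ *     }
+ *
+ * The device accumulates the words in curr_cmd_data and runs the abort
+ * handler once sizeof(PVSCSICmdDescAbortCmd) bytes have arrived.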
+ */
+
+struct PVSCSICmdDescAbortCmd {
+    uint64_t context;
+    uint32_t target;
+    uint32_t pad;
+} QEMU_PACKED;
+
+typedef struct PVSCSICmdDescAbortCmd PVSCSICmdDescAbortCmd;
+
+/*
+ * Command descriptor for PVSCSI_CMD_SETUP_RINGS --
+ *
+ * Notes:
+ * - reqRingNumPages and cmpRingNumPages need to be a power of two,
+ * - reqRingNumPages and cmpRingNumPages need to be different from 0,
+ * - reqRingNumPages and cmpRingNumPages must not exceed
+ *   PVSCSI_SETUP_RINGS_MAX_NUM_PAGES.
+ */
+
+#define PVSCSI_SETUP_RINGS_MAX_NUM_PAGES 32
+struct PVSCSICmdDescSetupRings {
+    uint32_t reqRingNumPages;
+    uint32_t cmpRingNumPages;
+    uint64_t ringsStatePPN;
+    uint64_t reqRingPPNs[PVSCSI_SETUP_RINGS_MAX_NUM_PAGES];
+    uint64_t cmpRingPPNs[PVSCSI_SETUP_RINGS_MAX_NUM_PAGES];
+} QEMU_PACKED;
+
+typedef struct PVSCSICmdDescSetupRings PVSCSICmdDescSetupRings;
+
+/*
+ * Command descriptor for PVSCSI_CMD_SETUP_MSG_RING --
+ *
+ * Notes:
+ * - this command was not supported in the initial revision of the h/w
+ *   interface. Before using it, you need to check that it is supported by
+ *   writing PVSCSI_CMD_SETUP_MSG_RING to the 'command' register, then
+ *   immediately reading the 'command status' register:
+ *    * a value of -1 means that the cmd is NOT supported,
+ *    * a value != -1 means that the cmd IS supported.
+ *   If it's supported, the 'command status' register should return:
+ *      sizeof(PVSCSICmdDescSetupMsgRing) / sizeof(uint32_t).
+ * - this command should be issued _after_ the usual SETUP_RINGS so that the
+ *   RingsState page is already set up. If not, the command is a nop.
+ * - numPages needs to be a power of two,
+ * - numPages needs to be different from 0,
+ * - pad should be zero.
+ */
+
+#define PVSCSI_SETUP_MSG_RING_MAX_NUM_PAGES 16
+
+struct PVSCSICmdDescSetupMsgRing {
+    uint32_t numPages;
+    uint32_t pad;
+    uint64_t ringPPNs[PVSCSI_SETUP_MSG_RING_MAX_NUM_PAGES];
+} QEMU_PACKED;
+
+typedef struct PVSCSICmdDescSetupMsgRing PVSCSICmdDescSetupMsgRing;
+
+enum PVSCSIMsgType {
+    PVSCSI_MSG_DEV_ADDED = 0,
+    PVSCSI_MSG_DEV_REMOVED = 1,
+    PVSCSI_MSG_LAST = 2,
+};
+
+/*
+ * Msg descriptor.
+ *
+ * sizeof(struct PVSCSIRingMsgDesc) == 128.
+ *
+ * - type is of type enum PVSCSIMsgType.
+ * - the content of args depends on the type of event being delivered.
+ */
+
+struct PVSCSIRingMsgDesc {
+    uint32_t type;
+    uint32_t args[31];
+} QEMU_PACKED;
+
+typedef struct PVSCSIRingMsgDesc PVSCSIRingMsgDesc;
+
+struct PVSCSIMsgDescDevStatusChanged {
+    uint32_t type;  /* PVSCSI_MSG_DEV _ADDED / _REMOVED */
+    uint32_t bus;
+    uint32_t target;
+    uint8_t lun[8];
+    uint32_t pad[27];
+} QEMU_PACKED;
+
+typedef struct PVSCSIMsgDescDevStatusChanged PVSCSIMsgDescDevStatusChanged;
+
+/*
+ * Rings state.
+ *
+ * - the fields:
+ *    . msgProdIdx,
+ *    . msgConsIdx,
+ *    . msgNumEntriesLog2,
+ *   .. are only used once the SETUP_MSG_RING cmd has been issued.
+ * - 'pad' helps to ensure that the msg related fields are on their own
+ *   cache-line.
+ */
+
+struct PVSCSIRingsState {
+    uint32_t reqProdIdx;
+    uint32_t reqConsIdx;
+    uint32_t reqNumEntriesLog2;
+
+    uint32_t cmpProdIdx;
+    uint32_t cmpConsIdx;
+    uint32_t cmpNumEntriesLog2;
+
+    uint8_t pad[104];
+
+    uint32_t msgProdIdx;
+    uint32_t msgConsIdx;
+    uint32_t msgNumEntriesLog2;
+} QEMU_PACKED;
+
+typedef struct PVSCSIRingsState PVSCSIRingsState;
+
+/*
+ * Request descriptor.
+ *
+ * sizeof(RingReqDesc) = 128
+ *
+ * - context: is a unique identifier of a command.
+ *   It could normally be any 64-bit value, however we currently store it
+ *   in the serialNumber variable of struct SCSI_Command, so we have the
+ *   following restrictions due to the way this field is handled in the
+ *   vmkernel storage stack:
+ *    * this value can't be 0,
+ *    * the upper 32 bits need to be 0 since serialNumber is a uint32_t.
+ *   Currently tracked as PR 292060.
+ * - dataLen: contains the total number of bytes that need to be transferred.
+ * - dataAddr:
+ *    * if PVSCSI_FLAG_CMD_WITH_SG_LIST is set: dataAddr is the PA of the first
+ *      s/g table segment, each s/g segment is entirely contained on a single
+ *      page of physical memory,
+ *    * if PVSCSI_FLAG_CMD_WITH_SG_LIST is NOT set, then dataAddr is the PA of
+ *      the buffer used for the DMA transfer,
+ * - flags:
+ *    * PVSCSI_FLAG_CMD_WITH_SG_LIST: see dataAddr above,
+ *    * PVSCSI_FLAG_CMD_DIR_NONE: no DMA involved,
+ *    * PVSCSI_FLAG_CMD_DIR_TOHOST: transfer from device to main memory,
+ *    * PVSCSI_FLAG_CMD_DIR_TODEVICE: transfer from main memory to device,
+ *    * PVSCSI_FLAG_CMD_OUT_OF_BAND_CDB: reserved to handle CDBs larger than
+ *      16 bytes. To be specified.
+ * - vcpuHint: vcpuId of the processor that will most likely be waiting for
+ *   the completion of the i/o. For guest OSes that use lowest-priority
+ *   message delivery mode (such as Windows), we use this "hint" to deliver
+ *   the completion action to the proper vcpu. For now, we can use the vcpuId
+ *   of the processor that initiated the i/o as a likely candidate for the
+ *   vcpu that will be waiting for the completion.
+ * - bus should be 0: only bus 0 is supported for now.
+ * - unused should be zero'd.
+ */
+
+#define PVSCSI_FLAG_CMD_WITH_SG_LIST        (1 << 0)
+#define PVSCSI_FLAG_CMD_OUT_OF_BAND_CDB     (1 << 1)
+#define PVSCSI_FLAG_CMD_DIR_NONE            (1 << 2)
+#define PVSCSI_FLAG_CMD_DIR_TOHOST          (1 << 3)
+#define PVSCSI_FLAG_CMD_DIR_TODEVICE        (1 << 4)
+
+#define PVSCSI_KNOWN_FLAGS \
+    (PVSCSI_FLAG_CMD_WITH_SG_LIST | \
+     PVSCSI_FLAG_CMD_OUT_OF_BAND_CDB | \
+     PVSCSI_FLAG_CMD_DIR_NONE | \
+     PVSCSI_FLAG_CMD_DIR_TOHOST | \
+     PVSCSI_FLAG_CMD_DIR_TODEVICE)
+
+struct PVSCSIRingReqDesc {
+    uint64_t context;
+    uint64_t dataAddr;
+    uint64_t dataLen;
+    uint64_t senseAddr;
+    uint32_t senseLen;
+    uint32_t flags;
+    uint8_t cdb[16];
+    uint8_t cdbLen;
+    uint8_t lun[8];
+    uint8_t tag;
+    uint8_t bus;
+    uint8_t target;
+    uint8_t vcpuHint;
+    uint8_t unused[59];
+} QEMU_PACKED;
+
+typedef struct PVSCSIRingReqDesc PVSCSIRingReqDesc;
+
+/*
+ * Scatter-gather list management.
+ *
+ * As described above, when PVSCSI_FLAG_CMD_WITH_SG_LIST is set in the
+ * RingReqDesc.flags, then RingReqDesc.dataAddr is the PA of the first s/g
+ * table segment.
+ *
+ * - each segment of the s/g table contains a succession of struct
+ *   PVSCSISGElement.
+ * - each segment is entirely contained on a single physical page of memory.
+ * - a "chain" s/g element has the flag PVSCSI_SGE_FLAG_CHAIN_ELEMENT set in
+ *   PVSCSISGElement.flags and in this case:
+ *    * addr is the PA of the next s/g segment,
+ *    * length is undefined, assumed to be 0.
+ */
+
+struct PVSCSISGElement {
+    uint64_t addr;
+    uint32_t length;
+    uint32_t flags;
+} QEMU_PACKED;
+
+typedef struct PVSCSISGElement PVSCSISGElement;
+
+/*
+ * Completion descriptor.
+ *
+ * sizeof(RingCmpDesc) = 32
+ *
+ * - context: identifier of the command.
The same thing that was specified + * under "context" as part of struct RingReqDesc at initiation time, + * - dataLen: number of bytes transferred for the actual i/o operation, + * - senseLen: number of bytes written into the sense buffer, + * - hostStatus: adapter status, + * - scsiStatus: device status, + * - pad should be zero. + */ + +struct PVSCSIRingCmpDesc { + uint64_t context; + uint64_t dataLen; + uint32_t senseLen; + uint16_t hostStatus; + uint16_t scsiStatus; + uint32_t pad[2]; +} QEMU_PACKED; + +typedef struct PVSCSIRingCmpDesc PVSCSIRingCmpDesc; + +/* + * Interrupt status / IRQ bits. + */ + +#define PVSCSI_INTR_CMPL_0 (1 << 0) +#define PVSCSI_INTR_CMPL_1 (1 << 1) +#define PVSCSI_INTR_CMPL_MASK MASK(2) + +#define PVSCSI_INTR_MSG_0 (1 << 2) +#define PVSCSI_INTR_MSG_1 (1 << 3) +#define PVSCSI_INTR_MSG_MASK (MASK(2) << 2) + +#define PVSCSI_INTR_ALL_SUPPORTED MASK(4) + +/* + * Number of MSI-X vectors supported. + */ +#define PVSCSI_MAX_INTRS 24 + +/* + * Enumeration of supported MSI-X vectors + */ +#define PVSCSI_VECTOR_COMPLETION 0 + +/* + * Misc constants for the rings. + */ + +#define PVSCSI_MAX_NUM_PAGES_REQ_RING PVSCSI_SETUP_RINGS_MAX_NUM_PAGES +#define PVSCSI_MAX_NUM_PAGES_CMP_RING PVSCSI_SETUP_RINGS_MAX_NUM_PAGES +#define PVSCSI_MAX_NUM_PAGES_MSG_RING PVSCSI_SETUP_MSG_RING_MAX_NUM_PAGES + +#define PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE \ + (VMW_PAGE_SIZE / sizeof(struct PVSCSIRingReqDesc)) + +#define PVSCSI_MAX_NUM_CMP_ENTRIES_PER_PAGE \ + (VMW_PAGE_SIZE / sizeof(PVSCSIRingCmpDesc)) + +#define PVSCSI_MAX_NUM_MSG_ENTRIES_PER_PAGE \ + (VMW_PAGE_SIZE / sizeof(PVSCSIRingMsgDesc)) + +#define PVSCSI_MAX_REQ_QUEUE_DEPTH \ + (PVSCSI_MAX_NUM_PAGES_REQ_RING * PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE) + +#define PVSCSI_MEM_SPACE_COMMAND_NUM_PAGES 1 +#define PVSCSI_MEM_SPACE_INTR_STATUS_NUM_PAGES 1 +#define PVSCSI_MEM_SPACE_MISC_NUM_PAGES 2 +#define PVSCSI_MEM_SPACE_KICK_IO_NUM_PAGES 2 +#define PVSCSI_MEM_SPACE_MSIX_NUM_PAGES 2 + +enum PVSCSIMemSpace { + PVSCSI_MEM_SPACE_COMMAND_PAGE = 0, + PVSCSI_MEM_SPACE_INTR_STATUS_PAGE = 1, + PVSCSI_MEM_SPACE_MISC_PAGE = 2, + PVSCSI_MEM_SPACE_KICK_IO_PAGE = 4, + PVSCSI_MEM_SPACE_MSIX_TABLE_PAGE = 6, + PVSCSI_MEM_SPACE_MSIX_PBA_PAGE = 7, +}; + +#define PVSCSI_MEM_SPACE_NUM_PAGES \ + (PVSCSI_MEM_SPACE_COMMAND_NUM_PAGES + \ + PVSCSI_MEM_SPACE_INTR_STATUS_NUM_PAGES + \ + PVSCSI_MEM_SPACE_MISC_NUM_PAGES + \ + PVSCSI_MEM_SPACE_KICK_IO_NUM_PAGES + \ + PVSCSI_MEM_SPACE_MSIX_NUM_PAGES) + +#define PVSCSI_MEM_SPACE_SIZE (PVSCSI_MEM_SPACE_NUM_PAGES * VMW_PAGE_SIZE) + +#endif /* VMW_PVSCSI_H */ diff --git a/hw/sd/Kconfig b/hw/sd/Kconfig new file mode 100644 index 000000000..633b9afec --- /dev/null +++ b/hw/sd/Kconfig @@ -0,0 +1,25 @@ +config PL181 + bool + select SD + +config SSI_SD + bool + depends on SSI + select SD + +config SD + bool + +config SDHCI + bool + select SD + +config SDHCI_PCI + bool + default y if PCI_DEVICES + depends on PCI + select SDHCI + +config CADENCE_SDHCI + bool + select SDHCI diff --git a/hw/sd/allwinner-sdhost.c b/hw/sd/allwinner-sdhost.c new file mode 100644 index 000000000..9166d6638 --- /dev/null +++ b/hw/sd/allwinner-sdhost.c @@ -0,0 +1,873 @@ +/* + * Allwinner (sun4i and above) SD Host Controller emulation + * + * Copyright (C) 2019 Niek Linnenbank + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "qemu/units.h" +#include "qapi/error.h" +#include "sysemu/blockdev.h" +#include "sysemu/dma.h" +#include "hw/qdev-properties.h" +#include "hw/irq.h" +#include "hw/sd/allwinner-sdhost.h" +#include "migration/vmstate.h" +#include "trace.h" +#include "qom/object.h" + +#define TYPE_AW_SDHOST_BUS "allwinner-sdhost-bus" +/* This is reusing the SDBus typedef from SD_BUS */ +DECLARE_INSTANCE_CHECKER(SDBus, AW_SDHOST_BUS, + TYPE_AW_SDHOST_BUS) + +/* SD Host register offsets */ +enum { + REG_SD_GCTL = 0x00, /* Global Control */ + REG_SD_CKCR = 0x04, /* Clock Control */ + REG_SD_TMOR = 0x08, /* Timeout */ + REG_SD_BWDR = 0x0C, /* Bus Width */ + REG_SD_BKSR = 0x10, /* Block Size */ + REG_SD_BYCR = 0x14, /* Byte Count */ + REG_SD_CMDR = 0x18, /* Command */ + REG_SD_CAGR = 0x1C, /* Command Argument */ + REG_SD_RESP0 = 0x20, /* Response Zero */ + REG_SD_RESP1 = 0x24, /* Response One */ + REG_SD_RESP2 = 0x28, /* Response Two */ + REG_SD_RESP3 = 0x2C, /* Response Three */ + REG_SD_IMKR = 0x30, /* Interrupt Mask */ + REG_SD_MISR = 0x34, /* Masked Interrupt Status */ + REG_SD_RISR = 0x38, /* Raw Interrupt Status */ + REG_SD_STAR = 0x3C, /* Status */ + REG_SD_FWLR = 0x40, /* FIFO Water Level */ + REG_SD_FUNS = 0x44, /* FIFO Function Select */ + REG_SD_DBGC = 0x50, /* Debug Enable */ + REG_SD_A12A = 0x58, /* Auto command 12 argument */ + REG_SD_NTSR = 0x5C, /* SD NewTiming Set */ + REG_SD_SDBG = 0x60, /* SD newTiming Set Debug */ + REG_SD_HWRST = 0x78, /* Hardware Reset Register */ + REG_SD_DMAC = 0x80, /* Internal DMA Controller Control */ + REG_SD_DLBA = 0x84, /* Descriptor List Base Address */ + REG_SD_IDST = 0x88, /* Internal DMA Controller Status */ + REG_SD_IDIE = 0x8C, /* Internal DMA Controller IRQ Enable */ + REG_SD_THLDC = 0x100, /* Card Threshold Control */ + REG_SD_DSBD = 0x10C, /* eMMC DDR Start Bit Detection Control */ + REG_SD_RES_CRC = 0x110, /* Response CRC from card/eMMC */ + REG_SD_DATA7_CRC = 0x114, /* CRC Data 7 from card/eMMC */ + REG_SD_DATA6_CRC = 0x118, /* CRC Data 6 from card/eMMC */ + REG_SD_DATA5_CRC = 0x11C, /* CRC Data 5 from card/eMMC */ + REG_SD_DATA4_CRC = 0x120, /* CRC Data 4 from card/eMMC */ + REG_SD_DATA3_CRC = 0x124, /* CRC Data 3 from card/eMMC */ + REG_SD_DATA2_CRC = 0x128, /* CRC Data 2 from card/eMMC */ + REG_SD_DATA1_CRC = 0x12C, /* CRC Data 1 from card/eMMC */ + REG_SD_DATA0_CRC = 0x130, /* CRC Data 0 from card/eMMC */ + REG_SD_CRC_STA = 0x134, /* CRC status from card/eMMC during write */ + REG_SD_FIFO = 0x200, /* Read/Write FIFO */ +}; + +/* SD Host register flags */ +enum { + SD_GCTL_FIFO_AC_MOD = (1 << 31), + SD_GCTL_DDR_MOD_SEL = (1 << 10), + SD_GCTL_CD_DBC_ENB = (1 << 8), + SD_GCTL_DMA_ENB = (1 << 5), + SD_GCTL_INT_ENB = (1 << 4), + SD_GCTL_DMA_RST = (1 << 2), + SD_GCTL_FIFO_RST = (1 << 1), + SD_GCTL_SOFT_RST = (1 << 0), +}; + +enum { + SD_CMDR_LOAD = (1 << 31), + SD_CMDR_CLKCHANGE = (1 << 21), + SD_CMDR_WRITE = (1 << 10), + SD_CMDR_AUTOSTOP = (1 << 12), + SD_CMDR_DATA = (1 << 9), + SD_CMDR_RESPONSE_LONG = (1 << 7), + SD_CMDR_RESPONSE = (1 << 6), + SD_CMDR_CMDID_MASK = (0x3f), +}; + +enum { + 
SD_RISR_CARD_REMOVE = (1 << 31), + SD_RISR_CARD_INSERT = (1 << 30), + SD_RISR_SDIO_INTR = (1 << 16), + SD_RISR_AUTOCMD_DONE = (1 << 14), + SD_RISR_DATA_COMPLETE = (1 << 3), + SD_RISR_CMD_COMPLETE = (1 << 2), + SD_RISR_NO_RESPONSE = (1 << 1), +}; + +enum { + SD_STAR_CARD_PRESENT = (1 << 8), +}; + +enum { + SD_IDST_INT_SUMMARY = (1 << 8), + SD_IDST_RECEIVE_IRQ = (1 << 1), + SD_IDST_TRANSMIT_IRQ = (1 << 0), + SD_IDST_IRQ_MASK = (1 << 1) | (1 << 0) | (1 << 8), + SD_IDST_WR_MASK = (0x3ff), +}; + +/* SD Host register reset values */ +enum { + REG_SD_GCTL_RST = 0x00000300, + REG_SD_CKCR_RST = 0x0, + REG_SD_TMOR_RST = 0xFFFFFF40, + REG_SD_BWDR_RST = 0x0, + REG_SD_BKSR_RST = 0x00000200, + REG_SD_BYCR_RST = 0x00000200, + REG_SD_CMDR_RST = 0x0, + REG_SD_CAGR_RST = 0x0, + REG_SD_RESP_RST = 0x0, + REG_SD_IMKR_RST = 0x0, + REG_SD_MISR_RST = 0x0, + REG_SD_RISR_RST = 0x0, + REG_SD_STAR_RST = 0x00000100, + REG_SD_FWLR_RST = 0x000F0000, + REG_SD_FUNS_RST = 0x0, + REG_SD_DBGC_RST = 0x0, + REG_SD_A12A_RST = 0x0000FFFF, + REG_SD_NTSR_RST = 0x00000001, + REG_SD_SDBG_RST = 0x0, + REG_SD_HWRST_RST = 0x00000001, + REG_SD_DMAC_RST = 0x0, + REG_SD_DLBA_RST = 0x0, + REG_SD_IDST_RST = 0x0, + REG_SD_IDIE_RST = 0x0, + REG_SD_THLDC_RST = 0x0, + REG_SD_DSBD_RST = 0x0, + REG_SD_RES_CRC_RST = 0x0, + REG_SD_DATA_CRC_RST = 0x0, + REG_SD_CRC_STA_RST = 0x0, + REG_SD_FIFO_RST = 0x0, +}; + +/* Data transfer descriptor for DMA */ +typedef struct TransferDescriptor { + uint32_t status; /* Status flags */ + uint32_t size; /* Data buffer size */ + uint32_t addr; /* Data buffer address */ + uint32_t next; /* Physical address of next descriptor */ +} TransferDescriptor; + +/* Data transfer descriptor flags */ +enum { + DESC_STATUS_HOLD = (1 << 31), /* Set when descriptor is in use by DMA */ + DESC_STATUS_ERROR = (1 << 30), /* Set when DMA transfer error occurred */ + DESC_STATUS_CHAIN = (1 << 4), /* Indicates chained descriptor. 
*/ + DESC_STATUS_FIRST = (1 << 3), /* Set on the first descriptor */ + DESC_STATUS_LAST = (1 << 2), /* Set on the last descriptor */ + DESC_STATUS_NOIRQ = (1 << 1), /* Skip raising interrupt after transfer */ + DESC_SIZE_MASK = (0xfffffffc) +}; + +static void allwinner_sdhost_update_irq(AwSdHostState *s) +{ + uint32_t irq; + + if (s->global_ctl & SD_GCTL_INT_ENB) { + irq = s->irq_status & s->irq_mask; + } else { + irq = 0; + } + + trace_allwinner_sdhost_update_irq(irq); + qemu_set_irq(s->irq, irq); +} + +static void allwinner_sdhost_update_transfer_cnt(AwSdHostState *s, + uint32_t bytes) +{ + if (s->transfer_cnt > bytes) { + s->transfer_cnt -= bytes; + } else { + s->transfer_cnt = 0; + } + + if (!s->transfer_cnt) { + s->irq_status |= SD_RISR_DATA_COMPLETE; + } +} + +static void allwinner_sdhost_set_inserted(DeviceState *dev, bool inserted) +{ + AwSdHostState *s = AW_SDHOST(dev); + + trace_allwinner_sdhost_set_inserted(inserted); + + if (inserted) { + s->irq_status |= SD_RISR_CARD_INSERT; + s->irq_status &= ~SD_RISR_CARD_REMOVE; + s->status |= SD_STAR_CARD_PRESENT; + } else { + s->irq_status &= ~SD_RISR_CARD_INSERT; + s->irq_status |= SD_RISR_CARD_REMOVE; + s->status &= ~SD_STAR_CARD_PRESENT; + } + + allwinner_sdhost_update_irq(s); +} + +static void allwinner_sdhost_send_command(AwSdHostState *s) +{ + SDRequest request; + uint8_t resp[16]; + int rlen; + + /* Auto clear load flag */ + s->command &= ~SD_CMDR_LOAD; + + /* Clock change does not actually interact with the SD bus */ + if (!(s->command & SD_CMDR_CLKCHANGE)) { + + /* Prepare request */ + request.cmd = s->command & SD_CMDR_CMDID_MASK; + request.arg = s->command_arg; + + /* Send request to SD bus */ + rlen = sdbus_do_command(&s->sdbus, &request, resp); + if (rlen < 0) { + goto error; + } + + /* If the command has a response, store it in the response registers */ + if ((s->command & SD_CMDR_RESPONSE)) { + if (rlen == 4 && !(s->command & SD_CMDR_RESPONSE_LONG)) { + s->response[0] = ldl_be_p(&resp[0]); + s->response[1] = s->response[2] = s->response[3] = 0; + + } else if (rlen == 16 && (s->command & SD_CMDR_RESPONSE_LONG)) { + s->response[0] = ldl_be_p(&resp[12]); + s->response[1] = ldl_be_p(&resp[8]); + s->response[2] = ldl_be_p(&resp[4]); + s->response[3] = ldl_be_p(&resp[0]); + } else { + goto error; + } + } + } + + /* Set interrupt status bits */ + s->irq_status |= SD_RISR_CMD_COMPLETE; + return; + +error: + s->irq_status |= SD_RISR_NO_RESPONSE; +} + +static void allwinner_sdhost_auto_stop(AwSdHostState *s) +{ + /* + * The stop command (CMD12) ensures the SD bus + * returns to the transfer state. 
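+ * When SD_CMDR_AUTOSTOP is set and the transfer counter has reached
+ * zero, the model synthesizes that CMD12 itself: it saves the current
+ * command and argument registers, puts CMD12 with a zero argument on
+ * the SD bus, restores the saved registers, and raises
+ * SD_RISR_AUTOCMD_DONE, mirroring what the real controller does at
+ * the end of a multi-block transfer.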
+ */
+    if ((s->command & SD_CMDR_AUTOSTOP) && (s->transfer_cnt == 0)) {
+        /* First save current command registers */
+        uint32_t saved_cmd = s->command;
+        uint32_t saved_arg = s->command_arg;
+
+        /* Prepare stop command (CMD12) */
+        s->command &= ~SD_CMDR_CMDID_MASK;
+        s->command |= 12; /* CMD12 */
+        s->command_arg = 0;
+
+        /* Put the command on SD bus */
+        allwinner_sdhost_send_command(s);
+
+        /* Restore command values */
+        s->command = saved_cmd;
+        s->command_arg = saved_arg;
+
+        /* Set IRQ status bit for automatic stop done */
+        s->irq_status |= SD_RISR_AUTOCMD_DONE;
+    }
+}
+
+static uint32_t allwinner_sdhost_process_desc(AwSdHostState *s,
+                                              hwaddr desc_addr,
+                                              TransferDescriptor *desc,
+                                              bool is_write, uint32_t max_bytes)
+{
+    AwSdHostClass *klass = AW_SDHOST_GET_CLASS(s);
+    uint32_t num_done = 0;
+    uint32_t num_bytes = max_bytes;
+    uint8_t buf[1024];
+
+    /* Read descriptor */
+    dma_memory_read(&s->dma_as, desc_addr, desc, sizeof(*desc));
+    if (desc->size == 0) {
+        desc->size = klass->max_desc_size;
+    } else if (desc->size > klass->max_desc_size) {
+        qemu_log_mask(LOG_GUEST_ERROR, "%s: DMA descriptor buffer size "
+                      "is out-of-bounds: %" PRIu32 " > %zu\n",
+                      __func__, desc->size, klass->max_desc_size);
+        desc->size = klass->max_desc_size;
+    }
+    if (desc->size < num_bytes) {
+        num_bytes = desc->size;
+    }
+
+    trace_allwinner_sdhost_process_desc(desc_addr, desc->size,
+                                        is_write, max_bytes);
+
+    while (num_done < num_bytes) {
+        /* Try to completely fill the local buffer */
+        uint32_t buf_bytes = num_bytes - num_done;
+        if (buf_bytes > sizeof(buf)) {
+            buf_bytes = sizeof(buf);
+        }
+
+        /* Write to SD bus */
+        if (is_write) {
+            dma_memory_read(&s->dma_as,
+                            (desc->addr & DESC_SIZE_MASK) + num_done,
+                            buf, buf_bytes);
+            sdbus_write_data(&s->sdbus, buf, buf_bytes);
+
+        /* Read from SD bus */
+        } else {
+            sdbus_read_data(&s->sdbus, buf, buf_bytes);
+            dma_memory_write(&s->dma_as,
+                             (desc->addr & DESC_SIZE_MASK) + num_done,
+                             buf, buf_bytes);
+        }
+        num_done += buf_bytes;
+    }
+
+    /* Clear hold flag and flush descriptor */
+    desc->status &= ~DESC_STATUS_HOLD;
+    dma_memory_write(&s->dma_as, desc_addr, desc, sizeof(*desc));
+
+    return num_done;
+}
+
+static void allwinner_sdhost_dma(AwSdHostState *s)
+{
+    TransferDescriptor desc;
+    hwaddr desc_addr = s->desc_base;
+    bool is_write = (s->command & SD_CMDR_WRITE);
+    uint32_t bytes_done = 0;
+
+    /* Check if DMA can be performed */
+    if (s->byte_count == 0 || s->block_size == 0 ||
+        !(s->global_ctl & SD_GCTL_DMA_ENB)) {
+        return;
+    }
+
+    /*
+     * For read operations, data must be available on the SD bus.
+     * If not, it is an error and we should not act at all.
+     */
+    if (!is_write && !sdbus_data_ready(&s->sdbus)) {
+        return;
+    }
+
+    /* Process the DMA descriptors until all data is copied */
+    while (s->byte_count > 0) {
+        bytes_done = allwinner_sdhost_process_desc(s, desc_addr, &desc,
+                                                   is_write, s->byte_count);
+        allwinner_sdhost_update_transfer_cnt(s, bytes_done);
+
+        if (bytes_done <= s->byte_count) {
+            s->byte_count -= bytes_done;
+        } else {
+            s->byte_count = 0;
+        }
+
+        if (desc.status & DESC_STATUS_LAST) {
+            break;
+        } else {
+            desc_addr = desc.next;
+        }
+    }
+
+    /* Raise IRQ to signal DMA is completed */
+    s->irq_status |= SD_RISR_DATA_COMPLETE | SD_RISR_SDIO_INTR;
+
+    /* Update DMAC bits */
+    s->dmac_status |= SD_IDST_INT_SUMMARY;
+
+    if (is_write) {
+        s->dmac_status |= SD_IDST_TRANSMIT_IRQ;
+    } else {
+        s->dmac_status |= SD_IDST_RECEIVE_IRQ;
+    }
+}
+
+static uint64_t allwinner_sdhost_read(void *opaque, hwaddr offset,
+                                      unsigned size)
+{
+    AwSdHostState
*s = AW_SDHOST(opaque); + uint32_t res = 0; + + switch (offset) { + case REG_SD_GCTL: /* Global Control */ + res = s->global_ctl; + break; + case REG_SD_CKCR: /* Clock Control */ + res = s->clock_ctl; + break; + case REG_SD_TMOR: /* Timeout */ + res = s->timeout; + break; + case REG_SD_BWDR: /* Bus Width */ + res = s->bus_width; + break; + case REG_SD_BKSR: /* Block Size */ + res = s->block_size; + break; + case REG_SD_BYCR: /* Byte Count */ + res = s->byte_count; + break; + case REG_SD_CMDR: /* Command */ + res = s->command; + break; + case REG_SD_CAGR: /* Command Argument */ + res = s->command_arg; + break; + case REG_SD_RESP0: /* Response Zero */ + res = s->response[0]; + break; + case REG_SD_RESP1: /* Response One */ + res = s->response[1]; + break; + case REG_SD_RESP2: /* Response Two */ + res = s->response[2]; + break; + case REG_SD_RESP3: /* Response Three */ + res = s->response[3]; + break; + case REG_SD_IMKR: /* Interrupt Mask */ + res = s->irq_mask; + break; + case REG_SD_MISR: /* Masked Interrupt Status */ + res = s->irq_status & s->irq_mask; + break; + case REG_SD_RISR: /* Raw Interrupt Status */ + res = s->irq_status; + break; + case REG_SD_STAR: /* Status */ + res = s->status; + break; + case REG_SD_FWLR: /* FIFO Water Level */ + res = s->fifo_wlevel; + break; + case REG_SD_FUNS: /* FIFO Function Select */ + res = s->fifo_func_sel; + break; + case REG_SD_DBGC: /* Debug Enable */ + res = s->debug_enable; + break; + case REG_SD_A12A: /* Auto command 12 argument */ + res = s->auto12_arg; + break; + case REG_SD_NTSR: /* SD NewTiming Set */ + res = s->newtiming_set; + break; + case REG_SD_SDBG: /* SD newTiming Set Debug */ + res = s->newtiming_debug; + break; + case REG_SD_HWRST: /* Hardware Reset Register */ + res = s->hardware_rst; + break; + case REG_SD_DMAC: /* Internal DMA Controller Control */ + res = s->dmac; + break; + case REG_SD_DLBA: /* Descriptor List Base Address */ + res = s->desc_base; + break; + case REG_SD_IDST: /* Internal DMA Controller Status */ + res = s->dmac_status; + break; + case REG_SD_IDIE: /* Internal DMA Controller Interrupt Enable */ + res = s->dmac_irq; + break; + case REG_SD_THLDC: /* Card Threshold Control */ + res = s->card_threshold; + break; + case REG_SD_DSBD: /* eMMC DDR Start Bit Detection Control */ + res = s->startbit_detect; + break; + case REG_SD_RES_CRC: /* Response CRC from card/eMMC */ + res = s->response_crc; + break; + case REG_SD_DATA7_CRC: /* CRC Data 7 from card/eMMC */ + case REG_SD_DATA6_CRC: /* CRC Data 6 from card/eMMC */ + case REG_SD_DATA5_CRC: /* CRC Data 5 from card/eMMC */ + case REG_SD_DATA4_CRC: /* CRC Data 4 from card/eMMC */ + case REG_SD_DATA3_CRC: /* CRC Data 3 from card/eMMC */ + case REG_SD_DATA2_CRC: /* CRC Data 2 from card/eMMC */ + case REG_SD_DATA1_CRC: /* CRC Data 1 from card/eMMC */ + case REG_SD_DATA0_CRC: /* CRC Data 0 from card/eMMC */ + res = s->data_crc[((offset - REG_SD_DATA7_CRC) / sizeof(uint32_t))]; + break; + case REG_SD_CRC_STA: /* CRC status from card/eMMC in write operation */ + res = s->status_crc; + break; + case REG_SD_FIFO: /* Read/Write FIFO */ + if (sdbus_data_ready(&s->sdbus)) { + sdbus_read_data(&s->sdbus, &res, sizeof(uint32_t)); + le32_to_cpus(&res); + allwinner_sdhost_update_transfer_cnt(s, sizeof(uint32_t)); + allwinner_sdhost_auto_stop(s); + allwinner_sdhost_update_irq(s); + } else { + qemu_log_mask(LOG_GUEST_ERROR, "%s: no data ready on SD bus\n", + __func__); + } + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, "%s: out-of-bounds offset %" + HWADDR_PRIx"\n", __func__, offset); 
+ res = 0; + break; + } + + trace_allwinner_sdhost_read(offset, res, size); + return res; +} + +static void allwinner_sdhost_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + AwSdHostState *s = AW_SDHOST(opaque); + uint32_t u32; + + trace_allwinner_sdhost_write(offset, value, size); + + switch (offset) { + case REG_SD_GCTL: /* Global Control */ + s->global_ctl = value; + s->global_ctl &= ~(SD_GCTL_DMA_RST | SD_GCTL_FIFO_RST | + SD_GCTL_SOFT_RST); + allwinner_sdhost_update_irq(s); + break; + case REG_SD_CKCR: /* Clock Control */ + s->clock_ctl = value; + break; + case REG_SD_TMOR: /* Timeout */ + s->timeout = value; + break; + case REG_SD_BWDR: /* Bus Width */ + s->bus_width = value; + break; + case REG_SD_BKSR: /* Block Size */ + s->block_size = value; + break; + case REG_SD_BYCR: /* Byte Count */ + s->byte_count = value; + s->transfer_cnt = value; + break; + case REG_SD_CMDR: /* Command */ + s->command = value; + if (value & SD_CMDR_LOAD) { + allwinner_sdhost_send_command(s); + allwinner_sdhost_dma(s); + allwinner_sdhost_auto_stop(s); + } + allwinner_sdhost_update_irq(s); + break; + case REG_SD_CAGR: /* Command Argument */ + s->command_arg = value; + break; + case REG_SD_RESP0: /* Response Zero */ + s->response[0] = value; + break; + case REG_SD_RESP1: /* Response One */ + s->response[1] = value; + break; + case REG_SD_RESP2: /* Response Two */ + s->response[2] = value; + break; + case REG_SD_RESP3: /* Response Three */ + s->response[3] = value; + break; + case REG_SD_IMKR: /* Interrupt Mask */ + s->irq_mask = value; + allwinner_sdhost_update_irq(s); + break; + case REG_SD_MISR: /* Masked Interrupt Status */ + case REG_SD_RISR: /* Raw Interrupt Status */ + s->irq_status &= ~value; + allwinner_sdhost_update_irq(s); + break; + case REG_SD_STAR: /* Status */ + s->status &= ~value; + allwinner_sdhost_update_irq(s); + break; + case REG_SD_FWLR: /* FIFO Water Level */ + s->fifo_wlevel = value; + break; + case REG_SD_FUNS: /* FIFO Function Select */ + s->fifo_func_sel = value; + break; + case REG_SD_DBGC: /* Debug Enable */ + s->debug_enable = value; + break; + case REG_SD_A12A: /* Auto command 12 argument */ + s->auto12_arg = value; + break; + case REG_SD_NTSR: /* SD NewTiming Set */ + s->newtiming_set = value; + break; + case REG_SD_SDBG: /* SD newTiming Set Debug */ + s->newtiming_debug = value; + break; + case REG_SD_HWRST: /* Hardware Reset Register */ + s->hardware_rst = value; + break; + case REG_SD_DMAC: /* Internal DMA Controller Control */ + s->dmac = value; + allwinner_sdhost_update_irq(s); + break; + case REG_SD_DLBA: /* Descriptor List Base Address */ + s->desc_base = value; + break; + case REG_SD_IDST: /* Internal DMA Controller Status */ + s->dmac_status &= (~SD_IDST_WR_MASK) | (~value & SD_IDST_WR_MASK); + allwinner_sdhost_update_irq(s); + break; + case REG_SD_IDIE: /* Internal DMA Controller Interrupt Enable */ + s->dmac_irq = value; + allwinner_sdhost_update_irq(s); + break; + case REG_SD_THLDC: /* Card Threshold Control */ + s->card_threshold = value; + break; + case REG_SD_DSBD: /* eMMC DDR Start Bit Detection Control */ + s->startbit_detect = value; + break; + case REG_SD_FIFO: /* Read/Write FIFO */ + u32 = cpu_to_le32(value); + sdbus_write_data(&s->sdbus, &u32, sizeof(u32)); + allwinner_sdhost_update_transfer_cnt(s, sizeof(u32)); + allwinner_sdhost_auto_stop(s); + allwinner_sdhost_update_irq(s); + break; + case REG_SD_RES_CRC: /* Response CRC from card/eMMC */ + case REG_SD_DATA7_CRC: /* CRC Data 7 from card/eMMC */ + case REG_SD_DATA6_CRC: /* CRC 
Data 6 from card/eMMC */ + case REG_SD_DATA5_CRC: /* CRC Data 5 from card/eMMC */ + case REG_SD_DATA4_CRC: /* CRC Data 4 from card/eMMC */ + case REG_SD_DATA3_CRC: /* CRC Data 3 from card/eMMC */ + case REG_SD_DATA2_CRC: /* CRC Data 2 from card/eMMC */ + case REG_SD_DATA1_CRC: /* CRC Data 1 from card/eMMC */ + case REG_SD_DATA0_CRC: /* CRC Data 0 from card/eMMC */ + case REG_SD_CRC_STA: /* CRC status from card/eMMC in write operation */ + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, "%s: out-of-bounds offset %" + HWADDR_PRIx"\n", __func__, offset); + break; + } +} + +static const MemoryRegionOps allwinner_sdhost_ops = { + .read = allwinner_sdhost_read, + .write = allwinner_sdhost_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + }, + .impl.min_access_size = 4, +}; + +static const VMStateDescription vmstate_allwinner_sdhost = { + .name = "allwinner-sdhost", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(global_ctl, AwSdHostState), + VMSTATE_UINT32(clock_ctl, AwSdHostState), + VMSTATE_UINT32(timeout, AwSdHostState), + VMSTATE_UINT32(bus_width, AwSdHostState), + VMSTATE_UINT32(block_size, AwSdHostState), + VMSTATE_UINT32(byte_count, AwSdHostState), + VMSTATE_UINT32(transfer_cnt, AwSdHostState), + VMSTATE_UINT32(command, AwSdHostState), + VMSTATE_UINT32(command_arg, AwSdHostState), + VMSTATE_UINT32_ARRAY(response, AwSdHostState, 4), + VMSTATE_UINT32(irq_mask, AwSdHostState), + VMSTATE_UINT32(irq_status, AwSdHostState), + VMSTATE_UINT32(status, AwSdHostState), + VMSTATE_UINT32(fifo_wlevel, AwSdHostState), + VMSTATE_UINT32(fifo_func_sel, AwSdHostState), + VMSTATE_UINT32(debug_enable, AwSdHostState), + VMSTATE_UINT32(auto12_arg, AwSdHostState), + VMSTATE_UINT32(newtiming_set, AwSdHostState), + VMSTATE_UINT32(newtiming_debug, AwSdHostState), + VMSTATE_UINT32(hardware_rst, AwSdHostState), + VMSTATE_UINT32(dmac, AwSdHostState), + VMSTATE_UINT32(desc_base, AwSdHostState), + VMSTATE_UINT32(dmac_status, AwSdHostState), + VMSTATE_UINT32(dmac_irq, AwSdHostState), + VMSTATE_UINT32(card_threshold, AwSdHostState), + VMSTATE_UINT32(startbit_detect, AwSdHostState), + VMSTATE_UINT32(response_crc, AwSdHostState), + VMSTATE_UINT32_ARRAY(data_crc, AwSdHostState, 8), + VMSTATE_UINT32(status_crc, AwSdHostState), + VMSTATE_END_OF_LIST() + } +}; + +static Property allwinner_sdhost_properties[] = { + DEFINE_PROP_LINK("dma-memory", AwSdHostState, dma_mr, + TYPE_MEMORY_REGION, MemoryRegion *), + DEFINE_PROP_END_OF_LIST(), +}; + +static void allwinner_sdhost_init(Object *obj) +{ + AwSdHostState *s = AW_SDHOST(obj); + + qbus_init(&s->sdbus, sizeof(s->sdbus), + TYPE_AW_SDHOST_BUS, DEVICE(s), "sd-bus"); + + memory_region_init_io(&s->iomem, obj, &allwinner_sdhost_ops, s, + TYPE_AW_SDHOST, 4 * KiB); + sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->iomem); + sysbus_init_irq(SYS_BUS_DEVICE(s), &s->irq); +} + +static void allwinner_sdhost_realize(DeviceState *dev, Error **errp) +{ + AwSdHostState *s = AW_SDHOST(dev); + + if (!s->dma_mr) { + error_setg(errp, TYPE_AW_SDHOST " 'dma-memory' link not set"); + return; + } + + address_space_init(&s->dma_as, s->dma_mr, "sdhost-dma"); +} + +static void allwinner_sdhost_reset(DeviceState *dev) +{ + AwSdHostState *s = AW_SDHOST(dev); + + s->global_ctl = REG_SD_GCTL_RST; + s->clock_ctl = REG_SD_CKCR_RST; + s->timeout = REG_SD_TMOR_RST; + s->bus_width = REG_SD_BWDR_RST; + s->block_size = REG_SD_BKSR_RST; + s->byte_count = REG_SD_BYCR_RST; + s->transfer_cnt = 0; + + s->command = 
REG_SD_CMDR_RST; + s->command_arg = REG_SD_CAGR_RST; + + for (int i = 0; i < ARRAY_SIZE(s->response); i++) { + s->response[i] = REG_SD_RESP_RST; + } + + s->irq_mask = REG_SD_IMKR_RST; + s->irq_status = REG_SD_RISR_RST; + s->status = REG_SD_STAR_RST; + + s->fifo_wlevel = REG_SD_FWLR_RST; + s->fifo_func_sel = REG_SD_FUNS_RST; + s->debug_enable = REG_SD_DBGC_RST; + s->auto12_arg = REG_SD_A12A_RST; + s->newtiming_set = REG_SD_NTSR_RST; + s->newtiming_debug = REG_SD_SDBG_RST; + s->hardware_rst = REG_SD_HWRST_RST; + s->dmac = REG_SD_DMAC_RST; + s->desc_base = REG_SD_DLBA_RST; + s->dmac_status = REG_SD_IDST_RST; + s->dmac_irq = REG_SD_IDIE_RST; + s->card_threshold = REG_SD_THLDC_RST; + s->startbit_detect = REG_SD_DSBD_RST; + s->response_crc = REG_SD_RES_CRC_RST; + + for (int i = 0; i < ARRAY_SIZE(s->data_crc); i++) { + s->data_crc[i] = REG_SD_DATA_CRC_RST; + } + + s->status_crc = REG_SD_CRC_STA_RST; +} + +static void allwinner_sdhost_bus_class_init(ObjectClass *klass, void *data) +{ + SDBusClass *sbc = SD_BUS_CLASS(klass); + + sbc->set_inserted = allwinner_sdhost_set_inserted; +} + +static void allwinner_sdhost_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = allwinner_sdhost_reset; + dc->vmsd = &vmstate_allwinner_sdhost; + dc->realize = allwinner_sdhost_realize; + device_class_set_props(dc, allwinner_sdhost_properties); +} + +static void allwinner_sdhost_sun4i_class_init(ObjectClass *klass, void *data) +{ + AwSdHostClass *sc = AW_SDHOST_CLASS(klass); + sc->max_desc_size = 8 * KiB; +} + +static void allwinner_sdhost_sun5i_class_init(ObjectClass *klass, void *data) +{ + AwSdHostClass *sc = AW_SDHOST_CLASS(klass); + sc->max_desc_size = 64 * KiB; +} + +static TypeInfo allwinner_sdhost_info = { + .name = TYPE_AW_SDHOST, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_init = allwinner_sdhost_init, + .instance_size = sizeof(AwSdHostState), + .class_init = allwinner_sdhost_class_init, + .class_size = sizeof(AwSdHostClass), + .abstract = true, +}; + +static const TypeInfo allwinner_sdhost_sun4i_info = { + .name = TYPE_AW_SDHOST_SUN4I, + .parent = TYPE_AW_SDHOST, + .class_init = allwinner_sdhost_sun4i_class_init, +}; + +static const TypeInfo allwinner_sdhost_sun5i_info = { + .name = TYPE_AW_SDHOST_SUN5I, + .parent = TYPE_AW_SDHOST, + .class_init = allwinner_sdhost_sun5i_class_init, +}; + +static const TypeInfo allwinner_sdhost_bus_info = { + .name = TYPE_AW_SDHOST_BUS, + .parent = TYPE_SD_BUS, + .instance_size = sizeof(SDBus), + .class_init = allwinner_sdhost_bus_class_init, +}; + +static void allwinner_sdhost_register_types(void) +{ + type_register_static(&allwinner_sdhost_info); + type_register_static(&allwinner_sdhost_sun4i_info); + type_register_static(&allwinner_sdhost_sun5i_info); + type_register_static(&allwinner_sdhost_bus_info); +} + +type_init(allwinner_sdhost_register_types) diff --git a/hw/sd/aspeed_sdhci.c b/hw/sd/aspeed_sdhci.c new file mode 100644 index 000000000..df1bdf1fa --- /dev/null +++ b/hw/sd/aspeed_sdhci.c @@ -0,0 +1,213 @@ +/* + * Aspeed SD Host Controller + * Eddie James + * + * Copyright (C) 2019 IBM Corp + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "qemu/error-report.h" +#include "hw/sd/aspeed_sdhci.h" +#include "qapi/error.h" +#include "hw/irq.h" +#include "migration/vmstate.h" +#include "hw/qdev-properties.h" +#include "trace.h" + +#define ASPEED_SDHCI_INFO 0x00 +#define ASPEED_SDHCI_INFO_SLOT1 (1 << 17) +#define ASPEED_SDHCI_INFO_SLOT0 (1 << 16) +#define 
ASPEED_SDHCI_INFO_RESET (1 << 0) +#define ASPEED_SDHCI_DEBOUNCE 0x04 +#define ASPEED_SDHCI_DEBOUNCE_RESET 0x00000005 +#define ASPEED_SDHCI_BUS 0x08 +#define ASPEED_SDHCI_SDIO_140 0x10 +#define ASPEED_SDHCI_SDIO_148 0x18 +#define ASPEED_SDHCI_SDIO_240 0x20 +#define ASPEED_SDHCI_SDIO_248 0x28 +#define ASPEED_SDHCI_WP_POL 0xec +#define ASPEED_SDHCI_CARD_DET 0xf0 +#define ASPEED_SDHCI_IRQ_STAT 0xfc + +#define TO_REG(addr) ((addr) / sizeof(uint32_t)) + +static uint64_t aspeed_sdhci_read(void *opaque, hwaddr addr, unsigned int size) +{ + uint32_t val = 0; + AspeedSDHCIState *sdhci = opaque; + + switch (addr) { + case ASPEED_SDHCI_SDIO_140: + val = (uint32_t)sdhci->slots[0].capareg; + break; + case ASPEED_SDHCI_SDIO_148: + val = (uint32_t)sdhci->slots[0].maxcurr; + break; + case ASPEED_SDHCI_SDIO_240: + val = (uint32_t)sdhci->slots[1].capareg; + break; + case ASPEED_SDHCI_SDIO_248: + val = (uint32_t)sdhci->slots[1].maxcurr; + break; + default: + if (addr < ASPEED_SDHCI_REG_SIZE) { + val = sdhci->regs[TO_REG(addr)]; + } else { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Out-of-bounds read at 0x%" HWADDR_PRIx "\n", + __func__, addr); + } + } + + trace_aspeed_sdhci_read(addr, size, (uint64_t) val); + + return (uint64_t)val; +} + +static void aspeed_sdhci_write(void *opaque, hwaddr addr, uint64_t val, + unsigned int size) +{ + AspeedSDHCIState *sdhci = opaque; + + trace_aspeed_sdhci_write(addr, size, val); + + switch (addr) { + case ASPEED_SDHCI_INFO: + /* The RESET bit automatically clears. */ + sdhci->regs[TO_REG(addr)] = (uint32_t)val & ~ASPEED_SDHCI_INFO_RESET; + break; + case ASPEED_SDHCI_SDIO_140: + sdhci->slots[0].capareg = (uint64_t)(uint32_t)val; + break; + case ASPEED_SDHCI_SDIO_148: + sdhci->slots[0].maxcurr = (uint64_t)(uint32_t)val; + break; + case ASPEED_SDHCI_SDIO_240: + sdhci->slots[1].capareg = (uint64_t)(uint32_t)val; + break; + case ASPEED_SDHCI_SDIO_248: + sdhci->slots[1].maxcurr = (uint64_t)(uint32_t)val; + break; + default: + if (addr < ASPEED_SDHCI_REG_SIZE) { + sdhci->regs[TO_REG(addr)] = (uint32_t)val; + } else { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Out-of-bounds write at 0x%" HWADDR_PRIx "\n", + __func__, addr); + } + } +} + +static const MemoryRegionOps aspeed_sdhci_ops = { + .read = aspeed_sdhci_read, + .write = aspeed_sdhci_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid.min_access_size = 4, + .valid.max_access_size = 4, +}; + +static void aspeed_sdhci_set_irq(void *opaque, int n, int level) +{ + AspeedSDHCIState *sdhci = opaque; + + if (level) { + sdhci->regs[TO_REG(ASPEED_SDHCI_IRQ_STAT)] |= BIT(n); + + qemu_irq_raise(sdhci->irq); + } else { + sdhci->regs[TO_REG(ASPEED_SDHCI_IRQ_STAT)] &= ~BIT(n); + + qemu_irq_lower(sdhci->irq); + } +} + +static void aspeed_sdhci_realize(DeviceState *dev, Error **errp) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + AspeedSDHCIState *sdhci = ASPEED_SDHCI(dev); + + /* Create input irqs for the slots */ + qdev_init_gpio_in_named_with_opaque(DEVICE(sbd), aspeed_sdhci_set_irq, + sdhci, NULL, sdhci->num_slots); + + sysbus_init_irq(sbd, &sdhci->irq); + memory_region_init_io(&sdhci->iomem, OBJECT(sdhci), &aspeed_sdhci_ops, + sdhci, TYPE_ASPEED_SDHCI, 0x1000); + sysbus_init_mmio(sbd, &sdhci->iomem); + + for (int i = 0; i < sdhci->num_slots; ++i) { + Object *sdhci_slot = OBJECT(&sdhci->slots[i]); + SysBusDevice *sbd_slot = SYS_BUS_DEVICE(&sdhci->slots[i]); + + if (!object_property_set_int(sdhci_slot, "sd-spec-version", 2, errp)) { + return; + } + + if (!object_property_set_uint(sdhci_slot, "capareg", + ASPEED_SDHCI_CAPABILITIES, errp)) { + 
return; + } + + if (!sysbus_realize(sbd_slot, errp)) { + return; + } + + sysbus_connect_irq(sbd_slot, 0, qdev_get_gpio_in(DEVICE(sbd), i)); + memory_region_add_subregion(&sdhci->iomem, (i + 1) * 0x100, + &sdhci->slots[i].iomem); + } +} + +static void aspeed_sdhci_reset(DeviceState *dev) +{ + AspeedSDHCIState *sdhci = ASPEED_SDHCI(dev); + + memset(sdhci->regs, 0, ASPEED_SDHCI_REG_SIZE); + + sdhci->regs[TO_REG(ASPEED_SDHCI_INFO)] = ASPEED_SDHCI_INFO_SLOT0; + if (sdhci->num_slots == 2) { + sdhci->regs[TO_REG(ASPEED_SDHCI_INFO)] |= ASPEED_SDHCI_INFO_SLOT1; + } + sdhci->regs[TO_REG(ASPEED_SDHCI_DEBOUNCE)] = ASPEED_SDHCI_DEBOUNCE_RESET; +} + +static const VMStateDescription vmstate_aspeed_sdhci = { + .name = TYPE_ASPEED_SDHCI, + .version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32_ARRAY(regs, AspeedSDHCIState, ASPEED_SDHCI_NUM_REGS), + VMSTATE_END_OF_LIST(), + }, +}; + +static Property aspeed_sdhci_properties[] = { + DEFINE_PROP_UINT8("num-slots", AspeedSDHCIState, num_slots, 0), + DEFINE_PROP_END_OF_LIST(), +}; + +static void aspeed_sdhci_class_init(ObjectClass *classp, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(classp); + + dc->realize = aspeed_sdhci_realize; + dc->reset = aspeed_sdhci_reset; + dc->vmsd = &vmstate_aspeed_sdhci; + device_class_set_props(dc, aspeed_sdhci_properties); +} + +static TypeInfo aspeed_sdhci_info = { + .name = TYPE_ASPEED_SDHCI, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(AspeedSDHCIState), + .class_init = aspeed_sdhci_class_init, +}; + +static void aspeed_sdhci_register_types(void) +{ + type_register_static(&aspeed_sdhci_info); +} + +type_init(aspeed_sdhci_register_types) diff --git a/hw/sd/bcm2835_sdhost.c b/hw/sd/bcm2835_sdhost.c new file mode 100644 index 000000000..088a7ac6e --- /dev/null +++ b/hw/sd/bcm2835_sdhost.c @@ -0,0 +1,459 @@ +/* + * Raspberry Pi (BCM2835) SD Host Controller + * + * Copyright (c) 2017 Antfield SAS + * + * Authors: + * Clement Deschamps + * Luc Michel + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "sysemu/blockdev.h" +#include "hw/irq.h" +#include "hw/sd/bcm2835_sdhost.h" +#include "migration/vmstate.h" +#include "trace.h" +#include "qom/object.h" + +#define TYPE_BCM2835_SDHOST_BUS "bcm2835-sdhost-bus" +/* This is reusing the SDBus typedef from SD_BUS */ +DECLARE_INSTANCE_CHECKER(SDBus, BCM2835_SDHOST_BUS, + TYPE_BCM2835_SDHOST_BUS) + +#define SDCMD 0x00 /* Command to SD card - 16 R/W */ +#define SDARG 0x04 /* Argument to SD card - 32 R/W */ +#define SDTOUT 0x08 /* Start value for timeout counter - 32 R/W */ +#define SDCDIV 0x0c /* Start value for clock divider - 11 R/W */ +#define SDRSP0 0x10 /* SD card rsp (31:0) - 32 R */ +#define SDRSP1 0x14 /* SD card rsp (63:32) - 32 R */ +#define SDRSP2 0x18 /* SD card rsp (95:64) - 32 R */ +#define SDRSP3 0x1c /* SD card rsp (127:96) - 32 R */ +#define SDHSTS 0x20 /* SD host status - 11 R */ +#define SDVDD 0x30 /* SD card power control - 1 R/W */ +#define SDEDM 0x34 /* Emergency Debug Mode - 13 R/W */ +#define SDHCFG 0x38 /* Host configuration - 2 R/W */ +#define SDHBCT 0x3c /* Host byte count (debug) - 32 R/W */ +#define SDDATA 0x40 /* Data to/from SD card - 32 R/W */ +#define SDHBLC 0x50 /* Host block count (SDIO/SDHC) - 9 R/W */ + +#define SDCMD_NEW_FLAG 0x8000 +#define SDCMD_FAIL_FLAG 0x4000 +#define SDCMD_BUSYWAIT 0x800 +#define SDCMD_NO_RESPONSE 0x400 +#define SDCMD_LONG_RESPONSE 0x200 +#define SDCMD_WRITE_CMD 0x80 +#define SDCMD_READ_CMD 0x40 +#define SDCMD_CMD_MASK 0x3f + +#define SDCDIV_MAX_CDIV 0x7ff + +#define SDHSTS_BUSY_IRPT 0x400 +#define SDHSTS_BLOCK_IRPT 0x200 +#define SDHSTS_SDIO_IRPT 0x100 +#define SDHSTS_REW_TIME_OUT 0x80 +#define SDHSTS_CMD_TIME_OUT 0x40 +#define SDHSTS_CRC16_ERROR 0x20 +#define SDHSTS_CRC7_ERROR 0x10 +#define SDHSTS_FIFO_ERROR 0x08 +/* Reserved */ +/* Reserved */ +#define SDHSTS_DATA_FLAG 0x01 + +#define SDHCFG_BUSY_IRPT_EN (1 << 10) +#define SDHCFG_BLOCK_IRPT_EN (1 << 8) +#define SDHCFG_SDIO_IRPT_EN (1 << 5) +#define SDHCFG_DATA_IRPT_EN (1 << 4) +#define SDHCFG_SLOW_CARD (1 << 3) +#define SDHCFG_WIDE_EXT_BUS (1 << 2) +#define SDHCFG_WIDE_INT_BUS (1 << 1) +#define SDHCFG_REL_CMD_LINE (1 << 0) + +#define SDEDM_FORCE_DATA_MODE (1 << 19) +#define SDEDM_CLOCK_PULSE (1 << 20) +#define SDEDM_BYPASS (1 << 21) + +#define SDEDM_WRITE_THRESHOLD_SHIFT 9 +#define SDEDM_READ_THRESHOLD_SHIFT 14 +#define SDEDM_THRESHOLD_MASK 0x1f + +#define SDEDM_FSM_MASK 0xf +#define SDEDM_FSM_IDENTMODE 0x0 +#define SDEDM_FSM_DATAMODE 0x1 +#define SDEDM_FSM_READDATA 0x2 +#define SDEDM_FSM_WRITEDATA 0x3 +#define SDEDM_FSM_READWAIT 0x4 +#define SDEDM_FSM_READCRC 0x5 +#define SDEDM_FSM_WRITECRC 0x6 +#define SDEDM_FSM_WRITEWAIT1 0x7 +#define SDEDM_FSM_POWERDOWN 0x8 +#define SDEDM_FSM_POWERUP 0x9 +#define SDEDM_FSM_WRITESTART1 0xa +#define SDEDM_FSM_WRITESTART2 0xb +#define SDEDM_FSM_GENPULSES 0xc +#define SDEDM_FSM_WRITEWAIT2 0xd +#define SDEDM_FSM_STARTPOWDOWN 0xf + +#define SDDATA_FIFO_WORDS 16 + +static void bcm2835_sdhost_update_irq(BCM2835SDHostState *s) +{ + uint32_t irq = s->status & + (SDHSTS_BUSY_IRPT | SDHSTS_BLOCK_IRPT | SDHSTS_SDIO_IRPT); + trace_bcm2835_sdhost_update_irq(irq); + qemu_set_irq(s->irq, !!irq); +} + +static void bcm2835_sdhost_send_command(BCM2835SDHostState *s) +{ + SDRequest request; + uint8_t rsp[16]; + int rlen; + + request.cmd = s->cmd & SDCMD_CMD_MASK; + request.arg = s->cmdarg; + + rlen = sdbus_do_command(&s->sdbus, &request, rsp); + if (rlen < 0) { + goto error; + } + if (!(s->cmd & SDCMD_NO_RESPONSE)) { + if (rlen 
== 0 || (rlen == 4 && (s->cmd & SDCMD_LONG_RESPONSE))) { + goto error; + } + if (rlen != 4 && rlen != 16) { + goto error; + } + if (rlen == 4) { + s->rsp[0] = ldl_be_p(&rsp[0]); + s->rsp[1] = s->rsp[2] = s->rsp[3] = 0; + } else { + s->rsp[0] = ldl_be_p(&rsp[12]); + s->rsp[1] = ldl_be_p(&rsp[8]); + s->rsp[2] = ldl_be_p(&rsp[4]); + s->rsp[3] = ldl_be_p(&rsp[0]); + } + } + /* We never really delay commands, so if this was a 'busywait' command + * then we've completed it now and can raise the interrupt. + */ + if ((s->cmd & SDCMD_BUSYWAIT) && (s->config & SDHCFG_BUSY_IRPT_EN)) { + s->status |= SDHSTS_BUSY_IRPT; + } + return; + +error: + s->cmd |= SDCMD_FAIL_FLAG; + s->status |= SDHSTS_CMD_TIME_OUT; +} + +static void bcm2835_sdhost_fifo_push(BCM2835SDHostState *s, uint32_t value) +{ + int n; + + if (s->fifo_len == BCM2835_SDHOST_FIFO_LEN) { + /* FIFO overflow */ + return; + } + n = (s->fifo_pos + s->fifo_len) & (BCM2835_SDHOST_FIFO_LEN - 1); + s->fifo_len++; + s->fifo[n] = value; +} + +static uint32_t bcm2835_sdhost_fifo_pop(BCM2835SDHostState *s) +{ + uint32_t value; + + if (s->fifo_len == 0) { + /* FIFO underflow */ + return 0; + } + value = s->fifo[s->fifo_pos]; + s->fifo_len--; + s->fifo_pos = (s->fifo_pos + 1) & (BCM2835_SDHOST_FIFO_LEN - 1); + return value; +} + +static void bcm2835_sdhost_fifo_run(BCM2835SDHostState *s) +{ + uint32_t value = 0; + int n; + int is_read; + int is_write; + + is_read = (s->cmd & SDCMD_READ_CMD) != 0; + is_write = (s->cmd & SDCMD_WRITE_CMD) != 0; + if (s->datacnt != 0 && (is_write || sdbus_data_ready(&s->sdbus))) { + if (is_read) { + n = 0; + while (s->datacnt && s->fifo_len < BCM2835_SDHOST_FIFO_LEN) { + value |= (uint32_t)sdbus_read_byte(&s->sdbus) << (n * 8); + s->datacnt--; + n++; + if (n == 4) { + bcm2835_sdhost_fifo_push(s, value); + s->status |= SDHSTS_DATA_FLAG; + if (s->config & SDHCFG_DATA_IRPT_EN) { + s->status |= SDHSTS_SDIO_IRPT; + } + n = 0; + value = 0; + } + } + if (n != 0) { + bcm2835_sdhost_fifo_push(s, value); + s->status |= SDHSTS_DATA_FLAG; + if (s->config & SDHCFG_DATA_IRPT_EN) { + s->status |= SDHSTS_SDIO_IRPT; + } + } + } else if (is_write) { /* write */ + n = 0; + while (s->datacnt > 0 && (s->fifo_len > 0 || n > 0)) { + if (n == 0) { + value = bcm2835_sdhost_fifo_pop(s); + s->status |= SDHSTS_DATA_FLAG; + if (s->config & SDHCFG_DATA_IRPT_EN) { + s->status |= SDHSTS_SDIO_IRPT; + } + n = 4; + } + n--; + s->datacnt--; + sdbus_write_byte(&s->sdbus, value & 0xff); + value >>= 8; + } + } + if (s->datacnt == 0) { + s->edm &= ~SDEDM_FSM_MASK; + s->edm |= SDEDM_FSM_DATAMODE; + trace_bcm2835_sdhost_edm_change("datacnt 0", s->edm); + } + if (is_write) { + /* set block interrupt at end of each block transfer */ + if (s->hbct && s->datacnt % s->hbct == 0 && + (s->config & SDHCFG_BLOCK_IRPT_EN)) { + s->status |= SDHSTS_BLOCK_IRPT; + } + /* set data interrupt after each transfer */ + s->status |= SDHSTS_DATA_FLAG; + if (s->config & SDHCFG_DATA_IRPT_EN) { + s->status |= SDHSTS_SDIO_IRPT; + } + } + } + + bcm2835_sdhost_update_irq(s); + + s->edm &= ~(0x1f << 4); + s->edm |= ((s->fifo_len & 0x1f) << 4); + trace_bcm2835_sdhost_edm_change("fifo run", s->edm); +} + +static uint64_t bcm2835_sdhost_read(void *opaque, hwaddr offset, + unsigned size) +{ + BCM2835SDHostState *s = (BCM2835SDHostState *)opaque; + uint32_t res = 0; + + switch (offset) { + case SDCMD: + res = s->cmd; + break; + case SDHSTS: + res = s->status; + break; + case SDRSP0: + res = s->rsp[0]; + break; + case SDRSP1: + res = s->rsp[1]; + break; + case SDRSP2: + res = s->rsp[2]; + break; + 
case SDRSP3: + res = s->rsp[3]; + break; + case SDEDM: + res = s->edm; + break; + case SDVDD: + res = s->vdd; + break; + case SDDATA: + res = bcm2835_sdhost_fifo_pop(s); + bcm2835_sdhost_fifo_run(s); + break; + case SDHBCT: + res = s->hbct; + break; + case SDHBLC: + res = s->hblc; + break; + + default: + qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset %"HWADDR_PRIx"\n", + __func__, offset); + res = 0; + break; + } + + trace_bcm2835_sdhost_read(offset, res, size); + + return res; +} + +static void bcm2835_sdhost_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + BCM2835SDHostState *s = (BCM2835SDHostState *)opaque; + + trace_bcm2835_sdhost_write(offset, value, size); + + switch (offset) { + case SDCMD: + s->cmd = value; + if (value & SDCMD_NEW_FLAG) { + bcm2835_sdhost_send_command(s); + bcm2835_sdhost_fifo_run(s); + s->cmd &= ~SDCMD_NEW_FLAG; + } + break; + case SDTOUT: + break; + case SDCDIV: + break; + case SDHSTS: + s->status &= ~value; + bcm2835_sdhost_update_irq(s); + break; + case SDARG: + s->cmdarg = value; + break; + case SDEDM: + if ((value & 0xf) == 0xf) { + /* power down */ + value &= ~0xf; + } + s->edm = value; + trace_bcm2835_sdhost_edm_change("guest register write", s->edm); + break; + case SDHCFG: + s->config = value; + bcm2835_sdhost_fifo_run(s); + break; + case SDVDD: + s->vdd = value; + break; + case SDDATA: + bcm2835_sdhost_fifo_push(s, value); + bcm2835_sdhost_fifo_run(s); + break; + case SDHBCT: + s->hbct = value; + break; + case SDHBLC: + s->hblc = value; + s->datacnt = s->hblc * s->hbct; + bcm2835_sdhost_fifo_run(s); + break; + + default: + qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset %"HWADDR_PRIx"\n", + __func__, offset); + break; + } +} + +static const MemoryRegionOps bcm2835_sdhost_ops = { + .read = bcm2835_sdhost_read, + .write = bcm2835_sdhost_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static const VMStateDescription vmstate_bcm2835_sdhost = { + .name = TYPE_BCM2835_SDHOST, + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(cmd, BCM2835SDHostState), + VMSTATE_UINT32(cmdarg, BCM2835SDHostState), + VMSTATE_UINT32(status, BCM2835SDHostState), + VMSTATE_UINT32_ARRAY(rsp, BCM2835SDHostState, 4), + VMSTATE_UINT32(config, BCM2835SDHostState), + VMSTATE_UINT32(edm, BCM2835SDHostState), + VMSTATE_UINT32(vdd, BCM2835SDHostState), + VMSTATE_UINT32(hbct, BCM2835SDHostState), + VMSTATE_UINT32(hblc, BCM2835SDHostState), + VMSTATE_INT32(fifo_pos, BCM2835SDHostState), + VMSTATE_INT32(fifo_len, BCM2835SDHostState), + VMSTATE_UINT32_ARRAY(fifo, BCM2835SDHostState, BCM2835_SDHOST_FIFO_LEN), + VMSTATE_UINT32(datacnt, BCM2835SDHostState), + VMSTATE_END_OF_LIST() + } +}; + +static void bcm2835_sdhost_init(Object *obj) +{ + BCM2835SDHostState *s = BCM2835_SDHOST(obj); + + qbus_init(&s->sdbus, sizeof(s->sdbus), + TYPE_BCM2835_SDHOST_BUS, DEVICE(s), "sd-bus"); + + memory_region_init_io(&s->iomem, obj, &bcm2835_sdhost_ops, s, + TYPE_BCM2835_SDHOST, 0x1000); + sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->iomem); + sysbus_init_irq(SYS_BUS_DEVICE(s), &s->irq); +} + +static void bcm2835_sdhost_reset(DeviceState *dev) +{ + BCM2835SDHostState *s = BCM2835_SDHOST(dev); + + s->cmd = 0; + s->cmdarg = 0; + s->edm = 0x0000c60f; + trace_bcm2835_sdhost_edm_change("device reset", s->edm); + s->config = 0; + s->hbct = 0; + s->hblc = 0; + s->datacnt = 0; + s->fifo_pos = 0; + s->fifo_len = 0; +} + +static void bcm2835_sdhost_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = 
bcm2835_sdhost_reset; + dc->vmsd = &vmstate_bcm2835_sdhost; +} + +static TypeInfo bcm2835_sdhost_info = { + .name = TYPE_BCM2835_SDHOST, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(BCM2835SDHostState), + .class_init = bcm2835_sdhost_class_init, + .instance_init = bcm2835_sdhost_init, +}; + +static const TypeInfo bcm2835_sdhost_bus_info = { + .name = TYPE_BCM2835_SDHOST_BUS, + .parent = TYPE_SD_BUS, + .instance_size = sizeof(SDBus), +}; + +static void bcm2835_sdhost_register_types(void) +{ + type_register_static(&bcm2835_sdhost_info); + type_register_static(&bcm2835_sdhost_bus_info); +} + +type_init(bcm2835_sdhost_register_types) diff --git a/hw/sd/cadence_sdhci.c b/hw/sd/cadence_sdhci.c new file mode 100644 index 000000000..56b8bae1c --- /dev/null +++ b/hw/sd/cadence_sdhci.c @@ -0,0 +1,191 @@ +/* + * Cadence SDHCI emulation + * + * Copyright (c) 2020 Wind River Systems, Inc. + * + * Author: + * Bin Meng + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 or + * (at your option) version 3 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . + */ + +#include "qemu/osdep.h" +#include "qemu/bitops.h" +#include "qemu/error-report.h" +#include "qapi/error.h" +#include "migration/vmstate.h" +#include "hw/sd/cadence_sdhci.h" +#include "sdhci-internal.h" + +/* HRS - Host Register Set (specific to Cadence) */ + +#define CADENCE_SDHCI_HRS00 0x00 /* general information */ +#define CADENCE_SDHCI_HRS00_SWR BIT(0) +#define CADENCE_SDHCI_HRS00_POR_VAL 0x00010000 + +#define CADENCE_SDHCI_HRS04 0x10 /* PHY access port */ +#define CADENCE_SDHCI_HRS04_WR BIT(24) +#define CADENCE_SDHCI_HRS04_RD BIT(25) +#define CADENCE_SDHCI_HRS04_ACK BIT(26) + +#define CADENCE_SDHCI_HRS06 0x18 /* eMMC control */ +#define CADENCE_SDHCI_HRS06_TUNE_UP BIT(15) + +/* SRS - Slot Register Set (SDHCI-compatible) */ + +#define CADENCE_SDHCI_SRS_BASE 0x200 + +#define TO_REG(addr) ((addr) / sizeof(uint32_t)) + +static void cadence_sdhci_instance_init(Object *obj) +{ + CadenceSDHCIState *s = CADENCE_SDHCI(obj); + + object_initialize_child(OBJECT(s), "generic-sdhci", + &s->sdhci, TYPE_SYSBUS_SDHCI); +} + +static void cadence_sdhci_reset(DeviceState *dev) +{ + CadenceSDHCIState *s = CADENCE_SDHCI(dev); + + memset(s->regs, 0, CADENCE_SDHCI_REG_SIZE); + s->regs[TO_REG(CADENCE_SDHCI_HRS00)] = CADENCE_SDHCI_HRS00_POR_VAL; + + device_cold_reset(DEVICE(&s->sdhci)); +} + +static uint64_t cadence_sdhci_read(void *opaque, hwaddr addr, unsigned int size) +{ + CadenceSDHCIState *s = opaque; + uint32_t val; + + val = s->regs[TO_REG(addr)]; + + return (uint64_t)val; +} + +static void cadence_sdhci_write(void *opaque, hwaddr addr, uint64_t val, + unsigned int size) +{ + CadenceSDHCIState *s = opaque; + uint32_t val32 = (uint32_t)val; + + switch (addr) { + case CADENCE_SDHCI_HRS00: + /* + * The only writable bit is SWR (software reset) and it automatically + * clears to zero, so essentially this register remains unchanged. 
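+         * A write with SWR set triggers cadence_sdhci_reset(), which also
+         * cold-resets the embedded SDHCI and restores the HRS00 POR value.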
+         */
+        if (val32 & CADENCE_SDHCI_HRS00_SWR) {
+            cadence_sdhci_reset(DEVICE(s));
+        }
+
+        break;
+    case CADENCE_SDHCI_HRS04:
+        /*
+         * Only emulate the ACK bit behavior when read or write transactions
+         * are requested.
+         */
+        if (val32 & (CADENCE_SDHCI_HRS04_WR | CADENCE_SDHCI_HRS04_RD)) {
+            val32 |= CADENCE_SDHCI_HRS04_ACK;
+        } else {
+            val32 &= ~CADENCE_SDHCI_HRS04_ACK;
+        }
+
+        s->regs[TO_REG(addr)] = val32;
+        break;
+    case CADENCE_SDHCI_HRS06:
+        if (val32 & CADENCE_SDHCI_HRS06_TUNE_UP) {
+            val32 &= ~CADENCE_SDHCI_HRS06_TUNE_UP;
+        }
+
+        s->regs[TO_REG(addr)] = val32;
+        break;
+    default:
+        s->regs[TO_REG(addr)] = val32;
+        break;
+    }
+}
+
+static const MemoryRegionOps cadence_sdhci_ops = {
+    .read = cadence_sdhci_read,
+    .write = cadence_sdhci_write,
+    .endianness = DEVICE_NATIVE_ENDIAN,
+    .impl = {
+        .min_access_size = 4,
+        .max_access_size = 4,
+    },
+    .valid = {
+        .min_access_size = 4,
+        .max_access_size = 4,
+    }
+};
+
+static void cadence_sdhci_realize(DeviceState *dev, Error **errp)
+{
+    CadenceSDHCIState *s = CADENCE_SDHCI(dev);
+    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
+    SysBusDevice *sbd_sdhci = SYS_BUS_DEVICE(&s->sdhci);
+
+    memory_region_init(&s->container, OBJECT(s),
+                       "cadence.sdhci-container", 0x1000);
+    sysbus_init_mmio(sbd, &s->container);
+
+    memory_region_init_io(&s->iomem, OBJECT(s), &cadence_sdhci_ops,
+                          s, TYPE_CADENCE_SDHCI, CADENCE_SDHCI_REG_SIZE);
+    memory_region_add_subregion(&s->container, 0, &s->iomem);
+
+    sysbus_realize(sbd_sdhci, errp);
+    memory_region_add_subregion(&s->container, CADENCE_SDHCI_SRS_BASE,
+                                sysbus_mmio_get_region(sbd_sdhci, 0));
+
+    /* propagate irq and "sd-bus" from generic-sdhci */
+    sysbus_pass_irq(sbd, sbd_sdhci);
+    s->bus = qdev_get_child_bus(DEVICE(sbd_sdhci), "sd-bus");
+}
+
+static const VMStateDescription vmstate_cadence_sdhci = {
+    .name = TYPE_CADENCE_SDHCI,
+    .version_id = 1,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT32_ARRAY(regs, CadenceSDHCIState, CADENCE_SDHCI_NUM_REGS),
+        VMSTATE_END_OF_LIST(),
+    },
+};
+
+static void cadence_sdhci_class_init(ObjectClass *classp, void *data)
+{
+    DeviceClass *dc = DEVICE_CLASS(classp);
+
+    dc->desc = "Cadence SD/SDIO/eMMC Host Controller (SD4HC)";
+    dc->realize = cadence_sdhci_realize;
+    dc->reset = cadence_sdhci_reset;
+    dc->vmsd = &vmstate_cadence_sdhci;
+}
+
+static const TypeInfo cadence_sdhci_info = {
+    .name = TYPE_CADENCE_SDHCI,
+    .parent = TYPE_SYS_BUS_DEVICE,
+    .instance_size = sizeof(CadenceSDHCIState),
+    .instance_init = cadence_sdhci_instance_init,
+    .class_init = cadence_sdhci_class_init,
+};
+
+static void cadence_sdhci_register_types(void)
+{
+    type_register_static(&cadence_sdhci_info);
+}
+
+type_init(cadence_sdhci_register_types)
diff --git a/hw/sd/core.c b/hw/sd/core.c
new file mode 100644
index 000000000..30ee62c51
--- /dev/null
+++ b/hw/sd/core.c
@@ -0,0 +1,274 @@
+/*
+ * SD card bus interface code.
+ *
+ * Copyright (c) 2015 Linaro Limited
+ *
+ * Author:
+ *  Peter Maydell
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */ + +#include "qemu/osdep.h" +#include "hw/qdev-core.h" +#include "hw/sd/sd.h" +#include "qemu/module.h" +#include "qapi/error.h" +#include "trace.h" + +static inline const char *sdbus_name(SDBus *sdbus) +{ + return sdbus->qbus.name; +} + +static SDState *get_card(SDBus *sdbus) +{ + /* We only ever have one child on the bus so just return it */ + BusChild *kid = QTAILQ_FIRST(&sdbus->qbus.children); + + if (!kid) { + return NULL; + } + return SD_CARD(kid->child); +} + +uint8_t sdbus_get_dat_lines(SDBus *sdbus) +{ + SDState *slave = get_card(sdbus); + uint8_t dat_lines = 0b1111; /* 4 bit bus width */ + + if (slave) { + SDCardClass *sc = SD_CARD_GET_CLASS(slave); + + if (sc->get_dat_lines) { + dat_lines = sc->get_dat_lines(slave); + } + } + trace_sdbus_get_dat_lines(sdbus_name(sdbus), dat_lines); + + return dat_lines; +} + +bool sdbus_get_cmd_line(SDBus *sdbus) +{ + SDState *slave = get_card(sdbus); + bool cmd_line = true; + + if (slave) { + SDCardClass *sc = SD_CARD_GET_CLASS(slave); + + if (sc->get_cmd_line) { + cmd_line = sc->get_cmd_line(slave); + } + } + trace_sdbus_get_cmd_line(sdbus_name(sdbus), cmd_line); + + return cmd_line; +} + +void sdbus_set_voltage(SDBus *sdbus, uint16_t millivolts) +{ + SDState *card = get_card(sdbus); + + trace_sdbus_set_voltage(sdbus_name(sdbus), millivolts); + if (card) { + SDCardClass *sc = SD_CARD_GET_CLASS(card); + + assert(sc->set_voltage); + sc->set_voltage(card, millivolts); + } +} + +int sdbus_do_command(SDBus *sdbus, SDRequest *req, uint8_t *response) +{ + SDState *card = get_card(sdbus); + + trace_sdbus_command(sdbus_name(sdbus), req->cmd, req->arg); + if (card) { + SDCardClass *sc = SD_CARD_GET_CLASS(card); + + return sc->do_command(card, req, response); + } + + return 0; +} + +void sdbus_write_byte(SDBus *sdbus, uint8_t value) +{ + SDState *card = get_card(sdbus); + + trace_sdbus_write(sdbus_name(sdbus), value); + if (card) { + SDCardClass *sc = SD_CARD_GET_CLASS(card); + + sc->write_byte(card, value); + } +} + +void sdbus_write_data(SDBus *sdbus, const void *buf, size_t length) +{ + SDState *card = get_card(sdbus); + const uint8_t *data = buf; + + if (card) { + SDCardClass *sc = SD_CARD_GET_CLASS(card); + + for (size_t i = 0; i < length; i++) { + trace_sdbus_write(sdbus_name(sdbus), data[i]); + sc->write_byte(card, data[i]); + } + } +} + +uint8_t sdbus_read_byte(SDBus *sdbus) +{ + SDState *card = get_card(sdbus); + uint8_t value = 0; + + if (card) { + SDCardClass *sc = SD_CARD_GET_CLASS(card); + + value = sc->read_byte(card); + } + trace_sdbus_read(sdbus_name(sdbus), value); + + return value; +} + +void sdbus_read_data(SDBus *sdbus, void *buf, size_t length) +{ + SDState *card = get_card(sdbus); + uint8_t *data = buf; + + if (card) { + SDCardClass *sc = SD_CARD_GET_CLASS(card); + + for (size_t i = 0; i < length; i++) { + data[i] = sc->read_byte(card); + trace_sdbus_read(sdbus_name(sdbus), data[i]); + } + } +} + +bool sdbus_receive_ready(SDBus *sdbus) +{ + SDState *card = get_card(sdbus); + + if (card) { + SDCardClass *sc = SD_CARD_GET_CLASS(card); + + return sc->receive_ready(card); + } + + return false; +} + +bool sdbus_data_ready(SDBus *sdbus) +{ + SDState *card = get_card(sdbus); + + if (card) { + SDCardClass *sc = SD_CARD_GET_CLASS(card); + + return sc->data_ready(card); + } + + return false; +} + +bool sdbus_get_inserted(SDBus *sdbus) +{ + SDState *card = get_card(sdbus); + + if (card) { + SDCardClass *sc = SD_CARD_GET_CLASS(card); + + return sc->get_inserted(card); + } + + return false; +} + +bool sdbus_get_readonly(SDBus *sdbus) +{ + 
SDState *card = get_card(sdbus); + + if (card) { + SDCardClass *sc = SD_CARD_GET_CLASS(card); + + return sc->get_readonly(card); + } + + return false; +} + +void sdbus_set_inserted(SDBus *sdbus, bool inserted) +{ + SDBusClass *sbc = SD_BUS_GET_CLASS(sdbus); + BusState *qbus = BUS(sdbus); + + if (sbc->set_inserted) { + sbc->set_inserted(qbus->parent, inserted); + } +} + +void sdbus_set_readonly(SDBus *sdbus, bool readonly) +{ + SDBusClass *sbc = SD_BUS_GET_CLASS(sdbus); + BusState *qbus = BUS(sdbus); + + if (sbc->set_readonly) { + sbc->set_readonly(qbus->parent, readonly); + } +} + +void sdbus_reparent_card(SDBus *from, SDBus *to) +{ + SDState *card = get_card(from); + SDCardClass *sc; + bool readonly; + + /* We directly reparent the card object rather than implementing this + * as a hotpluggable connection because we don't want to expose SD cards + * to users as being hotpluggable, and we can get away with it in this + * limited use case. This could perhaps be implemented more cleanly in + * future by adding support to the hotplug infrastructure for "device + * can be hotplugged only via code, not by user". + */ + + if (!card) { + return; + } + + sc = SD_CARD_GET_CLASS(card); + readonly = sc->get_readonly(card); + + sdbus_set_inserted(from, false); + qdev_set_parent_bus(DEVICE(card), &to->qbus, &error_abort); + sdbus_set_inserted(to, true); + sdbus_set_readonly(to, readonly); +} + +static const TypeInfo sd_bus_info = { + .name = TYPE_SD_BUS, + .parent = TYPE_BUS, + .instance_size = sizeof(SDBus), + .class_size = sizeof(SDBusClass), +}; + +static void sd_bus_register_types(void) +{ + type_register_static(&sd_bus_info); +} + +type_init(sd_bus_register_types) diff --git a/hw/sd/meson.build b/hw/sd/meson.build new file mode 100644 index 000000000..807ca07b7 --- /dev/null +++ b/hw/sd/meson.build @@ -0,0 +1,13 @@ +softmmu_ss.add(when: 'CONFIG_PL181', if_true: files('pl181.c')) +softmmu_ss.add(when: 'CONFIG_SD', if_true: files('sd.c', 'core.c', 'sdmmc-internal.c')) +softmmu_ss.add(when: 'CONFIG_SDHCI', if_true: files('sdhci.c')) +softmmu_ss.add(when: 'CONFIG_SDHCI_PCI', if_true: files('sdhci-pci.c')) +softmmu_ss.add(when: 'CONFIG_SSI_SD', if_true: files('ssi-sd.c')) + +softmmu_ss.add(when: 'CONFIG_OMAP', if_true: files('omap_mmc.c')) +softmmu_ss.add(when: 'CONFIG_PXA2XX', if_true: files('pxa2xx_mmci.c')) +softmmu_ss.add(when: 'CONFIG_RASPI', if_true: files('bcm2835_sdhost.c')) +softmmu_ss.add(when: 'CONFIG_ASPEED_SOC', if_true: files('aspeed_sdhci.c')) +softmmu_ss.add(when: 'CONFIG_ALLWINNER_H3', if_true: files('allwinner-sdhost.c')) +softmmu_ss.add(when: 'CONFIG_NPCM7XX', if_true: files('npcm7xx_sdhci.c')) +softmmu_ss.add(when: 'CONFIG_CADENCE_SDHCI', if_true: files('cadence_sdhci.c')) diff --git a/hw/sd/npcm7xx_sdhci.c b/hw/sd/npcm7xx_sdhci.c new file mode 100644 index 000000000..ef503365d --- /dev/null +++ b/hw/sd/npcm7xx_sdhci.c @@ -0,0 +1,182 @@ +/* + * NPCM7xx SD-3.0 / eMMC-4.51 Host Controller + * + * Copyright (c) 2021 Google LLC + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ */
+
+#include "qemu/osdep.h"
+
+#include "hw/sd/npcm7xx_sdhci.h"
+#include "migration/vmstate.h"
+#include "sdhci-internal.h"
+#include "qemu/log.h"
+
+static uint64_t npcm7xx_sdhci_read(void *opaque, hwaddr addr, unsigned int size)
+{
+    NPCM7xxSDHCIState *s = opaque;
+    uint64_t val = 0;
+
+    switch (addr) {
+    case NPCM7XX_PRSTVALS_0:
+    case NPCM7XX_PRSTVALS_1:
+    case NPCM7XX_PRSTVALS_2:
+    case NPCM7XX_PRSTVALS_3:
+    case NPCM7XX_PRSTVALS_4:
+    case NPCM7XX_PRSTVALS_5:
+        val = s->regs.prstvals[(addr - NPCM7XX_PRSTVALS_0) / 2];
+        break;
+    case NPCM7XX_BOOTTOCTRL:
+        val = s->regs.boottoctrl;
+        break;
+    default:
+        qemu_log_mask(LOG_GUEST_ERROR, "SDHCI read of nonexistent reg: 0x%02"
+                      HWADDR_PRIx "\n", addr);
+        break;
+    }
+
+    return val;
+}
+
+static void npcm7xx_sdhci_write(void *opaque, hwaddr addr, uint64_t val,
+                                unsigned int size)
+{
+    NPCM7xxSDHCIState *s = opaque;
+
+    switch (addr) {
+    case NPCM7XX_BOOTTOCTRL:
+        s->regs.boottoctrl = val;
+        break;
+    default:
+        qemu_log_mask(LOG_GUEST_ERROR, "SDHCI write of nonexistent reg: 0x%02"
+                      HWADDR_PRIx "\n", addr);
+        break;
+    }
+}
+
+static bool npcm7xx_sdhci_check_mem_op(void *opaque, hwaddr addr,
+                                       unsigned size, bool is_write,
+                                       MemTxAttrs attrs)
+{
+    switch (addr) {
+    case NPCM7XX_PRSTVALS_0:
+    case NPCM7XX_PRSTVALS_1:
+    case NPCM7XX_PRSTVALS_2:
+    case NPCM7XX_PRSTVALS_3:
+    case NPCM7XX_PRSTVALS_4:
+    case NPCM7XX_PRSTVALS_5:
+        /* RO Word */
+        return !is_write && size == 2;
+    case NPCM7XX_BOOTTOCTRL:
+        /* R/W Dword */
+        return size == 4;
+    default:
+        return false;
+    }
+}
+
+static const MemoryRegionOps npcm7xx_sdhci_ops = {
+    .read = npcm7xx_sdhci_read,
+    .write = npcm7xx_sdhci_write,
+    .endianness = DEVICE_NATIVE_ENDIAN,
+    .valid = {
+        .min_access_size = 1,
+        .max_access_size = 4,
+        .unaligned = false,
+        .accepts = npcm7xx_sdhci_check_mem_op,
+    },
+};
+
+static void npcm7xx_sdhci_realize(DeviceState *dev, Error **errp)
+{
+    NPCM7xxSDHCIState *s = NPCM7XX_SDHCI(dev);
+    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
+    SysBusDevice *sbd_sdhci = SYS_BUS_DEVICE(&s->sdhci);
+
+    memory_region_init(&s->container, OBJECT(s),
+                       "npcm7xx.sdhci-container", 0x1000);
+    sysbus_init_mmio(sbd, &s->container);
+
+    memory_region_init_io(&s->iomem, OBJECT(s), &npcm7xx_sdhci_ops, s,
+                          TYPE_NPCM7XX_SDHCI, NPCM7XX_SDHCI_REGSIZE);
+    memory_region_add_subregion_overlap(&s->container, NPCM7XX_PRSTVALS,
+                                        &s->iomem, 1);
+
+    sysbus_realize(sbd_sdhci, errp);
+    memory_region_add_subregion(&s->container, 0,
+                                sysbus_mmio_get_region(sbd_sdhci, 0));
+
+    /* propagate irq and "sd-bus" from generic-sdhci */
+    sysbus_pass_irq(sbd, sbd_sdhci);
+    s->bus = qdev_get_child_bus(DEVICE(sbd_sdhci), "sd-bus");
+
+    /* Set the read-only preset values.
*/ + memset(s->regs.prstvals, 0, sizeof(s->regs.prstvals)); + s->regs.prstvals[0] = NPCM7XX_PRSTVALS_0_RESET; + s->regs.prstvals[1] = NPCM7XX_PRSTVALS_1_RESET; + s->regs.prstvals[3] = NPCM7XX_PRSTVALS_3_RESET; +} + +static void npcm7xx_sdhci_reset(DeviceState *dev) +{ + NPCM7xxSDHCIState *s = NPCM7XX_SDHCI(dev); + device_cold_reset(DEVICE(&s->sdhci)); + s->regs.boottoctrl = 0; + + s->sdhci.prnsts = NPCM7XX_PRSNTS_RESET; + s->sdhci.blkgap = NPCM7XX_BLKGAP_RESET; + s->sdhci.capareg = NPCM7XX_CAPAB_RESET; + s->sdhci.maxcurr = NPCM7XX_MAXCURR_RESET; + s->sdhci.version = NPCM7XX_HCVER_RESET; +} + +static const VMStateDescription vmstate_npcm7xx_sdhci = { + .name = TYPE_NPCM7XX_SDHCI, + .version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_UINT32(regs.boottoctrl, NPCM7xxSDHCIState), + VMSTATE_END_OF_LIST(), + }, +}; + +static void npcm7xx_sdhci_class_init(ObjectClass *classp, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(classp); + + dc->desc = "NPCM7xx SD/eMMC Host Controller"; + dc->realize = npcm7xx_sdhci_realize; + dc->reset = npcm7xx_sdhci_reset; + dc->vmsd = &vmstate_npcm7xx_sdhci; +} + +static void npcm7xx_sdhci_instance_init(Object *obj) +{ + NPCM7xxSDHCIState *s = NPCM7XX_SDHCI(obj); + + object_initialize_child(OBJECT(s), "generic-sdhci", &s->sdhci, + TYPE_SYSBUS_SDHCI); +} + +static TypeInfo npcm7xx_sdhci_info = { + .name = TYPE_NPCM7XX_SDHCI, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(NPCM7xxSDHCIState), + .instance_init = npcm7xx_sdhci_instance_init, + .class_init = npcm7xx_sdhci_class_init, +}; + +static void npcm7xx_sdhci_register_types(void) +{ + type_register_static(&npcm7xx_sdhci_info); +} + +type_init(npcm7xx_sdhci_register_types) diff --git a/hw/sd/omap_mmc.c b/hw/sd/omap_mmc.c new file mode 100644 index 000000000..b67def638 --- /dev/null +++ b/hw/sd/omap_mmc.c @@ -0,0 +1,665 @@ +/* + * OMAP on-chip MMC/SD host emulation. + * + * Datasheet: TI Multimedia Card (MMC/SD/SDIO) Interface (SPRU765A) + * + * Copyright (C) 2006-2007 Andrzej Zaborowski + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 or + * (at your option) version 3 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . 
+ */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "hw/irq.h" +#include "hw/arm/omap.h" +#include "hw/sd/sdcard_legacy.h" + +struct omap_mmc_s { + qemu_irq irq; + qemu_irq *dma; + qemu_irq coverswitch; + MemoryRegion iomem; + omap_clk clk; + SDState *card; + uint16_t last_cmd; + uint16_t sdio; + uint16_t rsp[8]; + uint32_t arg; + int lines; + int dw; + int mode; + int enable; + int be; + int rev; + uint16_t status; + uint16_t mask; + uint8_t cto; + uint16_t dto; + int clkdiv; + uint16_t fifo[32]; + int fifo_start; + int fifo_len; + uint16_t blen; + uint16_t blen_counter; + uint16_t nblk; + uint16_t nblk_counter; + int tx_dma; + int rx_dma; + int af_level; + int ae_level; + + int ddir; + int transfer; + + int cdet_wakeup; + int cdet_enable; + int cdet_state; + qemu_irq cdet; +}; + +static void omap_mmc_interrupts_update(struct omap_mmc_s *s) +{ + qemu_set_irq(s->irq, !!(s->status & s->mask)); +} + +static void omap_mmc_fifolevel_update(struct omap_mmc_s *host) +{ + if (!host->transfer && !host->fifo_len) { + host->status &= 0xf3ff; + return; + } + + if (host->fifo_len > host->af_level && host->ddir) { + if (host->rx_dma) { + host->status &= 0xfbff; + qemu_irq_raise(host->dma[1]); + } else + host->status |= 0x0400; + } else { + host->status &= 0xfbff; + qemu_irq_lower(host->dma[1]); + } + + if (host->fifo_len < host->ae_level && !host->ddir) { + if (host->tx_dma) { + host->status &= 0xf7ff; + qemu_irq_raise(host->dma[0]); + } else + host->status |= 0x0800; + } else { + qemu_irq_lower(host->dma[0]); + host->status &= 0xf7ff; + } +} + +typedef enum { + sd_nore = 0, /* no response */ + sd_r1, /* normal response command */ + sd_r2, /* CID, CSD registers */ + sd_r3, /* OCR register */ + sd_r6 = 6, /* Published RCA response */ + sd_r1b = -1, +} sd_rsp_type_t; + +static void omap_mmc_command(struct omap_mmc_s *host, int cmd, int dir, + sd_cmd_type_t type, int busy, sd_rsp_type_t resptype, int init) +{ + uint32_t rspstatus, mask; + int rsplen, timeout; + SDRequest request; + uint8_t response[16]; + + if (init && cmd == 0) { + host->status |= 0x0001; + return; + } + + if (resptype == sd_r1 && busy) + resptype = sd_r1b; + + if (type == sd_adtc) { + host->fifo_start = 0; + host->fifo_len = 0; + host->transfer = 1; + host->ddir = dir; + } else + host->transfer = 0; + timeout = 0; + mask = 0; + rspstatus = 0; + + request.cmd = cmd; + request.arg = host->arg; + request.crc = 0; /* FIXME */ + + rsplen = sd_do_command(host->card, &request, response); + + /* TODO: validate CRCs */ + switch (resptype) { + case sd_nore: + rsplen = 0; + break; + + case sd_r1: + case sd_r1b: + if (rsplen < 4) { + timeout = 1; + break; + } + rsplen = 4; + + mask = OUT_OF_RANGE | ADDRESS_ERROR | BLOCK_LEN_ERROR | + ERASE_SEQ_ERROR | ERASE_PARAM | WP_VIOLATION | + LOCK_UNLOCK_FAILED | COM_CRC_ERROR | ILLEGAL_COMMAND | + CARD_ECC_FAILED | CC_ERROR | SD_ERROR | + CID_CSD_OVERWRITE; + if (host->sdio & (1 << 13)) + mask |= AKE_SEQ_ERROR; + rspstatus = ldl_be_p(response); + break; + + case sd_r2: + if (rsplen < 16) { + timeout = 1; + break; + } + rsplen = 16; + break; + + case sd_r3: + if (rsplen < 4) { + timeout = 1; + break; + } + rsplen = 4; + + rspstatus = ldl_be_p(response); + if (rspstatus & 0x80000000) + host->status &= 0xe000; + else + host->status |= 0x1000; + break; + + case sd_r6: + if (rsplen < 4) { + timeout = 1; + break; + } + rsplen = 4; + + mask = 0xe000 | AKE_SEQ_ERROR; + rspstatus = (response[2] << 8) | (response[3] << 0); + } + + if (rspstatus & mask) + host->status |= 0x4000; + else + host->status &= 
0xb000; + + if (rsplen) + for (rsplen = 0; rsplen < 8; rsplen ++) + host->rsp[~rsplen & 7] = response[(rsplen << 1) | 1] | + (response[(rsplen << 1) | 0] << 8); + + if (timeout) + host->status |= 0x0080; + else if (cmd == 12) + host->status |= 0x0005; /* Makes it more real */ + else + host->status |= 0x0001; +} + +static void omap_mmc_transfer(struct omap_mmc_s *host) +{ + uint8_t value; + + if (!host->transfer) + return; + + while (1) { + if (host->ddir) { + if (host->fifo_len > host->af_level) + break; + + value = sd_read_byte(host->card); + host->fifo[(host->fifo_start + host->fifo_len) & 31] = value; + if (-- host->blen_counter) { + value = sd_read_byte(host->card); + host->fifo[(host->fifo_start + host->fifo_len) & 31] |= + value << 8; + host->blen_counter --; + } + + host->fifo_len ++; + } else { + if (!host->fifo_len) + break; + + value = host->fifo[host->fifo_start] & 0xff; + sd_write_byte(host->card, value); + if (-- host->blen_counter) { + value = host->fifo[host->fifo_start] >> 8; + sd_write_byte(host->card, value); + host->blen_counter --; + } + + host->fifo_start ++; + host->fifo_len --; + host->fifo_start &= 31; + } + + if (host->blen_counter == 0) { + host->nblk_counter --; + host->blen_counter = host->blen; + + if (host->nblk_counter == 0) { + host->nblk_counter = host->nblk; + host->transfer = 0; + host->status |= 0x0008; + break; + } + } + } +} + +static void omap_mmc_update(void *opaque) +{ + struct omap_mmc_s *s = opaque; + omap_mmc_transfer(s); + omap_mmc_fifolevel_update(s); + omap_mmc_interrupts_update(s); +} + +static void omap_mmc_pseudo_reset(struct omap_mmc_s *host) +{ + host->status = 0; + host->fifo_len = 0; +} + +void omap_mmc_reset(struct omap_mmc_s *host) +{ + host->last_cmd = 0; + memset(host->rsp, 0, sizeof(host->rsp)); + host->arg = 0; + host->dw = 0; + host->mode = 0; + host->enable = 0; + host->mask = 0; + host->cto = 0; + host->dto = 0; + host->blen = 0; + host->blen_counter = 0; + host->nblk = 0; + host->nblk_counter = 0; + host->tx_dma = 0; + host->rx_dma = 0; + host->ae_level = 0x00; + host->af_level = 0x1f; + host->transfer = 0; + host->cdet_wakeup = 0; + host->cdet_enable = 0; + qemu_set_irq(host->coverswitch, host->cdet_state); + host->clkdiv = 0; + + omap_mmc_pseudo_reset(host); + + /* Since we're still using the legacy SD API the card is not plugged + * into any bus, and we must reset it manually. When omap_mmc is + * QOMified this must move into the QOM reset function. 
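+     * (device_cold_reset() on the card stands in for the bus-level reset
+     * propagation that qdev would otherwise perform for us.)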
+     */
+    device_cold_reset(DEVICE(host->card));
+}
+
+static uint64_t omap_mmc_read(void *opaque, hwaddr offset,
+                              unsigned size)
+{
+    uint16_t i;
+    struct omap_mmc_s *s = (struct omap_mmc_s *) opaque;
+
+    if (size != 2) {
+        return omap_badwidth_read16(opaque, offset);
+    }
+
+    switch (offset) {
+    case 0x00: /* MMC_CMD */
+        return s->last_cmd;
+
+    case 0x04: /* MMC_ARGL */
+        return s->arg & 0x0000ffff;
+
+    case 0x08: /* MMC_ARGH */
+        return s->arg >> 16;
+
+    case 0x0c: /* MMC_CON */
+        return (s->dw << 15) | (s->mode << 12) | (s->enable << 11) |
+               (s->be << 10) | s->clkdiv;
+
+    case 0x10: /* MMC_STAT */
+        return s->status;
+
+    case 0x14: /* MMC_IE */
+        return s->mask;
+
+    case 0x18: /* MMC_CTO */
+        return s->cto;
+
+    case 0x1c: /* MMC_DTO */
+        return s->dto;
+
+    case 0x20: /* MMC_DATA */
+        /* TODO: support 8-bit access */
+        i = s->fifo[s->fifo_start];
+        if (s->fifo_len == 0) {
+            qemu_log_mask(LOG_GUEST_ERROR, "MMC: FIFO underrun\n");
+            return i;
+        }
+        s->fifo_start ++;
+        s->fifo_len --;
+        s->fifo_start &= 31;
+        omap_mmc_transfer(s);
+        omap_mmc_fifolevel_update(s);
+        omap_mmc_interrupts_update(s);
+        return i;
+
+    case 0x24: /* MMC_BLEN */
+        return s->blen_counter;
+
+    case 0x28: /* MMC_NBLK */
+        return s->nblk_counter;
+
+    case 0x2c: /* MMC_BUF */
+        return (s->rx_dma << 15) | (s->af_level << 8) |
+               (s->tx_dma << 7) | s->ae_level;
+
+    case 0x30: /* MMC_SPI */
+        return 0x0000;
+    case 0x34: /* MMC_SDIO */
+        return (s->cdet_wakeup << 2) | (s->cdet_enable) | s->sdio;
+    case 0x38: /* MMC_SYST */
+        return 0x0000;
+
+    case 0x3c: /* MMC_REV */
+        return s->rev;
+
+    case 0x40: /* MMC_RSP0 */
+    case 0x44: /* MMC_RSP1 */
+    case 0x48: /* MMC_RSP2 */
+    case 0x4c: /* MMC_RSP3 */
+    case 0x50: /* MMC_RSP4 */
+    case 0x54: /* MMC_RSP5 */
+    case 0x58: /* MMC_RSP6 */
+    case 0x5c: /* MMC_RSP7 */
+        return s->rsp[(offset - 0x40) >> 2];
+
+    /* OMAP2-specific */
+    case 0x60: /* MMC_IOSR */
+    case 0x64: /* MMC_SYSC */
+        return 0;
+    case 0x68: /* MMC_SYSS */
+        return 1; /* RSTD */
+    }
+
+    OMAP_BAD_REG(offset);
+    return 0;
+}
+
+static void omap_mmc_write(void *opaque, hwaddr offset,
+                           uint64_t value, unsigned size)
+{
+    int i;
+    struct omap_mmc_s *s = (struct omap_mmc_s *) opaque;
+
+    if (size != 2) {
+        omap_badwidth_write16(opaque, offset, value);
+        return;
+    }
+
+    switch (offset) {
+    case 0x00: /* MMC_CMD */
+        if (!s->enable)
+            break;
+
+        s->last_cmd = value;
+        for (i = 0; i < 8; i ++)
+            s->rsp[i] = 0x0000;
+        omap_mmc_command(s, value & 63, (value >> 15) & 1,
+                         (sd_cmd_type_t) ((value >> 12) & 3),
+                         (value >> 11) & 1,
+                         (sd_rsp_type_t) ((value >> 8) & 7),
+                         (value >> 7) & 1);
+        omap_mmc_update(s);
+        break;
+
+    case 0x04: /* MMC_ARGL */
+        s->arg &= 0xffff0000;
+        s->arg |= 0x0000ffff & value;
+        break;
+
+    case 0x08: /* MMC_ARGH */
+        s->arg &= 0x0000ffff;
+        s->arg |= value << 16;
+        break;
+
+    case 0x0c: /* MMC_CON */
+        s->dw = (value >> 15) & 1;
+        s->mode = (value >> 12) & 3;
+        s->enable = (value >> 11) & 1;
+        s->be = (value >> 10) & 1;
+        s->clkdiv = (value >> 0) & (s->rev >= 2 ?
0x3ff : 0xff);
+        if (s->mode != 0) {
+            qemu_log_mask(LOG_UNIMP,
+                          "omap_mmc_wr: mode #%i unimplemented\n", s->mode);
+        }
+        if (s->be != 0) {
+            qemu_log_mask(LOG_UNIMP,
+                          "omap_mmc_wr: Big Endian not implemented\n");
+        }
+        if (s->dw != 0 && s->lines < 4) {
+            qemu_log_mask(LOG_GUEST_ERROR,
+                          "omap_mmc_wr: 4-bit bus mode selected, but only "
+                          "%i data line(s) wired\n", s->lines);
+        }
+        if (!s->enable)
+            omap_mmc_pseudo_reset(s);
+        break;
+
+    case 0x10: /* MMC_STAT */
+        s->status &= ~value;
+        omap_mmc_interrupts_update(s);
+        break;
+
+    case 0x14: /* MMC_IE */
+        s->mask = value & 0x7fff;
+        omap_mmc_interrupts_update(s);
+        break;
+
+    case 0x18: /* MMC_CTO */
+        s->cto = value & 0xff;
+        if (s->cto > 0xfd && s->rev <= 1) {
+            qemu_log_mask(LOG_GUEST_ERROR,
+                          "MMC: CTO of 0xff and 0xfe cannot be used!\n");
+        }
+        break;
+
+    case 0x1c: /* MMC_DTO */
+        s->dto = value & 0xffff;
+        break;
+
+    case 0x20: /* MMC_DATA */
+        /* TODO: support 8-bit access */
+        if (s->fifo_len == 32)
+            break;
+        s->fifo[(s->fifo_start + s->fifo_len) & 31] = value;
+        s->fifo_len ++;
+        omap_mmc_transfer(s);
+        omap_mmc_fifolevel_update(s);
+        omap_mmc_interrupts_update(s);
+        break;
+
+    case 0x24: /* MMC_BLEN */
+        s->blen = (value & 0x07ff) + 1;
+        s->blen_counter = s->blen;
+        break;
+
+    case 0x28: /* MMC_NBLK */
+        s->nblk = (value & 0x07ff) + 1;
+        s->nblk_counter = s->nblk;
+        s->blen_counter = s->blen;
+        break;
+
+    case 0x2c: /* MMC_BUF */
+        s->rx_dma = (value >> 15) & 1;
+        s->af_level = (value >> 8) & 0x1f;
+        s->tx_dma = (value >> 7) & 1;
+        s->ae_level = value & 0x1f;
+
+        if (s->rx_dma)
+            s->status &= 0xfbff;
+        if (s->tx_dma)
+            s->status &= 0xf7ff;
+        omap_mmc_fifolevel_update(s);
+        omap_mmc_interrupts_update(s);
+        break;
+
+    /* SPI, SDIO and TEST modes unimplemented */
+    case 0x30: /* MMC_SPI (OMAP1 only) */
+        break;
+    case 0x34: /* MMC_SDIO */
+        s->sdio = value & (s->rev >= 2 ? 0xfbf3 : 0x2020);
+        s->cdet_wakeup = (value >> 9) & 1;
+        s->cdet_enable = (value >> 2) & 1;
+        break;
+    case 0x38: /* MMC_SYST */
+        break;
+
+    case 0x3c: /* MMC_REV */
+    case 0x40: /* MMC_RSP0 */
+    case 0x44: /* MMC_RSP1 */
+    case 0x48: /* MMC_RSP2 */
+    case 0x4c: /* MMC_RSP3 */
+    case 0x50: /* MMC_RSP4 */
+    case 0x54: /* MMC_RSP5 */
+    case 0x58: /* MMC_RSP6 */
+    case 0x5c: /* MMC_RSP7 */
+        OMAP_RO_REG(offset);
+        break;
+
+    /* OMAP2-specific */
+    case 0x60: /* MMC_IOSR */
+        if (value & 0xf) {
+            qemu_log_mask(LOG_UNIMP, "MMC: SDIO bits used!\n");
+        }
+        break;
+    case 0x64: /* MMC_SYSC */
+        if (value & (1 << 2)) /* SRTS */
+            omap_mmc_reset(s);
+        break;
+    case 0x68: /* MMC_SYSS */
+        OMAP_RO_REG(offset);
+        break;
+
+    default:
+        OMAP_BAD_REG(offset);
+    }
+}
+
+static const MemoryRegionOps omap_mmc_ops = {
+    .read = omap_mmc_read,
+    .write = omap_mmc_write,
+    .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+static void omap_mmc_cover_cb(void *opaque, int line, int level)
+{
+    struct omap_mmc_s *host = (struct omap_mmc_s *) opaque;
+
+    if (!host->cdet_state && level) {
+        host->status |= 0x0002;
+        omap_mmc_interrupts_update(host);
+        if (host->cdet_wakeup) {
+            /* TODO: Assert wake-up */
+        }
+    }
+
+    if (host->cdet_state != level) {
+        qemu_set_irq(host->coverswitch, level);
+        host->cdet_state = level;
+    }
+}
+
+struct omap_mmc_s *omap_mmc_init(hwaddr base,
+                MemoryRegion *sysmem,
+                BlockBackend *blk,
+                qemu_irq irq, qemu_irq dma[], omap_clk clk)
+{
+    struct omap_mmc_s *s = g_new0(struct omap_mmc_s, 1);
+
+    s->irq = irq;
+    s->dma = dma;
+    s->clk = clk;
+    s->lines = 1;  /* TODO: needs to be settable per-board */
+    s->rev = 1;
+
+    memory_region_init_io(&s->iomem, NULL, &omap_mmc_ops, s, "omap.mmc", 0x800);
+    memory_region_add_subregion(sysmem, base, &s->iomem);
+
+    /* Instantiate the storage */
+    s->card = sd_init(blk, false);
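+    /*
+     * sd_init() is the legacy, pre-qdev card constructor (declared in
+     * sdcard_legacy.h); it returns NULL when the card device cannot be
+     * created, which is treated as fatal below.
+     */
+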
if (s->card == NULL) { + exit(1); + } + + omap_mmc_reset(s); + + return s; +} + +struct omap_mmc_s *omap2_mmc_init(struct omap_target_agent_s *ta, + BlockBackend *blk, qemu_irq irq, qemu_irq dma[], + omap_clk fclk, omap_clk iclk) +{ + struct omap_mmc_s *s = g_new0(struct omap_mmc_s, 1); + + s->irq = irq; + s->dma = dma; + s->clk = fclk; + s->lines = 4; + s->rev = 2; + + memory_region_init_io(&s->iomem, NULL, &omap_mmc_ops, s, "omap.mmc", + omap_l4_region_size(ta, 0)); + omap_l4_attach(ta, 0, &s->iomem); + + /* Instantiate the storage */ + s->card = sd_init(blk, false); + if (s->card == NULL) { + exit(1); + } + + s->cdet = qemu_allocate_irq(omap_mmc_cover_cb, s, 0); + sd_set_cb(s->card, NULL, s->cdet); + + omap_mmc_reset(s); + + return s; +} + +void omap_mmc_handlers(struct omap_mmc_s *s, qemu_irq ro, qemu_irq cover) +{ + if (s->cdet) { + sd_set_cb(s->card, ro, s->cdet); + s->coverswitch = cover; + qemu_set_irq(cover, s->cdet_state); + } else + sd_set_cb(s->card, ro, cover); +} + +void omap_mmc_enable(struct omap_mmc_s *s, int enable) +{ + sd_enable(s->card, enable); +} diff --git a/hw/sd/pl181.c b/hw/sd/pl181.c new file mode 100644 index 000000000..5e554bd46 --- /dev/null +++ b/hw/sd/pl181.c @@ -0,0 +1,551 @@ +/* + * Arm PrimeCell PL181 MultiMedia Card Interface + * + * Copyright (c) 2007 CodeSourcery. + * Written by Paul Brook + * + * This code is licensed under the GPL. + */ + +#include "qemu/osdep.h" +#include "sysemu/blockdev.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "hw/irq.h" +#include "hw/sd/sd.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "qemu/error-report.h" +#include "qapi/error.h" +#include "trace.h" +#include "qom/object.h" + +#define PL181_FIFO_LEN 16 + +#define TYPE_PL181 "pl181" +OBJECT_DECLARE_SIMPLE_TYPE(PL181State, PL181) + +#define TYPE_PL181_BUS "pl181-bus" + +struct PL181State { + SysBusDevice parent_obj; + + MemoryRegion iomem; + SDBus sdbus; + uint32_t clock; + uint32_t power; + uint32_t cmdarg; + uint32_t cmd; + uint32_t datatimer; + uint32_t datalength; + uint32_t respcmd; + uint32_t response[4]; + uint32_t datactrl; + uint32_t datacnt; + uint32_t status; + uint32_t mask[2]; + int32_t fifo_pos; + int32_t fifo_len; + /* The linux 2.6.21 driver is buggy, and misbehaves if new data arrives + while it is reading the FIFO. We hack around this by deferring + subsequent transfers until after the driver polls the status word. 
+ http://www.arm.linux.org.uk/developer/patches/viewpatch.php?id=4446/1 + */ + int32_t linux_hack; + uint32_t fifo[PL181_FIFO_LEN]; /* TODO use Fifo32 */ + qemu_irq irq[2]; + /* GPIO outputs for 'card is readonly' and 'card inserted' */ + qemu_irq card_readonly; + qemu_irq card_inserted; +}; + +static const VMStateDescription vmstate_pl181 = { + .name = "pl181", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(clock, PL181State), + VMSTATE_UINT32(power, PL181State), + VMSTATE_UINT32(cmdarg, PL181State), + VMSTATE_UINT32(cmd, PL181State), + VMSTATE_UINT32(datatimer, PL181State), + VMSTATE_UINT32(datalength, PL181State), + VMSTATE_UINT32(respcmd, PL181State), + VMSTATE_UINT32_ARRAY(response, PL181State, 4), + VMSTATE_UINT32(datactrl, PL181State), + VMSTATE_UINT32(datacnt, PL181State), + VMSTATE_UINT32(status, PL181State), + VMSTATE_UINT32_ARRAY(mask, PL181State, 2), + VMSTATE_INT32(fifo_pos, PL181State), + VMSTATE_INT32(fifo_len, PL181State), + VMSTATE_INT32(linux_hack, PL181State), + VMSTATE_UINT32_ARRAY(fifo, PL181State, PL181_FIFO_LEN), + VMSTATE_END_OF_LIST() + } +}; + +#define PL181_CMD_INDEX 0x3f +#define PL181_CMD_RESPONSE (1 << 6) +#define PL181_CMD_LONGRESP (1 << 7) +#define PL181_CMD_INTERRUPT (1 << 8) +#define PL181_CMD_PENDING (1 << 9) +#define PL181_CMD_ENABLE (1 << 10) + +#define PL181_DATA_ENABLE (1 << 0) +#define PL181_DATA_DIRECTION (1 << 1) +#define PL181_DATA_MODE (1 << 2) +#define PL181_DATA_DMAENABLE (1 << 3) + +#define PL181_STATUS_CMDCRCFAIL (1 << 0) +#define PL181_STATUS_DATACRCFAIL (1 << 1) +#define PL181_STATUS_CMDTIMEOUT (1 << 2) +#define PL181_STATUS_DATATIMEOUT (1 << 3) +#define PL181_STATUS_TXUNDERRUN (1 << 4) +#define PL181_STATUS_RXOVERRUN (1 << 5) +#define PL181_STATUS_CMDRESPEND (1 << 6) +#define PL181_STATUS_CMDSENT (1 << 7) +#define PL181_STATUS_DATAEND (1 << 8) +#define PL181_STATUS_DATABLOCKEND (1 << 10) +#define PL181_STATUS_CMDACTIVE (1 << 11) +#define PL181_STATUS_TXACTIVE (1 << 12) +#define PL181_STATUS_RXACTIVE (1 << 13) +#define PL181_STATUS_TXFIFOHALFEMPTY (1 << 14) +#define PL181_STATUS_RXFIFOHALFFULL (1 << 15) +#define PL181_STATUS_TXFIFOFULL (1 << 16) +#define PL181_STATUS_RXFIFOFULL (1 << 17) +#define PL181_STATUS_TXFIFOEMPTY (1 << 18) +#define PL181_STATUS_RXFIFOEMPTY (1 << 19) +#define PL181_STATUS_TXDATAAVLBL (1 << 20) +#define PL181_STATUS_RXDATAAVLBL (1 << 21) + +#define PL181_STATUS_TX_FIFO (PL181_STATUS_TXACTIVE \ + |PL181_STATUS_TXFIFOHALFEMPTY \ + |PL181_STATUS_TXFIFOFULL \ + |PL181_STATUS_TXFIFOEMPTY \ + |PL181_STATUS_TXDATAAVLBL) +#define PL181_STATUS_RX_FIFO (PL181_STATUS_RXACTIVE \ + |PL181_STATUS_RXFIFOHALFFULL \ + |PL181_STATUS_RXFIFOFULL \ + |PL181_STATUS_RXFIFOEMPTY \ + |PL181_STATUS_RXDATAAVLBL) + +static const unsigned char pl181_id[] = +{ 0x81, 0x11, 0x04, 0x00, 0x0d, 0xf0, 0x05, 0xb1 }; + +static void pl181_update(PL181State *s) +{ + int i; + for (i = 0; i < 2; i++) { + qemu_set_irq(s->irq[i], (s->status & s->mask[i]) != 0); + } +} + +static void pl181_fifo_push(PL181State *s, uint32_t value) +{ + int n; + + if (s->fifo_len == PL181_FIFO_LEN) { + error_report("%s: FIFO overflow", __func__); + return; + } + n = (s->fifo_pos + s->fifo_len) & (PL181_FIFO_LEN - 1); + s->fifo_len++; + s->fifo[n] = value; + trace_pl181_fifo_push(value); +} + +static uint32_t pl181_fifo_pop(PL181State *s) +{ + uint32_t value; + + if (s->fifo_len == 0) { + error_report("%s: FIFO underflow", __func__); + return 0; + } + value = s->fifo[s->fifo_pos]; + s->fifo_len--; + s->fifo_pos = (s->fifo_pos + 1) & 
(PL181_FIFO_LEN - 1); + trace_pl181_fifo_pop(value); + return value; +} + +static void pl181_do_command(PL181State *s) +{ + SDRequest request; + uint8_t response[16]; + int rlen; + + request.cmd = s->cmd & PL181_CMD_INDEX; + request.arg = s->cmdarg; + trace_pl181_command_send(request.cmd, request.arg); + rlen = sdbus_do_command(&s->sdbus, &request, response); + if (rlen < 0) + goto error; + if (s->cmd & PL181_CMD_RESPONSE) { + if (rlen == 0 || (rlen == 4 && (s->cmd & PL181_CMD_LONGRESP))) + goto error; + if (rlen != 4 && rlen != 16) + goto error; + s->response[0] = ldl_be_p(&response[0]); + if (rlen == 4) { + s->response[1] = s->response[2] = s->response[3] = 0; + } else { + s->response[1] = ldl_be_p(&response[4]); + s->response[2] = ldl_be_p(&response[8]); + s->response[3] = ldl_be_p(&response[12]) & ~1; + } + trace_pl181_command_response_pending(); + s->status |= PL181_STATUS_CMDRESPEND; + } else { + trace_pl181_command_sent(); + s->status |= PL181_STATUS_CMDSENT; + } + return; + +error: + trace_pl181_command_timeout(); + s->status |= PL181_STATUS_CMDTIMEOUT; +} + +/* Transfer data between the card and the FIFO. This is complicated by + the FIFO holding 32-bit words and the card taking data in single byte + chunks. FIFO bytes are transferred in little-endian order. */ + +static void pl181_fifo_run(PL181State *s) +{ + uint32_t bits; + uint32_t value = 0; + int n; + int is_read; + + is_read = (s->datactrl & PL181_DATA_DIRECTION) != 0; + if (s->datacnt != 0 && (!is_read || sdbus_data_ready(&s->sdbus)) + && !s->linux_hack) { + if (is_read) { + n = 0; + while (s->datacnt && s->fifo_len < PL181_FIFO_LEN) { + value |= (uint32_t)sdbus_read_byte(&s->sdbus) << (n * 8); + s->datacnt--; + n++; + if (n == 4) { + pl181_fifo_push(s, value); + n = 0; + value = 0; + } + } + if (n != 0) { + pl181_fifo_push(s, value); + } + } else { /* write */ + n = 0; + while (s->datacnt > 0 && (s->fifo_len > 0 || n > 0)) { + if (n == 0) { + value = pl181_fifo_pop(s); + n = 4; + } + n--; + s->datacnt--; + sdbus_write_byte(&s->sdbus, value & 0xff); + value >>= 8; + } + } + } + s->status &= ~(PL181_STATUS_RX_FIFO | PL181_STATUS_TX_FIFO); + if (s->datacnt == 0) { + s->status |= PL181_STATUS_DATAEND; + /* HACK: */ + s->status |= PL181_STATUS_DATABLOCKEND; + trace_pl181_fifo_transfer_complete(); + } + if (s->datacnt == 0 && s->fifo_len == 0) { + s->datactrl &= ~PL181_DATA_ENABLE; + trace_pl181_data_engine_idle(); + } else { + /* Update FIFO bits. 
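+           Both the TX and RX flag sets are computed here; the set that
+           does not match the current transfer direction is masked off
+           below.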
         */
+        bits = PL181_STATUS_TXACTIVE | PL181_STATUS_RXACTIVE;
+        if (s->fifo_len == 0) {
+            bits |= PL181_STATUS_TXFIFOEMPTY;
+            bits |= PL181_STATUS_RXFIFOEMPTY;
+        } else {
+            bits |= PL181_STATUS_TXDATAAVLBL;
+            bits |= PL181_STATUS_RXDATAAVLBL;
+        }
+        if (s->fifo_len == 16) {
+            bits |= PL181_STATUS_TXFIFOFULL;
+            bits |= PL181_STATUS_RXFIFOFULL;
+        }
+        if (s->fifo_len <= 8) {
+            bits |= PL181_STATUS_TXFIFOHALFEMPTY;
+        }
+        if (s->fifo_len >= 8) {
+            bits |= PL181_STATUS_RXFIFOHALFFULL;
+        }
+        if (s->datactrl & PL181_DATA_DIRECTION) {
+            bits &= PL181_STATUS_RX_FIFO;
+        } else {
+            bits &= PL181_STATUS_TX_FIFO;
+        }
+        s->status |= bits;
+    }
+}
+
+static uint64_t pl181_read(void *opaque, hwaddr offset,
+                           unsigned size)
+{
+    PL181State *s = (PL181State *)opaque;
+    uint32_t tmp;
+
+    if (offset >= 0xfe0 && offset < 0x1000) {
+        return pl181_id[(offset - 0xfe0) >> 2];
+    }
+    switch (offset) {
+    case 0x00: /* Power */
+        return s->power;
+    case 0x04: /* Clock */
+        return s->clock;
+    case 0x08: /* Argument */
+        return s->cmdarg;
+    case 0x0c: /* Command */
+        return s->cmd;
+    case 0x10: /* RespCmd */
+        return s->respcmd;
+    case 0x14: /* Response0 */
+        return s->response[0];
+    case 0x18: /* Response1 */
+        return s->response[1];
+    case 0x1c: /* Response2 */
+        return s->response[2];
+    case 0x20: /* Response3 */
+        return s->response[3];
+    case 0x24: /* DataTimer */
+        return s->datatimer;
+    case 0x28: /* DataLength */
+        return s->datalength;
+    case 0x2c: /* DataCtrl */
+        return s->datactrl;
+    case 0x30: /* DataCnt */
+        return s->datacnt;
+    case 0x34: /* Status */
+        tmp = s->status;
+        if (s->linux_hack) {
+            s->linux_hack = 0;
+            pl181_fifo_run(s);
+            pl181_update(s);
+        }
+        return tmp;
+    case 0x3c: /* Mask0 */
+        return s->mask[0];
+    case 0x40: /* Mask1 */
+        return s->mask[1];
+    case 0x48: /* FifoCnt */
+        /* The documentation is somewhat vague about exactly what FifoCnt
+           does.  On real hardware it appears to be decremented when a
+           word is transferred between the FIFO and the serial data
+           engine.  DataCnt is decremented after each byte is transferred
+           between the serial engine and the card.  We don't emulate this
+           level of detail, so both can be the same.  */
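+        /*
+         * Round the outstanding byte count up to whole FIFO words:
+         * e.g. 6 bytes left reports a FifoCnt of 2.
+         */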
+        tmp = (s->datacnt + 3) >> 2;
+        if (s->linux_hack) {
+            s->linux_hack = 0;
+            pl181_fifo_run(s);
+            pl181_update(s);
+        }
+        return tmp;
+    case 0x80: case 0x84: case 0x88: case 0x8c: /* FifoData */
+    case 0x90: case 0x94: case 0x98: case 0x9c:
+    case 0xa0: case 0xa4: case 0xa8: case 0xac:
+    case 0xb0: case 0xb4: case 0xb8: case 0xbc:
+        if (s->fifo_len == 0) {
+            qemu_log_mask(LOG_GUEST_ERROR, "pl181: Unexpected FIFO read\n");
+            return 0;
+        } else {
+            uint32_t value;
+            value = pl181_fifo_pop(s);
+            s->linux_hack = 1;
+            pl181_fifo_run(s);
+            pl181_update(s);
+            return value;
+        }
+    default:
+        qemu_log_mask(LOG_GUEST_ERROR,
+                      "pl181_read: Bad offset %x\n", (int)offset);
+        return 0;
+    }
+}
+
+static void pl181_write(void *opaque, hwaddr offset,
+                        uint64_t value, unsigned size)
+{
+    PL181State *s = (PL181State *)opaque;
+
+    switch (offset) {
+    case 0x00: /* Power */
+        s->power = value & 0xff;
+        break;
+    case 0x04: /* Clock */
+        s->clock = value & 0xff;
+        break;
+    case 0x08: /* Argument */
+        s->cmdarg = value;
+        break;
+    case 0x0c: /* Command */
+        s->cmd = value;
+        if (s->cmd & PL181_CMD_ENABLE) {
+            if (s->cmd & PL181_CMD_INTERRUPT) {
+                qemu_log_mask(LOG_UNIMP,
+                              "pl181: Interrupt mode not implemented\n");
+            }
+            if (s->cmd & PL181_CMD_PENDING) {
+                qemu_log_mask(LOG_UNIMP,
+                              "pl181: Pending commands not implemented\n");
+            } else {
+                pl181_do_command(s);
+                pl181_fifo_run(s);
+            }
+            /* The command has completed one way or the other. */
+            s->cmd &= ~PL181_CMD_ENABLE;
+        }
+        break;
+    case 0x24: /* DataTimer */
+        s->datatimer = value;
+        break;
+    case 0x28: /* DataLength */
+        s->datalength = value & 0xffff;
+        break;
+    case 0x2c: /* DataCtrl */
+        s->datactrl = value & 0xff;
+        if (value & PL181_DATA_ENABLE) {
+            s->datacnt = s->datalength;
+            pl181_fifo_run(s);
+        }
+        break;
+    case 0x38: /* Clear */
+        s->status &= ~(value & 0x7ff);
+        break;
+    case 0x3c: /* Mask0 */
+        s->mask[0] = value;
+        break;
+    case 0x40: /* Mask1 */
+        s->mask[1] = value;
+        break;
+    case 0x80: case 0x84: case 0x88: case 0x8c: /* FifoData */
+    case 0x90: case 0x94: case 0x98: case 0x9c:
+    case 0xa0: case 0xa4: case 0xa8: case 0xac:
+    case 0xb0: case 0xb4: case 0xb8: case 0xbc:
+        if (s->datacnt == 0) {
+            qemu_log_mask(LOG_GUEST_ERROR, "pl181: Unexpected FIFO write\n");
+        } else {
+            pl181_fifo_push(s, value);
+            pl181_fifo_run(s);
+        }
+        break;
+    default:
+        qemu_log_mask(LOG_GUEST_ERROR,
+                      "pl181_write: Bad offset %x\n", (int)offset);
+    }
+    pl181_update(s);
+}
+
+static const MemoryRegionOps pl181_ops = {
+    .read = pl181_read,
+    .write = pl181_write,
+    .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+static void pl181_set_readonly(DeviceState *dev, bool level)
+{
+    PL181State *s = (PL181State *)dev;
+
+    qemu_set_irq(s->card_readonly, level);
+}
+
+static void pl181_set_inserted(DeviceState *dev, bool level)
+{
+    PL181State *s = (PL181State *)dev;
+
+    qemu_set_irq(s->card_inserted, level);
+}
+
+static void pl181_reset(DeviceState *d)
+{
+    PL181State *s = PL181(d);
+
+    s->power = 0;
+    s->cmdarg = 0;
+    s->cmd = 0;
+    s->datatimer = 0;
+    s->datalength = 0;
+    s->respcmd = 0;
+    s->response[0] = 0;
+    s->response[1] = 0;
+    s->response[2] = 0;
+    s->response[3] = 0;
+    s->datactrl = 0;
+    s->datacnt = 0;
+    s->status = 0;
+    s->linux_hack = 0;
+    s->mask[0] = 0;
+    s->mask[1] = 0;
+
+    /* Reset other state based on current card insertion/readonly status */
+    pl181_set_inserted(DEVICE(s), sdbus_get_inserted(&s->sdbus));
+    pl181_set_readonly(DEVICE(s), sdbus_get_readonly(&s->sdbus));
+}
+
+static void pl181_init(Object *obj)
+{
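+    /*
+     * Board-side wiring sketch (illustrative only, not part of this
+     * device): a machine model creates the controller and then plugs a
+     * card onto the "sd-bus" child bus created below, e.g. with a
+     * board-provided BlockBackend 'blk':
+     *
+     *   DeviceState *mmc = sysbus_create_varargs(TYPE_PL181, base,
+     *                                            irq0, irq1, NULL);
+     *   DeviceState *card = qdev_new(TYPE_SD_CARD);
+     *   qdev_prop_set_drive(card, "drive", blk);
+     *   qdev_realize_and_unref(card, qdev_get_child_bus(mmc, "sd-bus"),
+     *                          &error_fatal);
+     */
+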
DeviceState *dev = DEVICE(obj); + PL181State *s = PL181(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + + memory_region_init_io(&s->iomem, obj, &pl181_ops, s, "pl181", 0x1000); + sysbus_init_mmio(sbd, &s->iomem); + sysbus_init_irq(sbd, &s->irq[0]); + sysbus_init_irq(sbd, &s->irq[1]); + qdev_init_gpio_out_named(dev, &s->card_readonly, "card-read-only", 1); + qdev_init_gpio_out_named(dev, &s->card_inserted, "card-inserted", 1); + + qbus_init(&s->sdbus, sizeof(s->sdbus), TYPE_PL181_BUS, dev, "sd-bus"); +} + +static void pl181_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *k = DEVICE_CLASS(klass); + + k->vmsd = &vmstate_pl181; + k->reset = pl181_reset; + /* Reason: output IRQs should be wired up */ + k->user_creatable = false; +} + +static const TypeInfo pl181_info = { + .name = TYPE_PL181, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(PL181State), + .instance_init = pl181_init, + .class_init = pl181_class_init, +}; + +static void pl181_bus_class_init(ObjectClass *klass, void *data) +{ + SDBusClass *sbc = SD_BUS_CLASS(klass); + + sbc->set_inserted = pl181_set_inserted; + sbc->set_readonly = pl181_set_readonly; +} + +static const TypeInfo pl181_bus_info = { + .name = TYPE_PL181_BUS, + .parent = TYPE_SD_BUS, + .instance_size = sizeof(SDBus), + .class_init = pl181_bus_class_init, +}; + +static void pl181_register_types(void) +{ + type_register_static(&pl181_info); + type_register_static(&pl181_bus_info); +} + +type_init(pl181_register_types) diff --git a/hw/sd/pxa2xx_mmci.c b/hw/sd/pxa2xx_mmci.c new file mode 100644 index 000000000..124fbf8bb --- /dev/null +++ b/hw/sd/pxa2xx_mmci.c @@ -0,0 +1,604 @@ +/* + * Intel XScale PXA255/270 MultiMediaCard/SD/SDIO Controller emulation. + * + * Copyright (c) 2006 Openedhand Ltd. + * Written by Andrzej Zaborowski + * + * This code is licensed under the GPLv2. + * + * Contributions after 2012-01-13 are licensed under the terms of the + * GNU GPL, version 2 or (at your option) any later version. 
+ */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "hw/irq.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "hw/arm/pxa.h" +#include "hw/sd/sd.h" +#include "hw/qdev-properties.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "trace.h" +#include "qom/object.h" + +#define TYPE_PXA2XX_MMCI_BUS "pxa2xx-mmci-bus" +/* This is reusing the SDBus typedef from SD_BUS */ +DECLARE_INSTANCE_CHECKER(SDBus, PXA2XX_MMCI_BUS, + TYPE_PXA2XX_MMCI_BUS) + +struct PXA2xxMMCIState { + SysBusDevice parent_obj; + + MemoryRegion iomem; + qemu_irq irq; + qemu_irq rx_dma; + qemu_irq tx_dma; + qemu_irq inserted; + qemu_irq readonly; + + BlockBackend *blk; + SDBus sdbus; + + uint32_t status; + uint32_t clkrt; + uint32_t spi; + uint32_t cmdat; + uint32_t resp_tout; + uint32_t read_tout; + int32_t blklen; + int32_t numblk; + uint32_t intmask; + uint32_t intreq; + int32_t cmd; + uint32_t arg; + + int32_t active; + int32_t bytesleft; + uint8_t tx_fifo[64]; + uint32_t tx_start; + uint32_t tx_len; + uint8_t rx_fifo[32]; + uint32_t rx_start; + uint32_t rx_len; + uint16_t resp_fifo[9]; + uint32_t resp_len; + + int32_t cmdreq; +}; + +static bool pxa2xx_mmci_vmstate_validate(void *opaque, int version_id) +{ + PXA2xxMMCIState *s = opaque; + + return s->tx_start < ARRAY_SIZE(s->tx_fifo) + && s->rx_start < ARRAY_SIZE(s->rx_fifo) + && s->tx_len <= ARRAY_SIZE(s->tx_fifo) + && s->rx_len <= ARRAY_SIZE(s->rx_fifo) + && s->resp_len <= ARRAY_SIZE(s->resp_fifo); +} + + +static const VMStateDescription vmstate_pxa2xx_mmci = { + .name = "pxa2xx-mmci", + .version_id = 2, + .minimum_version_id = 2, + .fields = (VMStateField[]) { + VMSTATE_UINT32(status, PXA2xxMMCIState), + VMSTATE_UINT32(clkrt, PXA2xxMMCIState), + VMSTATE_UINT32(spi, PXA2xxMMCIState), + VMSTATE_UINT32(cmdat, PXA2xxMMCIState), + VMSTATE_UINT32(resp_tout, PXA2xxMMCIState), + VMSTATE_UINT32(read_tout, PXA2xxMMCIState), + VMSTATE_INT32(blklen, PXA2xxMMCIState), + VMSTATE_INT32(numblk, PXA2xxMMCIState), + VMSTATE_UINT32(intmask, PXA2xxMMCIState), + VMSTATE_UINT32(intreq, PXA2xxMMCIState), + VMSTATE_INT32(cmd, PXA2xxMMCIState), + VMSTATE_UINT32(arg, PXA2xxMMCIState), + VMSTATE_INT32(cmdreq, PXA2xxMMCIState), + VMSTATE_INT32(active, PXA2xxMMCIState), + VMSTATE_INT32(bytesleft, PXA2xxMMCIState), + VMSTATE_UINT32(tx_start, PXA2xxMMCIState), + VMSTATE_UINT32(tx_len, PXA2xxMMCIState), + VMSTATE_UINT32(rx_start, PXA2xxMMCIState), + VMSTATE_UINT32(rx_len, PXA2xxMMCIState), + VMSTATE_UINT32(resp_len, PXA2xxMMCIState), + VMSTATE_VALIDATE("fifo size incorrect", pxa2xx_mmci_vmstate_validate), + VMSTATE_UINT8_ARRAY(tx_fifo, PXA2xxMMCIState, 64), + VMSTATE_UINT8_ARRAY(rx_fifo, PXA2xxMMCIState, 32), + VMSTATE_UINT16_ARRAY(resp_fifo, PXA2xxMMCIState, 9), + VMSTATE_END_OF_LIST() + } +}; + +#define MMC_STRPCL 0x00 /* MMC Clock Start/Stop register */ +#define MMC_STAT 0x04 /* MMC Status register */ +#define MMC_CLKRT 0x08 /* MMC Clock Rate register */ +#define MMC_SPI 0x0c /* MMC SPI Mode register */ +#define MMC_CMDAT 0x10 /* MMC Command/Data register */ +#define MMC_RESTO 0x14 /* MMC Response Time-Out register */ +#define MMC_RDTO 0x18 /* MMC Read Time-Out register */ +#define MMC_BLKLEN 0x1c /* MMC Block Length register */ +#define MMC_NUMBLK 0x20 /* MMC Number of Blocks register */ +#define MMC_PRTBUF 0x24 /* MMC Buffer Partly Full register */ +#define MMC_I_MASK 0x28 /* MMC Interrupt Mask register */ +#define MMC_I_REG 0x2c /* MMC Interrupt Request register */ +#define MMC_CMD 0x30 /* MMC Command register */ +#define MMC_ARGH 0x34 /* MMC Argument 
High register */ +#define MMC_ARGL 0x38 /* MMC Argument Low register */ +#define MMC_RES 0x3c /* MMC Response FIFO */ +#define MMC_RXFIFO 0x40 /* MMC Receive FIFO */ +#define MMC_TXFIFO 0x44 /* MMC Transmit FIFO */ +#define MMC_RDWAIT 0x48 /* MMC RD_WAIT register */ +#define MMC_BLKS_REM 0x4c /* MMC Blocks Remaining register */ + +/* Bitfield masks */ +#define STRPCL_STOP_CLK (1 << 0) +#define STRPCL_STRT_CLK (1 << 1) +#define STAT_TOUT_RES (1 << 1) +#define STAT_CLK_EN (1 << 8) +#define STAT_DATA_DONE (1 << 11) +#define STAT_PRG_DONE (1 << 12) +#define STAT_END_CMDRES (1 << 13) +#define SPI_SPI_MODE (1 << 0) +#define CMDAT_RES_TYPE (3 << 0) +#define CMDAT_DATA_EN (1 << 2) +#define CMDAT_WR_RD (1 << 3) +#define CMDAT_DMA_EN (1 << 7) +#define CMDAT_STOP_TRAN (1 << 10) +#define INT_DATA_DONE (1 << 0) +#define INT_PRG_DONE (1 << 1) +#define INT_END_CMD (1 << 2) +#define INT_STOP_CMD (1 << 3) +#define INT_CLK_OFF (1 << 4) +#define INT_RXFIFO_REQ (1 << 5) +#define INT_TXFIFO_REQ (1 << 6) +#define INT_TINT (1 << 7) +#define INT_DAT_ERR (1 << 8) +#define INT_RES_ERR (1 << 9) +#define INT_RD_STALLED (1 << 10) +#define INT_SDIO_INT (1 << 11) +#define INT_SDIO_SACK (1 << 12) +#define PRTBUF_PRT_BUF (1 << 0) + +/* Route internal interrupt lines to the global IC and DMA */ +static void pxa2xx_mmci_int_update(PXA2xxMMCIState *s) +{ + uint32_t mask = s->intmask; + if (s->cmdat & CMDAT_DMA_EN) { + mask |= INT_RXFIFO_REQ | INT_TXFIFO_REQ; + + qemu_set_irq(s->rx_dma, !!(s->intreq & INT_RXFIFO_REQ)); + qemu_set_irq(s->tx_dma, !!(s->intreq & INT_TXFIFO_REQ)); + } + + qemu_set_irq(s->irq, !!(s->intreq & ~mask)); +} + +static void pxa2xx_mmci_fifo_update(PXA2xxMMCIState *s) +{ + if (!s->active) + return; + + if (s->cmdat & CMDAT_WR_RD) { + while (s->bytesleft && s->tx_len) { + sdbus_write_byte(&s->sdbus, s->tx_fifo[s->tx_start++]); + s->tx_start &= 0x1f; + s->tx_len --; + s->bytesleft --; + } + if (s->bytesleft) + s->intreq |= INT_TXFIFO_REQ; + } else + while (s->bytesleft && s->rx_len < 32) { + s->rx_fifo[(s->rx_start + (s->rx_len ++)) & 0x1f] = + sdbus_read_byte(&s->sdbus); + s->bytesleft --; + s->intreq |= INT_RXFIFO_REQ; + } + + if (!s->bytesleft) { + s->active = 0; + s->intreq |= INT_DATA_DONE; + s->status |= STAT_DATA_DONE; + + if (s->cmdat & CMDAT_WR_RD) { + s->intreq |= INT_PRG_DONE; + s->status |= STAT_PRG_DONE; + } + } + + pxa2xx_mmci_int_update(s); +} + +static void pxa2xx_mmci_wakequeues(PXA2xxMMCIState *s) +{ + int rsplen, i; + SDRequest request; + uint8_t response[16]; + + s->active = 1; + s->rx_len = 0; + s->tx_len = 0; + s->cmdreq = 0; + + request.cmd = s->cmd; + request.arg = s->arg; + request.crc = 0; /* FIXME */ + + rsplen = sdbus_do_command(&s->sdbus, &request, response); + s->intreq |= INT_END_CMD; + + memset(s->resp_fifo, 0, sizeof(s->resp_fifo)); + switch (s->cmdat & CMDAT_RES_TYPE) { +#define PXAMMCI_RESP(wd, value0, value1) \ + s->resp_fifo[(wd) + 0] |= (value0); \ + s->resp_fifo[(wd) + 1] |= (value1) << 8; + case 0: /* No response */ + goto complete; + + case 1: /* R1, R4, R5 or R6 */ + if (rsplen < 4) + goto timeout; + goto complete; + + case 2: /* R2 */ + if (rsplen < 16) + goto timeout; + goto complete; + + case 3: /* R3 */ + if (rsplen < 4) + goto timeout; + goto complete; + + complete: + for (i = 0; rsplen > 0; i ++, rsplen -= 2) { + PXAMMCI_RESP(i, response[i * 2], response[i * 2 + 1]); + } + s->status |= STAT_END_CMDRES; + + if (!(s->cmdat & CMDAT_DATA_EN)) + s->active = 0; + else + s->bytesleft = s->numblk * s->blklen; + + s->resp_len = 0; + break; + + timeout: + s->active = 
0; + s->status |= STAT_TOUT_RES; + break; + } + + pxa2xx_mmci_fifo_update(s); +} + +static uint64_t pxa2xx_mmci_read(void *opaque, hwaddr offset, unsigned size) +{ + PXA2xxMMCIState *s = (PXA2xxMMCIState *) opaque; + uint32_t ret = 0; + + switch (offset) { + case MMC_STRPCL: + break; + case MMC_STAT: + ret = s->status; + break; + case MMC_CLKRT: + ret = s->clkrt; + break; + case MMC_SPI: + ret = s->spi; + break; + case MMC_CMDAT: + ret = s->cmdat; + break; + case MMC_RESTO: + ret = s->resp_tout; + break; + case MMC_RDTO: + ret = s->read_tout; + break; + case MMC_BLKLEN: + ret = s->blklen; + break; + case MMC_NUMBLK: + ret = s->numblk; + break; + case MMC_PRTBUF: + break; + case MMC_I_MASK: + ret = s->intmask; + break; + case MMC_I_REG: + ret = s->intreq; + break; + case MMC_CMD: + ret = s->cmd | 0x40; + break; + case MMC_ARGH: + ret = s->arg >> 16; + break; + case MMC_ARGL: + ret = s->arg & 0xffff; + break; + case MMC_RES: + ret = (s->resp_len < 9) ? s->resp_fifo[s->resp_len++] : 0; + break; + case MMC_RXFIFO: + while (size-- && s->rx_len) { + ret |= s->rx_fifo[s->rx_start++] << (size << 3); + s->rx_start &= 0x1f; + s->rx_len --; + } + s->intreq &= ~INT_RXFIFO_REQ; + pxa2xx_mmci_fifo_update(s); + break; + case MMC_RDWAIT: + break; + case MMC_BLKS_REM: + ret = s->numblk; + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: incorrect register 0x%02" HWADDR_PRIx "\n", + __func__, offset); + } + trace_pxa2xx_mmci_read(size, offset, ret); + + return ret; +} + +static void pxa2xx_mmci_write(void *opaque, + hwaddr offset, uint64_t value, unsigned size) +{ + PXA2xxMMCIState *s = (PXA2xxMMCIState *) opaque; + + trace_pxa2xx_mmci_write(size, offset, value); + switch (offset) { + case MMC_STRPCL: + if (value & STRPCL_STRT_CLK) { + s->status |= STAT_CLK_EN; + s->intreq &= ~INT_CLK_OFF; + + if (s->cmdreq && !(s->cmdat & CMDAT_STOP_TRAN)) { + s->status &= STAT_CLK_EN; + pxa2xx_mmci_wakequeues(s); + } + } + + if (value & STRPCL_STOP_CLK) { + s->status &= ~STAT_CLK_EN; + s->intreq |= INT_CLK_OFF; + s->active = 0; + } + + pxa2xx_mmci_int_update(s); + break; + + case MMC_CLKRT: + s->clkrt = value & 7; + break; + + case MMC_SPI: + s->spi = value & 0xf; + if (value & SPI_SPI_MODE) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: attempted to use card in SPI mode\n", __func__); + } + break; + + case MMC_CMDAT: + s->cmdat = value & 0x3dff; + s->active = 0; + s->cmdreq = 1; + if (!(value & CMDAT_STOP_TRAN)) { + s->status &= STAT_CLK_EN; + + if (s->status & STAT_CLK_EN) + pxa2xx_mmci_wakequeues(s); + } + + pxa2xx_mmci_int_update(s); + break; + + case MMC_RESTO: + s->resp_tout = value & 0x7f; + break; + + case MMC_RDTO: + s->read_tout = value & 0xffff; + break; + + case MMC_BLKLEN: + s->blklen = value & 0xfff; + break; + + case MMC_NUMBLK: + s->numblk = value & 0xffff; + break; + + case MMC_PRTBUF: + if (value & PRTBUF_PRT_BUF) { + s->tx_start ^= 32; + s->tx_len = 0; + } + pxa2xx_mmci_fifo_update(s); + break; + + case MMC_I_MASK: + s->intmask = value & 0x1fff; + pxa2xx_mmci_int_update(s); + break; + + case MMC_CMD: + s->cmd = value & 0x3f; + break; + + case MMC_ARGH: + s->arg &= 0x0000ffff; + s->arg |= value << 16; + break; + + case MMC_ARGL: + s->arg &= 0xffff0000; + s->arg |= value & 0x0000ffff; + break; + + case MMC_TXFIFO: + while (size-- && s->tx_len < 0x20) + s->tx_fifo[(s->tx_start + (s->tx_len ++)) & 0x1f] = + (value >> (size << 3)) & 0xff; + s->intreq &= ~INT_TXFIFO_REQ; + pxa2xx_mmci_fifo_update(s); + break; + + case MMC_RDWAIT: + case MMC_BLKS_REM: + break; + + default: + qemu_log_mask(LOG_GUEST_ERROR, 
+ "%s: incorrect reg 0x%02" HWADDR_PRIx " " + "(value 0x%08" PRIx64 ")\n", __func__, offset, value); + } +} + +static const MemoryRegionOps pxa2xx_mmci_ops = { + .read = pxa2xx_mmci_read, + .write = pxa2xx_mmci_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +PXA2xxMMCIState *pxa2xx_mmci_init(MemoryRegion *sysmem, + hwaddr base, + qemu_irq irq, qemu_irq rx_dma, qemu_irq tx_dma) +{ + DeviceState *dev; + SysBusDevice *sbd; + + dev = qdev_new(TYPE_PXA2XX_MMCI); + sbd = SYS_BUS_DEVICE(dev); + sysbus_mmio_map(sbd, 0, base); + sysbus_connect_irq(sbd, 0, irq); + qdev_connect_gpio_out_named(dev, "rx-dma", 0, rx_dma); + qdev_connect_gpio_out_named(dev, "tx-dma", 0, tx_dma); + sysbus_realize_and_unref(sbd, &error_fatal); + + return PXA2XX_MMCI(dev); +} + +static void pxa2xx_mmci_set_inserted(DeviceState *dev, bool inserted) +{ + PXA2xxMMCIState *s = PXA2XX_MMCI(dev); + + qemu_set_irq(s->inserted, inserted); +} + +static void pxa2xx_mmci_set_readonly(DeviceState *dev, bool readonly) +{ + PXA2xxMMCIState *s = PXA2XX_MMCI(dev); + + qemu_set_irq(s->readonly, readonly); +} + +void pxa2xx_mmci_handlers(PXA2xxMMCIState *s, qemu_irq readonly, + qemu_irq coverswitch) +{ + DeviceState *dev = DEVICE(s); + + s->readonly = readonly; + s->inserted = coverswitch; + + pxa2xx_mmci_set_inserted(dev, sdbus_get_inserted(&s->sdbus)); + pxa2xx_mmci_set_readonly(dev, sdbus_get_readonly(&s->sdbus)); +} + +static void pxa2xx_mmci_reset(DeviceState *d) +{ + PXA2xxMMCIState *s = PXA2XX_MMCI(d); + + s->status = 0; + s->clkrt = 0; + s->spi = 0; + s->cmdat = 0; + s->resp_tout = 0; + s->read_tout = 0; + s->blklen = 0; + s->numblk = 0; + s->intmask = 0; + s->intreq = 0; + s->cmd = 0; + s->arg = 0; + s->active = 0; + s->bytesleft = 0; + s->tx_start = 0; + s->tx_len = 0; + s->rx_start = 0; + s->rx_len = 0; + s->resp_len = 0; + s->cmdreq = 0; + memset(s->tx_fifo, 0, sizeof(s->tx_fifo)); + memset(s->rx_fifo, 0, sizeof(s->rx_fifo)); + memset(s->resp_fifo, 0, sizeof(s->resp_fifo)); +} + +static void pxa2xx_mmci_instance_init(Object *obj) +{ + PXA2xxMMCIState *s = PXA2XX_MMCI(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + DeviceState *dev = DEVICE(obj); + + memory_region_init_io(&s->iomem, obj, &pxa2xx_mmci_ops, s, + "pxa2xx-mmci", 0x00100000); + sysbus_init_mmio(sbd, &s->iomem); + sysbus_init_irq(sbd, &s->irq); + qdev_init_gpio_out_named(dev, &s->rx_dma, "rx-dma", 1); + qdev_init_gpio_out_named(dev, &s->tx_dma, "tx-dma", 1); + + qbus_init(&s->sdbus, sizeof(s->sdbus), + TYPE_PXA2XX_MMCI_BUS, DEVICE(obj), "sd-bus"); +} + +static void pxa2xx_mmci_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->vmsd = &vmstate_pxa2xx_mmci; + dc->reset = pxa2xx_mmci_reset; +} + +static void pxa2xx_mmci_bus_class_init(ObjectClass *klass, void *data) +{ + SDBusClass *sbc = SD_BUS_CLASS(klass); + + sbc->set_inserted = pxa2xx_mmci_set_inserted; + sbc->set_readonly = pxa2xx_mmci_set_readonly; +} + +static const TypeInfo pxa2xx_mmci_info = { + .name = TYPE_PXA2XX_MMCI, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(PXA2xxMMCIState), + .instance_init = pxa2xx_mmci_instance_init, + .class_init = pxa2xx_mmci_class_init, +}; + +static const TypeInfo pxa2xx_mmci_bus_info = { + .name = TYPE_PXA2XX_MMCI_BUS, + .parent = TYPE_SD_BUS, + .instance_size = sizeof(SDBus), + .class_init = pxa2xx_mmci_bus_class_init, +}; + +static void pxa2xx_mmci_register_types(void) +{ + type_register_static(&pxa2xx_mmci_info); + type_register_static(&pxa2xx_mmci_bus_info); +} + +type_init(pxa2xx_mmci_register_types) diff --git 
a/hw/sd/sd.c b/hw/sd/sd.c new file mode 100644 index 000000000..bb5dbff68 --- /dev/null +++ b/hw/sd/sd.c @@ -0,0 +1,2227 @@ +/* + * SD Memory Card emulation as defined in the "SD Memory Card Physical + * layer specification, Version 2.00." + * + * Copyright (c) 2006 Andrzej Zaborowski + * Copyright (c) 2007 CodeSourcery + * Copyright (c) 2018 Philippe Mathieu-Daudé + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "qemu/cutils.h" +#include "hw/irq.h" +#include "hw/registerfields.h" +#include "sysemu/block-backend.h" +#include "hw/sd/sd.h" +#include "hw/sd/sdcard_legacy.h" +#include "migration/vmstate.h" +#include "qapi/error.h" +#include "qemu/bitmap.h" +#include "hw/qdev-properties.h" +#include "hw/qdev-properties-system.h" +#include "qemu/error-report.h" +#include "qemu/timer.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "qemu-common.h" +#include "sdmmc-internal.h" +#include "trace.h" + +//#define DEBUG_SD 1 + +#define SDSC_MAX_CAPACITY (2 * GiB) + +#define INVALID_ADDRESS UINT32_MAX + +typedef enum { + sd_r0 = 0, /* no response */ + sd_r1, /* normal response command */ + sd_r2_i, /* CID register */ + sd_r2_s, /* CSD register */ + sd_r3, /* OCR register */ + sd_r6 = 6, /* Published RCA response */ + sd_r7, /* Operating voltage */ + sd_r1b = -1, + sd_illegal = -2, +} sd_rsp_type_t; + +enum SDCardModes { + sd_inactive, + sd_card_identification_mode, + sd_data_transfer_mode, +}; + +enum SDCardStates { + sd_inactive_state = -1, + sd_idle_state = 0, + sd_ready_state, + sd_identification_state, + sd_standby_state, + sd_transfer_state, + sd_sendingdata_state, + sd_receivingdata_state, + sd_programming_state, + sd_disconnect_state, +}; + +struct SDState { + DeviceState parent_obj; + + /* If true, created by sd_init() for a non-qdevified caller */ + /* TODO purge them with fire */ + bool me_no_qdev_me_kill_mammoth_with_rocks; + + /* SD Memory Card Registers */ + uint32_t ocr; + uint8_t scr[8]; + uint8_t cid[16]; + uint8_t csd[16]; + uint16_t rca; + uint32_t card_status; + uint8_t sd_status[64]; + + /* Static properties */ + + uint8_t spec_version; + BlockBackend *blk; + bool spi; + + /* Runtime changeables */ + + uint32_t mode; /* current card mode, one of SDCardModes */ + int32_t 
state; /* current card state, one of SDCardStates */ + uint32_t vhs; + bool wp_switch; + unsigned long *wp_groups; + int32_t wpgrps_size; + uint64_t size; + uint32_t blk_len; + uint32_t multi_blk_cnt; + uint32_t erase_start; + uint32_t erase_end; + uint8_t pwd[16]; + uint32_t pwd_len; + uint8_t function_group[6]; + uint8_t current_cmd; + /* True if we will handle the next command as an ACMD. Note that this does + * *not* track the APP_CMD status bit! + */ + bool expecting_acmd; + uint32_t blk_written; + uint64_t data_start; + uint32_t data_offset; + uint8_t data[512]; + qemu_irq readonly_cb; + qemu_irq inserted_cb; + QEMUTimer *ocr_power_timer; + const char *proto_name; + bool enable; + uint8_t dat_lines; + bool cmd_line; +}; + +static void sd_realize(DeviceState *dev, Error **errp); + +static const char *sd_state_name(enum SDCardStates state) +{ + static const char *state_name[] = { + [sd_idle_state] = "idle", + [sd_ready_state] = "ready", + [sd_identification_state] = "identification", + [sd_standby_state] = "standby", + [sd_transfer_state] = "transfer", + [sd_sendingdata_state] = "sendingdata", + [sd_receivingdata_state] = "receivingdata", + [sd_programming_state] = "programming", + [sd_disconnect_state] = "disconnect", + }; + if (state == sd_inactive_state) { + return "inactive"; + } + assert(state < ARRAY_SIZE(state_name)); + return state_name[state]; +} + +static const char *sd_response_name(sd_rsp_type_t rsp) +{ + static const char *response_name[] = { + [sd_r0] = "RESP#0 (no response)", + [sd_r1] = "RESP#1 (normal cmd)", + [sd_r2_i] = "RESP#2 (CID reg)", + [sd_r2_s] = "RESP#2 (CSD reg)", + [sd_r3] = "RESP#3 (OCR reg)", + [sd_r6] = "RESP#6 (RCA)", + [sd_r7] = "RESP#7 (operating voltage)", + }; + if (rsp == sd_illegal) { + return "ILLEGAL RESP"; + } + if (rsp == sd_r1b) { + rsp = sd_r1; + } + assert(rsp < ARRAY_SIZE(response_name)); + return response_name[rsp]; +} + +static uint8_t sd_get_dat_lines(SDState *sd) +{ + return sd->enable ? sd->dat_lines : 0; +} + +static bool sd_get_cmd_line(SDState *sd) +{ + return sd->enable ? sd->cmd_line : false; +} + +static void sd_set_voltage(SDState *sd, uint16_t millivolts) +{ + trace_sdcard_set_voltage(millivolts); + + switch (millivolts) { + case 3001 ... 3600: /* SD_VOLTAGE_3_3V */ + case 2001 ... 
3000: /* SD_VOLTAGE_3_0V */ + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, "SD card voltage not supported: %.3fV", + millivolts / 1000.f); + } +} + +static void sd_set_mode(SDState *sd) +{ + switch (sd->state) { + case sd_inactive_state: + sd->mode = sd_inactive; + break; + + case sd_idle_state: + case sd_ready_state: + case sd_identification_state: + sd->mode = sd_card_identification_mode; + break; + + case sd_standby_state: + case sd_transfer_state: + case sd_sendingdata_state: + case sd_receivingdata_state: + case sd_programming_state: + case sd_disconnect_state: + sd->mode = sd_data_transfer_mode; + break; + } +} + +static const sd_cmd_type_t sd_cmd_type[SDMMC_CMD_MAX] = { + sd_bc, sd_none, sd_bcr, sd_bcr, sd_none, sd_none, sd_none, sd_ac, + sd_bcr, sd_ac, sd_ac, sd_adtc, sd_ac, sd_ac, sd_none, sd_ac, + /* 16 */ + sd_ac, sd_adtc, sd_adtc, sd_none, sd_none, sd_none, sd_none, sd_none, + sd_adtc, sd_adtc, sd_adtc, sd_adtc, sd_ac, sd_ac, sd_adtc, sd_none, + /* 32 */ + sd_ac, sd_ac, sd_none, sd_none, sd_none, sd_none, sd_ac, sd_none, + sd_none, sd_none, sd_bc, sd_none, sd_none, sd_none, sd_none, sd_none, + /* 48 */ + sd_none, sd_none, sd_none, sd_none, sd_none, sd_none, sd_none, sd_ac, + sd_adtc, sd_none, sd_none, sd_none, sd_none, sd_none, sd_none, sd_none, +}; + +static const int sd_cmd_class[SDMMC_CMD_MAX] = { + 0, 0, 0, 0, 0, 9, 10, 0, 0, 0, 0, 1, 0, 0, 0, 0, + 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 6, 6, 6, 6, + 5, 5, 10, 10, 10, 10, 5, 9, 9, 9, 7, 7, 7, 7, 7, 7, + 7, 7, 10, 7, 9, 9, 9, 8, 8, 10, 8, 8, 8, 8, 8, 8, +}; + +static uint8_t sd_crc7(const void *message, size_t width) +{ + int i, bit; + uint8_t shift_reg = 0x00; + const uint8_t *msg = (const uint8_t *)message; + + for (i = 0; i < width; i ++, msg ++) + for (bit = 7; bit >= 0; bit --) { + shift_reg <<= 1; + if ((shift_reg >> 7) ^ ((*msg >> bit) & 1)) + shift_reg ^= 0x89; + } + + return shift_reg; +} + +#define OCR_POWER_DELAY_NS 500000 /* 0.5ms */ + +FIELD(OCR, VDD_VOLTAGE_WINDOW, 0, 24) +FIELD(OCR, VDD_VOLTAGE_WIN_LO, 0, 8) +FIELD(OCR, DUAL_VOLTAGE_CARD, 7, 1) +FIELD(OCR, VDD_VOLTAGE_WIN_HI, 8, 16) +FIELD(OCR, ACCEPT_SWITCH_1V8, 24, 1) /* Only UHS-I */ +FIELD(OCR, UHS_II_CARD, 29, 1) /* Only UHS-II */ +FIELD(OCR, CARD_CAPACITY, 30, 1) /* 0:SDSC, 1:SDHC/SDXC */ +FIELD(OCR, CARD_POWER_UP, 31, 1) + +#define ACMD41_ENQUIRY_MASK 0x00ffffff +#define ACMD41_R3_MASK (R_OCR_VDD_VOLTAGE_WIN_HI_MASK \ + | R_OCR_ACCEPT_SWITCH_1V8_MASK \ + | R_OCR_UHS_II_CARD_MASK \ + | R_OCR_CARD_CAPACITY_MASK \ + | R_OCR_CARD_POWER_UP_MASK) + +static void sd_set_ocr(SDState *sd) +{ + /* All voltages OK */ + sd->ocr = R_OCR_VDD_VOLTAGE_WIN_HI_MASK; +} + +static void sd_ocr_powerup(void *opaque) +{ + SDState *sd = opaque; + + trace_sdcard_powerup(); + assert(!FIELD_EX32(sd->ocr, OCR, CARD_POWER_UP)); + + /* card power-up OK */ + sd->ocr = FIELD_DP32(sd->ocr, OCR, CARD_POWER_UP, 1); + + if (sd->size > SDSC_MAX_CAPACITY) { + sd->ocr = FIELD_DP32(sd->ocr, OCR, CARD_CAPACITY, 1); + } +} + +static void sd_set_scr(SDState *sd) +{ + sd->scr[0] = 0 << 4; /* SCR structure version 1.0 */ + if (sd->spec_version == SD_PHY_SPECv1_10_VERS) { + sd->scr[0] |= 1; /* Spec Version 1.10 */ + } else { + sd->scr[0] |= 2; /* Spec Version 2.00 or Version 3.0X */ + } + sd->scr[1] = (2 << 4) /* SDSC Card (Security Version 1.01) */ + | 0b0101; /* 1-bit or 4-bit width bus modes */ + sd->scr[2] = 0x00; /* Extended Security is not supported. 
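A host decodes the SCR bytes that sd_set_scr() assembles above roughly as follows; this is a minimal standalone sketch (not part of the patch), and decode_scr() is a hypothetical helper rather than QEMU API:

#include <stdint.h>
#include <stdio.h>

static void decode_scr(const uint8_t scr[8])
{
    unsigned scr_structure = scr[0] >> 4; /* 0 -> SCR structure v1.0 */
    unsigned sd_spec = scr[0] & 0xf;      /* 1 -> v1.10, 2 -> v2.00/3.0X */
    unsigned bus_widths = scr[1] & 0xf;   /* bit 0: 1-bit, bit 2: 4-bit */
    unsigned sd_spec3 = scr[2] >> 7;      /* 1 -> v3.0X */

    printf("SCR structure %u, spec %u%s, 1-bit:%u 4-bit:%u\n",
           scr_structure, sd_spec, sd_spec3 ? " (3.0X)" : "",
           bus_widths & 1, (bus_widths >> 2) & 1);
}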
*/ + if (sd->spec_version >= SD_PHY_SPECv3_01_VERS) { + sd->scr[2] |= 1 << 7; /* Spec Version 3.0X */ + } + sd->scr[3] = 0x00; + /* reserved for manufacturer usage */ + sd->scr[4] = 0x00; + sd->scr[5] = 0x00; + sd->scr[6] = 0x00; + sd->scr[7] = 0x00; +} + +#define MID 0xaa +#define OID "XY" +#define PNM "QEMU!" +#define PRV 0x01 +#define MDT_YR 2006 +#define MDT_MON 2 + +static void sd_set_cid(SDState *sd) +{ + sd->cid[0] = MID; /* Fake card manufacturer ID (MID) */ + sd->cid[1] = OID[0]; /* OEM/Application ID (OID) */ + sd->cid[2] = OID[1]; + sd->cid[3] = PNM[0]; /* Fake product name (PNM) */ + sd->cid[4] = PNM[1]; + sd->cid[5] = PNM[2]; + sd->cid[6] = PNM[3]; + sd->cid[7] = PNM[4]; + sd->cid[8] = PRV; /* Fake product revision (PRV) */ + sd->cid[9] = 0xde; /* Fake serial number (PSN) */ + sd->cid[10] = 0xad; + sd->cid[11] = 0xbe; + sd->cid[12] = 0xef; + sd->cid[13] = 0x00 | /* Manufacture date (MDT) */ + ((MDT_YR - 2000) / 10); + sd->cid[14] = ((MDT_YR % 10) << 4) | MDT_MON; + sd->cid[15] = (sd_crc7(sd->cid, 15) << 1) | 1; +} + +#define HWBLOCK_SHIFT 9 /* 512 bytes */ +#define SECTOR_SHIFT 5 /* 16 kilobytes */ +#define WPGROUP_SHIFT 7 /* 2 megs */ +#define CMULT_SHIFT 9 /* 512 times HWBLOCK_SIZE */ +#define WPGROUP_SIZE (1 << (HWBLOCK_SHIFT + SECTOR_SHIFT + WPGROUP_SHIFT)) + +static const uint8_t sd_csd_rw_mask[16] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfc, 0xfe, +}; + +static void sd_set_csd(SDState *sd, uint64_t size) +{ + int hwblock_shift = HWBLOCK_SHIFT; + uint32_t csize; + uint32_t sectsize = (1 << (SECTOR_SHIFT + 1)) - 1; + uint32_t wpsize = (1 << (WPGROUP_SHIFT + 1)) - 1; + + /* To indicate 2 GiB card, BLOCK_LEN shall be 1024 bytes */ + if (size == SDSC_MAX_CAPACITY) { + hwblock_shift += 1; + } + csize = (size >> (CMULT_SHIFT + hwblock_shift)) - 1; + + if (size <= SDSC_MAX_CAPACITY) { /* Standard Capacity SD */ + sd->csd[0] = 0x00; /* CSD structure */ + sd->csd[1] = 0x26; /* Data read access-time-1 */ + sd->csd[2] = 0x00; /* Data read access-time-2 */ + sd->csd[3] = 0x32; /* Max. data transfer rate: 25 MHz */ + sd->csd[4] = 0x5f; /* Card Command Classes */ + sd->csd[5] = 0x50 | /* Max. read data block length */ + hwblock_shift; + sd->csd[6] = 0xe0 | /* Partial block for read allowed */ + ((csize >> 10) & 0x03); + sd->csd[7] = 0x00 | /* Device size */ + ((csize >> 2) & 0xff); + sd->csd[8] = 0x3f | /* Max. read current */ + ((csize << 6) & 0xc0); + sd->csd[9] = 0xfc | /* Max. write current */ + ((CMULT_SHIFT - 2) >> 1); + sd->csd[10] = 0x40 | /* Erase sector size */ + (((CMULT_SHIFT - 2) << 7) & 0x80) | (sectsize >> 1); + sd->csd[11] = 0x00 | /* Write protect group size */ + ((sectsize << 7) & 0x80) | wpsize; + sd->csd[12] = 0x90 | /* Write speed factor */ + (hwblock_shift >> 2); + sd->csd[13] = 0x20 | /* Max. 
write data block length */ + ((hwblock_shift << 6) & 0xc0); + sd->csd[14] = 0x00; /* File format group */ + } else { /* SDHC */ + size /= 512 * KiB; + size -= 1; + sd->csd[0] = 0x40; + sd->csd[1] = 0x0e; + sd->csd[2] = 0x00; + sd->csd[3] = 0x32; + sd->csd[4] = 0x5b; + sd->csd[5] = 0x59; + sd->csd[6] = 0x00; + sd->csd[7] = (size >> 16) & 0xff; + sd->csd[8] = (size >> 8) & 0xff; + sd->csd[9] = (size & 0xff); + sd->csd[10] = 0x7f; + sd->csd[11] = 0x80; + sd->csd[12] = 0x0a; + sd->csd[13] = 0x40; + sd->csd[14] = 0x00; + } + sd->csd[15] = (sd_crc7(sd->csd, 15) << 1) | 1; +} + +static void sd_set_rca(SDState *sd) +{ + sd->rca += 0x4567; +} + +FIELD(CSR, AKE_SEQ_ERROR, 3, 1) +FIELD(CSR, APP_CMD, 5, 1) +FIELD(CSR, FX_EVENT, 6, 1) +FIELD(CSR, READY_FOR_DATA, 8, 1) +FIELD(CSR, CURRENT_STATE, 9, 4) +FIELD(CSR, ERASE_RESET, 13, 1) +FIELD(CSR, CARD_ECC_DISABLED, 14, 1) +FIELD(CSR, WP_ERASE_SKIP, 15, 1) +FIELD(CSR, CSD_OVERWRITE, 16, 1) +FIELD(CSR, DEFERRED_RESPONSE, 17, 1) +FIELD(CSR, ERROR, 19, 1) +FIELD(CSR, CC_ERROR, 20, 1) +FIELD(CSR, CARD_ECC_FAILED, 21, 1) +FIELD(CSR, ILLEGAL_COMMAND, 22, 1) +FIELD(CSR, COM_CRC_ERROR, 23, 1) +FIELD(CSR, LOCK_UNLOCK_FAILED, 24, 1) +FIELD(CSR, CARD_IS_LOCKED, 25, 1) +FIELD(CSR, WP_VIOLATION, 26, 1) +FIELD(CSR, ERASE_PARAM, 27, 1) +FIELD(CSR, ERASE_SEQ_ERROR, 28, 1) +FIELD(CSR, BLOCK_LEN_ERROR, 29, 1) +FIELD(CSR, ADDRESS_ERROR, 30, 1) +FIELD(CSR, OUT_OF_RANGE, 31, 1) + +/* Card status bits, split by clear condition: + * A : According to the card current state + * B : Always related to the previous command + * C : Cleared by read + */ +#define CARD_STATUS_A (R_CSR_READY_FOR_DATA_MASK \ + | R_CSR_CARD_ECC_DISABLED_MASK \ + | R_CSR_CARD_IS_LOCKED_MASK) +#define CARD_STATUS_B (R_CSR_CURRENT_STATE_MASK \ + | R_CSR_ILLEGAL_COMMAND_MASK \ + | R_CSR_COM_CRC_ERROR_MASK) +#define CARD_STATUS_C (R_CSR_AKE_SEQ_ERROR_MASK \ + | R_CSR_APP_CMD_MASK \ + | R_CSR_ERASE_RESET_MASK \ + | R_CSR_WP_ERASE_SKIP_MASK \ + | R_CSR_CSD_OVERWRITE_MASK \ + | R_CSR_ERROR_MASK \ + | R_CSR_CC_ERROR_MASK \ + | R_CSR_CARD_ECC_FAILED_MASK \ + | R_CSR_LOCK_UNLOCK_FAILED_MASK \ + | R_CSR_WP_VIOLATION_MASK \ + | R_CSR_ERASE_PARAM_MASK \ + | R_CSR_ERASE_SEQ_ERROR_MASK \ + | R_CSR_BLOCK_LEN_ERROR_MASK \ + | R_CSR_ADDRESS_ERROR_MASK \ + | R_CSR_OUT_OF_RANGE_MASK) + +static void sd_set_cardstatus(SDState *sd) +{ + sd->card_status = 0x00000100; +} + +static void sd_set_sdstatus(SDState *sd) +{ + memset(sd->sd_status, 0, 64); +} + +static int sd_req_crc_validate(SDRequest *req) +{ + uint8_t buffer[5]; + buffer[0] = 0x40 | req->cmd; + stl_be_p(&buffer[1], req->arg); + return 0; + return sd_crc7(buffer, 5) != req->crc; /* TODO */ +} + +static void sd_response_r1_make(SDState *sd, uint8_t *response) +{ + stl_be_p(response, sd->card_status); + + /* Clear the "clear on read" status bits */ + sd->card_status &= ~CARD_STATUS_C; +} + +static void sd_response_r3_make(SDState *sd, uint8_t *response) +{ + stl_be_p(response, sd->ocr & ACMD41_R3_MASK); +} + +static void sd_response_r6_make(SDState *sd, uint8_t *response) +{ + uint16_t status; + + status = ((sd->card_status >> 8) & 0xc000) | + ((sd->card_status >> 6) & 0x2000) | + (sd->card_status & 0x1fff); + sd->card_status &= ~(CARD_STATUS_C & 0xc81fff); + stw_be_p(response + 0, sd->rca); + stw_be_p(response + 2, status); +} + +static void sd_response_r7_make(SDState *sd, uint8_t *response) +{ + stl_be_p(response, sd->vhs); +} + +static inline uint64_t sd_addr_to_wpnum(uint64_t addr) +{ + return addr >> (HWBLOCK_SHIFT + SECTOR_SHIFT + WPGROUP_SHIFT); +} + +static void 
sd_reset(DeviceState *dev) +{ + SDState *sd = SD_CARD(dev); + uint64_t size; + uint64_t sect; + + trace_sdcard_reset(); + if (sd->blk) { + blk_get_geometry(sd->blk, &sect); + } else { + sect = 0; + } + size = sect << 9; + + sect = sd_addr_to_wpnum(size) + 1; + + sd->state = sd_idle_state; + sd->rca = 0x0000; + sd_set_ocr(sd); + sd_set_scr(sd); + sd_set_cid(sd); + sd_set_csd(sd, size); + sd_set_cardstatus(sd); + sd_set_sdstatus(sd); + + g_free(sd->wp_groups); + sd->wp_switch = sd->blk ? !blk_is_writable(sd->blk) : false; + sd->wpgrps_size = sect; + sd->wp_groups = bitmap_new(sd->wpgrps_size); + memset(sd->function_group, 0, sizeof(sd->function_group)); + sd->erase_start = INVALID_ADDRESS; + sd->erase_end = INVALID_ADDRESS; + sd->size = size; + sd->blk_len = 0x200; + sd->pwd_len = 0; + sd->expecting_acmd = false; + sd->dat_lines = 0xf; + sd->cmd_line = true; + sd->multi_blk_cnt = 0; +} + +static bool sd_get_inserted(SDState *sd) +{ + return sd->blk && blk_is_inserted(sd->blk); +} + +static bool sd_get_readonly(SDState *sd) +{ + return sd->wp_switch; +} + +static void sd_cardchange(void *opaque, bool load, Error **errp) +{ + SDState *sd = opaque; + DeviceState *dev = DEVICE(sd); + SDBus *sdbus; + bool inserted = sd_get_inserted(sd); + bool readonly = sd_get_readonly(sd); + + if (inserted) { + trace_sdcard_inserted(readonly); + sd_reset(dev); + } else { + trace_sdcard_ejected(); + } + + if (sd->me_no_qdev_me_kill_mammoth_with_rocks) { + qemu_set_irq(sd->inserted_cb, inserted); + if (inserted) { + qemu_set_irq(sd->readonly_cb, readonly); + } + } else { + sdbus = SD_BUS(qdev_get_parent_bus(dev)); + sdbus_set_inserted(sdbus, inserted); + if (inserted) { + sdbus_set_readonly(sdbus, readonly); + } + } +} + +static const BlockDevOps sd_block_ops = { + .change_media_cb = sd_cardchange, +}; + +static bool sd_ocr_vmstate_needed(void *opaque) +{ + SDState *sd = opaque; + + /* Include the OCR state (and timer) if it is not yet powered up */ + return !FIELD_EX32(sd->ocr, OCR, CARD_POWER_UP); +} + +static const VMStateDescription sd_ocr_vmstate = { + .name = "sd-card/ocr-state", + .version_id = 1, + .minimum_version_id = 1, + .needed = sd_ocr_vmstate_needed, + .fields = (VMStateField[]) { + VMSTATE_UINT32(ocr, SDState), + VMSTATE_TIMER_PTR(ocr_power_timer, SDState), + VMSTATE_END_OF_LIST() + }, +}; + +static int sd_vmstate_pre_load(void *opaque) +{ + SDState *sd = opaque; + + /* If the OCR state is not included (prior versions, or not + * needed), then the OCR must be set as powered up. If the OCR state + * is included, this will be replaced by the state restore.
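The pre_load hook above hinges on the two OCR bits that guests key off during init. A minimal sketch (not part of the patch; the macros below are illustrative equivalents of the FIELD() definitions earlier in this file):

#include <stdbool.h>
#include <stdint.h>

#define OCR_CARD_POWER_UP (1u << 31) /* busy bit: 1 = initialization done */
#define OCR_CARD_CAPACITY (1u << 30) /* 0 = SDSC, 1 = SDHC/SDXC */

static bool card_ready(uint32_t ocr) { return ocr & OCR_CARD_POWER_UP; }
static bool card_is_hc(uint32_t ocr) { return ocr & OCR_CARD_CAPACITY; }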
+ */ + sd_ocr_powerup(sd); + + return 0; +} + +static const VMStateDescription sd_vmstate = { + .name = "sd-card", + .version_id = 2, + .minimum_version_id = 2, + .pre_load = sd_vmstate_pre_load, + .fields = (VMStateField[]) { + VMSTATE_UINT32(mode, SDState), + VMSTATE_INT32(state, SDState), + VMSTATE_UINT8_ARRAY(cid, SDState, 16), + VMSTATE_UINT8_ARRAY(csd, SDState, 16), + VMSTATE_UINT16(rca, SDState), + VMSTATE_UINT32(card_status, SDState), + VMSTATE_PARTIAL_BUFFER(sd_status, SDState, 1), + VMSTATE_UINT32(vhs, SDState), + VMSTATE_BITMAP(wp_groups, SDState, 0, wpgrps_size), + VMSTATE_UINT32(blk_len, SDState), + VMSTATE_UINT32(multi_blk_cnt, SDState), + VMSTATE_UINT32(erase_start, SDState), + VMSTATE_UINT32(erase_end, SDState), + VMSTATE_UINT8_ARRAY(pwd, SDState, 16), + VMSTATE_UINT32(pwd_len, SDState), + VMSTATE_UINT8_ARRAY(function_group, SDState, 6), + VMSTATE_UINT8(current_cmd, SDState), + VMSTATE_BOOL(expecting_acmd, SDState), + VMSTATE_UINT32(blk_written, SDState), + VMSTATE_UINT64(data_start, SDState), + VMSTATE_UINT32(data_offset, SDState), + VMSTATE_UINT8_ARRAY(data, SDState, 512), + VMSTATE_UNUSED_V(1, 512), + VMSTATE_BOOL(enable, SDState), + VMSTATE_END_OF_LIST() + }, + .subsections = (const VMStateDescription*[]) { + &sd_ocr_vmstate, + NULL + }, +}; + +/* Legacy initialization function for use by non-qdevified callers */ +SDState *sd_init(BlockBackend *blk, bool is_spi) +{ + Object *obj; + DeviceState *dev; + SDState *sd; + Error *err = NULL; + + obj = object_new(TYPE_SD_CARD); + dev = DEVICE(obj); + if (!qdev_prop_set_drive_err(dev, "drive", blk, &err)) { + error_reportf_err(err, "sd_init failed: "); + return NULL; + } + qdev_prop_set_bit(dev, "spi", is_spi); + + /* + * Realizing the device properly would put it into the QOM + * composition tree even though it is not plugged into an + * appropriate bus. That's a no-no. Hide the device from + * QOM/qdev, and call its qdev realize callback directly. + */ + object_ref(obj); + object_unparent(obj); + sd_realize(dev, &err); + if (err) { + error_reportf_err(err, "sd_init failed: "); + return NULL; + } + + sd = SD_CARD(dev); + sd->me_no_qdev_me_kill_mammoth_with_rocks = true; + return sd; +} + +void sd_set_cb(SDState *sd, qemu_irq readonly, qemu_irq insert) +{ + sd->readonly_cb = readonly; + sd->inserted_cb = insert; + qemu_set_irq(readonly, sd->blk ? !blk_is_writable(sd->blk) : 0); + qemu_set_irq(insert, sd->blk ? 
blk_is_inserted(sd->blk) : 0); +} + +static void sd_blk_read(SDState *sd, uint64_t addr, uint32_t len) +{ + trace_sdcard_read_block(addr, len); + if (!sd->blk || blk_pread(sd->blk, addr, sd->data, len) < 0) { + fprintf(stderr, "sd_blk_read: read error on host side\n"); + } +} + +static void sd_blk_write(SDState *sd, uint64_t addr, uint32_t len) +{ + trace_sdcard_write_block(addr, len); + if (!sd->blk || blk_pwrite(sd->blk, addr, sd->data, len, 0) < 0) { + fprintf(stderr, "sd_blk_write: write error on host side\n"); + } +} + +#define BLK_READ_BLOCK(a, len) sd_blk_read(sd, a, len) +#define BLK_WRITE_BLOCK(a, len) sd_blk_write(sd, a, len) +#define APP_READ_BLOCK(a, len) memset(sd->data, 0xec, len) +#define APP_WRITE_BLOCK(a, len) + +static void sd_erase(SDState *sd) +{ + uint64_t erase_start = sd->erase_start; + uint64_t erase_end = sd->erase_end; + bool sdsc = true; + uint64_t wpnum; + uint64_t erase_addr; + int erase_len = 1 << HWBLOCK_SHIFT; + + trace_sdcard_erase(sd->erase_start, sd->erase_end); + if (sd->erase_start == INVALID_ADDRESS + || sd->erase_end == INVALID_ADDRESS) { + sd->card_status |= ERASE_SEQ_ERROR; + sd->erase_start = INVALID_ADDRESS; + sd->erase_end = INVALID_ADDRESS; + return; + } + + if (FIELD_EX32(sd->ocr, OCR, CARD_CAPACITY)) { + /* High capacity memory card: erase units are 512 byte blocks */ + erase_start *= 512; + erase_end *= 512; + sdsc = false; + } + + if (erase_start > sd->size || erase_end > sd->size) { + sd->card_status |= OUT_OF_RANGE; + sd->erase_start = INVALID_ADDRESS; + sd->erase_end = INVALID_ADDRESS; + return; + } + + sd->erase_start = INVALID_ADDRESS; + sd->erase_end = INVALID_ADDRESS; + sd->csd[14] |= 0x40; + + memset(sd->data, 0xff, erase_len); + for (erase_addr = erase_start; erase_addr <= erase_end; + erase_addr += erase_len) { + if (sdsc) { + /* Only SDSC cards support write protect groups */ + wpnum = sd_addr_to_wpnum(erase_addr); + assert(wpnum < sd->wpgrps_size); + if (test_bit(wpnum, sd->wp_groups)) { + sd->card_status |= WP_ERASE_SKIP; + continue; + } + } + BLK_WRITE_BLOCK(erase_addr, erase_len); + } +} + +static uint32_t sd_wpbits(SDState *sd, uint64_t addr) +{ + uint32_t i, wpnum; + uint32_t ret = 0; + + wpnum = sd_addr_to_wpnum(addr); + + for (i = 0; i < 32; i++, wpnum++, addr += WPGROUP_SIZE) { + if (addr >= sd->size) { + /* + * If the addresses of the last groups are outside the valid range, + * then the corresponding write protection bits shall be set to 0. 
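The group arithmetic behind sd_addr_to_wpnum() and the loop above boils down to one shift; a self-contained sketch using the shift constants from this file (9 + 5 + 7 = 21, i.e. one write-protect group per 2 MiB):

#include <assert.h>
#include <stdint.h>

#define WP_GROUP_SHIFT (9 + 5 + 7) /* HWBLOCK + SECTOR + WPGROUP shifts */

static uint64_t addr_to_wp_group(uint64_t addr)
{
    return addr >> WP_GROUP_SHIFT;
}

int main(void)
{
    assert(addr_to_wp_group((2 << 20) - 1) == 0); /* last byte of group 0 */
    assert(addr_to_wp_group(2 << 20) == 1);       /* first byte of group 1 */
    return 0;
}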
+ */ + continue; + } + assert(wpnum < sd->wpgrps_size); + if (test_bit(wpnum, sd->wp_groups)) { + ret |= (1 << i); + } + } + + return ret; +} + +static void sd_function_switch(SDState *sd, uint32_t arg) +{ + int i, mode, new_func; + mode = !!(arg & 0x80000000); + + sd->data[0] = 0x00; /* Maximum current consumption */ + sd->data[1] = 0x01; + sd->data[2] = 0x80; /* Supported group 6 functions */ + sd->data[3] = 0x01; + sd->data[4] = 0x80; /* Supported group 5 functions */ + sd->data[5] = 0x01; + sd->data[6] = 0x80; /* Supported group 4 functions */ + sd->data[7] = 0x01; + sd->data[8] = 0x80; /* Supported group 3 functions */ + sd->data[9] = 0x01; + sd->data[10] = 0x80; /* Supported group 2 functions */ + sd->data[11] = 0x43; + sd->data[12] = 0x80; /* Supported group 1 functions */ + sd->data[13] = 0x03; + + memset(&sd->data[14], 0, 3); + for (i = 0; i < 6; i ++) { + new_func = (arg >> (i * 4)) & 0x0f; + if (mode && new_func != 0x0f) + sd->function_group[i] = new_func; + sd->data[16 - (i >> 1)] |= new_func << ((i % 2) * 4); + } + memset(&sd->data[17], 0, 47); +} + +static inline bool sd_wp_addr(SDState *sd, uint64_t addr) +{ + return test_bit(sd_addr_to_wpnum(addr), sd->wp_groups); +} + +static void sd_lock_command(SDState *sd) +{ + int erase, lock, clr_pwd, set_pwd, pwd_len; + erase = !!(sd->data[0] & 0x08); + lock = sd->data[0] & 0x04; + clr_pwd = sd->data[0] & 0x02; + set_pwd = sd->data[0] & 0x01; + + if (sd->blk_len > 1) + pwd_len = sd->data[1]; + else + pwd_len = 0; + + if (lock) { + trace_sdcard_lock(); + } else { + trace_sdcard_unlock(); + } + if (erase) { + if (!(sd->card_status & CARD_IS_LOCKED) || sd->blk_len > 1 || + set_pwd || clr_pwd || lock || sd->wp_switch || + (sd->csd[14] & 0x20)) { + sd->card_status |= LOCK_UNLOCK_FAILED; + return; + } + bitmap_zero(sd->wp_groups, sd->wpgrps_size); + sd->csd[14] &= ~0x10; + sd->card_status &= ~CARD_IS_LOCKED; + sd->pwd_len = 0; + /* Erasing the entire card here! */ + fprintf(stderr, "SD: Card force-erased by CMD42\n"); + return; + } + + if (sd->blk_len < 2 + pwd_len || + pwd_len <= sd->pwd_len || + pwd_len > sd->pwd_len + 16) { + sd->card_status |= LOCK_UNLOCK_FAILED; + return; + } + + if (sd->pwd_len && memcmp(sd->pwd, sd->data + 2, sd->pwd_len)) { + sd->card_status |= LOCK_UNLOCK_FAILED; + return; + } + + pwd_len -= sd->pwd_len; + if ((pwd_len && !set_pwd) || + (clr_pwd && (set_pwd || lock)) || + (lock && !sd->pwd_len && !set_pwd) || + (!set_pwd && !clr_pwd && + (((sd->card_status & CARD_IS_LOCKED) && lock) || + (!(sd->card_status & CARD_IS_LOCKED) && !lock)))) { + sd->card_status |= LOCK_UNLOCK_FAILED; + return; + } + + if (set_pwd) { + memcpy(sd->pwd, sd->data + 2 + sd->pwd_len, pwd_len); + sd->pwd_len = pwd_len; + } + + if (clr_pwd) { + sd->pwd_len = 0; + } + + if (lock) + sd->card_status |= CARD_IS_LOCKED; + else + sd->card_status &= ~CARD_IS_LOCKED; +} + +static bool address_in_range(SDState *sd, const char *desc, + uint64_t addr, uint32_t length) +{ + if (addr + length > sd->size) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s offset %"PRIu64" > card %"PRIu64" [%%%u]\n", + desc, addr, sd->size, length); + sd->card_status |= ADDRESS_ERROR; + return false; + } + return true; +} + +static sd_rsp_type_t sd_normal_command(SDState *sd, SDRequest req) +{ + uint32_t rca = 0x0000; + uint64_t addr = (sd->ocr & (1 << 30)) ? (uint64_t) req.arg << 9 : req.arg; + + /* CMD55 precedes an ACMD, so we are not interested in tracing it. + * However there is no ACMD55, so we want to trace this particular case. 
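From the host side this pairing means every ACMDn is really two transactions. A minimal sketch (not part of the patch; send_cmd() is a hypothetical transport helper, not QEMU API):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical transport helper: issue one command, < 0 means error. */
static int send_cmd(uint8_t cmd, uint32_t arg)
{
    printf("CMD%u arg=0x%08" PRIx32 "\n", (unsigned)cmd, arg);
    return 0;
}

/* Every ACMDn reaches the card as the pair CMD55 + CMDn. */
static int send_app_cmd(uint16_t rca, uint8_t acmd, uint32_t arg)
{
    int r = send_cmd(55, (uint32_t)rca << 16); /* APP_CMD prefix */
    return r < 0 ? r : send_cmd(acmd, arg);
}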
+ */ + if (req.cmd != 55 || sd->expecting_acmd) { + trace_sdcard_normal_command(sd->proto_name, + sd_cmd_name(req.cmd), req.cmd, + req.arg, sd_state_name(sd->state)); + } + + /* Not interpreting this as an app command */ + sd->card_status &= ~APP_CMD; + + if (sd_cmd_type[req.cmd] == sd_ac + || sd_cmd_type[req.cmd] == sd_adtc) { + rca = req.arg >> 16; + } + + /* CMD23 (set block count) must be immediately followed by CMD18 or CMD25 + * if not, its effects are cancelled */ + if (sd->multi_blk_cnt != 0 && !(req.cmd == 18 || req.cmd == 25)) { + sd->multi_blk_cnt = 0; + } + + if (sd_cmd_class[req.cmd] == 6 && FIELD_EX32(sd->ocr, OCR, CARD_CAPACITY)) { + /* Only Standard Capacity cards support class 6 commands */ + return sd_illegal; + } + + switch (req.cmd) { + /* Basic commands (Class 0 and Class 1) */ + case 0: /* CMD0: GO_IDLE_STATE */ + switch (sd->state) { + case sd_inactive_state: + return sd->spi ? sd_r1 : sd_r0; + + default: + sd->state = sd_idle_state; + sd_reset(DEVICE(sd)); + return sd->spi ? sd_r1 : sd_r0; + } + break; + + case 1: /* CMD1: SEND_OP_CMD */ + if (!sd->spi) + goto bad_cmd; + + sd->state = sd_transfer_state; + return sd_r1; + + case 2: /* CMD2: ALL_SEND_CID */ + if (sd->spi) + goto bad_cmd; + switch (sd->state) { + case sd_ready_state: + sd->state = sd_identification_state; + return sd_r2_i; + + default: + break; + } + break; + + case 3: /* CMD3: SEND_RELATIVE_ADDR */ + if (sd->spi) + goto bad_cmd; + switch (sd->state) { + case sd_identification_state: + case sd_standby_state: + sd->state = sd_standby_state; + sd_set_rca(sd); + return sd_r6; + + default: + break; + } + break; + + case 4: /* CMD4: SEND_DSR */ + if (sd->spi) + goto bad_cmd; + switch (sd->state) { + case sd_standby_state: + break; + + default: + break; + } + break; + + case 5: /* CMD5: reserved for SDIO cards */ + return sd_illegal; + + case 6: /* CMD6: SWITCH_FUNCTION */ + switch (sd->mode) { + case sd_data_transfer_mode: + sd_function_switch(sd, req.arg); + sd->state = sd_sendingdata_state; + sd->data_start = 0; + sd->data_offset = 0; + return sd_r1; + + default: + break; + } + break; + + case 7: /* CMD7: SELECT/DESELECT_CARD */ + if (sd->spi) + goto bad_cmd; + switch (sd->state) { + case sd_standby_state: + if (sd->rca != rca) + return sd_r0; + + sd->state = sd_transfer_state; + return sd_r1b; + + case sd_transfer_state: + case sd_sendingdata_state: + if (sd->rca == rca) + break; + + sd->state = sd_standby_state; + return sd_r1b; + + case sd_disconnect_state: + if (sd->rca != rca) + return sd_r0; + + sd->state = sd_programming_state; + return sd_r1b; + + case sd_programming_state: + if (sd->rca == rca) + break; + + sd->state = sd_disconnect_state; + return sd_r1b; + + default: + break; + } + break; + + case 8: /* CMD8: SEND_IF_COND */ + if (sd->spec_version < SD_PHY_SPECv2_00_VERS) { + break; + } + if (sd->state != sd_idle_state) { + break; + } + sd->vhs = 0; + + /* No response if not exactly one VHS bit is set. */ + if (!(req.arg >> 8) || (req.arg >> (ctz32(req.arg & ~0xff) + 1))) { + return sd->spi ? sd_r7 : sd_r0; + } + + /* Accept. 
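For reference, the CMD8 argument validated here packs the supplied voltage (VHS) into bits 11:8 and an arbitrary check pattern into bits 7:0, which the card echoes back in R7. A small sketch (illustrative, assuming the conventional 0xaa pattern):

#include <stdint.h>

static uint32_t make_cmd8_arg(void)
{
    const uint32_t vhs = 0x1;            /* 0b0001: 2.7-3.6 V supply */
    const uint32_t check_pattern = 0xaa; /* echoed back in the R7 response */
    return (vhs << 8) | check_pattern;
}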
*/ + sd->vhs = req.arg; + return sd_r7; + + case 9: /* CMD9: SEND_CSD */ + switch (sd->state) { + case sd_standby_state: + if (sd->rca != rca) + return sd_r0; + + return sd_r2_s; + + case sd_transfer_state: + if (!sd->spi) + break; + sd->state = sd_sendingdata_state; + memcpy(sd->data, sd->csd, 16); + sd->data_start = addr; + sd->data_offset = 0; + return sd_r1; + + default: + break; + } + break; + + case 10: /* CMD10: SEND_CID */ + switch (sd->state) { + case sd_standby_state: + if (sd->rca != rca) + return sd_r0; + + return sd_r2_i; + + case sd_transfer_state: + if (!sd->spi) + break; + sd->state = sd_sendingdata_state; + memcpy(sd->data, sd->cid, 16); + sd->data_start = addr; + sd->data_offset = 0; + return sd_r1; + + default: + break; + } + break; + + case 12: /* CMD12: STOP_TRANSMISSION */ + switch (sd->state) { + case sd_sendingdata_state: + sd->state = sd_transfer_state; + return sd_r1b; + + case sd_receivingdata_state: + sd->state = sd_programming_state; + /* Bzzzzzzztt .... Operation complete. */ + sd->state = sd_transfer_state; + return sd_r1b; + + default: + break; + } + break; + + case 13: /* CMD13: SEND_STATUS */ + switch (sd->mode) { + case sd_data_transfer_mode: + if (!sd->spi && sd->rca != rca) { + return sd_r0; + } + + return sd_r1; + + default: + break; + } + break; + + case 15: /* CMD15: GO_INACTIVE_STATE */ + if (sd->spi) + goto bad_cmd; + switch (sd->mode) { + case sd_data_transfer_mode: + if (sd->rca != rca) + return sd_r0; + + sd->state = sd_inactive_state; + return sd_r0; + + default: + break; + } + break; + + /* Block read commands (Class 2) */ + case 16: /* CMD16: SET_BLOCKLEN */ + switch (sd->state) { + case sd_transfer_state: + if (req.arg > (1 << HWBLOCK_SHIFT)) { + sd->card_status |= BLOCK_LEN_ERROR; + } else { + trace_sdcard_set_blocklen(req.arg); + sd->blk_len = req.arg; + } + + return sd_r1; + + default: + break; + } + break; + + case 17: /* CMD17: READ_SINGLE_BLOCK */ + case 18: /* CMD18: READ_MULTIPLE_BLOCK */ + switch (sd->state) { + case sd_transfer_state: + + if (!address_in_range(sd, "READ_BLOCK", addr, sd->blk_len)) { + return sd_r1; + } + + sd->state = sd_sendingdata_state; + sd->data_start = addr; + sd->data_offset = 0; + return sd_r1; + + default: + break; + } + break; + + case 19: /* CMD19: SEND_TUNING_BLOCK (SD) */ + if (sd->spec_version < SD_PHY_SPECv3_01_VERS) { + break; + } + if (sd->state == sd_transfer_state) { + sd->state = sd_sendingdata_state; + sd->data_offset = 0; + return sd_r1; + } + break; + + case 23: /* CMD23: SET_BLOCK_COUNT */ + if (sd->spec_version < SD_PHY_SPECv3_01_VERS) { + break; + } + switch (sd->state) { + case sd_transfer_state: + sd->multi_blk_cnt = req.arg; + return sd_r1; + + default: + break; + } + break; + + /* Block write commands (Class 4) */ + case 24: /* CMD24: WRITE_SINGLE_BLOCK */ + case 25: /* CMD25: WRITE_MULTIPLE_BLOCK */ + switch (sd->state) { + case sd_transfer_state: + + if (!address_in_range(sd, "WRITE_BLOCK", addr, sd->blk_len)) { + return sd_r1; + } + + sd->state = sd_receivingdata_state; + sd->data_start = addr; + sd->data_offset = 0; + sd->blk_written = 0; + + if (sd->size <= SDSC_MAX_CAPACITY) { + if (sd_wp_addr(sd, sd->data_start)) { + sd->card_status |= WP_VIOLATION; + } + } + if (sd->csd[14] & 0x30) { + sd->card_status |= WP_VIOLATION; + } + return sd_r1; + + default: + break; + } + break; + + case 26: /* CMD26: PROGRAM_CID */ + if (sd->spi) + goto bad_cmd; + switch (sd->state) { + case sd_transfer_state: + sd->state = sd_receivingdata_state; + sd->data_start = 0; + sd->data_offset = 0; +
return sd_r1; + + default: + break; + } + break; + + case 27: /* CMD27: PROGRAM_CSD */ + switch (sd->state) { + case sd_transfer_state: + sd->state = sd_receivingdata_state; + sd->data_start = 0; + sd->data_offset = 0; + return sd_r1; + + default: + break; + } + break; + + /* Write protection (Class 6) */ + case 28: /* CMD28: SET_WRITE_PROT */ + if (sd->size > SDSC_MAX_CAPACITY) { + return sd_illegal; + } + + switch (sd->state) { + case sd_transfer_state: + if (!address_in_range(sd, "SET_WRITE_PROT", addr, 1)) { + return sd_r1b; + } + + sd->state = sd_programming_state; + set_bit(sd_addr_to_wpnum(addr), sd->wp_groups); + /* Bzzzzzzztt .... Operation complete. */ + sd->state = sd_transfer_state; + return sd_r1b; + + default: + break; + } + break; + + case 29: /* CMD29: CLR_WRITE_PROT */ + if (sd->size > SDSC_MAX_CAPACITY) { + return sd_illegal; + } + + switch (sd->state) { + case sd_transfer_state: + if (!address_in_range(sd, "CLR_WRITE_PROT", addr, 1)) { + return sd_r1b; + } + + sd->state = sd_programming_state; + clear_bit(sd_addr_to_wpnum(addr), sd->wp_groups); + /* Bzzzzzzztt .... Operation complete. */ + sd->state = sd_transfer_state; + return sd_r1b; + + default: + break; + } + break; + + case 30: /* CMD30: SEND_WRITE_PROT */ + if (sd->size > SDSC_MAX_CAPACITY) { + return sd_illegal; + } + + switch (sd->state) { + case sd_transfer_state: + if (!address_in_range(sd, "SEND_WRITE_PROT", + req.arg, sd->blk_len)) { + return sd_r1; + } + + sd->state = sd_sendingdata_state; + *(uint32_t *) sd->data = sd_wpbits(sd, req.arg); + sd->data_start = addr; + sd->data_offset = 0; + return sd_r1; + + default: + break; + } + break; + + /* Erase commands (Class 5) */ + case 32: /* CMD32: ERASE_WR_BLK_START */ + switch (sd->state) { + case sd_transfer_state: + sd->erase_start = req.arg; + return sd_r1; + + default: + break; + } + break; + + case 33: /* CMD33: ERASE_WR_BLK_END */ + switch (sd->state) { + case sd_transfer_state: + sd->erase_end = req.arg; + return sd_r1; + + default: + break; + } + break; + + case 38: /* CMD38: ERASE */ + switch (sd->state) { + case sd_transfer_state: + if (sd->csd[14] & 0x30) { + sd->card_status |= WP_VIOLATION; + return sd_r1b; + } + + sd->state = sd_programming_state; + sd_erase(sd); + /* Bzzzzzzztt .... Operation complete. */ + sd->state = sd_transfer_state; + return sd_r1b; + + default: + break; + } + break; + + /* Lock card commands (Class 7) */ + case 42: /* CMD42: LOCK_UNLOCK */ + switch (sd->state) { + case sd_transfer_state: + sd->state = sd_receivingdata_state; + sd->data_start = 0; + sd->data_offset = 0; + return sd_r1; + + default: + break; + } + break; + + case 52 ... 54: + /* CMD52, CMD53, CMD54: reserved for SDIO cards + * (see the SDIO Simplified Specification V2.0) + * Handle as illegal command but do not complain + * on stderr, as some OSes may use these in their + * probing for presence of an SDIO card. 
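The probing that the comment above alludes to looks roughly like this on the host side; a standalone sketch (not part of the patch; cmd5_responded() stands in for a real host-controller transaction):

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for issuing CMD5: an SD memory card never answers it. */
static bool cmd5_responded(void)
{
    return false;
}

static void probe_card(void)
{
    if (!cmd5_responded()) {
        /* Not SDIO: continue with the memory-card init (CMD8, ACMD41). */
        printf("no SDIO function detected, probing as memory card\n");
    }
}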
+ */ + return sd_illegal; + + /* Application specific commands (Class 8) */ + case 55: /* CMD55: APP_CMD */ + switch (sd->state) { + case sd_ready_state: + case sd_identification_state: + case sd_inactive_state: + return sd_illegal; + case sd_idle_state: + if (rca) { + qemu_log_mask(LOG_GUEST_ERROR, + "SD: illegal RCA 0x%04x for APP_CMD\n", rca); + } + default: + break; + } + if (!sd->spi) { + if (sd->rca != rca) { + return sd_r0; + } + } + sd->expecting_acmd = true; + sd->card_status |= APP_CMD; + return sd_r1; + + case 56: /* CMD56: GEN_CMD */ + switch (sd->state) { + case sd_transfer_state: + sd->data_offset = 0; + if (req.arg & 1) + sd->state = sd_sendingdata_state; + else + sd->state = sd_receivingdata_state; + return sd_r1; + + default: + break; + } + break; + + case 58: /* CMD58: READ_OCR (SPI) */ + if (!sd->spi) { + goto bad_cmd; + } + return sd_r3; + + case 59: /* CMD59: CRC_ON_OFF (SPI) */ + if (!sd->spi) { + goto bad_cmd; + } + return sd_r1; + + default: + bad_cmd: + qemu_log_mask(LOG_GUEST_ERROR, "SD: Unknown CMD%i\n", req.cmd); + return sd_illegal; + } + + qemu_log_mask(LOG_GUEST_ERROR, "SD: CMD%i in a wrong state: %s\n", + req.cmd, sd_state_name(sd->state)); + return sd_illegal; +} + +static sd_rsp_type_t sd_app_command(SDState *sd, + SDRequest req) +{ + trace_sdcard_app_command(sd->proto_name, sd_acmd_name(req.cmd), + req.cmd, req.arg, sd_state_name(sd->state)); + sd->card_status |= APP_CMD; + switch (req.cmd) { + case 6: /* ACMD6: SET_BUS_WIDTH */ + if (sd->spi) { + goto unimplemented_spi_cmd; + } + switch (sd->state) { + case sd_transfer_state: + sd->sd_status[0] &= 0x3f; + sd->sd_status[0] |= (req.arg & 0x03) << 6; + return sd_r1; + + default: + break; + } + break; + + case 13: /* ACMD13: SD_STATUS */ + switch (sd->state) { + case sd_transfer_state: + sd->state = sd_sendingdata_state; + sd->data_start = 0; + sd->data_offset = 0; + return sd_r1; + + default: + break; + } + break; + + case 22: /* ACMD22: SEND_NUM_WR_BLOCKS */ + switch (sd->state) { + case sd_transfer_state: + *(uint32_t *) sd->data = sd->blk_written; + + sd->state = sd_sendingdata_state; + sd->data_start = 0; + sd->data_offset = 0; + return sd_r1; + + default: + break; + } + break; + + case 23: /* ACMD23: SET_WR_BLK_ERASE_COUNT */ + switch (sd->state) { + case sd_transfer_state: + return sd_r1; + + default: + break; + } + break; + + case 41: /* ACMD41: SD_APP_OP_COND */ + if (sd->spi) { + /* SEND_OP_CMD */ + sd->state = sd_transfer_state; + return sd_r1; + } + if (sd->state != sd_idle_state) { + break; + } + /* If it's the first ACMD41 since reset, we need to decide + * whether to power up. If this is not an enquiry ACMD41, + * we immediately report power on and proceed below to the + * ready state, but if it is, we set a timer to model a + * delay for power up. This works around a bug in EDK2 + * UEFI, which sends an initial enquiry ACMD41, but + * assumes that the card is in ready state as soon as it + * sees the power up bit set. */ + if (!FIELD_EX32(sd->ocr, OCR, CARD_POWER_UP)) { + if ((req.arg & ACMD41_ENQUIRY_MASK) != 0) { + timer_del(sd->ocr_power_timer); + sd_ocr_powerup(sd); + } else { + trace_sdcard_inquiry_cmd41(); + if (!timer_pending(sd->ocr_power_timer)) { + timer_mod_ns(sd->ocr_power_timer, + (qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + + OCR_POWER_DELAY_NS)); + } + } + } + + if (FIELD_EX32(sd->ocr & req.arg, OCR, VDD_VOLTAGE_WINDOW)) { + /* We accept any voltage. 10000 V is nothing.
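The host-side counterpart of this negotiation is a poll loop on the OCR busy bit; a self-contained sketch (not part of the patch; send_acmd41() fakes a card that powers up after two polls):

#include <stdint.h>

#define OCR_POWER_UP (1u << 31)
#define OCR_HCS      (1u << 30)

/* Fake card for the sketch: reports power-up after a couple of polls. */
static uint32_t send_acmd41(uint32_t arg)
{
    static int polls;
    (void)arg;
    return ++polls > 2 ? (OCR_POWER_UP | OCR_HCS | 0x00ff8000)
                       : 0x00ff8000;
}

/* First an enquiry ACMD41 (arg 0), then poll until OCR bit 31 is set. */
static uint32_t wait_until_powered_up(void)
{
    uint32_t ocr = send_acmd41(0);
    do {
        ocr = send_acmd41(OCR_HCS | (ocr & 0x00ffffff));
    } while (!(ocr & OCR_POWER_UP));
    return ocr;
}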
+ * + * Once we're powered up, we advance straight to ready state + * unless it's an enquiry ACMD41 (bits 23:0 == 0). + */ + sd->state = sd_ready_state; + } + + return sd_r3; + + case 42: /* ACMD42: SET_CLR_CARD_DETECT */ + switch (sd->state) { + case sd_transfer_state: + /* Bringing in the 50KOhm pull-up resistor... Done. */ + return sd_r1; + + default: + break; + } + break; + + case 51: /* ACMD51: SEND_SCR */ + switch (sd->state) { + case sd_transfer_state: + sd->state = sd_sendingdata_state; + sd->data_start = 0; + sd->data_offset = 0; + return sd_r1; + + default: + break; + } + break; + + case 18: /* Reserved for SD security applications */ + case 25: + case 26: + case 38: + case 43 ... 49: + /* Refer to the "SD Specifications Part3 Security Specification" for + * information about the SD Security Features. + */ + qemu_log_mask(LOG_UNIMP, "SD: CMD%i Security not implemented\n", + req.cmd); + return sd_illegal; + + default: + /* Fall back to standard commands. */ + return sd_normal_command(sd, req); + + unimplemented_spi_cmd: + /* Commands that are recognised but not yet implemented in SPI mode. */ + qemu_log_mask(LOG_UNIMP, "SD: CMD%i not implemented in SPI mode\n", + req.cmd); + return sd_illegal; + } + + qemu_log_mask(LOG_GUEST_ERROR, "SD: ACMD%i in a wrong state\n", req.cmd); + return sd_illegal; +} + +static int cmd_valid_while_locked(SDState *sd, const uint8_t cmd) +{ + /* Valid commands in locked state: + * basic class (0) + * lock card class (7) + * CMD16 + * implicitly, the ACMD prefix CMD55 + * ACMD41 and ACMD42 + * Anything else provokes an "illegal command" response. + */ + if (sd->expecting_acmd) { + return cmd == 41 || cmd == 42; + } + if (cmd == 16 || cmd == 55) { + return 1; + } + return sd_cmd_class[cmd] == 0 || sd_cmd_class[cmd] == 7; +} + +int sd_do_command(SDState *sd, SDRequest *req, + uint8_t *response) { + int last_state; + sd_rsp_type_t rtype; + int rsplen; + + if (!sd->blk || !blk_is_inserted(sd->blk) || !sd->enable) { + return 0; + } + + if (sd_req_crc_validate(req)) { + sd->card_status |= COM_CRC_ERROR; + rtype = sd_illegal; + goto send_response; + } + + if (req->cmd >= SDMMC_CMD_MAX) { + qemu_log_mask(LOG_GUEST_ERROR, "SD: incorrect command 0x%02x\n", + req->cmd); + req->cmd &= 0x3f; + } + + if (sd->card_status & CARD_IS_LOCKED) { + if (!cmd_valid_while_locked(sd, req->cmd)) { + sd->card_status |= ILLEGAL_COMMAND; + sd->expecting_acmd = false; + qemu_log_mask(LOG_GUEST_ERROR, "SD: Card is locked\n"); + rtype = sd_illegal; + goto send_response; + } + } + + last_state = sd->state; + sd_set_mode(sd); + + if (sd->expecting_acmd) { + sd->expecting_acmd = false; + rtype = sd_app_command(sd, *req); + } else { + rtype = sd_normal_command(sd, *req); + } + + if (rtype == sd_illegal) { + sd->card_status |= ILLEGAL_COMMAND; + } else { + /* Valid command, we can update the 'state before command' bits. + * (Do this now so they appear in r1 responses.) 
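Decoding those bits on the host side is a single shift and mask; a one-function sketch (illustrative only), matching the << 9 used just below:

#include <stdint.h>

/* CURRENT_STATE is the 4-bit field at bits 12:9 of the R1 card status. */
static unsigned r1_current_state(uint32_t card_status)
{
    return (card_status >> 9) & 0xf; /* 0:idle ... 8:disconnect */
}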
+ */ + sd->current_cmd = req->cmd; + sd->card_status &= ~CURRENT_STATE; + sd->card_status |= (last_state << 9); + } + +send_response: + switch (rtype) { + case sd_r1: + case sd_r1b: + sd_response_r1_make(sd, response); + rsplen = 4; + break; + + case sd_r2_i: + memcpy(response, sd->cid, sizeof(sd->cid)); + rsplen = 16; + break; + + case sd_r2_s: + memcpy(response, sd->csd, sizeof(sd->csd)); + rsplen = 16; + break; + + case sd_r3: + sd_response_r3_make(sd, response); + rsplen = 4; + break; + + case sd_r6: + sd_response_r6_make(sd, response); + rsplen = 4; + break; + + case sd_r7: + sd_response_r7_make(sd, response); + rsplen = 4; + break; + + case sd_r0: + case sd_illegal: + rsplen = 0; + break; + default: + g_assert_not_reached(); + } + trace_sdcard_response(sd_response_name(rtype), rsplen); + + if (rtype != sd_illegal) { + /* Clear the "clear on valid command" status bits now we've + * sent any response + */ + sd->card_status &= ~CARD_STATUS_B; + } + +#ifdef DEBUG_SD + qemu_hexdump(stderr, "Response", response, rsplen); +#endif + + return rsplen; +} + +void sd_write_byte(SDState *sd, uint8_t value) +{ + int i; + + if (!sd->blk || !blk_is_inserted(sd->blk) || !sd->enable) + return; + + if (sd->state != sd_receivingdata_state) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: not in Receiving-Data state\n", __func__); + return; + } + + if (sd->card_status & (ADDRESS_ERROR | WP_VIOLATION)) + return; + + trace_sdcard_write_data(sd->proto_name, + sd_acmd_name(sd->current_cmd), + sd->current_cmd, value); + switch (sd->current_cmd) { + case 24: /* CMD24: WRITE_SINGLE_BLOCK */ + sd->data[sd->data_offset ++] = value; + if (sd->data_offset >= sd->blk_len) { + /* TODO: Check CRC before committing */ + sd->state = sd_programming_state; + BLK_WRITE_BLOCK(sd->data_start, sd->data_offset); + sd->blk_written ++; + sd->csd[14] |= 0x40; + /* Bzzzzzzztt .... Operation complete. */ + sd->state = sd_transfer_state; + } + break; + + case 25: /* CMD25: WRITE_MULTIPLE_BLOCK */ + if (sd->data_offset == 0) { + /* Start of the block - let's check the address is valid */ + if (!address_in_range(sd, "WRITE_MULTIPLE_BLOCK", + sd->data_start, sd->blk_len)) { + break; + } + if (sd->size <= SDSC_MAX_CAPACITY) { + if (sd_wp_addr(sd, sd->data_start)) { + sd->card_status |= WP_VIOLATION; + break; + } + } + } + sd->data[sd->data_offset++] = value; + if (sd->data_offset >= sd->blk_len) { + /* TODO: Check CRC before committing */ + sd->state = sd_programming_state; + BLK_WRITE_BLOCK(sd->data_start, sd->data_offset); + sd->blk_written++; + sd->data_start += sd->blk_len; + sd->data_offset = 0; + sd->csd[14] |= 0x40; + + /* Bzzzzzzztt .... Operation complete. */ + if (sd->multi_blk_cnt != 0) { + if (--sd->multi_blk_cnt == 0) { + /* Stop! */ + sd->state = sd_transfer_state; + break; + } + } + + sd->state = sd_receivingdata_state; + } + break; + + case 26: /* CMD26: PROGRAM_CID */ + sd->data[sd->data_offset ++] = value; + if (sd->data_offset >= sizeof(sd->cid)) { + /* TODO: Check CRC before committing */ + sd->state = sd_programming_state; + for (i = 0; i < sizeof(sd->cid); i ++) + if ((sd->cid[i] | 0x00) != sd->data[i]) + sd->card_status |= CID_CSD_OVERWRITE; + + if (!(sd->card_status & CID_CSD_OVERWRITE)) + for (i = 0; i < sizeof(sd->cid); i ++) { + sd->cid[i] |= 0x00; + sd->cid[i] &= sd->data[i]; + } + /* Bzzzzzzztt .... Operation complete. 
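The PROGRAM_CID/PROGRAM_CSD handling here reduces, per byte, to an OR with the read-write mask followed by an AND with the request; a minimal sketch (illustrative, assuming the read-only bits were already checked to match, as the loops do before updating):

#include <stdint.h>

static uint8_t program_byte(uint8_t cur, uint8_t req, uint8_t rw_mask)
{
    /* Writable bits (mask set) take the requested value; the rest keep
     * their current value because the caller verified they match. */
    return (uint8_t)((cur | rw_mask) & req);
}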
*/ + sd->state = sd_transfer_state; + } + break; + + case 27: /* CMD27: PROGRAM_CSD */ + sd->data[sd->data_offset ++] = value; + if (sd->data_offset >= sizeof(sd->csd)) { + /* TODO: Check CRC before committing */ + sd->state = sd_programming_state; + for (i = 0; i < sizeof(sd->csd); i ++) + if ((sd->csd[i] | sd_csd_rw_mask[i]) != + (sd->data[i] | sd_csd_rw_mask[i])) + sd->card_status |= CID_CSD_OVERWRITE; + + /* Copy flag (OTP) & Permanent write protect */ + if (sd->csd[14] & ~sd->data[14] & 0x60) + sd->card_status |= CID_CSD_OVERWRITE; + + if (!(sd->card_status & CID_CSD_OVERWRITE)) + for (i = 0; i < sizeof(sd->csd); i ++) { + sd->csd[i] |= sd_csd_rw_mask[i]; + sd->csd[i] &= sd->data[i]; + } + /* Bzzzzzzztt .... Operation complete. */ + sd->state = sd_transfer_state; + } + break; + + case 42: /* CMD42: LOCK_UNLOCK */ + sd->data[sd->data_offset ++] = value; + if (sd->data_offset >= sd->blk_len) { + /* TODO: Check CRC before committing */ + sd->state = sd_programming_state; + sd_lock_command(sd); + /* Bzzzzzzztt .... Operation complete. */ + sd->state = sd_transfer_state; + } + break; + + case 56: /* CMD56: GEN_CMD */ + sd->data[sd->data_offset ++] = value; + if (sd->data_offset >= sd->blk_len) { + APP_WRITE_BLOCK(sd->data_start, sd->data_offset); + sd->state = sd_transfer_state; + } + break; + + default: + qemu_log_mask(LOG_GUEST_ERROR, "%s: unknown command\n", __func__); + break; + } +} + +#define SD_TUNING_BLOCK_SIZE 64 + +static const uint8_t sd_tuning_block_pattern[SD_TUNING_BLOCK_SIZE] = { + /* See: Physical Layer Simplified Specification Version 3.01, Table 4-2 */ + 0xff, 0x0f, 0xff, 0x00, 0x0f, 0xfc, 0xc3, 0xcc, + 0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef, + 0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb, + 0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef, + 0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c, + 0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee, + 0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff, + 0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde, +}; + +uint8_t sd_read_byte(SDState *sd) +{ + /* TODO: Append CRCs */ + uint8_t ret; + uint32_t io_len; + + if (!sd->blk || !blk_is_inserted(sd->blk) || !sd->enable) + return 0x00; + + if (sd->state != sd_sendingdata_state) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: not in Sending-Data state\n", __func__); + return 0x00; + } + + if (sd->card_status & (ADDRESS_ERROR | WP_VIOLATION)) + return 0x00; + + io_len = (sd->ocr & (1 << 30)) ? 
512 : sd->blk_len; + + trace_sdcard_read_data(sd->proto_name, + sd_acmd_name(sd->current_cmd), + sd->current_cmd, io_len); + switch (sd->current_cmd) { + case 6: /* CMD6: SWITCH_FUNCTION */ + ret = sd->data[sd->data_offset ++]; + + if (sd->data_offset >= 64) + sd->state = sd_transfer_state; + break; + + case 9: /* CMD9: SEND_CSD */ + case 10: /* CMD10: SEND_CID */ + ret = sd->data[sd->data_offset ++]; + + if (sd->data_offset >= 16) + sd->state = sd_transfer_state; + break; + + case 13: /* ACMD13: SD_STATUS */ + ret = sd->sd_status[sd->data_offset ++]; + + if (sd->data_offset >= sizeof(sd->sd_status)) + sd->state = sd_transfer_state; + break; + + case 17: /* CMD17: READ_SINGLE_BLOCK */ + if (sd->data_offset == 0) + BLK_READ_BLOCK(sd->data_start, io_len); + ret = sd->data[sd->data_offset ++]; + + if (sd->data_offset >= io_len) + sd->state = sd_transfer_state; + break; + + case 18: /* CMD18: READ_MULTIPLE_BLOCK */ + if (sd->data_offset == 0) { + if (!address_in_range(sd, "READ_MULTIPLE_BLOCK", + sd->data_start, io_len)) { + return 0x00; + } + BLK_READ_BLOCK(sd->data_start, io_len); + } + ret = sd->data[sd->data_offset ++]; + + if (sd->data_offset >= io_len) { + sd->data_start += io_len; + sd->data_offset = 0; + + if (sd->multi_blk_cnt != 0) { + if (--sd->multi_blk_cnt == 0) { + /* Stop! */ + sd->state = sd_transfer_state; + break; + } + } + } + break; + + case 19: /* CMD19: SEND_TUNING_BLOCK (SD) */ + if (sd->data_offset >= SD_TUNING_BLOCK_SIZE - 1) { + sd->state = sd_transfer_state; + } + ret = sd_tuning_block_pattern[sd->data_offset++]; + break; + + case 22: /* ACMD22: SEND_NUM_WR_BLOCKS */ + ret = sd->data[sd->data_offset ++]; + + if (sd->data_offset >= 4) + sd->state = sd_transfer_state; + break; + + case 30: /* CMD30: SEND_WRITE_PROT */ + ret = sd->data[sd->data_offset ++]; + + if (sd->data_offset >= 4) + sd->state = sd_transfer_state; + break; + + case 51: /* ACMD51: SEND_SCR */ + ret = sd->scr[sd->data_offset ++]; + + if (sd->data_offset >= sizeof(sd->scr)) + sd->state = sd_transfer_state; + break; + + case 56: /* CMD56: GEN_CMD */ + if (sd->data_offset == 0) + APP_READ_BLOCK(sd->data_start, sd->blk_len); + ret = sd->data[sd->data_offset ++]; + + if (sd->data_offset >= sd->blk_len) + sd->state = sd_transfer_state; + break; + + default: + qemu_log_mask(LOG_GUEST_ERROR, "%s: unknown command\n", __func__); + return 0x00; + } + + return ret; +} + +static bool sd_receive_ready(SDState *sd) +{ + return sd->state == sd_receivingdata_state; +} + +static bool sd_data_ready(SDState *sd) +{ + return sd->state == sd_sendingdata_state; +} + +void sd_enable(SDState *sd, bool enable) +{ + sd->enable = enable; +} + +static void sd_instance_init(Object *obj) +{ + SDState *sd = SD_CARD(obj); + + sd->enable = true; + sd->ocr_power_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, sd_ocr_powerup, sd); +} + +static void sd_instance_finalize(Object *obj) +{ + SDState *sd = SD_CARD(obj); + + timer_free(sd->ocr_power_timer); +} + +static void sd_realize(DeviceState *dev, Error **errp) +{ + SDState *sd = SD_CARD(dev); + int ret; + + sd->proto_name = sd->spi ? "SPI" : "SD"; + + switch (sd->spec_version) { + case SD_PHY_SPECv1_10_VERS + ... 
SD_PHY_SPECv3_01_VERS: + break; + default: + error_setg(errp, "Invalid SD card Spec version: %u", sd->spec_version); + return; + } + + if (sd->blk) { + int64_t blk_size; + + if (!blk_supports_write_perm(sd->blk)) { + error_setg(errp, "Cannot use read-only drive as SD card"); + return; + } + + blk_size = blk_getlength(sd->blk); + if (blk_size > 0 && !is_power_of_2(blk_size)) { + int64_t blk_size_aligned = pow2ceil(blk_size); + char *blk_size_str; + + blk_size_str = size_to_str(blk_size); + error_setg(errp, "Invalid SD card size: %s", blk_size_str); + g_free(blk_size_str); + + blk_size_str = size_to_str(blk_size_aligned); + error_append_hint(errp, + "SD card size has to be a power of 2, e.g. %s.\n" + "You can resize disk images with" + " 'qemu-img resize <imagefile> <new-size>'\n" + "(note that this will lose data if you make the" + " image smaller than it currently is).\n", + blk_size_str); + g_free(blk_size_str); + + return; + } + + ret = blk_set_perm(sd->blk, BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE, + BLK_PERM_ALL, errp); + if (ret < 0) { + return; + } + blk_set_dev_ops(sd->blk, &sd_block_ops, sd); + } +} + +static Property sd_properties[] = { + DEFINE_PROP_UINT8("spec_version", SDState, + spec_version, SD_PHY_SPECv2_00_VERS), + DEFINE_PROP_DRIVE("drive", SDState, blk), + /* We do not model the chip select pin, so allow the board to select + * whether card should be in SSI or MMC/SD mode. It is also up to the + * board to ensure that ssi transfers only occur when the chip select + * is asserted. */ + DEFINE_PROP_BOOL("spi", SDState, spi, false), + DEFINE_PROP_END_OF_LIST() +}; + +static void sd_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + SDCardClass *sc = SD_CARD_CLASS(klass); + + dc->realize = sd_realize; + device_class_set_props(dc, sd_properties); + dc->vmsd = &sd_vmstate; + dc->reset = sd_reset; + dc->bus_type = TYPE_SD_BUS; + set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); + + sc->set_voltage = sd_set_voltage; + sc->get_dat_lines = sd_get_dat_lines; + sc->get_cmd_line = sd_get_cmd_line; + sc->do_command = sd_do_command; + sc->write_byte = sd_write_byte; + sc->read_byte = sd_read_byte; + sc->receive_ready = sd_receive_ready; + sc->data_ready = sd_data_ready; + sc->enable = sd_enable; + sc->get_inserted = sd_get_inserted; + sc->get_readonly = sd_get_readonly; +} + +static const TypeInfo sd_info = { + .name = TYPE_SD_CARD, + .parent = TYPE_DEVICE, + .instance_size = sizeof(SDState), + .class_size = sizeof(SDCardClass), + .class_init = sd_class_init, + .instance_init = sd_instance_init, + .instance_finalize = sd_instance_finalize, +}; + +static void sd_register_types(void) +{ + type_register_static(&sd_info); +} + +type_init(sd_register_types) diff --git a/hw/sd/sdhci-internal.h b/hw/sd/sdhci-internal.h new file mode 100644 index 000000000..e8c753d6d --- /dev/null +++ b/hw/sd/sdhci-internal.h @@ -0,0 +1,346 @@ +/* + * SD Association Host Standard Specification v2.0 controller emulation + * + * Copyright (c) 2011 Samsung Electronics Co., Ltd. + * Mitsyanko Igor + * Peter A.G. Crosthwaite + * + * Based on MMC controller for Samsung S5PC1xx-based board emulation + * by Alexey Merkulov and Vladimir Monakhov. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version.
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * See the GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . + */ +#ifndef SDHCI_INTERNAL_H +#define SDHCI_INTERNAL_H + +#include "hw/registerfields.h" + +/* R/W SDMA System Address register 0x0 */ +#define SDHC_SYSAD 0x00 + +/* R/W Host DMA Buffer Boundary and Transfer Block Size Register 0x0 */ +#define SDHC_BLKSIZE 0x04 + +/* R/W Blocks count for current transfer 0x0 */ +#define SDHC_BLKCNT 0x06 + +/* R/W Command Argument Register 0x0 */ +#define SDHC_ARGUMENT 0x08 + +/* R/W Transfer Mode Setting Register 0x0 */ +#define SDHC_TRNMOD 0x0C +#define SDHC_TRNS_DMA 0x0001 +#define SDHC_TRNS_BLK_CNT_EN 0x0002 +#define SDHC_TRNS_ACMD12 0x0004 +#define SDHC_TRNS_ACMD23 0x0008 /* since v3 */ +#define SDHC_TRNS_READ 0x0010 +#define SDHC_TRNS_MULTI 0x0020 +#define SDHC_TRNMOD_MASK 0x0037 + +/* R/W Command Register 0x0 */ +#define SDHC_CMDREG 0x0E +#define SDHC_CMD_RSP_WITH_BUSY (3 << 0) +#define SDHC_CMD_DATA_PRESENT (1 << 5) +#define SDHC_CMD_SUSPEND (1 << 6) +#define SDHC_CMD_RESUME (1 << 7) +#define SDHC_CMD_ABORT ((1 << 6)|(1 << 7)) +#define SDHC_CMD_TYPE_MASK ((1 << 6)|(1 << 7)) +#define SDHC_COMMAND_TYPE(x) ((x) & SDHC_CMD_TYPE_MASK) + +/* ROC Response Register 0 0x0 */ +#define SDHC_RSPREG0 0x10 +/* ROC Response Register 1 0x0 */ +#define SDHC_RSPREG1 0x14 +/* ROC Response Register 2 0x0 */ +#define SDHC_RSPREG2 0x18 +/* ROC Response Register 3 0x0 */ +#define SDHC_RSPREG3 0x1C + +/* R/W Buffer Data Register 0x0 */ +#define SDHC_BDATA 0x20 + +/* R/ROC Present State Register 0x000A0000 */ +#define SDHC_PRNSTS 0x24 +#define SDHC_CMD_INHIBIT 0x00000001 +#define SDHC_DATA_INHIBIT 0x00000002 +#define SDHC_DAT_LINE_ACTIVE 0x00000004 +#define SDHC_IMX_CLOCK_GATE_OFF 0x00000080 +#define SDHC_DOING_WRITE 0x00000100 +#define SDHC_DOING_READ 0x00000200 +#define SDHC_SPACE_AVAILABLE 0x00000400 +#define SDHC_DATA_AVAILABLE 0x00000800 +#define SDHC_CARD_PRESENT 0x00010000 +#define SDHC_CARD_DETECT 0x00040000 +#define SDHC_WRITE_PROTECT 0x00080000 +FIELD(SDHC_PRNSTS, DAT_LVL, 20, 4); +FIELD(SDHC_PRNSTS, CMD_LVL, 24, 1); +#define TRANSFERRING_DATA(x) \ + ((x) & (SDHC_DOING_READ | SDHC_DOING_WRITE)) + +/* R/W Host control Register 0x0 */ +#define SDHC_HOSTCTL 0x28 +#define SDHC_CTRL_LED 0x01 +#define SDHC_CTRL_DATATRANSFERWIDTH 0x02 /* SD mode only */ +#define SDHC_CTRL_HIGH_SPEED 0x04 +#define SDHC_CTRL_DMA_CHECK_MASK 0x18 +#define SDHC_CTRL_SDMA 0x00 +#define SDHC_CTRL_ADMA1_32 0x08 /* NOT ALLOWED since v2 */ +#define SDHC_CTRL_ADMA2_32 0x10 +#define SDHC_CTRL_ADMA2_64 0x18 +#define SDHC_DMA_TYPE(x) ((x) & SDHC_CTRL_DMA_CHECK_MASK) +#define SDHC_CTRL_4BITBUS 0x02 +#define SDHC_CTRL_8BITBUS 0x20 +#define SDHC_CTRL_CDTEST_INS 0x40 +#define SDHC_CTRL_CDTEST_EN 0x80 + +/* R/W Power Control Register 0x0 */ +#define SDHC_PWRCON 0x29 +#define SDHC_POWER_ON (1 << 0) +FIELD(SDHC_PWRCON, BUS_VOLTAGE, 1, 3); + +/* R/W Block Gap Control Register 0x0 */ +#define SDHC_BLKGAP 0x2A +#define SDHC_STOP_AT_GAP_REQ 0x01 +#define SDHC_CONTINUE_REQ 0x02 + +/* R/W WakeUp Control Register 0x0 */ +#define SDHC_WAKCON 0x2B +#define SDHC_WKUP_ON_INS (1 << 1) +#define SDHC_WKUP_ON_RMV (1 << 2) + +/* CLKCON */ +#define SDHC_CLKCON 0x2C +#define SDHC_CLOCK_INT_STABLE 0x0002 +#define SDHC_CLOCK_INT_EN 0x0001 +#define 
SDHC_CLOCK_SDCLK_EN (1 << 2) +#define SDHC_CLOCK_CHK_MASK 0x0007 +#define SDHC_CLOCK_IS_ON(x) \ + (((x) & SDHC_CLOCK_CHK_MASK) == SDHC_CLOCK_CHK_MASK) + +/* R/W Timeout Control Register 0x0 */ +#define SDHC_TIMEOUTCON 0x2E +FIELD(SDHC_TIMEOUTCON, COUNTER, 0, 4); + +/* R/W Software Reset Register 0x0 */ +#define SDHC_SWRST 0x2F +#define SDHC_RESET_ALL 0x01 +#define SDHC_RESET_CMD 0x02 +#define SDHC_RESET_DATA 0x04 + +/* ROC/RW1C Normal Interrupt Status Register 0x0 */ +#define SDHC_NORINTSTS 0x30 +#define SDHC_NIS_ERR 0x8000 +#define SDHC_NIS_CMDCMP 0x0001 +#define SDHC_NIS_TRSCMP 0x0002 +#define SDHC_NIS_BLKGAP 0x0004 +#define SDHC_NIS_DMA 0x0008 +#define SDHC_NIS_WBUFRDY 0x0010 +#define SDHC_NIS_RBUFRDY 0x0020 +#define SDHC_NIS_INSERT 0x0040 +#define SDHC_NIS_REMOVE 0x0080 +#define SDHC_NIS_CARDINT 0x0100 + +/* ROC/RW1C Error Interrupt Status Register 0x0 */ +#define SDHC_ERRINTSTS 0x32 +#define SDHC_EIS_CMDTIMEOUT 0x0001 +#define SDHC_EIS_BLKGAP 0x0004 +#define SDHC_EIS_CMDIDX 0x0008 +#define SDHC_EIS_CMD12ERR 0x0100 +#define SDHC_EIS_ADMAERR 0x0200 + +/* R/W Normal Interrupt Status Enable Register 0x0 */ +#define SDHC_NORINTSTSEN 0x34 +#define SDHC_NISEN_CMDCMP 0x0001 +#define SDHC_NISEN_TRSCMP 0x0002 +#define SDHC_NISEN_DMA 0x0008 +#define SDHC_NISEN_WBUFRDY 0x0010 +#define SDHC_NISEN_RBUFRDY 0x0020 +#define SDHC_NISEN_INSERT 0x0040 +#define SDHC_NISEN_REMOVE 0x0080 +#define SDHC_NISEN_CARDINT 0x0100 + +/* R/W Error Interrupt Status Enable Register 0x0 */ +#define SDHC_ERRINTSTSEN 0x36 +#define SDHC_EISEN_CMDTIMEOUT 0x0001 +#define SDHC_EISEN_BLKGAP 0x0004 +#define SDHC_EISEN_CMDIDX 0x0008 +#define SDHC_EISEN_ADMAERR 0x0200 + +/* R/W Normal Interrupt Signal Enable Register 0x0 */ +#define SDHC_NORINTSIGEN 0x38 +#define SDHC_NORINTSIG_INSERT (1 << 6) +#define SDHC_NORINTSIG_REMOVE (1 << 7) + +/* R/W Error Interrupt Signal Enable Register 0x0 */ +#define SDHC_ERRINTSIGEN 0x3A + +/* ROC Auto CMD12 error status register 0x0 */ +#define SDHC_ACMD12ERRSTS 0x3C +FIELD(SDHC_ACMD12ERRSTS, TIMEOUT_ERR, 1, 1); +FIELD(SDHC_ACMD12ERRSTS, CRC_ERR, 2, 1); +FIELD(SDHC_ACMD12ERRSTS, INDEX_ERR, 4, 1); + +/* Host Control Register 2 (since v3) */ +#define SDHC_HOSTCTL2 0x3E +FIELD(SDHC_HOSTCTL2, UHS_MODE_SEL, 0, 3); +FIELD(SDHC_HOSTCTL2, V18_ENA, 3, 1); /* UHS-I only */ +FIELD(SDHC_HOSTCTL2, DRIVER_STRENGTH, 4, 2); /* UHS-I only */ +FIELD(SDHC_HOSTCTL2, EXECUTE_TUNING, 6, 1); /* UHS-I only */ +FIELD(SDHC_HOSTCTL2, SAMPLING_CLKSEL, 7, 1); /* UHS-I only */ +FIELD(SDHC_HOSTCTL2, UHS_II_ENA, 8, 1); /* since v4 */ +FIELD(SDHC_HOSTCTL2, ADMA2_LENGTH, 10, 1); /* since v4 */ +FIELD(SDHC_HOSTCTL2, CMD23_ENA, 11, 1); /* since v4 */ +FIELD(SDHC_HOSTCTL2, VERSION4, 12, 1); /* since v4 */ +FIELD(SDHC_HOSTCTL2, ASYNC_INT, 14, 1); +FIELD(SDHC_HOSTCTL2, PRESET_ENA, 15, 1); + +/* HWInit Capabilities Register 0x05E80080 */ +#define SDHC_CAPAB 0x40 +FIELD(SDHC_CAPAB, TOCLKFREQ, 0, 6); +FIELD(SDHC_CAPAB, TOUNIT, 7, 1); +FIELD(SDHC_CAPAB, BASECLKFREQ, 8, 8); +FIELD(SDHC_CAPAB, MAXBLOCKLENGTH, 16, 2); +FIELD(SDHC_CAPAB, EMBEDDED_8BIT, 18, 1); /* since v3 */ +FIELD(SDHC_CAPAB, ADMA2, 19, 1); /* since v2 */ +FIELD(SDHC_CAPAB, ADMA1, 20, 1); /* v1 only? 
*/ +FIELD(SDHC_CAPAB, HIGHSPEED, 21, 1); +FIELD(SDHC_CAPAB, SDMA, 22, 1); +FIELD(SDHC_CAPAB, SUSPRESUME, 23, 1); +FIELD(SDHC_CAPAB, V33, 24, 1); +FIELD(SDHC_CAPAB, V30, 25, 1); +FIELD(SDHC_CAPAB, V18, 26, 1); +FIELD(SDHC_CAPAB, BUS64BIT_V4, 27, 1); /* since v4.10 */ +FIELD(SDHC_CAPAB, BUS64BIT, 28, 1); /* since v2 */ +FIELD(SDHC_CAPAB, ASYNC_INT, 29, 1); /* since v3 */ +FIELD(SDHC_CAPAB, SLOT_TYPE, 30, 2); /* since v3 */ +FIELD(SDHC_CAPAB, BUS_SPEED, 32, 3); /* since v3 */ +FIELD(SDHC_CAPAB, UHS_II, 35, 8); /* since v4.20 */ +FIELD(SDHC_CAPAB, DRIVER_STRENGTH, 36, 3); /* since v3 */ +FIELD(SDHC_CAPAB, DRIVER_TYPE_A, 36, 1); /* since v3 */ +FIELD(SDHC_CAPAB, DRIVER_TYPE_C, 37, 1); /* since v3 */ +FIELD(SDHC_CAPAB, DRIVER_TYPE_D, 38, 1); /* since v3 */ +FIELD(SDHC_CAPAB, TIMER_RETUNING, 40, 4); /* since v3 */ +FIELD(SDHC_CAPAB, SDR50_TUNING, 45, 1); /* since v3 */ +FIELD(SDHC_CAPAB, RETUNING_MODE, 46, 2); /* since v3 */ +FIELD(SDHC_CAPAB, CLOCK_MULT, 48, 8); /* since v3 */ +FIELD(SDHC_CAPAB, ADMA3, 59, 1); /* since v4.20 */ +FIELD(SDHC_CAPAB, V18_VDD2, 60, 1); /* since v4.20 */ + +/* HWInit Maximum Current Capabilities Register 0x0 */ +#define SDHC_MAXCURR 0x48 +FIELD(SDHC_MAXCURR, V33_VDD1, 0, 8); +FIELD(SDHC_MAXCURR, V30_VDD1, 8, 8); +FIELD(SDHC_MAXCURR, V18_VDD1, 16, 8); +FIELD(SDHC_MAXCURR, V18_VDD2, 32, 8); /* since v4.20 */ + +/* W Force Event Auto CMD12 Error Interrupt Register 0x0000 */ +#define SDHC_FEAER 0x50 +/* W Force Event Error Interrupt Register Error Interrupt 0x0000 */ +#define SDHC_FEERR 0x52 + +/* R/W ADMA Error Status Register 0x00 */ +#define SDHC_ADMAERR 0x54 +#define SDHC_ADMAERR_LENGTH_MISMATCH (1 << 2) +#define SDHC_ADMAERR_STATE_ST_STOP (0 << 0) +#define SDHC_ADMAERR_STATE_ST_FDS (1 << 0) +#define SDHC_ADMAERR_STATE_ST_TFR (3 << 0) +#define SDHC_ADMAERR_STATE_MASK (3 << 0) + +/* R/W ADMA System Address Register 0x00 */ +#define SDHC_ADMASYSADDR 0x58 +#define SDHC_ADMA_ATTR_SET_LEN (1 << 4) +#define SDHC_ADMA_ATTR_ACT_TRAN (1 << 5) +#define SDHC_ADMA_ATTR_ACT_LINK (3 << 4) +#define SDHC_ADMA_ATTR_INT (1 << 2) +#define SDHC_ADMA_ATTR_END (1 << 1) +#define SDHC_ADMA_ATTR_VALID (1 << 0) +#define SDHC_ADMA_ATTR_ACT_MASK ((1 << 4)|(1 << 5)) + +/* Slot interrupt status */ +#define SDHC_SLOT_INT_STATUS 0xFC + +/* HWInit Host Controller Version Register */ +#define SDHC_HCVER 0xFE +#define SDHC_HCVER_VENDOR 0x24 + +#define SDHC_REGISTERS_MAP_SIZE 0x100 +#define SDHC_INSERTION_DELAY (NANOSECONDS_PER_SECOND) +#define SDHC_TRANSFER_DELAY 100 +#define SDHC_ADMA_DESCS_PER_DELAY 5 +#define SDHC_CMD_RESPONSE (3 << 0) + +enum { + sdhc_not_stopped = 0, /* normal SDHC state */ + sdhc_gap_read = 1, /* SDHC stopped at block gap during read operation */ + sdhc_gap_write = 2 /* SDHC stopped at block gap during write operation */ +}; + +extern const VMStateDescription sdhci_vmstate; + + +#define ESDHC_MIX_CTRL 0x48 + +#define ESDHC_VENDOR_SPEC 0xc0 +#define ESDHC_IMX_FRC_SDCLK_ON (1 << 8) + +#define ESDHC_DLL_CTRL 0x60 + +#define ESDHC_TUNING_CTRL 0xcc +#define ESDHC_TUNE_CTRL_STATUS 0x68 +#define ESDHC_WTMK_LVL 0x44 + +/* Undocumented register used by guests working around erratum ERR004536 */ +#define ESDHC_UNDOCUMENTED_REG27 0x6c + +#define ESDHC_CTRL_4BITBUS (0x1 << 1) +#define ESDHC_CTRL_8BITBUS (0x2 << 1) + +#define ESDHC_PRNSTS_SDSTB (1 << 3) + +/* + * Default SD/MMC host controller features information, which will be + * presented in CAPABILITIES register of generic SD host controller at reset. 
+ *
+ * support:
+ * - 3.3v and 1.8v voltages
+ * - SDMA/ADMA1/ADMA2
+ * - high-speed
+ * max host controller R/W buffer size: 512B
+ * max clock frequency for SDclock: 52 MHz
+ * timeout clock frequency: 52 MHz
+ *
+ * does not support:
+ * - 3.0v voltage
+ * - 64-bit system bus
+ * - suspend/resume
+ */
+#define SDHC_CAPAB_REG_DEFAULT 0x057834b4
+
+#define DEFINE_SDHCI_COMMON_PROPERTIES(_state) \
+    DEFINE_PROP_UINT8("sd-spec-version", _state, sd_spec_version, 2), \
+    DEFINE_PROP_UINT8("uhs", _state, uhs_mode, UHS_NOT_SUPPORTED), \
+    DEFINE_PROP_UINT8("vendor", _state, vendor, SDHCI_VENDOR_NONE), \
+    \
+    /* Capabilities registers provide information on supported
+     * features of this specific host controller implementation */ \
+    DEFINE_PROP_UINT64("capareg", _state, capareg, SDHC_CAPAB_REG_DEFAULT), \
+    DEFINE_PROP_UINT64("maxcurr", _state, maxcurr, 0)
+
+void sdhci_initfn(SDHCIState *s);
+void sdhci_uninitfn(SDHCIState *s);
+void sdhci_common_realize(SDHCIState *s, Error **errp);
+void sdhci_common_unrealize(SDHCIState *s);
+void sdhci_common_class_init(ObjectClass *klass, void *data);
+
+#endif
diff --git a/hw/sd/sdhci-pci.c b/hw/sd/sdhci-pci.c
new file mode 100644
index 000000000..c737c8b93
--- /dev/null
+++ b/hw/sd/sdhci-pci.c
@@ -0,0 +1,87 @@
+/*
+ * SDHCI device on PCI
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see .
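The SDHC_CAPAB_REG_DEFAULT value 0x057834b4 defined just above encodes exactly the feature list in its comment. A small self-contained decoder, using a plain shift-and-mask helper in place of the FIELD_EX64() machinery and the bit offsets from the FIELD() declarations earlier in this header:

    #include <stdint.h>
    #include <stdio.h>

    /* Extract 'len' bits of 'reg' starting at bit 'start' (like FIELD_EX64). */
    static unsigned extract_field(uint64_t reg, unsigned start, unsigned len)
    {
        return (unsigned)((reg >> start) & ((1ULL << len) - 1));
    }

    int main(void)
    {
        uint64_t capab = 0x057834b4; /* SDHC_CAPAB_REG_DEFAULT */

        printf("timeout clock: %u %s\n", extract_field(capab, 0, 6),
               extract_field(capab, 7, 1) ? "MHz" : "kHz");
        printf("base clock:    %u MHz\n", extract_field(capab, 8, 8));
        printf("max block:     %u bytes\n", 512u << extract_field(capab, 16, 2));
        printf("ADMA2=%u ADMA1=%u SDMA=%u high-speed=%u\n",
               extract_field(capab, 19, 1), extract_field(capab, 20, 1),
               extract_field(capab, 22, 1), extract_field(capab, 21, 1));
        printf("3.3V=%u 3.0V=%u 1.8V=%u suspend/resume=%u\n",
               extract_field(capab, 24, 1), extract_field(capab, 25, 1),
               extract_field(capab, 26, 1), extract_field(capab, 23, 1));
        return 0;
    }

Run, this reports 52 MHz base and timeout clocks, a 512-byte maximum block length, SDMA/ADMA1/ADMA2 and high-speed set, 3.3V and 1.8V set, and 3.0V and suspend/resume clear, matching the comment above.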
+ */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/module.h" +#include "hw/qdev-properties.h" +#include "hw/sd/sdhci.h" +#include "sdhci-internal.h" + +static Property sdhci_pci_properties[] = { + DEFINE_SDHCI_COMMON_PROPERTIES(SDHCIState), + DEFINE_PROP_END_OF_LIST(), +}; + +static void sdhci_pci_realize(PCIDevice *dev, Error **errp) +{ + ERRP_GUARD(); + SDHCIState *s = PCI_SDHCI(dev); + + sdhci_initfn(s); + sdhci_common_realize(s, errp); + if (*errp) { + return; + } + + dev->config[PCI_CLASS_PROG] = 0x01; /* Standard Host supported DMA */ + dev->config[PCI_INTERRUPT_PIN] = 0x01; /* interrupt pin A */ + s->irq = pci_allocate_irq(dev); + s->dma_as = pci_get_address_space(dev); + pci_register_bar(dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &s->iomem); +} + +static void sdhci_pci_exit(PCIDevice *dev) +{ + SDHCIState *s = PCI_SDHCI(dev); + + sdhci_common_unrealize(s); + sdhci_uninitfn(s); +} + +static void sdhci_pci_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + + k->realize = sdhci_pci_realize; + k->exit = sdhci_pci_exit; + k->vendor_id = PCI_VENDOR_ID_REDHAT; + k->device_id = PCI_DEVICE_ID_REDHAT_SDHCI; + k->class_id = PCI_CLASS_SYSTEM_SDHCI; + device_class_set_props(dc, sdhci_pci_properties); + + sdhci_common_class_init(klass, data); +} + +static const TypeInfo sdhci_pci_info = { + .name = TYPE_PCI_SDHCI, + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(SDHCIState), + .class_init = sdhci_pci_class_init, + .interfaces = (InterfaceInfo[]) { + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { }, + }, +}; + +static void sdhci_pci_register_type(void) +{ + type_register_static(&sdhci_pci_info); +} + +type_init(sdhci_pci_register_type) diff --git a/hw/sd/sdhci.c b/hw/sd/sdhci.c new file mode 100644 index 000000000..c9dc065cc --- /dev/null +++ b/hw/sd/sdhci.c @@ -0,0 +1,1869 @@ +/* + * SD Association Host Standard Specification v2.0 controller emulation + * + * Datasheet: PartA2_SD_Host_Controller_Simplified_Specification_Ver2.00.pdf + * + * Copyright (c) 2011 Samsung Electronics Co., Ltd. + * Mitsyanko Igor + * Peter A.G. Crosthwaite + * + * Based on MMC controller for Samsung S5PC1xx-based board emulation + * by Alexey Merkulov and Vladimir Monakhov. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * See the GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . 
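sdhci_pci_realize() above sets PCI_CLASS_PROG to 0x01 ("Standard Host supported DMA") alongside the PCI_CLASS_SYSTEM_SDHCI class code (0x0805: base class 0x08, generic system peripheral; subclass 0x05, SD host controller). A sketch of the kind of class-code match a guest driver could use to probe such a function; the helper name and framing are illustrative, not taken from this patch:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PCI_CLASS_SYSTEM_SDHCI 0x0805 /* base class 0x08, subclass 0x05 */

    /* Match on the 24-bit (class << 8 | prog-if) value from config space. */
    static bool is_sdhci_with_dma(uint32_t class_prog_if)
    {
        uint16_t class_code = class_prog_if >> 8;
        uint8_t prog_if = class_prog_if & 0xff;

        return class_code == PCI_CLASS_SYSTEM_SDHCI && prog_if == 0x01;
    }

    int main(void)
    {
        /* Value the realize function above would leave in config space. */
        uint32_t id = (PCI_CLASS_SYSTEM_SDHCI << 8) | 0x01;

        printf("SDHCI with DMA: %s\n", is_sdhci_with_dma(id) ? "yes" : "no");
        return 0;
    }

Such a controller is then typically paired with a card along the lines of "-device sdhci-pci -drive if=none,id=sd0,format=raw,file=card.img -device sd-card,drive=sd0" (an illustrative invocation, not part of this patch).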
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/units.h"
+#include "qemu/error-report.h"
+#include "qapi/error.h"
+#include "hw/irq.h"
+#include "hw/qdev-properties.h"
+#include "sysemu/dma.h"
+#include "qemu/timer.h"
+#include "qemu/bitops.h"
+#include "hw/sd/sdhci.h"
+#include "migration/vmstate.h"
+#include "sdhci-internal.h"
+#include "qemu/log.h"
+#include "qemu/module.h"
+#include "trace.h"
+#include "qom/object.h"
+
+#define TYPE_SDHCI_BUS "sdhci-bus"
+/* This is reusing the SDBus typedef from SD_BUS */
+DECLARE_INSTANCE_CHECKER(SDBus, SDHCI_BUS,
+                         TYPE_SDHCI_BUS)
+
+#define MASKED_WRITE(reg, mask, val) (reg = (reg & (mask)) | (val))
+
+static inline unsigned int sdhci_get_fifolen(SDHCIState *s)
+{
+    return 1 << (9 + FIELD_EX32(s->capareg, SDHC_CAPAB, MAXBLOCKLENGTH));
+}
+
+/* return true on error */
+static bool sdhci_check_capab_freq_range(SDHCIState *s, const char *desc,
+                                         uint8_t freq, Error **errp)
+{
+    if (s->sd_spec_version >= 3) {
+        return false;
+    }
+    switch (freq) {
+    case 0:
+    case 10 ... 63:
+        break;
+    default:
+        error_setg(errp, "SD %s clock frequency can have a value "
+                   "in the range 0-63 only", desc);
+        return true;
+    }
+    return false;
+}
+
+static void sdhci_check_capareg(SDHCIState *s, Error **errp)
+{
+    uint64_t msk = s->capareg;
+    uint32_t val;
+    bool y;
+
+    switch (s->sd_spec_version) {
+    case 4:
+        val = FIELD_EX64(s->capareg, SDHC_CAPAB, BUS64BIT_V4);
+        trace_sdhci_capareg("64-bit system bus (v4)", val);
+        msk = FIELD_DP64(msk, SDHC_CAPAB, BUS64BIT_V4, 0);
+
+        val = FIELD_EX64(s->capareg, SDHC_CAPAB, UHS_II);
+        trace_sdhci_capareg("UHS-II", val);
+        msk = FIELD_DP64(msk, SDHC_CAPAB, UHS_II, 0);
+
+        val = FIELD_EX64(s->capareg, SDHC_CAPAB, ADMA3);
+        trace_sdhci_capareg("ADMA3", val);
+        msk = FIELD_DP64(msk, SDHC_CAPAB, ADMA3, 0);
+
+        /* fallthrough */
+    case 3:
+        val = FIELD_EX64(s->capareg, SDHC_CAPAB, ASYNC_INT);
+        trace_sdhci_capareg("async interrupt", val);
+        msk = FIELD_DP64(msk, SDHC_CAPAB, ASYNC_INT, 0);
+
+        val = FIELD_EX64(s->capareg, SDHC_CAPAB, SLOT_TYPE);
+        if (val) {
+            error_setg(errp, "slot-type not supported");
+            return;
+        }
+        trace_sdhci_capareg("slot type", val);
+        msk = FIELD_DP64(msk, SDHC_CAPAB, SLOT_TYPE, 0);
+
+        if (val != 2) {
+            val = FIELD_EX64(s->capareg, SDHC_CAPAB, EMBEDDED_8BIT);
+            trace_sdhci_capareg("8-bit bus", val);
+        }
+        msk = FIELD_DP64(msk, SDHC_CAPAB, EMBEDDED_8BIT, 0);
+
+        val = FIELD_EX64(s->capareg, SDHC_CAPAB, BUS_SPEED);
+        trace_sdhci_capareg("bus speed mask", val);
+        msk = FIELD_DP64(msk, SDHC_CAPAB, BUS_SPEED, 0);
+
+        val = FIELD_EX64(s->capareg, SDHC_CAPAB, DRIVER_STRENGTH);
+        trace_sdhci_capareg("driver strength mask", val);
+        msk = FIELD_DP64(msk, SDHC_CAPAB, DRIVER_STRENGTH, 0);
+
+        val = FIELD_EX64(s->capareg, SDHC_CAPAB, TIMER_RETUNING);
+        trace_sdhci_capareg("timer re-tuning", val);
+        msk = FIELD_DP64(msk, SDHC_CAPAB, TIMER_RETUNING, 0);
+
+        val = FIELD_EX64(s->capareg, SDHC_CAPAB, SDR50_TUNING);
+        trace_sdhci_capareg("use SDR50 tuning", val);
+        msk = FIELD_DP64(msk, SDHC_CAPAB, SDR50_TUNING, 0);
+
+        val = FIELD_EX64(s->capareg, SDHC_CAPAB, RETUNING_MODE);
+        trace_sdhci_capareg("re-tuning mode", val);
+        msk = FIELD_DP64(msk, SDHC_CAPAB, RETUNING_MODE, 0);
+
+        val = FIELD_EX64(s->capareg, SDHC_CAPAB, CLOCK_MULT);
+        trace_sdhci_capareg("clock multiplier", val);
+        msk = FIELD_DP64(msk, SDHC_CAPAB, CLOCK_MULT, 0);
+
+        /* fallthrough */
+    case 2: /* default version */
+        val = FIELD_EX64(s->capareg, SDHC_CAPAB, ADMA2);
+        trace_sdhci_capareg("ADMA2", val);
+        msk = FIELD_DP64(msk, SDHC_CAPAB, ADMA2, 0);
+
+        val =
FIELD_EX64(s->capareg, SDHC_CAPAB, ADMA1); + trace_sdhci_capareg("ADMA1", val); + msk = FIELD_DP64(msk, SDHC_CAPAB, ADMA1, 0); + + val = FIELD_EX64(s->capareg, SDHC_CAPAB, BUS64BIT); + trace_sdhci_capareg("64-bit system bus (v3)", val); + msk = FIELD_DP64(msk, SDHC_CAPAB, BUS64BIT, 0); + + /* fallthrough */ + case 1: + y = FIELD_EX64(s->capareg, SDHC_CAPAB, TOUNIT); + msk = FIELD_DP64(msk, SDHC_CAPAB, TOUNIT, 0); + + val = FIELD_EX64(s->capareg, SDHC_CAPAB, TOCLKFREQ); + trace_sdhci_capareg(y ? "timeout (MHz)" : "Timeout (KHz)", val); + if (sdhci_check_capab_freq_range(s, "timeout", val, errp)) { + return; + } + msk = FIELD_DP64(msk, SDHC_CAPAB, TOCLKFREQ, 0); + + val = FIELD_EX64(s->capareg, SDHC_CAPAB, BASECLKFREQ); + trace_sdhci_capareg(y ? "base (MHz)" : "Base (KHz)", val); + if (sdhci_check_capab_freq_range(s, "base", val, errp)) { + return; + } + msk = FIELD_DP64(msk, SDHC_CAPAB, BASECLKFREQ, 0); + + val = FIELD_EX64(s->capareg, SDHC_CAPAB, MAXBLOCKLENGTH); + if (val >= 3) { + error_setg(errp, "block size can be 512, 1024 or 2048 only"); + return; + } + trace_sdhci_capareg("max block length", sdhci_get_fifolen(s)); + msk = FIELD_DP64(msk, SDHC_CAPAB, MAXBLOCKLENGTH, 0); + + val = FIELD_EX64(s->capareg, SDHC_CAPAB, HIGHSPEED); + trace_sdhci_capareg("high speed", val); + msk = FIELD_DP64(msk, SDHC_CAPAB, HIGHSPEED, 0); + + val = FIELD_EX64(s->capareg, SDHC_CAPAB, SDMA); + trace_sdhci_capareg("SDMA", val); + msk = FIELD_DP64(msk, SDHC_CAPAB, SDMA, 0); + + val = FIELD_EX64(s->capareg, SDHC_CAPAB, SUSPRESUME); + trace_sdhci_capareg("suspend/resume", val); + msk = FIELD_DP64(msk, SDHC_CAPAB, SUSPRESUME, 0); + + val = FIELD_EX64(s->capareg, SDHC_CAPAB, V33); + trace_sdhci_capareg("3.3v", val); + msk = FIELD_DP64(msk, SDHC_CAPAB, V33, 0); + + val = FIELD_EX64(s->capareg, SDHC_CAPAB, V30); + trace_sdhci_capareg("3.0v", val); + msk = FIELD_DP64(msk, SDHC_CAPAB, V30, 0); + + val = FIELD_EX64(s->capareg, SDHC_CAPAB, V18); + trace_sdhci_capareg("1.8v", val); + msk = FIELD_DP64(msk, SDHC_CAPAB, V18, 0); + break; + + default: + error_setg(errp, "Unsupported spec version: %u", s->sd_spec_version); + } + if (msk) { + qemu_log_mask(LOG_UNIMP, + "SDHCI: unknown CAPAB mask: 0x%016" PRIx64 "\n", msk); + } +} + +static uint8_t sdhci_slotint(SDHCIState *s) +{ + return (s->norintsts & s->norintsigen) || (s->errintsts & s->errintsigen) || + ((s->norintsts & SDHC_NIS_INSERT) && (s->wakcon & SDHC_WKUP_ON_INS)) || + ((s->norintsts & SDHC_NIS_REMOVE) && (s->wakcon & SDHC_WKUP_ON_RMV)); +} + +/* Return true if IRQ was pending and delivered */ +static bool sdhci_update_irq(SDHCIState *s) +{ + bool pending = sdhci_slotint(s); + + qemu_set_irq(s->irq, pending); + + return pending; +} + +static void sdhci_raise_insertion_irq(void *opaque) +{ + SDHCIState *s = (SDHCIState *)opaque; + + if (s->norintsts & SDHC_NIS_REMOVE) { + timer_mod(s->insert_timer, + qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + SDHC_INSERTION_DELAY); + } else { + s->prnsts = 0x1ff0000; + if (s->norintstsen & SDHC_NISEN_INSERT) { + s->norintsts |= SDHC_NIS_INSERT; + } + sdhci_update_irq(s); + } +} + +static void sdhci_set_inserted(DeviceState *dev, bool level) +{ + SDHCIState *s = (SDHCIState *)dev; + + trace_sdhci_set_inserted(level ? 
"insert" : "eject"); + if ((s->norintsts & SDHC_NIS_REMOVE) && level) { + /* Give target some time to notice card ejection */ + timer_mod(s->insert_timer, + qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + SDHC_INSERTION_DELAY); + } else { + if (level) { + s->prnsts = 0x1ff0000; + if (s->norintstsen & SDHC_NISEN_INSERT) { + s->norintsts |= SDHC_NIS_INSERT; + } + } else { + s->prnsts = 0x1fa0000; + s->pwrcon &= ~SDHC_POWER_ON; + s->clkcon &= ~SDHC_CLOCK_SDCLK_EN; + if (s->norintstsen & SDHC_NISEN_REMOVE) { + s->norintsts |= SDHC_NIS_REMOVE; + } + } + sdhci_update_irq(s); + } +} + +static void sdhci_set_readonly(DeviceState *dev, bool level) +{ + SDHCIState *s = (SDHCIState *)dev; + + if (level) { + s->prnsts &= ~SDHC_WRITE_PROTECT; + } else { + /* Write enabled */ + s->prnsts |= SDHC_WRITE_PROTECT; + } +} + +static void sdhci_reset(SDHCIState *s) +{ + DeviceState *dev = DEVICE(s); + + timer_del(s->insert_timer); + timer_del(s->transfer_timer); + + /* Set all registers to 0. Capabilities/Version registers are not cleared + * and assumed to always preserve their value, given to them during + * initialization */ + memset(&s->sdmasysad, 0, (uintptr_t)&s->capareg - (uintptr_t)&s->sdmasysad); + + /* Reset other state based on current card insertion/readonly status */ + sdhci_set_inserted(dev, sdbus_get_inserted(&s->sdbus)); + sdhci_set_readonly(dev, sdbus_get_readonly(&s->sdbus)); + + s->data_count = 0; + s->stopped_state = sdhc_not_stopped; + s->pending_insert_state = false; +} + +static void sdhci_poweron_reset(DeviceState *dev) +{ + /* QOM (ie power-on) reset. This is identical to reset + * commanded via device register apart from handling of the + * 'pending insert on powerup' quirk. + */ + SDHCIState *s = (SDHCIState *)dev; + + sdhci_reset(s); + + if (s->pending_insert_quirk) { + s->pending_insert_state = true; + } +} + +static void sdhci_data_transfer(void *opaque); + +static void sdhci_send_command(SDHCIState *s) +{ + SDRequest request; + uint8_t response[16]; + int rlen; + bool timeout = false; + + s->errintsts = 0; + s->acmd12errsts = 0; + request.cmd = s->cmdreg >> 8; + request.arg = s->argument; + + trace_sdhci_send_command(request.cmd, request.arg); + rlen = sdbus_do_command(&s->sdbus, &request, response); + + if (s->cmdreg & SDHC_CMD_RESPONSE) { + if (rlen == 4) { + s->rspreg[0] = ldl_be_p(response); + s->rspreg[1] = s->rspreg[2] = s->rspreg[3] = 0; + trace_sdhci_response4(s->rspreg[0]); + } else if (rlen == 16) { + s->rspreg[0] = ldl_be_p(&response[11]); + s->rspreg[1] = ldl_be_p(&response[7]); + s->rspreg[2] = ldl_be_p(&response[3]); + s->rspreg[3] = (response[0] << 16) | (response[1] << 8) | + response[2]; + trace_sdhci_response16(s->rspreg[3], s->rspreg[2], + s->rspreg[1], s->rspreg[0]); + } else { + timeout = true; + trace_sdhci_error("timeout waiting for command response"); + if (s->errintstsen & SDHC_EISEN_CMDTIMEOUT) { + s->errintsts |= SDHC_EIS_CMDTIMEOUT; + s->norintsts |= SDHC_NIS_ERR; + } + } + + if (!(s->quirks & SDHCI_QUIRK_NO_BUSY_IRQ) && + (s->norintstsen & SDHC_NISEN_TRSCMP) && + (s->cmdreg & SDHC_CMD_RESPONSE) == SDHC_CMD_RSP_WITH_BUSY) { + s->norintsts |= SDHC_NIS_TRSCMP; + } + } + + if (s->norintstsen & SDHC_NISEN_CMDCMP) { + s->norintsts |= SDHC_NIS_CMDCMP; + } + + sdhci_update_irq(s); + + if (!timeout && s->blksize && (s->cmdreg & SDHC_CMD_DATA_PRESENT)) { + s->data_count = 0; + sdhci_data_transfer(s); + } +} + +static void sdhci_end_transfer(SDHCIState *s) +{ + /* Automatically send CMD12 to stop transfer if AutoCMD12 enabled */ + if ((s->trnmod & SDHC_TRNS_ACMD12) != 
0) { + SDRequest request; + uint8_t response[16]; + + request.cmd = 0x0C; + request.arg = 0; + trace_sdhci_end_transfer(request.cmd, request.arg); + sdbus_do_command(&s->sdbus, &request, response); + /* Auto CMD12 response goes to the upper Response register */ + s->rspreg[3] = ldl_be_p(response); + } + + s->prnsts &= ~(SDHC_DOING_READ | SDHC_DOING_WRITE | + SDHC_DAT_LINE_ACTIVE | SDHC_DATA_INHIBIT | + SDHC_SPACE_AVAILABLE | SDHC_DATA_AVAILABLE); + + if (s->norintstsen & SDHC_NISEN_TRSCMP) { + s->norintsts |= SDHC_NIS_TRSCMP; + } + + sdhci_update_irq(s); +} + +/* + * Programmed i/o data transfer + */ +#define BLOCK_SIZE_MASK (4 * KiB - 1) + +/* Fill host controller's read buffer with BLKSIZE bytes of data from card */ +static void sdhci_read_block_from_card(SDHCIState *s) +{ + const uint16_t blk_size = s->blksize & BLOCK_SIZE_MASK; + + if ((s->trnmod & SDHC_TRNS_MULTI) && + (s->trnmod & SDHC_TRNS_BLK_CNT_EN) && (s->blkcnt == 0)) { + return; + } + + if (!FIELD_EX32(s->hostctl2, SDHC_HOSTCTL2, EXECUTE_TUNING)) { + /* Device is not in tuning */ + sdbus_read_data(&s->sdbus, s->fifo_buffer, blk_size); + } + + if (FIELD_EX32(s->hostctl2, SDHC_HOSTCTL2, EXECUTE_TUNING)) { + /* Device is in tuning */ + s->hostctl2 &= ~R_SDHC_HOSTCTL2_EXECUTE_TUNING_MASK; + s->hostctl2 |= R_SDHC_HOSTCTL2_SAMPLING_CLKSEL_MASK; + s->prnsts &= ~(SDHC_DAT_LINE_ACTIVE | SDHC_DOING_READ | + SDHC_DATA_INHIBIT); + goto read_done; + } + + /* New data now available for READ through Buffer Port Register */ + s->prnsts |= SDHC_DATA_AVAILABLE; + if (s->norintstsen & SDHC_NISEN_RBUFRDY) { + s->norintsts |= SDHC_NIS_RBUFRDY; + } + + /* Clear DAT line active status if that was the last block */ + if ((s->trnmod & SDHC_TRNS_MULTI) == 0 || + ((s->trnmod & SDHC_TRNS_MULTI) && s->blkcnt == 1)) { + s->prnsts &= ~SDHC_DAT_LINE_ACTIVE; + } + + /* If stop at block gap request was set and it's not the last block of + * data - generate Block Event interrupt */ + if (s->stopped_state == sdhc_gap_read && (s->trnmod & SDHC_TRNS_MULTI) && + s->blkcnt != 1) { + s->prnsts &= ~SDHC_DAT_LINE_ACTIVE; + if (s->norintstsen & SDHC_EISEN_BLKGAP) { + s->norintsts |= SDHC_EIS_BLKGAP; + } + } + +read_done: + sdhci_update_irq(s); +} + +/* Read @size byte of data from host controller @s BUFFER DATA PORT register */ +static uint32_t sdhci_read_dataport(SDHCIState *s, unsigned size) +{ + uint32_t value = 0; + int i; + + /* first check that a valid data exists in host controller input buffer */ + if ((s->prnsts & SDHC_DATA_AVAILABLE) == 0) { + trace_sdhci_error("read from empty buffer"); + return 0; + } + + for (i = 0; i < size; i++) { + value |= s->fifo_buffer[s->data_count] << i * 8; + s->data_count++; + /* check if we've read all valid data (blksize bytes) from buffer */ + if ((s->data_count) >= (s->blksize & BLOCK_SIZE_MASK)) { + trace_sdhci_read_dataport(s->data_count); + s->prnsts &= ~SDHC_DATA_AVAILABLE; /* no more data in a buffer */ + s->data_count = 0; /* next buff read must start at position [0] */ + + if (s->trnmod & SDHC_TRNS_BLK_CNT_EN) { + s->blkcnt--; + } + + /* if that was the last block of data */ + if ((s->trnmod & SDHC_TRNS_MULTI) == 0 || + ((s->trnmod & SDHC_TRNS_BLK_CNT_EN) && (s->blkcnt == 0)) || + /* stop at gap request */ + (s->stopped_state == sdhc_gap_read && + !(s->prnsts & SDHC_DAT_LINE_ACTIVE))) { + sdhci_end_transfer(s); + } else { /* if there are more data, read next block from card */ + sdhci_read_block_from_card(s); + } + break; + } + } + + return value; +} + +/* Write data from host controller FIFO to card */ +static void 
sdhci_write_block_to_card(SDHCIState *s)
+{
+    if (s->prnsts & SDHC_SPACE_AVAILABLE) {
+        if (s->norintstsen & SDHC_NISEN_WBUFRDY) {
+            s->norintsts |= SDHC_NIS_WBUFRDY;
+        }
+        sdhci_update_irq(s);
+        return;
+    }
+
+    if (s->trnmod & SDHC_TRNS_BLK_CNT_EN) {
+        if (s->blkcnt == 0) {
+            return;
+        } else {
+            s->blkcnt--;
+        }
+    }
+
+    sdbus_write_data(&s->sdbus, s->fifo_buffer, s->blksize & BLOCK_SIZE_MASK);
+
+    /* Next data can be written through the BUFFER DATA PORT register */
+    s->prnsts |= SDHC_SPACE_AVAILABLE;
+
+    /* Finish transfer if that was the last block of data */
+    if ((s->trnmod & SDHC_TRNS_MULTI) == 0 ||
+        ((s->trnmod & SDHC_TRNS_MULTI) &&
+         (s->trnmod & SDHC_TRNS_BLK_CNT_EN) && (s->blkcnt == 0))) {
+        sdhci_end_transfer(s);
+    } else if (s->norintstsen & SDHC_NISEN_WBUFRDY) {
+        s->norintsts |= SDHC_NIS_WBUFRDY;
+    }
+
+    /* Generate Block Gap Event if requested and if not the last block */
+    if (s->stopped_state == sdhc_gap_write && (s->trnmod & SDHC_TRNS_MULTI) &&
+        s->blkcnt > 0) {
+        s->prnsts &= ~SDHC_DOING_WRITE;
+        if (s->norintstsen & SDHC_EISEN_BLKGAP) {
+            s->norintsts |= SDHC_EIS_BLKGAP;
+        }
+        sdhci_end_transfer(s);
+    }
+
+    sdhci_update_irq(s);
+}
+
+/* Write @size bytes of @value data to host controller @s Buffer Data Port
+ * register */
+static void sdhci_write_dataport(SDHCIState *s, uint32_t value, unsigned size)
+{
+    unsigned i;
+
+    /* Check that there is free space left in a buffer */
+    if (!(s->prnsts & SDHC_SPACE_AVAILABLE)) {
+        trace_sdhci_error("Can't write to data buffer: buffer full");
+        return;
+    }
+
+    for (i = 0; i < size; i++) {
+        s->fifo_buffer[s->data_count] = value & 0xFF;
+        s->data_count++;
+        value >>= 8;
+        if (s->data_count >= (s->blksize & BLOCK_SIZE_MASK)) {
+            trace_sdhci_write_dataport(s->data_count);
+            s->data_count = 0;
+            s->prnsts &= ~SDHC_SPACE_AVAILABLE;
+            if (s->prnsts & SDHC_DOING_WRITE) {
+                sdhci_write_block_to_card(s);
+            }
+        }
+    }
+}
+
+/*
+ * Single DMA data transfer
+ */
+
+/* Multi block SDMA transfer */
+static void sdhci_sdma_transfer_multi_blocks(SDHCIState *s)
+{
+    bool page_aligned = false;
+    unsigned int begin;
+    const uint16_t block_size = s->blksize & BLOCK_SIZE_MASK;
+    uint32_t boundary_chk = 1 << (((s->blksize & ~BLOCK_SIZE_MASK) >> 12) + 12);
+    uint32_t boundary_count = boundary_chk - (s->sdmasysad % boundary_chk);
+
+    if (!(s->trnmod & SDHC_TRNS_BLK_CNT_EN) || !s->blkcnt) {
+        qemu_log_mask(LOG_UNIMP, "infinite transfer is not supported\n");
+        return;
+    }
+
+    /* XXX: Some sd/mmc drivers (for example, u-boot-slp) do not account for
+     * the possible stop at a page boundary if the initial address is not
+     * page aligned; allow them to work properly */
+    if ((s->sdmasysad % boundary_chk) == 0) {
+        page_aligned = true;
+    }
+
+    s->prnsts |= SDHC_DATA_INHIBIT | SDHC_DAT_LINE_ACTIVE;
+    if (s->trnmod & SDHC_TRNS_READ) {
+        s->prnsts |= SDHC_DOING_READ;
+        while (s->blkcnt) {
+            if (s->data_count == 0) {
+                sdbus_read_data(&s->sdbus, s->fifo_buffer, block_size);
+            }
+            begin = s->data_count;
+            if (((boundary_count + begin) < block_size) && page_aligned) {
+                s->data_count = boundary_count + begin;
+                boundary_count = 0;
+            } else {
+                s->data_count = block_size;
+                boundary_count -= block_size - begin;
+                if (s->trnmod & SDHC_TRNS_BLK_CNT_EN) {
+                    s->blkcnt--;
+                }
+            }
+            dma_memory_write(s->dma_as, s->sdmasysad,
+                             &s->fifo_buffer[begin], s->data_count - begin);
+            s->sdmasysad += s->data_count - begin;
+            if (s->data_count == block_size) {
+                s->data_count = 0;
+            }
+            if (page_aligned && boundary_count == 0) {
+                break;
+            }
+        }
+    } else {
+        s->prnsts |= SDHC_DOING_WRITE;
+        while
(s->blkcnt) { + begin = s->data_count; + if (((boundary_count + begin) < block_size) && page_aligned) { + s->data_count = boundary_count + begin; + boundary_count = 0; + } else { + s->data_count = block_size; + boundary_count -= block_size - begin; + } + dma_memory_read(s->dma_as, s->sdmasysad, + &s->fifo_buffer[begin], s->data_count - begin); + s->sdmasysad += s->data_count - begin; + if (s->data_count == block_size) { + sdbus_write_data(&s->sdbus, s->fifo_buffer, block_size); + s->data_count = 0; + if (s->trnmod & SDHC_TRNS_BLK_CNT_EN) { + s->blkcnt--; + } + } + if (page_aligned && boundary_count == 0) { + break; + } + } + } + + if (s->blkcnt == 0) { + sdhci_end_transfer(s); + } else { + if (s->norintstsen & SDHC_NISEN_DMA) { + s->norintsts |= SDHC_NIS_DMA; + } + sdhci_update_irq(s); + } +} + +/* single block SDMA transfer */ +static void sdhci_sdma_transfer_single_block(SDHCIState *s) +{ + uint32_t datacnt = s->blksize & BLOCK_SIZE_MASK; + + if (s->trnmod & SDHC_TRNS_READ) { + sdbus_read_data(&s->sdbus, s->fifo_buffer, datacnt); + dma_memory_write(s->dma_as, s->sdmasysad, s->fifo_buffer, datacnt); + } else { + dma_memory_read(s->dma_as, s->sdmasysad, s->fifo_buffer, datacnt); + sdbus_write_data(&s->sdbus, s->fifo_buffer, datacnt); + } + s->blkcnt--; + + sdhci_end_transfer(s); +} + +typedef struct ADMADescr { + hwaddr addr; + uint16_t length; + uint8_t attr; + uint8_t incr; +} ADMADescr; + +static void get_adma_description(SDHCIState *s, ADMADescr *dscr) +{ + uint32_t adma1 = 0; + uint64_t adma2 = 0; + hwaddr entry_addr = (hwaddr)s->admasysaddr; + switch (SDHC_DMA_TYPE(s->hostctl1)) { + case SDHC_CTRL_ADMA2_32: + dma_memory_read(s->dma_as, entry_addr, &adma2, sizeof(adma2)); + adma2 = le64_to_cpu(adma2); + /* The spec does not specify endianness of descriptor table. + * We currently assume that it is LE. 
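The ADMA2 32-bit case of get_adma_description() treats each table entry as a 64-bit little-endian word: the buffer address (4-byte aligned) in bits [63:32], the length in [31:16], and the attributes in [6:0]. The same unpacking on a hand-built descriptor, as a standalone sketch; the ADMA_ATTR_* names mirror the SDHC_ADMA_ATTR_* defines in sdhci-internal.h:

    #include <stdint.h>
    #include <stdio.h>

    #define ADMA_ATTR_VALID (1 << 0)
    #define ADMA_ATTR_END   (1 << 1)
    #define ADMA_ATTR_TRAN  (1 << 5) /* SDHC_ADMA_ATTR_ACT_TRAN */

    int main(void)
    {
        /* One 512-byte transfer at 0x10000000, last entry in the table. */
        uint64_t desc = ((uint64_t)0x10000000 << 32)
                        | ((uint64_t)512 << 16)
                        | ADMA_ATTR_TRAN | ADMA_ATTR_END | ADMA_ATTR_VALID;

        uint64_t addr = (desc >> 32) & ~0x3ull;   /* dscr->addr */
        uint16_t length = (uint16_t)(desc >> 16); /* dscr->length */
        uint8_t attr = desc & 0x7f;               /* dscr->attr */

        printf("addr=0x%llx len=%u attr=0x%02x%s%s\n",
               (unsigned long long)addr, (unsigned)length, (unsigned)attr,
               (attr & ADMA_ATTR_END) ? " END" : "",
               (attr & ADMA_ATTR_VALID) ? " VALID" : "");
        return 0;
    }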
+ */ + dscr->addr = (hwaddr)extract64(adma2, 32, 32) & ~0x3ull; + dscr->length = (uint16_t)extract64(adma2, 16, 16); + dscr->attr = (uint8_t)extract64(adma2, 0, 7); + dscr->incr = 8; + break; + case SDHC_CTRL_ADMA1_32: + dma_memory_read(s->dma_as, entry_addr, &adma1, sizeof(adma1)); + adma1 = le32_to_cpu(adma1); + dscr->addr = (hwaddr)(adma1 & 0xFFFFF000); + dscr->attr = (uint8_t)extract32(adma1, 0, 7); + dscr->incr = 4; + if ((dscr->attr & SDHC_ADMA_ATTR_ACT_MASK) == SDHC_ADMA_ATTR_SET_LEN) { + dscr->length = (uint16_t)extract32(adma1, 12, 16); + } else { + dscr->length = 4 * KiB; + } + break; + case SDHC_CTRL_ADMA2_64: + dma_memory_read(s->dma_as, entry_addr, &dscr->attr, 1); + dma_memory_read(s->dma_as, entry_addr + 2, &dscr->length, 2); + dscr->length = le16_to_cpu(dscr->length); + dma_memory_read(s->dma_as, entry_addr + 4, &dscr->addr, 8); + dscr->addr = le64_to_cpu(dscr->addr); + dscr->attr &= (uint8_t) ~0xC0; + dscr->incr = 12; + break; + } +} + +/* Advanced DMA data transfer */ + +static void sdhci_do_adma(SDHCIState *s) +{ + unsigned int begin, length; + const uint16_t block_size = s->blksize & BLOCK_SIZE_MASK; + ADMADescr dscr = {}; + int i; + + if (s->trnmod & SDHC_TRNS_BLK_CNT_EN && !s->blkcnt) { + /* Stop Multiple Transfer */ + sdhci_end_transfer(s); + return; + } + + for (i = 0; i < SDHC_ADMA_DESCS_PER_DELAY; ++i) { + s->admaerr &= ~SDHC_ADMAERR_LENGTH_MISMATCH; + + get_adma_description(s, &dscr); + trace_sdhci_adma_loop(dscr.addr, dscr.length, dscr.attr); + + if ((dscr.attr & SDHC_ADMA_ATTR_VALID) == 0) { + /* Indicate that error occurred in ST_FDS state */ + s->admaerr &= ~SDHC_ADMAERR_STATE_MASK; + s->admaerr |= SDHC_ADMAERR_STATE_ST_FDS; + + /* Generate ADMA error interrupt */ + if (s->errintstsen & SDHC_EISEN_ADMAERR) { + s->errintsts |= SDHC_EIS_ADMAERR; + s->norintsts |= SDHC_NIS_ERR; + } + + sdhci_update_irq(s); + return; + } + + length = dscr.length ? 
dscr.length : 64 * KiB; + + switch (dscr.attr & SDHC_ADMA_ATTR_ACT_MASK) { + case SDHC_ADMA_ATTR_ACT_TRAN: /* data transfer */ + s->prnsts |= SDHC_DATA_INHIBIT | SDHC_DAT_LINE_ACTIVE; + if (s->trnmod & SDHC_TRNS_READ) { + s->prnsts |= SDHC_DOING_READ; + while (length) { + if (s->data_count == 0) { + sdbus_read_data(&s->sdbus, s->fifo_buffer, block_size); + } + begin = s->data_count; + if ((length + begin) < block_size) { + s->data_count = length + begin; + length = 0; + } else { + s->data_count = block_size; + length -= block_size - begin; + } + dma_memory_write(s->dma_as, dscr.addr, + &s->fifo_buffer[begin], + s->data_count - begin); + dscr.addr += s->data_count - begin; + if (s->data_count == block_size) { + s->data_count = 0; + if (s->trnmod & SDHC_TRNS_BLK_CNT_EN) { + s->blkcnt--; + if (s->blkcnt == 0) { + break; + } + } + } + } + } else { + s->prnsts |= SDHC_DOING_WRITE; + while (length) { + begin = s->data_count; + if ((length + begin) < block_size) { + s->data_count = length + begin; + length = 0; + } else { + s->data_count = block_size; + length -= block_size - begin; + } + dma_memory_read(s->dma_as, dscr.addr, + &s->fifo_buffer[begin], + s->data_count - begin); + dscr.addr += s->data_count - begin; + if (s->data_count == block_size) { + sdbus_write_data(&s->sdbus, s->fifo_buffer, block_size); + s->data_count = 0; + if (s->trnmod & SDHC_TRNS_BLK_CNT_EN) { + s->blkcnt--; + if (s->blkcnt == 0) { + break; + } + } + } + } + } + s->admasysaddr += dscr.incr; + break; + case SDHC_ADMA_ATTR_ACT_LINK: /* link to next descriptor table */ + s->admasysaddr = dscr.addr; + trace_sdhci_adma("link", s->admasysaddr); + break; + default: + s->admasysaddr += dscr.incr; + break; + } + + if (dscr.attr & SDHC_ADMA_ATTR_INT) { + trace_sdhci_adma("interrupt", s->admasysaddr); + if (s->norintstsen & SDHC_NISEN_DMA) { + s->norintsts |= SDHC_NIS_DMA; + } + + if (sdhci_update_irq(s) && !(dscr.attr & SDHC_ADMA_ATTR_END)) { + /* IRQ delivered, reschedule current transfer */ + break; + } + } + + /* ADMA transfer terminates if blkcnt == 0 or by END attribute */ + if (((s->trnmod & SDHC_TRNS_BLK_CNT_EN) && + (s->blkcnt == 0)) || (dscr.attr & SDHC_ADMA_ATTR_END)) { + trace_sdhci_adma_transfer_completed(); + if (length || ((dscr.attr & SDHC_ADMA_ATTR_END) && + (s->trnmod & SDHC_TRNS_BLK_CNT_EN) && + s->blkcnt != 0)) { + trace_sdhci_error("SD/MMC host ADMA length mismatch"); + s->admaerr |= SDHC_ADMAERR_LENGTH_MISMATCH | + SDHC_ADMAERR_STATE_ST_TFR; + if (s->errintstsen & SDHC_EISEN_ADMAERR) { + trace_sdhci_error("Set ADMA error flag"); + s->errintsts |= SDHC_EIS_ADMAERR; + s->norintsts |= SDHC_NIS_ERR; + } + + sdhci_update_irq(s); + } + sdhci_end_transfer(s); + return; + } + + } + + /* we have unfinished business - reschedule to continue ADMA */ + timer_mod(s->transfer_timer, + qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + SDHC_TRANSFER_DELAY); +} + +/* Perform data transfer according to controller configuration */ + +static void sdhci_data_transfer(void *opaque) +{ + SDHCIState *s = (SDHCIState *)opaque; + + if (s->trnmod & SDHC_TRNS_DMA) { + switch (SDHC_DMA_TYPE(s->hostctl1)) { + case SDHC_CTRL_SDMA: + if ((s->blkcnt == 1) || !(s->trnmod & SDHC_TRNS_MULTI)) { + sdhci_sdma_transfer_single_block(s); + } else { + sdhci_sdma_transfer_multi_blocks(s); + } + + break; + case SDHC_CTRL_ADMA1_32: + if (!(s->capareg & R_SDHC_CAPAB_ADMA1_MASK)) { + trace_sdhci_error("ADMA1 not supported"); + break; + } + + sdhci_do_adma(s); + break; + case SDHC_CTRL_ADMA2_32: + if (!(s->capareg & R_SDHC_CAPAB_ADMA2_MASK)) { + 
trace_sdhci_error("ADMA2 not supported");
+            break;
+        }
+
+        sdhci_do_adma(s);
+        break;
+    case SDHC_CTRL_ADMA2_64:
+        if (!(s->capareg & R_SDHC_CAPAB_ADMA2_MASK) ||
+            !(s->capareg & R_SDHC_CAPAB_BUS64BIT_MASK)) {
+            trace_sdhci_error("64 bit ADMA not supported");
+            break;
+        }
+
+        sdhci_do_adma(s);
+        break;
+    default:
+        trace_sdhci_error("Unsupported DMA type");
+        break;
+    }
+    } else {
+        if ((s->trnmod & SDHC_TRNS_READ) && sdbus_data_ready(&s->sdbus)) {
+            s->prnsts |= SDHC_DOING_READ | SDHC_DATA_INHIBIT |
+                         SDHC_DAT_LINE_ACTIVE;
+            sdhci_read_block_from_card(s);
+        } else {
+            s->prnsts |= SDHC_DOING_WRITE | SDHC_DAT_LINE_ACTIVE |
+                         SDHC_SPACE_AVAILABLE | SDHC_DATA_INHIBIT;
+            sdhci_write_block_to_card(s);
+        }
+    }
+}
+
+static bool sdhci_can_issue_command(SDHCIState *s)
+{
+    if (!SDHC_CLOCK_IS_ON(s->clkcon) ||
+        (((s->prnsts & SDHC_DATA_INHIBIT) || s->stopped_state) &&
+        ((s->cmdreg & SDHC_CMD_DATA_PRESENT) ||
+        ((s->cmdreg & SDHC_CMD_RESPONSE) == SDHC_CMD_RSP_WITH_BUSY &&
+        !(SDHC_COMMAND_TYPE(s->cmdreg) == SDHC_CMD_ABORT))))) {
+        return false;
+    }
+
+    return true;
+}
+
+/* The Buffer Data Port register must be accessed in a sequential and
+ * continuous manner */
+static inline bool
+sdhci_buff_access_is_sequential(SDHCIState *s, unsigned byte_num)
+{
+    if ((s->data_count & 0x3) != byte_num) {
+        trace_sdhci_error("Non-sequential access to Buffer Data Port register "
+                          "is prohibited");
+        return false;
+    }
+    return true;
+}
+
+static void sdhci_resume_pending_transfer(SDHCIState *s)
+{
+    timer_del(s->transfer_timer);
+    sdhci_data_transfer(s);
+}
+
+static uint64_t sdhci_read(void *opaque, hwaddr offset, unsigned size)
+{
+    SDHCIState *s = (SDHCIState *)opaque;
+    uint32_t ret = 0;
+
+    if (timer_pending(s->transfer_timer)) {
+        sdhci_resume_pending_transfer(s);
+    }
+
+    switch (offset & ~0x3) {
+    case SDHC_SYSAD:
+        ret = s->sdmasysad;
+        break;
+    case SDHC_BLKSIZE:
+        ret = s->blksize | (s->blkcnt << 16);
+        break;
+    case SDHC_ARGUMENT:
+        ret = s->argument;
+        break;
+    case SDHC_TRNMOD:
+        ret = s->trnmod | (s->cmdreg << 16);
+        break;
+    case SDHC_RSPREG0 ...
SDHC_RSPREG3: + ret = s->rspreg[((offset & ~0x3) - SDHC_RSPREG0) >> 2]; + break; + case SDHC_BDATA: + if (sdhci_buff_access_is_sequential(s, offset - SDHC_BDATA)) { + ret = sdhci_read_dataport(s, size); + trace_sdhci_access("rd", size << 3, offset, "->", ret, ret); + return ret; + } + break; + case SDHC_PRNSTS: + ret = s->prnsts; + ret = FIELD_DP32(ret, SDHC_PRNSTS, DAT_LVL, + sdbus_get_dat_lines(&s->sdbus)); + ret = FIELD_DP32(ret, SDHC_PRNSTS, CMD_LVL, + sdbus_get_cmd_line(&s->sdbus)); + break; + case SDHC_HOSTCTL: + ret = s->hostctl1 | (s->pwrcon << 8) | (s->blkgap << 16) | + (s->wakcon << 24); + break; + case SDHC_CLKCON: + ret = s->clkcon | (s->timeoutcon << 16); + break; + case SDHC_NORINTSTS: + ret = s->norintsts | (s->errintsts << 16); + break; + case SDHC_NORINTSTSEN: + ret = s->norintstsen | (s->errintstsen << 16); + break; + case SDHC_NORINTSIGEN: + ret = s->norintsigen | (s->errintsigen << 16); + break; + case SDHC_ACMD12ERRSTS: + ret = s->acmd12errsts | (s->hostctl2 << 16); + break; + case SDHC_CAPAB: + ret = (uint32_t)s->capareg; + break; + case SDHC_CAPAB + 4: + ret = (uint32_t)(s->capareg >> 32); + break; + case SDHC_MAXCURR: + ret = (uint32_t)s->maxcurr; + break; + case SDHC_MAXCURR + 4: + ret = (uint32_t)(s->maxcurr >> 32); + break; + case SDHC_ADMAERR: + ret = s->admaerr; + break; + case SDHC_ADMASYSADDR: + ret = (uint32_t)s->admasysaddr; + break; + case SDHC_ADMASYSADDR + 4: + ret = (uint32_t)(s->admasysaddr >> 32); + break; + case SDHC_SLOT_INT_STATUS: + ret = (s->version << 16) | sdhci_slotint(s); + break; + default: + qemu_log_mask(LOG_UNIMP, "SDHC rd_%ub @0x%02" HWADDR_PRIx " " + "not implemented\n", size, offset); + break; + } + + ret >>= (offset & 0x3) * 8; + ret &= (1ULL << (size * 8)) - 1; + trace_sdhci_access("rd", size << 3, offset, "->", ret, ret); + return ret; +} + +static inline void sdhci_blkgap_write(SDHCIState *s, uint8_t value) +{ + if ((value & SDHC_STOP_AT_GAP_REQ) && (s->blkgap & SDHC_STOP_AT_GAP_REQ)) { + return; + } + s->blkgap = value & SDHC_STOP_AT_GAP_REQ; + + if ((value & SDHC_CONTINUE_REQ) && s->stopped_state && + (s->blkgap & SDHC_STOP_AT_GAP_REQ) == 0) { + if (s->stopped_state == sdhc_gap_read) { + s->prnsts |= SDHC_DAT_LINE_ACTIVE | SDHC_DOING_READ; + sdhci_read_block_from_card(s); + } else { + s->prnsts |= SDHC_DAT_LINE_ACTIVE | SDHC_DOING_WRITE; + sdhci_write_block_to_card(s); + } + s->stopped_state = sdhc_not_stopped; + } else if (!s->stopped_state && (value & SDHC_STOP_AT_GAP_REQ)) { + if (s->prnsts & SDHC_DOING_READ) { + s->stopped_state = sdhc_gap_read; + } else if (s->prnsts & SDHC_DOING_WRITE) { + s->stopped_state = sdhc_gap_write; + } + } +} + +static inline void sdhci_reset_write(SDHCIState *s, uint8_t value) +{ + switch (value) { + case SDHC_RESET_ALL: + sdhci_reset(s); + break; + case SDHC_RESET_CMD: + s->prnsts &= ~SDHC_CMD_INHIBIT; + s->norintsts &= ~SDHC_NIS_CMDCMP; + break; + case SDHC_RESET_DATA: + s->data_count = 0; + s->prnsts &= ~(SDHC_SPACE_AVAILABLE | SDHC_DATA_AVAILABLE | + SDHC_DOING_READ | SDHC_DOING_WRITE | + SDHC_DATA_INHIBIT | SDHC_DAT_LINE_ACTIVE); + s->blkgap &= ~(SDHC_STOP_AT_GAP_REQ | SDHC_CONTINUE_REQ); + s->stopped_state = sdhc_not_stopped; + s->norintsts &= ~(SDHC_NIS_WBUFRDY | SDHC_NIS_RBUFRDY | + SDHC_NIS_DMA | SDHC_NIS_TRSCMP | SDHC_NIS_BLKGAP); + break; + } +} + +static void +sdhci_write(void *opaque, hwaddr offset, uint64_t val, unsigned size) +{ + SDHCIState *s = (SDHCIState *)opaque; + unsigned shift = 8 * (offset & 0x3); + uint32_t mask = ~(((1ULL << (size * 8)) - 1) << shift); + uint32_t value = 
val; + value <<= shift; + + if (timer_pending(s->transfer_timer)) { + sdhci_resume_pending_transfer(s); + } + + switch (offset & ~0x3) { + case SDHC_SYSAD: + if (!TRANSFERRING_DATA(s->prnsts)) { + s->sdmasysad = (s->sdmasysad & mask) | value; + MASKED_WRITE(s->sdmasysad, mask, value); + /* Writing to last byte of sdmasysad might trigger transfer */ + if (!(mask & 0xFF000000) && s->blkcnt && s->blksize && + SDHC_DMA_TYPE(s->hostctl1) == SDHC_CTRL_SDMA) { + if (s->trnmod & SDHC_TRNS_MULTI) { + sdhci_sdma_transfer_multi_blocks(s); + } else { + sdhci_sdma_transfer_single_block(s); + } + } + } + break; + case SDHC_BLKSIZE: + if (!TRANSFERRING_DATA(s->prnsts)) { + uint16_t blksize = s->blksize; + + MASKED_WRITE(s->blksize, mask, extract32(value, 0, 12)); + MASKED_WRITE(s->blkcnt, mask >> 16, value >> 16); + + /* Limit block size to the maximum buffer size */ + if (extract32(s->blksize, 0, 12) > s->buf_maxsz) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: Size 0x%x is larger than " + "the maximum buffer 0x%x\n", __func__, s->blksize, + s->buf_maxsz); + + s->blksize = deposit32(s->blksize, 0, 12, s->buf_maxsz); + } + + /* + * If the block size is programmed to a different value from + * the previous one, reset the data pointer of s->fifo_buffer[] + * so that s->fifo_buffer[] can be filled in using the new block + * size in the next transfer. + */ + if (blksize != s->blksize) { + s->data_count = 0; + } + } + + break; + case SDHC_ARGUMENT: + MASKED_WRITE(s->argument, mask, value); + break; + case SDHC_TRNMOD: + /* DMA can be enabled only if it is supported as indicated by + * capabilities register */ + if (!(s->capareg & R_SDHC_CAPAB_SDMA_MASK)) { + value &= ~SDHC_TRNS_DMA; + } + MASKED_WRITE(s->trnmod, mask, value & SDHC_TRNMOD_MASK); + MASKED_WRITE(s->cmdreg, mask >> 16, value >> 16); + + /* Writing to the upper byte of CMDREG triggers SD command generation */ + if ((mask & 0xFF000000) || !sdhci_can_issue_command(s)) { + break; + } + + sdhci_send_command(s); + break; + case SDHC_BDATA: + if (sdhci_buff_access_is_sequential(s, offset - SDHC_BDATA)) { + sdhci_write_dataport(s, value >> shift, size); + } + break; + case SDHC_HOSTCTL: + if (!(mask & 0xFF0000)) { + sdhci_blkgap_write(s, value >> 16); + } + MASKED_WRITE(s->hostctl1, mask, value); + MASKED_WRITE(s->pwrcon, mask >> 8, value >> 8); + MASKED_WRITE(s->wakcon, mask >> 24, value >> 24); + if (!(s->prnsts & SDHC_CARD_PRESENT) || ((s->pwrcon >> 1) & 0x7) < 5 || + !(s->capareg & (1 << (31 - ((s->pwrcon >> 1) & 0x7))))) { + s->pwrcon &= ~SDHC_POWER_ON; + } + break; + case SDHC_CLKCON: + if (!(mask & 0xFF000000)) { + sdhci_reset_write(s, value >> 24); + } + MASKED_WRITE(s->clkcon, mask, value); + MASKED_WRITE(s->timeoutcon, mask >> 16, value >> 16); + if (s->clkcon & SDHC_CLOCK_INT_EN) { + s->clkcon |= SDHC_CLOCK_INT_STABLE; + } else { + s->clkcon &= ~SDHC_CLOCK_INT_STABLE; + } + break; + case SDHC_NORINTSTS: + if (s->norintstsen & SDHC_NISEN_CARDINT) { + value &= ~SDHC_NIS_CARDINT; + } + s->norintsts &= mask | ~value; + s->errintsts &= (mask >> 16) | ~(value >> 16); + if (s->errintsts) { + s->norintsts |= SDHC_NIS_ERR; + } else { + s->norintsts &= ~SDHC_NIS_ERR; + } + sdhci_update_irq(s); + break; + case SDHC_NORINTSTSEN: + MASKED_WRITE(s->norintstsen, mask, value); + MASKED_WRITE(s->errintstsen, mask >> 16, value >> 16); + s->norintsts &= s->norintstsen; + s->errintsts &= s->errintstsen; + if (s->errintsts) { + s->norintsts |= SDHC_NIS_ERR; + } else { + s->norintsts &= ~SDHC_NIS_ERR; + } + /* Quirk for Raspberry Pi: pending card insert interrupt + * 
appears when first enabled after power on */ + if ((s->norintstsen & SDHC_NISEN_INSERT) && s->pending_insert_state) { + assert(s->pending_insert_quirk); + s->norintsts |= SDHC_NIS_INSERT; + s->pending_insert_state = false; + } + sdhci_update_irq(s); + break; + case SDHC_NORINTSIGEN: + MASKED_WRITE(s->norintsigen, mask, value); + MASKED_WRITE(s->errintsigen, mask >> 16, value >> 16); + sdhci_update_irq(s); + break; + case SDHC_ADMAERR: + MASKED_WRITE(s->admaerr, mask, value); + break; + case SDHC_ADMASYSADDR: + s->admasysaddr = (s->admasysaddr & (0xFFFFFFFF00000000ULL | + (uint64_t)mask)) | (uint64_t)value; + break; + case SDHC_ADMASYSADDR + 4: + s->admasysaddr = (s->admasysaddr & (0x00000000FFFFFFFFULL | + ((uint64_t)mask << 32))) | ((uint64_t)value << 32); + break; + case SDHC_FEAER: + s->acmd12errsts |= value; + s->errintsts |= (value >> 16) & s->errintstsen; + if (s->acmd12errsts) { + s->errintsts |= SDHC_EIS_CMD12ERR; + } + if (s->errintsts) { + s->norintsts |= SDHC_NIS_ERR; + } + sdhci_update_irq(s); + break; + case SDHC_ACMD12ERRSTS: + MASKED_WRITE(s->acmd12errsts, mask, value & UINT16_MAX); + if (s->uhs_mode >= UHS_I) { + MASKED_WRITE(s->hostctl2, mask >> 16, value >> 16); + + if (FIELD_EX32(s->hostctl2, SDHC_HOSTCTL2, V18_ENA)) { + sdbus_set_voltage(&s->sdbus, SD_VOLTAGE_1_8V); + } else { + sdbus_set_voltage(&s->sdbus, SD_VOLTAGE_3_3V); + } + } + break; + + case SDHC_CAPAB: + case SDHC_CAPAB + 4: + case SDHC_MAXCURR: + case SDHC_MAXCURR + 4: + qemu_log_mask(LOG_GUEST_ERROR, "SDHC wr_%ub @0x%02" HWADDR_PRIx + " <- 0x%08x read-only\n", size, offset, value >> shift); + break; + + default: + qemu_log_mask(LOG_UNIMP, "SDHC wr_%ub @0x%02" HWADDR_PRIx " <- 0x%08x " + "not implemented\n", size, offset, value >> shift); + break; + } + trace_sdhci_access("wr", size << 3, offset, "<-", + value >> shift, value >> shift); +} + +static const MemoryRegionOps sdhci_mmio_ops = { + .read = sdhci_read, + .write = sdhci_write, + .valid = { + .min_access_size = 1, + .max_access_size = 4, + .unaligned = false + }, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static void sdhci_init_readonly_registers(SDHCIState *s, Error **errp) +{ + ERRP_GUARD(); + + switch (s->sd_spec_version) { + case 2 ... 3: + break; + default: + error_setg(errp, "Only Spec v2/v3 are supported"); + return; + } + s->version = (SDHC_HCVER_VENDOR << 8) | (s->sd_spec_version - 1); + + sdhci_check_capareg(s, errp); + if (*errp) { + return; + } +} + +/* --- qdev common --- */ + +void sdhci_initfn(SDHCIState *s) +{ + qbus_init(&s->sdbus, sizeof(s->sdbus), TYPE_SDHCI_BUS, DEVICE(s), "sd-bus"); + + s->insert_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, sdhci_raise_insertion_irq, s); + s->transfer_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, sdhci_data_transfer, s); + + s->io_ops = &sdhci_mmio_ops; +} + +void sdhci_uninitfn(SDHCIState *s) +{ + timer_free(s->insert_timer); + timer_free(s->transfer_timer); + + g_free(s->fifo_buffer); + s->fifo_buffer = NULL; +} + +void sdhci_common_realize(SDHCIState *s, Error **errp) +{ + ERRP_GUARD(); + + sdhci_init_readonly_registers(s, errp); + if (*errp) { + return; + } + s->buf_maxsz = sdhci_get_fifolen(s); + s->fifo_buffer = g_malloc0(s->buf_maxsz); + + memory_region_init_io(&s->iomem, OBJECT(s), s->io_ops, s, "sdhci", + SDHC_REGISTERS_MAP_SIZE); +} + +void sdhci_common_unrealize(SDHCIState *s) +{ + /* This function is expected to be called only once for each class: + * - SysBus: via DeviceClass->unrealize(), + * - PCI: via PCIDeviceClass->exit(). 
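Stepping back to the MMIO accessors above: sdhci_write() merges 1-, 2- and 4-byte stores into 32-bit registers with a shift/mask pair (shift = 8 * (offset & 3), plus a merge mask) before applying MASKED_WRITE(). A minimal standalone sketch of that byte-lane merging, with the macro copied from this file:

    #include <stdint.h>
    #include <stdio.h>

    #define MASKED_WRITE(reg, mask, val) ((reg) = ((reg) & (mask)) | (val))

    /* Merge a 'size'-byte store at 'offset' into a 32-bit register. */
    static void subword_write(uint32_t *reg, unsigned offset, unsigned size,
                              uint32_t val)
    {
        unsigned shift = 8 * (offset & 0x3);
        uint32_t mask = ~(((1ULL << (size * 8)) - 1) << shift);

        MASKED_WRITE(*reg, mask, val << shift);
    }

    int main(void)
    {
        uint32_t reg = 0x11223344;

        subword_write(&reg, 2, 1, 0xAB); /* byte store to the third lane */
        printf("0x%08x\n", reg);         /* prints 0x11AB3344 */
        return 0;
    }

For a full 4-byte store the mask degenerates to zero, so the new value simply replaces the register, which is why the same path serves all access sizes.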
+ * However to avoid double-free and/or use-after-free we still nullify + * this variable (better safe than sorry!). */ + g_free(s->fifo_buffer); + s->fifo_buffer = NULL; +} + +static bool sdhci_pending_insert_vmstate_needed(void *opaque) +{ + SDHCIState *s = opaque; + + return s->pending_insert_state; +} + +static const VMStateDescription sdhci_pending_insert_vmstate = { + .name = "sdhci/pending-insert", + .version_id = 1, + .minimum_version_id = 1, + .needed = sdhci_pending_insert_vmstate_needed, + .fields = (VMStateField[]) { + VMSTATE_BOOL(pending_insert_state, SDHCIState), + VMSTATE_END_OF_LIST() + }, +}; + +const VMStateDescription sdhci_vmstate = { + .name = "sdhci", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(sdmasysad, SDHCIState), + VMSTATE_UINT16(blksize, SDHCIState), + VMSTATE_UINT16(blkcnt, SDHCIState), + VMSTATE_UINT32(argument, SDHCIState), + VMSTATE_UINT16(trnmod, SDHCIState), + VMSTATE_UINT16(cmdreg, SDHCIState), + VMSTATE_UINT32_ARRAY(rspreg, SDHCIState, 4), + VMSTATE_UINT32(prnsts, SDHCIState), + VMSTATE_UINT8(hostctl1, SDHCIState), + VMSTATE_UINT8(pwrcon, SDHCIState), + VMSTATE_UINT8(blkgap, SDHCIState), + VMSTATE_UINT8(wakcon, SDHCIState), + VMSTATE_UINT16(clkcon, SDHCIState), + VMSTATE_UINT8(timeoutcon, SDHCIState), + VMSTATE_UINT8(admaerr, SDHCIState), + VMSTATE_UINT16(norintsts, SDHCIState), + VMSTATE_UINT16(errintsts, SDHCIState), + VMSTATE_UINT16(norintstsen, SDHCIState), + VMSTATE_UINT16(errintstsen, SDHCIState), + VMSTATE_UINT16(norintsigen, SDHCIState), + VMSTATE_UINT16(errintsigen, SDHCIState), + VMSTATE_UINT16(acmd12errsts, SDHCIState), + VMSTATE_UINT16(data_count, SDHCIState), + VMSTATE_UINT64(admasysaddr, SDHCIState), + VMSTATE_UINT8(stopped_state, SDHCIState), + VMSTATE_VBUFFER_UINT32(fifo_buffer, SDHCIState, 1, NULL, buf_maxsz), + VMSTATE_TIMER_PTR(insert_timer, SDHCIState), + VMSTATE_TIMER_PTR(transfer_timer, SDHCIState), + VMSTATE_END_OF_LIST() + }, + .subsections = (const VMStateDescription*[]) { + &sdhci_pending_insert_vmstate, + NULL + }, +}; + +void sdhci_common_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); + dc->vmsd = &sdhci_vmstate; + dc->reset = sdhci_poweron_reset; +} + +/* --- qdev SysBus --- */ + +static Property sdhci_sysbus_properties[] = { + DEFINE_SDHCI_COMMON_PROPERTIES(SDHCIState), + DEFINE_PROP_BOOL("pending-insert-quirk", SDHCIState, pending_insert_quirk, + false), + DEFINE_PROP_LINK("dma", SDHCIState, + dma_mr, TYPE_MEMORY_REGION, MemoryRegion *), + DEFINE_PROP_END_OF_LIST(), +}; + +static void sdhci_sysbus_init(Object *obj) +{ + SDHCIState *s = SYSBUS_SDHCI(obj); + + sdhci_initfn(s); +} + +static void sdhci_sysbus_finalize(Object *obj) +{ + SDHCIState *s = SYSBUS_SDHCI(obj); + + if (s->dma_mr) { + object_unparent(OBJECT(s->dma_mr)); + } + + sdhci_uninitfn(s); +} + +static void sdhci_sysbus_realize(DeviceState *dev, Error **errp) +{ + ERRP_GUARD(); + SDHCIState *s = SYSBUS_SDHCI(dev); + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + + sdhci_common_realize(s, errp); + if (*errp) { + return; + } + + if (s->dma_mr) { + s->dma_as = &s->sysbus_dma_as; + address_space_init(s->dma_as, s->dma_mr, "sdhci-dma"); + } else { + /* use system_memory() if property "dma" not set */ + s->dma_as = &address_space_memory; + } + + sysbus_init_irq(sbd, &s->irq); + + sysbus_init_mmio(sbd, &s->iomem); +} + +static void sdhci_sysbus_unrealize(DeviceState *dev) +{ + SDHCIState *s = SYSBUS_SDHCI(dev); + + 
sdhci_common_unrealize(s); + + if (s->dma_mr) { + address_space_destroy(s->dma_as); + } +} + +static void sdhci_sysbus_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + device_class_set_props(dc, sdhci_sysbus_properties); + dc->realize = sdhci_sysbus_realize; + dc->unrealize = sdhci_sysbus_unrealize; + + sdhci_common_class_init(klass, data); +} + +static const TypeInfo sdhci_sysbus_info = { + .name = TYPE_SYSBUS_SDHCI, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(SDHCIState), + .instance_init = sdhci_sysbus_init, + .instance_finalize = sdhci_sysbus_finalize, + .class_init = sdhci_sysbus_class_init, +}; + +/* --- qdev bus master --- */ + +static void sdhci_bus_class_init(ObjectClass *klass, void *data) +{ + SDBusClass *sbc = SD_BUS_CLASS(klass); + + sbc->set_inserted = sdhci_set_inserted; + sbc->set_readonly = sdhci_set_readonly; +} + +static const TypeInfo sdhci_bus_info = { + .name = TYPE_SDHCI_BUS, + .parent = TYPE_SD_BUS, + .instance_size = sizeof(SDBus), + .class_init = sdhci_bus_class_init, +}; + +/* --- qdev i.MX eSDHC --- */ + +static uint64_t usdhc_read(void *opaque, hwaddr offset, unsigned size) +{ + SDHCIState *s = SYSBUS_SDHCI(opaque); + uint32_t ret; + uint16_t hostctl1; + + switch (offset) { + default: + return sdhci_read(opaque, offset, size); + + case SDHC_HOSTCTL: + /* + * For a detailed explanation on the following bit + * manipulation code see comments in a similar part of + * usdhc_write() + */ + hostctl1 = SDHC_DMA_TYPE(s->hostctl1) << (8 - 3); + + if (s->hostctl1 & SDHC_CTRL_8BITBUS) { + hostctl1 |= ESDHC_CTRL_8BITBUS; + } + + if (s->hostctl1 & SDHC_CTRL_4BITBUS) { + hostctl1 |= ESDHC_CTRL_4BITBUS; + } + + ret = hostctl1; + ret |= (uint32_t)s->blkgap << 16; + ret |= (uint32_t)s->wakcon << 24; + + break; + + case SDHC_PRNSTS: + /* Add SDSTB (SD Clock Stable) bit to PRNSTS */ + ret = sdhci_read(opaque, offset, size) & ~ESDHC_PRNSTS_SDSTB; + if (s->clkcon & SDHC_CLOCK_INT_STABLE) { + ret |= ESDHC_PRNSTS_SDSTB; + } + break; + + case ESDHC_VENDOR_SPEC: + ret = s->vendor_spec; + break; + case ESDHC_DLL_CTRL: + case ESDHC_TUNE_CTRL_STATUS: + case ESDHC_UNDOCUMENTED_REG27: + case ESDHC_TUNING_CTRL: + case ESDHC_MIX_CTRL: + case ESDHC_WTMK_LVL: + ret = 0; + break; + } + + return ret; +} + +static void +usdhc_write(void *opaque, hwaddr offset, uint64_t val, unsigned size) +{ + SDHCIState *s = SYSBUS_SDHCI(opaque); + uint8_t hostctl1; + uint32_t value = (uint32_t)val; + + switch (offset) { + case ESDHC_DLL_CTRL: + case ESDHC_TUNE_CTRL_STATUS: + case ESDHC_UNDOCUMENTED_REG27: + case ESDHC_TUNING_CTRL: + case ESDHC_WTMK_LVL: + break; + + case ESDHC_VENDOR_SPEC: + s->vendor_spec = value; + switch (s->vendor) { + case SDHCI_VENDOR_IMX: + if (value & ESDHC_IMX_FRC_SDCLK_ON) { + s->prnsts &= ~SDHC_IMX_CLOCK_GATE_OFF; + } else { + s->prnsts |= SDHC_IMX_CLOCK_GATE_OFF; + } + break; + default: + break; + } + break; + + case SDHC_HOSTCTL: + /* + * Here's What ESDHCI has at offset 0x28 (SDHC_HOSTCTL) + * + * 7 6 5 4 3 2 1 0 + * |-----------+--------+--------+-----------+----------+---------| + * | Card | Card | Endian | DATA3 | Data | Led | + * | Detect | Detect | Mode | as Card | Transfer | Control | + * | Signal | Test | | Detection | Width | | + * | Selection | Level | | Pin | | | + * |-----------+--------+--------+-----------+----------+---------| + * + * and 0x29 + * + * 15 10 9 8 + * |----------+------| + * | Reserved | DMA | + * | | Sel. 
|
+         * |          |      |
+         * |----------+------|
+         *
+         * and here's what the SDHCI spec expects those offsets to be:
+         *
+         * 0x28 (Host Control Register)
+         *
+         *   7        6        5         4      3        2         1      0
+         * |--------+--------+----------+------+--------+----------+---------|
+         * | Card   | Card   | Extended | DMA  | High   | Data     | LED     |
+         * | Detect | Detect | Data     | Sel. | Speed  | Transfer | Control |
+         * | Signal | Test   | Transfer |      | Enable | Width    |         |
+         * | Sel.   | Level  | Width    |      |        |          |         |
+         * |--------+--------+----------+------+--------+----------+---------|
+         *
+         * and 0x29 (Power Control Register)
+         *
+         * |----------------------------------|
+         * | Power Control Register           |
+         * |                                  |
+         * | Description omitted,             |
+         * | since it has no analog in ESDHCI |
+         * |                                  |
+         * |----------------------------------|
+         *
+         * Since offsets 0x2A and 0x2B should be compatible between
+         * both IP specs we only need to reconcile the least-significant
+         * 16 bits of the word we've been given.
+         */
+
+        /*
+         * First, save bits 7, 6 and 0 since they are identical
+         */
+        hostctl1 = value & (SDHC_CTRL_LED |
+                            SDHC_CTRL_CDTEST_INS |
+                            SDHC_CTRL_CDTEST_EN);
+        /*
+         * Second, split "Data Transfer Width" from bits 2 and 1 into
+         * bits 5 and 1
+         */
+        if (value & ESDHC_CTRL_8BITBUS) {
+            hostctl1 |= SDHC_CTRL_8BITBUS;
+        }
+
+        if (value & ESDHC_CTRL_4BITBUS) {
+            hostctl1 |= SDHC_CTRL_4BITBUS;
+        }
+
+        /*
+         * Third, move DMA select from bits 9 and 8 to bits 4 and 3
+         */
+        hostctl1 |= SDHC_DMA_TYPE(value >> (8 - 3));
+
+        /*
+         * Now place the corrected value into the low 16 bits of the value
+         * we are going to give the standard SDHCI write function
+         *
+         * NOTE: This transformation should be the inverse of what can
+         * be found in drivers/mmc/host/sdhci-esdhc-imx.c in the Linux
+         * kernel
+         */
+        value &= ~UINT16_MAX;
+        value |= hostctl1;
+        value |= (uint16_t)s->pwrcon << 8;
+
+        sdhci_write(opaque, offset, value, size);
+        break;
+
+    case ESDHC_MIX_CTRL:
+        /*
+         * So, when the SD/MMC stack in Linux tries to write to the
+         * "Transfer Mode Register", the ESDHC i.MX quirk code will
+         * translate it into a write to ESDHC_MIX_CTRL, so we do the
+         * opposite in order to get back where we started
+         *
+         * Note that the Auto CMD23 Enable bit is located in a wrong place
+         * on i.MX, but since it is not used by QEMU we do not care.
+         *
+         * We don't want to call sdhci_write(.., SDHC_TRNMOD, ...)
+         * here because it will result in a call to
+         * sdhci_send_command(s) which we don't want.
+         *
+         */
+        s->trnmod = value & UINT16_MAX;
+        break;
+    case SDHC_TRNMOD:
+        /*
+         * Similar to above, but this time a write to the "Command
+         * Register" will be translated into a 4-byte write to the
+         * "Transfer Mode register" where the lower 16 bits of the value
+         * would be set to zero.  So what we do is fill those bits with
+         * the cached value from s->trnmod and let the SDHCI
+         * infrastructure handle the rest
+         */
+        sdhci_write(opaque, offset, val | s->trnmod, size);
+        break;
+    case SDHC_BLKSIZE:
+        /*
+         * ESDHCI does not implement "Host SDMA Buffer Boundary", and
+         * the Linux driver will try to zero this field out, which would
+         * break the rest of the SDHCI emulation.
+         *
+         * Linux defaults to the maximum possible setting (512K boundary)
+         * and it seems to be the only option that the i.MX IP implements,
+         * so we artificially set it to that value.
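+         *
+         * As a concrete illustration of the fix-up below: a guest write
+         * of 0x0200 (512-byte transfer block, SDMA boundary bits [14:12]
+         * zeroed) becomes 0x7200, since ORing in 0x7 << 12 forces the
+         * 512K boundary encoding into bits 14:12 while leaving the block
+         * size in bits 11:0 untouched.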
+ */ + val |= 0x7 << 12; + /* FALLTHROUGH */ + default: + sdhci_write(opaque, offset, val, size); + break; + } +} + +static const MemoryRegionOps usdhc_mmio_ops = { + .read = usdhc_read, + .write = usdhc_write, + .valid = { + .min_access_size = 1, + .max_access_size = 4, + .unaligned = false + }, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static void imx_usdhc_init(Object *obj) +{ + SDHCIState *s = SYSBUS_SDHCI(obj); + + s->io_ops = &usdhc_mmio_ops; + s->quirks = SDHCI_QUIRK_NO_BUSY_IRQ; +} + +static const TypeInfo imx_usdhc_info = { + .name = TYPE_IMX_USDHC, + .parent = TYPE_SYSBUS_SDHCI, + .instance_init = imx_usdhc_init, +}; + +/* --- qdev Samsung s3c --- */ + +#define S3C_SDHCI_CONTROL2 0x80 +#define S3C_SDHCI_CONTROL3 0x84 +#define S3C_SDHCI_CONTROL4 0x8c + +static uint64_t sdhci_s3c_read(void *opaque, hwaddr offset, unsigned size) +{ + uint64_t ret; + + switch (offset) { + case S3C_SDHCI_CONTROL2: + case S3C_SDHCI_CONTROL3: + case S3C_SDHCI_CONTROL4: + /* ignore */ + ret = 0; + break; + default: + ret = sdhci_read(opaque, offset, size); + break; + } + + return ret; +} + +static void sdhci_s3c_write(void *opaque, hwaddr offset, uint64_t val, + unsigned size) +{ + switch (offset) { + case S3C_SDHCI_CONTROL2: + case S3C_SDHCI_CONTROL3: + case S3C_SDHCI_CONTROL4: + /* ignore */ + break; + default: + sdhci_write(opaque, offset, val, size); + break; + } +} + +static const MemoryRegionOps sdhci_s3c_mmio_ops = { + .read = sdhci_s3c_read, + .write = sdhci_s3c_write, + .valid = { + .min_access_size = 1, + .max_access_size = 4, + .unaligned = false + }, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static void sdhci_s3c_init(Object *obj) +{ + SDHCIState *s = SYSBUS_SDHCI(obj); + + s->io_ops = &sdhci_s3c_mmio_ops; +} + +static const TypeInfo sdhci_s3c_info = { + .name = TYPE_S3C_SDHCI , + .parent = TYPE_SYSBUS_SDHCI, + .instance_init = sdhci_s3c_init, +}; + +static void sdhci_register_types(void) +{ + type_register_static(&sdhci_sysbus_info); + type_register_static(&sdhci_bus_info); + type_register_static(&imx_usdhc_info); + type_register_static(&sdhci_s3c_info); +} + +type_init(sdhci_register_types) diff --git a/hw/sd/sdmmc-internal.c b/hw/sd/sdmmc-internal.c new file mode 100644 index 000000000..2053def3f --- /dev/null +++ b/hw/sd/sdmmc-internal.c @@ -0,0 +1,72 @@ +/* + * SD/MMC cards common helpers + * + * Copyright (c) 2018 Philippe Mathieu-Daudé + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "sdmmc-internal.h" + +const char *sd_cmd_name(uint8_t cmd) +{ + static const char *cmd_abbrev[SDMMC_CMD_MAX] = { + [0] = "GO_IDLE_STATE", + [2] = "ALL_SEND_CID", [3] = "SEND_RELATIVE_ADDR", + [4] = "SET_DSR", [5] = "IO_SEND_OP_COND", + [6] = "SWITCH_FUNC", [7] = "SELECT/DESELECT_CARD", + [8] = "SEND_IF_COND", [9] = "SEND_CSD", + [10] = "SEND_CID", [11] = "VOLTAGE_SWITCH", + [12] = "STOP_TRANSMISSION", [13] = "SEND_STATUS", + [15] = "GO_INACTIVE_STATE", + [16] = "SET_BLOCKLEN", [17] = "READ_SINGLE_BLOCK", + [18] = "READ_MULTIPLE_BLOCK", [19] = "SEND_TUNING_BLOCK", + [20] = "SPEED_CLASS_CONTROL", [21] = "DPS_spec", + [23] = "SET_BLOCK_COUNT", + [24] = "WRITE_BLOCK", [25] = "WRITE_MULTIPLE_BLOCK", + [26] = "MANUF_RSVD", [27] = "PROGRAM_CSD", + [28] = "SET_WRITE_PROT", [29] = "CLR_WRITE_PROT", + [30] = "SEND_WRITE_PROT", + [32] = "ERASE_WR_BLK_START", [33] = "ERASE_WR_BLK_END", + [34] = "SW_FUNC_RSVD", [35] = "SW_FUNC_RSVD", + [36] = "SW_FUNC_RSVD", [37] = "SW_FUNC_RSVD", + [38] = "ERASE", + [40] = "DPS_spec", + [42] = "LOCK_UNLOCK", [43] = "Q_MANAGEMENT", + [44] = "Q_TASK_INFO_A", [45] = "Q_TASK_INFO_B", + [46] = "Q_RD_TASK", [47] = "Q_WR_TASK", + [48] = "READ_EXTR_SINGLE", [49] = "WRITE_EXTR_SINGLE", + [50] = "SW_FUNC_RSVD", + [52] = "IO_RW_DIRECT", [53] = "IO_RW_EXTENDED", + [54] = "SDIO_RSVD", [55] = "APP_CMD", + [56] = "GEN_CMD", [57] = "SW_FUNC_RSVD", + [58] = "READ_EXTR_MULTI", [59] = "WRITE_EXTR_MULTI", + [60] = "MANUF_RSVD", [61] = "MANUF_RSVD", + [62] = "MANUF_RSVD", [63] = "MANUF_RSVD", + }; + return cmd_abbrev[cmd] ? cmd_abbrev[cmd] : "UNKNOWN_CMD"; +} + +const char *sd_acmd_name(uint8_t cmd) +{ + static const char *acmd_abbrev[SDMMC_CMD_MAX] = { + [6] = "SET_BUS_WIDTH", + [13] = "SD_STATUS", + [14] = "DPS_spec", [15] = "DPS_spec", + [16] = "DPS_spec", + [18] = "SECU_spec", + [22] = "SEND_NUM_WR_BLOCKS", [23] = "SET_WR_BLK_ERASE_COUNT", + [41] = "SD_SEND_OP_COND", + [42] = "SET_CLR_CARD_DETECT", + [51] = "SEND_SCR", + [52] = "SECU_spec", [53] = "SECU_spec", + [54] = "SECU_spec", + [56] = "SECU_spec", [57] = "SECU_spec", + [58] = "SECU_spec", [59] = "SECU_spec", + }; + + return acmd_abbrev[cmd] ? acmd_abbrev[cmd] : "UNKNOWN_ACMD"; +} diff --git a/hw/sd/sdmmc-internal.h b/hw/sd/sdmmc-internal.h new file mode 100644 index 000000000..d8bf17d20 --- /dev/null +++ b/hw/sd/sdmmc-internal.h @@ -0,0 +1,40 @@ +/* + * SD/MMC cards common + * + * Copyright (c) 2018 Philippe Mathieu-Daudé + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef SDMMC_INTERNAL_H +#define SDMMC_INTERNAL_H + +#define SDMMC_CMD_MAX 64 + +/** + * sd_cmd_name: + * @cmd: A SD "normal" command, up to SDMMC_CMD_MAX. + * + * Returns a human-readable name describing the command. + * The return value is always a static string which does not need + * to be freed after use. + * + * Returns: The command name of @cmd or "UNKNOWN_CMD". + */ +const char *sd_cmd_name(uint8_t cmd); + +/** + * sd_acmd_name: + * @cmd: A SD "Application-Specific" command, up to SDMMC_CMD_MAX. + * + * Returns a human-readable name describing the application command. + * The return value is always a static string which does not need + * to be freed after use. + * + * Returns: The application command name of @cmd or "UNKNOWN_ACMD". 
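+ *
+ * A couple of illustrative lookups against the table above:
+ *   sd_acmd_name(41) => "SD_SEND_OP_COND"
+ *   sd_acmd_name(51) => "SEND_SCR"
+ *   sd_acmd_name(1)  => "UNKNOWN_ACMD"   (slot 1 is unassigned)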
+ */ +const char *sd_acmd_name(uint8_t cmd); + +#endif diff --git a/hw/sd/ssi-sd.c b/hw/sd/ssi-sd.c new file mode 100644 index 000000000..e60854eee --- /dev/null +++ b/hw/sd/ssi-sd.c @@ -0,0 +1,445 @@ +/* + * SSI to SD card adapter. + * + * Copyright (c) 2007-2009 CodeSourcery. + * Written by Paul Brook + * + * Copyright (c) 2021 Wind River Systems, Inc. + * Improved by Bin Meng + * + * Validated with U-Boot v2021.01 and Linux v5.10 mmc_spi driver + * + * This code is licensed under the GNU GPL v2. + * + * Contributions after 2012-01-13 are licensed under the terms of the + * GNU GPL, version 2 or (at your option) any later version. + */ + +#include "qemu/osdep.h" +#include "sysemu/blockdev.h" +#include "hw/ssi/ssi.h" +#include "migration/vmstate.h" +#include "hw/qdev-properties.h" +#include "hw/sd/sd.h" +#include "qapi/error.h" +#include "qemu/crc-ccitt.h" +#include "qemu/module.h" +#include "qom/object.h" + +//#define DEBUG_SSI_SD 1 + +#ifdef DEBUG_SSI_SD +#define DPRINTF(fmt, ...) \ +do { printf("ssi_sd: " fmt , ## __VA_ARGS__); } while (0) +#define BADF(fmt, ...) \ +do { fprintf(stderr, "ssi_sd: error: " fmt , ## __VA_ARGS__); exit(1);} while (0) +#else +#define DPRINTF(fmt, ...) do {} while(0) +#define BADF(fmt, ...) \ +do { fprintf(stderr, "ssi_sd: error: " fmt , ## __VA_ARGS__);} while (0) +#endif + +typedef enum { + SSI_SD_CMD = 0, + SSI_SD_CMDARG, + SSI_SD_PREP_RESP, + SSI_SD_RESPONSE, + SSI_SD_PREP_DATA, + SSI_SD_DATA_START, + SSI_SD_DATA_READ, + SSI_SD_DATA_CRC16, + SSI_SD_DATA_WRITE, + SSI_SD_SKIP_CRC16, +} ssi_sd_mode; + +struct ssi_sd_state { + SSIPeripheral ssidev; + uint32_t mode; + int cmd; + uint8_t cmdarg[4]; + uint8_t response[5]; + uint16_t crc16; + int32_t read_bytes; + int32_t write_bytes; + int32_t arglen; + int32_t response_pos; + int32_t stopping; + SDBus sdbus; +}; + +#define TYPE_SSI_SD "ssi-sd" +OBJECT_DECLARE_SIMPLE_TYPE(ssi_sd_state, SSI_SD) + +/* State word bits. */ +#define SSI_SDR_LOCKED 0x0001 +#define SSI_SDR_WP_ERASE 0x0002 +#define SSI_SDR_ERROR 0x0004 +#define SSI_SDR_CC_ERROR 0x0008 +#define SSI_SDR_ECC_FAILED 0x0010 +#define SSI_SDR_WP_VIOLATION 0x0020 +#define SSI_SDR_ERASE_PARAM 0x0040 +#define SSI_SDR_OUT_OF_RANGE 0x0080 +#define SSI_SDR_IDLE 0x0100 +#define SSI_SDR_ERASE_RESET 0x0200 +#define SSI_SDR_ILLEGAL_COMMAND 0x0400 +#define SSI_SDR_COM_CRC_ERROR 0x0800 +#define SSI_SDR_ERASE_SEQ_ERROR 0x1000 +#define SSI_SDR_ADDRESS_ERROR 0x2000 +#define SSI_SDR_PARAMETER_ERROR 0x4000 + +/* multiple block write */ +#define SSI_TOKEN_MULTI_WRITE 0xfc +/* terminate multiple block write */ +#define SSI_TOKEN_STOP_TRAN 0xfd +/* single block read/write, multiple block read */ +#define SSI_TOKEN_SINGLE 0xfe + +/* dummy value - don't care */ +#define SSI_DUMMY 0xff + +/* data accepted */ +#define DATA_RESPONSE_ACCEPTED 0x05 + +static uint32_t ssi_sd_transfer(SSIPeripheral *dev, uint32_t val) +{ + ssi_sd_state *s = SSI_SD(dev); + SDRequest request; + uint8_t longresp[16]; + + /* + * Special case: allow CMD12 (STOP TRANSMISSION) while reading data. + * + * See "Physical Layer Specification Version 8.00" chapter 7.5.2.2, + * to avoid conflict between CMD12 response and next data block, + * timing of CMD12 should be controlled as follows: + * + * - CMD12 issued at the timing that end bit of CMD12 and end bit of + * data block is overlapped + * - CMD12 issued after one clock cycle after host receives a token + * (either Start Block token or Data Error token) + * + * We need to catch CMD12 in all of the data read states. 
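+     *
+     * One framing detail worth spelling out (an aside; the SD spec is
+     * the authority here): an SPI command byte is encoded as 0b01nnnnnn
+     * (start bit 0, transmission bit 1, 6-bit command index), so CMD12
+     * appears on the wire as 0x40 | 12 == 0x4c, the exact byte value
+     * tested below.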
+     */
+    if (s->mode >= SSI_SD_PREP_DATA && s->mode <= SSI_SD_DATA_CRC16) {
+        if (val == 0x4c) {
+            s->mode = SSI_SD_CMD;
+            /* There must be at least one byte delay before the card responds */
+            s->stopping = 1;
+        }
+    }
+
+    switch (s->mode) {
+    case SSI_SD_CMD:
+        switch (val) {
+        case SSI_DUMMY:
+            DPRINTF("NULL command\n");
+            return SSI_DUMMY;
+        case SSI_TOKEN_SINGLE:
+        case SSI_TOKEN_MULTI_WRITE:
+            DPRINTF("Start write block\n");
+            s->mode = SSI_SD_DATA_WRITE;
+            return SSI_DUMMY;
+        case SSI_TOKEN_STOP_TRAN:
+            DPRINTF("Stop multiple write\n");
+
+            /* manually issue cmd12 to stop the transfer */
+            request.cmd = 12;
+            request.arg = 0;
+            s->arglen = sdbus_do_command(&s->sdbus, &request, longresp);
+            if (s->arglen <= 0) {
+                s->arglen = 1;
+                /* a zero value indicates the card is busy */
+                s->response[0] = 0;
+                DPRINTF("SD card busy\n");
+            } else {
+                s->arglen = 1;
+                /* a non-zero value indicates the card is ready */
+                s->response[0] = SSI_DUMMY;
+            }
+
+            return SSI_DUMMY;
+        }
+
+        s->cmd = val & 0x3f;
+        s->mode = SSI_SD_CMDARG;
+        s->arglen = 0;
+        return SSI_DUMMY;
+    case SSI_SD_CMDARG:
+        if (s->arglen == 4) {
+            /* FIXME: Check CRC. */
+            request.cmd = s->cmd;
+            request.arg = ldl_be_p(s->cmdarg);
+            DPRINTF("CMD%d arg 0x%08x\n", s->cmd, request.arg);
+            s->arglen = sdbus_do_command(&s->sdbus, &request, longresp);
+            if (s->arglen <= 0) {
+                s->arglen = 1;
+                s->response[0] = 4;
+                DPRINTF("SD command failed\n");
+            } else if (s->cmd == 8 || s->cmd == 58) {
+                /* CMD8 and CMD58 return R7/R3 responses */
+                DPRINTF("Returned R3/R7\n");
+                s->arglen = 5;
+                s->response[0] = 1;
+                memcpy(&s->response[1], longresp, 4);
+            } else if (s->arglen != 4) {
+                BADF("Unexpected response to cmd %d\n", s->cmd);
+                /* Illegal command is about as near as we can get. */
+                s->arglen = 1;
+                s->response[0] = 4;
+            } else {
+                /* All other commands return status. */
+                uint32_t cardstatus;
+                uint16_t status;
+                /* CMD13 returns a 2-byte status word.  Other commands
+                   only return the first byte. */
+                s->arglen = (s->cmd == 13) ? 2 : 1;
+
+                /* handle R1b */
+                if (s->cmd == 28 || s->cmd == 29 || s->cmd == 38) {
+                    s->stopping = 1;
+                }
+
+                cardstatus = ldl_be_p(longresp);
+                status = 0;
+                if (((cardstatus >> 9) & 0xf) < 4)
+                    status |= SSI_SDR_IDLE;
+                if (cardstatus & ERASE_RESET)
+                    status |= SSI_SDR_ERASE_RESET;
+                if (cardstatus & ILLEGAL_COMMAND)
+                    status |= SSI_SDR_ILLEGAL_COMMAND;
+                if (cardstatus & COM_CRC_ERROR)
+                    status |= SSI_SDR_COM_CRC_ERROR;
+                if (cardstatus & ERASE_SEQ_ERROR)
+                    status |= SSI_SDR_ERASE_SEQ_ERROR;
+                if (cardstatus & ADDRESS_ERROR)
+                    status |= SSI_SDR_ADDRESS_ERROR;
+                if (cardstatus & CARD_IS_LOCKED)
+                    status |= SSI_SDR_LOCKED;
+                if (cardstatus & (LOCK_UNLOCK_FAILED | WP_ERASE_SKIP))
+                    status |= SSI_SDR_WP_ERASE;
+                if (cardstatus & SD_ERROR)
+                    status |= SSI_SDR_ERROR;
+                if (cardstatus & CC_ERROR)
+                    status |= SSI_SDR_CC_ERROR;
+                if (cardstatus & CARD_ECC_FAILED)
+                    status |= SSI_SDR_ECC_FAILED;
+                if (cardstatus & WP_VIOLATION)
+                    status |= SSI_SDR_WP_VIOLATION;
+                if (cardstatus & ERASE_PARAM)
+                    status |= SSI_SDR_ERASE_PARAM;
+                if (cardstatus & (OUT_OF_RANGE | CID_CSD_OVERWRITE))
+                    status |= SSI_SDR_OUT_OF_RANGE;
+                /* ??? Don't know what Parameter Error really means, so
+                   assume it's set if the second byte is nonzero.
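+                   To make the byte positions concrete: response[0] takes
+                   the high byte of the 16-bit status built above, so
+                   SSI_SDR_IDLE (0x0100) surfaces as bit 0 of the first
+                   R2 byte, while e.g. SSI_SDR_OUT_OF_RANGE (0x0080)
+                   lands in bit 7 of the second byte, matching the
+                   SPI-mode R2 layout.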
*/ + if (status & 0xff) + status |= SSI_SDR_PARAMETER_ERROR; + s->response[0] = status >> 8; + s->response[1] = status; + DPRINTF("Card status 0x%02x\n", status); + } + s->mode = SSI_SD_PREP_RESP; + s->response_pos = 0; + } else { + s->cmdarg[s->arglen++] = val; + } + return SSI_DUMMY; + case SSI_SD_PREP_RESP: + DPRINTF("Prepare card response (Ncr)\n"); + s->mode = SSI_SD_RESPONSE; + return SSI_DUMMY; + case SSI_SD_RESPONSE: + if (s->response_pos < s->arglen) { + DPRINTF("Response 0x%02x\n", s->response[s->response_pos]); + return s->response[s->response_pos++]; + } + if (s->stopping) { + s->stopping = 0; + s->mode = SSI_SD_CMD; + return SSI_DUMMY; + } + if (sdbus_data_ready(&s->sdbus)) { + DPRINTF("Data read\n"); + s->mode = SSI_SD_DATA_START; + } else { + DPRINTF("End of command\n"); + s->mode = SSI_SD_CMD; + } + return SSI_DUMMY; + case SSI_SD_PREP_DATA: + DPRINTF("Prepare data block (Nac)\n"); + s->mode = SSI_SD_DATA_START; + return SSI_DUMMY; + case SSI_SD_DATA_START: + DPRINTF("Start read block\n"); + s->mode = SSI_SD_DATA_READ; + s->response_pos = 0; + return SSI_TOKEN_SINGLE; + case SSI_SD_DATA_READ: + val = sdbus_read_byte(&s->sdbus); + s->read_bytes++; + s->crc16 = crc_ccitt_false(s->crc16, (uint8_t *)&val, 1); + if (!sdbus_data_ready(&s->sdbus) || s->read_bytes == 512) { + DPRINTF("Data read end\n"); + s->mode = SSI_SD_DATA_CRC16; + } + return val; + case SSI_SD_DATA_CRC16: + val = (s->crc16 & 0xff00) >> 8; + s->crc16 <<= 8; + s->response_pos++; + if (s->response_pos == 2) { + DPRINTF("CRC16 read end\n"); + if (s->read_bytes == 512 && s->cmd != 17) { + s->mode = SSI_SD_PREP_DATA; + } else { + s->mode = SSI_SD_CMD; + } + s->read_bytes = 0; + s->response_pos = 0; + } + return val; + case SSI_SD_DATA_WRITE: + sdbus_write_byte(&s->sdbus, val); + s->write_bytes++; + if (!sdbus_receive_ready(&s->sdbus) || s->write_bytes == 512) { + DPRINTF("Data write end\n"); + s->mode = SSI_SD_SKIP_CRC16; + s->response_pos = 0; + } + return val; + case SSI_SD_SKIP_CRC16: + /* we don't verify the crc16 */ + s->response_pos++; + if (s->response_pos == 2) { + DPRINTF("CRC16 receive end\n"); + s->mode = SSI_SD_RESPONSE; + s->write_bytes = 0; + s->arglen = 1; + s->response[0] = DATA_RESPONSE_ACCEPTED; + s->response_pos = 0; + } + return SSI_DUMMY; + } + /* Should never happen. 
*/ + return SSI_DUMMY; +} + +static int ssi_sd_post_load(void *opaque, int version_id) +{ + ssi_sd_state *s = (ssi_sd_state *)opaque; + + if (s->mode > SSI_SD_SKIP_CRC16) { + return -EINVAL; + } + if (s->mode == SSI_SD_CMDARG && + (s->arglen < 0 || s->arglen >= ARRAY_SIZE(s->cmdarg))) { + return -EINVAL; + } + if (s->mode == SSI_SD_RESPONSE && + (s->response_pos < 0 || s->response_pos >= ARRAY_SIZE(s->response) || + (!s->stopping && s->arglen > ARRAY_SIZE(s->response)))) { + return -EINVAL; + } + + return 0; +} + +static const VMStateDescription vmstate_ssi_sd = { + .name = "ssi_sd", + .version_id = 7, + .minimum_version_id = 7, + .post_load = ssi_sd_post_load, + .fields = (VMStateField []) { + VMSTATE_UINT32(mode, ssi_sd_state), + VMSTATE_INT32(cmd, ssi_sd_state), + VMSTATE_UINT8_ARRAY(cmdarg, ssi_sd_state, 4), + VMSTATE_UINT8_ARRAY(response, ssi_sd_state, 5), + VMSTATE_UINT16(crc16, ssi_sd_state), + VMSTATE_INT32(read_bytes, ssi_sd_state), + VMSTATE_INT32(write_bytes, ssi_sd_state), + VMSTATE_INT32(arglen, ssi_sd_state), + VMSTATE_INT32(response_pos, ssi_sd_state), + VMSTATE_INT32(stopping, ssi_sd_state), + VMSTATE_SSI_PERIPHERAL(ssidev, ssi_sd_state), + VMSTATE_END_OF_LIST() + } +}; + +static void ssi_sd_realize(SSIPeripheral *d, Error **errp) +{ + ERRP_GUARD(); + ssi_sd_state *s = SSI_SD(d); + DeviceState *carddev; + DriveInfo *dinfo; + + qbus_init(&s->sdbus, sizeof(s->sdbus), TYPE_SD_BUS, DEVICE(d), "sd-bus"); + + /* Create and plug in the sd card */ + /* FIXME use a qdev drive property instead of drive_get_next() */ + dinfo = drive_get_next(IF_SD); + carddev = qdev_new(TYPE_SD_CARD); + if (dinfo) { + if (!qdev_prop_set_drive_err(carddev, "drive", + blk_by_legacy_dinfo(dinfo), errp)) { + goto fail; + } + } + + if (!object_property_set_bool(OBJECT(carddev), "spi", true, errp)) { + goto fail; + } + + if (!qdev_realize_and_unref(carddev, BUS(&s->sdbus), errp)) { + goto fail; + } + + return; + +fail: + error_prepend(errp, "failed to init SD card: "); +} + +static void ssi_sd_reset(DeviceState *dev) +{ + ssi_sd_state *s = SSI_SD(dev); + + s->mode = SSI_SD_CMD; + s->cmd = 0; + memset(s->cmdarg, 0, sizeof(s->cmdarg)); + memset(s->response, 0, sizeof(s->response)); + s->crc16 = 0; + s->read_bytes = 0; + s->write_bytes = 0; + s->arglen = 0; + s->response_pos = 0; + s->stopping = 0; +} + +static void ssi_sd_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + SSIPeripheralClass *k = SSI_PERIPHERAL_CLASS(klass); + + k->realize = ssi_sd_realize; + k->transfer = ssi_sd_transfer; + k->cs_polarity = SSI_CS_LOW; + dc->vmsd = &vmstate_ssi_sd; + dc->reset = ssi_sd_reset; + /* Reason: init() method uses drive_get_next() */ + dc->user_creatable = false; +} + +static const TypeInfo ssi_sd_info = { + .name = TYPE_SSI_SD, + .parent = TYPE_SSI_PERIPHERAL, + .instance_size = sizeof(ssi_sd_state), + .class_init = ssi_sd_class_init, +}; + +static void ssi_sd_register_types(void) +{ + type_register_static(&ssi_sd_info); +} + +type_init(ssi_sd_register_types) diff --git a/hw/sd/trace-events b/hw/sd/trace-events new file mode 100644 index 000000000..94a00557b --- /dev/null +++ b/hw/sd/trace-events @@ -0,0 +1,74 @@ +# See docs/devel/tracing.rst for syntax documentation. 
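+#
+# A quick reminder of how these declarations are consumed (summarized
+# from the tracing docs): tracetool turns each line into a C helper named
+# trace_<event>() with matching arguments, so a declaration such as
+#   sdhci_send_command(uint8_t cmd, uint32_t arg) "CMD%02u ARG[0x%08x]"
+# is called from device code as, illustratively:
+#   trace_sdhci_send_command(cmd, arg);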
+ +# allwinner-sdhost.c +allwinner_sdhost_set_inserted(bool inserted) "inserted %u" +allwinner_sdhost_process_desc(uint64_t desc_addr, uint32_t desc_size, bool is_write, uint32_t max_bytes) "desc_addr 0x%" PRIx64 " desc_size %" PRIu32 " is_write %u max_bytes %" PRIu32 +allwinner_sdhost_read(uint64_t offset, uint64_t data, unsigned size) "offset 0x%" PRIx64 " data 0x%" PRIx64 " size %" PRIu32 +allwinner_sdhost_write(uint64_t offset, uint64_t data, unsigned size) "offset 0x%" PRIx64 " data 0x%" PRIx64 " size %" PRIu32 +allwinner_sdhost_update_irq(uint32_t irq) "IRQ bits 0x%" PRIx32 + +# bcm2835_sdhost.c +bcm2835_sdhost_read(uint64_t offset, uint64_t data, unsigned size) "offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u" +bcm2835_sdhost_write(uint64_t offset, uint64_t data, unsigned size) "offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u" +bcm2835_sdhost_edm_change(const char *why, uint32_t edm) "(%s) EDM now 0x%x" +bcm2835_sdhost_update_irq(uint32_t irq) "IRQ bits 0x%x" + +# core.c +sdbus_command(const char *bus_name, uint8_t cmd, uint32_t arg) "@%s CMD%02d arg 0x%08x" +sdbus_read(const char *bus_name, uint8_t value) "@%s value 0x%02x" +sdbus_write(const char *bus_name, uint8_t value) "@%s value 0x%02x" +sdbus_set_voltage(const char *bus_name, uint16_t millivolts) "@%s %u (mV)" +sdbus_get_dat_lines(const char *bus_name, uint8_t dat_lines) "@%s dat_lines: %u" +sdbus_get_cmd_line(const char *bus_name, bool cmd_line) "@%s cmd_line: %u" + +# sdhci.c +sdhci_set_inserted(const char *level) "card state changed: %s" +sdhci_send_command(uint8_t cmd, uint32_t arg) "CMD%02u ARG[0x%08x]" +sdhci_error(const char *msg) "%s" +sdhci_response4(uint32_t r0) "RSPREG[31..0]=0x%08x" +sdhci_response16(uint32_t r3, uint32_t r2, uint32_t r1, uint32_t r0) "RSPREG[127..96]=0x%08x, RSPREG[95..64]=0x%08x, RSPREG[63..32]=0x%08x, RSPREG[31..0]=0x%08x" +sdhci_end_transfer(uint8_t cmd, uint32_t arg) "Automatically issue CMD%02u 0x%08x" +sdhci_adma(const char *desc, uint32_t sysad) "%s: admasysaddr=0x%" PRIx32 +sdhci_adma_loop(uint64_t addr, uint16_t length, uint8_t attr) "addr=0x%08" PRIx64 ", len=%d, attr=0x%x" +sdhci_adma_transfer_completed(void) "" +sdhci_access(const char *access, unsigned int size, uint64_t offset, const char *dir, uint64_t val, uint64_t val2) "%s%u: addr[0x%04" PRIx64 "] %s 0x%08" PRIx64 " (%" PRIu64 ")" +sdhci_read_dataport(uint16_t data_count) "all %u bytes of data have been read from input buffer" +sdhci_write_dataport(uint16_t data_count) "write buffer filled with %u bytes of data" +sdhci_capareg(const char *desc, uint16_t val) "%s: %u" + +# sd.c +sdcard_normal_command(const char *proto, const char *cmd_desc, uint8_t cmd, uint32_t arg, const char *state) "%s %20s/ CMD%02d arg 0x%08x (state %s)" +sdcard_app_command(const char *proto, const char *acmd_desc, uint8_t acmd, uint32_t arg, const char *state) "%s %23s/ACMD%02d arg 0x%08x (state %s)" +sdcard_response(const char *rspdesc, int rsplen) "%s (sz:%d)" +sdcard_powerup(void) "" +sdcard_inquiry_cmd41(void) "" +sdcard_reset(void) "" +sdcard_set_blocklen(uint16_t length) "0x%03x" +sdcard_inserted(bool readonly) "read_only: %u" +sdcard_ejected(void) "" +sdcard_erase(uint32_t first, uint32_t last) "addr first 0x%" PRIx32" last 0x%" PRIx32 +sdcard_lock(void) "" +sdcard_unlock(void) "" +sdcard_read_block(uint64_t addr, uint32_t len) "addr 0x%" PRIx64 " size 0x%x" +sdcard_write_block(uint64_t addr, uint32_t len) "addr 0x%" PRIx64 " size 0x%x" +sdcard_write_data(const char *proto, const char *cmd_desc, uint8_t cmd, uint8_t value) "%s %20s/ CMD%02d value 
0x%02x" +sdcard_read_data(const char *proto, const char *cmd_desc, uint8_t cmd, uint32_t length) "%s %20s/ CMD%02d len %" PRIu32 +sdcard_set_voltage(uint16_t millivolts) "%u mV" + +# pxa2xx_mmci.c +pxa2xx_mmci_read(uint8_t size, uint32_t addr, uint32_t value) "size %d addr 0x%02x value 0x%08x" +pxa2xx_mmci_write(uint8_t size, uint32_t addr, uint32_t value) "size %d addr 0x%02x value 0x%08x" + +# pl181.c +pl181_command_send(uint8_t cmd, uint32_t arg) "sending CMD%02d arg 0x%08" PRIx32 +pl181_command_sent(void) "command sent" +pl181_command_response_pending(void) "response received" +pl181_command_timeout(void) "command timeouted" +pl181_fifo_push(uint32_t data) "FIFO push 0x%08" PRIx32 +pl181_fifo_pop(uint32_t data) "FIFO pop 0x%08" PRIx32 +pl181_fifo_transfer_complete(void) "FIFO transfer complete" +pl181_data_engine_idle(void) "data engine idle" + +# aspeed_sdhci.c +aspeed_sdhci_read(uint64_t addr, uint32_t size, uint64_t data) "@0x%" PRIx64 " size %u: 0x%" PRIx64 +aspeed_sdhci_write(uint64_t addr, uint32_t size, uint64_t data) "@0x%" PRIx64 " size %u: 0x%" PRIx64 diff --git a/hw/sd/trace.h b/hw/sd/trace.h new file mode 100644 index 000000000..f3d0c5856 --- /dev/null +++ b/hw/sd/trace.h @@ -0,0 +1 @@ +#include "trace/trace-hw_sd.h" diff --git a/hw/sensor/Kconfig b/hw/sensor/Kconfig new file mode 100644 index 000000000..9c8a049b0 --- /dev/null +++ b/hw/sensor/Kconfig @@ -0,0 +1,23 @@ +config TMP105 + bool + depends on I2C + +config TMP421 + bool + depends on I2C + +config DPS310 + bool + depends on I2C + +config EMC141X + bool + depends on I2C + +config ADM1272 + bool + depends on I2C + +config MAX34451 + bool + depends on I2C diff --git a/hw/sensor/adm1272.c b/hw/sensor/adm1272.c new file mode 100644 index 000000000..7310c769b --- /dev/null +++ b/hw/sensor/adm1272.c @@ -0,0 +1,543 @@ +/* + * Analog Devices ADM1272 High Voltage Positive Hot Swap Controller and Digital + * Power Monitor with PMBus + * + * Copyright 2021 Google LLC + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include +#include "hw/i2c/pmbus_device.h" +#include "hw/irq.h" +#include "migration/vmstate.h" +#include "qapi/error.h" +#include "qapi/visitor.h" +#include "qemu/log.h" +#include "qemu/module.h" + +#define TYPE_ADM1272 "adm1272" +#define ADM1272(obj) OBJECT_CHECK(ADM1272State, (obj), TYPE_ADM1272) + +#define ADM1272_RESTART_TIME 0xCC +#define ADM1272_MFR_PEAK_IOUT 0xD0 +#define ADM1272_MFR_PEAK_VIN 0xD1 +#define ADM1272_MFR_PEAK_VOUT 0xD2 +#define ADM1272_MFR_PMON_CONTROL 0xD3 +#define ADM1272_MFR_PMON_CONFIG 0xD4 +#define ADM1272_MFR_ALERT1_CONFIG 0xD5 +#define ADM1272_MFR_ALERT2_CONFIG 0xD6 +#define ADM1272_MFR_PEAK_TEMPERATURE 0xD7 +#define ADM1272_MFR_DEVICE_CONFIG 0xD8 +#define ADM1272_MFR_POWER_CYCLE 0xD9 +#define ADM1272_MFR_PEAK_PIN 0xDA +#define ADM1272_MFR_READ_PIN_EXT 0xDB +#define ADM1272_MFR_READ_EIN_EXT 0xDC + +#define ADM1272_HYSTERESIS_LOW 0xF2 +#define ADM1272_HYSTERESIS_HIGH 0xF3 +#define ADM1272_STATUS_HYSTERESIS 0xF4 +#define ADM1272_STATUS_GPIO 0xF5 +#define ADM1272_STRT_UP_IOUT_LIM 0xF6 + +/* Defaults */ +#define ADM1272_OPERATION_DEFAULT 0x80 +#define ADM1272_CAPABILITY_DEFAULT 0xB0 +#define ADM1272_CAPABILITY_NO_PEC 0x30 +#define ADM1272_DIRECT_MODE 0x40 +#define ADM1272_HIGH_LIMIT_DEFAULT 0x0FFF +#define ADM1272_PIN_OP_DEFAULT 0x7FFF +#define ADM1272_PMBUS_REVISION_DEFAULT 0x22 +#define ADM1272_MFR_ID_DEFAULT "ADI" +#define ADM1272_MODEL_DEFAULT "ADM1272-A1" +#define ADM1272_MFR_DEFAULT_REVISION "25" +#define ADM1272_DEFAULT_DATE "160301" +#define 
ADM1272_RESTART_TIME_DEFAULT 0x64 +#define ADM1272_PMON_CONTROL_DEFAULT 0x1 +#define ADM1272_PMON_CONFIG_DEFAULT 0x3F35 +#define ADM1272_DEVICE_CONFIG_DEFAULT 0x8 +#define ADM1272_HYSTERESIS_HIGH_DEFAULT 0xFFFF +#define ADM1272_STRT_UP_IOUT_LIM_DEFAULT 0x000F +#define ADM1272_VOLT_DEFAULT 12000 +#define ADM1272_IOUT_DEFAULT 25000 +#define ADM1272_PWR_DEFAULT 300 /* 12V 25A */ +#define ADM1272_SHUNT 300 /* micro-ohms */ +#define ADM1272_VOLTAGE_COEFF_DEFAULT 1 +#define ADM1272_CURRENT_COEFF_DEFAULT 3 +#define ADM1272_PWR_COEFF_DEFAULT 7 +#define ADM1272_IOUT_OFFSET 0x5000 +#define ADM1272_IOUT_OFFSET 0x5000 + + +typedef struct ADM1272State { + PMBusDevice parent; + + uint64_t ein_ext; + uint32_t pin_ext; + uint8_t restart_time; + + uint16_t peak_vin; + uint16_t peak_vout; + uint16_t peak_iout; + uint16_t peak_temperature; + uint16_t peak_pin; + + uint8_t pmon_control; + uint16_t pmon_config; + uint16_t alert1_config; + uint16_t alert2_config; + uint16_t device_config; + + uint16_t hysteresis_low; + uint16_t hysteresis_high; + uint8_t status_hysteresis; + uint8_t status_gpio; + + uint16_t strt_up_iout_lim; + +} ADM1272State; + +static const PMBusCoefficients adm1272_coefficients[] = { + [0] = { 6770, 0, -2 }, /* voltage, vrange 60V */ + [1] = { 4062, 0, -2 }, /* voltage, vrange 100V */ + [2] = { 1326, 20480, -1 }, /* current, vsense range 15mV */ + [3] = { 663, 20480, -1 }, /* current, vsense range 30mV */ + [4] = { 3512, 0, -2 }, /* power, vrange 60V, irange 15mV */ + [5] = { 21071, 0, -3 }, /* power, vrange 100V, irange 15mV */ + [6] = { 17561, 0, -3 }, /* power, vrange 60V, irange 30mV */ + [7] = { 10535, 0, -3 }, /* power, vrange 100V, irange 30mV */ + [8] = { 42, 31871, -1 }, /* temperature */ +}; + +static void adm1272_check_limits(ADM1272State *s) +{ + PMBusDevice *pmdev = PMBUS_DEVICE(s); + + pmbus_check_limits(pmdev); + + if (pmdev->pages[0].read_vout > s->peak_vout) { + s->peak_vout = pmdev->pages[0].read_vout; + } + + if (pmdev->pages[0].read_vin > s->peak_vin) { + s->peak_vin = pmdev->pages[0].read_vin; + } + + if (pmdev->pages[0].read_iout > s->peak_iout) { + s->peak_iout = pmdev->pages[0].read_iout; + } + + if (pmdev->pages[0].read_temperature_1 > s->peak_temperature) { + s->peak_temperature = pmdev->pages[0].read_temperature_1; + } + + if (pmdev->pages[0].read_pin > s->peak_pin) { + s->peak_pin = pmdev->pages[0].read_pin; + } +} + +static uint16_t adm1272_millivolts_to_direct(uint32_t value) +{ + PMBusCoefficients c = adm1272_coefficients[ADM1272_VOLTAGE_COEFF_DEFAULT]; + c.b = c.b * 1000; + c.R = c.R - 3; + return pmbus_data2direct_mode(c, value); +} + +static uint32_t adm1272_direct_to_millivolts(uint16_t value) +{ + PMBusCoefficients c = adm1272_coefficients[ADM1272_VOLTAGE_COEFF_DEFAULT]; + c.b = c.b * 1000; + c.R = c.R - 3; + return pmbus_direct_mode2data(c, value); +} + +static uint16_t adm1272_milliamps_to_direct(uint32_t value) +{ + PMBusCoefficients c = adm1272_coefficients[ADM1272_CURRENT_COEFF_DEFAULT]; + /* Y = (m * r_sense * x - b) * 10^R */ + c.m = c.m * ADM1272_SHUNT / 1000; /* micro-ohms */ + c.b = c.b * 1000; + c.R = c.R - 3; + return pmbus_data2direct_mode(c, value); +} + +static uint32_t adm1272_direct_to_milliamps(uint16_t value) +{ + PMBusCoefficients c = adm1272_coefficients[ADM1272_CURRENT_COEFF_DEFAULT]; + c.m = c.m * ADM1272_SHUNT / 1000; + c.b = c.b * 1000; + c.R = c.R - 3; + return pmbus_direct_mode2data(c, value); +} + +static uint16_t adm1272_watts_to_direct(uint32_t value) +{ + PMBusCoefficients c = adm1272_coefficients[ADM1272_PWR_COEFF_DEFAULT]; 
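+    /*
+     * Same folding as in the current helpers above: the micro-ohm sense
+     * resistor is baked into the slope m before the coefficients are
+     * handed to the generic direct-mode conversion.  Unlike the
+     * millivolt/milliamp helpers, b and R are left untouched, so this
+     * pair works in whole watts (cf. ADM1272_PWR_DEFAULT, i.e. 300 W)
+     * rather than in milli-units.  (A gloss on pmbus_data2direct_mode();
+     * the exact sign conventions live in that helper.)
+     */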
+ c.m = c.m * ADM1272_SHUNT / 1000; + return pmbus_data2direct_mode(c, value); +} + +static uint32_t adm1272_direct_to_watts(uint16_t value) +{ + PMBusCoefficients c = adm1272_coefficients[ADM1272_PWR_COEFF_DEFAULT]; + c.m = c.m * ADM1272_SHUNT / 1000; + return pmbus_direct_mode2data(c, value); +} + +static void adm1272_exit_reset(Object *obj) +{ + ADM1272State *s = ADM1272(obj); + PMBusDevice *pmdev = PMBUS_DEVICE(obj); + + pmdev->page = 0; + pmdev->pages[0].operation = ADM1272_OPERATION_DEFAULT; + + + pmdev->capability = ADM1272_CAPABILITY_NO_PEC; + pmdev->pages[0].revision = ADM1272_PMBUS_REVISION_DEFAULT; + pmdev->pages[0].vout_mode = ADM1272_DIRECT_MODE; + pmdev->pages[0].vout_ov_warn_limit = ADM1272_HIGH_LIMIT_DEFAULT; + pmdev->pages[0].vout_uv_warn_limit = 0; + pmdev->pages[0].iout_oc_warn_limit = ADM1272_HIGH_LIMIT_DEFAULT; + pmdev->pages[0].ot_fault_limit = ADM1272_HIGH_LIMIT_DEFAULT; + pmdev->pages[0].ot_warn_limit = ADM1272_HIGH_LIMIT_DEFAULT; + pmdev->pages[0].vin_ov_warn_limit = ADM1272_HIGH_LIMIT_DEFAULT; + pmdev->pages[0].vin_uv_warn_limit = 0; + pmdev->pages[0].pin_op_warn_limit = ADM1272_PIN_OP_DEFAULT; + + pmdev->pages[0].status_word = 0; + pmdev->pages[0].status_vout = 0; + pmdev->pages[0].status_iout = 0; + pmdev->pages[0].status_input = 0; + pmdev->pages[0].status_temperature = 0; + pmdev->pages[0].status_mfr_specific = 0; + + pmdev->pages[0].read_vin + = adm1272_millivolts_to_direct(ADM1272_VOLT_DEFAULT); + pmdev->pages[0].read_vout + = adm1272_millivolts_to_direct(ADM1272_VOLT_DEFAULT); + pmdev->pages[0].read_iout + = adm1272_milliamps_to_direct(ADM1272_IOUT_DEFAULT); + pmdev->pages[0].read_temperature_1 = 0; + pmdev->pages[0].read_pin = adm1272_watts_to_direct(ADM1272_PWR_DEFAULT); + pmdev->pages[0].revision = ADM1272_PMBUS_REVISION_DEFAULT; + pmdev->pages[0].mfr_id = ADM1272_MFR_ID_DEFAULT; + pmdev->pages[0].mfr_model = ADM1272_MODEL_DEFAULT; + pmdev->pages[0].mfr_revision = ADM1272_MFR_DEFAULT_REVISION; + pmdev->pages[0].mfr_date = ADM1272_DEFAULT_DATE; + + s->pin_ext = 0; + s->ein_ext = 0; + s->restart_time = ADM1272_RESTART_TIME_DEFAULT; + + s->peak_vin = 0; + s->peak_vout = 0; + s->peak_iout = 0; + s->peak_temperature = 0; + s->peak_pin = 0; + + s->pmon_control = ADM1272_PMON_CONTROL_DEFAULT; + s->pmon_config = ADM1272_PMON_CONFIG_DEFAULT; + s->alert1_config = 0; + s->alert2_config = 0; + s->device_config = ADM1272_DEVICE_CONFIG_DEFAULT; + + s->hysteresis_low = 0; + s->hysteresis_high = ADM1272_HYSTERESIS_HIGH_DEFAULT; + s->status_hysteresis = 0; + s->status_gpio = 0; + + s->strt_up_iout_lim = ADM1272_STRT_UP_IOUT_LIM_DEFAULT; +} + +static uint8_t adm1272_read_byte(PMBusDevice *pmdev) +{ + ADM1272State *s = ADM1272(pmdev); + + switch (pmdev->code) { + case ADM1272_RESTART_TIME: + pmbus_send8(pmdev, s->restart_time); + break; + + case ADM1272_MFR_PEAK_IOUT: + pmbus_send16(pmdev, s->peak_iout); + break; + + case ADM1272_MFR_PEAK_VIN: + pmbus_send16(pmdev, s->peak_vin); + break; + + case ADM1272_MFR_PEAK_VOUT: + pmbus_send16(pmdev, s->peak_vout); + break; + + case ADM1272_MFR_PMON_CONTROL: + pmbus_send8(pmdev, s->pmon_control); + break; + + case ADM1272_MFR_PMON_CONFIG: + pmbus_send16(pmdev, s->pmon_config); + break; + + case ADM1272_MFR_ALERT1_CONFIG: + pmbus_send16(pmdev, s->alert1_config); + break; + + case ADM1272_MFR_ALERT2_CONFIG: + pmbus_send16(pmdev, s->alert2_config); + break; + + case ADM1272_MFR_PEAK_TEMPERATURE: + pmbus_send16(pmdev, s->peak_temperature); + break; + + case ADM1272_MFR_DEVICE_CONFIG: + pmbus_send16(pmdev, s->device_config); + break; + + 
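+    /*
+     * A note on the helper API used throughout this switch: the
+     * pmbus_send8/16/32/64 calls do not drive the I2C bus themselves,
+     * they stage the reply in the PMBusDevice output buffer, and the
+     * SMBus core shifts it out to the master during the read phase of
+     * the transaction.
+     */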
case ADM1272_MFR_PEAK_PIN: + pmbus_send16(pmdev, s->peak_pin); + break; + + case ADM1272_MFR_READ_PIN_EXT: + pmbus_send32(pmdev, s->pin_ext); + break; + + case ADM1272_MFR_READ_EIN_EXT: + pmbus_send64(pmdev, s->ein_ext); + break; + + case ADM1272_HYSTERESIS_LOW: + pmbus_send16(pmdev, s->hysteresis_low); + break; + + case ADM1272_HYSTERESIS_HIGH: + pmbus_send16(pmdev, s->hysteresis_high); + break; + + case ADM1272_STATUS_HYSTERESIS: + pmbus_send16(pmdev, s->status_hysteresis); + break; + + case ADM1272_STATUS_GPIO: + pmbus_send16(pmdev, s->status_gpio); + break; + + case ADM1272_STRT_UP_IOUT_LIM: + pmbus_send16(pmdev, s->strt_up_iout_lim); + break; + + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: reading from unsupported register: 0x%02x\n", + __func__, pmdev->code); + return 0xFF; + break; + } + + return 0; +} + +static int adm1272_write_data(PMBusDevice *pmdev, const uint8_t *buf, + uint8_t len) +{ + ADM1272State *s = ADM1272(pmdev); + + if (len == 0) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: writing empty data\n", __func__); + return -1; + } + + pmdev->code = buf[0]; /* PMBus command code */ + + if (len == 1) { + return 0; + } + + /* Exclude command code from buffer */ + buf++; + len--; + + switch (pmdev->code) { + + case ADM1272_RESTART_TIME: + s->restart_time = pmbus_receive8(pmdev); + break; + + case ADM1272_MFR_PMON_CONTROL: + s->pmon_control = pmbus_receive8(pmdev); + break; + + case ADM1272_MFR_PMON_CONFIG: + s->pmon_config = pmbus_receive16(pmdev); + break; + + case ADM1272_MFR_ALERT1_CONFIG: + s->alert1_config = pmbus_receive16(pmdev); + break; + + case ADM1272_MFR_ALERT2_CONFIG: + s->alert2_config = pmbus_receive16(pmdev); + break; + + case ADM1272_MFR_DEVICE_CONFIG: + s->device_config = pmbus_receive16(pmdev); + break; + + case ADM1272_MFR_POWER_CYCLE: + adm1272_exit_reset((Object *)s); + break; + + case ADM1272_HYSTERESIS_LOW: + s->hysteresis_low = pmbus_receive16(pmdev); + break; + + case ADM1272_HYSTERESIS_HIGH: + s->hysteresis_high = pmbus_receive16(pmdev); + break; + + case ADM1272_STRT_UP_IOUT_LIM: + s->strt_up_iout_lim = pmbus_receive16(pmdev); + adm1272_check_limits(s); + break; + + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: writing to unsupported register: 0x%02x\n", + __func__, pmdev->code); + break; + } + return 0; +} + +static void adm1272_get(Object *obj, Visitor *v, const char *name, void *opaque, + Error **errp) +{ + uint16_t value; + + if (strcmp(name, "vin") == 0 || strcmp(name, "vout") == 0) { + value = adm1272_direct_to_millivolts(*(uint16_t *)opaque); + } else if (strcmp(name, "iout") == 0) { + value = adm1272_direct_to_milliamps(*(uint16_t *)opaque); + } else if (strcmp(name, "pin") == 0) { + value = adm1272_direct_to_watts(*(uint16_t *)opaque); + } else { + value = *(uint16_t *)opaque; + } + + visit_type_uint16(v, name, &value, errp); +} + +static void adm1272_set(Object *obj, Visitor *v, const char *name, void *opaque, + Error **errp) +{ + ADM1272State *s = ADM1272(obj); + uint16_t *internal = opaque; + uint16_t value; + + if (!visit_type_uint16(v, name, &value, errp)) { + return; + } + + if (strcmp(name, "vin") == 0 || strcmp(name, "vout") == 0) { + *internal = adm1272_millivolts_to_direct(value); + } else if (strcmp(name, "iout") == 0) { + *internal = adm1272_milliamps_to_direct(value); + } else if (strcmp(name, "pin") == 0) { + *internal = adm1272_watts_to_direct(value); + } else { + *internal = value; + } + + adm1272_check_limits(s); +} + +static const VMStateDescription vmstate_adm1272 = { + .name = "ADM1272", + .version_id = 0, + 
+    .minimum_version_id = 0,
+    .fields = (VMStateField[]){
+        VMSTATE_PMBUS_DEVICE(parent, ADM1272State),
+        VMSTATE_UINT64(ein_ext, ADM1272State),
+        VMSTATE_UINT32(pin_ext, ADM1272State),
+        VMSTATE_UINT8(restart_time, ADM1272State),
+
+        VMSTATE_UINT16(peak_vin, ADM1272State),
+        VMSTATE_UINT16(peak_vout, ADM1272State),
+        VMSTATE_UINT16(peak_iout, ADM1272State),
+        VMSTATE_UINT16(peak_temperature, ADM1272State),
+        VMSTATE_UINT16(peak_pin, ADM1272State),
+
+        VMSTATE_UINT8(pmon_control, ADM1272State),
+        VMSTATE_UINT16(pmon_config, ADM1272State),
+        VMSTATE_UINT16(alert1_config, ADM1272State),
+        VMSTATE_UINT16(alert2_config, ADM1272State),
+        VMSTATE_UINT16(device_config, ADM1272State),
+
+        VMSTATE_UINT16(hysteresis_low, ADM1272State),
+        VMSTATE_UINT16(hysteresis_high, ADM1272State),
+        VMSTATE_UINT8(status_hysteresis, ADM1272State),
+        VMSTATE_UINT8(status_gpio, ADM1272State),
+
+        VMSTATE_UINT16(strt_up_iout_lim, ADM1272State),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+static void adm1272_init(Object *obj)
+{
+    PMBusDevice *pmdev = PMBUS_DEVICE(obj);
+    uint64_t flags = PB_HAS_VOUT_MODE | PB_HAS_VOUT | PB_HAS_VIN | PB_HAS_IOUT |
+                     PB_HAS_PIN | PB_HAS_TEMPERATURE | PB_HAS_MFR_INFO;
+
+    pmbus_page_config(pmdev, 0, flags);
+
+    object_property_add(obj, "vin", "uint16",
+                        adm1272_get,
+                        adm1272_set, NULL, &pmdev->pages[0].read_vin);
+
+    object_property_add(obj, "vout", "uint16",
+                        adm1272_get,
+                        adm1272_set, NULL, &pmdev->pages[0].read_vout);
+
+    object_property_add(obj, "iout", "uint16",
+                        adm1272_get,
+                        adm1272_set, NULL, &pmdev->pages[0].read_iout);
+
+    object_property_add(obj, "pin", "uint16",
+                        adm1272_get,
+                        adm1272_set, NULL, &pmdev->pages[0].read_pin);
+
+}
+
+static void adm1272_class_init(ObjectClass *klass, void *data)
+{
+    ResettableClass *rc = RESETTABLE_CLASS(klass);
+    DeviceClass *dc = DEVICE_CLASS(klass);
+    PMBusDeviceClass *k = PMBUS_DEVICE_CLASS(klass);
+
+    dc->desc = "Analog Devices ADM1272 Hot Swap controller";
+    dc->vmsd = &vmstate_adm1272;
+    k->write_data = adm1272_write_data;
+    k->receive_byte = adm1272_read_byte;
+    k->device_num_pages = 1;
+
+    rc->phases.exit = adm1272_exit_reset;
+}
+
+static const TypeInfo adm1272_info = {
+    .name = TYPE_ADM1272,
+    .parent = TYPE_PMBUS_DEVICE,
+    .instance_size = sizeof(ADM1272State),
+    .instance_init = adm1272_init,
+    .class_init = adm1272_class_init,
+};
+
+static void adm1272_register_types(void)
+{
+    type_register_static(&adm1272_info);
+}
+
+type_init(adm1272_register_types)
diff --git a/hw/sensor/dps310.c b/hw/sensor/dps310.c
new file mode 100644
index 000000000..d60a18ac4
--- /dev/null
+++ b/hw/sensor/dps310.c
@@ -0,0 +1,225 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright 2017-2021 Joel Stanley, IBM Corporation
+ *
+ * Infineon DPS310 temperature and pressure sensor
+ *
+ * https://www.infineon.com/cms/en/product/sensor/pressure-sensors/pressure-sensors-for-iot/dps310/
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/log.h"
+#include "hw/hw.h"
+#include "hw/i2c/i2c.h"
+#include "qapi/error.h"
+#include "qapi/visitor.h"
+#include "migration/vmstate.h"
+
+#define NUM_REGISTERS 0x33
+
+typedef struct DPS310State {
+    /*< private >*/
+    I2CSlave i2c;
+
+    /*< public >*/
+    uint8_t regs[NUM_REGISTERS];
+
+    uint8_t len;
+    uint8_t pointer;
+
+} DPS310State;
+
+#define TYPE_DPS310 "dps310"
+#define DPS310(obj) OBJECT_CHECK(DPS310State, (obj), TYPE_DPS310)
+
+#define DPS310_PRS_B2 0x00
+#define DPS310_PRS_B1 0x01
+#define DPS310_PRS_B0 0x02
+#define DPS310_TMP_B2 0x03
+#define DPS310_TMP_B1 0x04
+#define DPS310_TMP_B0 0x05
+#define
DPS310_PRS_CFG 0x06 +#define DPS310_TMP_CFG 0x07 +#define DPS310_TMP_RATE_BITS (0x70) +#define DPS310_MEAS_CFG 0x08 +#define DPS310_MEAS_CTRL_BITS (0x07) +#define DPS310_PRESSURE_EN BIT(0) +#define DPS310_TEMP_EN BIT(1) +#define DPS310_BACKGROUND BIT(2) +#define DPS310_PRS_RDY BIT(4) +#define DPS310_TMP_RDY BIT(5) +#define DPS310_SENSOR_RDY BIT(6) +#define DPS310_COEF_RDY BIT(7) +#define DPS310_CFG_REG 0x09 +#define DPS310_RESET 0x0c +#define DPS310_RESET_MAGIC (BIT(0) | BIT(3)) +#define DPS310_COEF_BASE 0x10 +#define DPS310_COEF_LAST 0x21 +#define DPS310_COEF_SRC 0x28 + +static void dps310_reset(DeviceState *dev) +{ + DPS310State *s = DPS310(dev); + + static const uint8_t regs_reset_state[sizeof(s->regs)] = { + 0xfe, 0x2f, 0xee, 0x02, 0x69, 0xa6, 0x00, 0x80, 0xc7, 0x00, 0x00, 0x00, + 0x00, 0x10, 0x00, 0x00, 0x0e, 0x1e, 0xdd, 0x13, 0xca, 0x5f, 0x21, 0x52, + 0xf9, 0xc6, 0x04, 0xd1, 0xdb, 0x47, 0x00, 0x5b, 0xfb, 0x3a, 0x00, 0x00, + 0x20, 0x49, 0x4e, 0xa5, 0x90, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x60, 0x15, 0x02 + }; + + memcpy(s->regs, regs_reset_state, sizeof(s->regs)); + s->pointer = 0; + + /* TODO: assert these after some timeout ? */ + s->regs[DPS310_MEAS_CFG] = DPS310_COEF_RDY | DPS310_SENSOR_RDY + | DPS310_TMP_RDY | DPS310_PRS_RDY; +} + +static uint8_t dps310_read(DPS310State *s, uint8_t reg) +{ + if (reg >= sizeof(s->regs)) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: register 0x%02x out of bounds\n", + __func__, s->pointer); + return 0xFF; + } + + switch (reg) { + case DPS310_PRS_B2: + case DPS310_PRS_B1: + case DPS310_PRS_B0: + case DPS310_TMP_B2: + case DPS310_TMP_B1: + case DPS310_TMP_B0: + case DPS310_PRS_CFG: + case DPS310_TMP_CFG: + case DPS310_MEAS_CFG: + case DPS310_CFG_REG: + case DPS310_COEF_BASE...DPS310_COEF_LAST: + case DPS310_COEF_SRC: + case 0x32: /* Undocumented register to indicate workaround not required */ + return s->regs[reg]; + default: + qemu_log_mask(LOG_UNIMP, "%s: register 0x%02x unimplemented\n", + __func__, reg); + return 0xFF; + } +} + +static void dps310_write(DPS310State *s, uint8_t reg, uint8_t data) +{ + if (reg >= sizeof(s->regs)) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: register %d out of bounds\n", + __func__, s->pointer); + return; + } + + switch (reg) { + case DPS310_RESET: + if (data == DPS310_RESET_MAGIC) { + device_cold_reset(DEVICE(s)); + } + break; + case DPS310_PRS_CFG: + case DPS310_TMP_CFG: + case DPS310_MEAS_CFG: + case DPS310_CFG_REG: + s->regs[reg] = data; + break; + default: + qemu_log_mask(LOG_UNIMP, "%s: register 0x%02x unimplemented\n", + __func__, reg); + return; + } +} + +static uint8_t dps310_rx(I2CSlave *i2c) +{ + DPS310State *s = DPS310(i2c); + + if (s->len == 1) { + return dps310_read(s, s->pointer++); + } else { + return 0xFF; + } +} + +static int dps310_tx(I2CSlave *i2c, uint8_t data) +{ + DPS310State *s = DPS310(i2c); + + if (s->len == 0) { + /* + * first byte is the register pointer for a read or write + * operation + */ + s->pointer = data; + s->len++; + } else if (s->len == 1) { + dps310_write(s, s->pointer++, data); + } + + return 0; +} + +static int dps310_event(I2CSlave *i2c, enum i2c_event event) +{ + DPS310State *s = DPS310(i2c); + + switch (event) { + case I2C_START_SEND: + s->pointer = 0xFF; + s->len = 0; + break; + case I2C_START_RECV: + if (s->len != 1) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid recv sequence\n", + __func__); + } + break; + default: + break; + } + + return 0; +} + +static const VMStateDescription vmstate_dps310 = { + .name = "DPS310", + .version_id = 0, + .minimum_version_id = 0, + 
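+    /*
+     * len and pointer are migrated on purpose: together with regs they
+     * capture an in-flight register-pointer transaction (first byte
+     * selects a register, subsequent bytes read or write it, as handled
+     * by dps310_tx()/dps310_rx() above), so a migration in the middle of
+     * an I2C sequence can resume cleanly.
+     */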
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT8(len, DPS310State),
+        VMSTATE_UINT8_ARRAY(regs, DPS310State, NUM_REGISTERS),
+        VMSTATE_UINT8(pointer, DPS310State),
+        VMSTATE_I2C_SLAVE(i2c, DPS310State),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+static void dps310_class_init(ObjectClass *klass, void *data)
+{
+    DeviceClass *dc = DEVICE_CLASS(klass);
+    I2CSlaveClass *k = I2C_SLAVE_CLASS(klass);
+
+    k->event = dps310_event;
+    k->recv = dps310_rx;
+    k->send = dps310_tx;
+    dc->reset = dps310_reset;
+    dc->vmsd = &vmstate_dps310;
+}
+
+static const TypeInfo dps310_info = {
+    .name = TYPE_DPS310,
+    .parent = TYPE_I2C_SLAVE,
+    .instance_size = sizeof(DPS310State),
+    .class_init = dps310_class_init,
+};
+
+static void dps310_register_types(void)
+{
+    type_register_static(&dps310_info);
+}
+
+type_init(dps310_register_types)
diff --git a/hw/sensor/emc141x.c b/hw/sensor/emc141x.c
new file mode 100644
index 000000000..7ce8f4e97
--- /dev/null
+++ b/hw/sensor/emc141x.c
@@ -0,0 +1,326 @@
+/*
+ * SMSC EMC141X temperature sensor.
+ *
+ * Copyright (c) 2020 Bytedance Corporation
+ * Written by John Wang
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 or
+ * (at your option) version 3 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/i2c/i2c.h"
+#include "migration/vmstate.h"
+#include "qapi/error.h"
+#include "qapi/visitor.h"
+#include "qemu/module.h"
+#include "qom/object.h"
+#include "hw/sensor/emc141x_regs.h"
+
+#define SENSORS_COUNT_MAX 4
+
+struct EMC141XState {
+    I2CSlave parent_obj;
+    struct {
+        uint8_t raw_temp_min;
+        uint8_t raw_temp_current;
+        uint8_t raw_temp_max;
+    } sensor[SENSORS_COUNT_MAX];
+    uint8_t len;
+    uint8_t data;
+    uint8_t pointer;
+};
+
+struct EMC141XClass {
+    I2CSlaveClass parent_class;
+    uint8_t model;
+    unsigned sensors_count;
+};
+
+#define TYPE_EMC141X "emc141x"
+OBJECT_DECLARE_TYPE(EMC141XState, EMC141XClass, EMC141X)
+
+static void emc141x_get_temperature(Object *obj, Visitor *v, const char *name,
+                                    void *opaque, Error **errp)
+{
+    EMC141XState *s = EMC141X(obj);
+    EMC141XClass *sc = EMC141X_GET_CLASS(s);
+    int64_t value;
+    unsigned tempid;
+
+    if (sscanf(name, "temperature%u", &tempid) != 1) {
+        error_setg(errp, "error reading %s: %s", name, g_strerror(errno));
+        return;
+    }
+
+    if (tempid >= sc->sensors_count) {
+        error_setg(errp, "error reading %s", name);
+        return;
+    }
+
+    value = s->sensor[tempid].raw_temp_current * 1000;
+
+    visit_type_int(v, name, &value, errp);
+}
+
+static void emc141x_set_temperature(Object *obj, Visitor *v, const char *name,
+                                    void *opaque, Error **errp)
+{
+    EMC141XState *s = EMC141X(obj);
+    EMC141XClass *sc = EMC141X_GET_CLASS(s);
+    int64_t temp;
+    unsigned tempid;
+
+    if (!visit_type_int(v, name, &temp, errp)) {
+        return;
+    }
+
+    if (sscanf(name, "temperature%u", &tempid) != 1) {
+        error_setg(errp, "error reading %s: %s", name, g_strerror(errno));
+        return;
+    }
+
+    if (tempid >= sc->sensors_count) {
+        error_setg(errp, "error reading %s", name);
+        return;
+    }
+
+    s->sensor[tempid].raw_temp_current = temp / 1000;
+}
+
+static void
emc141x_read(EMC141XState *s) +{ + EMC141XClass *sc = EMC141X_GET_CLASS(s); + switch (s->pointer) { + case EMC141X_DEVICE_ID: + s->data = sc->model; + break; + case EMC141X_MANUFACTURER_ID: + s->data = MANUFACTURER_ID; + break; + case EMC141X_REVISION: + s->data = REVISION; + break; + case EMC141X_TEMP_HIGH0: + s->data = s->sensor[0].raw_temp_current; + break; + case EMC141X_TEMP_HIGH1: + s->data = s->sensor[1].raw_temp_current; + break; + case EMC141X_TEMP_HIGH2: + s->data = s->sensor[2].raw_temp_current; + break; + case EMC141X_TEMP_HIGH3: + s->data = s->sensor[3].raw_temp_current; + break; + case EMC141X_TEMP_MAX_HIGH0: + s->data = s->sensor[0].raw_temp_max; + break; + case EMC141X_TEMP_MAX_HIGH1: + s->data = s->sensor[1].raw_temp_max; + break; + case EMC141X_TEMP_MAX_HIGH2: + s->data = s->sensor[2].raw_temp_max; + break; + case EMC141X_TEMP_MAX_HIGH3: + s->data = s->sensor[3].raw_temp_max; + break; + case EMC141X_TEMP_MIN_HIGH0: + s->data = s->sensor[0].raw_temp_min; + break; + case EMC141X_TEMP_MIN_HIGH1: + s->data = s->sensor[1].raw_temp_min; + break; + case EMC141X_TEMP_MIN_HIGH2: + s->data = s->sensor[2].raw_temp_min; + break; + case EMC141X_TEMP_MIN_HIGH3: + s->data = s->sensor[3].raw_temp_min; + break; + default: + s->data = 0; + } +} + +static void emc141x_write(EMC141XState *s) +{ + switch (s->pointer) { + case EMC141X_TEMP_MAX_HIGH0: + s->sensor[0].raw_temp_max = s->data; + break; + case EMC141X_TEMP_MAX_HIGH1: + s->sensor[1].raw_temp_max = s->data; + break; + case EMC141X_TEMP_MAX_HIGH2: + s->sensor[2].raw_temp_max = s->data; + break; + case EMC141X_TEMP_MAX_HIGH3: + s->sensor[3].raw_temp_max = s->data; + break; + case EMC141X_TEMP_MIN_HIGH0: + s->sensor[0].raw_temp_min = s->data; + break; + case EMC141X_TEMP_MIN_HIGH1: + s->sensor[1].raw_temp_min = s->data; + break; + case EMC141X_TEMP_MIN_HIGH2: + s->sensor[2].raw_temp_min = s->data; + break; + case EMC141X_TEMP_MIN_HIGH3: + s->sensor[3].raw_temp_min = s->data; + break; + default: + s->data = 0; + } +} + +static uint8_t emc141x_rx(I2CSlave *i2c) +{ + EMC141XState *s = EMC141X(i2c); + + if (s->len == 0) { + s->len++; + return s->data; + } else { + return 0xff; + } +} + +static int emc141x_tx(I2CSlave *i2c, uint8_t data) +{ + EMC141XState *s = EMC141X(i2c); + + if (s->len == 0) { + /* first byte is the reg pointer */ + s->pointer = data; + s->len++; + } else if (s->len == 1) { + s->data = data; + emc141x_write(s); + } + + return 0; +} + +static int emc141x_event(I2CSlave *i2c, enum i2c_event event) +{ + EMC141XState *s = EMC141X(i2c); + + if (event == I2C_START_RECV) { + emc141x_read(s); + } + + s->len = 0; + return 0; +} + +static const VMStateDescription vmstate_emc141x = { + .name = "EMC141X", + .version_id = 0, + .minimum_version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_UINT8(len, EMC141XState), + VMSTATE_UINT8(data, EMC141XState), + VMSTATE_UINT8(pointer, EMC141XState), + VMSTATE_I2C_SLAVE(parent_obj, EMC141XState), + VMSTATE_END_OF_LIST() + } +}; + +static void emc141x_reset(DeviceState *dev) +{ + EMC141XState *s = EMC141X(dev); + int i; + + for (i = 0; i < SENSORS_COUNT_MAX; i++) { + s->sensor[i].raw_temp_max = 0x55; + } + s->pointer = 0; + s->len = 0; +} + +static void emc141x_initfn(Object *obj) +{ + object_property_add(obj, "temperature0", "int", + emc141x_get_temperature, + emc141x_set_temperature, NULL, NULL); + object_property_add(obj, "temperature1", "int", + emc141x_get_temperature, + emc141x_set_temperature, NULL, NULL); + object_property_add(obj, "temperature2", "int", + emc141x_get_temperature, + 
emc141x_set_temperature, NULL, NULL); + object_property_add(obj, "temperature3", "int", + emc141x_get_temperature, + emc141x_set_temperature, NULL, NULL); +} + +static void emc141x_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + I2CSlaveClass *k = I2C_SLAVE_CLASS(klass); + + dc->reset = emc141x_reset; + k->event = emc141x_event; + k->recv = emc141x_rx; + k->send = emc141x_tx; + dc->vmsd = &vmstate_emc141x; +} + +static void emc1413_class_init(ObjectClass *klass, void *data) +{ + EMC141XClass *ec = EMC141X_CLASS(klass); + + emc141x_class_init(klass, data); + ec->model = EMC1413_DEVICE_ID; + ec->sensors_count = 3; +} + +static void emc1414_class_init(ObjectClass *klass, void *data) +{ + EMC141XClass *ec = EMC141X_CLASS(klass); + + emc141x_class_init(klass, data); + ec->model = EMC1414_DEVICE_ID; + ec->sensors_count = 4; +} + +static const TypeInfo emc141x_info = { + .name = TYPE_EMC141X, + .parent = TYPE_I2C_SLAVE, + .instance_size = sizeof(EMC141XState), + .class_size = sizeof(EMC141XClass), + .instance_init = emc141x_initfn, + .abstract = true, +}; + +static const TypeInfo emc1413_info = { + .name = "emc1413", + .parent = TYPE_EMC141X, + .class_init = emc1413_class_init, +}; + +static const TypeInfo emc1414_info = { + .name = "emc1414", + .parent = TYPE_EMC141X, + .class_init = emc1414_class_init, +}; + +static void emc141x_register_types(void) +{ + type_register_static(&emc141x_info); + type_register_static(&emc1413_info); + type_register_static(&emc1414_info); +} + +type_init(emc141x_register_types) diff --git a/hw/sensor/max34451.c b/hw/sensor/max34451.c new file mode 100644 index 000000000..a91d8bd48 --- /dev/null +++ b/hw/sensor/max34451.c @@ -0,0 +1,775 @@ +/* + * Maxim MAX34451 PMBus 16-Channel V/I monitor and 12-Channel Sequencer/Marginer + * + * Copyright 2021 Google LLC + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "hw/i2c/pmbus_device.h" +#include "hw/irq.h" +#include "migration/vmstate.h" +#include "qapi/error.h" +#include "qapi/visitor.h" +#include "qemu/log.h" +#include "qemu/module.h" + +#define TYPE_MAX34451 "max34451" +#define MAX34451(obj) OBJECT_CHECK(MAX34451State, (obj), TYPE_MAX34451) + +#define MAX34451_MFR_MODE 0xD1 +#define MAX34451_MFR_PSEN_CONFIG 0xD2 +#define MAX34451_MFR_VOUT_PEAK 0xD4 +#define MAX34451_MFR_IOUT_PEAK 0xD5 +#define MAX34451_MFR_TEMPERATURE_PEAK 0xD6 +#define MAX34451_MFR_VOUT_MIN 0xD7 +#define MAX34451_MFR_NV_LOG_CONFIG 0xD8 +#define MAX34451_MFR_FAULT_RESPONSE 0xD9 +#define MAX34451_MFR_FAULT_RETRY 0xDA +#define MAX34451_MFR_NV_FAULT_LOG 0xDC +#define MAX34451_MFR_TIME_COUNT 0xDD +#define MAX34451_MFR_MARGIN_CONFIG 0xDF +#define MAX34451_MFR_FW_SERIAL 0xE0 +#define MAX34451_MFR_IOUT_AVG 0xE2 +#define MAX34451_MFR_CHANNEL_CONFIG 0xE4 +#define MAX34451_MFR_TON_SEQ_MAX 0xE6 +#define MAX34451_MFR_PWM_CONFIG 0xE7 +#define MAX34451_MFR_SEQ_CONFIG 0xE8 +#define MAX34451_MFR_STORE_ALL 0xEE +#define MAX34451_MFR_RESTORE_ALL 0xEF +#define MAX34451_MFR_TEMP_SENSOR_CONFIG 0xF0 +#define MAX34451_MFR_STORE_SINGLE 0xFC +#define MAX34451_MFR_CRC 0xFE + +#define MAX34451_NUM_MARGINED_PSU 12 +#define MAX34451_NUM_PWR_DEVICES 16 +#define MAX34451_NUM_TEMP_DEVICES 5 +#define MAX34451_NUM_PAGES 21 + +#define DEFAULT_OP_ON 0x80 +#define DEFAULT_CAPABILITY 0x20 +#define DEFAULT_ON_OFF_CONFIG 0x1a +#define DEFAULT_VOUT_MODE 0x40 +#define DEFAULT_TEMPERATURE 2500 +#define DEFAULT_SCALE 0x7FFF +#define DEFAULT_OV_LIMIT 0x7FFF +#define DEFAULT_OC_LIMIT 0x7FFF +#define DEFAULT_OT_LIMIT 
0x7FFF +#define DEFAULT_VMIN 0x7FFF +#define DEFAULT_TON_FAULT_LIMIT 0xFFFF +#define DEFAULT_CHANNEL_CONFIG 0x20 +#define DEFAULT_TEXT 0x3130313031303130 + +/** + * MAX34451State: + * @code: The command code received + * @page: Each page corresponds to a device monitored by the Max 34451 + * The page register determines the available commands depending on device + ___________________________________________________________________________ + | 0 | Power supply monitored by RS0, controlled by PSEN0, and | + | | margined with PWM0. | + |_______|___________________________________________________________________| + | 1 | Power supply monitored by RS1, controlled by PSEN1, and | + | | margined with PWM1. | + |_______|___________________________________________________________________| + | 2 | Power supply monitored by RS2, controlled by PSEN2, and | + | | margined with PWM2. | + |_______|___________________________________________________________________| + | 3 | Power supply monitored by RS3, controlled by PSEN3, and | + | | margined with PWM3. | + |_______|___________________________________________________________________| + | 4 | Power supply monitored by RS4, controlled by PSEN4, and | + | | margined with PWM4. | + |_______|___________________________________________________________________| + | 5 | Power supply monitored by RS5, controlled by PSEN5, and | + | | margined with PWM5. | + |_______|___________________________________________________________________| + | 6 | Power supply monitored by RS6, controlled by PSEN6, and | + | | margined with PWM6. | + |_______|___________________________________________________________________| + | 7 | Power supply monitored by RS7, controlled by PSEN7, and | + | | margined with PWM7. | + |_______|___________________________________________________________________| + | 8 | Power supply monitored by RS8, controlled by PSEN8, and | + | | optionally margined by OUT0 of external DS4424 at I2C address A0h.| + |_______|___________________________________________________________________| + | 9 | Power supply monitored by RS9, controlled by PSEN9, and | + | | optionally margined by OUT1 of external DS4424 at I2C address A0h.| + |_______|___________________________________________________________________| + | 10 | Power supply monitored by RS10, controlled by PSEN10, and | + | | optionally margined by OUT2 of external DS4424 at I2C address A0h.| + |_______|___________________________________________________________________| + | 11 | Power supply monitored by RS11, controlled by PSEN11, and | + | | optionally margined by OUT3 of external DS4424 at I2C address A0h.| + |_______|___________________________________________________________________| + | 12 | ADC channel 12 (monitors voltage or current) or GPI. | + |_______|___________________________________________________________________| + | 13 | ADC channel 13 (monitors voltage or current) or GPI. | + |_______|___________________________________________________________________| + | 14 | ADC channel 14 (monitors voltage or current) or GPI. | + |_______|___________________________________________________________________| + | 15 | ADC channel 15 (monitors voltage or current) or GPI. | + |_______|___________________________________________________________________| + | 16 | Internal temperature sensor. | + |_______|___________________________________________________________________| + | 17 | External DS75LV temperature sensor with I2C address 90h. 
| + |_______|___________________________________________________________________| + | 18 | External DS75LV temperature sensor with I2C address 92h. | + |_______|___________________________________________________________________| + | 19 | External DS75LV temperature sensor with I2C address 94h. | + |_______|___________________________________________________________________| + | 20 | External DS75LV temperature sensor with I2C address 96h. | + |_______|___________________________________________________________________| + | 21–254| Reserved. | + |_______|___________________________________________________________________| + | 255 | Applies to all pages. | + |_______|___________________________________________________________________| + * + * @operation: Turn on and off power supplies + * @on_off_config: Configure the power supply on and off transition behaviour + * @write_protect: protect against changes to the device's memory + * @vout_margin_high: the voltage when OPERATION is set to margin high + * @vout_margin_low: the voltage when OPERATION is set to margin low + * @vout_scale: scale ADC reading to actual device reading if different + * @iout_cal_gain: set ratio of the voltage at the ADC input to sensed current + */ +typedef struct MAX34451State { + PMBusDevice parent; + + uint16_t power_good_on[MAX34451_NUM_PWR_DEVICES]; + uint16_t power_good_off[MAX34451_NUM_PWR_DEVICES]; + uint16_t ton_delay[MAX34451_NUM_MARGINED_PSU]; + uint16_t ton_max_fault_limit[MAX34451_NUM_MARGINED_PSU]; + uint16_t toff_delay[MAX34451_NUM_MARGINED_PSU]; + uint8_t status_mfr_specific[MAX34451_NUM_PWR_DEVICES]; + /* Manufacturer specific function */ + uint64_t mfr_location; + uint64_t mfr_date; + uint64_t mfr_serial; + uint16_t mfr_mode; + uint32_t psen_config[MAX34451_NUM_MARGINED_PSU]; + uint16_t vout_peak[MAX34451_NUM_PWR_DEVICES]; + uint16_t iout_peak[MAX34451_NUM_PWR_DEVICES]; + uint16_t temperature_peak[MAX34451_NUM_TEMP_DEVICES]; + uint16_t vout_min[MAX34451_NUM_PWR_DEVICES]; + uint16_t nv_log_config; + uint32_t fault_response[MAX34451_NUM_PWR_DEVICES]; + uint16_t fault_retry; + uint32_t fault_log; + uint32_t time_count; + uint16_t margin_config[MAX34451_NUM_MARGINED_PSU]; + uint16_t fw_serial; + uint16_t iout_avg[MAX34451_NUM_PWR_DEVICES]; + uint16_t channel_config[MAX34451_NUM_PWR_DEVICES]; + uint16_t ton_seq_max[MAX34451_NUM_MARGINED_PSU]; + uint32_t pwm_config[MAX34451_NUM_MARGINED_PSU]; + uint32_t seq_config[MAX34451_NUM_MARGINED_PSU]; + uint16_t temp_sensor_config[MAX34451_NUM_TEMP_DEVICES]; + uint16_t store_single; + uint16_t crc; +} MAX34451State; + + +static void max34451_check_limits(MAX34451State *s) +{ + PMBusDevice *pmdev = PMBUS_DEVICE(s); + + pmbus_check_limits(pmdev); + + for (int i = 0; i < MAX34451_NUM_PWR_DEVICES; i++) { + if (pmdev->pages[i].read_vout == 0) { /* PSU disabled */ + continue; + } + + if (pmdev->pages[i].read_vout > s->vout_peak[i]) { + s->vout_peak[i] = pmdev->pages[i].read_vout; + } + + if (pmdev->pages[i].read_vout < s->vout_min[i]) { + s->vout_min[i] = pmdev->pages[i].read_vout; + } + + if (pmdev->pages[i].read_iout > s->iout_peak[i]) { + s->iout_peak[i] = pmdev->pages[i].read_iout; + } + } + + for (int i = 0; i < MAX34451_NUM_TEMP_DEVICES; i++) { + if (pmdev->pages[i + 16].read_temperature_1 > s->temperature_peak[i]) { + s->temperature_peak[i] = pmdev->pages[i + 16].read_temperature_1; + } + } +} + +static uint8_t max34451_read_byte(PMBusDevice *pmdev) +{ + MAX34451State *s = MAX34451(pmdev); + switch (pmdev->code) { + + case PMBUS_POWER_GOOD_ON: + if 
(pmdev->page < 16) { + pmbus_send16(pmdev, s->power_good_on[pmdev->page]); + } + break; + + case PMBUS_POWER_GOOD_OFF: + if (pmdev->page < 16) { + pmbus_send16(pmdev, s->power_good_off[pmdev->page]); + } + break; + + case PMBUS_TON_DELAY: + if (pmdev->page < 12) { + pmbus_send16(pmdev, s->ton_delay[pmdev->page]); + } + break; + + case PMBUS_TON_MAX_FAULT_LIMIT: + if (pmdev->page < 12) { + pmbus_send16(pmdev, s->ton_max_fault_limit[pmdev->page]); + } + break; + + case PMBUS_TOFF_DELAY: + if (pmdev->page < 12) { + pmbus_send16(pmdev, s->toff_delay[pmdev->page]); + } + break; + + case PMBUS_STATUS_MFR_SPECIFIC: + if (pmdev->page < 16) { + pmbus_send8(pmdev, s->status_mfr_specific[pmdev->page]); + } + break; + + case PMBUS_MFR_ID: + pmbus_send8(pmdev, 0x4d); /* Maxim */ + break; + + case PMBUS_MFR_MODEL: + pmbus_send8(pmdev, 0x59); + break; + + case PMBUS_MFR_LOCATION: + pmbus_send64(pmdev, s->mfr_location); + break; + + case PMBUS_MFR_DATE: + pmbus_send64(pmdev, s->mfr_date); + break; + + case PMBUS_MFR_SERIAL: + pmbus_send64(pmdev, s->mfr_serial); + break; + + case MAX34451_MFR_MODE: + pmbus_send16(pmdev, s->mfr_mode); + break; + + case MAX34451_MFR_PSEN_CONFIG: + if (pmdev->page < 12) { + pmbus_send32(pmdev, s->psen_config[pmdev->page]); + } + break; + + case MAX34451_MFR_VOUT_PEAK: + if (pmdev->page < 16) { + pmbus_send16(pmdev, s->vout_peak[pmdev->page]); + } + break; + + case MAX34451_MFR_IOUT_PEAK: + if (pmdev->page < 16) { + pmbus_send16(pmdev, s->iout_peak[pmdev->page]); + } + break; + + case MAX34451_MFR_TEMPERATURE_PEAK: + if (15 < pmdev->page && pmdev->page < 21) { + pmbus_send16(pmdev, s->temperature_peak[pmdev->page % 16]); + } else { + pmbus_send16(pmdev, s->temperature_peak[0]); + } + break; + + case MAX34451_MFR_VOUT_MIN: + if (pmdev->page < 16) { + pmbus_send16(pmdev, s->vout_min[pmdev->page]); + } + break; + + case MAX34451_MFR_NV_LOG_CONFIG: + pmbus_send16(pmdev, s->nv_log_config); + break; + + case MAX34451_MFR_FAULT_RESPONSE: + if (pmdev->page < 16) { + pmbus_send32(pmdev, s->fault_response[pmdev->page]); + } + break; + + case MAX34451_MFR_FAULT_RETRY: + pmbus_send32(pmdev, s->fault_retry); + break; + + case MAX34451_MFR_NV_FAULT_LOG: + pmbus_send32(pmdev, s->fault_log); + break; + + case MAX34451_MFR_TIME_COUNT: + pmbus_send32(pmdev, s->time_count); + break; + + case MAX34451_MFR_MARGIN_CONFIG: + if (pmdev->page < 12) { + pmbus_send16(pmdev, s->margin_config[pmdev->page]); + } + break; + + case MAX34451_MFR_FW_SERIAL: + if (pmdev->page == 255) { + pmbus_send16(pmdev, 1); /* Firmware revision */ + } + break; + + case MAX34451_MFR_IOUT_AVG: + if (pmdev->page < 16) { + pmbus_send16(pmdev, s->iout_avg[pmdev->page]); + } + break; + + case MAX34451_MFR_CHANNEL_CONFIG: + if (pmdev->page < 16) { + pmbus_send16(pmdev, s->channel_config[pmdev->page]); + } + break; + + case MAX34451_MFR_TON_SEQ_MAX: + if (pmdev->page < 12) { + pmbus_send16(pmdev, s->ton_seq_max[pmdev->page]); + } + break; + + case MAX34451_MFR_PWM_CONFIG: + if (pmdev->page < 12) { + pmbus_send32(pmdev, s->pwm_config[pmdev->page]); + } + break; + + case MAX34451_MFR_SEQ_CONFIG: + if (pmdev->page < 12) { + pmbus_send32(pmdev, s->seq_config[pmdev->page]); + } + break; + + case MAX34451_MFR_TEMP_SENSOR_CONFIG: + if (15 < pmdev->page && pmdev->page < 21) { + pmbus_send32(pmdev, s->temp_sensor_config[pmdev->page % 16]); + } + break; + + case MAX34451_MFR_STORE_SINGLE: + pmbus_send32(pmdev, s->store_single); + break; + + case MAX34451_MFR_CRC: + pmbus_send32(pmdev, s->crc); + break; + + default: + 
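+    /*
+     * Editor's sketch (not part of the original patch): each case above
+     * follows one idiom -- check that the currently selected PAGE falls
+     * in the range that implements the command (pages 0-15 power
+     * channels, 0-11 margined supplies, 16-20 temperature sensors, per
+     * the table in the header comment), then answer with the pmbus_send*
+     * helper matching the register width.  A guest therefore always
+     * reads a per-sensor register in two steps, shown here with Linux
+     * i2c-dev SMBus calls purely for illustration:
+     *
+     *     i2c_smbus_write_byte_data(fd, 0x00, 17);   (select PAGE 17)
+     *     i2c_smbus_read_word_data(fd, MAX34451_MFR_TEMPERATURE_PEAK);
+     */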
qemu_log_mask(LOG_GUEST_ERROR, + "%s: reading from unsupported register: 0x%02x\n", + __func__, pmdev->code); + break; + } + return 0xFF; +} + +static int max34451_write_data(PMBusDevice *pmdev, const uint8_t *buf, + uint8_t len) +{ + MAX34451State *s = MAX34451(pmdev); + + if (len == 0) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: writing empty data\n", __func__); + return -1; + } + + pmdev->code = buf[0]; /* PMBus command code */ + + if (len == 1) { + return 0; + } + + /* Exclude command code from buffer */ + buf++; + len--; + uint8_t index = pmdev->page; + + switch (pmdev->code) { + case MAX34451_MFR_STORE_ALL: + case MAX34451_MFR_RESTORE_ALL: + case MAX34451_MFR_STORE_SINGLE: + /* + * TODO: hardware behaviour is to move the contents of volatile + * memory to non-volatile memory. + */ + break; + + case PMBUS_POWER_GOOD_ON: /* R/W word */ + if (pmdev->page < MAX34451_NUM_PWR_DEVICES) { + s->power_good_on[pmdev->page] = pmbus_receive16(pmdev); + } + break; + + case PMBUS_POWER_GOOD_OFF: /* R/W word */ + if (pmdev->page < MAX34451_NUM_PWR_DEVICES) { + s->power_good_off[pmdev->page] = pmbus_receive16(pmdev); + } + break; + + case PMBUS_TON_DELAY: /* R/W word */ + if (pmdev->page < 12) { + s->ton_delay[pmdev->page] = pmbus_receive16(pmdev); + } + break; + + case PMBUS_TON_MAX_FAULT_LIMIT: /* R/W word */ + if (pmdev->page < 12) { + s->ton_max_fault_limit[pmdev->page] + = pmbus_receive16(pmdev); + } + break; + + case PMBUS_TOFF_DELAY: /* R/W word */ + if (pmdev->page < 12) { + s->toff_delay[pmdev->page] = pmbus_receive16(pmdev); + } + break; + + case PMBUS_MFR_LOCATION: /* R/W 64 */ + s->mfr_location = pmbus_receive64(pmdev); + break; + + case PMBUS_MFR_DATE: /* R/W 64 */ + s->mfr_date = pmbus_receive64(pmdev); + break; + + case PMBUS_MFR_SERIAL: /* R/W 64 */ + s->mfr_serial = pmbus_receive64(pmdev); + break; + + case MAX34451_MFR_MODE: /* R/W word */ + s->mfr_mode = pmbus_receive16(pmdev); + break; + + case MAX34451_MFR_PSEN_CONFIG: /* R/W 32 */ + if (pmdev->page < 12) { + s->psen_config[pmdev->page] = pmbus_receive32(pmdev); + } + break; + + case MAX34451_MFR_VOUT_PEAK: /* R/W word */ + if (pmdev->page < 16) { + s->vout_peak[pmdev->page] = pmbus_receive16(pmdev); + } + break; + + case MAX34451_MFR_IOUT_PEAK: /* R/W word */ + if (pmdev->page < 16) { + s->iout_peak[pmdev->page] = pmbus_receive16(pmdev); + } + break; + + case MAX34451_MFR_TEMPERATURE_PEAK: /* R/W word */ + if (15 < pmdev->page && pmdev->page < 21) { + s->temperature_peak[pmdev->page % 16] + = pmbus_receive16(pmdev); + } + break; + + case MAX34451_MFR_VOUT_MIN: /* R/W word */ + if (pmdev->page < 16) { + s->vout_min[pmdev->page] = pmbus_receive16(pmdev); + } + break; + + case MAX34451_MFR_NV_LOG_CONFIG: /* R/W word */ + s->nv_log_config = pmbus_receive16(pmdev); + break; + + case MAX34451_MFR_FAULT_RESPONSE: /* R/W 32 */ + if (pmdev->page < 16) { + s->fault_response[pmdev->page] = pmbus_receive32(pmdev); + } + break; + + case MAX34451_MFR_FAULT_RETRY: /* R/W word */ + s->fault_retry = pmbus_receive16(pmdev); + break; + + case MAX34451_MFR_TIME_COUNT: /* R/W 32 */ + s->time_count = pmbus_receive32(pmdev); + break; + + case MAX34451_MFR_MARGIN_CONFIG: /* R/W word */ + if (pmdev->page < 12) { + s->margin_config[pmdev->page] = pmbus_receive16(pmdev); + } + break; + + case MAX34451_MFR_CHANNEL_CONFIG: /* R/W word */ + if (pmdev->page < 16) { + s->channel_config[pmdev->page] = pmbus_receive16(pmdev); + } + break; + + case MAX34451_MFR_TON_SEQ_MAX: /* R/W word */ + if (pmdev->page < 12) { + s->ton_seq_max[pmdev->page] = 
pmbus_receive16(pmdev); + } + break; + + case MAX34451_MFR_PWM_CONFIG: /* R/W 32 */ + if (pmdev->page < 12) { + s->pwm_config[pmdev->page] = pmbus_receive32(pmdev); + } + break; + + case MAX34451_MFR_SEQ_CONFIG: /* R/W 32 */ + if (pmdev->page < 12) { + s->seq_config[pmdev->page] = pmbus_receive32(pmdev); + } + break; + + case MAX34451_MFR_TEMP_SENSOR_CONFIG: /* R/W word */ + if (15 < pmdev->page && pmdev->page < 21) { + s->temp_sensor_config[pmdev->page % 16] + = pmbus_receive16(pmdev); + } + break; + + case MAX34451_MFR_CRC: /* R/W word */ + s->crc = pmbus_receive16(pmdev); + break; + + case MAX34451_MFR_NV_FAULT_LOG: + case MAX34451_MFR_FW_SERIAL: + case MAX34451_MFR_IOUT_AVG: + /* Read only commands */ + pmdev->pages[index].status_word |= PMBUS_STATUS_CML; + pmdev->pages[index].status_cml |= PB_CML_FAULT_INVALID_DATA; + qemu_log_mask(LOG_GUEST_ERROR, + "%s: writing to read-only register 0x%02x\n", + __func__, pmdev->code); + break; + + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: writing to unsupported register: 0x%02x\n", + __func__, pmdev->code); + break; + } + + return 0; +} + +static void max34451_get(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + visit_type_uint16(v, name, (uint16_t *)opaque, errp); +} + +static void max34451_set(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + MAX34451State *s = MAX34451(obj); + uint16_t *internal = opaque; + uint16_t value; + if (!visit_type_uint16(v, name, &value, errp)) { + return; + } + + *internal = value; + max34451_check_limits(s); +} + +/* used to init uint16_t arrays */ +static inline void *memset_word(void *s, uint16_t c, size_t n) +{ + size_t i; + uint16_t *p = s; + + for (i = 0; i < n; i++) { + p[i] = c; + } + + return s; +} + +static void max34451_exit_reset(Object *obj) +{ + PMBusDevice *pmdev = PMBUS_DEVICE(obj); + MAX34451State *s = MAX34451(obj); + pmdev->capability = DEFAULT_CAPABILITY; + + for (int i = 0; i < MAX34451_NUM_PAGES; i++) { + pmdev->pages[i].operation = DEFAULT_OP_ON; + pmdev->pages[i].on_off_config = DEFAULT_ON_OFF_CONFIG; + pmdev->pages[i].revision = 0x11; + pmdev->pages[i].vout_mode = DEFAULT_VOUT_MODE; + } + + for (int i = 0; i < MAX34451_NUM_PWR_DEVICES; i++) { + pmdev->pages[i].vout_scale_monitor = DEFAULT_SCALE; + pmdev->pages[i].vout_ov_fault_limit = DEFAULT_OV_LIMIT; + pmdev->pages[i].vout_ov_warn_limit = DEFAULT_OV_LIMIT; + pmdev->pages[i].iout_oc_warn_limit = DEFAULT_OC_LIMIT; + pmdev->pages[i].iout_oc_fault_limit = DEFAULT_OC_LIMIT; + } + + for (int i = 0; i < MAX34451_NUM_MARGINED_PSU; i++) { + pmdev->pages[i].ton_max_fault_limit = DEFAULT_TON_FAULT_LIMIT; + } + + for (int i = 16; i < MAX34451_NUM_TEMP_DEVICES + 16; i++) { + pmdev->pages[i].read_temperature_1 = DEFAULT_TEMPERATURE; + pmdev->pages[i].ot_warn_limit = DEFAULT_OT_LIMIT; + pmdev->pages[i].ot_fault_limit = DEFAULT_OT_LIMIT; + } + + memset_word(s->ton_max_fault_limit, DEFAULT_TON_FAULT_LIMIT, + MAX34451_NUM_MARGINED_PSU); + memset_word(s->channel_config, DEFAULT_CHANNEL_CONFIG, + MAX34451_NUM_PWR_DEVICES); + memset_word(s->vout_min, DEFAULT_VMIN, MAX34451_NUM_PWR_DEVICES); + + s->mfr_location = DEFAULT_TEXT; + s->mfr_date = DEFAULT_TEXT; + s->mfr_serial = DEFAULT_TEXT; +} + +static const VMStateDescription vmstate_max34451 = { + .name = TYPE_MAX34451, + .version_id = 0, + .minimum_version_id = 0, + .fields = (VMStateField[]){ + VMSTATE_PMBUS_DEVICE(parent, MAX34451State), + VMSTATE_UINT16_ARRAY(power_good_on, MAX34451State, + MAX34451_NUM_PWR_DEVICES), + 
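+        /*
+         * Editor's note (illustrative): all guest-visible state sits
+         * either in the generic PMBusDevice pages (covered by
+         * VMSTATE_PMBUS_DEVICE above) or in one of the arrays listed
+         * here, so a migrated VM resumes with identical peak/min
+         * bookkeeping.  A hypothetical extra register would need both a
+         * struct field and a matching entry:
+         *
+         *     uint16_t fan_config[MAX34451_NUM_PWR_DEVICES];
+         *     VMSTATE_UINT16_ARRAY(fan_config, MAX34451State,
+         *                          MAX34451_NUM_PWR_DEVICES),
+         */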
VMSTATE_UINT16_ARRAY(power_good_off, MAX34451State, + MAX34451_NUM_PWR_DEVICES), + VMSTATE_UINT16_ARRAY(ton_delay, MAX34451State, + MAX34451_NUM_MARGINED_PSU), + VMSTATE_UINT16_ARRAY(ton_max_fault_limit, MAX34451State, + MAX34451_NUM_MARGINED_PSU), + VMSTATE_UINT16_ARRAY(toff_delay, MAX34451State, + MAX34451_NUM_MARGINED_PSU), + VMSTATE_UINT8_ARRAY(status_mfr_specific, MAX34451State, + MAX34451_NUM_PWR_DEVICES), + VMSTATE_UINT64(mfr_location, MAX34451State), + VMSTATE_UINT64(mfr_date, MAX34451State), + VMSTATE_UINT64(mfr_serial, MAX34451State), + VMSTATE_UINT16(mfr_mode, MAX34451State), + VMSTATE_UINT32_ARRAY(psen_config, MAX34451State, + MAX34451_NUM_MARGINED_PSU), + VMSTATE_UINT16_ARRAY(vout_peak, MAX34451State, + MAX34451_NUM_PWR_DEVICES), + VMSTATE_UINT16_ARRAY(iout_peak, MAX34451State, + MAX34451_NUM_PWR_DEVICES), + VMSTATE_UINT16_ARRAY(temperature_peak, MAX34451State, + MAX34451_NUM_TEMP_DEVICES), + VMSTATE_UINT16_ARRAY(vout_min, MAX34451State, MAX34451_NUM_PWR_DEVICES), + VMSTATE_UINT16(nv_log_config, MAX34451State), + VMSTATE_UINT32_ARRAY(fault_response, MAX34451State, + MAX34451_NUM_PWR_DEVICES), + VMSTATE_UINT16(fault_retry, MAX34451State), + VMSTATE_UINT32(fault_log, MAX34451State), + VMSTATE_UINT32(time_count, MAX34451State), + VMSTATE_UINT16_ARRAY(margin_config, MAX34451State, + MAX34451_NUM_MARGINED_PSU), + VMSTATE_UINT16(fw_serial, MAX34451State), + VMSTATE_UINT16_ARRAY(iout_avg, MAX34451State, MAX34451_NUM_PWR_DEVICES), + VMSTATE_UINT16_ARRAY(channel_config, MAX34451State, + MAX34451_NUM_PWR_DEVICES), + VMSTATE_UINT16_ARRAY(ton_seq_max, MAX34451State, + MAX34451_NUM_MARGINED_PSU), + VMSTATE_UINT32_ARRAY(pwm_config, MAX34451State, + MAX34451_NUM_MARGINED_PSU), + VMSTATE_UINT32_ARRAY(seq_config, MAX34451State, + MAX34451_NUM_MARGINED_PSU), + VMSTATE_UINT16_ARRAY(temp_sensor_config, MAX34451State, + MAX34451_NUM_TEMP_DEVICES), + VMSTATE_UINT16(store_single, MAX34451State), + VMSTATE_UINT16(crc, MAX34451State), + VMSTATE_END_OF_LIST() + } +}; + +static void max34451_init(Object *obj) +{ + PMBusDevice *pmdev = PMBUS_DEVICE(obj); + uint64_t psu_flags = PB_HAS_VOUT | PB_HAS_IOUT | PB_HAS_VOUT_MODE | + PB_HAS_IOUT_GAIN; + + for (int i = 0; i < MAX34451_NUM_PWR_DEVICES; i++) { + pmbus_page_config(pmdev, i, psu_flags); + } + + for (int i = 0; i < MAX34451_NUM_MARGINED_PSU; i++) { + pmbus_page_config(pmdev, i, psu_flags | PB_HAS_VOUT_MARGIN); + } + + for (int i = 16; i < MAX34451_NUM_TEMP_DEVICES + 16; i++) { + pmbus_page_config(pmdev, i, PB_HAS_TEMPERATURE | PB_HAS_VOUT_MODE); + } + + /* get and set the voltage in millivolts, max is 32767 mV */ + for (int i = 0; i < MAX34451_NUM_PWR_DEVICES; i++) { + object_property_add(obj, "vout[*]", "uint16", + max34451_get, + max34451_set, NULL, &pmdev->pages[i].read_vout); + } + + /* + * get and set the temperature of the internal temperature sensor in + * centidegrees Celsius i.e.: 2500 -> 25.00 C, max is 327.67 C + */ + for (int i = 0; i < MAX34451_NUM_TEMP_DEVICES; i++) { + object_property_add(obj, "temperature[*]", "uint16", + max34451_get, + max34451_set, + NULL, + &pmdev->pages[i + 16].read_temperature_1); + } + +} + +static void max34451_class_init(ObjectClass *klass, void *data) +{ + ResettableClass *rc = RESETTABLE_CLASS(klass); + DeviceClass *dc = DEVICE_CLASS(klass); + PMBusDeviceClass *k = PMBUS_DEVICE_CLASS(klass); + dc->desc = "Maxim MAX34451 16-Channel V/I monitor"; + dc->vmsd = &vmstate_max34451; + k->write_data = max34451_write_data; + k->receive_byte = max34451_read_byte; + k->device_num_pages = MAX34451_NUM_PAGES; + 
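+    /*
+     * Editor's usage sketch (assumes a board that instantiates the
+     * device with id=pm): the QOM properties registered in
+     * max34451_init() take raw device units -- millivolts for vout[*],
+     * centidegrees Celsius for temperature[*] -- so from the monitor
+     *
+     *     (qemu) qom-set /machine/peripheral/pm temperature[0] 2500
+     *
+     * drives the internal sensor to 25.00 C and lets
+     * max34451_check_limits() refresh temperature_peak[0].
+     */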
rc->phases.exit = max34451_exit_reset; +} + +static const TypeInfo max34451_info = { + .name = TYPE_MAX34451, + .parent = TYPE_PMBUS_DEVICE, + .instance_size = sizeof(MAX34451State), + .instance_init = max34451_init, + .class_init = max34451_class_init, +}; + +static void max34451_register_types(void) +{ + type_register_static(&max34451_info); +} + +type_init(max34451_register_types) diff --git a/hw/sensor/meson.build b/hw/sensor/meson.build new file mode 100644 index 000000000..059c4ca93 --- /dev/null +++ b/hw/sensor/meson.build @@ -0,0 +1,6 @@ +softmmu_ss.add(when: 'CONFIG_TMP105', if_true: files('tmp105.c')) +softmmu_ss.add(when: 'CONFIG_TMP421', if_true: files('tmp421.c')) +softmmu_ss.add(when: 'CONFIG_DPS310', if_true: files('dps310.c')) +softmmu_ss.add(when: 'CONFIG_EMC141X', if_true: files('emc141x.c')) +softmmu_ss.add(when: 'CONFIG_ADM1272', if_true: files('adm1272.c')) +softmmu_ss.add(when: 'CONFIG_MAX34451', if_true: files('max34451.c')) diff --git a/hw/sensor/tmp105.c b/hw/sensor/tmp105.c new file mode 100644 index 000000000..205644948 --- /dev/null +++ b/hw/sensor/tmp105.c @@ -0,0 +1,328 @@ +/* + * Texas Instruments TMP105 temperature sensor. + * + * Copyright (C) 2008 Nokia Corporation + * Written by Andrzej Zaborowski + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 or + * (at your option) version 3 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see <http://www.gnu.org/licenses/>. + */ + +#include "qemu/osdep.h" +#include "hw/i2c/i2c.h" +#include "hw/irq.h" +#include "migration/vmstate.h" +#include "hw/sensor/tmp105.h" +#include "qapi/error.h" +#include "qapi/visitor.h" +#include "qemu/module.h" + +static void tmp105_interrupt_update(TMP105State *s) +{ + qemu_set_irq(s->pin, s->alarm ^ ((~s->config >> 2) & 1)); /* POL */ +} + +static void tmp105_alarm_update(TMP105State *s) +{ + if ((s->config >> 0) & 1) { /* SD */ + if ((s->config >> 7) & 1) /* OS */ + s->config &= ~(1 << 7); /* OS */ + else + return; + } + + if (s->config >> 1 & 1) { + /* + * TM == 1 : Interrupt mode. We signal Alert when the + * temperature rises above T_high, and expect the guest to clear + * it (eg by reading a device register). + */ + if (s->detect_falling) { + if (s->temperature < s->limit[0]) { + s->alarm = 1; + s->detect_falling = false; + } + } else { + if (s->temperature >= s->limit[1]) { + s->alarm = 1; + s->detect_falling = true; + } + } + } else { + /* + * TM == 0 : Comparator mode. We signal Alert when the temperature + * rises above T_high, and stop signalling it when the temperature + * falls below T_low. + */ + if (s->detect_falling) { + if (s->temperature < s->limit[0]) { + s->alarm = 0; + s->detect_falling = false; + } + } else { + if (s->temperature >= s->limit[1]) { + s->alarm = 1; + s->detect_falling = true; + } + } + } + + tmp105_interrupt_update(s); +} + +static void tmp105_get_temperature(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + TMP105State *s = TMP105(obj); + int64_t value = s->temperature * 1000 / 256; + + visit_type_int(v, name, &value, errp); +} + +/* Units are 0.001 centigrades relative to 0 C. 
s->temperature is 8.8 + * fixed point, so units are 1/256 centigrades. A simple ratio will do. + */ +static void tmp105_set_temperature(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + TMP105State *s = TMP105(obj); + int64_t temp; + + if (!visit_type_int(v, name, &temp, errp)) { + return; + } + if (temp >= 128000 || temp < -128000) { + error_setg(errp, "value %" PRId64 ".%03" PRIu64 " C is out of range", + temp / 1000, temp % 1000); + return; + } + + s->temperature = (int16_t) (temp * 256 / 1000); + + tmp105_alarm_update(s); +} + +static const int tmp105_faultq[4] = { 1, 2, 4, 6 }; + +static void tmp105_read(TMP105State *s) +{ + s->len = 0; + + if ((s->config >> 1) & 1) { /* TM */ + s->alarm = 0; + tmp105_interrupt_update(s); + } + + switch (s->pointer & 3) { + case TMP105_REG_TEMPERATURE: + s->buf[s->len ++] = (((uint16_t) s->temperature) >> 8); + s->buf[s->len ++] = (((uint16_t) s->temperature) >> 0) & + (0xf0 << ((~s->config >> 5) & 3)); /* R */ + break; + + case TMP105_REG_CONFIG: + s->buf[s->len ++] = s->config; + break; + + case TMP105_REG_T_LOW: + s->buf[s->len ++] = ((uint16_t) s->limit[0]) >> 8; + s->buf[s->len ++] = ((uint16_t) s->limit[0]) >> 0; + break; + + case TMP105_REG_T_HIGH: + s->buf[s->len ++] = ((uint16_t) s->limit[1]) >> 8; + s->buf[s->len ++] = ((uint16_t) s->limit[1]) >> 0; + break; + } +} + +static void tmp105_write(TMP105State *s) +{ + switch (s->pointer & 3) { + case TMP105_REG_TEMPERATURE: + break; + + case TMP105_REG_CONFIG: + if (s->buf[0] & ~s->config & (1 << 0)) /* SD */ + printf("%s: TMP105 shutdown\n", __func__); + s->config = s->buf[0]; + s->faults = tmp105_faultq[(s->config >> 3) & 3]; /* F */ + tmp105_alarm_update(s); + break; + + case TMP105_REG_T_LOW: + case TMP105_REG_T_HIGH: + if (s->len >= 3) + s->limit[s->pointer & 1] = (int16_t) + ((((uint16_t) s->buf[0]) << 8) | s->buf[1]); + tmp105_alarm_update(s); + break; + } +} + +static uint8_t tmp105_rx(I2CSlave *i2c) +{ + TMP105State *s = TMP105(i2c); + + if (s->len < 2) { + return s->buf[s->len ++]; + } else { + return 0xff; + } +} + +static int tmp105_tx(I2CSlave *i2c, uint8_t data) +{ + TMP105State *s = TMP105(i2c); + + if (s->len == 0) { + s->pointer = data; + s->len++; + } else { + if (s->len <= 2) { + s->buf[s->len - 1] = data; + } + s->len++; + tmp105_write(s); + } + + return 0; +} + +static int tmp105_event(I2CSlave *i2c, enum i2c_event event) +{ + TMP105State *s = TMP105(i2c); + + if (event == I2C_START_RECV) { + tmp105_read(s); + } + + s->len = 0; + return 0; +} + +static int tmp105_post_load(void *opaque, int version_id) +{ + TMP105State *s = opaque; + + s->faults = tmp105_faultq[(s->config >> 3) & 3]; /* F */ + + tmp105_interrupt_update(s); + return 0; +} + +static bool detect_falling_needed(void *opaque) +{ + TMP105State *s = opaque; + + /* + * We only need to migrate the detect_falling bool if it's set; + * for migration from older machines we assume that it is false + * (ie temperature is not out of range). 
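+ *
+ * (Editor's note on the two alert modes implemented in
+ * tmp105_alarm_update() above, using the reset limits T_LOW=75 C and
+ * T_HIGH=80 C: in comparator mode a temperature swing 74->81->74 C
+ * yields alarm 0->1->0; in interrupt mode the same swing latches the
+ * alarm until the guest reads a register, and detect_falling records
+ * which threshold, T_HIGH rising or T_LOW falling, re-arms the alert.)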
+ */ + return s->detect_falling; +} + +static const VMStateDescription vmstate_tmp105_detect_falling = { + .name = "TMP105/detect-falling", + .version_id = 1, + .minimum_version_id = 1, + .needed = detect_falling_needed, + .fields = (VMStateField[]) { + VMSTATE_BOOL(detect_falling, TMP105State), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_tmp105 = { + .name = "TMP105", + .version_id = 0, + .minimum_version_id = 0, + .post_load = tmp105_post_load, + .fields = (VMStateField[]) { + VMSTATE_UINT8(len, TMP105State), + VMSTATE_UINT8_ARRAY(buf, TMP105State, 2), + VMSTATE_UINT8(pointer, TMP105State), + VMSTATE_UINT8(config, TMP105State), + VMSTATE_INT16(temperature, TMP105State), + VMSTATE_INT16_ARRAY(limit, TMP105State, 2), + VMSTATE_UINT8(alarm, TMP105State), + VMSTATE_I2C_SLAVE(i2c, TMP105State), + VMSTATE_END_OF_LIST() + }, + .subsections = (const VMStateDescription*[]) { + &vmstate_tmp105_detect_falling, + NULL + } +}; + +static void tmp105_reset(I2CSlave *i2c) +{ + TMP105State *s = TMP105(i2c); + + s->temperature = 0; + s->pointer = 0; + s->config = 0; + s->faults = tmp105_faultq[(s->config >> 3) & 3]; + s->alarm = 0; + s->detect_falling = false; + + s->limit[0] = 0x4b00; /* T_LOW, 75 degrees C */ + s->limit[1] = 0x5000; /* T_HIGH, 80 degrees C */ + + tmp105_interrupt_update(s); +} + +static void tmp105_realize(DeviceState *dev, Error **errp) +{ + I2CSlave *i2c = I2C_SLAVE(dev); + TMP105State *s = TMP105(i2c); + + qdev_init_gpio_out(&i2c->qdev, &s->pin, 1); + + tmp105_reset(&s->i2c); +} + +static void tmp105_initfn(Object *obj) +{ + object_property_add(obj, "temperature", "int", + tmp105_get_temperature, + tmp105_set_temperature, NULL, NULL); +} + +static void tmp105_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + I2CSlaveClass *k = I2C_SLAVE_CLASS(klass); + + dc->realize = tmp105_realize; + k->event = tmp105_event; + k->recv = tmp105_rx; + k->send = tmp105_tx; + dc->vmsd = &vmstate_tmp105; +} + +static const TypeInfo tmp105_info = { + .name = TYPE_TMP105, + .parent = TYPE_I2C_SLAVE, + .instance_size = sizeof(TMP105State), + .instance_init = tmp105_initfn, + .class_init = tmp105_class_init, +}; + +static void tmp105_register_types(void) +{ + type_register_static(&tmp105_info); +} + +type_init(tmp105_register_types) diff --git a/hw/sensor/tmp421.c b/hw/sensor/tmp421.c new file mode 100644 index 000000000..a3db57dcb --- /dev/null +++ b/hw/sensor/tmp421.c @@ -0,0 +1,391 @@ +/* + * Texas Instruments TMP421 temperature sensor. + * + * Copyright (c) 2016 IBM Corporation. + * + * Largely inspired by : + * + * Texas Instruments TMP105 temperature sensor. + * + * Copyright (C) 2008 Nokia Corporation + * Written by Andrzej Zaborowski + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 or + * (at your option) version 3 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see <http://www.gnu.org/licenses/>. 
+ */ + +#include "qemu/osdep.h" +#include "hw/i2c/i2c.h" +#include "migration/vmstate.h" +#include "qapi/error.h" +#include "qapi/visitor.h" +#include "qemu/module.h" +#include "qom/object.h" + +/* Manufacturer / Device ID's */ +#define TMP421_MANUFACTURER_ID 0x55 +#define TMP421_DEVICE_ID 0x21 +#define TMP422_DEVICE_ID 0x22 +#define TMP423_DEVICE_ID 0x23 + +typedef struct DeviceInfo { + int model; + const char *name; +} DeviceInfo; + +static const DeviceInfo devices[] = { + { TMP421_DEVICE_ID, "tmp421" }, + { TMP422_DEVICE_ID, "tmp422" }, + { TMP423_DEVICE_ID, "tmp423" }, +}; + +struct TMP421State { + /*< private >*/ + I2CSlave i2c; + /*< public >*/ + + int16_t temperature[4]; + + uint8_t status; + uint8_t config[2]; + uint8_t rate; + + uint8_t len; + uint8_t buf[2]; + uint8_t pointer; + +}; + +struct TMP421Class { + I2CSlaveClass parent_class; + DeviceInfo *dev; +}; + +#define TYPE_TMP421 "tmp421-generic" +OBJECT_DECLARE_TYPE(TMP421State, TMP421Class, TMP421) + + +/* the TMP421 registers */ +#define TMP421_STATUS_REG 0x08 +#define TMP421_STATUS_BUSY (1 << 7) +#define TMP421_CONFIG_REG_1 0x09 +#define TMP421_CONFIG_RANGE (1 << 2) +#define TMP421_CONFIG_SHUTDOWN (1 << 6) +#define TMP421_CONFIG_REG_2 0x0A +#define TMP421_CONFIG_RC (1 << 2) +#define TMP421_CONFIG_LEN (1 << 3) +#define TMP421_CONFIG_REN (1 << 4) +#define TMP421_CONFIG_REN2 (1 << 5) +#define TMP421_CONFIG_REN3 (1 << 6) + +#define TMP421_CONVERSION_RATE_REG 0x0B +#define TMP421_ONE_SHOT 0x0F + +#define TMP421_RESET 0xFC +#define TMP421_MANUFACTURER_ID_REG 0xFE +#define TMP421_DEVICE_ID_REG 0xFF + +#define TMP421_TEMP_MSB0 0x00 +#define TMP421_TEMP_MSB1 0x01 +#define TMP421_TEMP_MSB2 0x02 +#define TMP421_TEMP_MSB3 0x03 +#define TMP421_TEMP_LSB0 0x10 +#define TMP421_TEMP_LSB1 0x11 +#define TMP421_TEMP_LSB2 0x12 +#define TMP421_TEMP_LSB3 0x13 + +static const int32_t mins[2] = { -40000, -55000 }; +static const int32_t maxs[2] = { 127000, 150000 }; + +static void tmp421_get_temperature(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + TMP421State *s = TMP421(obj); + bool ext_range = (s->config[0] & TMP421_CONFIG_RANGE); + int offset = ext_range * 64 * 256; + int64_t value; + int tempid; + + if (sscanf(name, "temperature%d", &tempid) != 1) { + error_setg(errp, "error reading %s: %s", name, g_strerror(errno)); + return; + } + + if (tempid >= 4 || tempid < 0) { + error_setg(errp, "error reading %s", name); + return; + } + + value = ((s->temperature[tempid] - offset) * 1000 + 128) / 256; + + visit_type_int(v, name, &value, errp); +} + +/* Units are 0.001 centigrades relative to 0 C. s->temperature is 8.8 + * fixed point, so units are 1/256 centigrades. A simple ratio will do. 
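+ *
+ * (Editor's worked example: 25 C in 8.8 fixed point is 25 * 256 = 6400
+ * = 0x1900.  When TMP421_CONFIG_RANGE selects the extended range, the
+ * device reports offset-64 binary, so the same reading is stored as
+ * 6400 + 64 * 256 = 22784, and the getter subtracts the offset again
+ * before scaling back to millidegrees.)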
+ */ +static void tmp421_set_temperature(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + TMP421State *s = TMP421(obj); + int64_t temp; + bool ext_range = (s->config[0] & TMP421_CONFIG_RANGE); + int offset = ext_range * 64 * 256; + int tempid; + + if (!visit_type_int(v, name, &temp, errp)) { + return; + } + + if (temp >= maxs[ext_range] || temp < mins[ext_range]) { + error_setg(errp, "value %" PRId64 ".%03" PRIu64 " C is out of range", + temp / 1000, temp % 1000); + return; + } + + if (sscanf(name, "temperature%d", &tempid) != 1) { + error_setg(errp, "error reading %s: %s", name, g_strerror(errno)); + return; + } + + if (tempid >= 4 || tempid < 0) { + error_setg(errp, "error reading %s", name); + return; + } + + s->temperature[tempid] = (int16_t) ((temp * 256 - 128) / 1000) + offset; +} + +static void tmp421_read(TMP421State *s) +{ + TMP421Class *sc = TMP421_GET_CLASS(s); + + s->len = 0; + + switch (s->pointer) { + case TMP421_MANUFACTURER_ID_REG: + s->buf[s->len++] = TMP421_MANUFACTURER_ID; + break; + case TMP421_DEVICE_ID_REG: + s->buf[s->len++] = sc->dev->model; + break; + case TMP421_CONFIG_REG_1: + s->buf[s->len++] = s->config[0]; + break; + case TMP421_CONFIG_REG_2: + s->buf[s->len++] = s->config[1]; + break; + case TMP421_CONVERSION_RATE_REG: + s->buf[s->len++] = s->rate; + break; + case TMP421_STATUS_REG: + s->buf[s->len++] = s->status; + break; + + /* FIXME: check for channel enablement in config registers */ + case TMP421_TEMP_MSB0: + s->buf[s->len++] = (((uint16_t) s->temperature[0]) >> 8); + s->buf[s->len++] = (((uint16_t) s->temperature[0]) >> 0) & 0xf0; + break; + case TMP421_TEMP_MSB1: + s->buf[s->len++] = (((uint16_t) s->temperature[1]) >> 8); + s->buf[s->len++] = (((uint16_t) s->temperature[1]) >> 0) & 0xf0; + break; + case TMP421_TEMP_MSB2: + s->buf[s->len++] = (((uint16_t) s->temperature[2]) >> 8); + s->buf[s->len++] = (((uint16_t) s->temperature[2]) >> 0) & 0xf0; + break; + case TMP421_TEMP_MSB3: + s->buf[s->len++] = (((uint16_t) s->temperature[3]) >> 8); + s->buf[s->len++] = (((uint16_t) s->temperature[3]) >> 0) & 0xf0; + break; + case TMP421_TEMP_LSB0: + s->buf[s->len++] = (((uint16_t) s->temperature[0]) >> 0) & 0xf0; + break; + case TMP421_TEMP_LSB1: + s->buf[s->len++] = (((uint16_t) s->temperature[1]) >> 0) & 0xf0; + break; + case TMP421_TEMP_LSB2: + s->buf[s->len++] = (((uint16_t) s->temperature[2]) >> 0) & 0xf0; + break; + case TMP421_TEMP_LSB3: + s->buf[s->len++] = (((uint16_t) s->temperature[3]) >> 0) & 0xf0; + break; + } +} + +static void tmp421_reset(I2CSlave *i2c); + +static void tmp421_write(TMP421State *s) +{ + switch (s->pointer) { + case TMP421_CONVERSION_RATE_REG: + s->rate = s->buf[0]; + break; + case TMP421_CONFIG_REG_1: + s->config[0] = s->buf[0]; + break; + case TMP421_CONFIG_REG_2: + s->config[1] = s->buf[0]; + break; + case TMP421_RESET: + tmp421_reset(I2C_SLAVE(s)); + break; + } +} + +static uint8_t tmp421_rx(I2CSlave *i2c) +{ + TMP421State *s = TMP421(i2c); + + if (s->len < 2) { + return s->buf[s->len++]; + } else { + return 0xff; + } +} + +static int tmp421_tx(I2CSlave *i2c, uint8_t data) +{ + TMP421State *s = TMP421(i2c); + + if (s->len == 0) { + /* first byte is the register pointer for a read or write + * operation */ + s->pointer = data; + s->len++; + } else if (s->len == 1) { + /* second byte is the data to write. 
The device only supports + * one byte writes */ + s->buf[0] = data; + tmp421_write(s); + } + + return 0; +} + +static int tmp421_event(I2CSlave *i2c, enum i2c_event event) +{ + TMP421State *s = TMP421(i2c); + + if (event == I2C_START_RECV) { + tmp421_read(s); + } + + s->len = 0; + return 0; +} + +static const VMStateDescription vmstate_tmp421 = { + .name = "TMP421", + .version_id = 0, + .minimum_version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_UINT8(len, TMP421State), + VMSTATE_UINT8_ARRAY(buf, TMP421State, 2), + VMSTATE_UINT8(pointer, TMP421State), + VMSTATE_UINT8_ARRAY(config, TMP421State, 2), + VMSTATE_UINT8(status, TMP421State), + VMSTATE_UINT8(rate, TMP421State), + VMSTATE_INT16_ARRAY(temperature, TMP421State, 4), + VMSTATE_I2C_SLAVE(i2c, TMP421State), + VMSTATE_END_OF_LIST() + } +}; + +static void tmp421_reset(I2CSlave *i2c) +{ + TMP421State *s = TMP421(i2c); + TMP421Class *sc = TMP421_GET_CLASS(s); + + memset(s->temperature, 0, sizeof(s->temperature)); + s->pointer = 0; + + s->config[0] = 0; /* TMP421_CONFIG_RANGE */ + + /* resistance correction and channel enablement */ + switch (sc->dev->model) { + case TMP421_DEVICE_ID: + s->config[1] = 0x1c; + break; + case TMP422_DEVICE_ID: + s->config[1] = 0x3c; + break; + case TMP423_DEVICE_ID: + s->config[1] = 0x7c; + break; + } + + s->rate = 0x7; /* 8Hz */ + s->status = 0; +} + +static void tmp421_realize(DeviceState *dev, Error **errp) +{ + TMP421State *s = TMP421(dev); + + tmp421_reset(&s->i2c); +} + +static void tmp421_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + I2CSlaveClass *k = I2C_SLAVE_CLASS(klass); + TMP421Class *sc = TMP421_CLASS(klass); + + dc->realize = tmp421_realize; + k->event = tmp421_event; + k->recv = tmp421_rx; + k->send = tmp421_tx; + dc->vmsd = &vmstate_tmp421; + sc->dev = (DeviceInfo *) data; + + object_class_property_add(klass, "temperature0", "int", + tmp421_get_temperature, + tmp421_set_temperature, NULL, NULL); + object_class_property_add(klass, "temperature1", "int", + tmp421_get_temperature, + tmp421_set_temperature, NULL, NULL); + object_class_property_add(klass, "temperature2", "int", + tmp421_get_temperature, + tmp421_set_temperature, NULL, NULL); + object_class_property_add(klass, "temperature3", "int", + tmp421_get_temperature, + tmp421_set_temperature, NULL, NULL); +} + +static const TypeInfo tmp421_info = { + .name = TYPE_TMP421, + .parent = TYPE_I2C_SLAVE, + .instance_size = sizeof(TMP421State), + .class_size = sizeof(TMP421Class), + .abstract = true, +}; + +static void tmp421_register_types(void) +{ + int i; + + type_register_static(&tmp421_info); + for (i = 0; i < ARRAY_SIZE(devices); ++i) { + TypeInfo ti = { + .name = devices[i].name, + .parent = TYPE_TMP421, + .class_init = tmp421_class_init, + .class_data = (void *) &devices[i], + }; + type_register(&ti); + } +} + +type_init(tmp421_register_types) diff --git a/hw/sh4/Kconfig b/hw/sh4/Kconfig new file mode 100644 index 000000000..ab733a3f7 --- /dev/null +++ b/hw/sh4/Kconfig @@ -0,0 +1,24 @@ +config R2D + bool + imply PCI_DEVICES + imply TEST_DEVICES + imply RTL8139_PCI + select I82378 if TEST_DEVICES + select IDE_MMIO + select PFLASH_CFI02 + select USB_OHCI_PCI + select PCI + select SM501 + select SH7750 + select SH_PCI + +config SHIX + bool + select SH7750 + select TC58128 + +config SH7750 + bool + select SH_INTC + select SH_SCI + select SH_TIMER diff --git a/hw/sh4/meson.build b/hw/sh4/meson.build new file mode 100644 index 000000000..424d5674d --- /dev/null +++ b/hw/sh4/meson.build @@ -0,0 +1,9 
@@ +sh4_ss = ss.source_set() +sh4_ss.add(files( + 'sh7750.c', + 'sh7750_regnames.c', +)) +sh4_ss.add(when: 'CONFIG_R2D', if_true: files('r2d.c')) +sh4_ss.add(when: 'CONFIG_SHIX', if_true: files('shix.c')) + +hw_arch += {'sh4': sh4_ss} diff --git a/hw/sh4/r2d.c b/hw/sh4/r2d.c new file mode 100644 index 000000000..72759413f --- /dev/null +++ b/hw/sh4/r2d.c @@ -0,0 +1,380 @@ +/* + * Renesas SH7751R R2D-PLUS emulation + * + * Copyright (c) 2007 Magnus Damm + * Copyright (c) 2008 Paul Mundt + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "qapi/error.h" +#include "qemu/error-report.h" +#include "cpu.h" +#include "hw/sysbus.h" +#include "hw/sh4/sh.h" +#include "sysemu/reset.h" +#include "sysemu/runstate.h" +#include "sysemu/sysemu.h" +#include "hw/boards.h" +#include "hw/pci/pci.h" +#include "hw/qdev-properties.h" +#include "net/net.h" +#include "sh7750_regs.h" +#include "hw/ide.h" +#include "hw/irq.h" +#include "hw/loader.h" +#include "hw/usb.h" +#include "hw/block/flash.h" + +#define FLASH_BASE 0x00000000 +#define FLASH_SIZE (16 * MiB) + +#define SDRAM_BASE 0x0c000000 /* Physical location of SDRAM: Area 3 */ +#define SDRAM_SIZE 0x04000000 + +#define SM501_VRAM_SIZE 0x800000 + +#define BOOT_PARAMS_OFFSET 0x0010000 +/* CONFIG_BOOT_LINK_OFFSET of Linux kernel */ +#define LINUX_LOAD_OFFSET 0x0800000 +#define INITRD_LOAD_OFFSET 0x1800000 + +#define PA_IRLMSK 0x00 +#define PA_POWOFF 0x30 +#define PA_VERREG 0x32 +#define PA_OUTPORT 0x36 + +typedef struct { + uint16_t bcr; + uint16_t irlmsk; + uint16_t irlmon; + uint16_t cfctl; + uint16_t cfpow; + uint16_t dispctl; + uint16_t sdmpow; + uint16_t rtcce; + uint16_t pcicd; + uint16_t voyagerrts; + uint16_t cfrst; + uint16_t admrts; + uint16_t extrst; + uint16_t cfcdintclr; + uint16_t keyctlclr; + uint16_t pad0; + uint16_t pad1; + uint16_t verreg; + uint16_t inport; + uint16_t outport; + uint16_t bverreg; + +/* output pin */ + qemu_irq irl; + MemoryRegion iomem; +} r2d_fpga_t; + +enum r2d_fpga_irq { + PCI_INTD, CF_IDE, CF_CD, PCI_INTC, SM501, KEY, RTC_A, RTC_T, + SDCARD, PCI_INTA, PCI_INTB, EXT, TP, + NR_IRQS +}; + +static const struct { short irl; uint16_t msk; } irqtab[NR_IRQS] = { + [CF_IDE] = { 1, 1 << 9 }, + [CF_CD] = { 2, 1 << 8 }, + [PCI_INTA] = { 9, 1 << 14 }, + [PCI_INTB] = { 10, 1 << 13 }, + [PCI_INTC] = { 3, 1 << 12 }, + [PCI_INTD] = { 0, 1 << 11 }, + [SM501] = { 4, 1 << 10 }, + [KEY] = { 5, 1 << 6 }, + [RTC_A] = { 6, 1 << 5 }, + [RTC_T] = { 7, 1 << 4 }, + 
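+    /*
+     * Editor's note (sketch): each entry pairs the 4-bit IRL level a
+     * source drives with its bit in the IRLMON/IRLMSK registers.
+     * update_irl() below scans for the numerically lowest pending,
+     * unmasked level (the highest priority) and asserts (level ^ 15)
+     * on the CPU's IRL input, 15 meaning "no interrupt" in the
+     * active-low SH4 encoding -- e.g. only CF_IDE pending gives
+     * irl = 1 and a line value of 14.
+     */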
[SDCARD] = { 8, 1 << 7 }, + [EXT] = { 11, 1 << 0 }, + [TP] = { 12, 1 << 15 }, +}; + +static void update_irl(r2d_fpga_t *fpga) +{ + int i, irl = 15; + for (i = 0; i < NR_IRQS; i++) { + if ((fpga->irlmon & fpga->irlmsk & irqtab[i].msk) && + irqtab[i].irl < irl) { + irl = irqtab[i].irl; + } + } + qemu_set_irq(fpga->irl, irl ^ 15); +} + +static void r2d_fpga_irq_set(void *opaque, int n, int level) +{ + r2d_fpga_t *fpga = opaque; + if (level) { + fpga->irlmon |= irqtab[n].msk; + } else { + fpga->irlmon &= ~irqtab[n].msk; + } + update_irl(fpga); +} + +static uint64_t r2d_fpga_read(void *opaque, hwaddr addr, unsigned int size) +{ + r2d_fpga_t *s = opaque; + + switch (addr) { + case PA_IRLMSK: + return s->irlmsk; + case PA_OUTPORT: + return s->outport; + case PA_POWOFF: + return 0x00; + case PA_VERREG: + return 0x10; + } + + return 0; +} + +static void +r2d_fpga_write(void *opaque, hwaddr addr, uint64_t value, unsigned int size) +{ + r2d_fpga_t *s = opaque; + + switch (addr) { + case PA_IRLMSK: + s->irlmsk = value; + update_irl(s); + break; + case PA_OUTPORT: + s->outport = value; + break; + case PA_POWOFF: + if (value & 1) { + qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN); + } + break; + case PA_VERREG: + /* Discard writes */ + break; + } +} + +static const MemoryRegionOps r2d_fpga_ops = { + .read = r2d_fpga_read, + .write = r2d_fpga_write, + .impl.min_access_size = 2, + .impl.max_access_size = 2, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static qemu_irq *r2d_fpga_init(MemoryRegion *sysmem, + hwaddr base, qemu_irq irl) +{ + r2d_fpga_t *s; + + s = g_malloc0(sizeof(r2d_fpga_t)); + + s->irl = irl; + + memory_region_init_io(&s->iomem, NULL, &r2d_fpga_ops, s, "r2d-fpga", 0x40); + memory_region_add_subregion(sysmem, base, &s->iomem); + return qemu_allocate_irqs(r2d_fpga_irq_set, s, NR_IRQS); +} + +typedef struct ResetData { + SuperHCPU *cpu; + uint32_t vector; +} ResetData; + +static void main_cpu_reset(void *opaque) +{ + ResetData *s = (ResetData *)opaque; + CPUSH4State *env = &s->cpu->env; + + cpu_reset(CPU(s->cpu)); + env->pc = s->vector; +} + +static struct QEMU_PACKED +{ + int mount_root_rdonly; + int ramdisk_flags; + int orig_root_dev; + int loader_type; + int initrd_start; + int initrd_size; + + char pad[232]; + + char kernel_cmdline[256] QEMU_NONSTRING; +} boot_params; + +static void r2d_init(MachineState *machine) +{ + const char *kernel_filename = machine->kernel_filename; + const char *kernel_cmdline = machine->kernel_cmdline; + const char *initrd_filename = machine->initrd_filename; + SuperHCPU *cpu; + CPUSH4State *env; + ResetData *reset_info; + struct SH7750State *s; + MemoryRegion *sdram = g_new(MemoryRegion, 1); + qemu_irq *irq; + DriveInfo *dinfo; + int i; + DeviceState *dev; + SysBusDevice *busdev; + MemoryRegion *address_space_mem = get_system_memory(); + PCIBus *pci_bus; + + cpu = SUPERH_CPU(cpu_create(machine->cpu_type)); + env = &cpu->env; + + reset_info = g_malloc0(sizeof(ResetData)); + reset_info->cpu = cpu; + reset_info->vector = env->pc; + qemu_register_reset(main_cpu_reset, reset_info); + + /* Allocate memory space */ + memory_region_init_ram(sdram, NULL, "r2d.sdram", SDRAM_SIZE, &error_fatal); + memory_region_add_subregion(address_space_mem, SDRAM_BASE, sdram); + /* Register peripherals */ + s = sh7750_init(cpu, address_space_mem); + irq = r2d_fpga_init(address_space_mem, 0x04000000, sh7750_irl(s)); + + dev = qdev_new("sh_pci"); + busdev = SYS_BUS_DEVICE(dev); + sysbus_realize_and_unref(busdev, &error_fatal); + pci_bus = PCI_BUS(qdev_get_child_bus(dev, 
"pci")); + sysbus_mmio_map(busdev, 0, P4ADDR(0x1e200000)); + sysbus_mmio_map(busdev, 1, A7ADDR(0x1e200000)); + sysbus_connect_irq(busdev, 0, irq[PCI_INTA]); + sysbus_connect_irq(busdev, 1, irq[PCI_INTB]); + sysbus_connect_irq(busdev, 2, irq[PCI_INTC]); + sysbus_connect_irq(busdev, 3, irq[PCI_INTD]); + + dev = qdev_new("sysbus-sm501"); + busdev = SYS_BUS_DEVICE(dev); + qdev_prop_set_uint32(dev, "vram-size", SM501_VRAM_SIZE); + qdev_prop_set_uint32(dev, "base", 0x10000000); + qdev_prop_set_chr(dev, "chardev", serial_hd(2)); + sysbus_realize_and_unref(busdev, &error_fatal); + sysbus_mmio_map(busdev, 0, 0x10000000); + sysbus_mmio_map(busdev, 1, 0x13e00000); + sysbus_connect_irq(busdev, 0, irq[SM501]); + + /* onboard CF (True IDE mode, Master only). */ + dinfo = drive_get(IF_IDE, 0, 0); + dev = qdev_new("mmio-ide"); + busdev = SYS_BUS_DEVICE(dev); + sysbus_connect_irq(busdev, 0, irq[CF_IDE]); + qdev_prop_set_uint32(dev, "shift", 1); + sysbus_realize_and_unref(busdev, &error_fatal); + sysbus_mmio_map(busdev, 0, 0x14001000); + sysbus_mmio_map(busdev, 1, 0x1400080c); + mmio_ide_init_drives(dev, dinfo, NULL); + + /* + * Onboard flash memory + * According to the old board user document in Japanese (under + * NDA) what is referred to as FROM (Area0) is connected via a + * 32-bit bus and CS0 to CN8. The docs mention a Cypress + * S29PL127J60TFI130 chipsset. Per the 'S29PL-J 002-00615 + * Rev. *E' datasheet, it is a 128Mbit NOR parallel flash + * addressable in words of 16bit. + */ + dinfo = drive_get(IF_PFLASH, 0, 0); + pflash_cfi02_register(0x0, "r2d.flash", FLASH_SIZE, + dinfo ? blk_by_legacy_dinfo(dinfo) : NULL, + 64 * KiB, 1, 2, 0x0001, 0x227e, 0x2220, 0x2200, + 0x555, 0x2aa, 0); + + /* NIC: rtl8139 on-board, and 2 slots. */ + for (i = 0; i < nb_nics; i++) + pci_nic_init_nofail(&nd_table[i], pci_bus, + "rtl8139", i == 0 ? "2" : NULL); + + /* USB keyboard */ + usb_create_simple(usb_bus_find(-1), "usb-kbd"); + + /* Todo: register on board registers */ + memset(&boot_params, 0, sizeof(boot_params)); + + if (kernel_filename) { + int kernel_size; + + kernel_size = load_image_targphys(kernel_filename, + SDRAM_BASE + LINUX_LOAD_OFFSET, + INITRD_LOAD_OFFSET - LINUX_LOAD_OFFSET); + if (kernel_size < 0) { + error_report("qemu: could not load kernel '%s'", kernel_filename); + exit(1); + } + + /* initialization which should be done by firmware */ + address_space_stl(&address_space_memory, SH7750_BCR1, 1 << 3, + MEMTXATTRS_UNSPECIFIED, NULL); /* cs3 SDRAM */ + address_space_stw(&address_space_memory, SH7750_BCR2, 3 << (3 * 2), + MEMTXATTRS_UNSPECIFIED, NULL); /* cs3 32bit */ + /* Start from P2 area */ + reset_info->vector = (SDRAM_BASE + LINUX_LOAD_OFFSET) | 0xa0000000; + } + + if (initrd_filename) { + int initrd_size; + + initrd_size = load_image_targphys(initrd_filename, + SDRAM_BASE + INITRD_LOAD_OFFSET, + SDRAM_SIZE - INITRD_LOAD_OFFSET); + + if (initrd_size < 0) { + error_report("qemu: could not load initrd '%s'", initrd_filename); + exit(1); + } + + /* initialization which should be done by firmware */ + boot_params.loader_type = tswap32(1); + boot_params.initrd_start = tswap32(INITRD_LOAD_OFFSET); + boot_params.initrd_size = tswap32(initrd_size); + } + + if (kernel_cmdline) { + /* + * I see no evidence that this .kernel_cmdline buffer requires + * NUL-termination, so using strncpy should be ok. 
+ */ + strncpy(boot_params.kernel_cmdline, kernel_cmdline, + sizeof(boot_params.kernel_cmdline)); + } + + rom_add_blob_fixed("boot_params", &boot_params, sizeof(boot_params), + SDRAM_BASE + BOOT_PARAMS_OFFSET); +} + +static void r2d_machine_init(MachineClass *mc) +{ + mc->desc = "r2d-plus board"; + mc->init = r2d_init; + mc->block_default_type = IF_IDE; + mc->default_cpu_type = TYPE_SH7751R_CPU; +} + +DEFINE_MACHINE("r2d", r2d_machine_init) diff --git a/hw/sh4/sh7750.c b/hw/sh4/sh7750.c new file mode 100644 index 000000000..43dfb6497 --- /dev/null +++ b/hw/sh4/sh7750.c @@ -0,0 +1,904 @@ +/* + * SH7750 device + * + * Copyright (c) 2007 Magnus Damm + * Copyright (c) 2005 Samuel Tardieu + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "hw/sysbus.h" +#include "hw/irq.h" +#include "hw/sh4/sh.h" +#include "sysemu/sysemu.h" +#include "hw/qdev-properties.h" +#include "hw/qdev-properties-system.h" +#include "sh7750_regs.h" +#include "sh7750_regnames.h" +#include "hw/sh4/sh_intc.h" +#include "hw/timer/tmu012.h" +#include "exec/exec-all.h" +#include "trace.h" + +#define NB_DEVICES 4 + +typedef struct SH7750State { + MemoryRegion iomem; + MemoryRegion iomem_1f0; + MemoryRegion iomem_ff0; + MemoryRegion iomem_1f8; + MemoryRegion iomem_ff8; + MemoryRegion iomem_1fc; + MemoryRegion iomem_ffc; + MemoryRegion mmct_iomem; + /* CPU */ + SuperHCPU *cpu; + /* Peripheral frequency in Hz */ + uint32_t periph_freq; + /* SDRAM controller */ + uint32_t bcr1; + uint16_t bcr2; + uint16_t bcr3; + uint32_t bcr4; + uint16_t rfcr; + /* PCMCIA controller */ + uint16_t pcr; + /* IO ports */ + uint16_t gpioic; + uint32_t pctra; + uint32_t pctrb; + uint16_t portdira; /* Cached */ + uint16_t portpullupa; /* Cached */ + uint16_t portdirb; /* Cached */ + uint16_t portpullupb; /* Cached */ + uint16_t pdtra; + uint16_t pdtrb; + uint16_t periph_pdtra; /* Imposed by the peripherals */ + uint16_t periph_portdira; /* Direction seen from the peripherals */ + uint16_t periph_pdtrb; /* Imposed by the peripherals */ + uint16_t periph_portdirb; /* Direction seen from the peripherals */ + sh7750_io_device *devices[NB_DEVICES]; /* External peripherals */ + + /* Cache */ + uint32_t ccr; + + struct intc_desc intc; +} SH7750State; + +static inline int has_bcr3_and_bcr4(SH7750State *s) +{ + return s->cpu->env.features & SH_FEATURE_BCR3_AND_BCR4; +} + +/* + * I/O ports + */ + +int sh7750_register_io_device(SH7750State *s, sh7750_io_device *device) +{ + int i; + + for (i = 0; i < NB_DEVICES; i++) { + if (s->devices[i] == NULL) { + s->devices[i] = device; + return 0; + } + } + return -1; +} + +static uint16_t portdir(uint32_t v) +{ +#define EVENPORTMASK(n) ((v & (1 << ((n) << 1))) >> (n)) + return + EVENPORTMASK(15) | EVENPORTMASK(14) | EVENPORTMASK(13) | + EVENPORTMASK(12) | EVENPORTMASK(11) | EVENPORTMASK(10) | + EVENPORTMASK(9) | EVENPORTMASK(8) | EVENPORTMASK(7) | + EVENPORTMASK(6) | EVENPORTMASK(5) | EVENPORTMASK(4) | + EVENPORTMASK(3) | EVENPORTMASK(2) | EVENPORTMASK(1) | + EVENPORTMASK(0); +} + +static uint16_t portpullup(uint32_t v) +{ +#define ODDPORTMASK(n) ((v & (1 << (((n) << 1) + 1))) >> (n)) + return + ODDPORTMASK(15) | ODDPORTMASK(14) | ODDPORTMASK(13) | + ODDPORTMASK(12) | ODDPORTMASK(11) | ODDPORTMASK(10) | + ODDPORTMASK(9) | ODDPORTMASK(8) | ODDPORTMASK(7) | ODDPORTMASK(6) | + ODDPORTMASK(5) | ODDPORTMASK(4) | ODDPORTMASK(3) | ODDPORTMASK(2) | + ODDPORTMASK(1) | ODDPORTMASK(0); +} + +static uint16_t porta_lines(SH7750State *s) +{ + return (s->portdira & s->pdtra) | /* CPU */ + (s->periph_portdira & s->periph_pdtra) | /* Peripherals */ + (~(s->portdira | s->periph_portdira) & s->portpullupa); /* Pullups */ +} + +static uint16_t portb_lines(SH7750State *s) +{ + return (s->portdirb & s->pdtrb) | /* CPU */ + (s->periph_portdirb & s->periph_pdtrb) | /* Peripherals */ + (~(s->portdirb | s->periph_portdirb) & s->portpullupb); /* Pullups */ +} + +static void gen_port_interrupts(SH7750State *s) +{ + /* XXXXX interrupts not generated */ +} + +static void porta_changed(SH7750State *s, uint16_t prev) +{ + uint16_t currenta, changes; + int i, r = 0; + + currenta = porta_lines(s); + if (currenta == prev) { + return; + } + trace_sh7750_porta(prev, currenta, s->pdtra, s->pctra); + changes = currenta ^ prev; 
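+    /*
+     * Editor's worked example for the helpers above: PCTRA packs two
+     * control bits per pin (even bit = output enable, odd bit = pullup)
+     * which portdir()/portpullup() de-interleave into 16-bit masks.
+     * porta_lines() then resolves each pin: CPU-driven pins take pdtra,
+     * peripheral-driven pins take periph_pdtra, undriven pins read their
+     * pullup.  E.g. portdira = 0x0001, pdtra = 0x0001,
+     * periph_portdira = 0x0000, portpullupa = 0xfffe gives
+     * porta_lines() == 0xffff.
+     */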
+ + for (i = 0; i < NB_DEVICES; i++) { + if (s->devices[i] && (s->devices[i]->portamask_trigger & changes)) { + r |= s->devices[i]->port_change_cb(currenta, portb_lines(s), + &s->periph_pdtra, + &s->periph_portdira, + &s->periph_pdtrb, + &s->periph_portdirb); + } + } + + if (r) { + gen_port_interrupts(s); + } +} + +static void portb_changed(SH7750State *s, uint16_t prev) +{ + uint16_t currentb, changes; + int i, r = 0; + + currentb = portb_lines(s); + if (currentb == prev) { + return; + } + trace_sh7750_portb(prev, currentb, s->pdtrb, s->pctrb); + changes = currentb ^ prev; + + for (i = 0; i < NB_DEVICES; i++) { + if (s->devices[i] && (s->devices[i]->portbmask_trigger & changes)) { + r |= s->devices[i]->port_change_cb(portb_lines(s), currentb, + &s->periph_pdtra, + &s->periph_portdira, + &s->periph_pdtrb, + &s->periph_portdirb); + } + } + + if (r) { + gen_port_interrupts(s); + } +} + +/* + * Memory + */ + +static void error_access(const char *kind, hwaddr addr) +{ + fprintf(stderr, "%s to %s (0x" TARGET_FMT_plx ") not supported\n", + kind, regname(addr), addr); +} + +static void ignore_access(const char *kind, hwaddr addr) +{ + fprintf(stderr, "%s to %s (0x" TARGET_FMT_plx ") ignored\n", + kind, regname(addr), addr); +} + +static uint32_t sh7750_mem_readb(void *opaque, hwaddr addr) +{ + switch (addr) { + default: + error_access("byte read", addr); + abort(); + } +} + +static uint32_t sh7750_mem_readw(void *opaque, hwaddr addr) +{ + SH7750State *s = opaque; + + switch (addr) { + case SH7750_BCR2_A7: + return s->bcr2; + case SH7750_BCR3_A7: + if (!has_bcr3_and_bcr4(s)) { + error_access("word read", addr); + } + return s->bcr3; + case SH7750_FRQCR_A7: + return 0; + case SH7750_PCR_A7: + return s->pcr; + case SH7750_RFCR_A7: + fprintf(stderr, + "Read access to refresh count register, incrementing\n"); + return s->rfcr++; + case SH7750_PDTRA_A7: + return porta_lines(s); + case SH7750_PDTRB_A7: + return portb_lines(s); + case SH7750_RTCOR_A7: + case SH7750_RTCNT_A7: + case SH7750_RTCSR_A7: + ignore_access("word read", addr); + return 0; + default: + error_access("word read", addr); + abort(); + } +} + +static uint32_t sh7750_mem_readl(void *opaque, hwaddr addr) +{ + SH7750State *s = opaque; + SuperHCPUClass *scc; + + switch (addr) { + case SH7750_BCR1_A7: + return s->bcr1; + case SH7750_BCR4_A7: + if (!has_bcr3_and_bcr4(s)) { + error_access("long read", addr); + } + return s->bcr4; + case SH7750_WCR1_A7: + case SH7750_WCR2_A7: + case SH7750_WCR3_A7: + case SH7750_MCR_A7: + ignore_access("long read", addr); + return 0; + case SH7750_MMUCR_A7: + return s->cpu->env.mmucr; + case SH7750_PTEH_A7: + return s->cpu->env.pteh; + case SH7750_PTEL_A7: + return s->cpu->env.ptel; + case SH7750_TTB_A7: + return s->cpu->env.ttb; + case SH7750_TEA_A7: + return s->cpu->env.tea; + case SH7750_TRA_A7: + return s->cpu->env.tra; + case SH7750_EXPEVT_A7: + return s->cpu->env.expevt; + case SH7750_INTEVT_A7: + return s->cpu->env.intevt; + case SH7750_CCR_A7: + return s->ccr; + case 0x1f000030: /* Processor version */ + scc = SUPERH_CPU_GET_CLASS(s->cpu); + return scc->pvr; + case 0x1f000040: /* Cache version */ + scc = SUPERH_CPU_GET_CLASS(s->cpu); + return scc->cvr; + case 0x1f000044: /* Processor revision */ + scc = SUPERH_CPU_GET_CLASS(s->cpu); + return scc->prr; + default: + error_access("long read", addr); + abort(); + } +} + +#define is_in_sdrmx(a, x) (a >= SH7750_SDMR ## x ## _A7 \ + && a <= (SH7750_SDMR ## x ## _A7 + SH7750_SDMR ## x ## _REGNB)) +static void sh7750_mem_writeb(void *opaque, hwaddr addr, + 
uint32_t mem_value) +{ + + if (is_in_sdrmx(addr, 2) || is_in_sdrmx(addr, 3)) { + ignore_access("byte write", addr); + return; + } + + error_access("byte write", addr); + abort(); +} + +static void sh7750_mem_writew(void *opaque, hwaddr addr, + uint32_t mem_value) +{ + SH7750State *s = opaque; + uint16_t temp; + + switch (addr) { + /* SDRAM controller */ + case SH7750_BCR2_A7: + s->bcr2 = mem_value; + return; + case SH7750_BCR3_A7: + if (!has_bcr3_and_bcr4(s)) { + error_access("word write", addr); + } + s->bcr3 = mem_value; + return; + case SH7750_PCR_A7: + s->pcr = mem_value; + return; + case SH7750_RTCNT_A7: + case SH7750_RTCOR_A7: + case SH7750_RTCSR_A7: + ignore_access("word write", addr); + return; + /* IO ports */ + case SH7750_PDTRA_A7: + temp = porta_lines(s); + s->pdtra = mem_value; + porta_changed(s, temp); + return; + case SH7750_PDTRB_A7: + temp = portb_lines(s); + s->pdtrb = mem_value; + portb_changed(s, temp); + return; + case SH7750_RFCR_A7: + fprintf(stderr, "Write access to refresh count register\n"); + s->rfcr = mem_value; + return; + case SH7750_GPIOIC_A7: + s->gpioic = mem_value; + if (mem_value != 0) { + fprintf(stderr, "I/O interrupts not implemented\n"); + abort(); + } + return; + default: + error_access("word write", addr); + abort(); + } +} + +static void sh7750_mem_writel(void *opaque, hwaddr addr, + uint32_t mem_value) +{ + SH7750State *s = opaque; + uint16_t temp; + + switch (addr) { + /* SDRAM controller */ + case SH7750_BCR1_A7: + s->bcr1 = mem_value; + return; + case SH7750_BCR4_A7: + if (!has_bcr3_and_bcr4(s)) { + error_access("long write", addr); + } + s->bcr4 = mem_value; + return; + case SH7750_WCR1_A7: + case SH7750_WCR2_A7: + case SH7750_WCR3_A7: + case SH7750_MCR_A7: + ignore_access("long write", addr); + return; + /* IO ports */ + case SH7750_PCTRA_A7: + temp = porta_lines(s); + s->pctra = mem_value; + s->portdira = portdir(mem_value); + s->portpullupa = portpullup(mem_value); + porta_changed(s, temp); + return; + case SH7750_PCTRB_A7: + temp = portb_lines(s); + s->pctrb = mem_value; + s->portdirb = portdir(mem_value); + s->portpullupb = portpullup(mem_value); + portb_changed(s, temp); + return; + case SH7750_MMUCR_A7: + if (mem_value & MMUCR_TI) { + cpu_sh4_invalidate_tlb(&s->cpu->env); + } + s->cpu->env.mmucr = mem_value & ~MMUCR_TI; + return; + case SH7750_PTEH_A7: + /* If asid changes, clear all registered tlb entries. 
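+         * The ASID tags every translation QEMU has cached, so entries
+         * installed under the old ASID would otherwise go stale.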
*/ + if ((s->cpu->env.pteh & 0xff) != (mem_value & 0xff)) { + tlb_flush(CPU(s->cpu)); + } + s->cpu->env.pteh = mem_value; + return; + case SH7750_PTEL_A7: + s->cpu->env.ptel = mem_value; + return; + case SH7750_PTEA_A7: + s->cpu->env.ptea = mem_value & 0x0000000f; + return; + case SH7750_TTB_A7: + s->cpu->env.ttb = mem_value; + return; + case SH7750_TEA_A7: + s->cpu->env.tea = mem_value; + return; + case SH7750_TRA_A7: + s->cpu->env.tra = mem_value & 0x000007ff; + return; + case SH7750_EXPEVT_A7: + s->cpu->env.expevt = mem_value & 0x000007ff; + return; + case SH7750_INTEVT_A7: + s->cpu->env.intevt = mem_value & 0x000007ff; + return; + case SH7750_CCR_A7: + s->ccr = mem_value; + return; + default: + error_access("long write", addr); + abort(); + } +} + +static uint64_t sh7750_mem_readfn(void *opaque, hwaddr addr, unsigned size) +{ + switch (size) { + case 1: + return sh7750_mem_readb(opaque, addr); + case 2: + return sh7750_mem_readw(opaque, addr); + case 4: + return sh7750_mem_readl(opaque, addr); + default: + g_assert_not_reached(); + } +} + +static void sh7750_mem_writefn(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + switch (size) { + case 1: + sh7750_mem_writeb(opaque, addr, value); + break; + case 2: + sh7750_mem_writew(opaque, addr, value); + break; + case 4: + sh7750_mem_writel(opaque, addr, value); + break; + default: + g_assert_not_reached(); + } +} + +static const MemoryRegionOps sh7750_mem_ops = { + .read = sh7750_mem_readfn, + .write = sh7750_mem_writefn, + .valid.min_access_size = 1, + .valid.max_access_size = 4, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +/* + * sh775x interrupt controller tables for sh_intc.c + * stolen from linux/arch/sh/kernel/cpu/sh4/setup-sh7750.c + */ + +enum { + UNUSED = 0, + + /* interrupt sources */ + IRL_0, IRL_1, IRL_2, IRL_3, IRL_4, IRL_5, IRL_6, IRL_7, + IRL_8, IRL_9, IRL_A, IRL_B, IRL_C, IRL_D, IRL_E, + IRL0, IRL1, IRL2, IRL3, + HUDI, GPIOI, + DMAC_DMTE0, DMAC_DMTE1, DMAC_DMTE2, DMAC_DMTE3, + DMAC_DMTE4, DMAC_DMTE5, DMAC_DMTE6, DMAC_DMTE7, + DMAC_DMAE, + PCIC0_PCISERR, PCIC1_PCIERR, PCIC1_PCIPWDWN, PCIC1_PCIPWON, + PCIC1_PCIDMA0, PCIC1_PCIDMA1, PCIC1_PCIDMA2, PCIC1_PCIDMA3, + TMU3, TMU4, TMU0, TMU1, TMU2_TUNI, TMU2_TICPI, + RTC_ATI, RTC_PRI, RTC_CUI, + SCI1_ERI, SCI1_RXI, SCI1_TXI, SCI1_TEI, + SCIF_ERI, SCIF_RXI, SCIF_BRI, SCIF_TXI, + WDT, + REF_RCMI, REF_ROVI, + + /* interrupt groups */ + DMAC, PCIC1, TMU2, RTC, SCI1, SCIF, REF, + /* irl bundle */ + IRL, + + NR_SOURCES, +}; + +static struct intc_vect vectors[] = { + INTC_VECT(HUDI, 0x600), INTC_VECT(GPIOI, 0x620), + INTC_VECT(TMU0, 0x400), INTC_VECT(TMU1, 0x420), + INTC_VECT(TMU2_TUNI, 0x440), INTC_VECT(TMU2_TICPI, 0x460), + INTC_VECT(RTC_ATI, 0x480), INTC_VECT(RTC_PRI, 0x4a0), + INTC_VECT(RTC_CUI, 0x4c0), + INTC_VECT(SCI1_ERI, 0x4e0), INTC_VECT(SCI1_RXI, 0x500), + INTC_VECT(SCI1_TXI, 0x520), INTC_VECT(SCI1_TEI, 0x540), + INTC_VECT(SCIF_ERI, 0x700), INTC_VECT(SCIF_RXI, 0x720), + INTC_VECT(SCIF_BRI, 0x740), INTC_VECT(SCIF_TXI, 0x760), + INTC_VECT(WDT, 0x560), + INTC_VECT(REF_RCMI, 0x580), INTC_VECT(REF_ROVI, 0x5a0), +}; + +static struct intc_group groups[] = { + INTC_GROUP(TMU2, TMU2_TUNI, TMU2_TICPI), + INTC_GROUP(RTC, RTC_ATI, RTC_PRI, RTC_CUI), + INTC_GROUP(SCI1, SCI1_ERI, SCI1_RXI, SCI1_TXI, SCI1_TEI), + INTC_GROUP(SCIF, SCIF_ERI, SCIF_RXI, SCIF_BRI, SCIF_TXI), + INTC_GROUP(REF, REF_RCMI, REF_ROVI), +}; + +static struct intc_prio_reg prio_registers[] = { + { 0xffd00004, 0, 16, 4, /* IPRA */ { TMU0, TMU1, TMU2, RTC } }, + { 0xffd00008, 0, 16, 4, /* IPRB */ { WDT, REF, SCI1, 
0 } }, + { 0xffd0000c, 0, 16, 4, /* IPRC */ { GPIOI, DMAC, SCIF, HUDI } }, + { 0xffd00010, 0, 16, 4, /* IPRD */ { IRL0, IRL1, IRL2, IRL3 } }, + { 0xfe080000, 0, 32, 4, /* INTPRI00 */ { 0, 0, 0, 0, TMU4, TMU3, + PCIC1, PCIC0_PCISERR } }, +}; + +/* SH7750, SH7750S, SH7751 and SH7091 all have 4-channel DMA controllers */ + +static struct intc_vect vectors_dma4[] = { + INTC_VECT(DMAC_DMTE0, 0x640), INTC_VECT(DMAC_DMTE1, 0x660), + INTC_VECT(DMAC_DMTE2, 0x680), INTC_VECT(DMAC_DMTE3, 0x6a0), + INTC_VECT(DMAC_DMAE, 0x6c0), +}; + +static struct intc_group groups_dma4[] = { + INTC_GROUP(DMAC, DMAC_DMTE0, DMAC_DMTE1, DMAC_DMTE2, + DMAC_DMTE3, DMAC_DMAE), +}; + +/* SH7750R and SH7751R both have 8-channel DMA controllers */ + +static struct intc_vect vectors_dma8[] = { + INTC_VECT(DMAC_DMTE0, 0x640), INTC_VECT(DMAC_DMTE1, 0x660), + INTC_VECT(DMAC_DMTE2, 0x680), INTC_VECT(DMAC_DMTE3, 0x6a0), + INTC_VECT(DMAC_DMTE4, 0x780), INTC_VECT(DMAC_DMTE5, 0x7a0), + INTC_VECT(DMAC_DMTE6, 0x7c0), INTC_VECT(DMAC_DMTE7, 0x7e0), + INTC_VECT(DMAC_DMAE, 0x6c0), +}; + +static struct intc_group groups_dma8[] = { + INTC_GROUP(DMAC, DMAC_DMTE0, DMAC_DMTE1, DMAC_DMTE2, + DMAC_DMTE3, DMAC_DMTE4, DMAC_DMTE5, + DMAC_DMTE6, DMAC_DMTE7, DMAC_DMAE), +}; + +/* SH7750R, SH7751 and SH7751R all have two extra timer channels */ + +static struct intc_vect vectors_tmu34[] = { + INTC_VECT(TMU3, 0xb00), INTC_VECT(TMU4, 0xb80), +}; + +static struct intc_mask_reg mask_registers[] = { + { 0xfe080040, 0xfe080060, 32, /* INTMSK00 / INTMSKCLR00 */ + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, TMU4, TMU3, + PCIC1_PCIERR, PCIC1_PCIPWDWN, PCIC1_PCIPWON, + PCIC1_PCIDMA0, PCIC1_PCIDMA1, PCIC1_PCIDMA2, + PCIC1_PCIDMA3, PCIC0_PCISERR } }, +}; + +/* SH7750S, SH7750R, SH7751 and SH7751R all have IRLM priority registers */ + +static struct intc_vect vectors_irlm[] = { + INTC_VECT(IRL0, 0x240), INTC_VECT(IRL1, 0x2a0), + INTC_VECT(IRL2, 0x300), INTC_VECT(IRL3, 0x360), +}; + +/* SH7751 and SH7751R both have PCI */ + +static struct intc_vect vectors_pci[] = { + INTC_VECT(PCIC0_PCISERR, 0xa00), INTC_VECT(PCIC1_PCIERR, 0xae0), + INTC_VECT(PCIC1_PCIPWDWN, 0xac0), INTC_VECT(PCIC1_PCIPWON, 0xaa0), + INTC_VECT(PCIC1_PCIDMA0, 0xa80), INTC_VECT(PCIC1_PCIDMA1, 0xa60), + INTC_VECT(PCIC1_PCIDMA2, 0xa40), INTC_VECT(PCIC1_PCIDMA3, 0xa20), +}; + +static struct intc_group groups_pci[] = { + INTC_GROUP(PCIC1, PCIC1_PCIERR, PCIC1_PCIPWDWN, PCIC1_PCIPWON, + PCIC1_PCIDMA0, PCIC1_PCIDMA1, PCIC1_PCIDMA2, PCIC1_PCIDMA3), +}; + +static struct intc_vect vectors_irl[] = { + INTC_VECT(IRL_0, 0x200), + INTC_VECT(IRL_1, 0x220), + INTC_VECT(IRL_2, 0x240), + INTC_VECT(IRL_3, 0x260), + INTC_VECT(IRL_4, 0x280), + INTC_VECT(IRL_5, 0x2a0), + INTC_VECT(IRL_6, 0x2c0), + INTC_VECT(IRL_7, 0x2e0), + INTC_VECT(IRL_8, 0x300), + INTC_VECT(IRL_9, 0x320), + INTC_VECT(IRL_A, 0x340), + INTC_VECT(IRL_B, 0x360), + INTC_VECT(IRL_C, 0x380), + INTC_VECT(IRL_D, 0x3a0), + INTC_VECT(IRL_E, 0x3c0), +}; + +static struct intc_group groups_irl[] = { + INTC_GROUP(IRL, IRL_0, IRL_1, IRL_2, IRL_3, IRL_4, IRL_5, IRL_6, + IRL_7, IRL_8, IRL_9, IRL_A, IRL_B, IRL_C, IRL_D, IRL_E), +}; + +/* + * Memory mapped cache and TLB + */ + +#define MM_REGION_MASK 0x07000000 +#define MM_ICACHE_ADDR (0) +#define MM_ICACHE_DATA (1) +#define MM_ITLB_ADDR (2) +#define MM_ITLB_DATA (3) +#define MM_OCACHE_ADDR (4) +#define MM_OCACHE_DATA (5) +#define MM_UTLB_ADDR (6) +#define MM_UTLB_DATA (7) +#define MM_REGION_TYPE(addr) ((addr & MM_REGION_MASK) >> 24) + +static uint64_t invalid_read(void *opaque, hwaddr addr) +{ + 
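+    /* The caller routes every access of unsupported width here;
+     * failing hard beats silently returning garbage. */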
abort(); + + return 0; +} + +static uint64_t sh7750_mmct_read(void *opaque, hwaddr addr, + unsigned size) +{ + SH7750State *s = opaque; + uint32_t ret = 0; + + if (size != 4) { + return invalid_read(opaque, addr); + } + + switch (MM_REGION_TYPE(addr)) { + case MM_ICACHE_ADDR: + case MM_ICACHE_DATA: + /* do nothing */ + break; + case MM_ITLB_ADDR: + ret = cpu_sh4_read_mmaped_itlb_addr(&s->cpu->env, addr); + break; + case MM_ITLB_DATA: + ret = cpu_sh4_read_mmaped_itlb_data(&s->cpu->env, addr); + break; + case MM_OCACHE_ADDR: + case MM_OCACHE_DATA: + /* do nothing */ + break; + case MM_UTLB_ADDR: + ret = cpu_sh4_read_mmaped_utlb_addr(&s->cpu->env, addr); + break; + case MM_UTLB_DATA: + ret = cpu_sh4_read_mmaped_utlb_data(&s->cpu->env, addr); + break; + default: + abort(); + } + + return ret; +} + +static void invalid_write(void *opaque, hwaddr addr, + uint64_t mem_value) +{ + abort(); +} + +static void sh7750_mmct_write(void *opaque, hwaddr addr, + uint64_t mem_value, unsigned size) +{ + SH7750State *s = opaque; + + if (size != 4) { + invalid_write(opaque, addr, mem_value); + } + + switch (MM_REGION_TYPE(addr)) { + case MM_ICACHE_ADDR: + case MM_ICACHE_DATA: + /* do nothing */ + break; + case MM_ITLB_ADDR: + cpu_sh4_write_mmaped_itlb_addr(&s->cpu->env, addr, mem_value); + break; + case MM_ITLB_DATA: + cpu_sh4_write_mmaped_itlb_data(&s->cpu->env, addr, mem_value); + abort(); + break; + case MM_OCACHE_ADDR: + case MM_OCACHE_DATA: + /* do nothing */ + break; + case MM_UTLB_ADDR: + cpu_sh4_write_mmaped_utlb_addr(&s->cpu->env, addr, mem_value); + break; + case MM_UTLB_DATA: + cpu_sh4_write_mmaped_utlb_data(&s->cpu->env, addr, mem_value); + break; + default: + abort(); + break; + } +} + +static const MemoryRegionOps sh7750_mmct_ops = { + .read = sh7750_mmct_read, + .write = sh7750_mmct_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +SH7750State *sh7750_init(SuperHCPU *cpu, MemoryRegion *sysmem) +{ + SH7750State *s; + DeviceState *dev; + SysBusDevice *sb; + MemoryRegion *mr, *alias; + + s = g_malloc0(sizeof(SH7750State)); + s->cpu = cpu; + s->periph_freq = 60000000; /* 60MHz */ + memory_region_init_io(&s->iomem, NULL, &sh7750_mem_ops, s, + "memory", 0x1fc01000); + + memory_region_init_alias(&s->iomem_1f0, NULL, "memory-1f0", + &s->iomem, 0x1f000000, 0x1000); + memory_region_add_subregion(sysmem, 0x1f000000, &s->iomem_1f0); + + memory_region_init_alias(&s->iomem_ff0, NULL, "memory-ff0", + &s->iomem, 0x1f000000, 0x1000); + memory_region_add_subregion(sysmem, 0xff000000, &s->iomem_ff0); + + memory_region_init_alias(&s->iomem_1f8, NULL, "memory-1f8", + &s->iomem, 0x1f800000, 0x1000); + memory_region_add_subregion(sysmem, 0x1f800000, &s->iomem_1f8); + + memory_region_init_alias(&s->iomem_ff8, NULL, "memory-ff8", + &s->iomem, 0x1f800000, 0x1000); + memory_region_add_subregion(sysmem, 0xff800000, &s->iomem_ff8); + + memory_region_init_alias(&s->iomem_1fc, NULL, "memory-1fc", + &s->iomem, 0x1fc00000, 0x1000); + memory_region_add_subregion(sysmem, 0x1fc00000, &s->iomem_1fc); + + memory_region_init_alias(&s->iomem_ffc, NULL, "memory-ffc", + &s->iomem, 0x1fc00000, 0x1000); + memory_region_add_subregion(sysmem, 0xffc00000, &s->iomem_ffc); + + memory_region_init_io(&s->mmct_iomem, NULL, &sh7750_mmct_ops, s, + "cache-and-tlb", 0x08000000); + memory_region_add_subregion(sysmem, 0xf0000000, &s->mmct_iomem); + + sh_intc_init(sysmem, &s->intc, NR_SOURCES, + _INTC_ARRAY(mask_registers), + _INTC_ARRAY(prio_registers)); + + sh_intc_register_sources(&s->intc, + _INTC_ARRAY(vectors), + _INTC_ARRAY(groups)); + + 
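+    /*
+     * Hand the interrupt controller to the CPU core, then instantiate
+     * the on-chip peripherals; each serial device is mapped at its P4
+     * address and additionally aliased into area 7, like the register
+     * banks above.
+     */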
cpu->env.intc_handle = &s->intc; + + /* SCI */ + dev = qdev_new(TYPE_SH_SERIAL); + dev->id = g_strdup("sci"); + qdev_prop_set_chr(dev, "chardev", serial_hd(0)); + sb = SYS_BUS_DEVICE(dev); + sysbus_realize_and_unref(sb, &error_fatal); + sysbus_mmio_map(sb, 0, 0xffe00000); + alias = g_malloc(sizeof(*alias)); + mr = sysbus_mmio_get_region(sb, 0); + memory_region_init_alias(alias, OBJECT(dev), "sci-a7", mr, + 0, memory_region_size(mr)); + memory_region_add_subregion(sysmem, A7ADDR(0xffe00000), alias); + qdev_connect_gpio_out_named(dev, "eri", 0, s->intc.irqs[SCI1_ERI]); + qdev_connect_gpio_out_named(dev, "rxi", 0, s->intc.irqs[SCI1_RXI]); + qdev_connect_gpio_out_named(dev, "txi", 0, s->intc.irqs[SCI1_TXI]); + qdev_connect_gpio_out_named(dev, "tei", 0, s->intc.irqs[SCI1_TEI]); + + /* SCIF */ + dev = qdev_new(TYPE_SH_SERIAL); + dev->id = g_strdup("scif"); + qdev_prop_set_chr(dev, "chardev", serial_hd(1)); + qdev_prop_set_uint8(dev, "features", SH_SERIAL_FEAT_SCIF); + sb = SYS_BUS_DEVICE(dev); + sysbus_realize_and_unref(sb, &error_fatal); + sysbus_mmio_map(sb, 0, 0xffe80000); + alias = g_malloc(sizeof(*alias)); + mr = sysbus_mmio_get_region(sb, 0); + memory_region_init_alias(alias, OBJECT(dev), "scif-a7", mr, + 0, memory_region_size(mr)); + memory_region_add_subregion(sysmem, A7ADDR(0xffe80000), alias); + qdev_connect_gpio_out_named(dev, "eri", 0, s->intc.irqs[SCIF_ERI]); + qdev_connect_gpio_out_named(dev, "rxi", 0, s->intc.irqs[SCIF_RXI]); + qdev_connect_gpio_out_named(dev, "txi", 0, s->intc.irqs[SCIF_TXI]); + qdev_connect_gpio_out_named(dev, "bri", 0, s->intc.irqs[SCIF_BRI]); + + tmu012_init(sysmem, 0x1fd80000, + TMU012_FEAT_TOCR | TMU012_FEAT_3CHAN | TMU012_FEAT_EXTCLK, + s->periph_freq, + s->intc.irqs[TMU0], + s->intc.irqs[TMU1], + s->intc.irqs[TMU2_TUNI], + s->intc.irqs[TMU2_TICPI]); + + if (cpu->env.id & (SH_CPU_SH7750 | SH_CPU_SH7750S | SH_CPU_SH7751)) { + sh_intc_register_sources(&s->intc, + _INTC_ARRAY(vectors_dma4), + _INTC_ARRAY(groups_dma4)); + } + + if (cpu->env.id & (SH_CPU_SH7750R | SH_CPU_SH7751R)) { + sh_intc_register_sources(&s->intc, + _INTC_ARRAY(vectors_dma8), + _INTC_ARRAY(groups_dma8)); + } + + if (cpu->env.id & (SH_CPU_SH7750R | SH_CPU_SH7751 | SH_CPU_SH7751R)) { + sh_intc_register_sources(&s->intc, + _INTC_ARRAY(vectors_tmu34), + NULL, 0); + tmu012_init(sysmem, 0x1e100000, 0, s->periph_freq, + s->intc.irqs[TMU3], + s->intc.irqs[TMU4], + NULL, NULL); + } + + if (cpu->env.id & (SH_CPU_SH7751_ALL)) { + sh_intc_register_sources(&s->intc, + _INTC_ARRAY(vectors_pci), + _INTC_ARRAY(groups_pci)); + } + + if (cpu->env.id & (SH_CPU_SH7750S | SH_CPU_SH7750R | SH_CPU_SH7751_ALL)) { + sh_intc_register_sources(&s->intc, + _INTC_ARRAY(vectors_irlm), + NULL, 0); + } + + sh_intc_register_sources(&s->intc, + _INTC_ARRAY(vectors_irl), + _INTC_ARRAY(groups_irl)); + return s; +} + +qemu_irq sh7750_irl(SH7750State *s) +{ + sh_intc_toggle_source(&s->intc.sources[IRL], 1, 0); /* enable */ + return qemu_allocate_irq(sh_intc_set_irl, &s->intc.sources[IRL], 0); +} diff --git a/hw/sh4/sh7750_regnames.c b/hw/sh4/sh7750_regnames.c new file mode 100644 index 000000000..e531d46a8 --- /dev/null +++ b/hw/sh4/sh7750_regnames.c @@ -0,0 +1,99 @@ +#include "qemu/osdep.h" +#include "hw/sh4/sh.h" +#include "sh7750_regs.h" +#include "sh7750_regnames.h" + +#define REGNAME(r) {r, #r}, + +typedef struct { + uint32_t regaddr; + const char *regname; +} regname_t; + +static regname_t regnames[] = { + REGNAME(SH7750_PTEH_A7) + REGNAME(SH7750_PTEL_A7) + REGNAME(SH7750_PTEA_A7) + REGNAME(SH7750_TTB_A7) + 
REGNAME(SH7750_TEA_A7) + REGNAME(SH7750_MMUCR_A7) + REGNAME(SH7750_CCR_A7) + REGNAME(SH7750_QACR0_A7) + REGNAME(SH7750_QACR1_A7) + REGNAME(SH7750_TRA_A7) + REGNAME(SH7750_EXPEVT_A7) + REGNAME(SH7750_INTEVT_A7) + REGNAME(SH7750_STBCR_A7) + REGNAME(SH7750_STBCR2_A7) + REGNAME(SH7750_FRQCR_A7) + REGNAME(SH7750_WTCNT_A7) + REGNAME(SH7750_WTCSR_A7) + REGNAME(SH7750_R64CNT_A7) + REGNAME(SH7750_RSECCNT_A7) + REGNAME(SH7750_RMINCNT_A7) + REGNAME(SH7750_RHRCNT_A7) + REGNAME(SH7750_RWKCNT_A7) + REGNAME(SH7750_RDAYCNT_A7) + REGNAME(SH7750_RMONCNT_A7) + REGNAME(SH7750_RYRCNT_A7) + REGNAME(SH7750_RSECAR_A7) + REGNAME(SH7750_RMINAR_A7) + REGNAME(SH7750_RHRAR_A7) + REGNAME(SH7750_RWKAR_A7) + REGNAME(SH7750_RDAYAR_A7) + REGNAME(SH7750_RMONAR_A7) + REGNAME(SH7750_RCR1_A7) + REGNAME(SH7750_RCR2_A7) + REGNAME(SH7750_BCR1_A7) + REGNAME(SH7750_BCR2_A7) + REGNAME(SH7750_WCR1_A7) + REGNAME(SH7750_WCR2_A7) + REGNAME(SH7750_WCR3_A7) + REGNAME(SH7750_MCR_A7) + REGNAME(SH7750_PCR_A7) + REGNAME(SH7750_RTCSR_A7) + REGNAME(SH7750_RTCNT_A7) + REGNAME(SH7750_RTCOR_A7) + REGNAME(SH7750_RFCR_A7) + REGNAME(SH7750_SAR0_A7) + REGNAME(SH7750_SAR1_A7) + REGNAME(SH7750_SAR2_A7) + REGNAME(SH7750_SAR3_A7) + REGNAME(SH7750_DAR0_A7) + REGNAME(SH7750_DAR1_A7) + REGNAME(SH7750_DAR2_A7) + REGNAME(SH7750_DAR3_A7) + REGNAME(SH7750_DMATCR0_A7) + REGNAME(SH7750_DMATCR1_A7) + REGNAME(SH7750_DMATCR2_A7) + REGNAME(SH7750_DMATCR3_A7) + REGNAME(SH7750_CHCR0_A7) + REGNAME(SH7750_CHCR1_A7) + REGNAME(SH7750_CHCR2_A7) + REGNAME(SH7750_CHCR3_A7) + REGNAME(SH7750_DMAOR_A7) + REGNAME(SH7750_PCTRA_A7) + REGNAME(SH7750_PDTRA_A7) + REGNAME(SH7750_PCTRB_A7) + REGNAME(SH7750_PDTRB_A7) + REGNAME(SH7750_GPIOIC_A7) + REGNAME(SH7750_ICR_A7) + REGNAME(SH7750_BCR3_A7) + REGNAME(SH7750_BCR4_A7) + REGNAME(SH7750_SDMR2_A7) + REGNAME(SH7750_SDMR3_A7) + { (uint32_t)-1, NULL } +}; + +const char *regname(uint32_t addr) +{ + unsigned int i; + + for (i = 0; regnames[i].regaddr != (uint32_t)-1; i++) { + if (regnames[i].regaddr == addr) { + return regnames[i].regname; + } + } + + return ""; +} diff --git a/hw/sh4/sh7750_regnames.h b/hw/sh4/sh7750_regnames.h new file mode 100644 index 000000000..e3ba88636 --- /dev/null +++ b/hw/sh4/sh7750_regnames.h @@ -0,0 +1,6 @@ +#ifndef SH7750_REGNAMES_H +#define SH7750_REGNAMES_H + +const char *regname(uint32_t addr); + +#endif /* SH7750_REGNAMES_H */ diff --git a/hw/sh4/sh7750_regs.h b/hw/sh4/sh7750_regs.h new file mode 100644 index 000000000..beb571d5e --- /dev/null +++ b/hw/sh4/sh7750_regs.h @@ -0,0 +1,1297 @@ +/* + * SH-7750 memory-mapped registers + * This file based on information provided in the following document: + * "Hitachi SuperH (tm) RISC engine. SH7750 Series (SH7750, SH7750S) + * Hardware Manual" + * Document Number ADE-602-124C, Rev. 4.0, 4/21/00, Hitachi Ltd. + * + * Copyright (C) 2001 OKTET Ltd., St.-Petersburg, Russia + * Author: Alexandra Kossovsky + * Victor V. Vengerov + * + * The license and distribution terms for this file may be + * found in this file hereafter or at http://www.rtems.com/license/LICENSE. + * + * LICENSE INFORMATION + * + * RTEMS is free software; you can redistribute it and/or modify it under + * terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2, or (at your option) any + * later version. RTEMS is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
You should have received
+ * a copy of the GNU General Public License along with RTEMS; see
+ * file COPYING. If not, write to the Free Software Foundation, 675
+ * Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * As a special exception, including RTEMS header files in a file,
+ * instantiating RTEMS generics or templates, or linking other files
+ * with RTEMS objects to produce an executable application, does not
+ * by itself cause the resulting executable application to be covered
+ * by the GNU General Public License. This exception does not
+ * however invalidate any other reasons why the executable file might be
+ * covered by the GNU Public License.
+ *
+ * @(#) sh7750_regs.h,v 1.2.4.1 2003/09/04 18:46:00 joel Exp
+ */
+
+#ifndef SH7750_REGS_H
+#define SH7750_REGS_H
+
+/*
+ * Every register has 2 addresses: one in 0xff000000 - 0xffffffff (P4 address)
+ * and one in 0x1f000000 - 0x1fffffff (area 7 address)
+ */
+#define SH7750_P4_BASE 0xff000000 /* Accessible only in privileged mode */
+#define SH7750_A7_BASE 0x1f000000 /* Accessible only using TLB */
+
+#define SH7750_P4_REG32(ofs) (SH7750_P4_BASE + (ofs))
+#define SH7750_A7_REG32(ofs) (SH7750_A7_BASE + (ofs))
+
+/*
+ * MMU Registers
+ */
+
+/* Page Table Entry High register - PTEH */
+#define SH7750_PTEH_REGOFS 0x000000 /* offset */
+#define SH7750_PTEH SH7750_P4_REG32(SH7750_PTEH_REGOFS)
+#define SH7750_PTEH_A7 SH7750_A7_REG32(SH7750_PTEH_REGOFS)
+#define SH7750_PTEH_VPN 0xfffffd00 /* Virtual page number */
+#define SH7750_PTEH_VPN_S 10
+#define SH7750_PTEH_ASID 0x000000ff /* Address space identifier */
+#define SH7750_PTEH_ASID_S 0
+
+/* Page Table Entry Low register - PTEL */
+#define SH7750_PTEL_REGOFS 0x000004 /* offset */
+#define SH7750_PTEL SH7750_P4_REG32(SH7750_PTEL_REGOFS)
+#define SH7750_PTEL_A7 SH7750_A7_REG32(SH7750_PTEL_REGOFS)
+#define SH7750_PTEL_PPN 0x1ffffc00 /* Physical page number */
+#define SH7750_PTEL_PPN_S 10
+#define SH7750_PTEL_V 0x00000100 /* Validity (0-entry is invalid) */
+#define SH7750_PTEL_SZ1 0x00000080 /* Page size bit 1 */
+#define SH7750_PTEL_SZ0 0x00000010 /* Page size bit 0 */
+#define SH7750_PTEL_SZ_1KB 0x00000000 /* 1-kbyte page */
+#define SH7750_PTEL_SZ_4KB 0x00000010 /* 4-kbyte page */
+#define SH7750_PTEL_SZ_64KB 0x00000080 /* 64-kbyte page */
+#define SH7750_PTEL_SZ_1MB 0x00000090 /* 1-Mbyte page */
+#define SH7750_PTEL_PR 0x00000060 /* Protection Key Data */
+#define SH7750_PTEL_PR_ROPO 0x00000000 /* read-only in priv mode */
+#define SH7750_PTEL_PR_RWPO 0x00000020 /* read-write in priv mode */
+#define SH7750_PTEL_PR_ROPU 0x00000040 /* read-only in priv or user mode */
+#define SH7750_PTEL_PR_RWPU 0x00000060 /* read-write in priv or user mode */
+#define SH7750_PTEL_C 0x00000008 /* Cacheability */
+ /* (0 - page not cacheable) */
+#define SH7750_PTEL_D 0x00000004 /* Dirty bit (1 - write has been */
+ /* performed to a page) */
+#define SH7750_PTEL_SH 0x00000002 /* Share Status bit (1 - pages are */
+ /* shared by processes) */
+#define SH7750_PTEL_WT 0x00000001 /* Write-through bit, specifies the */
+ /* cache write mode: */
+ /* 0 - Copy-back mode */
+ /* 1 - Write-through mode */
+
+/* Page Table Entry Assistance register - PTEA */
+#define SH7750_PTEA_REGOFS 0x000034 /* offset */
+#define SH7750_PTEA SH7750_P4_REG32(SH7750_PTEA_REGOFS)
+#define SH7750_PTEA_A7 SH7750_A7_REG32(SH7750_PTEA_REGOFS)
+#define SH7750_PTEA_TC 0x00000008 /* Timing Control bit */
+ /* 0 - use area 5 wait states */
+ /* 1 - use area 6 wait states */
+#define SH7750_PTEA_SA 0x00000007 /* Space Attribute bits: */
+#define SH7750_PTEA_SA_UNDEF 0x00000000 /* 0 - undefined */
+#define SH7750_PTEA_SA_IOVAR 0x00000001 /* 1 - variable-size I/O space */
+#define SH7750_PTEA_SA_IO8 0x00000002 /* 2 - 8-bit I/O space */
+#define SH7750_PTEA_SA_IO16 0x00000003 /* 3 - 16-bit I/O space */
+#define SH7750_PTEA_SA_CMEM8 0x00000004 /* 4 - 8-bit common memory space */
+#define SH7750_PTEA_SA_CMEM16 0x00000005 /* 5 - 16-bit common memory space */
+#define SH7750_PTEA_SA_AMEM8 0x00000006 /* 6 - 8-bit attr memory space */
+#define SH7750_PTEA_SA_AMEM16 0x00000007 /* 7 - 16-bit attr memory space */
+
+
+/* Translation table base register */
+#define SH7750_TTB_REGOFS 0x000008 /* offset */
+#define SH7750_TTB SH7750_P4_REG32(SH7750_TTB_REGOFS)
+#define SH7750_TTB_A7 SH7750_A7_REG32(SH7750_TTB_REGOFS)
+
+/* TLB exception address register - TEA */
+#define SH7750_TEA_REGOFS 0x00000c /* offset */
+#define SH7750_TEA SH7750_P4_REG32(SH7750_TEA_REGOFS)
+#define SH7750_TEA_A7 SH7750_A7_REG32(SH7750_TEA_REGOFS)
+
+/* MMU control register - MMUCR */
+#define SH7750_MMUCR_REGOFS 0x000010 /* offset */
+#define SH7750_MMUCR SH7750_P4_REG32(SH7750_MMUCR_REGOFS)
+#define SH7750_MMUCR_A7 SH7750_A7_REG32(SH7750_MMUCR_REGOFS)
+#define SH7750_MMUCR_AT 0x00000001 /* Address translation bit */
+#define SH7750_MMUCR_TI 0x00000004 /* TLB invalidate */
+#define SH7750_MMUCR_SV 0x00000100 /* Single Virtual Mode bit */
+#define SH7750_MMUCR_SQMD 0x00000200 /* Store Queue Mode bit */
+#define SH7750_MMUCR_URC 0x0000FC00 /* UTLB Replace Counter */
+#define SH7750_MMUCR_URC_S 10
+#define SH7750_MMUCR_URB 0x00FC0000 /* UTLB Replace Boundary */
+#define SH7750_MMUCR_URB_S 18
+#define SH7750_MMUCR_LRUI 0xFC000000 /* Least Recently Used ITLB */
+#define SH7750_MMUCR_LRUI_S 26
+
+
+
+
+/*
+ * Cache registers
+ *   IC -- instruction cache
+ *   OC -- operand cache
+ */
+
+/* Cache Control Register - CCR */
+#define SH7750_CCR_REGOFS 0x00001c /* offset */
+#define SH7750_CCR SH7750_P4_REG32(SH7750_CCR_REGOFS)
+#define SH7750_CCR_A7 SH7750_A7_REG32(SH7750_CCR_REGOFS)
+
+#define SH7750_CCR_IIX 0x00008000 /* IC index enable bit */
+#define SH7750_CCR_ICI 0x00000800 /* IC invalidation bit: */
+ /* set it to clear IC */
+#define SH7750_CCR_ICE 0x00000100 /* IC enable bit */
+#define SH7750_CCR_OIX 0x00000080 /* OC index enable bit */
+#define SH7750_CCR_ORA 0x00000020 /* OC RAM enable bit */
+ /* if you set OCE = 0, */
+ /* you should set ORA = 0 */
+#define SH7750_CCR_OCI 0x00000008 /* OC invalidation bit */
+#define SH7750_CCR_CB 0x00000004 /* Copy-back bit for P1 area */
+#define SH7750_CCR_WT 0x00000002 /* Write-through bit for P0,U0,P3 area */
+#define SH7750_CCR_OCE 0x00000001 /* OC enable bit */
+
+/* Queue address control register 0 - QACR0 */
+#define SH7750_QACR0_REGOFS 0x000038 /* offset */
+#define SH7750_QACR0 SH7750_P4_REG32(SH7750_QACR0_REGOFS)
+#define SH7750_QACR0_A7 SH7750_A7_REG32(SH7750_QACR0_REGOFS)
+
+/* Queue address control register 1 - QACR1 */
+#define SH7750_QACR1_REGOFS 0x00003c /* offset */
+#define SH7750_QACR1 SH7750_P4_REG32(SH7750_QACR1_REGOFS)
+#define SH7750_QACR1_A7 SH7750_A7_REG32(SH7750_QACR1_REGOFS)
+
+
+/*
+ * Exception-related registers
+ */
+
+/* Immediate data for TRAPA instruction - TRA */
+#define SH7750_TRA_REGOFS 0x000020 /* offset */
+#define SH7750_TRA SH7750_P4_REG32(SH7750_TRA_REGOFS)
+#define SH7750_TRA_A7 SH7750_A7_REG32(SH7750_TRA_REGOFS)
+
+#define SH7750_TRA_IMM 0x000003fd /* Immediate data operand */
+#define SH7750_TRA_IMM_S 2
+
+/* Exception event register - EXPEVT */
+#define SH7750_EXPEVT_REGOFS 0x000024
+#define SH7750_EXPEVT SH7750_P4_REG32(SH7750_EXPEVT_REGOFS)
+#define SH7750_EXPEVT_A7 SH7750_A7_REG32(SH7750_EXPEVT_REGOFS)
+
+#define SH7750_EXPEVT_EX 0x00000fff /* Exception code */
+#define SH7750_EXPEVT_EX_S 0
+
+/* Interrupt event register */
+#define SH7750_INTEVT_REGOFS 0x000028
+#define SH7750_INTEVT SH7750_P4_REG32(SH7750_INTEVT_REGOFS)
+#define SH7750_INTEVT_A7 SH7750_A7_REG32(SH7750_INTEVT_REGOFS)
+#define SH7750_INTEVT_EX 0x00000fff /* Exception code */
+#define SH7750_INTEVT_EX_S 0
+
+/*
+ * Exception/interrupt codes
+ */
+#define SH7750_EVT_TO_NUM(evt) ((evt) >> 5)
+
+/* Reset exception category */
+#define SH7750_EVT_POWER_ON_RST 0x000 /* Power-on reset */
+#define SH7750_EVT_MANUAL_RST 0x020 /* Manual reset */
+#define SH7750_EVT_TLB_MULT_HIT 0x140 /* TLB multiple-hit exception */
+
+/* General exception category */
+#define SH7750_EVT_USER_BREAK 0x1E0 /* User break */
+#define SH7750_EVT_IADDR_ERR 0x0E0 /* Instruction address error */
+#define SH7750_EVT_TLB_READ_MISS 0x040 /* ITLB miss exception / */
+ /* DTLB miss exception (read) */
+#define SH7750_EVT_TLB_READ_PROTV 0x0A0 /* ITLB protection violation, */
+ /* DTLB protection violation */
+ /* (read) */
+#define SH7750_EVT_ILLEGAL_INSTR 0x180 /* General Illegal Instruction */
+ /* exception */
+#define SH7750_EVT_SLOT_ILLEGAL_INSTR 0x1A0 /* Slot Illegal Instruction */
+ /* exception */
+#define SH7750_EVT_FPU_DISABLE 0x800 /* General FPU disable exception */
+#define SH7750_EVT_SLOT_FPU_DISABLE 0x820 /* Slot FPU disable exception */
+#define SH7750_EVT_DATA_READ_ERR 0x0E0 /* Data address error (read) */
+#define SH7750_EVT_DATA_WRITE_ERR 0x100 /* Data address error (write) */
+#define SH7750_EVT_DTLB_WRITE_MISS 0x060 /* DTLB miss exception (write) */
+#define SH7750_EVT_DTLB_WRITE_PROTV 0x0C0 /* DTLB protection violation */
+ /* exception (write) */
+#define SH7750_EVT_FPU_EXCEPTION 0x120 /* FPU exception */
+#define SH7750_EVT_INITIAL_PGWRITE 0x080 /* Initial Page Write exception */
+#define SH7750_EVT_TRAPA 0x160 /* Unconditional trap (TRAPA) */
+
+/* Interrupt exception category */
+#define SH7750_EVT_NMI 0x1C0 /* Non-maskable interrupt */
+#define SH7750_EVT_IRQ0 0x200 /* External Interrupt 0 */
+#define SH7750_EVT_IRQ1 0x220 /* External Interrupt 1 */
+#define SH7750_EVT_IRQ2 0x240 /* External Interrupt 2 */
+#define SH7750_EVT_IRQ3 0x260 /* External Interrupt 3 */
+#define SH7750_EVT_IRQ4 0x280 /* External Interrupt 4 */
+#define SH7750_EVT_IRQ5 0x2A0 /* External Interrupt 5 */
+#define SH7750_EVT_IRQ6 0x2C0 /* External Interrupt 6 */
+#define SH7750_EVT_IRQ7 0x2E0 /* External Interrupt 7 */
+#define SH7750_EVT_IRQ8 0x300 /* External Interrupt 8 */
+#define SH7750_EVT_IRQ9 0x320 /* External Interrupt 9 */
+#define SH7750_EVT_IRQA 0x340 /* External Interrupt A */
+#define SH7750_EVT_IRQB 0x360 /* External Interrupt B */
+#define SH7750_EVT_IRQC 0x380 /* External Interrupt C */
+#define SH7750_EVT_IRQD 0x3A0 /* External Interrupt D */
+#define SH7750_EVT_IRQE 0x3C0 /* External Interrupt E */
+
+/* Peripheral Module Interrupts - Timer Unit (TMU) */
+#define SH7750_EVT_TUNI0 0x400 /* TMU Underflow Interrupt 0 */
+#define SH7750_EVT_TUNI1 0x420 /* TMU Underflow Interrupt 1 */
+#define SH7750_EVT_TUNI2 0x440 /* TMU Underflow Interrupt 2 */
+#define SH7750_EVT_TICPI2 0x460 /* TMU Input Capture Interrupt 2 */
+
+/* Peripheral Module Interrupts - Real-Time Clock (RTC) */
+#define SH7750_EVT_RTC_ATI 0x480 /* Alarm Interrupt Request */
+#define SH7750_EVT_RTC_PRI 0x4A0 /* Periodic Interrupt Request */
+#define SH7750_EVT_RTC_CUI 0x4C0 /* Carry Interrupt Request */
+
+/* Peripheral Module Interrupts - Serial Communication Interface (SCI) */
+#define SH7750_EVT_SCI_ERI 0x4E0 /* Receive Error */
+#define SH7750_EVT_SCI_RXI 0x500 /* Receive Data Register Full */
+#define SH7750_EVT_SCI_TXI 0x520 /* Transmit Data Register Empty */
+#define SH7750_EVT_SCI_TEI 0x540 /* Transmit End */
+
+/* Peripheral Module Interrupts - Watchdog Timer (WDT) */
+#define SH7750_EVT_WDT_ITI 0x560 /* Interval Timer Interrupt */
+ /* (used when WDT operates in */
+ /* interval timer mode) */
+
+/* Peripheral Module Interrupts - Memory Refresh Unit (REF) */
+#define SH7750_EVT_REF_RCMI 0x580 /* Compare-match Interrupt */
+#define SH7750_EVT_REF_ROVI 0x5A0 /* Refresh Counter Overflow */
+ /* interrupt */
+
+/* Peripheral Module Interrupts - Hitachi User Debug Interface (H-UDI) */
+#define SH7750_EVT_HUDI 0x600 /* UDI interrupt */
+
+/* Peripheral Module Interrupts - General-Purpose I/O (GPIO) */
+#define SH7750_EVT_GPIO 0x620 /* GPIO Interrupt */
+
+/* Peripheral Module Interrupts - DMA Controller (DMAC) */
+#define SH7750_EVT_DMAC_DMTE0 0x640 /* DMAC 0 Transfer End Interrupt */
+#define SH7750_EVT_DMAC_DMTE1 0x660 /* DMAC 1 Transfer End Interrupt */
+#define SH7750_EVT_DMAC_DMTE2 0x680 /* DMAC 2 Transfer End Interrupt */
+#define SH7750_EVT_DMAC_DMTE3 0x6A0 /* DMAC 3 Transfer End Interrupt */
+#define SH7750_EVT_DMAC_DMAE 0x6C0 /* DMAC Address Error Interrupt */
+
+/* Peripheral Module Interrupts - Serial Communication Interface w/ FIFO (SCIF) */
+#define SH7750_EVT_SCIF_ERI 0x700 /* Receive Error */
+#define SH7750_EVT_SCIF_RXI 0x720 /* Receive FIFO Data Full or */
+ /* Receive Data ready interrupt */
+#define SH7750_EVT_SCIF_BRI 0x740 /* Break or overrun error */
+#define SH7750_EVT_SCIF_TXI 0x760 /* Transmit FIFO Data Empty */
+
+/*
+ * Power Management
+ */
+#define SH7750_STBCR_REGOFS 0xC00004 /* offset */
+#define SH7750_STBCR SH7750_P4_REG32(SH7750_STBCR_REGOFS)
+#define SH7750_STBCR_A7 SH7750_A7_REG32(SH7750_STBCR_REGOFS)
+
+#define SH7750_STBCR_STBY 0x80 /* Specifies a transition to standby mode: */
+ /* 0 Transition to SLEEP mode on SLEEP */
+ /* 1 Transition to STANDBY mode on SLEEP */
+#define SH7750_STBCR_PHZ 0x40 /* State of peripheral module pins in */
+ /* standby mode: */
+ /* 0 normal state */
+ /* 1 high-impedance state */
+
+#define SH7750_STBCR_PPU 0x20 /* Peripheral module pins pull-up controls */
+#define SH7750_STBCR_MSTP4 0x10 /* Stopping the clock supply to DMAC */
+#define SH7750_STBCR_DMAC_STP SH7750_STBCR_MSTP4
+#define SH7750_STBCR_MSTP3 0x08 /* Stopping the clock supply to SCIF */
+#define SH7750_STBCR_SCIF_STP SH7750_STBCR_MSTP3
+#define SH7750_STBCR_MSTP2 0x04 /* Stopping the clock supply to TMU */
+#define SH7750_STBCR_TMU_STP SH7750_STBCR_MSTP2
+#define SH7750_STBCR_MSTP1 0x02 /* Stopping the clock supply to RTC */
+#define SH7750_STBCR_RTC_STP SH7750_STBCR_MSTP1
+#define SH7750_STBCR_MSTP0 0x01 /* Stopping the clock supply to SCI */
+#define SH7750_STBCR_SCI_STP SH7750_STBCR_MSTP0
+
+
+#define SH7750_STBCR2_REGOFS 0xC00010 /* offset */
+#define SH7750_STBCR2 SH7750_P4_REG32(SH7750_STBCR2_REGOFS)
+#define SH7750_STBCR2_A7 SH7750_A7_REG32(SH7750_STBCR2_REGOFS)
+
+#define SH7750_STBCR2_DSLP 0x80 /* Specifies transition to deep sleep mode */
+ /* 0 transition to sleep or standby mode */
+ /* as it is specified in STBY bit */
+ /* 1 transition to deep sleep mode on */
+ /* execution of SLEEP instruction */
+#define SH7750_STBCR2_MSTP6 0x02 /* Stopping the clock supply to the */
+ /*
Store Queue in the cache controller */ +#define SH7750_STBCR2_SQ_STP SH7750_STBCR2_MSTP6 +#define SH7750_STBCR2_MSTP5 0x01 /* Stopping the clock supply to the */ + /* User Break Controller (UBC) */ +#define SH7750_STBCR2_UBC_STP SH7750_STBCR2_MSTP5 + +/* + * Clock Pulse Generator (CPG) + */ +#define SH7750_FRQCR_REGOFS 0xC00000 /* offset */ +#define SH7750_FRQCR SH7750_P4_REG32(SH7750_FRQCR_REGOFS) +#define SH7750_FRQCR_A7 SH7750_A7_REG32(SH7750_FRQCR_REGOFS) + +#define SH7750_FRQCR_CKOEN 0x0800 /* Clock Output Enable */ + /* 0 - CKIO pin goes to HiZ/pullup */ + /* 1 - Clock is output from CKIO */ +#define SH7750_FRQCR_PLL1EN 0x0400 /* PLL circuit 1 enable */ +#define SH7750_FRQCR_PLL2EN 0x0200 /* PLL circuit 2 enable */ + +#define SH7750_FRQCR_IFC 0x01C0 /* CPU clock frequency division ratio: */ +#define SH7750_FRQCR_IFCDIV1 0x0000 /* 0 - * 1 */ +#define SH7750_FRQCR_IFCDIV2 0x0040 /* 1 - * 1/2 */ +#define SH7750_FRQCR_IFCDIV3 0x0080 /* 2 - * 1/3 */ +#define SH7750_FRQCR_IFCDIV4 0x00C0 /* 3 - * 1/4 */ +#define SH7750_FRQCR_IFCDIV6 0x0100 /* 4 - * 1/6 */ +#define SH7750_FRQCR_IFCDIV8 0x0140 /* 5 - * 1/8 */ + +#define SH7750_FRQCR_BFC 0x0038 /* Bus clock frequency division ratio: */ +#define SH7750_FRQCR_BFCDIV1 0x0000 /* 0 - * 1 */ +#define SH7750_FRQCR_BFCDIV2 0x0008 /* 1 - * 1/2 */ +#define SH7750_FRQCR_BFCDIV3 0x0010 /* 2 - * 1/3 */ +#define SH7750_FRQCR_BFCDIV4 0x0018 /* 3 - * 1/4 */ +#define SH7750_FRQCR_BFCDIV6 0x0020 /* 4 - * 1/6 */ +#define SH7750_FRQCR_BFCDIV8 0x0028 /* 5 - * 1/8 */ + +#define SH7750_FRQCR_PFC 0x0007 /* Peripheral module clock frequency */ + /* division ratio: */ +#define SH7750_FRQCR_PFCDIV2 0x0000 /* 0 - * 1/2 */ +#define SH7750_FRQCR_PFCDIV3 0x0001 /* 1 - * 1/3 */ +#define SH7750_FRQCR_PFCDIV4 0x0002 /* 2 - * 1/4 */ +#define SH7750_FRQCR_PFCDIV6 0x0003 /* 3 - * 1/6 */ +#define SH7750_FRQCR_PFCDIV8 0x0004 /* 4 - * 1/8 */ + +/* + * Watchdog Timer (WDT) + */ + +/* Watchdog Timer Counter register - WTCNT */ +#define SH7750_WTCNT_REGOFS 0xC00008 /* offset */ +#define SH7750_WTCNT SH7750_P4_REG32(SH7750_WTCNT_REGOFS) +#define SH7750_WTCNT_A7 SH7750_A7_REG32(SH7750_WTCNT_REGOFS) +#define SH7750_WTCNT_KEY 0x5A00 /* When WTCNT byte register written, you */ + /* have to set the upper byte to 0x5A */ + +/* Watchdog Timer Control/Status register - WTCSR */ +#define SH7750_WTCSR_REGOFS 0xC0000C /* offset */ +#define SH7750_WTCSR SH7750_P4_REG32(SH7750_WTCSR_REGOFS) +#define SH7750_WTCSR_A7 SH7750_A7_REG32(SH7750_WTCSR_REGOFS) +#define SH7750_WTCSR_KEY 0xA500 /* When WTCSR byte register written, you */ + /* have to set the upper byte to 0xA5 */ +#define SH7750_WTCSR_TME 0x80 /* Timer enable (1-upcount start) */ +#define SH7750_WTCSR_MODE 0x40 /* Timer Mode Select: */ +#define SH7750_WTCSR_MODE_WT 0x40 /* Watchdog Timer Mode */ +#define SH7750_WTCSR_MODE_IT 0x00 /* Interval Timer Mode */ +#define SH7750_WTCSR_RSTS 0x20 /* Reset Select: */ +#define SH7750_WTCSR_RST_MAN 0x20 /* Manual Reset */ +#define SH7750_WTCSR_RST_PWR 0x00 /* Power-on Reset */ +#define SH7750_WTCSR_WOVF 0x10 /* Watchdog Timer Overflow Flag */ +#define SH7750_WTCSR_IOVF 0x08 /* Interval Timer Overflow Flag */ +#define SH7750_WTCSR_CKS 0x07 /* Clock Select: */ +#define SH7750_WTCSR_CKS_DIV32 0x00 /* 1/32 of frequency divider 2 input */ +#define SH7750_WTCSR_CKS_DIV64 0x01 /* 1/64 */ +#define SH7750_WTCSR_CKS_DIV128 0x02 /* 1/128 */ +#define SH7750_WTCSR_CKS_DIV256 0x03 /* 1/256 */ +#define SH7750_WTCSR_CKS_DIV512 0x04 /* 1/512 */ +#define SH7750_WTCSR_CKS_DIV1024 0x05 /* 1/1024 */ +#define 
SH7750_WTCSR_CKS_DIV2048 0x06 /* 1/2048 */ +#define SH7750_WTCSR_CKS_DIV4096 0x07 /* 1/4096 */ + +/* + * Real-Time Clock (RTC) + */ +/* 64-Hz Counter Register (byte, read-only) - R64CNT */ +#define SH7750_R64CNT_REGOFS 0xC80000 /* offset */ +#define SH7750_R64CNT SH7750_P4_REG32(SH7750_R64CNT_REGOFS) +#define SH7750_R64CNT_A7 SH7750_A7_REG32(SH7750_R64CNT_REGOFS) + +/* Second Counter Register (byte, BCD-coded) - RSECCNT */ +#define SH7750_RSECCNT_REGOFS 0xC80004 /* offset */ +#define SH7750_RSECCNT SH7750_P4_REG32(SH7750_RSECCNT_REGOFS) +#define SH7750_RSECCNT_A7 SH7750_A7_REG32(SH7750_RSECCNT_REGOFS) + +/* Minute Counter Register (byte, BCD-coded) - RMINCNT */ +#define SH7750_RMINCNT_REGOFS 0xC80008 /* offset */ +#define SH7750_RMINCNT SH7750_P4_REG32(SH7750_RMINCNT_REGOFS) +#define SH7750_RMINCNT_A7 SH7750_A7_REG32(SH7750_RMINCNT_REGOFS) + +/* Hour Counter Register (byte, BCD-coded) - RHRCNT */ +#define SH7750_RHRCNT_REGOFS 0xC8000C /* offset */ +#define SH7750_RHRCNT SH7750_P4_REG32(SH7750_RHRCNT_REGOFS) +#define SH7750_RHRCNT_A7 SH7750_A7_REG32(SH7750_RHRCNT_REGOFS) + +/* Day-of-Week Counter Register (byte) - RWKCNT */ +#define SH7750_RWKCNT_REGOFS 0xC80010 /* offset */ +#define SH7750_RWKCNT SH7750_P4_REG32(SH7750_RWKCNT_REGOFS) +#define SH7750_RWKCNT_A7 SH7750_A7_REG32(SH7750_RWKCNT_REGOFS) + +#define SH7750_RWKCNT_SUN 0 /* Sunday */ +#define SH7750_RWKCNT_MON 1 /* Monday */ +#define SH7750_RWKCNT_TUE 2 /* Tuesday */ +#define SH7750_RWKCNT_WED 3 /* Wednesday */ +#define SH7750_RWKCNT_THU 4 /* Thursday */ +#define SH7750_RWKCNT_FRI 5 /* Friday */ +#define SH7750_RWKCNT_SAT 6 /* Saturday */ + +/* Day Counter Register (byte, BCD-coded) - RDAYCNT */ +#define SH7750_RDAYCNT_REGOFS 0xC80014 /* offset */ +#define SH7750_RDAYCNT SH7750_P4_REG32(SH7750_RDAYCNT_REGOFS) +#define SH7750_RDAYCNT_A7 SH7750_A7_REG32(SH7750_RDAYCNT_REGOFS) + +/* Month Counter Register (byte, BCD-coded) - RMONCNT */ +#define SH7750_RMONCNT_REGOFS 0xC80018 /* offset */ +#define SH7750_RMONCNT SH7750_P4_REG32(SH7750_RMONCNT_REGOFS) +#define SH7750_RMONCNT_A7 SH7750_A7_REG32(SH7750_RMONCNT_REGOFS) + +/* Year Counter Register (half, BCD-coded) - RYRCNT */ +#define SH7750_RYRCNT_REGOFS 0xC8001C /* offset */ +#define SH7750_RYRCNT SH7750_P4_REG32(SH7750_RYRCNT_REGOFS) +#define SH7750_RYRCNT_A7 SH7750_A7_REG32(SH7750_RYRCNT_REGOFS) + +/* Second Alarm Register (byte, BCD-coded) - RSECAR */ +#define SH7750_RSECAR_REGOFS 0xC80020 /* offset */ +#define SH7750_RSECAR SH7750_P4_REG32(SH7750_RSECAR_REGOFS) +#define SH7750_RSECAR_A7 SH7750_A7_REG32(SH7750_RSECAR_REGOFS) +#define SH7750_RSECAR_ENB 0x80 /* Second Alarm Enable */ + +/* Minute Alarm Register (byte, BCD-coded) - RMINAR */ +#define SH7750_RMINAR_REGOFS 0xC80024 /* offset */ +#define SH7750_RMINAR SH7750_P4_REG32(SH7750_RMINAR_REGOFS) +#define SH7750_RMINAR_A7 SH7750_A7_REG32(SH7750_RMINAR_REGOFS) +#define SH7750_RMINAR_ENB 0x80 /* Minute Alarm Enable */ + +/* Hour Alarm Register (byte, BCD-coded) - RHRAR */ +#define SH7750_RHRAR_REGOFS 0xC80028 /* offset */ +#define SH7750_RHRAR SH7750_P4_REG32(SH7750_RHRAR_REGOFS) +#define SH7750_RHRAR_A7 SH7750_A7_REG32(SH7750_RHRAR_REGOFS) +#define SH7750_RHRAR_ENB 0x80 /* Hour Alarm Enable */ + +/* Day-of-Week Alarm Register (byte) - RWKAR */ +#define SH7750_RWKAR_REGOFS 0xC8002C /* offset */ +#define SH7750_RWKAR SH7750_P4_REG32(SH7750_RWKAR_REGOFS) +#define SH7750_RWKAR_A7 SH7750_A7_REG32(SH7750_RWKAR_REGOFS) +#define SH7750_RWKAR_ENB 0x80 /* Day-of-week Alarm Enable */ + +#define SH7750_RWKAR_SUN 0 /* Sunday */ +#define 
SH7750_RWKAR_MON 1 /* Monday */
+#define SH7750_RWKAR_TUE 2 /* Tuesday */
+#define SH7750_RWKAR_WED 3 /* Wednesday */
+#define SH7750_RWKAR_THU 4 /* Thursday */
+#define SH7750_RWKAR_FRI 5 /* Friday */
+#define SH7750_RWKAR_SAT 6 /* Saturday */
+
+/* Day Alarm Register (byte, BCD-coded) - RDAYAR */
+#define SH7750_RDAYAR_REGOFS 0xC80030 /* offset */
+#define SH7750_RDAYAR SH7750_P4_REG32(SH7750_RDAYAR_REGOFS)
+#define SH7750_RDAYAR_A7 SH7750_A7_REG32(SH7750_RDAYAR_REGOFS)
+#define SH7750_RDAYAR_ENB 0x80 /* Day Alarm Enable */
+
+/* Month Alarm Register (byte, BCD-coded) - RMONAR */
+#define SH7750_RMONAR_REGOFS 0xC80034 /* offset */
+#define SH7750_RMONAR SH7750_P4_REG32(SH7750_RMONAR_REGOFS)
+#define SH7750_RMONAR_A7 SH7750_A7_REG32(SH7750_RMONAR_REGOFS)
+#define SH7750_RMONAR_ENB 0x80 /* Month Alarm Enable */
+
+/* RTC Control Register 1 (byte) - RCR1 */
+#define SH7750_RCR1_REGOFS 0xC80038 /* offset */
+#define SH7750_RCR1 SH7750_P4_REG32(SH7750_RCR1_REGOFS)
+#define SH7750_RCR1_A7 SH7750_A7_REG32(SH7750_RCR1_REGOFS)
+#define SH7750_RCR1_CF 0x80 /* Carry Flag */
+#define SH7750_RCR1_CIE 0x10 /* Carry Interrupt Enable */
+#define SH7750_RCR1_AIE 0x08 /* Alarm Interrupt Enable */
+#define SH7750_RCR1_AF 0x01 /* Alarm Flag */
+
+/* RTC Control Register 2 (byte) - RCR2 */
+#define SH7750_RCR2_REGOFS 0xC8003C /* offset */
+#define SH7750_RCR2 SH7750_P4_REG32(SH7750_RCR2_REGOFS)
+#define SH7750_RCR2_A7 SH7750_A7_REG32(SH7750_RCR2_REGOFS)
+#define SH7750_RCR2_PEF 0x80 /* Periodic Interrupt Flag */
+#define SH7750_RCR2_PES 0x70 /* Periodic Interrupt Enable: */
+#define SH7750_RCR2_PES_DIS 0x00 /* Periodic Interrupt Disabled */
+#define SH7750_RCR2_PES_DIV256 0x10 /* Generated at 1/256 sec interval */
+#define SH7750_RCR2_PES_DIV64 0x20 /* Generated at 1/64 sec interval */
+#define SH7750_RCR2_PES_DIV16 0x30 /* Generated at 1/16 sec interval */
+#define SH7750_RCR2_PES_DIV4 0x40 /* Generated at 1/4 sec interval */
+#define SH7750_RCR2_PES_DIV2 0x50 /* Generated at 1/2 sec interval */
+#define SH7750_RCR2_PES_x1 0x60 /* Generated at 1 sec interval */
+#define SH7750_RCR2_PES_x2 0x70 /* Generated at 2 sec interval */
+#define SH7750_RCR2_RTCEN 0x08 /* RTC Crystal Oscillator is Operated */
+#define SH7750_RCR2_ADJ 0x04 /* 30-Second Adjustment */
+#define SH7750_RCR2_RESET 0x02 /* Frequency divider circuits are reset */
+#define SH7750_RCR2_START 0x01 /* 0 - sec, min, hr, day-of-week, month, */
+ /* year counters are stopped */
+ /* 1 - sec, min, hr, day-of-week, month, */
+ /* year counters operate normally */
+/*
+ * Bus State Controller - BSC
+ */
+/* Bus Control Register 1 - BCR1 */
+#define SH7750_BCR1_REGOFS 0x800000 /* offset */
+#define SH7750_BCR1 SH7750_P4_REG32(SH7750_BCR1_REGOFS)
+#define SH7750_BCR1_A7 SH7750_A7_REG32(SH7750_BCR1_REGOFS)
+#define SH7750_BCR1_ENDIAN 0x80000000 /* Endianness (1 - little endian) */
+#define SH7750_BCR1_MASTER 0x40000000 /* Master/Slave mode (1-master) */
+#define SH7750_BCR1_A0MPX 0x20000000 /* Area 0 Memory Type (0-SRAM,1-MPX) */
+#define SH7750_BCR1_IPUP 0x02000000 /* Input Pin Pull-up Control: */
+ /* 0 - pull-up resistor is on for */
+ /* control input pins */
+ /* 1 - pull-up resistor is off */
+#define SH7750_BCR1_OPUP 0x01000000 /* Output Pin Pull-up Control: */
+ /* 0 - pull-up resistor is on for */
+ /* control output pins */
+ /* 1 - pull-up resistor is off */
+#define SH7750_BCR1_A1MBC 0x00200000 /* Area 1 SRAM Byte Control Mode: */
+ /* 0 - Area 1 SRAM is set to */
+ /* normal mode */
+ /* 1 - Area 1 SRAM is set to byte */
+ /* control mode */
+#define SH7750_BCR1_A4MBC 0x00100000 /* Area 4 SRAM Byte Control Mode: */
+ /* 0 - Area 4 SRAM is set to */
+ /* normal mode */
+ /* 1 - Area 4 SRAM is set to byte */
+ /* control mode */
+#define SH7750_BCR1_BREQEN 0x00080000 /* BREQ Enable: */
+ /* 0 - External requests are not */
+ /* accepted */
+ /* 1 - External requests are */
+ /* accepted */
+#define SH7750_BCR1_PSHR 0x00040000 /* Partial Sharing Bit: */
+ /* 0 - Master Mode */
+ /* 1 - Partial-sharing Mode */
+#define SH7750_BCR1_MEMMPX 0x00020000 /* Area 1 to 6 MPX Interface: */
+ /* 0 - SRAM/burst ROM interface */
+ /* 1 - MPX interface */
+#define SH7750_BCR1_HIZMEM 0x00008000 /* High Impedance Control. */
+ /* Specifies the state of A[25:0], */
+ /* BS\, CSn\, RD/WR\, CE2A\, CE2B\ */
+ /* in standby mode and when bus is */
+ /* released: */
+ /* 0 - signals go to High-Z mode */
+ /* 1 - signals driven */
+#define SH7750_BCR1_HIZCNT 0x00004000 /* High Impedance Control. */
+ /* Specifies the state of the */
+ /* RAS\, RAS2\, WEn\, CASn\, DQMn, */
+ /* RD\, CASS\, FRAME\, RD2\ */
+ /* signals in standby mode and */
+ /* when bus is released: */
+ /* 0 - signals go to High-Z mode */
+ /* 1 - signals driven */
+#define SH7750_BCR1_A0BST 0x00003800 /* Area 0 Burst ROM Control */
+#define SH7750_BCR1_A0BST_SRAM 0x0000 /* Area 0 accessed as SRAM i/f */
+#define SH7750_BCR1_A0BST_ROM4 0x0800 /* Area 0 accessed as burst ROM */
+ /* interface, 4 consecutive accesses */
+#define SH7750_BCR1_A0BST_ROM8 0x1000 /* Area 0 accessed as burst ROM */
+ /* interface, 8 consecutive accesses */
+#define SH7750_BCR1_A0BST_ROM16 0x1800 /* Area 0 accessed as burst ROM */
+ /* interface, 16 consecutive accesses */
+#define SH7750_BCR1_A0BST_ROM32 0x2000 /* Area 0 accessed as burst ROM */
+ /* interface, 32 consecutive accesses */
+
+#define SH7750_BCR1_A5BST 0x00000700 /* Area 5 Burst ROM Control */
+#define SH7750_BCR1_A5BST_SRAM 0x0000 /* Area 5 accessed as SRAM i/f */
+#define SH7750_BCR1_A5BST_ROM4 0x0100 /* Area 5 accessed as burst ROM */
+ /* interface, 4 consecutive accesses */
+#define SH7750_BCR1_A5BST_ROM8 0x0200 /* Area 5 accessed as burst ROM */
+ /* interface, 8 consecutive accesses */
+#define SH7750_BCR1_A5BST_ROM16 0x0300 /* Area 5 accessed as burst ROM */
+ /* interface, 16 consecutive accesses */
+#define SH7750_BCR1_A5BST_ROM32 0x0400 /* Area 5 accessed as burst ROM */
+ /* interface, 32 consecutive accesses */
+
+#define SH7750_BCR1_A6BST 0x000000E0 /* Area 6 Burst ROM Control */
+#define SH7750_BCR1_A6BST_SRAM 0x0000 /* Area 6 accessed as SRAM i/f */
+#define SH7750_BCR1_A6BST_ROM4 0x0020 /* Area 6 accessed as burst ROM */
+ /* interface, 4 consecutive accesses */
+#define SH7750_BCR1_A6BST_ROM8 0x0040 /* Area 6 accessed as burst ROM */
+ /* interface, 8 consecutive accesses */
+#define SH7750_BCR1_A6BST_ROM16 0x0060 /* Area 6 accessed as burst ROM */
+ /* interface, 16 consecutive accesses */
+#define SH7750_BCR1_A6BST_ROM32 0x0080 /* Area 6 accessed as burst ROM */
+ /* interface, 32 consecutive accesses */
+
+#define SH7750_BCR1_DRAMTP 0x001C /* Area 2 and 3 Memory Type */
+#define SH7750_BCR1_DRAMTP_2SRAM_3SRAM 0x0000 /* Area 2 and 3 are SRAM or */
+ /* MPX interface.
*/ +#define SH7750_BCR1_DRAMTP_2SRAM_3SDRAM 0x0008 /* Area 2 - SRAM/MPX, Area 3 */ + /* synchronous DRAM */ +#define SH7750_BCR1_DRAMTP_2SDRAM_3SDRAM 0x000C /* Area 2 and 3 are */ + /* synchronous DRAM interface */ +#define SH7750_BCR1_DRAMTP_2SRAM_3DRAM 0x0010 /* Area 2 - SRAM/MPX, Area 3 */ + /* DRAM interface */ +#define SH7750_BCR1_DRAMTP_2DRAM_3DRAM 0x0014 /* Area 2 and 3 are DRAM */ + /* interface */ + +#define SH7750_BCR1_A56PCM 0x00000001 /* Area 5 and 6 Bus Type: */ + /* 0 - SRAM interface */ + /* 1 - PCMCIA interface */ + +/* Bus Control Register 2 (half) - BCR2 */ +#define SH7750_BCR2_REGOFS 0x800004 /* offset */ +#define SH7750_BCR2 SH7750_P4_REG32(SH7750_BCR2_REGOFS) +#define SH7750_BCR2_A7 SH7750_A7_REG32(SH7750_BCR2_REGOFS) + +#define SH7750_BCR2_A0SZ 0xC000 /* Area 0 Bus Width */ +#define SH7750_BCR2_A0SZ_S 14 +#define SH7750_BCR2_A6SZ 0x3000 /* Area 6 Bus Width */ +#define SH7750_BCR2_A6SZ_S 12 +#define SH7750_BCR2_A5SZ 0x0C00 /* Area 5 Bus Width */ +#define SH7750_BCR2_A5SZ_S 10 +#define SH7750_BCR2_A4SZ 0x0300 /* Area 4 Bus Width */ +#define SH7750_BCR2_A4SZ_S 8 +#define SH7750_BCR2_A3SZ 0x00C0 /* Area 3 Bus Width */ +#define SH7750_BCR2_A3SZ_S 6 +#define SH7750_BCR2_A2SZ 0x0030 /* Area 2 Bus Width */ +#define SH7750_BCR2_A2SZ_S 4 +#define SH7750_BCR2_A1SZ 0x000C /* Area 1 Bus Width */ +#define SH7750_BCR2_A1SZ_S 2 +#define SH7750_BCR2_SZ_64 0 /* 64 bits */ +#define SH7750_BCR2_SZ_8 1 /* 8 bits */ +#define SH7750_BCR2_SZ_16 2 /* 16 bits */ +#define SH7750_BCR2_SZ_32 3 /* 32 bits */ +#define SH7750_BCR2_PORTEN 0x0001 /* Port Function Enable */ + /* 0 - D51-D32 are not used as a port */ + /* 1 - D51-D32 are used as a port */ + +/* Wait Control Register 1 - WCR1 */ +#define SH7750_WCR1_REGOFS 0x800008 /* offset */ +#define SH7750_WCR1 SH7750_P4_REG32(SH7750_WCR1_REGOFS) +#define SH7750_WCR1_A7 SH7750_A7_REG32(SH7750_WCR1_REGOFS) +#define SH7750_WCR1_DMAIW 0x70000000 /* DACK Device Inter-Cycle Idle */ + /* specification */ +#define SH7750_WCR1_DMAIW_S 28 +#define SH7750_WCR1_A6IW 0x07000000 /* Area 6 Inter-Cycle Idle spec. */ +#define SH7750_WCR1_A6IW_S 24 +#define SH7750_WCR1_A5IW 0x00700000 /* Area 5 Inter-Cycle Idle spec. */ +#define SH7750_WCR1_A5IW_S 20 +#define SH7750_WCR1_A4IW 0x00070000 /* Area 4 Inter-Cycle Idle spec. */ +#define SH7750_WCR1_A4IW_S 16 +#define SH7750_WCR1_A3IW 0x00007000 /* Area 3 Inter-Cycle Idle spec. */ +#define SH7750_WCR1_A3IW_S 12 +#define SH7750_WCR1_A2IW 0x00000700 /* Area 2 Inter-Cycle Idle spec. */ +#define SH7750_WCR1_A2IW_S 8 +#define SH7750_WCR1_A1IW 0x00000070 /* Area 1 Inter-Cycle Idle spec. */ +#define SH7750_WCR1_A1IW_S 4 +#define SH7750_WCR1_A0IW 0x00000007 /* Area 0 Inter-Cycle Idle spec. 
*/
+#define SH7750_WCR1_A0IW_S 0
+
+/* Wait Control Register 2 - WCR2 */
+#define SH7750_WCR2_REGOFS 0x80000C /* offset */
+#define SH7750_WCR2 SH7750_P4_REG32(SH7750_WCR2_REGOFS)
+#define SH7750_WCR2_A7 SH7750_A7_REG32(SH7750_WCR2_REGOFS)
+
+#define SH7750_WCR2_A6W 0xE0000000 /* Area 6 Wait Control */
+#define SH7750_WCR2_A6W_S 29
+#define SH7750_WCR2_A6B 0x1C000000 /* Area 6 Burst Pitch */
+#define SH7750_WCR2_A6B_S 26
+#define SH7750_WCR2_A5W 0x03800000 /* Area 5 Wait Control */
+#define SH7750_WCR2_A5W_S 23
+#define SH7750_WCR2_A5B 0x00700000 /* Area 5 Burst Pitch */
+#define SH7750_WCR2_A5B_S 20
+#define SH7750_WCR2_A4W 0x000E0000 /* Area 4 Wait Control */
+#define SH7750_WCR2_A4W_S 17
+#define SH7750_WCR2_A3W 0x0000E000 /* Area 3 Wait Control */
+#define SH7750_WCR2_A3W_S 13
+#define SH7750_WCR2_A2W 0x00000E00 /* Area 2 Wait Control */
+#define SH7750_WCR2_A2W_S 9
+#define SH7750_WCR2_A1W 0x000001C0 /* Area 1 Wait Control */
+#define SH7750_WCR2_A1W_S 6
+#define SH7750_WCR2_A0W 0x00000038 /* Area 0 Wait Control */
+#define SH7750_WCR2_A0W_S 3
+#define SH7750_WCR2_A0B 0x00000007 /* Area 0 Burst Pitch */
+#define SH7750_WCR2_A0B_S 0
+
+#define SH7750_WCR2_WS0 0 /* 0 wait states inserted */
+#define SH7750_WCR2_WS1 1 /* 1 wait state inserted */
+#define SH7750_WCR2_WS2 2 /* 2 wait states inserted */
+#define SH7750_WCR2_WS3 3 /* 3 wait states inserted */
+#define SH7750_WCR2_WS6 4 /* 6 wait states inserted */
+#define SH7750_WCR2_WS9 5 /* 9 wait states inserted */
+#define SH7750_WCR2_WS12 6 /* 12 wait states inserted */
+#define SH7750_WCR2_WS15 7 /* 15 wait states inserted */
+
+#define SH7750_WCR2_BPWS0 0 /* 0 wait states inserted from 2nd access */
+#define SH7750_WCR2_BPWS1 1 /* 1 wait state inserted from 2nd access */
+#define SH7750_WCR2_BPWS2 2 /* 2 wait states inserted from 2nd access */
+#define SH7750_WCR2_BPWS3 3 /* 3 wait states inserted from 2nd access */
+#define SH7750_WCR2_BPWS4 4 /* 4 wait states inserted from 2nd access */
+#define SH7750_WCR2_BPWS5 5 /* 5 wait states inserted from 2nd access */
+#define SH7750_WCR2_BPWS6 6 /* 6 wait states inserted from 2nd access */
+#define SH7750_WCR2_BPWS7 7 /* 7 wait states inserted from 2nd access */
+
+/* DRAM CAS\ Assertion Delay (area 3,2) */
+#define SH7750_WCR2_DRAM_CAS_ASW1 0 /* 1 cycle */
+#define SH7750_WCR2_DRAM_CAS_ASW2 1 /* 2 cycles */
+#define SH7750_WCR2_DRAM_CAS_ASW3 2 /* 3 cycles */
+#define SH7750_WCR2_DRAM_CAS_ASW4 3 /* 4 cycles */
+#define SH7750_WCR2_DRAM_CAS_ASW7 4 /* 7 cycles */
+#define SH7750_WCR2_DRAM_CAS_ASW10 5 /* 10 cycles */
+#define SH7750_WCR2_DRAM_CAS_ASW13 6 /* 13 cycles */
+#define SH7750_WCR2_DRAM_CAS_ASW16 7 /* 16 cycles */
+
+/* SDRAM CAS\ Latency Cycles */
+#define SH7750_WCR2_SDRAM_CAS_LAT1 1 /* 1 cycle */
+#define SH7750_WCR2_SDRAM_CAS_LAT2 2 /* 2 cycles */
+#define SH7750_WCR2_SDRAM_CAS_LAT3 3 /* 3 cycles */
+#define SH7750_WCR2_SDRAM_CAS_LAT4 4 /* 4 cycles */
+#define SH7750_WCR2_SDRAM_CAS_LAT5 5 /* 5 cycles */
+
+/* Wait Control Register 3 - WCR3 */
+#define SH7750_WCR3_REGOFS 0x800010 /* offset */
+#define SH7750_WCR3 SH7750_P4_REG32(SH7750_WCR3_REGOFS)
+#define SH7750_WCR3_A7 SH7750_A7_REG32(SH7750_WCR3_REGOFS)
+
+#define SH7750_WCR3_A6S 0x04000000 /* Area 6 Write Strobe Setup time */
+#define SH7750_WCR3_A6H 0x03000000 /* Area 6 Data Hold Time */
+#define SH7750_WCR3_A6H_S 24
+#define SH7750_WCR3_A5S 0x00400000 /* Area 5 Write Strobe Setup time */
+#define SH7750_WCR3_A5H 0x00300000 /* Area 5 Data Hold Time */
+#define SH7750_WCR3_A5H_S 20
+#define SH7750_WCR3_A4S 0x00040000 /* Area 4 Write Strobe Setup time */
+#define SH7750_WCR3_A4H 0x00030000 /* Area 4 Data Hold Time */
+#define SH7750_WCR3_A4H_S 16
+#define SH7750_WCR3_A3S 0x00004000 /* Area 3 Write Strobe Setup time */
+#define SH7750_WCR3_A3H 0x00003000 /* Area 3 Data Hold Time */
+#define SH7750_WCR3_A3H_S 12
+#define SH7750_WCR3_A2S 0x00000400 /* Area 2 Write Strobe Setup time */
+#define SH7750_WCR3_A2H 0x00000300 /* Area 2 Data Hold Time */
+#define SH7750_WCR3_A2H_S 8
+#define SH7750_WCR3_A1S 0x00000040 /* Area 1 Write Strobe Setup time */
+#define SH7750_WCR3_A1H 0x00000030 /* Area 1 Data Hold Time */
+#define SH7750_WCR3_A1H_S 4
+#define SH7750_WCR3_A0S 0x00000004 /* Area 0 Write Strobe Setup time */
+#define SH7750_WCR3_A0H 0x00000003 /* Area 0 Data Hold Time */
+#define SH7750_WCR3_A0H_S 0
+
+#define SH7750_WCR3_DHWS_0 0 /* 0 wait states data hold time */
+#define SH7750_WCR3_DHWS_1 1 /* 1 wait state data hold time */
+#define SH7750_WCR3_DHWS_2 2 /* 2 wait states data hold time */
+#define SH7750_WCR3_DHWS_3 3 /* 3 wait states data hold time */
+
+#define SH7750_MCR_REGOFS 0x800014 /* offset */
+#define SH7750_MCR SH7750_P4_REG32(SH7750_MCR_REGOFS)
+#define SH7750_MCR_A7 SH7750_A7_REG32(SH7750_MCR_REGOFS)
+
+#define SH7750_MCR_RASD 0x80000000 /* RAS Down mode */
+#define SH7750_MCR_MRSET 0x40000000 /* SDRAM Mode Register Set */
+#define SH7750_MCR_PALL 0x00000000 /* SDRAM Precharge All cmd. Mode */
+#define SH7750_MCR_TRC 0x38000000 /* RAS Precharge Time at End of */
+                                  /* Refresh: */
+#define SH7750_MCR_TRC_0 0x00000000 /* 0 */
+#define SH7750_MCR_TRC_3 0x08000000 /* 3 */
+#define SH7750_MCR_TRC_6 0x10000000 /* 6 */
+#define SH7750_MCR_TRC_9 0x18000000 /* 9 */
+#define SH7750_MCR_TRC_12 0x20000000 /* 12 */
+#define SH7750_MCR_TRC_15 0x28000000 /* 15 */
+#define SH7750_MCR_TRC_18 0x30000000 /* 18 */
+#define SH7750_MCR_TRC_21 0x38000000 /* 21 */
+
+#define SH7750_MCR_TCAS 0x00800000 /* CAS Negation Period */
+#define SH7750_MCR_TCAS_1 0x00000000 /* 1 */
+#define SH7750_MCR_TCAS_2 0x00800000 /* 2 */
+
+#define SH7750_MCR_TPC 0x00380000 /* DRAM: RAS Precharge Period */
+                                  /* SDRAM: minimum number of cycles */
+                                  /* until the next bank active cmd */
+                                  /* is output after precharging */
+#define SH7750_MCR_TPC_S 19
+#define SH7750_MCR_TPC_SDRAM_1 0x00000000 /* 1 cycle */
+#define SH7750_MCR_TPC_SDRAM_2 0x00080000 /* 2 cycles */
+#define SH7750_MCR_TPC_SDRAM_3 0x00100000 /* 3 cycles */
+#define SH7750_MCR_TPC_SDRAM_4 0x00180000 /* 4 cycles */
+#define SH7750_MCR_TPC_SDRAM_5 0x00200000 /* 5 cycles */
+#define SH7750_MCR_TPC_SDRAM_6 0x00280000 /* 6 cycles */
+#define SH7750_MCR_TPC_SDRAM_7 0x00300000 /* 7 cycles */
+#define SH7750_MCR_TPC_SDRAM_8 0x00380000 /* 8 cycles */
+
+#define SH7750_MCR_RCD 0x00030000 /* DRAM: RAS-CAS Assertion Delay */
+                                  /* time */
+                                  /* SDRAM: bank active-read/write */
+                                  /* command delay time */
+#define SH7750_MCR_RCD_DRAM_2 0x00000000 /* DRAM delay 2 clocks */
+#define SH7750_MCR_RCD_DRAM_3 0x00010000 /* DRAM delay 3 clocks */
+#define SH7750_MCR_RCD_DRAM_4 0x00020000 /* DRAM delay 4 clocks */
+#define SH7750_MCR_RCD_DRAM_5 0x00030000 /* DRAM delay 5 clocks */
+#define SH7750_MCR_RCD_SDRAM_2 0x00010000 /* SDRAM delay 2 clocks */
+#define SH7750_MCR_RCD_SDRAM_3 0x00020000 /* SDRAM delay 3 clocks */
+#define SH7750_MCR_RCD_SDRAM_4 0x00030000 /* SDRAM delay 4 clocks */
+
+#define SH7750_MCR_TRWL 0x0000E000 /* SDRAM Write Precharge Delay */
+#define SH7750_MCR_TRWL_1 0x00000000 /* 1 */
+#define SH7750_MCR_TRWL_2 0x00002000 /* 2 */
+#define SH7750_MCR_TRWL_3 0x00004000 /* 3 */
+#define SH7750_MCR_TRWL_4 0x00006000 /* 4 */
+#define SH7750_MCR_TRWL_5 0x00008000 /* 5 */
+
+#define SH7750_MCR_TRAS 0x00001C00 /* DRAM: CAS-Before-RAS Refresh RAS */
+                                   /* asserting period */
+                                   /* SDRAM: Command interval after */
+                                   /* synchronous DRAM refresh */
+#define SH7750_MCR_TRAS_DRAM_2 0x00000000 /* 2 */
+#define SH7750_MCR_TRAS_DRAM_3 0x00000400 /* 3 */
+#define SH7750_MCR_TRAS_DRAM_4 0x00000800 /* 4 */
+#define SH7750_MCR_TRAS_DRAM_5 0x00000C00 /* 5 */
+#define SH7750_MCR_TRAS_DRAM_6 0x00001000 /* 6 */
+#define SH7750_MCR_TRAS_DRAM_7 0x00001400 /* 7 */
+#define SH7750_MCR_TRAS_DRAM_8 0x00001800 /* 8 */
+#define SH7750_MCR_TRAS_DRAM_9 0x00001C00 /* 9 */
+
+#define SH7750_MCR_TRAS_SDRAM_TRC_4 0x00000000 /* 4 + TRC */
+#define SH7750_MCR_TRAS_SDRAM_TRC_5 0x00000400 /* 5 + TRC */
+#define SH7750_MCR_TRAS_SDRAM_TRC_6 0x00000800 /* 6 + TRC */
+#define SH7750_MCR_TRAS_SDRAM_TRC_7 0x00000C00 /* 7 + TRC */
+#define SH7750_MCR_TRAS_SDRAM_TRC_8 0x00001000 /* 8 + TRC */
+#define SH7750_MCR_TRAS_SDRAM_TRC_9 0x00001400 /* 9 + TRC */
+#define SH7750_MCR_TRAS_SDRAM_TRC_10 0x00001800 /* 10 + TRC */
+#define SH7750_MCR_TRAS_SDRAM_TRC_11 0x00001C00 /* 11 + TRC */
+
+#define SH7750_MCR_BE 0x00000200 /* Burst Enable */
+#define SH7750_MCR_SZ 0x00000180 /* Memory Data Size */
+#define SH7750_MCR_SZ_64 0x00000000 /* 64 bits */
+#define SH7750_MCR_SZ_16 0x00000100 /* 16 bits */
+#define SH7750_MCR_SZ_32 0x00000180 /* 32 bits */
+
+#define SH7750_MCR_AMX 0x00000078 /* Address Multiplexing */
+#define SH7750_MCR_AMX_S 3
+#define SH7750_MCR_AMX_DRAM_8BIT_COL 0x00000000 /* 8-bit column addr */
+#define SH7750_MCR_AMX_DRAM_9BIT_COL 0x00000008 /* 9-bit column addr */
+#define SH7750_MCR_AMX_DRAM_10BIT_COL 0x00000010 /* 10-bit column addr */
+#define SH7750_MCR_AMX_DRAM_11BIT_COL 0x00000018 /* 11-bit column addr */
+#define SH7750_MCR_AMX_DRAM_12BIT_COL 0x00000020 /* 12-bit column addr */
+/* See SH7750 Hardware Manual for SDRAM address multiplexor selection */
+
+#define SH7750_MCR_RFSH 0x00000004 /* Refresh Control */
+#define SH7750_MCR_RMODE 0x00000002 /* Refresh Mode: */
+#define SH7750_MCR_RMODE_NORMAL 0x00000000 /* Normal Refresh Mode */
+#define SH7750_MCR_RMODE_SELF 0x00000002 /* Self-Refresh Mode */
+#define SH7750_MCR_RMODE_EDO 0x00000001 /* EDO Mode */
+
+/* SDRAM Mode Set address */
+#define SH7750_SDRAM_MODE_A2_BASE 0xFF900000
+#define SH7750_SDRAM_MODE_A3_BASE 0xFF940000
+#define SH7750_SDRAM_MODE_A2_32BIT(x) (SH7750_SDRAM_MODE_A2_BASE + ((x) << 2))
+#define SH7750_SDRAM_MODE_A3_32BIT(x) (SH7750_SDRAM_MODE_A3_BASE + ((x) << 2))
+#define SH7750_SDRAM_MODE_A2_64BIT(x) (SH7750_SDRAM_MODE_A2_BASE + ((x) << 3))
+#define SH7750_SDRAM_MODE_A3_64BIT(x) (SH7750_SDRAM_MODE_A3_BASE + ((x) << 3))
+
+
+/* PCMCIA Control Register (half) - PCR */
+#define SH7750_PCR_REGOFS 0x800018 /* offset */
+#define SH7750_PCR SH7750_P4_REG32(SH7750_PCR_REGOFS)
+#define SH7750_PCR_A7 SH7750_A7_REG32(SH7750_PCR_REGOFS)
+
+#define SH7750_PCR_A5PCW 0xC000 /* Area 5 PCMCIA Wait - Number of wait */
+                                /* states to be added to the number of */
+                                /* waits specified by WCR2 in a */
+                                /* low-speed PCMCIA wait cycle */
+#define SH7750_PCR_A5PCW_0 0x0000 /* 0 waits inserted */
+#define SH7750_PCR_A5PCW_15 0x4000 /* 15 waits inserted */
+#define SH7750_PCR_A5PCW_30 0x8000 /* 30 waits inserted */
+#define SH7750_PCR_A5PCW_50 0xC000 /* 50 waits inserted */
+
+#define SH7750_PCR_A6PCW 0x3000 /* Area 6 PCMCIA Wait - Number of wait */
+                                /* states to be added to the number of */
+                                /* waits specified by WCR2 in a */
+                                /* low-speed PCMCIA wait cycle */
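A note on usage: each field mask above has a matching *_S shift constant, and the SH7750_WCR2_WS*/BPWS* codes are meant to be shifted into those fields. A minimal sketch, assuming <stdint.h> is available and that SH7750_WCR2 expands to the register's P4 address as elsewhere in this header (the area and wait values are invented purely for illustration):

    /* Hypothetical settings: 6 wait states for area 0, 3 for area 6,
     * and a burst pitch of 2 waits from the 2nd access on both areas. */
    uint32_t wcr2 = (SH7750_WCR2_WS6   << SH7750_WCR2_A0W_S) |
                    (SH7750_WCR2_BPWS2 << SH7750_WCR2_A0B_S) |
                    (SH7750_WCR2_WS3   << SH7750_WCR2_A6W_S) |
                    (SH7750_WCR2_BPWS2 << SH7750_WCR2_A6B_S);
    *(volatile uint32_t *)SH7750_WCR2 = wcr2;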
+#define SH7750_PCR_A6PCW_0 0x0000 /* 0 waits inserted */
+#define SH7750_PCR_A6PCW_15 0x1000 /* 15 waits inserted */
+#define SH7750_PCR_A6PCW_30 0x2000 /* 30 waits inserted */
+#define SH7750_PCR_A6PCW_50 0x3000 /* 50 waits inserted */
+
+#define SH7750_PCR_A5TED 0x0E00 /* Area 5 Addr-OE\/WE\ Assertion Delay */
+                                /* delay time from address output to */
+                                /* OE\/WE\ assertion on the connected */
+                                /* PCMCIA interface */
+#define SH7750_PCR_A5TED_S 9
+#define SH7750_PCR_A6TED 0x01C0 /* Area 6 Addr-OE\/WE\ Assertion Delay */
+#define SH7750_PCR_A6TED_S 6
+
+#define SH7750_PCR_TED_0WS 0 /* 0 Waits inserted */
+#define SH7750_PCR_TED_1WS 1 /* 1 Wait inserted */
+#define SH7750_PCR_TED_2WS 2 /* 2 Waits inserted */
+#define SH7750_PCR_TED_3WS 3 /* 3 Waits inserted */
+#define SH7750_PCR_TED_6WS 4 /* 6 Waits inserted */
+#define SH7750_PCR_TED_9WS 5 /* 9 Waits inserted */
+#define SH7750_PCR_TED_12WS 6 /* 12 Waits inserted */
+#define SH7750_PCR_TED_15WS 7 /* 15 Waits inserted */
+
+#define SH7750_PCR_A5TEH 0x0038 /* Area 5 OE\/WE\ Negation Addr delay, */
+                                /* address hold delay time from OE\/WE\ */
+                                /* negation in a write on the connected */
+                                /* PCMCIA interface */
+#define SH7750_PCR_A5TEH_S 3
+
+#define SH7750_PCR_A6TEH 0x0007 /* Area 6 OE\/WE\ Negation Address delay */
+#define SH7750_PCR_A6TEH_S 0
+
+#define SH7750_PCR_TEH_0WS 0 /* 0 Waits inserted */
+#define SH7750_PCR_TEH_1WS 1 /* 1 Wait inserted */
+#define SH7750_PCR_TEH_2WS 2 /* 2 Waits inserted */
+#define SH7750_PCR_TEH_3WS 3 /* 3 Waits inserted */
+#define SH7750_PCR_TEH_6WS 4 /* 6 Waits inserted */
+#define SH7750_PCR_TEH_9WS 5 /* 9 Waits inserted */
+#define SH7750_PCR_TEH_12WS 6 /* 12 Waits inserted */
+#define SH7750_PCR_TEH_15WS 7 /* 15 Waits inserted */
+
+/* Refresh Timer Control/Status Register (half) - RTCSR */
+#define SH7750_RTCSR_REGOFS 0x80001C /* offset */
+#define SH7750_RTCSR SH7750_P4_REG32(SH7750_RTCSR_REGOFS)
+#define SH7750_RTCSR_A7 SH7750_A7_REG32(SH7750_RTCSR_REGOFS)
+
+#define SH7750_RTCSR_KEY 0xA500 /* RTCSR write key */
+#define SH7750_RTCSR_CMF 0x0080 /* Compare-Match Flag (indicates a */
+                                /* match between the refresh timer */
+                                /* counter and refresh time constant) */
+#define SH7750_RTCSR_CMIE 0x0040 /* Compare-Match Interrupt Enable */
+#define SH7750_RTCSR_CKS 0x0038 /* Refresh Counter Clock Selects */
+#define SH7750_RTCSR_CKS_DIS 0x0000 /* Clock Input Disabled */
+#define SH7750_RTCSR_CKS_CKIO_DIV4 0x0008 /* Bus Clock / 4 */
+#define SH7750_RTCSR_CKS_CKIO_DIV16 0x0010 /* Bus Clock / 16 */
+#define SH7750_RTCSR_CKS_CKIO_DIV64 0x0018 /* Bus Clock / 64 */
+#define SH7750_RTCSR_CKS_CKIO_DIV256 0x0020 /* Bus Clock / 256 */
+#define SH7750_RTCSR_CKS_CKIO_DIV1024 0x0028 /* Bus Clock / 1024 */
+#define SH7750_RTCSR_CKS_CKIO_DIV2048 0x0030 /* Bus Clock / 2048 */
+#define SH7750_RTCSR_CKS_CKIO_DIV4096 0x0038 /* Bus Clock / 4096 */
+
+#define SH7750_RTCSR_OVF 0x0004 /* Refresh Count Overflow Flag */
+#define SH7750_RTCSR_OVIE 0x0002 /* Refresh Count Overflow Interrupt */
+                                 /* Enable */
+#define SH7750_RTCSR_LMTS 0x0001 /* Refresh Count Overflow Limit Select */
+#define SH7750_RTCSR_LMTS_1024 0x0000 /* Count Limit is 1024 */
+#define SH7750_RTCSR_LMTS_512 0x0001 /* Count Limit is 512 */
+
+/* Refresh Timer Counter (half) - RTCNT */
+#define SH7750_RTCNT_REGOFS 0x800020 /* offset */
+#define SH7750_RTCNT SH7750_P4_REG32(SH7750_RTCNT_REGOFS)
+#define SH7750_RTCNT_A7 SH7750_A7_REG32(SH7750_RTCNT_REGOFS)
+
+#define SH7750_RTCNT_KEY 0xA500 /* RTCNT write key */
+
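The refresh registers share a write-key convention: the upper byte of each 16-bit write must carry that register's key (0xA5 here, 0xA4 for RFCR below), otherwise the hardware disregards the write. A minimal sketch, assuming SH7750_RTCSR resolves to the register's P4 address and using an arbitrarily chosen clock divider:

    /* Run the refresh counter from bus clock / 16; the key must sit in
     * the upper byte of the 16-bit value for the write to take effect. */
    *(volatile uint16_t *)SH7750_RTCSR =
        SH7750_RTCSR_KEY | SH7750_RTCSR_CKS_CKIO_DIV16;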
+/* Refresh Time Constant Register (half) - RTCOR */
+#define SH7750_RTCOR_REGOFS 0x800024 /* offset */
+#define SH7750_RTCOR SH7750_P4_REG32(SH7750_RTCOR_REGOFS)
+#define SH7750_RTCOR_A7 SH7750_A7_REG32(SH7750_RTCOR_REGOFS)
+
+#define SH7750_RTCOR_KEY 0xA500 /* RTCOR write key */
+
+/* Refresh Count Register (half) - RFCR */
+#define SH7750_RFCR_REGOFS 0x800028 /* offset */
+#define SH7750_RFCR SH7750_P4_REG32(SH7750_RFCR_REGOFS)
+#define SH7750_RFCR_A7 SH7750_A7_REG32(SH7750_RFCR_REGOFS)
+
+#define SH7750_RFCR_KEY 0xA400 /* RFCR write key */
+
+/* Synchronous DRAM mode registers - SDMR */
+#define SH7750_SDMR2_REGOFS 0x900000 /* base offset */
+#define SH7750_SDMR2_REGNB 0x0FFC /* number of registers */
+#define SH7750_SDMR2 SH7750_P4_REG32(SH7750_SDMR2_REGOFS)
+#define SH7750_SDMR2_A7 SH7750_A7_REG32(SH7750_SDMR2_REGOFS)
+
+#define SH7750_SDMR3_REGOFS 0x940000 /* offset */
+#define SH7750_SDMR3_REGNB 0x0FFC /* number of registers */
+#define SH7750_SDMR3 SH7750_P4_REG32(SH7750_SDMR3_REGOFS)
+#define SH7750_SDMR3_A7 SH7750_A7_REG32(SH7750_SDMR3_REGOFS)
+
+/*
+ * Direct Memory Access Controller (DMAC)
+ */
+
+/* DMA Source Address Register - SAR0, SAR1, SAR2, SAR3 */
+#define SH7750_SAR_REGOFS(n) (0xA00000 + ((n) * 16)) /* offset */
+#define SH7750_SAR(n) SH7750_P4_REG32(SH7750_SAR_REGOFS(n))
+#define SH7750_SAR_A7(n) SH7750_A7_REG32(SH7750_SAR_REGOFS(n))
+#define SH7750_SAR0 SH7750_SAR(0)
+#define SH7750_SAR1 SH7750_SAR(1)
+#define SH7750_SAR2 SH7750_SAR(2)
+#define SH7750_SAR3 SH7750_SAR(3)
+#define SH7750_SAR0_A7 SH7750_SAR_A7(0)
+#define SH7750_SAR1_A7 SH7750_SAR_A7(1)
+#define SH7750_SAR2_A7 SH7750_SAR_A7(2)
+#define SH7750_SAR3_A7 SH7750_SAR_A7(3)
+
+/* DMA Destination Address Register - DAR0, DAR1, DAR2, DAR3 */
+#define SH7750_DAR_REGOFS(n) (0xA00004 + ((n) * 16)) /* offset */
+#define SH7750_DAR(n) SH7750_P4_REG32(SH7750_DAR_REGOFS(n))
+#define SH7750_DAR_A7(n) SH7750_A7_REG32(SH7750_DAR_REGOFS(n))
+#define SH7750_DAR0 SH7750_DAR(0)
+#define SH7750_DAR1 SH7750_DAR(1)
+#define SH7750_DAR2 SH7750_DAR(2)
+#define SH7750_DAR3 SH7750_DAR(3)
+#define SH7750_DAR0_A7 SH7750_DAR_A7(0)
+#define SH7750_DAR1_A7 SH7750_DAR_A7(1)
+#define SH7750_DAR2_A7 SH7750_DAR_A7(2)
+#define SH7750_DAR3_A7 SH7750_DAR_A7(3)
+
+/* DMA Transfer Count Register - DMATCR0, DMATCR1, DMATCR2, DMATCR3 */
+#define SH7750_DMATCR_REGOFS(n) (0xA00008 + ((n) * 16)) /* offset */
+#define SH7750_DMATCR(n) SH7750_P4_REG32(SH7750_DMATCR_REGOFS(n))
+#define SH7750_DMATCR_A7(n) SH7750_A7_REG32(SH7750_DMATCR_REGOFS(n))
+#define SH7750_DMATCR0_P4 SH7750_DMATCR(0)
+#define SH7750_DMATCR1_P4 SH7750_DMATCR(1)
+#define SH7750_DMATCR2_P4 SH7750_DMATCR(2)
+#define SH7750_DMATCR3_P4 SH7750_DMATCR(3)
+#define SH7750_DMATCR0_A7 SH7750_DMATCR_A7(0)
+#define SH7750_DMATCR1_A7 SH7750_DMATCR_A7(1)
+#define SH7750_DMATCR2_A7 SH7750_DMATCR_A7(2)
+#define SH7750_DMATCR3_A7 SH7750_DMATCR_A7(3)
+
+/* DMA Channel Control Register - CHCR0, CHCR1, CHCR2, CHCR3 */
+#define SH7750_CHCR_REGOFS(n) (0xA0000C + ((n) * 16)) /* offset */
+#define SH7750_CHCR(n) SH7750_P4_REG32(SH7750_CHCR_REGOFS(n))
+#define SH7750_CHCR_A7(n) SH7750_A7_REG32(SH7750_CHCR_REGOFS(n))
+#define SH7750_CHCR0 SH7750_CHCR(0)
+#define SH7750_CHCR1 SH7750_CHCR(1)
+#define SH7750_CHCR2 SH7750_CHCR(2)
+#define SH7750_CHCR3 SH7750_CHCR(3)
+#define SH7750_CHCR0_A7 SH7750_CHCR_A7(0)
+#define SH7750_CHCR1_A7 SH7750_CHCR_A7(1)
+#define SH7750_CHCR2_A7 SH7750_CHCR_A7(2)
+#define SH7750_CHCR3_A7 SH7750_CHCR_A7(3)
+
+#define SH7750_CHCR_SSA 0xE0000000 /* Source Address Space Attribute */
+#define SH7750_CHCR_SSA_PCMCIA 0x00000000 /* Reserved in PCMCIA access */
+#define SH7750_CHCR_SSA_DYNBSZ 0x20000000 /* Dynamic Bus Sizing I/O space */
+#define SH7750_CHCR_SSA_IO8 0x40000000 /* 8-bit I/O space */
+#define SH7750_CHCR_SSA_IO16 0x60000000 /* 16-bit I/O space */
+#define SH7750_CHCR_SSA_CMEM8 0x80000000 /* 8-bit common memory space */
+#define SH7750_CHCR_SSA_CMEM16 0xA0000000 /* 16-bit common memory space */
+#define SH7750_CHCR_SSA_AMEM8 0xC0000000 /* 8-bit attribute memory space */
+#define SH7750_CHCR_SSA_AMEM16 0xE0000000 /* 16-bit attribute memory space */
+
+#define SH7750_CHCR_STC 0x10000000 /* Source Addr Wait Control Select */
+                                   /* specifies CS5 or CS6 space wait */
+                                   /* control for PCMCIA access */
+
+#define SH7750_CHCR_DSA 0x0E000000 /* Destination Address Space Attribute */
+#define SH7750_CHCR_DSA_PCMCIA 0x00000000 /* Reserved in PCMCIA access */
+#define SH7750_CHCR_DSA_DYNBSZ 0x02000000 /* Dynamic Bus Sizing I/O space */
+#define SH7750_CHCR_DSA_IO8 0x04000000 /* 8-bit I/O space */
+#define SH7750_CHCR_DSA_IO16 0x06000000 /* 16-bit I/O space */
+#define SH7750_CHCR_DSA_CMEM8 0x08000000 /* 8-bit common memory space */
+#define SH7750_CHCR_DSA_CMEM16 0x0A000000 /* 16-bit common memory space */
+#define SH7750_CHCR_DSA_AMEM8 0x0C000000 /* 8-bit attribute memory space */
+#define SH7750_CHCR_DSA_AMEM16 0x0E000000 /* 16-bit attribute memory space */
+
+#define SH7750_CHCR_DTC 0x01000000 /* Destination Address Wait Control */
+                                   /* Select, specifies CS5 or CS6 */
+                                   /* space wait control for PCMCIA */
+                                   /* access */
+
+#define SH7750_CHCR_DS 0x00080000 /* DREQ\ Select : */
+#define SH7750_CHCR_DS_LOWLVL 0x00000000 /* Low Level Detection */
+#define SH7750_CHCR_DS_FALL 0x00080000 /* Falling Edge Detection */
+
+#define SH7750_CHCR_RL 0x00040000 /* Request Check Level: */
+#define SH7750_CHCR_RL_ACTH 0x00000000 /* DRAK is an active high out */
+#define SH7750_CHCR_RL_ACTL 0x00040000 /* DRAK is an active low out */
+
+#define SH7750_CHCR_AM 0x00020000 /* Acknowledge Mode: */
+#define SH7750_CHCR_AM_RD 0x00000000 /* DACK is output in read cycle */
+#define SH7750_CHCR_AM_WR 0x00020000 /* DACK is output in write cycle */
+
+#define SH7750_CHCR_AL 0x00010000 /* Acknowledge Level: */
+#define SH7750_CHCR_AL_ACTH 0x00000000 /* DACK is an active high out */
+#define SH7750_CHCR_AL_ACTL 0x00010000 /* DACK is an active low out */
+
+#define SH7750_CHCR_DM 0x0000C000 /* Destination Address Mode: */
+#define SH7750_CHCR_DM_FIX 0x00000000 /* Destination Addr Fixed */
+#define SH7750_CHCR_DM_INC 0x00004000 /* Destination Addr Incremented */
+#define SH7750_CHCR_DM_DEC 0x00008000 /* Destination Addr Decremented */
+
+#define SH7750_CHCR_SM 0x00003000 /* Source Address Mode: */
+#define SH7750_CHCR_SM_FIX 0x00000000 /* Source Addr Fixed */
+#define SH7750_CHCR_SM_INC 0x00001000 /* Source Addr Incremented */
+#define SH7750_CHCR_SM_DEC 0x00002000 /* Source Addr Decremented */
+
+#define SH7750_CHCR_RS 0x00000F00 /* Request Source Select: */
+#define SH7750_CHCR_RS_ER_DA_EA_TO_EA 0x000 /* External Request, Dual Addr */
+                                            /* Mode, (External Addr Space */
+                                            /* -> External Addr Space) */
+#define SH7750_CHCR_RS_ER_SA_EA_TO_ED 0x200 /* External Request, Single */
+                                            /* Address Mode (Ext. Addr */
+                                            /* Space -> External Device) */
+#define SH7750_CHCR_RS_ER_SA_ED_TO_EA 0x300 /* External Request, Single */
+                                            /* Address Mode, (External */
+                                            /* Device -> External Addr */
+                                            /* Space) */
+#define SH7750_CHCR_RS_AR_EA_TO_EA 0x400 /* Auto-Request (External Addr */
+                                         /* Space -> Ext. Addr Space) */
+
+#define SH7750_CHCR_RS_AR_EA_TO_OCP 0x500 /* Auto-Request (External Addr */
+                                          /* Space -> On-chip */
+                                          /* Peripheral Module) */
+#define SH7750_CHCR_RS_AR_OCP_TO_EA 0x600 /* Auto-Request (On-chip */
+                                          /* Peripheral Module -> */
+                                          /* External Addr Space) */
+#define SH7750_CHCR_RS_SCITX_EA_TO_SC 0x800 /* SCI Transmit-Data-Empty intr */
+                                            /* transfer request (external */
+                                            /* address space -> SCTDR1) */
+#define SH7750_CHCR_RS_SCIRX_SC_TO_EA 0x900 /* SCI Receive-Data-Full intr */
+                                            /* transfer request (SCRDR1 */
+                                            /* -> External Addr Space) */
+#define SH7750_CHCR_RS_SCIFTX_EA_TO_SC 0xA00 /* SCIF TX-Data-Empty intr */
+                                             /* transfer request (external */
+                                             /* address space -> SCFTDR1) */
+#define SH7750_CHCR_RS_SCIFRX_SC_TO_EA 0xB00 /* SCIF Receive-Data-Full intr */
+                                             /* transfer request (SCFRDR2 */
+                                             /* -> External Addr Space) */
+#define SH7750_CHCR_RS_TMU2_EA_TO_EA 0xC00 /* TMU Channel 2 (input capture */
+                                           /* interrupt), (external */
+                                           /* address space -> external */
+                                           /* address space) */
+#define SH7750_CHCR_RS_TMU2_EA_TO_OCP 0xD00 /* TMU Channel 2 (input capture */
+                                            /* interrupt), (external */
+                                            /* address space -> on-chip */
+                                            /* peripheral module) */
+#define SH7750_CHCR_RS_TMU2_OCP_TO_EA 0xE00 /* TMU Channel 2 (input capture */
+                                            /* interrupt), (on-chip */
+                                            /* peripheral module -> */
+                                            /* external address space) */
+
+#define SH7750_CHCR_TM 0x00000080 /* Transmit mode: */
+#define SH7750_CHCR_TM_CSTEAL 0x00000000 /* Cycle Steal Mode */
+#define SH7750_CHCR_TM_BURST 0x00000080 /* Burst Mode */
+
+#define SH7750_CHCR_TS 0x00000070 /* Transmit Size: */
+#define SH7750_CHCR_TS_QUAD 0x00000000 /* Quadword Size (64 bits) */
+#define SH7750_CHCR_TS_BYTE 0x00000010 /* Byte Size (8 bit) */
+#define SH7750_CHCR_TS_WORD 0x00000020 /* Word Size (16 bit) */
+#define SH7750_CHCR_TS_LONG 0x00000030 /* Longword Size (32 bit) */
+#define SH7750_CHCR_TS_BLOCK 0x00000040 /* 32-byte block transfer */
+
+#define SH7750_CHCR_IE 0x00000004 /* Interrupt Enable */
+#define SH7750_CHCR_TE 0x00000002 /* Transfer End */
+#define SH7750_CHCR_DE 0x00000001 /* DMAC Enable */
+
+/* DMA Operation Register - DMAOR */
+#define SH7750_DMAOR_REGOFS 0xA00040 /* offset */
+#define SH7750_DMAOR SH7750_P4_REG32(SH7750_DMAOR_REGOFS)
+#define SH7750_DMAOR_A7 SH7750_A7_REG32(SH7750_DMAOR_REGOFS)
+
+#define SH7750_DMAOR_DDT 0x00008000 /* On-Demand Data Transfer Mode */
+
+#define SH7750_DMAOR_PR 0x00000300 /* Priority Mode: */
+#define SH7750_DMAOR_PR_0123 0x00000000 /* CH0 > CH1 > CH2 > CH3 */
+#define SH7750_DMAOR_PR_0231 0x00000100 /* CH0 > CH2 > CH3 > CH1 */
+#define SH7750_DMAOR_PR_2013 0x00000200 /* CH2 > CH0 > CH1 > CH3 */
+#define SH7750_DMAOR_PR_RR 0x00000300 /* Round-robin mode */
+
+#define SH7750_DMAOR_COD 0x00000010 /* Check Overrun for DREQ\ */
+#define SH7750_DMAOR_AE 0x00000004 /* Address Error flag */
+#define SH7750_DMAOR_NMIF 0x00000002 /* NMI Flag */
+#define SH7750_DMAOR_DME 0x00000001 /* DMAC Master Enable */
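Taken together, a DMAC channel is described by SAR/DAR/DMATCR plus a CHCR value OR-ed from the fields above, and the controller as a whole is released by setting DMAOR.DME. The following sketch is for illustration only (the function, its arguments, and the chosen settings are hypothetical; it assumes <stdint.h> and that the register macros resolve to P4 addresses):

    /* Hypothetical channel 0 setup: auto-request memory-to-memory copy,
     * incrementing source and destination, longword units, cycle steal. */
    static void dma0_copy_longwords(uint32_t src, uint32_t dst, uint32_t count)
    {
        *(volatile uint32_t *)SH7750_SAR0 = src;          /* source address */
        *(volatile uint32_t *)SH7750_DAR0 = dst;          /* destination */
        *(volatile uint32_t *)SH7750_DMATCR(0) = count;   /* transfer count */
        *(volatile uint32_t *)SH7750_CHCR0 =
            SH7750_CHCR_RS_AR_EA_TO_EA |   /* auto-request, mem -> mem */
            SH7750_CHCR_SM_INC | SH7750_CHCR_DM_INC |
            SH7750_CHCR_TS_LONG | SH7750_CHCR_TM_CSTEAL |
            SH7750_CHCR_IE | SH7750_CHCR_DE;
        *(volatile uint32_t *)SH7750_DMAOR |= SH7750_DMAOR_DME;
    }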
+
+/*
+ * I/O Ports
+ */
+/* Port Control Register A - PCTRA */
+#define SH7750_PCTRA_REGOFS 0x80002C /* offset */
+#define SH7750_PCTRA SH7750_P4_REG32(SH7750_PCTRA_REGOFS)
+#define SH7750_PCTRA_A7 SH7750_A7_REG32(SH7750_PCTRA_REGOFS)
+
+#define SH7750_PCTRA_PBPUP(n) 0 /* Bit n is pulled up */
+#define SH7750_PCTRA_PBNPUP(n) (1 << ((n) * 2 + 1)) /* Bit n is not pulled up */
+#define SH7750_PCTRA_PBINP(n) 0 /* Bit n is an input */
+#define SH7750_PCTRA_PBOUT(n) (1 << ((n) * 2)) /* Bit n is an output */
+
+/* Port Data Register A - PDTRA(half) */
+#define SH7750_PDTRA_REGOFS 0x800030 /* offset */
+#define SH7750_PDTRA SH7750_P4_REG32(SH7750_PDTRA_REGOFS)
+#define SH7750_PDTRA_A7 SH7750_A7_REG32(SH7750_PDTRA_REGOFS)
+
+#define SH7750_PDTRA_BIT(n) (1 << (n))
+
+/* Port Control Register B - PCTRB */
+#define SH7750_PCTRB_REGOFS 0x800040 /* offset */
+#define SH7750_PCTRB SH7750_P4_REG32(SH7750_PCTRB_REGOFS)
+#define SH7750_PCTRB_A7 SH7750_A7_REG32(SH7750_PCTRB_REGOFS)
+
+#define SH7750_PCTRB_PBPUP(n) 0 /* Bit n is pulled up */
+#define SH7750_PCTRB_PBNPUP(n) (1 << ((n - 16) * 2 + 1)) /* Bit n is not pulled up */
+#define SH7750_PCTRB_PBINP(n) 0 /* Bit n is an input */
+#define SH7750_PCTRB_PBOUT(n) (1 << ((n - 16) * 2)) /* Bit n is an output */
+
+/* Port Data Register B - PDTRB(half) */
+#define SH7750_PDTRB_REGOFS 0x800044 /* offset */
+#define SH7750_PDTRB SH7750_P4_REG32(SH7750_PDTRB_REGOFS)
+#define SH7750_PDTRB_A7 SH7750_A7_REG32(SH7750_PDTRB_REGOFS)
+
+#define SH7750_PDTRB_BIT(n) (1 << ((n) - 16))
+
+/* GPIO Interrupt Control Register - GPIOIC(half) */
+#define SH7750_GPIOIC_REGOFS 0x800048 /* offset */
+#define SH7750_GPIOIC SH7750_P4_REG32(SH7750_GPIOIC_REGOFS)
+#define SH7750_GPIOIC_A7 SH7750_A7_REG32(SH7750_GPIOIC_REGOFS)
+
+#define SH7750_GPIOIC_PTIREN(n) (1 << (n)) /* Port n is used as a GPIO int */
+
+/*
+ * Interrupt Controller - INTC
+ */
+/* Interrupt Control Register - ICR (half) */
+#define SH7750_ICR_REGOFS 0xD00000 /* offset */
+#define SH7750_ICR SH7750_P4_REG32(SH7750_ICR_REGOFS)
+#define SH7750_ICR_A7 SH7750_A7_REG32(SH7750_ICR_REGOFS)
+
+#define SH7750_ICR_NMIL 0x8000 /* NMI Input Level */
+#define SH7750_ICR_MAI 0x4000 /* NMI Interrupt Mask */
+
+#define SH7750_ICR_NMIB 0x0200 /* NMI Block Mode: */
+#define SH7750_ICR_NMIB_BLK 0x0000 /* NMI requests held pending while */
+                                   /* SR.BL bit is set to 1 */
+#define SH7750_ICR_NMIB_NBLK 0x0200 /* NMI requests detected when SR.BL */
+                                    /* bit set to 1 */
+
+#define SH7750_ICR_NMIE 0x0100 /* NMI Edge Select: */
+#define SH7750_ICR_NMIE_FALL 0x0000 /* Interrupt request detected on */
+                                    /* falling edge of NMI input */
+#define SH7750_ICR_NMIE_RISE 0x0100 /* Interrupt request detected on */
+                                    /* rising edge of NMI input */
+
+#define SH7750_ICR_IRLM 0x0080 /* IRL Pin Mode: */
+#define SH7750_ICR_IRLM_ENC 0x0000 /* IRL\ pins used as level-encoded */
+                                   /* interrupt requests */
+#define SH7750_ICR_IRLM_RAW 0x0080 /* IRL\ pins used as four */
+                                   /* independent interrupt requests */
+
+/*
+ * User Break Controller registers
+ */
+#define SH7750_BARA 0x200000 /* Break address register A */
+#define SH7750_BAMRA 0x200004 /* Break address mask register A */
+#define SH7750_BBRA 0x200008 /* Break bus cycle register A */
+#define SH7750_BARB 0x20000c /* Break address register B */
+#define SH7750_BAMRB 0x200010 /* Break address mask register B */
+#define SH7750_BBRB 0x200014 /* Break bus cycle register B */
+#define SH7750_BASRB 0x000018 /* Break ASID register B */
+#define SH7750_BDRB 0x200018 /* Break data register B */
+#define SH7750_BDMRB 0x20001c /* Break data mask register B */
+#define SH7750_BRCR 0x200020 /* Break control register */
+
+#define SH7750_BRCR_UDBE 0x0001 /* User break debug enable bit */
+
+/*
+ * Missing in RTEMS, added for QEMU
+ */
+#define SH7750_BCR3_A7 0x1f800050
+#define SH7750_BCR4_A7 0x1e0a00f0
+
+#endif
diff --git a/hw/sh4/shix.c b/hw/sh4/shix.c
new file mode 100644
index 000000000..aa812512f
--- /dev/null
+++ b/hw/sh4/shix.c
@@ -0,0 +1,85 @@
+/*
+ * SHIX 2.0 board description
+ *
+ * Copyright (c) 2005 Samuel Tardieu
+ *
+ * Permission is hereby granted, free of charge, to any person
obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ +/* + * Shix 2.0 board by Alexis Polti, described at + * https://web.archive.org/web/20070917001736/perso.enst.fr/~polti/realisations/shix20 + * + * More information in target/sh4/README.sh4 + */ +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "cpu.h" +#include "hw/sh4/sh.h" +#include "sysemu/qtest.h" +#include "hw/boards.h" +#include "hw/loader.h" +#include "qemu/error-report.h" + +#define BIOS_FILENAME "shix_bios.bin" +#define BIOS_ADDRESS 0xA0000000 + +static void shix_init(MachineState *machine) +{ + int ret; + SuperHCPU *cpu; + struct SH7750State *s; + MemoryRegion *sysmem = get_system_memory(); + MemoryRegion *rom = g_new(MemoryRegion, 1); + MemoryRegion *sdram = g_new(MemoryRegion, 2); + const char *bios_name = machine->firmware ?: BIOS_FILENAME; + + cpu = SUPERH_CPU(cpu_create(machine->cpu_type)); + + /* Allocate memory space */ + memory_region_init_rom(rom, NULL, "shix.rom", 0x4000, &error_fatal); + memory_region_add_subregion(sysmem, 0x00000000, rom); + memory_region_init_ram(&sdram[0], NULL, "shix.sdram1", 0x01000000, + &error_fatal); + memory_region_add_subregion(sysmem, 0x08000000, &sdram[0]); + memory_region_init_ram(&sdram[1], NULL, "shix.sdram2", 0x01000000, + &error_fatal); + memory_region_add_subregion(sysmem, 0x0c000000, &sdram[1]); + + /* Load BIOS in 0 (and access it through P2, 0xA0000000) */ + ret = load_image_targphys(bios_name, 0, 0x4000); + if (ret < 0 && !qtest_enabled()) { + error_report("Could not load SHIX bios '%s'", bios_name); + exit(1); + } + + /* Register peripherals */ + s = sh7750_init(cpu, sysmem); + /* XXXXX Check success */ + tc58128_init(s, "shix_linux_nand.bin", NULL); +} + +static void shix_machine_init(MachineClass *mc) +{ + mc->desc = "shix card"; + mc->init = shix_init; + mc->is_default = true; + mc->default_cpu_type = TYPE_SH7750R_CPU; +} + +DEFINE_MACHINE("shix", shix_machine_init) diff --git a/hw/sh4/trace-events b/hw/sh4/trace-events new file mode 100644 index 000000000..4b61cd56c --- /dev/null +++ b/hw/sh4/trace-events @@ -0,0 +1,3 @@ +# sh7750.c +sh7750_porta(uint16_t prev, uint16_t cur, uint16_t pdtr, uint16_t pctr) "porta changed from 0x%04x to 0x%04x\npdtra=0x%04x, pctra=0x%08x" +sh7750_portb(uint16_t prev, uint16_t cur, uint16_t pdtr, uint16_t pctr) "portb changed from 0x%04x to 0x%04x\npdtrb=0x%04x, pctrb=0x%08x" diff --git a/hw/sh4/trace.h b/hw/sh4/trace.h new file mode 100644 index 000000000..e2c13323b --- /dev/null +++ b/hw/sh4/trace.h @@ -0,0 +1 @@ +#include "trace/trace-hw_sh4.h" diff --git 
a/hw/smbios/Kconfig b/hw/smbios/Kconfig
new file mode 100644
index 000000000..553adf4bf
--- /dev/null
+++ b/hw/smbios/Kconfig
@@ -0,0 +1,2 @@
+config SMBIOS
+    bool
diff --git a/hw/smbios/meson.build b/hw/smbios/meson.build
new file mode 100644
index 000000000..9e762c710
--- /dev/null
+++ b/hw/smbios/meson.build
@@ -0,0 +1,13 @@
+smbios_ss = ss.source_set()
+smbios_ss.add(files('smbios.c'))
+smbios_ss.add(when: 'CONFIG_IPMI',
+              if_true: files('smbios_type_38.c'),
+              if_false: files('smbios_type_38-stub.c'))
+
+softmmu_ss.add_all(when: 'CONFIG_SMBIOS', if_true: smbios_ss)
+softmmu_ss.add(when: 'CONFIG_SMBIOS', if_false: files('smbios-stub.c'))
+
+softmmu_ss.add(when: 'CONFIG_ALL', if_true: files(
+  'smbios-stub.c',
+  'smbios_type_38-stub.c',
+))
diff --git a/hw/smbios/smbios-stub.c b/hw/smbios/smbios-stub.c
new file mode 100644
index 000000000..64e5ba93e
--- /dev/null
+++ b/hw/smbios/smbios-stub.c
@@ -0,0 +1,31 @@
+/*
+ * SMBIOS stubs for platforms that don't support SMBIOS.
+ *
+ * Copyright (c) 2010 Isaku Yamahata
+ *                    VA Linux Systems Japan K.K.
+ * Copyright (c) 2016 Leif Lindholm
+ *                    Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "qapi/qmp/qerror.h"
+#include "hw/firmware/smbios.h"
+
+void smbios_entry_add(QemuOpts *opts, Error **errp)
+{
+    error_setg(errp, QERR_UNSUPPORTED);
+}
diff --git a/hw/smbios/smbios.c b/hw/smbios/smbios.c
new file mode 100644
index 000000000..7397e5673
--- /dev/null
+++ b/hw/smbios/smbios.c
@@ -0,0 +1,1358 @@
+/*
+ * SMBIOS Support
+ *
+ * Copyright (C) 2009 Hewlett-Packard Development Company, L.P.
+ * Copyright (C) 2013 Red Hat, Inc.
+ *
+ * Authors:
+ *  Alex Williamson
+ *  Markus Armbruster
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.  See
+ * the COPYING file in the top-level directory.
+ *
+ * Contributions after 2012-01-13 are licensed under the terms of the
+ * GNU GPL, version 2 or (at your option) any later version.
+ */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "qapi/error.h" +#include "qemu/config-file.h" +#include "qemu/error-report.h" +#include "qemu/module.h" +#include "qemu/option.h" +#include "sysemu/sysemu.h" +#include "qemu/uuid.h" +#include "hw/firmware/smbios.h" +#include "hw/loader.h" +#include "hw/boards.h" +#include "hw/pci/pci_bus.h" +#include "smbios_build.h" + +/* legacy structures and constants for <= 2.0 machines */ +struct smbios_header { + uint16_t length; + uint8_t type; +} QEMU_PACKED; + +struct smbios_field { + struct smbios_header header; + uint8_t type; + uint16_t offset; + uint8_t data[]; +} QEMU_PACKED; + +struct smbios_table { + struct smbios_header header; + uint8_t data[]; +} QEMU_PACKED; + +#define SMBIOS_FIELD_ENTRY 0 +#define SMBIOS_TABLE_ENTRY 1 + +static uint8_t *smbios_entries; +static size_t smbios_entries_len; +static bool smbios_legacy = true; +static bool smbios_uuid_encoded = true; +/* end: legacy structures & constants for <= 2.0 machines */ + + +uint8_t *smbios_tables; +size_t smbios_tables_len; +unsigned smbios_table_max; +unsigned smbios_table_cnt; +static SmbiosEntryPointType smbios_ep_type = SMBIOS_ENTRY_POINT_21; + +static SmbiosEntryPoint ep; + +static int smbios_type4_count = 0; +static bool smbios_immutable; +static bool smbios_have_defaults; +static uint32_t smbios_cpuid_version, smbios_cpuid_features, smbios_smp_sockets; + +static DECLARE_BITMAP(have_binfile_bitmap, SMBIOS_MAX_TYPE+1); +static DECLARE_BITMAP(have_fields_bitmap, SMBIOS_MAX_TYPE+1); + +static struct { + const char *vendor, *version, *date; + bool have_major_minor, uefi; + uint8_t major, minor; +} type0; + +static struct { + const char *manufacturer, *product, *version, *serial, *sku, *family; + /* uuid is in qemu_uuid */ +} type1; + +static struct { + const char *manufacturer, *product, *version, *serial, *asset, *location; +} type2; + +static struct { + const char *manufacturer, *version, *serial, *asset, *sku; +} type3; + +/* + * SVVP requires max_speed and current_speed to be set and not being + * 0 which counts as unknown (SMBIOS 3.1.0/Table 21). Set the + * default value to 2000MHz as we did before. 
+ */ +#define DEFAULT_CPU_SPEED 2000 + +static struct { + const char *sock_pfx, *manufacturer, *version, *serial, *asset, *part; + uint64_t max_speed; + uint64_t current_speed; +} type4 = { + .max_speed = DEFAULT_CPU_SPEED, + .current_speed = DEFAULT_CPU_SPEED +}; + +static struct { + size_t nvalues; + char **values; +} type11; + +static struct { + const char *loc_pfx, *bank, *manufacturer, *serial, *asset, *part; + uint16_t speed; +} type17; + +static QEnumLookup type41_kind_lookup = { + .array = (const char *const[]) { + "other", + "unknown", + "video", + "scsi", + "ethernet", + "tokenring", + "sound", + "pata", + "sata", + "sas", + }, + .size = 10 +}; +struct type41_instance { + const char *designation, *pcidev; + uint8_t instance, kind; + QTAILQ_ENTRY(type41_instance) next; +}; +static QTAILQ_HEAD(, type41_instance) type41 = QTAILQ_HEAD_INITIALIZER(type41); + +static QemuOptsList qemu_smbios_opts = { + .name = "smbios", + .head = QTAILQ_HEAD_INITIALIZER(qemu_smbios_opts.head), + .desc = { + /* + * no elements => accept any params + * validation will happen later + */ + { /* end of list */ } + } +}; + +static const QemuOptDesc qemu_smbios_file_opts[] = { + { + .name = "file", + .type = QEMU_OPT_STRING, + .help = "binary file containing an SMBIOS element", + }, + { /* end of list */ } +}; + +static const QemuOptDesc qemu_smbios_type0_opts[] = { + { + .name = "type", + .type = QEMU_OPT_NUMBER, + .help = "SMBIOS element type", + },{ + .name = "vendor", + .type = QEMU_OPT_STRING, + .help = "vendor name", + },{ + .name = "version", + .type = QEMU_OPT_STRING, + .help = "version number", + },{ + .name = "date", + .type = QEMU_OPT_STRING, + .help = "release date", + },{ + .name = "release", + .type = QEMU_OPT_STRING, + .help = "revision number", + },{ + .name = "uefi", + .type = QEMU_OPT_BOOL, + .help = "uefi support", + }, + { /* end of list */ } +}; + +static const QemuOptDesc qemu_smbios_type1_opts[] = { + { + .name = "type", + .type = QEMU_OPT_NUMBER, + .help = "SMBIOS element type", + },{ + .name = "manufacturer", + .type = QEMU_OPT_STRING, + .help = "manufacturer name", + },{ + .name = "product", + .type = QEMU_OPT_STRING, + .help = "product name", + },{ + .name = "version", + .type = QEMU_OPT_STRING, + .help = "version number", + },{ + .name = "serial", + .type = QEMU_OPT_STRING, + .help = "serial number", + },{ + .name = "uuid", + .type = QEMU_OPT_STRING, + .help = "UUID", + },{ + .name = "sku", + .type = QEMU_OPT_STRING, + .help = "SKU number", + },{ + .name = "family", + .type = QEMU_OPT_STRING, + .help = "family name", + }, + { /* end of list */ } +}; + +static const QemuOptDesc qemu_smbios_type2_opts[] = { + { + .name = "type", + .type = QEMU_OPT_NUMBER, + .help = "SMBIOS element type", + },{ + .name = "manufacturer", + .type = QEMU_OPT_STRING, + .help = "manufacturer name", + },{ + .name = "product", + .type = QEMU_OPT_STRING, + .help = "product name", + },{ + .name = "version", + .type = QEMU_OPT_STRING, + .help = "version number", + },{ + .name = "serial", + .type = QEMU_OPT_STRING, + .help = "serial number", + },{ + .name = "asset", + .type = QEMU_OPT_STRING, + .help = "asset tag number", + },{ + .name = "location", + .type = QEMU_OPT_STRING, + .help = "location in chassis", + }, + { /* end of list */ } +}; + +static const QemuOptDesc qemu_smbios_type3_opts[] = { + { + .name = "type", + .type = QEMU_OPT_NUMBER, + .help = "SMBIOS element type", + },{ + .name = "manufacturer", + .type = QEMU_OPT_STRING, + .help = "manufacturer name", + },{ + .name = "version", + .type = 
QEMU_OPT_STRING,
+        .help = "version number",
+    },{
+        .name = "serial",
+        .type = QEMU_OPT_STRING,
+        .help = "serial number",
+    },{
+        .name = "asset",
+        .type = QEMU_OPT_STRING,
+        .help = "asset tag number",
+    },{
+        .name = "sku",
+        .type = QEMU_OPT_STRING,
+        .help = "SKU number",
+    },
+    { /* end of list */ }
+};
+
+static const QemuOptDesc qemu_smbios_type4_opts[] = {
+    {
+        .name = "type",
+        .type = QEMU_OPT_NUMBER,
+        .help = "SMBIOS element type",
+    },{
+        .name = "sock_pfx",
+        .type = QEMU_OPT_STRING,
+        .help = "socket designation string prefix",
+    },{
+        .name = "manufacturer",
+        .type = QEMU_OPT_STRING,
+        .help = "manufacturer name",
+    },{
+        .name = "version",
+        .type = QEMU_OPT_STRING,
+        .help = "version number",
+    },{
+        .name = "max-speed",
+        .type = QEMU_OPT_NUMBER,
+        .help = "max speed in MHz",
+    },{
+        .name = "current-speed",
+        .type = QEMU_OPT_NUMBER,
+        .help = "speed at system boot in MHz",
+    },{
+        .name = "serial",
+        .type = QEMU_OPT_STRING,
+        .help = "serial number",
+    },{
+        .name = "asset",
+        .type = QEMU_OPT_STRING,
+        .help = "asset tag number",
+    },{
+        .name = "part",
+        .type = QEMU_OPT_STRING,
+        .help = "part number",
+    },
+    { /* end of list */ }
+};
+
+static const QemuOptDesc qemu_smbios_type11_opts[] = {
+    {
+        .name = "value",
+        .type = QEMU_OPT_STRING,
+        .help = "OEM string data",
+    },
+    {
+        .name = "path",
+        .type = QEMU_OPT_STRING,
+        .help = "OEM string data from file",
+    },
+    { /* end of list */ }
+};
+
+static const QemuOptDesc qemu_smbios_type17_opts[] = {
+    {
+        .name = "type",
+        .type = QEMU_OPT_NUMBER,
+        .help = "SMBIOS element type",
+    },{
+        .name = "loc_pfx",
+        .type = QEMU_OPT_STRING,
+        .help = "device locator string prefix",
+    },{
+        .name = "bank",
+        .type = QEMU_OPT_STRING,
+        .help = "bank locator string",
+    },{
+        .name = "manufacturer",
+        .type = QEMU_OPT_STRING,
+        .help = "manufacturer name",
+    },{
+        .name = "serial",
+        .type = QEMU_OPT_STRING,
+        .help = "serial number",
+    },{
+        .name = "asset",
+        .type = QEMU_OPT_STRING,
+        .help = "asset tag number",
+    },{
+        .name = "part",
+        .type = QEMU_OPT_STRING,
+        .help = "part number",
+    },{
+        .name = "speed",
+        .type = QEMU_OPT_NUMBER,
+        .help = "maximum capable speed",
+    },
+    { /* end of list */ }
+};
+
+static const QemuOptDesc qemu_smbios_type41_opts[] = {
+    {
+        .name = "type",
+        .type = QEMU_OPT_NUMBER,
+        .help = "SMBIOS element type",
+    },{
+        .name = "designation",
+        .type = QEMU_OPT_STRING,
+        .help = "reference designation string",
+    },{
+        .name = "kind",
+        .type = QEMU_OPT_STRING,
+        .help = "device type",
+        .def_value_str = "other",
+    },{
+        .name = "instance",
+        .type = QEMU_OPT_NUMBER,
+        .help = "device type instance",
+    },{
+        .name = "pcidev",
+        .type = QEMU_OPT_STRING,
+        .help = "PCI device",
+    },
+    { /* end of list */ }
+};
+
+static void smbios_register_config(void)
+{
+    qemu_add_opts(&qemu_smbios_opts);
+}
+
+opts_init(smbios_register_config);
+
+/*
+ * The SMBIOS 2.1 "structure table length" field in the
+ * entry point uses a 16-bit integer, so we're limited
+ * in total table size
+ */
+#define SMBIOS_21_MAX_TABLES_LEN 0xffff
+
+static void smbios_validate_table(MachineState *ms)
+{
+    uint32_t expect_t4_count = smbios_legacy ?
+ ms->smp.cpus : smbios_smp_sockets; + + if (smbios_type4_count && smbios_type4_count != expect_t4_count) { + error_report("Expected %d SMBIOS Type 4 tables, got %d instead", + expect_t4_count, smbios_type4_count); + exit(1); + } + + if (smbios_ep_type == SMBIOS_ENTRY_POINT_21 && + smbios_tables_len > SMBIOS_21_MAX_TABLES_LEN) { + error_report("SMBIOS 2.1 table length %zu exceeds %d", + smbios_tables_len, SMBIOS_21_MAX_TABLES_LEN); + exit(1); + } +} + + +/* legacy setup functions for <= 2.0 machines */ +static void smbios_add_field(int type, int offset, const void *data, size_t len) +{ + struct smbios_field *field; + + if (!smbios_entries) { + smbios_entries_len = sizeof(uint16_t); + smbios_entries = g_malloc0(smbios_entries_len); + } + smbios_entries = g_realloc(smbios_entries, smbios_entries_len + + sizeof(*field) + len); + field = (struct smbios_field *)(smbios_entries + smbios_entries_len); + field->header.type = SMBIOS_FIELD_ENTRY; + field->header.length = cpu_to_le16(sizeof(*field) + len); + + field->type = type; + field->offset = cpu_to_le16(offset); + memcpy(field->data, data, len); + + smbios_entries_len += sizeof(*field) + len; + (*(uint16_t *)smbios_entries) = + cpu_to_le16(le16_to_cpu(*(uint16_t *)smbios_entries) + 1); +} + +static void smbios_maybe_add_str(int type, int offset, const char *data) +{ + if (data) { + smbios_add_field(type, offset, data, strlen(data) + 1); + } +} + +static void smbios_build_type_0_fields(void) +{ + smbios_maybe_add_str(0, offsetof(struct smbios_type_0, vendor_str), + type0.vendor); + smbios_maybe_add_str(0, offsetof(struct smbios_type_0, bios_version_str), + type0.version); + smbios_maybe_add_str(0, offsetof(struct smbios_type_0, + bios_release_date_str), + type0.date); + if (type0.have_major_minor) { + smbios_add_field(0, offsetof(struct smbios_type_0, + system_bios_major_release), + &type0.major, 1); + smbios_add_field(0, offsetof(struct smbios_type_0, + system_bios_minor_release), + &type0.minor, 1); + } +} + +static void smbios_build_type_1_fields(void) +{ + smbios_maybe_add_str(1, offsetof(struct smbios_type_1, manufacturer_str), + type1.manufacturer); + smbios_maybe_add_str(1, offsetof(struct smbios_type_1, product_name_str), + type1.product); + smbios_maybe_add_str(1, offsetof(struct smbios_type_1, version_str), + type1.version); + smbios_maybe_add_str(1, offsetof(struct smbios_type_1, serial_number_str), + type1.serial); + smbios_maybe_add_str(1, offsetof(struct smbios_type_1, sku_number_str), + type1.sku); + smbios_maybe_add_str(1, offsetof(struct smbios_type_1, family_str), + type1.family); + if (qemu_uuid_set) { + /* We don't encode the UUID in the "wire format" here because this + * function is for legacy mode and needs to keep the guest ABI, and + * because we don't know what's the SMBIOS version advertised by the + * BIOS. 
+ */ + smbios_add_field(1, offsetof(struct smbios_type_1, uuid), + &qemu_uuid, 16); + } +} + +uint8_t *smbios_get_table_legacy(MachineState *ms, size_t *length) +{ + if (!smbios_legacy) { + *length = 0; + return NULL; + } + + if (!smbios_immutable) { + smbios_build_type_0_fields(); + smbios_build_type_1_fields(); + smbios_validate_table(ms); + smbios_immutable = true; + } + *length = smbios_entries_len; + return smbios_entries; +} +/* end: legacy setup functions for <= 2.0 machines */ + + +bool smbios_skip_table(uint8_t type, bool required_table) +{ + if (test_bit(type, have_binfile_bitmap)) { + return true; /* user provided their own binary blob(s) */ + } + if (test_bit(type, have_fields_bitmap)) { + return false; /* user provided fields via command line */ + } + if (smbios_have_defaults && required_table) { + return false; /* we're building tables, and this one's required */ + } + return true; +} + +static void smbios_build_type_0_table(void) +{ + SMBIOS_BUILD_TABLE_PRE(0, 0x000, false); /* optional, leave up to BIOS */ + + SMBIOS_TABLE_SET_STR(0, vendor_str, type0.vendor); + SMBIOS_TABLE_SET_STR(0, bios_version_str, type0.version); + + t->bios_starting_address_segment = cpu_to_le16(0xE800); /* from SeaBIOS */ + + SMBIOS_TABLE_SET_STR(0, bios_release_date_str, type0.date); + + t->bios_rom_size = 0; /* hardcoded in SeaBIOS with FIXME comment */ + + t->bios_characteristics = cpu_to_le64(0x08); /* Not supported */ + t->bios_characteristics_extension_bytes[0] = 0; + t->bios_characteristics_extension_bytes[1] = 0x14; /* TCD/SVVP | VM */ + if (type0.uefi) { + t->bios_characteristics_extension_bytes[1] |= 0x08; /* |= UEFI */ + } + + if (type0.have_major_minor) { + t->system_bios_major_release = type0.major; + t->system_bios_minor_release = type0.minor; + } else { + t->system_bios_major_release = 0; + t->system_bios_minor_release = 0; + } + + /* hardcoded in SeaBIOS */ + t->embedded_controller_major_release = 0xFF; + t->embedded_controller_minor_release = 0xFF; + + SMBIOS_BUILD_TABLE_POST; +} + +/* Encode UUID from the big endian encoding described on RFC4122 to the wire + * format specified by SMBIOS version 2.6. 
+ */ +static void smbios_encode_uuid(struct smbios_uuid *uuid, QemuUUID *in) +{ + memcpy(uuid, in, 16); + if (smbios_uuid_encoded) { + uuid->time_low = bswap32(uuid->time_low); + uuid->time_mid = bswap16(uuid->time_mid); + uuid->time_hi_and_version = bswap16(uuid->time_hi_and_version); + } +} + +static void smbios_build_type_1_table(void) +{ + SMBIOS_BUILD_TABLE_PRE(1, 0x100, true); /* required */ + + SMBIOS_TABLE_SET_STR(1, manufacturer_str, type1.manufacturer); + SMBIOS_TABLE_SET_STR(1, product_name_str, type1.product); + SMBIOS_TABLE_SET_STR(1, version_str, type1.version); + SMBIOS_TABLE_SET_STR(1, serial_number_str, type1.serial); + if (qemu_uuid_set) { + smbios_encode_uuid(&t->uuid, &qemu_uuid); + } else { + memset(&t->uuid, 0, 16); + } + t->wake_up_type = 0x06; /* power switch */ + SMBIOS_TABLE_SET_STR(1, sku_number_str, type1.sku); + SMBIOS_TABLE_SET_STR(1, family_str, type1.family); + + SMBIOS_BUILD_TABLE_POST; +} + +static void smbios_build_type_2_table(void) +{ + SMBIOS_BUILD_TABLE_PRE(2, 0x200, false); /* optional */ + + SMBIOS_TABLE_SET_STR(2, manufacturer_str, type2.manufacturer); + SMBIOS_TABLE_SET_STR(2, product_str, type2.product); + SMBIOS_TABLE_SET_STR(2, version_str, type2.version); + SMBIOS_TABLE_SET_STR(2, serial_number_str, type2.serial); + SMBIOS_TABLE_SET_STR(2, asset_tag_number_str, type2.asset); + t->feature_flags = 0x01; /* Motherboard */ + SMBIOS_TABLE_SET_STR(2, location_str, type2.location); + t->chassis_handle = cpu_to_le16(0x300); /* Type 3 (System enclosure) */ + t->board_type = 0x0A; /* Motherboard */ + t->contained_element_count = 0; + + SMBIOS_BUILD_TABLE_POST; +} + +static void smbios_build_type_3_table(void) +{ + SMBIOS_BUILD_TABLE_PRE(3, 0x300, true); /* required */ + + SMBIOS_TABLE_SET_STR(3, manufacturer_str, type3.manufacturer); + t->type = 0x01; /* Other */ + SMBIOS_TABLE_SET_STR(3, version_str, type3.version); + SMBIOS_TABLE_SET_STR(3, serial_number_str, type3.serial); + SMBIOS_TABLE_SET_STR(3, asset_tag_number_str, type3.asset); + t->boot_up_state = 0x03; /* Safe */ + t->power_supply_state = 0x03; /* Safe */ + t->thermal_state = 0x03; /* Safe */ + t->security_status = 0x02; /* Unknown */ + t->oem_defined = cpu_to_le32(0); + t->height = 0; + t->number_of_power_cords = 0; + t->contained_element_count = 0; + t->contained_element_record_length = 0; + SMBIOS_TABLE_SET_STR(3, sku_number_str, type3.sku); + + SMBIOS_BUILD_TABLE_POST; +} + +static void smbios_build_type_4_table(MachineState *ms, unsigned instance) +{ + char sock_str[128]; + + SMBIOS_BUILD_TABLE_PRE(4, 0x400 + instance, true); /* required */ + + snprintf(sock_str, sizeof(sock_str), "%s%2x", type4.sock_pfx, instance); + SMBIOS_TABLE_SET_STR(4, socket_designation_str, sock_str); + t->processor_type = 0x03; /* CPU */ + t->processor_family = 0x01; /* Other */ + SMBIOS_TABLE_SET_STR(4, processor_manufacturer_str, type4.manufacturer); + t->processor_id[0] = cpu_to_le32(smbios_cpuid_version); + t->processor_id[1] = cpu_to_le32(smbios_cpuid_features); + SMBIOS_TABLE_SET_STR(4, processor_version_str, type4.version); + t->voltage = 0; + t->external_clock = cpu_to_le16(0); /* Unknown */ + t->max_speed = cpu_to_le16(type4.max_speed); + t->current_speed = cpu_to_le16(type4.current_speed); + t->status = 0x41; /* Socket populated, CPU enabled */ + t->processor_upgrade = 0x01; /* Other */ + t->l1_cache_handle = cpu_to_le16(0xFFFF); /* N/A */ + t->l2_cache_handle = cpu_to_le16(0xFFFF); /* N/A */ + t->l3_cache_handle = cpu_to_le16(0xFFFF); /* N/A */ + SMBIOS_TABLE_SET_STR(4, serial_number_str, 
type4.serial); + SMBIOS_TABLE_SET_STR(4, asset_tag_number_str, type4.asset); + SMBIOS_TABLE_SET_STR(4, part_number_str, type4.part); + t->core_count = t->core_enabled = ms->smp.cores; + t->thread_count = ms->smp.threads; + t->processor_characteristics = cpu_to_le16(0x02); /* Unknown */ + t->processor_family2 = cpu_to_le16(0x01); /* Other */ + + SMBIOS_BUILD_TABLE_POST; + smbios_type4_count++; +} + +static void smbios_build_type_11_table(void) +{ + char count_str[128]; + size_t i; + + if (type11.nvalues == 0) { + return; + } + + SMBIOS_BUILD_TABLE_PRE(11, 0xe00, true); /* required */ + + snprintf(count_str, sizeof(count_str), "%zu", type11.nvalues); + t->count = type11.nvalues; + + for (i = 0; i < type11.nvalues; i++) { + SMBIOS_TABLE_SET_STR_LIST(11, type11.values[i]); + g_free(type11.values[i]); + type11.values[i] = NULL; + } + + SMBIOS_BUILD_TABLE_POST; +} + +#define MAX_T16_STD_SZ 0x80000000 /* 2T in Kilobytes */ + +static void smbios_build_type_16_table(unsigned dimm_cnt) +{ + uint64_t size_kb; + + SMBIOS_BUILD_TABLE_PRE(16, 0x1000, true); /* required */ + + t->location = 0x01; /* Other */ + t->use = 0x03; /* System memory */ + t->error_correction = 0x06; /* Multi-bit ECC (for Microsoft, per SeaBIOS) */ + size_kb = QEMU_ALIGN_UP(current_machine->ram_size, KiB) / KiB; + if (size_kb < MAX_T16_STD_SZ) { + t->maximum_capacity = cpu_to_le32(size_kb); + t->extended_maximum_capacity = cpu_to_le64(0); + } else { + t->maximum_capacity = cpu_to_le32(MAX_T16_STD_SZ); + t->extended_maximum_capacity = cpu_to_le64(current_machine->ram_size); + } + t->memory_error_information_handle = cpu_to_le16(0xFFFE); /* Not provided */ + t->number_of_memory_devices = cpu_to_le16(dimm_cnt); + + SMBIOS_BUILD_TABLE_POST; +} + +#define MAX_T17_STD_SZ 0x7FFF /* (32G - 1M), in Megabytes */ +#define MAX_T17_EXT_SZ 0x80000000 /* 2P, in Megabytes */ + +static void smbios_build_type_17_table(unsigned instance, uint64_t size) +{ + char loc_str[128]; + uint64_t size_mb; + + SMBIOS_BUILD_TABLE_PRE(17, 0x1100 + instance, true); /* required */ + + t->physical_memory_array_handle = cpu_to_le16(0x1000); /* Type 16 above */ + t->memory_error_information_handle = cpu_to_le16(0xFFFE); /* Not provided */ + t->total_width = cpu_to_le16(0xFFFF); /* Unknown */ + t->data_width = cpu_to_le16(0xFFFF); /* Unknown */ + size_mb = QEMU_ALIGN_UP(size, MiB) / MiB; + if (size_mb < MAX_T17_STD_SZ) { + t->size = cpu_to_le16(size_mb); + t->extended_size = cpu_to_le32(0); + } else { + assert(size_mb < MAX_T17_EXT_SZ); + t->size = cpu_to_le16(MAX_T17_STD_SZ); + t->extended_size = cpu_to_le32(size_mb); + } + t->form_factor = 0x09; /* DIMM */ + t->device_set = 0; /* Not in a set */ + snprintf(loc_str, sizeof(loc_str), "%s %d", type17.loc_pfx, instance); + SMBIOS_TABLE_SET_STR(17, device_locator_str, loc_str); + SMBIOS_TABLE_SET_STR(17, bank_locator_str, type17.bank); + t->memory_type = 0x07; /* RAM */ + t->type_detail = cpu_to_le16(0x02); /* Other */ + t->speed = cpu_to_le16(type17.speed); + SMBIOS_TABLE_SET_STR(17, manufacturer_str, type17.manufacturer); + SMBIOS_TABLE_SET_STR(17, serial_number_str, type17.serial); + SMBIOS_TABLE_SET_STR(17, asset_tag_number_str, type17.asset); + SMBIOS_TABLE_SET_STR(17, part_number_str, type17.part); + t->attributes = 0; /* Unknown */ + t->configured_clock_speed = t->speed; /* reuse value for max speed */ + t->minimum_voltage = cpu_to_le16(0); /* Unknown */ + t->maximum_voltage = cpu_to_le16(0); /* Unknown */ + t->configured_voltage = cpu_to_le16(0); /* Unknown */ + + SMBIOS_BUILD_TABLE_POST; +} + +static void 
smbios_build_type_19_table(unsigned instance,
+                                       uint64_t start, uint64_t size)
+{
+    uint64_t end, start_kb, end_kb;
+
+    SMBIOS_BUILD_TABLE_PRE(19, 0x1300 + instance, true); /* required */
+
+    end = start + size - 1;
+    assert(end > start);
+    start_kb = start / KiB;
+    end_kb = end / KiB;
+    if (start_kb < UINT32_MAX && end_kb < UINT32_MAX) {
+        t->starting_address = cpu_to_le32(start_kb);
+        t->ending_address = cpu_to_le32(end_kb);
+        t->extended_starting_address =
+            t->extended_ending_address = cpu_to_le64(0);
+    } else {
+        t->starting_address = t->ending_address = cpu_to_le32(UINT32_MAX);
+        t->extended_starting_address = cpu_to_le64(start);
+        t->extended_ending_address = cpu_to_le64(end);
+    }
+    t->memory_array_handle = cpu_to_le16(0x1000); /* Type 16 above */
+    t->partition_width = 1; /* One device per row */
+
+    SMBIOS_BUILD_TABLE_POST;
+}
+
+static void smbios_build_type_32_table(void)
+{
+    SMBIOS_BUILD_TABLE_PRE(32, 0x2000, true); /* required */
+
+    memset(t->reserved, 0, 6);
+    t->boot_status = 0; /* No errors detected */
+
+    SMBIOS_BUILD_TABLE_POST;
+}
+
+static void smbios_build_type_41_table(Error **errp)
+{
+    unsigned instance = 0;
+    struct type41_instance *t41;
+
+    QTAILQ_FOREACH(t41, &type41, next) {
+        SMBIOS_BUILD_TABLE_PRE(41, 0x2900 + instance, true);
+
+        SMBIOS_TABLE_SET_STR(41, reference_designation_str, t41->designation);
+        t->device_type = t41->kind;
+        t->device_type_instance = t41->instance;
+        t->segment_group_number = cpu_to_le16(0);
+        t->bus_number = 0;
+        t->device_number = 0;
+
+        if (t41->pcidev) {
+            PCIDevice *pdev = NULL;
+            int rc = pci_qdev_find_device(t41->pcidev, &pdev);
+            if (rc != 0) {
+                error_setg(errp,
+                           "No PCI device %s for SMBIOS type 41 entry %s",
+                           t41->pcidev, t41->designation);
+                return;
+            }
+            /*
+             * We only handle the case where the device is attached to
+             * the PCI root bus. The general case is more complex as
+             * bridges are enumerated later and the table would need
+             * to be updated at this moment.
+             */
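For reference, these type 41 entries are populated from -smbios command-line options (parsed by smbios_entry_add() further below); a usage sketch, with an invented device id and designation, and "..." standing for the rest of the command line:

    qemu-system-x86_64 ... \
        -device e1000,id=nic0 \
        -smbios type=41,designation='Onboard LAN',instance=1,kind=ethernet,pcidev=nic0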
+            if (!pci_bus_is_root(pci_get_bus(pdev))) {
+                error_setg(errp,
+                           "Cannot create type 41 entry for PCI device %s: "
+                           "not attached to the root bus",
+                           t41->pcidev);
+                return;
+            }
+            t->segment_group_number = cpu_to_le16(0);
+            t->bus_number = pci_dev_bus_num(pdev);
+            t->device_number = pdev->devfn;
+        }
+
+        SMBIOS_BUILD_TABLE_POST;
+        instance++;
+    }
+}
+
+static void smbios_build_type_127_table(void)
+{
+    SMBIOS_BUILD_TABLE_PRE(127, 0x7F00, true); /* required */
+    SMBIOS_BUILD_TABLE_POST;
+}
+
+void smbios_set_cpuid(uint32_t version, uint32_t features)
+{
+    smbios_cpuid_version = version;
+    smbios_cpuid_features = features;
+}
+
+#define SMBIOS_SET_DEFAULT(field, value) \
+    if (!field) { \
+        field = value; \
+    }
+
+void smbios_set_defaults(const char *manufacturer, const char *product,
+                         const char *version, bool legacy_mode,
+                         bool uuid_encoded, SmbiosEntryPointType ep_type)
+{
+    smbios_have_defaults = true;
+    smbios_legacy = legacy_mode;
+    smbios_uuid_encoded = uuid_encoded;
+    smbios_ep_type = ep_type;
+
+    /* drop unwanted version of command-line file blob(s) */
+    if (smbios_legacy) {
+        g_free(smbios_tables);
+        /* in legacy mode, also complain if fields were given for types > 1 */
+        if (find_next_bit(have_fields_bitmap,
+                          SMBIOS_MAX_TYPE+1, 2) < SMBIOS_MAX_TYPE+1) {
+            error_report("can't process fields for smbios "
+                         "types > 1 on machine versions < 2.1!");
+            exit(1);
+        }
+    } else {
+        g_free(smbios_entries);
+    }
+
+    SMBIOS_SET_DEFAULT(type1.manufacturer, manufacturer);
+    SMBIOS_SET_DEFAULT(type1.product, product);
+    SMBIOS_SET_DEFAULT(type1.version, version);
+    SMBIOS_SET_DEFAULT(type2.manufacturer, manufacturer);
+    SMBIOS_SET_DEFAULT(type2.product, product);
+    SMBIOS_SET_DEFAULT(type2.version, version);
+    SMBIOS_SET_DEFAULT(type3.manufacturer, manufacturer);
+    SMBIOS_SET_DEFAULT(type3.version, version);
+    SMBIOS_SET_DEFAULT(type4.sock_pfx, "CPU");
+    SMBIOS_SET_DEFAULT(type4.manufacturer, manufacturer);
+    SMBIOS_SET_DEFAULT(type4.version, version);
+    SMBIOS_SET_DEFAULT(type17.loc_pfx, "DIMM");
+    SMBIOS_SET_DEFAULT(type17.manufacturer, manufacturer);
+}
+
+static void smbios_entry_point_setup(void)
+{
+    switch (smbios_ep_type) {
+    case SMBIOS_ENTRY_POINT_21:
+        memcpy(ep.ep21.anchor_string, "_SM_", 4);
+        memcpy(ep.ep21.intermediate_anchor_string, "_DMI_", 5);
+        ep.ep21.length = sizeof(struct smbios_21_entry_point);
+        ep.ep21.entry_point_revision = 0; /* formatted_area reserved */
+        memset(ep.ep21.formatted_area, 0, 5);
+
+        /* compliant with smbios spec v2.8 */
+        ep.ep21.smbios_major_version = 2;
+        ep.ep21.smbios_minor_version = 8;
+        ep.ep21.smbios_bcd_revision = 0x28;
+
+        /* set during table construction, but BIOS may override: */
+        ep.ep21.structure_table_length = cpu_to_le16(smbios_tables_len);
+        ep.ep21.max_structure_size = cpu_to_le16(smbios_table_max);
+        ep.ep21.number_of_structures = cpu_to_le16(smbios_table_cnt);
+
+        /* BIOS must recalculate */
+        ep.ep21.checksum = 0;
+        ep.ep21.intermediate_checksum = 0;
+        ep.ep21.structure_table_address = cpu_to_le32(0);
+
+        break;
+    case SMBIOS_ENTRY_POINT_30:
+        memcpy(ep.ep30.anchor_string, "_SM3_", 5);
+        ep.ep30.length = sizeof(struct smbios_30_entry_point);
+        ep.ep30.entry_point_revision = 1;
+        ep.ep30.reserved = 0;
+
+        /* compliant with smbios spec 3.0 */
+        ep.ep30.smbios_major_version = 3;
+        ep.ep30.smbios_minor_version = 0;
+        ep.ep30.smbios_doc_rev = 0;
+
+        /* set during table construct, but BIOS might override */
+        ep.ep30.structure_table_max_size = cpu_to_le32(smbios_tables_len);
+
+        /* BIOS must recalculate */
+        ep.ep30.checksum = 0;
+        
ep.ep30.structure_table_address = cpu_to_le64(0); + + break; + default: + abort(); + break; + } +} + +void smbios_get_tables(MachineState *ms, + const struct smbios_phys_mem_area *mem_array, + const unsigned int mem_array_size, + uint8_t **tables, size_t *tables_len, + uint8_t **anchor, size_t *anchor_len, + Error **errp) +{ + unsigned i, dimm_cnt; + + if (smbios_legacy) { + *tables = *anchor = NULL; + *tables_len = *anchor_len = 0; + return; + } + + if (!smbios_immutable) { + smbios_build_type_0_table(); + smbios_build_type_1_table(); + smbios_build_type_2_table(); + smbios_build_type_3_table(); + + smbios_smp_sockets = DIV_ROUND_UP(ms->smp.cpus, + ms->smp.cores * ms->smp.threads); + assert(smbios_smp_sockets >= 1); + + for (i = 0; i < smbios_smp_sockets; i++) { + smbios_build_type_4_table(ms, i); + } + + smbios_build_type_11_table(); + +#define MAX_DIMM_SZ (16 * GiB) +#define GET_DIMM_SZ ((i < dimm_cnt - 1) ? MAX_DIMM_SZ \ + : ((current_machine->ram_size - 1) % MAX_DIMM_SZ) + 1) + + dimm_cnt = QEMU_ALIGN_UP(current_machine->ram_size, MAX_DIMM_SZ) / MAX_DIMM_SZ; + + smbios_build_type_16_table(dimm_cnt); + + for (i = 0; i < dimm_cnt; i++) { + smbios_build_type_17_table(i, GET_DIMM_SZ); + } + + for (i = 0; i < mem_array_size; i++) { + smbios_build_type_19_table(i, mem_array[i].address, + mem_array[i].length); + } + + smbios_build_type_32_table(); + smbios_build_type_38_table(); + smbios_build_type_41_table(errp); + smbios_build_type_127_table(); + + smbios_validate_table(ms); + smbios_entry_point_setup(); + smbios_immutable = true; + } + + /* return tables blob and entry point (anchor), and their sizes */ + *tables = smbios_tables; + *tables_len = smbios_tables_len; + *anchor = (uint8_t *)&ep; + + /* calculate length based on anchor string */ + if (!strncmp((char *)&ep, "_SM_", 4)) { + *anchor_len = sizeof(struct smbios_21_entry_point); + } else if (!strncmp((char *)&ep, "_SM3_", 5)) { + *anchor_len = sizeof(struct smbios_30_entry_point); + } else { + abort(); + } +} + +static void save_opt(const char **dest, QemuOpts *opts, const char *name) +{ + const char *val = qemu_opt_get(opts, name); + + if (val) { + *dest = val; + } +} + + +struct opt_list { + size_t *ndest; + char ***dest; +}; + +static int save_opt_one(void *opaque, + const char *name, const char *value, + Error **errp) +{ + struct opt_list *opt = opaque; + + if (g_str_equal(name, "path")) { + g_autoptr(GByteArray) data = g_byte_array_new(); + g_autofree char *buf = g_new(char, 4096); + ssize_t ret; + int fd = qemu_open(value, O_RDONLY, errp); + if (fd < 0) { + return -1; + } + + while (1) { + ret = read(fd, buf, 4096); + if (ret == 0) { + break; + } + if (ret < 0) { + error_setg(errp, "Unable to read from %s: %s", + value, strerror(errno)); + qemu_close(fd); + return -1; + } + if (memchr(buf, '\0', ret)) { + error_setg(errp, "NUL in OEM strings value in %s", value); + qemu_close(fd); + return -1; + } + g_byte_array_append(data, (guint8 *)buf, ret); + } + + qemu_close(fd); + + *opt->dest = g_renew(char *, *opt->dest, (*opt->ndest) + 1); + (*opt->dest)[*opt->ndest] = (char *)g_byte_array_free(data, FALSE); + (*opt->ndest)++; + data = NULL; + } else if (g_str_equal(name, "value")) { + *opt->dest = g_renew(char *, *opt->dest, (*opt->ndest) + 1); + (*opt->dest)[*opt->ndest] = g_strdup(value); + (*opt->ndest)++; + } else if (!g_str_equal(name, "type")) { + error_setg(errp, "Unexpected option %s", name); + return -1; + } + + return 0; +} + +static bool save_opt_list(size_t *ndest, char ***dest, QemuOpts *opts, + Error **errp) +{ + 
struct opt_list opt = { + ndest, dest, + }; + if (!qemu_opt_foreach(opts, save_opt_one, &opt, errp)) { + return false; + } + return true; +} + +void smbios_entry_add(QemuOpts *opts, Error **errp) +{ + const char *val; + + assert(!smbios_immutable); + + val = qemu_opt_get(opts, "file"); + if (val) { + struct smbios_structure_header *header; + int size; + struct smbios_table *table; /* legacy mode only */ + + if (!qemu_opts_validate(opts, qemu_smbios_file_opts, errp)) { + return; + } + + size = get_image_size(val); + if (size == -1 || size < sizeof(struct smbios_structure_header)) { + error_setg(errp, "Cannot read SMBIOS file %s", val); + return; + } + + /* + * NOTE: standard double '\0' terminator expected, per smbios spec. + * (except in legacy mode, where the second '\0' is implicit and + * will be inserted by the BIOS). + */ + smbios_tables = g_realloc(smbios_tables, smbios_tables_len + size); + header = (struct smbios_structure_header *)(smbios_tables + + smbios_tables_len); + + if (load_image_size(val, (uint8_t *)header, size) != size) { + error_setg(errp, "Failed to load SMBIOS file %s", val); + return; + } + + if (test_bit(header->type, have_fields_bitmap)) { + error_setg(errp, + "can't load type %d struct, fields already specified!", + header->type); + return; + } + set_bit(header->type, have_binfile_bitmap); + + if (header->type == 4) { + smbios_type4_count++; + } + + smbios_tables_len += size; + if (size > smbios_table_max) { + smbios_table_max = size; + } + smbios_table_cnt++; + + /* add a copy of the newly loaded blob to legacy smbios_entries */ + /* NOTE: This code runs before smbios_set_defaults(), so we don't + * yet know which mode (legacy vs. aggregate-table) will be + * required. We therefore add the binary blob to both legacy + * (smbios_entries) and aggregate (smbios_tables) tables, and + * delete the one we don't need from smbios_set_defaults(), + * once we know which machine version has been requested. 
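[Illustrative aside, not part of the patch: the NOTE above about the standard double '\0' terminator refers to how SMBIOS structures are delimited -- a formatted area of header.length bytes, then a string set ending in two consecutive NUL bytes. A sketch, assuming spec-conformant input, of how a consumer steps from one structure to the next; the struct is renamed to avoid clashing with the real definition.]

#include <stdint.h>

struct smbios_structure_header_example {
    uint8_t  type;
    uint8_t  length;
    uint16_t handle;
} __attribute__((packed));

static const uint8_t *smbios_next_struct(const uint8_t *p)
{
    const struct smbios_structure_header_example *h =
        (const struct smbios_structure_header_example *)p;

    p += h->length;                  /* skip the formatted area */
    while (p[0] != 0 || p[1] != 0) { /* scan for the double terminator */
        p++;
    }
    return p + 2;                    /* first byte of the next structure */
}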
+ */ + if (!smbios_entries) { + smbios_entries_len = sizeof(uint16_t); + smbios_entries = g_malloc0(smbios_entries_len); + } + smbios_entries = g_realloc(smbios_entries, smbios_entries_len + + size + sizeof(*table)); + table = (struct smbios_table *)(smbios_entries + smbios_entries_len); + table->header.type = SMBIOS_TABLE_ENTRY; + table->header.length = cpu_to_le16(sizeof(*table) + size); + memcpy(table->data, header, size); + smbios_entries_len += sizeof(*table) + size; + (*(uint16_t *)smbios_entries) = + cpu_to_le16(le16_to_cpu(*(uint16_t *)smbios_entries) + 1); + /* end: add a copy of the newly loaded blob to legacy smbios_entries */ + + return; + } + + val = qemu_opt_get(opts, "type"); + if (val) { + unsigned long type = strtoul(val, NULL, 0); + + if (type > SMBIOS_MAX_TYPE) { + error_setg(errp, "out of range!"); + return; + } + + if (test_bit(type, have_binfile_bitmap)) { + error_setg(errp, "can't add fields, binary file already loaded!"); + return; + } + set_bit(type, have_fields_bitmap); + + switch (type) { + case 0: + if (!qemu_opts_validate(opts, qemu_smbios_type0_opts, errp)) { + return; + } + save_opt(&type0.vendor, opts, "vendor"); + save_opt(&type0.version, opts, "version"); + save_opt(&type0.date, opts, "date"); + type0.uefi = qemu_opt_get_bool(opts, "uefi", false); + + val = qemu_opt_get(opts, "release"); + if (val) { + if (sscanf(val, "%hhu.%hhu", &type0.major, &type0.minor) != 2) { + error_setg(errp, "Invalid release"); + return; + } + type0.have_major_minor = true; + } + return; + case 1: + if (!qemu_opts_validate(opts, qemu_smbios_type1_opts, errp)) { + return; + } + save_opt(&type1.manufacturer, opts, "manufacturer"); + save_opt(&type1.product, opts, "product"); + save_opt(&type1.version, opts, "version"); + save_opt(&type1.serial, opts, "serial"); + save_opt(&type1.sku, opts, "sku"); + save_opt(&type1.family, opts, "family"); + + val = qemu_opt_get(opts, "uuid"); + if (val) { + if (qemu_uuid_parse(val, &qemu_uuid) != 0) { + error_setg(errp, "Invalid UUID"); + return; + } + qemu_uuid_set = true; + } + return; + case 2: + if (!qemu_opts_validate(opts, qemu_smbios_type2_opts, errp)) { + return; + } + save_opt(&type2.manufacturer, opts, "manufacturer"); + save_opt(&type2.product, opts, "product"); + save_opt(&type2.version, opts, "version"); + save_opt(&type2.serial, opts, "serial"); + save_opt(&type2.asset, opts, "asset"); + save_opt(&type2.location, opts, "location"); + return; + case 3: + if (!qemu_opts_validate(opts, qemu_smbios_type3_opts, errp)) { + return; + } + save_opt(&type3.manufacturer, opts, "manufacturer"); + save_opt(&type3.version, opts, "version"); + save_opt(&type3.serial, opts, "serial"); + save_opt(&type3.asset, opts, "asset"); + save_opt(&type3.sku, opts, "sku"); + return; + case 4: + if (!qemu_opts_validate(opts, qemu_smbios_type4_opts, errp)) { + return; + } + save_opt(&type4.sock_pfx, opts, "sock_pfx"); + save_opt(&type4.manufacturer, opts, "manufacturer"); + save_opt(&type4.version, opts, "version"); + save_opt(&type4.serial, opts, "serial"); + save_opt(&type4.asset, opts, "asset"); + save_opt(&type4.part, opts, "part"); + type4.max_speed = qemu_opt_get_number(opts, "max-speed", + DEFAULT_CPU_SPEED); + type4.current_speed = qemu_opt_get_number(opts, "current-speed", + DEFAULT_CPU_SPEED); + if (type4.max_speed > UINT16_MAX || + type4.current_speed > UINT16_MAX) { + error_setg(errp, "SMBIOS CPU speed is too large (> %d)", + UINT16_MAX); + } + return; + case 11: + if (!qemu_opts_validate(opts, qemu_smbios_type11_opts, errp)) { + return; + } + if 
(!save_opt_list(&type11.nvalues, &type11.values, opts, errp)) { + return; + } + return; + case 17: + if (!qemu_opts_validate(opts, qemu_smbios_type17_opts, errp)) { + return; + } + save_opt(&type17.loc_pfx, opts, "loc_pfx"); + save_opt(&type17.bank, opts, "bank"); + save_opt(&type17.manufacturer, opts, "manufacturer"); + save_opt(&type17.serial, opts, "serial"); + save_opt(&type17.asset, opts, "asset"); + save_opt(&type17.part, opts, "part"); + type17.speed = qemu_opt_get_number(opts, "speed", 0); + return; + case 41: { + struct type41_instance *t; + Error *local_err = NULL; + + if (!qemu_opts_validate(opts, qemu_smbios_type41_opts, errp)) { + return; + } + t = g_new0(struct type41_instance, 1); + save_opt(&t->designation, opts, "designation"); + t->kind = qapi_enum_parse(&type41_kind_lookup, + qemu_opt_get(opts, "kind"), + 0, &local_err) + 1; + t->kind |= 0x80; /* enabled */ + if (local_err != NULL) { + error_propagate(errp, local_err); + g_free(t); + return; + } + t->instance = qemu_opt_get_number(opts, "instance", 1); + save_opt(&t->pcidev, opts, "pcidev"); + + QTAILQ_INSERT_TAIL(&type41, t, next); + return; + } + default: + error_setg(errp, + "Don't know how to build fields for SMBIOS type %ld", + type); + return; + } + } + + error_setg(errp, "Must specify type= or file="); +} diff --git a/hw/smbios/smbios_build.h b/hw/smbios/smbios_build.h new file mode 100644 index 000000000..56b5a1e3f --- /dev/null +++ b/hw/smbios/smbios_build.h @@ -0,0 +1,103 @@ +/* + * SMBIOS Support + * + * Copyright (C) 2009 Hewlett-Packard Development Company, L.P. + * Copyright (C) 2013 Red Hat, Inc. + * Copyright (c) 2015,2016 Corey Minyard, MontaVista Software, LLC + * + * Authors: + * Alex Williamson + * Markus Armbruster + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + * + * Contributions after 2012-01-13 are licensed under the terms of the + * GNU GPL, version 2 or (at your option) any later version. + */ + +#ifndef QEMU_SMBIOS_BUILD_H +#define QEMU_SMBIOS_BUILD_H + +bool smbios_skip_table(uint8_t type, bool required_table); + +extern uint8_t *smbios_tables; +extern size_t smbios_tables_len; +extern unsigned smbios_table_max; +extern unsigned smbios_table_cnt; + +#define SMBIOS_BUILD_TABLE_PRE(tbl_type, tbl_handle, tbl_required) \ + struct smbios_type_##tbl_type *t; \ + size_t t_off; /* table offset into smbios_tables */ \ + int str_index = 0; \ + do { \ + /* should we skip building this table ? */ \ + if (smbios_skip_table(tbl_type, tbl_required)) { \ + return; \ + } \ + \ + /* use offset of table t within smbios_tables */ \ + /* (pointer must be updated after each realloc) */ \ + t_off = smbios_tables_len; \ + smbios_tables_len += sizeof(*t); \ + smbios_tables = g_realloc(smbios_tables, smbios_tables_len); \ + t = (struct smbios_type_##tbl_type *)(smbios_tables + t_off); \ + \ + t->header.type = tbl_type; \ + t->header.length = sizeof(*t); \ + t->header.handle = cpu_to_le16(tbl_handle); \ + } while (0) + +#define SMBIOS_TABLE_SET_STR(tbl_type, field, value) \ + do { \ + int len = (value != NULL) ? 
strlen(value) + 1 : 0; \ + if (len > 1) { \ + smbios_tables = g_realloc(smbios_tables, \ + smbios_tables_len + len); \ + memcpy(smbios_tables + smbios_tables_len, value, len); \ + smbios_tables_len += len; \ + /* update pointer post-realloc */ \ + t = (struct smbios_type_##tbl_type *)(smbios_tables + t_off); \ + t->field = ++str_index; \ + } else { \ + t->field = 0; \ + } \ + } while (0) + +#define SMBIOS_TABLE_SET_STR_LIST(tbl_type, value) \ + do { \ + int len = (value != NULL) ? strlen(value) + 1 : 0; \ + if (len > 1) { \ + smbios_tables = g_realloc(smbios_tables, \ + smbios_tables_len + len); \ + memcpy(smbios_tables + smbios_tables_len, value, len); \ + smbios_tables_len += len; \ + ++str_index; \ + } \ + } while (0) + +#define SMBIOS_BUILD_TABLE_POST \ + do { \ + size_t term_cnt, t_size; \ + \ + /* add '\0' terminator (add two if no strings defined) */ \ + term_cnt = (str_index == 0) ? 2 : 1; \ + smbios_tables = g_realloc(smbios_tables, \ + smbios_tables_len + term_cnt); \ + memset(smbios_tables + smbios_tables_len, 0, term_cnt); \ + smbios_tables_len += term_cnt; \ + \ + /* update smbios max. element size */ \ + t_size = smbios_tables_len - t_off; \ + if (t_size > smbios_table_max) { \ + smbios_table_max = t_size; \ + } \ + \ + /* update smbios element count */ \ + smbios_table_cnt++; \ + } while (0) + +/* IPMI SMBIOS firmware handling */ +void smbios_build_type_38_table(void); + +#endif /* QEMU_SMBIOS_BUILD_H */ diff --git a/hw/smbios/smbios_type_38-stub.c b/hw/smbios/smbios_type_38-stub.c new file mode 100644 index 000000000..14b53d004 --- /dev/null +++ b/hw/smbios/smbios_type_38-stub.c @@ -0,0 +1,15 @@ +/* + * IPMI SMBIOS firmware handling + * + * Copyright (c) 2015,2016 Corey Minyard, MontaVista Software, LLC + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "smbios_build.h" + +void smbios_build_type_38_table(void) +{ +} diff --git a/hw/smbios/smbios_type_38.c b/hw/smbios/smbios_type_38.c new file mode 100644 index 000000000..168b88664 --- /dev/null +++ b/hw/smbios/smbios_type_38.c @@ -0,0 +1,119 @@ +/* + * IPMI SMBIOS firmware handling + * + * Copyright (c) 2015,2016 Corey Minyard, MontaVista Software, LLC + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
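[Illustrative aside, not part of the patch: the SMBIOS_TABLE_SET_STR and SMBIOS_BUILD_TABLE_POST macros above implement the SMBIOS string-set convention -- each non-empty string is appended after the formatted area and the field stores its 1-based index, with 0 meaning "no string", and a table with no strings gets two terminating NULs instead of one. A stand-alone analogue using GLib, as smbios.c itself does; the helper name is ours.]

#include <glib.h>
#include <string.h>

/* Append a NUL-terminated string to the blob and return its 1-based
 * index; 0 is the spec's "no string" value. */
static int smbios_append_string(GByteArray *blob, int *str_index,
                                const char *value)
{
    if (!value || !value[0]) {
        return 0;
    }
    g_byte_array_append(blob, (const guint8 *)value, strlen(value) + 1);
    return ++*str_index;
}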
+ */ + +#include "qemu/osdep.h" +#include "hw/ipmi/ipmi.h" +#include "hw/firmware/smbios.h" +#include "qemu/error-report.h" +#include "smbios_build.h" + +/* SMBIOS type 38 - IPMI */ +struct smbios_type_38 { + struct smbios_structure_header header; + uint8_t interface_type; + uint8_t ipmi_spec_revision; + uint8_t i2c_slave_address; + uint8_t nv_storage_device_address; + uint64_t base_address; + uint8_t base_address_modifier; + uint8_t interrupt_number; +} QEMU_PACKED; + +static void smbios_build_one_type_38(IPMIFwInfo *info) +{ + uint64_t baseaddr = info->base_address; + SMBIOS_BUILD_TABLE_PRE(38, 0x3000, true); + + t->interface_type = info->interface_type; + t->ipmi_spec_revision = ((info->ipmi_spec_major_revision << 4) + | info->ipmi_spec_minor_revision); + t->i2c_slave_address = info->i2c_slave_address; + t->nv_storage_device_address = 0; + + assert(info->ipmi_spec_minor_revision <= 15); + assert(info->ipmi_spec_major_revision <= 15); + + /* or 1 to set it to I/O space */ + switch (info->memspace) { + case IPMI_MEMSPACE_IO: + baseaddr |= 1; + break; + case IPMI_MEMSPACE_MEM32: + case IPMI_MEMSPACE_MEM64: + break; + case IPMI_MEMSPACE_SMBUS: + baseaddr <<= 1; + break; + } + + t->base_address = cpu_to_le64(baseaddr); + + t->base_address_modifier = 0; + if (info->irq_type == IPMI_LEVEL_IRQ) { + t->base_address_modifier |= 1; + } + switch (info->register_spacing) { + case 1: + break; + case 4: + t->base_address_modifier |= 1 << 6; + break; + case 16: + t->base_address_modifier |= 2 << 6; + break; + default: + error_report("IPMI register spacing %d is not compatible with" + " SMBIOS, ignoring this entry.", info->register_spacing); + return; + } + t->interrupt_number = info->interrupt_number; + + SMBIOS_BUILD_TABLE_POST; +} + +static void smbios_add_ipmi_devices(BusState *bus) +{ + BusChild *kid; + + QTAILQ_FOREACH(kid, &bus->children, sibling) { + DeviceState *dev = kid->child; + Object *obj = object_dynamic_cast(OBJECT(dev), TYPE_IPMI_INTERFACE); + BusState *childbus; + + if (obj) { + IPMIInterface *ii; + IPMIInterfaceClass *iic; + IPMIFwInfo info; + + ii = IPMI_INTERFACE(obj); + iic = IPMI_INTERFACE_GET_CLASS(obj); + memset(&info, 0, sizeof(info)); + if (!iic->get_fwinfo) { + continue; + } + iic->get_fwinfo(ii, &info); + smbios_build_one_type_38(&info); + continue; + } + + QLIST_FOREACH(childbus, &dev->child_bus, sibling) { + smbios_add_ipmi_devices(childbus); + } + } +} + +void smbios_build_type_38_table(void) +{ + BusState *bus; + + bus = sysbus_get_default(); + if (bus) { + smbios_add_ipmi_devices(bus); + } +} diff --git a/hw/sparc/Kconfig b/hw/sparc/Kconfig new file mode 100644 index 000000000..79d58beb7 --- /dev/null +++ b/hw/sparc/Kconfig @@ -0,0 +1,29 @@ +config SUN4M + bool + imply TCX + imply CG3 + select CS4231 + select ECCMEMCTL + select EMPTY_SLOT + select UNIMP + select ESCC + select ESP + select FDC_SYSBUS + select SLAVIO + select LANCE + select M48T59 + select STP2000 + select CHRP_NVRAM + select OR_IRQ + +config LEON3 + bool + select GRLIB + +config GRLIB + bool + select PTIMER + +config SLAVIO + bool + select PTIMER diff --git a/hw/sparc/leon3.c b/hw/sparc/leon3.c new file mode 100644 index 000000000..7b4dec172 --- /dev/null +++ b/hw/sparc/leon3.c @@ -0,0 +1,390 @@ +/* + * QEMU Leon3 System Emulator + * + * Copyright (c) 2010-2019 AdaCore + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation 
the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "qemu/error-report.h" +#include "qapi/error.h" +#include "qemu-common.h" +#include "qemu/datadir.h" +#include "cpu.h" +#include "hw/irq.h" +#include "qemu/timer.h" +#include "hw/ptimer.h" +#include "hw/qdev-properties.h" +#include "sysemu/sysemu.h" +#include "sysemu/qtest.h" +#include "sysemu/reset.h" +#include "hw/boards.h" +#include "hw/loader.h" +#include "elf.h" +#include "trace.h" + +#include "hw/sparc/grlib.h" +#include "hw/misc/grlib_ahb_apb_pnp.h" + +/* Default system clock. */ +#define CPU_CLK (40 * 1000 * 1000) + +#define LEON3_PROM_FILENAME "u-boot.bin" +#define LEON3_PROM_OFFSET (0x00000000) +#define LEON3_RAM_OFFSET (0x40000000) + +#define LEON3_UART_OFFSET (0x80000100) +#define LEON3_UART_IRQ (3) + +#define LEON3_IRQMP_OFFSET (0x80000200) + +#define LEON3_TIMER_OFFSET (0x80000300) +#define LEON3_TIMER_IRQ (6) +#define LEON3_TIMER_COUNT (2) + +#define LEON3_APB_PNP_OFFSET (0x800FF000) +#define LEON3_AHB_PNP_OFFSET (0xFFFFF000) + +typedef struct ResetData { + SPARCCPU *cpu; + uint32_t entry; /* save kernel entry in case of reset */ + target_ulong sp; /* initial stack pointer */ +} ResetData; + +static uint32_t *gen_store_u32(uint32_t *code, hwaddr addr, uint32_t val) +{ + stl_p(code++, 0x82100000); /* mov %g0, %g1 */ + stl_p(code++, 0x84100000); /* mov %g0, %g2 */ + stl_p(code++, 0x03000000 + + extract32(addr, 10, 22)); + /* sethi %hi(addr), %g1 */ + stl_p(code++, 0x82106000 + + extract32(addr, 0, 10)); + /* or %g1, addr, %g1 */ + stl_p(code++, 0x05000000 + + extract32(val, 10, 22)); + /* sethi %hi(val), %g2 */ + stl_p(code++, 0x8410a000 + + extract32(val, 0, 10)); + /* or %g2, val, %g2 */ + stl_p(code++, 0xc4204000); /* st %g2, [ %g1 ] */ + + return code; +} + +/* + * When loading a kernel in RAM the machine is expected to be in a different + * state (eg: initialized by the bootloader). This little code reproduces + * this behavior. 
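[Illustrative aside, not part of the patch: gen_store_u32() above synthesizes a 32-bit constant with the classic SPARC sethi/or pair -- sethi loads the upper 22 bits, or fills in the lower 10. A self-contained check that the split recombines losslessly; the helper name is ours.]

#include <assert.h>
#include <stdint.h>

static void check_sethi_or_split(uint32_t val)
{
    uint32_t hi22 = (val >> 10) & 0x3fffff; /* extract32(val, 10, 22) */
    uint32_t lo10 = val & 0x3ff;            /* extract32(val, 0, 10) */

    assert(((hi22 << 10) | lo10) == val);
}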
+ */ +static void write_bootloader(CPUSPARCState *env, uint8_t *base, + hwaddr kernel_addr) +{ + uint32_t *p = (uint32_t *) base; + + /* Initialize the UARTs */ + /* *UART_CONTROL = UART_RECEIVE_ENABLE | UART_TRANSMIT_ENABLE; */ + p = gen_store_u32(p, 0x80000108, 3); + + /* Initialize the TIMER 0 */ + /* *GPTIMER_SCALER_RELOAD = 40 - 1; */ + p = gen_store_u32(p, 0x80000304, 39); + /* *GPTIMER0_COUNTER_RELOAD = 0xFFFE; */ + p = gen_store_u32(p, 0x80000314, 0xFFFFFFFE); + /* *GPTIMER0_CONFIG = GPTIMER_ENABLE | GPTIMER_RESTART; */ + p = gen_store_u32(p, 0x80000318, 3); + + /* JUMP to the entry point */ + stl_p(p++, 0x82100000); /* mov %g0, %g1 */ + stl_p(p++, 0x03000000 + extract32(kernel_addr, 10, 22)); + /* sethi %hi(kernel_addr), %g1 */ + stl_p(p++, 0x82106000 + extract32(kernel_addr, 0, 10)); + /* or kernel_addr, %g1 */ + stl_p(p++, 0x81c04000); /* jmp %g1 */ + stl_p(p++, 0x01000000); /* nop */ +} + +static void main_cpu_reset(void *opaque) +{ + ResetData *s = (ResetData *)opaque; + CPUState *cpu = CPU(s->cpu); + CPUSPARCState *env = &s->cpu->env; + + cpu_reset(cpu); + + cpu->halted = 0; + env->pc = s->entry; + env->npc = s->entry + 4; + env->regbase[6] = s->sp; +} + +static void leon3_cache_control_int(CPUSPARCState *env) +{ + uint32_t state = 0; + + if (env->cache_control & CACHE_CTRL_IF) { + /* Instruction cache state */ + state = env->cache_control & CACHE_STATE_MASK; + if (state == CACHE_ENABLED) { + state = CACHE_FROZEN; + trace_int_helper_icache_freeze(); + } + + env->cache_control &= ~CACHE_STATE_MASK; + env->cache_control |= state; + } + + if (env->cache_control & CACHE_CTRL_DF) { + /* Data cache state */ + state = (env->cache_control >> 2) & CACHE_STATE_MASK; + if (state == CACHE_ENABLED) { + state = CACHE_FROZEN; + trace_int_helper_dcache_freeze(); + } + + env->cache_control &= ~(CACHE_STATE_MASK << 2); + env->cache_control |= (state << 2); + } +} + +static void leon3_irq_ack(void *irq_manager, int intno) +{ + grlib_irqmp_ack((DeviceState *)irq_manager, intno); +} + +/* + * This device assumes that the incoming 'level' value on the + * qemu_irq is the interrupt number, not just a simple 0/1 level. 
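[Illustrative aside, not part of the patch: as the comment notes, the incoming qemu_irq 'level' carries the whole interrupt number. The scan in leon3_set_pil_in() just below then serves the highest pending line first, per the SPARC rule that PIL 15 has the highest priority; a reduced sketch of that selection, with the helper name ours.]

#include <stdint.h>

static int highest_pending_pil(uint32_t pil_in)
{
    int i;

    for (i = 15; i > 0; i--) {
        if (pil_in & (1u << i)) {
            return i; /* delivered as trap type TT_EXTINT | i */
        }
    }
    return 0; /* nothing pending */
}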
+ */ +static void leon3_set_pil_in(void *opaque, int n, int level) +{ + CPUSPARCState *env = opaque; + uint32_t pil_in = level; + CPUState *cs; + + assert(env != NULL); + + env->pil_in = pil_in; + + if (env->pil_in && (env->interrupt_index == 0 || + (env->interrupt_index & ~15) == TT_EXTINT)) { + unsigned int i; + + for (i = 15; i > 0; i--) { + if (env->pil_in & (1 << i)) { + int old_interrupt = env->interrupt_index; + + env->interrupt_index = TT_EXTINT | i; + if (old_interrupt != env->interrupt_index) { + cs = env_cpu(env); + trace_leon3_set_irq(i); + cpu_interrupt(cs, CPU_INTERRUPT_HARD); + } + break; + } + } + } else if (!env->pil_in && (env->interrupt_index & ~15) == TT_EXTINT) { + cs = env_cpu(env); + trace_leon3_reset_irq(env->interrupt_index & 15); + env->interrupt_index = 0; + cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD); + } +} + +static void leon3_irq_manager(CPUSPARCState *env, void *irq_manager, int intno) +{ + leon3_irq_ack(irq_manager, intno); + leon3_cache_control_int(env); +} + +static void leon3_generic_hw_init(MachineState *machine) +{ + ram_addr_t ram_size = machine->ram_size; + const char *bios_name = machine->firmware ?: LEON3_PROM_FILENAME; + const char *kernel_filename = machine->kernel_filename; + SPARCCPU *cpu; + CPUSPARCState *env; + MemoryRegion *address_space_mem = get_system_memory(); + MemoryRegion *prom = g_new(MemoryRegion, 1); + int ret; + char *filename; + int bios_size; + int prom_size; + ResetData *reset_info; + DeviceState *dev, *irqmpdev; + int i; + AHBPnp *ahb_pnp; + APBPnp *apb_pnp; + + /* Init CPU */ + cpu = SPARC_CPU(cpu_create(machine->cpu_type)); + env = &cpu->env; + + cpu_sparc_set_id(env, 0); + + /* Reset data */ + reset_info = g_malloc0(sizeof(ResetData)); + reset_info->cpu = cpu; + reset_info->sp = LEON3_RAM_OFFSET + ram_size; + qemu_register_reset(main_cpu_reset, reset_info); + + ahb_pnp = GRLIB_AHB_PNP(qdev_new(TYPE_GRLIB_AHB_PNP)); + sysbus_realize_and_unref(SYS_BUS_DEVICE(ahb_pnp), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(ahb_pnp), 0, LEON3_AHB_PNP_OFFSET); + grlib_ahb_pnp_add_entry(ahb_pnp, 0, 0, GRLIB_VENDOR_GAISLER, + GRLIB_LEON3_DEV, GRLIB_AHB_MASTER, + GRLIB_CPU_AREA); + + apb_pnp = GRLIB_APB_PNP(qdev_new(TYPE_GRLIB_APB_PNP)); + sysbus_realize_and_unref(SYS_BUS_DEVICE(apb_pnp), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(apb_pnp), 0, LEON3_APB_PNP_OFFSET); + grlib_ahb_pnp_add_entry(ahb_pnp, LEON3_APB_PNP_OFFSET, 0xFFF, + GRLIB_VENDOR_GAISLER, GRLIB_APBMST_DEV, + GRLIB_AHB_SLAVE, GRLIB_AHBMEM_AREA); + + /* Allocate IRQ manager */ + irqmpdev = qdev_new(TYPE_GRLIB_IRQMP); + qdev_init_gpio_in_named_with_opaque(DEVICE(cpu), leon3_set_pil_in, + env, "pil", 1); + qdev_connect_gpio_out_named(irqmpdev, "grlib-irq", 0, + qdev_get_gpio_in_named(DEVICE(cpu), "pil", 0)); + sysbus_realize_and_unref(SYS_BUS_DEVICE(irqmpdev), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(irqmpdev), 0, LEON3_IRQMP_OFFSET); + env->irq_manager = irqmpdev; + env->qemu_irq_ack = leon3_irq_manager; + grlib_apb_pnp_add_entry(apb_pnp, LEON3_IRQMP_OFFSET, 0xFFF, + GRLIB_VENDOR_GAISLER, GRLIB_IRQMP_DEV, + 2, 0, GRLIB_APBIO_AREA); + + /* Allocate RAM */ + if (ram_size > 1 * GiB) { + error_report("Too much memory for this machine: %" PRId64 "MB," + " maximum 1G", + ram_size / MiB); + exit(1); + } + + memory_region_add_subregion(address_space_mem, LEON3_RAM_OFFSET, + machine->ram); + + /* Allocate BIOS */ + prom_size = 8 * MiB; + memory_region_init_rom(prom, NULL, "Leon3.bios", prom_size, &error_fatal); + memory_region_add_subregion(address_space_mem, 
LEON3_PROM_OFFSET, prom); + + /* Load boot prom */ + filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name); + + if (filename) { + bios_size = get_image_size(filename); + } else { + bios_size = -1; + } + + if (bios_size > prom_size) { + error_report("could not load prom '%s': file too big", filename); + exit(1); + } + + if (bios_size > 0) { + ret = load_image_targphys(filename, LEON3_PROM_OFFSET, bios_size); + if (ret < 0 || ret > prom_size) { + error_report("could not load prom '%s'", filename); + exit(1); + } + } else if (kernel_filename == NULL && !qtest_enabled()) { + error_report("Can't read bios image '%s'", filename + ? filename + : LEON3_PROM_FILENAME); + exit(1); + } + g_free(filename); + + /* Can directly load an application. */ + if (kernel_filename != NULL) { + long kernel_size; + uint64_t entry; + + kernel_size = load_elf(kernel_filename, NULL, NULL, NULL, + &entry, NULL, NULL, NULL, + 1 /* big endian */, EM_SPARC, 0, 0); + if (kernel_size < 0) { + kernel_size = load_uimage(kernel_filename, NULL, &entry, + NULL, NULL, NULL); + } + if (kernel_size < 0) { + error_report("could not load kernel '%s'", kernel_filename); + exit(1); + } + if (bios_size <= 0) { + /* + * If there is no bios/monitor just start the application but put + * the machine in an initialized state through a little + * bootloader. + */ + uint8_t *bootloader_entry; + + bootloader_entry = memory_region_get_ram_ptr(prom); + write_bootloader(env, bootloader_entry, entry); + env->pc = LEON3_PROM_OFFSET; + env->npc = LEON3_PROM_OFFSET + 4; + reset_info->entry = LEON3_PROM_OFFSET; + } + } + + /* Allocate timers */ + dev = qdev_new(TYPE_GRLIB_GPTIMER); + qdev_prop_set_uint32(dev, "nr-timers", LEON3_TIMER_COUNT); + qdev_prop_set_uint32(dev, "frequency", CPU_CLK); + qdev_prop_set_uint32(dev, "irq-line", LEON3_TIMER_IRQ); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, LEON3_TIMER_OFFSET); + for (i = 0; i < LEON3_TIMER_COUNT; i++) { + sysbus_connect_irq(SYS_BUS_DEVICE(dev), i, + qdev_get_gpio_in(irqmpdev, LEON3_TIMER_IRQ + i)); + } + + grlib_apb_pnp_add_entry(apb_pnp, LEON3_TIMER_OFFSET, 0xFFF, + GRLIB_VENDOR_GAISLER, GRLIB_GPTIMER_DEV, + 0, LEON3_TIMER_IRQ, GRLIB_APBIO_AREA); + + /* Allocate uart */ + dev = qdev_new(TYPE_GRLIB_APB_UART); + qdev_prop_set_chr(dev, "chrdev", serial_hd(0)); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, LEON3_UART_OFFSET); + sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, + qdev_get_gpio_in(irqmpdev, LEON3_UART_IRQ)); + grlib_apb_pnp_add_entry(apb_pnp, LEON3_UART_OFFSET, 0xFFF, + GRLIB_VENDOR_GAISLER, GRLIB_APBUART_DEV, 1, + LEON3_UART_IRQ, GRLIB_APBIO_AREA); +} + +static void leon3_generic_machine_init(MachineClass *mc) +{ + mc->desc = "Leon-3 generic"; + mc->init = leon3_generic_hw_init; + mc->default_cpu_type = SPARC_CPU_TYPE_NAME("LEON3"); + mc->default_ram_id = "leon3.ram"; +} + +DEFINE_MACHINE("leon3_generic", leon3_generic_machine_init) diff --git a/hw/sparc/meson.build b/hw/sparc/meson.build new file mode 100644 index 000000000..19c442c90 --- /dev/null +++ b/hw/sparc/meson.build @@ -0,0 +1,6 @@ +sparc_ss = ss.source_set() +sparc_ss.add(when: 'CONFIG_LEON3', if_true: files('leon3.c')) +sparc_ss.add(when: 'CONFIG_SUN4M', if_true: files('sun4m.c')) +sparc_ss.add(when: 'CONFIG_SUN4M', if_true: files('sun4m_iommu.c')) + +hw_arch += {'sparc': sparc_ss} diff --git a/hw/sparc/sun4m.c b/hw/sparc/sun4m.c new file mode 100644 index 000000000..7f3a7c002 --- /dev/null +++ 
b/hw/sparc/sun4m.c @@ -0,0 +1,1494 @@ +/* + * QEMU Sun4m & Sun4d & Sun4c System Emulator + * + * Copyright (c) 2003-2005 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "qapi/error.h" +#include "qemu/datadir.h" +#include "qemu-common.h" +#include "cpu.h" +#include "hw/sysbus.h" +#include "qemu/error-report.h" +#include "qemu/timer.h" +#include "hw/sparc/sun4m_iommu.h" +#include "hw/rtc/m48t59.h" +#include "migration/vmstate.h" +#include "hw/sparc/sparc32_dma.h" +#include "hw/block/fdc.h" +#include "sysemu/reset.h" +#include "sysemu/runstate.h" +#include "sysemu/sysemu.h" +#include "net/net.h" +#include "hw/boards.h" +#include "hw/scsi/esp.h" +#include "hw/nvram/sun_nvram.h" +#include "hw/qdev-properties.h" +#include "hw/nvram/chrp_nvram.h" +#include "hw/nvram/fw_cfg.h" +#include "hw/char/escc.h" +#include "hw/misc/empty_slot.h" +#include "hw/misc/unimp.h" +#include "hw/irq.h" +#include "hw/or-irq.h" +#include "hw/loader.h" +#include "elf.h" +#include "trace.h" +#include "qom/object.h" + +/* + * Sun4m architecture was used in the following machines: + * + * SPARCserver 6xxMP/xx + * SPARCclassic (SPARCclassic Server)(SPARCstation LC) (4/15), + * SPARCclassic X (4/10) + * SPARCstation LX/ZX (4/30) + * SPARCstation Voyager + * SPARCstation 10/xx, SPARCserver 10/xx + * SPARCstation 5, SPARCserver 5 + * SPARCstation 20/xx, SPARCserver 20 + * SPARCstation 4 + * + * See for example: http://www.sunhelp.org/faq/sunref1.html + */ + +#define KERNEL_LOAD_ADDR 0x00004000 +#define CMDLINE_ADDR 0x007ff000 +#define INITRD_LOAD_ADDR 0x00800000 +#define PROM_SIZE_MAX (1 * MiB) +#define PROM_VADDR 0xffd00000 +#define PROM_FILENAME "openbios-sparc32" +#define CFG_ADDR 0xd00000510ULL +#define FW_CFG_SUN4M_DEPTH (FW_CFG_ARCH_LOCAL + 0x00) +#define FW_CFG_SUN4M_WIDTH (FW_CFG_ARCH_LOCAL + 0x01) +#define FW_CFG_SUN4M_HEIGHT (FW_CFG_ARCH_LOCAL + 0x02) + +#define MAX_CPUS 16 +#define MAX_PILS 16 +#define MAX_VSIMMS 4 + +#define ESCC_CLOCK 4915200 + +struct sun4m_hwdef { + hwaddr iommu_base, iommu_pad_base, iommu_pad_len, slavio_base; + hwaddr intctl_base, counter_base, nvram_base, ms_kb_base; + hwaddr serial_base, fd_base; + hwaddr afx_base, idreg_base, dma_base, esp_base, le_base; + hwaddr tcx_base, cs_base, apc_base, aux1_base, aux2_base; + hwaddr bpp_base, dbri_base, sx_base; + struct { + hwaddr reg_base, vram_base; + } vsimm[MAX_VSIMMS]; + hwaddr ecc_base; + uint64_t max_mem; + uint32_t ecc_version; + 
uint32_t iommu_version; + uint16_t machine_id; + uint8_t nvram_machine_id; +}; + +struct Sun4mMachineClass { + /*< private >*/ + MachineClass parent_obj; + /*< public >*/ + const struct sun4m_hwdef *hwdef; +}; +typedef struct Sun4mMachineClass Sun4mMachineClass; + +#define TYPE_SUN4M_MACHINE MACHINE_TYPE_NAME("sun4m-common") +DECLARE_CLASS_CHECKERS(Sun4mMachineClass, SUN4M_MACHINE, TYPE_SUN4M_MACHINE) + +const char *fw_cfg_arch_key_name(uint16_t key) +{ + static const struct { + uint16_t key; + const char *name; + } fw_cfg_arch_wellknown_keys[] = { + {FW_CFG_SUN4M_DEPTH, "depth"}, + {FW_CFG_SUN4M_WIDTH, "width"}, + {FW_CFG_SUN4M_HEIGHT, "height"}, + }; + + for (size_t i = 0; i < ARRAY_SIZE(fw_cfg_arch_wellknown_keys); i++) { + if (fw_cfg_arch_wellknown_keys[i].key == key) { + return fw_cfg_arch_wellknown_keys[i].name; + } + } + return NULL; +} + +static void fw_cfg_boot_set(void *opaque, const char *boot_device, + Error **errp) +{ + fw_cfg_modify_i16(opaque, FW_CFG_BOOT_DEVICE, boot_device[0]); +} + +static void nvram_init(Nvram *nvram, uint8_t *macaddr, + const char *cmdline, const char *boot_devices, + ram_addr_t RAM_size, uint32_t kernel_size, + int width, int height, int depth, + int nvram_machine_id, const char *arch) +{ + unsigned int i; + int sysp_end; + uint8_t image[0x1ff0]; + NvramClass *k = NVRAM_GET_CLASS(nvram); + + memset(image, '\0', sizeof(image)); + + /* OpenBIOS nvram variables partition */ + sysp_end = chrp_nvram_create_system_partition(image, 0, 0x1fd0); + + /* Free space partition */ + chrp_nvram_create_free_partition(&image[sysp_end], 0x1fd0 - sysp_end); + + Sun_init_header((struct Sun_nvram *)&image[0x1fd8], macaddr, + nvram_machine_id); + + for (i = 0; i < sizeof(image); i++) { + (k->write)(nvram, i, image[i]); + } +} + +static void cpu_kick_irq(SPARCCPU *cpu) +{ + CPUSPARCState *env = &cpu->env; + CPUState *cs = CPU(cpu); + + cs->halted = 0; + cpu_check_irqs(env); + qemu_cpu_kick(cs); +} + +static void cpu_set_irq(void *opaque, int irq, int level) +{ + SPARCCPU *cpu = opaque; + CPUSPARCState *env = &cpu->env; + + if (level) { + trace_sun4m_cpu_set_irq_raise(irq); + env->pil_in |= 1 << irq; + cpu_kick_irq(cpu); + } else { + trace_sun4m_cpu_set_irq_lower(irq); + env->pil_in &= ~(1 << irq); + cpu_check_irqs(env); + } +} + +static void dummy_cpu_set_irq(void *opaque, int irq, int level) +{ +} + +static void sun4m_cpu_reset(void *opaque) +{ + SPARCCPU *cpu = opaque; + CPUState *cs = CPU(cpu); + + cpu_reset(cs); +} + +static void cpu_halt_signal(void *opaque, int irq, int level) +{ + if (level && current_cpu) { + cpu_interrupt(current_cpu, CPU_INTERRUPT_HALT); + } +} + +static uint64_t translate_kernel_address(void *opaque, uint64_t addr) +{ + return addr - 0xf0000000ULL; +} + +static unsigned long sun4m_load_kernel(const char *kernel_filename, + const char *initrd_filename, + ram_addr_t RAM_size, + uint32_t *initrd_size) +{ + int linux_boot; + unsigned int i; + long kernel_size; + uint8_t *ptr; + + linux_boot = (kernel_filename != NULL); + + kernel_size = 0; + if (linux_boot) { + int bswap_needed; + +#ifdef BSWAP_NEEDED + bswap_needed = 1; +#else + bswap_needed = 0; +#endif + kernel_size = load_elf(kernel_filename, NULL, + translate_kernel_address, NULL, + NULL, NULL, NULL, NULL, 1, EM_SPARC, 0, 0); + if (kernel_size < 0) + kernel_size = load_aout(kernel_filename, KERNEL_LOAD_ADDR, + RAM_size - KERNEL_LOAD_ADDR, bswap_needed, + TARGET_PAGE_SIZE); + if (kernel_size < 0) + kernel_size = load_image_targphys(kernel_filename, + KERNEL_LOAD_ADDR, + RAM_size - 
KERNEL_LOAD_ADDR); + if (kernel_size < 0) { + error_report("could not load kernel '%s'", kernel_filename); + exit(1); + } + + /* load initrd */ + *initrd_size = 0; + if (initrd_filename) { + *initrd_size = load_image_targphys(initrd_filename, + INITRD_LOAD_ADDR, + RAM_size - INITRD_LOAD_ADDR); + if ((int)*initrd_size < 0) { + error_report("could not load initial ram disk '%s'", + initrd_filename); + exit(1); + } + } + if (*initrd_size > 0) { + for (i = 0; i < 64 * TARGET_PAGE_SIZE; i += TARGET_PAGE_SIZE) { + ptr = rom_ptr(KERNEL_LOAD_ADDR + i, 24); + if (ptr && ldl_p(ptr) == 0x48647253) { /* HdrS */ + stl_p(ptr + 16, INITRD_LOAD_ADDR); + stl_p(ptr + 20, *initrd_size); + break; + } + } + } + } + return kernel_size; +} + +static void *iommu_init(hwaddr addr, uint32_t version, qemu_irq irq) +{ + DeviceState *dev; + SysBusDevice *s; + + dev = qdev_new(TYPE_SUN4M_IOMMU); + qdev_prop_set_uint32(dev, "version", version); + s = SYS_BUS_DEVICE(dev); + sysbus_realize_and_unref(s, &error_fatal); + sysbus_connect_irq(s, 0, irq); + sysbus_mmio_map(s, 0, addr); + + return s; +} + +static void *sparc32_dma_init(hwaddr dma_base, + hwaddr esp_base, qemu_irq espdma_irq, + hwaddr le_base, qemu_irq ledma_irq, NICInfo *nd) +{ + DeviceState *dma; + ESPDMADeviceState *espdma; + LEDMADeviceState *ledma; + SysBusESPState *esp; + SysBusPCNetState *lance; + + dma = qdev_new(TYPE_SPARC32_DMA); + espdma = SPARC32_ESPDMA_DEVICE(object_resolve_path_component( + OBJECT(dma), "espdma")); + sysbus_connect_irq(SYS_BUS_DEVICE(espdma), 0, espdma_irq); + + esp = SYSBUS_ESP(object_resolve_path_component(OBJECT(espdma), "esp")); + + ledma = SPARC32_LEDMA_DEVICE(object_resolve_path_component( + OBJECT(dma), "ledma")); + sysbus_connect_irq(SYS_BUS_DEVICE(ledma), 0, ledma_irq); + + lance = SYSBUS_PCNET(object_resolve_path_component( + OBJECT(ledma), "lance")); + qdev_set_nic_properties(DEVICE(lance), nd); + + sysbus_realize_and_unref(SYS_BUS_DEVICE(dma), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(dma), 0, dma_base); + + sysbus_mmio_map(SYS_BUS_DEVICE(esp), 0, esp_base); + scsi_bus_legacy_handle_cmdline(&esp->esp.bus); + + sysbus_mmio_map(SYS_BUS_DEVICE(lance), 0, le_base); + + return dma; +} + +static DeviceState *slavio_intctl_init(hwaddr addr, + hwaddr addrg, + qemu_irq **parent_irq) +{ + DeviceState *dev; + SysBusDevice *s; + unsigned int i, j; + + dev = qdev_new("slavio_intctl"); + + s = SYS_BUS_DEVICE(dev); + sysbus_realize_and_unref(s, &error_fatal); + + for (i = 0; i < MAX_CPUS; i++) { + for (j = 0; j < MAX_PILS; j++) { + sysbus_connect_irq(s, i * MAX_PILS + j, parent_irq[i][j]); + } + } + sysbus_mmio_map(s, 0, addrg); + for (i = 0; i < MAX_CPUS; i++) { + sysbus_mmio_map(s, i + 1, addr + i * TARGET_PAGE_SIZE); + } + + return dev; +} + +#define SYS_TIMER_OFFSET 0x10000ULL +#define CPU_TIMER_OFFSET(cpu) (0x1000ULL * cpu) + +static void slavio_timer_init_all(hwaddr addr, qemu_irq master_irq, + qemu_irq *cpu_irqs, unsigned int num_cpus) +{ + DeviceState *dev; + SysBusDevice *s; + unsigned int i; + + dev = qdev_new("slavio_timer"); + qdev_prop_set_uint32(dev, "num_cpus", num_cpus); + s = SYS_BUS_DEVICE(dev); + sysbus_realize_and_unref(s, &error_fatal); + sysbus_connect_irq(s, 0, master_irq); + sysbus_mmio_map(s, 0, addr + SYS_TIMER_OFFSET); + + for (i = 0; i < MAX_CPUS; i++) { + sysbus_mmio_map(s, i + 1, addr + (hwaddr)CPU_TIMER_OFFSET(i)); + sysbus_connect_irq(s, i + 1, cpu_irqs[i]); + } +} + +static qemu_irq slavio_system_powerdown; + +static void slavio_powerdown_req(Notifier *n, void *opaque) +{ + 
qemu_irq_raise(slavio_system_powerdown); +} + +static Notifier slavio_system_powerdown_notifier = { + .notify = slavio_powerdown_req +}; + +#define MISC_LEDS 0x01600000 +#define MISC_CFG 0x01800000 +#define MISC_DIAG 0x01a00000 +#define MISC_MDM 0x01b00000 +#define MISC_SYS 0x01f00000 + +static void slavio_misc_init(hwaddr base, + hwaddr aux1_base, + hwaddr aux2_base, qemu_irq irq, + qemu_irq fdc_tc) +{ + DeviceState *dev; + SysBusDevice *s; + + dev = qdev_new("slavio_misc"); + s = SYS_BUS_DEVICE(dev); + sysbus_realize_and_unref(s, &error_fatal); + if (base) { + /* 8 bit registers */ + /* Slavio control */ + sysbus_mmio_map(s, 0, base + MISC_CFG); + /* Diagnostics */ + sysbus_mmio_map(s, 1, base + MISC_DIAG); + /* Modem control */ + sysbus_mmio_map(s, 2, base + MISC_MDM); + /* 16 bit registers */ + /* ss600mp diag LEDs */ + sysbus_mmio_map(s, 3, base + MISC_LEDS); + /* 32 bit registers */ + /* System control */ + sysbus_mmio_map(s, 4, base + MISC_SYS); + } + if (aux1_base) { + /* AUX 1 (Misc System Functions) */ + sysbus_mmio_map(s, 5, aux1_base); + } + if (aux2_base) { + /* AUX 2 (Software Powerdown Control) */ + sysbus_mmio_map(s, 6, aux2_base); + } + sysbus_connect_irq(s, 0, irq); + sysbus_connect_irq(s, 1, fdc_tc); + slavio_system_powerdown = qdev_get_gpio_in(dev, 0); + qemu_register_powerdown_notifier(&slavio_system_powerdown_notifier); +} + +static void ecc_init(hwaddr base, qemu_irq irq, uint32_t version) +{ + DeviceState *dev; + SysBusDevice *s; + + dev = qdev_new("eccmemctl"); + qdev_prop_set_uint32(dev, "version", version); + s = SYS_BUS_DEVICE(dev); + sysbus_realize_and_unref(s, &error_fatal); + sysbus_connect_irq(s, 0, irq); + sysbus_mmio_map(s, 0, base); + if (version == 0) { // SS-600MP only + sysbus_mmio_map(s, 1, base + 0x1000); + } +} + +static void apc_init(hwaddr power_base, qemu_irq cpu_halt) +{ + DeviceState *dev; + SysBusDevice *s; + + dev = qdev_new("apc"); + s = SYS_BUS_DEVICE(dev); + sysbus_realize_and_unref(s, &error_fatal); + /* Power management (APC) XXX: not a Slavio device */ + sysbus_mmio_map(s, 0, power_base); + sysbus_connect_irq(s, 0, cpu_halt); +} + +static void tcx_init(hwaddr addr, qemu_irq irq, int vram_size, int width, + int height, int depth) +{ + DeviceState *dev; + SysBusDevice *s; + + dev = qdev_new("sun-tcx"); + qdev_prop_set_uint32(dev, "vram_size", vram_size); + qdev_prop_set_uint16(dev, "width", width); + qdev_prop_set_uint16(dev, "height", height); + qdev_prop_set_uint16(dev, "depth", depth); + s = SYS_BUS_DEVICE(dev); + sysbus_realize_and_unref(s, &error_fatal); + + /* 10/ROM : FCode ROM */ + sysbus_mmio_map(s, 0, addr); + /* 2/STIP : Stipple */ + sysbus_mmio_map(s, 1, addr + 0x04000000ULL); + /* 3/BLIT : Blitter */ + sysbus_mmio_map(s, 2, addr + 0x06000000ULL); + /* 5/RSTIP : Raw Stipple */ + sysbus_mmio_map(s, 3, addr + 0x0c000000ULL); + /* 6/RBLIT : Raw Blitter */ + sysbus_mmio_map(s, 4, addr + 0x0e000000ULL); + /* 7/TEC : Transform Engine */ + sysbus_mmio_map(s, 5, addr + 0x00700000ULL); + /* 8/CMAP : DAC */ + sysbus_mmio_map(s, 6, addr + 0x00200000ULL); + /* 9/THC : */ + if (depth == 8) { + sysbus_mmio_map(s, 7, addr + 0x00300000ULL); + } else { + sysbus_mmio_map(s, 7, addr + 0x00301000ULL); + } + /* 11/DHC : */ + sysbus_mmio_map(s, 8, addr + 0x00240000ULL); + /* 12/ALT : */ + sysbus_mmio_map(s, 9, addr + 0x00280000ULL); + /* 0/DFB8 : 8-bit plane */ + sysbus_mmio_map(s, 10, addr + 0x00800000ULL); + /* 1/DFB24 : 24bit plane */ + sysbus_mmio_map(s, 11, addr + 0x02000000ULL); + /* 4/RDFB32: Raw framebuffer. 
Control plane */ + sysbus_mmio_map(s, 12, addr + 0x0a000000ULL); + /* 9/THC24bits : NetBSD writes here even with 8-bit display: dummy */ + if (depth == 8) { + sysbus_mmio_map(s, 13, addr + 0x00301000ULL); + } + + sysbus_connect_irq(s, 0, irq); +} + +static void cg3_init(hwaddr addr, qemu_irq irq, int vram_size, int width, + int height, int depth) +{ + DeviceState *dev; + SysBusDevice *s; + + dev = qdev_new("cgthree"); + qdev_prop_set_uint32(dev, "vram-size", vram_size); + qdev_prop_set_uint16(dev, "width", width); + qdev_prop_set_uint16(dev, "height", height); + qdev_prop_set_uint16(dev, "depth", depth); + s = SYS_BUS_DEVICE(dev); + sysbus_realize_and_unref(s, &error_fatal); + + /* FCode ROM */ + sysbus_mmio_map(s, 0, addr); + /* DAC */ + sysbus_mmio_map(s, 1, addr + 0x400000ULL); + /* 8-bit plane */ + sysbus_mmio_map(s, 2, addr + 0x800000ULL); + + sysbus_connect_irq(s, 0, irq); +} + +/* NCR89C100/MACIO Internal ID register */ + +#define TYPE_MACIO_ID_REGISTER "macio_idreg" + +static const uint8_t idreg_data[] = { 0xfe, 0x81, 0x01, 0x03 }; + +static void idreg_init(hwaddr addr) +{ + DeviceState *dev; + SysBusDevice *s; + + dev = qdev_new(TYPE_MACIO_ID_REGISTER); + s = SYS_BUS_DEVICE(dev); + sysbus_realize_and_unref(s, &error_fatal); + + sysbus_mmio_map(s, 0, addr); + address_space_write_rom(&address_space_memory, addr, + MEMTXATTRS_UNSPECIFIED, + idreg_data, sizeof(idreg_data)); +} + +OBJECT_DECLARE_SIMPLE_TYPE(IDRegState, MACIO_ID_REGISTER) + +struct IDRegState { + SysBusDevice parent_obj; + + MemoryRegion mem; +}; + +static void idreg_realize(DeviceState *ds, Error **errp) +{ + IDRegState *s = MACIO_ID_REGISTER(ds); + SysBusDevice *dev = SYS_BUS_DEVICE(ds); + Error *local_err = NULL; + + memory_region_init_ram_nomigrate(&s->mem, OBJECT(ds), "sun4m.idreg", + sizeof(idreg_data), &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + + vmstate_register_ram_global(&s->mem); + memory_region_set_readonly(&s->mem, true); + sysbus_init_mmio(dev, &s->mem); +} + +static void idreg_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + + dc->realize = idreg_realize; +} + +static const TypeInfo idreg_info = { + .name = TYPE_MACIO_ID_REGISTER, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(IDRegState), + .class_init = idreg_class_init, +}; + +#define TYPE_TCX_AFX "tcx_afx" +OBJECT_DECLARE_SIMPLE_TYPE(AFXState, TCX_AFX) + +struct AFXState { + SysBusDevice parent_obj; + + MemoryRegion mem; +}; + +/* SS-5 TCX AFX register */ +static void afx_init(hwaddr addr) +{ + DeviceState *dev; + SysBusDevice *s; + + dev = qdev_new(TYPE_TCX_AFX); + s = SYS_BUS_DEVICE(dev); + sysbus_realize_and_unref(s, &error_fatal); + + sysbus_mmio_map(s, 0, addr); +} + +static void afx_realize(DeviceState *ds, Error **errp) +{ + AFXState *s = TCX_AFX(ds); + SysBusDevice *dev = SYS_BUS_DEVICE(ds); + Error *local_err = NULL; + + memory_region_init_ram_nomigrate(&s->mem, OBJECT(ds), "sun4m.afx", 4, + &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + + vmstate_register_ram_global(&s->mem); + sysbus_init_mmio(dev, &s->mem); +} + +static void afx_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + + dc->realize = afx_realize; +} + +static const TypeInfo afx_info = { + .name = TYPE_TCX_AFX, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(AFXState), + .class_init = afx_class_init, +}; + +#define TYPE_OPENPROM "openprom" +typedef struct PROMState PROMState; +DECLARE_INSTANCE_CHECKER(PROMState, 
OPENPROM, + TYPE_OPENPROM) + +struct PROMState { + SysBusDevice parent_obj; + + MemoryRegion prom; +}; + +/* Boot PROM (OpenBIOS) */ +static uint64_t translate_prom_address(void *opaque, uint64_t addr) +{ + hwaddr *base_addr = (hwaddr *)opaque; + return addr + *base_addr - PROM_VADDR; +} + +static void prom_init(hwaddr addr, const char *bios_name) +{ + DeviceState *dev; + SysBusDevice *s; + char *filename; + int ret; + + dev = qdev_new(TYPE_OPENPROM); + s = SYS_BUS_DEVICE(dev); + sysbus_realize_and_unref(s, &error_fatal); + + sysbus_mmio_map(s, 0, addr); + + /* load boot prom */ + if (bios_name == NULL) { + bios_name = PROM_FILENAME; + } + filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name); + if (filename) { + ret = load_elf(filename, NULL, + translate_prom_address, &addr, NULL, + NULL, NULL, NULL, 1, EM_SPARC, 0, 0); + if (ret < 0 || ret > PROM_SIZE_MAX) { + ret = load_image_targphys(filename, addr, PROM_SIZE_MAX); + } + g_free(filename); + } else { + ret = -1; + } + if (ret < 0 || ret > PROM_SIZE_MAX) { + error_report("could not load prom '%s'", bios_name); + exit(1); + } +} + +static void prom_realize(DeviceState *ds, Error **errp) +{ + PROMState *s = OPENPROM(ds); + SysBusDevice *dev = SYS_BUS_DEVICE(ds); + Error *local_err = NULL; + + memory_region_init_ram_nomigrate(&s->prom, OBJECT(ds), "sun4m.prom", + PROM_SIZE_MAX, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + + vmstate_register_ram_global(&s->prom); + memory_region_set_readonly(&s->prom, true); + sysbus_init_mmio(dev, &s->prom); +} + +static Property prom_properties[] = { + {/* end of property list */}, +}; + +static void prom_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + device_class_set_props(dc, prom_properties); + dc->realize = prom_realize; +} + +static const TypeInfo prom_info = { + .name = TYPE_OPENPROM, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(PROMState), + .class_init = prom_class_init, +}; + +#define TYPE_SUN4M_MEMORY "memory" +typedef struct RamDevice RamDevice; +DECLARE_INSTANCE_CHECKER(RamDevice, SUN4M_RAM, + TYPE_SUN4M_MEMORY) + +struct RamDevice { + SysBusDevice parent_obj; + HostMemoryBackend *memdev; +}; + +/* System RAM */ +static void ram_realize(DeviceState *dev, Error **errp) +{ + RamDevice *d = SUN4M_RAM(dev); + MemoryRegion *ram = host_memory_backend_get_memory(d->memdev); + + sysbus_init_mmio(SYS_BUS_DEVICE(dev), ram); +} + +static void ram_initfn(Object *obj) +{ + RamDevice *d = SUN4M_RAM(obj); + object_property_add_link(obj, "memdev", TYPE_MEMORY_BACKEND, + (Object **)&d->memdev, + object_property_allow_set_link, + OBJ_PROP_LINK_STRONG); + object_property_set_description(obj, "memdev", "Set RAM backend" + "Valid value is ID of a hostmem backend"); +} + +static void ram_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = ram_realize; +} + +static const TypeInfo ram_info = { + .name = TYPE_SUN4M_MEMORY, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(RamDevice), + .instance_init = ram_initfn, + .class_init = ram_class_init, +}; + +static void cpu_devinit(const char *cpu_type, unsigned int id, + uint64_t prom_addr, qemu_irq **cpu_irqs) +{ + SPARCCPU *cpu; + CPUSPARCState *env; + + cpu = SPARC_CPU(object_new(cpu_type)); + env = &cpu->env; + + qemu_register_reset(sun4m_cpu_reset, cpu); + object_property_set_bool(OBJECT(cpu), "start-powered-off", id != 0, + &error_fatal); + qdev_realize_and_unref(DEVICE(cpu), NULL, &error_fatal); + 
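[Illustrative aside, not part of the patch: prom_init() and translate_prom_address() above rebase the OpenBIOS ELF from its link-time virtual address (PROM_VADDR) onto the machine's physical PROM window. A small numeric check of that relocation; the helper name is ours, and the 0x70000000 base is the SS-5 slavio_base used purely as an example.]

#include <assert.h>
#include <stdint.h>

#define PROM_VADDR_EXAMPLE 0xffd00000UL

/* Rebase an ELF virtual address onto the physical PROM window. */
static uint64_t prom_rebase(uint64_t elf_addr, uint64_t phys_base)
{
    return elf_addr + phys_base - PROM_VADDR_EXAMPLE;
}

int main(void)
{
    /* e.g. an SS-5 maps the PROM at slavio_base 0x70000000 */
    assert(prom_rebase(0xffd00123UL, 0x70000000UL) == 0x70000123UL);
    return 0;
}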
cpu_sparc_set_id(env, id); + *cpu_irqs = qemu_allocate_irqs(cpu_set_irq, cpu, MAX_PILS); + env->prom_addr = prom_addr; +} + +static void dummy_fdc_tc(void *opaque, int irq, int level) +{ +} + +static void sun4m_hw_init(MachineState *machine) +{ + const struct sun4m_hwdef *hwdef = SUN4M_MACHINE_GET_CLASS(machine)->hwdef; + DeviceState *slavio_intctl; + unsigned int i; + Nvram *nvram; + qemu_irq *cpu_irqs[MAX_CPUS], slavio_irq[32], slavio_cpu_irq[MAX_CPUS]; + qemu_irq fdc_tc; + unsigned long kernel_size; + uint32_t initrd_size; + DriveInfo *fd[MAX_FD]; + FWCfgState *fw_cfg; + DeviceState *dev, *ms_kb_orgate, *serial_orgate; + SysBusDevice *s; + unsigned int smp_cpus = machine->smp.cpus; + unsigned int max_cpus = machine->smp.max_cpus; + Object *ram_memdev = object_resolve_path_type(machine->ram_memdev_id, + TYPE_MEMORY_BACKEND, NULL); + NICInfo *nd = &nd_table[0]; + + if (machine->ram_size > hwdef->max_mem) { + error_report("Too much memory for this machine: %" PRId64 "," + " maximum %" PRId64, + machine->ram_size / MiB, hwdef->max_mem / MiB); + exit(1); + } + + /* init CPUs */ + for(i = 0; i < smp_cpus; i++) { + cpu_devinit(machine->cpu_type, i, hwdef->slavio_base, &cpu_irqs[i]); + } + + for (i = smp_cpus; i < MAX_CPUS; i++) + cpu_irqs[i] = qemu_allocate_irqs(dummy_cpu_set_irq, NULL, MAX_PILS); + + /* Create and map RAM frontend */ + dev = qdev_new("memory"); + object_property_set_link(OBJECT(dev), "memdev", ram_memdev, &error_fatal); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, 0); + + /* models without ECC don't trap when missing ram is accessed */ + if (!hwdef->ecc_base) { + empty_slot_init("ecc", machine->ram_size, + hwdef->max_mem - machine->ram_size); + } + + prom_init(hwdef->slavio_base, machine->firmware); + + slavio_intctl = slavio_intctl_init(hwdef->intctl_base, + hwdef->intctl_base + 0x10000ULL, + cpu_irqs); + + for (i = 0; i < 32; i++) { + slavio_irq[i] = qdev_get_gpio_in(slavio_intctl, i); + } + for (i = 0; i < MAX_CPUS; i++) { + slavio_cpu_irq[i] = qdev_get_gpio_in(slavio_intctl, 32 + i); + } + + if (hwdef->idreg_base) { + idreg_init(hwdef->idreg_base); + } + + if (hwdef->afx_base) { + afx_init(hwdef->afx_base); + } + + iommu_init(hwdef->iommu_base, hwdef->iommu_version, slavio_irq[30]); + + if (hwdef->iommu_pad_base) { + /* On the real hardware (SS-5, LX) the MMU is not padded, but aliased. + Software shouldn't use aliased addresses, neither should it crash + when does. 
Using empty_slot instead of aliasing can help with + debugging such accesses */ + empty_slot_init("iommu.alias", + hwdef->iommu_pad_base, hwdef->iommu_pad_len); + } + + qemu_check_nic_model(nd, TYPE_LANCE); + sparc32_dma_init(hwdef->dma_base, + hwdef->esp_base, slavio_irq[18], + hwdef->le_base, slavio_irq[16], nd); + + if (graphic_depth != 8 && graphic_depth != 24) { + error_report("Unsupported depth: %d", graphic_depth); + exit (1); + } + if (vga_interface_type != VGA_NONE) { + if (vga_interface_type == VGA_CG3) { + if (graphic_depth != 8) { + error_report("Unsupported depth: %d", graphic_depth); + exit(1); + } + + if (!(graphic_width == 1024 && graphic_height == 768) && + !(graphic_width == 1152 && graphic_height == 900)) { + error_report("Unsupported resolution: %d x %d", graphic_width, + graphic_height); + exit(1); + } + + /* sbus irq 5 */ + cg3_init(hwdef->tcx_base, slavio_irq[11], 0x00100000, + graphic_width, graphic_height, graphic_depth); + } else { + /* If no display specified, default to TCX */ + if (graphic_depth != 8 && graphic_depth != 24) { + error_report("Unsupported depth: %d", graphic_depth); + exit(1); + } + + if (!(graphic_width == 1024 && graphic_height == 768)) { + error_report("Unsupported resolution: %d x %d", + graphic_width, graphic_height); + exit(1); + } + + tcx_init(hwdef->tcx_base, slavio_irq[11], 0x00100000, + graphic_width, graphic_height, graphic_depth); + } + } + + for (i = 0; i < MAX_VSIMMS; i++) { + /* vsimm registers probed by OBP */ + if (hwdef->vsimm[i].reg_base) { + char *name = g_strdup_printf("vsimm[%d]", i); + empty_slot_init(name, hwdef->vsimm[i].reg_base, 0x2000); + g_free(name); + } + } + + if (hwdef->sx_base) { + create_unimplemented_device("sun-sx", hwdef->sx_base, 0x2000); + } + + dev = qdev_new("sysbus-m48t08"); + qdev_prop_set_int32(dev, "base-year", 1968); + s = SYS_BUS_DEVICE(dev); + sysbus_realize_and_unref(s, &error_fatal); + sysbus_connect_irq(s, 0, slavio_irq[0]); + sysbus_mmio_map(s, 0, hwdef->nvram_base); + nvram = NVRAM(dev); + + slavio_timer_init_all(hwdef->counter_base, slavio_irq[19], slavio_cpu_irq, smp_cpus); + + /* Slavio TTYA (base+4, Linux ttyS0) is the first QEMU serial device + Slavio TTYB (base+0, Linux ttyS1) is the second QEMU serial device */ + dev = qdev_new(TYPE_ESCC); + qdev_prop_set_uint32(dev, "disabled", !machine->enable_graphics); + qdev_prop_set_uint32(dev, "frequency", ESCC_CLOCK); + qdev_prop_set_uint32(dev, "it_shift", 1); + qdev_prop_set_chr(dev, "chrB", NULL); + qdev_prop_set_chr(dev, "chrA", NULL); + qdev_prop_set_uint32(dev, "chnBtype", escc_mouse); + qdev_prop_set_uint32(dev, "chnAtype", escc_kbd); + s = SYS_BUS_DEVICE(dev); + sysbus_realize_and_unref(s, &error_fatal); + sysbus_mmio_map(s, 0, hwdef->ms_kb_base); + + /* Logically OR both its IRQs together */ + ms_kb_orgate = DEVICE(object_new(TYPE_OR_IRQ)); + object_property_set_int(OBJECT(ms_kb_orgate), "num-lines", 2, &error_fatal); + qdev_realize_and_unref(ms_kb_orgate, NULL, &error_fatal); + sysbus_connect_irq(s, 0, qdev_get_gpio_in(ms_kb_orgate, 0)); + sysbus_connect_irq(s, 1, qdev_get_gpio_in(ms_kb_orgate, 1)); + qdev_connect_gpio_out(DEVICE(ms_kb_orgate), 0, slavio_irq[14]); + + dev = qdev_new(TYPE_ESCC); + qdev_prop_set_uint32(dev, "disabled", 0); + qdev_prop_set_uint32(dev, "frequency", ESCC_CLOCK); + qdev_prop_set_uint32(dev, "it_shift", 1); + qdev_prop_set_chr(dev, "chrB", serial_hd(1)); + qdev_prop_set_chr(dev, "chrA", serial_hd(0)); + qdev_prop_set_uint32(dev, "chnBtype", escc_serial); + qdev_prop_set_uint32(dev, "chnAtype", escc_serial); + 
+ s = SYS_BUS_DEVICE(dev); + sysbus_realize_and_unref(s, &error_fatal); + sysbus_mmio_map(s, 0, hwdef->serial_base); + + /* Logically OR both its IRQs together */ + serial_orgate = DEVICE(object_new(TYPE_OR_IRQ)); + object_property_set_int(OBJECT(serial_orgate), "num-lines", 2, + &error_fatal); + qdev_realize_and_unref(serial_orgate, NULL, &error_fatal); + sysbus_connect_irq(s, 0, qdev_get_gpio_in(serial_orgate, 0)); + sysbus_connect_irq(s, 1, qdev_get_gpio_in(serial_orgate, 1)); + qdev_connect_gpio_out(DEVICE(serial_orgate), 0, slavio_irq[15]); + + if (hwdef->apc_base) { + apc_init(hwdef->apc_base, qemu_allocate_irq(cpu_halt_signal, NULL, 0)); + } + + if (hwdef->fd_base) { + /* there is zero or one floppy drive */ + memset(fd, 0, sizeof(fd)); + fd[0] = drive_get(IF_FLOPPY, 0, 0); + sun4m_fdctrl_init(slavio_irq[22], hwdef->fd_base, fd, + &fdc_tc); + } else { + fdc_tc = qemu_allocate_irq(dummy_fdc_tc, NULL, 0); + } + + slavio_misc_init(hwdef->slavio_base, hwdef->aux1_base, hwdef->aux2_base, + slavio_irq[30], fdc_tc); + + if (hwdef->cs_base) { + sysbus_create_simple("sun-CS4231", hwdef->cs_base, + slavio_irq[5]); + } + + if (hwdef->dbri_base) { + /* ISDN chip with attached CS4215 audio codec */ + /* prom space */ + create_unimplemented_device("sun-DBRI.prom", + hwdef->dbri_base + 0x1000, 0x30); + /* reg space */ + create_unimplemented_device("sun-DBRI", + hwdef->dbri_base + 0x10000, 0x100); + } + + if (hwdef->bpp_base) { + /* parallel port */ + create_unimplemented_device("sun-bpp", hwdef->bpp_base, 0x20); + } + + initrd_size = 0; + kernel_size = sun4m_load_kernel(machine->kernel_filename, + machine->initrd_filename, + machine->ram_size, &initrd_size); + + nvram_init(nvram, (uint8_t *)&nd->macaddr, machine->kernel_cmdline, + machine->boot_order, machine->ram_size, kernel_size, + graphic_width, graphic_height, graphic_depth, + hwdef->nvram_machine_id, "Sun4m"); + + if (hwdef->ecc_base) + ecc_init(hwdef->ecc_base, slavio_irq[28], + hwdef->ecc_version); + + dev = qdev_new(TYPE_FW_CFG_MEM); + fw_cfg = FW_CFG(dev); + qdev_prop_set_uint32(dev, "data_width", 1); + qdev_prop_set_bit(dev, "dma_enabled", false); + object_property_add_child(OBJECT(qdev_get_machine()), TYPE_FW_CFG, + OBJECT(fw_cfg)); + s = SYS_BUS_DEVICE(dev); + sysbus_realize_and_unref(s, &error_fatal); + sysbus_mmio_map(s, 0, CFG_ADDR); + sysbus_mmio_map(s, 1, CFG_ADDR + 2); + + fw_cfg_add_i16(fw_cfg, FW_CFG_NB_CPUS, (uint16_t)smp_cpus); + fw_cfg_add_i16(fw_cfg, FW_CFG_MAX_CPUS, (uint16_t)max_cpus); + fw_cfg_add_i64(fw_cfg, FW_CFG_RAM_SIZE, (uint64_t)machine->ram_size); + fw_cfg_add_i16(fw_cfg, FW_CFG_MACHINE_ID, hwdef->machine_id); + fw_cfg_add_i16(fw_cfg, FW_CFG_SUN4M_DEPTH, graphic_depth); + fw_cfg_add_i16(fw_cfg, FW_CFG_SUN4M_WIDTH, graphic_width); + fw_cfg_add_i16(fw_cfg, FW_CFG_SUN4M_HEIGHT, graphic_height); + fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_ADDR, KERNEL_LOAD_ADDR); + fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_SIZE, kernel_size); + if (machine->kernel_cmdline) { + fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_CMDLINE, CMDLINE_ADDR); + pstrcpy_targphys("cmdline", CMDLINE_ADDR, TARGET_PAGE_SIZE, + machine->kernel_cmdline); + fw_cfg_add_string(fw_cfg, FW_CFG_CMDLINE_DATA, machine->kernel_cmdline); + fw_cfg_add_i32(fw_cfg, FW_CFG_CMDLINE_SIZE, + strlen(machine->kernel_cmdline) + 1); + } else { + fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_CMDLINE, 0); + fw_cfg_add_i32(fw_cfg, FW_CFG_CMDLINE_SIZE, 0); + } + fw_cfg_add_i32(fw_cfg, FW_CFG_INITRD_ADDR, INITRD_LOAD_ADDR); + fw_cfg_add_i32(fw_cfg, FW_CFG_INITRD_SIZE, initrd_size); + fw_cfg_add_i16(fw_cfg, 
FW_CFG_BOOT_DEVICE, machine->boot_order[0]); + qemu_register_boot_set(fw_cfg_boot_set, fw_cfg); +} + +enum { + ss5_id = 32, + vger_id, + lx_id, + ss4_id, + scls_id, + sbook_id, + ss10_id = 64, + ss20_id, + ss600mp_id, +}; + +static void sun4m_machine_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + + mc->init = sun4m_hw_init; + mc->block_default_type = IF_SCSI; + mc->default_boot_order = "c"; + mc->default_display = "tcx"; + mc->default_ram_id = "sun4m.ram"; +} + +static void ss5_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + Sun4mMachineClass *smc = SUN4M_MACHINE_CLASS(mc); + static const struct sun4m_hwdef ss5_hwdef = { + .iommu_base = 0x10000000, + .iommu_pad_base = 0x10004000, + .iommu_pad_len = 0x0fffb000, + .tcx_base = 0x50000000, + .cs_base = 0x6c000000, + .slavio_base = 0x70000000, + .ms_kb_base = 0x71000000, + .serial_base = 0x71100000, + .nvram_base = 0x71200000, + .fd_base = 0x71400000, + .counter_base = 0x71d00000, + .intctl_base = 0x71e00000, + .idreg_base = 0x78000000, + .dma_base = 0x78400000, + .esp_base = 0x78800000, + .le_base = 0x78c00000, + .apc_base = 0x6a000000, + .afx_base = 0x6e000000, + .aux1_base = 0x71900000, + .aux2_base = 0x71910000, + .nvram_machine_id = 0x80, + .machine_id = ss5_id, + .iommu_version = 0x05000000, + .max_mem = 0x10000000, + }; + + mc->desc = "Sun4m platform, SPARCstation 5"; + mc->is_default = true; + mc->default_cpu_type = SPARC_CPU_TYPE_NAME("Fujitsu-MB86904"); + smc->hwdef = &ss5_hwdef; +} + +static void ss10_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + Sun4mMachineClass *smc = SUN4M_MACHINE_CLASS(mc); + static const struct sun4m_hwdef ss10_hwdef = { + .iommu_base = 0xfe0000000ULL, + .tcx_base = 0xe20000000ULL, + .slavio_base = 0xff0000000ULL, + .ms_kb_base = 0xff1000000ULL, + .serial_base = 0xff1100000ULL, + .nvram_base = 0xff1200000ULL, + .fd_base = 0xff1700000ULL, + .counter_base = 0xff1300000ULL, + .intctl_base = 0xff1400000ULL, + .idreg_base = 0xef0000000ULL, + .dma_base = 0xef0400000ULL, + .esp_base = 0xef0800000ULL, + .le_base = 0xef0c00000ULL, + .apc_base = 0xefa000000ULL, /* XXX should not exist */ + .aux1_base = 0xff1800000ULL, + .aux2_base = 0xff1a01000ULL, + .ecc_base = 0xf00000000ULL, + .ecc_version = 0x10000000, /* version 0, implementation 1 */ + .nvram_machine_id = 0x72, + .machine_id = ss10_id, + .iommu_version = 0x03000000, + .max_mem = 0xf00000000ULL, + }; + + mc->desc = "Sun4m platform, SPARCstation 10"; + mc->max_cpus = 4; + mc->default_cpu_type = SPARC_CPU_TYPE_NAME("TI-SuperSparc-II"); + smc->hwdef = &ss10_hwdef; +} + +static void ss600mp_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + Sun4mMachineClass *smc = SUN4M_MACHINE_CLASS(mc); + static const struct sun4m_hwdef ss600mp_hwdef = { + .iommu_base = 0xfe0000000ULL, + .tcx_base = 0xe20000000ULL, + .slavio_base = 0xff0000000ULL, + .ms_kb_base = 0xff1000000ULL, + .serial_base = 0xff1100000ULL, + .nvram_base = 0xff1200000ULL, + .counter_base = 0xff1300000ULL, + .intctl_base = 0xff1400000ULL, + .dma_base = 0xef0081000ULL, + .esp_base = 0xef0080000ULL, + .le_base = 0xef0060000ULL, + .apc_base = 0xefa000000ULL, /* XXX should not exist */ + .aux1_base = 0xff1800000ULL, + .aux2_base = 0xff1a01000ULL, /* XXX should not exist */ + .ecc_base = 0xf00000000ULL, + .ecc_version = 0x00000000, /* version 0, implementation 0 */ + .nvram_machine_id = 0x71, + .machine_id = ss600mp_id, + .iommu_version = 0x01000000, + .max_mem = 
0xf00000000ULL, + }; + + mc->desc = "Sun4m platform, SPARCserver 600MP"; + mc->max_cpus = 4; + mc->default_cpu_type = SPARC_CPU_TYPE_NAME("TI-SuperSparc-II"); + smc->hwdef = &ss600mp_hwdef; +} + +static void ss20_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + Sun4mMachineClass *smc = SUN4M_MACHINE_CLASS(mc); + static const struct sun4m_hwdef ss20_hwdef = { + .iommu_base = 0xfe0000000ULL, + .tcx_base = 0xe20000000ULL, + .slavio_base = 0xff0000000ULL, + .ms_kb_base = 0xff1000000ULL, + .serial_base = 0xff1100000ULL, + .nvram_base = 0xff1200000ULL, + .fd_base = 0xff1700000ULL, + .counter_base = 0xff1300000ULL, + .intctl_base = 0xff1400000ULL, + .idreg_base = 0xef0000000ULL, + .dma_base = 0xef0400000ULL, + .esp_base = 0xef0800000ULL, + .le_base = 0xef0c00000ULL, + .bpp_base = 0xef4800000ULL, + .apc_base = 0xefa000000ULL, /* XXX should not exist */ + .aux1_base = 0xff1800000ULL, + .aux2_base = 0xff1a01000ULL, + .dbri_base = 0xee0000000ULL, + .sx_base = 0xf80000000ULL, + .vsimm = { + { + .reg_base = 0x9c000000ULL, + .vram_base = 0xfc000000ULL + }, { + .reg_base = 0x90000000ULL, + .vram_base = 0xf0000000ULL + }, { + .reg_base = 0x94000000ULL + }, { + .reg_base = 0x98000000ULL + } + }, + .ecc_base = 0xf00000000ULL, + .ecc_version = 0x20000000, /* version 0, implementation 2 */ + .nvram_machine_id = 0x72, + .machine_id = ss20_id, + .iommu_version = 0x13000000, + .max_mem = 0xf00000000ULL, + }; + + mc->desc = "Sun4m platform, SPARCstation 20"; + mc->max_cpus = 4; + mc->default_cpu_type = SPARC_CPU_TYPE_NAME("TI-SuperSparc-II"); + smc->hwdef = &ss20_hwdef; +} + +static void voyager_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + Sun4mMachineClass *smc = SUN4M_MACHINE_CLASS(mc); + static const struct sun4m_hwdef voyager_hwdef = { + .iommu_base = 0x10000000, + .tcx_base = 0x50000000, + .slavio_base = 0x70000000, + .ms_kb_base = 0x71000000, + .serial_base = 0x71100000, + .nvram_base = 0x71200000, + .fd_base = 0x71400000, + .counter_base = 0x71d00000, + .intctl_base = 0x71e00000, + .idreg_base = 0x78000000, + .dma_base = 0x78400000, + .esp_base = 0x78800000, + .le_base = 0x78c00000, + .apc_base = 0x71300000, /* pmc */ + .aux1_base = 0x71900000, + .aux2_base = 0x71910000, + .nvram_machine_id = 0x80, + .machine_id = vger_id, + .iommu_version = 0x05000000, + .max_mem = 0x10000000, + }; + + mc->desc = "Sun4m platform, SPARCstation Voyager"; + mc->default_cpu_type = SPARC_CPU_TYPE_NAME("Fujitsu-MB86904"); + smc->hwdef = &voyager_hwdef; +} + +static void ss_lx_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + Sun4mMachineClass *smc = SUN4M_MACHINE_CLASS(mc); + static const struct sun4m_hwdef ss_lx_hwdef = { + .iommu_base = 0x10000000, + .iommu_pad_base = 0x10004000, + .iommu_pad_len = 0x0fffb000, + .tcx_base = 0x50000000, + .slavio_base = 0x70000000, + .ms_kb_base = 0x71000000, + .serial_base = 0x71100000, + .nvram_base = 0x71200000, + .fd_base = 0x71400000, + .counter_base = 0x71d00000, + .intctl_base = 0x71e00000, + .idreg_base = 0x78000000, + .dma_base = 0x78400000, + .esp_base = 0x78800000, + .le_base = 0x78c00000, + .aux1_base = 0x71900000, + .aux2_base = 0x71910000, + .nvram_machine_id = 0x80, + .machine_id = lx_id, + .iommu_version = 0x04000000, + .max_mem = 0x10000000, + }; + + mc->desc = "Sun4m platform, SPARCstation LX"; + mc->default_cpu_type = SPARC_CPU_TYPE_NAME("TI-MicroSparc-I"); + smc->hwdef = &ss_lx_hwdef; +} + +static void ss4_class_init(ObjectClass *oc, void *data) +{ + MachineClass 
*mc = MACHINE_CLASS(oc); + Sun4mMachineClass *smc = SUN4M_MACHINE_CLASS(mc); + static const struct sun4m_hwdef ss4_hwdef = { + .iommu_base = 0x10000000, + .tcx_base = 0x50000000, + .cs_base = 0x6c000000, + .slavio_base = 0x70000000, + .ms_kb_base = 0x71000000, + .serial_base = 0x71100000, + .nvram_base = 0x71200000, + .fd_base = 0x71400000, + .counter_base = 0x71d00000, + .intctl_base = 0x71e00000, + .idreg_base = 0x78000000, + .dma_base = 0x78400000, + .esp_base = 0x78800000, + .le_base = 0x78c00000, + .apc_base = 0x6a000000, + .aux1_base = 0x71900000, + .aux2_base = 0x71910000, + .nvram_machine_id = 0x80, + .machine_id = ss4_id, + .iommu_version = 0x05000000, + .max_mem = 0x10000000, + }; + + mc->desc = "Sun4m platform, SPARCstation 4"; + mc->default_cpu_type = SPARC_CPU_TYPE_NAME("Fujitsu-MB86904"); + smc->hwdef = &ss4_hwdef; +} + +static void scls_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + Sun4mMachineClass *smc = SUN4M_MACHINE_CLASS(mc); + static const struct sun4m_hwdef scls_hwdef = { + .iommu_base = 0x10000000, + .tcx_base = 0x50000000, + .slavio_base = 0x70000000, + .ms_kb_base = 0x71000000, + .serial_base = 0x71100000, + .nvram_base = 0x71200000, + .fd_base = 0x71400000, + .counter_base = 0x71d00000, + .intctl_base = 0x71e00000, + .idreg_base = 0x78000000, + .dma_base = 0x78400000, + .esp_base = 0x78800000, + .le_base = 0x78c00000, + .apc_base = 0x6a000000, + .aux1_base = 0x71900000, + .aux2_base = 0x71910000, + .nvram_machine_id = 0x80, + .machine_id = scls_id, + .iommu_version = 0x05000000, + .max_mem = 0x10000000, + }; + + mc->desc = "Sun4m platform, SPARCClassic"; + mc->default_cpu_type = SPARC_CPU_TYPE_NAME("TI-MicroSparc-I"); + smc->hwdef = &scls_hwdef; +} + +static void sbook_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + Sun4mMachineClass *smc = SUN4M_MACHINE_CLASS(mc); + static const struct sun4m_hwdef sbook_hwdef = { + .iommu_base = 0x10000000, + .tcx_base = 0x50000000, /* XXX */ + .slavio_base = 0x70000000, + .ms_kb_base = 0x71000000, + .serial_base = 0x71100000, + .nvram_base = 0x71200000, + .fd_base = 0x71400000, + .counter_base = 0x71d00000, + .intctl_base = 0x71e00000, + .idreg_base = 0x78000000, + .dma_base = 0x78400000, + .esp_base = 0x78800000, + .le_base = 0x78c00000, + .apc_base = 0x6a000000, + .aux1_base = 0x71900000, + .aux2_base = 0x71910000, + .nvram_machine_id = 0x80, + .machine_id = sbook_id, + .iommu_version = 0x05000000, + .max_mem = 0x10000000, + }; + + mc->desc = "Sun4m platform, SPARCbook"; + mc->default_cpu_type = SPARC_CPU_TYPE_NAME("TI-MicroSparc-I"); + smc->hwdef = &sbook_hwdef; +} + +static const TypeInfo sun4m_machine_types[] = { + { + .name = MACHINE_TYPE_NAME("SS-5"), + .parent = TYPE_SUN4M_MACHINE, + .class_init = ss5_class_init, + }, { + .name = MACHINE_TYPE_NAME("SS-10"), + .parent = TYPE_SUN4M_MACHINE, + .class_init = ss10_class_init, + }, { + .name = MACHINE_TYPE_NAME("SS-600MP"), + .parent = TYPE_SUN4M_MACHINE, + .class_init = ss600mp_class_init, + }, { + .name = MACHINE_TYPE_NAME("SS-20"), + .parent = TYPE_SUN4M_MACHINE, + .class_init = ss20_class_init, + }, { + .name = MACHINE_TYPE_NAME("Voyager"), + .parent = TYPE_SUN4M_MACHINE, + .class_init = voyager_class_init, + }, { + .name = MACHINE_TYPE_NAME("LX"), + .parent = TYPE_SUN4M_MACHINE, + .class_init = ss_lx_class_init, + }, { + .name = MACHINE_TYPE_NAME("SS-4"), + .parent = TYPE_SUN4M_MACHINE, + .class_init = ss4_class_init, + }, { + .name = MACHINE_TYPE_NAME("SPARCClassic"), + .parent = 
TYPE_SUN4M_MACHINE, + .class_init = scls_class_init, + }, { + .name = MACHINE_TYPE_NAME("SPARCbook"), + .parent = TYPE_SUN4M_MACHINE, + .class_init = sbook_class_init, + }, { + .name = TYPE_SUN4M_MACHINE, + .parent = TYPE_MACHINE, + .class_size = sizeof(Sun4mMachineClass), + .class_init = sun4m_machine_class_init, + .abstract = true, + } +}; + +DEFINE_TYPES(sun4m_machine_types) + +static void sun4m_register_types(void) +{ + type_register_static(&idreg_info); + type_register_static(&afx_info); + type_register_static(&prom_info); + type_register_static(&ram_info); +} + +type_init(sun4m_register_types) diff --git a/hw/sparc/sun4m_iommu.c b/hw/sparc/sun4m_iommu.c new file mode 100644 index 000000000..71f546524 --- /dev/null +++ b/hw/sparc/sun4m_iommu.c @@ -0,0 +1,412 @@ +/* + * QEMU Sun4m iommu emulation + * + * Copyright (c) 2003-2005 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include "hw/irq.h" +#include "hw/qdev-properties.h" +#include "hw/sparc/sun4m_iommu.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "qemu/module.h" +#include "exec/address-spaces.h" +#include "trace.h" + +/* + * I/O MMU used by Sun4m systems + * + * Chipset docs: + * "Sun-4M System Architecture (revision 2.0) by Chuck Narad", 950-1373-01, + * http://mediacast.sun.com/users/Barton808/media/Sun4M_SystemArchitecture_edited2.pdf + */ + +#define IOMMU_CTRL (0x0000 >> 2) +#define IOMMU_CTRL_IMPL 0xf0000000 /* Implementation */ +#define IOMMU_CTRL_VERS 0x0f000000 /* Version */ +#define IOMMU_CTRL_RNGE 0x0000001c /* Mapping RANGE */ +#define IOMMU_RNGE_16MB 0x00000000 /* 0xff000000 -> 0xffffffff */ +#define IOMMU_RNGE_32MB 0x00000004 /* 0xfe000000 -> 0xffffffff */ +#define IOMMU_RNGE_64MB 0x00000008 /* 0xfc000000 -> 0xffffffff */ +#define IOMMU_RNGE_128MB 0x0000000c /* 0xf8000000 -> 0xffffffff */ +#define IOMMU_RNGE_256MB 0x00000010 /* 0xf0000000 -> 0xffffffff */ +#define IOMMU_RNGE_512MB 0x00000014 /* 0xe0000000 -> 0xffffffff */ +#define IOMMU_RNGE_1GB 0x00000018 /* 0xc0000000 -> 0xffffffff */ +#define IOMMU_RNGE_2GB 0x0000001c /* 0x80000000 -> 0xffffffff */ +#define IOMMU_CTRL_ENAB 0x00000001 /* IOMMU Enable */ +#define IOMMU_CTRL_MASK 0x0000001d + +#define IOMMU_BASE (0x0004 >> 2) +#define IOMMU_BASE_MASK 0x07fffc00 + +#define IOMMU_TLBFLUSH (0x0014 >> 2) +#define IOMMU_TLBFLUSH_MASK 0xffffffff + +#define IOMMU_PGFLUSH (0x0018 >> 2) +#define IOMMU_PGFLUSH_MASK 0xffffffff + +#define IOMMU_AFSR (0x1000 >> 2) +#define IOMMU_AFSR_ERR 0x80000000 /* LE, TO, or BE asserted */ +#define IOMMU_AFSR_LE 0x40000000 /* SBUS reports error after + transaction */ +#define IOMMU_AFSR_TO 0x20000000 /* Write access took more than + 12.8 us. 
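+ Neither this bit nor BE is ever raised by the fault + path modelled below: iommu_bad_addr() only reports + LE-type errors, so TO and BE remain plain + guest-writable status bits.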
*/ +#define IOMMU_AFSR_BE 0x10000000 /* Write access received error + acknowledge */ +#define IOMMU_AFSR_SIZE 0x0e000000 /* Size of transaction causing error */ +#define IOMMU_AFSR_S 0x01000000 /* Sparc was in supervisor mode */ +#define IOMMU_AFSR_RESV 0x00800000 /* Reserved, forced to 0x8 by + hardware */ +#define IOMMU_AFSR_ME 0x00080000 /* Multiple errors occurred */ +#define IOMMU_AFSR_RD 0x00040000 /* A read operation was in progress */ +#define IOMMU_AFSR_FAV 0x00020000 /* IOMMU afar has valid contents */ +#define IOMMU_AFSR_MASK 0xff0fffff + +#define IOMMU_AFAR (0x1004 >> 2) + +#define IOMMU_AER (0x1008 >> 2) /* Arbiter Enable Register */ +#define IOMMU_AER_EN_P0_ARB 0x00000001 /* MBus master 0x8 (Always 1) */ +#define IOMMU_AER_EN_P1_ARB 0x00000002 /* MBus master 0x9 */ +#define IOMMU_AER_EN_P2_ARB 0x00000004 /* MBus master 0xa */ +#define IOMMU_AER_EN_P3_ARB 0x00000008 /* MBus master 0xb */ +#define IOMMU_AER_EN_0 0x00010000 /* SBus slot 0 */ +#define IOMMU_AER_EN_1 0x00020000 /* SBus slot 1 */ +#define IOMMU_AER_EN_2 0x00040000 /* SBus slot 2 */ +#define IOMMU_AER_EN_3 0x00080000 /* SBus slot 3 */ +#define IOMMU_AER_EN_F 0x00100000 /* SBus on-board */ +#define IOMMU_AER_SBW 0x80000000 /* S-to-M asynchronous writes */ +#define IOMMU_AER_MASK 0x801f000f + +#define IOMMU_SBCFG0 (0x1010 >> 2) /* SBUS configuration per-slot */ +#define IOMMU_SBCFG1 (0x1014 >> 2) /* SBUS configuration per-slot */ +#define IOMMU_SBCFG2 (0x1018 >> 2) /* SBUS configuration per-slot */ +#define IOMMU_SBCFG3 (0x101c >> 2) /* SBUS configuration per-slot */ +#define IOMMU_SBCFG_SAB30 0x00010000 /* Phys-address bit 30 when + bypass enabled */ +#define IOMMU_SBCFG_BA16 0x00000004 /* Slave supports 16 byte bursts */ +#define IOMMU_SBCFG_BA8 0x00000002 /* Slave supports 8 byte bursts */ +#define IOMMU_SBCFG_BYPASS 0x00000001 /* Bypass IOMMU, treat all addresses + produced by this device as pure + physical. 
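+ When bypass is off, a DVMA address is instead + translated through the in-memory IOPTE table (see + iommu_page_get_flags() and iommu_translate_pa() + below): the table base is regs[IOMMU_BASE] << 4, one + 32-bit IOPTE describes each 4 KiB page, and the output + is ((pte & IOPTE_PAGE) << 4) plus the page offset. + Worked example (illustrative values): with + regs[IOMMU_BASE] = 0x00010000 the table starts at + 0x100000, and the page at DVMA offset 0x2000 is + described by the IOPTE at 0x100008.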
*/ +#define IOMMU_SBCFG_MASK 0x00010003 + +#define IOMMU_ARBEN (0x2000 >> 2) /* SBUS arbitration enable */ +#define IOMMU_ARBEN_MASK 0x001f0000 +#define IOMMU_MID 0x00000008 + +#define IOMMU_MASK_ID (0x3018 >> 2) /* Mask ID */ +#define IOMMU_MASK_ID_MASK 0x00ffffff + +#define IOMMU_MSII_MASK 0x26000000 /* microSPARC II mask number */ +#define IOMMU_TS_MASK 0x23000000 /* turboSPARC mask number */ + +/* The format of an iopte in the page tables */ +#define IOPTE_PAGE 0xffffff00 /* Physical page number (PA[35:12]) */ +#define IOPTE_CACHE 0x00000080 /* Cached (in vme IOCACHE or + Viking/MXCC) */ +#define IOPTE_WRITE 0x00000004 /* Writable */ +#define IOPTE_VALID 0x00000002 /* IOPTE is valid */ +#define IOPTE_WAZ 0x00000001 /* Write as zeros */ + +#define IOMMU_PAGE_SHIFT 12 +#define IOMMU_PAGE_SIZE (1 << IOMMU_PAGE_SHIFT) +#define IOMMU_PAGE_MASK (~(IOMMU_PAGE_SIZE - 1)) + +static uint64_t iommu_mem_read(void *opaque, hwaddr addr, + unsigned size) +{ + IOMMUState *s = opaque; + hwaddr saddr; + uint32_t ret; + + saddr = addr >> 2; + switch (saddr) { + default: + ret = s->regs[saddr]; + break; + case IOMMU_AFAR: + case IOMMU_AFSR: + ret = s->regs[saddr]; + qemu_irq_lower(s->irq); + break; + } + trace_sun4m_iommu_mem_readl(saddr, ret); + return ret; +} + +static void iommu_mem_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + IOMMUState *s = opaque; + hwaddr saddr; + + saddr = addr >> 2; + trace_sun4m_iommu_mem_writel(saddr, val); + switch (saddr) { + case IOMMU_CTRL: + switch (val & IOMMU_CTRL_RNGE) { + case IOMMU_RNGE_16MB: + s->iostart = 0xffffffffff000000ULL; + break; + case IOMMU_RNGE_32MB: + s->iostart = 0xfffffffffe000000ULL; + break; + case IOMMU_RNGE_64MB: + s->iostart = 0xfffffffffc000000ULL; + break; + case IOMMU_RNGE_128MB: + s->iostart = 0xfffffffff8000000ULL; + break; + case IOMMU_RNGE_256MB: + s->iostart = 0xfffffffff0000000ULL; + break; + case IOMMU_RNGE_512MB: + s->iostart = 0xffffffffe0000000ULL; + break; + case IOMMU_RNGE_1GB: + s->iostart = 0xffffffffc0000000ULL; + break; + default: + case IOMMU_RNGE_2GB: + s->iostart = 0xffffffff80000000ULL; + break; + } + trace_sun4m_iommu_mem_writel_ctrl(s->iostart); + s->regs[saddr] = ((val & IOMMU_CTRL_MASK) | s->version); + break; + case IOMMU_BASE: + s->regs[saddr] = val & IOMMU_BASE_MASK; + break; + case IOMMU_TLBFLUSH: + trace_sun4m_iommu_mem_writel_tlbflush(val); + s->regs[saddr] = val & IOMMU_TLBFLUSH_MASK; + break; + case IOMMU_PGFLUSH: + trace_sun4m_iommu_mem_writel_pgflush(val); + s->regs[saddr] = val & IOMMU_PGFLUSH_MASK; + break; + case IOMMU_AFAR: + s->regs[saddr] = val; + qemu_irq_lower(s->irq); + break; + case IOMMU_AER: + s->regs[saddr] = (val & IOMMU_AER_MASK) | IOMMU_AER_EN_P0_ARB; + break; + case IOMMU_AFSR: + s->regs[saddr] = (val & IOMMU_AFSR_MASK) | IOMMU_AFSR_RESV; + qemu_irq_lower(s->irq); + break; + case IOMMU_SBCFG0: + case IOMMU_SBCFG1: + case IOMMU_SBCFG2: + case IOMMU_SBCFG3: + s->regs[saddr] = val & IOMMU_SBCFG_MASK; + break; + case IOMMU_ARBEN: + /* XXX implement SBus probing: fault when reading unmapped + addresses, fault cause and address stored to MMU/IOMMU */ + s->regs[saddr] = (val & IOMMU_ARBEN_MASK) | IOMMU_MID; + break; + case IOMMU_MASK_ID: + s->regs[saddr] |= val & IOMMU_MASK_ID_MASK; + break; + default: + s->regs[saddr] = val; + break; + } +} + +static const MemoryRegionOps iommu_mem_ops = { + .read = iommu_mem_read, + .write = iommu_mem_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +static uint32_t 
iommu_page_get_flags(IOMMUState *s, hwaddr addr) +{ + uint32_t ret; + hwaddr iopte; + hwaddr pa = addr; + + iopte = s->regs[IOMMU_BASE] << 4; + addr &= ~s->iostart; + iopte += (addr >> (IOMMU_PAGE_SHIFT - 2)) & ~3; + ret = address_space_ldl_be(&address_space_memory, iopte, + MEMTXATTRS_UNSPECIFIED, NULL); + trace_sun4m_iommu_page_get_flags(pa, iopte, ret); + return ret; +} + +static hwaddr iommu_translate_pa(hwaddr addr, + uint32_t pte) +{ + hwaddr pa; + + pa = ((pte & IOPTE_PAGE) << 4) + (addr & ~IOMMU_PAGE_MASK); + trace_sun4m_iommu_translate_pa(addr, pa, pte); + return pa; +} + +static void iommu_bad_addr(IOMMUState *s, hwaddr addr, + int is_write) +{ + trace_sun4m_iommu_bad_addr(addr); + s->regs[IOMMU_AFSR] = IOMMU_AFSR_ERR | IOMMU_AFSR_LE | IOMMU_AFSR_RESV | + IOMMU_AFSR_FAV; + if (!is_write) { + s->regs[IOMMU_AFSR] |= IOMMU_AFSR_RD; + } + s->regs[IOMMU_AFAR] = addr; + qemu_irq_raise(s->irq); +} + +/* Called from RCU critical section */ +static IOMMUTLBEntry sun4m_translate_iommu(IOMMUMemoryRegion *iommu, + hwaddr addr, + IOMMUAccessFlags flags, + int iommu_idx) +{ + IOMMUState *is = container_of(iommu, IOMMUState, iommu); + hwaddr page, pa; + int is_write = (flags & IOMMU_WO) ? 1 : 0; + uint32_t pte; + IOMMUTLBEntry ret = { + .target_as = &address_space_memory, + .iova = 0, + .translated_addr = 0, + .addr_mask = ~(hwaddr)0, + .perm = IOMMU_NONE, + }; + + page = addr & IOMMU_PAGE_MASK; + pte = iommu_page_get_flags(is, page); + if (!(pte & IOPTE_VALID)) { + iommu_bad_addr(is, page, is_write); + return ret; + } + + pa = iommu_translate_pa(addr, pte); + if (is_write && !(pte & IOPTE_WRITE)) { + iommu_bad_addr(is, page, is_write); + return ret; + } + + if (pte & IOPTE_WRITE) { + ret.perm = IOMMU_RW; + } else { + ret.perm = IOMMU_RO; + } + + ret.iova = page; + ret.translated_addr = pa; + ret.addr_mask = ~IOMMU_PAGE_MASK; + + return ret; +} + +static const VMStateDescription vmstate_iommu = { + .name = "iommu", + .version_id = 2, + .minimum_version_id = 2, + .fields = (VMStateField[]) { + VMSTATE_UINT32_ARRAY(regs, IOMMUState, IOMMU_NREGS), + VMSTATE_UINT64(iostart, IOMMUState), + VMSTATE_END_OF_LIST() + } +}; + +static void iommu_reset(DeviceState *d) +{ + IOMMUState *s = SUN4M_IOMMU(d); + + memset(s->regs, 0, IOMMU_NREGS * 4); + s->iostart = 0; + s->regs[IOMMU_CTRL] = s->version; + s->regs[IOMMU_ARBEN] = IOMMU_MID; + s->regs[IOMMU_AFSR] = IOMMU_AFSR_RESV; + s->regs[IOMMU_AER] = IOMMU_AER_EN_P0_ARB | IOMMU_AER_EN_P1_ARB; + s->regs[IOMMU_MASK_ID] = IOMMU_TS_MASK; +} + +static void iommu_init(Object *obj) +{ + IOMMUState *s = SUN4M_IOMMU(obj); + SysBusDevice *dev = SYS_BUS_DEVICE(obj); + + memory_region_init_iommu(&s->iommu, sizeof(s->iommu), + TYPE_SUN4M_IOMMU_MEMORY_REGION, OBJECT(dev), + "iommu-sun4m", UINT64_MAX); + address_space_init(&s->iommu_as, MEMORY_REGION(&s->iommu), "iommu-as"); + + sysbus_init_irq(dev, &s->irq); + + memory_region_init_io(&s->iomem, obj, &iommu_mem_ops, s, "iommu", + IOMMU_NREGS * sizeof(uint32_t)); + sysbus_init_mmio(dev, &s->iomem); +} + +static Property iommu_properties[] = { + DEFINE_PROP_UINT32("version", IOMMUState, version, 0), + DEFINE_PROP_END_OF_LIST(), +}; + +static void iommu_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = iommu_reset; + dc->vmsd = &vmstate_iommu; + device_class_set_props(dc, iommu_properties); +} + +static const TypeInfo iommu_info = { + .name = TYPE_SUN4M_IOMMU, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(IOMMUState), + .instance_init = iommu_init, + .class_init = 
iommu_class_init, +}; + +static void sun4m_iommu_memory_region_class_init(ObjectClass *klass, void *data) +{ + IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass); + + imrc->translate = sun4m_translate_iommu; +} + +static const TypeInfo sun4m_iommu_memory_region_info = { + .parent = TYPE_IOMMU_MEMORY_REGION, + .name = TYPE_SUN4M_IOMMU_MEMORY_REGION, + .class_init = sun4m_iommu_memory_region_class_init, +}; + +static void iommu_register_types(void) +{ + type_register_static(&iommu_info); + type_register_static(&sun4m_iommu_memory_region_info); +} + +type_init(iommu_register_types) diff --git a/hw/sparc/trace-events b/hw/sparc/trace-events new file mode 100644 index 000000000..00b0212c3 --- /dev/null +++ b/hw/sparc/trace-events @@ -0,0 +1,21 @@ +# See docs/devel/tracing.rst for syntax documentation. + +# sun4m.c +sun4m_cpu_set_irq_raise(int level) "Raise CPU IRQ %d" +sun4m_cpu_set_irq_lower(int level) "Lower CPU IRQ %d" + +# sun4m_iommu.c +sun4m_iommu_mem_readl(uint64_t addr, uint32_t ret) "read reg[0x%"PRIx64"] = 0x%x" +sun4m_iommu_mem_writel(uint64_t addr, uint32_t val) "write reg[0x%"PRIx64"] = 0x%x" +sun4m_iommu_mem_writel_ctrl(uint64_t iostart) "iostart = 0x%"PRIx64 +sun4m_iommu_mem_writel_tlbflush(uint32_t val) "tlb flush 0x%x" +sun4m_iommu_mem_writel_pgflush(uint32_t val) "page flush 0x%x" +sun4m_iommu_page_get_flags(uint64_t pa, uint64_t iopte, uint32_t ret) "get flags addr 0x%"PRIx64" => pte 0x%"PRIx64", *pte = 0x%x" +sun4m_iommu_translate_pa(uint64_t addr, uint64_t pa, uint32_t iopte) "xlate dva 0x%"PRIx64" => pa 0x%"PRIx64" iopte = 0x%x" +sun4m_iommu_bad_addr(uint64_t addr) "bad addr 0x%"PRIx64 + +# leon3.c +leon3_set_irq(int intno) "Set CPU IRQ %d" +leon3_reset_irq(int intno) "Reset CPU IRQ %d" +int_helper_icache_freeze(void) "Instruction cache: freeze" +int_helper_dcache_freeze(void) "Data cache: freeze" diff --git a/hw/sparc/trace.h b/hw/sparc/trace.h new file mode 100644 index 000000000..625d60ca7 --- /dev/null +++ b/hw/sparc/trace.h @@ -0,0 +1 @@ +#include "trace/trace-hw_sparc.h" diff --git a/hw/sparc64/Kconfig b/hw/sparc64/Kconfig new file mode 100644 index 000000000..7e557ad17 --- /dev/null +++ b/hw/sparc64/Kconfig @@ -0,0 +1,21 @@ +config SUN4U + bool + imply PCI_DEVICES + imply SUNHME + imply TEST_DEVICES + imply PARALLEL + select M48T59 + select ISA_BUS + select FDC_ISA + select SERIAL_ISA + select PCI_SABRE + select IDE_CMD646 + select PCKBD + select SIMBA + select CHRP_NVRAM + +config NIAGARA + bool + select EMPTY_SLOT + select SUN4V_RTC + select UNIMP diff --git a/hw/sparc64/meson.build b/hw/sparc64/meson.build new file mode 100644 index 000000000..58b550465 --- /dev/null +++ b/hw/sparc64/meson.build @@ -0,0 +1,6 @@ +sparc64_ss = ss.source_set() +sparc64_ss.add(files('sparc64.c')) +sparc64_ss.add(when: 'CONFIG_NIAGARA', if_true: files('niagara.c')) +sparc64_ss.add(when: 'CONFIG_SUN4U', if_true: files('sun4u.c', 'sun4u_iommu.c')) + +hw_arch += {'sparc64': sparc64_ss} diff --git a/hw/sparc64/niagara.c b/hw/sparc64/niagara.c new file mode 100644 index 000000000..f3e42d032 --- /dev/null +++ b/hw/sparc64/niagara.c @@ -0,0 +1,182 @@ +/* + * QEMU Sun4v/Niagara System Emulator + * + * Copyright (c) 2016 Artyom Tarasenko + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the 
Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "cpu.h" +#include "hw/boards.h" +#include "hw/char/serial.h" +#include "hw/misc/unimp.h" +#include "hw/loader.h" +#include "hw/sparc/sparc64.h" +#include "hw/rtc/sun4v-rtc.h" +#include "sysemu/block-backend.h" +#include "qemu/error-report.h" +#include "sysemu/qtest.h" +#include "sysemu/sysemu.h" +#include "qapi/error.h" + +typedef struct NiagaraBoardState { + MemoryRegion hv_ram; + MemoryRegion nvram; + MemoryRegion md_rom; + MemoryRegion hv_rom; + MemoryRegion vdisk_ram; + MemoryRegion prom; +} NiagaraBoardState; + +#define NIAGARA_HV_RAM_BASE 0x100000ULL +#define NIAGARA_HV_RAM_SIZE 0x3f00000ULL /* 63 MiB */ + +#define NIAGARA_PARTITION_RAM_BASE 0x80000000ULL + +#define NIAGARA_UART_BASE 0x1f10000000ULL + +#define NIAGARA_NVRAM_BASE 0x1f11000000ULL +#define NIAGARA_NVRAM_SIZE 0x2000 + +#define NIAGARA_MD_ROM_BASE 0x1f12000000ULL +#define NIAGARA_MD_ROM_SIZE 0x2000 + +#define NIAGARA_HV_ROM_BASE 0x1f12080000ULL +#define NIAGARA_HV_ROM_SIZE 0x2000 + +#define NIAGARA_IOBBASE 0x9800000000ULL +#define NIAGARA_IOBSIZE 0x0100000000ULL + +#define NIAGARA_VDISK_BASE 0x1f40000000ULL +#define NIAGARA_RTC_BASE 0xfff0c1fff8ULL + +/* Firmware layout + * + * |------------------| + * | openboot.bin | + * |------------------| PROM_ADDR + OBP_OFFSET + * | q.bin | + * |------------------| PROM_ADDR + Q_OFFSET + * | reset.bin | + * |------------------| PROM_ADDR + */ +#define NIAGARA_PROM_BASE 0xfff0000000ULL +#define NIAGARA_Q_OFFSET 0x10000ULL +#define NIAGARA_OBP_OFFSET 0x80000ULL +#define PROM_SIZE_MAX (4 * MiB) + +static void add_rom_or_fail(const char *file, const hwaddr addr) +{ + /* XXX remove qtest_enabled() check once firmware files are + * in the qemu tree + */ + if (!qtest_enabled() && rom_add_file_fixed(file, addr, -1)) { + error_report("Unable to load a firmware for -M niagara"); + exit(1); + } + +} +/* Niagara hardware initialisation */ +static void niagara_init(MachineState *machine) +{ + NiagaraBoardState *s = g_new(NiagaraBoardState, 1); + DriveInfo *dinfo = drive_get_next(IF_PFLASH); + MemoryRegion *sysmem = get_system_memory(); + + /* init CPUs */ + sparc64_cpu_devinit(machine->cpu_type, NIAGARA_PROM_BASE); + /* set up devices */ + memory_region_init_ram(&s->hv_ram, NULL, "sun4v-hv.ram", + NIAGARA_HV_RAM_SIZE, &error_fatal); + memory_region_add_subregion(sysmem, NIAGARA_HV_RAM_BASE, &s->hv_ram); + + memory_region_add_subregion(sysmem, NIAGARA_PARTITION_RAM_BASE, + machine->ram); + + memory_region_init_ram(&s->nvram, NULL, "sun4v.nvram", NIAGARA_NVRAM_SIZE, + &error_fatal); + memory_region_add_subregion(sysmem, NIAGARA_NVRAM_BASE, &s->nvram); + memory_region_init_ram(&s->md_rom, NULL, "sun4v-md.rom", + NIAGARA_MD_ROM_SIZE, &error_fatal); + memory_region_add_subregion(sysmem, 
NIAGARA_MD_ROM_BASE, &s->md_rom); + memory_region_init_ram(&s->hv_rom, NULL, "sun4v-hv.rom", + NIAGARA_HV_ROM_SIZE, &error_fatal); + memory_region_add_subregion(sysmem, NIAGARA_HV_ROM_BASE, &s->hv_rom); + memory_region_init_ram(&s->prom, NULL, "sun4v.prom", PROM_SIZE_MAX, + &error_fatal); + memory_region_add_subregion(sysmem, NIAGARA_PROM_BASE, &s->prom); + + add_rom_or_fail("nvram1", NIAGARA_NVRAM_BASE); + add_rom_or_fail("1up-md.bin", NIAGARA_MD_ROM_BASE); + add_rom_or_fail("1up-hv.bin", NIAGARA_HV_ROM_BASE); + + add_rom_or_fail("reset.bin", NIAGARA_PROM_BASE); + add_rom_or_fail("q.bin", NIAGARA_PROM_BASE + NIAGARA_Q_OFFSET); + add_rom_or_fail("openboot.bin", NIAGARA_PROM_BASE + NIAGARA_OBP_OFFSET); + + /* the virtual ramdisk is a kind of initrd, but it resides + outside of the partition RAM */ + if (dinfo) { + BlockBackend *blk = blk_by_legacy_dinfo(dinfo); + int size = blk_getlength(blk); + if (size > 0) { + memory_region_init_ram(&s->vdisk_ram, NULL, "sun4v_vdisk.ram", size, + &error_fatal); + memory_region_add_subregion(get_system_memory(), + NIAGARA_VDISK_BASE, &s->vdisk_ram); + dinfo->is_default = 1; + rom_add_file_fixed(blk_bs(blk)->filename, NIAGARA_VDISK_BASE, -1); + } else { + error_report("could not load ram disk '%s'", + blk_bs(blk)->filename); + exit(1); + } + } + serial_mm_init(sysmem, NIAGARA_UART_BASE, 0, NULL, + 115200, serial_hd(0), DEVICE_BIG_ENDIAN); + create_unimplemented_device("sun4v-iob", NIAGARA_IOBBASE, NIAGARA_IOBSIZE); + sun4v_rtc_init(NIAGARA_RTC_BASE); +} + +static void niagara_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + + mc->desc = "Sun4v platform, Niagara"; + mc->init = niagara_init; + mc->max_cpus = 1; /* XXX for now */ + mc->default_boot_order = "c"; + mc->default_cpu_type = SPARC_CPU_TYPE_NAME("Sun-UltraSparc-T1"); + mc->default_ram_id = "sun4v-partition.ram"; +} + +static const TypeInfo niagara_type = { + .name = MACHINE_TYPE_NAME("niagara"), + .parent = TYPE_MACHINE, + .class_init = niagara_class_init, +}; + +static void niagara_register_types(void) +{ + type_register_static(&niagara_type); +} + +type_init(niagara_register_types) diff --git a/hw/sparc64/sparc64.c b/hw/sparc64/sparc64.c new file mode 100644 index 000000000..8654e955e --- /dev/null +++ b/hw/sparc64/sparc64.c @@ -0,0 +1,297 @@ +/* + * QEMU Sun4u/Sun4v System Emulator common routines + * + * Copyright (c) 2005 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + + +#include "qemu/osdep.h" +#include "cpu.h" +#include "hw/boards.h" +#include "hw/sparc/sparc64.h" +#include "qemu/timer.h" +#include "sysemu/reset.h" +#include "trace.h" + + +#define TICK_MAX 0x7fffffffffffffffULL + +static void cpu_kick_irq(SPARCCPU *cpu) +{ + CPUState *cs = CPU(cpu); + CPUSPARCState *env = &cpu->env; + + cs->halted = 0; + cpu_check_irqs(env); + qemu_cpu_kick(cs); +} + +void sparc64_cpu_set_ivec_irq(void *opaque, int irq, int level) +{ + SPARCCPU *cpu = opaque; + CPUSPARCState *env = &cpu->env; + CPUState *cs; + + if (level) { + if (!(env->ivec_status & 0x20)) { + trace_sparc64_cpu_ivec_raise_irq(irq); + cs = CPU(cpu); + cs->halted = 0; + env->interrupt_index = TT_IVEC; + env->ivec_status |= 0x20; + env->ivec_data[0] = (0x1f << 6) | irq; + env->ivec_data[1] = 0; + env->ivec_data[2] = 0; + cpu_interrupt(cs, CPU_INTERRUPT_HARD); + } + } else { + if (env->ivec_status & 0x20) { + trace_sparc64_cpu_ivec_lower_irq(irq); + cs = CPU(cpu); + env->ivec_status &= ~0x20; + cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD); + } + } +} + +typedef struct ResetData { + SPARCCPU *cpu; + uint64_t prom_addr; +} ResetData; + +static CPUTimer *cpu_timer_create(const char *name, SPARCCPU *cpu, + QEMUBHFunc *cb, uint32_t frequency, + uint64_t disabled_mask, uint64_t npt_mask) +{ + CPUTimer *timer = g_malloc0(sizeof(CPUTimer)); + + timer->name = name; + timer->frequency = frequency; + timer->disabled_mask = disabled_mask; + timer->npt_mask = npt_mask; + + timer->disabled = 1; + timer->npt = 1; + timer->clock_offset = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + + timer->qtimer = timer_new_ns(QEMU_CLOCK_VIRTUAL, cb, cpu); + + return timer; +} + +static void cpu_timer_reset(CPUTimer *timer) +{ + timer->disabled = 1; + timer->clock_offset = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + + timer_del(timer->qtimer); +} + +static void main_cpu_reset(void *opaque) +{ + ResetData *s = (ResetData *)opaque; + CPUSPARCState *env = &s->cpu->env; + static unsigned int nr_resets; + + cpu_reset(CPU(s->cpu)); + + cpu_timer_reset(env->tick); + cpu_timer_reset(env->stick); + cpu_timer_reset(env->hstick); + + env->gregs[1] = 0; /* Memory start */ + env->gregs[2] = current_machine->ram_size; /* Memory size */ + env->gregs[3] = 0; /* Machine description XXX */ + if (nr_resets++ == 0) { + /* Power on reset */ + env->pc = s->prom_addr + 0x20ULL; + } else { + env->pc = s->prom_addr + 0x40ULL; + } + env->npc = env->pc + 4; +} + +static void tick_irq(void *opaque) +{ + SPARCCPU *cpu = opaque; + CPUSPARCState *env = &cpu->env; + + CPUTimer *timer = env->tick; + + if (timer->disabled) { + trace_sparc64_cpu_tick_irq_disabled(); + return; + } else { + trace_sparc64_cpu_tick_irq_fire(); + } + + env->softint |= SOFTINT_TIMER; + cpu_kick_irq(cpu); +} + +static void stick_irq(void *opaque) +{ + SPARCCPU *cpu = opaque; + CPUSPARCState *env = &cpu->env; + + CPUTimer *timer = env->stick; + + if (timer->disabled) { + trace_sparc64_cpu_stick_irq_disabled(); + return; + } else { + trace_sparc64_cpu_stick_irq_fire(); + } + + env->softint |= SOFTINT_STIMER; + cpu_kick_irq(cpu); +} + +static void hstick_irq(void *opaque) +{ + SPARCCPU *cpu = opaque; + CPUSPARCState *env = &cpu->env; + + CPUTimer *timer = env->hstick; + + if (timer->disabled) { + trace_sparc64_cpu_hstick_irq_disabled(); + return; + } else { + trace_sparc64_cpu_hstick_irq_fire(); + } + + env->softint |= SOFTINT_STIMER; + cpu_kick_irq(cpu); +} + +static int64_t cpu_to_timer_ticks(int64_t cpu_ticks, uint32_t frequency) +{ + return muldiv64(cpu_ticks, NANOSECONDS_PER_SECOND, 
frequency); +} + +static uint64_t timer_to_cpu_ticks(int64_t timer_ticks, uint32_t frequency) +{ + return muldiv64(timer_ticks, frequency, NANOSECONDS_PER_SECOND); +} + +void cpu_tick_set_count(CPUTimer *timer, uint64_t count) +{ + uint64_t real_count = count & ~timer->npt_mask; + uint64_t npt_bit = count & timer->npt_mask; + + int64_t vm_clock_offset = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - + cpu_to_timer_ticks(real_count, timer->frequency); + + trace_sparc64_cpu_tick_set_count(timer->name, real_count, + timer->npt ? "disabled" : "enabled", + timer); + + timer->npt = npt_bit ? 1 : 0; + timer->clock_offset = vm_clock_offset; +} + +uint64_t cpu_tick_get_count(CPUTimer *timer) +{ + uint64_t real_count = timer_to_cpu_ticks( + qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - timer->clock_offset, + timer->frequency); + + trace_sparc64_cpu_tick_get_count(timer->name, real_count, + timer->npt ? "disabled" : "enabled", + timer); + + if (timer->npt) { + real_count |= timer->npt_mask; + } + + return real_count; +} + +void cpu_tick_set_limit(CPUTimer *timer, uint64_t limit) +{ + int64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + + uint64_t real_limit = limit & ~timer->disabled_mask; + timer->disabled = (limit & timer->disabled_mask) ? 1 : 0; + + int64_t expires = cpu_to_timer_ticks(real_limit, timer->frequency) + + timer->clock_offset; + + if (expires < now) { + expires = now + 1; + } + + trace_sparc64_cpu_tick_set_limit(timer->name, real_limit, + timer->disabled ? "disabled" : "enabled", + timer, limit, + timer_to_cpu_ticks( + now - timer->clock_offset, + timer->frequency + ), + timer_to_cpu_ticks( + expires - now, timer->frequency + )); + + if (!real_limit) { + trace_sparc64_cpu_tick_set_limit_zero(timer->name); + timer_del(timer->qtimer); + } else if (timer->disabled) { + timer_del(timer->qtimer); + } else { + timer_mod(timer->qtimer, expires); + } +} + +SPARCCPU *sparc64_cpu_devinit(const char *cpu_type, uint64_t prom_addr) +{ + SPARCCPU *cpu; + CPUSPARCState *env; + ResetData *reset_info; + + uint32_t tick_frequency = 100 * 1000000; + uint32_t stick_frequency = 100 * 1000000; + uint32_t hstick_frequency = 100 * 1000000; + + cpu = SPARC_CPU(cpu_create(cpu_type)); + qdev_init_gpio_in_named(DEVICE(cpu), sparc64_cpu_set_ivec_irq, + "ivec-irq", IVEC_MAX); + env = &cpu->env; + + env->tick = cpu_timer_create("tick", cpu, tick_irq, + tick_frequency, TICK_INT_DIS, + TICK_NPT_MASK); + + env->stick = cpu_timer_create("stick", cpu, stick_irq, + stick_frequency, TICK_INT_DIS, + TICK_NPT_MASK); + + env->hstick = cpu_timer_create("hstick", cpu, hstick_irq, + hstick_frequency, TICK_INT_DIS, + TICK_NPT_MASK); + + reset_info = g_malloc0(sizeof(ResetData)); + reset_info->cpu = cpu; + reset_info->prom_addr = prom_addr; + qemu_register_reset(main_cpu_reset, reset_info); + + return cpu; +} diff --git a/hw/sparc64/sun4u.c b/hw/sparc64/sun4u.c new file mode 100644 index 000000000..cda7df36e --- /dev/null +++ b/hw/sparc64/sun4u.c @@ -0,0 +1,863 @@ +/* + * QEMU Sun4u/Sun4v System Emulator + * + * Copyright (c) 2005 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and 
this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "qemu/error-report.h" +#include "qapi/error.h" +#include "qemu-common.h" +#include "qemu/datadir.h" +#include "cpu.h" +#include "hw/pci/pci.h" +#include "hw/pci/pci_bridge.h" +#include "hw/pci/pci_bus.h" +#include "hw/pci/pci_host.h" +#include "hw/qdev-properties.h" +#include "hw/pci-host/sabre.h" +#include "hw/char/serial.h" +#include "hw/char/parallel.h" +#include "hw/rtc/m48t59.h" +#include "migration/vmstate.h" +#include "hw/input/i8042.h" +#include "hw/block/fdc.h" +#include "net/net.h" +#include "qemu/timer.h" +#include "sysemu/runstate.h" +#include "sysemu/sysemu.h" +#include "hw/boards.h" +#include "hw/nvram/sun_nvram.h" +#include "hw/nvram/chrp_nvram.h" +#include "hw/sparc/sparc64.h" +#include "hw/nvram/fw_cfg.h" +#include "hw/sysbus.h" +#include "hw/ide/pci.h" +#include "hw/loader.h" +#include "hw/fw-path-provider.h" +#include "elf.h" +#include "trace.h" +#include "qom/object.h" + +#define KERNEL_LOAD_ADDR 0x00404000 +#define CMDLINE_ADDR 0x003ff000 +#define PROM_SIZE_MAX (4 * MiB) +#define PROM_VADDR 0x000ffd00000ULL +#define PBM_SPECIAL_BASE 0x1fe00000000ULL +#define PBM_MEM_BASE 0x1ff00000000ULL +#define PBM_PCI_IO_BASE (PBM_SPECIAL_BASE + 0x02000000ULL) +#define PROM_FILENAME "openbios-sparc64" +#define NVRAM_SIZE 0x2000 +#define MAX_IDE_BUS 2 +#define BIOS_CFG_IOPORT 0x510 +#define FW_CFG_SPARC64_WIDTH (FW_CFG_ARCH_LOCAL + 0x00) +#define FW_CFG_SPARC64_HEIGHT (FW_CFG_ARCH_LOCAL + 0x01) +#define FW_CFG_SPARC64_DEPTH (FW_CFG_ARCH_LOCAL + 0x02) + +#define IVEC_MAX 0x40 + +struct hwdef { + uint16_t machine_id; + uint64_t prom_addr; + uint64_t console_serial_base; +}; + +struct EbusState { + /*< private >*/ + PCIDevice parent_obj; + + ISABus *isa_bus; + qemu_irq isa_bus_irqs[ISA_NUM_IRQS]; + uint64_t console_serial_base; + MemoryRegion bar0; + MemoryRegion bar1; +}; + +#define TYPE_EBUS "ebus" +OBJECT_DECLARE_SIMPLE_TYPE(EbusState, EBUS) + +const char *fw_cfg_arch_key_name(uint16_t key) +{ + static const struct { + uint16_t key; + const char *name; + } fw_cfg_arch_wellknown_keys[] = { + {FW_CFG_SPARC64_WIDTH, "width"}, + {FW_CFG_SPARC64_HEIGHT, "height"}, + {FW_CFG_SPARC64_DEPTH, "depth"}, + }; + + for (size_t i = 0; i < ARRAY_SIZE(fw_cfg_arch_wellknown_keys); i++) { + if (fw_cfg_arch_wellknown_keys[i].key == key) { + return fw_cfg_arch_wellknown_keys[i].name; + } + } + return NULL; +} + +static void fw_cfg_boot_set(void *opaque, const char *boot_device, + Error **errp) +{ + fw_cfg_modify_i16(opaque, FW_CFG_BOOT_DEVICE, boot_device[0]); +} + +static int sun4u_NVRAM_set_params(Nvram *nvram, uint16_t NVRAM_size, + const char *arch, ram_addr_t RAM_size, + const char *boot_devices, + uint32_t kernel_image, uint32_t kernel_size, + const char *cmdline, + uint32_t initrd_image, uint32_t initrd_size, + uint32_t NVRAM_image, + int width, int height, int depth, + const uint8_t *macaddr) +{ + unsigned int i; + int sysp_end; + uint8_t 
image[0x1ff0]; + NvramClass *k = NVRAM_GET_CLASS(nvram); + + memset(image, '\0', sizeof(image)); + + /* OpenBIOS nvram variables partition */ + sysp_end = chrp_nvram_create_system_partition(image, 0, 0x1fd0); + + /* Free space partition */ + chrp_nvram_create_free_partition(&image[sysp_end], 0x1fd0 - sysp_end); + + Sun_init_header((struct Sun_nvram *)&image[0x1fd8], macaddr, 0x80); + + for (i = 0; i < sizeof(image); i++) { + (k->write)(nvram, i, image[i]); + } + + return 0; +} + +static uint64_t sun4u_load_kernel(const char *kernel_filename, + const char *initrd_filename, + ram_addr_t RAM_size, uint64_t *initrd_size, + uint64_t *initrd_addr, uint64_t *kernel_addr, + uint64_t *kernel_entry) +{ + int linux_boot; + unsigned int i; + long kernel_size; + uint8_t *ptr; + uint64_t kernel_top = 0; + + linux_boot = (kernel_filename != NULL); + + kernel_size = 0; + if (linux_boot) { + int bswap_needed; + +#ifdef BSWAP_NEEDED + bswap_needed = 1; +#else + bswap_needed = 0; +#endif + kernel_size = load_elf(kernel_filename, NULL, NULL, NULL, kernel_entry, + kernel_addr, &kernel_top, NULL, 1, EM_SPARCV9, 0, + 0); + if (kernel_size < 0) { + *kernel_addr = KERNEL_LOAD_ADDR; + *kernel_entry = KERNEL_LOAD_ADDR; + kernel_size = load_aout(kernel_filename, KERNEL_LOAD_ADDR, + RAM_size - KERNEL_LOAD_ADDR, bswap_needed, + TARGET_PAGE_SIZE); + } + if (kernel_size < 0) { + kernel_size = load_image_targphys(kernel_filename, + KERNEL_LOAD_ADDR, + RAM_size - KERNEL_LOAD_ADDR); + } + if (kernel_size < 0) { + error_report("could not load kernel '%s'", kernel_filename); + exit(1); + } + /* load initrd above kernel */ + *initrd_size = 0; + if (initrd_filename && kernel_top) { + *initrd_addr = TARGET_PAGE_ALIGN(kernel_top); + + *initrd_size = load_image_targphys(initrd_filename, + *initrd_addr, + RAM_size - *initrd_addr); + if ((int)*initrd_size < 0) { + error_report("could not load initial ram disk '%s'", + initrd_filename); + exit(1); + } + } + if (*initrd_size > 0) { + for (i = 0; i < 64 * TARGET_PAGE_SIZE; i += TARGET_PAGE_SIZE) { + ptr = rom_ptr(*kernel_addr + i, 32); + if (ptr && ldl_p(ptr + 8) == 0x48647253) { /* HdrS */ + stl_p(ptr + 24, *initrd_addr + *kernel_addr); + stl_p(ptr + 28, *initrd_size); + break; + } + } + } + } + return kernel_size; +} + +typedef struct ResetData { + SPARCCPU *cpu; + uint64_t prom_addr; +} ResetData; + +#define TYPE_SUN4U_POWER "power" +OBJECT_DECLARE_SIMPLE_TYPE(PowerDevice, SUN4U_POWER) + +struct PowerDevice { + SysBusDevice parent_obj; + + MemoryRegion power_mmio; +}; + +/* Power */ +static uint64_t power_mem_read(void *opaque, hwaddr addr, unsigned size) +{ + return 0; +} + +static void power_mem_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + /* According to a real Ultra 5, bit 24 controls the power */ + if (val & 0x1000000) { + qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN); + } +} + +static const MemoryRegionOps power_mem_ops = { + .read = power_mem_read, + .write = power_mem_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +static void power_realize(DeviceState *dev, Error **errp) +{ + PowerDevice *d = SUN4U_POWER(dev); + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + + memory_region_init_io(&d->power_mmio, OBJECT(dev), &power_mem_ops, d, + "power", sizeof(uint32_t)); + + sysbus_init_mmio(sbd, &d->power_mmio); +} + +static void power_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = power_realize; +} + +static const TypeInfo 
power_info = { + .name = TYPE_SUN4U_POWER, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(PowerDevice), + .class_init = power_class_init, +}; + +static void ebus_isa_irq_handler(void *opaque, int n, int level) +{ + EbusState *s = EBUS(opaque); + qemu_irq irq = s->isa_bus_irqs[n]; + + /* Pass ISA bus IRQs onto their gpio equivalent */ + trace_ebus_isa_irq_handler(n, level); + if (irq) { + qemu_set_irq(irq, level); + } +} + +/* EBUS (Eight bit bus) bridge */ +static void ebus_realize(PCIDevice *pci_dev, Error **errp) +{ + EbusState *s = EBUS(pci_dev); + ISADevice *isa_dev; + SysBusDevice *sbd; + DeviceState *dev; + qemu_irq *isa_irq; + DriveInfo *fd[MAX_FD]; + int i; + + s->isa_bus = isa_bus_new(DEVICE(pci_dev), get_system_memory(), + pci_address_space_io(pci_dev), errp); + if (!s->isa_bus) { + error_setg(errp, "unable to instantiate EBUS ISA bus"); + return; + } + + /* ISA bus */ + isa_irq = qemu_allocate_irqs(ebus_isa_irq_handler, s, ISA_NUM_IRQS); + isa_bus_irqs(s->isa_bus, isa_irq); + qdev_init_gpio_out_named(DEVICE(s), s->isa_bus_irqs, "isa-irq", + ISA_NUM_IRQS); + + /* Serial ports */ + i = 0; + if (s->console_serial_base) { + serial_mm_init(pci_address_space(pci_dev), s->console_serial_base, + 0, NULL, 115200, serial_hd(i), DEVICE_BIG_ENDIAN); + i++; + } + serial_hds_isa_init(s->isa_bus, i, MAX_ISA_SERIAL_PORTS); + + /* Parallel ports */ + parallel_hds_isa_init(s->isa_bus, MAX_PARALLEL_PORTS); + + /* Keyboard */ + isa_create_simple(s->isa_bus, "i8042"); + + /* Floppy */ + for (i = 0; i < MAX_FD; i++) { + fd[i] = drive_get(IF_FLOPPY, 0, i); + } + isa_dev = isa_new(TYPE_ISA_FDC); + dev = DEVICE(isa_dev); + qdev_prop_set_uint32(dev, "dma", -1); + isa_realize_and_unref(isa_dev, s->isa_bus, &error_fatal); + isa_fdc_init_drives(isa_dev, fd); + + /* Power */ + dev = qdev_new(TYPE_SUN4U_POWER); + sbd = SYS_BUS_DEVICE(dev); + sysbus_realize_and_unref(sbd, &error_fatal); + memory_region_add_subregion(pci_address_space_io(pci_dev), 0x7240, + sysbus_mmio_get_region(sbd, 0)); + + /* PCI */ + pci_dev->config[0x04] = 0x06; // command = bus master, pci mem + pci_dev->config[0x05] = 0x00; + pci_dev->config[0x06] = 0xa0; // status = fast back-to-back, 66MHz, no error + pci_dev->config[0x07] = 0x03; // status = medium devsel + pci_dev->config[0x09] = 0x00; // programming i/f + pci_dev->config[0x0D] = 0x0a; // latency_timer + + memory_region_init_alias(&s->bar0, OBJECT(s), "bar0", get_system_io(), + 0, 0x1000000); + pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &s->bar0); + memory_region_init_alias(&s->bar1, OBJECT(s), "bar1", get_system_io(), + 0, 0x8000); + pci_register_bar(pci_dev, 1, PCI_BASE_ADDRESS_SPACE_IO, &s->bar1); +} + +static Property ebus_properties[] = { + DEFINE_PROP_UINT64("console-serial-base", EbusState, + console_serial_base, 0), + DEFINE_PROP_END_OF_LIST(), +}; + +static void ebus_class_init(ObjectClass *klass, void *data) +{ + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + DeviceClass *dc = DEVICE_CLASS(klass); + + k->realize = ebus_realize; + k->vendor_id = PCI_VENDOR_ID_SUN; + k->device_id = PCI_DEVICE_ID_SUN_EBUS; + k->revision = 0x01; + k->class_id = PCI_CLASS_BRIDGE_OTHER; + device_class_set_props(dc, ebus_properties); +} + +static const TypeInfo ebus_info = { + .name = TYPE_EBUS, + .parent = TYPE_PCI_DEVICE, + .class_init = ebus_class_init, + .instance_size = sizeof(EbusState), + .interfaces = (InterfaceInfo[]) { + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { }, + }, +}; + +#define TYPE_OPENPROM "openprom" +typedef struct PROMState PROMState; 
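+/* Boot PROM container: a fixed ROM region into which prom_init() + below loads the OpenBIOS image, using translate_prom_address() to + rebase the ELF's link addresses (at PROM_VADDR) onto the physical + prom_addr chosen by the machine. */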
+DECLARE_INSTANCE_CHECKER(PROMState, OPENPROM, + TYPE_OPENPROM) + +struct PROMState { + SysBusDevice parent_obj; + + MemoryRegion prom; +}; + +static uint64_t translate_prom_address(void *opaque, uint64_t addr) +{ + hwaddr *base_addr = (hwaddr *)opaque; + return addr + *base_addr - PROM_VADDR; +} + +/* Boot PROM (OpenBIOS) */ +static void prom_init(hwaddr addr, const char *bios_name) +{ + DeviceState *dev; + SysBusDevice *s; + char *filename; + int ret; + + dev = qdev_new(TYPE_OPENPROM); + s = SYS_BUS_DEVICE(dev); + sysbus_realize_and_unref(s, &error_fatal); + + sysbus_mmio_map(s, 0, addr); + + /* load boot prom */ + if (bios_name == NULL) { + bios_name = PROM_FILENAME; + } + filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name); + if (filename) { + ret = load_elf(filename, NULL, translate_prom_address, &addr, + NULL, NULL, NULL, NULL, 1, EM_SPARCV9, 0, 0); + if (ret < 0 || ret > PROM_SIZE_MAX) { + ret = load_image_targphys(filename, addr, PROM_SIZE_MAX); + } + g_free(filename); + } else { + ret = -1; + } + if (ret < 0 || ret > PROM_SIZE_MAX) { + error_report("could not load prom '%s'", bios_name); + exit(1); + } +} + +static void prom_realize(DeviceState *ds, Error **errp) +{ + PROMState *s = OPENPROM(ds); + SysBusDevice *dev = SYS_BUS_DEVICE(ds); + Error *local_err = NULL; + + memory_region_init_ram_nomigrate(&s->prom, OBJECT(ds), "sun4u.prom", + PROM_SIZE_MAX, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + + vmstate_register_ram_global(&s->prom); + memory_region_set_readonly(&s->prom, true); + sysbus_init_mmio(dev, &s->prom); +} + +static Property prom_properties[] = { + {/* end of property list */}, +}; + +static void prom_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + device_class_set_props(dc, prom_properties); + dc->realize = prom_realize; +} + +static const TypeInfo prom_info = { + .name = TYPE_OPENPROM, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(PROMState), + .class_init = prom_class_init, +}; + + +#define TYPE_SUN4U_MEMORY "memory" +typedef struct RamDevice RamDevice; +DECLARE_INSTANCE_CHECKER(RamDevice, SUN4U_RAM, + TYPE_SUN4U_MEMORY) + +struct RamDevice { + SysBusDevice parent_obj; + + MemoryRegion ram; + uint64_t size; +}; + +/* System RAM */ +static void ram_realize(DeviceState *dev, Error **errp) +{ + RamDevice *d = SUN4U_RAM(dev); + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + + memory_region_init_ram_nomigrate(&d->ram, OBJECT(d), "sun4u.ram", d->size, + &error_fatal); + vmstate_register_ram_global(&d->ram); + sysbus_init_mmio(sbd, &d->ram); +} + +static void ram_init(hwaddr addr, ram_addr_t RAM_size) +{ + DeviceState *dev; + SysBusDevice *s; + RamDevice *d; + + /* allocate RAM */ + dev = qdev_new(TYPE_SUN4U_MEMORY); + s = SYS_BUS_DEVICE(dev); + + d = SUN4U_RAM(dev); + d->size = RAM_size; + sysbus_realize_and_unref(s, &error_fatal); + + sysbus_mmio_map(s, 0, addr); +} + +static Property ram_properties[] = { + DEFINE_PROP_UINT64("size", RamDevice, size, 0), + DEFINE_PROP_END_OF_LIST(), +}; + +static void ram_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = ram_realize; + device_class_set_props(dc, ram_properties); +} + +static const TypeInfo ram_info = { + .name = TYPE_SUN4U_MEMORY, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(RamDevice), + .class_init = ram_class_init, +}; + +static void sun4uv_init(MemoryRegion *address_space_mem, + MachineState *machine, + const struct hwdef *hwdef) +{ + SPARCCPU *cpu; + 
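+ /* Rough bring-up order for this board: CPU and tick timers, + IOMMU, RAM, boot PROM, the sabre PCI host bridge with its two + Simba bridges, then ebus ISA devices, NICs, IDE, NVRAM and + finally fw_cfg for OpenBIOS. */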
Nvram *nvram; + unsigned int i; + uint64_t initrd_addr, initrd_size, kernel_addr, kernel_size, kernel_entry; + SabreState *sabre; + PCIBus *pci_bus, *pci_busA, *pci_busB; + PCIDevice *ebus, *pci_dev; + SysBusDevice *s; + DeviceState *iommu, *dev; + FWCfgState *fw_cfg; + NICInfo *nd; + MACAddr macaddr; + bool onboard_nic; + + /* init CPUs */ + cpu = sparc64_cpu_devinit(machine->cpu_type, hwdef->prom_addr); + + /* IOMMU */ + iommu = qdev_new(TYPE_SUN4U_IOMMU); + sysbus_realize_and_unref(SYS_BUS_DEVICE(iommu), &error_fatal); + + /* set up devices */ + ram_init(0, machine->ram_size); + + prom_init(hwdef->prom_addr, machine->firmware); + + /* Init sabre (PCI host bridge) */ + sabre = SABRE(qdev_new(TYPE_SABRE)); + qdev_prop_set_uint64(DEVICE(sabre), "special-base", PBM_SPECIAL_BASE); + qdev_prop_set_uint64(DEVICE(sabre), "mem-base", PBM_MEM_BASE); + object_property_set_link(OBJECT(sabre), "iommu", OBJECT(iommu), + &error_abort); + sysbus_realize_and_unref(SYS_BUS_DEVICE(sabre), &error_fatal); + + /* sabre_config */ + sysbus_mmio_map(SYS_BUS_DEVICE(sabre), 0, PBM_SPECIAL_BASE); + /* PCI configuration space */ + sysbus_mmio_map(SYS_BUS_DEVICE(sabre), 1, PBM_SPECIAL_BASE + 0x1000000ULL); + /* pci_ioport */ + sysbus_mmio_map(SYS_BUS_DEVICE(sabre), 2, PBM_SPECIAL_BASE + 0x2000000ULL); + + /* Wire up PCI interrupts to CPU */ + for (i = 0; i < IVEC_MAX; i++) { + qdev_connect_gpio_out_named(DEVICE(sabre), "ivec-irq", i, + qdev_get_gpio_in_named(DEVICE(cpu), "ivec-irq", i)); + } + + pci_bus = PCI_HOST_BRIDGE(sabre)->bus; + pci_busA = pci_bridge_get_sec_bus(sabre->bridgeA); + pci_busB = pci_bridge_get_sec_bus(sabre->bridgeB); + + /* Only in-built Simba APBs can exist on the root bus, slot 0 on busA is + reserved (leaving no slots free after on-board devices) however slots + 0-3 are free on busB */ + pci_bus->slot_reserved_mask = 0xfffffffc; + pci_busA->slot_reserved_mask = 0xfffffff1; + pci_busB->slot_reserved_mask = 0xfffffff0; + + ebus = pci_new_multifunction(PCI_DEVFN(1, 0), true, TYPE_EBUS); + qdev_prop_set_uint64(DEVICE(ebus), "console-serial-base", + hwdef->console_serial_base); + pci_realize_and_unref(ebus, pci_busA, &error_fatal); + + /* Wire up "well-known" ISA IRQs to PBM legacy obio IRQs */ + qdev_connect_gpio_out_named(DEVICE(ebus), "isa-irq", 7, + qdev_get_gpio_in_named(DEVICE(sabre), "pbm-irq", OBIO_LPT_IRQ)); + qdev_connect_gpio_out_named(DEVICE(ebus), "isa-irq", 6, + qdev_get_gpio_in_named(DEVICE(sabre), "pbm-irq", OBIO_FDD_IRQ)); + qdev_connect_gpio_out_named(DEVICE(ebus), "isa-irq", 1, + qdev_get_gpio_in_named(DEVICE(sabre), "pbm-irq", OBIO_KBD_IRQ)); + qdev_connect_gpio_out_named(DEVICE(ebus), "isa-irq", 12, + qdev_get_gpio_in_named(DEVICE(sabre), "pbm-irq", OBIO_MSE_IRQ)); + qdev_connect_gpio_out_named(DEVICE(ebus), "isa-irq", 4, + qdev_get_gpio_in_named(DEVICE(sabre), "pbm-irq", OBIO_SER_IRQ)); + + switch (vga_interface_type) { + case VGA_STD: + pci_create_simple(pci_busA, PCI_DEVFN(2, 0), "VGA"); + break; + case VGA_NONE: + break; + default: + abort(); /* Should not happen - types are checked in vl.c already */ + } + + memset(&macaddr, 0, sizeof(MACAddr)); + onboard_nic = false; + for (i = 0; i < nb_nics; i++) { + PCIBus *bus; + nd = &nd_table[i]; + + if (!nd->model || strcmp(nd->model, "sunhme") == 0) { + if (!onboard_nic) { + pci_dev = pci_new_multifunction(PCI_DEVFN(1, 1), + true, "sunhme"); + bus = pci_busA; + memcpy(&macaddr, &nd->macaddr.a, sizeof(MACAddr)); + onboard_nic = true; + } else { + pci_dev = pci_new(-1, "sunhme"); + bus = pci_busB; + } + } else { + pci_dev = 
pci_new(-1, nd->model); + bus = pci_busB; + } + + dev = &pci_dev->qdev; + qdev_set_nic_properties(dev, nd); + pci_realize_and_unref(pci_dev, bus, &error_fatal); + } + + /* If we don't have an onboard NIC, grab a default MAC address so that + * we have a valid machine id */ + if (!onboard_nic) { + qemu_macaddr_default_if_unset(&macaddr); + } + + pci_dev = pci_new(PCI_DEVFN(3, 0), "cmd646-ide"); + qdev_prop_set_uint32(&pci_dev->qdev, "secondary", 1); + pci_realize_and_unref(pci_dev, pci_busA, &error_fatal); + pci_ide_create_devs(pci_dev); + + /* Map NVRAM into I/O (ebus) space */ + dev = qdev_new("sysbus-m48t59"); + qdev_prop_set_int32(dev, "base-year", 1968); + s = SYS_BUS_DEVICE(dev); + sysbus_realize_and_unref(s, &error_fatal); + memory_region_add_subregion(pci_address_space_io(ebus), 0x2000, + sysbus_mmio_get_region(s, 0)); + nvram = NVRAM(dev); + + initrd_size = 0; + initrd_addr = 0; + kernel_size = sun4u_load_kernel(machine->kernel_filename, + machine->initrd_filename, + machine->ram_size, &initrd_size, &initrd_addr, + &kernel_addr, &kernel_entry); + + sun4u_NVRAM_set_params(nvram, NVRAM_SIZE, "Sun4u", machine->ram_size, + machine->boot_order, + kernel_addr, kernel_size, + machine->kernel_cmdline, + initrd_addr, initrd_size, + /* XXX: need an option to load a NVRAM image */ + 0, + graphic_width, graphic_height, graphic_depth, + (uint8_t *)&macaddr); + + dev = qdev_new(TYPE_FW_CFG_IO); + qdev_prop_set_bit(dev, "dma_enabled", false); + object_property_add_child(OBJECT(ebus), TYPE_FW_CFG, OBJECT(dev)); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + memory_region_add_subregion(pci_address_space_io(ebus), BIOS_CFG_IOPORT, + &FW_CFG_IO(dev)->comb_iomem); + + fw_cfg = FW_CFG(dev); + fw_cfg_add_i16(fw_cfg, FW_CFG_NB_CPUS, (uint16_t)machine->smp.cpus); + fw_cfg_add_i16(fw_cfg, FW_CFG_MAX_CPUS, (uint16_t)machine->smp.max_cpus); + fw_cfg_add_i64(fw_cfg, FW_CFG_RAM_SIZE, (uint64_t)machine->ram_size); + fw_cfg_add_i16(fw_cfg, FW_CFG_MACHINE_ID, hwdef->machine_id); + fw_cfg_add_i64(fw_cfg, FW_CFG_KERNEL_ADDR, kernel_entry); + fw_cfg_add_i64(fw_cfg, FW_CFG_KERNEL_SIZE, kernel_size); + if (machine->kernel_cmdline) { + fw_cfg_add_i32(fw_cfg, FW_CFG_CMDLINE_SIZE, + strlen(machine->kernel_cmdline) + 1); + fw_cfg_add_string(fw_cfg, FW_CFG_CMDLINE_DATA, machine->kernel_cmdline); + } else { + fw_cfg_add_i32(fw_cfg, FW_CFG_CMDLINE_SIZE, 0); + } + fw_cfg_add_i64(fw_cfg, FW_CFG_INITRD_ADDR, initrd_addr); + fw_cfg_add_i64(fw_cfg, FW_CFG_INITRD_SIZE, initrd_size); + fw_cfg_add_i16(fw_cfg, FW_CFG_BOOT_DEVICE, machine->boot_order[0]); + + fw_cfg_add_i16(fw_cfg, FW_CFG_SPARC64_WIDTH, graphic_width); + fw_cfg_add_i16(fw_cfg, FW_CFG_SPARC64_HEIGHT, graphic_height); + fw_cfg_add_i16(fw_cfg, FW_CFG_SPARC64_DEPTH, graphic_depth); + + qemu_register_boot_set(fw_cfg_boot_set, fw_cfg); +} + +enum { + sun4u_id = 0, + sun4v_id = 64, +}; + +/* + * Implementation of an interface to adjust firmware path + * for the bootindex property handling. 
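The fw_cfg block above publishes boot parameters (CPU counts, kernel/initrd addresses, the command line) to the guest firmware as fixed-width entries stored under well-known keys. A toy sketch of that key/value contract is below; it is a conceptual model, not QEMU's FWCfgState API, and the key number is illustrative.

#include <stdint.h>
#include <string.h>
#include <stdio.h>

struct toy_entry {
    uint16_t key;
    uint8_t  data[8];
    size_t   len;
};

static struct toy_entry toy_table[32];
static size_t toy_count;

static void toy_add_bytes(uint16_t key, const void *buf, size_t len)
{
    struct toy_entry *e = &toy_table[toy_count++];
    e->key = key;
    e->len = len < sizeof(e->data) ? len : sizeof(e->data);
    memcpy(e->data, buf, e->len);
}

static void toy_add_i16(uint16_t key, uint16_t value)
{
    /* entries are byte streams; the i16/i32/i64 helpers just fix the
     * width, with values serialized little-endian on the wire */
    uint8_t le[2] = { value & 0xff, value >> 8 };
    toy_add_bytes(key, le, sizeof(le));
}

int main(void)
{
    toy_add_i16(0x0005 /* illustrative key, think FW_CFG_NB_CPUS */, 1);
    printf("stored %zu entries; first entry has %zu byte(s)\n",
           toy_count, toy_table[0].len);
    return 0;
}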
+ */ +static char *sun4u_fw_dev_path(FWPathProvider *p, BusState *bus, + DeviceState *dev) +{ + PCIDevice *pci; + + if (!strcmp(object_get_typename(OBJECT(dev)), "pbm-bridge")) { + pci = PCI_DEVICE(dev); + + if (PCI_FUNC(pci->devfn)) { + return g_strdup_printf("pci@%x,%x", PCI_SLOT(pci->devfn), + PCI_FUNC(pci->devfn)); + } else { + return g_strdup_printf("pci@%x", PCI_SLOT(pci->devfn)); + } + } + + if (!strcmp(object_get_typename(OBJECT(dev)), "ide-hd")) { + return g_strdup("disk"); + } + + if (!strcmp(object_get_typename(OBJECT(dev)), "ide-cd")) { + return g_strdup("cdrom"); + } + + if (!strcmp(object_get_typename(OBJECT(dev)), "virtio-blk-device")) { + return g_strdup("disk"); + } + + return NULL; +} + +static const struct hwdef hwdefs[] = { + /* Sun4u generic PC-like machine */ + { + .machine_id = sun4u_id, + .prom_addr = 0x1fff0000000ULL, + .console_serial_base = 0, + }, + /* Sun4v generic PC-like machine */ + { + .machine_id = sun4v_id, + .prom_addr = 0x1fff0000000ULL, + .console_serial_base = 0, + }, +}; + +/* Sun4u hardware initialisation */ +static void sun4u_init(MachineState *machine) +{ + sun4uv_init(get_system_memory(), machine, &hwdefs[0]); +} + +/* Sun4v hardware initialisation */ +static void sun4v_init(MachineState *machine) +{ + sun4uv_init(get_system_memory(), machine, &hwdefs[1]); +} + +static void sun4u_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + FWPathProviderClass *fwc = FW_PATH_PROVIDER_CLASS(oc); + + mc->desc = "Sun4u platform"; + mc->init = sun4u_init; + mc->block_default_type = IF_IDE; + mc->max_cpus = 1; /* XXX for now */ + mc->is_default = true; + mc->default_boot_order = "c"; + mc->default_cpu_type = SPARC_CPU_TYPE_NAME("TI-UltraSparc-IIi"); + mc->ignore_boot_device_suffixes = true; + mc->default_display = "std"; + fwc->get_dev_path = sun4u_fw_dev_path; +} + +static const TypeInfo sun4u_type = { + .name = MACHINE_TYPE_NAME("sun4u"), + .parent = TYPE_MACHINE, + .class_init = sun4u_class_init, + .interfaces = (InterfaceInfo[]) { + { TYPE_FW_PATH_PROVIDER }, + { } + }, +}; + +static void sun4v_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + + mc->desc = "Sun4v platform"; + mc->init = sun4v_init; + mc->block_default_type = IF_IDE; + mc->max_cpus = 1; /* XXX for now */ + mc->default_boot_order = "c"; + mc->default_cpu_type = SPARC_CPU_TYPE_NAME("Sun-UltraSparc-T1"); + mc->default_display = "std"; +} + +static const TypeInfo sun4v_type = { + .name = MACHINE_TYPE_NAME("sun4v"), + .parent = TYPE_MACHINE, + .class_init = sun4v_class_init, +}; + +static void sun4u_register_types(void) +{ + type_register_static(&power_info); + type_register_static(&ebus_info); + type_register_static(&prom_info); + type_register_static(&ram_info); + + type_register_static(&sun4u_type); + type_register_static(&sun4v_type); +} + +type_init(sun4u_register_types) diff --git a/hw/sparc64/sun4u_iommu.c b/hw/sparc64/sun4u_iommu.c new file mode 100644 index 000000000..9178277f8 --- /dev/null +++ b/hw/sparc64/sun4u_iommu.c @@ -0,0 +1,342 @@ +/* + * QEMU sun4u IOMMU emulation + * + * Copyright (c) 2006 Fabrice Bellard + * Copyright (c) 2012,2013 Artyom Tarasenko + * Copyright (c) 2017 Mark Cave-Ayland + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + 
* copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "hw/sysbus.h" +#include "hw/sparc/sun4u_iommu.h" +#include "exec/address-spaces.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "trace.h" + + +#define IOMMU_PAGE_SIZE_8K (1ULL << 13) +#define IOMMU_PAGE_MASK_8K (~(IOMMU_PAGE_SIZE_8K - 1)) +#define IOMMU_PAGE_SIZE_64K (1ULL << 16) +#define IOMMU_PAGE_MASK_64K (~(IOMMU_PAGE_SIZE_64K - 1)) + +#define IOMMU_CTRL 0x0 +#define IOMMU_CTRL_TBW_SIZE (1ULL << 2) +#define IOMMU_CTRL_MMU_EN (1ULL) + +#define IOMMU_CTRL_TSB_SHIFT 16 + +#define IOMMU_BASE 0x8 +#define IOMMU_FLUSH 0x10 + +#define IOMMU_TTE_DATA_V (1ULL << 63) +#define IOMMU_TTE_DATA_SIZE (1ULL << 61) +#define IOMMU_TTE_DATA_W (1ULL << 1) + +#define IOMMU_TTE_PHYS_MASK_8K 0x1ffffffe000ULL +#define IOMMU_TTE_PHYS_MASK_64K 0x1ffffff8000ULL + +#define IOMMU_TSB_8K_OFFSET_MASK_8M 0x00000000007fe000ULL +#define IOMMU_TSB_8K_OFFSET_MASK_16M 0x0000000000ffe000ULL +#define IOMMU_TSB_8K_OFFSET_MASK_32M 0x0000000001ffe000ULL +#define IOMMU_TSB_8K_OFFSET_MASK_64M 0x0000000003ffe000ULL +#define IOMMU_TSB_8K_OFFSET_MASK_128M 0x0000000007ffe000ULL +#define IOMMU_TSB_8K_OFFSET_MASK_256M 0x000000000fffe000ULL +#define IOMMU_TSB_8K_OFFSET_MASK_512M 0x000000001fffe000ULL +#define IOMMU_TSB_8K_OFFSET_MASK_1G 0x000000003fffe000ULL + +#define IOMMU_TSB_64K_OFFSET_MASK_64M 0x0000000003ff0000ULL +#define IOMMU_TSB_64K_OFFSET_MASK_128M 0x0000000007ff0000ULL +#define IOMMU_TSB_64K_OFFSET_MASK_256M 0x000000000fff0000ULL +#define IOMMU_TSB_64K_OFFSET_MASK_512M 0x000000001fff0000ULL +#define IOMMU_TSB_64K_OFFSET_MASK_1G 0x000000003fff0000ULL +#define IOMMU_TSB_64K_OFFSET_MASK_2G 0x000000007fff0000ULL + + +/* Called from RCU critical section */ +static IOMMUTLBEntry sun4u_translate_iommu(IOMMUMemoryRegion *iommu, + hwaddr addr, + IOMMUAccessFlags flag, int iommu_idx) +{ + IOMMUState *is = container_of(iommu, IOMMUState, iommu); + hwaddr baseaddr, offset; + uint64_t tte; + uint32_t tsbsize; + IOMMUTLBEntry ret = { + .target_as = &address_space_memory, + .iova = 0, + .translated_addr = 0, + .addr_mask = ~(hwaddr)0, + .perm = IOMMU_NONE, + }; + + if (!(is->regs[IOMMU_CTRL >> 3] & IOMMU_CTRL_MMU_EN)) { + /* IOMMU disabled, passthrough using standard 8K page */ + ret.iova = addr & IOMMU_PAGE_MASK_8K; + ret.translated_addr = addr; + ret.addr_mask = IOMMU_PAGE_MASK_8K; + ret.perm = IOMMU_RW; + + return ret; + } + + baseaddr = is->regs[IOMMU_BASE >> 3]; + tsbsize = (is->regs[IOMMU_CTRL >> 3] >> IOMMU_CTRL_TSB_SHIFT) & 0x7; + + if (is->regs[IOMMU_CTRL >> 3] & IOMMU_CTRL_TBW_SIZE) { + /* 64K */ + switch (tsbsize) { + case 0: + offset = (addr & IOMMU_TSB_64K_OFFSET_MASK_64M) >> 13; + break; + case 1: + offset = (addr & IOMMU_TSB_64K_OFFSET_MASK_128M) >> 13; + break; + case 2: + offset = (addr & 
IOMMU_TSB_64K_OFFSET_MASK_256M) >> 13; + break; + case 3: + offset = (addr & IOMMU_TSB_64K_OFFSET_MASK_512M) >> 13; + break; + case 4: + offset = (addr & IOMMU_TSB_64K_OFFSET_MASK_1G) >> 13; + break; + case 5: + offset = (addr & IOMMU_TSB_64K_OFFSET_MASK_2G) >> 13; + break; + default: + /* Not implemented, error */ + return ret; + } + } else { + /* 8K */ + switch (tsbsize) { + case 0: + offset = (addr & IOMMU_TSB_8K_OFFSET_MASK_8M) >> 10; + break; + case 1: + offset = (addr & IOMMU_TSB_8K_OFFSET_MASK_16M) >> 10; + break; + case 2: + offset = (addr & IOMMU_TSB_8K_OFFSET_MASK_32M) >> 10; + break; + case 3: + offset = (addr & IOMMU_TSB_8K_OFFSET_MASK_64M) >> 10; + break; + case 4: + offset = (addr & IOMMU_TSB_8K_OFFSET_MASK_128M) >> 10; + break; + case 5: + offset = (addr & IOMMU_TSB_8K_OFFSET_MASK_256M) >> 10; + break; + case 6: + offset = (addr & IOMMU_TSB_8K_OFFSET_MASK_512M) >> 10; + break; + case 7: + offset = (addr & IOMMU_TSB_8K_OFFSET_MASK_1G) >> 10; + break; + } + } + + tte = address_space_ldq_be(&address_space_memory, baseaddr + offset, + MEMTXATTRS_UNSPECIFIED, NULL); + + if (!(tte & IOMMU_TTE_DATA_V)) { + /* Invalid mapping */ + return ret; + } + + if (tte & IOMMU_TTE_DATA_W) { + /* Writeable */ + ret.perm = IOMMU_RW; + } else { + ret.perm = IOMMU_RO; + } + + /* Extract phys */ + if (tte & IOMMU_TTE_DATA_SIZE) { + /* 64K */ + ret.iova = addr & IOMMU_PAGE_MASK_64K; + ret.translated_addr = tte & IOMMU_TTE_PHYS_MASK_64K; + ret.addr_mask = (IOMMU_PAGE_SIZE_64K - 1); + } else { + /* 8K */ + ret.iova = addr & IOMMU_PAGE_MASK_8K; + ret.translated_addr = tte & IOMMU_TTE_PHYS_MASK_8K; + ret.addr_mask = (IOMMU_PAGE_SIZE_8K - 1); + } + + trace_sun4u_iommu_translate(ret.iova, ret.translated_addr, tte); + + return ret; +} + +static void iommu_mem_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + IOMMUState *is = opaque; + + trace_sun4u_iommu_mem_write(addr, val, size); + + switch (addr) { + case IOMMU_CTRL: + if (size == 4) { + is->regs[IOMMU_CTRL >> 3] &= 0xffffffffULL; + is->regs[IOMMU_CTRL >> 3] |= val << 32; + } else { + is->regs[IOMMU_CTRL >> 3] = val; + } + break; + case IOMMU_CTRL + 0x4: + is->regs[IOMMU_CTRL >> 3] &= 0xffffffff00000000ULL; + is->regs[IOMMU_CTRL >> 3] |= val & 0xffffffffULL; + break; + case IOMMU_BASE: + if (size == 4) { + is->regs[IOMMU_BASE >> 3] &= 0xffffffffULL; + is->regs[IOMMU_BASE >> 3] |= val << 32; + } else { + is->regs[IOMMU_BASE >> 3] = val; + } + break; + case IOMMU_BASE + 0x4: + is->regs[IOMMU_BASE >> 3] &= 0xffffffff00000000ULL; + is->regs[IOMMU_BASE >> 3] |= val & 0xffffffffULL; + break; + case IOMMU_FLUSH: + case IOMMU_FLUSH + 0x4: + break; + default: + qemu_log_mask(LOG_UNIMP, + "sun4u-iommu: Unimplemented register write " + "reg 0x%" HWADDR_PRIx " size 0x%x value 0x%" PRIx64 "\n", + addr, size, val); + break; + } +} + +static uint64_t iommu_mem_read(void *opaque, hwaddr addr, unsigned size) +{ + IOMMUState *is = opaque; + uint64_t val; + + switch (addr) { + case IOMMU_CTRL: + if (size == 4) { + val = is->regs[IOMMU_CTRL >> 3] >> 32; + } else { + val = is->regs[IOMMU_CTRL >> 3]; + } + break; + case IOMMU_CTRL + 0x4: + val = is->regs[IOMMU_CTRL >> 3] & 0xffffffffULL; + break; + case IOMMU_BASE: + if (size == 4) { + val = is->regs[IOMMU_BASE >> 3] >> 32; + } else { + val = is->regs[IOMMU_BASE >> 3]; + } + break; + case IOMMU_BASE + 0x4: + val = is->regs[IOMMU_BASE >> 3] & 0xffffffffULL; + break; + case IOMMU_FLUSH: + case IOMMU_FLUSH + 0x4: + val = 0; + break; + default: + qemu_log_mask(LOG_UNIMP, + "sun4u-iommu: Unimplemented register 
read " + "reg 0x%" HWADDR_PRIx " size 0x%x\n", + addr, size); + val = 0; + break; + } + + trace_sun4u_iommu_mem_read(addr, val, size); + + return val; +} + +static const MemoryRegionOps iommu_mem_ops = { + .read = iommu_mem_read, + .write = iommu_mem_write, + .endianness = DEVICE_BIG_ENDIAN, +}; + +static void iommu_reset(DeviceState *d) +{ + IOMMUState *s = SUN4U_IOMMU(d); + + memset(s->regs, 0, IOMMU_NREGS * sizeof(uint64_t)); +} + +static void iommu_init(Object *obj) +{ + IOMMUState *s = SUN4U_IOMMU(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + + memory_region_init_iommu(&s->iommu, sizeof(s->iommu), + TYPE_SUN4U_IOMMU_MEMORY_REGION, OBJECT(s), + "iommu-sun4u", UINT64_MAX); + address_space_init(&s->iommu_as, MEMORY_REGION(&s->iommu), "iommu-as"); + + memory_region_init_io(&s->iomem, obj, &iommu_mem_ops, s, "iommu", + IOMMU_NREGS * sizeof(uint64_t)); + sysbus_init_mmio(sbd, &s->iomem); +} + +static void iommu_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = iommu_reset; +} + +static const TypeInfo iommu_info = { + .name = TYPE_SUN4U_IOMMU, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(IOMMUState), + .instance_init = iommu_init, + .class_init = iommu_class_init, +}; + +static void sun4u_iommu_memory_region_class_init(ObjectClass *klass, void *data) +{ + IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass); + + imrc->translate = sun4u_translate_iommu; +} + +static const TypeInfo sun4u_iommu_memory_region_info = { + .parent = TYPE_IOMMU_MEMORY_REGION, + .name = TYPE_SUN4U_IOMMU_MEMORY_REGION, + .class_init = sun4u_iommu_memory_region_class_init, +}; + +static void iommu_register_types(void) +{ + type_register_static(&iommu_info); + type_register_static(&sun4u_iommu_memory_region_info); +} + +type_init(iommu_register_types) diff --git a/hw/sparc64/trace-events b/hw/sparc64/trace-events new file mode 100644 index 000000000..3eb4bacf7 --- /dev/null +++ b/hw/sparc64/trace-events @@ -0,0 +1,23 @@ +# See docs/devel/tracing.rst for syntax documentation. 
+ +# sun4u.c +ebus_isa_irq_handler(int n, int level) "Set ISA IRQ %d level %d" + +# sun4u_iommu.c +sun4u_iommu_mem_read(uint64_t addr, uint64_t val, int size) "addr: 0x%"PRIx64" val: 0x%"PRIx64" size: %d" +sun4u_iommu_mem_write(uint64_t addr, uint64_t val, int size) "addr: 0x%"PRIx64" val: 0x%"PRIx64" size: %d" +sun4u_iommu_translate(uint64_t addr, uint64_t trans_addr, uint64_t tte) "xlate 0x%"PRIx64" => pa 0x%"PRIx64" tte: 0x%"PRIx64 + +# sparc64.c +sparc64_cpu_ivec_raise_irq(int irq) "Raise IVEC IRQ %d" +sparc64_cpu_ivec_lower_irq(int irq) "Lower IVEC IRQ %d" +sparc64_cpu_tick_irq_disabled(void) "tick_irq: softint disabled" +sparc64_cpu_tick_irq_fire(void) "tick_irq: fire" +sparc64_cpu_stick_irq_disabled(void) "stick_irq: softint disabled" +sparc64_cpu_stick_irq_fire(void) "stick_irq: fire" +sparc64_cpu_hstick_irq_disabled(void) "hstick_irq: softint disabled" +sparc64_cpu_hstick_irq_fire(void) "hstick_irq: fire" +sparc64_cpu_tick_set_count(const char *name, uint64_t real_count, const char *npt, void *p) "%s set_count count=0x%"PRIx64" (npt %s) p=%p" +sparc64_cpu_tick_get_count(const char *name, uint64_t real_count, const char *npt, void *p) "%s get_count count=0x%"PRIx64" (npt %s) p=%p" +sparc64_cpu_tick_set_limit(const char *name, uint64_t real_limit, const char *dis, void *p, uint64_t limit, uint64_t t, uint64_t dt) "%s set_limit limit=0x%"PRIx64 " (%s) p=%p called with limit=0x%"PRIx64" at 0x%"PRIx64" (delta=0x%"PRIx64")" +sparc64_cpu_tick_set_limit_zero(const char *name) "%s set_limit limit=ZERO - not starting timer" diff --git a/hw/sparc64/trace.h b/hw/sparc64/trace.h new file mode 100644 index 000000000..b6ef6e611 --- /dev/null +++ b/hw/sparc64/trace.h @@ -0,0 +1 @@ +#include "trace/trace-hw_sparc64.h" diff --git a/hw/ssi/Kconfig b/hw/ssi/Kconfig new file mode 100644 index 000000000..7d90a0218 --- /dev/null +++ b/hw/ssi/Kconfig @@ -0,0 +1,22 @@ +config PL022 + bool + select SSI + +config SIFIVE_SPI + bool + select SSI + +config SSI + bool + +config XILINX_SPI + bool + select SSI + +config XILINX_SPIPS + bool + select SSI + +config STM32F2XX_SPI + bool + select SSI diff --git a/hw/ssi/aspeed_smc.c b/hw/ssi/aspeed_smc.c new file mode 100644 index 000000000..ff154eb84 --- /dev/null +++ b/hw/ssi/aspeed_smc.c @@ -0,0 +1,1710 @@ +/* + * ASPEED AST2400 SMC Controller (SPI Flash Only) + * + * Copyright (C) 2016 IBM Corp. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "qemu/error-report.h" +#include "qapi/error.h" +#include "qemu/units.h" +#include "trace.h" + +#include "hw/irq.h" +#include "hw/qdev-properties.h" +#include "hw/ssi/aspeed_smc.h" + +/* CE Type Setting Register */ +#define R_CONF (0x00 / 4) +#define CONF_LEGACY_DISABLE (1 << 31) +#define CONF_ENABLE_W4 20 +#define CONF_ENABLE_W3 19 +#define CONF_ENABLE_W2 18 +#define CONF_ENABLE_W1 17 +#define CONF_ENABLE_W0 16 +#define CONF_FLASH_TYPE4 8 +#define CONF_FLASH_TYPE3 6 +#define CONF_FLASH_TYPE2 4 +#define CONF_FLASH_TYPE1 2 +#define CONF_FLASH_TYPE0 0 +#define CONF_FLASH_TYPE_NOR 0x0 +#define CONF_FLASH_TYPE_NAND 0x1 +#define CONF_FLASH_TYPE_SPI 0x2 /* AST2600 is SPI only */ + +/* CE Control Register */ +#define R_CE_CTRL (0x04 / 4) +#define CTRL_EXTENDED4 4 /* 32 bit addressing for SPI */ +#define CTRL_EXTENDED3 3 /* 32 bit addressing for SPI */ +#define CTRL_EXTENDED2 2 /* 32 bit addressing for SPI */ +#define CTRL_EXTENDED1 1 /* 32 bit addressing for SPI */ +#define CTRL_EXTENDED0 0 /* 32 bit addressing for SPI */ + +/* Interrupt Control and Status Register */ +#define R_INTR_CTRL (0x08 / 4) +#define INTR_CTRL_DMA_STATUS (1 << 11) +#define INTR_CTRL_CMD_ABORT_STATUS (1 << 10) +#define INTR_CTRL_WRITE_PROTECT_STATUS (1 << 9) +#define INTR_CTRL_DMA_EN (1 << 3) +#define INTR_CTRL_CMD_ABORT_EN (1 << 2) +#define INTR_CTRL_WRITE_PROTECT_EN (1 << 1) + +/* Command Control Register */ +#define R_CE_CMD_CTRL (0x0C / 4) +#define CTRL_ADDR_BYTE0_DISABLE_SHIFT 4 +#define CTRL_DATA_BYTE0_DISABLE_SHIFT 0 + +#define aspeed_smc_addr_byte_enabled(s, i) \ + (!((s)->regs[R_CE_CMD_CTRL] & (1 << (CTRL_ADDR_BYTE0_DISABLE_SHIFT + (i))))) +#define aspeed_smc_data_byte_enabled(s, i) \ + (!((s)->regs[R_CE_CMD_CTRL] & (1 << (CTRL_DATA_BYTE0_DISABLE_SHIFT + (i))))) + +/* CEx Control Register */ +#define R_CTRL0 (0x10 / 4) +#define CTRL_IO_QPI (1 << 31) +#define CTRL_IO_QUAD_DATA (1 << 30) +#define CTRL_IO_DUAL_DATA (1 << 29) +#define CTRL_IO_DUAL_ADDR_DATA (1 << 28) /* Includes dummies */ +#define CTRL_IO_QUAD_ADDR_DATA (1 << 28) /* Includes dummies */ +#define CTRL_CMD_SHIFT 16 +#define CTRL_CMD_MASK 0xff +#define CTRL_DUMMY_HIGH_SHIFT 14 +#define CTRL_AST2400_SPI_4BYTE (1 << 13) +#define CE_CTRL_CLOCK_FREQ_SHIFT 8 +#define CE_CTRL_CLOCK_FREQ_MASK 0xf +#define CE_CTRL_CLOCK_FREQ(div) \ + (((div) & CE_CTRL_CLOCK_FREQ_MASK) << CE_CTRL_CLOCK_FREQ_SHIFT) +#define CTRL_DUMMY_LOW_SHIFT 6 /* 2 bits [7:6] */ +#define CTRL_CE_STOP_ACTIVE (1 << 2) +#define CTRL_CMD_MODE_MASK 0x3 +#define CTRL_READMODE 0x0 +#define CTRL_FREADMODE 0x1 +#define CTRL_WRITEMODE 0x2 +#define CTRL_USERMODE 0x3 +#define R_CTRL1 (0x14 / 4) +#define R_CTRL2 (0x18 / 4) +#define R_CTRL3 (0x1C / 4) +#define R_CTRL4 (0x20 / 4) + +/* CEx Segment Address Register */ +#define R_SEG_ADDR0 (0x30 / 4) +#define SEG_END_SHIFT 24 /* 8MB units */ +#define SEG_END_MASK 0xff +#define SEG_START_SHIFT 16 /* address bit [A29-A23] */ +#define SEG_START_MASK 0xff +#define R_SEG_ADDR1 (0x34 / 4) +#define R_SEG_ADDR2 (0x38 / 4) +#define R_SEG_ADDR3 (0x3C / 4) +#define R_SEG_ADDR4 (0x40 / 4) + +/* Misc Control Register #1 */ +#define R_MISC_CTRL1 (0x50 / 4) + +/* SPI dummy cycle data */ +#define R_DUMMY_DATA (0x54 / 4) + +/* FMC_WDT2 Control/Status Register for Alternate Boot (AST2600) */ +#define R_FMC_WDT2_CTRL (0x64 / 4) +#define FMC_WDT2_CTRL_ALT_BOOT_MODE BIT(6) /* O: 2 chips 1: 1 chip */ +#define FMC_WDT2_CTRL_SINGLE_BOOT_MODE 
BIT(5) +#define FMC_WDT2_CTRL_BOOT_SOURCE BIT(4) /* O: primary 1: alternate */ +#define FMC_WDT2_CTRL_EN BIT(0) + +/* DMA Control/Status Register */ +#define R_DMA_CTRL (0x80 / 4) +#define DMA_CTRL_REQUEST (1 << 31) +#define DMA_CTRL_GRANT (1 << 30) +#define DMA_CTRL_DELAY_MASK 0xf +#define DMA_CTRL_DELAY_SHIFT 8 +#define DMA_CTRL_FREQ_MASK 0xf +#define DMA_CTRL_FREQ_SHIFT 4 +#define DMA_CTRL_CALIB (1 << 3) +#define DMA_CTRL_CKSUM (1 << 2) +#define DMA_CTRL_WRITE (1 << 1) +#define DMA_CTRL_ENABLE (1 << 0) + +/* DMA Flash Side Address */ +#define R_DMA_FLASH_ADDR (0x84 / 4) + +/* DMA DRAM Side Address */ +#define R_DMA_DRAM_ADDR (0x88 / 4) + +/* DMA Length Register */ +#define R_DMA_LEN (0x8C / 4) + +/* Checksum Calculation Result */ +#define R_DMA_CHECKSUM (0x90 / 4) + +/* Read Timing Compensation Register */ +#define R_TIMINGS (0x94 / 4) + +/* SPI controller registers and bits (AST2400) */ +#define R_SPI_CONF (0x00 / 4) +#define SPI_CONF_ENABLE_W0 0 +#define R_SPI_CTRL0 (0x4 / 4) +#define R_SPI_MISC_CTRL (0x10 / 4) +#define R_SPI_TIMINGS (0x14 / 4) + +#define ASPEED_SMC_R_SPI_MAX (0x20 / 4) +#define ASPEED_SMC_R_SMC_MAX (0x20 / 4) + +/* + * DMA DRAM addresses should be 4 bytes aligned and the valid address + * range is 0x40000000 - 0x5FFFFFFF (AST2400) + * 0x80000000 - 0xBFFFFFFF (AST2500) + * + * DMA flash addresses should be 4 bytes aligned and the valid address + * range is 0x20000000 - 0x2FFFFFFF. + * + * DMA length is from 4 bytes to 32MB + * 0: 4 bytes + * 0x7FFFFF: 32M bytes + */ +#define DMA_DRAM_ADDR(asc, val) ((val) & (asc)->dma_dram_mask) +#define DMA_FLASH_ADDR(asc, val) ((val) & (asc)->dma_flash_mask) +#define DMA_LENGTH(val) ((val) & 0x01FFFFFC) + +/* Flash opcodes. */ +#define SPI_OP_READ 0x03 /* Read data bytes (low frequency) */ + +#define SNOOP_OFF 0xFF +#define SNOOP_START 0x0 + +/* + * Default segments mapping addresses and size for each peripheral per + * controller. These can be changed when board is initialized with the + * Segment Address Registers. + */ +static const AspeedSegments aspeed_2500_spi1_segments[]; +static const AspeedSegments aspeed_2500_spi2_segments[]; + +#define ASPEED_SMC_FEATURE_DMA 0x1 +#define ASPEED_SMC_FEATURE_DMA_GRANT 0x2 +#define ASPEED_SMC_FEATURE_WDT_CONTROL 0x4 + +static inline bool aspeed_smc_has_dma(const AspeedSMCClass *asc) +{ + return !!(asc->features & ASPEED_SMC_FEATURE_DMA); +} + +static inline bool aspeed_smc_has_wdt_control(const AspeedSMCClass *asc) +{ + return !!(asc->features & ASPEED_SMC_FEATURE_WDT_CONTROL); +} + +#define aspeed_smc_error(fmt, ...) 
\ + qemu_log_mask(LOG_GUEST_ERROR, "%s: " fmt "\n", __func__, ## __VA_ARGS__) + +static bool aspeed_smc_flash_overlap(const AspeedSMCState *s, + const AspeedSegments *new, + int cs) +{ + AspeedSMCClass *asc = ASPEED_SMC_GET_CLASS(s); + AspeedSegments seg; + int i; + + for (i = 0; i < asc->max_peripherals; i++) { + if (i == cs) { + continue; + } + + asc->reg_to_segment(s, s->regs[R_SEG_ADDR0 + i], &seg); + + if (new->addr + new->size > seg.addr && + new->addr < seg.addr + seg.size) { + aspeed_smc_error("new segment CS%d [ 0x%" + HWADDR_PRIx" - 0x%"HWADDR_PRIx" ] overlaps with " + "CS%d [ 0x%"HWADDR_PRIx" - 0x%"HWADDR_PRIx" ]", + cs, new->addr, new->addr + new->size, + i, seg.addr, seg.addr + seg.size); + return true; + } + } + return false; +} + +static void aspeed_smc_flash_set_segment_region(AspeedSMCState *s, int cs, + uint64_t regval) +{ + AspeedSMCClass *asc = ASPEED_SMC_GET_CLASS(s); + AspeedSMCFlash *fl = &s->flashes[cs]; + AspeedSegments seg; + + asc->reg_to_segment(s, regval, &seg); + + memory_region_transaction_begin(); + memory_region_set_size(&fl->mmio, seg.size); + memory_region_set_address(&fl->mmio, seg.addr - asc->flash_window_base); + memory_region_set_enabled(&fl->mmio, !!seg.size); + memory_region_transaction_commit(); + + s->regs[R_SEG_ADDR0 + cs] = regval; +} + +static void aspeed_smc_flash_set_segment(AspeedSMCState *s, int cs, + uint64_t new) +{ + AspeedSMCClass *asc = ASPEED_SMC_GET_CLASS(s); + AspeedSegments seg; + + asc->reg_to_segment(s, new, &seg); + + trace_aspeed_smc_flash_set_segment(cs, new, seg.addr, seg.addr + seg.size); + + /* The start address of CS0 is read-only */ + if (cs == 0 && seg.addr != asc->flash_window_base) { + aspeed_smc_error("Tried to change CS0 start address to 0x%" + HWADDR_PRIx, seg.addr); + seg.addr = asc->flash_window_base; + new = asc->segment_to_reg(s, &seg); + } + + /* + * The end address of the AST2500 spi controllers is also + * read-only. + */ + if ((asc->segments == aspeed_2500_spi1_segments || + asc->segments == aspeed_2500_spi2_segments) && + cs == asc->max_peripherals && + seg.addr + seg.size != asc->segments[cs].addr + + asc->segments[cs].size) { + aspeed_smc_error("Tried to change CS%d end address to 0x%" + HWADDR_PRIx, cs, seg.addr + seg.size); + seg.size = asc->segments[cs].addr + asc->segments[cs].size - + seg.addr; + new = asc->segment_to_reg(s, &seg); + } + + /* Keep the segment in the overall flash window */ + if (seg.size && + (seg.addr + seg.size <= asc->flash_window_base || + seg.addr > asc->flash_window_base + asc->flash_window_size)) { + aspeed_smc_error("new segment for CS%d is invalid : " + "[ 0x%"HWADDR_PRIx" - 0x%"HWADDR_PRIx" ]", + cs, seg.addr, seg.addr + seg.size); + return; + } + + /* Check start address vs. 
alignment */ + if (seg.size && !QEMU_IS_ALIGNED(seg.addr, seg.size)) { + aspeed_smc_error("new segment for CS%d is not " + "aligned : [ 0x%"HWADDR_PRIx" - 0x%"HWADDR_PRIx" ]", + cs, seg.addr, seg.addr + seg.size); + } + + /* And segments should not overlap (in the specs) */ + aspeed_smc_flash_overlap(s, &seg, cs); + + /* All should be fine now to move the region */ + aspeed_smc_flash_set_segment_region(s, cs, new); +} + +static uint64_t aspeed_smc_flash_default_read(void *opaque, hwaddr addr, + unsigned size) +{ + aspeed_smc_error("From 0x%" HWADDR_PRIx " of size %u", addr, size); + return 0; +} + +static void aspeed_smc_flash_default_write(void *opaque, hwaddr addr, + uint64_t data, unsigned size) +{ + aspeed_smc_error("To 0x%" HWADDR_PRIx " of size %u: 0x%" PRIx64, + addr, size, data); +} + +static const MemoryRegionOps aspeed_smc_flash_default_ops = { + .read = aspeed_smc_flash_default_read, + .write = aspeed_smc_flash_default_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 1, + .max_access_size = 4, + }, +}; + +static inline int aspeed_smc_flash_mode(const AspeedSMCFlash *fl) +{ + const AspeedSMCState *s = fl->controller; + + return s->regs[s->r_ctrl0 + fl->cs] & CTRL_CMD_MODE_MASK; +} + +static inline bool aspeed_smc_is_writable(const AspeedSMCFlash *fl) +{ + const AspeedSMCState *s = fl->controller; + + return s->regs[s->r_conf] & (1 << (s->conf_enable_w0 + fl->cs)); +} + +static inline int aspeed_smc_flash_cmd(const AspeedSMCFlash *fl) +{ + const AspeedSMCState *s = fl->controller; + int cmd = (s->regs[s->r_ctrl0 + fl->cs] >> CTRL_CMD_SHIFT) & CTRL_CMD_MASK; + + /* + * In read mode, the default SPI command is READ (0x3). In other + * modes, the command must be defined + * + * TODO: add support for READ4 (0x13) on AST2600 + */ + if (aspeed_smc_flash_mode(fl) == CTRL_READMODE) { + cmd = SPI_OP_READ; + } + + if (!cmd) { + aspeed_smc_error("no command defined for mode %d", + aspeed_smc_flash_mode(fl)); + } + + return cmd; +} + +static inline int aspeed_smc_flash_addr_width(const AspeedSMCFlash *fl) +{ + const AspeedSMCState *s = fl->controller; + AspeedSMCClass *asc = ASPEED_SMC_GET_CLASS(s); + + if (asc->addr_width) { + return asc->addr_width(s); + } else { + return s->regs[s->r_ce_ctrl] & (1 << (CTRL_EXTENDED0 + fl->cs)) ? 4 : 3; + } +} + +static void aspeed_smc_flash_do_select(AspeedSMCFlash *fl, bool unselect) +{ + AspeedSMCState *s = fl->controller; + + trace_aspeed_smc_flash_select(fl->cs, unselect ?
"un" : ""); + + qemu_set_irq(s->cs_lines[fl->cs], unselect); +} + +static void aspeed_smc_flash_select(AspeedSMCFlash *fl) +{ + aspeed_smc_flash_do_select(fl, false); +} + +static void aspeed_smc_flash_unselect(AspeedSMCFlash *fl) +{ + aspeed_smc_flash_do_select(fl, true); +} + +static uint32_t aspeed_smc_check_segment_addr(const AspeedSMCFlash *fl, + uint32_t addr) +{ + const AspeedSMCState *s = fl->controller; + AspeedSMCClass *asc = ASPEED_SMC_GET_CLASS(s); + AspeedSegments seg; + + asc->reg_to_segment(s, s->regs[R_SEG_ADDR0 + fl->cs], &seg); + if ((addr % seg.size) != addr) { + aspeed_smc_error("invalid address 0x%08x for CS%d segment : " + "[ 0x%"HWADDR_PRIx" - 0x%"HWADDR_PRIx" ]", + addr, fl->cs, seg.addr, seg.addr + seg.size); + addr %= seg.size; + } + + return addr; +} + +static int aspeed_smc_flash_dummies(const AspeedSMCFlash *fl) +{ + const AspeedSMCState *s = fl->controller; + uint32_t r_ctrl0 = s->regs[s->r_ctrl0 + fl->cs]; + uint32_t dummy_high = (r_ctrl0 >> CTRL_DUMMY_HIGH_SHIFT) & 0x1; + uint32_t dummy_low = (r_ctrl0 >> CTRL_DUMMY_LOW_SHIFT) & 0x3; + uint32_t dummies = ((dummy_high << 2) | dummy_low) * 8; + + if (r_ctrl0 & CTRL_IO_DUAL_ADDR_DATA) { + dummies /= 2; + } + + return dummies; +} + +static void aspeed_smc_flash_setup(AspeedSMCFlash *fl, uint32_t addr) +{ + const AspeedSMCState *s = fl->controller; + uint8_t cmd = aspeed_smc_flash_cmd(fl); + int i = aspeed_smc_flash_addr_width(fl); + + /* Flash access can not exceed CS segment */ + addr = aspeed_smc_check_segment_addr(fl, addr); + + ssi_transfer(s->spi, cmd); + while (i--) { + if (aspeed_smc_addr_byte_enabled(s, i)) { + ssi_transfer(s->spi, (addr >> (i * 8)) & 0xff); + } + } + + /* + * Use fake transfers to model dummy bytes. The value should + * be configured to some non-zero value in fast read mode and + * zero in read mode. But, as the HW allows inconsistent + * settings, let's check for fast read mode. + */ + if (aspeed_smc_flash_mode(fl) == CTRL_FREADMODE) { + for (i = 0; i < aspeed_smc_flash_dummies(fl); i++) { + ssi_transfer(fl->controller->spi, s->regs[R_DUMMY_DATA] & 0xff); + } + } +} + +static uint64_t aspeed_smc_flash_read(void *opaque, hwaddr addr, unsigned size) +{ + AspeedSMCFlash *fl = opaque; + AspeedSMCState *s = fl->controller; + uint64_t ret = 0; + int i; + + switch (aspeed_smc_flash_mode(fl)) { + case CTRL_USERMODE: + for (i = 0; i < size; i++) { + ret |= ssi_transfer(s->spi, 0x0) << (8 * i); + } + break; + case CTRL_READMODE: + case CTRL_FREADMODE: + aspeed_smc_flash_select(fl); + aspeed_smc_flash_setup(fl, addr); + + for (i = 0; i < size; i++) { + ret |= ssi_transfer(s->spi, 0x0) << (8 * i); + } + + aspeed_smc_flash_unselect(fl); + break; + default: + aspeed_smc_error("invalid flash mode %d", aspeed_smc_flash_mode(fl)); + } + + trace_aspeed_smc_flash_read(fl->cs, addr, size, ret, + aspeed_smc_flash_mode(fl)); + return ret; +} + +/* + * TODO (clg@kaod.org): stolen from xilinx_spips.c. Should move to a + * common include header. 
+ */ +typedef enum { + READ = 0x3, READ_4 = 0x13, + FAST_READ = 0xb, FAST_READ_4 = 0x0c, + DOR = 0x3b, DOR_4 = 0x3c, + QOR = 0x6b, QOR_4 = 0x6c, + DIOR = 0xbb, DIOR_4 = 0xbc, + QIOR = 0xeb, QIOR_4 = 0xec, + + PP = 0x2, PP_4 = 0x12, + DPP = 0xa2, + QPP = 0x32, QPP_4 = 0x34, +} FlashCMD; + +static int aspeed_smc_num_dummies(uint8_t command) +{ + switch (command) { /* check for dummies */ + case READ: /* no dummy bytes/cycles */ + case PP: + case DPP: + case QPP: + case READ_4: + case PP_4: + case QPP_4: + return 0; + case FAST_READ: + case DOR: + case QOR: + case FAST_READ_4: + case DOR_4: + case QOR_4: + return 1; + case DIOR: + case DIOR_4: + return 2; + case QIOR: + case QIOR_4: + return 4; + default: + return -1; + } +} + +static bool aspeed_smc_do_snoop(AspeedSMCFlash *fl, uint64_t data, + unsigned size) +{ + AspeedSMCState *s = fl->controller; + uint8_t addr_width = aspeed_smc_flash_addr_width(fl); + + trace_aspeed_smc_do_snoop(fl->cs, s->snoop_index, s->snoop_dummies, + (uint8_t) data & 0xff); + + if (s->snoop_index == SNOOP_OFF) { + return false; /* Do nothing */ + + } else if (s->snoop_index == SNOOP_START) { + uint8_t cmd = data & 0xff; + int ndummies = aspeed_smc_num_dummies(cmd); + + /* + * No dummy cycles are expected with the current command. Turn + * off snooping and let the transfer proceed normally. + */ + if (ndummies <= 0) { + s->snoop_index = SNOOP_OFF; + return false; + } + + s->snoop_dummies = ndummies * 8; + + } else if (s->snoop_index >= addr_width + 1) { + + /* The SPI transfer has reached the dummy cycles sequence */ + for (; s->snoop_dummies; s->snoop_dummies--) { + ssi_transfer(s->spi, s->regs[R_DUMMY_DATA] & 0xff); + } + + /* If no more dummy cycles are expected, turn off snooping */ + if (!s->snoop_dummies) { + s->snoop_index = SNOOP_OFF; + } else { + s->snoop_index += size; + } + + /* + * Dummy cycles have been faked already. 
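aspeed_smc_do_snoop() keys off the first byte of a user-mode transfer: the opcode determines how many dummy bytes the flash expects before real data flows, and the snooper converts that to cycles (x8) and fakes one transfer per cycle. Here is the opcode-to-dummy mapping restated as a standalone function; opcode values mirror the FlashCMD enum above.

#include <stdint.h>
#include <stdio.h>

static int sketch_num_dummies(uint8_t cmd)
{
    switch (cmd) {
    case 0x03: case 0x13:             /* READ / READ_4 */
    case 0x02: case 0x12:             /* PP / PP_4 */
    case 0xa2:                        /* DPP */
    case 0x32: case 0x34:             /* QPP / QPP_4 */
        return 0;
    case 0x0b: case 0x0c:             /* FAST_READ / FAST_READ_4 */
    case 0x3b: case 0x3c:             /* DOR / DOR_4 */
    case 0x6b: case 0x6c:             /* QOR / QOR_4 */
        return 1;
    case 0xbb: case 0xbc:             /* DIOR / DIOR_4 */
        return 2;
    case 0xeb: case 0xec:             /* QIOR / QIOR_4 */
        return 4;
    default:
        return -1;                    /* unknown: snooping is turned off */
    }
}

int main(void)
{
    printf("FAST_READ needs %d dummy byte(s)\n", sketch_num_dummies(0x0b));
    return 0;
}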
Ignore the current + * SPI transfer + */ + return true; + } + + s->snoop_index += size; + return false; +} + +static void aspeed_smc_flash_write(void *opaque, hwaddr addr, uint64_t data, + unsigned size) +{ + AspeedSMCFlash *fl = opaque; + AspeedSMCState *s = fl->controller; + int i; + + trace_aspeed_smc_flash_write(fl->cs, addr, size, data, + aspeed_smc_flash_mode(fl)); + + if (!aspeed_smc_is_writable(fl)) { + aspeed_smc_error("flash is not writable at 0x%" HWADDR_PRIx, addr); + return; + } + + switch (aspeed_smc_flash_mode(fl)) { + case CTRL_USERMODE: + if (aspeed_smc_do_snoop(fl, data, size)) { + break; + } + + for (i = 0; i < size; i++) { + ssi_transfer(s->spi, (data >> (8 * i)) & 0xff); + } + break; + case CTRL_WRITEMODE: + aspeed_smc_flash_select(fl); + aspeed_smc_flash_setup(fl, addr); + + for (i = 0; i < size; i++) { + ssi_transfer(s->spi, (data >> (8 * i)) & 0xff); + } + + aspeed_smc_flash_unselect(fl); + break; + default: + aspeed_smc_error("invalid flash mode %d", aspeed_smc_flash_mode(fl)); + } +} + +static const MemoryRegionOps aspeed_smc_flash_ops = { + .read = aspeed_smc_flash_read, + .write = aspeed_smc_flash_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 1, + .max_access_size = 4, + }, +}; + +static void aspeed_smc_flash_update_ctrl(AspeedSMCFlash *fl, uint32_t value) +{ + AspeedSMCState *s = fl->controller; + bool unselect; + + /* User mode selects the CS, other modes unselect */ + unselect = (value & CTRL_CMD_MODE_MASK) != CTRL_USERMODE; + + /* A change of CTRL_CE_STOP_ACTIVE from 0 to 1, unselects the CS */ + if (!(s->regs[s->r_ctrl0 + fl->cs] & CTRL_CE_STOP_ACTIVE) && + value & CTRL_CE_STOP_ACTIVE) { + unselect = true; + } + + s->regs[s->r_ctrl0 + fl->cs] = value; + + s->snoop_index = unselect ? SNOOP_OFF : SNOOP_START; + + aspeed_smc_flash_do_select(fl, unselect); +} + +static void aspeed_smc_reset(DeviceState *d) +{ + AspeedSMCState *s = ASPEED_SMC(d); + AspeedSMCClass *asc = ASPEED_SMC_GET_CLASS(s); + int i; + + if (asc->resets) { + memcpy(s->regs, asc->resets, sizeof s->regs); + } else { + memset(s->regs, 0, sizeof s->regs); + } + + /* Unselect all peripherals */ + for (i = 0; i < s->num_cs; ++i) { + s->regs[s->r_ctrl0 + i] |= CTRL_CE_STOP_ACTIVE; + qemu_set_irq(s->cs_lines[i], true); + } + + /* setup the default segment register values and regions for all */ + for (i = 0; i < asc->max_peripherals; ++i) { + aspeed_smc_flash_set_segment_region(s, i, + asc->segment_to_reg(s, &asc->segments[i])); + } + + s->snoop_index = SNOOP_OFF; + s->snoop_dummies = 0; +} + +static uint64_t aspeed_smc_read(void *opaque, hwaddr addr, unsigned int size) +{ + AspeedSMCState *s = ASPEED_SMC(opaque); + AspeedSMCClass *asc = ASPEED_SMC_GET_CLASS(opaque); + + addr >>= 2; + + if (addr == s->r_conf || + (addr >= s->r_timings && + addr < s->r_timings + asc->nregs_timings) || + addr == s->r_ce_ctrl || + addr == R_CE_CMD_CTRL || + addr == R_INTR_CTRL || + addr == R_DUMMY_DATA || + (aspeed_smc_has_wdt_control(asc) && addr == R_FMC_WDT2_CTRL) || + (aspeed_smc_has_dma(asc) && addr == R_DMA_CTRL) || + (aspeed_smc_has_dma(asc) && addr == R_DMA_FLASH_ADDR) || + (aspeed_smc_has_dma(asc) && addr == R_DMA_DRAM_ADDR) || + (aspeed_smc_has_dma(asc) && addr == R_DMA_LEN) || + (aspeed_smc_has_dma(asc) && addr == R_DMA_CHECKSUM) || + (addr >= R_SEG_ADDR0 && + addr < R_SEG_ADDR0 + asc->max_peripherals) || + (addr >= s->r_ctrl0 && addr < s->r_ctrl0 + asc->max_peripherals)) { + + trace_aspeed_smc_read(addr << 2, size, s->regs[addr]); + + return s->regs[addr]; + } else { + 
qemu_log_mask(LOG_UNIMP, "%s: not implemented: 0x%" HWADDR_PRIx "\n", + __func__, addr); + return -1; + } +} + +static uint8_t aspeed_smc_hclk_divisor(uint8_t hclk_mask) +{ + /* HCLK/1 .. HCLK/16 */ + const uint8_t hclk_divisors[] = { + 15, 7, 14, 6, 13, 5, 12, 4, 11, 3, 10, 2, 9, 1, 8, 0 + }; + int i; + + for (i = 0; i < ARRAY_SIZE(hclk_divisors); i++) { + if (hclk_mask == hclk_divisors[i]) { + return i + 1; + } + } + + aspeed_smc_error("invalid HCLK mask %x", hclk_mask); + return 0; +} + +/* + * When doing calibration, the SPI clock rate in the CE0 Control + * Register and the read delay cycles in the Read Timing Compensation + * Register are set using bit[11:4] of the DMA Control Register. + */ +static void aspeed_smc_dma_calibration(AspeedSMCState *s) +{ + uint8_t delay = + (s->regs[R_DMA_CTRL] >> DMA_CTRL_DELAY_SHIFT) & DMA_CTRL_DELAY_MASK; + uint8_t hclk_mask = + (s->regs[R_DMA_CTRL] >> DMA_CTRL_FREQ_SHIFT) & DMA_CTRL_FREQ_MASK; + uint8_t hclk_div = aspeed_smc_hclk_divisor(hclk_mask); + uint32_t hclk_shift = (hclk_div - 1) << 2; + uint8_t cs; + + /* + * The Read Timing Compensation Register values apply to all CS on + * the SPI bus and only HCLK/1 - HCLK/5 can have tunable delays + */ + if (hclk_div && hclk_div < 6) { + s->regs[s->r_timings] &= ~(0xf << hclk_shift); + s->regs[s->r_timings] |= delay << hclk_shift; + } + + /* + * TODO: compute the CS from the DMA address and the segment + * registers. This is not really a problem for now because the + * Timing Register values apply to all CS and software uses CS0 to + * do calibration. + */ + cs = 0; + s->regs[s->r_ctrl0 + cs] &= + ~(CE_CTRL_CLOCK_FREQ_MASK << CE_CTRL_CLOCK_FREQ_SHIFT); + s->regs[s->r_ctrl0 + cs] |= CE_CTRL_CLOCK_FREQ(hclk_div); +} + +/* + * Emulate read errors in the DMA Checksum Register for high + * frequencies and optimistic settings of the Read Timing Compensation + * Register. This will help in tuning the SPI timing calibration + * algorithm. + */ +static bool aspeed_smc_inject_read_failure(AspeedSMCState *s) +{ + uint8_t delay = + (s->regs[R_DMA_CTRL] >> DMA_CTRL_DELAY_SHIFT) & DMA_CTRL_DELAY_MASK; + uint8_t hclk_mask = + (s->regs[R_DMA_CTRL] >> DMA_CTRL_FREQ_SHIFT) & DMA_CTRL_FREQ_MASK; + + /* + * Typical values of a palmetto-bmc machine. + */ + switch (aspeed_smc_hclk_divisor(hclk_mask)) { + case 4 ... 16: + return false; + case 3: /* at least one HCLK cycle delay */ + return (delay & 0x7) < 1; + case 2: /* at least two HCLK cycle delay */ + return (delay & 0x7) < 2; + case 1: /* (> 100MHz) is above the max freq of the controller */ + return true; + default: + g_assert_not_reached(); + } +} + +/* + * Accumulate the result of the reads to provide a checksum that will + * be used to validate the read timing settings. + */ +static void aspeed_smc_dma_checksum(AspeedSMCState *s) +{ + MemTxResult result; + uint32_t data; + + if (s->regs[R_DMA_CTRL] & DMA_CTRL_WRITE) { + aspeed_smc_error("invalid direction for DMA checksum"); + return; + } + + if (s->regs[R_DMA_CTRL] & DMA_CTRL_CALIB) { + aspeed_smc_dma_calibration(s); + } + + while (s->regs[R_DMA_LEN]) { + data = address_space_ldl_le(&s->flash_as, s->regs[R_DMA_FLASH_ADDR], + MEMTXATTRS_UNSPECIFIED, &result); + if (result != MEMTX_OK) { + aspeed_smc_error("Flash read failed @%08x", + s->regs[R_DMA_FLASH_ADDR]); + return; + } + trace_aspeed_smc_dma_checksum(s->regs[R_DMA_FLASH_ADDR], data); + + /* + * When the DMA is on-going, the DMA registers are updated + * with the current working addresses and length. 
+ */ + s->regs[R_DMA_CHECKSUM] += data; + s->regs[R_DMA_FLASH_ADDR] += 4; + s->regs[R_DMA_LEN] -= 4; + } + + if (s->inject_failure && aspeed_smc_inject_read_failure(s)) { + s->regs[R_DMA_CHECKSUM] = 0xbadc0de; + } + +} + +static void aspeed_smc_dma_rw(AspeedSMCState *s) +{ + MemTxResult result; + uint32_t data; + + trace_aspeed_smc_dma_rw(s->regs[R_DMA_CTRL] & DMA_CTRL_WRITE ? + "write" : "read", + s->regs[R_DMA_FLASH_ADDR], + s->regs[R_DMA_DRAM_ADDR], + s->regs[R_DMA_LEN]); + while (s->regs[R_DMA_LEN]) { + if (s->regs[R_DMA_CTRL] & DMA_CTRL_WRITE) { + data = address_space_ldl_le(&s->dram_as, s->regs[R_DMA_DRAM_ADDR], + MEMTXATTRS_UNSPECIFIED, &result); + if (result != MEMTX_OK) { + aspeed_smc_error("DRAM read failed @%08x", + s->regs[R_DMA_DRAM_ADDR]); + return; + } + + address_space_stl_le(&s->flash_as, s->regs[R_DMA_FLASH_ADDR], + data, MEMTXATTRS_UNSPECIFIED, &result); + if (result != MEMTX_OK) { + aspeed_smc_error("Flash write failed @%08x", + s->regs[R_DMA_FLASH_ADDR]); + return; + } + } else { + data = address_space_ldl_le(&s->flash_as, s->regs[R_DMA_FLASH_ADDR], + MEMTXATTRS_UNSPECIFIED, &result); + if (result != MEMTX_OK) { + aspeed_smc_error("Flash read failed @%08x", + s->regs[R_DMA_FLASH_ADDR]); + return; + } + + address_space_stl_le(&s->dram_as, s->regs[R_DMA_DRAM_ADDR], + data, MEMTXATTRS_UNSPECIFIED, &result); + if (result != MEMTX_OK) { + aspeed_smc_error("DRAM write failed @%08x", + s->regs[R_DMA_DRAM_ADDR]); + return; + } + } + + /* + * When the DMA is on-going, the DMA registers are updated + * with the current working addresses and length. + */ + s->regs[R_DMA_FLASH_ADDR] += 4; + s->regs[R_DMA_DRAM_ADDR] += 4; + s->regs[R_DMA_LEN] -= 4; + s->regs[R_DMA_CHECKSUM] += data; + } +} + +static void aspeed_smc_dma_stop(AspeedSMCState *s) +{ + /* + * When the DMA is disabled, INTR_CTRL_DMA_STATUS=0 means the + * engine is idle + */ + s->regs[R_INTR_CTRL] &= ~INTR_CTRL_DMA_STATUS; + s->regs[R_DMA_CHECKSUM] = 0; + + /* + * Lower the DMA irq in any case. The IRQ control register could + * have been cleared before disabling the DMA. + */ + qemu_irq_lower(s->irq); +} + +/* + * When INTR_CTRL_DMA_STATUS=1, the DMA has completed and a new DMA + * can start even if the result of the previous was not collected. 
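The checksum accumulated in R_DMA_CHECKSUM above is nothing more than a 32-bit wrapping sum of the words read back from flash; firmware compares sums across timing settings to calibrate read delays. A host-side sketch of the accumulation:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

static uint32_t sketch_dma_checksum(const uint32_t *words, size_t len_bytes)
{
    uint32_t sum = 0;
    size_t i;

    for (i = 0; i < len_bytes / 4; i++) {
        sum += words[i];     /* wraps modulo 2^32, like R_DMA_CHECKSUM */
    }
    return sum;
}

int main(void)
{
    uint32_t data[] = { 0xdeadbeef, 0x00000001 };

    /* prints 0xdeadbef0 */
    printf("0x%08x\n", sketch_dma_checksum(data, sizeof(data)));
    return 0;
}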
+ */ +static bool aspeed_smc_dma_in_progress(AspeedSMCState *s) +{ + return s->regs[R_DMA_CTRL] & DMA_CTRL_ENABLE && + !(s->regs[R_INTR_CTRL] & INTR_CTRL_DMA_STATUS); +} + +static void aspeed_smc_dma_done(AspeedSMCState *s) +{ + s->regs[R_INTR_CTRL] |= INTR_CTRL_DMA_STATUS; + if (s->regs[R_INTR_CTRL] & INTR_CTRL_DMA_EN) { + qemu_irq_raise(s->irq); + } +} + +static void aspeed_smc_dma_ctrl(AspeedSMCState *s, uint32_t dma_ctrl) +{ + if (!(dma_ctrl & DMA_CTRL_ENABLE)) { + s->regs[R_DMA_CTRL] = dma_ctrl; + + aspeed_smc_dma_stop(s); + return; + } + + if (aspeed_smc_dma_in_progress(s)) { + aspeed_smc_error("DMA in progress !"); + return; + } + + s->regs[R_DMA_CTRL] = dma_ctrl; + + if (s->regs[R_DMA_CTRL] & DMA_CTRL_CKSUM) { + aspeed_smc_dma_checksum(s); + } else { + aspeed_smc_dma_rw(s); + } + + aspeed_smc_dma_done(s); +} + +static inline bool aspeed_smc_dma_granted(AspeedSMCState *s) +{ + AspeedSMCClass *asc = ASPEED_SMC_GET_CLASS(s); + + if (!(asc->features & ASPEED_SMC_FEATURE_DMA_GRANT)) { + return true; + } + + if (!(s->regs[R_DMA_CTRL] & DMA_CTRL_GRANT)) { + aspeed_smc_error("DMA not granted"); + return false; + } + + return true; +} + +static void aspeed_2600_smc_dma_ctrl(AspeedSMCState *s, uint32_t dma_ctrl) +{ + /* Preserve DMA bits */ + dma_ctrl |= s->regs[R_DMA_CTRL] & (DMA_CTRL_REQUEST | DMA_CTRL_GRANT); + + if (dma_ctrl == 0xAEED0000) { + /* automatically grant request */ + s->regs[R_DMA_CTRL] |= (DMA_CTRL_REQUEST | DMA_CTRL_GRANT); + return; + } + + /* clear request */ + if (dma_ctrl == 0xDEEA0000) { + s->regs[R_DMA_CTRL] &= ~(DMA_CTRL_REQUEST | DMA_CTRL_GRANT); + return; + } + + if (!aspeed_smc_dma_granted(s)) { + aspeed_smc_error("DMA not granted"); + return; + } + + aspeed_smc_dma_ctrl(s, dma_ctrl); + s->regs[R_DMA_CTRL] &= ~(DMA_CTRL_REQUEST | DMA_CTRL_GRANT); +} + +static void aspeed_smc_write(void *opaque, hwaddr addr, uint64_t data, + unsigned int size) +{ + AspeedSMCState *s = ASPEED_SMC(opaque); + AspeedSMCClass *asc = ASPEED_SMC_GET_CLASS(s); + uint32_t value = data; + + trace_aspeed_smc_write(addr, size, data); + + addr >>= 2; + + if (addr == s->r_conf || + (addr >= s->r_timings && + addr < s->r_timings + asc->nregs_timings) || + addr == s->r_ce_ctrl) { + s->regs[addr] = value; + } else if (addr >= s->r_ctrl0 && addr < s->r_ctrl0 + s->num_cs) { + int cs = addr - s->r_ctrl0; + aspeed_smc_flash_update_ctrl(&s->flashes[cs], value); + } else if (addr >= R_SEG_ADDR0 && + addr < R_SEG_ADDR0 + asc->max_peripherals) { + int cs = addr - R_SEG_ADDR0; + + if (value != s->regs[R_SEG_ADDR0 + cs]) { + aspeed_smc_flash_set_segment(s, cs, value); + } + } else if (addr == R_CE_CMD_CTRL) { + s->regs[addr] = value & 0xff; + } else if (addr == R_DUMMY_DATA) { + s->regs[addr] = value & 0xff; + } else if (aspeed_smc_has_wdt_control(asc) && addr == R_FMC_WDT2_CTRL) { + s->regs[addr] = value & FMC_WDT2_CTRL_EN; + } else if (addr == R_INTR_CTRL) { + s->regs[addr] = value; + } else if (aspeed_smc_has_dma(asc) && addr == R_DMA_CTRL) { + asc->dma_ctrl(s, value); + } else if (aspeed_smc_has_dma(asc) && addr == R_DMA_DRAM_ADDR && + aspeed_smc_dma_granted(s)) { + s->regs[addr] = DMA_DRAM_ADDR(asc, value); + } else if (aspeed_smc_has_dma(asc) && addr == R_DMA_FLASH_ADDR && + aspeed_smc_dma_granted(s)) { + s->regs[addr] = DMA_FLASH_ADDR(asc, value); + } else if (aspeed_smc_has_dma(asc) && addr == R_DMA_LEN && + aspeed_smc_dma_granted(s)) { + s->regs[addr] = DMA_LENGTH(value); + } else { + qemu_log_mask(LOG_UNIMP, "%s: not implemented: 0x%" HWADDR_PRIx "\n", + __func__, addr); + return; + } +} + +static 
const MemoryRegionOps aspeed_smc_ops = { + .read = aspeed_smc_read, + .write = aspeed_smc_write, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static void aspeed_smc_instance_init(Object *obj) +{ + AspeedSMCState *s = ASPEED_SMC(obj); + AspeedSMCClass *asc = ASPEED_SMC_GET_CLASS(s); + int i; + + for (i = 0; i < asc->max_peripherals; i++) { + object_initialize_child(obj, "flash[*]", &s->flashes[i], + TYPE_ASPEED_SMC_FLASH); + } +} + +/* + * Initialize the custom address spaces for DMAs + */ +static void aspeed_smc_dma_setup(AspeedSMCState *s, Error **errp) +{ + if (!s->dram_mr) { + error_setg(errp, TYPE_ASPEED_SMC ": 'dram' link not set"); + return; + } + + address_space_init(&s->flash_as, &s->mmio_flash, + TYPE_ASPEED_SMC ".dma-flash"); + address_space_init(&s->dram_as, s->dram_mr, + TYPE_ASPEED_SMC ".dma-dram"); +} + +static void aspeed_smc_realize(DeviceState *dev, Error **errp) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + AspeedSMCState *s = ASPEED_SMC(dev); + AspeedSMCClass *asc = ASPEED_SMC_GET_CLASS(s); + int i; + hwaddr offset = 0; + + /* keep a copy under AspeedSMCState to speed up accesses */ + s->r_conf = asc->r_conf; + s->r_ce_ctrl = asc->r_ce_ctrl; + s->r_ctrl0 = asc->r_ctrl0; + s->r_timings = asc->r_timings; + s->conf_enable_w0 = asc->conf_enable_w0; + + /* Enforce some real HW limits */ + if (s->num_cs > asc->max_peripherals) { + aspeed_smc_error("num_cs cannot exceed: %d", asc->max_peripherals); + s->num_cs = asc->max_peripherals; + } + + /* DMA irq. Keep it first for the initialization in the SoC */ + sysbus_init_irq(sbd, &s->irq); + + s->spi = ssi_create_bus(dev, "spi"); + + /* Setup cs_lines for peripherals */ + s->cs_lines = g_new0(qemu_irq, s->num_cs); + + for (i = 0; i < s->num_cs; ++i) { + sysbus_init_irq(sbd, &s->cs_lines[i]); + } + + /* The memory region for the controller registers */ + memory_region_init_io(&s->mmio, OBJECT(s), &aspeed_smc_ops, s, + TYPE_ASPEED_SMC, asc->nregs * 4); + sysbus_init_mmio(sbd, &s->mmio); + + /* + * The container memory region representing the address space + * window in which the flash modules are mapped. The size and + * address depends on the SoC model and controller type. + */ + memory_region_init(&s->mmio_flash_container, OBJECT(s), + TYPE_ASPEED_SMC ".container", + asc->flash_window_size); + sysbus_init_mmio(sbd, &s->mmio_flash_container); + + memory_region_init_io(&s->mmio_flash, OBJECT(s), + &aspeed_smc_flash_default_ops, s, + TYPE_ASPEED_SMC ".flash", + asc->flash_window_size); + memory_region_add_subregion(&s->mmio_flash_container, 0x0, + &s->mmio_flash); + + /* + * Let's create a sub memory region for each possible peripheral. All + * have a configurable memory segment in the overall flash mapping + * window of the controller but, there is not necessarily a flash + * module behind to handle the memory accesses. This depends on + * the board configuration. 
+ */ + for (i = 0; i < asc->max_peripherals; ++i) { + AspeedSMCFlash *fl = &s->flashes[i]; + + if (!object_property_set_link(OBJECT(fl), "controller", OBJECT(s), + errp)) { + return; + } + if (!object_property_set_uint(OBJECT(fl), "cs", i, errp)) { + return; + } + if (!sysbus_realize(SYS_BUS_DEVICE(fl), errp)) { + return; + } + + memory_region_add_subregion(&s->mmio_flash, offset, &fl->mmio); + offset += asc->segments[i].size; + } + + /* DMA support */ + if (aspeed_smc_has_dma(asc)) { + aspeed_smc_dma_setup(s, errp); + } +} + +static const VMStateDescription vmstate_aspeed_smc = { + .name = "aspeed.smc", + .version_id = 2, + .minimum_version_id = 2, + .fields = (VMStateField[]) { + VMSTATE_UINT32_ARRAY(regs, AspeedSMCState, ASPEED_SMC_R_MAX), + VMSTATE_UINT8(snoop_index, AspeedSMCState), + VMSTATE_UINT8(snoop_dummies, AspeedSMCState), + VMSTATE_END_OF_LIST() + } +}; + +static Property aspeed_smc_properties[] = { + DEFINE_PROP_UINT32("num-cs", AspeedSMCState, num_cs, 1), + DEFINE_PROP_BOOL("inject-failure", AspeedSMCState, inject_failure, false), + DEFINE_PROP_LINK("dram", AspeedSMCState, dram_mr, + TYPE_MEMORY_REGION, MemoryRegion *), + DEFINE_PROP_END_OF_LIST(), +}; + +static void aspeed_smc_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = aspeed_smc_realize; + dc->reset = aspeed_smc_reset; + device_class_set_props(dc, aspeed_smc_properties); + dc->vmsd = &vmstate_aspeed_smc; +} + +static const TypeInfo aspeed_smc_info = { + .name = TYPE_ASPEED_SMC, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_init = aspeed_smc_instance_init, + .instance_size = sizeof(AspeedSMCState), + .class_size = sizeof(AspeedSMCClass), + .class_init = aspeed_smc_class_init, + .abstract = true, +}; + +static void aspeed_smc_flash_realize(DeviceState *dev, Error **errp) +{ + AspeedSMCFlash *s = ASPEED_SMC_FLASH(dev); + AspeedSMCClass *asc; + g_autofree char *name = g_strdup_printf(TYPE_ASPEED_SMC_FLASH ".%d", s->cs); + + if (!s->controller) { + error_setg(errp, TYPE_ASPEED_SMC_FLASH ": 'controller' link not set"); + return; + } + + asc = ASPEED_SMC_GET_CLASS(s->controller); + + /* + * Use the default segment value to size the memory region. This + * can be changed by FW at runtime. + */ + memory_region_init_io(&s->mmio, OBJECT(s), &aspeed_smc_flash_ops, + s, name, asc->segments[s->cs].size); + sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->mmio); +} + +static Property aspeed_smc_flash_properties[] = { + DEFINE_PROP_UINT8("cs", AspeedSMCFlash, cs, 0), + DEFINE_PROP_LINK("controller", AspeedSMCFlash, controller, TYPE_ASPEED_SMC, + AspeedSMCState *), + DEFINE_PROP_END_OF_LIST(), +}; + +static void aspeed_smc_flash_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->desc = "Aspeed SMC Flash device region"; + dc->realize = aspeed_smc_flash_realize; + device_class_set_props(dc, aspeed_smc_flash_properties); +} + +static const TypeInfo aspeed_smc_flash_info = { + .name = TYPE_ASPEED_SMC_FLASH, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(AspeedSMCFlash), + .class_init = aspeed_smc_flash_class_init, +}; + +/* + * The Segment Registers of the AST2400 and AST2500 have a 8MB + * unit. The address range of a flash SPI peripheral is encoded with + * absolute addresses which should be part of the overall controller + * window. 
+ */ +static uint32_t aspeed_smc_segment_to_reg(const AspeedSMCState *s, + const AspeedSegments *seg) +{ + uint32_t reg = 0; + reg |= ((seg->addr >> 23) & SEG_START_MASK) << SEG_START_SHIFT; + reg |= (((seg->addr + seg->size) >> 23) & SEG_END_MASK) << SEG_END_SHIFT; + return reg; +} + +static void aspeed_smc_reg_to_segment(const AspeedSMCState *s, + uint32_t reg, AspeedSegments *seg) +{ + seg->addr = ((reg >> SEG_START_SHIFT) & SEG_START_MASK) << 23; + seg->size = (((reg >> SEG_END_SHIFT) & SEG_END_MASK) << 23) - seg->addr; +} + +static const AspeedSegments aspeed_2400_smc_segments[] = { + { 0x10000000, 32 * MiB }, +}; + +static void aspeed_2400_smc_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + AspeedSMCClass *asc = ASPEED_SMC_CLASS(klass); + + dc->desc = "Aspeed 2400 SMC Controller"; + asc->r_conf = R_CONF; + asc->r_ce_ctrl = R_CE_CTRL; + asc->r_ctrl0 = R_CTRL0; + asc->r_timings = R_TIMINGS; + asc->nregs_timings = 1; + asc->conf_enable_w0 = CONF_ENABLE_W0; + asc->max_peripherals = 1; + asc->segments = aspeed_2400_smc_segments; + asc->flash_window_base = 0x10000000; + asc->flash_window_size = 0x6000000; + asc->features = 0x0; + asc->nregs = ASPEED_SMC_R_SMC_MAX; + asc->segment_to_reg = aspeed_smc_segment_to_reg; + asc->reg_to_segment = aspeed_smc_reg_to_segment; + asc->dma_ctrl = aspeed_smc_dma_ctrl; +} + +static const TypeInfo aspeed_2400_smc_info = { + .name = "aspeed.smc-ast2400", + .parent = TYPE_ASPEED_SMC, + .class_init = aspeed_2400_smc_class_init, +}; + +static const uint32_t aspeed_2400_fmc_resets[ASPEED_SMC_R_MAX] = { + /* + * CE0 and CE1 types are HW strapped in SCU70. Do it here to + * simplify the model. + */ + [R_CONF] = CONF_FLASH_TYPE_SPI << CONF_FLASH_TYPE0, +}; + +static const AspeedSegments aspeed_2400_fmc_segments[] = { + { 0x20000000, 64 * MiB }, /* start address is readonly */ + { 0x24000000, 32 * MiB }, + { 0x26000000, 32 * MiB }, + { 0x28000000, 32 * MiB }, + { 0x2A000000, 32 * MiB } +}; + +static void aspeed_2400_fmc_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + AspeedSMCClass *asc = ASPEED_SMC_CLASS(klass); + + dc->desc = "Aspeed 2400 FMC Controller"; + asc->r_conf = R_CONF; + asc->r_ce_ctrl = R_CE_CTRL; + asc->r_ctrl0 = R_CTRL0; + asc->r_timings = R_TIMINGS; + asc->nregs_timings = 1; + asc->conf_enable_w0 = CONF_ENABLE_W0; + asc->max_peripherals = 5; + asc->segments = aspeed_2400_fmc_segments; + asc->resets = aspeed_2400_fmc_resets; + asc->flash_window_base = 0x20000000; + asc->flash_window_size = 0x10000000; + asc->features = ASPEED_SMC_FEATURE_DMA; + asc->dma_flash_mask = 0x0FFFFFFC; + asc->dma_dram_mask = 0x1FFFFFFC; + asc->nregs = ASPEED_SMC_R_MAX; + asc->segment_to_reg = aspeed_smc_segment_to_reg; + asc->reg_to_segment = aspeed_smc_reg_to_segment; + asc->dma_ctrl = aspeed_smc_dma_ctrl; +} + +static const TypeInfo aspeed_2400_fmc_info = { + .name = "aspeed.fmc-ast2400", + .parent = TYPE_ASPEED_SMC, + .class_init = aspeed_2400_fmc_class_init, +}; + +static const AspeedSegments aspeed_2400_spi1_segments[] = { + { 0x30000000, 64 * MiB }, +}; + +static int aspeed_2400_spi1_addr_width(const AspeedSMCState *s) +{ + return s->regs[R_SPI_CTRL0] & CTRL_AST2400_SPI_4BYTE ? 
4 : 3;
+}
+
+static void aspeed_2400_spi1_class_init(ObjectClass *klass, void *data)
+{
+    DeviceClass *dc = DEVICE_CLASS(klass);
+    AspeedSMCClass *asc = ASPEED_SMC_CLASS(klass);
+
+    dc->desc = "Aspeed 2400 SPI1 Controller";
+    asc->r_conf = R_SPI_CONF;
+    asc->r_ce_ctrl = 0xff;
+    asc->r_ctrl0 = R_SPI_CTRL0;
+    asc->r_timings = R_SPI_TIMINGS;
+    asc->nregs_timings = 1;
+    asc->conf_enable_w0 = SPI_CONF_ENABLE_W0;
+    asc->max_peripherals = 1;
+    asc->segments = aspeed_2400_spi1_segments;
+    asc->flash_window_base = 0x30000000;
+    asc->flash_window_size = 0x10000000;
+    asc->features = 0x0;
+    asc->nregs = ASPEED_SMC_R_SPI_MAX;
+    asc->segment_to_reg = aspeed_smc_segment_to_reg;
+    asc->reg_to_segment = aspeed_smc_reg_to_segment;
+    asc->dma_ctrl = aspeed_smc_dma_ctrl;
+    asc->addr_width = aspeed_2400_spi1_addr_width;
+}
+
+static const TypeInfo aspeed_2400_spi1_info = {
+    .name = "aspeed.spi1-ast2400",
+    .parent = TYPE_ASPEED_SMC,
+    .class_init = aspeed_2400_spi1_class_init,
+};
+
+static const uint32_t aspeed_2500_fmc_resets[ASPEED_SMC_R_MAX] = {
+    [R_CONF] = (CONF_FLASH_TYPE_SPI << CONF_FLASH_TYPE0 |
+                CONF_FLASH_TYPE_SPI << CONF_FLASH_TYPE1),
+};
+
+static const AspeedSegments aspeed_2500_fmc_segments[] = {
+    { 0x20000000, 128 * MiB }, /* start address is readonly */
+    { 0x28000000, 32 * MiB },
+    { 0x2A000000, 32 * MiB },
+};
+
+static void aspeed_2500_fmc_class_init(ObjectClass *klass, void *data)
+{
+    DeviceClass *dc = DEVICE_CLASS(klass);
+    AspeedSMCClass *asc = ASPEED_SMC_CLASS(klass);
+
+    dc->desc = "Aspeed 2500 FMC Controller";
+    asc->r_conf = R_CONF;
+    asc->r_ce_ctrl = R_CE_CTRL;
+    asc->r_ctrl0 = R_CTRL0;
+    asc->r_timings = R_TIMINGS;
+    asc->nregs_timings = 1;
+    asc->conf_enable_w0 = CONF_ENABLE_W0;
+    asc->max_peripherals = 3;
+    asc->segments = aspeed_2500_fmc_segments;
+    asc->resets = aspeed_2500_fmc_resets;
+    asc->flash_window_base = 0x20000000;
+    asc->flash_window_size = 0x10000000;
+    asc->features = ASPEED_SMC_FEATURE_DMA;
+    asc->dma_flash_mask = 0x0FFFFFFC;
+    asc->dma_dram_mask = 0x3FFFFFFC;
+    asc->nregs = ASPEED_SMC_R_MAX;
+    asc->segment_to_reg = aspeed_smc_segment_to_reg;
+    asc->reg_to_segment = aspeed_smc_reg_to_segment;
+    asc->dma_ctrl = aspeed_smc_dma_ctrl;
+}
+
+static const TypeInfo aspeed_2500_fmc_info = {
+    .name = "aspeed.fmc-ast2500",
+    .parent = TYPE_ASPEED_SMC,
+    .class_init = aspeed_2500_fmc_class_init,
+};
+
+static const AspeedSegments aspeed_2500_spi1_segments[] = {
+    { 0x30000000, 32 * MiB }, /* start address is readonly */
+    { 0x32000000, 96 * MiB }, /* end address is readonly */
+};
+
+static void aspeed_2500_spi1_class_init(ObjectClass *klass, void *data)
+{
+    DeviceClass *dc = DEVICE_CLASS(klass);
+    AspeedSMCClass *asc = ASPEED_SMC_CLASS(klass);
+
+    dc->desc = "Aspeed 2500 SPI1 Controller";
+    asc->r_conf = R_CONF;
+    asc->r_ce_ctrl = R_CE_CTRL;
+    asc->r_ctrl0 = R_CTRL0;
+    asc->r_timings = R_TIMINGS;
+    asc->nregs_timings = 1;
+    asc->conf_enable_w0 = CONF_ENABLE_W0;
+    asc->max_peripherals = 2;
+    asc->segments = aspeed_2500_spi1_segments;
+    asc->flash_window_base = 0x30000000;
+    asc->flash_window_size = 0x8000000;
+    asc->features = 0x0;
+    asc->nregs = ASPEED_SMC_R_MAX;
+    asc->segment_to_reg = aspeed_smc_segment_to_reg;
+    asc->reg_to_segment = aspeed_smc_reg_to_segment;
+    asc->dma_ctrl = aspeed_smc_dma_ctrl;
+}
+
+static const TypeInfo aspeed_2500_spi1_info = {
+    .name = "aspeed.spi1-ast2500",
+    .parent = TYPE_ASPEED_SMC,
+    .class_init = aspeed_2500_spi1_class_init,
+};
+
+static const AspeedSegments aspeed_2500_spi2_segments[] = {
+    { 0x38000000, 32 * MiB }, /* start
address is readonly */
+    { 0x3A000000, 96 * MiB }, /* end address is readonly */
+};
+
+static void aspeed_2500_spi2_class_init(ObjectClass *klass, void *data)
+{
+    DeviceClass *dc = DEVICE_CLASS(klass);
+    AspeedSMCClass *asc = ASPEED_SMC_CLASS(klass);
+
+    dc->desc = "Aspeed 2500 SPI2 Controller";
+    asc->r_conf = R_CONF;
+    asc->r_ce_ctrl = R_CE_CTRL;
+    asc->r_ctrl0 = R_CTRL0;
+    asc->r_timings = R_TIMINGS;
+    asc->nregs_timings = 1;
+    asc->conf_enable_w0 = CONF_ENABLE_W0;
+    asc->max_peripherals = 2;
+    asc->segments = aspeed_2500_spi2_segments;
+    asc->flash_window_base = 0x38000000;
+    asc->flash_window_size = 0x8000000;
+    asc->features = 0x0;
+    asc->nregs = ASPEED_SMC_R_MAX;
+    asc->segment_to_reg = aspeed_smc_segment_to_reg;
+    asc->reg_to_segment = aspeed_smc_reg_to_segment;
+    asc->dma_ctrl = aspeed_smc_dma_ctrl;
+}
+
+static const TypeInfo aspeed_2500_spi2_info = {
+    .name = "aspeed.spi2-ast2500",
+    .parent = TYPE_ASPEED_SMC,
+    .class_init = aspeed_2500_spi2_class_init,
+};
+
+/*
+ * The Segment Registers of the AST2600 have a 1MB unit. The address
+ * range of a flash SPI peripheral is encoded with offsets in the overall
+ * controller window. The previous SoC AST2400 and AST2500 used
+ * absolute addresses. Only bits [27:20] are relevant and the end
+ * address is an upper bound limit.
+ */
+#define AST2600_SEG_ADDR_MASK 0x0ff00000
+
+static uint32_t aspeed_2600_smc_segment_to_reg(const AspeedSMCState *s,
+                                               const AspeedSegments *seg)
+{
+    uint32_t reg = 0;
+
+    /* Disabled segments have a nil register */
+    if (!seg->size) {
+        return 0;
+    }
+
+    reg |= (seg->addr & AST2600_SEG_ADDR_MASK) >> 16; /* start offset */
+    reg |= (seg->addr + seg->size - 1) & AST2600_SEG_ADDR_MASK; /* end offset */
+    return reg;
+}
+
+static void aspeed_2600_smc_reg_to_segment(const AspeedSMCState *s,
+                                           uint32_t reg, AspeedSegments *seg)
+{
+    uint32_t start_offset = (reg << 16) & AST2600_SEG_ADDR_MASK;
+    uint32_t end_offset = reg & AST2600_SEG_ADDR_MASK;
+    AspeedSMCClass *asc = ASPEED_SMC_GET_CLASS(s);
+
+    if (reg) {
+        seg->addr = asc->flash_window_base + start_offset;
+        seg->size = end_offset + MiB - start_offset;
+    } else {
+        seg->addr = asc->flash_window_base;
+        seg->size = 0;
+    }
+}
+
+static const uint32_t aspeed_2600_fmc_resets[ASPEED_SMC_R_MAX] = {
+    [R_CONF] = (CONF_FLASH_TYPE_SPI << CONF_FLASH_TYPE0 |
+                CONF_FLASH_TYPE_SPI << CONF_FLASH_TYPE1 |
+                CONF_FLASH_TYPE_SPI << CONF_FLASH_TYPE2),
+};
+
+static const AspeedSegments aspeed_2600_fmc_segments[] = {
+    { 0x0, 128 * MiB }, /* start address is readonly */
+    { 128 * MiB, 128 * MiB }, /* default is disabled but needed for -kernel */
+    { 0x0, 0 }, /* disabled */
+};
+
+static void aspeed_2600_fmc_class_init(ObjectClass *klass, void *data)
+{
+    DeviceClass *dc = DEVICE_CLASS(klass);
+    AspeedSMCClass *asc = ASPEED_SMC_CLASS(klass);
+
+    dc->desc = "Aspeed 2600 FMC Controller";
+    asc->r_conf = R_CONF;
+    asc->r_ce_ctrl = R_CE_CTRL;
+    asc->r_ctrl0 = R_CTRL0;
+    asc->r_timings = R_TIMINGS;
+    asc->nregs_timings = 1;
+    asc->conf_enable_w0 = CONF_ENABLE_W0;
+    asc->max_peripherals = 3;
+    asc->segments = aspeed_2600_fmc_segments;
+    asc->resets = aspeed_2600_fmc_resets;
+    asc->flash_window_base = 0x20000000;
+    asc->flash_window_size = 0x10000000;
+    asc->features = ASPEED_SMC_FEATURE_DMA |
+                    ASPEED_SMC_FEATURE_WDT_CONTROL;
+    asc->dma_flash_mask = 0x0FFFFFFC;
+    asc->dma_dram_mask = 0x3FFFFFFC;
+    asc->nregs = ASPEED_SMC_R_MAX;
+    asc->segment_to_reg = aspeed_2600_smc_segment_to_reg;
+    asc->reg_to_segment = aspeed_2600_smc_reg_to_segment;
+    asc->dma_ctrl =
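+    /*
+     * Worked example of the 1MB-unit encoding above (illustrative),
+     * using the CE1 entry of aspeed_2600_fmc_segments (128 MiB at
+     * offset 128 MiB):
+     *   start = (0x08000000 & 0x0ff00000) >> 16            = 0x0800
+     *   end   = (0x08000000 + 0x08000000 - 1) & 0x0ff00000 = 0x0ff00000
+     *   reg   = 0x0ff00800
+     * reg_to_segment() then recovers addr = flash_window_base +
+     * 0x08000000 and size = 0x0ff00000 + MiB - 0x08000000 = 128 MiB.
+     */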
aspeed_2600_smc_dma_ctrl; +} + +static const TypeInfo aspeed_2600_fmc_info = { + .name = "aspeed.fmc-ast2600", + .parent = TYPE_ASPEED_SMC, + .class_init = aspeed_2600_fmc_class_init, +}; + +static const AspeedSegments aspeed_2600_spi1_segments[] = { + { 0x0, 128 * MiB }, /* start address is readonly */ + { 0x0, 0 }, /* disabled */ +}; + +static void aspeed_2600_spi1_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + AspeedSMCClass *asc = ASPEED_SMC_CLASS(klass); + + dc->desc = "Aspeed 2600 SPI1 Controller"; + asc->r_conf = R_CONF; + asc->r_ce_ctrl = R_CE_CTRL; + asc->r_ctrl0 = R_CTRL0; + asc->r_timings = R_TIMINGS; + asc->nregs_timings = 2; + asc->conf_enable_w0 = CONF_ENABLE_W0; + asc->max_peripherals = 2; + asc->segments = aspeed_2600_spi1_segments; + asc->flash_window_base = 0x30000000; + asc->flash_window_size = 0x10000000; + asc->features = ASPEED_SMC_FEATURE_DMA | + ASPEED_SMC_FEATURE_DMA_GRANT; + asc->dma_flash_mask = 0x0FFFFFFC; + asc->dma_dram_mask = 0x3FFFFFFC; + asc->nregs = ASPEED_SMC_R_MAX; + asc->segment_to_reg = aspeed_2600_smc_segment_to_reg; + asc->reg_to_segment = aspeed_2600_smc_reg_to_segment; + asc->dma_ctrl = aspeed_2600_smc_dma_ctrl; +} + +static const TypeInfo aspeed_2600_spi1_info = { + .name = "aspeed.spi1-ast2600", + .parent = TYPE_ASPEED_SMC, + .class_init = aspeed_2600_spi1_class_init, +}; + +static const AspeedSegments aspeed_2600_spi2_segments[] = { + { 0x0, 128 * MiB }, /* start address is readonly */ + { 0x0, 0 }, /* disabled */ + { 0x0, 0 }, /* disabled */ +}; + +static void aspeed_2600_spi2_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + AspeedSMCClass *asc = ASPEED_SMC_CLASS(klass); + + dc->desc = "Aspeed 2600 SPI2 Controller"; + asc->r_conf = R_CONF; + asc->r_ce_ctrl = R_CE_CTRL; + asc->r_ctrl0 = R_CTRL0; + asc->r_timings = R_TIMINGS; + asc->nregs_timings = 3; + asc->conf_enable_w0 = CONF_ENABLE_W0; + asc->max_peripherals = 3; + asc->segments = aspeed_2600_spi2_segments; + asc->flash_window_base = 0x50000000; + asc->flash_window_size = 0x10000000; + asc->features = ASPEED_SMC_FEATURE_DMA | + ASPEED_SMC_FEATURE_DMA_GRANT; + asc->dma_flash_mask = 0x0FFFFFFC; + asc->dma_dram_mask = 0x3FFFFFFC; + asc->nregs = ASPEED_SMC_R_MAX; + asc->segment_to_reg = aspeed_2600_smc_segment_to_reg; + asc->reg_to_segment = aspeed_2600_smc_reg_to_segment; + asc->dma_ctrl = aspeed_2600_smc_dma_ctrl; +} + +static const TypeInfo aspeed_2600_spi2_info = { + .name = "aspeed.spi2-ast2600", + .parent = TYPE_ASPEED_SMC, + .class_init = aspeed_2600_spi2_class_init, +}; + +static void aspeed_smc_register_types(void) +{ + type_register_static(&aspeed_smc_flash_info); + type_register_static(&aspeed_smc_info); + type_register_static(&aspeed_2400_smc_info); + type_register_static(&aspeed_2400_fmc_info); + type_register_static(&aspeed_2400_spi1_info); + type_register_static(&aspeed_2500_fmc_info); + type_register_static(&aspeed_2500_spi1_info); + type_register_static(&aspeed_2500_spi2_info); + type_register_static(&aspeed_2600_fmc_info); + type_register_static(&aspeed_2600_spi1_info); + type_register_static(&aspeed_2600_spi2_info); +} + +type_init(aspeed_smc_register_types) diff --git a/hw/ssi/imx_spi.c b/hw/ssi/imx_spi.c new file mode 100644 index 000000000..189423bb3 --- /dev/null +++ b/hw/ssi/imx_spi.c @@ -0,0 +1,500 @@ +/* + * IMX SPI Controller + * + * Copyright (c) 2016 Jean-Christophe Dubois + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. 
+ * See the COPYING file in the top-level directory. + * + */ + +#include "qemu/osdep.h" +#include "hw/irq.h" +#include "hw/ssi/imx_spi.h" +#include "migration/vmstate.h" +#include "qemu/log.h" +#include "qemu/module.h" + +#ifndef DEBUG_IMX_SPI +#define DEBUG_IMX_SPI 0 +#endif + +#define DPRINTF(fmt, args...) \ + do { \ + if (DEBUG_IMX_SPI) { \ + fprintf(stderr, "[%s]%s: " fmt , TYPE_IMX_SPI, \ + __func__, ##args); \ + } \ + } while (0) + +static const char *imx_spi_reg_name(uint32_t reg) +{ + static char unknown[20]; + + switch (reg) { + case ECSPI_RXDATA: + return "ECSPI_RXDATA"; + case ECSPI_TXDATA: + return "ECSPI_TXDATA"; + case ECSPI_CONREG: + return "ECSPI_CONREG"; + case ECSPI_CONFIGREG: + return "ECSPI_CONFIGREG"; + case ECSPI_INTREG: + return "ECSPI_INTREG"; + case ECSPI_DMAREG: + return "ECSPI_DMAREG"; + case ECSPI_STATREG: + return "ECSPI_STATREG"; + case ECSPI_PERIODREG: + return "ECSPI_PERIODREG"; + case ECSPI_TESTREG: + return "ECSPI_TESTREG"; + case ECSPI_MSGDATA: + return "ECSPI_MSGDATA"; + default: + sprintf(unknown, "%u ?", reg); + return unknown; + } +} + +static const VMStateDescription vmstate_imx_spi = { + .name = TYPE_IMX_SPI, + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_FIFO32(tx_fifo, IMXSPIState), + VMSTATE_FIFO32(rx_fifo, IMXSPIState), + VMSTATE_INT16(burst_length, IMXSPIState), + VMSTATE_UINT32_ARRAY(regs, IMXSPIState, ECSPI_MAX), + VMSTATE_END_OF_LIST() + }, +}; + +static void imx_spi_txfifo_reset(IMXSPIState *s) +{ + fifo32_reset(&s->tx_fifo); + s->regs[ECSPI_STATREG] |= ECSPI_STATREG_TE; + s->regs[ECSPI_STATREG] &= ~ECSPI_STATREG_TF; +} + +static void imx_spi_rxfifo_reset(IMXSPIState *s) +{ + fifo32_reset(&s->rx_fifo); + s->regs[ECSPI_STATREG] &= ~ECSPI_STATREG_RR; + s->regs[ECSPI_STATREG] &= ~ECSPI_STATREG_RF; + s->regs[ECSPI_STATREG] &= ~ECSPI_STATREG_RO; +} + +static void imx_spi_update_irq(IMXSPIState *s) +{ + int level; + + if (fifo32_is_empty(&s->rx_fifo)) { + s->regs[ECSPI_STATREG] &= ~ECSPI_STATREG_RR; + } else { + s->regs[ECSPI_STATREG] |= ECSPI_STATREG_RR; + } + + if (fifo32_is_full(&s->rx_fifo)) { + s->regs[ECSPI_STATREG] |= ECSPI_STATREG_RF; + } else { + s->regs[ECSPI_STATREG] &= ~ECSPI_STATREG_RF; + } + + if (fifo32_is_empty(&s->tx_fifo)) { + s->regs[ECSPI_STATREG] |= ECSPI_STATREG_TE; + } else { + s->regs[ECSPI_STATREG] &= ~ECSPI_STATREG_TE; + } + + if (fifo32_is_full(&s->tx_fifo)) { + s->regs[ECSPI_STATREG] |= ECSPI_STATREG_TF; + } else { + s->regs[ECSPI_STATREG] &= ~ECSPI_STATREG_TF; + } + + level = s->regs[ECSPI_STATREG] & s->regs[ECSPI_INTREG] ? 1 : 0; + + qemu_set_irq(s->irq, level); + + DPRINTF("IRQ level is %d\n", level); +} + +static uint8_t imx_spi_selected_channel(IMXSPIState *s) +{ + return EXTRACT(s->regs[ECSPI_CONREG], ECSPI_CONREG_CHANNEL_SELECT); +} + +static uint32_t imx_spi_burst_length(IMXSPIState *s) +{ + uint32_t burst; + + burst = EXTRACT(s->regs[ECSPI_CONREG], ECSPI_CONREG_BURST_LENGTH) + 1; + if (burst % 8) { + burst = ROUND_UP(burst, 8); + } + + return burst; +} + +static bool imx_spi_is_enabled(IMXSPIState *s) +{ + return s->regs[ECSPI_CONREG] & ECSPI_CONREG_EN; +} + +static bool imx_spi_channel_is_master(IMXSPIState *s) +{ + uint8_t mode = EXTRACT(s->regs[ECSPI_CONREG], ECSPI_CONREG_CHANNEL_MODE); + + return (mode & (1 << imx_spi_selected_channel(s))) ? 
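+    /*
+     * CHANNEL_MODE holds one mode bit per channel: bit N set means
+     * channel N operates as a bus master.
+     */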
true : false; +} + +static bool imx_spi_is_multiple_master_burst(IMXSPIState *s) +{ + uint8_t wave = EXTRACT(s->regs[ECSPI_CONFIGREG], ECSPI_CONFIGREG_SS_CTL); + + return imx_spi_channel_is_master(s) && + !(s->regs[ECSPI_CONREG] & ECSPI_CONREG_SMC) && + ((wave & (1 << imx_spi_selected_channel(s))) ? true : false); +} + +static void imx_spi_flush_txfifo(IMXSPIState *s) +{ + uint32_t tx; + uint32_t rx; + + DPRINTF("Begin: TX Fifo Size = %d, RX Fifo Size = %d\n", + fifo32_num_used(&s->tx_fifo), fifo32_num_used(&s->rx_fifo)); + + while (!fifo32_is_empty(&s->tx_fifo)) { + int tx_burst = 0; + + if (s->burst_length <= 0) { + s->burst_length = imx_spi_burst_length(s); + + DPRINTF("Burst length = %d\n", s->burst_length); + + if (imx_spi_is_multiple_master_burst(s)) { + s->regs[ECSPI_CONREG] |= ECSPI_CONREG_XCH; + } + } + + tx = fifo32_pop(&s->tx_fifo); + + DPRINTF("data tx:0x%08x\n", tx); + + tx_burst = (s->burst_length % 32) ? : 32; + + rx = 0; + + while (tx_burst > 0) { + uint8_t byte = tx >> (tx_burst - 8); + + DPRINTF("writing 0x%02x\n", (uint32_t)byte); + + /* We need to write one byte at a time */ + byte = ssi_transfer(s->bus, byte); + + DPRINTF("0x%02x read\n", (uint32_t)byte); + + rx = (rx << 8) | byte; + + /* Remove 8 bits from the actual burst */ + tx_burst -= 8; + s->burst_length -= 8; + } + + DPRINTF("data rx:0x%08x\n", rx); + + if (fifo32_is_full(&s->rx_fifo)) { + s->regs[ECSPI_STATREG] |= ECSPI_STATREG_RO; + } else { + fifo32_push(&s->rx_fifo, rx); + } + + if (s->burst_length <= 0) { + if (!imx_spi_is_multiple_master_burst(s)) { + s->regs[ECSPI_STATREG] |= ECSPI_STATREG_TC; + break; + } + } + } + + if (fifo32_is_empty(&s->tx_fifo)) { + s->regs[ECSPI_STATREG] |= ECSPI_STATREG_TC; + s->regs[ECSPI_CONREG] &= ~ECSPI_CONREG_XCH; + } + + /* TODO: We should also use TDR and RDR bits */ + + DPRINTF("End: TX Fifo Size = %d, RX Fifo Size = %d\n", + fifo32_num_used(&s->tx_fifo), fifo32_num_used(&s->rx_fifo)); +} + +static void imx_spi_common_reset(IMXSPIState *s) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(s->regs); i++) { + switch (i) { + case ECSPI_CONREG: + /* CONREG is not updated on soft reset */ + break; + case ECSPI_STATREG: + s->regs[i] = 0x00000003; + break; + default: + s->regs[i] = 0; + break; + } + } + + imx_spi_rxfifo_reset(s); + imx_spi_txfifo_reset(s); + + s->burst_length = 0; +} + +static void imx_spi_soft_reset(IMXSPIState *s) +{ + int i; + + imx_spi_common_reset(s); + + imx_spi_update_irq(s); + + for (i = 0; i < ECSPI_NUM_CS; i++) { + qemu_set_irq(s->cs_lines[i], 1); + } +} + +static void imx_spi_reset(DeviceState *dev) +{ + IMXSPIState *s = IMX_SPI(dev); + + imx_spi_common_reset(s); + s->regs[ECSPI_CONREG] = 0; +} + +static uint64_t imx_spi_read(void *opaque, hwaddr offset, unsigned size) +{ + uint32_t value = 0; + IMXSPIState *s = opaque; + uint32_t index = offset >> 2; + + if (index >= ECSPI_MAX) { + qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Bad register at offset 0x%" + HWADDR_PRIx "\n", TYPE_IMX_SPI, __func__, offset); + return 0; + } + + value = s->regs[index]; + + if (imx_spi_is_enabled(s)) { + switch (index) { + case ECSPI_RXDATA: + if (fifo32_is_empty(&s->rx_fifo)) { + /* value is undefined */ + value = 0xdeadbeef; + } else { + /* read from the RX FIFO */ + value = fifo32_pop(&s->rx_fifo); + } + break; + case ECSPI_TXDATA: + qemu_log_mask(LOG_GUEST_ERROR, + "[%s]%s: Trying to read from TX FIFO\n", + TYPE_IMX_SPI, __func__); + + /* Reading from TXDATA gives 0 */ + break; + case ECSPI_MSGDATA: + qemu_log_mask(LOG_GUEST_ERROR, + "[%s]%s: Trying to read from MSG FIFO\n", + 
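+                      /* the model never latches MSGDATA, so 0 is read */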
TYPE_IMX_SPI, __func__); + /* Reading from MSGDATA gives 0 */ + break; + default: + break; + } + + imx_spi_update_irq(s); + } + DPRINTF("reg[%s] => 0x%" PRIx32 "\n", imx_spi_reg_name(index), value); + + return (uint64_t)value; +} + +static void imx_spi_write(void *opaque, hwaddr offset, uint64_t value, + unsigned size) +{ + IMXSPIState *s = opaque; + uint32_t index = offset >> 2; + uint32_t change_mask; + uint32_t burst; + + if (index >= ECSPI_MAX) { + qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Bad register at offset 0x%" + HWADDR_PRIx "\n", TYPE_IMX_SPI, __func__, offset); + return; + } + + DPRINTF("reg[%s] <= 0x%" PRIx32 "\n", imx_spi_reg_name(index), + (uint32_t)value); + + if (!imx_spi_is_enabled(s)) { + /* Block is disabled */ + if (index != ECSPI_CONREG) { + /* Ignore access */ + return; + } + } + + change_mask = s->regs[index] ^ value; + + switch (index) { + case ECSPI_RXDATA: + qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Trying to write to RX FIFO\n", + TYPE_IMX_SPI, __func__); + break; + case ECSPI_TXDATA: + if (fifo32_is_full(&s->tx_fifo)) { + /* Ignore writes if queue is full */ + break; + } + + fifo32_push(&s->tx_fifo, (uint32_t)value); + + if (imx_spi_channel_is_master(s) && + (s->regs[ECSPI_CONREG] & ECSPI_CONREG_SMC)) { + /* + * Start emitting if current channel is master and SMC bit is + * set. + */ + imx_spi_flush_txfifo(s); + } + + break; + case ECSPI_STATREG: + /* the RO and TC bits are write-one-to-clear */ + value &= ECSPI_STATREG_RO | ECSPI_STATREG_TC; + s->regs[ECSPI_STATREG] &= ~value; + + break; + case ECSPI_CONREG: + s->regs[ECSPI_CONREG] = value; + + burst = EXTRACT(s->regs[ECSPI_CONREG], ECSPI_CONREG_BURST_LENGTH) + 1; + if (burst % 8) { + qemu_log_mask(LOG_UNIMP, + "[%s]%s: burst length %d not supported: rounding up to next multiple of 8\n", + TYPE_IMX_SPI, __func__, burst); + } + + if (!imx_spi_is_enabled(s)) { + /* device is disabled, so this is a soft reset */ + imx_spi_soft_reset(s); + + return; + } + + if (imx_spi_channel_is_master(s)) { + int i; + + /* We are in master mode */ + + for (i = 0; i < ECSPI_NUM_CS; i++) { + qemu_set_irq(s->cs_lines[i], + i == imx_spi_selected_channel(s) ? 0 : 1); + } + + if ((value & change_mask & ECSPI_CONREG_SMC) && + !fifo32_is_empty(&s->tx_fifo)) { + /* SMC bit is set and TX FIFO has some slots filled in */ + imx_spi_flush_txfifo(s); + } else if ((value & change_mask & ECSPI_CONREG_XCH) && + !(value & ECSPI_CONREG_SMC)) { + /* This is a request to start emitting */ + imx_spi_flush_txfifo(s); + } + } + + break; + case ECSPI_MSGDATA: + /* it is not clear from the spec what MSGDATA is for */ + /* Anyway it is not used by Linux driver */ + /* So for now we just ignore it */ + qemu_log_mask(LOG_UNIMP, + "[%s]%s: Trying to write to MSGDATA, ignoring\n", + TYPE_IMX_SPI, __func__); + break; + default: + s->regs[index] = value; + + break; + } + + imx_spi_update_irq(s); +} + +static const struct MemoryRegionOps imx_spi_ops = { + .read = imx_spi_read, + .write = imx_spi_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + /* + * Our device would not work correctly if the guest was doing + * unaligned access. This might not be a limitation on the real + * device but in practice there is no reason for a guest to access + * this device unaligned. 
+ */ + .min_access_size = 4, + .max_access_size = 4, + .unaligned = false, + }, +}; + +static void imx_spi_realize(DeviceState *dev, Error **errp) +{ + IMXSPIState *s = IMX_SPI(dev); + int i; + + s->bus = ssi_create_bus(dev, "spi"); + + memory_region_init_io(&s->iomem, OBJECT(dev), &imx_spi_ops, s, + TYPE_IMX_SPI, 0x1000); + sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->iomem); + sysbus_init_irq(SYS_BUS_DEVICE(dev), &s->irq); + + for (i = 0; i < ECSPI_NUM_CS; ++i) { + sysbus_init_irq(SYS_BUS_DEVICE(dev), &s->cs_lines[i]); + } + + fifo32_create(&s->tx_fifo, ECSPI_FIFO_SIZE); + fifo32_create(&s->rx_fifo, ECSPI_FIFO_SIZE); +} + +static void imx_spi_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = imx_spi_realize; + dc->vmsd = &vmstate_imx_spi; + dc->reset = imx_spi_reset; + dc->desc = "i.MX SPI Controller"; +} + +static const TypeInfo imx_spi_info = { + .name = TYPE_IMX_SPI, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(IMXSPIState), + .class_init = imx_spi_class_init, +}; + +static void imx_spi_register_types(void) +{ + type_register_static(&imx_spi_info); +} + +type_init(imx_spi_register_types) diff --git a/hw/ssi/meson.build b/hw/ssi/meson.build new file mode 100644 index 000000000..3d6bc82ab --- /dev/null +++ b/hw/ssi/meson.build @@ -0,0 +1,11 @@ +softmmu_ss.add(when: 'CONFIG_ASPEED_SOC', if_true: files('aspeed_smc.c')) +softmmu_ss.add(when: 'CONFIG_MSF2', if_true: files('mss-spi.c')) +softmmu_ss.add(when: 'CONFIG_NPCM7XX', if_true: files('npcm7xx_fiu.c')) +softmmu_ss.add(when: 'CONFIG_PL022', if_true: files('pl022.c')) +softmmu_ss.add(when: 'CONFIG_SIFIVE_SPI', if_true: files('sifive_spi.c')) +softmmu_ss.add(when: 'CONFIG_SSI', if_true: files('ssi.c')) +softmmu_ss.add(when: 'CONFIG_STM32F2XX_SPI', if_true: files('stm32f2xx_spi.c')) +softmmu_ss.add(when: 'CONFIG_XILINX_SPI', if_true: files('xilinx_spi.c')) +softmmu_ss.add(when: 'CONFIG_XILINX_SPIPS', if_true: files('xilinx_spips.c')) +softmmu_ss.add(when: 'CONFIG_IMX', if_true: files('imx_spi.c')) +softmmu_ss.add(when: 'CONFIG_OMAP', if_true: files('omap_spi.c')) diff --git a/hw/ssi/mss-spi.c b/hw/ssi/mss-spi.c new file mode 100644 index 000000000..b2432c5a1 --- /dev/null +++ b/hw/ssi/mss-spi.c @@ -0,0 +1,422 @@ +/* + * Block model of SPI controller present in + * Microsemi's SmartFusion2 and SmartFusion SoCs. + * + * Copyright (C) 2017 Subbaraya Sundeep + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include "hw/irq.h" +#include "hw/ssi/mss-spi.h" +#include "migration/vmstate.h" +#include "qemu/log.h" +#include "qemu/module.h" + +#ifndef MSS_SPI_ERR_DEBUG +#define MSS_SPI_ERR_DEBUG 0 +#endif + +#define DB_PRINT_L(lvl, fmt, args...) do { \ + if (MSS_SPI_ERR_DEBUG >= lvl) { \ + qemu_log("%s: " fmt "\n", __func__, ## args); \ + } \ +} while (0) + +#define DB_PRINT(fmt, args...) DB_PRINT_L(1, fmt, ## args) + +#define FIFO_CAPACITY 32 + +#define R_SPI_CONTROL 0 +#define R_SPI_DFSIZE 1 +#define R_SPI_STATUS 2 +#define R_SPI_INTCLR 3 +#define R_SPI_RX 4 +#define R_SPI_TX 5 +#define R_SPI_CLKGEN 6 +#define R_SPI_SS 7 +#define R_SPI_MIS 8 +#define R_SPI_RIS 9 + +#define S_TXDONE (1 << 0) +#define S_RXRDY (1 << 1) +#define S_RXCHOVRF (1 << 2) +#define S_RXFIFOFUL (1 << 4) +#define S_RXFIFOFULNXT (1 << 5) +#define S_RXFIFOEMP (1 << 6) +#define S_RXFIFOEMPNXT (1 << 7) +#define S_TXFIFOFUL (1 << 8) +#define S_TXFIFOFULNXT (1 << 9) +#define S_TXFIFOEMP (1 << 10) +#define S_TXFIFOEMPNXT (1 << 11) +#define S_FRAMESTART (1 << 12) +#define S_SSEL (1 << 13) +#define S_ACTIVE (1 << 14) + +#define C_ENABLE (1 << 0) +#define C_MODE (1 << 1) +#define C_INTRXDATA (1 << 4) +#define C_INTTXDATA (1 << 5) +#define C_INTRXOVRFLO (1 << 6) +#define C_SPS (1 << 26) +#define C_BIGFIFO (1 << 29) +#define C_RESET (1 << 31) + +#define FRAMESZ_MASK 0x3F +#define FMCOUNT_MASK 0x00FFFF00 +#define FMCOUNT_SHIFT 8 +#define FRAMESZ_MAX 32 + +static void txfifo_reset(MSSSpiState *s) +{ + fifo32_reset(&s->tx_fifo); + + s->regs[R_SPI_STATUS] &= ~S_TXFIFOFUL; + s->regs[R_SPI_STATUS] |= S_TXFIFOEMP; +} + +static void rxfifo_reset(MSSSpiState *s) +{ + fifo32_reset(&s->rx_fifo); + + s->regs[R_SPI_STATUS] &= ~S_RXFIFOFUL; + s->regs[R_SPI_STATUS] |= S_RXFIFOEMP; +} + +static void set_fifodepth(MSSSpiState *s) +{ + unsigned int size = s->regs[R_SPI_DFSIZE] & FRAMESZ_MASK; + + if (size <= 8) { + s->fifo_depth = 32; + } else if (size <= 16) { + s->fifo_depth = 16; + } else { + s->fifo_depth = 8; + } +} + +static void update_mis(MSSSpiState *s) +{ + uint32_t reg = s->regs[R_SPI_CONTROL]; + uint32_t tmp; + + /* + * form the Control register interrupt enable bits + * same as RIS, MIS and Interrupt clear registers for simplicity + */ + tmp = ((reg & C_INTRXOVRFLO) >> 4) | ((reg & C_INTRXDATA) >> 3) | + ((reg & C_INTTXDATA) >> 5); + s->regs[R_SPI_MIS] |= tmp & s->regs[R_SPI_RIS]; +} + +static void spi_update_irq(MSSSpiState *s) +{ + int irq; + + update_mis(s); + irq = !!(s->regs[R_SPI_MIS]); + + qemu_set_irq(s->irq, irq); +} + +static void mss_spi_reset(DeviceState *d) +{ + MSSSpiState *s = MSS_SPI(d); + + memset(s->regs, 0, sizeof s->regs); + s->regs[R_SPI_CONTROL] = 0x80000102; + s->regs[R_SPI_DFSIZE] = 0x4; + s->regs[R_SPI_STATUS] = S_SSEL | S_TXFIFOEMP | S_RXFIFOEMP; + s->regs[R_SPI_CLKGEN] = 0x7; + s->regs[R_SPI_RIS] = 0x0; + + s->fifo_depth = 4; + s->frame_count = 1; + s->enabled = false; + + rxfifo_reset(s); + txfifo_reset(s); +} + +static uint64_t +spi_read(void *opaque, hwaddr addr, unsigned int size) +{ + MSSSpiState *s = opaque; + uint32_t ret = 0; + + addr >>= 2; + switch (addr) { + case R_SPI_RX: + s->regs[R_SPI_STATUS] &= ~S_RXFIFOFUL; + s->regs[R_SPI_STATUS] &= ~S_RXCHOVRF; + if (fifo32_is_empty(&s->rx_fifo)) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Reading empty RX_FIFO\n", + __func__); + } else { + ret = fifo32_pop(&s->rx_fifo); + } + if (fifo32_is_empty(&s->rx_fifo)) { + s->regs[R_SPI_STATUS] |= S_RXFIFOEMP; + } + break; + + case R_SPI_MIS: + update_mis(s); + ret = s->regs[R_SPI_MIS]; + 
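/*
+         * MIS is the masked interrupt status: update_mis() above folds
+         * the CONTROL register interrupt enables into the raw status
+         * (RIS) before the value is returned to the guest.
+         */
+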
break; + + default: + if (addr < ARRAY_SIZE(s->regs)) { + ret = s->regs[addr]; + } else { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad offset 0x%" HWADDR_PRIx "\n", __func__, + addr * 4); + return ret; + } + break; + } + + DB_PRINT("addr=0x%" HWADDR_PRIx " = 0x%" PRIx32, addr * 4, ret); + spi_update_irq(s); + return ret; +} + +static void assert_cs(MSSSpiState *s) +{ + qemu_set_irq(s->cs_line, 0); +} + +static void deassert_cs(MSSSpiState *s) +{ + qemu_set_irq(s->cs_line, 1); +} + +static void spi_flush_txfifo(MSSSpiState *s) +{ + uint32_t tx; + uint32_t rx; + bool sps = !!(s->regs[R_SPI_CONTROL] & C_SPS); + + /* + * Chip Select(CS) is automatically controlled by this controller. + * If SPS bit is set in Control register then CS is asserted + * until all the frames set in frame count of Control register are + * transferred. If SPS is not set then CS pulses between frames. + * Note that Slave Select register specifies which of the CS line + * has to be controlled automatically by controller. Bits SS[7:1] are for + * masters in FPGA fabric since we model only Microcontroller subsystem + * of Smartfusion2 we control only one CS(SS[0]) line. + */ + while (!fifo32_is_empty(&s->tx_fifo) && s->frame_count) { + assert_cs(s); + + s->regs[R_SPI_STATUS] &= ~(S_TXDONE | S_RXRDY); + + tx = fifo32_pop(&s->tx_fifo); + DB_PRINT("data tx:0x%" PRIx32, tx); + rx = ssi_transfer(s->spi, tx); + DB_PRINT("data rx:0x%" PRIx32, rx); + + if (fifo32_num_used(&s->rx_fifo) == s->fifo_depth) { + s->regs[R_SPI_STATUS] |= S_RXCHOVRF; + s->regs[R_SPI_RIS] |= S_RXCHOVRF; + } else { + fifo32_push(&s->rx_fifo, rx); + s->regs[R_SPI_STATUS] &= ~S_RXFIFOEMP; + if (fifo32_num_used(&s->rx_fifo) == (s->fifo_depth - 1)) { + s->regs[R_SPI_STATUS] |= S_RXFIFOFULNXT; + } else if (fifo32_num_used(&s->rx_fifo) == s->fifo_depth) { + s->regs[R_SPI_STATUS] |= S_RXFIFOFUL; + } + } + s->frame_count--; + if (!sps) { + deassert_cs(s); + } + } + + if (!s->frame_count) { + s->frame_count = (s->regs[R_SPI_CONTROL] & FMCOUNT_MASK) >> + FMCOUNT_SHIFT; + deassert_cs(s); + s->regs[R_SPI_RIS] |= S_TXDONE | S_RXRDY; + s->regs[R_SPI_STATUS] |= S_TXDONE | S_RXRDY; + } +} + +static void spi_write(void *opaque, hwaddr addr, + uint64_t val64, unsigned int size) +{ + MSSSpiState *s = opaque; + uint32_t value = val64; + + DB_PRINT("addr=0x%" HWADDR_PRIx " =0x%" PRIx32, addr, value); + addr >>= 2; + + switch (addr) { + case R_SPI_TX: + /* adding to already full FIFO */ + if (fifo32_num_used(&s->tx_fifo) == s->fifo_depth) { + break; + } + s->regs[R_SPI_STATUS] &= ~S_TXFIFOEMP; + fifo32_push(&s->tx_fifo, value); + if (fifo32_num_used(&s->tx_fifo) == (s->fifo_depth - 1)) { + s->regs[R_SPI_STATUS] |= S_TXFIFOFULNXT; + } else if (fifo32_num_used(&s->tx_fifo) == s->fifo_depth) { + s->regs[R_SPI_STATUS] |= S_TXFIFOFUL; + } + if (s->enabled) { + spi_flush_txfifo(s); + } + break; + + case R_SPI_CONTROL: + s->regs[R_SPI_CONTROL] = value; + if (value & C_BIGFIFO) { + set_fifodepth(s); + } else { + s->fifo_depth = 4; + } + s->enabled = value & C_ENABLE; + s->frame_count = (value & FMCOUNT_MASK) >> FMCOUNT_SHIFT; + if (value & C_RESET) { + mss_spi_reset(DEVICE(s)); + } + break; + + case R_SPI_DFSIZE: + if (s->enabled) { + break; + } + /* + * [31:6] bits are reserved bits and for future use. + * [5:0] are for frame size. Only [5:0] bits are validated + * during write, [31:6] bits are untouched. + */ + if ((value & FRAMESZ_MASK) > FRAMESZ_MAX) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: Incorrect size %u provided." 
+ "Maximum frame size is %u\n", + __func__, value & FRAMESZ_MASK, FRAMESZ_MAX); + break; + } + s->regs[R_SPI_DFSIZE] = value; + break; + + case R_SPI_INTCLR: + s->regs[R_SPI_INTCLR] = value; + if (value & S_TXDONE) { + s->regs[R_SPI_RIS] &= ~S_TXDONE; + } + if (value & S_RXRDY) { + s->regs[R_SPI_RIS] &= ~S_RXRDY; + } + if (value & S_RXCHOVRF) { + s->regs[R_SPI_RIS] &= ~S_RXCHOVRF; + } + break; + + case R_SPI_MIS: + case R_SPI_STATUS: + case R_SPI_RIS: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Write to read only register 0x%" HWADDR_PRIx "\n", + __func__, addr * 4); + break; + + default: + if (addr < ARRAY_SIZE(s->regs)) { + s->regs[addr] = value; + } else { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad offset 0x%" HWADDR_PRIx "\n", __func__, + addr * 4); + } + break; + } + + spi_update_irq(s); +} + +static const MemoryRegionOps spi_ops = { + .read = spi_read, + .write = spi_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + .min_access_size = 1, + .max_access_size = 4 + } +}; + +static void mss_spi_realize(DeviceState *dev, Error **errp) +{ + MSSSpiState *s = MSS_SPI(dev); + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + + s->spi = ssi_create_bus(dev, "spi"); + + sysbus_init_irq(sbd, &s->irq); + sysbus_init_irq(sbd, &s->cs_line); + + memory_region_init_io(&s->mmio, OBJECT(s), &spi_ops, s, + TYPE_MSS_SPI, R_SPI_MAX * 4); + sysbus_init_mmio(sbd, &s->mmio); + + fifo32_create(&s->tx_fifo, FIFO_CAPACITY); + fifo32_create(&s->rx_fifo, FIFO_CAPACITY); +} + +static const VMStateDescription vmstate_mss_spi = { + .name = TYPE_MSS_SPI, + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_FIFO32(tx_fifo, MSSSpiState), + VMSTATE_FIFO32(rx_fifo, MSSSpiState), + VMSTATE_UINT32_ARRAY(regs, MSSSpiState, R_SPI_MAX), + VMSTATE_END_OF_LIST() + } +}; + +static void mss_spi_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = mss_spi_realize; + dc->reset = mss_spi_reset; + dc->vmsd = &vmstate_mss_spi; +} + +static const TypeInfo mss_spi_info = { + .name = TYPE_MSS_SPI, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(MSSSpiState), + .class_init = mss_spi_class_init, +}; + +static void mss_spi_register_types(void) +{ + type_register_static(&mss_spi_info); +} + +type_init(mss_spi_register_types) diff --git a/hw/ssi/npcm7xx_fiu.c b/hw/ssi/npcm7xx_fiu.c new file mode 100644 index 000000000..4eedb2927 --- /dev/null +++ b/hw/ssi/npcm7xx_fiu.c @@ -0,0 +1,572 @@ +/* + * Nuvoton NPCM7xx Flash Interface Unit (FIU) + * + * Copyright 2020 Google LLC + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + */ + +#include "qemu/osdep.h" + +#include "hw/irq.h" +#include "hw/qdev-properties.h" +#include "hw/ssi/npcm7xx_fiu.h" +#include "migration/vmstate.h" +#include "qapi/error.h" +#include "qemu/error-report.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "qemu/units.h" + +#include "trace.h" + +/* Up to 128 MiB of flash may be accessed directly as memory. */ +#define NPCM7XX_FIU_FLASH_WINDOW_SIZE (128 * MiB) + +/* Each module has 4 KiB of register space. 
Only a fraction of it is used. */ +#define NPCM7XX_FIU_CTRL_REGS_SIZE (4 * KiB) + +/* 32-bit FIU register indices. */ +enum NPCM7xxFIURegister { + NPCM7XX_FIU_DRD_CFG, + NPCM7XX_FIU_DWR_CFG, + NPCM7XX_FIU_UMA_CFG, + NPCM7XX_FIU_UMA_CTS, + NPCM7XX_FIU_UMA_CMD, + NPCM7XX_FIU_UMA_ADDR, + NPCM7XX_FIU_PRT_CFG, + NPCM7XX_FIU_UMA_DW0 = 0x0020 / sizeof(uint32_t), + NPCM7XX_FIU_UMA_DW1, + NPCM7XX_FIU_UMA_DW2, + NPCM7XX_FIU_UMA_DW3, + NPCM7XX_FIU_UMA_DR0, + NPCM7XX_FIU_UMA_DR1, + NPCM7XX_FIU_UMA_DR2, + NPCM7XX_FIU_UMA_DR3, + NPCM7XX_FIU_PRT_CMD0, + NPCM7XX_FIU_PRT_CMD1, + NPCM7XX_FIU_PRT_CMD2, + NPCM7XX_FIU_PRT_CMD3, + NPCM7XX_FIU_PRT_CMD4, + NPCM7XX_FIU_PRT_CMD5, + NPCM7XX_FIU_PRT_CMD6, + NPCM7XX_FIU_PRT_CMD7, + NPCM7XX_FIU_PRT_CMD8, + NPCM7XX_FIU_PRT_CMD9, + NPCM7XX_FIU_CFG = 0x78 / sizeof(uint32_t), + NPCM7XX_FIU_REGS_END, +}; + +/* FIU_{DRD,DWR,UMA,PTR}_CFG cannot be written when this bit is set. */ +#define NPCM7XX_FIU_CFG_LCK BIT(31) + +/* Direct Read configuration register fields. */ +#define FIU_DRD_CFG_ADDSIZ(rv) extract32(rv, 16, 2) +#define FIU_ADDSIZ_3BYTES 0 +#define FIU_ADDSIZ_4BYTES 1 +#define FIU_DRD_CFG_DBW(rv) extract32(rv, 12, 2) +#define FIU_DRD_CFG_ACCTYPE(rv) extract32(rv, 8, 2) +#define FIU_DRD_CFG_RDCMD(rv) extract32(rv, 0, 8) + +/* Direct Write configuration register fields. */ +#define FIU_DWR_CFG_ADDSIZ(rv) extract32(rv, 16, 2) +#define FIU_DWR_CFG_WRCMD(rv) extract32(rv, 0, 8) + +/* User-Mode Access register fields. */ + +/* Command Mode Lock and the bits protected by it. */ +#define FIU_UMA_CFG_CMMLCK BIT(30) +#define FIU_UMA_CFG_CMMLCK_MASK 0x00000403 + +#define FIU_UMA_CFG_RDATSIZ(rv) extract32(rv, 24, 5) +#define FIU_UMA_CFG_DBSIZ(rv) extract32(rv, 21, 3) +#define FIU_UMA_CFG_WDATSIZ(rv) extract32(rv, 16, 5) +#define FIU_UMA_CFG_ADDSIZ(rv) extract32(rv, 11, 3) +#define FIU_UMA_CFG_CMDSIZ(rv) extract32(rv, 10, 1) +#define FIU_UMA_CFG_DBPCK(rv) extract32(rv, 6, 2) + +#define FIU_UMA_CTS_RDYIE BIT(25) +#define FIU_UMA_CTS_RDYST BIT(24) +#define FIU_UMA_CTS_SW_CS BIT(16) +#define FIU_UMA_CTS_DEV_NUM(rv) extract32(rv, 8, 2) +#define FIU_UMA_CTS_EXEC_DONE BIT(0) + +/* + * Returns the index of flash in the fiu->flash array. This corresponds to the + * chip select ID of the flash. + */ +static unsigned npcm7xx_fiu_cs_index(NPCM7xxFIUState *fiu, + NPCM7xxFIUFlash *flash) +{ + int index = flash - fiu->flash; + + g_assert(index >= 0 && index < fiu->cs_count); + + return index; +} + +/* Assert the chip select specified in the UMA Control/Status Register. */ +static void npcm7xx_fiu_select(NPCM7xxFIUState *s, unsigned cs_id) +{ + trace_npcm7xx_fiu_select(DEVICE(s)->canonical_path, cs_id); + + if (cs_id < s->cs_count) { + qemu_irq_lower(s->cs_lines[cs_id]); + s->active_cs = cs_id; + } else { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: UMA to CS%d; this module has only %d chip selects", + DEVICE(s)->canonical_path, cs_id, s->cs_count); + s->active_cs = -1; + } +} + +/* Deassert the currently active chip select. */ +static void npcm7xx_fiu_deselect(NPCM7xxFIUState *s) +{ + if (s->active_cs < 0) { + return; + } + + trace_npcm7xx_fiu_deselect(DEVICE(s)->canonical_path, s->active_cs); + + qemu_irq_raise(s->cs_lines[s->active_cs]); + s->active_cs = -1; +} + +/* Direct flash memory read handler. 
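A read replays the full SPI sequence configured in FIU_DRD_CFG (read command, address bytes, dummy cycles, then data fetch) on the SSI bus.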
*/ +static uint64_t npcm7xx_fiu_flash_read(void *opaque, hwaddr addr, + unsigned int size) +{ + NPCM7xxFIUFlash *f = opaque; + NPCM7xxFIUState *fiu = f->fiu; + uint64_t value = 0; + uint32_t drd_cfg; + int dummy_cycles; + int i; + + if (fiu->active_cs != -1) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: direct flash read with CS%d already active", + DEVICE(fiu)->canonical_path, fiu->active_cs); + } + + npcm7xx_fiu_select(fiu, npcm7xx_fiu_cs_index(fiu, f)); + + drd_cfg = fiu->regs[NPCM7XX_FIU_DRD_CFG]; + ssi_transfer(fiu->spi, FIU_DRD_CFG_RDCMD(drd_cfg)); + + switch (FIU_DRD_CFG_ADDSIZ(drd_cfg)) { + case FIU_ADDSIZ_4BYTES: + ssi_transfer(fiu->spi, extract32(addr, 24, 8)); + /* fall through */ + case FIU_ADDSIZ_3BYTES: + ssi_transfer(fiu->spi, extract32(addr, 16, 8)); + ssi_transfer(fiu->spi, extract32(addr, 8, 8)); + ssi_transfer(fiu->spi, extract32(addr, 0, 8)); + break; + + default: + qemu_log_mask(LOG_GUEST_ERROR, "%s: bad address size %d\n", + DEVICE(fiu)->canonical_path, FIU_DRD_CFG_ADDSIZ(drd_cfg)); + break; + } + + /* Flash chip model expects one transfer per dummy bit, not byte */ + dummy_cycles = + (FIU_DRD_CFG_DBW(drd_cfg) * 8) >> FIU_DRD_CFG_ACCTYPE(drd_cfg); + for (i = 0; i < dummy_cycles; i++) { + ssi_transfer(fiu->spi, 0); + } + + for (i = 0; i < size; i++) { + value = deposit64(value, 8 * i, 8, ssi_transfer(fiu->spi, 0)); + } + + trace_npcm7xx_fiu_flash_read(DEVICE(fiu)->canonical_path, fiu->active_cs, + addr, size, value); + + npcm7xx_fiu_deselect(fiu); + + return value; +} + +/* Direct flash memory write handler. */ +static void npcm7xx_fiu_flash_write(void *opaque, hwaddr addr, uint64_t v, + unsigned int size) +{ + NPCM7xxFIUFlash *f = opaque; + NPCM7xxFIUState *fiu = f->fiu; + uint32_t dwr_cfg; + unsigned cs_id; + int i; + + if (fiu->active_cs != -1) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: direct flash write with CS%d already active", + DEVICE(fiu)->canonical_path, fiu->active_cs); + } + + cs_id = npcm7xx_fiu_cs_index(fiu, f); + trace_npcm7xx_fiu_flash_write(DEVICE(fiu)->canonical_path, cs_id, addr, + size, v); + npcm7xx_fiu_select(fiu, cs_id); + + dwr_cfg = fiu->regs[NPCM7XX_FIU_DWR_CFG]; + ssi_transfer(fiu->spi, FIU_DWR_CFG_WRCMD(dwr_cfg)); + + switch (FIU_DWR_CFG_ADDSIZ(dwr_cfg)) { + case FIU_ADDSIZ_4BYTES: + ssi_transfer(fiu->spi, extract32(addr, 24, 8)); + /* fall through */ + case FIU_ADDSIZ_3BYTES: + ssi_transfer(fiu->spi, extract32(addr, 16, 8)); + ssi_transfer(fiu->spi, extract32(addr, 8, 8)); + ssi_transfer(fiu->spi, extract32(addr, 0, 8)); + break; + + default: + qemu_log_mask(LOG_GUEST_ERROR, "%s: bad address size %d\n", + DEVICE(fiu)->canonical_path, FIU_DWR_CFG_ADDSIZ(dwr_cfg)); + break; + } + + for (i = 0; i < size; i++) { + ssi_transfer(fiu->spi, extract64(v, i * 8, 8)); + } + + npcm7xx_fiu_deselect(fiu); +} + +static const MemoryRegionOps npcm7xx_fiu_flash_ops = { + .read = npcm7xx_fiu_flash_read, + .write = npcm7xx_fiu_flash_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 1, + .max_access_size = 8, + .unaligned = true, + }, +}; + +/* Control register read handler. 
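Reads have no side effects; any in-range offset simply returns the stored register value.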
*/ +static uint64_t npcm7xx_fiu_ctrl_read(void *opaque, hwaddr addr, + unsigned int size) +{ + hwaddr reg = addr / sizeof(uint32_t); + NPCM7xxFIUState *s = opaque; + uint32_t value; + + if (reg < NPCM7XX_FIU_NR_REGS) { + value = s->regs[reg]; + } else { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: read from invalid offset 0x%" PRIx64 "\n", + DEVICE(s)->canonical_path, addr); + value = 0; + } + + trace_npcm7xx_fiu_ctrl_read(DEVICE(s)->canonical_path, addr, value); + + return value; +} + +/* Send the specified number of address bytes from the UMA address register. */ +static void send_address(SSIBus *spi, unsigned int addsiz, uint32_t addr) +{ + switch (addsiz) { + case 4: + ssi_transfer(spi, extract32(addr, 24, 8)); + /* fall through */ + case 3: + ssi_transfer(spi, extract32(addr, 16, 8)); + /* fall through */ + case 2: + ssi_transfer(spi, extract32(addr, 8, 8)); + /* fall through */ + case 1: + ssi_transfer(spi, extract32(addr, 0, 8)); + /* fall through */ + case 0: + break; + } +} + +/* Send the number of dummy bits specified in the UMA config register. */ +static void send_dummy_bits(SSIBus *spi, uint32_t uma_cfg, uint32_t uma_cmd) +{ + unsigned int bits_per_clock = 1U << FIU_UMA_CFG_DBPCK(uma_cfg); + unsigned int i; + + for (i = 0; i < FIU_UMA_CFG_DBSIZ(uma_cfg); i++) { + /* Use bytes 0 and 1 first, then keep repeating byte 2 */ + unsigned int field = (i < 2) ? ((i + 1) * 8) : 24; + unsigned int j; + + for (j = 0; j < 8; j += bits_per_clock) { + ssi_transfer(spi, extract32(uma_cmd, field + j, bits_per_clock)); + } + } +} + +/* Perform a User-Mode Access transaction. */ +static void npcm7xx_fiu_uma_transaction(NPCM7xxFIUState *s) +{ + uint32_t uma_cts = s->regs[NPCM7XX_FIU_UMA_CTS]; + uint32_t uma_cfg; + unsigned int i; + + /* SW_CS means the CS is already forced low, so don't touch it. */ + if (uma_cts & FIU_UMA_CTS_SW_CS) { + int cs_id = FIU_UMA_CTS_DEV_NUM(s->regs[NPCM7XX_FIU_UMA_CTS]); + npcm7xx_fiu_select(s, cs_id); + } + + /* Send command, if present. */ + uma_cfg = s->regs[NPCM7XX_FIU_UMA_CFG]; + if (FIU_UMA_CFG_CMDSIZ(uma_cfg) > 0) { + ssi_transfer(s->spi, extract32(s->regs[NPCM7XX_FIU_UMA_CMD], 0, 8)); + } + + /* Send address, if present. */ + send_address(s->spi, FIU_UMA_CFG_ADDSIZ(uma_cfg), + s->regs[NPCM7XX_FIU_UMA_ADDR]); + + /* Write data, if present. */ + for (i = 0; i < FIU_UMA_CFG_WDATSIZ(uma_cfg); i++) { + unsigned int reg = + (i < 16) ? (NPCM7XX_FIU_UMA_DW0 + i / 4) : NPCM7XX_FIU_UMA_DW3; + unsigned int field = (i % 4) * 8; + + ssi_transfer(s->spi, extract32(s->regs[reg], field, 8)); + } + + /* Send dummy bits, if present. */ + send_dummy_bits(s->spi, uma_cfg, s->regs[NPCM7XX_FIU_UMA_CMD]); + + /* Read data, if present. */ + for (i = 0; i < FIU_UMA_CFG_RDATSIZ(uma_cfg); i++) { + unsigned int reg = NPCM7XX_FIU_UMA_DR0 + i / 4; + unsigned int field = (i % 4) * 8; + uint8_t c; + + c = ssi_transfer(s->spi, 0); + if (reg <= NPCM7XX_FIU_UMA_DR3) { + s->regs[reg] = deposit32(s->regs[reg], field, 8, c); + } + } + + /* Again, don't touch CS if the user is forcing it low. */ + if (uma_cts & FIU_UMA_CTS_SW_CS) { + npcm7xx_fiu_deselect(s); + } + + /* RDYST means a command has completed since it was cleared. */ + s->regs[NPCM7XX_FIU_UMA_CTS] |= FIU_UMA_CTS_RDYST; + /* EXEC_DONE means Execute Command / Not Done, so clear it here. */ + s->regs[NPCM7XX_FIU_UMA_CTS] &= ~FIU_UMA_CTS_EXEC_DONE; +} + +/* Control register write handler. 
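A write to FIU_UMA_CTS with EXEC_DONE set is what launches a UMA transaction; the DRD/DWR/UMA configuration registers additionally honour their lock bits.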
*/ +static void npcm7xx_fiu_ctrl_write(void *opaque, hwaddr addr, uint64_t v, + unsigned int size) +{ + hwaddr reg = addr / sizeof(uint32_t); + NPCM7xxFIUState *s = opaque; + uint32_t value = v; + + trace_npcm7xx_fiu_ctrl_write(DEVICE(s)->canonical_path, addr, value); + + switch (reg) { + case NPCM7XX_FIU_UMA_CFG: + if (s->regs[reg] & FIU_UMA_CFG_CMMLCK) { + value &= ~FIU_UMA_CFG_CMMLCK_MASK; + value |= (s->regs[reg] & FIU_UMA_CFG_CMMLCK_MASK); + } + /* fall through */ + case NPCM7XX_FIU_DRD_CFG: + case NPCM7XX_FIU_DWR_CFG: + if (s->regs[reg] & NPCM7XX_FIU_CFG_LCK) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: write to locked register @ 0x%" PRIx64 "\n", + DEVICE(s)->canonical_path, addr); + return; + } + s->regs[reg] = value; + break; + + case NPCM7XX_FIU_UMA_CTS: + if (value & FIU_UMA_CTS_RDYST) { + value &= ~FIU_UMA_CTS_RDYST; + } else { + value |= s->regs[reg] & FIU_UMA_CTS_RDYST; + } + if ((s->regs[reg] ^ value) & FIU_UMA_CTS_SW_CS) { + if (value & FIU_UMA_CTS_SW_CS) { + /* + * Don't drop CS if there's a transfer in progress, or we're + * about to start one. + */ + if (!((value | s->regs[reg]) & FIU_UMA_CTS_EXEC_DONE)) { + npcm7xx_fiu_deselect(s); + } + } else { + int cs_id = FIU_UMA_CTS_DEV_NUM(s->regs[NPCM7XX_FIU_UMA_CTS]); + npcm7xx_fiu_select(s, cs_id); + } + } + s->regs[reg] = value | (s->regs[reg] & FIU_UMA_CTS_EXEC_DONE); + if (value & FIU_UMA_CTS_EXEC_DONE) { + npcm7xx_fiu_uma_transaction(s); + } + break; + + case NPCM7XX_FIU_UMA_DR0 ... NPCM7XX_FIU_UMA_DR3: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: write to read-only register @ 0x%" PRIx64 "\n", + DEVICE(s)->canonical_path, addr); + return; + + case NPCM7XX_FIU_PRT_CFG: + case NPCM7XX_FIU_PRT_CMD0 ... NPCM7XX_FIU_PRT_CMD9: + qemu_log_mask(LOG_UNIMP, "%s: PRT is not implemented\n", __func__); + break; + + case NPCM7XX_FIU_UMA_CMD: + case NPCM7XX_FIU_UMA_ADDR: + case NPCM7XX_FIU_UMA_DW0 ... 
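+    /* DW0-DW3 stage up to 16 bytes of UMA write data; plain stores */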
NPCM7XX_FIU_UMA_DW3: + case NPCM7XX_FIU_CFG: + s->regs[reg] = value; + break; + + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: write to invalid offset 0x%" PRIx64 "\n", + DEVICE(s)->canonical_path, addr); + return; + } +} + +static const MemoryRegionOps npcm7xx_fiu_ctrl_ops = { + .read = npcm7xx_fiu_ctrl_read, + .write = npcm7xx_fiu_ctrl_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + .unaligned = false, + }, +}; + +static void npcm7xx_fiu_enter_reset(Object *obj, ResetType type) +{ + NPCM7xxFIUState *s = NPCM7XX_FIU(obj); + + trace_npcm7xx_fiu_enter_reset(DEVICE(obj)->canonical_path, type); + + memset(s->regs, 0, sizeof(s->regs)); + + s->regs[NPCM7XX_FIU_DRD_CFG] = 0x0300100b; + s->regs[NPCM7XX_FIU_DWR_CFG] = 0x03000002; + s->regs[NPCM7XX_FIU_UMA_CFG] = 0x00000400; + s->regs[NPCM7XX_FIU_UMA_CTS] = 0x00010000; + s->regs[NPCM7XX_FIU_UMA_CMD] = 0x0000000b; + s->regs[NPCM7XX_FIU_PRT_CFG] = 0x00000400; + s->regs[NPCM7XX_FIU_CFG] = 0x0000000b; +} + +static void npcm7xx_fiu_hold_reset(Object *obj) +{ + NPCM7xxFIUState *s = NPCM7XX_FIU(obj); + int i; + + trace_npcm7xx_fiu_hold_reset(DEVICE(obj)->canonical_path); + + for (i = 0; i < s->cs_count; i++) { + qemu_irq_raise(s->cs_lines[i]); + } +} + +static void npcm7xx_fiu_realize(DeviceState *dev, Error **errp) +{ + NPCM7xxFIUState *s = NPCM7XX_FIU(dev); + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + int i; + + if (s->cs_count <= 0) { + error_setg(errp, "%s: %d chip selects specified, need at least one", + dev->canonical_path, s->cs_count); + return; + } + + s->spi = ssi_create_bus(dev, "spi"); + s->cs_lines = g_new0(qemu_irq, s->cs_count); + qdev_init_gpio_out_named(DEVICE(s), s->cs_lines, "cs", s->cs_count); + s->flash = g_new0(NPCM7xxFIUFlash, s->cs_count); + + /* + * Register the control registers region first. It may be followed by one + * or more direct flash access regions. 
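+     * Each chip select gets its own direct-access window of up to
+     * 128 MiB.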
+ */ + memory_region_init_io(&s->mmio, OBJECT(s), &npcm7xx_fiu_ctrl_ops, s, "ctrl", + NPCM7XX_FIU_CTRL_REGS_SIZE); + sysbus_init_mmio(sbd, &s->mmio); + + for (i = 0; i < s->cs_count; i++) { + NPCM7xxFIUFlash *flash = &s->flash[i]; + flash->fiu = s; + memory_region_init_io(&flash->direct_access, OBJECT(s), + &npcm7xx_fiu_flash_ops, &s->flash[i], "flash", + NPCM7XX_FIU_FLASH_WINDOW_SIZE); + sysbus_init_mmio(sbd, &flash->direct_access); + } +} + +static const VMStateDescription vmstate_npcm7xx_fiu = { + .name = "npcm7xx-fiu", + .version_id = 0, + .minimum_version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_INT32(active_cs, NPCM7xxFIUState), + VMSTATE_UINT32_ARRAY(regs, NPCM7xxFIUState, NPCM7XX_FIU_NR_REGS), + VMSTATE_END_OF_LIST(), + }, +}; + +static Property npcm7xx_fiu_properties[] = { + DEFINE_PROP_INT32("cs-count", NPCM7xxFIUState, cs_count, 0), + DEFINE_PROP_END_OF_LIST(), +}; + +static void npcm7xx_fiu_class_init(ObjectClass *klass, void *data) +{ + ResettableClass *rc = RESETTABLE_CLASS(klass); + DeviceClass *dc = DEVICE_CLASS(klass); + + QEMU_BUILD_BUG_ON(NPCM7XX_FIU_REGS_END > NPCM7XX_FIU_NR_REGS); + + dc->desc = "NPCM7xx Flash Interface Unit"; + dc->realize = npcm7xx_fiu_realize; + dc->vmsd = &vmstate_npcm7xx_fiu; + rc->phases.enter = npcm7xx_fiu_enter_reset; + rc->phases.hold = npcm7xx_fiu_hold_reset; + device_class_set_props(dc, npcm7xx_fiu_properties); +} + +static const TypeInfo npcm7xx_fiu_types[] = { + { + .name = TYPE_NPCM7XX_FIU, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(NPCM7xxFIUState), + .class_init = npcm7xx_fiu_class_init, + }, +}; +DEFINE_TYPES(npcm7xx_fiu_types); diff --git a/hw/ssi/omap_spi.c b/hw/ssi/omap_spi.c new file mode 100644 index 000000000..7c7e68970 --- /dev/null +++ b/hw/ssi/omap_spi.c @@ -0,0 +1,381 @@ +/* + * TI OMAP processor's Multichannel SPI emulation. + * + * Copyright (C) 2007-2009 Nokia Corporation + * + * Original code for OMAP2 by Andrzej Zaborowski + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 or + * (at your option) any later version of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "hw/hw.h" +#include "hw/irq.h" +#include "hw/arm/omap.h" + +/* Multichannel SPI */ +struct omap_mcspi_s { + MemoryRegion iomem; + qemu_irq irq; + int chnum; + + uint32_t sysconfig; + uint32_t systest; + uint32_t irqst; + uint32_t irqen; + uint32_t wken; + uint32_t control; + + struct omap_mcspi_ch_s { + qemu_irq txdrq; + qemu_irq rxdrq; + uint32_t (*txrx)(void *opaque, uint32_t, int); + void *opaque; + + uint32_t tx; + uint32_t rx; + + uint32_t config; + uint32_t status; + uint32_t control; + } ch[4]; +}; + +static inline void omap_mcspi_interrupt_update(struct omap_mcspi_s *s) +{ + qemu_set_irq(s->irq, s->irqst & s->irqen); +} + +static inline void omap_mcspi_dmarequest_update(struct omap_mcspi_ch_s *ch) +{ + qemu_set_irq(ch->txdrq, + (ch->control & 1) && /* EN */ + (ch->config & (1 << 14)) && /* DMAW */ + (ch->status & (1 << 1)) && /* TXS */ + ((ch->config >> 12) & 3) != 1); /* TRM */ + qemu_set_irq(ch->rxdrq, + (ch->control & 1) && /* EN */ + (ch->config & (1 << 15)) && /* DMAW */ + (ch->status & (1 << 0)) && /* RXS */ + ((ch->config >> 12) & 3) != 2); /* TRM */ +} + +static void omap_mcspi_transfer_run(struct omap_mcspi_s *s, int chnum) +{ + struct omap_mcspi_ch_s *ch = s->ch + chnum; + + if (!(ch->control & 1)) /* EN */ + return; + if ((ch->status & (1 << 0)) && /* RXS */ + ((ch->config >> 12) & 3) != 2 && /* TRM */ + !(ch->config & (1 << 19))) /* TURBO */ + goto intr_update; + if ((ch->status & (1 << 1)) && /* TXS */ + ((ch->config >> 12) & 3) != 1) /* TRM */ + goto intr_update; + + if (!(s->control & 1) || /* SINGLE */ + (ch->config & (1 << 20))) { /* FORCE */ + if (ch->txrx) + ch->rx = ch->txrx(ch->opaque, ch->tx, /* WL */ + 1 + (0x1f & (ch->config >> 7))); + } + + ch->tx = 0; + ch->status |= 1 << 2; /* EOT */ + ch->status |= 1 << 1; /* TXS */ + if (((ch->config >> 12) & 3) != 2) /* TRM */ + ch->status |= 1 << 0; /* RXS */ + +intr_update: + if ((ch->status & (1 << 0)) && /* RXS */ + ((ch->config >> 12) & 3) != 2 && /* TRM */ + !(ch->config & (1 << 19))) /* TURBO */ + s->irqst |= 1 << (2 + 4 * chnum); /* RX_FULL */ + if ((ch->status & (1 << 1)) && /* TXS */ + ((ch->config >> 12) & 3) != 1) /* TRM */ + s->irqst |= 1 << (0 + 4 * chnum); /* TX_EMPTY */ + omap_mcspi_interrupt_update(s); + omap_mcspi_dmarequest_update(ch); +} + +void omap_mcspi_reset(struct omap_mcspi_s *s) +{ + int ch; + + s->sysconfig = 0; + s->systest = 0; + s->irqst = 0; + s->irqen = 0; + s->wken = 0; + s->control = 4; + + for (ch = 0; ch < 4; ch ++) { + s->ch[ch].config = 0x060000; + s->ch[ch].status = 2; /* TXS */ + s->ch[ch].control = 0; + + omap_mcspi_dmarequest_update(s->ch + ch); + } + + omap_mcspi_interrupt_update(s); +} + +static uint64_t omap_mcspi_read(void *opaque, hwaddr addr, + unsigned size) +{ + struct omap_mcspi_s *s = (struct omap_mcspi_s *) opaque; + int ch = 0; + uint32_t ret; + + if (size != 4) { + return omap_badwidth_read32(opaque, addr); + } + + switch (addr) { + case 0x00: /* MCSPI_REVISION */ + return 0x91; + + case 0x10: /* MCSPI_SYSCONFIG */ + return s->sysconfig; + + case 0x14: /* MCSPI_SYSSTATUS */ + return 1; /* RESETDONE */ + + case 0x18: /* MCSPI_IRQSTATUS */ + return s->irqst; + + case 0x1c: /* MCSPI_IRQENABLE */ + return s->irqen; + + case 0x20: /* MCSPI_WAKEUPENABLE */ + return s->wken; + + case 0x24: /* MCSPI_SYST */ + return s->systest; + + case 0x28: /* MCSPI_MODULCTRL */ + return s->control; + + case 0x68: ch ++; + /* fall through */ + case 0x54: ch ++; + /* fall through */ + case 0x40: ch ++; + /* fall through */ 
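+    /*
+     * The per-channel registers repeat every 0x14 bytes; the cascaded
+     * cases above count how far above channel 0 the offset lands
+     * before falling into the shared, channel-indexed handler below.
+     */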
+ case 0x2c: /* MCSPI_CHCONF */ + return s->ch[ch].config; + + case 0x6c: ch ++; + /* fall through */ + case 0x58: ch ++; + /* fall through */ + case 0x44: ch ++; + /* fall through */ + case 0x30: /* MCSPI_CHSTAT */ + return s->ch[ch].status; + + case 0x70: ch ++; + /* fall through */ + case 0x5c: ch ++; + /* fall through */ + case 0x48: ch ++; + /* fall through */ + case 0x34: /* MCSPI_CHCTRL */ + return s->ch[ch].control; + + case 0x74: ch ++; + /* fall through */ + case 0x60: ch ++; + /* fall through */ + case 0x4c: ch ++; + /* fall through */ + case 0x38: /* MCSPI_TX */ + return s->ch[ch].tx; + + case 0x78: ch ++; + /* fall through */ + case 0x64: ch ++; + /* fall through */ + case 0x50: ch ++; + /* fall through */ + case 0x3c: /* MCSPI_RX */ + s->ch[ch].status &= ~(1 << 0); /* RXS */ + ret = s->ch[ch].rx; + omap_mcspi_transfer_run(s, ch); + return ret; + } + + OMAP_BAD_REG(addr); + return 0; +} + +static void omap_mcspi_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + struct omap_mcspi_s *s = (struct omap_mcspi_s *) opaque; + int ch = 0; + + if (size != 4) { + omap_badwidth_write32(opaque, addr, value); + return; + } + + switch (addr) { + case 0x00: /* MCSPI_REVISION */ + case 0x14: /* MCSPI_SYSSTATUS */ + case 0x30: /* MCSPI_CHSTAT0 */ + case 0x3c: /* MCSPI_RX0 */ + case 0x44: /* MCSPI_CHSTAT1 */ + case 0x50: /* MCSPI_RX1 */ + case 0x58: /* MCSPI_CHSTAT2 */ + case 0x64: /* MCSPI_RX2 */ + case 0x6c: /* MCSPI_CHSTAT3 */ + case 0x78: /* MCSPI_RX3 */ + OMAP_RO_REG(addr); + return; + + case 0x10: /* MCSPI_SYSCONFIG */ + if (value & (1 << 1)) /* SOFTRESET */ + omap_mcspi_reset(s); + s->sysconfig = value & 0x31d; + break; + + case 0x18: /* MCSPI_IRQSTATUS */ + if (!((s->control & (1 << 3)) && (s->systest & (1 << 11)))) { + s->irqst &= ~value; + omap_mcspi_interrupt_update(s); + } + break; + + case 0x1c: /* MCSPI_IRQENABLE */ + s->irqen = value & 0x1777f; + omap_mcspi_interrupt_update(s); + break; + + case 0x20: /* MCSPI_WAKEUPENABLE */ + s->wken = value & 1; + break; + + case 0x24: /* MCSPI_SYST */ + if (s->control & (1 << 3)) /* SYSTEM_TEST */ + if (value & (1 << 11)) { /* SSB */ + s->irqst |= 0x1777f; + omap_mcspi_interrupt_update(s); + } + s->systest = value & 0xfff; + break; + + case 0x28: /* MCSPI_MODULCTRL */ + if (value & (1 << 3)) /* SYSTEM_TEST */ + if (s->systest & (1 << 11)) { /* SSB */ + s->irqst |= 0x1777f; + omap_mcspi_interrupt_update(s); + } + s->control = value & 0xf; + break; + + case 0x68: ch ++; + /* fall through */ + case 0x54: ch ++; + /* fall through */ + case 0x40: ch ++; + /* fall through */ + case 0x2c: /* MCSPI_CHCONF */ + if ((value ^ s->ch[ch].config) & (3 << 14)) /* DMAR | DMAW */ + omap_mcspi_dmarequest_update(s->ch + ch); + if (((value >> 12) & 3) == 3) { /* TRM */ + qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid TRM value (3)\n", + __func__); + } + if (((value >> 7) & 0x1f) < 3) { /* WL */ + qemu_log_mask(LOG_GUEST_ERROR, + "%s: invalid WL value (%" PRIx64 ")\n", + __func__, (value >> 7) & 0x1f); + } + s->ch[ch].config = value & 0x7fffff; + break; + + case 0x70: ch ++; + /* fall through */ + case 0x5c: ch ++; + /* fall through */ + case 0x48: ch ++; + /* fall through */ + case 0x34: /* MCSPI_CHCTRL */ + if (value & ~s->ch[ch].control & 1) { /* EN */ + s->ch[ch].control |= 1; + omap_mcspi_transfer_run(s, ch); + } else + s->ch[ch].control = value & 1; + break; + + case 0x74: ch ++; + /* fall through */ + case 0x60: ch ++; + /* fall through */ + case 0x4c: ch ++; + /* fall through */ + case 0x38: /* MCSPI_TX */ + s->ch[ch].tx = value; + 
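/*
+         * Loading MCSPI_TX clears TXS (the TX register is no longer
+         * empty) and immediately attempts to run the transfer.
+         */
+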
        s->ch[ch].status &= ~(1 << 1);          /* TXS */
+        omap_mcspi_transfer_run(s, ch);
+        break;
+
+    default:
+        OMAP_BAD_REG(addr);
+        return;
+    }
+}
+
+static const MemoryRegionOps omap_mcspi_ops = {
+    .read = omap_mcspi_read,
+    .write = omap_mcspi_write,
+    .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+struct omap_mcspi_s *omap_mcspi_init(struct omap_target_agent_s *ta, int chnum,
+                qemu_irq irq, qemu_irq *drq, omap_clk fclk, omap_clk iclk)
+{
+    struct omap_mcspi_s *s = g_new0(struct omap_mcspi_s, 1);
+    struct omap_mcspi_ch_s *ch = s->ch;
+
+    s->irq = irq;
+    s->chnum = chnum;
+    while (chnum --) {
+        ch->txdrq = *drq ++;
+        ch->rxdrq = *drq ++;
+        ch ++;
+    }
+    omap_mcspi_reset(s);
+
+    memory_region_init_io(&s->iomem, NULL, &omap_mcspi_ops, s, "omap.mcspi",
+                          omap_l4_region_size(ta, 0));
+    omap_l4_attach(ta, 0, &s->iomem);
+
+    return s;
+}
+
+void omap_mcspi_attach(struct omap_mcspi_s *s,
+                uint32_t (*txrx)(void *opaque, uint32_t, int), void *opaque,
+                int chipselect)
+{
+    if (chipselect < 0 || chipselect >= s->chnum)
+        hw_error("%s: Bad chipselect %i\n", __func__, chipselect);
+
+    s->ch[chipselect].txrx = txrx;
+    s->ch[chipselect].opaque = opaque;
+}
diff --git a/hw/ssi/pl022.c b/hw/ssi/pl022.c
new file mode 100644
index 000000000..8954ffebb
--- /dev/null
+++ b/hw/ssi/pl022.c
@@ -0,0 +1,316 @@
+/*
+ * Arm PrimeCell PL022 Synchronous Serial Port
+ *
+ * Copyright (c) 2007 CodeSourcery.
+ * Written by Paul Brook
+ *
+ * This code is licensed under the GPL.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/sysbus.h"
+#include "migration/vmstate.h"
+#include "hw/irq.h"
+#include "hw/ssi/pl022.h"
+#include "hw/ssi/ssi.h"
+#include "qemu/log.h"
+#include "qemu/module.h"
+
+//#define DEBUG_PL022 1
+
+#ifdef DEBUG_PL022
+#define DPRINTF(fmt, ...) \
+do { printf("pl022: " fmt , ## __VA_ARGS__); } while (0)
+#define BADF(fmt, ...) \
+do { fprintf(stderr, "pl022: error: " fmt , ## __VA_ARGS__); exit(1);} while (0)
+#else
+#define DPRINTF(fmt, ...) do {} while(0)
+#define BADF(fmt, ...) \
+do { fprintf(stderr, "pl022: error: " fmt , ## __VA_ARGS__);} while (0)
+#endif
+
+#define PL022_CR1_LBM 0x01
+#define PL022_CR1_SSE 0x02
+#define PL022_CR1_MS  0x04
+#define PL022_CR1_SDO 0x08
+
+#define PL022_SR_TFE  0x01
+#define PL022_SR_TNF  0x02
+#define PL022_SR_RNE  0x04
+#define PL022_SR_RFF  0x08
+#define PL022_SR_BSY  0x10
+
+#define PL022_INT_ROR 0x01
+#define PL022_INT_RT  0x02
+#define PL022_INT_RX  0x04
+#define PL022_INT_TX  0x08
+
+static const unsigned char pl022_id[8] =
+  { 0x22, 0x10, 0x04, 0x00, 0x0d, 0xf0, 0x05, 0xb1 };
+
+static void pl022_update(PL022State *s)
+{
+    s->sr = 0;
+    if (s->tx_fifo_len == 0)
+        s->sr |= PL022_SR_TFE;
+    if (s->tx_fifo_len != 8)
+        s->sr |= PL022_SR_TNF;
+    if (s->rx_fifo_len != 0)
+        s->sr |= PL022_SR_RNE;
+    if (s->rx_fifo_len == 8)
+        s->sr |= PL022_SR_RFF;
+    if (s->tx_fifo_len)
+        s->sr |= PL022_SR_BSY;
+    s->is = 0;
+    if (s->rx_fifo_len >= 4)
+        s->is |= PL022_INT_RX;
+    if (s->tx_fifo_len <= 4)
+        s->is |= PL022_INT_TX;
+
+    qemu_set_irq(s->irq, (s->is & s->im) != 0);
+}
+
+static void pl022_xfer(PL022State *s)
+{
+    int i;
+    int o;
+    int val;
+
+    if ((s->cr1 & PL022_CR1_SSE) == 0) {
+        pl022_update(s);
+        DPRINTF("Disabled\n");
+        return;
+    }
+
+    DPRINTF("Maybe xfer %d/%d\n", s->tx_fifo_len, s->rx_fifo_len);
+    i = (s->tx_fifo_head - s->tx_fifo_len) & 7;
+    o = s->rx_fifo_head;
+    /* ??? We do not emulate the line speed.
+       This may break some applications.  There are two problematic cases:
+        (a) A driver feeds data into the TX FIFO until it is full,
+            and only then drains the RX FIFO.  On real hardware the CPU can
+            feed data fast enough that the RX fifo never gets a chance to
+            overflow.
+        (b) A driver transmits data, deliberately allowing the RX FIFO to
+            overflow because it ignores the RX data anyway.
+
+       We choose to support (a) by stalling the transmit engine if it would
+       cause the RX FIFO to overflow.  In practice much transmit-only code
+       falls into (a) because it flushes the RX FIFO to determine when
+       the transfer has completed.  */
+    while (s->tx_fifo_len && s->rx_fifo_len < 8) {
+        DPRINTF("xfer\n");
+        val = s->tx_fifo[i];
+        if (s->cr1 & PL022_CR1_LBM) {
+            /* Loopback mode. */
+        } else {
+            val = ssi_transfer(s->ssi, val);
+        }
+        s->rx_fifo[o] = val & s->bitmask;
+        i = (i + 1) & 7;
+        o = (o + 1) & 7;
+        s->tx_fifo_len--;
+        s->rx_fifo_len++;
+    }
+    s->rx_fifo_head = o;
+    pl022_update(s);
+}
+
+static uint64_t pl022_read(void *opaque, hwaddr offset,
+                           unsigned size)
+{
+    PL022State *s = (PL022State *)opaque;
+    int val;
+
+    if (offset >= 0xfe0 && offset < 0x1000) {
+        return pl022_id[(offset - 0xfe0) >> 2];
+    }
+    switch (offset) {
+    case 0x00: /* CR0 */
+        return s->cr0;
+    case 0x04: /* CR1 */
+        return s->cr1;
+    case 0x08: /* DR */
+        if (s->rx_fifo_len) {
+            val = s->rx_fifo[(s->rx_fifo_head - s->rx_fifo_len) & 7];
+            DPRINTF("RX %02x\n", val);
+            s->rx_fifo_len--;
+            pl022_xfer(s);
+        } else {
+            val = 0;
+        }
+        return val;
+    case 0x0c: /* SR */
+        return s->sr;
+    case 0x10: /* CPSR */
+        return s->cpsr;
+    case 0x14: /* IMSC */
+        return s->im;
+    case 0x18: /* RIS */
+        return s->is;
+    case 0x1c: /* MIS */
+        return s->im & s->is;
+    case 0x24: /* DMACR */
+        /* Not implemented.  */
+        return 0;
+    default:
+        qemu_log_mask(LOG_GUEST_ERROR,
+                      "pl022_read: Bad offset %x\n", (int)offset);
+        return 0;
+    }
+}
+
+static void pl022_write(void *opaque, hwaddr offset,
+                        uint64_t value, unsigned size)
+{
+    PL022State *s = (PL022State *)opaque;
+
+    switch (offset) {
+    case 0x00: /* CR0 */
+        s->cr0 = value;
+        /* Clock rate and format are ignored.  */
+        s->bitmask = (1 << ((value & 15) + 1)) - 1;
+        break;
+    case 0x04: /* CR1 */
+        s->cr1 = value;
+        if ((s->cr1 & (PL022_CR1_MS | PL022_CR1_SSE))
+                   == (PL022_CR1_MS | PL022_CR1_SSE)) {
+            BADF("SPI peripheral mode not implemented\n");
+        }
+        pl022_xfer(s);
+        break;
+    case 0x08: /* DR */
+        if (s->tx_fifo_len < 8) {
+            DPRINTF("TX %02x\n", (unsigned)value);
+            s->tx_fifo[s->tx_fifo_head] = value & s->bitmask;
+            s->tx_fifo_head = (s->tx_fifo_head + 1) & 7;
+            s->tx_fifo_len++;
+            pl022_xfer(s);
+        }
+        break;
+    case 0x10: /* CPSR */
+        /* Prescaler.  Ignored.  */
+        s->cpsr = value & 0xff;
+        break;
+    case 0x14: /* IMSC */
+        s->im = value;
+        pl022_update(s);
+        break;
+    case 0x20: /* ICR */
+        /*
+         * write-1-to-clear: bit 0 clears ROR, bit 1 clears RT;
+         * RX and TX interrupts cannot be cleared this way.
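+         *
+         * For example, a guest acknowledges a receive-overrun interrupt
+         * by writing the ROR bit back to this register ("writel" and
+         * "base" are assumed platform helpers):
+         *
+         *     writel(base + 0x20, PL022_INT_ROR);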
+ */ + value &= PL022_INT_ROR | PL022_INT_RT; + s->is &= ~value; + break; + case 0x24: /* DMACR */ + if (value) { + qemu_log_mask(LOG_UNIMP, "pl022: DMA not implemented\n"); + } + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "pl022_write: Bad offset %x\n", (int)offset); + } +} + +static void pl022_reset(DeviceState *dev) +{ + PL022State *s = PL022(dev); + + s->rx_fifo_len = 0; + s->tx_fifo_len = 0; + s->im = 0; + s->is = PL022_INT_TX; + s->sr = PL022_SR_TFE | PL022_SR_TNF; +} + +static const MemoryRegionOps pl022_ops = { + .read = pl022_read, + .write = pl022_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static int pl022_post_load(void *opaque, int version_id) +{ + PL022State *s = opaque; + + if (s->tx_fifo_head < 0 || + s->tx_fifo_head >= ARRAY_SIZE(s->tx_fifo) || + s->rx_fifo_head < 0 || + s->rx_fifo_head >= ARRAY_SIZE(s->rx_fifo)) { + return -1; + } + return 0; +} + +static const VMStateDescription vmstate_pl022 = { + .name = "pl022_ssp", + .version_id = 1, + .minimum_version_id = 1, + .post_load = pl022_post_load, + .fields = (VMStateField[]) { + VMSTATE_UINT32(cr0, PL022State), + VMSTATE_UINT32(cr1, PL022State), + VMSTATE_UINT32(bitmask, PL022State), + VMSTATE_UINT32(sr, PL022State), + VMSTATE_UINT32(cpsr, PL022State), + VMSTATE_UINT32(is, PL022State), + VMSTATE_UINT32(im, PL022State), + VMSTATE_INT32(tx_fifo_head, PL022State), + VMSTATE_INT32(rx_fifo_head, PL022State), + VMSTATE_INT32(tx_fifo_len, PL022State), + VMSTATE_INT32(rx_fifo_len, PL022State), + VMSTATE_UINT16(tx_fifo[0], PL022State), + VMSTATE_UINT16(rx_fifo[0], PL022State), + VMSTATE_UINT16(tx_fifo[1], PL022State), + VMSTATE_UINT16(rx_fifo[1], PL022State), + VMSTATE_UINT16(tx_fifo[2], PL022State), + VMSTATE_UINT16(rx_fifo[2], PL022State), + VMSTATE_UINT16(tx_fifo[3], PL022State), + VMSTATE_UINT16(rx_fifo[3], PL022State), + VMSTATE_UINT16(tx_fifo[4], PL022State), + VMSTATE_UINT16(rx_fifo[4], PL022State), + VMSTATE_UINT16(tx_fifo[5], PL022State), + VMSTATE_UINT16(rx_fifo[5], PL022State), + VMSTATE_UINT16(tx_fifo[6], PL022State), + VMSTATE_UINT16(rx_fifo[6], PL022State), + VMSTATE_UINT16(tx_fifo[7], PL022State), + VMSTATE_UINT16(rx_fifo[7], PL022State), + VMSTATE_END_OF_LIST() + } +}; + +static void pl022_realize(DeviceState *dev, Error **errp) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + PL022State *s = PL022(dev); + + memory_region_init_io(&s->iomem, OBJECT(s), &pl022_ops, s, "pl022", 0x1000); + sysbus_init_mmio(sbd, &s->iomem); + sysbus_init_irq(sbd, &s->irq); + s->ssi = ssi_create_bus(dev, "ssi"); +} + +static void pl022_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = pl022_reset; + dc->vmsd = &vmstate_pl022; + dc->realize = pl022_realize; +} + +static const TypeInfo pl022_info = { + .name = TYPE_PL022, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(PL022State), + .class_init = pl022_class_init, +}; + +static void pl022_register_types(void) +{ + type_register_static(&pl022_info); +} + +type_init(pl022_register_types) diff --git a/hw/ssi/sifive_spi.c b/hw/ssi/sifive_spi.c new file mode 100644 index 000000000..03540cf5c --- /dev/null +++ b/hw/ssi/sifive_spi.c @@ -0,0 +1,357 @@ +/* + * QEMU model of the SiFive SPI Controller + * + * Copyright (c) 2021 Wind River Systems, Inc. + * + * Author: + * Bin Meng + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. 
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/irq.h"
+#include "hw/qdev-properties.h"
+#include "hw/sysbus.h"
+#include "hw/ssi/ssi.h"
+#include "qemu/fifo8.h"
+#include "qemu/log.h"
+#include "qemu/module.h"
+#include "hw/ssi/sifive_spi.h"
+
+#define R_SCKDIV        (0x00 / 4)
+#define R_SCKMODE       (0x04 / 4)
+#define R_CSID          (0x10 / 4)
+#define R_CSDEF         (0x14 / 4)
+#define R_CSMODE        (0x18 / 4)
+#define R_DELAY0        (0x28 / 4)
+#define R_DELAY1        (0x2C / 4)
+#define R_FMT           (0x40 / 4)
+#define R_TXDATA        (0x48 / 4)
+#define R_RXDATA        (0x4C / 4)
+#define R_TXMARK        (0x50 / 4)
+#define R_RXMARK        (0x54 / 4)
+#define R_FCTRL         (0x60 / 4)
+#define R_FFMT          (0x64 / 4)
+#define R_IE            (0x70 / 4)
+#define R_IP            (0x74 / 4)
+
+#define FMT_DIR         (1 << 3)
+
+#define TXDATA_FULL     (1 << 31)
+#define RXDATA_EMPTY    (1 << 31)
+
+#define IE_TXWM         (1 << 0)
+#define IE_RXWM         (1 << 1)
+
+#define IP_TXWM         (1 << 0)
+#define IP_RXWM         (1 << 1)
+
+#define FIFO_CAPACITY   8
+
+static void sifive_spi_txfifo_reset(SiFiveSPIState *s)
+{
+    fifo8_reset(&s->tx_fifo);
+
+    s->regs[R_TXDATA] &= ~TXDATA_FULL;
+    s->regs[R_IP] &= ~IP_TXWM;
+}
+
+static void sifive_spi_rxfifo_reset(SiFiveSPIState *s)
+{
+    fifo8_reset(&s->rx_fifo);
+
+    s->regs[R_RXDATA] |= RXDATA_EMPTY;
+    s->regs[R_IP] &= ~IP_RXWM;
+}
+
+static void sifive_spi_update_cs(SiFiveSPIState *s)
+{
+    int i;
+
+    for (i = 0; i < s->num_cs; i++) {
+        if (s->regs[R_CSDEF] & (1 << i)) {
+            qemu_set_irq(s->cs_lines[i], !(s->regs[R_CSMODE]));
+        }
+    }
+}
+
+static void sifive_spi_update_irq(SiFiveSPIState *s)
+{
+    int level;
+
+    if (fifo8_num_used(&s->tx_fifo) < s->regs[R_TXMARK]) {
+        s->regs[R_IP] |= IP_TXWM;
+    } else {
+        s->regs[R_IP] &= ~IP_TXWM;
+    }
+
+    if (fifo8_num_used(&s->rx_fifo) > s->regs[R_RXMARK]) {
+        s->regs[R_IP] |= IP_RXWM;
+    } else {
+        s->regs[R_IP] &= ~IP_RXWM;
+    }
+
+    level = s->regs[R_IP] & s->regs[R_IE] ? 1 : 0;
+    qemu_set_irq(s->irq, level);
+}
+
+static void sifive_spi_reset(DeviceState *d)
+{
+    SiFiveSPIState *s = SIFIVE_SPI(d);
+
+    memset(s->regs, 0, sizeof(s->regs));
+
+    /* The reset value is high for all implemented CS pins */
+    s->regs[R_CSDEF] = (1 << s->num_cs) - 1;
+
+    /* Populate registers with their default values */
+    s->regs[R_SCKDIV] = 0x03;
+    s->regs[R_DELAY0] = 0x1001;
+    s->regs[R_DELAY1] = 0x01;
+
+    sifive_spi_txfifo_reset(s);
+    sifive_spi_rxfifo_reset(s);
+
+    sifive_spi_update_cs(s);
+    sifive_spi_update_irq(s);
+}
+
+static void sifive_spi_flush_txfifo(SiFiveSPIState *s)
+{
+    uint8_t tx;
+    uint8_t rx;
+
+    while (!fifo8_is_empty(&s->tx_fifo)) {
+        tx = fifo8_pop(&s->tx_fifo);
+        rx = ssi_transfer(s->spi, tx);
+
+        if (!fifo8_is_full(&s->rx_fifo)) {
+            if (!(s->regs[R_FMT] & FMT_DIR)) {
+                fifo8_push(&s->rx_fifo, rx);
+            }
+        }
+    }
+}
+
+static bool sifive_spi_is_bad_reg(hwaddr addr, bool allow_reserved)
+{
+    bool bad;
+
+    switch (addr) {
+    /* reserved offsets */
+    case 0x08:
+    case 0x0C:
+    case 0x1C:
+    case 0x20:
+    case 0x24:
+    case 0x30:
+    case 0x34:
+    case 0x38:
+    case 0x3C:
+    case 0x44:
+    case 0x58:
+    case 0x5C:
+    case 0x68:
+    case 0x6C:
+        bad = allow_reserved ? false : true;
+        break;
+    default:
+        bad = false;
+    }
+
+    if (addr >= (SIFIVE_SPI_REG_NUM << 2)) {
+        bad = true;
+    }
+
+    return bad;
+}
+
+static uint64_t sifive_spi_read(void *opaque, hwaddr addr, unsigned int size)
+{
+    SiFiveSPIState *s = opaque;
+    uint32_t r;
+
+    if (sifive_spi_is_bad_reg(addr, true)) {
+        qemu_log_mask(LOG_GUEST_ERROR, "%s: bad read at address 0x%"
+                      HWADDR_PRIx "\n", __func__, addr);
+        return 0;
+    }
+
+    addr >>= 2;
+    switch (addr) {
+    case R_TXDATA:
+        if (fifo8_is_full(&s->tx_fifo)) {
+            return TXDATA_FULL;
+        }
+        r = 0;
+        break;
+
+    case R_RXDATA:
+        if (fifo8_is_empty(&s->rx_fifo)) {
+            return RXDATA_EMPTY;
+        }
+        r = fifo8_pop(&s->rx_fifo);
+        break;
+
+    default:
+        r = s->regs[addr];
+        break;
+    }
+
+    sifive_spi_update_irq(s);
+
+    return r;
+}
+
+static void sifive_spi_write(void *opaque, hwaddr addr,
+                             uint64_t val64, unsigned int size)
+{
+    SiFiveSPIState *s = opaque;
+    uint32_t value = val64;
+
+    if (sifive_spi_is_bad_reg(addr, false)) {
+        qemu_log_mask(LOG_GUEST_ERROR, "%s: bad write at addr=0x%"
+                      HWADDR_PRIx " value=0x%x\n", __func__, addr, value);
+        return;
+    }
+
+    addr >>= 2;
+    switch (addr) {
+    case R_CSID:
+        if (value >= s->num_cs) {
+            qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid csid %d\n",
+                          __func__, value);
+        } else {
+            s->regs[R_CSID] = value;
+            sifive_spi_update_cs(s);
+        }
+        break;
+
+    case R_CSDEF:
+        if (value >= (1 << s->num_cs)) {
+            qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid csdef %x\n",
+                          __func__, value);
+        } else {
+            s->regs[R_CSDEF] = value;
+        }
+        break;
+
+    case R_CSMODE:
+        if (value > 3) {
+            qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid csmode %x\n",
+                          __func__, value);
+        } else {
+            s->regs[R_CSMODE] = value;
+            sifive_spi_update_cs(s);
+        }
+        break;
+
+    case R_TXDATA:
+        if (!fifo8_is_full(&s->tx_fifo)) {
+            fifo8_push(&s->tx_fifo, (uint8_t)value);
+            sifive_spi_flush_txfifo(s);
+        }
+        break;
+
+    case R_RXDATA:
+    case R_IP:
+        qemu_log_mask(LOG_GUEST_ERROR,
+                      "%s: invalid write to read-only register 0x%"
+                      HWADDR_PRIx " with 0x%x\n", __func__, addr << 2, value);
+        break;
+
+    case R_TXMARK:
+    case R_RXMARK:
+        if (value >= FIFO_CAPACITY) {
+            qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid watermark %d\n",
+                          __func__, value);
+        } else {
+            s->regs[addr] = value;
+        }
+        break;
+
+    case R_FCTRL:
+    case R_FFMT:
+        qemu_log_mask(LOG_UNIMP,
+                      "%s: direct-map flash interface unimplemented\n",
+                      __func__);
+        break;
+
+    default:
+        s->regs[addr] = value;
+        break;
+    }
+
+    sifive_spi_update_irq(s);
+}
+
+static const MemoryRegionOps sifive_spi_ops = {
+    .read = sifive_spi_read,
+    .write = sifive_spi_write,
+    .endianness = DEVICE_LITTLE_ENDIAN,
+    .valid = {
+        .min_access_size = 4,
+        .max_access_size = 4
+    }
+};
+
+static void sifive_spi_realize(DeviceState *dev, Error **errp)
+{
+    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
+    SiFiveSPIState *s = SIFIVE_SPI(dev);
+    int i;
+
+    s->spi = ssi_create_bus(dev, "spi");
+    sysbus_init_irq(sbd, &s->irq);
+
+    s->cs_lines = g_new0(qemu_irq, s->num_cs);
+    for (i = 0; i < s->num_cs; i++) {
+        sysbus_init_irq(sbd, &s->cs_lines[i]);
+    }
+
+    memory_region_init_io(&s->mmio, OBJECT(s), &sifive_spi_ops, s,
+                          TYPE_SIFIVE_SPI, 0x1000);
+    sysbus_init_mmio(sbd, &s->mmio);
+
+    fifo8_create(&s->tx_fifo, FIFO_CAPACITY);
+    fifo8_create(&s->rx_fifo, FIFO_CAPACITY);
+}
+
+static Property sifive_spi_properties[] = {
+    DEFINE_PROP_UINT32("num-cs", SiFiveSPIState, num_cs, 1),
+    DEFINE_PROP_END_OF_LIST(),
+};
+
+static void sifive_spi_class_init(ObjectClass *klass, void *data)
+{
+    DeviceClass *dc = DEVICE_CLASS(klass);
+
+    device_class_set_props(dc,
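                           /*
                            * "num-cs" (the property registered above) sizes
                            * the controller; a board sets it before realize,
                            * e.g. as a sketch:
                            *
                            *     dev = qdev_new(TYPE_SIFIVE_SPI);
                            *     qdev_prop_set_uint32(dev, "num-cs", 4);
                            *     sysbus_realize_and_unref(SYS_BUS_DEVICE(dev),
                            *                              &error_fatal);
                            */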
sifive_spi_properties); + dc->reset = sifive_spi_reset; + dc->realize = sifive_spi_realize; +} + +static const TypeInfo sifive_spi_info = { + .name = TYPE_SIFIVE_SPI, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(SiFiveSPIState), + .class_init = sifive_spi_class_init, +}; + +static void sifive_spi_register_types(void) +{ + type_register_static(&sifive_spi_info); +} + +type_init(sifive_spi_register_types) diff --git a/hw/ssi/ssi.c b/hw/ssi/ssi.c new file mode 100644 index 000000000..003931fb5 --- /dev/null +++ b/hw/ssi/ssi.c @@ -0,0 +1,146 @@ +/* + * QEMU Synchronous Serial Interface support + * + * Copyright (c) 2009 CodeSourcery. + * Copyright (c) 2012 Peter A.G. Crosthwaite (peter.crosthwaite@petalogix.com) + * Copyright (c) 2012 PetaLogix Pty Ltd. + * Written by Paul Brook + * + * This code is licensed under the GNU GPL v2. + * + * Contributions after 2012-01-13 are licensed under the terms of the + * GNU GPL, version 2 or (at your option) any later version. + */ + +#include "qemu/osdep.h" +#include "hw/ssi/ssi.h" +#include "migration/vmstate.h" +#include "qemu/module.h" +#include "qapi/error.h" +#include "qom/object.h" + +struct SSIBus { + BusState parent_obj; +}; + +#define TYPE_SSI_BUS "SSI" +OBJECT_DECLARE_SIMPLE_TYPE(SSIBus, SSI_BUS) + +static const TypeInfo ssi_bus_info = { + .name = TYPE_SSI_BUS, + .parent = TYPE_BUS, + .instance_size = sizeof(SSIBus), +}; + +static void ssi_cs_default(void *opaque, int n, int level) +{ + SSIPeripheral *s = SSI_PERIPHERAL(opaque); + bool cs = !!level; + assert(n == 0); + if (s->cs != cs) { + SSIPeripheralClass *ssc = SSI_PERIPHERAL_GET_CLASS(s); + if (ssc->set_cs) { + ssc->set_cs(s, cs); + } + } + s->cs = cs; +} + +static uint32_t ssi_transfer_raw_default(SSIPeripheral *dev, uint32_t val) +{ + SSIPeripheralClass *ssc = SSI_PERIPHERAL_GET_CLASS(dev); + + if ((dev->cs && ssc->cs_polarity == SSI_CS_HIGH) || + (!dev->cs && ssc->cs_polarity == SSI_CS_LOW) || + ssc->cs_polarity == SSI_CS_NONE) { + return ssc->transfer(dev, val); + } + return 0; +} + +static void ssi_peripheral_realize(DeviceState *dev, Error **errp) +{ + SSIPeripheral *s = SSI_PERIPHERAL(dev); + SSIPeripheralClass *ssc = SSI_PERIPHERAL_GET_CLASS(s); + + if (ssc->transfer_raw == ssi_transfer_raw_default && + ssc->cs_polarity != SSI_CS_NONE) { + qdev_init_gpio_in_named(dev, ssi_cs_default, SSI_GPIO_CS, 1); + } + + ssc->realize(s, errp); +} + +static void ssi_peripheral_class_init(ObjectClass *klass, void *data) +{ + SSIPeripheralClass *ssc = SSI_PERIPHERAL_CLASS(klass); + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = ssi_peripheral_realize; + dc->bus_type = TYPE_SSI_BUS; + if (!ssc->transfer_raw) { + ssc->transfer_raw = ssi_transfer_raw_default; + } +} + +static const TypeInfo ssi_peripheral_info = { + .name = TYPE_SSI_PERIPHERAL, + .parent = TYPE_DEVICE, + .class_init = ssi_peripheral_class_init, + .class_size = sizeof(SSIPeripheralClass), + .abstract = true, +}; + +bool ssi_realize_and_unref(DeviceState *dev, SSIBus *bus, Error **errp) +{ + return qdev_realize_and_unref(dev, &bus->parent_obj, errp); +} + +DeviceState *ssi_create_peripheral(SSIBus *bus, const char *name) +{ + DeviceState *dev = qdev_new(name); + + ssi_realize_and_unref(dev, bus, &error_fatal); + return dev; +} + +SSIBus *ssi_create_bus(DeviceState *parent, const char *name) +{ + BusState *bus; + bus = qbus_new(TYPE_SSI_BUS, parent, name); + return SSI_BUS(bus); +} + +uint32_t ssi_transfer(SSIBus *bus, uint32_t val) +{ + BusState *b = BUS(bus); + BusChild *kid; + SSIPeripheralClass *ssc; + 
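    /*
+     * Every peripheral on the bus sees the word below; transfer_raw
+     * (ssi_transfer_raw_default unless a device overrides it) returns 0
+     * when the device's chip select is inactive, so the OR-accumulation
+     * yields the selected peripheral's response.  Typical board wiring,
+     * as a sketch ("ctrl" is an assumed controller device with a CS GPIO
+     * output; "m25p80" is one existing SSI peripheral type):
+     *
+     *     SSIBus *bus = ssi_create_bus(DEVICE(soc), "spi");
+     *     DeviceState *flash = ssi_create_peripheral(bus, "m25p80");
+     *     qdev_connect_gpio_out(ctrl, 0,
+     *         qdev_get_gpio_in_named(flash, SSI_GPIO_CS, 0));
+     */
+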
uint32_t r = 0; + + QTAILQ_FOREACH(kid, &b->children, sibling) { + SSIPeripheral *peripheral = SSI_PERIPHERAL(kid->child); + ssc = SSI_PERIPHERAL_GET_CLASS(peripheral); + r |= ssc->transfer_raw(peripheral, val); + } + + return r; +} + +const VMStateDescription vmstate_ssi_peripheral = { + .name = "SSISlave", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_BOOL(cs, SSIPeripheral), + VMSTATE_END_OF_LIST() + } +}; + +static void ssi_peripheral_register_types(void) +{ + type_register_static(&ssi_bus_info); + type_register_static(&ssi_peripheral_info); +} + +type_init(ssi_peripheral_register_types) diff --git a/hw/ssi/stm32f2xx_spi.c b/hw/ssi/stm32f2xx_spi.c new file mode 100644 index 000000000..cd6e8443d --- /dev/null +++ b/hw/ssi/stm32f2xx_spi.c @@ -0,0 +1,226 @@ +/* + * STM32F405 SPI + * + * Copyright (c) 2014 Alistair Francis + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "hw/ssi/stm32f2xx_spi.h" +#include "migration/vmstate.h" + +#ifndef STM_SPI_ERR_DEBUG +#define STM_SPI_ERR_DEBUG 0 +#endif + +#define DB_PRINT_L(lvl, fmt, args...) do { \ + if (STM_SPI_ERR_DEBUG >= lvl) { \ + qemu_log("%s: " fmt, __func__, ## args); \ + } \ +} while (0) + +#define DB_PRINT(fmt, args...) 
DB_PRINT_L(1, fmt, ## args) + +static void stm32f2xx_spi_reset(DeviceState *dev) +{ + STM32F2XXSPIState *s = STM32F2XX_SPI(dev); + + s->spi_cr1 = 0x00000000; + s->spi_cr2 = 0x00000000; + s->spi_sr = 0x0000000A; + s->spi_dr = 0x0000000C; + s->spi_crcpr = 0x00000007; + s->spi_rxcrcr = 0x00000000; + s->spi_txcrcr = 0x00000000; + s->spi_i2scfgr = 0x00000000; + s->spi_i2spr = 0x00000002; +} + +static void stm32f2xx_spi_transfer(STM32F2XXSPIState *s) +{ + DB_PRINT("Data to send: 0x%x\n", s->spi_dr); + + s->spi_dr = ssi_transfer(s->ssi, s->spi_dr); + s->spi_sr |= STM_SPI_SR_RXNE; + + DB_PRINT("Data received: 0x%x\n", s->spi_dr); +} + +static uint64_t stm32f2xx_spi_read(void *opaque, hwaddr addr, + unsigned int size) +{ + STM32F2XXSPIState *s = opaque; + + DB_PRINT("Address: 0x%" HWADDR_PRIx "\n", addr); + + switch (addr) { + case STM_SPI_CR1: + return s->spi_cr1; + case STM_SPI_CR2: + qemu_log_mask(LOG_UNIMP, "%s: Interrupts and DMA are not implemented\n", + __func__); + return s->spi_cr2; + case STM_SPI_SR: + return s->spi_sr; + case STM_SPI_DR: + stm32f2xx_spi_transfer(s); + s->spi_sr &= ~STM_SPI_SR_RXNE; + return s->spi_dr; + case STM_SPI_CRCPR: + qemu_log_mask(LOG_UNIMP, "%s: CRC is not implemented, the registers " \ + "are included for compatibility\n", __func__); + return s->spi_crcpr; + case STM_SPI_RXCRCR: + qemu_log_mask(LOG_UNIMP, "%s: CRC is not implemented, the registers " \ + "are included for compatibility\n", __func__); + return s->spi_rxcrcr; + case STM_SPI_TXCRCR: + qemu_log_mask(LOG_UNIMP, "%s: CRC is not implemented, the registers " \ + "are included for compatibility\n", __func__); + return s->spi_txcrcr; + case STM_SPI_I2SCFGR: + qemu_log_mask(LOG_UNIMP, "%s: I2S is not implemented, the registers " \ + "are included for compatibility\n", __func__); + return s->spi_i2scfgr; + case STM_SPI_I2SPR: + qemu_log_mask(LOG_UNIMP, "%s: I2S is not implemented, the registers " \ + "are included for compatibility\n", __func__); + return s->spi_i2spr; + default: + qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" HWADDR_PRIx "\n", + __func__, addr); + } + + return 0; +} + +static void stm32f2xx_spi_write(void *opaque, hwaddr addr, + uint64_t val64, unsigned int size) +{ + STM32F2XXSPIState *s = opaque; + uint32_t value = val64; + + DB_PRINT("Address: 0x%" HWADDR_PRIx ", Value: 0x%x\n", addr, value); + + switch (addr) { + case STM_SPI_CR1: + s->spi_cr1 = value; + return; + case STM_SPI_CR2: + qemu_log_mask(LOG_UNIMP, "%s: " \ + "Interrupts and DMA are not implemented\n", __func__); + s->spi_cr2 = value; + return; + case STM_SPI_SR: + /* Read only register, except for clearing the CRCERR bit, which + * is not supported + */ + return; + case STM_SPI_DR: + s->spi_dr = value; + stm32f2xx_spi_transfer(s); + return; + case STM_SPI_CRCPR: + qemu_log_mask(LOG_UNIMP, "%s: CRC is not implemented\n", __func__); + return; + case STM_SPI_RXCRCR: + qemu_log_mask(LOG_GUEST_ERROR, "%s: Read only register: " \ + "0x%" HWADDR_PRIx "\n", __func__, addr); + return; + case STM_SPI_TXCRCR: + qemu_log_mask(LOG_GUEST_ERROR, "%s: Read only register: " \ + "0x%" HWADDR_PRIx "\n", __func__, addr); + return; + case STM_SPI_I2SCFGR: + qemu_log_mask(LOG_UNIMP, "%s: " \ + "I2S is not implemented\n", __func__); + return; + case STM_SPI_I2SPR: + qemu_log_mask(LOG_UNIMP, "%s: " \ + "I2S is not implemented\n", __func__); + return; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad offset 0x%" HWADDR_PRIx "\n", __func__, addr); + } +} + +static const MemoryRegionOps stm32f2xx_spi_ops = { + .read = stm32f2xx_spi_read, + 
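    /*
+     * Note: the DR read handler above clocks the SSI bus again before
+     * returning, so a guest that writes DR and then reads it back
+     * performs two shifts per "transfer".
+     */
+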
.write = stm32f2xx_spi_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static const VMStateDescription vmstate_stm32f2xx_spi = { + .name = TYPE_STM32F2XX_SPI, + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(spi_cr1, STM32F2XXSPIState), + VMSTATE_UINT32(spi_cr2, STM32F2XXSPIState), + VMSTATE_UINT32(spi_sr, STM32F2XXSPIState), + VMSTATE_UINT32(spi_dr, STM32F2XXSPIState), + VMSTATE_UINT32(spi_crcpr, STM32F2XXSPIState), + VMSTATE_UINT32(spi_rxcrcr, STM32F2XXSPIState), + VMSTATE_UINT32(spi_txcrcr, STM32F2XXSPIState), + VMSTATE_UINT32(spi_i2scfgr, STM32F2XXSPIState), + VMSTATE_UINT32(spi_i2spr, STM32F2XXSPIState), + VMSTATE_END_OF_LIST() + } +}; + +static void stm32f2xx_spi_init(Object *obj) +{ + STM32F2XXSPIState *s = STM32F2XX_SPI(obj); + DeviceState *dev = DEVICE(obj); + + memory_region_init_io(&s->mmio, obj, &stm32f2xx_spi_ops, s, + TYPE_STM32F2XX_SPI, 0x400); + sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->mmio); + + sysbus_init_irq(SYS_BUS_DEVICE(obj), &s->irq); + + s->ssi = ssi_create_bus(dev, "ssi"); +} + +static void stm32f2xx_spi_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = stm32f2xx_spi_reset; + dc->vmsd = &vmstate_stm32f2xx_spi; +} + +static const TypeInfo stm32f2xx_spi_info = { + .name = TYPE_STM32F2XX_SPI, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(STM32F2XXSPIState), + .instance_init = stm32f2xx_spi_init, + .class_init = stm32f2xx_spi_class_init, +}; + +static void stm32f2xx_spi_register_types(void) +{ + type_register_static(&stm32f2xx_spi_info); +} + +type_init(stm32f2xx_spi_register_types) diff --git a/hw/ssi/trace-events b/hw/ssi/trace-events new file mode 100644 index 000000000..612d3d608 --- /dev/null +++ b/hw/ssi/trace-events @@ -0,0 +1,22 @@ +# aspeed_smc.c + +aspeed_smc_flash_set_segment(int cs, uint64_t reg, uint64_t start, uint64_t end) "CS%d segreg=0x%"PRIx64" [ 0x%"PRIx64" - 0x%"PRIx64" ]" +aspeed_smc_flash_read(int cs, uint64_t addr, uint32_t size, uint64_t data, int mode) "CS%d @0x%" PRIx64 " size %u: 0x%" PRIx64" mode:%d" +aspeed_smc_do_snoop(int cs, int index, int dummies, int data) "CS%d index:0x%x dummies:%d data:0x%x" +aspeed_smc_flash_write(int cs, uint64_t addr, uint32_t size, uint64_t data, int mode) "CS%d @0x%" PRIx64 " size %u: 0x%" PRIx64" mode:%d" +aspeed_smc_read(uint64_t addr, uint32_t size, uint64_t data) "@0x%" PRIx64 " size %u: 0x%" PRIx64 +aspeed_smc_dma_checksum(uint32_t addr, uint32_t data) "0x%08x: 0x%08x" +aspeed_smc_dma_rw(const char *dir, uint32_t flash_addr, uint32_t dram_addr, uint32_t size) "%s flash:@0x%08x dram:@0x%08x size:0x%08x" +aspeed_smc_write(uint64_t addr, uint32_t size, uint64_t data) "@0x%" PRIx64 " size %u: 0x%" PRIx64 +aspeed_smc_flash_select(int cs, const char *prefix) "CS%d %sselect" + +# npcm7xx_fiu.c + +npcm7xx_fiu_enter_reset(const char *id, int reset_type) "%s reset type: %d" +npcm7xx_fiu_hold_reset(const char *id) "%s" +npcm7xx_fiu_select(const char *id, int cs) "%s select CS%d" +npcm7xx_fiu_deselect(const char *id, int cs) "%s deselect CS%d" +npcm7xx_fiu_ctrl_read(const char *id, uint64_t addr, uint32_t data) "%s offset: 0x%04" PRIx64 " value: 0x%08" PRIx32 +npcm7xx_fiu_ctrl_write(const char *id, uint64_t addr, uint32_t data) "%s offset: 0x%04" PRIx64 " value: 0x%08" PRIx32 +npcm7xx_fiu_flash_read(const char *id, int cs, uint64_t addr, unsigned int size, uint64_t value) "%s[%d] offset: 0x%08" PRIx64 " size: %u value: 0x%" PRIx64 +npcm7xx_fiu_flash_write(const char *id, unsigned cs, uint64_t addr, 
unsigned int size, uint64_t value) "%s[%d] offset: 0x%08" PRIx64 " size: %u value: 0x%" PRIx64 diff --git a/hw/ssi/trace.h b/hw/ssi/trace.h new file mode 100644 index 000000000..0c1de2679 --- /dev/null +++ b/hw/ssi/trace.h @@ -0,0 +1 @@ +#include "trace/trace-hw_ssi.h" diff --git a/hw/ssi/xilinx_spi.c b/hw/ssi/xilinx_spi.c new file mode 100644 index 000000000..b2819a7ff --- /dev/null +++ b/hw/ssi/xilinx_spi.c @@ -0,0 +1,390 @@ +/* + * QEMU model of the Xilinx SPI Controller + * + * Copyright (C) 2010 Edgar E. Iglesias. + * Copyright (C) 2012 Peter A. G. Crosthwaite + * Copyright (C) 2012 PetaLogix + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "qemu/module.h" +#include "qemu/fifo8.h" + +#include "hw/irq.h" +#include "hw/qdev-properties.h" +#include "hw/ssi/ssi.h" +#include "qom/object.h" + +#ifdef XILINX_SPI_ERR_DEBUG +#define DB_PRINT(...) do { \ + fprintf(stderr, ": %s: ", __func__); \ + fprintf(stderr, ## __VA_ARGS__); \ + } while (0) +#else + #define DB_PRINT(...) 
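+/*
+ * Building with XILINX_SPI_ERR_DEBUG defined (for instance via configure's
+ * --extra-cflags=-DXILINX_SPI_ERR_DEBUG) turns DB_PRINT into an fprintf
+ * and enables the FIFO/IRQ traces used throughout this model.
+ */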
+#endif + +#define R_DGIER (0x1c / 4) +#define R_DGIER_IE (1 << 31) + +#define R_IPISR (0x20 / 4) +#define IRQ_DRR_NOT_EMPTY (1 << (31 - 23)) +#define IRQ_DRR_OVERRUN (1 << (31 - 26)) +#define IRQ_DRR_FULL (1 << (31 - 27)) +#define IRQ_TX_FF_HALF_EMPTY (1 << 6) +#define IRQ_DTR_UNDERRUN (1 << 3) +#define IRQ_DTR_EMPTY (1 << (31 - 29)) + +#define R_IPIER (0x28 / 4) +#define R_SRR (0x40 / 4) +#define R_SPICR (0x60 / 4) +#define R_SPICR_TXFF_RST (1 << 5) +#define R_SPICR_RXFF_RST (1 << 6) +#define R_SPICR_MTI (1 << 8) + +#define R_SPISR (0x64 / 4) +#define SR_TX_FULL (1 << 3) +#define SR_TX_EMPTY (1 << 2) +#define SR_RX_FULL (1 << 1) +#define SR_RX_EMPTY (1 << 0) + +#define R_SPIDTR (0x68 / 4) +#define R_SPIDRR (0x6C / 4) +#define R_SPISSR (0x70 / 4) +#define R_TX_FF_OCY (0x74 / 4) +#define R_RX_FF_OCY (0x78 / 4) +#define R_MAX (0x7C / 4) + +#define FIFO_CAPACITY 256 + +#define TYPE_XILINX_SPI "xlnx.xps-spi" +OBJECT_DECLARE_SIMPLE_TYPE(XilinxSPI, XILINX_SPI) + +struct XilinxSPI { + SysBusDevice parent_obj; + + MemoryRegion mmio; + + qemu_irq irq; + int irqline; + + uint8_t num_cs; + qemu_irq *cs_lines; + + SSIBus *spi; + + Fifo8 rx_fifo; + Fifo8 tx_fifo; + + uint32_t regs[R_MAX]; +}; + +static void txfifo_reset(XilinxSPI *s) +{ + fifo8_reset(&s->tx_fifo); + + s->regs[R_SPISR] &= ~SR_TX_FULL; + s->regs[R_SPISR] |= SR_TX_EMPTY; +} + +static void rxfifo_reset(XilinxSPI *s) +{ + fifo8_reset(&s->rx_fifo); + + s->regs[R_SPISR] |= SR_RX_EMPTY; + s->regs[R_SPISR] &= ~SR_RX_FULL; +} + +static void xlx_spi_update_cs(XilinxSPI *s) +{ + int i; + + for (i = 0; i < s->num_cs; ++i) { + qemu_set_irq(s->cs_lines[i], !(~s->regs[R_SPISSR] & 1 << i)); + } +} + +static void xlx_spi_update_irq(XilinxSPI *s) +{ + uint32_t pending; + + s->regs[R_IPISR] |= + (!fifo8_is_empty(&s->rx_fifo) ? IRQ_DRR_NOT_EMPTY : 0) | + (fifo8_is_full(&s->rx_fifo) ? IRQ_DRR_FULL : 0); + + pending = s->regs[R_IPISR] & s->regs[R_IPIER]; + + pending = pending && (s->regs[R_DGIER] & R_DGIER_IE); + pending = !!pending; + + /* This call lies right in the data paths so don't call the + irq chain unless things really changed. 
*/ + if (pending != s->irqline) { + s->irqline = pending; + DB_PRINT("irq_change of state %u ISR:%x IER:%X\n", + pending, s->regs[R_IPISR], s->regs[R_IPIER]); + qemu_set_irq(s->irq, pending); + } + +} + +static void xlx_spi_do_reset(XilinxSPI *s) +{ + memset(s->regs, 0, sizeof s->regs); + + rxfifo_reset(s); + txfifo_reset(s); + + s->regs[R_SPISSR] = ~0; + xlx_spi_update_irq(s); + xlx_spi_update_cs(s); +} + +static void xlx_spi_reset(DeviceState *d) +{ + xlx_spi_do_reset(XILINX_SPI(d)); +} + +static inline int spi_master_enabled(XilinxSPI *s) +{ + return !(s->regs[R_SPICR] & R_SPICR_MTI); +} + +static void spi_flush_txfifo(XilinxSPI *s) +{ + uint32_t tx; + uint32_t rx; + + while (!fifo8_is_empty(&s->tx_fifo)) { + tx = (uint32_t)fifo8_pop(&s->tx_fifo); + DB_PRINT("data tx:%x\n", tx); + rx = ssi_transfer(s->spi, tx); + DB_PRINT("data rx:%x\n", rx); + if (fifo8_is_full(&s->rx_fifo)) { + s->regs[R_IPISR] |= IRQ_DRR_OVERRUN; + } else { + fifo8_push(&s->rx_fifo, (uint8_t)rx); + if (fifo8_is_full(&s->rx_fifo)) { + s->regs[R_SPISR] |= SR_RX_FULL; + s->regs[R_IPISR] |= IRQ_DRR_FULL; + } + } + + s->regs[R_SPISR] &= ~SR_RX_EMPTY; + s->regs[R_SPISR] &= ~SR_TX_FULL; + s->regs[R_SPISR] |= SR_TX_EMPTY; + + s->regs[R_IPISR] |= IRQ_DTR_EMPTY; + s->regs[R_IPISR] |= IRQ_DRR_NOT_EMPTY; + } + +} + +static uint64_t +spi_read(void *opaque, hwaddr addr, unsigned int size) +{ + XilinxSPI *s = opaque; + uint32_t r = 0; + + addr >>= 2; + switch (addr) { + case R_SPIDRR: + if (fifo8_is_empty(&s->rx_fifo)) { + DB_PRINT("Read from empty FIFO!\n"); + return 0xdeadbeef; + } + + s->regs[R_SPISR] &= ~SR_RX_FULL; + r = fifo8_pop(&s->rx_fifo); + if (fifo8_is_empty(&s->rx_fifo)) { + s->regs[R_SPISR] |= SR_RX_EMPTY; + } + break; + + case R_SPISR: + r = s->regs[addr]; + break; + + default: + if (addr < ARRAY_SIZE(s->regs)) { + r = s->regs[addr]; + } + break; + + } + DB_PRINT("addr=" TARGET_FMT_plx " = %x\n", addr * 4, r); + xlx_spi_update_irq(s); + return r; +} + +static void +spi_write(void *opaque, hwaddr addr, + uint64_t val64, unsigned int size) +{ + XilinxSPI *s = opaque; + uint32_t value = val64; + + DB_PRINT("addr=" TARGET_FMT_plx " = %x\n", addr, value); + addr >>= 2; + switch (addr) { + case R_SRR: + if (value != 0xa) { + DB_PRINT("Invalid write to SRR %x\n", value); + } else { + xlx_spi_do_reset(s); + } + break; + + case R_SPIDTR: + s->regs[R_SPISR] &= ~SR_TX_EMPTY; + fifo8_push(&s->tx_fifo, (uint8_t)value); + if (fifo8_is_full(&s->tx_fifo)) { + s->regs[R_SPISR] |= SR_TX_FULL; + } + if (!spi_master_enabled(s)) { + goto done; + } else { + DB_PRINT("DTR and master enabled\n"); + } + spi_flush_txfifo(s); + break; + + case R_SPISR: + DB_PRINT("Invalid write to SPISR %x\n", value); + break; + + case R_IPISR: + /* Toggle the bits. */ + s->regs[addr] ^= value; + break; + + /* Slave Select Register. */ + case R_SPISSR: + s->regs[addr] = value; + xlx_spi_update_cs(s); + break; + + case R_SPICR: + /* FIXME: reset irq and sr state to empty queues. 
*/ + if (value & R_SPICR_RXFF_RST) { + rxfifo_reset(s); + } + + if (value & R_SPICR_TXFF_RST) { + txfifo_reset(s); + } + value &= ~(R_SPICR_RXFF_RST | R_SPICR_TXFF_RST); + s->regs[addr] = value; + + if (!(value & R_SPICR_MTI)) { + spi_flush_txfifo(s); + } + break; + + default: + if (addr < ARRAY_SIZE(s->regs)) { + s->regs[addr] = value; + } + break; + } + +done: + xlx_spi_update_irq(s); +} + +static const MemoryRegionOps spi_ops = { + .read = spi_read, + .write = spi_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4 + } +}; + +static void xilinx_spi_realize(DeviceState *dev, Error **errp) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + XilinxSPI *s = XILINX_SPI(dev); + int i; + + DB_PRINT("\n"); + + s->spi = ssi_create_bus(dev, "spi"); + + sysbus_init_irq(sbd, &s->irq); + s->cs_lines = g_new0(qemu_irq, s->num_cs); + for (i = 0; i < s->num_cs; ++i) { + sysbus_init_irq(sbd, &s->cs_lines[i]); + } + + memory_region_init_io(&s->mmio, OBJECT(s), &spi_ops, s, + "xilinx-spi", R_MAX * 4); + sysbus_init_mmio(sbd, &s->mmio); + + s->irqline = -1; + + fifo8_create(&s->tx_fifo, FIFO_CAPACITY); + fifo8_create(&s->rx_fifo, FIFO_CAPACITY); +} + +static const VMStateDescription vmstate_xilinx_spi = { + .name = "xilinx_spi", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_FIFO8(tx_fifo, XilinxSPI), + VMSTATE_FIFO8(rx_fifo, XilinxSPI), + VMSTATE_UINT32_ARRAY(regs, XilinxSPI, R_MAX), + VMSTATE_END_OF_LIST() + } +}; + +static Property xilinx_spi_properties[] = { + DEFINE_PROP_UINT8("num-ss-bits", XilinxSPI, num_cs, 1), + DEFINE_PROP_END_OF_LIST(), +}; + +static void xilinx_spi_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = xilinx_spi_realize; + dc->reset = xlx_spi_reset; + device_class_set_props(dc, xilinx_spi_properties); + dc->vmsd = &vmstate_xilinx_spi; +} + +static const TypeInfo xilinx_spi_info = { + .name = TYPE_XILINX_SPI, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(XilinxSPI), + .class_init = xilinx_spi_class_init, +}; + +static void xilinx_spi_register_types(void) +{ + type_register_static(&xilinx_spi_info); +} + +type_init(xilinx_spi_register_types) diff --git a/hw/ssi/xilinx_spips.c b/hw/ssi/xilinx_spips.c new file mode 100644 index 000000000..1e9dba203 --- /dev/null +++ b/hw/ssi/xilinx_spips.c @@ -0,0 +1,1501 @@ +/* + * QEMU model of the Xilinx Zynq SPI controller + * + * Copyright (c) 2012 Peter A. G. Crosthwaite + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "hw/sysbus.h" +#include "hw/irq.h" +#include "hw/ptimer.h" +#include "hw/qdev-properties.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "qemu/bitops.h" +#include "hw/ssi/xilinx_spips.h" +#include "qapi/error.h" +#include "hw/register.h" +#include "sysemu/dma.h" +#include "migration/blocker.h" +#include "migration/vmstate.h" + +#ifndef XILINX_SPIPS_ERR_DEBUG +#define XILINX_SPIPS_ERR_DEBUG 0 +#endif + +#define DB_PRINT_L(level, ...) do { \ + if (XILINX_SPIPS_ERR_DEBUG > (level)) { \ + fprintf(stderr, ": %s: ", __func__); \ + fprintf(stderr, ## __VA_ARGS__); \ + } \ +} while (0) + +/* config register */ +#define R_CONFIG (0x00 / 4) +#define IFMODE (1U << 31) +#define R_CONFIG_ENDIAN (1 << 26) +#define MODEFAIL_GEN_EN (1 << 17) +#define MAN_START_COM (1 << 16) +#define MAN_START_EN (1 << 15) +#define MANUAL_CS (1 << 14) +#define CS (0xF << 10) +#define CS_SHIFT (10) +#define PERI_SEL (1 << 9) +#define REF_CLK (1 << 8) +#define FIFO_WIDTH (3 << 6) +#define BAUD_RATE_DIV (7 << 3) +#define CLK_PH (1 << 2) +#define CLK_POL (1 << 1) +#define MODE_SEL (1 << 0) +#define R_CONFIG_RSVD (0x7bf40000) + +/* interrupt mechanism */ +#define R_INTR_STATUS (0x04 / 4) +#define R_INTR_STATUS_RESET (0x104) +#define R_INTR_EN (0x08 / 4) +#define R_INTR_DIS (0x0C / 4) +#define R_INTR_MASK (0x10 / 4) +#define IXR_TX_FIFO_UNDERFLOW (1 << 6) +/* Poll timeout not implemented */ +#define IXR_RX_FIFO_EMPTY (1 << 11) +#define IXR_GENERIC_FIFO_FULL (1 << 10) +#define IXR_GENERIC_FIFO_NOT_FULL (1 << 9) +#define IXR_TX_FIFO_EMPTY (1 << 8) +#define IXR_GENERIC_FIFO_EMPTY (1 << 7) +#define IXR_RX_FIFO_FULL (1 << 5) +#define IXR_RX_FIFO_NOT_EMPTY (1 << 4) +#define IXR_TX_FIFO_FULL (1 << 3) +#define IXR_TX_FIFO_NOT_FULL (1 << 2) +#define IXR_TX_FIFO_MODE_FAIL (1 << 1) +#define IXR_RX_FIFO_OVERFLOW (1 << 0) +#define IXR_ALL ((1 << 13) - 1) +#define GQSPI_IXR_MASK 0xFBE +#define IXR_SELF_CLEAR \ +(IXR_GENERIC_FIFO_EMPTY \ +| IXR_GENERIC_FIFO_FULL \ +| IXR_GENERIC_FIFO_NOT_FULL \ +| IXR_TX_FIFO_EMPTY \ +| IXR_TX_FIFO_FULL \ +| IXR_TX_FIFO_NOT_FULL \ +| IXR_RX_FIFO_EMPTY \ +| IXR_RX_FIFO_FULL \ +| IXR_RX_FIFO_NOT_EMPTY) + +#define R_EN (0x14 / 4) +#define R_DELAY (0x18 / 4) +#define R_TX_DATA (0x1C / 4) +#define R_RX_DATA (0x20 / 4) +#define R_SLAVE_IDLE_COUNT (0x24 / 4) +#define R_TX_THRES (0x28 / 4) +#define R_RX_THRES (0x2C / 4) +#define R_GPIO (0x30 / 4) +#define R_LPBK_DLY_ADJ (0x38 / 4) +#define R_LPBK_DLY_ADJ_RESET (0x33) +#define R_IOU_TAPDLY_BYPASS (0x3C / 4) +#define R_TXD1 (0x80 / 4) +#define R_TXD2 (0x84 / 4) +#define R_TXD3 (0x88 / 4) + +#define R_LQSPI_CFG (0xa0 / 4) +#define R_LQSPI_CFG_RESET 0x03A002EB +#define LQSPI_CFG_LQ_MODE (1U << 31) +#define LQSPI_CFG_TWO_MEM (1 << 30) +#define LQSPI_CFG_SEP_BUS (1 << 29) +#define LQSPI_CFG_U_PAGE (1 << 28) +#define LQSPI_CFG_ADDR4 (1 << 27) +#define LQSPI_CFG_MODE_EN (1 << 25) +#define LQSPI_CFG_MODE_WIDTH 8 +#define LQSPI_CFG_MODE_SHIFT 16 +#define LQSPI_CFG_DUMMY_WIDTH 3 +#define LQSPI_CFG_DUMMY_SHIFT 8 +#define LQSPI_CFG_INST_CODE 0xFF + +#define R_CMND (0xc0 / 4) + #define R_CMND_RXFIFO_DRAIN (1 << 19) + FIELD(CMND, PARTIAL_BYTE_LEN, 16, 3) +#define R_CMND_EXT_ADD (1 << 15) + FIELD(CMND, RX_DISCARD, 8, 7) + FIELD(CMND, DUMMY_CYCLES, 2, 6) 
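+/*
+ * FIELD() (from hw/registerfields.h) only defines shift/length constants;
+ * e.g. FIELD(CMND, RX_DISCARD, 8, 7) yields R_CMND_RX_DISCARD_SHIFT == 8
+ * and R_CMND_RX_DISCARD_LENGTH == 7, which code below consumes as
+ *
+ *     s->rx_discard = ARRAY_FIELD_EX32(s->regs, CMND, RX_DISCARD);
+ *
+ * i.e. an extraction of bits [14:8] of the command register.
+ */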
+#define R_CMND_DMA_EN        (1 << 1)
+#define R_CMND_PUSH_WAIT     (1 << 0)
+#define R_TRANSFER_SIZE     (0xc4 / 4)
+#define R_LQSPI_STS         (0xA4 / 4)
+#define LQSPI_STS_WR_RECVD      (1 << 1)
+
+#define R_DUMMY_CYCLE_EN    (0xC8 / 4)
+#define R_ECO               (0xF8 / 4)
+#define R_MOD_ID            (0xFC / 4)
+
+#define R_GQSPI_SELECT          (0x144 / 4)
+    FIELD(GQSPI_SELECT, GENERIC_QSPI_EN, 0, 1)
+#define R_GQSPI_ISR         (0x104 / 4)
+#define R_GQSPI_IER         (0x108 / 4)
+#define R_GQSPI_IDR         (0x10c / 4)
+#define R_GQSPI_IMR         (0x110 / 4)
+#define R_GQSPI_IMR_RESET   (0xfbe)
+#define R_GQSPI_TX_THRESH   (0x128 / 4)
+#define R_GQSPI_RX_THRESH   (0x12c / 4)
+#define R_GQSPI_GPIO        (0x130 / 4)
+#define R_GQSPI_LPBK_DLY_ADJ (0x138 / 4)
+#define R_GQSPI_LPBK_DLY_ADJ_RESET (0x33)
+#define R_GQSPI_CNFG        (0x100 / 4)
+    FIELD(GQSPI_CNFG, MODE_EN, 30, 2)
+    FIELD(GQSPI_CNFG, GEN_FIFO_START_MODE, 29, 1)
+    FIELD(GQSPI_CNFG, GEN_FIFO_START, 28, 1)
+    FIELD(GQSPI_CNFG, ENDIAN, 26, 1)
+    /* Poll timeout not implemented */
+    FIELD(GQSPI_CNFG, EN_POLL_TIMEOUT, 20, 1)
+    /* QEMU doesn't care about any of these last three */
+    FIELD(GQSPI_CNFG, BR, 3, 3)
+    FIELD(GQSPI_CNFG, CPH, 2, 1)
+    FIELD(GQSPI_CNFG, CPL, 1, 1)
+#define R_GQSPI_GEN_FIFO        (0x140 / 4)
+#define R_GQSPI_TXD         (0x11c / 4)
+#define R_GQSPI_RXD         (0x120 / 4)
+#define R_GQSPI_FIFO_CTRL   (0x14c / 4)
+    FIELD(GQSPI_FIFO_CTRL, RX_FIFO_RESET, 2, 1)
+    FIELD(GQSPI_FIFO_CTRL, TX_FIFO_RESET, 1, 1)
+    FIELD(GQSPI_FIFO_CTRL, GENERIC_FIFO_RESET, 0, 1)
+#define R_GQSPI_GFIFO_THRESH    (0x150 / 4)
+#define R_GQSPI_DATA_STS (0x15c / 4)
+/*
+ * We use the snapshot register to hold the core state for the currently
+ * or most recently executed command. So the generic fifo format is defined
+ * for the snapshot register.
+ */
+#define R_GQSPI_GF_SNAPSHOT (0x160 / 4)
+    FIELD(GQSPI_GF_SNAPSHOT, POLL, 19, 1)
+    FIELD(GQSPI_GF_SNAPSHOT, STRIPE, 18, 1)
+    FIELD(GQSPI_GF_SNAPSHOT, RECIEVE, 17, 1)
+    FIELD(GQSPI_GF_SNAPSHOT, TRANSMIT, 16, 1)
+    FIELD(GQSPI_GF_SNAPSHOT, DATA_BUS_SELECT, 14, 2)
+    FIELD(GQSPI_GF_SNAPSHOT, CHIP_SELECT, 12, 2)
+    FIELD(GQSPI_GF_SNAPSHOT, SPI_MODE, 10, 2)
+    FIELD(GQSPI_GF_SNAPSHOT, EXPONENT, 9, 1)
+    FIELD(GQSPI_GF_SNAPSHOT, DATA_XFER, 8, 1)
+    FIELD(GQSPI_GF_SNAPSHOT, IMMEDIATE_DATA, 0, 8)
+#define R_GQSPI_MOD_ID        (0x1fc / 4)
+#define R_GQSPI_MOD_ID_RESET  (0x10a0000)
+
+/* size of TXRX FIFOs */
+#define RXFF_A          (128)
+#define TXFF_A          (128)
+
+#define RXFF_A_Q          (64 * 4)
+#define TXFF_A_Q          (64 * 4)
+
+/* 16MB per linear region */
+#define LQSPI_ADDRESS_BITS 24
+
+#define SNOOP_CHECKING 0xFF
+#define SNOOP_ADDR 0xF0
+#define SNOOP_NONE 0xEE
+#define SNOOP_STRIPING 0
+
+#define MIN_NUM_BUSSES 1
+#define MAX_NUM_BUSSES 2
+
+static inline int num_effective_busses(XilinxSPIPS *s)
+{
+    return (s->regs[R_LQSPI_CFG] & LQSPI_CFG_SEP_BUS &&
+            s->regs[R_LQSPI_CFG] & LQSPI_CFG_TWO_MEM) ? s->num_busses : 1;
+}
+
+static void xilinx_spips_update_cs(XilinxSPIPS *s, int field)
+{
+    int i;
+
+    for (i = 0; i < s->num_cs * s->num_busses; i++) {
+        bool old_state = s->cs_lines_state[i];
+        bool new_state = field & (1 << i);
+
+        if (old_state != new_state) {
+            s->cs_lines_state[i] = new_state;
+            s->rx_discard = ARRAY_FIELD_EX32(s->regs, CMND, RX_DISCARD);
+            DB_PRINT_L(1, "%sselecting peripheral %d\n",
+                       new_state ?
"" : "de", i); + } + qemu_set_irq(s->cs_lines[i], !new_state); + } + if (!(field & ((1 << (s->num_cs * s->num_busses)) - 1))) { + s->snoop_state = SNOOP_CHECKING; + s->cmd_dummies = 0; + s->link_state = 1; + s->link_state_next = 1; + s->link_state_next_when = 0; + DB_PRINT_L(1, "moving to snoop check state\n"); + } +} + +static void xlnx_zynqmp_qspips_update_cs_lines(XlnxZynqMPQSPIPS *s) +{ + if (s->regs[R_GQSPI_GF_SNAPSHOT]) { + int field = ARRAY_FIELD_EX32(s->regs, GQSPI_GF_SNAPSHOT, CHIP_SELECT); + bool upper_cs_sel = field & (1 << 1); + bool lower_cs_sel = field & 1; + bool bus0_enabled; + bool bus1_enabled; + uint8_t buses; + int cs = 0; + + buses = ARRAY_FIELD_EX32(s->regs, GQSPI_GF_SNAPSHOT, DATA_BUS_SELECT); + bus0_enabled = buses & 1; + bus1_enabled = buses & (1 << 1); + + if (bus0_enabled && bus1_enabled) { + if (lower_cs_sel) { + cs |= 1; + } + if (upper_cs_sel) { + cs |= 1 << 3; + } + } else if (bus0_enabled) { + if (lower_cs_sel) { + cs |= 1; + } + if (upper_cs_sel) { + cs |= 1 << 1; + } + } else if (bus1_enabled) { + if (lower_cs_sel) { + cs |= 1 << 2; + } + if (upper_cs_sel) { + cs |= 1 << 3; + } + } + xilinx_spips_update_cs(XILINX_SPIPS(s), cs); + } +} + +static void xilinx_spips_update_cs_lines(XilinxSPIPS *s) +{ + int field = ~((s->regs[R_CONFIG] & CS) >> CS_SHIFT); + + /* In dual parallel, mirror low CS to both */ + if (num_effective_busses(s) == 2) { + /* Single bit chip-select for qspi */ + field &= 0x1; + field |= field << 3; + /* Dual stack U-Page */ + } else if (s->regs[R_LQSPI_CFG] & LQSPI_CFG_TWO_MEM && + s->regs[R_LQSPI_STS] & LQSPI_CFG_U_PAGE) { + /* Single bit chip-select for qspi */ + field &= 0x1; + /* change from CS0 to CS1 */ + field <<= 1; + } + /* Auto CS */ + if (!(s->regs[R_CONFIG] & MANUAL_CS) && + fifo8_is_empty(&s->tx_fifo)) { + field = 0; + } + xilinx_spips_update_cs(s, field); +} + +static void xilinx_spips_update_ixr(XilinxSPIPS *s) +{ + if (!(s->regs[R_LQSPI_CFG] & LQSPI_CFG_LQ_MODE)) { + s->regs[R_INTR_STATUS] &= ~IXR_SELF_CLEAR; + s->regs[R_INTR_STATUS] |= + (fifo8_is_full(&s->rx_fifo) ? IXR_RX_FIFO_FULL : 0) | + (s->rx_fifo.num >= s->regs[R_RX_THRES] ? + IXR_RX_FIFO_NOT_EMPTY : 0) | + (fifo8_is_full(&s->tx_fifo) ? IXR_TX_FIFO_FULL : 0) | + (fifo8_is_empty(&s->tx_fifo) ? IXR_TX_FIFO_EMPTY : 0) | + (s->tx_fifo.num < s->regs[R_TX_THRES] ? IXR_TX_FIFO_NOT_FULL : 0); + } + int new_irqline = !!(s->regs[R_INTR_MASK] & s->regs[R_INTR_STATUS] & + IXR_ALL); + if (new_irqline != s->irqline) { + s->irqline = new_irqline; + qemu_set_irq(s->irq, s->irqline); + } +} + +static void xlnx_zynqmp_qspips_update_ixr(XlnxZynqMPQSPIPS *s) +{ + uint32_t gqspi_int; + int new_irqline; + + s->regs[R_GQSPI_ISR] &= ~IXR_SELF_CLEAR; + s->regs[R_GQSPI_ISR] |= + (fifo32_is_empty(&s->fifo_g) ? IXR_GENERIC_FIFO_EMPTY : 0) | + (fifo32_is_full(&s->fifo_g) ? IXR_GENERIC_FIFO_FULL : 0) | + (s->fifo_g.fifo.num < s->regs[R_GQSPI_GFIFO_THRESH] ? + IXR_GENERIC_FIFO_NOT_FULL : 0) | + (fifo8_is_empty(&s->rx_fifo_g) ? IXR_RX_FIFO_EMPTY : 0) | + (fifo8_is_full(&s->rx_fifo_g) ? IXR_RX_FIFO_FULL : 0) | + (s->rx_fifo_g.num >= s->regs[R_GQSPI_RX_THRESH] ? + IXR_RX_FIFO_NOT_EMPTY : 0) | + (fifo8_is_empty(&s->tx_fifo_g) ? IXR_TX_FIFO_EMPTY : 0) | + (fifo8_is_full(&s->tx_fifo_g) ? IXR_TX_FIFO_FULL : 0) | + (s->tx_fifo_g.num < s->regs[R_GQSPI_TX_THRESH] ? 
+ IXR_TX_FIFO_NOT_FULL : 0); + + /* GQSPI Interrupt Trigger Status */ + gqspi_int = (~s->regs[R_GQSPI_IMR]) & s->regs[R_GQSPI_ISR] & GQSPI_IXR_MASK; + new_irqline = !!(gqspi_int & IXR_ALL); + + /* drive external interrupt pin */ + if (new_irqline != s->gqspi_irqline) { + s->gqspi_irqline = new_irqline; + qemu_set_irq(XILINX_SPIPS(s)->irq, s->gqspi_irqline); + } +} + +static void xilinx_spips_reset(DeviceState *d) +{ + XilinxSPIPS *s = XILINX_SPIPS(d); + + memset(s->regs, 0, sizeof(s->regs)); + + fifo8_reset(&s->rx_fifo); + fifo8_reset(&s->rx_fifo); + /* non zero resets */ + s->regs[R_CONFIG] |= MODEFAIL_GEN_EN; + s->regs[R_SLAVE_IDLE_COUNT] = 0xFF; + s->regs[R_TX_THRES] = 1; + s->regs[R_RX_THRES] = 1; + /* FIXME: move magic number definition somewhere sensible */ + s->regs[R_MOD_ID] = 0x01090106; + s->regs[R_LQSPI_CFG] = R_LQSPI_CFG_RESET; + s->link_state = 1; + s->link_state_next = 1; + s->link_state_next_when = 0; + s->snoop_state = SNOOP_CHECKING; + s->cmd_dummies = 0; + s->man_start_com = false; + xilinx_spips_update_ixr(s); + xilinx_spips_update_cs_lines(s); +} + +static void xlnx_zynqmp_qspips_reset(DeviceState *d) +{ + XlnxZynqMPQSPIPS *s = XLNX_ZYNQMP_QSPIPS(d); + + xilinx_spips_reset(d); + + memset(s->regs, 0, sizeof(s->regs)); + + fifo8_reset(&s->rx_fifo_g); + fifo8_reset(&s->rx_fifo_g); + fifo32_reset(&s->fifo_g); + s->regs[R_INTR_STATUS] = R_INTR_STATUS_RESET; + s->regs[R_GPIO] = 1; + s->regs[R_LPBK_DLY_ADJ] = R_LPBK_DLY_ADJ_RESET; + s->regs[R_GQSPI_GFIFO_THRESH] = 0x10; + s->regs[R_MOD_ID] = 0x01090101; + s->regs[R_GQSPI_IMR] = R_GQSPI_IMR_RESET; + s->regs[R_GQSPI_TX_THRESH] = 1; + s->regs[R_GQSPI_RX_THRESH] = 1; + s->regs[R_GQSPI_GPIO] = 1; + s->regs[R_GQSPI_LPBK_DLY_ADJ] = R_GQSPI_LPBK_DLY_ADJ_RESET; + s->regs[R_GQSPI_MOD_ID] = R_GQSPI_MOD_ID_RESET; + s->man_start_com_g = false; + s->gqspi_irqline = 0; + xlnx_zynqmp_qspips_update_ixr(s); +} + +/* + * N way (num) in place bit striper. Lay out row wise bits (MSB to LSB) + * column wise (from element 0 to N-1). num is the length of x, and dir + * reverses the direction of the transform. Best illustrated by example: + * Each digit in the below array is a single bit (num == 3): + * + * {{ 76543210, } ----- stripe (dir == false) -----> {{ 741gdaFC, } + * { hgfedcba, } { 630fcHEB, } + * { HGFEDCBA, }} <---- upstripe (dir == true) ----- { 52hebGDA, }} + */ + +static inline void stripe8(uint8_t *x, int num, bool dir) +{ + uint8_t r[MAX_NUM_BUSSES]; + int idx[2] = {0, 0}; + int bit[2] = {0, 7}; + int d = dir; + + assert(num <= MAX_NUM_BUSSES); + memset(r, 0, sizeof(uint8_t) * num); + + for (idx[0] = 0; idx[0] < num; ++idx[0]) { + for (bit[0] = 7; bit[0] >= 0; bit[0]--) { + r[idx[!d]] |= x[idx[d]] & 1 << bit[d] ? 
1 << bit[!d] : 0; + idx[1] = (idx[1] + 1) % num; + if (!idx[1]) { + bit[1]--; + } + } + } + memcpy(x, r, sizeof(uint8_t) * num); +} + +static void xlnx_zynqmp_qspips_flush_fifo_g(XlnxZynqMPQSPIPS *s) +{ + while (s->regs[R_GQSPI_DATA_STS] || !fifo32_is_empty(&s->fifo_g)) { + uint8_t tx_rx[2] = { 0 }; + int num_stripes = 1; + uint8_t busses; + int i; + + if (!s->regs[R_GQSPI_DATA_STS]) { + uint8_t imm; + + s->regs[R_GQSPI_GF_SNAPSHOT] = fifo32_pop(&s->fifo_g); + DB_PRINT_L(0, "GQSPI command: %x\n", s->regs[R_GQSPI_GF_SNAPSHOT]); + if (!s->regs[R_GQSPI_GF_SNAPSHOT]) { + DB_PRINT_L(0, "Dummy GQSPI Delay Command Entry, Do nothing"); + continue; + } + xlnx_zynqmp_qspips_update_cs_lines(s); + + imm = ARRAY_FIELD_EX32(s->regs, GQSPI_GF_SNAPSHOT, IMMEDIATE_DATA); + if (!ARRAY_FIELD_EX32(s->regs, GQSPI_GF_SNAPSHOT, DATA_XFER)) { + /* immedate transfer */ + if (ARRAY_FIELD_EX32(s->regs, GQSPI_GF_SNAPSHOT, TRANSMIT) || + ARRAY_FIELD_EX32(s->regs, GQSPI_GF_SNAPSHOT, RECIEVE)) { + s->regs[R_GQSPI_DATA_STS] = 1; + /* CS setup/hold - do nothing */ + } else { + s->regs[R_GQSPI_DATA_STS] = 0; + } + } else if (ARRAY_FIELD_EX32(s->regs, GQSPI_GF_SNAPSHOT, EXPONENT)) { + if (imm > 31) { + qemu_log_mask(LOG_UNIMP, "QSPI exponential transfer too" + " long - 2 ^ %" PRId8 " requested\n", imm); + } + s->regs[R_GQSPI_DATA_STS] = 1ul << imm; + } else { + s->regs[R_GQSPI_DATA_STS] = imm; + } + } + /* Zero length transfer check */ + if (!s->regs[R_GQSPI_DATA_STS]) { + continue; + } + if (ARRAY_FIELD_EX32(s->regs, GQSPI_GF_SNAPSHOT, RECIEVE) && + fifo8_is_full(&s->rx_fifo_g)) { + /* No space in RX fifo for transfer - try again later */ + return; + } + if (ARRAY_FIELD_EX32(s->regs, GQSPI_GF_SNAPSHOT, STRIPE) && + (ARRAY_FIELD_EX32(s->regs, GQSPI_GF_SNAPSHOT, TRANSMIT) || + ARRAY_FIELD_EX32(s->regs, GQSPI_GF_SNAPSHOT, RECIEVE))) { + num_stripes = 2; + } + if (!ARRAY_FIELD_EX32(s->regs, GQSPI_GF_SNAPSHOT, DATA_XFER)) { + tx_rx[0] = ARRAY_FIELD_EX32(s->regs, + GQSPI_GF_SNAPSHOT, IMMEDIATE_DATA); + } else if (ARRAY_FIELD_EX32(s->regs, GQSPI_GF_SNAPSHOT, TRANSMIT)) { + for (i = 0; i < num_stripes; ++i) { + if (!fifo8_is_empty(&s->tx_fifo_g)) { + tx_rx[i] = fifo8_pop(&s->tx_fifo_g); + s->tx_fifo_g_align++; + } else { + return; + } + } + } + if (num_stripes == 1) { + /* mirror */ + tx_rx[1] = tx_rx[0]; + } + busses = ARRAY_FIELD_EX32(s->regs, GQSPI_GF_SNAPSHOT, DATA_BUS_SELECT); + for (i = 0; i < 2; ++i) { + DB_PRINT_L(1, "bus %d tx = %02x\n", i, tx_rx[i]); + tx_rx[i] = ssi_transfer(XILINX_SPIPS(s)->spi[i], tx_rx[i]); + DB_PRINT_L(1, "bus %d rx = %02x\n", i, tx_rx[i]); + } + if (s->regs[R_GQSPI_DATA_STS] > 1 && + busses == 0x3 && num_stripes == 2) { + s->regs[R_GQSPI_DATA_STS] -= 2; + } else if (s->regs[R_GQSPI_DATA_STS] > 0) { + s->regs[R_GQSPI_DATA_STS]--; + } + if (ARRAY_FIELD_EX32(s->regs, GQSPI_GF_SNAPSHOT, RECIEVE)) { + for (i = 0; i < 2; ++i) { + if (busses & (1 << i)) { + DB_PRINT_L(1, "bus %d push_byte = %02x\n", i, tx_rx[i]); + fifo8_push(&s->rx_fifo_g, tx_rx[i]); + s->rx_fifo_g_align++; + } + } + } + if (!s->regs[R_GQSPI_DATA_STS]) { + for (; s->tx_fifo_g_align % 4; s->tx_fifo_g_align++) { + fifo8_pop(&s->tx_fifo_g); + } + for (; s->rx_fifo_g_align % 4; s->rx_fifo_g_align++) { + fifo8_push(&s->rx_fifo_g, 0); + } + } + } +} + +static int xilinx_spips_num_dummies(XilinxQSPIPS *qs, uint8_t command) +{ + if (!qs) { + /* The SPI device is not a QSPI device */ + return -1; + } + + switch (command) { /* check for dummies */ + case READ: /* no dummy bytes/cycles */ + case PP: + case DPP: + case QPP: + case READ_4: + case 
PP_4: + case QPP_4: + return 0; + case FAST_READ: + case DOR: + case QOR: + case FAST_READ_4: + case DOR_4: + case QOR_4: + return 1; + case DIOR: + case DIOR_4: + return 2; + case QIOR: + case QIOR_4: + return 4; + default: + return -1; + } +} + +static inline uint8_t get_addr_length(XilinxSPIPS *s, uint8_t cmd) +{ + switch (cmd) { + case PP_4: + case QPP_4: + case READ_4: + case QIOR_4: + case FAST_READ_4: + case DOR_4: + case QOR_4: + case DIOR_4: + return 4; + default: + return (s->regs[R_CMND] & R_CMND_EXT_ADD) ? 4 : 3; + } +} + +static void xilinx_spips_flush_txfifo(XilinxSPIPS *s) +{ + int debug_level = 0; + XilinxQSPIPS *q = (XilinxQSPIPS *) object_dynamic_cast(OBJECT(s), + TYPE_XILINX_QSPIPS); + + for (;;) { + int i; + uint8_t tx = 0; + uint8_t tx_rx[MAX_NUM_BUSSES] = { 0 }; + uint8_t dummy_cycles = 0; + uint8_t addr_length; + + if (fifo8_is_empty(&s->tx_fifo)) { + xilinx_spips_update_ixr(s); + return; + } else if (s->snoop_state == SNOOP_STRIPING || + s->snoop_state == SNOOP_NONE) { + for (i = 0; i < num_effective_busses(s); ++i) { + tx_rx[i] = fifo8_pop(&s->tx_fifo); + } + stripe8(tx_rx, num_effective_busses(s), false); + } else if (s->snoop_state >= SNOOP_ADDR) { + tx = fifo8_pop(&s->tx_fifo); + for (i = 0; i < num_effective_busses(s); ++i) { + tx_rx[i] = tx; + } + } else { + /* + * Extract a dummy byte and generate dummy cycles according to the + * link state + */ + tx = fifo8_pop(&s->tx_fifo); + dummy_cycles = 8 / s->link_state; + } + + for (i = 0; i < num_effective_busses(s); ++i) { + int bus = num_effective_busses(s) - 1 - i; + if (dummy_cycles) { + int d; + for (d = 0; d < dummy_cycles; ++d) { + tx_rx[0] = ssi_transfer(s->spi[bus], (uint32_t)tx_rx[0]); + } + } else { + DB_PRINT_L(debug_level, "tx = %02x\n", tx_rx[i]); + tx_rx[i] = ssi_transfer(s->spi[bus], (uint32_t)tx_rx[i]); + DB_PRINT_L(debug_level, "rx = %02x\n", tx_rx[i]); + } + } + + if (s->regs[R_CMND] & R_CMND_RXFIFO_DRAIN) { + DB_PRINT_L(debug_level, "dircarding drained rx byte\n"); + /* Do nothing */ + } else if (s->rx_discard) { + DB_PRINT_L(debug_level, "dircarding discarded rx byte\n"); + s->rx_discard -= 8 / s->link_state; + } else if (fifo8_is_full(&s->rx_fifo)) { + s->regs[R_INTR_STATUS] |= IXR_RX_FIFO_OVERFLOW; + DB_PRINT_L(0, "rx FIFO overflow"); + } else if (s->snoop_state == SNOOP_STRIPING) { + stripe8(tx_rx, num_effective_busses(s), true); + for (i = 0; i < num_effective_busses(s); ++i) { + fifo8_push(&s->rx_fifo, (uint8_t)tx_rx[i]); + DB_PRINT_L(debug_level, "pushing striped rx byte\n"); + } + } else { + DB_PRINT_L(debug_level, "pushing unstriped rx byte\n"); + fifo8_push(&s->rx_fifo, (uint8_t)tx_rx[0]); + } + + if (s->link_state_next_when) { + s->link_state_next_when--; + if (!s->link_state_next_when) { + s->link_state = s->link_state_next; + } + } + + DB_PRINT_L(debug_level, "initial snoop state: %x\n", + (unsigned)s->snoop_state); + switch (s->snoop_state) { + case (SNOOP_CHECKING): + /* Store the count of dummy bytes in the txfifo */ + s->cmd_dummies = xilinx_spips_num_dummies(q, tx); + addr_length = get_addr_length(s, tx); + if (s->cmd_dummies < 0) { + s->snoop_state = SNOOP_NONE; + } else { + s->snoop_state = SNOOP_ADDR + addr_length - 1; + } + switch (tx) { + case DPP: + case DOR: + case DOR_4: + s->link_state_next = 2; + s->link_state_next_when = addr_length + s->cmd_dummies; + break; + case QPP: + case QPP_4: + case QOR: + case QOR_4: + s->link_state_next = 4; + s->link_state_next_when = addr_length + s->cmd_dummies; + break; + case DIOR: + case DIOR_4: + s->link_state = 2; + break; + case 
QIOR: + case QIOR_4: + s->link_state = 4; + break; + } + break; + case (SNOOP_ADDR): + /* + * Address has been transmitted, transmit dummy cycles now if needed + */ + if (s->cmd_dummies < 0) { + s->snoop_state = SNOOP_NONE; + } else { + s->snoop_state = s->cmd_dummies; + } + break; + case (SNOOP_STRIPING): + case (SNOOP_NONE): + /* Once we hit the boring stuff - squelch debug noise */ + if (!debug_level) { + DB_PRINT_L(0, "squelching debug info ....\n"); + debug_level = 1; + } + break; + default: + s->snoop_state--; + } + DB_PRINT_L(debug_level, "final snoop state: %x\n", + (unsigned)s->snoop_state); + } +} + +static inline void tx_data_bytes(Fifo8 *fifo, uint32_t value, int num, bool be) +{ + int i; + for (i = 0; i < num && !fifo8_is_full(fifo); ++i) { + if (be) { + fifo8_push(fifo, (uint8_t)(value >> 24)); + value <<= 8; + } else { + fifo8_push(fifo, (uint8_t)value); + value >>= 8; + } + } +} + +static void xilinx_spips_check_zero_pump(XilinxSPIPS *s) +{ + if (!s->regs[R_TRANSFER_SIZE]) { + return; + } + if (!fifo8_is_empty(&s->tx_fifo) && s->regs[R_CMND] & R_CMND_PUSH_WAIT) { + return; + } + /* + * The zero pump must never fill tx fifo such that rx overflow is + * possible + */ + while (s->regs[R_TRANSFER_SIZE] && + s->rx_fifo.num + s->tx_fifo.num < RXFF_A_Q - 3) { + /* endianess just doesn't matter when zero pumping */ + tx_data_bytes(&s->tx_fifo, 0, 4, false); + s->regs[R_TRANSFER_SIZE] &= ~0x03ull; + s->regs[R_TRANSFER_SIZE] -= 4; + } +} + +static void xilinx_spips_check_flush(XilinxSPIPS *s) +{ + if (s->man_start_com || + (!fifo8_is_empty(&s->tx_fifo) && + !(s->regs[R_CONFIG] & MAN_START_EN))) { + xilinx_spips_check_zero_pump(s); + xilinx_spips_flush_txfifo(s); + } + if (fifo8_is_empty(&s->tx_fifo) && !s->regs[R_TRANSFER_SIZE]) { + s->man_start_com = false; + } + xilinx_spips_update_ixr(s); +} + +static void xlnx_zynqmp_qspips_check_flush(XlnxZynqMPQSPIPS *s) +{ + bool gqspi_has_work = s->regs[R_GQSPI_DATA_STS] || + !fifo32_is_empty(&s->fifo_g); + + if (ARRAY_FIELD_EX32(s->regs, GQSPI_SELECT, GENERIC_QSPI_EN)) { + if (s->man_start_com_g || (gqspi_has_work && + !ARRAY_FIELD_EX32(s->regs, GQSPI_CNFG, GEN_FIFO_START_MODE))) { + xlnx_zynqmp_qspips_flush_fifo_g(s); + } + } else { + xilinx_spips_check_flush(XILINX_SPIPS(s)); + } + if (!gqspi_has_work) { + s->man_start_com_g = false; + } + xlnx_zynqmp_qspips_update_ixr(s); +} + +static inline int rx_data_bytes(Fifo8 *fifo, uint8_t *value, int max) +{ + int i; + + for (i = 0; i < max && !fifo8_is_empty(fifo); ++i) { + value[i] = fifo8_pop(fifo); + } + return max - i; +} + +static const void *pop_buf(Fifo8 *fifo, uint32_t max, uint32_t *num) +{ + void *ret; + + if (max == 0 || max > fifo->num) { + abort(); + } + *num = MIN(fifo->capacity - fifo->head, max); + ret = &fifo->data[fifo->head]; + fifo->head += *num; + fifo->head %= fifo->capacity; + fifo->num -= *num; + return ret; +} + +static void xlnx_zynqmp_qspips_notify(void *opaque) +{ + XlnxZynqMPQSPIPS *rq = XLNX_ZYNQMP_QSPIPS(opaque); + XilinxSPIPS *s = XILINX_SPIPS(rq); + Fifo8 *recv_fifo; + + if (ARRAY_FIELD_EX32(rq->regs, GQSPI_SELECT, GENERIC_QSPI_EN)) { + if (!(ARRAY_FIELD_EX32(rq->regs, GQSPI_CNFG, MODE_EN) == 2)) { + return; + } + recv_fifo = &rq->rx_fifo_g; + } else { + if (!(s->regs[R_CMND] & R_CMND_DMA_EN)) { + return; + } + recv_fifo = &s->rx_fifo; + } + while (recv_fifo->num >= 4 + && stream_can_push(rq->dma, xlnx_zynqmp_qspips_notify, rq)) + { + size_t ret; + uint32_t num; + const void *rxd; + int len; + + len = recv_fifo->num >= rq->dma_burst_size ? 
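+              /* cap each stream_push() at the configured DMA burst size */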
rq->dma_burst_size : + recv_fifo->num; + rxd = pop_buf(recv_fifo, len, &num); + + memcpy(rq->dma_buf, rxd, num); + + ret = stream_push(rq->dma, rq->dma_buf, num, false); + assert(ret == num); + xlnx_zynqmp_qspips_check_flush(rq); + } +} + +static uint64_t xilinx_spips_read(void *opaque, hwaddr addr, + unsigned size) +{ + XilinxSPIPS *s = opaque; + uint32_t mask = ~0; + uint32_t ret; + uint8_t rx_buf[4]; + int shortfall; + + addr >>= 2; + switch (addr) { + case R_CONFIG: + mask = ~(R_CONFIG_RSVD | MAN_START_COM); + break; + case R_INTR_STATUS: + ret = s->regs[addr] & IXR_ALL; + s->regs[addr] = 0; + DB_PRINT_L(0, "addr=" TARGET_FMT_plx " = %x\n", addr * 4, ret); + xilinx_spips_update_ixr(s); + return ret; + case R_INTR_MASK: + mask = IXR_ALL; + break; + case R_EN: + mask = 0x1; + break; + case R_SLAVE_IDLE_COUNT: + mask = 0xFF; + break; + case R_MOD_ID: + mask = 0x01FFFFFF; + break; + case R_INTR_EN: + case R_INTR_DIS: + case R_TX_DATA: + mask = 0; + break; + case R_RX_DATA: + memset(rx_buf, 0, sizeof(rx_buf)); + shortfall = rx_data_bytes(&s->rx_fifo, rx_buf, s->num_txrx_bytes); + ret = s->regs[R_CONFIG] & R_CONFIG_ENDIAN ? + cpu_to_be32(*(uint32_t *)rx_buf) : + cpu_to_le32(*(uint32_t *)rx_buf); + if (!(s->regs[R_CONFIG] & R_CONFIG_ENDIAN)) { + ret <<= 8 * shortfall; + } + DB_PRINT_L(0, "addr=" TARGET_FMT_plx " = %x\n", addr * 4, ret); + xilinx_spips_check_flush(s); + xilinx_spips_update_ixr(s); + return ret; + } + DB_PRINT_L(0, "addr=" TARGET_FMT_plx " = %x\n", addr * 4, + s->regs[addr] & mask); + return s->regs[addr] & mask; + +} + +static uint64_t xlnx_zynqmp_qspips_read(void *opaque, + hwaddr addr, unsigned size) +{ + XlnxZynqMPQSPIPS *s = XLNX_ZYNQMP_QSPIPS(opaque); + uint32_t reg = addr / 4; + uint32_t ret; + uint8_t rx_buf[4]; + int shortfall; + + if (reg <= R_MOD_ID) { + return xilinx_spips_read(opaque, addr, size); + } else { + switch (reg) { + case R_GQSPI_RXD: + if (fifo8_is_empty(&s->rx_fifo_g)) { + qemu_log_mask(LOG_GUEST_ERROR, + "Read from empty GQSPI RX FIFO\n"); + return 0; + } + memset(rx_buf, 0, sizeof(rx_buf)); + shortfall = rx_data_bytes(&s->rx_fifo_g, rx_buf, + XILINX_SPIPS(s)->num_txrx_bytes); + ret = ARRAY_FIELD_EX32(s->regs, GQSPI_CNFG, ENDIAN) ? 
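+                  /* the ENDIAN config bit selects the byte order of the
+                   * packed 32-bit RXD word */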
+ cpu_to_be32(*(uint32_t *)rx_buf) : + cpu_to_le32(*(uint32_t *)rx_buf); + if (!ARRAY_FIELD_EX32(s->regs, GQSPI_CNFG, ENDIAN)) { + ret <<= 8 * shortfall; + } + xlnx_zynqmp_qspips_check_flush(s); + xlnx_zynqmp_qspips_update_ixr(s); + return ret; + default: + return s->regs[reg]; + } + } +} + +static void xilinx_spips_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + int mask = ~0; + XilinxSPIPS *s = opaque; + bool try_flush = true; + + DB_PRINT_L(0, "addr=" TARGET_FMT_plx " = %x\n", addr, (unsigned)value); + addr >>= 2; + switch (addr) { + case R_CONFIG: + mask = ~(R_CONFIG_RSVD | MAN_START_COM); + if ((value & MAN_START_COM) && (s->regs[R_CONFIG] & MAN_START_EN)) { + s->man_start_com = true; + } + break; + case R_INTR_STATUS: + mask = IXR_ALL; + s->regs[R_INTR_STATUS] &= ~(mask & value); + goto no_reg_update; + case R_INTR_DIS: + mask = IXR_ALL; + s->regs[R_INTR_MASK] &= ~(mask & value); + goto no_reg_update; + case R_INTR_EN: + mask = IXR_ALL; + s->regs[R_INTR_MASK] |= mask & value; + goto no_reg_update; + case R_EN: + mask = 0x1; + break; + case R_SLAVE_IDLE_COUNT: + mask = 0xFF; + break; + case R_RX_DATA: + case R_INTR_MASK: + case R_MOD_ID: + mask = 0; + break; + case R_TX_DATA: + tx_data_bytes(&s->tx_fifo, (uint32_t)value, s->num_txrx_bytes, + s->regs[R_CONFIG] & R_CONFIG_ENDIAN); + goto no_reg_update; + case R_TXD1: + tx_data_bytes(&s->tx_fifo, (uint32_t)value, 1, + s->regs[R_CONFIG] & R_CONFIG_ENDIAN); + goto no_reg_update; + case R_TXD2: + tx_data_bytes(&s->tx_fifo, (uint32_t)value, 2, + s->regs[R_CONFIG] & R_CONFIG_ENDIAN); + goto no_reg_update; + case R_TXD3: + tx_data_bytes(&s->tx_fifo, (uint32_t)value, 3, + s->regs[R_CONFIG] & R_CONFIG_ENDIAN); + goto no_reg_update; + /* Skip SPI bus update for below registers writes */ + case R_GPIO: + case R_LPBK_DLY_ADJ: + case R_IOU_TAPDLY_BYPASS: + case R_DUMMY_CYCLE_EN: + case R_ECO: + try_flush = false; + break; + } + s->regs[addr] = (s->regs[addr] & ~mask) | (value & mask); +no_reg_update: + if (try_flush) { + xilinx_spips_update_cs_lines(s); + xilinx_spips_check_flush(s); + xilinx_spips_update_cs_lines(s); + xilinx_spips_update_ixr(s); + } +} + +static const MemoryRegionOps spips_ops = { + .read = xilinx_spips_read, + .write = xilinx_spips_write, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static void xilinx_qspips_invalidate_mmio_ptr(XilinxQSPIPS *q) +{ + q->lqspi_cached_addr = ~0ULL; +} + +static void xilinx_qspips_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + XilinxQSPIPS *q = XILINX_QSPIPS(opaque); + XilinxSPIPS *s = XILINX_SPIPS(opaque); + + xilinx_spips_write(opaque, addr, value, size); + addr >>= 2; + + if (addr == R_LQSPI_CFG) { + xilinx_qspips_invalidate_mmio_ptr(q); + } + if (s->regs[R_CMND] & R_CMND_RXFIFO_DRAIN) { + fifo8_reset(&s->rx_fifo); + } +} + +static void xlnx_zynqmp_qspips_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + XlnxZynqMPQSPIPS *s = XLNX_ZYNQMP_QSPIPS(opaque); + uint32_t reg = addr / 4; + + if (reg <= R_MOD_ID) { + xilinx_qspips_write(opaque, addr, value, size); + } else { + switch (reg) { + case R_GQSPI_CNFG: + if (FIELD_EX32(value, GQSPI_CNFG, GEN_FIFO_START) && + ARRAY_FIELD_EX32(s->regs, GQSPI_CNFG, GEN_FIFO_START_MODE)) { + s->man_start_com_g = true; + } + s->regs[reg] = value & ~(R_GQSPI_CNFG_GEN_FIFO_START_MASK); + break; + case R_GQSPI_GEN_FIFO: + if (!fifo32_is_full(&s->fifo_g)) { + fifo32_push(&s->fifo_g, value); + } + break; + case R_GQSPI_TXD: + tx_data_bytes(&s->tx_fifo_g, (uint32_t)value, 4, + ARRAY_FIELD_EX32(s->regs, 
GQSPI_CNFG, ENDIAN)); + break; + case R_GQSPI_FIFO_CTRL: + if (FIELD_EX32(value, GQSPI_FIFO_CTRL, GENERIC_FIFO_RESET)) { + fifo32_reset(&s->fifo_g); + } + if (FIELD_EX32(value, GQSPI_FIFO_CTRL, TX_FIFO_RESET)) { + fifo8_reset(&s->tx_fifo_g); + } + if (FIELD_EX32(value, GQSPI_FIFO_CTRL, RX_FIFO_RESET)) { + fifo8_reset(&s->rx_fifo_g); + } + break; + case R_GQSPI_IDR: + s->regs[R_GQSPI_IMR] |= value; + break; + case R_GQSPI_IER: + s->regs[R_GQSPI_IMR] &= ~value; + break; + case R_GQSPI_ISR: + s->regs[R_GQSPI_ISR] &= ~value; + break; + case R_GQSPI_IMR: + case R_GQSPI_RXD: + case R_GQSPI_GF_SNAPSHOT: + case R_GQSPI_MOD_ID: + break; + default: + s->regs[reg] = value; + break; + } + xlnx_zynqmp_qspips_update_cs_lines(s); + xlnx_zynqmp_qspips_check_flush(s); + xlnx_zynqmp_qspips_update_cs_lines(s); + xlnx_zynqmp_qspips_update_ixr(s); + } + xlnx_zynqmp_qspips_notify(s); +} + +static const MemoryRegionOps qspips_ops = { + .read = xilinx_spips_read, + .write = xilinx_qspips_write, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static const MemoryRegionOps xlnx_zynqmp_qspips_ops = { + .read = xlnx_zynqmp_qspips_read, + .write = xlnx_zynqmp_qspips_write, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +#define LQSPI_CACHE_SIZE 1024 + +static void lqspi_load_cache(void *opaque, hwaddr addr) +{ + XilinxQSPIPS *q = opaque; + XilinxSPIPS *s = opaque; + int i; + int flash_addr = ((addr & ~(LQSPI_CACHE_SIZE - 1)) + / num_effective_busses(s)); + int peripheral = flash_addr >> LQSPI_ADDRESS_BITS; + int cache_entry = 0; + uint32_t u_page_save = s->regs[R_LQSPI_STS] & ~LQSPI_CFG_U_PAGE; + + if (addr < q->lqspi_cached_addr || + addr > q->lqspi_cached_addr + LQSPI_CACHE_SIZE - 4) { + xilinx_qspips_invalidate_mmio_ptr(q); + s->regs[R_LQSPI_STS] &= ~LQSPI_CFG_U_PAGE; + s->regs[R_LQSPI_STS] |= peripheral ? 
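+            /* addresses beyond the first flash device select the upper page */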
LQSPI_CFG_U_PAGE : 0; + + DB_PRINT_L(0, "config reg status: %08x\n", s->regs[R_LQSPI_CFG]); + + fifo8_reset(&s->tx_fifo); + fifo8_reset(&s->rx_fifo); + + /* instruction */ + DB_PRINT_L(0, "pushing read instruction: %02x\n", + (unsigned)(uint8_t)(s->regs[R_LQSPI_CFG] & + LQSPI_CFG_INST_CODE)); + fifo8_push(&s->tx_fifo, s->regs[R_LQSPI_CFG] & LQSPI_CFG_INST_CODE); + /* read address */ + DB_PRINT_L(0, "pushing read address %06x\n", flash_addr); + if (s->regs[R_LQSPI_CFG] & LQSPI_CFG_ADDR4) { + fifo8_push(&s->tx_fifo, (uint8_t)(flash_addr >> 24)); + } + fifo8_push(&s->tx_fifo, (uint8_t)(flash_addr >> 16)); + fifo8_push(&s->tx_fifo, (uint8_t)(flash_addr >> 8)); + fifo8_push(&s->tx_fifo, (uint8_t)flash_addr); + /* mode bits */ + if (s->regs[R_LQSPI_CFG] & LQSPI_CFG_MODE_EN) { + fifo8_push(&s->tx_fifo, extract32(s->regs[R_LQSPI_CFG], + LQSPI_CFG_MODE_SHIFT, + LQSPI_CFG_MODE_WIDTH)); + } + /* dummy bytes */ + for (i = 0; i < (extract32(s->regs[R_LQSPI_CFG], LQSPI_CFG_DUMMY_SHIFT, + LQSPI_CFG_DUMMY_WIDTH)); ++i) { + DB_PRINT_L(0, "pushing dummy byte\n"); + fifo8_push(&s->tx_fifo, 0); + } + xilinx_spips_update_cs_lines(s); + xilinx_spips_flush_txfifo(s); + fifo8_reset(&s->rx_fifo); + + DB_PRINT_L(0, "starting QSPI data read\n"); + + while (cache_entry < LQSPI_CACHE_SIZE) { + for (i = 0; i < 64; ++i) { + tx_data_bytes(&s->tx_fifo, 0, 1, false); + } + xilinx_spips_flush_txfifo(s); + for (i = 0; i < 64; ++i) { + rx_data_bytes(&s->rx_fifo, &q->lqspi_buf[cache_entry++], 1); + } + } + + s->regs[R_LQSPI_STS] &= ~LQSPI_CFG_U_PAGE; + s->regs[R_LQSPI_STS] |= u_page_save; + xilinx_spips_update_cs_lines(s); + + q->lqspi_cached_addr = flash_addr * num_effective_busses(s); + } +} + +static MemTxResult lqspi_read(void *opaque, hwaddr addr, uint64_t *value, + unsigned size, MemTxAttrs attrs) +{ + XilinxQSPIPS *q = XILINX_QSPIPS(opaque); + + if (addr >= q->lqspi_cached_addr && + addr <= q->lqspi_cached_addr + LQSPI_CACHE_SIZE - 4) { + uint8_t *retp = &q->lqspi_buf[addr - q->lqspi_cached_addr]; + *value = cpu_to_le32(*(uint32_t *)retp); + DB_PRINT_L(1, "addr: %08" HWADDR_PRIx ", data: %08" PRIx64 "\n", + addr, *value); + return MEMTX_OK; + } + + lqspi_load_cache(opaque, addr); + return lqspi_read(opaque, addr, value, size, attrs); +} + +static MemTxResult lqspi_write(void *opaque, hwaddr offset, uint64_t value, + unsigned size, MemTxAttrs attrs) +{ + /* + * From UG1085, Chapter 24 (Quad-SPI controllers): + * - Writes are ignored + * - AXI writes generate an external AXI slave error (SLVERR) + */ + qemu_log_mask(LOG_GUEST_ERROR, "%s Unexpected %u-bit access to 0x%" PRIx64 + " (value: 0x%" PRIx64 "\n", + __func__, size << 3, offset, value); + + return MEMTX_ERROR; +} + +static const MemoryRegionOps lqspi_ops = { + .read_with_attrs = lqspi_read, + .write_with_attrs = lqspi_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .impl = { + .min_access_size = 4, + .max_access_size = 4, + }, + .valid = { + .min_access_size = 1, + .max_access_size = 4 + } +}; + +static void xilinx_spips_realize(DeviceState *dev, Error **errp) +{ + XilinxSPIPS *s = XILINX_SPIPS(dev); + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + XilinxSPIPSClass *xsc = XILINX_SPIPS_GET_CLASS(s); + int i; + + DB_PRINT_L(0, "realized spips\n"); + + if (s->num_busses > MAX_NUM_BUSSES) { + error_setg(errp, + "requested number of SPI busses %u exceeds maximum %d", + s->num_busses, MAX_NUM_BUSSES); + return; + } + if (s->num_busses < MIN_NUM_BUSSES) { + error_setg(errp, + "requested number of SPI busses %u is below minimum %d", + s->num_busses, MIN_NUM_BUSSES); + return; + 
} + + s->spi = g_new(SSIBus *, s->num_busses); + for (i = 0; i < s->num_busses; ++i) { + char bus_name[16]; + snprintf(bus_name, 16, "spi%d", i); + s->spi[i] = ssi_create_bus(dev, bus_name); + } + + s->cs_lines = g_new0(qemu_irq, s->num_cs * s->num_busses); + s->cs_lines_state = g_new0(bool, s->num_cs * s->num_busses); + + sysbus_init_irq(sbd, &s->irq); + for (i = 0; i < s->num_cs * s->num_busses; ++i) { + sysbus_init_irq(sbd, &s->cs_lines[i]); + } + + memory_region_init_io(&s->iomem, OBJECT(s), xsc->reg_ops, s, + "spi", XLNX_ZYNQMP_SPIPS_R_MAX * 4); + sysbus_init_mmio(sbd, &s->iomem); + + s->irqline = -1; + + fifo8_create(&s->rx_fifo, xsc->rx_fifo_size); + fifo8_create(&s->tx_fifo, xsc->tx_fifo_size); +} + +static void xilinx_qspips_realize(DeviceState *dev, Error **errp) +{ + XilinxSPIPS *s = XILINX_SPIPS(dev); + XilinxQSPIPS *q = XILINX_QSPIPS(dev); + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + + DB_PRINT_L(0, "realized qspips\n"); + + s->num_busses = 2; + s->num_cs = 2; + s->num_txrx_bytes = 4; + + xilinx_spips_realize(dev, errp); + memory_region_init_io(&s->mmlqspi, OBJECT(s), &lqspi_ops, s, "lqspi", + (1 << LQSPI_ADDRESS_BITS) * 2); + sysbus_init_mmio(sbd, &s->mmlqspi); + + q->lqspi_cached_addr = ~0ULL; +} + +static void xlnx_zynqmp_qspips_realize(DeviceState *dev, Error **errp) +{ + XlnxZynqMPQSPIPS *s = XLNX_ZYNQMP_QSPIPS(dev); + XilinxSPIPSClass *xsc = XILINX_SPIPS_GET_CLASS(s); + + if (s->dma_burst_size > QSPI_DMA_MAX_BURST_SIZE) { + error_setg(errp, + "qspi dma burst size %u exceeds maximum limit %d", + s->dma_burst_size, QSPI_DMA_MAX_BURST_SIZE); + return; + } + xilinx_qspips_realize(dev, errp); + fifo8_create(&s->rx_fifo_g, xsc->rx_fifo_size); + fifo8_create(&s->tx_fifo_g, xsc->tx_fifo_size); + fifo32_create(&s->fifo_g, 32); +} + +static void xlnx_zynqmp_qspips_init(Object *obj) +{ + XlnxZynqMPQSPIPS *rq = XLNX_ZYNQMP_QSPIPS(obj); + + object_property_add_link(obj, "stream-connected-dma", TYPE_STREAM_SINK, + (Object **)&rq->dma, + object_property_allow_set_link, + OBJ_PROP_LINK_STRONG); +} + +static int xilinx_spips_post_load(void *opaque, int version_id) +{ + xilinx_spips_update_ixr((XilinxSPIPS *)opaque); + xilinx_spips_update_cs_lines((XilinxSPIPS *)opaque); + return 0; +} + +static const VMStateDescription vmstate_xilinx_spips = { + .name = "xilinx_spips", + .version_id = 2, + .minimum_version_id = 2, + .post_load = xilinx_spips_post_load, + .fields = (VMStateField[]) { + VMSTATE_FIFO8(tx_fifo, XilinxSPIPS), + VMSTATE_FIFO8(rx_fifo, XilinxSPIPS), + VMSTATE_UINT32_ARRAY(regs, XilinxSPIPS, XLNX_SPIPS_R_MAX), + VMSTATE_UINT8(snoop_state, XilinxSPIPS), + VMSTATE_END_OF_LIST() + } +}; + +static int xlnx_zynqmp_qspips_post_load(void *opaque, int version_id) +{ + XlnxZynqMPQSPIPS *s = (XlnxZynqMPQSPIPS *)opaque; + XilinxSPIPS *qs = XILINX_SPIPS(s); + + if (ARRAY_FIELD_EX32(s->regs, GQSPI_SELECT, GENERIC_QSPI_EN) && + fifo8_is_empty(&qs->rx_fifo) && fifo8_is_empty(&qs->tx_fifo)) { + xlnx_zynqmp_qspips_update_ixr(s); + xlnx_zynqmp_qspips_update_cs_lines(s); + } + return 0; +} + +static const VMStateDescription vmstate_xilinx_qspips = { + .name = "xilinx_qspips", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_STRUCT(parent_obj, XilinxQSPIPS, 0, + vmstate_xilinx_spips, XilinxSPIPS), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_xlnx_zynqmp_qspips = { + .name = "xlnx_zynqmp_qspips", + .version_id = 1, + .minimum_version_id = 1, + .post_load = xlnx_zynqmp_qspips_post_load, + .fields = (VMStateField[]) { + 
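+        /* the parent QSPIPS state migrates first, followed by the
+         * GQSPI-only FIFOs and registers */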
VMSTATE_STRUCT(parent_obj, XlnxZynqMPQSPIPS, 0, + vmstate_xilinx_qspips, XilinxQSPIPS), + VMSTATE_FIFO8(tx_fifo_g, XlnxZynqMPQSPIPS), + VMSTATE_FIFO8(rx_fifo_g, XlnxZynqMPQSPIPS), + VMSTATE_FIFO32(fifo_g, XlnxZynqMPQSPIPS), + VMSTATE_UINT32_ARRAY(regs, XlnxZynqMPQSPIPS, XLNX_ZYNQMP_SPIPS_R_MAX), + VMSTATE_END_OF_LIST() + } +}; + +static Property xilinx_zynqmp_qspips_properties[] = { + DEFINE_PROP_UINT32("dma-burst-size", XlnxZynqMPQSPIPS, dma_burst_size, 64), + DEFINE_PROP_END_OF_LIST(), +}; + +static Property xilinx_spips_properties[] = { + DEFINE_PROP_UINT8("num-busses", XilinxSPIPS, num_busses, 1), + DEFINE_PROP_UINT8("num-ss-bits", XilinxSPIPS, num_cs, 4), + DEFINE_PROP_UINT8("num-txrx-bytes", XilinxSPIPS, num_txrx_bytes, 1), + DEFINE_PROP_END_OF_LIST(), +}; + +static void xilinx_qspips_class_init(ObjectClass *klass, void * data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + XilinxSPIPSClass *xsc = XILINX_SPIPS_CLASS(klass); + + dc->realize = xilinx_qspips_realize; + xsc->reg_ops = &qspips_ops; + xsc->rx_fifo_size = RXFF_A_Q; + xsc->tx_fifo_size = TXFF_A_Q; +} + +static void xilinx_spips_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + XilinxSPIPSClass *xsc = XILINX_SPIPS_CLASS(klass); + + dc->realize = xilinx_spips_realize; + dc->reset = xilinx_spips_reset; + device_class_set_props(dc, xilinx_spips_properties); + dc->vmsd = &vmstate_xilinx_spips; + + xsc->reg_ops = &spips_ops; + xsc->rx_fifo_size = RXFF_A; + xsc->tx_fifo_size = TXFF_A; +} + +static void xlnx_zynqmp_qspips_class_init(ObjectClass *klass, void * data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + XilinxSPIPSClass *xsc = XILINX_SPIPS_CLASS(klass); + + dc->realize = xlnx_zynqmp_qspips_realize; + dc->reset = xlnx_zynqmp_qspips_reset; + dc->vmsd = &vmstate_xlnx_zynqmp_qspips; + device_class_set_props(dc, xilinx_zynqmp_qspips_properties); + xsc->reg_ops = &xlnx_zynqmp_qspips_ops; + xsc->rx_fifo_size = RXFF_A_Q; + xsc->tx_fifo_size = TXFF_A_Q; +} + +static const TypeInfo xilinx_spips_info = { + .name = TYPE_XILINX_SPIPS, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(XilinxSPIPS), + .class_init = xilinx_spips_class_init, + .class_size = sizeof(XilinxSPIPSClass), +}; + +static const TypeInfo xilinx_qspips_info = { + .name = TYPE_XILINX_QSPIPS, + .parent = TYPE_XILINX_SPIPS, + .instance_size = sizeof(XilinxQSPIPS), + .class_init = xilinx_qspips_class_init, +}; + +static const TypeInfo xlnx_zynqmp_qspips_info = { + .name = TYPE_XLNX_ZYNQMP_QSPIPS, + .parent = TYPE_XILINX_QSPIPS, + .instance_size = sizeof(XlnxZynqMPQSPIPS), + .instance_init = xlnx_zynqmp_qspips_init, + .class_init = xlnx_zynqmp_qspips_class_init, +}; + +static void xilinx_spips_register_types(void) +{ + type_register_static(&xilinx_spips_info); + type_register_static(&xilinx_qspips_info); + type_register_static(&xlnx_zynqmp_qspips_info); +} + +type_init(xilinx_spips_register_types) diff --git a/hw/timer/Kconfig b/hw/timer/Kconfig new file mode 100644 index 000000000..010be7ed1 --- /dev/null +++ b/hw/timer/Kconfig @@ -0,0 +1,62 @@ +config ARM_TIMER + bool + select PTIMER + +config ARM_MPTIMER + bool + select PTIMER + +config A9_GTIMER + bool + +config HPET + bool + default y if PC + +config I8254 + bool + depends on ISA_BUS + +config ALTERA_TIMER + bool + select PTIMER + +config ALLWINNER_A10_PIT + bool + select PTIMER + +config SIFIVE_PWM + bool + +config STM32F2XX_TIMER + bool + +config CMSDK_APB_TIMER + bool + select PTIMER + +config CMSDK_APB_DUALTIMER + bool + select PTIMER + +config SH_TIMER + bool + select 
PTIMER + +config RENESAS_TMR + bool + +config RENESAS_CMT + bool + +config SSE_COUNTER + bool + +config SSE_TIMER + bool + +config STELLARIS_GPTM + bool + +config AVR_TIMER16 + bool diff --git a/hw/timer/a9gtimer.c b/hw/timer/a9gtimer.c new file mode 100644 index 000000000..7233068a3 --- /dev/null +++ b/hw/timer/a9gtimer.c @@ -0,0 +1,377 @@ +/* + * Global peripheral timer block for ARM A9MP + * + * (C) 2013 Xilinx Inc. + * + * Written by François LEGAL + * Written by Peter Crosthwaite + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . + */ + +#include "qemu/osdep.h" +#include "hw/hw.h" +#include "hw/irq.h" +#include "hw/qdev-properties.h" +#include "hw/timer/a9gtimer.h" +#include "migration/vmstate.h" +#include "qapi/error.h" +#include "qemu/timer.h" +#include "qemu/bitops.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "hw/core/cpu.h" + +#ifndef A9_GTIMER_ERR_DEBUG +#define A9_GTIMER_ERR_DEBUG 0 +#endif + +#define DB_PRINT_L(level, ...) do { \ + if (A9_GTIMER_ERR_DEBUG > (level)) { \ + fprintf(stderr, ": %s: ", __func__); \ + fprintf(stderr, ## __VA_ARGS__); \ + } \ +} while (0) + +#define DB_PRINT(...) DB_PRINT_L(0, ## __VA_ARGS__) + +static inline int a9_gtimer_get_current_cpu(A9GTimerState *s) +{ + if (current_cpu->cpu_index >= s->num_cpu) { + hw_error("a9gtimer: num-cpu %d but this cpu is %d!\n", + s->num_cpu, current_cpu->cpu_index); + } + return current_cpu->cpu_index; +} + +static inline uint64_t a9_gtimer_get_conv(A9GTimerState *s) +{ + uint64_t prescale = extract32(s->control, R_CONTROL_PRESCALER_SHIFT, + R_CONTROL_PRESCALER_LEN); + + return (prescale + 1) * 10; +} + +static A9GTimerUpdate a9_gtimer_get_update(A9GTimerState *s) +{ + A9GTimerUpdate ret; + + ret.now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + ret.new = s->ref_counter + + (ret.now - s->cpu_ref_time) / a9_gtimer_get_conv(s); + return ret; +} + +static void a9_gtimer_update(A9GTimerState *s, bool sync) +{ + + A9GTimerUpdate update = a9_gtimer_get_update(s); + int i; + int64_t next_cdiff = 0; + + for (i = 0; i < s->num_cpu; ++i) { + A9GTimerPerCPU *gtb = &s->per_cpu[i]; + int64_t cdiff = 0; + + if ((s->control & R_CONTROL_TIMER_ENABLE) && + (gtb->control & R_CONTROL_COMP_ENABLE)) { + /* R2p0+, where the compare function is >= */ + if (gtb->compare < update.new) { + DB_PRINT("Compare event happened for CPU %d\n", i); + gtb->status = 1; + if (gtb->control & R_CONTROL_AUTO_INCREMENT && gtb->inc) { + uint64_t inc = + QEMU_ALIGN_UP(update.new - gtb->compare, gtb->inc); + DB_PRINT("Auto incrementing timer compare by %" + PRId64 "\n", inc); + gtb->compare += inc; + } + } + cdiff = (int64_t)gtb->compare - (int64_t)update.new + 1; + if (cdiff > 0 && (cdiff < next_cdiff || !next_cdiff)) { + next_cdiff = cdiff; + } + } + + qemu_set_irq(gtb->irq, + gtb->status && (gtb->control & R_CONTROL_IRQ_ENABLE)); + } + + timer_del(s->timer); + if (next_cdiff) { + DB_PRINT("scheduling qemu_timer to fire again in %" + PRIx64 " cycles\n", next_cdiff); + 
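+        /* next_cdiff is a count of timer ticks; a9_gtimer_get_conv()
+         * returns the tick period in ns, (prescaler + 1) * 10, so this
+         * yields an absolute QEMU_CLOCK_VIRTUAL deadline */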
timer_mod(s->timer, update.now + next_cdiff * a9_gtimer_get_conv(s)); + } + + if (s->control & R_CONTROL_TIMER_ENABLE) { + s->counter = update.new; + } + + if (sync) { + s->cpu_ref_time = update.now; + s->ref_counter = s->counter; + } +} + +static void a9_gtimer_update_no_sync(void *opaque) +{ + A9GTimerState *s = A9_GTIMER(opaque); + + a9_gtimer_update(s, false); +} + +static uint64_t a9_gtimer_read(void *opaque, hwaddr addr, unsigned size) +{ + A9GTimerPerCPU *gtb = (A9GTimerPerCPU *)opaque; + A9GTimerState *s = gtb->parent; + A9GTimerUpdate update; + uint64_t ret = 0; + int shift = 0; + + switch (addr) { + case R_COUNTER_HI: + shift = 32; + /* fallthrough */ + case R_COUNTER_LO: + update = a9_gtimer_get_update(s); + ret = extract64(update.new, shift, 32); + break; + case R_CONTROL: + ret = s->control | gtb->control; + break; + case R_INTERRUPT_STATUS: + ret = gtb->status; + break; + case R_COMPARATOR_HI: + shift = 32; + /* fallthrough */ + case R_COMPARATOR_LO: + ret = extract64(gtb->compare, shift, 32); + break; + case R_AUTO_INCREMENT: + ret = gtb->inc; + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, "bad a9gtimer register: %x\n", + (unsigned)addr); + return 0; + } + + DB_PRINT("addr:%#x data:%#08" PRIx64 "\n", (unsigned)addr, ret); + return ret; +} + +static void a9_gtimer_write(void *opaque, hwaddr addr, uint64_t value, + unsigned size) +{ + A9GTimerPerCPU *gtb = (A9GTimerPerCPU *)opaque; + A9GTimerState *s = gtb->parent; + int shift = 0; + + DB_PRINT("addr:%#x data:%#08" PRIx64 "\n", (unsigned)addr, value); + + switch (addr) { + case R_COUNTER_HI: + shift = 32; + /* fallthrough */ + case R_COUNTER_LO: + /* + * Keep it simple - ARM docco explicitly says to disable timer before + * modding it, so don't bother trying to do all the difficult on the fly + * timer modifications - (if they even work in real hardware??). + */ + if (s->control & R_CONTROL_TIMER_ENABLE) { + qemu_log_mask(LOG_GUEST_ERROR, "Cannot mod running ARM gtimer\n"); + return; + } + s->counter = deposit64(s->counter, shift, 32, value); + return; + case R_CONTROL: + a9_gtimer_update(s, (value ^ s->control) & R_CONTROL_NEEDS_SYNC); + gtb->control = value & R_CONTROL_BANKED; + s->control = value & ~R_CONTROL_BANKED; + break; + case R_INTERRUPT_STATUS: + a9_gtimer_update(s, false); + gtb->status &= ~value; + break; + case R_COMPARATOR_HI: + shift = 32; + /* fallthrough */ + case R_COMPARATOR_LO: + a9_gtimer_update(s, false); + gtb->compare = deposit64(gtb->compare, shift, 32, value); + break; + case R_AUTO_INCREMENT: + gtb->inc = value; + return; + default: + return; + } + + a9_gtimer_update(s, false); +} + +/* Wrapper functions to implement the "read global timer for + * the current CPU" memory regions. 
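+ * The block is banked per CPU: the same physical address reaches a
+ * different A9GTimerPerCPU bank depending on which vCPU made the
+ * access, as selected by a9_gtimer_get_current_cpu().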
+ */ +static uint64_t a9_gtimer_this_read(void *opaque, hwaddr addr, + unsigned size) +{ + A9GTimerState *s = A9_GTIMER(opaque); + int id = a9_gtimer_get_current_cpu(s); + + /* no \n so concatenates with message from read fn */ + DB_PRINT("CPU:%d:", id); + + return a9_gtimer_read(&s->per_cpu[id], addr, size); +} + +static void a9_gtimer_this_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + A9GTimerState *s = A9_GTIMER(opaque); + int id = a9_gtimer_get_current_cpu(s); + + /* no \n so concatenates with message from write fn */ + DB_PRINT("CPU:%d:", id); + + a9_gtimer_write(&s->per_cpu[id], addr, value, size); +} + +static const MemoryRegionOps a9_gtimer_this_ops = { + .read = a9_gtimer_this_read, + .write = a9_gtimer_this_write, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + }, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static const MemoryRegionOps a9_gtimer_ops = { + .read = a9_gtimer_read, + .write = a9_gtimer_write, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + }, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void a9_gtimer_reset(DeviceState *dev) +{ + A9GTimerState *s = A9_GTIMER(dev); + int i; + + s->counter = 0; + s->control = 0; + + for (i = 0; i < s->num_cpu; i++) { + A9GTimerPerCPU *gtb = &s->per_cpu[i]; + + gtb->control = 0; + gtb->status = 0; + gtb->compare = 0; + gtb->inc = 0; + } + a9_gtimer_update(s, false); +} + +static void a9_gtimer_realize(DeviceState *dev, Error **errp) +{ + A9GTimerState *s = A9_GTIMER(dev); + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + int i; + + if (s->num_cpu < 1 || s->num_cpu > A9_GTIMER_MAX_CPUS) { + error_setg(errp, "%s: num-cpu must be between 1 and %d", + __func__, A9_GTIMER_MAX_CPUS); + return; + } + + memory_region_init_io(&s->iomem, OBJECT(dev), &a9_gtimer_this_ops, s, + "a9gtimer shared", 0x20); + sysbus_init_mmio(sbd, &s->iomem); + s->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, a9_gtimer_update_no_sync, s); + + for (i = 0; i < s->num_cpu; i++) { + A9GTimerPerCPU *gtb = &s->per_cpu[i]; + + gtb->parent = s; + sysbus_init_irq(sbd, >b->irq); + memory_region_init_io(>b->iomem, OBJECT(dev), &a9_gtimer_ops, gtb, + "a9gtimer per cpu", 0x20); + sysbus_init_mmio(sbd, >b->iomem); + } +} + +static const VMStateDescription vmstate_a9_gtimer_per_cpu = { + .name = "arm.cortex-a9-global-timer.percpu", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(control, A9GTimerPerCPU), + VMSTATE_UINT64(compare, A9GTimerPerCPU), + VMSTATE_UINT32(status, A9GTimerPerCPU), + VMSTATE_UINT32(inc, A9GTimerPerCPU), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_a9_gtimer = { + .name = "arm.cortex-a9-global-timer", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_TIMER_PTR(timer, A9GTimerState), + VMSTATE_UINT64(counter, A9GTimerState), + VMSTATE_UINT64(ref_counter, A9GTimerState), + VMSTATE_UINT64(cpu_ref_time, A9GTimerState), + VMSTATE_STRUCT_VARRAY_UINT32(per_cpu, A9GTimerState, num_cpu, + 1, vmstate_a9_gtimer_per_cpu, + A9GTimerPerCPU), + VMSTATE_END_OF_LIST() + } +}; + +static Property a9_gtimer_properties[] = { + DEFINE_PROP_UINT32("num-cpu", A9GTimerState, num_cpu, 0), + DEFINE_PROP_END_OF_LIST() +}; + +static void a9_gtimer_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = a9_gtimer_realize; + dc->vmsd = &vmstate_a9_gtimer; + dc->reset = a9_gtimer_reset; + device_class_set_props(dc, a9_gtimer_properties); +} + +static const TypeInfo a9_gtimer_info = 
{ + .name = TYPE_A9_GTIMER, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(A9GTimerState), + .class_init = a9_gtimer_class_init, +}; + +static void a9_gtimer_register_types(void) +{ + type_register_static(&a9_gtimer_info); +} + +type_init(a9_gtimer_register_types) diff --git a/hw/timer/allwinner-a10-pit.c b/hw/timer/allwinner-a10-pit.c new file mode 100644 index 000000000..c3fc2a4da --- /dev/null +++ b/hw/timer/allwinner-a10-pit.c @@ -0,0 +1,316 @@ +/* + * Allwinner A10 timer device emulation + * + * Copyright (C) 2013 Li Guang + * Written by Li Guang + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + */ + +#include "qemu/osdep.h" +#include "hw/irq.h" +#include "hw/qdev-properties.h" +#include "hw/sysbus.h" +#include "hw/timer/allwinner-a10-pit.h" +#include "migration/vmstate.h" +#include "qemu/log.h" +#include "qemu/module.h" + +static void a10_pit_update_irq(AwA10PITState *s) +{ + int i; + + for (i = 0; i < AW_A10_PIT_TIMER_NR; i++) { + qemu_set_irq(s->irq[i], !!(s->irq_status & s->irq_enable & (1 << i))); + } +} + +static uint64_t a10_pit_read(void *opaque, hwaddr offset, unsigned size) +{ + AwA10PITState *s = AW_A10_PIT(opaque); + uint8_t index; + + switch (offset) { + case AW_A10_PIT_TIMER_IRQ_EN: + return s->irq_enable; + case AW_A10_PIT_TIMER_IRQ_ST: + return s->irq_status; + case AW_A10_PIT_TIMER_BASE ... AW_A10_PIT_TIMER_BASE_END: + index = offset & 0xf0; + index >>= 4; + index -= 1; + switch (offset & 0x0f) { + case AW_A10_PIT_TIMER_CONTROL: + return s->control[index]; + case AW_A10_PIT_TIMER_INTERVAL: + return s->interval[index]; + case AW_A10_PIT_TIMER_COUNT: + s->count[index] = ptimer_get_count(s->timer[index]); + return s->count[index]; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad offset 0x%x\n", __func__, (int)offset); + break; + } + case AW_A10_PIT_WDOG_CONTROL: + break; + case AW_A10_PIT_WDOG_MODE: + break; + case AW_A10_PIT_COUNT_LO: + return s->count_lo; + case AW_A10_PIT_COUNT_HI: + return s->count_hi; + case AW_A10_PIT_COUNT_CTL: + return s->count_ctl; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad offset 0x%x\n", __func__, (int)offset); + break; + } + + return 0; +} + +/* Must be called inside a ptimer transaction block for s->timer[index] */ +static void a10_pit_set_freq(AwA10PITState *s, int index) +{ + uint32_t prescaler, source, source_freq; + + prescaler = 1 << extract32(s->control[index], 4, 3); + source = extract32(s->control[index], 2, 2); + source_freq = s->clk_freq[source]; + + if (source_freq) { + ptimer_set_freq(s->timer[index], source_freq / prescaler); + } else { + qemu_log_mask(LOG_GUEST_ERROR, "%s: Invalid clock source %u\n", + __func__, source); + } +} + +static void a10_pit_write(void *opaque, hwaddr offset, uint64_t value, + unsigned size) +{ + AwA10PITState *s = AW_A10_PIT(opaque); + uint8_t index; + + switch (offset) { + case AW_A10_PIT_TIMER_IRQ_EN: + s->irq_enable = value; + a10_pit_update_irq(s); + break; + case AW_A10_PIT_TIMER_IRQ_ST: + s->irq_status &= ~value; + a10_pit_update_irq(s); + break; + case AW_A10_PIT_TIMER_BASE ... 
AW_A10_PIT_TIMER_BASE_END: + index = offset & 0xf0; + index >>= 4; + index -= 1; + switch (offset & 0x0f) { + case AW_A10_PIT_TIMER_CONTROL: + s->control[index] = value; + ptimer_transaction_begin(s->timer[index]); + a10_pit_set_freq(s, index); + if (s->control[index] & AW_A10_PIT_TIMER_RELOAD) { + ptimer_set_count(s->timer[index], s->interval[index]); + } + if (s->control[index] & AW_A10_PIT_TIMER_EN) { + int oneshot = 0; + if (s->control[index] & AW_A10_PIT_TIMER_MODE) { + oneshot = 1; + } + ptimer_run(s->timer[index], oneshot); + } else { + ptimer_stop(s->timer[index]); + } + ptimer_transaction_commit(s->timer[index]); + break; + case AW_A10_PIT_TIMER_INTERVAL: + s->interval[index] = value; + ptimer_transaction_begin(s->timer[index]); + ptimer_set_limit(s->timer[index], s->interval[index], 1); + ptimer_transaction_commit(s->timer[index]); + break; + case AW_A10_PIT_TIMER_COUNT: + s->count[index] = value; + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad offset 0x%x\n", __func__, (int)offset); + } + break; + case AW_A10_PIT_WDOG_CONTROL: + s->watch_dog_control = value; + break; + case AW_A10_PIT_WDOG_MODE: + s->watch_dog_mode = value; + break; + case AW_A10_PIT_COUNT_LO: + s->count_lo = value; + break; + case AW_A10_PIT_COUNT_HI: + s->count_hi = value; + break; + case AW_A10_PIT_COUNT_CTL: + s->count_ctl = value; + if (s->count_ctl & AW_A10_PIT_COUNT_RL_EN) { + uint64_t tmp_count = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + + s->count_lo = tmp_count; + s->count_hi = tmp_count >> 32; + s->count_ctl &= ~AW_A10_PIT_COUNT_RL_EN; + } + if (s->count_ctl & AW_A10_PIT_COUNT_CLR_EN) { + s->count_lo = 0; + s->count_hi = 0; + s->count_ctl &= ~AW_A10_PIT_COUNT_CLR_EN; + } + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad offset 0x%x\n", __func__, (int)offset); + break; + } +} + +static const MemoryRegionOps a10_pit_ops = { + .read = a10_pit_read, + .write = a10_pit_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static Property a10_pit_properties[] = { + DEFINE_PROP_UINT32("clk0-freq", AwA10PITState, clk_freq[0], 0), + DEFINE_PROP_UINT32("clk1-freq", AwA10PITState, clk_freq[1], 0), + DEFINE_PROP_UINT32("clk2-freq", AwA10PITState, clk_freq[2], 0), + DEFINE_PROP_UINT32("clk3-freq", AwA10PITState, clk_freq[3], 0), + DEFINE_PROP_END_OF_LIST(), +}; + +static const VMStateDescription vmstate_a10_pit = { + .name = "a10.pit", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(irq_enable, AwA10PITState), + VMSTATE_UINT32(irq_status, AwA10PITState), + VMSTATE_UINT32_ARRAY(control, AwA10PITState, AW_A10_PIT_TIMER_NR), + VMSTATE_UINT32_ARRAY(interval, AwA10PITState, AW_A10_PIT_TIMER_NR), + VMSTATE_UINT32_ARRAY(count, AwA10PITState, AW_A10_PIT_TIMER_NR), + VMSTATE_UINT32(watch_dog_mode, AwA10PITState), + VMSTATE_UINT32(watch_dog_control, AwA10PITState), + VMSTATE_UINT32(count_lo, AwA10PITState), + VMSTATE_UINT32(count_hi, AwA10PITState), + VMSTATE_UINT32(count_ctl, AwA10PITState), + VMSTATE_PTIMER_ARRAY(timer, AwA10PITState, AW_A10_PIT_TIMER_NR), + VMSTATE_END_OF_LIST() + } +}; + +static void a10_pit_reset(DeviceState *dev) +{ + AwA10PITState *s = AW_A10_PIT(dev); + uint8_t i; + + s->irq_enable = 0; + s->irq_status = 0; + a10_pit_update_irq(s); + + for (i = 0; i < 6; i++) { + s->control[i] = AW_A10_PIT_DEFAULT_CLOCK; + s->interval[i] = 0; + s->count[i] = 0; + ptimer_transaction_begin(s->timer[i]); + ptimer_stop(s->timer[i]); + a10_pit_set_freq(s, i); + ptimer_transaction_commit(s->timer[i]); + } + s->watch_dog_mode = 0; + 
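+    /* the watchdog and the free-running 64-bit counter also reset */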
s->watch_dog_control = 0; + s->count_lo = 0; + s->count_hi = 0; + s->count_ctl = 0; +} + +static void a10_pit_timer_cb(void *opaque) +{ + AwA10TimerContext *tc = opaque; + AwA10PITState *s = tc->container; + uint8_t i = tc->index; + + if (s->control[i] & AW_A10_PIT_TIMER_EN) { + s->irq_status |= 1 << i; + if (s->control[i] & AW_A10_PIT_TIMER_MODE) { + ptimer_stop(s->timer[i]); + s->control[i] &= ~AW_A10_PIT_TIMER_EN; + } + a10_pit_update_irq(s); + } +} + +static void a10_pit_init(Object *obj) +{ + AwA10PITState *s = AW_A10_PIT(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + uint8_t i; + + for (i = 0; i < AW_A10_PIT_TIMER_NR; i++) { + sysbus_init_irq(sbd, &s->irq[i]); + } + memory_region_init_io(&s->iomem, OBJECT(s), &a10_pit_ops, s, + TYPE_AW_A10_PIT, 0x400); + sysbus_init_mmio(sbd, &s->iomem); + + for (i = 0; i < AW_A10_PIT_TIMER_NR; i++) { + AwA10TimerContext *tc = &s->timer_context[i]; + + tc->container = s; + tc->index = i; + s->timer[i] = ptimer_init(a10_pit_timer_cb, tc, PTIMER_POLICY_DEFAULT); + } +} + +static void a10_pit_finalize(Object *obj) +{ + AwA10PITState *s = AW_A10_PIT(obj); + int i; + + for (i = 0; i < AW_A10_PIT_TIMER_NR; i++) { + ptimer_free(s->timer[i]); + } +} + +static void a10_pit_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = a10_pit_reset; + device_class_set_props(dc, a10_pit_properties); + dc->desc = "allwinner a10 timer"; + dc->vmsd = &vmstate_a10_pit; +} + +static const TypeInfo a10_pit_info = { + .name = TYPE_AW_A10_PIT, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(AwA10PITState), + .instance_init = a10_pit_init, + .instance_finalize = a10_pit_finalize, + .class_init = a10_pit_class_init, +}; + +static void a10_register_types(void) +{ + type_register_static(&a10_pit_info); +} + +type_init(a10_register_types); diff --git a/hw/timer/altera_timer.c b/hw/timer/altera_timer.c new file mode 100644 index 000000000..c6e02d2b5 --- /dev/null +++ b/hw/timer/altera_timer.c @@ -0,0 +1,244 @@ +/* + * QEMU model of the Altera timer. + * + * Copyright (c) 2012 Chris Wulff + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see + * + */ + +#include "qemu/osdep.h" +#include "qemu/module.h" +#include "qapi/error.h" + +#include "hw/sysbus.h" +#include "hw/irq.h" +#include "hw/ptimer.h" +#include "hw/qdev-properties.h" +#include "qom/object.h" + +#define R_STATUS 0 +#define R_CONTROL 1 +#define R_PERIODL 2 +#define R_PERIODH 3 +#define R_SNAPL 4 +#define R_SNAPH 5 +#define R_MAX 6 + +#define STATUS_TO 0x0001 +#define STATUS_RUN 0x0002 + +#define CONTROL_ITO 0x0001 +#define CONTROL_CONT 0x0002 +#define CONTROL_START 0x0004 +#define CONTROL_STOP 0x0008 + +#define TYPE_ALTERA_TIMER "ALTR.timer" +OBJECT_DECLARE_SIMPLE_TYPE(AlteraTimer, ALTERA_TIMER) + +struct AlteraTimer { + SysBusDevice busdev; + MemoryRegion mmio; + qemu_irq irq; + uint32_t freq_hz; + ptimer_state *ptimer; + uint32_t regs[R_MAX]; +}; + +static int timer_irq_state(AlteraTimer *t) +{ + bool irq = (t->regs[R_STATUS] & STATUS_TO) && + (t->regs[R_CONTROL] & CONTROL_ITO); + return irq; +} + +static uint64_t timer_read(void *opaque, hwaddr addr, + unsigned int size) +{ + AlteraTimer *t = opaque; + uint64_t r = 0; + + addr >>= 2; + + switch (addr) { + case R_CONTROL: + r = t->regs[R_CONTROL] & (CONTROL_ITO | CONTROL_CONT); + break; + + default: + if (addr < ARRAY_SIZE(t->regs)) { + r = t->regs[addr]; + } + break; + } + + return r; +} + +static void timer_write(void *opaque, hwaddr addr, + uint64_t value, unsigned int size) +{ + AlteraTimer *t = opaque; + uint64_t tvalue; + uint32_t count = 0; + int irqState = timer_irq_state(t); + + addr >>= 2; + + switch (addr) { + case R_STATUS: + /* The timeout bit is cleared by writing the status register. */ + t->regs[R_STATUS] &= ~STATUS_TO; + break; + + case R_CONTROL: + ptimer_transaction_begin(t->ptimer); + t->regs[R_CONTROL] = value & (CONTROL_ITO | CONTROL_CONT); + if ((value & CONTROL_START) && + !(t->regs[R_STATUS] & STATUS_RUN)) { + ptimer_run(t->ptimer, 1); + t->regs[R_STATUS] |= STATUS_RUN; + } + if ((value & CONTROL_STOP) && (t->regs[R_STATUS] & STATUS_RUN)) { + ptimer_stop(t->ptimer); + t->regs[R_STATUS] &= ~STATUS_RUN; + } + ptimer_transaction_commit(t->ptimer); + break; + + case R_PERIODL: + case R_PERIODH: + ptimer_transaction_begin(t->ptimer); + t->regs[addr] = value & 0xFFFF; + if (t->regs[R_STATUS] & STATUS_RUN) { + ptimer_stop(t->ptimer); + t->regs[R_STATUS] &= ~STATUS_RUN; + } + tvalue = (t->regs[R_PERIODH] << 16) | t->regs[R_PERIODL]; + ptimer_set_limit(t->ptimer, tvalue + 1, 1); + ptimer_transaction_commit(t->ptimer); + break; + + case R_SNAPL: + case R_SNAPH: + count = ptimer_get_count(t->ptimer); + t->regs[R_SNAPL] = count & 0xFFFF; + t->regs[R_SNAPH] = count >> 16; + break; + + default: + break; + } + + if (irqState != timer_irq_state(t)) { + qemu_set_irq(t->irq, timer_irq_state(t)); + } +} + +static const MemoryRegionOps timer_ops = { + .read = timer_read, + .write = timer_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + .min_access_size = 1, + .max_access_size = 4 + } +}; + +static void timer_hit(void *opaque) +{ + AlteraTimer *t = opaque; + const uint64_t tvalue = (t->regs[R_PERIODH] << 16) | t->regs[R_PERIODL]; + + t->regs[R_STATUS] |= STATUS_TO; + + ptimer_set_limit(t->ptimer, tvalue + 1, 1); + + if (!(t->regs[R_CONTROL] & CONTROL_CONT)) { + t->regs[R_STATUS] &= ~STATUS_RUN; + ptimer_set_count(t->ptimer, tvalue); + } else { + ptimer_run(t->ptimer, 1); + } + + qemu_set_irq(t->irq, timer_irq_state(t)); +} + +static void altera_timer_realize(DeviceState *dev, 
Error **errp) +{ + AlteraTimer *t = ALTERA_TIMER(dev); + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + + if (t->freq_hz == 0) { + error_setg(errp, "\"clock-frequency\" property must be provided."); + return; + } + + t->ptimer = ptimer_init(timer_hit, t, PTIMER_POLICY_DEFAULT); + ptimer_transaction_begin(t->ptimer); + ptimer_set_freq(t->ptimer, t->freq_hz); + ptimer_transaction_commit(t->ptimer); + + memory_region_init_io(&t->mmio, OBJECT(t), &timer_ops, t, + TYPE_ALTERA_TIMER, R_MAX * sizeof(uint32_t)); + sysbus_init_mmio(sbd, &t->mmio); +} + +static void altera_timer_init(Object *obj) +{ + AlteraTimer *t = ALTERA_TIMER(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + + sysbus_init_irq(sbd, &t->irq); +} + +static void altera_timer_reset(DeviceState *dev) +{ + AlteraTimer *t = ALTERA_TIMER(dev); + + ptimer_transaction_begin(t->ptimer); + ptimer_stop(t->ptimer); + ptimer_set_limit(t->ptimer, 0xffffffff, 1); + ptimer_transaction_commit(t->ptimer); + memset(t->regs, 0, sizeof(t->regs)); +} + +static Property altera_timer_properties[] = { + DEFINE_PROP_UINT32("clock-frequency", AlteraTimer, freq_hz, 0), + DEFINE_PROP_END_OF_LIST(), +}; + +static void altera_timer_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = altera_timer_realize; + device_class_set_props(dc, altera_timer_properties); + dc->reset = altera_timer_reset; +} + +static const TypeInfo altera_timer_info = { + .name = TYPE_ALTERA_TIMER, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(AlteraTimer), + .instance_init = altera_timer_init, + .class_init = altera_timer_class_init, +}; + +static void altera_timer_register(void) +{ + type_register_static(&altera_timer_info); +} + +type_init(altera_timer_register) diff --git a/hw/timer/arm_mptimer.c b/hw/timer/arm_mptimer.c new file mode 100644 index 000000000..cdfca3000 --- /dev/null +++ b/hw/timer/arm_mptimer.c @@ -0,0 +1,331 @@ +/* + * Private peripheral timer/watchdog blocks for ARM 11MPCore and A9MP + * + * Copyright (c) 2006-2007 CodeSourcery. + * Copyright (c) 2011 Linaro Limited + * Written by Paul Brook, Peter Maydell + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . + */ + +#include "qemu/osdep.h" +#include "hw/hw.h" +#include "hw/irq.h" +#include "hw/ptimer.h" +#include "hw/qdev-properties.h" +#include "hw/timer/arm_mptimer.h" +#include "migration/vmstate.h" +#include "qapi/error.h" +#include "qemu/module.h" +#include "hw/core/cpu.h" + +#define PTIMER_POLICY \ + (PTIMER_POLICY_WRAP_AFTER_ONE_PERIOD | \ + PTIMER_POLICY_CONTINUOUS_TRIGGER | \ + PTIMER_POLICY_NO_IMMEDIATE_TRIGGER | \ + PTIMER_POLICY_NO_IMMEDIATE_RELOAD | \ + PTIMER_POLICY_NO_COUNTER_ROUND_DOWN) + +/* This device implements the per-cpu private timer and watchdog block + * which is used in both the ARM11MPCore and Cortex-A9MP. + */ + +static inline int get_current_cpu(ARMMPTimerState *s) +{ + int cpu_id = current_cpu ? 
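+                 /* current_cpu may be NULL during init; fall back to CPU 0 */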
current_cpu->cpu_index : 0; + + if (cpu_id >= s->num_cpu) { + hw_error("arm_mptimer: num-cpu %d but this cpu is %d!\n", + s->num_cpu, cpu_id); + } + + return cpu_id; +} + +static inline void timerblock_update_irq(TimerBlock *tb) +{ + qemu_set_irq(tb->irq, tb->status && (tb->control & 4)); +} + +/* Return conversion factor from mpcore timer ticks to qemu timer ticks. */ +static inline uint32_t timerblock_scale(uint32_t control) +{ + return (((control >> 8) & 0xff) + 1) * 10; +} + +/* Must be called within a ptimer transaction block */ +static inline void timerblock_set_count(struct ptimer_state *timer, + uint32_t control, uint64_t *count) +{ + /* PTimer would trigger interrupt for periodic timer when counter set + * to 0, MPtimer under certain condition only. + */ + if ((control & 3) == 3 && (control & 0xff00) == 0 && *count == 0) { + *count = ptimer_get_limit(timer); + } + ptimer_set_count(timer, *count); +} + +/* Must be called within a ptimer transaction block */ +static inline void timerblock_run(struct ptimer_state *timer, + uint32_t control, uint32_t load) +{ + if ((control & 1) && ((control & 0xff00) || load != 0)) { + ptimer_run(timer, !(control & 2)); + } +} + +static void timerblock_tick(void *opaque) +{ + TimerBlock *tb = (TimerBlock *)opaque; + /* Periodic timer with load = 0 and prescaler != 0 would re-trigger + * IRQ after one period, otherwise it either stops or wraps around. + */ + if ((tb->control & 2) && (tb->control & 0xff00) == 0 && + ptimer_get_limit(tb->timer) == 0) { + ptimer_stop(tb->timer); + } + tb->status = 1; + timerblock_update_irq(tb); +} + +static uint64_t timerblock_read(void *opaque, hwaddr addr, + unsigned size) +{ + TimerBlock *tb = (TimerBlock *)opaque; + switch (addr) { + case 0: /* Load */ + return ptimer_get_limit(tb->timer); + case 4: /* Counter. */ + return ptimer_get_count(tb->timer); + case 8: /* Control. */ + return tb->control; + case 12: /* Interrupt status. */ + return tb->status; + default: + return 0; + } +} + +static void timerblock_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + TimerBlock *tb = (TimerBlock *)opaque; + uint32_t control = tb->control; + switch (addr) { + case 0: /* Load */ + ptimer_transaction_begin(tb->timer); + /* Setting load to 0 stops the timer without doing the tick if + * prescaler = 0. + */ + if ((control & 1) && (control & 0xff00) == 0 && value == 0) { + ptimer_stop(tb->timer); + } + ptimer_set_limit(tb->timer, value, 1); + timerblock_run(tb->timer, control, value); + ptimer_transaction_commit(tb->timer); + break; + case 4: /* Counter. */ + ptimer_transaction_begin(tb->timer); + /* Setting counter to 0 stops the one-shot timer, or periodic with + * load = 0, without doing the tick if prescaler = 0. + */ + if ((control & 1) && (control & 0xff00) == 0 && value == 0 && + (!(control & 2) || ptimer_get_limit(tb->timer) == 0)) { + ptimer_stop(tb->timer); + } + timerblock_set_count(tb->timer, control, &value); + timerblock_run(tb->timer, control, value); + ptimer_transaction_commit(tb->timer); + break; + case 8: /* Control. */ + ptimer_transaction_begin(tb->timer); + if ((control & 3) != (value & 3)) { + ptimer_stop(tb->timer); + } + if ((control & 0xff00) != (value & 0xff00)) { + ptimer_set_period(tb->timer, timerblock_scale(value)); + } + if (value & 1) { + uint64_t count = ptimer_get_count(tb->timer); + /* Re-load periodic timer counter if needed. 
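+             * A periodic timer whose counter has already reached zero
+             * picks its limit back up (see timerblock_set_count()) so
+             * that it does not restart from a zero count.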
*/ + if ((value & 2) && count == 0) { + timerblock_set_count(tb->timer, value, &count); + } + timerblock_run(tb->timer, value, count); + } + tb->control = value; + ptimer_transaction_commit(tb->timer); + break; + case 12: /* Interrupt status. */ + tb->status &= ~value; + timerblock_update_irq(tb); + break; + } +} + +/* Wrapper functions to implement the "read timer/watchdog for + * the current CPU" memory regions. + */ +static uint64_t arm_thistimer_read(void *opaque, hwaddr addr, + unsigned size) +{ + ARMMPTimerState *s = (ARMMPTimerState *)opaque; + int id = get_current_cpu(s); + return timerblock_read(&s->timerblock[id], addr, size); +} + +static void arm_thistimer_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + ARMMPTimerState *s = (ARMMPTimerState *)opaque; + int id = get_current_cpu(s); + timerblock_write(&s->timerblock[id], addr, value, size); +} + +static const MemoryRegionOps arm_thistimer_ops = { + .read = arm_thistimer_read, + .write = arm_thistimer_write, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + }, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static const MemoryRegionOps timerblock_ops = { + .read = timerblock_read, + .write = timerblock_write, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + }, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void timerblock_reset(TimerBlock *tb) +{ + tb->control = 0; + tb->status = 0; + if (tb->timer) { + ptimer_transaction_begin(tb->timer); + ptimer_stop(tb->timer); + ptimer_set_limit(tb->timer, 0, 1); + ptimer_set_period(tb->timer, timerblock_scale(0)); + ptimer_transaction_commit(tb->timer); + } +} + +static void arm_mptimer_reset(DeviceState *dev) +{ + ARMMPTimerState *s = ARM_MPTIMER(dev); + int i; + + for (i = 0; i < ARRAY_SIZE(s->timerblock); i++) { + timerblock_reset(&s->timerblock[i]); + } +} + +static void arm_mptimer_init(Object *obj) +{ + ARMMPTimerState *s = ARM_MPTIMER(obj); + + memory_region_init_io(&s->iomem, obj, &arm_thistimer_ops, s, + "arm_mptimer_timer", 0x20); + sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->iomem); +} + +static void arm_mptimer_realize(DeviceState *dev, Error **errp) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + ARMMPTimerState *s = ARM_MPTIMER(dev); + int i; + + if (s->num_cpu < 1 || s->num_cpu > ARM_MPTIMER_MAX_CPUS) { + error_setg(errp, "num-cpu must be between 1 and %d", + ARM_MPTIMER_MAX_CPUS); + return; + } + /* We implement one timer block per CPU, and expose multiple MMIO regions: + * * region 0 is "timer for this core" + * * region 1 is "timer for core 0" + * * region 2 is "timer for core 1" + * and so on. + * The outgoing interrupt lines are + * * timer for core 0 + * * timer for core 1 + * and so on. 
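+     * One outgoing IRQ line is initialised per timer block, in the
+     * same core order.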
+ */ + for (i = 0; i < s->num_cpu; i++) { + TimerBlock *tb = &s->timerblock[i]; + tb->timer = ptimer_init(timerblock_tick, tb, PTIMER_POLICY); + sysbus_init_irq(sbd, &tb->irq); + memory_region_init_io(&tb->iomem, OBJECT(s), &timerblock_ops, tb, + "arm_mptimer_timerblock", 0x20); + sysbus_init_mmio(sbd, &tb->iomem); + } +} + +static const VMStateDescription vmstate_timerblock = { + .name = "arm_mptimer_timerblock", + .version_id = 3, + .minimum_version_id = 3, + .fields = (VMStateField[]) { + VMSTATE_UINT32(control, TimerBlock), + VMSTATE_UINT32(status, TimerBlock), + VMSTATE_PTIMER(timer, TimerBlock), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_arm_mptimer = { + .name = "arm_mptimer", + .version_id = 3, + .minimum_version_id = 3, + .fields = (VMStateField[]) { + VMSTATE_STRUCT_VARRAY_UINT32(timerblock, ARMMPTimerState, num_cpu, + 3, vmstate_timerblock, TimerBlock), + VMSTATE_END_OF_LIST() + } +}; + +static Property arm_mptimer_properties[] = { + DEFINE_PROP_UINT32("num-cpu", ARMMPTimerState, num_cpu, 0), + DEFINE_PROP_END_OF_LIST() +}; + +static void arm_mptimer_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = arm_mptimer_realize; + dc->vmsd = &vmstate_arm_mptimer; + dc->reset = arm_mptimer_reset; + device_class_set_props(dc, arm_mptimer_properties); +} + +static const TypeInfo arm_mptimer_info = { + .name = TYPE_ARM_MPTIMER, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(ARMMPTimerState), + .instance_init = arm_mptimer_init, + .class_init = arm_mptimer_class_init, +}; + +static void arm_mptimer_register_types(void) +{ + type_register_static(&arm_mptimer_info); +} + +type_init(arm_mptimer_register_types) diff --git a/hw/timer/arm_timer.c b/hw/timer/arm_timer.c new file mode 100644 index 000000000..15caff0e4 --- /dev/null +++ b/hw/timer/arm_timer.c @@ -0,0 +1,419 @@ +/* + * ARM PrimeCell Timer modules. + * + * Copyright (c) 2005-2006 CodeSourcery. + * Written by Paul Brook + * + * This code is licensed under the GPL. + */ + +#include "qemu/osdep.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "qemu/timer.h" +#include "hw/irq.h" +#include "hw/ptimer.h" +#include "hw/qdev-properties.h" +#include "qemu/module.h" +#include "qemu/log.h" +#include "qom/object.h" + +/* Common timer implementation. */ + +#define TIMER_CTRL_ONESHOT (1 << 0) +#define TIMER_CTRL_32BIT (1 << 1) +#define TIMER_CTRL_DIV1 (0 << 2) +#define TIMER_CTRL_DIV16 (1 << 2) +#define TIMER_CTRL_DIV256 (2 << 2) +#define TIMER_CTRL_IE (1 << 5) +#define TIMER_CTRL_PERIODIC (1 << 6) +#define TIMER_CTRL_ENABLE (1 << 7) + +typedef struct { + ptimer_state *timer; + uint32_t control; + uint32_t limit; + int freq; + int int_level; + qemu_irq irq; +} arm_timer_state; + +/* Check all active timers, and schedule the next timer interrupt. */ + +static void arm_timer_update(arm_timer_state *s) +{ + /* Update interrupts. 
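+     * The raw int_level only reaches the IRQ line while TIMER_CTRL_IE
+     * is set; callers re-run this after every state change.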
 */
+    if (s->int_level && (s->control & TIMER_CTRL_IE)) {
+        qemu_irq_raise(s->irq);
+    } else {
+        qemu_irq_lower(s->irq);
+    }
+}
+
+static uint32_t arm_timer_read(void *opaque, hwaddr offset)
+{
+    arm_timer_state *s = (arm_timer_state *)opaque;
+
+    switch (offset >> 2) {
+    case 0: /* TimerLoad */
+    case 6: /* TimerBGLoad */
+        return s->limit;
+    case 1: /* TimerValue */
+        return ptimer_get_count(s->timer);
+    case 2: /* TimerControl */
+        return s->control;
+    case 4: /* TimerRIS */
+        return s->int_level;
+    case 5: /* TimerMIS */
+        if ((s->control & TIMER_CTRL_IE) == 0)
+            return 0;
+        return s->int_level;
+    default:
+        qemu_log_mask(LOG_GUEST_ERROR,
+                      "%s: Bad offset %x\n", __func__, (int)offset);
+        return 0;
+    }
+}
+
+/*
+ * Reset the timer limit after settings have changed.
+ * May only be called from inside a ptimer transaction block.
+ */
+static void arm_timer_recalibrate(arm_timer_state *s, int reload)
+{
+    uint32_t limit;
+
+    if ((s->control & (TIMER_CTRL_PERIODIC | TIMER_CTRL_ONESHOT)) == 0) {
+        /* Free running.  */
+        if (s->control & TIMER_CTRL_32BIT)
+            limit = 0xffffffff;
+        else
+            limit = 0xffff;
+    } else {
+        /* Periodic.  */
+        limit = s->limit;
+    }
+    ptimer_set_limit(s->timer, limit, reload);
+}
+
+static void arm_timer_write(void *opaque, hwaddr offset,
+                            uint32_t value)
+{
+    arm_timer_state *s = (arm_timer_state *)opaque;
+    int freq;
+
+    switch (offset >> 2) {
+    case 0: /* TimerLoad */
+        s->limit = value;
+        ptimer_transaction_begin(s->timer);
+        arm_timer_recalibrate(s, 1);
+        ptimer_transaction_commit(s->timer);
+        break;
+    case 1: /* TimerValue */
+        /* ??? Linux seems to want to write to this readonly register.
+           Ignore it.  */
+        break;
+    case 2: /* TimerControl */
+        ptimer_transaction_begin(s->timer);
+        if (s->control & TIMER_CTRL_ENABLE) {
+            /* Pause the timer if it is running.  This may cause some
+               inaccuracy due to rounding, but avoids a whole lot of other
+               messiness.  */
+            ptimer_stop(s->timer);
+        }
+        s->control = value;
+        freq = s->freq;
+        /* ??? Need to recalculate expiry time after changing divisor.  */
+        switch ((value >> 2) & 3) {
+        case 1: freq >>= 4; break;
+        case 2: freq >>= 8; break;
+        }
+        arm_timer_recalibrate(s, s->control & TIMER_CTRL_ENABLE);
+        ptimer_set_freq(s->timer, freq);
+        if (s->control & TIMER_CTRL_ENABLE) {
+            /* Restart the timer if still enabled.
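+             * The divisor bits take effect from here on: e.g. with a 1 MHz
+             * input clock, selecting TIMER_CTRL_DIV16 (value bits [3:2] = 1)
+             * gives freq = 1000000 >> 4 = 62500 Hz for the restarted ptimer.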
*/ + ptimer_run(s->timer, (s->control & TIMER_CTRL_ONESHOT) != 0); + } + ptimer_transaction_commit(s->timer); + break; + case 3: /* TimerIntClr */ + s->int_level = 0; + break; + case 6: /* TimerBGLoad */ + s->limit = value; + ptimer_transaction_begin(s->timer); + arm_timer_recalibrate(s, 0); + ptimer_transaction_commit(s->timer); + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad offset %x\n", __func__, (int)offset); + } + arm_timer_update(s); +} + +static void arm_timer_tick(void *opaque) +{ + arm_timer_state *s = (arm_timer_state *)opaque; + s->int_level = 1; + arm_timer_update(s); +} + +static const VMStateDescription vmstate_arm_timer = { + .name = "arm_timer", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(control, arm_timer_state), + VMSTATE_UINT32(limit, arm_timer_state), + VMSTATE_INT32(int_level, arm_timer_state), + VMSTATE_PTIMER(timer, arm_timer_state), + VMSTATE_END_OF_LIST() + } +}; + +static arm_timer_state *arm_timer_init(uint32_t freq) +{ + arm_timer_state *s; + + s = (arm_timer_state *)g_malloc0(sizeof(arm_timer_state)); + s->freq = freq; + s->control = TIMER_CTRL_IE; + + s->timer = ptimer_init(arm_timer_tick, s, PTIMER_POLICY_DEFAULT); + vmstate_register(NULL, VMSTATE_INSTANCE_ID_ANY, &vmstate_arm_timer, s); + return s; +} + +/* + * ARM PrimeCell SP804 dual timer module. + * Docs at + * https://developer.arm.com/documentation/ddi0271/latest/ + */ + +#define TYPE_SP804 "sp804" +OBJECT_DECLARE_SIMPLE_TYPE(SP804State, SP804) + +struct SP804State { + SysBusDevice parent_obj; + + MemoryRegion iomem; + arm_timer_state *timer[2]; + uint32_t freq0, freq1; + int level[2]; + qemu_irq irq; +}; + +static const uint8_t sp804_ids[] = { + /* Timer ID */ + 0x04, 0x18, 0x14, 0, + /* PrimeCell ID */ + 0xd, 0xf0, 0x05, 0xb1 +}; + +/* Merge the IRQs from the two component devices. */ +static void sp804_set_irq(void *opaque, int irq, int level) +{ + SP804State *s = (SP804State *)opaque; + + s->level[irq] = level; + qemu_set_irq(s->irq, s->level[0] || s->level[1]); +} + +static uint64_t sp804_read(void *opaque, hwaddr offset, + unsigned size) +{ + SP804State *s = (SP804State *)opaque; + + if (offset < 0x20) { + return arm_timer_read(s->timer[0], offset); + } + if (offset < 0x40) { + return arm_timer_read(s->timer[1], offset - 0x20); + } + + /* TimerPeriphID */ + if (offset >= 0xfe0 && offset <= 0xffc) { + return sp804_ids[(offset - 0xfe0) >> 2]; + } + + switch (offset) { + /* Integration Test control registers, which we won't support */ + case 0xf00: /* TimerITCR */ + case 0xf04: /* TimerITOP (strictly write only but..) 
*/ + qemu_log_mask(LOG_UNIMP, + "%s: integration test registers unimplemented\n", + __func__); + return 0; + } + + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad offset %x\n", __func__, (int)offset); + return 0; +} + +static void sp804_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + SP804State *s = (SP804State *)opaque; + + if (offset < 0x20) { + arm_timer_write(s->timer[0], offset, value); + return; + } + + if (offset < 0x40) { + arm_timer_write(s->timer[1], offset - 0x20, value); + return; + } + + /* Technically we could be writing to the Test Registers, but not likely */ + qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset %x\n", + __func__, (int)offset); +} + +static const MemoryRegionOps sp804_ops = { + .read = sp804_read, + .write = sp804_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static const VMStateDescription vmstate_sp804 = { + .name = "sp804", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_INT32_ARRAY(level, SP804State, 2), + VMSTATE_END_OF_LIST() + } +}; + +static void sp804_init(Object *obj) +{ + SP804State *s = SP804(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + + sysbus_init_irq(sbd, &s->irq); + memory_region_init_io(&s->iomem, obj, &sp804_ops, s, + "sp804", 0x1000); + sysbus_init_mmio(sbd, &s->iomem); +} + +static void sp804_realize(DeviceState *dev, Error **errp) +{ + SP804State *s = SP804(dev); + + s->timer[0] = arm_timer_init(s->freq0); + s->timer[1] = arm_timer_init(s->freq1); + s->timer[0]->irq = qemu_allocate_irq(sp804_set_irq, s, 0); + s->timer[1]->irq = qemu_allocate_irq(sp804_set_irq, s, 1); +} + +/* Integrator/CP timer module. */ + +#define TYPE_INTEGRATOR_PIT "integrator_pit" +OBJECT_DECLARE_SIMPLE_TYPE(icp_pit_state, INTEGRATOR_PIT) + +struct icp_pit_state { + SysBusDevice parent_obj; + + MemoryRegion iomem; + arm_timer_state *timer[3]; +}; + +static uint64_t icp_pit_read(void *opaque, hwaddr offset, + unsigned size) +{ + icp_pit_state *s = (icp_pit_state *)opaque; + int n; + + /* ??? Don't know the PrimeCell ID for this device. */ + n = offset >> 8; + if (n > 2) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad timer %d\n", __func__, n); + return 0; + } + + return arm_timer_read(s->timer[n], offset & 0xff); +} + +static void icp_pit_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + icp_pit_state *s = (icp_pit_state *)opaque; + int n; + + n = offset >> 8; + if (n > 2) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad timer %d\n", __func__, n); + return; + } + + arm_timer_write(s->timer[n], offset & 0xff, value); +} + +static const MemoryRegionOps icp_pit_ops = { + .read = icp_pit_read, + .write = icp_pit_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void icp_pit_init(Object *obj) +{ + icp_pit_state *s = INTEGRATOR_PIT(obj); + SysBusDevice *dev = SYS_BUS_DEVICE(obj); + + /* Timer 0 runs at the system clock speed (40MHz). */ + s->timer[0] = arm_timer_init(40000000); + /* The other two timers run at 1MHz. */ + s->timer[1] = arm_timer_init(1000000); + s->timer[2] = arm_timer_init(1000000); + + sysbus_init_irq(dev, &s->timer[0]->irq); + sysbus_init_irq(dev, &s->timer[1]->irq); + sysbus_init_irq(dev, &s->timer[2]->irq); + + memory_region_init_io(&s->iomem, obj, &icp_pit_ops, s, + "icp_pit", 0x1000); + sysbus_init_mmio(dev, &s->iomem); + /* This device has no state to save/restore. The component timers will + save themselves. 
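+       Each arm_timer_init() call above already registered
+       vmstate_arm_timer for its timer, so the three counters are
+       migrated individually.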
*/ +} + +static const TypeInfo icp_pit_info = { + .name = TYPE_INTEGRATOR_PIT, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(icp_pit_state), + .instance_init = icp_pit_init, +}; + +static Property sp804_properties[] = { + DEFINE_PROP_UINT32("freq0", SP804State, freq0, 1000000), + DEFINE_PROP_UINT32("freq1", SP804State, freq1, 1000000), + DEFINE_PROP_END_OF_LIST(), +}; + +static void sp804_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *k = DEVICE_CLASS(klass); + + k->realize = sp804_realize; + device_class_set_props(k, sp804_properties); + k->vmsd = &vmstate_sp804; +} + +static const TypeInfo sp804_info = { + .name = TYPE_SP804, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(SP804State), + .instance_init = sp804_init, + .class_init = sp804_class_init, +}; + +static void arm_timer_register_types(void) +{ + type_register_static(&icp_pit_info); + type_register_static(&sp804_info); +} + +type_init(arm_timer_register_types) diff --git a/hw/timer/armv7m_systick.c b/hw/timer/armv7m_systick.c new file mode 100644 index 000000000..3bd951dd0 --- /dev/null +++ b/hw/timer/armv7m_systick.c @@ -0,0 +1,310 @@ +/* + * ARMv7M SysTick timer + * + * Copyright (c) 2006-2007 CodeSourcery. + * Written by Paul Brook + * Copyright (c) 2017 Linaro Ltd + * Written by Peter Maydell + * + * This code is licensed under the GPL (version 2 or later). + */ + +#include "qemu/osdep.h" +#include "hw/timer/armv7m_systick.h" +#include "migration/vmstate.h" +#include "hw/irq.h" +#include "hw/sysbus.h" +#include "hw/qdev-clock.h" +#include "qemu/timer.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "qapi/error.h" +#include "trace.h" + +#define SYSTICK_ENABLE (1 << 0) +#define SYSTICK_TICKINT (1 << 1) +#define SYSTICK_CLKSOURCE (1 << 2) +#define SYSTICK_COUNTFLAG (1 << 16) + +#define SYSCALIB_NOREF (1U << 31) +#define SYSCALIB_SKEW (1U << 30) +#define SYSCALIB_TENMS ((1U << 24) - 1) + +static void systick_set_period_from_clock(SysTickState *s) +{ + /* + * Set the ptimer period from whichever clock is selected. + * Must be called from within a ptimer transaction block. + */ + if (s->control & SYSTICK_CLKSOURCE) { + ptimer_set_period_from_clock(s->ptimer, s->cpuclk, 1); + } else { + ptimer_set_period_from_clock(s->ptimer, s->refclk, 1); + } +} + +static void systick_timer_tick(void *opaque) +{ + SysTickState *s = (SysTickState *)opaque; + + trace_systick_timer_tick(); + + s->control |= SYSTICK_COUNTFLAG; + if (s->control & SYSTICK_TICKINT) { + /* Tell the NVIC to pend the SysTick exception */ + qemu_irq_pulse(s->irq); + } + if (ptimer_get_limit(s->ptimer) == 0) { + /* + * Timer expiry with SYST_RVR zero disables the timer + * (but doesn't clear SYST_CSR.ENABLE) + */ + ptimer_stop(s->ptimer); + } +} + +static MemTxResult systick_read(void *opaque, hwaddr addr, uint64_t *data, + unsigned size, MemTxAttrs attrs) +{ + SysTickState *s = opaque; + uint32_t val; + + if (attrs.user) { + /* Generate BusFault for unprivileged accesses */ + return MEMTX_ERROR; + } + + switch (addr) { + case 0x0: /* SysTick Control and Status. */ + val = s->control; + s->control &= ~SYSTICK_COUNTFLAG; + break; + case 0x4: /* SysTick Reload Value. */ + val = ptimer_get_limit(s->ptimer); + break; + case 0x8: /* SysTick Current Value. */ + val = ptimer_get_count(s->ptimer); + break; + case 0xc: /* SysTick Calibration Value. */ + /* + * In real hardware it is possible to make this register report + * a different value from what the reference clock is actually + * running at. 
We don't model that (which usually happens due + * to integration errors in the real hardware) and instead always + * report the theoretical correct value as described in the + * knowledgebase article at + * https://developer.arm.com/documentation/ka001325/latest + * If necessary, we could implement an extra QOM property on this + * device to force the STCALIB value to something different from + * the "correct" value. + */ + if (!clock_has_source(s->refclk)) { + val = SYSCALIB_NOREF; + break; + } + val = clock_ns_to_ticks(s->refclk, 10 * SCALE_MS) - 1; + val &= SYSCALIB_TENMS; + if (clock_ticks_to_ns(s->refclk, val + 1) != 10 * SCALE_MS) { + /* report that tick count does not yield exactly 10ms */ + val |= SYSCALIB_SKEW; + } + break; + default: + val = 0; + qemu_log_mask(LOG_GUEST_ERROR, + "SysTick: Bad read offset 0x%" HWADDR_PRIx "\n", addr); + break; + } + + trace_systick_read(addr, val, size); + *data = val; + return MEMTX_OK; +} + +static MemTxResult systick_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size, + MemTxAttrs attrs) +{ + SysTickState *s = opaque; + + if (attrs.user) { + /* Generate BusFault for unprivileged accesses */ + return MEMTX_ERROR; + } + + trace_systick_write(addr, value, size); + + switch (addr) { + case 0x0: /* SysTick Control and Status. */ + { + uint32_t oldval; + + if (!clock_has_source(s->refclk)) { + /* This bit is always 1 if there is no external refclk */ + value |= SYSTICK_CLKSOURCE; + } + + ptimer_transaction_begin(s->ptimer); + oldval = s->control; + s->control &= 0xfffffff8; + s->control |= value & 7; + + if ((oldval ^ value) & SYSTICK_ENABLE) { + if (value & SYSTICK_ENABLE) { + ptimer_run(s->ptimer, 0); + } else { + ptimer_stop(s->ptimer); + } + } + + if ((oldval ^ value) & SYSTICK_CLKSOURCE) { + systick_set_period_from_clock(s); + } + ptimer_transaction_commit(s->ptimer); + break; + } + case 0x4: /* SysTick Reload Value. */ + ptimer_transaction_begin(s->ptimer); + ptimer_set_limit(s->ptimer, value & 0xffffff, 0); + ptimer_transaction_commit(s->ptimer); + break; + case 0x8: /* SysTick Current Value. */ + /* + * Writing any value clears SYST_CVR to zero and clears + * SYST_CSR.COUNTFLAG. The counter will then reload from SYST_RVR + * on the next clock edge unless SYST_RVR is zero. 
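+         * For example (clock rate here is illustrative): with
+         * SYST_RVR = 999 and a 1 MHz source selected, the counter resumes
+         * counting 999..0, i.e. one wrap every 1000 us.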
+         */
+        ptimer_transaction_begin(s->ptimer);
+        if (ptimer_get_limit(s->ptimer) == 0) {
+            ptimer_stop(s->ptimer);
+        }
+        ptimer_set_count(s->ptimer, 0);
+        s->control &= ~SYSTICK_COUNTFLAG;
+        ptimer_transaction_commit(s->ptimer);
+        break;
+    default:
+        qemu_log_mask(LOG_GUEST_ERROR,
+                      "SysTick: Bad write offset 0x%" HWADDR_PRIx "\n", addr);
+    }
+    return MEMTX_OK;
+}
+
+static const MemoryRegionOps systick_ops = {
+    .read_with_attrs = systick_read,
+    .write_with_attrs = systick_write,
+    .endianness = DEVICE_NATIVE_ENDIAN,
+    .valid.min_access_size = 4,
+    .valid.max_access_size = 4,
+};
+
+static void systick_reset(DeviceState *dev)
+{
+    SysTickState *s = SYSTICK(dev);
+
+    ptimer_transaction_begin(s->ptimer);
+    s->control = 0;
+    if (!clock_has_source(s->refclk)) {
+        /* This bit is always 1 if there is no external refclk */
+        s->control |= SYSTICK_CLKSOURCE;
+    }
+    ptimer_stop(s->ptimer);
+    ptimer_set_count(s->ptimer, 0);
+    ptimer_set_limit(s->ptimer, 0, 0);
+    systick_set_period_from_clock(s);
+    ptimer_transaction_commit(s->ptimer);
+}
+
+static void systick_cpuclk_update(void *opaque, ClockEvent event)
+{
+    SysTickState *s = SYSTICK(opaque);
+
+    if (!(s->control & SYSTICK_CLKSOURCE)) {
+        /* currently using refclk, we can ignore cpuclk changes */
+        return;
+    }
+
+    ptimer_transaction_begin(s->ptimer);
+    ptimer_set_period_from_clock(s->ptimer, s->cpuclk, 1);
+    ptimer_transaction_commit(s->ptimer);
+}
+
+static void systick_refclk_update(void *opaque, ClockEvent event)
+{
+    SysTickState *s = SYSTICK(opaque);
+
+    if (s->control & SYSTICK_CLKSOURCE) {
+        /* currently using cpuclk, we can ignore refclk changes */
+        return;
+    }
+
+    ptimer_transaction_begin(s->ptimer);
+    ptimer_set_period_from_clock(s->ptimer, s->refclk, 1);
+    ptimer_transaction_commit(s->ptimer);
+}
+
+static void systick_instance_init(Object *obj)
+{
+    SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
+    SysTickState *s = SYSTICK(obj);
+
+    memory_region_init_io(&s->iomem, obj, &systick_ops, s, "systick", 0xe0);
+    sysbus_init_mmio(sbd, &s->iomem);
+    sysbus_init_irq(sbd, &s->irq);
+
+    s->refclk = qdev_init_clock_in(DEVICE(obj), "refclk",
+                                   systick_refclk_update, s, ClockUpdate);
+    s->cpuclk = qdev_init_clock_in(DEVICE(obj), "cpuclk",
+                                   systick_cpuclk_update, s, ClockUpdate);
+}
+
+static void systick_realize(DeviceState *dev, Error **errp)
+{
+    SysTickState *s = SYSTICK(dev);
+    s->ptimer = ptimer_init(systick_timer_tick, s,
+                            PTIMER_POLICY_WRAP_AFTER_ONE_PERIOD |
+                            PTIMER_POLICY_NO_COUNTER_ROUND_DOWN |
+                            PTIMER_POLICY_NO_IMMEDIATE_RELOAD |
+                            PTIMER_POLICY_TRIGGER_ONLY_ON_DECREMENT);
+
+    if (!clock_has_source(s->cpuclk)) {
+        error_setg(errp, "systick: cpuclk must be connected");
+        return;
+    }
+    /* It's OK not to connect the refclk */
+}
+
+static const VMStateDescription vmstate_systick = {
+    .name = "armv7m_systick",
+    .version_id = 3,
+    .minimum_version_id = 3,
+    .fields = (VMStateField[]) {
+        VMSTATE_CLOCK(refclk, SysTickState),
+        VMSTATE_CLOCK(cpuclk, SysTickState),
+        VMSTATE_UINT32(control, SysTickState),
+        VMSTATE_INT64(tick, SysTickState),
+        VMSTATE_PTIMER(ptimer, SysTickState),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+static void systick_class_init(ObjectClass *klass, void *data)
+{
+    DeviceClass *dc = DEVICE_CLASS(klass);
+
+    dc->vmsd = &vmstate_systick;
+    dc->reset = systick_reset;
+    dc->realize = systick_realize;
+}
+
+static const TypeInfo armv7m_systick_info = {
+    .name = TYPE_SYSTICK,
+    .parent = TYPE_SYS_BUS_DEVICE,
+    .instance_init = systick_instance_init,
+    .instance_size = sizeof(SysTickState),
+    .class_init = systick_class_init,
+};
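+
+/*
+ * A minimal wiring sketch (illustrative only: the 'sysclk' Clock and the
+ * surrounding board code are assumptions, not part of this device):
+ *
+ *     DeviceState *systick = qdev_new(TYPE_SYSTICK);
+ *     qdev_connect_clock_in(systick, "cpuclk", sysclk);
+ *     sysbus_realize_and_unref(SYS_BUS_DEVICE(systick), &error_fatal);
+ *
+ * Leaving "refclk" unconnected is allowed: SYST_CSR.CLKSOURCE then reads
+ * as 1 and SYST_CALIB reports NOREF, as implemented above.
+ */
+static void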
armv7m_systick_register_types(void) +{ + type_register_static(&armv7m_systick_info); +} + +type_init(armv7m_systick_register_types) diff --git a/hw/timer/aspeed_timer.c b/hw/timer/aspeed_timer.c new file mode 100644 index 000000000..42c47d2ce --- /dev/null +++ b/hw/timer/aspeed_timer.c @@ -0,0 +1,756 @@ +/* + * ASPEED AST2400 Timer + * + * Andrew Jeffery + * + * Copyright (C) 2016 IBM Corp. + * + * This code is licensed under the GPL version 2 or later. See + * the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "hw/irq.h" +#include "hw/sysbus.h" +#include "hw/timer/aspeed_timer.h" +#include "migration/vmstate.h" +#include "qemu/bitops.h" +#include "qemu/timer.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "hw/qdev-properties.h" +#include "trace.h" + +#define TIMER_NR_REGS 4 + +#define TIMER_CTRL_BITS 4 +#define TIMER_CTRL_MASK ((1 << TIMER_CTRL_BITS) - 1) + +#define TIMER_CLOCK_USE_EXT true +#define TIMER_CLOCK_EXT_HZ 1000000 +#define TIMER_CLOCK_USE_APB false + +#define TIMER_REG_STATUS 0 +#define TIMER_REG_RELOAD 1 +#define TIMER_REG_MATCH_FIRST 2 +#define TIMER_REG_MATCH_SECOND 3 + +#define TIMER_FIRST_CAP_PULSE 4 + +enum timer_ctrl_op { + op_enable = 0, + op_external_clock, + op_overflow_interrupt, + op_pulse_enable +}; + +/* + * Minimum value of the reload register to filter out short period + * timers which have a noticeable impact in emulation. 5us should be + * enough, use 20us for "safety". + */ +#define TIMER_MIN_NS (20 * SCALE_US) + +/** + * Avoid mutual references between AspeedTimerCtrlState and AspeedTimer + * structs, as it's a waste of memory. The ptimer BH callback needs to know + * whether a specific AspeedTimer is enabled, but this information is held in + * AspeedTimerCtrlState. So, provide a helper to hoist ourselves from an + * arbitrary AspeedTimer to AspeedTimerCtrlState. + */ +static inline AspeedTimerCtrlState *timer_to_ctrl(AspeedTimer *t) +{ + const AspeedTimer (*timers)[] = (void *)t - (t->id * sizeof(*t)); + return container_of(timers, AspeedTimerCtrlState, timers); +} + +static inline bool timer_ctrl_status(AspeedTimer *t, enum timer_ctrl_op op) +{ + return !!(timer_to_ctrl(t)->ctrl & BIT(t->id * TIMER_CTRL_BITS + op)); +} + +static inline bool timer_enabled(AspeedTimer *t) +{ + return timer_ctrl_status(t, op_enable); +} + +static inline bool timer_overflow_interrupt(AspeedTimer *t) +{ + return timer_ctrl_status(t, op_overflow_interrupt); +} + +static inline bool timer_can_pulse(AspeedTimer *t) +{ + return t->id >= TIMER_FIRST_CAP_PULSE; +} + +static inline bool timer_external_clock(AspeedTimer *t) +{ + return timer_ctrl_status(t, op_external_clock); +} + +static inline uint32_t calculate_rate(struct AspeedTimer *t) +{ + AspeedTimerCtrlState *s = timer_to_ctrl(t); + + return timer_external_clock(t) ? TIMER_CLOCK_EXT_HZ : + aspeed_scu_get_apb_freq(s->scu); +} + +static inline uint32_t calculate_ticks(struct AspeedTimer *t, uint64_t now_ns) +{ + uint64_t delta_ns = now_ns - MIN(now_ns, t->start); + uint32_t rate = calculate_rate(t); + uint64_t ticks = muldiv64(delta_ns, rate, NANOSECONDS_PER_SECOND); + + return t->reload - MIN(t->reload, ticks); +} + +static uint32_t calculate_min_ticks(AspeedTimer *t, uint32_t value) +{ + uint32_t rate = calculate_rate(t); + uint32_t min_ticks = muldiv64(TIMER_MIN_NS, rate, NANOSECONDS_PER_SECOND); + + return value < min_ticks ? 
min_ticks : value; +} + +static inline uint64_t calculate_time(struct AspeedTimer *t, uint32_t ticks) +{ + uint64_t delta_ns; + uint64_t delta_ticks; + + delta_ticks = t->reload - MIN(t->reload, ticks); + delta_ns = muldiv64(delta_ticks, NANOSECONDS_PER_SECOND, calculate_rate(t)); + + return t->start + delta_ns; +} + +static inline uint32_t calculate_match(struct AspeedTimer *t, int i) +{ + return t->match[i] < t->reload ? t->match[i] : 0; +} + +static uint64_t calculate_next(struct AspeedTimer *t) +{ + uint64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + uint64_t next; + + /* + * We don't know the relationship between the values in the match + * registers, so sort using MAX/MIN/zero. We sort in that order as + * the timer counts down to zero. + */ + + next = calculate_time(t, MAX(calculate_match(t, 0), calculate_match(t, 1))); + if (now < next) { + return next; + } + + next = calculate_time(t, MIN(calculate_match(t, 0), calculate_match(t, 1))); + if (now < next) { + return next; + } + + next = calculate_time(t, 0); + if (now < next) { + return next; + } + + /* We've missed all deadlines, fire interrupt and try again */ + timer_del(&t->timer); + + if (timer_overflow_interrupt(t)) { + AspeedTimerCtrlState *s = timer_to_ctrl(t); + t->level = !t->level; + s->irq_sts |= BIT(t->id); + qemu_set_irq(t->irq, t->level); + } + + next = MAX(MAX(calculate_match(t, 0), calculate_match(t, 1)), 0); + t->start = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + + return calculate_time(t, next); +} + +static void aspeed_timer_mod(AspeedTimer *t) +{ + uint64_t next = calculate_next(t); + if (next) { + timer_mod(&t->timer, next); + } +} + +static void aspeed_timer_expire(void *opaque) +{ + AspeedTimer *t = opaque; + bool interrupt = false; + uint32_t ticks; + + if (!timer_enabled(t)) { + return; + } + + ticks = calculate_ticks(t, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL)); + + if (!ticks) { + interrupt = timer_overflow_interrupt(t) || !t->match[0] || !t->match[1]; + } else if (ticks <= MIN(t->match[0], t->match[1])) { + interrupt = true; + } else if (ticks <= MAX(t->match[0], t->match[1])) { + interrupt = true; + } + + if (interrupt) { + AspeedTimerCtrlState *s = timer_to_ctrl(t); + t->level = !t->level; + s->irq_sts |= BIT(t->id); + qemu_set_irq(t->irq, t->level); + } + + aspeed_timer_mod(t); +} + +static uint64_t aspeed_timer_get_value(AspeedTimer *t, int reg) +{ + uint64_t value; + + switch (reg) { + case TIMER_REG_STATUS: + if (timer_enabled(t)) { + value = calculate_ticks(t, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL)); + } else { + value = t->reload; + } + break; + case TIMER_REG_RELOAD: + value = t->reload; + break; + case TIMER_REG_MATCH_FIRST: + case TIMER_REG_MATCH_SECOND: + value = t->match[reg - 2]; + break; + default: + qemu_log_mask(LOG_UNIMP, "%s: Programming error: unexpected reg: %d\n", + __func__, reg); + value = 0; + break; + } + return value; +} + +static uint64_t aspeed_timer_read(void *opaque, hwaddr offset, unsigned size) +{ + AspeedTimerCtrlState *s = opaque; + const int reg = (offset & 0xf) / 4; + uint64_t value; + + switch (offset) { + case 0x30: /* Control Register */ + value = s->ctrl; + break; + case 0x00 ... 0x2c: /* Timers 1 - 4 */ + value = aspeed_timer_get_value(&s->timers[(offset >> 4)], reg); + break; + case 0x40 ... 
0x8c: /* Timers 5 - 8 */ + value = aspeed_timer_get_value(&s->timers[(offset >> 4) - 1], reg); + break; + default: + value = ASPEED_TIMER_GET_CLASS(s)->read(s, offset); + break; + } + trace_aspeed_timer_read(offset, size, value); + return value; +} + +static void aspeed_timer_set_value(AspeedTimerCtrlState *s, int timer, int reg, + uint32_t value) +{ + AspeedTimer *t; + uint32_t old_reload; + + trace_aspeed_timer_set_value(timer, reg, value); + t = &s->timers[timer]; + switch (reg) { + case TIMER_REG_RELOAD: + old_reload = t->reload; + t->reload = calculate_min_ticks(t, value); + + /* If the reload value was not previously set, or zero, and + * the current value is valid, try to start the timer if it is + * enabled. + */ + if (old_reload || !t->reload) { + break; + } + /* fall through to re-enable */ + case TIMER_REG_STATUS: + if (timer_enabled(t)) { + uint64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + int64_t delta = (int64_t) value - (int64_t) calculate_ticks(t, now); + uint32_t rate = calculate_rate(t); + + if (delta >= 0) { + t->start += muldiv64(delta, NANOSECONDS_PER_SECOND, rate); + } else { + t->start -= muldiv64(-delta, NANOSECONDS_PER_SECOND, rate); + } + aspeed_timer_mod(t); + } + break; + case TIMER_REG_MATCH_FIRST: + case TIMER_REG_MATCH_SECOND: + t->match[reg - 2] = value; + if (timer_enabled(t)) { + aspeed_timer_mod(t); + } + break; + default: + qemu_log_mask(LOG_UNIMP, "%s: Programming error: unexpected reg: %d\n", + __func__, reg); + break; + } +} + +/* Control register operations are broken out into helpers that can be + * explicitly called on aspeed_timer_reset(), but also from + * aspeed_timer_ctrl_op(). + */ + +static void aspeed_timer_ctrl_enable(AspeedTimer *t, bool enable) +{ + trace_aspeed_timer_ctrl_enable(t->id, enable); + if (enable) { + t->start = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + aspeed_timer_mod(t); + } else { + timer_del(&t->timer); + } +} + +static void aspeed_timer_ctrl_external_clock(AspeedTimer *t, bool enable) +{ + trace_aspeed_timer_ctrl_external_clock(t->id, enable); +} + +static void aspeed_timer_ctrl_overflow_interrupt(AspeedTimer *t, bool enable) +{ + trace_aspeed_timer_ctrl_overflow_interrupt(t->id, enable); +} + +static void aspeed_timer_ctrl_pulse_enable(AspeedTimer *t, bool enable) +{ + if (timer_can_pulse(t)) { + trace_aspeed_timer_ctrl_pulse_enable(t->id, enable); + } else { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Timer does not support pulse mode\n", __func__); + } +} + +/** + * Given the actions are fixed in number and completely described in helper + * functions, dispatch with a lookup table rather than manage control flow with + * a switch statement. + */ +static void (*const ctrl_ops[])(AspeedTimer *, bool) = { + [op_enable] = aspeed_timer_ctrl_enable, + [op_external_clock] = aspeed_timer_ctrl_external_clock, + [op_overflow_interrupt] = aspeed_timer_ctrl_overflow_interrupt, + [op_pulse_enable] = aspeed_timer_ctrl_pulse_enable, +}; + +/** + * Conditionally affect changes chosen by a timer's control bit. + * + * The aspeed_timer_ctrl_op() interface is convenient for the + * aspeed_timer_set_ctrl() function as the "no change" early exit can be + * calculated for all operations, which cleans up the caller code. However the + * interface isn't convenient for the reset function where we want to enter a + * specific state without artificially constructing old and new values that + * will fall through the change guard (and motivates extracting the actions + * out to helper functions). 
+ *
+ * @t: The timer to manipulate
+ * @op: The type of operation to be performed
+ * @old: The old state of the timer's control bits
+ * @new: The incoming state for the timer's control bits
+ */
+static void aspeed_timer_ctrl_op(AspeedTimer *t, enum timer_ctrl_op op,
+                                 uint8_t old, uint8_t new)
+{
+    const uint8_t mask = BIT(op);
+    const bool enable = !!(new & mask);
+    const bool changed = ((old ^ new) & mask);
+    if (!changed) {
+        return;
+    }
+    ctrl_ops[op](t, enable);
+}
+
+static void aspeed_timer_set_ctrl(AspeedTimerCtrlState *s, uint32_t reg)
+{
+    int i;
+    int shift;
+    uint8_t t_old, t_new;
+    AspeedTimer *t;
+    const uint8_t enable_mask = BIT(op_enable);
+
+    /* Handle a dependency between the 'enable' and remaining three
+     * configuration bits - i.e. if more than one bit in the control set has
+     * changed, including the 'enable' bit, then we want to either disable
+     * the timer and perform configuration, or perform configuration and
+     * then enable the timer
+     */
+    for (i = 0; i < ASPEED_TIMER_NR_TIMERS; i++) {
+        t = &s->timers[i];
+        shift = (i * TIMER_CTRL_BITS);
+        t_old = (s->ctrl >> shift) & TIMER_CTRL_MASK;
+        t_new = (reg >> shift) & TIMER_CTRL_MASK;
+
+        /* If we are disabling, do so first */
+        if ((t_old & enable_mask) && !(t_new & enable_mask)) {
+            aspeed_timer_ctrl_enable(t, false);
+        }
+        aspeed_timer_ctrl_op(t, op_external_clock, t_old, t_new);
+        aspeed_timer_ctrl_op(t, op_overflow_interrupt, t_old, t_new);
+        aspeed_timer_ctrl_op(t, op_pulse_enable, t_old, t_new);
+        /* If we are enabling, do so last */
+        if (!(t_old & enable_mask) && (t_new & enable_mask)) {
+            aspeed_timer_ctrl_enable(t, true);
+        }
+    }
+    s->ctrl = reg;
+}
+
+static void aspeed_timer_set_ctrl2(AspeedTimerCtrlState *s, uint32_t value)
+{
+    trace_aspeed_timer_set_ctrl2(value);
+}
+
+static void aspeed_timer_write(void *opaque, hwaddr offset, uint64_t value,
+                               unsigned size)
+{
+    const uint32_t tv = (uint32_t)(value & 0xFFFFFFFF);
+    const int reg = (offset & 0xf) / 4;
+    AspeedTimerCtrlState *s = opaque;
+
+    switch (offset) {
+    /* Control Registers */
+    case 0x30:
+        aspeed_timer_set_ctrl(s, tv);
+        break;
+    /* Timer Registers */
+    case 0x00 ... 0x2c:
+        aspeed_timer_set_value(s, (offset >> TIMER_NR_REGS), reg, tv);
+        break;
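+    /*
+     * Example of this decode (offset is illustrative): a write at 0x54
+     * yields reg = (0x54 & 0xf) / 4 = 1 (TIMER_REG_RELOAD) and, in the
+     * branch below, timer index (0x54 >> TIMER_NR_REGS) - 1 = 4, so it
+     * sets the reload value of s->timers[4].
+     */
+    case 0x40 ... 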
0x8c: + aspeed_timer_set_value(s, (offset >> TIMER_NR_REGS) - 1, reg, tv); + break; + default: + ASPEED_TIMER_GET_CLASS(s)->write(s, offset, value); + break; + } +} + +static const MemoryRegionOps aspeed_timer_ops = { + .read = aspeed_timer_read, + .write = aspeed_timer_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid.min_access_size = 4, + .valid.max_access_size = 4, + .valid.unaligned = false, +}; + +static uint64_t aspeed_2400_timer_read(AspeedTimerCtrlState *s, hwaddr offset) +{ + uint64_t value; + + switch (offset) { + case 0x34: + value = s->ctrl2; + break; + case 0x38: + case 0x3C: + default: + qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" HWADDR_PRIx "\n", + __func__, offset); + value = 0; + break; + } + return value; +} + +static void aspeed_2400_timer_write(AspeedTimerCtrlState *s, hwaddr offset, + uint64_t value) +{ + const uint32_t tv = (uint32_t)(value & 0xFFFFFFFF); + + switch (offset) { + case 0x34: + aspeed_timer_set_ctrl2(s, tv); + break; + case 0x38: + case 0x3C: + default: + qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" HWADDR_PRIx "\n", + __func__, offset); + break; + } +} + +static uint64_t aspeed_2500_timer_read(AspeedTimerCtrlState *s, hwaddr offset) +{ + uint64_t value; + + switch (offset) { + case 0x34: + value = s->ctrl2; + break; + case 0x38: + value = s->ctrl3 & BIT(0); + break; + case 0x3C: + default: + qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" HWADDR_PRIx "\n", + __func__, offset); + value = 0; + break; + } + return value; +} + +static void aspeed_2500_timer_write(AspeedTimerCtrlState *s, hwaddr offset, + uint64_t value) +{ + const uint32_t tv = (uint32_t)(value & 0xFFFFFFFF); + uint8_t command; + + switch (offset) { + case 0x34: + aspeed_timer_set_ctrl2(s, tv); + break; + case 0x38: + command = (value >> 1) & 0xFF; + if (command == 0xAE) { + s->ctrl3 = 0x1; + } else if (command == 0xEA) { + s->ctrl3 = 0x0; + } + break; + case 0x3C: + if (s->ctrl3 & BIT(0)) { + aspeed_timer_set_ctrl(s, s->ctrl & ~tv); + } + break; + + default: + qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" HWADDR_PRIx "\n", + __func__, offset); + break; + } +} + +static uint64_t aspeed_2600_timer_read(AspeedTimerCtrlState *s, hwaddr offset) +{ + uint64_t value; + + switch (offset) { + case 0x34: + value = s->irq_sts; + break; + case 0x38: + case 0x3C: + default: + qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" HWADDR_PRIx "\n", + __func__, offset); + value = 0; + break; + } + return value; +} + +static void aspeed_2600_timer_write(AspeedTimerCtrlState *s, hwaddr offset, + uint64_t value) +{ + const uint32_t tv = (uint32_t)(value & 0xFFFFFFFF); + + switch (offset) { + case 0x34: + s->irq_sts &= tv; + break; + case 0x3C: + aspeed_timer_set_ctrl(s, s->ctrl & ~tv); + break; + + case 0x38: + default: + qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" HWADDR_PRIx "\n", + __func__, offset); + break; + } +} + +static void aspeed_init_one_timer(AspeedTimerCtrlState *s, uint8_t id) +{ + AspeedTimer *t = &s->timers[id]; + + t->id = id; + timer_init_ns(&t->timer, QEMU_CLOCK_VIRTUAL, aspeed_timer_expire, t); +} + +static void aspeed_timer_realize(DeviceState *dev, Error **errp) +{ + int i; + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + AspeedTimerCtrlState *s = ASPEED_TIMER(dev); + + assert(s->scu); + + for (i = 0; i < ASPEED_TIMER_NR_TIMERS; i++) { + aspeed_init_one_timer(s, i); + sysbus_init_irq(sbd, &s->timers[i].irq); + } + memory_region_init_io(&s->iomem, OBJECT(s), &aspeed_timer_ops, s, + TYPE_ASPEED_TIMER, 0x1000); + sysbus_init_mmio(sbd, &s->iomem); +} + 
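+/*
+ * A minimal creation sketch (illustrative only: the SoC glue and the 'scu'
+ * variable are assumptions, not part of this file). The "scu" link must be
+ * set before realize, which asserts it so calculate_rate() can query the
+ * APB frequency:
+ *
+ *     DeviceState *dev = qdev_new(TYPE_ASPEED_2400_TIMER);
+ *     object_property_set_link(OBJECT(dev), "scu", OBJECT(scu), &error_abort);
+ *     sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
+ */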
+static void aspeed_timer_reset(DeviceState *dev) +{ + int i; + AspeedTimerCtrlState *s = ASPEED_TIMER(dev); + + for (i = 0; i < ASPEED_TIMER_NR_TIMERS; i++) { + AspeedTimer *t = &s->timers[i]; + /* Explicitly call helpers to avoid any conditional behaviour through + * aspeed_timer_set_ctrl(). + */ + aspeed_timer_ctrl_enable(t, false); + aspeed_timer_ctrl_external_clock(t, TIMER_CLOCK_USE_APB); + aspeed_timer_ctrl_overflow_interrupt(t, false); + aspeed_timer_ctrl_pulse_enable(t, false); + t->level = 0; + t->reload = 0; + t->match[0] = 0; + t->match[1] = 0; + } + s->ctrl = 0; + s->ctrl2 = 0; + s->ctrl3 = 0; + s->irq_sts = 0; +} + +static const VMStateDescription vmstate_aspeed_timer = { + .name = "aspeed.timer", + .version_id = 2, + .minimum_version_id = 2, + .fields = (VMStateField[]) { + VMSTATE_UINT8(id, AspeedTimer), + VMSTATE_INT32(level, AspeedTimer), + VMSTATE_TIMER(timer, AspeedTimer), + VMSTATE_UINT32(reload, AspeedTimer), + VMSTATE_UINT32_ARRAY(match, AspeedTimer, 2), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_aspeed_timer_state = { + .name = "aspeed.timerctrl", + .version_id = 2, + .minimum_version_id = 2, + .fields = (VMStateField[]) { + VMSTATE_UINT32(ctrl, AspeedTimerCtrlState), + VMSTATE_UINT32(ctrl2, AspeedTimerCtrlState), + VMSTATE_UINT32(ctrl3, AspeedTimerCtrlState), + VMSTATE_UINT32(irq_sts, AspeedTimerCtrlState), + VMSTATE_STRUCT_ARRAY(timers, AspeedTimerCtrlState, + ASPEED_TIMER_NR_TIMERS, 1, vmstate_aspeed_timer, + AspeedTimer), + VMSTATE_END_OF_LIST() + } +}; + +static Property aspeed_timer_properties[] = { + DEFINE_PROP_LINK("scu", AspeedTimerCtrlState, scu, TYPE_ASPEED_SCU, + AspeedSCUState *), + DEFINE_PROP_END_OF_LIST(), +}; + +static void timer_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = aspeed_timer_realize; + dc->reset = aspeed_timer_reset; + dc->desc = "ASPEED Timer"; + dc->vmsd = &vmstate_aspeed_timer_state; + device_class_set_props(dc, aspeed_timer_properties); +} + +static const TypeInfo aspeed_timer_info = { + .name = TYPE_ASPEED_TIMER, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(AspeedTimerCtrlState), + .class_init = timer_class_init, + .class_size = sizeof(AspeedTimerClass), + .abstract = true, +}; + +static void aspeed_2400_timer_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + AspeedTimerClass *awc = ASPEED_TIMER_CLASS(klass); + + dc->desc = "ASPEED 2400 Timer"; + awc->read = aspeed_2400_timer_read; + awc->write = aspeed_2400_timer_write; +} + +static const TypeInfo aspeed_2400_timer_info = { + .name = TYPE_ASPEED_2400_TIMER, + .parent = TYPE_ASPEED_TIMER, + .class_init = aspeed_2400_timer_class_init, +}; + +static void aspeed_2500_timer_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + AspeedTimerClass *awc = ASPEED_TIMER_CLASS(klass); + + dc->desc = "ASPEED 2500 Timer"; + awc->read = aspeed_2500_timer_read; + awc->write = aspeed_2500_timer_write; +} + +static const TypeInfo aspeed_2500_timer_info = { + .name = TYPE_ASPEED_2500_TIMER, + .parent = TYPE_ASPEED_TIMER, + .class_init = aspeed_2500_timer_class_init, +}; + +static void aspeed_2600_timer_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + AspeedTimerClass *awc = ASPEED_TIMER_CLASS(klass); + + dc->desc = "ASPEED 2600 Timer"; + awc->read = aspeed_2600_timer_read; + awc->write = aspeed_2600_timer_write; +} + +static const TypeInfo aspeed_2600_timer_info = { + .name = 
TYPE_ASPEED_2600_TIMER, + .parent = TYPE_ASPEED_TIMER, + .class_init = aspeed_2600_timer_class_init, +}; + +static void aspeed_timer_register_types(void) +{ + type_register_static(&aspeed_timer_info); + type_register_static(&aspeed_2400_timer_info); + type_register_static(&aspeed_2500_timer_info); + type_register_static(&aspeed_2600_timer_info); +} + +type_init(aspeed_timer_register_types) diff --git a/hw/timer/avr_timer16.c b/hw/timer/avr_timer16.c new file mode 100644 index 000000000..c48555da5 --- /dev/null +++ b/hw/timer/avr_timer16.c @@ -0,0 +1,621 @@ +/* + * AVR 16-bit timer + * + * Copyright (c) 2018 University of Kent + * Author: Ed Robbins + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see + * + */ + +/* + * Driver for 16 bit timers on 8 bit AVR devices. + * Note: + * ATmega640/V-1280/V-1281/V-2560/V-2561/V timers 1, 3, 4 and 5 are 16 bit + */ + +/* + * XXX TODO: Power Reduction Register support + * prescaler pause support + * PWM modes, GPIO, output capture pins, input compare pin + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/log.h" +#include "hw/irq.h" +#include "hw/qdev-properties.h" +#include "hw/timer/avr_timer16.h" +#include "trace.h" + +/* Register offsets */ +#define T16_CRA 0x0 +#define T16_CRB 0x1 +#define T16_CRC 0x2 +#define T16_CNTL 0x4 +#define T16_CNTH 0x5 +#define T16_ICRL 0x6 +#define T16_ICRH 0x7 +#define T16_OCRAL 0x8 +#define T16_OCRAH 0x9 +#define T16_OCRBL 0xa +#define T16_OCRBH 0xb +#define T16_OCRCL 0xc +#define T16_OCRCH 0xd + +/* Field masks */ +#define T16_CRA_WGM01 0x3 +#define T16_CRA_COMC 0xc +#define T16_CRA_COMB 0x30 +#define T16_CRA_COMA 0xc0 +#define T16_CRA_OC_CONF \ + (T16_CRA_COMA | T16_CRA_COMB | T16_CRA_COMC) + +#define T16_CRB_CS 0x7 +#define T16_CRB_WGM23 0x18 +#define T16_CRB_ICES 0x40 +#define T16_CRB_ICNC 0x80 + +#define T16_CRC_FOCC 0x20 +#define T16_CRC_FOCB 0x40 +#define T16_CRC_FOCA 0x80 + +/* Fields masks both TIMSK and TIFR (interrupt mask/flag registers) */ +#define T16_INT_TOV 0x1 /* Timer overflow */ +#define T16_INT_OCA 0x2 /* Output compare A */ +#define T16_INT_OCB 0x4 /* Output compare B */ +#define T16_INT_OCC 0x8 /* Output compare C */ +#define T16_INT_IC 0x20 /* Input capture */ + +/* Clock source values */ +#define T16_CLKSRC_STOPPED 0 +#define T16_CLKSRC_DIV1 1 +#define T16_CLKSRC_DIV8 2 +#define T16_CLKSRC_DIV64 3 +#define T16_CLKSRC_DIV256 4 +#define T16_CLKSRC_DIV1024 5 +#define T16_CLKSRC_EXT_FALLING 6 +#define T16_CLKSRC_EXT_RISING 7 + +/* Timer mode values (not including PWM modes) */ +#define T16_MODE_NORMAL 0 +#define T16_MODE_CTC_OCRA 4 +#define T16_MODE_CTC_ICR 12 + +/* Accessors */ +#define CLKSRC(t16) (t16->crb & T16_CRB_CS) +#define MODE(t16) (((t16->crb & T16_CRB_WGM23) >> 1) | \ + (t16->cra & T16_CRA_WGM01)) +#define CNT(t16) VAL16(t16->cntl, t16->cnth) +#define OCRA(t16) VAL16(t16->ocral, t16->ocrah) +#define OCRB(t16) VAL16(t16->ocrbl, t16->ocrbh) +#define OCRC(t16) VAL16(t16->ocrcl, 
t16->ocrch)
+#define ICR(t16)   VAL16(t16->icrl, t16->icrh)
+
+/* Helper macros */
+#define VAL16(l, h) ((h << 8) | l)
+#define DB_PRINT(fmt, args...) /* Nothing */
+
+static inline int64_t avr_timer16_ns_to_ticks(AVRTimer16State *t16, int64_t t)
+{
+    if (t16->period_ns == 0) {
+        return 0;
+    }
+    return t / t16->period_ns;
+}
+
+static void avr_timer16_update_cnt(AVRTimer16State *t16)
+{
+    uint16_t cnt;
+    cnt = avr_timer16_ns_to_ticks(t16, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) -
+                                       t16->reset_time_ns);
+    t16->cntl = (uint8_t)(cnt & 0xff);
+    t16->cnth = (uint8_t)((cnt & 0xff00) >> 8);
+}
+
+static inline void avr_timer16_recalc_reset_time(AVRTimer16State *t16)
+{
+    t16->reset_time_ns = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) -
+                         CNT(t16) * t16->period_ns;
+}
+
+static void avr_timer16_clock_reset(AVRTimer16State *t16)
+{
+    t16->cntl = 0;
+    t16->cnth = 0;
+    t16->reset_time_ns = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
+}
+
+static void avr_timer16_clksrc_update(AVRTimer16State *t16)
+{
+    uint16_t divider = 0;
+    switch (CLKSRC(t16)) {
+    case T16_CLKSRC_EXT_FALLING:
+    case T16_CLKSRC_EXT_RISING:
+        qemu_log_mask(LOG_UNIMP, "%s: external clock source unsupported\n",
+                      __func__);
+        break;
+    case T16_CLKSRC_STOPPED:
+        break;
+    case T16_CLKSRC_DIV1:
+        divider = 1;
+        break;
+    case T16_CLKSRC_DIV8:
+        divider = 8;
+        break;
+    case T16_CLKSRC_DIV64:
+        divider = 64;
+        break;
+    case T16_CLKSRC_DIV256:
+        divider = 256;
+        break;
+    case T16_CLKSRC_DIV1024:
+        divider = 1024;
+        break;
+    default:
+        break;
+    }
+    if (divider) {
+        t16->freq_hz = t16->cpu_freq_hz / divider;
+        t16->period_ns = NANOSECONDS_PER_SECOND / t16->freq_hz;
+        trace_avr_timer16_clksrc_update(t16->freq_hz, t16->period_ns,
+                                        (uint64_t)(1e6 / t16->freq_hz));
+    }
+}
+
+static void avr_timer16_set_alarm(AVRTimer16State *t16)
+{
+    if (CLKSRC(t16) == T16_CLKSRC_EXT_FALLING ||
+        CLKSRC(t16) == T16_CLKSRC_EXT_RISING ||
+        CLKSRC(t16) == T16_CLKSRC_STOPPED) {
+        /* Timer is disabled or set to external clock source (unsupported) */
+        return;
+    }
+
+    uint64_t alarm_offset = 0xffff;
+    enum NextInterrupt next_interrupt = OVERFLOW;
+
+    switch (MODE(t16)) {
+    case T16_MODE_NORMAL:
+        /* Normal mode */
+        if (OCRA(t16) < alarm_offset && OCRA(t16) > CNT(t16) &&
+            (t16->imsk & T16_INT_OCA)) {
+            alarm_offset = OCRA(t16);
+            next_interrupt = COMPA;
+        }
+        break;
+    case T16_MODE_CTC_OCRA:
+        /* CTC mode, top = ocra */
+        if (OCRA(t16) < alarm_offset && OCRA(t16) > CNT(t16)) {
+            alarm_offset = OCRA(t16);
+            next_interrupt = COMPA;
+        }
+        break;
+    case T16_MODE_CTC_ICR:
+        /* CTC mode, top = icr */
+        if (ICR(t16) < alarm_offset && ICR(t16) > CNT(t16)) {
+            alarm_offset = ICR(t16);
+            next_interrupt = CAPT;
+        }
+        if (OCRA(t16) < alarm_offset && OCRA(t16) > CNT(t16) &&
+            (t16->imsk & T16_INT_OCA)) {
+            alarm_offset = OCRA(t16);
+            next_interrupt = COMPA;
+        }
+        break;
+    default:
+        qemu_log_mask(LOG_UNIMP, "%s: pwm modes are unsupported\n",
+                      __func__);
+        return;
+    }
+    if (OCRB(t16) < alarm_offset && OCRB(t16) > CNT(t16) &&
+        (t16->imsk & T16_INT_OCB)) {
+        alarm_offset = OCRB(t16);
+        next_interrupt = COMPB;
+    }
+    if (OCRC(t16) < alarm_offset && OCRC(t16) > CNT(t16) &&
+        (t16->imsk & T16_INT_OCC)) {
+        alarm_offset = OCRC(t16);
+        next_interrupt = COMPC;
+    }
+    alarm_offset -= CNT(t16);
+
+    t16->next_interrupt = next_interrupt;
+    uint64_t alarm_ns =
+        t16->reset_time_ns + ((CNT(t16) + alarm_offset) * t16->period_ns);
+    timer_mod(t16->timer, alarm_ns);
+
+    trace_avr_timer16_next_alarm(alarm_offset * t16->period_ns);
+}
+
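+/*
+ * Worked example of the alarm computation above (numbers illustrative):
+ * with cpu_freq_hz = 8 MHz and DIV8 prescaling (period_ns = 1000), in
+ * T16_MODE_NORMAL with CNT = 100, OCRA = 200 and T16_INT_OCA unmasked,
+ * alarm_offset ends up as 200 - 100 = 100 ticks, so the QEMU timer is
+ * set to fire 100 us from now with next_interrupt = COMPA.
+ */
+static void avr_timer16_interrupt(void *opaque)
+{
+    AVRTimer16State *t16 =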
opaque; + uint8_t mode = MODE(t16); + + avr_timer16_update_cnt(t16); + + if (CLKSRC(t16) == T16_CLKSRC_EXT_FALLING || + CLKSRC(t16) == T16_CLKSRC_EXT_RISING || + CLKSRC(t16) == T16_CLKSRC_STOPPED) { + /* Timer is disabled or set to external clock source (unsupported) */ + return; + } + + trace_avr_timer16_interrupt_count(CNT(t16)); + + /* Counter overflow */ + if (t16->next_interrupt == OVERFLOW) { + trace_avr_timer16_interrupt_overflow("counter 0xffff"); + avr_timer16_clock_reset(t16); + if (t16->imsk & T16_INT_TOV) { + t16->ifr |= T16_INT_TOV; + qemu_set_irq(t16->ovf_irq, 1); + } + } + /* Check for ocra overflow in CTC mode */ + if (mode == T16_MODE_CTC_OCRA && t16->next_interrupt == COMPA) { + trace_avr_timer16_interrupt_overflow("CTC OCRA"); + avr_timer16_clock_reset(t16); + } + /* Check for icr overflow in CTC mode */ + if (mode == T16_MODE_CTC_ICR && t16->next_interrupt == CAPT) { + trace_avr_timer16_interrupt_overflow("CTC ICR"); + avr_timer16_clock_reset(t16); + if (t16->imsk & T16_INT_IC) { + t16->ifr |= T16_INT_IC; + qemu_set_irq(t16->capt_irq, 1); + } + } + /* Check for output compare interrupts */ + if (t16->imsk & T16_INT_OCA && t16->next_interrupt == COMPA) { + t16->ifr |= T16_INT_OCA; + qemu_set_irq(t16->compa_irq, 1); + } + if (t16->imsk & T16_INT_OCB && t16->next_interrupt == COMPB) { + t16->ifr |= T16_INT_OCB; + qemu_set_irq(t16->compb_irq, 1); + } + if (t16->imsk & T16_INT_OCC && t16->next_interrupt == COMPC) { + t16->ifr |= T16_INT_OCC; + qemu_set_irq(t16->compc_irq, 1); + } + avr_timer16_set_alarm(t16); +} + +static void avr_timer16_reset(DeviceState *dev) +{ + AVRTimer16State *t16 = AVR_TIMER16(dev); + + avr_timer16_clock_reset(t16); + avr_timer16_clksrc_update(t16); + avr_timer16_set_alarm(t16); + + qemu_set_irq(t16->capt_irq, 0); + qemu_set_irq(t16->compa_irq, 0); + qemu_set_irq(t16->compb_irq, 0); + qemu_set_irq(t16->compc_irq, 0); + qemu_set_irq(t16->ovf_irq, 0); +} + +static uint64_t avr_timer16_read(void *opaque, hwaddr offset, unsigned size) +{ + assert(size == 1); + AVRTimer16State *t16 = opaque; + uint8_t retval = 0; + + switch (offset) { + case T16_CRA: + retval = t16->cra; + break; + case T16_CRB: + retval = t16->crb; + break; + case T16_CRC: + retval = t16->crc; + break; + case T16_CNTL: + avr_timer16_update_cnt(t16); + t16->rtmp = t16->cnth; + retval = t16->cntl; + break; + case T16_CNTH: + retval = t16->rtmp; + break; + case T16_ICRL: + /* + * The timer copies cnt to icr when the input capture pin changes + * state or when the analog comparator has a match. We don't + * emulate this behaviour. 
We do support its use for defining a
+         * TOP value in T16_MODE_CTC_ICR
+         */
+        t16->rtmp = t16->icrh;
+        retval = t16->icrl;
+        break;
+    case T16_ICRH:
+        retval = t16->rtmp;
+        break;
+    case T16_OCRAL:
+        retval = t16->ocral;
+        break;
+    case T16_OCRAH:
+        retval = t16->ocrah;
+        break;
+    case T16_OCRBL:
+        retval = t16->ocrbl;
+        break;
+    case T16_OCRBH:
+        retval = t16->ocrbh;
+        break;
+    case T16_OCRCL:
+        retval = t16->ocrcl;
+        break;
+    case T16_OCRCH:
+        retval = t16->ocrch;
+        break;
+    default:
+        break;
+    }
+    trace_avr_timer16_read(offset, retval);
+
+    return (uint64_t)retval;
+}
+
+static void avr_timer16_write(void *opaque, hwaddr offset,
+                              uint64_t val64, unsigned size)
+{
+    assert(size == 1);
+    AVRTimer16State *t16 = opaque;
+    uint8_t val8 = (uint8_t)val64;
+    uint8_t prev_clk_src = CLKSRC(t16);
+
+    trace_avr_timer16_write(offset, val8);
+
+    switch (offset) {
+    case T16_CRA:
+        t16->cra = val8;
+        if (t16->cra & T16_CRA_OC_CONF) {
+            qemu_log_mask(LOG_UNIMP, "%s: output compare pins unsupported\n",
+                          __func__);
+        }
+        break;
+    case T16_CRB:
+        t16->crb = val8;
+        if (t16->crb & T16_CRB_ICNC) {
+            qemu_log_mask(LOG_UNIMP,
+                          "%s: input capture noise canceller unsupported\n",
+                          __func__);
+        }
+        if (t16->crb & T16_CRB_ICES) {
+            qemu_log_mask(LOG_UNIMP, "%s: input capture unsupported\n",
+                          __func__);
+        }
+        if (CLKSRC(t16) != prev_clk_src) {
+            avr_timer16_clksrc_update(t16);
+            if (prev_clk_src == T16_CLKSRC_STOPPED) {
+                t16->reset_time_ns = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
+            }
+        }
+        break;
+    case T16_CRC:
+        t16->crc = val8;
+        qemu_log_mask(LOG_UNIMP, "%s: output compare pins unsupported\n",
+                      __func__);
+        break;
+    case T16_CNTL:
+        /*
+         * CNT is the 16-bit counter value, it must be read/written via
+         * a temporary register (rtmp) to make the read/write atomic.
+         */
+        /* ICR also has this behaviour, and shares rtmp */
+        /*
+         * Writing CNT blocks compare matches for one clock cycle.
+         * Writing CNT to TOP or to an OCR value (if in use) will
+         * skip the relevant interrupt
+         */
+        t16->cntl = val8;
+        t16->cnth = t16->rtmp;
+        avr_timer16_recalc_reset_time(t16);
+        break;
+    case T16_CNTH:
+        t16->rtmp = val8;
+        break;
+    case T16_ICRL:
+        /* ICR can only be written in mode T16_MODE_CTC_ICR */
+        if (MODE(t16) == T16_MODE_CTC_ICR) {
+            t16->icrl = val8;
+            t16->icrh = t16->rtmp;
+        }
+        break;
+    case T16_ICRH:
+        if (MODE(t16) == T16_MODE_CTC_ICR) {
+            t16->rtmp = val8;
+        }
+        break;
+    case T16_OCRAL:
+        /*
+         * OCRn cause the relevant output compare flag to be raised, and
+         * trigger an interrupt, when CNT is equal to the value here
+         */
+        t16->ocral = val8;
+        break;
+    case T16_OCRAH:
+        t16->ocrah = val8;
+        break;
+    case T16_OCRBL:
+        t16->ocrbl = val8;
+        break;
+    case T16_OCRBH:
+        t16->ocrbh = val8;
+        break;
+    case T16_OCRCL:
+        t16->ocrcl = val8;
+        break;
+    case T16_OCRCH:
+        t16->ocrch = val8;
+        break;
+    default:
+        break;
+    }
+    avr_timer16_set_alarm(t16);
+}
+
+static uint64_t avr_timer16_imsk_read(void *opaque,
+                                      hwaddr offset,
+                                      unsigned size)
+{
+    assert(size == 1);
+    AVRTimer16State *t16 = opaque;
+    trace_avr_timer16_read_imsk(offset ? 
0 : t16->imsk); + if (offset != 0) { + return 0; + } + return t16->imsk; +} + +static void avr_timer16_imsk_write(void *opaque, hwaddr offset, + uint64_t val64, unsigned size) +{ + assert(size == 1); + AVRTimer16State *t16 = opaque; + trace_avr_timer16_write_imsk(val64); + if (offset != 0) { + return; + } + t16->imsk = (uint8_t)val64; +} + +static uint64_t avr_timer16_ifr_read(void *opaque, + hwaddr offset, + unsigned size) +{ + assert(size == 1); + AVRTimer16State *t16 = opaque; + trace_avr_timer16_read_ifr(offset ? 0 : t16->ifr); + if (offset != 0) { + return 0; + } + return t16->ifr; +} + +static void avr_timer16_ifr_write(void *opaque, hwaddr offset, + uint64_t val64, unsigned size) +{ + assert(size == 1); + AVRTimer16State *t16 = opaque; + trace_avr_timer16_write_imsk(val64); + if (offset != 0) { + return; + } + t16->ifr = (uint8_t)val64; +} + +static const MemoryRegionOps avr_timer16_ops = { + .read = avr_timer16_read, + .write = avr_timer16_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .impl = {.max_access_size = 1} +}; + +static const MemoryRegionOps avr_timer16_imsk_ops = { + .read = avr_timer16_imsk_read, + .write = avr_timer16_imsk_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .impl = {.max_access_size = 1} +}; + +static const MemoryRegionOps avr_timer16_ifr_ops = { + .read = avr_timer16_ifr_read, + .write = avr_timer16_ifr_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .impl = {.max_access_size = 1} +}; + +static Property avr_timer16_properties[] = { + DEFINE_PROP_UINT8("id", struct AVRTimer16State, id, 0), + DEFINE_PROP_UINT64("cpu-frequency-hz", struct AVRTimer16State, + cpu_freq_hz, 0), + DEFINE_PROP_END_OF_LIST(), +}; + +static void avr_timer16_pr(void *opaque, int irq, int level) +{ + AVRTimer16State *s = AVR_TIMER16(opaque); + + s->enabled = !level; + + if (!s->enabled) { + avr_timer16_reset(DEVICE(s)); + } +} + +static void avr_timer16_init(Object *obj) +{ + AVRTimer16State *s = AVR_TIMER16(obj); + + sysbus_init_irq(SYS_BUS_DEVICE(obj), &s->capt_irq); + sysbus_init_irq(SYS_BUS_DEVICE(obj), &s->compa_irq); + sysbus_init_irq(SYS_BUS_DEVICE(obj), &s->compb_irq); + sysbus_init_irq(SYS_BUS_DEVICE(obj), &s->compc_irq); + sysbus_init_irq(SYS_BUS_DEVICE(obj), &s->ovf_irq); + + memory_region_init_io(&s->iomem, obj, &avr_timer16_ops, + s, "avr-timer16", 0xe); + memory_region_init_io(&s->imsk_iomem, obj, &avr_timer16_imsk_ops, + s, "avr-timer16-intmask", 0x1); + memory_region_init_io(&s->ifr_iomem, obj, &avr_timer16_ifr_ops, + s, "avr-timer16-intflag", 0x1); + + sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->iomem); + sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->imsk_iomem); + sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->ifr_iomem); + qdev_init_gpio_in(DEVICE(s), avr_timer16_pr, 1); +} + +static void avr_timer16_realize(DeviceState *dev, Error **errp) +{ + AVRTimer16State *s = AVR_TIMER16(dev); + + if (s->cpu_freq_hz == 0) { + error_setg(errp, "AVR timer16: cpu-frequency-hz property must be set"); + return; + } + + s->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, avr_timer16_interrupt, s); + s->enabled = true; +} + +static void avr_timer16_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = avr_timer16_reset; + dc->realize = avr_timer16_realize; + device_class_set_props(dc, avr_timer16_properties); +} + +static const TypeInfo avr_timer16_info = { + .name = TYPE_AVR_TIMER16, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(AVRTimer16State), + .instance_init = avr_timer16_init, + .class_init = avr_timer16_class_init, +}; + +static void 
avr_timer16_register_types(void)
+{
+    type_register_static(&avr_timer16_info);
+}
+
+type_init(avr_timer16_register_types)
diff --git a/hw/timer/bcm2835_systmr.c b/hw/timer/bcm2835_systmr.c
new file mode 100644
index 000000000..67669a57f
--- /dev/null
+++ b/hw/timer/bcm2835_systmr.c
@@ -0,0 +1,178 @@
+/*
+ * BCM2835 SYS timer emulation
+ *
+ * Copyright (C) 2019 Philippe Mathieu-Daudé
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * Datasheet: BCM2835 ARM Peripherals (C6357-M-1398)
+ * https://www.raspberrypi.org/app/uploads/2012/02/BCM2835-ARM-Peripherals.pdf
+ *
+ * The free running 64-bit counter is implemented (read-only, at 1 MHz).
+ * The 4 COMPARE registers and their per-channel interrupts are also
+ * implemented, using one QEMU timer per channel.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/log.h"
+#include "qemu/timer.h"
+#include "hw/timer/bcm2835_systmr.h"
+#include "hw/registerfields.h"
+#include "migration/vmstate.h"
+#include "trace.h"
+
+REG32(CTRL_STATUS,  0x00)
+REG32(COUNTER_LOW,  0x04)
+REG32(COUNTER_HIGH, 0x08)
+REG32(COMPARE0,     0x0c)
+REG32(COMPARE1,     0x10)
+REG32(COMPARE2,     0x14)
+REG32(COMPARE3,     0x18)
+
+static void bcm2835_systmr_timer_expire(void *opaque)
+{
+    BCM2835SystemTimerCompare *tmr = opaque;
+
+    trace_bcm2835_systmr_timer_expired(tmr->id);
+    tmr->state->reg.ctrl_status |= 1 << tmr->id;
+    qemu_set_irq(tmr->irq, 1);
+}
+
+static uint64_t bcm2835_systmr_read(void *opaque, hwaddr offset,
+                                    unsigned size)
+{
+    BCM2835SystemTimerState *s = BCM2835_SYSTIMER(opaque);
+    uint64_t r = 0;
+
+    switch (offset) {
+    case A_CTRL_STATUS:
+        r = s->reg.ctrl_status;
+        break;
+    case A_COMPARE0 ... A_COMPARE3:
+        r = s->reg.compare[(offset - A_COMPARE0) >> 2];
+        break;
+    case A_COUNTER_LOW:
+    case A_COUNTER_HIGH:
+        /* Free running counter at 1MHz */
+        r = qemu_clock_get_us(QEMU_CLOCK_VIRTUAL);
+        r >>= 8 * (offset - A_COUNTER_LOW);
+        r &= UINT32_MAX;
+        break;
+    default:
+        qemu_log_mask(LOG_GUEST_ERROR, "%s: bad offset 0x%" HWADDR_PRIx "\n",
+                      __func__, offset);
+        break;
+    }
+    trace_bcm2835_systmr_read(offset, r);
+
+    return r;
+}
+
+static void bcm2835_systmr_write(void *opaque, hwaddr offset,
+                                 uint64_t value64, unsigned size)
+{
+    BCM2835SystemTimerState *s = BCM2835_SYSTIMER(opaque);
+    int index;
+    uint32_t value = value64;
+    uint32_t triggers_delay_us;
+    uint64_t now;
+
+    trace_bcm2835_systmr_write(offset, value);
+    switch (offset) {
+    case A_CTRL_STATUS:
+        s->reg.ctrl_status &= ~value; /* Ack */
+        for (index = 0; index < ARRAY_SIZE(s->tmr); index++) {
+            if (extract32(value, index, 1)) {
+                trace_bcm2835_systmr_irq_ack(index);
+                qemu_set_irq(s->tmr[index].irq, 0);
+            }
+        }
+        break;
+    case A_COMPARE0 ... A_COMPARE3:
+        index = (offset - A_COMPARE0) >> 2;
+        s->reg.compare[index] = value;
+        now = qemu_clock_get_us(QEMU_CLOCK_VIRTUAL);
+        /* Compare lower 32-bits of the free-running counter.
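+         * The uint32_t subtraction wraps modulo 2^32, so e.g. with
+         * now = 0xffffff00 and value = 0x00000100 the computed delay is
+         * 0x200 us; compare values that sit across the wrap still work.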
*/ + triggers_delay_us = value - now; + trace_bcm2835_systmr_run(index, triggers_delay_us); + timer_mod(&s->tmr[index].timer, now + triggers_delay_us); + break; + case A_COUNTER_LOW: + case A_COUNTER_HIGH: + qemu_log_mask(LOG_GUEST_ERROR, "%s: read-only ofs 0x%" HWADDR_PRIx "\n", + __func__, offset); + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, "%s: bad offset 0x%" HWADDR_PRIx "\n", + __func__, offset); + break; + } +} + +static const MemoryRegionOps bcm2835_systmr_ops = { + .read = bcm2835_systmr_read, + .write = bcm2835_systmr_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .impl = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +static void bcm2835_systmr_reset(DeviceState *dev) +{ + BCM2835SystemTimerState *s = BCM2835_SYSTIMER(dev); + + memset(&s->reg, 0, sizeof(s->reg)); +} + +static void bcm2835_systmr_realize(DeviceState *dev, Error **errp) +{ + BCM2835SystemTimerState *s = BCM2835_SYSTIMER(dev); + + memory_region_init_io(&s->iomem, OBJECT(dev), &bcm2835_systmr_ops, + s, "bcm2835-sys-timer", 0x20); + sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->iomem); + + for (size_t i = 0; i < ARRAY_SIZE(s->tmr); i++) { + s->tmr[i].id = i; + s->tmr[i].state = s; + sysbus_init_irq(SYS_BUS_DEVICE(dev), &s->tmr[i].irq); + timer_init_us(&s->tmr[i].timer, QEMU_CLOCK_VIRTUAL, + bcm2835_systmr_timer_expire, &s->tmr[i]); + } +} + +static const VMStateDescription bcm2835_systmr_vmstate = { + .name = "bcm2835_sys_timer", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(reg.ctrl_status, BCM2835SystemTimerState), + VMSTATE_UINT32_ARRAY(reg.compare, BCM2835SystemTimerState, + BCM2835_SYSTIMER_COUNT), + VMSTATE_END_OF_LIST() + } +}; + +static void bcm2835_systmr_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = bcm2835_systmr_realize; + dc->reset = bcm2835_systmr_reset; + dc->vmsd = &bcm2835_systmr_vmstate; +} + +static const TypeInfo bcm2835_systmr_info = { + .name = TYPE_BCM2835_SYSTIMER, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(BCM2835SystemTimerState), + .class_init = bcm2835_systmr_class_init, +}; + +static void bcm2835_systmr_register_types(void) +{ + type_register_static(&bcm2835_systmr_info); +} + +type_init(bcm2835_systmr_register_types); diff --git a/hw/timer/cadence_ttc.c b/hw/timer/cadence_ttc.c new file mode 100644 index 000000000..64108241b --- /dev/null +++ b/hw/timer/cadence_ttc.c @@ -0,0 +1,503 @@ +/* + * Xilinx Zynq cadence TTC model + * + * Copyright (c) 2011 Xilinx Inc. + * Copyright (c) 2012 Peter A.G. Crosthwaite (peter.crosthwaite@petalogix.com) + * Copyright (c) 2012 PetaLogix Pty Ltd. + * Written By Haibing Ma + * M. Habib + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . + */ + +#include "qemu/osdep.h" +#include "hw/irq.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "qemu/module.h" +#include "qemu/timer.h" +#include "qom/object.h" + +#ifdef CADENCE_TTC_ERR_DEBUG +#define DB_PRINT(...) do { \ + fprintf(stderr, ": %s: ", __func__); \ + fprintf(stderr, ## __VA_ARGS__); \ + } while (0) +#else + #define DB_PRINT(...) 
+#define COUNTER_INTR_IV 0x00000001 +#define COUNTER_INTR_M1 0x00000002 +#define COUNTER_INTR_M2 0x00000004 +#define COUNTER_INTR_M3 0x00000008 +#define COUNTER_INTR_OV 0x00000010 +#define COUNTER_INTR_EV 0x00000020 + +#define COUNTER_CTRL_DIS 0x00000001 +#define COUNTER_CTRL_INT 0x00000002 +#define COUNTER_CTRL_DEC 0x00000004 +#define COUNTER_CTRL_MATCH 0x00000008 +#define COUNTER_CTRL_RST 0x00000010 + +#define CLOCK_CTRL_PS_EN 0x00000001 +#define CLOCK_CTRL_PS_V 0x0000001e + +typedef struct { + QEMUTimer *timer; + int freq; + + uint32_t reg_clock; + uint32_t reg_count; + uint32_t reg_value; + uint16_t reg_interval; + uint16_t reg_match[3]; + uint32_t reg_intr; + uint32_t reg_intr_en; + uint32_t reg_event_ctrl; + uint32_t reg_event; + + uint64_t cpu_time; + unsigned int cpu_time_valid; + + qemu_irq irq; +} CadenceTimerState; + +#define TYPE_CADENCE_TTC "cadence_ttc" +OBJECT_DECLARE_SIMPLE_TYPE(CadenceTTCState, CADENCE_TTC) + +struct CadenceTTCState { + SysBusDevice parent_obj; + + MemoryRegion iomem; + CadenceTimerState timer[3]; +}; + +static void cadence_timer_update(CadenceTimerState *s) +{ + qemu_set_irq(s->irq, !!(s->reg_intr & s->reg_intr_en)); +} + +static CadenceTimerState *cadence_timer_from_addr(void *opaque, + hwaddr offset) +{ + unsigned int index; + CadenceTTCState *s = (CadenceTTCState *)opaque; + + index = (offset >> 2) % 3; + + return &s->timer[index]; +} + +static uint64_t cadence_timer_get_ns(CadenceTimerState *s, uint64_t timer_steps) +{ + /* timer_steps has a max value of 0x100000000; double-check it + * (or the multiplication below can overflow) */ + assert(timer_steps <= 1ULL << 32); + + uint64_t r = timer_steps * 1000000000ULL; + if (s->reg_clock & CLOCK_CTRL_PS_EN) { + r >>= 16 - (((s->reg_clock & CLOCK_CTRL_PS_V) >> 1) + 1); + } else { + r >>= 16; + } + r /= (uint64_t)s->freq; + return r; +} + +static uint64_t cadence_timer_get_steps(CadenceTimerState *s, uint64_t ns) +{ + uint64_t to_divide = 1000000000ULL; + + uint64_t r = ns; + /* for very large intervals (> 8s) do some division first to stop + * overflow (costs some precision) */ + while (r >= 8ULL << 30 && to_divide > 1) { + r /= 1000; + to_divide /= 1000; + } + r <<= 16; + /* keep early-dividing as needed */ + while (r >= 8ULL << 30 && to_divide > 1) { + r /= 1000; + to_divide /= 1000; + } + r *= (uint64_t)s->freq; + if (s->reg_clock & CLOCK_CTRL_PS_EN) { + r /= 1 << (((s->reg_clock & CLOCK_CTRL_PS_V) >> 1) + 1); + } + + r /= to_divide; + return r; +} + +/* determine if x is in between a and b, exclusive of a, inclusive of b */ + +static inline int64_t is_between(int64_t x, int64_t a, int64_t b) +{ + if (a < b) { + return x > a && x <= b; + } + return x < a && x >= b; +} + +static void cadence_timer_run(CadenceTimerState *s) +{ + int i; + int64_t event_interval, next_value; + + assert(s->cpu_time_valid); /* cadence_timer_sync must be called first */ + + if (s->reg_count & COUNTER_CTRL_DIS) { + s->cpu_time_valid = 0; + return; + } + + { /* figure out what's going to happen next (rollover or match) */ + int64_t interval = (uint64_t)((s->reg_count & COUNTER_CTRL_INT) ? + (int64_t)s->reg_interval + 1 : 0x10000ULL) << 16; + next_value = (s->reg_count & COUNTER_CTRL_DEC) ? 
-1ULL : interval; + for (i = 0; i < 3; ++i) { + int64_t cand = (uint64_t)s->reg_match[i] << 16; + if (is_between(cand, (uint64_t)s->reg_value, next_value)) { + next_value = cand; + } + } + } + DB_PRINT("next timer event value: %09llx\n", + (unsigned long long)next_value); + + event_interval = next_value - (int64_t)s->reg_value; + event_interval = (event_interval < 0) ? -event_interval : event_interval; + + timer_mod(s->timer, s->cpu_time + + cadence_timer_get_ns(s, event_interval)); +} + +static void cadence_timer_sync(CadenceTimerState *s) +{ + int i; + int64_t r, x; + int64_t interval = ((s->reg_count & COUNTER_CTRL_INT) ? + (int64_t)s->reg_interval + 1 : 0x10000ULL) << 16; + uint64_t old_time = s->cpu_time; + + s->cpu_time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + DB_PRINT("cpu time: %lld ns\n", (long long)old_time); + + if (!s->cpu_time_valid || old_time == s->cpu_time) { + s->cpu_time_valid = 1; + return; + } + + r = (int64_t)cadence_timer_get_steps(s, s->cpu_time - old_time); + x = (int64_t)s->reg_value + ((s->reg_count & COUNTER_CTRL_DEC) ? -r : r); + + for (i = 0; i < 3; ++i) { + int64_t m = (int64_t)s->reg_match[i] << 16; + if (m > interval) { + continue; + } + /* check to see if match event has occurred. check m +/- interval + * to account for match events in wrap around cases */ + if (is_between(m, s->reg_value, x) || + is_between(m + interval, s->reg_value, x) || + is_between(m - interval, s->reg_value, x)) { + s->reg_intr |= (2 << i); + } + } + if ((x < 0) || (x >= interval)) { + s->reg_intr |= (s->reg_count & COUNTER_CTRL_INT) ? + COUNTER_INTR_IV : COUNTER_INTR_OV; + } + while (x < 0) { + x += interval; + } + s->reg_value = (uint32_t)(x % interval); + cadence_timer_update(s); +} + +static void cadence_timer_tick(void *opaque) +{ + CadenceTimerState *s = opaque; + + DB_PRINT("\n"); + cadence_timer_sync(s); + cadence_timer_run(s); +} + +static uint32_t cadence_ttc_read_imp(void *opaque, hwaddr offset) +{ + CadenceTimerState *s = cadence_timer_from_addr(opaque, offset); + uint32_t value; + + cadence_timer_sync(s); + cadence_timer_run(s); + + switch (offset) { + case 0x00: /* clock control */ + case 0x04: + case 0x08: + return s->reg_clock; + + case 0x0c: /* counter control */ + case 0x10: + case 0x14: + return s->reg_count; + + case 0x18: /* counter value */ + case 0x1c: + case 0x20: + return (uint16_t)(s->reg_value >> 16); + + case 0x24: /* reg_interval counter */ + case 0x28: + case 0x2c: + return s->reg_interval; + + case 0x30: /* match 1 counter */ + case 0x34: + case 0x38: + return s->reg_match[0]; + + case 0x3c: /* match 2 counter */ + case 0x40: + case 0x44: + return s->reg_match[1]; + + case 0x48: /* match 3 counter */ + case 0x4c: + case 0x50: + return s->reg_match[2]; + + case 0x54: /* interrupt register */ + case 0x58: + case 0x5c: + /* cleared after read */ + value = s->reg_intr; + s->reg_intr = 0; + cadence_timer_update(s); + return value; + + case 0x60: /* interrupt enable */ + case 0x64: + case 0x68: + return s->reg_intr_en; + + case 0x6c: + case 0x70: + case 0x74: + return s->reg_event_ctrl; + + case 0x78: + case 0x7c: + case 0x80: + return s->reg_event; + + default: + return 0; + } +} + +static uint64_t cadence_ttc_read(void *opaque, hwaddr offset, + unsigned size) +{ + uint32_t ret = cadence_ttc_read_imp(opaque, offset); + + DB_PRINT("addr: %08x data: %08x\n", (unsigned)offset, (unsigned)ret); + return ret; +} + +static void cadence_ttc_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + CadenceTimerState *s = 
cadence_timer_from_addr(opaque, offset); + + DB_PRINT("addr: %08x data %08x\n", (unsigned)offset, (unsigned)value); + + cadence_timer_sync(s); + + switch (offset) { + case 0x00: /* clock control */ + case 0x04: + case 0x08: + s->reg_clock = value & 0x3F; + break; + + case 0x0c: /* counter control */ + case 0x10: + case 0x14: + if (value & COUNTER_CTRL_RST) { + s->reg_value = 0; + } + s->reg_count = value & 0x3f & ~COUNTER_CTRL_RST; + break; + + case 0x24: /* interval register */ + case 0x28: + case 0x2c: + s->reg_interval = value & 0xffff; + break; + + case 0x30: /* match register */ + case 0x34: + case 0x38: + s->reg_match[0] = value & 0xffff; + break; + + case 0x3c: /* match register */ + case 0x40: + case 0x44: + s->reg_match[1] = value & 0xffff; + break; + + case 0x48: /* match register */ + case 0x4c: + case 0x50: + s->reg_match[2] = value & 0xffff; + break; + + case 0x54: /* interrupt register */ + case 0x58: + case 0x5c: + break; + + case 0x60: /* interrupt enable */ + case 0x64: + case 0x68: + s->reg_intr_en = value & 0x3f; + break; + + case 0x6c: /* event control */ + case 0x70: + case 0x74: + s->reg_event_ctrl = value & 0x07; + break; + + default: + return; + } + + cadence_timer_run(s); + cadence_timer_update(s); +} + +static const MemoryRegionOps cadence_ttc_ops = { + .read = cadence_ttc_read, + .write = cadence_ttc_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void cadence_timer_reset(CadenceTimerState *s) +{ + s->reg_count = 0x21; +} + +static void cadence_timer_init(uint32_t freq, CadenceTimerState *s) +{ + memset(s, 0, sizeof(CadenceTimerState)); + s->freq = freq; + + cadence_timer_reset(s); + + s->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, cadence_timer_tick, s); +} + +static void cadence_ttc_init(Object *obj) +{ + CadenceTTCState *s = CADENCE_TTC(obj); + + memory_region_init_io(&s->iomem, obj, &cadence_ttc_ops, s, + "timer", 0x1000); + sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->iomem); +} + +static void cadence_ttc_realize(DeviceState *dev, Error **errp) +{ + CadenceTTCState *s = CADENCE_TTC(dev); + int i; + + for (i = 0; i < 3; ++i) { + cadence_timer_init(133000000, &s->timer[i]); + sysbus_init_irq(SYS_BUS_DEVICE(dev), &s->timer[i].irq); + } +} + +static int cadence_timer_pre_save(void *opaque) +{ + cadence_timer_sync((CadenceTimerState *)opaque); + + return 0; +} + +static int cadence_timer_post_load(void *opaque, int version_id) +{ + CadenceTimerState *s = opaque; + + s->cpu_time_valid = 0; + cadence_timer_sync(s); + cadence_timer_run(s); + cadence_timer_update(s); + return 0; +} + +static const VMStateDescription vmstate_cadence_timer = { + .name = "cadence_timer", + .version_id = 1, + .minimum_version_id = 1, + .pre_save = cadence_timer_pre_save, + .post_load = cadence_timer_post_load, + .fields = (VMStateField[]) { + VMSTATE_UINT32(reg_clock, CadenceTimerState), + VMSTATE_UINT32(reg_count, CadenceTimerState), + VMSTATE_UINT32(reg_value, CadenceTimerState), + VMSTATE_UINT16(reg_interval, CadenceTimerState), + VMSTATE_UINT16_ARRAY(reg_match, CadenceTimerState, 3), + VMSTATE_UINT32(reg_intr, CadenceTimerState), + VMSTATE_UINT32(reg_intr_en, CadenceTimerState), + VMSTATE_UINT32(reg_event_ctrl, CadenceTimerState), + VMSTATE_UINT32(reg_event, CadenceTimerState), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_cadence_ttc = { + .name = "cadence_TTC", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_STRUCT_ARRAY(timer, CadenceTTCState, 3, 0, + vmstate_cadence_timer, + CadenceTimerState), + 
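+ /* + * Note: cpu_time and cpu_time_valid are deliberately absent from + * vmstate_cadence_timer: cadence_timer_post_load clears cpu_time_valid + * and resynchronizes from the destination's virtual clock. + */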
VMSTATE_END_OF_LIST() + } +}; + +static void cadence_ttc_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->vmsd = &vmstate_cadence_ttc; + dc->realize = cadence_ttc_realize; +} + +static const TypeInfo cadence_ttc_info = { + .name = TYPE_CADENCE_TTC, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(CadenceTTCState), + .instance_init = cadence_ttc_init, + .class_init = cadence_ttc_class_init, +}; + +static void cadence_ttc_register_types(void) +{ + type_register_static(&cadence_ttc_info); +} + +type_init(cadence_ttc_register_types) diff --git a/hw/timer/cmsdk-apb-dualtimer.c b/hw/timer/cmsdk-apb-dualtimer.c new file mode 100644 index 000000000..d4a509c79 --- /dev/null +++ b/hw/timer/cmsdk-apb-dualtimer.c @@ -0,0 +1,559 @@ +/* + * ARM CMSDK APB dual-timer emulation + * + * Copyright (c) 2018 Linaro Limited + * Written by Peter Maydell + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 or + * (at your option) any later version. + */ + +/* + * This is a model of the "APB dual-input timer" which is part of the Cortex-M + * System Design Kit (CMSDK) and documented in the Cortex-M System + * Design Kit Technical Reference Manual (ARM DDI0479C): + * https://developer.arm.com/products/system-design/system-design-kits/cortex-m-system-design-kit + */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "trace.h" +#include "qapi/error.h" +#include "qemu/module.h" +#include "hw/sysbus.h" +#include "hw/irq.h" +#include "hw/qdev-properties.h" +#include "hw/registerfields.h" +#include "hw/qdev-clock.h" +#include "hw/timer/cmsdk-apb-dualtimer.h" +#include "migration/vmstate.h" + +REG32(TIMER1LOAD, 0x0) +REG32(TIMER1VALUE, 0x4) +REG32(TIMER1CONTROL, 0x8) + FIELD(CONTROL, ONESHOT, 0, 1) + FIELD(CONTROL, SIZE, 1, 1) + FIELD(CONTROL, PRESCALE, 2, 2) + FIELD(CONTROL, INTEN, 5, 1) + FIELD(CONTROL, MODE, 6, 1) + FIELD(CONTROL, ENABLE, 7, 1) +#define R_CONTROL_VALID_MASK (R_CONTROL_ONESHOT_MASK | R_CONTROL_SIZE_MASK | \ + R_CONTROL_PRESCALE_MASK | R_CONTROL_INTEN_MASK | \ + R_CONTROL_MODE_MASK | R_CONTROL_ENABLE_MASK) +REG32(TIMER1INTCLR, 0xc) +REG32(TIMER1RIS, 0x10) +REG32(TIMER1MIS, 0x14) +REG32(TIMER1BGLOAD, 0x18) +REG32(TIMER2LOAD, 0x20) +REG32(TIMER2VALUE, 0x24) +REG32(TIMER2CONTROL, 0x28) +REG32(TIMER2INTCLR, 0x2c) +REG32(TIMER2RIS, 0x30) +REG32(TIMER2MIS, 0x34) +REG32(TIMER2BGLOAD, 0x38) +REG32(TIMERITCR, 0xf00) + FIELD(TIMERITCR, ENABLE, 0, 1) +#define R_TIMERITCR_VALID_MASK R_TIMERITCR_ENABLE_MASK +REG32(TIMERITOP, 0xf04) + FIELD(TIMERITOP, TIMINT1, 0, 1) + FIELD(TIMERITOP, TIMINT2, 1, 1) +#define R_TIMERITOP_VALID_MASK (R_TIMERITOP_TIMINT1_MASK | \ + R_TIMERITOP_TIMINT2_MASK) +REG32(PID4, 0xfd0) +REG32(PID5, 0xfd4) +REG32(PID6, 0xfd8) +REG32(PID7, 0xfdc) +REG32(PID0, 0xfe0) +REG32(PID1, 0xfe4) +REG32(PID2, 0xfe8) +REG32(PID3, 0xfec) +REG32(CID0, 0xff0) +REG32(CID1, 0xff4) +REG32(CID2, 0xff8) +REG32(CID3, 0xffc) + +/* PID/CID values */ +static const int timer_id[] = { + 0x04, 0x00, 0x00, 0x00, /* PID4..PID7 */ + 0x23, 0xb8, 0x1b, 0x00, /* PID0..PID3 */ + 0x0d, 0xf0, 0x05, 0xb1, /* CID0..CID3 */ +}; + +static bool cmsdk_dualtimermod_intstatus(CMSDKAPBDualTimerModule *m) +{ + /* Return masked interrupt status for the timer module */ + return m->intstatus && (m->control & R_CONTROL_INTEN_MASK); +} + +static void cmsdk_apb_dualtimer_update(CMSDKAPBDualTimer *s) +{ + bool timint1, timint2, timintc; + + if (s->timeritcr) { + /* Integration test mode: outputs 
driven directly from TIMERITOP bits */ + timint1 = s->timeritop & R_TIMERITOP_TIMINT1_MASK; + timint2 = s->timeritop & R_TIMERITOP_TIMINT2_MASK; + } else { + timint1 = cmsdk_dualtimermod_intstatus(&s->timermod[0]); + timint2 = cmsdk_dualtimermod_intstatus(&s->timermod[1]); + } + + timintc = timint1 || timint2; + + qemu_set_irq(s->timermod[0].timerint, timint1); + qemu_set_irq(s->timermod[1].timerint, timint2); + qemu_set_irq(s->timerintc, timintc); +} + +static int cmsdk_dualtimermod_divisor(CMSDKAPBDualTimerModule *m) +{ + /* Return the divisor set by the current CONTROL.PRESCALE value */ + switch (FIELD_EX32(m->control, CONTROL, PRESCALE)) { + case 0: + return 1; + case 1: + return 16; + case 2: + case 3: /* UNDEFINED, we treat like 2 (and complained when it was set) */ + return 256; + default: + g_assert_not_reached(); + } +} + +static void cmsdk_dualtimermod_write_control(CMSDKAPBDualTimerModule *m, + uint32_t newctrl) +{ + /* Handle a write to the CONTROL register */ + uint32_t changed; + + ptimer_transaction_begin(m->timer); + + newctrl &= R_CONTROL_VALID_MASK; + + changed = m->control ^ newctrl; + + if (changed & ~newctrl & R_CONTROL_ENABLE_MASK) { + /* ENABLE cleared, stop timer before any further changes */ + ptimer_stop(m->timer); + } + + if (changed & R_CONTROL_PRESCALE_MASK) { + int divisor; + + switch (FIELD_EX32(newctrl, CONTROL, PRESCALE)) { + case 0: + divisor = 1; + break; + case 1: + divisor = 16; + break; + case 2: + divisor = 256; + break; + case 3: + /* UNDEFINED; complain, and arbitrarily treat like 2 */ + qemu_log_mask(LOG_GUEST_ERROR, + "CMSDK APB dual-timer: CONTROL.PRESCALE==0b11" + " is undefined behaviour\n"); + divisor = 256; + break; + default: + g_assert_not_reached(); + } + ptimer_set_period_from_clock(m->timer, m->parent->timclk, divisor); + } + + if (changed & R_CONTROL_MODE_MASK) { + uint32_t load; + if (newctrl & R_CONTROL_MODE_MASK) { + /* Periodic: the limit is the LOAD register value */ + load = m->load; + } else { + /* Free-running: counter wraps around */ + load = ptimer_get_limit(m->timer); + if (!(m->control & R_CONTROL_SIZE_MASK)) { + load = deposit32(m->load, 0, 16, load); + } + m->load = load; + load = 0xffffffff; + } + if (!(m->control & R_CONTROL_SIZE_MASK)) { + load &= 0xffff; + } + ptimer_set_limit(m->timer, load, 0); + } + + if (changed & R_CONTROL_SIZE_MASK) { + /* Timer switched between 16 and 32 bit count */ + uint32_t value, load; + + value = ptimer_get_count(m->timer); + load = ptimer_get_limit(m->timer); + if (newctrl & R_CONTROL_SIZE_MASK) { + /* 16 -> 32, top half of VALUE is in struct field */ + value = deposit32(m->value, 0, 16, value); + } else { + /* 32 -> 16: save top half to struct field and truncate */ + m->value = value; + value &= 0xffff; + } + + if (newctrl & R_CONTROL_MODE_MASK) { + /* Periodic, timer limit has LOAD value */ + if (newctrl & R_CONTROL_SIZE_MASK) { + load = deposit32(m->load, 0, 16, load); + } else { + m->load = load; + load &= 0xffff; + } + } else { + /* Free-running, timer limit is set to give wraparound */ + if (newctrl & R_CONTROL_SIZE_MASK) { + load = 0xffffffff; + } else { + load = 0xffff; + } + } + ptimer_set_count(m->timer, value); + ptimer_set_limit(m->timer, load, 0); + } + + if (newctrl & R_CONTROL_ENABLE_MASK) { + /* + * ENABLE is set; start the timer after all other changes. + * We start it even if the ENABLE bit didn't actually change, + * in case the timer was an expired one-shot timer that has + * now been changed into a free-running or periodic timer. 
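+ * The oneshot argument to ptimer_run() mirrors CONTROL.ONESHOT, so a + * one-shot timer stops after its first trigger instead of reloading.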
+ */ + ptimer_run(m->timer, !!(newctrl & R_CONTROL_ONESHOT_MASK)); + } + + m->control = newctrl; + + ptimer_transaction_commit(m->timer); +} + +static uint64_t cmsdk_apb_dualtimer_read(void *opaque, hwaddr offset, + unsigned size) +{ + CMSDKAPBDualTimer *s = CMSDK_APB_DUALTIMER(opaque); + uint64_t r; + + if (offset >= A_TIMERITCR) { + switch (offset) { + case A_TIMERITCR: + r = s->timeritcr; + break; + case A_PID4 ... A_CID3: + r = timer_id[(offset - A_PID4) / 4]; + break; + default: + bad_offset: + qemu_log_mask(LOG_GUEST_ERROR, + "CMSDK APB dual-timer read: bad offset %x\n", + (int) offset); + r = 0; + break; + } + } else { + int timer = offset >> 5; + CMSDKAPBDualTimerModule *m; + + if (timer >= ARRAY_SIZE(s->timermod)) { + goto bad_offset; + } + + m = &s->timermod[timer]; + + switch (offset & 0x1F) { + case A_TIMER1LOAD: + case A_TIMER1BGLOAD: + if (m->control & R_CONTROL_MODE_MASK) { + /* + * Periodic: the ptimer limit is the LOAD register value, (or + * just the low 16 bits of it if the timer is in 16-bit mode) + */ + r = ptimer_get_limit(m->timer); + if (!(m->control & R_CONTROL_SIZE_MASK)) { + r = deposit32(m->load, 0, 16, r); + } + } else { + /* Free-running: LOAD register value is just in m->load */ + r = m->load; + } + break; + case A_TIMER1VALUE: + r = ptimer_get_count(m->timer); + if (!(m->control & R_CONTROL_SIZE_MASK)) { + r = deposit32(m->value, 0, 16, r); + } + break; + case A_TIMER1CONTROL: + r = m->control; + break; + case A_TIMER1RIS: + r = m->intstatus; + break; + case A_TIMER1MIS: + r = cmsdk_dualtimermod_intstatus(m); + break; + default: + goto bad_offset; + } + } + + trace_cmsdk_apb_dualtimer_read(offset, r, size); + return r; +} + +static void cmsdk_apb_dualtimer_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + CMSDKAPBDualTimer *s = CMSDK_APB_DUALTIMER(opaque); + + trace_cmsdk_apb_dualtimer_write(offset, value, size); + + if (offset >= A_TIMERITCR) { + switch (offset) { + case A_TIMERITCR: + s->timeritcr = value & R_TIMERITCR_VALID_MASK; + cmsdk_apb_dualtimer_update(s); + break; + case A_TIMERITOP: + s->timeritop = value & R_TIMERITOP_VALID_MASK; + cmsdk_apb_dualtimer_update(s); + break; + default: + bad_offset: + qemu_log_mask(LOG_GUEST_ERROR, + "CMSDK APB dual-timer write: bad offset %x\n", + (int) offset); + break; + } + } else { + int timer = offset >> 5; + CMSDKAPBDualTimerModule *m; + + if (timer >= ARRAY_SIZE(s->timermod)) { + goto bad_offset; + } + + m = &s->timermod[timer]; + + switch (offset & 0x1F) { + case A_TIMER1LOAD: + /* Set the limit, and immediately reload the count from it */ + m->load = value; + m->value = value; + if (!(m->control & R_CONTROL_SIZE_MASK)) { + value &= 0xffff; + } + ptimer_transaction_begin(m->timer); + if (!(m->control & R_CONTROL_MODE_MASK)) { + /* + * In free-running mode this won't set the limit but will + * still change the current count value. 
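+ * (m->load was already updated above, so a later switch to periodic + * mode via CONTROL.MODE still picks up the full LOAD value.)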
+ */ + ptimer_set_count(m->timer, value); + } else { + if (!value) { + ptimer_stop(m->timer); + } + ptimer_set_limit(m->timer, value, 1); + if (value && (m->control & R_CONTROL_ENABLE_MASK)) { + /* Force possibly-expired oneshot timer to restart */ + ptimer_run(m->timer, 1); + } + } + ptimer_transaction_commit(m->timer); + break; + case A_TIMER1BGLOAD: + /* Set the limit, but not the current count */ + m->load = value; + if (!(m->control & R_CONTROL_MODE_MASK)) { + /* In free-running mode there is no limit */ + break; + } + if (!(m->control & R_CONTROL_SIZE_MASK)) { + value &= 0xffff; + } + ptimer_transaction_begin(m->timer); + ptimer_set_limit(m->timer, value, 0); + ptimer_transaction_commit(m->timer); + break; + case A_TIMER1CONTROL: + cmsdk_dualtimermod_write_control(m, value); + cmsdk_apb_dualtimer_update(s); + break; + case A_TIMER1INTCLR: + m->intstatus = 0; + cmsdk_apb_dualtimer_update(s); + break; + default: + goto bad_offset; + } + } +} + +static const MemoryRegionOps cmsdk_apb_dualtimer_ops = { + .read = cmsdk_apb_dualtimer_read, + .write = cmsdk_apb_dualtimer_write, + .endianness = DEVICE_LITTLE_ENDIAN, + /* byte/halfword accesses are just zero-padded on reads and writes */ + .impl.min_access_size = 4, + .impl.max_access_size = 4, + .valid.min_access_size = 1, + .valid.max_access_size = 4, +}; + +static void cmsdk_dualtimermod_tick(void *opaque) +{ + CMSDKAPBDualTimerModule *m = opaque; + + m->intstatus = 1; + cmsdk_apb_dualtimer_update(m->parent); +} + +static void cmsdk_dualtimermod_reset(CMSDKAPBDualTimerModule *m) +{ + m->control = R_CONTROL_INTEN_MASK; + m->intstatus = 0; + m->load = 0; + m->value = 0xffffffff; + ptimer_transaction_begin(m->timer); + ptimer_stop(m->timer); + /* + * We start in free-running mode, with VALUE at 0xffffffff, and + * in 16-bit counter mode. This means that the ptimer count and + * limit must both be set to 0xffff, so we wrap at 16 bits. 
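+ * (Passing 1 as the reload argument of ptimer_set_limit() below also + * loads the count from the new limit, so both are set in one call.)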
+ */ + ptimer_set_limit(m->timer, 0xffff, 1); + ptimer_set_period_from_clock(m->timer, m->parent->timclk, + cmsdk_dualtimermod_divisor(m)); + ptimer_transaction_commit(m->timer); +} + +static void cmsdk_apb_dualtimer_reset(DeviceState *dev) +{ + CMSDKAPBDualTimer *s = CMSDK_APB_DUALTIMER(dev); + int i; + + trace_cmsdk_apb_dualtimer_reset(); + + for (i = 0; i < ARRAY_SIZE(s->timermod); i++) { + cmsdk_dualtimermod_reset(&s->timermod[i]); + } + s->timeritcr = 0; + s->timeritop = 0; +} + +static void cmsdk_apb_dualtimer_clk_update(void *opaque, ClockEvent event) +{ + CMSDKAPBDualTimer *s = CMSDK_APB_DUALTIMER(opaque); + int i; + + for (i = 0; i < ARRAY_SIZE(s->timermod); i++) { + CMSDKAPBDualTimerModule *m = &s->timermod[i]; + ptimer_transaction_begin(m->timer); + ptimer_set_period_from_clock(m->timer, m->parent->timclk, + cmsdk_dualtimermod_divisor(m)); + ptimer_transaction_commit(m->timer); + } +} + +static void cmsdk_apb_dualtimer_init(Object *obj) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + CMSDKAPBDualTimer *s = CMSDK_APB_DUALTIMER(obj); + int i; + + memory_region_init_io(&s->iomem, obj, &cmsdk_apb_dualtimer_ops, + s, "cmsdk-apb-dualtimer", 0x1000); + sysbus_init_mmio(sbd, &s->iomem); + sysbus_init_irq(sbd, &s->timerintc); + + for (i = 0; i < ARRAY_SIZE(s->timermod); i++) { + sysbus_init_irq(sbd, &s->timermod[i].timerint); + } + s->timclk = qdev_init_clock_in(DEVICE(s), "TIMCLK", + cmsdk_apb_dualtimer_clk_update, s, + ClockUpdate); +} + +static void cmsdk_apb_dualtimer_realize(DeviceState *dev, Error **errp) +{ + CMSDKAPBDualTimer *s = CMSDK_APB_DUALTIMER(dev); + int i; + + if (!clock_has_source(s->timclk)) { + error_setg(errp, "CMSDK APB dualtimer: TIMCLK clock must be connected"); + return; + } + + for (i = 0; i < ARRAY_SIZE(s->timermod); i++) { + CMSDKAPBDualTimerModule *m = &s->timermod[i]; + + m->parent = s; + m->timer = ptimer_init(cmsdk_dualtimermod_tick, m, + PTIMER_POLICY_WRAP_AFTER_ONE_PERIOD | + PTIMER_POLICY_TRIGGER_ONLY_ON_DECREMENT | + PTIMER_POLICY_NO_IMMEDIATE_RELOAD | + PTIMER_POLICY_NO_COUNTER_ROUND_DOWN); + } +} + +static const VMStateDescription cmsdk_dualtimermod_vmstate = { + .name = "cmsdk-apb-dualtimer-module", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_PTIMER(timer, CMSDKAPBDualTimerModule), + VMSTATE_UINT32(load, CMSDKAPBDualTimerModule), + VMSTATE_UINT32(value, CMSDKAPBDualTimerModule), + VMSTATE_UINT32(control, CMSDKAPBDualTimerModule), + VMSTATE_UINT32(intstatus, CMSDKAPBDualTimerModule), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription cmsdk_apb_dualtimer_vmstate = { + .name = "cmsdk-apb-dualtimer", + .version_id = 2, + .minimum_version_id = 2, + .fields = (VMStateField[]) { + VMSTATE_CLOCK(timclk, CMSDKAPBDualTimer), + VMSTATE_STRUCT_ARRAY(timermod, CMSDKAPBDualTimer, + CMSDK_APB_DUALTIMER_NUM_MODULES, + 1, cmsdk_dualtimermod_vmstate, + CMSDKAPBDualTimerModule), + VMSTATE_UINT32(timeritcr, CMSDKAPBDualTimer), + VMSTATE_UINT32(timeritop, CMSDKAPBDualTimer), + VMSTATE_END_OF_LIST() + } +}; + +static void cmsdk_apb_dualtimer_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = cmsdk_apb_dualtimer_realize; + dc->vmsd = &cmsdk_apb_dualtimer_vmstate; + dc->reset = cmsdk_apb_dualtimer_reset; +} + +static const TypeInfo cmsdk_apb_dualtimer_info = { + .name = TYPE_CMSDK_APB_DUALTIMER, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(CMSDKAPBDualTimer), + .instance_init = cmsdk_apb_dualtimer_init, + .class_init = 
cmsdk_apb_dualtimer_class_init, +}; + +static void cmsdk_apb_dualtimer_register_types(void) +{ + type_register_static(&cmsdk_apb_dualtimer_info); +} + +type_init(cmsdk_apb_dualtimer_register_types); diff --git a/hw/timer/cmsdk-apb-timer.c b/hw/timer/cmsdk-apb-timer.c new file mode 100644 index 000000000..68aa1a763 --- /dev/null +++ b/hw/timer/cmsdk-apb-timer.c @@ -0,0 +1,286 @@ +/* + * ARM CMSDK APB timer emulation + * + * Copyright (c) 2017 Linaro Limited + * Written by Peter Maydell + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 or + * (at your option) any later version. + */ + +/* This is a model of the "APB timer" which is part of the Cortex-M + * System Design Kit (CMSDK) and documented in the Cortex-M System + * Design Kit Technical Reference Manual (ARM DDI0479C): + * https://developer.arm.com/products/system-design/system-design-kits/cortex-m-system-design-kit + * + * The hardware has an EXTIN input wire, which can be configured + * by the guest to act either as a 'timer enable' (timer does not run + * when EXTIN is low), or as a 'timer clock' (timer runs at frequency + * of EXTIN clock, not PCLK frequency). We don't model this. + * + * The documentation is not very clear about the exact behaviour; + * we choose to implement that the interrupt is triggered when + * the counter goes from 1 to 0, that the counter then holds at 0 + * for one clock cycle before reloading from the RELOAD register, + * and that if the RELOAD register is 0 this does not cause an + * interrupt (as there is no further 1->0 transition). + */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "qapi/error.h" +#include "trace.h" +#include "hw/sysbus.h" +#include "hw/irq.h" +#include "hw/registerfields.h" +#include "hw/qdev-clock.h" +#include "hw/timer/cmsdk-apb-timer.h" +#include "migration/vmstate.h" + +REG32(CTRL, 0) + FIELD(CTRL, EN, 0, 1) + FIELD(CTRL, SELEXTEN, 1, 1) + FIELD(CTRL, SELEXTCLK, 2, 1) + FIELD(CTRL, IRQEN, 3, 1) +REG32(VALUE, 4) +REG32(RELOAD, 8) +REG32(INTSTATUS, 0xc) + FIELD(INTSTATUS, IRQ, 0, 1) +REG32(PID4, 0xFD0) +REG32(PID5, 0xFD4) +REG32(PID6, 0xFD8) +REG32(PID7, 0xFDC) +REG32(PID0, 0xFE0) +REG32(PID1, 0xFE4) +REG32(PID2, 0xFE8) +REG32(PID3, 0xFEC) +REG32(CID0, 0xFF0) +REG32(CID1, 0xFF4) +REG32(CID2, 0xFF8) +REG32(CID3, 0xFFC) + +/* PID/CID values */ +static const int timer_id[] = { + 0x04, 0x00, 0x00, 0x00, /* PID4..PID7 */ + 0x22, 0xb8, 0x1b, 0x00, /* PID0..PID3 */ + 0x0d, 0xf0, 0x05, 0xb1, /* CID0..CID3 */ +}; + +static void cmsdk_apb_timer_update(CMSDKAPBTimer *s) +{ + qemu_set_irq(s->timerint, !!(s->intstatus & R_INTSTATUS_IRQ_MASK)); +} + +static uint64_t cmsdk_apb_timer_read(void *opaque, hwaddr offset, unsigned size) +{ + CMSDKAPBTimer *s = CMSDK_APB_TIMER(opaque); + uint64_t r; + + switch (offset) { + case A_CTRL: + r = s->ctrl; + break; + case A_VALUE: + r = ptimer_get_count(s->timer); + break; + case A_RELOAD: + r = ptimer_get_limit(s->timer); + break; + case A_INTSTATUS: + r = s->intstatus; + break; + case A_PID4 ... 
A_CID3: + r = timer_id[(offset - A_PID4) / 4]; + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "CMSDK APB timer read: bad offset %x\n", (int) offset); + r = 0; + break; + } + trace_cmsdk_apb_timer_read(offset, r, size); + return r; +} + +static void cmsdk_apb_timer_write(void *opaque, hwaddr offset, uint64_t value, + unsigned size) +{ + CMSDKAPBTimer *s = CMSDK_APB_TIMER(opaque); + + trace_cmsdk_apb_timer_write(offset, value, size); + + switch (offset) { + case A_CTRL: + if (value & 6) { + /* Bits [1] and [2] enable using EXTIN as either clock or + * an enable line. We don't model this. + */ + qemu_log_mask(LOG_UNIMP, + "CMSDK APB timer: EXTIN input not supported\n"); + } + s->ctrl = value & 0xf; + ptimer_transaction_begin(s->timer); + if (s->ctrl & R_CTRL_EN_MASK) { + ptimer_run(s->timer, ptimer_get_limit(s->timer) == 0); + } else { + ptimer_stop(s->timer); + } + ptimer_transaction_commit(s->timer); + break; + case A_RELOAD: + /* Writing to reload also sets the current timer value */ + ptimer_transaction_begin(s->timer); + if (!value) { + ptimer_stop(s->timer); + } + ptimer_set_limit(s->timer, value, 1); + if (value && (s->ctrl & R_CTRL_EN_MASK)) { + /* + * Make sure timer is running (it might have stopped if this + * was an expired one-shot timer) + */ + ptimer_run(s->timer, 0); + } + ptimer_transaction_commit(s->timer); + break; + case A_VALUE: + ptimer_transaction_begin(s->timer); + if (!value && !ptimer_get_limit(s->timer)) { + ptimer_stop(s->timer); + } + ptimer_set_count(s->timer, value); + if (value && (s->ctrl & R_CTRL_EN_MASK)) { + ptimer_run(s->timer, ptimer_get_limit(s->timer) == 0); + } + ptimer_transaction_commit(s->timer); + break; + case A_INTSTATUS: + /* Just one bit, which is W1C. */ + value &= 1; + s->intstatus &= ~value; + cmsdk_apb_timer_update(s); + break; + case A_PID4 ... 
A_CID3: + qemu_log_mask(LOG_GUEST_ERROR, + "CMSDK APB timer write: write to RO offset 0x%x\n", + (int)offset); + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "CMSDK APB timer write: bad offset 0x%x\n", (int) offset); + break; + } +} + +static const MemoryRegionOps cmsdk_apb_timer_ops = { + .read = cmsdk_apb_timer_read, + .write = cmsdk_apb_timer_write, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static void cmsdk_apb_timer_tick(void *opaque) +{ + CMSDKAPBTimer *s = CMSDK_APB_TIMER(opaque); + + if (s->ctrl & R_CTRL_IRQEN_MASK) { + s->intstatus |= R_INTSTATUS_IRQ_MASK; + cmsdk_apb_timer_update(s); + } +} + +static void cmsdk_apb_timer_reset(DeviceState *dev) +{ + CMSDKAPBTimer *s = CMSDK_APB_TIMER(dev); + + trace_cmsdk_apb_timer_reset(); + s->ctrl = 0; + s->intstatus = 0; + ptimer_transaction_begin(s->timer); + ptimer_stop(s->timer); + /* Set the limit and the count */ + ptimer_set_limit(s->timer, 0, 1); + ptimer_transaction_commit(s->timer); +} + +static void cmsdk_apb_timer_clk_update(void *opaque, ClockEvent event) +{ + CMSDKAPBTimer *s = CMSDK_APB_TIMER(opaque); + + ptimer_transaction_begin(s->timer); + ptimer_set_period_from_clock(s->timer, s->pclk, 1); + ptimer_transaction_commit(s->timer); +} + +static void cmsdk_apb_timer_init(Object *obj) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + CMSDKAPBTimer *s = CMSDK_APB_TIMER(obj); + + memory_region_init_io(&s->iomem, obj, &cmsdk_apb_timer_ops, + s, "cmsdk-apb-timer", 0x1000); + sysbus_init_mmio(sbd, &s->iomem); + sysbus_init_irq(sbd, &s->timerint); + s->pclk = qdev_init_clock_in(DEVICE(s), "pclk", + cmsdk_apb_timer_clk_update, s, ClockUpdate); +} + +static void cmsdk_apb_timer_realize(DeviceState *dev, Error **errp) +{ + CMSDKAPBTimer *s = CMSDK_APB_TIMER(dev); + + if (!clock_has_source(s->pclk)) { + error_setg(errp, "CMSDK APB timer: pclk clock must be connected"); + return; + } + + s->timer = ptimer_init(cmsdk_apb_timer_tick, s, + PTIMER_POLICY_WRAP_AFTER_ONE_PERIOD | + PTIMER_POLICY_TRIGGER_ONLY_ON_DECREMENT | + PTIMER_POLICY_NO_IMMEDIATE_RELOAD | + PTIMER_POLICY_NO_COUNTER_ROUND_DOWN); + + ptimer_transaction_begin(s->timer); + ptimer_set_period_from_clock(s->timer, s->pclk, 1); + ptimer_transaction_commit(s->timer); +} + +static const VMStateDescription cmsdk_apb_timer_vmstate = { + .name = "cmsdk-apb-timer", + .version_id = 2, + .minimum_version_id = 2, + .fields = (VMStateField[]) { + VMSTATE_PTIMER(timer, CMSDKAPBTimer), + VMSTATE_CLOCK(pclk, CMSDKAPBTimer), + VMSTATE_UINT32(ctrl, CMSDKAPBTimer), + VMSTATE_UINT32(value, CMSDKAPBTimer), + VMSTATE_UINT32(reload, CMSDKAPBTimer), + VMSTATE_UINT32(intstatus, CMSDKAPBTimer), + VMSTATE_END_OF_LIST() + } +}; + +static void cmsdk_apb_timer_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = cmsdk_apb_timer_realize; + dc->vmsd = &cmsdk_apb_timer_vmstate; + dc->reset = cmsdk_apb_timer_reset; +} + +static const TypeInfo cmsdk_apb_timer_info = { + .name = TYPE_CMSDK_APB_TIMER, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(CMSDKAPBTimer), + .instance_init = cmsdk_apb_timer_init, + .class_init = cmsdk_apb_timer_class_init, +}; + +static void cmsdk_apb_timer_register_types(void) +{ + type_register_static(&cmsdk_apb_timer_info); +} + +type_init(cmsdk_apb_timer_register_types); diff --git a/hw/timer/digic-timer.c b/hw/timer/digic-timer.c new file mode 100644 index 000000000..e3aae4a45 --- /dev/null +++ b/hw/timer/digic-timer.c @@ -0,0 +1,186 @@ +/* + * QEMU model of the Canon DIGIC timer block. 
+ * + * Copyright (C) 2013 Antony Pavlov + * + * This model is based on reverse engineering efforts + * made by CHDK (http://chdk.wikia.com) and + * Magic Lantern (http://www.magiclantern.fm) projects + * contributors. + * + * See "Timer/Clock Module" docs here: + * http://magiclantern.wikia.com/wiki/Register_Map + * + * The QEMU model of the OSTimer in PKUnity SoC by Guan Xuetao + * is used as a template. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include "qemu/osdep.h" +#include "hw/sysbus.h" +#include "hw/ptimer.h" +#include "qemu/module.h" +#include "qemu/log.h" + +#include "hw/timer/digic-timer.h" +#include "migration/vmstate.h" + +static const VMStateDescription vmstate_digic_timer = { + .name = "digic.timer", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_PTIMER(ptimer, DigicTimerState), + VMSTATE_UINT32(control, DigicTimerState), + VMSTATE_UINT32(relvalue, DigicTimerState), + VMSTATE_END_OF_LIST() + } +}; + +static void digic_timer_reset(DeviceState *dev) +{ + DigicTimerState *s = DIGIC_TIMER(dev); + + ptimer_transaction_begin(s->ptimer); + ptimer_stop(s->ptimer); + ptimer_transaction_commit(s->ptimer); + s->control = 0; + s->relvalue = 0; +} + +static uint64_t digic_timer_read(void *opaque, hwaddr offset, unsigned size) +{ + DigicTimerState *s = opaque; + uint64_t ret = 0; + + switch (offset) { + case DIGIC_TIMER_CONTROL: + ret = s->control; + break; + case DIGIC_TIMER_RELVALUE: + ret = s->relvalue; + break; + case DIGIC_TIMER_VALUE: + ret = ptimer_get_count(s->ptimer) & 0xffff; + break; + default: + qemu_log_mask(LOG_UNIMP, + "digic-timer: read access to unknown register 0x" + TARGET_FMT_plx "\n", offset); + } + + return ret; +} + +static void digic_timer_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + DigicTimerState *s = opaque; + + switch (offset) { + case DIGIC_TIMER_CONTROL: + if (value & DIGIC_TIMER_CONTROL_RST) { + digic_timer_reset((DeviceState *)s); + break; + } + + ptimer_transaction_begin(s->ptimer); + if (value & DIGIC_TIMER_CONTROL_EN) { + ptimer_run(s->ptimer, 0); + } + + s->control = (uint32_t)value; + ptimer_transaction_commit(s->ptimer); + break; + + case DIGIC_TIMER_RELVALUE: + s->relvalue = extract32(value, 0, 16); + ptimer_transaction_begin(s->ptimer); + ptimer_set_limit(s->ptimer, s->relvalue, 1); + ptimer_transaction_commit(s->ptimer); + break; + + case DIGIC_TIMER_VALUE: + break; + + default: + qemu_log_mask(LOG_UNIMP, + "digic-timer: write access to unknown register 0x" + TARGET_FMT_plx "\n", offset); + } +} + +static const MemoryRegionOps digic_timer_ops = { + .read = digic_timer_read, + .write = digic_timer_write, + .impl = { + .min_access_size = 4, + .max_access_size = 4, + }, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void digic_timer_tick(void *opaque) +{ + /* Nothing to do on timer rollover */ +} + +static void digic_timer_init(Object *obj) +{ + DigicTimerState *s = DIGIC_TIMER(obj); + + s->ptimer = ptimer_init(digic_timer_tick, NULL, PTIMER_POLICY_DEFAULT); + + /* + * FIXME: there is no documentation on 
Digic timer + * frequency setup so let it always run at 1 MHz + */ + ptimer_transaction_begin(s->ptimer); + ptimer_set_freq(s->ptimer, 1 * 1000 * 1000); + ptimer_transaction_commit(s->ptimer); + + memory_region_init_io(&s->iomem, OBJECT(s), &digic_timer_ops, s, + TYPE_DIGIC_TIMER, 0x100); + sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->iomem); +} + +static void digic_timer_finalize(Object *obj) +{ + DigicTimerState *s = DIGIC_TIMER(obj); + + ptimer_free(s->ptimer); +} + +static void digic_timer_class_init(ObjectClass *klass, void *class_data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = digic_timer_reset; + dc->vmsd = &vmstate_digic_timer; +} + +static const TypeInfo digic_timer_info = { + .name = TYPE_DIGIC_TIMER, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(DigicTimerState), + .instance_init = digic_timer_init, + .instance_finalize = digic_timer_finalize, + .class_init = digic_timer_class_init, +}; + +static void digic_timer_register_type(void) +{ + type_register_static(&digic_timer_info); +} + +type_init(digic_timer_register_type) diff --git a/hw/timer/etraxfs_timer.c b/hw/timer/etraxfs_timer.c new file mode 100644 index 000000000..4ba662190 --- /dev/null +++ b/hw/timer/etraxfs_timer.c @@ -0,0 +1,376 @@ +/* + * QEMU ETRAX Timers + * + * Copyright (c) 2007 Edgar E. Iglesias, Axis Communications AB. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "hw/sysbus.h" +#include "sysemu/reset.h" +#include "sysemu/runstate.h" +#include "qemu/module.h" +#include "qemu/timer.h" +#include "hw/irq.h" +#include "hw/ptimer.h" +#include "qom/object.h" + +#define D(x) + +#define RW_TMR0_DIV 0x00 +#define R_TMR0_DATA 0x04 +#define RW_TMR0_CTRL 0x08 +#define RW_TMR1_DIV 0x10 +#define R_TMR1_DATA 0x14 +#define RW_TMR1_CTRL 0x18 +#define R_TIME 0x38 +#define RW_WD_CTRL 0x40 +#define R_WD_STAT 0x44 +#define RW_INTR_MASK 0x48 +#define RW_ACK_INTR 0x4c +#define R_INTR 0x50 +#define R_MASKED_INTR 0x54 + +#define TYPE_ETRAX_FS_TIMER "etraxfs-timer" +typedef struct ETRAXTimerState ETRAXTimerState; +DECLARE_INSTANCE_CHECKER(ETRAXTimerState, ETRAX_TIMER, + TYPE_ETRAX_FS_TIMER) + +struct ETRAXTimerState { + SysBusDevice parent_obj; + + MemoryRegion mmio; + qemu_irq irq; + qemu_irq nmi; + + ptimer_state *ptimer_t0; + ptimer_state *ptimer_t1; + ptimer_state *ptimer_wd; + + int wd_hits; + + /* Control registers. 
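+ These fields shadow the guest-visible register file; timer_read() + derives R_TMR0_DATA, R_TMR1_DATA and R_TIME on demand from the ptimers + and the virtual clock rather than from the stored values.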
*/ + uint32_t rw_tmr0_div; + uint32_t r_tmr0_data; + uint32_t rw_tmr0_ctrl; + + uint32_t rw_tmr1_div; + uint32_t r_tmr1_data; + uint32_t rw_tmr1_ctrl; + + uint32_t rw_wd_ctrl; + + uint32_t rw_intr_mask; + uint32_t rw_ack_intr; + uint32_t r_intr; + uint32_t r_masked_intr; +}; + +static uint64_t +timer_read(void *opaque, hwaddr addr, unsigned int size) +{ + ETRAXTimerState *t = opaque; + uint32_t r = 0; + + switch (addr) { + case R_TMR0_DATA: + r = ptimer_get_count(t->ptimer_t0); + break; + case R_TMR1_DATA: + r = ptimer_get_count(t->ptimer_t1); + break; + case R_TIME: + r = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / 10; + break; + case RW_INTR_MASK: + r = t->rw_intr_mask; + break; + case R_MASKED_INTR: + r = t->r_intr & t->rw_intr_mask; + break; + default: + D(printf ("%s %x\n", __func__, addr)); + break; + } + return r; +} + +static void update_ctrl(ETRAXTimerState *t, int tnum) +{ + unsigned int op; + unsigned int freq; + unsigned int freq_hz; + unsigned int div; + uint32_t ctrl; + + ptimer_state *timer; + + if (tnum == 0) { + ctrl = t->rw_tmr0_ctrl; + div = t->rw_tmr0_div; + timer = t->ptimer_t0; + } else { + ctrl = t->rw_tmr1_ctrl; + div = t->rw_tmr1_div; + timer = t->ptimer_t1; + } + + + op = ctrl & 3; + freq = ctrl >> 2; + freq_hz = 32000000; + + switch (freq) + { + case 0: + case 1: + D(printf ("extern or disabled timer clock?\n")); + break; + case 4: freq_hz = 29493000; break; + case 5: freq_hz = 32000000; break; + case 6: freq_hz = 32768000; break; + case 7: freq_hz = 100000000; break; + default: + abort(); + break; + } + + D(printf ("freq_hz=%d div=%d\n", freq_hz, div)); + ptimer_transaction_begin(timer); + ptimer_set_freq(timer, freq_hz); + ptimer_set_limit(timer, div, 0); + + switch (op) + { + case 0: + /* Load. */ + ptimer_set_limit(timer, div, 1); + break; + case 1: + /* Hold. */ + ptimer_stop(timer); + break; + case 2: + /* Run. */ + ptimer_run(timer, 0); + break; + default: + abort(); + break; + } + ptimer_transaction_commit(timer); +} + +static void timer_update_irq(ETRAXTimerState *t) +{ + t->r_intr &= ~(t->rw_ack_intr); + t->r_masked_intr = t->r_intr & t->rw_intr_mask; + + D(printf("%s: masked_intr=%x\n", __func__, t->r_masked_intr)); + qemu_set_irq(t->irq, !!t->r_masked_intr); +} + +static void timer0_hit(void *opaque) +{ + ETRAXTimerState *t = opaque; + t->r_intr |= 1; + timer_update_irq(t); +} + +static void timer1_hit(void *opaque) +{ + ETRAXTimerState *t = opaque; + t->r_intr |= 2; + timer_update_irq(t); +} + +static void watchdog_hit(void *opaque) +{ + ETRAXTimerState *t = opaque; + if (t->wd_hits == 0) { + /* real hw gives a single tick before resetting but we are + a bit friendlier to compensate for our slower execution. */ + ptimer_set_count(t->ptimer_wd, 10); + ptimer_run(t->ptimer_wd, 1); + qemu_irq_raise(t->nmi); + } + else + qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); + + t->wd_hits++; +} + +static inline void timer_watchdog_update(ETRAXTimerState *t, uint32_t value) +{ + unsigned int wd_en = t->rw_wd_ctrl & (1 << 8); + unsigned int wd_key = t->rw_wd_ctrl >> 9; + unsigned int wd_cnt = t->rw_wd_ctrl & 511; + unsigned int new_key = value >> 9 & ((1 << 7) - 1); + unsigned int new_cmd = (value >> 8) & 1; + + /* If the watchdog is enabled, the written key must match the + complement of the previous one. 
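+ Example: if the previous key was 0x55, only a write whose key field is + 0x2a (= ~0x55 & 0x7f) is accepted; with a hypothetical write32() MMIO + helper, setting command bit 8 and a count of 100 would be: + write32(base + RW_WD_CTRL, (0x2a << 9) | (1 << 8) | 100);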
*/ + wd_key = ~wd_key & ((1 << 7) - 1); + + if (wd_en && wd_key != new_key) + return; + + D(printf("en=%d new_key=%x oldkey=%x cmd=%d cnt=%d\n", + wd_en, new_key, wd_key, new_cmd, wd_cnt)); + + if (t->wd_hits) + qemu_irq_lower(t->nmi); + + t->wd_hits = 0; + + ptimer_transaction_begin(t->ptimer_wd); + ptimer_set_freq(t->ptimer_wd, 760); + if (wd_cnt == 0) + wd_cnt = 256; + ptimer_set_count(t->ptimer_wd, wd_cnt); + if (new_cmd) + ptimer_run(t->ptimer_wd, 1); + else + ptimer_stop(t->ptimer_wd); + + t->rw_wd_ctrl = value; + ptimer_transaction_commit(t->ptimer_wd); +} + +static void +timer_write(void *opaque, hwaddr addr, + uint64_t val64, unsigned int size) +{ + ETRAXTimerState *t = opaque; + uint32_t value = val64; + + switch (addr) + { + case RW_TMR0_DIV: + t->rw_tmr0_div = value; + break; + case RW_TMR0_CTRL: + D(printf ("RW_TMR0_CTRL=%x\n", value)); + t->rw_tmr0_ctrl = value; + update_ctrl(t, 0); + break; + case RW_TMR1_DIV: + t->rw_tmr1_div = value; + break; + case RW_TMR1_CTRL: + D(printf ("RW_TMR1_CTRL=%x\n", value)); + t->rw_tmr1_ctrl = value; + update_ctrl(t, 1); + break; + case RW_INTR_MASK: + D(printf ("RW_INTR_MASK=%x\n", value)); + t->rw_intr_mask = value; + timer_update_irq(t); + break; + case RW_WD_CTRL: + timer_watchdog_update(t, value); + break; + case RW_ACK_INTR: + t->rw_ack_intr = value; + timer_update_irq(t); + t->rw_ack_intr = 0; + break; + default: + printf ("%s " TARGET_FMT_plx " %x\n", + __func__, addr, value); + break; + } +} + +static const MemoryRegionOps timer_ops = { + .read = timer_read, + .write = timer_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4 + } +}; + +static void etraxfs_timer_reset_enter(Object *obj, ResetType type) +{ + ETRAXTimerState *t = ETRAX_TIMER(obj); + + ptimer_transaction_begin(t->ptimer_t0); + ptimer_stop(t->ptimer_t0); + ptimer_transaction_commit(t->ptimer_t0); + ptimer_transaction_begin(t->ptimer_t1); + ptimer_stop(t->ptimer_t1); + ptimer_transaction_commit(t->ptimer_t1); + ptimer_transaction_begin(t->ptimer_wd); + ptimer_stop(t->ptimer_wd); + ptimer_transaction_commit(t->ptimer_wd); + t->rw_wd_ctrl = 0; + t->r_intr = 0; + t->rw_intr_mask = 0; +} + +static void etraxfs_timer_reset_hold(Object *obj) +{ + ETRAXTimerState *t = ETRAX_TIMER(obj); + + qemu_irq_lower(t->irq); +} + +static void etraxfs_timer_realize(DeviceState *dev, Error **errp) +{ + ETRAXTimerState *t = ETRAX_TIMER(dev); + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + + t->ptimer_t0 = ptimer_init(timer0_hit, t, PTIMER_POLICY_DEFAULT); + t->ptimer_t1 = ptimer_init(timer1_hit, t, PTIMER_POLICY_DEFAULT); + t->ptimer_wd = ptimer_init(watchdog_hit, t, PTIMER_POLICY_DEFAULT); + + sysbus_init_irq(sbd, &t->irq); + sysbus_init_irq(sbd, &t->nmi); + + memory_region_init_io(&t->mmio, OBJECT(t), &timer_ops, t, + "etraxfs-timer", 0x5c); + sysbus_init_mmio(sbd, &t->mmio); +} + +static void etraxfs_timer_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + ResettableClass *rc = RESETTABLE_CLASS(klass); + + dc->realize = etraxfs_timer_realize; + rc->phases.enter = etraxfs_timer_reset_enter; + rc->phases.hold = etraxfs_timer_reset_hold; +} + +static const TypeInfo etraxfs_timer_info = { + .name = TYPE_ETRAX_FS_TIMER, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(ETRAXTimerState), + .class_init = etraxfs_timer_class_init, +}; + +static void etraxfs_timer_register_types(void) +{ + type_register_static(&etraxfs_timer_info); +} + +type_init(etraxfs_timer_register_types) diff --git 
a/hw/timer/exynos4210_mct.c b/hw/timer/exynos4210_mct.c new file mode 100644 index 000000000..d0e534399 --- /dev/null +++ b/hw/timer/exynos4210_mct.c @@ -0,0 +1,1568 @@ +/* + * Samsung exynos4210 Multi Core timer + * + * Copyright (c) 2000 - 2011 Samsung Electronics Co., Ltd. + * All rights reserved. + * + * Evgeny Voevodin + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * See the GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see <http://www.gnu.org/licenses/>. + */ + +/* + * Global Timer: + * + * Consists of two timers. The first represents the Free Running Counter + * (FRC) and the second measures the interval from the FRC to the nearest + * comparator. + * + * 0 UINT64_MAX + * | timer0 | + * | <-------------------------------------------------------------- | + * | --------------------------------------------frc---------------> | + * |______________________________________________|__________________| + * CMP0 CMP1 CMP2 | CMP3 + * __| |_ + * | timer1 | + * | -------------> | + * frc CMPx + * + * Problem: when implementing the global timer as is, overflow arises in + * next_time = cur_time + period * count; + * because period and count are both 64 bits wide. + * So let's arm the timer for MCT_GT_COUNTER_STEP counts and update the + * internal G_CNT register on each event. + * + * Problem: both timers need to be implemented using MCT_XT_COUNTER_STEP because + * the local timer contains two counters: TCNT and ICNT. TCNT == 0 -> ICNT--. + * An IRQ is generated when ICNT reaches zero. An implementation where TCNT == 0 + * generates IRQs would suffer from too-frequent events. It is better to have one + * uint64_t counter equal to TCNT*ICNT and arm the ptimer for min(TCNT*ICNT, + * MCT_GT_COUNTER_STEP) (if the target tunes ICNT * TCNT to too low a value, + * frequent events are unavoidable). + */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "qemu/timer.h" +#include "qemu/module.h" +#include "hw/ptimer.h" + +#include "hw/arm/exynos4210.h" +#include "hw/irq.h" +#include "qom/object.h" + +//#define DEBUG_MCT + +#ifdef DEBUG_MCT +#define DPRINTF(fmt, ...) \ + do { fprintf(stdout, "MCT: [%24s:%5d] " fmt, __func__, __LINE__, \ + ## __VA_ARGS__); } while (0) +#else +#define DPRINTF(fmt, ...) 
do {} while (0) +#endif + +#define MCT_CFG 0x000 +#define G_CNT_L 0x100 +#define G_CNT_U 0x104 +#define G_CNT_WSTAT 0x110 +#define G_COMP0_L 0x200 +#define G_COMP0_U 0x204 +#define G_COMP0_ADD_INCR 0x208 +#define G_COMP1_L 0x210 +#define G_COMP1_U 0x214 +#define G_COMP1_ADD_INCR 0x218 +#define G_COMP2_L 0x220 +#define G_COMP2_U 0x224 +#define G_COMP2_ADD_INCR 0x228 +#define G_COMP3_L 0x230 +#define G_COMP3_U 0x234 +#define G_COMP3_ADD_INCR 0x238 +#define G_TCON 0x240 +#define G_INT_CSTAT 0x244 +#define G_INT_ENB 0x248 +#define G_WSTAT 0x24C +#define L0_TCNTB 0x300 +#define L0_TCNTO 0x304 +#define L0_ICNTB 0x308 +#define L0_ICNTO 0x30C +#define L0_FRCNTB 0x310 +#define L0_FRCNTO 0x314 +#define L0_TCON 0x320 +#define L0_INT_CSTAT 0x330 +#define L0_INT_ENB 0x334 +#define L0_WSTAT 0x340 +#define L1_TCNTB 0x400 +#define L1_TCNTO 0x404 +#define L1_ICNTB 0x408 +#define L1_ICNTO 0x40C +#define L1_FRCNTB 0x410 +#define L1_FRCNTO 0x414 +#define L1_TCON 0x420 +#define L1_INT_CSTAT 0x430 +#define L1_INT_ENB 0x434 +#define L1_WSTAT 0x440 + +#define MCT_CFG_GET_PRESCALER(x) ((x) & 0xFF) +#define MCT_CFG_GET_DIVIDER(x) (1 << ((x) >> 8 & 7)) + +#define GET_G_COMP_IDX(offset) (((offset) - G_COMP0_L) / 0x10) +#define GET_G_COMP_ADD_INCR_IDX(offset) (((offset) - G_COMP0_ADD_INCR) / 0x10) + +#define G_COMP_L(x) (G_COMP0_L + (x) * 0x10) +#define G_COMP_U(x) (G_COMP0_U + (x) * 0x10) + +#define G_COMP_ADD_INCR(x) (G_COMP0_ADD_INCR + (x) * 0x10) + +/* MCT bits */ +#define G_TCON_COMP_ENABLE(x) (1 << 2 * (x)) +#define G_TCON_AUTO_ICREMENT(x) (1 << (2 * (x) + 1)) +#define G_TCON_TIMER_ENABLE (1 << 8) + +#define G_INT_ENABLE(x) (1 << (x)) +#define G_INT_CSTAT_COMP(x) (1 << (x)) + +#define G_CNT_WSTAT_L 1 +#define G_CNT_WSTAT_U 2 + +#define G_WSTAT_COMP_L(x) (1 << 4 * (x)) +#define G_WSTAT_COMP_U(x) (1 << ((4 * (x)) + 1)) +#define G_WSTAT_COMP_ADDINCR(x) (1 << ((4 * (x)) + 2)) +#define G_WSTAT_TCON_WRITE (1 << 16) + +#define GET_L_TIMER_IDX(offset) ((((offset) & 0xF00) - L0_TCNTB) / 0x100) +#define GET_L_TIMER_CNT_REG_IDX(offset, lt_i) \ + (((offset) - (L0_TCNTB + 0x100 * (lt_i))) >> 2) + +#define L_ICNTB_MANUAL_UPDATE (1 << 31) + +#define L_TCON_TICK_START (1) +#define L_TCON_INT_START (1 << 1) +#define L_TCON_INTERVAL_MODE (1 << 2) +#define L_TCON_FRC_START (1 << 3) + +#define L_INT_CSTAT_INTCNT (1 << 0) +#define L_INT_CSTAT_FRCCNT (1 << 1) + +#define L_INT_INTENB_ICNTEIE (1 << 0) +#define L_INT_INTENB_FRCEIE (1 << 1) + +#define L_WSTAT_TCNTB_WRITE (1 << 0) +#define L_WSTAT_ICNTB_WRITE (1 << 1) +#define L_WSTAT_FRCCNTB_WRITE (1 << 2) +#define L_WSTAT_TCON_WRITE (1 << 3) + +enum LocalTimerRegCntIndexes { + L_REG_CNT_TCNTB, + L_REG_CNT_TCNTO, + L_REG_CNT_ICNTB, + L_REG_CNT_ICNTO, + L_REG_CNT_FRCCNTB, + L_REG_CNT_FRCCNTO, + + L_REG_CNT_AMOUNT +}; + +#define MCT_SFR_SIZE 0x444 + +#define MCT_GT_CMP_NUM 4 + +#define MCT_GT_COUNTER_STEP 0x100000000ULL +#define MCT_LT_COUNTER_STEP 0x100000000ULL +#define MCT_LT_CNT_LOW_LIMIT 0x100 + +/* global timer */ +typedef struct { + qemu_irq irq[MCT_GT_CMP_NUM]; + + struct gregs { + uint64_t cnt; + uint32_t cnt_wstat; + uint32_t tcon; + uint32_t int_cstat; + uint32_t int_enb; + uint32_t wstat; + uint64_t comp[MCT_GT_CMP_NUM]; + uint32_t comp_add_incr[MCT_GT_CMP_NUM]; + } reg; + + uint64_t count; /* Value FRC was armed with */ + int32_t curr_comp; /* Current comparator FRC is running to */ + + ptimer_state *ptimer_frc; /* FRC timer */ + +} Exynos4210MCTGT; + +/* local timer */ +typedef struct { + int id; /* timer id */ + qemu_irq irq; /* local timer irq */ + + struct tick_timer { + 
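+ /* + * Software bookkeeping for the local timer's TCNT/ICNT pair: the two + * hardware counters are folded into a single ptimer armed for + * min(TCNT * ICNT, MCT_GT_COUNTER_STEP) ticks (see the file header). + */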
uint32_t cnt_run; /* cnt timer is running */ + uint32_t int_run; /* int timer is running */ + + uint32_t last_icnto; + uint32_t last_tcnto; + uint32_t tcntb; /* initial value for TCNTB */ + uint32_t icntb; /* initial value for ICNTB */ + + /* for step mode */ + uint64_t distance; /* distance to count to the next event */ + uint64_t progress; /* progress when counting by steps */ + uint64_t count; /* count to arm timer with */ + + ptimer_state *ptimer_tick; /* timer for tick counter */ + } tick_timer; + + /* use ptimer.c to represent count down timer */ + + ptimer_state *ptimer_frc; /* timer for free running counter */ + + /* registers */ + struct lregs { + uint32_t cnt[L_REG_CNT_AMOUNT]; + uint32_t tcon; + uint32_t int_cstat; + uint32_t int_enb; + uint32_t wstat; + } reg; + +} Exynos4210MCTLT; + +#define TYPE_EXYNOS4210_MCT "exynos4210.mct" +OBJECT_DECLARE_SIMPLE_TYPE(Exynos4210MCTState, EXYNOS4210_MCT) + +struct Exynos4210MCTState { + SysBusDevice parent_obj; + + MemoryRegion iomem; + + /* Registers */ + uint32_t reg_mct_cfg; + + Exynos4210MCTLT l_timer[2]; + Exynos4210MCTGT g_timer; + + uint32_t freq; /* all timers tick frequency, TCLK */ +}; + +/*** VMState ***/ +static const VMStateDescription vmstate_tick_timer = { + .name = "exynos4210.mct.tick_timer", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(cnt_run, struct tick_timer), + VMSTATE_UINT32(int_run, struct tick_timer), + VMSTATE_UINT32(last_icnto, struct tick_timer), + VMSTATE_UINT32(last_tcnto, struct tick_timer), + VMSTATE_UINT32(tcntb, struct tick_timer), + VMSTATE_UINT32(icntb, struct tick_timer), + VMSTATE_UINT64(distance, struct tick_timer), + VMSTATE_UINT64(progress, struct tick_timer), + VMSTATE_UINT64(count, struct tick_timer), + VMSTATE_PTIMER(ptimer_tick, struct tick_timer), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_lregs = { + .name = "exynos4210.mct.lregs", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32_ARRAY(cnt, struct lregs, L_REG_CNT_AMOUNT), + VMSTATE_UINT32(tcon, struct lregs), + VMSTATE_UINT32(int_cstat, struct lregs), + VMSTATE_UINT32(int_enb, struct lregs), + VMSTATE_UINT32(wstat, struct lregs), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_exynos4210_mct_lt = { + .name = "exynos4210.mct.lt", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_INT32(id, Exynos4210MCTLT), + VMSTATE_STRUCT(tick_timer, Exynos4210MCTLT, 0, + vmstate_tick_timer, + struct tick_timer), + VMSTATE_PTIMER(ptimer_frc, Exynos4210MCTLT), + VMSTATE_STRUCT(reg, Exynos4210MCTLT, 0, + vmstate_lregs, + struct lregs), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_gregs = { + .name = "exynos4210.mct.lregs", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT64(cnt, struct gregs), + VMSTATE_UINT32(cnt_wstat, struct gregs), + VMSTATE_UINT32(tcon, struct gregs), + VMSTATE_UINT32(int_cstat, struct gregs), + VMSTATE_UINT32(int_enb, struct gregs), + VMSTATE_UINT32(wstat, struct gregs), + VMSTATE_UINT64_ARRAY(comp, struct gregs, MCT_GT_CMP_NUM), + VMSTATE_UINT32_ARRAY(comp_add_incr, struct gregs, + MCT_GT_CMP_NUM), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_exynos4210_mct_gt = { + .name = "exynos4210.mct.lt", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_STRUCT(reg, Exynos4210MCTGT, 0, vmstate_gregs, + struct gregs), + 
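+ /* + * 'count' is the distance the FRC ptimer was last armed with; it is + * needed by exynos4210_gfrc_get_count() to reconstruct the absolute + * counter value after migration. + */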
VMSTATE_UINT64(count, Exynos4210MCTGT), + VMSTATE_INT32(curr_comp, Exynos4210MCTGT), + VMSTATE_PTIMER(ptimer_frc, Exynos4210MCTGT), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_exynos4210_mct_state = { + .name = "exynos4210.mct", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(reg_mct_cfg, Exynos4210MCTState), + VMSTATE_STRUCT_ARRAY(l_timer, Exynos4210MCTState, 2, 0, + vmstate_exynos4210_mct_lt, Exynos4210MCTLT), + VMSTATE_STRUCT(g_timer, Exynos4210MCTState, 0, + vmstate_exynos4210_mct_gt, Exynos4210MCTGT), + VMSTATE_UINT32(freq, Exynos4210MCTState), + VMSTATE_END_OF_LIST() + } +}; + +static void exynos4210_mct_update_freq(Exynos4210MCTState *s); + +/* + * Set counter of FRC global timer. + * Must be called within exynos4210_gfrc_tx_begin/commit block. + */ +static void exynos4210_gfrc_set_count(Exynos4210MCTGT *s, uint64_t count) +{ + s->count = count; + DPRINTF("global timer frc set count 0x%llx\n", count); + ptimer_set_count(s->ptimer_frc, count); +} + +/* + * Get counter of FRC global timer. + */ +static uint64_t exynos4210_gfrc_get_count(Exynos4210MCTGT *s) +{ + uint64_t count = 0; + count = ptimer_get_count(s->ptimer_frc); + count = s->count - count; + return s->reg.cnt + count; +} + +/* + * Stop global FRC timer + * Must be called within exynos4210_gfrc_tx_begin/commit block. + */ +static void exynos4210_gfrc_stop(Exynos4210MCTGT *s) +{ + DPRINTF("global timer frc stop\n"); + + ptimer_stop(s->ptimer_frc); +} + +/* + * Start global FRC timer + * Must be called within exynos4210_gfrc_tx_begin/commit block. + */ +static void exynos4210_gfrc_start(Exynos4210MCTGT *s) +{ + DPRINTF("global timer frc start\n"); + + ptimer_run(s->ptimer_frc, 1); +} + +/* + * Start ptimer transaction for global FRC timer; this is just for + * consistency with the way we wrap operations like stop and run. + */ +static void exynos4210_gfrc_tx_begin(Exynos4210MCTGT *s) +{ + ptimer_transaction_begin(s->ptimer_frc); +} + +/* Commit ptimer transaction for global FRC timer. */ +static void exynos4210_gfrc_tx_commit(Exynos4210MCTGT *s) +{ + ptimer_transaction_commit(s->ptimer_frc); +} + +/* + * Find next nearest Comparator. 
If the current Comparator value equals another
+ * Comparator's value, skip them both */
+static int32_t exynos4210_gcomp_find(Exynos4210MCTState *s)
+{
+    int res;
+    int i;
+    int enabled;
+    uint64_t min;
+    int min_comp_i;
+    uint64_t gfrc;
+    uint64_t distance;
+    uint64_t distance_min;
+    int comp_i;
+
+    /* get gfrc count */
+    gfrc = exynos4210_gfrc_get_count(&s->g_timer);
+
+    min = UINT64_MAX;
+    distance_min = UINT64_MAX;
+    comp_i = MCT_GT_CMP_NUM;
+    min_comp_i = MCT_GT_CMP_NUM;
+    enabled = 0;
+
+    /* look up the nearest comparator */
+    for (i = 0; i < MCT_GT_CMP_NUM; i++) {
+
+        if (s->g_timer.reg.tcon & G_TCON_COMP_ENABLE(i)) {
+
+            enabled = 1;
+
+            if (s->g_timer.reg.comp[i] > gfrc) {
+                /* Comparator is above the FRC */
+                distance = s->g_timer.reg.comp[i] - gfrc;
+
+                if (distance <= distance_min) {
+                    distance_min = distance;
+                    comp_i = i;
+                }
+            } else {
+                /* Comparator is below the FRC, find the smallest */
+
+                if (s->g_timer.reg.comp[i] <= min) {
+                    min = s->g_timer.reg.comp[i];
+                    min_comp_i = i;
+                }
+            }
+        }
+    }
+
+    if (!enabled) {
+        /* All Comparators disabled */
+        res = -1;
+    } else if (comp_i < MCT_GT_CMP_NUM) {
+        /* Found a Comparator above the FRC */
+        res = comp_i;
+    } else {
+        /* All Comparators are below or equal to the FRC */
+        res = min_comp_i;
+    }
+
+    DPRINTF("found comparator %d: comp 0x%llx distance 0x%llx, gfrc 0x%llx\n",
+            res,
+            s->g_timer.reg.comp[res],
+            distance_min,
+            gfrc);
+
+    return res;
+}
+
+/*
+ * Get distance to nearest Comparator
+ */
+static uint64_t exynos4210_gcomp_get_distance(Exynos4210MCTState *s, int32_t id)
+{
+    if (id == -1) {
+        /* no enabled Comparators, choose max distance */
+        return MCT_GT_COUNTER_STEP;
+    }
+    if (s->g_timer.reg.comp[id] - s->g_timer.reg.cnt < MCT_GT_COUNTER_STEP) {
+        return s->g_timer.reg.comp[id] - s->g_timer.reg.cnt;
+    } else {
+        return MCT_GT_COUNTER_STEP;
+    }
+}
+
+/*
+ * Restart global FRC timer
+ * Must be called within exynos4210_gfrc_tx_begin/commit block.
+ */
+static void exynos4210_gfrc_restart(Exynos4210MCTState *s)
+{
+    uint64_t distance;
+
+    exynos4210_gfrc_stop(&s->g_timer);
+
+    s->g_timer.curr_comp = exynos4210_gcomp_find(s);
+
+    distance = exynos4210_gcomp_get_distance(s, s->g_timer.curr_comp);
+
+    if (distance > MCT_GT_COUNTER_STEP || !distance) {
+        distance = MCT_GT_COUNTER_STEP;
+    }
+
+    exynos4210_gfrc_set_count(&s->g_timer, distance);
+    exynos4210_gfrc_start(&s->g_timer);
+}
+
+/*
+ * Raise global timer CMP IRQ
+ */
+static void exynos4210_gcomp_raise_irq(void *opaque, uint32_t id)
+{
+    Exynos4210MCTGT *s = opaque;
+
+    /* If CSTAT is pending and IRQ is enabled */
+    if ((s->reg.int_cstat & G_INT_CSTAT_COMP(id)) &&
+            (s->reg.int_enb & G_INT_ENABLE(id))) {
+        DPRINTF("gcmp timer[%u] IRQ\n", id);
+        qemu_irq_raise(s->irq[id]);
+    }
+}
+
+/*
+ * Lower global timer CMP IRQ
+ */
+static void exynos4210_gcomp_lower_irq(void *opaque, uint32_t id)
+{
+    Exynos4210MCTGT *s = opaque;
+    qemu_irq_lower(s->irq[id]);
+}
+
+/*
+ * Global timer FRC event handler.
+ * Each event occurs when internal counter reaches counter + MCT_GT_COUNTER_STEP + * Every time we arm global FRC timer to count for MCT_GT_COUNTER_STEP value + */ +static void exynos4210_gfrc_event(void *opaque) +{ + Exynos4210MCTState *s = (Exynos4210MCTState *)opaque; + int i; + uint64_t distance; + + DPRINTF("\n"); + + s->g_timer.reg.cnt += s->g_timer.count; + + /* Process all comparators */ + for (i = 0; i < MCT_GT_CMP_NUM; i++) { + + if (s->g_timer.reg.cnt == s->g_timer.reg.comp[i]) { + /* reached nearest comparator */ + + s->g_timer.reg.int_cstat |= G_INT_CSTAT_COMP(i); + + /* Auto increment */ + if (s->g_timer.reg.tcon & G_TCON_AUTO_ICREMENT(i)) { + s->g_timer.reg.comp[i] += s->g_timer.reg.comp_add_incr[i]; + } + + /* IRQ */ + exynos4210_gcomp_raise_irq(&s->g_timer, i); + } + } + + /* Reload FRC to reach nearest comparator */ + s->g_timer.curr_comp = exynos4210_gcomp_find(s); + distance = exynos4210_gcomp_get_distance(s, s->g_timer.curr_comp); + if (distance > MCT_GT_COUNTER_STEP || !distance) { + distance = MCT_GT_COUNTER_STEP; + } + exynos4210_gfrc_set_count(&s->g_timer, distance); + + exynos4210_gfrc_start(&s->g_timer); +} + +/* + * Get counter of FRC local timer. + */ +static uint64_t exynos4210_lfrc_get_count(Exynos4210MCTLT *s) +{ + return ptimer_get_count(s->ptimer_frc); +} + +/* + * Set counter of FRC local timer. + * Must be called from within exynos4210_lfrc_tx_begin/commit block. + */ +static void exynos4210_lfrc_update_count(Exynos4210MCTLT *s) +{ + if (!s->reg.cnt[L_REG_CNT_FRCCNTB]) { + ptimer_set_count(s->ptimer_frc, MCT_LT_COUNTER_STEP); + } else { + ptimer_set_count(s->ptimer_frc, s->reg.cnt[L_REG_CNT_FRCCNTB]); + } +} + +/* + * Start local FRC timer + * Must be called from within exynos4210_lfrc_tx_begin/commit block. + */ +static void exynos4210_lfrc_start(Exynos4210MCTLT *s) +{ + ptimer_run(s->ptimer_frc, 1); +} + +/* + * Stop local FRC timer + * Must be called from within exynos4210_lfrc_tx_begin/commit block. 
+ */ +static void exynos4210_lfrc_stop(Exynos4210MCTLT *s) +{ + ptimer_stop(s->ptimer_frc); +} + +/* Start ptimer transaction for local FRC timer */ +static void exynos4210_lfrc_tx_begin(Exynos4210MCTLT *s) +{ + ptimer_transaction_begin(s->ptimer_frc); +} + +/* Commit ptimer transaction for local FRC timer */ +static void exynos4210_lfrc_tx_commit(Exynos4210MCTLT *s) +{ + ptimer_transaction_commit(s->ptimer_frc); +} + +/* + * Local timer free running counter tick handler + */ +static void exynos4210_lfrc_event(void *opaque) +{ + Exynos4210MCTLT * s = (Exynos4210MCTLT *)opaque; + + /* local frc expired */ + + DPRINTF("\n"); + + s->reg.int_cstat |= L_INT_CSTAT_FRCCNT; + + /* update frc counter */ + exynos4210_lfrc_update_count(s); + + /* raise irq */ + if (s->reg.int_enb & L_INT_INTENB_FRCEIE) { + qemu_irq_raise(s->irq); + } + + /* we reached here, this means that timer is enabled */ + exynos4210_lfrc_start(s); +} + +static uint32_t exynos4210_ltick_int_get_cnto(struct tick_timer *s); +static uint32_t exynos4210_ltick_cnt_get_cnto(struct tick_timer *s); +static void exynos4210_ltick_recalc_count(struct tick_timer *s); + +/* + * Action on enabling local tick int timer + */ +static void exynos4210_ltick_int_start(struct tick_timer *s) +{ + if (!s->int_run) { + s->int_run = 1; + } +} + +/* + * Action on disabling local tick int timer + */ +static void exynos4210_ltick_int_stop(struct tick_timer *s) +{ + if (s->int_run) { + s->last_icnto = exynos4210_ltick_int_get_cnto(s); + s->int_run = 0; + } +} + +/* + * Get count for INT timer + */ +static uint32_t exynos4210_ltick_int_get_cnto(struct tick_timer *s) +{ + uint32_t icnto; + uint64_t remain; + uint64_t count; + uint64_t counted; + uint64_t cur_progress; + + count = ptimer_get_count(s->ptimer_tick); + if (count) { + /* timer is still counting, called not from event */ + counted = s->count - ptimer_get_count(s->ptimer_tick); + cur_progress = s->progress + counted; + } else { + /* timer expired earlier */ + cur_progress = s->progress; + } + + remain = s->distance - cur_progress; + + if (!s->int_run) { + /* INT is stopped. */ + icnto = s->last_icnto; + } else { + /* Both are counting */ + icnto = remain / s->tcntb; + } + + return icnto; +} + +/* + * Start local tick cnt timer. + * Must be called within exynos4210_ltick_tx_begin/commit block. + */ +static void exynos4210_ltick_cnt_start(struct tick_timer *s) +{ + if (!s->cnt_run) { + + exynos4210_ltick_recalc_count(s); + ptimer_set_count(s->ptimer_tick, s->count); + ptimer_run(s->ptimer_tick, 1); + + s->cnt_run = 1; + } +} + +/* + * Stop local tick cnt timer. + * Must be called within exynos4210_ltick_tx_begin/commit block. 
+ */ +static void exynos4210_ltick_cnt_stop(struct tick_timer *s) +{ + if (s->cnt_run) { + + s->last_tcnto = exynos4210_ltick_cnt_get_cnto(s); + + if (s->int_run) { + exynos4210_ltick_int_stop(s); + } + + ptimer_stop(s->ptimer_tick); + + s->cnt_run = 0; + } +} + +/* Start ptimer transaction for local tick timer */ +static void exynos4210_ltick_tx_begin(struct tick_timer *s) +{ + ptimer_transaction_begin(s->ptimer_tick); +} + +/* Commit ptimer transaction for local tick timer */ +static void exynos4210_ltick_tx_commit(struct tick_timer *s) +{ + ptimer_transaction_commit(s->ptimer_tick); +} + +/* + * Get counter for CNT timer + */ +static uint32_t exynos4210_ltick_cnt_get_cnto(struct tick_timer *s) +{ + uint32_t tcnto; + uint32_t icnto; + uint64_t remain; + uint64_t counted; + uint64_t count; + uint64_t cur_progress; + + count = ptimer_get_count(s->ptimer_tick); + if (count) { + /* timer is still counting, called not from event */ + counted = s->count - ptimer_get_count(s->ptimer_tick); + cur_progress = s->progress + counted; + } else { + /* timer expired earlier */ + cur_progress = s->progress; + } + + remain = s->distance - cur_progress; + + if (!s->cnt_run) { + /* Both are stopped. */ + tcnto = s->last_tcnto; + } else if (!s->int_run) { + /* INT counter is stopped, progress is by CNT timer */ + tcnto = remain % s->tcntb; + } else { + /* Both are counting */ + icnto = remain / s->tcntb; + if (icnto) { + tcnto = remain % (icnto * s->tcntb); + } else { + tcnto = remain % s->tcntb; + } + } + + return tcnto; +} + +/* + * Set new values of counters for CNT and INT timers + * Must be called within exynos4210_ltick_tx_begin/commit block. + */ +static void exynos4210_ltick_set_cntb(struct tick_timer *s, uint32_t new_cnt, + uint32_t new_int) +{ + uint32_t cnt_stopped = 0; + uint32_t int_stopped = 0; + + if (s->cnt_run) { + exynos4210_ltick_cnt_stop(s); + cnt_stopped = 1; + } + + if (s->int_run) { + exynos4210_ltick_int_stop(s); + int_stopped = 1; + } + + s->tcntb = new_cnt + 1; + s->icntb = new_int + 1; + + if (cnt_stopped) { + exynos4210_ltick_cnt_start(s); + } + if (int_stopped) { + exynos4210_ltick_int_start(s); + } + +} + +/* + * Calculate new counter value for tick timer + */ +static void exynos4210_ltick_recalc_count(struct tick_timer *s) +{ + uint64_t to_count; + + if ((s->cnt_run && s->last_tcnto) || (s->int_run && s->last_icnto)) { + /* + * one or both timers run and not counted to the end; + * distance is not passed, recalculate with last_tcnto * last_icnto + */ + + if (s->last_tcnto) { + to_count = (uint64_t)s->last_tcnto * s->last_icnto; + } else { + to_count = s->last_icnto; + } + } else { + /* distance is passed, recalculate with tcnto * icnto */ + if (s->icntb) { + s->distance = (uint64_t)s->tcntb * s->icntb; + } else { + s->distance = s->tcntb; + } + + to_count = s->distance; + s->progress = 0; + } + + if (to_count > MCT_LT_COUNTER_STEP) { + /* count by step */ + s->count = MCT_LT_COUNTER_STEP; + } else { + s->count = to_count; + } +} + +/* + * Initialize tick_timer + */ +static void exynos4210_ltick_timer_init(struct tick_timer *s) +{ + exynos4210_ltick_int_stop(s); + exynos4210_ltick_tx_begin(s); + exynos4210_ltick_cnt_stop(s); + exynos4210_ltick_tx_commit(s); + + s->count = 0; + s->distance = 0; + s->progress = 0; + s->icntb = 0; + s->tcntb = 0; +} + +/* + * tick_timer event. + * Raises when abstract tick_timer expires. + */ +static void exynos4210_ltick_timer_event(struct tick_timer *s) +{ + s->progress += s->count; +} + +/* + * Local timer tick counter handler. 
+ * The underlying ptimer is not auto-reloaded: when the timer counter
+ * reaches zero this handler is called, and no reload happens after the
+ * handler finishes.
+ */
+static void exynos4210_ltick_event(void *opaque)
+{
+    Exynos4210MCTLT *s = (Exynos4210MCTLT *)opaque;
+    uint32_t tcnto;
+    uint32_t icnto;
+#ifdef DEBUG_MCT
+    static uint64_t time1[2] = {0};
+    static uint64_t time2[2] = {0};
+#endif
+
+    /* Call the tick_timer event handler; it advances the timer's progress. */
+    exynos4210_ltick_timer_event(&s->tick_timer);
+
+    /* get tick_timer cnt */
+    tcnto = exynos4210_ltick_cnt_get_cnto(&s->tick_timer);
+
+    /* get tick_timer int */
+    icnto = exynos4210_ltick_int_get_cnto(&s->tick_timer);
+
+    /* raise IRQ if needed */
+    if (!icnto && s->reg.tcon & L_TCON_INT_START) {
+        /* INT counter enabled and expired */
+
+        s->reg.int_cstat |= L_INT_CSTAT_INTCNT;
+
+        /* raise interrupt if enabled */
+        if (s->reg.int_enb & L_INT_INTENB_ICNTEIE) {
+#ifdef DEBUG_MCT
+            time2[s->id] = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
+            DPRINTF("local timer[%d] IRQ: %llx\n", s->id,
+                    time2[s->id] - time1[s->id]);
+            time1[s->id] = time2[s->id];
+#endif
+            qemu_irq_raise(s->irq);
+        }
+
+        /* reload ICNTB */
+        if (s->reg.tcon & L_TCON_INTERVAL_MODE) {
+            exynos4210_ltick_set_cntb(&s->tick_timer,
+                    s->reg.cnt[L_REG_CNT_TCNTB],
+                    s->reg.cnt[L_REG_CNT_ICNTB]);
+        }
+    } else {
+        /* reload TCNTB */
+        if (!tcnto) {
+            exynos4210_ltick_set_cntb(&s->tick_timer,
+                    s->reg.cnt[L_REG_CNT_TCNTB],
+                    icnto);
+        }
+    }
+
+    /* start tick_timer cnt */
+    exynos4210_ltick_cnt_start(&s->tick_timer);
+
+    /* start tick_timer int */
+    exynos4210_ltick_int_start(&s->tick_timer);
+}
+
+static void tx_ptimer_set_freq(ptimer_state *s, uint32_t freq)
+{
+    /*
+     * callers of exynos4210_mct_update_freq() never do anything
+     * else that needs to be in the same ptimer transaction, so
+     * to avoid a lot of repetition we have a convenience function
+     * for begin/set_freq/commit.
+     */
+    ptimer_transaction_begin(s);
+    ptimer_set_freq(s, freq);
+    ptimer_transaction_commit(s);
+}
+
+/* update timer frequency: freq = 24 MHz / ((prescaler + 1) * divider) */
+static void exynos4210_mct_update_freq(Exynos4210MCTState *s)
+{
+    uint32_t freq = s->freq;
+    s->freq = 24000000 /
+            ((MCT_CFG_GET_PRESCALER(s->reg_mct_cfg) + 1) *
+                    MCT_CFG_GET_DIVIDER(s->reg_mct_cfg));
+
+    if (freq != s->freq) {
+        DPRINTF("freq=%uHz\n", s->freq);
+
+        /* global timer */
+        tx_ptimer_set_freq(s->g_timer.ptimer_frc, s->freq);
+
+        /* local timer */
+        tx_ptimer_set_freq(s->l_timer[0].tick_timer.ptimer_tick, s->freq);
+        tx_ptimer_set_freq(s->l_timer[0].ptimer_frc, s->freq);
+        tx_ptimer_set_freq(s->l_timer[1].tick_timer.ptimer_tick, s->freq);
+        tx_ptimer_set_freq(s->l_timer[1].ptimer_frc, s->freq);
+    }
+}
+
+/* set default values for all fields */
+static void exynos4210_mct_reset(DeviceState *d)
+{
+    Exynos4210MCTState *s = EXYNOS4210_MCT(d);
+    uint32_t i;
+
+    s->reg_mct_cfg = 0;
+
+    /* global timer */
+    memset(&s->g_timer.reg, 0, sizeof(s->g_timer.reg));
+    exynos4210_gfrc_tx_begin(&s->g_timer);
+    exynos4210_gfrc_stop(&s->g_timer);
+    exynos4210_gfrc_tx_commit(&s->g_timer);
+
+    /* local timer */
+    memset(s->l_timer[0].reg.cnt, 0, sizeof(s->l_timer[0].reg.cnt));
+    memset(s->l_timer[1].reg.cnt, 0, sizeof(s->l_timer[1].reg.cnt));
+    for (i = 0; i < 2; i++) {
+        s->l_timer[i].reg.int_cstat = 0;
+        s->l_timer[i].reg.int_enb = 0;
+        s->l_timer[i].reg.tcon = 0;
+        s->l_timer[i].reg.wstat = 0;
+        s->l_timer[i].tick_timer.count = 0;
+        s->l_timer[i].tick_timer.distance = 0;
+        s->l_timer[i].tick_timer.progress = 0;
+        exynos4210_lfrc_tx_begin(&s->l_timer[i]);
+        ptimer_stop(s->l_timer[i].ptimer_frc);
+        exynos4210_lfrc_tx_commit(&s->l_timer[i]);
+
+        exynos4210_ltick_timer_init(&s->l_timer[i].tick_timer);
+    }
+
+    exynos4210_mct_update_freq(s);
+
+}
+
+/* Multi Core Timer read */
+static uint64_t exynos4210_mct_read(void *opaque, hwaddr offset,
+                                    unsigned size)
+{
+    Exynos4210MCTState *s = (Exynos4210MCTState *)opaque;
+    int index;
+    int shift;
+    uint64_t count;
+    uint32_t value = 0;
+    int lt_i;
+
+    switch (offset) {
+
+    case MCT_CFG:
+        value = s->reg_mct_cfg;
+        break;
+
+    case G_CNT_L: case G_CNT_U:
+        shift = 8 * (offset & 0x4);
+        count = exynos4210_gfrc_get_count(&s->g_timer);
+        value = UINT32_MAX & (count >> shift);
+        DPRINTF("read FRC=0x%llx\n", count);
+        break;
+
+    case G_CNT_WSTAT:
+        value = s->g_timer.reg.cnt_wstat;
+        break;
+
+    case G_COMP_L(0): case G_COMP_L(1): case G_COMP_L(2): case G_COMP_L(3):
+    case G_COMP_U(0): case G_COMP_U(1): case G_COMP_U(2): case G_COMP_U(3):
+        index = GET_G_COMP_IDX(offset);
+        shift = 8 * (offset & 0x4);
+        value = UINT32_MAX & (s->g_timer.reg.comp[index] >> shift);
+        break;
+
+    case G_TCON:
+        value = s->g_timer.reg.tcon;
+        break;
+
+    case G_INT_CSTAT:
+        value = s->g_timer.reg.int_cstat;
+        break;
+
+    case G_INT_ENB:
+        value = s->g_timer.reg.int_enb;
+        break;
+    case G_WSTAT:
+        value = s->g_timer.reg.wstat;
+        break;
+
+    case G_COMP0_ADD_INCR: case G_COMP1_ADD_INCR:
+    case G_COMP2_ADD_INCR: case G_COMP3_ADD_INCR:
+        value = s->g_timer.reg.comp_add_incr[GET_G_COMP_ADD_INCR_IDX(offset)];
+        break;
+
+    /* Local timers */
+    case L0_TCNTB: case L0_ICNTB: case L0_FRCNTB:
+    case L1_TCNTB: case L1_ICNTB: case L1_FRCNTB:
+        lt_i = GET_L_TIMER_IDX(offset);
+        index = GET_L_TIMER_CNT_REG_IDX(offset, lt_i);
+        value = s->l_timer[lt_i].reg.cnt[index];
+        break;
+
+    case L0_TCNTO: case L1_TCNTO:
+        lt_i = GET_L_TIMER_IDX(offset);
+
+        value = exynos4210_ltick_cnt_get_cnto(&s->l_timer[lt_i].tick_timer);
+        DPRINTF("local timer[%d] read TCNTO %x\n", lt_i,
value); + break; + + case L0_ICNTO: case L1_ICNTO: + lt_i = GET_L_TIMER_IDX(offset); + + value = exynos4210_ltick_int_get_cnto(&s->l_timer[lt_i].tick_timer); + DPRINTF("local timer[%d] read ICNTO %x\n", lt_i, value); + break; + + case L0_FRCNTO: case L1_FRCNTO: + lt_i = GET_L_TIMER_IDX(offset); + + value = exynos4210_lfrc_get_count(&s->l_timer[lt_i]); + break; + + case L0_TCON: case L1_TCON: + lt_i = ((offset & 0xF00) - L0_TCNTB) / 0x100; + value = s->l_timer[lt_i].reg.tcon; + break; + + case L0_INT_CSTAT: case L1_INT_CSTAT: + lt_i = ((offset & 0xF00) - L0_TCNTB) / 0x100; + value = s->l_timer[lt_i].reg.int_cstat; + break; + + case L0_INT_ENB: case L1_INT_ENB: + lt_i = ((offset & 0xF00) - L0_TCNTB) / 0x100; + value = s->l_timer[lt_i].reg.int_enb; + break; + + case L0_WSTAT: case L1_WSTAT: + lt_i = ((offset & 0xF00) - L0_TCNTB) / 0x100; + value = s->l_timer[lt_i].reg.wstat; + break; + + default: + qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" HWADDR_PRIX "\n", + __func__, offset); + break; + } + return value; +} + +/* MCT write */ +static void exynos4210_mct_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + Exynos4210MCTState *s = (Exynos4210MCTState *)opaque; + int index; /* index in buffer which represents register set */ + int shift; + int lt_i; + uint64_t new_frc; + uint32_t i; + uint32_t old_val; +#ifdef DEBUG_MCT + static uint32_t icntb_max[2] = {0}; + static uint32_t icntb_min[2] = {UINT32_MAX, UINT32_MAX}; + static uint32_t tcntb_max[2] = {0}; + static uint32_t tcntb_min[2] = {UINT32_MAX, UINT32_MAX}; +#endif + + new_frc = s->g_timer.reg.cnt; + + switch (offset) { + + case MCT_CFG: + s->reg_mct_cfg = value; + exynos4210_mct_update_freq(s); + break; + + case G_CNT_L: + case G_CNT_U: + if (offset == G_CNT_L) { + + DPRINTF("global timer write to reg.cntl %llx\n", value); + + new_frc = (s->g_timer.reg.cnt & (uint64_t)UINT32_MAX << 32) + value; + s->g_timer.reg.cnt_wstat |= G_CNT_WSTAT_L; + } + if (offset == G_CNT_U) { + + DPRINTF("global timer write to reg.cntu %llx\n", value); + + new_frc = (s->g_timer.reg.cnt & UINT32_MAX) + + ((uint64_t)value << 32); + s->g_timer.reg.cnt_wstat |= G_CNT_WSTAT_U; + } + + s->g_timer.reg.cnt = new_frc; + exynos4210_gfrc_tx_begin(&s->g_timer); + exynos4210_gfrc_restart(s); + exynos4210_gfrc_tx_commit(&s->g_timer); + break; + + case G_CNT_WSTAT: + s->g_timer.reg.cnt_wstat &= ~(value); + break; + + case G_COMP_L(0): case G_COMP_L(1): case G_COMP_L(2): case G_COMP_L(3): + case G_COMP_U(0): case G_COMP_U(1): case G_COMP_U(2): case G_COMP_U(3): + index = GET_G_COMP_IDX(offset); + shift = 8 * (offset & 0x4); + s->g_timer.reg.comp[index] = + (s->g_timer.reg.comp[index] & + (((uint64_t)UINT32_MAX << 32) >> shift)) + + (value << shift); + + DPRINTF("comparator %d write 0x%llx val << %d\n", index, value, shift); + + if (offset & 0x4) { + s->g_timer.reg.wstat |= G_WSTAT_COMP_U(index); + } else { + s->g_timer.reg.wstat |= G_WSTAT_COMP_L(index); + } + + exynos4210_gfrc_tx_begin(&s->g_timer); + exynos4210_gfrc_restart(s); + exynos4210_gfrc_tx_commit(&s->g_timer); + break; + + case G_TCON: + old_val = s->g_timer.reg.tcon; + s->g_timer.reg.tcon = value; + s->g_timer.reg.wstat |= G_WSTAT_TCON_WRITE; + + DPRINTF("global timer write to reg.g_tcon %llx\n", value); + + exynos4210_gfrc_tx_begin(&s->g_timer); + + /* Start FRC if transition from disabled to enabled */ + if ((value & G_TCON_TIMER_ENABLE) > (old_val & + G_TCON_TIMER_ENABLE)) { + exynos4210_gfrc_restart(s); + } + if ((value & G_TCON_TIMER_ENABLE) < (old_val & + G_TCON_TIMER_ENABLE)) { + 
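/* timer enable bit went 1 -> 0: stop the global FRC */
+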
exynos4210_gfrc_stop(&s->g_timer); + } + + /* Start CMP if transition from disabled to enabled */ + for (i = 0; i < MCT_GT_CMP_NUM; i++) { + if ((value & G_TCON_COMP_ENABLE(i)) != (old_val & + G_TCON_COMP_ENABLE(i))) { + exynos4210_gfrc_restart(s); + } + } + + exynos4210_gfrc_tx_commit(&s->g_timer); + break; + + case G_INT_CSTAT: + s->g_timer.reg.int_cstat &= ~(value); + for (i = 0; i < MCT_GT_CMP_NUM; i++) { + if (value & G_INT_CSTAT_COMP(i)) { + exynos4210_gcomp_lower_irq(&s->g_timer, i); + } + } + break; + + case G_INT_ENB: + /* Raise IRQ if transition from disabled to enabled and CSTAT pending */ + for (i = 0; i < MCT_GT_CMP_NUM; i++) { + if ((value & G_INT_ENABLE(i)) > (s->g_timer.reg.tcon & + G_INT_ENABLE(i))) { + if (s->g_timer.reg.int_cstat & G_INT_CSTAT_COMP(i)) { + exynos4210_gcomp_raise_irq(&s->g_timer, i); + } + } + + if ((value & G_INT_ENABLE(i)) < (s->g_timer.reg.tcon & + G_INT_ENABLE(i))) { + exynos4210_gcomp_lower_irq(&s->g_timer, i); + } + } + + DPRINTF("global timer INT enable %llx\n", value); + s->g_timer.reg.int_enb = value; + break; + + case G_WSTAT: + s->g_timer.reg.wstat &= ~(value); + break; + + case G_COMP0_ADD_INCR: case G_COMP1_ADD_INCR: + case G_COMP2_ADD_INCR: case G_COMP3_ADD_INCR: + index = GET_G_COMP_ADD_INCR_IDX(offset); + s->g_timer.reg.comp_add_incr[index] = value; + s->g_timer.reg.wstat |= G_WSTAT_COMP_ADDINCR(index); + break; + + /* Local timers */ + case L0_TCON: case L1_TCON: + lt_i = GET_L_TIMER_IDX(offset); + old_val = s->l_timer[lt_i].reg.tcon; + + s->l_timer[lt_i].reg.wstat |= L_WSTAT_TCON_WRITE; + s->l_timer[lt_i].reg.tcon = value; + + exynos4210_ltick_tx_begin(&s->l_timer[lt_i].tick_timer); + /* Stop local CNT */ + if ((value & L_TCON_TICK_START) < + (old_val & L_TCON_TICK_START)) { + DPRINTF("local timer[%d] stop cnt\n", lt_i); + exynos4210_ltick_cnt_stop(&s->l_timer[lt_i].tick_timer); + } + + /* Stop local INT */ + if ((value & L_TCON_INT_START) < + (old_val & L_TCON_INT_START)) { + DPRINTF("local timer[%d] stop int\n", lt_i); + exynos4210_ltick_int_stop(&s->l_timer[lt_i].tick_timer); + } + + /* Start local CNT */ + if ((value & L_TCON_TICK_START) > + (old_val & L_TCON_TICK_START)) { + DPRINTF("local timer[%d] start cnt\n", lt_i); + exynos4210_ltick_cnt_start(&s->l_timer[lt_i].tick_timer); + } + + /* Start local INT */ + if ((value & L_TCON_INT_START) > + (old_val & L_TCON_INT_START)) { + DPRINTF("local timer[%d] start int\n", lt_i); + exynos4210_ltick_int_start(&s->l_timer[lt_i].tick_timer); + } + exynos4210_ltick_tx_commit(&s->l_timer[lt_i].tick_timer); + + /* Start or Stop local FRC if TCON changed */ + exynos4210_lfrc_tx_begin(&s->l_timer[lt_i]); + if ((value & L_TCON_FRC_START) > + (s->l_timer[lt_i].reg.tcon & L_TCON_FRC_START)) { + DPRINTF("local timer[%d] start frc\n", lt_i); + exynos4210_lfrc_start(&s->l_timer[lt_i]); + } + if ((value & L_TCON_FRC_START) < + (s->l_timer[lt_i].reg.tcon & L_TCON_FRC_START)) { + DPRINTF("local timer[%d] stop frc\n", lt_i); + exynos4210_lfrc_stop(&s->l_timer[lt_i]); + } + exynos4210_lfrc_tx_commit(&s->l_timer[lt_i]); + break; + + case L0_TCNTB: case L1_TCNTB: + lt_i = GET_L_TIMER_IDX(offset); + + /* + * TCNTB is updated to internal register only after CNT expired. + * Due to this we should reload timer to nearest moment when CNT is + * expired and then in event handler update tcntb to new TCNTB value. 
+ */
+        exynos4210_ltick_tx_begin(&s->l_timer[lt_i].tick_timer);
+        exynos4210_ltick_set_cntb(&s->l_timer[lt_i].tick_timer, value,
+                s->l_timer[lt_i].tick_timer.icntb);
+        exynos4210_ltick_tx_commit(&s->l_timer[lt_i].tick_timer);
+
+        s->l_timer[lt_i].reg.wstat |= L_WSTAT_TCNTB_WRITE;
+        s->l_timer[lt_i].reg.cnt[L_REG_CNT_TCNTB] = value;
+
+#ifdef DEBUG_MCT
+        if (tcntb_min[lt_i] > value) {
+            tcntb_min[lt_i] = value;
+        }
+        if (tcntb_max[lt_i] < value) {
+            tcntb_max[lt_i] = value;
+        }
+        DPRINTF("local timer[%d] TCNTB write %llx; max=%x, min=%x\n",
+                lt_i, value, tcntb_max[lt_i], tcntb_min[lt_i]);
+#endif
+        break;
+
+    case L0_ICNTB: case L1_ICNTB:
+        lt_i = GET_L_TIMER_IDX(offset);
+
+        s->l_timer[lt_i].reg.wstat |= L_WSTAT_ICNTB_WRITE;
+        s->l_timer[lt_i].reg.cnt[L_REG_CNT_ICNTB] = value &
+                ~L_ICNTB_MANUAL_UPDATE;
+
+        /*
+         * We need to avoid too small values for TCNTB * ICNTB; otherwise
+         * the IRQ would be raised too often for QEMU to make progress
+         * executing guest code.
+         */
+        if (s->l_timer[lt_i].reg.cnt[L_REG_CNT_ICNTB] *
+            s->l_timer[lt_i].reg.cnt[L_REG_CNT_TCNTB] < MCT_LT_CNT_LOW_LIMIT) {
+            if (!s->l_timer[lt_i].reg.cnt[L_REG_CNT_TCNTB]) {
+                s->l_timer[lt_i].reg.cnt[L_REG_CNT_ICNTB] =
+                        MCT_LT_CNT_LOW_LIMIT;
+            } else {
+                s->l_timer[lt_i].reg.cnt[L_REG_CNT_ICNTB] =
+                        MCT_LT_CNT_LOW_LIMIT /
+                        s->l_timer[lt_i].reg.cnt[L_REG_CNT_TCNTB];
+            }
+        }
+
+        if (value & L_ICNTB_MANUAL_UPDATE) {
+            exynos4210_ltick_set_cntb(&s->l_timer[lt_i].tick_timer,
+                    s->l_timer[lt_i].tick_timer.tcntb,
+                    s->l_timer[lt_i].reg.cnt[L_REG_CNT_ICNTB]);
+        }
+
+#ifdef DEBUG_MCT
+        if (icntb_min[lt_i] > value) {
+            icntb_min[lt_i] = value;
+        }
+        if (icntb_max[lt_i] < value) {
+            icntb_max[lt_i] = value;
+        }
+        DPRINTF("local timer[%d] ICNTB write %llx; max=%x, min=%x\n\n",
+                lt_i, value, icntb_max[lt_i], icntb_min[lt_i]);
+#endif
+        break;
+
+    case L0_FRCNTB: case L1_FRCNTB:
+        lt_i = GET_L_TIMER_IDX(offset);
+        DPRINTF("local timer[%d] FRCNTB write %llx\n", lt_i, value);
+
+        s->l_timer[lt_i].reg.wstat |= L_WSTAT_FRCCNTB_WRITE;
+        s->l_timer[lt_i].reg.cnt[L_REG_CNT_FRCCNTB] = value;
+
+        break;
+
+    case L0_TCNTO: case L1_TCNTO:
+    case L0_ICNTO: case L1_ICNTO:
+    case L0_FRCNTO: case L1_FRCNTO:
+        qemu_log_mask(LOG_GUEST_ERROR,
+                      "exynos4210.mct: write to RO register " TARGET_FMT_plx,
+                      offset);
+        break;
+
+    case L0_INT_CSTAT: case L1_INT_CSTAT:
+        lt_i = GET_L_TIMER_IDX(offset);
+
+        DPRINTF("local timer[%d] CSTAT write %llx\n", lt_i, value);
+
+        s->l_timer[lt_i].reg.int_cstat &= ~value;
+        if (!s->l_timer[lt_i].reg.int_cstat) {
+            qemu_irq_lower(s->l_timer[lt_i].irq);
+        }
+        break;
+
+    case L0_INT_ENB: case L1_INT_ENB:
+        lt_i = GET_L_TIMER_IDX(offset);
+        old_val = s->l_timer[lt_i].reg.int_enb;
+
+        /* Raise Local timer IRQ if cstat is pending */
+        if ((value & L_INT_INTENB_ICNTEIE) > (old_val & L_INT_INTENB_ICNTEIE)) {
+            if (s->l_timer[lt_i].reg.int_cstat & L_INT_CSTAT_INTCNT) {
+                qemu_irq_raise(s->l_timer[lt_i].irq);
+            }
+        }
+
+        s->l_timer[lt_i].reg.int_enb = value;
+
+        break;
+
+    case L0_WSTAT: case L1_WSTAT:
+        lt_i = GET_L_TIMER_IDX(offset);
+
+        s->l_timer[lt_i].reg.wstat &= ~value;
+        break;
+
+    default:
+        qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" HWADDR_PRIX "\n",
+                      __func__, offset);
+        break;
+    }
+}
+
+static const MemoryRegionOps exynos4210_mct_ops = {
+    .read = exynos4210_mct_read,
+    .write = exynos4210_mct_write,
+    .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+/* MCT init */
+static void exynos4210_mct_init(Object *obj)
+{
+    int i;
+    Exynos4210MCTState *s = EXYNOS4210_MCT(obj);
+    SysBusDevice *dev = SYS_BUS_DEVICE(obj);
+
+    /* Global timer */
+
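/* a single ptimer drives the 64-bit FRC in MCT_GT_COUNTER_STEP chunks */
+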
s->g_timer.ptimer_frc = ptimer_init(exynos4210_gfrc_event, s, + PTIMER_POLICY_DEFAULT); + memset(&s->g_timer.reg, 0, sizeof(struct gregs)); + + /* Local timers */ + for (i = 0; i < 2; i++) { + s->l_timer[i].tick_timer.ptimer_tick = + ptimer_init(exynos4210_ltick_event, &s->l_timer[i], + PTIMER_POLICY_DEFAULT); + s->l_timer[i].ptimer_frc = + ptimer_init(exynos4210_lfrc_event, &s->l_timer[i], + PTIMER_POLICY_DEFAULT); + s->l_timer[i].id = i; + } + + /* IRQs */ + for (i = 0; i < MCT_GT_CMP_NUM; i++) { + sysbus_init_irq(dev, &s->g_timer.irq[i]); + } + for (i = 0; i < 2; i++) { + sysbus_init_irq(dev, &s->l_timer[i].irq); + } + + memory_region_init_io(&s->iomem, obj, &exynos4210_mct_ops, s, + "exynos4210-mct", MCT_SFR_SIZE); + sysbus_init_mmio(dev, &s->iomem); +} + +static void exynos4210_mct_finalize(Object *obj) +{ + int i; + Exynos4210MCTState *s = EXYNOS4210_MCT(obj); + + ptimer_free(s->g_timer.ptimer_frc); + + for (i = 0; i < 2; i++) { + ptimer_free(s->l_timer[i].tick_timer.ptimer_tick); + ptimer_free(s->l_timer[i].ptimer_frc); + } +} + +static void exynos4210_mct_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = exynos4210_mct_reset; + dc->vmsd = &vmstate_exynos4210_mct_state; +} + +static const TypeInfo exynos4210_mct_info = { + .name = TYPE_EXYNOS4210_MCT, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(Exynos4210MCTState), + .instance_init = exynos4210_mct_init, + .instance_finalize = exynos4210_mct_finalize, + .class_init = exynos4210_mct_class_init, +}; + +static void exynos4210_mct_register_types(void) +{ + type_register_static(&exynos4210_mct_info); +} + +type_init(exynos4210_mct_register_types) diff --git a/hw/timer/exynos4210_pwm.c b/hw/timer/exynos4210_pwm.c new file mode 100644 index 000000000..220088120 --- /dev/null +++ b/hw/timer/exynos4210_pwm.c @@ -0,0 +1,445 @@ +/* + * Samsung exynos4210 Pulse Width Modulation Timer + * + * Copyright (c) 2000 - 2011 Samsung Electronics Co., Ltd. + * All rights reserved. + * + * Evgeny Voevodin + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * See the GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . + */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "qemu/timer.h" +#include "qemu/module.h" +#include "hw/ptimer.h" + +#include "hw/arm/exynos4210.h" +#include "hw/irq.h" +#include "qom/object.h" + +//#define DEBUG_PWM + +#ifdef DEBUG_PWM +#define DPRINTF(fmt, ...) \ + do { fprintf(stdout, "PWM: [%24s:%5d] " fmt, __func__, __LINE__, \ + ## __VA_ARGS__); } while (0) +#else +#define DPRINTF(fmt, ...) 
do {} while (0) +#endif + +#define EXYNOS4210_PWM_TIMERS_NUM 5 +#define EXYNOS4210_PWM_REG_MEM_SIZE 0x50 + +#define TCFG0 0x0000 +#define TCFG1 0x0004 +#define TCON 0x0008 +#define TCNTB0 0x000C +#define TCMPB0 0x0010 +#define TCNTO0 0x0014 +#define TCNTB1 0x0018 +#define TCMPB1 0x001C +#define TCNTO1 0x0020 +#define TCNTB2 0x0024 +#define TCMPB2 0x0028 +#define TCNTO2 0x002C +#define TCNTB3 0x0030 +#define TCMPB3 0x0034 +#define TCNTO3 0x0038 +#define TCNTB4 0x003C +#define TCNTO4 0x0040 +#define TINT_CSTAT 0x0044 + +#define TCNTB(x) (0xC * (x)) +#define TCMPB(x) (0xC * (x) + 1) +#define TCNTO(x) (0xC * (x) + 2) + +#define GET_PRESCALER(reg, x) (((reg) & (0xFF << (8 * (x)))) >> 8 * (x)) +#define GET_DIVIDER(reg, x) (1 << (((reg) & (0xF << (4 * (x)))) >> (4 * (x)))) + +/* + * Attention! Timer4 doesn't have OUTPUT_INVERTER, + * so Auto Reload bit is not accessible by macros! + */ +#define TCON_TIMER_BASE(x) (((x) ? 1 : 0) * 4 + 4 * (x)) +#define TCON_TIMER_START(x) (1 << (TCON_TIMER_BASE(x) + 0)) +#define TCON_TIMER_MANUAL_UPD(x) (1 << (TCON_TIMER_BASE(x) + 1)) +#define TCON_TIMER_OUTPUT_INV(x) (1 << (TCON_TIMER_BASE(x) + 2)) +#define TCON_TIMER_AUTO_RELOAD(x) (1 << (TCON_TIMER_BASE(x) + 3)) +#define TCON_TIMER4_AUTO_RELOAD (1 << 22) + +#define TINT_CSTAT_STATUS(x) (1 << (5 + (x))) +#define TINT_CSTAT_ENABLE(x) (1 << (x)) + +/* timer struct */ +typedef struct { + uint32_t id; /* timer id */ + qemu_irq irq; /* local timer irq */ + uint32_t freq; /* timer frequency */ + + /* use ptimer.c to represent count down timer */ + ptimer_state *ptimer; /* timer */ + + /* registers */ + uint32_t reg_tcntb; /* counter register buffer */ + uint32_t reg_tcmpb; /* compare register buffer */ + + struct Exynos4210PWMState *parent; + +} Exynos4210PWM; + +#define TYPE_EXYNOS4210_PWM "exynos4210.pwm" +OBJECT_DECLARE_SIMPLE_TYPE(Exynos4210PWMState, EXYNOS4210_PWM) + +struct Exynos4210PWMState { + SysBusDevice parent_obj; + + MemoryRegion iomem; + + uint32_t reg_tcfg[2]; + uint32_t reg_tcon; + uint32_t reg_tint_cstat; + + Exynos4210PWM timer[EXYNOS4210_PWM_TIMERS_NUM]; + +}; + +/*** VMState ***/ +static const VMStateDescription vmstate_exynos4210_pwm = { + .name = "exynos4210.pwm.pwm", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(id, Exynos4210PWM), + VMSTATE_UINT32(freq, Exynos4210PWM), + VMSTATE_PTIMER(ptimer, Exynos4210PWM), + VMSTATE_UINT32(reg_tcntb, Exynos4210PWM), + VMSTATE_UINT32(reg_tcmpb, Exynos4210PWM), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_exynos4210_pwm_state = { + .name = "exynos4210.pwm", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32_ARRAY(reg_tcfg, Exynos4210PWMState, 2), + VMSTATE_UINT32(reg_tcon, Exynos4210PWMState), + VMSTATE_UINT32(reg_tint_cstat, Exynos4210PWMState), + VMSTATE_STRUCT_ARRAY(timer, Exynos4210PWMState, + EXYNOS4210_PWM_TIMERS_NUM, 0, + vmstate_exynos4210_pwm, Exynos4210PWM), + VMSTATE_END_OF_LIST() + } +}; + +/* + * PWM update frequency. + * Must be called within a ptimer_transaction_begin/commit block + * for s->timer[id].ptimer. 
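+ * The resulting frequency is 24 MHz / ((prescaler + 1) * divider);
+ * e.g. the reset values TCFG0 = 0x0101, TCFG1 = 0 give 24000000 / 2 = 12 MHz.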
+ */ +static void exynos4210_pwm_update_freq(Exynos4210PWMState *s, uint32_t id) +{ + uint32_t freq; + freq = s->timer[id].freq; + if (id > 1) { + s->timer[id].freq = 24000000 / + ((GET_PRESCALER(s->reg_tcfg[0], 1) + 1) * + (GET_DIVIDER(s->reg_tcfg[1], id))); + } else { + s->timer[id].freq = 24000000 / + ((GET_PRESCALER(s->reg_tcfg[0], 0) + 1) * + (GET_DIVIDER(s->reg_tcfg[1], id))); + } + + if (freq != s->timer[id].freq) { + ptimer_set_freq(s->timer[id].ptimer, s->timer[id].freq); + DPRINTF("freq=%uHz\n", s->timer[id].freq); + } +} + +/* + * Counter tick handler + */ +static void exynos4210_pwm_tick(void *opaque) +{ + Exynos4210PWM *s = (Exynos4210PWM *)opaque; + Exynos4210PWMState *p = (Exynos4210PWMState *)s->parent; + uint32_t id = s->id; + bool cmp; + + DPRINTF("timer %u tick\n", id); + + /* set irq status */ + p->reg_tint_cstat |= TINT_CSTAT_STATUS(id); + + /* raise IRQ */ + if (p->reg_tint_cstat & TINT_CSTAT_ENABLE(id)) { + DPRINTF("timer %u IRQ\n", id); + qemu_irq_raise(p->timer[id].irq); + } + + /* reload timer */ + if (id != 4) { + cmp = p->reg_tcon & TCON_TIMER_AUTO_RELOAD(id); + } else { + cmp = p->reg_tcon & TCON_TIMER4_AUTO_RELOAD; + } + + if (cmp) { + DPRINTF("auto reload timer %u count to %x\n", id, + p->timer[id].reg_tcntb); + ptimer_set_count(p->timer[id].ptimer, p->timer[id].reg_tcntb); + ptimer_run(p->timer[id].ptimer, 1); + } else { + /* stop timer, set status to STOP, see Basic Timer Operation */ + p->reg_tcon &= ~TCON_TIMER_START(id); + ptimer_stop(p->timer[id].ptimer); + } +} + +/* + * PWM Read + */ +static uint64_t exynos4210_pwm_read(void *opaque, hwaddr offset, + unsigned size) +{ + Exynos4210PWMState *s = (Exynos4210PWMState *)opaque; + uint32_t value = 0; + int index; + + switch (offset) { + case TCFG0: case TCFG1: + index = (offset - TCFG0) >> 2; + value = s->reg_tcfg[index]; + break; + + case TCON: + value = s->reg_tcon; + break; + + case TCNTB0: case TCNTB1: + case TCNTB2: case TCNTB3: case TCNTB4: + index = (offset - TCNTB0) / 0xC; + value = s->timer[index].reg_tcntb; + break; + + case TCMPB0: case TCMPB1: + case TCMPB2: case TCMPB3: + index = (offset - TCMPB0) / 0xC; + value = s->timer[index].reg_tcmpb; + break; + + case TCNTO0: case TCNTO1: + case TCNTO2: case TCNTO3: case TCNTO4: + index = (offset == TCNTO4) ? 4 : (offset - TCNTO0) / 0xC; + value = ptimer_get_count(s->timer[index].ptimer); + break; + + case TINT_CSTAT: + value = s->reg_tint_cstat; + break; + + default: + qemu_log_mask(LOG_GUEST_ERROR, + "exynos4210.pwm: bad read offset " TARGET_FMT_plx, + offset); + break; + } + return value; +} + +/* + * PWM Write + */ +static void exynos4210_pwm_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + Exynos4210PWMState *s = (Exynos4210PWMState *)opaque; + int index; + uint32_t new_val; + int i; + + switch (offset) { + case TCFG0: case TCFG1: + index = (offset - TCFG0) >> 2; + s->reg_tcfg[index] = value; + + /* update timers frequencies */ + for (i = 0; i < EXYNOS4210_PWM_TIMERS_NUM; i++) { + ptimer_transaction_begin(s->timer[i].ptimer); + exynos4210_pwm_update_freq(s, s->timer[i].id); + ptimer_transaction_commit(s->timer[i].ptimer); + } + break; + + case TCON: + for (i = 0; i < EXYNOS4210_PWM_TIMERS_NUM; i++) { + ptimer_transaction_begin(s->timer[i].ptimer); + if ((value & TCON_TIMER_MANUAL_UPD(i)) > + (s->reg_tcon & TCON_TIMER_MANUAL_UPD(i))) { + /* + * TCNTB and TCMPB are loaded into TCNT and TCMP. + * Update timers. 
+ */ + + /* this will start timer to run, this ok, because + * during processing start bit timer will be stopped + * if needed */ + ptimer_set_count(s->timer[i].ptimer, s->timer[i].reg_tcntb); + DPRINTF("set timer %d count to %x\n", i, + s->timer[i].reg_tcntb); + } + + if ((value & TCON_TIMER_START(i)) > + (s->reg_tcon & TCON_TIMER_START(i))) { + /* changed to start */ + ptimer_run(s->timer[i].ptimer, 1); + DPRINTF("run timer %d\n", i); + } + + if ((value & TCON_TIMER_START(i)) < + (s->reg_tcon & TCON_TIMER_START(i))) { + /* changed to stop */ + ptimer_stop(s->timer[i].ptimer); + DPRINTF("stop timer %d\n", i); + } + ptimer_transaction_commit(s->timer[i].ptimer); + } + s->reg_tcon = value; + break; + + case TCNTB0: case TCNTB1: + case TCNTB2: case TCNTB3: case TCNTB4: + index = (offset - TCNTB0) / 0xC; + s->timer[index].reg_tcntb = value; + break; + + case TCMPB0: case TCMPB1: + case TCMPB2: case TCMPB3: + index = (offset - TCMPB0) / 0xC; + s->timer[index].reg_tcmpb = value; + break; + + case TINT_CSTAT: + new_val = (s->reg_tint_cstat & 0x3E0) + (0x1F & value); + new_val &= ~(0x3E0 & value); + + for (i = 0; i < EXYNOS4210_PWM_TIMERS_NUM; i++) { + if ((new_val & TINT_CSTAT_STATUS(i)) < + (s->reg_tint_cstat & TINT_CSTAT_STATUS(i))) { + qemu_irq_lower(s->timer[i].irq); + } + } + + s->reg_tint_cstat = new_val; + break; + + default: + qemu_log_mask(LOG_GUEST_ERROR, + "exynos4210.pwm: bad write offset " TARGET_FMT_plx, + offset); + break; + + } +} + +/* + * Set default values to timer fields and registers + */ +static void exynos4210_pwm_reset(DeviceState *d) +{ + Exynos4210PWMState *s = EXYNOS4210_PWM(d); + int i; + s->reg_tcfg[0] = 0x0101; + s->reg_tcfg[1] = 0x0; + s->reg_tcon = 0; + s->reg_tint_cstat = 0; + for (i = 0; i < EXYNOS4210_PWM_TIMERS_NUM; i++) { + s->timer[i].reg_tcmpb = 0; + s->timer[i].reg_tcntb = 0; + + ptimer_transaction_begin(s->timer[i].ptimer); + exynos4210_pwm_update_freq(s, s->timer[i].id); + ptimer_stop(s->timer[i].ptimer); + ptimer_transaction_commit(s->timer[i].ptimer); + } +} + +static const MemoryRegionOps exynos4210_pwm_ops = { + .read = exynos4210_pwm_read, + .write = exynos4210_pwm_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +/* + * PWM timer initialization + */ +static void exynos4210_pwm_init(Object *obj) +{ + Exynos4210PWMState *s = EXYNOS4210_PWM(obj); + SysBusDevice *dev = SYS_BUS_DEVICE(obj); + int i; + + for (i = 0; i < EXYNOS4210_PWM_TIMERS_NUM; i++) { + sysbus_init_irq(dev, &s->timer[i].irq); + s->timer[i].ptimer = ptimer_init(exynos4210_pwm_tick, + &s->timer[i], + PTIMER_POLICY_DEFAULT); + s->timer[i].id = i; + s->timer[i].parent = s; + } + + memory_region_init_io(&s->iomem, obj, &exynos4210_pwm_ops, s, + "exynos4210-pwm", EXYNOS4210_PWM_REG_MEM_SIZE); + sysbus_init_mmio(dev, &s->iomem); +} + +static void exynos4210_pwm_finalize(Object *obj) +{ + Exynos4210PWMState *s = EXYNOS4210_PWM(obj); + int i; + + for (i = 0; i < EXYNOS4210_PWM_TIMERS_NUM; i++) { + ptimer_free(s->timer[i].ptimer); + } +} + +static void exynos4210_pwm_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = exynos4210_pwm_reset; + dc->vmsd = &vmstate_exynos4210_pwm_state; +} + +static const TypeInfo exynos4210_pwm_info = { + .name = TYPE_EXYNOS4210_PWM, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(Exynos4210PWMState), + .instance_init = exynos4210_pwm_init, + .instance_finalize = exynos4210_pwm_finalize, + .class_init = exynos4210_pwm_class_init, +}; + +static void exynos4210_pwm_register_types(void) +{ + 
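/* register the PWM device model with the QOM type system */
+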
type_register_static(&exynos4210_pwm_info); +} + +type_init(exynos4210_pwm_register_types) diff --git a/hw/timer/grlib_gptimer.c b/hw/timer/grlib_gptimer.c new file mode 100644 index 000000000..d51189010 --- /dev/null +++ b/hw/timer/grlib_gptimer.c @@ -0,0 +1,432 @@ +/* + * QEMU GRLIB GPTimer Emulator + * + * Copyright (c) 2010-2019 AdaCore + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "hw/sparc/grlib.h" +#include "hw/sysbus.h" +#include "qemu/timer.h" +#include "hw/irq.h" +#include "hw/ptimer.h" +#include "hw/qdev-properties.h" +#include "qemu/module.h" + +#include "trace.h" +#include "qom/object.h" + +#define UNIT_REG_SIZE 16 /* Size of memory mapped regs for the unit */ +#define GPTIMER_REG_SIZE 16 /* Size of memory mapped regs for a GPTimer */ + +#define GPTIMER_MAX_TIMERS 8 + +/* GPTimer Config register fields */ +#define GPTIMER_ENABLE (1 << 0) +#define GPTIMER_RESTART (1 << 1) +#define GPTIMER_LOAD (1 << 2) +#define GPTIMER_INT_ENABLE (1 << 3) +#define GPTIMER_INT_PENDING (1 << 4) +#define GPTIMER_CHAIN (1 << 5) /* Not supported */ +#define GPTIMER_DEBUG_HALT (1 << 6) /* Not supported */ + +/* Memory mapped register offsets */ +#define SCALER_OFFSET 0x00 +#define SCALER_RELOAD_OFFSET 0x04 +#define CONFIG_OFFSET 0x08 +#define COUNTER_OFFSET 0x00 +#define COUNTER_RELOAD_OFFSET 0x04 +#define TIMER_BASE 0x10 + +OBJECT_DECLARE_SIMPLE_TYPE(GPTimerUnit, GRLIB_GPTIMER) + +typedef struct GPTimer GPTimer; + +struct GPTimer { + struct ptimer_state *ptimer; + + qemu_irq irq; + int id; + GPTimerUnit *unit; + + /* registers */ + uint32_t counter; + uint32_t reload; + uint32_t config; +}; + +struct GPTimerUnit { + SysBusDevice parent_obj; + + MemoryRegion iomem; + + uint32_t nr_timers; /* Number of timers available */ + uint32_t freq_hz; /* System frequency */ + uint32_t irq_line; /* Base irq line */ + + GPTimer *timers; + + /* registers */ + uint32_t scaler; + uint32_t reload; + uint32_t config; +}; + +static void grlib_gptimer_tx_begin(GPTimer *timer) +{ + ptimer_transaction_begin(timer->ptimer); +} + +static void grlib_gptimer_tx_commit(GPTimer *timer) +{ + ptimer_transaction_commit(timer->ptimer); +} + +/* Must be called within grlib_gptimer_tx_begin/commit block */ +static void grlib_gptimer_enable(GPTimer *timer) +{ + assert(timer != NULL); + + + ptimer_stop(timer->ptimer); + + if (!(timer->config & GPTIMER_ENABLE)) { + /* Timer disabled */ + trace_grlib_gptimer_disabled(timer->id, timer->config); + return; + } + + /* 
ptimer is triggered when the counter reaches 0, but the GPTimer triggers at
+       underflow. Set count + 1 to simulate the GPTimer behavior. */
+
+    trace_grlib_gptimer_enable(timer->id, timer->counter);
+
+    ptimer_set_count(timer->ptimer, (uint64_t)timer->counter + 1);
+    ptimer_run(timer->ptimer, 1);
+}
+
+/* Must be called within grlib_gptimer_tx_begin/commit block */
+static void grlib_gptimer_restart(GPTimer *timer)
+{
+    assert(timer != NULL);
+
+    trace_grlib_gptimer_restart(timer->id, timer->reload);
+
+    timer->counter = timer->reload;
+    grlib_gptimer_enable(timer);
+}
+
+static void grlib_gptimer_set_scaler(GPTimerUnit *unit, uint32_t scaler)
+{
+    int i = 0;
+    uint32_t value = 0;
+
+    assert(unit != NULL);
+
+    if (scaler > 0) {
+        value = unit->freq_hz / (scaler + 1);
+    } else {
+        value = unit->freq_hz;
+    }
+
+    trace_grlib_gptimer_set_scaler(scaler, value);
+
+    for (i = 0; i < unit->nr_timers; i++) {
+        ptimer_transaction_begin(unit->timers[i].ptimer);
+        ptimer_set_freq(unit->timers[i].ptimer, value);
+        ptimer_transaction_commit(unit->timers[i].ptimer);
+    }
+}
+
+static void grlib_gptimer_hit(void *opaque)
+{
+    GPTimer *timer = opaque;
+    assert(timer != NULL);
+
+    trace_grlib_gptimer_hit(timer->id);
+
+    /* Timer expired */
+
+    if (timer->config & GPTIMER_INT_ENABLE) {
+        /* Set the pending bit (only unset by write in the config register) */
+        timer->config |= GPTIMER_INT_PENDING;
+        qemu_irq_pulse(timer->irq);
+    }
+
+    if (timer->config & GPTIMER_RESTART) {
+        grlib_gptimer_restart(timer);
+    }
+}
+
+static uint64_t grlib_gptimer_read(void *opaque, hwaddr addr,
+                                   unsigned size)
+{
+    GPTimerUnit *unit = opaque;
+    hwaddr timer_addr;
+    int id;
+    uint32_t value = 0;
+
+    addr &= 0xff;
+
+    /* Unit registers */
+    switch (addr) {
+    case SCALER_OFFSET:
+        trace_grlib_gptimer_readl(-1, addr, unit->scaler);
+        return unit->scaler;
+
+    case SCALER_RELOAD_OFFSET:
+        trace_grlib_gptimer_readl(-1, addr, unit->reload);
+        return unit->reload;
+
+    case CONFIG_OFFSET:
+        trace_grlib_gptimer_readl(-1, addr, unit->config);
+        return unit->config;
+
+    default:
+        break;
+    }
+
+    timer_addr = (addr % TIMER_BASE);
+    id = (addr - TIMER_BASE) / TIMER_BASE;
+
+    if (id >= 0 && id < unit->nr_timers) {
+
+        /* GPTimer registers */
+        switch (timer_addr) {
+        case COUNTER_OFFSET:
+            value = ptimer_get_count(unit->timers[id].ptimer);
+            trace_grlib_gptimer_readl(id, addr, value);
+            return value;
+
+        case COUNTER_RELOAD_OFFSET:
+            value = unit->timers[id].reload;
+            trace_grlib_gptimer_readl(id, addr, value);
+            return value;
+
+        case CONFIG_OFFSET:
+            trace_grlib_gptimer_readl(id, addr, unit->timers[id].config);
+            return unit->timers[id].config;
+
+        default:
+            break;
+        }
+
+    }
+
+    trace_grlib_gptimer_readl(-1, addr, 0);
+    return 0;
+}
+
+static void grlib_gptimer_write(void *opaque, hwaddr addr,
+                                uint64_t value, unsigned size)
+{
+    GPTimerUnit *unit = opaque;
+    hwaddr timer_addr;
+    int id;
+
+    addr &= 0xff;
+
+    /* Unit registers */
+    switch (addr) {
+    case SCALER_OFFSET:
+        value &= 0xFFFF; /* clean up the value */
+        unit->scaler = value;
+        trace_grlib_gptimer_writel(-1, addr, unit->scaler);
+        return;
+
+    case SCALER_RELOAD_OFFSET:
+        value &= 0xFFFF; /* clean up the value */
+        unit->reload = value;
+        trace_grlib_gptimer_writel(-1, addr, unit->reload);
+        grlib_gptimer_set_scaler(unit, value);
+        return;
+
+    case CONFIG_OFFSET:
+        /* Read Only (disable timer freeze not supported) */
+        trace_grlib_gptimer_writel(-1, addr, 0);
+        return;
+
+    default:
+        break;
+    }
+
+    timer_addr = (addr % TIMER_BASE);
+    id = (addr - TIMER_BASE) / TIMER_BASE;
+
+    if (id
>= 0 && id < unit->nr_timers) { + + /* GPTimer registers */ + switch (timer_addr) { + case COUNTER_OFFSET: + trace_grlib_gptimer_writel(id, addr, value); + grlib_gptimer_tx_begin(&unit->timers[id]); + unit->timers[id].counter = value; + grlib_gptimer_enable(&unit->timers[id]); + grlib_gptimer_tx_commit(&unit->timers[id]); + return; + + case COUNTER_RELOAD_OFFSET: + trace_grlib_gptimer_writel(id, addr, value); + unit->timers[id].reload = value; + return; + + case CONFIG_OFFSET: + trace_grlib_gptimer_writel(id, addr, value); + + if (value & GPTIMER_INT_PENDING) { + /* clear pending bit */ + value &= ~GPTIMER_INT_PENDING; + } else { + /* keep pending bit */ + value |= unit->timers[id].config & GPTIMER_INT_PENDING; + } + + unit->timers[id].config = value; + + /* gptimer_restart calls gptimer_enable, so if "enable" and "load" + bits are present, we just have to call restart. */ + + grlib_gptimer_tx_begin(&unit->timers[id]); + if (value & GPTIMER_LOAD) { + grlib_gptimer_restart(&unit->timers[id]); + } else if (value & GPTIMER_ENABLE) { + grlib_gptimer_enable(&unit->timers[id]); + } + + /* These fields must always be read as 0 */ + value &= ~(GPTIMER_LOAD & GPTIMER_DEBUG_HALT); + + unit->timers[id].config = value; + grlib_gptimer_tx_commit(&unit->timers[id]); + return; + + default: + break; + } + + } + + trace_grlib_gptimer_writel(-1, addr, value); +} + +static const MemoryRegionOps grlib_gptimer_ops = { + .read = grlib_gptimer_read, + .write = grlib_gptimer_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +static void grlib_gptimer_reset(DeviceState *d) +{ + GPTimerUnit *unit = GRLIB_GPTIMER(d); + int i = 0; + + assert(unit != NULL); + + unit->scaler = 0; + unit->reload = 0; + + unit->config = unit->nr_timers; + unit->config |= unit->irq_line << 3; + unit->config |= 1 << 8; /* separate interrupt */ + unit->config |= 1 << 9; /* Disable timer freeze */ + + + for (i = 0; i < unit->nr_timers; i++) { + GPTimer *timer = &unit->timers[i]; + + timer->counter = 0; + timer->reload = 0; + timer->config = 0; + ptimer_transaction_begin(timer->ptimer); + ptimer_stop(timer->ptimer); + ptimer_set_count(timer->ptimer, 0); + ptimer_set_freq(timer->ptimer, unit->freq_hz); + ptimer_transaction_commit(timer->ptimer); + } +} + +static void grlib_gptimer_realize(DeviceState *dev, Error **errp) +{ + GPTimerUnit *unit = GRLIB_GPTIMER(dev); + unsigned int i; + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + + assert(unit->nr_timers > 0); + assert(unit->nr_timers <= GPTIMER_MAX_TIMERS); + + unit->timers = g_malloc0(sizeof unit->timers[0] * unit->nr_timers); + + for (i = 0; i < unit->nr_timers; i++) { + GPTimer *timer = &unit->timers[i]; + + timer->unit = unit; + timer->ptimer = ptimer_init(grlib_gptimer_hit, timer, + PTIMER_POLICY_DEFAULT); + timer->id = i; + + /* One IRQ line for each timer */ + sysbus_init_irq(sbd, &timer->irq); + + ptimer_transaction_begin(timer->ptimer); + ptimer_set_freq(timer->ptimer, unit->freq_hz); + ptimer_transaction_commit(timer->ptimer); + } + + memory_region_init_io(&unit->iomem, OBJECT(unit), &grlib_gptimer_ops, + unit, "gptimer", + UNIT_REG_SIZE + GPTIMER_REG_SIZE * unit->nr_timers); + + sysbus_init_mmio(sbd, &unit->iomem); +} + +static Property grlib_gptimer_properties[] = { + DEFINE_PROP_UINT32("frequency", GPTimerUnit, freq_hz, 40000000), + DEFINE_PROP_UINT32("irq-line", GPTimerUnit, irq_line, 8), + DEFINE_PROP_UINT32("nr-timers", GPTimerUnit, nr_timers, 2), + DEFINE_PROP_END_OF_LIST(), +}; + +static void 
grlib_gptimer_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = grlib_gptimer_realize; + dc->reset = grlib_gptimer_reset; + device_class_set_props(dc, grlib_gptimer_properties); +} + +static const TypeInfo grlib_gptimer_info = { + .name = TYPE_GRLIB_GPTIMER, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(GPTimerUnit), + .class_init = grlib_gptimer_class_init, +}; + +static void grlib_gptimer_register_types(void) +{ + type_register_static(&grlib_gptimer_info); +} + +type_init(grlib_gptimer_register_types) diff --git a/hw/timer/hpet.c b/hw/timer/hpet.c new file mode 100644 index 000000000..9520471be --- /dev/null +++ b/hw/timer/hpet.c @@ -0,0 +1,807 @@ +/* + * High Precision Event Timer emulation + * + * Copyright (c) 2007 Alexander Graf + * Copyright (c) 2008 IBM Corporation + * + * Authors: Beth Kon + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + * + * ***************************************************************** + * + * This driver attempts to emulate an HPET device in software. + */ + +#include "qemu/osdep.h" +#include "hw/i386/pc.h" +#include "hw/irq.h" +#include "qapi/error.h" +#include "qemu/error-report.h" +#include "qemu/timer.h" +#include "hw/timer/hpet.h" +#include "hw/sysbus.h" +#include "hw/rtc/mc146818rtc.h" +#include "hw/rtc/mc146818rtc_regs.h" +#include "migration/vmstate.h" +#include "hw/timer/i8254.h" +#include "exec/address-spaces.h" +#include "qom/object.h" + +//#define HPET_DEBUG +#ifdef HPET_DEBUG +#define DPRINTF printf +#else +#define DPRINTF(...) +#endif + +#define HPET_MSI_SUPPORT 0 + +OBJECT_DECLARE_SIMPLE_TYPE(HPETState, HPET) + +struct HPETState; +typedef struct HPETTimer { /* timers */ + uint8_t tn; /*timer number*/ + QEMUTimer *qemu_timer; + struct HPETState *state; + /* Memory-mapped, software visible timer registers */ + uint64_t config; /* configuration/cap */ + uint64_t cmp; /* comparator */ + uint64_t fsb; /* FSB route */ + /* Hidden register state */ + uint64_t period; /* Last value written to comparator */ + uint8_t wrap_flag; /* timer pop will indicate wrap for one-shot 32-bit + * mode. Next pop will be actual timer expiration. 
+ */ +} HPETTimer; + +struct HPETState { + /*< private >*/ + SysBusDevice parent_obj; + /*< public >*/ + + MemoryRegion iomem; + uint64_t hpet_offset; + bool hpet_offset_saved; + qemu_irq irqs[HPET_NUM_IRQ_ROUTES]; + uint32_t flags; + uint8_t rtc_irq_level; + qemu_irq pit_enabled; + uint8_t num_timers; + uint32_t intcap; + HPETTimer timer[HPET_MAX_TIMERS]; + + /* Memory-mapped, software visible registers */ + uint64_t capability; /* capabilities */ + uint64_t config; /* configuration */ + uint64_t isr; /* interrupt status reg */ + uint64_t hpet_counter; /* main counter */ + uint8_t hpet_id; /* instance id */ +}; + +static uint32_t hpet_in_legacy_mode(HPETState *s) +{ + return s->config & HPET_CFG_LEGACY; +} + +static uint32_t timer_int_route(struct HPETTimer *timer) +{ + return (timer->config & HPET_TN_INT_ROUTE_MASK) >> HPET_TN_INT_ROUTE_SHIFT; +} + +static uint32_t timer_fsb_route(HPETTimer *t) +{ + return t->config & HPET_TN_FSB_ENABLE; +} + +static uint32_t hpet_enabled(HPETState *s) +{ + return s->config & HPET_CFG_ENABLE; +} + +static uint32_t timer_is_periodic(HPETTimer *t) +{ + return t->config & HPET_TN_PERIODIC; +} + +static uint32_t timer_enabled(HPETTimer *t) +{ + return t->config & HPET_TN_ENABLE; +} + +static uint32_t hpet_time_after(uint64_t a, uint64_t b) +{ + return ((int32_t)(b - a) < 0); +} + +static uint32_t hpet_time_after64(uint64_t a, uint64_t b) +{ + return ((int64_t)(b - a) < 0); +} + +static uint64_t ticks_to_ns(uint64_t value) +{ + return value * HPET_CLK_PERIOD; +} + +static uint64_t ns_to_ticks(uint64_t value) +{ + return value / HPET_CLK_PERIOD; +} + +static uint64_t hpet_fixup_reg(uint64_t new, uint64_t old, uint64_t mask) +{ + new &= mask; + new |= old & ~mask; + return new; +} + +static int activating_bit(uint64_t old, uint64_t new, uint64_t mask) +{ + return (!(old & mask) && (new & mask)); +} + +static int deactivating_bit(uint64_t old, uint64_t new, uint64_t mask) +{ + return ((old & mask) && !(new & mask)); +} + +static uint64_t hpet_get_ticks(HPETState *s) +{ + return ns_to_ticks(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + s->hpet_offset); +} + +/* + * calculate diff between comparator value and current ticks + */ +static inline uint64_t hpet_calculate_diff(HPETTimer *t, uint64_t current) +{ + + if (t->config & HPET_TN_32BIT) { + uint32_t diff, cmp; + + cmp = (uint32_t)t->cmp; + diff = cmp - (uint32_t)current; + diff = (int32_t)diff > 0 ? diff : (uint32_t)1; + return (uint64_t)diff; + } else { + uint64_t diff, cmp; + + cmp = t->cmp; + diff = cmp - current; + diff = (int64_t)diff > 0 ? diff : (uint64_t)1; + return diff; + } +} + +static void update_irq(struct HPETTimer *timer, int set) +{ + uint64_t mask; + HPETState *s; + int route; + + if (timer->tn <= 1 && hpet_in_legacy_mode(timer->state)) { + /* if LegacyReplacementRoute bit is set, HPET specification requires + * timer0 be routed to IRQ0 in NON-APIC or IRQ2 in the I/O APIC, + * timer1 be routed to IRQ8 in NON-APIC or IRQ8 in the I/O APIC. + */ + route = (timer->tn == 0) ? 
0 : RTC_ISA_IRQ; + } else { + route = timer_int_route(timer); + } + s = timer->state; + mask = 1 << timer->tn; + if (!set || !timer_enabled(timer) || !hpet_enabled(timer->state)) { + s->isr &= ~mask; + if (!timer_fsb_route(timer)) { + qemu_irq_lower(s->irqs[route]); + } + } else if (timer_fsb_route(timer)) { + address_space_stl_le(&address_space_memory, timer->fsb >> 32, + timer->fsb & 0xffffffff, MEMTXATTRS_UNSPECIFIED, + NULL); + } else if (timer->config & HPET_TN_TYPE_LEVEL) { + s->isr |= mask; + qemu_irq_raise(s->irqs[route]); + } else { + s->isr &= ~mask; + qemu_irq_pulse(s->irqs[route]); + } +} + +static int hpet_pre_save(void *opaque) +{ + HPETState *s = opaque; + + /* save current counter value */ + if (hpet_enabled(s)) { + s->hpet_counter = hpet_get_ticks(s); + } + + return 0; +} + +static int hpet_pre_load(void *opaque) +{ + HPETState *s = opaque; + + /* version 1 only supports 3, later versions will load the actual value */ + s->num_timers = HPET_MIN_TIMERS; + return 0; +} + +static bool hpet_validate_num_timers(void *opaque, int version_id) +{ + HPETState *s = opaque; + + if (s->num_timers < HPET_MIN_TIMERS) { + return false; + } else if (s->num_timers > HPET_MAX_TIMERS) { + return false; + } + return true; +} + +static int hpet_post_load(void *opaque, int version_id) +{ + HPETState *s = opaque; + + /* Recalculate the offset between the main counter and guest time */ + if (!s->hpet_offset_saved) { + s->hpet_offset = ticks_to_ns(s->hpet_counter) + - qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + } + + /* Push number of timers into capability returned via HPET_ID */ + s->capability &= ~HPET_ID_NUM_TIM_MASK; + s->capability |= (s->num_timers - 1) << HPET_ID_NUM_TIM_SHIFT; + hpet_cfg.hpet[s->hpet_id].event_timer_block_id = (uint32_t)s->capability; + + /* Derive HPET_MSI_SUPPORT from the capability of the first timer. 
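+ * (HPET_MSI_SUPPORT is bit 0 of s->flags, the same bit the "msi" + * property controls, so the flag stays consistent with the incoming + * timer capabilities.)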
*/ + s->flags &= ~(1 << HPET_MSI_SUPPORT); + if (s->timer[0].config & HPET_TN_FSB_CAP) { + s->flags |= 1 << HPET_MSI_SUPPORT; + } + return 0; +} + +static bool hpet_offset_needed(void *opaque) +{ + HPETState *s = opaque; + + return hpet_enabled(s) && s->hpet_offset_saved; +} + +static bool hpet_rtc_irq_level_needed(void *opaque) +{ + HPETState *s = opaque; + + return s->rtc_irq_level != 0; +} + +static const VMStateDescription vmstate_hpet_rtc_irq_level = { + .name = "hpet/rtc_irq_level", + .version_id = 1, + .minimum_version_id = 1, + .needed = hpet_rtc_irq_level_needed, + .fields = (VMStateField[]) { + VMSTATE_UINT8(rtc_irq_level, HPETState), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_hpet_offset = { + .name = "hpet/offset", + .version_id = 1, + .minimum_version_id = 1, + .needed = hpet_offset_needed, + .fields = (VMStateField[]) { + VMSTATE_UINT64(hpet_offset, HPETState), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_hpet_timer = { + .name = "hpet_timer", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT8(tn, HPETTimer), + VMSTATE_UINT64(config, HPETTimer), + VMSTATE_UINT64(cmp, HPETTimer), + VMSTATE_UINT64(fsb, HPETTimer), + VMSTATE_UINT64(period, HPETTimer), + VMSTATE_UINT8(wrap_flag, HPETTimer), + VMSTATE_TIMER_PTR(qemu_timer, HPETTimer), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_hpet = { + .name = "hpet", + .version_id = 2, + .minimum_version_id = 1, + .pre_save = hpet_pre_save, + .pre_load = hpet_pre_load, + .post_load = hpet_post_load, + .fields = (VMStateField[]) { + VMSTATE_UINT64(config, HPETState), + VMSTATE_UINT64(isr, HPETState), + VMSTATE_UINT64(hpet_counter, HPETState), + VMSTATE_UINT8_V(num_timers, HPETState, 2), + VMSTATE_VALIDATE("num_timers in range", hpet_validate_num_timers), + VMSTATE_STRUCT_VARRAY_UINT8(timer, HPETState, num_timers, 0, + vmstate_hpet_timer, HPETTimer), + VMSTATE_END_OF_LIST() + }, + .subsections = (const VMStateDescription*[]) { + &vmstate_hpet_rtc_irq_level, + &vmstate_hpet_offset, + NULL + } +}; + +/* + * timer expiration callback + */ +static void hpet_timer(void *opaque) +{ + HPETTimer *t = opaque; + uint64_t diff; + + uint64_t period = t->period; + uint64_t cur_tick = hpet_get_ticks(t->state); + + if (timer_is_periodic(t) && period != 0) { + if (t->config & HPET_TN_32BIT) { + while (hpet_time_after(cur_tick, t->cmp)) { + t->cmp = (uint32_t)(t->cmp + t->period); + } + } else { + while (hpet_time_after64(cur_tick, t->cmp)) { + t->cmp += period; + } + } + diff = hpet_calculate_diff(t, cur_tick); + timer_mod(t->qemu_timer, + qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + (int64_t)ticks_to_ns(diff)); + } else if (t->config & HPET_TN_32BIT && !timer_is_periodic(t)) { + if (t->wrap_flag) { + diff = hpet_calculate_diff(t, cur_tick); + timer_mod(t->qemu_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + + (int64_t)ticks_to_ns(diff)); + t->wrap_flag = 0; + } + } + update_irq(t, 1); +} + +static void hpet_set_timer(HPETTimer *t) +{ + uint64_t diff; + uint32_t wrap_diff; /* how many ticks until we wrap? */ + uint64_t cur_tick = hpet_get_ticks(t->state); + + /* whenever new timer is being set up, make sure wrap_flag is 0 */ + t->wrap_flag = 0; + diff = hpet_calculate_diff(t, cur_tick); + + /* hpet spec says in one-shot 32-bit mode, generate an interrupt when + * counter wraps in addition to an interrupt with comparator match. 
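+ * (Worked example, for illustration only: with cur_tick 0xfffffff0 + * and cmp 0x10, diff computes to 0x20 but wrap_diff to 0x0f, so the + * timer is armed for the wrap first and wrap_flag defers the + * comparator pop to the next expiry.)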
+ */ + if (t->config & HPET_TN_32BIT && !timer_is_periodic(t)) { + wrap_diff = 0xffffffff - (uint32_t)cur_tick; + if (wrap_diff < (uint32_t)diff) { + diff = wrap_diff; + t->wrap_flag = 1; + } + } + timer_mod(t->qemu_timer, + qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + (int64_t)ticks_to_ns(diff)); +} + +static void hpet_del_timer(HPETTimer *t) +{ + timer_del(t->qemu_timer); + update_irq(t, 0); +} + +static uint64_t hpet_ram_read(void *opaque, hwaddr addr, + unsigned size) +{ + HPETState *s = opaque; + uint64_t cur_tick, index; + + DPRINTF("qemu: Enter hpet_ram_readl at %" PRIx64 "\n", addr); + index = addr; + /*address range of all TN regs*/ + if (index >= 0x100 && index <= 0x3ff) { + uint8_t timer_id = (addr - 0x100) / 0x20; + HPETTimer *timer = &s->timer[timer_id]; + + if (timer_id > s->num_timers) { + DPRINTF("qemu: timer id out of range\n"); + return 0; + } + + switch ((addr - 0x100) % 0x20) { + case HPET_TN_CFG: + return timer->config; + case HPET_TN_CFG + 4: // Interrupt capabilities + return timer->config >> 32; + case HPET_TN_CMP: // comparator register + return timer->cmp; + case HPET_TN_CMP + 4: + return timer->cmp >> 32; + case HPET_TN_ROUTE: + return timer->fsb; + case HPET_TN_ROUTE + 4: + return timer->fsb >> 32; + default: + DPRINTF("qemu: invalid hpet_ram_readl\n"); + break; + } + } else { + switch (index) { + case HPET_ID: + return s->capability; + case HPET_PERIOD: + return s->capability >> 32; + case HPET_CFG: + return s->config; + case HPET_CFG + 4: + DPRINTF("qemu: invalid HPET_CFG + 4 hpet_ram_readl\n"); + return 0; + case HPET_COUNTER: + if (hpet_enabled(s)) { + cur_tick = hpet_get_ticks(s); + } else { + cur_tick = s->hpet_counter; + } + DPRINTF("qemu: reading counter = %" PRIx64 "\n", cur_tick); + return cur_tick; + case HPET_COUNTER + 4: + if (hpet_enabled(s)) { + cur_tick = hpet_get_ticks(s); + } else { + cur_tick = s->hpet_counter; + } + DPRINTF("qemu: reading counter + 4 = %" PRIx64 "\n", cur_tick); + return cur_tick >> 32; + case HPET_STATUS: + return s->isr; + default: + DPRINTF("qemu: invalid hpet_ram_readl\n"); + break; + } + } + return 0; +} + +static void hpet_ram_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + int i; + HPETState *s = opaque; + uint64_t old_val, new_val, val, index; + + DPRINTF("qemu: Enter hpet_ram_writel at %" PRIx64 " = 0x%" PRIx64 "\n", + addr, value); + index = addr; + old_val = hpet_ram_read(opaque, addr, 4); + new_val = value; + + /*address range of all TN regs*/ + if (index >= 0x100 && index <= 0x3ff) { + uint8_t timer_id = (addr - 0x100) / 0x20; + HPETTimer *timer = &s->timer[timer_id]; + + DPRINTF("qemu: hpet_ram_writel timer_id = 0x%x\n", timer_id); + if (timer_id > s->num_timers) { + DPRINTF("qemu: timer id out of range\n"); + return; + } + switch ((addr - 0x100) % 0x20) { + case HPET_TN_CFG: + DPRINTF("qemu: hpet_ram_writel HPET_TN_CFG\n"); + if (activating_bit(old_val, new_val, HPET_TN_FSB_ENABLE)) { + update_irq(timer, 0); + } + val = hpet_fixup_reg(new_val, old_val, HPET_TN_CFG_WRITE_MASK); + timer->config = (timer->config & 0xffffffff00000000ULL) | val; + if (new_val & HPET_TN_32BIT) { + timer->cmp = (uint32_t)timer->cmp; + timer->period = (uint32_t)timer->period; + } + if (activating_bit(old_val, new_val, HPET_TN_ENABLE) && + hpet_enabled(s)) { + hpet_set_timer(timer); + } else if (deactivating_bit(old_val, new_val, HPET_TN_ENABLE)) { + hpet_del_timer(timer); + } + break; + case HPET_TN_CFG + 4: // Interrupt capabilities + DPRINTF("qemu: invalid HPET_TN_CFG+4 write\n"); + break; + case HPET_TN_CMP: // 
comparator register + DPRINTF("qemu: hpet_ram_writel HPET_TN_CMP\n"); + if (timer->config & HPET_TN_32BIT) { + new_val = (uint32_t)new_val; + } + if (!timer_is_periodic(timer) + || (timer->config & HPET_TN_SETVAL)) { + timer->cmp = (timer->cmp & 0xffffffff00000000ULL) | new_val; + } + if (timer_is_periodic(timer)) { + /* + * FIXME: Clamp period to reasonable min value? + * Clamp period to reasonable max value + */ + new_val &= (timer->config & HPET_TN_32BIT ? ~0u : ~0ull) >> 1; + timer->period = + (timer->period & 0xffffffff00000000ULL) | new_val; + } + timer->config &= ~HPET_TN_SETVAL; + if (hpet_enabled(s)) { + hpet_set_timer(timer); + } + break; + case HPET_TN_CMP + 4: // comparator register high order + DPRINTF("qemu: hpet_ram_writel HPET_TN_CMP + 4\n"); + if (!timer_is_periodic(timer) + || (timer->config & HPET_TN_SETVAL)) { + timer->cmp = (timer->cmp & 0xffffffffULL) | new_val << 32; + } else { + /* + * FIXME: Clamp period to reasonable min value? + * Clamp period to reasonable max value + */ + new_val &= (timer->config & HPET_TN_32BIT ? ~0u : ~0ull) >> 1; + timer->period = + (timer->period & 0xffffffffULL) | new_val << 32; + } + timer->config &= ~HPET_TN_SETVAL; + if (hpet_enabled(s)) { + hpet_set_timer(timer); + } + break; + case HPET_TN_ROUTE: + timer->fsb = (timer->fsb & 0xffffffff00000000ULL) | new_val; + break; + case HPET_TN_ROUTE + 4: + timer->fsb = (new_val << 32) | (timer->fsb & 0xffffffff); + break; + default: + DPRINTF("qemu: invalid hpet_ram_writel\n"); + break; + } + return; + } else { + switch (index) { + case HPET_ID: + return; + case HPET_CFG: + val = hpet_fixup_reg(new_val, old_val, HPET_CFG_WRITE_MASK); + s->config = (s->config & 0xffffffff00000000ULL) | val; + if (activating_bit(old_val, new_val, HPET_CFG_ENABLE)) { + /* Enable main counter and interrupt generation. */ + s->hpet_offset = + ticks_to_ns(s->hpet_counter) - qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + for (i = 0; i < s->num_timers; i++) { + if ((&s->timer[i])->cmp != ~0ULL) { + hpet_set_timer(&s->timer[i]); + } + } + } else if (deactivating_bit(old_val, new_val, HPET_CFG_ENABLE)) { + /* Halt main counter and disable interrupt generation. */ + s->hpet_counter = hpet_get_ticks(s); + for (i = 0; i < s->num_timers; i++) { + hpet_del_timer(&s->timer[i]); + } + } + /* i8254 and RTC output pins are disabled + * when HPET is in legacy mode */ + if (activating_bit(old_val, new_val, HPET_CFG_LEGACY)) { + qemu_set_irq(s->pit_enabled, 0); + qemu_irq_lower(s->irqs[0]); + qemu_irq_lower(s->irqs[RTC_ISA_IRQ]); + } else if (deactivating_bit(old_val, new_val, HPET_CFG_LEGACY)) { + qemu_irq_lower(s->irqs[0]); + qemu_set_irq(s->pit_enabled, 1); + qemu_set_irq(s->irqs[RTC_ISA_IRQ], s->rtc_irq_level); + } + break; + case HPET_CFG + 4: + DPRINTF("qemu: invalid HPET_CFG+4 write\n"); + break; + case HPET_STATUS: + val = new_val & s->isr; + for (i = 0; i < s->num_timers; i++) { + if (val & (1 << i)) { + update_irq(&s->timer[i], 0); + } + } + break; + case HPET_COUNTER: + if (hpet_enabled(s)) { + DPRINTF("qemu: Writing counter while HPET enabled!\n"); + } + s->hpet_counter = + (s->hpet_counter & 0xffffffff00000000ULL) | value; + DPRINTF("qemu: HPET counter written. ctr = 0x%" PRIx64 " -> " + "%" PRIx64 "\n", value, s->hpet_counter); + break; + case HPET_COUNTER + 4: + if (hpet_enabled(s)) { + DPRINTF("qemu: Writing counter while HPET enabled!\n"); + } + s->hpet_counter = + (s->hpet_counter & 0xffffffffULL) | (((uint64_t)value) << 32); + DPRINTF("qemu: HPET counter + 4 written. 
ctr = 0x%" PRIx64 " -> " + "%" PRIx64 "\n", value, s->hpet_counter); + break; + default: + DPRINTF("qemu: invalid hpet_ram_writel\n"); + break; + } + } +} + +static const MemoryRegionOps hpet_ram_ops = { + .read = hpet_ram_read, + .write = hpet_ram_write, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + }, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void hpet_reset(DeviceState *d) +{ + HPETState *s = HPET(d); + SysBusDevice *sbd = SYS_BUS_DEVICE(d); + int i; + + for (i = 0; i < s->num_timers; i++) { + HPETTimer *timer = &s->timer[i]; + + hpet_del_timer(timer); + timer->cmp = ~0ULL; + timer->config = HPET_TN_PERIODIC_CAP | HPET_TN_SIZE_CAP; + if (s->flags & (1 << HPET_MSI_SUPPORT)) { + timer->config |= HPET_TN_FSB_CAP; + } + /* advertise availability of ioapic int */ + timer->config |= (uint64_t)s->intcap << 32; + timer->period = 0ULL; + timer->wrap_flag = 0; + } + + qemu_set_irq(s->pit_enabled, 1); + s->hpet_counter = 0ULL; + s->hpet_offset = 0ULL; + s->config = 0ULL; + hpet_cfg.hpet[s->hpet_id].event_timer_block_id = (uint32_t)s->capability; + hpet_cfg.hpet[s->hpet_id].address = sbd->mmio[0].addr; + + /* to document that the RTC lowers its output on reset as well */ + s->rtc_irq_level = 0; +} + +static void hpet_handle_legacy_irq(void *opaque, int n, int level) +{ + HPETState *s = HPET(opaque); + + if (n == HPET_LEGACY_PIT_INT) { + if (!hpet_in_legacy_mode(s)) { + qemu_set_irq(s->irqs[0], level); + } + } else { + s->rtc_irq_level = level; + if (!hpet_in_legacy_mode(s)) { + qemu_set_irq(s->irqs[RTC_ISA_IRQ], level); + } + } +} + +static void hpet_init(Object *obj) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + HPETState *s = HPET(obj); + + /* HPET Area */ + memory_region_init_io(&s->iomem, obj, &hpet_ram_ops, s, "hpet", HPET_LEN); + sysbus_init_mmio(sbd, &s->iomem); +} + +static void hpet_realize(DeviceState *dev, Error **errp) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + HPETState *s = HPET(dev); + int i; + HPETTimer *timer; + + if (!s->intcap) { + warn_report("Hpet's intcap not initialized"); + } + if (hpet_cfg.count == UINT8_MAX) { + /* first instance */ + hpet_cfg.count = 0; + } + + if (hpet_cfg.count == 8) { + error_setg(errp, "Only 8 instances of HPET is allowed"); + return; + } + + s->hpet_id = hpet_cfg.count++; + + for (i = 0; i < HPET_NUM_IRQ_ROUTES; i++) { + sysbus_init_irq(sbd, &s->irqs[i]); + } + + if (s->num_timers < HPET_MIN_TIMERS) { + s->num_timers = HPET_MIN_TIMERS; + } else if (s->num_timers > HPET_MAX_TIMERS) { + s->num_timers = HPET_MAX_TIMERS; + } + for (i = 0; i < HPET_MAX_TIMERS; i++) { + timer = &s->timer[i]; + timer->qemu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, hpet_timer, timer); + timer->tn = i; + timer->state = s; + } + + /* 64-bit main counter; LegacyReplacementRoute. 
*/ + s->capability = 0x8086a001ULL; + s->capability |= (s->num_timers - 1) << HPET_ID_NUM_TIM_SHIFT; + s->capability |= ((uint64_t)(HPET_CLK_PERIOD * FS_PER_NS) << 32); + + qdev_init_gpio_in(dev, hpet_handle_legacy_irq, 2); + qdev_init_gpio_out(dev, &s->pit_enabled, 1); +} + +static Property hpet_device_properties[] = { + DEFINE_PROP_UINT8("timers", HPETState, num_timers, HPET_MIN_TIMERS), + DEFINE_PROP_BIT("msi", HPETState, flags, HPET_MSI_SUPPORT, false), + DEFINE_PROP_UINT32(HPET_INTCAP, HPETState, intcap, 0), + DEFINE_PROP_BOOL("hpet-offset-saved", HPETState, hpet_offset_saved, true), + DEFINE_PROP_END_OF_LIST(), +}; + +static void hpet_device_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = hpet_realize; + dc->reset = hpet_reset; + dc->vmsd = &vmstate_hpet; + device_class_set_props(dc, hpet_device_properties); +} + +static const TypeInfo hpet_device_info = { + .name = TYPE_HPET, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(HPETState), + .instance_init = hpet_init, + .class_init = hpet_device_class_init, +}; + +static void hpet_register_types(void) +{ + type_register_static(&hpet_device_info); +} + +type_init(hpet_register_types) diff --git a/hw/timer/i8254.c b/hw/timer/i8254.c new file mode 100644 index 000000000..c8388ea43 --- /dev/null +++ b/hw/timer/i8254.c @@ -0,0 +1,385 @@ +/* + * QEMU 8253/8254 interval timer emulation + * + * Copyright (c) 2003-2004 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include "hw/irq.h" +#include "qemu/module.h" +#include "qemu/timer.h" +#include "hw/timer/i8254.h" +#include "hw/timer/i8254_internal.h" +#include "qom/object.h" + +//#define DEBUG_PIT + +#define RW_STATE_LSB 1 +#define RW_STATE_MSB 2 +#define RW_STATE_WORD0 3 +#define RW_STATE_WORD1 4 + +typedef struct PITClass PITClass; +DECLARE_CLASS_CHECKERS(PITClass, PIT, + TYPE_I8254) + +struct PITClass { + PITCommonClass parent_class; + + DeviceRealize parent_realize; +}; + +static void pit_irq_timer_update(PITChannelState *s, int64_t current_time); + +static int pit_get_count(PITChannelState *s) +{ + uint64_t d; + int counter; + + d = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - s->count_load_time, PIT_FREQ, + NANOSECONDS_PER_SECOND); + switch(s->mode) { + case 0: + case 1: + case 4: + case 5: + counter = (s->count - d) & 0xffff; + break; + case 3: + /* XXX: may be incorrect for odd counts */ + counter = s->count - ((2 * d) % s->count); + break; + default: + counter = s->count - (d % s->count); + break; + } + return counter; +} + +/* val must be 0 or 1 */ +static void pit_set_channel_gate(PITCommonState *s, PITChannelState *sc, + int val) +{ + switch (sc->mode) { + default: + case 0: + case 4: + /* XXX: just disable/enable counting */ + break; + case 1: + case 5: + if (sc->gate < val) { + /* restart counting on rising edge */ + sc->count_load_time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + pit_irq_timer_update(sc, sc->count_load_time); + } + break; + case 2: + case 3: + if (sc->gate < val) { + /* restart counting on rising edge */ + sc->count_load_time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + pit_irq_timer_update(sc, sc->count_load_time); + } + /* XXX: disable/enable counting */ + break; + } + sc->gate = val; +} + +static inline void pit_load_count(PITChannelState *s, int val) +{ + if (val == 0) + val = 0x10000; + s->count_load_time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + s->count = val; + pit_irq_timer_update(s, s->count_load_time); +} + +/* if already latched, do not latch again */ +static void pit_latch_count(PITChannelState *s) +{ + if (!s->count_latched) { + s->latched_count = pit_get_count(s); + s->count_latched = s->rw_mode; + } +} + +static void pit_ioport_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + PITCommonState *pit = opaque; + int channel, access; + PITChannelState *s; + + addr &= 3; + if (addr == 3) { + channel = val >> 6; + if (channel == 3) { + /* read back command */ + for(channel = 0; channel < 3; channel++) { + s = &pit->channels[channel]; + if (val & (2 << channel)) { + if (!(val & 0x20)) { + pit_latch_count(s); + } + if (!(val & 0x10) && !s->status_latched) { + /* status latch */ + /* XXX: add BCD and null count */ + s->status = + (pit_get_out(s, + qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL)) << 7) | + (s->rw_mode << 4) | + (s->mode << 1) | + s->bcd; + s->status_latched = 1; + } + } + } + } else { + s = &pit->channels[channel]; + access = (val >> 4) & 3; + if (access == 0) { + pit_latch_count(s); + } else { + s->rw_mode = access; + s->read_state = access; + s->write_state = access; + + s->mode = (val >> 1) & 7; + s->bcd = val & 1; + /* XXX: update irq timer ? 
*/ + } + } + } else { + s = &pit->channels[addr]; + switch(s->write_state) { + default: + case RW_STATE_LSB: + pit_load_count(s, val); + break; + case RW_STATE_MSB: + pit_load_count(s, val << 8); + break; + case RW_STATE_WORD0: + s->write_latch = val; + s->write_state = RW_STATE_WORD1; + break; + case RW_STATE_WORD1: + pit_load_count(s, s->write_latch | (val << 8)); + s->write_state = RW_STATE_WORD0; + break; + } + } +} + +static uint64_t pit_ioport_read(void *opaque, hwaddr addr, + unsigned size) +{ + PITCommonState *pit = opaque; + int ret, count; + PITChannelState *s; + + addr &= 3; + + if (addr == 3) { + /* Mode/Command register is write only, read is ignored */ + return 0; + } + + s = &pit->channels[addr]; + if (s->status_latched) { + s->status_latched = 0; + ret = s->status; + } else if (s->count_latched) { + switch(s->count_latched) { + default: + case RW_STATE_LSB: + ret = s->latched_count & 0xff; + s->count_latched = 0; + break; + case RW_STATE_MSB: + ret = s->latched_count >> 8; + s->count_latched = 0; + break; + case RW_STATE_WORD0: + ret = s->latched_count & 0xff; + s->count_latched = RW_STATE_MSB; + break; + } + } else { + switch(s->read_state) { + default: + case RW_STATE_LSB: + count = pit_get_count(s); + ret = count & 0xff; + break; + case RW_STATE_MSB: + count = pit_get_count(s); + ret = (count >> 8) & 0xff; + break; + case RW_STATE_WORD0: + count = pit_get_count(s); + ret = count & 0xff; + s->read_state = RW_STATE_WORD1; + break; + case RW_STATE_WORD1: + count = pit_get_count(s); + ret = (count >> 8) & 0xff; + s->read_state = RW_STATE_WORD0; + break; + } + } + return ret; +} + +static void pit_irq_timer_update(PITChannelState *s, int64_t current_time) +{ + int64_t expire_time; + int irq_level; + + if (!s->irq_timer || s->irq_disabled) { + return; + } + expire_time = pit_get_next_transition_time(s, current_time); + irq_level = pit_get_out(s, current_time); + qemu_set_irq(s->irq, irq_level); +#ifdef DEBUG_PIT + printf("irq_level=%d next_delay=%f\n", + irq_level, + (double)(expire_time - current_time) / NANOSECONDS_PER_SECOND); +#endif + s->next_transition_time = expire_time; + if (expire_time != -1) + timer_mod(s->irq_timer, expire_time); + else + timer_del(s->irq_timer); +} + +static void pit_irq_timer(void *opaque) +{ + PITChannelState *s = opaque; + + pit_irq_timer_update(s, s->next_transition_time); +} + +static void pit_reset(DeviceState *dev) +{ + PITCommonState *pit = PIT_COMMON(dev); + PITChannelState *s; + + pit_reset_common(pit); + + s = &pit->channels[0]; + if (!s->irq_disabled) { + timer_mod(s->irq_timer, s->next_transition_time); + } +} + +/* When HPET is operating in legacy mode, suppress the ignored timer IRQ, + * reenable it when legacy mode is left again. 
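+ * (This handler sits behind the PIT's GPIO input; on PC-style boards + * it is typically driven by the HPET's "pit_enabled" output seen in + * hpet.c above.)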
*/ +static void pit_irq_control(void *opaque, int n, int enable) +{ + PITCommonState *pit = opaque; + PITChannelState *s = &pit->channels[0]; + + if (enable) { + s->irq_disabled = 0; + pit_irq_timer_update(s, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL)); + } else { + s->irq_disabled = 1; + timer_del(s->irq_timer); + } +} + +static const MemoryRegionOps pit_ioport_ops = { + .read = pit_ioport_read, + .write = pit_ioport_write, + .impl = { + .min_access_size = 1, + .max_access_size = 1, + }, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static void pit_post_load(PITCommonState *s) +{ + PITChannelState *sc = &s->channels[0]; + + if (sc->next_transition_time != -1 && !sc->irq_disabled) { + timer_mod(sc->irq_timer, sc->next_transition_time); + } else { + timer_del(sc->irq_timer); + } +} + +static void pit_realizefn(DeviceState *dev, Error **errp) +{ + PITCommonState *pit = PIT_COMMON(dev); + PITClass *pc = PIT_GET_CLASS(dev); + PITChannelState *s; + + s = &pit->channels[0]; + /* the timer 0 is connected to an IRQ */ + s->irq_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, pit_irq_timer, s); + qdev_init_gpio_out(dev, &s->irq, 1); + + memory_region_init_io(&pit->ioports, OBJECT(pit), &pit_ioport_ops, + pit, "pit", 4); + + qdev_init_gpio_in(dev, pit_irq_control, 1); + + pc->parent_realize(dev, errp); +} + +static Property pit_properties[] = { + DEFINE_PROP_UINT32("iobase", PITCommonState, iobase, -1), + DEFINE_PROP_END_OF_LIST(), +}; + +static void pit_class_initfn(ObjectClass *klass, void *data) +{ + PITClass *pc = PIT_CLASS(klass); + PITCommonClass *k = PIT_COMMON_CLASS(klass); + DeviceClass *dc = DEVICE_CLASS(klass); + + device_class_set_parent_realize(dc, pit_realizefn, &pc->parent_realize); + k->set_channel_gate = pit_set_channel_gate; + k->get_channel_info = pit_get_channel_info_common; + k->post_load = pit_post_load; + dc->reset = pit_reset; + device_class_set_props(dc, pit_properties); +} + +static const TypeInfo pit_info = { + .name = TYPE_I8254, + .parent = TYPE_PIT_COMMON, + .instance_size = sizeof(PITCommonState), + .class_init = pit_class_initfn, + .class_size = sizeof(PITClass), +}; + +static void pit_register_types(void) +{ + type_register_static(&pit_info); +} + +type_init(pit_register_types) diff --git a/hw/timer/i8254_common.c b/hw/timer/i8254_common.c new file mode 100644 index 000000000..050875b49 --- /dev/null +++ b/hw/timer/i8254_common.c @@ -0,0 +1,271 @@ +/* + * QEMU 8253/8254 - common bits of emulated and KVM kernel model + * + * Copyright (c) 2003-2004 Fabrice Bellard + * Copyright (c) 2012 Jan Kiszka, Siemens AG + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "hw/isa/isa.h" +#include "qemu/module.h" +#include "qemu/timer.h" +#include "hw/timer/i8254.h" +#include "hw/timer/i8254_internal.h" +#include "migration/vmstate.h" + +/* val must be 0 or 1 */ +void pit_set_gate(ISADevice *dev, int channel, int val) +{ + PITCommonState *pit = PIT_COMMON(dev); + PITChannelState *s = &pit->channels[channel]; + PITCommonClass *c = PIT_COMMON_GET_CLASS(pit); + + c->set_channel_gate(pit, s, val); +} + +/* get pit output bit */ +int pit_get_out(PITChannelState *s, int64_t current_time) +{ + uint64_t d; + int out; + + d = muldiv64(current_time - s->count_load_time, PIT_FREQ, + NANOSECONDS_PER_SECOND); + switch (s->mode) { + default: + case 0: + out = (d >= s->count); + break; + case 1: + out = (d < s->count); + break; + case 2: + if ((d % s->count) == 0 && d != 0) { + out = 1; + } else { + out = 0; + } + break; + case 3: + out = (d % s->count) < ((s->count + 1) >> 1); + break; + case 4: + case 5: + out = (d == s->count); + break; + } + return out; +} + +/* return -1 if no transition will occur. */ +int64_t pit_get_next_transition_time(PITChannelState *s, int64_t current_time) +{ + uint64_t d, next_time, base; + int period2; + + d = muldiv64(current_time - s->count_load_time, PIT_FREQ, + NANOSECONDS_PER_SECOND); + switch (s->mode) { + default: + case 0: + case 1: + if (d < s->count) { + next_time = s->count; + } else { + return -1; + } + break; + case 2: + base = QEMU_ALIGN_DOWN(d, s->count); + if ((d - base) == 0 && d != 0) { + next_time = base + s->count; + } else { + next_time = base + s->count + 1; + } + break; + case 3: + base = QEMU_ALIGN_DOWN(d, s->count); + period2 = ((s->count + 1) >> 1); + if ((d - base) < period2) { + next_time = base + period2; + } else { + next_time = base + s->count; + } + break; + case 4: + case 5: + if (d < s->count) { + next_time = s->count; + } else if (d == s->count) { + next_time = s->count + 1; + } else { + return -1; + } + break; + } + /* convert to timer units */ + next_time = s->count_load_time + muldiv64(next_time, NANOSECONDS_PER_SECOND, + PIT_FREQ); + /* fix potential rounding problems */ + /* XXX: better solution: use a clock at PIT_FREQ Hz */ + if (next_time <= current_time) { + next_time = current_time + 1; + } + return next_time; +} + +void pit_get_channel_info_common(PITCommonState *s, PITChannelState *sc, + PITChannelInfo *info) +{ + info->gate = sc->gate; + info->mode = sc->mode; + info->initial_count = sc->count; + info->out = pit_get_out(sc, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL)); +} + +void pit_get_channel_info(ISADevice *dev, int channel, PITChannelInfo *info) +{ + PITCommonState *pit = PIT_COMMON(dev); + PITChannelState *s = &pit->channels[channel]; + PITCommonClass *c = PIT_COMMON_GET_CLASS(pit); + + c->get_channel_info(pit, s, info); +} + +void pit_reset_common(PITCommonState *pit) +{ + PITChannelState *s; + int i; + + for (i = 0; i < 3; i++) { + s = &pit->channels[i]; + s->mode = 3; + s->gate = (i != 2); + s->count_load_time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + s->count = 0x10000; + if (i == 0 && !s->irq_disabled) { + s->next_transition_time = + pit_get_next_transition_time(s, s->count_load_time); + } + } +} + +static void pit_common_realize(DeviceState *dev, Error **errp) +{ + ISADevice 
*isadev = ISA_DEVICE(dev); + PITCommonState *pit = PIT_COMMON(dev); + + isa_register_ioport(isadev, &pit->ioports, pit->iobase); + + qdev_set_legacy_instance_id(dev, pit->iobase, 2); +} + +static const VMStateDescription vmstate_pit_channel = { + .name = "pit channel", + .version_id = 2, + .minimum_version_id = 2, + .fields = (VMStateField[]) { + VMSTATE_INT32(count, PITChannelState), + VMSTATE_UINT16(latched_count, PITChannelState), + VMSTATE_UINT8(count_latched, PITChannelState), + VMSTATE_UINT8(status_latched, PITChannelState), + VMSTATE_UINT8(status, PITChannelState), + VMSTATE_UINT8(read_state, PITChannelState), + VMSTATE_UINT8(write_state, PITChannelState), + VMSTATE_UINT8(write_latch, PITChannelState), + VMSTATE_UINT8(rw_mode, PITChannelState), + VMSTATE_UINT8(mode, PITChannelState), + VMSTATE_UINT8(bcd, PITChannelState), + VMSTATE_UINT8(gate, PITChannelState), + VMSTATE_INT64(count_load_time, PITChannelState), + VMSTATE_INT64(next_transition_time, PITChannelState), + VMSTATE_END_OF_LIST() + } +}; + +static int pit_dispatch_pre_save(void *opaque) +{ + PITCommonState *s = opaque; + PITCommonClass *c = PIT_COMMON_GET_CLASS(s); + + if (c->pre_save) { + c->pre_save(s); + } + + return 0; +} + +static int pit_dispatch_post_load(void *opaque, int version_id) +{ + PITCommonState *s = opaque; + PITCommonClass *c = PIT_COMMON_GET_CLASS(s); + + if (c->post_load) { + c->post_load(s); + } + return 0; +} + +static const VMStateDescription vmstate_pit_common = { + .name = "i8254", + .version_id = 3, + .minimum_version_id = 2, + .pre_save = pit_dispatch_pre_save, + .post_load = pit_dispatch_post_load, + .fields = (VMStateField[]) { + VMSTATE_UINT32_V(channels[0].irq_disabled, PITCommonState, 3), + VMSTATE_STRUCT_ARRAY(channels, PITCommonState, 3, 2, + vmstate_pit_channel, PITChannelState), + VMSTATE_INT64(channels[0].next_transition_time, + PITCommonState), /* formerly irq_timer */ + VMSTATE_END_OF_LIST() + } +}; + +static void pit_common_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = pit_common_realize; + dc->vmsd = &vmstate_pit_common; + /* + * Reason: unlike ordinary ISA devices, the PIT may need to be + * wired to the HPET, and because of that, some wiring is always + * done by board code. 
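+ * (For instance, the HPET's legacy-mode output is connected to this + * device's IRQ-control GPIO by the machine, not by the device + * itself.)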
+ */ + dc->user_creatable = false; +} + +static const TypeInfo pit_common_type = { + .name = TYPE_PIT_COMMON, + .parent = TYPE_ISA_DEVICE, + .instance_size = sizeof(PITCommonState), + .class_size = sizeof(PITCommonClass), + .class_init = pit_common_class_init, + .abstract = true, +}; + +static void register_devices(void) +{ + type_register_static(&pit_common_type); +} + +type_init(register_devices); diff --git a/hw/timer/ibex_timer.c b/hw/timer/ibex_timer.c new file mode 100644 index 000000000..66e1f8e48 --- /dev/null +++ b/hw/timer/ibex_timer.c @@ -0,0 +1,312 @@ +/* + * QEMU lowRISC Ibex Timer device + * + * Copyright (c) 2021 Western Digital + * + * For details check the documentation here: + * https://docs.opentitan.org/hw/ip/rv_timer/doc/ + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "qemu/timer.h" +#include "hw/timer/ibex_timer.h" +#include "hw/irq.h" +#include "hw/qdev-properties.h" +#include "target/riscv/cpu.h" +#include "migration/vmstate.h" + +REG32(CTRL, 0x00) + FIELD(CTRL, ACTIVE, 0, 1) +REG32(CFG0, 0x100) + FIELD(CFG0, PRESCALE, 0, 12) + FIELD(CFG0, STEP, 16, 8) +REG32(LOWER0, 0x104) +REG32(UPPER0, 0x108) +REG32(COMPARE_LOWER0, 0x10C) +REG32(COMPARE_UPPER0, 0x110) +REG32(INTR_ENABLE, 0x114) + FIELD(INTR_ENABLE, IE_0, 0, 1) +REG32(INTR_STATE, 0x118) + FIELD(INTR_STATE, IS_0, 0, 1) +REG32(INTR_TEST, 0x11C) + FIELD(INTR_TEST, T_0, 0, 1) + +static uint64_t cpu_riscv_read_rtc(uint32_t timebase_freq) +{ + return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), + timebase_freq, NANOSECONDS_PER_SECOND); +} + +static void ibex_timer_update_irqs(IbexTimerState *s) +{ + CPUState *cs = qemu_get_cpu(0); + RISCVCPU *cpu = RISCV_CPU(cs); + uint64_t value = s->timer_compare_lower0 | + ((uint64_t)s->timer_compare_upper0 << 32); + uint64_t next, diff; + uint64_t now = cpu_riscv_read_rtc(s->timebase_freq); + + if (!(s->timer_ctrl & R_CTRL_ACTIVE_MASK)) { + /* Timer isn't active */ + return; + } + + /* Update the CPUs mtimecmp */ + cpu->env.timecmp = value; + + if (cpu->env.timecmp <= now) { + /* + * If the mtimecmp was in the past raise the interrupt now. 
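+ * Both the machine timer line and, when INTR_ENABLE.IE_0 is set, + * the rv_timer interrupt are raised immediately, without arming a + * QEMU timer.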
+ */ + qemu_irq_raise(s->m_timer_irq); + if (s->timer_intr_enable & R_INTR_ENABLE_IE_0_MASK) { + s->timer_intr_state |= R_INTR_STATE_IS_0_MASK; + qemu_set_irq(s->irq, true); + } + return; + } + + /* Setup a timer to trigger the interrupt in the future */ + qemu_irq_lower(s->m_timer_irq); + qemu_set_irq(s->irq, false); + + diff = cpu->env.timecmp - now; + next = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + + muldiv64(diff, + NANOSECONDS_PER_SECOND, + s->timebase_freq); + + if (next < qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL)) { + /* We overflowed the timer, just set it as large as we can */ + timer_mod(cpu->env.timer, 0x7FFFFFFFFFFFFFFF); + } else { + timer_mod(cpu->env.timer, next); + } +} + +static void ibex_timer_cb(void *opaque) +{ + IbexTimerState *s = opaque; + + qemu_irq_raise(s->m_timer_irq); + if (s->timer_intr_enable & R_INTR_ENABLE_IE_0_MASK) { + s->timer_intr_state |= R_INTR_STATE_IS_0_MASK; + qemu_set_irq(s->irq, true); + } +} + +static void ibex_timer_reset(DeviceState *dev) +{ + IbexTimerState *s = IBEX_TIMER(dev); + + CPUState *cpu = qemu_get_cpu(0); + CPURISCVState *env = cpu->env_ptr; + env->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, + &ibex_timer_cb, s); + env->timecmp = 0; + + s->timer_ctrl = 0x00000000; + s->timer_cfg0 = 0x00010000; + s->timer_compare_lower0 = 0xFFFFFFFF; + s->timer_compare_upper0 = 0xFFFFFFFF; + s->timer_intr_enable = 0x00000000; + s->timer_intr_state = 0x00000000; + s->timer_intr_test = 0x00000000; + + ibex_timer_update_irqs(s); +} + +static uint64_t ibex_timer_read(void *opaque, hwaddr addr, + unsigned int size) +{ + IbexTimerState *s = opaque; + uint64_t now = cpu_riscv_read_rtc(s->timebase_freq); + uint64_t retvalue = 0; + + switch (addr >> 2) { + case R_CTRL: + retvalue = s->timer_ctrl; + break; + case R_CFG0: + retvalue = s->timer_cfg0; + break; + case R_LOWER0: + retvalue = now; + break; + case R_UPPER0: + retvalue = now >> 32; + break; + case R_COMPARE_LOWER0: + retvalue = s->timer_compare_lower0; + break; + case R_COMPARE_UPPER0: + retvalue = s->timer_compare_upper0; + break; + case R_INTR_ENABLE: + retvalue = s->timer_intr_enable; + break; + case R_INTR_STATE: + retvalue = s->timer_intr_state; + break; + case R_INTR_TEST: + retvalue = s->timer_intr_test; + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad offset 0x%"HWADDR_PRIx"\n", __func__, addr); + return 0; + } + + return retvalue; +} + +static void ibex_timer_write(void *opaque, hwaddr addr, + uint64_t val64, unsigned int size) +{ + IbexTimerState *s = opaque; + uint32_t val = val64; + + switch (addr >> 2) { + case R_CTRL: + s->timer_ctrl = val; + break; + case R_CFG0: + qemu_log_mask(LOG_UNIMP, "Changing prescale or step not supported"); + s->timer_cfg0 = val; + break; + case R_LOWER0: + qemu_log_mask(LOG_UNIMP, "Changing timer value is not supported"); + break; + case R_UPPER0: + qemu_log_mask(LOG_UNIMP, "Changing timer value is not supported"); + break; + case R_COMPARE_LOWER0: + s->timer_compare_lower0 = val; + ibex_timer_update_irqs(s); + break; + case R_COMPARE_UPPER0: + s->timer_compare_upper0 = val; + ibex_timer_update_irqs(s); + break; + case R_INTR_ENABLE: + s->timer_intr_enable = val; + break; + case R_INTR_STATE: + /* Write 1 to clear */ + s->timer_intr_state &= ~val; + break; + case R_INTR_TEST: + s->timer_intr_test = val; + if (s->timer_intr_enable & + s->timer_intr_test & + R_INTR_ENABLE_IE_0_MASK) { + s->timer_intr_state |= R_INTR_STATE_IS_0_MASK; + qemu_set_irq(s->irq, true); + } + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad offset 0x%"HWADDR_PRIx"\n", 
__func__, addr); + } +} + +static const MemoryRegionOps ibex_timer_ops = { + .read = ibex_timer_read, + .write = ibex_timer_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .impl.min_access_size = 4, + .impl.max_access_size = 4, +}; + +static int ibex_timer_post_load(void *opaque, int version_id) +{ + IbexTimerState *s = opaque; + + ibex_timer_update_irqs(s); + return 0; +} + +static const VMStateDescription vmstate_ibex_timer = { + .name = TYPE_IBEX_TIMER, + .version_id = 1, + .minimum_version_id = 1, + .post_load = ibex_timer_post_load, + .fields = (VMStateField[]) { + VMSTATE_UINT32(timer_ctrl, IbexTimerState), + VMSTATE_UINT32(timer_cfg0, IbexTimerState), + VMSTATE_UINT32(timer_compare_lower0, IbexTimerState), + VMSTATE_UINT32(timer_compare_upper0, IbexTimerState), + VMSTATE_UINT32(timer_intr_enable, IbexTimerState), + VMSTATE_UINT32(timer_intr_state, IbexTimerState), + VMSTATE_UINT32(timer_intr_test, IbexTimerState), + VMSTATE_END_OF_LIST() + } +}; + +static Property ibex_timer_properties[] = { + DEFINE_PROP_UINT32("timebase-freq", IbexTimerState, timebase_freq, 10000), + DEFINE_PROP_END_OF_LIST(), +}; + +static void ibex_timer_init(Object *obj) +{ + IbexTimerState *s = IBEX_TIMER(obj); + + sysbus_init_irq(SYS_BUS_DEVICE(obj), &s->irq); + + memory_region_init_io(&s->mmio, obj, &ibex_timer_ops, s, + TYPE_IBEX_TIMER, 0x400); + sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->mmio); +} + +static void ibex_timer_realize(DeviceState *dev, Error **errp) +{ + IbexTimerState *s = IBEX_TIMER(dev); + + qdev_init_gpio_out(dev, &s->m_timer_irq, 1); +} + + +static void ibex_timer_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = ibex_timer_reset; + dc->vmsd = &vmstate_ibex_timer; + dc->realize = ibex_timer_realize; + device_class_set_props(dc, ibex_timer_properties); +} + +static const TypeInfo ibex_timer_info = { + .name = TYPE_IBEX_TIMER, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(IbexTimerState), + .instance_init = ibex_timer_init, + .class_init = ibex_timer_class_init, +}; + +static void ibex_timer_register_types(void) +{ + type_register_static(&ibex_timer_info); +} + +type_init(ibex_timer_register_types) diff --git a/hw/timer/imx_epit.c b/hw/timer/imx_epit.c new file mode 100644 index 000000000..ebd58254d --- /dev/null +++ b/hw/timer/imx_epit.c @@ -0,0 +1,377 @@ +/* + * IMX EPIT Timer + * + * Copyright (c) 2008 OK Labs + * Copyright (c) 2011 NICTA Pty Ltd + * Originally written by Hans Jiang + * Updated by Peter Chubb + * Updated by Jean-Christophe Dubois + * + * This code is licensed under GPL version 2 or later. See + * the COPYING file in the top-level directory. + * + */ + +#include "qemu/osdep.h" +#include "hw/timer/imx_epit.h" +#include "migration/vmstate.h" +#include "hw/irq.h" +#include "hw/misc/imx_ccm.h" +#include "qemu/module.h" +#include "qemu/log.h" + +#ifndef DEBUG_IMX_EPIT +#define DEBUG_IMX_EPIT 0 +#endif + +#define DPRINTF(fmt, args...) \ + do { \ + if (DEBUG_IMX_EPIT) { \ + fprintf(stderr, "[%s]%s: " fmt , TYPE_IMX_EPIT, \ + __func__, ##args); \ + } \ + } while (0) + +static const char *imx_epit_reg_name(uint32_t reg) +{ + switch (reg) { + case 0: + return "CR"; + case 1: + return "SR"; + case 2: + return "LR"; + case 3: + return "CMP"; + case 4: + return "CNT"; + default: + return "[?]"; + } +} + +/* + * Exact clock frequencies vary from board to board. + * These are typical. 
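+ * (The effective counter rate is the selected clock divided by + * 1 + CR.PRESCALE; e.g. the ~32 kHz source with a prescale field of + * 31 ticks at roughly 1 kHz.)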
+ */ +static const IMXClk imx_epit_clocks[] = { + CLK_NONE, /* 00 disabled */ + CLK_IPG, /* 01 ipg_clk, ~532MHz */ + CLK_IPG_HIGH, /* 10 ipg_clk_highfreq */ + CLK_32k, /* 11 ipg_clk_32k -- ~32kHz */ +}; + +/* + * Update interrupt status + */ +static void imx_epit_update_int(IMXEPITState *s) +{ + if (s->sr && (s->cr & CR_OCIEN) && (s->cr & CR_EN)) { + qemu_irq_raise(s->irq); + } else { + qemu_irq_lower(s->irq); + } +} + +/* + * Must be called from within a ptimer_transaction_begin/commit block + * for both s->timer_cmp and s->timer_reload. + */ +static void imx_epit_set_freq(IMXEPITState *s) +{ + uint32_t clksrc; + uint32_t prescaler; + + clksrc = extract32(s->cr, CR_CLKSRC_SHIFT, 2); + prescaler = 1 + extract32(s->cr, CR_PRESCALE_SHIFT, 12); + + s->freq = imx_ccm_get_clock_frequency(s->ccm, + imx_epit_clocks[clksrc]) / prescaler; + + DPRINTF("Setting ptimer frequency to %u\n", s->freq); + + if (s->freq) { + ptimer_set_freq(s->timer_reload, s->freq); + ptimer_set_freq(s->timer_cmp, s->freq); + } +} + +static void imx_epit_reset(DeviceState *dev) +{ + IMXEPITState *s = IMX_EPIT(dev); + + /* + * Soft reset doesn't touch some bits; hard reset clears them + */ + s->cr &= (CR_EN|CR_ENMOD|CR_STOPEN|CR_DOZEN|CR_WAITEN|CR_DBGEN); + s->sr = 0; + s->lr = EPIT_TIMER_MAX; + s->cmp = 0; + s->cnt = 0; + ptimer_transaction_begin(s->timer_cmp); + ptimer_transaction_begin(s->timer_reload); + /* stop both timers */ + ptimer_stop(s->timer_cmp); + ptimer_stop(s->timer_reload); + /* compute new frequency */ + imx_epit_set_freq(s); + /* init both timers to EPIT_TIMER_MAX */ + ptimer_set_limit(s->timer_cmp, EPIT_TIMER_MAX, 1); + ptimer_set_limit(s->timer_reload, EPIT_TIMER_MAX, 1); + if (s->freq && (s->cr & CR_EN)) { + /* if the timer is still enabled, restart it */ + ptimer_run(s->timer_reload, 0); + } + ptimer_transaction_commit(s->timer_cmp); + ptimer_transaction_commit(s->timer_reload); +} + +static uint32_t imx_epit_update_count(IMXEPITState *s) +{ + s->cnt = ptimer_get_count(s->timer_reload); + + return s->cnt; +} + +static uint64_t imx_epit_read(void *opaque, hwaddr offset, unsigned size) +{ + IMXEPITState *s = IMX_EPIT(opaque); + uint32_t reg_value = 0; + + switch (offset >> 2) { + case 0: /* Control Register */ + reg_value = s->cr; + break; + + case 1: /* Status Register */ + reg_value = s->sr; + break; + + case 2: /* LR - ticks*/ + reg_value = s->lr; + break; + + case 3: /* CMP */ + reg_value = s->cmp; + break; + + case 4: /* CNT */ + imx_epit_update_count(s); + reg_value = s->cnt; + break; + + default: + qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Bad register at offset 0x%" + HWADDR_PRIx "\n", TYPE_IMX_EPIT, __func__, offset); + break; + } + + DPRINTF("(%s) = 0x%08x\n", imx_epit_reg_name(offset >> 2), reg_value); + + return reg_value; +} + +/* Must be called from ptimer_transaction_begin/commit block for s->timer_cmp */ +static void imx_epit_reload_compare_timer(IMXEPITState *s) +{ + if ((s->cr & (CR_EN | CR_OCIEN)) == (CR_EN | CR_OCIEN)) { + /* if the compare feature is on and timers are running */ + uint32_t tmp = imx_epit_update_count(s); + uint64_t next; + if (tmp > s->cmp) { + /* It'll fire in this round of the timer */ + next = tmp - s->cmp; + } else { /* catch it next time around */ + next = tmp - s->cmp + ((s->cr & CR_RLD) ? 
EPIT_TIMER_MAX : s->lr); + } + ptimer_set_count(s->timer_cmp, next); + } +} + +static void imx_epit_write(void *opaque, hwaddr offset, uint64_t value, + unsigned size) +{ + IMXEPITState *s = IMX_EPIT(opaque); + uint64_t oldcr; + + DPRINTF("(%s, value = 0x%08x)\n", imx_epit_reg_name(offset >> 2), + (uint32_t)value); + + switch (offset >> 2) { + case 0: /* CR */ + + oldcr = s->cr; + s->cr = value & 0x03ffffff; + if (s->cr & CR_SWR) { + /* handle the reset */ + imx_epit_reset(DEVICE(s)); + /* + * TODO: could we 'break' here? following operations appear + * to duplicate the work imx_epit_reset() already did. + */ + } + + ptimer_transaction_begin(s->timer_cmp); + ptimer_transaction_begin(s->timer_reload); + + if (!(s->cr & CR_SWR)) { + imx_epit_set_freq(s); + } + + if (s->freq && (s->cr & CR_EN) && !(oldcr & CR_EN)) { + if (s->cr & CR_ENMOD) { + if (s->cr & CR_RLD) { + ptimer_set_limit(s->timer_reload, s->lr, 1); + ptimer_set_limit(s->timer_cmp, s->lr, 1); + } else { + ptimer_set_limit(s->timer_reload, EPIT_TIMER_MAX, 1); + ptimer_set_limit(s->timer_cmp, EPIT_TIMER_MAX, 1); + } + } + + imx_epit_reload_compare_timer(s); + ptimer_run(s->timer_reload, 0); + if (s->cr & CR_OCIEN) { + ptimer_run(s->timer_cmp, 0); + } else { + ptimer_stop(s->timer_cmp); + } + } else if (!(s->cr & CR_EN)) { + /* stop both timers */ + ptimer_stop(s->timer_reload); + ptimer_stop(s->timer_cmp); + } else if (s->cr & CR_OCIEN) { + if (!(oldcr & CR_OCIEN)) { + imx_epit_reload_compare_timer(s); + ptimer_run(s->timer_cmp, 0); + } + } else { + ptimer_stop(s->timer_cmp); + } + + ptimer_transaction_commit(s->timer_cmp); + ptimer_transaction_commit(s->timer_reload); + break; + + case 1: /* SR - ACK*/ + /* writing 1 to OCIF clear the OCIF bit */ + if (value & 0x01) { + s->sr = 0; + imx_epit_update_int(s); + } + break; + + case 2: /* LR - set ticks */ + s->lr = value; + + ptimer_transaction_begin(s->timer_cmp); + ptimer_transaction_begin(s->timer_reload); + if (s->cr & CR_RLD) { + /* Also set the limit if the LRD bit is set */ + /* If IOVW bit is set then set the timer value */ + ptimer_set_limit(s->timer_reload, s->lr, s->cr & CR_IOVW); + ptimer_set_limit(s->timer_cmp, s->lr, 0); + } else if (s->cr & CR_IOVW) { + /* If IOVW bit is set then set the timer value */ + ptimer_set_count(s->timer_reload, s->lr); + } + + imx_epit_reload_compare_timer(s); + ptimer_transaction_commit(s->timer_cmp); + ptimer_transaction_commit(s->timer_reload); + break; + + case 3: /* CMP */ + s->cmp = value; + + ptimer_transaction_begin(s->timer_cmp); + imx_epit_reload_compare_timer(s); + ptimer_transaction_commit(s->timer_cmp); + + break; + + default: + qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Bad register at offset 0x%" + HWADDR_PRIx "\n", TYPE_IMX_EPIT, __func__, offset); + + break; + } +} +static void imx_epit_cmp(void *opaque) +{ + IMXEPITState *s = IMX_EPIT(opaque); + + DPRINTF("sr was %d\n", s->sr); + + s->sr = 1; + imx_epit_update_int(s); +} + +static void imx_epit_reload(void *opaque) +{ + /* No action required on rollover of timer_reload */ +} + +static const MemoryRegionOps imx_epit_ops = { + .read = imx_epit_read, + .write = imx_epit_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static const VMStateDescription vmstate_imx_timer_epit = { + .name = TYPE_IMX_EPIT, + .version_id = 2, + .minimum_version_id = 2, + .fields = (VMStateField[]) { + VMSTATE_UINT32(cr, IMXEPITState), + VMSTATE_UINT32(sr, IMXEPITState), + VMSTATE_UINT32(lr, IMXEPITState), + VMSTATE_UINT32(cmp, IMXEPITState), + VMSTATE_UINT32(cnt, IMXEPITState), + VMSTATE_UINT32(freq, 
IMXEPITState), + VMSTATE_PTIMER(timer_reload, IMXEPITState), + VMSTATE_PTIMER(timer_cmp, IMXEPITState), + VMSTATE_END_OF_LIST() + } +}; + +static void imx_epit_realize(DeviceState *dev, Error **errp) +{ + IMXEPITState *s = IMX_EPIT(dev); + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + + DPRINTF("\n"); + + sysbus_init_irq(sbd, &s->irq); + memory_region_init_io(&s->iomem, OBJECT(s), &imx_epit_ops, s, TYPE_IMX_EPIT, + 0x00001000); + sysbus_init_mmio(sbd, &s->iomem); + + s->timer_reload = ptimer_init(imx_epit_reload, s, PTIMER_POLICY_DEFAULT); + + s->timer_cmp = ptimer_init(imx_epit_cmp, s, PTIMER_POLICY_DEFAULT); +} + +static void imx_epit_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = imx_epit_realize; + dc->reset = imx_epit_reset; + dc->vmsd = &vmstate_imx_timer_epit; + dc->desc = "i.MX periodic timer"; +} + +static const TypeInfo imx_epit_info = { + .name = TYPE_IMX_EPIT, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(IMXEPITState), + .class_init = imx_epit_class_init, +}; + +static void imx_epit_register_types(void) +{ + type_register_static(&imx_epit_info); +} + +type_init(imx_epit_register_types) diff --git a/hw/timer/imx_gpt.c b/hw/timer/imx_gpt.c new file mode 100644 index 000000000..5c0d9a269 --- /dev/null +++ b/hw/timer/imx_gpt.c @@ -0,0 +1,583 @@ +/* + * IMX GPT Timer + * + * Copyright (c) 2008 OK Labs + * Copyright (c) 2011 NICTA Pty Ltd + * Originally written by Hans Jiang + * Updated by Peter Chubb + * Updated by Jean-Christophe Dubois + * + * This code is licensed under GPL version 2 or later. See + * the COPYING file in the top-level directory. + * + */ + +#include "qemu/osdep.h" +#include "hw/irq.h" +#include "hw/timer/imx_gpt.h" +#include "migration/vmstate.h" +#include "qemu/module.h" +#include "qemu/log.h" + +#ifndef DEBUG_IMX_GPT +#define DEBUG_IMX_GPT 0 +#endif + +#define DPRINTF(fmt, args...) 
\ + do { \ + if (DEBUG_IMX_GPT) { \ + fprintf(stderr, "[%s]%s: " fmt , TYPE_IMX_GPT, \ + __func__, ##args); \ + } \ + } while (0) + +static const char *imx_gpt_reg_name(uint32_t reg) +{ + switch (reg) { + case 0: + return "CR"; + case 1: + return "PR"; + case 2: + return "SR"; + case 3: + return "IR"; + case 4: + return "OCR1"; + case 5: + return "OCR2"; + case 6: + return "OCR3"; + case 7: + return "ICR1"; + case 8: + return "ICR2"; + case 9: + return "CNT"; + default: + return "[?]"; + } +} + +static const VMStateDescription vmstate_imx_timer_gpt = { + .name = TYPE_IMX_GPT, + .version_id = 3, + .minimum_version_id = 3, + .fields = (VMStateField[]) { + VMSTATE_UINT32(cr, IMXGPTState), + VMSTATE_UINT32(pr, IMXGPTState), + VMSTATE_UINT32(sr, IMXGPTState), + VMSTATE_UINT32(ir, IMXGPTState), + VMSTATE_UINT32(ocr1, IMXGPTState), + VMSTATE_UINT32(ocr2, IMXGPTState), + VMSTATE_UINT32(ocr3, IMXGPTState), + VMSTATE_UINT32(icr1, IMXGPTState), + VMSTATE_UINT32(icr2, IMXGPTState), + VMSTATE_UINT32(cnt, IMXGPTState), + VMSTATE_UINT32(next_timeout, IMXGPTState), + VMSTATE_UINT32(next_int, IMXGPTState), + VMSTATE_UINT32(freq, IMXGPTState), + VMSTATE_PTIMER(timer, IMXGPTState), + VMSTATE_END_OF_LIST() + } +}; + +static const IMXClk imx25_gpt_clocks[] = { + CLK_NONE, /* 000 No clock source */ + CLK_IPG, /* 001 ipg_clk, 532MHz*/ + CLK_IPG_HIGH, /* 010 ipg_clk_highfreq */ + CLK_NONE, /* 011 not defined */ + CLK_32k, /* 100 ipg_clk_32k */ + CLK_32k, /* 101 ipg_clk_32k */ + CLK_32k, /* 110 ipg_clk_32k */ + CLK_32k, /* 111 ipg_clk_32k */ +}; + +static const IMXClk imx31_gpt_clocks[] = { + CLK_NONE, /* 000 No clock source */ + CLK_IPG, /* 001 ipg_clk, 532MHz*/ + CLK_IPG_HIGH, /* 010 ipg_clk_highfreq */ + CLK_NONE, /* 011 not defined */ + CLK_32k, /* 100 ipg_clk_32k */ + CLK_NONE, /* 101 not defined */ + CLK_NONE, /* 110 not defined */ + CLK_NONE, /* 111 not defined */ +}; + +static const IMXClk imx6_gpt_clocks[] = { + CLK_NONE, /* 000 No clock source */ + CLK_IPG, /* 001 ipg_clk, 532MHz*/ + CLK_IPG_HIGH, /* 010 ipg_clk_highfreq */ + CLK_EXT, /* 011 External clock */ + CLK_32k, /* 100 ipg_clk_32k */ + CLK_HIGH_DIV, /* 101 reference clock / 8 */ + CLK_NONE, /* 110 not defined */ + CLK_HIGH, /* 111 reference clock */ +}; + +static const IMXClk imx7_gpt_clocks[] = { + CLK_NONE, /* 000 No clock source */ + CLK_IPG, /* 001 ipg_clk, 532MHz*/ + CLK_IPG_HIGH, /* 010 ipg_clk_highfreq */ + CLK_EXT, /* 011 External clock */ + CLK_32k, /* 100 ipg_clk_32k */ + CLK_HIGH, /* 101 reference clock */ + CLK_NONE, /* 110 not defined */ + CLK_NONE, /* 111 not defined */ +}; + +/* Must be called from within ptimer_transaction_begin/commit block */ +static void imx_gpt_set_freq(IMXGPTState *s) +{ + uint32_t clksrc = extract32(s->cr, GPT_CR_CLKSRC_SHIFT, 3); + + s->freq = imx_ccm_get_clock_frequency(s->ccm, + s->clocks[clksrc]) / (1 + s->pr); + + DPRINTF("Setting clksrc %d to frequency %d\n", clksrc, s->freq); + + if (s->freq) { + ptimer_set_freq(s->timer, s->freq); + } +} + +static void imx_gpt_update_int(IMXGPTState *s) +{ + if ((s->sr & s->ir) && (s->cr & GPT_CR_EN)) { + qemu_irq_raise(s->irq); + } else { + qemu_irq_lower(s->irq); + } +} + +static uint32_t imx_gpt_update_count(IMXGPTState *s) +{ + s->cnt = s->next_timeout - (uint32_t)ptimer_get_count(s->timer); + + return s->cnt; +} + +static inline uint32_t imx_gpt_find_limit(uint32_t count, uint32_t reg, + uint32_t timeout) +{ + if ((count < reg) && (timeout > reg)) { + timeout = reg; + } + + return timeout; +} + +/* Must be called from within ptimer_transaction_begin/commit block 
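+ * (imx_gpt_find_limit() above picks the nearest enabled compare + * value above the current count; e.g. with count 100 and both ocr1 + * at 200 and ocr2 at 150 enabled, the next timeout is 150.)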
*/ +static void imx_gpt_compute_next_timeout(IMXGPTState *s, bool event) +{ + uint32_t timeout = GPT_TIMER_MAX; + uint32_t count; + long long limit; + + if (!(s->cr & GPT_CR_EN)) { + /* if not enabled just return */ + return; + } + + /* update the count */ + count = imx_gpt_update_count(s); + + if (event) { + /* + * This is an event (the ptimer reached 0 and stopped), and the + * timer counter is now equal to s->next_timeout. + */ + if (!(s->cr & GPT_CR_FRR) && (count == s->ocr1)) { + /* We are in restart mode and we crossed the compare channel 1 + * value. We need to reset the counter to 0. + */ + count = s->cnt = s->next_timeout = 0; + } else if (count == GPT_TIMER_MAX) { + /* We reached GPT_TIMER_MAX so we need to rollover */ + count = s->cnt = s->next_timeout = 0; + } + } + + /* now, find the next timeout related to count */ + + if (s->ir & GPT_IR_OF1IE) { + timeout = imx_gpt_find_limit(count, s->ocr1, timeout); + } + if (s->ir & GPT_IR_OF2IE) { + timeout = imx_gpt_find_limit(count, s->ocr2, timeout); + } + if (s->ir & GPT_IR_OF3IE) { + timeout = imx_gpt_find_limit(count, s->ocr3, timeout); + } + + /* find the next set of interrupts to raise for next timer event */ + + s->next_int = 0; + if ((s->ir & GPT_IR_OF1IE) && (timeout == s->ocr1)) { + s->next_int |= GPT_SR_OF1; + } + if ((s->ir & GPT_IR_OF2IE) && (timeout == s->ocr2)) { + s->next_int |= GPT_SR_OF2; + } + if ((s->ir & GPT_IR_OF3IE) && (timeout == s->ocr3)) { + s->next_int |= GPT_SR_OF3; + } + if ((s->ir & GPT_IR_ROVIE) && (timeout == GPT_TIMER_MAX)) { + s->next_int |= GPT_SR_ROV; + } + + /* the new range to count down from */ + limit = timeout - imx_gpt_update_count(s); + + if (limit < 0) { + /* + * if we reach here, then QEMU is running too slow and we pass the + * timeout limit while computing it. Let's deliver the interrupt + * and compute a new limit. 
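+ * (The recursive call re-reads the counter, so this settles once a + * computed timeout lands in the future again.)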
+ */ + s->sr |= s->next_int; + + imx_gpt_compute_next_timeout(s, event); + + imx_gpt_update_int(s); + } else { + /* New timeout value */ + s->next_timeout = timeout; + + /* reset the limit to the computed range */ + ptimer_set_limit(s->timer, limit, 1); + } +} + +static uint64_t imx_gpt_read(void *opaque, hwaddr offset, unsigned size) +{ + IMXGPTState *s = IMX_GPT(opaque); + uint32_t reg_value = 0; + + switch (offset >> 2) { + case 0: /* Control Register */ + reg_value = s->cr; + break; + + case 1: /* prescaler */ + reg_value = s->pr; + break; + + case 2: /* Status Register */ + reg_value = s->sr; + break; + + case 3: /* Interrupt Register */ + reg_value = s->ir; + break; + + case 4: /* Output Compare Register 1 */ + reg_value = s->ocr1; + break; + + case 5: /* Output Compare Register 2 */ + reg_value = s->ocr2; + break; + + case 6: /* Output Compare Register 3 */ + reg_value = s->ocr3; + break; + + case 7: /* input Capture Register 1 */ + qemu_log_mask(LOG_UNIMP, "[%s]%s: icr1 feature is not implemented\n", + TYPE_IMX_GPT, __func__); + reg_value = s->icr1; + break; + + case 8: /* input Capture Register 2 */ + qemu_log_mask(LOG_UNIMP, "[%s]%s: icr2 feature is not implemented\n", + TYPE_IMX_GPT, __func__); + reg_value = s->icr2; + break; + + case 9: /* cnt */ + imx_gpt_update_count(s); + reg_value = s->cnt; + break; + + default: + qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Bad register at offset 0x%" + HWADDR_PRIx "\n", TYPE_IMX_GPT, __func__, offset); + break; + } + + DPRINTF("(%s) = 0x%08x\n", imx_gpt_reg_name(offset >> 2), reg_value); + + return reg_value; +} + + +static void imx_gpt_reset_common(IMXGPTState *s, bool is_soft_reset) +{ + ptimer_transaction_begin(s->timer); + /* stop timer */ + ptimer_stop(s->timer); + + /* Soft reset and hard reset differ only in their handling of the CR + * register -- soft reset preserves the values of some bits there. + */ + if (is_soft_reset) { + /* Clear all CR bits except those that are preserved by soft reset. 
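+         * Per the mask below, those are EN, ENMOD, STOPEN, DOZEN,
+         * WAITEN, DBGEN and the CLKSRC field; everything else,
+         * including SWR itself, reads back as 0 after a soft reset.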
*/ + s->cr &= GPT_CR_EN | GPT_CR_ENMOD | GPT_CR_STOPEN | GPT_CR_DOZEN | + GPT_CR_WAITEN | GPT_CR_DBGEN | + (GPT_CR_CLKSRC_MASK << GPT_CR_CLKSRC_SHIFT); + } else { + s->cr = 0; + } + s->sr = 0; + s->pr = 0; + s->ir = 0; + s->cnt = 0; + s->ocr1 = GPT_TIMER_MAX; + s->ocr2 = GPT_TIMER_MAX; + s->ocr3 = GPT_TIMER_MAX; + s->icr1 = 0; + s->icr2 = 0; + + s->next_timeout = GPT_TIMER_MAX; + s->next_int = 0; + + /* compute new freq */ + imx_gpt_set_freq(s); + + /* reset the limit to GPT_TIMER_MAX */ + ptimer_set_limit(s->timer, GPT_TIMER_MAX, 1); + + /* if the timer is still enabled, restart it */ + if (s->freq && (s->cr & GPT_CR_EN)) { + ptimer_run(s->timer, 1); + } + ptimer_transaction_commit(s->timer); +} + +static void imx_gpt_soft_reset(DeviceState *dev) +{ + IMXGPTState *s = IMX_GPT(dev); + imx_gpt_reset_common(s, true); +} + +static void imx_gpt_reset(DeviceState *dev) +{ + IMXGPTState *s = IMX_GPT(dev); + imx_gpt_reset_common(s, false); +} + +static void imx_gpt_write(void *opaque, hwaddr offset, uint64_t value, + unsigned size) +{ + IMXGPTState *s = IMX_GPT(opaque); + uint32_t oldreg; + + DPRINTF("(%s, value = 0x%08x)\n", imx_gpt_reg_name(offset >> 2), + (uint32_t)value); + + switch (offset >> 2) { + case 0: + oldreg = s->cr; + s->cr = value & ~0x7c14; + if (s->cr & GPT_CR_SWR) { /* force reset */ + /* handle the reset */ + imx_gpt_soft_reset(DEVICE(s)); + } else { + /* set our freq, as the source might have changed */ + ptimer_transaction_begin(s->timer); + imx_gpt_set_freq(s); + + if ((oldreg ^ s->cr) & GPT_CR_EN) { + if (s->cr & GPT_CR_EN) { + if (s->cr & GPT_CR_ENMOD) { + s->next_timeout = GPT_TIMER_MAX; + ptimer_set_count(s->timer, GPT_TIMER_MAX); + imx_gpt_compute_next_timeout(s, false); + } + ptimer_run(s->timer, 1); + } else { + /* stop timer */ + ptimer_stop(s->timer); + } + } + ptimer_transaction_commit(s->timer); + } + break; + + case 1: /* Prescaler */ + s->pr = value & 0xfff; + ptimer_transaction_begin(s->timer); + imx_gpt_set_freq(s); + ptimer_transaction_commit(s->timer); + break; + + case 2: /* SR */ + s->sr &= ~(value & 0x3f); + imx_gpt_update_int(s); + break; + + case 3: /* IR -- interrupt register */ + s->ir = value & 0x3f; + imx_gpt_update_int(s); + + ptimer_transaction_begin(s->timer); + imx_gpt_compute_next_timeout(s, false); + ptimer_transaction_commit(s->timer); + + break; + + case 4: /* OCR1 -- output compare register */ + s->ocr1 = value; + + ptimer_transaction_begin(s->timer); + /* In non-freerun mode, reset count when this register is written */ + if (!(s->cr & GPT_CR_FRR)) { + s->next_timeout = GPT_TIMER_MAX; + ptimer_set_limit(s->timer, GPT_TIMER_MAX, 1); + } + + /* compute the new timeout */ + imx_gpt_compute_next_timeout(s, false); + ptimer_transaction_commit(s->timer); + + break; + + case 5: /* OCR2 -- output compare register */ + s->ocr2 = value; + + /* compute the new timeout */ + ptimer_transaction_begin(s->timer); + imx_gpt_compute_next_timeout(s, false); + ptimer_transaction_commit(s->timer); + + break; + + case 6: /* OCR3 -- output compare register */ + s->ocr3 = value; + + /* compute the new timeout */ + ptimer_transaction_begin(s->timer); + imx_gpt_compute_next_timeout(s, false); + ptimer_transaction_commit(s->timer); + + break; + + default: + qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Bad register at offset 0x%" + HWADDR_PRIx "\n", TYPE_IMX_GPT, __func__, offset); + break; + } +} + +static void imx_gpt_timeout(void *opaque) +{ + IMXGPTState *s = IMX_GPT(opaque); + + DPRINTF("\n"); + + s->sr |= s->next_int; + s->next_int = 0; + + 
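+    /*
+     * The status bits for this event were latched into SR above; now
+     * recompute the next timeout (event == true so restart-mode and
+     * rollover wraps are handled) and update the IRQ line to match.
+     */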
imx_gpt_compute_next_timeout(s, true); + + imx_gpt_update_int(s); + + if (s->freq && (s->cr & GPT_CR_EN)) { + ptimer_run(s->timer, 1); + } +} + +static const MemoryRegionOps imx_gpt_ops = { + .read = imx_gpt_read, + .write = imx_gpt_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + + +static void imx_gpt_realize(DeviceState *dev, Error **errp) +{ + IMXGPTState *s = IMX_GPT(dev); + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + + sysbus_init_irq(sbd, &s->irq); + memory_region_init_io(&s->iomem, OBJECT(s), &imx_gpt_ops, s, TYPE_IMX_GPT, + 0x00001000); + sysbus_init_mmio(sbd, &s->iomem); + + s->timer = ptimer_init(imx_gpt_timeout, s, PTIMER_POLICY_DEFAULT); +} + +static void imx_gpt_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = imx_gpt_realize; + dc->reset = imx_gpt_reset; + dc->vmsd = &vmstate_imx_timer_gpt; + dc->desc = "i.MX general timer"; +} + +static void imx25_gpt_init(Object *obj) +{ + IMXGPTState *s = IMX_GPT(obj); + + s->clocks = imx25_gpt_clocks; +} + +static void imx31_gpt_init(Object *obj) +{ + IMXGPTState *s = IMX_GPT(obj); + + s->clocks = imx31_gpt_clocks; +} + +static void imx6_gpt_init(Object *obj) +{ + IMXGPTState *s = IMX_GPT(obj); + + s->clocks = imx6_gpt_clocks; +} + +static void imx7_gpt_init(Object *obj) +{ + IMXGPTState *s = IMX_GPT(obj); + + s->clocks = imx7_gpt_clocks; +} + +static const TypeInfo imx25_gpt_info = { + .name = TYPE_IMX25_GPT, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(IMXGPTState), + .instance_init = imx25_gpt_init, + .class_init = imx_gpt_class_init, +}; + +static const TypeInfo imx31_gpt_info = { + .name = TYPE_IMX31_GPT, + .parent = TYPE_IMX25_GPT, + .instance_init = imx31_gpt_init, +}; + +static const TypeInfo imx6_gpt_info = { + .name = TYPE_IMX6_GPT, + .parent = TYPE_IMX25_GPT, + .instance_init = imx6_gpt_init, +}; + +static const TypeInfo imx7_gpt_info = { + .name = TYPE_IMX7_GPT, + .parent = TYPE_IMX25_GPT, + .instance_init = imx7_gpt_init, +}; + +static void imx_gpt_register_types(void) +{ + type_register_static(&imx25_gpt_info); + type_register_static(&imx31_gpt_info); + type_register_static(&imx6_gpt_info); + type_register_static(&imx7_gpt_info); +} + +type_init(imx_gpt_register_types) diff --git a/hw/timer/meson.build b/hw/timer/meson.build new file mode 100644 index 000000000..03092e2ce --- /dev/null +++ b/hw/timer/meson.build @@ -0,0 +1,40 @@ +softmmu_ss.add(when: 'CONFIG_A9_GTIMER', if_true: files('a9gtimer.c')) +softmmu_ss.add(when: 'CONFIG_ALLWINNER_A10_PIT', if_true: files('allwinner-a10-pit.c')) +softmmu_ss.add(when: 'CONFIG_ALTERA_TIMER', if_true: files('altera_timer.c')) +softmmu_ss.add(when: 'CONFIG_ARM_MPTIMER', if_true: files('arm_mptimer.c')) +softmmu_ss.add(when: 'CONFIG_ARM_TIMER', if_true: files('arm_timer.c')) +softmmu_ss.add(when: 'CONFIG_ARM_V7M', if_true: files('armv7m_systick.c')) +softmmu_ss.add(when: 'CONFIG_ASPEED_SOC', if_true: files('aspeed_timer.c')) +softmmu_ss.add(when: 'CONFIG_CADENCE', if_true: files('cadence_ttc.c')) +softmmu_ss.add(when: 'CONFIG_CMSDK_APB_DUALTIMER', if_true: files('cmsdk-apb-dualtimer.c')) +softmmu_ss.add(when: 'CONFIG_CMSDK_APB_TIMER', if_true: files('cmsdk-apb-timer.c')) +softmmu_ss.add(when: 'CONFIG_RENESAS_TMR', if_true: files('renesas_tmr.c')) +softmmu_ss.add(when: 'CONFIG_RENESAS_CMT', if_true: files('renesas_cmt.c')) +softmmu_ss.add(when: 'CONFIG_DIGIC', if_true: files('digic-timer.c')) +softmmu_ss.add(when: 'CONFIG_ETRAXFS', if_true: files('etraxfs_timer.c')) +softmmu_ss.add(when: 'CONFIG_EXYNOS4', if_true: 
files('exynos4210_mct.c')) +softmmu_ss.add(when: 'CONFIG_EXYNOS4', if_true: files('exynos4210_pwm.c')) +softmmu_ss.add(when: 'CONFIG_GRLIB', if_true: files('grlib_gptimer.c')) +softmmu_ss.add(when: 'CONFIG_HPET', if_true: files('hpet.c')) +softmmu_ss.add(when: 'CONFIG_I8254', if_true: files('i8254_common.c', 'i8254.c')) +softmmu_ss.add(when: 'CONFIG_IMX', if_true: files('imx_epit.c')) +softmmu_ss.add(when: 'CONFIG_IMX', if_true: files('imx_gpt.c')) +softmmu_ss.add(when: 'CONFIG_MIPS_CPS', if_true: files('mips_gictimer.c')) +softmmu_ss.add(when: 'CONFIG_MSF2', if_true: files('mss-timer.c')) +softmmu_ss.add(when: 'CONFIG_NPCM7XX', if_true: files('npcm7xx_timer.c')) +softmmu_ss.add(when: 'CONFIG_NRF51_SOC', if_true: files('nrf51_timer.c')) +softmmu_ss.add(when: 'CONFIG_OMAP', if_true: files('omap_gptimer.c')) +softmmu_ss.add(when: 'CONFIG_OMAP', if_true: files('omap_synctimer.c')) +softmmu_ss.add(when: 'CONFIG_PXA2XX', if_true: files('pxa2xx_timer.c')) +softmmu_ss.add(when: 'CONFIG_RASPI', if_true: files('bcm2835_systmr.c')) +softmmu_ss.add(when: 'CONFIG_SH_TIMER', if_true: files('sh_timer.c')) +softmmu_ss.add(when: 'CONFIG_SLAVIO', if_true: files('slavio_timer.c')) +softmmu_ss.add(when: 'CONFIG_SSE_COUNTER', if_true: files('sse-counter.c')) +softmmu_ss.add(when: 'CONFIG_SSE_TIMER', if_true: files('sse-timer.c')) +softmmu_ss.add(when: 'CONFIG_STELLARIS_GPTM', if_true: files('stellaris-gptm.c')) +softmmu_ss.add(when: 'CONFIG_STM32F2XX_TIMER', if_true: files('stm32f2xx_timer.c')) +softmmu_ss.add(when: 'CONFIG_XILINX', if_true: files('xilinx_timer.c')) +specific_ss.add(when: 'CONFIG_IBEX', if_true: files('ibex_timer.c')) +softmmu_ss.add(when: 'CONFIG_SIFIVE_PWM', if_true: files('sifive_pwm.c')) + +specific_ss.add(when: 'CONFIG_AVR_TIMER16', if_true: files('avr_timer16.c')) diff --git a/hw/timer/mips_gictimer.c b/hw/timer/mips_gictimer.c new file mode 100644 index 000000000..2b0696d4a --- /dev/null +++ b/hw/timer/mips_gictimer.c @@ -0,0 +1,145 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ *
+ * Copyright (C) 2016 Imagination Technologies
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/timer.h"
+#include "hw/timer/mips_gictimer.h"
+
+#define TIMER_PERIOD 10 /* 10 ns period for 100 MHz frequency */
+
+uint32_t mips_gictimer_get_freq(MIPSGICTimerState *gic)
+{
+    return NANOSECONDS_PER_SECOND / TIMER_PERIOD;
+}
+
+static void gic_vptimer_update(MIPSGICTimerState *gictimer,
+                               uint32_t vp_index, uint64_t now)
+{
+    uint64_t next;
+    uint32_t wait;
+
+    wait = gictimer->vptimers[vp_index].comparelo - gictimer->sh_counterlo -
+           (uint32_t)(now / TIMER_PERIOD);
+    next = now + (uint64_t)wait * TIMER_PERIOD;
+
+    timer_mod(gictimer->vptimers[vp_index].qtimer, next);
+}
+
+static void gic_vptimer_expire(MIPSGICTimerState *gictimer, uint32_t vp_index,
+                               uint64_t now)
+{
+    if (gictimer->countstop) {
+        /* timer stopped */
+        return;
+    }
+    gictimer->cb(gictimer->opaque, vp_index);
+    gic_vptimer_update(gictimer, vp_index, now);
+}
+
+static void gic_vptimer_cb(void *opaque)
+{
+    MIPSGICTimerVPState *vptimer = opaque;
+    MIPSGICTimerState *gictimer = vptimer->gictimer;
+    gic_vptimer_expire(gictimer, vptimer->vp_index,
+                       qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
+}
+
+uint32_t mips_gictimer_get_sh_count(MIPSGICTimerState *gictimer)
+{
+    int i;
+    if (gictimer->countstop) {
+        return gictimer->sh_counterlo;
+    } else {
+        uint64_t now;
+        now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
+        for (i = 0; i < gictimer->num_vps; i++) {
+            if (timer_pending(gictimer->vptimers[i].qtimer)
+                && timer_expired(gictimer->vptimers[i].qtimer, now)) {
+                /* The timer has already expired. */
+                gic_vptimer_expire(gictimer, i, now);
+            }
+        }
+        return gictimer->sh_counterlo + (uint32_t)(now / TIMER_PERIOD);
+    }
+}
+
+void mips_gictimer_store_sh_count(MIPSGICTimerState *gictimer, uint64_t count)
+{
+    int i;
+    uint64_t now;
+
+    if (gictimer->countstop || !gictimer->vptimers[0].qtimer) {
+        gictimer->sh_counterlo = count;
+    } else {
+        /* Store new count register */
+        now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
+        gictimer->sh_counterlo = count - (uint32_t)(now / TIMER_PERIOD);
+        /* Update each VP timer */
+        for (i = 0; i < gictimer->num_vps; i++) {
+            gic_vptimer_update(gictimer, i, now);
+        }
+    }
+}
+
+uint32_t mips_gictimer_get_vp_compare(MIPSGICTimerState *gictimer,
+                                      uint32_t vp_index)
+{
+    return gictimer->vptimers[vp_index].comparelo;
+}
+
+void mips_gictimer_store_vp_compare(MIPSGICTimerState *gictimer,
+                                    uint32_t vp_index, uint64_t compare)
+{
+    gictimer->vptimers[vp_index].comparelo = (uint32_t) compare;
+    gic_vptimer_update(gictimer, vp_index,
+                       qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
+}
+
+uint8_t mips_gictimer_get_countstop(MIPSGICTimerState *gictimer)
+{
+    return gictimer->countstop;
+}
+
+void mips_gictimer_start_count(MIPSGICTimerState *gictimer)
+{
+    gictimer->countstop = 0;
+    mips_gictimer_store_sh_count(gictimer, gictimer->sh_counterlo);
+}
+
+void mips_gictimer_stop_count(MIPSGICTimerState *gictimer)
+{
+    int i;
+
+    gictimer->countstop = 1;
+    /* Store the current value */
+    gictimer->sh_counterlo +=
+        (uint32_t)(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / TIMER_PERIOD);
+    for (i = 0; i < gictimer->num_vps; i++) {
+        timer_del(gictimer->vptimers[i].qtimer);
+    }
+}
+
+MIPSGICTimerState *mips_gictimer_init(void *opaque, uint32_t nvps,
+                                      MIPSGICTimerCB *cb)
+{
+    int i;
+    MIPSGICTimerState *gictimer = g_new(MIPSGICTimerState, 1);
+    gictimer->vptimers = g_new(MIPSGICTimerVPState, nvps);
+    gictimer->countstop = 1;
+    gictimer->num_vps = nvps;
+    gictimer->opaque = opaque;
+    gictimer->cb = cb;
+    for (i = 0; i < nvps; i++) {
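+        /*
+         * Give each per-VP timer a back-pointer and its index so that
+         * gic_vptimer_cb() can recover the shared state from its opaque
+         * argument when the QEMUTimer fires.
+         */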
+        gictimer->vptimers[i].gictimer = gictimer;
+        gictimer->vptimers[i].vp_index = i;
+        gictimer->vptimers[i].qtimer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
+                                                    &gic_vptimer_cb,
+                                                    &gictimer->vptimers[i]);
+    }
+    return gictimer;
+}
diff --git a/hw/timer/mss-timer.c b/hw/timer/mss-timer.c
new file mode 100644
index 000000000..fe0ca905f
--- /dev/null
+++ b/hw/timer/mss-timer.c
@@ -0,0 +1,311 @@
+/*
+ * Block model of System timer present in
+ * Microsemi's SmartFusion2 and SmartFusion SoCs.
+ *
+ * Copyright (c) 2017 Subbaraya Sundeep .
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/module.h"
+#include "qemu/log.h"
+#include "hw/irq.h"
+#include "hw/qdev-properties.h"
+#include "hw/timer/mss-timer.h"
+#include "migration/vmstate.h"
+
+#ifndef MSS_TIMER_ERR_DEBUG
+#define MSS_TIMER_ERR_DEBUG 0
+#endif
+
+#define DB_PRINT_L(lvl, fmt, args...) do { \
+    if (MSS_TIMER_ERR_DEBUG >= lvl) { \
+        qemu_log("%s: " fmt "\n", __func__, ## args); \
+    } \
+} while (0)
+
+#define DB_PRINT(fmt, args...) DB_PRINT_L(1, fmt, ## args)
+
+#define R_TIM_VAL 0
+#define R_TIM_LOADVAL 1
+#define R_TIM_BGLOADVAL 2
+#define R_TIM_CTRL 3
+#define R_TIM_RIS 4
+#define R_TIM_MIS 5
+
+#define TIMER_CTRL_ENBL (1 << 0)
+#define TIMER_CTRL_ONESHOT (1 << 1)
+#define TIMER_CTRL_INTR (1 << 2)
+#define TIMER_RIS_ACK (1 << 0)
+#define TIMER_RST_CLR (1 << 6)
+#define TIMER_MODE (1 << 0)
+
+static void timer_update_irq(struct Msf2Timer *st)
+{
+    bool isr, ier;
+
+    isr = !!(st->regs[R_TIM_RIS] & TIMER_RIS_ACK);
+    ier = !!(st->regs[R_TIM_CTRL] & TIMER_CTRL_INTR);
+    qemu_set_irq(st->irq, (ier && isr));
+}
+
+/* Must be called from within a ptimer_transaction_begin/commit block */
+static void timer_update(struct Msf2Timer *st)
+{
+    uint64_t count;
+
+    if (!(st->regs[R_TIM_CTRL] & TIMER_CTRL_ENBL)) {
+        ptimer_stop(st->ptimer);
+        return;
+    }
+
+    count = st->regs[R_TIM_LOADVAL];
+    ptimer_set_limit(st->ptimer, count, 1);
+    ptimer_run(st->ptimer, 1);
+}
+
+static uint64_t
+timer_read(void *opaque, hwaddr offset, unsigned int size)
+{
+    MSSTimerState *t = opaque;
+    hwaddr addr;
+    struct Msf2Timer *st;
+    uint32_t ret = 0;
+    int timer = 0;
+    int isr;
+    int ier;
+
+    addr = offset >> 2;
+    /*
+     * Two independent timers have the same base address.
+     * Based on the address passed in, figure out which timer is being used.
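+     * Word offsets 0 .. R_TIM1_MAX - 1 address timer 0, and offsets
+     * R_TIM1_MAX .. NUM_TIMERS * R_TIM1_MAX - 1 address timer 1, as the
+     * range check below implements.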
+     */
+    if ((addr >= R_TIM1_MAX) && (addr < NUM_TIMERS * R_TIM1_MAX)) {
+        timer = 1;
+        addr -= R_TIM1_MAX;
+    }
+
+    st = &t->timers[timer];
+
+    switch (addr) {
+    case R_TIM_VAL:
+        ret = ptimer_get_count(st->ptimer);
+        break;
+
+    case R_TIM_MIS:
+        isr = !!(st->regs[R_TIM_RIS] & TIMER_RIS_ACK);
+        ier = !!(st->regs[R_TIM_CTRL] & TIMER_CTRL_INTR);
+        ret = ier & isr;
+        break;
+
+    default:
+        if (addr < R_TIM1_MAX) {
+            ret = st->regs[addr];
+        } else {
+            qemu_log_mask(LOG_GUEST_ERROR,
+                          TYPE_MSS_TIMER": 64-bit mode not supported\n");
+            return ret;
+        }
+        break;
+    }
+
+    DB_PRINT("timer=%d 0x%" HWADDR_PRIx "=0x%" PRIx32, timer, offset,
+             ret);
+    return ret;
+}
+
+static void
+timer_write(void *opaque, hwaddr offset,
+            uint64_t val64, unsigned int size)
+{
+    MSSTimerState *t = opaque;
+    hwaddr addr;
+    struct Msf2Timer *st;
+    int timer = 0;
+    uint32_t value = val64;
+
+    addr = offset >> 2;
+    /*
+     * Two independent timers have the same base address.
+     * Based on the address passed in, figure out which timer is being used.
+     */
+    if ((addr >= R_TIM1_MAX) && (addr < NUM_TIMERS * R_TIM1_MAX)) {
+        timer = 1;
+        addr -= R_TIM1_MAX;
+    }
+
+    st = &t->timers[timer];
+
+    DB_PRINT("addr=0x%" HWADDR_PRIx " val=0x%" PRIx32 " (timer=%d)", offset,
+             value, timer);
+
+    switch (addr) {
+    case R_TIM_CTRL:
+        st->regs[R_TIM_CTRL] = value;
+        ptimer_transaction_begin(st->ptimer);
+        timer_update(st);
+        ptimer_transaction_commit(st->ptimer);
+        break;
+
+    case R_TIM_RIS:
+        if (value & TIMER_RIS_ACK) {
+            st->regs[R_TIM_RIS] &= ~TIMER_RIS_ACK;
+        }
+        break;
+
+    case R_TIM_LOADVAL:
+        st->regs[R_TIM_LOADVAL] = value;
+        if (st->regs[R_TIM_CTRL] & TIMER_CTRL_ENBL) {
+            ptimer_transaction_begin(st->ptimer);
+            timer_update(st);
+            ptimer_transaction_commit(st->ptimer);
+        }
+        break;
+
+    case R_TIM_BGLOADVAL:
+        st->regs[R_TIM_BGLOADVAL] = value;
+        st->regs[R_TIM_LOADVAL] = value;
+        break;
+
+    case R_TIM_VAL:
+    case R_TIM_MIS:
+        break;
+
+    default:
+        if (addr < R_TIM1_MAX) {
+            st->regs[addr] = value;
+        } else {
+            qemu_log_mask(LOG_GUEST_ERROR,
+                          TYPE_MSS_TIMER": 64-bit mode not supported\n");
+            return;
+        }
+        break;
+    }
+    timer_update_irq(st);
+}
+
+static const MemoryRegionOps timer_ops = {
+    .read = timer_read,
+    .write = timer_write,
+    .endianness = DEVICE_NATIVE_ENDIAN,
+    .valid = {
+        .min_access_size = 1,
+        .max_access_size = 4
+    }
+};
+
+static void timer_hit(void *opaque)
+{
+    struct Msf2Timer *st = opaque;
+
+    st->regs[R_TIM_RIS] |= TIMER_RIS_ACK;
+
+    if (!(st->regs[R_TIM_CTRL] & TIMER_CTRL_ONESHOT)) {
+        timer_update(st);
+    }
+    timer_update_irq(st);
+}
+
+static void mss_timer_init(Object *obj)
+{
+    MSSTimerState *t = MSS_TIMER(obj);
+    int i;
+
+    /* Init all the ptimers.
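+     * All of them run from the shared "clock-frequency" property
+     * (t->freq_hz), and ptimer_set_freq() has to be called inside a
+     * ptimer transaction, as below.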
*/ + for (i = 0; i < NUM_TIMERS; i++) { + struct Msf2Timer *st = &t->timers[i]; + + st->ptimer = ptimer_init(timer_hit, st, PTIMER_POLICY_DEFAULT); + ptimer_transaction_begin(st->ptimer); + ptimer_set_freq(st->ptimer, t->freq_hz); + ptimer_transaction_commit(st->ptimer); + sysbus_init_irq(SYS_BUS_DEVICE(obj), &st->irq); + } + + memory_region_init_io(&t->mmio, OBJECT(t), &timer_ops, t, TYPE_MSS_TIMER, + NUM_TIMERS * R_TIM1_MAX * 4); + sysbus_init_mmio(SYS_BUS_DEVICE(obj), &t->mmio); +} + +static void mss_timer_finalize(Object *obj) +{ + MSSTimerState *t = MSS_TIMER(obj); + int i; + + for (i = 0; i < NUM_TIMERS; i++) { + struct Msf2Timer *st = &t->timers[i]; + + ptimer_free(st->ptimer); + } +} + +static const VMStateDescription vmstate_timers = { + .name = "mss-timer-block", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_PTIMER(ptimer, struct Msf2Timer), + VMSTATE_UINT32_ARRAY(regs, struct Msf2Timer, R_TIM1_MAX), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_mss_timer = { + .name = TYPE_MSS_TIMER, + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(freq_hz, MSSTimerState), + VMSTATE_STRUCT_ARRAY(timers, MSSTimerState, NUM_TIMERS, 0, + vmstate_timers, struct Msf2Timer), + VMSTATE_END_OF_LIST() + } +}; + +static Property mss_timer_properties[] = { + /* Libero GUI shows 100Mhz as default for clocks */ + DEFINE_PROP_UINT32("clock-frequency", MSSTimerState, freq_hz, + 100 * 1000000), + DEFINE_PROP_END_OF_LIST(), +}; + +static void mss_timer_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + device_class_set_props(dc, mss_timer_properties); + dc->vmsd = &vmstate_mss_timer; +} + +static const TypeInfo mss_timer_info = { + .name = TYPE_MSS_TIMER, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(MSSTimerState), + .instance_init = mss_timer_init, + .instance_finalize = mss_timer_finalize, + .class_init = mss_timer_class_init, +}; + +static void mss_timer_register_types(void) +{ + type_register_static(&mss_timer_info); +} + +type_init(mss_timer_register_types) diff --git a/hw/timer/npcm7xx_timer.c b/hw/timer/npcm7xx_timer.c new file mode 100644 index 000000000..32f5e021f --- /dev/null +++ b/hw/timer/npcm7xx_timer.c @@ -0,0 +1,714 @@ +/* + * Nuvoton NPCM7xx Timer Controller + * + * Copyright 2020 Google LLC + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + */ + +#include "qemu/osdep.h" + +#include "hw/irq.h" +#include "hw/qdev-clock.h" +#include "hw/qdev-properties.h" +#include "hw/timer/npcm7xx_timer.h" +#include "migration/vmstate.h" +#include "qemu/bitops.h" +#include "qemu/error-report.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "qemu/timer.h" +#include "qemu/units.h" +#include "trace.h" + +/* 32-bit register indices. 
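+ *
+ * The registers for timers 0-3 (plus TISR and WTCR) form one contiguous
+ * block, while the fifth timer's TCSR4, TICR4 and TDR4 sit apart at
+ * byte offsets 0x40, 0x48 and 0x50, hence the explicit values below.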
*/ +enum NPCM7xxTimerRegisters { + NPCM7XX_TIMER_TCSR0, + NPCM7XX_TIMER_TCSR1, + NPCM7XX_TIMER_TICR0, + NPCM7XX_TIMER_TICR1, + NPCM7XX_TIMER_TDR0, + NPCM7XX_TIMER_TDR1, + NPCM7XX_TIMER_TISR, + NPCM7XX_TIMER_WTCR, + NPCM7XX_TIMER_TCSR2, + NPCM7XX_TIMER_TCSR3, + NPCM7XX_TIMER_TICR2, + NPCM7XX_TIMER_TICR3, + NPCM7XX_TIMER_TDR2, + NPCM7XX_TIMER_TDR3, + NPCM7XX_TIMER_TCSR4 = 0x0040 / sizeof(uint32_t), + NPCM7XX_TIMER_TICR4 = 0x0048 / sizeof(uint32_t), + NPCM7XX_TIMER_TDR4 = 0x0050 / sizeof(uint32_t), + NPCM7XX_TIMER_REGS_END, +}; + +/* Register field definitions. */ +#define NPCM7XX_TCSR_CEN BIT(30) +#define NPCM7XX_TCSR_IE BIT(29) +#define NPCM7XX_TCSR_PERIODIC BIT(27) +#define NPCM7XX_TCSR_CRST BIT(26) +#define NPCM7XX_TCSR_CACT BIT(25) +#define NPCM7XX_TCSR_RSVD 0x01ffff00 +#define NPCM7XX_TCSR_PRESCALE_START 0 +#define NPCM7XX_TCSR_PRESCALE_LEN 8 + +#define NPCM7XX_WTCR_WTCLK(rv) extract32(rv, 10, 2) +#define NPCM7XX_WTCR_FREEZE_EN BIT(9) +#define NPCM7XX_WTCR_WTE BIT(7) +#define NPCM7XX_WTCR_WTIE BIT(6) +#define NPCM7XX_WTCR_WTIS(rv) extract32(rv, 4, 2) +#define NPCM7XX_WTCR_WTIF BIT(3) +#define NPCM7XX_WTCR_WTRF BIT(2) +#define NPCM7XX_WTCR_WTRE BIT(1) +#define NPCM7XX_WTCR_WTR BIT(0) + +/* + * The number of clock cycles between interrupt and reset in watchdog, used + * by the software to handle the interrupt before system is reset. + */ +#define NPCM7XX_WATCHDOG_INTERRUPT_TO_RESET_CYCLES 1024 + +/* Start or resume the timer. */ +static void npcm7xx_timer_start(NPCM7xxBaseTimer *t) +{ + int64_t now; + + now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + t->expires_ns = now + t->remaining_ns; + timer_mod(&t->qtimer, t->expires_ns); +} + +/* Stop counting. Record the time remaining so we can continue later. */ +static void npcm7xx_timer_pause(NPCM7xxBaseTimer *t) +{ + int64_t now; + + timer_del(&t->qtimer); + now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + t->remaining_ns = t->expires_ns - now; +} + +/* Delete the timer and reset it to default state. */ +static void npcm7xx_timer_clear(NPCM7xxBaseTimer *t) +{ + timer_del(&t->qtimer); + t->expires_ns = 0; + t->remaining_ns = 0; +} + +/* + * Returns the index of timer in the tc->timer array. This can be used to + * locate the registers that belong to this timer. + */ +static int npcm7xx_timer_index(NPCM7xxTimerCtrlState *tc, NPCM7xxTimer *timer) +{ + int index = timer - tc->timer; + + g_assert(index >= 0 && index < NPCM7XX_TIMERS_PER_CTRL); + + return index; +} + +/* Return the value by which to divide the reference clock rate. */ +static uint32_t npcm7xx_tcsr_prescaler(uint32_t tcsr) +{ + return extract32(tcsr, NPCM7XX_TCSR_PRESCALE_START, + NPCM7XX_TCSR_PRESCALE_LEN) + 1; +} + +/* Convert a timer cycle count to a time interval in nanoseconds. */ +static int64_t npcm7xx_timer_count_to_ns(NPCM7xxTimer *t, uint32_t count) +{ + int64_t ticks = count; + + ticks *= npcm7xx_tcsr_prescaler(t->tcsr); + + return clock_ticks_to_ns(t->ctrl->clock, ticks); +} + +/* Convert a time interval in nanoseconds to a timer cycle count. 
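+ * This mirrors npcm7xx_timer_count_to_ns() above. For instance, with an
+ * input clock of 25 MHz (40 ns per tick; an illustrative figure only,
+ * the real rate comes from the "clock" input) and a prescale field of 0
+ * (divide by 1), a 1 s interval converts to 25,000,000 timer cycles.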
 */
+static uint32_t npcm7xx_timer_ns_to_count(NPCM7xxTimer *t, int64_t ns)
+{
+    return clock_ns_to_ticks(t->ctrl->clock, ns) /
+        npcm7xx_tcsr_prescaler(t->tcsr);
+}
+
+static uint32_t npcm7xx_watchdog_timer_prescaler(const NPCM7xxWatchdogTimer *t)
+{
+    switch (NPCM7XX_WTCR_WTCLK(t->wtcr)) {
+    case 0:
+        return 1;
+    case 1:
+        return 256;
+    case 2:
+        return 2048;
+    case 3:
+        return 65536;
+    default:
+        g_assert_not_reached();
+    }
+}
+
+static void npcm7xx_watchdog_timer_reset_cycles(NPCM7xxWatchdogTimer *t,
+                                                int64_t cycles)
+{
+    int64_t ticks = cycles * npcm7xx_watchdog_timer_prescaler(t);
+    int64_t ns = clock_ticks_to_ns(t->ctrl->clock, ticks);
+
+    /*
+     * The reset function always clears the current timer. The caller of
+     * this function needs to decide whether to start the watchdog timer
+     * based on a specific flag in WTCR.
+     */
+    npcm7xx_timer_clear(&t->base_timer);
+
+    t->base_timer.remaining_ns = ns;
+}
+
+static void npcm7xx_watchdog_timer_reset(NPCM7xxWatchdogTimer *t)
+{
+    int64_t cycles = 1;
+    uint32_t s = NPCM7XX_WTCR_WTIS(t->wtcr);
+
+    g_assert(s <= 3);
+
+    cycles <<= NPCM7XX_WATCHDOG_BASETIME_SHIFT;
+    cycles <<= 2 * s;
+
+    npcm7xx_watchdog_timer_reset_cycles(t, cycles);
+}
+
+/*
+ * Raise the interrupt line if there's a pending interrupt and interrupts are
+ * enabled for this timer. If not, lower it.
+ */
+static void npcm7xx_timer_check_interrupt(NPCM7xxTimer *t)
+{
+    NPCM7xxTimerCtrlState *tc = t->ctrl;
+    int index = npcm7xx_timer_index(tc, t);
+    bool pending = (t->tcsr & NPCM7XX_TCSR_IE) && (tc->tisr & BIT(index));
+
+    qemu_set_irq(t->irq, pending);
+    trace_npcm7xx_timer_irq(DEVICE(tc)->canonical_path, index, pending);
+}
+
+/*
+ * Called when the counter reaches zero. Sets the interrupt flag, and either
+ * restarts or disables the timer.
+ */
+static void npcm7xx_timer_reached_zero(NPCM7xxTimer *t)
+{
+    NPCM7xxTimerCtrlState *tc = t->ctrl;
+    int index = npcm7xx_timer_index(tc, t);
+
+    tc->tisr |= BIT(index);
+
+    if (t->tcsr & NPCM7XX_TCSR_PERIODIC) {
+        t->base_timer.remaining_ns = npcm7xx_timer_count_to_ns(t, t->ticr);
+        if (t->tcsr & NPCM7XX_TCSR_CEN) {
+            npcm7xx_timer_start(&t->base_timer);
+        }
+    } else {
+        t->tcsr &= ~(NPCM7XX_TCSR_CEN | NPCM7XX_TCSR_CACT);
+    }
+
+    npcm7xx_timer_check_interrupt(t);
+}
+
+
+/*
+ * Restart the timer from its initial value. If the timer was enabled and stays
+ * enabled, adjust the QEMU timer according to the new count. If the timer is
+ * transitioning from disabled to enabled, the caller is expected to start the
+ * timer later.
+ */ +static void npcm7xx_timer_restart(NPCM7xxTimer *t, uint32_t old_tcsr) +{ + t->base_timer.remaining_ns = npcm7xx_timer_count_to_ns(t, t->ticr); + + if (old_tcsr & t->tcsr & NPCM7XX_TCSR_CEN) { + npcm7xx_timer_start(&t->base_timer); + } +} + +/* Register read and write handlers */ + +static uint32_t npcm7xx_timer_read_tdr(NPCM7xxTimer *t) +{ + if (t->tcsr & NPCM7XX_TCSR_CEN) { + int64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + + return npcm7xx_timer_ns_to_count(t, t->base_timer.expires_ns - now); + } + + return npcm7xx_timer_ns_to_count(t, t->base_timer.remaining_ns); +} + +static void npcm7xx_timer_write_tcsr(NPCM7xxTimer *t, uint32_t new_tcsr) +{ + uint32_t old_tcsr = t->tcsr; + uint32_t tdr; + + if (new_tcsr & NPCM7XX_TCSR_RSVD) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: reserved bits in 0x%08x ignored\n", + __func__, new_tcsr); + new_tcsr &= ~NPCM7XX_TCSR_RSVD; + } + if (new_tcsr & NPCM7XX_TCSR_CACT) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: read-only bits in 0x%08x ignored\n", + __func__, new_tcsr); + new_tcsr &= ~NPCM7XX_TCSR_CACT; + } + if ((new_tcsr & NPCM7XX_TCSR_CRST) && (new_tcsr & NPCM7XX_TCSR_CEN)) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: both CRST and CEN set; ignoring CEN.\n", + __func__); + new_tcsr &= ~NPCM7XX_TCSR_CEN; + } + + /* Calculate the value of TDR before potentially changing the prescaler. */ + tdr = npcm7xx_timer_read_tdr(t); + + t->tcsr = (t->tcsr & NPCM7XX_TCSR_CACT) | new_tcsr; + + if (npcm7xx_tcsr_prescaler(old_tcsr) != npcm7xx_tcsr_prescaler(new_tcsr)) { + /* Recalculate time remaining based on the current TDR value. */ + t->base_timer.remaining_ns = npcm7xx_timer_count_to_ns(t, tdr); + if (old_tcsr & t->tcsr & NPCM7XX_TCSR_CEN) { + npcm7xx_timer_start(&t->base_timer); + } + } + + if ((old_tcsr ^ new_tcsr) & NPCM7XX_TCSR_IE) { + npcm7xx_timer_check_interrupt(t); + } + if (new_tcsr & NPCM7XX_TCSR_CRST) { + npcm7xx_timer_restart(t, old_tcsr); + t->tcsr &= ~NPCM7XX_TCSR_CRST; + } + if ((old_tcsr ^ new_tcsr) & NPCM7XX_TCSR_CEN) { + if (new_tcsr & NPCM7XX_TCSR_CEN) { + t->tcsr |= NPCM7XX_TCSR_CACT; + npcm7xx_timer_start(&t->base_timer); + } else { + t->tcsr &= ~NPCM7XX_TCSR_CACT; + npcm7xx_timer_pause(&t->base_timer); + if (t->base_timer.remaining_ns <= 0) { + npcm7xx_timer_reached_zero(t); + } + } + } +} + +static void npcm7xx_timer_write_ticr(NPCM7xxTimer *t, uint32_t new_ticr) +{ + t->ticr = new_ticr; + + npcm7xx_timer_restart(t, t->tcsr); +} + +static void npcm7xx_timer_write_tisr(NPCM7xxTimerCtrlState *s, uint32_t value) +{ + int i; + + s->tisr &= ~value; + for (i = 0; i < ARRAY_SIZE(s->timer); i++) { + if (value & (1U << i)) { + npcm7xx_timer_check_interrupt(&s->timer[i]); + } + + } +} + +static void npcm7xx_timer_write_wtcr(NPCM7xxWatchdogTimer *t, uint32_t new_wtcr) +{ + uint32_t old_wtcr = t->wtcr; + + /* + * WTIF and WTRF are cleared by writing 1. Writing 0 makes these bits + * unchanged. 
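+     * For example, a write with WTIF set and WTRF clear acknowledges
+     * only the interrupt flag; a previously set WTRF is carried over
+     * into the new value. All other WTCR bits simply take the value
+     * written.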
+ */ + if (new_wtcr & NPCM7XX_WTCR_WTIF) { + new_wtcr &= ~NPCM7XX_WTCR_WTIF; + } else if (old_wtcr & NPCM7XX_WTCR_WTIF) { + new_wtcr |= NPCM7XX_WTCR_WTIF; + } + if (new_wtcr & NPCM7XX_WTCR_WTRF) { + new_wtcr &= ~NPCM7XX_WTCR_WTRF; + } else if (old_wtcr & NPCM7XX_WTCR_WTRF) { + new_wtcr |= NPCM7XX_WTCR_WTRF; + } + + t->wtcr = new_wtcr; + + if (new_wtcr & NPCM7XX_WTCR_WTR) { + t->wtcr &= ~NPCM7XX_WTCR_WTR; + npcm7xx_watchdog_timer_reset(t); + if (new_wtcr & NPCM7XX_WTCR_WTE) { + npcm7xx_timer_start(&t->base_timer); + } + } else if ((old_wtcr ^ new_wtcr) & NPCM7XX_WTCR_WTE) { + if (new_wtcr & NPCM7XX_WTCR_WTE) { + npcm7xx_timer_start(&t->base_timer); + } else { + npcm7xx_timer_pause(&t->base_timer); + } + } + +} + +static hwaddr npcm7xx_tcsr_index(hwaddr reg) +{ + switch (reg) { + case NPCM7XX_TIMER_TCSR0: + return 0; + case NPCM7XX_TIMER_TCSR1: + return 1; + case NPCM7XX_TIMER_TCSR2: + return 2; + case NPCM7XX_TIMER_TCSR3: + return 3; + case NPCM7XX_TIMER_TCSR4: + return 4; + default: + g_assert_not_reached(); + } +} + +static hwaddr npcm7xx_ticr_index(hwaddr reg) +{ + switch (reg) { + case NPCM7XX_TIMER_TICR0: + return 0; + case NPCM7XX_TIMER_TICR1: + return 1; + case NPCM7XX_TIMER_TICR2: + return 2; + case NPCM7XX_TIMER_TICR3: + return 3; + case NPCM7XX_TIMER_TICR4: + return 4; + default: + g_assert_not_reached(); + } +} + +static hwaddr npcm7xx_tdr_index(hwaddr reg) +{ + switch (reg) { + case NPCM7XX_TIMER_TDR0: + return 0; + case NPCM7XX_TIMER_TDR1: + return 1; + case NPCM7XX_TIMER_TDR2: + return 2; + case NPCM7XX_TIMER_TDR3: + return 3; + case NPCM7XX_TIMER_TDR4: + return 4; + default: + g_assert_not_reached(); + } +} + +static uint64_t npcm7xx_timer_read(void *opaque, hwaddr offset, unsigned size) +{ + NPCM7xxTimerCtrlState *s = opaque; + uint64_t value = 0; + hwaddr reg; + + reg = offset / sizeof(uint32_t); + switch (reg) { + case NPCM7XX_TIMER_TCSR0: + case NPCM7XX_TIMER_TCSR1: + case NPCM7XX_TIMER_TCSR2: + case NPCM7XX_TIMER_TCSR3: + case NPCM7XX_TIMER_TCSR4: + value = s->timer[npcm7xx_tcsr_index(reg)].tcsr; + break; + + case NPCM7XX_TIMER_TICR0: + case NPCM7XX_TIMER_TICR1: + case NPCM7XX_TIMER_TICR2: + case NPCM7XX_TIMER_TICR3: + case NPCM7XX_TIMER_TICR4: + value = s->timer[npcm7xx_ticr_index(reg)].ticr; + break; + + case NPCM7XX_TIMER_TDR0: + case NPCM7XX_TIMER_TDR1: + case NPCM7XX_TIMER_TDR2: + case NPCM7XX_TIMER_TDR3: + case NPCM7XX_TIMER_TDR4: + value = npcm7xx_timer_read_tdr(&s->timer[npcm7xx_tdr_index(reg)]); + break; + + case NPCM7XX_TIMER_TISR: + value = s->tisr; + break; + + case NPCM7XX_TIMER_WTCR: + value = s->watchdog_timer.wtcr; + break; + + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: invalid offset 0x%04" HWADDR_PRIx "\n", + __func__, offset); + break; + } + + trace_npcm7xx_timer_read(DEVICE(s)->canonical_path, offset, value); + + return value; +} + +static void npcm7xx_timer_write(void *opaque, hwaddr offset, + uint64_t v, unsigned size) +{ + uint32_t reg = offset / sizeof(uint32_t); + NPCM7xxTimerCtrlState *s = opaque; + uint32_t value = v; + + trace_npcm7xx_timer_write(DEVICE(s)->canonical_path, offset, value); + + switch (reg) { + case NPCM7XX_TIMER_TCSR0: + case NPCM7XX_TIMER_TCSR1: + case NPCM7XX_TIMER_TCSR2: + case NPCM7XX_TIMER_TCSR3: + case NPCM7XX_TIMER_TCSR4: + npcm7xx_timer_write_tcsr(&s->timer[npcm7xx_tcsr_index(reg)], value); + return; + + case NPCM7XX_TIMER_TICR0: + case NPCM7XX_TIMER_TICR1: + case NPCM7XX_TIMER_TICR2: + case NPCM7XX_TIMER_TICR3: + case NPCM7XX_TIMER_TICR4: + npcm7xx_timer_write_ticr(&s->timer[npcm7xx_ticr_index(reg)], value); + 
return; + + case NPCM7XX_TIMER_TDR0: + case NPCM7XX_TIMER_TDR1: + case NPCM7XX_TIMER_TDR2: + case NPCM7XX_TIMER_TDR3: + case NPCM7XX_TIMER_TDR4: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: register @ 0x%04" HWADDR_PRIx " is read-only\n", + __func__, offset); + return; + + case NPCM7XX_TIMER_TISR: + npcm7xx_timer_write_tisr(s, value); + return; + + case NPCM7XX_TIMER_WTCR: + npcm7xx_timer_write_wtcr(&s->watchdog_timer, value); + return; + } + + qemu_log_mask(LOG_GUEST_ERROR, + "%s: invalid offset 0x%04" HWADDR_PRIx "\n", + __func__, offset); +} + +static const struct MemoryRegionOps npcm7xx_timer_ops = { + .read = npcm7xx_timer_read, + .write = npcm7xx_timer_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + .unaligned = false, + }, +}; + +/* Called when the QEMU timer expires. */ +static void npcm7xx_timer_expired(void *opaque) +{ + NPCM7xxTimer *t = opaque; + + if (t->tcsr & NPCM7XX_TCSR_CEN) { + npcm7xx_timer_reached_zero(t); + } +} + +static void npcm7xx_timer_enter_reset(Object *obj, ResetType type) +{ + NPCM7xxTimerCtrlState *s = NPCM7XX_TIMER(obj); + int i; + + for (i = 0; i < NPCM7XX_TIMERS_PER_CTRL; i++) { + NPCM7xxTimer *t = &s->timer[i]; + + npcm7xx_timer_clear(&t->base_timer); + t->tcsr = 0x00000005; + t->ticr = 0x00000000; + } + + s->tisr = 0x00000000; + /* + * Set WTCLK to 1(default) and reset all flags except WTRF. + * WTRF is not reset during a core domain reset. + */ + s->watchdog_timer.wtcr = 0x00000400 | (s->watchdog_timer.wtcr & + NPCM7XX_WTCR_WTRF); +} + +static void npcm7xx_watchdog_timer_expired(void *opaque) +{ + NPCM7xxWatchdogTimer *t = opaque; + + if (t->wtcr & NPCM7XX_WTCR_WTE) { + if (t->wtcr & NPCM7XX_WTCR_WTIF) { + if (t->wtcr & NPCM7XX_WTCR_WTRE) { + t->wtcr |= NPCM7XX_WTCR_WTRF; + /* send reset signal to CLK module*/ + qemu_irq_raise(t->reset_signal); + } + } else { + t->wtcr |= NPCM7XX_WTCR_WTIF; + if (t->wtcr & NPCM7XX_WTCR_WTIE) { + /* send interrupt */ + qemu_irq_raise(t->irq); + } + npcm7xx_watchdog_timer_reset_cycles(t, + NPCM7XX_WATCHDOG_INTERRUPT_TO_RESET_CYCLES); + npcm7xx_timer_start(&t->base_timer); + } + } +} + +static void npcm7xx_timer_hold_reset(Object *obj) +{ + NPCM7xxTimerCtrlState *s = NPCM7XX_TIMER(obj); + int i; + + for (i = 0; i < NPCM7XX_TIMERS_PER_CTRL; i++) { + qemu_irq_lower(s->timer[i].irq); + } + qemu_irq_lower(s->watchdog_timer.irq); +} + +static void npcm7xx_timer_init(Object *obj) +{ + NPCM7xxTimerCtrlState *s = NPCM7XX_TIMER(obj); + DeviceState *dev = DEVICE(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + int i; + NPCM7xxWatchdogTimer *w; + + for (i = 0; i < NPCM7XX_TIMERS_PER_CTRL; i++) { + NPCM7xxTimer *t = &s->timer[i]; + t->ctrl = s; + timer_init_ns(&t->base_timer.qtimer, QEMU_CLOCK_VIRTUAL, + npcm7xx_timer_expired, t); + sysbus_init_irq(sbd, &t->irq); + } + + w = &s->watchdog_timer; + w->ctrl = s; + timer_init_ns(&w->base_timer.qtimer, QEMU_CLOCK_VIRTUAL, + npcm7xx_watchdog_timer_expired, w); + sysbus_init_irq(sbd, &w->irq); + + memory_region_init_io(&s->iomem, obj, &npcm7xx_timer_ops, s, + TYPE_NPCM7XX_TIMER, 4 * KiB); + sysbus_init_mmio(sbd, &s->iomem); + qdev_init_gpio_out_named(dev, &w->reset_signal, + NPCM7XX_WATCHDOG_RESET_GPIO_OUT, 1); + s->clock = qdev_init_clock_in(dev, "clock", NULL, NULL, 0); +} + +static const VMStateDescription vmstate_npcm7xx_base_timer = { + .name = "npcm7xx-base-timer", + .version_id = 0, + .minimum_version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_TIMER(qtimer, NPCM7xxBaseTimer), + VMSTATE_INT64(expires_ns, NPCM7xxBaseTimer), 
+ VMSTATE_INT64(remaining_ns, NPCM7xxBaseTimer), + VMSTATE_END_OF_LIST(), + }, +}; + +static const VMStateDescription vmstate_npcm7xx_timer = { + .name = "npcm7xx-timer", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_STRUCT(base_timer, NPCM7xxTimer, + 0, vmstate_npcm7xx_base_timer, + NPCM7xxBaseTimer), + VMSTATE_UINT32(tcsr, NPCM7xxTimer), + VMSTATE_UINT32(ticr, NPCM7xxTimer), + VMSTATE_END_OF_LIST(), + }, +}; + +static const VMStateDescription vmstate_npcm7xx_watchdog_timer = { + .name = "npcm7xx-watchdog-timer", + .version_id = 0, + .minimum_version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_STRUCT(base_timer, NPCM7xxWatchdogTimer, + 0, vmstate_npcm7xx_base_timer, + NPCM7xxBaseTimer), + VMSTATE_UINT32(wtcr, NPCM7xxWatchdogTimer), + VMSTATE_END_OF_LIST(), + }, +}; + +static const VMStateDescription vmstate_npcm7xx_timer_ctrl = { + .name = "npcm7xx-timer-ctrl", + .version_id = 2, + .minimum_version_id = 2, + .fields = (VMStateField[]) { + VMSTATE_UINT32(tisr, NPCM7xxTimerCtrlState), + VMSTATE_CLOCK(clock, NPCM7xxTimerCtrlState), + VMSTATE_STRUCT_ARRAY(timer, NPCM7xxTimerCtrlState, + NPCM7XX_TIMERS_PER_CTRL, 0, vmstate_npcm7xx_timer, + NPCM7xxTimer), + VMSTATE_STRUCT(watchdog_timer, NPCM7xxTimerCtrlState, + 0, vmstate_npcm7xx_watchdog_timer, + NPCM7xxWatchdogTimer), + VMSTATE_END_OF_LIST(), + }, +}; + +static void npcm7xx_timer_class_init(ObjectClass *klass, void *data) +{ + ResettableClass *rc = RESETTABLE_CLASS(klass); + DeviceClass *dc = DEVICE_CLASS(klass); + + QEMU_BUILD_BUG_ON(NPCM7XX_TIMER_REGS_END > NPCM7XX_TIMER_NR_REGS); + + dc->desc = "NPCM7xx Timer Controller"; + dc->vmsd = &vmstate_npcm7xx_timer_ctrl; + rc->phases.enter = npcm7xx_timer_enter_reset; + rc->phases.hold = npcm7xx_timer_hold_reset; +} + +static const TypeInfo npcm7xx_timer_info = { + .name = TYPE_NPCM7XX_TIMER, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(NPCM7xxTimerCtrlState), + .class_init = npcm7xx_timer_class_init, + .instance_init = npcm7xx_timer_init, +}; + +static void npcm7xx_timer_register_type(void) +{ + type_register_static(&npcm7xx_timer_info); +} +type_init(npcm7xx_timer_register_type); diff --git a/hw/timer/nrf51_timer.c b/hw/timer/nrf51_timer.c new file mode 100644 index 000000000..42be79c73 --- /dev/null +++ b/hw/timer/nrf51_timer.c @@ -0,0 +1,404 @@ +/* + * nRF51 System-on-Chip Timer peripheral + * + * Reference Manual: http://infocenter.nordicsemi.com/pdf/nRF51_RM_v3.0.pdf + * Product Spec: http://infocenter.nordicsemi.com/pdf/nRF51822_PS_v3.1.pdf + * + * Copyright 2018 Steffen Görtz + * Copyright (c) 2019 Red Hat, Inc. + * + * This code is licensed under the GPL version 2 or later. See + * the COPYING file in the top-level directory. 
+ */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "hw/arm/nrf51.h" +#include "hw/irq.h" +#include "hw/timer/nrf51_timer.h" +#include "hw/qdev-properties.h" +#include "migration/vmstate.h" +#include "trace.h" + +#define TIMER_CLK_FREQ 16000000UL + +static uint32_t const bitwidths[] = {16, 8, 24, 32}; + +static uint32_t ns_to_ticks(NRF51TimerState *s, int64_t ns) +{ + uint32_t freq = TIMER_CLK_FREQ >> s->prescaler; + + return muldiv64(ns, freq, NANOSECONDS_PER_SECOND); +} + +static int64_t ticks_to_ns(NRF51TimerState *s, uint32_t ticks) +{ + uint32_t freq = TIMER_CLK_FREQ >> s->prescaler; + + return muldiv64(ticks, NANOSECONDS_PER_SECOND, freq); +} + +/* Returns number of ticks since last call */ +static uint32_t update_counter(NRF51TimerState *s, int64_t now) +{ + uint32_t ticks = ns_to_ticks(s, now - s->update_counter_ns); + + s->counter = (s->counter + ticks) % BIT(bitwidths[s->bitmode]); + s->update_counter_ns = now; + return ticks; +} + +/* Assumes s->counter is up-to-date */ +static void rearm_timer(NRF51TimerState *s, int64_t now) +{ + int64_t min_ns = INT64_MAX; + size_t i; + + for (i = 0; i < NRF51_TIMER_REG_COUNT; i++) { + int64_t delta_ns; + + if (s->events_compare[i]) { + continue; /* already expired, ignore it for now */ + } + + if (s->cc[i] <= s->counter) { + delta_ns = ticks_to_ns(s, BIT(bitwidths[s->bitmode]) - + s->counter + s->cc[i]); + } else { + delta_ns = ticks_to_ns(s, s->cc[i] - s->counter); + } + + if (delta_ns < min_ns) { + min_ns = delta_ns; + } + } + + if (min_ns != INT64_MAX) { + timer_mod_ns(&s->timer, now + min_ns); + } +} + +static void update_irq(NRF51TimerState *s) +{ + bool flag = false; + size_t i; + + for (i = 0; i < NRF51_TIMER_REG_COUNT; i++) { + flag |= s->events_compare[i] && extract32(s->inten, 16 + i, 1); + } + qemu_set_irq(s->irq, flag); +} + +static void timer_expire(void *opaque) +{ + NRF51TimerState *s = NRF51_TIMER(opaque); + int64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + uint32_t cc_remaining[NRF51_TIMER_REG_COUNT]; + bool should_stop = false; + uint32_t ticks; + size_t i; + + for (i = 0; i < NRF51_TIMER_REG_COUNT; i++) { + if (s->cc[i] > s->counter) { + cc_remaining[i] = s->cc[i] - s->counter; + } else { + cc_remaining[i] = BIT(bitwidths[s->bitmode]) - + s->counter + s->cc[i]; + } + } + + ticks = update_counter(s, now); + + for (i = 0; i < NRF51_TIMER_REG_COUNT; i++) { + if (cc_remaining[i] <= ticks) { + s->events_compare[i] = 1; + + if (s->shorts & BIT(i)) { + s->timer_start_ns = now; + s->update_counter_ns = s->timer_start_ns; + s->counter = 0; + } + + should_stop |= s->shorts & BIT(i + 8); + } + } + + update_irq(s); + + if (should_stop) { + s->running = false; + timer_del(&s->timer); + } else { + rearm_timer(s, now); + } +} + +static void counter_compare(NRF51TimerState *s) +{ + uint32_t counter = s->counter; + size_t i; + + for (i = 0; i < NRF51_TIMER_REG_COUNT; i++) { + if (counter == s->cc[i]) { + s->events_compare[i] = 1; + + if (s->shorts & BIT(i)) { + s->counter = 0; + } + } + } +} + +static uint64_t nrf51_timer_read(void *opaque, hwaddr offset, unsigned int size) +{ + NRF51TimerState *s = NRF51_TIMER(opaque); + uint64_t r = 0; + + switch (offset) { + case NRF51_TIMER_EVENT_COMPARE_0 ... 
NRF51_TIMER_EVENT_COMPARE_3: + r = s->events_compare[(offset - NRF51_TIMER_EVENT_COMPARE_0) / 4]; + break; + case NRF51_TIMER_REG_SHORTS: + r = s->shorts; + break; + case NRF51_TIMER_REG_INTENSET: + r = s->inten; + break; + case NRF51_TIMER_REG_INTENCLR: + r = s->inten; + break; + case NRF51_TIMER_REG_MODE: + r = s->mode; + break; + case NRF51_TIMER_REG_BITMODE: + r = s->bitmode; + break; + case NRF51_TIMER_REG_PRESCALER: + r = s->prescaler; + break; + case NRF51_TIMER_REG_CC0 ... NRF51_TIMER_REG_CC3: + r = s->cc[(offset - NRF51_TIMER_REG_CC0) / 4]; + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: bad read offset 0x%" HWADDR_PRIx "\n", + __func__, offset); + } + + trace_nrf51_timer_read(s->id, offset, r, size); + + return r; +} + +static void nrf51_timer_write(void *opaque, hwaddr offset, + uint64_t value, unsigned int size) +{ + NRF51TimerState *s = NRF51_TIMER(opaque); + uint64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + size_t idx; + + trace_nrf51_timer_write(s->id, offset, value, size); + + switch (offset) { + case NRF51_TIMER_TASK_START: + if (value == NRF51_TRIGGER_TASK && s->mode == NRF51_TIMER_TIMER) { + s->running = true; + s->timer_start_ns = now - ticks_to_ns(s, s->counter); + s->update_counter_ns = s->timer_start_ns; + rearm_timer(s, now); + } + break; + case NRF51_TIMER_TASK_STOP: + case NRF51_TIMER_TASK_SHUTDOWN: + if (value == NRF51_TRIGGER_TASK) { + s->running = false; + timer_del(&s->timer); + } + break; + case NRF51_TIMER_TASK_COUNT: + if (value == NRF51_TRIGGER_TASK && s->mode == NRF51_TIMER_COUNTER) { + s->counter = (s->counter + 1) % BIT(bitwidths[s->bitmode]); + counter_compare(s); + } + break; + case NRF51_TIMER_TASK_CLEAR: + if (value == NRF51_TRIGGER_TASK) { + s->timer_start_ns = now; + s->update_counter_ns = s->timer_start_ns; + s->counter = 0; + if (s->running) { + rearm_timer(s, now); + } + } + break; + case NRF51_TIMER_TASK_CAPTURE_0 ... NRF51_TIMER_TASK_CAPTURE_3: + if (value == NRF51_TRIGGER_TASK) { + if (s->running) { + timer_expire(s); /* update counter and all state */ + } + + idx = (offset - NRF51_TIMER_TASK_CAPTURE_0) / 4; + s->cc[idx] = s->counter; + trace_nrf51_timer_set_count(s->id, idx, s->counter); + } + break; + case NRF51_TIMER_EVENT_COMPARE_0 ... NRF51_TIMER_EVENT_COMPARE_3: + if (value == NRF51_EVENT_CLEAR) { + s->events_compare[(offset - NRF51_TIMER_EVENT_COMPARE_0) / 4] = 0; + + if (s->running) { + timer_expire(s); /* update counter and all state */ + } + } + break; + case NRF51_TIMER_REG_SHORTS: + s->shorts = value & NRF51_TIMER_REG_SHORTS_MASK; + break; + case NRF51_TIMER_REG_INTENSET: + s->inten |= value & NRF51_TIMER_REG_INTEN_MASK; + break; + case NRF51_TIMER_REG_INTENCLR: + s->inten &= ~(value & NRF51_TIMER_REG_INTEN_MASK); + break; + case NRF51_TIMER_REG_MODE: + s->mode = value; + break; + case NRF51_TIMER_REG_BITMODE: + if (s->mode == NRF51_TIMER_TIMER && s->running) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: erroneous change of BITMODE while timer is running\n", + __func__); + } + s->bitmode = value & NRF51_TIMER_REG_BITMODE_MASK; + break; + case NRF51_TIMER_REG_PRESCALER: + if (s->mode == NRF51_TIMER_TIMER && s->running) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: erroneous change of PRESCALER while timer is running\n", + __func__); + } + s->prescaler = value & NRF51_TIMER_REG_PRESCALER_MASK; + break; + case NRF51_TIMER_REG_CC0 ... 
NRF51_TIMER_REG_CC3: + if (s->running) { + timer_expire(s); /* update counter */ + } + + idx = (offset - NRF51_TIMER_REG_CC0) / 4; + s->cc[idx] = value % BIT(bitwidths[s->bitmode]); + + if (s->running) { + rearm_timer(s, now); + } + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: bad write offset 0x%" HWADDR_PRIx "\n", + __func__, offset); + } + + update_irq(s); +} + +static const MemoryRegionOps rng_ops = { + .read = nrf51_timer_read, + .write = nrf51_timer_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .impl.min_access_size = 4, + .impl.max_access_size = 4, +}; + +static void nrf51_timer_init(Object *obj) +{ + NRF51TimerState *s = NRF51_TIMER(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + + memory_region_init_io(&s->iomem, obj, &rng_ops, s, + TYPE_NRF51_TIMER, NRF51_PERIPHERAL_SIZE); + sysbus_init_mmio(sbd, &s->iomem); + sysbus_init_irq(sbd, &s->irq); + + timer_init_ns(&s->timer, QEMU_CLOCK_VIRTUAL, timer_expire, s); +} + +static void nrf51_timer_reset(DeviceState *dev) +{ + NRF51TimerState *s = NRF51_TIMER(dev); + + timer_del(&s->timer); + s->timer_start_ns = 0x00; + s->update_counter_ns = 0x00; + s->counter = 0x00; + s->running = false; + + memset(s->events_compare, 0x00, sizeof(s->events_compare)); + memset(s->cc, 0x00, sizeof(s->cc)); + + s->shorts = 0x00; + s->inten = 0x00; + s->mode = 0x00; + s->bitmode = 0x00; + s->prescaler = 0x00; +} + +static int nrf51_timer_post_load(void *opaque, int version_id) +{ + NRF51TimerState *s = NRF51_TIMER(opaque); + + if (s->running && s->mode == NRF51_TIMER_TIMER) { + timer_expire(s); + } + return 0; +} + +static const VMStateDescription vmstate_nrf51_timer = { + .name = TYPE_NRF51_TIMER, + .version_id = 1, + .post_load = nrf51_timer_post_load, + .fields = (VMStateField[]) { + VMSTATE_TIMER(timer, NRF51TimerState), + VMSTATE_INT64(timer_start_ns, NRF51TimerState), + VMSTATE_INT64(update_counter_ns, NRF51TimerState), + VMSTATE_UINT32(counter, NRF51TimerState), + VMSTATE_BOOL(running, NRF51TimerState), + VMSTATE_UINT8_ARRAY(events_compare, NRF51TimerState, + NRF51_TIMER_REG_COUNT), + VMSTATE_UINT32_ARRAY(cc, NRF51TimerState, NRF51_TIMER_REG_COUNT), + VMSTATE_UINT32(shorts, NRF51TimerState), + VMSTATE_UINT32(inten, NRF51TimerState), + VMSTATE_UINT32(mode, NRF51TimerState), + VMSTATE_UINT32(bitmode, NRF51TimerState), + VMSTATE_UINT32(prescaler, NRF51TimerState), + VMSTATE_END_OF_LIST() + } +}; + +static Property nrf51_timer_properties[] = { + DEFINE_PROP_UINT8("id", NRF51TimerState, id, 0), + DEFINE_PROP_END_OF_LIST(), +}; + +static void nrf51_timer_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = nrf51_timer_reset; + dc->vmsd = &vmstate_nrf51_timer; + device_class_set_props(dc, nrf51_timer_properties); +} + +static const TypeInfo nrf51_timer_info = { + .name = TYPE_NRF51_TIMER, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(NRF51TimerState), + .instance_init = nrf51_timer_init, + .class_init = nrf51_timer_class_init +}; + +static void nrf51_timer_register_types(void) +{ + type_register_static(&nrf51_timer_info); +} + +type_init(nrf51_timer_register_types) diff --git a/hw/timer/omap_gptimer.c b/hw/timer/omap_gptimer.c new file mode 100644 index 000000000..c40719013 --- /dev/null +++ b/hw/timer/omap_gptimer.c @@ -0,0 +1,514 @@ +/* + * TI OMAP2 general purpose timers emulation. 
+ * + * Copyright (C) 2007-2008 Nokia Corporation + * Written by Andrzej Zaborowski + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 or + * (at your option) any later version of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . + */ + +#include "qemu/osdep.h" +#include "hw/irq.h" +#include "qemu/timer.h" +#include "hw/arm/omap.h" + +/* GP timers */ +struct omap_gp_timer_s { + MemoryRegion iomem; + qemu_irq irq; + qemu_irq wkup; + qemu_irq in; + qemu_irq out; + omap_clk clk; + QEMUTimer *timer; + QEMUTimer *match; + struct omap_target_agent_s *ta; + + int in_val; + int out_val; + int64_t time; + int64_t rate; + int64_t ticks_per_sec; + + int16_t config; + int status; + int it_ena; + int wu_ena; + int enable; + int inout; + int capt2; + int pt; + enum { + gpt_trigger_none, gpt_trigger_overflow, gpt_trigger_both + } trigger; + enum { + gpt_capture_none, gpt_capture_rising, + gpt_capture_falling, gpt_capture_both + } capture; + int scpwm; + int ce; + int pre; + int ptv; + int ar; + int st; + int posted; + uint32_t val; + uint32_t load_val; + uint32_t capture_val[2]; + uint32_t match_val; + int capt_num; + + uint16_t writeh; /* LSB */ + uint16_t readh; /* MSB */ +}; + +#define GPT_TCAR_IT (1 << 2) +#define GPT_OVF_IT (1 << 1) +#define GPT_MAT_IT (1 << 0) + +static inline void omap_gp_timer_intr(struct omap_gp_timer_s *timer, int it) +{ + if (timer->it_ena & it) { + if (!timer->status) + qemu_irq_raise(timer->irq); + + timer->status |= it; + /* Or are the status bits set even when masked? + * i.e. is masking applied before or after the status register? 
*/ + } + + if (timer->wu_ena & it) + qemu_irq_pulse(timer->wkup); +} + +static inline void omap_gp_timer_out(struct omap_gp_timer_s *timer, int level) +{ + if (!timer->inout && timer->out_val != level) { + timer->out_val = level; + qemu_set_irq(timer->out, level); + } +} + +static inline uint32_t omap_gp_timer_read(struct omap_gp_timer_s *timer) +{ + uint64_t distance; + + if (timer->st && timer->rate) { + distance = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - timer->time; + distance = muldiv64(distance, timer->rate, timer->ticks_per_sec); + + if (distance >= 0xffffffff - timer->val) + return 0xffffffff; + else + return timer->val + distance; + } else + return timer->val; +} + +static inline void omap_gp_timer_sync(struct omap_gp_timer_s *timer) +{ + if (timer->st) { + timer->val = omap_gp_timer_read(timer); + timer->time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + } +} + +static inline void omap_gp_timer_update(struct omap_gp_timer_s *timer) +{ + int64_t expires, matches; + + if (timer->st && timer->rate) { + expires = muldiv64(0x100000000ll - timer->val, + timer->ticks_per_sec, timer->rate); + timer_mod(timer->timer, timer->time + expires); + + if (timer->ce && timer->match_val >= timer->val) { + matches = muldiv64(timer->ticks_per_sec, + timer->match_val - timer->val, timer->rate); + timer_mod(timer->match, timer->time + matches); + } else + timer_del(timer->match); + } else { + timer_del(timer->timer); + timer_del(timer->match); + omap_gp_timer_out(timer, timer->scpwm); + } +} + +static inline void omap_gp_timer_trigger(struct omap_gp_timer_s *timer) +{ + if (timer->pt) + /* TODO in overflow-and-match mode if the first event to + * occur is the match, don't toggle. */ + omap_gp_timer_out(timer, !timer->out_val); + else + /* TODO inverted pulse on timer->out_val == 1? */ + qemu_irq_pulse(timer->out); +} + +static void omap_gp_timer_tick(void *opaque) +{ + struct omap_gp_timer_s *timer = (struct omap_gp_timer_s *) opaque; + + if (!timer->ar) { + timer->st = 0; + timer->val = 0; + } else { + timer->val = timer->load_val; + timer->time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + } + + if (timer->trigger == gpt_trigger_overflow || + timer->trigger == gpt_trigger_both) + omap_gp_timer_trigger(timer); + + omap_gp_timer_intr(timer, GPT_OVF_IT); + omap_gp_timer_update(timer); +} + +static void omap_gp_timer_match(void *opaque) +{ + struct omap_gp_timer_s *timer = (struct omap_gp_timer_s *) opaque; + + if (timer->trigger == gpt_trigger_both) + omap_gp_timer_trigger(timer); + + omap_gp_timer_intr(timer, GPT_MAT_IT); +} + +static void omap_gp_timer_input(void *opaque, int line, int on) +{ + struct omap_gp_timer_s *s = (struct omap_gp_timer_s *) opaque; + int trigger; + + switch (s->capture) { + default: + case gpt_capture_none: + trigger = 0; + break; + case gpt_capture_rising: + trigger = !s->in_val && on; + break; + case gpt_capture_falling: + trigger = s->in_val && !on; + break; + case gpt_capture_both: + trigger = (s->in_val == !on); + break; + } + s->in_val = on; + + if (s->inout && trigger && s->capt_num < 2) { + s->capture_val[s->capt_num] = omap_gp_timer_read(s); + + if (s->capt2 == s->capt_num ++) + omap_gp_timer_intr(s, GPT_TCAR_IT); + } +} + +static void omap_gp_timer_clk_update(void *opaque, int line, int on) +{ + struct omap_gp_timer_s *timer = (struct omap_gp_timer_s *) opaque; + + omap_gp_timer_sync(timer); + timer->rate = on ? 
omap_clk_getrate(timer->clk) : 0; + omap_gp_timer_update(timer); +} + +static void omap_gp_timer_clk_setup(struct omap_gp_timer_s *timer) +{ + omap_clk_adduser(timer->clk, + qemu_allocate_irq(omap_gp_timer_clk_update, timer, 0)); + timer->rate = omap_clk_getrate(timer->clk); +} + +void omap_gp_timer_reset(struct omap_gp_timer_s *s) +{ + s->config = 0x000; + s->status = 0; + s->it_ena = 0; + s->wu_ena = 0; + s->inout = 0; + s->capt2 = 0; + s->capt_num = 0; + s->pt = 0; + s->trigger = gpt_trigger_none; + s->capture = gpt_capture_none; + s->scpwm = 0; + s->ce = 0; + s->pre = 0; + s->ptv = 0; + s->ar = 0; + s->st = 0; + s->posted = 1; + s->val = 0x00000000; + s->load_val = 0x00000000; + s->capture_val[0] = 0x00000000; + s->capture_val[1] = 0x00000000; + s->match_val = 0x00000000; + omap_gp_timer_update(s); +} + +static uint32_t omap_gp_timer_readw(void *opaque, hwaddr addr) +{ + struct omap_gp_timer_s *s = (struct omap_gp_timer_s *) opaque; + + switch (addr) { + case 0x00: /* TIDR */ + return 0x21; + + case 0x10: /* TIOCP_CFG */ + return s->config; + + case 0x14: /* TISTAT */ + /* ??? When's this bit reset? */ + return 1; /* RESETDONE */ + + case 0x18: /* TISR */ + return s->status; + + case 0x1c: /* TIER */ + return s->it_ena; + + case 0x20: /* TWER */ + return s->wu_ena; + + case 0x24: /* TCLR */ + return (s->inout << 14) | + (s->capt2 << 13) | + (s->pt << 12) | + (s->trigger << 10) | + (s->capture << 8) | + (s->scpwm << 7) | + (s->ce << 6) | + (s->pre << 5) | + (s->ptv << 2) | + (s->ar << 1) | + (s->st << 0); + + case 0x28: /* TCRR */ + return omap_gp_timer_read(s); + + case 0x2c: /* TLDR */ + return s->load_val; + + case 0x30: /* TTGR */ + return 0xffffffff; + + case 0x34: /* TWPS */ + return 0x00000000; /* No posted writes pending. */ + + case 0x38: /* TMAR */ + return s->match_val; + + case 0x3c: /* TCAR1 */ + return s->capture_val[0]; + + case 0x40: /* TSICR */ + return s->posted << 2; + + case 0x44: /* TCAR2 */ + return s->capture_val[1]; + } + + OMAP_BAD_REG(addr); + return 0; +} + +static uint32_t omap_gp_timer_readh(void *opaque, hwaddr addr) +{ + struct omap_gp_timer_s *s = (struct omap_gp_timer_s *) opaque; + uint32_t ret; + + if (addr & 2) + return s->readh; + else { + ret = omap_gp_timer_readw(opaque, addr); + s->readh = ret >> 16; + return ret & 0xffff; + } +} + +static void omap_gp_timer_write(void *opaque, hwaddr addr, + uint32_t value) +{ + struct omap_gp_timer_s *s = (struct omap_gp_timer_s *) opaque; + + switch (addr) { + case 0x00: /* TIDR */ + case 0x14: /* TISTAT */ + case 0x34: /* TWPS */ + case 0x3c: /* TCAR1 */ + case 0x44: /* TCAR2 */ + OMAP_RO_REG(addr); + break; + + case 0x10: /* TIOCP_CFG */ + s->config = value & 0x33d; + if (((value >> 3) & 3) == 3) /* IDLEMODE */ + fprintf(stderr, "%s: illegal IDLEMODE value in TIOCP_CFG\n", + __func__); + if (value & 2) /* SOFTRESET */ + omap_gp_timer_reset(s); + break; + + case 0x18: /* TISR */ + if (value & GPT_TCAR_IT) + s->capt_num = 0; + if (s->status && !(s->status &= ~value)) + qemu_irq_lower(s->irq); + break; + + case 0x1c: /* TIER */ + s->it_ena = value & 7; + break; + + case 0x20: /* TWER */ + s->wu_ena = value & 7; + break; + + case 0x24: /* TCLR */ + omap_gp_timer_sync(s); + s->inout = (value >> 14) & 1; + s->capt2 = (value >> 13) & 1; + s->pt = (value >> 12) & 1; + s->trigger = (value >> 10) & 3; + if (s->capture == gpt_capture_none && + ((value >> 8) & 3) != gpt_capture_none) + s->capt_num = 0; + s->capture = (value >> 8) & 3; + s->scpwm = (value >> 7) & 1; + s->ce = (value >> 6) & 1; + s->pre = (value >> 5) & 1; 
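+        /* PTV selects the prescaler: with PRE set the timer advances
+           once every 2 ^ (PTV + 1) functional-clock cycles, e.g. PTV = 7
+           divides the clock by 256 (see ticks_per_sec below). */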
+ s->ptv = (value >> 2) & 7; + s->ar = (value >> 1) & 1; + s->st = (value >> 0) & 1; + if (s->inout && s->trigger != gpt_trigger_none) + fprintf(stderr, "%s: GP timer pin must be an output " + "for this trigger mode\n", __func__); + if (!s->inout && s->capture != gpt_capture_none) + fprintf(stderr, "%s: GP timer pin must be an input " + "for this capture mode\n", __func__); + if (s->trigger == gpt_trigger_none) + omap_gp_timer_out(s, s->scpwm); + /* TODO: make sure this doesn't overflow 32-bits */ + s->ticks_per_sec = NANOSECONDS_PER_SECOND << (s->pre ? s->ptv + 1 : 0); + omap_gp_timer_update(s); + break; + + case 0x28: /* TCRR */ + s->time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + s->val = value; + omap_gp_timer_update(s); + break; + + case 0x2c: /* TLDR */ + s->load_val = value; + break; + + case 0x30: /* TTGR */ + s->time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + s->val = s->load_val; + omap_gp_timer_update(s); + break; + + case 0x38: /* TMAR */ + omap_gp_timer_sync(s); + s->match_val = value; + omap_gp_timer_update(s); + break; + + case 0x40: /* TSICR */ + s->posted = (value >> 2) & 1; + if (value & 2) /* How much exactly are we supposed to reset? */ + omap_gp_timer_reset(s); + break; + + default: + OMAP_BAD_REG(addr); + } +} + +static void omap_gp_timer_writeh(void *opaque, hwaddr addr, + uint32_t value) +{ + struct omap_gp_timer_s *s = (struct omap_gp_timer_s *) opaque; + + if (addr & 2) + omap_gp_timer_write(opaque, addr, (value << 16) | s->writeh); + else + s->writeh = (uint16_t) value; +} + +static uint64_t omap_gp_timer_readfn(void *opaque, hwaddr addr, + unsigned size) +{ + switch (size) { + case 1: + return omap_badwidth_read32(opaque, addr); + case 2: + return omap_gp_timer_readh(opaque, addr); + case 4: + return omap_gp_timer_readw(opaque, addr); + default: + g_assert_not_reached(); + } +} + +static void omap_gp_timer_writefn(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + switch (size) { + case 1: + omap_badwidth_write32(opaque, addr, value); + break; + case 2: + omap_gp_timer_writeh(opaque, addr, value); + break; + case 4: + omap_gp_timer_write(opaque, addr, value); + break; + default: + g_assert_not_reached(); + } +} + +static const MemoryRegionOps omap_gp_timer_ops = { + .read = omap_gp_timer_readfn, + .write = omap_gp_timer_writefn, + .valid.min_access_size = 1, + .valid.max_access_size = 4, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +struct omap_gp_timer_s *omap_gp_timer_init(struct omap_target_agent_s *ta, + qemu_irq irq, omap_clk fclk, omap_clk iclk) +{ + struct omap_gp_timer_s *s = g_new0(struct omap_gp_timer_s, 1); + + s->ta = ta; + s->irq = irq; + s->clk = fclk; + s->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, omap_gp_timer_tick, s); + s->match = timer_new_ns(QEMU_CLOCK_VIRTUAL, omap_gp_timer_match, s); + s->in = qemu_allocate_irq(omap_gp_timer_input, s, 0); + omap_gp_timer_reset(s); + omap_gp_timer_clk_setup(s); + + memory_region_init_io(&s->iomem, NULL, &omap_gp_timer_ops, s, "omap.gptimer", + omap_l4_region_size(ta, 0)); + omap_l4_attach(ta, 0, &s->iomem); + + return s; +} diff --git a/hw/timer/omap_synctimer.c b/hw/timer/omap_synctimer.c new file mode 100644 index 000000000..72b997939 --- /dev/null +++ b/hw/timer/omap_synctimer.c @@ -0,0 +1,110 @@ +/* + * TI OMAP2 32kHz sync timer emulation. 
+ * + * Copyright (C) 2007-2008 Nokia Corporation + * Written by Andrzej Zaborowski + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 or + * (at your option) any later version of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . + */ +#include "qemu/osdep.h" +#include "qemu/timer.h" +#include "hw/arm/omap.h" +struct omap_synctimer_s { + MemoryRegion iomem; + uint32_t val; + uint16_t readh; +}; + +/* 32-kHz Sync Timer of the OMAP2 */ +static uint32_t omap_synctimer_read(struct omap_synctimer_s *s) { + return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), 0x8000, + NANOSECONDS_PER_SECOND); +} + +void omap_synctimer_reset(struct omap_synctimer_s *s) +{ + s->val = omap_synctimer_read(s); +} + +static uint32_t omap_synctimer_readw(void *opaque, hwaddr addr) +{ + struct omap_synctimer_s *s = (struct omap_synctimer_s *) opaque; + + switch (addr) { + case 0x00: /* 32KSYNCNT_REV */ + return 0x21; + + case 0x10: /* CR */ + return omap_synctimer_read(s) - s->val; + } + + OMAP_BAD_REG(addr); + return 0; +} + +static uint32_t omap_synctimer_readh(void *opaque, hwaddr addr) +{ + struct omap_synctimer_s *s = (struct omap_synctimer_s *) opaque; + uint32_t ret; + + if (addr & 2) + return s->readh; + else { + ret = omap_synctimer_readw(opaque, addr); + s->readh = ret >> 16; + return ret & 0xffff; + } +} + +static uint64_t omap_synctimer_readfn(void *opaque, hwaddr addr, + unsigned size) +{ + switch (size) { + case 1: + return omap_badwidth_read32(opaque, addr); + case 2: + return omap_synctimer_readh(opaque, addr); + case 4: + return omap_synctimer_readw(opaque, addr); + default: + g_assert_not_reached(); + } +} + +static void omap_synctimer_writefn(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + OMAP_BAD_REG(addr); +} + +static const MemoryRegionOps omap_synctimer_ops = { + .read = omap_synctimer_readfn, + .write = omap_synctimer_writefn, + .valid.min_access_size = 1, + .valid.max_access_size = 4, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +struct omap_synctimer_s *omap_synctimer_init(struct omap_target_agent_s *ta, + struct omap_mpu_state_s *mpu, omap_clk fclk, omap_clk iclk) +{ + struct omap_synctimer_s *s = g_malloc0(sizeof(*s)); + + omap_synctimer_reset(s); + memory_region_init_io(&s->iomem, NULL, &omap_synctimer_ops, s, "omap.synctimer", + omap_l4_region_size(ta, 0)); + omap_l4_attach(ta, 0, &s->iomem); + + return s; +} diff --git a/hw/timer/pxa2xx_timer.c b/hw/timer/pxa2xx_timer.c new file mode 100644 index 000000000..2ae5ae321 --- /dev/null +++ b/hw/timer/pxa2xx_timer.c @@ -0,0 +1,621 @@ +/* + * Intel XScale PXA255/270 OS Timers. + * + * Copyright (c) 2006 Openedhand Ltd. + * Copyright (c) 2006 Thorsten Zitterell + * + * This code is licensed under the GPL. 
+ */ + +#include "qemu/osdep.h" +#include "hw/irq.h" +#include "hw/qdev-properties.h" +#include "qemu/timer.h" +#include "sysemu/runstate.h" +#include "hw/arm/pxa.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "qom/object.h" + +#define OSMR0 0x00 +#define OSMR1 0x04 +#define OSMR2 0x08 +#define OSMR3 0x0c +#define OSMR4 0x80 +#define OSMR5 0x84 +#define OSMR6 0x88 +#define OSMR7 0x8c +#define OSMR8 0x90 +#define OSMR9 0x94 +#define OSMR10 0x98 +#define OSMR11 0x9c +#define OSCR 0x10 /* OS Timer Count */ +#define OSCR4 0x40 +#define OSCR5 0x44 +#define OSCR6 0x48 +#define OSCR7 0x4c +#define OSCR8 0x50 +#define OSCR9 0x54 +#define OSCR10 0x58 +#define OSCR11 0x5c +#define OSSR 0x14 /* Timer status register */ +#define OWER 0x18 +#define OIER 0x1c /* Interrupt enable register 3-0 to E3-E0 */ +#define OMCR4 0xc0 /* OS Match Control registers */ +#define OMCR5 0xc4 +#define OMCR6 0xc8 +#define OMCR7 0xcc +#define OMCR8 0xd0 +#define OMCR9 0xd4 +#define OMCR10 0xd8 +#define OMCR11 0xdc +#define OSNR 0x20 + +#define PXA25X_FREQ 3686400 /* 3.6864 MHz */ +#define PXA27X_FREQ 3250000 /* 3.25 MHz */ + +static int pxa2xx_timer4_freq[8] = { + [0] = 0, + [1] = 32768, + [2] = 1000, + [3] = 1, + [4] = 1000000, + /* [5] is the "Externally supplied clock". Assign if necessary. */ + [5 ... 7] = 0, +}; + +#define TYPE_PXA2XX_TIMER "pxa2xx-timer" +OBJECT_DECLARE_SIMPLE_TYPE(PXA2xxTimerInfo, PXA2XX_TIMER) + + +typedef struct { + uint32_t value; + qemu_irq irq; + QEMUTimer *qtimer; + int num; + PXA2xxTimerInfo *info; +} PXA2xxTimer0; + +typedef struct { + PXA2xxTimer0 tm; + int32_t oldclock; + int32_t clock; + uint64_t lastload; + uint32_t freq; + uint32_t control; +} PXA2xxTimer4; + +struct PXA2xxTimerInfo { + SysBusDevice parent_obj; + + MemoryRegion iomem; + uint32_t flags; + + int32_t clock; + int32_t oldclock; + uint64_t lastload; + uint32_t freq; + PXA2xxTimer0 timer[4]; + uint32_t events; + uint32_t irq_enabled; + uint32_t reset3; + uint32_t snapshot; + + qemu_irq irq4; + PXA2xxTimer4 tm4[8]; +}; + +#define PXA2XX_TIMER_HAVE_TM4 0 + +static inline int pxa2xx_timer_has_tm4(PXA2xxTimerInfo *s) +{ + return s->flags & (1 << PXA2XX_TIMER_HAVE_TM4); +} + +static void pxa2xx_timer_update(void *opaque, uint64_t now_qemu) +{ + PXA2xxTimerInfo *s = (PXA2xxTimerInfo *) opaque; + int i; + uint32_t now_vm; + uint64_t new_qemu; + + now_vm = s->clock + + muldiv64(now_qemu - s->lastload, s->freq, NANOSECONDS_PER_SECOND); + + for (i = 0; i < 4; i ++) { + new_qemu = now_qemu + muldiv64((uint32_t) (s->timer[i].value - now_vm), + NANOSECONDS_PER_SECOND, s->freq); + timer_mod(s->timer[i].qtimer, new_qemu); + } +} + +static void pxa2xx_timer_update4(void *opaque, uint64_t now_qemu, int n) +{ + PXA2xxTimerInfo *s = (PXA2xxTimerInfo *) opaque; + uint32_t now_vm; + uint64_t new_qemu; + static const int counters[8] = { 0, 0, 0, 0, 4, 4, 6, 6 }; + int counter; + + assert(n < ARRAY_SIZE(counters)); + if (s->tm4[n].control & (1 << 7)) + counter = n; + else + counter = counters[n]; + + if (!s->tm4[counter].freq) { + timer_del(s->tm4[n].tm.qtimer); + return; + } + + now_vm = s->tm4[counter].clock + muldiv64(now_qemu - + s->tm4[counter].lastload, + s->tm4[counter].freq, NANOSECONDS_PER_SECOND); + + new_qemu = now_qemu + muldiv64((uint32_t) (s->tm4[n].tm.value - now_vm), + NANOSECONDS_PER_SECOND, s->tm4[counter].freq); + timer_mod(s->tm4[n].tm.qtimer, new_qemu); +} + +static uint64_t pxa2xx_timer_read(void *opaque, hwaddr offset, + unsigned size) +{ + PXA2xxTimerInfo 
*s = (PXA2xxTimerInfo *) opaque; + int tm = 0; + + switch (offset) { + case OSMR3: tm ++; + /* fall through */ + case OSMR2: tm ++; + /* fall through */ + case OSMR1: tm ++; + /* fall through */ + case OSMR0: + return s->timer[tm].value; + case OSMR11: tm ++; + /* fall through */ + case OSMR10: tm ++; + /* fall through */ + case OSMR9: tm ++; + /* fall through */ + case OSMR8: tm ++; + /* fall through */ + case OSMR7: tm ++; + /* fall through */ + case OSMR6: tm ++; + /* fall through */ + case OSMR5: tm ++; + /* fall through */ + case OSMR4: + if (!pxa2xx_timer_has_tm4(s)) + goto badreg; + return s->tm4[tm].tm.value; + case OSCR: + return s->clock + muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - + s->lastload, s->freq, NANOSECONDS_PER_SECOND); + case OSCR11: tm ++; + /* fall through */ + case OSCR10: tm ++; + /* fall through */ + case OSCR9: tm ++; + /* fall through */ + case OSCR8: tm ++; + /* fall through */ + case OSCR7: tm ++; + /* fall through */ + case OSCR6: tm ++; + /* fall through */ + case OSCR5: tm ++; + /* fall through */ + case OSCR4: + if (!pxa2xx_timer_has_tm4(s)) + goto badreg; + + if ((tm == 9 - 4 || tm == 11 - 4) && (s->tm4[tm].control & (1 << 9))) { + if (s->tm4[tm - 1].freq) + s->snapshot = s->tm4[tm - 1].clock + muldiv64( + qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - + s->tm4[tm - 1].lastload, + s->tm4[tm - 1].freq, NANOSECONDS_PER_SECOND); + else + s->snapshot = s->tm4[tm - 1].clock; + } + + if (!s->tm4[tm].freq) + return s->tm4[tm].clock; + return s->tm4[tm].clock + + muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - + s->tm4[tm].lastload, s->tm4[tm].freq, + NANOSECONDS_PER_SECOND); + case OIER: + return s->irq_enabled; + case OSSR: /* Status register */ + return s->events; + case OWER: + return s->reset3; + case OMCR11: tm ++; + /* fall through */ + case OMCR10: tm ++; + /* fall through */ + case OMCR9: tm ++; + /* fall through */ + case OMCR8: tm ++; + /* fall through */ + case OMCR7: tm ++; + /* fall through */ + case OMCR6: tm ++; + /* fall through */ + case OMCR5: tm ++; + /* fall through */ + case OMCR4: + if (!pxa2xx_timer_has_tm4(s)) + goto badreg; + return s->tm4[tm].control; + case OSNR: + return s->snapshot; + default: + qemu_log_mask(LOG_UNIMP, + "%s: unknown register 0x%02" HWADDR_PRIx "\n", + __func__, offset); + break; + badreg: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: incorrect register 0x%02" HWADDR_PRIx "\n", + __func__, offset); + } + + return 0; +} + +static void pxa2xx_timer_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + int i, tm = 0; + PXA2xxTimerInfo *s = (PXA2xxTimerInfo *) opaque; + + switch (offset) { + case OSMR3: tm ++; + /* fall through */ + case OSMR2: tm ++; + /* fall through */ + case OSMR1: tm ++; + /* fall through */ + case OSMR0: + s->timer[tm].value = value; + pxa2xx_timer_update(s, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL)); + break; + case OSMR11: tm ++; + /* fall through */ + case OSMR10: tm ++; + /* fall through */ + case OSMR9: tm ++; + /* fall through */ + case OSMR8: tm ++; + /* fall through */ + case OSMR7: tm ++; + /* fall through */ + case OSMR6: tm ++; + /* fall through */ + case OSMR5: tm ++; + /* fall through */ + case OSMR4: + if (!pxa2xx_timer_has_tm4(s)) + goto badreg; + s->tm4[tm].tm.value = value; + pxa2xx_timer_update4(s, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tm); + break; + case OSCR: + s->oldclock = s->clock; + s->lastload = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + s->clock = value; + pxa2xx_timer_update(s, s->lastload); + break; + case OSCR11: tm ++; + /* fall through */ + case OSCR10: tm 
++; + /* fall through */ + case OSCR9: tm ++; + /* fall through */ + case OSCR8: tm ++; + /* fall through */ + case OSCR7: tm ++; + /* fall through */ + case OSCR6: tm ++; + /* fall through */ + case OSCR5: tm ++; + /* fall through */ + case OSCR4: + if (!pxa2xx_timer_has_tm4(s)) + goto badreg; + s->tm4[tm].oldclock = s->tm4[tm].clock; + s->tm4[tm].lastload = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + s->tm4[tm].clock = value; + pxa2xx_timer_update4(s, s->tm4[tm].lastload, tm); + break; + case OIER: + s->irq_enabled = value & 0xfff; + break; + case OSSR: /* Status register */ + value &= s->events; + s->events &= ~value; + for (i = 0; i < 4; i ++, value >>= 1) + if (value & 1) + qemu_irq_lower(s->timer[i].irq); + if (pxa2xx_timer_has_tm4(s) && !(s->events & 0xff0) && value) + qemu_irq_lower(s->irq4); + break; + case OWER: /* XXX: Reset on OSMR3 match? */ + s->reset3 = value; + break; + case OMCR7: tm ++; + /* fall through */ + case OMCR6: tm ++; + /* fall through */ + case OMCR5: tm ++; + /* fall through */ + case OMCR4: + if (!pxa2xx_timer_has_tm4(s)) + goto badreg; + s->tm4[tm].control = value & 0x0ff; + /* XXX Stop if running (shouldn't happen) */ + if ((value & (1 << 7)) || tm == 0) + s->tm4[tm].freq = pxa2xx_timer4_freq[value & 7]; + else { + s->tm4[tm].freq = 0; + pxa2xx_timer_update4(s, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tm); + } + break; + case OMCR11: tm ++; + /* fall through */ + case OMCR10: tm ++; + /* fall through */ + case OMCR9: tm ++; + /* fall through */ + case OMCR8: tm += 4; + if (!pxa2xx_timer_has_tm4(s)) + goto badreg; + s->tm4[tm].control = value & 0x3ff; + /* XXX Stop if running (shouldn't happen) */ + if ((value & (1 << 7)) || !(tm & 1)) + s->tm4[tm].freq = + pxa2xx_timer4_freq[(value & (1 << 8)) ? 0 : (value & 7)]; + else { + s->tm4[tm].freq = 0; + pxa2xx_timer_update4(s, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tm); + } + break; + default: + qemu_log_mask(LOG_UNIMP, + "%s: unknown register 0x%02" HWADDR_PRIx " " + "(value 0x%08" PRIx64 ")\n", __func__, offset, value); + break; + badreg: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: incorrect register 0x%02" HWADDR_PRIx " " + "(value 0x%08" PRIx64 ")\n", __func__, offset, value); + } +} + +static const MemoryRegionOps pxa2xx_timer_ops = { + .read = pxa2xx_timer_read, + .write = pxa2xx_timer_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void pxa2xx_timer_tick(void *opaque) +{ + PXA2xxTimer0 *t = (PXA2xxTimer0 *) opaque; + PXA2xxTimerInfo *i = t->info; + + if (i->irq_enabled & (1 << t->num)) { + i->events |= 1 << t->num; + qemu_irq_raise(t->irq); + } + + if (t->num == 3) + if (i->reset3 & 1) { + i->reset3 = 0; + qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); + } +} + +static void pxa2xx_timer_tick4(void *opaque) +{ + PXA2xxTimer4 *t = (PXA2xxTimer4 *) opaque; + PXA2xxTimerInfo *i = (PXA2xxTimerInfo *) t->tm.info; + + pxa2xx_timer_tick(&t->tm); + if (t->control & (1 << 3)) + t->clock = 0; + if (t->control & (1 << 6)) + pxa2xx_timer_update4(i, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), t->tm.num - 4); + if (i->events & 0xff0) + qemu_irq_raise(i->irq4); +} + +static int pxa25x_timer_post_load(void *opaque, int version_id) +{ + PXA2xxTimerInfo *s = (PXA2xxTimerInfo *) opaque; + int64_t now; + int i; + + now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + pxa2xx_timer_update(s, now); + + if (pxa2xx_timer_has_tm4(s)) + for (i = 0; i < 8; i ++) + pxa2xx_timer_update4(s, now, i); + + return 0; +} + +static void pxa2xx_timer_init(Object *obj) +{ + PXA2xxTimerInfo *s = PXA2XX_TIMER(obj); + SysBusDevice *dev = 
SYS_BUS_DEVICE(obj); + + s->irq_enabled = 0; + s->oldclock = 0; + s->clock = 0; + s->lastload = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + s->reset3 = 0; + + memory_region_init_io(&s->iomem, obj, &pxa2xx_timer_ops, s, + "pxa2xx-timer", 0x00001000); + sysbus_init_mmio(dev, &s->iomem); +} + +static void pxa2xx_timer_realize(DeviceState *dev, Error **errp) +{ + PXA2xxTimerInfo *s = PXA2XX_TIMER(dev); + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + int i; + + for (i = 0; i < 4; i ++) { + s->timer[i].value = 0; + sysbus_init_irq(sbd, &s->timer[i].irq); + s->timer[i].info = s; + s->timer[i].num = i; + s->timer[i].qtimer = timer_new_ns(QEMU_CLOCK_VIRTUAL, + pxa2xx_timer_tick, &s->timer[i]); + } + + if (s->flags & (1 << PXA2XX_TIMER_HAVE_TM4)) { + sysbus_init_irq(sbd, &s->irq4); + + for (i = 0; i < 8; i ++) { + s->tm4[i].tm.value = 0; + s->tm4[i].tm.info = s; + s->tm4[i].tm.num = i + 4; + s->tm4[i].freq = 0; + s->tm4[i].control = 0x0; + s->tm4[i].tm.qtimer = timer_new_ns(QEMU_CLOCK_VIRTUAL, + pxa2xx_timer_tick4, &s->tm4[i]); + } + } +} + +static const VMStateDescription vmstate_pxa2xx_timer0_regs = { + .name = "pxa2xx_timer0", + .version_id = 2, + .minimum_version_id = 2, + .fields = (VMStateField[]) { + VMSTATE_UINT32(value, PXA2xxTimer0), + VMSTATE_END_OF_LIST(), + }, +}; + +static const VMStateDescription vmstate_pxa2xx_timer4_regs = { + .name = "pxa2xx_timer4", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_STRUCT(tm, PXA2xxTimer4, 1, + vmstate_pxa2xx_timer0_regs, PXA2xxTimer0), + VMSTATE_INT32(oldclock, PXA2xxTimer4), + VMSTATE_INT32(clock, PXA2xxTimer4), + VMSTATE_UINT64(lastload, PXA2xxTimer4), + VMSTATE_UINT32(freq, PXA2xxTimer4), + VMSTATE_UINT32(control, PXA2xxTimer4), + VMSTATE_END_OF_LIST(), + }, +}; + +static bool pxa2xx_timer_has_tm4_test(void *opaque, int version_id) +{ + return pxa2xx_timer_has_tm4(opaque); +} + +static const VMStateDescription vmstate_pxa2xx_timer_regs = { + .name = "pxa2xx_timer", + .version_id = 1, + .minimum_version_id = 1, + .post_load = pxa25x_timer_post_load, + .fields = (VMStateField[]) { + VMSTATE_INT32(clock, PXA2xxTimerInfo), + VMSTATE_INT32(oldclock, PXA2xxTimerInfo), + VMSTATE_UINT64(lastload, PXA2xxTimerInfo), + VMSTATE_STRUCT_ARRAY(timer, PXA2xxTimerInfo, 4, 1, + vmstate_pxa2xx_timer0_regs, PXA2xxTimer0), + VMSTATE_UINT32(events, PXA2xxTimerInfo), + VMSTATE_UINT32(irq_enabled, PXA2xxTimerInfo), + VMSTATE_UINT32(reset3, PXA2xxTimerInfo), + VMSTATE_UINT32(snapshot, PXA2xxTimerInfo), + VMSTATE_STRUCT_ARRAY_TEST(tm4, PXA2xxTimerInfo, 8, + pxa2xx_timer_has_tm4_test, 0, + vmstate_pxa2xx_timer4_regs, PXA2xxTimer4), + VMSTATE_END_OF_LIST(), + } +}; + +static Property pxa25x_timer_dev_properties[] = { + DEFINE_PROP_UINT32("freq", PXA2xxTimerInfo, freq, PXA25X_FREQ), + DEFINE_PROP_BIT("tm4", PXA2xxTimerInfo, flags, + PXA2XX_TIMER_HAVE_TM4, false), + DEFINE_PROP_END_OF_LIST(), +}; + +static void pxa25x_timer_dev_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->desc = "PXA25x timer"; + device_class_set_props(dc, pxa25x_timer_dev_properties); +} + +static const TypeInfo pxa25x_timer_dev_info = { + .name = "pxa25x-timer", + .parent = TYPE_PXA2XX_TIMER, + .instance_size = sizeof(PXA2xxTimerInfo), + .class_init = pxa25x_timer_dev_class_init, +}; + +static Property pxa27x_timer_dev_properties[] = { + DEFINE_PROP_UINT32("freq", PXA2xxTimerInfo, freq, PXA27X_FREQ), + DEFINE_PROP_BIT("tm4", PXA2xxTimerInfo, flags, + PXA2XX_TIMER_HAVE_TM4, true), + DEFINE_PROP_END_OF_LIST(), +}; + +static 
void pxa27x_timer_dev_class_init(ObjectClass *klass, void *data)
+{
+    DeviceClass *dc = DEVICE_CLASS(klass);
+
+    dc->desc = "PXA27x timer";
+    device_class_set_props(dc, pxa27x_timer_dev_properties);
+}
+
+static const TypeInfo pxa27x_timer_dev_info = {
+    .name = "pxa27x-timer",
+    .parent = TYPE_PXA2XX_TIMER,
+    .instance_size = sizeof(PXA2xxTimerInfo),
+    .class_init = pxa27x_timer_dev_class_init,
+};
+
+static void pxa2xx_timer_class_init(ObjectClass *oc, void *data)
+{
+    DeviceClass *dc = DEVICE_CLASS(oc);
+
+    dc->realize = pxa2xx_timer_realize;
+    dc->vmsd = &vmstate_pxa2xx_timer_regs;
+}
+
+static const TypeInfo pxa2xx_timer_type_info = {
+    .name = TYPE_PXA2XX_TIMER,
+    .parent = TYPE_SYS_BUS_DEVICE,
+    .instance_size = sizeof(PXA2xxTimerInfo),
+    .instance_init = pxa2xx_timer_init,
+    .abstract = true,
+    .class_init = pxa2xx_timer_class_init,
+};
+
+static void pxa2xx_timer_register_types(void)
+{
+    type_register_static(&pxa2xx_timer_type_info);
+    type_register_static(&pxa25x_timer_dev_info);
+    type_register_static(&pxa27x_timer_dev_info);
+}
+
+type_init(pxa2xx_timer_register_types)
diff --git a/hw/timer/renesas_cmt.c b/hw/timer/renesas_cmt.c
new file mode 100644
index 000000000..2e0fd21a3
--- /dev/null
+++ b/hw/timer/renesas_cmt.c
@@ -0,0 +1,283 @@
+/*
+ * Renesas 16-bit Compare-match timer
+ *
+ * Datasheet: RX62N Group, RX621 Group User's Manual: Hardware
+ * (Rev.1.40 R01UH0033EJ0140)
+ *
+ * Copyright (c) 2019 Yoshinori Sato
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/log.h"
+#include "hw/irq.h"
+#include "hw/registerfields.h"
+#include "hw/qdev-properties.h"
+#include "hw/timer/renesas_cmt.h"
+#include "migration/vmstate.h"
+
+/*
+ * +0 CMSTR - common control
+ * +2 CMCR - ch0
+ * +4 CMCNT - ch0
+ * +6 CMCOR - ch0
+ * +8 CMCR - ch1
+ * +10 CMCNT - ch1
+ * +12 CMCOR - ch1
+ * If the CH0 registers are treated as starting at offset +2, both
+ * channels share the same per-channel register layout, so they are
+ * defined that way below.
+ */
+REG16(CMSTR, 0)
+    FIELD(CMSTR, STR0, 0, 1)
+    FIELD(CMSTR, STR1, 1, 1)
+    FIELD(CMSTR, STR, 0, 2)
+/* This address is the per-channel offset */
+REG16(CMCR, 0)
+    FIELD(CMCR, CKS, 0, 2)
+    FIELD(CMCR, CMIE, 6, 1)
+REG16(CMCNT, 2)
+REG16(CMCOR, 4)
+
+static void update_events(RCMTState *cmt, int ch)
+{
+    int64_t next_time;
+
+    if ((cmt->cmstr & (1 << ch)) == 0) {
+        /* Counting is disabled, so no next event will occur. 
*/ + return ; + } + next_time = cmt->cmcor[ch] - cmt->cmcnt[ch]; + next_time *= NANOSECONDS_PER_SECOND; + next_time /= cmt->input_freq; + /* + * CKS -> div rate + * 0 -> 8 (1 << 3) + * 1 -> 32 (1 << 5) + * 2 -> 128 (1 << 7) + * 3 -> 512 (1 << 9) + */ + next_time *= 1 << (3 + FIELD_EX16(cmt->cmcr[ch], CMCR, CKS) * 2); + next_time += qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + timer_mod(&cmt->timer[ch], next_time); +} + +static int64_t read_cmcnt(RCMTState *cmt, int ch) +{ + int64_t delta, now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + + if (cmt->cmstr & (1 << ch)) { + delta = (now - cmt->tick[ch]); + delta /= NANOSECONDS_PER_SECOND; + delta /= cmt->input_freq; + delta /= 1 << (3 + FIELD_EX16(cmt->cmcr[ch], CMCR, CKS) * 2); + cmt->tick[ch] = now; + return cmt->cmcnt[ch] + delta; + } else { + return cmt->cmcnt[ch]; + } +} + +static uint64_t cmt_read(void *opaque, hwaddr offset, unsigned size) +{ + RCMTState *cmt = opaque; + int ch = offset / 0x08; + uint64_t ret; + + if (offset == A_CMSTR) { + ret = 0; + ret = FIELD_DP16(ret, CMSTR, STR, + FIELD_EX16(cmt->cmstr, CMSTR, STR)); + return ret; + } else { + offset &= 0x07; + if (ch == 0) { + offset -= 0x02; + } + switch (offset) { + case A_CMCR: + ret = 0; + ret = FIELD_DP16(ret, CMCR, CKS, + FIELD_EX16(cmt->cmstr, CMCR, CKS)); + ret = FIELD_DP16(ret, CMCR, CMIE, + FIELD_EX16(cmt->cmstr, CMCR, CMIE)); + return ret; + case A_CMCNT: + return read_cmcnt(cmt, ch); + case A_CMCOR: + return cmt->cmcor[ch]; + } + } + qemu_log_mask(LOG_UNIMP, "renesas_cmt: Register 0x%" HWADDR_PRIX " " + "not implemented\n", + offset); + return UINT64_MAX; +} + +static void start_stop(RCMTState *cmt, int ch, int st) +{ + if (st) { + update_events(cmt, ch); + } else { + timer_del(&cmt->timer[ch]); + } +} + +static void cmt_write(void *opaque, hwaddr offset, uint64_t val, unsigned size) +{ + RCMTState *cmt = opaque; + int ch = offset / 0x08; + + if (offset == A_CMSTR) { + cmt->cmstr = FIELD_EX16(val, CMSTR, STR); + start_stop(cmt, 0, FIELD_EX16(cmt->cmstr, CMSTR, STR0)); + start_stop(cmt, 1, FIELD_EX16(cmt->cmstr, CMSTR, STR1)); + } else { + offset &= 0x07; + if (ch == 0) { + offset -= 0x02; + } + switch (offset) { + case A_CMCR: + cmt->cmcr[ch] = FIELD_DP16(cmt->cmcr[ch], CMCR, CKS, + FIELD_EX16(val, CMCR, CKS)); + cmt->cmcr[ch] = FIELD_DP16(cmt->cmcr[ch], CMCR, CMIE, + FIELD_EX16(val, CMCR, CMIE)); + break; + case 2: + cmt->cmcnt[ch] = val; + break; + case 4: + cmt->cmcor[ch] = val; + break; + default: + qemu_log_mask(LOG_UNIMP, "renesas_cmt: Register 0x%" HWADDR_PRIX " " + "not implemented\n", + offset); + return; + } + if (FIELD_EX16(cmt->cmstr, CMSTR, STR) & (1 << ch)) { + update_events(cmt, ch); + } + } +} + +static const MemoryRegionOps cmt_ops = { + .write = cmt_write, + .read = cmt_read, + .endianness = DEVICE_NATIVE_ENDIAN, + .impl = { + .min_access_size = 2, + .max_access_size = 2, + }, + .valid = { + .min_access_size = 2, + .max_access_size = 2, + }, +}; + +static void timer_events(RCMTState *cmt, int ch) +{ + cmt->cmcnt[ch] = 0; + cmt->tick[ch] = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + update_events(cmt, ch); + if (FIELD_EX16(cmt->cmcr[ch], CMCR, CMIE)) { + qemu_irq_pulse(cmt->cmi[ch]); + } +} + +static void timer_event0(void *opaque) +{ + RCMTState *cmt = opaque; + + timer_events(cmt, 0); +} + +static void timer_event1(void *opaque) +{ + RCMTState *cmt = opaque; + + timer_events(cmt, 1); +} + +static void rcmt_reset(DeviceState *dev) +{ + RCMTState *cmt = RCMT(dev); + cmt->cmstr = 0; + cmt->cmcr[0] = cmt->cmcr[1] = 0; + cmt->cmcnt[0] = cmt->cmcnt[1] = 0; + 
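/* The compare registers reset to all ones (the maximum match value). */
+    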
cmt->cmcor[0] = cmt->cmcor[1] = 0xffff; +} + +static void rcmt_init(Object *obj) +{ + SysBusDevice *d = SYS_BUS_DEVICE(obj); + RCMTState *cmt = RCMT(obj); + int i; + + memory_region_init_io(&cmt->memory, OBJECT(cmt), &cmt_ops, + cmt, "renesas-cmt", 0x10); + sysbus_init_mmio(d, &cmt->memory); + + for (i = 0; i < ARRAY_SIZE(cmt->cmi); i++) { + sysbus_init_irq(d, &cmt->cmi[i]); + } + timer_init_ns(&cmt->timer[0], QEMU_CLOCK_VIRTUAL, timer_event0, cmt); + timer_init_ns(&cmt->timer[1], QEMU_CLOCK_VIRTUAL, timer_event1, cmt); +} + +static const VMStateDescription vmstate_rcmt = { + .name = "rx-cmt", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT16(cmstr, RCMTState), + VMSTATE_UINT16_ARRAY(cmcr, RCMTState, CMT_CH), + VMSTATE_UINT16_ARRAY(cmcnt, RCMTState, CMT_CH), + VMSTATE_UINT16_ARRAY(cmcor, RCMTState, CMT_CH), + VMSTATE_INT64_ARRAY(tick, RCMTState, CMT_CH), + VMSTATE_TIMER_ARRAY(timer, RCMTState, CMT_CH), + VMSTATE_END_OF_LIST() + } +}; + +static Property rcmt_properties[] = { + DEFINE_PROP_UINT64("input-freq", RCMTState, input_freq, 0), + DEFINE_PROP_END_OF_LIST(), +}; + +static void rcmt_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->vmsd = &vmstate_rcmt; + dc->reset = rcmt_reset; + device_class_set_props(dc, rcmt_properties); +} + +static const TypeInfo rcmt_info = { + .name = TYPE_RENESAS_CMT, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(RCMTState), + .instance_init = rcmt_init, + .class_init = rcmt_class_init, +}; + +static void rcmt_register_types(void) +{ + type_register_static(&rcmt_info); +} + +type_init(rcmt_register_types) diff --git a/hw/timer/renesas_tmr.c b/hw/timer/renesas_tmr.c new file mode 100644 index 000000000..d96002e1e --- /dev/null +++ b/hw/timer/renesas_tmr.c @@ -0,0 +1,493 @@ +/* + * Renesas 8bit timer + * + * Datasheet: RX62N Group, RX621 Group User's Manual: Hardware + * (Rev.1.40 R01UH0033EJ0140) + * + * Copyright (c) 2019 Yoshinori Sato + * + * SPDX-License-Identifier: GPL-2.0-or-later + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . 
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/log.h"
+#include "hw/irq.h"
+#include "hw/registerfields.h"
+#include "hw/qdev-properties.h"
+#include "hw/timer/renesas_tmr.h"
+#include "migration/vmstate.h"
+
+REG8(TCR, 0)
+    FIELD(TCR, CCLR, 3, 2)
+    FIELD(TCR, OVIE, 5, 1)
+    FIELD(TCR, CMIEA, 6, 1)
+    FIELD(TCR, CMIEB, 7, 1)
+REG8(TCSR, 2)
+    FIELD(TCSR, OSA, 0, 2)
+    FIELD(TCSR, OSB, 2, 2)
+    FIELD(TCSR, ADTE, 4, 2)
+REG8(TCORA, 4)
+REG8(TCORB, 6)
+REG8(TCNT, 8)
+REG8(TCCR, 10)
+    FIELD(TCCR, CKS, 0, 3)
+    FIELD(TCCR, CSS, 3, 2)
+    FIELD(TCCR, TMRIS, 7, 1)
+
+#define CSS_EXTERNAL 0x00
+#define CSS_INTERNAL 0x01
+#define CSS_INVALID 0x02
+#define CSS_CASCADING 0x03
+#define CCLR_A 0x01
+#define CCLR_B 0x02
+
+static const int clkdiv[] = {0, 1, 2, 8, 32, 64, 1024, 8192};
+
+/* Concatenate the ch0/ch1 byte registers into one 16-bit value. */
+static uint16_t concat_reg(uint8_t *reg)
+{
+    return (reg[0] << 8) | reg[1];
+}
+
+static void update_events(RTMRState *tmr, int ch)
+{
+    uint16_t diff[TMR_NR_EVENTS], min;
+    int64_t next_time;
+    int i, event;
+
+    if (tmr->tccr[ch] == 0) {
+        return;
+    }
+    if (FIELD_EX8(tmr->tccr[ch], TCCR, CSS) == CSS_EXTERNAL) {
+        /* External clock mode: no event can be predicted. */
+        return;
+    }
+    if (FIELD_EX8(tmr->tccr[0], TCCR, CSS) == CSS_CASCADING) {
+        /* cascading mode */
+        if (ch == 1) {
+            tmr->next[ch] = none;
+            return;
+        }
+        diff[cmia] = concat_reg(tmr->tcora) - concat_reg(tmr->tcnt);
+        diff[cmib] = concat_reg(tmr->tcorb) - concat_reg(tmr->tcnt);
+        diff[ovi] = 0x10000 - concat_reg(tmr->tcnt);
+    } else {
+        /* separate mode */
+        diff[cmia] = tmr->tcora[ch] - tmr->tcnt[ch];
+        diff[cmib] = tmr->tcorb[ch] - tmr->tcnt[ch];
+        diff[ovi] = 0x100 - tmr->tcnt[ch];
+    }
+    /* Find the event that will occur soonest. */
+    for (event = 0, min = diff[0], i = 1; i < none; i++) {
+        if (min > diff[i]) {
+            event = i;
+            min = diff[i];
+        }
+    }
+    tmr->next[ch] = event;
+    next_time = diff[event];
+    next_time *= clkdiv[FIELD_EX8(tmr->tccr[ch], TCCR, CKS)];
+    next_time *= NANOSECONDS_PER_SECOND;
+    next_time /= tmr->input_freq;
+    next_time += qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
+    timer_mod(&tmr->timer[ch], next_time);
+}
+
+static int elapsed_time(RTMRState *tmr, int ch, int64_t delta)
+{
+    int divrate = clkdiv[FIELD_EX8(tmr->tccr[ch], TCCR, CKS)];
+    int et;
+
+    tmr->div_round[ch] += delta;
+    if (divrate > 0) {
+        et = tmr->div_round[ch] / divrate;
+        tmr->div_round[ch] %= divrate;
+    } else {
+        /* The clock is disabled, 
so no update */ + et = 0; + } + return et; +} + +static uint16_t read_tcnt(RTMRState *tmr, unsigned size, int ch) +{ + int64_t delta, now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + int elapsed, ovf = 0; + uint16_t tcnt[2]; + uint32_t ret; + + delta = (now - tmr->tick) * NANOSECONDS_PER_SECOND / tmr->input_freq; + if (delta > 0) { + tmr->tick = now; + + switch (FIELD_EX8(tmr->tccr[1], TCCR, CSS)) { + case CSS_INTERNAL: + /* timer1 count update */ + elapsed = elapsed_time(tmr, 1, delta); + if (elapsed >= 0x100) { + ovf = elapsed >> 8; + } + tcnt[1] = tmr->tcnt[1] + (elapsed & 0xff); + break; + case CSS_INVALID: /* guest error to have set this */ + case CSS_EXTERNAL: /* QEMU doesn't implement these */ + case CSS_CASCADING: + tcnt[1] = tmr->tcnt[1]; + break; + default: + g_assert_not_reached(); + } + switch (FIELD_EX8(tmr->tccr[0], TCCR, CSS)) { + case CSS_INTERNAL: + elapsed = elapsed_time(tmr, 0, delta); + tcnt[0] = tmr->tcnt[0] + elapsed; + break; + case CSS_CASCADING: + tcnt[0] = tmr->tcnt[0] + ovf; + break; + case CSS_INVALID: /* guest error to have set this */ + case CSS_EXTERNAL: /* QEMU doesn't implement this */ + tcnt[0] = tmr->tcnt[0]; + break; + default: + g_assert_not_reached(); + } + } else { + tcnt[0] = tmr->tcnt[0]; + tcnt[1] = tmr->tcnt[1]; + } + if (size == 1) { + return tcnt[ch]; + } else { + ret = 0; + ret = deposit32(ret, 0, 8, tcnt[1]); + ret = deposit32(ret, 8, 8, tcnt[0]); + return ret; + } +} + +static uint8_t read_tccr(uint8_t r) +{ + uint8_t tccr = 0; + tccr = FIELD_DP8(tccr, TCCR, TMRIS, + FIELD_EX8(r, TCCR, TMRIS)); + tccr = FIELD_DP8(tccr, TCCR, CSS, + FIELD_EX8(r, TCCR, CSS)); + tccr = FIELD_DP8(tccr, TCCR, CKS, + FIELD_EX8(r, TCCR, CKS)); + return tccr; +} + +static uint64_t tmr_read(void *opaque, hwaddr addr, unsigned size) +{ + RTMRState *tmr = opaque; + int ch = addr & 1; + uint64_t ret; + + if (size == 2 && (ch != 0 || addr == A_TCR || addr == A_TCSR)) { + qemu_log_mask(LOG_GUEST_ERROR, "renesas_tmr: Invalid read size 0x%" + HWADDR_PRIX "\n", + addr); + return UINT64_MAX; + } + switch (addr & 0x0e) { + case A_TCR: + ret = 0; + ret = FIELD_DP8(ret, TCR, CCLR, + FIELD_EX8(tmr->tcr[ch], TCR, CCLR)); + ret = FIELD_DP8(ret, TCR, OVIE, + FIELD_EX8(tmr->tcr[ch], TCR, OVIE)); + ret = FIELD_DP8(ret, TCR, CMIEA, + FIELD_EX8(tmr->tcr[ch], TCR, CMIEA)); + ret = FIELD_DP8(ret, TCR, CMIEB, + FIELD_EX8(tmr->tcr[ch], TCR, CMIEB)); + return ret; + case A_TCSR: + ret = 0; + ret = FIELD_DP8(ret, TCSR, OSA, + FIELD_EX8(tmr->tcsr[ch], TCSR, OSA)); + ret = FIELD_DP8(ret, TCSR, OSB, + FIELD_EX8(tmr->tcsr[ch], TCSR, OSB)); + switch (ch) { + case 0: + ret = FIELD_DP8(ret, TCSR, ADTE, + FIELD_EX8(tmr->tcsr[ch], TCSR, ADTE)); + break; + case 1: /* CH1 ADTE unimplement always 1 */ + ret = FIELD_DP8(ret, TCSR, ADTE, 1); + break; + } + return ret; + case A_TCORA: + if (size == 1) { + return tmr->tcora[ch]; + } else if (ch == 0) { + return concat_reg(tmr->tcora); + } + /* fall through */ + case A_TCORB: + if (size == 1) { + return tmr->tcorb[ch]; + } else { + return concat_reg(tmr->tcorb); + } + case A_TCNT: + return read_tcnt(tmr, size, ch); + case A_TCCR: + if (size == 1) { + return read_tccr(tmr->tccr[ch]); + } else { + return read_tccr(tmr->tccr[0]) << 8 | read_tccr(tmr->tccr[1]); + } + default: + qemu_log_mask(LOG_UNIMP, "renesas_tmr: Register 0x%" HWADDR_PRIX + " not implemented\n", + addr); + break; + } + return UINT64_MAX; +} + +static void tmr_write_count(RTMRState *tmr, int ch, unsigned size, + uint8_t *reg, uint64_t val) +{ + if (size == 1) { + reg[ch] = val; + 
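/* An 8-bit write touches a single channel; refresh only its schedule. */
+        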
update_events(tmr, ch); + } else { + reg[0] = extract32(val, 8, 8); + reg[1] = extract32(val, 0, 8); + update_events(tmr, 0); + update_events(tmr, 1); + } +} + +static void tmr_write(void *opaque, hwaddr addr, uint64_t val, unsigned size) +{ + RTMRState *tmr = opaque; + int ch = addr & 1; + + if (size == 2 && (ch != 0 || addr == A_TCR || addr == A_TCSR)) { + qemu_log_mask(LOG_GUEST_ERROR, + "renesas_tmr: Invalid write size 0x%" HWADDR_PRIX "\n", + addr); + return; + } + switch (addr & 0x0e) { + case A_TCR: + tmr->tcr[ch] = val; + break; + case A_TCSR: + tmr->tcsr[ch] = val; + break; + case A_TCORA: + tmr_write_count(tmr, ch, size, tmr->tcora, val); + break; + case A_TCORB: + tmr_write_count(tmr, ch, size, tmr->tcorb, val); + break; + case A_TCNT: + tmr_write_count(tmr, ch, size, tmr->tcnt, val); + break; + case A_TCCR: + tmr_write_count(tmr, ch, size, tmr->tccr, val); + break; + default: + qemu_log_mask(LOG_UNIMP, "renesas_tmr: Register 0x%" HWADDR_PRIX + " not implemented\n", + addr); + break; + } +} + +static const MemoryRegionOps tmr_ops = { + .write = tmr_write, + .read = tmr_read, + .endianness = DEVICE_LITTLE_ENDIAN, + .impl = { + .min_access_size = 1, + .max_access_size = 2, + }, + .valid = { + .min_access_size = 1, + .max_access_size = 2, + }, +}; + +static void timer_events(RTMRState *tmr, int ch); + +static uint16_t issue_event(RTMRState *tmr, int ch, int sz, + uint16_t tcnt, uint16_t tcora, uint16_t tcorb) +{ + uint16_t ret = tcnt; + + switch (tmr->next[ch]) { + case none: + break; + case cmia: + if (tcnt >= tcora) { + if (FIELD_EX8(tmr->tcr[ch], TCR, CCLR) == CCLR_A) { + ret = tcnt - tcora; + } + if (FIELD_EX8(tmr->tcr[ch], TCR, CMIEA)) { + qemu_irq_pulse(tmr->cmia[ch]); + } + if (sz == 8 && ch == 0 && + FIELD_EX8(tmr->tccr[1], TCCR, CSS) == CSS_CASCADING) { + tmr->tcnt[1]++; + timer_events(tmr, 1); + } + } + break; + case cmib: + if (tcnt >= tcorb) { + if (FIELD_EX8(tmr->tcr[ch], TCR, CCLR) == CCLR_B) { + ret = tcnt - tcorb; + } + if (FIELD_EX8(tmr->tcr[ch], TCR, CMIEB)) { + qemu_irq_pulse(tmr->cmib[ch]); + } + } + break; + case ovi: + if ((tcnt >= (1 << sz)) && FIELD_EX8(tmr->tcr[ch], TCR, OVIE)) { + qemu_irq_pulse(tmr->ovi[ch]); + } + break; + default: + g_assert_not_reached(); + } + return ret; +} + +static void timer_events(RTMRState *tmr, int ch) +{ + uint16_t tcnt; + + tmr->tcnt[ch] = read_tcnt(tmr, 1, ch); + if (FIELD_EX8(tmr->tccr[0], TCCR, CSS) != CSS_CASCADING) { + tmr->tcnt[ch] = issue_event(tmr, ch, 8, + tmr->tcnt[ch], + tmr->tcora[ch], + tmr->tcorb[ch]) & 0xff; + } else { + if (ch == 1) { + return ; + } + tcnt = issue_event(tmr, ch, 16, + concat_reg(tmr->tcnt), + concat_reg(tmr->tcora), + concat_reg(tmr->tcorb)); + tmr->tcnt[0] = (tcnt >> 8) & 0xff; + tmr->tcnt[1] = tcnt & 0xff; + } + update_events(tmr, ch); +} + +static void timer_event0(void *opaque) +{ + RTMRState *tmr = opaque; + + timer_events(tmr, 0); +} + +static void timer_event1(void *opaque) +{ + RTMRState *tmr = opaque; + + timer_events(tmr, 1); +} + +static void rtmr_reset(DeviceState *dev) +{ + RTMRState *tmr = RTMR(dev); + tmr->tcr[0] = tmr->tcr[1] = 0x00; + tmr->tcsr[0] = 0x00; + tmr->tcsr[1] = 0x10; + tmr->tcnt[0] = tmr->tcnt[1] = 0x00; + tmr->tcora[0] = tmr->tcora[1] = 0xff; + tmr->tcorb[0] = tmr->tcorb[1] = 0xff; + tmr->tccr[0] = tmr->tccr[1] = 0x00; + tmr->next[0] = tmr->next[1] = none; + tmr->tick = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); +} + +static void rtmr_init(Object *obj) +{ + SysBusDevice *d = SYS_BUS_DEVICE(obj); + RTMRState *tmr = RTMR(obj); + int i; + + 
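/* A single 16-byte MMIO window serves both 8-bit channels. */
+    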
memory_region_init_io(&tmr->memory, OBJECT(tmr), &tmr_ops, + tmr, "renesas-tmr", 0x10); + sysbus_init_mmio(d, &tmr->memory); + + for (i = 0; i < ARRAY_SIZE(tmr->ovi); i++) { + sysbus_init_irq(d, &tmr->cmia[i]); + sysbus_init_irq(d, &tmr->cmib[i]); + sysbus_init_irq(d, &tmr->ovi[i]); + } + timer_init_ns(&tmr->timer[0], QEMU_CLOCK_VIRTUAL, timer_event0, tmr); + timer_init_ns(&tmr->timer[1], QEMU_CLOCK_VIRTUAL, timer_event1, tmr); +} + +static const VMStateDescription vmstate_rtmr = { + .name = "rx-tmr", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_INT64(tick, RTMRState), + VMSTATE_UINT8_ARRAY(tcnt, RTMRState, TMR_CH), + VMSTATE_UINT8_ARRAY(tcora, RTMRState, TMR_CH), + VMSTATE_UINT8_ARRAY(tcorb, RTMRState, TMR_CH), + VMSTATE_UINT8_ARRAY(tcr, RTMRState, TMR_CH), + VMSTATE_UINT8_ARRAY(tccr, RTMRState, TMR_CH), + VMSTATE_UINT8_ARRAY(tcor, RTMRState, TMR_CH), + VMSTATE_UINT8_ARRAY(tcsr, RTMRState, TMR_CH), + VMSTATE_INT64_ARRAY(div_round, RTMRState, TMR_CH), + VMSTATE_UINT8_ARRAY(next, RTMRState, TMR_CH), + VMSTATE_TIMER_ARRAY(timer, RTMRState, TMR_CH), + VMSTATE_END_OF_LIST() + } +}; + +static Property rtmr_properties[] = { + DEFINE_PROP_UINT64("input-freq", RTMRState, input_freq, 0), + DEFINE_PROP_END_OF_LIST(), +}; + +static void rtmr_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->vmsd = &vmstate_rtmr; + dc->reset = rtmr_reset; + device_class_set_props(dc, rtmr_properties); +} + +static const TypeInfo rtmr_info = { + .name = TYPE_RENESAS_TMR, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(RTMRState), + .instance_init = rtmr_init, + .class_init = rtmr_class_init, +}; + +static void rtmr_register_types(void) +{ + type_register_static(&rtmr_info); +} + +type_init(rtmr_register_types) diff --git a/hw/timer/sh_timer.c b/hw/timer/sh_timer.c new file mode 100644 index 000000000..c72c327bf --- /dev/null +++ b/hw/timer/sh_timer.c @@ -0,0 +1,373 @@ +/* + * SuperH Timer modules. + * + * Copyright (c) 2007 Magnus Damm + * Based on arm_timer.c by Paul Brook + * Copyright (c) 2005-2006 CodeSourcery. + * + * This code is licensed under the GPL. + */ + +#include "qemu/osdep.h" +#include "exec/memory.h" +#include "qemu/log.h" +#include "hw/irq.h" +#include "hw/sh4/sh.h" +#include "hw/timer/tmu012.h" +#include "hw/ptimer.h" +#include "trace.h" + +#define TIMER_TCR_TPSC (7 << 0) +#define TIMER_TCR_CKEG (3 << 3) +#define TIMER_TCR_UNIE (1 << 5) +#define TIMER_TCR_ICPE (3 << 6) +#define TIMER_TCR_UNF (1 << 8) +#define TIMER_TCR_ICPF (1 << 9) +#define TIMER_TCR_RESERVED (0x3f << 10) + +#define TIMER_FEAT_CAPT (1 << 0) +#define TIMER_FEAT_EXTCLK (1 << 1) + +#define OFFSET_TCOR 0 +#define OFFSET_TCNT 1 +#define OFFSET_TCR 2 +#define OFFSET_TCPR 3 + +typedef struct { + ptimer_state *timer; + uint32_t tcnt; + uint32_t tcor; + uint32_t tcr; + uint32_t tcpr; + int freq; + int int_level; + int old_level; + int feat; + int enabled; + qemu_irq irq; +} SHTimerState; + +/* Check all active timers, and schedule the next timer interrupt. 
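+   In this model that amounts to recomputing the IRQ line level.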
*/ + +static void sh_timer_update(SHTimerState *s) +{ + int new_level = s->int_level && (s->tcr & TIMER_TCR_UNIE); + + if (new_level != s->old_level) { + qemu_set_irq(s->irq, new_level); + } + s->old_level = s->int_level; + s->int_level = new_level; +} + +static uint32_t sh_timer_read(void *opaque, hwaddr offset) +{ + SHTimerState *s = opaque; + + switch (offset >> 2) { + case OFFSET_TCOR: + return s->tcor; + case OFFSET_TCNT: + return ptimer_get_count(s->timer); + case OFFSET_TCR: + return s->tcr | (s->int_level ? TIMER_TCR_UNF : 0); + case OFFSET_TCPR: + if (s->feat & TIMER_FEAT_CAPT) { + return s->tcpr; + } + } + qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" HWADDR_PRIx "\n", + __func__, offset); + return 0; +} + +static void sh_timer_write(void *opaque, hwaddr offset, uint32_t value) +{ + SHTimerState *s = opaque; + int freq; + + switch (offset >> 2) { + case OFFSET_TCOR: + s->tcor = value; + ptimer_transaction_begin(s->timer); + ptimer_set_limit(s->timer, s->tcor, 0); + ptimer_transaction_commit(s->timer); + break; + case OFFSET_TCNT: + s->tcnt = value; + ptimer_transaction_begin(s->timer); + ptimer_set_count(s->timer, s->tcnt); + ptimer_transaction_commit(s->timer); + break; + case OFFSET_TCR: + ptimer_transaction_begin(s->timer); + if (s->enabled) { + /* + * Pause the timer if it is running. This may cause some inaccuracy + * due to rounding, but avoids a whole lot of other messiness + */ + ptimer_stop(s->timer); + } + freq = s->freq; + /* ??? Need to recalculate expiry time after changing divisor. */ + switch (value & TIMER_TCR_TPSC) { + case 0: + freq >>= 2; + break; + case 1: + freq >>= 4; + break; + case 2: + freq >>= 6; + break; + case 3: + freq >>= 8; + break; + case 4: + freq >>= 10; + break; + case 6: + case 7: + if (s->feat & TIMER_FEAT_EXTCLK) { + break; + } + /* fallthrough */ + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Reserved TPSC value\n", __func__); + } + switch ((value & TIMER_TCR_CKEG) >> 3) { + case 0: + break; + case 1: + case 2: + case 3: + if (s->feat & TIMER_FEAT_EXTCLK) { + break; + } + /* fallthrough */ + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Reserved CKEG value\n", __func__); + } + switch ((value & TIMER_TCR_ICPE) >> 6) { + case 0: + break; + case 2: + case 3: + if (s->feat & TIMER_FEAT_CAPT) { + break; + } + /* fallthrough */ + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Reserved ICPE value\n", __func__); + } + if ((value & TIMER_TCR_UNF) == 0) { + s->int_level = 0; + } + + value &= ~TIMER_TCR_UNF; + + if ((value & TIMER_TCR_ICPF) && (!(s->feat & TIMER_FEAT_CAPT))) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Reserved ICPF value\n", __func__); + } + + value &= ~TIMER_TCR_ICPF; /* capture not supported */ + + if (value & TIMER_TCR_RESERVED) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Reserved TCR bits set\n", __func__); + } + s->tcr = value; + ptimer_set_limit(s->timer, s->tcor, 0); + ptimer_set_freq(s->timer, freq); + if (s->enabled) { + /* Restart the timer if still enabled. 
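+             (ptimer keeps the current count across the stop above and
+             this run.)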
*/ + ptimer_run(s->timer, 0); + } + ptimer_transaction_commit(s->timer); + break; + case OFFSET_TCPR: + if (s->feat & TIMER_FEAT_CAPT) { + s->tcpr = value; + break; + } + /* fallthrough */ + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad offset 0x%" HWADDR_PRIx "\n", __func__, offset); + } + sh_timer_update(s); +} + +static void sh_timer_start_stop(void *opaque, int enable) +{ + SHTimerState *s = opaque; + + trace_sh_timer_start_stop(enable, s->enabled); + ptimer_transaction_begin(s->timer); + if (s->enabled && !enable) { + ptimer_stop(s->timer); + } + if (!s->enabled && enable) { + ptimer_run(s->timer, 0); + } + ptimer_transaction_commit(s->timer); + s->enabled = !!enable; +} + +static void sh_timer_tick(void *opaque) +{ + SHTimerState *s = opaque; + s->int_level = s->enabled; + sh_timer_update(s); +} + +static void *sh_timer_init(uint32_t freq, int feat, qemu_irq irq) +{ + SHTimerState *s; + + s = g_malloc0(sizeof(*s)); + s->freq = freq; + s->feat = feat; + s->tcor = 0xffffffff; + s->tcnt = 0xffffffff; + s->tcpr = 0xdeadbeef; + s->tcr = 0; + s->enabled = 0; + s->irq = irq; + + s->timer = ptimer_init(sh_timer_tick, s, PTIMER_POLICY_DEFAULT); + + sh_timer_write(s, OFFSET_TCOR >> 2, s->tcor); + sh_timer_write(s, OFFSET_TCNT >> 2, s->tcnt); + sh_timer_write(s, OFFSET_TCPR >> 2, s->tcpr); + sh_timer_write(s, OFFSET_TCR >> 2, s->tcpr); + /* ??? Save/restore. */ + return s; +} + +typedef struct { + MemoryRegion iomem; + MemoryRegion iomem_p4; + MemoryRegion iomem_a7; + void *timer[3]; + int level[3]; + uint32_t tocr; + uint32_t tstr; + int feat; +} tmu012_state; + +static uint64_t tmu012_read(void *opaque, hwaddr offset, unsigned size) +{ + tmu012_state *s = opaque; + + trace_sh_timer_read(offset); + if (offset >= 0x20) { + if (!(s->feat & TMU012_FEAT_3CHAN)) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad channel offset 0x%" HWADDR_PRIx "\n", + __func__, offset); + } + return sh_timer_read(s->timer[2], offset - 0x20); + } + + if (offset >= 0x14) { + return sh_timer_read(s->timer[1], offset - 0x14); + } + if (offset >= 0x08) { + return sh_timer_read(s->timer[0], offset - 0x08); + } + if (offset == 4) { + return s->tstr; + } + if ((s->feat & TMU012_FEAT_TOCR) && offset == 0) { + return s->tocr; + } + + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad offset 0x%" HWADDR_PRIx "\n", __func__, offset); + return 0; +} + +static void tmu012_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + tmu012_state *s = opaque; + + trace_sh_timer_write(offset, value); + if (offset >= 0x20) { + if (!(s->feat & TMU012_FEAT_3CHAN)) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad channel offset 0x%" HWADDR_PRIx "\n", + __func__, offset); + } + sh_timer_write(s->timer[2], offset - 0x20, value); + return; + } + + if (offset >= 0x14) { + sh_timer_write(s->timer[1], offset - 0x14, value); + return; + } + + if (offset >= 0x08) { + sh_timer_write(s->timer[0], offset - 0x08, value); + return; + } + + if (offset == 4) { + sh_timer_start_stop(s->timer[0], value & (1 << 0)); + sh_timer_start_stop(s->timer[1], value & (1 << 1)); + if (s->feat & TMU012_FEAT_3CHAN) { + sh_timer_start_stop(s->timer[2], value & (1 << 2)); + } else { + if (value & (1 << 2)) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad channel\n", __func__); + } + } + + s->tstr = value; + return; + } + + if ((s->feat & TMU012_FEAT_TOCR) && offset == 0) { + s->tocr = value & (1 << 0); + } +} + +static const MemoryRegionOps tmu012_ops = { + .read = tmu012_read, + .write = tmu012_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +void 
tmu012_init(MemoryRegion *sysmem, hwaddr base, int feat, uint32_t freq, + qemu_irq ch0_irq, qemu_irq ch1_irq, + qemu_irq ch2_irq0, qemu_irq ch2_irq1) +{ + tmu012_state *s; + int timer_feat = (feat & TMU012_FEAT_EXTCLK) ? TIMER_FEAT_EXTCLK : 0; + + s = g_malloc0(sizeof(*s)); + s->feat = feat; + s->timer[0] = sh_timer_init(freq, timer_feat, ch0_irq); + s->timer[1] = sh_timer_init(freq, timer_feat, ch1_irq); + if (feat & TMU012_FEAT_3CHAN) { + s->timer[2] = sh_timer_init(freq, timer_feat | TIMER_FEAT_CAPT, + ch2_irq0); /* ch2_irq1 not supported */ + } + + memory_region_init_io(&s->iomem, NULL, &tmu012_ops, s, "timer", 0x30); + + memory_region_init_alias(&s->iomem_p4, NULL, "timer-p4", + &s->iomem, 0, memory_region_size(&s->iomem)); + memory_region_add_subregion(sysmem, P4ADDR(base), &s->iomem_p4); + + memory_region_init_alias(&s->iomem_a7, NULL, "timer-a7", + &s->iomem, 0, memory_region_size(&s->iomem)); + memory_region_add_subregion(sysmem, A7ADDR(base), &s->iomem_a7); + /* ??? Save/restore. */ +} diff --git a/hw/timer/sifive_pwm.c b/hw/timer/sifive_pwm.c new file mode 100644 index 000000000..c664480cc --- /dev/null +++ b/hw/timer/sifive_pwm.c @@ -0,0 +1,468 @@ +/* + * SiFive PWM + * + * Copyright (c) 2020 Western Digital + * + * Author: Alistair Francis + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include "trace.h" +#include "hw/irq.h" +#include "hw/timer/sifive_pwm.h" +#include "hw/qdev-properties.h" +#include "hw/registerfields.h" +#include "migration/vmstate.h" +#include "qemu/log.h" +#include "qemu/module.h" + +#define HAS_PWM_EN_BITS(cfg) ((cfg & R_CONFIG_ENONESHOT_MASK) || \ + (cfg & R_CONFIG_ENALWAYS_MASK)) + +#define PWMCMP_MASK 0xFFFF +#define PWMCOUNT_MASK 0x7FFFFFFF + +REG32(CONFIG, 0x00) + FIELD(CONFIG, SCALE, 0, 4) + FIELD(CONFIG, STICKY, 8, 1) + FIELD(CONFIG, ZEROCMP, 9, 1) + FIELD(CONFIG, DEGLITCH, 10, 1) + FIELD(CONFIG, ENALWAYS, 12, 1) + FIELD(CONFIG, ENONESHOT, 13, 1) + FIELD(CONFIG, CMP0CENTER, 16, 1) + FIELD(CONFIG, CMP1CENTER, 17, 1) + FIELD(CONFIG, CMP2CENTER, 18, 1) + FIELD(CONFIG, CMP3CENTER, 19, 1) + FIELD(CONFIG, CMP0GANG, 24, 1) + FIELD(CONFIG, CMP1GANG, 25, 1) + FIELD(CONFIG, CMP2GANG, 26, 1) + FIELD(CONFIG, CMP3GANG, 27, 1) + FIELD(CONFIG, CMP0IP, 28, 1) + FIELD(CONFIG, CMP1IP, 29, 1) + FIELD(CONFIG, CMP2IP, 30, 1) + FIELD(CONFIG, CMP3IP, 31, 1) +REG32(COUNT, 0x08) +REG32(PWMS, 0x10) +REG32(PWMCMP0, 0x20) +REG32(PWMCMP1, 0x24) +REG32(PWMCMP2, 0x28) +REG32(PWMCMP3, 0x2C) + +static inline uint64_t sifive_pwm_ns_to_ticks(SiFivePwmState *s, + uint64_t time) +{ + return muldiv64(time, s->freq_hz, NANOSECONDS_PER_SECOND); +} + +static inline uint64_t sifive_pwm_ticks_to_ns(SiFivePwmState *s, + uint64_t ticks) +{ + return muldiv64(ticks, NANOSECONDS_PER_SECOND, s->freq_hz); +} + +static inline uint64_t sifive_pwm_compute_scale(SiFivePwmState *s) +{ + return s->pwmcfg & R_CONFIG_SCALE_MASK; +} + +static void sifive_pwm_set_alarms(SiFivePwmState *s) +{ + uint64_t now_ns = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + + if (HAS_PWM_EN_BITS(s->pwmcfg)) { + /* + * Subtract ticks from number of ticks when the timer was zero + * and mask to the register width. + */ + uint64_t pwmcount = (sifive_pwm_ns_to_ticks(s, now_ns) - + s->tick_offset) & PWMCOUNT_MASK; + uint64_t scale = sifive_pwm_compute_scale(s); + /* PWMs only contains PWMCMP_MASK bits starting at scale */ + uint64_t pwms = (pwmcount & (PWMCMP_MASK << scale)) >> scale; + + for (int i = 0; i < SIFIVE_PWM_CHANS; i++) { + uint64_t pwmcmp = s->pwmcmp[i] & PWMCMP_MASK; + uint64_t pwmcmp_ticks = pwmcmp << scale; + + /* + * Per circuit diagram and spec, both cases raises corresponding + * IP bit one clock cycle after time expires. + */ + if (pwmcmp > pwms) { + uint64_t offset = pwmcmp_ticks - pwmcount + 1; + uint64_t when_to_fire = now_ns + + sifive_pwm_ticks_to_ns(s, offset); + + trace_sifive_pwm_set_alarm(when_to_fire, now_ns); + timer_mod(&s->timer[i], when_to_fire); + } else { + /* Schedule interrupt for next cycle */ + trace_sifive_pwm_set_alarm(now_ns + 1, now_ns); + timer_mod(&s->timer[i], now_ns + 1); + } + + } + } else { + /* + * If timer incrementing disabled, just do pwms > pwmcmp check since + * a write may have happened to PWMs. + */ + uint64_t pwmcount = (s->tick_offset) & PWMCOUNT_MASK; + uint64_t scale = sifive_pwm_compute_scale(s); + uint64_t pwms = (pwmcount & (PWMCMP_MASK << scale)) >> scale; + + for (int i = 0; i < SIFIVE_PWM_CHANS; i++) { + uint64_t pwmcmp = s->pwmcmp[i] & PWMCMP_MASK; + + if (pwms >= pwmcmp) { + trace_sifive_pwm_set_alarm(now_ns + 1, now_ns); + timer_mod(&s->timer[i], now_ns + 1); + } else { + /* Effectively disable timer by scheduling far in future. 
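+                   (0xFFFFFFFFFFFFFF ns of virtual time is about 2.3 years.)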
*/ + trace_sifive_pwm_set_alarm(0xFFFFFFFFFFFFFF, now_ns); + timer_mod(&s->timer[i], 0xFFFFFFFFFFFFFF); + } + } + } +} + +static void sifive_pwm_interrupt(SiFivePwmState *s, int num) +{ + uint64_t now = sifive_pwm_ns_to_ticks(s, + qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL)); + bool was_incrementing = HAS_PWM_EN_BITS(s->pwmcfg); + + trace_sifive_pwm_interrupt(num); + + s->pwmcfg |= R_CONFIG_CMP0IP_MASK << num; + qemu_irq_raise(s->irqs[num]); + + /* + * If the zerocmp bit is set and pwmcmp0 raised the interrupt, + * reset the zero ticks. + */ + if ((s->pwmcfg & R_CONFIG_ZEROCMP_MASK) && (num == 0)) { + /* If the reset condition is signalled, also disable ENONESHOT. */ + s->pwmcfg &= ~R_CONFIG_ENONESHOT_MASK; + + if (was_incrementing) { + /* If incrementing, time in ticks is when pwmcount is zero */ + s->tick_offset = now; + } else { + /* If not incrementing, pwmcount = 0 */ + s->tick_offset = 0; + } + } + + /* + * If the carryout bit is set, which we detect by looking for overflow, + * also reset ENONESHOT. + */ + if (was_incrementing && + ((now & PWMCOUNT_MASK) < (s->tick_offset & PWMCOUNT_MASK))) { + s->pwmcfg &= ~R_CONFIG_ENONESHOT_MASK; + } + + /* Schedule or disable interrupts */ + sifive_pwm_set_alarms(s); + + /* If it was enabled and is now disabled, switch the tick representation */ + if (was_incrementing && !HAS_PWM_EN_BITS(s->pwmcfg)) { + s->tick_offset = (now - s->tick_offset) & PWMCOUNT_MASK; + } +} + +static void sifive_pwm_interrupt_0(void *opaque) +{ + SiFivePwmState *s = opaque; + + sifive_pwm_interrupt(s, 0); +} + +static void sifive_pwm_interrupt_1(void *opaque) +{ + SiFivePwmState *s = opaque; + + sifive_pwm_interrupt(s, 1); +} + +static void sifive_pwm_interrupt_2(void *opaque) +{ + SiFivePwmState *s = opaque; + + sifive_pwm_interrupt(s, 2); +} + +static void sifive_pwm_interrupt_3(void *opaque) +{ + SiFivePwmState *s = opaque; + + sifive_pwm_interrupt(s, 3); +} + +static uint64_t sifive_pwm_read(void *opaque, hwaddr addr, + unsigned int size) +{ + SiFivePwmState *s = opaque; + uint64_t cur_time, scale; + uint64_t now = sifive_pwm_ns_to_ticks(s, + qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL)); + + trace_sifive_pwm_read(addr); + + switch (addr) { + case A_CONFIG: + return s->pwmcfg; + case A_COUNT: + cur_time = s->tick_offset; + + if (HAS_PWM_EN_BITS(s->pwmcfg)) { + cur_time = now - cur_time; + } + + /* + * Return the value in the counter, with bit 31 always 0. + * This is allowed to wrap around, so we don't need to check for that.
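+ * While enabled, tick_offset is the tick count at which PWMCOUNT was + * zero, so now - tick_offset is the current count.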
+ */ + return cur_time & PWMCOUNT_MASK; + case A_PWMS: + cur_time = s->tick_offset; + scale = sifive_pwm_compute_scale(s); + + if (HAS_PWM_EN_BITS(s->pwmcfg)) { + cur_time = now - cur_time; + } + + return ((cur_time & PWMCOUNT_MASK) >> scale) & PWMCMP_MASK; + case A_PWMCMP0: + return s->pwmcmp[0] & PWMCMP_MASK; + case A_PWMCMP1: + return s->pwmcmp[1] & PWMCMP_MASK; + case A_PWMCMP2: + return s->pwmcmp[2] & PWMCMP_MASK; + case A_PWMCMP3: + return s->pwmcmp[3] & PWMCMP_MASK; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad offset 0x%"HWADDR_PRIx"\n", __func__, addr); + return 0; + } + + return 0; +} + +static void sifive_pwm_write(void *opaque, hwaddr addr, + uint64_t val64, unsigned int size) +{ + SiFivePwmState *s = opaque; + uint32_t value = val64; + uint64_t new_offset, scale; + uint64_t now = sifive_pwm_ns_to_ticks(s, + qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL)); + + trace_sifive_pwm_write(value, addr); + + switch (addr) { + case A_CONFIG: + if (value & (R_CONFIG_CMP0CENTER_MASK | R_CONFIG_CMP1CENTER_MASK | + R_CONFIG_CMP2CENTER_MASK | R_CONFIG_CMP3CENTER_MASK)) { + qemu_log_mask(LOG_UNIMP, "%s: CMPxCENTER is not supported\n", + __func__); + } + + if (value & (R_CONFIG_CMP0GANG_MASK | R_CONFIG_CMP1GANG_MASK | + R_CONFIG_CMP2GANG_MASK | R_CONFIG_CMP3GANG_MASK)) { + qemu_log_mask(LOG_UNIMP, "%s: CMPxGANG is not supported\n", + __func__); + } + + if (value & (R_CONFIG_CMP0IP_MASK | R_CONFIG_CMP1IP_MASK | + R_CONFIG_CMP2IP_MASK | R_CONFIG_CMP3IP_MASK)) { + qemu_log_mask(LOG_UNIMP, "%s: CMPxIP is not supported\n", + __func__); + } + + if (!(value & R_CONFIG_CMP0IP_MASK)) { + qemu_irq_lower(s->irqs[0]); + } + + if (!(value & R_CONFIG_CMP1IP_MASK)) { + qemu_irq_lower(s->irqs[1]); + } + + if (!(value & R_CONFIG_CMP2IP_MASK)) { + qemu_irq_lower(s->irqs[2]); + } + + if (!(value & R_CONFIG_CMP3IP_MASK)) { + qemu_irq_lower(s->irqs[3]); + } + + /* + * If this write enables the timer increment, + * set the time when pwmcount was zero to be cur_time - pwmcount. + * If this write disables the timer increment, + * convert back from pwmcount to the time in ticks + * when pwmcount was zero. + */ + if ((!HAS_PWM_EN_BITS(s->pwmcfg) && HAS_PWM_EN_BITS(value)) || + (HAS_PWM_EN_BITS(s->pwmcfg) && !HAS_PWM_EN_BITS(value))) { + s->tick_offset = (now - s->tick_offset) & PWMCOUNT_MASK; + } + + s->pwmcfg = value; + break; + case A_COUNT: + /* The guest changed the counter, update the offset value.
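+ * While the timer is incrementing the offset stored is now - value; + * while it is stopped the offset is the value itself.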
*/ + new_offset = value; + + if (HAS_PWM_EN_BITS(s->pwmcfg)) { + new_offset = now - new_offset; + } + + s->tick_offset = new_offset; + break; + case A_PWMS: + scale = sifive_pwm_compute_scale(s); + new_offset = (((value & PWMCMP_MASK) << scale) & PWMCOUNT_MASK); + + if (HAS_PWM_EN_BITS(s->pwmcfg)) { + new_offset = now - new_offset; + } + + s->tick_offset = new_offset; + break; + case A_PWMCMP0: + s->pwmcmp[0] = value & PWMCMP_MASK; + break; + case A_PWMCMP1: + s->pwmcmp[1] = value & PWMCMP_MASK; + break; + case A_PWMCMP2: + s->pwmcmp[2] = value & PWMCMP_MASK; + break; + case A_PWMCMP3: + s->pwmcmp[3] = value & PWMCMP_MASK; + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad offset 0x%"HWADDR_PRIx"\n", __func__, addr); + } + + /* Update the alarms to reflect possible updated values */ + sifive_pwm_set_alarms(s); +} + +static void sifive_pwm_reset(DeviceState *dev) +{ + SiFivePwmState *s = SIFIVE_PWM(dev); + uint64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + + s->pwmcfg = 0x00000000; + s->pwmcmp[0] = 0x00000000; + s->pwmcmp[1] = 0x00000000; + s->pwmcmp[2] = 0x00000000; + s->pwmcmp[3] = 0x00000000; + + s->tick_offset = sifive_pwm_ns_to_ticks(s, now); +} + +static const MemoryRegionOps sifive_pwm_ops = { + .read = sifive_pwm_read, + .write = sifive_pwm_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static const VMStateDescription vmstate_sifive_pwm = { + .name = TYPE_SIFIVE_PWM, + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_TIMER_ARRAY(timer, SiFivePwmState, 4), + VMSTATE_UINT64(tick_offset, SiFivePwmState), + VMSTATE_UINT32(pwmcfg, SiFivePwmState), + VMSTATE_UINT32_ARRAY(pwmcmp, SiFivePwmState, 4), + VMSTATE_END_OF_LIST() + } +}; + +static Property sifive_pwm_properties[] = { + /* 0.5GHz per spec after FSBL */ + DEFINE_PROP_UINT64("clock-frequency", struct SiFivePwmState, + freq_hz, 500000000ULL), + DEFINE_PROP_END_OF_LIST(), +}; + +static void sifive_pwm_init(Object *obj) +{ + SiFivePwmState *s = SIFIVE_PWM(obj); + int i; + + for (i = 0; i < SIFIVE_PWM_IRQS; i++) { + sysbus_init_irq(SYS_BUS_DEVICE(obj), &s->irqs[i]); + } + + memory_region_init_io(&s->mmio, obj, &sifive_pwm_ops, s, + TYPE_SIFIVE_PWM, 0x100); + sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->mmio); +} + +static void sifive_pwm_realize(DeviceState *dev, Error **errp) +{ + SiFivePwmState *s = SIFIVE_PWM(dev); + + timer_init_ns(&s->timer[0], QEMU_CLOCK_VIRTUAL, + sifive_pwm_interrupt_0, s); + + timer_init_ns(&s->timer[1], QEMU_CLOCK_VIRTUAL, + sifive_pwm_interrupt_1, s); + + timer_init_ns(&s->timer[2], QEMU_CLOCK_VIRTUAL, + sifive_pwm_interrupt_2, s); + + timer_init_ns(&s->timer[3], QEMU_CLOCK_VIRTUAL, + sifive_pwm_interrupt_3, s); +} + +static void sifive_pwm_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = sifive_pwm_reset; + device_class_set_props(dc, sifive_pwm_properties); + dc->vmsd = &vmstate_sifive_pwm; + dc->realize = sifive_pwm_realize; +} + +static const TypeInfo sifive_pwm_info = { + .name = TYPE_SIFIVE_PWM, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(SiFivePwmState), + .instance_init = sifive_pwm_init, + .class_init = sifive_pwm_class_init, +}; + +static void sifive_pwm_register_types(void) +{ + type_register_static(&sifive_pwm_info); +} + +type_init(sifive_pwm_register_types) diff --git a/hw/timer/slavio_timer.c b/hw/timer/slavio_timer.c new file mode 100644 index 000000000..03e33fc59 --- /dev/null +++ b/hw/timer/slavio_timer.c @@ -0,0 +1,450 @@ +/* + * QEMU Sparc SLAVIO timer controller
emulation + * + * Copyright (c) 2003-2005 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "qemu/timer.h" +#include "hw/irq.h" +#include "hw/ptimer.h" +#include "hw/qdev-properties.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "trace.h" +#include "qemu/module.h" +#include "qom/object.h" + +/* + * Registers of hardware timer in sun4m. + * + * This is the timer/counter part of chip STP2001 (Slave I/O), also + * produced as NCR89C105. See + * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR89C105.txt + * + * The 31-bit counter is incremented every 500ns, in steps of 512 + * (i.e. at bit 9); bits 8..0 are always zero. Bit 31 is 1 when the + * limit has been reached. + * + * Per-CPU timers interrupt the local CPU; the system timer uses normal + * interrupt routing. + * + */ + +#define MAX_CPUS 16 + +typedef struct CPUTimerState { + qemu_irq irq; + ptimer_state *timer; + uint32_t count, counthigh, reached; + /* processor only */ + uint32_t run; + uint64_t limit; +} CPUTimerState; + +#define TYPE_SLAVIO_TIMER "slavio_timer" +OBJECT_DECLARE_SIMPLE_TYPE(SLAVIO_TIMERState, SLAVIO_TIMER) + +struct SLAVIO_TIMERState { + SysBusDevice parent_obj; + + uint32_t num_cpus; + uint32_t cputimer_mode; + CPUTimerState cputimer[MAX_CPUS + 1]; +}; + +typedef struct TimerContext { + MemoryRegion iomem; + SLAVIO_TIMERState *s; + unsigned int timer_index; /* 0 for system, 1 ...
MAX_CPUS for CPU timers */ +} TimerContext; + +#define SYS_TIMER_SIZE 0x14 +#define CPU_TIMER_SIZE 0x10 + +#define TIMER_LIMIT 0 +#define TIMER_COUNTER 1 +#define TIMER_COUNTER_NORST 2 +#define TIMER_STATUS 3 +#define TIMER_MODE 4 + +#define TIMER_COUNT_MASK32 0xfffffe00 +#define TIMER_LIMIT_MASK32 0x7fffffff +#define TIMER_MAX_COUNT64 0x7ffffffffffffe00ULL +#define TIMER_MAX_COUNT32 0x7ffffe00ULL +#define TIMER_REACHED 0x80000000 +#define TIMER_PERIOD 500ULL // 500ns +#define LIMIT_TO_PERIODS(l) (((l) >> 9) - 1) +#define PERIODS_TO_LIMIT(l) (((l) + 1) << 9) + +static int slavio_timer_is_user(TimerContext *tc) +{ + SLAVIO_TIMERState *s = tc->s; + unsigned int timer_index = tc->timer_index; + + return timer_index != 0 && (s->cputimer_mode & (1 << (timer_index - 1))); +} + +// Update count, set irq, update expire_time +// Convert from ptimer countdown units +static void slavio_timer_get_out(CPUTimerState *t) +{ + uint64_t count, limit; + + if (t->limit == 0) { /* free-run system or processor counter */ + limit = TIMER_MAX_COUNT32; + } else { + limit = t->limit; + } + count = limit - PERIODS_TO_LIMIT(ptimer_get_count(t->timer)); + + trace_slavio_timer_get_out(t->limit, t->counthigh, t->count); + t->count = count & TIMER_COUNT_MASK32; + t->counthigh = count >> 32; +} + +// timer callback +static void slavio_timer_irq(void *opaque) +{ + TimerContext *tc = opaque; + SLAVIO_TIMERState *s = tc->s; + CPUTimerState *t = &s->cputimer[tc->timer_index]; + + slavio_timer_get_out(t); + trace_slavio_timer_irq(t->counthigh, t->count); + /* if limit is 0 (free-run), there will be no match */ + if (t->limit != 0) { + t->reached = TIMER_REACHED; + } + /* there is no interrupt if user timer or free-run */ + if (!slavio_timer_is_user(tc) && t->limit != 0) { + qemu_irq_raise(t->irq); + } +} + +static uint64_t slavio_timer_mem_readl(void *opaque, hwaddr addr, + unsigned size) +{ + TimerContext *tc = opaque; + SLAVIO_TIMERState *s = tc->s; + uint32_t saddr, ret; + unsigned int timer_index = tc->timer_index; + CPUTimerState *t = &s->cputimer[timer_index]; + + saddr = addr >> 2; + switch (saddr) { + case TIMER_LIMIT: + // read limit (system counter mode) or read most significant + // part of counter (user mode) + if (slavio_timer_is_user(tc)) { + // read user timer MSW + slavio_timer_get_out(t); + ret = t->counthigh | t->reached; + } else { + // read limit + // clear irq + qemu_irq_lower(t->irq); + t->reached = 0; + ret = t->limit & TIMER_LIMIT_MASK32; + } + break; + case TIMER_COUNTER: + // read counter and reached bit (system mode) or read lsbits + // of counter (user mode) + slavio_timer_get_out(t); + if (slavio_timer_is_user(tc)) { // read user timer LSW + ret = t->count & TIMER_MAX_COUNT64; + } else { // read counter and reached bit + ret = (t->count & TIMER_MAX_COUNT32) | + t->reached; + } + break; + case TIMER_STATUS: + // only available in processor counter/timer + // read start/stop status + if (timer_index > 0) { + ret = t->run; + } else { + ret = 0; + } + break; + case TIMER_MODE: + // only available in system counter + // read user/system mode + ret = s->cputimer_mode; + break; + default: + trace_slavio_timer_mem_readl_invalid(addr); + ret = 0; + break; + } + trace_slavio_timer_mem_readl(addr, ret); + return ret; +} + +static void slavio_timer_mem_writel(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + TimerContext *tc = opaque; + SLAVIO_TIMERState *s = tc->s; + uint32_t saddr; + unsigned int timer_index = tc->timer_index; + CPUTimerState *t = &s->cputimer[timer_index]; + + trace_slavio_timer_mem_writel(addr,
val); + saddr = addr >> 2; + switch (saddr) { + case TIMER_LIMIT: + ptimer_transaction_begin(t->timer); + if (slavio_timer_is_user(tc)) { + uint64_t count; + + // set user counter MSW, reset counter + t->limit = TIMER_MAX_COUNT64; + t->counthigh = val & (TIMER_MAX_COUNT64 >> 32); + t->reached = 0; + count = ((uint64_t)t->counthigh << 32) | t->count; + trace_slavio_timer_mem_writel_limit(timer_index, count); + ptimer_set_count(t->timer, LIMIT_TO_PERIODS(t->limit - count)); + } else { + // set limit, reset counter + qemu_irq_lower(t->irq); + t->limit = val & TIMER_MAX_COUNT32; + if (t->limit == 0) { /* free-run */ + ptimer_set_limit(t->timer, + LIMIT_TO_PERIODS(TIMER_MAX_COUNT32), 1); + } else { + ptimer_set_limit(t->timer, LIMIT_TO_PERIODS(t->limit), 1); + } + } + ptimer_transaction_commit(t->timer); + break; + case TIMER_COUNTER: + if (slavio_timer_is_user(tc)) { + uint64_t count; + + // set user counter LSW, reset counter + t->limit = TIMER_MAX_COUNT64; + t->count = val & TIMER_MAX_COUNT64; + t->reached = 0; + count = ((uint64_t)t->counthigh) << 32 | t->count; + trace_slavio_timer_mem_writel_limit(timer_index, count); + ptimer_transaction_begin(t->timer); + ptimer_set_count(t->timer, LIMIT_TO_PERIODS(t->limit - count)); + ptimer_transaction_commit(t->timer); + } else { + trace_slavio_timer_mem_writel_counter_invalid(); + } + break; + case TIMER_COUNTER_NORST: + // set limit without resetting counter + t->limit = val & TIMER_MAX_COUNT32; + ptimer_transaction_begin(t->timer); + if (t->limit == 0) { /* free-run */ + ptimer_set_limit(t->timer, LIMIT_TO_PERIODS(TIMER_MAX_COUNT32), 0); + } else { + ptimer_set_limit(t->timer, LIMIT_TO_PERIODS(t->limit), 0); + } + ptimer_transaction_commit(t->timer); + break; + case TIMER_STATUS: + ptimer_transaction_begin(t->timer); + if (slavio_timer_is_user(tc)) { + // start/stop user counter + if (val & 1) { + trace_slavio_timer_mem_writel_status_start(timer_index); + ptimer_run(t->timer, 0); + } else { + trace_slavio_timer_mem_writel_status_stop(timer_index); + ptimer_stop(t->timer); + } + } + t->run = val & 1; + ptimer_transaction_commit(t->timer); + break; + case TIMER_MODE: + if (timer_index == 0) { + unsigned int i; + + for (i = 0; i < s->num_cpus; i++) { + unsigned int processor = 1 << i; + CPUTimerState *curr_timer = &s->cputimer[i + 1]; + + ptimer_transaction_begin(curr_timer->timer); + // check for a change in timer mode for this processor + if ((val & processor) != (s->cputimer_mode & processor)) { + if (val & processor) { // counter -> user timer + qemu_irq_lower(curr_timer->irq); + // counters are always running + if (!curr_timer->run) { + ptimer_stop(curr_timer->timer); + } + // user timer limit is always the same + curr_timer->limit = TIMER_MAX_COUNT64; + ptimer_set_limit(curr_timer->timer, + LIMIT_TO_PERIODS(curr_timer->limit), + 1); + // set this processor's user timer bit in config + // register + s->cputimer_mode |= processor; + trace_slavio_timer_mem_writel_mode_user(timer_index); + } else { // user timer -> counter + // start the counter + ptimer_run(curr_timer->timer, 0); + // clear this processor's user timer bit in config + // register + s->cputimer_mode &= ~processor; + trace_slavio_timer_mem_writel_mode_counter(timer_index); + } + } + ptimer_transaction_commit(curr_timer->timer); + } + } else { + trace_slavio_timer_mem_writel_mode_invalid(); + } + break; + default: + trace_slavio_timer_mem_writel_invalid(addr); + break; + } +} + +static const MemoryRegionOps slavio_timer_mem_ops = { + .read = slavio_timer_mem_readl, + .write =
slavio_timer_mem_writel, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 8, + }, + .impl = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +static const VMStateDescription vmstate_timer = { + .name = "timer", + .version_id = 3, + .minimum_version_id = 3, + .fields = (VMStateField[]) { + VMSTATE_UINT64(limit, CPUTimerState), + VMSTATE_UINT32(count, CPUTimerState), + VMSTATE_UINT32(counthigh, CPUTimerState), + VMSTATE_UINT32(reached, CPUTimerState), + VMSTATE_UINT32(run, CPUTimerState), + VMSTATE_PTIMER(timer, CPUTimerState), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_slavio_timer = { + .name = "slavio_timer", + .version_id = 3, + .minimum_version_id = 3, + .fields = (VMStateField[]) { + VMSTATE_STRUCT_ARRAY(cputimer, SLAVIO_TIMERState, MAX_CPUS + 1, 3, + vmstate_timer, CPUTimerState), + VMSTATE_END_OF_LIST() + } +}; + +static void slavio_timer_reset(DeviceState *d) +{ + SLAVIO_TIMERState *s = SLAVIO_TIMER(d); + unsigned int i; + CPUTimerState *curr_timer; + + for (i = 0; i <= MAX_CPUS; i++) { + curr_timer = &s->cputimer[i]; + curr_timer->limit = 0; + curr_timer->count = 0; + curr_timer->reached = 0; + if (i <= s->num_cpus) { + ptimer_transaction_begin(curr_timer->timer); + ptimer_set_limit(curr_timer->timer, + LIMIT_TO_PERIODS(TIMER_MAX_COUNT32), 1); + ptimer_run(curr_timer->timer, 0); + curr_timer->run = 1; + ptimer_transaction_commit(curr_timer->timer); + } + } + s->cputimer_mode = 0; +} + +static void slavio_timer_init(Object *obj) +{ + SLAVIO_TIMERState *s = SLAVIO_TIMER(obj); + SysBusDevice *dev = SYS_BUS_DEVICE(obj); + unsigned int i; + TimerContext *tc; + + for (i = 0; i <= MAX_CPUS; i++) { + uint64_t size; + char timer_name[20]; + + tc = g_malloc0(sizeof(TimerContext)); + tc->s = s; + tc->timer_index = i; + + s->cputimer[i].timer = ptimer_init(slavio_timer_irq, tc, + PTIMER_POLICY_DEFAULT); + ptimer_transaction_begin(s->cputimer[i].timer); + ptimer_set_period(s->cputimer[i].timer, TIMER_PERIOD); + ptimer_transaction_commit(s->cputimer[i].timer); + + size = i == 0 ?
SYS_TIMER_SIZE : CPU_TIMER_SIZE; + snprintf(timer_name, sizeof(timer_name), "timer-%i", i); + memory_region_init_io(&tc->iomem, obj, &slavio_timer_mem_ops, tc, + timer_name, size); + sysbus_init_mmio(dev, &tc->iomem); + + sysbus_init_irq(dev, &s->cputimer[i].irq); + } +} + +static Property slavio_timer_properties[] = { + DEFINE_PROP_UINT32("num_cpus", SLAVIO_TIMERState, num_cpus, 0), + DEFINE_PROP_END_OF_LIST(), +}; + +static void slavio_timer_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = slavio_timer_reset; + dc->vmsd = &vmstate_slavio_timer; + device_class_set_props(dc, slavio_timer_properties); +} + +static const TypeInfo slavio_timer_info = { + .name = TYPE_SLAVIO_TIMER, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(SLAVIO_TIMERState), + .instance_init = slavio_timer_init, + .class_init = slavio_timer_class_init, +}; + +static void slavio_timer_register_types(void) +{ + type_register_static(&slavio_timer_info); +} + +type_init(slavio_timer_register_types) diff --git a/hw/timer/sse-counter.c b/hw/timer/sse-counter.c new file mode 100644 index 000000000..16c0e8ad1 --- /dev/null +++ b/hw/timer/sse-counter.c @@ -0,0 +1,473 @@ +/* + * Arm SSE Subsystem System Counter + * + * Copyright (c) 2020 Linaro Limited + * Written by Peter Maydell + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 or + * (at your option) any later version. + */ + +/* + * This is a model of the "System counter" which is documented in + * the Arm SSE-123 Example Subsystem Technical Reference Manual: + * https://developer.arm.com/documentation/101370/latest/ + * + * The system counter is a non-stop 64-bit up-counter. It provides + * this count value to other devices like the SSE system timer, + * which are driven by this system timestamp rather than directly + * from a clock. Internally to the counter the count is actually + * 88-bit precision (64.24 fixed point), with a programmable scale factor. + * + * The hardware has the optional feature that it supports dynamic + * clock switching, where two clock inputs are connected, and which + * one is used is selected via a CLKSEL input signal. Since the + * users of this device in QEMU don't use this feature, we only model + * the HWCLKSW=0 configuration. + */ +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "qemu/timer.h" +#include "qapi/error.h" +#include "trace.h" +#include "hw/timer/sse-counter.h" +#include "hw/sysbus.h" +#include "hw/registerfields.h" +#include "hw/clock.h" +#include "hw/qdev-clock.h" +#include "migration/vmstate.h" + +/* Registers in the control frame */ +REG32(CNTCR, 0x0) + FIELD(CNTCR, EN, 0, 1) + FIELD(CNTCR, HDBG, 1, 1) + FIELD(CNTCR, SCEN, 2, 1) + FIELD(CNTCR, INTRMASK, 3, 1) + FIELD(CNTCR, PSLVERRDIS, 4, 1) + FIELD(CNTCR, INTRCLR, 5, 1) +/* + * Although CNTCR defines interrupt-related bits, the counter doesn't + * appear to actually have an interrupt output. So INTRCLR is + * effectively a RAZ/WI bit, as are the reserved bits [31:6]. 
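+ * (RAZ/WI: the bits read as zero and writes to them are ignored.)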
+ */ +#define CNTCR_VALID_MASK (R_CNTCR_EN_MASK | R_CNTCR_HDBG_MASK | \ + R_CNTCR_SCEN_MASK | R_CNTCR_INTRMASK_MASK | \ + R_CNTCR_PSLVERRDIS_MASK) +REG32(CNTSR, 0x4) +REG32(CNTCV_LO, 0x8) +REG32(CNTCV_HI, 0xc) +REG32(CNTSCR, 0x10) /* Aliased with CNTSCR0 */ +REG32(CNTID, 0x1c) + FIELD(CNTID, CNTSC, 0, 4) + FIELD(CNTID, CNTCS, 16, 1) + FIELD(CNTID, CNTSELCLK, 17, 2) + FIELD(CNTID, CNTSCR_OVR, 19, 1) +REG32(CNTSCR0, 0xd0) +REG32(CNTSCR1, 0xd4) + +/* Registers in the status frame */ +REG32(STATUS_CNTCV_LO, 0x0) +REG32(STATUS_CNTCV_HI, 0x4) + +/* Standard ID registers, present in both frames */ +REG32(PID4, 0xFD0) +REG32(PID5, 0xFD4) +REG32(PID6, 0xFD8) +REG32(PID7, 0xFDC) +REG32(PID0, 0xFE0) +REG32(PID1, 0xFE4) +REG32(PID2, 0xFE8) +REG32(PID3, 0xFEC) +REG32(CID0, 0xFF0) +REG32(CID1, 0xFF4) +REG32(CID2, 0xFF8) +REG32(CID3, 0xFFC) + +/* PID/CID values */ +static const int control_id[] = { + 0x04, 0x00, 0x00, 0x00, /* PID4..PID7 */ + 0xba, 0xb0, 0x0b, 0x00, /* PID0..PID3 */ + 0x0d, 0xf0, 0x05, 0xb1, /* CID0..CID3 */ +}; + +static const int status_id[] = { + 0x04, 0x00, 0x00, 0x00, /* PID4..PID7 */ + 0xbb, 0xb0, 0x0b, 0x00, /* PID0..PID3 */ + 0x0d, 0xf0, 0x05, 0xb1, /* CID0..CID3 */ +}; + +static void sse_counter_notify_users(SSECounter *s) +{ + /* + * Notify users of the count timestamp that they may + * need to recalculate. + */ + notifier_list_notify(&s->notifier_list, NULL); +} + +static bool sse_counter_enabled(SSECounter *s) +{ + return (s->cntcr & R_CNTCR_EN_MASK) != 0; +} + +uint64_t sse_counter_tick_to_time(SSECounter *s, uint64_t tick) +{ + if (!sse_counter_enabled(s)) { + return UINT64_MAX; + } + + tick -= s->ticks_then; + + if (s->cntcr & R_CNTCR_SCEN_MASK) { + /* Adjust the tick count to account for the scale factor */ + tick = muldiv64(tick, 0x01000000, s->cntscr0); + } + + return s->ns_then + clock_ticks_to_ns(s->clk, tick); +} + +void sse_counter_register_consumer(SSECounter *s, Notifier *notifier) +{ + /* + * For the moment we assume that both we and the devices + * which consume us last for the life of the simulation, + * and so there is no mechanism for removing a notifier. + */ + notifier_list_add(&s->notifier_list, notifier); +} + +uint64_t sse_counter_for_timestamp(SSECounter *s, uint64_t now) +{ + /* Return the CNTCV value for a particular timestamp (clock ns value). */ + uint64_t ticks; + + if (!sse_counter_enabled(s)) { + /* Counter is disabled and does not increment */ + return s->ticks_then; + } + + ticks = clock_ns_to_ticks(s->clk, now - s->ns_then); + if (s->cntcr & R_CNTCR_SCEN_MASK) { + /* + * Scaling is enabled. The CNTSCR value is the amount added to + * the underlying 88-bit counter for every tick of the + * underlying clock; CNTCV is the top 64 bits of that full + * 88-bit value. Multiplying the tick count by CNTSCR tells us + * how much the full 88-bit counter has moved on; we then + * divide that by 0x01000000 to find out how much the 64-bit + * visible portion has advanced. muldiv64() gives us the + * necessary at-least-88-bit precision for the intermediate + * result. + */ + ticks = muldiv64(ticks, s->cntscr0, 0x01000000); + } + return s->ticks_then + ticks; +} + +static uint64_t sse_cntcv(SSECounter *s) +{ + /* Return the CNTCV value for the current time */ + return sse_counter_for_timestamp(s, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL)); +} + +static void sse_write_cntcv(SSECounter *s, uint32_t value, unsigned startbit) +{ + /* + * Write one 32-bit half of the counter value; startbit is the + * bit position of this half in the 64-bit word, either 0 or 32. 
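+ * The write re-bases the (ns_then, ticks_then) pair at the current + * time so that the new count takes effect immediately.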
+ */ + uint64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + uint64_t cntcv = sse_counter_for_timestamp(s, now); + + cntcv = deposit64(cntcv, startbit, 32, value); + s->ticks_then = cntcv; + s->ns_then = now; + sse_counter_notify_users(s); +} + +static uint64_t sse_counter_control_read(void *opaque, hwaddr offset, + unsigned size) +{ + SSECounter *s = SSE_COUNTER(opaque); + uint64_t r; + + switch (offset) { + case A_CNTCR: + r = s->cntcr; + break; + case A_CNTSR: + /* + * The only bit here is DBGH, indicating that the counter has been + * halted via the Halt-on-Debug signal. We don't implement halting + * debug, so the whole register always reads as zero. + */ + r = 0; + break; + case A_CNTCV_LO: + r = extract64(sse_cntcv(s), 0, 32); + break; + case A_CNTCV_HI: + r = extract64(sse_cntcv(s), 32, 32); + break; + case A_CNTID: + /* + * For our implementation: + * - CNTSCR can only be written when CNTCR.EN == 0 + * - HWCLKSW=0, so selected clock is always CLK0 + * - counter scaling is implemented + */ + r = (1 << R_CNTID_CNTSELCLK_SHIFT) | (1 << R_CNTID_CNTSC_SHIFT); + break; + case A_CNTSCR: + case A_CNTSCR0: + r = s->cntscr0; + break; + case A_CNTSCR1: + /* If HWCLKSW == 0, CNTSCR1 is RAZ/WI */ + r = 0; + break; + case A_PID4 ... A_CID3: + r = control_id[(offset - A_PID4) / 4]; + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "SSE System Counter control frame read: bad offset 0x%x", + (unsigned)offset); + r = 0; + break; + } + + trace_sse_counter_control_read(offset, r, size); + return r; +} + +static void sse_counter_control_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + SSECounter *s = SSE_COUNTER(opaque); + + trace_sse_counter_control_write(offset, value, size); + + switch (offset) { + case A_CNTCR: + /* + * Although CNTCR defines interrupt-related bits, the counter doesn't + * appear to actually have an interrupt output. So INTRCLR is + * effectively a RAZ/WI bit, as are the reserved bits [31:6]. + * The documentation does not explicitly say so, but we assume + * that changing the scale factor while the counter is enabled + * by toggling CNTCR.SCEN has the same behaviour (making the counter + * value UNKNOWN) as changing it by writing to CNTSCR, and so we + * don't need to try to recalculate for that case. + */ + value &= CNTCR_VALID_MASK; + if ((value ^ s->cntcr) & R_CNTCR_EN_MASK) { + /* + * Whether the counter is being enabled or disabled, the + * required action is the same: sync the (ns_then, ticks_then) + * tuple. + */ + uint64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + s->ticks_then = sse_counter_for_timestamp(s, now); + s->ns_then = now; + sse_counter_notify_users(s); + } + s->cntcr = value; + break; + case A_CNTCV_LO: + sse_write_cntcv(s, value, 0); + break; + case A_CNTCV_HI: + sse_write_cntcv(s, value, 32); + break; + case A_CNTSCR: + case A_CNTSCR0: + /* + * If the scale registers are changed when the counter is enabled, + * the count value becomes UNKNOWN. So we don't try to recalculate + * anything here but only do it on a write to CNTCR.EN. + */ + s->cntscr0 = value; + break; + case A_CNTSCR1: + /* If HWCLKSW == 0, CNTSCR1 is RAZ/WI */ + break; + case A_CNTSR: + case A_CNTID: + case A_PID4 ... 
A_CID3: + qemu_log_mask(LOG_GUEST_ERROR, + "SSE System Counter control frame: write to RO offset 0x%x\n", + (unsigned)offset); + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "SSE System Counter control frame: write to bad offset 0x%x\n", + (unsigned)offset); + break; + } +} + +static uint64_t sse_counter_status_read(void *opaque, hwaddr offset, + unsigned size) +{ + SSECounter *s = SSE_COUNTER(opaque); + uint64_t r; + + switch (offset) { + case A_STATUS_CNTCV_LO: + r = extract64(sse_cntcv(s), 0, 32); + break; + case A_STATUS_CNTCV_HI: + r = extract64(sse_cntcv(s), 32, 32); + break; + case A_PID4 ... A_CID3: + r = status_id[(offset - A_PID4) / 4]; + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "SSE System Counter status frame read: bad offset 0x%x", + (unsigned)offset); + r = 0; + break; + } + + trace_sse_counter_status_read(offset, r, size); + return r; +} + +static void sse_counter_status_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + trace_sse_counter_status_write(offset, value, size); + + switch (offset) { + case A_STATUS_CNTCV_LO: + case A_STATUS_CNTCV_HI: + case A_PID4 ... A_CID3: + qemu_log_mask(LOG_GUEST_ERROR, + "SSE System Counter status frame: write to RO offset 0x%x\n", + (unsigned)offset); + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "SSE System Counter status frame: write to bad offset 0x%x\n", + (unsigned)offset); + break; + } +} + +static const MemoryRegionOps sse_counter_control_ops = { + .read = sse_counter_control_read, + .write = sse_counter_control_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid.min_access_size = 4, + .valid.max_access_size = 4, +}; + +static const MemoryRegionOps sse_counter_status_ops = { + .read = sse_counter_status_read, + .write = sse_counter_status_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid.min_access_size = 4, + .valid.max_access_size = 4, +}; + +static void sse_counter_reset(DeviceState *dev) +{ + SSECounter *s = SSE_COUNTER(dev); + + trace_sse_counter_reset(); + + s->cntcr = 0; + s->cntscr0 = 0x01000000; + s->ns_then = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + s->ticks_then = 0; +} + +static void sse_clk_callback(void *opaque, ClockEvent event) +{ + SSECounter *s = SSE_COUNTER(opaque); + uint64_t now; + + switch (event) { + case ClockPreUpdate: + /* + * Before the clock period updates, set (ticks_then, ns_then) + * to the current time and tick count (as calculated with + * the old clock period). 
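+ * Tick counts after the update are then computed with the new + * period relative to this snapshot, keeping the count continuous.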
+ */ + if (sse_counter_enabled(s)) { + now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + s->ticks_then = sse_counter_for_timestamp(s, now); + s->ns_then = now; + } + break; + case ClockUpdate: + sse_counter_notify_users(s); + break; + default: + break; + } +} + +static void sse_counter_init(Object *obj) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + SSECounter *s = SSE_COUNTER(obj); + + notifier_list_init(&s->notifier_list); + + s->clk = qdev_init_clock_in(DEVICE(obj), "CLK", sse_clk_callback, s, + ClockPreUpdate | ClockUpdate); + memory_region_init_io(&s->control_mr, obj, &sse_counter_control_ops, + s, "sse-counter-control", 0x1000); + memory_region_init_io(&s->status_mr, obj, &sse_counter_status_ops, + s, "sse-counter-status", 0x1000); + sysbus_init_mmio(sbd, &s->control_mr); + sysbus_init_mmio(sbd, &s->status_mr); +} + +static void sse_counter_realize(DeviceState *dev, Error **errp) +{ + SSECounter *s = SSE_COUNTER(dev); + + if (!clock_has_source(s->clk)) { + error_setg(errp, "SSE system counter: CLK must be connected"); + return; + } +} + +static const VMStateDescription sse_counter_vmstate = { + .name = "sse-counter", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_CLOCK(clk, SSECounter), + VMSTATE_END_OF_LIST() + } +}; + +static void sse_counter_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = sse_counter_realize; + dc->vmsd = &sse_counter_vmstate; + dc->reset = sse_counter_reset; +} + +static const TypeInfo sse_counter_info = { + .name = TYPE_SSE_COUNTER, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(SSECounter), + .instance_init = sse_counter_init, + .class_init = sse_counter_class_init, +}; + +static void sse_counter_register_types(void) +{ + type_register_static(&sse_counter_info); +} + +type_init(sse_counter_register_types); diff --git a/hw/timer/sse-timer.c b/hw/timer/sse-timer.c new file mode 100644 index 000000000..f959cb9d6 --- /dev/null +++ b/hw/timer/sse-timer.c @@ -0,0 +1,471 @@ +/* + * Arm SSE Subsystem System Timer + * + * Copyright (c) 2020 Linaro Limited + * Written by Peter Maydell + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 or + * (at your option) any later version. + */ + +/* + * This is a model of the "System timer" which is documented in + * the Arm SSE-123 Example Subsystem Technical Reference Manual: + * https://developer.arm.com/documentation/101370/latest/ + * + * The timer is based around a simple 64-bit incrementing counter + * (readable from CNTPCT_HI/LO). The timer fires when + * Counter - CompareValue >= 0. + * The CompareValue is guest-writable, via CNTP_CVAL_HI/LO. + * CNTP_TVAL is an alternative view of the CompareValue defined by + * TimerValue = CompareValue[31:0] - Counter[31:0] + * which can be both read and written. + * This part is similar to the generic timer in an Arm A-class CPU. + * + * The timer also has a separate auto-increment timer. When this + * timer is enabled, then the AutoIncrValue is set to: + * AutoIncrValue = Reload + Counter + * and this timer fires when + * Counter - AutoIncrValue >= 0 + * at which point, an interrupt is generated and the new AutoIncrValue + * is calculated. + * When the auto-increment timer is enabled, interrupt generation + * via the compare/timervalue registers is disabled. 
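+ * + * For example, if Reload is 1000 and the Counter is at 5000 when the + * auto-increment timer is enabled, AutoIncrValue becomes 6000; the + * timer fires when the Counter reaches 6000, then re-arms for 7000, + * and so on.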
+ */ +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "qemu/timer.h" +#include "qapi/error.h" +#include "trace.h" +#include "hw/timer/sse-timer.h" +#include "hw/timer/sse-counter.h" +#include "hw/sysbus.h" +#include "hw/irq.h" +#include "hw/registerfields.h" +#include "hw/clock.h" +#include "hw/qdev-clock.h" +#include "hw/qdev-properties.h" +#include "migration/vmstate.h" + +REG32(CNTPCT_LO, 0x0) +REG32(CNTPCT_HI, 0x4) +REG32(CNTFRQ, 0x10) +REG32(CNTP_CVAL_LO, 0x20) +REG32(CNTP_CVAL_HI, 0x24) +REG32(CNTP_TVAL, 0x28) +REG32(CNTP_CTL, 0x2c) + FIELD(CNTP_CTL, ENABLE, 0, 1) + FIELD(CNTP_CTL, IMASK, 1, 1) + FIELD(CNTP_CTL, ISTATUS, 2, 1) +REG32(CNTP_AIVAL_LO, 0x40) +REG32(CNTP_AIVAL_HI, 0x44) +REG32(CNTP_AIVAL_RELOAD, 0x48) +REG32(CNTP_AIVAL_CTL, 0x4c) + FIELD(CNTP_AIVAL_CTL, EN, 0, 1) + FIELD(CNTP_AIVAL_CTL, CLR, 1, 1) +REG32(CNTP_CFG, 0x50) + FIELD(CNTP_CFG, AIVAL, 0, 4) +#define R_CNTP_CFG_AIVAL_IMPLEMENTED 1 +REG32(PID4, 0xFD0) +REG32(PID5, 0xFD4) +REG32(PID6, 0xFD8) +REG32(PID7, 0xFDC) +REG32(PID0, 0xFE0) +REG32(PID1, 0xFE4) +REG32(PID2, 0xFE8) +REG32(PID3, 0xFEC) +REG32(CID0, 0xFF0) +REG32(CID1, 0xFF4) +REG32(CID2, 0xFF8) +REG32(CID3, 0xFFC) + +/* PID/CID values */ +static const int timer_id[] = { + 0x04, 0x00, 0x00, 0x00, /* PID4..PID7 */ + 0xb7, 0xb0, 0x0b, 0x00, /* PID0..PID3 */ + 0x0d, 0xf0, 0x05, 0xb1, /* CID0..CID3 */ +}; + +static bool sse_is_autoinc(SSETimer *s) +{ + return (s->cntp_aival_ctl & R_CNTP_AIVAL_CTL_EN_MASK) != 0; +} + +static bool sse_enabled(SSETimer *s) +{ + return (s->cntp_ctl & R_CNTP_CTL_ENABLE_MASK) != 0; +} + +static uint64_t sse_cntpct(SSETimer *s) +{ + /* Return the CNTPCT value for the current time */ + return sse_counter_for_timestamp(s->counter, + qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL)); +} + +static bool sse_timer_status(SSETimer *s) +{ + /* + * Return true if timer condition is met. This is used for both + * the CNTP_CTL.ISTATUS bit and for whether (unless masked) we + * assert our IRQ. + * The documentation is unclear about the behaviour of ISTATUS when + * in autoincrement mode; we assume that it follows CNTP_AIVAL_CTL.CLR + * (ie whether the autoincrement timer is asserting the interrupt). + */ + if (!sse_enabled(s)) { + return false; + } + + if (sse_is_autoinc(s)) { + return s->cntp_aival_ctl & R_CNTP_AIVAL_CTL_CLR_MASK; + } else { + return sse_cntpct(s) >= s->cntp_cval; + } +} + +static void sse_update_irq(SSETimer *s) +{ + bool irqstate = (!(s->cntp_ctl & R_CNTP_CTL_IMASK_MASK) && + sse_timer_status(s)); + + qemu_set_irq(s->irq, irqstate); +} + +static void sse_set_timer(SSETimer *s, uint64_t nexttick) +{ + /* Set the timer to expire at nexttick */ + uint64_t expiry = sse_counter_tick_to_time(s->counter, nexttick); + + if (expiry <= INT64_MAX) { + timer_mod_ns(&s->timer, expiry); + } else { + /* + * nexttick is so far in the future that it would overflow the + * signed 64-bit range of a QEMUTimer. Since timer_mod_ns() + * expiry times are absolute, not relative, we are never going + * to be able to set the timer to this value, so we must just + * assume that guest execution can never run so long that it + * reaches the theoretical point when the timer fires. + * This is also the code path for "counter is not running", + * which is signalled by expiry == UINT64_MAX. 
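+ * (sse_counter_tick_to_time() returns UINT64_MAX when the counter + * is disabled.)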
+ */ + timer_del(&s->timer); + } +} + +static void sse_recalc_timer(SSETimer *s) +{ + /* Recalculate the normal timer */ + uint64_t count, nexttick; + + if (sse_is_autoinc(s)) { + return; + } + + if (!sse_enabled(s)) { + timer_del(&s->timer); + return; + } + + count = sse_cntpct(s); + + if (count >= s->cntp_cval) { + /* + * Timer condition already met. In theory we have a transition when + * the count rolls back over to 0, but that is so far in the future + * that it is not representable as a timer_mod() expiry, so in + * fact sse_set_timer() will always just delete the timer. + */ + nexttick = UINT64_MAX; + } else { + /* Next transition is when count hits cval */ + nexttick = s->cntp_cval; + } + sse_set_timer(s, nexttick); + sse_update_irq(s); +} + +static void sse_autoinc(SSETimer *s) +{ + /* Auto-increment the AIVAL, and set the timer accordingly */ + s->cntp_aival = sse_cntpct(s) + s->cntp_aival_reload; + sse_set_timer(s, s->cntp_aival); +} + +static void sse_timer_cb(void *opaque) +{ + SSETimer *s = SSE_TIMER(opaque); + + if (sse_is_autoinc(s)) { + uint64_t count = sse_cntpct(s); + + if (count >= s->cntp_aival) { + /* Timer condition met, set CLR and do another autoinc */ + s->cntp_aival_ctl |= R_CNTP_AIVAL_CTL_CLR_MASK; + s->cntp_aival = count + s->cntp_aival_reload; + } + sse_set_timer(s, s->cntp_aival); + sse_update_irq(s); + } else { + sse_recalc_timer(s); + } +} + +static uint64_t sse_timer_read(void *opaque, hwaddr offset, unsigned size) +{ + SSETimer *s = SSE_TIMER(opaque); + uint64_t r; + + switch (offset) { + case A_CNTPCT_LO: + r = extract64(sse_cntpct(s), 0, 32); + break; + case A_CNTPCT_HI: + r = extract64(sse_cntpct(s), 32, 32); + break; + case A_CNTFRQ: + r = s->cntfrq; + break; + case A_CNTP_CVAL_LO: + r = extract64(s->cntp_cval, 0, 32); + break; + case A_CNTP_CVAL_HI: + r = extract64(s->cntp_cval, 32, 32); + break; + case A_CNTP_TVAL: + r = extract64(s->cntp_cval - sse_cntpct(s), 0, 32); + break; + case A_CNTP_CTL: + r = s->cntp_ctl; + if (sse_timer_status(s)) { + r |= R_CNTP_CTL_ISTATUS_MASK; + } + break; + case A_CNTP_AIVAL_LO: + r = extract64(s->cntp_aival, 0, 32); + break; + case A_CNTP_AIVAL_HI: + r = extract64(s->cntp_aival, 32, 32); + break; + case A_CNTP_AIVAL_RELOAD: + r = s->cntp_aival_reload; + break; + case A_CNTP_AIVAL_CTL: + /* + * All the bits of AIVAL_CTL are documented as WO, but this is probably + * a documentation error. We implement them as readable. + */ + r = s->cntp_aival_ctl; + break; + case A_CNTP_CFG: + r = R_CNTP_CFG_AIVAL_IMPLEMENTED << R_CNTP_CFG_AIVAL_SHIFT; + break; + case A_PID4 ... 
A_CID3: + r = timer_id[(offset - A_PID4) / 4]; + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "SSE System Timer read: bad offset 0x%x", + (unsigned) offset); + r = 0; + break; + } + + trace_sse_timer_read(offset, r, size); + return r; +} + +static void sse_timer_write(void *opaque, hwaddr offset, uint64_t value, + unsigned size) +{ + SSETimer *s = SSE_TIMER(opaque); + + trace_sse_timer_write(offset, value, size); + + switch (offset) { + case A_CNTFRQ: + s->cntfrq = value; + break; + case A_CNTP_CVAL_LO: + s->cntp_cval = deposit64(s->cntp_cval, 0, 32, value); + sse_recalc_timer(s); + break; + case A_CNTP_CVAL_HI: + s->cntp_cval = deposit64(s->cntp_cval, 32, 32, value); + sse_recalc_timer(s); + break; + case A_CNTP_TVAL: + s->cntp_cval = sse_cntpct(s) + sextract64(value, 0, 32); + sse_recalc_timer(s); + break; + case A_CNTP_CTL: + { + uint32_t old_ctl = s->cntp_ctl; + value &= R_CNTP_CTL_ENABLE_MASK | R_CNTP_CTL_IMASK_MASK; + s->cntp_ctl = value; + if ((old_ctl ^ s->cntp_ctl) & R_CNTP_CTL_ENABLE_MASK) { + if (sse_enabled(s)) { + if (sse_is_autoinc(s)) { + sse_autoinc(s); + } else { + sse_recalc_timer(s); + } + } + } + sse_update_irq(s); + break; + } + case A_CNTP_AIVAL_RELOAD: + s->cntp_aival_reload = value; + break; + case A_CNTP_AIVAL_CTL: + { + uint32_t old_ctl = s->cntp_aival_ctl; + + /* EN bit is writeable; CLR bit is write-0-to-clear, write-1-ignored */ + s->cntp_aival_ctl &= ~R_CNTP_AIVAL_CTL_EN_MASK; + s->cntp_aival_ctl |= value & R_CNTP_AIVAL_CTL_EN_MASK; + if (!(value & R_CNTP_AIVAL_CTL_CLR_MASK)) { + s->cntp_aival_ctl &= ~R_CNTP_AIVAL_CTL_CLR_MASK; + } + if ((old_ctl ^ s->cntp_aival_ctl) & R_CNTP_AIVAL_CTL_EN_MASK) { + /* Auto-increment toggled on/off */ + if (sse_enabled(s)) { + if (sse_is_autoinc(s)) { + sse_autoinc(s); + } else { + sse_recalc_timer(s); + } + } + } + sse_update_irq(s); + break; + } + case A_CNTPCT_LO: + case A_CNTPCT_HI: + case A_CNTP_CFG: + case A_CNTP_AIVAL_LO: + case A_CNTP_AIVAL_HI: + case A_PID4 ... 
A_CID3: + qemu_log_mask(LOG_GUEST_ERROR, + "SSE System Timer write: write to RO offset 0x%x\n", + (unsigned)offset); + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "SSE System Timer write: bad offset 0x%x\n", + (unsigned)offset); + break; + } +} + +static const MemoryRegionOps sse_timer_ops = { + .read = sse_timer_read, + .write = sse_timer_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid.min_access_size = 4, + .valid.max_access_size = 4, +}; + +static void sse_timer_reset(DeviceState *dev) +{ + SSETimer *s = SSE_TIMER(dev); + + trace_sse_timer_reset(); + + timer_del(&s->timer); + s->cntfrq = 0; + s->cntp_ctl = 0; + s->cntp_cval = 0; + s->cntp_aival = 0; + s->cntp_aival_ctl = 0; + s->cntp_aival_reload = 0; +} + +static void sse_timer_counter_callback(Notifier *notifier, void *data) +{ + SSETimer *s = container_of(notifier, SSETimer, counter_notifier); + + /* System counter told us we need to recalculate */ + if (sse_enabled(s)) { + if (sse_is_autoinc(s)) { + sse_set_timer(s, s->cntp_aival); + } else { + sse_recalc_timer(s); + } + } +} + +static void sse_timer_init(Object *obj) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + SSETimer *s = SSE_TIMER(obj); + + memory_region_init_io(&s->iomem, obj, &sse_timer_ops, + s, "sse-timer", 0x1000); + sysbus_init_mmio(sbd, &s->iomem); + sysbus_init_irq(sbd, &s->irq); +} + +static void sse_timer_realize(DeviceState *dev, Error **errp) +{ + SSETimer *s = SSE_TIMER(dev); + + if (!s->counter) { + error_setg(errp, "counter property was not set"); + return; + } + + s->counter_notifier.notify = sse_timer_counter_callback; + sse_counter_register_consumer(s->counter, &s->counter_notifier); + + timer_init_ns(&s->timer, QEMU_CLOCK_VIRTUAL, sse_timer_cb, s); +} + +static const VMStateDescription sse_timer_vmstate = { + .name = "sse-timer", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_TIMER(timer, SSETimer), + VMSTATE_UINT32(cntfrq, SSETimer), + VMSTATE_UINT32(cntp_ctl, SSETimer), + VMSTATE_UINT64(cntp_cval, SSETimer), + VMSTATE_UINT64(cntp_aival, SSETimer), + VMSTATE_UINT32(cntp_aival_ctl, SSETimer), + VMSTATE_UINT32(cntp_aival_reload, SSETimer), + VMSTATE_END_OF_LIST() + } +}; + +static Property sse_timer_properties[] = { + DEFINE_PROP_LINK("counter", SSETimer, counter, TYPE_SSE_COUNTER, SSECounter *), + DEFINE_PROP_END_OF_LIST(), +}; + +static void sse_timer_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = sse_timer_realize; + dc->vmsd = &sse_timer_vmstate; + dc->reset = sse_timer_reset; + device_class_set_props(dc, sse_timer_properties); +} + +static const TypeInfo sse_timer_info = { + .name = TYPE_SSE_TIMER, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(SSETimer), + .instance_init = sse_timer_init, + .class_init = sse_timer_class_init, +}; + +static void sse_timer_register_types(void) +{ + type_register_static(&sse_timer_info); +} + +type_init(sse_timer_register_types); diff --git a/hw/timer/stellaris-gptm.c b/hw/timer/stellaris-gptm.c new file mode 100644 index 000000000..fd71c79be --- /dev/null +++ b/hw/timer/stellaris-gptm.c @@ -0,0 +1,332 @@ +/* + * Luminary Micro Stellaris General Purpose Timer Module + * + * Copyright (c) 2006 CodeSourcery. + * Written by Paul Brook + * + * This code is licensed under the GPL. 
+ */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "qemu/timer.h" +#include "qapi/error.h" +#include "migration/vmstate.h" +#include "hw/qdev-clock.h" +#include "hw/timer/stellaris-gptm.h" + +static void gptm_update_irq(gptm_state *s) +{ + int level; + level = (s->state & s->mask) != 0; + qemu_set_irq(s->irq, level); +} + +static void gptm_stop(gptm_state *s, int n) +{ + timer_del(s->timer[n]); +} + +static void gptm_reload(gptm_state *s, int n, int reset) +{ + int64_t tick; + if (reset) { + tick = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + } else { + tick = s->tick[n]; + } + + if (s->config == 0) { + /* 32-bit CountDown. */ + uint32_t count; + count = s->load[0] | (s->load[1] << 16); + tick += clock_ticks_to_ns(s->clk, count); + } else if (s->config == 1) { + /* 32-bit RTC. 1Hz tick. */ + tick += NANOSECONDS_PER_SECOND; + } else if (s->mode[n] == 0xa) { + /* PWM mode. Not implemented. */ + } else { + qemu_log_mask(LOG_UNIMP, + "GPTM: 16-bit timer mode unimplemented: 0x%x\n", + s->mode[n]); + return; + } + s->tick[n] = tick; + timer_mod(s->timer[n], tick); +} + +static void gptm_tick(void *opaque) +{ + gptm_state **p = (gptm_state **)opaque; + gptm_state *s; + int n; + + s = *p; + n = p - s->opaque; + if (s->config == 0) { + s->state |= 1; + if ((s->control & 0x20)) { + /* Output trigger. */ + qemu_irq_pulse(s->trigger); + } + if (s->mode[0] & 1) { + /* One-shot. */ + s->control &= ~1; + } else { + /* Periodic. */ + gptm_reload(s, 0, 0); + } + } else if (s->config == 1) { + /* RTC. */ + uint32_t match; + s->rtc++; + match = s->match[0] | (s->match[1] << 16); + if (s->rtc > match) { + s->rtc = 0; + } + if (s->rtc == 0) { + s->state |= 8; + } + gptm_reload(s, 0, 0); + } else if (s->mode[n] == 0xa) { + /* PWM mode. Not implemented. */ + } else { + qemu_log_mask(LOG_UNIMP, + "GPTM: 16-bit timer mode unimplemented: 0x%x\n", + s->mode[n]); + } + gptm_update_irq(s); +} + +static uint64_t gptm_read(void *opaque, hwaddr offset, + unsigned size) +{ + gptm_state *s = (gptm_state *)opaque; + + switch (offset) { + case 0x00: /* CFG */ + return s->config; + case 0x04: /* TAMR */ + return s->mode[0]; + case 0x08: /* TBMR */ + return s->mode[1]; + case 0x0c: /* CTL */ + return s->control; + case 0x18: /* IMR */ + return s->mask; + case 0x1c: /* RIS */ + return s->state; + case 0x20: /* MIS */ + return s->state & s->mask; + case 0x24: /* CR */ + return 0; + case 0x28: /* TAILR */ + return s->load[0] | ((s->config < 4) ? (s->load[1] << 16) : 0); + case 0x2c: /* TBILR */ + return s->load[1]; + case 0x30: /* TAMATCHR */ + return s->match[0] | ((s->config < 4) ? (s->match[1] << 16) : 0); + case 0x34: /* TBMATCHR */ + return s->match[1]; + case 0x38: /* TAPR */ + return s->prescale[0]; + case 0x3c: /* TBPR */ + return s->prescale[1]; + case 0x40: /* TAPMR */ + return s->match_prescale[0]; + case 0x44: /* TBPMR */ + return s->match_prescale[1]; + case 0x48: /* TAR */ + if (s->config == 1) { + return s->rtc; + } + qemu_log_mask(LOG_UNIMP, + "GPTM: read of TAR but timer read not supported\n"); + return 0; + case 0x4c: /* TBR */ + qemu_log_mask(LOG_UNIMP, + "GPTM: read of TBR but timer read not supported\n"); + return 0; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "GPTM: read at bad offset 0x%02" HWADDR_PRIx "\n", + offset); + return 0; + } +} + +static void gptm_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + gptm_state *s = (gptm_state *)opaque; + uint32_t oldval; + + /* + * The timers should be disabled before changing the configuration.
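+ * (Per the Stellaris datasheet, the TnEN enable bits in GPTMCTL must + * be clear while the timer is reprogrammed.)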
+ * We take advantage of this and defer everything until the timer + * is enabled. + */ + switch (offset) { + case 0x00: /* CFG */ + s->config = value; + break; + case 0x04: /* TAMR */ + s->mode[0] = value; + break; + case 0x08: /* TBMR */ + s->mode[1] = value; + break; + case 0x0c: /* CTL */ + oldval = s->control; + s->control = value; + /* TODO: Implement pause. */ + if ((oldval ^ value) & 1) { + if (value & 1) { + gptm_reload(s, 0, 1); + } else { + gptm_stop(s, 0); + } + } + if (((oldval ^ value) & 0x100) && s->config >= 4) { + if (value & 0x100) { + gptm_reload(s, 1, 1); + } else { + gptm_stop(s, 1); + } + } + break; + case 0x18: /* IMR */ + s->mask = value & 0x77; + gptm_update_irq(s); + break; + case 0x24: /* CR */ + s->state &= ~value; + break; + case 0x28: /* TAILR */ + s->load[0] = value & 0xffff; + if (s->config < 4) { + s->load[1] = value >> 16; + } + break; + case 0x2c: /* TBILR */ + s->load[1] = value & 0xffff; + break; + case 0x30: /* TAMATCHR */ + s->match[0] = value & 0xffff; + if (s->config < 4) { + s->match[1] = value >> 16; + } + break; + case 0x34: /* TBMATCHR */ + s->match[1] = value & 0xffff; + break; + case 0x38: /* TAPR */ + s->prescale[0] = value; + break; + case 0x3c: /* TBPR */ + s->prescale[1] = value; + break; + case 0x40: /* TAPMR */ + s->match_prescale[0] = value; + break; + case 0x44: /* TBPMR */ + s->match_prescale[1] = value; + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "GPTM: write at bad offset 0x%02" HWADDR_PRIx "\n", + offset); + } + gptm_update_irq(s); +} + +static const MemoryRegionOps gptm_ops = { + .read = gptm_read, + .write = gptm_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static const VMStateDescription vmstate_stellaris_gptm = { + .name = "stellaris_gptm", + .version_id = 2, + .minimum_version_id = 2, + .fields = (VMStateField[]) { + VMSTATE_UINT32(config, gptm_state), + VMSTATE_UINT32_ARRAY(mode, gptm_state, 2), + VMSTATE_UINT32(control, gptm_state), + VMSTATE_UINT32(state, gptm_state), + VMSTATE_UINT32(mask, gptm_state), + VMSTATE_UNUSED(8), + VMSTATE_UINT32_ARRAY(load, gptm_state, 2), + VMSTATE_UINT32_ARRAY(match, gptm_state, 2), + VMSTATE_UINT32_ARRAY(prescale, gptm_state, 2), + VMSTATE_UINT32_ARRAY(match_prescale, gptm_state, 2), + VMSTATE_UINT32(rtc, gptm_state), + VMSTATE_INT64_ARRAY(tick, gptm_state, 2), + VMSTATE_TIMER_PTR_ARRAY(timer, gptm_state, 2), + VMSTATE_CLOCK(clk, gptm_state), + VMSTATE_END_OF_LIST() + } +}; + +static void stellaris_gptm_init(Object *obj) +{ + DeviceState *dev = DEVICE(obj); + gptm_state *s = STELLARIS_GPTM(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + + sysbus_init_irq(sbd, &s->irq); + qdev_init_gpio_out(dev, &s->trigger, 1); + + memory_region_init_io(&s->iomem, obj, &gptm_ops, s, + "gptm", 0x1000); + sysbus_init_mmio(sbd, &s->iomem); + + s->opaque[0] = s->opaque[1] = s; + + /* + * TODO: in an ideal world we would model the effects of changing + * the input clock frequency while the countdown timer is active. + * The best way to do this would be to convert the device to use + * ptimer instead of hand-rolling its own timer. This would also + * make it easy to implement reading the current count from the + * TAR and TBR registers.
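+ * As it stands, the input clock is only sampled in gptm_reload(), so + * a frequency change takes effect at the next timer reload.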
+ */ + s->clk = qdev_init_clock_in(dev, "clk", NULL, NULL, 0); +} + +static void stellaris_gptm_realize(DeviceState *dev, Error **errp) +{ + gptm_state *s = STELLARIS_GPTM(dev); + + if (!clock_has_source(s->clk)) { + error_setg(errp, "stellaris-gptm: clk must be connected"); + return; + } + + s->timer[0] = timer_new_ns(QEMU_CLOCK_VIRTUAL, gptm_tick, &s->opaque[0]); + s->timer[1] = timer_new_ns(QEMU_CLOCK_VIRTUAL, gptm_tick, &s->opaque[1]); +} + +static void stellaris_gptm_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->vmsd = &vmstate_stellaris_gptm; + dc->realize = stellaris_gptm_realize; +} + +static const TypeInfo stellaris_gptm_info = { + .name = TYPE_STELLARIS_GPTM, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(gptm_state), + .instance_init = stellaris_gptm_init, + .class_init = stellaris_gptm_class_init, +}; + +static void stellaris_gptm_register_types(void) +{ + type_register_static(&stellaris_gptm_info); +} + +type_init(stellaris_gptm_register_types) diff --git a/hw/timer/stm32f2xx_timer.c b/hw/timer/stm32f2xx_timer.c new file mode 100644 index 000000000..ba8694dcd --- /dev/null +++ b/hw/timer/stm32f2xx_timer.c @@ -0,0 +1,347 @@ +/* + * STM32F2XX Timer + * + * Copyright (c) 2014 Alistair Francis + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "hw/irq.h" +#include "hw/qdev-properties.h" +#include "hw/timer/stm32f2xx_timer.h" +#include "migration/vmstate.h" +#include "qemu/log.h" +#include "qemu/module.h" + +#ifndef STM_TIMER_ERR_DEBUG +#define STM_TIMER_ERR_DEBUG 0 +#endif + +#define DB_PRINT_L(lvl, fmt, args...) do { \ + if (STM_TIMER_ERR_DEBUG >= lvl) { \ + qemu_log("%s: " fmt, __func__, ## args); \ + } \ +} while (0) + +#define DB_PRINT(fmt, args...) 
DB_PRINT_L(1, fmt, ## args) + +static void stm32f2xx_timer_set_alarm(STM32F2XXTimerState *s, int64_t now); + +static void stm32f2xx_timer_interrupt(void *opaque) +{ + STM32F2XXTimerState *s = opaque; + + DB_PRINT("Interrupt\n"); + + if (s->tim_dier & TIM_DIER_UIE && s->tim_cr1 & TIM_CR1_CEN) { + s->tim_sr |= 1; + qemu_irq_pulse(s->irq); + stm32f2xx_timer_set_alarm(s, s->hit_time); + } + + if (s->tim_ccmr1 & (TIM_CCMR1_OC2M2 | TIM_CCMR1_OC2M1) && + !(s->tim_ccmr1 & TIM_CCMR1_OC2M0) && + s->tim_ccmr1 & TIM_CCMR1_OC2PE && + s->tim_ccer & TIM_CCER_CC2E) { + /* PWM 2 - Mode 1 */ + DB_PRINT("PWM2 Duty Cycle: %d%%\n", + s->tim_ccr2 / (100 * (s->tim_psc + 1))); + } +} + +static inline int64_t stm32f2xx_ns_to_ticks(STM32F2XXTimerState *s, int64_t t) +{ + return muldiv64(t, s->freq_hz, 1000000000ULL) / (s->tim_psc + 1); +} + +static void stm32f2xx_timer_set_alarm(STM32F2XXTimerState *s, int64_t now) +{ + uint64_t ticks; + int64_t now_ticks; + + if (s->tim_arr == 0) { + return; + } + + DB_PRINT("Alarm set at: 0x%x\n", s->tim_cr1); + + now_ticks = stm32f2xx_ns_to_ticks(s, now); + ticks = s->tim_arr - (now_ticks - s->tick_offset); + + DB_PRINT("Alarm set in %d ticks\n", (int) ticks); + + s->hit_time = muldiv64((ticks + (uint64_t) now_ticks) * (s->tim_psc + 1), + 1000000000ULL, s->freq_hz); + + timer_mod(s->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + s->hit_time); + DB_PRINT("Wait Time: %" PRId64 " ticks\n", s->hit_time); +} + +static void stm32f2xx_timer_reset(DeviceState *dev) +{ + STM32F2XXTimerState *s = STM32F2XXTIMER(dev); + int64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + + s->tim_cr1 = 0; + s->tim_cr2 = 0; + s->tim_smcr = 0; + s->tim_dier = 0; + s->tim_sr = 0; + s->tim_egr = 0; + s->tim_ccmr1 = 0; + s->tim_ccmr2 = 0; + s->tim_ccer = 0; + s->tim_psc = 0; + s->tim_arr = 0; + s->tim_ccr1 = 0; + s->tim_ccr2 = 0; + s->tim_ccr3 = 0; + s->tim_ccr4 = 0; + s->tim_dcr = 0; + s->tim_dmar = 0; + s->tim_or = 0; + + s->tick_offset = stm32f2xx_ns_to_ticks(s, now); +} + +static uint64_t stm32f2xx_timer_read(void *opaque, hwaddr offset, + unsigned size) +{ + STM32F2XXTimerState *s = opaque; + + DB_PRINT("Read 0x%"HWADDR_PRIx"\n", offset); + + switch (offset) { + case TIM_CR1: + return s->tim_cr1; + case TIM_CR2: + return s->tim_cr2; + case TIM_SMCR: + return s->tim_smcr; + case TIM_DIER: + return s->tim_dier; + case TIM_SR: + return s->tim_sr; + case TIM_EGR: + return s->tim_egr; + case TIM_CCMR1: + return s->tim_ccmr1; + case TIM_CCMR2: + return s->tim_ccmr2; + case TIM_CCER: + return s->tim_ccer; + case TIM_CNT: + return stm32f2xx_ns_to_ticks(s, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL)) - + s->tick_offset; + case TIM_PSC: + return s->tim_psc; + case TIM_ARR: + return s->tim_arr; + case TIM_CCR1: + return s->tim_ccr1; + case TIM_CCR2: + return s->tim_ccr2; + case TIM_CCR3: + return s->tim_ccr3; + case TIM_CCR4: + return s->tim_ccr4; + case TIM_DCR: + return s->tim_dcr; + case TIM_DMAR: + return s->tim_dmar; + case TIM_OR: + return s->tim_or; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad offset 0x%"HWADDR_PRIx"\n", __func__, offset); + } + + return 0; +} + +static void stm32f2xx_timer_write(void *opaque, hwaddr offset, + uint64_t val64, unsigned size) +{ + STM32F2XXTimerState *s = opaque; + uint32_t value = val64; + int64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + uint32_t timer_val = 0; + + DB_PRINT("Write 0x%x, 0x%"HWADDR_PRIx"\n", value, offset); + + switch (offset) { + case TIM_CR1: + s->tim_cr1 = value; + return; + case TIM_CR2: + s->tim_cr2 = value; + return; + case TIM_SMCR: + 
s->tim_smcr = value; + return; + case TIM_DIER: + s->tim_dier = value; + return; + case TIM_SR: + /* This is set by hardware and cleared by software */ + s->tim_sr &= value; + return; + case TIM_EGR: + s->tim_egr = value; + if (s->tim_egr & TIM_EGR_UG) { + timer_val = 0; + break; + } + return; + case TIM_CCMR1: + s->tim_ccmr1 = value; + return; + case TIM_CCMR2: + s->tim_ccmr2 = value; + return; + case TIM_CCER: + s->tim_ccer = value; + return; + case TIM_PSC: + timer_val = stm32f2xx_ns_to_ticks(s, now) - s->tick_offset; + s->tim_psc = value & 0xFFFF; + break; + case TIM_CNT: + timer_val = value; + break; + case TIM_ARR: + s->tim_arr = value; + stm32f2xx_timer_set_alarm(s, now); + return; + case TIM_CCR1: + s->tim_ccr1 = value; + return; + case TIM_CCR2: + s->tim_ccr2 = value; + return; + case TIM_CCR3: + s->tim_ccr3 = value; + return; + case TIM_CCR4: + s->tim_ccr4 = value; + return; + case TIM_DCR: + s->tim_dcr = value; + return; + case TIM_DMAR: + s->tim_dmar = value; + return; + case TIM_OR: + s->tim_or = value; + return; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad offset 0x%"HWADDR_PRIx"\n", __func__, offset); + return; + } + + /* This means that a register write has affected the timer in a way that + * requires a refresh of both tick_offset and the alarm. + */ + s->tick_offset = stm32f2xx_ns_to_ticks(s, now) - timer_val; + stm32f2xx_timer_set_alarm(s, now); +} + +static const MemoryRegionOps stm32f2xx_timer_ops = { + .read = stm32f2xx_timer_read, + .write = stm32f2xx_timer_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static const VMStateDescription vmstate_stm32f2xx_timer = { + .name = TYPE_STM32F2XX_TIMER, + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_INT64(tick_offset, STM32F2XXTimerState), + VMSTATE_UINT32(tim_cr1, STM32F2XXTimerState), + VMSTATE_UINT32(tim_cr2, STM32F2XXTimerState), + VMSTATE_UINT32(tim_smcr, STM32F2XXTimerState), + VMSTATE_UINT32(tim_dier, STM32F2XXTimerState), + VMSTATE_UINT32(tim_sr, STM32F2XXTimerState), + VMSTATE_UINT32(tim_egr, STM32F2XXTimerState), + VMSTATE_UINT32(tim_ccmr1, STM32F2XXTimerState), + VMSTATE_UINT32(tim_ccmr2, STM32F2XXTimerState), + VMSTATE_UINT32(tim_ccer, STM32F2XXTimerState), + VMSTATE_UINT32(tim_psc, STM32F2XXTimerState), + VMSTATE_UINT32(tim_arr, STM32F2XXTimerState), + VMSTATE_UINT32(tim_ccr1, STM32F2XXTimerState), + VMSTATE_UINT32(tim_ccr2, STM32F2XXTimerState), + VMSTATE_UINT32(tim_ccr3, STM32F2XXTimerState), + VMSTATE_UINT32(tim_ccr4, STM32F2XXTimerState), + VMSTATE_UINT32(tim_dcr, STM32F2XXTimerState), + VMSTATE_UINT32(tim_dmar, STM32F2XXTimerState), + VMSTATE_UINT32(tim_or, STM32F2XXTimerState), + VMSTATE_END_OF_LIST() + } +}; + +static Property stm32f2xx_timer_properties[] = { + DEFINE_PROP_UINT64("clock-frequency", struct STM32F2XXTimerState, + freq_hz, 1000000000), + DEFINE_PROP_END_OF_LIST(), +}; + +static void stm32f2xx_timer_init(Object *obj) +{ + STM32F2XXTimerState *s = STM32F2XXTIMER(obj); + + sysbus_init_irq(SYS_BUS_DEVICE(obj), &s->irq); + + memory_region_init_io(&s->iomem, obj, &stm32f2xx_timer_ops, s, + "stm32f2xx_timer", 0x400); + sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->iomem); +} + +static void stm32f2xx_timer_realize(DeviceState *dev, Error **errp) +{ + STM32F2XXTimerState *s = STM32F2XXTIMER(dev); + s->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, stm32f2xx_timer_interrupt, s); +} + +static void stm32f2xx_timer_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = stm32f2xx_timer_reset; + 
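+    /*
+     * QOM wiring: reset handler, qdev properties, migration state and
+     * the realize hook (which creates the QEMU_CLOCK_VIRTUAL timer) are
+     * all registered on the DeviceClass.
+     */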
device_class_set_props(dc, stm32f2xx_timer_properties);
+    dc->vmsd = &vmstate_stm32f2xx_timer;
+    dc->realize = stm32f2xx_timer_realize;
+}
+
+static const TypeInfo stm32f2xx_timer_info = {
+    .name = TYPE_STM32F2XX_TIMER,
+    .parent = TYPE_SYS_BUS_DEVICE,
+    .instance_size = sizeof(STM32F2XXTimerState),
+    .instance_init = stm32f2xx_timer_init,
+    .class_init = stm32f2xx_timer_class_init,
+};
+
+static void stm32f2xx_timer_register_types(void)
+{
+    type_register_static(&stm32f2xx_timer_info);
+}
+
+type_init(stm32f2xx_timer_register_types)
diff --git a/hw/timer/trace-events b/hw/timer/trace-events
new file mode 100644
index 000000000..3eccef838
--- /dev/null
+++ b/hw/timer/trace-events
@@ -0,0 +1,101 @@
+# See docs/devel/tracing.rst for syntax documentation.
+
+# slavio_timer.c
+slavio_timer_get_out(uint64_t limit, uint32_t counthigh, uint32_t count) "limit 0x%"PRIx64" count 0x%x0x%08x"
+slavio_timer_irq(uint32_t counthigh, uint32_t count) "callback: count 0x%x0x%08x"
+slavio_timer_mem_readl_invalid(uint64_t addr) "invalid read address 0x%"PRIx64
+slavio_timer_mem_readl(uint64_t addr, uint32_t ret) "read 0x%"PRIx64" = 0x%08x"
+slavio_timer_mem_writel(uint64_t addr, uint32_t val) "write 0x%"PRIx64" = 0x%08x"
+slavio_timer_mem_writel_limit(unsigned int timer_index, uint64_t count) "processor %d user timer set to 0x%016"PRIx64
+slavio_timer_mem_writel_counter_invalid(void) "not user timer"
+slavio_timer_mem_writel_status_start(unsigned int timer_index) "processor %d user timer started"
+slavio_timer_mem_writel_status_stop(unsigned int timer_index) "processor %d user timer stopped"
+slavio_timer_mem_writel_mode_user(unsigned int timer_index) "processor %d changed from counter to user timer"
+slavio_timer_mem_writel_mode_counter(unsigned int timer_index) "processor %d changed from user timer to counter"
+slavio_timer_mem_writel_mode_invalid(void) "not system timer"
+slavio_timer_mem_writel_invalid(uint64_t addr) "invalid write address 0x%"PRIx64
+
+# grlib_gptimer.c
+grlib_gptimer_enable(int id, uint32_t count) "timer:%d set count 0x%x and run"
+grlib_gptimer_disabled(int id, uint32_t config) "timer:%d Timer disable config 0x%x"
+grlib_gptimer_restart(int id, uint32_t reload) "timer:%d reload val: 0x%x"
+grlib_gptimer_set_scaler(uint32_t scaler, uint32_t freq) "scaler:0x%x freq:%uHz"
+grlib_gptimer_hit(int id) "timer:%d HIT"
+grlib_gptimer_readl(int id, uint64_t addr, uint32_t val) "timer:%d addr 0x%"PRIx64" 0x%x"
+grlib_gptimer_writel(int id, uint64_t addr, uint32_t val) "timer:%d addr 0x%"PRIx64" 0x%x"
+
+# aspeed_timer.c
+aspeed_timer_ctrl_enable(uint8_t i, bool enable) "Timer %" PRIu8 ": %d"
+aspeed_timer_ctrl_external_clock(uint8_t i, bool enable) "Timer %" PRIu8 ": %d"
+aspeed_timer_ctrl_overflow_interrupt(uint8_t i, bool enable) "Timer %" PRIu8 ": %d"
+aspeed_timer_ctrl_pulse_enable(uint8_t i, bool enable) "Timer %" PRIu8 ": %d"
+aspeed_timer_set_ctrl2(uint32_t value) "Value: 0x%" PRIx32
+aspeed_timer_set_value(int timer, int reg, uint32_t value) "Timer %d register %d: 0x%" PRIx32
+aspeed_timer_read(uint64_t offset, unsigned size, uint64_t value) "From 0x%" PRIx64 ": of size %u: 0x%" PRIx64
+
+# armv7m_systick.c
+systick_reload(void) "systick reload"
+systick_timer_tick(void) "systick timer tick"
+systick_read(uint64_t addr, uint32_t value, unsigned size) "systick read addr 0x%" PRIx64 " data 0x%" PRIx32 " size %u"
+systick_write(uint64_t addr, uint32_t value, unsigned size) "systick write addr 0x%" PRIx64 " data 0x%" PRIx32 " size %u"
+
+# cmsdk-apb-timer.c
+cmsdk_apb_timer_read(uint64_t offset, uint64_t data, unsigned size) "CMSDK APB timer read: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u"
+cmsdk_apb_timer_write(uint64_t offset, uint64_t data, unsigned size) "CMSDK APB timer write: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u"
+cmsdk_apb_timer_reset(void) "CMSDK APB timer: reset"
+
+# cmsdk-apb-dualtimer.c
+cmsdk_apb_dualtimer_read(uint64_t offset, uint64_t data, unsigned size) "CMSDK APB dualtimer read: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u"
+cmsdk_apb_dualtimer_write(uint64_t offset, uint64_t data, unsigned size) "CMSDK APB dualtimer write: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u"
+cmsdk_apb_dualtimer_reset(void) "CMSDK APB dualtimer: reset"
+
+# npcm7xx_timer.c
+npcm7xx_timer_read(const char *id, uint64_t offset, uint64_t value) "%s offset: 0x%04" PRIx64 " value 0x%08" PRIx64
+npcm7xx_timer_write(const char *id, uint64_t offset, uint64_t value) "%s offset: 0x%04" PRIx64 " value 0x%08" PRIx64
+npcm7xx_timer_irq(const char *id, int timer, int state) "%s timer %d state %d"
+
+# nrf51_timer.c
+nrf51_timer_read(uint8_t timer_id, uint64_t addr, uint32_t value, unsigned size) "timer %u read addr 0x%" PRIx64 " data 0x%" PRIx32 " size %u"
+nrf51_timer_write(uint8_t timer_id, uint64_t addr, uint32_t value, unsigned size) "timer %u write addr 0x%" PRIx64 " data 0x%" PRIx32 " size %u"
+nrf51_timer_set_count(uint8_t timer_id, uint8_t counter_id, uint32_t value) "timer %u counter %u count 0x%" PRIx32
+
+# bcm2835_systmr.c
+bcm2835_systmr_timer_expired(unsigned id) "timer #%u expired"
+bcm2835_systmr_irq_ack(unsigned id) "timer #%u acked"
+bcm2835_systmr_read(uint64_t offset, uint64_t data) "timer read: offset 0x%" PRIx64 " data 0x%" PRIx64
+bcm2835_systmr_write(uint64_t offset, uint32_t data) "timer write: offset 0x%" PRIx64 " data 0x%" PRIx32
+bcm2835_systmr_run(unsigned id, uint64_t delay_us) "timer #%u expiring in %"PRIu64" us"
+
+# avr_timer16.c
+avr_timer16_read(uint8_t addr, uint8_t value) "timer16 read addr:%u value:%u"
+avr_timer16_read_ifr(uint8_t value) "timer16 read addr:ifr value:%u"
+avr_timer16_read_imsk(uint8_t value) "timer16 read addr:imsk value:%u"
+avr_timer16_write(uint8_t addr, uint8_t value) "timer16 write addr:%u value:%u"
+avr_timer16_write_imsk(uint8_t value) "timer16 write addr:imsk value:%u"
+avr_timer16_interrupt_count(uint8_t cnt) "count: %u"
+avr_timer16_interrupt_overflow(const char *reason) "overflow: %s"
+avr_timer16_next_alarm(uint64_t delay_ns) "next alarm: %" PRIu64 " ns from now"
+avr_timer16_clksrc_update(uint64_t freq_hz, uint64_t period_ns, uint64_t delay_s) "timer frequency: %" PRIu64 " Hz, period: %" PRIu64 " ns (%" PRId64 " us)"
+
+# sse_counter.c
+sse_counter_control_read(uint64_t offset, uint64_t data, unsigned size) "SSE system counter control frame read: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u"
+sse_counter_control_write(uint64_t offset, uint64_t data, unsigned size) "SSE system counter control frame write: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u"
+sse_counter_status_read(uint64_t offset, uint64_t data, unsigned size) "SSE system counter status frame read: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u"
+sse_counter_status_write(uint64_t offset, uint64_t data, unsigned size) "SSE system counter status frame write: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u"
+sse_counter_reset(void) "SSE system counter: reset"
+
+# sse_timer.c
+sse_timer_read(uint64_t offset, uint64_t data, unsigned size) "SSE system timer read: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u"
+sse_timer_write(uint64_t offset, 
uint64_t data, unsigned size) "SSE system timer write: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u" +sse_timer_reset(void) "SSE system timer: reset" + +# sifive_pwm.c +sifive_pwm_set_alarm(uint64_t alarm, uint64_t now) "Setting alarm to: 0x%" PRIx64 ", now: 0x%" PRIx64 +sifive_pwm_interrupt(int num) "Interrupt %d" +sifive_pwm_read(uint64_t offset) "Read at address: 0x%" PRIx64 +sifive_pwm_write(uint64_t data, uint64_t offset) "Write 0x%" PRIx64 " at address: 0x%" PRIx64 + +# sh_timer.c +sh_timer_start_stop(int enable, int current) "%d (%d)" +sh_timer_read(uint64_t offset) "tmu012_read 0x%" PRIx64 +sh_timer_write(uint64_t offset, uint64_t value) "tmu012_write 0x%" PRIx64 " 0x%08" PRIx64 diff --git a/hw/timer/trace.h b/hw/timer/trace.h new file mode 100644 index 000000000..5f72c441b --- /dev/null +++ b/hw/timer/trace.h @@ -0,0 +1 @@ +#include "trace/trace-hw_timer.h" diff --git a/hw/timer/xilinx_timer.c b/hw/timer/xilinx_timer.c new file mode 100644 index 000000000..1eb927eb8 --- /dev/null +++ b/hw/timer/xilinx_timer.c @@ -0,0 +1,273 @@ +/* + * QEMU model of the Xilinx timer block. + * + * Copyright (c) 2009 Edgar E. Iglesias. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "hw/sysbus.h" +#include "hw/irq.h" +#include "hw/ptimer.h" +#include "hw/qdev-properties.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "qom/object.h" + +#define D(x) + +#define R_TCSR 0 +#define R_TLR 1 +#define R_TCR 2 +#define R_MAX 4 + +#define TCSR_MDT (1<<0) +#define TCSR_UDT (1<<1) +#define TCSR_GENT (1<<2) +#define TCSR_CAPT (1<<3) +#define TCSR_ARHT (1<<4) +#define TCSR_LOAD (1<<5) +#define TCSR_ENIT (1<<6) +#define TCSR_ENT (1<<7) +#define TCSR_TINT (1<<8) +#define TCSR_PWMA (1<<9) +#define TCSR_ENALL (1<<10) + +struct xlx_timer +{ + ptimer_state *ptimer; + void *parent; + int nr; /* for debug. */ + + unsigned long timer_div; + + uint32_t regs[R_MAX]; +}; + +#define TYPE_XILINX_TIMER "xlnx.xps-timer" +DECLARE_INSTANCE_CHECKER(struct timerblock, XILINX_TIMER, + TYPE_XILINX_TIMER) + +struct timerblock +{ + SysBusDevice parent_obj; + + MemoryRegion mmio; + qemu_irq irq; + uint8_t one_timer_only; + uint32_t freq_hz; + struct xlx_timer *timers; +}; + +static inline unsigned int num_timers(struct timerblock *t) +{ + return 2 - t->one_timer_only; +} + +static inline unsigned int timer_from_addr(hwaddr addr) +{ + /* Timers get a 4x32bit control reg area each. 
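+     * For example, with the default two-timer block, byte offsets
+     * 0x00..0x0f address timer 0 and 0x10..0x1f address timer 1; the
+     * caller passes a word index (addr >> 2), so shifting right by two
+     * again yields the timer number.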
*/ + return addr >> 2; +} + +static void timer_update_irq(struct timerblock *t) +{ + unsigned int i, irq = 0; + uint32_t csr; + + for (i = 0; i < num_timers(t); i++) { + csr = t->timers[i].regs[R_TCSR]; + irq |= (csr & TCSR_TINT) && (csr & TCSR_ENIT); + } + + /* All timers within the same slave share a single IRQ line. */ + qemu_set_irq(t->irq, !!irq); +} + +static uint64_t +timer_read(void *opaque, hwaddr addr, unsigned int size) +{ + struct timerblock *t = opaque; + struct xlx_timer *xt; + uint32_t r = 0; + unsigned int timer; + + addr >>= 2; + timer = timer_from_addr(addr); + xt = &t->timers[timer]; + /* Further decoding to address a specific timers reg. */ + addr &= 0x3; + switch (addr) + { + case R_TCR: + r = ptimer_get_count(xt->ptimer); + if (!(xt->regs[R_TCSR] & TCSR_UDT)) + r = ~r; + D(qemu_log("xlx_timer t=%d read counter=%x udt=%d\n", + timer, r, xt->regs[R_TCSR] & TCSR_UDT)); + break; + default: + if (addr < ARRAY_SIZE(xt->regs)) + r = xt->regs[addr]; + break; + + } + D(fprintf(stderr, "%s timer=%d %x=%x\n", __func__, timer, addr * 4, r)); + return r; +} + +/* Must be called inside ptimer transaction block */ +static void timer_enable(struct xlx_timer *xt) +{ + uint64_t count; + + D(fprintf(stderr, "%s timer=%d down=%d\n", __func__, + xt->nr, xt->regs[R_TCSR] & TCSR_UDT)); + + ptimer_stop(xt->ptimer); + + if (xt->regs[R_TCSR] & TCSR_UDT) + count = xt->regs[R_TLR]; + else + count = ~0 - xt->regs[R_TLR]; + ptimer_set_limit(xt->ptimer, count, 1); + ptimer_run(xt->ptimer, 1); +} + +static void +timer_write(void *opaque, hwaddr addr, + uint64_t val64, unsigned int size) +{ + struct timerblock *t = opaque; + struct xlx_timer *xt; + unsigned int timer; + uint32_t value = val64; + + addr >>= 2; + timer = timer_from_addr(addr); + xt = &t->timers[timer]; + D(fprintf(stderr, "%s addr=%x val=%x (timer=%d off=%d)\n", + __func__, addr * 4, value, timer, addr & 3)); + /* Further decoding to address a specific timers reg. */ + addr &= 3; + switch (addr) + { + case R_TCSR: + if (value & TCSR_TINT) + value &= ~TCSR_TINT; + + xt->regs[addr] = value & 0x7ff; + if (value & TCSR_ENT) { + ptimer_transaction_begin(xt->ptimer); + timer_enable(xt); + ptimer_transaction_commit(xt->ptimer); + } + break; + + default: + if (addr < ARRAY_SIZE(xt->regs)) + xt->regs[addr] = value; + break; + } + timer_update_irq(t); +} + +static const MemoryRegionOps timer_ops = { + .read = timer_read, + .write = timer_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4 + } +}; + +static void timer_hit(void *opaque) +{ + struct xlx_timer *xt = opaque; + struct timerblock *t = xt->parent; + D(fprintf(stderr, "%s %d\n", __func__, xt->nr)); + xt->regs[R_TCSR] |= TCSR_TINT; + + if (xt->regs[R_TCSR] & TCSR_ARHT) + timer_enable(xt); + timer_update_irq(t); +} + +static void xilinx_timer_realize(DeviceState *dev, Error **errp) +{ + struct timerblock *t = XILINX_TIMER(dev); + unsigned int i; + + /* Init all the ptimers. 
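+     * Note that every ptimer state change must happen inside a
+     * ptimer_transaction_begin()/ptimer_transaction_commit() pair, as
+     * with ptimer_set_freq() below and timer_enable() in the write path.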
*/ + t->timers = g_malloc0(sizeof t->timers[0] * num_timers(t)); + for (i = 0; i < num_timers(t); i++) { + struct xlx_timer *xt = &t->timers[i]; + + xt->parent = t; + xt->nr = i; + xt->ptimer = ptimer_init(timer_hit, xt, PTIMER_POLICY_DEFAULT); + ptimer_transaction_begin(xt->ptimer); + ptimer_set_freq(xt->ptimer, t->freq_hz); + ptimer_transaction_commit(xt->ptimer); + } + + memory_region_init_io(&t->mmio, OBJECT(t), &timer_ops, t, "xlnx.xps-timer", + R_MAX * 4 * num_timers(t)); + sysbus_init_mmio(SYS_BUS_DEVICE(dev), &t->mmio); +} + +static void xilinx_timer_init(Object *obj) +{ + struct timerblock *t = XILINX_TIMER(obj); + + /* All timers share a single irq line. */ + sysbus_init_irq(SYS_BUS_DEVICE(obj), &t->irq); +} + +static Property xilinx_timer_properties[] = { + DEFINE_PROP_UINT32("clock-frequency", struct timerblock, freq_hz, + 62 * 1000000), + DEFINE_PROP_UINT8("one-timer-only", struct timerblock, one_timer_only, 0), + DEFINE_PROP_END_OF_LIST(), +}; + +static void xilinx_timer_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = xilinx_timer_realize; + device_class_set_props(dc, xilinx_timer_properties); +} + +static const TypeInfo xilinx_timer_info = { + .name = TYPE_XILINX_TIMER, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(struct timerblock), + .instance_init = xilinx_timer_init, + .class_init = xilinx_timer_class_init, +}; + +static void xilinx_timer_register_types(void) +{ + type_register_static(&xilinx_timer_info); +} + +type_init(xilinx_timer_register_types) diff --git a/hw/tpm/Kconfig b/hw/tpm/Kconfig new file mode 100644 index 000000000..29e82f3c9 --- /dev/null +++ b/hw/tpm/Kconfig @@ -0,0 +1,25 @@ +config TPM_TIS_ISA + bool + depends on TPM && ISA_BUS + select TPM_TIS + +config TPM_TIS_SYSBUS + bool + depends on TPM + select TPM_TIS + +config TPM_TIS + bool + depends on TPM + select TPM_BACKEND + +config TPM_CRB + bool + depends on TPM && PC + select TPM_BACKEND + +config TPM_SPAPR + bool + default y + depends on TPM && PSERIES + select TPM_BACKEND diff --git a/hw/tpm/meson.build b/hw/tpm/meson.build new file mode 100644 index 000000000..1c68d81d6 --- /dev/null +++ b/hw/tpm/meson.build @@ -0,0 +1,8 @@ +softmmu_ss.add(when: 'CONFIG_TPM_TIS', if_true: files('tpm_tis_common.c')) +softmmu_ss.add(when: 'CONFIG_TPM_TIS_ISA', if_true: files('tpm_tis_isa.c')) +softmmu_ss.add(when: 'CONFIG_TPM_TIS_SYSBUS', if_true: files('tpm_tis_sysbus.c')) +softmmu_ss.add(when: 'CONFIG_TPM_CRB', if_true: files('tpm_crb.c')) + +specific_ss.add(when: ['CONFIG_SOFTMMU', 'CONFIG_TPM_TIS'], if_true: files('tpm_ppi.c')) +specific_ss.add(when: ['CONFIG_SOFTMMU', 'CONFIG_TPM_CRB'], if_true: files('tpm_ppi.c')) +specific_ss.add(when: 'CONFIG_TPM_SPAPR', if_true: files('tpm_spapr.c')) diff --git a/hw/tpm/tpm_crb.c b/hw/tpm/tpm_crb.c new file mode 100644 index 000000000..58ebd1469 --- /dev/null +++ b/hw/tpm/tpm_crb.c @@ -0,0 +1,345 @@ +/* + * tpm_crb.c - QEMU's TPM CRB interface emulator + * + * Copyright (c) 2018 Red Hat, Inc. + * + * Authors: + * Marc-André Lureau + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
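+ *
+ * (Rough memory map, as set up in tpm_crb_realize() and tpm_crb_reset()
+ * below: a register page at TPM_CRB_ADDR_BASE, immediately followed by
+ * the command/response buffer that CTRL_CMD_LADDR and CTRL_RSP_ADDR
+ * point into.)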
+ * + * tpm_crb is a device for TPM 2.0 Command Response Buffer (CRB) Interface + * as defined in TCG PC Client Platform TPM Profile (PTP) Specification + * Family “2.0” Level 00 Revision 01.03 v22 + */ + +#include "qemu/osdep.h" + +#include "qemu/module.h" +#include "qapi/error.h" +#include "hw/qdev-properties.h" +#include "hw/pci/pci_ids.h" +#include "hw/acpi/tpm.h" +#include "migration/vmstate.h" +#include "sysemu/tpm_backend.h" +#include "sysemu/tpm_util.h" +#include "sysemu/reset.h" +#include "tpm_prop.h" +#include "tpm_ppi.h" +#include "trace.h" +#include "qom/object.h" + +struct CRBState { + DeviceState parent_obj; + + TPMBackend *tpmbe; + TPMBackendCmd cmd; + uint32_t regs[TPM_CRB_R_MAX]; + MemoryRegion mmio; + MemoryRegion cmdmem; + + size_t be_buffer_size; + + bool ppi_enabled; + TPMPPI ppi; +}; +typedef struct CRBState CRBState; + +DECLARE_INSTANCE_CHECKER(CRBState, CRB, + TYPE_TPM_CRB) + +#define CRB_INTF_TYPE_CRB_ACTIVE 0b1 +#define CRB_INTF_VERSION_CRB 0b1 +#define CRB_INTF_CAP_LOCALITY_0_ONLY 0b0 +#define CRB_INTF_CAP_IDLE_FAST 0b0 +#define CRB_INTF_CAP_XFER_SIZE_64 0b11 +#define CRB_INTF_CAP_FIFO_NOT_SUPPORTED 0b0 +#define CRB_INTF_CAP_CRB_SUPPORTED 0b1 +#define CRB_INTF_IF_SELECTOR_CRB 0b1 + +#define CRB_CTRL_CMD_SIZE (TPM_CRB_ADDR_SIZE - A_CRB_DATA_BUFFER) + +enum crb_loc_ctrl { + CRB_LOC_CTRL_REQUEST_ACCESS = BIT(0), + CRB_LOC_CTRL_RELINQUISH = BIT(1), + CRB_LOC_CTRL_SEIZE = BIT(2), + CRB_LOC_CTRL_RESET_ESTABLISHMENT_BIT = BIT(3), +}; + +enum crb_ctrl_req { + CRB_CTRL_REQ_CMD_READY = BIT(0), + CRB_CTRL_REQ_GO_IDLE = BIT(1), +}; + +enum crb_start { + CRB_START_INVOKE = BIT(0), +}; + +enum crb_cancel { + CRB_CANCEL_INVOKE = BIT(0), +}; + +#define TPM_CRB_NO_LOCALITY 0xff + +static uint64_t tpm_crb_mmio_read(void *opaque, hwaddr addr, + unsigned size) +{ + CRBState *s = CRB(opaque); + void *regs = (void *)&s->regs + (addr & ~3); + unsigned offset = addr & 3; + uint32_t val = *(uint32_t *)regs >> (8 * offset); + + switch (addr) { + case A_CRB_LOC_STATE: + val |= !tpm_backend_get_tpm_established_flag(s->tpmbe); + break; + } + + trace_tpm_crb_mmio_read(addr, size, val); + + return val; +} + +static uint8_t tpm_crb_get_active_locty(CRBState *s) +{ + if (!ARRAY_FIELD_EX32(s->regs, CRB_LOC_STATE, locAssigned)) { + return TPM_CRB_NO_LOCALITY; + } + return ARRAY_FIELD_EX32(s->regs, CRB_LOC_STATE, activeLocality); +} + +static void tpm_crb_mmio_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + CRBState *s = CRB(opaque); + uint8_t locty = addr >> 12; + + trace_tpm_crb_mmio_write(addr, size, val); + + switch (addr) { + case A_CRB_CTRL_REQ: + switch (val) { + case CRB_CTRL_REQ_CMD_READY: + ARRAY_FIELD_DP32(s->regs, CRB_CTRL_STS, + tpmIdle, 0); + break; + case CRB_CTRL_REQ_GO_IDLE: + ARRAY_FIELD_DP32(s->regs, CRB_CTRL_STS, + tpmIdle, 1); + break; + } + break; + case A_CRB_CTRL_CANCEL: + if (val == CRB_CANCEL_INVOKE && + s->regs[R_CRB_CTRL_START] & CRB_START_INVOKE) { + tpm_backend_cancel_cmd(s->tpmbe); + } + break; + case A_CRB_CTRL_START: + if (val == CRB_START_INVOKE && + !(s->regs[R_CRB_CTRL_START] & CRB_START_INVOKE) && + tpm_crb_get_active_locty(s) == locty) { + void *mem = memory_region_get_ram_ptr(&s->cmdmem); + + s->regs[R_CRB_CTRL_START] |= CRB_START_INVOKE; + s->cmd = (TPMBackendCmd) { + .in = mem, + .in_len = MIN(tpm_cmd_get_size(mem), s->be_buffer_size), + .out = mem, + .out_len = s->be_buffer_size, + }; + + tpm_backend_deliver_request(s->tpmbe, &s->cmd); + } + break; + case A_CRB_LOC_CTRL: + switch (val) { + case CRB_LOC_CTRL_RESET_ESTABLISHMENT_BIT: + /* 
not loc 3 or 4 */ + break; + case CRB_LOC_CTRL_RELINQUISH: + ARRAY_FIELD_DP32(s->regs, CRB_LOC_STATE, + locAssigned, 0); + ARRAY_FIELD_DP32(s->regs, CRB_LOC_STS, + Granted, 0); + break; + case CRB_LOC_CTRL_REQUEST_ACCESS: + ARRAY_FIELD_DP32(s->regs, CRB_LOC_STS, + Granted, 1); + ARRAY_FIELD_DP32(s->regs, CRB_LOC_STS, + beenSeized, 0); + ARRAY_FIELD_DP32(s->regs, CRB_LOC_STATE, + locAssigned, 1); + break; + } + break; + } +} + +static const MemoryRegionOps tpm_crb_memory_ops = { + .read = tpm_crb_mmio_read, + .write = tpm_crb_mmio_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 1, + .max_access_size = 4, + }, +}; + +static void tpm_crb_request_completed(TPMIf *ti, int ret) +{ + CRBState *s = CRB(ti); + + s->regs[R_CRB_CTRL_START] &= ~CRB_START_INVOKE; + if (ret != 0) { + ARRAY_FIELD_DP32(s->regs, CRB_CTRL_STS, + tpmSts, 1); /* fatal error */ + } +} + +static enum TPMVersion tpm_crb_get_version(TPMIf *ti) +{ + CRBState *s = CRB(ti); + + return tpm_backend_get_tpm_version(s->tpmbe); +} + +static int tpm_crb_pre_save(void *opaque) +{ + CRBState *s = opaque; + + tpm_backend_finish_sync(s->tpmbe); + + return 0; +} + +static const VMStateDescription vmstate_tpm_crb = { + .name = "tpm-crb", + .pre_save = tpm_crb_pre_save, + .fields = (VMStateField[]) { + VMSTATE_UINT32_ARRAY(regs, CRBState, TPM_CRB_R_MAX), + VMSTATE_END_OF_LIST(), + } +}; + +static Property tpm_crb_properties[] = { + DEFINE_PROP_TPMBE("tpmdev", CRBState, tpmbe), + DEFINE_PROP_BOOL("ppi", CRBState, ppi_enabled, true), + DEFINE_PROP_END_OF_LIST(), +}; + +static void tpm_crb_reset(void *dev) +{ + CRBState *s = CRB(dev); + + if (s->ppi_enabled) { + tpm_ppi_reset(&s->ppi); + } + tpm_backend_reset(s->tpmbe); + + memset(s->regs, 0, sizeof(s->regs)); + + ARRAY_FIELD_DP32(s->regs, CRB_LOC_STATE, + tpmRegValidSts, 1); + ARRAY_FIELD_DP32(s->regs, CRB_CTRL_STS, + tpmIdle, 1); + ARRAY_FIELD_DP32(s->regs, CRB_INTF_ID, + InterfaceType, CRB_INTF_TYPE_CRB_ACTIVE); + ARRAY_FIELD_DP32(s->regs, CRB_INTF_ID, + InterfaceVersion, CRB_INTF_VERSION_CRB); + ARRAY_FIELD_DP32(s->regs, CRB_INTF_ID, + CapLocality, CRB_INTF_CAP_LOCALITY_0_ONLY); + ARRAY_FIELD_DP32(s->regs, CRB_INTF_ID, + CapCRBIdleBypass, CRB_INTF_CAP_IDLE_FAST); + ARRAY_FIELD_DP32(s->regs, CRB_INTF_ID, + CapDataXferSizeSupport, CRB_INTF_CAP_XFER_SIZE_64); + ARRAY_FIELD_DP32(s->regs, CRB_INTF_ID, + CapFIFO, CRB_INTF_CAP_FIFO_NOT_SUPPORTED); + ARRAY_FIELD_DP32(s->regs, CRB_INTF_ID, + CapCRB, CRB_INTF_CAP_CRB_SUPPORTED); + ARRAY_FIELD_DP32(s->regs, CRB_INTF_ID, + InterfaceSelector, CRB_INTF_IF_SELECTOR_CRB); + ARRAY_FIELD_DP32(s->regs, CRB_INTF_ID, + RID, 0b0000); + ARRAY_FIELD_DP32(s->regs, CRB_INTF_ID2, + VID, PCI_VENDOR_ID_IBM); + + s->regs[R_CRB_CTRL_CMD_SIZE] = CRB_CTRL_CMD_SIZE; + s->regs[R_CRB_CTRL_CMD_LADDR] = TPM_CRB_ADDR_BASE + A_CRB_DATA_BUFFER; + s->regs[R_CRB_CTRL_RSP_SIZE] = CRB_CTRL_CMD_SIZE; + s->regs[R_CRB_CTRL_RSP_ADDR] = TPM_CRB_ADDR_BASE + A_CRB_DATA_BUFFER; + + s->be_buffer_size = MIN(tpm_backend_get_buffer_size(s->tpmbe), + CRB_CTRL_CMD_SIZE); + + if (tpm_backend_startup_tpm(s->tpmbe, s->be_buffer_size) < 0) { + exit(1); + } +} + +static void tpm_crb_realize(DeviceState *dev, Error **errp) +{ + CRBState *s = CRB(dev); + + if (!tpm_find()) { + error_setg(errp, "at most one TPM device is permitted"); + return; + } + if (!s->tpmbe) { + error_setg(errp, "'tpmdev' property is required"); + return; + } + + memory_region_init_io(&s->mmio, OBJECT(s), &tpm_crb_memory_ops, s, + "tpm-crb-mmio", sizeof(s->regs)); + memory_region_init_ram(&s->cmdmem, 
OBJECT(s), + "tpm-crb-cmd", CRB_CTRL_CMD_SIZE, errp); + + memory_region_add_subregion(get_system_memory(), + TPM_CRB_ADDR_BASE, &s->mmio); + memory_region_add_subregion(get_system_memory(), + TPM_CRB_ADDR_BASE + sizeof(s->regs), &s->cmdmem); + + if (s->ppi_enabled) { + tpm_ppi_init(&s->ppi, get_system_memory(), + TPM_PPI_ADDR_BASE, OBJECT(s)); + } + + qemu_register_reset(tpm_crb_reset, dev); +} + +static void tpm_crb_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + TPMIfClass *tc = TPM_IF_CLASS(klass); + + dc->realize = tpm_crb_realize; + device_class_set_props(dc, tpm_crb_properties); + dc->vmsd = &vmstate_tpm_crb; + dc->user_creatable = true; + tc->model = TPM_MODEL_TPM_CRB; + tc->get_version = tpm_crb_get_version; + tc->request_completed = tpm_crb_request_completed; + + set_bit(DEVICE_CATEGORY_MISC, dc->categories); +} + +static const TypeInfo tpm_crb_info = { + .name = TYPE_TPM_CRB, + /* could be TYPE_SYS_BUS_DEVICE (or LPC etc) */ + .parent = TYPE_DEVICE, + .instance_size = sizeof(CRBState), + .class_init = tpm_crb_class_init, + .interfaces = (InterfaceInfo[]) { + { TYPE_TPM_IF }, + { } + } +}; + +static void tpm_crb_register(void) +{ + type_register_static(&tpm_crb_info); +} + +type_init(tpm_crb_register) diff --git a/hw/tpm/tpm_ppi.c b/hw/tpm/tpm_ppi.c new file mode 100644 index 000000000..274e9aa4b --- /dev/null +++ b/hw/tpm/tpm_ppi.c @@ -0,0 +1,57 @@ +/* + * tpm_ppi.c - TPM Physical Presence Interface + * + * Copyright (C) 2018 IBM Corporation + * + * Authors: + * Stefan Berger + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +#include "qemu/osdep.h" + +#include "qapi/error.h" +#include "cpu.h" +#include "sysemu/memory_mapping.h" +#include "migration/vmstate.h" +#include "hw/acpi/tpm.h" +#include "tpm_ppi.h" +#include "trace.h" + +void tpm_ppi_reset(TPMPPI *tpmppi) +{ + if (tpmppi->buf[0x15a /* movv, docs/specs/tpm.rst */] & 0x1) { + GuestPhysBlockList guest_phys_blocks; + GuestPhysBlock *block; + + guest_phys_blocks_init(&guest_phys_blocks); + guest_phys_blocks_append(&guest_phys_blocks); + QTAILQ_FOREACH(block, &guest_phys_blocks.head, next) { + hwaddr mr_offs = block->host_addr - + (uint8_t *)memory_region_get_ram_ptr(block->mr); + + trace_tpm_ppi_memset(block->host_addr, + block->target_end - block->target_start); + memset(block->host_addr, 0, + block->target_end - block->target_start); + memory_region_set_dirty(block->mr, mr_offs, + block->target_end - block->target_start); + } + guest_phys_blocks_free(&guest_phys_blocks); + } +} + +void tpm_ppi_init(TPMPPI *tpmppi, struct MemoryRegion *m, + hwaddr addr, Object *obj) +{ + tpmppi->buf = qemu_memalign(qemu_real_host_page_size, + HOST_PAGE_ALIGN(TPM_PPI_ADDR_SIZE)); + memory_region_init_ram_device_ptr(&tpmppi->ram, obj, "tpm-ppi", + TPM_PPI_ADDR_SIZE, tpmppi->buf); + vmstate_register_ram(&tpmppi->ram, DEVICE(obj)); + + memory_region_add_subregion(m, addr, &tpmppi->ram); +} diff --git a/hw/tpm/tpm_ppi.h b/hw/tpm/tpm_ppi.h new file mode 100644 index 000000000..6f773c25a --- /dev/null +++ b/hw/tpm/tpm_ppi.h @@ -0,0 +1,45 @@ +/* + * TPM Physical Presence Interface + * + * Copyright (C) 2018 IBM Corporation + * + * Authors: + * Stefan Berger + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
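+ *
+ * (The PPI region is a page of guest-visible RAM through which firmware
+ * and the OS exchange physical-presence requests; QEMU itself only
+ * interprets the "memory overwrite" flag, see tpm_ppi_reset().)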
+ */ +#ifndef TPM_TPM_PPI_H +#define TPM_TPM_PPI_H + +#include "exec/address-spaces.h" + +typedef struct TPMPPI { + MemoryRegion ram; + uint8_t *buf; +} TPMPPI; + +/** + * tpm_ppi_init: + * @tpmppi: a TPMPPI + * @m: the address-space / MemoryRegion to use + * @addr: the address of the PPI region + * @obj: the owner object + * + * Register the TPM PPI memory region at @addr on the given address + * space for the object @obj. + **/ +void tpm_ppi_init(TPMPPI *tpmppi, struct MemoryRegion *m, + hwaddr addr, Object *obj); + +/** + * tpm_ppi_reset: + * @tpmppi: a TPMPPI + * + * Function to call on machine reset. It will check if the "Memory + * overwrite" variable is set, and perform a memory clear on volatile + * memory if requested. + **/ +void tpm_ppi_reset(TPMPPI *tpmppi); + +#endif /* TPM_TPM_PPI_H */ diff --git a/hw/tpm/tpm_prop.h b/hw/tpm/tpm_prop.h new file mode 100644 index 000000000..bbd4225d6 --- /dev/null +++ b/hw/tpm/tpm_prop.h @@ -0,0 +1,33 @@ +/* + * TPM utility functions + * + * Copyright (c) 2010 - 2015 IBM Corporation + * Authors: + * Stefan Berger + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see + */ + +#ifndef HW_TPM_PROP_H +#define HW_TPM_PROP_H + +#include "sysemu/tpm_backend.h" +#include "hw/qdev-properties.h" + +extern const PropertyInfo qdev_prop_tpm; + +#define DEFINE_PROP_TPMBE(_n, _s, _f) \ + DEFINE_PROP(_n, _s, _f, qdev_prop_tpm, TPMBackend *) + +#endif /* HW_TPM_PROP_H */ diff --git a/hw/tpm/tpm_spapr.c b/hw/tpm/tpm_spapr.c new file mode 100644 index 000000000..dea7b1333 --- /dev/null +++ b/hw/tpm/tpm_spapr.c @@ -0,0 +1,430 @@ +/* + * QEMU PowerPC pSeries Logical Partition (aka sPAPR) hardware System Emulator + * + * PAPR Virtual TPM + * + * Copyright (c) 2015, 2017, 2019 IBM Corporation. + * + * Authors: + * Stefan Berger + * + * This code is licensed under the GPL version 2 or later. See the + * COPYING file in the top-level directory. 
+ * + */ + +#include "qemu/osdep.h" +#include "qemu/error-report.h" +#include "qapi/error.h" +#include "hw/qdev-properties.h" +#include "migration/vmstate.h" + +#include "sysemu/tpm_backend.h" +#include "sysemu/tpm_util.h" +#include "tpm_prop.h" + +#include "hw/ppc/spapr.h" +#include "hw/ppc/spapr_vio.h" +#include "trace.h" +#include "qom/object.h" + +#define DEBUG_SPAPR 0 + +typedef struct SpaprTpmState SpaprTpmState; +DECLARE_INSTANCE_CHECKER(SpaprTpmState, VIO_SPAPR_VTPM, + TYPE_TPM_SPAPR) + +typedef struct TpmCrq { + uint8_t valid; /* 0x80: cmd; 0xc0: init crq */ + /* 0x81-0x83: CRQ message response */ + uint8_t msg; /* see below */ + uint16_t len; /* len of TPM request; len of TPM response */ + uint32_t data; /* rtce_dma_handle when sending TPM request */ + uint64_t reserved; +} TpmCrq; + +#define SPAPR_VTPM_VALID_INIT_CRQ_COMMAND 0xC0 +#define SPAPR_VTPM_VALID_COMMAND 0x80 +#define SPAPR_VTPM_MSG_RESULT 0x80 + +/* msg types for valid = SPAPR_VTPM_VALID_INIT_CRQ */ +#define SPAPR_VTPM_INIT_CRQ_RESULT 0x1 +#define SPAPR_VTPM_INIT_CRQ_COMPLETE_RESULT 0x2 + +/* msg types for valid = SPAPR_VTPM_VALID_CMD */ +#define SPAPR_VTPM_GET_VERSION 0x1 +#define SPAPR_VTPM_TPM_COMMAND 0x2 +#define SPAPR_VTPM_GET_RTCE_BUFFER_SIZE 0x3 +#define SPAPR_VTPM_PREPARE_TO_SUSPEND 0x4 + +/* response error messages */ +#define SPAPR_VTPM_VTPM_ERROR 0xff + +/* error codes */ +#define SPAPR_VTPM_ERR_COPY_IN_FAILED 0x3 +#define SPAPR_VTPM_ERR_COPY_OUT_FAILED 0x4 + +#define TPM_SPAPR_BUFFER_MAX 4096 + +struct SpaprTpmState { + SpaprVioDevice vdev; + + TpmCrq crq; /* track single TPM command */ + + uint8_t state; +#define SPAPR_VTPM_STATE_NONE 0 +#define SPAPR_VTPM_STATE_EXECUTION 1 +#define SPAPR_VTPM_STATE_COMPLETION 2 + + unsigned char *buffer; + + uint32_t numbytes; /* number of bytes to deliver on resume */ + + TPMBackendCmd cmd; + + TPMBackend *be_driver; + TPMVersion be_tpm_version; + + size_t be_buffer_size; +}; + +/* + * Send a request to the TPM. + */ +static void tpm_spapr_tpm_send(SpaprTpmState *s) +{ + tpm_util_show_buffer(s->buffer, s->be_buffer_size, "To TPM"); + + s->state = SPAPR_VTPM_STATE_EXECUTION; + s->cmd = (TPMBackendCmd) { + .locty = 0, + .in = s->buffer, + .in_len = MIN(tpm_cmd_get_size(s->buffer), s->be_buffer_size), + .out = s->buffer, + .out_len = s->be_buffer_size, + }; + + tpm_backend_deliver_request(s->be_driver, &s->cmd); +} + +static int tpm_spapr_process_cmd(SpaprTpmState *s, uint64_t dataptr) +{ + long rc; + + /* a max. 
of be_buffer_size bytes can be transported */ + rc = spapr_vio_dma_read(&s->vdev, dataptr, + s->buffer, s->be_buffer_size); + if (rc) { + error_report("tpm_spapr_got_payload: DMA read failure"); + } + /* let vTPM handle any malformed request */ + tpm_spapr_tpm_send(s); + + return rc; +} + +static inline int spapr_tpm_send_crq(struct SpaprVioDevice *dev, TpmCrq *crq) +{ + return spapr_vio_send_crq(dev, (uint8_t *)crq); +} + +static int tpm_spapr_do_crq(struct SpaprVioDevice *dev, uint8_t *crq_data) +{ + SpaprTpmState *s = VIO_SPAPR_VTPM(dev); + TpmCrq local_crq; + TpmCrq *crq = &s->crq; /* requests only */ + int rc; + uint8_t valid = crq_data[0]; + uint8_t msg = crq_data[1]; + + trace_tpm_spapr_do_crq(valid, msg); + + switch (valid) { + case SPAPR_VTPM_VALID_INIT_CRQ_COMMAND: /* Init command/response */ + + /* Respond to initialization request */ + switch (msg) { + case SPAPR_VTPM_INIT_CRQ_RESULT: + trace_tpm_spapr_do_crq_crq_result(); + memset(&local_crq, 0, sizeof(local_crq)); + local_crq.valid = SPAPR_VTPM_VALID_INIT_CRQ_COMMAND; + local_crq.msg = SPAPR_VTPM_INIT_CRQ_RESULT; + spapr_tpm_send_crq(dev, &local_crq); + break; + + case SPAPR_VTPM_INIT_CRQ_COMPLETE_RESULT: + trace_tpm_spapr_do_crq_crq_complete_result(); + memset(&local_crq, 0, sizeof(local_crq)); + local_crq.valid = SPAPR_VTPM_VALID_INIT_CRQ_COMMAND; + local_crq.msg = SPAPR_VTPM_INIT_CRQ_COMPLETE_RESULT; + spapr_tpm_send_crq(dev, &local_crq); + break; + } + + break; + case SPAPR_VTPM_VALID_COMMAND: /* Payloads */ + switch (msg) { + case SPAPR_VTPM_TPM_COMMAND: + trace_tpm_spapr_do_crq_tpm_command(); + if (s->state == SPAPR_VTPM_STATE_EXECUTION) { + return H_BUSY; + } + memcpy(crq, crq_data, sizeof(*crq)); + + rc = tpm_spapr_process_cmd(s, be32_to_cpu(crq->data)); + + if (rc == H_SUCCESS) { + crq->valid = be16_to_cpu(0); + } else { + local_crq.valid = SPAPR_VTPM_MSG_RESULT; + local_crq.msg = SPAPR_VTPM_VTPM_ERROR; + local_crq.len = cpu_to_be16(0); + local_crq.data = cpu_to_be32(SPAPR_VTPM_ERR_COPY_IN_FAILED); + spapr_tpm_send_crq(dev, &local_crq); + } + break; + + case SPAPR_VTPM_GET_RTCE_BUFFER_SIZE: + trace_tpm_spapr_do_crq_tpm_get_rtce_buffer_size(s->be_buffer_size); + local_crq.valid = SPAPR_VTPM_VALID_COMMAND; + local_crq.msg = SPAPR_VTPM_GET_RTCE_BUFFER_SIZE | + SPAPR_VTPM_MSG_RESULT; + local_crq.len = cpu_to_be16(s->be_buffer_size); + spapr_tpm_send_crq(dev, &local_crq); + break; + + case SPAPR_VTPM_GET_VERSION: + local_crq.valid = SPAPR_VTPM_VALID_COMMAND; + local_crq.msg = SPAPR_VTPM_GET_VERSION | SPAPR_VTPM_MSG_RESULT; + local_crq.len = cpu_to_be16(0); + switch (s->be_tpm_version) { + case TPM_VERSION_1_2: + local_crq.data = cpu_to_be32(1); + break; + case TPM_VERSION_2_0: + local_crq.data = cpu_to_be32(2); + break; + default: + g_assert_not_reached(); + break; + } + trace_tpm_spapr_do_crq_get_version(be32_to_cpu(local_crq.data)); + spapr_tpm_send_crq(dev, &local_crq); + break; + + case SPAPR_VTPM_PREPARE_TO_SUSPEND: + trace_tpm_spapr_do_crq_prepare_to_suspend(); + local_crq.valid = SPAPR_VTPM_VALID_COMMAND; + local_crq.msg = SPAPR_VTPM_PREPARE_TO_SUSPEND | + SPAPR_VTPM_MSG_RESULT; + spapr_tpm_send_crq(dev, &local_crq); + break; + + default: + trace_tpm_spapr_do_crq_unknown_msg_type(crq->msg); + } + break; + default: + trace_tpm_spapr_do_crq_unknown_crq(valid, msg); + }; + + return H_SUCCESS; +} + +static void tpm_spapr_request_completed(TPMIf *ti, int ret) +{ + SpaprTpmState *s = VIO_SPAPR_VTPM(ti); + TpmCrq *crq = &s->crq; + uint32_t len; + int rc; + + s->state = SPAPR_VTPM_STATE_COMPLETION; + + /* a max. 
of be_buffer_size bytes can be transported */ + len = MIN(tpm_cmd_get_size(s->buffer), s->be_buffer_size); + + if (runstate_check(RUN_STATE_FINISH_MIGRATE)) { + trace_tpm_spapr_caught_response(len); + /* defer delivery of response until .post_load */ + s->numbytes = len; + return; + } + + rc = spapr_vio_dma_write(&s->vdev, be32_to_cpu(crq->data), + s->buffer, len); + + tpm_util_show_buffer(s->buffer, len, "From TPM"); + + crq->valid = SPAPR_VTPM_MSG_RESULT; + if (rc == H_SUCCESS) { + crq->msg = SPAPR_VTPM_TPM_COMMAND | SPAPR_VTPM_MSG_RESULT; + crq->len = cpu_to_be16(len); + } else { + error_report("%s: DMA write failure", __func__); + crq->msg = SPAPR_VTPM_VTPM_ERROR; + crq->len = cpu_to_be16(0); + crq->data = cpu_to_be32(SPAPR_VTPM_ERR_COPY_OUT_FAILED); + } + + rc = spapr_tpm_send_crq(&s->vdev, crq); + if (rc) { + error_report("%s: Error sending response", __func__); + } +} + +static int tpm_spapr_do_startup_tpm(SpaprTpmState *s, size_t buffersize) +{ + return tpm_backend_startup_tpm(s->be_driver, buffersize); +} + +static const char *tpm_spapr_get_dt_compatible(SpaprVioDevice *dev) +{ + SpaprTpmState *s = VIO_SPAPR_VTPM(dev); + + switch (s->be_tpm_version) { + case TPM_VERSION_1_2: + return "IBM,vtpm"; + case TPM_VERSION_2_0: + return "IBM,vtpm20"; + default: + g_assert_not_reached(); + } +} + +static void tpm_spapr_reset(SpaprVioDevice *dev) +{ + SpaprTpmState *s = VIO_SPAPR_VTPM(dev); + + s->state = SPAPR_VTPM_STATE_NONE; + s->numbytes = 0; + + s->be_tpm_version = tpm_backend_get_tpm_version(s->be_driver); + + s->be_buffer_size = MIN(tpm_backend_get_buffer_size(s->be_driver), + TPM_SPAPR_BUFFER_MAX); + + tpm_backend_reset(s->be_driver); + + if (tpm_spapr_do_startup_tpm(s, s->be_buffer_size) < 0) { + exit(1); + } +} + +static enum TPMVersion tpm_spapr_get_version(TPMIf *ti) +{ + SpaprTpmState *s = VIO_SPAPR_VTPM(ti); + + if (tpm_backend_had_startup_error(s->be_driver)) { + return TPM_VERSION_UNSPEC; + } + + return tpm_backend_get_tpm_version(s->be_driver); +} + +/* persistent state handling */ + +static int tpm_spapr_pre_save(void *opaque) +{ + SpaprTpmState *s = opaque; + + tpm_backend_finish_sync(s->be_driver); + /* + * we cannot deliver the results to the VM since DMA would touch VM memory + */ + + return 0; +} + +static int tpm_spapr_post_load(void *opaque, int version_id) +{ + SpaprTpmState *s = opaque; + + if (s->numbytes) { + trace_tpm_spapr_post_load(); + /* deliver the results to the VM via DMA */ + tpm_spapr_request_completed(TPM_IF(s), 0); + s->numbytes = 0; + } + + return 0; +} + +static const VMStateDescription vmstate_spapr_vtpm = { + .name = "tpm-spapr", + .pre_save = tpm_spapr_pre_save, + .post_load = tpm_spapr_post_load, + .fields = (VMStateField[]) { + VMSTATE_SPAPR_VIO(vdev, SpaprTpmState), + + VMSTATE_UINT8(state, SpaprTpmState), + VMSTATE_UINT32(numbytes, SpaprTpmState), + VMSTATE_VBUFFER_UINT32(buffer, SpaprTpmState, 0, NULL, numbytes), + /* remember DMA address */ + VMSTATE_UINT32(crq.data, SpaprTpmState), + VMSTATE_END_OF_LIST(), + } +}; + +static Property tpm_spapr_properties[] = { + DEFINE_SPAPR_PROPERTIES(SpaprTpmState, vdev), + DEFINE_PROP_TPMBE("tpmdev", SpaprTpmState, be_driver), + DEFINE_PROP_END_OF_LIST(), +}; + +static void tpm_spapr_realizefn(SpaprVioDevice *dev, Error **errp) +{ + SpaprTpmState *s = VIO_SPAPR_VTPM(dev); + + if (!tpm_find()) { + error_setg(errp, "at most one TPM device is permitted"); + return; + } + + dev->crq.SendFunc = tpm_spapr_do_crq; + + if (!s->be_driver) { + error_setg(errp, "'tpmdev' property is required"); + return; + } + 
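+    /*
+     * Fixed 4 KiB request/response buffer; transfers are additionally
+     * capped to the backend's buffer size (be_buffer_size, computed in
+     * tpm_spapr_reset()).
+     */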
s->buffer = g_malloc(TPM_SPAPR_BUFFER_MAX); +} + +static void tpm_spapr_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + SpaprVioDeviceClass *k = VIO_SPAPR_DEVICE_CLASS(klass); + TPMIfClass *tc = TPM_IF_CLASS(klass); + + k->realize = tpm_spapr_realizefn; + k->reset = tpm_spapr_reset; + k->dt_name = "vtpm"; + k->dt_type = "IBM,vtpm"; + k->get_dt_compatible = tpm_spapr_get_dt_compatible; + k->signal_mask = 0x00000001; + set_bit(DEVICE_CATEGORY_MISC, dc->categories); + device_class_set_props(dc, tpm_spapr_properties); + k->rtce_window_size = 0x10000000; + dc->vmsd = &vmstate_spapr_vtpm; + + tc->model = TPM_MODEL_TPM_SPAPR; + tc->get_version = tpm_spapr_get_version; + tc->request_completed = tpm_spapr_request_completed; +} + +static const TypeInfo tpm_spapr_info = { + .name = TYPE_TPM_SPAPR, + .parent = TYPE_VIO_SPAPR_DEVICE, + .instance_size = sizeof(SpaprTpmState), + .class_init = tpm_spapr_class_init, + .interfaces = (InterfaceInfo[]) { + { TYPE_TPM_IF }, + { } + } +}; + +static void tpm_spapr_register_types(void) +{ + type_register_static(&tpm_spapr_info); +} + +type_init(tpm_spapr_register_types) diff --git a/hw/tpm/tpm_tis.h b/hw/tpm/tpm_tis.h new file mode 100644 index 000000000..f6b5872ba --- /dev/null +++ b/hw/tpm/tpm_tis.h @@ -0,0 +1,90 @@ +/* + * tpm_tis.h - QEMU's TPM TIS common header + * + * Copyright (C) 2006,2010-2013 IBM Corporation + * + * Authors: + * Stefan Berger + * David Safford + * + * Xen 4 support: Andrease Niederl + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + * Implementation of the TIS interface according to specs found at + * http://www.trustedcomputinggroup.org. This implementation currently + * supports version 1.3, 21 March 2013 + * In the developers menu choose the PC Client section then find the TIS + * specification. 
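+ *
+ * (The TIS register set is replicated once per locality, one 4 KiB page
+ * each -- see TPM_TIS_LOCALITY_SHIFT and TPM_TIS_NUM_LOCALITIES below.)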
+ * + * TPM TIS for TPM 2 implementation following TCG PC Client Platform + * TPM Profile (PTP) Specification, Familiy 2.0, Revision 00.43 + */ +#ifndef TPM_TPM_TIS_H +#define TPM_TPM_TIS_H + +#include "sysemu/tpm_backend.h" +#include "tpm_ppi.h" + +#define TPM_TIS_NUM_LOCALITIES 5 /* per spec */ +#define TPM_TIS_LOCALITY_SHIFT 12 +#define TPM_TIS_NO_LOCALITY 0xff + +#define TPM_TIS_IS_VALID_LOCTY(x) ((x) < TPM_TIS_NUM_LOCALITIES) + +#define TPM_TIS_BUFFER_MAX 4096 + +typedef enum { + TPM_TIS_STATE_IDLE = 0, + TPM_TIS_STATE_READY, + TPM_TIS_STATE_COMPLETION, + TPM_TIS_STATE_EXECUTION, + TPM_TIS_STATE_RECEPTION, +} TPMTISState; + +/* locality data -- all fields are persisted */ +typedef struct TPMLocality { + TPMTISState state; + uint8_t access; + uint32_t sts; + uint32_t iface_id; + uint32_t inte; + uint32_t ints; +} TPMLocality; + +typedef struct TPMState { + MemoryRegion mmio; + + unsigned char buffer[TPM_TIS_BUFFER_MAX]; + uint16_t rw_offset; + + uint8_t active_locty; + uint8_t aborting_locty; + uint8_t next_locty; + + TPMLocality loc[TPM_TIS_NUM_LOCALITIES]; + + qemu_irq irq; + uint32_t irq_num; + + TPMBackendCmd cmd; + + TPMBackend *be_driver; + TPMVersion be_tpm_version; + + size_t be_buffer_size; + + bool ppi_enabled; + TPMPPI ppi; +} TPMState; + +extern const VMStateDescription vmstate_locty; +extern const MemoryRegionOps tpm_tis_memory_ops; + +int tpm_tis_pre_save(TPMState *s); +void tpm_tis_reset(TPMState *s); +enum TPMVersion tpm_tis_get_tpm_version(TPMState *s); +void tpm_tis_request_completed(TPMState *s, int ret); + +#endif /* TPM_TPM_TIS_H */ diff --git a/hw/tpm/tpm_tis_common.c b/hw/tpm/tpm_tis_common.c new file mode 100644 index 000000000..e700d8218 --- /dev/null +++ b/hw/tpm/tpm_tis_common.c @@ -0,0 +1,867 @@ +/* + * tpm_tis_common.c - QEMU's TPM TIS interface emulator + * device agnostic functions + * + * Copyright (C) 2006,2010-2013 IBM Corporation + * + * Authors: + * Stefan Berger + * David Safford + * + * Xen 4 support: Andrease Niederl + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + * Implementation of the TIS interface according to specs found at + * http://www.trustedcomputinggroup.org. This implementation currently + * supports version 1.3, 21 March 2013 + * In the developers menu choose the PC Client section then find the TIS + * specification. + * + * TPM TIS for TPM 2 implementation following TCG PC Client Platform + * TPM Profile (PTP) Specification, Familiy 2.0, Revision 00.43 + */ +#include "qemu/osdep.h" +#include "hw/irq.h" +#include "hw/isa/isa.h" +#include "qapi/error.h" +#include "qemu/module.h" + +#include "hw/acpi/tpm.h" +#include "hw/pci/pci_ids.h" +#include "hw/qdev-properties.h" +#include "migration/vmstate.h" +#include "sysemu/tpm_backend.h" +#include "sysemu/tpm_util.h" +#include "tpm_ppi.h" +#include "trace.h" + +#include "tpm_tis.h" + +#define DEBUG_TIS 0 + +/* local prototypes */ + +static uint64_t tpm_tis_mmio_read(void *opaque, hwaddr addr, + unsigned size); + +/* utility functions */ + +static uint8_t tpm_tis_locality_from_addr(hwaddr addr) +{ + return (uint8_t)((addr >> TPM_TIS_LOCALITY_SHIFT) & 0x7); +} + + +/* + * Set the given flags in the STS register by clearing the register but + * preserving the SELFTEST_DONE and TPM_FAMILY_MASK flags and then setting + * the new flags. + * + * The SELFTEST_DONE flag is acquired from the backend that determines it by + * peeking into TPM commands. 
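+ * (e.g. by recognizing a self-test ordinal in the request stream; the
+ * exact heuristic lives in the backend, not in this device model.)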
+ * + * A VM suspend/resume will preserve the flag by storing it into the VM + * device state, but the backend will not remember it when QEMU is started + * again. Therefore, we cache the flag here. Once set, it will not be unset + * except by a reset. + */ +static void tpm_tis_sts_set(TPMLocality *l, uint32_t flags) +{ + l->sts &= TPM_TIS_STS_SELFTEST_DONE | TPM_TIS_STS_TPM_FAMILY_MASK; + l->sts |= flags; +} + +/* + * Send a request to the TPM. + */ +static void tpm_tis_tpm_send(TPMState *s, uint8_t locty) +{ + tpm_util_show_buffer(s->buffer, s->be_buffer_size, "To TPM"); + + /* + * rw_offset serves as length indicator for length of data; + * it's reset when the response comes back + */ + s->loc[locty].state = TPM_TIS_STATE_EXECUTION; + + s->cmd = (TPMBackendCmd) { + .locty = locty, + .in = s->buffer, + .in_len = s->rw_offset, + .out = s->buffer, + .out_len = s->be_buffer_size, + }; + + tpm_backend_deliver_request(s->be_driver, &s->cmd); +} + +/* raise an interrupt if allowed */ +static void tpm_tis_raise_irq(TPMState *s, uint8_t locty, uint32_t irqmask) +{ + if (!TPM_TIS_IS_VALID_LOCTY(locty)) { + return; + } + + if ((s->loc[locty].inte & TPM_TIS_INT_ENABLED) && + (s->loc[locty].inte & irqmask)) { + trace_tpm_tis_raise_irq(irqmask); + qemu_irq_raise(s->irq); + s->loc[locty].ints |= irqmask; + } +} + +static uint32_t tpm_tis_check_request_use_except(TPMState *s, uint8_t locty) +{ + uint8_t l; + + for (l = 0; l < TPM_TIS_NUM_LOCALITIES; l++) { + if (l == locty) { + continue; + } + if ((s->loc[l].access & TPM_TIS_ACCESS_REQUEST_USE)) { + return 1; + } + } + + return 0; +} + +static void tpm_tis_new_active_locality(TPMState *s, uint8_t new_active_locty) +{ + bool change = (s->active_locty != new_active_locty); + bool is_seize; + uint8_t mask; + + if (change && TPM_TIS_IS_VALID_LOCTY(s->active_locty)) { + is_seize = TPM_TIS_IS_VALID_LOCTY(new_active_locty) && + s->loc[new_active_locty].access & TPM_TIS_ACCESS_SEIZE; + + if (is_seize) { + mask = ~(TPM_TIS_ACCESS_ACTIVE_LOCALITY); + } else { + mask = ~(TPM_TIS_ACCESS_ACTIVE_LOCALITY| + TPM_TIS_ACCESS_REQUEST_USE); + } + /* reset flags on the old active locality */ + s->loc[s->active_locty].access &= mask; + + if (is_seize) { + s->loc[s->active_locty].access |= TPM_TIS_ACCESS_BEEN_SEIZED; + } + } + + s->active_locty = new_active_locty; + + trace_tpm_tis_new_active_locality(s->active_locty); + + if (TPM_TIS_IS_VALID_LOCTY(new_active_locty)) { + /* set flags on the new active locality */ + s->loc[new_active_locty].access |= TPM_TIS_ACCESS_ACTIVE_LOCALITY; + s->loc[new_active_locty].access &= ~(TPM_TIS_ACCESS_REQUEST_USE | + TPM_TIS_ACCESS_SEIZE); + } + + if (change) { + tpm_tis_raise_irq(s, s->active_locty, TPM_TIS_INT_LOCALITY_CHANGED); + } +} + +/* abort -- this function switches the locality */ +static void tpm_tis_abort(TPMState *s) +{ + s->rw_offset = 0; + + trace_tpm_tis_abort(s->next_locty); + + /* + * Need to react differently depending on who's aborting now and + * which locality will become active afterwards. 
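+     * For example, when a locality aborts its own command the state
+     * machine drops straight back to READY and COMMAND_READY is
+     * signalled; when the abort hands the interface to a different
+     * locality, only the locality-changed signalling applies.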
+ */ + if (s->aborting_locty == s->next_locty) { + s->loc[s->aborting_locty].state = TPM_TIS_STATE_READY; + tpm_tis_sts_set(&s->loc[s->aborting_locty], + TPM_TIS_STS_COMMAND_READY); + tpm_tis_raise_irq(s, s->aborting_locty, TPM_TIS_INT_COMMAND_READY); + } + + /* locality after abort is another one than the current one */ + tpm_tis_new_active_locality(s, s->next_locty); + + s->next_locty = TPM_TIS_NO_LOCALITY; + /* nobody's aborting a command anymore */ + s->aborting_locty = TPM_TIS_NO_LOCALITY; +} + +/* prepare aborting current command */ +static void tpm_tis_prep_abort(TPMState *s, uint8_t locty, uint8_t newlocty) +{ + uint8_t busy_locty; + + assert(TPM_TIS_IS_VALID_LOCTY(newlocty)); + + s->aborting_locty = locty; /* may also be TPM_TIS_NO_LOCALITY */ + s->next_locty = newlocty; /* locality after successful abort */ + + /* + * only abort a command using an interrupt if currently executing + * a command AND if there's a valid connection to the vTPM. + */ + for (busy_locty = 0; busy_locty < TPM_TIS_NUM_LOCALITIES; busy_locty++) { + if (s->loc[busy_locty].state == TPM_TIS_STATE_EXECUTION) { + /* + * request the backend to cancel. Some backends may not + * support it + */ + tpm_backend_cancel_cmd(s->be_driver); + return; + } + } + + tpm_tis_abort(s); +} + +/* + * Callback from the TPM to indicate that the response was received. + */ +void tpm_tis_request_completed(TPMState *s, int ret) +{ + uint8_t locty = s->cmd.locty; + uint8_t l; + + assert(TPM_TIS_IS_VALID_LOCTY(locty)); + + if (s->cmd.selftest_done) { + for (l = 0; l < TPM_TIS_NUM_LOCALITIES; l++) { + s->loc[l].sts |= TPM_TIS_STS_SELFTEST_DONE; + } + } + + /* FIXME: report error if ret != 0 */ + tpm_tis_sts_set(&s->loc[locty], + TPM_TIS_STS_VALID | TPM_TIS_STS_DATA_AVAILABLE); + s->loc[locty].state = TPM_TIS_STATE_COMPLETION; + s->rw_offset = 0; + + tpm_util_show_buffer(s->buffer, s->be_buffer_size, "From TPM"); + + if (TPM_TIS_IS_VALID_LOCTY(s->next_locty)) { + tpm_tis_abort(s); + } + + tpm_tis_raise_irq(s, locty, + TPM_TIS_INT_DATA_AVAILABLE | TPM_TIS_INT_STS_VALID); +} + +/* + * Read a byte of response data + */ +static uint32_t tpm_tis_data_read(TPMState *s, uint8_t locty) +{ + uint32_t ret = TPM_TIS_NO_DATA_BYTE; + uint16_t len; + + if ((s->loc[locty].sts & TPM_TIS_STS_DATA_AVAILABLE)) { + len = MIN(tpm_cmd_get_size(&s->buffer), + s->be_buffer_size); + + ret = s->buffer[s->rw_offset++]; + if (s->rw_offset >= len) { + /* got last byte */ + tpm_tis_sts_set(&s->loc[locty], TPM_TIS_STS_VALID); + tpm_tis_raise_irq(s, locty, TPM_TIS_INT_STS_VALID); + } + trace_tpm_tis_data_read(ret, s->rw_offset - 1); + } + + return ret; +} + +#ifdef DEBUG_TIS +static void tpm_tis_dump_state(TPMState *s, hwaddr addr) +{ + static const unsigned regs[] = { + TPM_TIS_REG_ACCESS, + TPM_TIS_REG_INT_ENABLE, + TPM_TIS_REG_INT_VECTOR, + TPM_TIS_REG_INT_STATUS, + TPM_TIS_REG_INTF_CAPABILITY, + TPM_TIS_REG_STS, + TPM_TIS_REG_DID_VID, + TPM_TIS_REG_RID, + 0xfff}; + int idx; + uint8_t locty = tpm_tis_locality_from_addr(addr); + hwaddr base = addr & ~0xfff; + + printf("tpm_tis: active locality : %d\n" + "tpm_tis: state of locality %d : %d\n" + "tpm_tis: register dump:\n", + s->active_locty, + locty, s->loc[locty].state); + + for (idx = 0; regs[idx] != 0xfff; idx++) { + printf("tpm_tis: 0x%04x : 0x%08x\n", regs[idx], + (int)tpm_tis_mmio_read(s, base + regs[idx], 4)); + } + + printf("tpm_tis: r/w offset : %d\n" + "tpm_tis: result buffer : ", + s->rw_offset); + for (idx = 0; + idx < MIN(tpm_cmd_get_size(&s->buffer), s->be_buffer_size); + idx++) { + printf("%c%02x%s", + 
s->rw_offset == idx ? '>' : ' ', + s->buffer[idx], + ((idx & 0xf) == 0xf) ? "\ntpm_tis: " : ""); + } + printf("\n"); +} +#endif + +/* + * Read a register of the TIS interface + * See specs pages 33-63 for description of the registers + */ +static uint64_t tpm_tis_mmio_read(void *opaque, hwaddr addr, + unsigned size) +{ + TPMState *s = opaque; + uint16_t offset = addr & 0xffc; + uint8_t shift = (addr & 0x3) * 8; + uint32_t val = 0xffffffff; + uint8_t locty = tpm_tis_locality_from_addr(addr); + uint32_t avail; + uint8_t v; + + if (tpm_backend_had_startup_error(s->be_driver)) { + return 0; + } + + switch (offset) { + case TPM_TIS_REG_ACCESS: + /* never show the SEIZE flag even though we use it internally */ + val = s->loc[locty].access & ~TPM_TIS_ACCESS_SEIZE; + /* the pending flag is always calculated */ + if (tpm_tis_check_request_use_except(s, locty)) { + val |= TPM_TIS_ACCESS_PENDING_REQUEST; + } + val |= !tpm_backend_get_tpm_established_flag(s->be_driver); + break; + case TPM_TIS_REG_INT_ENABLE: + val = s->loc[locty].inte; + break; + case TPM_TIS_REG_INT_VECTOR: + val = s->irq_num; + break; + case TPM_TIS_REG_INT_STATUS: + val = s->loc[locty].ints; + break; + case TPM_TIS_REG_INTF_CAPABILITY: + switch (s->be_tpm_version) { + case TPM_VERSION_UNSPEC: + val = 0; + break; + case TPM_VERSION_1_2: + val = TPM_TIS_CAPABILITIES_SUPPORTED1_3; + break; + case TPM_VERSION_2_0: + val = TPM_TIS_CAPABILITIES_SUPPORTED2_0; + break; + } + break; + case TPM_TIS_REG_STS: + if (s->active_locty == locty) { + if ((s->loc[locty].sts & TPM_TIS_STS_DATA_AVAILABLE)) { + val = TPM_TIS_BURST_COUNT( + MIN(tpm_cmd_get_size(&s->buffer), + s->be_buffer_size) + - s->rw_offset) | s->loc[locty].sts; + } else { + avail = s->be_buffer_size - s->rw_offset; + /* + * byte-sized reads should not return 0x00 for 0x100 + * available bytes. + */ + if (size == 1 && avail > 0xff) { + avail = 0xff; + } + val = TPM_TIS_BURST_COUNT(avail) | s->loc[locty].sts; + } + } + break; + case TPM_TIS_REG_DATA_FIFO: + case TPM_TIS_REG_DATA_XFIFO ... TPM_TIS_REG_DATA_XFIFO_END: + if (s->active_locty == locty) { + if (size > 4 - (addr & 0x3)) { + /* prevent access beyond FIFO */ + size = 4 - (addr & 0x3); + } + val = 0; + shift = 0; + while (size > 0) { + switch (s->loc[locty].state) { + case TPM_TIS_STATE_COMPLETION: + v = tpm_tis_data_read(s, locty); + break; + default: + v = TPM_TIS_NO_DATA_BYTE; + break; + } + val |= (v << shift); + shift += 8; + size--; + } + shift = 0; /* no more adjustments */ + } + break; + case TPM_TIS_REG_INTERFACE_ID: + val = s->loc[locty].iface_id; + break; + case TPM_TIS_REG_DID_VID: + val = (TPM_TIS_TPM_DID << 16) | TPM_TIS_TPM_VID; + break; + case TPM_TIS_REG_RID: + val = TPM_TIS_TPM_RID; + break; +#ifdef DEBUG_TIS + case TPM_TIS_REG_DEBUG: + tpm_tis_dump_state(s, addr); + break; +#endif + } + + if (shift) { + val >>= shift; + } + + trace_tpm_tis_mmio_read(size, addr, val); + + return val; +} + +/* + * Write a value to a register of the TIS interface + * See specs pages 33-63 for description of the registers + */ +static void tpm_tis_mmio_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + TPMState *s = opaque; + uint16_t off = addr & 0xffc; + uint8_t shift = (addr & 0x3) * 8; + uint8_t locty = tpm_tis_locality_from_addr(addr); + uint8_t active_locty, l; + int c, set_new_locty = 1; + uint16_t len; + uint32_t mask = (size == 1) ? 0xff : ((size == 2) ? 
0xffff : ~0); + + trace_tpm_tis_mmio_write(size, addr, val); + + if (locty == 4) { + trace_tpm_tis_mmio_write_locty4(); + return; + } + + if (tpm_backend_had_startup_error(s->be_driver)) { + return; + } + + val &= mask; + + if (shift) { + val <<= shift; + mask <<= shift; + } + + mask ^= 0xffffffff; + + switch (off) { + case TPM_TIS_REG_ACCESS: + + if ((val & TPM_TIS_ACCESS_SEIZE)) { + val &= ~(TPM_TIS_ACCESS_REQUEST_USE | + TPM_TIS_ACCESS_ACTIVE_LOCALITY); + } + + active_locty = s->active_locty; + + if ((val & TPM_TIS_ACCESS_ACTIVE_LOCALITY)) { + /* give up locality if currently owned */ + if (s->active_locty == locty) { + trace_tpm_tis_mmio_write_release_locty(locty); + + uint8_t newlocty = TPM_TIS_NO_LOCALITY; + /* anybody wants the locality ? */ + for (c = TPM_TIS_NUM_LOCALITIES - 1; c >= 0; c--) { + if ((s->loc[c].access & TPM_TIS_ACCESS_REQUEST_USE)) { + trace_tpm_tis_mmio_write_locty_req_use(c); + newlocty = c; + break; + } + } + trace_tpm_tis_mmio_write_next_locty(newlocty); + + if (TPM_TIS_IS_VALID_LOCTY(newlocty)) { + set_new_locty = 0; + tpm_tis_prep_abort(s, locty, newlocty); + } else { + active_locty = TPM_TIS_NO_LOCALITY; + } + } else { + /* not currently the owner; clear a pending request */ + s->loc[locty].access &= ~TPM_TIS_ACCESS_REQUEST_USE; + } + } + + if ((val & TPM_TIS_ACCESS_BEEN_SEIZED)) { + s->loc[locty].access &= ~TPM_TIS_ACCESS_BEEN_SEIZED; + } + + if ((val & TPM_TIS_ACCESS_SEIZE)) { + /* + * allow seize if a locality is active and the requesting + * locality is higher than the one that's active + * OR + * allow seize for requesting locality if no locality is + * active + */ + while ((TPM_TIS_IS_VALID_LOCTY(s->active_locty) && + locty > s->active_locty) || + !TPM_TIS_IS_VALID_LOCTY(s->active_locty)) { + bool higher_seize = false; + + /* already a pending SEIZE ? 
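If this locality already initiated one, there is nothing more to do.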
*/ + if ((s->loc[locty].access & TPM_TIS_ACCESS_SEIZE)) { + break; + } + + /* check for ongoing seize by a higher locality */ + for (l = locty + 1; l < TPM_TIS_NUM_LOCALITIES; l++) { + if ((s->loc[l].access & TPM_TIS_ACCESS_SEIZE)) { + higher_seize = true; + break; + } + } + + if (higher_seize) { + break; + } + + /* cancel any seize by a lower locality */ + for (l = 0; l < locty; l++) { + s->loc[l].access &= ~TPM_TIS_ACCESS_SEIZE; + } + + s->loc[locty].access |= TPM_TIS_ACCESS_SEIZE; + + trace_tpm_tis_mmio_write_locty_seized(locty, s->active_locty); + trace_tpm_tis_mmio_write_init_abort(); + + set_new_locty = 0; + tpm_tis_prep_abort(s, s->active_locty, locty); + break; + } + } + + if ((val & TPM_TIS_ACCESS_REQUEST_USE)) { + if (s->active_locty != locty) { + if (TPM_TIS_IS_VALID_LOCTY(s->active_locty)) { + s->loc[locty].access |= TPM_TIS_ACCESS_REQUEST_USE; + } else { + /* no locality active -> make this one active now */ + active_locty = locty; + } + } + } + + if (set_new_locty) { + tpm_tis_new_active_locality(s, active_locty); + } + + break; + case TPM_TIS_REG_INT_ENABLE: + if (s->active_locty != locty) { + break; + } + + s->loc[locty].inte &= mask; + s->loc[locty].inte |= (val & (TPM_TIS_INT_ENABLED | + TPM_TIS_INT_POLARITY_MASK | + TPM_TIS_INTERRUPTS_SUPPORTED)); + break; + case TPM_TIS_REG_INT_VECTOR: + /* hard wired -- ignore */ + break; + case TPM_TIS_REG_INT_STATUS: + if (s->active_locty != locty) { + break; + } + + /* clearing of interrupt flags */ + if (((val & TPM_TIS_INTERRUPTS_SUPPORTED)) && + (s->loc[locty].ints & TPM_TIS_INTERRUPTS_SUPPORTED)) { + s->loc[locty].ints &= ~val; + if (s->loc[locty].ints == 0) { + qemu_irq_lower(s->irq); + trace_tpm_tis_mmio_write_lowering_irq(); + } + } + s->loc[locty].ints &= ~(val & TPM_TIS_INTERRUPTS_SUPPORTED); + break; + case TPM_TIS_REG_STS: + if (s->active_locty != locty) { + break; + } + + if (s->be_tpm_version == TPM_VERSION_2_0) { + /* some flags that are only supported for TPM 2 */ + if (val & TPM_TIS_STS_COMMAND_CANCEL) { + if (s->loc[locty].state == TPM_TIS_STATE_EXECUTION) { + /* + * request the backend to cancel. 
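The cancel completes
+ * asynchronously through tpm_tis_request_completed().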
Some backends may not + * support it + */ + tpm_backend_cancel_cmd(s->be_driver); + } + } + + if (val & TPM_TIS_STS_RESET_ESTABLISHMENT_BIT) { + if (locty == 3 || locty == 4) { + tpm_backend_reset_tpm_established_flag(s->be_driver, locty); + } + } + } + + val &= (TPM_TIS_STS_COMMAND_READY | TPM_TIS_STS_TPM_GO | + TPM_TIS_STS_RESPONSE_RETRY); + + if (val == TPM_TIS_STS_COMMAND_READY) { + switch (s->loc[locty].state) { + + case TPM_TIS_STATE_READY: + s->rw_offset = 0; + break; + + case TPM_TIS_STATE_IDLE: + tpm_tis_sts_set(&s->loc[locty], TPM_TIS_STS_COMMAND_READY); + s->loc[locty].state = TPM_TIS_STATE_READY; + tpm_tis_raise_irq(s, locty, TPM_TIS_INT_COMMAND_READY); + break; + + case TPM_TIS_STATE_EXECUTION: + case TPM_TIS_STATE_RECEPTION: + /* abort currently running command */ + trace_tpm_tis_mmio_write_init_abort(); + tpm_tis_prep_abort(s, locty, locty); + break; + + case TPM_TIS_STATE_COMPLETION: + s->rw_offset = 0; + /* shortcut to ready state with C/R set */ + s->loc[locty].state = TPM_TIS_STATE_READY; + if (!(s->loc[locty].sts & TPM_TIS_STS_COMMAND_READY)) { + tpm_tis_sts_set(&s->loc[locty], + TPM_TIS_STS_COMMAND_READY); + tpm_tis_raise_irq(s, locty, TPM_TIS_INT_COMMAND_READY); + } + s->loc[locty].sts &= ~(TPM_TIS_STS_DATA_AVAILABLE); + break; + + } + } else if (val == TPM_TIS_STS_TPM_GO) { + switch (s->loc[locty].state) { + case TPM_TIS_STATE_RECEPTION: + if ((s->loc[locty].sts & TPM_TIS_STS_EXPECT) == 0) { + tpm_tis_tpm_send(s, locty); + } + break; + default: + /* ignore */ + break; + } + } else if (val == TPM_TIS_STS_RESPONSE_RETRY) { + switch (s->loc[locty].state) { + case TPM_TIS_STATE_COMPLETION: + s->rw_offset = 0; + tpm_tis_sts_set(&s->loc[locty], + TPM_TIS_STS_VALID| + TPM_TIS_STS_DATA_AVAILABLE); + break; + default: + /* ignore */ + break; + } + } + break; + case TPM_TIS_REG_DATA_FIFO: + case TPM_TIS_REG_DATA_XFIFO ... 
TPM_TIS_REG_DATA_XFIFO_END: + /* data fifo */ + if (s->active_locty != locty) { + break; + } + + if (s->loc[locty].state == TPM_TIS_STATE_IDLE || + s->loc[locty].state == TPM_TIS_STATE_EXECUTION || + s->loc[locty].state == TPM_TIS_STATE_COMPLETION) { + /* drop the byte */ + } else { + trace_tpm_tis_mmio_write_data2send(val, size); + if (s->loc[locty].state == TPM_TIS_STATE_READY) { + s->loc[locty].state = TPM_TIS_STATE_RECEPTION; + tpm_tis_sts_set(&s->loc[locty], + TPM_TIS_STS_EXPECT | TPM_TIS_STS_VALID); + } + + val >>= shift; + if (size > 4 - (addr & 0x3)) { + /* prevent access beyond FIFO */ + size = 4 - (addr & 0x3); + } + + while ((s->loc[locty].sts & TPM_TIS_STS_EXPECT) && size > 0) { + if (s->rw_offset < s->be_buffer_size) { + s->buffer[s->rw_offset++] = + (uint8_t)val; + val >>= 8; + size--; + } else { + tpm_tis_sts_set(&s->loc[locty], TPM_TIS_STS_VALID); + } + } + + /* check for complete packet */ + if (s->rw_offset > 5 && + (s->loc[locty].sts & TPM_TIS_STS_EXPECT)) { + /* we have a packet length - see if we have all of it */ + bool need_irq = !(s->loc[locty].sts & TPM_TIS_STS_VALID); + + len = tpm_cmd_get_size(&s->buffer); + if (len > s->rw_offset) { + tpm_tis_sts_set(&s->loc[locty], + TPM_TIS_STS_EXPECT | TPM_TIS_STS_VALID); + } else { + /* packet complete */ + tpm_tis_sts_set(&s->loc[locty], TPM_TIS_STS_VALID); + } + if (need_irq) { + tpm_tis_raise_irq(s, locty, TPM_TIS_INT_STS_VALID); + } + } + } + break; + case TPM_TIS_REG_INTERFACE_ID: + if (val & TPM_TIS_IFACE_ID_INT_SEL_LOCK) { + for (l = 0; l < TPM_TIS_NUM_LOCALITIES; l++) { + s->loc[l].iface_id |= TPM_TIS_IFACE_ID_INT_SEL_LOCK; + } + } + break; + } +} + +const MemoryRegionOps tpm_tis_memory_ops = { + .read = tpm_tis_mmio_read, + .write = tpm_tis_mmio_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 1, + .max_access_size = 4, + }, +}; + +/* + * Get the TPMVersion of the backend device being used + */ +enum TPMVersion tpm_tis_get_tpm_version(TPMState *s) +{ + if (tpm_backend_had_startup_error(s->be_driver)) { + return TPM_VERSION_UNSPEC; + } + + return tpm_backend_get_tpm_version(s->be_driver); +} + +/* + * This function is called when the machine starts, resets or due to + * S3 resume. 
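It re-reads the backend's buffer size and
+ * returns every locality to the Idle state.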
+ */ +void tpm_tis_reset(TPMState *s) +{ + int c; + + s->be_tpm_version = tpm_backend_get_tpm_version(s->be_driver); + s->be_buffer_size = MIN(tpm_backend_get_buffer_size(s->be_driver), + TPM_TIS_BUFFER_MAX); + + if (s->ppi_enabled) { + tpm_ppi_reset(&s->ppi); + } + tpm_backend_reset(s->be_driver); + + s->active_locty = TPM_TIS_NO_LOCALITY; + s->next_locty = TPM_TIS_NO_LOCALITY; + s->aborting_locty = TPM_TIS_NO_LOCALITY; + + for (c = 0; c < TPM_TIS_NUM_LOCALITIES; c++) { + s->loc[c].access = TPM_TIS_ACCESS_TPM_REG_VALID_STS; + switch (s->be_tpm_version) { + case TPM_VERSION_UNSPEC: + break; + case TPM_VERSION_1_2: + s->loc[c].sts = TPM_TIS_STS_TPM_FAMILY1_2; + s->loc[c].iface_id = TPM_TIS_IFACE_ID_SUPPORTED_FLAGS1_3; + break; + case TPM_VERSION_2_0: + s->loc[c].sts = TPM_TIS_STS_TPM_FAMILY2_0; + s->loc[c].iface_id = TPM_TIS_IFACE_ID_SUPPORTED_FLAGS2_0; + break; + } + s->loc[c].inte = TPM_TIS_INT_POLARITY_LOW_LEVEL; + s->loc[c].ints = 0; + s->loc[c].state = TPM_TIS_STATE_IDLE; + + s->rw_offset = 0; + } + + if (tpm_backend_startup_tpm(s->be_driver, s->be_buffer_size) < 0) { + exit(1); + } +} + +/* persistent state handling */ + +int tpm_tis_pre_save(TPMState *s) +{ + uint8_t locty = s->active_locty; + + trace_tpm_tis_pre_save(locty, s->rw_offset); + + if (DEBUG_TIS) { + tpm_tis_dump_state(s, 0); + } + + /* + * Synchronize with backend completion. + */ + tpm_backend_finish_sync(s->be_driver); + + return 0; +} + +const VMStateDescription vmstate_locty = { + .name = "tpm-tis/locty", + .version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_UINT32(state, TPMLocality), + VMSTATE_UINT32(inte, TPMLocality), + VMSTATE_UINT32(ints, TPMLocality), + VMSTATE_UINT8(access, TPMLocality), + VMSTATE_UINT32(sts, TPMLocality), + VMSTATE_UINT32(iface_id, TPMLocality), + VMSTATE_END_OF_LIST(), + } +}; + diff --git a/hw/tpm/tpm_tis_isa.c b/hw/tpm/tpm_tis_isa.c new file mode 100644 index 000000000..10d8a14f1 --- /dev/null +++ b/hw/tpm/tpm_tis_isa.c @@ -0,0 +1,173 @@ +/* + * tpm_tis_isa.c - QEMU's TPM TIS ISA Device + * + * Copyright (C) 2006,2010-2013 IBM Corporation + * + * Authors: + * Stefan Berger + * David Safford + * + * Xen 4 support: Andrease Niederl + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + * Implementation of the TIS interface according to specs found at + * http://www.trustedcomputinggroup.org. This implementation currently + * supports version 1.3, 21 March 2013 + * In the developers menu choose the PC Client section then find the TIS + * specification. 
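+ *
+ * Illustrative invocation (backend setup may differ):
+ *   qemu-system-x86_64 ... -tpmdev passthrough,id=tpm0 -device tpm-tis,tpmdev=tpm0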
+ * + * TPM TIS for TPM 2 implementation following TCG PC Client Platform + * TPM Profile (PTP) Specification, Familiy 2.0, Revision 00.43 + */ + +#include "qemu/osdep.h" +#include "hw/isa/isa.h" +#include "hw/qdev-properties.h" +#include "migration/vmstate.h" +#include "hw/acpi/tpm.h" +#include "tpm_prop.h" +#include "tpm_tis.h" +#include "qom/object.h" + +struct TPMStateISA { + /*< private >*/ + ISADevice parent_obj; + + /*< public >*/ + TPMState state; /* not a QOM object */ +}; + +OBJECT_DECLARE_SIMPLE_TYPE(TPMStateISA, TPM_TIS_ISA) + +static int tpm_tis_pre_save_isa(void *opaque) +{ + TPMStateISA *isadev = opaque; + + return tpm_tis_pre_save(&isadev->state); +} + +static const VMStateDescription vmstate_tpm_tis_isa = { + .name = "tpm-tis", + .version_id = 0, + .pre_save = tpm_tis_pre_save_isa, + .fields = (VMStateField[]) { + VMSTATE_BUFFER(state.buffer, TPMStateISA), + VMSTATE_UINT16(state.rw_offset, TPMStateISA), + VMSTATE_UINT8(state.active_locty, TPMStateISA), + VMSTATE_UINT8(state.aborting_locty, TPMStateISA), + VMSTATE_UINT8(state.next_locty, TPMStateISA), + + VMSTATE_STRUCT_ARRAY(state.loc, TPMStateISA, TPM_TIS_NUM_LOCALITIES, 0, + vmstate_locty, TPMLocality), + + VMSTATE_END_OF_LIST() + } +}; + +static void tpm_tis_isa_request_completed(TPMIf *ti, int ret) +{ + TPMStateISA *isadev = TPM_TIS_ISA(ti); + TPMState *s = &isadev->state; + + tpm_tis_request_completed(s, ret); +} + +static enum TPMVersion tpm_tis_isa_get_tpm_version(TPMIf *ti) +{ + TPMStateISA *isadev = TPM_TIS_ISA(ti); + TPMState *s = &isadev->state; + + return tpm_tis_get_tpm_version(s); +} + +static void tpm_tis_isa_reset(DeviceState *dev) +{ + TPMStateISA *isadev = TPM_TIS_ISA(dev); + TPMState *s = &isadev->state; + + return tpm_tis_reset(s); +} + +static Property tpm_tis_isa_properties[] = { + DEFINE_PROP_UINT32("irq", TPMStateISA, state.irq_num, TPM_TIS_IRQ), + DEFINE_PROP_TPMBE("tpmdev", TPMStateISA, state.be_driver), + DEFINE_PROP_BOOL("ppi", TPMStateISA, state.ppi_enabled, true), + DEFINE_PROP_END_OF_LIST(), +}; + +static void tpm_tis_isa_initfn(Object *obj) +{ + TPMStateISA *isadev = TPM_TIS_ISA(obj); + TPMState *s = &isadev->state; + + memory_region_init_io(&s->mmio, obj, &tpm_tis_memory_ops, + s, "tpm-tis-mmio", + TPM_TIS_NUM_LOCALITIES << TPM_TIS_LOCALITY_SHIFT); +} + +static void tpm_tis_isa_realizefn(DeviceState *dev, Error **errp) +{ + TPMStateISA *isadev = TPM_TIS_ISA(dev); + TPMState *s = &isadev->state; + + if (!tpm_find()) { + error_setg(errp, "at most one TPM device is permitted"); + return; + } + + if (!s->be_driver) { + error_setg(errp, "'tpmdev' property is required"); + return; + } + if (s->irq_num > 15) { + error_setg(errp, "IRQ %d is outside valid range of 0 to 15", + s->irq_num); + return; + } + + isa_init_irq(ISA_DEVICE(dev), &s->irq, s->irq_num); + + memory_region_add_subregion(isa_address_space(ISA_DEVICE(dev)), + TPM_TIS_ADDR_BASE, &s->mmio); + + if (s->ppi_enabled) { + tpm_ppi_init(&s->ppi, isa_address_space(ISA_DEVICE(dev)), + TPM_PPI_ADDR_BASE, OBJECT(dev)); + } +} + +static void tpm_tis_isa_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + TPMIfClass *tc = TPM_IF_CLASS(klass); + + device_class_set_props(dc, tpm_tis_isa_properties); + dc->vmsd = &vmstate_tpm_tis_isa; + tc->model = TPM_MODEL_TPM_TIS; + dc->realize = tpm_tis_isa_realizefn; + dc->reset = tpm_tis_isa_reset; + tc->request_completed = tpm_tis_isa_request_completed; + tc->get_version = tpm_tis_isa_get_tpm_version; + set_bit(DEVICE_CATEGORY_MISC, dc->categories); +} + +static const 
TypeInfo tpm_tis_isa_info = { + .name = TYPE_TPM_TIS_ISA, + .parent = TYPE_ISA_DEVICE, + .instance_size = sizeof(TPMStateISA), + .instance_init = tpm_tis_isa_initfn, + .class_init = tpm_tis_isa_class_init, + .interfaces = (InterfaceInfo[]) { + { TYPE_TPM_IF }, + { } + } +}; + +static void tpm_tis_isa_register(void) +{ + type_register_static(&tpm_tis_isa_info); +} + +type_init(tpm_tis_isa_register) diff --git a/hw/tpm/tpm_tis_sysbus.c b/hw/tpm/tpm_tis_sysbus.c new file mode 100644 index 000000000..45e63efd6 --- /dev/null +++ b/hw/tpm/tpm_tis_sysbus.c @@ -0,0 +1,162 @@ +/* + * tpm_tis_sysbus.c - QEMU's TPM TIS SYSBUS Device + * + * Copyright (C) 2006,2010-2013 IBM Corporation + * + * Authors: + * Stefan Berger + * David Safford + * + * Xen 4 support: Andrease Niederl + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + * Implementation of the TIS interface according to specs found at + * http://www.trustedcomputinggroup.org. This implementation currently + * supports version 1.3, 21 March 2013 + * In the developers menu choose the PC Client section then find the TIS + * specification. + * + * TPM TIS for TPM 2 implementation following TCG PC Client Platform + * TPM Profile (PTP) Specification, Familiy 2.0, Revision 00.43 + */ + +#include "qemu/osdep.h" +#include "hw/qdev-properties.h" +#include "migration/vmstate.h" +#include "hw/acpi/tpm.h" +#include "tpm_prop.h" +#include "hw/sysbus.h" +#include "tpm_tis.h" +#include "qom/object.h" + +struct TPMStateSysBus { + /*< private >*/ + SysBusDevice parent_obj; + + /*< public >*/ + TPMState state; /* not a QOM object */ +}; + +OBJECT_DECLARE_SIMPLE_TYPE(TPMStateSysBus, TPM_TIS_SYSBUS) + +static int tpm_tis_pre_save_sysbus(void *opaque) +{ + TPMStateSysBus *sbdev = opaque; + + return tpm_tis_pre_save(&sbdev->state); +} + +static const VMStateDescription vmstate_tpm_tis_sysbus = { + .name = "tpm-tis", + .version_id = 0, + .pre_save = tpm_tis_pre_save_sysbus, + .fields = (VMStateField[]) { + VMSTATE_BUFFER(state.buffer, TPMStateSysBus), + VMSTATE_UINT16(state.rw_offset, TPMStateSysBus), + VMSTATE_UINT8(state.active_locty, TPMStateSysBus), + VMSTATE_UINT8(state.aborting_locty, TPMStateSysBus), + VMSTATE_UINT8(state.next_locty, TPMStateSysBus), + + VMSTATE_STRUCT_ARRAY(state.loc, TPMStateSysBus, TPM_TIS_NUM_LOCALITIES, + 0, vmstate_locty, TPMLocality), + + VMSTATE_END_OF_LIST() + } +}; + +static void tpm_tis_sysbus_request_completed(TPMIf *ti, int ret) +{ + TPMStateSysBus *sbdev = TPM_TIS_SYSBUS(ti); + TPMState *s = &sbdev->state; + + tpm_tis_request_completed(s, ret); +} + +static enum TPMVersion tpm_tis_sysbus_get_tpm_version(TPMIf *ti) +{ + TPMStateSysBus *sbdev = TPM_TIS_SYSBUS(ti); + TPMState *s = &sbdev->state; + + return tpm_tis_get_tpm_version(s); +} + +static void tpm_tis_sysbus_reset(DeviceState *dev) +{ + TPMStateSysBus *sbdev = TPM_TIS_SYSBUS(dev); + TPMState *s = &sbdev->state; + + return tpm_tis_reset(s); +} + +static Property tpm_tis_sysbus_properties[] = { + DEFINE_PROP_UINT32("irq", TPMStateSysBus, state.irq_num, TPM_TIS_IRQ), + DEFINE_PROP_TPMBE("tpmdev", TPMStateSysBus, state.be_driver), + DEFINE_PROP_BOOL("ppi", TPMStateSysBus, state.ppi_enabled, false), + DEFINE_PROP_END_OF_LIST(), +}; + +static void tpm_tis_sysbus_initfn(Object *obj) +{ + TPMStateSysBus *sbdev = TPM_TIS_SYSBUS(obj); + TPMState *s = &sbdev->state; + + memory_region_init_io(&s->mmio, obj, &tpm_tis_memory_ops, + s, "tpm-tis-mmio", + TPM_TIS_NUM_LOCALITIES << 
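/* each locality owns a 4 KiB register bank */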
TPM_TIS_LOCALITY_SHIFT); + + sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->mmio); + sysbus_init_irq(SYS_BUS_DEVICE(obj), &s->irq); +} + +static void tpm_tis_sysbus_realizefn(DeviceState *dev, Error **errp) +{ + TPMStateSysBus *sbdev = TPM_TIS_SYSBUS(dev); + TPMState *s = &sbdev->state; + + if (!tpm_find()) { + error_setg(errp, "at most one TPM device is permitted"); + return; + } + + if (!s->be_driver) { + error_setg(errp, "'tpmdev' property is required"); + return; + } +} + +static void tpm_tis_sysbus_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + TPMIfClass *tc = TPM_IF_CLASS(klass); + + device_class_set_props(dc, tpm_tis_sysbus_properties); + dc->vmsd = &vmstate_tpm_tis_sysbus; + tc->model = TPM_MODEL_TPM_TIS; + dc->realize = tpm_tis_sysbus_realizefn; + dc->user_creatable = true; + dc->reset = tpm_tis_sysbus_reset; + tc->request_completed = tpm_tis_sysbus_request_completed; + tc->get_version = tpm_tis_sysbus_get_tpm_version; + set_bit(DEVICE_CATEGORY_MISC, dc->categories); +} + +static const TypeInfo tpm_tis_sysbus_info = { + .name = TYPE_TPM_TIS_SYSBUS, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(TPMStateSysBus), + .instance_init = tpm_tis_sysbus_initfn, + .class_init = tpm_tis_sysbus_class_init, + .interfaces = (InterfaceInfo[]) { + { TYPE_TPM_IF }, + { } + } +}; + +static void tpm_tis_sysbus_register(void) +{ + type_register_static(&tpm_tis_sysbus_info); +} + +type_init(tpm_tis_sysbus_register) diff --git a/hw/tpm/trace-events b/hw/tpm/trace-events new file mode 100644 index 000000000..f17110458 --- /dev/null +++ b/hw/tpm/trace-events @@ -0,0 +1,38 @@ +# See docs/devel/tracing.rst for syntax documentation. + +# tpm_crb.c +tpm_crb_mmio_read(uint64_t addr, unsigned size, uint32_t val) "CRB read 0x%016" PRIx64 " len:%u val: 0x%" PRIx32 +tpm_crb_mmio_write(uint64_t addr, unsigned size, uint32_t val) "CRB write 0x%016" PRIx64 " len:%u val: 0x%" PRIx32 + +# tpm_tis_common.c +tpm_tis_raise_irq(uint32_t irqmask) "Raising IRQ for flag 0x%08x" +tpm_tis_new_active_locality(uint8_t locty) "Active locality is now %d" +tpm_tis_abort(uint8_t locty) "New active locality is %d" +tpm_tis_data_read(uint32_t value, uint32_t off) "byte 0x%02x [%d]" +tpm_tis_mmio_read(unsigned size, uint32_t addr, uint32_t val) " read.%u(0x%08x) = 0x%08x" +tpm_tis_mmio_write(unsigned size, uint32_t addr, uint32_t val) "write.%u(0x%08x) = 0x%08x" +tpm_tis_mmio_write_locty4(void) "Access to locality 4 only allowed from hardware" +tpm_tis_mmio_write_release_locty(uint8_t locty) "Releasing locality %d" +tpm_tis_mmio_write_locty_req_use(uint8_t locty) "Locality %d requests use" +tpm_tis_mmio_write_next_locty(uint8_t locty) "Next active locality is %d" +tpm_tis_mmio_write_locty_seized(uint8_t locty, uint8_t active) "Locality %d seized from locality %d" +tpm_tis_mmio_write_init_abort(void) "Initiating abort" +tpm_tis_mmio_write_lowering_irq(void) "Lowering IRQ" +tpm_tis_mmio_write_data2send(uint32_t value, unsigned size) "Data to send to TPM: 0x%08x (size=%d)" +tpm_tis_pre_save(uint8_t locty, uint32_t rw_offset) "locty: %d, rw_offset = %u" + +# tpm_ppi.c +tpm_ppi_memset(uint8_t *ptr, size_t size) "memset: %p %zu" + +# tpm_spapr.c +tpm_spapr_do_crq(uint8_t raw1, uint8_t raw2) "1st 2 bytes in CRQ: 0x%02x 0x%02x" +tpm_spapr_do_crq_crq_result(void) "SPAPR_VTPM_INIT_CRQ_RESULT" +tpm_spapr_do_crq_crq_complete_result(void) "SPAPR_VTPM_INIT_CRQ_COMP_RESULT" +tpm_spapr_do_crq_tpm_command(void) "got TPM command payload" +tpm_spapr_do_crq_tpm_get_rtce_buffer_size(size_t buffersize) 
"response: buffer size is %zu" +tpm_spapr_do_crq_get_version(uint32_t version) "response: version %u" +tpm_spapr_do_crq_prepare_to_suspend(void) "response: preparing to suspend" +tpm_spapr_do_crq_unknown_msg_type(uint8_t type) "Unknown message type 0x%02x" +tpm_spapr_do_crq_unknown_crq(uint8_t raw1, uint8_t raw2) "unknown CRQ 0x%02x 0x%02x ..." +tpm_spapr_post_load(void) "Delivering TPM response after resume" +tpm_spapr_caught_response(uint32_t v) "Caught response to deliver after resume: %u bytes" diff --git a/hw/tpm/trace.h b/hw/tpm/trace.h new file mode 100644 index 000000000..9827c128a --- /dev/null +++ b/hw/tpm/trace.h @@ -0,0 +1 @@ +#include "trace/trace-hw_tpm.h" diff --git a/hw/tricore/Kconfig b/hw/tricore/Kconfig new file mode 100644 index 000000000..33c1e852c --- /dev/null +++ b/hw/tricore/Kconfig @@ -0,0 +1,9 @@ +config TRICORE_TESTBOARD + bool + +config TRIBOARD + bool + select TC27X_SOC + +config TC27X_SOC + bool diff --git a/hw/tricore/meson.build b/hw/tricore/meson.build new file mode 100644 index 000000000..7e3585daf --- /dev/null +++ b/hw/tricore/meson.build @@ -0,0 +1,7 @@ +tricore_ss = ss.source_set() +tricore_ss.add(when: 'CONFIG_TRICORE_TESTBOARD', if_true: files('tricore_testboard.c')) +tricore_ss.add(when: 'CONFIG_TRICORE_TESTBOARD', if_true: files('tricore_testdevice.c')) +tricore_ss.add(when: 'CONFIG_TRIBOARD', if_true: files('triboard.c')) +tricore_ss.add(when: 'CONFIG_TC27X_SOC', if_true: files('tc27x_soc.c')) + +hw_arch += {'tricore': tricore_ss} diff --git a/hw/tricore/tc27x_soc.c b/hw/tricore/tc27x_soc.c new file mode 100644 index 000000000..ecd92717b --- /dev/null +++ b/hw/tricore/tc27x_soc.c @@ -0,0 +1,242 @@ +/* + * Infineon tc27x SoC System emulation. + * + * Copyright (c) 2020 Andreas Konopik + * Copyright (c) 2020 David Brenken + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . 
+ */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "hw/sysbus.h" +#include "hw/loader.h" +#include "qemu/units.h" +#include "hw/misc/unimp.h" + +#include "hw/tricore/tc27x_soc.h" +#include "hw/tricore/triboard.h" + +const MemmapEntry tc27x_soc_memmap[] = { + [TC27XD_DSPR2] = { 0x50000000, 120 * KiB }, + [TC27XD_DCACHE2] = { 0x5001E000, 8 * KiB }, + [TC27XD_DTAG2] = { 0x500C0000, 0xC00 }, + [TC27XD_PSPR2] = { 0x50100000, 32 * KiB }, + [TC27XD_PCACHE2] = { 0x50108000, 16 * KiB }, + [TC27XD_PTAG2] = { 0x501C0000, 0x1800 }, + [TC27XD_DSPR1] = { 0x60000000, 120 * KiB }, + [TC27XD_DCACHE1] = { 0x6001E000, 8 * KiB }, + [TC27XD_DTAG1] = { 0x600C0000, 0xC00 }, + [TC27XD_PSPR1] = { 0x60100000, 32 * KiB }, + [TC27XD_PCACHE1] = { 0x60108000, 16 * KiB }, + [TC27XD_PTAG1] = { 0x601C0000, 0x1800 }, + [TC27XD_DSPR0] = { 0x70000000, 112 * KiB }, + [TC27XD_PSPR0] = { 0x70100000, 24 * KiB }, + [TC27XD_PCACHE0] = { 0x70106000, 8 * KiB }, + [TC27XD_PTAG0] = { 0x701C0000, 0xC00 }, + [TC27XD_PFLASH0_C] = { 0x80000000, 2 * MiB }, + [TC27XD_PFLASH1_C] = { 0x80200000, 2 * MiB }, + [TC27XD_OLDA_C] = { 0x8FE70000, 32 * KiB }, + [TC27XD_BROM_C] = { 0x8FFF8000, 32 * KiB }, + [TC27XD_LMURAM_C] = { 0x90000000, 32 * KiB }, + [TC27XD_EMEM_C] = { 0x9F000000, 1 * MiB }, + [TC27XD_PFLASH0_U] = { 0xA0000000, 0x0 }, + [TC27XD_PFLASH1_U] = { 0xA0200000, 0x0 }, + [TC27XD_DFLASH0] = { 0xAF000000, 1 * MiB + 16 * KiB }, + [TC27XD_DFLASH1] = { 0xAF110000, 64 * KiB }, + [TC27XD_OLDA_U] = { 0xAFE70000, 0x0 }, + [TC27XD_BROM_U] = { 0xAFFF8000, 0x0 }, + [TC27XD_LMURAM_U] = { 0xB0000000, 0x0 }, + [TC27XD_EMEM_U] = { 0xBF000000, 0x0 }, + [TC27XD_PSPRX] = { 0xC0000000, 0x0 }, + [TC27XD_DSPRX] = { 0xD0000000, 0x0 }, +}; + +/* + * Initialize the auxiliary ROM region @mr and map it into + * the memory map at @base. + */ +static void make_rom(MemoryRegion *mr, const char *name, + hwaddr base, hwaddr size) +{ + memory_region_init_rom(mr, NULL, name, size, &error_fatal); + memory_region_add_subregion(get_system_memory(), base, mr); +} + +/* + * Initialize the auxiliary RAM region @mr and map it into + * the memory map at @base. + */ +static void make_ram(MemoryRegion *mr, const char *name, + hwaddr base, hwaddr size) +{ + memory_region_init_ram(mr, NULL, name, size, &error_fatal); + memory_region_add_subregion(get_system_memory(), base, mr); +} + +/* + * Create an alias of an entire original MemoryRegion @orig + * located at @base in the memory map. 
+ */ +static void make_alias(MemoryRegion *mr, const char *name, + MemoryRegion *orig, hwaddr base) +{ + memory_region_init_alias(mr, NULL, name, orig, 0, + memory_region_size(orig)); + memory_region_add_subregion(get_system_memory(), base, mr); +} + +static void tc27x_soc_init_memory_mapping(DeviceState *dev_soc) +{ + TC27XSoCState *s = TC27X_SOC(dev_soc); + TC27XSoCClass *sc = TC27X_SOC_GET_CLASS(s); + + make_ram(&s->cpu0mem.dspr, "CPU0.DSPR", + sc->memmap[TC27XD_DSPR0].base, sc->memmap[TC27XD_DSPR0].size); + make_ram(&s->cpu0mem.pspr, "CPU0.PSPR", + sc->memmap[TC27XD_PSPR0].base, sc->memmap[TC27XD_PSPR0].size); + make_ram(&s->cpu1mem.dspr, "CPU1.DSPR", + sc->memmap[TC27XD_DSPR1].base, sc->memmap[TC27XD_DSPR1].size); + make_ram(&s->cpu1mem.pspr, "CPU1.PSPR", + sc->memmap[TC27XD_PSPR1].base, sc->memmap[TC27XD_PSPR1].size); + make_ram(&s->cpu2mem.dspr, "CPU2.DSPR", + sc->memmap[TC27XD_DSPR2].base, sc->memmap[TC27XD_DSPR2].size); + make_ram(&s->cpu2mem.pspr, "CPU2.PSPR", + sc->memmap[TC27XD_PSPR2].base, sc->memmap[TC27XD_PSPR2].size); + + /* TODO: Control Cache mapping with Memory Test Unit (MTU) */ + make_ram(&s->cpu2mem.dcache, "CPU2.DCACHE", + sc->memmap[TC27XD_DCACHE2].base, sc->memmap[TC27XD_DCACHE2].size); + make_ram(&s->cpu2mem.dtag, "CPU2.DTAG", + sc->memmap[TC27XD_DTAG2].base, sc->memmap[TC27XD_DTAG2].size); + make_ram(&s->cpu2mem.pcache, "CPU2.PCACHE", + sc->memmap[TC27XD_PCACHE2].base, sc->memmap[TC27XD_PCACHE2].size); + make_ram(&s->cpu2mem.ptag, "CPU2.PTAG", + sc->memmap[TC27XD_PTAG2].base, sc->memmap[TC27XD_PTAG2].size); + + make_ram(&s->cpu1mem.dcache, "CPU1.DCACHE", + sc->memmap[TC27XD_DCACHE1].base, sc->memmap[TC27XD_DCACHE1].size); + make_ram(&s->cpu1mem.dtag, "CPU1.DTAG", + sc->memmap[TC27XD_DTAG1].base, sc->memmap[TC27XD_DTAG1].size); + make_ram(&s->cpu1mem.pcache, "CPU1.PCACHE", + sc->memmap[TC27XD_PCACHE1].base, sc->memmap[TC27XD_PCACHE1].size); + make_ram(&s->cpu1mem.ptag, "CPU1.PTAG", + sc->memmap[TC27XD_PTAG1].base, sc->memmap[TC27XD_PTAG1].size); + + make_ram(&s->cpu0mem.pcache, "CPU0.PCACHE", + sc->memmap[TC27XD_PCACHE0].base, sc->memmap[TC27XD_PCACHE0].size); + make_ram(&s->cpu0mem.ptag, "CPU0.PTAG", + sc->memmap[TC27XD_PTAG0].base, sc->memmap[TC27XD_PTAG0].size); + + /* + * TriCore QEMU executes CPU0 only, thus it is sufficient to map + * LOCAL.PSPR/LOCAL.DSPR exclusively onto PSPR0/DSPR0. 
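Accesses through the
+ * 0xC0000000/0xD0000000 windows therefore always reach core 0's scratchpads.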
+ */ + make_alias(&s->psprX, "LOCAL.PSPR", &s->cpu0mem.pspr, + sc->memmap[TC27XD_PSPRX].base); + make_alias(&s->dsprX, "LOCAL.DSPR", &s->cpu0mem.dspr, + sc->memmap[TC27XD_DSPRX].base); + + make_ram(&s->flashmem.pflash0_c, "PF0", + sc->memmap[TC27XD_PFLASH0_C].base, sc->memmap[TC27XD_PFLASH0_C].size); + make_ram(&s->flashmem.pflash1_c, "PF1", + sc->memmap[TC27XD_PFLASH1_C].base, sc->memmap[TC27XD_PFLASH1_C].size); + make_ram(&s->flashmem.dflash0, "DF0", + sc->memmap[TC27XD_DFLASH0].base, sc->memmap[TC27XD_DFLASH0].size); + make_ram(&s->flashmem.dflash1, "DF1", + sc->memmap[TC27XD_DFLASH1].base, sc->memmap[TC27XD_DFLASH1].size); + make_ram(&s->flashmem.olda_c, "OLDA", + sc->memmap[TC27XD_OLDA_C].base, sc->memmap[TC27XD_OLDA_C].size); + make_rom(&s->flashmem.brom_c, "BROM", + sc->memmap[TC27XD_BROM_C].base, sc->memmap[TC27XD_BROM_C].size); + make_ram(&s->flashmem.lmuram_c, "LMURAM", + sc->memmap[TC27XD_LMURAM_C].base, sc->memmap[TC27XD_LMURAM_C].size); + make_ram(&s->flashmem.emem_c, "EMEM", + sc->memmap[TC27XD_EMEM_C].base, sc->memmap[TC27XD_EMEM_C].size); + + make_alias(&s->flashmem.pflash0_u, "PF0.U", &s->flashmem.pflash0_c, + sc->memmap[TC27XD_PFLASH0_U].base); + make_alias(&s->flashmem.pflash1_u, "PF1.U", &s->flashmem.pflash1_c, + sc->memmap[TC27XD_PFLASH1_U].base); + make_alias(&s->flashmem.olda_u, "OLDA.U", &s->flashmem.olda_c, + sc->memmap[TC27XD_OLDA_U].base); + make_alias(&s->flashmem.brom_u, "BROM.U", &s->flashmem.brom_c, + sc->memmap[TC27XD_BROM_U].base); + make_alias(&s->flashmem.lmuram_u, "LMURAM.U", &s->flashmem.lmuram_c, + sc->memmap[TC27XD_LMURAM_U].base); + make_alias(&s->flashmem.emem_u, "EMEM.U", &s->flashmem.emem_c, + sc->memmap[TC27XD_EMEM_U].base); +} + +static void tc27x_soc_realize(DeviceState *dev_soc, Error **errp) +{ + TC27XSoCState *s = TC27X_SOC(dev_soc); + Error *err = NULL; + + qdev_realize(DEVICE(&s->cpu), NULL, &err); + if (err) { + error_propagate(errp, err); + return; + } + + tc27x_soc_init_memory_mapping(dev_soc); +} + +static void tc27x_soc_init(Object *obj) +{ + TC27XSoCState *s = TC27X_SOC(obj); + TC27XSoCClass *sc = TC27X_SOC_GET_CLASS(s); + + object_initialize_child(obj, "tc27x", &s->cpu, sc->cpu_type); +} + +static Property tc27x_soc_properties[] = { + DEFINE_PROP_END_OF_LIST(), +}; + +static void tc27x_soc_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = tc27x_soc_realize; + device_class_set_props(dc, tc27x_soc_properties); +} + +static void tc277d_soc_class_init(ObjectClass *oc, void *data) +{ + TC27XSoCClass *sc = TC27X_SOC_CLASS(oc); + + sc->name = "tc277d-soc"; + sc->cpu_type = TRICORE_CPU_TYPE_NAME("tc27x"); + sc->memmap = tc27x_soc_memmap; + sc->num_cpus = 1; +} + +static const TypeInfo tc27x_soc_types[] = { + { + .name = "tc277d-soc", + .parent = TYPE_TC27X_SOC, + .class_init = tc277d_soc_class_init, + }, { + .name = TYPE_TC27X_SOC, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(TC27XSoCState), + .instance_init = tc27x_soc_init, + .class_size = sizeof(TC27XSoCClass), + .class_init = tc27x_soc_class_init, + .abstract = true, + }, +}; + +DEFINE_TYPES(tc27x_soc_types) diff --git a/hw/tricore/triboard.c b/hw/tricore/triboard.c new file mode 100644 index 000000000..4dba0259c --- /dev/null +++ b/hw/tricore/triboard.c @@ -0,0 +1,95 @@ +/* + * Infineon TriBoard System emulation. 
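+ *
+ * Illustrative invocation (the kernel ELF name is a placeholder):
+ *   qemu-system-tricore -M KIT_AURIX_TC277_TRB -kernel test.elf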
+ *
+ * Copyright (c) 2020 Andreas Konopik
+ * Copyright (c) 2020 David Brenken
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/units.h"
+#include "qapi/error.h"
+#include "hw/qdev-properties.h"
+#include "net/net.h"
+#include "hw/loader.h"
+#include "elf.h"
+#include "hw/tricore/tricore.h"
+#include "qemu/error-report.h"
+
+#include "hw/tricore/triboard.h"
+#include "hw/tricore/tc27x_soc.h"
+
+static void tricore_load_kernel(const char *kernel_filename)
+{
+    uint64_t entry;
+    long kernel_size;
+    TriCoreCPU *cpu;
+    CPUTriCoreState *env;
+
+    kernel_size = load_elf(kernel_filename, NULL,
+                           NULL, NULL, &entry, NULL,
+                           NULL, NULL, 0,
+                           EM_TRICORE, 1, 0);
+    if (kernel_size <= 0) {
+        error_report("no kernel file '%s'", kernel_filename);
+        exit(1);
+    }
+    cpu = TRICORE_CPU(first_cpu);
+    env = &cpu->env;
+    env->PC = entry;
+}
+
+
+static void triboard_machine_init(MachineState *machine)
+{
+    TriBoardMachineState *ms = TRIBOARD_MACHINE(machine);
+    TriBoardMachineClass *amc = TRIBOARD_MACHINE_GET_CLASS(machine);
+
+    object_initialize_child(OBJECT(machine), "soc", &ms->tc27x_soc,
+                            amc->soc_name);
+    sysbus_realize(SYS_BUS_DEVICE(&ms->tc27x_soc), &error_fatal);
+
+    if (machine->kernel_filename) {
+        tricore_load_kernel(machine->kernel_filename);
+    }
+}
+
+static void triboard_machine_tc277d_class_init(ObjectClass *oc,
+                                               void *data)
+{
+    MachineClass *mc = MACHINE_CLASS(oc);
+    TriBoardMachineClass *amc = TRIBOARD_MACHINE_CLASS(oc);
+
+    mc->init = triboard_machine_init;
+    mc->desc = "Infineon AURIX TriBoard TC277 (D-Step)";
+    mc->max_cpus = 1;
+    amc->soc_name = "tc277d-soc";
+};
+
+static const TypeInfo triboard_machine_types[] = {
+    {
+        .name = MACHINE_TYPE_NAME("KIT_AURIX_TC277_TRB"),
+        .parent = TYPE_TRIBOARD_MACHINE,
+        .class_init = triboard_machine_tc277d_class_init,
+    }, {
+        .name = TYPE_TRIBOARD_MACHINE,
+        .parent = TYPE_MACHINE,
+        .instance_size = sizeof(TriBoardMachineState),
+        .class_size = sizeof(TriBoardMachineClass),
+        .abstract = true,
+    },
+};
+
+DEFINE_TYPES(triboard_machine_types)
diff --git a/hw/tricore/tricore_testboard.c b/hw/tricore/tricore_testboard.c
new file mode 100644
index 000000000..b6810e3be
--- /dev/null
+++ b/hw/tricore/tricore_testboard.c
@@ -0,0 +1,118 @@
+/*
+ * TriCore Baseboard System emulation.
+ *
+ * Copyright (c) 2013-2014 Bastian Koppelmann C-Lab/University Paderborn
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+#include "qemu/osdep.h"
+#include "qemu/units.h"
+#include "qapi/error.h"
+#include "cpu.h"
+#include "net/net.h"
+#include "hw/boards.h"
+#include "hw/loader.h"
+#include "elf.h"
+#include "hw/tricore/tricore.h"
+#include "hw/tricore/tricore_testdevice.h"
+#include "qemu/error-report.h"
+
+
+/* Board init. */
+
+static struct tricore_boot_info tricoretb_binfo;
+
+static void tricore_load_kernel(CPUTriCoreState *env)
+{
+    uint64_t entry;
+    long kernel_size;
+
+    kernel_size = load_elf(tricoretb_binfo.kernel_filename, NULL,
+                           NULL, NULL, &entry, NULL,
+                           NULL, NULL, 0,
+                           EM_TRICORE, 1, 0);
+    if (kernel_size <= 0) {
+        error_report("no kernel file '%s'",
+                     tricoretb_binfo.kernel_filename);
+        exit(1);
+    }
+    env->PC = entry;
+
+}
+
+static void tricore_testboard_init(MachineState *machine, int board_id)
+{
+    TriCoreCPU *cpu;
+    CPUTriCoreState *env;
+    TriCoreTestDeviceState *test_dev;
+
+    MemoryRegion *sysmem = get_system_memory();
+    MemoryRegion *ext_cram = g_new(MemoryRegion, 1);
+    MemoryRegion *ext_dram = g_new(MemoryRegion, 1);
+    MemoryRegion *int_cram = g_new(MemoryRegion, 1);
+    MemoryRegion *int_dram = g_new(MemoryRegion, 1);
+    MemoryRegion *pcp_data = g_new(MemoryRegion, 1);
+    MemoryRegion *pcp_text = g_new(MemoryRegion, 1);
+
+    cpu = TRICORE_CPU(cpu_create(machine->cpu_type));
+    env = &cpu->env;
+    memory_region_init_ram(ext_cram, NULL, "powerlink_ext_c.ram",
+                           2 * MiB, &error_fatal);
+    memory_region_init_ram(ext_dram, NULL, "powerlink_ext_d.ram",
+                           4 * MiB, &error_fatal);
+    memory_region_init_ram(int_cram, NULL, "powerlink_int_c.ram", 48 * KiB,
+                           &error_fatal);
+    memory_region_init_ram(int_dram, NULL, "powerlink_int_d.ram", 48 * KiB,
+                           &error_fatal);
+    memory_region_init_ram(pcp_data, NULL, "powerlink_pcp_data.ram",
+                           16 * KiB, &error_fatal);
+    memory_region_init_ram(pcp_text, NULL, "powerlink_pcp_text.ram",
+                           32 * KiB, &error_fatal);
+
+    memory_region_add_subregion(sysmem, 0x80000000, ext_cram);
+    memory_region_add_subregion(sysmem, 0xa1000000, ext_dram);
+    memory_region_add_subregion(sysmem, 0xd4000000, int_cram);
+    memory_region_add_subregion(sysmem, 0xd0000000, int_dram);
+    memory_region_add_subregion(sysmem, 0xf0050000, pcp_data);
+    memory_region_add_subregion(sysmem, 0xf0060000, pcp_text);
+
+    test_dev = g_new(TriCoreTestDeviceState, 1);
+    object_initialize(test_dev, sizeof(TriCoreTestDeviceState),
+                      TYPE_TRICORE_TESTDEVICE);
+    memory_region_add_subregion(sysmem, 0xf0000000, &test_dev->iomem);
+
+
+    tricoretb_binfo.ram_size = machine->ram_size;
+    tricoretb_binfo.kernel_filename = machine->kernel_filename;
+
+    if (machine->kernel_filename) {
+        tricore_load_kernel(env);
+    }
+}
+
+static void tricoreboard_init(MachineState *machine)
+{
+    tricore_testboard_init(machine, 0x183);
+}
+
+static void ttb_machine_init(MachineClass *mc)
+{
+    mc->desc = "a minimal TriCore board";
+    mc->init = tricoreboard_init;
+    mc->default_cpu_type = TRICORE_CPU_TYPE_NAME("tc1796");
+}
+
+DEFINE_MACHINE("tricore_testboard", ttb_machine_init)
diff --git a/hw/tricore/tricore_testdevice.c b/hw/tricore/tricore_testdevice.c
new file mode 100644
index 000000000..a1563aa56
--- /dev/null
+++ b/hw/tricore/tricore_testdevice.c
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2018-2021 Bastian Koppelmann Paderborn University
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/sysbus.h"
+#include "hw/qdev-properties.h"
+#include "hw/tricore/tricore_testdevice.h"
+
+static void tricore_testdevice_write(void *opaque, hwaddr offset,
+                                     uint64_t value, unsigned size)
+{
+    exit(value);
+}
+
+static uint64_t tricore_testdevice_read(void *opaque, hwaddr offset,
+                                        unsigned size)
+{
+    return 0xdeadbeef;
+}
+
+static void tricore_testdevice_reset(DeviceState *dev)
+{
+}
+
+static const MemoryRegionOps tricore_testdevice_ops = {
+    .read = tricore_testdevice_read,
+    .write = tricore_testdevice_write,
+    .valid = {
+        .min_access_size = 4,
+        .max_access_size = 4,
+    },
+    .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+static void tricore_testdevice_init(Object *obj)
+{
+    TriCoreTestDeviceState *s = TRICORE_TESTDEVICE(obj);
+    /* map memory */
+    memory_region_init_io(&s->iomem, OBJECT(s), &tricore_testdevice_ops, s,
+                          "tricore_testdevice", 0x4);
+}
+
+static Property tricore_testdevice_properties[] = {
+    DEFINE_PROP_END_OF_LIST()
+};
+
+static void tricore_testdevice_class_init(ObjectClass *klass, void *data)
+{
+    DeviceClass *dc = DEVICE_CLASS(klass);
+
+    device_class_set_props(dc, tricore_testdevice_properties);
+    dc->reset = tricore_testdevice_reset;
+}
+
+static const TypeInfo tricore_testdevice_info = {
+    .name = TYPE_TRICORE_TESTDEVICE,
+    .parent = TYPE_SYS_BUS_DEVICE,
+    .instance_size = sizeof(TriCoreTestDeviceState),
+    .instance_init = tricore_testdevice_init,
+    .class_init = tricore_testdevice_class_init,
+};
+
+static void tricore_testdevice_register_types(void)
+{
+    type_register_static(&tricore_testdevice_info);
+}
+
+type_init(tricore_testdevice_register_types)
diff --git a/hw/usb/Kconfig b/hw/usb/Kconfig
new file mode 100644
index 000000000..53f8283ff
--- /dev/null
+++ b/hw/usb/Kconfig
@@ -0,0 +1,135 @@
+config USB
+    bool
+
+config USB_UHCI
+    bool
+    default y if PCI_DEVICES
+    depends on PCI
+    select USB
+
+config USB_OHCI
+    bool
+    select USB
+
+config USB_OHCI_PCI
+    bool
+    default y if PCI_DEVICES
+    depends on PCI
+    select USB_OHCI
+
+config USB_EHCI
+    bool
+    select USB
+
+config USB_EHCI_PCI
+    bool
+    default y if PCI_DEVICES
+    select USB_EHCI
+
+config USB_EHCI_SYSBUS
+    bool
+    select USB_EHCI
+
+config USB_XHCI
+    bool
+    select USB
+
+config USB_XHCI_PCI
+    bool
+    default y if PCI_DEVICES
+    depends on PCI
+    select USB_XHCI
+
+config USB_XHCI_NEC
+    bool
+    default y if PCI_DEVICES
+    select USB_XHCI_PCI
+
+config USB_XHCI_SYSBUS
+    bool
+    select USB_XHCI
+
+config USB_MUSB
+    bool
+    select USB
+
+config USB_DWC2
+    bool
+    select USB
+
+config TUSB6010
+    bool
+    select USB_MUSB
+
+config USB_TABLET_WACOM
+    bool
+    default y
+    depends on USB
+
+config USB_STORAGE_CORE
+    bool
+    depends on USB
+    select SCSI
+
+config USB_STORAGE_CLASSIC
+    bool
+    default y
+    depends on USB
+    select USB_STORAGE_CORE
+
+config USB_STORAGE_BOT
+    bool
+    default y
+    depends on USB
+    select USB_STORAGE_CORE
+
+config USB_STORAGE_UAS
+    bool
+    default y
+    depends on USB
+    select SCSI
+
+config USB_AUDIO
+    bool
+    default y
+    depends on USB
+
+config USB_SERIAL
+    bool
+    default y
+    depends on 
USB + +config USB_NETWORK + bool + default y + depends on USB + +config USB_SMARTCARD + bool + default y + depends on USB + +config USB_STORAGE_MTP + bool + default y + depends on USB + +config USB_U2F + bool + default y + depends on USB + +config IMX_USBPHY + bool + default y + depends on USB + +config USB_DWC3 + bool + select USB_XHCI_SYSBUS + select REGISTER + +config XLNX_USB_SUBSYS + bool + default y if XLNX_VERSAL + select USB_DWC3 diff --git a/hw/usb/bus.c b/hw/usb/bus.c new file mode 100644 index 000000000..92d6ed562 --- /dev/null +++ b/hw/usb/bus.c @@ -0,0 +1,776 @@ +#include "qemu/osdep.h" +#include "hw/qdev-properties.h" +#include "hw/usb.h" +#include "qapi/error.h" +#include "qapi/qapi-commands-machine.h" +#include "qapi/type-helpers.h" +#include "qemu/error-report.h" +#include "qemu/module.h" +#include "sysemu/sysemu.h" +#include "migration/vmstate.h" +#include "monitor/monitor.h" +#include "trace.h" +#include "qemu/cutils.h" + +static void usb_bus_dev_print(Monitor *mon, DeviceState *qdev, int indent); + +static char *usb_get_dev_path(DeviceState *dev); +static char *usb_get_fw_dev_path(DeviceState *qdev); +static void usb_qdev_unrealize(DeviceState *qdev); + +static Property usb_props[] = { + DEFINE_PROP_STRING("port", USBDevice, port_path), + DEFINE_PROP_STRING("serial", USBDevice, serial), + DEFINE_PROP_BIT("msos-desc", USBDevice, flags, + USB_DEV_FLAG_MSOS_DESC_ENABLE, true), + DEFINE_PROP_STRING("pcap", USBDevice, pcap_filename), + DEFINE_PROP_END_OF_LIST() +}; + +static void usb_bus_class_init(ObjectClass *klass, void *data) +{ + BusClass *k = BUS_CLASS(klass); + HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(klass); + + k->print_dev = usb_bus_dev_print; + k->get_dev_path = usb_get_dev_path; + k->get_fw_dev_path = usb_get_fw_dev_path; + hc->unplug = qdev_simple_device_unplug_cb; +} + +static const TypeInfo usb_bus_info = { + .name = TYPE_USB_BUS, + .parent = TYPE_BUS, + .instance_size = sizeof(USBBus), + .class_init = usb_bus_class_init, + .interfaces = (InterfaceInfo[]) { + { TYPE_HOTPLUG_HANDLER }, + { } + } +}; + +static int next_usb_bus = 0; +static QTAILQ_HEAD(, USBBus) busses = QTAILQ_HEAD_INITIALIZER(busses); + +static int usb_device_post_load(void *opaque, int version_id) +{ + USBDevice *dev = opaque; + + if (dev->state == USB_STATE_NOTATTACHED) { + dev->attached = false; + } else { + dev->attached = true; + } + return 0; +} + +const VMStateDescription vmstate_usb_device = { + .name = "USBDevice", + .version_id = 1, + .minimum_version_id = 1, + .post_load = usb_device_post_load, + .fields = (VMStateField[]) { + VMSTATE_UINT8(addr, USBDevice), + VMSTATE_INT32(state, USBDevice), + VMSTATE_INT32(remote_wakeup, USBDevice), + VMSTATE_INT32(setup_state, USBDevice), + VMSTATE_INT32(setup_len, USBDevice), + VMSTATE_INT32(setup_index, USBDevice), + VMSTATE_UINT8_ARRAY(setup_buf, USBDevice, 8), + VMSTATE_END_OF_LIST(), + } +}; + +void usb_bus_new(USBBus *bus, size_t bus_size, + USBBusOps *ops, DeviceState *host) +{ + qbus_init(bus, bus_size, TYPE_USB_BUS, host, NULL); + qbus_set_bus_hotplug_handler(BUS(bus)); + bus->ops = ops; + bus->busnr = next_usb_bus++; + QTAILQ_INIT(&bus->free); + QTAILQ_INIT(&bus->used); + QTAILQ_INSERT_TAIL(&busses, bus, next); +} + +void usb_bus_release(USBBus *bus) +{ + assert(next_usb_bus > 0); + + QTAILQ_REMOVE(&busses, bus, next); +} + +USBBus *usb_bus_find(int busnr) +{ + USBBus *bus; + + if (-1 == busnr) + return QTAILQ_FIRST(&busses); + QTAILQ_FOREACH(bus, &busses, next) { + if (bus->busnr == busnr) + return bus; + } + return NULL; +} + 
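+/*
+ * Minimal usage sketch for this file's bus API (the controller state,
+ * ops tables and port count are hypothetical):
+ *
+ *     usb_bus_new(&s->bus, sizeof(s->bus), &my_bus_ops, DEVICE(s));
+ *     for (i = 0; i < MY_NB_PORTS; i++) {
+ *         usb_register_port(&s->bus, &s->ports[i], s, i,
+ *                           &my_port_ops, USB_SPEED_MASK_FULL);
+ *     }
+ */
+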
+static void usb_device_realize(USBDevice *dev, Error **errp) +{ + USBDeviceClass *klass = USB_DEVICE_GET_CLASS(dev); + + if (klass->realize) { + klass->realize(dev, errp); + } +} + +USBDevice *usb_device_find_device(USBDevice *dev, uint8_t addr) +{ + USBDeviceClass *klass = USB_DEVICE_GET_CLASS(dev); + if (klass->find_device) { + return klass->find_device(dev, addr); + } + return NULL; +} + +static void usb_device_unrealize(USBDevice *dev) +{ + USBDeviceClass *klass = USB_DEVICE_GET_CLASS(dev); + + if (klass->unrealize) { + klass->unrealize(dev); + } +} + +void usb_device_cancel_packet(USBDevice *dev, USBPacket *p) +{ + USBDeviceClass *klass = USB_DEVICE_GET_CLASS(dev); + if (klass->cancel_packet) { + klass->cancel_packet(dev, p); + } +} + +void usb_device_handle_attach(USBDevice *dev) +{ + USBDeviceClass *klass = USB_DEVICE_GET_CLASS(dev); + if (klass->handle_attach) { + klass->handle_attach(dev); + } +} + +void usb_device_handle_reset(USBDevice *dev) +{ + USBDeviceClass *klass = USB_DEVICE_GET_CLASS(dev); + if (klass->handle_reset) { + klass->handle_reset(dev); + } +} + +void usb_device_handle_control(USBDevice *dev, USBPacket *p, int request, + int value, int index, int length, uint8_t *data) +{ + USBDeviceClass *klass = USB_DEVICE_GET_CLASS(dev); + if (klass->handle_control) { + klass->handle_control(dev, p, request, value, index, length, data); + } +} + +void usb_device_handle_data(USBDevice *dev, USBPacket *p) +{ + USBDeviceClass *klass = USB_DEVICE_GET_CLASS(dev); + if (klass->handle_data) { + klass->handle_data(dev, p); + } +} + +const char *usb_device_get_product_desc(USBDevice *dev) +{ + USBDeviceClass *klass = USB_DEVICE_GET_CLASS(dev); + return klass->product_desc; +} + +const USBDesc *usb_device_get_usb_desc(USBDevice *dev) +{ + USBDeviceClass *klass = USB_DEVICE_GET_CLASS(dev); + if (dev->usb_desc) { + return dev->usb_desc; + } + return klass->usb_desc; +} + +void usb_device_set_interface(USBDevice *dev, int interface, + int alt_old, int alt_new) +{ + USBDeviceClass *klass = USB_DEVICE_GET_CLASS(dev); + if (klass->set_interface) { + klass->set_interface(dev, interface, alt_old, alt_new); + } +} + +void usb_device_flush_ep_queue(USBDevice *dev, USBEndpoint *ep) +{ + USBDeviceClass *klass = USB_DEVICE_GET_CLASS(dev); + if (klass->flush_ep_queue) { + klass->flush_ep_queue(dev, ep); + } +} + +void usb_device_ep_stopped(USBDevice *dev, USBEndpoint *ep) +{ + USBDeviceClass *klass = USB_DEVICE_GET_CLASS(dev); + if (klass->ep_stopped) { + klass->ep_stopped(dev, ep); + } +} + +int usb_device_alloc_streams(USBDevice *dev, USBEndpoint **eps, int nr_eps, + int streams) +{ + USBDeviceClass *klass = USB_DEVICE_GET_CLASS(dev); + if (klass->alloc_streams) { + return klass->alloc_streams(dev, eps, nr_eps, streams); + } + return 0; +} + +void usb_device_free_streams(USBDevice *dev, USBEndpoint **eps, int nr_eps) +{ + USBDeviceClass *klass = USB_DEVICE_GET_CLASS(dev); + if (klass->free_streams) { + klass->free_streams(dev, eps, nr_eps); + } +} + +static void usb_qdev_realize(DeviceState *qdev, Error **errp) +{ + USBDevice *dev = USB_DEVICE(qdev); + Error *local_err = NULL; + + pstrcpy(dev->product_desc, sizeof(dev->product_desc), + usb_device_get_product_desc(dev)); + dev->auto_attach = 1; + QLIST_INIT(&dev->strings); + usb_ep_init(dev); + + usb_claim_port(dev, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + + usb_device_realize(dev, &local_err); + if (local_err) { + usb_release_port(dev); + error_propagate(errp, local_err); + return; + } + + if 
(dev->auto_attach) { + usb_device_attach(dev, &local_err); + if (local_err) { + usb_qdev_unrealize(qdev); + error_propagate(errp, local_err); + return; + } + } + + if (dev->pcap_filename) { + int fd = qemu_open_old(dev->pcap_filename, O_CREAT | O_WRONLY | O_TRUNC, 0666); + if (fd < 0) { + error_setg(errp, "open %s failed", dev->pcap_filename); + usb_qdev_unrealize(qdev); + return; + } + dev->pcap = fdopen(fd, "w"); + usb_pcap_init(dev->pcap); + } +} + +static void usb_qdev_unrealize(DeviceState *qdev) +{ + USBDevice *dev = USB_DEVICE(qdev); + USBDescString *s, *next; + + QLIST_FOREACH_SAFE(s, &dev->strings, next, next) { + QLIST_REMOVE(s, next); + g_free(s->str); + g_free(s); + } + + if (dev->pcap) { + fclose(dev->pcap); + } + + if (dev->attached) { + usb_device_detach(dev); + } + usb_device_unrealize(dev); + if (dev->port) { + usb_release_port(dev); + } +} + +typedef struct LegacyUSBFactory +{ + const char *name; + const char *usbdevice_name; + USBDevice *(*usbdevice_init)(void); +} LegacyUSBFactory; + +static GSList *legacy_usb_factory; + +void usb_legacy_register(const char *typename, const char *usbdevice_name, + USBDevice *(*usbdevice_init)(void)) +{ + if (usbdevice_name) { + LegacyUSBFactory *f = g_malloc0(sizeof(*f)); + f->name = typename; + f->usbdevice_name = usbdevice_name; + f->usbdevice_init = usbdevice_init; + legacy_usb_factory = g_slist_append(legacy_usb_factory, f); + } +} + +USBDevice *usb_new(const char *name) +{ + return USB_DEVICE(qdev_new(name)); +} + +static USBDevice *usb_try_new(const char *name) +{ + return USB_DEVICE(qdev_try_new(name)); +} + +bool usb_realize_and_unref(USBDevice *dev, USBBus *bus, Error **errp) +{ + return qdev_realize_and_unref(&dev->qdev, &bus->qbus, errp); +} + +USBDevice *usb_create_simple(USBBus *bus, const char *name) +{ + USBDevice *dev = usb_new(name); + + usb_realize_and_unref(dev, bus, &error_abort); + return dev; +} + +static void usb_fill_port(USBPort *port, void *opaque, int index, + USBPortOps *ops, int speedmask) +{ + port->opaque = opaque; + port->index = index; + port->ops = ops; + port->speedmask = speedmask; + usb_port_location(port, NULL, index + 1); +} + +void usb_register_port(USBBus *bus, USBPort *port, void *opaque, int index, + USBPortOps *ops, int speedmask) +{ + usb_fill_port(port, opaque, index, ops, speedmask); + QTAILQ_INSERT_TAIL(&bus->free, port, next); + bus->nfree++; +} + +void usb_register_companion(const char *masterbus, USBPort *ports[], + uint32_t portcount, uint32_t firstport, + void *opaque, USBPortOps *ops, int speedmask, + Error **errp) +{ + USBBus *bus; + int i; + + QTAILQ_FOREACH(bus, &busses, next) { + if (strcmp(bus->qbus.name, masterbus) == 0) { + break; + } + } + + if (!bus) { + error_setg(errp, "USB bus '%s' not found", masterbus); + return; + } + if (!bus->ops->register_companion) { + error_setg(errp, "Can't use USB bus '%s' as masterbus," + " it doesn't support companion controllers", + masterbus); + return; + } + + for (i = 0; i < portcount; i++) { + usb_fill_port(ports[i], opaque, i, ops, speedmask); + } + + bus->ops->register_companion(bus, ports, portcount, firstport, errp); +} + +void usb_port_location(USBPort *downstream, USBPort *upstream, int portnr) +{ + if (upstream) { + int l = snprintf(downstream->path, sizeof(downstream->path), "%s.%d", + upstream->path, portnr); + /* Max string is nn.nn.nn.nn.nn, which fits in 16 bytes */ + assert(l < sizeof(downstream->path)); + downstream->hubcount = upstream->hubcount + 1; + } else { + snprintf(downstream->path, sizeof(downstream->path), "%d", 
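/* no upstream hub: the path is just this root port's number */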
portnr); + downstream->hubcount = 0; + } +} + +void usb_unregister_port(USBBus *bus, USBPort *port) +{ + if (port->dev) { + object_unparent(OBJECT(port->dev)); + } + QTAILQ_REMOVE(&bus->free, port, next); + bus->nfree--; +} + +void usb_claim_port(USBDevice *dev, Error **errp) +{ + USBBus *bus = usb_bus_from_device(dev); + USBPort *port; + USBDevice *hub; + + assert(dev->port == NULL); + + if (dev->port_path) { + QTAILQ_FOREACH(port, &bus->free, next) { + if (strcmp(port->path, dev->port_path) == 0) { + break; + } + } + if (port == NULL) { + error_setg(errp, "usb port %s (bus %s) not found (in use?)", + dev->port_path, bus->qbus.name); + return; + } + } else { + if (bus->nfree == 1 && strcmp(object_get_typename(OBJECT(dev)), "usb-hub") != 0) { + /* Create a new hub and chain it on */ + hub = usb_try_new("usb-hub"); + if (hub) { + usb_realize_and_unref(hub, bus, NULL); + } + } + if (bus->nfree == 0) { + error_setg(errp, "tried to attach usb device %s to a bus " + "with no free ports", dev->product_desc); + return; + } + port = QTAILQ_FIRST(&bus->free); + } + trace_usb_port_claim(bus->busnr, port->path); + + QTAILQ_REMOVE(&bus->free, port, next); + bus->nfree--; + + dev->port = port; + port->dev = dev; + + QTAILQ_INSERT_TAIL(&bus->used, port, next); + bus->nused++; +} + +void usb_release_port(USBDevice *dev) +{ + USBBus *bus = usb_bus_from_device(dev); + USBPort *port = dev->port; + + assert(port != NULL); + trace_usb_port_release(bus->busnr, port->path); + + QTAILQ_REMOVE(&bus->used, port, next); + bus->nused--; + + dev->port = NULL; + port->dev = NULL; + + QTAILQ_INSERT_TAIL(&bus->free, port, next); + bus->nfree++; +} + +static void usb_mask_to_str(char *dest, size_t size, + unsigned int speedmask) +{ + static const struct { + unsigned int mask; + const char *name; + } speeds[] = { + { .mask = USB_SPEED_MASK_FULL, .name = "full" }, + { .mask = USB_SPEED_MASK_HIGH, .name = "high" }, + { .mask = USB_SPEED_MASK_SUPER, .name = "super" }, + }; + int i, pos = 0; + + for (i = 0; i < ARRAY_SIZE(speeds); i++) { + if (speeds[i].mask & speedmask) { + pos += snprintf(dest + pos, size - pos, "%s%s", + pos ? 
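/* '+' joins multiple speeds, e.g. "full+high" */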
"+" : "", + speeds[i].name); + } + } + + if (pos == 0) { + snprintf(dest, size, "unknown"); + } +} + +void usb_check_attach(USBDevice *dev, Error **errp) +{ + USBBus *bus = usb_bus_from_device(dev); + USBPort *port = dev->port; + char devspeed[32], portspeed[32]; + + assert(port != NULL); + assert(!dev->attached); + usb_mask_to_str(devspeed, sizeof(devspeed), dev->speedmask); + usb_mask_to_str(portspeed, sizeof(portspeed), port->speedmask); + trace_usb_port_attach(bus->busnr, port->path, + devspeed, portspeed); + + if (!(port->speedmask & dev->speedmask)) { + error_setg(errp, "Warning: speed mismatch trying to attach" + " usb device \"%s\" (%s speed)" + " to bus \"%s\", port \"%s\" (%s speed)", + dev->product_desc, devspeed, + bus->qbus.name, port->path, portspeed); + return; + } +} + +void usb_device_attach(USBDevice *dev, Error **errp) +{ + USBPort *port = dev->port; + Error *local_err = NULL; + + usb_check_attach(dev, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + + dev->attached = true; + usb_attach(port); +} + +int usb_device_detach(USBDevice *dev) +{ + USBBus *bus = usb_bus_from_device(dev); + USBPort *port = dev->port; + + assert(port != NULL); + assert(dev->attached); + trace_usb_port_detach(bus->busnr, port->path); + + usb_detach(port); + dev->attached = false; + return 0; +} + +static const char *usb_speed(unsigned int speed) +{ + static const char *txt[] = { + [ USB_SPEED_LOW ] = "1.5", + [ USB_SPEED_FULL ] = "12", + [ USB_SPEED_HIGH ] = "480", + [ USB_SPEED_SUPER ] = "5000", + }; + if (speed >= ARRAY_SIZE(txt)) + return "?"; + return txt[speed]; +} + +static void usb_bus_dev_print(Monitor *mon, DeviceState *qdev, int indent) +{ + USBDevice *dev = USB_DEVICE(qdev); + USBBus *bus = usb_bus_from_device(dev); + + monitor_printf(mon, "%*saddr %d.%d, port %s, speed %s, name %s%s\n", + indent, "", bus->busnr, dev->addr, + dev->port ? dev->port->path : "-", + usb_speed(dev->speed), dev->product_desc, + dev->attached ? ", attached" : ""); +} + +static char *usb_get_dev_path(DeviceState *qdev) +{ + USBDevice *dev = USB_DEVICE(qdev); + DeviceState *hcd = qdev->parent_bus->parent; + char *id = qdev_get_dev_path(hcd); + + if (id) { + char *ret = g_strdup_printf("%s/%s", id, dev->port->path); + g_free(id); + return ret; + } else { + return g_strdup(dev->port->path); + } +} + +static char *usb_get_fw_dev_path(DeviceState *qdev) +{ + USBDevice *dev = USB_DEVICE(qdev); + char *fw_path, *in; + ssize_t pos = 0, fw_len; + long nr; + + fw_len = 32 + strlen(dev->port->path) * 6; + fw_path = g_malloc(fw_len); + in = dev->port->path; + while (fw_len - pos > 0) { + nr = strtol(in, &in, 10); + if (in[0] == '.') { + /* some hub between root port and device */ + pos += snprintf(fw_path + pos, fw_len - pos, "hub@%lx/", nr); + in++; + } else { + /* the device itself */ + snprintf(fw_path + pos, fw_len - pos, "%s@%lx", + qdev_fw_name(qdev), nr); + break; + } + } + return fw_path; +} + +HumanReadableText *qmp_x_query_usb(Error **errp) +{ + g_autoptr(GString) buf = g_string_new(""); + USBBus *bus; + USBDevice *dev; + USBPort *port; + + if (QTAILQ_EMPTY(&busses)) { + error_setg(errp, "USB support not enabled"); + return NULL; + } + + QTAILQ_FOREACH(bus, &busses, next) { + QTAILQ_FOREACH(port, &bus->used, next) { + dev = port->dev; + if (!dev) + continue; + g_string_append_printf(buf, + " Device %d.%d, Port %s, Speed %s Mb/s, " + "Product %s%s%s\n", + bus->busnr, dev->addr, port->path, + usb_speed(dev->speed), dev->product_desc, + dev->qdev.id ? 
", ID: " : "", + dev->qdev.id ?: ""); + } + } + + return human_readable_text_from_str(buf); +} + +/* handle legacy -usbdevice cmd line option */ +USBDevice *usbdevice_create(const char *driver) +{ + USBBus *bus = usb_bus_find(-1 /* any */); + LegacyUSBFactory *f = NULL; + Error *err = NULL; + GSList *i; + USBDevice *dev; + + if (strchr(driver, ':')) { + error_report("usbdevice parameters are not supported anymore"); + return NULL; + } + + for (i = legacy_usb_factory; i; i = i->next) { + f = i->data; + if (strcmp(f->usbdevice_name, driver) == 0) { + break; + } + } + if (i == NULL) { +#if 0 + /* no error because some drivers are not converted (yet) */ + error_report("usbdevice %s not found", driver); +#endif + return NULL; + } + + if (!bus) { + error_report("Error: no usb bus to attach usbdevice %s, " + "please try -machine usb=on and check that " + "the machine model supports USB", driver); + return NULL; + } + + dev = f->usbdevice_init ? f->usbdevice_init() : usb_new(f->name); + if (!dev) { + error_report("Failed to create USB device '%s'", f->name); + return NULL; + } + if (!usb_realize_and_unref(dev, bus, &err)) { + error_reportf_err(err, "Failed to initialize USB device '%s': ", + f->name); + object_unparent(OBJECT(dev)); + return NULL; + } + return dev; +} + +static bool usb_get_attached(Object *obj, Error **errp) +{ + USBDevice *dev = USB_DEVICE(obj); + + return dev->attached; +} + +static void usb_set_attached(Object *obj, bool value, Error **errp) +{ + USBDevice *dev = USB_DEVICE(obj); + + if (dev->attached == value) { + return; + } + + if (value) { + usb_device_attach(dev, errp); + } else { + usb_device_detach(dev); + } +} + +static void usb_device_instance_init(Object *obj) +{ + USBDevice *dev = USB_DEVICE(obj); + USBDeviceClass *klass = USB_DEVICE_GET_CLASS(dev); + + if (klass->attached_settable) { + object_property_add_bool(obj, "attached", + usb_get_attached, usb_set_attached); + } else { + object_property_add_bool(obj, "attached", + usb_get_attached, NULL); + } +} + +static void usb_device_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *k = DEVICE_CLASS(klass); + k->bus_type = TYPE_USB_BUS; + k->realize = usb_qdev_realize; + k->unrealize = usb_qdev_unrealize; + device_class_set_props(k, usb_props); +} + +static const TypeInfo usb_device_type_info = { + .name = TYPE_USB_DEVICE, + .parent = TYPE_DEVICE, + .instance_size = sizeof(USBDevice), + .instance_init = usb_device_instance_init, + .abstract = true, + .class_size = sizeof(USBDeviceClass), + .class_init = usb_device_class_init, +}; + +static void usb_register_types(void) +{ + type_register_static(&usb_bus_info); + type_register_static(&usb_device_type_info); +} + +type_init(usb_register_types) diff --git a/hw/usb/ccid-card-emulated.c b/hw/usb/ccid-card-emulated.c new file mode 100644 index 000000000..6c8c0355e --- /dev/null +++ b/hw/usb/ccid-card-emulated.c @@ -0,0 +1,622 @@ +/* + * CCID Card Device. Emulated card. + * + * Copyright (c) 2011 Red Hat. + * Written by Alon Levy. + * + * This code is licensed under the GNU LGPL, version 2 or later. + */ + +/* + * It can be used to provide access to the local hardware in a non exclusive + * way, or it can use certificates. It requires the usb-ccid bus. + * + * Usage 1: standard, mirror hardware reader+card: + * qemu .. 
-usb -device usb-ccid -device ccid-card-emulated
+ *
+ * Usage 2: use certificates, no hardware required
+ * one time: create the certificates:
+ *     for i in 1 2 3; do
+ *         certutil -d /etc/pki/nssdb -x -t "CT,CT,CT" -S -s "CN=user$i" -n user$i
+ *     done
+ * qemu .. -usb -device usb-ccid \
+ *  -device ccid-card-emulated,cert1=user1,cert2=user2,cert3=user3
+ *
+ * If you use a non default db for the certificates you can specify it using
+ * the db parameter.
+ */
+
+#include "qemu/osdep.h"
+#include <libcacard.h>
+
+#include "qemu/thread.h"
+#include "qemu/lockable.h"
+#include "qemu/main-loop.h"
+#include "qemu/module.h"
+#include "ccid.h"
+#include "hw/qdev-properties.h"
+#include "qapi/error.h"
+#include "qom/object.h"
+
+#define DPRINTF(card, lvl, fmt, ...) \
+do {\
+    if (lvl <= card->debug) {\
+        printf("ccid-card-emul: %s: " fmt , __func__, ## __VA_ARGS__);\
+    } \
+} while (0)
+
+
+#define TYPE_EMULATED_CCID "ccid-card-emulated"
+typedef struct EmulatedState EmulatedState;
+DECLARE_INSTANCE_CHECKER(EmulatedState, EMULATED_CCID_CARD,
+                         TYPE_EMULATED_CCID)
+
+#define BACKEND_NSS_EMULATED_NAME "nss-emulated"
+#define BACKEND_CERTIFICATES_NAME "certificates"
+
+enum {
+    BACKEND_NSS_EMULATED = 1,
+    BACKEND_CERTIFICATES
+};
+
+#define DEFAULT_BACKEND BACKEND_NSS_EMULATED
+
+
+enum {
+    EMUL_READER_INSERT = 0,
+    EMUL_READER_REMOVE,
+    EMUL_CARD_INSERT,
+    EMUL_CARD_REMOVE,
+    EMUL_GUEST_APDU,
+    EMUL_RESPONSE_APDU,
+    EMUL_ERROR,
+};
+
+static const char *emul_event_to_string(uint32_t emul_event)
+{
+    switch (emul_event) {
+    case EMUL_READER_INSERT:
+        return "EMUL_READER_INSERT";
+    case EMUL_READER_REMOVE:
+        return "EMUL_READER_REMOVE";
+    case EMUL_CARD_INSERT:
+        return "EMUL_CARD_INSERT";
+    case EMUL_CARD_REMOVE:
+        return "EMUL_CARD_REMOVE";
+    case EMUL_GUEST_APDU:
+        return "EMUL_GUEST_APDU";
+    case EMUL_RESPONSE_APDU:
+        return "EMUL_RESPONSE_APDU";
+    case EMUL_ERROR:
+        return "EMUL_ERROR";
+    }
+    return "UNKNOWN";
+}
+
+typedef struct EmulEvent {
+    QSIMPLEQ_ENTRY(EmulEvent) entry;
+    union {
+        struct {
+            uint32_t type;
+        } gen;
+        struct {
+            uint32_t type;
+            uint64_t code;
+        } error;
+        struct {
+            uint32_t type;
+            uint32_t len;
+            uint8_t data[];
+        } data;
+    } p;
+} EmulEvent;
+
+#define MAX_ATR_SIZE 40
+struct EmulatedState {
+    CCIDCardState base;
+    uint8_t  debug;
+    char    *backend_str;
+    uint32_t backend;
+    char    *cert1;
+    char    *cert2;
+    char    *cert3;
+    char    *db;
+    uint8_t  atr[MAX_ATR_SIZE];
+    uint8_t  atr_length;
+    QSIMPLEQ_HEAD(, EmulEvent) event_list;
+    QemuMutex event_list_mutex;
+    QemuThread event_thread_id;
+    VReader *reader;
+    QSIMPLEQ_HEAD(, EmulEvent) guest_apdu_list;
+    QemuMutex vreader_mutex; /* and guest_apdu_list mutex */
+    QemuMutex handle_apdu_mutex;
+    QemuCond handle_apdu_cond;
+    EventNotifier notifier;
+    int      quit_apdu_thread;
+    QemuThread apdu_thread_id;
+};
+
+static void emulated_apdu_from_guest(CCIDCardState *base,
+                                     const uint8_t *apdu, uint32_t len)
+{
+    EmulatedState *card = EMULATED_CCID_CARD(base);
+    EmulEvent *event = (EmulEvent *)g_malloc(sizeof(EmulEvent) + len);
+
+    assert(event);
+    event->p.data.type = EMUL_GUEST_APDU;
+    event->p.data.len = len;
+    memcpy(event->p.data.data, apdu, len);
+    qemu_mutex_lock(&card->vreader_mutex);
+    QSIMPLEQ_INSERT_TAIL(&card->guest_apdu_list, event, entry);
+    qemu_mutex_unlock(&card->vreader_mutex);
+    qemu_mutex_lock(&card->handle_apdu_mutex);
+    qemu_cond_signal(&card->handle_apdu_cond);
+    qemu_mutex_unlock(&card->handle_apdu_mutex);
+}
+
+static const uint8_t *emulated_get_atr(CCIDCardState *base, uint32_t *len)
+{
+    EmulatedState *card =
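+        /*
+         * The ATR (Answer To Reset) returned here was captured when the
+         * virtual card was powered on at VEVENT_CARD_INSERT time (see
+         * event_thread() and card_event_handler() below).
+         */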
EMULATED_CCID_CARD(base); + + *len = card->atr_length; + return card->atr; +} + +static void emulated_push_event(EmulatedState *card, EmulEvent *event) +{ + qemu_mutex_lock(&card->event_list_mutex); + QSIMPLEQ_INSERT_TAIL(&(card->event_list), event, entry); + qemu_mutex_unlock(&card->event_list_mutex); + event_notifier_set(&card->notifier); +} + +static void emulated_push_type(EmulatedState *card, uint32_t type) +{ + EmulEvent *event = g_new(EmulEvent, 1); + + assert(event); + event->p.gen.type = type; + emulated_push_event(card, event); +} + +static void emulated_push_error(EmulatedState *card, uint64_t code) +{ + EmulEvent *event = g_new(EmulEvent, 1); + + assert(event); + event->p.error.type = EMUL_ERROR; + event->p.error.code = code; + emulated_push_event(card, event); +} + +static void emulated_push_data_type(EmulatedState *card, uint32_t type, + const uint8_t *data, uint32_t len) +{ + EmulEvent *event = (EmulEvent *)g_malloc(sizeof(EmulEvent) + len); + + assert(event); + event->p.data.type = type; + event->p.data.len = len; + memcpy(event->p.data.data, data, len); + emulated_push_event(card, event); +} + +static void emulated_push_reader_insert(EmulatedState *card) +{ + emulated_push_type(card, EMUL_READER_INSERT); +} + +static void emulated_push_reader_remove(EmulatedState *card) +{ + emulated_push_type(card, EMUL_READER_REMOVE); +} + +static void emulated_push_card_insert(EmulatedState *card, + const uint8_t *atr, uint32_t len) +{ + emulated_push_data_type(card, EMUL_CARD_INSERT, atr, len); +} + +static void emulated_push_card_remove(EmulatedState *card) +{ + emulated_push_type(card, EMUL_CARD_REMOVE); +} + +static void emulated_push_response_apdu(EmulatedState *card, + const uint8_t *apdu, uint32_t len) +{ + emulated_push_data_type(card, EMUL_RESPONSE_APDU, apdu, len); +} + +#define APDU_BUF_SIZE 270 +static void *handle_apdu_thread(void* arg) +{ + EmulatedState *card = arg; + uint8_t recv_data[APDU_BUF_SIZE]; + int recv_len; + VReaderStatus reader_status; + EmulEvent *event; + + while (1) { + qemu_mutex_lock(&card->handle_apdu_mutex); + qemu_cond_wait(&card->handle_apdu_cond, &card->handle_apdu_mutex); + qemu_mutex_unlock(&card->handle_apdu_mutex); + if (card->quit_apdu_thread) { + card->quit_apdu_thread = 0; /* debugging */ + break; + } + WITH_QEMU_LOCK_GUARD(&card->vreader_mutex) { + while (!QSIMPLEQ_EMPTY(&card->guest_apdu_list)) { + event = QSIMPLEQ_FIRST(&card->guest_apdu_list); + assert((unsigned long)event > 1000); + QSIMPLEQ_REMOVE_HEAD(&card->guest_apdu_list, entry); + if (event->p.data.type != EMUL_GUEST_APDU) { + DPRINTF(card, 1, "unexpected message in handle_apdu_thread\n"); + g_free(event); + continue; + } + if (card->reader == NULL) { + DPRINTF(card, 1, "reader is NULL\n"); + g_free(event); + continue; + } + recv_len = sizeof(recv_data); + reader_status = vreader_xfr_bytes(card->reader, + event->p.data.data, event->p.data.len, + recv_data, &recv_len); + DPRINTF(card, 2, "got back apdu of length %d\n", recv_len); + if (reader_status == VREADER_OK) { + emulated_push_response_apdu(card, recv_data, recv_len); + } else { + emulated_push_error(card, reader_status); + } + g_free(event); + } + } + } + return NULL; +} + +static void *event_thread(void *arg) +{ + int atr_len = MAX_ATR_SIZE; + uint8_t atr[MAX_ATR_SIZE]; + VEvent *event = NULL; + EmulatedState *card = arg; + + while (1) { + const char *reader_name; + + event = vevent_wait_next_vevent(); + if (event == NULL || event->type == VEVENT_LAST) { + break; + } + if (event->type != VEVENT_READER_INSERT) { + if 
(card->reader == NULL && event->reader != NULL) { + /* Happens after device_add followed by card remove or insert. + * XXX: create synthetic add_reader events if vcard_emul_init + * already called, which happens if device_del and device_add + * are called */ + card->reader = vreader_reference(event->reader); + } else { + if (event->reader != card->reader) { + fprintf(stderr, + "ERROR: wrong reader: quitting event_thread\n"); + break; + } + } + } + switch (event->type) { + case VEVENT_READER_INSERT: + /* TODO: take a specific reader. i.e. track which reader + * we are seeing here, check it is the one we want (the first, + * or by a particular name), and ignore if we don't want it. + */ + reader_name = vreader_get_name(event->reader); + if (card->reader != NULL) { + DPRINTF(card, 2, "READER INSERT - replacing %s with %s\n", + vreader_get_name(card->reader), reader_name); + qemu_mutex_lock(&card->vreader_mutex); + vreader_free(card->reader); + qemu_mutex_unlock(&card->vreader_mutex); + emulated_push_reader_remove(card); + } + qemu_mutex_lock(&card->vreader_mutex); + DPRINTF(card, 2, "READER INSERT %s\n", reader_name); + card->reader = vreader_reference(event->reader); + qemu_mutex_unlock(&card->vreader_mutex); + emulated_push_reader_insert(card); + break; + case VEVENT_READER_REMOVE: + DPRINTF(card, 2, " READER REMOVE: %s\n", + vreader_get_name(event->reader)); + qemu_mutex_lock(&card->vreader_mutex); + vreader_free(card->reader); + card->reader = NULL; + qemu_mutex_unlock(&card->vreader_mutex); + emulated_push_reader_remove(card); + break; + case VEVENT_CARD_INSERT: + /* get the ATR (intended as a response to a power on from the + * reader */ + atr_len = MAX_ATR_SIZE; + vreader_power_on(event->reader, atr, &atr_len); + card->atr_length = (uint8_t)atr_len; + DPRINTF(card, 2, " CARD INSERT\n"); + emulated_push_card_insert(card, atr, atr_len); + break; + case VEVENT_CARD_REMOVE: + DPRINTF(card, 2, " CARD REMOVE\n"); + emulated_push_card_remove(card); + break; + case VEVENT_LAST: /* quit */ + vevent_delete(event); + return NULL; + default: + break; + } + vevent_delete(event); + } + return NULL; +} + +static void card_event_handler(EventNotifier *notifier) +{ + EmulatedState *card = container_of(notifier, EmulatedState, notifier); + EmulEvent *event, *next; + + event_notifier_test_and_clear(&card->notifier); + QEMU_LOCK_GUARD(&card->event_list_mutex); + QSIMPLEQ_FOREACH_SAFE(event, &card->event_list, entry, next) { + DPRINTF(card, 2, "event %s\n", emul_event_to_string(event->p.gen.type)); + switch (event->p.gen.type) { + case EMUL_RESPONSE_APDU: + ccid_card_send_apdu_to_guest(&card->base, event->p.data.data, + event->p.data.len); + break; + case EMUL_READER_INSERT: + ccid_card_ccid_attach(&card->base); + break; + case EMUL_READER_REMOVE: + ccid_card_ccid_detach(&card->base); + break; + case EMUL_CARD_INSERT: + assert(event->p.data.len <= MAX_ATR_SIZE); + card->atr_length = event->p.data.len; + memcpy(card->atr, event->p.data.data, card->atr_length); + ccid_card_card_inserted(&card->base); + break; + case EMUL_CARD_REMOVE: + ccid_card_card_removed(&card->base); + break; + case EMUL_ERROR: + ccid_card_card_error(&card->base, event->p.error.code); + break; + default: + DPRINTF(card, 2, "unexpected event\n"); + break; + } + g_free(event); + } + QSIMPLEQ_INIT(&card->event_list); +} + +static int init_event_notifier(EmulatedState *card, Error **errp) +{ + if (event_notifier_init(&card->notifier, false) < 0) { + error_setg(errp, "ccid-card-emul: event notifier creation failed"); + return -1; + } + 
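+    /*
+     * The handler installed below runs in the main loop: worker threads
+     * queue EmulEvents via emulated_push_event(), which appends to
+     * event_list under event_list_mutex and calls event_notifier_set();
+     * card_event_handler() then drains the list on the main-loop side.
+     */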
event_notifier_set_handler(&card->notifier, card_event_handler);
+    return 0;
+}
+
+static void clean_event_notifier(EmulatedState *card)
+{
+    event_notifier_set_handler(&card->notifier, NULL);
+    event_notifier_cleanup(&card->notifier);
+}
+
+#define CERTIFICATES_DEFAULT_DB "/etc/pki/nssdb"
+#define CERTIFICATES_ARGS_TEMPLATE\
+    "db=\"%s\" use_hw=no soft=(,Virtual Reader,CAC,,%s,%s,%s)"
+
+static int wrap_vcard_emul_init(VCardEmulOptions *options)
+{
+    static int called;
+    static int options_was_null;
+
+    if (called) {
+        if ((options == NULL) != options_was_null) {
+            printf("%s: warning: running emulated with certificates"
+                   " and emulated side by side is not supported\n",
+                   __func__);
+            return VCARD_EMUL_FAIL;
+        }
+        vcard_emul_replay_insertion_events();
+        return VCARD_EMUL_OK;
+    }
+    options_was_null = (options == NULL);
+    called = 1;
+    return vcard_emul_init(options);
+}
+
+static int emulated_initialize_vcard_from_certificates(EmulatedState *card)
+{
+    char emul_args[200];
+    VCardEmulOptions *options = NULL;
+
+    snprintf(emul_args, sizeof(emul_args) - 1, CERTIFICATES_ARGS_TEMPLATE,
+             card->db ? card->db : CERTIFICATES_DEFAULT_DB,
+             card->cert1, card->cert2, card->cert3);
+    options = vcard_emul_options(emul_args);
+    if (options == NULL) {
+        printf("%s: warning: not using certificates due to"
+               " initialization error\n", __func__);
+    }
+    return wrap_vcard_emul_init(options);
+}
+
+typedef struct EnumTable {
+    const char *name;
+    uint32_t value;
+} EnumTable;
+
+static const EnumTable backend_enum_table[] = {
+    {BACKEND_NSS_EMULATED_NAME, BACKEND_NSS_EMULATED},
+    {BACKEND_CERTIFICATES_NAME, BACKEND_CERTIFICATES},
+    {NULL, 0},
+};
+
+static uint32_t parse_enumeration(char *str,
+                                  const EnumTable *table, uint32_t not_found_value)
+{
+    uint32_t ret = not_found_value;
+
+    if (str == NULL)
+        return 0;
+
+    while (table->name != NULL) {
+        if (strcmp(table->name, str) == 0) {
+            ret = table->value;
+            break;
+        }
+        table++;
+    }
+    return ret;
+}
+
+static void emulated_realize(CCIDCardState *base, Error **errp)
+{
+    EmulatedState *card = EMULATED_CCID_CARD(base);
+    VCardEmulError ret;
+    const EnumTable *ptable;
+
+    QSIMPLEQ_INIT(&card->event_list);
+    QSIMPLEQ_INIT(&card->guest_apdu_list);
+    qemu_mutex_init(&card->event_list_mutex);
+    qemu_mutex_init(&card->vreader_mutex);
+    qemu_mutex_init(&card->handle_apdu_mutex);
+    qemu_cond_init(&card->handle_apdu_cond);
+    card->reader = NULL;
+    card->quit_apdu_thread = 0;
+    if (init_event_notifier(card, errp) < 0) {
+        goto out1;
+    }
+
+    card->backend = 0;
+    if (card->backend_str) {
+        card->backend = parse_enumeration(card->backend_str,
+                                          backend_enum_table, 0);
+    }
+
+    if (card->backend == 0) {
+        error_setg(errp, "backend must be one of:");
+        for (ptable = backend_enum_table; ptable->name != NULL; ++ptable) {
+            error_append_hint(errp, "%s\n", ptable->name);
+        }
+        goto out2;
+    }
+
+    /* TODO: a passthru backend that works on the local machine. third card type? */
+    if (card->backend == BACKEND_CERTIFICATES) {
+        if (card->cert1 != NULL && card->cert2 != NULL && card->cert3 != NULL) {
+            ret = emulated_initialize_vcard_from_certificates(card);
+        } else {
+            error_setg(errp, "%s: you must provide all three certs for"
+                       " certificates backend", TYPE_EMULATED_CCID);
+            goto out2;
+        }
+    } else {
+        if (card->backend != BACKEND_NSS_EMULATED) {
+            error_setg(errp, "%s: bad backend specified.
The options are:%s" + " (default), %s.", TYPE_EMULATED_CCID, + BACKEND_NSS_EMULATED_NAME, BACKEND_CERTIFICATES_NAME); + goto out2; + } + if (card->cert1 != NULL || card->cert2 != NULL || card->cert3 != NULL) { + error_setg(errp, "%s: unexpected cert parameters to nss emulated " + "backend", TYPE_EMULATED_CCID); + goto out2; + } + /* default to mirroring the local hardware readers */ + ret = wrap_vcard_emul_init(NULL); + } + if (ret != VCARD_EMUL_OK) { + error_setg(errp, "%s: failed to initialize vcard", TYPE_EMULATED_CCID); + goto out2; + } + qemu_thread_create(&card->event_thread_id, "ccid/event", event_thread, + card, QEMU_THREAD_JOINABLE); + qemu_thread_create(&card->apdu_thread_id, "ccid/apdu", handle_apdu_thread, + card, QEMU_THREAD_JOINABLE); + + return; + +out2: + clean_event_notifier(card); +out1: + qemu_cond_destroy(&card->handle_apdu_cond); + qemu_mutex_destroy(&card->handle_apdu_mutex); + qemu_mutex_destroy(&card->vreader_mutex); + qemu_mutex_destroy(&card->event_list_mutex); +} + +static void emulated_unrealize(CCIDCardState *base) +{ + EmulatedState *card = EMULATED_CCID_CARD(base); + VEvent *vevent = vevent_new(VEVENT_LAST, NULL, NULL); + + vevent_queue_vevent(vevent); /* stop vevent thread */ + qemu_thread_join(&card->event_thread_id); + + card->quit_apdu_thread = 1; /* stop handle_apdu thread */ + qemu_cond_signal(&card->handle_apdu_cond); + qemu_thread_join(&card->apdu_thread_id); + + clean_event_notifier(card); + /* threads exited, can destroy all condvars/mutexes */ + qemu_cond_destroy(&card->handle_apdu_cond); + qemu_mutex_destroy(&card->handle_apdu_mutex); + qemu_mutex_destroy(&card->vreader_mutex); + qemu_mutex_destroy(&card->event_list_mutex); +} + +static Property emulated_card_properties[] = { + DEFINE_PROP_STRING("backend", EmulatedState, backend_str), + DEFINE_PROP_STRING("cert1", EmulatedState, cert1), + DEFINE_PROP_STRING("cert2", EmulatedState, cert2), + DEFINE_PROP_STRING("cert3", EmulatedState, cert3), + DEFINE_PROP_STRING("db", EmulatedState, db), + DEFINE_PROP_UINT8("debug", EmulatedState, debug, 0), + DEFINE_PROP_END_OF_LIST(), +}; + +static void emulated_class_initfn(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + CCIDCardClass *cc = CCID_CARD_CLASS(klass); + + cc->realize = emulated_realize; + cc->unrealize = emulated_unrealize; + cc->get_atr = emulated_get_atr; + cc->apdu_from_guest = emulated_apdu_from_guest; + set_bit(DEVICE_CATEGORY_INPUT, dc->categories); + dc->desc = "emulated smartcard"; + device_class_set_props(dc, emulated_card_properties); +} + +static const TypeInfo emulated_card_info = { + .name = TYPE_EMULATED_CCID, + .parent = TYPE_CCID_CARD, + .instance_size = sizeof(EmulatedState), + .class_init = emulated_class_initfn, +}; +module_obj(TYPE_EMULATED_CCID); + +static void ccid_card_emulated_register_types(void) +{ + type_register_static(&emulated_card_info); +} + +type_init(ccid_card_emulated_register_types) diff --git a/hw/usb/ccid-card-passthru.c b/hw/usb/ccid-card-passthru.c new file mode 100644 index 000000000..fa3040fb7 --- /dev/null +++ b/hw/usb/ccid-card-passthru.c @@ -0,0 +1,424 @@ +/* + * CCID Passthru Card Device emulation + * + * Copyright (c) 2011 Red Hat. + * Written by Alon Levy. + * + * This work is licensed under the terms of the GNU GPL, version 2.1 or later. + * See the COPYING file in the top-level directory. 
+ */
+
+#include "qemu/osdep.h"
+#include "qemu-common.h"
+#include "qemu/units.h"
+#include <libcacard.h>
+#include "chardev/char-fe.h"
+#include "hw/qdev-properties.h"
+#include "hw/qdev-properties-system.h"
+#include "migration/vmstate.h"
+#include "qemu/error-report.h"
+#include "qemu/module.h"
+#include "qemu/sockets.h"
+#include "ccid.h"
+#include "qapi/error.h"
+#include "qom/object.h"
+
+#define DPRINTF(card, lvl, fmt, ...) \
+do { \
+    if (lvl <= card->debug) { \
+        printf("ccid-card-passthru: " fmt , ## __VA_ARGS__); \
+    } \
+} while (0)
+
+#define D_WARN 1
+#define D_INFO 2
+#define D_MORE_INFO 3
+#define D_VERBOSE 4
+
+/* TODO: do we still need this? */
+static const uint8_t DEFAULT_ATR[] = {
+/*
+ * From some example somewhere
+ * 0x3B, 0xB0, 0x18, 0x00, 0xD1, 0x81, 0x05, 0xB1, 0x40, 0x38, 0x1F, 0x03, 0x28
+ */
+
+/* From an Athena smart card */
+ 0x3B, 0xD5, 0x18, 0xFF, 0x80, 0x91, 0xFE, 0x1F, 0xC3, 0x80, 0x73, 0xC8, 0x21,
+ 0x13, 0x08
+};
+
+#define VSCARD_IN_SIZE (64 * KiB)
+
+/* maximum size of ATR - from 7816-3 */
+#define MAX_ATR_SIZE 40
+
+typedef struct PassthruState PassthruState;
+
+struct PassthruState {
+    CCIDCardState base;
+    CharBackend cs;
+    uint8_t  vscard_in_data[VSCARD_IN_SIZE];
+    uint32_t vscard_in_pos;
+    uint32_t vscard_in_hdr;
+    uint8_t  atr[MAX_ATR_SIZE];
+    uint8_t  atr_length;
+    uint8_t  debug;
+};
+
+#define TYPE_CCID_PASSTHRU "ccid-card-passthru"
+DECLARE_INSTANCE_CHECKER(PassthruState, PASSTHRU_CCID_CARD,
+                         TYPE_CCID_PASSTHRU)
+
+/*
+ * VSCard protocol over chardev
+ * This code should not depend on the card type.
+ */
+
+static void ccid_card_vscard_send_msg(PassthruState *s,
+        VSCMsgType type, uint32_t reader_id,
+        const uint8_t *payload, uint32_t length)
+{
+    VSCMsgHeader scr_msg_header;
+
+    scr_msg_header.type = htonl(type);
+    scr_msg_header.reader_id = htonl(reader_id);
+    scr_msg_header.length = htonl(length);
+    /* XXX this blocks entire thread. Rewrite to use
+     * qemu_chr_fe_write and background I/O callbacks */
+    qemu_chr_fe_write_all(&s->cs, (uint8_t *)&scr_msg_header,
+                          sizeof(VSCMsgHeader));
+    qemu_chr_fe_write_all(&s->cs, payload, length);
+}
+
+static void ccid_card_vscard_send_apdu(PassthruState *s,
+    const uint8_t *apdu, uint32_t length)
+{
+    ccid_card_vscard_send_msg(
+        s, VSC_APDU, VSCARD_MINIMAL_READER_ID, apdu, length);
+}
+
+static void ccid_card_vscard_send_error(PassthruState *s,
+                    uint32_t reader_id, VSCErrorCode code)
+{
+    VSCMsgError msg = {.code = htonl(code)};
+
+    ccid_card_vscard_send_msg(
+        s, VSC_Error, reader_id, (uint8_t *)&msg, sizeof(msg));
+}
+
+static void ccid_card_vscard_send_init(PassthruState *s)
+{
+    VSCMsgInit msg = {
+        .version = htonl(VSCARD_VERSION),
+        .magic = VSCARD_MAGIC,
+        .capabilities = {0}
+    };
+
+    ccid_card_vscard_send_msg(s, VSC_Init, VSCARD_UNDEFINED_READER_ID,
+                              (uint8_t *)&msg, sizeof(msg));
+}
+
+static int ccid_card_vscard_can_read(void *opaque)
+{
+    PassthruState *card = opaque;
+
+    return VSCARD_IN_SIZE >= card->vscard_in_pos ?
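+           /*
+            * i.e. the space left in vscard_in_data. Each VSCard message is
+            * a VSCMsgHeader carrying type, reader_id and length (converted
+            * to network byte order by ccid_card_vscard_send_msg() above)
+            * followed by 'length' payload bytes; ccid_card_vscard_read()
+            * below reassembles complete messages from this buffer.
+            */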
+ VSCARD_IN_SIZE - card->vscard_in_pos : 0; +} + +static void ccid_card_vscard_handle_init( + PassthruState *card, VSCMsgHeader *hdr, VSCMsgInit *init) +{ + uint32_t *capabilities; + int num_capabilities; + int i; + + capabilities = init->capabilities; + num_capabilities = + 1 + ((hdr->length - sizeof(VSCMsgInit)) / sizeof(uint32_t)); + init->version = ntohl(init->version); + for (i = 0 ; i < num_capabilities; ++i) { + capabilities[i] = ntohl(capabilities[i]); + } + if (init->magic != VSCARD_MAGIC) { + error_report("wrong magic"); + /* we can't disconnect the chardev */ + } + if (init->version != VSCARD_VERSION) { + DPRINTF(card, D_WARN, + "got version %d, have %d", init->version, VSCARD_VERSION); + } + /* future handling of capabilities, none exist atm */ + ccid_card_vscard_send_init(card); +} + +static int check_atr(PassthruState *card, uint8_t *data, int len) +{ + int historical_length, opt_bytes; + int td_count = 0; + int td; + + if (len < 2) { + return 0; + } + historical_length = data[1] & 0xf; + opt_bytes = 0; + if (data[0] != 0x3b && data[0] != 0x3f) { + DPRINTF(card, D_WARN, "atr's T0 is 0x%X, not in {0x3b, 0x3f}\n", + data[0]); + return 0; + } + td_count = 0; + td = data[1] >> 4; + while (td && td_count < 2 && opt_bytes + historical_length + 2 < len) { + td_count++; + if (td & 0x1) { + opt_bytes++; + } + if (td & 0x2) { + opt_bytes++; + } + if (td & 0x4) { + opt_bytes++; + } + if (td & 0x8) { + opt_bytes++; + td = data[opt_bytes + 2] >> 4; + } + } + if (len < 2 + historical_length + opt_bytes) { + DPRINTF(card, D_WARN, + "atr too short: len %d, but historical_len %d, T1 0x%X\n", + len, historical_length, data[1]); + return 0; + } + if (len > 2 + historical_length + opt_bytes) { + DPRINTF(card, D_WARN, + "atr too long: len %d, but hist/opt %d/%d, T1 0x%X\n", + len, historical_length, opt_bytes, data[1]); + /* let it through */ + } + DPRINTF(card, D_VERBOSE, + "atr passes check: %d total length, %d historical, %d optional\n", + len, historical_length, opt_bytes); + + return 1; +} + +static void ccid_card_vscard_handle_message(PassthruState *card, + VSCMsgHeader *scr_msg_header) +{ + uint8_t *data = (uint8_t *)&scr_msg_header[1]; + + switch (scr_msg_header->type) { + case VSC_ATR: + DPRINTF(card, D_INFO, "VSC_ATR %d\n", scr_msg_header->length); + if (scr_msg_header->length > MAX_ATR_SIZE) { + error_report("ATR size exceeds spec, ignoring"); + ccid_card_vscard_send_error(card, scr_msg_header->reader_id, + VSC_GENERAL_ERROR); + break; + } + if (!check_atr(card, data, scr_msg_header->length)) { + error_report("ATR is inconsistent, ignoring"); + ccid_card_vscard_send_error(card, scr_msg_header->reader_id, + VSC_GENERAL_ERROR); + break; + } + memcpy(card->atr, data, scr_msg_header->length); + card->atr_length = scr_msg_header->length; + ccid_card_card_inserted(&card->base); + ccid_card_vscard_send_error(card, scr_msg_header->reader_id, + VSC_SUCCESS); + break; + case VSC_APDU: + ccid_card_send_apdu_to_guest( + &card->base, data, scr_msg_header->length); + break; + case VSC_CardRemove: + DPRINTF(card, D_INFO, "VSC_CardRemove\n"); + ccid_card_card_removed(&card->base); + ccid_card_vscard_send_error(card, + scr_msg_header->reader_id, VSC_SUCCESS); + break; + case VSC_Init: + ccid_card_vscard_handle_init( + card, scr_msg_header, (VSCMsgInit *)data); + break; + case VSC_Error: + ccid_card_card_error(&card->base, *(uint32_t *)data); + break; + case VSC_ReaderAdd: + if (ccid_card_ccid_attach(&card->base) < 0) { + ccid_card_vscard_send_error(card, VSCARD_UNDEFINED_READER_ID, + 
VSC_CANNOT_ADD_MORE_READERS); + } else { + ccid_card_vscard_send_error(card, VSCARD_MINIMAL_READER_ID, + VSC_SUCCESS); + } + break; + case VSC_ReaderRemove: + ccid_card_ccid_detach(&card->base); + ccid_card_vscard_send_error(card, + scr_msg_header->reader_id, VSC_SUCCESS); + break; + default: + printf("usb-ccid: chardev: unexpected message of type %X\n", + scr_msg_header->type); + ccid_card_vscard_send_error(card, scr_msg_header->reader_id, + VSC_GENERAL_ERROR); + } +} + +static void ccid_card_vscard_drop_connection(PassthruState *card) +{ + qemu_chr_fe_deinit(&card->cs, true); + card->vscard_in_pos = card->vscard_in_hdr = 0; +} + +static void ccid_card_vscard_read(void *opaque, const uint8_t *buf, int size) +{ + PassthruState *card = opaque; + VSCMsgHeader *hdr; + + if (card->vscard_in_pos + size > VSCARD_IN_SIZE) { + error_report("no room for data: pos %u + size %d > %" PRId64 "." + " dropping connection.", + card->vscard_in_pos, size, VSCARD_IN_SIZE); + ccid_card_vscard_drop_connection(card); + return; + } + assert(card->vscard_in_pos < VSCARD_IN_SIZE); + assert(card->vscard_in_hdr < VSCARD_IN_SIZE); + memcpy(card->vscard_in_data + card->vscard_in_pos, buf, size); + card->vscard_in_pos += size; + hdr = (VSCMsgHeader *)(card->vscard_in_data + card->vscard_in_hdr); + + while ((card->vscard_in_pos - card->vscard_in_hdr >= sizeof(VSCMsgHeader)) + &&(card->vscard_in_pos - card->vscard_in_hdr >= + sizeof(VSCMsgHeader) + ntohl(hdr->length))) { + hdr->reader_id = ntohl(hdr->reader_id); + hdr->length = ntohl(hdr->length); + hdr->type = ntohl(hdr->type); + ccid_card_vscard_handle_message(card, hdr); + card->vscard_in_hdr += hdr->length + sizeof(VSCMsgHeader); + hdr = (VSCMsgHeader *)(card->vscard_in_data + card->vscard_in_hdr); + } + if (card->vscard_in_hdr == card->vscard_in_pos) { + card->vscard_in_pos = card->vscard_in_hdr = 0; + } +} + +static void ccid_card_vscard_event(void *opaque, QEMUChrEvent event) +{ + PassthruState *card = opaque; + + switch (event) { + case CHR_EVENT_BREAK: + card->vscard_in_pos = card->vscard_in_hdr = 0; + break; + case CHR_EVENT_OPENED: + DPRINTF(card, D_INFO, "%s: CHR_EVENT_OPENED\n", __func__); + break; + case CHR_EVENT_MUX_IN: + case CHR_EVENT_MUX_OUT: + case CHR_EVENT_CLOSED: + /* Ignore */ + break; + } +} + +/* End VSCard handling */ + +static void passthru_apdu_from_guest( + CCIDCardState *base, const uint8_t *apdu, uint32_t len) +{ + PassthruState *card = PASSTHRU_CCID_CARD(base); + + if (!qemu_chr_fe_backend_connected(&card->cs)) { + printf("ccid-passthru: no chardev, discarding apdu length %u\n", len); + return; + } + ccid_card_vscard_send_apdu(card, apdu, len); +} + +static const uint8_t *passthru_get_atr(CCIDCardState *base, uint32_t *len) +{ + PassthruState *card = PASSTHRU_CCID_CARD(base); + + *len = card->atr_length; + return card->atr; +} + +static void passthru_realize(CCIDCardState *base, Error **errp) +{ + PassthruState *card = PASSTHRU_CCID_CARD(base); + + card->vscard_in_pos = 0; + card->vscard_in_hdr = 0; + if (qemu_chr_fe_backend_connected(&card->cs)) { + DPRINTF(card, D_INFO, "ccid-card-passthru: initing chardev"); + qemu_chr_fe_set_handlers(&card->cs, + ccid_card_vscard_can_read, + ccid_card_vscard_read, + ccid_card_vscard_event, NULL, card, NULL, true); + ccid_card_vscard_send_init(card); + } else { + error_setg(errp, "missing chardev"); + return; + } + card->debug = parse_debug_env("QEMU_CCID_PASSTHRU_DEBUG", D_VERBOSE, + card->debug); + assert(sizeof(DEFAULT_ATR) <= MAX_ATR_SIZE); + memcpy(card->atr, DEFAULT_ATR, sizeof(DEFAULT_ATR)); + 
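+    /*
+     * Start out with the canned Athena ATR above; it is replaced as soon
+     * as the client on the chardev sends a VSC_ATR message (see
+     * ccid_card_vscard_handle_message()).
+     */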
card->atr_length = sizeof(DEFAULT_ATR); +} + +static const VMStateDescription passthru_vmstate = { + .name = "ccid-card-passthru", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_BUFFER(vscard_in_data, PassthruState), + VMSTATE_UINT32(vscard_in_pos, PassthruState), + VMSTATE_UINT32(vscard_in_hdr, PassthruState), + VMSTATE_BUFFER(atr, PassthruState), + VMSTATE_UINT8(atr_length, PassthruState), + VMSTATE_END_OF_LIST() + } +}; + +static Property passthru_card_properties[] = { + DEFINE_PROP_CHR("chardev", PassthruState, cs), + DEFINE_PROP_UINT8("debug", PassthruState, debug, 0), + DEFINE_PROP_END_OF_LIST(), +}; + +static void passthru_class_initfn(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + CCIDCardClass *cc = CCID_CARD_CLASS(klass); + + cc->realize = passthru_realize; + cc->get_atr = passthru_get_atr; + cc->apdu_from_guest = passthru_apdu_from_guest; + set_bit(DEVICE_CATEGORY_INPUT, dc->categories); + dc->desc = "passthrough smartcard"; + dc->vmsd = &passthru_vmstate; + device_class_set_props(dc, passthru_card_properties); +} + +static const TypeInfo passthru_card_info = { + .name = TYPE_CCID_PASSTHRU, + .parent = TYPE_CCID_CARD, + .instance_size = sizeof(PassthruState), + .class_init = passthru_class_initfn, +}; +module_obj(TYPE_CCID_PASSTHRU); + +static void ccid_card_passthru_register_types(void) +{ + type_register_static(&passthru_card_info); +} + +type_init(ccid_card_passthru_register_types) diff --git a/hw/usb/ccid.h b/hw/usb/ccid.h new file mode 100644 index 000000000..6b82a55bd --- /dev/null +++ b/hw/usb/ccid.h @@ -0,0 +1,62 @@ +/* + * CCID Passthru Card Device emulation + * + * Copyright (c) 2011 Red Hat. + * Written by Alon Levy. + * + * This code is licensed under the GNU LGPL, version 2 or later. + */ + +#ifndef CCID_H +#define CCID_H + +#include "hw/qdev-core.h" +#include "qom/object.h" + +typedef struct CCIDCardInfo CCIDCardInfo; + +#define TYPE_CCID_CARD "ccid-card" +OBJECT_DECLARE_TYPE(CCIDCardState, CCIDCardClass, CCID_CARD) + +/* + * callbacks to be used by the CCID device (hw/usb-ccid.c) to call + * into the smartcard device (hw/ccid-card-*.c) + */ +struct CCIDCardClass { + /*< private >*/ + DeviceClass parent_class; + /*< public >*/ + const uint8_t *(*get_atr)(CCIDCardState *card, uint32_t *len); + void (*apdu_from_guest)(CCIDCardState *card, + const uint8_t *apdu, + uint32_t len); + void (*realize)(CCIDCardState *card, Error **errp); + void (*unrealize)(CCIDCardState *card); +}; + +/* + * state of the CCID Card device (i.e. hw/ccid-card-*.c) + */ +struct CCIDCardState { + DeviceState qdev; + uint32_t slot; /* For future use with multiple slot reader. */ +}; + +/* + * API for smartcard calling the CCID device (used by hw/ccid-card-*.c) + */ +void ccid_card_send_apdu_to_guest(CCIDCardState *card, + uint8_t *apdu, + uint32_t len); +void ccid_card_card_removed(CCIDCardState *card); +void ccid_card_card_inserted(CCIDCardState *card); +void ccid_card_card_error(CCIDCardState *card, uint64_t error); + +/* + * support guest visible insertion/removal of ccid devices based on actual + * devices connected/removed. Called by card implementation (passthru, local) + */ +int ccid_card_ccid_attach(CCIDCardState *card); +void ccid_card_ccid_detach(CCIDCardState *card); + +#endif /* CCID_H */ diff --git a/hw/usb/chipidea.c b/hw/usb/chipidea.c new file mode 100644 index 000000000..b1c85404d --- /dev/null +++ b/hw/usb/chipidea.c @@ -0,0 +1,176 @@ +/* + * Copyright (c) 2018, Impinj, Inc. 
+ * + * Chipidea USB block emulation code + * + * Author: Andrey Smirnov + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "hw/usb/hcd-ehci.h" +#include "hw/usb/chipidea.h" +#include "qemu/module.h" + +enum { + CHIPIDEA_USBx_DCIVERSION = 0x000, + CHIPIDEA_USBx_DCCPARAMS = 0x004, + CHIPIDEA_USBx_DCCPARAMS_HC = BIT(8), +}; + +static uint64_t chipidea_read(void *opaque, hwaddr offset, + unsigned size) +{ + return 0; +} + +static void chipidea_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ +} + +static const struct MemoryRegionOps chipidea_ops = { + .read = chipidea_read, + .write = chipidea_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .impl = { + /* + * Our device would not work correctly if the guest was doing + * unaligned access. This might not be a limitation on the + * real device but in practice there is no reason for a guest + * to access this device unaligned. + */ + .min_access_size = 4, + .max_access_size = 4, + .unaligned = false, + }, +}; + +static uint64_t chipidea_dc_read(void *opaque, hwaddr offset, + unsigned size) +{ + switch (offset) { + case CHIPIDEA_USBx_DCIVERSION: + return 0x1; + case CHIPIDEA_USBx_DCCPARAMS: + /* + * Real hardware (at least i.MX7) will also report the + * controller as "Device Capable" (and 8 supported endpoints), + * but there doesn't seem to be much point in doing so, since + * we don't emulate that part. + */ + return CHIPIDEA_USBx_DCCPARAMS_HC; + } + + return 0; +} + +static void chipidea_dc_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ +} + +static const struct MemoryRegionOps chipidea_dc_ops = { + .read = chipidea_dc_read, + .write = chipidea_dc_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .impl = { + /* + * Our device would not work correctly if the guest was doing + * unaligned access. This might not be a limitation on the real + * device but in practice there is no reason for a guest to access + * this device unaligned. + */ + .min_access_size = 4, + .max_access_size = 4, + .unaligned = false, + }, +}; + +static void chipidea_init(Object *obj) +{ + EHCIState *ehci = &SYS_BUS_EHCI(obj)->ehci; + ChipideaState *ci = CHIPIDEA(obj); + int i; + + for (i = 0; i < ARRAY_SIZE(ci->iomem); i++) { + const struct { + const char *name; + hwaddr offset; + uint64_t size; + const struct MemoryRegionOps *ops; + } regions[ARRAY_SIZE(ci->iomem)] = { + /* + * Registers located between offsets 0x000 and 0xFC + */ + { + .name = TYPE_CHIPIDEA ".misc", + .offset = 0x000, + .size = 0x100, + .ops = &chipidea_ops, + }, + /* + * Registers located between offsets 0x1A4 and 0x1DC + */ + { + .name = TYPE_CHIPIDEA ".endpoints", + .offset = 0x1A4, + .size = 0x1DC - 0x1A4 + 4, + .ops = &chipidea_ops, + }, + /* + * USB_x_DCIVERSION and USB_x_DCCPARAMS + */ + { + .name = TYPE_CHIPIDEA ".dc", + .offset = 0x120, + .size = 8, + .ops = &chipidea_dc_ops, + }, + }; + + memory_region_init_io(&ci->iomem[i], + obj, + regions[i].ops, + ci, + regions[i].name, + regions[i].size); + + memory_region_add_subregion(&ehci->mem, + regions[i].offset, + &ci->iomem[i]); + } +} + +static void chipidea_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + SysBusEHCIClass *sec = SYS_BUS_EHCI_CLASS(klass); + + /* + * Offsets used were taken from i.MX7Dual Applications Processor + * Reference Manual, Rev 0.1, p. 
3177, Table 11-59
+ */
+    sec->capsbase = 0x100;
+    sec->opregbase = 0x140;
+    sec->portnr = 1;
+
+    set_bit(DEVICE_CATEGORY_USB, dc->categories);
+    dc->desc = "Chipidea USB Module";
+}
+
+static const TypeInfo chipidea_info = {
+    .name          = TYPE_CHIPIDEA,
+    .parent        = TYPE_SYS_BUS_EHCI,
+    .instance_size = sizeof(ChipideaState),
+    .instance_init = chipidea_init,
+    .class_init    = chipidea_class_init,
+};
+
+static void chipidea_register_type(void)
+{
+    type_register_static(&chipidea_info);
+}
+type_init(chipidea_register_type)
diff --git a/hw/usb/combined-packet.c b/hw/usb/combined-packet.c
new file mode 100644
index 000000000..e56802f89
--- /dev/null
+++ b/hw/usb/combined-packet.c
@@ -0,0 +1,190 @@
+/*
+ * QEMU USB packet combining code (for input pipelining)
+ *
+ * Copyright(c) 2012 Red Hat, Inc.
+ *
+ * Red Hat Authors:
+ * Hans de Goede
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+#include "qemu/units.h"
+#include "hw/usb.h"
+#include "qemu/iov.h"
+#include "trace.h"
+
+static void usb_combined_packet_add(USBCombinedPacket *combined, USBPacket *p)
+{
+    qemu_iovec_concat(&combined->iov, &p->iov, 0, p->iov.size);
+    QTAILQ_INSERT_TAIL(&combined->packets, p, combined_entry);
+    p->combined = combined;
+}
+
+/* Note will free combined when the last packet gets removed */
+static void usb_combined_packet_remove(USBCombinedPacket *combined,
+                                       USBPacket *p)
+{
+    assert(p->combined == combined);
+    p->combined = NULL;
+    QTAILQ_REMOVE(&combined->packets, p, combined_entry);
+    if (QTAILQ_EMPTY(&combined->packets)) {
+        qemu_iovec_destroy(&combined->iov);
+        g_free(combined);
+    }
+}
+
+/* Also handles completion of non combined packets for pipelined input eps */
+void usb_combined_input_packet_complete(USBDevice *dev, USBPacket *p)
+{
+    USBCombinedPacket *combined = p->combined;
+    USBEndpoint *ep = p->ep;
+    USBPacket *next;
+    int status, actual_length;
+    bool short_not_ok, done = false;
+
+    if (combined == NULL) {
+        usb_packet_complete_one(dev, p);
+        goto leave;
+    }
+
+    assert(combined->first == p && p == QTAILQ_FIRST(&combined->packets));
+
+    status = combined->first->status;
+    actual_length = combined->first->actual_length;
+    short_not_ok = QTAILQ_LAST(&combined->packets)->short_not_ok;
+
+    QTAILQ_FOREACH_SAFE(p, &combined->packets, combined_entry, next) {
+        if (!done) {
+            /* Distribute data over uncombined packets */
+            if (actual_length >= p->iov.size) {
+                p->actual_length = p->iov.size;
+            } else {
+                /* Send short or error packet to complete the transfer */
+                p->actual_length = actual_length;
+                done = true;
+            }
+            /* Report status on the last packet */
+            if (done || next == NULL) {
+                p->status = status;
+            } else {
+                p->status = USB_RET_SUCCESS;
+            }
+            p->short_not_ok = short_not_ok;
+            /* Note will free combined when the last packet gets removed!
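+             * (so 'combined' must not be dereferenced once
+             * usb_combined_packet_remove() has dropped its final packet)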
*/ + usb_combined_packet_remove(combined, p); + usb_packet_complete_one(dev, p); + actual_length -= p->actual_length; + } else { + /* Remove any leftover packets from the queue */ + p->status = USB_RET_REMOVE_FROM_QUEUE; + /* Note will free combined on the last packet! */ + dev->port->ops->complete(dev->port, p); + } + } + /* Do not use combined here, it has been freed! */ +leave: + /* Check if there are packets in the queue waiting for our completion */ + usb_ep_combine_input_packets(ep); +} + +/* May only be called for combined packets! */ +void usb_combined_packet_cancel(USBDevice *dev, USBPacket *p) +{ + USBCombinedPacket *combined = p->combined; + assert(combined != NULL); + USBPacket *first = p->combined->first; + + /* Note will free combined on the last packet! */ + usb_combined_packet_remove(combined, p); + if (p == first) { + usb_device_cancel_packet(dev, p); + } +} + +/* + * Large input transfers can get split into multiple input packets, this + * function recombines them, removing the short_not_ok checks which all but + * the last packet of such splits transfers have, thereby allowing input + * transfer pipelining (which we cannot do on short_not_ok transfers) + */ +void usb_ep_combine_input_packets(USBEndpoint *ep) +{ + USBPacket *p, *u, *next, *prev = NULL, *first = NULL; + USBPort *port = ep->dev->port; + int totalsize; + + assert(ep->pipeline); + assert(ep->pid == USB_TOKEN_IN); + + QTAILQ_FOREACH_SAFE(p, &ep->queue, queue, next) { + /* Empty the queue on a halt */ + if (ep->halted) { + p->status = USB_RET_REMOVE_FROM_QUEUE; + port->ops->complete(port, p); + continue; + } + + /* Skip packets already submitted to the device */ + if (p->state == USB_PACKET_ASYNC) { + prev = p; + continue; + } + usb_packet_check_state(p, USB_PACKET_QUEUED); + + /* + * If the previous (combined) packet has the short_not_ok flag set + * stop, as we must not submit packets to the device after a transfer + * ending with short_not_ok packet. + */ + if (prev && prev->short_not_ok) { + break; + } + + if (first) { + if (first->combined == NULL) { + USBCombinedPacket *combined = g_new0(USBCombinedPacket, 1); + + combined->first = first; + QTAILQ_INIT(&combined->packets); + qemu_iovec_init(&combined->iov, 2); + usb_combined_packet_add(combined, first); + } + usb_combined_packet_add(first->combined, p); + } else { + first = p; + } + + /* Is this packet the last one of a (combined) transfer? */ + totalsize = (p->combined) ? 
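+                    /* i.e. everything queued so far for this transfer */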
p->combined->iov.size : p->iov.size;
+        if ((p->iov.size % ep->max_packet_size) != 0 || !p->short_not_ok ||
+            next == NULL ||
+            /* Work around for Linux usbfs bulk splitting + migration */
+            (totalsize == (16 * KiB - 36) && p->int_req) ||
+            /* Next packet may grow combined packet over 1MiB */
+            totalsize > 1 * MiB - ep->max_packet_size) {
+            usb_device_handle_data(ep->dev, first);
+            assert(first->status == USB_RET_ASYNC);
+            if (first->combined) {
+                QTAILQ_FOREACH(u, &first->combined->packets, combined_entry) {
+                    usb_packet_set_state(u, USB_PACKET_ASYNC);
+                }
+            } else {
+                usb_packet_set_state(first, USB_PACKET_ASYNC);
+            }
+            first = NULL;
+            prev = p;
+        }
+    }
+}
diff --git a/hw/usb/core.c b/hw/usb/core.c
new file mode 100644
index 000000000..975f76250
--- /dev/null
+++ b/hw/usb/core.c
@@ -0,0 +1,821 @@
+/*
+ * QEMU USB emulation
+ *
+ * Copyright (c) 2005 Fabrice Bellard
+ *
+ * 2008 Generic packet handler rewrite by Max Krasnyansky
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */ +#include "qemu/osdep.h" +#include "hw/usb.h" +#include "qemu/iov.h" +#include "trace.h" + +void usb_pick_speed(USBPort *port) +{ + static const int speeds[] = { + USB_SPEED_SUPER, + USB_SPEED_HIGH, + USB_SPEED_FULL, + USB_SPEED_LOW, + }; + USBDevice *udev = port->dev; + int i; + + for (i = 0; i < ARRAY_SIZE(speeds); i++) { + if ((udev->speedmask & (1 << speeds[i])) && + (port->speedmask & (1 << speeds[i]))) { + udev->speed = speeds[i]; + return; + } + } +} + +void usb_attach(USBPort *port) +{ + USBDevice *dev = port->dev; + + assert(dev != NULL); + assert(dev->attached); + assert(dev->state == USB_STATE_NOTATTACHED); + usb_pick_speed(port); + port->ops->attach(port); + dev->state = USB_STATE_ATTACHED; + usb_device_handle_attach(dev); +} + +void usb_detach(USBPort *port) +{ + USBDevice *dev = port->dev; + + assert(dev != NULL); + assert(dev->state != USB_STATE_NOTATTACHED); + port->ops->detach(port); + dev->state = USB_STATE_NOTATTACHED; +} + +void usb_port_reset(USBPort *port) +{ + USBDevice *dev = port->dev; + + assert(dev != NULL); + usb_detach(port); + usb_attach(port); + usb_device_reset(dev); +} + +void usb_device_reset(USBDevice *dev) +{ + if (dev == NULL || !dev->attached) { + return; + } + usb_device_handle_reset(dev); + dev->remote_wakeup = 0; + dev->addr = 0; + dev->state = USB_STATE_DEFAULT; +} + +void usb_wakeup(USBEndpoint *ep, unsigned int stream) +{ + USBDevice *dev = ep->dev; + USBBus *bus = usb_bus_from_device(dev); + + if (!phase_check(PHASE_MACHINE_READY)) { + /* + * This is machine init cold plug. No need to wakeup anyone, + * all devices will be reset anyway. And trying to wakeup can + * cause problems due to hitting uninitialized devices. + */ + return; + } + if (dev->remote_wakeup && dev->port && dev->port->ops->wakeup) { + dev->port->ops->wakeup(dev->port); + } + if (bus->ops->wakeup_endpoint) { + bus->ops->wakeup_endpoint(bus, ep, stream); + } +} + +/**********************/ + +/* generic USB device helpers (you are not forced to use them when + writing your USB device driver, but they help handling the + protocol) +*/ + +#define SETUP_STATE_IDLE 0 +#define SETUP_STATE_SETUP 1 +#define SETUP_STATE_DATA 2 +#define SETUP_STATE_ACK 3 +#define SETUP_STATE_PARAM 4 + +static void do_token_setup(USBDevice *s, USBPacket *p) +{ + int request, value, index; + unsigned int setup_len; + + if (p->iov.size != 8) { + p->status = USB_RET_STALL; + return; + } + + usb_packet_copy(p, s->setup_buf, p->iov.size); + s->setup_index = 0; + p->actual_length = 0; + setup_len = (s->setup_buf[7] << 8) | s->setup_buf[6]; + if (setup_len > sizeof(s->data_buf)) { + fprintf(stderr, + "usb_generic_handle_packet: ctrl buffer too small (%u > %zu)\n", + setup_len, sizeof(s->data_buf)); + p->status = USB_RET_STALL; + return; + } + s->setup_len = setup_len; + + request = (s->setup_buf[0] << 8) | s->setup_buf[1]; + value = (s->setup_buf[3] << 8) | s->setup_buf[2]; + index = (s->setup_buf[5] << 8) | s->setup_buf[4]; + + if (s->setup_buf[0] & USB_DIR_IN) { + usb_pcap_ctrl(p, true); + usb_device_handle_control(s, p, request, value, index, + s->setup_len, s->data_buf); + if (p->status == USB_RET_ASYNC) { + s->setup_state = SETUP_STATE_SETUP; + } + if (p->status != USB_RET_SUCCESS) { + return; + } + + if (p->actual_length < s->setup_len) { + s->setup_len = p->actual_length; + } + s->setup_state = SETUP_STATE_DATA; + } else { + if (s->setup_len == 0) + s->setup_state = SETUP_STATE_ACK; + else + s->setup_state = SETUP_STATE_DATA; + } + + p->actual_length = 8; +} + +static void do_token_in(USBDevice *s, 
USBPacket *p) +{ + int request, value, index; + + assert(p->ep->nr == 0); + + request = (s->setup_buf[0] << 8) | s->setup_buf[1]; + value = (s->setup_buf[3] << 8) | s->setup_buf[2]; + index = (s->setup_buf[5] << 8) | s->setup_buf[4]; + + switch(s->setup_state) { + case SETUP_STATE_ACK: + if (!(s->setup_buf[0] & USB_DIR_IN)) { + usb_pcap_ctrl(p, true); + usb_device_handle_control(s, p, request, value, index, + s->setup_len, s->data_buf); + if (p->status == USB_RET_ASYNC) { + return; + } + s->setup_state = SETUP_STATE_IDLE; + p->actual_length = 0; + usb_pcap_ctrl(p, false); + } + break; + + case SETUP_STATE_DATA: + if (s->setup_buf[0] & USB_DIR_IN) { + int len = s->setup_len - s->setup_index; + if (len > p->iov.size) { + len = p->iov.size; + } + usb_packet_copy(p, s->data_buf + s->setup_index, len); + s->setup_index += len; + if (s->setup_index >= s->setup_len) { + s->setup_state = SETUP_STATE_ACK; + } + return; + } + s->setup_state = SETUP_STATE_IDLE; + p->status = USB_RET_STALL; + usb_pcap_ctrl(p, false); + break; + + default: + p->status = USB_RET_STALL; + } +} + +static void do_token_out(USBDevice *s, USBPacket *p) +{ + assert(p->ep->nr == 0); + + switch(s->setup_state) { + case SETUP_STATE_ACK: + if (s->setup_buf[0] & USB_DIR_IN) { + s->setup_state = SETUP_STATE_IDLE; + usb_pcap_ctrl(p, false); + /* transfer OK */ + } else { + /* ignore additional output */ + } + break; + + case SETUP_STATE_DATA: + if (!(s->setup_buf[0] & USB_DIR_IN)) { + int len = s->setup_len - s->setup_index; + if (len > p->iov.size) { + len = p->iov.size; + } + usb_packet_copy(p, s->data_buf + s->setup_index, len); + s->setup_index += len; + if (s->setup_index >= s->setup_len) { + s->setup_state = SETUP_STATE_ACK; + } + return; + } + s->setup_state = SETUP_STATE_IDLE; + p->status = USB_RET_STALL; + usb_pcap_ctrl(p, false); + break; + + default: + p->status = USB_RET_STALL; + } +} + +static void do_parameter(USBDevice *s, USBPacket *p) +{ + int i, request, value, index; + unsigned int setup_len; + + for (i = 0; i < 8; i++) { + s->setup_buf[i] = p->parameter >> (i*8); + } + + s->setup_state = SETUP_STATE_PARAM; + s->setup_index = 0; + + request = (s->setup_buf[0] << 8) | s->setup_buf[1]; + value = (s->setup_buf[3] << 8) | s->setup_buf[2]; + index = (s->setup_buf[5] << 8) | s->setup_buf[4]; + + setup_len = (s->setup_buf[7] << 8) | s->setup_buf[6]; + if (setup_len > sizeof(s->data_buf)) { + fprintf(stderr, + "usb_generic_handle_packet: ctrl buffer too small (%u > %zu)\n", + setup_len, sizeof(s->data_buf)); + p->status = USB_RET_STALL; + return; + } + s->setup_len = setup_len; + + if (p->pid == USB_TOKEN_OUT) { + usb_packet_copy(p, s->data_buf, s->setup_len); + } + + usb_pcap_ctrl(p, true); + usb_device_handle_control(s, p, request, value, index, + s->setup_len, s->data_buf); + if (p->status == USB_RET_ASYNC) { + return; + } + + if (p->actual_length < s->setup_len) { + s->setup_len = p->actual_length; + } + if (p->pid == USB_TOKEN_IN) { + p->actual_length = 0; + usb_packet_copy(p, s->data_buf, s->setup_len); + } + usb_pcap_ctrl(p, false); +} + +/* ctrl complete function for devices which use usb_generic_handle_packet and + may return USB_RET_ASYNC from their handle_control callback. Device code + which does this *must* call this function instead of the normal + usb_packet_complete to complete their async control packets. 
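+   A sketch of the async flow for an IN control transfer: handle_control()
+   returns USB_RET_ASYNC while setup_state is SETUP_STATE_SETUP; once the
+   device calls usb_generic_async_ctrl_complete(), setup_state advances to
+   SETUP_STATE_DATA and subsequent IN tokens are served from data_buf by
+   do_token_in() above.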
*/ +void usb_generic_async_ctrl_complete(USBDevice *s, USBPacket *p) +{ + if (p->status < 0) { + s->setup_state = SETUP_STATE_IDLE; + usb_pcap_ctrl(p, false); + } + + switch (s->setup_state) { + case SETUP_STATE_SETUP: + if (p->actual_length < s->setup_len) { + s->setup_len = p->actual_length; + } + s->setup_state = SETUP_STATE_DATA; + p->actual_length = 8; + break; + + case SETUP_STATE_ACK: + s->setup_state = SETUP_STATE_IDLE; + p->actual_length = 0; + usb_pcap_ctrl(p, false); + break; + + case SETUP_STATE_PARAM: + if (p->actual_length < s->setup_len) { + s->setup_len = p->actual_length; + } + if (p->pid == USB_TOKEN_IN) { + p->actual_length = 0; + usb_packet_copy(p, s->data_buf, s->setup_len); + } + break; + + default: + break; + } + usb_packet_complete(s, p); +} + +USBDevice *usb_find_device(USBPort *port, uint8_t addr) +{ + USBDevice *dev = port->dev; + + if (dev == NULL || !dev->attached || dev->state != USB_STATE_DEFAULT) { + return NULL; + } + if (dev->addr == addr) { + return dev; + } + return usb_device_find_device(dev, addr); +} + +static void usb_process_one(USBPacket *p) +{ + USBDevice *dev = p->ep->dev; + bool nak; + + /* + * Handlers expect status to be initialized to USB_RET_SUCCESS, but it + * can be USB_RET_NAK here from a previous usb_process_one() call, + * or USB_RET_ASYNC from going through usb_queue_one(). + */ + nak = (p->status == USB_RET_NAK); + p->status = USB_RET_SUCCESS; + + if (p->ep->nr == 0) { + /* control pipe */ + if (p->parameter) { + do_parameter(dev, p); + return; + } + switch (p->pid) { + case USB_TOKEN_SETUP: + do_token_setup(dev, p); + break; + case USB_TOKEN_IN: + do_token_in(dev, p); + break; + case USB_TOKEN_OUT: + do_token_out(dev, p); + break; + default: + p->status = USB_RET_STALL; + } + } else { + /* data pipe */ + if (!nak) { + usb_pcap_data(p, true); + } + usb_device_handle_data(dev, p); + } +} + +static void usb_queue_one(USBPacket *p) +{ + usb_packet_set_state(p, USB_PACKET_QUEUED); + QTAILQ_INSERT_TAIL(&p->ep->queue, p, queue); + p->status = USB_RET_ASYNC; +} + +/* Hand over a packet to a device for processing. p->status == + USB_RET_ASYNC indicates the processing isn't finished yet, the + driver will call usb_packet_complete() when done processing it. */ +void usb_handle_packet(USBDevice *dev, USBPacket *p) +{ + if (dev == NULL) { + p->status = USB_RET_NODEV; + return; + } + assert(dev == p->ep->dev); + assert(dev->state == USB_STATE_DEFAULT); + usb_packet_check_state(p, USB_PACKET_SETUP); + assert(p->ep != NULL); + + /* Submitting a new packet clears halt */ + if (p->ep->halted) { + assert(QTAILQ_EMPTY(&p->ep->queue)); + p->ep->halted = false; + } + + if (QTAILQ_EMPTY(&p->ep->queue) || p->ep->pipeline || p->stream) { + usb_process_one(p); + if (p->status == USB_RET_ASYNC) { + /* hcd drivers cannot handle async for isoc */ + assert(p->ep->type != USB_ENDPOINT_XFER_ISOC); + /* using async for interrupt packets breaks migration */ + assert(p->ep->type != USB_ENDPOINT_XFER_INT || + (dev->flags & (1 << USB_DEV_FLAG_IS_HOST))); + usb_packet_set_state(p, USB_PACKET_ASYNC); + QTAILQ_INSERT_TAIL(&p->ep->queue, p, queue); + } else if (p->status == USB_RET_ADD_TO_QUEUE) { + usb_queue_one(p); + } else { + /* + * When pipelining is enabled usb-devices must always return async, + * otherwise packets can complete out of order! 
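+             * Consider two packets queued on the same endpoint: if the
+             * second completed synchronously while the first was still in
+             * flight, the guest would observe their completions reordered.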
+             */
+            assert(p->stream || !p->ep->pipeline ||
+                   QTAILQ_EMPTY(&p->ep->queue));
+            if (p->status != USB_RET_NAK) {
+                usb_pcap_data(p, false);
+                usb_packet_set_state(p, USB_PACKET_COMPLETE);
+            }
+        }
+    } else {
+        usb_queue_one(p);
+    }
+}
+
+void usb_packet_complete_one(USBDevice *dev, USBPacket *p)
+{
+    USBEndpoint *ep = p->ep;
+
+    assert(p->stream || QTAILQ_FIRST(&ep->queue) == p);
+    assert(p->status != USB_RET_ASYNC && p->status != USB_RET_NAK);
+
+    if (p->status != USB_RET_SUCCESS ||
+            (p->short_not_ok && (p->actual_length < p->iov.size))) {
+        ep->halted = true;
+    }
+    usb_pcap_data(p, false);
+    usb_packet_set_state(p, USB_PACKET_COMPLETE);
+    QTAILQ_REMOVE(&ep->queue, p, queue);
+    dev->port->ops->complete(dev->port, p);
+}
+
+/* Notify the controller that an async packet is complete. This should only
+   be called for packets previously deferred by returning USB_RET_ASYNC from
+   handle_packet. */
+void usb_packet_complete(USBDevice *dev, USBPacket *p)
+{
+    USBEndpoint *ep = p->ep;
+
+    usb_packet_check_state(p, USB_PACKET_ASYNC);
+    usb_packet_complete_one(dev, p);
+
+    while (!QTAILQ_EMPTY(&ep->queue)) {
+        p = QTAILQ_FIRST(&ep->queue);
+        if (ep->halted) {
+            /* Empty the queue on a halt */
+            p->status = USB_RET_REMOVE_FROM_QUEUE;
+            dev->port->ops->complete(dev->port, p);
+            continue;
+        }
+        if (p->state == USB_PACKET_ASYNC) {
+            break;
+        }
+        usb_packet_check_state(p, USB_PACKET_QUEUED);
+        usb_process_one(p);
+        if (p->status == USB_RET_ASYNC) {
+            usb_packet_set_state(p, USB_PACKET_ASYNC);
+            break;
+        }
+        usb_packet_complete_one(ep->dev, p);
+    }
+}
+
+/* Cancel an active packet. The packet must have been deferred by
+   returning USB_RET_ASYNC from handle_packet, and not yet
+   completed. */
+void usb_cancel_packet(USBPacket *p)
+{
+    bool callback = (p->state == USB_PACKET_ASYNC);
+    assert(usb_packet_is_inflight(p));
+    usb_packet_set_state(p, USB_PACKET_CANCELED);
+    QTAILQ_REMOVE(&p->ep->queue, p, queue);
+    if (callback) {
+        usb_device_cancel_packet(p->ep->dev, p);
+    }
+}
+
+
+void usb_packet_init(USBPacket *p)
+{
+    qemu_iovec_init(&p->iov, 1);
+}
+
+static const char *usb_packet_state_name(USBPacketState state)
+{
+    static const char *name[] = {
+        [USB_PACKET_UNDEFINED] = "undef",
+        [USB_PACKET_SETUP]     = "setup",
+        [USB_PACKET_QUEUED]    = "queued",
+        [USB_PACKET_ASYNC]     = "async",
+        [USB_PACKET_COMPLETE]  = "complete",
+        [USB_PACKET_CANCELED]  = "canceled",
+    };
+    if (state < ARRAY_SIZE(name)) {
+        return name[state];
+    }
+    return "INVALID";
+}
+
+void usb_packet_check_state(USBPacket *p, USBPacketState expected)
+{
+    USBDevice *dev;
+    USBBus *bus;
+
+    if (p->state == expected) {
+        return;
+    }
+    dev = p->ep->dev;
+    bus = usb_bus_from_device(dev);
+    trace_usb_packet_state_fault(bus->busnr, dev->port->path, p->ep->nr, p,
+                                 usb_packet_state_name(p->state),
+                                 usb_packet_state_name(expected));
+    assert(!"usb packet state check failed");
+}
+
+void usb_packet_set_state(USBPacket *p, USBPacketState state)
+{
+    if (p->ep) {
+        USBDevice *dev = p->ep->dev;
+        USBBus *bus = usb_bus_from_device(dev);
+        trace_usb_packet_state_change(bus->busnr, dev->port->path, p->ep->nr, p,
+                                      usb_packet_state_name(p->state),
+                                      usb_packet_state_name(state));
+    } else {
+        trace_usb_packet_state_change(-1, "", -1, p,
+                                      usb_packet_state_name(p->state),
+                                      usb_packet_state_name(state));
+    }
+    p->state = state;
+}
+
+void usb_packet_setup(USBPacket *p, int pid,
+                      USBEndpoint *ep, unsigned int stream,
+                      uint64_t id, bool short_not_ok, bool int_req)
+{
+    assert(!usb_packet_is_inflight(p));
+    assert(p->iov.iov != NULL);
+    p->id = id;
+    p->pid = pid;
+ p->ep = ep; + p->stream = stream; + p->status = USB_RET_SUCCESS; + p->actual_length = 0; + p->parameter = 0; + p->short_not_ok = short_not_ok; + p->int_req = int_req; + p->combined = NULL; + qemu_iovec_reset(&p->iov); + usb_packet_set_state(p, USB_PACKET_SETUP); +} + +void usb_packet_addbuf(USBPacket *p, void *ptr, size_t len) +{ + qemu_iovec_add(&p->iov, ptr, len); +} + +void usb_packet_copy(USBPacket *p, void *ptr, size_t bytes) +{ + QEMUIOVector *iov = p->combined ? &p->combined->iov : &p->iov; + + assert(p->actual_length >= 0); + assert(p->actual_length + bytes <= iov->size); + switch (p->pid) { + case USB_TOKEN_SETUP: + case USB_TOKEN_OUT: + iov_to_buf(iov->iov, iov->niov, p->actual_length, ptr, bytes); + break; + case USB_TOKEN_IN: + iov_from_buf(iov->iov, iov->niov, p->actual_length, ptr, bytes); + break; + default: + fprintf(stderr, "%s: invalid pid: %x\n", __func__, p->pid); + abort(); + } + p->actual_length += bytes; +} + +void usb_packet_skip(USBPacket *p, size_t bytes) +{ + QEMUIOVector *iov = p->combined ? &p->combined->iov : &p->iov; + + assert(p->actual_length >= 0); + assert(p->actual_length + bytes <= iov->size); + if (p->pid == USB_TOKEN_IN) { + iov_memset(iov->iov, iov->niov, p->actual_length, 0, bytes); + } + p->actual_length += bytes; +} + +size_t usb_packet_size(USBPacket *p) +{ + return p->combined ? p->combined->iov.size : p->iov.size; +} + +void usb_packet_cleanup(USBPacket *p) +{ + assert(!usb_packet_is_inflight(p)); + qemu_iovec_destroy(&p->iov); +} + +void usb_ep_reset(USBDevice *dev) +{ + int ep; + + dev->ep_ctl.nr = 0; + dev->ep_ctl.type = USB_ENDPOINT_XFER_CONTROL; + dev->ep_ctl.ifnum = 0; + dev->ep_ctl.max_packet_size = 64; + dev->ep_ctl.max_streams = 0; + dev->ep_ctl.dev = dev; + dev->ep_ctl.pipeline = false; + for (ep = 0; ep < USB_MAX_ENDPOINTS; ep++) { + dev->ep_in[ep].nr = ep + 1; + dev->ep_out[ep].nr = ep + 1; + dev->ep_in[ep].pid = USB_TOKEN_IN; + dev->ep_out[ep].pid = USB_TOKEN_OUT; + dev->ep_in[ep].type = USB_ENDPOINT_XFER_INVALID; + dev->ep_out[ep].type = USB_ENDPOINT_XFER_INVALID; + dev->ep_in[ep].ifnum = USB_INTERFACE_INVALID; + dev->ep_out[ep].ifnum = USB_INTERFACE_INVALID; + dev->ep_in[ep].max_packet_size = 0; + dev->ep_out[ep].max_packet_size = 0; + dev->ep_in[ep].max_streams = 0; + dev->ep_out[ep].max_streams = 0; + dev->ep_in[ep].dev = dev; + dev->ep_out[ep].dev = dev; + dev->ep_in[ep].pipeline = false; + dev->ep_out[ep].pipeline = false; + } +} + +void usb_ep_init(USBDevice *dev) +{ + int ep; + + usb_ep_reset(dev); + QTAILQ_INIT(&dev->ep_ctl.queue); + for (ep = 0; ep < USB_MAX_ENDPOINTS; ep++) { + QTAILQ_INIT(&dev->ep_in[ep].queue); + QTAILQ_INIT(&dev->ep_out[ep].queue); + } +} + +void usb_ep_dump(USBDevice *dev) +{ + static const char *tname[] = { + [USB_ENDPOINT_XFER_CONTROL] = "control", + [USB_ENDPOINT_XFER_ISOC] = "isoc", + [USB_ENDPOINT_XFER_BULK] = "bulk", + [USB_ENDPOINT_XFER_INT] = "int", + }; + int ifnum, ep, first; + + fprintf(stderr, "Device \"%s\", config %d\n", + dev->product_desc, dev->configuration); + for (ifnum = 0; ifnum < 16; ifnum++) { + first = 1; + for (ep = 0; ep < USB_MAX_ENDPOINTS; ep++) { + if (dev->ep_in[ep].type != USB_ENDPOINT_XFER_INVALID && + dev->ep_in[ep].ifnum == ifnum) { + if (first) { + first = 0; + fprintf(stderr, " Interface %d, alternative %d\n", + ifnum, dev->altsetting[ifnum]); + } + fprintf(stderr, " Endpoint %d, IN, %s, %d max\n", ep, + tname[dev->ep_in[ep].type], + dev->ep_in[ep].max_packet_size); + } + if (dev->ep_out[ep].type != USB_ENDPOINT_XFER_INVALID && + dev->ep_out[ep].ifnum == ifnum) { 
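+                /* mirrors the IN branch above, for the OUT direction */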
+                if (first) {
+                    first = 0;
+                    fprintf(stderr, "  Interface %d, alternative %d\n",
+                            ifnum, dev->altsetting[ifnum]);
+                }
+                fprintf(stderr, "    Endpoint %d, OUT, %s, %d max\n", ep,
+                        tname[dev->ep_out[ep].type],
+                        dev->ep_out[ep].max_packet_size);
+            }
+        }
+    }
+    fprintf(stderr, "--\n");
+}
+
+struct USBEndpoint *usb_ep_get(USBDevice *dev, int pid, int ep)
+{
+    struct USBEndpoint *eps;
+
+    assert(dev != NULL);
+    if (ep == 0) {
+        return &dev->ep_ctl;
+    }
+    assert(pid == USB_TOKEN_IN || pid == USB_TOKEN_OUT);
+    assert(ep > 0 && ep <= USB_MAX_ENDPOINTS);
+    eps = (pid == USB_TOKEN_IN) ? dev->ep_in : dev->ep_out;
+    return eps + ep - 1;
+}
+
+uint8_t usb_ep_get_type(USBDevice *dev, int pid, int ep)
+{
+    struct USBEndpoint *uep = usb_ep_get(dev, pid, ep);
+    return uep->type;
+}
+
+void usb_ep_set_type(USBDevice *dev, int pid, int ep, uint8_t type)
+{
+    struct USBEndpoint *uep = usb_ep_get(dev, pid, ep);
+    uep->type = type;
+}
+
+void usb_ep_set_ifnum(USBDevice *dev, int pid, int ep, uint8_t ifnum)
+{
+    struct USBEndpoint *uep = usb_ep_get(dev, pid, ep);
+    uep->ifnum = ifnum;
+}
+
+void usb_ep_set_max_packet_size(USBDevice *dev, int pid, int ep,
+                                uint16_t raw)
+{
+    struct USBEndpoint *uep = usb_ep_get(dev, pid, ep);
+    int size, microframes;
+
+    size = raw & 0x7ff;
+    switch ((raw >> 11) & 3) {
+    case 1:
+        microframes = 2;
+        break;
+    case 2:
+        microframes = 3;
+        break;
+    default:
+        microframes = 1;
+        break;
+    }
+    uep->max_packet_size = size * microframes;
+}
+
+void usb_ep_set_max_streams(USBDevice *dev, int pid, int ep, uint8_t raw)
+{
+    struct USBEndpoint *uep = usb_ep_get(dev, pid, ep);
+    int MaxStreams;
+
+    MaxStreams = raw & 0x1f;
+    if (MaxStreams) {
+        uep->max_streams = 1 << MaxStreams;
+    } else {
+        uep->max_streams = 0;
+    }
+}
+
+void usb_ep_set_halted(USBDevice *dev, int pid, int ep, bool halted)
+{
+    struct USBEndpoint *uep = usb_ep_get(dev, pid, ep);
+    uep->halted = halted;
+}
+
+USBPacket *usb_ep_find_packet_by_id(USBDevice *dev, int pid, int ep,
+                                    uint64_t id)
+{
+    struct USBEndpoint *uep = usb_ep_get(dev, pid, ep);
+    USBPacket *p;
+
+    QTAILQ_FOREACH(p, &uep->queue, queue) {
+        if (p->id == id) {
+            return p;
+        }
+    }
+
+    return NULL;
+}
diff --git a/hw/usb/desc-msos.c b/hw/usb/desc-msos.c
new file mode 100644
index 000000000..c72c65b65
--- /dev/null
+++ b/hw/usb/desc-msos.c
@@ -0,0 +1,239 @@
+#include "qemu/osdep.h"
+#include "hw/usb.h"
+#include "desc.h"
+
+/*
+ * Microsoft OS Descriptors
+ *
+ * Windows tries to fetch some special descriptors with information
+ * specifically for windows.  Presence is indicated using a special
+ * string @ index 0xee.  There are two kinds of descriptors:
+ *
+ * compatid descriptor
+ *   Used to bind drivers, if the usb class isn't specific enough.
+ *   Used for PTP/MTP for example (both share the same usb class).
+ *
+ * properties descriptor
+ *   Carries registry entries.  They show up in
+ *   HLM\SYSTEM\CurrentControlSet\Enum\USB\<devid>\<serial>\Device Parameters
+ *
+ * Note that Windows caches the stuff it got in the registry, so when
+ * playing with this you have to delete registry subtrees to make
+ * windows query the device again:
+ *   HLM\SYSTEM\CurrentControlSet\Control\usbflags
+ *   HLM\SYSTEM\CurrentControlSet\Enum\USB
+ * Windows will complain it can't delete entries on the second one.
+ * It has deleted everything it had permission to, which is enough
+ * as this includes "Device Parameters".
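+ *
+ * The signature string at index 0xee is "MSFT100" followed by the
+ * vendor request code windows should use for the actual fetch:
+ * usb_desc_init() registers "MSFT100Q", and usb_desc_handle_control()
+ * answers the matching VendorDeviceRequest|'Q' and
+ * VendorInterfaceRequest|'Q' requests via usb_desc_msos() below.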
+ *
+ * http://msdn.microsoft.com/en-us/library/windows/hardware/ff537430.aspx
+ *
+ */
+
+/* ------------------------------------------------------------------ */
+
+typedef struct msos_compat_hdr {
+    uint32_t dwLength;
+    uint8_t bcdVersion_lo;
+    uint8_t bcdVersion_hi;
+    uint8_t wIndex_lo;
+    uint8_t wIndex_hi;
+    uint8_t bCount;
+    uint8_t reserved[7];
+} QEMU_PACKED msos_compat_hdr;
+
+typedef struct msos_compat_func {
+    uint8_t bFirstInterfaceNumber;
+    uint8_t reserved_1;
+    char compatibleId[8];
+    uint8_t subCompatibleId[8];
+    uint8_t reserved_2[6];
+} QEMU_PACKED msos_compat_func;
+
+static int usb_desc_msos_compat(const USBDesc *desc, uint8_t *dest)
+{
+    msos_compat_hdr *hdr = (void *)dest;
+    msos_compat_func *func;
+    int length = sizeof(*hdr);
+    int count = 0;
+
+    func = (void *)(dest + length);
+    func->bFirstInterfaceNumber = 0;
+    func->reserved_1 = 0x01;
+    if (desc->msos->CompatibleID) {
+        snprintf(func->compatibleId, sizeof(func->compatibleId),
+                 "%s", desc->msos->CompatibleID);
+    }
+    length += sizeof(*func);
+    count++;
+
+    hdr->dwLength = cpu_to_le32(length);
+    hdr->bcdVersion_lo = 0x00;
+    hdr->bcdVersion_hi = 0x01;
+    hdr->wIndex_lo = 0x04;
+    hdr->wIndex_hi = 0x00;
+    hdr->bCount = count;
+    return length;
+}
+
+/* ------------------------------------------------------------------ */
+
+typedef struct msos_prop_hdr {
+    uint32_t dwLength;
+    uint8_t bcdVersion_lo;
+    uint8_t bcdVersion_hi;
+    uint8_t wIndex_lo;
+    uint8_t wIndex_hi;
+    uint8_t wCount_lo;
+    uint8_t wCount_hi;
+} QEMU_PACKED msos_prop_hdr;
+
+typedef struct msos_prop {
+    uint32_t dwLength;
+    uint32_t dwPropertyDataType;
+    uint8_t dwPropertyNameLength_lo;
+    uint8_t dwPropertyNameLength_hi;
+    uint8_t bPropertyName[];
+} QEMU_PACKED msos_prop;
+
+typedef struct msos_prop_data {
+    uint32_t dwPropertyDataLength;
+    uint8_t bPropertyData[];
+} QEMU_PACKED msos_prop_data;
+
+typedef enum msos_prop_type {
+    MSOS_REG_SZ        = 1,
+    MSOS_REG_EXPAND_SZ = 2,
+    MSOS_REG_BINARY    = 3,
+    MSOS_REG_DWORD_LE  = 4,
+    MSOS_REG_DWORD_BE  = 5,
+    MSOS_REG_LINK      = 6,
+    MSOS_REG_MULTI_SZ  = 7,
+} msos_prop_type;
+
+static int usb_desc_msos_prop_name(struct msos_prop *prop,
+                                   const wchar_t *name)
+{
+    int length = wcslen(name) + 1;
+    int i;
+
+    prop->dwPropertyNameLength_lo = usb_lo(length*2);
+    prop->dwPropertyNameLength_hi = usb_hi(length*2);
+    for (i = 0; i < length; i++) {
+        prop->bPropertyName[i*2]   = usb_lo(name[i]);
+        prop->bPropertyName[i*2+1] = usb_hi(name[i]);
+    }
+    return length*2;
+}
+
+static int usb_desc_msos_prop_str(uint8_t *dest, msos_prop_type type,
+                                  const wchar_t *name, const wchar_t *value)
+{
+    struct msos_prop *prop = (void *)dest;
+    struct msos_prop_data *data;
+    int length = sizeof(*prop);
+    int i, vlen = wcslen(value) + 1;
+
+    prop->dwPropertyDataType = cpu_to_le32(type);
+    length += usb_desc_msos_prop_name(prop, name);
+    data = (void *)(dest + length);
+
+    data->dwPropertyDataLength = cpu_to_le32(vlen*2);
+    length += sizeof(*data);
+
+    for (i = 0; i < vlen; i++) {
+        data->bPropertyData[i*2]   = usb_lo(value[i]);
+        data->bPropertyData[i*2+1] = usb_hi(value[i]);
+    }
+    length += vlen*2;
+
+    prop->dwLength = cpu_to_le32(length);
+    return length;
+}
+
+static int usb_desc_msos_prop_dword(uint8_t *dest, const wchar_t *name,
+                                    uint32_t value)
+{
+    struct msos_prop *prop = (void *)dest;
+    struct msos_prop_data *data;
+    int length = sizeof(*prop);
+
+    prop->dwPropertyDataType = cpu_to_le32(MSOS_REG_DWORD_LE);
+    length += usb_desc_msos_prop_name(prop, name);
+    data = (void *)(dest + length);
+
+    data->dwPropertyDataLength = cpu_to_le32(4);
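+    /* serialize the dword little endian, one byte at a time, so the
+       result is independent of host byte order */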
+    data->bPropertyData[0] = (value)       & 0xff;
+    data->bPropertyData[1] = (value >>  8) & 0xff;
+    data->bPropertyData[2] = (value >> 16) & 0xff;
+    data->bPropertyData[3] = (value >> 24) & 0xff;
+    length += sizeof(*data) + 4;
+
+    prop->dwLength = cpu_to_le32(length);
+    return length;
+}
+
+static int usb_desc_msos_prop(const USBDesc *desc, uint8_t *dest)
+{
+    msos_prop_hdr *hdr = (void *)dest;
+    int length = sizeof(*hdr);
+    int count = 0;
+
+    if (desc->msos->Label) {
+        /*
+         * Given as example in the specs.  Haven't figured yet where
+         * this label shows up in the windows gui.
+         */
+        length += usb_desc_msos_prop_str(dest+length, MSOS_REG_SZ,
+                                         L"Label", desc->msos->Label);
+        count++;
+    }
+
+    if (desc->msos->SelectiveSuspendEnabled) {
+        /*
+         * Signaling remote wakeup capability in the standard usb
+         * descriptors isn't enough to make windows actually use it.
+         * This is the "Yes, we really mean it" registry entry to flip
+         * the switch in the windows drivers.
+         */
+        length += usb_desc_msos_prop_dword(dest+length,
+                                           L"SelectiveSuspendEnabled", 1);
+        count++;
+    }
+
+    hdr->dwLength = cpu_to_le32(length);
+    hdr->bcdVersion_lo = 0x00;
+    hdr->bcdVersion_hi = 0x01;
+    hdr->wIndex_lo = 0x05;
+    hdr->wIndex_hi = 0x00;
+    hdr->wCount_lo = usb_lo(count);
+    hdr->wCount_hi = usb_hi(count);
+    return length;
+}
+
+/* ------------------------------------------------------------------ */
+
+int usb_desc_msos(const USBDesc *desc, USBPacket *p,
+                  int index, uint8_t *dest, size_t len)
+{
+    void *buf = g_malloc0(4096);
+    int length = 0;
+
+    switch (index) {
+    case 0x0004:
+        length = usb_desc_msos_compat(desc, buf);
+        break;
+    case 0x0005:
+        length = usb_desc_msos_prop(desc, buf);
+        break;
+    }
+
+    if (length > len) {
+        length = len;
+    }
+    memcpy(dest, buf, length);
+    g_free(buf);
+
+    p->actual_length = length;
+    return 0;
+}
diff --git a/hw/usb/desc.c b/hw/usb/desc.c
new file mode 100644
index 000000000..8b6eaea40
--- /dev/null
+++ b/hw/usb/desc.c
@@ -0,0 +1,812 @@
+#include "qemu/osdep.h"
+
+#include "hw/usb.h"
+#include "desc.h"
+#include "trace.h"
+
+/* ------------------------------------------------------------------ */
+
+int usb_desc_device(const USBDescID *id, const USBDescDevice *dev,
+                    bool msos, uint8_t *dest, size_t len)
+{
+    uint8_t bLength = 0x12;
+    USBDescriptor *d = (void *)dest;
+
+    if (len < bLength) {
+        return -1;
+    }
+
+    d->bLength = bLength;
+    d->bDescriptorType = USB_DT_DEVICE;
+
+    if (msos && dev->bcdUSB < 0x0200) {
+        /*
+         * Version 2.0+ is required for microsoft os descriptors to work.
+         * Done this way so the msos-desc compat property will handle both
+         * the version and the new descriptors being present.
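+         * Only the bcdUSB value reported to the guest is raised here;
+         * the rest of the device configuration is unchanged.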
+ */ + d->u.device.bcdUSB_lo = usb_lo(0x0200); + d->u.device.bcdUSB_hi = usb_hi(0x0200); + } else { + d->u.device.bcdUSB_lo = usb_lo(dev->bcdUSB); + d->u.device.bcdUSB_hi = usb_hi(dev->bcdUSB); + } + d->u.device.bDeviceClass = dev->bDeviceClass; + d->u.device.bDeviceSubClass = dev->bDeviceSubClass; + d->u.device.bDeviceProtocol = dev->bDeviceProtocol; + d->u.device.bMaxPacketSize0 = dev->bMaxPacketSize0; + + d->u.device.idVendor_lo = usb_lo(id->idVendor); + d->u.device.idVendor_hi = usb_hi(id->idVendor); + d->u.device.idProduct_lo = usb_lo(id->idProduct); + d->u.device.idProduct_hi = usb_hi(id->idProduct); + d->u.device.bcdDevice_lo = usb_lo(id->bcdDevice); + d->u.device.bcdDevice_hi = usb_hi(id->bcdDevice); + d->u.device.iManufacturer = id->iManufacturer; + d->u.device.iProduct = id->iProduct; + d->u.device.iSerialNumber = id->iSerialNumber; + + d->u.device.bNumConfigurations = dev->bNumConfigurations; + + return bLength; +} + +int usb_desc_device_qualifier(const USBDescDevice *dev, + uint8_t *dest, size_t len) +{ + uint8_t bLength = 0x0a; + USBDescriptor *d = (void *)dest; + + if (len < bLength) { + return -1; + } + + d->bLength = bLength; + d->bDescriptorType = USB_DT_DEVICE_QUALIFIER; + + d->u.device_qualifier.bcdUSB_lo = usb_lo(dev->bcdUSB); + d->u.device_qualifier.bcdUSB_hi = usb_hi(dev->bcdUSB); + d->u.device_qualifier.bDeviceClass = dev->bDeviceClass; + d->u.device_qualifier.bDeviceSubClass = dev->bDeviceSubClass; + d->u.device_qualifier.bDeviceProtocol = dev->bDeviceProtocol; + d->u.device_qualifier.bMaxPacketSize0 = dev->bMaxPacketSize0; + d->u.device_qualifier.bNumConfigurations = dev->bNumConfigurations; + d->u.device_qualifier.bReserved = 0; + + return bLength; +} + +int usb_desc_config(const USBDescConfig *conf, int flags, + uint8_t *dest, size_t len) +{ + uint8_t bLength = 0x09; + uint16_t wTotalLength = 0; + USBDescriptor *d = (void *)dest; + int i, rc; + + if (len < bLength) { + return -1; + } + + d->bLength = bLength; + d->bDescriptorType = USB_DT_CONFIG; + + d->u.config.bNumInterfaces = conf->bNumInterfaces; + d->u.config.bConfigurationValue = conf->bConfigurationValue; + d->u.config.iConfiguration = conf->iConfiguration; + d->u.config.bmAttributes = conf->bmAttributes; + d->u.config.bMaxPower = conf->bMaxPower; + wTotalLength += bLength; + + /* handle grouped interfaces if any */ + for (i = 0; i < conf->nif_groups; i++) { + rc = usb_desc_iface_group(&(conf->if_groups[i]), flags, + dest + wTotalLength, + len - wTotalLength); + if (rc < 0) { + return rc; + } + wTotalLength += rc; + } + + /* handle normal (ungrouped / no IAD) interfaces if any */ + for (i = 0; i < conf->nif; i++) { + rc = usb_desc_iface(conf->ifs + i, flags, + dest + wTotalLength, len - wTotalLength); + if (rc < 0) { + return rc; + } + wTotalLength += rc; + } + + d->u.config.wTotalLength_lo = usb_lo(wTotalLength); + d->u.config.wTotalLength_hi = usb_hi(wTotalLength); + return wTotalLength; +} + +int usb_desc_iface_group(const USBDescIfaceAssoc *iad, int flags, + uint8_t *dest, size_t len) +{ + int pos = 0; + int i = 0; + + /* handle interface association descriptor */ + uint8_t bLength = 0x08; + + if (len < bLength) { + return -1; + } + + dest[0x00] = bLength; + dest[0x01] = USB_DT_INTERFACE_ASSOC; + dest[0x02] = iad->bFirstInterface; + dest[0x03] = iad->bInterfaceCount; + dest[0x04] = iad->bFunctionClass; + dest[0x05] = iad->bFunctionSubClass; + dest[0x06] = iad->bFunctionProtocol; + dest[0x07] = iad->iFunction; + pos += bLength; + + /* handle associated interfaces in this group */ + for (i = 0; i < 
iad->nif; i++) { + int rc = usb_desc_iface(&(iad->ifs[i]), flags, dest + pos, len - pos); + if (rc < 0) { + return rc; + } + pos += rc; + } + + return pos; +} + +int usb_desc_iface(const USBDescIface *iface, int flags, + uint8_t *dest, size_t len) +{ + uint8_t bLength = 0x09; + int i, rc, pos = 0; + USBDescriptor *d = (void *)dest; + + if (len < bLength) { + return -1; + } + + d->bLength = bLength; + d->bDescriptorType = USB_DT_INTERFACE; + + d->u.interface.bInterfaceNumber = iface->bInterfaceNumber; + d->u.interface.bAlternateSetting = iface->bAlternateSetting; + d->u.interface.bNumEndpoints = iface->bNumEndpoints; + d->u.interface.bInterfaceClass = iface->bInterfaceClass; + d->u.interface.bInterfaceSubClass = iface->bInterfaceSubClass; + d->u.interface.bInterfaceProtocol = iface->bInterfaceProtocol; + d->u.interface.iInterface = iface->iInterface; + pos += bLength; + + for (i = 0; i < iface->ndesc; i++) { + rc = usb_desc_other(iface->descs + i, dest + pos, len - pos); + if (rc < 0) { + return rc; + } + pos += rc; + } + + for (i = 0; i < iface->bNumEndpoints; i++) { + rc = usb_desc_endpoint(iface->eps + i, flags, dest + pos, len - pos); + if (rc < 0) { + return rc; + } + pos += rc; + } + + return pos; +} + +int usb_desc_endpoint(const USBDescEndpoint *ep, int flags, + uint8_t *dest, size_t len) +{ + uint8_t bLength = ep->is_audio ? 0x09 : 0x07; + uint8_t extralen = ep->extra ? ep->extra[0] : 0; + uint8_t superlen = (flags & USB_DESC_FLAG_SUPER) ? 0x06 : 0; + USBDescriptor *d = (void *)dest; + + if (len < bLength + extralen + superlen) { + return -1; + } + + d->bLength = bLength; + d->bDescriptorType = USB_DT_ENDPOINT; + + d->u.endpoint.bEndpointAddress = ep->bEndpointAddress; + d->u.endpoint.bmAttributes = ep->bmAttributes; + d->u.endpoint.wMaxPacketSize_lo = usb_lo(ep->wMaxPacketSize); + d->u.endpoint.wMaxPacketSize_hi = usb_hi(ep->wMaxPacketSize); + d->u.endpoint.bInterval = ep->bInterval; + if (ep->is_audio) { + d->u.endpoint.bRefresh = ep->bRefresh; + d->u.endpoint.bSynchAddress = ep->bSynchAddress; + } + + if (superlen) { + USBDescriptor *d = (void *)(dest + bLength); + + d->bLength = 0x06; + d->bDescriptorType = USB_DT_ENDPOINT_COMPANION; + + d->u.super_endpoint.bMaxBurst = ep->bMaxBurst; + d->u.super_endpoint.bmAttributes = ep->bmAttributes_super; + d->u.super_endpoint.wBytesPerInterval_lo = + usb_lo(ep->wBytesPerInterval); + d->u.super_endpoint.wBytesPerInterval_hi = + usb_hi(ep->wBytesPerInterval); + } + + if (ep->extra) { + memcpy(dest + bLength + superlen, ep->extra, extralen); + } + + return bLength + extralen + superlen; +} + +int usb_desc_other(const USBDescOther *desc, uint8_t *dest, size_t len) +{ + int bLength = desc->length ? 
desc->length : desc->data[0]; + + if (len < bLength) { + return -1; + } + + memcpy(dest, desc->data, bLength); + return bLength; +} + +static int usb_desc_cap_usb2_ext(const USBDesc *desc, uint8_t *dest, size_t len) +{ + uint8_t bLength = 0x07; + USBDescriptor *d = (void *)dest; + + if (len < bLength) { + return -1; + } + + d->bLength = bLength; + d->bDescriptorType = USB_DT_DEVICE_CAPABILITY; + d->u.cap.bDevCapabilityType = USB_DEV_CAP_USB2_EXT; + + d->u.cap.u.usb2_ext.bmAttributes_1 = (1 << 1); /* LPM */ + d->u.cap.u.usb2_ext.bmAttributes_2 = 0; + d->u.cap.u.usb2_ext.bmAttributes_3 = 0; + d->u.cap.u.usb2_ext.bmAttributes_4 = 0; + + return bLength; +} + +static int usb_desc_cap_super(const USBDesc *desc, uint8_t *dest, size_t len) +{ + uint8_t bLength = 0x0a; + USBDescriptor *d = (void *)dest; + + if (len < bLength) { + return -1; + } + + d->bLength = bLength; + d->bDescriptorType = USB_DT_DEVICE_CAPABILITY; + d->u.cap.bDevCapabilityType = USB_DEV_CAP_SUPERSPEED; + + d->u.cap.u.super.bmAttributes = 0; + d->u.cap.u.super.wSpeedsSupported_lo = 0; + d->u.cap.u.super.wSpeedsSupported_hi = 0; + d->u.cap.u.super.bFunctionalitySupport = 0; + d->u.cap.u.super.bU1DevExitLat = 0x0a; + d->u.cap.u.super.wU2DevExitLat_lo = 0x20; + d->u.cap.u.super.wU2DevExitLat_hi = 0; + + if (desc->full) { + d->u.cap.u.super.wSpeedsSupported_lo |= (1 << 1); + d->u.cap.u.super.bFunctionalitySupport = 1; + } + if (desc->high) { + d->u.cap.u.super.wSpeedsSupported_lo |= (1 << 2); + if (!d->u.cap.u.super.bFunctionalitySupport) { + d->u.cap.u.super.bFunctionalitySupport = 2; + } + } + if (desc->super) { + d->u.cap.u.super.wSpeedsSupported_lo |= (1 << 3); + if (!d->u.cap.u.super.bFunctionalitySupport) { + d->u.cap.u.super.bFunctionalitySupport = 3; + } + } + + return bLength; +} + +static int usb_desc_bos(const USBDesc *desc, uint8_t *dest, size_t len) +{ + uint8_t bLength = 0x05; + uint16_t wTotalLength = 0; + uint8_t bNumDeviceCaps = 0; + USBDescriptor *d = (void *)dest; + int rc; + + if (len < bLength) { + return -1; + } + + d->bLength = bLength; + d->bDescriptorType = USB_DT_BOS; + + wTotalLength += bLength; + + if (desc->high != NULL) { + rc = usb_desc_cap_usb2_ext(desc, dest + wTotalLength, + len - wTotalLength); + if (rc < 0) { + return rc; + } + wTotalLength += rc; + bNumDeviceCaps++; + } + + if (desc->super != NULL) { + rc = usb_desc_cap_super(desc, dest + wTotalLength, + len - wTotalLength); + if (rc < 0) { + return rc; + } + wTotalLength += rc; + bNumDeviceCaps++; + } + + d->u.bos.wTotalLength_lo = usb_lo(wTotalLength); + d->u.bos.wTotalLength_hi = usb_hi(wTotalLength); + d->u.bos.bNumDeviceCaps = bNumDeviceCaps; + return wTotalLength; +} + +/* ------------------------------------------------------------------ */ + +static void usb_desc_ep_init(USBDevice *dev) +{ + const USBDescIface *iface; + int i, e, pid, ep; + + usb_ep_init(dev); + for (i = 0; i < dev->ninterfaces; i++) { + iface = dev->ifaces[i]; + if (iface == NULL) { + continue; + } + for (e = 0; e < iface->bNumEndpoints; e++) { + pid = (iface->eps[e].bEndpointAddress & USB_DIR_IN) ? 
+ USB_TOKEN_IN : USB_TOKEN_OUT; + ep = iface->eps[e].bEndpointAddress & 0x0f; + usb_ep_set_type(dev, pid, ep, iface->eps[e].bmAttributes & 0x03); + usb_ep_set_ifnum(dev, pid, ep, iface->bInterfaceNumber); + usb_ep_set_max_packet_size(dev, pid, ep, + iface->eps[e].wMaxPacketSize); + usb_ep_set_max_streams(dev, pid, ep, + iface->eps[e].bmAttributes_super); + } + } +} + +static const USBDescIface *usb_desc_find_interface(USBDevice *dev, + int nif, int alt) +{ + const USBDescIface *iface; + int g, i; + + if (!dev->config) { + return NULL; + } + for (g = 0; g < dev->config->nif_groups; g++) { + for (i = 0; i < dev->config->if_groups[g].nif; i++) { + iface = &dev->config->if_groups[g].ifs[i]; + if (iface->bInterfaceNumber == nif && + iface->bAlternateSetting == alt) { + return iface; + } + } + } + for (i = 0; i < dev->config->nif; i++) { + iface = &dev->config->ifs[i]; + if (iface->bInterfaceNumber == nif && + iface->bAlternateSetting == alt) { + return iface; + } + } + return NULL; +} + +static int usb_desc_set_interface(USBDevice *dev, int index, int value) +{ + const USBDescIface *iface; + int old; + + iface = usb_desc_find_interface(dev, index, value); + if (iface == NULL) { + return -1; + } + + old = dev->altsetting[index]; + dev->altsetting[index] = value; + dev->ifaces[index] = iface; + usb_desc_ep_init(dev); + + if (old != value) { + usb_device_set_interface(dev, index, old, value); + } + return 0; +} + +static int usb_desc_set_config(USBDevice *dev, int value) +{ + int i; + + if (value == 0) { + dev->configuration = 0; + dev->ninterfaces = 0; + dev->config = NULL; + } else { + for (i = 0; i < dev->device->bNumConfigurations; i++) { + if (dev->device->confs[i].bConfigurationValue == value) { + dev->configuration = value; + dev->ninterfaces = dev->device->confs[i].bNumInterfaces; + dev->config = dev->device->confs + i; + assert(dev->ninterfaces <= USB_MAX_INTERFACES); + } + } + if (i < dev->device->bNumConfigurations) { + return -1; + } + } + + for (i = 0; i < dev->ninterfaces; i++) { + usb_desc_set_interface(dev, i, 0); + } + for (; i < USB_MAX_INTERFACES; i++) { + dev->altsetting[i] = 0; + dev->ifaces[i] = NULL; + } + + return 0; +} + +static void usb_desc_setdefaults(USBDevice *dev) +{ + const USBDesc *desc = usb_device_get_usb_desc(dev); + + assert(desc != NULL); + switch (dev->speed) { + case USB_SPEED_LOW: + case USB_SPEED_FULL: + dev->device = desc->full; + break; + case USB_SPEED_HIGH: + dev->device = desc->high; + break; + case USB_SPEED_SUPER: + dev->device = desc->super; + break; + } + usb_desc_set_config(dev, 0); +} + +void usb_desc_init(USBDevice *dev) +{ + const USBDesc *desc = usb_device_get_usb_desc(dev); + + assert(desc != NULL); + dev->speed = USB_SPEED_FULL; + dev->speedmask = 0; + if (desc->full) { + dev->speedmask |= USB_SPEED_MASK_FULL; + } + if (desc->high) { + dev->speedmask |= USB_SPEED_MASK_HIGH; + } + if (desc->super) { + dev->speedmask |= USB_SPEED_MASK_SUPER; + } + if (desc->msos && (dev->flags & (1 << USB_DEV_FLAG_MSOS_DESC_ENABLE))) { + dev->flags |= (1 << USB_DEV_FLAG_MSOS_DESC_IN_USE); + usb_desc_set_string(dev, 0xee, "MSFT100Q"); + } + usb_desc_setdefaults(dev); +} + +void usb_desc_attach(USBDevice *dev) +{ + usb_desc_setdefaults(dev); +} + +void usb_desc_set_string(USBDevice *dev, uint8_t index, const char *str) +{ + USBDescString *s; + + QLIST_FOREACH(s, &dev->strings, next) { + if (s->index == index) { + break; + } + } + if (s == NULL) { + s = g_malloc0(sizeof(*s)); + s->index = index; + QLIST_INSERT_HEAD(&dev->strings, s, next); + } + g_free(s->str); 
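+    /* free any previous value; for a fresh entry s->str is NULL and
+       g_free(NULL) is a no-op */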
+ s->str = g_strdup(str); +} + +/* + * This function creates a serial number for a usb device. + * The serial number should: + * (a) Be unique within the virtual machine. + * (b) Be constant, so you don't get a new one each + * time the guest is started. + * So we are using the physical location to generate a serial number + * from it. It has three pieces: First a fixed, device-specific + * prefix. Second the device path of the host controller (which is + * the pci address in most cases). Third the physical port path. + * Results in serial numbers like this: "314159-0000:00:1d.7-3". + */ +void usb_desc_create_serial(USBDevice *dev) +{ + DeviceState *hcd = dev->qdev.parent_bus->parent; + const USBDesc *desc = usb_device_get_usb_desc(dev); + int index = desc->id.iSerialNumber; + char *path, *serial; + + if (dev->serial) { + /* 'serial' usb bus property has priority if present */ + usb_desc_set_string(dev, index, dev->serial); + return; + } + + assert(index != 0 && desc->str[index] != NULL); + path = qdev_get_dev_path(hcd); + if (path) { + serial = g_strdup_printf("%s-%s-%s", desc->str[index], + path, dev->port->path); + } else { + serial = g_strdup_printf("%s-%s", desc->str[index], dev->port->path); + } + usb_desc_set_string(dev, index, serial); + g_free(path); + g_free(serial); +} + +const char *usb_desc_get_string(USBDevice *dev, uint8_t index) +{ + USBDescString *s; + + QLIST_FOREACH(s, &dev->strings, next) { + if (s->index == index) { + return s->str; + } + } + return NULL; +} + +int usb_desc_string(USBDevice *dev, int index, uint8_t *dest, size_t len) +{ + uint8_t bLength, pos, i; + const char *str; + + if (len < 4) { + return -1; + } + + if (index == 0) { + /* language ids */ + dest[0] = 4; + dest[1] = USB_DT_STRING; + dest[2] = 0x09; + dest[3] = 0x04; + return 4; + } + + str = usb_desc_get_string(dev, index); + if (str == NULL) { + str = usb_device_get_usb_desc(dev)->str[index]; + if (str == NULL) { + return 0; + } + } + + bLength = strlen(str) * 2 + 2; + dest[0] = bLength; + dest[1] = USB_DT_STRING; + i = 0; pos = 2; + while (pos+1 < bLength && pos+1 < len) { + dest[pos++] = str[i++]; + dest[pos++] = 0; + } + return pos; +} + +int usb_desc_get_descriptor(USBDevice *dev, USBPacket *p, + int value, uint8_t *dest, size_t len) +{ + bool msos = (dev->flags & (1 << USB_DEV_FLAG_MSOS_DESC_IN_USE)); + const USBDesc *desc = usb_device_get_usb_desc(dev); + const USBDescDevice *other_dev; + uint8_t buf[256]; + uint8_t type = value >> 8; + uint8_t index = value & 0xff; + int flags, ret = -1; + + if (dev->speed == USB_SPEED_HIGH) { + other_dev = usb_device_get_usb_desc(dev)->full; + } else { + other_dev = usb_device_get_usb_desc(dev)->high; + } + + flags = 0; + if (dev->device->bcdUSB >= 0x0300) { + flags |= USB_DESC_FLAG_SUPER; + } + + switch(type) { + case USB_DT_DEVICE: + ret = usb_desc_device(&desc->id, dev->device, msos, buf, sizeof(buf)); + trace_usb_desc_device(dev->addr, len, ret); + break; + case USB_DT_CONFIG: + if (index < dev->device->bNumConfigurations) { + ret = usb_desc_config(dev->device->confs + index, flags, + buf, sizeof(buf)); + } + trace_usb_desc_config(dev->addr, index, len, ret); + break; + case USB_DT_STRING: + ret = usb_desc_string(dev, index, buf, sizeof(buf)); + trace_usb_desc_string(dev->addr, index, len, ret); + break; + case USB_DT_DEVICE_QUALIFIER: + if (other_dev != NULL) { + ret = usb_desc_device_qualifier(other_dev, buf, sizeof(buf)); + } + trace_usb_desc_device_qualifier(dev->addr, len, ret); + break; + case USB_DT_OTHER_SPEED_CONFIG: + if (other_dev != NULL && 
index < other_dev->bNumConfigurations) { + ret = usb_desc_config(other_dev->confs + index, flags, + buf, sizeof(buf)); + buf[0x01] = USB_DT_OTHER_SPEED_CONFIG; + } + trace_usb_desc_other_speed_config(dev->addr, index, len, ret); + break; + case USB_DT_BOS: + ret = usb_desc_bos(desc, buf, sizeof(buf)); + trace_usb_desc_bos(dev->addr, len, ret); + break; + + case USB_DT_DEBUG: + /* ignore silently */ + break; + + default: + fprintf(stderr, "%s: %d unknown type %d (len %zd)\n", __func__, + dev->addr, type, len); + break; + } + + if (ret > 0) { + if (ret > len) { + ret = len; + } + memcpy(dest, buf, ret); + p->actual_length = ret; + ret = 0; + } + return ret; +} + +int usb_desc_handle_control(USBDevice *dev, USBPacket *p, + int request, int value, int index, int length, uint8_t *data) +{ + bool msos = (dev->flags & (1 << USB_DEV_FLAG_MSOS_DESC_IN_USE)); + const USBDesc *desc = usb_device_get_usb_desc(dev); + int ret = -1; + + assert(desc != NULL); + switch(request) { + case DeviceOutRequest | USB_REQ_SET_ADDRESS: + dev->addr = value; + trace_usb_set_addr(dev->addr); + ret = 0; + break; + + case DeviceRequest | USB_REQ_GET_DESCRIPTOR: + ret = usb_desc_get_descriptor(dev, p, value, data, length); + break; + + case DeviceRequest | USB_REQ_GET_CONFIGURATION: + /* + * 9.4.2: 0 should be returned if the device is unconfigured, otherwise + * the non zero value of bConfigurationValue. + */ + data[0] = dev->config ? dev->config->bConfigurationValue : 0; + p->actual_length = 1; + ret = 0; + break; + case DeviceOutRequest | USB_REQ_SET_CONFIGURATION: + ret = usb_desc_set_config(dev, value); + trace_usb_set_config(dev->addr, value, ret); + break; + + case DeviceRequest | USB_REQ_GET_STATUS: { + const USBDescConfig *config = dev->config ? + dev->config : &dev->device->confs[0]; + + data[0] = 0; + /* + * Default state: Device behavior when this request is received while + * the device is in the Default state is not specified. + * We return the same value that a configured device would return if + * it used the first configuration. 
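+         * (See the USB 2.0 spec, section 9.4.5, Get Status.)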
+         */
+        if (config->bmAttributes & USB_CFG_ATT_SELFPOWER) {
+            data[0] |= 1 << USB_DEVICE_SELF_POWERED;
+        }
+        if (dev->remote_wakeup) {
+            data[0] |= 1 << USB_DEVICE_REMOTE_WAKEUP;
+        }
+        data[1] = 0x00;
+        p->actual_length = 2;
+        ret = 0;
+        break;
+    }
+    case DeviceOutRequest | USB_REQ_CLEAR_FEATURE:
+        if (value == USB_DEVICE_REMOTE_WAKEUP) {
+            dev->remote_wakeup = 0;
+            ret = 0;
+        }
+        trace_usb_clear_device_feature(dev->addr, value, ret);
+        break;
+    case DeviceOutRequest | USB_REQ_SET_FEATURE:
+        if (value == USB_DEVICE_REMOTE_WAKEUP) {
+            dev->remote_wakeup = 1;
+            ret = 0;
+        }
+        trace_usb_set_device_feature(dev->addr, value, ret);
+        break;
+
+    case DeviceOutRequest | USB_REQ_SET_SEL:
+    case DeviceOutRequest | USB_REQ_SET_ISOCH_DELAY:
+        if (dev->speed == USB_SPEED_SUPER) {
+            ret = 0;
+        }
+        break;
+
+    case InterfaceRequest | USB_REQ_GET_INTERFACE:
+        if (index < 0 || index >= dev->ninterfaces) {
+            break;
+        }
+        data[0] = dev->altsetting[index];
+        p->actual_length = 1;
+        ret = 0;
+        break;
+    case InterfaceOutRequest | USB_REQ_SET_INTERFACE:
+        ret = usb_desc_set_interface(dev, index, value);
+        trace_usb_set_interface(dev->addr, index, value, ret);
+        break;
+
+    case VendorDeviceRequest | 'Q':
+        if (msos) {
+            ret = usb_desc_msos(desc, p, index, data, length);
+            trace_usb_desc_msos(dev->addr, index, length, ret);
+        }
+        break;
+    case VendorInterfaceRequest | 'Q':
+        if (msos) {
+            ret = usb_desc_msos(desc, p, index, data, length);
+            trace_usb_desc_msos(dev->addr, index, length, ret);
+        }
+        break;
+
+    }
+    return ret;
+}
diff --git a/hw/usb/desc.h b/hw/usb/desc.h
new file mode 100644
index 000000000..3ac604ecf
--- /dev/null
+++ b/hw/usb/desc.h
@@ -0,0 +1,244 @@
+#ifndef QEMU_HW_USB_DESC_H
+#define QEMU_HW_USB_DESC_H
+
+#include <wchar.h>
+
+/* binary representation */
+typedef struct USBDescriptor {
+    uint8_t bLength;
+    uint8_t bDescriptorType;
+    union {
+        struct {
+            uint8_t bcdUSB_lo;
+            uint8_t bcdUSB_hi;
+            uint8_t bDeviceClass;
+            uint8_t bDeviceSubClass;
+            uint8_t bDeviceProtocol;
+            uint8_t bMaxPacketSize0;
+            uint8_t idVendor_lo;
+            uint8_t idVendor_hi;
+            uint8_t idProduct_lo;
+            uint8_t idProduct_hi;
+            uint8_t bcdDevice_lo;
+            uint8_t bcdDevice_hi;
+            uint8_t iManufacturer;
+            uint8_t iProduct;
+            uint8_t iSerialNumber;
+            uint8_t bNumConfigurations;
+        } device;
+        struct {
+            uint8_t bcdUSB_lo;
+            uint8_t bcdUSB_hi;
+            uint8_t bDeviceClass;
+            uint8_t bDeviceSubClass;
+            uint8_t bDeviceProtocol;
+            uint8_t bMaxPacketSize0;
+            uint8_t bNumConfigurations;
+            uint8_t bReserved;
+        } device_qualifier;
+        struct {
+            uint8_t wTotalLength_lo;
+            uint8_t wTotalLength_hi;
+            uint8_t bNumInterfaces;
+            uint8_t bConfigurationValue;
+            uint8_t iConfiguration;
+            uint8_t bmAttributes;
+            uint8_t bMaxPower;
+        } config;
+        struct {
+            uint8_t bInterfaceNumber;
+            uint8_t bAlternateSetting;
+            uint8_t bNumEndpoints;
+            uint8_t bInterfaceClass;
+            uint8_t bInterfaceSubClass;
+            uint8_t bInterfaceProtocol;
+            uint8_t iInterface;
+        } interface;
+        struct {
+            uint8_t bEndpointAddress;
+            uint8_t bmAttributes;
+            uint8_t wMaxPacketSize_lo;
+            uint8_t wMaxPacketSize_hi;
+            uint8_t bInterval;
+            uint8_t bRefresh;      /* only audio ep */
+            uint8_t bSynchAddress; /* only audio ep */
+        } endpoint;
+        struct {
+            uint8_t bMaxBurst;
+            uint8_t bmAttributes;
+            uint8_t wBytesPerInterval_lo;
+            uint8_t wBytesPerInterval_hi;
+        } super_endpoint;
+        struct {
+            uint8_t wTotalLength_lo;
+            uint8_t wTotalLength_hi;
+            uint8_t bNumDeviceCaps;
+        } bos;
+        struct {
+            uint8_t bDevCapabilityType;
+            union {
+                struct {
+                    uint8_t bmAttributes_1;
+                    uint8_t bmAttributes_2;
+                    uint8_t bmAttributes_3;
+                    uint8_t
bmAttributes_4; + } usb2_ext; + struct { + uint8_t bmAttributes; + uint8_t wSpeedsSupported_lo; + uint8_t wSpeedsSupported_hi; + uint8_t bFunctionalitySupport; + uint8_t bU1DevExitLat; + uint8_t wU2DevExitLat_lo; + uint8_t wU2DevExitLat_hi; + } super; + } u; + } cap; + } u; +} QEMU_PACKED USBDescriptor; + +struct USBDescID { + uint16_t idVendor; + uint16_t idProduct; + uint16_t bcdDevice; + uint8_t iManufacturer; + uint8_t iProduct; + uint8_t iSerialNumber; +}; + +struct USBDescDevice { + uint16_t bcdUSB; + uint8_t bDeviceClass; + uint8_t bDeviceSubClass; + uint8_t bDeviceProtocol; + uint8_t bMaxPacketSize0; + uint8_t bNumConfigurations; + + const USBDescConfig *confs; +}; + +struct USBDescConfig { + uint8_t bNumInterfaces; + uint8_t bConfigurationValue; + uint8_t iConfiguration; + uint8_t bmAttributes; + uint8_t bMaxPower; + + /* grouped interfaces */ + uint8_t nif_groups; + const USBDescIfaceAssoc *if_groups; + + /* "normal" interfaces */ + uint8_t nif; + const USBDescIface *ifs; +}; + +/* conceptually an Interface Association Descriptor, and related interfaces */ +struct USBDescIfaceAssoc { + uint8_t bFirstInterface; + uint8_t bInterfaceCount; + uint8_t bFunctionClass; + uint8_t bFunctionSubClass; + uint8_t bFunctionProtocol; + uint8_t iFunction; + + uint8_t nif; + const USBDescIface *ifs; +}; + +struct USBDescIface { + uint8_t bInterfaceNumber; + uint8_t bAlternateSetting; + uint8_t bNumEndpoints; + uint8_t bInterfaceClass; + uint8_t bInterfaceSubClass; + uint8_t bInterfaceProtocol; + uint8_t iInterface; + + uint8_t ndesc; + USBDescOther *descs; + USBDescEndpoint *eps; +}; + +struct USBDescEndpoint { + uint8_t bEndpointAddress; + uint8_t bmAttributes; + uint16_t wMaxPacketSize; + uint8_t bInterval; + uint8_t bRefresh; + uint8_t bSynchAddress; + + uint8_t is_audio; /* has bRefresh + bSynchAddress */ + uint8_t *extra; + + /* superspeed endpoint companion */ + uint8_t bMaxBurst; + uint8_t bmAttributes_super; + uint16_t wBytesPerInterval; +}; + +struct USBDescOther { + uint8_t length; + const uint8_t *data; +}; + +struct USBDescMSOS { + const char *CompatibleID; + const wchar_t *Label; + bool SelectiveSuspendEnabled; +}; + +typedef const char *USBDescStrings[256]; + +struct USBDesc { + USBDescID id; + const USBDescDevice *full; + const USBDescDevice *high; + const USBDescDevice *super; + const char* const *str; + const USBDescMSOS *msos; +}; + +#define USB_DESC_FLAG_SUPER (1 << 1) + +/* little helpers */ +static inline uint8_t usb_lo(uint16_t val) +{ + return val & 0xff; +} + +static inline uint8_t usb_hi(uint16_t val) +{ + return (val >> 8) & 0xff; +} + +/* generate usb packages from structs */ +int usb_desc_device(const USBDescID *id, const USBDescDevice *dev, + bool msos, uint8_t *dest, size_t len); +int usb_desc_device_qualifier(const USBDescDevice *dev, + uint8_t *dest, size_t len); +int usb_desc_config(const USBDescConfig *conf, int flags, + uint8_t *dest, size_t len); +int usb_desc_iface_group(const USBDescIfaceAssoc *iad, int flags, + uint8_t *dest, size_t len); +int usb_desc_iface(const USBDescIface *iface, int flags, + uint8_t *dest, size_t len); +int usb_desc_endpoint(const USBDescEndpoint *ep, int flags, + uint8_t *dest, size_t len); +int usb_desc_other(const USBDescOther *desc, uint8_t *dest, size_t len); +int usb_desc_msos(const USBDesc *desc, USBPacket *p, + int index, uint8_t *dest, size_t len); + +/* control message emulation helpers */ +void usb_desc_init(USBDevice *dev); +void usb_desc_attach(USBDevice *dev); +void usb_desc_set_string(USBDevice *dev, uint8_t index, const 
char *str);
+void usb_desc_create_serial(USBDevice *dev);
+const char *usb_desc_get_string(USBDevice *dev, uint8_t index);
+int usb_desc_string(USBDevice *dev, int index, uint8_t *dest, size_t len);
+int usb_desc_get_descriptor(USBDevice *dev, USBPacket *p,
+                            int value, uint8_t *dest, size_t len);
+int usb_desc_handle_control(USBDevice *dev, USBPacket *p,
+                            int request, int value, int index, int length, uint8_t *data);
+
+#endif /* QEMU_HW_USB_DESC_H */
diff --git a/hw/usb/dev-audio.c b/hw/usb/dev-audio.c
new file mode 100644
index 000000000..8748c1ba0
--- /dev/null
+++ b/hw/usb/dev-audio.c
@@ -0,0 +1,1029 @@
+/*
+ * QEMU USB audio device
+ *
+ * written by:
+ *  H. Peter Anvin
+ *  Gerd Hoffmann
+ *
+ * loosely based on usb net device code which is:
+ *
+ * Copyright (c) 2006 Thomas Sailer
+ * Copyright (c) 2008 Andrzej Zaborowski
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/module.h"
+#include "hw/qdev-properties.h"
+#include "hw/usb.h"
+#include "migration/vmstate.h"
+#include "desc.h"
+#include "audio/audio.h"
+#include "qom/object.h"
+
+static void usb_audio_reinit(USBDevice *dev, unsigned channels);
+
+#define USBAUDIO_VENDOR_NUM     0x46f4 /* CRC16() of "QEMU" */
+#define USBAUDIO_PRODUCT_NUM    0x0002
+
+#define DEV_CONFIG_VALUE        1 /* The one and only */
+
+#define USBAUDIO_MAX_CHANNELS(s) (s->multi ?
8 : 2) + +/* Descriptor subtypes for AC interfaces */ +#define DST_AC_HEADER 1 +#define DST_AC_INPUT_TERMINAL 2 +#define DST_AC_OUTPUT_TERMINAL 3 +#define DST_AC_FEATURE_UNIT 6 +/* Descriptor subtypes for AS interfaces */ +#define DST_AS_GENERAL 1 +#define DST_AS_FORMAT_TYPE 2 +/* Descriptor subtypes for endpoints */ +#define DST_EP_GENERAL 1 + +enum usb_audio_strings { + STRING_NULL, + STRING_MANUFACTURER, + STRING_PRODUCT, + STRING_SERIALNUMBER, + STRING_CONFIG, + STRING_USBAUDIO_CONTROL, + STRING_INPUT_TERMINAL, + STRING_FEATURE_UNIT, + STRING_OUTPUT_TERMINAL, + STRING_NULL_STREAM, + STRING_REAL_STREAM, +}; + +static const USBDescStrings usb_audio_stringtable = { + [STRING_MANUFACTURER] = "QEMU", + [STRING_PRODUCT] = "QEMU USB Audio", + [STRING_SERIALNUMBER] = "1", + [STRING_CONFIG] = "Audio Configuration", + [STRING_USBAUDIO_CONTROL] = "Audio Device", + [STRING_INPUT_TERMINAL] = "Audio Output Pipe", + [STRING_FEATURE_UNIT] = "Audio Output Volume Control", + [STRING_OUTPUT_TERMINAL] = "Audio Output Terminal", + [STRING_NULL_STREAM] = "Audio Output - Disabled", + [STRING_REAL_STREAM] = "Audio Output - 48 kHz Stereo", +}; + +/* + * A USB audio device supports an arbitrary number of alternate + * interface settings for each interface. Each corresponds to a block + * diagram of parameterized blocks. This can thus refer to things like + * number of channels, data rates, or in fact completely different + * block diagrams. Alternative setting 0 is always the null block diagram, + * which is used by a disabled device. + */ +enum usb_audio_altset { + ALTSET_OFF = 0x00, /* No endpoint */ + ALTSET_STEREO = 0x01, /* Single endpoint */ + ALTSET_51 = 0x02, + ALTSET_71 = 0x03, +}; + +static unsigned altset_channels[] = { + [ALTSET_STEREO] = 2, + [ALTSET_51] = 6, + [ALTSET_71] = 8, +}; + +#define U16(x) ((x) & 0xff), (((x) >> 8) & 0xff) +#define U24(x) U16(x), (((x) >> 16) & 0xff) +#define U32(x) U24(x), (((x) >> 24) & 0xff) + +/* + * A Basic Audio Device uses these specific values + */ +#define USBAUDIO_PACKET_SIZE_BASE 96 +#define USBAUDIO_PACKET_SIZE(channels) (USBAUDIO_PACKET_SIZE_BASE * channels) +#define USBAUDIO_SAMPLE_RATE 48000 +#define USBAUDIO_PACKET_INTERVAL 1 + +static const USBDescIface desc_iface[] = { + { + .bInterfaceNumber = 0, + .bNumEndpoints = 0, + .bInterfaceClass = USB_CLASS_AUDIO, + .bInterfaceSubClass = USB_SUBCLASS_AUDIO_CONTROL, + .bInterfaceProtocol = 0x04, + .iInterface = STRING_USBAUDIO_CONTROL, + .ndesc = 4, + .descs = (USBDescOther[]) { + { + /* Headphone Class-Specific AC Interface Header Descriptor */ + .data = (uint8_t[]) { + 0x09, /* u8 bLength */ + USB_DT_CS_INTERFACE, /* u8 bDescriptorType */ + DST_AC_HEADER, /* u8 bDescriptorSubtype */ + U16(0x0100), /* u16 bcdADC */ + U16(0x2b), /* u16 wTotalLength */ + 0x01, /* u8 bInCollection */ + 0x01, /* u8 baInterfaceNr */ + } + },{ + /* Generic Stereo Input Terminal ID1 Descriptor */ + .data = (uint8_t[]) { + 0x0c, /* u8 bLength */ + USB_DT_CS_INTERFACE, /* u8 bDescriptorType */ + DST_AC_INPUT_TERMINAL, /* u8 bDescriptorSubtype */ + 0x01, /* u8 bTerminalID */ + U16(0x0101), /* u16 wTerminalType */ + 0x00, /* u8 bAssocTerminal */ + 0x02, /* u8 bNrChannels */ + U16(0x0003), /* u16 wChannelConfig */ + 0x00, /* u8 iChannelNames */ + STRING_INPUT_TERMINAL, /* u8 iTerminal */ + } + },{ + /* Generic Stereo Feature Unit ID2 Descriptor */ + .data = (uint8_t[]) { + 0x0d, /* u8 bLength */ + USB_DT_CS_INTERFACE, /* u8 bDescriptorType */ + DST_AC_FEATURE_UNIT, /* u8 bDescriptorSubtype */ + 0x02, /* u8 bUnitID */ + 0x01, /* u8 bSourceID 
*/ + 0x02, /* u8 bControlSize */ + U16(0x0001), /* u16 bmaControls(0) */ + U16(0x0002), /* u16 bmaControls(1) */ + U16(0x0002), /* u16 bmaControls(2) */ + STRING_FEATURE_UNIT, /* u8 iFeature */ + } + },{ + /* Headphone Output Terminal ID3 Descriptor */ + .data = (uint8_t[]) { + 0x09, /* u8 bLength */ + USB_DT_CS_INTERFACE, /* u8 bDescriptorType */ + DST_AC_OUTPUT_TERMINAL, /* u8 bDescriptorSubtype */ + 0x03, /* u8 bUnitID */ + U16(0x0301), /* u16 wTerminalType (SPK) */ + 0x00, /* u8 bAssocTerminal */ + 0x02, /* u8 bSourceID */ + STRING_OUTPUT_TERMINAL, /* u8 iTerminal */ + } + } + }, + },{ + .bInterfaceNumber = 1, + .bAlternateSetting = ALTSET_OFF, + .bNumEndpoints = 0, + .bInterfaceClass = USB_CLASS_AUDIO, + .bInterfaceSubClass = USB_SUBCLASS_AUDIO_STREAMING, + .iInterface = STRING_NULL_STREAM, + },{ + .bInterfaceNumber = 1, + .bAlternateSetting = ALTSET_STEREO, + .bNumEndpoints = 1, + .bInterfaceClass = USB_CLASS_AUDIO, + .bInterfaceSubClass = USB_SUBCLASS_AUDIO_STREAMING, + .iInterface = STRING_REAL_STREAM, + .ndesc = 2, + .descs = (USBDescOther[]) { + { + /* Headphone Class-specific AS General Interface Descriptor */ + .data = (uint8_t[]) { + 0x07, /* u8 bLength */ + USB_DT_CS_INTERFACE, /* u8 bDescriptorType */ + DST_AS_GENERAL, /* u8 bDescriptorSubtype */ + 0x01, /* u8 bTerminalLink */ + 0x00, /* u8 bDelay */ + 0x01, 0x00, /* u16 wFormatTag */ + } + },{ + /* Headphone Type I Format Type Descriptor */ + .data = (uint8_t[]) { + 0x0b, /* u8 bLength */ + USB_DT_CS_INTERFACE, /* u8 bDescriptorType */ + DST_AS_FORMAT_TYPE, /* u8 bDescriptorSubtype */ + 0x01, /* u8 bFormatType */ + 0x02, /* u8 bNrChannels */ + 0x02, /* u8 bSubFrameSize */ + 0x10, /* u8 bBitResolution */ + 0x01, /* u8 bSamFreqType */ + U24(USBAUDIO_SAMPLE_RATE), /* u24 tSamFreq */ + } + } + }, + .eps = (USBDescEndpoint[]) { + { + .bEndpointAddress = USB_DIR_OUT | 0x01, + .bmAttributes = 0x0d, + .wMaxPacketSize = USBAUDIO_PACKET_SIZE(2), + .bInterval = 1, + .is_audio = 1, + /* Stereo Headphone Class-specific + AS Audio Data Endpoint Descriptor */ + .extra = (uint8_t[]) { + 0x07, /* u8 bLength */ + USB_DT_CS_ENDPOINT, /* u8 bDescriptorType */ + DST_EP_GENERAL, /* u8 bDescriptorSubtype */ + 0x00, /* u8 bmAttributes */ + 0x00, /* u8 bLockDelayUnits */ + U16(0x0000), /* u16 wLockDelay */ + }, + }, + } + } +}; + +static const USBDescDevice desc_device = { + .bcdUSB = 0x0100, + .bMaxPacketSize0 = 64, + .bNumConfigurations = 1, + .confs = (USBDescConfig[]) { + { + .bNumInterfaces = 2, + .bConfigurationValue = DEV_CONFIG_VALUE, + .iConfiguration = STRING_CONFIG, + .bmAttributes = USB_CFG_ATT_ONE | USB_CFG_ATT_SELFPOWER, + .bMaxPower = 0x32, + .nif = ARRAY_SIZE(desc_iface), + .ifs = desc_iface, + }, + }, +}; + +static const USBDesc desc_audio = { + .id = { + .idVendor = USBAUDIO_VENDOR_NUM, + .idProduct = USBAUDIO_PRODUCT_NUM, + .bcdDevice = 0, + .iManufacturer = STRING_MANUFACTURER, + .iProduct = STRING_PRODUCT, + .iSerialNumber = STRING_SERIALNUMBER, + }, + .full = &desc_device, + .str = usb_audio_stringtable, +}; + +/* multi channel compatible desc */ + +static const USBDescIface desc_iface_multi[] = { + { + .bInterfaceNumber = 0, + .bNumEndpoints = 0, + .bInterfaceClass = USB_CLASS_AUDIO, + .bInterfaceSubClass = USB_SUBCLASS_AUDIO_CONTROL, + .bInterfaceProtocol = 0x04, + .iInterface = STRING_USBAUDIO_CONTROL, + .ndesc = 4, + .descs = (USBDescOther[]) { + { + /* Headphone Class-Specific AC Interface Header Descriptor */ + .data = (uint8_t[]) { + 0x09, /* u8 bLength */ + USB_DT_CS_INTERFACE, /* u8 bDescriptorType */ + DST_AC_HEADER, 
/* u8 bDescriptorSubtype */ + U16(0x0100), /* u16 bcdADC */ + U16(0x38), /* u16 wTotalLength */ + 0x01, /* u8 bInCollection */ + 0x01, /* u8 baInterfaceNr */ + } + },{ + /* Generic Stereo Input Terminal ID1 Descriptor */ + .data = (uint8_t[]) { + 0x0c, /* u8 bLength */ + USB_DT_CS_INTERFACE, /* u8 bDescriptorType */ + DST_AC_INPUT_TERMINAL, /* u8 bDescriptorSubtype */ + 0x01, /* u8 bTerminalID */ + U16(0x0101), /* u16 wTerminalType */ + 0x00, /* u8 bAssocTerminal */ + 0x08, /* u8 bNrChannels */ + U16(0x063f), /* u16 wChannelConfig */ + 0x00, /* u8 iChannelNames */ + STRING_INPUT_TERMINAL, /* u8 iTerminal */ + } + },{ + /* Generic Stereo Feature Unit ID2 Descriptor */ + .data = (uint8_t[]) { + 0x19, /* u8 bLength */ + USB_DT_CS_INTERFACE, /* u8 bDescriptorType */ + DST_AC_FEATURE_UNIT, /* u8 bDescriptorSubtype */ + 0x02, /* u8 bUnitID */ + 0x01, /* u8 bSourceID */ + 0x02, /* u8 bControlSize */ + U16(0x0001), /* u16 bmaControls(0) */ + U16(0x0002), /* u16 bmaControls(1) */ + U16(0x0002), /* u16 bmaControls(2) */ + U16(0x0002), /* u16 bmaControls(3) */ + U16(0x0002), /* u16 bmaControls(4) */ + U16(0x0002), /* u16 bmaControls(5) */ + U16(0x0002), /* u16 bmaControls(6) */ + U16(0x0002), /* u16 bmaControls(7) */ + U16(0x0002), /* u16 bmaControls(8) */ + STRING_FEATURE_UNIT, /* u8 iFeature */ + } + },{ + /* Headphone Output Terminal ID3 Descriptor */ + .data = (uint8_t[]) { + 0x09, /* u8 bLength */ + USB_DT_CS_INTERFACE, /* u8 bDescriptorType */ + DST_AC_OUTPUT_TERMINAL, /* u8 bDescriptorSubtype */ + 0x03, /* u8 bUnitID */ + U16(0x0301), /* u16 wTerminalType (SPK) */ + 0x00, /* u8 bAssocTerminal */ + 0x02, /* u8 bSourceID */ + STRING_OUTPUT_TERMINAL, /* u8 iTerminal */ + } + } + }, + },{ + .bInterfaceNumber = 1, + .bAlternateSetting = ALTSET_OFF, + .bNumEndpoints = 0, + .bInterfaceClass = USB_CLASS_AUDIO, + .bInterfaceSubClass = USB_SUBCLASS_AUDIO_STREAMING, + .iInterface = STRING_NULL_STREAM, + },{ + .bInterfaceNumber = 1, + .bAlternateSetting = ALTSET_STEREO, + .bNumEndpoints = 1, + .bInterfaceClass = USB_CLASS_AUDIO, + .bInterfaceSubClass = USB_SUBCLASS_AUDIO_STREAMING, + .iInterface = STRING_REAL_STREAM, + .ndesc = 2, + .descs = (USBDescOther[]) { + { + /* Headphone Class-specific AS General Interface Descriptor */ + .data = (uint8_t[]) { + 0x07, /* u8 bLength */ + USB_DT_CS_INTERFACE, /* u8 bDescriptorType */ + DST_AS_GENERAL, /* u8 bDescriptorSubtype */ + 0x01, /* u8 bTerminalLink */ + 0x00, /* u8 bDelay */ + 0x01, 0x00, /* u16 wFormatTag */ + } + },{ + /* Headphone Type I Format Type Descriptor */ + .data = (uint8_t[]) { + 0x0b, /* u8 bLength */ + USB_DT_CS_INTERFACE, /* u8 bDescriptorType */ + DST_AS_FORMAT_TYPE, /* u8 bDescriptorSubtype */ + 0x01, /* u8 bFormatType */ + 0x02, /* u8 bNrChannels */ + 0x02, /* u8 bSubFrameSize */ + 0x10, /* u8 bBitResolution */ + 0x01, /* u8 bSamFreqType */ + U24(USBAUDIO_SAMPLE_RATE), /* u24 tSamFreq */ + } + } + }, + .eps = (USBDescEndpoint[]) { + { + .bEndpointAddress = USB_DIR_OUT | 0x01, + .bmAttributes = 0x0d, + .wMaxPacketSize = USBAUDIO_PACKET_SIZE(2), + .bInterval = 1, + .is_audio = 1, + /* Stereo Headphone Class-specific + AS Audio Data Endpoint Descriptor */ + .extra = (uint8_t[]) { + 0x07, /* u8 bLength */ + USB_DT_CS_ENDPOINT, /* u8 bDescriptorType */ + DST_EP_GENERAL, /* u8 bDescriptorSubtype */ + 0x00, /* u8 bmAttributes */ + 0x00, /* u8 bLockDelayUnits */ + U16(0x0000), /* u16 wLockDelay */ + }, + }, + } + },{ + .bInterfaceNumber = 1, + .bAlternateSetting = ALTSET_51, + .bNumEndpoints = 1, + .bInterfaceClass = USB_CLASS_AUDIO, + 
.bInterfaceSubClass = USB_SUBCLASS_AUDIO_STREAMING, + .iInterface = STRING_REAL_STREAM, + .ndesc = 2, + .descs = (USBDescOther[]) { + { + /* Headphone Class-specific AS General Interface Descriptor */ + .data = (uint8_t[]) { + 0x07, /* u8 bLength */ + USB_DT_CS_INTERFACE, /* u8 bDescriptorType */ + DST_AS_GENERAL, /* u8 bDescriptorSubtype */ + 0x01, /* u8 bTerminalLink */ + 0x00, /* u8 bDelay */ + 0x01, 0x00, /* u16 wFormatTag */ + } + },{ + /* Headphone Type I Format Type Descriptor */ + .data = (uint8_t[]) { + 0x0b, /* u8 bLength */ + USB_DT_CS_INTERFACE, /* u8 bDescriptorType */ + DST_AS_FORMAT_TYPE, /* u8 bDescriptorSubtype */ + 0x01, /* u8 bFormatType */ + 0x06, /* u8 bNrChannels */ + 0x02, /* u8 bSubFrameSize */ + 0x10, /* u8 bBitResolution */ + 0x01, /* u8 bSamFreqType */ + U24(USBAUDIO_SAMPLE_RATE), /* u24 tSamFreq */ + } + } + }, + .eps = (USBDescEndpoint[]) { + { + .bEndpointAddress = USB_DIR_OUT | 0x01, + .bmAttributes = 0x0d, + .wMaxPacketSize = USBAUDIO_PACKET_SIZE(6), + .bInterval = 1, + .is_audio = 1, + /* Stereo Headphone Class-specific + AS Audio Data Endpoint Descriptor */ + .extra = (uint8_t[]) { + 0x07, /* u8 bLength */ + USB_DT_CS_ENDPOINT, /* u8 bDescriptorType */ + DST_EP_GENERAL, /* u8 bDescriptorSubtype */ + 0x00, /* u8 bmAttributes */ + 0x00, /* u8 bLockDelayUnits */ + U16(0x0000), /* u16 wLockDelay */ + }, + }, + } + },{ + .bInterfaceNumber = 1, + .bAlternateSetting = ALTSET_71, + .bNumEndpoints = 1, + .bInterfaceClass = USB_CLASS_AUDIO, + .bInterfaceSubClass = USB_SUBCLASS_AUDIO_STREAMING, + .iInterface = STRING_REAL_STREAM, + .ndesc = 2, + .descs = (USBDescOther[]) { + { + /* Headphone Class-specific AS General Interface Descriptor */ + .data = (uint8_t[]) { + 0x07, /* u8 bLength */ + USB_DT_CS_INTERFACE, /* u8 bDescriptorType */ + DST_AS_GENERAL, /* u8 bDescriptorSubtype */ + 0x01, /* u8 bTerminalLink */ + 0x00, /* u8 bDelay */ + 0x01, 0x00, /* u16 wFormatTag */ + } + },{ + /* Headphone Type I Format Type Descriptor */ + .data = (uint8_t[]) { + 0x0b, /* u8 bLength */ + USB_DT_CS_INTERFACE, /* u8 bDescriptorType */ + DST_AS_FORMAT_TYPE, /* u8 bDescriptorSubtype */ + 0x01, /* u8 bFormatType */ + 0x08, /* u8 bNrChannels */ + 0x02, /* u8 bSubFrameSize */ + 0x10, /* u8 bBitResolution */ + 0x01, /* u8 bSamFreqType */ + U24(USBAUDIO_SAMPLE_RATE), /* u24 tSamFreq */ + } + } + }, + .eps = (USBDescEndpoint[]) { + { + .bEndpointAddress = USB_DIR_OUT | 0x01, + .bmAttributes = 0x0d, + .wMaxPacketSize = USBAUDIO_PACKET_SIZE(8), + .bInterval = 1, + .is_audio = 1, + /* Stereo Headphone Class-specific + AS Audio Data Endpoint Descriptor */ + .extra = (uint8_t[]) { + 0x07, /* u8 bLength */ + USB_DT_CS_ENDPOINT, /* u8 bDescriptorType */ + DST_EP_GENERAL, /* u8 bDescriptorSubtype */ + 0x00, /* u8 bmAttributes */ + 0x00, /* u8 bLockDelayUnits */ + U16(0x0000), /* u16 wLockDelay */ + }, + }, + } + } +}; + +static const USBDescDevice desc_device_multi = { + .bcdUSB = 0x0100, + .bMaxPacketSize0 = 64, + .bNumConfigurations = 1, + .confs = (USBDescConfig[]) { + { + .bNumInterfaces = 2, + .bConfigurationValue = DEV_CONFIG_VALUE, + .iConfiguration = STRING_CONFIG, + .bmAttributes = USB_CFG_ATT_ONE | USB_CFG_ATT_SELFPOWER, + .bMaxPower = 0x32, + .nif = ARRAY_SIZE(desc_iface_multi), + .ifs = desc_iface_multi, + } + }, +}; + +static const USBDesc desc_audio_multi = { + .id = { + .idVendor = USBAUDIO_VENDOR_NUM, + .idProduct = USBAUDIO_PRODUCT_NUM, + .bcdDevice = 0, + .iManufacturer = STRING_MANUFACTURER, + .iProduct = STRING_PRODUCT, + .iSerialNumber = STRING_SERIALNUMBER, + }, + .full = 
&desc_device_multi, + .str = usb_audio_stringtable, +}; + +/* + * Class-specific control requests + */ +#define CR_SET_CUR 0x01 +#define CR_GET_CUR 0x81 +#define CR_SET_MIN 0x02 +#define CR_GET_MIN 0x82 +#define CR_SET_MAX 0x03 +#define CR_GET_MAX 0x83 +#define CR_SET_RES 0x04 +#define CR_GET_RES 0x84 +#define CR_SET_MEM 0x05 +#define CR_GET_MEM 0x85 +#define CR_GET_STAT 0xff + +/* + * Feature Unit Control Selectors + */ +#define MUTE_CONTROL 0x01 +#define VOLUME_CONTROL 0x02 +#define BASS_CONTROL 0x03 +#define MID_CONTROL 0x04 +#define TREBLE_CONTROL 0x05 +#define GRAPHIC_EQUALIZER_CONTROL 0x06 +#define AUTOMATIC_GAIN_CONTROL 0x07 +#define DELAY_CONTROL 0x08 +#define BASS_BOOST_CONTROL 0x09 +#define LOUDNESS_CONTROL 0x0a + +/* + * buffering + */ + +struct streambuf { + uint8_t *data; + size_t size; + uint64_t prod; + uint64_t cons; +}; + +static void streambuf_init(struct streambuf *buf, uint32_t size, + uint32_t channels) +{ + g_free(buf->data); + buf->size = size - (size % USBAUDIO_PACKET_SIZE(channels)); + buf->data = g_malloc(buf->size); + buf->prod = 0; + buf->cons = 0; +} + +static void streambuf_fini(struct streambuf *buf) +{ + g_free(buf->data); + buf->data = NULL; +} + +static int streambuf_put(struct streambuf *buf, USBPacket *p, uint32_t channels) +{ + int64_t free = buf->size - (buf->prod - buf->cons); + + if (free < USBAUDIO_PACKET_SIZE(channels)) { + return 0; + } + if (p->iov.size != USBAUDIO_PACKET_SIZE(channels)) { + return 0; + } + + /* can happen if prod overflows */ + assert(buf->prod % USBAUDIO_PACKET_SIZE(channels) == 0); + usb_packet_copy(p, buf->data + (buf->prod % buf->size), + USBAUDIO_PACKET_SIZE(channels)); + buf->prod += USBAUDIO_PACKET_SIZE(channels); + return USBAUDIO_PACKET_SIZE(channels); +} + +static uint8_t *streambuf_get(struct streambuf *buf, size_t *len) +{ + int64_t used = buf->prod - buf->cons; + uint8_t *data; + + if (used <= 0) { + *len = 0; + return NULL; + } + data = buf->data + (buf->cons % buf->size); + *len = MIN(buf->prod - buf->cons, + buf->size - (buf->cons % buf->size)); + return data; +} + +struct USBAudioState { + /* qemu interfaces */ + USBDevice dev; + QEMUSoundCard card; + + /* state */ + struct { + enum usb_audio_altset altset; + struct audsettings as; + SWVoiceOut *voice; + Volume vol; + struct streambuf buf; + uint32_t channels; + } out; + + /* properties */ + uint32_t debug; + uint32_t buffer_user, buffer; + bool multi; +}; + +#define TYPE_USB_AUDIO "usb-audio" +OBJECT_DECLARE_SIMPLE_TYPE(USBAudioState, USB_AUDIO) + +static void output_callback(void *opaque, int avail) +{ + USBAudioState *s = opaque; + uint8_t *data; + + while (avail) { + size_t written, len; + + data = streambuf_get(&s->out.buf, &len); + if (!data) { + return; + } + + written = AUD_write(s->out.voice, data, len); + avail -= written; + s->out.buf.cons += written; + + if (written < len) { + return; + } + } +} + +static int usb_audio_set_output_altset(USBAudioState *s, int altset) +{ + switch (altset) { + case ALTSET_OFF: + AUD_set_active_out(s->out.voice, false); + break; + case ALTSET_STEREO: + case ALTSET_51: + case ALTSET_71: + if (s->out.channels != altset_channels[altset]) { + usb_audio_reinit(USB_DEVICE(s), altset_channels[altset]); + } + streambuf_init(&s->out.buf, s->buffer, s->out.channels); + AUD_set_active_out(s->out.voice, true); + break; + default: + return -1; + } + + if (s->debug) { + fprintf(stderr, "usb-audio: set interface %d\n", altset); + } + s->out.altset = altset; + return 0; +} + +/* + * Note: we arbitrarily map the volume control range onto 
-inf..+8 dB + */ +#define ATTRIB_ID(cs, attrib, idif) \ + (((cs) << 24) | ((attrib) << 16) | (idif)) + +static int usb_audio_get_control(USBAudioState *s, uint8_t attrib, + uint16_t cscn, uint16_t idif, + int length, uint8_t *data) +{ + uint8_t cs = cscn >> 8; + uint8_t cn = cscn - 1; /* -1 for the non-present master control */ + uint32_t aid = ATTRIB_ID(cs, attrib, idif); + int ret = USB_RET_STALL; + + switch (aid) { + case ATTRIB_ID(MUTE_CONTROL, CR_GET_CUR, 0x0200): + data[0] = s->out.vol.mute; + ret = 1; + break; + case ATTRIB_ID(VOLUME_CONTROL, CR_GET_CUR, 0x0200): + if (cn < USBAUDIO_MAX_CHANNELS(s)) { + uint16_t vol = (s->out.vol.vol[cn] * 0x8800 + 127) / 255 + 0x8000; + data[0] = vol; + data[1] = vol >> 8; + ret = 2; + } + break; + case ATTRIB_ID(VOLUME_CONTROL, CR_GET_MIN, 0x0200): + if (cn < USBAUDIO_MAX_CHANNELS(s)) { + data[0] = 0x01; + data[1] = 0x80; + ret = 2; + } + break; + case ATTRIB_ID(VOLUME_CONTROL, CR_GET_MAX, 0x0200): + if (cn < USBAUDIO_MAX_CHANNELS(s)) { + data[0] = 0x00; + data[1] = 0x08; + ret = 2; + } + break; + case ATTRIB_ID(VOLUME_CONTROL, CR_GET_RES, 0x0200): + if (cn < USBAUDIO_MAX_CHANNELS(s)) { + data[0] = 0x88; + data[1] = 0x00; + ret = 2; + } + break; + } + + return ret; +} +static int usb_audio_set_control(USBAudioState *s, uint8_t attrib, + uint16_t cscn, uint16_t idif, + int length, uint8_t *data) +{ + uint8_t cs = cscn >> 8; + uint8_t cn = cscn - 1; /* -1 for the non-present master control */ + uint32_t aid = ATTRIB_ID(cs, attrib, idif); + int ret = USB_RET_STALL; + bool set_vol = false; + + switch (aid) { + case ATTRIB_ID(MUTE_CONTROL, CR_SET_CUR, 0x0200): + s->out.vol.mute = data[0] & 1; + set_vol = true; + ret = 0; + break; + case ATTRIB_ID(VOLUME_CONTROL, CR_SET_CUR, 0x0200): + if (cn < USBAUDIO_MAX_CHANNELS(s)) { + uint16_t vol = data[0] + (data[1] << 8); + + if (s->debug) { + fprintf(stderr, "usb-audio: cn %d vol %04x\n", cn, + (uint16_t)vol); + } + + vol -= 0x8000; + vol = (vol * 255 + 0x4400) / 0x8800; + if (vol > 255) { + vol = 255; + } + + s->out.vol.vol[cn] = vol; + set_vol = true; + ret = 0; + } + break; + } + + if (set_vol) { + if (s->debug) { + int i; + fprintf(stderr, "usb-audio: mute %d", s->out.vol.mute); + for (i = 0; i < USBAUDIO_MAX_CHANNELS(s); ++i) { + fprintf(stderr, ", vol[%d] %3d", i, s->out.vol.vol[i]); + } + fprintf(stderr, "\n"); + } + audio_set_volume_out(s->out.voice, &s->out.vol); + } + + return ret; +} + +static void usb_audio_handle_control(USBDevice *dev, USBPacket *p, + int request, int value, int index, + int length, uint8_t *data) +{ + USBAudioState *s = USB_AUDIO(dev); + int ret = 0; + + if (s->debug) { + fprintf(stderr, "usb-audio: control transaction: " + "request 0x%04x value 0x%04x index 0x%04x length 0x%04x\n", + request, value, index, length); + } + + ret = usb_desc_handle_control(dev, p, request, value, index, length, data); + if (ret >= 0) { + return; + } + + switch (request) { + case ClassInterfaceRequest | CR_GET_CUR: + case ClassInterfaceRequest | CR_GET_MIN: + case ClassInterfaceRequest | CR_GET_MAX: + case ClassInterfaceRequest | CR_GET_RES: + ret = usb_audio_get_control(s, request & 0xff, value, index, + length, data); + if (ret < 0) { + if (s->debug) { + fprintf(stderr, "usb-audio: fail: get control\n"); + } + goto fail; + } + p->actual_length = ret; + break; + + case ClassInterfaceOutRequest | CR_SET_CUR: + case ClassInterfaceOutRequest | CR_SET_MIN: + case ClassInterfaceOutRequest | CR_SET_MAX: + case ClassInterfaceOutRequest | CR_SET_RES: + ret = usb_audio_set_control(s, request & 0xff, value, 
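+                /*
+                 * 'value' packs CS<<8|CN; for VOLUME_CONTROL the payload is
+                 * a signed 16 bit value in 1/256 dB units per the USB audio
+                 * class spec.  The get handlers above advertise MIN 0x8001
+                 * (~ -128 dB), MAX 0x0800 (+8 dB) and RES 0x0088 (0.53 dB).
+                 * E.g. setting CUR 0x0800 wraps to 0x8800 after the -0x8000
+                 * bias and (0x8800 * 255 + 0x4400) / 0x8800 = 255, i.e.
+                 * full scale on QEMU's 0..255 volume scale.
+                 */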
index, + length, data); + if (ret < 0) { + if (s->debug) { + fprintf(stderr, "usb-audio: fail: set control\n"); + } + goto fail; + } + break; + + default: +fail: + if (s->debug) { + fprintf(stderr, "usb-audio: failed control transaction: " + "request 0x%04x value 0x%04x index 0x%04x length 0x%04x\n", + request, value, index, length); + } + p->status = USB_RET_STALL; + break; + } +} + +static void usb_audio_set_interface(USBDevice *dev, int iface, + int old, int value) +{ + USBAudioState *s = USB_AUDIO(dev); + + if (iface == 1) { + usb_audio_set_output_altset(s, value); + } +} + +static void usb_audio_handle_reset(USBDevice *dev) +{ + USBAudioState *s = USB_AUDIO(dev); + + if (s->debug) { + fprintf(stderr, "usb-audio: reset\n"); + } + usb_audio_set_output_altset(s, ALTSET_OFF); +} + +static void usb_audio_handle_dataout(USBAudioState *s, USBPacket *p) +{ + if (s->out.altset == ALTSET_OFF) { + p->status = USB_RET_STALL; + return; + } + + streambuf_put(&s->out.buf, p, s->out.channels); + if (p->actual_length < p->iov.size && s->debug > 1) { + fprintf(stderr, "usb-audio: output overrun (%zd bytes)\n", + p->iov.size - p->actual_length); + } +} + +static void usb_audio_handle_data(USBDevice *dev, USBPacket *p) +{ + USBAudioState *s = (USBAudioState *) dev; + + if (p->pid == USB_TOKEN_OUT && p->ep->nr == 1) { + usb_audio_handle_dataout(s, p); + return; + } + + p->status = USB_RET_STALL; + if (s->debug) { + fprintf(stderr, "usb-audio: failed data transaction: " + "pid 0x%x ep 0x%x len 0x%zx\n", + p->pid, p->ep->nr, p->iov.size); + } +} + +static void usb_audio_unrealize(USBDevice *dev) +{ + USBAudioState *s = USB_AUDIO(dev); + + if (s->debug) { + fprintf(stderr, "usb-audio: destroy\n"); + } + + usb_audio_set_output_altset(s, ALTSET_OFF); + AUD_close_out(&s->card, s->out.voice); + AUD_remove_card(&s->card); + + streambuf_fini(&s->out.buf); +} + +static void usb_audio_realize(USBDevice *dev, Error **errp) +{ + USBAudioState *s = USB_AUDIO(dev); + int i; + + dev->usb_desc = s->multi ? 
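+                    /*
+                     * The "multi" property selects the descriptor set that
+                     * adds the 5.1/7.1 alt settings (ALTSET_51/ALTSET_71)
+                     * on top of plain stereo.
+                     */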
&desc_audio_multi : &desc_audio; + + usb_desc_create_serial(dev); + usb_desc_init(dev); + s->dev.opaque = s; + AUD_register_card(TYPE_USB_AUDIO, &s->card); + + s->out.altset = ALTSET_OFF; + s->out.vol.mute = false; + for (i = 0; i < USBAUDIO_MAX_CHANNELS(s); ++i) { + s->out.vol.vol[i] = 240; /* 0 dB */ + } + + usb_audio_reinit(dev, 2); +} + +static void usb_audio_reinit(USBDevice *dev, unsigned channels) +{ + USBAudioState *s = USB_AUDIO(dev); + + s->out.channels = channels; + if (!s->buffer_user) { + s->buffer = 32 * USBAUDIO_PACKET_SIZE(s->out.channels); + } else { + s->buffer = s->buffer_user; + } + + s->out.vol.channels = s->out.channels; + s->out.as.freq = USBAUDIO_SAMPLE_RATE; + s->out.as.nchannels = s->out.channels; + s->out.as.fmt = AUDIO_FORMAT_S16; + s->out.as.endianness = 0; + streambuf_init(&s->out.buf, s->buffer, s->out.channels); + + s->out.voice = AUD_open_out(&s->card, s->out.voice, TYPE_USB_AUDIO, + s, output_callback, &s->out.as); + audio_set_volume_out(s->out.voice, &s->out.vol); + AUD_set_active_out(s->out.voice, 0); +} + +static const VMStateDescription vmstate_usb_audio = { + .name = TYPE_USB_AUDIO, + .unmigratable = 1, +}; + +static Property usb_audio_properties[] = { + DEFINE_AUDIO_PROPERTIES(USBAudioState, card), + DEFINE_PROP_UINT32("debug", USBAudioState, debug, 0), + DEFINE_PROP_UINT32("buffer", USBAudioState, buffer_user, 0), + DEFINE_PROP_BOOL("multi", USBAudioState, multi, false), + DEFINE_PROP_END_OF_LIST(), +}; + +static void usb_audio_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + USBDeviceClass *k = USB_DEVICE_CLASS(klass); + + dc->vmsd = &vmstate_usb_audio; + device_class_set_props(dc, usb_audio_properties); + set_bit(DEVICE_CATEGORY_SOUND, dc->categories); + k->product_desc = "QEMU USB Audio Interface"; + k->realize = usb_audio_realize; + k->handle_reset = usb_audio_handle_reset; + k->handle_control = usb_audio_handle_control; + k->handle_data = usb_audio_handle_data; + k->unrealize = usb_audio_unrealize; + k->set_interface = usb_audio_set_interface; +} + +static const TypeInfo usb_audio_info = { + .name = TYPE_USB_AUDIO, + .parent = TYPE_USB_DEVICE, + .instance_size = sizeof(USBAudioState), + .class_init = usb_audio_class_init, +}; + +static void usb_audio_register_types(void) +{ + type_register_static(&usb_audio_info); +} + +type_init(usb_audio_register_types) diff --git a/hw/usb/dev-hid.c b/hw/usb/dev-hid.c new file mode 100644 index 000000000..1c7ae97c3 --- /dev/null +++ b/hw/usb/dev-hid.c @@ -0,0 +1,879 @@ +/* + * QEMU USB HID devices + * + * Copyright (c) 2005 Fabrice Bellard + * Copyright (c) 2007 OpenMoko, Inc. (andrew@openedhand.com) + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "ui/console.h" +#include "hw/usb.h" +#include "migration/vmstate.h" +#include "desc.h" +#include "qapi/error.h" +#include "qemu/module.h" +#include "qemu/timer.h" +#include "hw/input/hid.h" +#include "hw/usb/hid.h" +#include "hw/qdev-properties.h" +#include "qom/object.h" + +struct USBHIDState { + USBDevice dev; + USBEndpoint *intr; + HIDState hid; + uint32_t usb_version; + char *display; + uint32_t head; +}; + +#define TYPE_USB_HID "usb-hid" +OBJECT_DECLARE_SIMPLE_TYPE(USBHIDState, USB_HID) + +enum { + STR_MANUFACTURER = 1, + STR_PRODUCT_MOUSE, + STR_PRODUCT_TABLET, + STR_PRODUCT_KEYBOARD, + STR_SERIAL_COMPAT, + STR_CONFIG_MOUSE, + STR_CONFIG_TABLET, + STR_CONFIG_KEYBOARD, + STR_SERIAL_MOUSE, + STR_SERIAL_TABLET, + STR_SERIAL_KEYBOARD, +}; + +static const USBDescStrings desc_strings = { + [STR_MANUFACTURER] = "QEMU", + [STR_PRODUCT_MOUSE] = "QEMU USB Mouse", + [STR_PRODUCT_TABLET] = "QEMU USB Tablet", + [STR_PRODUCT_KEYBOARD] = "QEMU USB Keyboard", + [STR_SERIAL_COMPAT] = "42", + [STR_CONFIG_MOUSE] = "HID Mouse", + [STR_CONFIG_TABLET] = "HID Tablet", + [STR_CONFIG_KEYBOARD] = "HID Keyboard", + [STR_SERIAL_MOUSE] = "89126", + [STR_SERIAL_TABLET] = "28754", + [STR_SERIAL_KEYBOARD] = "68284", +}; + +static const USBDescIface desc_iface_mouse = { + .bInterfaceNumber = 0, + .bNumEndpoints = 1, + .bInterfaceClass = USB_CLASS_HID, + .bInterfaceSubClass = 0x01, /* boot */ + .bInterfaceProtocol = 0x02, + .ndesc = 1, + .descs = (USBDescOther[]) { + { + /* HID descriptor */ + .data = (uint8_t[]) { + 0x09, /* u8 bLength */ + USB_DT_HID, /* u8 bDescriptorType */ + 0x01, 0x00, /* u16 HID_class */ + 0x00, /* u8 country_code */ + 0x01, /* u8 num_descriptors */ + USB_DT_REPORT, /* u8 type: Report */ + 52, 0, /* u16 len */ + }, + }, + }, + .eps = (USBDescEndpoint[]) { + { + .bEndpointAddress = USB_DIR_IN | 0x01, + .bmAttributes = USB_ENDPOINT_XFER_INT, + .wMaxPacketSize = 4, + .bInterval = 0x0a, + }, + }, +}; + +static const USBDescIface desc_iface_mouse2 = { + .bInterfaceNumber = 0, + .bNumEndpoints = 1, + .bInterfaceClass = USB_CLASS_HID, + .bInterfaceSubClass = 0x01, /* boot */ + .bInterfaceProtocol = 0x02, + .ndesc = 1, + .descs = (USBDescOther[]) { + { + /* HID descriptor */ + .data = (uint8_t[]) { + 0x09, /* u8 bLength */ + USB_DT_HID, /* u8 bDescriptorType */ + 0x01, 0x00, /* u16 HID_class */ + 0x00, /* u8 country_code */ + 0x01, /* u8 num_descriptors */ + USB_DT_REPORT, /* u8 type: Report */ + 52, 0, /* u16 len */ + }, + }, + }, + .eps = (USBDescEndpoint[]) { + { + .bEndpointAddress = USB_DIR_IN | 0x01, + .bmAttributes = USB_ENDPOINT_XFER_INT, + .wMaxPacketSize = 4, + .bInterval = 7, /* 2 ^ (7-1) * 125 usecs = 8 ms */ + }, + }, +}; + +static const USBDescIface desc_iface_tablet = { + .bInterfaceNumber = 0, + .bNumEndpoints = 1, + .bInterfaceClass = USB_CLASS_HID, + .bInterfaceProtocol = 0x00, + .ndesc = 1, + .descs = (USBDescOther[]) { + { + /* HID descriptor */ + .data = (uint8_t[]) { + 0x09, /* u8 bLength */ + USB_DT_HID, /* u8 bDescriptorType */ + 0x01, 0x00, /* u16 HID_class */ + 0x00, /* u8 country_code */ + 0x01, /* u8 num_descriptors */ + USB_DT_REPORT, /* u8 type: Report */ + 74, 0, /* u16 len */ + }, + }, + }, + .eps = (USBDescEndpoint[]) { + { + .bEndpointAddress = USB_DIR_IN | 
0x01, + .bmAttributes = USB_ENDPOINT_XFER_INT, + .wMaxPacketSize = 8, + .bInterval = 0x0a, + }, + }, +}; + +static const USBDescIface desc_iface_tablet2 = { + .bInterfaceNumber = 0, + .bNumEndpoints = 1, + .bInterfaceClass = USB_CLASS_HID, + .bInterfaceProtocol = 0x00, + .ndesc = 1, + .descs = (USBDescOther[]) { + { + /* HID descriptor */ + .data = (uint8_t[]) { + 0x09, /* u8 bLength */ + USB_DT_HID, /* u8 bDescriptorType */ + 0x01, 0x00, /* u16 HID_class */ + 0x00, /* u8 country_code */ + 0x01, /* u8 num_descriptors */ + USB_DT_REPORT, /* u8 type: Report */ + 74, 0, /* u16 len */ + }, + }, + }, + .eps = (USBDescEndpoint[]) { + { + .bEndpointAddress = USB_DIR_IN | 0x01, + .bmAttributes = USB_ENDPOINT_XFER_INT, + .wMaxPacketSize = 8, + .bInterval = 4, /* 2 ^ (4-1) * 125 usecs = 1 ms */ + }, + }, +}; + +static const USBDescIface desc_iface_keyboard = { + .bInterfaceNumber = 0, + .bNumEndpoints = 1, + .bInterfaceClass = USB_CLASS_HID, + .bInterfaceSubClass = 0x01, /* boot */ + .bInterfaceProtocol = 0x01, /* keyboard */ + .ndesc = 1, + .descs = (USBDescOther[]) { + { + /* HID descriptor */ + .data = (uint8_t[]) { + 0x09, /* u8 bLength */ + USB_DT_HID, /* u8 bDescriptorType */ + 0x11, 0x01, /* u16 HID_class */ + 0x00, /* u8 country_code */ + 0x01, /* u8 num_descriptors */ + USB_DT_REPORT, /* u8 type: Report */ + 0x3f, 0, /* u16 len */ + }, + }, + }, + .eps = (USBDescEndpoint[]) { + { + .bEndpointAddress = USB_DIR_IN | 0x01, + .bmAttributes = USB_ENDPOINT_XFER_INT, + .wMaxPacketSize = 8, + .bInterval = 0x0a, + }, + }, +}; + +static const USBDescIface desc_iface_keyboard2 = { + .bInterfaceNumber = 0, + .bNumEndpoints = 1, + .bInterfaceClass = USB_CLASS_HID, + .bInterfaceSubClass = 0x01, /* boot */ + .bInterfaceProtocol = 0x01, /* keyboard */ + .ndesc = 1, + .descs = (USBDescOther[]) { + { + /* HID descriptor */ + .data = (uint8_t[]) { + 0x09, /* u8 bLength */ + USB_DT_HID, /* u8 bDescriptorType */ + 0x11, 0x01, /* u16 HID_class */ + 0x00, /* u8 country_code */ + 0x01, /* u8 num_descriptors */ + USB_DT_REPORT, /* u8 type: Report */ + 0x3f, 0, /* u16 len */ + }, + }, + }, + .eps = (USBDescEndpoint[]) { + { + .bEndpointAddress = USB_DIR_IN | 0x01, + .bmAttributes = USB_ENDPOINT_XFER_INT, + .wMaxPacketSize = 8, + .bInterval = 7, /* 2 ^ (7-1) * 125 usecs = 8 ms */ + }, + }, +}; + +static const USBDescDevice desc_device_mouse = { + .bcdUSB = 0x0100, + .bMaxPacketSize0 = 8, + .bNumConfigurations = 1, + .confs = (USBDescConfig[]) { + { + .bNumInterfaces = 1, + .bConfigurationValue = 1, + .iConfiguration = STR_CONFIG_MOUSE, + .bmAttributes = USB_CFG_ATT_ONE | USB_CFG_ATT_WAKEUP, + .bMaxPower = 50, + .nif = 1, + .ifs = &desc_iface_mouse, + }, + }, +}; + +static const USBDescDevice desc_device_mouse2 = { + .bcdUSB = 0x0200, + .bMaxPacketSize0 = 64, + .bNumConfigurations = 1, + .confs = (USBDescConfig[]) { + { + .bNumInterfaces = 1, + .bConfigurationValue = 1, + .iConfiguration = STR_CONFIG_MOUSE, + .bmAttributes = USB_CFG_ATT_ONE | USB_CFG_ATT_WAKEUP, + .bMaxPower = 50, + .nif = 1, + .ifs = &desc_iface_mouse2, + }, + }, +}; + +static const USBDescDevice desc_device_tablet = { + .bcdUSB = 0x0100, + .bMaxPacketSize0 = 8, + .bNumConfigurations = 1, + .confs = (USBDescConfig[]) { + { + .bNumInterfaces = 1, + .bConfigurationValue = 1, + .iConfiguration = STR_CONFIG_TABLET, + .bmAttributes = USB_CFG_ATT_ONE | USB_CFG_ATT_WAKEUP, + .bMaxPower = 50, + .nif = 1, + .ifs = &desc_iface_tablet, + }, + }, +}; + +static const USBDescDevice desc_device_tablet2 = { + .bcdUSB = 0x0200, + .bMaxPacketSize0 = 64, + 
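+    /*
+     * High speed requires a 64 byte ep0 max packet size.  Note the
+     * interval encoding differs per speed, too: full speed bInterval
+     * is in 1 ms frames (0x0a = 10 ms polling), while high speed
+     * bInterval is an exponent for 125 us microframes, 2^(bInterval-1),
+     * e.g. 7 -> 64 * 125 us = 8 ms and 4 -> 8 * 125 us = 1 ms above.
+     */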
.bNumConfigurations = 1, + .confs = (USBDescConfig[]) { + { + .bNumInterfaces = 1, + .bConfigurationValue = 1, + .iConfiguration = STR_CONFIG_TABLET, + .bmAttributes = USB_CFG_ATT_ONE | USB_CFG_ATT_WAKEUP, + .bMaxPower = 50, + .nif = 1, + .ifs = &desc_iface_tablet2, + }, + }, +}; + +static const USBDescDevice desc_device_keyboard = { + .bcdUSB = 0x0100, + .bMaxPacketSize0 = 8, + .bNumConfigurations = 1, + .confs = (USBDescConfig[]) { + { + .bNumInterfaces = 1, + .bConfigurationValue = 1, + .iConfiguration = STR_CONFIG_KEYBOARD, + .bmAttributes = USB_CFG_ATT_ONE | USB_CFG_ATT_WAKEUP, + .bMaxPower = 50, + .nif = 1, + .ifs = &desc_iface_keyboard, + }, + }, +}; + +static const USBDescDevice desc_device_keyboard2 = { + .bcdUSB = 0x0200, + .bMaxPacketSize0 = 64, + .bNumConfigurations = 1, + .confs = (USBDescConfig[]) { + { + .bNumInterfaces = 1, + .bConfigurationValue = 1, + .iConfiguration = STR_CONFIG_KEYBOARD, + .bmAttributes = USB_CFG_ATT_ONE | USB_CFG_ATT_WAKEUP, + .bMaxPower = 50, + .nif = 1, + .ifs = &desc_iface_keyboard2, + }, + }, +}; + +static const USBDescMSOS desc_msos_suspend = { + .SelectiveSuspendEnabled = true, +}; + +static const USBDesc desc_mouse = { + .id = { + .idVendor = 0x0627, + .idProduct = 0x0001, + .bcdDevice = 0, + .iManufacturer = STR_MANUFACTURER, + .iProduct = STR_PRODUCT_MOUSE, + .iSerialNumber = STR_SERIAL_MOUSE, + }, + .full = &desc_device_mouse, + .str = desc_strings, + .msos = &desc_msos_suspend, +}; + +static const USBDesc desc_mouse2 = { + .id = { + .idVendor = 0x0627, + .idProduct = 0x0001, + .bcdDevice = 0, + .iManufacturer = STR_MANUFACTURER, + .iProduct = STR_PRODUCT_MOUSE, + .iSerialNumber = STR_SERIAL_MOUSE, + }, + .full = &desc_device_mouse, + .high = &desc_device_mouse2, + .str = desc_strings, + .msos = &desc_msos_suspend, +}; + +static const USBDesc desc_tablet = { + .id = { + .idVendor = 0x0627, + .idProduct = 0x0001, + .bcdDevice = 0, + .iManufacturer = STR_MANUFACTURER, + .iProduct = STR_PRODUCT_TABLET, + .iSerialNumber = STR_SERIAL_TABLET, + }, + .full = &desc_device_tablet, + .str = desc_strings, + .msos = &desc_msos_suspend, +}; + +static const USBDesc desc_tablet2 = { + .id = { + .idVendor = 0x0627, + .idProduct = 0x0001, + .bcdDevice = 0, + .iManufacturer = STR_MANUFACTURER, + .iProduct = STR_PRODUCT_TABLET, + .iSerialNumber = STR_SERIAL_TABLET, + }, + .full = &desc_device_tablet, + .high = &desc_device_tablet2, + .str = desc_strings, + .msos = &desc_msos_suspend, +}; + +static const USBDesc desc_keyboard = { + .id = { + .idVendor = 0x0627, + .idProduct = 0x0001, + .bcdDevice = 0, + .iManufacturer = STR_MANUFACTURER, + .iProduct = STR_PRODUCT_KEYBOARD, + .iSerialNumber = STR_SERIAL_KEYBOARD, + }, + .full = &desc_device_keyboard, + .str = desc_strings, + .msos = &desc_msos_suspend, +}; + +static const USBDesc desc_keyboard2 = { + .id = { + .idVendor = 0x0627, + .idProduct = 0x0001, + .bcdDevice = 0, + .iManufacturer = STR_MANUFACTURER, + .iProduct = STR_PRODUCT_KEYBOARD, + .iSerialNumber = STR_SERIAL_KEYBOARD, + }, + .full = &desc_device_keyboard, + .high = &desc_device_keyboard2, + .str = desc_strings, + .msos = &desc_msos_suspend, +}; + +static const uint8_t qemu_mouse_hid_report_descriptor[] = { + 0x05, 0x01, /* Usage Page (Generic Desktop) */ + 0x09, 0x02, /* Usage (Mouse) */ + 0xa1, 0x01, /* Collection (Application) */ + 0x09, 0x01, /* Usage (Pointer) */ + 0xa1, 0x00, /* Collection (Physical) */ + 0x05, 0x09, /* Usage Page (Button) */ + 0x19, 0x01, /* Usage Minimum (1) */ + 0x29, 0x03, /* Usage Maximum (3) */ + 0x15, 0x00, /* Logical 
Minimum (0) */ + 0x25, 0x01, /* Logical Maximum (1) */ + 0x95, 0x03, /* Report Count (3) */ + 0x75, 0x01, /* Report Size (1) */ + 0x81, 0x02, /* Input (Data, Variable, Absolute) */ + 0x95, 0x01, /* Report Count (1) */ + 0x75, 0x05, /* Report Size (5) */ + 0x81, 0x01, /* Input (Constant) */ + 0x05, 0x01, /* Usage Page (Generic Desktop) */ + 0x09, 0x30, /* Usage (X) */ + 0x09, 0x31, /* Usage (Y) */ + 0x09, 0x38, /* Usage (Wheel) */ + 0x15, 0x81, /* Logical Minimum (-0x7f) */ + 0x25, 0x7f, /* Logical Maximum (0x7f) */ + 0x75, 0x08, /* Report Size (8) */ + 0x95, 0x03, /* Report Count (3) */ + 0x81, 0x06, /* Input (Data, Variable, Relative) */ + 0xc0, /* End Collection */ + 0xc0, /* End Collection */ +}; + +static const uint8_t qemu_tablet_hid_report_descriptor[] = { + 0x05, 0x01, /* Usage Page (Generic Desktop) */ + 0x09, 0x02, /* Usage (Mouse) */ + 0xa1, 0x01, /* Collection (Application) */ + 0x09, 0x01, /* Usage (Pointer) */ + 0xa1, 0x00, /* Collection (Physical) */ + 0x05, 0x09, /* Usage Page (Button) */ + 0x19, 0x01, /* Usage Minimum (1) */ + 0x29, 0x03, /* Usage Maximum (3) */ + 0x15, 0x00, /* Logical Minimum (0) */ + 0x25, 0x01, /* Logical Maximum (1) */ + 0x95, 0x03, /* Report Count (3) */ + 0x75, 0x01, /* Report Size (1) */ + 0x81, 0x02, /* Input (Data, Variable, Absolute) */ + 0x95, 0x01, /* Report Count (1) */ + 0x75, 0x05, /* Report Size (5) */ + 0x81, 0x01, /* Input (Constant) */ + 0x05, 0x01, /* Usage Page (Generic Desktop) */ + 0x09, 0x30, /* Usage (X) */ + 0x09, 0x31, /* Usage (Y) */ + 0x15, 0x00, /* Logical Minimum (0) */ + 0x26, 0xff, 0x7f, /* Logical Maximum (0x7fff) */ + 0x35, 0x00, /* Physical Minimum (0) */ + 0x46, 0xff, 0x7f, /* Physical Maximum (0x7fff) */ + 0x75, 0x10, /* Report Size (16) */ + 0x95, 0x02, /* Report Count (2) */ + 0x81, 0x02, /* Input (Data, Variable, Absolute) */ + 0x05, 0x01, /* Usage Page (Generic Desktop) */ + 0x09, 0x38, /* Usage (Wheel) */ + 0x15, 0x81, /* Logical Minimum (-0x7f) */ + 0x25, 0x7f, /* Logical Maximum (0x7f) */ + 0x35, 0x00, /* Physical Minimum (same as logical) */ + 0x45, 0x00, /* Physical Maximum (same as logical) */ + 0x75, 0x08, /* Report Size (8) */ + 0x95, 0x01, /* Report Count (1) */ + 0x81, 0x06, /* Input (Data, Variable, Relative) */ + 0xc0, /* End Collection */ + 0xc0, /* End Collection */ +}; + +static const uint8_t qemu_keyboard_hid_report_descriptor[] = { + 0x05, 0x01, /* Usage Page (Generic Desktop) */ + 0x09, 0x06, /* Usage (Keyboard) */ + 0xa1, 0x01, /* Collection (Application) */ + 0x75, 0x01, /* Report Size (1) */ + 0x95, 0x08, /* Report Count (8) */ + 0x05, 0x07, /* Usage Page (Key Codes) */ + 0x19, 0xe0, /* Usage Minimum (224) */ + 0x29, 0xe7, /* Usage Maximum (231) */ + 0x15, 0x00, /* Logical Minimum (0) */ + 0x25, 0x01, /* Logical Maximum (1) */ + 0x81, 0x02, /* Input (Data, Variable, Absolute) */ + 0x95, 0x01, /* Report Count (1) */ + 0x75, 0x08, /* Report Size (8) */ + 0x81, 0x01, /* Input (Constant) */ + 0x95, 0x05, /* Report Count (5) */ + 0x75, 0x01, /* Report Size (1) */ + 0x05, 0x08, /* Usage Page (LEDs) */ + 0x19, 0x01, /* Usage Minimum (1) */ + 0x29, 0x05, /* Usage Maximum (5) */ + 0x91, 0x02, /* Output (Data, Variable, Absolute) */ + 0x95, 0x01, /* Report Count (1) */ + 0x75, 0x03, /* Report Size (3) */ + 0x91, 0x01, /* Output (Constant) */ + 0x95, 0x06, /* Report Count (6) */ + 0x75, 0x08, /* Report Size (8) */ + 0x15, 0x00, /* Logical Minimum (0) */ + 0x25, 0xff, /* Logical Maximum (255) */ + 0x05, 0x07, /* Usage Page (Key Codes) */ + 0x19, 0x00, /* Usage Minimum (0) */ + 0x29, 0xff, /* Usage Maximum 
(255) */ + 0x81, 0x00, /* Input (Data, Array) */ + 0xc0, /* End Collection */ +}; + +static void usb_hid_changed(HIDState *hs) +{ + USBHIDState *us = container_of(hs, USBHIDState, hid); + + usb_wakeup(us->intr, 0); +} + +static void usb_hid_handle_reset(USBDevice *dev) +{ + USBHIDState *us = USB_HID(dev); + + hid_reset(&us->hid); +} + +static void usb_hid_handle_control(USBDevice *dev, USBPacket *p, + int request, int value, int index, int length, uint8_t *data) +{ + USBHIDState *us = USB_HID(dev); + HIDState *hs = &us->hid; + int ret; + + ret = usb_desc_handle_control(dev, p, request, value, index, length, data); + if (ret >= 0) { + return; + } + + switch (request) { + /* hid specific requests */ + case InterfaceRequest | USB_REQ_GET_DESCRIPTOR: + switch (value >> 8) { + case 0x22: + if (hs->kind == HID_MOUSE) { + memcpy(data, qemu_mouse_hid_report_descriptor, + sizeof(qemu_mouse_hid_report_descriptor)); + p->actual_length = sizeof(qemu_mouse_hid_report_descriptor); + } else if (hs->kind == HID_TABLET) { + memcpy(data, qemu_tablet_hid_report_descriptor, + sizeof(qemu_tablet_hid_report_descriptor)); + p->actual_length = sizeof(qemu_tablet_hid_report_descriptor); + } else if (hs->kind == HID_KEYBOARD) { + memcpy(data, qemu_keyboard_hid_report_descriptor, + sizeof(qemu_keyboard_hid_report_descriptor)); + p->actual_length = sizeof(qemu_keyboard_hid_report_descriptor); + } + break; + default: + goto fail; + } + break; + case HID_GET_REPORT: + if (hs->kind == HID_MOUSE || hs->kind == HID_TABLET) { + p->actual_length = hid_pointer_poll(hs, data, length); + } else if (hs->kind == HID_KEYBOARD) { + p->actual_length = hid_keyboard_poll(hs, data, length); + } + break; + case HID_SET_REPORT: + if (hs->kind == HID_KEYBOARD) { + p->actual_length = hid_keyboard_write(hs, data, length); + } else { + goto fail; + } + break; + case HID_GET_PROTOCOL: + if (hs->kind != HID_KEYBOARD && hs->kind != HID_MOUSE) { + goto fail; + } + data[0] = hs->protocol; + p->actual_length = 1; + break; + case HID_SET_PROTOCOL: + if (hs->kind != HID_KEYBOARD && hs->kind != HID_MOUSE) { + goto fail; + } + hs->protocol = value; + break; + case HID_GET_IDLE: + data[0] = hs->idle; + p->actual_length = 1; + break; + case HID_SET_IDLE: + hs->idle = (uint8_t) (value >> 8); + hid_set_next_idle(hs); + if (hs->kind == HID_MOUSE || hs->kind == HID_TABLET) { + hid_pointer_activate(hs); + } + break; + default: + fail: + p->status = USB_RET_STALL; + break; + } +} + +static void usb_hid_handle_data(USBDevice *dev, USBPacket *p) +{ + USBHIDState *us = USB_HID(dev); + HIDState *hs = &us->hid; + g_autofree uint8_t *buf = g_malloc(p->iov.size); + int len = 0; + + switch (p->pid) { + case USB_TOKEN_IN: + if (p->ep->nr == 1) { + if (hs->kind == HID_MOUSE || hs->kind == HID_TABLET) { + hid_pointer_activate(hs); + } + if (!hid_has_events(hs)) { + p->status = USB_RET_NAK; + return; + } + hid_set_next_idle(hs); + if (hs->kind == HID_MOUSE || hs->kind == HID_TABLET) { + len = hid_pointer_poll(hs, buf, p->iov.size); + } else if (hs->kind == HID_KEYBOARD) { + len = hid_keyboard_poll(hs, buf, p->iov.size); + } + usb_packet_copy(p, buf, len); + } else { + goto fail; + } + break; + case USB_TOKEN_OUT: + default: + fail: + p->status = USB_RET_STALL; + break; + } +} + +static void usb_hid_unrealize(USBDevice *dev) +{ + USBHIDState *us = USB_HID(dev); + + hid_free(&us->hid); +} + +static void usb_hid_initfn(USBDevice *dev, int kind, + const USBDesc *usb1, const USBDesc *usb2, + Error **errp) +{ + USBHIDState *us = USB_HID(dev); + switch (us->usb_version) { + 
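+    /*
+     * The "usb_version" property (default 2, see the property lists
+     * below) picks the descriptor set: 1 = full speed only, 2 = dual
+     * speed (full + high).  Anything else leaves usb_desc NULL and
+     * realize fails with an error.
+     */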
case 1: + dev->usb_desc = usb1; + break; + case 2: + dev->usb_desc = usb2; + break; + default: + dev->usb_desc = NULL; + } + if (!dev->usb_desc) { + error_setg(errp, "Invalid usb version %d for usb hid device", + us->usb_version); + return; + } + + usb_desc_create_serial(dev); + usb_desc_init(dev); + us->intr = usb_ep_get(dev, USB_TOKEN_IN, 1); + hid_init(&us->hid, kind, usb_hid_changed); + if (us->display && us->hid.s) { + qemu_input_handler_bind(us->hid.s, us->display, us->head, NULL); + } +} + +static void usb_tablet_realize(USBDevice *dev, Error **errp) +{ + + usb_hid_initfn(dev, HID_TABLET, &desc_tablet, &desc_tablet2, errp); +} + +static void usb_mouse_realize(USBDevice *dev, Error **errp) +{ + usb_hid_initfn(dev, HID_MOUSE, &desc_mouse, &desc_mouse2, errp); +} + +static void usb_keyboard_realize(USBDevice *dev, Error **errp) +{ + usb_hid_initfn(dev, HID_KEYBOARD, &desc_keyboard, &desc_keyboard2, errp); +} + +static int usb_ptr_post_load(void *opaque, int version_id) +{ + USBHIDState *s = opaque; + + if (s->dev.remote_wakeup) { + hid_pointer_activate(&s->hid); + } + return 0; +} + +static const VMStateDescription vmstate_usb_ptr = { + .name = "usb-ptr", + .version_id = 1, + .minimum_version_id = 1, + .post_load = usb_ptr_post_load, + .fields = (VMStateField[]) { + VMSTATE_USB_DEVICE(dev, USBHIDState), + VMSTATE_HID_POINTER_DEVICE(hid, USBHIDState), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_usb_kbd = { + .name = "usb-kbd", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_USB_DEVICE(dev, USBHIDState), + VMSTATE_HID_KEYBOARD_DEVICE(hid, USBHIDState), + VMSTATE_END_OF_LIST() + } +}; + +static void usb_hid_class_initfn(ObjectClass *klass, void *data) +{ + USBDeviceClass *uc = USB_DEVICE_CLASS(klass); + + uc->handle_reset = usb_hid_handle_reset; + uc->handle_control = usb_hid_handle_control; + uc->handle_data = usb_hid_handle_data; + uc->unrealize = usb_hid_unrealize; + uc->handle_attach = usb_desc_attach; +} + +static const TypeInfo usb_hid_type_info = { + .name = TYPE_USB_HID, + .parent = TYPE_USB_DEVICE, + .instance_size = sizeof(USBHIDState), + .abstract = true, + .class_init = usb_hid_class_initfn, +}; + +static Property usb_tablet_properties[] = { + DEFINE_PROP_UINT32("usb_version", USBHIDState, usb_version, 2), + DEFINE_PROP_STRING("display", USBHIDState, display), + DEFINE_PROP_UINT32("head", USBHIDState, head, 0), + DEFINE_PROP_END_OF_LIST(), +}; + +static void usb_tablet_class_initfn(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + USBDeviceClass *uc = USB_DEVICE_CLASS(klass); + + uc->realize = usb_tablet_realize; + uc->product_desc = "QEMU USB Tablet"; + dc->vmsd = &vmstate_usb_ptr; + device_class_set_props(dc, usb_tablet_properties); + set_bit(DEVICE_CATEGORY_INPUT, dc->categories); +} + +static const TypeInfo usb_tablet_info = { + .name = "usb-tablet", + .parent = TYPE_USB_HID, + .class_init = usb_tablet_class_initfn, +}; + +static Property usb_mouse_properties[] = { + DEFINE_PROP_UINT32("usb_version", USBHIDState, usb_version, 2), + DEFINE_PROP_END_OF_LIST(), +}; + +static void usb_mouse_class_initfn(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + USBDeviceClass *uc = USB_DEVICE_CLASS(klass); + + uc->realize = usb_mouse_realize; + uc->product_desc = "QEMU USB Mouse"; + dc->vmsd = &vmstate_usb_ptr; + device_class_set_props(dc, usb_mouse_properties); + set_bit(DEVICE_CATEGORY_INPUT, dc->categories); +} + +static const TypeInfo usb_mouse_info = { + 
.name = "usb-mouse", + .parent = TYPE_USB_HID, + .class_init = usb_mouse_class_initfn, +}; + +static Property usb_keyboard_properties[] = { + DEFINE_PROP_UINT32("usb_version", USBHIDState, usb_version, 2), + DEFINE_PROP_STRING("display", USBHIDState, display), + DEFINE_PROP_END_OF_LIST(), +}; + +static void usb_keyboard_class_initfn(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + USBDeviceClass *uc = USB_DEVICE_CLASS(klass); + + uc->realize = usb_keyboard_realize; + uc->product_desc = "QEMU USB Keyboard"; + dc->vmsd = &vmstate_usb_kbd; + device_class_set_props(dc, usb_keyboard_properties); + set_bit(DEVICE_CATEGORY_INPUT, dc->categories); +} + +static const TypeInfo usb_keyboard_info = { + .name = "usb-kbd", + .parent = TYPE_USB_HID, + .class_init = usb_keyboard_class_initfn, +}; + +static void usb_hid_register_types(void) +{ + type_register_static(&usb_hid_type_info); + type_register_static(&usb_tablet_info); + usb_legacy_register("usb-tablet", "tablet", NULL); + type_register_static(&usb_mouse_info); + usb_legacy_register("usb-mouse", "mouse", NULL); + type_register_static(&usb_keyboard_info); + usb_legacy_register("usb-kbd", "keyboard", NULL); +} + +type_init(usb_hid_register_types) diff --git a/hw/usb/dev-hub.c b/hw/usb/dev-hub.c new file mode 100644 index 000000000..e35813d77 --- /dev/null +++ b/hw/usb/dev-hub.c @@ -0,0 +1,704 @@ +/* + * QEMU USB HUB emulation + * + * Copyright (c) 2005 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/timer.h" +#include "trace.h" +#include "hw/qdev-properties.h" +#include "hw/usb.h" +#include "migration/vmstate.h" +#include "desc.h" +#include "qemu/error-report.h" +#include "qemu/module.h" +#include "qom/object.h" + +#define MAX_PORTS 8 + +typedef struct USBHubPort { + USBPort port; + uint16_t wPortStatus; + uint16_t wPortChange; +} USBHubPort; + +struct USBHubState { + USBDevice dev; + USBEndpoint *intr; + uint32_t num_ports; + bool port_power; + QEMUTimer *port_timer; + USBHubPort ports[MAX_PORTS]; +}; + +#define TYPE_USB_HUB "usb-hub" +OBJECT_DECLARE_SIMPLE_TYPE(USBHubState, USB_HUB) + +#define ClearHubFeature (0x2000 | USB_REQ_CLEAR_FEATURE) +#define ClearPortFeature (0x2300 | USB_REQ_CLEAR_FEATURE) +#define GetHubDescriptor (0xa000 | USB_REQ_GET_DESCRIPTOR) +#define GetHubStatus (0xa000 | USB_REQ_GET_STATUS) +#define GetPortStatus (0xa300 | USB_REQ_GET_STATUS) +#define SetHubFeature (0x2000 | USB_REQ_SET_FEATURE) +#define SetPortFeature (0x2300 | USB_REQ_SET_FEATURE) + +#define PORT_STAT_CONNECTION 0x0001 +#define PORT_STAT_ENABLE 0x0002 +#define PORT_STAT_SUSPEND 0x0004 +#define PORT_STAT_OVERCURRENT 0x0008 +#define PORT_STAT_RESET 0x0010 +#define PORT_STAT_POWER 0x0100 +#define PORT_STAT_LOW_SPEED 0x0200 +#define PORT_STAT_HIGH_SPEED 0x0400 +#define PORT_STAT_TEST 0x0800 +#define PORT_STAT_INDICATOR 0x1000 + +#define PORT_STAT_C_CONNECTION 0x0001 +#define PORT_STAT_C_ENABLE 0x0002 +#define PORT_STAT_C_SUSPEND 0x0004 +#define PORT_STAT_C_OVERCURRENT 0x0008 +#define PORT_STAT_C_RESET 0x0010 + +#define PORT_CONNECTION 0 +#define PORT_ENABLE 1 +#define PORT_SUSPEND 2 +#define PORT_OVERCURRENT 3 +#define PORT_RESET 4 +#define PORT_POWER 8 +#define PORT_LOWSPEED 9 +#define PORT_HIGHSPEED 10 +#define PORT_C_CONNECTION 16 +#define PORT_C_ENABLE 17 +#define PORT_C_SUSPEND 18 +#define PORT_C_OVERCURRENT 19 +#define PORT_C_RESET 20 +#define PORT_TEST 21 +#define PORT_INDICATOR 22 + +/* same as Linux kernel root hubs */ + +enum { + STR_MANUFACTURER = 1, + STR_PRODUCT, + STR_SERIALNUMBER, +}; + +static const USBDescStrings desc_strings = { + [STR_MANUFACTURER] = "QEMU", + [STR_PRODUCT] = "QEMU USB Hub", + [STR_SERIALNUMBER] = "314159", +}; + +static const USBDescIface desc_iface_hub = { + .bInterfaceNumber = 0, + .bNumEndpoints = 1, + .bInterfaceClass = USB_CLASS_HUB, + .eps = (USBDescEndpoint[]) { + { + .bEndpointAddress = USB_DIR_IN | 0x01, + .bmAttributes = USB_ENDPOINT_XFER_INT, + .wMaxPacketSize = 1 + DIV_ROUND_UP(MAX_PORTS, 8), + .bInterval = 0xff, + }, + } +}; + +static const USBDescDevice desc_device_hub = { + .bcdUSB = 0x0110, + .bDeviceClass = USB_CLASS_HUB, + .bMaxPacketSize0 = 8, + .bNumConfigurations = 1, + .confs = (USBDescConfig[]) { + { + .bNumInterfaces = 1, + .bConfigurationValue = 1, + .bmAttributes = USB_CFG_ATT_ONE | USB_CFG_ATT_SELFPOWER | + USB_CFG_ATT_WAKEUP, + .nif = 1, + .ifs = &desc_iface_hub, + }, + }, +}; + +static const USBDesc desc_hub = { + .id = { + .idVendor = 0x0409, + .idProduct = 0x55aa, + .bcdDevice = 0x0101, + .iManufacturer = STR_MANUFACTURER, + .iProduct = STR_PRODUCT, + .iSerialNumber = STR_SERIALNUMBER, + }, + .full = &desc_device_hub, + .str = desc_strings, +}; + +static const uint8_t qemu_hub_hub_descriptor[] = +{ + 0x00, /* u8 bLength; patched in later */ + 0x29, /* u8 bDescriptorType; Hub-descriptor */ + 0x00, /* u8 bNbrPorts; (patched later) */ + 0x0a, /* u16 wHubCharacteristics; */ + 0x00, /* (per-port OC, no power switching) */ + 0x01, /* u8 bPwrOn2pwrGood; 2ms */ + 0x00 
/* u8 bHubContrCurrent; 0 mA */ + + /* DeviceRemovable and PortPwrCtrlMask patched in later */ +}; + +static bool usb_hub_port_change(USBHubPort *port, uint16_t status) +{ + bool notify = false; + + if (status & 0x1f) { + port->wPortChange |= status; + notify = true; + } + return notify; +} + +static bool usb_hub_port_set(USBHubPort *port, uint16_t status) +{ + if (port->wPortStatus & status) { + return false; + } + port->wPortStatus |= status; + return usb_hub_port_change(port, status); +} + +static bool usb_hub_port_clear(USBHubPort *port, uint16_t status) +{ + if (!(port->wPortStatus & status)) { + return false; + } + port->wPortStatus &= ~status; + return usb_hub_port_change(port, status); +} + +static bool usb_hub_port_update(USBHubPort *port) +{ + bool notify = false; + + if (port->port.dev && port->port.dev->attached) { + notify = usb_hub_port_set(port, PORT_STAT_CONNECTION); + if (port->port.dev->speed == USB_SPEED_LOW) { + usb_hub_port_set(port, PORT_STAT_LOW_SPEED); + } else { + usb_hub_port_clear(port, PORT_STAT_LOW_SPEED); + } + } + return notify; +} + +static void usb_hub_port_update_timer(void *opaque) +{ + USBHubState *s = opaque; + bool notify = false; + int i; + + for (i = 0; i < s->num_ports; i++) { + notify |= usb_hub_port_update(&s->ports[i]); + } + if (notify) { + usb_wakeup(s->intr, 0); + } +} + +static void usb_hub_attach(USBPort *port1) +{ + USBHubState *s = port1->opaque; + USBHubPort *port = &s->ports[port1->index]; + + trace_usb_hub_attach(s->dev.addr, port1->index + 1); + usb_hub_port_update(port); + usb_wakeup(s->intr, 0); +} + +static void usb_hub_detach(USBPort *port1) +{ + USBHubState *s = port1->opaque; + USBHubPort *port = &s->ports[port1->index]; + + trace_usb_hub_detach(s->dev.addr, port1->index + 1); + usb_wakeup(s->intr, 0); + + /* Let upstream know the device on this port is gone */ + s->dev.port->ops->child_detach(s->dev.port, port1->dev); + + usb_hub_port_clear(port, PORT_STAT_CONNECTION); + usb_hub_port_clear(port, PORT_STAT_ENABLE); + usb_hub_port_clear(port, PORT_STAT_SUSPEND); + usb_wakeup(s->intr, 0); +} + +static void usb_hub_child_detach(USBPort *port1, USBDevice *child) +{ + USBHubState *s = port1->opaque; + + /* Pass along upstream */ + s->dev.port->ops->child_detach(s->dev.port, child); +} + +static void usb_hub_wakeup(USBPort *port1) +{ + USBHubState *s = port1->opaque; + USBHubPort *port = &s->ports[port1->index]; + + if (usb_hub_port_clear(port, PORT_STAT_SUSPEND)) { + usb_wakeup(s->intr, 0); + } +} + +static void usb_hub_complete(USBPort *port, USBPacket *packet) +{ + USBHubState *s = port->opaque; + + /* + * Just pass it along upstream for now. + * + * If we ever implement usb 2.0 split transactions this will + * become a little more complicated ... + * + * Can't use usb_packet_complete() here because packet->owner is + * cleared already, go call the ->complete() callback directly + * instead. 
+ */ + s->dev.port->ops->complete(s->dev.port, packet); +} + +static USBDevice *usb_hub_find_device(USBDevice *dev, uint8_t addr) +{ + USBHubState *s = USB_HUB(dev); + USBHubPort *port; + USBDevice *downstream; + int i; + + for (i = 0; i < s->num_ports; i++) { + port = &s->ports[i]; + if (!(port->wPortStatus & PORT_STAT_ENABLE)) { + continue; + } + downstream = usb_find_device(&port->port, addr); + if (downstream != NULL) { + return downstream; + } + } + return NULL; +} + +static void usb_hub_handle_reset(USBDevice *dev) +{ + USBHubState *s = USB_HUB(dev); + USBHubPort *port; + int i; + + trace_usb_hub_reset(s->dev.addr); + for (i = 0; i < s->num_ports; i++) { + port = s->ports + i; + port->wPortStatus = 0; + port->wPortChange = 0; + usb_hub_port_set(port, PORT_STAT_POWER); + usb_hub_port_update(port); + } +} + +static const char *feature_name(int feature) +{ + static const char *name[] = { + [PORT_CONNECTION] = "connection", + [PORT_ENABLE] = "enable", + [PORT_SUSPEND] = "suspend", + [PORT_OVERCURRENT] = "overcurrent", + [PORT_RESET] = "reset", + [PORT_POWER] = "power", + [PORT_LOWSPEED] = "lowspeed", + [PORT_HIGHSPEED] = "highspeed", + [PORT_C_CONNECTION] = "change-connection", + [PORT_C_ENABLE] = "change-enable", + [PORT_C_SUSPEND] = "change-suspend", + [PORT_C_OVERCURRENT] = "change-overcurrent", + [PORT_C_RESET] = "change-reset", + [PORT_TEST] = "test", + [PORT_INDICATOR] = "indicator", + }; + if (feature < 0 || feature >= ARRAY_SIZE(name)) { + return "?"; + } + return name[feature] ?: "?"; +} + +static void usb_hub_handle_control(USBDevice *dev, USBPacket *p, + int request, int value, int index, int length, uint8_t *data) +{ + USBHubState *s = (USBHubState *)dev; + int ret; + + trace_usb_hub_control(s->dev.addr, request, value, index, length); + + ret = usb_desc_handle_control(dev, p, request, value, index, length, data); + if (ret >= 0) { + return; + } + + switch(request) { + case EndpointOutRequest | USB_REQ_CLEAR_FEATURE: + if (value == 0 && index != 0x81) { /* clear ep halt */ + goto fail; + } + break; + /* usb specific requests */ + case GetHubStatus: + data[0] = 0; + data[1] = 0; + data[2] = 0; + data[3] = 0; + p->actual_length = 4; + break; + case GetPortStatus: + { + unsigned int n = index - 1; + USBHubPort *port; + if (n >= s->num_ports) { + goto fail; + } + port = &s->ports[n]; + trace_usb_hub_get_port_status(s->dev.addr, index, + port->wPortStatus, + port->wPortChange); + data[0] = port->wPortStatus; + data[1] = port->wPortStatus >> 8; + data[2] = port->wPortChange; + data[3] = port->wPortChange >> 8; + p->actual_length = 4; + } + break; + case SetHubFeature: + case ClearHubFeature: + if (value != 0 && value != 1) { + goto fail; + } + break; + case SetPortFeature: + { + unsigned int n = index - 1; + USBHubPort *port; + USBDevice *dev; + + trace_usb_hub_set_port_feature(s->dev.addr, index, + feature_name(value)); + + if (n >= s->num_ports) { + goto fail; + } + port = &s->ports[n]; + dev = port->port.dev; + switch(value) { + case PORT_SUSPEND: + port->wPortStatus |= PORT_STAT_SUSPEND; + break; + case PORT_RESET: + usb_hub_port_set(port, PORT_STAT_RESET); + usb_hub_port_clear(port, PORT_STAT_RESET); + if (dev && dev->attached) { + usb_device_reset(dev); + usb_hub_port_set(port, PORT_STAT_ENABLE); + } + usb_wakeup(s->intr, 0); + break; + case PORT_POWER: + if (s->port_power) { + int64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + usb_hub_port_set(port, PORT_STAT_POWER); + timer_mod(s->port_timer, now + 5000000); /* 5 ms */ + } + break; + default: + goto fail; + } + } + 
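+        /*
+         * wPortStatus reports the current state; wPortChange latches
+         * transitions of the low five status bits (see
+         * usb_hub_port_change) until the guest acks them with
+         * ClearPortFeature(PORT_C_*) below.  A latched change also
+         * wakes the interrupt endpoint so the status gets polled.
+         */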
break; + case ClearPortFeature: + { + unsigned int n = index - 1; + USBHubPort *port; + + trace_usb_hub_clear_port_feature(s->dev.addr, index, + feature_name(value)); + + if (n >= s->num_ports) { + goto fail; + } + port = &s->ports[n]; + switch(value) { + case PORT_ENABLE: + port->wPortStatus &= ~PORT_STAT_ENABLE; + break; + case PORT_C_ENABLE: + port->wPortChange &= ~PORT_STAT_C_ENABLE; + break; + case PORT_SUSPEND: + usb_hub_port_clear(port, PORT_STAT_SUSPEND); + break; + case PORT_C_SUSPEND: + port->wPortChange &= ~PORT_STAT_C_SUSPEND; + break; + case PORT_C_CONNECTION: + port->wPortChange &= ~PORT_STAT_C_CONNECTION; + break; + case PORT_C_OVERCURRENT: + port->wPortChange &= ~PORT_STAT_C_OVERCURRENT; + break; + case PORT_C_RESET: + port->wPortChange &= ~PORT_STAT_C_RESET; + break; + case PORT_POWER: + if (s->port_power) { + usb_hub_port_clear(port, PORT_STAT_POWER); + usb_hub_port_clear(port, PORT_STAT_CONNECTION); + usb_hub_port_clear(port, PORT_STAT_ENABLE); + usb_hub_port_clear(port, PORT_STAT_SUSPEND); + port->wPortChange = 0; + } + break; + default: + goto fail; + } + } + break; + case GetHubDescriptor: + { + unsigned int n, limit, var_hub_size = 0; + memcpy(data, qemu_hub_hub_descriptor, + sizeof(qemu_hub_hub_descriptor)); + data[2] = s->num_ports; + + if (s->port_power) { + data[3] &= ~0x03; + data[3] |= 0x01; + } + + /* fill DeviceRemovable bits */ + limit = DIV_ROUND_UP(s->num_ports + 1, 8) + 7; + for (n = 7; n < limit; n++) { + data[n] = 0x00; + var_hub_size++; + } + + /* fill PortPwrCtrlMask bits */ + limit = limit + DIV_ROUND_UP(s->num_ports, 8); + for (;n < limit; n++) { + data[n] = 0xff; + var_hub_size++; + } + + p->actual_length = sizeof(qemu_hub_hub_descriptor) + var_hub_size; + data[0] = p->actual_length; + break; + } + default: + fail: + p->status = USB_RET_STALL; + break; + } +} + +static void usb_hub_handle_data(USBDevice *dev, USBPacket *p) +{ + USBHubState *s = (USBHubState *)dev; + + switch(p->pid) { + case USB_TOKEN_IN: + if (p->ep->nr == 1) { + USBHubPort *port; + unsigned int status; + uint8_t buf[4]; + int i, n; + n = DIV_ROUND_UP(s->num_ports + 1, 8); + if (p->iov.size == 1) { /* FreeBSD workaround */ + n = 1; + } else if (n > p->iov.size) { + p->status = USB_RET_BABBLE; + return; + } + status = 0; + for (i = 0; i < s->num_ports; i++) { + port = &s->ports[i]; + if (port->wPortChange) + status |= (1 << (i + 1)); + } + if (status != 0) { + trace_usb_hub_status_report(s->dev.addr, status); + for(i = 0; i < n; i++) { + buf[i] = status >> (8 * i); + } + usb_packet_copy(p, buf, n); + } else { + p->status = USB_RET_NAK; /* usb11 11.13.1 */ + } + } else { + goto fail; + } + break; + case USB_TOKEN_OUT: + default: + fail: + p->status = USB_RET_STALL; + break; + } +} + +static void usb_hub_unrealize(USBDevice *dev) +{ + USBHubState *s = (USBHubState *)dev; + int i; + + for (i = 0; i < s->num_ports; i++) { + usb_unregister_port(usb_bus_from_device(dev), + &s->ports[i].port); + } + + timer_free(s->port_timer); +} + +static USBPortOps usb_hub_port_ops = { + .attach = usb_hub_attach, + .detach = usb_hub_detach, + .child_detach = usb_hub_child_detach, + .wakeup = usb_hub_wakeup, + .complete = usb_hub_complete, +}; + +static void usb_hub_realize(USBDevice *dev, Error **errp) +{ + USBHubState *s = USB_HUB(dev); + USBHubPort *port; + int i; + + if (s->num_ports < 1 || s->num_ports > MAX_PORTS) { + error_setg(errp, "num_ports (%d) out of range (1..%d)", + s->num_ports, MAX_PORTS); + return; + } + + if (dev->port->hubcount == 5) { + error_setg(errp, "usb hub chain too deep"); + return; + 
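+        /*
+         * USB 2.0 allows at most seven tiers on a bus, i.e. no more
+         * than five external hubs chained between the root hub and a
+         * device, hence the hard limit on dev->port->hubcount.
+         */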
} + + usb_desc_create_serial(dev); + usb_desc_init(dev); + s->port_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, + usb_hub_port_update_timer, s); + s->intr = usb_ep_get(dev, USB_TOKEN_IN, 1); + for (i = 0; i < s->num_ports; i++) { + port = &s->ports[i]; + usb_register_port(usb_bus_from_device(dev), + &port->port, s, i, &usb_hub_port_ops, + USB_SPEED_MASK_LOW | USB_SPEED_MASK_FULL); + usb_port_location(&port->port, dev->port, i+1); + } + usb_hub_handle_reset(dev); +} + +static const VMStateDescription vmstate_usb_hub_port = { + .name = "usb-hub-port", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT16(wPortStatus, USBHubPort), + VMSTATE_UINT16(wPortChange, USBHubPort), + VMSTATE_END_OF_LIST() + } +}; + +static bool usb_hub_port_timer_needed(void *opaque) +{ + USBHubState *s = opaque; + + return s->port_power; +} + +static const VMStateDescription vmstate_usb_hub_port_timer = { + .name = "usb-hub/port-timer", + .version_id = 1, + .minimum_version_id = 1, + .needed = usb_hub_port_timer_needed, + .fields = (VMStateField[]) { + VMSTATE_TIMER_PTR(port_timer, USBHubState), + VMSTATE_END_OF_LIST() + }, +}; + +static const VMStateDescription vmstate_usb_hub = { + .name = "usb-hub", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_USB_DEVICE(dev, USBHubState), + VMSTATE_STRUCT_ARRAY(ports, USBHubState, MAX_PORTS, 0, + vmstate_usb_hub_port, USBHubPort), + VMSTATE_END_OF_LIST() + }, + .subsections = (const VMStateDescription * []) { + &vmstate_usb_hub_port_timer, + NULL + } +}; + +static Property usb_hub_properties[] = { + DEFINE_PROP_UINT32("ports", USBHubState, num_ports, 8), + DEFINE_PROP_BOOL("port-power", USBHubState, port_power, false), + DEFINE_PROP_END_OF_LIST(), +}; + +static void usb_hub_class_initfn(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + USBDeviceClass *uc = USB_DEVICE_CLASS(klass); + + uc->realize = usb_hub_realize; + uc->product_desc = "QEMU USB Hub"; + uc->usb_desc = &desc_hub; + uc->find_device = usb_hub_find_device; + uc->handle_reset = usb_hub_handle_reset; + uc->handle_control = usb_hub_handle_control; + uc->handle_data = usb_hub_handle_data; + uc->unrealize = usb_hub_unrealize; + set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); + dc->fw_name = "hub"; + dc->vmsd = &vmstate_usb_hub; + device_class_set_props(dc, usb_hub_properties); +} + +static const TypeInfo hub_info = { + .name = TYPE_USB_HUB, + .parent = TYPE_USB_DEVICE, + .instance_size = sizeof(USBHubState), + .class_init = usb_hub_class_initfn, +}; + +static void usb_hub_register_types(void) +{ + type_register_static(&hub_info); +} + +type_init(usb_hub_register_types) diff --git a/hw/usb/dev-mtp.c b/hw/usb/dev-mtp.c new file mode 100644 index 000000000..c1d1694fd --- /dev/null +++ b/hw/usb/dev-mtp.c @@ -0,0 +1,2121 @@ +/* + * Media Transfer Protocol implementation, backed by host filesystem. + * + * Copyright Red Hat, Inc 2014 + * + * Author: + * Gerd Hoffmann + * + * This code is licensed under the GPL v2 or later. 
+ */ + +#include "qemu/osdep.h" +#include "qemu-common.h" +#include "qapi/error.h" +#include "qemu/error-report.h" +#include <wchar.h> +#include <dirent.h> + +#include <sys/statvfs.h> + + +#include "qemu/iov.h" +#include "qemu/module.h" +#include "qemu/filemonitor.h" +#include "trace.h" +#include "hw/qdev-properties.h" +#include "hw/usb.h" +#include "migration/vmstate.h" +#include "desc.h" +#include "qemu/units.h" +#include "qom/object.h" + +/* ----------------------------------------------------------------------- */ + +enum mtp_container_type { + TYPE_COMMAND = 1, + TYPE_DATA = 2, + TYPE_RESPONSE = 3, + TYPE_EVENT = 4, +}; + +/* MTP write stage, for internal use only */ +enum mtp_write_status { + WRITE_START = 1, + WRITE_CONTINUE = 2, + WRITE_END = 3, +}; + +enum mtp_code { + /* command codes */ + CMD_GET_DEVICE_INFO = 0x1001, + CMD_OPEN_SESSION = 0x1002, + CMD_CLOSE_SESSION = 0x1003, + CMD_GET_STORAGE_IDS = 0x1004, + CMD_GET_STORAGE_INFO = 0x1005, + CMD_GET_NUM_OBJECTS = 0x1006, + CMD_GET_OBJECT_HANDLES = 0x1007, + CMD_GET_OBJECT_INFO = 0x1008, + CMD_GET_OBJECT = 0x1009, + CMD_DELETE_OBJECT = 0x100b, + CMD_SEND_OBJECT_INFO = 0x100c, + CMD_SEND_OBJECT = 0x100d, + CMD_GET_PARTIAL_OBJECT = 0x101b, + CMD_GET_OBJECT_PROPS_SUPPORTED = 0x9801, + CMD_GET_OBJECT_PROP_DESC = 0x9802, + CMD_GET_OBJECT_PROP_VALUE = 0x9803, + + /* response codes */ + RES_OK = 0x2001, + RES_GENERAL_ERROR = 0x2002, + RES_SESSION_NOT_OPEN = 0x2003, + RES_INVALID_TRANSACTION_ID = 0x2004, + RES_OPERATION_NOT_SUPPORTED = 0x2005, + RES_PARAMETER_NOT_SUPPORTED = 0x2006, + RES_INCOMPLETE_TRANSFER = 0x2007, + RES_INVALID_STORAGE_ID = 0x2008, + RES_INVALID_OBJECT_HANDLE = 0x2009, + RES_INVALID_OBJECT_FORMAT_CODE = 0x200b, + RES_STORE_FULL = 0x200c, + RES_STORE_READ_ONLY = 0x200e, + RES_PARTIAL_DELETE = 0x2012, + RES_STORE_NOT_AVAILABLE = 0x2013, + RES_SPEC_BY_FORMAT_UNSUPPORTED = 0x2014, + RES_INVALID_OBJECTINFO = 0x2015, + RES_DESTINATION_UNSUPPORTED = 0x2020, + RES_INVALID_PARENT_OBJECT = 0x201a, + RES_INVALID_PARAMETER = 0x201d, + RES_SESSION_ALREADY_OPEN = 0x201e, + RES_INVALID_OBJECT_PROP_CODE = 0xA801, + + /* format codes */ + FMT_UNDEFINED_OBJECT = 0x3000, + FMT_ASSOCIATION = 0x3001, + + /* event codes */ + EVT_CANCEL_TRANSACTION = 0x4001, + EVT_OBJ_ADDED = 0x4002, + EVT_OBJ_REMOVED = 0x4003, + EVT_OBJ_INFO_CHANGED = 0x4007, + + /* object properties */ + PROP_STORAGE_ID = 0xDC01, + PROP_OBJECT_FORMAT = 0xDC02, + PROP_OBJECT_COMPRESSED_SIZE = 0xDC04, + PROP_PARENT_OBJECT = 0xDC0B, + PROP_PERSISTENT_UNIQUE_OBJECT_IDENTIFIER = 0xDC41, + PROP_NAME = 0xDC44, +}; + +enum mtp_data_type { + DATA_TYPE_UINT16 = 0x0004, + DATA_TYPE_UINT32 = 0x0006, + DATA_TYPE_UINT64 = 0x0008, + DATA_TYPE_UINT128 = 0x000a, + DATA_TYPE_STRING = 0xffff, +}; + +typedef struct { + uint32_t length; + uint16_t type; + uint16_t code; + uint32_t trans; +} QEMU_PACKED mtp_container; + +/* ----------------------------------------------------------------------- */ + +typedef struct MTPState MTPState; +typedef struct MTPControl MTPControl; +typedef struct MTPData MTPData; +typedef struct MTPObject MTPObject; + +enum { + EP_DATA_IN = 1, + EP_DATA_OUT, + EP_EVENT, +}; + +typedef struct MTPMonEntry MTPMonEntry; + +struct MTPMonEntry { + uint32_t event; + uint32_t handle; + + QTAILQ_ENTRY(MTPMonEntry) next; +}; + +struct MTPControl { + uint16_t code; + uint32_t trans; + int argc; + uint32_t argv[5]; +}; + +struct MTPData { + uint16_t code; + uint32_t trans; + uint64_t offset; + uint64_t length; + uint64_t alloc; + uint8_t *data; + bool first; + /* Used for >4G file sizes */ + bool pending; + int fd; 
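+    /*
+     * Objects too big to buffer whole are streamed: 'fd' stays open
+     * and data moves in MTP_WRITE_BUF_SZ chunks, with write_status
+     * tracking the WRITE_START/WRITE_CONTINUE/WRITE_END stage across
+     * USB packets.
+     */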
+ uint8_t write_status; + /* Internal pointer per every MTP_WRITE_BUF_SZ */ + uint64_t data_offset; +}; + +struct MTPObject { + uint32_t handle; + uint16_t format; + char *name; + char *path; + struct stat stat; + /* file monitor watch id */ + int64_t watchid; + MTPObject *parent; + uint32_t nchildren; + QLIST_HEAD(, MTPObject) children; + QLIST_ENTRY(MTPObject) list; + bool have_children; + QTAILQ_ENTRY(MTPObject) next; +}; + +struct MTPState { + USBDevice dev; + char *root; + char *desc; + uint32_t flags; + + MTPData *data_in; + MTPData *data_out; + MTPControl *result; + uint32_t session; + uint32_t next_handle; + bool readonly; + + QTAILQ_HEAD(, MTPObject) objects; + QFileMonitor *file_monitor; + QTAILQ_HEAD(, MTPMonEntry) events; + /* Responder is expecting a write operation */ + bool write_pending; + struct { + uint32_t parent_handle; + uint16_t format; + uint32_t size; + char *filename; + } dataset; +}; + +/* + * ObjectInfo dataset received from initiator + * Fields we don't care about are ignored + */ +typedef struct { + uint32_t storage_id; /*unused*/ + uint16_t format; + uint16_t protection_status; /*unused*/ + uint32_t size; + uint16_t thumb_format; /*unused*/ + uint32_t thumb_comp_sz; /*unused*/ + uint32_t thumb_pix_width; /*unused*/ + uint32_t thumb_pix_height; /*unused*/ + uint32_t image_pix_width; /*unused*/ + uint32_t image_pix_height; /*unused*/ + uint32_t image_bit_depth; /*unused*/ + uint32_t parent; /*unused*/ + uint16_t assoc_type; + uint32_t assoc_desc; + uint32_t seq_no; /*unused*/ + uint8_t length; /*part of filename field*/ + uint8_t filename[0]; /* UTF-16 encoded */ + char date_created[0]; /*unused*/ + char date_modified[0]; /*unused*/ + char keywords[0]; /*unused*/ + /* string and other data follows */ +} QEMU_PACKED ObjectInfo; + +#define TYPE_USB_MTP "usb-mtp" +OBJECT_DECLARE_SIMPLE_TYPE(MTPState, USB_MTP) + +#define QEMU_STORAGE_ID 0x00010001 + +#define MTP_FLAG_WRITABLE 0 + +#define FLAG_SET(_mtp, _flag) ((_mtp)->flags & (1 << (_flag))) + +/* ----------------------------------------------------------------------- */ + +#define MTP_MANUFACTURER "QEMU" +#define MTP_PRODUCT "QEMU filesharing" +#define MTP_WRITE_BUF_SZ (512 * KiB) + +enum { + STR_MANUFACTURER = 1, + STR_PRODUCT, + STR_SERIALNUMBER, + STR_MTP, + STR_CONFIG_FULL, + STR_CONFIG_HIGH, + STR_CONFIG_SUPER, +}; + +static const USBDescStrings desc_strings = { + [STR_MANUFACTURER] = MTP_MANUFACTURER, + [STR_PRODUCT] = MTP_PRODUCT, + [STR_SERIALNUMBER] = "34617", + [STR_MTP] = "MTP", + [STR_CONFIG_FULL] = "Full speed config (usb 1.1)", + [STR_CONFIG_HIGH] = "High speed config (usb 2.0)", + [STR_CONFIG_SUPER] = "Super speed config (usb 3.0)", +}; + +static const USBDescIface desc_iface_full = { + .bInterfaceNumber = 0, + .bNumEndpoints = 3, + .bInterfaceClass = USB_CLASS_STILL_IMAGE, + .bInterfaceSubClass = 0x01, + .bInterfaceProtocol = 0x01, + .iInterface = STR_MTP, + .eps = (USBDescEndpoint[]) { + { + .bEndpointAddress = USB_DIR_IN | EP_DATA_IN, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = 64, + },{ + .bEndpointAddress = USB_DIR_OUT | EP_DATA_OUT, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = 64, + },{ + .bEndpointAddress = USB_DIR_IN | EP_EVENT, + .bmAttributes = USB_ENDPOINT_XFER_INT, + .wMaxPacketSize = 64, + .bInterval = 0x0a, + }, + } +}; + +static const USBDescDevice desc_device_full = { + .bcdUSB = 0x0200, + .bMaxPacketSize0 = 8, + .bNumConfigurations = 1, + .confs = (USBDescConfig[]) { + { + .bNumInterfaces = 1, + .bConfigurationValue = 1, + .iConfiguration = 
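+            /*
+             * MTP rides on the PTP (still image capture) class triple
+             * 06/01/01; initiators recognise MTP proper via the "MTP"
+             * interface string and the MSOS CompatibleID below.
+             */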
STR_CONFIG_FULL, + .bmAttributes = USB_CFG_ATT_ONE | USB_CFG_ATT_WAKEUP, + .bMaxPower = 2, + .nif = 1, + .ifs = &desc_iface_full, + }, + }, +}; + +static const USBDescIface desc_iface_high = { + .bInterfaceNumber = 0, + .bNumEndpoints = 3, + .bInterfaceClass = USB_CLASS_STILL_IMAGE, + .bInterfaceSubClass = 0x01, + .bInterfaceProtocol = 0x01, + .iInterface = STR_MTP, + .eps = (USBDescEndpoint[]) { + { + .bEndpointAddress = USB_DIR_IN | EP_DATA_IN, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = 512, + },{ + .bEndpointAddress = USB_DIR_OUT | EP_DATA_OUT, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = 512, + },{ + .bEndpointAddress = USB_DIR_IN | EP_EVENT, + .bmAttributes = USB_ENDPOINT_XFER_INT, + .wMaxPacketSize = 64, + .bInterval = 0x0a, + }, + } +}; + +static const USBDescDevice desc_device_high = { + .bcdUSB = 0x0200, + .bMaxPacketSize0 = 64, + .bNumConfigurations = 1, + .confs = (USBDescConfig[]) { + { + .bNumInterfaces = 1, + .bConfigurationValue = 1, + .iConfiguration = STR_CONFIG_HIGH, + .bmAttributes = USB_CFG_ATT_ONE | USB_CFG_ATT_WAKEUP, + .bMaxPower = 2, + .nif = 1, + .ifs = &desc_iface_high, + }, + }, +}; + +static const USBDescMSOS desc_msos = { + .CompatibleID = "MTP", + .SelectiveSuspendEnabled = true, +}; + +static const USBDesc desc = { + .id = { + .idVendor = 0x46f4, /* CRC16() of "QEMU" */ + .idProduct = 0x0004, + .bcdDevice = 0, + .iManufacturer = STR_MANUFACTURER, + .iProduct = STR_PRODUCT, + .iSerialNumber = STR_SERIALNUMBER, + }, + .full = &desc_device_full, + .high = &desc_device_high, + .str = desc_strings, + .msos = &desc_msos, +}; + +/* ----------------------------------------------------------------------- */ + +static MTPObject *usb_mtp_object_alloc(MTPState *s, uint32_t handle, + MTPObject *parent, const char *name) +{ + MTPObject *o = g_new0(MTPObject, 1); + + if (name[0] == '.') { + goto ignore; + } + + o->watchid = -1; + o->handle = handle; + o->parent = parent; + o->name = g_strdup(name); + if (parent == NULL) { + o->path = g_strdup(name); + } else { + o->path = g_strdup_printf("%s/%s", parent->path, name); + } + + if (lstat(o->path, &o->stat) != 0) { + goto ignore; + } + if (S_ISREG(o->stat.st_mode)) { + o->format = FMT_UNDEFINED_OBJECT; + } else if (S_ISDIR(o->stat.st_mode)) { + o->format = FMT_ASSOCIATION; + } else { + goto ignore; + } + + if (access(o->path, R_OK) != 0) { + goto ignore; + } + + trace_usb_mtp_object_alloc(s->dev.addr, o->handle, o->path); + + QTAILQ_INSERT_TAIL(&s->objects, o, next); + return o; + +ignore: + g_free(o->name); + g_free(o->path); + g_free(o); + return NULL; +} + +static void usb_mtp_object_free(MTPState *s, MTPObject *o) +{ + MTPObject *iter; + + if (!o) { + return; + } + + trace_usb_mtp_object_free(s->dev.addr, o->handle, o->path); + + if (o->watchid != -1 && s->file_monitor) { + qemu_file_monitor_remove_watch(s->file_monitor, o->path, o->watchid); + } + + QTAILQ_REMOVE(&s->objects, o, next); + if (o->parent) { + QLIST_REMOVE(o, list); + o->parent->nchildren--; + } + + while (!QLIST_EMPTY(&o->children)) { + iter = QLIST_FIRST(&o->children); + usb_mtp_object_free(s, iter); + } + g_free(o->name); + g_free(o->path); + g_free(o); +} + +static MTPObject *usb_mtp_object_lookup(MTPState *s, uint32_t handle) +{ + MTPObject *o; + + QTAILQ_FOREACH(o, &s->objects, next) { + if (o->handle == handle) { + return o; + } + } + return NULL; +} + +static MTPObject *usb_mtp_add_child(MTPState *s, MTPObject *o, + const char *name) +{ + MTPObject *child = + usb_mtp_object_alloc(s, s->next_handle++, o, name); + + if 
(child) {
+        trace_usb_mtp_add_child(s->dev.addr, child->handle, child->path);
+        QLIST_INSERT_HEAD(&o->children, child, list);
+        o->nchildren++;
+
+        if (child->format == FMT_ASSOCIATION) {
+            QLIST_INIT(&child->children);
+        }
+    }
+
+    return child;
+}
+
+static MTPObject *usb_mtp_object_lookup_name(MTPObject *parent,
+                                             const char *name, int len)
+{
+    MTPObject *iter;
+
+    if (len == -1) {
+        len = strlen(name);
+    }
+
+    QLIST_FOREACH(iter, &parent->children, list) {
+        if (strncmp(iter->name, name, len) == 0) {
+            return iter;
+        }
+    }
+
+    return NULL;
+}
+
+static MTPObject *usb_mtp_object_lookup_id(MTPState *s, int64_t id)
+{
+    MTPObject *iter;
+
+    QTAILQ_FOREACH(iter, &s->objects, next) {
+        if (iter->watchid == id) {
+            return iter;
+        }
+    }
+
+    return NULL;
+}
+
+static void file_monitor_event(int64_t id,
+                               QFileMonitorEvent ev,
+                               const char *name,
+                               void *opaque)
+{
+    MTPState *s = opaque;
+    MTPObject *parent = usb_mtp_object_lookup_id(s, id);
+    MTPMonEntry *entry = NULL;
+    MTPObject *o;
+
+    if (!parent) {
+        return;
+    }
+
+    switch (ev) {
+    case QFILE_MONITOR_EVENT_CREATED:
+        if (usb_mtp_object_lookup_name(parent, name, -1)) {
+            /* Duplicate create event */
+            return;
+        }
+        entry = g_new0(MTPMonEntry, 1);
+        entry->handle = s->next_handle;
+        entry->event = EVT_OBJ_ADDED;
+        o = usb_mtp_add_child(s, parent, name);
+        if (!o) {
+            g_free(entry);
+            return;
+        }
+        trace_usb_mtp_file_monitor_event(s->dev.addr, name, "Obj Added");
+        break;
+
+    case QFILE_MONITOR_EVENT_DELETED:
+        /*
+         * The kernel issues an IN_IGNORED event
+         * when a dir containing a watch is
+         * deleted, so we don't have to remove the
+         * watch ourselves.
+         */
+        o = usb_mtp_object_lookup_name(parent, name, -1);
+        if (!o) {
+            return;
+        }
+        entry = g_new0(MTPMonEntry, 1);
+        entry->handle = o->handle;
+        entry->event = EVT_OBJ_REMOVED;
+        trace_usb_mtp_file_monitor_event(s->dev.addr, o->path, "Obj Deleted");
+        usb_mtp_object_free(s, o);
+        break;
+
+    case QFILE_MONITOR_EVENT_MODIFIED:
+        o = usb_mtp_object_lookup_name(parent, name, -1);
+        if (!o) {
+            return;
+        }
+        entry = g_new0(MTPMonEntry, 1);
+        entry->handle = o->handle;
+        entry->event = EVT_OBJ_INFO_CHANGED;
+        trace_usb_mtp_file_monitor_event(s->dev.addr, o->path, "Obj Modified");
+        break;
+
+    case QFILE_MONITOR_EVENT_IGNORED:
+        trace_usb_mtp_file_monitor_event(s->dev.addr, parent->path,
+                                         "Obj parent dir ignored");
+        break;
+
+    case QFILE_MONITOR_EVENT_ATTRIBUTES:
+        break;
+
+    default:
+        g_assert_not_reached();
+    }
+
+    if (entry) {
+        QTAILQ_INSERT_HEAD(&s->events, entry, next);
+    }
+}
+
+static void usb_mtp_file_monitor_cleanup(MTPState *s)
+{
+    MTPMonEntry *e, *p;
+
+    QTAILQ_FOREACH_SAFE(e, &s->events, next, p) {
+        QTAILQ_REMOVE(&s->events, e, next);
+        g_free(e);
+    }
+
+    qemu_file_monitor_free(s->file_monitor);
+    s->file_monitor = NULL;
+}
+
+
+static void usb_mtp_object_readdir(MTPState *s, MTPObject *o)
+{
+    struct dirent *entry;
+    DIR *dir;
+    int fd;
+    Error *err = NULL;
+
+    if (o->have_children) {
+        return;
+    }
+    o->have_children = true;
+
+    fd = open(o->path, O_DIRECTORY | O_CLOEXEC | O_NOFOLLOW);
+    if (fd < 0) {
+        return;
+    }
+    dir = fdopendir(fd);
+    if (!dir) {
+        close(fd);
+        return;
+    }
+
+    if (s->file_monitor) {
+        int64_t id = qemu_file_monitor_add_watch(s->file_monitor, o->path, NULL,
+                                                 file_monitor_event, s, &err);
+        if (id == -1) {
+            error_reportf_err(err,
+                              "usb-mtp: failed to add watch for %s: ",
+                              o->path);
+        } else {
+            trace_usb_mtp_file_monitor_event(s->dev.addr, o->path,
+                                             "Watch Added");
+            o->watchid = id;
+        }
+    }
+
+    while ((entry = readdir(dir)) != NULL) {
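+        /* every directory entry becomes a child object; dot-files and
+         * unreadable entries are filtered out in usb_mtp_object_alloc() */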
usb_mtp_add_child(s, o, entry->d_name); + } + closedir(dir); +} + +/* ----------------------------------------------------------------------- */ + +static MTPData *usb_mtp_data_alloc(MTPControl *c) +{ + MTPData *data = g_new0(MTPData, 1); + + data->code = c->code; + data->trans = c->trans; + data->fd = -1; + data->first = true; + return data; +} + +static void usb_mtp_data_free(MTPData *data) +{ + if (data == NULL) { + return; + } + if (data->fd != -1) { + close(data->fd); + } + g_free(data->data); + g_free(data); +} + +static void usb_mtp_realloc(MTPData *data, uint32_t bytes) +{ + if (data->length + bytes <= data->alloc) { + return; + } + data->alloc = (data->length + bytes + 0xff) & ~0xff; + data->data = g_realloc(data->data, data->alloc); +} + +static void usb_mtp_add_u8(MTPData *data, uint8_t val) +{ + usb_mtp_realloc(data, 1); + data->data[data->length++] = val; +} + +static void usb_mtp_add_u16(MTPData *data, uint16_t val) +{ + usb_mtp_realloc(data, 2); + data->data[data->length++] = (val >> 0) & 0xff; + data->data[data->length++] = (val >> 8) & 0xff; +} + +static void usb_mtp_add_u32(MTPData *data, uint32_t val) +{ + usb_mtp_realloc(data, 4); + data->data[data->length++] = (val >> 0) & 0xff; + data->data[data->length++] = (val >> 8) & 0xff; + data->data[data->length++] = (val >> 16) & 0xff; + data->data[data->length++] = (val >> 24) & 0xff; +} + +static void usb_mtp_add_u64(MTPData *data, uint64_t val) +{ + usb_mtp_realloc(data, 8); + data->data[data->length++] = (val >> 0) & 0xff; + data->data[data->length++] = (val >> 8) & 0xff; + data->data[data->length++] = (val >> 16) & 0xff; + data->data[data->length++] = (val >> 24) & 0xff; + data->data[data->length++] = (val >> 32) & 0xff; + data->data[data->length++] = (val >> 40) & 0xff; + data->data[data->length++] = (val >> 48) & 0xff; + data->data[data->length++] = (val >> 56) & 0xff; +} + +static void usb_mtp_add_u16_array(MTPData *data, uint32_t len, + const uint16_t *vals) +{ + int i; + + usb_mtp_add_u32(data, len); + for (i = 0; i < len; i++) { + usb_mtp_add_u16(data, vals[i]); + } +} + +static void usb_mtp_add_u32_array(MTPData *data, uint32_t len, + const uint32_t *vals) +{ + int i; + + usb_mtp_add_u32(data, len); + for (i = 0; i < len; i++) { + usb_mtp_add_u32(data, vals[i]); + } +} + +static void usb_mtp_add_wstr(MTPData *data, const wchar_t *str) +{ + uint32_t len = wcslen(str); + int i; + + if (len > 0) { + len++; /* include terminating L'\0' */ + } + + usb_mtp_add_u8(data, len); + for (i = 0; i < len; i++) { + usb_mtp_add_u16(data, str[i]); + } +} + +static void usb_mtp_add_str(MTPData *data, const char *str) +{ + uint32_t len = strlen(str)+1; + wchar_t *wstr = g_new(wchar_t, len); + size_t ret; + + ret = mbstowcs(wstr, str, len); + if (ret == -1) { + usb_mtp_add_wstr(data, L"Oops"); + } else { + usb_mtp_add_wstr(data, wstr); + } + + g_free(wstr); +} + +static void usb_mtp_add_time(MTPData *data, time_t time) +{ + g_autoptr(GDateTime) then = g_date_time_new_from_unix_utc(time); + g_autofree char *thenstr = g_date_time_format(then, "%Y%m%dT%H%M%S"); + usb_mtp_add_str(data, thenstr); +} + +/* ----------------------------------------------------------------------- */ + +static void usb_mtp_queue_result(MTPState *s, uint16_t code, uint32_t trans, + int argc, uint32_t arg0, uint32_t arg1, + uint32_t arg2) +{ + MTPControl *c = g_new0(MTPControl, 1); + + c->code = code; + c->trans = trans; + c->argc = argc; + if (argc > 0) { + c->argv[0] = arg0; + } + if (argc > 1) { + c->argv[1] = arg1; + } + if (argc > 2) { + c->argv[2] = arg2; + 
}
+
+    assert(s->result == NULL);
+    s->result = c;
+}
+
+/* ----------------------------------------------------------------------- */
+
+static MTPData *usb_mtp_get_device_info(MTPState *s, MTPControl *c)
+{
+    static const uint16_t ops[] = {
+        CMD_GET_DEVICE_INFO,
+        CMD_OPEN_SESSION,
+        CMD_CLOSE_SESSION,
+        CMD_GET_STORAGE_IDS,
+        CMD_GET_STORAGE_INFO,
+        CMD_GET_NUM_OBJECTS,
+        CMD_GET_OBJECT_HANDLES,
+        CMD_GET_OBJECT_INFO,
+        CMD_DELETE_OBJECT,
+        CMD_SEND_OBJECT_INFO,
+        CMD_SEND_OBJECT,
+        CMD_GET_OBJECT,
+        CMD_GET_PARTIAL_OBJECT,
+        CMD_GET_OBJECT_PROPS_SUPPORTED,
+        CMD_GET_OBJECT_PROP_DESC,
+        CMD_GET_OBJECT_PROP_VALUE,
+    };
+    static const uint16_t fmt[] = {
+        FMT_UNDEFINED_OBJECT,
+        FMT_ASSOCIATION,
+    };
+    MTPData *d = usb_mtp_data_alloc(c);
+
+    trace_usb_mtp_op_get_device_info(s->dev.addr);
+
+    usb_mtp_add_u16(d, 100);          /* StandardVersion: 1.00 */
+    usb_mtp_add_u32(d, 0x00000006);   /* VendorExtensionID: MTP */
+    usb_mtp_add_u16(d, 0x0064);       /* VendorExtensionVersion: 1.00 */
+    usb_mtp_add_wstr(d, L"");         /* VendorExtensionDesc */
+    usb_mtp_add_u16(d, 0x0000);       /* FunctionalMode: standard */
+
+    usb_mtp_add_u16_array(d, ARRAY_SIZE(ops), ops); /* OperationsSupported */
+    usb_mtp_add_u16_array(d, 0, NULL);              /* EventsSupported */
+    usb_mtp_add_u16_array(d, 0, NULL);              /* DevicePropertiesSupported */
+    usb_mtp_add_u16_array(d, 0, NULL);              /* CaptureFormats */
+    usb_mtp_add_u16_array(d, ARRAY_SIZE(fmt), fmt); /* PlaybackFormats */
+
+    usb_mtp_add_wstr(d, L"" MTP_MANUFACTURER);
+    usb_mtp_add_wstr(d, L"" MTP_PRODUCT);
+    usb_mtp_add_wstr(d, L"0.1");
+    usb_mtp_add_wstr(d, L"0123456789abcdef0123456789abcdef");
+
+    return d;
+}
+
+static MTPData *usb_mtp_get_storage_ids(MTPState *s, MTPControl *c)
+{
+    static const uint32_t ids[] = {
+        QEMU_STORAGE_ID,
+    };
+    MTPData *d = usb_mtp_data_alloc(c);
+
+    trace_usb_mtp_op_get_storage_ids(s->dev.addr);
+
+    usb_mtp_add_u32_array(d, ARRAY_SIZE(ids), ids);
+
+    return d;
+}
+
+static MTPData *usb_mtp_get_storage_info(MTPState *s, MTPControl *c)
+{
+    MTPData *d = usb_mtp_data_alloc(c);
+    struct statvfs buf;
+    int rc;
+
+    trace_usb_mtp_op_get_storage_info(s->dev.addr);
+
+    if (FLAG_SET(s, MTP_FLAG_WRITABLE)) {
+        usb_mtp_add_u16(d, 0x0003);   /* StorageType: fixed RAM */
+        usb_mtp_add_u16(d, 0x0002);   /* FilesystemType: generic hierarchical */
+        usb_mtp_add_u16(d, 0x0000);   /* AccessCapability: read-write */
+    } else {
+        usb_mtp_add_u16(d, 0x0001);   /* StorageType: fixed ROM */
+        usb_mtp_add_u16(d, 0x0002);   /* FilesystemType: generic hierarchical */
+        usb_mtp_add_u16(d, 0x0001);   /* AccessCapability: read-only */
+    }
+
+    rc = statvfs(s->root, &buf);
+    if (rc == 0) {
+        usb_mtp_add_u64(d, (uint64_t)buf.f_frsize * buf.f_blocks); /* MaxCapacity */
+        usb_mtp_add_u64(d, (uint64_t)buf.f_frsize * buf.f_bavail); /* FreeSpaceInBytes */
+        usb_mtp_add_u32(d, buf.f_ffree);
+    } else {
+        usb_mtp_add_u64(d, 0xffffffff);
+        usb_mtp_add_u64(d, 0xffffffff);
+        usb_mtp_add_u32(d, 0xffffffff);
+    }
+
+    usb_mtp_add_str(d, s->desc);                /* StorageDescription */
+    usb_mtp_add_wstr(d, L"123456789abcdef");    /* VolumeIdentifier */
+    return d;
+}
+
+static MTPData *usb_mtp_get_object_handles(MTPState *s, MTPControl *c,
+                                           MTPObject *o)
+{
+    MTPData *d = usb_mtp_data_alloc(c);
+    uint32_t i = 0;
+    g_autofree uint32_t *handles = g_new(uint32_t, o->nchildren);
+    MTPObject *iter;
+
+    trace_usb_mtp_op_get_object_handles(s->dev.addr, o->handle, o->path);
+
+    QLIST_FOREACH(iter, &o->children, list) {
+        handles[i++] = iter->handle;
+    }
+    assert(i == o->nchildren);
+    usb_mtp_add_u32_array(d, o->nchildren, handles);
+
+    return d;
+}
+
+static MTPData *usb_mtp_get_object_info(MTPState *s, MTPControl *c,
+                                        MTPObject *o)
+{
+    MTPData *d = usb_mtp_data_alloc(c);
+
+    trace_usb_mtp_op_get_object_info(s->dev.addr, o->handle, o->path);
+
+    usb_mtp_add_u32(d, QEMU_STORAGE_ID);
+    usb_mtp_add_u16(d, o->format);
+    usb_mtp_add_u16(d, 0);            /* ProtectionStatus */
+
+    /* ObjectCompressedSize is only 32 bits wide */
+    if (o->stat.st_size > 0xFFFFFFFF) {
+        usb_mtp_add_u32(d, 0xFFFFFFFF);
+    } else {
+        usb_mtp_add_u32(d, o->stat.st_size);
+    }
+
+    /* no thumbnail or image metadata: ThumbFormat .. ImageBitDepth are zero */
+    usb_mtp_add_u16(d, 0);
+    usb_mtp_add_u32(d, 0);
+    usb_mtp_add_u32(d, 0);
+    usb_mtp_add_u32(d, 0);
+    usb_mtp_add_u32(d, 0);
usb_mtp_add_u32(d, 0); + usb_mtp_add_u32(d, 0); + + if (o->parent) { + usb_mtp_add_u32(d, o->parent->handle); + } else { + usb_mtp_add_u32(d, 0); + } + if (o->format == FMT_ASSOCIATION) { + usb_mtp_add_u16(d, 0x0001); + usb_mtp_add_u32(d, 0x00000001); + usb_mtp_add_u32(d, 0); + } else { + usb_mtp_add_u16(d, 0); + usb_mtp_add_u32(d, 0); + usb_mtp_add_u32(d, 0); + } + + usb_mtp_add_str(d, o->name); + usb_mtp_add_time(d, o->stat.st_ctime); + usb_mtp_add_time(d, o->stat.st_mtime); + usb_mtp_add_wstr(d, L""); + + return d; +} + +static MTPData *usb_mtp_get_object(MTPState *s, MTPControl *c, + MTPObject *o) +{ + MTPData *d = usb_mtp_data_alloc(c); + + trace_usb_mtp_op_get_object(s->dev.addr, o->handle, o->path); + + d->fd = open(o->path, O_RDONLY | O_CLOEXEC | O_NOFOLLOW); + if (d->fd == -1) { + usb_mtp_data_free(d); + return NULL; + } + d->length = o->stat.st_size; + d->alloc = 512; + d->data = g_malloc(d->alloc); + return d; +} + +static MTPData *usb_mtp_get_partial_object(MTPState *s, MTPControl *c, + MTPObject *o) +{ + MTPData *d; + off_t offset; + + if (c->argc <= 2) { + return NULL; + } + trace_usb_mtp_op_get_partial_object(s->dev.addr, o->handle, o->path, + c->argv[1], c->argv[2]); + + d = usb_mtp_data_alloc(c); + d->fd = open(o->path, O_RDONLY | O_CLOEXEC | O_NOFOLLOW); + if (d->fd == -1) { + usb_mtp_data_free(d); + return NULL; + } + + offset = c->argv[1]; + if (offset > o->stat.st_size) { + offset = o->stat.st_size; + } + if (lseek(d->fd, offset, SEEK_SET) < 0) { + usb_mtp_data_free(d); + return NULL; + } + + d->length = c->argv[2]; + if (d->length > o->stat.st_size - offset) { + d->length = o->stat.st_size - offset; + } + + return d; +} + +static MTPData *usb_mtp_get_object_props_supported(MTPState *s, MTPControl *c) +{ + static const uint16_t props[] = { + PROP_STORAGE_ID, + PROP_OBJECT_FORMAT, + PROP_OBJECT_COMPRESSED_SIZE, + PROP_PARENT_OBJECT, + PROP_PERSISTENT_UNIQUE_OBJECT_IDENTIFIER, + PROP_NAME, + }; + MTPData *d = usb_mtp_data_alloc(c); + usb_mtp_add_u16_array(d, ARRAY_SIZE(props), props); + + return d; +} + +static MTPData *usb_mtp_get_object_prop_desc(MTPState *s, MTPControl *c) +{ + MTPData *d = usb_mtp_data_alloc(c); + switch (c->argv[0]) { + case PROP_STORAGE_ID: + usb_mtp_add_u16(d, PROP_STORAGE_ID); + usb_mtp_add_u16(d, DATA_TYPE_UINT32); + usb_mtp_add_u8(d, 0x00); + usb_mtp_add_u32(d, 0x00000000); + usb_mtp_add_u32(d, 0x00000000); + usb_mtp_add_u8(d, 0x00); + break; + case PROP_OBJECT_FORMAT: + usb_mtp_add_u16(d, PROP_OBJECT_FORMAT); + usb_mtp_add_u16(d, DATA_TYPE_UINT16); + usb_mtp_add_u8(d, 0x00); + usb_mtp_add_u16(d, 0x0000); + usb_mtp_add_u32(d, 0x00000000); + usb_mtp_add_u8(d, 0x00); + break; + case PROP_OBJECT_COMPRESSED_SIZE: + usb_mtp_add_u16(d, PROP_OBJECT_COMPRESSED_SIZE); + usb_mtp_add_u16(d, DATA_TYPE_UINT64); + usb_mtp_add_u8(d, 0x00); + usb_mtp_add_u64(d, 0x0000000000000000); + usb_mtp_add_u32(d, 0x00000000); + usb_mtp_add_u8(d, 0x00); + break; + case PROP_PARENT_OBJECT: + usb_mtp_add_u16(d, PROP_PARENT_OBJECT); + usb_mtp_add_u16(d, DATA_TYPE_UINT32); + usb_mtp_add_u8(d, 0x00); + usb_mtp_add_u32(d, 0x00000000); + usb_mtp_add_u32(d, 0x00000000); + usb_mtp_add_u8(d, 0x00); + break; + case PROP_PERSISTENT_UNIQUE_OBJECT_IDENTIFIER: + usb_mtp_add_u16(d, PROP_PERSISTENT_UNIQUE_OBJECT_IDENTIFIER); + usb_mtp_add_u16(d, DATA_TYPE_UINT128); + usb_mtp_add_u8(d, 0x00); + usb_mtp_add_u64(d, 0x0000000000000000); + usb_mtp_add_u64(d, 0x0000000000000000); + usb_mtp_add_u32(d, 0x00000000); + usb_mtp_add_u8(d, 0x00); + break; + case PROP_NAME: + usb_mtp_add_u16(d, 
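+        /* each ObjectPropDesc is: property code, data type, get/set
+         * flag, default value, group code, form flag */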
PROP_NAME);
+        usb_mtp_add_u16(d, DATA_TYPE_STRING);
+        usb_mtp_add_u8(d, 0x00);
+        usb_mtp_add_u8(d, 0x00);
+        usb_mtp_add_u32(d, 0x00000000);
+        usb_mtp_add_u8(d, 0x00);
+        break;
+    default:
+        usb_mtp_data_free(d);
+        return NULL;
+    }
+
+    return d;
+}
+
+static MTPData *usb_mtp_get_object_prop_value(MTPState *s, MTPControl *c,
+                                              MTPObject *o)
+{
+    MTPData *d = usb_mtp_data_alloc(c);
+    switch (c->argv[1]) {
+    case PROP_STORAGE_ID:
+        usb_mtp_add_u32(d, QEMU_STORAGE_ID);
+        break;
+    case PROP_OBJECT_FORMAT:
+        usb_mtp_add_u16(d, o->format);
+        break;
+    case PROP_OBJECT_COMPRESSED_SIZE:
+        usb_mtp_add_u64(d, o->stat.st_size);
+        break;
+    case PROP_PARENT_OBJECT:
+        if (o->parent == NULL) {
+            usb_mtp_add_u32(d, 0x00000000);
+        } else {
+            usb_mtp_add_u32(d, o->parent->handle);
+        }
+        break;
+    case PROP_PERSISTENT_UNIQUE_OBJECT_IDENTIFIER:
+        /* Should be persistent between sessions,
+         * but using our object ID is "good enough"
+         * for now */
+        usb_mtp_add_u64(d, 0x0000000000000000);
+        usb_mtp_add_u64(d, o->handle);
+        break;
+    case PROP_NAME:
+        usb_mtp_add_str(d, o->name);
+        break;
+    default:
+        usb_mtp_data_free(d);
+        return NULL;
+    }
+
+    return d;
+}
+
+/*
+ * Return values when object @o is deleted.
+ * If at least one of the deletions succeeded,
+ * DELETE_SUCCESS is set, and if at least one
+ * of the deletions failed, DELETE_FAILURE is
+ * set. Both bits being set (DELETE_PARTIAL)
+ * signifies that RES_PARTIAL_DELETE is sent
+ * back to the initiator.
+ */
+enum {
+    DELETE_SUCCESS = (1 << 0),
+    DELETE_FAILURE = (1 << 1),
+    DELETE_PARTIAL = (DELETE_FAILURE | DELETE_SUCCESS),
+};
+
+static int usb_mtp_deletefn(MTPState *s, MTPObject *o, uint32_t trans)
+{
+    MTPObject *iter, *iter2;
+    int ret = 0;
+
+    /*
+     * TODO: Add support for Protection Status
+     */
+
+    QLIST_FOREACH(iter, &o->children, list) {
+        if (iter->format == FMT_ASSOCIATION) {
+            QLIST_FOREACH(iter2, &iter->children, list) {
+                ret |= usb_mtp_deletefn(s, iter2, trans);
+            }
+        }
+    }
+
+    if (o->format == FMT_UNDEFINED_OBJECT) {
+        if (remove(o->path)) {
+            ret |= DELETE_FAILURE;
+        } else {
+            usb_mtp_object_free(s, o);
+            ret |= DELETE_SUCCESS;
+        }
+    } else if (o->format == FMT_ASSOCIATION) {
+        if (rmdir(o->path)) {
+            ret |= DELETE_FAILURE;
+        } else {
+            usb_mtp_object_free(s, o);
+            ret |= DELETE_SUCCESS;
+        }
+    }
+
+    return ret;
+}
+
+static void usb_mtp_object_delete(MTPState *s, uint32_t handle,
+                                  uint32_t format_code, uint32_t trans)
+{
+    MTPObject *o;
+    int ret;
+
+    /* Return error if store is read-only */
+    if (!FLAG_SET(s, MTP_FLAG_WRITABLE)) {
+        usb_mtp_queue_result(s, RES_STORE_READ_ONLY,
+                             trans, 0, 0, 0, 0);
+        return;
+    }
+
+    if (format_code != 0) {
+        usb_mtp_queue_result(s, RES_SPEC_BY_FORMAT_UNSUPPORTED,
+                             trans, 0, 0, 0, 0);
+        return;
+    }
+
+    if (handle == 0xFFFFFFFF) {
+        /* "delete all": start from the root object */
+        o = QTAILQ_FIRST(&s->objects);
+    } else {
+        o = usb_mtp_object_lookup(s, handle);
+    }
+    if (o == NULL) {
+        usb_mtp_queue_result(s, RES_INVALID_OBJECT_HANDLE,
+                             trans, 0, 0, 0, 0);
+        return;
+    }
+
+    ret = usb_mtp_deletefn(s, o, trans);
+    switch (ret) {
+    case DELETE_SUCCESS:
+        usb_mtp_queue_result(s, RES_OK, trans,
+                             0, 0, 0, 0);
+        break;
+    case DELETE_FAILURE:
+        usb_mtp_queue_result(s, RES_PARTIAL_DELETE,
+                             trans, 0, 0, 0, 0);
+        break;
+    case DELETE_PARTIAL:
+        usb_mtp_queue_result(s, RES_PARTIAL_DELETE,
+                             trans, 0, 0, 0, 0);
+        break;
+    default:
+        g_assert_not_reached();
+    }
+
+    return;
+}
+
+static void usb_mtp_command(MTPState *s, MTPControl *c)
+{
+    MTPData *data_in = NULL;
+    MTPObject *o = NULL;
+    uint32_t nres = 0, res0 = 0;
+    Error *err = NULL;
+
+    /* sanity checks: everything except GetDeviceInfo and OpenSession
+     * requires an open session */
+    if
(c->code >= CMD_CLOSE_SESSION && s->session == 0) { + usb_mtp_queue_result(s, RES_SESSION_NOT_OPEN, + c->trans, 0, 0, 0, 0); + return; + } + + /* process commands */ + switch (c->code) { + case CMD_GET_DEVICE_INFO: + data_in = usb_mtp_get_device_info(s, c); + break; + case CMD_OPEN_SESSION: + if (s->session) { + usb_mtp_queue_result(s, RES_SESSION_ALREADY_OPEN, + c->trans, 1, s->session, 0, 0); + return; + } + if (c->argv[0] == 0) { + usb_mtp_queue_result(s, RES_INVALID_PARAMETER, + c->trans, 0, 0, 0, 0); + return; + } + trace_usb_mtp_op_open_session(s->dev.addr); + s->session = c->argv[0]; + usb_mtp_object_alloc(s, s->next_handle++, NULL, s->root); + + s->file_monitor = qemu_file_monitor_new(&err); + if (err) { + error_reportf_err(err, + "usb-mtp: file monitoring init failed: "); + } else { + QTAILQ_INIT(&s->events); + } + break; + case CMD_CLOSE_SESSION: + trace_usb_mtp_op_close_session(s->dev.addr); + s->session = 0; + s->next_handle = 0; + usb_mtp_file_monitor_cleanup(s); + usb_mtp_object_free(s, QTAILQ_FIRST(&s->objects)); + assert(QTAILQ_EMPTY(&s->objects)); + break; + case CMD_GET_STORAGE_IDS: + data_in = usb_mtp_get_storage_ids(s, c); + break; + case CMD_GET_STORAGE_INFO: + if (c->argv[0] != QEMU_STORAGE_ID && + c->argv[0] != 0xffffffff) { + usb_mtp_queue_result(s, RES_INVALID_STORAGE_ID, + c->trans, 0, 0, 0, 0); + return; + } + data_in = usb_mtp_get_storage_info(s, c); + break; + case CMD_GET_NUM_OBJECTS: + case CMD_GET_OBJECT_HANDLES: + if (c->argv[0] != QEMU_STORAGE_ID && + c->argv[0] != 0xffffffff) { + usb_mtp_queue_result(s, RES_INVALID_STORAGE_ID, + c->trans, 0, 0, 0, 0); + return; + } + if (c->argv[1] != 0x00000000) { + usb_mtp_queue_result(s, RES_SPEC_BY_FORMAT_UNSUPPORTED, + c->trans, 0, 0, 0, 0); + return; + } + if (c->argv[2] == 0x00000000 || + c->argv[2] == 0xffffffff) { + o = QTAILQ_FIRST(&s->objects); + } else { + o = usb_mtp_object_lookup(s, c->argv[2]); + } + if (o == NULL) { + usb_mtp_queue_result(s, RES_INVALID_OBJECT_HANDLE, + c->trans, 0, 0, 0, 0); + return; + } + if (o->format != FMT_ASSOCIATION) { + usb_mtp_queue_result(s, RES_INVALID_PARENT_OBJECT, + c->trans, 0, 0, 0, 0); + return; + } + usb_mtp_object_readdir(s, o); + if (c->code == CMD_GET_NUM_OBJECTS) { + trace_usb_mtp_op_get_num_objects(s->dev.addr, o->handle, o->path); + nres = 1; + res0 = o->nchildren; + } else { + data_in = usb_mtp_get_object_handles(s, c, o); + } + break; + case CMD_GET_OBJECT_INFO: + o = usb_mtp_object_lookup(s, c->argv[0]); + if (o == NULL) { + usb_mtp_queue_result(s, RES_INVALID_OBJECT_HANDLE, + c->trans, 0, 0, 0, 0); + return; + } + data_in = usb_mtp_get_object_info(s, c, o); + break; + case CMD_GET_OBJECT: + o = usb_mtp_object_lookup(s, c->argv[0]); + if (o == NULL) { + usb_mtp_queue_result(s, RES_INVALID_OBJECT_HANDLE, + c->trans, 0, 0, 0, 0); + return; + } + if (o->format == FMT_ASSOCIATION) { + usb_mtp_queue_result(s, RES_INVALID_OBJECT_HANDLE, + c->trans, 0, 0, 0, 0); + return; + } + data_in = usb_mtp_get_object(s, c, o); + if (data_in == NULL) { + usb_mtp_queue_result(s, RES_GENERAL_ERROR, + c->trans, 0, 0, 0, 0); + return; + } + break; + case CMD_DELETE_OBJECT: + usb_mtp_object_delete(s, c->argv[0], c->argv[1], c->trans); + return; + case CMD_GET_PARTIAL_OBJECT: + o = usb_mtp_object_lookup(s, c->argv[0]); + if (o == NULL) { + usb_mtp_queue_result(s, RES_INVALID_OBJECT_HANDLE, + c->trans, 0, 0, 0, 0); + return; + } + if (o->format == FMT_ASSOCIATION) { + usb_mtp_queue_result(s, RES_INVALID_OBJECT_HANDLE, + c->trans, 0, 0, 0, 0); + return; + } + data_in = 
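+            /* GetPartialObject: argv[1] is the byte offset into the file,
+             * argv[2] the maximum number of bytes to return */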
usb_mtp_get_partial_object(s, c, o); + if (data_in == NULL) { + usb_mtp_queue_result(s, RES_GENERAL_ERROR, + c->trans, 0, 0, 0, 0); + return; + } + nres = 1; + res0 = data_in->length; + break; + case CMD_SEND_OBJECT_INFO: + /* Return error if store is read-only */ + if (!FLAG_SET(s, MTP_FLAG_WRITABLE)) { + usb_mtp_queue_result(s, RES_STORE_READ_ONLY, + c->trans, 0, 0, 0, 0); + } else if (c->argv[0] && (c->argv[0] != QEMU_STORAGE_ID)) { + /* First parameter points to storage id or is 0 */ + usb_mtp_queue_result(s, RES_STORE_NOT_AVAILABLE, c->trans, + 0, 0, 0, 0); + } else if (c->argv[1] && !c->argv[0]) { + /* If second parameter is specified, first must also be specified */ + usb_mtp_queue_result(s, RES_DESTINATION_UNSUPPORTED, c->trans, + 0, 0, 0, 0); + } else { + uint32_t handle = c->argv[1]; + if (handle == 0xFFFFFFFF || handle == 0) { + /* root object */ + o = QTAILQ_FIRST(&s->objects); + } else { + o = usb_mtp_object_lookup(s, handle); + } + if (o == NULL) { + usb_mtp_queue_result(s, RES_INVALID_OBJECT_HANDLE, c->trans, + 0, 0, 0, 0); + } else if (o->format != FMT_ASSOCIATION) { + usb_mtp_queue_result(s, RES_INVALID_PARENT_OBJECT, c->trans, + 0, 0, 0, 0); + } + } + if (o) { + s->dataset.parent_handle = o->handle; + } + s->data_out = usb_mtp_data_alloc(c); + return; + case CMD_SEND_OBJECT: + if (!FLAG_SET(s, MTP_FLAG_WRITABLE)) { + usb_mtp_queue_result(s, RES_STORE_READ_ONLY, + c->trans, 0, 0, 0, 0); + return; + } + if (!s->write_pending) { + usb_mtp_queue_result(s, RES_INVALID_OBJECTINFO, + c->trans, 0, 0, 0, 0); + return; + } + s->data_out = usb_mtp_data_alloc(c); + return; + case CMD_GET_OBJECT_PROPS_SUPPORTED: + if (c->argv[0] != FMT_UNDEFINED_OBJECT && + c->argv[0] != FMT_ASSOCIATION) { + usb_mtp_queue_result(s, RES_INVALID_OBJECT_FORMAT_CODE, + c->trans, 0, 0, 0, 0); + return; + } + data_in = usb_mtp_get_object_props_supported(s, c); + break; + case CMD_GET_OBJECT_PROP_DESC: + if (c->argv[1] != FMT_UNDEFINED_OBJECT && + c->argv[1] != FMT_ASSOCIATION) { + usb_mtp_queue_result(s, RES_INVALID_OBJECT_FORMAT_CODE, + c->trans, 0, 0, 0, 0); + return; + } + data_in = usb_mtp_get_object_prop_desc(s, c); + if (data_in == NULL) { + usb_mtp_queue_result(s, RES_INVALID_OBJECT_PROP_CODE, + c->trans, 0, 0, 0, 0); + return; + } + break; + case CMD_GET_OBJECT_PROP_VALUE: + o = usb_mtp_object_lookup(s, c->argv[0]); + if (o == NULL) { + usb_mtp_queue_result(s, RES_INVALID_OBJECT_HANDLE, + c->trans, 0, 0, 0, 0); + return; + } + data_in = usb_mtp_get_object_prop_value(s, c, o); + if (data_in == NULL) { + usb_mtp_queue_result(s, RES_INVALID_OBJECT_PROP_CODE, + c->trans, 0, 0, 0, 0); + return; + } + break; + default: + trace_usb_mtp_op_unknown(s->dev.addr, c->code); + usb_mtp_queue_result(s, RES_OPERATION_NOT_SUPPORTED, + c->trans, 0, 0, 0, 0); + return; + } + + /* return results on success */ + if (data_in) { + assert(s->data_in == NULL); + s->data_in = data_in; + } + usb_mtp_queue_result(s, RES_OK, c->trans, nres, res0, 0, 0); +} + +/* ----------------------------------------------------------------------- */ + +static void usb_mtp_handle_reset(USBDevice *dev) +{ + MTPState *s = USB_MTP(dev); + + trace_usb_mtp_reset(s->dev.addr); + + usb_mtp_file_monitor_cleanup(s); + usb_mtp_object_free(s, QTAILQ_FIRST(&s->objects)); + s->session = 0; + usb_mtp_data_free(s->data_in); + s->data_in = NULL; + usb_mtp_data_free(s->data_out); + s->data_out = NULL; + g_free(s->result); + s->result = NULL; +} + +static void usb_mtp_handle_control(USBDevice *dev, USBPacket *p, + int request, int value, int index, + int 
length, uint8_t *data)
+{
+    int ret;
+    MTPState *s = USB_MTP(dev);
+    uint16_t *event = (uint16_t *)data;
+
+    switch (request) {
+    case ClassInterfaceOutRequest | 0x64:
+        if (*event == EVT_CANCEL_TRANSACTION) {
+            g_free(s->result);
+            s->result = NULL;
+            usb_mtp_data_free(s->data_in);
+            s->data_in = NULL;
+            if (s->write_pending) {
+                g_free(s->dataset.filename);
+                s->write_pending = false;
+                s->dataset.size = 0;
+            }
+            usb_mtp_data_free(s->data_out);
+            s->data_out = NULL;
+        } else {
+            p->status = USB_RET_STALL;
+        }
+        break;
+    default:
+        ret = usb_desc_handle_control(dev, p, request,
+                                      value, index, length, data);
+        if (ret >= 0) {
+            return;
+        }
+    }
+
+    trace_usb_mtp_stall(dev->addr, "unknown control request");
+}
+
+static void usb_mtp_cancel_packet(USBDevice *dev, USBPacket *p)
+{
+    /* we don't use async packets, so this should never be called */
+    fprintf(stderr, "%s\n", __func__);
+}
+
+static char *utf16_to_str(uint8_t len, uint8_t *str16)
+{
+    wchar_t *wstr = g_new0(wchar_t, len + 1);
+    int count, dlen;
+    char *dest;
+
+    for (count = 0; count < len; count++) {
+        /* FIXME: not working for surrogate pairs */
+        wstr[count] = lduw_le_p(str16 + (count * 2));
+    }
+    wstr[count] = 0;
+
+    dlen = wcstombs(NULL, wstr, 0) + 1;
+    dest = g_malloc(dlen);
+    wcstombs(dest, wstr, dlen);
+    g_free(wstr);
+    return dest;
+}
+
+/* Wrapper around write, returns 0 on failure */
+static uint64_t write_retry(int fd, void *buf, uint64_t size, off_t offset)
+{
+    uint64_t ret = 0;
+
+    if (lseek(fd, offset, SEEK_SET) < 0) {
+        goto done;
+    }
+
+    ret = qemu_write_full(fd, buf, size);
+
+done:
+    return ret;
+}
+
+static int usb_mtp_update_object(MTPObject *parent, char *name)
+{
+    int ret = 0;
+
+    MTPObject *o =
+        usb_mtp_object_lookup_name(parent, name, strlen(name));
+
+    if (o) {
+        ret = lstat(o->path, &o->stat);
+    }
+
+    return ret;
+}
+
+static void usb_mtp_write_data(MTPState *s, uint32_t handle)
+{
+    MTPData *d = s->data_out;
+    MTPObject *parent =
+        usb_mtp_object_lookup(s, s->dataset.parent_handle);
+    char *path = NULL;
+    uint64_t rc;
+    mode_t mask = 0644;
+    int ret = 0;
+
+    assert(d != NULL);
+
+    switch (d->write_status) {
+    case WRITE_START:
+        if (!parent || !s->write_pending) {
+            usb_mtp_queue_result(s, RES_INVALID_OBJECTINFO, d->trans,
+                                 0, 0, 0, 0);
+            return;
+        }
+
+        if (s->dataset.filename) {
+            path = g_strdup_printf("%s/%s", parent->path, s->dataset.filename);
+            if (s->dataset.format == FMT_ASSOCIATION) {
+                ret = mkdir(path, mask);
+                if (!ret) {
+                    usb_mtp_queue_result(s, RES_OK, d->trans, 3,
+                                         QEMU_STORAGE_ID,
+                                         s->dataset.parent_handle,
+                                         handle);
+                    goto close;
+                }
+                goto done;
+            }
+
+            d->fd = open(path, O_CREAT | O_WRONLY |
+                         O_CLOEXEC | O_NOFOLLOW, mask);
+            if (d->fd == -1) {
+                ret = 1;
+                goto done;
+            }
+
+            /* Return success if the initiator sent 0-sized data */
+            if (!s->dataset.size) {
+                goto done;
+            }
+            if (d->length != MTP_WRITE_BUF_SZ && !d->pending) {
+                d->write_status = WRITE_END;
+            }
+        }
+        /* fall through */
+    case WRITE_CONTINUE:
+    case WRITE_END:
+        rc = write_retry(d->fd, d->data, d->data_offset,
+                         d->offset - d->data_offset);
+        if (rc != d->data_offset) {
+            ret = 1;
+            goto done;
+        }
+        if (d->write_status != WRITE_END) {
+            g_free(path);
+            return;
+        } else {
+            /*
+             * Return an incomplete transfer if the file size doesn't
+             * match (for files smaller than 4G), or if lstat fails,
+             * which would leave us with an incorrect file size
+             */
+            if ((s->dataset.size != 0xFFFFFFFF &&
+                 d->offset != s->dataset.size) ||
+                usb_mtp_update_object(parent, s->dataset.filename)) {
+                usb_mtp_queue_result(s, RES_INCOMPLETE_TRANSFER, d->trans,
0, 0, 0, 0);
+                goto close;
+            }
+        }
+    }
+
+done:
+    if (ret) {
+        usb_mtp_queue_result(s, RES_STORE_FULL, d->trans,
+                             0, 0, 0, 0);
+    } else {
+        usb_mtp_queue_result(s, RES_OK, d->trans,
+                             0, 0, 0, 0);
+    }
+close:
+    /*
+     * The write dataset is kept around and freed only
+     * on success or if another write request comes in
+     */
+    if (d->fd != -1) {
+        close(d->fd);
+        d->fd = -1;
+    }
+    g_free(s->dataset.filename);
+    s->dataset.size = 0;
+    g_free(path);
+    s->write_pending = false;
+}
+
+static void usb_mtp_write_metadata(MTPState *s, uint64_t dlen)
+{
+    MTPData *d = s->data_out;
+    ObjectInfo *dataset = (ObjectInfo *)d->data;
+    char *filename;
+    MTPObject *o;
+    MTPObject *p = usb_mtp_object_lookup(s, s->dataset.parent_handle);
+    uint32_t next_handle = s->next_handle;
+    size_t filename_chars = dlen - offsetof(ObjectInfo, filename);
+
+    /*
+     * The filename is UTF-16. We're intentionally doing
+     * integer division to truncate if a malicious guest
+     * sent an odd number of bytes.
+     */
+    filename_chars /= 2;
+
+    assert(!s->write_pending);
+    assert(p != NULL);
+
+    filename = utf16_to_str(MIN(dataset->length, filename_chars),
+                            dataset->filename);
+
+    if (strchr(filename, '/')) {
+        usb_mtp_queue_result(s, RES_PARAMETER_NOT_SUPPORTED, d->trans,
+                             0, 0, 0, 0);
+        g_free(filename);
+        return;
+    }
+
+    o = usb_mtp_object_lookup_name(p, filename, -1);
+    if (o != NULL) {
+        next_handle = o->handle;
+    }
+
+    s->dataset.filename = filename;
+    s->dataset.format = dataset->format;
+    s->dataset.size = dataset->size;
+    s->write_pending = true;
+
+    if (s->dataset.format == FMT_ASSOCIATION) {
+        usb_mtp_write_data(s, next_handle);
+    } else {
+        usb_mtp_queue_result(s, RES_OK, d->trans, 3, QEMU_STORAGE_ID,
+                             s->dataset.parent_handle, next_handle);
+    }
+}
+
+static void usb_mtp_get_data(MTPState *s, mtp_container *container,
+                             USBPacket *p)
+{
+    MTPData *d = s->data_out;
+    uint64_t dlen;
+    uint32_t data_len = p->iov.size;
+    uint64_t total_len;
+
+    if (!d) {
+        usb_mtp_queue_result(s, RES_INVALID_OBJECTINFO, 0,
+                             0, 0, 0, 0);
+        return;
+    }
+    if (d->first) {
+        /* Total length of incoming data */
+        total_len = le32_to_cpu(container->length) - sizeof(mtp_container);
+        /* Length of data in this packet */
+        data_len -= sizeof(mtp_container);
+        if (total_len < MTP_WRITE_BUF_SZ) {
+            usb_mtp_realloc(d, total_len);
+            d->length += total_len;
+        } else {
+            usb_mtp_realloc(d, MTP_WRITE_BUF_SZ - sizeof(mtp_container));
+            d->length += MTP_WRITE_BUF_SZ - sizeof(mtp_container);
+        }
+        d->offset = 0;
+        d->first = false;
+        d->pending = false;
+        d->data_offset = 0;
+        d->write_status = WRITE_START;
+    }
+
+    if (d->pending) {
+        memset(d->data, 0, d->length);
+        if (d->length != MTP_WRITE_BUF_SZ) {
+            usb_mtp_realloc(d, MTP_WRITE_BUF_SZ - d->length);
+            d->length += (MTP_WRITE_BUF_SZ - d->length);
+        }
+        d->pending = false;
+        d->write_status = WRITE_CONTINUE;
+        d->data_offset = 0;
+    }
+
+    if (d->length - d->data_offset > data_len) {
+        dlen = data_len;
+    } else {
+        dlen = d->length - d->data_offset;
+    }
+
+    switch (d->code) {
+    case CMD_SEND_OBJECT_INFO:
+        usb_packet_copy(p, d->data + d->data_offset, dlen);
+        d->offset += dlen;
+        d->data_offset += dlen;
+        if (d->data_offset == d->length) {
+            /* The operation might have already failed */
+            if (!s->result) {
+                usb_mtp_write_metadata(s, dlen);
+            }
+            usb_mtp_data_free(s->data_out);
+            s->data_out = NULL;
+            return;
+        }
+        break;
+    case CMD_SEND_OBJECT:
+        usb_packet_copy(p, d->data + d->data_offset, dlen);
+        d->offset += dlen;
+        d->data_offset += dlen;
+        /* a short or zero-length packet marks the end of the transfer */
+        if ((p->iov.size % 64) || !p->iov.size) {
+            assert((s->dataset.size ==
0xFFFFFFFF) || + (s->dataset.size == d->offset)); + + if (d->length == MTP_WRITE_BUF_SZ) { + d->write_status = WRITE_END; + } else { + d->write_status = WRITE_START; + } + usb_mtp_write_data(s, 0); + usb_mtp_data_free(s->data_out); + s->data_out = NULL; + return; + } + if (d->data_offset == d->length) { + d->pending = true; + usb_mtp_write_data(s, 0); + } + break; + default: + p->status = USB_RET_STALL; + return; + } +} + +static void usb_mtp_handle_data(USBDevice *dev, USBPacket *p) +{ + MTPState *s = USB_MTP(dev); + MTPControl cmd; + mtp_container container; + uint32_t params[5]; + uint16_t container_type; + int i, rc; + + switch (p->ep->nr) { + case EP_DATA_IN: + if (s->data_out != NULL) { + /* guest bug */ + trace_usb_mtp_stall(s->dev.addr, "awaiting data-out"); + p->status = USB_RET_STALL; + return; + } + if (p->iov.size < sizeof(container)) { + trace_usb_mtp_stall(s->dev.addr, "packet too small"); + p->status = USB_RET_STALL; + return; + } + if (s->data_in != NULL) { + MTPData *d = s->data_in; + uint64_t dlen = d->length - d->offset; + if (d->first) { + trace_usb_mtp_data_in(s->dev.addr, d->trans, d->length); + if (d->length + sizeof(container) > 0xFFFFFFFF) { + container.length = cpu_to_le32(0xFFFFFFFF); + } else { + container.length = + cpu_to_le32(d->length + sizeof(container)); + } + container.type = cpu_to_le16(TYPE_DATA); + container.code = cpu_to_le16(d->code); + container.trans = cpu_to_le32(d->trans); + usb_packet_copy(p, &container, sizeof(container)); + d->first = false; + if (dlen > p->iov.size - sizeof(container)) { + dlen = p->iov.size - sizeof(container); + } + } else { + if (dlen > p->iov.size) { + dlen = p->iov.size; + } + } + if (d->fd == -1) { + usb_packet_copy(p, d->data + d->offset, dlen); + } else { + if (d->alloc < p->iov.size) { + d->alloc = p->iov.size; + d->data = g_realloc(d->data, d->alloc); + } + rc = read(d->fd, d->data, dlen); + if (rc != dlen) { + memset(d->data, 0, dlen); + s->result->code = RES_INCOMPLETE_TRANSFER; + } + usb_packet_copy(p, d->data, dlen); + } + d->offset += dlen; + if (d->offset == d->length) { + usb_mtp_data_free(s->data_in); + s->data_in = NULL; + } + } else if (s->result != NULL) { + MTPControl *r = s->result; + int length = sizeof(container) + r->argc * sizeof(uint32_t); + if (r->code == RES_OK) { + trace_usb_mtp_success(s->dev.addr, r->trans, + (r->argc > 0) ? r->argv[0] : 0, + (r->argc > 1) ? r->argv[1] : 0); + } else { + trace_usb_mtp_error(s->dev.addr, r->code, r->trans, + (r->argc > 0) ? r->argv[0] : 0, + (r->argc > 1) ? 
r->argv[1] : 0);
+            }
+            container.length = cpu_to_le32(length);
+            container.type = cpu_to_le16(TYPE_RESPONSE);
+            container.code = cpu_to_le16(r->code);
+            container.trans = cpu_to_le32(r->trans);
+            for (i = 0; i < r->argc; i++) {
+                params[i] = cpu_to_le32(r->argv[i]);
+            }
+            usb_packet_copy(p, &container, sizeof(container));
+            usb_packet_copy(p, &params, length - sizeof(container));
+            g_free(s->result);
+            s->result = NULL;
+        }
+        break;
+    case EP_DATA_OUT:
+        if (p->iov.size < sizeof(container)) {
+            trace_usb_mtp_stall(s->dev.addr, "packet too small");
+            p->status = USB_RET_STALL;
+            return;
+        }
+        if ((s->data_out != NULL) && !s->data_out->first) {
+            container_type = TYPE_DATA;
+        } else {
+            usb_packet_copy(p, &container, sizeof(container));
+            container_type = le16_to_cpu(container.type);
+        }
+        switch (container_type) {
+        case TYPE_COMMAND:
+            if (s->data_in || s->data_out || s->result) {
+                trace_usb_mtp_stall(s->dev.addr, "transaction inflight");
+                p->status = USB_RET_STALL;
+                return;
+            }
+            cmd.code = le16_to_cpu(container.code);
+            cmd.argc = (le32_to_cpu(container.length) - sizeof(container))
+                / sizeof(uint32_t);
+            cmd.trans = le32_to_cpu(container.trans);
+            if (cmd.argc > ARRAY_SIZE(cmd.argv)) {
+                cmd.argc = ARRAY_SIZE(cmd.argv);
+            }
+            if (p->iov.size < sizeof(container) + cmd.argc * sizeof(uint32_t)) {
+                trace_usb_mtp_stall(s->dev.addr, "packet too small");
+                p->status = USB_RET_STALL;
+                return;
+            }
+            usb_packet_copy(p, &params, cmd.argc * sizeof(uint32_t));
+            for (i = 0; i < cmd.argc; i++) {
+                cmd.argv[i] = le32_to_cpu(params[i]);
+            }
+            trace_usb_mtp_command(s->dev.addr, cmd.code, cmd.trans,
+                                  (cmd.argc > 0) ? cmd.argv[0] : 0,
+                                  (cmd.argc > 1) ? cmd.argv[1] : 0,
+                                  (cmd.argc > 2) ? cmd.argv[2] : 0,
+                                  (cmd.argc > 3) ? cmd.argv[3] : 0,
+                                  (cmd.argc > 4) ? cmd.argv[4] : 0);
+            usb_mtp_command(s, &cmd);
+            break;
+        case TYPE_DATA:
+            /* One of the previous transfers has already failed, but the
+             * initiator is still sending data associated with it
+             */
+            if (s->result != NULL) {
+                return;
+            }
+            usb_mtp_get_data(s, &container, p);
+            break;
+        default:
+            /* not needed as long as the mtp device is read-only */
+            p->status = USB_RET_STALL;
+            return;
+        }
+        break;
+    case EP_EVENT:
+        if (!QTAILQ_EMPTY(&s->events)) {
+            struct MTPMonEntry *e = QTAILQ_LAST(&s->events);
+            uint32_t handle;
+            int len = sizeof(container) + sizeof(uint32_t);
+
+            if (p->iov.size < len) {
+                trace_usb_mtp_stall(s->dev.addr,
+                                    "packet too small to send event");
+                p->status = USB_RET_STALL;
+                return;
+            }
+
+            QTAILQ_REMOVE(&s->events, e, next);
+            container.length = cpu_to_le32(len);
+            container.type = cpu_to_le16(TYPE_EVENT);
+            container.code = cpu_to_le16(e->event);
+            container.trans = 0; /* no trans specific events */
+            handle = cpu_to_le32(e->handle);
+            usb_packet_copy(p, &container, sizeof(container));
+            usb_packet_copy(p, &handle, sizeof(uint32_t));
+            g_free(e);
+            return;
+        }
+        p->status = USB_RET_NAK;
+        return;
+    default:
+        trace_usb_mtp_stall(s->dev.addr, "invalid endpoint");
+        p->status = USB_RET_STALL;
+        return;
+    }
+
+    if (p->actual_length == 0) {
+        trace_usb_mtp_nak(s->dev.addr, p->ep->nr);
+        p->status = USB_RET_NAK;
+        return;
+    } else {
+        trace_usb_mtp_xfer(s->dev.addr, p->ep->nr, p->actual_length,
+                           p->iov.size);
+        return;
+    }
+}
+
+static void usb_mtp_realize(USBDevice *dev, Error **errp)
+{
+    MTPState *s = USB_MTP(dev);
+
+    if ((s->root == NULL) || !g_path_is_absolute(s->root)) {
+        error_setg(errp, "usb-mtp: rootdir must be configured and be an absolute path");
+        return;
+    }
+
+    if (access(s->root, R_OK) != 0) {
+        error_setg(errp,
"usb-mtp: rootdir does not exist/not readable"); + return; + } else if (!s->readonly && access(s->root, W_OK) != 0) { + error_setg(errp, "usb-mtp: rootdir does not have write permissions"); + return; + } + + /* Mark store as RW */ + if (!s->readonly) { + s->flags |= (1 << MTP_FLAG_WRITABLE); + } + + if (s->desc == NULL) { + /* + * This does not check if path exists + * but we have the checks above + */ + s->desc = g_path_get_basename(s->root); + } + + usb_desc_create_serial(dev); + usb_desc_init(dev); + QTAILQ_INIT(&s->objects); + +} + +static const VMStateDescription vmstate_usb_mtp = { + .name = "usb-mtp", + .unmigratable = 1, + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_USB_DEVICE(dev, MTPState), + VMSTATE_END_OF_LIST() + } +}; + +static Property mtp_properties[] = { + DEFINE_PROP_STRING("rootdir", MTPState, root), + DEFINE_PROP_STRING("desc", MTPState, desc), + DEFINE_PROP_BOOL("readonly", MTPState, readonly, true), + DEFINE_PROP_END_OF_LIST(), +}; + +static void usb_mtp_class_initfn(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + USBDeviceClass *uc = USB_DEVICE_CLASS(klass); + + uc->realize = usb_mtp_realize; + uc->product_desc = "QEMU USB MTP"; + uc->usb_desc = &desc; + uc->cancel_packet = usb_mtp_cancel_packet; + uc->handle_attach = usb_desc_attach; + uc->handle_reset = usb_mtp_handle_reset; + uc->handle_control = usb_mtp_handle_control; + uc->handle_data = usb_mtp_handle_data; + set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); + dc->desc = "USB Media Transfer Protocol device"; + dc->fw_name = "mtp"; + dc->vmsd = &vmstate_usb_mtp; + device_class_set_props(dc, mtp_properties); +} + +static TypeInfo mtp_info = { + .name = TYPE_USB_MTP, + .parent = TYPE_USB_DEVICE, + .instance_size = sizeof(MTPState), + .class_init = usb_mtp_class_initfn, +}; + +static void usb_mtp_register_types(void) +{ + type_register_static(&mtp_info); +} + +type_init(usb_mtp_register_types) diff --git a/hw/usb/dev-network.c b/hw/usb/dev-network.c new file mode 100644 index 000000000..6c49c1601 --- /dev/null +++ b/hw/usb/dev-network.c @@ -0,0 +1,1429 @@ +/* + * QEMU USB Net devices + * + * Copyright (c) 2006 Thomas Sailer + * Copyright (c) 2008 Andrzej Zaborowski + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "hw/qdev-properties.h" +#include "hw/usb.h" +#include "migration/vmstate.h" +#include "desc.h" +#include "net/net.h" +#include "qemu/error-report.h" +#include "qemu/queue.h" +#include "qemu/config-file.h" +#include "sysemu/sysemu.h" +#include "qemu/iov.h" +#include "qemu/module.h" +#include "qemu/cutils.h" +#include "qom/object.h" + +/*#define TRAFFIC_DEBUG*/ +/* Thanks to NetChip Technologies for donating this product ID. + * It's for devices with only CDC Ethernet configurations. + */ +#define CDC_VENDOR_NUM 0x0525 /* NetChip */ +#define CDC_PRODUCT_NUM 0xa4a1 /* Linux-USB Ethernet Gadget */ +/* For hardware that can talk RNDIS and either of the above protocols, + * use this ID ... the windows INF files will know it. + */ +#define RNDIS_VENDOR_NUM 0x0525 /* NetChip */ +#define RNDIS_PRODUCT_NUM 0xa4a2 /* Ethernet/RNDIS Gadget */ + +enum usbstring_idx { + STRING_MANUFACTURER = 1, + STRING_PRODUCT, + STRING_ETHADDR, + STRING_DATA, + STRING_CONTROL, + STRING_RNDIS_CONTROL, + STRING_CDC, + STRING_SUBSET, + STRING_RNDIS, + STRING_SERIALNUMBER, +}; + +#define DEV_CONFIG_VALUE 1 /* CDC or a subset */ +#define DEV_RNDIS_CONFIG_VALUE 2 /* RNDIS; optional */ + +#define USB_CDC_SUBCLASS_ACM 0x02 +#define USB_CDC_SUBCLASS_ETHERNET 0x06 + +#define USB_CDC_PROTO_NONE 0 +#define USB_CDC_ACM_PROTO_VENDOR 0xff + +#define USB_CDC_HEADER_TYPE 0x00 /* header_desc */ +#define USB_CDC_CALL_MANAGEMENT_TYPE 0x01 /* call_mgmt_descriptor */ +#define USB_CDC_ACM_TYPE 0x02 /* acm_descriptor */ +#define USB_CDC_UNION_TYPE 0x06 /* union_desc */ +#define USB_CDC_ETHERNET_TYPE 0x0f /* ether_desc */ + +#define USB_CDC_SEND_ENCAPSULATED_COMMAND 0x00 +#define USB_CDC_GET_ENCAPSULATED_RESPONSE 0x01 +#define USB_CDC_REQ_SET_LINE_CODING 0x20 +#define USB_CDC_REQ_GET_LINE_CODING 0x21 +#define USB_CDC_REQ_SET_CONTROL_LINE_STATE 0x22 +#define USB_CDC_REQ_SEND_BREAK 0x23 +#define USB_CDC_SET_ETHERNET_MULTICAST_FILTERS 0x40 +#define USB_CDC_SET_ETHERNET_PM_PATTERN_FILTER 0x41 +#define USB_CDC_GET_ETHERNET_PM_PATTERN_FILTER 0x42 +#define USB_CDC_SET_ETHERNET_PACKET_FILTER 0x43 +#define USB_CDC_GET_ETHERNET_STATISTIC 0x44 + +#define LOG2_STATUS_INTERVAL_MSEC 5 /* 1 << 5 == 32 msec */ +#define STATUS_BYTECOUNT 16 /* 8 byte header + data */ + +#define ETH_FRAME_LEN 1514 /* Max. 
octets in frame sans FCS */ + +static const USBDescStrings usb_net_stringtable = { + [STRING_MANUFACTURER] = "QEMU", + [STRING_PRODUCT] = "RNDIS/QEMU USB Network Device", + [STRING_ETHADDR] = "400102030405", + [STRING_DATA] = "QEMU USB Net Data Interface", + [STRING_CONTROL] = "QEMU USB Net Control Interface", + [STRING_RNDIS_CONTROL] = "QEMU USB Net RNDIS Control Interface", + [STRING_CDC] = "QEMU USB Net CDC", + [STRING_SUBSET] = "QEMU USB Net Subset", + [STRING_RNDIS] = "QEMU USB Net RNDIS", + [STRING_SERIALNUMBER] = "1", +}; + +static const USBDescIface desc_iface_rndis[] = { + { + /* RNDIS Control Interface */ + .bInterfaceNumber = 0, + .bNumEndpoints = 1, + .bInterfaceClass = USB_CLASS_COMM, + .bInterfaceSubClass = USB_CDC_SUBCLASS_ACM, + .bInterfaceProtocol = USB_CDC_ACM_PROTO_VENDOR, + .iInterface = STRING_RNDIS_CONTROL, + .ndesc = 4, + .descs = (USBDescOther[]) { + { + /* Header Descriptor */ + .data = (uint8_t[]) { + 0x05, /* u8 bLength */ + USB_DT_CS_INTERFACE, /* u8 bDescriptorType */ + USB_CDC_HEADER_TYPE, /* u8 bDescriptorSubType */ + 0x10, 0x01, /* le16 bcdCDC */ + }, + },{ + /* Call Management Descriptor */ + .data = (uint8_t[]) { + 0x05, /* u8 bLength */ + USB_DT_CS_INTERFACE, /* u8 bDescriptorType */ + USB_CDC_CALL_MANAGEMENT_TYPE, /* u8 bDescriptorSubType */ + 0x00, /* u8 bmCapabilities */ + 0x01, /* u8 bDataInterface */ + }, + },{ + /* ACM Descriptor */ + .data = (uint8_t[]) { + 0x04, /* u8 bLength */ + USB_DT_CS_INTERFACE, /* u8 bDescriptorType */ + USB_CDC_ACM_TYPE, /* u8 bDescriptorSubType */ + 0x00, /* u8 bmCapabilities */ + }, + },{ + /* Union Descriptor */ + .data = (uint8_t[]) { + 0x05, /* u8 bLength */ + USB_DT_CS_INTERFACE, /* u8 bDescriptorType */ + USB_CDC_UNION_TYPE, /* u8 bDescriptorSubType */ + 0x00, /* u8 bMasterInterface0 */ + 0x01, /* u8 bSlaveInterface0 */ + }, + }, + }, + .eps = (USBDescEndpoint[]) { + { + .bEndpointAddress = USB_DIR_IN | 0x01, + .bmAttributes = USB_ENDPOINT_XFER_INT, + .wMaxPacketSize = STATUS_BYTECOUNT, + .bInterval = 1 << LOG2_STATUS_INTERVAL_MSEC, + }, + } + },{ + /* RNDIS Data Interface */ + .bInterfaceNumber = 1, + .bNumEndpoints = 2, + .bInterfaceClass = USB_CLASS_CDC_DATA, + .iInterface = STRING_DATA, + .eps = (USBDescEndpoint[]) { + { + .bEndpointAddress = USB_DIR_IN | 0x02, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = 0x40, + },{ + .bEndpointAddress = USB_DIR_OUT | 0x02, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = 0x40, + } + } + } +}; + +static const USBDescIface desc_iface_cdc[] = { + { + /* CDC Control Interface */ + .bInterfaceNumber = 0, + .bNumEndpoints = 1, + .bInterfaceClass = USB_CLASS_COMM, + .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET, + .bInterfaceProtocol = USB_CDC_PROTO_NONE, + .iInterface = STRING_CONTROL, + .ndesc = 3, + .descs = (USBDescOther[]) { + { + /* Header Descriptor */ + .data = (uint8_t[]) { + 0x05, /* u8 bLength */ + USB_DT_CS_INTERFACE, /* u8 bDescriptorType */ + USB_CDC_HEADER_TYPE, /* u8 bDescriptorSubType */ + 0x10, 0x01, /* le16 bcdCDC */ + }, + },{ + /* Union Descriptor */ + .data = (uint8_t[]) { + 0x05, /* u8 bLength */ + USB_DT_CS_INTERFACE, /* u8 bDescriptorType */ + USB_CDC_UNION_TYPE, /* u8 bDescriptorSubType */ + 0x00, /* u8 bMasterInterface0 */ + 0x01, /* u8 bSlaveInterface0 */ + }, + },{ + /* Ethernet Descriptor */ + .data = (uint8_t[]) { + 0x0d, /* u8 bLength */ + USB_DT_CS_INTERFACE, /* u8 bDescriptorType */ + USB_CDC_ETHERNET_TYPE, /* u8 bDescriptorSubType */ + STRING_ETHADDR, /* u8 iMACAddress */ + 0x00, 0x00, 0x00, 0x00, /* le32 
bmEthernetStatistics */ + ETH_FRAME_LEN & 0xff, + ETH_FRAME_LEN >> 8, /* le16 wMaxSegmentSize */ + 0x00, 0x00, /* le16 wNumberMCFilters */ + 0x00, /* u8 bNumberPowerFilters */ + }, + }, + }, + .eps = (USBDescEndpoint[]) { + { + .bEndpointAddress = USB_DIR_IN | 0x01, + .bmAttributes = USB_ENDPOINT_XFER_INT, + .wMaxPacketSize = STATUS_BYTECOUNT, + .bInterval = 1 << LOG2_STATUS_INTERVAL_MSEC, + }, + } + },{ + /* CDC Data Interface (off) */ + .bInterfaceNumber = 1, + .bAlternateSetting = 0, + .bNumEndpoints = 0, + .bInterfaceClass = USB_CLASS_CDC_DATA, + },{ + /* CDC Data Interface */ + .bInterfaceNumber = 1, + .bAlternateSetting = 1, + .bNumEndpoints = 2, + .bInterfaceClass = USB_CLASS_CDC_DATA, + .iInterface = STRING_DATA, + .eps = (USBDescEndpoint[]) { + { + .bEndpointAddress = USB_DIR_IN | 0x02, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = 0x40, + },{ + .bEndpointAddress = USB_DIR_OUT | 0x02, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = 0x40, + } + } + } +}; + +static const USBDescDevice desc_device_net = { + .bcdUSB = 0x0200, + .bDeviceClass = USB_CLASS_COMM, + .bMaxPacketSize0 = 0x40, + .bNumConfigurations = 2, + .confs = (USBDescConfig[]) { + { + .bNumInterfaces = 2, + .bConfigurationValue = DEV_RNDIS_CONFIG_VALUE, + .iConfiguration = STRING_RNDIS, + .bmAttributes = USB_CFG_ATT_ONE | USB_CFG_ATT_SELFPOWER, + .bMaxPower = 0x32, + .nif = ARRAY_SIZE(desc_iface_rndis), + .ifs = desc_iface_rndis, + },{ + .bNumInterfaces = 2, + .bConfigurationValue = DEV_CONFIG_VALUE, + .iConfiguration = STRING_CDC, + .bmAttributes = USB_CFG_ATT_ONE | USB_CFG_ATT_SELFPOWER, + .bMaxPower = 0x32, + .nif = ARRAY_SIZE(desc_iface_cdc), + .ifs = desc_iface_cdc, + } + }, +}; + +static const USBDesc desc_net = { + .id = { + .idVendor = RNDIS_VENDOR_NUM, + .idProduct = RNDIS_PRODUCT_NUM, + .bcdDevice = 0, + .iManufacturer = STRING_MANUFACTURER, + .iProduct = STRING_PRODUCT, + .iSerialNumber = STRING_SERIALNUMBER, + }, + .full = &desc_device_net, + .str = usb_net_stringtable, +}; + +/* + * RNDIS Definitions - in theory not specific to USB. 
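+ *
+ * In brief: the host first negotiates parameters with encapsulated
+ * control messages (INITIALIZE, QUERY, SET, ...) carried over the
+ * control endpoint, with completions signalled on the interrupt
+ * endpoint; Ethernet frames then travel over the bulk endpoints,
+ * each prefixed by a struct rndis_packet_msg_type header.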
+ */ +#define RNDIS_MAXIMUM_FRAME_SIZE 1518 +#define RNDIS_MAX_TOTAL_SIZE 1558 + +/* Remote NDIS Versions */ +#define RNDIS_MAJOR_VERSION 1 +#define RNDIS_MINOR_VERSION 0 + +/* Status Values */ +#define RNDIS_STATUS_SUCCESS 0x00000000U /* Success */ +#define RNDIS_STATUS_FAILURE 0xc0000001U /* Unspecified error */ +#define RNDIS_STATUS_INVALID_DATA 0xc0010015U /* Invalid data */ +#define RNDIS_STATUS_NOT_SUPPORTED 0xc00000bbU /* Unsupported request */ +#define RNDIS_STATUS_MEDIA_CONNECT 0x4001000bU /* Device connected */ +#define RNDIS_STATUS_MEDIA_DISCONNECT 0x4001000cU /* Device disconnected */ + +/* Message Set for Connectionless (802.3) Devices */ +enum { + RNDIS_PACKET_MSG = 1, + RNDIS_INITIALIZE_MSG = 2, /* Initialize device */ + RNDIS_HALT_MSG = 3, + RNDIS_QUERY_MSG = 4, + RNDIS_SET_MSG = 5, + RNDIS_RESET_MSG = 6, + RNDIS_INDICATE_STATUS_MSG = 7, + RNDIS_KEEPALIVE_MSG = 8, +}; + +/* Message completion */ +enum { + RNDIS_INITIALIZE_CMPLT = 0x80000002U, + RNDIS_QUERY_CMPLT = 0x80000004U, + RNDIS_SET_CMPLT = 0x80000005U, + RNDIS_RESET_CMPLT = 0x80000006U, + RNDIS_KEEPALIVE_CMPLT = 0x80000008U, +}; + +/* Device Flags */ +enum { + RNDIS_DF_CONNECTIONLESS = 1, + RNDIS_DF_CONNECTIONORIENTED = 2, +}; + +#define RNDIS_MEDIUM_802_3 0x00000000U + +/* from drivers/net/sk98lin/h/skgepnmi.h */ +#define OID_PNP_CAPABILITIES 0xfd010100 +#define OID_PNP_SET_POWER 0xfd010101 +#define OID_PNP_QUERY_POWER 0xfd010102 +#define OID_PNP_ADD_WAKE_UP_PATTERN 0xfd010103 +#define OID_PNP_REMOVE_WAKE_UP_PATTERN 0xfd010104 +#define OID_PNP_ENABLE_WAKE_UP 0xfd010106 + +typedef uint32_t le32; + +typedef struct rndis_init_msg_type { + le32 MessageType; + le32 MessageLength; + le32 RequestID; + le32 MajorVersion; + le32 MinorVersion; + le32 MaxTransferSize; +} rndis_init_msg_type; + +typedef struct rndis_init_cmplt_type { + le32 MessageType; + le32 MessageLength; + le32 RequestID; + le32 Status; + le32 MajorVersion; + le32 MinorVersion; + le32 DeviceFlags; + le32 Medium; + le32 MaxPacketsPerTransfer; + le32 MaxTransferSize; + le32 PacketAlignmentFactor; + le32 AFListOffset; + le32 AFListSize; +} rndis_init_cmplt_type; + +typedef struct rndis_halt_msg_type { + le32 MessageType; + le32 MessageLength; + le32 RequestID; +} rndis_halt_msg_type; + +typedef struct rndis_query_msg_type { + le32 MessageType; + le32 MessageLength; + le32 RequestID; + le32 OID; + le32 InformationBufferLength; + le32 InformationBufferOffset; + le32 DeviceVcHandle; +} rndis_query_msg_type; + +typedef struct rndis_query_cmplt_type { + le32 MessageType; + le32 MessageLength; + le32 RequestID; + le32 Status; + le32 InformationBufferLength; + le32 InformationBufferOffset; +} rndis_query_cmplt_type; + +typedef struct rndis_set_msg_type { + le32 MessageType; + le32 MessageLength; + le32 RequestID; + le32 OID; + le32 InformationBufferLength; + le32 InformationBufferOffset; + le32 DeviceVcHandle; +} rndis_set_msg_type; + +typedef struct rndis_set_cmplt_type { + le32 MessageType; + le32 MessageLength; + le32 RequestID; + le32 Status; +} rndis_set_cmplt_type; + +typedef struct rndis_reset_msg_type { + le32 MessageType; + le32 MessageLength; + le32 Reserved; +} rndis_reset_msg_type; + +typedef struct rndis_reset_cmplt_type { + le32 MessageType; + le32 MessageLength; + le32 Status; + le32 AddressingReset; +} rndis_reset_cmplt_type; + +typedef struct rndis_indicate_status_msg_type { + le32 MessageType; + le32 MessageLength; + le32 Status; + le32 StatusBufferLength; + le32 StatusBufferOffset; +} rndis_indicate_status_msg_type; + +typedef struct 
rndis_keepalive_msg_type { + le32 MessageType; + le32 MessageLength; + le32 RequestID; +} rndis_keepalive_msg_type; + +typedef struct rndis_keepalive_cmplt_type { + le32 MessageType; + le32 MessageLength; + le32 RequestID; + le32 Status; +} rndis_keepalive_cmplt_type; + +struct rndis_packet_msg_type { + le32 MessageType; + le32 MessageLength; + le32 DataOffset; + le32 DataLength; + le32 OOBDataOffset; + le32 OOBDataLength; + le32 NumOOBDataElements; + le32 PerPacketInfoOffset; + le32 PerPacketInfoLength; + le32 VcHandle; + le32 Reserved; +}; + +struct rndis_config_parameter { + le32 ParameterNameOffset; + le32 ParameterNameLength; + le32 ParameterType; + le32 ParameterValueOffset; + le32 ParameterValueLength; +}; + +/* implementation specific */ +enum rndis_state +{ + RNDIS_UNINITIALIZED, + RNDIS_INITIALIZED, + RNDIS_DATA_INITIALIZED, +}; + +/* from ndis.h */ +enum ndis_oid { + /* Required Object IDs (OIDs) */ + OID_GEN_SUPPORTED_LIST = 0x00010101, + OID_GEN_HARDWARE_STATUS = 0x00010102, + OID_GEN_MEDIA_SUPPORTED = 0x00010103, + OID_GEN_MEDIA_IN_USE = 0x00010104, + OID_GEN_MAXIMUM_LOOKAHEAD = 0x00010105, + OID_GEN_MAXIMUM_FRAME_SIZE = 0x00010106, + OID_GEN_LINK_SPEED = 0x00010107, + OID_GEN_TRANSMIT_BUFFER_SPACE = 0x00010108, + OID_GEN_RECEIVE_BUFFER_SPACE = 0x00010109, + OID_GEN_TRANSMIT_BLOCK_SIZE = 0x0001010a, + OID_GEN_RECEIVE_BLOCK_SIZE = 0x0001010b, + OID_GEN_VENDOR_ID = 0x0001010c, + OID_GEN_VENDOR_DESCRIPTION = 0x0001010d, + OID_GEN_CURRENT_PACKET_FILTER = 0x0001010e, + OID_GEN_CURRENT_LOOKAHEAD = 0x0001010f, + OID_GEN_DRIVER_VERSION = 0x00010110, + OID_GEN_MAXIMUM_TOTAL_SIZE = 0x00010111, + OID_GEN_PROTOCOL_OPTIONS = 0x00010112, + OID_GEN_MAC_OPTIONS = 0x00010113, + OID_GEN_MEDIA_CONNECT_STATUS = 0x00010114, + OID_GEN_MAXIMUM_SEND_PACKETS = 0x00010115, + OID_GEN_VENDOR_DRIVER_VERSION = 0x00010116, + OID_GEN_SUPPORTED_GUIDS = 0x00010117, + OID_GEN_NETWORK_LAYER_ADDRESSES = 0x00010118, + OID_GEN_TRANSPORT_HEADER_OFFSET = 0x00010119, + OID_GEN_MACHINE_NAME = 0x0001021a, + OID_GEN_RNDIS_CONFIG_PARAMETER = 0x0001021b, + OID_GEN_VLAN_ID = 0x0001021c, + + /* Optional OIDs */ + OID_GEN_MEDIA_CAPABILITIES = 0x00010201, + OID_GEN_PHYSICAL_MEDIUM = 0x00010202, + + /* Required statistics OIDs */ + OID_GEN_XMIT_OK = 0x00020101, + OID_GEN_RCV_OK = 0x00020102, + OID_GEN_XMIT_ERROR = 0x00020103, + OID_GEN_RCV_ERROR = 0x00020104, + OID_GEN_RCV_NO_BUFFER = 0x00020105, + + /* Optional statistics OIDs */ + OID_GEN_DIRECTED_BYTES_XMIT = 0x00020201, + OID_GEN_DIRECTED_FRAMES_XMIT = 0x00020202, + OID_GEN_MULTICAST_BYTES_XMIT = 0x00020203, + OID_GEN_MULTICAST_FRAMES_XMIT = 0x00020204, + OID_GEN_BROADCAST_BYTES_XMIT = 0x00020205, + OID_GEN_BROADCAST_FRAMES_XMIT = 0x00020206, + OID_GEN_DIRECTED_BYTES_RCV = 0x00020207, + OID_GEN_DIRECTED_FRAMES_RCV = 0x00020208, + OID_GEN_MULTICAST_BYTES_RCV = 0x00020209, + OID_GEN_MULTICAST_FRAMES_RCV = 0x0002020a, + OID_GEN_BROADCAST_BYTES_RCV = 0x0002020b, + OID_GEN_BROADCAST_FRAMES_RCV = 0x0002020c, + OID_GEN_RCV_CRC_ERROR = 0x0002020d, + OID_GEN_TRANSMIT_QUEUE_LENGTH = 0x0002020e, + OID_GEN_GET_TIME_CAPS = 0x0002020f, + OID_GEN_GET_NETCARD_TIME = 0x00020210, + OID_GEN_NETCARD_LOAD = 0x00020211, + OID_GEN_DEVICE_PROFILE = 0x00020212, + OID_GEN_INIT_TIME_MS = 0x00020213, + OID_GEN_RESET_COUNTS = 0x00020214, + OID_GEN_MEDIA_SENSE_COUNTS = 0x00020215, + OID_GEN_FRIENDLY_NAME = 0x00020216, + OID_GEN_MINIPORT_INFO = 0x00020217, + OID_GEN_RESET_VERIFY_PARAMETERS = 0x00020218, + + /* IEEE 802.3 (Ethernet) OIDs */ + OID_802_3_PERMANENT_ADDRESS = 0x01010101, + 
OID_802_3_CURRENT_ADDRESS = 0x01010102, + OID_802_3_MULTICAST_LIST = 0x01010103, + OID_802_3_MAXIMUM_LIST_SIZE = 0x01010104, + OID_802_3_MAC_OPTIONS = 0x01010105, + OID_802_3_RCV_ERROR_ALIGNMENT = 0x01020101, + OID_802_3_XMIT_ONE_COLLISION = 0x01020102, + OID_802_3_XMIT_MORE_COLLISIONS = 0x01020103, + OID_802_3_XMIT_DEFERRED = 0x01020201, + OID_802_3_XMIT_MAX_COLLISIONS = 0x01020202, + OID_802_3_RCV_OVERRUN = 0x01020203, + OID_802_3_XMIT_UNDERRUN = 0x01020204, + OID_802_3_XMIT_HEARTBEAT_FAILURE = 0x01020205, + OID_802_3_XMIT_TIMES_CRS_LOST = 0x01020206, + OID_802_3_XMIT_LATE_COLLISIONS = 0x01020207, +}; + +static const uint32_t oid_supported_list[] = +{ + /* the general stuff */ + OID_GEN_SUPPORTED_LIST, + OID_GEN_HARDWARE_STATUS, + OID_GEN_MEDIA_SUPPORTED, + OID_GEN_MEDIA_IN_USE, + OID_GEN_MAXIMUM_FRAME_SIZE, + OID_GEN_LINK_SPEED, + OID_GEN_TRANSMIT_BLOCK_SIZE, + OID_GEN_RECEIVE_BLOCK_SIZE, + OID_GEN_VENDOR_ID, + OID_GEN_VENDOR_DESCRIPTION, + OID_GEN_VENDOR_DRIVER_VERSION, + OID_GEN_CURRENT_PACKET_FILTER, + OID_GEN_MAXIMUM_TOTAL_SIZE, + OID_GEN_MEDIA_CONNECT_STATUS, + OID_GEN_PHYSICAL_MEDIUM, + + /* the statistical stuff */ + OID_GEN_XMIT_OK, + OID_GEN_RCV_OK, + OID_GEN_XMIT_ERROR, + OID_GEN_RCV_ERROR, + OID_GEN_RCV_NO_BUFFER, + + /* IEEE 802.3 */ + /* the general stuff */ + OID_802_3_PERMANENT_ADDRESS, + OID_802_3_CURRENT_ADDRESS, + OID_802_3_MULTICAST_LIST, + OID_802_3_MAC_OPTIONS, + OID_802_3_MAXIMUM_LIST_SIZE, + + /* the statistical stuff */ + OID_802_3_RCV_ERROR_ALIGNMENT, + OID_802_3_XMIT_ONE_COLLISION, + OID_802_3_XMIT_MORE_COLLISIONS, +}; + +#define NDIS_MAC_OPTION_COPY_LOOKAHEAD_DATA (1 << 0) +#define NDIS_MAC_OPTION_RECEIVE_SERIALIZED (1 << 1) +#define NDIS_MAC_OPTION_TRANSFERS_NOT_PEND (1 << 2) +#define NDIS_MAC_OPTION_NO_LOOPBACK (1 << 3) +#define NDIS_MAC_OPTION_FULL_DUPLEX (1 << 4) +#define NDIS_MAC_OPTION_EOTX_INDICATION (1 << 5) +#define NDIS_MAC_OPTION_8021P_PRIORITY (1 << 6) + +struct rndis_response { + QTAILQ_ENTRY(rndis_response) entries; + uint32_t length; + uint8_t buf[]; +}; + +struct USBNetState { + USBDevice dev; + + enum rndis_state rndis_state; + uint32_t medium; + uint32_t speed; + uint32_t media_state; + uint16_t filter; + uint32_t vendorid; + + unsigned int out_ptr; + uint8_t out_buf[2048]; + + unsigned int in_ptr, in_len; + uint8_t in_buf[2048]; + + USBEndpoint *intr; + + char usbstring_mac[13]; + NICState *nic; + NICConf conf; + QTAILQ_HEAD(, rndis_response) rndis_resp; +}; + +#define TYPE_USB_NET "usb-net" +OBJECT_DECLARE_SIMPLE_TYPE(USBNetState, USB_NET) + +static int is_rndis(USBNetState *s) +{ + return s->dev.config ? 
+ s->dev.config->bConfigurationValue == DEV_RNDIS_CONFIG_VALUE : 0; +} + +static int ndis_query(USBNetState *s, uint32_t oid, + uint8_t *inbuf, unsigned int inlen, uint8_t *outbuf, + size_t outlen) +{ + unsigned int i; + + switch (oid) { + /* general oids (table 4-1) */ + /* mandatory */ + case OID_GEN_SUPPORTED_LIST: + for (i = 0; i < ARRAY_SIZE(oid_supported_list); i++) { + stl_le_p(outbuf + (i * sizeof(le32)), oid_supported_list[i]); + } + return sizeof(oid_supported_list); + + /* mandatory */ + case OID_GEN_HARDWARE_STATUS: + stl_le_p(outbuf, 0); + return sizeof(le32); + + /* mandatory */ + case OID_GEN_MEDIA_SUPPORTED: + stl_le_p(outbuf, s->medium); + return sizeof(le32); + + /* mandatory */ + case OID_GEN_MEDIA_IN_USE: + stl_le_p(outbuf, s->medium); + return sizeof(le32); + + /* mandatory */ + case OID_GEN_MAXIMUM_FRAME_SIZE: + stl_le_p(outbuf, ETH_FRAME_LEN); + return sizeof(le32); + + /* mandatory */ + case OID_GEN_LINK_SPEED: + stl_le_p(outbuf, s->speed); + return sizeof(le32); + + /* mandatory */ + case OID_GEN_TRANSMIT_BLOCK_SIZE: + stl_le_p(outbuf, ETH_FRAME_LEN); + return sizeof(le32); + + /* mandatory */ + case OID_GEN_RECEIVE_BLOCK_SIZE: + stl_le_p(outbuf, ETH_FRAME_LEN); + return sizeof(le32); + + /* mandatory */ + case OID_GEN_VENDOR_ID: + stl_le_p(outbuf, s->vendorid); + return sizeof(le32); + + /* mandatory */ + case OID_GEN_VENDOR_DESCRIPTION: + pstrcpy((char *)outbuf, outlen, "QEMU USB RNDIS Net"); + return strlen((char *)outbuf) + 1; + + case OID_GEN_VENDOR_DRIVER_VERSION: + stl_le_p(outbuf, 1); + return sizeof(le32); + + /* mandatory */ + case OID_GEN_CURRENT_PACKET_FILTER: + stl_le_p(outbuf, s->filter); + return sizeof(le32); + + /* mandatory */ + case OID_GEN_MAXIMUM_TOTAL_SIZE: + stl_le_p(outbuf, RNDIS_MAX_TOTAL_SIZE); + return sizeof(le32); + + /* mandatory */ + case OID_GEN_MEDIA_CONNECT_STATUS: + stl_le_p(outbuf, s->media_state); + return sizeof(le32); + + case OID_GEN_PHYSICAL_MEDIUM: + stl_le_p(outbuf, 0); + return sizeof(le32); + + case OID_GEN_MAC_OPTIONS: + stl_le_p(outbuf, NDIS_MAC_OPTION_RECEIVE_SERIALIZED | + NDIS_MAC_OPTION_FULL_DUPLEX); + return sizeof(le32); + + /* statistics OIDs (table 4-2) */ + /* mandatory */ + case OID_GEN_XMIT_OK: + stl_le_p(outbuf, 0); + return sizeof(le32); + + /* mandatory */ + case OID_GEN_RCV_OK: + stl_le_p(outbuf, 0); + return sizeof(le32); + + /* mandatory */ + case OID_GEN_XMIT_ERROR: + stl_le_p(outbuf, 0); + return sizeof(le32); + + /* mandatory */ + case OID_GEN_RCV_ERROR: + stl_le_p(outbuf, 0); + return sizeof(le32); + + /* mandatory */ + case OID_GEN_RCV_NO_BUFFER: + stl_le_p(outbuf, 0); + return sizeof(le32); + + /* ieee802.3 OIDs (table 4-3) */ + /* mandatory */ + case OID_802_3_PERMANENT_ADDRESS: + memcpy(outbuf, s->conf.macaddr.a, 6); + return 6; + + /* mandatory */ + case OID_802_3_CURRENT_ADDRESS: + memcpy(outbuf, s->conf.macaddr.a, 6); + return 6; + + /* mandatory */ + case OID_802_3_MULTICAST_LIST: + stl_le_p(outbuf, 0xe0000000); + return sizeof(le32); + + /* mandatory */ + case OID_802_3_MAXIMUM_LIST_SIZE: + stl_le_p(outbuf, 1); + return sizeof(le32); + + case OID_802_3_MAC_OPTIONS: + return 0; + + /* ieee802.3 statistics OIDs (table 4-4) */ + /* mandatory */ + case OID_802_3_RCV_ERROR_ALIGNMENT: + stl_le_p(outbuf, 0); + return sizeof(le32); + + /* mandatory */ + case OID_802_3_XMIT_ONE_COLLISION: + stl_le_p(outbuf, 0); + return sizeof(le32); + + /* mandatory */ + case OID_802_3_XMIT_MORE_COLLISIONS: + stl_le_p(outbuf, 0); + return sizeof(le32); + + default: + fprintf(stderr, "usbnet: unknown OID 0x%08x\n", 
oid); + return 0; + } + return -1; +} + +static int ndis_set(USBNetState *s, uint32_t oid, + uint8_t *inbuf, unsigned int inlen) +{ + switch (oid) { + case OID_GEN_CURRENT_PACKET_FILTER: + s->filter = ldl_le_p(inbuf); + if (s->filter) { + s->rndis_state = RNDIS_DATA_INITIALIZED; + } else { + s->rndis_state = RNDIS_INITIALIZED; + } + return 0; + + case OID_802_3_MULTICAST_LIST: + return 0; + } + return -1; +} + +static int rndis_get_response(USBNetState *s, uint8_t *buf) +{ + int ret = 0; + struct rndis_response *r = s->rndis_resp.tqh_first; + + if (!r) + return ret; + + QTAILQ_REMOVE(&s->rndis_resp, r, entries); + ret = r->length; + memcpy(buf, r->buf, r->length); + g_free(r); + + return ret; +} + +static void *rndis_queue_response(USBNetState *s, unsigned int length) +{ + struct rndis_response *r = + g_malloc0(sizeof(struct rndis_response) + length); + + if (QTAILQ_EMPTY(&s->rndis_resp)) { + usb_wakeup(s->intr, 0); + } + + QTAILQ_INSERT_TAIL(&s->rndis_resp, r, entries); + r->length = length; + + return &r->buf[0]; +} + +static void rndis_clear_responsequeue(USBNetState *s) +{ + struct rndis_response *r; + + while ((r = s->rndis_resp.tqh_first)) { + QTAILQ_REMOVE(&s->rndis_resp, r, entries); + g_free(r); + } +} + +static int rndis_init_response(USBNetState *s, rndis_init_msg_type *buf) +{ + rndis_init_cmplt_type *resp = + rndis_queue_response(s, sizeof(rndis_init_cmplt_type)); + + if (!resp) + return USB_RET_STALL; + + resp->MessageType = cpu_to_le32(RNDIS_INITIALIZE_CMPLT); + resp->MessageLength = cpu_to_le32(sizeof(rndis_init_cmplt_type)); + resp->RequestID = buf->RequestID; /* Still LE in msg buffer */ + resp->Status = cpu_to_le32(RNDIS_STATUS_SUCCESS); + resp->MajorVersion = cpu_to_le32(RNDIS_MAJOR_VERSION); + resp->MinorVersion = cpu_to_le32(RNDIS_MINOR_VERSION); + resp->DeviceFlags = cpu_to_le32(RNDIS_DF_CONNECTIONLESS); + resp->Medium = cpu_to_le32(RNDIS_MEDIUM_802_3); + resp->MaxPacketsPerTransfer = cpu_to_le32(1); + resp->MaxTransferSize = cpu_to_le32(ETH_FRAME_LEN + + sizeof(struct rndis_packet_msg_type) + 22); + resp->PacketAlignmentFactor = cpu_to_le32(0); + resp->AFListOffset = cpu_to_le32(0); + resp->AFListSize = cpu_to_le32(0); + return 0; +} + +static int rndis_query_response(USBNetState *s, + rndis_query_msg_type *buf, unsigned int length) +{ + rndis_query_cmplt_type *resp; + /* oid_supported_list is the largest data reply */ + uint8_t infobuf[sizeof(oid_supported_list)]; + uint32_t bufoffs, buflen; + int infobuflen; + unsigned int resplen; + + bufoffs = le32_to_cpu(buf->InformationBufferOffset) + 8; + buflen = le32_to_cpu(buf->InformationBufferLength); + if (buflen > length || bufoffs >= length || bufoffs + buflen > length) { + return USB_RET_STALL; + } + + infobuflen = ndis_query(s, le32_to_cpu(buf->OID), + bufoffs + (uint8_t *) buf, buflen, infobuf, + sizeof(infobuf)); + resplen = sizeof(rndis_query_cmplt_type) + + ((infobuflen < 0) ? 0 : infobuflen); + resp = rndis_queue_response(s, resplen); + if (!resp) + return USB_RET_STALL; + + resp->MessageType = cpu_to_le32(RNDIS_QUERY_CMPLT); + resp->RequestID = buf->RequestID; /* Still LE in msg buffer */ + resp->MessageLength = cpu_to_le32(resplen); + + if (infobuflen < 0) { + /* OID not supported */ + resp->Status = cpu_to_le32(RNDIS_STATUS_NOT_SUPPORTED); + resp->InformationBufferLength = cpu_to_le32(0); + resp->InformationBufferOffset = cpu_to_le32(0); + return 0; + } + + resp->Status = cpu_to_le32(RNDIS_STATUS_SUCCESS); + resp->InformationBufferOffset = + cpu_to_le32(infobuflen ? 
sizeof(rndis_query_cmplt_type) - 8 : 0); + resp->InformationBufferLength = cpu_to_le32(infobuflen); + memcpy(resp + 1, infobuf, infobuflen); + + return 0; +} + +static int rndis_set_response(USBNetState *s, + rndis_set_msg_type *buf, unsigned int length) +{ + rndis_set_cmplt_type *resp = + rndis_queue_response(s, sizeof(rndis_set_cmplt_type)); + uint32_t bufoffs, buflen; + int ret; + + if (!resp) + return USB_RET_STALL; + + bufoffs = le32_to_cpu(buf->InformationBufferOffset) + 8; + buflen = le32_to_cpu(buf->InformationBufferLength); + if (buflen > length || bufoffs >= length || bufoffs + buflen > length) { + return USB_RET_STALL; + } + + ret = ndis_set(s, le32_to_cpu(buf->OID), + bufoffs + (uint8_t *) buf, buflen); + resp->MessageType = cpu_to_le32(RNDIS_SET_CMPLT); + resp->RequestID = buf->RequestID; /* Still LE in msg buffer */ + resp->MessageLength = cpu_to_le32(sizeof(rndis_set_cmplt_type)); + if (ret < 0) { + /* OID not supported */ + resp->Status = cpu_to_le32(RNDIS_STATUS_NOT_SUPPORTED); + return 0; + } + resp->Status = cpu_to_le32(RNDIS_STATUS_SUCCESS); + + return 0; +} + +static int rndis_reset_response(USBNetState *s, rndis_reset_msg_type *buf) +{ + rndis_reset_cmplt_type *resp = + rndis_queue_response(s, sizeof(rndis_reset_cmplt_type)); + + if (!resp) + return USB_RET_STALL; + + resp->MessageType = cpu_to_le32(RNDIS_RESET_CMPLT); + resp->MessageLength = cpu_to_le32(sizeof(rndis_reset_cmplt_type)); + resp->Status = cpu_to_le32(RNDIS_STATUS_SUCCESS); + resp->AddressingReset = cpu_to_le32(1); /* reset information */ + + return 0; +} + +static int rndis_keepalive_response(USBNetState *s, + rndis_keepalive_msg_type *buf) +{ + rndis_keepalive_cmplt_type *resp = + rndis_queue_response(s, sizeof(rndis_keepalive_cmplt_type)); + + if (!resp) + return USB_RET_STALL; + + resp->MessageType = cpu_to_le32(RNDIS_KEEPALIVE_CMPLT); + resp->MessageLength = cpu_to_le32(sizeof(rndis_keepalive_cmplt_type)); + resp->RequestID = buf->RequestID; /* Still LE in msg buffer */ + resp->Status = cpu_to_le32(RNDIS_STATUS_SUCCESS); + + return 0; +} + +/* Prepare to receive the next packet */ +static void usb_net_reset_in_buf(USBNetState *s) +{ + s->in_ptr = s->in_len = 0; + qemu_flush_queued_packets(qemu_get_queue(s->nic)); +} + +static int rndis_parse(USBNetState *s, uint8_t *data, int length) +{ + uint32_t msg_type = ldl_le_p(data); + + switch (msg_type) { + case RNDIS_INITIALIZE_MSG: + s->rndis_state = RNDIS_INITIALIZED; + return rndis_init_response(s, (rndis_init_msg_type *) data); + + case RNDIS_HALT_MSG: + s->rndis_state = RNDIS_UNINITIALIZED; + return 0; + + case RNDIS_QUERY_MSG: + return rndis_query_response(s, (rndis_query_msg_type *) data, length); + + case RNDIS_SET_MSG: + return rndis_set_response(s, (rndis_set_msg_type *) data, length); + + case RNDIS_RESET_MSG: + rndis_clear_responsequeue(s); + s->out_ptr = 0; + usb_net_reset_in_buf(s); + return rndis_reset_response(s, (rndis_reset_msg_type *) data); + + case RNDIS_KEEPALIVE_MSG: + /* For USB: host does this every 5 seconds */ + return rndis_keepalive_response(s, (rndis_keepalive_msg_type *) data); + } + + return USB_RET_STALL; +} + +static void usb_net_handle_reset(USBDevice *dev) +{ +} + +static void usb_net_handle_control(USBDevice *dev, USBPacket *p, + int request, int value, int index, int length, uint8_t *data) +{ + USBNetState *s = (USBNetState *) dev; + int ret; + + ret = usb_desc_handle_control(dev, p, request, value, index, length, data); + if (ret >= 0) { + return; + } + + switch(request) { + case ClassInterfaceOutRequest | 
USB_CDC_SEND_ENCAPSULATED_COMMAND: + if (!is_rndis(s) || value || index != 0) { + goto fail; + } +#ifdef TRAFFIC_DEBUG + { + unsigned int i; + fprintf(stderr, "SEND_ENCAPSULATED_COMMAND:"); + for (i = 0; i < length; i++) { + if (!(i & 15)) + fprintf(stderr, "\n%04x:", i); + fprintf(stderr, " %02x", data[i]); + } + fprintf(stderr, "\n\n"); + } +#endif + ret = rndis_parse(s, data, length); + if (ret < 0) { + p->status = ret; + } + break; + + case ClassInterfaceRequest | USB_CDC_GET_ENCAPSULATED_RESPONSE: + if (!is_rndis(s) || value || index != 0) { + goto fail; + } + p->actual_length = rndis_get_response(s, data); + if (p->actual_length == 0) { + data[0] = 0; + p->actual_length = 1; + } +#ifdef TRAFFIC_DEBUG + { + unsigned int i; + fprintf(stderr, "GET_ENCAPSULATED_RESPONSE:"); + for (i = 0; i < p->actual_length; i++) { + if (!(i & 15)) + fprintf(stderr, "\n%04x:", i); + fprintf(stderr, " %02x", data[i]); + } + fprintf(stderr, "\n\n"); + } +#endif + break; + + default: + fail: + fprintf(stderr, "usbnet: failed control transaction: " + "request 0x%x value 0x%x index 0x%x length 0x%x\n", + request, value, index, length); + p->status = USB_RET_STALL; + break; + } +} + +static void usb_net_handle_statusin(USBNetState *s, USBPacket *p) +{ + le32 buf[2]; + + if (p->iov.size < 8) { + p->status = USB_RET_STALL; + return; + } + + buf[0] = cpu_to_le32(1); + buf[1] = cpu_to_le32(0); + usb_packet_copy(p, buf, 8); + if (!s->rndis_resp.tqh_first) { + p->status = USB_RET_NAK; + } + +#ifdef TRAFFIC_DEBUG + fprintf(stderr, "usbnet: interrupt poll len %zu return %d", + p->iov.size, p->status); + iov_hexdump(p->iov.iov, p->iov.niov, stderr, "usbnet", p->status); +#endif +} + +static void usb_net_handle_datain(USBNetState *s, USBPacket *p) +{ + int len; + + if (s->in_ptr > s->in_len) { + usb_net_reset_in_buf(s); + p->status = USB_RET_NAK; + return; + } + if (!s->in_len) { + p->status = USB_RET_NAK; + return; + } + len = s->in_len - s->in_ptr; + if (len > p->iov.size) { + len = p->iov.size; + } + usb_packet_copy(p, &s->in_buf[s->in_ptr], len); + s->in_ptr += len; + if (s->in_ptr >= s->in_len && + (is_rndis(s) || (s->in_len & (64 - 1)) || !len)) { + /* no short packet necessary */ + usb_net_reset_in_buf(s); + } + +#ifdef TRAFFIC_DEBUG + fprintf(stderr, "usbnet: data in len %zu return %d", p->iov.size, len); + iov_hexdump(p->iov.iov, p->iov.niov, stderr, "usbnet", len); +#endif +} + +static void usb_net_handle_dataout(USBNetState *s, USBPacket *p) +{ + int sz = sizeof(s->out_buf) - s->out_ptr; + struct rndis_packet_msg_type *msg = + (struct rndis_packet_msg_type *) s->out_buf; + uint32_t len; + +#ifdef TRAFFIC_DEBUG + fprintf(stderr, "usbnet: data out len %zu\n", p->iov.size); + iov_hexdump(p->iov.iov, p->iov.niov, stderr, "usbnet", p->iov.size); +#endif + + if (sz > p->iov.size) { + sz = p->iov.size; + } + usb_packet_copy(p, &s->out_buf[s->out_ptr], sz); + s->out_ptr += sz; + + if (!is_rndis(s)) { + if (p->iov.size < 64) { + qemu_send_packet(qemu_get_queue(s->nic), s->out_buf, s->out_ptr); + s->out_ptr = 0; + } + return; + } + len = le32_to_cpu(msg->MessageLength); + if (s->out_ptr < 8 || s->out_ptr < len) { + return; + } + if (le32_to_cpu(msg->MessageType) == RNDIS_PACKET_MSG) { + uint32_t offs = 8 + le32_to_cpu(msg->DataOffset); + uint32_t size = le32_to_cpu(msg->DataLength); + if (offs < len && size < len && offs + size <= len) { + qemu_send_packet(qemu_get_queue(s->nic), s->out_buf + offs, size); + } + } + s->out_ptr -= len; + memmove(s->out_buf, &s->out_buf[len], s->out_ptr); +} + +static void 
usb_net_handle_data(USBDevice *dev, USBPacket *p) +{ + USBNetState *s = (USBNetState *) dev; + + switch(p->pid) { + case USB_TOKEN_IN: + switch (p->ep->nr) { + case 1: + usb_net_handle_statusin(s, p); + break; + + case 2: + usb_net_handle_datain(s, p); + break; + + default: + goto fail; + } + break; + + case USB_TOKEN_OUT: + switch (p->ep->nr) { + case 2: + usb_net_handle_dataout(s, p); + break; + + default: + goto fail; + } + break; + + default: + fail: + p->status = USB_RET_STALL; + break; + } + + if (p->status == USB_RET_STALL) { + fprintf(stderr, "usbnet: failed data transaction: " + "pid 0x%x ep 0x%x len 0x%zx\n", + p->pid, p->ep->nr, p->iov.size); + } +} + +static ssize_t usbnet_receive(NetClientState *nc, const uint8_t *buf, size_t size) +{ + USBNetState *s = qemu_get_nic_opaque(nc); + uint8_t *in_buf = s->in_buf; + size_t total_size = size; + + if (!s->dev.config) { + return -1; + } + + if (is_rndis(s)) { + if (s->rndis_state != RNDIS_DATA_INITIALIZED) { + return -1; + } + total_size += sizeof(struct rndis_packet_msg_type); + } + if (total_size > sizeof(s->in_buf)) { + return -1; + } + + /* Only accept packet if input buffer is empty */ + if (s->in_len > 0) { + return 0; + } + + if (is_rndis(s)) { + struct rndis_packet_msg_type *msg; + + msg = (struct rndis_packet_msg_type *)in_buf; + memset(msg, 0, sizeof(struct rndis_packet_msg_type)); + msg->MessageType = cpu_to_le32(RNDIS_PACKET_MSG); + msg->MessageLength = cpu_to_le32(size + sizeof(*msg)); + msg->DataOffset = cpu_to_le32(sizeof(*msg) - 8); + msg->DataLength = cpu_to_le32(size); + /* msg->OOBDataOffset; + * msg->OOBDataLength; + * msg->NumOOBDataElements; + * msg->PerPacketInfoOffset; + * msg->PerPacketInfoLength; + * msg->VcHandle; + * msg->Reserved; + */ + in_buf += sizeof(*msg); + } + + memcpy(in_buf, buf, size); + s->in_len = total_size; + s->in_ptr = 0; + return size; +} + +static void usbnet_cleanup(NetClientState *nc) +{ + USBNetState *s = qemu_get_nic_opaque(nc); + + s->nic = NULL; +} + +static void usb_net_unrealize(USBDevice *dev) +{ + USBNetState *s = (USBNetState *) dev; + + /* TODO: remove the nd_table[] entry */ + rndis_clear_responsequeue(s); + qemu_del_nic(s->nic); +} + +static NetClientInfo net_usbnet_info = { + .type = NET_CLIENT_DRIVER_NIC, + .size = sizeof(NICState), + .receive = usbnet_receive, + .cleanup = usbnet_cleanup, +}; + +static void usb_net_realize(USBDevice *dev, Error **errp) +{ + USBNetState *s = USB_NET(dev); + + usb_desc_create_serial(dev); + usb_desc_init(dev); + + s->rndis_state = RNDIS_UNINITIALIZED; + QTAILQ_INIT(&s->rndis_resp); + + s->medium = 0; /* NDIS_MEDIUM_802_3 */ + s->speed = 1000000; /* 100MBps, in 100Bps units */ + s->media_state = 0; /* NDIS_MEDIA_STATE_CONNECTED */; + s->filter = 0; + s->vendorid = 0x1234; + s->intr = usb_ep_get(dev, USB_TOKEN_IN, 1); + + qemu_macaddr_default_if_unset(&s->conf.macaddr); + s->nic = qemu_new_nic(&net_usbnet_info, &s->conf, + object_get_typename(OBJECT(s)), s->dev.qdev.id, s); + qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a); + snprintf(s->usbstring_mac, sizeof(s->usbstring_mac), + "%02x%02x%02x%02x%02x%02x", + 0x40, + s->conf.macaddr.a[1], + s->conf.macaddr.a[2], + s->conf.macaddr.a[3], + s->conf.macaddr.a[4], + s->conf.macaddr.a[5]); + usb_desc_set_string(dev, STRING_ETHADDR, s->usbstring_mac); +} + +static void usb_net_instance_init(Object *obj) +{ + USBDevice *dev = USB_DEVICE(obj); + USBNetState *s = USB_NET(dev); + + device_add_bootindex_property(obj, &s->conf.bootindex, + "bootindex", "/ethernet-phy@0", + &dev->qdev); 
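+    /*
+     * Editor's note (assumption about QEMU internals, not stated in this
+     * patch): the "/ethernet-phy@0" suffix above is the device-path
+     * component under which this NIC is expected to appear when a
+     * "bootindex" value is resolved into the "bootorder" fw_cfg file
+     * handed to guest firmware.
+     */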
+} + +static const VMStateDescription vmstate_usb_net = { + .name = "usb-net", + .unmigratable = 1, +}; + +static Property net_properties[] = { + DEFINE_NIC_PROPERTIES(USBNetState, conf), + DEFINE_PROP_END_OF_LIST(), +}; + +static void usb_net_class_initfn(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + USBDeviceClass *uc = USB_DEVICE_CLASS(klass); + + uc->realize = usb_net_realize; + uc->product_desc = "QEMU USB Network Interface"; + uc->usb_desc = &desc_net; + uc->handle_reset = usb_net_handle_reset; + uc->handle_control = usb_net_handle_control; + uc->handle_data = usb_net_handle_data; + uc->unrealize = usb_net_unrealize; + set_bit(DEVICE_CATEGORY_NETWORK, dc->categories); + dc->fw_name = "network"; + dc->vmsd = &vmstate_usb_net; + device_class_set_props(dc, net_properties); +} + +static const TypeInfo net_info = { + .name = TYPE_USB_NET, + .parent = TYPE_USB_DEVICE, + .instance_size = sizeof(USBNetState), + .class_init = usb_net_class_initfn, + .instance_init = usb_net_instance_init, +}; + +static void usb_net_register_types(void) +{ + type_register_static(&net_info); +} + +type_init(usb_net_register_types) diff --git a/hw/usb/dev-serial.c b/hw/usb/dev-serial.c new file mode 100644 index 000000000..63047d79c --- /dev/null +++ b/hw/usb/dev-serial.c @@ -0,0 +1,709 @@ +/* + * FTDI FT232BM Device emulation + * + * Copyright (c) 2006 CodeSourcery. + * Copyright (c) 2008 Samuel Thibault + * Written by Paul Brook, reused for FTDI by Samuel Thibault + * + * This code is licensed under the LGPL. + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/cutils.h" +#include "qemu/error-report.h" +#include "qemu/module.h" +#include "hw/qdev-properties.h" +#include "hw/qdev-properties-system.h" +#include "hw/usb.h" +#include "migration/vmstate.h" +#include "desc.h" +#include "chardev/char-serial.h" +#include "chardev/char-fe.h" +#include "qom/object.h" +#include "trace.h" + + +#define RECV_BUF (512 - (2 * 8)) + +/* Commands */ +#define FTDI_RESET 0 +#define FTDI_SET_MDM_CTRL 1 +#define FTDI_SET_FLOW_CTRL 2 +#define FTDI_SET_BAUD 3 +#define FTDI_SET_DATA 4 +#define FTDI_GET_MDM_ST 5 +#define FTDI_SET_EVENT_CHR 6 +#define FTDI_SET_ERROR_CHR 7 +#define FTDI_SET_LATENCY 9 +#define FTDI_GET_LATENCY 10 + +/* RESET */ + +#define FTDI_RESET_SIO 0 +#define FTDI_RESET_RX 1 +#define FTDI_RESET_TX 2 + +/* SET_MDM_CTRL */ + +#define FTDI_DTR 1 +#define FTDI_SET_DTR (FTDI_DTR << 8) +#define FTDI_RTS 2 +#define FTDI_SET_RTS (FTDI_RTS << 8) + +/* SET_FLOW_CTRL */ + +#define FTDI_NO_HS 0 +#define FTDI_RTS_CTS_HS 1 +#define FTDI_DTR_DSR_HS 2 +#define FTDI_XON_XOFF_HS 4 + +/* SET_DATA */ + +#define FTDI_PARITY (0x7 << 8) +#define FTDI_ODD (0x1 << 8) +#define FTDI_EVEN (0x2 << 8) +#define FTDI_MARK (0x3 << 8) +#define FTDI_SPACE (0x4 << 8) + +#define FTDI_STOP (0x3 << 11) +#define FTDI_STOP1 (0x0 << 11) +#define FTDI_STOP15 (0x1 << 11) +#define FTDI_STOP2 (0x2 << 11) + +/* GET_MDM_ST */ +/* TODO: should be sent every 40ms */ +#define FTDI_CTS (1 << 4) /* CTS line status */ +#define FTDI_DSR (1 << 5) /* DSR line status */ +#define FTDI_RI (1 << 6) /* RI line status */ +#define FTDI_RLSD (1 << 7) /* Receive Line Signal Detect */ + +/* Status */ + +#define FTDI_DR (1 << 0) /* Data Ready */ +#define FTDI_OE (1 << 1) /* Overrun Err */ +#define FTDI_PE (1 << 2) /* Parity Err */ +#define FTDI_FE (1 << 3) /* Framing Err */ +#define FTDI_BI (1 << 4) /* Break Interrupt */ +#define FTDI_THRE (1 << 5) /* Transmitter Holding Register */ +#define FTDI_TEMT (1 << 6) /* Transmitter Empty 
*/ +#define FTDI_FIFO (1 << 7) /* Error in FIFO */ + +struct USBSerialState { + USBDevice dev; + + USBEndpoint *intr; + uint8_t recv_buf[RECV_BUF]; + uint16_t recv_ptr; + uint16_t recv_used; + uint8_t event_chr; + uint8_t error_chr; + uint8_t event_trigger; + bool always_plugged; + uint8_t flow_control; + uint8_t xon; + uint8_t xoff; + QEMUSerialSetParams params; + int latency; /* ms */ + CharBackend cs; +}; + +#define TYPE_USB_SERIAL "usb-serial-dev" +OBJECT_DECLARE_SIMPLE_TYPE(USBSerialState, USB_SERIAL) + +enum { + STR_MANUFACTURER = 1, + STR_PRODUCT_SERIAL, + STR_PRODUCT_BRAILLE, + STR_SERIALNUMBER, +}; + +static const USBDescStrings desc_strings = { + [STR_MANUFACTURER] = "QEMU", + [STR_PRODUCT_SERIAL] = "QEMU USB SERIAL", + [STR_PRODUCT_BRAILLE] = "QEMU USB BAUM BRAILLE", + [STR_SERIALNUMBER] = "1", +}; + +static const USBDescIface desc_iface0 = { + .bInterfaceNumber = 0, + .bNumEndpoints = 2, + .bInterfaceClass = 0xff, + .bInterfaceSubClass = 0xff, + .bInterfaceProtocol = 0xff, + .eps = (USBDescEndpoint[]) { + { + .bEndpointAddress = USB_DIR_IN | 0x01, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = 64, + },{ + .bEndpointAddress = USB_DIR_OUT | 0x02, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = 64, + }, + } +}; + +static const USBDescDevice desc_device = { + .bcdUSB = 0x0200, + .bMaxPacketSize0 = 8, + .bNumConfigurations = 1, + .confs = (USBDescConfig[]) { + { + .bNumInterfaces = 1, + .bConfigurationValue = 1, + .bmAttributes = USB_CFG_ATT_ONE | USB_CFG_ATT_WAKEUP, + .bMaxPower = 50, + .nif = 1, + .ifs = &desc_iface0, + }, + }, +}; + +static const USBDesc desc_serial = { + .id = { + .idVendor = 0x0403, + .idProduct = 0x6001, + .bcdDevice = 0x0400, + .iManufacturer = STR_MANUFACTURER, + .iProduct = STR_PRODUCT_SERIAL, + .iSerialNumber = STR_SERIALNUMBER, + }, + .full = &desc_device, + .str = desc_strings, +}; + +static const USBDesc desc_braille = { + .id = { + .idVendor = 0x0403, + .idProduct = 0xfe72, + .bcdDevice = 0x0400, + .iManufacturer = STR_MANUFACTURER, + .iProduct = STR_PRODUCT_BRAILLE, + .iSerialNumber = STR_SERIALNUMBER, + }, + .full = &desc_device, + .str = desc_strings, +}; + +static void usb_serial_set_flow_control(USBSerialState *s, + uint8_t flow_control) +{ + USBDevice *dev = USB_DEVICE(s); + USBBus *bus = usb_bus_from_device(dev); + + /* TODO: ioctl */ + s->flow_control = flow_control; + trace_usb_serial_set_flow_control(bus->busnr, dev->addr, flow_control); +} + +static void usb_serial_set_xonxoff(USBSerialState *s, int xonxoff) +{ + USBDevice *dev = USB_DEVICE(s); + USBBus *bus = usb_bus_from_device(dev); + + s->xon = xonxoff & 0xff; + s->xoff = (xonxoff >> 8) & 0xff; + + trace_usb_serial_set_xonxoff(bus->busnr, dev->addr, s->xon, s->xoff); +} + +static void usb_serial_reset(USBSerialState *s) +{ + s->event_chr = 0x0d; + s->event_trigger = 0; + s->recv_ptr = 0; + s->recv_used = 0; + /* TODO: purge in char driver */ + usb_serial_set_flow_control(s, FTDI_NO_HS); +} + +static void usb_serial_handle_reset(USBDevice *dev) +{ + USBSerialState *s = USB_SERIAL(dev); + USBBus *bus = usb_bus_from_device(dev); + + trace_usb_serial_reset(bus->busnr, dev->addr); + + usb_serial_reset(s); + /* TODO: Reset char device, send BREAK? 
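+     * For now a bus reset only clears the emulated FIFO, event and
+     * flow-control state (see usb_serial_reset() above); the backing
+     * chardev keeps whatever line parameters it currently has.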
*/ +} + +static uint8_t usb_get_modem_lines(USBSerialState *s) +{ + int flags; + uint8_t ret; + + if (qemu_chr_fe_ioctl(&s->cs, + CHR_IOCTL_SERIAL_GET_TIOCM, &flags) == -ENOTSUP) { + return FTDI_CTS | FTDI_DSR | FTDI_RLSD; + } + + ret = 0; + if (flags & CHR_TIOCM_CTS) { + ret |= FTDI_CTS; + } + if (flags & CHR_TIOCM_DSR) { + ret |= FTDI_DSR; + } + if (flags & CHR_TIOCM_RI) { + ret |= FTDI_RI; + } + if (flags & CHR_TIOCM_CAR) { + ret |= FTDI_RLSD; + } + + return ret; +} + +static void usb_serial_handle_control(USBDevice *dev, USBPacket *p, + int request, int value, int index, + int length, uint8_t *data) +{ + USBSerialState *s = USB_SERIAL(dev); + USBBus *bus = usb_bus_from_device(dev); + int ret; + + trace_usb_serial_handle_control(bus->busnr, dev->addr, request, value); + + ret = usb_desc_handle_control(dev, p, request, value, index, length, data); + if (ret >= 0) { + return; + } + + switch (request) { + case EndpointOutRequest | USB_REQ_CLEAR_FEATURE: + break; + + /* Class specific requests. */ + case VendorDeviceOutRequest | FTDI_RESET: + switch (value) { + case FTDI_RESET_SIO: + usb_serial_reset(s); + break; + case FTDI_RESET_RX: + s->recv_ptr = 0; + s->recv_used = 0; + /* TODO: purge from char device */ + break; + case FTDI_RESET_TX: + /* TODO: purge from char device */ + break; + } + break; + case VendorDeviceOutRequest | FTDI_SET_MDM_CTRL: + { + static int flags; + qemu_chr_fe_ioctl(&s->cs, CHR_IOCTL_SERIAL_GET_TIOCM, &flags); + if (value & FTDI_SET_RTS) { + if (value & FTDI_RTS) { + flags |= CHR_TIOCM_RTS; + } else { + flags &= ~CHR_TIOCM_RTS; + } + } + if (value & FTDI_SET_DTR) { + if (value & FTDI_DTR) { + flags |= CHR_TIOCM_DTR; + } else { + flags &= ~CHR_TIOCM_DTR; + } + } + qemu_chr_fe_ioctl(&s->cs, CHR_IOCTL_SERIAL_SET_TIOCM, &flags); + break; + } + case VendorDeviceOutRequest | FTDI_SET_FLOW_CTRL: { + uint8_t flow_control = index >> 8; + + usb_serial_set_flow_control(s, flow_control); + if (flow_control & FTDI_XON_XOFF_HS) { + usb_serial_set_xonxoff(s, value); + } + break; + } + case VendorDeviceOutRequest | FTDI_SET_BAUD: { + static const int subdivisors8[8] = { 0, 4, 2, 1, 3, 5, 6, 7 }; + int subdivisor8 = subdivisors8[((value & 0xc000) >> 14) + | ((index & 1) << 2)]; + int divisor = value & 0x3fff; + + /* chip special cases */ + if (divisor == 1 && subdivisor8 == 0) { + subdivisor8 = 4; + } + if (divisor == 0 && subdivisor8 == 0) { + divisor = 1; + } + + s->params.speed = (48000000 / 2) / (8 * divisor + subdivisor8); + trace_usb_serial_set_baud(bus->busnr, dev->addr, s->params.speed); + qemu_chr_fe_ioctl(&s->cs, CHR_IOCTL_SERIAL_SET_PARAMS, &s->params); + break; + } + case VendorDeviceOutRequest | FTDI_SET_DATA: + switch (value & 0xff) { + case 7: + s->params.data_bits = 7; + break; + case 8: + s->params.data_bits = 8; + break; + default: + /* + * According to a comment in Linux's ftdi_sio.c original FTDI + * chips fall back to 8 data bits for unsupported data_bits + */ + trace_usb_serial_unsupported_data_bits(bus->busnr, dev->addr, + value & 0xff); + s->params.data_bits = 8; + } + + switch (value & FTDI_PARITY) { + case 0: + s->params.parity = 'N'; + break; + case FTDI_ODD: + s->params.parity = 'O'; + break; + case FTDI_EVEN: + s->params.parity = 'E'; + break; + default: + trace_usb_serial_unsupported_parity(bus->busnr, dev->addr, + value & FTDI_PARITY); + goto fail; + } + + switch (value & FTDI_STOP) { + case FTDI_STOP1: + s->params.stop_bits = 1; + break; + case FTDI_STOP2: + s->params.stop_bits = 2; + break; + default: + 
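+            /*
+             * FTDI_STOP15 (1.5 stop bits) also lands here:
+             * QEMUSerialSetParams models stop_bits as a plain
+             * integer, so only 1 and 2 are representable and
+             * anything else stalls the request via 'fail' below.
+             */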
trace_usb_serial_unsupported_stopbits(bus->busnr, dev->addr, + value & FTDI_STOP); + goto fail; + } + + trace_usb_serial_set_data(bus->busnr, dev->addr, s->params.parity, + s->params.data_bits, s->params.stop_bits); + qemu_chr_fe_ioctl(&s->cs, CHR_IOCTL_SERIAL_SET_PARAMS, &s->params); + /* TODO: TX ON/OFF */ + break; + case VendorDeviceRequest | FTDI_GET_MDM_ST: + data[0] = usb_get_modem_lines(s) | 1; + data[1] = FTDI_THRE | FTDI_TEMT; + p->actual_length = 2; + break; + case VendorDeviceOutRequest | FTDI_SET_EVENT_CHR: + /* TODO: handle it */ + s->event_chr = value; + break; + case VendorDeviceOutRequest | FTDI_SET_ERROR_CHR: + /* TODO: handle it */ + s->error_chr = value; + break; + case VendorDeviceOutRequest | FTDI_SET_LATENCY: + s->latency = value; + break; + case VendorDeviceRequest | FTDI_GET_LATENCY: + data[0] = s->latency; + p->actual_length = 1; + break; + default: + fail: + trace_usb_serial_unsupported_control(bus->busnr, dev->addr, request, + value); + p->status = USB_RET_STALL; + break; + } +} + +static void usb_serial_token_in(USBSerialState *s, USBPacket *p) +{ + const int max_packet_size = desc_iface0.eps[0].wMaxPacketSize; + int packet_len; + uint8_t header[2]; + + packet_len = p->iov.size; + if (packet_len <= 2) { + p->status = USB_RET_NAK; + return; + } + + header[0] = usb_get_modem_lines(s) | 1; + /* We do not have the uart details */ + /* handle serial break */ + if (s->event_trigger && s->event_trigger & FTDI_BI) { + s->event_trigger &= ~FTDI_BI; + header[1] = FTDI_BI; + usb_packet_copy(p, header, 2); + return; + } else { + header[1] = 0; + } + + if (!s->recv_used) { + p->status = USB_RET_NAK; + return; + } + + while (s->recv_used && packet_len > 2) { + int first_len, len; + + len = MIN(packet_len, max_packet_size); + len -= 2; + if (len > s->recv_used) { + len = s->recv_used; + } + + first_len = RECV_BUF - s->recv_ptr; + if (first_len > len) { + first_len = len; + } + usb_packet_copy(p, header, 2); + usb_packet_copy(p, s->recv_buf + s->recv_ptr, first_len); + if (len > first_len) { + usb_packet_copy(p, s->recv_buf, len - first_len); + } + s->recv_used -= len; + s->recv_ptr = (s->recv_ptr + len) % RECV_BUF; + packet_len -= len + 2; + } + + return; +} + +static void usb_serial_handle_data(USBDevice *dev, USBPacket *p) +{ + USBSerialState *s = USB_SERIAL(dev); + USBBus *bus = usb_bus_from_device(dev); + uint8_t devep = p->ep->nr; + struct iovec *iov; + int i; + + switch (p->pid) { + case USB_TOKEN_OUT: + if (devep != 2) { + goto fail; + } + for (i = 0; i < p->iov.niov; i++) { + iov = p->iov.iov + i; + /* + * XXX this blocks entire thread. Rewrite to use + * qemu_chr_fe_write and background I/O callbacks + */ + qemu_chr_fe_write_all(&s->cs, iov->iov_base, iov->iov_len); + } + p->actual_length = p->iov.size; + break; + + case USB_TOKEN_IN: + if (devep != 1) { + goto fail; + } + usb_serial_token_in(s, p); + break; + + default: + trace_usb_serial_bad_token(bus->busnr, dev->addr); + fail: + p->status = USB_RET_STALL; + break; + } +} + +static int usb_serial_can_read(void *opaque) +{ + USBSerialState *s = opaque; + + if (!s->dev.attached) { + return 0; + } + return RECV_BUF - s->recv_used; +} + +static void usb_serial_read(void *opaque, const uint8_t *buf, int size) +{ + USBSerialState *s = opaque; + int first_size, start; + + /* room in the buffer? 
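+     * recv_buf is a ring buffer: recv_ptr is the read position and
+     * recv_used the fill level, so incoming bytes are stored at
+     * (recv_ptr + recv_used) % RECV_BUF, in up to two memcpy()s when
+     * the write wraps past the end of the array; anything beyond the
+     * free space is dropped.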
*/ + if (size > (RECV_BUF - s->recv_used)) { + size = RECV_BUF - s->recv_used; + } + + start = s->recv_ptr + s->recv_used; + if (start < RECV_BUF) { + /* copy data to end of buffer */ + first_size = RECV_BUF - start; + if (first_size > size) { + first_size = size; + } + + memcpy(s->recv_buf + start, buf, first_size); + + /* wrap around to front if needed */ + if (size > first_size) { + memcpy(s->recv_buf, buf + first_size, size - first_size); + } + } else { + start -= RECV_BUF; + memcpy(s->recv_buf + start, buf, size); + } + s->recv_used += size; + + usb_wakeup(s->intr, 0); +} + +static void usb_serial_event(void *opaque, QEMUChrEvent event) +{ + USBSerialState *s = opaque; + + switch (event) { + case CHR_EVENT_BREAK: + s->event_trigger |= FTDI_BI; + break; + case CHR_EVENT_OPENED: + if (!s->always_plugged && !s->dev.attached) { + usb_device_attach(&s->dev, &error_abort); + } + break; + case CHR_EVENT_CLOSED: + if (!s->always_plugged && s->dev.attached) { + usb_device_detach(&s->dev); + } + break; + case CHR_EVENT_MUX_IN: + case CHR_EVENT_MUX_OUT: + /* Ignore */ + break; + } +} + +static void usb_serial_realize(USBDevice *dev, Error **errp) +{ + USBSerialState *s = USB_SERIAL(dev); + Error *local_err = NULL; + + usb_desc_create_serial(dev); + usb_desc_init(dev); + dev->auto_attach = 0; + + if (!qemu_chr_fe_backend_connected(&s->cs)) { + error_setg(errp, "Property chardev is required"); + return; + } + + usb_check_attach(dev, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + + qemu_chr_fe_set_handlers(&s->cs, usb_serial_can_read, usb_serial_read, + usb_serial_event, NULL, s, NULL, true); + usb_serial_handle_reset(dev); + + if ((s->always_plugged || qemu_chr_fe_backend_open(&s->cs)) && + !dev->attached) { + usb_device_attach(dev, &error_abort); + } + s->intr = usb_ep_get(dev, USB_TOKEN_IN, 1); +} + +static USBDevice *usb_braille_init(void) +{ + USBDevice *dev; + Chardev *cdrv; + + cdrv = qemu_chr_new("braille", "braille", NULL); + if (!cdrv) { + return NULL; + } + + dev = usb_new("usb-braille"); + qdev_prop_set_chr(&dev->qdev, "chardev", cdrv); + return dev; +} + +static const VMStateDescription vmstate_usb_serial = { + .name = "usb-serial", + .unmigratable = 1, +}; + +static Property serial_properties[] = { + DEFINE_PROP_CHR("chardev", USBSerialState, cs), + DEFINE_PROP_BOOL("always-plugged", USBSerialState, always_plugged, false), + DEFINE_PROP_END_OF_LIST(), +}; + +static void usb_serial_dev_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + USBDeviceClass *uc = USB_DEVICE_CLASS(klass); + + uc->realize = usb_serial_realize; + uc->handle_reset = usb_serial_handle_reset; + uc->handle_control = usb_serial_handle_control; + uc->handle_data = usb_serial_handle_data; + dc->vmsd = &vmstate_usb_serial; + set_bit(DEVICE_CATEGORY_INPUT, dc->categories); +} + +static const TypeInfo usb_serial_dev_type_info = { + .name = TYPE_USB_SERIAL, + .parent = TYPE_USB_DEVICE, + .instance_size = sizeof(USBSerialState), + .abstract = true, + .class_init = usb_serial_dev_class_init, +}; + +static void usb_serial_class_initfn(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + USBDeviceClass *uc = USB_DEVICE_CLASS(klass); + + uc->product_desc = "QEMU USB Serial"; + uc->usb_desc = &desc_serial; + device_class_set_props(dc, serial_properties); +} + +static const TypeInfo serial_info = { + .name = "usb-serial", + .parent = TYPE_USB_SERIAL, + .class_init = usb_serial_class_initfn, +}; + +static Property 
braille_properties[] = { + DEFINE_PROP_CHR("chardev", USBSerialState, cs), + DEFINE_PROP_END_OF_LIST(), +}; + +static void usb_braille_class_initfn(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + USBDeviceClass *uc = USB_DEVICE_CLASS(klass); + + uc->product_desc = "QEMU USB Braille"; + uc->usb_desc = &desc_braille; + device_class_set_props(dc, braille_properties); +} + +static const TypeInfo braille_info = { + .name = "usb-braille", + .parent = TYPE_USB_SERIAL, + .class_init = usb_braille_class_initfn, +}; + +static void usb_serial_register_types(void) +{ + type_register_static(&usb_serial_dev_type_info); + type_register_static(&serial_info); + type_register_static(&braille_info); + usb_legacy_register("usb-braille", "braille", usb_braille_init); +} + +type_init(usb_serial_register_types) diff --git a/hw/usb/dev-smartcard-reader.c b/hw/usb/dev-smartcard-reader.c new file mode 100644 index 000000000..91ffd9f8a --- /dev/null +++ b/hw/usb/dev-smartcard-reader.c @@ -0,0 +1,1496 @@ +/* + * Copyright (C) 2011 Red Hat, Inc. + * + * CCID Device emulation + * + * Written by Alon Levy, with contributions from Robert Relyea. + * + * Based on usb-serial.c, see its copyright and attributions below. + * + * This work is licensed under the terms of the GNU GPL, version 2.1 or later. + * See the COPYING file in the top-level directory. + * ------- (original copyright & attribution for usb-serial.c below) -------- + * Copyright (c) 2006 CodeSourcery. + * Copyright (c) 2008 Samuel Thibault + * Written by Paul Brook, reused for FTDI by Samuel Thibault, + */ + +/* + * References: + * + * CCID Specification Revision 1.1 April 22nd 2005 + * "Universal Serial Bus, Device Class: Smart Card" + * Specification for Integrated Circuit(s) Cards Interface Devices + * + * Endianness note: from the spec (1.3) + * "Fields that are larger than a byte are stored in little endian" + * + * KNOWN BUGS + * 1. remove/insert can sometimes result in removed state instead of inserted. + * This is a result of the following: + * symptom: dmesg shows ERMOTEIO (-121), pcscd shows -99. This can happen + * when a short packet is sent, as seen in uhci-usb.c, resulting from a urb + * from the guest requesting SPD and us returning a smaller packet. + * Not sure which messages trigger this. + */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "qapi/error.h" +#include "qemu-common.h" +#include "qemu/error-report.h" +#include "qemu/module.h" +#include "hw/qdev-properties.h" +#include "hw/usb.h" +#include "migration/vmstate.h" +#include "desc.h" + +#include "ccid.h" +#include "qom/object.h" + +#define DPRINTF(s, lvl, fmt, ...) \ +do { \ + if (lvl <= s->debug) { \ + printf("usb-ccid: " fmt , ## __VA_ARGS__); \ + } \ +} while (0) + +#define D_WARN 1 +#define D_INFO 2 +#define D_MORE_INFO 3 +#define D_VERBOSE 4 + +#define TYPE_USB_CCID_DEV "usb-ccid" +OBJECT_DECLARE_SIMPLE_TYPE(USBCCIDState, USB_CCID_DEV) +/* + * The two options for variable sized buffers: + * make them constant size, for large enough constant, + * or handle the migration complexity - VMState doesn't handle this case. + * sizes are expected never to be exceeded, unless guest misbehaves. 
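+ * Hence the fixed sizes below: one 64 KiB bulk-out buffer plus
+ * fixed-depth rings for pending answers and pending bulk-in
+ * messages; bulk-in messages that would overflow their ring are
+ * discarded with a warning (see ccid_reserve_recv_buf()).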
+ */ +#define BULK_OUT_DATA_SIZE (64 * KiB) +#define PENDING_ANSWERS_NUM 128 + +#define BULK_IN_BUF_SIZE 384 +#define BULK_IN_PENDING_NUM 8 + +#define CCID_MAX_PACKET_SIZE 64 + +#define CCID_CONTROL_ABORT 0x1 +#define CCID_CONTROL_GET_CLOCK_FREQUENCIES 0x2 +#define CCID_CONTROL_GET_DATA_RATES 0x3 + +#define CCID_PRODUCT_DESCRIPTION "QEMU USB CCID" +#define CCID_VENDOR_DESCRIPTION "QEMU" +#define CCID_INTERFACE_NAME "CCID Interface" +#define CCID_SERIAL_NUMBER_STRING "1" +/* + * Using Gemplus Vendor and Product id + * Effect on various drivers: + * usbccid.sys (winxp, others untested) is a class driver so it doesn't care. + * linux has a number of class drivers, but openct filters based on + * vendor/product (/etc/openct.conf under fedora), hence Gemplus. + */ +#define CCID_VENDOR_ID 0x08e6 +#define CCID_PRODUCT_ID 0x4433 +#define CCID_DEVICE_VERSION 0x0000 + +/* + * BULK_OUT messages from PC to Reader + * Defined in CCID Rev 1.1 6.1 (page 26) + */ +#define CCID_MESSAGE_TYPE_PC_to_RDR_IccPowerOn 0x62 +#define CCID_MESSAGE_TYPE_PC_to_RDR_IccPowerOff 0x63 +#define CCID_MESSAGE_TYPE_PC_to_RDR_GetSlotStatus 0x65 +#define CCID_MESSAGE_TYPE_PC_to_RDR_XfrBlock 0x6f +#define CCID_MESSAGE_TYPE_PC_to_RDR_GetParameters 0x6c +#define CCID_MESSAGE_TYPE_PC_to_RDR_ResetParameters 0x6d +#define CCID_MESSAGE_TYPE_PC_to_RDR_SetParameters 0x61 +#define CCID_MESSAGE_TYPE_PC_to_RDR_Escape 0x6b +#define CCID_MESSAGE_TYPE_PC_to_RDR_IccClock 0x6e +#define CCID_MESSAGE_TYPE_PC_to_RDR_T0APDU 0x6a +#define CCID_MESSAGE_TYPE_PC_to_RDR_Secure 0x69 +#define CCID_MESSAGE_TYPE_PC_to_RDR_Mechanical 0x71 +#define CCID_MESSAGE_TYPE_PC_to_RDR_Abort 0x72 +#define CCID_MESSAGE_TYPE_PC_to_RDR_SetDataRateAndClockFrequency 0x73 + +/* + * BULK_IN messages from Reader to PC + * Defined in CCID Rev 1.1 6.2 (page 48) + */ +#define CCID_MESSAGE_TYPE_RDR_to_PC_DataBlock 0x80 +#define CCID_MESSAGE_TYPE_RDR_to_PC_SlotStatus 0x81 +#define CCID_MESSAGE_TYPE_RDR_to_PC_Parameters 0x82 +#define CCID_MESSAGE_TYPE_RDR_to_PC_Escape 0x83 +#define CCID_MESSAGE_TYPE_RDR_to_PC_DataRateAndClockFrequency 0x84 + +/* + * INTERRUPT_IN messages from Reader to PC + * Defined in CCID Rev 1.1 6.3 (page 56) + */ +#define CCID_MESSAGE_TYPE_RDR_to_PC_NotifySlotChange 0x50 +#define CCID_MESSAGE_TYPE_RDR_to_PC_HardwareError 0x51 + +/* + * Endpoints for CCID - addresses are up to us to decide. + * To support slot insertion and removal we must have an interrupt in ep + * in addition we need a bulk in and bulk out ep + * 5.2, page 20 + */ +#define CCID_INT_IN_EP 1 +#define CCID_BULK_IN_EP 2 +#define CCID_BULK_OUT_EP 3 + +/* bmSlotICCState masks */ +#define SLOT_0_STATE_MASK 1 +#define SLOT_0_CHANGED_MASK 2 + +/* Status codes that go in bStatus (see 6.2.6) */ +enum { + ICC_STATUS_PRESENT_ACTIVE = 0, + ICC_STATUS_PRESENT_INACTIVE, + ICC_STATUS_NOT_PRESENT +}; + +enum { + COMMAND_STATUS_NO_ERROR = 0, + COMMAND_STATUS_FAILED, + COMMAND_STATUS_TIME_EXTENSION_REQUIRED +}; + +/* Error codes that go in bError (see 6.2.6) */ +enum { + ERROR_CMD_NOT_SUPPORTED = 0, + ERROR_CMD_ABORTED = -1, + ERROR_ICC_MUTE = -2, + ERROR_XFR_PARITY_ERROR = -3, + ERROR_XFR_OVERRUN = -4, + ERROR_HW_ERROR = -5, +}; + +/* 6.2.6 RDR_to_PC_SlotStatus definitions */ +enum { + CLOCK_STATUS_RUNNING = 0, + /* + * 0 - Clock Running, 1 - Clock stopped in State L, 2 - H, + * 3 - unknown state. 
rest are RFU + */ +}; + +typedef struct QEMU_PACKED CCID_Header { + uint8_t bMessageType; + uint32_t dwLength; + uint8_t bSlot; + uint8_t bSeq; +} CCID_Header; + +typedef struct QEMU_PACKED CCID_BULK_IN { + CCID_Header hdr; + uint8_t bStatus; /* Only used in BULK_IN */ + uint8_t bError; /* Only used in BULK_IN */ +} CCID_BULK_IN; + +typedef struct QEMU_PACKED CCID_SlotStatus { + CCID_BULK_IN b; + uint8_t bClockStatus; +} CCID_SlotStatus; + +typedef struct QEMU_PACKED CCID_T0ProtocolDataStructure { + uint8_t bmFindexDindex; + uint8_t bmTCCKST0; + uint8_t bGuardTimeT0; + uint8_t bWaitingIntegerT0; + uint8_t bClockStop; +} CCID_T0ProtocolDataStructure; + +typedef struct QEMU_PACKED CCID_T1ProtocolDataStructure { + uint8_t bmFindexDindex; + uint8_t bmTCCKST1; + uint8_t bGuardTimeT1; + uint8_t bWaitingIntegerT1; + uint8_t bClockStop; + uint8_t bIFSC; + uint8_t bNadValue; +} CCID_T1ProtocolDataStructure; + +typedef union CCID_ProtocolDataStructure { + CCID_T0ProtocolDataStructure t0; + CCID_T1ProtocolDataStructure t1; + uint8_t data[7]; /* must be = max(sizeof(t0), sizeof(t1)) */ +} CCID_ProtocolDataStructure; + +typedef struct QEMU_PACKED CCID_Parameter { + CCID_BULK_IN b; + uint8_t bProtocolNum; + CCID_ProtocolDataStructure abProtocolDataStructure; +} CCID_Parameter; + +typedef struct QEMU_PACKED CCID_DataBlock { + CCID_BULK_IN b; + uint8_t bChainParameter; + uint8_t abData[]; +} CCID_DataBlock; + +/* 6.1.4 PC_to_RDR_XfrBlock */ +typedef struct QEMU_PACKED CCID_XferBlock { + CCID_Header hdr; + uint8_t bBWI; /* Block Waiting Timeout */ + uint16_t wLevelParameter; /* XXX currently unused */ + uint8_t abData[]; +} CCID_XferBlock; + +typedef struct QEMU_PACKED CCID_IccPowerOn { + CCID_Header hdr; + uint8_t bPowerSelect; + uint16_t abRFU; +} CCID_IccPowerOn; + +typedef struct QEMU_PACKED CCID_IccPowerOff { + CCID_Header hdr; + uint16_t abRFU; +} CCID_IccPowerOff; + +typedef struct QEMU_PACKED CCID_SetParameters { + CCID_Header hdr; + uint8_t bProtocolNum; + uint16_t abRFU; + CCID_ProtocolDataStructure abProtocolDataStructure; +} CCID_SetParameters; + +typedef struct CCID_Notify_Slot_Change { + uint8_t bMessageType; /* CCID_MESSAGE_TYPE_RDR_to_PC_NotifySlotChange */ + uint8_t bmSlotICCState; +} CCID_Notify_Slot_Change; + +/* used for DataBlock response to XferBlock */ +typedef struct Answer { + uint8_t slot; + uint8_t seq; +} Answer; + +/* pending BULK_IN messages */ +typedef struct BulkIn { + uint8_t data[BULK_IN_BUF_SIZE]; + uint32_t len; + uint32_t pos; +} BulkIn; + +struct CCIDBus { + BusState qbus; +}; +typedef struct CCIDBus CCIDBus; + +/* + * powered - defaults to true, changed by PowerOn/PowerOff messages + */ +struct USBCCIDState { + USBDevice dev; + USBEndpoint *intr; + USBEndpoint *bulk; + CCIDBus bus; + CCIDCardState *card; + BulkIn bulk_in_pending[BULK_IN_PENDING_NUM]; /* circular */ + uint32_t bulk_in_pending_start; + uint32_t bulk_in_pending_end; /* first free */ + uint32_t bulk_in_pending_num; + BulkIn *current_bulk_in; + uint8_t bulk_out_data[BULK_OUT_DATA_SIZE]; + uint32_t bulk_out_pos; + uint64_t last_answer_error; + Answer pending_answers[PENDING_ANSWERS_NUM]; + uint32_t pending_answers_start; + uint32_t pending_answers_end; + uint32_t pending_answers_num; + uint8_t bError; + uint8_t bmCommandStatus; + uint8_t bProtocolNum; + CCID_ProtocolDataStructure abProtocolDataStructure; + uint32_t ulProtocolDataStructureSize; + uint32_t state_vmstate; + uint8_t bmSlotICCState; + uint8_t powered; + uint8_t notify_slot_change; + uint8_t debug; +}; + +/* + * CCID Spec chapter 4: CCID uses a 
standard device descriptor per Chapter 9, + * "USB Device Framework", section 9.6.1, in the Universal Serial Bus + * Specification. + * + * This device implemented based on the spec and with an Athena Smart Card + * Reader as reference: + * 0dc3:1004 Athena Smartcard Solutions, Inc. + */ + +static const uint8_t qemu_ccid_descriptor[] = { + /* Smart Card Device Class Descriptor */ + 0x36, /* u8 bLength; */ + 0x21, /* u8 bDescriptorType; Functional */ + 0x10, 0x01, /* u16 bcdCCID; CCID Specification Release Number. */ + 0x00, /* + * u8 bMaxSlotIndex; The index of the highest available + * slot on this device. All slots are consecutive starting + * at 00h. + */ + 0x07, /* u8 bVoltageSupport; 01h - 5.0v, 02h - 3.0, 03 - 1.8 */ + + 0x01, 0x00, /* u32 dwProtocols; RRRR PPPP. RRRR = 0000h.*/ + 0x00, 0x00, /* PPPP: 0001h = Protocol T=0, 0002h = Protocol T=1 */ + /* u32 dwDefaultClock; in kHZ (0x0fa0 is 4 MHz) */ + 0xa0, 0x0f, 0x00, 0x00, + /* u32 dwMaximumClock; */ + 0x00, 0x00, 0x01, 0x00, + 0x00, /* u8 bNumClockSupported; * + * 0 means just the default and max. */ + /* u32 dwDataRate ;bps. 9600 == 00002580h */ + 0x80, 0x25, 0x00, 0x00, + /* u32 dwMaxDataRate ; 11520 bps == 0001C200h */ + 0x00, 0xC2, 0x01, 0x00, + 0x00, /* u8 bNumDataRatesSupported; 00 means all rates between + * default and max */ + /* u32 dwMaxIFSD; * + * maximum IFSD supported by CCID for protocol * + * T=1 (Maximum seen from various cards) */ + 0xfe, 0x00, 0x00, 0x00, + /* u32 dwSyncProtocols; 1 - 2-wire, 2 - 3-wire, 4 - I2C */ + 0x00, 0x00, 0x00, 0x00, + /* u32 dwMechanical; 0 - no special characteristics. */ + 0x00, 0x00, 0x00, 0x00, + /* + * u32 dwFeatures; + * 0 - No special characteristics + * + 2 Automatic parameter configuration based on ATR data + * + 4 Automatic activation of ICC on inserting + * + 8 Automatic ICC voltage selection + * + 10 Automatic ICC clock frequency change + * + 20 Automatic baud rate change + * + 40 Automatic parameters negotiation made by the CCID + * + 80 automatic PPS made by the CCID + * 100 CCID can set ICC in clock stop mode + * 200 NAD value other then 00 accepted (T=1 protocol) + * + 400 Automatic IFSD exchange as first exchange (T=1) + * One of the following only: + * + 10000 TPDU level exchanges with CCID + * 20000 Short APDU level exchange with CCID + * 40000 Short and Extended APDU level exchange with CCID + * + * 100000 USB Wake up signaling supported on card + * insertion and removal. Must set bit 5 in bmAttributes + * in Configuration descriptor if 100000 is set. + */ + 0xfe, 0x04, 0x01, 0x00, + /* + * u32 dwMaxCCIDMessageLength; For extended APDU in + * [261 + 10 , 65544 + 10]. Otherwise the minimum is + * wMaxPacketSize of the Bulk-OUT endpoint + */ + 0x12, 0x00, 0x01, 0x00, + 0xFF, /* + * u8 bClassGetResponse; Significant only for CCID that + * offers an APDU level for exchanges. Indicates the + * default class value used by the CCID when it sends a + * Get Response command to perform the transportation of + * an APDU by T=0 protocol + * FFh indicates that the CCID echos the class of the APDU. + */ + 0xFF, /* + * u8 bClassEnvelope; EAPDU only. Envelope command for + * T=0 + */ + 0x00, 0x00, /* + * u16 wLcdLayout; XXYY Number of lines (XX) and chars per + * line for LCD display used for PIN entry. 
0000 - no LCD + */ + 0x01, /* + * u8 bPINSupport; 01h PIN Verification, + * 02h PIN Modification + */ + 0x01, /* u8 bMaxCCIDBusySlots; */ +}; + +enum { + STR_MANUFACTURER = 1, + STR_PRODUCT, + STR_SERIALNUMBER, + STR_INTERFACE, +}; + +static const USBDescStrings desc_strings = { + [STR_MANUFACTURER] = "QEMU", + [STR_PRODUCT] = "QEMU USB CCID", + [STR_SERIALNUMBER] = "1", + [STR_INTERFACE] = "CCID Interface", +}; + +static const USBDescIface desc_iface0 = { + .bInterfaceNumber = 0, + .bNumEndpoints = 3, + .bInterfaceClass = USB_CLASS_CSCID, + .bInterfaceSubClass = USB_SUBCLASS_UNDEFINED, + .bInterfaceProtocol = 0x00, + .iInterface = STR_INTERFACE, + .ndesc = 1, + .descs = (USBDescOther[]) { + { + /* smartcard descriptor */ + .data = qemu_ccid_descriptor, + }, + }, + .eps = (USBDescEndpoint[]) { + { + .bEndpointAddress = USB_DIR_IN | CCID_INT_IN_EP, + .bmAttributes = USB_ENDPOINT_XFER_INT, + .bInterval = 255, + .wMaxPacketSize = 64, + },{ + .bEndpointAddress = USB_DIR_IN | CCID_BULK_IN_EP, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = 64, + },{ + .bEndpointAddress = USB_DIR_OUT | CCID_BULK_OUT_EP, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = 64, + }, + } +}; + +static const USBDescDevice desc_device = { + .bcdUSB = 0x0110, + .bMaxPacketSize0 = 64, + .bNumConfigurations = 1, + .confs = (USBDescConfig[]) { + { + .bNumInterfaces = 1, + .bConfigurationValue = 1, + .bmAttributes = USB_CFG_ATT_ONE | USB_CFG_ATT_SELFPOWER | + USB_CFG_ATT_WAKEUP, + .bMaxPower = 50, + .nif = 1, + .ifs = &desc_iface0, + }, + }, +}; + +static const USBDesc desc_ccid = { + .id = { + .idVendor = CCID_VENDOR_ID, + .idProduct = CCID_PRODUCT_ID, + .bcdDevice = CCID_DEVICE_VERSION, + .iManufacturer = STR_MANUFACTURER, + .iProduct = STR_PRODUCT, + .iSerialNumber = STR_SERIALNUMBER, + }, + .full = &desc_device, + .str = desc_strings, +}; + +static const uint8_t *ccid_card_get_atr(CCIDCardState *card, uint32_t *len) +{ + CCIDCardClass *cc = CCID_CARD_GET_CLASS(card); + + if (cc->get_atr) { + return cc->get_atr(card, len); + } + return NULL; +} + +static void ccid_card_apdu_from_guest(CCIDCardState *card, + const uint8_t *apdu, + uint32_t len) +{ + CCIDCardClass *cc = CCID_CARD_GET_CLASS(card); + + if (cc->apdu_from_guest) { + cc->apdu_from_guest(card, apdu, len); + } +} + +static bool ccid_has_pending_answers(USBCCIDState *s) +{ + return s->pending_answers_num > 0; +} + +static void ccid_clear_pending_answers(USBCCIDState *s) +{ + s->pending_answers_num = 0; + s->pending_answers_start = 0; + s->pending_answers_end = 0; +} + +static void ccid_print_pending_answers(USBCCIDState *s) +{ + Answer *answer; + int i, count; + + DPRINTF(s, D_VERBOSE, "usb-ccid: pending answers:"); + if (!ccid_has_pending_answers(s)) { + DPRINTF(s, D_VERBOSE, " empty\n"); + return; + } + for (i = s->pending_answers_start, count = s->pending_answers_num ; + count > 0; count--, i++) { + answer = &s->pending_answers[i % PENDING_ANSWERS_NUM]; + if (count == 1) { + DPRINTF(s, D_VERBOSE, "%d:%d\n", answer->slot, answer->seq); + } else { + DPRINTF(s, D_VERBOSE, "%d:%d,", answer->slot, answer->seq); + } + } +} + +static void ccid_add_pending_answer(USBCCIDState *s, CCID_Header *hdr) +{ + Answer *answer; + + assert(s->pending_answers_num < PENDING_ANSWERS_NUM); + s->pending_answers_num++; + answer = + &s->pending_answers[(s->pending_answers_end++) % PENDING_ANSWERS_NUM]; + answer->slot = hdr->bSlot; + answer->seq = hdr->bSeq; + ccid_print_pending_answers(s); +} + +static void ccid_remove_pending_answer(USBCCIDState *s, + uint8_t 
*slot, uint8_t *seq) +{ + Answer *answer; + + assert(s->pending_answers_num > 0); + s->pending_answers_num--; + answer = + &s->pending_answers[(s->pending_answers_start++) % PENDING_ANSWERS_NUM]; + *slot = answer->slot; + *seq = answer->seq; + ccid_print_pending_answers(s); +} + +static void ccid_bulk_in_clear(USBCCIDState *s) +{ + s->bulk_in_pending_start = 0; + s->bulk_in_pending_end = 0; + s->bulk_in_pending_num = 0; +} + +static void ccid_bulk_in_release(USBCCIDState *s) +{ + assert(s->current_bulk_in != NULL); + s->current_bulk_in->pos = 0; + s->current_bulk_in = NULL; +} + +static void ccid_bulk_in_get(USBCCIDState *s) +{ + if (s->current_bulk_in != NULL || s->bulk_in_pending_num == 0) { + return; + } + assert(s->bulk_in_pending_num > 0); + s->bulk_in_pending_num--; + s->current_bulk_in = + &s->bulk_in_pending[(s->bulk_in_pending_start++) % BULK_IN_PENDING_NUM]; +} + +static void *ccid_reserve_recv_buf(USBCCIDState *s, uint16_t len) +{ + BulkIn *bulk_in; + + DPRINTF(s, D_VERBOSE, "%s: QUEUE: reserve %d bytes\n", __func__, len); + + /* look for an existing element */ + if (len > BULK_IN_BUF_SIZE) { + DPRINTF(s, D_WARN, "usb-ccid.c: %s: len larger then max (%d>%d). " + "discarding message.\n", + __func__, len, BULK_IN_BUF_SIZE); + return NULL; + } + if (s->bulk_in_pending_num >= BULK_IN_PENDING_NUM) { + DPRINTF(s, D_WARN, "usb-ccid.c: %s: No free bulk_in buffers. " + "discarding message.\n", __func__); + return NULL; + } + bulk_in = + &s->bulk_in_pending[(s->bulk_in_pending_end++) % BULK_IN_PENDING_NUM]; + s->bulk_in_pending_num++; + bulk_in->len = len; + return bulk_in->data; +} + +static void ccid_reset(USBCCIDState *s) +{ + ccid_bulk_in_clear(s); + ccid_clear_pending_answers(s); +} + +static void ccid_detach(USBCCIDState *s) +{ + ccid_reset(s); +} + +static void ccid_handle_reset(USBDevice *dev) +{ + USBCCIDState *s = USB_CCID_DEV(dev); + + DPRINTF(s, 1, "Reset\n"); + + ccid_reset(s); +} + +static const char *ccid_control_to_str(USBCCIDState *s, int request) +{ + switch (request) { + /* generic - should be factored out if there are other debugees */ + case DeviceOutRequest | USB_REQ_SET_ADDRESS: + return "(generic) set address"; + case DeviceRequest | USB_REQ_GET_DESCRIPTOR: + return "(generic) get descriptor"; + case DeviceRequest | USB_REQ_GET_CONFIGURATION: + return "(generic) get configuration"; + case DeviceOutRequest | USB_REQ_SET_CONFIGURATION: + return "(generic) set configuration"; + case DeviceRequest | USB_REQ_GET_STATUS: + return "(generic) get status"; + case DeviceOutRequest | USB_REQ_CLEAR_FEATURE: + return "(generic) clear feature"; + case DeviceOutRequest | USB_REQ_SET_FEATURE: + return "(generic) set_feature"; + case InterfaceRequest | USB_REQ_GET_INTERFACE: + return "(generic) get interface"; + case InterfaceOutRequest | USB_REQ_SET_INTERFACE: + return "(generic) set interface"; + /* class requests */ + case ClassInterfaceOutRequest | CCID_CONTROL_ABORT: + return "ABORT"; + case ClassInterfaceRequest | CCID_CONTROL_GET_CLOCK_FREQUENCIES: + return "GET_CLOCK_FREQUENCIES"; + case ClassInterfaceRequest | CCID_CONTROL_GET_DATA_RATES: + return "GET_DATA_RATES"; + } + return "unknown"; +} + +static void ccid_handle_control(USBDevice *dev, USBPacket *p, int request, + int value, int index, int length, uint8_t *data) +{ + USBCCIDState *s = USB_CCID_DEV(dev); + int ret; + + DPRINTF(s, 1, "%s: got control %s (%x), value %x\n", __func__, + ccid_control_to_str(s, request), request, value); + ret = usb_desc_handle_control(dev, p, request, value, index, length, data); + if (ret 
>= 0) { + return; + } + + switch (request) { + /* Class specific requests. */ + case ClassInterfaceOutRequest | CCID_CONTROL_ABORT: + DPRINTF(s, 1, "ccid_control abort UNIMPLEMENTED\n"); + p->status = USB_RET_STALL; + break; + case ClassInterfaceRequest | CCID_CONTROL_GET_CLOCK_FREQUENCIES: + DPRINTF(s, 1, "ccid_control get clock frequencies UNIMPLEMENTED\n"); + p->status = USB_RET_STALL; + break; + case ClassInterfaceRequest | CCID_CONTROL_GET_DATA_RATES: + DPRINTF(s, 1, "ccid_control get data rates UNIMPLEMENTED\n"); + p->status = USB_RET_STALL; + break; + default: + DPRINTF(s, 1, "got unsupported/bogus control %x, value %x\n", + request, value); + p->status = USB_RET_STALL; + break; + } +} + +static bool ccid_card_inserted(USBCCIDState *s) +{ + return s->bmSlotICCState & SLOT_0_STATE_MASK; +} + +static uint8_t ccid_card_status(USBCCIDState *s) +{ + return ccid_card_inserted(s) + ? (s->powered ? + ICC_STATUS_PRESENT_ACTIVE + : ICC_STATUS_PRESENT_INACTIVE + ) + : ICC_STATUS_NOT_PRESENT; +} + +static uint8_t ccid_calc_status(USBCCIDState *s) +{ + /* + * page 55, 6.2.6, calculation of bStatus from bmICCStatus and + * bmCommandStatus + */ + uint8_t ret = ccid_card_status(s) | (s->bmCommandStatus << 6); + DPRINTF(s, D_VERBOSE, "%s: status = %d\n", __func__, ret); + return ret; +} + +static void ccid_reset_error_status(USBCCIDState *s) +{ + s->bError = ERROR_CMD_NOT_SUPPORTED; + s->bmCommandStatus = COMMAND_STATUS_NO_ERROR; +} + +static void ccid_write_slot_status(USBCCIDState *s, CCID_Header *recv) +{ + CCID_SlotStatus *h = ccid_reserve_recv_buf(s, sizeof(CCID_SlotStatus)); + if (h == NULL) { + return; + } + h->b.hdr.bMessageType = CCID_MESSAGE_TYPE_RDR_to_PC_SlotStatus; + h->b.hdr.dwLength = 0; + h->b.hdr.bSlot = recv->bSlot; + h->b.hdr.bSeq = recv->bSeq; + h->b.bStatus = ccid_calc_status(s); + h->b.bError = s->bError; + h->bClockStatus = CLOCK_STATUS_RUNNING; + ccid_reset_error_status(s); + usb_wakeup(s->bulk, 0); +} + +static void ccid_write_parameters(USBCCIDState *s, CCID_Header *recv) +{ + CCID_Parameter *h; + uint32_t len = s->ulProtocolDataStructureSize; + + h = ccid_reserve_recv_buf(s, sizeof(CCID_Parameter) + len); + if (h == NULL) { + return; + } + h->b.hdr.bMessageType = CCID_MESSAGE_TYPE_RDR_to_PC_Parameters; + h->b.hdr.dwLength = 0; + h->b.hdr.bSlot = recv->bSlot; + h->b.hdr.bSeq = recv->bSeq; + h->b.bStatus = ccid_calc_status(s); + h->b.bError = s->bError; + h->bProtocolNum = s->bProtocolNum; + h->abProtocolDataStructure = s->abProtocolDataStructure; + ccid_reset_error_status(s); + usb_wakeup(s->bulk, 0); +} + +static void ccid_write_data_block(USBCCIDState *s, uint8_t slot, uint8_t seq, + const uint8_t *data, uint32_t len) +{ + CCID_DataBlock *p = ccid_reserve_recv_buf(s, sizeof(*p) + len); + + if (p == NULL) { + return; + } + p->b.hdr.bMessageType = CCID_MESSAGE_TYPE_RDR_to_PC_DataBlock; + p->b.hdr.dwLength = cpu_to_le32(len); + p->b.hdr.bSlot = slot; + p->b.hdr.bSeq = seq; + p->b.bStatus = ccid_calc_status(s); + p->b.bError = s->bError; + if (p->b.bError) { + DPRINTF(s, D_VERBOSE, "error %d\n", p->b.bError); + } + if (len) { + assert(data); + memcpy(p->abData, data, len); + } + ccid_reset_error_status(s); + usb_wakeup(s->bulk, 0); +} + +static void ccid_report_error_failed(USBCCIDState *s, uint8_t error) +{ + s->bmCommandStatus = COMMAND_STATUS_FAILED; + s->bError = error; +} + +static void ccid_write_data_block_answer(USBCCIDState *s, + const uint8_t *data, uint32_t len) +{ + uint8_t seq; + uint8_t slot; + + if (!ccid_has_pending_answers(s)) { + DPRINTF(s, D_WARN, "error: 
no pending answer to return to guest\n"); + ccid_report_error_failed(s, ERROR_ICC_MUTE); + return; + } + ccid_remove_pending_answer(s, &slot, &seq); + ccid_write_data_block(s, slot, seq, data, len); +} + +static uint8_t atr_get_protocol_num(const uint8_t *atr, uint32_t len) +{ + int i; + + if (len < 2 || !(atr[1] & 0x80)) { + /* too short or TD1 not included */ + return 0; /* T=0, default */ + } + i = 1 + !!(atr[1] & 0x10) + !!(atr[1] & 0x20) + !!(atr[1] & 0x40); + i += !!(atr[1] & 0x80); + return atr[i] & 0x0f; +} + +static void ccid_write_data_block_atr(USBCCIDState *s, CCID_Header *recv) +{ + const uint8_t *atr = NULL; + uint32_t len = 0; + uint8_t atr_protocol_num; + CCID_T0ProtocolDataStructure *t0 = &s->abProtocolDataStructure.t0; + CCID_T1ProtocolDataStructure *t1 = &s->abProtocolDataStructure.t1; + + if (s->card) { + atr = ccid_card_get_atr(s->card, &len); + } + atr_protocol_num = atr_get_protocol_num(atr, len); + DPRINTF(s, D_VERBOSE, "%s: atr contains protocol=%d\n", __func__, + atr_protocol_num); + /* set parameters from ATR - see spec page 109 */ + s->bProtocolNum = (atr_protocol_num <= 1 ? atr_protocol_num + : s->bProtocolNum); + switch (atr_protocol_num) { + case 0: + /* TODO: unimplemented ATR T0 parameters */ + t0->bmFindexDindex = 0; + t0->bmTCCKST0 = 0; + t0->bGuardTimeT0 = 0; + t0->bWaitingIntegerT0 = 0; + t0->bClockStop = 0; + break; + case 1: + /* TODO: unimplemented ATR T1 parameters */ + t1->bmFindexDindex = 0; + t1->bmTCCKST1 = 0; + t1->bGuardTimeT1 = 0; + t1->bWaitingIntegerT1 = 0; + t1->bClockStop = 0; + t1->bIFSC = 0; + t1->bNadValue = 0; + break; + default: + DPRINTF(s, D_WARN, "%s: error: unsupported ATR protocol %d\n", + __func__, atr_protocol_num); + } + ccid_write_data_block(s, recv->bSlot, recv->bSeq, atr, len); +} + +static void ccid_set_parameters(USBCCIDState *s, CCID_Header *recv) +{ + CCID_SetParameters *ph = (CCID_SetParameters *) recv; + uint32_t protocol_num = ph->bProtocolNum & 3; + + if (protocol_num != 0 && protocol_num != 1) { + ccid_report_error_failed(s, ERROR_CMD_NOT_SUPPORTED); + return; + } + s->bProtocolNum = protocol_num; + s->abProtocolDataStructure = ph->abProtocolDataStructure; +} + +/* + * must be 5 bytes for T=0, 7 bytes for T=1 + * See page 52 + */ +static const CCID_ProtocolDataStructure defaultProtocolDataStructure = { + .t1 = { + .bmFindexDindex = 0x77, + .bmTCCKST1 = 0x00, + .bGuardTimeT1 = 0x00, + .bWaitingIntegerT1 = 0x00, + .bClockStop = 0x00, + .bIFSC = 0xfe, + .bNadValue = 0x00, + } +}; + +static void ccid_reset_parameters(USBCCIDState *s) +{ + s->bProtocolNum = 0; /* T=0 */ + s->abProtocolDataStructure = defaultProtocolDataStructure; +} + +/* NOTE: only a single slot is supported (SLOT_0) */ +static void ccid_on_slot_change(USBCCIDState *s, bool full) +{ + /* RDR_to_PC_NotifySlotChange, 6.3.1 page 56 */ + uint8_t current = s->bmSlotICCState; + if (full) { + s->bmSlotICCState |= SLOT_0_STATE_MASK; + } else { + s->bmSlotICCState &= ~SLOT_0_STATE_MASK; + } + if (current != s->bmSlotICCState) { + s->bmSlotICCState |= SLOT_0_CHANGED_MASK; + } + s->notify_slot_change = true; + usb_wakeup(s->intr, 0); +} + +static void ccid_write_data_block_error( + USBCCIDState *s, uint8_t slot, uint8_t seq) +{ + ccid_write_data_block(s, slot, seq, NULL, 0); +} + +static void ccid_on_apdu_from_guest(USBCCIDState *s, CCID_XferBlock *recv) +{ + uint32_t len; + + if (ccid_card_status(s) != ICC_STATUS_PRESENT_ACTIVE) { + DPRINTF(s, 1, + "usb-ccid: not sending apdu to client, no card connected\n"); + ccid_write_data_block_error(s, recv->hdr.bSlot, 
recv->hdr.bSeq); + return; + } + len = le32_to_cpu(recv->hdr.dwLength); + DPRINTF(s, 1, "%s: seq %d, len %u\n", __func__, + recv->hdr.bSeq, len); + ccid_add_pending_answer(s, (CCID_Header *)recv); + if (s->card && len <= BULK_OUT_DATA_SIZE) { + ccid_card_apdu_from_guest(s->card, recv->abData, len); + } else { + DPRINTF(s, D_WARN, "warning: discarded apdu\n"); + } +} + +static const char *ccid_message_type_to_str(uint8_t type) +{ + switch (type) { + case CCID_MESSAGE_TYPE_PC_to_RDR_IccPowerOn: return "IccPowerOn"; + case CCID_MESSAGE_TYPE_PC_to_RDR_IccPowerOff: return "IccPowerOff"; + case CCID_MESSAGE_TYPE_PC_to_RDR_GetSlotStatus: return "GetSlotStatus"; + case CCID_MESSAGE_TYPE_PC_to_RDR_XfrBlock: return "XfrBlock"; + case CCID_MESSAGE_TYPE_PC_to_RDR_GetParameters: return "GetParameters"; + case CCID_MESSAGE_TYPE_PC_to_RDR_ResetParameters: return "ResetParameters"; + case CCID_MESSAGE_TYPE_PC_to_RDR_SetParameters: return "SetParameters"; + case CCID_MESSAGE_TYPE_PC_to_RDR_Escape: return "Escape"; + case CCID_MESSAGE_TYPE_PC_to_RDR_IccClock: return "IccClock"; + case CCID_MESSAGE_TYPE_PC_to_RDR_T0APDU: return "T0APDU"; + case CCID_MESSAGE_TYPE_PC_to_RDR_Secure: return "Secure"; + case CCID_MESSAGE_TYPE_PC_to_RDR_Mechanical: return "Mechanical"; + case CCID_MESSAGE_TYPE_PC_to_RDR_Abort: return "Abort"; + case CCID_MESSAGE_TYPE_PC_to_RDR_SetDataRateAndClockFrequency: + return "SetDataRateAndClockFrequency"; + } + return "unknown"; +} + +static void ccid_handle_bulk_out(USBCCIDState *s, USBPacket *p) +{ + CCID_Header *ccid_header; + + if (p->iov.size + s->bulk_out_pos > BULK_OUT_DATA_SIZE) { + goto err; + } + usb_packet_copy(p, s->bulk_out_data + s->bulk_out_pos, p->iov.size); + s->bulk_out_pos += p->iov.size; + if (s->bulk_out_pos < 10) { + DPRINTF(s, 1, "%s: header incomplete\n", __func__); + goto err; + } + + ccid_header = (CCID_Header *)s->bulk_out_data; + if ((s->bulk_out_pos - 10 < ccid_header->dwLength) && + (p->iov.size == CCID_MAX_PACKET_SIZE)) { + DPRINTF(s, D_VERBOSE, + "usb-ccid: bulk_in: expecting more packets (%u/%u)\n", + s->bulk_out_pos - 10, ccid_header->dwLength); + return; + } + if (s->bulk_out_pos - 10 != ccid_header->dwLength) { + DPRINTF(s, 1, + "usb-ccid: bulk_in: message size mismatch (got %u, expected %u)\n", + s->bulk_out_pos - 10, ccid_header->dwLength); + goto err; + } + + DPRINTF(s, D_MORE_INFO, "%s %x %s\n", __func__, + ccid_header->bMessageType, + ccid_message_type_to_str(ccid_header->bMessageType)); + switch (ccid_header->bMessageType) { + case CCID_MESSAGE_TYPE_PC_to_RDR_GetSlotStatus: + ccid_write_slot_status(s, ccid_header); + break; + case CCID_MESSAGE_TYPE_PC_to_RDR_IccPowerOn: + DPRINTF(s, 1, "%s: PowerOn: %d\n", __func__, + ((CCID_IccPowerOn *)(ccid_header))->bPowerSelect); + s->powered = true; + if (!ccid_card_inserted(s)) { + ccid_report_error_failed(s, ERROR_ICC_MUTE); + } + /* atr is written regardless of error. 
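The guest still expects an RDR_to_PC_DataBlock reply either way; bStatus/bError in its header carry any failure. 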
*/ + ccid_write_data_block_atr(s, ccid_header); + break; + case CCID_MESSAGE_TYPE_PC_to_RDR_IccPowerOff: + ccid_reset_error_status(s); + s->powered = false; + ccid_write_slot_status(s, ccid_header); + break; + case CCID_MESSAGE_TYPE_PC_to_RDR_XfrBlock: + ccid_on_apdu_from_guest(s, (CCID_XferBlock *)s->bulk_out_data); + break; + case CCID_MESSAGE_TYPE_PC_to_RDR_SetParameters: + ccid_reset_error_status(s); + ccid_set_parameters(s, ccid_header); + ccid_write_parameters(s, ccid_header); + break; + case CCID_MESSAGE_TYPE_PC_to_RDR_ResetParameters: + ccid_reset_error_status(s); + ccid_reset_parameters(s); + ccid_write_parameters(s, ccid_header); + break; + case CCID_MESSAGE_TYPE_PC_to_RDR_GetParameters: + ccid_reset_error_status(s); + ccid_write_parameters(s, ccid_header); + break; + case CCID_MESSAGE_TYPE_PC_to_RDR_Mechanical: + ccid_report_error_failed(s, 0); + ccid_write_slot_status(s, ccid_header); + break; + default: + DPRINTF(s, 1, + "handle_data: ERROR: unhandled message type %Xh\n", + ccid_header->bMessageType); + /* + * The caller is expecting the device to respond, tell it we + * don't support the operation. + */ + ccid_report_error_failed(s, ERROR_CMD_NOT_SUPPORTED); + ccid_write_slot_status(s, ccid_header); + break; + } + s->bulk_out_pos = 0; + return; + +err: + p->status = USB_RET_STALL; + s->bulk_out_pos = 0; + return; +} + +static void ccid_bulk_in_copy_to_guest(USBCCIDState *s, USBPacket *p, + unsigned int max_packet_size) +{ + int len = 0; + + ccid_bulk_in_get(s); + if (s->current_bulk_in != NULL) { + len = MIN(s->current_bulk_in->len - s->current_bulk_in->pos, + p->iov.size); + if (len) { + usb_packet_copy(p, s->current_bulk_in->data + + s->current_bulk_in->pos, len); + } + s->current_bulk_in->pos += len; + if (s->current_bulk_in->pos == s->current_bulk_in->len + && len != max_packet_size) { + ccid_bulk_in_release(s); + } + } else { + /* return when device has no data - usb 2.0 spec Table 8-4 */ + p->status = USB_RET_NAK; + } + if (len) { + DPRINTF(s, D_MORE_INFO, + "%s: %zd/%d req/act to guest (BULK_IN)\n", + __func__, p->iov.size, len); + } + if (len < p->iov.size) { + DPRINTF(s, 1, + "%s: returning short (EREMOTEIO) %d < %zd\n", + __func__, len, p->iov.size); + } +} + +static void ccid_handle_data(USBDevice *dev, USBPacket *p) +{ + USBCCIDState *s = USB_CCID_DEV(dev); + uint8_t buf[2]; + + switch (p->pid) { + case USB_TOKEN_OUT: + ccid_handle_bulk_out(s, p); + break; + + case USB_TOKEN_IN: + switch (p->ep->nr) { + case CCID_BULK_IN_EP: + ccid_bulk_in_copy_to_guest(s, p, dev->ep_ctl.max_packet_size); + break; + case CCID_INT_IN_EP: + if (s->notify_slot_change) { + /* page 56, RDR_to_PC_NotifySlotChange */ + buf[0] = CCID_MESSAGE_TYPE_RDR_to_PC_NotifySlotChange; + buf[1] = s->bmSlotICCState; + usb_packet_copy(p, buf, 2); + s->notify_slot_change = false; + s->bmSlotICCState &= ~SLOT_0_CHANGED_MASK; + DPRINTF(s, D_INFO, + "handle_data: int_in: notify_slot_change %X, " + "requested len %zd\n", + s->bmSlotICCState, p->iov.size); + } else { + p->status = USB_RET_NAK; + } + break; + default: + DPRINTF(s, 1, "Bad endpoint\n"); + p->status = USB_RET_STALL; + break; + } + break; + default: + DPRINTF(s, 1, "Bad token\n"); + p->status = USB_RET_STALL; + break; + } +} + +static void ccid_unrealize(USBDevice *dev) +{ + USBCCIDState *s = USB_CCID_DEV(dev); + + ccid_bulk_in_clear(s); +} + +static void ccid_flush_pending_answers(USBCCIDState *s) +{ + while (ccid_has_pending_answers(s)) { + ccid_write_data_block_answer(s, NULL, 0); + } +} + +static Answer *ccid_peek_next_answer(USBCCIDState 
*s) +{ + return s->pending_answers_num == 0 + ? NULL + : &s->pending_answers[s->pending_answers_start % PENDING_ANSWERS_NUM]; +} + +static Property ccid_props[] = { + DEFINE_PROP_UINT32("slot", struct CCIDCardState, slot, 0), + DEFINE_PROP_END_OF_LIST(), +}; + +#define TYPE_CCID_BUS "ccid-bus" +OBJECT_DECLARE_SIMPLE_TYPE(CCIDBus, CCID_BUS) + +static const TypeInfo ccid_bus_info = { + .name = TYPE_CCID_BUS, + .parent = TYPE_BUS, + .instance_size = sizeof(CCIDBus), +}; + +void ccid_card_send_apdu_to_guest(CCIDCardState *card, + uint8_t *apdu, uint32_t len) +{ + DeviceState *qdev = DEVICE(card); + USBDevice *dev = USB_DEVICE(qdev->parent_bus->parent); + USBCCIDState *s = USB_CCID_DEV(dev); + Answer *answer; + + if (!ccid_has_pending_answers(s)) { + DPRINTF(s, 1, "CCID ERROR: got an APDU without pending answers\n"); + return; + } + s->bmCommandStatus = COMMAND_STATUS_NO_ERROR; + answer = ccid_peek_next_answer(s); + if (answer == NULL) { + DPRINTF(s, D_WARN, "%s: error: unexpected lack of answer\n", __func__); + ccid_report_error_failed(s, ERROR_HW_ERROR); + return; + } + DPRINTF(s, 1, "APDU returned to guest %u (answer seq %d, slot %d)\n", + len, answer->seq, answer->slot); + ccid_write_data_block_answer(s, apdu, len); +} + +void ccid_card_card_removed(CCIDCardState *card) +{ + DeviceState *qdev = DEVICE(card); + USBDevice *dev = USB_DEVICE(qdev->parent_bus->parent); + USBCCIDState *s = USB_CCID_DEV(dev); + + ccid_on_slot_change(s, false); + ccid_flush_pending_answers(s); + ccid_reset(s); +} + +int ccid_card_ccid_attach(CCIDCardState *card) +{ + DeviceState *qdev = DEVICE(card); + USBDevice *dev = USB_DEVICE(qdev->parent_bus->parent); + USBCCIDState *s = USB_CCID_DEV(dev); + + DPRINTF(s, 1, "CCID Attach\n"); + return 0; +} + +void ccid_card_ccid_detach(CCIDCardState *card) +{ + DeviceState *qdev = DEVICE(card); + USBDevice *dev = USB_DEVICE(qdev->parent_bus->parent); + USBCCIDState *s = USB_CCID_DEV(dev); + + DPRINTF(s, 1, "CCID Detach\n"); + if (ccid_card_inserted(s)) { + ccid_on_slot_change(s, false); + } + ccid_detach(s); +} + +void ccid_card_card_error(CCIDCardState *card, uint64_t error) +{ + DeviceState *qdev = DEVICE(card); + USBDevice *dev = USB_DEVICE(qdev->parent_bus->parent); + USBCCIDState *s = USB_CCID_DEV(dev); + + s->bmCommandStatus = COMMAND_STATUS_FAILED; + s->last_answer_error = error; + DPRINTF(s, 1, "VSC_Error: %" PRIX64 "\n", s->last_answer_error); + /* TODO: these errors should be more verbose and propagated to the guest.*/ + /* + * We flush all pending answers on CardRemove message in ccid-card-passthru, + * so check that first to not trigger abort + */ + if (ccid_has_pending_answers(s)) { + ccid_write_data_block_answer(s, NULL, 0); + } +} + +void ccid_card_card_inserted(CCIDCardState *card) +{ + DeviceState *qdev = DEVICE(card); + USBDevice *dev = USB_DEVICE(qdev->parent_bus->parent); + USBCCIDState *s = USB_CCID_DEV(dev); + + s->bmCommandStatus = COMMAND_STATUS_NO_ERROR; + ccid_flush_pending_answers(s); + ccid_on_slot_change(s, true); +} + +static void ccid_card_unrealize(DeviceState *qdev) +{ + CCIDCardState *card = CCID_CARD(qdev); + CCIDCardClass *cc = CCID_CARD_GET_CLASS(card); + USBDevice *dev = USB_DEVICE(qdev->parent_bus->parent); + USBCCIDState *s = USB_CCID_DEV(dev); + + if (ccid_card_inserted(s)) { + ccid_card_card_removed(card); + } + if (cc->unrealize) { + cc->unrealize(card); + } + s->card = NULL; +} + +static void ccid_card_realize(DeviceState *qdev, Error **errp) +{ + CCIDCardState *card = CCID_CARD(qdev); + CCIDCardClass *cc = CCID_CARD_GET_CLASS(card); 
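+ /* the card sits on the reader's private ccid-bus; the bus parent is the usb-ccid device */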
+ USBDevice *dev = USB_DEVICE(qdev->parent_bus->parent); + USBCCIDState *s = USB_CCID_DEV(dev); + Error *local_err = NULL; + + if (card->slot != 0) { + error_setg(errp, "usb-ccid supports one slot, can't add %d", + card->slot); + return; + } + if (s->card != NULL) { + error_setg(errp, "usb-ccid card already full, not adding"); + return; + } + if (cc->realize) { + cc->realize(card, &local_err); + if (local_err != NULL) { + error_propagate(errp, local_err); + return; + } + } + s->card = card; +} + +static void ccid_realize(USBDevice *dev, Error **errp) +{ + USBCCIDState *s = USB_CCID_DEV(dev); + + usb_desc_create_serial(dev); + usb_desc_init(dev); + qbus_init(&s->bus, sizeof(s->bus), TYPE_CCID_BUS, DEVICE(dev), NULL); + qbus_set_hotplug_handler(BUS(&s->bus), OBJECT(dev)); + s->intr = usb_ep_get(dev, USB_TOKEN_IN, CCID_INT_IN_EP); + s->bulk = usb_ep_get(dev, USB_TOKEN_IN, CCID_BULK_IN_EP); + s->card = NULL; + s->dev.speed = USB_SPEED_FULL; + s->dev.speedmask = USB_SPEED_MASK_FULL; + s->notify_slot_change = false; + s->powered = true; + s->pending_answers_num = 0; + s->last_answer_error = 0; + s->bulk_in_pending_start = 0; + s->bulk_in_pending_end = 0; + s->current_bulk_in = NULL; + ccid_reset_error_status(s); + s->bulk_out_pos = 0; + ccid_reset_parameters(s); + ccid_reset(s); + s->debug = parse_debug_env("QEMU_CCID_DEBUG", D_VERBOSE, s->debug); +} + +static int ccid_post_load(void *opaque, int version_id) +{ + USBCCIDState *s = opaque; + + /* + * This must be done after usb_device_attach, which sets state to ATTACHED, + * while it must be DEFAULT in order to accept packets (like it is after + * reset, but reset will reset our addr and call our reset handler which + * may change state, and we don't want to do that when migrating). + */ + s->dev.state = s->state_vmstate; + return 0; +} + +static int ccid_pre_save(void *opaque) +{ + USBCCIDState *s = opaque; + + s->state_vmstate = s->dev.state; + + return 0; +} + +static const VMStateDescription bulk_in_vmstate = { + .name = "CCID BulkIn state", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_BUFFER(data, BulkIn), + VMSTATE_UINT32(len, BulkIn), + VMSTATE_UINT32(pos, BulkIn), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription answer_vmstate = { + .name = "CCID Answer state", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT8(slot, Answer), + VMSTATE_UINT8(seq, Answer), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription usb_device_vmstate = { + .name = "usb_device", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT8(addr, USBDevice), + VMSTATE_BUFFER(setup_buf, USBDevice), + VMSTATE_BUFFER(data_buf, USBDevice), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription ccid_vmstate = { + .name = "usb-ccid", + .version_id = 1, + .minimum_version_id = 1, + .post_load = ccid_post_load, + .pre_save = ccid_pre_save, + .fields = (VMStateField[]) { + VMSTATE_STRUCT(dev, USBCCIDState, 1, usb_device_vmstate, USBDevice), + VMSTATE_UINT8(debug, USBCCIDState), + VMSTATE_BUFFER(bulk_out_data, USBCCIDState), + VMSTATE_UINT32(bulk_out_pos, USBCCIDState), + VMSTATE_UINT8(bmSlotICCState, USBCCIDState), + VMSTATE_UINT8(powered, USBCCIDState), + VMSTATE_UINT8(notify_slot_change, USBCCIDState), + VMSTATE_UINT64(last_answer_error, USBCCIDState), + VMSTATE_UINT8(bError, USBCCIDState), + VMSTATE_UINT8(bmCommandStatus, USBCCIDState), + VMSTATE_UINT8(bProtocolNum, USBCCIDState), + 
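/* the negotiated T=0/T=1 parameter block is migrated as a raw byte buffer */ +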
VMSTATE_BUFFER(abProtocolDataStructure.data, USBCCIDState), + VMSTATE_UINT32(ulProtocolDataStructureSize, USBCCIDState), + VMSTATE_STRUCT_ARRAY(bulk_in_pending, USBCCIDState, + BULK_IN_PENDING_NUM, 1, bulk_in_vmstate, BulkIn), + VMSTATE_UINT32(bulk_in_pending_start, USBCCIDState), + VMSTATE_UINT32(bulk_in_pending_end, USBCCIDState), + VMSTATE_STRUCT_ARRAY(pending_answers, USBCCIDState, + PENDING_ANSWERS_NUM, 1, answer_vmstate, Answer), + VMSTATE_UINT32(pending_answers_num, USBCCIDState), + VMSTATE_UNUSED(1), /* was migration_state */ + VMSTATE_UINT32(state_vmstate, USBCCIDState), + VMSTATE_END_OF_LIST() + } +}; + +static Property ccid_properties[] = { + DEFINE_PROP_UINT8("debug", USBCCIDState, debug, 0), + DEFINE_PROP_END_OF_LIST(), +}; + +static void ccid_class_initfn(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + USBDeviceClass *uc = USB_DEVICE_CLASS(klass); + HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(klass); + + uc->realize = ccid_realize; + uc->product_desc = "QEMU USB CCID"; + uc->usb_desc = &desc_ccid; + uc->handle_reset = ccid_handle_reset; + uc->handle_control = ccid_handle_control; + uc->handle_data = ccid_handle_data; + uc->unrealize = ccid_unrealize; + dc->desc = "CCID Rev 1.1 smartcard reader"; + dc->vmsd = &ccid_vmstate; + device_class_set_props(dc, ccid_properties); + set_bit(DEVICE_CATEGORY_INPUT, dc->categories); + hc->unplug = qdev_simple_device_unplug_cb; +} + +static const TypeInfo ccid_info = { + .name = TYPE_USB_CCID_DEV, + .parent = TYPE_USB_DEVICE, + .instance_size = sizeof(USBCCIDState), + .class_init = ccid_class_initfn, + .interfaces = (InterfaceInfo[]) { + { TYPE_HOTPLUG_HANDLER }, + { } + } +}; + +static void ccid_card_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *k = DEVICE_CLASS(klass); + k->bus_type = TYPE_CCID_BUS; + k->realize = ccid_card_realize; + k->unrealize = ccid_card_unrealize; + device_class_set_props(k, ccid_props); +} + +static const TypeInfo ccid_card_type_info = { + .name = TYPE_CCID_CARD, + .parent = TYPE_DEVICE, + .instance_size = sizeof(CCIDCardState), + .abstract = true, + .class_size = sizeof(CCIDCardClass), + .class_init = ccid_card_class_init, +}; + +static void ccid_register_types(void) +{ + type_register_static(&ccid_bus_info); + type_register_static(&ccid_card_type_info); + type_register_static(&ccid_info); +} + +type_init(ccid_register_types) diff --git a/hw/usb/dev-storage-bot.c b/hw/usb/dev-storage-bot.c new file mode 100644 index 000000000..b24b3148c --- /dev/null +++ b/hw/usb/dev-storage-bot.c @@ -0,0 +1,63 @@ +/* + * USB Mass Storage Device emulation + * + * Copyright (c) 2006 CodeSourcery. + * Written by Paul Brook + * + * This code is licensed under the LGPL. 
+ */ + +#include "qemu/osdep.h" +#include "qemu/typedefs.h" +#include "qapi/error.h" +#include "hw/usb.h" +#include "hw/usb/desc.h" +#include "hw/usb/msd.h" + +static const struct SCSIBusInfo usb_msd_scsi_info_bot = { + .tcq = false, + .max_target = 0, + .max_lun = 15, + + .transfer_data = usb_msd_transfer_data, + .complete = usb_msd_command_complete, + .cancel = usb_msd_request_cancelled, + .load_request = usb_msd_load_request, +}; + +static void usb_msd_bot_realize(USBDevice *dev, Error **errp) +{ + MSDState *s = USB_STORAGE_DEV(dev); + DeviceState *d = DEVICE(dev); + + usb_desc_create_serial(dev); + usb_desc_init(dev); + dev->flags |= (1 << USB_DEV_FLAG_IS_SCSI_STORAGE); + if (d->hotplugged) { + s->dev.auto_attach = 0; + } + + scsi_bus_init(&s->bus, sizeof(s->bus), DEVICE(dev), &usb_msd_scsi_info_bot); + usb_msd_handle_reset(dev); +} + +static void usb_msd_class_bot_initfn(ObjectClass *klass, void *data) +{ + USBDeviceClass *uc = USB_DEVICE_CLASS(klass); + + uc->realize = usb_msd_bot_realize; + uc->attached_settable = true; +} + +static const TypeInfo bot_info = { + .name = "usb-bot", + .parent = TYPE_USB_STORAGE, + .class_init = usb_msd_class_bot_initfn, +}; + +static void register_types(void) +{ + type_register_static(&bot_info); +} + +type_init(register_types) diff --git a/hw/usb/dev-storage-classic.c b/hw/usb/dev-storage-classic.c new file mode 100644 index 000000000..00f25bade --- /dev/null +++ b/hw/usb/dev-storage-classic.c @@ -0,0 +1,157 @@ +/* + * USB Mass Storage Device emulation + * + * Copyright (c) 2006 CodeSourcery. + * Written by Paul Brook + * + * This code is licensed under the LGPL. + */ + +#include "qemu/osdep.h" +#include "qemu/typedefs.h" +#include "qapi/error.h" +#include "qapi/visitor.h" +#include "hw/usb.h" +#include "hw/usb/desc.h" +#include "hw/usb/msd.h" +#include "sysemu/sysemu.h" +#include "sysemu/block-backend.h" + +static const struct SCSIBusInfo usb_msd_scsi_info_storage = { + .tcq = false, + .max_target = 0, + .max_lun = 0, + + .transfer_data = usb_msd_transfer_data, + .complete = usb_msd_command_complete, + .cancel = usb_msd_request_cancelled, + .load_request = usb_msd_load_request, +}; + +static void usb_msd_storage_realize(USBDevice *dev, Error **errp) +{ + MSDState *s = USB_STORAGE_DEV(dev); + BlockBackend *blk = s->conf.blk; + SCSIDevice *scsi_dev; + + if (!blk) { + error_setg(errp, "drive property not set"); + return; + } + + if (!blkconf_blocksizes(&s->conf, errp)) { + return; + } + + if (!blkconf_apply_backend_options(&s->conf, !blk_supports_write_perm(blk), + true, errp)) { + return; + } + + /* + * Hack alert: this pretends to be a block device, but it's really + * a SCSI bus that can serve only a single device, which it + * creates automatically. But first it needs to detach from its + * blockdev, or else scsi_bus_legacy_add_drive() dies when it + * attaches again. We also need to take another reference so that + * blk_detach_dev() doesn't free blk while we still need it. + * + * The hack is probably a bad idea. 
+ */ + blk_ref(blk); + blk_detach_dev(blk, DEVICE(s)); + s->conf.blk = NULL; + + usb_desc_create_serial(dev); + usb_desc_init(dev); + dev->flags |= (1 << USB_DEV_FLAG_IS_SCSI_STORAGE); + scsi_bus_init(&s->bus, sizeof(s->bus), DEVICE(dev), + &usb_msd_scsi_info_storage); + scsi_dev = scsi_bus_legacy_add_drive(&s->bus, blk, 0, !!s->removable, + s->conf.bootindex, s->conf.share_rw, + s->conf.rerror, s->conf.werror, + dev->serial, + errp); + blk_unref(blk); + if (!scsi_dev) { + return; + } + usb_msd_handle_reset(dev); + s->scsi_dev = scsi_dev; +} + +static Property msd_properties[] = { + DEFINE_BLOCK_PROPERTIES(MSDState, conf), + DEFINE_BLOCK_ERROR_PROPERTIES(MSDState, conf), + DEFINE_PROP_BOOL("removable", MSDState, removable, false), + DEFINE_PROP_BOOL("commandlog", MSDState, commandlog, false), + DEFINE_PROP_END_OF_LIST(), +}; + +static void usb_msd_class_storage_initfn(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + USBDeviceClass *uc = USB_DEVICE_CLASS(klass); + + uc->realize = usb_msd_storage_realize; + device_class_set_props(dc, msd_properties); +} + +static void usb_msd_get_bootindex(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + USBDevice *dev = USB_DEVICE(obj); + MSDState *s = USB_STORAGE_DEV(dev); + + visit_type_int32(v, name, &s->conf.bootindex, errp); +} + +static void usb_msd_set_bootindex(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + USBDevice *dev = USB_DEVICE(obj); + MSDState *s = USB_STORAGE_DEV(dev); + int32_t boot_index; + Error *local_err = NULL; + + if (!visit_type_int32(v, name, &boot_index, errp)) { + return; + } + /* check whether bootindex is present in fw_boot_order list */ + check_boot_index(boot_index, &local_err); + if (local_err) { + goto out; + } + /* change bootindex to a new one */ + s->conf.bootindex = boot_index; + + if (s->scsi_dev) { + object_property_set_int(OBJECT(s->scsi_dev), "bootindex", boot_index, + &error_abort); + } + +out: + error_propagate(errp, local_err); +} + +static void usb_msd_instance_init(Object *obj) +{ + object_property_add(obj, "bootindex", "int32", + usb_msd_get_bootindex, + usb_msd_set_bootindex, NULL, NULL); + object_property_set_int(obj, "bootindex", -1, NULL); +} + +static const TypeInfo msd_info = { + .name = "usb-storage", + .parent = TYPE_USB_STORAGE, + .class_init = usb_msd_class_storage_initfn, + .instance_init = usb_msd_instance_init, +}; + +static void register_types(void) +{ + type_register_static(&msd_info); +} + +type_init(register_types) diff --git a/hw/usb/dev-storage.c b/hw/usb/dev-storage.c new file mode 100644 index 000000000..dca62d544 --- /dev/null +++ b/hw/usb/dev-storage.c @@ -0,0 +1,589 @@ +/* + * USB Mass Storage Device emulation + * + * Copyright (c) 2006 CodeSourcery. + * Written by Paul Brook + * + * This code is licensed under the LGPL. + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/error-report.h" +#include "qemu/module.h" +#include "qemu/option.h" +#include "qemu/config-file.h" +#include "hw/usb.h" +#include "hw/usb/msd.h" +#include "desc.h" +#include "hw/qdev-properties.h" +#include "hw/scsi/scsi.h" +#include "migration/vmstate.h" +#include "qemu/cutils.h" +#include "qom/object.h" +#include "trace.h" + +/* USB requests. 
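Class-specific control requests from the USB Mass Storage Bulk-Only Transport spec: 0xff is Bulk-Only Mass Storage Reset, 0xfe is Get Max LUN. 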
*/ +#define MassStorageReset 0xff +#define GetMaxLun 0xfe + +struct usb_msd_cbw { + uint32_t sig; + uint32_t tag; + uint32_t data_len; + uint8_t flags; + uint8_t lun; + uint8_t cmd_len; + uint8_t cmd[16]; +}; + +enum { + STR_MANUFACTURER = 1, + STR_PRODUCT, + STR_SERIALNUMBER, + STR_CONFIG_FULL, + STR_CONFIG_HIGH, + STR_CONFIG_SUPER, +}; + +static const USBDescStrings desc_strings = { + [STR_MANUFACTURER] = "QEMU", + [STR_PRODUCT] = "QEMU USB HARDDRIVE", + [STR_SERIALNUMBER] = "1", + [STR_CONFIG_FULL] = "Full speed config (usb 1.1)", + [STR_CONFIG_HIGH] = "High speed config (usb 2.0)", + [STR_CONFIG_SUPER] = "Super speed config (usb 3.0)", +}; + +static const USBDescIface desc_iface_full = { + .bInterfaceNumber = 0, + .bNumEndpoints = 2, + .bInterfaceClass = USB_CLASS_MASS_STORAGE, + .bInterfaceSubClass = 0x06, /* SCSI */ + .bInterfaceProtocol = 0x50, /* Bulk */ + .eps = (USBDescEndpoint[]) { + { + .bEndpointAddress = USB_DIR_IN | 0x01, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = 64, + },{ + .bEndpointAddress = USB_DIR_OUT | 0x02, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = 64, + }, + } +}; + +static const USBDescDevice desc_device_full = { + .bcdUSB = 0x0200, + .bMaxPacketSize0 = 8, + .bNumConfigurations = 1, + .confs = (USBDescConfig[]) { + { + .bNumInterfaces = 1, + .bConfigurationValue = 1, + .iConfiguration = STR_CONFIG_FULL, + .bmAttributes = USB_CFG_ATT_ONE | USB_CFG_ATT_SELFPOWER, + .nif = 1, + .ifs = &desc_iface_full, + }, + }, +}; + +static const USBDescIface desc_iface_high = { + .bInterfaceNumber = 0, + .bNumEndpoints = 2, + .bInterfaceClass = USB_CLASS_MASS_STORAGE, + .bInterfaceSubClass = 0x06, /* SCSI */ + .bInterfaceProtocol = 0x50, /* Bulk */ + .eps = (USBDescEndpoint[]) { + { + .bEndpointAddress = USB_DIR_IN | 0x01, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = 512, + },{ + .bEndpointAddress = USB_DIR_OUT | 0x02, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = 512, + }, + } +}; + +static const USBDescDevice desc_device_high = { + .bcdUSB = 0x0200, + .bMaxPacketSize0 = 64, + .bNumConfigurations = 1, + .confs = (USBDescConfig[]) { + { + .bNumInterfaces = 1, + .bConfigurationValue = 1, + .iConfiguration = STR_CONFIG_HIGH, + .bmAttributes = USB_CFG_ATT_ONE | USB_CFG_ATT_SELFPOWER, + .nif = 1, + .ifs = &desc_iface_high, + }, + }, +}; + +static const USBDescIface desc_iface_super = { + .bInterfaceNumber = 0, + .bNumEndpoints = 2, + .bInterfaceClass = USB_CLASS_MASS_STORAGE, + .bInterfaceSubClass = 0x06, /* SCSI */ + .bInterfaceProtocol = 0x50, /* Bulk */ + .eps = (USBDescEndpoint[]) { + { + .bEndpointAddress = USB_DIR_IN | 0x01, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = 1024, + .bMaxBurst = 15, + },{ + .bEndpointAddress = USB_DIR_OUT | 0x02, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = 1024, + .bMaxBurst = 15, + }, + } +}; + +static const USBDescDevice desc_device_super = { + .bcdUSB = 0x0300, + .bMaxPacketSize0 = 9, + .bNumConfigurations = 1, + .confs = (USBDescConfig[]) { + { + .bNumInterfaces = 1, + .bConfigurationValue = 1, + .iConfiguration = STR_CONFIG_SUPER, + .bmAttributes = USB_CFG_ATT_ONE | USB_CFG_ATT_SELFPOWER, + .nif = 1, + .ifs = &desc_iface_super, + }, + }, +}; + +static const USBDesc desc = { + .id = { + .idVendor = 0x46f4, /* CRC16() of "QEMU" */ + .idProduct = 0x0001, + .bcdDevice = 0, + .iManufacturer = STR_MANUFACTURER, + .iProduct = STR_PRODUCT, + .iSerialNumber = STR_SERIALNUMBER, + }, + .full = &desc_device_full, + .high = &desc_device_high, + .super = 
&desc_device_super, + .str = desc_strings, +}; + +static void usb_msd_copy_data(MSDState *s, USBPacket *p) +{ + uint32_t len; + len = p->iov.size - p->actual_length; + if (len > s->scsi_len) + len = s->scsi_len; + usb_packet_copy(p, scsi_req_get_buf(s->req) + s->scsi_off, len); + s->scsi_len -= len; + s->scsi_off += len; + if (len > s->data_len) { + len = s->data_len; + } + s->data_len -= len; + if (s->scsi_len == 0 || s->data_len == 0) { + scsi_req_continue(s->req); + } +} + +static void usb_msd_send_status(MSDState *s, USBPacket *p) +{ + int len; + + trace_usb_msd_send_status(s->csw.status, le32_to_cpu(s->csw.tag), + p->iov.size); + + assert(s->csw.sig == cpu_to_le32(0x53425355)); + len = MIN(sizeof(s->csw), p->iov.size); + usb_packet_copy(p, &s->csw, len); + memset(&s->csw, 0, sizeof(s->csw)); +} + +static void usb_msd_packet_complete(MSDState *s) +{ + USBPacket *p = s->packet; + + /* Set s->packet to NULL before calling usb_packet_complete + because another request may be issued before + usb_packet_complete returns. */ + trace_usb_msd_packet_complete(); + s->packet = NULL; + usb_packet_complete(&s->dev, p); +} + +void usb_msd_transfer_data(SCSIRequest *req, uint32_t len) +{ + MSDState *s = DO_UPCAST(MSDState, dev.qdev, req->bus->qbus.parent); + USBPacket *p = s->packet; + + assert((s->mode == USB_MSDM_DATAOUT) == (req->cmd.mode == SCSI_XFER_TO_DEV)); + s->scsi_len = len; + s->scsi_off = 0; + if (p) { + usb_msd_copy_data(s, p); + p = s->packet; + if (p && p->actual_length == p->iov.size) { + p->status = USB_RET_SUCCESS; /* Clear previous ASYNC status */ + usb_msd_packet_complete(s); + } + } +} + +void usb_msd_command_complete(SCSIRequest *req, size_t resid) +{ + MSDState *s = DO_UPCAST(MSDState, dev.qdev, req->bus->qbus.parent); + USBPacket *p = s->packet; + + trace_usb_msd_cmd_complete(req->status, req->tag); + + s->csw.sig = cpu_to_le32(0x53425355); + s->csw.tag = cpu_to_le32(req->tag); + s->csw.residue = cpu_to_le32(s->data_len); + s->csw.status = req->status != 0; + + if (s->packet) { + if (s->data_len == 0 && s->mode == USB_MSDM_DATAOUT) { + /* A deferred packet with no write data remaining must be + the status read packet. 
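The guest queued its CSW read before the SCSI command finished. 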
*/ + usb_msd_send_status(s, p); + s->mode = USB_MSDM_CBW; + } else if (s->mode == USB_MSDM_CSW) { + usb_msd_send_status(s, p); + s->mode = USB_MSDM_CBW; + } else { + if (s->data_len) { + int len = (p->iov.size - p->actual_length); + usb_packet_skip(p, len); + if (len > s->data_len) { + len = s->data_len; + } + s->data_len -= len; + } + if (s->data_len == 0) { + s->mode = USB_MSDM_CSW; + } + } + p->status = USB_RET_SUCCESS; /* Clear previous ASYNC status */ + usb_msd_packet_complete(s); + } else if (s->data_len == 0) { + s->mode = USB_MSDM_CSW; + } + scsi_req_unref(req); + s->req = NULL; +} + +void usb_msd_request_cancelled(SCSIRequest *req) +{ + MSDState *s = DO_UPCAST(MSDState, dev.qdev, req->bus->qbus.parent); + + trace_usb_msd_cmd_cancel(req->tag); + + if (req == s->req) { + s->csw.sig = cpu_to_le32(0x53425355); + s->csw.tag = cpu_to_le32(req->tag); + s->csw.status = 1; /* error */ + + scsi_req_unref(s->req); + s->req = NULL; + s->scsi_len = 0; + } +} + +void usb_msd_handle_reset(USBDevice *dev) +{ + MSDState *s = (MSDState *)dev; + + trace_usb_msd_reset(); + if (s->req) { + scsi_req_cancel(s->req); + } + assert(s->req == NULL); + + if (s->packet) { + s->packet->status = USB_RET_STALL; + usb_msd_packet_complete(s); + } + + memset(&s->csw, 0, sizeof(s->csw)); + s->mode = USB_MSDM_CBW; +} + +static void usb_msd_handle_control(USBDevice *dev, USBPacket *p, + int request, int value, int index, int length, uint8_t *data) +{ + MSDState *s = (MSDState *)dev; + SCSIDevice *scsi_dev; + int ret, maxlun; + + ret = usb_desc_handle_control(dev, p, request, value, index, length, data); + if (ret >= 0) { + return; + } + + switch (request) { + case EndpointOutRequest | USB_REQ_CLEAR_FEATURE: + break; + /* Class specific requests. */ + case ClassInterfaceOutRequest | MassStorageReset: + /* Reset state ready for the next CBW. 
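Per the BOT spec the reset rewinds only the transport state machine; endpoint STALLs and data toggles are preserved. 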
*/ + s->mode = USB_MSDM_CBW; + break; + case ClassInterfaceRequest | GetMaxLun: + maxlun = 0; + for (;;) { + scsi_dev = scsi_device_find(&s->bus, 0, 0, maxlun+1); + if (scsi_dev == NULL) { + break; + } + if (scsi_dev->lun != maxlun+1) { + break; + } + maxlun++; + } + trace_usb_msd_maxlun(maxlun); + data[0] = maxlun; + p->actual_length = 1; + break; + default: + p->status = USB_RET_STALL; + break; + } +} + +static void usb_msd_cancel_io(USBDevice *dev, USBPacket *p) +{ + MSDState *s = USB_STORAGE_DEV(dev); + + assert(s->packet == p); + s->packet = NULL; + + if (s->req) { + scsi_req_cancel(s->req); + } +} + +static void usb_msd_handle_data(USBDevice *dev, USBPacket *p) +{ + MSDState *s = (MSDState *)dev; + uint32_t tag; + struct usb_msd_cbw cbw; + uint8_t devep = p->ep->nr; + SCSIDevice *scsi_dev; + uint32_t len; + + switch (p->pid) { + case USB_TOKEN_OUT: + if (devep != 2) + goto fail; + + switch (s->mode) { + case USB_MSDM_CBW: + if (p->iov.size != 31) { + error_report("usb-msd: Bad CBW size"); + goto fail; + } + usb_packet_copy(p, &cbw, 31); + if (le32_to_cpu(cbw.sig) != 0x43425355) { + error_report("usb-msd: Bad signature %08x", + le32_to_cpu(cbw.sig)); + goto fail; + } + scsi_dev = scsi_device_find(&s->bus, 0, 0, cbw.lun); + if (scsi_dev == NULL) { + error_report("usb-msd: Bad LUN %d", cbw.lun); + goto fail; + } + tag = le32_to_cpu(cbw.tag); + s->data_len = le32_to_cpu(cbw.data_len); + if (s->data_len == 0) { + s->mode = USB_MSDM_CSW; + } else if (cbw.flags & 0x80) { + s->mode = USB_MSDM_DATAIN; + } else { + s->mode = USB_MSDM_DATAOUT; + } + trace_usb_msd_cmd_submit(cbw.lun, tag, cbw.flags, + cbw.cmd_len, s->data_len); + assert(le32_to_cpu(s->csw.residue) == 0); + s->scsi_len = 0; + s->req = scsi_req_new(scsi_dev, tag, cbw.lun, cbw.cmd, NULL); + if (s->commandlog) { + scsi_req_print(s->req); + } + len = scsi_req_enqueue(s->req); + if (len) { + scsi_req_continue(s->req); + } + break; + + case USB_MSDM_DATAOUT: + trace_usb_msd_data_out(p->iov.size, s->data_len); + if (p->iov.size > s->data_len) { + goto fail; + } + + if (s->scsi_len) { + usb_msd_copy_data(s, p); + } + if (le32_to_cpu(s->csw.residue)) { + int len = p->iov.size - p->actual_length; + if (len) { + usb_packet_skip(p, len); + if (len > s->data_len) { + len = s->data_len; + } + s->data_len -= len; + if (s->data_len == 0) { + s->mode = USB_MSDM_CSW; + } + } + } + if (p->actual_length < p->iov.size) { + trace_usb_msd_packet_async(); + s->packet = p; + p->status = USB_RET_ASYNC; + } + break; + + default: + goto fail; + } + break; + + case USB_TOKEN_IN: + if (devep != 1) + goto fail; + + switch (s->mode) { + case USB_MSDM_DATAOUT: + if (s->data_len != 0 || p->iov.size < 13) { + goto fail; + } + /* Waiting for SCSI write to complete. 
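Park the packet here; usb_msd_command_complete() will send the CSW and finish it. 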
*/ + trace_usb_msd_packet_async(); + s->packet = p; + p->status = USB_RET_ASYNC; + break; + + case USB_MSDM_CSW: + if (p->iov.size < 13) { + goto fail; + } + + if (s->req) { + /* still in flight */ + trace_usb_msd_packet_async(); + s->packet = p; + p->status = USB_RET_ASYNC; + } else { + usb_msd_send_status(s, p); + s->mode = USB_MSDM_CBW; + } + break; + + case USB_MSDM_DATAIN: + trace_usb_msd_data_in(p->iov.size, s->data_len, s->scsi_len); + if (s->scsi_len) { + usb_msd_copy_data(s, p); + } + if (le32_to_cpu(s->csw.residue)) { + int len = p->iov.size - p->actual_length; + if (len) { + usb_packet_skip(p, len); + if (len > s->data_len) { + len = s->data_len; + } + s->data_len -= len; + if (s->data_len == 0) { + s->mode = USB_MSDM_CSW; + } + } + } + if (p->actual_length < p->iov.size && s->mode == USB_MSDM_DATAIN) { + trace_usb_msd_packet_async(); + s->packet = p; + p->status = USB_RET_ASYNC; + } + break; + + default: + goto fail; + } + break; + + default: + fail: + p->status = USB_RET_STALL; + break; + } +} + +void *usb_msd_load_request(QEMUFile *f, SCSIRequest *req) +{ + MSDState *s = DO_UPCAST(MSDState, dev.qdev, req->bus->qbus.parent); + + /* nothing to load, just store req in our state struct */ + assert(s->req == NULL); + scsi_req_ref(req); + s->req = req; + return NULL; +} + +static const VMStateDescription vmstate_usb_msd = { + .name = "usb-storage", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_USB_DEVICE(dev, MSDState), + VMSTATE_UINT32(mode, MSDState), + VMSTATE_UINT32(scsi_len, MSDState), + VMSTATE_UINT32(scsi_off, MSDState), + VMSTATE_UINT32(data_len, MSDState), + VMSTATE_UINT32(csw.sig, MSDState), + VMSTATE_UINT32(csw.tag, MSDState), + VMSTATE_UINT32(csw.residue, MSDState), + VMSTATE_UINT8(csw.status, MSDState), + VMSTATE_END_OF_LIST() + } +}; + +static void usb_msd_class_initfn_common(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + USBDeviceClass *uc = USB_DEVICE_CLASS(klass); + + uc->product_desc = "QEMU USB MSD"; + uc->usb_desc = &desc; + uc->cancel_packet = usb_msd_cancel_io; + uc->handle_attach = usb_desc_attach; + uc->handle_reset = usb_msd_handle_reset; + uc->handle_control = usb_msd_handle_control; + uc->handle_data = usb_msd_handle_data; + set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); + dc->fw_name = "storage"; + dc->vmsd = &vmstate_usb_msd; +} + +static const TypeInfo usb_storage_dev_type_info = { + .name = TYPE_USB_STORAGE, + .parent = TYPE_USB_DEVICE, + .instance_size = sizeof(MSDState), + .abstract = true, + .class_init = usb_msd_class_initfn_common, +}; + +static void usb_msd_register_types(void) +{ + type_register_static(&usb_storage_dev_type_info); +} + +type_init(usb_msd_register_types) diff --git a/hw/usb/dev-uas.c b/hw/usb/dev-uas.c new file mode 100644 index 000000000..599d6b52a --- /dev/null +++ b/hw/usb/dev-uas.c @@ -0,0 +1,991 @@ +/* + * UAS (USB Attached SCSI) emulation + * + * Copyright Red Hat, Inc. 2012 + * + * Author: Gerd Hoffmann + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ */ + +#include "qemu/osdep.h" +#include "qemu/option.h" +#include "qemu/config-file.h" +#include "trace.h" +#include "qemu/error-report.h" +#include "qemu/main-loop.h" +#include "qemu/module.h" +#include "qemu/log.h" + +#include "hw/usb.h" +#include "migration/vmstate.h" +#include "desc.h" +#include "hw/qdev-properties.h" +#include "hw/scsi/scsi.h" +#include "scsi/constants.h" +#include "qom/object.h" + +/* --------------------------------------------------------------------- */ + +#define UAS_UI_COMMAND 0x01 +#define UAS_UI_SENSE 0x03 +#define UAS_UI_RESPONSE 0x04 +#define UAS_UI_TASK_MGMT 0x05 +#define UAS_UI_READ_READY 0x06 +#define UAS_UI_WRITE_READY 0x07 + +#define UAS_RC_TMF_COMPLETE 0x00 +#define UAS_RC_INVALID_INFO_UNIT 0x02 +#define UAS_RC_TMF_NOT_SUPPORTED 0x04 +#define UAS_RC_TMF_FAILED 0x05 +#define UAS_RC_TMF_SUCCEEDED 0x08 +#define UAS_RC_INCORRECT_LUN 0x09 +#define UAS_RC_OVERLAPPED_TAG 0x0a + +#define UAS_TMF_ABORT_TASK 0x01 +#define UAS_TMF_ABORT_TASK_SET 0x02 +#define UAS_TMF_CLEAR_TASK_SET 0x04 +#define UAS_TMF_LOGICAL_UNIT_RESET 0x08 +#define UAS_TMF_I_T_NEXUS_RESET 0x10 +#define UAS_TMF_CLEAR_ACA 0x40 +#define UAS_TMF_QUERY_TASK 0x80 +#define UAS_TMF_QUERY_TASK_SET 0x81 +#define UAS_TMF_QUERY_ASYNC_EVENT 0x82 + +#define UAS_PIPE_ID_COMMAND 0x01 +#define UAS_PIPE_ID_STATUS 0x02 +#define UAS_PIPE_ID_DATA_IN 0x03 +#define UAS_PIPE_ID_DATA_OUT 0x04 + +typedef struct { + uint8_t id; + uint8_t reserved; + uint16_t tag; +} QEMU_PACKED uas_iu_header; + +typedef struct { + uint8_t prio_taskattr; /* 6:3 priority, 2:0 task attribute */ + uint8_t reserved_1; + uint8_t add_cdb_length; /* 7:2 additional cdb length (dwords) */ + uint8_t reserved_2; + uint64_t lun; + uint8_t cdb[16]; + uint8_t add_cdb[1]; /* not supported by QEMU */ +} QEMU_PACKED uas_iu_command; + +typedef struct { + uint16_t status_qualifier; + uint8_t status; + uint8_t reserved[7]; + uint16_t sense_length; + uint8_t sense_data[18]; +} QEMU_PACKED uas_iu_sense; + +typedef struct { + uint8_t add_response_info[3]; + uint8_t response_code; +} QEMU_PACKED uas_iu_response; + +typedef struct { + uint8_t function; + uint8_t reserved; + uint16_t task_tag; + uint64_t lun; +} QEMU_PACKED uas_iu_task_mgmt; + +typedef struct { + uas_iu_header hdr; + union { + uas_iu_command command; + uas_iu_sense sense; + uas_iu_task_mgmt task; + uas_iu_response response; + }; +} QEMU_PACKED uas_iu; + +/* --------------------------------------------------------------------- */ + +#define UAS_STREAM_BM_ATTR 4 +#define UAS_MAX_STREAMS (1 << UAS_STREAM_BM_ATTR) + +typedef struct UASDevice UASDevice; +typedef struct UASRequest UASRequest; +typedef struct UASStatus UASStatus; + +struct UASDevice { + USBDevice dev; + SCSIBus bus; + QEMUBH *status_bh; + QTAILQ_HEAD(, UASStatus) results; + QTAILQ_HEAD(, UASRequest) requests; + + /* properties */ + uint32_t requestlog; + + /* usb 2.0 only */ + USBPacket *status2; + UASRequest *datain2; + UASRequest *dataout2; + + /* usb 3.0 only */ + USBPacket *data3[UAS_MAX_STREAMS + 1]; + USBPacket *status3[UAS_MAX_STREAMS + 1]; +}; + +#define TYPE_USB_UAS "usb-uas" +OBJECT_DECLARE_SIMPLE_TYPE(UASDevice, USB_UAS) + +struct UASRequest { + uint16_t tag; + uint64_t lun; + UASDevice *uas; + SCSIDevice *dev; + SCSIRequest *req; + USBPacket *data; + bool data_async; + bool active; + bool complete; + uint32_t buf_off; + uint32_t buf_size; + uint32_t data_off; + uint32_t data_size; + QTAILQ_ENTRY(UASRequest) next; +}; + +struct UASStatus { + uint32_t stream; + uas_iu status; + uint32_t length; + QTAILQ_ENTRY(UASStatus) 
next; +}; + +/* --------------------------------------------------------------------- */ + +enum { + STR_MANUFACTURER = 1, + STR_PRODUCT, + STR_SERIALNUMBER, + STR_CONFIG_HIGH, + STR_CONFIG_SUPER, +}; + +static const USBDescStrings desc_strings = { + [STR_MANUFACTURER] = "QEMU", + [STR_PRODUCT] = "USB Attached SCSI HBA", + [STR_SERIALNUMBER] = "27842", + [STR_CONFIG_HIGH] = "High speed config (usb 2.0)", + [STR_CONFIG_SUPER] = "Super speed config (usb 3.0)", +}; + +static const USBDescIface desc_iface_high = { + .bInterfaceNumber = 0, + .bNumEndpoints = 4, + .bInterfaceClass = USB_CLASS_MASS_STORAGE, + .bInterfaceSubClass = 0x06, /* SCSI */ + .bInterfaceProtocol = 0x62, /* UAS */ + .eps = (USBDescEndpoint[]) { + { + .bEndpointAddress = USB_DIR_OUT | UAS_PIPE_ID_COMMAND, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = 512, + .extra = (uint8_t[]) { + 0x04, /* u8 bLength */ + 0x24, /* u8 bDescriptorType */ + UAS_PIPE_ID_COMMAND, + 0x00, /* u8 bReserved */ + }, + },{ + .bEndpointAddress = USB_DIR_IN | UAS_PIPE_ID_STATUS, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = 512, + .extra = (uint8_t[]) { + 0x04, /* u8 bLength */ + 0x24, /* u8 bDescriptorType */ + UAS_PIPE_ID_STATUS, + 0x00, /* u8 bReserved */ + }, + },{ + .bEndpointAddress = USB_DIR_IN | UAS_PIPE_ID_DATA_IN, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = 512, + .extra = (uint8_t[]) { + 0x04, /* u8 bLength */ + 0x24, /* u8 bDescriptorType */ + UAS_PIPE_ID_DATA_IN, + 0x00, /* u8 bReserved */ + }, + },{ + .bEndpointAddress = USB_DIR_OUT | UAS_PIPE_ID_DATA_OUT, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = 512, + .extra = (uint8_t[]) { + 0x04, /* u8 bLength */ + 0x24, /* u8 bDescriptorType */ + UAS_PIPE_ID_DATA_OUT, + 0x00, /* u8 bReserved */ + }, + }, + } +}; + +static const USBDescIface desc_iface_super = { + .bInterfaceNumber = 0, + .bNumEndpoints = 4, + .bInterfaceClass = USB_CLASS_MASS_STORAGE, + .bInterfaceSubClass = 0x06, /* SCSI */ + .bInterfaceProtocol = 0x62, /* UAS */ + .eps = (USBDescEndpoint[]) { + { + .bEndpointAddress = USB_DIR_OUT | UAS_PIPE_ID_COMMAND, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = 1024, + .bMaxBurst = 15, + .extra = (uint8_t[]) { + 0x04, /* u8 bLength */ + 0x24, /* u8 bDescriptorType */ + UAS_PIPE_ID_COMMAND, + 0x00, /* u8 bReserved */ + }, + },{ + .bEndpointAddress = USB_DIR_IN | UAS_PIPE_ID_STATUS, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = 1024, + .bMaxBurst = 15, + .bmAttributes_super = UAS_STREAM_BM_ATTR, + .extra = (uint8_t[]) { + 0x04, /* u8 bLength */ + 0x24, /* u8 bDescriptorType */ + UAS_PIPE_ID_STATUS, + 0x00, /* u8 bReserved */ + }, + },{ + .bEndpointAddress = USB_DIR_IN | UAS_PIPE_ID_DATA_IN, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = 1024, + .bMaxBurst = 15, + .bmAttributes_super = UAS_STREAM_BM_ATTR, + .extra = (uint8_t[]) { + 0x04, /* u8 bLength */ + 0x24, /* u8 bDescriptorType */ + UAS_PIPE_ID_DATA_IN, + 0x00, /* u8 bReserved */ + }, + },{ + .bEndpointAddress = USB_DIR_OUT | UAS_PIPE_ID_DATA_OUT, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = 1024, + .bMaxBurst = 15, + .bmAttributes_super = UAS_STREAM_BM_ATTR, + .extra = (uint8_t[]) { + 0x04, /* u8 bLength */ + 0x24, /* u8 bDescriptorType */ + UAS_PIPE_ID_DATA_OUT, + 0x00, /* u8 bReserved */ + }, + }, + } +}; + +static const USBDescDevice desc_device_high = { + .bcdUSB = 0x0200, + .bMaxPacketSize0 = 64, + .bNumConfigurations = 1, + .confs = (USBDescConfig[]) { + { + .bNumInterfaces = 1, + .bConfigurationValue = 
1, + .iConfiguration = STR_CONFIG_HIGH, + .bmAttributes = USB_CFG_ATT_ONE | USB_CFG_ATT_SELFPOWER, + .nif = 1, + .ifs = &desc_iface_high, + }, + }, +}; + +static const USBDescDevice desc_device_super = { + .bcdUSB = 0x0300, + .bMaxPacketSize0 = 9, + .bNumConfigurations = 1, + .confs = (USBDescConfig[]) { + { + .bNumInterfaces = 1, + .bConfigurationValue = 1, + .iConfiguration = STR_CONFIG_SUPER, + .bmAttributes = USB_CFG_ATT_ONE | USB_CFG_ATT_SELFPOWER, + .nif = 1, + .ifs = &desc_iface_super, + }, + }, +}; + +static const USBDesc desc = { + .id = { + .idVendor = 0x46f4, /* CRC16() of "QEMU" */ + .idProduct = 0x0003, + .bcdDevice = 0, + .iManufacturer = STR_MANUFACTURER, + .iProduct = STR_PRODUCT, + .iSerialNumber = STR_SERIALNUMBER, + }, + .high = &desc_device_high, + .super = &desc_device_super, + .str = desc_strings, +}; + +/* --------------------------------------------------------------------- */ + +static bool uas_using_streams(UASDevice *uas) +{ + return uas->dev.speed == USB_SPEED_SUPER; +} + +/* --------------------------------------------------------------------- */ + +static UASStatus *usb_uas_alloc_status(UASDevice *uas, uint8_t id, uint16_t tag) +{ + UASStatus *st = g_new0(UASStatus, 1); + + st->status.hdr.id = id; + st->status.hdr.tag = cpu_to_be16(tag); + st->length = sizeof(uas_iu_header); + if (uas_using_streams(uas)) { + st->stream = tag; + } + return st; +} + +static void usb_uas_send_status_bh(void *opaque) +{ + UASDevice *uas = opaque; + UASStatus *st; + USBPacket *p; + + while ((st = QTAILQ_FIRST(&uas->results)) != NULL) { + if (uas_using_streams(uas)) { + p = uas->status3[st->stream]; + uas->status3[st->stream] = NULL; + } else { + p = uas->status2; + uas->status2 = NULL; + } + if (p == NULL) { + break; + } + + usb_packet_copy(p, &st->status, st->length); + QTAILQ_REMOVE(&uas->results, st, next); + g_free(st); + + p->status = USB_RET_SUCCESS; /* Clear previous ASYNC status */ + usb_packet_complete(&uas->dev, p); + } +} + +static void usb_uas_queue_status(UASDevice *uas, UASStatus *st, int length) +{ + USBPacket *p = uas_using_streams(uas) ? + uas->status3[st->stream] : uas->status2; + + st->length += length; + QTAILQ_INSERT_TAIL(&uas->results, st, next); + if (p) { + /* + * Just schedule the bh to make sure any in-flight data transaction + * is finished before completing (sending) the status packet. 
+ */ + qemu_bh_schedule(uas->status_bh); + } else { + USBEndpoint *ep = usb_ep_get(&uas->dev, USB_TOKEN_IN, + UAS_PIPE_ID_STATUS); + usb_wakeup(ep, st->stream); + } +} + +static void usb_uas_queue_response(UASDevice *uas, uint16_t tag, uint8_t code) +{ + UASStatus *st = usb_uas_alloc_status(uas, UAS_UI_RESPONSE, tag); + + trace_usb_uas_response(uas->dev.addr, tag, code); + st->status.response.response_code = code; + usb_uas_queue_status(uas, st, sizeof(uas_iu_response)); +} + +static void usb_uas_queue_sense(UASRequest *req, uint8_t status) +{ + UASStatus *st = usb_uas_alloc_status(req->uas, UAS_UI_SENSE, req->tag); + int len, slen = 0; + + trace_usb_uas_sense(req->uas->dev.addr, req->tag, status); + st->status.sense.status = status; + st->status.sense.status_qualifier = cpu_to_be16(0); + if (status != GOOD) { + slen = scsi_req_get_sense(req->req, st->status.sense.sense_data, + sizeof(st->status.sense.sense_data)); + st->status.sense.sense_length = cpu_to_be16(slen); + } + len = sizeof(uas_iu_sense) - sizeof(st->status.sense.sense_data) + slen; + usb_uas_queue_status(req->uas, st, len); +} + +static void usb_uas_queue_fake_sense(UASDevice *uas, uint16_t tag, + struct SCSISense sense) +{ + UASStatus *st = usb_uas_alloc_status(uas, UAS_UI_SENSE, tag); + int len, slen = 0; + + st->status.sense.status = CHECK_CONDITION; + st->status.sense.status_qualifier = cpu_to_be16(0); + st->status.sense.sense_data[0] = 0x70; + st->status.sense.sense_data[2] = sense.key; + st->status.sense.sense_data[7] = 10; + st->status.sense.sense_data[12] = sense.asc; + st->status.sense.sense_data[13] = sense.ascq; + slen = 18; + len = sizeof(uas_iu_sense) - sizeof(st->status.sense.sense_data) + slen; + usb_uas_queue_status(uas, st, len); +} + +static void usb_uas_queue_read_ready(UASRequest *req) +{ + UASStatus *st = usb_uas_alloc_status(req->uas, UAS_UI_READ_READY, + req->tag); + + trace_usb_uas_read_ready(req->uas->dev.addr, req->tag); + usb_uas_queue_status(req->uas, st, 0); +} + +static void usb_uas_queue_write_ready(UASRequest *req) +{ + UASStatus *st = usb_uas_alloc_status(req->uas, UAS_UI_WRITE_READY, + req->tag); + + trace_usb_uas_write_ready(req->uas->dev.addr, req->tag); + usb_uas_queue_status(req->uas, st, 0); +} + +/* --------------------------------------------------------------------- */ + +static int usb_uas_get_lun(uint64_t lun64) +{ + return (lun64 >> 48) & 0xff; +} + +static SCSIDevice *usb_uas_get_dev(UASDevice *uas, uint64_t lun64) +{ + if ((lun64 >> 56) != 0x00) { + return NULL; + } + return scsi_device_find(&uas->bus, 0, 0, usb_uas_get_lun(lun64)); +} + +static void usb_uas_complete_data_packet(UASRequest *req) +{ + USBPacket *p; + + if (!req->data_async) { + return; + } + p = req->data; + req->data = NULL; + req->data_async = false; + p->status = USB_RET_SUCCESS; /* Clear previous ASYNC status */ + usb_packet_complete(&req->uas->dev, p); +} + +static void usb_uas_copy_data(UASRequest *req) +{ + uint32_t length; + + length = MIN(req->buf_size - req->buf_off, + req->data->iov.size - req->data->actual_length); + trace_usb_uas_xfer_data(req->uas->dev.addr, req->tag, length, + req->data->actual_length, req->data->iov.size, + req->buf_off, req->buf_size); + usb_packet_copy(req->data, scsi_req_get_buf(req->req) + req->buf_off, + length); + req->buf_off += length; + req->data_off += length; + + if (req->data->actual_length == req->data->iov.size) { + usb_uas_complete_data_packet(req); + } + if (req->buf_size && req->buf_off == req->buf_size) { + req->buf_off = 0; + req->buf_size = 0; + 
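/* buffer fully consumed: hand control back to the SCSI layer for the next chunk */ +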
scsi_req_continue(req->req); + } +} + +static void usb_uas_start_next_transfer(UASDevice *uas) +{ + UASRequest *req; + + if (uas_using_streams(uas)) { + return; + } + + QTAILQ_FOREACH(req, &uas->requests, next) { + if (req->active || req->complete) { + continue; + } + if (req->req->cmd.mode == SCSI_XFER_FROM_DEV && uas->datain2 == NULL) { + uas->datain2 = req; + usb_uas_queue_read_ready(req); + req->active = true; + return; + } + if (req->req->cmd.mode == SCSI_XFER_TO_DEV && uas->dataout2 == NULL) { + uas->dataout2 = req; + usb_uas_queue_write_ready(req); + req->active = true; + return; + } + } +} + +static UASRequest *usb_uas_alloc_request(UASDevice *uas, uas_iu *iu) +{ + UASRequest *req; + + req = g_new0(UASRequest, 1); + req->uas = uas; + req->tag = be16_to_cpu(iu->hdr.tag); + req->lun = be64_to_cpu(iu->command.lun); + req->dev = usb_uas_get_dev(req->uas, req->lun); + return req; +} + +static void usb_uas_scsi_free_request(SCSIBus *bus, void *priv) +{ + UASRequest *req = priv; + UASDevice *uas = req->uas; + + if (req == uas->datain2) { + uas->datain2 = NULL; + } + if (req == uas->dataout2) { + uas->dataout2 = NULL; + } + QTAILQ_REMOVE(&uas->requests, req, next); + g_free(req); + usb_uas_start_next_transfer(uas); +} + +static UASRequest *usb_uas_find_request(UASDevice *uas, uint16_t tag) +{ + UASRequest *req; + + QTAILQ_FOREACH(req, &uas->requests, next) { + if (req->tag == tag) { + return req; + } + } + return NULL; +} + +static void usb_uas_scsi_transfer_data(SCSIRequest *r, uint32_t len) +{ + UASRequest *req = r->hba_private; + + trace_usb_uas_scsi_data(req->uas->dev.addr, req->tag, len); + req->buf_off = 0; + req->buf_size = len; + if (req->data) { + usb_uas_copy_data(req); + } else { + usb_uas_start_next_transfer(req->uas); + } +} + +static void usb_uas_scsi_command_complete(SCSIRequest *r, size_t resid) +{ + UASRequest *req = r->hba_private; + + trace_usb_uas_scsi_complete(req->uas->dev.addr, req->tag, r->status, resid); + req->complete = true; + if (req->data) { + usb_uas_complete_data_packet(req); + } + usb_uas_queue_sense(req, r->status); + scsi_req_unref(req->req); +} + +static void usb_uas_scsi_request_cancelled(SCSIRequest *r) +{ + UASRequest *req = r->hba_private; + + /* FIXME: queue notification to status pipe? 
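As it stands the cancelled request is simply released; the ABORT TASK path in usb_uas_task() queues its own TMF_COMPLETE response. 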
*/ + scsi_req_unref(req->req); +} + +static const struct SCSIBusInfo usb_uas_scsi_info = { + .tcq = true, + .max_target = 0, + .max_lun = 255, + + .transfer_data = usb_uas_scsi_transfer_data, + .complete = usb_uas_scsi_command_complete, + .cancel = usb_uas_scsi_request_cancelled, + .free_request = usb_uas_scsi_free_request, +}; + +/* --------------------------------------------------------------------- */ + +static void usb_uas_handle_reset(USBDevice *dev) +{ + UASDevice *uas = USB_UAS(dev); + UASRequest *req, *nreq; + UASStatus *st, *nst; + + trace_usb_uas_reset(dev->addr); + QTAILQ_FOREACH_SAFE(req, &uas->requests, next, nreq) { + scsi_req_cancel(req->req); + } + QTAILQ_FOREACH_SAFE(st, &uas->results, next, nst) { + QTAILQ_REMOVE(&uas->results, st, next); + g_free(st); + } +} + +static void usb_uas_handle_control(USBDevice *dev, USBPacket *p, + int request, int value, int index, int length, uint8_t *data) +{ + int ret; + + ret = usb_desc_handle_control(dev, p, request, value, index, length, data); + if (ret >= 0) { + return; + } + error_report("%s: unhandled control request (req 0x%x, val 0x%x, idx 0x%x)", + __func__, request, value, index); + p->status = USB_RET_STALL; +} + +static void usb_uas_cancel_io(USBDevice *dev, USBPacket *p) +{ + UASDevice *uas = USB_UAS(dev); + UASRequest *req, *nreq; + int i; + + if (uas->status2 == p) { + uas->status2 = NULL; + qemu_bh_cancel(uas->status_bh); + return; + } + if (uas_using_streams(uas)) { + for (i = 0; i <= UAS_MAX_STREAMS; i++) { + if (uas->status3[i] == p) { + uas->status3[i] = NULL; + return; + } + if (uas->data3[i] == p) { + uas->data3[i] = NULL; + return; + } + } + } + QTAILQ_FOREACH_SAFE(req, &uas->requests, next, nreq) { + if (req->data == p) { + req->data = NULL; + return; + } + } + assert(!"canceled usb packet not found"); +} + +static void usb_uas_command(UASDevice *uas, uas_iu *iu) +{ + UASRequest *req; + uint32_t len; + uint16_t tag = be16_to_cpu(iu->hdr.tag); + + if (iu->command.add_cdb_length > 0) { + qemu_log_mask(LOG_UNIMP, "additional cdb length not yet supported\n"); + goto unsupported_len; + } + + if (uas_using_streams(uas) && tag > UAS_MAX_STREAMS) { + goto invalid_tag; + } + req = usb_uas_find_request(uas, tag); + if (req) { + goto overlapped_tag; + } + req = usb_uas_alloc_request(uas, iu); + if (req->dev == NULL) { + goto bad_target; + } + + trace_usb_uas_command(uas->dev.addr, req->tag, + usb_uas_get_lun(req->lun), + req->lun >> 32, req->lun & 0xffffffff); + QTAILQ_INSERT_TAIL(&uas->requests, req, next); + if (uas_using_streams(uas) && uas->data3[req->tag] != NULL) { + req->data = uas->data3[req->tag]; + req->data_async = true; + uas->data3[req->tag] = NULL; + } + + req->req = scsi_req_new(req->dev, req->tag, + usb_uas_get_lun(req->lun), + iu->command.cdb, req); + if (uas->requestlog) { + scsi_req_print(req->req); + } + len = scsi_req_enqueue(req->req); + if (len) { + req->data_size = len; + scsi_req_continue(req->req); + } + return; + +unsupported_len: + usb_uas_queue_fake_sense(uas, tag, sense_code_INVALID_PARAM_VALUE); + return; + +invalid_tag: + usb_uas_queue_fake_sense(uas, tag, sense_code_INVALID_TAG); + return; + +overlapped_tag: + usb_uas_queue_fake_sense(uas, tag, sense_code_OVERLAPPED_COMMANDS); + return; + +bad_target: + usb_uas_queue_fake_sense(uas, tag, sense_code_LUN_NOT_SUPPORTED); + g_free(req); +} + +static void usb_uas_task(UASDevice *uas, uas_iu *iu) +{ + uint16_t tag = be16_to_cpu(iu->hdr.tag); + uint64_t lun64 = be64_to_cpu(iu->task.lun); + SCSIDevice *dev = usb_uas_get_dev(uas, lun64); + int lun = 
usb_uas_get_lun(lun64); + UASRequest *req; + uint16_t task_tag; + + if (uas_using_streams(uas) && tag > UAS_MAX_STREAMS) { + goto invalid_tag; + } + req = usb_uas_find_request(uas, be16_to_cpu(iu->hdr.tag)); + if (req) { + goto overlapped_tag; + } + if (dev == NULL) { + goto incorrect_lun; + } + + switch (iu->task.function) { + case UAS_TMF_ABORT_TASK: + task_tag = be16_to_cpu(iu->task.task_tag); + trace_usb_uas_tmf_abort_task(uas->dev.addr, tag, task_tag); + req = usb_uas_find_request(uas, task_tag); + if (req && req->dev == dev) { + scsi_req_cancel(req->req); + } + usb_uas_queue_response(uas, tag, UAS_RC_TMF_COMPLETE); + break; + + case UAS_TMF_LOGICAL_UNIT_RESET: + trace_usb_uas_tmf_logical_unit_reset(uas->dev.addr, tag, lun); + qdev_reset_all(&dev->qdev); + usb_uas_queue_response(uas, tag, UAS_RC_TMF_COMPLETE); + break; + + default: + trace_usb_uas_tmf_unsupported(uas->dev.addr, tag, iu->task.function); + usb_uas_queue_response(uas, tag, UAS_RC_TMF_NOT_SUPPORTED); + break; + } + return; + +invalid_tag: + usb_uas_queue_response(uas, tag, UAS_RC_INVALID_INFO_UNIT); + return; + +overlapped_tag: + usb_uas_queue_response(uas, req->tag, UAS_RC_OVERLAPPED_TAG); + return; + +incorrect_lun: + usb_uas_queue_response(uas, tag, UAS_RC_INCORRECT_LUN); +} + +static void usb_uas_handle_data(USBDevice *dev, USBPacket *p) +{ + UASDevice *uas = USB_UAS(dev); + uas_iu iu; + UASStatus *st; + UASRequest *req; + int length; + + switch (p->ep->nr) { + case UAS_PIPE_ID_COMMAND: + length = MIN(sizeof(iu), p->iov.size); + usb_packet_copy(p, &iu, length); + switch (iu.hdr.id) { + case UAS_UI_COMMAND: + usb_uas_command(uas, &iu); + break; + case UAS_UI_TASK_MGMT: + usb_uas_task(uas, &iu); + break; + default: + error_report("%s: unknown command iu: id 0x%x", + __func__, iu.hdr.id); + p->status = USB_RET_STALL; + break; + } + break; + case UAS_PIPE_ID_STATUS: + if (p->stream > UAS_MAX_STREAMS) { + goto err_stream; + } + if (p->stream) { + QTAILQ_FOREACH(st, &uas->results, next) { + if (st->stream == p->stream) { + break; + } + } + if (st == NULL) { + assert(uas->status3[p->stream] == NULL); + uas->status3[p->stream] = p; + p->status = USB_RET_ASYNC; + break; + } + } else { + st = QTAILQ_FIRST(&uas->results); + if (st == NULL) { + assert(uas->status2 == NULL); + uas->status2 = p; + p->status = USB_RET_ASYNC; + break; + } + } + usb_packet_copy(p, &st->status, st->length); + QTAILQ_REMOVE(&uas->results, st, next); + g_free(st); + break; + case UAS_PIPE_ID_DATA_IN: + case UAS_PIPE_ID_DATA_OUT: + if (p->stream > UAS_MAX_STREAMS) { + goto err_stream; + } + if (p->stream) { + req = usb_uas_find_request(uas, p->stream); + } else { + req = (p->ep->nr == UAS_PIPE_ID_DATA_IN) + ? 
uas->datain2 : uas->dataout2;
+        }
+        if (req == NULL) {
+            if (p->stream) {
+                assert(uas->data3[p->stream] == NULL);
+                uas->data3[p->stream] = p;
+                p->status = USB_RET_ASYNC;
+                break;
+            } else {
+                error_report("%s: no inflight request", __func__);
+                p->status = USB_RET_STALL;
+                break;
+            }
+        }
+        scsi_req_ref(req->req);
+        req->data = p;
+        usb_uas_copy_data(req);
+        if (p->actual_length == p->iov.size || req->complete) {
+            req->data = NULL;
+        } else {
+            req->data_async = true;
+            p->status = USB_RET_ASYNC;
+        }
+        scsi_req_unref(req->req);
+        usb_uas_start_next_transfer(uas);
+        break;
+    default:
+        error_report("%s: invalid endpoint %d", __func__, p->ep->nr);
+        p->status = USB_RET_STALL;
+        break;
+    }
+    return;
+
+err_stream:
+    error_report("%s: invalid stream %d", __func__, p->stream);
+    p->status = USB_RET_STALL;
+    return;
+}
+
+static void usb_uas_unrealize(USBDevice *dev)
+{
+    UASDevice *uas = USB_UAS(dev);
+
+    qemu_bh_delete(uas->status_bh);
+}
+
+static void usb_uas_realize(USBDevice *dev, Error **errp)
+{
+    UASDevice *uas = USB_UAS(dev);
+    DeviceState *d = DEVICE(dev);
+
+    usb_desc_create_serial(dev);
+    usb_desc_init(dev);
+    if (d->hotplugged) {
+        uas->dev.auto_attach = 0;
+    }
+
+    QTAILQ_INIT(&uas->results);
+    QTAILQ_INIT(&uas->requests);
+    uas->status_bh = qemu_bh_new(usb_uas_send_status_bh, uas);
+
+    dev->flags |= (1 << USB_DEV_FLAG_IS_SCSI_STORAGE);
+    scsi_bus_init(&uas->bus, sizeof(uas->bus), DEVICE(dev), &usb_uas_scsi_info);
+}
+
+static const VMStateDescription vmstate_usb_uas = {
+    .name = "usb-uas",
+    .unmigratable = 1,
+    .fields = (VMStateField[]) {
+        VMSTATE_USB_DEVICE(dev, UASDevice),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+static Property uas_properties[] = {
+    DEFINE_PROP_UINT32("log-scsi-req", UASDevice, requestlog, 0),
+    DEFINE_PROP_END_OF_LIST(),
+};
+
+static void usb_uas_class_initfn(ObjectClass *klass, void *data)
+{
+    DeviceClass *dc = DEVICE_CLASS(klass);
+    USBDeviceClass *uc = USB_DEVICE_CLASS(klass);
+
+    uc->realize = usb_uas_realize;
+    uc->product_desc = desc_strings[STR_PRODUCT];
+    uc->usb_desc = &desc;
+    uc->cancel_packet = usb_uas_cancel_io;
+    uc->handle_attach = usb_desc_attach;
+    uc->handle_reset = usb_uas_handle_reset;
+    uc->handle_control = usb_uas_handle_control;
+    uc->handle_data = usb_uas_handle_data;
+    uc->unrealize = usb_uas_unrealize;
+    uc->attached_settable = true;
+    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
+    dc->fw_name = "storage";
+    dc->vmsd = &vmstate_usb_uas;
+    device_class_set_props(dc, uas_properties);
+}
+
+static const TypeInfo uas_info = {
+    .name = TYPE_USB_UAS,
+    .parent = TYPE_USB_DEVICE,
+    .instance_size = sizeof(UASDevice),
+    .class_init = usb_uas_class_initfn,
+};
+
+static void usb_uas_register_types(void)
+{
+    type_register_static(&uas_info);
+}
+
+type_init(usb_uas_register_types)
diff --git a/hw/usb/dev-wacom.c b/hw/usb/dev-wacom.c
new file mode 100644
index 000000000..ed687bc9f
--- /dev/null
+++ b/hw/usb/dev-wacom.c
@@ -0,0 +1,383 @@
+/*
+ * Wacom PenPartner USB tablet emulation.
+ *
+ * Copyright (c) 2006 Openedhand Ltd.
+ * Author: Andrzej Zaborowski + * + * Based on hw/usb-hid.c: + * Copyright (c) 2005 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "ui/console.h" +#include "hw/usb.h" +#include "hw/usb/hid.h" +#include "migration/vmstate.h" +#include "qemu/module.h" +#include "desc.h" +#include "qom/object.h" + +/* Interface requests */ +#define WACOM_GET_REPORT 0x2101 +#define WACOM_SET_REPORT 0x2109 + +struct USBWacomState { + USBDevice dev; + USBEndpoint *intr; + QEMUPutMouseEntry *eh_entry; + int dx, dy, dz, buttons_state; + int x, y; + int mouse_grabbed; + enum { + WACOM_MODE_HID = 1, + WACOM_MODE_WACOM = 2, + } mode; + uint8_t idle; + int changed; +}; + +#define TYPE_USB_WACOM "usb-wacom-tablet" +OBJECT_DECLARE_SIMPLE_TYPE(USBWacomState, USB_WACOM) + +enum { + STR_MANUFACTURER = 1, + STR_PRODUCT, + STR_SERIALNUMBER, +}; + +static const USBDescStrings desc_strings = { + [STR_MANUFACTURER] = "QEMU", + [STR_PRODUCT] = "Wacom PenPartner", + [STR_SERIALNUMBER] = "1", +}; + +static const USBDescIface desc_iface_wacom = { + .bInterfaceNumber = 0, + .bNumEndpoints = 1, + .bInterfaceClass = USB_CLASS_HID, + .bInterfaceSubClass = 0x01, /* boot */ + .bInterfaceProtocol = 0x02, + .ndesc = 1, + .descs = (USBDescOther[]) { + { + /* HID descriptor */ + .data = (uint8_t[]) { + 0x09, /* u8 bLength */ + USB_DT_HID, /* u8 bDescriptorType */ + 0x01, 0x10, /* u16 HID_class */ + 0x00, /* u8 country_code */ + 0x01, /* u8 num_descriptors */ + USB_DT_REPORT, /* u8 type: Report */ + 0x6e, 0, /* u16 len */ + }, + }, + }, + .eps = (USBDescEndpoint[]) { + { + .bEndpointAddress = USB_DIR_IN | 0x01, + .bmAttributes = USB_ENDPOINT_XFER_INT, + .wMaxPacketSize = 8, + .bInterval = 0x0a, + }, + }, +}; + +static const USBDescDevice desc_device_wacom = { + .bcdUSB = 0x0110, + .bMaxPacketSize0 = 8, + .bNumConfigurations = 1, + .confs = (USBDescConfig[]) { + { + .bNumInterfaces = 1, + .bConfigurationValue = 1, + .bmAttributes = USB_CFG_ATT_ONE, + .bMaxPower = 40, + .nif = 1, + .ifs = &desc_iface_wacom, + }, + }, +}; + +static const USBDesc desc_wacom = { + .id = { + .idVendor = 0x056a, + .idProduct = 0x0000, + .bcdDevice = 0x4210, + .iManufacturer = STR_MANUFACTURER, + .iProduct = STR_PRODUCT, + .iSerialNumber = STR_SERIALNUMBER, + }, + .full = &desc_device_wacom, + .str = desc_strings, +}; + +static void usb_mouse_event(void *opaque, + int dx1, int dy1, int dz1, int buttons_state) +{ + USBWacomState *s = opaque; + + s->dx += dx1; + s->dy += dy1; + s->dz 
+= dz1; + s->buttons_state = buttons_state; + s->changed = 1; + usb_wakeup(s->intr, 0); +} + +static void usb_wacom_event(void *opaque, + int x, int y, int dz, int buttons_state) +{ + USBWacomState *s = opaque; + + /* scale to Penpartner resolution */ + s->x = (x * 5040 / 0x7FFF); + s->y = (y * 3780 / 0x7FFF); + s->dz += dz; + s->buttons_state = buttons_state; + s->changed = 1; + usb_wakeup(s->intr, 0); +} + +static inline int int_clamp(int val, int vmin, int vmax) +{ + if (val < vmin) + return vmin; + else if (val > vmax) + return vmax; + else + return val; +} + +static int usb_mouse_poll(USBWacomState *s, uint8_t *buf, int len) +{ + int dx, dy, dz, b, l; + + if (!s->mouse_grabbed) { + s->eh_entry = qemu_add_mouse_event_handler(usb_mouse_event, s, 0, + "QEMU PenPartner tablet"); + qemu_activate_mouse_event_handler(s->eh_entry); + s->mouse_grabbed = 1; + } + + dx = int_clamp(s->dx, -128, 127); + dy = int_clamp(s->dy, -128, 127); + dz = int_clamp(s->dz, -128, 127); + + s->dx -= dx; + s->dy -= dy; + s->dz -= dz; + + b = 0; + if (s->buttons_state & MOUSE_EVENT_LBUTTON) + b |= 0x01; + if (s->buttons_state & MOUSE_EVENT_RBUTTON) + b |= 0x02; + if (s->buttons_state & MOUSE_EVENT_MBUTTON) + b |= 0x04; + + buf[0] = b; + buf[1] = dx; + buf[2] = dy; + l = 3; + if (len >= 4) { + buf[3] = dz; + l = 4; + } + return l; +} + +static int usb_wacom_poll(USBWacomState *s, uint8_t *buf, int len) +{ + int b; + + if (!s->mouse_grabbed) { + s->eh_entry = qemu_add_mouse_event_handler(usb_wacom_event, s, 1, + "QEMU PenPartner tablet"); + qemu_activate_mouse_event_handler(s->eh_entry); + s->mouse_grabbed = 1; + } + + b = 0; + if (s->buttons_state & MOUSE_EVENT_LBUTTON) + b |= 0x01; + if (s->buttons_state & MOUSE_EVENT_RBUTTON) + b |= 0x40; + if (s->buttons_state & MOUSE_EVENT_MBUTTON) + b |= 0x20; /* eraser */ + + if (len < 7) + return 0; + + buf[0] = s->mode; + buf[5] = 0x00 | (b & 0xf0); + buf[1] = s->x & 0xff; + buf[2] = s->x >> 8; + buf[3] = s->y & 0xff; + buf[4] = s->y >> 8; + if (b & 0x3f) { + buf[6] = 0; + } else { + buf[6] = (unsigned char) -127; + } + + return 7; +} + +static void usb_wacom_handle_reset(USBDevice *dev) +{ + USBWacomState *s = (USBWacomState *) dev; + + s->dx = 0; + s->dy = 0; + s->dz = 0; + s->x = 0; + s->y = 0; + s->buttons_state = 0; + s->mode = WACOM_MODE_HID; +} + +static void usb_wacom_handle_control(USBDevice *dev, USBPacket *p, + int request, int value, int index, int length, uint8_t *data) +{ + USBWacomState *s = (USBWacomState *) dev; + int ret; + + ret = usb_desc_handle_control(dev, p, request, value, index, length, data); + if (ret >= 0) { + return; + } + + switch (request) { + case WACOM_SET_REPORT: + if (s->mouse_grabbed) { + qemu_remove_mouse_event_handler(s->eh_entry); + s->mouse_grabbed = 0; + } + s->mode = data[0]; + break; + case WACOM_GET_REPORT: + data[0] = 0; + data[1] = s->mode; + p->actual_length = 2; + break; + /* USB HID requests */ + case HID_GET_REPORT: + if (s->mode == WACOM_MODE_HID) + p->actual_length = usb_mouse_poll(s, data, length); + else if (s->mode == WACOM_MODE_WACOM) + p->actual_length = usb_wacom_poll(s, data, length); + break; + case HID_GET_IDLE: + data[0] = s->idle; + p->actual_length = 1; + break; + case HID_SET_IDLE: + s->idle = (uint8_t) (value >> 8); + break; + default: + p->status = USB_RET_STALL; + break; + } +} + +static void usb_wacom_handle_data(USBDevice *dev, USBPacket *p) +{ + USBWacomState *s = (USBWacomState *) dev; + g_autofree uint8_t *buf = g_malloc(p->iov.size); + int len = 0; + + switch (p->pid) { + case USB_TOKEN_IN: + if 
(p->ep->nr == 1) { + if (!(s->changed || s->idle)) { + p->status = USB_RET_NAK; + return; + } + s->changed = 0; + if (s->mode == WACOM_MODE_HID) + len = usb_mouse_poll(s, buf, p->iov.size); + else if (s->mode == WACOM_MODE_WACOM) + len = usb_wacom_poll(s, buf, p->iov.size); + usb_packet_copy(p, buf, len); + break; + } + /* Fall through. */ + case USB_TOKEN_OUT: + default: + p->status = USB_RET_STALL; + } +} + +static void usb_wacom_unrealize(USBDevice *dev) +{ + USBWacomState *s = (USBWacomState *) dev; + + if (s->mouse_grabbed) { + qemu_remove_mouse_event_handler(s->eh_entry); + s->mouse_grabbed = 0; + } +} + +static void usb_wacom_realize(USBDevice *dev, Error **errp) +{ + USBWacomState *s = USB_WACOM(dev); + usb_desc_create_serial(dev); + usb_desc_init(dev); + s->intr = usb_ep_get(dev, USB_TOKEN_IN, 1); + s->changed = 1; +} + +static const VMStateDescription vmstate_usb_wacom = { + .name = "usb-wacom", + .unmigratable = 1, +}; + +static void usb_wacom_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + USBDeviceClass *uc = USB_DEVICE_CLASS(klass); + + uc->product_desc = "QEMU PenPartner Tablet"; + uc->usb_desc = &desc_wacom; + uc->realize = usb_wacom_realize; + uc->handle_reset = usb_wacom_handle_reset; + uc->handle_control = usb_wacom_handle_control; + uc->handle_data = usb_wacom_handle_data; + uc->unrealize = usb_wacom_unrealize; + set_bit(DEVICE_CATEGORY_INPUT, dc->categories); + dc->desc = "QEMU PenPartner Tablet"; + dc->vmsd = &vmstate_usb_wacom; +} + +static const TypeInfo wacom_info = { + .name = TYPE_USB_WACOM, + .parent = TYPE_USB_DEVICE, + .instance_size = sizeof(USBWacomState), + .class_init = usb_wacom_class_init, +}; + +static void usb_wacom_register_types(void) +{ + type_register_static(&wacom_info); + usb_legacy_register(TYPE_USB_WACOM, "wacom-tablet", NULL); +} + +type_init(usb_wacom_register_types) diff --git a/hw/usb/hcd-dwc2.c b/hw/usb/hcd-dwc2.c new file mode 100644 index 000000000..e1d96acf7 --- /dev/null +++ b/hw/usb/hcd-dwc2.c @@ -0,0 +1,1478 @@ +/* + * dwc-hsotg (dwc2) USB host controller emulation + * + * Based on hw/usb/hcd-ehci.c and hw/usb/hcd-ohci.c + * + * Note that to use this emulation with the dwc-otg driver in the + * Raspbian kernel, you must pass the option "dwc_otg.fiq_fsm_enable=0" + * on the kernel command line. + * + * Some useful documentation used to develop this emulation can be + * found online (as of April 2020) at: + * + * http://www.capital-micro.com/PDF/CME-M7_Family_User_Guide_EN.pdf + * which has a pretty complete description of the controller starting + * on page 370. + * + * https://sourceforge.net/p/wive-ng/wive-ng-mt/ci/master/tree/docs/DataSheets/RT3050_5x_V2.0_081408_0902.pdf + * which has a description of the controller registers starting on + * page 130. + * + * Copyright (c) 2020 Paul Zimmerman + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
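+ *
+ * Note: only host mode with internal DMA is modelled here; the gadget-mode
+ * register ranges read as zero and the FIFO region needed for slave-mode
+ * operation is unimplemented (see dwc2_hsotg_read() and dwc2_hreg2_read()
+ * below).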
+ */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "qapi/error.h" +#include "hw/usb/dwc2-regs.h" +#include "hw/usb/hcd-dwc2.h" +#include "migration/vmstate.h" +#include "trace.h" +#include "qemu/log.h" +#include "qemu/error-report.h" +#include "qemu/main-loop.h" +#include "hw/qdev-properties.h" + +#define USB_HZ_FS 12000000 +#define USB_HZ_HS 96000000 +#define USB_FRMINTVL 12000 + +/* nifty macros from Arnon's EHCI version */ +#define get_field(data, field) \ + (((data) & field##_MASK) >> field##_SHIFT) + +#define set_field(data, newval, field) do { \ + uint32_t val = *(data); \ + val &= ~field##_MASK; \ + val |= ((newval) << field##_SHIFT) & field##_MASK; \ + *(data) = val; \ +} while (0) + +#define get_bit(data, bitmask) \ + (!!((data) & (bitmask))) + +/* update irq line */ +static inline void dwc2_update_irq(DWC2State *s) +{ + static int oldlevel; + int level = 0; + + if ((s->gintsts & s->gintmsk) && (s->gahbcfg & GAHBCFG_GLBL_INTR_EN)) { + level = 1; + } + if (level != oldlevel) { + oldlevel = level; + trace_usb_dwc2_update_irq(level); + qemu_set_irq(s->irq, level); + } +} + +/* flag interrupt condition */ +static inline void dwc2_raise_global_irq(DWC2State *s, uint32_t intr) +{ + if (!(s->gintsts & intr)) { + s->gintsts |= intr; + trace_usb_dwc2_raise_global_irq(intr); + dwc2_update_irq(s); + } +} + +static inline void dwc2_lower_global_irq(DWC2State *s, uint32_t intr) +{ + if (s->gintsts & intr) { + s->gintsts &= ~intr; + trace_usb_dwc2_lower_global_irq(intr); + dwc2_update_irq(s); + } +} + +static inline void dwc2_raise_host_irq(DWC2State *s, uint32_t host_intr) +{ + if (!(s->haint & host_intr)) { + s->haint |= host_intr; + s->haint &= 0xffff; + trace_usb_dwc2_raise_host_irq(host_intr); + if (s->haint & s->haintmsk) { + dwc2_raise_global_irq(s, GINTSTS_HCHINT); + } + } +} + +static inline void dwc2_lower_host_irq(DWC2State *s, uint32_t host_intr) +{ + if (s->haint & host_intr) { + s->haint &= ~host_intr; + trace_usb_dwc2_lower_host_irq(host_intr); + if (!(s->haint & s->haintmsk)) { + dwc2_lower_global_irq(s, GINTSTS_HCHINT); + } + } +} + +static inline void dwc2_update_hc_irq(DWC2State *s, int index) +{ + uint32_t host_intr = 1 << (index >> 3); + + if (s->hreg1[index + 2] & s->hreg1[index + 3]) { + dwc2_raise_host_irq(s, host_intr); + } else { + dwc2_lower_host_irq(s, host_intr); + } +} + +/* set a timer for EOF */ +static void dwc2_eof_timer(DWC2State *s) +{ + timer_mod(s->eof_timer, s->sof_time + s->usb_frame_time); +} + +/* Set a timer for EOF and generate SOF event */ +static void dwc2_sof(DWC2State *s) +{ + s->sof_time += s->usb_frame_time; + trace_usb_dwc2_sof(s->sof_time); + dwc2_eof_timer(s); + dwc2_raise_global_irq(s, GINTSTS_SOF); +} + +/* Do frame processing on frame boundary */ +static void dwc2_frame_boundary(void *opaque) +{ + DWC2State *s = opaque; + int64_t now; + uint16_t frcnt; + + now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + + /* Frame boundary, so do EOF stuff here */ + + /* Increment frame number */ + frcnt = (uint16_t)((now - s->sof_time) / s->fi); + s->frame_number = (s->frame_number + frcnt) & 0xffff; + s->hfnum = s->frame_number & HFNUM_MAX_FRNUM; + + /* Do SOF stuff here */ + dwc2_sof(s); +} + +/* Start sending SOF tokens on the USB bus */ +static void dwc2_bus_start(DWC2State *s) +{ + trace_usb_dwc2_bus_start(); + s->sof_time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + dwc2_eof_timer(s); +} + +/* Stop sending SOF tokens on the USB bus */ +static void dwc2_bus_stop(DWC2State *s) +{ + trace_usb_dwc2_bus_stop(); + timer_del(s->eof_timer); +} 
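+
+/*
+ * A note on the channel register indexing used below: each host channel
+ * occupies eight 32-bit words in hreg1[], so an 'index' is always
+ * (chan << 3) plus a fixed offset -- HCCHAR at +0, HCINT at +2, HCINTMSK
+ * at +3, HCTSIZ at +4, HCDMA at +5 (see the macros in hcd-dwc2.h) -- and
+ * the per-channel interrupt bit in HAINT is simply (1 << chan).  As a
+ * sketch, channel 2's interrupt state lives at:
+ *
+ *     uint32_t hcint    = s->hreg1[(2 << 3) + 2];    HCINT(2)
+ *     uint32_t hcintmsk = s->hreg1[(2 << 3) + 3];    HCINTMSK(2)
+ */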
+ +static USBDevice *dwc2_find_device(DWC2State *s, uint8_t addr) +{ + USBDevice *dev; + + trace_usb_dwc2_find_device(addr); + + if (!(s->hprt0 & HPRT0_ENA)) { + trace_usb_dwc2_port_disabled(0); + } else { + dev = usb_find_device(&s->uport, addr); + if (dev != NULL) { + trace_usb_dwc2_device_found(0); + return dev; + } + } + + trace_usb_dwc2_device_not_found(); + return NULL; +} + +static const char *pstatus[] = { + "USB_RET_SUCCESS", "USB_RET_NODEV", "USB_RET_NAK", "USB_RET_STALL", + "USB_RET_BABBLE", "USB_RET_IOERROR", "USB_RET_ASYNC", + "USB_RET_ADD_TO_QUEUE", "USB_RET_REMOVE_FROM_QUEUE" +}; + +static uint32_t pintr[] = { + HCINTMSK_XFERCOMPL, HCINTMSK_XACTERR, HCINTMSK_NAK, HCINTMSK_STALL, + HCINTMSK_BBLERR, HCINTMSK_XACTERR, HCINTMSK_XACTERR, HCINTMSK_XACTERR, + HCINTMSK_XACTERR +}; + +static const char *types[] = { + "Ctrl", "Isoc", "Bulk", "Intr" +}; + +static const char *dirs[] = { + "Out", "In" +}; + +static void dwc2_handle_packet(DWC2State *s, uint32_t devadr, USBDevice *dev, + USBEndpoint *ep, uint32_t index, bool send) +{ + DWC2Packet *p; + uint32_t hcchar = s->hreg1[index]; + uint32_t hctsiz = s->hreg1[index + 4]; + uint32_t hcdma = s->hreg1[index + 5]; + uint32_t chan, epnum, epdir, eptype, mps, pid, pcnt, len, tlen, intr = 0; + uint32_t tpcnt, stsidx, actual = 0; + bool do_intr = false, done = false; + + epnum = get_field(hcchar, HCCHAR_EPNUM); + epdir = get_bit(hcchar, HCCHAR_EPDIR); + eptype = get_field(hcchar, HCCHAR_EPTYPE); + mps = get_field(hcchar, HCCHAR_MPS); + pid = get_field(hctsiz, TSIZ_SC_MC_PID); + pcnt = get_field(hctsiz, TSIZ_PKTCNT); + len = get_field(hctsiz, TSIZ_XFERSIZE); + if (len > DWC2_MAX_XFER_SIZE) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: HCTSIZ transfer size too large\n", __func__); + return; + } + + chan = index >> 3; + p = &s->packet[chan]; + + trace_usb_dwc2_handle_packet(chan, dev, &p->packet, epnum, types[eptype], + dirs[epdir], mps, len, pcnt); + + if (mps == 0) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad HCCHAR_MPS set to zero\n", __func__); + return; + } + + if (eptype == USB_ENDPOINT_XFER_CONTROL && pid == TSIZ_SC_MC_PID_SETUP) { + pid = USB_TOKEN_SETUP; + } else { + pid = epdir ? 
USB_TOKEN_IN : USB_TOKEN_OUT; + } + + if (send) { + tlen = len; + if (p->small) { + if (tlen > mps) { + tlen = mps; + } + } + + if (pid != USB_TOKEN_IN) { + trace_usb_dwc2_memory_read(hcdma, tlen); + if (dma_memory_read(&s->dma_as, hcdma, + s->usb_buf[chan], tlen) != MEMTX_OK) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: dma_memory_read failed\n", + __func__); + } + } + + usb_packet_init(&p->packet); + usb_packet_setup(&p->packet, pid, ep, 0, hcdma, + pid != USB_TOKEN_IN, true); + usb_packet_addbuf(&p->packet, s->usb_buf[chan], tlen); + p->async = DWC2_ASYNC_NONE; + usb_handle_packet(dev, &p->packet); + } else { + tlen = p->len; + } + + stsidx = -p->packet.status; + assert(stsidx < sizeof(pstatus) / sizeof(*pstatus)); + actual = p->packet.actual_length; + trace_usb_dwc2_packet_status(pstatus[stsidx], actual); + +babble: + if (p->packet.status != USB_RET_SUCCESS && + p->packet.status != USB_RET_NAK && + p->packet.status != USB_RET_STALL && + p->packet.status != USB_RET_ASYNC) { + trace_usb_dwc2_packet_error(pstatus[stsidx]); + } + + if (p->packet.status == USB_RET_ASYNC) { + trace_usb_dwc2_async_packet(&p->packet, chan, dev, epnum, + dirs[epdir], tlen); + usb_device_flush_ep_queue(dev, ep); + assert(p->async != DWC2_ASYNC_INFLIGHT); + p->devadr = devadr; + p->epnum = epnum; + p->epdir = epdir; + p->mps = mps; + p->pid = pid; + p->index = index; + p->pcnt = pcnt; + p->len = tlen; + p->async = DWC2_ASYNC_INFLIGHT; + p->needs_service = false; + return; + } + + if (p->packet.status == USB_RET_SUCCESS) { + if (actual > tlen) { + p->packet.status = USB_RET_BABBLE; + goto babble; + } + + if (pid == USB_TOKEN_IN) { + trace_usb_dwc2_memory_write(hcdma, actual); + if (dma_memory_write(&s->dma_as, hcdma, s->usb_buf[chan], + actual) != MEMTX_OK) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: dma_memory_write failed\n", + __func__); + } + } + + tpcnt = actual / mps; + if (actual % mps) { + tpcnt++; + if (pid == USB_TOKEN_IN) { + done = true; + } + } + + pcnt -= tpcnt < pcnt ? tpcnt : pcnt; + set_field(&hctsiz, pcnt, TSIZ_PKTCNT); + len -= actual < len ? 
actual : len; + set_field(&hctsiz, len, TSIZ_XFERSIZE); + s->hreg1[index + 4] = hctsiz; + hcdma += actual; + s->hreg1[index + 5] = hcdma; + + if (!pcnt || len == 0 || actual == 0) { + done = true; + } + } else { + intr |= pintr[stsidx]; + if (p->packet.status == USB_RET_NAK && + (eptype == USB_ENDPOINT_XFER_CONTROL || + eptype == USB_ENDPOINT_XFER_BULK)) { + /* + * for ctrl/bulk, automatically retry on NAK, + * but send the interrupt anyway + */ + intr &= ~HCINTMSK_RESERVED14_31; + s->hreg1[index + 2] |= intr; + do_intr = true; + } else { + intr |= HCINTMSK_CHHLTD; + done = true; + } + } + + usb_packet_cleanup(&p->packet); + + if (done) { + hcchar &= ~HCCHAR_CHENA; + s->hreg1[index] = hcchar; + if (!(intr & HCINTMSK_CHHLTD)) { + intr |= HCINTMSK_CHHLTD | HCINTMSK_XFERCOMPL; + } + intr &= ~HCINTMSK_RESERVED14_31; + s->hreg1[index + 2] |= intr; + p->needs_service = false; + trace_usb_dwc2_packet_done(pstatus[stsidx], actual, len, pcnt); + dwc2_update_hc_irq(s, index); + return; + } + + p->devadr = devadr; + p->epnum = epnum; + p->epdir = epdir; + p->mps = mps; + p->pid = pid; + p->index = index; + p->pcnt = pcnt; + p->len = len; + p->needs_service = true; + trace_usb_dwc2_packet_next(pstatus[stsidx], len, pcnt); + if (do_intr) { + dwc2_update_hc_irq(s, index); + } +} + +/* Attach or detach a device on root hub */ + +static const char *speeds[] = { + "low", "full", "high" +}; + +static void dwc2_attach(USBPort *port) +{ + DWC2State *s = port->opaque; + int hispd = 0; + + trace_usb_dwc2_attach(port); + assert(port->index == 0); + + if (!port->dev || !port->dev->attached) { + return; + } + + assert(port->dev->speed <= USB_SPEED_HIGH); + trace_usb_dwc2_attach_speed(speeds[port->dev->speed]); + s->hprt0 &= ~HPRT0_SPD_MASK; + + switch (port->dev->speed) { + case USB_SPEED_LOW: + s->hprt0 |= HPRT0_SPD_LOW_SPEED << HPRT0_SPD_SHIFT; + break; + case USB_SPEED_FULL: + s->hprt0 |= HPRT0_SPD_FULL_SPEED << HPRT0_SPD_SHIFT; + break; + case USB_SPEED_HIGH: + s->hprt0 |= HPRT0_SPD_HIGH_SPEED << HPRT0_SPD_SHIFT; + hispd = 1; + break; + } + + if (hispd) { + s->usb_frame_time = NANOSECONDS_PER_SECOND / 8000; /* 125000 */ + if (NANOSECONDS_PER_SECOND >= USB_HZ_HS) { + s->usb_bit_time = NANOSECONDS_PER_SECOND / USB_HZ_HS; /* 10.4 */ + } else { + s->usb_bit_time = 1; + } + } else { + s->usb_frame_time = NANOSECONDS_PER_SECOND / 1000; /* 1000000 */ + if (NANOSECONDS_PER_SECOND >= USB_HZ_FS) { + s->usb_bit_time = NANOSECONDS_PER_SECOND / USB_HZ_FS; /* 83.3 */ + } else { + s->usb_bit_time = 1; + } + } + + s->fi = USB_FRMINTVL - 1; + s->hprt0 |= HPRT0_CONNDET | HPRT0_CONNSTS; + + dwc2_bus_start(s); + dwc2_raise_global_irq(s, GINTSTS_PRTINT); +} + +static void dwc2_detach(USBPort *port) +{ + DWC2State *s = port->opaque; + + trace_usb_dwc2_detach(port); + assert(port->index == 0); + + dwc2_bus_stop(s); + + s->hprt0 &= ~(HPRT0_SPD_MASK | HPRT0_SUSP | HPRT0_ENA | HPRT0_CONNSTS); + s->hprt0 |= HPRT0_CONNDET | HPRT0_ENACHG; + + dwc2_raise_global_irq(s, GINTSTS_PRTINT); +} + +static void dwc2_child_detach(USBPort *port, USBDevice *child) +{ + trace_usb_dwc2_child_detach(port, child); + assert(port->index == 0); +} + +static void dwc2_wakeup(USBPort *port) +{ + DWC2State *s = port->opaque; + + trace_usb_dwc2_wakeup(port); + assert(port->index == 0); + + if (s->hprt0 & HPRT0_SUSP) { + s->hprt0 |= HPRT0_RES; + dwc2_raise_global_irq(s, GINTSTS_PRTINT); + } + + qemu_bh_schedule(s->async_bh); +} + +static void dwc2_async_packet_complete(USBPort *port, USBPacket *packet) +{ + DWC2State *s = port->opaque; + DWC2Packet *p; + 
USBDevice *dev; + USBEndpoint *ep; + + assert(port->index == 0); + p = container_of(packet, DWC2Packet, packet); + dev = dwc2_find_device(s, p->devadr); + ep = usb_ep_get(dev, p->pid, p->epnum); + trace_usb_dwc2_async_packet_complete(port, packet, p->index >> 3, dev, + p->epnum, dirs[p->epdir], p->len); + assert(p->async == DWC2_ASYNC_INFLIGHT); + + if (packet->status == USB_RET_REMOVE_FROM_QUEUE) { + usb_cancel_packet(packet); + usb_packet_cleanup(packet); + return; + } + + dwc2_handle_packet(s, p->devadr, dev, ep, p->index, false); + + p->async = DWC2_ASYNC_FINISHED; + qemu_bh_schedule(s->async_bh); +} + +static USBPortOps dwc2_port_ops = { + .attach = dwc2_attach, + .detach = dwc2_detach, + .child_detach = dwc2_child_detach, + .wakeup = dwc2_wakeup, + .complete = dwc2_async_packet_complete, +}; + +static uint32_t dwc2_get_frame_remaining(DWC2State *s) +{ + uint32_t fr = 0; + int64_t tks; + + tks = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - s->sof_time; + if (tks < 0) { + tks = 0; + } + + /* avoid muldiv if possible */ + if (tks >= s->usb_frame_time) { + goto out; + } + if (tks < s->usb_bit_time) { + fr = s->fi; + goto out; + } + + /* tks = number of ns since SOF, divided by 83 (fs) or 10 (hs) */ + tks = tks / s->usb_bit_time; + if (tks >= (int64_t)s->fi) { + goto out; + } + + /* remaining = frame interval minus tks */ + fr = (uint32_t)((int64_t)s->fi - tks); + +out: + return fr; +} + +static void dwc2_work_bh(void *opaque) +{ + DWC2State *s = opaque; + DWC2Packet *p; + USBDevice *dev; + USBEndpoint *ep; + int64_t t_now, expire_time; + int chan; + bool found = false; + + trace_usb_dwc2_work_bh(); + if (s->working) { + return; + } + s->working = true; + + t_now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + chan = s->next_chan; + + do { + p = &s->packet[chan]; + if (p->needs_service) { + dev = dwc2_find_device(s, p->devadr); + ep = usb_ep_get(dev, p->pid, p->epnum); + trace_usb_dwc2_work_bh_service(s->next_chan, chan, dev, p->epnum); + dwc2_handle_packet(s, p->devadr, dev, ep, p->index, true); + found = true; + } + if (++chan == DWC2_NB_CHAN) { + chan = 0; + } + if (found) { + s->next_chan = chan; + trace_usb_dwc2_work_bh_next(chan); + } + } while (chan != s->next_chan); + + if (found) { + expire_time = t_now + NANOSECONDS_PER_SECOND / 4000; + timer_mod(s->frame_timer, expire_time); + } + s->working = false; +} + +static void dwc2_enable_chan(DWC2State *s, uint32_t index) +{ + USBDevice *dev; + USBEndpoint *ep; + uint32_t hcchar; + uint32_t hctsiz; + uint32_t devadr, epnum, epdir, eptype, pid, len; + DWC2Packet *p; + + assert((index >> 3) < DWC2_NB_CHAN); + p = &s->packet[index >> 3]; + hcchar = s->hreg1[index]; + hctsiz = s->hreg1[index + 4]; + devadr = get_field(hcchar, HCCHAR_DEVADDR); + epnum = get_field(hcchar, HCCHAR_EPNUM); + epdir = get_bit(hcchar, HCCHAR_EPDIR); + eptype = get_field(hcchar, HCCHAR_EPTYPE); + pid = get_field(hctsiz, TSIZ_SC_MC_PID); + len = get_field(hctsiz, TSIZ_XFERSIZE); + + dev = dwc2_find_device(s, devadr); + + trace_usb_dwc2_enable_chan(index >> 3, dev, &p->packet, epnum); + if (dev == NULL) { + return; + } + + if (eptype == USB_ENDPOINT_XFER_CONTROL && pid == TSIZ_SC_MC_PID_SETUP) { + pid = USB_TOKEN_SETUP; + } else { + pid = epdir ? USB_TOKEN_IN : USB_TOKEN_OUT; + } + + ep = usb_ep_get(dev, pid, epnum); + + /* + * Hack: Networking doesn't like us delivering large transfers, it kind + * of works but the latency is horrible. 
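This is presumably because the guest ends up waiting on a single large URB instead of consuming MTU-sized packets as they complete.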
So if the transfer is <= the mtu + * size, we take that as a hint that this might be a network transfer, + * and do the transfer packet-by-packet. + */ + if (len > 1536) { + p->small = false; + } else { + p->small = true; + } + + dwc2_handle_packet(s, devadr, dev, ep, index, true); + qemu_bh_schedule(s->async_bh); +} + +static const char *glbregnm[] = { + "GOTGCTL ", "GOTGINT ", "GAHBCFG ", "GUSBCFG ", "GRSTCTL ", + "GINTSTS ", "GINTMSK ", "GRXSTSR ", "GRXSTSP ", "GRXFSIZ ", + "GNPTXFSIZ", "GNPTXSTS ", "GI2CCTL ", "GPVNDCTL ", "GGPIO ", + "GUID ", "GSNPSID ", "GHWCFG1 ", "GHWCFG2 ", "GHWCFG3 ", + "GHWCFG4 ", "GLPMCFG ", "GPWRDN ", "GDFIFOCFG", "GADPCTL ", + "GREFCLK ", "GINTMSK2 ", "GINTSTS2 " +}; + +static uint64_t dwc2_glbreg_read(void *ptr, hwaddr addr, int index, + unsigned size) +{ + DWC2State *s = ptr; + uint32_t val; + + if (addr > GINTSTS2) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%"HWADDR_PRIx"\n", + __func__, addr); + return 0; + } + + val = s->glbreg[index]; + + switch (addr) { + case GRSTCTL: + /* clear any self-clearing bits that were set */ + val &= ~(GRSTCTL_TXFFLSH | GRSTCTL_RXFFLSH | GRSTCTL_IN_TKNQ_FLSH | + GRSTCTL_FRMCNTRRST | GRSTCTL_HSFTRST | GRSTCTL_CSFTRST); + s->glbreg[index] = val; + break; + default: + break; + } + + trace_usb_dwc2_glbreg_read(addr, glbregnm[index], val); + return val; +} + +static void dwc2_glbreg_write(void *ptr, hwaddr addr, int index, uint64_t val, + unsigned size) +{ + DWC2State *s = ptr; + uint64_t orig = val; + uint32_t *mmio; + uint32_t old; + int iflg = 0; + + if (addr > GINTSTS2) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%"HWADDR_PRIx"\n", + __func__, addr); + return; + } + + mmio = &s->glbreg[index]; + old = *mmio; + + switch (addr) { + case GOTGCTL: + /* don't allow setting of read-only bits */ + val &= ~(GOTGCTL_MULT_VALID_BC_MASK | GOTGCTL_BSESVLD | + GOTGCTL_ASESVLD | GOTGCTL_DBNC_SHORT | GOTGCTL_CONID_B | + GOTGCTL_HSTNEGSCS | GOTGCTL_SESREQSCS); + /* don't allow clearing of read-only bits */ + val |= old & (GOTGCTL_MULT_VALID_BC_MASK | GOTGCTL_BSESVLD | + GOTGCTL_ASESVLD | GOTGCTL_DBNC_SHORT | GOTGCTL_CONID_B | + GOTGCTL_HSTNEGSCS | GOTGCTL_SESREQSCS); + break; + case GAHBCFG: + if ((val & GAHBCFG_GLBL_INTR_EN) && !(old & GAHBCFG_GLBL_INTR_EN)) { + iflg = 1; + } + break; + case GRSTCTL: + val |= GRSTCTL_AHBIDLE; + val &= ~GRSTCTL_DMAREQ; + if (!(old & GRSTCTL_TXFFLSH) && (val & GRSTCTL_TXFFLSH)) { + /* TODO - TX fifo flush */ + qemu_log_mask(LOG_UNIMP, "%s: Tx FIFO flush not implemented\n", + __func__); + } + if (!(old & GRSTCTL_RXFFLSH) && (val & GRSTCTL_RXFFLSH)) { + /* TODO - RX fifo flush */ + qemu_log_mask(LOG_UNIMP, "%s: Rx FIFO flush not implemented\n", + __func__); + } + if (!(old & GRSTCTL_IN_TKNQ_FLSH) && (val & GRSTCTL_IN_TKNQ_FLSH)) { + /* TODO - device IN token queue flush */ + qemu_log_mask(LOG_UNIMP, "%s: Token queue flush not implemented\n", + __func__); + } + if (!(old & GRSTCTL_FRMCNTRRST) && (val & GRSTCTL_FRMCNTRRST)) { + /* TODO - host frame counter reset */ + qemu_log_mask(LOG_UNIMP, + "%s: Frame counter reset not implemented\n", + __func__); + } + if (!(old & GRSTCTL_HSFTRST) && (val & GRSTCTL_HSFTRST)) { + /* TODO - host soft reset */ + qemu_log_mask(LOG_UNIMP, "%s: Host soft reset not implemented\n", + __func__); + } + if (!(old & GRSTCTL_CSFTRST) && (val & GRSTCTL_CSFTRST)) { + /* TODO - core soft reset */ + qemu_log_mask(LOG_UNIMP, "%s: Core soft reset not implemented\n", + __func__); + } + /* don't allow clearing of self-clearing bits */ + val |= old & (GRSTCTL_TXFFLSH | 
GRSTCTL_RXFFLSH | + GRSTCTL_IN_TKNQ_FLSH | GRSTCTL_FRMCNTRRST | + GRSTCTL_HSFTRST | GRSTCTL_CSFTRST); + break; + case GINTSTS: + /* clear the write-1-to-clear bits */ + val |= ~old; + val = ~val; + /* don't allow clearing of read-only bits */ + val |= old & (GINTSTS_PTXFEMP | GINTSTS_HCHINT | GINTSTS_PRTINT | + GINTSTS_OEPINT | GINTSTS_IEPINT | GINTSTS_GOUTNAKEFF | + GINTSTS_GINNAKEFF | GINTSTS_NPTXFEMP | GINTSTS_RXFLVL | + GINTSTS_OTGINT | GINTSTS_CURMODE_HOST); + iflg = 1; + break; + case GINTMSK: + iflg = 1; + break; + default: + break; + } + + trace_usb_dwc2_glbreg_write(addr, glbregnm[index], orig, old, val); + *mmio = val; + + if (iflg) { + dwc2_update_irq(s); + } +} + +static uint64_t dwc2_fszreg_read(void *ptr, hwaddr addr, int index, + unsigned size) +{ + DWC2State *s = ptr; + uint32_t val; + + if (addr != HPTXFSIZ) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%"HWADDR_PRIx"\n", + __func__, addr); + return 0; + } + + val = s->fszreg[index]; + + trace_usb_dwc2_fszreg_read(addr, val); + return val; +} + +static void dwc2_fszreg_write(void *ptr, hwaddr addr, int index, uint64_t val, + unsigned size) +{ + DWC2State *s = ptr; + uint64_t orig = val; + uint32_t *mmio; + uint32_t old; + + if (addr != HPTXFSIZ) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%"HWADDR_PRIx"\n", + __func__, addr); + return; + } + + mmio = &s->fszreg[index]; + old = *mmio; + + trace_usb_dwc2_fszreg_write(addr, orig, old, val); + *mmio = val; +} + +static const char *hreg0nm[] = { + "HCFG ", "HFIR ", "HFNUM ", " ", "HPTXSTS ", + "HAINT ", "HAINTMSK ", "HFLBADDR ", " ", " ", + " ", " ", " ", " ", " ", + " ", "HPRT0 " +}; + +static uint64_t dwc2_hreg0_read(void *ptr, hwaddr addr, int index, + unsigned size) +{ + DWC2State *s = ptr; + uint32_t val; + + if (addr < HCFG || addr > HPRT0) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%"HWADDR_PRIx"\n", + __func__, addr); + return 0; + } + + val = s->hreg0[index]; + + switch (addr) { + case HFNUM: + val = (dwc2_get_frame_remaining(s) << HFNUM_FRREM_SHIFT) | + (s->hfnum << HFNUM_FRNUM_SHIFT); + break; + default: + break; + } + + trace_usb_dwc2_hreg0_read(addr, hreg0nm[index], val); + return val; +} + +static void dwc2_hreg0_write(void *ptr, hwaddr addr, int index, uint64_t val, + unsigned size) +{ + DWC2State *s = ptr; + USBDevice *dev = s->uport.dev; + uint64_t orig = val; + uint32_t *mmio; + uint32_t tval, told, old; + int prst = 0; + int iflg = 0; + + if (addr < HCFG || addr > HPRT0) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%"HWADDR_PRIx"\n", + __func__, addr); + return; + } + + mmio = &s->hreg0[index]; + old = *mmio; + + switch (addr) { + case HFIR: + break; + case HFNUM: + case HPTXSTS: + case HAINT: + qemu_log_mask(LOG_GUEST_ERROR, "%s: write to read-only register\n", + __func__); + return; + case HAINTMSK: + val &= 0xffff; + break; + case HPRT0: + /* don't allow clearing of read-only bits */ + val |= old & (HPRT0_SPD_MASK | HPRT0_LNSTS_MASK | HPRT0_OVRCURRACT | + HPRT0_CONNSTS); + /* don't allow clearing of self-clearing bits */ + val |= old & (HPRT0_SUSP | HPRT0_RES); + /* don't allow setting of self-setting bits */ + if (!(old & HPRT0_ENA) && (val & HPRT0_ENA)) { + val &= ~HPRT0_ENA; + } + /* clear the write-1-to-clear bits */ + tval = val & (HPRT0_OVRCURRCHG | HPRT0_ENACHG | HPRT0_ENA | + HPRT0_CONNDET); + told = old & (HPRT0_OVRCURRCHG | HPRT0_ENACHG | HPRT0_ENA | + HPRT0_CONNDET); + tval |= ~told; + tval = ~tval; + tval &= (HPRT0_OVRCURRCHG | HPRT0_ENACHG | HPRT0_ENA | + HPRT0_CONNDET); + val &= ~(HPRT0_OVRCURRCHG | 
HPRT0_ENACHG | HPRT0_ENA | + HPRT0_CONNDET); + val |= tval; + if (!(val & HPRT0_RST) && (old & HPRT0_RST)) { + if (dev && dev->attached) { + val |= HPRT0_ENA | HPRT0_ENACHG; + prst = 1; + } + } + if (val & (HPRT0_OVRCURRCHG | HPRT0_ENACHG | HPRT0_CONNDET)) { + iflg = 1; + } else { + iflg = -1; + } + break; + default: + break; + } + + if (prst) { + trace_usb_dwc2_hreg0_write(addr, hreg0nm[index], orig, old, + val & ~HPRT0_CONNDET); + trace_usb_dwc2_hreg0_action("call usb_port_reset"); + usb_port_reset(&s->uport); + val &= ~HPRT0_CONNDET; + } else { + trace_usb_dwc2_hreg0_write(addr, hreg0nm[index], orig, old, val); + } + + *mmio = val; + + if (iflg > 0) { + trace_usb_dwc2_hreg0_action("enable PRTINT"); + dwc2_raise_global_irq(s, GINTSTS_PRTINT); + } else if (iflg < 0) { + trace_usb_dwc2_hreg0_action("disable PRTINT"); + dwc2_lower_global_irq(s, GINTSTS_PRTINT); + } +} + +static const char *hreg1nm[] = { + "HCCHAR ", "HCSPLT ", "HCINT ", "HCINTMSK", "HCTSIZ ", "HCDMA ", + " ", "HCDMAB " +}; + +static uint64_t dwc2_hreg1_read(void *ptr, hwaddr addr, int index, + unsigned size) +{ + DWC2State *s = ptr; + uint32_t val; + + if (addr < HCCHAR(0) || addr > HCDMAB(DWC2_NB_CHAN - 1)) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%"HWADDR_PRIx"\n", + __func__, addr); + return 0; + } + + val = s->hreg1[index]; + + trace_usb_dwc2_hreg1_read(addr, hreg1nm[index & 7], addr >> 5, val); + return val; +} + +static void dwc2_hreg1_write(void *ptr, hwaddr addr, int index, uint64_t val, + unsigned size) +{ + DWC2State *s = ptr; + uint64_t orig = val; + uint32_t *mmio; + uint32_t old; + int iflg = 0; + int enflg = 0; + int disflg = 0; + + if (addr < HCCHAR(0) || addr > HCDMAB(DWC2_NB_CHAN - 1)) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%"HWADDR_PRIx"\n", + __func__, addr); + return; + } + + mmio = &s->hreg1[index]; + old = *mmio; + + switch (HSOTG_REG(0x500) + (addr & 0x1c)) { + case HCCHAR(0): + if ((val & HCCHAR_CHDIS) && !(old & HCCHAR_CHDIS)) { + val &= ~(HCCHAR_CHENA | HCCHAR_CHDIS); + disflg = 1; + } else { + val |= old & HCCHAR_CHDIS; + if ((val & HCCHAR_CHENA) && !(old & HCCHAR_CHENA)) { + val &= ~HCCHAR_CHDIS; + enflg = 1; + } else { + val |= old & HCCHAR_CHENA; + } + } + break; + case HCINT(0): + /* clear the write-1-to-clear bits */ + val |= ~old; + val = ~val; + val &= ~HCINTMSK_RESERVED14_31; + iflg = 1; + break; + case HCINTMSK(0): + val &= ~HCINTMSK_RESERVED14_31; + iflg = 1; + break; + case HCDMAB(0): + qemu_log_mask(LOG_GUEST_ERROR, "%s: write to read-only register\n", + __func__); + return; + default: + break; + } + + trace_usb_dwc2_hreg1_write(addr, hreg1nm[index & 7], index >> 3, orig, + old, val); + *mmio = val; + + if (disflg) { + /* set ChHltd in HCINT */ + s->hreg1[(index & ~7) + 2] |= HCINTMSK_CHHLTD; + iflg = 1; + } + + if (enflg) { + dwc2_enable_chan(s, index & ~7); + } + + if (iflg) { + dwc2_update_hc_irq(s, index & ~7); + } +} + +static const char *pcgregnm[] = { + "PCGCTL ", "PCGCCTL1 " +}; + +static uint64_t dwc2_pcgreg_read(void *ptr, hwaddr addr, int index, + unsigned size) +{ + DWC2State *s = ptr; + uint32_t val; + + if (addr < PCGCTL || addr > PCGCCTL1) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%"HWADDR_PRIx"\n", + __func__, addr); + return 0; + } + + val = s->pcgreg[index]; + + trace_usb_dwc2_pcgreg_read(addr, pcgregnm[index], val); + return val; +} + +static void dwc2_pcgreg_write(void *ptr, hwaddr addr, int index, + uint64_t val, unsigned size) +{ + DWC2State *s = ptr; + uint64_t orig = val; + uint32_t *mmio; + uint32_t old; + + if (addr < 
PCGCTL || addr > PCGCCTL1) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%"HWADDR_PRIx"\n", + __func__, addr); + return; + } + + mmio = &s->pcgreg[index]; + old = *mmio; + + trace_usb_dwc2_pcgreg_write(addr, pcgregnm[index], orig, old, val); + *mmio = val; +} + +static uint64_t dwc2_hsotg_read(void *ptr, hwaddr addr, unsigned size) +{ + uint64_t val; + + switch (addr) { + case HSOTG_REG(0x000) ... HSOTG_REG(0x0fc): + val = dwc2_glbreg_read(ptr, addr, (addr - HSOTG_REG(0x000)) >> 2, size); + break; + case HSOTG_REG(0x100): + val = dwc2_fszreg_read(ptr, addr, (addr - HSOTG_REG(0x100)) >> 2, size); + break; + case HSOTG_REG(0x104) ... HSOTG_REG(0x3fc): + /* Gadget-mode registers, just return 0 for now */ + val = 0; + break; + case HSOTG_REG(0x400) ... HSOTG_REG(0x4fc): + val = dwc2_hreg0_read(ptr, addr, (addr - HSOTG_REG(0x400)) >> 2, size); + break; + case HSOTG_REG(0x500) ... HSOTG_REG(0x7fc): + val = dwc2_hreg1_read(ptr, addr, (addr - HSOTG_REG(0x500)) >> 2, size); + break; + case HSOTG_REG(0x800) ... HSOTG_REG(0xdfc): + /* Gadget-mode registers, just return 0 for now */ + val = 0; + break; + case HSOTG_REG(0xe00) ... HSOTG_REG(0xffc): + val = dwc2_pcgreg_read(ptr, addr, (addr - HSOTG_REG(0xe00)) >> 2, size); + break; + default: + g_assert_not_reached(); + } + + return val; +} + +static void dwc2_hsotg_write(void *ptr, hwaddr addr, uint64_t val, + unsigned size) +{ + switch (addr) { + case HSOTG_REG(0x000) ... HSOTG_REG(0x0fc): + dwc2_glbreg_write(ptr, addr, (addr - HSOTG_REG(0x000)) >> 2, val, size); + break; + case HSOTG_REG(0x100): + dwc2_fszreg_write(ptr, addr, (addr - HSOTG_REG(0x100)) >> 2, val, size); + break; + case HSOTG_REG(0x104) ... HSOTG_REG(0x3fc): + /* Gadget-mode registers, do nothing for now */ + break; + case HSOTG_REG(0x400) ... HSOTG_REG(0x4fc): + dwc2_hreg0_write(ptr, addr, (addr - HSOTG_REG(0x400)) >> 2, val, size); + break; + case HSOTG_REG(0x500) ... HSOTG_REG(0x7fc): + dwc2_hreg1_write(ptr, addr, (addr - HSOTG_REG(0x500)) >> 2, val, size); + break; + case HSOTG_REG(0x800) ... HSOTG_REG(0xdfc): + /* Gadget-mode registers, do nothing for now */ + break; + case HSOTG_REG(0xe00) ... HSOTG_REG(0xffc): + dwc2_pcgreg_write(ptr, addr, (addr - HSOTG_REG(0xe00)) >> 2, val, size); + break; + default: + g_assert_not_reached(); + } +} + +static const MemoryRegionOps dwc2_mmio_hsotg_ops = { + .read = dwc2_hsotg_read, + .write = dwc2_hsotg_write, + .impl.min_access_size = 4, + .impl.max_access_size = 4, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static uint64_t dwc2_hreg2_read(void *ptr, hwaddr addr, unsigned size) +{ + /* TODO - implement FIFOs to support slave mode */ + trace_usb_dwc2_hreg2_read(addr, addr >> 12, 0); + qemu_log_mask(LOG_UNIMP, "%s: FIFO read not implemented\n", __func__); + return 0; +} + +static void dwc2_hreg2_write(void *ptr, hwaddr addr, uint64_t val, + unsigned size) +{ + uint64_t orig = val; + + /* TODO - implement FIFOs to support slave mode */ + trace_usb_dwc2_hreg2_write(addr, addr >> 12, orig, 0, val); + qemu_log_mask(LOG_UNIMP, "%s: FIFO write not implemented\n", __func__); +} + +static const MemoryRegionOps dwc2_mmio_hreg2_ops = { + .read = dwc2_hreg2_read, + .write = dwc2_hreg2_write, + .impl.min_access_size = 4, + .impl.max_access_size = 4, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static void dwc2_wakeup_endpoint(USBBus *bus, USBEndpoint *ep, + unsigned int stream) +{ + DWC2State *s = container_of(bus, DWC2State, bus); + + trace_usb_dwc2_wakeup_endpoint(ep, stream); + + /* TODO - do something here? 
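Scheduling the bottom half below at least gives any channel waiting on this endpoint a chance to make progress.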
*/ + qemu_bh_schedule(s->async_bh); +} + +static USBBusOps dwc2_bus_ops = { + .wakeup_endpoint = dwc2_wakeup_endpoint, +}; + +static void dwc2_work_timer(void *opaque) +{ + DWC2State *s = opaque; + + trace_usb_dwc2_work_timer(); + qemu_bh_schedule(s->async_bh); +} + +static void dwc2_reset_enter(Object *obj, ResetType type) +{ + DWC2Class *c = DWC2_USB_GET_CLASS(obj); + DWC2State *s = DWC2_USB(obj); + int i; + + trace_usb_dwc2_reset_enter(); + + if (c->parent_phases.enter) { + c->parent_phases.enter(obj, type); + } + + timer_del(s->frame_timer); + qemu_bh_cancel(s->async_bh); + + if (s->uport.dev && s->uport.dev->attached) { + usb_detach(&s->uport); + } + + dwc2_bus_stop(s); + + s->gotgctl = GOTGCTL_BSESVLD | GOTGCTL_ASESVLD | GOTGCTL_CONID_B; + s->gotgint = 0; + s->gahbcfg = 0; + s->gusbcfg = 5 << GUSBCFG_USBTRDTIM_SHIFT; + s->grstctl = GRSTCTL_AHBIDLE; + s->gintsts = GINTSTS_CONIDSTSCHNG | GINTSTS_PTXFEMP | GINTSTS_NPTXFEMP | + GINTSTS_CURMODE_HOST; + s->gintmsk = 0; + s->grxstsr = 0; + s->grxstsp = 0; + s->grxfsiz = 1024; + s->gnptxfsiz = 1024 << FIFOSIZE_DEPTH_SHIFT; + s->gnptxsts = (4 << FIFOSIZE_DEPTH_SHIFT) | 1024; + s->gi2cctl = GI2CCTL_I2CDATSE0 | GI2CCTL_ACK; + s->gpvndctl = 0; + s->ggpio = 0; + s->guid = 0; + s->gsnpsid = 0x4f54294a; + s->ghwcfg1 = 0; + s->ghwcfg2 = (8 << GHWCFG2_DEV_TOKEN_Q_DEPTH_SHIFT) | + (4 << GHWCFG2_HOST_PERIO_TX_Q_DEPTH_SHIFT) | + (4 << GHWCFG2_NONPERIO_TX_Q_DEPTH_SHIFT) | + GHWCFG2_DYNAMIC_FIFO | + GHWCFG2_PERIO_EP_SUPPORTED | + ((DWC2_NB_CHAN - 1) << GHWCFG2_NUM_HOST_CHAN_SHIFT) | + (GHWCFG2_INT_DMA_ARCH << GHWCFG2_ARCHITECTURE_SHIFT) | + (GHWCFG2_OP_MODE_NO_SRP_CAPABLE_HOST << GHWCFG2_OP_MODE_SHIFT); + s->ghwcfg3 = (4096 << GHWCFG3_DFIFO_DEPTH_SHIFT) | + (4 << GHWCFG3_PACKET_SIZE_CNTR_WIDTH_SHIFT) | + (4 << GHWCFG3_XFER_SIZE_CNTR_WIDTH_SHIFT); + s->ghwcfg4 = 0; + s->glpmcfg = 0; + s->gpwrdn = GPWRDN_PWRDNRSTN; + s->gdfifocfg = 0; + s->gadpctl = 0; + s->grefclk = 0; + s->gintmsk2 = 0; + s->gintsts2 = 0; + + s->hptxfsiz = 500 << FIFOSIZE_DEPTH_SHIFT; + + s->hcfg = 2 << HCFG_RESVALID_SHIFT; + s->hfir = 60000; + s->hfnum = 0x3fff; + s->hptxsts = (16 << TXSTS_QSPCAVAIL_SHIFT) | 32768; + s->haint = 0; + s->haintmsk = 0; + s->hprt0 = 0; + + memset(s->hreg1, 0, sizeof(s->hreg1)); + memset(s->pcgreg, 0, sizeof(s->pcgreg)); + + s->sof_time = 0; + s->frame_number = 0; + s->fi = USB_FRMINTVL - 1; + s->next_chan = 0; + s->working = false; + + for (i = 0; i < DWC2_NB_CHAN; i++) { + s->packet[i].needs_service = false; + } +} + +static void dwc2_reset_hold(Object *obj) +{ + DWC2Class *c = DWC2_USB_GET_CLASS(obj); + DWC2State *s = DWC2_USB(obj); + + trace_usb_dwc2_reset_hold(); + + if (c->parent_phases.hold) { + c->parent_phases.hold(obj); + } + + dwc2_update_irq(s); +} + +static void dwc2_reset_exit(Object *obj) +{ + DWC2Class *c = DWC2_USB_GET_CLASS(obj); + DWC2State *s = DWC2_USB(obj); + + trace_usb_dwc2_reset_exit(); + + if (c->parent_phases.exit) { + c->parent_phases.exit(obj); + } + + s->hprt0 = HPRT0_PWR; + if (s->uport.dev && s->uport.dev->attached) { + usb_attach(&s->uport); + usb_device_reset(s->uport.dev); + } +} + +static void dwc2_realize(DeviceState *dev, Error **errp) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + DWC2State *s = DWC2_USB(dev); + Object *obj; + + obj = object_property_get_link(OBJECT(dev), "dma-mr", &error_abort); + + s->dma_mr = MEMORY_REGION(obj); + address_space_init(&s->dma_as, s->dma_mr, "dwc2"); + + usb_bus_new(&s->bus, sizeof(s->bus), &dwc2_bus_ops, dev); + usb_register_port(&s->bus, &s->uport, s, 0, &dwc2_port_ops, + 
USB_SPEED_MASK_LOW | USB_SPEED_MASK_FULL | + (s->usb_version == 2 ? USB_SPEED_MASK_HIGH : 0)); + s->uport.dev = 0; + + s->usb_frame_time = NANOSECONDS_PER_SECOND / 1000; /* 1000000 */ + if (NANOSECONDS_PER_SECOND >= USB_HZ_FS) { + s->usb_bit_time = NANOSECONDS_PER_SECOND / USB_HZ_FS; /* 83.3 */ + } else { + s->usb_bit_time = 1; + } + + s->fi = USB_FRMINTVL - 1; + s->eof_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, dwc2_frame_boundary, s); + s->frame_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, dwc2_work_timer, s); + s->async_bh = qemu_bh_new(dwc2_work_bh, s); + + sysbus_init_irq(sbd, &s->irq); +} + +static void dwc2_init(Object *obj) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + DWC2State *s = DWC2_USB(obj); + + memory_region_init(&s->container, obj, "dwc2", DWC2_MMIO_SIZE); + sysbus_init_mmio(sbd, &s->container); + + memory_region_init_io(&s->hsotg, obj, &dwc2_mmio_hsotg_ops, s, + "dwc2-io", 4 * KiB); + memory_region_add_subregion(&s->container, 0x0000, &s->hsotg); + + memory_region_init_io(&s->fifos, obj, &dwc2_mmio_hreg2_ops, s, + "dwc2-fifo", 64 * KiB); + memory_region_add_subregion(&s->container, 0x1000, &s->fifos); +} + +static const VMStateDescription vmstate_dwc2_state_packet = { + .name = "dwc2/packet", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(devadr, DWC2Packet), + VMSTATE_UINT32(epnum, DWC2Packet), + VMSTATE_UINT32(epdir, DWC2Packet), + VMSTATE_UINT32(mps, DWC2Packet), + VMSTATE_UINT32(pid, DWC2Packet), + VMSTATE_UINT32(index, DWC2Packet), + VMSTATE_UINT32(pcnt, DWC2Packet), + VMSTATE_UINT32(len, DWC2Packet), + VMSTATE_INT32(async, DWC2Packet), + VMSTATE_BOOL(small, DWC2Packet), + VMSTATE_BOOL(needs_service, DWC2Packet), + VMSTATE_END_OF_LIST() + }, +}; + +const VMStateDescription vmstate_dwc2_state = { + .name = "dwc2", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32_ARRAY(glbreg, DWC2State, + DWC2_GLBREG_SIZE / sizeof(uint32_t)), + VMSTATE_UINT32_ARRAY(fszreg, DWC2State, + DWC2_FSZREG_SIZE / sizeof(uint32_t)), + VMSTATE_UINT32_ARRAY(hreg0, DWC2State, + DWC2_HREG0_SIZE / sizeof(uint32_t)), + VMSTATE_UINT32_ARRAY(hreg1, DWC2State, + DWC2_HREG1_SIZE / sizeof(uint32_t)), + VMSTATE_UINT32_ARRAY(pcgreg, DWC2State, + DWC2_PCGREG_SIZE / sizeof(uint32_t)), + + VMSTATE_TIMER_PTR(eof_timer, DWC2State), + VMSTATE_TIMER_PTR(frame_timer, DWC2State), + VMSTATE_INT64(sof_time, DWC2State), + VMSTATE_INT64(usb_frame_time, DWC2State), + VMSTATE_INT64(usb_bit_time, DWC2State), + VMSTATE_UINT32(usb_version, DWC2State), + VMSTATE_UINT16(frame_number, DWC2State), + VMSTATE_UINT16(fi, DWC2State), + VMSTATE_UINT16(next_chan, DWC2State), + VMSTATE_BOOL(working, DWC2State), + + VMSTATE_STRUCT_ARRAY(packet, DWC2State, DWC2_NB_CHAN, 1, + vmstate_dwc2_state_packet, DWC2Packet), + VMSTATE_UINT8_2DARRAY(usb_buf, DWC2State, DWC2_NB_CHAN, + DWC2_MAX_XFER_SIZE), + + VMSTATE_END_OF_LIST() + } +}; + +static Property dwc2_usb_properties[] = { + DEFINE_PROP_UINT32("usb_version", DWC2State, usb_version, 2), + DEFINE_PROP_END_OF_LIST(), +}; + +static void dwc2_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + DWC2Class *c = DWC2_USB_CLASS(klass); + ResettableClass *rc = RESETTABLE_CLASS(klass); + + dc->realize = dwc2_realize; + dc->vmsd = &vmstate_dwc2_state; + set_bit(DEVICE_CATEGORY_USB, dc->categories); + device_class_set_props(dc, dwc2_usb_properties); + resettable_class_set_parent_phases(rc, dwc2_reset_enter, dwc2_reset_hold, + dwc2_reset_exit, &c->parent_phases); +} + +static const 
TypeInfo dwc2_usb_type_info = { + .name = TYPE_DWC2_USB, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(DWC2State), + .instance_init = dwc2_init, + .class_size = sizeof(DWC2Class), + .class_init = dwc2_class_init, +}; + +static void dwc2_usb_register_types(void) +{ + type_register_static(&dwc2_usb_type_info); +} + +type_init(dwc2_usb_register_types) diff --git a/hw/usb/hcd-dwc2.h b/hw/usb/hcd-dwc2.h new file mode 100644 index 000000000..6998b0470 --- /dev/null +++ b/hw/usb/hcd-dwc2.h @@ -0,0 +1,186 @@ +/* + * dwc-hsotg (dwc2) USB host controller state definitions + * + * Based on hw/usb/hcd-ehci.h + * + * Copyright (c) 2020 Paul Zimmerman + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef HW_USB_DWC2_H +#define HW_USB_DWC2_H + +#include "qemu/timer.h" +#include "hw/irq.h" +#include "hw/sysbus.h" +#include "hw/usb.h" +#include "sysemu/dma.h" +#include "qom/object.h" + +#define DWC2_MMIO_SIZE 0x11000 + +#define DWC2_NB_CHAN 8 /* Number of host channels */ +#define DWC2_MAX_XFER_SIZE 65536 /* Max transfer size expected in HCTSIZ */ + +typedef struct DWC2Packet DWC2Packet; +typedef struct DWC2State DWC2State; +typedef struct DWC2Class DWC2Class; + +enum async_state { + DWC2_ASYNC_NONE = 0, + DWC2_ASYNC_INITIALIZED, + DWC2_ASYNC_INFLIGHT, + DWC2_ASYNC_FINISHED, +}; + +struct DWC2Packet { + USBPacket packet; + uint32_t devadr; + uint32_t epnum; + uint32_t epdir; + uint32_t mps; + uint32_t pid; + uint32_t index; + uint32_t pcnt; + uint32_t len; + int32_t async; + bool small; + bool needs_service; +}; + +struct DWC2State { + /*< private >*/ + SysBusDevice parent_obj; + + /*< public >*/ + USBBus bus; + qemu_irq irq; + MemoryRegion *dma_mr; + AddressSpace dma_as; + MemoryRegion container; + MemoryRegion hsotg; + MemoryRegion fifos; + + union { +#define DWC2_GLBREG_SIZE 0x70 + uint32_t glbreg[DWC2_GLBREG_SIZE / sizeof(uint32_t)]; + struct { + uint32_t gotgctl; /* 00 */ + uint32_t gotgint; /* 04 */ + uint32_t gahbcfg; /* 08 */ + uint32_t gusbcfg; /* 0c */ + uint32_t grstctl; /* 10 */ + uint32_t gintsts; /* 14 */ + uint32_t gintmsk; /* 18 */ + uint32_t grxstsr; /* 1c */ + uint32_t grxstsp; /* 20 */ + uint32_t grxfsiz; /* 24 */ + uint32_t gnptxfsiz; /* 28 */ + uint32_t gnptxsts; /* 2c */ + uint32_t gi2cctl; /* 30 */ + uint32_t gpvndctl; /* 34 */ + uint32_t ggpio; /* 38 */ + uint32_t guid; /* 3c */ + uint32_t gsnpsid; /* 40 */ + uint32_t ghwcfg1; /* 44 */ + uint32_t ghwcfg2; /* 48 */ + uint32_t ghwcfg3; /* 4c */ + uint32_t ghwcfg4; /* 50 */ + uint32_t glpmcfg; /* 54 */ + uint32_t gpwrdn; /* 58 */ + uint32_t gdfifocfg; /* 5c */ + uint32_t gadpctl; /* 60 */ + uint32_t grefclk; /* 64 */ + uint32_t gintmsk2; /* 68 */ + uint32_t gintsts2; /* 6c */ + }; + }; + + union { +#define DWC2_FSZREG_SIZE 0x04 + uint32_t fszreg[DWC2_FSZREG_SIZE / sizeof(uint32_t)]; + struct { + uint32_t hptxfsiz; /* 100 */ + }; + }; + + union { +#define DWC2_HREG0_SIZE 0x44 + uint32_t hreg0[DWC2_HREG0_SIZE / sizeof(uint32_t)]; + struct { + uint32_t hcfg; /* 400 */ + uint32_t hfir; /* 404 */ + uint32_t hfnum; /* 408 */ + uint32_t rsvd0; /* 40c */ + uint32_t 
hptxsts; /* 410 */ + uint32_t haint; /* 414 */ + uint32_t haintmsk; /* 418 */ + uint32_t hflbaddr; /* 41c */ + uint32_t rsvd1[8]; /* 420-43c */ + uint32_t hprt0; /* 440 */ + }; + }; + +#define DWC2_HREG1_SIZE (0x20 * DWC2_NB_CHAN) + uint32_t hreg1[DWC2_HREG1_SIZE / sizeof(uint32_t)]; + +#define hcchar(_ch) hreg1[((_ch) << 3) + 0] /* 500, 520, ... */ +#define hcsplt(_ch) hreg1[((_ch) << 3) + 1] /* 504, 524, ... */ +#define hcint(_ch) hreg1[((_ch) << 3) + 2] /* 508, 528, ... */ +#define hcintmsk(_ch) hreg1[((_ch) << 3) + 3] /* 50c, 52c, ... */ +#define hctsiz(_ch) hreg1[((_ch) << 3) + 4] /* 510, 530, ... */ +#define hcdma(_ch) hreg1[((_ch) << 3) + 5] /* 514, 534, ... */ +#define hcdmab(_ch) hreg1[((_ch) << 3) + 7] /* 51c, 53c, ... */ + + union { +#define DWC2_PCGREG_SIZE 0x08 + uint32_t pcgreg[DWC2_PCGREG_SIZE / sizeof(uint32_t)]; + struct { + uint32_t pcgctl; /* e00 */ + uint32_t pcgcctl1; /* e04 */ + }; + }; + + /* TODO - implement FIFO registers for slave mode */ +#define DWC2_HFIFO_SIZE (0x1000 * DWC2_NB_CHAN) + + /* + * Internal state + */ + QEMUTimer *eof_timer; + QEMUTimer *frame_timer; + QEMUBH *async_bh; + int64_t sof_time; + int64_t usb_frame_time; + int64_t usb_bit_time; + uint32_t usb_version; + uint16_t frame_number; + uint16_t fi; + uint16_t next_chan; + bool working; + USBPort uport; + DWC2Packet packet[DWC2_NB_CHAN]; /* one packet per chan */ + uint8_t usb_buf[DWC2_NB_CHAN][DWC2_MAX_XFER_SIZE]; /* one buffer per chan */ +}; + +struct DWC2Class { + /*< private >*/ + SysBusDeviceClass parent_class; + ResettablePhases parent_phases; + + /*< public >*/ +}; + +#define TYPE_DWC2_USB "dwc2-usb" +OBJECT_DECLARE_TYPE(DWC2State, DWC2Class, DWC2_USB) + +#endif diff --git a/hw/usb/hcd-dwc3.c b/hw/usb/hcd-dwc3.c new file mode 100644 index 000000000..279263489 --- /dev/null +++ b/hw/usb/hcd-dwc3.c @@ -0,0 +1,688 @@ +/* + * QEMU model of the USB DWC3 host controller emulation. + * + * This model defines global register space of DWC3 controller. Global + * registers control the AXI/AHB interfaces properties, external FIFO support + * and event count support. All of which are unimplemented at present. We are + * only supporting core reset and read of ID register. + * + * Copyright (c) 2020 Xilinx Inc. Vikram Garhwal + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include "hw/sysbus.h" +#include "hw/register.h" +#include "qemu/bitops.h" +#include "qom/object.h" +#include "migration/vmstate.h" +#include "hw/qdev-properties.h" +#include "hw/usb/hcd-dwc3.h" +#include "qapi/error.h" + +#ifndef USB_DWC3_ERR_DEBUG +#define USB_DWC3_ERR_DEBUG 0 +#endif + +#define HOST_MODE 1 +#define FIFO_LEN 0x1000 + +REG32(GSBUSCFG0, 0x00) + FIELD(GSBUSCFG0, DATRDREQINFO, 28, 4) + FIELD(GSBUSCFG0, DESRDREQINFO, 24, 4) + FIELD(GSBUSCFG0, DATWRREQINFO, 20, 4) + FIELD(GSBUSCFG0, DESWRREQINFO, 16, 4) + FIELD(GSBUSCFG0, RESERVED_15_12, 12, 4) + FIELD(GSBUSCFG0, DATBIGEND, 11, 1) + FIELD(GSBUSCFG0, DESBIGEND, 10, 1) + FIELD(GSBUSCFG0, RESERVED_9_8, 8, 2) + FIELD(GSBUSCFG0, INCR256BRSTENA, 7, 1) + FIELD(GSBUSCFG0, INCR128BRSTENA, 6, 1) + FIELD(GSBUSCFG0, INCR64BRSTENA, 5, 1) + FIELD(GSBUSCFG0, INCR32BRSTENA, 4, 1) + FIELD(GSBUSCFG0, INCR16BRSTENA, 3, 1) + FIELD(GSBUSCFG0, INCR8BRSTENA, 2, 1) + FIELD(GSBUSCFG0, INCR4BRSTENA, 1, 1) + FIELD(GSBUSCFG0, INCRBRSTENA, 0, 1) +REG32(GSBUSCFG1, 0x04) + FIELD(GSBUSCFG1, RESERVED_31_13, 13, 19) + FIELD(GSBUSCFG1, EN1KPAGE, 12, 1) + FIELD(GSBUSCFG1, PIPETRANSLIMIT, 8, 4) + FIELD(GSBUSCFG1, RESERVED_7_0, 0, 8) +REG32(GTXTHRCFG, 0x08) + FIELD(GTXTHRCFG, RESERVED_31, 31, 1) + FIELD(GTXTHRCFG, RESERVED_30, 30, 1) + FIELD(GTXTHRCFG, USBTXPKTCNTSEL, 29, 1) + FIELD(GTXTHRCFG, RESERVED_28, 28, 1) + FIELD(GTXTHRCFG, USBTXPKTCNT, 24, 4) + FIELD(GTXTHRCFG, USBMAXTXBURSTSIZE, 16, 8) + FIELD(GTXTHRCFG, RESERVED_15, 15, 1) + FIELD(GTXTHRCFG, RESERVED_14, 14, 1) + FIELD(GTXTHRCFG, RESERVED_13_11, 11, 3) + FIELD(GTXTHRCFG, RESERVED_10_0, 0, 11) +REG32(GRXTHRCFG, 0x0c) + FIELD(GRXTHRCFG, RESERVED_31_30, 30, 2) + FIELD(GRXTHRCFG, USBRXPKTCNTSEL, 29, 1) + FIELD(GRXTHRCFG, RESERVED_28, 28, 1) + FIELD(GRXTHRCFG, USBRXPKTCNT, 24, 4) + FIELD(GRXTHRCFG, USBMAXRXBURSTSIZE, 19, 5) + FIELD(GRXTHRCFG, RESERVED_18_16, 16, 3) + FIELD(GRXTHRCFG, RESERVED_15, 15, 1) + FIELD(GRXTHRCFG, RESERVED_14_13, 13, 2) + FIELD(GRXTHRCFG, RESVISOCOUTSPC, 0, 13) +REG32(GCTL, 0x10) + FIELD(GCTL, PWRDNSCALE, 19, 13) + FIELD(GCTL, MASTERFILTBYPASS, 18, 1) + FIELD(GCTL, BYPSSETADDR, 17, 1) + FIELD(GCTL, U2RSTECN, 16, 1) + FIELD(GCTL, FRMSCLDWN, 14, 2) + FIELD(GCTL, PRTCAPDIR, 12, 2) + FIELD(GCTL, CORESOFTRESET, 11, 1) + FIELD(GCTL, U1U2TIMERSCALE, 9, 1) + FIELD(GCTL, DEBUGATTACH, 8, 1) + FIELD(GCTL, RAMCLKSEL, 6, 2) + FIELD(GCTL, SCALEDOWN, 4, 2) + FIELD(GCTL, DISSCRAMBLE, 3, 1) + FIELD(GCTL, U2EXIT_LFPS, 2, 1) + FIELD(GCTL, GBLHIBERNATIONEN, 1, 1) + FIELD(GCTL, DSBLCLKGTNG, 0, 1) +REG32(GPMSTS, 0x14) +REG32(GSTS, 0x18) + FIELD(GSTS, CBELT, 20, 12) + FIELD(GSTS, RESERVED_19_12, 12, 8) + FIELD(GSTS, SSIC_IP, 11, 1) + FIELD(GSTS, OTG_IP, 10, 1) + FIELD(GSTS, BC_IP, 9, 1) + FIELD(GSTS, ADP_IP, 8, 1) + FIELD(GSTS, HOST_IP, 7, 1) + FIELD(GSTS, DEVICE_IP, 6, 1) + FIELD(GSTS, CSRTIMEOUT, 5, 1) + FIELD(GSTS, BUSERRADDRVLD, 4, 1) + FIELD(GSTS, RESERVED_3_2, 2, 2) + FIELD(GSTS, CURMOD, 0, 2) +REG32(GUCTL1, 0x1c) + FIELD(GUCTL1, RESUME_OPMODE_HS_HOST, 10, 1) +REG32(GSNPSID, 0x20) +REG32(GGPIO, 0x24) + FIELD(GGPIO, GPO, 16, 16) + FIELD(GGPIO, GPI, 0, 16) +REG32(GUID, 0x28) +REG32(GUCTL, 0x2c) + FIELD(GUCTL, REFCLKPER, 22, 10) + FIELD(GUCTL, NOEXTRDL, 21, 1) + FIELD(GUCTL, RESERVED_20_18, 18, 3) + FIELD(GUCTL, SPRSCTRLTRANSEN, 17, 1) + FIELD(GUCTL, RESBWHSEPS, 16, 1) + FIELD(GUCTL, RESERVED_15, 15, 1) + FIELD(GUCTL, USBHSTINAUTORETRYEN, 14, 1) + FIELD(GUCTL, ENOVERLAPCHK, 13, 1) + FIELD(GUCTL, EXTCAPSUPPTEN, 12, 1) + FIELD(GUCTL, INSRTEXTRFSBODI, 11, 1) + FIELD(GUCTL, 
DTCT, 9, 2) + FIELD(GUCTL, DTFT, 0, 9) +REG32(GBUSERRADDRLO, 0x30) +REG32(GBUSERRADDRHI, 0x34) +REG32(GHWPARAMS0, 0x40) + FIELD(GHWPARAMS0, GHWPARAMS0_31_24, 24, 8) + FIELD(GHWPARAMS0, GHWPARAMS0_23_16, 16, 8) + FIELD(GHWPARAMS0, GHWPARAMS0_15_8, 8, 8) + FIELD(GHWPARAMS0, GHWPARAMS0_7_6, 6, 2) + FIELD(GHWPARAMS0, GHWPARAMS0_5_3, 3, 3) + FIELD(GHWPARAMS0, GHWPARAMS0_2_0, 0, 3) +REG32(GHWPARAMS1, 0x44) + FIELD(GHWPARAMS1, GHWPARAMS1_31, 31, 1) + FIELD(GHWPARAMS1, GHWPARAMS1_30, 30, 1) + FIELD(GHWPARAMS1, GHWPARAMS1_29, 29, 1) + FIELD(GHWPARAMS1, GHWPARAMS1_28, 28, 1) + FIELD(GHWPARAMS1, GHWPARAMS1_27, 27, 1) + FIELD(GHWPARAMS1, GHWPARAMS1_26, 26, 1) + FIELD(GHWPARAMS1, GHWPARAMS1_25_24, 24, 2) + FIELD(GHWPARAMS1, GHWPARAMS1_23, 23, 1) + FIELD(GHWPARAMS1, GHWPARAMS1_22_21, 21, 2) + FIELD(GHWPARAMS1, GHWPARAMS1_20_15, 15, 6) + FIELD(GHWPARAMS1, GHWPARAMS1_14_12, 12, 3) + FIELD(GHWPARAMS1, GHWPARAMS1_11_9, 9, 3) + FIELD(GHWPARAMS1, GHWPARAMS1_8_6, 6, 3) + FIELD(GHWPARAMS1, GHWPARAMS1_5_3, 3, 3) + FIELD(GHWPARAMS1, GHWPARAMS1_2_0, 0, 3) +REG32(GHWPARAMS2, 0x48) +REG32(GHWPARAMS3, 0x4c) + FIELD(GHWPARAMS3, GHWPARAMS3_31, 31, 1) + FIELD(GHWPARAMS3, GHWPARAMS3_30_23, 23, 8) + FIELD(GHWPARAMS3, GHWPARAMS3_22_18, 18, 5) + FIELD(GHWPARAMS3, GHWPARAMS3_17_12, 12, 6) + FIELD(GHWPARAMS3, GHWPARAMS3_11, 11, 1) + FIELD(GHWPARAMS3, GHWPARAMS3_10, 10, 1) + FIELD(GHWPARAMS3, GHWPARAMS3_9_8, 8, 2) + FIELD(GHWPARAMS3, GHWPARAMS3_7_6, 6, 2) + FIELD(GHWPARAMS3, GHWPARAMS3_5_4, 4, 2) + FIELD(GHWPARAMS3, GHWPARAMS3_3_2, 2, 2) + FIELD(GHWPARAMS3, GHWPARAMS3_1_0, 0, 2) +REG32(GHWPARAMS4, 0x50) + FIELD(GHWPARAMS4, GHWPARAMS4_31_28, 28, 4) + FIELD(GHWPARAMS4, GHWPARAMS4_27_24, 24, 4) + FIELD(GHWPARAMS4, GHWPARAMS4_23, 23, 1) + FIELD(GHWPARAMS4, GHWPARAMS4_22, 22, 1) + FIELD(GHWPARAMS4, GHWPARAMS4_21, 21, 1) + FIELD(GHWPARAMS4, GHWPARAMS4_20_17, 17, 4) + FIELD(GHWPARAMS4, GHWPARAMS4_16_13, 13, 4) + FIELD(GHWPARAMS4, GHWPARAMS4_12, 12, 1) + FIELD(GHWPARAMS4, GHWPARAMS4_11, 11, 1) + FIELD(GHWPARAMS4, GHWPARAMS4_10_9, 9, 2) + FIELD(GHWPARAMS4, GHWPARAMS4_8_7, 7, 2) + FIELD(GHWPARAMS4, GHWPARAMS4_6, 6, 1) + FIELD(GHWPARAMS4, GHWPARAMS4_5_0, 0, 6) +REG32(GHWPARAMS5, 0x54) + FIELD(GHWPARAMS5, GHWPARAMS5_31_28, 28, 4) + FIELD(GHWPARAMS5, GHWPARAMS5_27_22, 22, 6) + FIELD(GHWPARAMS5, GHWPARAMS5_21_16, 16, 6) + FIELD(GHWPARAMS5, GHWPARAMS5_15_10, 10, 6) + FIELD(GHWPARAMS5, GHWPARAMS5_9_4, 4, 6) + FIELD(GHWPARAMS5, GHWPARAMS5_3_0, 0, 4) +REG32(GHWPARAMS6, 0x58) + FIELD(GHWPARAMS6, GHWPARAMS6_31_16, 16, 16) + FIELD(GHWPARAMS6, BUSFLTRSSUPPORT, 15, 1) + FIELD(GHWPARAMS6, BCSUPPORT, 14, 1) + FIELD(GHWPARAMS6, OTG_SS_SUPPORT, 13, 1) + FIELD(GHWPARAMS6, ADPSUPPORT, 12, 1) + FIELD(GHWPARAMS6, HNPSUPPORT, 11, 1) + FIELD(GHWPARAMS6, SRPSUPPORT, 10, 1) + FIELD(GHWPARAMS6, GHWPARAMS6_9_8, 8, 2) + FIELD(GHWPARAMS6, GHWPARAMS6_7, 7, 1) + FIELD(GHWPARAMS6, GHWPARAMS6_6, 6, 1) + FIELD(GHWPARAMS6, GHWPARAMS6_5_0, 0, 6) +REG32(GHWPARAMS7, 0x5c) + FIELD(GHWPARAMS7, GHWPARAMS7_31_16, 16, 16) + FIELD(GHWPARAMS7, GHWPARAMS7_15_0, 0, 16) +REG32(GDBGFIFOSPACE, 0x60) + FIELD(GDBGFIFOSPACE, SPACE_AVAILABLE, 16, 16) + FIELD(GDBGFIFOSPACE, RESERVED_15_9, 9, 7) + FIELD(GDBGFIFOSPACE, FIFO_QUEUE_SELECT, 0, 9) +REG32(GUCTL2, 0x9c) + FIELD(GUCTL2, RESERVED_31_26, 26, 6) + FIELD(GUCTL2, EN_HP_PM_TIMER, 19, 7) + FIELD(GUCTL2, NOLOWPWRDUR, 15, 4) + FIELD(GUCTL2, RST_ACTBITLATER, 14, 1) + FIELD(GUCTL2, RESERVED_13, 13, 1) + FIELD(GUCTL2, DISABLECFC, 11, 1) +REG32(GUSB2PHYCFG, 0x100) + FIELD(GUSB2PHYCFG, U2_FREECLK_EXISTS, 30, 1) + FIELD(GUSB2PHYCFG, 
ULPI_LPM_WITH_OPMODE_CHK, 29, 1) + FIELD(GUSB2PHYCFG, RESERVED_25, 25, 1) + FIELD(GUSB2PHYCFG, LSTRD, 22, 3) + FIELD(GUSB2PHYCFG, LSIPD, 19, 3) + FIELD(GUSB2PHYCFG, ULPIEXTVBUSINDIACTOR, 18, 1) + FIELD(GUSB2PHYCFG, ULPIEXTVBUSDRV, 17, 1) + FIELD(GUSB2PHYCFG, RESERVED_16, 16, 1) + FIELD(GUSB2PHYCFG, ULPIAUTORES, 15, 1) + FIELD(GUSB2PHYCFG, RESERVED_14, 14, 1) + FIELD(GUSB2PHYCFG, USBTRDTIM, 10, 4) + FIELD(GUSB2PHYCFG, XCVRDLY, 9, 1) + FIELD(GUSB2PHYCFG, ENBLSLPM, 8, 1) + FIELD(GUSB2PHYCFG, PHYSEL, 7, 1) + FIELD(GUSB2PHYCFG, SUSPENDUSB20, 6, 1) + FIELD(GUSB2PHYCFG, FSINTF, 5, 1) + FIELD(GUSB2PHYCFG, ULPI_UTMI_SEL, 4, 1) + FIELD(GUSB2PHYCFG, PHYIF, 3, 1) + FIELD(GUSB2PHYCFG, TOUTCAL, 0, 3) +REG32(GUSB2I2CCTL, 0x140) +REG32(GUSB2PHYACC_ULPI, 0x180) + FIELD(GUSB2PHYACC_ULPI, RESERVED_31_27, 27, 5) + FIELD(GUSB2PHYACC_ULPI, DISUIPIDRVR, 26, 1) + FIELD(GUSB2PHYACC_ULPI, NEWREGREQ, 25, 1) + FIELD(GUSB2PHYACC_ULPI, VSTSDONE, 24, 1) + FIELD(GUSB2PHYACC_ULPI, VSTSBSY, 23, 1) + FIELD(GUSB2PHYACC_ULPI, REGWR, 22, 1) + FIELD(GUSB2PHYACC_ULPI, REGADDR, 16, 6) + FIELD(GUSB2PHYACC_ULPI, EXTREGADDR, 8, 8) + FIELD(GUSB2PHYACC_ULPI, REGDATA, 0, 8) +REG32(GTXFIFOSIZ0, 0x200) + FIELD(GTXFIFOSIZ0, TXFSTADDR_N, 16, 16) + FIELD(GTXFIFOSIZ0, TXFDEP_N, 0, 16) +REG32(GTXFIFOSIZ1, 0x204) + FIELD(GTXFIFOSIZ1, TXFSTADDR_N, 16, 16) + FIELD(GTXFIFOSIZ1, TXFDEP_N, 0, 16) +REG32(GTXFIFOSIZ2, 0x208) + FIELD(GTXFIFOSIZ2, TXFSTADDR_N, 16, 16) + FIELD(GTXFIFOSIZ2, TXFDEP_N, 0, 16) +REG32(GTXFIFOSIZ3, 0x20c) + FIELD(GTXFIFOSIZ3, TXFSTADDR_N, 16, 16) + FIELD(GTXFIFOSIZ3, TXFDEP_N, 0, 16) +REG32(GTXFIFOSIZ4, 0x210) + FIELD(GTXFIFOSIZ4, TXFSTADDR_N, 16, 16) + FIELD(GTXFIFOSIZ4, TXFDEP_N, 0, 16) +REG32(GTXFIFOSIZ5, 0x214) + FIELD(GTXFIFOSIZ5, TXFSTADDR_N, 16, 16) + FIELD(GTXFIFOSIZ5, TXFDEP_N, 0, 16) +REG32(GRXFIFOSIZ0, 0x280) + FIELD(GRXFIFOSIZ0, RXFSTADDR_N, 16, 16) + FIELD(GRXFIFOSIZ0, RXFDEP_N, 0, 16) +REG32(GRXFIFOSIZ1, 0x284) + FIELD(GRXFIFOSIZ1, RXFSTADDR_N, 16, 16) + FIELD(GRXFIFOSIZ1, RXFDEP_N, 0, 16) +REG32(GRXFIFOSIZ2, 0x288) + FIELD(GRXFIFOSIZ2, RXFSTADDR_N, 16, 16) + FIELD(GRXFIFOSIZ2, RXFDEP_N, 0, 16) +REG32(GEVNTADRLO_0, 0x300) +REG32(GEVNTADRHI_0, 0x304) +REG32(GEVNTSIZ_0, 0x308) + FIELD(GEVNTSIZ_0, EVNTINTRPTMASK, 31, 1) + FIELD(GEVNTSIZ_0, RESERVED_30_16, 16, 15) + FIELD(GEVNTSIZ_0, EVENTSIZ, 0, 16) +REG32(GEVNTCOUNT_0, 0x30c) + FIELD(GEVNTCOUNT_0, EVNT_HANDLER_BUSY, 31, 1) + FIELD(GEVNTCOUNT_0, RESERVED_30_16, 16, 15) + FIELD(GEVNTCOUNT_0, EVNTCOUNT, 0, 16) +REG32(GEVNTADRLO_1, 0x310) +REG32(GEVNTADRHI_1, 0x314) +REG32(GEVNTSIZ_1, 0x318) + FIELD(GEVNTSIZ_1, EVNTINTRPTMASK, 31, 1) + FIELD(GEVNTSIZ_1, RESERVED_30_16, 16, 15) + FIELD(GEVNTSIZ_1, EVENTSIZ, 0, 16) +REG32(GEVNTCOUNT_1, 0x31c) + FIELD(GEVNTCOUNT_1, EVNT_HANDLER_BUSY, 31, 1) + FIELD(GEVNTCOUNT_1, RESERVED_30_16, 16, 15) + FIELD(GEVNTCOUNT_1, EVNTCOUNT, 0, 16) +REG32(GEVNTADRLO_2, 0x320) +REG32(GEVNTADRHI_2, 0x324) +REG32(GEVNTSIZ_2, 0x328) + FIELD(GEVNTSIZ_2, EVNTINTRPTMASK, 31, 1) + FIELD(GEVNTSIZ_2, RESERVED_30_16, 16, 15) + FIELD(GEVNTSIZ_2, EVENTSIZ, 0, 16) +REG32(GEVNTCOUNT_2, 0x32c) + FIELD(GEVNTCOUNT_2, EVNT_HANDLER_BUSY, 31, 1) + FIELD(GEVNTCOUNT_2, RESERVED_30_16, 16, 15) + FIELD(GEVNTCOUNT_2, EVNTCOUNT, 0, 16) +REG32(GEVNTADRLO_3, 0x330) +REG32(GEVNTADRHI_3, 0x334) +REG32(GEVNTSIZ_3, 0x338) + FIELD(GEVNTSIZ_3, EVNTINTRPTMASK, 31, 1) + FIELD(GEVNTSIZ_3, RESERVED_30_16, 16, 15) + FIELD(GEVNTSIZ_3, EVENTSIZ, 0, 16) +REG32(GEVNTCOUNT_3, 0x33c) + FIELD(GEVNTCOUNT_3, EVNT_HANDLER_BUSY, 31, 1) + FIELD(GEVNTCOUNT_3, RESERVED_30_16, 16, 15) + 
FIELD(GEVNTCOUNT_3, EVNTCOUNT, 0, 16) +REG32(GHWPARAMS8, 0x500) +REG32(GTXFIFOPRIDEV, 0x510) + FIELD(GTXFIFOPRIDEV, RESERVED_31_N, 6, 26) + FIELD(GTXFIFOPRIDEV, GTXFIFOPRIDEV, 0, 6) +REG32(GTXFIFOPRIHST, 0x518) + FIELD(GTXFIFOPRIHST, RESERVED_31_16, 3, 29) + FIELD(GTXFIFOPRIHST, GTXFIFOPRIHST, 0, 3) +REG32(GRXFIFOPRIHST, 0x51c) + FIELD(GRXFIFOPRIHST, RESERVED_31_16, 3, 29) + FIELD(GRXFIFOPRIHST, GRXFIFOPRIHST, 0, 3) +REG32(GDMAHLRATIO, 0x524) + FIELD(GDMAHLRATIO, RESERVED_31_13, 13, 19) + FIELD(GDMAHLRATIO, HSTRXFIFO, 8, 5) + FIELD(GDMAHLRATIO, RESERVED_7_5, 5, 3) + FIELD(GDMAHLRATIO, HSTTXFIFO, 0, 5) +REG32(GFLADJ, 0x530) + FIELD(GFLADJ, GFLADJ_REFCLK_240MHZDECR_PLS1, 31, 1) + FIELD(GFLADJ, GFLADJ_REFCLK_240MHZ_DECR, 24, 7) + FIELD(GFLADJ, GFLADJ_REFCLK_LPM_SEL, 23, 1) + FIELD(GFLADJ, RESERVED_22, 22, 1) + FIELD(GFLADJ, GFLADJ_REFCLK_FLADJ, 8, 14) + FIELD(GFLADJ, GFLADJ_30MHZ_SDBND_SEL, 7, 1) + FIELD(GFLADJ, GFLADJ_30MHZ, 0, 6) + +#define DWC3_GLOBAL_OFFSET 0xC100 +static void reset_csr(USBDWC3 * s) +{ + int i = 0; + /* + * We reset all CSR regs except GCTL, GUCTL, GSTS, GSNPSID, GGPIO, GUID, + * GUSB2PHYCFGn registers and GUSB3PIPECTLn registers. We will skip PHY + * register as we don't implement them. + */ + for (i = 0; i < USB_DWC3_R_MAX; i++) { + switch (i) { + case R_GCTL: + break; + case R_GSTS: + break; + case R_GSNPSID: + break; + case R_GGPIO: + break; + case R_GUID: + break; + case R_GUCTL: + break; + case R_GHWPARAMS0...R_GHWPARAMS7: + break; + case R_GHWPARAMS8: + break; + default: + register_reset(&s->regs_info[i]); + break; + } + } + + xhci_sysbus_reset(DEVICE(&s->sysbus_xhci)); +} + +static void usb_dwc3_gctl_postw(RegisterInfo *reg, uint64_t val64) +{ + USBDWC3 *s = USB_DWC3(reg->opaque); + + if (ARRAY_FIELD_EX32(s->regs, GCTL, CORESOFTRESET)) { + reset_csr(s); + } +} + +static void usb_dwc3_guid_postw(RegisterInfo *reg, uint64_t val64) +{ + USBDWC3 *s = USB_DWC3(reg->opaque); + + s->regs[R_GUID] = s->cfg.dwc_usb3_user; +} + +static const RegisterAccessInfo usb_dwc3_regs_info[] = { + { .name = "GSBUSCFG0", .addr = A_GSBUSCFG0, + .ro = 0xf300, + .unimp = 0xffffffff, + },{ .name = "GSBUSCFG1", .addr = A_GSBUSCFG1, + .reset = 0x300, + .ro = 0xffffe0ff, + .unimp = 0xffffffff, + },{ .name = "GTXTHRCFG", .addr = A_GTXTHRCFG, + .ro = 0xd000ffff, + .unimp = 0xffffffff, + },{ .name = "GRXTHRCFG", .addr = A_GRXTHRCFG, + .ro = 0xd007e000, + .unimp = 0xffffffff, + },{ .name = "GCTL", .addr = A_GCTL, + .reset = 0x30c13004, .post_write = usb_dwc3_gctl_postw, + },{ .name = "GPMSTS", .addr = A_GPMSTS, + .ro = 0xfffffff, + .unimp = 0xffffffff, + },{ .name = "GSTS", .addr = A_GSTS, + .reset = 0x7e800000, + .ro = 0xffffffcf, + .w1c = 0x30, + .unimp = 0xffffffff, + },{ .name = "GUCTL1", .addr = A_GUCTL1, + .reset = 0x198a, + .ro = 0x7800, + .unimp = 0xffffffff, + },{ .name = "GSNPSID", .addr = A_GSNPSID, + .reset = 0x5533330a, + .ro = 0xffffffff, + },{ .name = "GGPIO", .addr = A_GGPIO, + .ro = 0xffff, + .unimp = 0xffffffff, + },{ .name = "GUID", .addr = A_GUID, + .reset = 0x12345678, .post_write = usb_dwc3_guid_postw, + },{ .name = "GUCTL", .addr = A_GUCTL, + .reset = 0x0c808010, + .ro = 0x1c8000, + .unimp = 0xffffffff, + },{ .name = "GBUSERRADDRLO", .addr = A_GBUSERRADDRLO, + .ro = 0xffffffff, + },{ .name = "GBUSERRADDRHI", .addr = A_GBUSERRADDRHI, + .ro = 0xffffffff, + },{ .name = "GHWPARAMS0", .addr = A_GHWPARAMS0, + .ro = 0xffffffff, + },{ .name = "GHWPARAMS1", .addr = A_GHWPARAMS1, + .ro = 0xffffffff, + },{ .name = "GHWPARAMS2", .addr = A_GHWPARAMS2, + .ro = 0xffffffff, + },{ .name = 
"GHWPARAMS3", .addr = A_GHWPARAMS3, + .ro = 0xffffffff, + },{ .name = "GHWPARAMS4", .addr = A_GHWPARAMS4, + .ro = 0xffffffff, + },{ .name = "GHWPARAMS5", .addr = A_GHWPARAMS5, + .ro = 0xffffffff, + },{ .name = "GHWPARAMS6", .addr = A_GHWPARAMS6, + .ro = 0xffffffff, + },{ .name = "GHWPARAMS7", .addr = A_GHWPARAMS7, + .ro = 0xffffffff, + },{ .name = "GDBGFIFOSPACE", .addr = A_GDBGFIFOSPACE, + .reset = 0xa0000, + .ro = 0xfffffe00, + .unimp = 0xffffffff, + },{ .name = "GUCTL2", .addr = A_GUCTL2, + .reset = 0x40d, + .ro = 0x2000, + .unimp = 0xffffffff, + },{ .name = "GUSB2PHYCFG", .addr = A_GUSB2PHYCFG, + .reset = 0x40102410, + .ro = 0x1e014030, + .unimp = 0xffffffff, + },{ .name = "GUSB2I2CCTL", .addr = A_GUSB2I2CCTL, + .ro = 0xffffffff, + .unimp = 0xffffffff, + },{ .name = "GUSB2PHYACC_ULPI", .addr = A_GUSB2PHYACC_ULPI, + .ro = 0xfd000000, + .unimp = 0xffffffff, + },{ .name = "GTXFIFOSIZ0", .addr = A_GTXFIFOSIZ0, + .reset = 0x2c7000a, + .unimp = 0xffffffff, + },{ .name = "GTXFIFOSIZ1", .addr = A_GTXFIFOSIZ1, + .reset = 0x2d10103, + .unimp = 0xffffffff, + },{ .name = "GTXFIFOSIZ2", .addr = A_GTXFIFOSIZ2, + .reset = 0x3d40103, + .unimp = 0xffffffff, + },{ .name = "GTXFIFOSIZ3", .addr = A_GTXFIFOSIZ3, + .reset = 0x4d70083, + .unimp = 0xffffffff, + },{ .name = "GTXFIFOSIZ4", .addr = A_GTXFIFOSIZ4, + .reset = 0x55a0083, + .unimp = 0xffffffff, + },{ .name = "GTXFIFOSIZ5", .addr = A_GTXFIFOSIZ5, + .reset = 0x5dd0083, + .unimp = 0xffffffff, + },{ .name = "GRXFIFOSIZ0", .addr = A_GRXFIFOSIZ0, + .reset = 0x1c20105, + .unimp = 0xffffffff, + },{ .name = "GRXFIFOSIZ1", .addr = A_GRXFIFOSIZ1, + .reset = 0x2c70000, + .unimp = 0xffffffff, + },{ .name = "GRXFIFOSIZ2", .addr = A_GRXFIFOSIZ2, + .reset = 0x2c70000, + .unimp = 0xffffffff, + },{ .name = "GEVNTADRLO_0", .addr = A_GEVNTADRLO_0, + .unimp = 0xffffffff, + },{ .name = "GEVNTADRHI_0", .addr = A_GEVNTADRHI_0, + .unimp = 0xffffffff, + },{ .name = "GEVNTSIZ_0", .addr = A_GEVNTSIZ_0, + .ro = 0x7fff0000, + .unimp = 0xffffffff, + },{ .name = "GEVNTCOUNT_0", .addr = A_GEVNTCOUNT_0, + .ro = 0x7fff0000, + .unimp = 0xffffffff, + },{ .name = "GEVNTADRLO_1", .addr = A_GEVNTADRLO_1, + .unimp = 0xffffffff, + },{ .name = "GEVNTADRHI_1", .addr = A_GEVNTADRHI_1, + .unimp = 0xffffffff, + },{ .name = "GEVNTSIZ_1", .addr = A_GEVNTSIZ_1, + .ro = 0x7fff0000, + .unimp = 0xffffffff, + },{ .name = "GEVNTCOUNT_1", .addr = A_GEVNTCOUNT_1, + .ro = 0x7fff0000, + .unimp = 0xffffffff, + },{ .name = "GEVNTADRLO_2", .addr = A_GEVNTADRLO_2, + .unimp = 0xffffffff, + },{ .name = "GEVNTADRHI_2", .addr = A_GEVNTADRHI_2, + .unimp = 0xffffffff, + },{ .name = "GEVNTSIZ_2", .addr = A_GEVNTSIZ_2, + .ro = 0x7fff0000, + .unimp = 0xffffffff, + },{ .name = "GEVNTCOUNT_2", .addr = A_GEVNTCOUNT_2, + .ro = 0x7fff0000, + .unimp = 0xffffffff, + },{ .name = "GEVNTADRLO_3", .addr = A_GEVNTADRLO_3, + .unimp = 0xffffffff, + },{ .name = "GEVNTADRHI_3", .addr = A_GEVNTADRHI_3, + .unimp = 0xffffffff, + },{ .name = "GEVNTSIZ_3", .addr = A_GEVNTSIZ_3, + .ro = 0x7fff0000, + .unimp = 0xffffffff, + },{ .name = "GEVNTCOUNT_3", .addr = A_GEVNTCOUNT_3, + .ro = 0x7fff0000, + .unimp = 0xffffffff, + },{ .name = "GHWPARAMS8", .addr = A_GHWPARAMS8, + .ro = 0xffffffff, + },{ .name = "GTXFIFOPRIDEV", .addr = A_GTXFIFOPRIDEV, + .ro = 0xffffffc0, + .unimp = 0xffffffff, + },{ .name = "GTXFIFOPRIHST", .addr = A_GTXFIFOPRIHST, + .ro = 0xfffffff8, + .unimp = 0xffffffff, + },{ .name = "GRXFIFOPRIHST", .addr = A_GRXFIFOPRIHST, + .ro = 0xfffffff8, + .unimp = 0xffffffff, + },{ .name = "GDMAHLRATIO", .addr = A_GDMAHLRATIO, + .ro = 
0xffffe0e0, + .unimp = 0xffffffff, + },{ .name = "GFLADJ", .addr = A_GFLADJ, + .reset = 0xc83f020, + .rsvd = 0x40, + .ro = 0x400040, + .unimp = 0xffffffff, + } +}; + +static void usb_dwc3_reset(DeviceState *dev) +{ + USBDWC3 *s = USB_DWC3(dev); + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(s->regs_info); ++i) { + switch (i) { + case R_GHWPARAMS0...R_GHWPARAMS7: + break; + case R_GHWPARAMS8: + break; + default: + register_reset(&s->regs_info[i]); + }; + } + + xhci_sysbus_reset(DEVICE(&s->sysbus_xhci)); +} + +static const MemoryRegionOps usb_dwc3_ops = { + .read = register_read_memory, + .write = register_write_memory, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +static void usb_dwc3_realize(DeviceState *dev, Error **errp) +{ + USBDWC3 *s = USB_DWC3(dev); + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + Error *err = NULL; + + sysbus_realize(SYS_BUS_DEVICE(&s->sysbus_xhci), &err); + if (err) { + error_propagate(errp, err); + return; + } + + memory_region_add_subregion(&s->iomem, 0, + sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->sysbus_xhci), 0)); + sysbus_init_mmio(sbd, &s->iomem); + + /* + * Device Configuration + */ + s->regs[R_GHWPARAMS0] = 0x40204048 | s->cfg.mode; + s->regs[R_GHWPARAMS1] = 0x222493b; + s->regs[R_GHWPARAMS2] = 0x12345678; + s->regs[R_GHWPARAMS3] = 0x618c088; + s->regs[R_GHWPARAMS4] = 0x47822004; + s->regs[R_GHWPARAMS5] = 0x4202088; + s->regs[R_GHWPARAMS6] = 0x7850c20; + s->regs[R_GHWPARAMS7] = 0x0; + s->regs[R_GHWPARAMS8] = 0x478; +} + +static void usb_dwc3_init(Object *obj) +{ + USBDWC3 *s = USB_DWC3(obj); + RegisterInfoArray *reg_array; + + memory_region_init(&s->iomem, obj, TYPE_USB_DWC3, DWC3_SIZE); + reg_array = + register_init_block32(DEVICE(obj), usb_dwc3_regs_info, + ARRAY_SIZE(usb_dwc3_regs_info), + s->regs_info, s->regs, + &usb_dwc3_ops, + USB_DWC3_ERR_DEBUG, + USB_DWC3_R_MAX * 4); + memory_region_add_subregion(&s->iomem, + DWC3_GLOBAL_OFFSET, + ®_array->mem); + object_initialize_child(obj, "dwc3-xhci", &s->sysbus_xhci, + TYPE_XHCI_SYSBUS); + qdev_alias_all_properties(DEVICE(&s->sysbus_xhci), obj); + + s->cfg.mode = HOST_MODE; +} + +static const VMStateDescription vmstate_usb_dwc3 = { + .name = "usb-dwc3", + .version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32_ARRAY(regs, USBDWC3, USB_DWC3_R_MAX), + VMSTATE_UINT8(cfg.mode, USBDWC3), + VMSTATE_UINT32(cfg.dwc_usb3_user, USBDWC3), + VMSTATE_END_OF_LIST() + } +}; + +static Property usb_dwc3_properties[] = { + DEFINE_PROP_UINT32("DWC_USB3_USERID", USBDWC3, cfg.dwc_usb3_user, + 0x12345678), + DEFINE_PROP_END_OF_LIST(), +}; + +static void usb_dwc3_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = usb_dwc3_reset; + dc->realize = usb_dwc3_realize; + dc->vmsd = &vmstate_usb_dwc3; + device_class_set_props(dc, usb_dwc3_properties); +} + +static const TypeInfo usb_dwc3_info = { + .name = TYPE_USB_DWC3, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(USBDWC3), + .class_init = usb_dwc3_class_init, + .instance_init = usb_dwc3_init, +}; + +static void usb_dwc3_register_types(void) +{ + type_register_static(&usb_dwc3_info); +} + +type_init(usb_dwc3_register_types) diff --git a/hw/usb/hcd-ehci-pci.c b/hw/usb/hcd-ehci-pci.c new file mode 100644 index 000000000..4c37c8e22 --- /dev/null +++ b/hw/usb/hcd-ehci-pci.c @@ -0,0 +1,235 @@ +/* + * QEMU USB EHCI Emulation + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * 
License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program; if not, see <http://www.gnu.org/licenses/>. + */ + +#include "qemu/osdep.h" +#include "hw/qdev-properties.h" +#include "hw/usb/hcd-ehci.h" +#include "migration/vmstate.h" +#include "qemu/module.h" +#include "qemu/range.h" + +typedef struct EHCIPCIInfo { + const char *name; + uint16_t vendor_id; + uint16_t device_id; + uint8_t revision; + bool companion; +} EHCIPCIInfo; + +static void usb_ehci_pci_realize(PCIDevice *dev, Error **errp) +{ + EHCIPCIState *i = PCI_EHCI(dev); + EHCIState *s = &i->ehci; + uint8_t *pci_conf = dev->config; + + pci_set_byte(&pci_conf[PCI_CLASS_PROG], 0x20); + + /* capabilities pointer */ + pci_set_byte(&pci_conf[PCI_CAPABILITY_LIST], 0x00); + /* pci_set_byte(&pci_conf[PCI_CAPABILITY_LIST], 0x50); */ + + pci_set_byte(&pci_conf[PCI_INTERRUPT_PIN], 4); /* interrupt pin D */ + pci_set_byte(&pci_conf[PCI_MIN_GNT], 0); + pci_set_byte(&pci_conf[PCI_MAX_LAT], 0); + + /* pci_conf[0x50] = 0x01; *//* power management caps */ + + pci_set_byte(&pci_conf[USB_SBRN], USB_RELEASE_2); /* release # (2.1.4) */ + pci_set_byte(&pci_conf[0x61], 0x20); /* frame length adjustment (2.1.5) */ + pci_set_word(&pci_conf[0x62], 0x00); /* port wake up capability (2.1.6) */ + + pci_conf[0x64] = 0x00; + pci_conf[0x65] = 0x00; + pci_conf[0x66] = 0x00; + pci_conf[0x67] = 0x00; + pci_conf[0x68] = 0x01; + pci_conf[0x69] = 0x00; + pci_conf[0x6a] = 0x00; + pci_conf[0x6b] = 0x00; /* USBLEGSUP */ + pci_conf[0x6c] = 0x00; + pci_conf[0x6d] = 0x00; + pci_conf[0x6e] = 0x00; + pci_conf[0x6f] = 0xc0; /* USBLEFCTLSTS */ + + s->irq = pci_allocate_irq(dev); + s->as = pci_get_address_space(dev); + + usb_ehci_realize(s, DEVICE(dev), NULL); + pci_register_bar(dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &s->mem); +} + +static void usb_ehci_pci_init(Object *obj) +{ + DeviceClass *dc = OBJECT_GET_CLASS(DeviceClass, obj, TYPE_DEVICE); + EHCIPCIState *i = PCI_EHCI(obj); + EHCIState *s = &i->ehci; + + s->caps[0x09] = 0x68; /* EECP */ + + s->capsbase = 0x00; + s->opregbase = 0x20; + s->portscbase = 0x44; + s->portnr = NB_PORTS; + + if (!dc->hotpluggable) { + s->companion_enable = true; + } + + usb_ehci_init(s, DEVICE(obj)); +} + +static void usb_ehci_pci_finalize(Object *obj) +{ + EHCIPCIState *i = PCI_EHCI(obj); + EHCIState *s = &i->ehci; + + usb_ehci_finalize(s); +} + +static void usb_ehci_pci_exit(PCIDevice *dev) +{ + EHCIPCIState *i = PCI_EHCI(dev); + EHCIState *s = &i->ehci; + + usb_ehci_unrealize(s, DEVICE(dev)); + + g_free(s->irq); + s->irq = NULL; +} + +static void usb_ehci_pci_reset(DeviceState *dev) +{ + PCIDevice *pci_dev = PCI_DEVICE(dev); + EHCIPCIState *i = PCI_EHCI(pci_dev); + EHCIState *s = &i->ehci; + + ehci_reset(s); +} + +static void usb_ehci_pci_write_config(PCIDevice *dev, uint32_t addr, + uint32_t val, int l) +{ + EHCIPCIState *i = PCI_EHCI(dev); + bool busmaster; + + pci_default_write_config(dev, addr, val, l); + + if (!range_covers_byte(addr, l, PCI_COMMAND)) { + return; + } + busmaster = pci_get_word(dev->config + PCI_COMMAND) & PCI_COMMAND_MASTER; + i->ehci.as = busmaster ? 
pci_get_address_space(dev) : &address_space_memory; +} + +static Property ehci_pci_properties[] = { + DEFINE_PROP_UINT32("maxframes", EHCIPCIState, ehci.maxframes, 128), + DEFINE_PROP_END_OF_LIST(), +}; + +static const VMStateDescription vmstate_ehci_pci = { + .name = "ehci", + .version_id = 2, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_PCI_DEVICE(pcidev, EHCIPCIState), + VMSTATE_STRUCT(ehci, EHCIPCIState, 2, vmstate_ehci, EHCIState), + VMSTATE_END_OF_LIST() + } +}; + +static void ehci_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + + k->realize = usb_ehci_pci_realize; + k->exit = usb_ehci_pci_exit; + k->class_id = PCI_CLASS_SERIAL_USB; + k->config_write = usb_ehci_pci_write_config; + dc->vmsd = &vmstate_ehci_pci; + device_class_set_props(dc, ehci_pci_properties); + dc->reset = usb_ehci_pci_reset; +} + +static const TypeInfo ehci_pci_type_info = { + .name = TYPE_PCI_EHCI, + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(EHCIPCIState), + .instance_init = usb_ehci_pci_init, + .instance_finalize = usb_ehci_pci_finalize, + .abstract = true, + .class_init = ehci_class_init, + .interfaces = (InterfaceInfo[]) { + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { }, + }, +}; + +static void ehci_data_class_init(ObjectClass *klass, void *data) +{ + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + DeviceClass *dc = DEVICE_CLASS(klass); + EHCIPCIInfo *i = data; + + k->vendor_id = i->vendor_id; + k->device_id = i->device_id; + k->revision = i->revision; + set_bit(DEVICE_CATEGORY_USB, dc->categories); + if (i->companion) { + dc->hotpluggable = false; + } +} + +static struct EHCIPCIInfo ehci_pci_info[] = { + { + .name = "usb-ehci", + .vendor_id = PCI_VENDOR_ID_INTEL, + .device_id = PCI_DEVICE_ID_INTEL_82801D, /* ich4 */ + .revision = 0x10, + },{ + .name = "ich9-usb-ehci1", /* 00:1d.7 */ + .vendor_id = PCI_VENDOR_ID_INTEL, + .device_id = PCI_DEVICE_ID_INTEL_82801I_EHCI1, + .revision = 0x03, + .companion = true, + },{ + .name = "ich9-usb-ehci2", /* 00:1a.7 */ + .vendor_id = PCI_VENDOR_ID_INTEL, + .device_id = PCI_DEVICE_ID_INTEL_82801I_EHCI2, + .revision = 0x03, + .companion = true, + } +}; + +static void ehci_pci_register_types(void) +{ + TypeInfo ehci_type_info = { + .parent = TYPE_PCI_EHCI, + .class_init = ehci_data_class_init, + }; + int i; + + type_register_static(&ehci_pci_type_info); + + for (i = 0; i < ARRAY_SIZE(ehci_pci_info); i++) { + ehci_type_info.name = ehci_pci_info[i].name; + ehci_type_info.class_data = ehci_pci_info + i; + type_register(&ehci_type_info); + } +} + +type_init(ehci_pci_register_types) diff --git a/hw/usb/hcd-ehci-sysbus.c b/hw/usb/hcd-ehci-sysbus.c new file mode 100644 index 000000000..a12e21884 --- /dev/null +++ b/hw/usb/hcd-ehci-sysbus.c @@ -0,0 +1,305 @@ +/* + * QEMU USB EHCI Emulation + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program; if not, see <http://www.gnu.org/licenses/>. 
+ */ + +#include "qemu/osdep.h" +#include "hw/qdev-properties.h" +#include "hw/usb/hcd-ehci.h" +#include "migration/vmstate.h" +#include "qemu/module.h" + +static const VMStateDescription vmstate_ehci_sysbus = { + .name = "ehci-sysbus", + .version_id = 2, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_STRUCT(ehci, EHCISysBusState, 2, vmstate_ehci, EHCIState), + VMSTATE_END_OF_LIST() + } +}; + +static Property ehci_sysbus_properties[] = { + DEFINE_PROP_UINT32("maxframes", EHCISysBusState, ehci.maxframes, 128), + DEFINE_PROP_BOOL("companion-enable", EHCISysBusState, ehci.companion_enable, + false), + DEFINE_PROP_END_OF_LIST(), +}; + +static void usb_ehci_sysbus_realize(DeviceState *dev, Error **errp) +{ + SysBusDevice *d = SYS_BUS_DEVICE(dev); + EHCISysBusState *i = SYS_BUS_EHCI(dev); + EHCIState *s = &i->ehci; + + usb_ehci_realize(s, dev, errp); + sysbus_init_irq(d, &s->irq); +} + +static void usb_ehci_sysbus_reset(DeviceState *dev) +{ + SysBusDevice *d = SYS_BUS_DEVICE(dev); + EHCISysBusState *i = SYS_BUS_EHCI(d); + EHCIState *s = &i->ehci; + + ehci_reset(s); +} + +static void ehci_sysbus_init(Object *obj) +{ + SysBusDevice *d = SYS_BUS_DEVICE(obj); + EHCISysBusState *i = SYS_BUS_EHCI(obj); + SysBusEHCIClass *sec = SYS_BUS_EHCI_GET_CLASS(obj); + EHCIState *s = &i->ehci; + + s->capsbase = sec->capsbase; + s->opregbase = sec->opregbase; + s->portscbase = sec->portscbase; + s->portnr = sec->portnr; + s->as = &address_space_memory; + + usb_ehci_init(s, DEVICE(obj)); + sysbus_init_mmio(d, &s->mem); +} + +static void ehci_sysbus_finalize(Object *obj) +{ + EHCISysBusState *i = SYS_BUS_EHCI(obj); + EHCIState *s = &i->ehci; + + usb_ehci_finalize(s); +} + +static void ehci_sysbus_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + SysBusEHCIClass *sec = SYS_BUS_EHCI_CLASS(klass); + + sec->portscbase = 0x44; + sec->portnr = NB_PORTS; + + dc->realize = usb_ehci_sysbus_realize; + dc->vmsd = &vmstate_ehci_sysbus; + device_class_set_props(dc, ehci_sysbus_properties); + dc->reset = usb_ehci_sysbus_reset; + set_bit(DEVICE_CATEGORY_USB, dc->categories); +} + +static const TypeInfo ehci_type_info = { + .name = TYPE_SYS_BUS_EHCI, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(EHCISysBusState), + .instance_init = ehci_sysbus_init, + .instance_finalize = ehci_sysbus_finalize, + .abstract = true, + .class_init = ehci_sysbus_class_init, + .class_size = sizeof(SysBusEHCIClass), +}; + +static void ehci_platform_class_init(ObjectClass *oc, void *data) +{ + SysBusEHCIClass *sec = SYS_BUS_EHCI_CLASS(oc); + DeviceClass *dc = DEVICE_CLASS(oc); + + sec->capsbase = 0x0; + sec->opregbase = 0x20; + set_bit(DEVICE_CATEGORY_USB, dc->categories); +} + +static const TypeInfo ehci_platform_type_info = { + .name = TYPE_PLATFORM_EHCI, + .parent = TYPE_SYS_BUS_EHCI, + .class_init = ehci_platform_class_init, +}; + +static void ehci_exynos4210_class_init(ObjectClass *oc, void *data) +{ + SysBusEHCIClass *sec = SYS_BUS_EHCI_CLASS(oc); + DeviceClass *dc = DEVICE_CLASS(oc); + + sec->capsbase = 0x0; + sec->opregbase = 0x10; + set_bit(DEVICE_CATEGORY_USB, dc->categories); +} + +static const TypeInfo ehci_exynos4210_type_info = { + .name = TYPE_EXYNOS4210_EHCI, + .parent = TYPE_SYS_BUS_EHCI, + .class_init = ehci_exynos4210_class_init, +}; + +static void ehci_aw_h3_class_init(ObjectClass *oc, void *data) +{ + SysBusEHCIClass *sec = SYS_BUS_EHCI_CLASS(oc); + DeviceClass *dc = DEVICE_CLASS(oc); + + sec->capsbase = 0x0; + sec->opregbase = 0x10; + set_bit(DEVICE_CATEGORY_USB, 
dc->categories); +} + +static const TypeInfo ehci_aw_h3_type_info = { + .name = TYPE_AW_H3_EHCI, + .parent = TYPE_SYS_BUS_EHCI, + .class_init = ehci_aw_h3_class_init, +}; + +static void ehci_npcm7xx_class_init(ObjectClass *oc, void *data) +{ + SysBusEHCIClass *sec = SYS_BUS_EHCI_CLASS(oc); + DeviceClass *dc = DEVICE_CLASS(oc); + + sec->capsbase = 0x0; + sec->opregbase = 0x10; + sec->portscbase = 0x44; + sec->portnr = 1; + set_bit(DEVICE_CATEGORY_USB, dc->categories); +} + +static const TypeInfo ehci_npcm7xx_type_info = { + .name = TYPE_NPCM7XX_EHCI, + .parent = TYPE_SYS_BUS_EHCI, + .class_init = ehci_npcm7xx_class_init, +}; + +static void ehci_tegra2_class_init(ObjectClass *oc, void *data) +{ + SysBusEHCIClass *sec = SYS_BUS_EHCI_CLASS(oc); + DeviceClass *dc = DEVICE_CLASS(oc); + + sec->capsbase = 0x100; + sec->opregbase = 0x140; + set_bit(DEVICE_CATEGORY_USB, dc->categories); +} + +static const TypeInfo ehci_tegra2_type_info = { + .name = TYPE_TEGRA2_EHCI, + .parent = TYPE_SYS_BUS_EHCI, + .class_init = ehci_tegra2_class_init, +}; + +static void ehci_ppc4xx_init(Object *o) +{ + EHCISysBusState *s = SYS_BUS_EHCI(o); + + s->ehci.companion_enable = true; +} + +static void ehci_ppc4xx_class_init(ObjectClass *oc, void *data) +{ + SysBusEHCIClass *sec = SYS_BUS_EHCI_CLASS(oc); + DeviceClass *dc = DEVICE_CLASS(oc); + + sec->capsbase = 0x0; + sec->opregbase = 0x10; + set_bit(DEVICE_CATEGORY_USB, dc->categories); +} + +static const TypeInfo ehci_ppc4xx_type_info = { + .name = TYPE_PPC4xx_EHCI, + .parent = TYPE_SYS_BUS_EHCI, + .class_init = ehci_ppc4xx_class_init, + .instance_init = ehci_ppc4xx_init, +}; + +/* + * Faraday FUSBH200 USB 2.0 EHCI + */ + +/** + * FUSBH200EHCIRegs: + * @FUSBH200_REG_EOF_ASTR: EOF/Async. Sleep Timer Register + * @FUSBH200_REG_BMCSR: Bus Monitor Control/Status Register + */ +enum FUSBH200EHCIRegs { + FUSBH200_REG_EOF_ASTR = 0x34, + FUSBH200_REG_BMCSR = 0x40, +}; + +static uint64_t fusbh200_ehci_read(void *opaque, hwaddr addr, unsigned size) +{ + EHCIState *s = opaque; + hwaddr off = s->opregbase + s->portscbase + 4 * s->portnr + addr; + + switch (off) { + case FUSBH200_REG_EOF_ASTR: + return 0x00000041; + case FUSBH200_REG_BMCSR: + /* High-Speed, VBUS valid, interrupt level-high active */ + return (2 << 9) | (1 << 8) | (1 << 3); + } + + return 0; +} + +static void fusbh200_ehci_write(void *opaque, hwaddr addr, uint64_t val, + unsigned size) +{ +} + +static const MemoryRegionOps fusbh200_ehci_mmio_ops = { + .read = fusbh200_ehci_read, + .write = fusbh200_ehci_write, + .valid.min_access_size = 4, + .valid.max_access_size = 4, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static void fusbh200_ehci_init(Object *obj) +{ + EHCISysBusState *i = SYS_BUS_EHCI(obj); + FUSBH200EHCIState *f = FUSBH200_EHCI(obj); + EHCIState *s = &i->ehci; + + memory_region_init_io(&f->mem_vendor, OBJECT(f), &fusbh200_ehci_mmio_ops, s, + "fusbh200", 0x4c); + memory_region_add_subregion(&s->mem, + s->opregbase + s->portscbase + 4 * s->portnr, + &f->mem_vendor); +} + +static void fusbh200_ehci_class_init(ObjectClass *oc, void *data) +{ + SysBusEHCIClass *sec = SYS_BUS_EHCI_CLASS(oc); + DeviceClass *dc = DEVICE_CLASS(oc); + + sec->capsbase = 0x0; + sec->opregbase = 0x10; + sec->portscbase = 0x20; + sec->portnr = 1; + set_bit(DEVICE_CATEGORY_USB, dc->categories); +} + +static const TypeInfo ehci_fusbh200_type_info = { + .name = TYPE_FUSBH200_EHCI, + .parent = TYPE_SYS_BUS_EHCI, + .instance_size = sizeof(FUSBH200EHCIState), + .instance_init = fusbh200_ehci_init, + .class_init = fusbh200_ehci_class_init, +}; 
+ +static void ehci_sysbus_register_types(void) +{ + type_register_static(&ehci_type_info); + type_register_static(&ehci_platform_type_info); + type_register_static(&ehci_exynos4210_type_info); + type_register_static(&ehci_aw_h3_type_info); + type_register_static(&ehci_npcm7xx_type_info); + type_register_static(&ehci_tegra2_type_info); + type_register_static(&ehci_ppc4xx_type_info); + type_register_static(&ehci_fusbh200_type_info); +} + +type_init(ehci_sysbus_register_types) diff --git a/hw/usb/hcd-ehci.c b/hw/usb/hcd-ehci.c new file mode 100644 index 000000000..6caa7ac6c --- /dev/null +++ b/hw/usb/hcd-ehci.c @@ -0,0 +1,2598 @@ +/* + * QEMU USB EHCI Emulation + * + * Copyright(c) 2008 Emutex Ltd. (address@hidden) + * Copyright(c) 2011-2012 Red Hat, Inc. + * + * Red Hat Authors: + * Gerd Hoffmann + * Hans de Goede + * + * EHCI project was started by Mark Burkley, with contributions by + * Niels de Vos. David S. Ahern continued working on it. Kevin Wolf, + * Jan Kiszka and Vincent Palatin contributed bugfixes. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program; if not, see <http://www.gnu.org/licenses/>. + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "hw/irq.h" +#include "hw/usb/ehci-regs.h" +#include "hw/usb/hcd-ehci.h" +#include "migration/vmstate.h" +#include "trace.h" +#include "qemu/error-report.h" +#include "qemu/main-loop.h" +#include "sysemu/runstate.h" + +#define FRAME_TIMER_FREQ 1000 +#define FRAME_TIMER_NS (NANOSECONDS_PER_SECOND / FRAME_TIMER_FREQ) +#define UFRAME_TIMER_NS (FRAME_TIMER_NS / 8) + +#define NB_MAXINTRATE 8 // Max rate at which controller issues ints +#define BUFF_SIZE 5*4096 // Max bytes to transfer per transaction +#define MAX_QH 100 // Max allowable queue heads in a chain +#define MIN_UFR_PER_TICK 24 /* Min frames to process when catching up */ +#define PERIODIC_ACTIVE 512 /* Micro-frames */ + +/* Internal periodic / asynchronous schedule state machine states + */ +typedef enum { + EST_INACTIVE = 1000, + EST_ACTIVE, + EST_EXECUTING, + EST_SLEEPING, + /* The following states are internal to the state machine function + */ + EST_WAITLISTHEAD, + EST_FETCHENTRY, + EST_FETCHQH, + EST_FETCHITD, + EST_FETCHSITD, + EST_ADVANCEQUEUE, + EST_FETCHQTD, + EST_EXECUTE, + EST_WRITEBACK, + EST_HORIZONTALQH +} EHCI_STATES; + +/* macros for accessing fields within next link pointer entry */ +#define NLPTR_GET(x) ((x) & 0xffffffe0) +#define NLPTR_TYPE_GET(x) (((x) >> 1) & 3) +#define NLPTR_TBIT(x) ((x) & 1) // 1=invalid, 0=valid + +/* link pointer types */ +#define NLPTR_TYPE_ITD 0 // isoc xfer descriptor +#define NLPTR_TYPE_QH 1 // queue head +#define NLPTR_TYPE_STITD 2 // split xaction, isoc xfer descriptor +#define NLPTR_TYPE_FSTN 3 // frame span traversal node + +#define SET_LAST_RUN_CLOCK(s) \ + (s)->last_run_ns = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + +/* nifty macros from Arnon's EHCI version */ +#define get_field(data, field) \ + (((data) & field##_MASK) >> field##_SH) + +#define set_field(data, newval, 
field) do { \ + uint32_t val = *data; \ + val &= ~ field##_MASK; \ + val |= ((newval) << field##_SH) & field##_MASK; \ + *data = val; \ + } while(0) + +static const char *ehci_state_names[] = { + [EST_INACTIVE] = "INACTIVE", + [EST_ACTIVE] = "ACTIVE", + [EST_EXECUTING] = "EXECUTING", + [EST_SLEEPING] = "SLEEPING", + [EST_WAITLISTHEAD] = "WAITLISTHEAD", + [EST_FETCHENTRY] = "FETCH ENTRY", + [EST_FETCHQH] = "FETCH QH", + [EST_FETCHITD] = "FETCH ITD", + [EST_ADVANCEQUEUE] = "ADVANCEQUEUE", + [EST_FETCHQTD] = "FETCH QTD", + [EST_EXECUTE] = "EXECUTE", + [EST_WRITEBACK] = "WRITEBACK", + [EST_HORIZONTALQH] = "HORIZONTALQH", +}; + +static const char *ehci_mmio_names[] = { + [USBCMD] = "USBCMD", + [USBSTS] = "USBSTS", + [USBINTR] = "USBINTR", + [FRINDEX] = "FRINDEX", + [PERIODICLISTBASE] = "P-LIST BASE", + [ASYNCLISTADDR] = "A-LIST ADDR", + [CONFIGFLAG] = "CONFIGFLAG", +}; + +static int ehci_state_executing(EHCIQueue *q); +static int ehci_state_writeback(EHCIQueue *q); +static int ehci_state_advqueue(EHCIQueue *q); +static int ehci_fill_queue(EHCIPacket *p); +static void ehci_free_packet(EHCIPacket *p); + +static const char *nr2str(const char **n, size_t len, uint32_t nr) +{ + if (nr < len && n[nr] != NULL) { + return n[nr]; + } else { + return "unknown"; + } +} + +static const char *state2str(uint32_t state) +{ + return nr2str(ehci_state_names, ARRAY_SIZE(ehci_state_names), state); +} + +static const char *addr2str(hwaddr addr) +{ + return nr2str(ehci_mmio_names, ARRAY_SIZE(ehci_mmio_names), addr); +} + +static void ehci_trace_usbsts(uint32_t mask, int state) +{ + /* interrupts */ + if (mask & USBSTS_INT) { + trace_usb_ehci_usbsts("INT", state); + } + if (mask & USBSTS_ERRINT) { + trace_usb_ehci_usbsts("ERRINT", state); + } + if (mask & USBSTS_PCD) { + trace_usb_ehci_usbsts("PCD", state); + } + if (mask & USBSTS_FLR) { + trace_usb_ehci_usbsts("FLR", state); + } + if (mask & USBSTS_HSE) { + trace_usb_ehci_usbsts("HSE", state); + } + if (mask & USBSTS_IAA) { + trace_usb_ehci_usbsts("IAA", state); + } + + /* status */ + if (mask & USBSTS_HALT) { + trace_usb_ehci_usbsts("HALT", state); + } + if (mask & USBSTS_REC) { + trace_usb_ehci_usbsts("REC", state); + } + if (mask & USBSTS_PSS) { + trace_usb_ehci_usbsts("PSS", state); + } + if (mask & USBSTS_ASS) { + trace_usb_ehci_usbsts("ASS", state); + } +} + +static inline void ehci_set_usbsts(EHCIState *s, int mask) +{ + if ((s->usbsts & mask) == mask) { + return; + } + ehci_trace_usbsts(mask, 1); + s->usbsts |= mask; +} + +static inline void ehci_clear_usbsts(EHCIState *s, int mask) +{ + if ((s->usbsts & mask) == 0) { + return; + } + ehci_trace_usbsts(mask, 0); + s->usbsts &= ~mask; +} + +/* update irq line */ +static inline void ehci_update_irq(EHCIState *s) +{ + int level = 0; + + if ((s->usbsts & USBINTR_MASK) & s->usbintr) { + level = 1; + } + + trace_usb_ehci_irq(level, s->frindex, s->usbsts, s->usbintr); + qemu_set_irq(s->irq, level); +} + +/* flag interrupt condition */ +static inline void ehci_raise_irq(EHCIState *s, int intr) +{ + if (intr & (USBSTS_PCD | USBSTS_FLR | USBSTS_HSE)) { + s->usbsts |= intr; + ehci_update_irq(s); + } else { + s->usbsts_pending |= intr; + } +} + +/* + * Commit pending interrupts (added via ehci_raise_irq), + * at the rate allowed by "Interrupt Threshold Control". 
+ */ +static inline void ehci_commit_irq(EHCIState *s) +{ + uint32_t itc; + + if (!s->usbsts_pending) { + return; + } + if (s->usbsts_frindex > s->frindex) { + return; + } + + itc = (s->usbcmd >> 16) & 0xff; + s->usbsts |= s->usbsts_pending; + s->usbsts_pending = 0; + s->usbsts_frindex = s->frindex + itc; + ehci_update_irq(s); +} + +static void ehci_update_halt(EHCIState *s) +{ + if (s->usbcmd & USBCMD_RUNSTOP) { + ehci_clear_usbsts(s, USBSTS_HALT); + } else { + if (s->astate == EST_INACTIVE && s->pstate == EST_INACTIVE) { + ehci_set_usbsts(s, USBSTS_HALT); + } + } +} + +static void ehci_set_state(EHCIState *s, int async, int state) +{ + if (async) { + trace_usb_ehci_state("async", state2str(state)); + s->astate = state; + if (s->astate == EST_INACTIVE) { + ehci_clear_usbsts(s, USBSTS_ASS); + ehci_update_halt(s); + } else { + ehci_set_usbsts(s, USBSTS_ASS); + } + } else { + trace_usb_ehci_state("periodic", state2str(state)); + s->pstate = state; + if (s->pstate == EST_INACTIVE) { + ehci_clear_usbsts(s, USBSTS_PSS); + ehci_update_halt(s); + } else { + ehci_set_usbsts(s, USBSTS_PSS); + } + } +} + +static int ehci_get_state(EHCIState *s, int async) +{ + return async ? s->astate : s->pstate; +} + +static void ehci_set_fetch_addr(EHCIState *s, int async, uint32_t addr) +{ + if (async) { + s->a_fetch_addr = addr; + } else { + s->p_fetch_addr = addr; + } +} + +static int ehci_get_fetch_addr(EHCIState *s, int async) +{ + return async ? s->a_fetch_addr : s->p_fetch_addr; +} + +static void ehci_trace_qh(EHCIQueue *q, hwaddr addr, EHCIqh *qh) +{ + /* need three here due to argument count limits */ + trace_usb_ehci_qh_ptrs(q, addr, qh->next, + qh->current_qtd, qh->next_qtd, qh->altnext_qtd); + trace_usb_ehci_qh_fields(addr, + get_field(qh->epchar, QH_EPCHAR_RL), + get_field(qh->epchar, QH_EPCHAR_MPLEN), + get_field(qh->epchar, QH_EPCHAR_EPS), + get_field(qh->epchar, QH_EPCHAR_EP), + get_field(qh->epchar, QH_EPCHAR_DEVADDR)); + trace_usb_ehci_qh_bits(addr, + (bool)(qh->epchar & QH_EPCHAR_C), + (bool)(qh->epchar & QH_EPCHAR_H), + (bool)(qh->epchar & QH_EPCHAR_DTC), + (bool)(qh->epchar & QH_EPCHAR_I)); +} + +static void ehci_trace_qtd(EHCIQueue *q, hwaddr addr, EHCIqtd *qtd) +{ + /* need three here due to argument count limits */ + trace_usb_ehci_qtd_ptrs(q, addr, qtd->next, qtd->altnext); + trace_usb_ehci_qtd_fields(addr, + get_field(qtd->token, QTD_TOKEN_TBYTES), + get_field(qtd->token, QTD_TOKEN_CPAGE), + get_field(qtd->token, QTD_TOKEN_CERR), + get_field(qtd->token, QTD_TOKEN_PID)); + trace_usb_ehci_qtd_bits(addr, + (bool)(qtd->token & QTD_TOKEN_IOC), + (bool)(qtd->token & QTD_TOKEN_ACTIVE), + (bool)(qtd->token & QTD_TOKEN_HALT), + (bool)(qtd->token & QTD_TOKEN_BABBLE), + (bool)(qtd->token & QTD_TOKEN_XACTERR)); +} + +static void ehci_trace_itd(EHCIState *s, hwaddr addr, EHCIitd *itd) +{ + trace_usb_ehci_itd(addr, itd->next, + get_field(itd->bufptr[1], ITD_BUFPTR_MAXPKT), + get_field(itd->bufptr[2], ITD_BUFPTR_MULT), + get_field(itd->bufptr[0], ITD_BUFPTR_EP), + get_field(itd->bufptr[0], ITD_BUFPTR_DEVADDR)); +} + +static void ehci_trace_sitd(EHCIState *s, hwaddr addr, + EHCIsitd *sitd) +{ + trace_usb_ehci_sitd(addr, sitd->next, + (bool)(sitd->results & SITD_RESULTS_ACTIVE)); +} + +static void ehci_trace_guest_bug(EHCIState *s, const char *message) +{ + trace_usb_ehci_guest_bug(message); +} + +static inline bool ehci_enabled(EHCIState *s) +{ + return s->usbcmd & USBCMD_RUNSTOP; +} + +static inline bool ehci_async_enabled(EHCIState *s) +{ + return ehci_enabled(s) && (s->usbcmd & USBCMD_ASE); +} + 
+static inline bool ehci_periodic_enabled(EHCIState *s) +{ + return ehci_enabled(s) && (s->usbcmd & USBCMD_PSE); +} + +/* Get an array of dwords from main memory */ +static inline int get_dwords(EHCIState *ehci, uint32_t addr, + uint32_t *buf, int num) +{ + int i; + + if (!ehci->as) { + ehci_raise_irq(ehci, USBSTS_HSE); + ehci->usbcmd &= ~USBCMD_RUNSTOP; + trace_usb_ehci_dma_error(); + return -1; + } + + for (i = 0; i < num; i++, buf++, addr += sizeof(*buf)) { + dma_memory_read(ehci->as, addr, buf, sizeof(*buf)); + *buf = le32_to_cpu(*buf); + } + + return num; +} + +/* Put an array of dwords in to main memory */ +static inline int put_dwords(EHCIState *ehci, uint32_t addr, + uint32_t *buf, int num) +{ + int i; + + if (!ehci->as) { + ehci_raise_irq(ehci, USBSTS_HSE); + ehci->usbcmd &= ~USBCMD_RUNSTOP; + trace_usb_ehci_dma_error(); + return -1; + } + + for (i = 0; i < num; i++, buf++, addr += sizeof(*buf)) { + uint32_t tmp = cpu_to_le32(*buf); + dma_memory_write(ehci->as, addr, &tmp, sizeof(tmp)); + } + + return num; +} + +static int ehci_get_pid(EHCIqtd *qtd) +{ + switch (get_field(qtd->token, QTD_TOKEN_PID)) { + case 0: + return USB_TOKEN_OUT; + case 1: + return USB_TOKEN_IN; + case 2: + return USB_TOKEN_SETUP; + default: + fprintf(stderr, "bad token\n"); + return 0; + } +} + +static bool ehci_verify_qh(EHCIQueue *q, EHCIqh *qh) +{ + uint32_t devaddr = get_field(qh->epchar, QH_EPCHAR_DEVADDR); + uint32_t endp = get_field(qh->epchar, QH_EPCHAR_EP); + if ((devaddr != get_field(q->qh.epchar, QH_EPCHAR_DEVADDR)) || + (endp != get_field(q->qh.epchar, QH_EPCHAR_EP)) || + (qh->current_qtd != q->qh.current_qtd) || + (q->async && qh->next_qtd != q->qh.next_qtd) || + (memcmp(&qh->altnext_qtd, &q->qh.altnext_qtd, + 7 * sizeof(uint32_t)) != 0) || + (q->dev != NULL && q->dev->addr != devaddr)) { + return false; + } else { + return true; + } +} + +static bool ehci_verify_qtd(EHCIPacket *p, EHCIqtd *qtd) +{ + if (p->qtdaddr != p->queue->qtdaddr || + (p->queue->async && !NLPTR_TBIT(p->qtd.next) && + (p->qtd.next != qtd->next)) || + (!NLPTR_TBIT(p->qtd.altnext) && (p->qtd.altnext != qtd->altnext)) || + p->qtd.token != qtd->token || + p->qtd.bufptr[0] != qtd->bufptr[0]) { + return false; + } else { + return true; + } +} + +static bool ehci_verify_pid(EHCIQueue *q, EHCIqtd *qtd) +{ + int ep = get_field(q->qh.epchar, QH_EPCHAR_EP); + int pid = ehci_get_pid(qtd); + + /* Note the pid changing is normal for ep 0 (the control ep) */ + if (q->last_pid && ep != 0 && pid != q->last_pid) { + return false; + } else { + return true; + } +} + +/* Finish executing and writeback a packet outside of the regular + fetchqh -> fetchqtd -> execute -> writeback cycle */ +static void ehci_writeback_async_complete_packet(EHCIPacket *p) +{ + EHCIQueue *q = p->queue; + EHCIqtd qtd; + EHCIqh qh; + int state; + + /* Verify the qh + qtd, like we do when going through fetchqh & fetchqtd */ + get_dwords(q->ehci, NLPTR_GET(q->qhaddr), + (uint32_t *) &qh, sizeof(EHCIqh) >> 2); + get_dwords(q->ehci, NLPTR_GET(q->qtdaddr), + (uint32_t *) &qtd, sizeof(EHCIqtd) >> 2); + if (!ehci_verify_qh(q, &qh) || !ehci_verify_qtd(p, &qtd)) { + p->async = EHCI_ASYNC_INITIALIZED; + ehci_free_packet(p); + return; + } + + state = ehci_get_state(q->ehci, q->async); + ehci_state_executing(q); + ehci_state_writeback(q); /* Frees the packet! 
*/ + if (!(q->qh.token & QTD_TOKEN_HALT)) { + ehci_state_advqueue(q); + } + ehci_set_state(q->ehci, q->async, state); +} + +/* packet management */ + +static EHCIPacket *ehci_alloc_packet(EHCIQueue *q) +{ + EHCIPacket *p; + + p = g_new0(EHCIPacket, 1); + p->queue = q; + usb_packet_init(&p->packet); + QTAILQ_INSERT_TAIL(&q->packets, p, next); + trace_usb_ehci_packet_action(p->queue, p, "alloc"); + return p; +} + +static void ehci_free_packet(EHCIPacket *p) +{ + if (p->async == EHCI_ASYNC_FINISHED && + !(p->queue->qh.token & QTD_TOKEN_HALT)) { + ehci_writeback_async_complete_packet(p); + return; + } + trace_usb_ehci_packet_action(p->queue, p, "free"); + if (p->async == EHCI_ASYNC_INFLIGHT) { + usb_cancel_packet(&p->packet); + } + if (p->async == EHCI_ASYNC_FINISHED && + p->packet.status == USB_RET_SUCCESS) { + fprintf(stderr, + "EHCI: Dropping completed packet from halted %s ep %02X\n", + (p->pid == USB_TOKEN_IN) ? "in" : "out", + get_field(p->queue->qh.epchar, QH_EPCHAR_EP)); + } + if (p->async != EHCI_ASYNC_NONE) { + usb_packet_unmap(&p->packet, &p->sgl); + qemu_sglist_destroy(&p->sgl); + } + QTAILQ_REMOVE(&p->queue->packets, p, next); + usb_packet_cleanup(&p->packet); + g_free(p); +} + +/* queue management */ + +static EHCIQueue *ehci_alloc_queue(EHCIState *ehci, uint32_t addr, int async) +{ + EHCIQueueHead *head = async ? &ehci->aqueues : &ehci->pqueues; + EHCIQueue *q; + + q = g_malloc0(sizeof(*q)); + q->ehci = ehci; + q->qhaddr = addr; + q->async = async; + QTAILQ_INIT(&q->packets); + QTAILQ_INSERT_HEAD(head, q, next); + trace_usb_ehci_queue_action(q, "alloc"); + return q; +} + +static void ehci_queue_stopped(EHCIQueue *q) +{ + int endp = get_field(q->qh.epchar, QH_EPCHAR_EP); + + if (!q->last_pid || !q->dev) { + return; + } + + usb_device_ep_stopped(q->dev, usb_ep_get(q->dev, q->last_pid, endp)); +} + +static int ehci_cancel_queue(EHCIQueue *q) +{ + EHCIPacket *p; + int packets = 0; + + p = QTAILQ_FIRST(&q->packets); + if (p == NULL) { + goto leave; + } + + trace_usb_ehci_queue_action(q, "cancel"); + do { + ehci_free_packet(p); + packets++; + } while ((p = QTAILQ_FIRST(&q->packets)) != NULL); + +leave: + ehci_queue_stopped(q); + return packets; +} + +static int ehci_reset_queue(EHCIQueue *q) +{ + int packets; + + trace_usb_ehci_queue_action(q, "reset"); + packets = ehci_cancel_queue(q); + q->dev = NULL; + q->qtdaddr = 0; + q->last_pid = 0; + return packets; +} + +static void ehci_free_queue(EHCIQueue *q, const char *warn) +{ + EHCIQueueHead *head = q->async ? &q->ehci->aqueues : &q->ehci->pqueues; + int cancelled; + + trace_usb_ehci_queue_action(q, "free"); + cancelled = ehci_cancel_queue(q); + if (warn && cancelled > 0) { + ehci_trace_guest_bug(q->ehci, warn); + } + QTAILQ_REMOVE(head, q, next); + g_free(q); +} + +static EHCIQueue *ehci_find_queue_by_qh(EHCIState *ehci, uint32_t addr, + int async) +{ + EHCIQueueHead *head = async ? &ehci->aqueues : &ehci->pqueues; + EHCIQueue *q; + + QTAILQ_FOREACH(q, head, next) { + if (addr == q->qhaddr) { + return q; + } + } + return NULL; +} + +static void ehci_queues_rip_unused(EHCIState *ehci, int async) +{ + EHCIQueueHead *head = async ? &ehci->aqueues : &ehci->pqueues; + const char *warn = async ? 
"guest unlinked busy QH" : NULL; + uint64_t maxage = FRAME_TIMER_NS * ehci->maxframes * 4; + EHCIQueue *q, *tmp; + + QTAILQ_FOREACH_SAFE(q, head, next, tmp) { + if (q->seen) { + q->seen = 0; + q->ts = ehci->last_run_ns; + continue; + } + if (ehci->last_run_ns < q->ts + maxage) { + continue; + } + ehci_free_queue(q, warn); + } +} + +static void ehci_queues_rip_unseen(EHCIState *ehci, int async) +{ + EHCIQueueHead *head = async ? &ehci->aqueues : &ehci->pqueues; + EHCIQueue *q, *tmp; + + QTAILQ_FOREACH_SAFE(q, head, next, tmp) { + if (!q->seen) { + ehci_free_queue(q, NULL); + } + } +} + +static void ehci_queues_rip_device(EHCIState *ehci, USBDevice *dev, int async) +{ + EHCIQueueHead *head = async ? &ehci->aqueues : &ehci->pqueues; + EHCIQueue *q, *tmp; + + QTAILQ_FOREACH_SAFE(q, head, next, tmp) { + if (q->dev != dev) { + continue; + } + ehci_free_queue(q, NULL); + } +} + +static void ehci_queues_rip_all(EHCIState *ehci, int async) +{ + EHCIQueueHead *head = async ? &ehci->aqueues : &ehci->pqueues; + const char *warn = async ? "guest stopped busy async schedule" : NULL; + EHCIQueue *q, *tmp; + + QTAILQ_FOREACH_SAFE(q, head, next, tmp) { + ehci_free_queue(q, warn); + } +} + +/* Attach or detach a device on root hub */ + +static void ehci_attach(USBPort *port) +{ + EHCIState *s = port->opaque; + uint32_t *portsc = &s->portsc[port->index]; + const char *owner = (*portsc & PORTSC_POWNER) ? "comp" : "ehci"; + + trace_usb_ehci_port_attach(port->index, owner, port->dev->product_desc); + + if (*portsc & PORTSC_POWNER) { + USBPort *companion = s->companion_ports[port->index]; + companion->dev = port->dev; + companion->ops->attach(companion); + return; + } + + *portsc |= PORTSC_CONNECT; + *portsc |= PORTSC_CSC; + + ehci_raise_irq(s, USBSTS_PCD); +} + +static void ehci_detach(USBPort *port) +{ + EHCIState *s = port->opaque; + uint32_t *portsc = &s->portsc[port->index]; + const char *owner = (*portsc & PORTSC_POWNER) ? "comp" : "ehci"; + + trace_usb_ehci_port_detach(port->index, owner); + + if (*portsc & PORTSC_POWNER) { + USBPort *companion = s->companion_ports[port->index]; + companion->ops->detach(companion); + companion->dev = NULL; + /* + * EHCI spec 4.2.2: "When a disconnect occurs... On the event, + * the port ownership is returned immediately to the EHCI controller." 
+ */ + *portsc &= ~PORTSC_POWNER; + return; + } + + ehci_queues_rip_device(s, port->dev, 0); + ehci_queues_rip_device(s, port->dev, 1); + + *portsc &= ~(PORTSC_CONNECT|PORTSC_PED|PORTSC_SUSPEND); + *portsc |= PORTSC_CSC; + + ehci_raise_irq(s, USBSTS_PCD); +} + +static void ehci_child_detach(USBPort *port, USBDevice *child) +{ + EHCIState *s = port->opaque; + uint32_t portsc = s->portsc[port->index]; + + if (portsc & PORTSC_POWNER) { + USBPort *companion = s->companion_ports[port->index]; + companion->ops->child_detach(companion, child); + return; + } + + ehci_queues_rip_device(s, child, 0); + ehci_queues_rip_device(s, child, 1); +} + +static void ehci_wakeup(USBPort *port) +{ + EHCIState *s = port->opaque; + uint32_t *portsc = &s->portsc[port->index]; + + if (*portsc & PORTSC_POWNER) { + USBPort *companion = s->companion_ports[port->index]; + if (companion->ops->wakeup) { + companion->ops->wakeup(companion); + } + return; + } + + if (*portsc & PORTSC_SUSPEND) { + trace_usb_ehci_port_wakeup(port->index); + *portsc |= PORTSC_FPRES; + ehci_raise_irq(s, USBSTS_PCD); + } + + qemu_bh_schedule(s->async_bh); +} + +static void ehci_register_companion(USBBus *bus, USBPort *ports[], + uint32_t portcount, uint32_t firstport, + Error **errp) +{ + EHCIState *s = container_of(bus, EHCIState, bus); + uint32_t i; + + if (firstport + portcount > NB_PORTS) { + error_setg(errp, "firstport must be between 0 and %u", + NB_PORTS - portcount); + return; + } + + for (i = 0; i < portcount; i++) { + if (s->companion_ports[firstport + i]) { + error_setg(errp, "firstport %u asks for ports %u-%u," + " but port %u has a companion assigned already", + firstport, firstport, firstport + portcount - 1, + firstport + i); + return; + } + } + + for (i = 0; i < portcount; i++) { + s->companion_ports[firstport + i] = ports[i]; + s->ports[firstport + i].speedmask |= + USB_SPEED_MASK_LOW | USB_SPEED_MASK_FULL; + /* Ensure devs attached before the initial reset go to the companion */ + s->portsc[firstport + i] = PORTSC_POWNER; + } + + s->companion_count++; + s->caps[0x05] = (s->companion_count << 4) | portcount; +} + +static void ehci_wakeup_endpoint(USBBus *bus, USBEndpoint *ep, + unsigned int stream) +{ + EHCIState *s = container_of(bus, EHCIState, bus); + uint32_t portsc = s->portsc[ep->dev->port->index]; + + if (portsc & PORTSC_POWNER) { + return; + } + + s->periodic_sched_active = PERIODIC_ACTIVE; + qemu_bh_schedule(s->async_bh); +} + +static USBDevice *ehci_find_device(EHCIState *ehci, uint8_t addr) +{ + USBDevice *dev; + USBPort *port; + int i; + + for (i = 0; i < NB_PORTS; i++) { + port = &ehci->ports[i]; + if (!(ehci->portsc[i] & PORTSC_PED)) { + DPRINTF("Port %d not enabled\n", i); + continue; + } + dev = usb_find_device(port, addr); + if (dev != NULL) { + return dev; + } + } + return NULL; +} + +/* 4.1 host controller initialization */ +void ehci_reset(void *opaque) +{ + EHCIState *s = opaque; + int i; + USBDevice *devs[NB_PORTS]; + + trace_usb_ehci_reset(); + + /* + * Do the detach before touching portsc, so that it correctly gets send to + * us or to our companion based on PORTSC_POWNER before the reset. 
+ */ + for(i = 0; i < NB_PORTS; i++) { + devs[i] = s->ports[i].dev; + if (devs[i] && devs[i]->attached) { + usb_detach(&s->ports[i]); + } + } + + memset(&s->opreg, 0x00, sizeof(s->opreg)); + memset(&s->portsc, 0x00, sizeof(s->portsc)); + + s->usbcmd = NB_MAXINTRATE << USBCMD_ITC_SH; + s->usbsts = USBSTS_HALT; + s->usbsts_pending = 0; + s->usbsts_frindex = 0; + ehci_update_irq(s); + + s->astate = EST_INACTIVE; + s->pstate = EST_INACTIVE; + + for(i = 0; i < NB_PORTS; i++) { + if (s->companion_ports[i]) { + s->portsc[i] = PORTSC_POWNER | PORTSC_PPOWER; + } else { + s->portsc[i] = PORTSC_PPOWER; + } + if (devs[i] && devs[i]->attached) { + usb_attach(&s->ports[i]); + usb_device_reset(devs[i]); + } + } + ehci_queues_rip_all(s, 0); + ehci_queues_rip_all(s, 1); + timer_del(s->frame_timer); + qemu_bh_cancel(s->async_bh); +} + +static uint64_t ehci_caps_read(void *ptr, hwaddr addr, + unsigned size) +{ + EHCIState *s = ptr; + return s->caps[addr]; +} + +static void ehci_caps_write(void *ptr, hwaddr addr, + uint64_t val, unsigned size) +{ +} + +static uint64_t ehci_opreg_read(void *ptr, hwaddr addr, + unsigned size) +{ + EHCIState *s = ptr; + uint32_t val; + + switch (addr) { + case FRINDEX: + /* Round down to mult of 8, else it can go backwards on migration */ + val = s->frindex & ~7; + break; + default: + val = s->opreg[addr >> 2]; + } + + trace_usb_ehci_opreg_read(addr + s->opregbase, addr2str(addr), val); + return val; +} + +static uint64_t ehci_port_read(void *ptr, hwaddr addr, + unsigned size) +{ + EHCIState *s = ptr; + uint32_t val; + + val = s->portsc[addr >> 2]; + trace_usb_ehci_portsc_read(addr + s->portscbase, addr >> 2, val); + return val; +} + +static void handle_port_owner_write(EHCIState *s, int port, uint32_t owner) +{ + USBDevice *dev = s->ports[port].dev; + uint32_t *portsc = &s->portsc[port]; + uint32_t orig; + + if (s->companion_ports[port] == NULL) + return; + + owner = owner & PORTSC_POWNER; + orig = *portsc & PORTSC_POWNER; + + if (!(owner ^ orig)) { + return; + } + + if (dev && dev->attached) { + usb_detach(&s->ports[port]); + } + + *portsc &= ~PORTSC_POWNER; + *portsc |= owner; + + if (dev && dev->attached) { + usb_attach(&s->ports[port]); + } +} + +static void ehci_port_write(void *ptr, hwaddr addr, + uint64_t val, unsigned size) +{ + EHCIState *s = ptr; + int port = addr >> 2; + uint32_t *portsc = &s->portsc[port]; + uint32_t old = *portsc; + USBDevice *dev = s->ports[port].dev; + + trace_usb_ehci_portsc_write(addr + s->portscbase, addr >> 2, val); + + /* Clear rwc bits */ + *portsc &= ~(val & PORTSC_RWC_MASK); + /* The guest may clear, but not set the PED bit */ + *portsc &= val | ~PORTSC_PED; + /* POWNER is masked out by RO_MASK as it is RO when we've no companion */ + handle_port_owner_write(s, port, val); + /* And finally apply RO_MASK */ + val &= PORTSC_RO_MASK; + + if ((val & PORTSC_PRESET) && !(*portsc & PORTSC_PRESET)) { + trace_usb_ehci_port_reset(port, 1); + } + + if (!(val & PORTSC_PRESET) &&(*portsc & PORTSC_PRESET)) { + trace_usb_ehci_port_reset(port, 0); + if (dev && dev->attached) { + usb_port_reset(&s->ports[port]); + *portsc &= ~PORTSC_CSC; + } + + /* + * Table 2.16 Set the enable bit(and enable bit change) to indicate + * to SW that this port has a high speed device attached + */ + if (dev && dev->attached && (dev->speedmask & USB_SPEED_MASK_HIGH)) { + val |= PORTSC_PED; + } + } + + if ((val & PORTSC_SUSPEND) && !(*portsc & PORTSC_SUSPEND)) { + trace_usb_ehci_port_suspend(port); + } + if (!(val & PORTSC_FPRES) && (*portsc & PORTSC_FPRES)) { + 
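+        /*
+         * Guest ended resume signalling (FPRES went 1 -> 0); leaving
+         * resume also leaves suspend, hence SUSPEND is cleared below.
+         */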
trace_usb_ehci_port_resume(port); + val &= ~PORTSC_SUSPEND; + } + + *portsc &= ~PORTSC_RO_MASK; + *portsc |= val; + trace_usb_ehci_portsc_change(addr + s->portscbase, addr >> 2, *portsc, old); +} + +static void ehci_opreg_write(void *ptr, hwaddr addr, + uint64_t val, unsigned size) +{ + EHCIState *s = ptr; + uint32_t *mmio = s->opreg + (addr >> 2); + uint32_t old = *mmio; + int i; + + trace_usb_ehci_opreg_write(addr + s->opregbase, addr2str(addr), val); + + switch (addr) { + case USBCMD: + if (val & USBCMD_HCRESET) { + ehci_reset(s); + val = s->usbcmd; + break; + } + + /* not supporting dynamic frame list size at the moment */ + if ((val & USBCMD_FLS) && !(s->usbcmd & USBCMD_FLS)) { + fprintf(stderr, "attempt to set frame list size -- value %d\n", + (int)val & USBCMD_FLS); + val &= ~USBCMD_FLS; + } + + if (val & USBCMD_IAAD) { + /* + * Process IAAD immediately, otherwise the Linux IAAD watchdog may + * trigger and re-use a qh without us seeing the unlink. + */ + s->async_stepdown = 0; + qemu_bh_schedule(s->async_bh); + trace_usb_ehci_doorbell_ring(); + } + + if (((USBCMD_RUNSTOP | USBCMD_PSE | USBCMD_ASE) & val) != + ((USBCMD_RUNSTOP | USBCMD_PSE | USBCMD_ASE) & s->usbcmd)) { + if (s->pstate == EST_INACTIVE) { + SET_LAST_RUN_CLOCK(s); + } + s->usbcmd = val; /* Set usbcmd for ehci_update_halt() */ + ehci_update_halt(s); + s->async_stepdown = 0; + qemu_bh_schedule(s->async_bh); + } + break; + + case USBSTS: + val &= USBSTS_RO_MASK; // bits 6 through 31 are RO + ehci_clear_usbsts(s, val); // bits 0 through 5 are R/WC + val = s->usbsts; + ehci_update_irq(s); + break; + + case USBINTR: + val &= USBINTR_MASK; + if (ehci_enabled(s) && (USBSTS_FLR & val)) { + qemu_bh_schedule(s->async_bh); + } + break; + + case FRINDEX: + val &= 0x00003fff; /* frindex is 14bits */ + s->usbsts_frindex = val; + break; + + case CONFIGFLAG: + val &= 0x1; + if (val) { + for(i = 0; i < NB_PORTS; i++) + handle_port_owner_write(s, i, 0); + } + break; + + case PERIODICLISTBASE: + if (ehci_periodic_enabled(s)) { + fprintf(stderr, + "ehci: PERIODIC list base register set while periodic schedule\n" + " is enabled and HC is enabled\n"); + } + break; + + case ASYNCLISTADDR: + if (ehci_async_enabled(s)) { + fprintf(stderr, + "ehci: ASYNC list address register set while async schedule\n" + " is enabled and HC is enabled\n"); + } + break; + } + + *mmio = val; + trace_usb_ehci_opreg_change(addr + s->opregbase, addr2str(addr), + *mmio, old); +} + +/* + * Write the qh back to guest physical memory. This step isn't + * in the EHCI spec but we need to do it since we don't share + * physical memory with our guest VM. + * + * The first three dwords are read-only for the EHCI, so skip them + * when writing back the qh. 
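+ * As a sketch of the layout this relies on (EHCI spec section 3.6): dword 0
+ * is the horizontal link pointer and dwords 1-2 are the endpoint
+ * characteristics/capabilities, all guest-owned, so the put_dwords() below
+ * starts at dword 3 (the current-qTD pointer) and writes only the
+ * transfer-overlay part of the QH.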
+ */ +static void ehci_flush_qh(EHCIQueue *q) +{ + uint32_t *qh = (uint32_t *) &q->qh; + uint32_t dwords = sizeof(EHCIqh) >> 2; + uint32_t addr = NLPTR_GET(q->qhaddr); + + put_dwords(q->ehci, addr + 3 * sizeof(uint32_t), qh + 3, dwords - 3); +} + +// 4.10.2 + +static int ehci_qh_do_overlay(EHCIQueue *q) +{ + EHCIPacket *p = QTAILQ_FIRST(&q->packets); + int i; + int dtoggle; + int ping; + int eps; + int reload; + + assert(p != NULL); + assert(p->qtdaddr == q->qtdaddr); + + // remember values in fields to preserve in qh after overlay + + dtoggle = q->qh.token & QTD_TOKEN_DTOGGLE; + ping = q->qh.token & QTD_TOKEN_PING; + + q->qh.current_qtd = p->qtdaddr; + q->qh.next_qtd = p->qtd.next; + q->qh.altnext_qtd = p->qtd.altnext; + q->qh.token = p->qtd.token; + + + eps = get_field(q->qh.epchar, QH_EPCHAR_EPS); + if (eps == EHCI_QH_EPS_HIGH) { + q->qh.token &= ~QTD_TOKEN_PING; + q->qh.token |= ping; + } + + reload = get_field(q->qh.epchar, QH_EPCHAR_RL); + set_field(&q->qh.altnext_qtd, reload, QH_ALTNEXT_NAKCNT); + + for (i = 0; i < 5; i++) { + q->qh.bufptr[i] = p->qtd.bufptr[i]; + } + + if (!(q->qh.epchar & QH_EPCHAR_DTC)) { + // preserve QH DT bit + q->qh.token &= ~QTD_TOKEN_DTOGGLE; + q->qh.token |= dtoggle; + } + + q->qh.bufptr[1] &= ~BUFPTR_CPROGMASK_MASK; + q->qh.bufptr[2] &= ~BUFPTR_FRAMETAG_MASK; + + ehci_flush_qh(q); + + return 0; +} + +static int ehci_init_transfer(EHCIPacket *p) +{ + uint32_t cpage, offset, bytes, plen; + dma_addr_t page; + + cpage = get_field(p->qtd.token, QTD_TOKEN_CPAGE); + bytes = get_field(p->qtd.token, QTD_TOKEN_TBYTES); + offset = p->qtd.bufptr[0] & ~QTD_BUFPTR_MASK; + qemu_sglist_init(&p->sgl, p->queue->ehci->device, 5, p->queue->ehci->as); + + while (bytes > 0) { + if (cpage > 4) { + fprintf(stderr, "cpage out of range (%u)\n", cpage); + qemu_sglist_destroy(&p->sgl); + return -1; + } + + page = p->qtd.bufptr[cpage] & QTD_BUFPTR_MASK; + page += offset; + plen = bytes; + if (plen > 4096 - offset) { + plen = 4096 - offset; + offset = 0; + cpage++; + } + + qemu_sglist_add(&p->sgl, page, plen); + bytes -= plen; + } + return 0; +} + +static void ehci_finish_transfer(EHCIQueue *q, int len) +{ + uint32_t cpage, offset; + + if (len > 0) { + /* update cpage & offset */ + cpage = get_field(q->qh.token, QTD_TOKEN_CPAGE); + offset = q->qh.bufptr[0] & ~QTD_BUFPTR_MASK; + + offset += len; + cpage += offset >> QTD_BUFPTR_SH; + offset &= ~QTD_BUFPTR_MASK; + + set_field(&q->qh.token, cpage, QTD_TOKEN_CPAGE); + q->qh.bufptr[0] &= QTD_BUFPTR_MASK; + q->qh.bufptr[0] |= offset; + } +} + +static void ehci_async_complete_packet(USBPort *port, USBPacket *packet) +{ + EHCIPacket *p; + EHCIState *s = port->opaque; + uint32_t portsc = s->portsc[port->index]; + + if (portsc & PORTSC_POWNER) { + USBPort *companion = s->companion_ports[port->index]; + companion->ops->complete(companion, packet); + return; + } + + p = container_of(packet, EHCIPacket, packet); + assert(p->async == EHCI_ASYNC_INFLIGHT); + + if (packet->status == USB_RET_REMOVE_FROM_QUEUE) { + trace_usb_ehci_packet_action(p->queue, p, "remove"); + ehci_free_packet(p); + return; + } + + trace_usb_ehci_packet_action(p->queue, p, "wakeup"); + p->async = EHCI_ASYNC_FINISHED; + + if (!p->queue->async) { + s->periodic_sched_active = PERIODIC_ACTIVE; + } + qemu_bh_schedule(s->async_bh); +} + +static void ehci_execute_complete(EHCIQueue *q) +{ + EHCIPacket *p = QTAILQ_FIRST(&q->packets); + uint32_t tbytes; + + assert(p != NULL); + assert(p->qtdaddr == q->qtdaddr); + assert(p->async == EHCI_ASYNC_INITIALIZED || + p->async == 
EHCI_ASYNC_FINISHED); + + DPRINTF("execute_complete: qhaddr 0x%x, next 0x%x, qtdaddr 0x%x, " + "status %d, actual_length %d\n", + q->qhaddr, q->qh.next, q->qtdaddr, + p->packet.status, p->packet.actual_length); + + switch (p->packet.status) { + case USB_RET_SUCCESS: + break; + case USB_RET_IOERROR: + case USB_RET_NODEV: + q->qh.token |= (QTD_TOKEN_HALT | QTD_TOKEN_XACTERR); + set_field(&q->qh.token, 0, QTD_TOKEN_CERR); + ehci_raise_irq(q->ehci, USBSTS_ERRINT); + break; + case USB_RET_STALL: + q->qh.token |= QTD_TOKEN_HALT; + ehci_raise_irq(q->ehci, USBSTS_ERRINT); + break; + case USB_RET_NAK: + set_field(&q->qh.altnext_qtd, 0, QH_ALTNEXT_NAKCNT); + return; /* We're not done yet with this transaction */ + case USB_RET_BABBLE: + q->qh.token |= (QTD_TOKEN_HALT | QTD_TOKEN_BABBLE); + ehci_raise_irq(q->ehci, USBSTS_ERRINT); + break; + default: + /* should not be triggerable */ + fprintf(stderr, "USB invalid response %d\n", p->packet.status); + g_assert_not_reached(); + } + + /* TODO check 4.12 for splits */ + tbytes = get_field(q->qh.token, QTD_TOKEN_TBYTES); + if (tbytes && p->pid == USB_TOKEN_IN) { + tbytes -= p->packet.actual_length; + if (tbytes) { + /* 4.15.1.2 must raise int on a short input packet */ + ehci_raise_irq(q->ehci, USBSTS_INT); + if (q->async) { + q->ehci->int_req_by_async = true; + } + } + } else { + tbytes = 0; + } + DPRINTF("updating tbytes to %d\n", tbytes); + set_field(&q->qh.token, tbytes, QTD_TOKEN_TBYTES); + + ehci_finish_transfer(q, p->packet.actual_length); + usb_packet_unmap(&p->packet, &p->sgl); + qemu_sglist_destroy(&p->sgl); + p->async = EHCI_ASYNC_NONE; + + q->qh.token ^= QTD_TOKEN_DTOGGLE; + q->qh.token &= ~QTD_TOKEN_ACTIVE; + + if (q->qh.token & QTD_TOKEN_IOC) { + ehci_raise_irq(q->ehci, USBSTS_INT); + if (q->async) { + q->ehci->int_req_by_async = true; + } + } +} + +/* 4.10.3 returns "again" */ +static int ehci_execute(EHCIPacket *p, const char *action) +{ + USBEndpoint *ep; + int endp; + bool spd; + + assert(p->async == EHCI_ASYNC_NONE || + p->async == EHCI_ASYNC_INITIALIZED); + + if (!(p->qtd.token & QTD_TOKEN_ACTIVE)) { + fprintf(stderr, "Attempting to execute inactive qtd\n"); + return -1; + } + + if (get_field(p->qtd.token, QTD_TOKEN_TBYTES) > BUFF_SIZE) { + ehci_trace_guest_bug(p->queue->ehci, + "guest requested more bytes than allowed"); + return -1; + } + + if (!ehci_verify_pid(p->queue, &p->qtd)) { + ehci_queue_stopped(p->queue); /* Mark the ep in the prev dir stopped */ + } + p->pid = ehci_get_pid(&p->qtd); + p->queue->last_pid = p->pid; + endp = get_field(p->queue->qh.epchar, QH_EPCHAR_EP); + ep = usb_ep_get(p->queue->dev, p->pid, endp); + + if (p->async == EHCI_ASYNC_NONE) { + if (ehci_init_transfer(p) != 0) { + return -1; + } + + spd = (p->pid == USB_TOKEN_IN && NLPTR_TBIT(p->qtd.altnext) == 0); + usb_packet_setup(&p->packet, p->pid, ep, 0, p->qtdaddr, spd, + (p->qtd.token & QTD_TOKEN_IOC) != 0); + if (usb_packet_map(&p->packet, &p->sgl)) { + qemu_sglist_destroy(&p->sgl); + return -1; + } + p->async = EHCI_ASYNC_INITIALIZED; + } + + trace_usb_ehci_packet_action(p->queue, p, action); + usb_handle_packet(p->queue->dev, &p->packet); + DPRINTF("submit: qh 0x%x next 0x%x qtd 0x%x pid 0x%x len %zd endp 0x%x " + "status %d actual_length %d\n", p->queue->qhaddr, p->qtd.next, + p->qtdaddr, p->pid, p->packet.iov.size, endp, p->packet.status, + p->packet.actual_length); + + if (p->packet.actual_length > BUFF_SIZE) { + fprintf(stderr, "ret from usb_handle_packet > BUFF_SIZE\n"); + return -1; + } + + return 1; +} + +/* 4.7.2 + */ + +static int 
ehci_process_itd(EHCIState *ehci, + EHCIitd *itd, + uint32_t addr) +{ + USBDevice *dev; + USBEndpoint *ep; + uint32_t i, len, pid, dir, devaddr, endp; + uint32_t pg, off, ptr1, ptr2, max, mult; + + ehci->periodic_sched_active = PERIODIC_ACTIVE; + + dir =(itd->bufptr[1] & ITD_BUFPTR_DIRECTION); + devaddr = get_field(itd->bufptr[0], ITD_BUFPTR_DEVADDR); + endp = get_field(itd->bufptr[0], ITD_BUFPTR_EP); + max = get_field(itd->bufptr[1], ITD_BUFPTR_MAXPKT); + mult = get_field(itd->bufptr[2], ITD_BUFPTR_MULT); + + for(i = 0; i < 8; i++) { + if (itd->transact[i] & ITD_XACT_ACTIVE) { + pg = get_field(itd->transact[i], ITD_XACT_PGSEL); + off = itd->transact[i] & ITD_XACT_OFFSET_MASK; + len = get_field(itd->transact[i], ITD_XACT_LENGTH); + + if (len > max * mult) { + len = max * mult; + } + if (len > BUFF_SIZE || pg > 6) { + return -1; + } + + ptr1 = (itd->bufptr[pg] & ITD_BUFPTR_MASK); + qemu_sglist_init(&ehci->isgl, ehci->device, 2, ehci->as); + if (off + len > 4096) { + /* transfer crosses page border */ + if (pg == 6) { + qemu_sglist_destroy(&ehci->isgl); + return -1; /* avoid page pg + 1 */ + } + ptr2 = (itd->bufptr[pg + 1] & ITD_BUFPTR_MASK); + uint32_t len2 = off + len - 4096; + uint32_t len1 = len - len2; + qemu_sglist_add(&ehci->isgl, ptr1 + off, len1); + qemu_sglist_add(&ehci->isgl, ptr2, len2); + } else { + qemu_sglist_add(&ehci->isgl, ptr1 + off, len); + } + + dev = ehci_find_device(ehci, devaddr); + if (dev == NULL) { + ehci_trace_guest_bug(ehci, "no device found"); + ehci->ipacket.status = USB_RET_NODEV; + ehci->ipacket.actual_length = 0; + } else { + pid = dir ? USB_TOKEN_IN : USB_TOKEN_OUT; + ep = usb_ep_get(dev, pid, endp); + if (ep && ep->type == USB_ENDPOINT_XFER_ISOC) { + usb_packet_setup(&ehci->ipacket, pid, ep, 0, addr, false, + (itd->transact[i] & ITD_XACT_IOC) != 0); + if (usb_packet_map(&ehci->ipacket, &ehci->isgl)) { + qemu_sglist_destroy(&ehci->isgl); + return -1; + } + usb_handle_packet(dev, &ehci->ipacket); + usb_packet_unmap(&ehci->ipacket, &ehci->isgl); + } else { + DPRINTF("ISOCH: attempt to addess non-iso endpoint\n"); + ehci->ipacket.status = USB_RET_NAK; + ehci->ipacket.actual_length = 0; + } + } + qemu_sglist_destroy(&ehci->isgl); + + switch (ehci->ipacket.status) { + case USB_RET_SUCCESS: + break; + default: + fprintf(stderr, "Unexpected iso usb result: %d\n", + ehci->ipacket.status); + /* Fall through */ + case USB_RET_IOERROR: + case USB_RET_NODEV: + /* 3.3.2: XACTERR is only allowed on IN transactions */ + if (dir) { + itd->transact[i] |= ITD_XACT_XACTERR; + ehci_raise_irq(ehci, USBSTS_ERRINT); + } + break; + case USB_RET_BABBLE: + itd->transact[i] |= ITD_XACT_BABBLE; + ehci_raise_irq(ehci, USBSTS_ERRINT); + break; + case USB_RET_NAK: + /* no data for us, so do a zero-length transfer */ + ehci->ipacket.actual_length = 0; + break; + } + if (!dir) { + set_field(&itd->transact[i], len - ehci->ipacket.actual_length, + ITD_XACT_LENGTH); /* OUT */ + } else { + set_field(&itd->transact[i], ehci->ipacket.actual_length, + ITD_XACT_LENGTH); /* IN */ + } + if (itd->transact[i] & ITD_XACT_IOC) { + ehci_raise_irq(ehci, USBSTS_INT); + } + itd->transact[i] &= ~ITD_XACT_ACTIVE; + } + } + return 0; +} + + +/* This state is the entry point for asynchronous schedule + * processing. 
Entry here consitutes a EHCI start event state (4.8.5) + */ +static int ehci_state_waitlisthead(EHCIState *ehci, int async) +{ + EHCIqh qh; + int i = 0; + int again = 0; + uint32_t entry = ehci->asynclistaddr; + + /* set reclamation flag at start event (4.8.6) */ + if (async) { + ehci_set_usbsts(ehci, USBSTS_REC); + } + + ehci_queues_rip_unused(ehci, async); + + /* Find the head of the list (4.9.1.1) */ + for(i = 0; i < MAX_QH; i++) { + if (get_dwords(ehci, NLPTR_GET(entry), (uint32_t *) &qh, + sizeof(EHCIqh) >> 2) < 0) { + return 0; + } + ehci_trace_qh(NULL, NLPTR_GET(entry), &qh); + + if (qh.epchar & QH_EPCHAR_H) { + if (async) { + entry |= (NLPTR_TYPE_QH << 1); + } + + ehci_set_fetch_addr(ehci, async, entry); + ehci_set_state(ehci, async, EST_FETCHENTRY); + again = 1; + goto out; + } + + entry = qh.next; + if (entry == ehci->asynclistaddr) { + break; + } + } + + /* no head found for list. */ + + ehci_set_state(ehci, async, EST_ACTIVE); + +out: + return again; +} + + +/* This state is the entry point for periodic schedule processing as + * well as being a continuation state for async processing. + */ +static int ehci_state_fetchentry(EHCIState *ehci, int async) +{ + int again = 0; + uint32_t entry = ehci_get_fetch_addr(ehci, async); + + if (NLPTR_TBIT(entry)) { + ehci_set_state(ehci, async, EST_ACTIVE); + goto out; + } + + /* section 4.8, only QH in async schedule */ + if (async && (NLPTR_TYPE_GET(entry) != NLPTR_TYPE_QH)) { + fprintf(stderr, "non queue head request in async schedule\n"); + return -1; + } + + switch (NLPTR_TYPE_GET(entry)) { + case NLPTR_TYPE_QH: + ehci_set_state(ehci, async, EST_FETCHQH); + again = 1; + break; + + case NLPTR_TYPE_ITD: + ehci_set_state(ehci, async, EST_FETCHITD); + again = 1; + break; + + case NLPTR_TYPE_STITD: + ehci_set_state(ehci, async, EST_FETCHSITD); + again = 1; + break; + + default: + /* TODO: handle FSTN type */ + fprintf(stderr, "FETCHENTRY: entry at %X is of type %u " + "which is not supported yet\n", entry, NLPTR_TYPE_GET(entry)); + return -1; + } + +out: + return again; +} + +static EHCIQueue *ehci_state_fetchqh(EHCIState *ehci, int async) +{ + uint32_t entry; + EHCIQueue *q; + EHCIqh qh; + + entry = ehci_get_fetch_addr(ehci, async); + q = ehci_find_queue_by_qh(ehci, entry, async); + if (q == NULL) { + q = ehci_alloc_queue(ehci, entry, async); + } + + q->seen++; + if (q->seen > 1) { + /* we are going in circles -- stop processing */ + ehci_set_state(ehci, async, EST_ACTIVE); + q = NULL; + goto out; + } + + if (get_dwords(ehci, NLPTR_GET(q->qhaddr), + (uint32_t *) &qh, sizeof(EHCIqh) >> 2) < 0) { + q = NULL; + goto out; + } + ehci_trace_qh(q, NLPTR_GET(q->qhaddr), &qh); + + /* + * The overlay area of the qh should never be changed by the guest, + * except when idle, in which case the reset is a nop. + */ + if (!ehci_verify_qh(q, &qh)) { + if (ehci_reset_queue(q) > 0) { + ehci_trace_guest_bug(ehci, "guest updated active QH"); + } + } + q->qh = qh; + + q->transact_ctr = get_field(q->qh.epcap, QH_EPCAP_MULT); + if (q->transact_ctr == 0) { /* Guest bug in some versions of windows */ + q->transact_ctr = 4; + } + + if (q->dev == NULL) { + q->dev = ehci_find_device(q->ehci, + get_field(q->qh.epchar, QH_EPCHAR_DEVADDR)); + } + + if (async && (q->qh.epchar & QH_EPCHAR_H)) { + + /* EHCI spec version 1.0 Section 4.8.3 & 4.10.1 */ + if (ehci->usbsts & USBSTS_REC) { + ehci_clear_usbsts(ehci, USBSTS_REC); + } else { + DPRINTF("FETCHQH: QH 0x%08x. 
H-bit set, reclamation status reset" + " - done processing\n", q->qhaddr); + ehci_set_state(ehci, async, EST_ACTIVE); + q = NULL; + goto out; + } + } + +#if EHCI_DEBUG + if (q->qhaddr != q->qh.next) { + DPRINTF("FETCHQH: QH 0x%08x (h %x halt %x active %x) next 0x%08x\n", + q->qhaddr, + q->qh.epchar & QH_EPCHAR_H, + q->qh.token & QTD_TOKEN_HALT, + q->qh.token & QTD_TOKEN_ACTIVE, + q->qh.next); + } +#endif + + if (q->qh.token & QTD_TOKEN_HALT) { + ehci_set_state(ehci, async, EST_HORIZONTALQH); + + } else if ((q->qh.token & QTD_TOKEN_ACTIVE) && + (NLPTR_TBIT(q->qh.current_qtd) == 0) && + (q->qh.current_qtd != 0)) { + q->qtdaddr = q->qh.current_qtd; + ehci_set_state(ehci, async, EST_FETCHQTD); + + } else { + /* EHCI spec version 1.0 Section 4.10.2 */ + ehci_set_state(ehci, async, EST_ADVANCEQUEUE); + } + +out: + return q; +} + +static int ehci_state_fetchitd(EHCIState *ehci, int async) +{ + uint32_t entry; + EHCIitd itd; + + assert(!async); + entry = ehci_get_fetch_addr(ehci, async); + + if (get_dwords(ehci, NLPTR_GET(entry), (uint32_t *) &itd, + sizeof(EHCIitd) >> 2) < 0) { + return -1; + } + ehci_trace_itd(ehci, entry, &itd); + + if (ehci_process_itd(ehci, &itd, entry) != 0) { + return -1; + } + + put_dwords(ehci, NLPTR_GET(entry), (uint32_t *) &itd, + sizeof(EHCIitd) >> 2); + ehci_set_fetch_addr(ehci, async, itd.next); + ehci_set_state(ehci, async, EST_FETCHENTRY); + + return 1; +} + +static int ehci_state_fetchsitd(EHCIState *ehci, int async) +{ + uint32_t entry; + EHCIsitd sitd; + + assert(!async); + entry = ehci_get_fetch_addr(ehci, async); + + if (get_dwords(ehci, NLPTR_GET(entry), (uint32_t *)&sitd, + sizeof(EHCIsitd) >> 2) < 0) { + return 0; + } + ehci_trace_sitd(ehci, entry, &sitd); + + if (!(sitd.results & SITD_RESULTS_ACTIVE)) { + /* siTD is not active, nothing to do */; + } else { + /* TODO: split transfers are not implemented */ + warn_report("Skipping active siTD"); + } + + ehci_set_fetch_addr(ehci, async, sitd.next); + ehci_set_state(ehci, async, EST_FETCHENTRY); + return 1; +} + +/* Section 4.10.2 - paragraph 3 */ +static int ehci_state_advqueue(EHCIQueue *q) +{ +#if 0 + /* TO-DO: 4.10.2 - paragraph 2 + * if I-bit is set to 1 and QH is not active + * go to horizontal QH + */ + if (I-bit set) { + ehci_set_state(ehci, async, EST_HORIZONTALQH); + goto out; + } +#endif + + /* + * want data and alt-next qTD is valid + */ + if (((q->qh.token & QTD_TOKEN_TBYTES_MASK) != 0) && + (NLPTR_TBIT(q->qh.altnext_qtd) == 0)) { + q->qtdaddr = q->qh.altnext_qtd; + ehci_set_state(q->ehci, q->async, EST_FETCHQTD); + + /* + * next qTD is valid + */ + } else if (NLPTR_TBIT(q->qh.next_qtd) == 0) { + q->qtdaddr = q->qh.next_qtd; + ehci_set_state(q->ehci, q->async, EST_FETCHQTD); + + /* + * no valid qTD, try next QH + */ + } else { + ehci_set_state(q->ehci, q->async, EST_HORIZONTALQH); + } + + return 1; +} + +/* Section 4.10.2 - paragraph 4 */ +static int ehci_state_fetchqtd(EHCIQueue *q) +{ + EHCIqtd qtd; + EHCIPacket *p; + int again = 1; + uint32_t addr; + + addr = NLPTR_GET(q->qtdaddr); + if (get_dwords(q->ehci, addr + 8, &qtd.token, 1) < 0) { + return 0; + } + barrier(); + if (get_dwords(q->ehci, addr + 0, &qtd.next, 1) < 0 || + get_dwords(q->ehci, addr + 4, &qtd.altnext, 1) < 0 || + get_dwords(q->ehci, addr + 12, qtd.bufptr, + ARRAY_SIZE(qtd.bufptr)) < 0) { + return 0; + } + ehci_trace_qtd(q, NLPTR_GET(q->qtdaddr), &qtd); + + p = QTAILQ_FIRST(&q->packets); + if (p != NULL) { + if (!ehci_verify_qtd(p, &qtd)) { + ehci_cancel_queue(q); + if (qtd.token & QTD_TOKEN_ACTIVE) { + 
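+                /*
+                 * The in-flight qTD was replaced by the guest while still
+                 * marked active; its packets were cancelled above, so
+                 * processing restarts from the qTD now in guest memory.
+                 */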
ehci_trace_guest_bug(q->ehci, "guest updated active qTD"); + } + p = NULL; + } else { + p->qtd = qtd; + ehci_qh_do_overlay(q); + } + } + + if (!(qtd.token & QTD_TOKEN_ACTIVE)) { + ehci_set_state(q->ehci, q->async, EST_HORIZONTALQH); + } else if (p != NULL) { + switch (p->async) { + case EHCI_ASYNC_NONE: + case EHCI_ASYNC_INITIALIZED: + /* Not yet executed (MULT), or previously nacked (int) packet */ + ehci_set_state(q->ehci, q->async, EST_EXECUTE); + break; + case EHCI_ASYNC_INFLIGHT: + /* Check if the guest has added new tds to the queue */ + again = ehci_fill_queue(QTAILQ_LAST(&q->packets)); + /* Unfinished async handled packet, go horizontal */ + ehci_set_state(q->ehci, q->async, EST_HORIZONTALQH); + break; + case EHCI_ASYNC_FINISHED: + /* Complete executing of the packet */ + ehci_set_state(q->ehci, q->async, EST_EXECUTING); + break; + } + } else if (q->dev == NULL) { + ehci_trace_guest_bug(q->ehci, "no device attached to queue"); + ehci_set_state(q->ehci, q->async, EST_HORIZONTALQH); + } else { + p = ehci_alloc_packet(q); + p->qtdaddr = q->qtdaddr; + p->qtd = qtd; + ehci_set_state(q->ehci, q->async, EST_EXECUTE); + } + + return again; +} + +static int ehci_state_horizqh(EHCIQueue *q) +{ + int again = 0; + + if (ehci_get_fetch_addr(q->ehci, q->async) != q->qh.next) { + ehci_set_fetch_addr(q->ehci, q->async, q->qh.next); + ehci_set_state(q->ehci, q->async, EST_FETCHENTRY); + again = 1; + } else { + ehci_set_state(q->ehci, q->async, EST_ACTIVE); + } + + return again; +} + +/* Returns "again" */ +static int ehci_fill_queue(EHCIPacket *p) +{ + USBEndpoint *ep = p->packet.ep; + EHCIQueue *q = p->queue; + EHCIqtd qtd = p->qtd; + uint32_t qtdaddr; + + for (;;) { + if (NLPTR_TBIT(qtd.next) != 0) { + break; + } + qtdaddr = qtd.next; + /* + * Detect circular td lists, Windows creates these, counting on the + * active bit going low after execution to make the queue stop. 
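+         * A minimal instance of such a ring is two qTDs linked A -> B -> A
+         * with both active: without the scan below, A would be queued a
+         * second time while its first submission is still in flight.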
+ */ + QTAILQ_FOREACH(p, &q->packets, next) { + if (p->qtdaddr == qtdaddr) { + goto leave; + } + } + if (get_dwords(q->ehci, NLPTR_GET(qtdaddr), + (uint32_t *) &qtd, sizeof(EHCIqtd) >> 2) < 0) { + return -1; + } + ehci_trace_qtd(q, NLPTR_GET(qtdaddr), &qtd); + if (!(qtd.token & QTD_TOKEN_ACTIVE)) { + break; + } + if (!ehci_verify_pid(q, &qtd)) { + ehci_trace_guest_bug(q->ehci, "guest queued token with wrong pid"); + break; + } + p = ehci_alloc_packet(q); + p->qtdaddr = qtdaddr; + p->qtd = qtd; + if (ehci_execute(p, "queue") == -1) { + return -1; + } + assert(p->packet.status == USB_RET_ASYNC); + p->async = EHCI_ASYNC_INFLIGHT; + } +leave: + usb_device_flush_ep_queue(ep->dev, ep); + return 1; +} + +static int ehci_state_execute(EHCIQueue *q) +{ + EHCIPacket *p = QTAILQ_FIRST(&q->packets); + int again = 0; + + assert(p != NULL); + assert(p->qtdaddr == q->qtdaddr); + + if (ehci_qh_do_overlay(q) != 0) { + return -1; + } + + // TODO verify enough time remains in the uframe as in 4.4.1.1 + // TODO write back ptr to async list when done or out of time + + /* 4.10.3, bottom of page 82, go horizontal on transaction counter == 0 */ + if (!q->async && q->transact_ctr == 0) { + ehci_set_state(q->ehci, q->async, EST_HORIZONTALQH); + again = 1; + goto out; + } + + if (q->async) { + ehci_set_usbsts(q->ehci, USBSTS_REC); + } + + again = ehci_execute(p, "process"); + if (again == -1) { + goto out; + } + if (p->packet.status == USB_RET_ASYNC) { + ehci_flush_qh(q); + trace_usb_ehci_packet_action(p->queue, p, "async"); + p->async = EHCI_ASYNC_INFLIGHT; + ehci_set_state(q->ehci, q->async, EST_HORIZONTALQH); + if (q->async) { + again = ehci_fill_queue(p); + } else { + again = 1; + } + goto out; + } + + ehci_set_state(q->ehci, q->async, EST_EXECUTING); + again = 1; + +out: + return again; +} + +static int ehci_state_executing(EHCIQueue *q) +{ + EHCIPacket *p = QTAILQ_FIRST(&q->packets); + + assert(p != NULL); + assert(p->qtdaddr == q->qtdaddr); + + ehci_execute_complete(q); + + /* 4.10.3 */ + if (!q->async && q->transact_ctr > 0) { + q->transact_ctr--; + } + + /* 4.10.5 */ + if (p->packet.status == USB_RET_NAK) { + ehci_set_state(q->ehci, q->async, EST_HORIZONTALQH); + } else { + ehci_set_state(q->ehci, q->async, EST_WRITEBACK); + } + + ehci_flush_qh(q); + return 1; +} + + +static int ehci_state_writeback(EHCIQueue *q) +{ + EHCIPacket *p = QTAILQ_FIRST(&q->packets); + uint32_t *qtd, addr; + int again = 0; + + /* Write back the QTD from the QH area */ + assert(p != NULL); + assert(p->qtdaddr == q->qtdaddr); + + ehci_trace_qtd(q, NLPTR_GET(p->qtdaddr), (EHCIqtd *) &q->qh.next_qtd); + qtd = (uint32_t *) &q->qh.next_qtd; + addr = NLPTR_GET(p->qtdaddr); + put_dwords(q->ehci, addr + 2 * sizeof(uint32_t), qtd + 2, 2); + ehci_free_packet(p); + + /* + * EHCI specs say go horizontal here. + * + * We can also advance the queue here for performance reasons. We + * need to take care to only take that shortcut in case we've + * processed the qtd just written back without errors, i.e. halt + * bit is clear. 
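+     * The shortcut only skips a redundant re-fetch of a QH whose overlay we
+     * already hold in q->qh; whenever the halt bit is set we go horizontal
+     * instead, so the guest's view of the schedule stays authoritative.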
+ */ + if (q->qh.token & QTD_TOKEN_HALT) { + ehci_set_state(q->ehci, q->async, EST_HORIZONTALQH); + again = 1; + } else { + ehci_set_state(q->ehci, q->async, EST_ADVANCEQUEUE); + again = 1; + } + return again; +} + +/* + * This is the state machine that is common to both async and periodic + */ + +static void ehci_advance_state(EHCIState *ehci, int async) +{ + EHCIQueue *q = NULL; + int itd_count = 0; + int again; + + do { + switch(ehci_get_state(ehci, async)) { + case EST_WAITLISTHEAD: + again = ehci_state_waitlisthead(ehci, async); + break; + + case EST_FETCHENTRY: + again = ehci_state_fetchentry(ehci, async); + break; + + case EST_FETCHQH: + q = ehci_state_fetchqh(ehci, async); + if (q != NULL) { + assert(q->async == async); + again = 1; + } else { + again = 0; + } + break; + + case EST_FETCHITD: + again = ehci_state_fetchitd(ehci, async); + itd_count++; + break; + + case EST_FETCHSITD: + again = ehci_state_fetchsitd(ehci, async); + itd_count++; + break; + + case EST_ADVANCEQUEUE: + assert(q != NULL); + again = ehci_state_advqueue(q); + break; + + case EST_FETCHQTD: + assert(q != NULL); + again = ehci_state_fetchqtd(q); + break; + + case EST_HORIZONTALQH: + assert(q != NULL); + again = ehci_state_horizqh(q); + break; + + case EST_EXECUTE: + assert(q != NULL); + again = ehci_state_execute(q); + if (async) { + ehci->async_stepdown = 0; + } + break; + + case EST_EXECUTING: + assert(q != NULL); + if (async) { + ehci->async_stepdown = 0; + } + again = ehci_state_executing(q); + break; + + case EST_WRITEBACK: + assert(q != NULL); + again = ehci_state_writeback(q); + if (!async) { + ehci->periodic_sched_active = PERIODIC_ACTIVE; + } + break; + + default: + fprintf(stderr, "Bad state!\n"); + g_assert_not_reached(); + } + + if (again < 0 || itd_count > 16) { + /* TODO: notify guest (raise HSE irq?) */ + fprintf(stderr, "processing error - resetting ehci HC\n"); + ehci_reset(ehci); + again = 0; + } + } + while (again); +} + +static void ehci_advance_async_state(EHCIState *ehci) +{ + const int async = 1; + + switch(ehci_get_state(ehci, async)) { + case EST_INACTIVE: + if (!ehci_async_enabled(ehci)) { + break; + } + ehci_set_state(ehci, async, EST_ACTIVE); + // No break, fall through to ACTIVE + + case EST_ACTIVE: + if (!ehci_async_enabled(ehci)) { + ehci_queues_rip_all(ehci, async); + ehci_set_state(ehci, async, EST_INACTIVE); + break; + } + + /* make sure guest has acknowledged the doorbell interrupt */ + /* TO-DO: is this really needed? */ + if (ehci->usbsts & USBSTS_IAA) { + DPRINTF("IAA status bit still set.\n"); + break; + } + + /* check that address register has been set */ + if (ehci->asynclistaddr == 0) { + break; + } + + ehci_set_state(ehci, async, EST_WAITLISTHEAD); + ehci_advance_state(ehci, async); + + /* If the doorbell is set, the guest wants to make a change to the + * schedule. The host controller needs to release cached data. + * (section 4.8.2) + */ + if (ehci->usbcmd & USBCMD_IAAD) { + /* Remove all unseen qhs from the async qhs queue */ + ehci_queues_rip_unseen(ehci, async); + trace_usb_ehci_doorbell_ack(); + ehci->usbcmd &= ~USBCMD_IAAD; + ehci_raise_irq(ehci, USBSTS_IAA); + } + break; + + default: + /* this should only be due to a developer mistake */ + fprintf(stderr, "ehci: Bad asynchronous state %d. 
" + "Resetting to active\n", ehci->astate); + g_assert_not_reached(); + } +} + +static void ehci_advance_periodic_state(EHCIState *ehci) +{ + uint32_t entry; + uint32_t list; + const int async = 0; + + // 4.6 + + switch(ehci_get_state(ehci, async)) { + case EST_INACTIVE: + if (!(ehci->frindex & 7) && ehci_periodic_enabled(ehci)) { + ehci_set_state(ehci, async, EST_ACTIVE); + // No break, fall through to ACTIVE + } else + break; + + case EST_ACTIVE: + if (!(ehci->frindex & 7) && !ehci_periodic_enabled(ehci)) { + ehci_queues_rip_all(ehci, async); + ehci_set_state(ehci, async, EST_INACTIVE); + break; + } + + list = ehci->periodiclistbase & 0xfffff000; + /* check that register has been set */ + if (list == 0) { + break; + } + list |= ((ehci->frindex & 0x1ff8) >> 1); + + if (get_dwords(ehci, list, &entry, 1) < 0) { + break; + } + + DPRINTF("PERIODIC state adv fr=%d. [%08X] -> %08X\n", + ehci->frindex / 8, list, entry); + ehci_set_fetch_addr(ehci, async,entry); + ehci_set_state(ehci, async, EST_FETCHENTRY); + ehci_advance_state(ehci, async); + ehci_queues_rip_unused(ehci, async); + break; + + default: + /* this should only be due to a developer mistake */ + fprintf(stderr, "ehci: Bad periodic state %d. " + "Resetting to active\n", ehci->pstate); + g_assert_not_reached(); + } +} + +static void ehci_update_frindex(EHCIState *ehci, int uframes) +{ + if (!ehci_enabled(ehci) && ehci->pstate == EST_INACTIVE) { + return; + } + + /* Generate FLR interrupt if frame index rolls over 0x2000 */ + if ((ehci->frindex % 0x2000) + uframes >= 0x2000) { + ehci_raise_irq(ehci, USBSTS_FLR); + } + + /* How many times will frindex roll over 0x4000 with this frame count? + * usbsts_frindex is decremented by 0x4000 on rollover until it reaches 0 + */ + int rollovers = (ehci->frindex + uframes) / 0x4000; + if (rollovers > 0) { + if (ehci->usbsts_frindex >= (rollovers * 0x4000)) { + ehci->usbsts_frindex -= 0x4000 * rollovers; + } else { + ehci->usbsts_frindex = 0; + } + } + + ehci->frindex = (ehci->frindex + uframes) % 0x4000; +} + +static void ehci_work_bh(void *opaque) +{ + EHCIState *ehci = opaque; + int need_timer = 0; + int64_t expire_time, t_now; + uint64_t ns_elapsed; + uint64_t uframes, skipped_uframes; + int i; + + if (ehci->working) { + return; + } + ehci->working = true; + + t_now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + ns_elapsed = t_now - ehci->last_run_ns; + uframes = ns_elapsed / UFRAME_TIMER_NS; + + if (ehci_periodic_enabled(ehci) || ehci->pstate != EST_INACTIVE) { + need_timer++; + + if (uframes > (ehci->maxframes * 8)) { + skipped_uframes = uframes - (ehci->maxframes * 8); + ehci_update_frindex(ehci, skipped_uframes); + ehci->last_run_ns += UFRAME_TIMER_NS * skipped_uframes; + uframes -= skipped_uframes; + DPRINTF("WARNING - EHCI skipped %d uframes\n", skipped_uframes); + } + + for (i = 0; i < uframes; i++) { + /* + * If we're running behind schedule, we should not catch up + * too fast, as that will make some guests unhappy: + * 1) We must process a minimum of MIN_UFR_PER_TICK frames, + * otherwise we will never catch up + * 2) Process frames until the guest has requested an irq (IOC) + */ + if (i >= MIN_UFR_PER_TICK) { + ehci_commit_irq(ehci); + if ((ehci->usbsts & USBINTR_MASK) & ehci->usbintr) { + break; + } + } + if (ehci->periodic_sched_active) { + ehci->periodic_sched_active--; + } + ehci_update_frindex(ehci, 1); + if ((ehci->frindex & 7) == 0) { + ehci_advance_periodic_state(ehci); + } + ehci->last_run_ns += UFRAME_TIMER_NS; + } + } else { + ehci->periodic_sched_active = 0; + 
ehci_update_frindex(ehci, uframes); + ehci->last_run_ns += UFRAME_TIMER_NS * uframes; + } + + if (ehci->periodic_sched_active) { + ehci->async_stepdown = 0; + } else if (ehci->async_stepdown < ehci->maxframes / 2) { + ehci->async_stepdown++; + } + + /* Async is not inside loop since it executes everything it can once + * called + */ + if (ehci_async_enabled(ehci) || ehci->astate != EST_INACTIVE) { + need_timer++; + ehci_advance_async_state(ehci); + } + + ehci_commit_irq(ehci); + if (ehci->usbsts_pending) { + need_timer++; + ehci->async_stepdown = 0; + } + + if (ehci_enabled(ehci) && (ehci->usbintr & USBSTS_FLR)) { + need_timer++; + } + + if (need_timer) { + /* If we've raised int, we speed up the timer, so that we quickly + * notice any new packets queued up in response */ + if (ehci->int_req_by_async && (ehci->usbsts & USBSTS_INT)) { + expire_time = t_now + + NANOSECONDS_PER_SECOND / (FRAME_TIMER_FREQ * 4); + ehci->int_req_by_async = false; + } else { + expire_time = t_now + (NANOSECONDS_PER_SECOND + * (ehci->async_stepdown+1) / FRAME_TIMER_FREQ); + } + timer_mod(ehci->frame_timer, expire_time); + } + + ehci->working = false; +} + +static void ehci_work_timer(void *opaque) +{ + EHCIState *ehci = opaque; + + qemu_bh_schedule(ehci->async_bh); +} + +static const MemoryRegionOps ehci_mmio_caps_ops = { + .read = ehci_caps_read, + .write = ehci_caps_write, + .valid.min_access_size = 1, + .valid.max_access_size = 4, + .impl.min_access_size = 1, + .impl.max_access_size = 1, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static const MemoryRegionOps ehci_mmio_opreg_ops = { + .read = ehci_opreg_read, + .write = ehci_opreg_write, + .valid.min_access_size = 4, + .valid.max_access_size = 4, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static const MemoryRegionOps ehci_mmio_port_ops = { + .read = ehci_port_read, + .write = ehci_port_write, + .valid.min_access_size = 4, + .valid.max_access_size = 4, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static USBPortOps ehci_port_ops = { + .attach = ehci_attach, + .detach = ehci_detach, + .child_detach = ehci_child_detach, + .wakeup = ehci_wakeup, + .complete = ehci_async_complete_packet, +}; + +static USBBusOps ehci_bus_ops_companion = { + .register_companion = ehci_register_companion, + .wakeup_endpoint = ehci_wakeup_endpoint, +}; +static USBBusOps ehci_bus_ops_standalone = { + .wakeup_endpoint = ehci_wakeup_endpoint, +}; + +static int usb_ehci_pre_save(void *opaque) +{ + EHCIState *ehci = opaque; + uint32_t new_frindex; + + /* Round down frindex to a multiple of 8 for migration compatibility */ + new_frindex = ehci->frindex & ~7; + ehci->last_run_ns -= (ehci->frindex - new_frindex) * UFRAME_TIMER_NS; + ehci->frindex = new_frindex; + + return 0; +} + +static int usb_ehci_post_load(void *opaque, int version_id) +{ + EHCIState *s = opaque; + int i; + + for (i = 0; i < NB_PORTS; i++) { + USBPort *companion = s->companion_ports[i]; + if (companion == NULL) { + continue; + } + if (s->portsc[i] & PORTSC_POWNER) { + companion->dev = s->ports[i].dev; + } else { + companion->dev = NULL; + } + } + + return 0; +} + +static void usb_ehci_vm_state_change(void *opaque, bool running, RunState state) +{ + EHCIState *ehci = opaque; + + /* + * We don't migrate the EHCIQueue-s, instead we rebuild them for the + * schedule in guest memory. We must do the rebuilt ASAP, so that + * USB-devices which have async handled packages have a packet in the + * ep queue to match the completion with. 
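+     * Concretely, ehci_async_complete_packet() recovers its EHCIPacket via
+     * container_of() on the completing USBPacket, so a completion arriving
+     * right after the VM starts running must find the rebuilt queue entry
+     * already in place.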
+     */
+    if (state == RUN_STATE_RUNNING) {
+        ehci_advance_async_state(ehci);
+    }
+
+    /*
+     * The schedule rebuilt from guest memory could cause the migration
+     * destination to miss a QH unlink, and fail to cancel packets, since the
+     * unlinked QH will never have existed on the destination. Therefore we
+     * must flush the async schedule on savevm to catch any not yet noticed
+     * unlinks.
+     */
+    if (state == RUN_STATE_SAVE_VM) {
+        ehci_advance_async_state(ehci);
+        ehci_queues_rip_unseen(ehci, 1);
+    }
+}
+
+const VMStateDescription vmstate_ehci = {
+    .name        = "ehci-core",
+    .version_id  = 2,
+    .minimum_version_id  = 1,
+    .pre_save    = usb_ehci_pre_save,
+    .post_load   = usb_ehci_post_load,
+    .fields      = (VMStateField[]) {
+        /* mmio registers */
+        VMSTATE_UINT32(usbcmd, EHCIState),
+        VMSTATE_UINT32(usbsts, EHCIState),
+        VMSTATE_UINT32_V(usbsts_pending, EHCIState, 2),
+        VMSTATE_UINT32_V(usbsts_frindex, EHCIState, 2),
+        VMSTATE_UINT32(usbintr, EHCIState),
+        VMSTATE_UINT32(frindex, EHCIState),
+        VMSTATE_UINT32(ctrldssegment, EHCIState),
+        VMSTATE_UINT32(periodiclistbase, EHCIState),
+        VMSTATE_UINT32(asynclistaddr, EHCIState),
+        VMSTATE_UINT32(configflag, EHCIState),
+        VMSTATE_UINT32(portsc[0], EHCIState),
+        VMSTATE_UINT32(portsc[1], EHCIState),
+        VMSTATE_UINT32(portsc[2], EHCIState),
+        VMSTATE_UINT32(portsc[3], EHCIState),
+        VMSTATE_UINT32(portsc[4], EHCIState),
+        VMSTATE_UINT32(portsc[5], EHCIState),
+        /* frame timer */
+        VMSTATE_TIMER_PTR(frame_timer, EHCIState),
+        VMSTATE_UINT64(last_run_ns, EHCIState),
+        VMSTATE_UINT32(async_stepdown, EHCIState),
+        /* schedule state */
+        VMSTATE_UINT32(astate, EHCIState),
+        VMSTATE_UINT32(pstate, EHCIState),
+        VMSTATE_UINT32(a_fetch_addr, EHCIState),
+        VMSTATE_UINT32(p_fetch_addr, EHCIState),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+void usb_ehci_realize(EHCIState *s, DeviceState *dev, Error **errp)
+{
+    int i;
+
+    if (s->portnr > NB_PORTS) {
+        error_setg(errp, "Too many ports! Max. port number is %d.",
+                   NB_PORTS);
+        return;
+    }
+    if (s->maxframes < 8 || s->maxframes > 512) {
+        error_setg(errp, "maxframes %d out of range (8 .. 512)",
+                   s->maxframes);
+        return;
+    }
+
+    memory_region_add_subregion(&s->mem, s->capsbase, &s->mem_caps);
+    memory_region_add_subregion(&s->mem, s->opregbase, &s->mem_opreg);
+    memory_region_add_subregion(&s->mem, s->opregbase + s->portscbase,
+                                &s->mem_ports);
+
+    usb_bus_new(&s->bus, sizeof(s->bus), s->companion_enable ?
+ &ehci_bus_ops_companion : &ehci_bus_ops_standalone, dev); + for (i = 0; i < s->portnr; i++) { + usb_register_port(&s->bus, &s->ports[i], s, i, &ehci_port_ops, + USB_SPEED_MASK_HIGH); + s->ports[i].dev = 0; + } + + s->frame_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, ehci_work_timer, s); + s->async_bh = qemu_bh_new(ehci_work_bh, s); + s->device = dev; + + s->vmstate = qemu_add_vm_change_state_handler(usb_ehci_vm_state_change, s); +} + +void usb_ehci_unrealize(EHCIState *s, DeviceState *dev) +{ + trace_usb_ehci_unrealize(); + + if (s->frame_timer) { + timer_free(s->frame_timer); + s->frame_timer = NULL; + } + if (s->async_bh) { + qemu_bh_delete(s->async_bh); + } + + ehci_queues_rip_all(s, 0); + ehci_queues_rip_all(s, 1); + + memory_region_del_subregion(&s->mem, &s->mem_caps); + memory_region_del_subregion(&s->mem, &s->mem_opreg); + memory_region_del_subregion(&s->mem, &s->mem_ports); + + usb_bus_release(&s->bus); + + if (s->vmstate) { + qemu_del_vm_change_state_handler(s->vmstate); + } +} + +void usb_ehci_init(EHCIState *s, DeviceState *dev) +{ + /* 2.2 host controller interface version */ + s->caps[0x00] = (uint8_t)(s->opregbase - s->capsbase); + s->caps[0x01] = 0x00; + s->caps[0x02] = 0x00; + s->caps[0x03] = 0x01; /* HC version */ + s->caps[0x04] = s->portnr; /* Number of downstream ports */ + s->caps[0x05] = 0x00; /* No companion ports at present */ + s->caps[0x06] = 0x00; + s->caps[0x07] = 0x00; + s->caps[0x08] = 0x80; /* We can cache whole frame, no 64-bit */ + s->caps[0x0a] = 0x00; + s->caps[0x0b] = 0x00; + + QTAILQ_INIT(&s->aqueues); + QTAILQ_INIT(&s->pqueues); + usb_packet_init(&s->ipacket); + + memory_region_init(&s->mem, OBJECT(dev), "ehci", MMIO_SIZE); + memory_region_init_io(&s->mem_caps, OBJECT(dev), &ehci_mmio_caps_ops, s, + "capabilities", CAPA_SIZE); + memory_region_init_io(&s->mem_opreg, OBJECT(dev), &ehci_mmio_opreg_ops, s, + "operational", s->portscbase); + memory_region_init_io(&s->mem_ports, OBJECT(dev), &ehci_mmio_port_ops, s, + "ports", 4 * s->portnr); +} + +void usb_ehci_finalize(EHCIState *s) +{ + usb_packet_cleanup(&s->ipacket); +} + +/* + * vim: expandtab ts=4 + */ diff --git a/hw/usb/hcd-ehci.h b/hw/usb/hcd-ehci.h new file mode 100644 index 000000000..a173707d9 --- /dev/null +++ b/hw/usb/hcd-ehci.h @@ -0,0 +1,383 @@ +/* + * QEMU USB EHCI Emulation + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program; if not, see . + */ + +#ifndef HW_USB_HCD_EHCI_H +#define HW_USB_HCD_EHCI_H + +#include "qemu/timer.h" +#include "hw/usb.h" +#include "sysemu/dma.h" +#include "hw/pci/pci.h" +#include "hw/sysbus.h" +#include "qom/object.h" + +#ifndef EHCI_DEBUG +#define EHCI_DEBUG 0 +#endif + +#if EHCI_DEBUG +#define DPRINTF printf +#else +#define DPRINTF(...) +#endif + +#define MMIO_SIZE 0x1000 +#define CAPA_SIZE 0x10 + +#define NB_PORTS 6 /* Max. 
Number of downstream ports */ + +typedef struct EHCIPacket EHCIPacket; +typedef struct EHCIQueue EHCIQueue; +typedef struct EHCIState EHCIState; + +/* EHCI spec version 1.0 Section 3.3 + */ +typedef struct EHCIitd { + uint32_t next; + + uint32_t transact[8]; +#define ITD_XACT_ACTIVE (1 << 31) +#define ITD_XACT_DBERROR (1 << 30) +#define ITD_XACT_BABBLE (1 << 29) +#define ITD_XACT_XACTERR (1 << 28) +#define ITD_XACT_LENGTH_MASK 0x0fff0000 +#define ITD_XACT_LENGTH_SH 16 +#define ITD_XACT_IOC (1 << 15) +#define ITD_XACT_PGSEL_MASK 0x00007000 +#define ITD_XACT_PGSEL_SH 12 +#define ITD_XACT_OFFSET_MASK 0x00000fff + + uint32_t bufptr[7]; +#define ITD_BUFPTR_MASK 0xfffff000 +#define ITD_BUFPTR_SH 12 +#define ITD_BUFPTR_EP_MASK 0x00000f00 +#define ITD_BUFPTR_EP_SH 8 +#define ITD_BUFPTR_DEVADDR_MASK 0x0000007f +#define ITD_BUFPTR_DEVADDR_SH 0 +#define ITD_BUFPTR_DIRECTION (1 << 11) +#define ITD_BUFPTR_MAXPKT_MASK 0x000007ff +#define ITD_BUFPTR_MAXPKT_SH 0 +#define ITD_BUFPTR_MULT_MASK 0x00000003 +#define ITD_BUFPTR_MULT_SH 0 +} EHCIitd; + +/* EHCI spec version 1.0 Section 3.4 + */ +typedef struct EHCIsitd { + uint32_t next; /* Standard next link pointer */ + uint32_t epchar; +#define SITD_EPCHAR_IO (1 << 31) +#define SITD_EPCHAR_PORTNUM_MASK 0x7f000000 +#define SITD_EPCHAR_PORTNUM_SH 24 +#define SITD_EPCHAR_HUBADD_MASK 0x007f0000 +#define SITD_EPCHAR_HUBADDR_SH 16 +#define SITD_EPCHAR_EPNUM_MASK 0x00000f00 +#define SITD_EPCHAR_EPNUM_SH 8 +#define SITD_EPCHAR_DEVADDR_MASK 0x0000007f + + uint32_t uframe; +#define SITD_UFRAME_CMASK_MASK 0x0000ff00 +#define SITD_UFRAME_CMASK_SH 8 +#define SITD_UFRAME_SMASK_MASK 0x000000ff + + uint32_t results; +#define SITD_RESULTS_IOC (1 << 31) +#define SITD_RESULTS_PGSEL (1 << 30) +#define SITD_RESULTS_TBYTES_MASK 0x03ff0000 +#define SITD_RESULTS_TYBYTES_SH 16 +#define SITD_RESULTS_CPROGMASK_MASK 0x0000ff00 +#define SITD_RESULTS_CPROGMASK_SH 8 +#define SITD_RESULTS_ACTIVE (1 << 7) +#define SITD_RESULTS_ERR (1 << 6) +#define SITD_RESULTS_DBERR (1 << 5) +#define SITD_RESULTS_BABBLE (1 << 4) +#define SITD_RESULTS_XACTERR (1 << 3) +#define SITD_RESULTS_MISSEDUF (1 << 2) +#define SITD_RESULTS_SPLITXSTATE (1 << 1) + + uint32_t bufptr[2]; +#define SITD_BUFPTR_MASK 0xfffff000 +#define SITD_BUFPTR_CURROFF_MASK 0x00000fff +#define SITD_BUFPTR_TPOS_MASK 0x00000018 +#define SITD_BUFPTR_TPOS_SH 3 +#define SITD_BUFPTR_TCNT_MASK 0x00000007 + + uint32_t backptr; /* Standard next link pointer */ +} EHCIsitd; + +/* EHCI spec version 1.0 Section 3.5 + */ +typedef struct EHCIqtd { + uint32_t next; /* Standard next link pointer */ + uint32_t altnext; /* Standard next link pointer */ + uint32_t token; +#define QTD_TOKEN_DTOGGLE (1 << 31) +#define QTD_TOKEN_TBYTES_MASK 0x7fff0000 +#define QTD_TOKEN_TBYTES_SH 16 +#define QTD_TOKEN_IOC (1 << 15) +#define QTD_TOKEN_CPAGE_MASK 0x00007000 +#define QTD_TOKEN_CPAGE_SH 12 +#define QTD_TOKEN_CERR_MASK 0x00000c00 +#define QTD_TOKEN_CERR_SH 10 +#define QTD_TOKEN_PID_MASK 0x00000300 +#define QTD_TOKEN_PID_SH 8 +#define QTD_TOKEN_ACTIVE (1 << 7) +#define QTD_TOKEN_HALT (1 << 6) +#define QTD_TOKEN_DBERR (1 << 5) +#define QTD_TOKEN_BABBLE (1 << 4) +#define QTD_TOKEN_XACTERR (1 << 3) +#define QTD_TOKEN_MISSEDUF (1 << 2) +#define QTD_TOKEN_SPLITXSTATE (1 << 1) +#define QTD_TOKEN_PING (1 << 0) + + uint32_t bufptr[5]; /* Standard buffer pointer */ +#define QTD_BUFPTR_MASK 0xfffff000 +#define QTD_BUFPTR_SH 12 +} EHCIqtd; + +/* EHCI spec version 1.0 Section 3.6 + */ +typedef struct EHCIqh { + uint32_t next; /* Standard next link pointer */ + + /* endpoint 
characteristics */ + uint32_t epchar; +#define QH_EPCHAR_RL_MASK 0xf0000000 +#define QH_EPCHAR_RL_SH 28 +#define QH_EPCHAR_C (1 << 27) +#define QH_EPCHAR_MPLEN_MASK 0x07FF0000 +#define QH_EPCHAR_MPLEN_SH 16 +#define QH_EPCHAR_H (1 << 15) +#define QH_EPCHAR_DTC (1 << 14) +#define QH_EPCHAR_EPS_MASK 0x00003000 +#define QH_EPCHAR_EPS_SH 12 +#define EHCI_QH_EPS_FULL 0 +#define EHCI_QH_EPS_LOW 1 +#define EHCI_QH_EPS_HIGH 2 +#define EHCI_QH_EPS_RESERVED 3 + +#define QH_EPCHAR_EP_MASK 0x00000f00 +#define QH_EPCHAR_EP_SH 8 +#define QH_EPCHAR_I (1 << 7) +#define QH_EPCHAR_DEVADDR_MASK 0x0000007f +#define QH_EPCHAR_DEVADDR_SH 0 + + /* endpoint capabilities */ + uint32_t epcap; +#define QH_EPCAP_MULT_MASK 0xc0000000 +#define QH_EPCAP_MULT_SH 30 +#define QH_EPCAP_PORTNUM_MASK 0x3f800000 +#define QH_EPCAP_PORTNUM_SH 23 +#define QH_EPCAP_HUBADDR_MASK 0x007f0000 +#define QH_EPCAP_HUBADDR_SH 16 +#define QH_EPCAP_CMASK_MASK 0x0000ff00 +#define QH_EPCAP_CMASK_SH 8 +#define QH_EPCAP_SMASK_MASK 0x000000ff +#define QH_EPCAP_SMASK_SH 0 + + uint32_t current_qtd; /* Standard next link pointer */ + uint32_t next_qtd; /* Standard next link pointer */ + uint32_t altnext_qtd; +#define QH_ALTNEXT_NAKCNT_MASK 0x0000001e +#define QH_ALTNEXT_NAKCNT_SH 1 + + uint32_t token; /* Same as QTD token */ + uint32_t bufptr[5]; /* Standard buffer pointer */ +#define BUFPTR_CPROGMASK_MASK 0x000000ff +#define BUFPTR_FRAMETAG_MASK 0x0000001f +#define BUFPTR_SBYTES_MASK 0x00000fe0 +#define BUFPTR_SBYTES_SH 5 +} EHCIqh; + +/* EHCI spec version 1.0 Section 3.7 + */ +typedef struct EHCIfstn { + uint32_t next; /* Standard next link pointer */ + uint32_t backptr; /* Standard next link pointer */ +} EHCIfstn; + +enum async_state { + EHCI_ASYNC_NONE = 0, + EHCI_ASYNC_INITIALIZED, + EHCI_ASYNC_INFLIGHT, + EHCI_ASYNC_FINISHED, +}; + +struct EHCIPacket { + EHCIQueue *queue; + QTAILQ_ENTRY(EHCIPacket) next; + + EHCIqtd qtd; /* copy of current QTD (being worked on) */ + uint32_t qtdaddr; /* address QTD read from */ + + USBPacket packet; + QEMUSGList sgl; + int pid; + enum async_state async; +}; + +struct EHCIQueue { + EHCIState *ehci; + QTAILQ_ENTRY(EHCIQueue) next; + uint32_t seen; + uint64_t ts; + int async; + int transact_ctr; + + /* cached data from guest - needs to be flushed + * when guest removes an entry (doorbell, handshake sequence) + */ + EHCIqh qh; /* copy of current QH (being worked on) */ + uint32_t qhaddr; /* address QH read from */ + uint32_t qtdaddr; /* address QTD read from */ + int last_pid; /* pid of last packet executed */ + USBDevice *dev; + QTAILQ_HEAD(, EHCIPacket) packets; +}; + +typedef QTAILQ_HEAD(EHCIQueueHead, EHCIQueue) EHCIQueueHead; + +struct EHCIState { + USBBus bus; + DeviceState *device; + qemu_irq irq; + MemoryRegion mem; + AddressSpace *as; + MemoryRegion mem_caps; + MemoryRegion mem_opreg; + MemoryRegion mem_ports; + int companion_count; + bool companion_enable; + uint16_t capsbase; + uint16_t opregbase; + uint16_t portscbase; + uint16_t portnr; + + /* properties */ + uint32_t maxframes; + + /* + * EHCI spec version 1.0 Section 2.3 + * Host Controller Operational Registers + */ + uint8_t caps[CAPA_SIZE]; + union { + uint32_t opreg[0x44/sizeof(uint32_t)]; + struct { + uint32_t usbcmd; + uint32_t usbsts; + uint32_t usbintr; + uint32_t frindex; + uint32_t ctrldssegment; + uint32_t periodiclistbase; + uint32_t asynclistaddr; + uint32_t notused[9]; + uint32_t configflag; + }; + }; + uint32_t portsc[NB_PORTS]; + + /* + * Internal states, shadow registers, etc + */ + QEMUTimer *frame_timer; + QEMUBH *async_bh; + 
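+    /*
+     * Set while ehci_work_bh() runs; checked on entry so a re-entrant
+     * schedule run (e.g. a completion scheduling the BH again) bails out.
+     */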
bool working; + uint32_t astate; /* Current state in asynchronous schedule */ + uint32_t pstate; /* Current state in periodic schedule */ + USBPort ports[NB_PORTS]; + USBPort *companion_ports[NB_PORTS]; + uint32_t usbsts_pending; + uint32_t usbsts_frindex; + EHCIQueueHead aqueues; + EHCIQueueHead pqueues; + + /* which address to look at next */ + uint32_t a_fetch_addr; + uint32_t p_fetch_addr; + + USBPacket ipacket; + QEMUSGList isgl; + + uint64_t last_run_ns; + uint32_t async_stepdown; + uint32_t periodic_sched_active; + bool int_req_by_async; + VMChangeStateEntry *vmstate; +}; + +extern const VMStateDescription vmstate_ehci; + +void usb_ehci_init(EHCIState *s, DeviceState *dev); +void usb_ehci_finalize(EHCIState *s); +void usb_ehci_realize(EHCIState *s, DeviceState *dev, Error **errp); +void usb_ehci_unrealize(EHCIState *s, DeviceState *dev); +void ehci_reset(void *opaque); + +#define TYPE_PCI_EHCI "pci-ehci-usb" +OBJECT_DECLARE_SIMPLE_TYPE(EHCIPCIState, PCI_EHCI) + +struct EHCIPCIState { + /*< private >*/ + PCIDevice pcidev; + /*< public >*/ + + EHCIState ehci; +}; + + +#define TYPE_SYS_BUS_EHCI "sysbus-ehci-usb" +#define TYPE_PLATFORM_EHCI "platform-ehci-usb" +#define TYPE_EXYNOS4210_EHCI "exynos4210-ehci-usb" +#define TYPE_AW_H3_EHCI "aw-h3-ehci-usb" +#define TYPE_NPCM7XX_EHCI "npcm7xx-ehci-usb" +#define TYPE_TEGRA2_EHCI "tegra2-ehci-usb" +#define TYPE_PPC4xx_EHCI "ppc4xx-ehci-usb" +#define TYPE_FUSBH200_EHCI "fusbh200-ehci-usb" + +OBJECT_DECLARE_TYPE(EHCISysBusState, SysBusEHCIClass, SYS_BUS_EHCI) + +struct EHCISysBusState { + /*< private >*/ + SysBusDevice parent_obj; + /*< public >*/ + + EHCIState ehci; +}; + +struct SysBusEHCIClass { + /*< private >*/ + SysBusDeviceClass parent_class; + /*< public >*/ + + uint16_t capsbase; + uint16_t opregbase; + uint16_t portscbase; + uint16_t portnr; +}; + +OBJECT_DECLARE_SIMPLE_TYPE(FUSBH200EHCIState, FUSBH200_EHCI) + +struct FUSBH200EHCIState { + /*< private >*/ + EHCISysBusState parent_obj; + /*< public >*/ + + MemoryRegion mem_vendor; +}; + +#endif diff --git a/hw/usb/hcd-musb.c b/hw/usb/hcd-musb.c new file mode 100644 index 000000000..85f5ff5bd --- /dev/null +++ b/hw/usb/hcd-musb.c @@ -0,0 +1,1553 @@ +/* + * "Inventra" High-speed Dual-Role Controller (MUSB-HDRC), Mentor Graphics, + * USB2.0 OTG compliant core used in various chips. + * + * Copyright (C) 2008 Nokia Corporation + * Written by Andrzej Zaborowski + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 or + * (at your option) version 3 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . + * + * Only host-mode and non-DMA accesses are currently supported. 
+ */ +#include "qemu/osdep.h" +#include "qemu/timer.h" +#include "hw/usb.h" +#include "hw/usb/hcd-musb.h" +#include "hw/irq.h" +#include "hw/hw.h" + +/* Common USB registers */ +#define MUSB_HDRC_FADDR 0x00 /* 8-bit */ +#define MUSB_HDRC_POWER 0x01 /* 8-bit */ + +#define MUSB_HDRC_INTRTX 0x02 /* 16-bit */ +#define MUSB_HDRC_INTRRX 0x04 +#define MUSB_HDRC_INTRTXE 0x06 +#define MUSB_HDRC_INTRRXE 0x08 +#define MUSB_HDRC_INTRUSB 0x0a /* 8 bit */ +#define MUSB_HDRC_INTRUSBE 0x0b /* 8 bit */ +#define MUSB_HDRC_FRAME 0x0c /* 16-bit */ +#define MUSB_HDRC_INDEX 0x0e /* 8 bit */ +#define MUSB_HDRC_TESTMODE 0x0f /* 8 bit */ + +/* Per-EP registers in indexed mode */ +#define MUSB_HDRC_EP_IDX 0x10 /* 8-bit */ + +/* EP FIFOs */ +#define MUSB_HDRC_FIFO 0x20 + +/* Additional Control Registers */ +#define MUSB_HDRC_DEVCTL 0x60 /* 8 bit */ + +/* These are indexed */ +#define MUSB_HDRC_TXFIFOSZ 0x62 /* 8 bit (see masks) */ +#define MUSB_HDRC_RXFIFOSZ 0x63 /* 8 bit (see masks) */ +#define MUSB_HDRC_TXFIFOADDR 0x64 /* 16 bit offset shifted right 3 */ +#define MUSB_HDRC_RXFIFOADDR 0x66 /* 16 bit offset shifted right 3 */ + +/* Some more registers */ +#define MUSB_HDRC_VCTRL 0x68 /* 8 bit */ +#define MUSB_HDRC_HWVERS 0x6c /* 8 bit */ + +/* Added in HDRC 1.9(?) & MHDRC 1.4 */ +/* ULPI pass-through */ +#define MUSB_HDRC_ULPI_VBUSCTL 0x70 +#define MUSB_HDRC_ULPI_REGDATA 0x74 +#define MUSB_HDRC_ULPI_REGADDR 0x75 +#define MUSB_HDRC_ULPI_REGCTL 0x76 + +/* Extended config & PHY control */ +#define MUSB_HDRC_ENDCOUNT 0x78 /* 8 bit */ +#define MUSB_HDRC_DMARAMCFG 0x79 /* 8 bit */ +#define MUSB_HDRC_PHYWAIT 0x7a /* 8 bit */ +#define MUSB_HDRC_PHYVPLEN 0x7b /* 8 bit */ +#define MUSB_HDRC_HS_EOF1 0x7c /* 8 bit, units of 546.1 us */ +#define MUSB_HDRC_FS_EOF1 0x7d /* 8 bit, units of 533.3 ns */ +#define MUSB_HDRC_LS_EOF1 0x7e /* 8 bit, units of 1.067 us */ + +/* Per-EP BUSCTL registers */ +#define MUSB_HDRC_BUSCTL 0x80 + +/* Per-EP registers in flat mode */ +#define MUSB_HDRC_EP 0x100 + +/* offsets to registers in flat model */ +#define MUSB_HDRC_TXMAXP 0x00 /* 16 bit apparently */ +#define MUSB_HDRC_TXCSR 0x02 /* 16 bit apparently */ +#define MUSB_HDRC_CSR0 MUSB_HDRC_TXCSR /* re-used for EP0 */ +#define MUSB_HDRC_RXMAXP 0x04 /* 16 bit apparently */ +#define MUSB_HDRC_RXCSR 0x06 /* 16 bit apparently */ +#define MUSB_HDRC_RXCOUNT 0x08 /* 16 bit apparently */ +#define MUSB_HDRC_COUNT0 MUSB_HDRC_RXCOUNT /* re-used for EP0 */ +#define MUSB_HDRC_TXTYPE 0x0a /* 8 bit apparently */ +#define MUSB_HDRC_TYPE0 MUSB_HDRC_TXTYPE /* re-used for EP0 */ +#define MUSB_HDRC_TXINTERVAL 0x0b /* 8 bit apparently */ +#define MUSB_HDRC_NAKLIMIT0 MUSB_HDRC_TXINTERVAL /* re-used for EP0 */ +#define MUSB_HDRC_RXTYPE 0x0c /* 8 bit apparently */ +#define MUSB_HDRC_RXINTERVAL 0x0d /* 8 bit apparently */ +#define MUSB_HDRC_FIFOSIZE 0x0f /* 8 bit apparently */ +#define MUSB_HDRC_CONFIGDATA MGC_O_HDRC_FIFOSIZE /* re-used for EP0 */ + +/* "Bus control" registers */ +#define MUSB_HDRC_TXFUNCADDR 0x00 +#define MUSB_HDRC_TXHUBADDR 0x02 +#define MUSB_HDRC_TXHUBPORT 0x03 + +#define MUSB_HDRC_RXFUNCADDR 0x04 +#define MUSB_HDRC_RXHUBADDR 0x06 +#define MUSB_HDRC_RXHUBPORT 0x07 + +/* + * MUSBHDRC Register bit masks + */ + +/* POWER */ +#define MGC_M_POWER_ISOUPDATE 0x80 +#define MGC_M_POWER_SOFTCONN 0x40 +#define MGC_M_POWER_HSENAB 0x20 +#define MGC_M_POWER_HSMODE 0x10 +#define MGC_M_POWER_RESET 0x08 +#define MGC_M_POWER_RESUME 0x04 +#define MGC_M_POWER_SUSPENDM 0x02 +#define MGC_M_POWER_ENSUSPEND 0x01 + +/* INTRUSB */ +#define MGC_M_INTR_SUSPEND 0x01 +#define 
MGC_M_INTR_RESUME 0x02 +#define MGC_M_INTR_RESET 0x04 +#define MGC_M_INTR_BABBLE 0x04 +#define MGC_M_INTR_SOF 0x08 +#define MGC_M_INTR_CONNECT 0x10 +#define MGC_M_INTR_DISCONNECT 0x20 +#define MGC_M_INTR_SESSREQ 0x40 +#define MGC_M_INTR_VBUSERROR 0x80 /* FOR SESSION END */ +#define MGC_M_INTR_EP0 0x01 /* FOR EP0 INTERRUPT */ + +/* DEVCTL */ +#define MGC_M_DEVCTL_BDEVICE 0x80 +#define MGC_M_DEVCTL_FSDEV 0x40 +#define MGC_M_DEVCTL_LSDEV 0x20 +#define MGC_M_DEVCTL_VBUS 0x18 +#define MGC_S_DEVCTL_VBUS 3 +#define MGC_M_DEVCTL_HM 0x04 +#define MGC_M_DEVCTL_HR 0x02 +#define MGC_M_DEVCTL_SESSION 0x01 + +/* TESTMODE */ +#define MGC_M_TEST_FORCE_HOST 0x80 +#define MGC_M_TEST_FIFO_ACCESS 0x40 +#define MGC_M_TEST_FORCE_FS 0x20 +#define MGC_M_TEST_FORCE_HS 0x10 +#define MGC_M_TEST_PACKET 0x08 +#define MGC_M_TEST_K 0x04 +#define MGC_M_TEST_J 0x02 +#define MGC_M_TEST_SE0_NAK 0x01 + +/* CSR0 */ +#define MGC_M_CSR0_FLUSHFIFO 0x0100 +#define MGC_M_CSR0_TXPKTRDY 0x0002 +#define MGC_M_CSR0_RXPKTRDY 0x0001 + +/* CSR0 in Peripheral mode */ +#define MGC_M_CSR0_P_SVDSETUPEND 0x0080 +#define MGC_M_CSR0_P_SVDRXPKTRDY 0x0040 +#define MGC_M_CSR0_P_SENDSTALL 0x0020 +#define MGC_M_CSR0_P_SETUPEND 0x0010 +#define MGC_M_CSR0_P_DATAEND 0x0008 +#define MGC_M_CSR0_P_SENTSTALL 0x0004 + +/* CSR0 in Host mode */ +#define MGC_M_CSR0_H_NO_PING 0x0800 +#define MGC_M_CSR0_H_WR_DATATOGGLE 0x0400 /* set to allow setting: */ +#define MGC_M_CSR0_H_DATATOGGLE 0x0200 /* data toggle control */ +#define MGC_M_CSR0_H_NAKTIMEOUT 0x0080 +#define MGC_M_CSR0_H_STATUSPKT 0x0040 +#define MGC_M_CSR0_H_REQPKT 0x0020 +#define MGC_M_CSR0_H_ERROR 0x0010 +#define MGC_M_CSR0_H_SETUPPKT 0x0008 +#define MGC_M_CSR0_H_RXSTALL 0x0004 + +/* CONFIGDATA */ +#define MGC_M_CONFIGDATA_MPRXE 0x80 /* auto bulk pkt combining */ +#define MGC_M_CONFIGDATA_MPTXE 0x40 /* auto bulk pkt splitting */ +#define MGC_M_CONFIGDATA_BIGENDIAN 0x20 +#define MGC_M_CONFIGDATA_HBRXE 0x10 /* HB-ISO for RX */ +#define MGC_M_CONFIGDATA_HBTXE 0x08 /* HB-ISO for TX */ +#define MGC_M_CONFIGDATA_DYNFIFO 0x04 /* dynamic FIFO sizing */ +#define MGC_M_CONFIGDATA_SOFTCONE 0x02 /* SoftConnect */ +#define MGC_M_CONFIGDATA_UTMIDW 0x01 /* Width, 0 => 8b, 1 => 16b */ + +/* TXCSR in Peripheral and Host mode */ +#define MGC_M_TXCSR_AUTOSET 0x8000 +#define MGC_M_TXCSR_ISO 0x4000 +#define MGC_M_TXCSR_MODE 0x2000 +#define MGC_M_TXCSR_DMAENAB 0x1000 +#define MGC_M_TXCSR_FRCDATATOG 0x0800 +#define MGC_M_TXCSR_DMAMODE 0x0400 +#define MGC_M_TXCSR_CLRDATATOG 0x0040 +#define MGC_M_TXCSR_FLUSHFIFO 0x0008 +#define MGC_M_TXCSR_FIFONOTEMPTY 0x0002 +#define MGC_M_TXCSR_TXPKTRDY 0x0001 + +/* TXCSR in Peripheral mode */ +#define MGC_M_TXCSR_P_INCOMPTX 0x0080 +#define MGC_M_TXCSR_P_SENTSTALL 0x0020 +#define MGC_M_TXCSR_P_SENDSTALL 0x0010 +#define MGC_M_TXCSR_P_UNDERRUN 0x0004 + +/* TXCSR in Host mode */ +#define MGC_M_TXCSR_H_WR_DATATOGGLE 0x0200 +#define MGC_M_TXCSR_H_DATATOGGLE 0x0100 +#define MGC_M_TXCSR_H_NAKTIMEOUT 0x0080 +#define MGC_M_TXCSR_H_RXSTALL 0x0020 +#define MGC_M_TXCSR_H_ERROR 0x0004 + +/* RXCSR in Peripheral and Host mode */ +#define MGC_M_RXCSR_AUTOCLEAR 0x8000 +#define MGC_M_RXCSR_DMAENAB 0x2000 +#define MGC_M_RXCSR_DISNYET 0x1000 +#define MGC_M_RXCSR_DMAMODE 0x0800 +#define MGC_M_RXCSR_INCOMPRX 0x0100 +#define MGC_M_RXCSR_CLRDATATOG 0x0080 +#define MGC_M_RXCSR_FLUSHFIFO 0x0010 +#define MGC_M_RXCSR_DATAERROR 0x0008 +#define MGC_M_RXCSR_FIFOFULL 0x0002 +#define MGC_M_RXCSR_RXPKTRDY 0x0001 + +/* RXCSR in Peripheral mode */ +#define MGC_M_RXCSR_P_ISO 0x4000 +#define MGC_M_RXCSR_P_SENTSTALL 0x0040 
+#define MGC_M_RXCSR_P_SENDSTALL 0x0020 +#define MGC_M_RXCSR_P_OVERRUN 0x0004 + +/* RXCSR in Host mode */ +#define MGC_M_RXCSR_H_AUTOREQ 0x4000 +#define MGC_M_RXCSR_H_WR_DATATOGGLE 0x0400 +#define MGC_M_RXCSR_H_DATATOGGLE 0x0200 +#define MGC_M_RXCSR_H_RXSTALL 0x0040 +#define MGC_M_RXCSR_H_REQPKT 0x0020 +#define MGC_M_RXCSR_H_ERROR 0x0004 + +/* HUBADDR */ +#define MGC_M_HUBADDR_MULTI_TT 0x80 + +/* ULPI: Added in HDRC 1.9(?) & MHDRC 1.4 */ +#define MGC_M_ULPI_VBCTL_USEEXTVBUSIND 0x02 +#define MGC_M_ULPI_VBCTL_USEEXTVBUS 0x01 +#define MGC_M_ULPI_REGCTL_INT_ENABLE 0x08 +#define MGC_M_ULPI_REGCTL_READNOTWRITE 0x04 +#define MGC_M_ULPI_REGCTL_COMPLETE 0x02 +#define MGC_M_ULPI_REGCTL_REG 0x01 + +/* #define MUSB_DEBUG */ + +#ifdef MUSB_DEBUG +#define TRACE(fmt, ...) fprintf(stderr, "%s@%d: " fmt "\n", __func__, \ + __LINE__, ##__VA_ARGS__) +#else +#define TRACE(...) +#endif + + +static void musb_attach(USBPort *port); +static void musb_detach(USBPort *port); +static void musb_child_detach(USBPort *port, USBDevice *child); +static void musb_schedule_cb(USBPort *port, USBPacket *p); +static void musb_async_cancel_device(MUSBState *s, USBDevice *dev); + +static USBPortOps musb_port_ops = { + .attach = musb_attach, + .detach = musb_detach, + .child_detach = musb_child_detach, + .complete = musb_schedule_cb, +}; + +static USBBusOps musb_bus_ops = { +}; + +typedef struct MUSBPacket MUSBPacket; +typedef struct MUSBEndPoint MUSBEndPoint; + +struct MUSBPacket { + USBPacket p; + MUSBEndPoint *ep; + int dir; +}; + +struct MUSBEndPoint { + uint16_t faddr[2]; + uint8_t haddr[2]; + uint8_t hport[2]; + uint16_t csr[2]; + uint16_t maxp[2]; + uint16_t rxcount; + uint8_t type[2]; + uint8_t interval[2]; + uint8_t config; + uint8_t fifosize; + int timeout[2]; /* Always in microframes */ + + uint8_t *buf[2]; + int fifolen[2]; + int fifostart[2]; + int fifoaddr[2]; + MUSBPacket packey[2]; + int status[2]; + int ext_size[2]; + + /* For callbacks' use */ + int epnum; + int interrupt[2]; + MUSBState *musb; + USBCallback *delayed_cb[2]; + QEMUTimer *intv_timer[2]; +}; + +struct MUSBState { + qemu_irq irqs[musb_irq_max]; + USBBus bus; + USBPort port; + + int idx; + uint8_t devctl; + uint8_t power; + uint8_t faddr; + + uint8_t intr; + uint8_t mask; + uint16_t tx_intr; + uint16_t tx_mask; + uint16_t rx_intr; + uint16_t rx_mask; + + int setup_len; + int session; + + uint8_t buf[0x8000]; + + /* Duplicating the world since 2008!... probably we should have 32 + * logical, single endpoints instead. 
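+     * (As it stands, every [2]-indexed field of MUSBEndPoint pairs the
+     * TX half at index 0 with the RX half at index 1 of the same
+     * endpoint number, mirroring the TX.../RX... register split above.)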
*/
+    MUSBEndPoint ep[16];
+};
+
+void musb_reset(MUSBState *s)
+{
+    int i;
+
+    s->faddr = 0x00;
+    s->devctl = 0;
+    s->power = MGC_M_POWER_HSENAB;
+    s->tx_intr = 0x0000;
+    s->rx_intr = 0x0000;
+    s->tx_mask = 0xffff;
+    s->rx_mask = 0xffff;
+    s->intr = 0x00;
+    s->mask = 0x06;
+    s->idx = 0;
+
+    s->setup_len = 0;
+    s->session = 0;
+    memset(s->buf, 0, sizeof(s->buf));
+
+    /* TODO: _DW */
+    s->ep[0].config = MGC_M_CONFIGDATA_SOFTCONE | MGC_M_CONFIGDATA_DYNFIFO;
+    for (i = 0; i < 16; i ++) {
+        s->ep[i].fifosize = 64;
+        s->ep[i].maxp[0] = 0x40;
+        s->ep[i].maxp[1] = 0x40;
+        s->ep[i].musb = s;
+        s->ep[i].epnum = i;
+        usb_packet_init(&s->ep[i].packey[0].p);
+        usb_packet_init(&s->ep[i].packey[1].p);
+    }
+}
+
+struct MUSBState *musb_init(DeviceState *parent_device, int gpio_base)
+{
+    MUSBState *s = g_malloc0(sizeof(*s));
+    int i;
+
+    for (i = 0; i < musb_irq_max; i++) {
+        s->irqs[i] = qdev_get_gpio_in(parent_device, gpio_base + i);
+    }
+
+    musb_reset(s);
+
+    usb_bus_new(&s->bus, sizeof(s->bus), &musb_bus_ops, parent_device);
+    usb_register_port(&s->bus, &s->port, s, 0, &musb_port_ops,
+                      USB_SPEED_MASK_LOW | USB_SPEED_MASK_FULL);
+
+    return s;
+}
+
+static void musb_vbus_set(MUSBState *s, int level)
+{
+    if (level)
+        s->devctl |= 3 << MGC_S_DEVCTL_VBUS;
+    else
+        s->devctl &= ~MGC_M_DEVCTL_VBUS;
+
+    qemu_set_irq(s->irqs[musb_set_vbus], level);
+}
+
+static void musb_intr_set(MUSBState *s, int line, int level)
+{
+    if (!level) {
+        s->intr &= ~(1 << line);
+        qemu_irq_lower(s->irqs[line]);
+    } else if (s->mask & (1 << line)) {
+        s->intr |= 1 << line;
+        qemu_irq_raise(s->irqs[line]);
+    }
+}
+
+static void musb_tx_intr_set(MUSBState *s, int line, int level)
+{
+    if (!level) {
+        s->tx_intr &= ~(1 << line);
+        if (!s->tx_intr)
+            qemu_irq_lower(s->irqs[musb_irq_tx]);
+    } else if (s->tx_mask & (1 << line)) {
+        s->tx_intr |= 1 << line;
+        qemu_irq_raise(s->irqs[musb_irq_tx]);
+    }
+}
+
+static void musb_rx_intr_set(MUSBState *s, int line, int level)
+{
+    if (line) {
+        if (!level) {
+            s->rx_intr &= ~(1 << line);
+            if (!s->rx_intr)
+                qemu_irq_lower(s->irqs[musb_irq_rx]);
+        } else if (s->rx_mask & (1 << line)) {
+            s->rx_intr |= 1 << line;
+            qemu_irq_raise(s->irqs[musb_irq_rx]);
+        }
+    } else
+        musb_tx_intr_set(s, line, level);
+}
+
+uint32_t musb_core_intr_get(MUSBState *s)
+{
+    return (s->rx_intr << 15) | s->tx_intr;
+}
+
+void musb_core_intr_clear(MUSBState *s, uint32_t mask)
+{
+    if (s->rx_intr) {
+        s->rx_intr &= mask >> 15;
+        if (!s->rx_intr)
+            qemu_irq_lower(s->irqs[musb_irq_rx]);
+    }
+
+    if (s->tx_intr) {
+        s->tx_intr &= mask & 0xffff;
+        if (!s->tx_intr)
+            qemu_irq_lower(s->irqs[musb_irq_tx]);
+    }
+}
+
+void musb_set_size(MUSBState *s, int epnum, int size, int is_tx)
+{
+    s->ep[epnum].ext_size[!is_tx] = size;
+    s->ep[epnum].fifostart[0] = 0;
+    s->ep[epnum].fifostart[1] = 0;
+    s->ep[epnum].fifolen[0] = 0;
+    s->ep[epnum].fifolen[1] = 0;
+}
+
+static void musb_session_update(MUSBState *s, int prev_dev, int prev_sess)
+{
+    int detect_prev = prev_dev && prev_sess;
+    int detect = !!s->port.dev && s->session;
+
+    if (detect && !detect_prev) {
+        /* Let's skip the ID pin sense and VBUS sense formalities and
+         * signal a successful SRP directly. This should work at least
+         * for the Linux driver stack. */
+        musb_intr_set(s, musb_irq_connect, 1);
+
+        if (s->port.dev->speed == USB_SPEED_LOW) {
+            s->devctl &= ~MGC_M_DEVCTL_FSDEV;
+            s->devctl |= MGC_M_DEVCTL_LSDEV;
+        } else {
+            s->devctl |= MGC_M_DEVCTL_FSDEV;
+            s->devctl &= ~MGC_M_DEVCTL_LSDEV;
+        }
+
+        /* A-mode?
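+         * In OTG terms, clearing BDEVICE below marks us as the A-device,
+         * i.e. the side that supplies VBUS and defaults to the host role.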
*/ + s->devctl &= ~MGC_M_DEVCTL_BDEVICE; + + /* Host-mode bit? */ + s->devctl |= MGC_M_DEVCTL_HM; +#if 1 + musb_vbus_set(s, 1); +#endif + } else if (!detect && detect_prev) { +#if 1 + musb_vbus_set(s, 0); +#endif + } +} + +/* Attach or detach a device on our only port. */ +static void musb_attach(USBPort *port) +{ + MUSBState *s = (MUSBState *) port->opaque; + + musb_intr_set(s, musb_irq_vbus_request, 1); + musb_session_update(s, 0, s->session); +} + +static void musb_detach(USBPort *port) +{ + MUSBState *s = (MUSBState *) port->opaque; + + musb_async_cancel_device(s, port->dev); + + musb_intr_set(s, musb_irq_disconnect, 1); + musb_session_update(s, 1, s->session); +} + +static void musb_child_detach(USBPort *port, USBDevice *child) +{ + MUSBState *s = (MUSBState *) port->opaque; + + musb_async_cancel_device(s, child); +} + +static void musb_cb_tick0(void *opaque) +{ + MUSBEndPoint *ep = (MUSBEndPoint *) opaque; + + ep->delayed_cb[0](&ep->packey[0].p, opaque); +} + +static void musb_cb_tick1(void *opaque) +{ + MUSBEndPoint *ep = (MUSBEndPoint *) opaque; + + ep->delayed_cb[1](&ep->packey[1].p, opaque); +} + +#define musb_cb_tick (dir ? musb_cb_tick1 : musb_cb_tick0) + +static void musb_schedule_cb(USBPort *port, USBPacket *packey) +{ + MUSBPacket *p = container_of(packey, MUSBPacket, p); + MUSBEndPoint *ep = p->ep; + int dir = p->dir; + int timeout = 0; + + if (ep->status[dir] == USB_RET_NAK) + timeout = ep->timeout[dir]; + else if (ep->interrupt[dir]) + timeout = 8; + else { + musb_cb_tick(ep); + return; + } + + if (!ep->intv_timer[dir]) + ep->intv_timer[dir] = timer_new_ns(QEMU_CLOCK_VIRTUAL, musb_cb_tick, ep); + + timer_mod(ep->intv_timer[dir], qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + + muldiv64(timeout, NANOSECONDS_PER_SECOND, 8000)); +} + +static int musb_timeout(int ttype, int speed, int val) +{ +#if 1 + return val << 3; +#endif + + switch (ttype) { + case USB_ENDPOINT_XFER_CONTROL: + if (val < 2) + return 0; + else if (speed == USB_SPEED_HIGH) + return 1 << (val - 1); + else + return 8 << (val - 1); + + case USB_ENDPOINT_XFER_INT: + if (speed == USB_SPEED_HIGH) + if (val < 2) + return 0; + else + return 1 << (val - 1); + else + return val << 3; + + case USB_ENDPOINT_XFER_BULK: + case USB_ENDPOINT_XFER_ISOC: + if (val < 2) + return 0; + else if (speed == USB_SPEED_HIGH) + return 1 << (val - 1); + else + return 8 << (val - 1); + /* TODO: what with low-speed Bulk and Isochronous? */ + } + + hw_error("bad interval\n"); +} + +static void musb_packet(MUSBState *s, MUSBEndPoint *ep, + int epnum, int pid, int len, USBCallback cb, int dir) +{ + USBDevice *dev; + USBEndpoint *uep; + int idx = epnum && dir; + int id; + int ttype; + + /* ep->type[0,1] contains: + * in bits 7:6 the speed (0 - invalid, 1 - high, 2 - full, 3 - slow) + * in bits 5:4 the transfer type (BULK / INT) + * in bits 3:0 the EP num + */ + ttype = epnum ? (ep->type[idx] >> 4) & 3 : 0; + + ep->timeout[dir] = musb_timeout(ttype, + ep->type[idx] >> 6, ep->interval[idx]); + ep->interrupt[dir] = ttype == USB_ENDPOINT_XFER_INT; + ep->delayed_cb[dir] = cb; + + /* A wild guess on the FADDR semantics... 
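+     * (ep->faddr[idx] is taken as the target function address on the
+     * bus; a decoded example with purely illustrative values - a
+     * full-speed bulk transfer to device address 3, endpoint 2:
+     *
+     *   ep->type[idx]  = (2 << 6) | (USB_ENDPOINT_XFER_BULK << 4) | 2;
+     *   ep->faddr[idx] = 3;
+     *
+     * so usb_find_device() below resolves address 3 on our port and
+     * usb_ep_get() picks endpoint 2 of that device.)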
*/ + dev = usb_find_device(&s->port, ep->faddr[idx]); + if (dev == NULL) { + return; + } + uep = usb_ep_get(dev, pid, ep->type[idx] & 0xf); + id = pid | (dev->addr << 16) | (uep->nr << 8); + usb_packet_setup(&ep->packey[dir].p, pid, uep, 0, id, false, true); + usb_packet_addbuf(&ep->packey[dir].p, ep->buf[idx], len); + ep->packey[dir].ep = ep; + ep->packey[dir].dir = dir; + + usb_handle_packet(dev, &ep->packey[dir].p); + + if (ep->packey[dir].p.status == USB_RET_ASYNC) { + usb_device_flush_ep_queue(dev, uep); + ep->status[dir] = len; + return; + } + + if (ep->packey[dir].p.status == USB_RET_SUCCESS) { + ep->status[dir] = ep->packey[dir].p.actual_length; + } else { + ep->status[dir] = ep->packey[dir].p.status; + } + musb_schedule_cb(&s->port, &ep->packey[dir].p); +} + +static void musb_tx_packet_complete(USBPacket *packey, void *opaque) +{ + /* Unfortunately we can't use packey->devep because that's the remote + * endpoint number and may be different than our local. */ + MUSBEndPoint *ep = (MUSBEndPoint *) opaque; + int epnum = ep->epnum; + MUSBState *s = ep->musb; + + ep->fifostart[0] = 0; + ep->fifolen[0] = 0; +#ifdef CLEAR_NAK + if (ep->status[0] != USB_RET_NAK) { +#endif + if (epnum) + ep->csr[0] &= ~(MGC_M_TXCSR_FIFONOTEMPTY | MGC_M_TXCSR_TXPKTRDY); + else + ep->csr[0] &= ~MGC_M_CSR0_TXPKTRDY; +#ifdef CLEAR_NAK + } +#endif + + /* Clear all of the error bits first */ + if (epnum) + ep->csr[0] &= ~(MGC_M_TXCSR_H_ERROR | MGC_M_TXCSR_H_RXSTALL | + MGC_M_TXCSR_H_NAKTIMEOUT); + else + ep->csr[0] &= ~(MGC_M_CSR0_H_ERROR | MGC_M_CSR0_H_RXSTALL | + MGC_M_CSR0_H_NAKTIMEOUT | MGC_M_CSR0_H_NO_PING); + + if (ep->status[0] == USB_RET_STALL) { + /* Command not supported by target! */ + ep->status[0] = 0; + + if (epnum) + ep->csr[0] |= MGC_M_TXCSR_H_RXSTALL; + else + ep->csr[0] |= MGC_M_CSR0_H_RXSTALL; + } + + if (ep->status[0] == USB_RET_NAK) { + ep->status[0] = 0; + + /* NAK timeouts are only generated in Bulk transfers and + * Data-errors in Isochronous. */ + if (ep->interrupt[0]) { + return; + } + + if (epnum) + ep->csr[0] |= MGC_M_TXCSR_H_NAKTIMEOUT; + else + ep->csr[0] |= MGC_M_CSR0_H_NAKTIMEOUT; + } + + if (ep->status[0] < 0) { + if (ep->status[0] == USB_RET_BABBLE) + musb_intr_set(s, musb_irq_rst_babble, 1); + + /* Pretend we've tried three times already and failed (in + * case of USB_TOKEN_SETUP). */ + if (epnum) + ep->csr[0] |= MGC_M_TXCSR_H_ERROR; + else + ep->csr[0] |= MGC_M_CSR0_H_ERROR; + + musb_tx_intr_set(s, epnum, 1); + return; + } + /* TODO: check len for over/underruns of an OUT packet? */ + +#ifdef SETUPLEN_HACK + if (!epnum && ep->packey[0].pid == USB_TOKEN_SETUP) + s->setup_len = ep->packey[0].data[6]; +#endif + + /* In DMA mode: if no error, assert DMA request for this EP, + * and skip the interrupt. */ + musb_tx_intr_set(s, epnum, 1); +} + +static void musb_rx_packet_complete(USBPacket *packey, void *opaque) +{ + /* Unfortunately we can't use packey->devep because that's the remote + * endpoint number and may be different than our local. 
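+     * (which is why the local MUSBEndPoint arrives through the opaque
+     * argument instead of being derived from the packet.)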
*/ + MUSBEndPoint *ep = (MUSBEndPoint *) opaque; + int epnum = ep->epnum; + MUSBState *s = ep->musb; + + ep->fifostart[1] = 0; + ep->fifolen[1] = 0; + +#ifdef CLEAR_NAK + if (ep->status[1] != USB_RET_NAK) { +#endif + ep->csr[1] &= ~MGC_M_RXCSR_H_REQPKT; + if (!epnum) + ep->csr[0] &= ~MGC_M_CSR0_H_REQPKT; +#ifdef CLEAR_NAK + } +#endif + + /* Clear all of the imaginable error bits first */ + ep->csr[1] &= ~(MGC_M_RXCSR_H_ERROR | MGC_M_RXCSR_H_RXSTALL | + MGC_M_RXCSR_DATAERROR); + if (!epnum) + ep->csr[0] &= ~(MGC_M_CSR0_H_ERROR | MGC_M_CSR0_H_RXSTALL | + MGC_M_CSR0_H_NAKTIMEOUT | MGC_M_CSR0_H_NO_PING); + + if (ep->status[1] == USB_RET_STALL) { + ep->status[1] = 0; + + ep->csr[1] |= MGC_M_RXCSR_H_RXSTALL; + if (!epnum) + ep->csr[0] |= MGC_M_CSR0_H_RXSTALL; + } + + if (ep->status[1] == USB_RET_NAK) { + ep->status[1] = 0; + + /* NAK timeouts are only generated in Bulk transfers and + * Data-errors in Isochronous. */ + if (ep->interrupt[1]) { + musb_packet(s, ep, epnum, USB_TOKEN_IN, + packey->iov.size, musb_rx_packet_complete, 1); + return; + } + + ep->csr[1] |= MGC_M_RXCSR_DATAERROR; + if (!epnum) + ep->csr[0] |= MGC_M_CSR0_H_NAKTIMEOUT; + } + + if (ep->status[1] < 0) { + if (ep->status[1] == USB_RET_BABBLE) { + musb_intr_set(s, musb_irq_rst_babble, 1); + return; + } + + /* Pretend we've tried three times already and failed (in + * case of a control transfer). */ + ep->csr[1] |= MGC_M_RXCSR_H_ERROR; + if (!epnum) + ep->csr[0] |= MGC_M_CSR0_H_ERROR; + + musb_rx_intr_set(s, epnum, 1); + return; + } + /* TODO: check len for over/underruns of an OUT packet? */ + /* TODO: perhaps make use of e->ext_size[1] here. */ + + if (!(ep->csr[1] & (MGC_M_RXCSR_H_RXSTALL | MGC_M_RXCSR_DATAERROR))) { + ep->csr[1] |= MGC_M_RXCSR_FIFOFULL | MGC_M_RXCSR_RXPKTRDY; + if (!epnum) + ep->csr[0] |= MGC_M_CSR0_RXPKTRDY; + + ep->rxcount = ep->status[1]; /* XXX: MIN(packey->len, ep->maxp[1]); */ + /* In DMA mode: assert DMA request for this EP */ + } + + /* Only if DMA has not been asserted */ + musb_rx_intr_set(s, epnum, 1); +} + +static void musb_async_cancel_device(MUSBState *s, USBDevice *dev) +{ + int ep, dir; + + for (ep = 0; ep < 16; ep++) { + for (dir = 0; dir < 2; dir++) { + if (!usb_packet_is_inflight(&s->ep[ep].packey[dir].p) || + s->ep[ep].packey[dir].p.ep->dev != dev) { + continue; + } + usb_cancel_packet(&s->ep[ep].packey[dir].p); + /* status updates needed here? */ + } + } +} + +static void musb_tx_rdy(MUSBState *s, int epnum) +{ + MUSBEndPoint *ep = s->ep + epnum; + int pid; + int total, valid = 0; + TRACE("start %d, len %d", ep->fifostart[0], ep->fifolen[0] ); + ep->fifostart[0] += ep->fifolen[0]; + ep->fifolen[0] = 0; + + /* XXX: how's the total size of the packet retrieved exactly in + * the generic case? */ + total = ep->maxp[0] & 0x3ff; + + if (ep->ext_size[0]) { + total = ep->ext_size[0]; + ep->ext_size[0] = 0; + valid = 1; + } + + /* If the packet is not fully ready yet, wait for a next segment. */ + if (epnum && (ep->fifostart[0]) < total) + return; + + if (!valid) + total = ep->fifostart[0]; + + pid = USB_TOKEN_OUT; + if (!epnum && (ep->csr[0] & MGC_M_CSR0_H_SETUPPKT)) { + pid = USB_TOKEN_SETUP; + if (total != 8) { + TRACE("illegal SETUPPKT length of %i bytes", total); + } + /* Controller should retry SETUP packets three times on errors + * but it doesn't make sense for us to do that. 
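+     * (on error, musb_tx_packet_complete() simply reports the failure
+     * as if all three attempts had already happened.)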
*/ + } + + musb_packet(s, ep, epnum, pid, total, musb_tx_packet_complete, 0); +} + +static void musb_rx_req(MUSBState *s, int epnum) +{ + MUSBEndPoint *ep = s->ep + epnum; + int total; + + /* If we already have a packet, which didn't fit into the + * 64 bytes of the FIFO, only move the FIFO start and return. (Obsolete) */ + if (ep->packey[1].p.pid == USB_TOKEN_IN && ep->status[1] >= 0 && + (ep->fifostart[1]) + ep->rxcount < + ep->packey[1].p.iov.size) { + TRACE("0x%08x, %d", ep->fifostart[1], ep->rxcount ); + ep->fifostart[1] += ep->rxcount; + ep->fifolen[1] = 0; + + ep->rxcount = MIN(ep->packey[0].p.iov.size - (ep->fifostart[1]), + ep->maxp[1]); + + ep->csr[1] &= ~MGC_M_RXCSR_H_REQPKT; + if (!epnum) + ep->csr[0] &= ~MGC_M_CSR0_H_REQPKT; + + /* Clear all of the error bits first */ + ep->csr[1] &= ~(MGC_M_RXCSR_H_ERROR | MGC_M_RXCSR_H_RXSTALL | + MGC_M_RXCSR_DATAERROR); + if (!epnum) + ep->csr[0] &= ~(MGC_M_CSR0_H_ERROR | MGC_M_CSR0_H_RXSTALL | + MGC_M_CSR0_H_NAKTIMEOUT | MGC_M_CSR0_H_NO_PING); + + ep->csr[1] |= MGC_M_RXCSR_FIFOFULL | MGC_M_RXCSR_RXPKTRDY; + if (!epnum) + ep->csr[0] |= MGC_M_CSR0_RXPKTRDY; + musb_rx_intr_set(s, epnum, 1); + return; + } + + /* The driver sets maxp[1] to 64 or less because it knows the hardware + * FIFO is this deep. Bigger packets get split in + * usb_generic_handle_packet but we can also do the splitting locally + * for performance. It turns out we can also have a bigger FIFO and + * ignore the limit set in ep->maxp[1]. The Linux MUSB driver deals + * OK with single packets of even 32KB and we avoid splitting, however + * usb_msd.c sometimes sends a packet bigger than what Linux expects + * (e.g. 8192 bytes instead of 4096) and we get an OVERRUN. Splitting + * hides this overrun from Linux. Up to 4096 everything is fine + * though. Currently this is disabled. + * + * XXX: mind ep->fifosize. */ + total = MIN(ep->maxp[1] & 0x3ff, sizeof(s->buf)); + +#ifdef SETUPLEN_HACK + /* Why should *we* do that instead of Linux? 
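+     * (a sketch of the hack, with illustrative numbers: for a control
+     * transfer, musb_tx_packet_complete() stashed byte 6 of the SETUP
+     * packet - the low byte of wLength, e.g. 18 for a device descriptor
+     * read - in s->setup_len, and each IN request below is clamped to
+     * whatever remains of it.)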
*/ + if (!epnum) { + if (ep->packey[0].p.devaddr == 2) { + total = MIN(s->setup_len, 8); + } else { + total = MIN(s->setup_len, 64); + } + s->setup_len -= total; + } +#endif + + musb_packet(s, ep, epnum, USB_TOKEN_IN, total, musb_rx_packet_complete, 1); +} + +static uint8_t musb_read_fifo(MUSBEndPoint *ep) +{ + uint8_t value; + if (ep->fifolen[1] >= 64) { + /* We have a FIFO underrun */ + TRACE("EP%d FIFO is now empty, stop reading", ep->epnum); + return 0x00000000; + } + /* In DMA mode clear RXPKTRDY and set REQPKT automatically + * (if AUTOREQ is set) */ + + ep->csr[1] &= ~MGC_M_RXCSR_FIFOFULL; + value=ep->buf[1][ep->fifostart[1] + ep->fifolen[1] ++]; + TRACE("EP%d 0x%02x, %d", ep->epnum, value, ep->fifolen[1] ); + return value; +} + +static void musb_write_fifo(MUSBEndPoint *ep, uint8_t value) +{ + TRACE("EP%d = %02x", ep->epnum, value); + if (ep->fifolen[0] >= 64) { + /* We have a FIFO overrun */ + TRACE("EP%d FIFO exceeded 64 bytes, stop feeding data", ep->epnum); + return; + } + + ep->buf[0][ep->fifostart[0] + ep->fifolen[0] ++] = value; + ep->csr[0] |= MGC_M_TXCSR_FIFONOTEMPTY; +} + +static void musb_ep_frame_cancel(MUSBEndPoint *ep, int dir) +{ + if (ep->intv_timer[dir]) + timer_del(ep->intv_timer[dir]); +} + +/* Bus control */ +static uint8_t musb_busctl_readb(void *opaque, int ep, int addr) +{ + MUSBState *s = (MUSBState *) opaque; + + switch (addr) { + /* For USB2.0 HS hubs only */ + case MUSB_HDRC_TXHUBADDR: + return s->ep[ep].haddr[0]; + case MUSB_HDRC_TXHUBPORT: + return s->ep[ep].hport[0]; + case MUSB_HDRC_RXHUBADDR: + return s->ep[ep].haddr[1]; + case MUSB_HDRC_RXHUBPORT: + return s->ep[ep].hport[1]; + + default: + TRACE("unknown register 0x%02x", addr); + return 0x00; + }; +} + +static void musb_busctl_writeb(void *opaque, int ep, int addr, uint8_t value) +{ + MUSBState *s = (MUSBState *) opaque; + + switch (addr) { + case MUSB_HDRC_TXFUNCADDR: + s->ep[ep].faddr[0] = value; + break; + case MUSB_HDRC_RXFUNCADDR: + s->ep[ep].faddr[1] = value; + break; + case MUSB_HDRC_TXHUBADDR: + s->ep[ep].haddr[0] = value; + break; + case MUSB_HDRC_TXHUBPORT: + s->ep[ep].hport[0] = value; + break; + case MUSB_HDRC_RXHUBADDR: + s->ep[ep].haddr[1] = value; + break; + case MUSB_HDRC_RXHUBPORT: + s->ep[ep].hport[1] = value; + break; + + default: + TRACE("unknown register 0x%02x", addr); + break; + }; +} + +static uint16_t musb_busctl_readh(void *opaque, int ep, int addr) +{ + MUSBState *s = (MUSBState *) opaque; + + switch (addr) { + case MUSB_HDRC_TXFUNCADDR: + return s->ep[ep].faddr[0]; + case MUSB_HDRC_RXFUNCADDR: + return s->ep[ep].faddr[1]; + + default: + return musb_busctl_readb(s, ep, addr) | + (musb_busctl_readb(s, ep, addr | 1) << 8); + }; +} + +static void musb_busctl_writeh(void *opaque, int ep, int addr, uint16_t value) +{ + MUSBState *s = (MUSBState *) opaque; + + switch (addr) { + case MUSB_HDRC_TXFUNCADDR: + s->ep[ep].faddr[0] = value; + break; + case MUSB_HDRC_RXFUNCADDR: + s->ep[ep].faddr[1] = value; + break; + + default: + musb_busctl_writeb(s, ep, addr, value & 0xff); + musb_busctl_writeb(s, ep, addr | 1, value >> 8); + }; +} + +/* Endpoint control */ +static uint8_t musb_ep_readb(void *opaque, int ep, int addr) +{ + MUSBState *s = (MUSBState *) opaque; + + switch (addr) { + case MUSB_HDRC_TXTYPE: + return s->ep[ep].type[0]; + case MUSB_HDRC_TXINTERVAL: + return s->ep[ep].interval[0]; + case MUSB_HDRC_RXTYPE: + return s->ep[ep].type[1]; + case MUSB_HDRC_RXINTERVAL: + return s->ep[ep].interval[1]; + case (MUSB_HDRC_FIFOSIZE & ~1): + return 0x00; + case MUSB_HDRC_FIFOSIZE: + 
return ep ? s->ep[ep].fifosize : s->ep[ep].config; + case MUSB_HDRC_RXCOUNT: + return s->ep[ep].rxcount; + + default: + TRACE("unknown register 0x%02x", addr); + return 0x00; + }; +} + +static void musb_ep_writeb(void *opaque, int ep, int addr, uint8_t value) +{ + MUSBState *s = (MUSBState *) opaque; + + switch (addr) { + case MUSB_HDRC_TXTYPE: + s->ep[ep].type[0] = value; + break; + case MUSB_HDRC_TXINTERVAL: + s->ep[ep].interval[0] = value; + musb_ep_frame_cancel(&s->ep[ep], 0); + break; + case MUSB_HDRC_RXTYPE: + s->ep[ep].type[1] = value; + break; + case MUSB_HDRC_RXINTERVAL: + s->ep[ep].interval[1] = value; + musb_ep_frame_cancel(&s->ep[ep], 1); + break; + case (MUSB_HDRC_FIFOSIZE & ~1): + break; + case MUSB_HDRC_FIFOSIZE: + TRACE("somebody messes with fifosize (now %i bytes)", value); + s->ep[ep].fifosize = value; + break; + default: + TRACE("unknown register 0x%02x", addr); + break; + }; +} + +static uint16_t musb_ep_readh(void *opaque, int ep, int addr) +{ + MUSBState *s = (MUSBState *) opaque; + uint16_t ret; + + switch (addr) { + case MUSB_HDRC_TXMAXP: + return s->ep[ep].maxp[0]; + case MUSB_HDRC_TXCSR: + return s->ep[ep].csr[0]; + case MUSB_HDRC_RXMAXP: + return s->ep[ep].maxp[1]; + case MUSB_HDRC_RXCSR: + ret = s->ep[ep].csr[1]; + + /* TODO: This and other bits probably depend on + * ep->csr[1] & MGC_M_RXCSR_AUTOCLEAR. */ + if (s->ep[ep].csr[1] & MGC_M_RXCSR_AUTOCLEAR) + s->ep[ep].csr[1] &= ~MGC_M_RXCSR_RXPKTRDY; + + return ret; + case MUSB_HDRC_RXCOUNT: + return s->ep[ep].rxcount; + + default: + return musb_ep_readb(s, ep, addr) | + (musb_ep_readb(s, ep, addr | 1) << 8); + }; +} + +static void musb_ep_writeh(void *opaque, int ep, int addr, uint16_t value) +{ + MUSBState *s = (MUSBState *) opaque; + + switch (addr) { + case MUSB_HDRC_TXMAXP: + s->ep[ep].maxp[0] = value; + break; + case MUSB_HDRC_TXCSR: + if (ep) { + s->ep[ep].csr[0] &= value & 0xa6; + s->ep[ep].csr[0] |= value & 0xff59; + } else { + s->ep[ep].csr[0] &= value & 0x85; + s->ep[ep].csr[0] |= value & 0xf7a; + } + + musb_ep_frame_cancel(&s->ep[ep], 0); + + if ((ep && (value & MGC_M_TXCSR_FLUSHFIFO)) || + (!ep && (value & MGC_M_CSR0_FLUSHFIFO))) { + s->ep[ep].fifolen[0] = 0; + s->ep[ep].fifostart[0] = 0; + if (ep) + s->ep[ep].csr[0] &= + ~(MGC_M_TXCSR_FIFONOTEMPTY | MGC_M_TXCSR_TXPKTRDY); + else + s->ep[ep].csr[0] &= + ~(MGC_M_CSR0_TXPKTRDY | MGC_M_CSR0_RXPKTRDY); + } + if ( + (ep && +#ifdef CLEAR_NAK + (value & MGC_M_TXCSR_TXPKTRDY) && + !(value & MGC_M_TXCSR_H_NAKTIMEOUT)) || +#else + (value & MGC_M_TXCSR_TXPKTRDY)) || +#endif + (!ep && +#ifdef CLEAR_NAK + (value & MGC_M_CSR0_TXPKTRDY) && + !(value & MGC_M_CSR0_H_NAKTIMEOUT))) +#else + (value & MGC_M_CSR0_TXPKTRDY))) +#endif + musb_tx_rdy(s, ep); + if (!ep && + (value & MGC_M_CSR0_H_REQPKT) && +#ifdef CLEAR_NAK + !(value & (MGC_M_CSR0_H_NAKTIMEOUT | + MGC_M_CSR0_RXPKTRDY))) +#else + !(value & MGC_M_CSR0_RXPKTRDY)) +#endif + musb_rx_req(s, ep); + break; + + case MUSB_HDRC_RXMAXP: + s->ep[ep].maxp[1] = value; + break; + case MUSB_HDRC_RXCSR: + /* (DMA mode only) */ + if ( + (value & MGC_M_RXCSR_H_AUTOREQ) && + !(value & MGC_M_RXCSR_RXPKTRDY) && + (s->ep[ep].csr[1] & MGC_M_RXCSR_RXPKTRDY)) + value |= MGC_M_RXCSR_H_REQPKT; + + s->ep[ep].csr[1] &= 0x102 | (value & 0x4d); + s->ep[ep].csr[1] |= value & 0xfeb0; + + musb_ep_frame_cancel(&s->ep[ep], 1); + + if (value & MGC_M_RXCSR_FLUSHFIFO) { + s->ep[ep].fifolen[1] = 0; + s->ep[ep].fifostart[1] = 0; + s->ep[ep].csr[1] &= ~(MGC_M_RXCSR_FIFOFULL | MGC_M_RXCSR_RXPKTRDY); + /* If double buffering and we have two packets ready, 
flush + * only the first one and set up the fifo at the second packet. */ + } +#ifdef CLEAR_NAK + if ((value & MGC_M_RXCSR_H_REQPKT) && !(value & MGC_M_RXCSR_DATAERROR)) +#else + if (value & MGC_M_RXCSR_H_REQPKT) +#endif + musb_rx_req(s, ep); + break; + case MUSB_HDRC_RXCOUNT: + s->ep[ep].rxcount = value; + break; + + default: + musb_ep_writeb(s, ep, addr, value & 0xff); + musb_ep_writeb(s, ep, addr | 1, value >> 8); + }; +} + +/* Generic control */ +static uint32_t musb_readb(void *opaque, hwaddr addr) +{ + MUSBState *s = (MUSBState *) opaque; + int ep, i; + uint8_t ret; + + switch (addr) { + case MUSB_HDRC_FADDR: + return s->faddr; + case MUSB_HDRC_POWER: + return s->power; + case MUSB_HDRC_INTRUSB: + ret = s->intr; + for (i = 0; i < sizeof(ret) * 8; i ++) + if (ret & (1 << i)) + musb_intr_set(s, i, 0); + return ret; + case MUSB_HDRC_INTRUSBE: + return s->mask; + case MUSB_HDRC_INDEX: + return s->idx; + case MUSB_HDRC_TESTMODE: + return 0x00; + + case MUSB_HDRC_EP_IDX ... (MUSB_HDRC_EP_IDX + 0xf): + return musb_ep_readb(s, s->idx, addr & 0xf); + + case MUSB_HDRC_DEVCTL: + return s->devctl; + + case MUSB_HDRC_TXFIFOSZ: + case MUSB_HDRC_RXFIFOSZ: + case MUSB_HDRC_VCTRL: + /* TODO */ + return 0x00; + + case MUSB_HDRC_HWVERS: + return (1 << 10) | 400; + + case (MUSB_HDRC_VCTRL | 1): + case (MUSB_HDRC_HWVERS | 1): + case (MUSB_HDRC_DEVCTL | 1): + return 0x00; + + case MUSB_HDRC_BUSCTL ... (MUSB_HDRC_BUSCTL + 0x7f): + ep = (addr >> 3) & 0xf; + return musb_busctl_readb(s, ep, addr & 0x7); + + case MUSB_HDRC_EP ... (MUSB_HDRC_EP + 0xff): + ep = (addr >> 4) & 0xf; + return musb_ep_readb(s, ep, addr & 0xf); + + case MUSB_HDRC_FIFO ... (MUSB_HDRC_FIFO + 0x3f): + ep = ((addr - MUSB_HDRC_FIFO) >> 2) & 0xf; + return musb_read_fifo(s->ep + ep); + + default: + TRACE("unknown register 0x%02x", (int) addr); + return 0x00; + }; +} + +static void musb_writeb(void *opaque, hwaddr addr, uint32_t value) +{ + MUSBState *s = (MUSBState *) opaque; + int ep; + + switch (addr) { + case MUSB_HDRC_FADDR: + s->faddr = value & 0x7f; + break; + case MUSB_HDRC_POWER: + s->power = (value & 0xef) | (s->power & 0x10); + /* MGC_M_POWER_RESET is also read-only in Peripheral Mode */ + if ((value & MGC_M_POWER_RESET) && s->port.dev) { + usb_device_reset(s->port.dev); + /* Negotiate high-speed operation if MGC_M_POWER_HSENAB is set. */ + if ((value & MGC_M_POWER_HSENAB) && + s->port.dev->speed == USB_SPEED_HIGH) + s->power |= MGC_M_POWER_HSMODE; /* Success */ + /* Restart frame counting. */ + } + if (value & MGC_M_POWER_SUSPENDM) { + /* When all transfers finish, suspend and if MGC_M_POWER_ENSUSPEND + * is set, also go into low power mode. Frame counting stops. */ + /* XXX: Cleared when the interrupt register is read */ + } + if (value & MGC_M_POWER_RESUME) { + /* Wait 20ms and signal resuming on the bus. Frame counting + * restarts. */ + } + break; + case MUSB_HDRC_INTRUSB: + break; + case MUSB_HDRC_INTRUSBE: + s->mask = value & 0xff; + break; + case MUSB_HDRC_INDEX: + s->idx = value & 0xf; + break; + case MUSB_HDRC_TESTMODE: + break; + + case MUSB_HDRC_EP_IDX ... (MUSB_HDRC_EP_IDX + 0xf): + musb_ep_writeb(s, s->idx, addr & 0xf, value); + break; + + case MUSB_HDRC_DEVCTL: + s->session = !!(value & MGC_M_DEVCTL_SESSION); + musb_session_update(s, + !!s->port.dev, + !!(s->devctl & MGC_M_DEVCTL_SESSION)); + + /* It seems this is the only R/W bit in this register? 
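+     * (The others - HM, FSDEV/LSDEV, the VBUS level and BDEVICE - are
+     * status bits kept up to date by musb_session_update() and
+     * musb_vbus_set(), so only SESSION is copied from the guest value
+     * below.)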
*/ + s->devctl &= ~MGC_M_DEVCTL_SESSION; + s->devctl |= value & MGC_M_DEVCTL_SESSION; + break; + + case MUSB_HDRC_TXFIFOSZ: + case MUSB_HDRC_RXFIFOSZ: + case MUSB_HDRC_VCTRL: + /* TODO */ + break; + + case (MUSB_HDRC_VCTRL | 1): + case (MUSB_HDRC_DEVCTL | 1): + break; + + case MUSB_HDRC_BUSCTL ... (MUSB_HDRC_BUSCTL + 0x7f): + ep = (addr >> 3) & 0xf; + musb_busctl_writeb(s, ep, addr & 0x7, value); + break; + + case MUSB_HDRC_EP ... (MUSB_HDRC_EP + 0xff): + ep = (addr >> 4) & 0xf; + musb_ep_writeb(s, ep, addr & 0xf, value); + break; + + case MUSB_HDRC_FIFO ... (MUSB_HDRC_FIFO + 0x3f): + ep = ((addr - MUSB_HDRC_FIFO) >> 2) & 0xf; + musb_write_fifo(s->ep + ep, value & 0xff); + break; + + default: + TRACE("unknown register 0x%02x", (int) addr); + break; + }; +} + +static uint32_t musb_readh(void *opaque, hwaddr addr) +{ + MUSBState *s = (MUSBState *) opaque; + int ep, i; + uint16_t ret; + + switch (addr) { + case MUSB_HDRC_INTRTX: + ret = s->tx_intr; + /* Auto clear */ + for (i = 0; i < sizeof(ret) * 8; i ++) + if (ret & (1 << i)) + musb_tx_intr_set(s, i, 0); + return ret; + case MUSB_HDRC_INTRRX: + ret = s->rx_intr; + /* Auto clear */ + for (i = 0; i < sizeof(ret) * 8; i ++) + if (ret & (1 << i)) + musb_rx_intr_set(s, i, 0); + return ret; + case MUSB_HDRC_INTRTXE: + return s->tx_mask; + case MUSB_HDRC_INTRRXE: + return s->rx_mask; + + case MUSB_HDRC_FRAME: + /* TODO */ + return 0x0000; + case MUSB_HDRC_TXFIFOADDR: + return s->ep[s->idx].fifoaddr[0]; + case MUSB_HDRC_RXFIFOADDR: + return s->ep[s->idx].fifoaddr[1]; + + case MUSB_HDRC_EP_IDX ... (MUSB_HDRC_EP_IDX + 0xf): + return musb_ep_readh(s, s->idx, addr & 0xf); + + case MUSB_HDRC_BUSCTL ... (MUSB_HDRC_BUSCTL + 0x7f): + ep = (addr >> 3) & 0xf; + return musb_busctl_readh(s, ep, addr & 0x7); + + case MUSB_HDRC_EP ... (MUSB_HDRC_EP + 0xff): + ep = (addr >> 4) & 0xf; + return musb_ep_readh(s, ep, addr & 0xf); + + case MUSB_HDRC_FIFO ... (MUSB_HDRC_FIFO + 0x3f): + ep = ((addr - MUSB_HDRC_FIFO) >> 2) & 0xf; + return (musb_read_fifo(s->ep + ep) | musb_read_fifo(s->ep + ep) << 8); + + default: + return musb_readb(s, addr) | (musb_readb(s, addr | 1) << 8); + }; +} + +static void musb_writeh(void *opaque, hwaddr addr, uint32_t value) +{ + MUSBState *s = (MUSBState *) opaque; + int ep; + + switch (addr) { + case MUSB_HDRC_INTRTXE: + s->tx_mask = value; + /* XXX: the masks seem to apply on the raising edge like with + * edge-triggered interrupts, thus no need to update. I may be + * wrong though. */ + break; + case MUSB_HDRC_INTRRXE: + s->rx_mask = value; + break; + + case MUSB_HDRC_FRAME: + /* TODO */ + break; + case MUSB_HDRC_TXFIFOADDR: + s->ep[s->idx].fifoaddr[0] = value; + s->ep[s->idx].buf[0] = + s->buf + ((value << 3) & 0x7ff ); + break; + case MUSB_HDRC_RXFIFOADDR: + s->ep[s->idx].fifoaddr[1] = value; + s->ep[s->idx].buf[1] = + s->buf + ((value << 3) & 0x7ff); + break; + + case MUSB_HDRC_EP_IDX ... (MUSB_HDRC_EP_IDX + 0xf): + musb_ep_writeh(s, s->idx, addr & 0xf, value); + break; + + case MUSB_HDRC_BUSCTL ... (MUSB_HDRC_BUSCTL + 0x7f): + ep = (addr >> 3) & 0xf; + musb_busctl_writeh(s, ep, addr & 0x7, value); + break; + + case MUSB_HDRC_EP ... (MUSB_HDRC_EP + 0xff): + ep = (addr >> 4) & 0xf; + musb_ep_writeh(s, ep, addr & 0xf, value); + break; + + case MUSB_HDRC_FIFO ... 
(MUSB_HDRC_FIFO + 0x3f):
+        ep = ((addr - MUSB_HDRC_FIFO) >> 2) & 0xf;
+        musb_write_fifo(s->ep + ep, value & 0xff);
+        musb_write_fifo(s->ep + ep, (value >> 8) & 0xff);
+        break;
+
+    default:
+        musb_writeb(s, addr, value & 0xff);
+        musb_writeb(s, addr | 1, value >> 8);
+    };
+}
+
+static uint32_t musb_readw(void *opaque, hwaddr addr)
+{
+    MUSBState *s = (MUSBState *) opaque;
+    int ep;
+
+    switch (addr) {
+    case MUSB_HDRC_FIFO ... (MUSB_HDRC_FIFO + 0x3f):
+        ep = ((addr - MUSB_HDRC_FIFO) >> 2) & 0xf;
+        return ( musb_read_fifo(s->ep + ep)       |
+                 musb_read_fifo(s->ep + ep) << 8  |
+                 musb_read_fifo(s->ep + ep) << 16 |
+                 musb_read_fifo(s->ep + ep) << 24 );
+    default:
+        TRACE("unknown register 0x%02x", (int) addr);
+        return 0x00000000;
+    };
+}
+
+static void musb_writew(void *opaque, hwaddr addr, uint32_t value)
+{
+    MUSBState *s = (MUSBState *) opaque;
+    int ep;
+
+    switch (addr) {
+    case MUSB_HDRC_FIFO ... (MUSB_HDRC_FIFO + 0x3f):
+        ep = ((addr - MUSB_HDRC_FIFO) >> 2) & 0xf;
+        musb_write_fifo(s->ep + ep, value & 0xff);
+        musb_write_fifo(s->ep + ep, (value >> 8 ) & 0xff);
+        musb_write_fifo(s->ep + ep, (value >> 16) & 0xff);
+        musb_write_fifo(s->ep + ep, (value >> 24) & 0xff);
+        break;
+    default:
+        TRACE("unknown register 0x%02x", (int) addr);
+        break;
+    };
+}
+
+MUSBReadFunc * const musb_read[] = {
+    musb_readb,
+    musb_readh,
+    musb_readw,
+};
+
+MUSBWriteFunc * const musb_write[] = {
+    musb_writeb,
+    musb_writeh,
+    musb_writew,
+};
diff --git a/hw/usb/hcd-ohci-pci.c b/hw/usb/hcd-ohci-pci.c
new file mode 100644
index 000000000..8e1146b86
--- /dev/null
+++ b/hw/usb/hcd-ohci-pci.c
@@ -0,0 +1,164 @@
+/*
+ * QEMU USB OHCI Emulation
+ * Copyright (c) 2004 Gianni Tedesco
+ * Copyright (c) 2006 CodeSourcery
+ * Copyright (c) 2006 Openedhand Ltd.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "qemu/timer.h"
+#include "hw/usb.h"
+#include "migration/vmstate.h"
+#include "hw/pci/pci.h"
+#include "hw/sysbus.h"
+#include "hw/qdev-dma.h"
+#include "hw/qdev-properties.h"
+#include "trace.h"
+#include "hcd-ohci.h"
+#include "qom/object.h"
+
+#define TYPE_PCI_OHCI "pci-ohci"
+OBJECT_DECLARE_SIMPLE_TYPE(OHCIPCIState, PCI_OHCI)
+
+struct OHCIPCIState {
+    /*< private >*/
+    PCIDevice parent_obj;
+    /*< public >*/
+
+    OHCIState state;
+    char *masterbus;
+    uint32_t num_ports;
+    uint32_t firstport;
+};
+
+/**
+ * A typical PCI OHCI will additionally set PERR in its configspace to
+ * signal that it got an error.
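+ * This is modelled below by flagging a detected parity error in the
+ * PCI status word on top of the generic ohci_sysbus_die() handling,
+ * roughly:
+ *
+ *   pci_set_word(dev->parent_obj.config + PCI_STATUS,
+ *                PCI_STATUS_DETECTED_PARITY);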
+ */ +static void ohci_pci_die(struct OHCIState *ohci) +{ + OHCIPCIState *dev = container_of(ohci, OHCIPCIState, state); + + ohci_sysbus_die(ohci); + + pci_set_word(dev->parent_obj.config + PCI_STATUS, + PCI_STATUS_DETECTED_PARITY); +} + +static void usb_ohci_realize_pci(PCIDevice *dev, Error **errp) +{ + Error *err = NULL; + OHCIPCIState *ohci = PCI_OHCI(dev); + + dev->config[PCI_CLASS_PROG] = 0x10; /* OHCI */ + dev->config[PCI_INTERRUPT_PIN] = 0x01; /* interrupt pin A */ + + usb_ohci_init(&ohci->state, DEVICE(dev), ohci->num_ports, 0, + ohci->masterbus, ohci->firstport, + pci_get_address_space(dev), ohci_pci_die, &err); + if (err) { + error_propagate(errp, err); + return; + } + + ohci->state.irq = pci_allocate_irq(dev); + pci_register_bar(dev, 0, 0, &ohci->state.mem); +} + +static void usb_ohci_exit(PCIDevice *dev) +{ + OHCIPCIState *ohci = PCI_OHCI(dev); + OHCIState *s = &ohci->state; + + trace_usb_ohci_exit(s->name); + ohci_bus_stop(s); + + if (s->async_td) { + usb_cancel_packet(&s->usb_packet); + s->async_td = 0; + } + ohci_stop_endpoints(s); + + if (!ohci->masterbus) { + usb_bus_release(&s->bus); + } + + timer_free(s->eof_timer); +} + +static void usb_ohci_reset_pci(DeviceState *d) +{ + PCIDevice *dev = PCI_DEVICE(d); + OHCIPCIState *ohci = PCI_OHCI(dev); + OHCIState *s = &ohci->state; + + ohci_hard_reset(s); +} + +static Property ohci_pci_properties[] = { + DEFINE_PROP_STRING("masterbus", OHCIPCIState, masterbus), + DEFINE_PROP_UINT32("num-ports", OHCIPCIState, num_ports, 3), + DEFINE_PROP_UINT32("firstport", OHCIPCIState, firstport, 0), + DEFINE_PROP_END_OF_LIST(), +}; + +static const VMStateDescription vmstate_ohci = { + .name = "ohci", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_PCI_DEVICE(parent_obj, OHCIPCIState), + VMSTATE_STRUCT(state, OHCIPCIState, 1, vmstate_ohci_state, OHCIState), + VMSTATE_END_OF_LIST() + } +}; + +static void ohci_pci_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + + k->realize = usb_ohci_realize_pci; + k->exit = usb_ohci_exit; + k->vendor_id = PCI_VENDOR_ID_APPLE; + k->device_id = PCI_DEVICE_ID_APPLE_IPID_USB; + k->class_id = PCI_CLASS_SERIAL_USB; + set_bit(DEVICE_CATEGORY_USB, dc->categories); + dc->desc = "Apple USB Controller"; + device_class_set_props(dc, ohci_pci_properties); + dc->hotpluggable = false; + dc->vmsd = &vmstate_ohci; + dc->reset = usb_ohci_reset_pci; +} + +static const TypeInfo ohci_pci_info = { + .name = TYPE_PCI_OHCI, + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(OHCIPCIState), + .class_init = ohci_pci_class_init, + .interfaces = (InterfaceInfo[]) { + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { }, + }, +}; + +static void ohci_pci_register_types(void) +{ + type_register_static(&ohci_pci_info); +} + +type_init(ohci_pci_register_types) diff --git a/hw/usb/hcd-ohci.c b/hw/usb/hcd-ohci.c new file mode 100644 index 000000000..1cf281677 --- /dev/null +++ b/hw/usb/hcd-ohci.c @@ -0,0 +1,2028 @@ +/* + * QEMU USB OHCI Emulation + * Copyright (c) 2004 Gianni Tedesco + * Copyright (c) 2006 CodeSourcery + * Copyright (c) 2006 Openedhand Ltd. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * TODO:
+ *  o Isochronous transfers
+ *  o Allocate bandwidth in frames properly
+ *  o Disable timers when nothing needs to be done, or remove timer usage
+ *    altogether.
+ *  o BIOS work to boot from USB storage
+*/
+
+#include "qemu/osdep.h"
+#include "hw/irq.h"
+#include "qapi/error.h"
+#include "qemu/module.h"
+#include "qemu/timer.h"
+#include "hw/usb.h"
+#include "migration/vmstate.h"
+#include "hw/sysbus.h"
+#include "hw/qdev-dma.h"
+#include "hw/qdev-properties.h"
+#include "trace.h"
+#include "hcd-ohci.h"
+
+/* This causes frames to occur 1000x slower */
+//#define OHCI_TIME_WARP 1
+
+#define ED_LINK_LIMIT 32
+
+static int64_t usb_frame_time;
+static int64_t usb_bit_time;
+
+/* Host Controller Communications Area */
+struct ohci_hcca {
+    uint32_t intr[32];
+    uint16_t frame, pad;
+    uint32_t done;
+};
+#define HCCA_WRITEBACK_OFFSET offsetof(struct ohci_hcca, frame)
+#define HCCA_WRITEBACK_SIZE   8 /* frame, pad, done */
+
+#define ED_WBACK_OFFSET offsetof(struct ohci_ed, head)
+#define ED_WBACK_SIZE   4
+
+static void ohci_async_cancel_device(OHCIState *ohci, USBDevice *dev);
+
+/* Bitfields for the first word of an Endpoint Descriptor. */
+#define OHCI_ED_FA_SHIFT  0
+#define OHCI_ED_FA_MASK   (0x7f<<OHCI_ED_FA_SHIFT)
+#define OHCI_ED_EN_SHIFT  7
+#define OHCI_ED_EN_MASK   (0xf<<OHCI_ED_EN_SHIFT)
+#define OHCI_ED_D_SHIFT   11
+#define OHCI_ED_D_MASK    (3<<OHCI_ED_D_SHIFT)
+#define OHCI_ED_S         (1<<13)
+#define OHCI_ED_K         (1<<14)
+#define OHCI_ED_F         (1<<15)
+#define OHCI_ED_MPS_SHIFT 16
+#define OHCI_ED_MPS_MASK  (0x7ff<<OHCI_ED_MPS_SHIFT)
+
+/* Flags in the head field of an Endpoint Descriptor. */
+#define OHCI_ED_H         1
+#define OHCI_ED_C         2
+
+/* Bitfields for the first word of a Transfer Descriptor. */
+#define OHCI_TD_R         (1<<18)
+#define OHCI_TD_DP_SHIFT  19
+#define OHCI_TD_DP_MASK   (3<<OHCI_TD_DP_SHIFT)
+#define OHCI_TD_DI_SHIFT  21
+#define OHCI_TD_DI_MASK   (7<<OHCI_TD_DI_SHIFT)
+#define OHCI_TD_T0        (1<<24)
+#define OHCI_TD_T1        (1<<25)
+#define OHCI_TD_EC_SHIFT  26
+#define OHCI_TD_EC_MASK   (3<<OHCI_TD_EC_SHIFT)
+#define OHCI_TD_CC_SHIFT  28
+#define OHCI_TD_CC_MASK   (0xf<<OHCI_TD_CC_SHIFT)
+
+/* Bitfields for the first word of an Isochronous Transfer Descriptor. */
+/* CC & DI - same as in the General Transfer Descriptor */
+#define OHCI_TD_SF_SHIFT  0
+#define OHCI_TD_SF_MASK   (0xffff<<OHCI_TD_SF_SHIFT)
+#define OHCI_TD_FC_SHIFT  24
+#define OHCI_TD_FC_MASK   (7<<OHCI_TD_FC_SHIFT)
+
+/* Isochronous Transfer Descriptor - Offset / PacketSizeWord */
+#define OHCI_TD_PSW_CC_SHIFT   12
+#define OHCI_TD_PSW_CC_MASK    (0xf<<OHCI_TD_PSW_CC_SHIFT)
+#define OHCI_TD_PSW_SIZE_SHIFT 0
+#define OHCI_TD_PSW_SIZE_MASK  (0x7ff<<OHCI_TD_PSW_SIZE_SHIFT)
+
+#define OHCI_PAGE_MASK    0xfffff000
+#define OHCI_OFFSET_MASK  0xfff
+
+#define OHCI_DPTR_MASK    0xfffffff0
+
+#define OHCI_BM(val, field) \
+    (((val) & OHCI_##field##_MASK) >> OHCI_##field##_SHIFT)
+
+#define OHCI_SET_BM(val, field, newval) do { \
+    val &= ~OHCI_##field##_MASK; \
+    val |= ((newval) << OHCI_##field##_SHIFT) & OHCI_##field##_MASK; \
+    } while(0)
+
+/* endpoint descriptor */
+struct ohci_ed {
+    uint32_t flags;
+    uint32_t tail;
+    uint32_t head;
+    uint32_t next;
+};
+
+/* General transfer descriptor */
+struct ohci_td {
+    uint32_t flags;
+    uint32_t cbp;
+    uint32_t next;
+    uint32_t be;
+};
+
+/* Isochronous transfer descriptor */
+struct ohci_iso_td {
+    uint32_t flags;
+    uint32_t bp;
+    uint32_t next;
+    uint32_t be;
+    uint16_t offset[8];
+};
+
+#define USB_HZ 12000000
+
+/* OHCI Local stuff */
+#define OHCI_CTL_CBSR         ((1<<0)|(1<<1))
+#define OHCI_CTL_PLE          (1<<2)
+#define OHCI_CTL_IE           (1<<3)
+#define OHCI_CTL_CLE          (1<<4)
+#define OHCI_CTL_BLE          (1<<5)
+#define OHCI_CTL_HCFS         ((1<<6)|(1<<7))
+#define  OHCI_USB_RESET       0x00
+#define  OHCI_USB_RESUME      0x40
+#define  OHCI_USB_OPERATIONAL 0x80
+#define  OHCI_USB_SUSPEND     0xc0
+#define OHCI_CTL_IR           (1<<8)
+#define OHCI_CTL_RWC          (1<<9)
+#define OHCI_CTL_RWE          (1<<10)
+
+#define OHCI_STATUS_HCR       (1<<0)
+#define OHCI_STATUS_CLF       (1<<1)
+#define OHCI_STATUS_BLF       (1<<2)
+#define OHCI_STATUS_OCR       (1<<3)
+#define OHCI_STATUS_SOC       ((1<<6)|(1<<7))
+
+#define OHCI_INTR_SO          (1U<<0) /* Scheduling overrun */
+#define OHCI_INTR_WD          (1U<<1) /* HcDoneHead writeback */
+#define OHCI_INTR_SF          (1U<<2) /* Start of frame */
+#define OHCI_INTR_RD          (1U<<3) /* Resume detect */
+#define OHCI_INTR_UE          (1U<<4) /* Unrecoverable error */
+#define OHCI_INTR_FNO         (1U<<5) /* Frame number overflow */
+#define OHCI_INTR_RHSC        (1U<<6) /* Root hub status change */
+#define OHCI_INTR_OC          (1U<<30) /* Ownership change */
+#define OHCI_INTR_MIE         (1U<<31) /* Master Interrupt Enable */
+
+#define OHCI_HCCA_SIZE        0x100
+#define OHCI_HCCA_MASK        0xffffff00
+
+#define OHCI_EDPTR_MASK       0xfffffff0
+
+#define OHCI_FMI_FI           0x00003fff
+#define OHCI_FMI_FSMPS        0xffff0000
+#define OHCI_FMI_FIT          0x80000000
+
+#define 
OHCI_FR_RT (1U<<31) + +#define OHCI_LS_THRESH 0x628 + +#define OHCI_RHA_RW_MASK 0x00000000 /* Mask of supported features. */ +#define OHCI_RHA_PSM (1<<8) +#define OHCI_RHA_NPS (1<<9) +#define OHCI_RHA_DT (1<<10) +#define OHCI_RHA_OCPM (1<<11) +#define OHCI_RHA_NOCP (1<<12) +#define OHCI_RHA_POTPGT_MASK 0xff000000 + +#define OHCI_RHS_LPS (1U<<0) +#define OHCI_RHS_OCI (1U<<1) +#define OHCI_RHS_DRWE (1U<<15) +#define OHCI_RHS_LPSC (1U<<16) +#define OHCI_RHS_OCIC (1U<<17) +#define OHCI_RHS_CRWE (1U<<31) + +#define OHCI_PORT_CCS (1<<0) +#define OHCI_PORT_PES (1<<1) +#define OHCI_PORT_PSS (1<<2) +#define OHCI_PORT_POCI (1<<3) +#define OHCI_PORT_PRS (1<<4) +#define OHCI_PORT_PPS (1<<8) +#define OHCI_PORT_LSDA (1<<9) +#define OHCI_PORT_CSC (1<<16) +#define OHCI_PORT_PESC (1<<17) +#define OHCI_PORT_PSSC (1<<18) +#define OHCI_PORT_OCIC (1<<19) +#define OHCI_PORT_PRSC (1<<20) +#define OHCI_PORT_WTC (OHCI_PORT_CSC|OHCI_PORT_PESC|OHCI_PORT_PSSC \ + |OHCI_PORT_OCIC|OHCI_PORT_PRSC) + +#define OHCI_TD_DIR_SETUP 0x0 +#define OHCI_TD_DIR_OUT 0x1 +#define OHCI_TD_DIR_IN 0x2 +#define OHCI_TD_DIR_RESERVED 0x3 + +#define OHCI_CC_NOERROR 0x0 +#define OHCI_CC_CRC 0x1 +#define OHCI_CC_BITSTUFFING 0x2 +#define OHCI_CC_DATATOGGLEMISMATCH 0x3 +#define OHCI_CC_STALL 0x4 +#define OHCI_CC_DEVICENOTRESPONDING 0x5 +#define OHCI_CC_PIDCHECKFAILURE 0x6 +#define OHCI_CC_UNDEXPETEDPID 0x7 +#define OHCI_CC_DATAOVERRUN 0x8 +#define OHCI_CC_DATAUNDERRUN 0x9 +#define OHCI_CC_BUFFEROVERRUN 0xc +#define OHCI_CC_BUFFERUNDERRUN 0xd + +#define OHCI_HRESET_FSBIR (1 << 0) + +static void ohci_die(OHCIState *ohci) +{ + ohci->ohci_die(ohci); +} + +/* Update IRQ levels */ +static inline void ohci_intr_update(OHCIState *ohci) +{ + int level = 0; + + if ((ohci->intr & OHCI_INTR_MIE) && + (ohci->intr_status & ohci->intr)) + level = 1; + + qemu_set_irq(ohci->irq, level); +} + +/* Set an interrupt */ +static inline void ohci_set_interrupt(OHCIState *ohci, uint32_t intr) +{ + ohci->intr_status |= intr; + ohci_intr_update(ohci); +} + +/* Attach or detach a device on a root hub port. 
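+ * A rough sketch of the attach-side register effects (the detach path
+ * mirrors them with PES/PESC):
+ *
+ *   port->ctrl |= OHCI_PORT_CCS | OHCI_PORT_CSC;  - connected + change
+ *   port->ctrl |= OHCI_PORT_LSDA;                 - low-speed devices only
+ *   ohci_set_interrupt(s, OHCI_INTR_RD);          - only while suspended
+ *   ohci_set_interrupt(s, OHCI_INTR_RHSC);        - whenever ctrl changed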
*/ +static void ohci_attach(USBPort *port1) +{ + OHCIState *s = port1->opaque; + OHCIPort *port = &s->rhport[port1->index]; + uint32_t old_state = port->ctrl; + + /* set connect status */ + port->ctrl |= OHCI_PORT_CCS | OHCI_PORT_CSC; + + /* update speed */ + if (port->port.dev->speed == USB_SPEED_LOW) { + port->ctrl |= OHCI_PORT_LSDA; + } else { + port->ctrl &= ~OHCI_PORT_LSDA; + } + + /* notify of remote-wakeup */ + if ((s->ctl & OHCI_CTL_HCFS) == OHCI_USB_SUSPEND) { + ohci_set_interrupt(s, OHCI_INTR_RD); + } + + trace_usb_ohci_port_attach(port1->index); + + if (old_state != port->ctrl) { + ohci_set_interrupt(s, OHCI_INTR_RHSC); + } +} + +static void ohci_detach(USBPort *port1) +{ + OHCIState *s = port1->opaque; + OHCIPort *port = &s->rhport[port1->index]; + uint32_t old_state = port->ctrl; + + ohci_async_cancel_device(s, port1->dev); + + /* set connect status */ + if (port->ctrl & OHCI_PORT_CCS) { + port->ctrl &= ~OHCI_PORT_CCS; + port->ctrl |= OHCI_PORT_CSC; + } + /* disable port */ + if (port->ctrl & OHCI_PORT_PES) { + port->ctrl &= ~OHCI_PORT_PES; + port->ctrl |= OHCI_PORT_PESC; + } + trace_usb_ohci_port_detach(port1->index); + + if (old_state != port->ctrl) { + ohci_set_interrupt(s, OHCI_INTR_RHSC); + } +} + +static void ohci_wakeup(USBPort *port1) +{ + OHCIState *s = port1->opaque; + OHCIPort *port = &s->rhport[port1->index]; + uint32_t intr = 0; + if (port->ctrl & OHCI_PORT_PSS) { + trace_usb_ohci_port_wakeup(port1->index); + port->ctrl |= OHCI_PORT_PSSC; + port->ctrl &= ~OHCI_PORT_PSS; + intr = OHCI_INTR_RHSC; + } + /* Note that the controller can be suspended even if this port is not */ + if ((s->ctl & OHCI_CTL_HCFS) == OHCI_USB_SUSPEND) { + trace_usb_ohci_remote_wakeup(s->name); + /* This is the one state transition the controller can do by itself */ + s->ctl &= ~OHCI_CTL_HCFS; + s->ctl |= OHCI_USB_RESUME; + /* In suspend mode only ResumeDetected is possible, not RHSC: + * see the OHCI spec 5.1.2.3. + */ + intr = OHCI_INTR_RD; + } + ohci_set_interrupt(s, intr); +} + +static void ohci_child_detach(USBPort *port1, USBDevice *child) +{ + OHCIState *s = port1->opaque; + + ohci_async_cancel_device(s, child); +} + +static USBDevice *ohci_find_device(OHCIState *ohci, uint8_t addr) +{ + USBDevice *dev; + int i; + + for (i = 0; i < ohci->num_ports; i++) { + if ((ohci->rhport[i].ctrl & OHCI_PORT_PES) == 0) { + continue; + } + dev = usb_find_device(&ohci->rhport[i].port, addr); + if (dev != NULL) { + return dev; + } + } + return NULL; +} + +void ohci_stop_endpoints(OHCIState *ohci) +{ + USBDevice *dev; + int i, j; + + for (i = 0; i < ohci->num_ports; i++) { + dev = ohci->rhport[i].port.dev; + if (dev && dev->attached) { + usb_device_ep_stopped(dev, &dev->ep_ctl); + for (j = 0; j < USB_MAX_ENDPOINTS; j++) { + usb_device_ep_stopped(dev, &dev->ep_in[j]); + usb_device_ep_stopped(dev, &dev->ep_out[j]); + } + } + } +} + +static void ohci_roothub_reset(OHCIState *ohci) +{ + OHCIPort *port; + int i; + + ohci_bus_stop(ohci); + ohci->rhdesc_a = OHCI_RHA_NPS | ohci->num_ports; + ohci->rhdesc_b = 0x0; /* Impl. 
specific */
+    ohci->rhstatus = 0;
+
+    for (i = 0; i < ohci->num_ports; i++) {
+        port = &ohci->rhport[i];
+        port->ctrl = 0;
+        if (port->port.dev && port->port.dev->attached) {
+            usb_port_reset(&port->port);
+        }
+    }
+    if (ohci->async_td) {
+        usb_cancel_packet(&ohci->usb_packet);
+        ohci->async_td = 0;
+    }
+    ohci_stop_endpoints(ohci);
+}
+
+/* Reset the controller */
+static void ohci_soft_reset(OHCIState *ohci)
+{
+    trace_usb_ohci_reset(ohci->name);
+
+    ohci_bus_stop(ohci);
+    ohci->ctl = (ohci->ctl & OHCI_CTL_IR) | OHCI_USB_SUSPEND;
+    ohci->old_ctl = 0;
+    ohci->status = 0;
+    ohci->intr_status = 0;
+    ohci->intr = OHCI_INTR_MIE;
+
+    ohci->hcca = 0;
+    ohci->ctrl_head = ohci->ctrl_cur = 0;
+    ohci->bulk_head = ohci->bulk_cur = 0;
+    ohci->per_cur = 0;
+    ohci->done = 0;
+    ohci->done_count = 7;
+
+    /* FSMPS is marked TBD in OHCI 1.0, what gives ffs?
+     * I took the value Linux sets ...
+     */
+    ohci->fsmps = 0x2778;
+    ohci->fi = 0x2edf;
+    ohci->fit = 0;
+    ohci->frt = 0;
+    ohci->frame_number = 0;
+    ohci->pstart = 0;
+    ohci->lst = OHCI_LS_THRESH;
+}
+
+void ohci_hard_reset(OHCIState *ohci)
+{
+    ohci_soft_reset(ohci);
+    ohci->ctl = 0;
+    ohci_roothub_reset(ohci);
+}
+
+/* Get an array of dwords from main memory */
+static inline int get_dwords(OHCIState *ohci,
+                             dma_addr_t addr, uint32_t *buf, int num)
+{
+    int i;
+
+    addr += ohci->localmem_base;
+
+    for (i = 0; i < num; i++, buf++, addr += sizeof(*buf)) {
+        if (dma_memory_read(ohci->as, addr, buf, sizeof(*buf))) {
+            return -1;
+        }
+        *buf = le32_to_cpu(*buf);
+    }
+
+    return 0;
+}
+
+/* Put an array of dwords into main memory */
+static inline int put_dwords(OHCIState *ohci,
+                             dma_addr_t addr, uint32_t *buf, int num)
+{
+    int i;
+
+    addr += ohci->localmem_base;
+
+    for (i = 0; i < num; i++, buf++, addr += sizeof(*buf)) {
+        uint32_t tmp = cpu_to_le32(*buf);
+        if (dma_memory_write(ohci->as, addr, &tmp, sizeof(tmp))) {
+            return -1;
+        }
+    }
+
+    return 0;
+}
+
+/* Get an array of words from main memory */
+static inline int get_words(OHCIState *ohci,
+                            dma_addr_t addr, uint16_t *buf, int num)
+{
+    int i;
+
+    addr += ohci->localmem_base;
+
+    for (i = 0; i < num; i++, buf++, addr += sizeof(*buf)) {
+        if (dma_memory_read(ohci->as, addr, buf, sizeof(*buf))) {
+            return -1;
+        }
+        *buf = le16_to_cpu(*buf);
+    }
+
+    return 0;
+}
+
+/* Put an array of words into main memory */
+static inline int put_words(OHCIState *ohci,
+                            dma_addr_t addr, uint16_t *buf, int num)
+{
+    int i;
+
+    addr += ohci->localmem_base;
+
+    for (i = 0; i < num; i++, buf++, addr += sizeof(*buf)) {
+        uint16_t tmp = cpu_to_le16(*buf);
+        if (dma_memory_write(ohci->as, addr, &tmp, sizeof(tmp))) {
+            return -1;
+        }
+    }
+
+    return 0;
+}
+
+static inline int ohci_read_ed(OHCIState *ohci,
+                               dma_addr_t addr, struct ohci_ed *ed)
+{
+    return get_dwords(ohci, addr, (uint32_t *)ed, sizeof(*ed) >> 2);
+}
+
+static inline int ohci_read_td(OHCIState *ohci,
+                               dma_addr_t addr, struct ohci_td *td)
+{
+    return get_dwords(ohci, addr, (uint32_t *)td, sizeof(*td) >> 2);
+}
+
+static inline int ohci_read_iso_td(OHCIState *ohci,
+                                   dma_addr_t addr, struct ohci_iso_td *td)
+{
+    return get_dwords(ohci, addr, (uint32_t *)td, 4) ||
+           get_words(ohci, addr + 16, td->offset, 8);
+}
+
+static inline int ohci_read_hcca(OHCIState *ohci,
+                                 dma_addr_t addr, struct ohci_hcca *hcca)
+{
+    return dma_memory_read(ohci->as, addr + ohci->localmem_base,
+                           hcca, sizeof(*hcca));
+}
+
+static inline int ohci_put_ed(OHCIState *ohci,
+                              dma_addr_t addr, struct ohci_ed *ed)
+{
+    /* ed->tail is under control of the HCD.
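+     * (head also carries the Halted and toggle Carry flags in its low
+     * bits; tail only marks where the HCD appends new TDs.)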
+ * Since just ed->head is changed by HC, just write back this + */ + + return put_dwords(ohci, addr + ED_WBACK_OFFSET, + (uint32_t *)((char *)ed + ED_WBACK_OFFSET), + ED_WBACK_SIZE >> 2); +} + +static inline int ohci_put_td(OHCIState *ohci, + dma_addr_t addr, struct ohci_td *td) +{ + return put_dwords(ohci, addr, (uint32_t *)td, sizeof(*td) >> 2); +} + +static inline int ohci_put_iso_td(OHCIState *ohci, + dma_addr_t addr, struct ohci_iso_td *td) +{ + return put_dwords(ohci, addr, (uint32_t *)td, 4) || + put_words(ohci, addr + 16, td->offset, 8); +} + +static inline int ohci_put_hcca(OHCIState *ohci, + dma_addr_t addr, struct ohci_hcca *hcca) +{ + return dma_memory_write(ohci->as, + addr + ohci->localmem_base + HCCA_WRITEBACK_OFFSET, + (char *)hcca + HCCA_WRITEBACK_OFFSET, + HCCA_WRITEBACK_SIZE); +} + +/* Read/Write the contents of a TD from/to main memory. */ +static int ohci_copy_td(OHCIState *ohci, struct ohci_td *td, + uint8_t *buf, int len, DMADirection dir) +{ + dma_addr_t ptr, n; + + ptr = td->cbp; + n = 0x1000 - (ptr & 0xfff); + if (n > len) + n = len; + + if (dma_memory_rw(ohci->as, ptr + ohci->localmem_base, buf, n, dir)) { + return -1; + } + if (n == len) { + return 0; + } + ptr = td->be & ~0xfffu; + buf += n; + if (dma_memory_rw(ohci->as, ptr + ohci->localmem_base, buf, + len - n, dir)) { + return -1; + } + return 0; +} + +/* Read/Write the contents of an ISO TD from/to main memory. */ +static int ohci_copy_iso_td(OHCIState *ohci, + uint32_t start_addr, uint32_t end_addr, + uint8_t *buf, int len, DMADirection dir) +{ + dma_addr_t ptr, n; + + ptr = start_addr; + n = 0x1000 - (ptr & 0xfff); + if (n > len) + n = len; + + if (dma_memory_rw(ohci->as, ptr + ohci->localmem_base, buf, n, dir)) { + return -1; + } + if (n == len) { + return 0; + } + ptr = end_addr & ~0xfffu; + buf += n; + if (dma_memory_rw(ohci->as, ptr + ohci->localmem_base, buf, + len - n, dir)) { + return -1; + } + return 0; +} + +static void ohci_process_lists(OHCIState *ohci, int completion); + +static void ohci_async_complete_packet(USBPort *port, USBPacket *packet) +{ + OHCIState *ohci = container_of(packet, OHCIState, usb_packet); + + trace_usb_ohci_async_complete(); + ohci->async_complete = true; + ohci_process_lists(ohci, 1); +} + +#define USUB(a, b) ((int16_t)((uint16_t)(a) - (uint16_t)(b))) + +static int ohci_service_iso_td(OHCIState *ohci, struct ohci_ed *ed, + int completion) +{ + int dir; + size_t len = 0; + const char *str = NULL; + int pid; + int ret; + int i; + USBDevice *dev; + USBEndpoint *ep; + struct ohci_iso_td iso_td; + uint32_t addr; + uint16_t starting_frame; + int16_t relative_frame_number; + int frame_count; + uint32_t start_offset, next_offset, end_offset = 0; + uint32_t start_addr, end_addr; + + addr = ed->head & OHCI_DPTR_MASK; + + if (ohci_read_iso_td(ohci, addr, &iso_td)) { + trace_usb_ohci_iso_td_read_failed(addr); + ohci_die(ohci); + return 1; + } + + starting_frame = OHCI_BM(iso_td.flags, TD_SF); + frame_count = OHCI_BM(iso_td.flags, TD_FC); + relative_frame_number = USUB(ohci->frame_number, starting_frame); + + trace_usb_ohci_iso_td_head( + ed->head & OHCI_DPTR_MASK, ed->tail & OHCI_DPTR_MASK, + iso_td.flags, iso_td.bp, iso_td.next, iso_td.be, + ohci->frame_number, starting_frame, + frame_count, relative_frame_number); + trace_usb_ohci_iso_td_head_offset( + iso_td.offset[0], iso_td.offset[1], + iso_td.offset[2], iso_td.offset[3], + iso_td.offset[4], iso_td.offset[5], + iso_td.offset[6], iso_td.offset[7]); + + if (relative_frame_number < 0) { + 
trace_usb_ohci_iso_td_relative_frame_number_neg(relative_frame_number); + return 1; + } else if (relative_frame_number > frame_count) { + /* ISO TD expired - retire the TD to the Done Queue and continue with + the next ISO TD of the same ED */ + trace_usb_ohci_iso_td_relative_frame_number_big(relative_frame_number, + frame_count); + if (OHCI_CC_DATAOVERRUN == OHCI_BM(iso_td.flags, TD_CC)) { + /* avoid infinite loop */ + return 1; + } + OHCI_SET_BM(iso_td.flags, TD_CC, OHCI_CC_DATAOVERRUN); + ed->head &= ~OHCI_DPTR_MASK; + ed->head |= (iso_td.next & OHCI_DPTR_MASK); + iso_td.next = ohci->done; + ohci->done = addr; + i = OHCI_BM(iso_td.flags, TD_DI); + if (i < ohci->done_count) + ohci->done_count = i; + if (ohci_put_iso_td(ohci, addr, &iso_td)) { + ohci_die(ohci); + return 1; + } + return 0; + } + + dir = OHCI_BM(ed->flags, ED_D); + switch (dir) { + case OHCI_TD_DIR_IN: + str = "in"; + pid = USB_TOKEN_IN; + break; + case OHCI_TD_DIR_OUT: + str = "out"; + pid = USB_TOKEN_OUT; + break; + case OHCI_TD_DIR_SETUP: + str = "setup"; + pid = USB_TOKEN_SETUP; + break; + default: + trace_usb_ohci_iso_td_bad_direction(dir); + return 1; + } + + if (!iso_td.bp || !iso_td.be) { + trace_usb_ohci_iso_td_bad_bp_be(iso_td.bp, iso_td.be); + return 1; + } + + start_offset = iso_td.offset[relative_frame_number]; + if (relative_frame_number < frame_count) { + next_offset = iso_td.offset[relative_frame_number + 1]; + } else { + next_offset = iso_td.be; + } + + if (!(OHCI_BM(start_offset, TD_PSW_CC) & 0xe) || + ((relative_frame_number < frame_count) && + !(OHCI_BM(next_offset, TD_PSW_CC) & 0xe))) { + trace_usb_ohci_iso_td_bad_cc_not_accessed(start_offset, next_offset); + return 1; + } + + if ((relative_frame_number < frame_count) && (start_offset > next_offset)) { + trace_usb_ohci_iso_td_bad_cc_overrun(start_offset, next_offset); + return 1; + } + + if ((start_offset & 0x1000) == 0) { + start_addr = (iso_td.bp & OHCI_PAGE_MASK) | + (start_offset & OHCI_OFFSET_MASK); + } else { + start_addr = (iso_td.be & OHCI_PAGE_MASK) | + (start_offset & OHCI_OFFSET_MASK); + } + + if (relative_frame_number < frame_count) { + end_offset = next_offset - 1; + if ((end_offset & 0x1000) == 0) { + end_addr = (iso_td.bp & OHCI_PAGE_MASK) | + (end_offset & OHCI_OFFSET_MASK); + } else { + end_addr = (iso_td.be & OHCI_PAGE_MASK) | + (end_offset & OHCI_OFFSET_MASK); + } + } else { + /* Last packet in the ISO TD */ + end_addr = next_offset; + } + + if (start_addr > end_addr) { + trace_usb_ohci_iso_td_bad_cc_overrun(start_addr, end_addr); + return 1; + } + + if ((start_addr & OHCI_PAGE_MASK) != (end_addr & OHCI_PAGE_MASK)) { + len = (end_addr & OHCI_OFFSET_MASK) + 0x1001 + - (start_addr & OHCI_OFFSET_MASK); + } else { + len = end_addr - start_addr + 1; + } + if (len > sizeof(ohci->usb_buf)) { + len = sizeof(ohci->usb_buf); + } + + if (len && dir != OHCI_TD_DIR_IN) { + if (ohci_copy_iso_td(ohci, start_addr, end_addr, ohci->usb_buf, len, + DMA_DIRECTION_TO_DEVICE)) { + ohci_die(ohci); + return 1; + } + } + + if (!completion) { + bool int_req = relative_frame_number == frame_count && + OHCI_BM(iso_td.flags, TD_DI) == 0; + dev = ohci_find_device(ohci, OHCI_BM(ed->flags, ED_FA)); + if (dev == NULL) { + trace_usb_ohci_td_dev_error(); + return 1; + } + ep = usb_ep_get(dev, pid, OHCI_BM(ed->flags, ED_EN)); + usb_packet_setup(&ohci->usb_packet, pid, ep, 0, addr, false, int_req); + usb_packet_addbuf(&ohci->usb_packet, ohci->usb_buf, len); + usb_handle_packet(dev, &ohci->usb_packet); + if (ohci->usb_packet.status == USB_RET_ASYNC) { + 
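
The expiry logic above relies on USUB(): frame numbers are 16-bit counters that wrap, so the distance from the ISO TD's StartingFrame must be taken as a signed 16-bit difference. A standalone illustration:

    #include <stdint.h>

    /* Signed distance between two wrapping 16-bit frame numbers. */
    static int16_t sk_frame_delta(uint16_t now, uint16_t start)
    {
        return (int16_t)(uint16_t)(now - start);
    }

    /* sk_frame_delta(0x0002, 0xfffe) == 4: 'now' is four frames past
     * 'start' even though the counter wrapped in between. */
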
usb_device_flush_ep_queue(dev, ep); + return 1; + } + } + if (ohci->usb_packet.status == USB_RET_SUCCESS) { + ret = ohci->usb_packet.actual_length; + } else { + ret = ohci->usb_packet.status; + } + + trace_usb_ohci_iso_td_so(start_offset, end_offset, start_addr, end_addr, + str, len, ret); + + /* Writeback */ + if (dir == OHCI_TD_DIR_IN && ret >= 0 && ret <= len) { + /* IN transfer succeeded */ + if (ohci_copy_iso_td(ohci, start_addr, end_addr, ohci->usb_buf, ret, + DMA_DIRECTION_FROM_DEVICE)) { + ohci_die(ohci); + return 1; + } + OHCI_SET_BM(iso_td.offset[relative_frame_number], TD_PSW_CC, + OHCI_CC_NOERROR); + OHCI_SET_BM(iso_td.offset[relative_frame_number], TD_PSW_SIZE, ret); + } else if (dir == OHCI_TD_DIR_OUT && ret == len) { + /* OUT transfer succeeded */ + OHCI_SET_BM(iso_td.offset[relative_frame_number], TD_PSW_CC, + OHCI_CC_NOERROR); + OHCI_SET_BM(iso_td.offset[relative_frame_number], TD_PSW_SIZE, 0); + } else { + if (ret > (ssize_t) len) { + trace_usb_ohci_iso_td_data_overrun(ret, len); + OHCI_SET_BM(iso_td.offset[relative_frame_number], TD_PSW_CC, + OHCI_CC_DATAOVERRUN); + OHCI_SET_BM(iso_td.offset[relative_frame_number], TD_PSW_SIZE, + len); + } else if (ret >= 0) { + trace_usb_ohci_iso_td_data_underrun(ret); + OHCI_SET_BM(iso_td.offset[relative_frame_number], TD_PSW_CC, + OHCI_CC_DATAUNDERRUN); + } else { + switch (ret) { + case USB_RET_IOERROR: + case USB_RET_NODEV: + OHCI_SET_BM(iso_td.offset[relative_frame_number], TD_PSW_CC, + OHCI_CC_DEVICENOTRESPONDING); + OHCI_SET_BM(iso_td.offset[relative_frame_number], TD_PSW_SIZE, + 0); + break; + case USB_RET_NAK: + case USB_RET_STALL: + trace_usb_ohci_iso_td_nak(ret); + OHCI_SET_BM(iso_td.offset[relative_frame_number], TD_PSW_CC, + OHCI_CC_STALL); + OHCI_SET_BM(iso_td.offset[relative_frame_number], TD_PSW_SIZE, + 0); + break; + default: + trace_usb_ohci_iso_td_bad_response(ret); + OHCI_SET_BM(iso_td.offset[relative_frame_number], TD_PSW_CC, + OHCI_CC_UNDEXPETEDPID); + break; + } + } + } + + if (relative_frame_number == frame_count) { + /* Last data packet of ISO TD - retire the TD to the Done Queue */ + OHCI_SET_BM(iso_td.flags, TD_CC, OHCI_CC_NOERROR); + ed->head &= ~OHCI_DPTR_MASK; + ed->head |= (iso_td.next & OHCI_DPTR_MASK); + iso_td.next = ohci->done; + ohci->done = addr; + i = OHCI_BM(iso_td.flags, TD_DI); + if (i < ohci->done_count) + ohci->done_count = i; + } + if (ohci_put_iso_td(ohci, addr, &iso_td)) { + ohci_die(ohci); + } + return 1; +} + +static void ohci_td_pkt(const char *msg, const uint8_t *buf, size_t len) +{ + bool print16; + bool printall; + const int width = 16; + int i; + char tmp[3 * width + 1]; + char *p = tmp; + + print16 = !!trace_event_get_state_backends(TRACE_USB_OHCI_TD_PKT_SHORT); + printall = !!trace_event_get_state_backends(TRACE_USB_OHCI_TD_PKT_FULL); + + if (!printall && !print16) { + return; + } + + for (i = 0; ; i++) { + if (i && (!(i % width) || (i == len))) { + if (!printall) { + trace_usb_ohci_td_pkt_short(msg, tmp); + break; + } + trace_usb_ohci_td_pkt_full(msg, tmp); + p = tmp; + *p = 0; + } + if (i == len) { + break; + } + + p += sprintf(p, " %.2x", buf[i]); + } +} + +/* Service a transport descriptor. + Returns nonzero to terminate processing of this endpoint. 
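
The writeback above edits the per-frame Packet Status Word of the ISO TD through OHCI_SET_BM(). Per the OHCI spec the PSW keeps the ConditionCode in bits 15:12 and the transferred size in bits 10:0; this helper, a sketch with an illustrative name, shows the packing those macros perform:

    #include <stdint.h>

    /* Compose an ISO TD Packet Status Word: CC in 15:12, size in 10:0. */
    static uint16_t sk_iso_psw(uint8_t cc, uint16_t size)
    {
        return (uint16_t)(((cc & 0xfu) << 12) | (size & 0x7ffu));
    }
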
*/ + +static int ohci_service_td(OHCIState *ohci, struct ohci_ed *ed) +{ + int dir; + size_t len = 0, pktlen = 0; + const char *str = NULL; + int pid; + int ret; + int i; + USBDevice *dev; + USBEndpoint *ep; + struct ohci_td td; + uint32_t addr; + int flag_r; + int completion; + + addr = ed->head & OHCI_DPTR_MASK; + /* See if this TD has already been submitted to the device. */ + completion = (addr == ohci->async_td); + if (completion && !ohci->async_complete) { + trace_usb_ohci_td_skip_async(); + return 1; + } + if (ohci_read_td(ohci, addr, &td)) { + trace_usb_ohci_td_read_error(addr); + ohci_die(ohci); + return 1; + } + + dir = OHCI_BM(ed->flags, ED_D); + switch (dir) { + case OHCI_TD_DIR_OUT: + case OHCI_TD_DIR_IN: + /* Same value. */ + break; + default: + dir = OHCI_BM(td.flags, TD_DP); + break; + } + + switch (dir) { + case OHCI_TD_DIR_IN: + str = "in"; + pid = USB_TOKEN_IN; + break; + case OHCI_TD_DIR_OUT: + str = "out"; + pid = USB_TOKEN_OUT; + break; + case OHCI_TD_DIR_SETUP: + str = "setup"; + pid = USB_TOKEN_SETUP; + break; + default: + trace_usb_ohci_td_bad_direction(dir); + return 1; + } + if (td.cbp && td.be) { + if ((td.cbp & 0xfffff000) != (td.be & 0xfffff000)) { + len = (td.be & 0xfff) + 0x1001 - (td.cbp & 0xfff); + } else { + if (td.cbp > td.be) { + trace_usb_ohci_iso_td_bad_cc_overrun(td.cbp, td.be); + ohci_die(ohci); + return 1; + } + len = (td.be - td.cbp) + 1; + } + if (len > sizeof(ohci->usb_buf)) { + len = sizeof(ohci->usb_buf); + } + + pktlen = len; + if (len && dir != OHCI_TD_DIR_IN) { + /* The endpoint may not allow us to transfer it all now */ + pktlen = (ed->flags & OHCI_ED_MPS_MASK) >> OHCI_ED_MPS_SHIFT; + if (pktlen > len) { + pktlen = len; + } + if (!completion) { + if (ohci_copy_td(ohci, &td, ohci->usb_buf, pktlen, + DMA_DIRECTION_TO_DEVICE)) { + ohci_die(ohci); + } + } + } + } + + flag_r = (td.flags & OHCI_TD_R) != 0; + trace_usb_ohci_td_pkt_hdr(addr, (int64_t)pktlen, (int64_t)len, str, + flag_r, td.cbp, td.be); + ohci_td_pkt("OUT", ohci->usb_buf, pktlen); + + if (completion) { + ohci->async_td = 0; + ohci->async_complete = false; + } else { + if (ohci->async_td) { + /* ??? The hardware should allow one active packet per + endpoint. We only allow one active packet per controller. + This should be sufficient as long as devices respond in a + timely manner. + */ + trace_usb_ohci_td_too_many_pending(); + return 1; + } + dev = ohci_find_device(ohci, OHCI_BM(ed->flags, ED_FA)); + if (dev == NULL) { + trace_usb_ohci_td_dev_error(); + return 1; + } + ep = usb_ep_get(dev, pid, OHCI_BM(ed->flags, ED_EN)); + usb_packet_setup(&ohci->usb_packet, pid, ep, 0, addr, !flag_r, + OHCI_BM(td.flags, TD_DI) == 0); + usb_packet_addbuf(&ohci->usb_packet, ohci->usb_buf, pktlen); + usb_handle_packet(dev, &ohci->usb_packet); + trace_usb_ohci_td_packet_status(ohci->usb_packet.status); + + if (ohci->usb_packet.status == USB_RET_ASYNC) { + usb_device_flush_ep_queue(dev, ep); + ohci->async_td = addr; + return 1; + } + } + if (ohci->usb_packet.status == USB_RET_SUCCESS) { + ret = ohci->usb_packet.actual_length; + } else { + ret = ohci->usb_packet.status; + } + + if (ret >= 0) { + if (dir == OHCI_TD_DIR_IN) { + if (ohci_copy_td(ohci, &td, ohci->usb_buf, ret, + DMA_DIRECTION_FROM_DEVICE)) { + ohci_die(ohci); + } + ohci_td_pkt("IN", ohci->usb_buf, pktlen); + } else { + ret = pktlen; + } + } + + /* Writeback */ + if (ret == pktlen || (dir == OHCI_TD_DIR_IN && ret >= 0 && flag_r)) { + /* Transmission succeeded. 
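
A note on the pktlen clamp above: the ED's first dword carries MaxPacketSize, and an OUT transfer is handed to the device at most one max-size packet per service pass; the partial-transfer path below then advances cbp and leaves the TD unretired. Sketch of the clamp, assuming the spec's field position (bits 26:16) for the illustrative masks:

    #include <stddef.h>
    #include <stdint.h>

    #define SK_ED_MPS_SHIFT 16
    #define SK_ED_MPS_MASK  (0x7ffu << SK_ED_MPS_SHIFT)

    /* Clamp an OUT transfer to one MaxPacketSize packet. */
    static size_t sk_clamp_to_mps(uint32_t ed_flags, size_t len)
    {
        size_t mps = (ed_flags & SK_ED_MPS_MASK) >> SK_ED_MPS_SHIFT;

        return len < mps ? len : mps;
    }
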
*/ + if (ret == len) { + td.cbp = 0; + } else { + if ((td.cbp & 0xfff) + ret > 0xfff) { + td.cbp = (td.be & ~0xfff) + ((td.cbp + ret) & 0xfff); + } else { + td.cbp += ret; + } + } + td.flags |= OHCI_TD_T1; + td.flags ^= OHCI_TD_T0; + OHCI_SET_BM(td.flags, TD_CC, OHCI_CC_NOERROR); + OHCI_SET_BM(td.flags, TD_EC, 0); + + if ((dir != OHCI_TD_DIR_IN) && (ret != len)) { + /* Partial packet transfer: TD not ready to retire yet */ + goto exit_no_retire; + } + + /* Setting ED_C is part of the TD retirement process */ + ed->head &= ~OHCI_ED_C; + if (td.flags & OHCI_TD_T0) + ed->head |= OHCI_ED_C; + } else { + if (ret >= 0) { + trace_usb_ohci_td_underrun(); + OHCI_SET_BM(td.flags, TD_CC, OHCI_CC_DATAUNDERRUN); + } else { + switch (ret) { + case USB_RET_IOERROR: + case USB_RET_NODEV: + trace_usb_ohci_td_dev_error(); + OHCI_SET_BM(td.flags, TD_CC, OHCI_CC_DEVICENOTRESPONDING); + break; + case USB_RET_NAK: + trace_usb_ohci_td_nak(); + return 1; + case USB_RET_STALL: + trace_usb_ohci_td_stall(); + OHCI_SET_BM(td.flags, TD_CC, OHCI_CC_STALL); + break; + case USB_RET_BABBLE: + trace_usb_ohci_td_babble(); + OHCI_SET_BM(td.flags, TD_CC, OHCI_CC_DATAOVERRUN); + break; + default: + trace_usb_ohci_td_bad_device_response(ret); + OHCI_SET_BM(td.flags, TD_CC, OHCI_CC_UNDEXPETEDPID); + OHCI_SET_BM(td.flags, TD_EC, 3); + break; + } + /* An error occurred so we have to clear the interrupt counter. See + * spec at 6.4.4 on page 104 */ + ohci->done_count = 0; + } + ed->head |= OHCI_ED_H; + } + + /* Retire this TD */ + ed->head &= ~OHCI_DPTR_MASK; + ed->head |= td.next & OHCI_DPTR_MASK; + td.next = ohci->done; + ohci->done = addr; + i = OHCI_BM(td.flags, TD_DI); + if (i < ohci->done_count) + ohci->done_count = i; +exit_no_retire: + if (ohci_put_td(ohci, addr, &td)) { + ohci_die(ohci); + return 1; + } + return OHCI_BM(td.flags, TD_CC) != OHCI_CC_NOERROR; +} + +/* Service an endpoint list. Returns nonzero if active TD were found. */ +static int ohci_service_ed_list(OHCIState *ohci, uint32_t head, int completion) +{ + struct ohci_ed ed; + uint32_t next_ed; + uint32_t cur; + int active; + uint32_t link_cnt = 0; + active = 0; + + if (head == 0) + return 0; + + for (cur = head; cur && link_cnt++ < ED_LINK_LIMIT; cur = next_ed) { + if (ohci_read_ed(ohci, cur, &ed)) { + trace_usb_ohci_ed_read_error(cur); + ohci_die(ohci); + return 0; + } + + next_ed = ed.next & OHCI_DPTR_MASK; + + if ((ed.head & OHCI_ED_H) || (ed.flags & OHCI_ED_K)) { + uint32_t addr; + /* Cancel pending packets for ED that have been paused. 
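
The toggle update above ('td.flags |= OHCI_TD_T1; td.flags ^= OHCI_TD_T0;') uses the T-field encoding: 0b0x means carry the toggle from the ED's C bit, while 0b10/0b11 are explicit DATA0/DATA1. Setting T1 forces the explicit form and flipping T0 alternates the toggle. Sketch, with mask values taken from the spec (bits 25:24) rather than from this patch:

    #include <stdint.h>

    #define SK_TD_T0 (1u << 24)
    #define SK_TD_T1 (1u << 25)

    /* Advance the data toggle after a successful transfer. */
    static uint32_t sk_advance_toggle(uint32_t td_flags)
    {
        td_flags |= SK_TD_T1;  /* force the explicit DATA0/DATA1 encoding */
        td_flags ^= SK_TD_T0;  /* alternate DATA0 <-> DATA1 */
        return td_flags;
    }
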
*/ + addr = ed.head & OHCI_DPTR_MASK; + if (ohci->async_td && addr == ohci->async_td) { + usb_cancel_packet(&ohci->usb_packet); + ohci->async_td = 0; + usb_device_ep_stopped(ohci->usb_packet.ep->dev, + ohci->usb_packet.ep); + } + continue; + } + + while ((ed.head & OHCI_DPTR_MASK) != ed.tail) { + trace_usb_ohci_ed_pkt(cur, (ed.head & OHCI_ED_H) != 0, + (ed.head & OHCI_ED_C) != 0, ed.head & OHCI_DPTR_MASK, + ed.tail & OHCI_DPTR_MASK, ed.next & OHCI_DPTR_MASK); + trace_usb_ohci_ed_pkt_flags( + OHCI_BM(ed.flags, ED_FA), OHCI_BM(ed.flags, ED_EN), + OHCI_BM(ed.flags, ED_D), (ed.flags & OHCI_ED_S)!= 0, + (ed.flags & OHCI_ED_K) != 0, (ed.flags & OHCI_ED_F) != 0, + OHCI_BM(ed.flags, ED_MPS)); + + active = 1; + + if ((ed.flags & OHCI_ED_F) == 0) { + if (ohci_service_td(ohci, &ed)) + break; + } else { + /* Handle isochronous endpoints */ + if (ohci_service_iso_td(ohci, &ed, completion)) + break; + } + } + + if (ohci_put_ed(ohci, cur, &ed)) { + ohci_die(ohci); + return 0; + } + } + + return active; +} + +/* set a timer for EOF */ +static void ohci_eof_timer(OHCIState *ohci) +{ + timer_mod(ohci->eof_timer, ohci->sof_time + usb_frame_time); +} +/* Set a timer for EOF and generate a SOF event */ +static void ohci_sof(OHCIState *ohci) +{ + ohci->sof_time += usb_frame_time; + ohci_eof_timer(ohci); + ohci_set_interrupt(ohci, OHCI_INTR_SF); +} + +/* Process Control and Bulk lists. */ +static void ohci_process_lists(OHCIState *ohci, int completion) +{ + if ((ohci->ctl & OHCI_CTL_CLE) && (ohci->status & OHCI_STATUS_CLF)) { + if (ohci->ctrl_cur && ohci->ctrl_cur != ohci->ctrl_head) { + trace_usb_ohci_process_lists(ohci->ctrl_head, ohci->ctrl_cur); + } + if (!ohci_service_ed_list(ohci, ohci->ctrl_head, completion)) { + ohci->ctrl_cur = 0; + ohci->status &= ~OHCI_STATUS_CLF; + } + } + + if ((ohci->ctl & OHCI_CTL_BLE) && (ohci->status & OHCI_STATUS_BLF)) { + if (!ohci_service_ed_list(ohci, ohci->bulk_head, completion)) { + ohci->bulk_cur = 0; + ohci->status &= ~OHCI_STATUS_BLF; + } + } +} + +/* Do frame processing on frame boundary */ +static void ohci_frame_boundary(void *opaque) +{ + OHCIState *ohci = opaque; + struct ohci_hcca hcca; + + if (ohci_read_hcca(ohci, ohci->hcca, &hcca)) { + trace_usb_ohci_hcca_read_error(ohci->hcca); + ohci_die(ohci); + return; + } + + /* Process all the lists at the end of the frame */ + if (ohci->ctl & OHCI_CTL_PLE) { + int n; + + n = ohci->frame_number & 0x1f; + ohci_service_ed_list(ohci, le32_to_cpu(hcca.intr[n]), 0); + } + + /* Cancel all pending packets if either of the lists has been disabled. */ + if (ohci->old_ctl & (~ohci->ctl) & (OHCI_CTL_BLE | OHCI_CTL_CLE)) { + if (ohci->async_td) { + usb_cancel_packet(&ohci->usb_packet); + ohci->async_td = 0; + } + ohci_stop_endpoints(ohci); + } + ohci->old_ctl = ohci->ctl; + ohci_process_lists(ohci, 0); + + /* Stop if UnrecoverableError happened or ohci_sof will crash */ + if (ohci->intr_status & OHCI_INTR_UE) { + return; + } + + /* Frame boundary, so do EOF stuf here */ + ohci->frt = ohci->fit; + + /* Increment frame number and take care of endianness. 
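
The periodic pass above ('n = ohci->frame_number & 0x1f') encodes the OHCI scheduling rule: the HCCA begins with 32 interrupt-ED list heads, and the low five bits of the frame number select one per 1 ms frame, which is how interrupt endpoints get polling intervals from 1 to 32 ms. A sketch of the selection:

    #include <stdint.h>

    #define SK_HCCA_INTR_HEADS 32

    /* Pick this frame's interrupt list head out of the HCCA. */
    static uint32_t sk_intr_head(const uint32_t intr[SK_HCCA_INTR_HEADS],
                                 uint16_t frame_number)
    {
        return intr[frame_number & (SK_HCCA_INTR_HEADS - 1)];
    }
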
*/
+    ohci->frame_number = (ohci->frame_number + 1) & 0xffff;
+    hcca.frame = cpu_to_le16(ohci->frame_number);
+
+    if (ohci->done_count == 0 && !(ohci->intr_status & OHCI_INTR_WD)) {
+        if (!ohci->done)
+            abort();
+        if (ohci->intr & ohci->intr_status)
+            ohci->done |= 1;
+        hcca.done = cpu_to_le32(ohci->done);
+        ohci->done = 0;
+        ohci->done_count = 7;
+        ohci_set_interrupt(ohci, OHCI_INTR_WD);
+    }
+
+    if (ohci->done_count != 7 && ohci->done_count != 0)
+        ohci->done_count--;
+
+    /* Do SOF stuff here */
+    ohci_sof(ohci);
+
+    /* Writeback HCCA */
+    if (ohci_put_hcca(ohci, ohci->hcca, &hcca)) {
+        ohci_die(ohci);
+    }
+}
+
+/* Start sending SOF tokens across the USB bus; lists are processed in
+ * the next frame.
+ */
+static int ohci_bus_start(OHCIState *ohci)
+{
+    trace_usb_ohci_start(ohci->name);
+
+    /* Delay the first SOF event by one frame time, as the Linux driver
+     * is not ready to receive it yet and can hit race conditions.
+     */
+
+    ohci->sof_time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
+    ohci_eof_timer(ohci);
+
+    return 1;
+}
+
+/* Stop sending SOF tokens on the bus */
+void ohci_bus_stop(OHCIState *ohci)
+{
+    trace_usb_ohci_stop(ohci->name);
+    timer_del(ohci->eof_timer);
+}
+
+/* Set a flag in a port status register, but only if the port is connected;
+ * if it is not, set the ConnectStatusChange flag instead. Returns 1 if the
+ * flag was newly set.
+ */
+static int ohci_port_set_if_connected(OHCIState *ohci, int i, uint32_t val)
+{
+    int ret = 1;
+
+    /* writing a 0 has no effect */
+    if (val == 0)
+        return 0;
+
+    /* If CurrentConnectStatus is cleared we set
+     * ConnectStatusChange
+     */
+    if (!(ohci->rhport[i].ctrl & OHCI_PORT_CCS)) {
+        ohci->rhport[i].ctrl |= OHCI_PORT_CSC;
+        if (ohci->rhstatus & OHCI_RHS_DRWE) {
+            /* TODO: CSC is a wakeup event */
+        }
+        return 0;
+    }
+
+    if (ohci->rhport[i].ctrl & val)
+        ret = 0;
+
+    /* set the bit */
+    ohci->rhport[i].ctrl |= val;
+
+    return ret;
+}
+
+/* Set the frame interval; the frame interval toggle is manipulated by the
+ * HCD only.
+ */
+static void ohci_set_frame_interval(OHCIState *ohci, uint16_t val)
+{
+    val &= OHCI_FMI_FI;
+
+    if (val != ohci->fi) {
+        trace_usb_ohci_set_frame_interval(ohci->name, ohci->fi, val);
+    }
+
+    ohci->fi = val;
+}
+
+static void ohci_port_power(OHCIState *ohci, int i, int p)
+{
+    if (p) {
+        ohci->rhport[i].ctrl |= OHCI_PORT_PPS;
+    } else {
+        ohci->rhport[i].ctrl &= ~(OHCI_PORT_PPS |
+                                  OHCI_PORT_CCS |
+                                  OHCI_PORT_PSS |
+                                  OHCI_PORT_PRS);
+    }
+}
+
+/* Set HcControlRegister */
+static void ohci_set_ctl(OHCIState *ohci, uint32_t val)
+{
+    uint32_t old_state;
+    uint32_t new_state;
+
+    old_state = ohci->ctl & OHCI_CTL_HCFS;
+    ohci->ctl = val;
+    new_state = ohci->ctl & OHCI_CTL_HCFS;
+
+    /* no state change */
+    if (old_state == new_state)
+        return;
+
+    trace_usb_ohci_set_ctl(ohci->name, new_state);
+    switch (new_state) {
+    case OHCI_USB_OPERATIONAL:
+        ohci_bus_start(ohci);
+        break;
+    case OHCI_USB_SUSPEND:
+        ohci_bus_stop(ohci);
+        /* clear pending SF otherwise the Linux driver loops in ohci_irq() */
+        ohci->intr_status &= ~OHCI_INTR_SF;
+        ohci_intr_update(ohci);
+        break;
+    case OHCI_USB_RESUME:
+        trace_usb_ohci_resume(ohci->name);
+        break;
+    case OHCI_USB_RESET:
+        ohci_roothub_reset(ohci);
+        break;
+    }
+}
+
+static uint32_t ohci_get_frame_remaining(OHCIState *ohci)
+{
+    uint16_t fr;
+    int64_t tks;
+
+    if ((ohci->ctl & OHCI_CTL_HCFS) != OHCI_USB_OPERATIONAL)
+        return (ohci->frt << 31);
+
+    /* Being in the USB operational state guarantees sof_time was
+     * set already.
+ */ + tks = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - ohci->sof_time; + if (tks < 0) { + tks = 0; + } + + /* avoid muldiv if possible */ + if (tks >= usb_frame_time) + return (ohci->frt << 31); + + tks = tks / usb_bit_time; + fr = (uint16_t)(ohci->fi - tks); + + return (ohci->frt << 31) | fr; +} + + +/* Set root hub status */ +static void ohci_set_hub_status(OHCIState *ohci, uint32_t val) +{ + uint32_t old_state; + + old_state = ohci->rhstatus; + + /* write 1 to clear OCIC */ + if (val & OHCI_RHS_OCIC) + ohci->rhstatus &= ~OHCI_RHS_OCIC; + + if (val & OHCI_RHS_LPS) { + int i; + + for (i = 0; i < ohci->num_ports; i++) + ohci_port_power(ohci, i, 0); + trace_usb_ohci_hub_power_down(); + } + + if (val & OHCI_RHS_LPSC) { + int i; + + for (i = 0; i < ohci->num_ports; i++) + ohci_port_power(ohci, i, 1); + trace_usb_ohci_hub_power_up(); + } + + if (val & OHCI_RHS_DRWE) + ohci->rhstatus |= OHCI_RHS_DRWE; + + if (val & OHCI_RHS_CRWE) + ohci->rhstatus &= ~OHCI_RHS_DRWE; + + if (old_state != ohci->rhstatus) + ohci_set_interrupt(ohci, OHCI_INTR_RHSC); +} + +/* Set root hub port status */ +static void ohci_port_set_status(OHCIState *ohci, int portnum, uint32_t val) +{ + uint32_t old_state; + OHCIPort *port; + + port = &ohci->rhport[portnum]; + old_state = port->ctrl; + + /* Write to clear CSC, PESC, PSSC, OCIC, PRSC */ + if (val & OHCI_PORT_WTC) + port->ctrl &= ~(val & OHCI_PORT_WTC); + + if (val & OHCI_PORT_CCS) + port->ctrl &= ~OHCI_PORT_PES; + + ohci_port_set_if_connected(ohci, portnum, val & OHCI_PORT_PES); + + if (ohci_port_set_if_connected(ohci, portnum, val & OHCI_PORT_PSS)) { + trace_usb_ohci_port_suspend(portnum); + } + + if (ohci_port_set_if_connected(ohci, portnum, val & OHCI_PORT_PRS)) { + trace_usb_ohci_port_reset(portnum); + usb_device_reset(port->port.dev); + port->ctrl &= ~OHCI_PORT_PRS; + /* ??? Should this also set OHCI_PORT_PESC. */ + port->ctrl |= OHCI_PORT_PES | OHCI_PORT_PRSC; + } + + /* Invert order here to ensure in ambiguous case, device is + * powered up... 
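
ohci_get_frame_remaining() above synthesizes HcFmRemaining from the virtual clock instead of counting bits on a wire: the nanoseconds elapsed since the last SOF are converted to bit times and subtracted from FrameInterval. A condensed standalone sketch of the same computation, with plain nanosecond parameters in place of the QEMU clock and globals:

    #include <stdint.h>

    /* FRT in bit 31, FrameRemaining in the low bits (0 once the frame ended). */
    static uint32_t sk_frame_remaining(uint8_t frt, uint16_t fi,
                                       int64_t now_ns, int64_t sof_ns,
                                       int64_t frame_ns, int64_t bit_ns)
    {
        int64_t elapsed = now_ns - sof_ns;

        if (elapsed < 0) {
            elapsed = 0;
        }
        if (elapsed >= frame_ns) {
            return (uint32_t)frt << 31;
        }
        return ((uint32_t)frt << 31) | (uint16_t)(fi - elapsed / bit_ns);
    }
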
+ */ + if (val & OHCI_PORT_LSDA) + ohci_port_power(ohci, portnum, 0); + if (val & OHCI_PORT_PPS) + ohci_port_power(ohci, portnum, 1); + + if (old_state != port->ctrl) + ohci_set_interrupt(ohci, OHCI_INTR_RHSC); +} + +static uint64_t ohci_mem_read(void *opaque, + hwaddr addr, + unsigned size) +{ + OHCIState *ohci = opaque; + uint32_t retval; + + /* Only aligned reads are allowed on OHCI */ + if (addr & 3) { + trace_usb_ohci_mem_read_unaligned(addr); + return 0xffffffff; + } else if (addr >= 0x54 && addr < 0x54 + ohci->num_ports * 4) { + /* HcRhPortStatus */ + retval = ohci->rhport[(addr - 0x54) >> 2].ctrl | OHCI_PORT_PPS; + } else { + switch (addr >> 2) { + case 0: /* HcRevision */ + retval = 0x10; + break; + + case 1: /* HcControl */ + retval = ohci->ctl; + break; + + case 2: /* HcCommandStatus */ + retval = ohci->status; + break; + + case 3: /* HcInterruptStatus */ + retval = ohci->intr_status; + break; + + case 4: /* HcInterruptEnable */ + case 5: /* HcInterruptDisable */ + retval = ohci->intr; + break; + + case 6: /* HcHCCA */ + retval = ohci->hcca; + break; + + case 7: /* HcPeriodCurrentED */ + retval = ohci->per_cur; + break; + + case 8: /* HcControlHeadED */ + retval = ohci->ctrl_head; + break; + + case 9: /* HcControlCurrentED */ + retval = ohci->ctrl_cur; + break; + + case 10: /* HcBulkHeadED */ + retval = ohci->bulk_head; + break; + + case 11: /* HcBulkCurrentED */ + retval = ohci->bulk_cur; + break; + + case 12: /* HcDoneHead */ + retval = ohci->done; + break; + + case 13: /* HcFmInterretval */ + retval = (ohci->fit << 31) | (ohci->fsmps << 16) | (ohci->fi); + break; + + case 14: /* HcFmRemaining */ + retval = ohci_get_frame_remaining(ohci); + break; + + case 15: /* HcFmNumber */ + retval = ohci->frame_number; + break; + + case 16: /* HcPeriodicStart */ + retval = ohci->pstart; + break; + + case 17: /* HcLSThreshold */ + retval = ohci->lst; + break; + + case 18: /* HcRhDescriptorA */ + retval = ohci->rhdesc_a; + break; + + case 19: /* HcRhDescriptorB */ + retval = ohci->rhdesc_b; + break; + + case 20: /* HcRhStatus */ + retval = ohci->rhstatus; + break; + + /* PXA27x specific registers */ + case 24: /* HcStatus */ + retval = ohci->hstatus & ohci->hmask; + break; + + case 25: /* HcHReset */ + retval = ohci->hreset; + break; + + case 26: /* HcHInterruptEnable */ + retval = ohci->hmask; + break; + + case 27: /* HcHInterruptTest */ + retval = ohci->htest; + break; + + default: + trace_usb_ohci_mem_read_bad_offset(addr); + retval = 0xffffffff; + } + } + + return retval; +} + +static void ohci_mem_write(void *opaque, + hwaddr addr, + uint64_t val, + unsigned size) +{ + OHCIState *ohci = opaque; + + /* Only aligned reads are allowed on OHCI */ + if (addr & 3) { + trace_usb_ohci_mem_write_unaligned(addr); + return; + } + + if (addr >= 0x54 && addr < 0x54 + ohci->num_ports * 4) { + /* HcRhPortStatus */ + ohci_port_set_status(ohci, (addr - 0x54) >> 2, val); + return; + } + + switch (addr >> 2) { + case 1: /* HcControl */ + ohci_set_ctl(ohci, val); + break; + + case 2: /* HcCommandStatus */ + /* SOC is read-only */ + val = (val & ~OHCI_STATUS_SOC); + + /* Bits written as '0' remain unchanged in the register */ + ohci->status |= val; + + if (ohci->status & OHCI_STATUS_HCR) + ohci_soft_reset(ohci); + break; + + case 3: /* HcInterruptStatus */ + ohci->intr_status &= ~val; + ohci_intr_update(ohci); + break; + + case 4: /* HcInterruptEnable */ + ohci->intr |= val; + ohci_intr_update(ohci); + break; + + case 5: /* HcInterruptDisable */ + ohci->intr &= ~val; + ohci_intr_update(ohci); + break; + + 
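
The three interrupt cases just above follow the usual OHCI model: HcInterruptStatus is write-1-to-clear, HcInterruptEnable ORs bits in, HcInterruptDisable masks them out, and ohci_intr_update() (defined earlier in this file) raises the line only when MasterInterruptEnable and at least one enabled status bit agree. A sketch of that level computation, assuming the spec's placement of MIE in bit 31:

    #include <stdbool.h>
    #include <stdint.h>

    #define SK_INTR_MIE (1u << 31)

    /* IRQ is asserted iff MIE is set and an enabled event is pending. */
    static bool sk_irq_level(uint32_t intr_status, uint32_t intr_enable)
    {
        return (intr_enable & SK_INTR_MIE) &&
               (intr_status & intr_enable & ~SK_INTR_MIE);
    }
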
case 6: /* HcHCCA */ + ohci->hcca = val & OHCI_HCCA_MASK; + break; + + case 7: /* HcPeriodCurrentED */ + /* Ignore writes to this read-only register, Linux does them */ + break; + + case 8: /* HcControlHeadED */ + ohci->ctrl_head = val & OHCI_EDPTR_MASK; + break; + + case 9: /* HcControlCurrentED */ + ohci->ctrl_cur = val & OHCI_EDPTR_MASK; + break; + + case 10: /* HcBulkHeadED */ + ohci->bulk_head = val & OHCI_EDPTR_MASK; + break; + + case 11: /* HcBulkCurrentED */ + ohci->bulk_cur = val & OHCI_EDPTR_MASK; + break; + + case 13: /* HcFmInterval */ + ohci->fsmps = (val & OHCI_FMI_FSMPS) >> 16; + ohci->fit = (val & OHCI_FMI_FIT) >> 31; + ohci_set_frame_interval(ohci, val); + break; + + case 15: /* HcFmNumber */ + break; + + case 16: /* HcPeriodicStart */ + ohci->pstart = val & 0xffff; + break; + + case 17: /* HcLSThreshold */ + ohci->lst = val & 0xffff; + break; + + case 18: /* HcRhDescriptorA */ + ohci->rhdesc_a &= ~OHCI_RHA_RW_MASK; + ohci->rhdesc_a |= val & OHCI_RHA_RW_MASK; + break; + + case 19: /* HcRhDescriptorB */ + break; + + case 20: /* HcRhStatus */ + ohci_set_hub_status(ohci, val); + break; + + /* PXA27x specific registers */ + case 24: /* HcStatus */ + ohci->hstatus &= ~(val & ohci->hmask); + break; + + case 25: /* HcHReset */ + ohci->hreset = val & ~OHCI_HRESET_FSBIR; + if (val & OHCI_HRESET_FSBIR) + ohci_hard_reset(ohci); + break; + + case 26: /* HcHInterruptEnable */ + ohci->hmask = val; + break; + + case 27: /* HcHInterruptTest */ + ohci->htest = val; + break; + + default: + trace_usb_ohci_mem_write_bad_offset(addr); + break; + } +} + +static void ohci_async_cancel_device(OHCIState *ohci, USBDevice *dev) +{ + if (ohci->async_td && + usb_packet_is_inflight(&ohci->usb_packet) && + ohci->usb_packet.ep->dev == dev) { + usb_cancel_packet(&ohci->usb_packet); + ohci->async_td = 0; + } +} + +static const MemoryRegionOps ohci_mem_ops = { + .read = ohci_mem_read, + .write = ohci_mem_write, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static USBPortOps ohci_port_ops = { + .attach = ohci_attach, + .detach = ohci_detach, + .child_detach = ohci_child_detach, + .wakeup = ohci_wakeup, + .complete = ohci_async_complete_packet, +}; + +static USBBusOps ohci_bus_ops = { +}; + +void usb_ohci_init(OHCIState *ohci, DeviceState *dev, uint32_t num_ports, + dma_addr_t localmem_base, char *masterbus, + uint32_t firstport, AddressSpace *as, + void (*ohci_die_fn)(struct OHCIState *), Error **errp) +{ + Error *err = NULL; + int i; + + ohci->as = as; + ohci->ohci_die = ohci_die_fn; + + if (num_ports > OHCI_MAX_PORTS) { + error_setg(errp, "OHCI num-ports=%u is too big (limit is %u ports)", + num_ports, OHCI_MAX_PORTS); + return; + } + + if (usb_frame_time == 0) { +#ifdef OHCI_TIME_WARP + usb_frame_time = NANOSECONDS_PER_SECOND; + usb_bit_time = NANOSECONDS_PER_SECOND / (USB_HZ / 1000); +#else + usb_frame_time = NANOSECONDS_PER_SECOND / 1000; + if (NANOSECONDS_PER_SECOND >= USB_HZ) { + usb_bit_time = NANOSECONDS_PER_SECOND / USB_HZ; + } else { + usb_bit_time = 1; + } +#endif + trace_usb_ohci_init_time(usb_frame_time, usb_bit_time); + } + + ohci->num_ports = num_ports; + if (masterbus) { + USBPort *ports[OHCI_MAX_PORTS]; + for(i = 0; i < num_ports; i++) { + ports[i] = &ohci->rhport[i].port; + } + usb_register_companion(masterbus, ports, num_ports, + firstport, ohci, &ohci_port_ops, + USB_SPEED_MASK_LOW | USB_SPEED_MASK_FULL, + &err); + if (err) { + error_propagate(errp, err); + return; + } + } else { + usb_bus_new(&ohci->bus, sizeof(ohci->bus), &ohci_bus_ops, dev); + for (i = 0; i < num_ports; i++) { + 
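
The one-time initialization in usb_ohci_init() above pins down the two timing globals: a USB 1.1 frame lasts 1 ms, and at the full-speed bit rate (USB_HZ, defined elsewhere in this file, presumably 12 MHz) a bit time comes out to roughly 83 ns on the non-OHCI_TIME_WARP path. Sketch of the arithmetic, with the 12 MHz value as an assumption:

    #include <stdint.h>

    #define SK_NS_PER_SEC 1000000000LL
    #define SK_USB_HZ     12000000LL   /* assumed full-speed bit rate */

    static void sk_init_usb_times(int64_t *frame_ns, int64_t *bit_ns)
    {
        *frame_ns = SK_NS_PER_SEC / 1000;       /* 1 ms per frame */
        *bit_ns   = SK_NS_PER_SEC / SK_USB_HZ;  /* ~83 ns per bit */
    }
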
usb_register_port(&ohci->bus, &ohci->rhport[i].port, + ohci, i, &ohci_port_ops, + USB_SPEED_MASK_LOW | USB_SPEED_MASK_FULL); + } + } + + memory_region_init_io(&ohci->mem, OBJECT(dev), &ohci_mem_ops, + ohci, "ohci", 256); + ohci->localmem_base = localmem_base; + + ohci->name = object_get_typename(OBJECT(dev)); + usb_packet_init(&ohci->usb_packet); + + ohci->async_td = 0; + + ohci->eof_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, + ohci_frame_boundary, ohci); +} + +/** + * A typical OHCI will stop operating and set itself into error state + * (which can be queried by MMIO) to signal that it got an error. + */ +void ohci_sysbus_die(struct OHCIState *ohci) +{ + trace_usb_ohci_die(); + + ohci_set_interrupt(ohci, OHCI_INTR_UE); + ohci_bus_stop(ohci); +} + +static void ohci_realize_pxa(DeviceState *dev, Error **errp) +{ + OHCISysBusState *s = SYSBUS_OHCI(dev); + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + Error *err = NULL; + + usb_ohci_init(&s->ohci, dev, s->num_ports, s->dma_offset, + s->masterbus, s->firstport, + &address_space_memory, ohci_sysbus_die, &err); + if (err) { + error_propagate(errp, err); + return; + } + sysbus_init_irq(sbd, &s->ohci.irq); + sysbus_init_mmio(sbd, &s->ohci.mem); +} + +static void usb_ohci_reset_sysbus(DeviceState *dev) +{ + OHCISysBusState *s = SYSBUS_OHCI(dev); + OHCIState *ohci = &s->ohci; + + ohci_hard_reset(ohci); +} + +static const VMStateDescription vmstate_ohci_state_port = { + .name = "ohci-core/port", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(ctrl, OHCIPort), + VMSTATE_END_OF_LIST() + }, +}; + +static bool ohci_eof_timer_needed(void *opaque) +{ + OHCIState *ohci = opaque; + + return timer_pending(ohci->eof_timer); +} + +static const VMStateDescription vmstate_ohci_eof_timer = { + .name = "ohci-core/eof-timer", + .version_id = 1, + .minimum_version_id = 1, + .needed = ohci_eof_timer_needed, + .fields = (VMStateField[]) { + VMSTATE_TIMER_PTR(eof_timer, OHCIState), + VMSTATE_END_OF_LIST() + }, +}; + +const VMStateDescription vmstate_ohci_state = { + .name = "ohci-core", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_INT64(sof_time, OHCIState), + VMSTATE_UINT32(ctl, OHCIState), + VMSTATE_UINT32(status, OHCIState), + VMSTATE_UINT32(intr_status, OHCIState), + VMSTATE_UINT32(intr, OHCIState), + VMSTATE_UINT32(hcca, OHCIState), + VMSTATE_UINT32(ctrl_head, OHCIState), + VMSTATE_UINT32(ctrl_cur, OHCIState), + VMSTATE_UINT32(bulk_head, OHCIState), + VMSTATE_UINT32(bulk_cur, OHCIState), + VMSTATE_UINT32(per_cur, OHCIState), + VMSTATE_UINT32(done, OHCIState), + VMSTATE_INT32(done_count, OHCIState), + VMSTATE_UINT16(fsmps, OHCIState), + VMSTATE_UINT8(fit, OHCIState), + VMSTATE_UINT16(fi, OHCIState), + VMSTATE_UINT8(frt, OHCIState), + VMSTATE_UINT16(frame_number, OHCIState), + VMSTATE_UINT16(padding, OHCIState), + VMSTATE_UINT32(pstart, OHCIState), + VMSTATE_UINT32(lst, OHCIState), + VMSTATE_UINT32(rhdesc_a, OHCIState), + VMSTATE_UINT32(rhdesc_b, OHCIState), + VMSTATE_UINT32(rhstatus, OHCIState), + VMSTATE_STRUCT_ARRAY(rhport, OHCIState, OHCI_MAX_PORTS, 0, + vmstate_ohci_state_port, OHCIPort), + VMSTATE_UINT32(hstatus, OHCIState), + VMSTATE_UINT32(hmask, OHCIState), + VMSTATE_UINT32(hreset, OHCIState), + VMSTATE_UINT32(htest, OHCIState), + VMSTATE_UINT32(old_ctl, OHCIState), + VMSTATE_UINT8_ARRAY(usb_buf, OHCIState, 8192), + VMSTATE_UINT32(async_td, OHCIState), + VMSTATE_BOOL(async_complete, OHCIState), + VMSTATE_END_OF_LIST() + }, + .subsections = (const VMStateDescription*[]) { + 
&vmstate_ohci_eof_timer, + NULL + } +}; + +static Property ohci_sysbus_properties[] = { + DEFINE_PROP_STRING("masterbus", OHCISysBusState, masterbus), + DEFINE_PROP_UINT32("num-ports", OHCISysBusState, num_ports, 3), + DEFINE_PROP_UINT32("firstport", OHCISysBusState, firstport, 0), + DEFINE_PROP_DMAADDR("dma-offset", OHCISysBusState, dma_offset, 0), + DEFINE_PROP_END_OF_LIST(), +}; + +static void ohci_sysbus_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = ohci_realize_pxa; + set_bit(DEVICE_CATEGORY_USB, dc->categories); + dc->desc = "OHCI USB Controller"; + device_class_set_props(dc, ohci_sysbus_properties); + dc->reset = usb_ohci_reset_sysbus; +} + +static const TypeInfo ohci_sysbus_info = { + .name = TYPE_SYSBUS_OHCI, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(OHCISysBusState), + .class_init = ohci_sysbus_class_init, +}; + +static void ohci_register_types(void) +{ + type_register_static(&ohci_sysbus_info); +} + +type_init(ohci_register_types) diff --git a/hw/usb/hcd-ohci.h b/hw/usb/hcd-ohci.h new file mode 100644 index 000000000..11ac57058 --- /dev/null +++ b/hw/usb/hcd-ohci.h @@ -0,0 +1,121 @@ +/* + * QEMU USB OHCI Emulation + * Copyright (c) 2004 Gianni Tedesco + * Copyright (c) 2006 CodeSourcery + * Copyright (c) 2006 Openedhand Ltd. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ + +#ifndef HCD_OHCI_H +#define HCD_OHCI_H + +#include "sysemu/dma.h" +#include "hw/usb.h" +#include "qom/object.h" + +/* Number of Downstream Ports on the root hub: */ +#define OHCI_MAX_PORTS 15 + +typedef struct OHCIPort { + USBPort port; + uint32_t ctrl; +} OHCIPort; + +typedef struct OHCIState { + USBBus bus; + qemu_irq irq; + MemoryRegion mem; + AddressSpace *as; + uint32_t num_ports; + const char *name; + + QEMUTimer *eof_timer; + int64_t sof_time; + + /* OHCI state */ + /* Control partition */ + uint32_t ctl, status; + uint32_t intr_status; + uint32_t intr; + + /* memory pointer partition */ + uint32_t hcca; + uint32_t ctrl_head, ctrl_cur; + uint32_t bulk_head, bulk_cur; + uint32_t per_cur; + uint32_t done; + int32_t done_count; + + /* Frame counter partition */ + uint16_t fsmps; + uint8_t fit; + uint16_t fi; + uint8_t frt; + uint16_t frame_number; + uint16_t padding; + uint32_t pstart; + uint32_t lst; + + /* Root Hub partition */ + uint32_t rhdesc_a, rhdesc_b; + uint32_t rhstatus; + OHCIPort rhport[OHCI_MAX_PORTS]; + + /* PXA27x Non-OHCI events */ + uint32_t hstatus; + uint32_t hmask; + uint32_t hreset; + uint32_t htest; + + /* SM501 local memory offset */ + dma_addr_t localmem_base; + + /* Active packets. 
*/ + uint32_t old_ctl; + USBPacket usb_packet; + uint8_t usb_buf[8192]; + uint32_t async_td; + bool async_complete; + + void (*ohci_die)(struct OHCIState *ohci); +} OHCIState; + +#define TYPE_SYSBUS_OHCI "sysbus-ohci" +OBJECT_DECLARE_SIMPLE_TYPE(OHCISysBusState, SYSBUS_OHCI) + +struct OHCISysBusState { + /*< private >*/ + SysBusDevice parent_obj; + /*< public >*/ + + OHCIState ohci; + char *masterbus; + uint32_t num_ports; + uint32_t firstport; + dma_addr_t dma_offset; +}; + +extern const VMStateDescription vmstate_ohci_state; + +void usb_ohci_init(OHCIState *ohci, DeviceState *dev, uint32_t num_ports, + dma_addr_t localmem_base, char *masterbus, + uint32_t firstport, AddressSpace *as, + void (*ohci_die_fn)(struct OHCIState *), Error **errp); +void ohci_bus_stop(OHCIState *ohci); +void ohci_stop_endpoints(OHCIState *ohci); +void ohci_hard_reset(OHCIState *ohci); +void ohci_sysbus_die(struct OHCIState *ohci); + +#endif diff --git a/hw/usb/hcd-uhci.c b/hw/usb/hcd-uhci.c new file mode 100644 index 000000000..d1b5657d7 --- /dev/null +++ b/hw/usb/hcd-uhci.c @@ -0,0 +1,1370 @@ +/* + * USB UHCI controller emulation + * + * Copyright (c) 2005 Fabrice Bellard + * + * Copyright (c) 2008 Max Krasnyansky + * Magor rewrite of the UHCI data structures parser and frame processor + * Support for fully async operation and multiple outstanding transactions + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "hw/usb.h" +#include "hw/usb/uhci-regs.h" +#include "migration/vmstate.h" +#include "hw/pci/pci.h" +#include "hw/irq.h" +#include "hw/qdev-properties.h" +#include "qapi/error.h" +#include "qemu/timer.h" +#include "qemu/iov.h" +#include "sysemu/dma.h" +#include "trace.h" +#include "qemu/main-loop.h" +#include "qemu/module.h" +#include "qom/object.h" +#include "hcd-uhci.h" + +#define FRAME_TIMER_FREQ 1000 + +#define FRAME_MAX_LOOPS 256 + +/* Must be large enough to handle 10 frame delay for initial isoc requests */ +#define QH_VALID 32 + +#define MAX_FRAMES_PER_TICK (QH_VALID / 2) + +enum { + TD_RESULT_STOP_FRAME = 10, + TD_RESULT_COMPLETE, + TD_RESULT_NEXT_QH, + TD_RESULT_ASYNC_START, + TD_RESULT_ASYNC_CONT, +}; + +typedef struct UHCIState UHCIState; +typedef struct UHCIAsync UHCIAsync; +typedef struct UHCIPCIDeviceClass UHCIPCIDeviceClass; + +struct UHCIPCIDeviceClass { + PCIDeviceClass parent_class; + UHCIInfo info; +}; + +/* + * Pending async transaction. 
+ * 'packet' must be the first field because completion + * handler does "(UHCIAsync *) pkt" cast. + */ + +struct UHCIAsync { + USBPacket packet; + uint8_t static_buf[64]; /* 64 bytes is enough, except for isoc packets */ + uint8_t *buf; + UHCIQueue *queue; + QTAILQ_ENTRY(UHCIAsync) next; + uint32_t td_addr; + uint8_t done; +}; + +struct UHCIQueue { + uint32_t qh_addr; + uint32_t token; + UHCIState *uhci; + USBEndpoint *ep; + QTAILQ_ENTRY(UHCIQueue) next; + QTAILQ_HEAD(, UHCIAsync) asyncs; + int8_t valid; +}; + +typedef struct UHCI_TD { + uint32_t link; + uint32_t ctrl; /* see TD_CTRL_xxx */ + uint32_t token; + uint32_t buffer; +} UHCI_TD; + +typedef struct UHCI_QH { + uint32_t link; + uint32_t el_link; +} UHCI_QH; + +static void uhci_async_cancel(UHCIAsync *async); +static void uhci_queue_fill(UHCIQueue *q, UHCI_TD *td); +static void uhci_resume(void *opaque); + +static inline int32_t uhci_queue_token(UHCI_TD *td) +{ + if ((td->token & (0xf << 15)) == 0) { + /* ctrl ep, cover ep and dev, not pid! */ + return td->token & 0x7ff00; + } else { + /* covers ep, dev, pid -> identifies the endpoint */ + return td->token & 0x7ffff; + } +} + +static UHCIQueue *uhci_queue_new(UHCIState *s, uint32_t qh_addr, UHCI_TD *td, + USBEndpoint *ep) +{ + UHCIQueue *queue; + + queue = g_new0(UHCIQueue, 1); + queue->uhci = s; + queue->qh_addr = qh_addr; + queue->token = uhci_queue_token(td); + queue->ep = ep; + QTAILQ_INIT(&queue->asyncs); + QTAILQ_INSERT_HEAD(&s->queues, queue, next); + queue->valid = QH_VALID; + trace_usb_uhci_queue_add(queue->token); + return queue; +} + +static void uhci_queue_free(UHCIQueue *queue, const char *reason) +{ + UHCIState *s = queue->uhci; + UHCIAsync *async; + + while (!QTAILQ_EMPTY(&queue->asyncs)) { + async = QTAILQ_FIRST(&queue->asyncs); + uhci_async_cancel(async); + } + usb_device_ep_stopped(queue->ep->dev, queue->ep); + + trace_usb_uhci_queue_del(queue->token, reason); + QTAILQ_REMOVE(&s->queues, queue, next); + g_free(queue); +} + +static UHCIQueue *uhci_queue_find(UHCIState *s, UHCI_TD *td) +{ + uint32_t token = uhci_queue_token(td); + UHCIQueue *queue; + + QTAILQ_FOREACH(queue, &s->queues, next) { + if (queue->token == token) { + return queue; + } + } + return NULL; +} + +static bool uhci_queue_verify(UHCIQueue *queue, uint32_t qh_addr, UHCI_TD *td, + uint32_t td_addr, bool queuing) +{ + UHCIAsync *first = QTAILQ_FIRST(&queue->asyncs); + uint32_t queue_token_addr = (queue->token >> 8) & 0x7f; + + return queue->qh_addr == qh_addr && + queue->token == uhci_queue_token(td) && + queue_token_addr == queue->ep->dev->addr && + (queuing || !(td->ctrl & TD_CTRL_ACTIVE) || first == NULL || + first->td_addr == td_addr); +} + +static UHCIAsync *uhci_async_alloc(UHCIQueue *queue, uint32_t td_addr) +{ + UHCIAsync *async = g_new0(UHCIAsync, 1); + + async->queue = queue; + async->td_addr = td_addr; + usb_packet_init(&async->packet); + trace_usb_uhci_packet_add(async->queue->token, async->td_addr); + + return async; +} + +static void uhci_async_free(UHCIAsync *async) +{ + trace_usb_uhci_packet_del(async->queue->token, async->td_addr); + usb_packet_cleanup(&async->packet); + if (async->buf != async->static_buf) { + g_free(async->buf); + } + g_free(async); +} + +static void uhci_async_link(UHCIAsync *async) +{ + UHCIQueue *queue = async->queue; + QTAILQ_INSERT_TAIL(&queue->asyncs, async, next); + trace_usb_uhci_packet_link_async(async->queue->token, async->td_addr); +} + +static void uhci_async_unlink(UHCIAsync *async) +{ + UHCIQueue *queue = async->queue; + QTAILQ_REMOVE(&queue->asyncs, 
async, next); + trace_usb_uhci_packet_unlink_async(async->queue->token, async->td_addr); +} + +static void uhci_async_cancel(UHCIAsync *async) +{ + uhci_async_unlink(async); + trace_usb_uhci_packet_cancel(async->queue->token, async->td_addr, + async->done); + if (!async->done) + usb_cancel_packet(&async->packet); + uhci_async_free(async); +} + +/* + * Mark all outstanding async packets as invalid. + * This is used for canceling them when TDs are removed by the HCD. + */ +static void uhci_async_validate_begin(UHCIState *s) +{ + UHCIQueue *queue; + + QTAILQ_FOREACH(queue, &s->queues, next) { + queue->valid--; + } +} + +/* + * Cancel async packets that are no longer valid + */ +static void uhci_async_validate_end(UHCIState *s) +{ + UHCIQueue *queue, *n; + + QTAILQ_FOREACH_SAFE(queue, &s->queues, next, n) { + if (!queue->valid) { + uhci_queue_free(queue, "validate-end"); + } + } +} + +static void uhci_async_cancel_device(UHCIState *s, USBDevice *dev) +{ + UHCIQueue *queue, *n; + + QTAILQ_FOREACH_SAFE(queue, &s->queues, next, n) { + if (queue->ep->dev == dev) { + uhci_queue_free(queue, "cancel-device"); + } + } +} + +static void uhci_async_cancel_all(UHCIState *s) +{ + UHCIQueue *queue, *nq; + + QTAILQ_FOREACH_SAFE(queue, &s->queues, next, nq) { + uhci_queue_free(queue, "cancel-all"); + } +} + +static UHCIAsync *uhci_async_find_td(UHCIState *s, uint32_t td_addr) +{ + UHCIQueue *queue; + UHCIAsync *async; + + QTAILQ_FOREACH(queue, &s->queues, next) { + QTAILQ_FOREACH(async, &queue->asyncs, next) { + if (async->td_addr == td_addr) { + return async; + } + } + } + return NULL; +} + +static void uhci_update_irq(UHCIState *s) +{ + int level = 0; + if (((s->status2 & 1) && (s->intr & (1 << 2))) || + ((s->status2 & 2) && (s->intr & (1 << 3))) || + ((s->status & UHCI_STS_USBERR) && (s->intr & (1 << 0))) || + ((s->status & UHCI_STS_RD) && (s->intr & (1 << 1))) || + (s->status & UHCI_STS_HSERR) || + (s->status & UHCI_STS_HCPERR)) { + level = 1; + } + qemu_set_irq(s->irq, level); +} + +static void uhci_reset(DeviceState *dev) +{ + PCIDevice *d = PCI_DEVICE(dev); + UHCIState *s = UHCI(d); + uint8_t *pci_conf; + int i; + UHCIPort *port; + + trace_usb_uhci_reset(); + + pci_conf = s->dev.config; + + pci_conf[0x6a] = 0x01; /* usb clock */ + pci_conf[0x6b] = 0x00; + s->cmd = 0; + s->status = UHCI_STS_HCHALTED; + s->status2 = 0; + s->intr = 0; + s->fl_base_addr = 0; + s->sof_timing = 64; + + for(i = 0; i < NB_PORTS; i++) { + port = &s->ports[i]; + port->ctrl = 0x0080; + if (port->port.dev && port->port.dev->attached) { + usb_port_reset(&port->port); + } + } + + uhci_async_cancel_all(s); + qemu_bh_cancel(s->bh); + uhci_update_irq(s); +} + +static const VMStateDescription vmstate_uhci_port = { + .name = "uhci port", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT16(ctrl, UHCIPort), + VMSTATE_END_OF_LIST() + } +}; + +static int uhci_post_load(void *opaque, int version_id) +{ + UHCIState *s = opaque; + + if (version_id < 2) { + s->expire_time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + + (NANOSECONDS_PER_SECOND / FRAME_TIMER_FREQ); + } + return 0; +} + +static const VMStateDescription vmstate_uhci = { + .name = "uhci", + .version_id = 3, + .minimum_version_id = 1, + .post_load = uhci_post_load, + .fields = (VMStateField[]) { + VMSTATE_PCI_DEVICE(dev, UHCIState), + VMSTATE_UINT8_EQUAL(num_ports_vmstate, UHCIState, NULL), + VMSTATE_STRUCT_ARRAY(ports, UHCIState, NB_PORTS, 1, + vmstate_uhci_port, UHCIPort), + VMSTATE_UINT16(cmd, UHCIState), + VMSTATE_UINT16(status, UHCIState), + 
VMSTATE_UINT16(intr, UHCIState), + VMSTATE_UINT16(frnum, UHCIState), + VMSTATE_UINT32(fl_base_addr, UHCIState), + VMSTATE_UINT8(sof_timing, UHCIState), + VMSTATE_UINT8(status2, UHCIState), + VMSTATE_TIMER_PTR(frame_timer, UHCIState), + VMSTATE_INT64_V(expire_time, UHCIState, 2), + VMSTATE_UINT32_V(pending_int_mask, UHCIState, 3), + VMSTATE_END_OF_LIST() + } +}; + +static void uhci_port_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + UHCIState *s = opaque; + + trace_usb_uhci_mmio_writew(addr, val); + + switch(addr) { + case 0x00: + if ((val & UHCI_CMD_RS) && !(s->cmd & UHCI_CMD_RS)) { + /* start frame processing */ + trace_usb_uhci_schedule_start(); + s->expire_time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + + (NANOSECONDS_PER_SECOND / FRAME_TIMER_FREQ); + timer_mod(s->frame_timer, s->expire_time); + s->status &= ~UHCI_STS_HCHALTED; + } else if (!(val & UHCI_CMD_RS)) { + s->status |= UHCI_STS_HCHALTED; + } + if (val & UHCI_CMD_GRESET) { + UHCIPort *port; + int i; + + /* send reset on the USB bus */ + for(i = 0; i < NB_PORTS; i++) { + port = &s->ports[i]; + usb_device_reset(port->port.dev); + } + uhci_reset(DEVICE(s)); + return; + } + if (val & UHCI_CMD_HCRESET) { + uhci_reset(DEVICE(s)); + return; + } + s->cmd = val; + if (val & UHCI_CMD_EGSM) { + if ((s->ports[0].ctrl & UHCI_PORT_RD) || + (s->ports[1].ctrl & UHCI_PORT_RD)) { + uhci_resume(s); + } + } + break; + case 0x02: + s->status &= ~val; + /* XXX: the chip spec is not coherent, so we add a hidden + register to distinguish between IOC and SPD */ + if (val & UHCI_STS_USBINT) + s->status2 = 0; + uhci_update_irq(s); + break; + case 0x04: + s->intr = val; + uhci_update_irq(s); + break; + case 0x06: + if (s->status & UHCI_STS_HCHALTED) + s->frnum = val & 0x7ff; + break; + case 0x08: + s->fl_base_addr &= 0xffff0000; + s->fl_base_addr |= val & ~0xfff; + break; + case 0x0a: + s->fl_base_addr &= 0x0000ffff; + s->fl_base_addr |= (val << 16); + break; + case 0x0c: + s->sof_timing = val & 0xff; + break; + case 0x10 ... 0x1f: + { + UHCIPort *port; + USBDevice *dev; + int n; + + n = (addr >> 1) & 7; + if (n >= NB_PORTS) + return; + port = &s->ports[n]; + dev = port->port.dev; + if (dev && dev->attached) { + /* port reset */ + if ( (val & UHCI_PORT_RESET) && + !(port->ctrl & UHCI_PORT_RESET) ) { + usb_device_reset(dev); + } + } + port->ctrl &= UHCI_PORT_READ_ONLY; + /* enabled may only be set if a device is connected */ + if (!(port->ctrl & UHCI_PORT_CCS)) { + val &= ~UHCI_PORT_EN; + } + port->ctrl |= (val & ~UHCI_PORT_READ_ONLY); + /* some bits are reset when a '1' is written to them */ + port->ctrl &= ~(val & UHCI_PORT_WRITE_CLEAR); + } + break; + } +} + +static uint64_t uhci_port_read(void *opaque, hwaddr addr, unsigned size) +{ + UHCIState *s = opaque; + uint32_t val; + + switch(addr) { + case 0x00: + val = s->cmd; + break; + case 0x02: + val = s->status; + break; + case 0x04: + val = s->intr; + break; + case 0x06: + val = s->frnum; + break; + case 0x08: + val = s->fl_base_addr & 0xffff; + break; + case 0x0a: + val = (s->fl_base_addr >> 16) & 0xffff; + break; + case 0x0c: + val = s->sof_timing; + break; + case 0x10 ... 
0x1f: + { + UHCIPort *port; + int n; + n = (addr >> 1) & 7; + if (n >= NB_PORTS) + goto read_default; + port = &s->ports[n]; + val = port->ctrl; + } + break; + default: + read_default: + val = 0xff7f; /* disabled port */ + break; + } + + trace_usb_uhci_mmio_readw(addr, val); + + return val; +} + +/* signal resume if controller suspended */ +static void uhci_resume (void *opaque) +{ + UHCIState *s = (UHCIState *)opaque; + + if (!s) + return; + + if (s->cmd & UHCI_CMD_EGSM) { + s->cmd |= UHCI_CMD_FGR; + s->status |= UHCI_STS_RD; + uhci_update_irq(s); + } +} + +static void uhci_attach(USBPort *port1) +{ + UHCIState *s = port1->opaque; + UHCIPort *port = &s->ports[port1->index]; + + /* set connect status */ + port->ctrl |= UHCI_PORT_CCS | UHCI_PORT_CSC; + + /* update speed */ + if (port->port.dev->speed == USB_SPEED_LOW) { + port->ctrl |= UHCI_PORT_LSDA; + } else { + port->ctrl &= ~UHCI_PORT_LSDA; + } + + uhci_resume(s); +} + +static void uhci_detach(USBPort *port1) +{ + UHCIState *s = port1->opaque; + UHCIPort *port = &s->ports[port1->index]; + + uhci_async_cancel_device(s, port1->dev); + + /* set connect status */ + if (port->ctrl & UHCI_PORT_CCS) { + port->ctrl &= ~UHCI_PORT_CCS; + port->ctrl |= UHCI_PORT_CSC; + } + /* disable port */ + if (port->ctrl & UHCI_PORT_EN) { + port->ctrl &= ~UHCI_PORT_EN; + port->ctrl |= UHCI_PORT_ENC; + } + + uhci_resume(s); +} + +static void uhci_child_detach(USBPort *port1, USBDevice *child) +{ + UHCIState *s = port1->opaque; + + uhci_async_cancel_device(s, child); +} + +static void uhci_wakeup(USBPort *port1) +{ + UHCIState *s = port1->opaque; + UHCIPort *port = &s->ports[port1->index]; + + if (port->ctrl & UHCI_PORT_SUSPEND && !(port->ctrl & UHCI_PORT_RD)) { + port->ctrl |= UHCI_PORT_RD; + uhci_resume(s); + } +} + +static USBDevice *uhci_find_device(UHCIState *s, uint8_t addr) +{ + USBDevice *dev; + int i; + + for (i = 0; i < NB_PORTS; i++) { + UHCIPort *port = &s->ports[i]; + if (!(port->ctrl & UHCI_PORT_EN)) { + continue; + } + dev = usb_find_device(&port->port, addr); + if (dev != NULL) { + return dev; + } + } + return NULL; +} + +static void uhci_read_td(UHCIState *s, UHCI_TD *td, uint32_t link) +{ + pci_dma_read(&s->dev, link & ~0xf, td, sizeof(*td)); + le32_to_cpus(&td->link); + le32_to_cpus(&td->ctrl); + le32_to_cpus(&td->token); + le32_to_cpus(&td->buffer); +} + +static int uhci_handle_td_error(UHCIState *s, UHCI_TD *td, uint32_t td_addr, + int status, uint32_t *int_mask) +{ + uint32_t queue_token = uhci_queue_token(td); + int ret; + + switch (status) { + case USB_RET_NAK: + td->ctrl |= TD_CTRL_NAK; + return TD_RESULT_NEXT_QH; + + case USB_RET_STALL: + td->ctrl |= TD_CTRL_STALL; + trace_usb_uhci_packet_complete_stall(queue_token, td_addr); + ret = TD_RESULT_NEXT_QH; + break; + + case USB_RET_BABBLE: + td->ctrl |= TD_CTRL_BABBLE | TD_CTRL_STALL; + /* frame interrupted */ + trace_usb_uhci_packet_complete_babble(queue_token, td_addr); + ret = TD_RESULT_STOP_FRAME; + break; + + case USB_RET_IOERROR: + case USB_RET_NODEV: + default: + td->ctrl |= TD_CTRL_TIMEOUT; + td->ctrl &= ~(3 << TD_CTRL_ERROR_SHIFT); + trace_usb_uhci_packet_complete_error(queue_token, td_addr); + ret = TD_RESULT_NEXT_QH; + break; + } + + td->ctrl &= ~TD_CTRL_ACTIVE; + s->status |= UHCI_STS_USBERR; + if (td->ctrl & TD_CTRL_IOC) { + *int_mask |= 0x01; + } + uhci_update_irq(s); + return ret; +} + +static int uhci_complete_td(UHCIState *s, UHCI_TD *td, UHCIAsync *async, uint32_t *int_mask) +{ + int len = 0, max_len; + uint8_t pid; + + max_len = ((td->token >> 21) + 1) & 0x7ff; + pid = 
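
The MaxLen decode that begins here ('((td->token >> 21) + 1) & 0x7ff') follows the UHCI n-minus-one encoding: bits 31:21 of the token hold the transfer length minus one, with 0x7ff denoting zero bytes. Sketch (illustrative name):

    #include <stdint.h>

    /* Decode a UHCI TD MaxLen field: n-1 encoding, 0x7ff means 0 bytes. */
    static int sk_td_maxlen(uint32_t token)
    {
        return (int)(((token >> 21) + 1) & 0x7ff);  /* 0..1280 bytes */
    }
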
td->token & 0xff; + + if (td->ctrl & TD_CTRL_IOS) + td->ctrl &= ~TD_CTRL_ACTIVE; + + if (async->packet.status != USB_RET_SUCCESS) { + return uhci_handle_td_error(s, td, async->td_addr, + async->packet.status, int_mask); + } + + len = async->packet.actual_length; + td->ctrl = (td->ctrl & ~0x7ff) | ((len - 1) & 0x7ff); + + /* The NAK bit may have been set by a previous frame, so clear it + here. The docs are somewhat unclear, but win2k relies on this + behavior. */ + td->ctrl &= ~(TD_CTRL_ACTIVE | TD_CTRL_NAK); + if (td->ctrl & TD_CTRL_IOC) + *int_mask |= 0x01; + + if (pid == USB_TOKEN_IN) { + pci_dma_write(&s->dev, td->buffer, async->buf, len); + if ((td->ctrl & TD_CTRL_SPD) && len < max_len) { + *int_mask |= 0x02; + /* short packet: do not update QH */ + trace_usb_uhci_packet_complete_shortxfer(async->queue->token, + async->td_addr); + return TD_RESULT_NEXT_QH; + } + } + + /* success */ + trace_usb_uhci_packet_complete_success(async->queue->token, + async->td_addr); + return TD_RESULT_COMPLETE; +} + +static int uhci_handle_td(UHCIState *s, UHCIQueue *q, uint32_t qh_addr, + UHCI_TD *td, uint32_t td_addr, uint32_t *int_mask) +{ + int ret, max_len; + bool spd; + bool queuing = (q != NULL); + uint8_t pid = td->token & 0xff; + UHCIAsync *async; + + async = uhci_async_find_td(s, td_addr); + if (async) { + if (uhci_queue_verify(async->queue, qh_addr, td, td_addr, queuing)) { + assert(q == NULL || q == async->queue); + q = async->queue; + } else { + uhci_queue_free(async->queue, "guest re-used pending td"); + async = NULL; + } + } + + if (q == NULL) { + q = uhci_queue_find(s, td); + if (q && !uhci_queue_verify(q, qh_addr, td, td_addr, queuing)) { + uhci_queue_free(q, "guest re-used qh"); + q = NULL; + } + } + + if (q) { + q->valid = QH_VALID; + } + + /* Is active ? */ + if (!(td->ctrl & TD_CTRL_ACTIVE)) { + if (async) { + /* Guest marked a pending td non-active, cancel the queue */ + uhci_queue_free(async->queue, "pending td non-active"); + } + /* + * ehci11d spec page 22: "Even if the Active bit in the TD is already + * cleared when the TD is fetched ... an IOC interrupt is generated" + */ + if (td->ctrl & TD_CTRL_IOC) { + *int_mask |= 0x01; + } + return TD_RESULT_NEXT_QH; + } + + switch (pid) { + case USB_TOKEN_OUT: + case USB_TOKEN_SETUP: + case USB_TOKEN_IN: + break; + default: + /* invalid pid : frame interrupted */ + s->status |= UHCI_STS_HCPERR; + s->cmd &= ~UHCI_CMD_RS; + uhci_update_irq(s); + return TD_RESULT_STOP_FRAME; + } + + if (async) { + if (queuing) { + /* we are busy filling the queue, we are not prepared + to consume completed packages then, just leave them + in async state */ + return TD_RESULT_ASYNC_CONT; + } + if (!async->done) { + UHCI_TD last_td; + UHCIAsync *last = QTAILQ_LAST(&async->queue->asyncs); + /* + * While we are waiting for the current td to complete, the guest + * may have added more tds to the queue. Note we re-read the td + * rather then caching it, as we want to see guest made changes! 
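
uhci_handle_td() above keys everything off the TD token dword, and the same field extractions recur throughout this file. A sketch collecting them in one place (PID in bits 7:0, device address in 14:8, endpoint in 18:15):

    #include <stdint.h>

    static void sk_token_fields(uint32_t token, uint8_t *pid,
                                uint8_t *devaddr, uint8_t *ep)
    {
        *pid     = token & 0xff;         /* USB_TOKEN_IN/OUT/SETUP */
        *devaddr = (token >> 8) & 0x7f;
        *ep      = (token >> 15) & 0xf;
    }
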
+ */ + uhci_read_td(s, &last_td, last->td_addr); + uhci_queue_fill(async->queue, &last_td); + + return TD_RESULT_ASYNC_CONT; + } + uhci_async_unlink(async); + goto done; + } + + if (s->completions_only) { + return TD_RESULT_ASYNC_CONT; + } + + /* Allocate new packet */ + if (q == NULL) { + USBDevice *dev; + USBEndpoint *ep; + + dev = uhci_find_device(s, (td->token >> 8) & 0x7f); + if (dev == NULL) { + return uhci_handle_td_error(s, td, td_addr, USB_RET_NODEV, + int_mask); + } + ep = usb_ep_get(dev, pid, (td->token >> 15) & 0xf); + q = uhci_queue_new(s, qh_addr, td, ep); + } + async = uhci_async_alloc(q, td_addr); + + max_len = ((td->token >> 21) + 1) & 0x7ff; + spd = (pid == USB_TOKEN_IN && (td->ctrl & TD_CTRL_SPD) != 0); + usb_packet_setup(&async->packet, pid, q->ep, 0, td_addr, spd, + (td->ctrl & TD_CTRL_IOC) != 0); + if (max_len <= sizeof(async->static_buf)) { + async->buf = async->static_buf; + } else { + async->buf = g_malloc(max_len); + } + usb_packet_addbuf(&async->packet, async->buf, max_len); + + switch(pid) { + case USB_TOKEN_OUT: + case USB_TOKEN_SETUP: + pci_dma_read(&s->dev, td->buffer, async->buf, max_len); + usb_handle_packet(q->ep->dev, &async->packet); + if (async->packet.status == USB_RET_SUCCESS) { + async->packet.actual_length = max_len; + } + break; + + case USB_TOKEN_IN: + usb_handle_packet(q->ep->dev, &async->packet); + break; + + default: + abort(); /* Never to execute */ + } + + if (async->packet.status == USB_RET_ASYNC) { + uhci_async_link(async); + if (!queuing) { + uhci_queue_fill(q, td); + } + return TD_RESULT_ASYNC_START; + } + +done: + ret = uhci_complete_td(s, td, async, int_mask); + uhci_async_free(async); + return ret; +} + +static void uhci_async_complete(USBPort *port, USBPacket *packet) +{ + UHCIAsync *async = container_of(packet, UHCIAsync, packet); + UHCIState *s = async->queue->uhci; + + if (packet->status == USB_RET_REMOVE_FROM_QUEUE) { + uhci_async_cancel(async); + return; + } + + async->done = 1; + /* Force processing of this packet *now*, needed for migration */ + s->completions_only = true; + qemu_bh_schedule(s->bh); +} + +static int is_valid(uint32_t link) +{ + return (link & 1) == 0; +} + +static int is_qh(uint32_t link) +{ + return (link & 2) != 0; +} + +static int depth_first(uint32_t link) +{ + return (link & 4) != 0; +} + +/* QH DB used for detecting QH loops */ +#define UHCI_MAX_QUEUES 128 +typedef struct { + uint32_t addr[UHCI_MAX_QUEUES]; + int count; +} QhDb; + +static void qhdb_reset(QhDb *db) +{ + db->count = 0; +} + +/* Add QH to DB. Returns 1 if already present or DB is full. 
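
is_valid()/is_qh()/depth_first() above decode the flag bits of a UHCI link pointer: T (bit 0) terminates the chain, Q (bit 1) marks a QH rather than a TD, and Vf (bit 2) requests depth-first traversal; the element address occupies the 16-byte-aligned upper bits, which is why addresses are masked with ~0xf throughout. One companion helper as a sketch:

    #include <stdint.h>

    /* The element address of a link pointer, flag bits stripped. */
    static uint32_t sk_link_addr(uint32_t link)
    {
        return link & ~0xfu;
    }
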
*/ +static int qhdb_insert(QhDb *db, uint32_t addr) +{ + int i; + for (i = 0; i < db->count; i++) + if (db->addr[i] == addr) + return 1; + + if (db->count >= UHCI_MAX_QUEUES) + return 1; + + db->addr[db->count++] = addr; + return 0; +} + +static void uhci_queue_fill(UHCIQueue *q, UHCI_TD *td) +{ + uint32_t int_mask = 0; + uint32_t plink = td->link; + UHCI_TD ptd; + int ret; + + while (is_valid(plink)) { + uhci_read_td(q->uhci, &ptd, plink); + if (!(ptd.ctrl & TD_CTRL_ACTIVE)) { + break; + } + if (uhci_queue_token(&ptd) != q->token) { + break; + } + trace_usb_uhci_td_queue(plink & ~0xf, ptd.ctrl, ptd.token); + ret = uhci_handle_td(q->uhci, q, q->qh_addr, &ptd, plink, &int_mask); + if (ret == TD_RESULT_ASYNC_CONT) { + break; + } + assert(ret == TD_RESULT_ASYNC_START); + assert(int_mask == 0); + plink = ptd.link; + } + usb_device_flush_ep_queue(q->ep->dev, q->ep); +} + +static void uhci_process_frame(UHCIState *s) +{ + uint32_t frame_addr, link, old_td_ctrl, val, int_mask; + uint32_t curr_qh, td_count = 0; + int cnt, ret; + UHCI_TD td; + UHCI_QH qh; + QhDb qhdb; + + frame_addr = s->fl_base_addr + ((s->frnum & 0x3ff) << 2); + + pci_dma_read(&s->dev, frame_addr, &link, 4); + le32_to_cpus(&link); + + int_mask = 0; + curr_qh = 0; + + qhdb_reset(&qhdb); + + for (cnt = FRAME_MAX_LOOPS; is_valid(link) && cnt; cnt--) { + if (!s->completions_only && s->frame_bytes >= s->frame_bandwidth) { + /* We've reached the USB 1.1 bandwidth limit of + 1280 bytes/frame; stop processing */ + trace_usb_uhci_frame_stop_bandwidth(); + break; + } + if (is_qh(link)) { + /* QH */ + trace_usb_uhci_qh_load(link & ~0xf); + + if (qhdb_insert(&qhdb, link)) { + /* + * We're going in circles, which is not a bug: the + * HCD is allowed to do that as part of its bandwidth management. + * + * Stop processing here if no transaction has been done + * since we were last here. + */ + if (td_count == 0) { + trace_usb_uhci_frame_loop_stop_idle(); + break; + } else { + trace_usb_uhci_frame_loop_continue(); + td_count = 0; + qhdb_reset(&qhdb); + qhdb_insert(&qhdb, link); + } + } + + pci_dma_read(&s->dev, link & ~0xf, &qh, sizeof(qh)); + le32_to_cpus(&qh.link); + le32_to_cpus(&qh.el_link); + + if (!is_valid(qh.el_link)) { + /* QH w/o elements */ + curr_qh = 0; + link = qh.link; + } else { + /* QH with elements */ + curr_qh = link; + link = qh.el_link; + } + continue; + } + + /* TD */ + uhci_read_td(s, &td, link); + trace_usb_uhci_td_load(curr_qh & ~0xf, link & ~0xf, td.ctrl, td.token); + + old_td_ctrl = td.ctrl; + ret = uhci_handle_td(s, NULL, curr_qh, &td, link, &int_mask); + if (old_td_ctrl != td.ctrl) { + /* update the status bits of the TD */ + val = cpu_to_le32(td.ctrl); + pci_dma_write(&s->dev, (link & ~0xf) + 4, &val, sizeof(val)); + } + + switch (ret) { + case TD_RESULT_STOP_FRAME: /* interrupted frame */ + goto out; + + case TD_RESULT_NEXT_QH: + case TD_RESULT_ASYNC_CONT: + trace_usb_uhci_td_nextqh(curr_qh & ~0xf, link & ~0xf); + link = curr_qh ? qh.link : td.link; + continue; + + case TD_RESULT_ASYNC_START: + trace_usb_uhci_td_async(curr_qh & ~0xf, link & ~0xf); + link = curr_qh ?
qh.link : td.link; + continue; + + case TD_RESULT_COMPLETE: + trace_usb_uhci_td_complete(curr_qh & ~0xf, link & ~0xf); + link = td.link; + td_count++; + s->frame_bytes += (td.ctrl & 0x7ff) + 1; + + if (curr_qh) { + /* update QH element link */ + qh.el_link = link; + val = cpu_to_le32(qh.el_link); + pci_dma_write(&s->dev, (curr_qh & ~0xf) + 4, &val, sizeof(val)); + + if (!depth_first(link)) { + /* done with this QH */ + curr_qh = 0; + link = qh.link; + } + } + break; + + default: + assert(!"unknown return code"); + } + + /* go to the next entry */ + } + +out: + s->pending_int_mask |= int_mask; +} + +static void uhci_bh(void *opaque) +{ + UHCIState *s = opaque; + uhci_process_frame(s); +} + +static void uhci_frame_timer(void *opaque) +{ + UHCIState *s = opaque; + uint64_t t_now, t_last_run; + int i, frames; + const uint64_t frame_t = NANOSECONDS_PER_SECOND / FRAME_TIMER_FREQ; + + s->completions_only = false; + qemu_bh_cancel(s->bh); + + if (!(s->cmd & UHCI_CMD_RS)) { + /* Full stop */ + trace_usb_uhci_schedule_stop(); + timer_del(s->frame_timer); + uhci_async_cancel_all(s); + /* set hchalted bit in status - UHCI11D 2.1.2 */ + s->status |= UHCI_STS_HCHALTED; + return; + } + + /* We still store expire_time in our state, for migration */ + t_last_run = s->expire_time - frame_t; + t_now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + + /* Process up to MAX_FRAMES_PER_TICK frames */ + frames = (t_now - t_last_run) / frame_t; + if (frames > s->maxframes) { + int skipped = frames - s->maxframes; + s->expire_time += skipped * frame_t; + s->frnum = (s->frnum + skipped) & 0x7ff; + frames -= skipped; + } + if (frames > MAX_FRAMES_PER_TICK) { + frames = MAX_FRAMES_PER_TICK; + } + + for (i = 0; i < frames; i++) { + s->frame_bytes = 0; + trace_usb_uhci_frame_start(s->frnum); + uhci_async_validate_begin(s); + uhci_process_frame(s); + uhci_async_validate_end(s); + /* The spec says frnum is the frame currently being processed, and + * the guest must look at frnum - 1 on interrupt, so inc frnum now */ + s->frnum = (s->frnum + 1) & 0x7ff; + s->expire_time += frame_t; + } + + /* Complete the previous frame(s) */ + if (s->pending_int_mask) { + s->status2 |= s->pending_int_mask; + s->status |= UHCI_STS_USBINT; + uhci_update_irq(s); + } + s->pending_int_mask = 0; + + timer_mod(s->frame_timer, t_now + frame_t); +} + +static const MemoryRegionOps uhci_ioport_ops = { + .read = uhci_port_read, + .write = uhci_port_write, + .valid.min_access_size = 1, + .valid.max_access_size = 4, + .impl.min_access_size = 2, + .impl.max_access_size = 2, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static USBPortOps uhci_port_ops = { + .attach = uhci_attach, + .detach = uhci_detach, + .child_detach = uhci_child_detach, + .wakeup = uhci_wakeup, + .complete = uhci_async_complete, +}; + +static USBBusOps uhci_bus_ops = { +}; + +void usb_uhci_common_realize(PCIDevice *dev, Error **errp) +{ + Error *err = NULL; + PCIDeviceClass *pc = PCI_DEVICE_GET_CLASS(dev); + UHCIPCIDeviceClass *u = container_of(pc, UHCIPCIDeviceClass, parent_class); + UHCIState *s = UHCI(dev); + uint8_t *pci_conf = s->dev.config; + int i; + + pci_conf[PCI_CLASS_PROG] = 0x00; + /* TODO: reset value should be 0. 
*/ + pci_conf[USB_SBRN] = USB_RELEASE_1; /* release number */ + pci_config_set_interrupt_pin(pci_conf, u->info.irq_pin + 1); + s->irq = pci_allocate_irq(dev); + + if (s->masterbus) { + USBPort *ports[NB_PORTS]; + for(i = 0; i < NB_PORTS; i++) { + ports[i] = &s->ports[i].port; + } + usb_register_companion(s->masterbus, ports, NB_PORTS, + s->firstport, s, &uhci_port_ops, + USB_SPEED_MASK_LOW | USB_SPEED_MASK_FULL, + &err); + if (err) { + error_propagate(errp, err); + return; + } + } else { + usb_bus_new(&s->bus, sizeof(s->bus), &uhci_bus_ops, DEVICE(dev)); + for (i = 0; i < NB_PORTS; i++) { + usb_register_port(&s->bus, &s->ports[i].port, s, i, &uhci_port_ops, + USB_SPEED_MASK_LOW | USB_SPEED_MASK_FULL); + } + } + s->bh = qemu_bh_new(uhci_bh, s); + s->frame_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, uhci_frame_timer, s); + s->num_ports_vmstate = NB_PORTS; + QTAILQ_INIT(&s->queues); + + memory_region_init_io(&s->io_bar, OBJECT(s), &uhci_ioport_ops, s, + "uhci", 0x20); + + /* Use region 4 for consistency with real hardware. BSD guests seem + to rely on this. */ + pci_register_bar(&s->dev, 4, PCI_BASE_ADDRESS_SPACE_IO, &s->io_bar); +} + +static void usb_uhci_exit(PCIDevice *dev) +{ + UHCIState *s = UHCI(dev); + + trace_usb_uhci_exit(); + + if (s->frame_timer) { + timer_free(s->frame_timer); + s->frame_timer = NULL; + } + + if (s->bh) { + qemu_bh_delete(s->bh); + } + + uhci_async_cancel_all(s); + + if (!s->masterbus) { + usb_bus_release(&s->bus); + } +} + +static Property uhci_properties_companion[] = { + DEFINE_PROP_STRING("masterbus", UHCIState, masterbus), + DEFINE_PROP_UINT32("firstport", UHCIState, firstport, 0), + DEFINE_PROP_UINT32("bandwidth", UHCIState, frame_bandwidth, 1280), + DEFINE_PROP_UINT32("maxframes", UHCIState, maxframes, 128), + DEFINE_PROP_END_OF_LIST(), +}; +static Property uhci_properties_standalone[] = { + DEFINE_PROP_UINT32("bandwidth", UHCIState, frame_bandwidth, 1280), + DEFINE_PROP_UINT32("maxframes", UHCIState, maxframes, 128), + DEFINE_PROP_END_OF_LIST(), +}; + +static void uhci_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + + k->class_id = PCI_CLASS_SERIAL_USB; + dc->vmsd = &vmstate_uhci; + dc->reset = uhci_reset; + set_bit(DEVICE_CATEGORY_USB, dc->categories); +} + +static const TypeInfo uhci_pci_type_info = { + .name = TYPE_UHCI, + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(UHCIState), + .class_size = sizeof(UHCIPCIDeviceClass), + .abstract = true, + .class_init = uhci_class_init, + .interfaces = (InterfaceInfo[]) { + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { }, + }, +}; + +void uhci_data_class_init(ObjectClass *klass, void *data) +{ + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + DeviceClass *dc = DEVICE_CLASS(klass); + UHCIPCIDeviceClass *u = container_of(k, UHCIPCIDeviceClass, parent_class); + UHCIInfo *info = data; + + k->realize = info->realize ? info->realize : usb_uhci_common_realize; + k->exit = info->unplug ? 
usb_uhci_exit : NULL; + k->vendor_id = info->vendor_id; + k->device_id = info->device_id; + k->revision = info->revision; + if (!info->unplug) { + /* uhci controllers in companion setups can't be hotplugged */ + dc->hotpluggable = false; + device_class_set_props(dc, uhci_properties_companion); + } else { + device_class_set_props(dc, uhci_properties_standalone); + } + if (info->notuser) { + dc->user_creatable = false; + } + u->info = *info; +} + +static UHCIInfo uhci_info[] = { + { + .name = "piix3-usb-uhci", + .vendor_id = PCI_VENDOR_ID_INTEL, + .device_id = PCI_DEVICE_ID_INTEL_82371SB_2, + .revision = 0x01, + .irq_pin = 3, + .unplug = true, + },{ + .name = "piix4-usb-uhci", + .vendor_id = PCI_VENDOR_ID_INTEL, + .device_id = PCI_DEVICE_ID_INTEL_82371AB_2, + .revision = 0x01, + .irq_pin = 3, + .unplug = true, + },{ + .name = "ich9-usb-uhci1", /* 00:1d.0 */ + .vendor_id = PCI_VENDOR_ID_INTEL, + .device_id = PCI_DEVICE_ID_INTEL_82801I_UHCI1, + .revision = 0x03, + .irq_pin = 0, + .unplug = false, + },{ + .name = "ich9-usb-uhci2", /* 00:1d.1 */ + .vendor_id = PCI_VENDOR_ID_INTEL, + .device_id = PCI_DEVICE_ID_INTEL_82801I_UHCI2, + .revision = 0x03, + .irq_pin = 1, + .unplug = false, + },{ + .name = "ich9-usb-uhci3", /* 00:1d.2 */ + .vendor_id = PCI_VENDOR_ID_INTEL, + .device_id = PCI_DEVICE_ID_INTEL_82801I_UHCI3, + .revision = 0x03, + .irq_pin = 2, + .unplug = false, + },{ + .name = "ich9-usb-uhci4", /* 00:1a.0 */ + .vendor_id = PCI_VENDOR_ID_INTEL, + .device_id = PCI_DEVICE_ID_INTEL_82801I_UHCI4, + .revision = 0x03, + .irq_pin = 0, + .unplug = false, + },{ + .name = "ich9-usb-uhci5", /* 00:1a.1 */ + .vendor_id = PCI_VENDOR_ID_INTEL, + .device_id = PCI_DEVICE_ID_INTEL_82801I_UHCI5, + .revision = 0x03, + .irq_pin = 1, + .unplug = false, + },{ + .name = "ich9-usb-uhci6", /* 00:1a.2 */ + .vendor_id = PCI_VENDOR_ID_INTEL, + .device_id = PCI_DEVICE_ID_INTEL_82801I_UHCI6, + .revision = 0x03, + .irq_pin = 2, + .unplug = false, + } +}; + +static void uhci_register_types(void) +{ + TypeInfo uhci_type_info = { + .parent = TYPE_UHCI, + .class_init = uhci_data_class_init, + }; + int i; + + type_register_static(&uhci_pci_type_info); + + for (i = 0; i < ARRAY_SIZE(uhci_info); i++) { + uhci_type_info.name = uhci_info[i].name; + uhci_type_info.class_data = uhci_info + i; + type_register(&uhci_type_info); + } +} + +type_init(uhci_register_types) diff --git a/hw/usb/hcd-uhci.h b/hw/usb/hcd-uhci.h new file mode 100644 index 000000000..c85ab7868 --- /dev/null +++ b/hw/usb/hcd-uhci.h @@ -0,0 +1,94 @@ +/* + * USB UHCI controller emulation + * + * Copyright (c) 2005 Fabrice Bellard + * + * Copyright (c) 2008 Max Krasnyansky + * Major rewrite of the UHCI data structures parser and frame processor + * Support for fully async operation and multiple outstanding transactions + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software.
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ +#ifndef HW_USB_HCD_UHCI_H +#define HW_USB_HCD_UHCI_H + +#include "exec/memory.h" +#include "qemu/timer.h" +#include "hw/pci/pci.h" +#include "hw/usb.h" + +typedef struct UHCIQueue UHCIQueue; + +#define NB_PORTS 2 + +typedef struct UHCIPort { + USBPort port; + uint16_t ctrl; +} UHCIPort; + +typedef struct UHCIState { + PCIDevice dev; + MemoryRegion io_bar; + USBBus bus; /* Note: unused when we're a companion controller */ + uint16_t cmd; /* cmd register */ + uint16_t status; + uint16_t intr; /* interrupt enable register */ + uint16_t frnum; /* frame number */ + uint32_t fl_base_addr; /* frame list base address */ + uint8_t sof_timing; + uint8_t status2; /* bits 0 and 1 are used to generate UHCI_STS_USBINT */ + int64_t expire_time; + QEMUTimer *frame_timer; + QEMUBH *bh; + uint32_t frame_bytes; + uint32_t frame_bandwidth; + bool completions_only; + UHCIPort ports[NB_PORTS]; + qemu_irq irq; + /* Interrupts that should be raised at the end of the current frame. */ + uint32_t pending_int_mask; + + /* Active packets */ + QTAILQ_HEAD(, UHCIQueue) queues; + uint8_t num_ports_vmstate; + + /* Properties */ + char *masterbus; + uint32_t firstport; + uint32_t maxframes; +} UHCIState; + +#define TYPE_UHCI "pci-uhci-usb" +DECLARE_INSTANCE_CHECKER(UHCIState, UHCI, TYPE_UHCI) + +typedef struct UHCIInfo { + const char *name; + uint16_t vendor_id; + uint16_t device_id; + uint8_t revision; + uint8_t irq_pin; + void (*realize)(PCIDevice *dev, Error **errp); + bool unplug; + bool notuser; /* disallow user_creatable */ +} UHCIInfo; + +void uhci_data_class_init(ObjectClass *klass, void *data); +void usb_uhci_common_realize(PCIDevice *dev, Error **errp); + +#endif diff --git a/hw/usb/hcd-xhci-nec.c b/hw/usb/hcd-xhci-nec.c new file mode 100644 index 000000000..13c9ac5db --- /dev/null +++ b/hw/usb/hcd-xhci-nec.c @@ -0,0 +1,85 @@ +/* + * USB xHCI controller emulation + * + * Copyright (c) 2011 Securiforest + * Date: 2011-05-11 ; Author: Hector Martin + * Based on usb-ohci.c, emulates Renesas NEC USB 3.0 + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */ + +#include "qemu/osdep.h" +#include "hw/usb.h" +#include "qemu/module.h" +#include "hw/pci/pci.h" +#include "hw/qdev-properties.h" + +#include "hcd-xhci-pci.h" + +typedef struct XHCINecState { + /*< private >*/ + XHCIPciState parent_obj; + /*< public >*/ + uint32_t flags; + uint32_t intrs; + uint32_t slots; +} XHCINecState; + +static Property nec_xhci_properties[] = { + DEFINE_PROP_ON_OFF_AUTO("msi", XHCIPciState, msi, ON_OFF_AUTO_AUTO), + DEFINE_PROP_ON_OFF_AUTO("msix", XHCIPciState, msix, ON_OFF_AUTO_AUTO), + DEFINE_PROP_BIT("superspeed-ports-first", XHCINecState, flags, + XHCI_FLAG_SS_FIRST, true), + DEFINE_PROP_BIT("force-pcie-endcap", XHCINecState, flags, + XHCI_FLAG_FORCE_PCIE_ENDCAP, false), + DEFINE_PROP_UINT32("intrs", XHCINecState, intrs, XHCI_MAXINTRS), + DEFINE_PROP_UINT32("slots", XHCINecState, slots, XHCI_MAXSLOTS), + DEFINE_PROP_END_OF_LIST(), +}; + +static void nec_xhci_instance_init(Object *obj) +{ + XHCIPciState *pci = XHCI_PCI(obj); + XHCINecState *nec = container_of(pci, XHCINecState, parent_obj); + + pci->xhci.flags = nec->flags; + pci->xhci.numintrs = nec->intrs; + pci->xhci.numslots = nec->slots; +} + +static void nec_xhci_class_init(ObjectClass *klass, void *data) +{ + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + DeviceClass *dc = DEVICE_CLASS(klass); + + device_class_set_props(dc, nec_xhci_properties); + k->vendor_id = PCI_VENDOR_ID_NEC; + k->device_id = PCI_DEVICE_ID_NEC_UPD720200; + k->revision = 0x03; +} + +static const TypeInfo nec_xhci_info = { + .name = TYPE_NEC_XHCI, + .parent = TYPE_XHCI_PCI, + .instance_size = sizeof(XHCINecState), + .instance_init = nec_xhci_instance_init, + .class_init = nec_xhci_class_init, +}; + +static void nec_xhci_register_types(void) +{ + type_register_static(&nec_xhci_info); +} + +type_init(nec_xhci_register_types) diff --git a/hw/usb/hcd-xhci-pci.c b/hw/usb/hcd-xhci-pci.c new file mode 100644 index 000000000..e934b1a5b --- /dev/null +++ b/hw/usb/hcd-xhci-pci.c @@ -0,0 +1,262 @@ +/* + * USB xHCI controller with PCI bus emulation + * + * SPDX-FileCopyrightText: 2011 Securiforest + * SPDX-FileContributor: Hector Martin + * SPDX-sourceInfo: Based on usb-ohci.c, emulates Renesas NEC USB 3.0 + * SPDX-FileCopyrightText: 2020 Xilinx + * SPDX-FileContributor: Sai Pavan Boddu + * SPDX-sourceInfo: Moved the PCI-specific content from hcd-xhci.c to + * hcd-xhci-pci.c + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */ +#include "qemu/osdep.h" +#include "hw/pci/pci.h" +#include "hw/qdev-properties.h" +#include "migration/vmstate.h" +#include "hw/pci/msi.h" +#include "hw/pci/msix.h" +#include "hcd-xhci-pci.h" +#include "trace.h" +#include "qapi/error.h" + +#define OFF_MSIX_TABLE 0x3000 +#define OFF_MSIX_PBA 0x3800 + +static void xhci_pci_intr_update(XHCIState *xhci, int n, bool enable) +{ + XHCIPciState *s = container_of(xhci, XHCIPciState, xhci); + PCIDevice *pci_dev = PCI_DEVICE(s); + + if (!msix_enabled(pci_dev)) { + return; + } + if (enable == !!xhci->intr[n].msix_used) { + return; + } + if (enable) { + trace_usb_xhci_irq_msix_use(n); + msix_vector_use(pci_dev, n); + xhci->intr[n].msix_used = true; + } else { + trace_usb_xhci_irq_msix_unuse(n); + msix_vector_unuse(pci_dev, n); + xhci->intr[n].msix_used = false; + } +} + +static bool xhci_pci_intr_raise(XHCIState *xhci, int n, bool level) +{ + XHCIPciState *s = container_of(xhci, XHCIPciState, xhci); + PCIDevice *pci_dev = PCI_DEVICE(s); + + if (n == 0 && + !(msix_enabled(pci_dev) || + msi_enabled(pci_dev))) { + pci_set_irq(pci_dev, level); + } + + if (msix_enabled(pci_dev) && level) { + msix_notify(pci_dev, n); + return true; + } + + if (msi_enabled(pci_dev) && level) { + msi_notify(pci_dev, n); + return true; + } + + return false; +} + +static void xhci_pci_reset(DeviceState *dev) +{ + XHCIPciState *s = XHCI_PCI(dev); + + device_legacy_reset(DEVICE(&s->xhci)); +} + +static int xhci_pci_vmstate_post_load(void *opaque, int version_id) +{ + XHCIPciState *s = XHCI_PCI(opaque); + PCIDevice *pci_dev = PCI_DEVICE(s); + int intr; + + for (intr = 0; intr < s->xhci.numintrs; intr++) { + if (s->xhci.intr[intr].msix_used) { + msix_vector_use(pci_dev, intr); + } else { + msix_vector_unuse(pci_dev, intr); + } + } + return 0; +} + +static void usb_xhci_pci_realize(struct PCIDevice *dev, Error **errp) +{ + int ret; + Error *err = NULL; + XHCIPciState *s = XHCI_PCI(dev); + + dev->config[PCI_CLASS_PROG] = 0x30; /* xHCI */ + dev->config[PCI_INTERRUPT_PIN] = 0x01; /* interrupt pin 1 */ + dev->config[PCI_CACHE_LINE_SIZE] = 0x10; + dev->config[0x60] = 0x30; /* release number */ + + object_property_set_link(OBJECT(&s->xhci), "host", OBJECT(s), NULL); + s->xhci.intr_update = xhci_pci_intr_update; + s->xhci.intr_raise = xhci_pci_intr_raise; + if (!qdev_realize(DEVICE(&s->xhci), NULL, errp)) { + return; + } + if (strcmp(object_get_typename(OBJECT(dev)), TYPE_NEC_XHCI) == 0) { + s->xhci.nec_quirks = true; + } + + if (s->msi != ON_OFF_AUTO_OFF) { + ret = msi_init(dev, 0x70, s->xhci.numintrs, true, false, &err); + /* + * Any error other than -ENOTSUP(board's MSI support is broken) + * is a programming error + */ + assert(!ret || ret == -ENOTSUP); + if (ret && s->msi == ON_OFF_AUTO_ON) { + /* Can't satisfy user's explicit msi=on request, fail */ + error_append_hint(&err, "You have to use msi=auto (default) or " + "msi=off with this machine type.\n"); + error_propagate(errp, err); + return; + } + assert(!err || s->msi == ON_OFF_AUTO_AUTO); + /* With msi=auto, we fall back to MSI off silently */ + error_free(err); + } + pci_register_bar(dev, 0, + PCI_BASE_ADDRESS_SPACE_MEMORY | + PCI_BASE_ADDRESS_MEM_TYPE_64, + &s->xhci.mem); + + if (pci_bus_is_express(pci_get_bus(dev)) || + xhci_get_flag(&s->xhci, XHCI_FLAG_FORCE_PCIE_ENDCAP)) { + ret = pcie_endpoint_cap_init(dev, 0xa0); + assert(ret > 0); + } + + if (s->msix != ON_OFF_AUTO_OFF) { + /* TODO check for errors, and should fail when msix=on */ + msix_init(dev, s->xhci.numintrs, + &s->xhci.mem, 0, OFF_MSIX_TABLE, + &s->xhci.mem, 
0, OFF_MSIX_PBA, + 0x90, NULL); + } + s->xhci.as = pci_get_address_space(dev); +} + +static void usb_xhci_pci_exit(PCIDevice *dev) +{ + XHCIPciState *s = XHCI_PCI(dev); + /* destroy msix memory region */ + if (dev->msix_table && dev->msix_pba + && dev->msix_entry_used) { + msix_uninit(dev, &s->xhci.mem, &s->xhci.mem); + } +} + +static const VMStateDescription vmstate_xhci_pci = { + .name = "xhci", + .version_id = 1, + .post_load = xhci_pci_vmstate_post_load, + .fields = (VMStateField[]) { + VMSTATE_PCI_DEVICE(parent_obj, XHCIPciState), + VMSTATE_MSIX(parent_obj, XHCIPciState), + VMSTATE_STRUCT(xhci, XHCIPciState, 1, vmstate_xhci, XHCIState), + VMSTATE_END_OF_LIST() + } +}; + +static void xhci_instance_init(Object *obj) +{ + XHCIPciState *s = XHCI_PCI(obj); + /* + * QEMU_PCI_CAP_EXPRESS initialization does not depend on QEMU command + * line, therefore, no need to wait to realize like other devices + */ + PCI_DEVICE(obj)->cap_present |= QEMU_PCI_CAP_EXPRESS; + object_initialize_child(obj, "xhci-core", &s->xhci, TYPE_XHCI); + qdev_alias_all_properties(DEVICE(&s->xhci), obj); +} + +static void xhci_class_init(ObjectClass *klass, void *data) +{ + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = xhci_pci_reset; + dc->vmsd = &vmstate_xhci_pci; + set_bit(DEVICE_CATEGORY_USB, dc->categories); + k->realize = usb_xhci_pci_realize; + k->exit = usb_xhci_pci_exit; + k->class_id = PCI_CLASS_SERIAL_USB; +} + +static const TypeInfo xhci_pci_info = { + .name = TYPE_XHCI_PCI, + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(XHCIPciState), + .class_init = xhci_class_init, + .instance_init = xhci_instance_init, + .abstract = true, + .interfaces = (InterfaceInfo[]) { + { INTERFACE_PCIE_DEVICE }, + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { } + }, +}; + +static void qemu_xhci_class_init(ObjectClass *klass, void *data) +{ + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + + k->vendor_id = PCI_VENDOR_ID_REDHAT; + k->device_id = PCI_DEVICE_ID_REDHAT_XHCI; + k->revision = 0x01; +} + +static void qemu_xhci_instance_init(Object *obj) +{ + XHCIPciState *s = XHCI_PCI(obj); + XHCIState *xhci = &s->xhci; + + s->msi = ON_OFF_AUTO_OFF; + s->msix = ON_OFF_AUTO_AUTO; + xhci->numintrs = XHCI_MAXINTRS; + xhci->numslots = XHCI_MAXSLOTS; + xhci_set_flag(xhci, XHCI_FLAG_SS_FIRST); +} + +static const TypeInfo qemu_xhci_info = { + .name = TYPE_QEMU_XHCI, + .parent = TYPE_XHCI_PCI, + .class_init = qemu_xhci_class_init, + .instance_init = qemu_xhci_instance_init, +}; + +static void xhci_register_types(void) +{ + type_register_static(&xhci_pci_info); + type_register_static(&qemu_xhci_info); +} + +type_init(xhci_register_types) diff --git a/hw/usb/hcd-xhci-pci.h b/hw/usb/hcd-xhci-pci.h new file mode 100644 index 000000000..c193f7944 --- /dev/null +++ b/hw/usb/hcd-xhci-pci.h @@ -0,0 +1,44 @@ +/* + * USB xHCI controller emulation + * + * Copyright (c) 2011 Securiforest + * Date: 2011-05-11 ; Author: Hector Martin + * Based on usb-ohci.c, emulates Renesas NEC USB 3.0 + * Date: 2020-01-1; Author: Sai Pavan Boddu + * PCI hooks are moved from XHCIState to XHCIPciState + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
+ * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ + +#ifndef HW_USB_HCD_XHCI_PCI_H +#define HW_USB_HCD_XHCI_PCI_H + +#include "hw/usb.h" +#include "hcd-xhci.h" + +#define TYPE_XHCI_PCI "pci-xhci" +#define XHCI_PCI(obj) \ + OBJECT_CHECK(XHCIPciState, (obj), TYPE_XHCI_PCI) + + +typedef struct XHCIPciState { + /*< private >*/ + PCIDevice parent_obj; + /*< public >*/ + XHCIState xhci; + OnOffAuto msi; + OnOffAuto msix; +} XHCIPciState; + +#endif diff --git a/hw/usb/hcd-xhci-sysbus.c b/hw/usb/hcd-xhci-sysbus.c new file mode 100644 index 000000000..a14e43819 --- /dev/null +++ b/hw/usb/hcd-xhci-sysbus.c @@ -0,0 +1,123 @@ +/* + * USB xHCI controller for system-bus interface + * Based on hcd-ehci-sysbus.c + * + * SPDX-FileCopyrightText: 2020 Xilinx + * SPDX-FileContributor: Author: Sai Pavan Boddu + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ +#include "qemu/osdep.h" +#include "hw/qdev-properties.h" +#include "migration/vmstate.h" +#include "trace.h" +#include "qapi/error.h" +#include "hcd-xhci-sysbus.h" +#include "hw/acpi/aml-build.h" +#include "hw/irq.h" + +static bool xhci_sysbus_intr_raise(XHCIState *xhci, int n, bool level) +{ + XHCISysbusState *s = container_of(xhci, XHCISysbusState, xhci); + + qemu_set_irq(s->irq[n], level); + + return false; +} + +void xhci_sysbus_reset(DeviceState *dev) +{ + XHCISysbusState *s = XHCI_SYSBUS(dev); + + device_legacy_reset(DEVICE(&s->xhci)); +} + +static void xhci_sysbus_realize(DeviceState *dev, Error **errp) +{ + XHCISysbusState *s = XHCI_SYSBUS(dev); + + object_property_set_link(OBJECT(&s->xhci), "host", OBJECT(s), NULL); + if (!qdev_realize(DEVICE(&s->xhci), NULL, errp)) { + return; + } + s->irq = g_new0(qemu_irq, s->xhci.numintrs); + qdev_init_gpio_out_named(dev, s->irq, SYSBUS_DEVICE_GPIO_IRQ, + s->xhci.numintrs); + if (s->xhci.dma_mr) { + s->xhci.as = g_malloc0(sizeof(AddressSpace)); + address_space_init(s->xhci.as, s->xhci.dma_mr, NULL); + } else { + s->xhci.as = &address_space_memory; + } + + sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->xhci.mem); +} + +static void xhci_sysbus_instance_init(Object *obj) +{ + XHCISysbusState *s = XHCI_SYSBUS(obj); + + object_initialize_child(obj, "xhci-core", &s->xhci, TYPE_XHCI); + qdev_alias_all_properties(DEVICE(&s->xhci), obj); + + object_property_add_link(obj, "dma", TYPE_MEMORY_REGION, + (Object **)&s->xhci.dma_mr, + qdev_prop_allow_set_link_before_realize, + OBJ_PROP_LINK_STRONG); + s->xhci.intr_update = NULL; + s->xhci.intr_raise = xhci_sysbus_intr_raise; +} + +void xhci_sysbus_build_aml(Aml *scope, uint32_t mmio, unsigned int irq) +{ + Aml *dev = aml_device("XHCI"); + Aml *crs = aml_resource_template(); + + aml_append(crs, aml_memory32_fixed(mmio, XHCI_LEN_REGS, AML_READ_WRITE)); + aml_append(crs, aml_interrupt(AML_CONSUMER, AML_LEVEL, AML_ACTIVE_HIGH, + AML_EXCLUSIVE, &irq, 1)); + + aml_append(dev, aml_name_decl("_HID", aml_eisaid("PNP0D10"))); + aml_append(dev, aml_name_decl("_CRS", crs)); + aml_append(scope, dev); +} + +static Property xhci_sysbus_props[] = { + DEFINE_PROP_UINT32("intrs", XHCISysbusState, xhci.numintrs, XHCI_MAXINTRS), + DEFINE_PROP_UINT32("slots", XHCISysbusState, xhci.numslots, XHCI_MAXSLOTS), + DEFINE_PROP_END_OF_LIST(), +}; + +static const
VMStateDescription vmstate_xhci_sysbus = { + .name = "xhci-sysbus", + .version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_STRUCT(xhci, XHCISysbusState, 1, vmstate_xhci, XHCIState), + VMSTATE_END_OF_LIST() + } +}; + +static void xhci_sysbus_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = xhci_sysbus_reset; + dc->realize = xhci_sysbus_realize; + dc->vmsd = &vmstate_xhci_sysbus; + device_class_set_props(dc, xhci_sysbus_props); +} + +static const TypeInfo xhci_sysbus_info = { + .name = TYPE_XHCI_SYSBUS, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(XHCISysbusState), + .class_init = xhci_sysbus_class_init, + .instance_init = xhci_sysbus_instance_init +}; + +static void xhci_sysbus_register_types(void) +{ + type_register_static(&xhci_sysbus_info); +} + +type_init(xhci_sysbus_register_types); diff --git a/hw/usb/hcd-xhci-sysbus.h b/hw/usb/hcd-xhci-sysbus.h new file mode 100644 index 000000000..fdfcbbee3 --- /dev/null +++ b/hw/usb/hcd-xhci-sysbus.h @@ -0,0 +1,31 @@ +/* + * USB xHCI controller for system-bus interface + * + * SPDX-FileCopyrightText: 2020 Xilinx + * SPDX-FileContributor: Author: Sai Pavan Boddu + * SPDX-sourceInfo: Based on hcd-ehci-sysbus + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef HW_USB_HCD_XHCI_SYSBUS_H +#define HW_USB_HCD_XHCI_SYSBUS_H + +#include "hw/usb.h" +#include "hcd-xhci.h" +#include "hw/sysbus.h" + +#define XHCI_SYSBUS(obj) \ + OBJECT_CHECK(XHCISysbusState, (obj), TYPE_XHCI_SYSBUS) + + +typedef struct XHCISysbusState { + /*< private >*/ + SysBusDevice parent_obj; + /*< public >*/ + XHCIState xhci; + qemu_irq *irq; +} XHCISysbusState; + +void xhci_sysbus_reset(DeviceState *dev); +#endif diff --git a/hw/usb/hcd-xhci.c b/hw/usb/hcd-xhci.c new file mode 100644 index 000000000..e01700039 --- /dev/null +++ b/hw/usb/hcd-xhci.c @@ -0,0 +1,3613 @@ +/* + * USB xHCI controller emulation + * + * Copyright (c) 2011 Securiforest + * Date: 2011-05-11 ; Author: Hector Martin + * Based on usb-ohci.c, emulates Renesas NEC USB 3.0 + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ + +#include "qemu/osdep.h" +#include "qemu/timer.h" +#include "qemu/module.h" +#include "qemu/queue.h" +#include "migration/vmstate.h" +#include "hw/qdev-properties.h" +#include "trace.h" +#include "qapi/error.h" + +#include "hcd-xhci.h" + +//#define DEBUG_XHCI +//#define DEBUG_DATA + +#ifdef DEBUG_XHCI +#define DPRINTF(...) fprintf(stderr, __VA_ARGS__) +#else +#define DPRINTF(...)
do {} while (0) +#endif +#define FIXME(_msg) do { fprintf(stderr, "FIXME %s:%d %s\n", \ + __func__, __LINE__, _msg); abort(); } while (0) + +#define TRB_LINK_LIMIT 32 +#define COMMAND_LIMIT 256 +#define TRANSFER_LIMIT 256 + +#define LEN_CAP 0x40 +#define LEN_OPER (0x400 + 0x10 * XHCI_MAXPORTS) +#define LEN_RUNTIME ((XHCI_MAXINTRS + 1) * 0x20) +#define LEN_DOORBELL ((XHCI_MAXSLOTS + 1) * 0x20) + +#define OFF_OPER LEN_CAP +#define OFF_RUNTIME 0x1000 +#define OFF_DOORBELL 0x2000 + +#if (OFF_OPER + LEN_OPER) > OFF_RUNTIME +#error Increase OFF_RUNTIME +#endif +#if (OFF_RUNTIME + LEN_RUNTIME) > OFF_DOORBELL +#error Increase OFF_DOORBELL +#endif +#if (OFF_DOORBELL + LEN_DOORBELL) > XHCI_LEN_REGS +# error Increase XHCI_LEN_REGS +#endif + +/* bit definitions */ +#define USBCMD_RS (1<<0) +#define USBCMD_HCRST (1<<1) +#define USBCMD_INTE (1<<2) +#define USBCMD_HSEE (1<<3) +#define USBCMD_LHCRST (1<<7) +#define USBCMD_CSS (1<<8) +#define USBCMD_CRS (1<<9) +#define USBCMD_EWE (1<<10) +#define USBCMD_EU3S (1<<11) + +#define USBSTS_HCH (1<<0) +#define USBSTS_HSE (1<<2) +#define USBSTS_EINT (1<<3) +#define USBSTS_PCD (1<<4) +#define USBSTS_SSS (1<<8) +#define USBSTS_RSS (1<<9) +#define USBSTS_SRE (1<<10) +#define USBSTS_CNR (1<<11) +#define USBSTS_HCE (1<<12) + + +#define PORTSC_CCS (1<<0) +#define PORTSC_PED (1<<1) +#define PORTSC_OCA (1<<3) +#define PORTSC_PR (1<<4) +#define PORTSC_PLS_SHIFT 5 +#define PORTSC_PLS_MASK 0xf +#define PORTSC_PP (1<<9) +#define PORTSC_SPEED_SHIFT 10 +#define PORTSC_SPEED_MASK 0xf +#define PORTSC_SPEED_FULL (1<<10) +#define PORTSC_SPEED_LOW (2<<10) +#define PORTSC_SPEED_HIGH (3<<10) +#define PORTSC_SPEED_SUPER (4<<10) +#define PORTSC_PIC_SHIFT 14 +#define PORTSC_PIC_MASK 0x3 +#define PORTSC_LWS (1<<16) +#define PORTSC_CSC (1<<17) +#define PORTSC_PEC (1<<18) +#define PORTSC_WRC (1<<19) +#define PORTSC_OCC (1<<20) +#define PORTSC_PRC (1<<21) +#define PORTSC_PLC (1<<22) +#define PORTSC_CEC (1<<23) +#define PORTSC_CAS (1<<24) +#define PORTSC_WCE (1<<25) +#define PORTSC_WDE (1<<26) +#define PORTSC_WOE (1<<27) +#define PORTSC_DR (1<<30) +#define PORTSC_WPR (1<<31) + +#define CRCR_RCS (1<<0) +#define CRCR_CS (1<<1) +#define CRCR_CA (1<<2) +#define CRCR_CRR (1<<3) + +#define IMAN_IP (1<<0) +#define IMAN_IE (1<<1) + +#define ERDP_EHB (1<<3) + +#define TRB_SIZE 16 +typedef struct XHCITRB { + uint64_t parameter; + uint32_t status; + uint32_t control; + dma_addr_t addr; + bool ccs; +} XHCITRB; + +enum { + PLS_U0 = 0, + PLS_U1 = 1, + PLS_U2 = 2, + PLS_U3 = 3, + PLS_DISABLED = 4, + PLS_RX_DETECT = 5, + PLS_INACTIVE = 6, + PLS_POLLING = 7, + PLS_RECOVERY = 8, + PLS_HOT_RESET = 9, + PLS_COMPILANCE_MODE = 10, + PLS_TEST_MODE = 11, + PLS_RESUME = 15, +}; + +#define CR_LINK TR_LINK + +#define TRB_C (1<<0) +#define TRB_TYPE_SHIFT 10 +#define TRB_TYPE_MASK 0x3f +#define TRB_TYPE(t) (((t).control >> TRB_TYPE_SHIFT) & TRB_TYPE_MASK) + +#define TRB_EV_ED (1<<2) + +#define TRB_TR_ENT (1<<1) +#define TRB_TR_ISP (1<<2) +#define TRB_TR_NS (1<<3) +#define TRB_TR_CH (1<<4) +#define TRB_TR_IOC (1<<5) +#define TRB_TR_IDT (1<<6) +#define TRB_TR_TBC_SHIFT 7 +#define TRB_TR_TBC_MASK 0x3 +#define TRB_TR_BEI (1<<9) +#define TRB_TR_TLBPC_SHIFT 16 +#define TRB_TR_TLBPC_MASK 0xf +#define TRB_TR_FRAMEID_SHIFT 20 +#define TRB_TR_FRAMEID_MASK 0x7ff +#define TRB_TR_SIA (1<<31) + +#define TRB_TR_DIR (1<<16) + +#define TRB_CR_SLOTID_SHIFT 24 +#define TRB_CR_SLOTID_MASK 0xff +#define TRB_CR_EPID_SHIFT 16 +#define TRB_CR_EPID_MASK 0x1f + +#define TRB_CR_BSR (1<<9) +#define TRB_CR_DC (1<<9) + +#define TRB_LK_TC (1<<1) + 
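+/* interrupter target: bits 31:22 of the TRB status word */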
+#define TRB_INTR_SHIFT 22 +#define TRB_INTR_MASK 0x3ff +#define TRB_INTR(t) (((t).status >> TRB_INTR_SHIFT) & TRB_INTR_MASK) + +#define EP_TYPE_MASK 0x7 +#define EP_TYPE_SHIFT 3 + +#define EP_STATE_MASK 0x7 +#define EP_DISABLED (0<<0) +#define EP_RUNNING (1<<0) +#define EP_HALTED (2<<0) +#define EP_STOPPED (3<<0) +#define EP_ERROR (4<<0) + +#define SLOT_STATE_MASK 0x1f +#define SLOT_STATE_SHIFT 27 +#define SLOT_STATE(s) (((s)>>SLOT_STATE_SHIFT)&SLOT_STATE_MASK) +#define SLOT_ENABLED 0 +#define SLOT_DEFAULT 1 +#define SLOT_ADDRESSED 2 +#define SLOT_CONFIGURED 3 + +#define SLOT_CONTEXT_ENTRIES_MASK 0x1f +#define SLOT_CONTEXT_ENTRIES_SHIFT 27 + +#define get_field(data, field) \ + (((data) >> field##_SHIFT) & field##_MASK) + +#define set_field(data, newval, field) do { \ + uint32_t val = *data; \ + val &= ~(field##_MASK << field##_SHIFT); \ + val |= ((newval) & field##_MASK) << field##_SHIFT; \ + *data = val; \ + } while (0) + +typedef enum EPType { + ET_INVALID = 0, + ET_ISO_OUT, + ET_BULK_OUT, + ET_INTR_OUT, + ET_CONTROL, + ET_ISO_IN, + ET_BULK_IN, + ET_INTR_IN, +} EPType; + +typedef struct XHCITransfer { + XHCIEPContext *epctx; + USBPacket packet; + QEMUSGList sgl; + bool running_async; + bool running_retry; + bool complete; + bool int_req; + unsigned int iso_pkts; + unsigned int streamid; + bool in_xfer; + bool iso_xfer; + bool timed_xfer; + + unsigned int trb_count; + XHCITRB *trbs; + + TRBCCode status; + + unsigned int pkts; + unsigned int pktsize; + unsigned int cur_pkt; + + uint64_t mfindex_kick; + + QTAILQ_ENTRY(XHCITransfer) next; +} XHCITransfer; + +struct XHCIStreamContext { + dma_addr_t pctx; + unsigned int sct; + XHCIRing ring; +}; + +struct XHCIEPContext { + XHCIState *xhci; + unsigned int slotid; + unsigned int epid; + + XHCIRing ring; + uint32_t xfer_count; + QTAILQ_HEAD(, XHCITransfer) transfers; + XHCITransfer *retry; + EPType type; + dma_addr_t pctx; + unsigned int max_psize; + uint32_t state; + uint32_t kick_active; + + /* streams */ + unsigned int max_pstreams; + bool lsa; + unsigned int nr_pstreams; + XHCIStreamContext *pstreams; + + /* iso xfer scheduling */ + unsigned int interval; + int64_t mfindex_last; + QEMUTimer *kick_timer; +}; + +typedef struct XHCIEvRingSeg { + uint32_t addr_low; + uint32_t addr_high; + uint32_t size; + uint32_t rsvd; +} XHCIEvRingSeg; + +static void xhci_kick_ep(XHCIState *xhci, unsigned int slotid, + unsigned int epid, unsigned int streamid); +static void xhci_kick_epctx(XHCIEPContext *epctx, unsigned int streamid); +static TRBCCode xhci_disable_ep(XHCIState *xhci, unsigned int slotid, + unsigned int epid); +static void xhci_xfer_report(XHCITransfer *xfer); +static void xhci_event(XHCIState *xhci, XHCIEvent *event, int v); +static void xhci_write_event(XHCIState *xhci, XHCIEvent *event, int v); +static USBEndpoint *xhci_epid_to_usbep(XHCIEPContext *epctx); + +static const char *TRBType_names[] = { + [TRB_RESERVED] = "TRB_RESERVED", + [TR_NORMAL] = "TR_NORMAL", + [TR_SETUP] = "TR_SETUP", + [TR_DATA] = "TR_DATA", + [TR_STATUS] = "TR_STATUS", + [TR_ISOCH] = "TR_ISOCH", + [TR_LINK] = "TR_LINK", + [TR_EVDATA] = "TR_EVDATA", + [TR_NOOP] = "TR_NOOP", + [CR_ENABLE_SLOT] = "CR_ENABLE_SLOT", + [CR_DISABLE_SLOT] = "CR_DISABLE_SLOT", + [CR_ADDRESS_DEVICE] = "CR_ADDRESS_DEVICE", + [CR_CONFIGURE_ENDPOINT] = "CR_CONFIGURE_ENDPOINT", + [CR_EVALUATE_CONTEXT] = "CR_EVALUATE_CONTEXT", + [CR_RESET_ENDPOINT] = "CR_RESET_ENDPOINT", + [CR_STOP_ENDPOINT] = "CR_STOP_ENDPOINT", + [CR_SET_TR_DEQUEUE] = "CR_SET_TR_DEQUEUE", + [CR_RESET_DEVICE] = "CR_RESET_DEVICE", + 
[CR_FORCE_EVENT] = "CR_FORCE_EVENT", + [CR_NEGOTIATE_BW] = "CR_NEGOTIATE_BW", + [CR_SET_LATENCY_TOLERANCE] = "CR_SET_LATENCY_TOLERANCE", + [CR_GET_PORT_BANDWIDTH] = "CR_GET_PORT_BANDWIDTH", + [CR_FORCE_HEADER] = "CR_FORCE_HEADER", + [CR_NOOP] = "CR_NOOP", + [ER_TRANSFER] = "ER_TRANSFER", + [ER_COMMAND_COMPLETE] = "ER_COMMAND_COMPLETE", + [ER_PORT_STATUS_CHANGE] = "ER_PORT_STATUS_CHANGE", + [ER_BANDWIDTH_REQUEST] = "ER_BANDWIDTH_REQUEST", + [ER_DOORBELL] = "ER_DOORBELL", + [ER_HOST_CONTROLLER] = "ER_HOST_CONTROLLER", + [ER_DEVICE_NOTIFICATION] = "ER_DEVICE_NOTIFICATION", + [ER_MFINDEX_WRAP] = "ER_MFINDEX_WRAP", + [CR_VENDOR_NEC_FIRMWARE_REVISION] = "CR_VENDOR_NEC_FIRMWARE_REVISION", + [CR_VENDOR_NEC_CHALLENGE_RESPONSE] = "CR_VENDOR_NEC_CHALLENGE_RESPONSE", +}; + +static const char *TRBCCode_names[] = { + [CC_INVALID] = "CC_INVALID", + [CC_SUCCESS] = "CC_SUCCESS", + [CC_DATA_BUFFER_ERROR] = "CC_DATA_BUFFER_ERROR", + [CC_BABBLE_DETECTED] = "CC_BABBLE_DETECTED", + [CC_USB_TRANSACTION_ERROR] = "CC_USB_TRANSACTION_ERROR", + [CC_TRB_ERROR] = "CC_TRB_ERROR", + [CC_STALL_ERROR] = "CC_STALL_ERROR", + [CC_RESOURCE_ERROR] = "CC_RESOURCE_ERROR", + [CC_BANDWIDTH_ERROR] = "CC_BANDWIDTH_ERROR", + [CC_NO_SLOTS_ERROR] = "CC_NO_SLOTS_ERROR", + [CC_INVALID_STREAM_TYPE_ERROR] = "CC_INVALID_STREAM_TYPE_ERROR", + [CC_SLOT_NOT_ENABLED_ERROR] = "CC_SLOT_NOT_ENABLED_ERROR", + [CC_EP_NOT_ENABLED_ERROR] = "CC_EP_NOT_ENABLED_ERROR", + [CC_SHORT_PACKET] = "CC_SHORT_PACKET", + [CC_RING_UNDERRUN] = "CC_RING_UNDERRUN", + [CC_RING_OVERRUN] = "CC_RING_OVERRUN", + [CC_VF_ER_FULL] = "CC_VF_ER_FULL", + [CC_PARAMETER_ERROR] = "CC_PARAMETER_ERROR", + [CC_BANDWIDTH_OVERRUN] = "CC_BANDWIDTH_OVERRUN", + [CC_CONTEXT_STATE_ERROR] = "CC_CONTEXT_STATE_ERROR", + [CC_NO_PING_RESPONSE_ERROR] = "CC_NO_PING_RESPONSE_ERROR", + [CC_EVENT_RING_FULL_ERROR] = "CC_EVENT_RING_FULL_ERROR", + [CC_INCOMPATIBLE_DEVICE_ERROR] = "CC_INCOMPATIBLE_DEVICE_ERROR", + [CC_MISSED_SERVICE_ERROR] = "CC_MISSED_SERVICE_ERROR", + [CC_COMMAND_RING_STOPPED] = "CC_COMMAND_RING_STOPPED", + [CC_COMMAND_ABORTED] = "CC_COMMAND_ABORTED", + [CC_STOPPED] = "CC_STOPPED", + [CC_STOPPED_LENGTH_INVALID] = "CC_STOPPED_LENGTH_INVALID", + [CC_MAX_EXIT_LATENCY_TOO_LARGE_ERROR] + = "CC_MAX_EXIT_LATENCY_TOO_LARGE_ERROR", + [CC_ISOCH_BUFFER_OVERRUN] = "CC_ISOCH_BUFFER_OVERRUN", + [CC_EVENT_LOST_ERROR] = "CC_EVENT_LOST_ERROR", + [CC_UNDEFINED_ERROR] = "CC_UNDEFINED_ERROR", + [CC_INVALID_STREAM_ID_ERROR] = "CC_INVALID_STREAM_ID_ERROR", + [CC_SECONDARY_BANDWIDTH_ERROR] = "CC_SECONDARY_BANDWIDTH_ERROR", + [CC_SPLIT_TRANSACTION_ERROR] = "CC_SPLIT_TRANSACTION_ERROR", +}; + +static const char *ep_state_names[] = { + [EP_DISABLED] = "disabled", + [EP_RUNNING] = "running", + [EP_HALTED] = "halted", + [EP_STOPPED] = "stopped", + [EP_ERROR] = "error", +}; + +static const char *lookup_name(uint32_t index, const char **list, uint32_t llen) +{ + if (index >= llen || list[index] == NULL) { + return "???"; + } + return list[index]; +} + +static const char *trb_name(XHCITRB *trb) +{ + return lookup_name(TRB_TYPE(*trb), TRBType_names, + ARRAY_SIZE(TRBType_names)); +} + +static const char *event_name(XHCIEvent *event) +{ + return lookup_name(event->ccode, TRBCCode_names, + ARRAY_SIZE(TRBCCode_names)); +} + +static const char *ep_state_name(uint32_t state) +{ + return lookup_name(state, ep_state_names, + ARRAY_SIZE(ep_state_names)); +} + +bool xhci_get_flag(XHCIState *xhci, enum xhci_flags bit) +{ + return xhci->flags & (1 << bit); +} + +void xhci_set_flag(XHCIState *xhci, enum xhci_flags bit) +{ + 
xhci->flags |= (1 << bit); +} + +static uint64_t xhci_mfindex_get(XHCIState *xhci) +{ + int64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + return (now - xhci->mfindex_start) / 125000; +} + +static void xhci_mfwrap_update(XHCIState *xhci) +{ + const uint32_t bits = USBCMD_RS | USBCMD_EWE; + uint32_t mfindex, left; + int64_t now; + + if ((xhci->usbcmd & bits) == bits) { + now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + mfindex = ((now - xhci->mfindex_start) / 125000) & 0x3fff; + left = 0x4000 - mfindex; + timer_mod(xhci->mfwrap_timer, now + left * 125000); + } else { + timer_del(xhci->mfwrap_timer); + } +} + +static void xhci_mfwrap_timer(void *opaque) +{ + XHCIState *xhci = opaque; + XHCIEvent wrap = { ER_MFINDEX_WRAP, CC_SUCCESS }; + + xhci_event(xhci, &wrap, 0); + xhci_mfwrap_update(xhci); +} + +static inline dma_addr_t xhci_addr64(uint32_t low, uint32_t high) +{ + if (sizeof(dma_addr_t) == 4) { + return low; + } else { + return low | (((dma_addr_t)high << 16) << 16); + } +} + +static inline dma_addr_t xhci_mask64(uint64_t addr) +{ + if (sizeof(dma_addr_t) == 4) { + return addr & 0xffffffff; + } else { + return addr; + } +} + +static inline void xhci_dma_read_u32s(XHCIState *xhci, dma_addr_t addr, + uint32_t *buf, size_t len) +{ + int i; + + assert((len % sizeof(uint32_t)) == 0); + + dma_memory_read(xhci->as, addr, buf, len); + + for (i = 0; i < (len / sizeof(uint32_t)); i++) { + buf[i] = le32_to_cpu(buf[i]); + } +} + +static inline void xhci_dma_write_u32s(XHCIState *xhci, dma_addr_t addr, + uint32_t *buf, size_t len) +{ + int i; + uint32_t tmp[5]; + uint32_t n = len / sizeof(uint32_t); + + assert((len % sizeof(uint32_t)) == 0); + assert(n <= ARRAY_SIZE(tmp)); + + for (i = 0; i < n; i++) { + tmp[i] = cpu_to_le32(buf[i]); + } + dma_memory_write(xhci->as, addr, tmp, len); +} + +static XHCIPort *xhci_lookup_port(XHCIState *xhci, struct USBPort *uport) +{ + int index; + + if (!uport->dev) { + return NULL; + } + switch (uport->dev->speed) { + case USB_SPEED_LOW: + case USB_SPEED_FULL: + case USB_SPEED_HIGH: + if (xhci_get_flag(xhci, XHCI_FLAG_SS_FIRST)) { + index = uport->index + xhci->numports_3; + } else { + index = uport->index; + } + break; + case USB_SPEED_SUPER: + if (xhci_get_flag(xhci, XHCI_FLAG_SS_FIRST)) { + index = uport->index; + } else { + index = uport->index + xhci->numports_2; + } + break; + default: + return NULL; + } + return &xhci->ports[index]; +} + +static void xhci_intr_update(XHCIState *xhci, int v) +{ + int level = 0; + + if (v == 0) { + if (xhci->intr[0].iman & IMAN_IP && + xhci->intr[0].iman & IMAN_IE && + xhci->usbcmd & USBCMD_INTE) { + level = 1; + } + if (xhci->intr_raise) { + if (xhci->intr_raise(xhci, 0, level)) { + xhci->intr[0].iman &= ~IMAN_IP; + } + } + } + if (xhci->intr_update) { + xhci->intr_update(xhci, v, + xhci->intr[v].iman & IMAN_IE); + } +} + +static void xhci_intr_raise(XHCIState *xhci, int v) +{ + bool pending = (xhci->intr[v].erdp_low & ERDP_EHB); + + xhci->intr[v].erdp_low |= ERDP_EHB; + xhci->intr[v].iman |= IMAN_IP; + xhci->usbsts |= USBSTS_EINT; + + if (pending) { + return; + } + if (!(xhci->intr[v].iman & IMAN_IE)) { + return; + } + + if (!(xhci->usbcmd & USBCMD_INTE)) { + return; + } + if (xhci->intr_raise) { + if (xhci->intr_raise(xhci, v, true)) { + xhci->intr[v].iman &= ~IMAN_IP; + } + } +} + +static inline int xhci_running(XHCIState *xhci) +{ + return !(xhci->usbsts & USBSTS_HCH); +} + +static void xhci_die(XHCIState *xhci) +{ + xhci->usbsts |= USBSTS_HCE; + DPRINTF("xhci: asserted controller error\n"); +} + +static void 
xhci_write_event(XHCIState *xhci, XHCIEvent *event, int v) +{ + XHCIInterrupter *intr = &xhci->intr[v]; + XHCITRB ev_trb; + dma_addr_t addr; + + ev_trb.parameter = cpu_to_le64(event->ptr); + ev_trb.status = cpu_to_le32(event->length | (event->ccode << 24)); + ev_trb.control = (event->slotid << 24) | (event->epid << 16) | + event->flags | (event->type << TRB_TYPE_SHIFT); + if (intr->er_pcs) { + ev_trb.control |= TRB_C; + } + ev_trb.control = cpu_to_le32(ev_trb.control); + + trace_usb_xhci_queue_event(v, intr->er_ep_idx, trb_name(&ev_trb), + event_name(event), ev_trb.parameter, + ev_trb.status, ev_trb.control); + + addr = intr->er_start + TRB_SIZE*intr->er_ep_idx; + dma_memory_write(xhci->as, addr, &ev_trb, TRB_SIZE); + + intr->er_ep_idx++; + if (intr->er_ep_idx >= intr->er_size) { + intr->er_ep_idx = 0; + intr->er_pcs = !intr->er_pcs; + } +} + +static void xhci_event(XHCIState *xhci, XHCIEvent *event, int v) +{ + XHCIInterrupter *intr; + dma_addr_t erdp; + unsigned int dp_idx; + + if (v >= xhci->numintrs) { + DPRINTF("intr nr out of range (%d >= %d)\n", v, xhci->numintrs); + return; + } + intr = &xhci->intr[v]; + + erdp = xhci_addr64(intr->erdp_low, intr->erdp_high); + if (erdp < intr->er_start || + erdp >= (intr->er_start + TRB_SIZE*intr->er_size)) { + DPRINTF("xhci: ERDP out of bounds: "DMA_ADDR_FMT"\n", erdp); + DPRINTF("xhci: ER[%d] at "DMA_ADDR_FMT" len %d\n", + v, intr->er_start, intr->er_size); + xhci_die(xhci); + return; + } + + dp_idx = (erdp - intr->er_start) / TRB_SIZE; + assert(dp_idx < intr->er_size); + + if ((intr->er_ep_idx + 2) % intr->er_size == dp_idx) { + DPRINTF("xhci: ER %d full, send ring full error\n", v); + XHCIEvent full = {ER_HOST_CONTROLLER, CC_EVENT_RING_FULL_ERROR}; + xhci_write_event(xhci, &full, v); + } else if ((intr->er_ep_idx + 1) % intr->er_size == dp_idx) { + DPRINTF("xhci: ER %d full, drop event\n", v); + } else { + xhci_write_event(xhci, event, v); + } + + xhci_intr_raise(xhci, v); +} + +static void xhci_ring_init(XHCIState *xhci, XHCIRing *ring, + dma_addr_t base) +{ + ring->dequeue = base; + ring->ccs = 1; +} + +static TRBType xhci_ring_fetch(XHCIState *xhci, XHCIRing *ring, XHCITRB *trb, + dma_addr_t *addr) +{ + uint32_t link_cnt = 0; + + while (1) { + TRBType type; + dma_memory_read(xhci->as, ring->dequeue, trb, TRB_SIZE); + trb->addr = ring->dequeue; + trb->ccs = ring->ccs; + le64_to_cpus(&trb->parameter); + le32_to_cpus(&trb->status); + le32_to_cpus(&trb->control); + + trace_usb_xhci_fetch_trb(ring->dequeue, trb_name(trb), + trb->parameter, trb->status, trb->control); + + if ((trb->control & TRB_C) != ring->ccs) { + return 0; + } + + type = TRB_TYPE(*trb); + + if (type != TR_LINK) { + if (addr) { + *addr = ring->dequeue; + } + ring->dequeue += TRB_SIZE; + return type; + } else { + if (++link_cnt > TRB_LINK_LIMIT) { + trace_usb_xhci_enforced_limit("trb-link"); + return 0; + } + ring->dequeue = xhci_mask64(trb->parameter); + if (trb->control & TRB_LK_TC) { + ring->ccs = !ring->ccs; + } + } + } +} + +static int xhci_ring_chain_length(XHCIState *xhci, const XHCIRing *ring) +{ + XHCITRB trb; + int length = 0; + dma_addr_t dequeue = ring->dequeue; + bool ccs = ring->ccs; + /* hack to bundle together the two/three TDs that make a setup transfer */ + bool control_td_set = 0; + uint32_t link_cnt = 0; + + while (1) { + TRBType type; + dma_memory_read(xhci->as, dequeue, &trb, TRB_SIZE); + le64_to_cpus(&trb.parameter); + le32_to_cpus(&trb.status); + le32_to_cpus(&trb.control); + + if ((trb.control & TRB_C) != ccs) { + return -length; + } + + type = 
TRB_TYPE(trb); + + if (type == TR_LINK) { + if (++link_cnt > TRB_LINK_LIMIT) { + return -length; + } + dequeue = xhci_mask64(trb.parameter); + if (trb.control & TRB_LK_TC) { + ccs = !ccs; + } + continue; + } + + length += 1; + dequeue += TRB_SIZE; + + if (type == TR_SETUP) { + control_td_set = 1; + } else if (type == TR_STATUS) { + control_td_set = 0; + } + + if (!control_td_set && !(trb.control & TRB_TR_CH)) { + return length; + } + } +} + +static void xhci_er_reset(XHCIState *xhci, int v) +{ + XHCIInterrupter *intr = &xhci->intr[v]; + XHCIEvRingSeg seg; + dma_addr_t erstba = xhci_addr64(intr->erstba_low, intr->erstba_high); + + if (intr->erstsz == 0 || erstba == 0) { + /* disabled */ + intr->er_start = 0; + intr->er_size = 0; + return; + } + /* cache the (sole) event ring segment location */ + if (intr->erstsz != 1) { + DPRINTF("xhci: invalid value for ERSTSZ: %d\n", intr->erstsz); + xhci_die(xhci); + return; + } + dma_memory_read(xhci->as, erstba, &seg, sizeof(seg)); + le32_to_cpus(&seg.addr_low); + le32_to_cpus(&seg.addr_high); + le32_to_cpus(&seg.size); + if (seg.size < 16 || seg.size > 4096) { + DPRINTF("xhci: invalid value for segment size: %d\n", seg.size); + xhci_die(xhci); + return; + } + intr->er_start = xhci_addr64(seg.addr_low, seg.addr_high); + intr->er_size = seg.size; + + intr->er_ep_idx = 0; + intr->er_pcs = 1; + + DPRINTF("xhci: event ring[%d]:" DMA_ADDR_FMT " [%d]\n", + v, intr->er_start, intr->er_size); +} + +static void xhci_run(XHCIState *xhci) +{ + trace_usb_xhci_run(); + xhci->usbsts &= ~USBSTS_HCH; + xhci->mfindex_start = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); +} + +static void xhci_stop(XHCIState *xhci) +{ + trace_usb_xhci_stop(); + xhci->usbsts |= USBSTS_HCH; + xhci->crcr_low &= ~CRCR_CRR; +} + +static XHCIStreamContext *xhci_alloc_stream_contexts(unsigned count, + dma_addr_t base) +{ + XHCIStreamContext *stctx; + unsigned int i; + + stctx = g_new0(XHCIStreamContext, count); + for (i = 0; i < count; i++) { + stctx[i].pctx = base + i * 16; + stctx[i].sct = -1; + } + return stctx; +} + +static void xhci_reset_streams(XHCIEPContext *epctx) +{ + unsigned int i; + + for (i = 0; i < epctx->nr_pstreams; i++) { + epctx->pstreams[i].sct = -1; + } +} + +static void xhci_alloc_streams(XHCIEPContext *epctx, dma_addr_t base) +{ + assert(epctx->pstreams == NULL); + epctx->nr_pstreams = 2 << epctx->max_pstreams; + epctx->pstreams = xhci_alloc_stream_contexts(epctx->nr_pstreams, base); +} + +static void xhci_free_streams(XHCIEPContext *epctx) +{ + assert(epctx->pstreams != NULL); + + g_free(epctx->pstreams); + epctx->pstreams = NULL; + epctx->nr_pstreams = 0; +} + +static int xhci_epmask_to_eps_with_streams(XHCIState *xhci, + unsigned int slotid, + uint32_t epmask, + XHCIEPContext **epctxs, + USBEndpoint **eps) +{ + XHCISlot *slot; + XHCIEPContext *epctx; + USBEndpoint *ep; + int i, j; + + assert(slotid >= 1 && slotid <= xhci->numslots); + + slot = &xhci->slots[slotid - 1]; + + for (i = 2, j = 0; i <= 31; i++) { + if (!(epmask & (1u << i))) { + continue; + } + + epctx = slot->eps[i - 1]; + ep = xhci_epid_to_usbep(epctx); + if (!epctx || !epctx->nr_pstreams || !ep) { + continue; + } + + if (epctxs) { + epctxs[j] = epctx; + } + eps[j++] = ep; + } + return j; +} + +static void xhci_free_device_streams(XHCIState *xhci, unsigned int slotid, + uint32_t epmask) +{ + USBEndpoint *eps[30]; + int nr_eps; + + nr_eps = xhci_epmask_to_eps_with_streams(xhci, slotid, epmask, NULL, eps); + if (nr_eps) { + usb_device_free_streams(eps[0]->dev, eps, nr_eps); + } +} + +static TRBCCode 
xhci_alloc_device_streams(XHCIState *xhci, unsigned int slotid, + uint32_t epmask) +{ + XHCIEPContext *epctxs[30]; + USBEndpoint *eps[30]; + int i, r, nr_eps, req_nr_streams, dev_max_streams; + + nr_eps = xhci_epmask_to_eps_with_streams(xhci, slotid, epmask, epctxs, + eps); + if (nr_eps == 0) { + return CC_SUCCESS; + } + + req_nr_streams = epctxs[0]->nr_pstreams; + dev_max_streams = eps[0]->max_streams; + + for (i = 1; i < nr_eps; i++) { + /* + * HdG: I don't expect these to ever trigger, but if they do we need + * to come up with another solution, i.e. group identical endpoints + * together and make a usb_device_alloc_streams call per group. + */ + if (epctxs[i]->nr_pstreams != req_nr_streams) { + FIXME("guest streams config not identical for all eps"); + return CC_RESOURCE_ERROR; + } + if (eps[i]->max_streams != dev_max_streams) { + FIXME("device streams config not identical for all eps"); + return CC_RESOURCE_ERROR; + } + } + + /* + * max-streams in both the device descriptor and in the controller is a + * power of 2. But stream id 0 is reserved, so if a device can do up to 4 + * streams, the guest will ask for 5, rounded up to the next power of 2, + * which becomes 8. For emulated devices usb_device_alloc_streams is a nop. + * + * For redirected devices, however, this is an issue, as there we must ask + * the real xhci controller to alloc streams, and the host driver for the + * real xhci controller will likely disallow allocating more streams than + * the device can handle. + * + * So we limit the requested nr_streams to the maximum number the device + * can handle. + */ + if (req_nr_streams > dev_max_streams) { + req_nr_streams = dev_max_streams; + } + + r = usb_device_alloc_streams(eps[0]->dev, eps, nr_eps, req_nr_streams); + if (r != 0) { + DPRINTF("xhci: alloc streams failed\n"); + return CC_RESOURCE_ERROR; + } + + return CC_SUCCESS; +} + +static XHCIStreamContext *xhci_find_stream(XHCIEPContext *epctx, + unsigned int streamid, + uint32_t *cc_error) +{ + XHCIStreamContext *sctx; + dma_addr_t base; + uint32_t ctx[2], sct; + + assert(streamid != 0); + if (epctx->lsa) { + if (streamid >= epctx->nr_pstreams) { + *cc_error = CC_INVALID_STREAM_ID_ERROR; + return NULL; + } + sctx = epctx->pstreams + streamid; + } else { + FIXME("secondary streams not implemented yet"); + } + + if (sctx->sct == -1) { + xhci_dma_read_u32s(epctx->xhci, sctx->pctx, ctx, sizeof(ctx)); + sct = (ctx[0] >> 1) & 0x07; + if (epctx->lsa && sct != 1) { + *cc_error = CC_INVALID_STREAM_TYPE_ERROR; + return NULL; + } + sctx->sct = sct; + base = xhci_addr64(ctx[0] & ~0xf, ctx[1]); + xhci_ring_init(epctx->xhci, &sctx->ring, base); + } + return sctx; +} + +static void xhci_set_ep_state(XHCIState *xhci, XHCIEPContext *epctx, + XHCIStreamContext *sctx, uint32_t state) +{ + XHCIRing *ring = NULL; + uint32_t ctx[5]; + uint32_t ctx2[2]; + + xhci_dma_read_u32s(xhci, epctx->pctx, ctx, sizeof(ctx)); + ctx[0] &= ~EP_STATE_MASK; + ctx[0] |= state; + + /* update ring dequeue ptr */ + if (epctx->nr_pstreams) { + if (sctx != NULL) { + ring = &sctx->ring; + xhci_dma_read_u32s(xhci, sctx->pctx, ctx2, sizeof(ctx2)); + ctx2[0] &= 0xe; + ctx2[0] |= sctx->ring.dequeue | sctx->ring.ccs; + ctx2[1] = (sctx->ring.dequeue >> 16) >> 16; + xhci_dma_write_u32s(xhci, sctx->pctx, ctx2, sizeof(ctx2)); + } + } else { + ring = &epctx->ring; + } + if (ring) { + ctx[2] = ring->dequeue | ring->ccs; + ctx[3] = (ring->dequeue >> 16) >> 16; + + DPRINTF("xhci: set epctx: " DMA_ADDR_FMT " state=%d dequeue=%08x%08x\n", + epctx->pctx, state, ctx[3], ctx[2]); + } + + 
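/* write the updated endpoint context back to guest memory */ +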
xhci_dma_write_u32s(xhci, epctx->pctx, ctx, sizeof(ctx)); + if (epctx->state != state) { + trace_usb_xhci_ep_state(epctx->slotid, epctx->epid, + ep_state_name(epctx->state), + ep_state_name(state)); + } + epctx->state = state; +} + +static void xhci_ep_kick_timer(void *opaque) +{ + XHCIEPContext *epctx = opaque; + xhci_kick_epctx(epctx, 0); +} + +static XHCIEPContext *xhci_alloc_epctx(XHCIState *xhci, + unsigned int slotid, + unsigned int epid) +{ + XHCIEPContext *epctx; + + epctx = g_new0(XHCIEPContext, 1); + epctx->xhci = xhci; + epctx->slotid = slotid; + epctx->epid = epid; + + QTAILQ_INIT(&epctx->transfers); + epctx->kick_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, xhci_ep_kick_timer, epctx); + + return epctx; +} + +static void xhci_init_epctx(XHCIEPContext *epctx, + dma_addr_t pctx, uint32_t *ctx) +{ + dma_addr_t dequeue; + + dequeue = xhci_addr64(ctx[2] & ~0xf, ctx[3]); + + epctx->type = (ctx[1] >> EP_TYPE_SHIFT) & EP_TYPE_MASK; + epctx->pctx = pctx; + epctx->max_psize = ctx[1]>>16; + epctx->max_psize *= 1+((ctx[1]>>8)&0xff); + epctx->max_pstreams = (ctx[0] >> 10) & epctx->xhci->max_pstreams_mask; + epctx->lsa = (ctx[0] >> 15) & 1; + if (epctx->max_pstreams) { + xhci_alloc_streams(epctx, dequeue); + } else { + xhci_ring_init(epctx->xhci, &epctx->ring, dequeue); + epctx->ring.ccs = ctx[2] & 1; + } + + epctx->interval = 1 << ((ctx[0] >> 16) & 0xff); +} + +static TRBCCode xhci_enable_ep(XHCIState *xhci, unsigned int slotid, + unsigned int epid, dma_addr_t pctx, + uint32_t *ctx) +{ + XHCISlot *slot; + XHCIEPContext *epctx; + + trace_usb_xhci_ep_enable(slotid, epid); + assert(slotid >= 1 && slotid <= xhci->numslots); + assert(epid >= 1 && epid <= 31); + + slot = &xhci->slots[slotid-1]; + if (slot->eps[epid-1]) { + xhci_disable_ep(xhci, slotid, epid); + } + + epctx = xhci_alloc_epctx(xhci, slotid, epid); + slot->eps[epid-1] = epctx; + xhci_init_epctx(epctx, pctx, ctx); + + DPRINTF("xhci: endpoint %d.%d type is %d, max transaction (burst) " + "size is %d\n", epid/2, epid%2, epctx->type, epctx->max_psize); + + epctx->mfindex_last = 0; + + epctx->state = EP_RUNNING; + ctx[0] &= ~EP_STATE_MASK; + ctx[0] |= EP_RUNNING; + + return CC_SUCCESS; +} + +static XHCITransfer *xhci_ep_alloc_xfer(XHCIEPContext *epctx, + uint32_t length) +{ + uint32_t limit = epctx->nr_pstreams + 16; + XHCITransfer *xfer; + + if (epctx->xfer_count >= limit) { + return NULL; + } + + xfer = g_new0(XHCITransfer, 1); + xfer->epctx = epctx; + xfer->trbs = g_new(XHCITRB, length); + xfer->trb_count = length; + usb_packet_init(&xfer->packet); + + QTAILQ_INSERT_TAIL(&epctx->transfers, xfer, next); + epctx->xfer_count++; + + return xfer; +} + +static void xhci_ep_free_xfer(XHCITransfer *xfer) +{ + QTAILQ_REMOVE(&xfer->epctx->transfers, xfer, next); + xfer->epctx->xfer_count--; + + usb_packet_cleanup(&xfer->packet); + g_free(xfer->trbs); + g_free(xfer); +} + +static int xhci_ep_nuke_one_xfer(XHCITransfer *t, TRBCCode report) +{ + int killed = 0; + + if (report && (t->running_async || t->running_retry)) { + t->status = report; + xhci_xfer_report(t); + } + + if (t->running_async) { + usb_cancel_packet(&t->packet); + t->running_async = 0; + killed = 1; + } + if (t->running_retry) { + if (t->epctx) { + t->epctx->retry = NULL; + timer_del(t->epctx->kick_timer); + } + t->running_retry = 0; + killed = 1; + } + g_free(t->trbs); + + t->trbs = NULL; + t->trb_count = 0; + + return killed; +} + +static int xhci_ep_nuke_xfers(XHCIState *xhci, unsigned int slotid, + unsigned int epid, TRBCCode report) +{ + XHCISlot *slot; + XHCIEPContext *epctx; + 
XHCITransfer *xfer; + int killed = 0; + USBEndpoint *ep = NULL; + assert(slotid >= 1 && slotid <= xhci->numslots); + assert(epid >= 1 && epid <= 31); + + DPRINTF("xhci_ep_nuke_xfers(%d, %d)\n", slotid, epid); + + slot = &xhci->slots[slotid-1]; + + if (!slot->eps[epid-1]) { + return 0; + } + + epctx = slot->eps[epid-1]; + + for (;;) { + xfer = QTAILQ_FIRST(&epctx->transfers); + if (xfer == NULL) { + break; + } + killed += xhci_ep_nuke_one_xfer(xfer, report); + if (killed) { + report = 0; /* Only report once */ + } + xhci_ep_free_xfer(xfer); + } + + ep = xhci_epid_to_usbep(epctx); + if (ep) { + usb_device_ep_stopped(ep->dev, ep); + } + return killed; +} + +static TRBCCode xhci_disable_ep(XHCIState *xhci, unsigned int slotid, + unsigned int epid) +{ + XHCISlot *slot; + XHCIEPContext *epctx; + + trace_usb_xhci_ep_disable(slotid, epid); + assert(slotid >= 1 && slotid <= xhci->numslots); + assert(epid >= 1 && epid <= 31); + + slot = &xhci->slots[slotid-1]; + + if (!slot->eps[epid-1]) { + DPRINTF("xhci: slot %d ep %d already disabled\n", slotid, epid); + return CC_SUCCESS; + } + + xhci_ep_nuke_xfers(xhci, slotid, epid, 0); + + epctx = slot->eps[epid-1]; + + if (epctx->nr_pstreams) { + xhci_free_streams(epctx); + } + + /* only touch guest RAM if we're not resetting the HC */ + if (xhci->dcbaap_low || xhci->dcbaap_high) { + xhci_set_ep_state(xhci, epctx, NULL, EP_DISABLED); + } + + timer_free(epctx->kick_timer); + g_free(epctx); + slot->eps[epid-1] = NULL; + + return CC_SUCCESS; +} + +static TRBCCode xhci_stop_ep(XHCIState *xhci, unsigned int slotid, + unsigned int epid) +{ + XHCISlot *slot; + XHCIEPContext *epctx; + + trace_usb_xhci_ep_stop(slotid, epid); + assert(slotid >= 1 && slotid <= xhci->numslots); + + if (epid < 1 || epid > 31) { + DPRINTF("xhci: bad ep %d\n", epid); + return CC_TRB_ERROR; + } + + slot = &xhci->slots[slotid-1]; + + if (!slot->eps[epid-1]) { + DPRINTF("xhci: slot %d ep %d not enabled\n", slotid, epid); + return CC_EP_NOT_ENABLED_ERROR; + } + + if (xhci_ep_nuke_xfers(xhci, slotid, epid, CC_STOPPED) > 0) { + DPRINTF("xhci: FIXME: endpoint stopped w/ xfers running, " + "data might be lost\n"); + } + + epctx = slot->eps[epid-1]; + + xhci_set_ep_state(xhci, epctx, NULL, EP_STOPPED); + + if (epctx->nr_pstreams) { + xhci_reset_streams(epctx); + } + + return CC_SUCCESS; +} + +static TRBCCode xhci_reset_ep(XHCIState *xhci, unsigned int slotid, + unsigned int epid) +{ + XHCISlot *slot; + XHCIEPContext *epctx; + + trace_usb_xhci_ep_reset(slotid, epid); + assert(slotid >= 1 && slotid <= xhci->numslots); + + if (epid < 1 || epid > 31) { + DPRINTF("xhci: bad ep %d\n", epid); + return CC_TRB_ERROR; + } + + slot = &xhci->slots[slotid-1]; + + if (!slot->eps[epid-1]) { + DPRINTF("xhci: slot %d ep %d not enabled\n", slotid, epid); + return CC_EP_NOT_ENABLED_ERROR; + } + + epctx = slot->eps[epid-1]; + + if (epctx->state != EP_HALTED) { + DPRINTF("xhci: reset EP while EP %d not halted (%d)\n", + epid, epctx->state); + return CC_CONTEXT_STATE_ERROR; + } + + if (xhci_ep_nuke_xfers(xhci, slotid, epid, 0) > 0) { + DPRINTF("xhci: FIXME: endpoint reset w/ xfers running, " + "data might be lost\n"); + } + + if (!xhci->slots[slotid-1].uport || + !xhci->slots[slotid-1].uport->dev || + !xhci->slots[slotid-1].uport->dev->attached) { + return CC_USB_TRANSACTION_ERROR; + } + + xhci_set_ep_state(xhci, epctx, NULL, EP_STOPPED); + + if (epctx->nr_pstreams) { + xhci_reset_streams(epctx); + } + + return CC_SUCCESS; +} + +static TRBCCode xhci_set_ep_dequeue(XHCIState *xhci, unsigned int slotid, + unsigned int 
epid, unsigned int streamid, + uint64_t pdequeue) +{ + XHCISlot *slot; + XHCIEPContext *epctx; + XHCIStreamContext *sctx; + dma_addr_t dequeue; + + assert(slotid >= 1 && slotid <= xhci->numslots); + + if (epid < 1 || epid > 31) { + DPRINTF("xhci: bad ep %d\n", epid); + return CC_TRB_ERROR; + } + + trace_usb_xhci_ep_set_dequeue(slotid, epid, streamid, pdequeue); + dequeue = xhci_mask64(pdequeue); + + slot = &xhci->slots[slotid-1]; + + if (!slot->eps[epid-1]) { + DPRINTF("xhci: slot %d ep %d not enabled\n", slotid, epid); + return CC_EP_NOT_ENABLED_ERROR; + } + + epctx = slot->eps[epid-1]; + + if (epctx->state != EP_STOPPED) { + DPRINTF("xhci: set EP dequeue pointer while EP %d not stopped\n", epid); + return CC_CONTEXT_STATE_ERROR; + } + + if (epctx->nr_pstreams) { + uint32_t err; + sctx = xhci_find_stream(epctx, streamid, &err); + if (sctx == NULL) { + return err; + } + xhci_ring_init(xhci, &sctx->ring, dequeue & ~0xf); + sctx->ring.ccs = dequeue & 1; + } else { + sctx = NULL; + xhci_ring_init(xhci, &epctx->ring, dequeue & ~0xF); + epctx->ring.ccs = dequeue & 1; + } + + xhci_set_ep_state(xhci, epctx, sctx, EP_STOPPED); + + return CC_SUCCESS; +} + +static int xhci_xfer_create_sgl(XHCITransfer *xfer, int in_xfer) +{ + XHCIState *xhci = xfer->epctx->xhci; + int i; + + xfer->int_req = false; + qemu_sglist_init(&xfer->sgl, DEVICE(xhci), xfer->trb_count, xhci->as); + for (i = 0; i < xfer->trb_count; i++) { + XHCITRB *trb = &xfer->trbs[i]; + dma_addr_t addr; + unsigned int chunk = 0; + + if (trb->control & TRB_TR_IOC) { + xfer->int_req = true; + } + + switch (TRB_TYPE(*trb)) { + case TR_DATA: + if ((!(trb->control & TRB_TR_DIR)) != (!in_xfer)) { + DPRINTF("xhci: data direction mismatch for TR_DATA\n"); + goto err; + } + /* fallthrough */ + case TR_NORMAL: + case TR_ISOCH: + addr = xhci_mask64(trb->parameter); + chunk = trb->status & 0x1ffff; + if (trb->control & TRB_TR_IDT) { + if (chunk > 8 || in_xfer) { + DPRINTF("xhci: invalid immediate data TRB\n"); + goto err; + } + qemu_sglist_add(&xfer->sgl, trb->addr, chunk); + } else { + qemu_sglist_add(&xfer->sgl, addr, chunk); + } + break; + } + } + + return 0; + +err: + qemu_sglist_destroy(&xfer->sgl); + xhci_die(xhci); + return -1; +} + +static void xhci_xfer_unmap(XHCITransfer *xfer) +{ + usb_packet_unmap(&xfer->packet, &xfer->sgl); + qemu_sglist_destroy(&xfer->sgl); +} + +static void xhci_xfer_report(XHCITransfer *xfer) +{ + uint32_t edtla = 0; + unsigned int left; + bool reported = 0; + bool shortpkt = 0; + XHCIEvent event = {ER_TRANSFER, CC_SUCCESS}; + XHCIState *xhci = xfer->epctx->xhci; + int i; + + left = xfer->packet.actual_length; + + for (i = 0; i < xfer->trb_count; i++) { + XHCITRB *trb = &xfer->trbs[i]; + unsigned int chunk = 0; + + switch (TRB_TYPE(*trb)) { + case TR_SETUP: + chunk = trb->status & 0x1ffff; + if (chunk > 8) { + chunk = 8; + } + break; + case TR_DATA: + case TR_NORMAL: + case TR_ISOCH: + chunk = trb->status & 0x1ffff; + if (chunk > left) { + chunk = left; + if (xfer->status == CC_SUCCESS) { + shortpkt = 1; + } + } + left -= chunk; + edtla += chunk; + break; + case TR_STATUS: + reported = 0; + shortpkt = 0; + break; + } + + if (!reported && ((trb->control & TRB_TR_IOC) || + (shortpkt && (trb->control & TRB_TR_ISP)) || + (xfer->status != CC_SUCCESS && left == 0))) { + event.slotid = xfer->epctx->slotid; + event.epid = xfer->epctx->epid; + event.length = (trb->status & 0x1ffff) - chunk; + event.flags = 0; + event.ptr = trb->addr; + if (xfer->status == CC_SUCCESS) { + event.ccode = shortpkt ? 
CC_SHORT_PACKET : CC_SUCCESS; + } else { + event.ccode = xfer->status; + } + if (TRB_TYPE(*trb) == TR_EVDATA) { + event.ptr = trb->parameter; + event.flags |= TRB_EV_ED; + event.length = edtla & 0xffffff; + DPRINTF("xhci_xfer_data: EDTLA=%d\n", event.length); + edtla = 0; + } + xhci_event(xhci, &event, TRB_INTR(*trb)); + reported = 1; + if (xfer->status != CC_SUCCESS) { + return; + } + } + + switch (TRB_TYPE(*trb)) { + case TR_SETUP: + reported = 0; + shortpkt = 0; + break; + } + + } +} + +static void xhci_stall_ep(XHCITransfer *xfer) +{ + XHCIEPContext *epctx = xfer->epctx; + XHCIState *xhci = epctx->xhci; + uint32_t err; + XHCIStreamContext *sctx; + + if (epctx->type == ET_ISO_IN || epctx->type == ET_ISO_OUT) { + /* never halt isoch endpoints, 4.10.2 */ + return; + } + + if (epctx->nr_pstreams) { + sctx = xhci_find_stream(epctx, xfer->streamid, &err); + if (sctx == NULL) { + return; + } + sctx->ring.dequeue = xfer->trbs[0].addr; + sctx->ring.ccs = xfer->trbs[0].ccs; + xhci_set_ep_state(xhci, epctx, sctx, EP_HALTED); + } else { + epctx->ring.dequeue = xfer->trbs[0].addr; + epctx->ring.ccs = xfer->trbs[0].ccs; + xhci_set_ep_state(xhci, epctx, NULL, EP_HALTED); + } +} + +static int xhci_setup_packet(XHCITransfer *xfer) +{ + USBEndpoint *ep; + int dir; + + dir = xfer->in_xfer ? USB_TOKEN_IN : USB_TOKEN_OUT; + + if (xfer->packet.ep) { + ep = xfer->packet.ep; + } else { + ep = xhci_epid_to_usbep(xfer->epctx); + if (!ep) { + DPRINTF("xhci: slot %d has no device\n", + xfer->epctx->slotid); + return -1; + } + } + + xhci_xfer_create_sgl(xfer, dir == USB_TOKEN_IN); /* Also sets int_req */ + usb_packet_setup(&xfer->packet, dir, ep, xfer->streamid, + xfer->trbs[0].addr, false, xfer->int_req); + if (usb_packet_map(&xfer->packet, &xfer->sgl)) { + qemu_sglist_destroy(&xfer->sgl); + return -1; + } + DPRINTF("xhci: setup packet pid 0x%x addr %d ep %d\n", + xfer->packet.pid, ep->dev->addr, ep->nr); + return 0; +} + +static int xhci_try_complete_packet(XHCITransfer *xfer) +{ + if (xfer->packet.status == USB_RET_ASYNC) { + trace_usb_xhci_xfer_async(xfer); + xfer->running_async = 1; + xfer->running_retry = 0; + xfer->complete = 0; + return 0; + } else if (xfer->packet.status == USB_RET_NAK) { + trace_usb_xhci_xfer_nak(xfer); + xfer->running_async = 0; + xfer->running_retry = 1; + xfer->complete = 0; + return 0; + } else { + xfer->running_async = 0; + xfer->running_retry = 0; + xfer->complete = 1; + xhci_xfer_unmap(xfer); + } + + if (xfer->packet.status == USB_RET_SUCCESS) { + trace_usb_xhci_xfer_success(xfer, xfer->packet.actual_length); + xfer->status = CC_SUCCESS; + xhci_xfer_report(xfer); + return 0; + } + + /* error */ + trace_usb_xhci_xfer_error(xfer, xfer->packet.status); + switch (xfer->packet.status) { + case USB_RET_NODEV: + case USB_RET_IOERROR: + xfer->status = CC_USB_TRANSACTION_ERROR; + xhci_xfer_report(xfer); + xhci_stall_ep(xfer); + break; + case USB_RET_STALL: + xfer->status = CC_STALL_ERROR; + xhci_xfer_report(xfer); + xhci_stall_ep(xfer); + break; + case USB_RET_BABBLE: + xfer->status = CC_BABBLE_DETECTED; + xhci_xfer_report(xfer); + xhci_stall_ep(xfer); + break; + default: + DPRINTF("%s: FIXME: status = %d\n", __func__, + xfer->packet.status); + FIXME("unhandled USB_RET_*"); + } + return 0; +} + +static int xhci_fire_ctl_transfer(XHCIState *xhci, XHCITransfer *xfer) +{ + XHCITRB *trb_setup, *trb_status; + uint8_t bmRequestType; + + trb_setup = &xfer->trbs[0]; + trb_status = &xfer->trbs[xfer->trb_count-1]; + + trace_usb_xhci_xfer_start(xfer, xfer->epctx->slotid, + xfer->epctx->epid, 
xfer->streamid); + + /* at most one Event Data TRB allowed after STATUS */ + if (TRB_TYPE(*trb_status) == TR_EVDATA && xfer->trb_count > 2) { + trb_status--; + } + + /* do some sanity checks */ + if (TRB_TYPE(*trb_setup) != TR_SETUP) { + DPRINTF("xhci: ep0 first TD not SETUP: %d\n", + TRB_TYPE(*trb_setup)); + return -1; + } + if (TRB_TYPE(*trb_status) != TR_STATUS) { + DPRINTF("xhci: ep0 last TD not STATUS: %d\n", + TRB_TYPE(*trb_status)); + return -1; + } + if (!(trb_setup->control & TRB_TR_IDT)) { + DPRINTF("xhci: Setup TRB doesn't have IDT set\n"); + return -1; + } + if ((trb_setup->status & 0x1ffff) != 8) { + DPRINTF("xhci: Setup TRB has bad length (%d)\n", + (trb_setup->status & 0x1ffff)); + return -1; + } + + bmRequestType = trb_setup->parameter; + + xfer->in_xfer = bmRequestType & USB_DIR_IN; + xfer->iso_xfer = false; + xfer->timed_xfer = false; + + if (xhci_setup_packet(xfer) < 0) { + return -1; + } + xfer->packet.parameter = trb_setup->parameter; + + usb_handle_packet(xfer->packet.ep->dev, &xfer->packet); + xhci_try_complete_packet(xfer); + return 0; +} + +static void xhci_calc_intr_kick(XHCIState *xhci, XHCITransfer *xfer, + XHCIEPContext *epctx, uint64_t mfindex) +{ + uint64_t asap = ((mfindex + epctx->interval - 1) & + ~(epctx->interval-1)); + uint64_t kick = epctx->mfindex_last + epctx->interval; + + assert(epctx->interval != 0); + xfer->mfindex_kick = MAX(asap, kick); +} + +static void xhci_calc_iso_kick(XHCIState *xhci, XHCITransfer *xfer, + XHCIEPContext *epctx, uint64_t mfindex) +{ + if (xfer->trbs[0].control & TRB_TR_SIA) { + uint64_t asap = ((mfindex + epctx->interval - 1) & + ~(epctx->interval-1)); + if (asap >= epctx->mfindex_last && + asap <= epctx->mfindex_last + epctx->interval * 4) { + xfer->mfindex_kick = epctx->mfindex_last + epctx->interval; + } else { + xfer->mfindex_kick = asap; + } + } else { + xfer->mfindex_kick = ((xfer->trbs[0].control >> TRB_TR_FRAMEID_SHIFT) + & TRB_TR_FRAMEID_MASK) << 3; + xfer->mfindex_kick |= mfindex & ~0x3fff; + if (xfer->mfindex_kick + 0x100 < mfindex) { + xfer->mfindex_kick += 0x4000; + } + } +} + +static void xhci_check_intr_iso_kick(XHCIState *xhci, XHCITransfer *xfer, + XHCIEPContext *epctx, uint64_t mfindex) +{ + if (xfer->mfindex_kick > mfindex) { + timer_mod(epctx->kick_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + + (xfer->mfindex_kick - mfindex) * 125000); + xfer->running_retry = 1; + } else { + epctx->mfindex_last = xfer->mfindex_kick; + timer_del(epctx->kick_timer); + xfer->running_retry = 0; + } +} + + +static int xhci_submit(XHCIState *xhci, XHCITransfer *xfer, XHCIEPContext *epctx) +{ + uint64_t mfindex; + + DPRINTF("xhci_submit(slotid=%d,epid=%d)\n", epctx->slotid, epctx->epid); + + xfer->in_xfer = epctx->type>>2; + + switch(epctx->type) { + case ET_INTR_OUT: + case ET_INTR_IN: + xfer->pkts = 0; + xfer->iso_xfer = false; + xfer->timed_xfer = true; + mfindex = xhci_mfindex_get(xhci); + xhci_calc_intr_kick(xhci, xfer, epctx, mfindex); + xhci_check_intr_iso_kick(xhci, xfer, epctx, mfindex); + if (xfer->running_retry) { + return -1; + } + break; + case ET_BULK_OUT: + case ET_BULK_IN: + xfer->pkts = 0; + xfer->iso_xfer = false; + xfer->timed_xfer = false; + break; + case ET_ISO_OUT: + case ET_ISO_IN: + xfer->pkts = 1; + xfer->iso_xfer = true; + xfer->timed_xfer = true; + mfindex = xhci_mfindex_get(xhci); + xhci_calc_iso_kick(xhci, xfer, epctx, mfindex); + xhci_check_intr_iso_kick(xhci, xfer, epctx, mfindex); + if (xfer->running_retry) { + return -1; + } + break; + default: + trace_usb_xhci_unimplemented("endpoint type", 
epctx->type); + return -1; + } + + if (xhci_setup_packet(xfer) < 0) { + return -1; + } + usb_handle_packet(xfer->packet.ep->dev, &xfer->packet); + xhci_try_complete_packet(xfer); + return 0; +} + +static int xhci_fire_transfer(XHCIState *xhci, XHCITransfer *xfer, XHCIEPContext *epctx) +{ + trace_usb_xhci_xfer_start(xfer, xfer->epctx->slotid, + xfer->epctx->epid, xfer->streamid); + return xhci_submit(xhci, xfer, epctx); +} + +static void xhci_kick_ep(XHCIState *xhci, unsigned int slotid, + unsigned int epid, unsigned int streamid) +{ + XHCIEPContext *epctx; + + assert(slotid >= 1 && slotid <= xhci->numslots); + assert(epid >= 1 && epid <= 31); + + if (!xhci->slots[slotid-1].enabled) { + DPRINTF("xhci: xhci_kick_ep for disabled slot %d\n", slotid); + return; + } + epctx = xhci->slots[slotid-1].eps[epid-1]; + if (!epctx) { + DPRINTF("xhci: xhci_kick_ep for disabled endpoint %d,%d\n", + epid, slotid); + return; + } + + if (epctx->kick_active) { + return; + } + xhci_kick_epctx(epctx, streamid); +} + +static bool xhci_slot_ok(XHCIState *xhci, int slotid) +{ + return (xhci->slots[slotid - 1].uport && + xhci->slots[slotid - 1].uport->dev && + xhci->slots[slotid - 1].uport->dev->attached); +} + +static void xhci_kick_epctx(XHCIEPContext *epctx, unsigned int streamid) +{ + XHCIState *xhci = epctx->xhci; + XHCIStreamContext *stctx = NULL; + XHCITransfer *xfer; + XHCIRing *ring; + USBEndpoint *ep = NULL; + uint64_t mfindex; + unsigned int count = 0; + int length; + int i; + + trace_usb_xhci_ep_kick(epctx->slotid, epctx->epid, streamid); + assert(!epctx->kick_active); + + /* If the device has been detached, but the guest has not noticed this + yet the 2 above checks will succeed, but we must NOT continue */ + if (!xhci_slot_ok(xhci, epctx->slotid)) { + return; + } + + if (epctx->retry) { + XHCITransfer *xfer = epctx->retry; + + trace_usb_xhci_xfer_retry(xfer); + assert(xfer->running_retry); + if (xfer->timed_xfer) { + /* time to kick the transfer? 
*/ + mfindex = xhci_mfindex_get(xhci); + xhci_check_intr_iso_kick(xhci, xfer, epctx, mfindex); + if (xfer->running_retry) { + return; + } + xfer->timed_xfer = 0; + xfer->running_retry = 1; + } + if (xfer->iso_xfer) { + /* retry iso transfer */ + if (xhci_setup_packet(xfer) < 0) { + return; + } + usb_handle_packet(xfer->packet.ep->dev, &xfer->packet); + assert(xfer->packet.status != USB_RET_NAK); + xhci_try_complete_packet(xfer); + } else { + /* retry nak'ed transfer */ + if (xhci_setup_packet(xfer) < 0) { + return; + } + usb_handle_packet(xfer->packet.ep->dev, &xfer->packet); + if (xfer->packet.status == USB_RET_NAK) { + xhci_xfer_unmap(xfer); + return; + } + xhci_try_complete_packet(xfer); + } + assert(!xfer->running_retry); + if (xfer->complete) { + /* update ring dequeue ptr */ + xhci_set_ep_state(xhci, epctx, stctx, epctx->state); + xhci_ep_free_xfer(epctx->retry); + } + epctx->retry = NULL; + } + + if (epctx->state == EP_HALTED) { + DPRINTF("xhci: ep halted, not running schedule\n"); + return; + } + + + if (epctx->nr_pstreams) { + uint32_t err; + stctx = xhci_find_stream(epctx, streamid, &err); + if (stctx == NULL) { + return; + } + ring = &stctx->ring; + xhci_set_ep_state(xhci, epctx, stctx, EP_RUNNING); + } else { + ring = &epctx->ring; + streamid = 0; + xhci_set_ep_state(xhci, epctx, NULL, EP_RUNNING); + } + if (!ring->dequeue) { + return; + } + + epctx->kick_active++; + while (1) { + length = xhci_ring_chain_length(xhci, ring); + if (length <= 0) { + if (epctx->type == ET_ISO_OUT || epctx->type == ET_ISO_IN) { + /* 4.10.3.1 */ + XHCIEvent ev = { ER_TRANSFER }; + ev.ccode = epctx->type == ET_ISO_IN ? + CC_RING_OVERRUN : CC_RING_UNDERRUN; + ev.slotid = epctx->slotid; + ev.epid = epctx->epid; + ev.ptr = epctx->ring.dequeue; + xhci_event(xhci, &ev, xhci->slots[epctx->slotid-1].intr); + } + break; + } + xfer = xhci_ep_alloc_xfer(epctx, length); + if (xfer == NULL) { + break; + } + + for (i = 0; i < length; i++) { + TRBType type; + type = xhci_ring_fetch(xhci, ring, &xfer->trbs[i], NULL); + if (!type) { + xhci_die(xhci); + xhci_ep_free_xfer(xfer); + epctx->kick_active--; + return; + } + } + xfer->streamid = streamid; + + if (epctx->epid == 1) { + xhci_fire_ctl_transfer(xhci, xfer); + } else { + xhci_fire_transfer(xhci, xfer, epctx); + } + if (!xhci_slot_ok(xhci, epctx->slotid)) { + /* surprise removal -> stop processing */ + break; + } + if (xfer->complete) { + /* update ring dequeue ptr */ + xhci_set_ep_state(xhci, epctx, stctx, epctx->state); + xhci_ep_free_xfer(xfer); + xfer = NULL; + } + + if (epctx->state == EP_HALTED) { + break; + } + if (xfer != NULL && xfer->running_retry) { + DPRINTF("xhci: xfer nacked, stopping schedule\n"); + epctx->retry = xfer; + xhci_xfer_unmap(xfer); + break; + } + if (count++ > TRANSFER_LIMIT) { + trace_usb_xhci_enforced_limit("transfers"); + break; + } + } + epctx->kick_active--; + + ep = xhci_epid_to_usbep(epctx); + if (ep) { + usb_device_flush_ep_queue(ep->dev, ep); + } +} + +static TRBCCode xhci_enable_slot(XHCIState *xhci, unsigned int slotid) +{ + trace_usb_xhci_slot_enable(slotid); + assert(slotid >= 1 && slotid <= xhci->numslots); + xhci->slots[slotid-1].enabled = 1; + xhci->slots[slotid-1].uport = NULL; + memset(xhci->slots[slotid-1].eps, 0, sizeof(XHCIEPContext*)*31); + + return CC_SUCCESS; +} + +static TRBCCode xhci_disable_slot(XHCIState *xhci, unsigned int slotid) +{ + int i; + + trace_usb_xhci_slot_disable(slotid); + assert(slotid >= 1 && slotid <= xhci->numslots); + + for (i = 1; i <= 31; i++) { + if (xhci->slots[slotid-1].eps[i-1]) { + 
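/* tear down every endpoint context still allocated for this slot */ +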
xhci_disable_ep(xhci, slotid, i); + } + } + + xhci->slots[slotid-1].enabled = 0; + xhci->slots[slotid-1].addressed = 0; + xhci->slots[slotid-1].uport = NULL; + xhci->slots[slotid-1].intr = 0; + return CC_SUCCESS; +} + +static USBPort *xhci_lookup_uport(XHCIState *xhci, uint32_t *slot_ctx) +{ + USBPort *uport; + char path[32]; + int i, pos, port; + + port = (slot_ctx[1]>>16) & 0xFF; + if (port < 1 || port > xhci->numports) { + return NULL; + } + port = xhci->ports[port-1].uport->index+1; + pos = snprintf(path, sizeof(path), "%d", port); + for (i = 0; i < 5; i++) { + port = (slot_ctx[0] >> 4*i) & 0x0f; + if (!port) { + break; + } + pos += snprintf(path + pos, sizeof(path) - pos, ".%d", port); + } + + QTAILQ_FOREACH(uport, &xhci->bus.used, next) { + if (strcmp(uport->path, path) == 0) { + return uport; + } + } + return NULL; +} + +static TRBCCode xhci_address_slot(XHCIState *xhci, unsigned int slotid, + uint64_t pictx, bool bsr) +{ + XHCISlot *slot; + USBPort *uport; + USBDevice *dev; + dma_addr_t ictx, octx, dcbaap; + uint64_t poctx; + uint32_t ictl_ctx[2]; + uint32_t slot_ctx[4]; + uint32_t ep0_ctx[5]; + int i; + TRBCCode res; + + assert(slotid >= 1 && slotid <= xhci->numslots); + + dcbaap = xhci_addr64(xhci->dcbaap_low, xhci->dcbaap_high); + poctx = ldq_le_dma(xhci->as, dcbaap + 8 * slotid); + ictx = xhci_mask64(pictx); + octx = xhci_mask64(poctx); + + DPRINTF("xhci: input context at "DMA_ADDR_FMT"\n", ictx); + DPRINTF("xhci: output context at "DMA_ADDR_FMT"\n", octx); + + xhci_dma_read_u32s(xhci, ictx, ictl_ctx, sizeof(ictl_ctx)); + + if (ictl_ctx[0] != 0x0 || ictl_ctx[1] != 0x3) { + DPRINTF("xhci: invalid input context control %08x %08x\n", + ictl_ctx[0], ictl_ctx[1]); + return CC_TRB_ERROR; + } + + xhci_dma_read_u32s(xhci, ictx+32, slot_ctx, sizeof(slot_ctx)); + xhci_dma_read_u32s(xhci, ictx+64, ep0_ctx, sizeof(ep0_ctx)); + + DPRINTF("xhci: input slot context: %08x %08x %08x %08x\n", + slot_ctx[0], slot_ctx[1], slot_ctx[2], slot_ctx[3]); + + DPRINTF("xhci: input ep0 context: %08x %08x %08x %08x %08x\n", + ep0_ctx[0], ep0_ctx[1], ep0_ctx[2], ep0_ctx[3], ep0_ctx[4]); + + uport = xhci_lookup_uport(xhci, slot_ctx); + if (uport == NULL) { + DPRINTF("xhci: port not found\n"); + return CC_TRB_ERROR; + } + trace_usb_xhci_slot_address(slotid, uport->path); + + dev = uport->dev; + if (!dev || !dev->attached) { + DPRINTF("xhci: port %s not connected\n", uport->path); + return CC_USB_TRANSACTION_ERROR; + } + + for (i = 0; i < xhci->numslots; i++) { + if (i == slotid-1) { + continue; + } + if (xhci->slots[i].uport == uport) { + DPRINTF("xhci: port %s already assigned to slot %d\n", + uport->path, i+1); + return CC_TRB_ERROR; + } + } + + slot = &xhci->slots[slotid-1]; + slot->uport = uport; + slot->ctx = octx; + slot->intr = get_field(slot_ctx[2], TRB_INTR); + + /* Make sure device is in USB_STATE_DEFAULT state */ + usb_device_reset(dev); + if (bsr) { + slot_ctx[3] = SLOT_DEFAULT << SLOT_STATE_SHIFT; + } else { + USBPacket p; + uint8_t buf[1]; + + slot_ctx[3] = (SLOT_ADDRESSED << SLOT_STATE_SHIFT) | slotid; + memset(&p, 0, sizeof(p)); + usb_packet_addbuf(&p, buf, sizeof(buf)); + usb_packet_setup(&p, USB_TOKEN_OUT, + usb_ep_get(dev, USB_TOKEN_OUT, 0), 0, + 0, false, false); + usb_device_handle_control(dev, &p, + DeviceOutRequest | USB_REQ_SET_ADDRESS, + slotid, 0, 0, NULL); + assert(p.status != USB_RET_ASYNC); + usb_packet_cleanup(&p); + } + + res = xhci_enable_ep(xhci, slotid, 1, octx+32, ep0_ctx); + + DPRINTF("xhci: output slot context: %08x %08x %08x %08x\n", + slot_ctx[0], slot_ctx[1], 
slot_ctx[2], slot_ctx[3]); + DPRINTF("xhci: output ep0 context: %08x %08x %08x %08x %08x\n", + ep0_ctx[0], ep0_ctx[1], ep0_ctx[2], ep0_ctx[3], ep0_ctx[4]); + + xhci_dma_write_u32s(xhci, octx, slot_ctx, sizeof(slot_ctx)); + xhci_dma_write_u32s(xhci, octx+32, ep0_ctx, sizeof(ep0_ctx)); + + xhci->slots[slotid-1].addressed = 1; + return res; +} + + +static TRBCCode xhci_configure_slot(XHCIState *xhci, unsigned int slotid, + uint64_t pictx, bool dc) +{ + dma_addr_t ictx, octx; + uint32_t ictl_ctx[2]; + uint32_t slot_ctx[4]; + uint32_t islot_ctx[4]; + uint32_t ep_ctx[5]; + int i; + TRBCCode res; + + trace_usb_xhci_slot_configure(slotid); + assert(slotid >= 1 && slotid <= xhci->numslots); + + ictx = xhci_mask64(pictx); + octx = xhci->slots[slotid-1].ctx; + + DPRINTF("xhci: input context at "DMA_ADDR_FMT"\n", ictx); + DPRINTF("xhci: output context at "DMA_ADDR_FMT"\n", octx); + + if (dc) { + for (i = 2; i <= 31; i++) { + if (xhci->slots[slotid-1].eps[i-1]) { + xhci_disable_ep(xhci, slotid, i); + } + } + + xhci_dma_read_u32s(xhci, octx, slot_ctx, sizeof(slot_ctx)); + slot_ctx[3] &= ~(SLOT_STATE_MASK << SLOT_STATE_SHIFT); + slot_ctx[3] |= SLOT_ADDRESSED << SLOT_STATE_SHIFT; + DPRINTF("xhci: output slot context: %08x %08x %08x %08x\n", + slot_ctx[0], slot_ctx[1], slot_ctx[2], slot_ctx[3]); + xhci_dma_write_u32s(xhci, octx, slot_ctx, sizeof(slot_ctx)); + + return CC_SUCCESS; + } + + xhci_dma_read_u32s(xhci, ictx, ictl_ctx, sizeof(ictl_ctx)); + + if ((ictl_ctx[0] & 0x3) != 0x0 || (ictl_ctx[1] & 0x3) != 0x1) { + DPRINTF("xhci: invalid input context control %08x %08x\n", + ictl_ctx[0], ictl_ctx[1]); + return CC_TRB_ERROR; + } + + xhci_dma_read_u32s(xhci, ictx+32, islot_ctx, sizeof(islot_ctx)); + xhci_dma_read_u32s(xhci, octx, slot_ctx, sizeof(slot_ctx)); + + if (SLOT_STATE(slot_ctx[3]) < SLOT_ADDRESSED) { + DPRINTF("xhci: invalid slot state %08x\n", slot_ctx[3]); + return CC_CONTEXT_STATE_ERROR; + } + + xhci_free_device_streams(xhci, slotid, ictl_ctx[0] | ictl_ctx[1]); + + for (i = 2; i <= 31; i++) { + if (ictl_ctx[0] & (1<<i)) { + xhci_disable_ep(xhci, slotid, i); + } + if (ictl_ctx[1] & (1<<i)) { + xhci_dma_read_u32s(xhci, ictx+32+(32*i), ep_ctx, sizeof(ep_ctx)); + DPRINTF("xhci: input ep%d.%d context: %08x %08x %08x %08x %08x\n", + i/2, i%2, ep_ctx[0], ep_ctx[1], ep_ctx[2], + ep_ctx[3], ep_ctx[4]); + xhci_disable_ep(xhci, slotid, i); + res = xhci_enable_ep(xhci, slotid, i, octx+(32*i), ep_ctx); + if (res != CC_SUCCESS) { + return res; + } + DPRINTF("xhci: output ep%d.%d context: %08x %08x %08x %08x %08x\n", + i/2, i%2, ep_ctx[0], ep_ctx[1], ep_ctx[2], + ep_ctx[3], ep_ctx[4]); + xhci_dma_write_u32s(xhci, octx+(32*i), ep_ctx, sizeof(ep_ctx)); + } + } + + res = xhci_alloc_device_streams(xhci, slotid, ictl_ctx[1]); + if (res != CC_SUCCESS) { + for (i = 2; i <= 31; i++) { + if (ictl_ctx[1] & (1 << i)) { + xhci_disable_ep(xhci, slotid, i); + } + } + return res; + } + + slot_ctx[3] &= ~(SLOT_STATE_MASK << SLOT_STATE_SHIFT); + slot_ctx[3] |= SLOT_CONFIGURED << SLOT_STATE_SHIFT; + slot_ctx[0] &= ~(SLOT_CONTEXT_ENTRIES_MASK << SLOT_CONTEXT_ENTRIES_SHIFT); + slot_ctx[0] |= islot_ctx[0] & (SLOT_CONTEXT_ENTRIES_MASK << + SLOT_CONTEXT_ENTRIES_SHIFT); + DPRINTF("xhci: output slot context: %08x %08x %08x %08x\n", + slot_ctx[0], slot_ctx[1], slot_ctx[2], slot_ctx[3]); + + xhci_dma_write_u32s(xhci, octx, slot_ctx, sizeof(slot_ctx)); + + return CC_SUCCESS; +} + +static TRBCCode xhci_evaluate_slot(XHCIState *xhci, unsigned int slotid, + uint64_t pictx) +{ + dma_addr_t ictx, octx; + uint32_t ictl_ctx[2]; + uint32_t iep0_ctx[5]; + uint32_t ep0_ctx[5]; + uint32_t islot_ctx[4]; + uint32_t slot_ctx[4]; + + trace_usb_xhci_slot_evaluate(slotid); + assert(slotid >= 1 && slotid <= xhci->numslots); + + ictx = xhci_mask64(pictx); + octx = xhci->slots[slotid-1].ctx; + + DPRINTF("xhci: input context at "DMA_ADDR_FMT"\n", ictx); + DPRINTF("xhci: output context at "DMA_ADDR_FMT"\n", octx); + + xhci_dma_read_u32s(xhci, ictx, ictl_ctx, sizeof(ictl_ctx)); + + if (ictl_ctx[0] != 0x0 || ictl_ctx[1] & ~0x3) { + DPRINTF("xhci: invalid input context control %08x %08x\n", + ictl_ctx[0], ictl_ctx[1]); + return CC_TRB_ERROR; + } + + if (ictl_ctx[1] & 0x1) { + xhci_dma_read_u32s(xhci, ictx+32, islot_ctx, sizeof(islot_ctx)); + + DPRINTF("xhci: input slot context: %08x %08x %08x %08x\n", + islot_ctx[0], islot_ctx[1], islot_ctx[2], islot_ctx[3]); + + xhci_dma_read_u32s(xhci, octx, slot_ctx, sizeof(slot_ctx)); + + slot_ctx[1] &= ~0xFFFF; /* max exit latency */ + slot_ctx[1] |= islot_ctx[1] & 0xFFFF; + /* update interrupter target field */ + xhci->slots[slotid-1].intr = get_field(islot_ctx[2], TRB_INTR); + set_field(&slot_ctx[2], xhci->slots[slotid-1].intr, TRB_INTR); + + DPRINTF("xhci: output slot context: %08x %08x %08x %08x\n", + slot_ctx[0], slot_ctx[1], slot_ctx[2], slot_ctx[3]); + + xhci_dma_write_u32s(xhci, octx, slot_ctx, sizeof(slot_ctx)); + } + + if (ictl_ctx[1] & 0x2) { + xhci_dma_read_u32s(xhci, ictx+64, iep0_ctx, sizeof(iep0_ctx)); + + DPRINTF("xhci: input ep0 context: %08x %08x %08x %08x %08x\n", + iep0_ctx[0], iep0_ctx[1], iep0_ctx[2], + iep0_ctx[3], iep0_ctx[4]); + + xhci_dma_read_u32s(xhci, octx+32, ep0_ctx, sizeof(ep0_ctx)); + + ep0_ctx[1] &= 
~0xFFFF0000; /* max packet size*/ + ep0_ctx[1] |= iep0_ctx[1] & 0xFFFF0000; + + DPRINTF("xhci: output ep0 context: %08x %08x %08x %08x %08x\n", + ep0_ctx[0], ep0_ctx[1], ep0_ctx[2], ep0_ctx[3], ep0_ctx[4]); + + xhci_dma_write_u32s(xhci, octx+32, ep0_ctx, sizeof(ep0_ctx)); + } + + return CC_SUCCESS; +} + +static TRBCCode xhci_reset_slot(XHCIState *xhci, unsigned int slotid) +{ + uint32_t slot_ctx[4]; + dma_addr_t octx; + int i; + + trace_usb_xhci_slot_reset(slotid); + assert(slotid >= 1 && slotid <= xhci->numslots); + + octx = xhci->slots[slotid-1].ctx; + + DPRINTF("xhci: output context at "DMA_ADDR_FMT"\n", octx); + + for (i = 2; i <= 31; i++) { + if (xhci->slots[slotid-1].eps[i-1]) { + xhci_disable_ep(xhci, slotid, i); + } + } + + xhci_dma_read_u32s(xhci, octx, slot_ctx, sizeof(slot_ctx)); + slot_ctx[3] &= ~(SLOT_STATE_MASK << SLOT_STATE_SHIFT); + slot_ctx[3] |= SLOT_DEFAULT << SLOT_STATE_SHIFT; + DPRINTF("xhci: output slot context: %08x %08x %08x %08x\n", + slot_ctx[0], slot_ctx[1], slot_ctx[2], slot_ctx[3]); + xhci_dma_write_u32s(xhci, octx, slot_ctx, sizeof(slot_ctx)); + + return CC_SUCCESS; +} + +static unsigned int xhci_get_slot(XHCIState *xhci, XHCIEvent *event, XHCITRB *trb) +{ + unsigned int slotid; + slotid = (trb->control >> TRB_CR_SLOTID_SHIFT) & TRB_CR_SLOTID_MASK; + if (slotid < 1 || slotid > xhci->numslots) { + DPRINTF("xhci: bad slot id %d\n", slotid); + event->ccode = CC_TRB_ERROR; + return 0; + } else if (!xhci->slots[slotid-1].enabled) { + DPRINTF("xhci: slot id %d not enabled\n", slotid); + event->ccode = CC_SLOT_NOT_ENABLED_ERROR; + return 0; + } + return slotid; +} + +/* cleanup slot state on usb device detach */ +static void xhci_detach_slot(XHCIState *xhci, USBPort *uport) +{ + int slot, ep; + + for (slot = 0; slot < xhci->numslots; slot++) { + if (xhci->slots[slot].uport == uport) { + break; + } + } + if (slot == xhci->numslots) { + return; + } + + for (ep = 0; ep < 31; ep++) { + if (xhci->slots[slot].eps[ep]) { + xhci_ep_nuke_xfers(xhci, slot + 1, ep + 1, 0); + } + } + xhci->slots[slot].uport = NULL; +} + +static TRBCCode xhci_get_port_bandwidth(XHCIState *xhci, uint64_t pctx) +{ + dma_addr_t ctx; + uint8_t bw_ctx[xhci->numports+1]; + + DPRINTF("xhci_get_port_bandwidth()\n"); + + ctx = xhci_mask64(pctx); + + DPRINTF("xhci: bandwidth context at "DMA_ADDR_FMT"\n", ctx); + + /* TODO: actually implement real values here */ + bw_ctx[0] = 0; + memset(&bw_ctx[1], 80, xhci->numports); /* 80% */ + dma_memory_write(xhci->as, ctx, bw_ctx, sizeof(bw_ctx)); + + return CC_SUCCESS; +} + +static uint32_t rotl(uint32_t v, unsigned count) +{ + count &= 31; + return (v << count) | (v >> (32 - count)); +} + + +static uint32_t xhci_nec_challenge(uint32_t hi, uint32_t lo) +{ + uint32_t val; + val = rotl(lo - 0x49434878, 32 - ((hi>>8) & 0x1F)); + val += rotl(lo + 0x49434878, hi & 0x1F); + val -= rotl(hi ^ 0x49434878, (lo >> 16) & 0x1F); + return ~val; +} + +static void xhci_process_commands(XHCIState *xhci) +{ + XHCITRB trb; + TRBType type; + XHCIEvent event = {ER_COMMAND_COMPLETE, CC_SUCCESS}; + dma_addr_t addr; + unsigned int i, slotid = 0, count = 0; + + DPRINTF("xhci_process_commands()\n"); + if (!xhci_running(xhci)) { + DPRINTF("xhci_process_commands() called while xHC stopped or paused\n"); + return; + } + + xhci->crcr_low |= CRCR_CRR; + + while ((type = xhci_ring_fetch(xhci, &xhci->cmd_ring, &trb, &addr))) { + event.ptr = addr; + switch (type) { + case CR_ENABLE_SLOT: + for (i = 0; i < xhci->numslots; i++) { + if (!xhci->slots[i].enabled) { + break; + } + } + if (i >= 
xhci->numslots) { + DPRINTF("xhci: no device slots available\n"); + event.ccode = CC_NO_SLOTS_ERROR; + } else { + slotid = i+1; + event.ccode = xhci_enable_slot(xhci, slotid); + } + break; + case CR_DISABLE_SLOT: + slotid = xhci_get_slot(xhci, &event, &trb); + if (slotid) { + event.ccode = xhci_disable_slot(xhci, slotid); + } + break; + case CR_ADDRESS_DEVICE: + slotid = xhci_get_slot(xhci, &event, &trb); + if (slotid) { + event.ccode = xhci_address_slot(xhci, slotid, trb.parameter, + trb.control & TRB_CR_BSR); + } + break; + case CR_CONFIGURE_ENDPOINT: + slotid = xhci_get_slot(xhci, &event, &trb); + if (slotid) { + event.ccode = xhci_configure_slot(xhci, slotid, trb.parameter, + trb.control & TRB_CR_DC); + } + break; + case CR_EVALUATE_CONTEXT: + slotid = xhci_get_slot(xhci, &event, &trb); + if (slotid) { + event.ccode = xhci_evaluate_slot(xhci, slotid, trb.parameter); + } + break; + case CR_STOP_ENDPOINT: + slotid = xhci_get_slot(xhci, &event, &trb); + if (slotid) { + unsigned int epid = (trb.control >> TRB_CR_EPID_SHIFT) + & TRB_CR_EPID_MASK; + event.ccode = xhci_stop_ep(xhci, slotid, epid); + } + break; + case CR_RESET_ENDPOINT: + slotid = xhci_get_slot(xhci, &event, &trb); + if (slotid) { + unsigned int epid = (trb.control >> TRB_CR_EPID_SHIFT) + & TRB_CR_EPID_MASK; + event.ccode = xhci_reset_ep(xhci, slotid, epid); + } + break; + case CR_SET_TR_DEQUEUE: + slotid = xhci_get_slot(xhci, &event, &trb); + if (slotid) { + unsigned int epid = (trb.control >> TRB_CR_EPID_SHIFT) + & TRB_CR_EPID_MASK; + unsigned int streamid = (trb.status >> 16) & 0xffff; + event.ccode = xhci_set_ep_dequeue(xhci, slotid, + epid, streamid, + trb.parameter); + } + break; + case CR_RESET_DEVICE: + slotid = xhci_get_slot(xhci, &event, &trb); + if (slotid) { + event.ccode = xhci_reset_slot(xhci, slotid); + } + break; + case CR_GET_PORT_BANDWIDTH: + event.ccode = xhci_get_port_bandwidth(xhci, trb.parameter); + break; + case CR_NOOP: + event.ccode = CC_SUCCESS; + break; + case CR_VENDOR_NEC_FIRMWARE_REVISION: + if (xhci->nec_quirks) { + event.type = 48; /* NEC reply */ + event.length = 0x3025; + } else { + event.ccode = CC_TRB_ERROR; + } + break; + case CR_VENDOR_NEC_CHALLENGE_RESPONSE: + if (xhci->nec_quirks) { + uint32_t chi = trb.parameter >> 32; + uint32_t clo = trb.parameter; + uint32_t val = xhci_nec_challenge(chi, clo); + event.length = val & 0xFFFF; + event.epid = val >> 16; + slotid = val >> 24; + event.type = 48; /* NEC reply */ + } else { + event.ccode = CC_TRB_ERROR; + } + break; + default: + trace_usb_xhci_unimplemented("command", type); + event.ccode = CC_TRB_ERROR; + break; + } + event.slotid = slotid; + xhci_event(xhci, &event, 0); + + if (count++ > COMMAND_LIMIT) { + trace_usb_xhci_enforced_limit("commands"); + return; + } + } +} + +static bool xhci_port_have_device(XHCIPort *port) +{ + if (!port->uport->dev || !port->uport->dev->attached) { + return false; /* no device present */ + } + if (!((1 << port->uport->dev->speed) & port->speedmask)) { + return false; /* speed mismatch */ + } + return true; +} + +static void xhci_port_notify(XHCIPort *port, uint32_t bits) +{ + XHCIEvent ev = { ER_PORT_STATUS_CHANGE, CC_SUCCESS, + port->portnr << 24 }; + + if ((port->portsc & bits) == bits) { + return; + } + trace_usb_xhci_port_notify(port->portnr, bits); + port->portsc |= bits; + if (!xhci_running(port->xhci)) { + return; + } + xhci_event(port->xhci, &ev, 0); +} + +static void xhci_port_update(XHCIPort *port, int is_detach) +{ + uint32_t pls = PLS_RX_DETECT; + + assert(port); + port->portsc = PORTSC_PP; + if 
(!is_detach && xhci_port_have_device(port)) { + port->portsc |= PORTSC_CCS; + switch (port->uport->dev->speed) { + case USB_SPEED_LOW: + port->portsc |= PORTSC_SPEED_LOW; + pls = PLS_POLLING; + break; + case USB_SPEED_FULL: + port->portsc |= PORTSC_SPEED_FULL; + pls = PLS_POLLING; + break; + case USB_SPEED_HIGH: + port->portsc |= PORTSC_SPEED_HIGH; + pls = PLS_POLLING; + break; + case USB_SPEED_SUPER: + port->portsc |= PORTSC_SPEED_SUPER; + port->portsc |= PORTSC_PED; + pls = PLS_U0; + break; + } + } + set_field(&port->portsc, pls, PORTSC_PLS); + trace_usb_xhci_port_link(port->portnr, pls); + xhci_port_notify(port, PORTSC_CSC); +} + +static void xhci_port_reset(XHCIPort *port, bool warm_reset) +{ + trace_usb_xhci_port_reset(port->portnr, warm_reset); + + if (!xhci_port_have_device(port)) { + return; + } + + usb_device_reset(port->uport->dev); + + switch (port->uport->dev->speed) { + case USB_SPEED_SUPER: + if (warm_reset) { + port->portsc |= PORTSC_WRC; + } + /* fall through */ + case USB_SPEED_LOW: + case USB_SPEED_FULL: + case USB_SPEED_HIGH: + set_field(&port->portsc, PLS_U0, PORTSC_PLS); + trace_usb_xhci_port_link(port->portnr, PLS_U0); + port->portsc |= PORTSC_PED; + break; + } + + port->portsc &= ~PORTSC_PR; + xhci_port_notify(port, PORTSC_PRC); +} + +static void xhci_reset(DeviceState *dev) +{ + XHCIState *xhci = XHCI(dev); + int i; + + trace_usb_xhci_reset(); + if (!(xhci->usbsts & USBSTS_HCH)) { + DPRINTF("xhci: reset while running!\n"); + } + + xhci->usbcmd = 0; + xhci->usbsts = USBSTS_HCH; + xhci->dnctrl = 0; + xhci->crcr_low = 0; + xhci->crcr_high = 0; + xhci->dcbaap_low = 0; + xhci->dcbaap_high = 0; + xhci->config = 0; + + for (i = 0; i < xhci->numslots; i++) { + xhci_disable_slot(xhci, i+1); + } + + for (i = 0; i < xhci->numports; i++) { + xhci_port_update(xhci->ports + i, 0); + } + + for (i = 0; i < xhci->numintrs; i++) { + xhci->intr[i].iman = 0; + xhci->intr[i].imod = 0; + xhci->intr[i].erstsz = 0; + xhci->intr[i].erstba_low = 0; + xhci->intr[i].erstba_high = 0; + xhci->intr[i].erdp_low = 0; + xhci->intr[i].erdp_high = 0; + + xhci->intr[i].er_ep_idx = 0; + xhci->intr[i].er_pcs = 1; + xhci->intr[i].ev_buffer_put = 0; + xhci->intr[i].ev_buffer_get = 0; + } + + xhci->mfindex_start = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + xhci_mfwrap_update(xhci); +} + +static uint64_t xhci_cap_read(void *ptr, hwaddr reg, unsigned size) +{ + XHCIState *xhci = ptr; + uint32_t ret; + + switch (reg) { + case 0x00: /* HCIVERSION, CAPLENGTH */ + ret = 0x01000000 | LEN_CAP; + break; + case 0x04: /* HCSPARAMS 1 */ + ret = ((xhci->numports_2+xhci->numports_3)<<24) + | (xhci->numintrs<<8) | xhci->numslots; + break; + case 0x08: /* HCSPARAMS 2 */ + ret = 0x0000000f; + break; + case 0x0c: /* HCSPARAMS 3 */ + ret = 0x00000000; + break; + case 0x10: /* HCCPARAMS */ + if (sizeof(dma_addr_t) == 4) { + ret = 0x00080000 | (xhci->max_pstreams_mask << 12); + } else { + ret = 0x00080001 | (xhci->max_pstreams_mask << 12); + } + break; + case 0x14: /* DBOFF */ + ret = OFF_DOORBELL; + break; + case 0x18: /* RTSOFF */ + ret = OFF_RUNTIME; + break; + + /* extended capabilities */ + case 0x20: /* Supported Protocol:00 */ + ret = 0x02000402; /* USB 2.0 */ + break; + case 0x24: /* Supported Protocol:04 */ + ret = 0x20425355; /* "USB " */ + break; + case 0x28: /* Supported Protocol:08 */ + if (xhci_get_flag(xhci, XHCI_FLAG_SS_FIRST)) { + ret = (xhci->numports_2<<8) | (xhci->numports_3+1); + } else { + ret = (xhci->numports_2<<8) | 1; + } + break; + case 0x2c: /* Supported Protocol:0c */ + ret = 0x00000000; /* reserved 
*/ + break; + case 0x30: /* Supported Protocol:00 */ + ret = 0x03000002; /* USB 3.0 */ + break; + case 0x34: /* Supported Protocol:04 */ + ret = 0x20425355; /* "USB " */ + break; + case 0x38: /* Supported Protocol:08 */ + if (xhci_get_flag(xhci, XHCI_FLAG_SS_FIRST)) { + ret = (xhci->numports_3<<8) | 1; + } else { + ret = (xhci->numports_3<<8) | (xhci->numports_2+1); + } + break; + case 0x3c: /* Supported Protocol:0c */ + ret = 0x00000000; /* reserved */ + break; + default: + trace_usb_xhci_unimplemented("cap read", reg); + ret = 0; + } + + trace_usb_xhci_cap_read(reg, ret); + return ret; +} + +static uint64_t xhci_port_read(void *ptr, hwaddr reg, unsigned size) +{ + XHCIPort *port = ptr; + uint32_t ret; + + switch (reg) { + case 0x00: /* PORTSC */ + ret = port->portsc; + break; + case 0x04: /* PORTPMSC */ + case 0x08: /* PORTLI */ + ret = 0; + break; + case 0x0c: /* reserved */ + default: + trace_usb_xhci_unimplemented("port read", reg); + ret = 0; + } + + trace_usb_xhci_port_read(port->portnr, reg, ret); + return ret; +} + +static void xhci_port_write(void *ptr, hwaddr reg, + uint64_t val, unsigned size) +{ + XHCIPort *port = ptr; + uint32_t portsc, notify; + + trace_usb_xhci_port_write(port->portnr, reg, val); + + switch (reg) { + case 0x00: /* PORTSC */ + /* write-1-to-start bits */ + if (val & PORTSC_WPR) { + xhci_port_reset(port, true); + break; + } + if (val & PORTSC_PR) { + xhci_port_reset(port, false); + break; + } + + portsc = port->portsc; + notify = 0; + /* write-1-to-clear bits*/ + portsc &= ~(val & (PORTSC_CSC|PORTSC_PEC|PORTSC_WRC|PORTSC_OCC| + PORTSC_PRC|PORTSC_PLC|PORTSC_CEC)); + if (val & PORTSC_LWS) { + /* overwrite PLS only when LWS=1 */ + uint32_t old_pls = get_field(port->portsc, PORTSC_PLS); + uint32_t new_pls = get_field(val, PORTSC_PLS); + switch (new_pls) { + case PLS_U0: + if (old_pls != PLS_U0) { + set_field(&portsc, new_pls, PORTSC_PLS); + trace_usb_xhci_port_link(port->portnr, new_pls); + notify = PORTSC_PLC; + } + break; + case PLS_U3: + if (old_pls < PLS_U3) { + set_field(&portsc, new_pls, PORTSC_PLS); + trace_usb_xhci_port_link(port->portnr, new_pls); + } + break; + case PLS_RESUME: + /* windows does this for some reason, don't spam stderr */ + break; + default: + DPRINTF("%s: ignore pls write (old %d, new %d)\n", + __func__, old_pls, new_pls); + break; + } + } + /* read/write bits */ + portsc &= ~(PORTSC_PP|PORTSC_WCE|PORTSC_WDE|PORTSC_WOE); + portsc |= (val & (PORTSC_PP|PORTSC_WCE|PORTSC_WDE|PORTSC_WOE)); + port->portsc = portsc; + if (notify) { + xhci_port_notify(port, notify); + } + break; + case 0x04: /* PORTPMSC */ + case 0x08: /* PORTLI */ + default: + trace_usb_xhci_unimplemented("port write", reg); + } +} + +static uint64_t xhci_oper_read(void *ptr, hwaddr reg, unsigned size) +{ + XHCIState *xhci = ptr; + uint32_t ret; + + switch (reg) { + case 0x00: /* USBCMD */ + ret = xhci->usbcmd; + break; + case 0x04: /* USBSTS */ + ret = xhci->usbsts; + break; + case 0x08: /* PAGESIZE */ + ret = 1; /* 4KiB */ + break; + case 0x14: /* DNCTRL */ + ret = xhci->dnctrl; + break; + case 0x18: /* CRCR low */ + ret = xhci->crcr_low & ~0xe; + break; + case 0x1c: /* CRCR high */ + ret = xhci->crcr_high; + break; + case 0x30: /* DCBAAP low */ + ret = xhci->dcbaap_low; + break; + case 0x34: /* DCBAAP high */ + ret = xhci->dcbaap_high; + break; + case 0x38: /* CONFIG */ + ret = xhci->config; + break; + default: + trace_usb_xhci_unimplemented("oper read", reg); + ret = 0; + } + + trace_usb_xhci_oper_read(reg, ret); + return ret; +} + +static void xhci_oper_write(void *ptr, 
hwaddr reg, + uint64_t val, unsigned size) +{ + XHCIState *xhci = XHCI(ptr); + + trace_usb_xhci_oper_write(reg, val); + + switch (reg) { + case 0x00: /* USBCMD */ + if ((val & USBCMD_RS) && !(xhci->usbcmd & USBCMD_RS)) { + xhci_run(xhci); + } else if (!(val & USBCMD_RS) && (xhci->usbcmd & USBCMD_RS)) { + xhci_stop(xhci); + } + if (val & USBCMD_CSS) { + /* save state */ + xhci->usbsts &= ~USBSTS_SRE; + } + if (val & USBCMD_CRS) { + /* restore state */ + xhci->usbsts |= USBSTS_SRE; + } + xhci->usbcmd = val & 0xc0f; + xhci_mfwrap_update(xhci); + if (val & USBCMD_HCRST) { + xhci_reset(DEVICE(xhci)); + } + xhci_intr_update(xhci, 0); + break; + + case 0x04: /* USBSTS */ + /* these bits are write-1-to-clear */ + xhci->usbsts &= ~(val & (USBSTS_HSE|USBSTS_EINT|USBSTS_PCD|USBSTS_SRE)); + xhci_intr_update(xhci, 0); + break; + + case 0x14: /* DNCTRL */ + xhci->dnctrl = val & 0xffff; + break; + case 0x18: /* CRCR low */ + xhci->crcr_low = (val & 0xffffffcf) | (xhci->crcr_low & CRCR_CRR); + break; + case 0x1c: /* CRCR high */ + xhci->crcr_high = val; + if (xhci->crcr_low & (CRCR_CA|CRCR_CS) && (xhci->crcr_low & CRCR_CRR)) { + XHCIEvent event = {ER_COMMAND_COMPLETE, CC_COMMAND_RING_STOPPED}; + xhci->crcr_low &= ~CRCR_CRR; + xhci_event(xhci, &event, 0); + DPRINTF("xhci: command ring stopped (CRCR=%08x)\n", xhci->crcr_low); + } else { + dma_addr_t base = xhci_addr64(xhci->crcr_low & ~0x3f, val); + xhci_ring_init(xhci, &xhci->cmd_ring, base); + } + xhci->crcr_low &= ~(CRCR_CA | CRCR_CS); + break; + case 0x30: /* DCBAAP low */ + xhci->dcbaap_low = val & 0xffffffc0; + break; + case 0x34: /* DCBAAP high */ + xhci->dcbaap_high = val; + break; + case 0x38: /* CONFIG */ + xhci->config = val & 0xff; + break; + default: + trace_usb_xhci_unimplemented("oper write", reg); + } +} + +static uint64_t xhci_runtime_read(void *ptr, hwaddr reg, + unsigned size) +{ + XHCIState *xhci = ptr; + uint32_t ret = 0; + + if (reg < 0x20) { + switch (reg) { + case 0x00: /* MFINDEX */ + ret = xhci_mfindex_get(xhci) & 0x3fff; + break; + default: + trace_usb_xhci_unimplemented("runtime read", reg); + break; + } + } else { + int v = (reg - 0x20) / 0x20; + XHCIInterrupter *intr = &xhci->intr[v]; + switch (reg & 0x1f) { + case 0x00: /* IMAN */ + ret = intr->iman; + break; + case 0x04: /* IMOD */ + ret = intr->imod; + break; + case 0x08: /* ERSTSZ */ + ret = intr->erstsz; + break; + case 0x10: /* ERSTBA low */ + ret = intr->erstba_low; + break; + case 0x14: /* ERSTBA high */ + ret = intr->erstba_high; + break; + case 0x18: /* ERDP low */ + ret = intr->erdp_low; + break; + case 0x1c: /* ERDP high */ + ret = intr->erdp_high; + break; + } + } + + trace_usb_xhci_runtime_read(reg, ret); + return ret; +} + +static void xhci_runtime_write(void *ptr, hwaddr reg, + uint64_t val, unsigned size) +{ + XHCIState *xhci = ptr; + XHCIInterrupter *intr; + int v; + + trace_usb_xhci_runtime_write(reg, val); + + if (reg < 0x20) { + trace_usb_xhci_unimplemented("runtime write", reg); + return; + } + v = (reg - 0x20) / 0x20; + intr = &xhci->intr[v]; + + switch (reg & 0x1f) { + case 0x00: /* IMAN */ + if (val & IMAN_IP) { + intr->iman &= ~IMAN_IP; + } + intr->iman &= ~IMAN_IE; + intr->iman |= val & IMAN_IE; + xhci_intr_update(xhci, v); + break; + case 0x04: /* IMOD */ + intr->imod = val; + break; + case 0x08: /* ERSTSZ */ + intr->erstsz = val & 0xffff; + break; + case 0x10: /* ERSTBA low */ + if (xhci->nec_quirks) { + /* NEC driver bug: it doesn't align this to 64 bytes */ + intr->erstba_low = val & 0xfffffff0; + } else { + intr->erstba_low = val & 0xffffffc0; + 
} + break; + case 0x14: /* ERSTBA high */ + intr->erstba_high = val; + xhci_er_reset(xhci, v); + break; + case 0x18: /* ERDP low */ + if (val & ERDP_EHB) { + intr->erdp_low &= ~ERDP_EHB; + } + intr->erdp_low = (val & ~ERDP_EHB) | (intr->erdp_low & ERDP_EHB); + if (val & ERDP_EHB) { + dma_addr_t erdp = xhci_addr64(intr->erdp_low, intr->erdp_high); + unsigned int dp_idx = (erdp - intr->er_start) / TRB_SIZE; + if (erdp >= intr->er_start && + erdp < (intr->er_start + TRB_SIZE * intr->er_size) && + dp_idx != intr->er_ep_idx) { + xhci_intr_raise(xhci, v); + } + } + break; + case 0x1c: /* ERDP high */ + intr->erdp_high = val; + break; + default: + trace_usb_xhci_unimplemented("oper write", reg); + } +} + +static uint64_t xhci_doorbell_read(void *ptr, hwaddr reg, + unsigned size) +{ + /* doorbells always read as 0 */ + trace_usb_xhci_doorbell_read(reg, 0); + return 0; +} + +static void xhci_doorbell_write(void *ptr, hwaddr reg, + uint64_t val, unsigned size) +{ + XHCIState *xhci = ptr; + unsigned int epid, streamid; + + trace_usb_xhci_doorbell_write(reg, val); + + if (!xhci_running(xhci)) { + DPRINTF("xhci: wrote doorbell while xHC stopped or paused\n"); + return; + } + + reg >>= 2; + + if (reg == 0) { + if (val == 0) { + xhci_process_commands(xhci); + } else { + DPRINTF("xhci: bad doorbell 0 write: 0x%x\n", + (uint32_t)val); + } + } else { + epid = val & 0xff; + streamid = (val >> 16) & 0xffff; + if (reg > xhci->numslots) { + DPRINTF("xhci: bad doorbell %d\n", (int)reg); + } else if (epid == 0 || epid > 31) { + DPRINTF("xhci: bad doorbell %d write: 0x%x\n", + (int)reg, (uint32_t)val); + } else { + xhci_kick_ep(xhci, reg, epid, streamid); + } + } +} + +static void xhci_cap_write(void *opaque, hwaddr addr, uint64_t val, + unsigned width) +{ + /* nothing */ +} + +static const MemoryRegionOps xhci_cap_ops = { + .read = xhci_cap_read, + .write = xhci_cap_write, + .valid.min_access_size = 1, + .valid.max_access_size = 4, + .impl.min_access_size = 4, + .impl.max_access_size = 4, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static const MemoryRegionOps xhci_oper_ops = { + .read = xhci_oper_read, + .write = xhci_oper_write, + .valid.min_access_size = 4, + .valid.max_access_size = sizeof(dma_addr_t), + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static const MemoryRegionOps xhci_port_ops = { + .read = xhci_port_read, + .write = xhci_port_write, + .valid.min_access_size = 4, + .valid.max_access_size = 4, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static const MemoryRegionOps xhci_runtime_ops = { + .read = xhci_runtime_read, + .write = xhci_runtime_write, + .valid.min_access_size = 4, + .valid.max_access_size = sizeof(dma_addr_t), + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static const MemoryRegionOps xhci_doorbell_ops = { + .read = xhci_doorbell_read, + .write = xhci_doorbell_write, + .valid.min_access_size = 4, + .valid.max_access_size = 4, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static void xhci_attach(USBPort *usbport) +{ + XHCIState *xhci = usbport->opaque; + XHCIPort *port = xhci_lookup_port(xhci, usbport); + + xhci_port_update(port, 0); +} + +static void xhci_detach(USBPort *usbport) +{ + XHCIState *xhci = usbport->opaque; + XHCIPort *port = xhci_lookup_port(xhci, usbport); + + xhci_detach_slot(xhci, usbport); + xhci_port_update(port, 1); +} + +static void xhci_wakeup(USBPort *usbport) +{ + XHCIState *xhci = usbport->opaque; + XHCIPort *port = xhci_lookup_port(xhci, usbport); + + assert(port); + if (get_field(port->portsc, PORTSC_PLS) != PLS_U3) { + return; + } + set_field(&port->portsc, 
PLS_RESUME, PORTSC_PLS); + xhci_port_notify(port, PORTSC_PLC); +} + +static void xhci_complete(USBPort *port, USBPacket *packet) +{ + XHCITransfer *xfer = container_of(packet, XHCITransfer, packet); + + if (packet->status == USB_RET_REMOVE_FROM_QUEUE) { + xhci_ep_nuke_one_xfer(xfer, 0); + return; + } + xhci_try_complete_packet(xfer); + xhci_kick_epctx(xfer->epctx, xfer->streamid); + if (xfer->complete) { + xhci_ep_free_xfer(xfer); + } +} + +static void xhci_child_detach(USBPort *uport, USBDevice *child) +{ + USBBus *bus = usb_bus_from_device(child); + XHCIState *xhci = container_of(bus, XHCIState, bus); + + xhci_detach_slot(xhci, child->port); +} + +static USBPortOps xhci_uport_ops = { + .attach = xhci_attach, + .detach = xhci_detach, + .wakeup = xhci_wakeup, + .complete = xhci_complete, + .child_detach = xhci_child_detach, +}; + +static int xhci_find_epid(USBEndpoint *ep) +{ + if (ep->nr == 0) { + return 1; + } + if (ep->pid == USB_TOKEN_IN) { + return ep->nr * 2 + 1; + } else { + return ep->nr * 2; + } +} + +static USBEndpoint *xhci_epid_to_usbep(XHCIEPContext *epctx) +{ + USBPort *uport; + uint32_t token; + + if (!epctx) { + return NULL; + } + uport = epctx->xhci->slots[epctx->slotid - 1].uport; + if (!uport || !uport->dev) { + return NULL; + } + token = (epctx->epid & 1) ? USB_TOKEN_IN : USB_TOKEN_OUT; + return usb_ep_get(uport->dev, token, epctx->epid >> 1); +} + +static void xhci_wakeup_endpoint(USBBus *bus, USBEndpoint *ep, + unsigned int stream) +{ + XHCIState *xhci = container_of(bus, XHCIState, bus); + int slotid; + + DPRINTF("%s\n", __func__); + slotid = ep->dev->addr; + if (slotid == 0 || !xhci->slots[slotid-1].enabled) { + DPRINTF("%s: oops, no slot for dev %d\n", __func__, ep->dev->addr); + return; + } + xhci_kick_ep(xhci, slotid, xhci_find_epid(ep), stream); +} + +static USBBusOps xhci_bus_ops = { + .wakeup_endpoint = xhci_wakeup_endpoint, +}; + +static void usb_xhci_init(XHCIState *xhci) +{ + XHCIPort *port; + unsigned int i, usbports, speedmask; + + xhci->usbsts = USBSTS_HCH; + + if (xhci->numports_2 > XHCI_MAXPORTS_2) { + xhci->numports_2 = XHCI_MAXPORTS_2; + } + if (xhci->numports_3 > XHCI_MAXPORTS_3) { + xhci->numports_3 = XHCI_MAXPORTS_3; + } + usbports = MAX(xhci->numports_2, xhci->numports_3); + xhci->numports = xhci->numports_2 + xhci->numports_3; + + usb_bus_new(&xhci->bus, sizeof(xhci->bus), &xhci_bus_ops, xhci->hostOpaque); + + for (i = 0; i < usbports; i++) { + speedmask = 0; + if (i < xhci->numports_2) { + if (xhci_get_flag(xhci, XHCI_FLAG_SS_FIRST)) { + port = &xhci->ports[i + xhci->numports_3]; + port->portnr = i + 1 + xhci->numports_3; + } else { + port = &xhci->ports[i]; + port->portnr = i + 1; + } + port->uport = &xhci->uports[i]; + port->speedmask = + USB_SPEED_MASK_LOW | + USB_SPEED_MASK_FULL | + USB_SPEED_MASK_HIGH; + assert(i < XHCI_MAXPORTS); + snprintf(port->name, sizeof(port->name), "usb2 port #%d", i+1); + speedmask |= port->speedmask; + } + if (i < xhci->numports_3) { + if (xhci_get_flag(xhci, XHCI_FLAG_SS_FIRST)) { + port = &xhci->ports[i]; + port->portnr = i + 1; + } else { + port = &xhci->ports[i + xhci->numports_2]; + port->portnr = i + 1 + xhci->numports_2; + } + port->uport = &xhci->uports[i]; + port->speedmask = USB_SPEED_MASK_SUPER; + assert(i < XHCI_MAXPORTS); + snprintf(port->name, sizeof(port->name), "usb3 port #%d", i+1); + speedmask |= port->speedmask; + } + usb_register_port(&xhci->bus, &xhci->uports[i], xhci, i, + &xhci_uport_ops, speedmask); + } +} + +static void usb_xhci_realize(DeviceState *dev, Error **errp) +{ + int i; + + 
XHCIState *xhci = XHCI(dev); + + if (xhci->numintrs > XHCI_MAXINTRS) { + xhci->numintrs = XHCI_MAXINTRS; + } + while (xhci->numintrs & (xhci->numintrs - 1)) { /* ! power of 2 */ + xhci->numintrs++; + } + if (xhci->numintrs < 1) { + xhci->numintrs = 1; + } + if (xhci->numslots > XHCI_MAXSLOTS) { + xhci->numslots = XHCI_MAXSLOTS; + } + if (xhci->numslots < 1) { + xhci->numslots = 1; + } + if (xhci_get_flag(xhci, XHCI_FLAG_ENABLE_STREAMS)) { + xhci->max_pstreams_mask = 7; /* == 256 primary streams */ + } else { + xhci->max_pstreams_mask = 0; + } + + usb_xhci_init(xhci); + xhci->mfwrap_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, xhci_mfwrap_timer, xhci); + + memory_region_init(&xhci->mem, OBJECT(dev), "xhci", XHCI_LEN_REGS); + memory_region_init_io(&xhci->mem_cap, OBJECT(dev), &xhci_cap_ops, xhci, + "capabilities", LEN_CAP); + memory_region_init_io(&xhci->mem_oper, OBJECT(dev), &xhci_oper_ops, xhci, + "operational", 0x400); + memory_region_init_io(&xhci->mem_runtime, OBJECT(dev), &xhci_runtime_ops, + xhci, "runtime", LEN_RUNTIME); + memory_region_init_io(&xhci->mem_doorbell, OBJECT(dev), &xhci_doorbell_ops, + xhci, "doorbell", LEN_DOORBELL); + + memory_region_add_subregion(&xhci->mem, 0, &xhci->mem_cap); + memory_region_add_subregion(&xhci->mem, OFF_OPER, &xhci->mem_oper); + memory_region_add_subregion(&xhci->mem, OFF_RUNTIME, &xhci->mem_runtime); + memory_region_add_subregion(&xhci->mem, OFF_DOORBELL, &xhci->mem_doorbell); + + for (i = 0; i < xhci->numports; i++) { + XHCIPort *port = &xhci->ports[i]; + uint32_t offset = OFF_OPER + 0x400 + 0x10 * i; + port->xhci = xhci; + memory_region_init_io(&port->mem, OBJECT(dev), &xhci_port_ops, port, + port->name, 0x10); + memory_region_add_subregion(&xhci->mem, offset, &port->mem); + } +} + +static void usb_xhci_unrealize(DeviceState *dev) +{ + int i; + XHCIState *xhci = XHCI(dev); + + trace_usb_xhci_exit(); + + for (i = 0; i < xhci->numslots; i++) { + xhci_disable_slot(xhci, i + 1); + } + + if (xhci->mfwrap_timer) { + timer_free(xhci->mfwrap_timer); + xhci->mfwrap_timer = NULL; + } + + memory_region_del_subregion(&xhci->mem, &xhci->mem_cap); + memory_region_del_subregion(&xhci->mem, &xhci->mem_oper); + memory_region_del_subregion(&xhci->mem, &xhci->mem_runtime); + memory_region_del_subregion(&xhci->mem, &xhci->mem_doorbell); + + for (i = 0; i < xhci->numports; i++) { + XHCIPort *port = &xhci->ports[i]; + memory_region_del_subregion(&xhci->mem, &port->mem); + } + + usb_bus_release(&xhci->bus); +} + +static int usb_xhci_post_load(void *opaque, int version_id) +{ + XHCIState *xhci = opaque; + XHCISlot *slot; + XHCIEPContext *epctx; + dma_addr_t dcbaap, pctx; + uint32_t slot_ctx[4]; + uint32_t ep_ctx[5]; + int slotid, epid, state; + + dcbaap = xhci_addr64(xhci->dcbaap_low, xhci->dcbaap_high); + + for (slotid = 1; slotid <= xhci->numslots; slotid++) { + slot = &xhci->slots[slotid-1]; + if (!slot->addressed) { + continue; + } + slot->ctx = + xhci_mask64(ldq_le_dma(xhci->as, dcbaap + 8 * slotid)); + xhci_dma_read_u32s(xhci, slot->ctx, slot_ctx, sizeof(slot_ctx)); + slot->uport = xhci_lookup_uport(xhci, slot_ctx); + if (!slot->uport) { + /* should not happen, but may trigger on guest bugs */ + slot->enabled = 0; + slot->addressed = 0; + continue; + } + assert(slot->uport && slot->uport->dev); + + for (epid = 1; epid <= 31; epid++) { + pctx = slot->ctx + 32 * epid; + xhci_dma_read_u32s(xhci, pctx, ep_ctx, sizeof(ep_ctx)); + state = ep_ctx[0] & EP_STATE_MASK; + if (state == EP_DISABLED) { + continue; + } + epctx = xhci_alloc_epctx(xhci, slotid, epid); + 
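/* reattach the endpoint context rebuilt from the guest-visible output context */ +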
slot->eps[epid-1] = epctx; + xhci_init_epctx(epctx, pctx, ep_ctx); + epctx->state = state; + if (state == EP_RUNNING) { + /* kick endpoint after vmload is finished */ + timer_mod(epctx->kick_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL)); + } + } + } + return 0; +} + +static const VMStateDescription vmstate_xhci_ring = { + .name = "xhci-ring", + .version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT64(dequeue, XHCIRing), + VMSTATE_BOOL(ccs, XHCIRing), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_xhci_port = { + .name = "xhci-port", + .version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(portsc, XHCIPort), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_xhci_slot = { + .name = "xhci-slot", + .version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_BOOL(enabled, XHCISlot), + VMSTATE_BOOL(addressed, XHCISlot), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_xhci_event = { + .name = "xhci-event", + .version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(type, XHCIEvent), + VMSTATE_UINT32(ccode, XHCIEvent), + VMSTATE_UINT64(ptr, XHCIEvent), + VMSTATE_UINT32(length, XHCIEvent), + VMSTATE_UINT32(flags, XHCIEvent), + VMSTATE_UINT8(slotid, XHCIEvent), + VMSTATE_UINT8(epid, XHCIEvent), + VMSTATE_END_OF_LIST() + } +}; + +static bool xhci_er_full(void *opaque, int version_id) +{ + return false; +} + +static const VMStateDescription vmstate_xhci_intr = { + .name = "xhci-intr", + .version_id = 1, + .fields = (VMStateField[]) { + /* registers */ + VMSTATE_UINT32(iman, XHCIInterrupter), + VMSTATE_UINT32(imod, XHCIInterrupter), + VMSTATE_UINT32(erstsz, XHCIInterrupter), + VMSTATE_UINT32(erstba_low, XHCIInterrupter), + VMSTATE_UINT32(erstba_high, XHCIInterrupter), + VMSTATE_UINT32(erdp_low, XHCIInterrupter), + VMSTATE_UINT32(erdp_high, XHCIInterrupter), + + /* state */ + VMSTATE_BOOL(msix_used, XHCIInterrupter), + VMSTATE_BOOL(er_pcs, XHCIInterrupter), + VMSTATE_UINT64(er_start, XHCIInterrupter), + VMSTATE_UINT32(er_size, XHCIInterrupter), + VMSTATE_UINT32(er_ep_idx, XHCIInterrupter), + + /* event queue (used if ring is full) */ + VMSTATE_BOOL(er_full_unused, XHCIInterrupter), + VMSTATE_UINT32_TEST(ev_buffer_put, XHCIInterrupter, xhci_er_full), + VMSTATE_UINT32_TEST(ev_buffer_get, XHCIInterrupter, xhci_er_full), + VMSTATE_STRUCT_ARRAY_TEST(ev_buffer, XHCIInterrupter, EV_QUEUE, + xhci_er_full, 1, + vmstate_xhci_event, XHCIEvent), + + VMSTATE_END_OF_LIST() + } +}; + +const VMStateDescription vmstate_xhci = { + .name = "xhci-core", + .version_id = 1, + .post_load = usb_xhci_post_load, + .fields = (VMStateField[]) { + VMSTATE_STRUCT_VARRAY_UINT32(ports, XHCIState, numports, 1, + vmstate_xhci_port, XHCIPort), + VMSTATE_STRUCT_VARRAY_UINT32(slots, XHCIState, numslots, 1, + vmstate_xhci_slot, XHCISlot), + VMSTATE_STRUCT_VARRAY_UINT32(intr, XHCIState, numintrs, 1, + vmstate_xhci_intr, XHCIInterrupter), + + /* Operational Registers */ + VMSTATE_UINT32(usbcmd, XHCIState), + VMSTATE_UINT32(usbsts, XHCIState), + VMSTATE_UINT32(dnctrl, XHCIState), + VMSTATE_UINT32(crcr_low, XHCIState), + VMSTATE_UINT32(crcr_high, XHCIState), + VMSTATE_UINT32(dcbaap_low, XHCIState), + VMSTATE_UINT32(dcbaap_high, XHCIState), + VMSTATE_UINT32(config, XHCIState), + + /* Runtime Registers & state */ + VMSTATE_INT64(mfindex_start, XHCIState), + VMSTATE_TIMER_PTR(mfwrap_timer, XHCIState), + VMSTATE_STRUCT(cmd_ring, XHCIState, 1, vmstate_xhci_ring, XHCIRing), + + VMSTATE_END_OF_LIST() + } +}; + +static Property xhci_properties[] = { + 
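+    /*
+     * Each DEFINE_PROP_* entry maps a QOM property name onto an
+     * XHCIState field; the values land in the struct before
+     * usb_xhci_realize() runs, which then clamps them.  As a rough
+     * sketch (illustrative only, assuming an embedding device that
+     * holds a pointer "xhci" to this core), the port counts could be
+     * configured with:
+     *
+     *     object_property_set_uint(OBJECT(xhci), "p2", 4, &error_abort);
+     *     object_property_set_uint(OBJECT(xhci), "p3", 4, &error_abort);
+     */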
DEFINE_PROP_BIT("streams", XHCIState, flags, + XHCI_FLAG_ENABLE_STREAMS, true), + DEFINE_PROP_UINT32("p2", XHCIState, numports_2, 4), + DEFINE_PROP_UINT32("p3", XHCIState, numports_3, 4), + DEFINE_PROP_LINK("host", XHCIState, hostOpaque, TYPE_DEVICE, + DeviceState *), + DEFINE_PROP_END_OF_LIST(), +}; + +static void xhci_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = usb_xhci_realize; + dc->unrealize = usb_xhci_unrealize; + dc->reset = xhci_reset; + device_class_set_props(dc, xhci_properties); + dc->user_creatable = false; +} + +static const TypeInfo xhci_info = { + .name = TYPE_XHCI, + .parent = TYPE_DEVICE, + .instance_size = sizeof(XHCIState), + .class_init = xhci_class_init, +}; + +static void xhci_register_types(void) +{ + type_register_static(&xhci_info); +} + +type_init(xhci_register_types) diff --git a/hw/usb/hcd-xhci.h b/hw/usb/hcd-xhci.h new file mode 100644 index 000000000..98f598382 --- /dev/null +++ b/hw/usb/hcd-xhci.h @@ -0,0 +1,228 @@ +/* + * USB xHCI controller emulation + * + * Copyright (c) 2011 Securiforest + * Date: 2011-05-11 ; Author: Hector Martin + * Based on usb-ohci.c, emulates Renesas NEC USB 3.0 + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */ + +#ifndef HW_USB_HCD_XHCI_H +#define HW_USB_HCD_XHCI_H +#include "qom/object.h" + +#include "hw/usb.h" +#include "hw/usb/xhci.h" +#include "sysemu/dma.h" + +OBJECT_DECLARE_SIMPLE_TYPE(XHCIState, XHCI) + +/* Very pessimistic, let's hope it's enough for all cases */ +#define EV_QUEUE (((3 * 24) + 16) * XHCI_MAXSLOTS) + +typedef struct XHCIStreamContext XHCIStreamContext; +typedef struct XHCIEPContext XHCIEPContext; + +enum xhci_flags { + XHCI_FLAG_SS_FIRST = 1, + XHCI_FLAG_FORCE_PCIE_ENDCAP, + XHCI_FLAG_ENABLE_STREAMS, +}; + +typedef enum TRBType { + TRB_RESERVED = 0, + TR_NORMAL, + TR_SETUP, + TR_DATA, + TR_STATUS, + TR_ISOCH, + TR_LINK, + TR_EVDATA, + TR_NOOP, + CR_ENABLE_SLOT, + CR_DISABLE_SLOT, + CR_ADDRESS_DEVICE, + CR_CONFIGURE_ENDPOINT, + CR_EVALUATE_CONTEXT, + CR_RESET_ENDPOINT, + CR_STOP_ENDPOINT, + CR_SET_TR_DEQUEUE, + CR_RESET_DEVICE, + CR_FORCE_EVENT, + CR_NEGOTIATE_BW, + CR_SET_LATENCY_TOLERANCE, + CR_GET_PORT_BANDWIDTH, + CR_FORCE_HEADER, + CR_NOOP, + ER_TRANSFER = 32, + ER_COMMAND_COMPLETE, + ER_PORT_STATUS_CHANGE, + ER_BANDWIDTH_REQUEST, + ER_DOORBELL, + ER_HOST_CONTROLLER, + ER_DEVICE_NOTIFICATION, + ER_MFINDEX_WRAP, + /* vendor specific bits */ + CR_VENDOR_NEC_FIRMWARE_REVISION = 49, + CR_VENDOR_NEC_CHALLENGE_RESPONSE = 50, +} TRBType; + +typedef enum TRBCCode { + CC_INVALID = 0, + CC_SUCCESS, + CC_DATA_BUFFER_ERROR, + CC_BABBLE_DETECTED, + CC_USB_TRANSACTION_ERROR, + CC_TRB_ERROR, + CC_STALL_ERROR, + CC_RESOURCE_ERROR, + CC_BANDWIDTH_ERROR, + CC_NO_SLOTS_ERROR, + CC_INVALID_STREAM_TYPE_ERROR, + CC_SLOT_NOT_ENABLED_ERROR, + CC_EP_NOT_ENABLED_ERROR, + CC_SHORT_PACKET, + CC_RING_UNDERRUN, + CC_RING_OVERRUN, + CC_VF_ER_FULL, + CC_PARAMETER_ERROR, + CC_BANDWIDTH_OVERRUN, + CC_CONTEXT_STATE_ERROR, + CC_NO_PING_RESPONSE_ERROR, + CC_EVENT_RING_FULL_ERROR, + CC_INCOMPATIBLE_DEVICE_ERROR, + CC_MISSED_SERVICE_ERROR, + CC_COMMAND_RING_STOPPED, + CC_COMMAND_ABORTED, + CC_STOPPED, + CC_STOPPED_LENGTH_INVALID, + CC_MAX_EXIT_LATENCY_TOO_LARGE_ERROR = 29, + CC_ISOCH_BUFFER_OVERRUN = 31, + CC_EVENT_LOST_ERROR, + CC_UNDEFINED_ERROR, + CC_INVALID_STREAM_ID_ERROR, + CC_SECONDARY_BANDWIDTH_ERROR, + CC_SPLIT_TRANSACTION_ERROR +} TRBCCode; + +typedef struct XHCIRing { + dma_addr_t dequeue; + bool ccs; +} XHCIRing; + +typedef struct XHCIPort { + XHCIState *xhci; + uint32_t portsc; + uint32_t portnr; + USBPort *uport; + uint32_t speedmask; + char name[20]; + MemoryRegion mem; +} XHCIPort; + +typedef struct XHCISlot { + bool enabled; + bool addressed; + uint16_t intr; + dma_addr_t ctx; + USBPort *uport; + XHCIEPContext *eps[31]; +} XHCISlot; + +typedef struct XHCIEvent { + TRBType type; + TRBCCode ccode; + uint64_t ptr; + uint32_t length; + uint32_t flags; + uint8_t slotid; + uint8_t epid; +} XHCIEvent; + +typedef struct XHCIInterrupter { + uint32_t iman; + uint32_t imod; + uint32_t erstsz; + uint32_t erstba_low; + uint32_t erstba_high; + uint32_t erdp_low; + uint32_t erdp_high; + + bool msix_used, er_pcs; + + dma_addr_t er_start; + uint32_t er_size; + unsigned int er_ep_idx; + + /* kept for live migration compat only */ + bool er_full_unused; + XHCIEvent ev_buffer[EV_QUEUE]; + unsigned int ev_buffer_put; + unsigned int ev_buffer_get; + +} XHCIInterrupter; + +typedef struct XHCIState { + DeviceState parent; + + USBBus bus; + MemoryRegion mem; + MemoryRegion *dma_mr; + AddressSpace *as; + MemoryRegion mem_cap; + MemoryRegion mem_oper; + MemoryRegion mem_runtime; + MemoryRegion mem_doorbell; + + /* properties */ + uint32_t numports_2; + uint32_t numports_3; + uint32_t numintrs; + uint32_t 
numslots; + uint32_t flags; + uint32_t max_pstreams_mask; + void (*intr_update)(XHCIState *s, int n, bool enable); + bool (*intr_raise)(XHCIState *s, int n, bool level); + DeviceState *hostOpaque; + + /* Operational Registers */ + uint32_t usbcmd; + uint32_t usbsts; + uint32_t dnctrl; + uint32_t crcr_low; + uint32_t crcr_high; + uint32_t dcbaap_low; + uint32_t dcbaap_high; + uint32_t config; + + USBPort uports[MAX_CONST(XHCI_MAXPORTS_2, XHCI_MAXPORTS_3)]; + XHCIPort ports[XHCI_MAXPORTS]; + XHCISlot slots[XHCI_MAXSLOTS]; + uint32_t numports; + + /* Runtime Registers */ + int64_t mfindex_start; + QEMUTimer *mfwrap_timer; + XHCIInterrupter intr[XHCI_MAXINTRS]; + + XHCIRing cmd_ring; + + bool nec_quirks; +} XHCIState; + +extern const VMStateDescription vmstate_xhci; +bool xhci_get_flag(XHCIState *xhci, enum xhci_flags bit); +void xhci_set_flag(XHCIState *xhci, enum xhci_flags bit); +#endif diff --git a/hw/usb/host-libusb.c b/hw/usb/host-libusb.c new file mode 100644 index 000000000..d0d46dd0a --- /dev/null +++ b/hw/usb/host-libusb.c @@ -0,0 +1,1978 @@ +/* + * Linux host USB redirector + * + * Copyright (c) 2005 Fabrice Bellard + * + * Copyright (c) 2008 Max Krasnyansky + * Support for host device auto connect & disconnect + * Major rewrite to support fully async operation + * + * Copyright 2008 TJ + * Added flexible support for /dev/bus/usb /sys/bus/usb/devices in addition + * to the legacy /proc/bus/usb USB device discovery and handling + * + * (c) 2012 Gerd Hoffmann + * Completely rewritten to use libusb instead of usbfs ioctls. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include "qom/object.h" +#ifndef CONFIG_WIN32 +#include <poll.h> +#endif +#include <libusb.h> + +#ifdef CONFIG_LINUX +#include <sys/ioctl.h> +#include <linux/usbdevice_fs.h> +#endif + +#include "qapi/error.h" +#include "migration/vmstate.h" +#include "monitor/monitor.h" +#include "qemu/error-report.h" +#include "qemu/main-loop.h" +#include "qemu/module.h" +#include "sysemu/runstate.h" +#include "sysemu/sysemu.h" +#include "trace.h" + +#include "hw/qdev-properties.h" +#include "hw/usb.h" + +/* ------------------------------------------------------------------------ */ + +#define TYPE_USB_HOST_DEVICE "usb-host" +OBJECT_DECLARE_SIMPLE_TYPE(USBHostDevice, USB_HOST_DEVICE) + +typedef struct USBHostRequest USBHostRequest; +typedef struct USBHostIsoXfer USBHostIsoXfer; +typedef struct USBHostIsoRing USBHostIsoRing; + +struct USBAutoFilter { + uint32_t bus_num; + uint32_t addr; + char *port; + uint32_t vendor_id; + uint32_t product_id; +}; + +enum USBHostDeviceOptions { + USB_HOST_OPT_PIPELINE, +}; + +struct USBHostDevice { + USBDevice parent_obj; + + /* properties */ + struct USBAutoFilter match; + char *hostdevice; + int32_t bootindex; + uint32_t iso_urb_count; + uint32_t iso_urb_frames; + uint32_t options; + uint32_t loglevel; + bool needs_autoscan; + bool allow_one_guest_reset; + bool allow_all_guest_resets; + bool suppress_remote_wake; + + /* state */ + QTAILQ_ENTRY(USBHostDevice) next; + int seen, errcount; + int bus_num; + int addr; + char port[16]; + + int hostfd; + libusb_device *dev; + libusb_device_handle *dh; + struct libusb_device_descriptor ddesc; + + struct { + bool detached; + bool claimed; + } ifs[USB_MAX_INTERFACES]; + + /* callbacks & friends */ + QEMUBH *bh_nodev; + QEMUBH *bh_postld; + bool bh_postld_pending; + Notifier exit; + + /* request queues */ + QTAILQ_HEAD(, USBHostRequest) requests; + QTAILQ_HEAD(, USBHostIsoRing) isorings; +}; + +struct USBHostRequest { + USBHostDevice *host; + USBPacket *p; + bool in; + struct libusb_transfer *xfer; + unsigned char *buffer; + unsigned char *cbuf; + unsigned int clen; + bool usb3ep0quirk; + QTAILQ_ENTRY(USBHostRequest) next; +}; + +struct USBHostIsoXfer { + USBHostIsoRing *ring; + struct libusb_transfer *xfer; + bool copy_complete; + unsigned int packet; + QTAILQ_ENTRY(USBHostIsoXfer) next; +}; + +struct USBHostIsoRing { + USBHostDevice *host; + USBEndpoint *ep; + QTAILQ_HEAD(, USBHostIsoXfer) unused; + QTAILQ_HEAD(, USBHostIsoXfer) inflight; + QTAILQ_HEAD(, USBHostIsoXfer) copy; + QTAILQ_ENTRY(USBHostIsoRing) next; +}; + +static QTAILQ_HEAD(, USBHostDevice) hostdevs = + QTAILQ_HEAD_INITIALIZER(hostdevs); + +static void usb_host_auto_check(void *unused); +static void usb_host_release_interfaces(USBHostDevice *s); +static void usb_host_nodev(USBHostDevice *s); +static void usb_host_detach_kernel(USBHostDevice *s); +static void usb_host_attach_kernel(USBHostDevice *s); + +/* ------------------------------------------------------------------------ */ + +#ifndef LIBUSB_LOG_LEVEL_WARNING /* older libusb didn't define these */ +#define LIBUSB_LOG_LEVEL_WARNING 2 +#endif + +/* ------------------------------------------------------------------------ */ + +#define CONTROL_TIMEOUT 10000 /* 10 sec */ +#define BULK_TIMEOUT 0 /* unlimited */ +#define INTR_TIMEOUT 0 /* unlimited */ + +#ifndef LIBUSB_API_VERSION +# define LIBUSB_API_VERSION LIBUSBX_API_VERSION +#endif +#if LIBUSB_API_VERSION >= 0x01000103 +# define HAVE_STREAMS 1 +#endif +#if LIBUSB_API_VERSION >= 0x01000106 +# define HAVE_SUPER_PLUS 1 +#endif + +static const char *speed_name[] = { +
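+    /*
+     * These lookup tables are indexed directly by libusb enum values,
+     * hence the designated initializers.  For example,
+     * speed_name[LIBUSB_SPEED_HIGH] is "480" -- the Mb/s figure that
+     * hmp_info_usbhost prints at the end of this file -- and
+     * err_names[-LIBUSB_ERROR_ACCESS] works because libusb error codes
+     * are small negative integers.
+     */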
[LIBUSB_SPEED_UNKNOWN] = "?", + [LIBUSB_SPEED_LOW] = "1.5", + [LIBUSB_SPEED_FULL] = "12", + [LIBUSB_SPEED_HIGH] = "480", + [LIBUSB_SPEED_SUPER] = "5000", +#ifdef HAVE_SUPER_PLUS + [LIBUSB_SPEED_SUPER_PLUS] = "5000+", +#endif +}; + +static const unsigned int speed_map[] = { + [LIBUSB_SPEED_LOW] = USB_SPEED_LOW, + [LIBUSB_SPEED_FULL] = USB_SPEED_FULL, + [LIBUSB_SPEED_HIGH] = USB_SPEED_HIGH, + [LIBUSB_SPEED_SUPER] = USB_SPEED_SUPER, +#ifdef HAVE_SUPER_PLUS + [LIBUSB_SPEED_SUPER_PLUS] = USB_SPEED_SUPER, +#endif +}; + +static const unsigned int status_map[] = { + [LIBUSB_TRANSFER_COMPLETED] = USB_RET_SUCCESS, + [LIBUSB_TRANSFER_ERROR] = USB_RET_IOERROR, + [LIBUSB_TRANSFER_TIMED_OUT] = USB_RET_IOERROR, + [LIBUSB_TRANSFER_CANCELLED] = USB_RET_IOERROR, + [LIBUSB_TRANSFER_STALL] = USB_RET_STALL, + [LIBUSB_TRANSFER_NO_DEVICE] = USB_RET_NODEV, + [LIBUSB_TRANSFER_OVERFLOW] = USB_RET_BABBLE, +}; + +static const char *err_names[] = { + [-LIBUSB_ERROR_IO] = "IO", + [-LIBUSB_ERROR_INVALID_PARAM] = "INVALID_PARAM", + [-LIBUSB_ERROR_ACCESS] = "ACCESS", + [-LIBUSB_ERROR_NO_DEVICE] = "NO_DEVICE", + [-LIBUSB_ERROR_NOT_FOUND] = "NOT_FOUND", + [-LIBUSB_ERROR_BUSY] = "BUSY", + [-LIBUSB_ERROR_TIMEOUT] = "TIMEOUT", + [-LIBUSB_ERROR_OVERFLOW] = "OVERFLOW", + [-LIBUSB_ERROR_PIPE] = "PIPE", + [-LIBUSB_ERROR_INTERRUPTED] = "INTERRUPTED", + [-LIBUSB_ERROR_NO_MEM] = "NO_MEM", + [-LIBUSB_ERROR_NOT_SUPPORTED] = "NOT_SUPPORTED", + [-LIBUSB_ERROR_OTHER] = "OTHER", +}; + +static libusb_context *ctx; +static uint32_t loglevel; + +#ifndef CONFIG_WIN32 + +static void usb_host_handle_fd(void *opaque) +{ + struct timeval tv = { 0, 0 }; + libusb_handle_events_timeout(ctx, &tv); +} + +static void usb_host_add_fd(int fd, short events, void *user_data) +{ + qemu_set_fd_handler(fd, + (events & POLLIN) ? usb_host_handle_fd : NULL, + (events & POLLOUT) ? usb_host_handle_fd : NULL, + ctx); +} + +static void usb_host_del_fd(int fd, void *user_data) +{ + qemu_set_fd_handler(fd, NULL, NULL, NULL); +} + +#else + +static QEMUTimer *poll_timer; +static uint32_t request_count; + +static void usb_host_timer_kick(void) +{ + int64_t delay_ns; + + delay_ns = request_count + ? 
(NANOSECONDS_PER_SECOND / 100) /* 10 ms interval with active req */ + : (NANOSECONDS_PER_SECOND); /* 1 sec interval otherwise */ + timer_mod(poll_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + delay_ns); +} + +static void usb_host_timer(void *opaque) +{ + struct timeval tv = { 0, 0 }; + + libusb_handle_events_timeout(ctx, &tv); + usb_host_timer_kick(); +} + +#endif /* !CONFIG_WIN32 */ + +static int usb_host_init(void) +{ +#ifndef CONFIG_WIN32 + const struct libusb_pollfd **poll; +#endif + int rc; + + if (ctx) { + return 0; + } + rc = libusb_init(&ctx); + if (rc != 0) { + return -1; + } +#if LIBUSB_API_VERSION >= 0x01000106 + libusb_set_option(ctx, LIBUSB_OPTION_LOG_LEVEL, loglevel); +#else + libusb_set_debug(ctx, loglevel); +#endif +#ifdef CONFIG_WIN32 + poll_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, usb_host_timer, NULL); + usb_host_timer_kick(); +#else + libusb_set_pollfd_notifiers(ctx, usb_host_add_fd, + usb_host_del_fd, + ctx); + poll = libusb_get_pollfds(ctx); + if (poll) { + int i; + for (i = 0; poll[i] != NULL; i++) { + usb_host_add_fd(poll[i]->fd, poll[i]->events, ctx); + } + } + free(poll); +#endif + return 0; +} + +static int usb_host_get_port(libusb_device *dev, char *port, size_t len) +{ + uint8_t path[7]; + size_t off; + int rc, i; + +#if LIBUSB_API_VERSION >= 0x01000102 + rc = libusb_get_port_numbers(dev, path, 7); +#else + rc = libusb_get_port_path(ctx, dev, path, 7); +#endif + if (rc < 0) { + return 0; + } + off = snprintf(port, len, "%d", path[0]); + for (i = 1; i < rc; i++) { + off += snprintf(port+off, len-off, ".%d", path[i]); + } + return off; +} + +static void usb_host_libusb_error(const char *func, int rc) +{ + const char *errname; + + if (rc >= 0) { + return; + } + + if (-rc < ARRAY_SIZE(err_names) && err_names[-rc]) { + errname = err_names[-rc]; + } else { + errname = "?"; + } + error_report("%s: %d [%s]", func, rc, errname); +} + +/* ------------------------------------------------------------------------ */ + +static bool usb_host_use_combining(USBEndpoint *ep) +{ + int type; + + if (!ep->pipeline) { + return false; + } + if (ep->pid != USB_TOKEN_IN) { + return false; + } + type = usb_ep_get_type(ep->dev, ep->pid, ep->nr); + if (type != USB_ENDPOINT_XFER_BULK) { + return false; + } + return true; +} + +/* ------------------------------------------------------------------------ */ + +static USBHostRequest *usb_host_req_alloc(USBHostDevice *s, USBPacket *p, + bool in, size_t bufsize) +{ + USBHostRequest *r = g_new0(USBHostRequest, 1); + + r->host = s; + r->p = p; + r->in = in; + r->xfer = libusb_alloc_transfer(0); + if (bufsize) { + r->buffer = g_malloc(bufsize); + } + QTAILQ_INSERT_TAIL(&s->requests, r, next); +#ifdef CONFIG_WIN32 + request_count++; + usb_host_timer_kick(); +#endif + return r; +} + +static void usb_host_req_free(USBHostRequest *r) +{ +#ifdef CONFIG_WIN32 + request_count--; +#endif + QTAILQ_REMOVE(&r->host->requests, r, next); + libusb_free_transfer(r->xfer); + g_free(r->buffer); + g_free(r); +} + +static USBHostRequest *usb_host_req_find(USBHostDevice *s, USBPacket *p) +{ + USBHostRequest *r; + + QTAILQ_FOREACH(r, &s->requests, next) { + if (r->p == p) { + return r; + } + } + return NULL; +} + +static void LIBUSB_CALL usb_host_req_complete_ctrl(struct libusb_transfer *xfer) +{ + USBHostRequest *r = xfer->user_data; + USBHostDevice *s = r->host; + bool disconnect = (xfer->status == LIBUSB_TRANSFER_NO_DEVICE); + + if (r->p == NULL) { + goto out; /* request was canceled */ + } + + r->p->status = status_map[xfer->status]; + r->p->actual_length = 
xfer->actual_length; + if (r->in && xfer->actual_length) { + USBDevice *udev = USB_DEVICE(s); + struct libusb_config_descriptor *conf = (void *)r->cbuf; + memcpy(r->cbuf, r->buffer + 8, xfer->actual_length); + + /* Fix up USB-3 ep0 maxpacket size to allow superspeed connected devices + * to work redirected to a not superspeed capable hcd */ + if (r->usb3ep0quirk && xfer->actual_length >= 18 && + r->cbuf[7] == 9) { + r->cbuf[7] = 64; + } + /* + * If this is a GET_DESCRIPTOR request for a configuration descriptor, + * remove the 'remote wakeup' flag from it to prevent idle power down + * in a Windows guest + */ + if (s->suppress_remote_wake && + udev->setup_buf[0] == USB_DIR_IN && + udev->setup_buf[1] == USB_REQ_GET_DESCRIPTOR && + udev->setup_buf[3] == USB_DT_CONFIG && udev->setup_buf[2] == 0 && + xfer->actual_length > + offsetof(struct libusb_config_descriptor, bmAttributes) && + (conf->bmAttributes & USB_CFG_ATT_WAKEUP)) { + trace_usb_host_remote_wakeup_removed(s->bus_num, s->addr); + conf->bmAttributes &= ~USB_CFG_ATT_WAKEUP; + } + } + trace_usb_host_req_complete(s->bus_num, s->addr, r->p, + r->p->status, r->p->actual_length); + usb_generic_async_ctrl_complete(USB_DEVICE(s), r->p); + +out: + usb_host_req_free(r); + if (disconnect) { + usb_host_nodev(s); + } +} + +static void LIBUSB_CALL usb_host_req_complete_data(struct libusb_transfer *xfer) +{ + USBHostRequest *r = xfer->user_data; + USBHostDevice *s = r->host; + bool disconnect = (xfer->status == LIBUSB_TRANSFER_NO_DEVICE); + + if (r->p == NULL) { + goto out; /* request was canceled */ + } + + r->p->status = status_map[xfer->status]; + if (r->in && xfer->actual_length) { + usb_packet_copy(r->p, r->buffer, xfer->actual_length); + } + trace_usb_host_req_complete(s->bus_num, s->addr, r->p, + r->p->status, r->p->actual_length); + if (usb_host_use_combining(r->p->ep)) { + usb_combined_input_packet_complete(USB_DEVICE(s), r->p); + } else { + usb_packet_complete(USB_DEVICE(s), r->p); + } + +out: + usb_host_req_free(r); + if (disconnect) { + usb_host_nodev(s); + } +} + +static void usb_host_req_abort(USBHostRequest *r) +{ + USBHostDevice *s = r->host; + bool inflight = (r->p && r->p->state == USB_PACKET_ASYNC); + + if (inflight) { + r->p->status = USB_RET_NODEV; + trace_usb_host_req_complete(s->bus_num, s->addr, r->p, + r->p->status, r->p->actual_length); + if (r->p->ep->nr == 0) { + usb_generic_async_ctrl_complete(USB_DEVICE(s), r->p); + } else { + usb_packet_complete(USB_DEVICE(s), r->p); + } + r->p = NULL; + + libusb_cancel_transfer(r->xfer); + } +} + +/* ------------------------------------------------------------------------ */ + +static void LIBUSB_CALL +usb_host_req_complete_iso(struct libusb_transfer *transfer) +{ + USBHostIsoXfer *xfer = transfer->user_data; + + if (!xfer) { + /* USBHostIsoXfer released while inflight */ + g_free(transfer->buffer); + libusb_free_transfer(transfer); + return; + } + + QTAILQ_REMOVE(&xfer->ring->inflight, xfer, next); + if (QTAILQ_EMPTY(&xfer->ring->inflight)) { + USBHostDevice *s = xfer->ring->host; + trace_usb_host_iso_stop(s->bus_num, s->addr, xfer->ring->ep->nr); + } + if (xfer->ring->ep->pid == USB_TOKEN_IN) { + QTAILQ_INSERT_TAIL(&xfer->ring->copy, xfer, next); + usb_wakeup(xfer->ring->ep, 0); + } else { + QTAILQ_INSERT_TAIL(&xfer->ring->unused, xfer, next); + } +} + +static USBHostIsoRing *usb_host_iso_alloc(USBHostDevice *s, USBEndpoint *ep) +{ + USBHostIsoRing *ring = g_new0(USBHostIsoRing, 1); + USBHostIsoXfer *xfer; + /* FIXME: check interval (for now assume one xfer per frame) */ + int packets = 
s->iso_urb_frames; + int i; + + ring->host = s; + ring->ep = ep; + QTAILQ_INIT(&ring->unused); + QTAILQ_INIT(&ring->inflight); + QTAILQ_INIT(&ring->copy); + QTAILQ_INSERT_TAIL(&s->isorings, ring, next); + + for (i = 0; i < s->iso_urb_count; i++) { + xfer = g_new0(USBHostIsoXfer, 1); + xfer->ring = ring; + xfer->xfer = libusb_alloc_transfer(packets); + xfer->xfer->dev_handle = s->dh; + xfer->xfer->type = LIBUSB_TRANSFER_TYPE_ISOCHRONOUS; + + xfer->xfer->endpoint = ring->ep->nr; + if (ring->ep->pid == USB_TOKEN_IN) { + xfer->xfer->endpoint |= USB_DIR_IN; + } + xfer->xfer->callback = usb_host_req_complete_iso; + xfer->xfer->user_data = xfer; + + xfer->xfer->num_iso_packets = packets; + xfer->xfer->length = ring->ep->max_packet_size * packets; + xfer->xfer->buffer = g_malloc0(xfer->xfer->length); + + QTAILQ_INSERT_TAIL(&ring->unused, xfer, next); + } + + return ring; +} + +static USBHostIsoRing *usb_host_iso_find(USBHostDevice *s, USBEndpoint *ep) +{ + USBHostIsoRing *ring; + + QTAILQ_FOREACH(ring, &s->isorings, next) { + if (ring->ep == ep) { + return ring; + } + } + return NULL; +} + +static void usb_host_iso_reset_xfer(USBHostIsoXfer *xfer) +{ + libusb_set_iso_packet_lengths(xfer->xfer, + xfer->ring->ep->max_packet_size); + xfer->packet = 0; + xfer->copy_complete = false; +} + +static void usb_host_iso_free_xfer(USBHostIsoXfer *xfer, bool inflight) +{ + if (inflight) { + xfer->xfer->user_data = NULL; + } else { + g_free(xfer->xfer->buffer); + libusb_free_transfer(xfer->xfer); + } + g_free(xfer); +} + +static void usb_host_iso_free(USBHostIsoRing *ring) +{ + USBHostIsoXfer *xfer; + + while ((xfer = QTAILQ_FIRST(&ring->inflight)) != NULL) { + QTAILQ_REMOVE(&ring->inflight, xfer, next); + usb_host_iso_free_xfer(xfer, true); + } + while ((xfer = QTAILQ_FIRST(&ring->unused)) != NULL) { + QTAILQ_REMOVE(&ring->unused, xfer, next); + usb_host_iso_free_xfer(xfer, false); + } + while ((xfer = QTAILQ_FIRST(&ring->copy)) != NULL) { + QTAILQ_REMOVE(&ring->copy, xfer, next); + usb_host_iso_free_xfer(xfer, false); + } + + QTAILQ_REMOVE(&ring->host->isorings, ring, next); + g_free(ring); +} + +static void usb_host_iso_free_all(USBHostDevice *s) +{ + USBHostIsoRing *ring; + + while ((ring = QTAILQ_FIRST(&s->isorings)) != NULL) { + usb_host_iso_free(ring); + } +} + +static bool usb_host_iso_data_copy(USBHostIsoXfer *xfer, USBPacket *p) +{ + unsigned int psize; + unsigned char *buf; + + buf = libusb_get_iso_packet_buffer_simple(xfer->xfer, xfer->packet); + if (p->pid == USB_TOKEN_OUT) { + psize = p->iov.size; + if (psize > xfer->ring->ep->max_packet_size) { + /* should not happen (guest bug) */ + psize = xfer->ring->ep->max_packet_size; + } + xfer->xfer->iso_packet_desc[xfer->packet].length = psize; + } else { + psize = xfer->xfer->iso_packet_desc[xfer->packet].actual_length; + if (psize > p->iov.size) { + /* should not happen (guest bug) */ + psize = p->iov.size; + } + } + usb_packet_copy(p, buf, psize); + xfer->packet++; + xfer->copy_complete = (xfer->packet == xfer->xfer->num_iso_packets); + return xfer->copy_complete; +} + +static void usb_host_iso_data_in(USBHostDevice *s, USBPacket *p) +{ + USBHostIsoRing *ring; + USBHostIsoXfer *xfer; + bool disconnect = false; + int rc; + + ring = usb_host_iso_find(s, p->ep); + if (ring == NULL) { + ring = usb_host_iso_alloc(s, p->ep); + } + + /* copy data to guest */ + xfer = QTAILQ_FIRST(&ring->copy); + if (xfer != NULL) { + if (usb_host_iso_data_copy(xfer, p)) { + QTAILQ_REMOVE(&ring->copy, xfer, next); + QTAILQ_INSERT_TAIL(&ring->unused, xfer, next); + } + } + + 
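+    /*
+     * For orientation, an iso buffer cycles through the ring's three
+     * queues like this:
+     *
+     *     unused   --submit-->   inflight   (URB owned by libusb)
+     *     inflight --complete--> copy       (IN: data waits for guest)
+     *     copy     --drained-->  unused     (ready for resubmission)
+     *
+     * The loop below keeps the host side fed with empty buffers while
+     * the guest drains the "copy" queue above.
+     */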
/* submit empty bufs to host */ + while ((xfer = QTAILQ_FIRST(&ring->unused)) != NULL) { + QTAILQ_REMOVE(&ring->unused, xfer, next); + usb_host_iso_reset_xfer(xfer); + rc = libusb_submit_transfer(xfer->xfer); + if (rc != 0) { + usb_host_libusb_error("libusb_submit_transfer [iso]", rc); + QTAILQ_INSERT_TAIL(&ring->unused, xfer, next); + if (rc == LIBUSB_ERROR_NO_DEVICE) { + disconnect = true; + } + break; + } + if (QTAILQ_EMPTY(&ring->inflight)) { + trace_usb_host_iso_start(s->bus_num, s->addr, p->ep->nr); + } + QTAILQ_INSERT_TAIL(&ring->inflight, xfer, next); + } + + if (disconnect) { + usb_host_nodev(s); + } +} + +static void usb_host_iso_data_out(USBHostDevice *s, USBPacket *p) +{ + USBHostIsoRing *ring; + USBHostIsoXfer *xfer; + bool disconnect = false; + int rc, filled = 0; + + ring = usb_host_iso_find(s, p->ep); + if (ring == NULL) { + ring = usb_host_iso_alloc(s, p->ep); + } + + /* copy data from guest */ + xfer = QTAILQ_FIRST(&ring->copy); + while (xfer != NULL && xfer->copy_complete) { + filled++; + xfer = QTAILQ_NEXT(xfer, next); + } + if (xfer == NULL) { + xfer = QTAILQ_FIRST(&ring->unused); + if (xfer == NULL) { + trace_usb_host_iso_out_of_bufs(s->bus_num, s->addr, p->ep->nr); + return; + } + QTAILQ_REMOVE(&ring->unused, xfer, next); + usb_host_iso_reset_xfer(xfer); + QTAILQ_INSERT_TAIL(&ring->copy, xfer, next); + } + usb_host_iso_data_copy(xfer, p); + + if (QTAILQ_EMPTY(&ring->inflight)) { + /* wait until half of our buffers are filled + before kicking the iso out stream */ + if (filled*2 < s->iso_urb_count) { + return; + } + } + + /* submit filled bufs to host */ + while ((xfer = QTAILQ_FIRST(&ring->copy)) != NULL && + xfer->copy_complete) { + QTAILQ_REMOVE(&ring->copy, xfer, next); + rc = libusb_submit_transfer(xfer->xfer); + if (rc != 0) { + usb_host_libusb_error("libusb_submit_transfer [iso]", rc); + QTAILQ_INSERT_TAIL(&ring->unused, xfer, next); + if (rc == LIBUSB_ERROR_NO_DEVICE) { + disconnect = true; + } + break; + } + if (QTAILQ_EMPTY(&ring->inflight)) { + trace_usb_host_iso_start(s->bus_num, s->addr, p->ep->nr); + } + QTAILQ_INSERT_TAIL(&ring->inflight, xfer, next); + } + + if (disconnect) { + usb_host_nodev(s); + } +} + +/* ------------------------------------------------------------------------ */ + +static void usb_host_speed_compat(USBHostDevice *s) +{ + USBDevice *udev = USB_DEVICE(s); + struct libusb_config_descriptor *conf; + const struct libusb_interface_descriptor *intf; + const struct libusb_endpoint_descriptor *endp; +#ifdef HAVE_STREAMS + struct libusb_ss_endpoint_companion_descriptor *endp_ss_comp; +#endif + bool compat_high = true; + bool compat_full = true; + uint8_t type; + int rc, c, i, a, e; + + for (c = 0;; c++) { + rc = libusb_get_config_descriptor(s->dev, c, &conf); + if (rc != 0) { + break; + } + for (i = 0; i < conf->bNumInterfaces; i++) { + for (a = 0; a < conf->interface[i].num_altsetting; a++) { + intf = &conf->interface[i].altsetting[a]; + + if (intf->bInterfaceClass == LIBUSB_CLASS_MASS_STORAGE && + intf->bInterfaceSubClass == 6) { /* SCSI */ + udev->flags |= (1 << USB_DEV_FLAG_IS_SCSI_STORAGE); + break; + } + + for (e = 0; e < intf->bNumEndpoints; e++) { + endp = &intf->endpoint[e]; + type = endp->bmAttributes & 0x3; + switch (type) { + case 0x01: /* ISO */ + compat_full = false; + compat_high = false; + break; + case 0x02: /* BULK */ +#ifdef HAVE_STREAMS + rc = libusb_get_ss_endpoint_companion_descriptor + (ctx, endp, &endp_ss_comp); + if (rc == LIBUSB_SUCCESS) { + int streams = endp_ss_comp->bmAttributes & 0x1f; + if (streams) { + 
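+                            /*
+                             * Bits [4:0] of the SuperSpeed endpoint
+                             * companion bmAttributes encode MaxStreams;
+                             * nonzero means the endpoint uses bulk
+                             * streams, which only an xHCI host can
+                             * drive, so full/high-speed fallback is
+                             * ruled out.
+                             */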
compat_full = false; + compat_high = false; + } + libusb_free_ss_endpoint_companion_descriptor + (endp_ss_comp); + } +#endif + break; + case 0x03: /* INTERRUPT */ + if (endp->wMaxPacketSize > 64) { + compat_full = false; + } + if (endp->wMaxPacketSize > 1024) { + compat_high = false; + } + break; + } + } + } + } + libusb_free_config_descriptor(conf); + } + + udev->speedmask = (1 << udev->speed); + if (udev->speed == USB_SPEED_SUPER && compat_high) { + udev->speedmask |= USB_SPEED_MASK_HIGH; + } + if (udev->speed == USB_SPEED_SUPER && compat_full) { + udev->speedmask |= USB_SPEED_MASK_FULL; + } + if (udev->speed == USB_SPEED_HIGH && compat_full) { + udev->speedmask |= USB_SPEED_MASK_FULL; + } +} + +static void usb_host_ep_update(USBHostDevice *s) +{ + static const char *tname[] = { + [USB_ENDPOINT_XFER_CONTROL] = "control", + [USB_ENDPOINT_XFER_ISOC] = "isoc", + [USB_ENDPOINT_XFER_BULK] = "bulk", + [USB_ENDPOINT_XFER_INT] = "int", + }; + USBDevice *udev = USB_DEVICE(s); + struct libusb_config_descriptor *conf; + const struct libusb_interface_descriptor *intf; + const struct libusb_endpoint_descriptor *endp; +#ifdef HAVE_STREAMS + struct libusb_ss_endpoint_companion_descriptor *endp_ss_comp; +#endif + uint8_t devep, type; + int pid, ep, alt; + int rc, i, e; + + usb_ep_reset(udev); + rc = libusb_get_active_config_descriptor(s->dev, &conf); + if (rc != 0) { + return; + } + trace_usb_host_parse_config(s->bus_num, s->addr, + conf->bConfigurationValue, true); + + for (i = 0; i < conf->bNumInterfaces; i++) { + /* + * The udev->altsetting array indexes alternate settings + * by the interface number. Get the 0th alternate setting + * first so that we can grab the interface number, and + * then correct the alternate setting value if necessary. + */ + intf = &conf->interface[i].altsetting[0]; + alt = udev->altsetting[intf->bInterfaceNumber]; + + if (alt != 0) { + assert(alt < conf->interface[i].num_altsetting); + intf = &conf->interface[i].altsetting[alt]; + } + + trace_usb_host_parse_interface(s->bus_num, s->addr, + intf->bInterfaceNumber, + intf->bAlternateSetting, true); + for (e = 0; e < intf->bNumEndpoints; e++) { + endp = &intf->endpoint[e]; + + devep = endp->bEndpointAddress; + pid = (devep & USB_DIR_IN) ? USB_TOKEN_IN : USB_TOKEN_OUT; + ep = devep & 0xf; + type = endp->bmAttributes & 0x3; + + if (ep == 0) { + trace_usb_host_parse_error(s->bus_num, s->addr, + "invalid endpoint address"); + return; + } + if (usb_ep_get_type(udev, pid, ep) != USB_ENDPOINT_XFER_INVALID) { + trace_usb_host_parse_error(s->bus_num, s->addr, + "duplicate endpoint address"); + return; + } + + trace_usb_host_parse_endpoint(s->bus_num, s->addr, ep, + (devep & USB_DIR_IN) ? 
"in" : "out", + tname[type], true); + usb_ep_set_max_packet_size(udev, pid, ep, + endp->wMaxPacketSize); + usb_ep_set_type(udev, pid, ep, type); + usb_ep_set_ifnum(udev, pid, ep, i); + usb_ep_set_halted(udev, pid, ep, 0); +#ifdef HAVE_STREAMS + if (type == LIBUSB_TRANSFER_TYPE_BULK && + libusb_get_ss_endpoint_companion_descriptor(ctx, endp, + &endp_ss_comp) == LIBUSB_SUCCESS) { + usb_ep_set_max_streams(udev, pid, ep, + endp_ss_comp->bmAttributes); + libusb_free_ss_endpoint_companion_descriptor(endp_ss_comp); + } +#endif + } + } + + libusb_free_config_descriptor(conf); +} + +static int usb_host_open(USBHostDevice *s, libusb_device *dev, int hostfd) +{ + USBDevice *udev = USB_DEVICE(s); + int libusb_speed; + int bus_num = 0; + int addr = 0; + int rc; + Error *local_err = NULL; + + if (s->bh_postld_pending) { + return -1; + } + if (s->dh != NULL) { + goto fail; + } + + if (dev) { + bus_num = libusb_get_bus_number(dev); + addr = libusb_get_device_address(dev); + trace_usb_host_open_started(bus_num, addr); + + rc = libusb_open(dev, &s->dh); + if (rc != 0) { + goto fail; + } + } else { +#if LIBUSB_API_VERSION >= 0x01000107 && !defined(CONFIG_WIN32) + trace_usb_host_open_hostfd(hostfd); + + rc = libusb_wrap_sys_device(ctx, hostfd, &s->dh); + if (rc != 0) { + goto fail; + } + s->hostfd = hostfd; + dev = libusb_get_device(s->dh); + bus_num = libusb_get_bus_number(dev); + addr = libusb_get_device_address(dev); +#else + g_assert_not_reached(); +#endif + } + + s->dev = dev; + s->bus_num = bus_num; + s->addr = addr; + + usb_host_detach_kernel(s); + + libusb_get_device_descriptor(dev, &s->ddesc); + usb_host_get_port(s->dev, s->port, sizeof(s->port)); + + usb_ep_init(udev); + usb_host_ep_update(s); + + libusb_speed = libusb_get_device_speed(dev); +#if LIBUSB_API_VERSION >= 0x01000107 && defined(CONFIG_LINUX) && \ + defined(USBDEVFS_GET_SPEED) + if (hostfd && libusb_speed == 0) { + /* + * Workaround libusb bug: libusb_get_device_speed() does not + * work for libusb_wrap_sys_device() devices in v1.0.23. + * + * Speeds are defined in linux/usb/ch9.h, file not included + * due to name conflicts. 
+ */ + int rc = ioctl(hostfd, USBDEVFS_GET_SPEED, NULL); + switch (rc) { + case 1: /* low */ + libusb_speed = LIBUSB_SPEED_LOW; + break; + case 2: /* full */ + libusb_speed = LIBUSB_SPEED_FULL; + break; + case 3: /* high */ + case 4: /* wireless */ + libusb_speed = LIBUSB_SPEED_HIGH; + break; + case 5: /* super */ + libusb_speed = LIBUSB_SPEED_SUPER; + break; + case 6: /* super plus */ +#ifdef HAVE_SUPER_PLUS + libusb_speed = LIBUSB_SPEED_SUPER_PLUS; +#else + libusb_speed = LIBUSB_SPEED_SUPER; +#endif + break; + } + } +#endif + udev->speed = speed_map[libusb_speed]; + usb_host_speed_compat(s); + + if (s->ddesc.iProduct) { + libusb_get_string_descriptor_ascii(s->dh, s->ddesc.iProduct, + (unsigned char *)udev->product_desc, + sizeof(udev->product_desc)); + } else { + snprintf(udev->product_desc, sizeof(udev->product_desc), + "host:%d.%d", bus_num, addr); + } + + usb_device_attach(udev, &local_err); + if (local_err) { + error_report_err(local_err); + goto fail; + } + + trace_usb_host_open_success(bus_num, addr); + return 0; + +fail: + trace_usb_host_open_failure(bus_num, addr); + if (s->dh != NULL) { + usb_host_release_interfaces(s); + libusb_reset_device(s->dh); + usb_host_attach_kernel(s); + libusb_close(s->dh); + s->dh = NULL; + s->dev = NULL; + } + return -1; +} + +static void usb_host_abort_xfers(USBHostDevice *s) +{ + USBHostRequest *r, *rtmp; + int limit = 100; + + QTAILQ_FOREACH_SAFE(r, &s->requests, next, rtmp) { + usb_host_req_abort(r); + } + + while (QTAILQ_FIRST(&s->requests) != NULL) { + struct timeval tv; + memset(&tv, 0, sizeof(tv)); + tv.tv_usec = 2500; + libusb_handle_events_timeout(ctx, &tv); + if (--limit == 0) { + /* + * Don't wait forever for libusb calling the complete + * callback (which will unlink and free the request). + * + * Leaking memory here, to make sure libusb will not + * access memory which we have released already. 
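+             * (Concretely: the loop below unlinks each request from our
+             * queue without freeing it, trading a small leak for safety
+             * against a use-after-free from libusb's completion path.)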
+ */ + QTAILQ_FOREACH_SAFE(r, &s->requests, next, rtmp) { + QTAILQ_REMOVE(&s->requests, r, next); + } + return; + } + } +} + +static int usb_host_close(USBHostDevice *s) +{ + USBDevice *udev = USB_DEVICE(s); + + if (s->dh == NULL) { + return -1; + } + + trace_usb_host_close(s->bus_num, s->addr); + + usb_host_abort_xfers(s); + usb_host_iso_free_all(s); + + if (udev->attached) { + usb_device_detach(udev); + } + + usb_host_release_interfaces(s); + libusb_reset_device(s->dh); + usb_host_attach_kernel(s); + libusb_close(s->dh); + s->dh = NULL; + s->dev = NULL; + + if (s->hostfd != -1) { + close(s->hostfd); + s->hostfd = -1; + } + + usb_host_auto_check(NULL); + return 0; +} + +static void usb_host_nodev_bh(void *opaque) +{ + USBHostDevice *s = opaque; + usb_host_close(s); +} + +static void usb_host_nodev(USBHostDevice *s) +{ + if (!s->bh_nodev) { + s->bh_nodev = qemu_bh_new(usb_host_nodev_bh, s); + } + qemu_bh_schedule(s->bh_nodev); +} + +static void usb_host_exit_notifier(struct Notifier *n, void *data) +{ + USBHostDevice *s = container_of(n, USBHostDevice, exit); + + if (s->dh) { + usb_host_abort_xfers(s); + usb_host_release_interfaces(s); + libusb_reset_device(s->dh); + usb_host_attach_kernel(s); + libusb_close(s->dh); + } +} + +static libusb_device *usb_host_find_ref(int bus, int addr) +{ + libusb_device **devs = NULL; + libusb_device *ret = NULL; + int i, n; + + n = libusb_get_device_list(ctx, &devs); + for (i = 0; i < n; i++) { + if (libusb_get_bus_number(devs[i]) == bus && + libusb_get_device_address(devs[i]) == addr) { + ret = libusb_ref_device(devs[i]); + break; + } + } + libusb_free_device_list(devs, 1); + return ret; +} + +static void usb_host_realize(USBDevice *udev, Error **errp) +{ + USBHostDevice *s = USB_HOST_DEVICE(udev); + libusb_device *ldev; + int rc; + + if (usb_host_init() != 0) { + error_setg(errp, "failed to init libusb"); + return; + } + if (s->match.vendor_id > 0xffff) { + error_setg(errp, "vendorid out of range"); + return; + } + if (s->match.product_id > 0xffff) { + error_setg(errp, "productid out of range"); + return; + } + if (s->match.addr > 127) { + error_setg(errp, "hostaddr out of range"); + return; + } + + loglevel = s->loglevel; + udev->flags |= (1 << USB_DEV_FLAG_IS_HOST); + udev->auto_attach = 0; + QTAILQ_INIT(&s->requests); + QTAILQ_INIT(&s->isorings); + s->hostfd = -1; + +#if LIBUSB_API_VERSION >= 0x01000107 && !defined(CONFIG_WIN32) + if (s->hostdevice) { + int fd; + s->needs_autoscan = false; + fd = qemu_open_old(s->hostdevice, O_RDWR); + if (fd < 0) { + error_setg_errno(errp, errno, "failed to open %s", s->hostdevice); + return; + } + rc = usb_host_open(s, NULL, fd); + if (rc < 0) { + error_setg(errp, "failed to open host usb device %s", s->hostdevice); + return; + } + } else +#endif + if (s->match.addr && s->match.bus_num && + !s->match.vendor_id && + !s->match.product_id && + !s->match.port) { + s->needs_autoscan = false; + ldev = usb_host_find_ref(s->match.bus_num, + s->match.addr); + if (!ldev) { + error_setg(errp, "failed to find host usb device %d:%d", + s->match.bus_num, s->match.addr); + return; + } + rc = usb_host_open(s, ldev, 0); + libusb_unref_device(ldev); + if (rc < 0) { + error_setg(errp, "failed to open host usb device %d:%d", + s->match.bus_num, s->match.addr); + return; + } + } else { + s->needs_autoscan = true; + QTAILQ_INSERT_TAIL(&hostdevs, s, next); + usb_host_auto_check(NULL); + } + + s->exit.notify = usb_host_exit_notifier; + qemu_add_exit_notifier(&s->exit); +} + +static void usb_host_instance_init(Object *obj) +{ + USBDevice 
*udev = USB_DEVICE(obj); + USBHostDevice *s = USB_HOST_DEVICE(udev); + + device_add_bootindex_property(obj, &s->bootindex, + "bootindex", NULL, + &udev->qdev); +} + +static void usb_host_unrealize(USBDevice *udev) +{ + USBHostDevice *s = USB_HOST_DEVICE(udev); + + qemu_remove_exit_notifier(&s->exit); + if (s->needs_autoscan) { + QTAILQ_REMOVE(&hostdevs, s, next); + } + usb_host_close(s); +} + +static void usb_host_cancel_packet(USBDevice *udev, USBPacket *p) +{ + USBHostDevice *s = USB_HOST_DEVICE(udev); + USBHostRequest *r; + + if (p->combined) { + usb_combined_packet_cancel(udev, p); + return; + } + + trace_usb_host_req_canceled(s->bus_num, s->addr, p); + + r = usb_host_req_find(s, p); + if (r && r->p) { + r->p = NULL; /* mark as dead */ + libusb_cancel_transfer(r->xfer); + } +} + +static void usb_host_detach_kernel(USBHostDevice *s) +{ + struct libusb_config_descriptor *conf; + int rc, i; + + rc = libusb_get_active_config_descriptor(s->dev, &conf); + if (rc != 0) { + return; + } + for (i = 0; i < USB_MAX_INTERFACES; i++) { + rc = libusb_kernel_driver_active(s->dh, i); + usb_host_libusb_error("libusb_kernel_driver_active", rc); + if (rc != 1) { + if (rc == 0) { + s->ifs[i].detached = true; + } + continue; + } + trace_usb_host_detach_kernel(s->bus_num, s->addr, i); + rc = libusb_detach_kernel_driver(s->dh, i); + usb_host_libusb_error("libusb_detach_kernel_driver", rc); + s->ifs[i].detached = true; + } + libusb_free_config_descriptor(conf); +} + +static void usb_host_attach_kernel(USBHostDevice *s) +{ + struct libusb_config_descriptor *conf; + int rc, i; + + rc = libusb_get_active_config_descriptor(s->dev, &conf); + if (rc != 0) { + return; + } + for (i = 0; i < USB_MAX_INTERFACES; i++) { + if (!s->ifs[i].detached) { + continue; + } + trace_usb_host_attach_kernel(s->bus_num, s->addr, i); + libusb_attach_kernel_driver(s->dh, i); + s->ifs[i].detached = false; + } + libusb_free_config_descriptor(conf); +} + +static int usb_host_claim_interfaces(USBHostDevice *s, int configuration) +{ + USBDevice *udev = USB_DEVICE(s); + struct libusb_config_descriptor *conf; + int rc, i, claimed; + + for (i = 0; i < USB_MAX_INTERFACES; i++) { + udev->altsetting[i] = 0; + } + udev->ninterfaces = 0; + udev->configuration = 0; + + usb_host_detach_kernel(s); + + rc = libusb_get_active_config_descriptor(s->dev, &conf); + if (rc != 0) { + if (rc == LIBUSB_ERROR_NOT_FOUND) { + /* address state - ignore */ + return USB_RET_SUCCESS; + } + return USB_RET_STALL; + } + + claimed = 0; + for (i = 0; i < USB_MAX_INTERFACES; i++) { + trace_usb_host_claim_interface(s->bus_num, s->addr, configuration, i); + rc = libusb_claim_interface(s->dh, i); + if (rc == 0) { + s->ifs[i].claimed = true; + if (++claimed == conf->bNumInterfaces) { + break; + } + } + } + if (claimed != conf->bNumInterfaces) { + return USB_RET_STALL; + } + + udev->ninterfaces = conf->bNumInterfaces; + udev->configuration = configuration; + + libusb_free_config_descriptor(conf); + return USB_RET_SUCCESS; +} + +static void usb_host_release_interfaces(USBHostDevice *s) +{ + int i, rc; + + for (i = 0; i < USB_MAX_INTERFACES; i++) { + if (!s->ifs[i].claimed) { + continue; + } + trace_usb_host_release_interface(s->bus_num, s->addr, i); + rc = libusb_release_interface(s->dh, i); + usb_host_libusb_error("libusb_release_interface", rc); + s->ifs[i].claimed = false; + } +} + +static void usb_host_set_address(USBHostDevice *s, int addr) +{ + USBDevice *udev = USB_DEVICE(s); + + trace_usb_host_set_address(s->bus_num, s->addr, addr); + udev->addr = addr; +} + +static void 
usb_host_set_config(USBHostDevice *s, int config, USBPacket *p) +{ + int rc = 0; + + trace_usb_host_set_config(s->bus_num, s->addr, config); + + usb_host_release_interfaces(s); + if (s->ddesc.bNumConfigurations != 1) { + rc = libusb_set_configuration(s->dh, config); + if (rc != 0) { + usb_host_libusb_error("libusb_set_configuration", rc); + p->status = USB_RET_STALL; + if (rc == LIBUSB_ERROR_NO_DEVICE) { + usb_host_nodev(s); + } + return; + } + } + p->status = usb_host_claim_interfaces(s, config); + if (p->status != USB_RET_SUCCESS) { + return; + } + usb_host_ep_update(s); +} + +static void usb_host_set_interface(USBHostDevice *s, int iface, int alt, + USBPacket *p) +{ + USBDevice *udev = USB_DEVICE(s); + int rc; + + trace_usb_host_set_interface(s->bus_num, s->addr, iface, alt); + + usb_host_iso_free_all(s); + + if (iface >= USB_MAX_INTERFACES) { + p->status = USB_RET_STALL; + return; + } + + rc = libusb_set_interface_alt_setting(s->dh, iface, alt); + if (rc != 0) { + usb_host_libusb_error("libusb_set_interface_alt_setting", rc); + p->status = USB_RET_STALL; + if (rc == LIBUSB_ERROR_NO_DEVICE) { + usb_host_nodev(s); + } + return; + } + + udev->altsetting[iface] = alt; + usb_host_ep_update(s); +} + +static void usb_host_handle_control(USBDevice *udev, USBPacket *p, + int request, int value, int index, + int length, uint8_t *data) +{ + USBHostDevice *s = USB_HOST_DEVICE(udev); + USBHostRequest *r; + int rc; + + trace_usb_host_req_control(s->bus_num, s->addr, p, request, value, index); + + if (s->dh == NULL) { + p->status = USB_RET_NODEV; + trace_usb_host_req_emulated(s->bus_num, s->addr, p, p->status); + return; + } + + switch (request) { + case DeviceOutRequest | USB_REQ_SET_ADDRESS: + usb_host_set_address(s, value); + trace_usb_host_req_emulated(s->bus_num, s->addr, p, p->status); + return; + + case DeviceOutRequest | USB_REQ_SET_CONFIGURATION: + usb_host_set_config(s, value & 0xff, p); + trace_usb_host_req_emulated(s->bus_num, s->addr, p, p->status); + return; + + case InterfaceOutRequest | USB_REQ_SET_INTERFACE: + usb_host_set_interface(s, index, value, p); + trace_usb_host_req_emulated(s->bus_num, s->addr, p, p->status); + return; + + case EndpointOutRequest | USB_REQ_CLEAR_FEATURE: + if (value == 0) { /* clear halt */ + int pid = (index & USB_DIR_IN) ? 
USB_TOKEN_IN : USB_TOKEN_OUT; + libusb_clear_halt(s->dh, index); + usb_ep_set_halted(udev, pid, index & 0x0f, 0); + trace_usb_host_req_emulated(s->bus_num, s->addr, p, p->status); + return; + } + } + + r = usb_host_req_alloc(s, p, (request >> 8) & USB_DIR_IN, length + 8); + r->cbuf = data; + r->clen = length; + memcpy(r->buffer, udev->setup_buf, 8); + if (!r->in) { + memcpy(r->buffer + 8, r->cbuf, r->clen); + } + + /* Fix up USB-3 ep0 maxpacket size to allow superspeed connected devices + * to work redirected to a not superspeed capable hcd */ + if ((udev->speedmask & USB_SPEED_MASK_SUPER) && + !(udev->port->speedmask & USB_SPEED_MASK_SUPER) && + request == 0x8006 && value == 0x100 && index == 0) { + r->usb3ep0quirk = true; + } + + libusb_fill_control_transfer(r->xfer, s->dh, r->buffer, + usb_host_req_complete_ctrl, r, + CONTROL_TIMEOUT); + rc = libusb_submit_transfer(r->xfer); + if (rc != 0) { + p->status = USB_RET_NODEV; + trace_usb_host_req_complete(s->bus_num, s->addr, p, + p->status, p->actual_length); + if (rc == LIBUSB_ERROR_NO_DEVICE) { + usb_host_nodev(s); + } + return; + } + + p->status = USB_RET_ASYNC; +} + +static void usb_host_handle_data(USBDevice *udev, USBPacket *p) +{ + USBHostDevice *s = USB_HOST_DEVICE(udev); + USBHostRequest *r; + size_t size; + int ep, rc; + + if (usb_host_use_combining(p->ep) && p->state == USB_PACKET_SETUP) { + p->status = USB_RET_ADD_TO_QUEUE; + return; + } + + trace_usb_host_req_data(s->bus_num, s->addr, p, + p->pid == USB_TOKEN_IN, + p->ep->nr, p->iov.size); + + if (s->dh == NULL) { + p->status = USB_RET_NODEV; + trace_usb_host_req_emulated(s->bus_num, s->addr, p, p->status); + return; + } + if (p->ep->halted) { + p->status = USB_RET_STALL; + trace_usb_host_req_emulated(s->bus_num, s->addr, p, p->status); + return; + } + + switch (usb_ep_get_type(udev, p->pid, p->ep->nr)) { + case USB_ENDPOINT_XFER_BULK: + size = usb_packet_size(p); + r = usb_host_req_alloc(s, p, p->pid == USB_TOKEN_IN, size); + if (!r->in) { + usb_packet_copy(p, r->buffer, size); + } + ep = p->ep->nr | (r->in ? USB_DIR_IN : 0); + if (p->stream) { +#ifdef HAVE_STREAMS + libusb_fill_bulk_stream_transfer(r->xfer, s->dh, ep, p->stream, + r->buffer, size, + usb_host_req_complete_data, r, + BULK_TIMEOUT); +#else + usb_host_req_free(r); + p->status = USB_RET_STALL; + return; +#endif + } else { + libusb_fill_bulk_transfer(r->xfer, s->dh, ep, + r->buffer, size, + usb_host_req_complete_data, r, + BULK_TIMEOUT); + } + break; + case USB_ENDPOINT_XFER_INT: + r = usb_host_req_alloc(s, p, p->pid == USB_TOKEN_IN, p->iov.size); + if (!r->in) { + usb_packet_copy(p, r->buffer, p->iov.size); + } + ep = p->ep->nr | (r->in ? 
USB_DIR_IN : 0); + libusb_fill_interrupt_transfer(r->xfer, s->dh, ep, + r->buffer, p->iov.size, + usb_host_req_complete_data, r, + INTR_TIMEOUT); + break; + case USB_ENDPOINT_XFER_ISOC: + if (p->pid == USB_TOKEN_IN) { + usb_host_iso_data_in(s, p); + } else { + usb_host_iso_data_out(s, p); + } + trace_usb_host_req_complete(s->bus_num, s->addr, p, + p->status, p->actual_length); + return; + default: + p->status = USB_RET_STALL; + trace_usb_host_req_complete(s->bus_num, s->addr, p, + p->status, p->actual_length); + return; + } + + rc = libusb_submit_transfer(r->xfer); + if (rc != 0) { + p->status = USB_RET_NODEV; + trace_usb_host_req_complete(s->bus_num, s->addr, p, + p->status, p->actual_length); + if (rc == LIBUSB_ERROR_NO_DEVICE) { + usb_host_nodev(s); + } + return; + } + + p->status = USB_RET_ASYNC; +} + +static void usb_host_flush_ep_queue(USBDevice *dev, USBEndpoint *ep) +{ + if (usb_host_use_combining(ep)) { + usb_ep_combine_input_packets(ep); + } +} + +static void usb_host_handle_reset(USBDevice *udev) +{ + USBHostDevice *s = USB_HOST_DEVICE(udev); + int rc; + + if (!s->allow_one_guest_reset && !s->allow_all_guest_resets) { + return; + } + if (!s->allow_all_guest_resets && udev->addr == 0) { + return; + } + + trace_usb_host_reset(s->bus_num, s->addr); + + rc = libusb_reset_device(s->dh); + if (rc != 0) { + usb_host_nodev(s); + } +} + +static int usb_host_alloc_streams(USBDevice *udev, USBEndpoint **eps, + int nr_eps, int streams) +{ +#ifdef HAVE_STREAMS + USBHostDevice *s = USB_HOST_DEVICE(udev); + unsigned char endpoints[30]; + int i, rc; + + for (i = 0; i < nr_eps; i++) { + endpoints[i] = eps[i]->nr; + if (eps[i]->pid == USB_TOKEN_IN) { + endpoints[i] |= 0x80; + } + } + rc = libusb_alloc_streams(s->dh, streams, endpoints, nr_eps); + if (rc < 0) { + usb_host_libusb_error("libusb_alloc_streams", rc); + } else if (rc != streams) { + error_report("libusb_alloc_streams: got fewer streams " + "than requested %d < %d", rc, streams); + } + + return (rc == streams) ? 0 : -1; +#else + error_report("libusb_alloc_streams: error not implemented"); + return -1; +#endif +} + +static void usb_host_free_streams(USBDevice *udev, USBEndpoint **eps, + int nr_eps) +{ +#ifdef HAVE_STREAMS + USBHostDevice *s = USB_HOST_DEVICE(udev); + unsigned char endpoints[30]; + int i; + + for (i = 0; i < nr_eps; i++) { + endpoints[i] = eps[i]->nr; + if (eps[i]->pid == USB_TOKEN_IN) { + endpoints[i] |= 0x80; + } + } + libusb_free_streams(s->dh, endpoints, nr_eps); +#endif +} + +/* + * This is *NOT* about restoring state. We have absolutely no idea + * what state the host device is in at the moment and whether it is + * still present in the first place. Attempting to continue where we + * left off is impossible. + * + * What we are going to do here is emulate a surprise removal of + * the usb device passed through, then kick a host scan so the device + * will get re-attached (and re-initialized by the guest) in case it + * is still present. + * + * As the device removal will change the state of other devices (usb + * host controller, most likely interrupt controller too) we have to + * delay it until *all* vmstate is loaded. Thus post_load just + * kicks a bottom half which then does the actual work. 
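+ *
+ * The deferral itself is the stock QEMU bottom-half idiom, sketched
+ * here for illustration:
+ *
+ *     bh = qemu_bh_new(usb_host_post_load_bh, dev);  // created once
+ *     qemu_bh_schedule(bh);  // callback runs later, from the main loop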
+ */ +static void usb_host_post_load_bh(void *opaque) +{ + USBHostDevice *dev = opaque; + USBDevice *udev = USB_DEVICE(dev); + + if (dev->dh != NULL) { + usb_host_close(dev); + } + if (udev->attached) { + usb_device_detach(udev); + } + dev->bh_postld_pending = false; + usb_host_auto_check(NULL); +} + +static int usb_host_post_load(void *opaque, int version_id) +{ + USBHostDevice *dev = opaque; + + if (!dev->bh_postld) { + dev->bh_postld = qemu_bh_new(usb_host_post_load_bh, dev); + } + qemu_bh_schedule(dev->bh_postld); + dev->bh_postld_pending = true; + return 0; +} + +static const VMStateDescription vmstate_usb_host = { + .name = "usb-host", + .version_id = 1, + .minimum_version_id = 1, + .post_load = usb_host_post_load, + .fields = (VMStateField[]) { + VMSTATE_USB_DEVICE(parent_obj, USBHostDevice), + VMSTATE_END_OF_LIST() + } +}; + +static Property usb_host_dev_properties[] = { + DEFINE_PROP_UINT32("hostbus", USBHostDevice, match.bus_num, 0), + DEFINE_PROP_UINT32("hostaddr", USBHostDevice, match.addr, 0), + DEFINE_PROP_STRING("hostport", USBHostDevice, match.port), + DEFINE_PROP_UINT32("vendorid", USBHostDevice, match.vendor_id, 0), + DEFINE_PROP_UINT32("productid", USBHostDevice, match.product_id, 0), +#if LIBUSB_API_VERSION >= 0x01000107 + DEFINE_PROP_STRING("hostdevice", USBHostDevice, hostdevice), +#endif + DEFINE_PROP_UINT32("isobufs", USBHostDevice, iso_urb_count, 4), + DEFINE_PROP_UINT32("isobsize", USBHostDevice, iso_urb_frames, 32), + DEFINE_PROP_BOOL("guest-reset", USBHostDevice, + allow_one_guest_reset, true), + DEFINE_PROP_BOOL("guest-resets-all", USBHostDevice, + allow_all_guest_resets, false), + DEFINE_PROP_UINT32("loglevel", USBHostDevice, loglevel, + LIBUSB_LOG_LEVEL_WARNING), + DEFINE_PROP_BIT("pipeline", USBHostDevice, options, + USB_HOST_OPT_PIPELINE, true), + DEFINE_PROP_BOOL("suppress-remote-wake", USBHostDevice, + suppress_remote_wake, true), + DEFINE_PROP_END_OF_LIST(), +}; + +static void usb_host_class_initfn(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + USBDeviceClass *uc = USB_DEVICE_CLASS(klass); + + uc->realize = usb_host_realize; + uc->product_desc = "USB Host Device"; + uc->cancel_packet = usb_host_cancel_packet; + uc->handle_data = usb_host_handle_data; + uc->handle_control = usb_host_handle_control; + uc->handle_reset = usb_host_handle_reset; + uc->unrealize = usb_host_unrealize; + uc->flush_ep_queue = usb_host_flush_ep_queue; + uc->alloc_streams = usb_host_alloc_streams; + uc->free_streams = usb_host_free_streams; + dc->vmsd = &vmstate_usb_host; + device_class_set_props(dc, usb_host_dev_properties); + set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); +} + +static TypeInfo usb_host_dev_info = { + .name = TYPE_USB_HOST_DEVICE, + .parent = TYPE_USB_DEVICE, + .instance_size = sizeof(USBHostDevice), + .class_init = usb_host_class_initfn, + .instance_init = usb_host_instance_init, +}; +module_obj(TYPE_USB_HOST_DEVICE); + +static void usb_host_register_types(void) +{ + type_register_static(&usb_host_dev_info); + monitor_register_hmp("usbhost", true, hmp_info_usbhost); +} + +type_init(usb_host_register_types) + +/* ------------------------------------------------------------------------ */ + +static QEMUTimer *usb_auto_timer; +static VMChangeStateEntry *usb_vmstate; + +static void usb_host_vm_state(void *unused, bool running, RunState state) +{ + if (running) { + usb_host_auto_check(unused); + } +} + +static void usb_host_auto_check(void *unused) +{ + struct USBHostDevice *s; + struct USBAutoFilter *f; + libusb_device **devs = NULL; 
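+    /*
+     * Filter semantics for the matching loop below: every zero field of
+     * a USBAutoFilter (NULL for the port string) acts as a wildcard, so
+     * a filter with only vendor_id set matches that vendor on any
+     * bus/address/port.  The values come from the "hostbus", "hostaddr",
+     * "hostport", "vendorid" and "productid" properties.
+     */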
+ struct libusb_device_descriptor ddesc; + int unconnected = 0; + int i, n; + + if (usb_host_init() != 0) { + return; + } + + if (runstate_is_running()) { + n = libusb_get_device_list(ctx, &devs); + for (i = 0; i < n; i++) { + if (libusb_get_device_descriptor(devs[i], &ddesc) != 0) { + continue; + } + if (ddesc.bDeviceClass == LIBUSB_CLASS_HUB) { + continue; + } + QTAILQ_FOREACH(s, &hostdevs, next) { + f = &s->match; + if (f->bus_num > 0 && + f->bus_num != libusb_get_bus_number(devs[i])) { + continue; + } + if (f->addr > 0 && + f->addr != libusb_get_device_address(devs[i])) { + continue; + } + if (f->port != NULL) { + char port[16] = "-"; + usb_host_get_port(devs[i], port, sizeof(port)); + if (strcmp(f->port, port) != 0) { + continue; + } + } + if (f->vendor_id > 0 && + f->vendor_id != ddesc.idVendor) { + continue; + } + if (f->product_id > 0 && + f->product_id != ddesc.idProduct) { + continue; + } + + /* We got a match */ + s->seen++; + if (s->errcount >= 3) { + continue; + } + if (s->dh != NULL) { + continue; + } + if (usb_host_open(s, devs[i], 0) < 0) { + s->errcount++; + continue; + } + break; + } + } + libusb_free_device_list(devs, 1); + + QTAILQ_FOREACH(s, &hostdevs, next) { + if (s->dh == NULL) { + unconnected++; + } + if (s->seen == 0) { + if (s->dh) { + usb_host_close(s); + } + s->errcount = 0; + } + s->seen = 0; + } + +#if 0 + if (unconnected == 0) { + /* nothing to watch */ + if (usb_auto_timer) { + timer_del(usb_auto_timer); + trace_usb_host_auto_scan_disabled(); + } + return; + } +#endif + } + + if (!usb_vmstate) { + usb_vmstate = qemu_add_vm_change_state_handler(usb_host_vm_state, NULL); + } + if (!usb_auto_timer) { + usb_auto_timer = timer_new_ms(QEMU_CLOCK_REALTIME, usb_host_auto_check, NULL); + if (!usb_auto_timer) { + return; + } + trace_usb_host_auto_scan_enabled(); + } + timer_mod(usb_auto_timer, qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 2000); +} + +void hmp_info_usbhost(Monitor *mon, const QDict *qdict) +{ + libusb_device **devs = NULL; + struct libusb_device_descriptor ddesc; + char port[16]; + int i, n; + + if (usb_host_init() != 0) { + return; + } + + n = libusb_get_device_list(ctx, &devs); + for (i = 0; i < n; i++) { + if (libusb_get_device_descriptor(devs[i], &ddesc) != 0) { + continue; + } + if (ddesc.bDeviceClass == LIBUSB_CLASS_HUB) { + continue; + } + usb_host_get_port(devs[i], port, sizeof(port)); + monitor_printf(mon, " Bus %d, Addr %d, Port %s, Speed %s Mb/s\n", + libusb_get_bus_number(devs[i]), + libusb_get_device_address(devs[i]), + port, + speed_name[libusb_get_device_speed(devs[i])]); + monitor_printf(mon, " Class %02x:", ddesc.bDeviceClass); + monitor_printf(mon, " USB device %04x:%04x", + ddesc.idVendor, ddesc.idProduct); + if (ddesc.iProduct) { + libusb_device_handle *handle; + if (libusb_open(devs[i], &handle) == 0) { + unsigned char name[64] = ""; + libusb_get_string_descriptor_ascii(handle, + ddesc.iProduct, + name, sizeof(name)); + libusb_close(handle); + monitor_printf(mon, ", %s", name); + } + } + monitor_printf(mon, "\n"); + } + libusb_free_device_list(devs, 1); +} diff --git a/hw/usb/host.h b/hw/usb/host.h new file mode 100644 index 000000000..048ff3b48 --- /dev/null +++ b/hw/usb/host.h @@ -0,0 +1,44 @@ +/* + * Linux host USB redirector + * + * Copyright (c) 2005 Fabrice Bellard + * + * Copyright (c) 2008 Max Krasnyansky + * Support for host device auto connect & disconnect + * Major rewrite to support fully async operation + * + * Copyright 2008 TJ + * Added flexible support for /dev/bus/usb /sys/bus/usb/devices in addition + * to the legacy 
/proc/bus/usb USB device discovery and handling
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef QEMU_USB_HOST_H
+#define QEMU_USB_HOST_H
+
+struct USBAutoFilter {
+    uint32_t bus_num;
+    uint32_t addr;
+    char *port;
+    uint32_t vendor_id;
+    uint32_t product_id;
+};
+
+#endif /* QEMU_USB_HOST_H */
diff --git a/hw/usb/imx-usb-phy.c b/hw/usb/imx-usb-phy.c
new file mode 100644
index 000000000..5d7a549e3
--- /dev/null
+++ b/hw/usb/imx-usb-phy.c
@@ -0,0 +1,224 @@
+/*
+ * i.MX USB PHY
+ *
+ * Copyright (c) 2020 Guenter Roeck
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ * We need to implement basic reset control in the PHY control register.
+ * For everything else, it is sufficient to set whatever is written.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/usb/imx-usb-phy.h"
+#include "migration/vmstate.h"
+#include "qemu/module.h"
+
+static const VMStateDescription vmstate_imx_usbphy = {
+    .name = TYPE_IMX_USBPHY,
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT32_ARRAY(usbphy, IMXUSBPHYState, USBPHY_MAX),
+        VMSTATE_END_OF_LIST()
+    },
+};
+
+static void imx_usbphy_softreset(IMXUSBPHYState *s)
+{
+    s->usbphy[USBPHY_PWD] = 0x001e1c00;
+    s->usbphy[USBPHY_TX] = 0x10060607;
+    s->usbphy[USBPHY_RX] = 0x00000000;
+    s->usbphy[USBPHY_CTRL] = 0xc0200000;
+}
+
+static void imx_usbphy_reset(DeviceState *dev)
+{
+    IMXUSBPHYState *s = IMX_USBPHY(dev);
+
+    s->usbphy[USBPHY_STATUS] = 0x00000000;
+    s->usbphy[USBPHY_DEBUG] = 0x7f180000;
+    s->usbphy[USBPHY_DEBUG0_STATUS] = 0x00000000;
+    s->usbphy[USBPHY_DEBUG1] = 0x00001000;
+    s->usbphy[USBPHY_VERSION] = 0x04020000;
+
+    imx_usbphy_softreset(s);
+}
+
+static uint64_t imx_usbphy_read(void *opaque, hwaddr offset, unsigned size)
+{
+    IMXUSBPHYState *s = (IMXUSBPHYState *)opaque;
+    uint32_t index = offset >> 2;
+    uint32_t value;
+
+    switch (index) {
+    case USBPHY_PWD_SET:
+    case USBPHY_TX_SET:
+    case USBPHY_RX_SET:
+    case USBPHY_CTRL_SET:
+    case USBPHY_DEBUG_SET:
+    case USBPHY_DEBUG1_SET:
+        /*
+         * All REG_NAME_SET register accesses in fact target the
+         * REG_NAME register.
+         */
+        value = s->usbphy[index - 1];
+        break;
+    case USBPHY_PWD_CLR:
+    case USBPHY_TX_CLR:
+    case USBPHY_RX_CLR:
+    case USBPHY_CTRL_CLR:
+    case USBPHY_DEBUG_CLR:
+    case USBPHY_DEBUG1_CLR:
+        /*
+         * All REG_NAME_CLR register accesses in fact target the
+         * REG_NAME register.
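+         * For instance (illustrative note), a read of USBPHY_CTRL_CLR
+         * returns the live value of USBPHY_CTRL: each _CLR alias sits
+         * two words above its base register, hence the "index - 2" below.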
+         */
+        value = s->usbphy[index - 2];
+        break;
+    case USBPHY_PWD_TOG:
+    case USBPHY_TX_TOG:
+    case USBPHY_RX_TOG:
+    case USBPHY_CTRL_TOG:
+    case USBPHY_DEBUG_TOG:
+    case USBPHY_DEBUG1_TOG:
+        /*
+         * All REG_NAME_TOG register accesses in fact target the
+         * REG_NAME register.
+         */
+        value = s->usbphy[index - 3];
+        break;
+    default:
+        value = s->usbphy[index];
+        break;
+    }
+    return (uint64_t)value;
+}
+
+static void imx_usbphy_write(void *opaque, hwaddr offset, uint64_t value,
+                             unsigned size)
+{
+    IMXUSBPHYState *s = (IMXUSBPHYState *)opaque;
+    uint32_t index = offset >> 2;
+
+    switch (index) {
+    case USBPHY_CTRL:
+        s->usbphy[index] = value;
+        if (value & USBPHY_CTRL_SFTRST) {
+            imx_usbphy_softreset(s);
+        }
+        break;
+    case USBPHY_PWD:
+    case USBPHY_TX:
+    case USBPHY_RX:
+    case USBPHY_STATUS:
+    case USBPHY_DEBUG:
+    case USBPHY_DEBUG1:
+        s->usbphy[index] = value;
+        break;
+    case USBPHY_CTRL_SET:
+        s->usbphy[index - 1] |= value;
+        if (value & USBPHY_CTRL_SFTRST) {
+            imx_usbphy_softreset(s);
+        }
+        break;
+    case USBPHY_PWD_SET:
+    case USBPHY_TX_SET:
+    case USBPHY_RX_SET:
+    case USBPHY_DEBUG_SET:
+    case USBPHY_DEBUG1_SET:
+        /*
+         * All REG_NAME_SET register accesses in fact target the
+         * REG_NAME register. So we change the value of the REG_NAME
+         * register, setting the bits passed in the value.
+         */
+        s->usbphy[index - 1] |= value;
+        break;
+    case USBPHY_PWD_CLR:
+    case USBPHY_TX_CLR:
+    case USBPHY_RX_CLR:
+    case USBPHY_CTRL_CLR:
+    case USBPHY_DEBUG_CLR:
+    case USBPHY_DEBUG1_CLR:
+        /*
+         * All REG_NAME_CLR register accesses in fact target the
+         * REG_NAME register. So we change the value of the REG_NAME
+         * register, clearing the bits passed in the value.
+         */
+        s->usbphy[index - 2] &= ~value;
+        break;
+    case USBPHY_CTRL_TOG:
+        s->usbphy[index - 3] ^= value;
+        if ((value & USBPHY_CTRL_SFTRST) &&
+            (s->usbphy[index - 3] & USBPHY_CTRL_SFTRST)) {
+            imx_usbphy_softreset(s);
+        }
+        break;
+    case USBPHY_PWD_TOG:
+    case USBPHY_TX_TOG:
+    case USBPHY_RX_TOG:
+    case USBPHY_DEBUG_TOG:
+    case USBPHY_DEBUG1_TOG:
+        /*
+         * All REG_NAME_TOG register accesses in fact target the
+         * REG_NAME register. So we change the value of the REG_NAME
+         * register, toggling the bits passed in the value.
+         */
+        s->usbphy[index - 3] ^= value;
+        break;
+    default:
+        /* Other registers are read-only */
+        break;
+    }
+}
+
+static const struct MemoryRegionOps imx_usbphy_ops = {
+    .read = imx_usbphy_read,
+    .write = imx_usbphy_write,
+    .endianness = DEVICE_NATIVE_ENDIAN,
+    .valid = {
+        /*
+         * Our device would not work correctly if the guest was doing
+         * unaligned access. This might not be a limitation on the real
+         * device but in practice there is no reason for a guest to access
+         * this device unaligned.
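+         * Restricting accesses to aligned 32-bit words also keeps the
+         * "offset >> 2" register indexing in the handlers above well
+         * defined.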
+ */ + .min_access_size = 4, + .max_access_size = 4, + .unaligned = false, + }, +}; + +static void imx_usbphy_realize(DeviceState *dev, Error **errp) +{ + IMXUSBPHYState *s = IMX_USBPHY(dev); + + memory_region_init_io(&s->iomem, OBJECT(s), &imx_usbphy_ops, s, + "imx-usbphy", 0x1000); + sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->iomem); +} + +static void imx_usbphy_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = imx_usbphy_reset; + dc->vmsd = &vmstate_imx_usbphy; + dc->desc = "i.MX USB PHY Module"; + dc->realize = imx_usbphy_realize; +} + +static const TypeInfo imx_usbphy_info = { + .name = TYPE_IMX_USBPHY, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(IMXUSBPHYState), + .class_init = imx_usbphy_class_init, +}; + +static void imx_usbphy_register_types(void) +{ + type_register_static(&imx_usbphy_info); +} + +type_init(imx_usbphy_register_types) diff --git a/hw/usb/libhw.c b/hw/usb/libhw.c new file mode 100644 index 000000000..9c33a1640 --- /dev/null +++ b/hw/usb/libhw.c @@ -0,0 +1,69 @@ +/* + * QEMU USB emulation, libhw bits. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ +#include "qemu/osdep.h" +#include "hw/usb.h" +#include "sysemu/dma.h" + +int usb_packet_map(USBPacket *p, QEMUSGList *sgl) +{ + DMADirection dir = (p->pid == USB_TOKEN_IN) ? + DMA_DIRECTION_FROM_DEVICE : DMA_DIRECTION_TO_DEVICE; + void *mem; + int i; + + for (i = 0; i < sgl->nsg; i++) { + dma_addr_t base = sgl->sg[i].base; + dma_addr_t len = sgl->sg[i].len; + + while (len) { + dma_addr_t xlen = len; + mem = dma_memory_map(sgl->as, base, &xlen, dir); + if (!mem) { + goto err; + } + if (xlen > len) { + xlen = len; + } + qemu_iovec_add(&p->iov, mem, xlen); + len -= xlen; + base += xlen; + } + } + return 0; + +err: + usb_packet_unmap(p, sgl); + return -1; +} + +void usb_packet_unmap(USBPacket *p, QEMUSGList *sgl) +{ + DMADirection dir = (p->pid == USB_TOKEN_IN) ? 
+ DMA_DIRECTION_FROM_DEVICE : DMA_DIRECTION_TO_DEVICE; + int i; + + for (i = 0; i < p->iov.niov; i++) { + dma_memory_unmap(sgl->as, p->iov.iov[i].iov_base, + p->iov.iov[i].iov_len, dir, + p->iov.iov[i].iov_len); + } +} diff --git a/hw/usb/meson.build b/hw/usb/meson.build new file mode 100644 index 000000000..de853d780 --- /dev/null +++ b/hw/usb/meson.build @@ -0,0 +1,84 @@ +hw_usb_modules = {} + +# usb subsystem core +softmmu_ss.add(when: 'CONFIG_USB', if_true: files( + 'bus.c', + 'combined-packet.c', + 'core.c', + 'desc.c', + 'desc-msos.c', + 'libhw.c', + 'pcap.c', +)) + +# usb host adapters +softmmu_ss.add(when: 'CONFIG_USB_UHCI', if_true: files('hcd-uhci.c')) +softmmu_ss.add(when: 'CONFIG_USB_OHCI', if_true: files('hcd-ohci.c')) +softmmu_ss.add(when: 'CONFIG_USB_OHCI_PCI', if_true: files('hcd-ohci-pci.c')) +softmmu_ss.add(when: 'CONFIG_USB_EHCI', if_true: files('hcd-ehci.c')) +softmmu_ss.add(when: 'CONFIG_USB_EHCI_PCI', if_true: files('hcd-ehci-pci.c')) +softmmu_ss.add(when: 'CONFIG_USB_EHCI_SYSBUS', if_true: files('hcd-ehci.c', 'hcd-ehci-sysbus.c')) +softmmu_ss.add(when: 'CONFIG_USB_XHCI', if_true: files('hcd-xhci.c')) +softmmu_ss.add(when: 'CONFIG_USB_XHCI_PCI', if_true: files('hcd-xhci-pci.c')) +softmmu_ss.add(when: 'CONFIG_USB_XHCI_SYSBUS', if_true: files('hcd-xhci-sysbus.c')) +softmmu_ss.add(when: 'CONFIG_USB_XHCI_NEC', if_true: files('hcd-xhci-nec.c')) +softmmu_ss.add(when: 'CONFIG_USB_MUSB', if_true: files('hcd-musb.c')) +softmmu_ss.add(when: 'CONFIG_USB_DWC2', if_true: files('hcd-dwc2.c')) +softmmu_ss.add(when: 'CONFIG_USB_DWC3', if_true: files('hcd-dwc3.c')) + +softmmu_ss.add(when: 'CONFIG_TUSB6010', if_true: files('tusb6010.c')) +softmmu_ss.add(when: 'CONFIG_IMX', if_true: files('chipidea.c')) +softmmu_ss.add(when: 'CONFIG_IMX_USBPHY', if_true: files('imx-usb-phy.c')) +softmmu_ss.add(when: 'CONFIG_VT82C686', if_true: files('vt82c686-uhci-pci.c')) +specific_ss.add(when: 'CONFIG_XLNX_VERSAL', if_true: files('xlnx-versal-usb2-ctrl-regs.c')) +specific_ss.add(when: 'CONFIG_XLNX_USB_SUBSYS', if_true: files('xlnx-usb-subsystem.c')) + +# emulated usb devices +softmmu_ss.add(when: 'CONFIG_USB', if_true: files('dev-hub.c')) +softmmu_ss.add(when: 'CONFIG_USB', if_true: files('dev-hid.c')) +softmmu_ss.add(when: 'CONFIG_USB_TABLET_WACOM', if_true: files('dev-wacom.c')) +softmmu_ss.add(when: 'CONFIG_USB_STORAGE_CORE', if_true: files('dev-storage.c')) +softmmu_ss.add(when: 'CONFIG_USB_STORAGE_BOT', if_true: files('dev-storage-bot.c')) +softmmu_ss.add(when: 'CONFIG_USB_STORAGE_CLASSIC', if_true: files('dev-storage-classic.c')) +softmmu_ss.add(when: 'CONFIG_USB_STORAGE_UAS', if_true: files('dev-uas.c')) +softmmu_ss.add(when: 'CONFIG_USB_AUDIO', if_true: files('dev-audio.c')) +softmmu_ss.add(when: 'CONFIG_USB_SERIAL', if_true: files('dev-serial.c')) +softmmu_ss.add(when: 'CONFIG_USB_NETWORK', if_true: files('dev-network.c')) +softmmu_ss.add(when: ['CONFIG_POSIX', 'CONFIG_USB_STORAGE_MTP'], if_true: files('dev-mtp.c')) + +# smartcard +softmmu_ss.add(when: 'CONFIG_USB_SMARTCARD', if_true: files('dev-smartcard-reader.c')) + +if cacard.found() + usbsmartcard_ss = ss.source_set() + usbsmartcard_ss.add(when: 'CONFIG_USB_SMARTCARD', + if_true: [cacard, files('ccid-card-emulated.c', 'ccid-card-passthru.c')]) + hw_usb_modules += {'smartcard': usbsmartcard_ss} +endif + +# U2F +softmmu_ss.add(when: 'CONFIG_USB_U2F', if_true: files('u2f.c')) +softmmu_ss.add(when: ['CONFIG_LINUX', 'CONFIG_USB_U2F'], if_true: [libudev, files('u2f-passthru.c')]) +if u2f.found() + softmmu_ss.add(when: 'CONFIG_USB_U2F', 
if_true: [u2f, files('u2f-emulated.c')]) +endif + +# usb redirect +if usbredir.found() + usbredir_ss = ss.source_set() + usbredir_ss.add(when: 'CONFIG_USB', + if_true: [usbredir, files('redirect.c', 'quirks.c')]) + hw_usb_modules += {'redirect': usbredir_ss} +endif + +# usb pass-through +if libusb.found() + usbhost_ss = ss.source_set() + usbhost_ss.add(when: ['CONFIG_USB', libusb], + if_true: files('host-libusb.c')) + hw_usb_modules += {'host': usbhost_ss} +endif + +softmmu_ss.add(when: ['CONFIG_USB', 'CONFIG_XEN', libusb], if_true: files('xen-usb.c')) + +modules += { 'hw-usb': hw_usb_modules } diff --git a/hw/usb/pcap.c b/hw/usb/pcap.c new file mode 100644 index 000000000..dbff00be2 --- /dev/null +++ b/hw/usb/pcap.c @@ -0,0 +1,253 @@ +/* + * usb packet capture + * + * Copyright (c) 2021 Gerd Hoffmann + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "hw/usb.h" + +#define PCAP_MAGIC 0xa1b2c3d4 +#define PCAP_MAJOR 2 +#define PCAP_MINOR 4 + +/* https://wiki.wireshark.org/Development/LibpcapFileFormat */ + +struct pcap_hdr { + uint32_t magic_number; /* magic number */ + uint16_t version_major; /* major version number */ + uint16_t version_minor; /* minor version number */ + int32_t thiszone; /* GMT to local correction */ + uint32_t sigfigs; /* accuracy of timestamps */ + uint32_t snaplen; /* max length of captured packets, in octets */ + uint32_t network; /* data link type */ +}; + +struct pcaprec_hdr { + uint32_t ts_sec; /* timestamp seconds */ + uint32_t ts_usec; /* timestamp microseconds */ + uint32_t incl_len; /* number of octets of packet saved in file */ + uint32_t orig_len; /* actual length of packet */ +}; + +/* https://www.tcpdump.org/linktypes.html */ +/* linux: Documentation/usb/usbmon.rst */ +/* linux: drivers/usb/mon/mon_bin.c */ + +#define LINKTYPE_USB_LINUX 189 /* first 48 bytes only */ +#define LINKTYPE_USB_LINUX_MMAPPED 220 /* full 64 byte header */ + +struct usbmon_packet { + uint64_t id; /* 0: URB ID - from submission to callback */ + unsigned char type; /* 8: Same as text; extensible. */ + unsigned char xfer_type; /* ISO (0), Intr, Control, Bulk (3) */ + unsigned char epnum; /* Endpoint number and transfer direction */ + unsigned char devnum; /* Device address */ + uint16_t busnum; /* 12: Bus number */ + char flag_setup; /* 14: Same as text */ + char flag_data; /* 15: Same as text; Binary zero is OK. 
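+                                    In this file '=' marks a record whose
+                                    (possibly truncated) payload follows,
+                                    while '<' and '>' mark IN/OUT records
+                                    whose payload was not captured.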
*/ + int64_t ts_sec; /* 16: gettimeofday */ + int32_t ts_usec; /* 24: gettimeofday */ + int32_t status; /* 28: */ + unsigned int length; /* 32: Length of data (submitted or actual) */ + unsigned int len_cap; /* 36: Delivered length */ + union { /* 40: */ + unsigned char setup[8]; /* Only for Control S-type */ + struct iso_rec { /* Only for ISO */ + int32_t error_count; + int32_t numdesc; + } iso; + } s; + int32_t interval; /* 48: Only for Interrupt and ISO */ + int32_t start_frame; /* 52: For ISO */ + uint32_t xfer_flags; /* 56: copy of URB's transfer_flags */ + uint32_t ndesc; /* 60: Actual number of ISO descriptors */ +}; /* 64 total length */ + +/* ------------------------------------------------------------------------ */ + +#define CTRL_LEN 4096 +#define DATA_LEN 256 + +static int usbmon_status(USBPacket *p) +{ + switch (p->status) { + case USB_RET_SUCCESS: + return 0; + case USB_RET_NODEV: + return -19; /* -ENODEV */ + default: + return -121; /* -EREMOTEIO */ + } +} + +static unsigned int usbmon_epnum(USBPacket *p) +{ + unsigned epnum = 0; + + epnum |= p->ep->nr; + epnum |= (p->pid == USB_TOKEN_IN) ? 0x80 : 0; + return epnum; +} + +static unsigned char usbmon_xfer_type[] = { + [USB_ENDPOINT_XFER_CONTROL] = 2, + [USB_ENDPOINT_XFER_ISOC] = 0, + [USB_ENDPOINT_XFER_BULK] = 3, + [USB_ENDPOINT_XFER_INT] = 1, +}; + +static void do_usb_pcap_header(FILE *fp, struct usbmon_packet *packet) +{ + struct pcaprec_hdr header; + struct timeval tv; + + gettimeofday(&tv, NULL); + packet->ts_sec = tv.tv_sec; + packet->ts_usec = tv.tv_usec; + + header.ts_sec = packet->ts_sec; + header.ts_usec = packet->ts_usec; + header.incl_len = packet->len_cap; + header.orig_len = packet->length + sizeof(*packet); + fwrite(&header, sizeof(header), 1, fp); + fwrite(packet, sizeof(*packet), 1, fp); +} + +static void do_usb_pcap_ctrl(FILE *fp, USBPacket *p, bool setup) +{ + USBDevice *dev = p->ep->dev; + bool in = dev->setup_buf[0] & USB_DIR_IN; + struct usbmon_packet packet = { + .id = 0, + .type = setup ? 'S' : 'C', + .xfer_type = usbmon_xfer_type[USB_ENDPOINT_XFER_CONTROL], + .epnum = in ? 0x80 : 0, + .devnum = dev->addr, + .flag_setup = setup ? 0 : '-', + .flag_data = '=', + .length = dev->setup_len, + }; + int data_len = dev->setup_len; + + if (data_len > CTRL_LEN) { + data_len = CTRL_LEN; + } + if (setup) { + memcpy(packet.s.setup, dev->setup_buf, 8); + } else { + packet.status = usbmon_status(p); + } + + if (in && setup) { + packet.flag_data = '<'; + packet.length = 0; + data_len = 0; + } + if (!in && !setup) { + packet.flag_data = '>'; + packet.length = 0; + data_len = 0; + } + + packet.len_cap = data_len + sizeof(packet); + do_usb_pcap_header(fp, &packet); + if (data_len) { + fwrite(dev->data_buf, data_len, 1, fp); + } + + fflush(fp); +} + +static void do_usb_pcap_data(FILE *fp, USBPacket *p, bool setup) +{ + struct usbmon_packet packet = { + .id = p->id, + .type = setup ? 
'S' : 'C', + .xfer_type = usbmon_xfer_type[p->ep->type], + .epnum = usbmon_epnum(p), + .devnum = p->ep->dev->addr, + .flag_setup = '-', + .flag_data = '=', + .length = p->iov.size, + }; + int data_len = p->iov.size; + + if (p->ep->nr == 0) { + /* ignore control pipe packets */ + return; + } + + if (data_len > DATA_LEN) { + data_len = DATA_LEN; + } + if (!setup) { + packet.status = usbmon_status(p); + if (packet.length > p->actual_length) { + packet.length = p->actual_length; + } + if (data_len > p->actual_length) { + data_len = p->actual_length; + } + } + + if (p->pid == USB_TOKEN_IN && setup) { + packet.flag_data = '<'; + packet.length = 0; + data_len = 0; + } + if (p->pid == USB_TOKEN_OUT && !setup) { + packet.flag_data = '>'; + packet.length = 0; + data_len = 0; + } + + packet.len_cap = data_len + sizeof(packet); + do_usb_pcap_header(fp, &packet); + if (data_len) { + void *buf = g_malloc(data_len); + iov_to_buf(p->iov.iov, p->iov.niov, 0, buf, data_len); + fwrite(buf, data_len, 1, fp); + g_free(buf); + } + + fflush(fp); +} + +void usb_pcap_init(FILE *fp) +{ + struct pcap_hdr header = { + .magic_number = PCAP_MAGIC, + .version_major = 2, + .version_minor = 4, + .snaplen = MAX(CTRL_LEN, DATA_LEN) + sizeof(struct usbmon_packet), + .network = LINKTYPE_USB_LINUX_MMAPPED, + }; + + fwrite(&header, sizeof(header), 1, fp); +} + +void usb_pcap_ctrl(USBPacket *p, bool setup) +{ + FILE *fp = p->ep->dev->pcap; + + if (!fp) { + return; + } + + do_usb_pcap_ctrl(fp, p, setup); +} + +void usb_pcap_data(USBPacket *p, bool setup) +{ + FILE *fp = p->ep->dev->pcap; + + if (!fp) { + return; + } + + do_usb_pcap_data(fp, p, setup); +} diff --git a/hw/usb/quirks-ftdi-ids.h b/hw/usb/quirks-ftdi-ids.h new file mode 100644 index 000000000..f3cb157d6 --- /dev/null +++ b/hw/usb/quirks-ftdi-ids.h @@ -0,0 +1,1249 @@ +/* + * vendor/product IDs (VID/PID) of devices using FTDI USB serial converters. + * Please keep numerically sorted within individual areas, thanks! 
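+ *
+ * (This table appears to be carried over verbatim from the Linux
+ * kernel's drivers/usb/serial/ftdi_sio_ids.h.)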
+ * + * Philipp Gühring - pg@futureware.at - added the Device ID of the USB relais + * from Rudolf Gugler + * + */ + + +/**********************************/ +/***** devices using FTDI VID *****/ +/**********************************/ + + +#define FTDI_VID 0x0403 /* Vendor Id */ + + +/*** "original" FTDI device PIDs ***/ + +#define FTDI_8U232AM_PID 0x6001 /* Similar device to SIO above */ +#define FTDI_8U232AM_ALT_PID 0x6006 /* FTDI's alternate PID for above */ +#define FTDI_8U2232C_PID 0x6010 /* Dual channel device */ +#define FTDI_4232H_PID 0x6011 /* Quad channel hi-speed device */ +#define FTDI_232H_PID 0x6014 /* Single channel hi-speed device */ +#define FTDI_FTX_PID 0x6015 /* FT-X series (FT201X, FT230X, FT231X, etc) */ +#define FTDI_SIO_PID 0x8372 /* Product Id SIO application of 8U100AX */ +#define FTDI_232RL_PID 0xFBFA /* Product ID for FT232RL */ + + +/*** third-party PIDs (using FTDI_VID) ***/ + +#define FTDI_LUMEL_PD12_PID 0x6002 + +/* + * Marvell OpenRD Base, Client + * http://www.open-rd.org + * OpenRD Base, Client use VID 0x0403 + */ +#define MARVELL_OPENRD_PID 0x9e90 + +/* www.candapter.com Ewert Energy Systems CANdapter device */ +#define FTDI_CANDAPTER_PID 0x9F80 /* Product Id */ + +/* + * Texas Instruments XDS100v2 JTAG / BeagleBone A3 + * http://processors.wiki.ti.com/index.php/XDS100 + * http://beagleboard.org/bone + */ +#define TI_XDS100V2_PID 0xa6d0 + +#define FTDI_NXTCAM_PID 0xABB8 /* NXTCam for Mindstorms NXT */ + +/* US Interface Navigator (http://www.usinterface.com/) */ +#define FTDI_USINT_CAT_PID 0xb810 /* Navigator CAT and 2nd PTT lines */ +#define FTDI_USINT_WKEY_PID 0xb811 /* Navigator WKEY and FSK lines */ +#define FTDI_USINT_RS232_PID 0xb812 /* Navigator RS232 and CONFIG lines */ + +/* OOCDlink by Joern Kaipf + * (http://www.joernonline.de/) */ +#define FTDI_OOCDLINK_PID 0xbaf8 /* Amontec JTAGkey */ + +/* Luminary Micro Stellaris Boards, VID = FTDI_VID */ +/* FTDI 2332C Dual channel device, side A=245 FIFO (JTAG), Side B=RS232 UART */ +#define LMI_LM3S_DEVEL_BOARD_PID 0xbcd8 +#define LMI_LM3S_EVAL_BOARD_PID 0xbcd9 +#define LMI_LM3S_ICDI_BOARD_PID 0xbcda + +#define FTDI_TURTELIZER_PID 0xBDC8 /* JTAG/RS-232 adapter by egnite GmbH */ + +/* OpenDCC (www.opendcc.de) product id */ +#define FTDI_OPENDCC_PID 0xBFD8 +#define FTDI_OPENDCC_SNIFFER_PID 0xBFD9 +#define FTDI_OPENDCC_THROTTLE_PID 0xBFDA +#define FTDI_OPENDCC_GATEWAY_PID 0xBFDB +#define FTDI_OPENDCC_GBM_PID 0xBFDC + +/* NZR SEM 16+ USB (http://www.nzr.de) */ +#define FTDI_NZR_SEM_USB_PID 0xC1E0 /* NZR SEM-LOG16+ */ + +/* + * RR-CirKits LocoBuffer USB (http://www.rr-cirkits.com) + */ +#define FTDI_RRCIRKITS_LOCOBUFFER_PID 0xc7d0 /* LocoBuffer USB */ + +/* DMX4ALL DMX Interfaces */ +#define FTDI_DMX4ALL 0xC850 + +/* + * ASK.fr devices + */ +#define FTDI_ASK_RDR400_PID 0xC991 /* ASK RDR 400 series card reader */ + +/* www.starting-point-systems.com µChameleon device */ +#define FTDI_MICRO_CHAMELEON_PID 0xCAA0 /* Product Id */ + +/* + * Tactrix OpenPort (ECU) devices. + * OpenPort 1.3M submitted by Donour Sizemore. + * OpenPort 1.3S and 1.3U submitted by Ian Abbott. 
+ */ +#define FTDI_TACTRIX_OPENPORT_13M_PID 0xCC48 /* OpenPort 1.3 Mitsubishi */ +#define FTDI_TACTRIX_OPENPORT_13S_PID 0xCC49 /* OpenPort 1.3 Subaru */ +#define FTDI_TACTRIX_OPENPORT_13U_PID 0xCC4A /* OpenPort 1.3 Universal */ + +#define FTDI_DISTORTEC_JTAG_LOCK_PICK_PID 0xCFF8 + +/* SCS HF Radio Modems PID's (http://www.scs-ptc.com) */ +/* the VID is the standard ftdi vid (FTDI_VID) */ +#define FTDI_SCS_DEVICE_0_PID 0xD010 /* SCS PTC-IIusb */ +#define FTDI_SCS_DEVICE_1_PID 0xD011 /* SCS Tracker / DSP TNC */ +#define FTDI_SCS_DEVICE_2_PID 0xD012 +#define FTDI_SCS_DEVICE_3_PID 0xD013 +#define FTDI_SCS_DEVICE_4_PID 0xD014 +#define FTDI_SCS_DEVICE_5_PID 0xD015 +#define FTDI_SCS_DEVICE_6_PID 0xD016 +#define FTDI_SCS_DEVICE_7_PID 0xD017 + +/* iPlus device */ +#define FTDI_IPLUS_PID 0xD070 /* Product Id */ +#define FTDI_IPLUS2_PID 0xD071 /* Product Id */ + +/* + * Gamma Scout (http://gamma-scout.com/). Submitted by rsc@runtux.com. + */ +#define FTDI_GAMMA_SCOUT_PID 0xD678 /* Gamma Scout online */ + +/* Propox devices */ +#define FTDI_PROPOX_JTAGCABLEII_PID 0xD738 +#define FTDI_PROPOX_ISPCABLEIII_PID 0xD739 + +/* Lenz LI-USB Computer Interface. */ +#define FTDI_LENZ_LIUSB_PID 0xD780 + +/* Vardaan Enterprises Serial Interface VEUSB422R3 */ +#define FTDI_VARDAAN_PID 0xF070 + +/* + * Xsens Technologies BV products (http://www.xsens.com). + */ +#define XSENS_CONVERTER_0_PID 0xD388 +#define XSENS_CONVERTER_1_PID 0xD389 +#define XSENS_CONVERTER_2_PID 0xD38A +#define XSENS_CONVERTER_3_PID 0xD38B +#define XSENS_CONVERTER_4_PID 0xD38C +#define XSENS_CONVERTER_5_PID 0xD38D +#define XSENS_CONVERTER_6_PID 0xD38E +#define XSENS_CONVERTER_7_PID 0xD38F + +/* + * NDI (www.ndigital.com) product ids + */ +#define FTDI_NDI_HUC_PID 0xDA70 /* NDI Host USB Converter */ +#define FTDI_NDI_SPECTRA_SCU_PID 0xDA71 /* NDI Spectra SCU */ +#define FTDI_NDI_FUTURE_2_PID 0xDA72 /* NDI future device #2 */ +#define FTDI_NDI_FUTURE_3_PID 0xDA73 /* NDI future device #3 */ +#define FTDI_NDI_AURORA_SCU_PID 0xDA74 /* NDI Aurora SCU */ + +/* + * ChamSys Limited (www.chamsys.co.uk) USB wing/interface product IDs + */ +#define FTDI_CHAMSYS_24_MASTER_WING_PID 0xDAF8 +#define FTDI_CHAMSYS_PC_WING_PID 0xDAF9 +#define FTDI_CHAMSYS_USB_DMX_PID 0xDAFA +#define FTDI_CHAMSYS_MIDI_TIMECODE_PID 0xDAFB +#define FTDI_CHAMSYS_MINI_WING_PID 0xDAFC +#define FTDI_CHAMSYS_MAXI_WING_PID 0xDAFD +#define FTDI_CHAMSYS_MEDIA_WING_PID 0xDAFE +#define FTDI_CHAMSYS_WING_PID 0xDAFF + +/* + * Westrex International devices submitted by Cory Lee + */ +#define FTDI_WESTREX_MODEL_777_PID 0xDC00 /* Model 777 */ +#define FTDI_WESTREX_MODEL_8900F_PID 0xDC01 /* Model 8900F */ + +/* + * ACG Identification Technologies GmbH products (http://www.acg.de/). + * Submitted by anton -at- goto10 -dot- org. + */ +#define FTDI_ACG_HFDUAL_PID 0xDD20 /* HF Dual ISO Reader (RFID) */ + +/* + * Definitions for Artemis astronomical USB based cameras + * Check it at http://www.artemisccd.co.uk/ + */ +#define FTDI_ARTEMIS_PID 0xDF28 /* All Artemis Cameras */ + +/* + * Definitions for ATIK Instruments astronomical USB based cameras + * Check it at http://www.atik-instruments.com/ + */ +#define FTDI_ATIK_ATK16_PID 0xDF30 /* ATIK ATK-16 Grayscale Camera */ +#define FTDI_ATIK_ATK16C_PID 0xDF32 /* ATIK ATK-16C Colour Camera */ +#define FTDI_ATIK_ATK16HR_PID 0xDF31 /* ATIK ATK-16HR Grayscale Camera */ +#define FTDI_ATIK_ATK16HRC_PID 0xDF33 /* ATIK ATK-16HRC Colour Camera */ +#define FTDI_ATIK_ATK16IC_PID 0xDF35 /* ATIK ATK-16IC Grayscale Camera */ + +/* + * Yost Engineering, Inc. 
products (www.yostengineering.com). + * PID 0xE050 submitted by Aaron Prose. + */ +#define FTDI_YEI_SERVOCENTER31_PID 0xE050 /* YEI ServoCenter3.1 USB */ + +/* + * ELV USB devices submitted by Christian Abt of ELV (www.elv.de). + * All of these devices use FTDI's vendor ID (0x0403). + * Further IDs taken from ELV Windows .inf file. + * + * The previously included PID for the UO 100 module was incorrect. + * In fact, that PID was for ELV's UR 100 USB-RS232 converter (0xFB58). + * + * Armin Laeuger originally sent the PID for the UM 100 module. + */ +#define FTDI_ELV_USR_PID 0xE000 /* ELV Universal-Sound-Recorder */ +#define FTDI_ELV_MSM1_PID 0xE001 /* ELV Mini-Sound-Modul */ +#define FTDI_ELV_KL100_PID 0xE002 /* ELV Kfz-Leistungsmesser KL 100 */ +#define FTDI_ELV_WS550_PID 0xE004 /* WS 550 */ +#define FTDI_ELV_EC3000_PID 0xE006 /* ENERGY CONTROL 3000 USB */ +#define FTDI_ELV_WS888_PID 0xE008 /* WS 888 */ +#define FTDI_ELV_TWS550_PID 0xE009 /* Technoline WS 550 */ +#define FTDI_ELV_FEM_PID 0xE00A /* Funk Energie Monitor */ +#define FTDI_ELV_FHZ1300PC_PID 0xE0E8 /* FHZ 1300 PC */ +#define FTDI_ELV_WS500_PID 0xE0E9 /* PC-Wetterstation (WS 500) */ +#define FTDI_ELV_HS485_PID 0xE0EA /* USB to RS-485 adapter */ +#define FTDI_ELV_UMS100_PID 0xE0EB /* ELV USB Master-Slave Schaltsteckdose UMS 100 */ +#define FTDI_ELV_TFD128_PID 0xE0EC /* ELV Temperatur-Feuchte-Datenlogger TFD 128 */ +#define FTDI_ELV_FM3RX_PID 0xE0ED /* ELV Messwertuebertragung FM3 RX */ +#define FTDI_ELV_WS777_PID 0xE0EE /* Conrad WS 777 */ +#define FTDI_ELV_EM1010PC_PID 0xE0EF /* Energy monitor EM 1010 PC */ +#define FTDI_ELV_CSI8_PID 0xE0F0 /* Computer-Schalt-Interface (CSI 8) */ +#define FTDI_ELV_EM1000DL_PID 0xE0F1 /* PC-Datenlogger fuer Energiemonitor (EM 1000 DL) */ +#define FTDI_ELV_PCK100_PID 0xE0F2 /* PC-Kabeltester (PCK 100) */ +#define FTDI_ELV_RFP500_PID 0xE0F3 /* HF-Leistungsmesser (RFP 500) */ +#define FTDI_ELV_FS20SIG_PID 0xE0F4 /* Signalgeber (FS 20 SIG) */ +#define FTDI_ELV_UTP8_PID 0xE0F5 /* ELV UTP 8 */ +#define FTDI_ELV_WS300PC_PID 0xE0F6 /* PC-Wetterstation (WS 300 PC) */ +#define FTDI_ELV_WS444PC_PID 0xE0F7 /* Conrad WS 444 PC */ +#define FTDI_PHI_FISCO_PID 0xE40B /* PHI Fisco USB to Serial cable */ +#define FTDI_ELV_UAD8_PID 0xF068 /* USB-AD-Wandler (UAD 8) */ +#define FTDI_ELV_UDA7_PID 0xF069 /* USB-DA-Wandler (UDA 7) */ +#define FTDI_ELV_USI2_PID 0xF06A /* USB-Schrittmotoren-Interface (USI 2) */ +#define FTDI_ELV_T1100_PID 0xF06B /* Thermometer (T 1100) */ +#define FTDI_ELV_PCD200_PID 0xF06C /* PC-Datenlogger (PCD 200) */ +#define FTDI_ELV_ULA200_PID 0xF06D /* USB-LCD-Ansteuerung (ULA 200) */ +#define FTDI_ELV_ALC8500_PID 0xF06E /* ALC 8500 Expert */ +#define FTDI_ELV_FHZ1000PC_PID 0xF06F /* FHZ 1000 PC */ +#define FTDI_ELV_UR100_PID 0xFB58 /* USB-RS232-Umsetzer (UR 100) */ +#define FTDI_ELV_UM100_PID 0xFB5A /* USB-Modul UM 100 */ +#define FTDI_ELV_UO100_PID 0xFB5B /* USB-Modul UO 100 */ +/* Additional ELV PIDs that default to using the FTDI D2XX drivers on + * MS Windows, rather than the FTDI Virtual Com Port drivers. + * Maybe these will be easier to use with the libftdi/libusb user-space + * drivers, or possibly the Comedi drivers in some cases. 
*/ +#define FTDI_ELV_CLI7000_PID 0xFB59 /* Computer-Light-Interface (CLI 7000) */ +#define FTDI_ELV_PPS7330_PID 0xFB5C /* Processor-Power-Supply (PPS 7330) */ +#define FTDI_ELV_TFM100_PID 0xFB5D /* Temperatur-Feuchte-Messgeraet (TFM 100) */ +#define FTDI_ELV_UDF77_PID 0xFB5E /* USB DCF Funkuhr (UDF 77) */ +#define FTDI_ELV_UIO88_PID 0xFB5F /* USB-I/O Interface (UIO 88) */ + +/* + * EVER Eco Pro UPS (http://www.ever.com.pl/) + */ + +#define EVER_ECO_PRO_CDS 0xe520 /* RS-232 converter */ + +/* + * Active Robots product ids. + */ +#define FTDI_ACTIVE_ROBOTS_PID 0xE548 /* USB comms board */ + +/* Pyramid Computer GmbH */ +#define FTDI_PYRAMID_PID 0xE6C8 /* Pyramid Appliance Display */ + +/* www.elsterelectricity.com Elster Unicom III Optical Probe */ +#define FTDI_ELSTER_UNICOM_PID 0xE700 /* Product Id */ + +/* + * Gude Analog- und Digitalsysteme GmbH + */ +#define FTDI_GUDEADS_E808_PID 0xE808 +#define FTDI_GUDEADS_E809_PID 0xE809 +#define FTDI_GUDEADS_E80A_PID 0xE80A +#define FTDI_GUDEADS_E80B_PID 0xE80B +#define FTDI_GUDEADS_E80C_PID 0xE80C +#define FTDI_GUDEADS_E80D_PID 0xE80D +#define FTDI_GUDEADS_E80E_PID 0xE80E +#define FTDI_GUDEADS_E80F_PID 0xE80F +#define FTDI_GUDEADS_E888_PID 0xE888 /* Expert ISDN Control USB */ +#define FTDI_GUDEADS_E889_PID 0xE889 /* USB RS-232 OptoBridge */ +#define FTDI_GUDEADS_E88A_PID 0xE88A +#define FTDI_GUDEADS_E88B_PID 0xE88B +#define FTDI_GUDEADS_E88C_PID 0xE88C +#define FTDI_GUDEADS_E88D_PID 0xE88D +#define FTDI_GUDEADS_E88E_PID 0xE88E +#define FTDI_GUDEADS_E88F_PID 0xE88F + +/* + * Eclo (http://www.eclo.pt/) product IDs. + * PID 0xEA90 submitted by Martin Grill. + */ +#define FTDI_ECLO_COM_1WIRE_PID 0xEA90 /* COM to 1-Wire USB adaptor */ + +/* TNC-X USB-to-packet-radio adapter, versions prior to 3.0 (DLP module) */ +#define FTDI_TNC_X_PID 0xEBE0 + +/* + * Teratronik product ids. + * Submitted by O. Wölfelschneider. + */ +#define FTDI_TERATRONIK_VCP_PID 0xEC88 /* Teratronik device (preferring VCP driver on windows) */ +#define FTDI_TERATRONIK_D2XX_PID 0xEC89 /* Teratronik device (preferring D2XX driver on windows) */ + +/* Rig Expert Ukraine devices */ +#define FTDI_REU_TINY_PID 0xED22 /* RigExpert Tiny */ + +/* + * Hameg HO820 and HO870 interface (using VID 0x0403) + */ +#define HAMEG_HO820_PID 0xed74 +#define HAMEG_HO730_PID 0xed73 +#define HAMEG_HO720_PID 0xed72 +#define HAMEG_HO870_PID 0xed71 + +/* + * MaxStream devices www.maxstream.net + */ +#define FTDI_MAXSTREAM_PID 0xEE18 /* Xbee PKG-U Module */ + +/* + * microHAM product IDs (http://www.microham.com). + * Submitted by Justin Burket (KL1RL) + * and Mike Studer (K6EEP) . + * Ian Abbott added a few more from the driver INF file. 
+ */ +#define FTDI_MHAM_KW_PID 0xEEE8 /* USB-KW interface */ +#define FTDI_MHAM_YS_PID 0xEEE9 /* USB-YS interface */ +#define FTDI_MHAM_Y6_PID 0xEEEA /* USB-Y6 interface */ +#define FTDI_MHAM_Y8_PID 0xEEEB /* USB-Y8 interface */ +#define FTDI_MHAM_IC_PID 0xEEEC /* USB-IC interface */ +#define FTDI_MHAM_DB9_PID 0xEEED /* USB-DB9 interface */ +#define FTDI_MHAM_RS232_PID 0xEEEE /* USB-RS232 interface */ +#define FTDI_MHAM_Y9_PID 0xEEEF /* USB-Y9 interface */ + +/* Domintell products http://www.domintell.com */ +#define FTDI_DOMINTELL_DGQG_PID 0xEF50 /* Master */ +#define FTDI_DOMINTELL_DUSB_PID 0xEF51 /* DUSB01 module */ + +/* + * The following are the values for the Perle Systems + * UltraPort USB serial converters + */ +#define FTDI_PERLE_ULTRAPORT_PID 0xF0C0 /* Perle UltraPort Product Id */ + +/* Sprog II (Andrew Crosland's SprogII DCC interface) */ +#define FTDI_SPROG_II 0xF0C8 + +/* an infrared receiver for user access control with IR tags */ +#define FTDI_PIEGROUP_PID 0xF208 /* Product Id */ + +/* ACT Solutions HomePro ZWave interface + (http://www.act-solutions.com/HomePro-Product-Matrix.html) */ +#define FTDI_ACTZWAVE_PID 0xF2D0 + +/* + * 4N-GALAXY.DE PIDs for CAN-USB, USB-RS232, USB-RS422, USB-RS485, + * USB-TTY aktiv, USB-TTY passiv. Some PIDs are used by several devices + * and I'm not entirely sure which are used by which. + */ +#define FTDI_4N_GALAXY_DE_1_PID 0xF3C0 +#define FTDI_4N_GALAXY_DE_2_PID 0xF3C1 +#define FTDI_4N_GALAXY_DE_3_PID 0xF3C2 + +/* + * Linx Technologies product ids + */ +#define LINX_SDMUSBQSS_PID 0xF448 /* Linx SDM-USB-QS-S */ +#define LINX_MASTERDEVEL2_PID 0xF449 /* Linx Master Development 2.0 */ +#define LINX_FUTURE_0_PID 0xF44A /* Linx future device */ +#define LINX_FUTURE_1_PID 0xF44B /* Linx future device */ +#define LINX_FUTURE_2_PID 0xF44C /* Linx future device */ + +/* + * Oceanic product ids + */ +#define FTDI_OCEANIC_PID 0xF460 /* Oceanic dive instrument */ + +/* + * SUUNTO product ids + */ +#define FTDI_SUUNTO_SPORTS_PID 0xF680 /* Suunto Sports instrument */ + +/* USB-UIRT - An infrared receiver and transmitter using the 8U232AM chip */ +/* http://www.usbuirt.com/ */ +#define FTDI_USB_UIRT_PID 0xF850 /* Product Id */ + +/* CCS Inc. 
ICDU/ICDU40 product ID -
+ * the FT232BM is used in an in-circuit-debugger unit for PIC16's/PIC18's */
+#define FTDI_CCSICDU20_0_PID 0xF9D0
+#define FTDI_CCSICDU40_1_PID 0xF9D1
+#define FTDI_CCSMACHX_2_PID 0xF9D2
+#define FTDI_CCSLOAD_N_GO_3_PID 0xF9D3
+#define FTDI_CCSICDU64_4_PID 0xF9D4
+#define FTDI_CCSPRIME8_5_PID 0xF9D5
+
+/*
+ * The following are the values for the Matrix Orbital LCD displays,
+ * which use the FT232BM (similar to the 8U232AM)
+ */
+#define FTDI_MTXORB_0_PID 0xFA00 /* Matrix Orbital Product Id */
+#define FTDI_MTXORB_1_PID 0xFA01 /* Matrix Orbital Product Id */
+#define FTDI_MTXORB_2_PID 0xFA02 /* Matrix Orbital Product Id */
+#define FTDI_MTXORB_3_PID 0xFA03 /* Matrix Orbital Product Id */
+#define FTDI_MTXORB_4_PID 0xFA04 /* Matrix Orbital Product Id */
+#define FTDI_MTXORB_5_PID 0xFA05 /* Matrix Orbital Product Id */
+#define FTDI_MTXORB_6_PID 0xFA06 /* Matrix Orbital Product Id */
+
+/*
+ * Home Electronics (www.home-electro.com) USB gadgets
+ */
+#define FTDI_HE_TIRA1_PID 0xFA78 /* Tira-1 IR transceiver */
+
+/* Inside Accesso contactless reader (http://www.insidecontactless.com/) */
+#define INSIDE_ACCESSO 0xFAD0
+
+/*
+ * ThorLabs USB motor drivers
+ */
+#define FTDI_THORLABS_PID 0xfaf0 /* ThorLabs USB motor drivers */
+
+/*
+ * Protego product ids
+ */
+#define PROTEGO_SPECIAL_1 0xFC70 /* special/unknown device */
+#define PROTEGO_R2X0 0xFC71 /* R200-USB TRNG unit (R210, R220, and R230) */
+#define PROTEGO_SPECIAL_3 0xFC72 /* special/unknown device */
+#define PROTEGO_SPECIAL_4 0xFC73 /* special/unknown device */
+
+/*
+ * Sony Ericsson product ids
+ */
+#define FTDI_DSS20_PID 0xFC82 /* DSS-20 Sync Station for Sony Ericsson P800 */
+#define FTDI_URBAN_0_PID 0xFC8A /* Sony Ericsson Urban, uart #0 */
+#define FTDI_URBAN_1_PID 0xFC8B /* Sony Ericsson Urban, uart #1 */
+
+/* www.irtrans.de device */
+#define FTDI_IRTRANS_PID 0xFC60 /* Product Id */
+
+/*
+ * RM Michaelides CANview USB (http://www.rmcan.com) (FTDI_VID)
+ * CAN fieldbus interface adapter, added by port GmbH (www.port.de)
+ * Ian Abbott changed the macro names for consistency.
+ */
+#define FTDI_RM_CANVIEW_PID 0xfd60 /* Product Id */
+/* www.thoughttechnology.com/ the TT-USB (provided with ProComp) uses ftdi_sio */
+#define FTDI_TTUSB_PID 0xFF20 /* Product Id */
+
+#define FTDI_USBX_707_PID 0xF857 /* ADSTech IR Blaster USBX-707 (FTDI_VID) */
+
+#define FTDI_RELAIS_PID 0xFA10 /* Relais device from Rudolf Gugler */
+
+/*
+ * PCDJ uses ftdi based dj-controllers. The following PID is
+ * for their DAC-2 device http://www.pcdjhardware.com/DAC2.asp
+ * (the VID is the standard ftdi vid (FTDI_VID), PID sent by Wouter Paesen)
+ */
+#define FTDI_PCDJ_DAC2_PID 0xFA88
+
+#define FTDI_R2000KU_TRUE_RNG 0xFB80 /* R2000KU TRUE RNG (FTDI_VID) */
+
+/*
+ * DIEBOLD BCS SE923 (FTDI_VID)
+ */
+#define DIEBOLD_BCS_SE923_PID 0xfb99
+
+/* www.crystalfontz.com devices
+ * - thanks for providing free devices for evaluation!
+ * they use the ftdi chipset for the USB interface
+ * and the vendor id is the same
+ */
+#define FTDI_XF_632_PID 0xFC08 /* 632: 16x2 Character Display */
+#define FTDI_XF_634_PID 0xFC09 /* 634: 20x4 Character Display */
+#define FTDI_XF_547_PID 0xFC0A /* 547: Two line Display */
+#define FTDI_XF_633_PID 0xFC0B /* 633: 16x2 Character Display with Keys */
+#define FTDI_XF_631_PID 0xFC0C /* 631: 20x2 Character Display */
+#define FTDI_XF_635_PID 0xFC0D /* 635: 20x4 Character Display */
+#define FTDI_XF_640_PID 0xFC0E /* 640: Two line Display */
+#define FTDI_XF_642_PID 0xFC0F /* 642: Two line Display */
+
+/*
+ * Video Networks Limited / Homechoice in the UK use an ftdi-based device
+ * for their 1Mb broadband internet service. The following PID is exhibited
+ * by the usb device supplied (the VID is the standard ftdi vid (FTDI_VID))
+ */
+#define FTDI_VNHCPCUSB_D_PID 0xfe38 /* Product Id */
+
+/* AlphaMicro Components AMC-232USB01 device (FTDI_VID) */
+#define FTDI_AMC232_PID 0xFF00 /* Product Id */
+
+/*
+ * IBS elektronik product ids (FTDI_VID)
+ * Submitted by Thomas Schleusener
+ */
+#define FTDI_IBS_US485_PID 0xff38 /* IBS US485 (USB<-->RS422/485 interface) */
+#define FTDI_IBS_PICPRO_PID 0xff39 /* IBS PIC-Programmer */
+#define FTDI_IBS_PCMCIA_PID 0xff3a /* IBS Card reader for PCMCIA SRAM-cards */
+#define FTDI_IBS_PK1_PID 0xff3b /* IBS PK1 - Particle counter */
+#define FTDI_IBS_RS232MON_PID 0xff3c /* IBS RS232 - Monitor */
+#define FTDI_IBS_APP70_PID 0xff3d /* APP 70 (dust monitoring system) */
+#define FTDI_IBS_PEDO_PID 0xff3e /* IBS PEDO-Modem (RF modem 868.35 MHz) */
+#define FTDI_IBS_PROD_PID 0xff3f /* future device */
+/* www.canusb.com Lawicel CANUSB device (FTDI_VID) */
+#define FTDI_CANUSB_PID 0xFFA8 /* Product Id */
+
+/*
+ * TavIR AVR product ids (FTDI_VID)
+ */
+#define FTDI_TAVIR_STK500_PID 0xFA33 /* STK500 AVR programmer */
+
+/*
+ * TIAO product ids (FTDI_VID)
+ * http://www.tiaowiki.com/w/Main_Page
+ */
+#define FTDI_TIAO_UMPA_PID 0x8a98 /* TIAO/DIYGADGET USB Multi-Protocol Adapter */
+
+
+/********************************/
+/** third-party VID/PID combos **/
+/********************************/
+
+
+
+/*
+ * Atmel STK541
+ */
+#define ATMEL_VID 0x03eb /* Vendor ID */
+#define STK541_PID 0x2109 /* Zigbee Controller */
+
+/*
+ * Blackfin gnICE JTAG
+ * http://docs.blackfin.uclinux.org/doku.php?id=hw:jtag:gnice
+ */
+#define ADI_VID 0x0456
+#define ADI_GNICE_PID 0xF000
+#define ADI_GNICEPLUS_PID 0xF001
+
+/*
+ * Microchip Technology, Inc.
+ *
+ * MICROCHIP_VID (0x04D8) and MICROCHIP_USB_BOARD_PID (0x000A) are
+ * used by single function CDC ACM class based firmware demo
+ * applications. The VID/PID has also been used in firmware
+ * emulating FTDI serial chips by:
+ * Hornby Elite - Digital Command Control Console
+ * http://www.hornby.com/hornby-dcc/controllers/
+ */
+#define MICROCHIP_VID 0x04D8
+#define MICROCHIP_USB_BOARD_PID 0x000A /* CDC RS-232 Emulation Demo */
+
+/*
+ * RATOC REX-USB60F
+ */
+#define RATOC_VENDOR_ID 0x0584
+#define RATOC_PRODUCT_ID_USB60F 0xb020
+
+/*
+ * Acton Research Corp.
+ */
+#define ACTON_VID 0x0647 /* Vendor ID */
+#define ACTON_SPECTRAPRO_PID 0x0100
+
+/*
+ * Contec products (http://www.contec.com)
+ * Submitted by Daniel Sangorrin
+ */
+#define CONTEC_VID 0x06CE /* Vendor ID */
+#define CONTEC_COM1USBH_PID 0x8311 /* COM-1(USB)H */
+
+/*
+ * Definitions for B&B Electronics products.
+ */ +#define BANDB_VID 0x0856 /* B&B Electronics Vendor ID */ +#define BANDB_USOTL4_PID 0xAC01 /* USOTL4 Isolated RS-485 Converter */ +#define BANDB_USTL4_PID 0xAC02 /* USTL4 RS-485 Converter */ +#define BANDB_USO9ML2_PID 0xAC03 /* USO9ML2 Isolated RS-232 Converter */ +#define BANDB_USOPTL4_PID 0xAC11 +#define BANDB_USPTL4_PID 0xAC12 +#define BANDB_USO9ML2DR_2_PID 0xAC16 +#define BANDB_USO9ML2DR_PID 0xAC17 +#define BANDB_USOPTL4DR2_PID 0xAC18 /* USOPTL4R-2 2-port Isolated RS-232 Converter */ +#define BANDB_USOPTL4DR_PID 0xAC19 +#define BANDB_485USB9F_2W_PID 0xAC25 +#define BANDB_485USB9F_4W_PID 0xAC26 +#define BANDB_232USB9M_PID 0xAC27 +#define BANDB_485USBTB_2W_PID 0xAC33 +#define BANDB_485USBTB_4W_PID 0xAC34 +#define BANDB_TTL5USB9M_PID 0xAC49 +#define BANDB_TTL3USB9M_PID 0xAC50 +#define BANDB_ZZ_PROG1_USB_PID 0xBA02 + +/* + * Intrepid Control Systems (http://www.intrepidcs.com/) ValueCAN and NeoVI + */ +#define INTREPID_VID 0x093C +#define INTREPID_VALUECAN_PID 0x0601 +#define INTREPID_NEOVI_PID 0x0701 + +/* + * Definitions for ID TECH (www.idt-net.com) devices + */ +#define IDTECH_VID 0x0ACD /* ID TECH Vendor ID */ +#define IDTECH_IDT1221U_PID 0x0300 /* IDT1221U USB to RS-232 adapter */ + +/* + * Definitions for Omnidirectional Control Technology, Inc. devices + */ +#define OCT_VID 0x0B39 /* OCT vendor ID */ +/* Note: OCT US101 is also rebadged as Dick Smith Electronics (NZ) XH6381 */ +/* Also rebadged as Dick Smith Electronics (Aus) XH6451 */ +/* Also rebadged as SIIG Inc. model US2308 hardware version 1 */ +#define OCT_DK201_PID 0x0103 /* OCT DK201 USB docking station */ +#define OCT_US101_PID 0x0421 /* OCT US101 USB to RS-232 */ + +/* + * Definitions for Icom Inc. devices + */ +#define ICOM_VID 0x0C26 /* Icom vendor ID */ +/* Note: ID-1 is a communications transceiver for HAM-radio operators */ +#define ICOM_ID_1_PID 0x0004 /* ID-1 USB to RS-232 */ +/* Note: OPC is an Optional cable to connect an Icom Transceiver */ +#define ICOM_OPC_U_UC_PID 0x0018 /* OPC-478UC, OPC-1122U cloning cable */ +/* Note: ID-RP* devices are Icom Repeater Devices for HAM-radio */ +#define ICOM_ID_RP2C1_PID 0x0009 /* ID-RP2C Asset 1 to RS-232 */ +#define ICOM_ID_RP2C2_PID 0x000A /* ID-RP2C Asset 2 to RS-232 */ +#define ICOM_ID_RP2D_PID 0x000B /* ID-RP2D configuration port*/ +#define ICOM_ID_RP2VT_PID 0x000C /* ID-RP2V Transmit config port */ +#define ICOM_ID_RP2VR_PID 0x000D /* ID-RP2V Receive config port */ +#define ICOM_ID_RP4KVT_PID 0x0010 /* ID-RP4000V Transmit config port */ +#define ICOM_ID_RP4KVR_PID 0x0011 /* ID-RP4000V Receive config port */ +#define ICOM_ID_RP2KVT_PID 0x0012 /* ID-RP2000V Transmit config port */ +#define ICOM_ID_RP2KVR_PID 0x0013 /* ID-RP2000V Receive config port */ + +/* + * GN Otometrics (http://www.otometrics.com) + * Submitted by Ville Sundberg. + */ +#define GN_OTOMETRICS_VID 0x0c33 /* Vendor ID */ +#define AURICAL_USB_PID 0x0010 /* Aurical USB Audiometer */ + +/* + * The following are the values for the Sealevel SeaLINK+ adapters. + * (Original list sent by Tuan Hoang. Ian Abbott renamed the macros and + * removed some PIDs that don't seem to match any existing products.) 
+ */ +#define SEALEVEL_VID 0x0c52 /* Sealevel Vendor ID */ +#define SEALEVEL_2101_PID 0x2101 /* SeaLINK+232 (2101/2105) */ +#define SEALEVEL_2102_PID 0x2102 /* SeaLINK+485 (2102) */ +#define SEALEVEL_2103_PID 0x2103 /* SeaLINK+232I (2103) */ +#define SEALEVEL_2104_PID 0x2104 /* SeaLINK+485I (2104) */ +#define SEALEVEL_2106_PID 0x9020 /* SeaLINK+422 (2106) */ +#define SEALEVEL_2201_1_PID 0x2211 /* SeaPORT+2/232 (2201) Port 1 */ +#define SEALEVEL_2201_2_PID 0x2221 /* SeaPORT+2/232 (2201) Port 2 */ +#define SEALEVEL_2202_1_PID 0x2212 /* SeaPORT+2/485 (2202) Port 1 */ +#define SEALEVEL_2202_2_PID 0x2222 /* SeaPORT+2/485 (2202) Port 2 */ +#define SEALEVEL_2203_1_PID 0x2213 /* SeaPORT+2 (2203) Port 1 */ +#define SEALEVEL_2203_2_PID 0x2223 /* SeaPORT+2 (2203) Port 2 */ +#define SEALEVEL_2401_1_PID 0x2411 /* SeaPORT+4/232 (2401) Port 1 */ +#define SEALEVEL_2401_2_PID 0x2421 /* SeaPORT+4/232 (2401) Port 2 */ +#define SEALEVEL_2401_3_PID 0x2431 /* SeaPORT+4/232 (2401) Port 3 */ +#define SEALEVEL_2401_4_PID 0x2441 /* SeaPORT+4/232 (2401) Port 4 */ +#define SEALEVEL_2402_1_PID 0x2412 /* SeaPORT+4/485 (2402) Port 1 */ +#define SEALEVEL_2402_2_PID 0x2422 /* SeaPORT+4/485 (2402) Port 2 */ +#define SEALEVEL_2402_3_PID 0x2432 /* SeaPORT+4/485 (2402) Port 3 */ +#define SEALEVEL_2402_4_PID 0x2442 /* SeaPORT+4/485 (2402) Port 4 */ +#define SEALEVEL_2403_1_PID 0x2413 /* SeaPORT+4 (2403) Port 1 */ +#define SEALEVEL_2403_2_PID 0x2423 /* SeaPORT+4 (2403) Port 2 */ +#define SEALEVEL_2403_3_PID 0x2433 /* SeaPORT+4 (2403) Port 3 */ +#define SEALEVEL_2403_4_PID 0x2443 /* SeaPORT+4 (2403) Port 4 */ +#define SEALEVEL_2801_1_PID 0X2811 /* SeaLINK+8/232 (2801) Port 1 */ +#define SEALEVEL_2801_2_PID 0X2821 /* SeaLINK+8/232 (2801) Port 2 */ +#define SEALEVEL_2801_3_PID 0X2831 /* SeaLINK+8/232 (2801) Port 3 */ +#define SEALEVEL_2801_4_PID 0X2841 /* SeaLINK+8/232 (2801) Port 4 */ +#define SEALEVEL_2801_5_PID 0X2851 /* SeaLINK+8/232 (2801) Port 5 */ +#define SEALEVEL_2801_6_PID 0X2861 /* SeaLINK+8/232 (2801) Port 6 */ +#define SEALEVEL_2801_7_PID 0X2871 /* SeaLINK+8/232 (2801) Port 7 */ +#define SEALEVEL_2801_8_PID 0X2881 /* SeaLINK+8/232 (2801) Port 8 */ +#define SEALEVEL_2802_1_PID 0X2812 /* SeaLINK+8/485 (2802) Port 1 */ +#define SEALEVEL_2802_2_PID 0X2822 /* SeaLINK+8/485 (2802) Port 2 */ +#define SEALEVEL_2802_3_PID 0X2832 /* SeaLINK+8/485 (2802) Port 3 */ +#define SEALEVEL_2802_4_PID 0X2842 /* SeaLINK+8/485 (2802) Port 4 */ +#define SEALEVEL_2802_5_PID 0X2852 /* SeaLINK+8/485 (2802) Port 5 */ +#define SEALEVEL_2802_6_PID 0X2862 /* SeaLINK+8/485 (2802) Port 6 */ +#define SEALEVEL_2802_7_PID 0X2872 /* SeaLINK+8/485 (2802) Port 7 */ +#define SEALEVEL_2802_8_PID 0X2882 /* SeaLINK+8/485 (2802) Port 8 */ +#define SEALEVEL_2803_1_PID 0X2813 /* SeaLINK+8 (2803) Port 1 */ +#define SEALEVEL_2803_2_PID 0X2823 /* SeaLINK+8 (2803) Port 2 */ +#define SEALEVEL_2803_3_PID 0X2833 /* SeaLINK+8 (2803) Port 3 */ +#define SEALEVEL_2803_4_PID 0X2843 /* SeaLINK+8 (2803) Port 4 */ +#define SEALEVEL_2803_5_PID 0X2853 /* SeaLINK+8 (2803) Port 5 */ +#define SEALEVEL_2803_6_PID 0X2863 /* SeaLINK+8 (2803) Port 6 */ +#define SEALEVEL_2803_7_PID 0X2873 /* SeaLINK+8 (2803) Port 7 */ +#define SEALEVEL_2803_8_PID 0X2883 /* SeaLINK+8 (2803) Port 8 */ +#define SEALEVEL_2803R_1_PID 0Xa02a /* SeaLINK+8 (2803-ROHS) Port 1+2 */ +#define SEALEVEL_2803R_2_PID 0Xa02b /* SeaLINK+8 (2803-ROHS) Port 3+4 */ +#define SEALEVEL_2803R_3_PID 0Xa02c /* SeaLINK+8 (2803-ROHS) Port 5+6 */ +#define SEALEVEL_2803R_4_PID 0Xa02d /* SeaLINK+8 (2803-ROHS) Port 7+8 */ + +/* + * 
JETI SPECTROMETER SPECBOS 1201 + * http://www.jeti.com/cms/index.php/instruments/other-instruments/specbos-2101 + */ +#define JETI_VID 0x0c6c +#define JETI_SPC1201_PID 0x04b2 + +/* + * FTDI USB UART chips used in construction projects from the + * Elektor Electronics magazine (http://www.elektor.com/) + */ +#define ELEKTOR_VID 0x0C7D +#define ELEKTOR_FT323R_PID 0x0005 /* RFID-Reader, issue 09-2006 */ + +/* + * Posiflex inc retail equipment (http://www.posiflex.com.tw) + */ +#define POSIFLEX_VID 0x0d3a /* Vendor ID */ +#define POSIFLEX_PP7000_PID 0x0300 /* PP-7000II thermal printer */ + +/* + * The following are the values for two KOBIL chipcard terminals. + */ +#define KOBIL_VID 0x0d46 /* KOBIL Vendor ID */ +#define KOBIL_CONV_B1_PID 0x2020 /* KOBIL Konverter for B1 */ +#define KOBIL_CONV_KAAN_PID 0x2021 /* KOBIL_Konverter for KAAN */ + +#define FTDI_NF_RIC_VID 0x0DCD /* Vendor Id */ +#define FTDI_NF_RIC_PID 0x0001 /* Product Id */ + +/* + * Falcom Wireless Communications GmbH + */ +#define FALCOM_VID 0x0F94 /* Vendor Id */ +#define FALCOM_TWIST_PID 0x0001 /* Falcom Twist USB GPRS modem */ +#define FALCOM_SAMBA_PID 0x0005 /* Falcom Samba USB GPRS modem */ + +/* Larsen and Brusgaard AltiTrack/USBtrack */ +#define LARSENBRUSGAARD_VID 0x0FD8 +#define LB_ALTITRACK_PID 0x0001 + +/* + * TTi (Thurlby Thandar Instruments) + */ +#define TTI_VID 0x103E /* Vendor Id */ +#define TTI_QL355P_PID 0x03E8 /* TTi QL355P power supply */ + +/* Interbiometrics USB I/O Board */ +/* Developed for Interbiometrics by Rudolf Gugler */ +#define INTERBIOMETRICS_VID 0x1209 +#define INTERBIOMETRICS_IOBOARD_PID 0x1002 +#define INTERBIOMETRICS_MINI_IOBOARD_PID 0x1006 + +/* + * Testo products (http://www.testo.com/) + * Submitted by Colin Leroy + */ +#define TESTO_VID 0x128D +#define TESTO_USB_INTERFACE_PID 0x0001 + +/* + * Mobility Electronics products. + */ +#define MOBILITY_VID 0x1342 +#define MOBILITY_USB_SERIAL_PID 0x0202 /* EasiDock USB 200 serial */ + +/* + * FIC / OpenMoko, Inc. 
http://wiki.openmoko.org/wiki/Neo1973_Debug_Board_v3 + * Submitted by Harald Welte + */ +#define FIC_VID 0x1457 +#define FIC_NEO1973_DEBUG_PID 0x5118 + +/* Olimex */ +#define OLIMEX_VID 0x15BA +#define OLIMEX_ARM_USB_OCD_PID 0x0003 +#define OLIMEX_ARM_USB_OCD_H_PID 0x002b + +/* + * Telldus Technologies + */ +#define TELLDUS_VID 0x1781 /* Vendor ID */ +#define TELLDUS_TELLSTICK_PID 0x0C30 /* RF control dongle 433 MHz using FT232RL */ + +/* + * RT Systems programming cables for various ham radios + */ +#define RTSYSTEMS_VID 0x2100 /* Vendor ID */ +#define RTSYSTEMS_SERIAL_VX7_PID 0x9e52 /* Serial converter for VX-7 Radios using FT232RL */ +#define RTSYSTEMS_CT29B_PID 0x9e54 /* CT29B Radio Cable */ +#define RTSYSTEMS_RTS01_PID 0x9e57 /* USB-RTS01 Radio Cable */ + + +/* + * Physik Instrumente + * http://www.physikinstrumente.com/en/products/ + */ +/* These two devices use the VID of FTDI */ +#define PI_C865_PID 0xe0a0 /* PI C-865 Piezomotor Controller */ +#define PI_C857_PID 0xe0a1 /* PI Encoder Trigger Box */ + +#define PI_VID 0x1a72 /* Vendor ID */ +#define PI_C866_PID 0x1000 /* PI C-866 Piezomotor Controller */ +#define PI_C663_PID 0x1001 /* PI C-663 Mercury-Step */ +#define PI_C725_PID 0x1002 /* PI C-725 Piezomotor Controller */ +#define PI_E517_PID 0x1005 /* PI E-517 Digital Piezo Controller Operation Module */ +#define PI_C863_PID 0x1007 /* PI C-863 */ +#define PI_E861_PID 0x1008 /* PI E-861 Piezomotor Controller */ +#define PI_C867_PID 0x1009 /* PI C-867 Piezomotor Controller */ +#define PI_E609_PID 0x100D /* PI E-609 Digital Piezo Controller */ +#define PI_E709_PID 0x100E /* PI E-709 Digital Piezo Controller */ +#define PI_100F_PID 0x100F /* PI Digital Piezo Controller */ +#define PI_1011_PID 0x1011 /* PI Digital Piezo Controller */ +#define PI_1012_PID 0x1012 /* PI Motion Controller */ +#define PI_1013_PID 0x1013 /* PI Motion Controller */ +#define PI_1014_PID 0x1014 /* PI Device */ +#define PI_1015_PID 0x1015 /* PI Device */ +#define PI_1016_PID 0x1016 /* PI Digital Servo Module */ + +/* + * Kondo Kagaku Co.Ltd. + * http://www.kondo-robot.com/EN + */ +#define KONDO_VID 0x165c +#define KONDO_USB_SERIAL_PID 0x0002 + +/* + * Bayer Ascensia Contour blood glucose meter USB-converter cable. + * http://winglucofacts.com/cables/ + */ +#define BAYER_VID 0x1A79 +#define BAYER_CONTOUR_CABLE_PID 0x6001 + +/* + * The following are the values for the Matrix Orbital FTDI Range + * Anything in this range will use an FT232RL. 
+ */ +#define MTXORB_VID 0x1B3D +#define MTXORB_FTDI_RANGE_0100_PID 0x0100 +#define MTXORB_FTDI_RANGE_0101_PID 0x0101 +#define MTXORB_FTDI_RANGE_0102_PID 0x0102 +#define MTXORB_FTDI_RANGE_0103_PID 0x0103 +#define MTXORB_FTDI_RANGE_0104_PID 0x0104 +#define MTXORB_FTDI_RANGE_0105_PID 0x0105 +#define MTXORB_FTDI_RANGE_0106_PID 0x0106 +#define MTXORB_FTDI_RANGE_0107_PID 0x0107 +#define MTXORB_FTDI_RANGE_0108_PID 0x0108 +#define MTXORB_FTDI_RANGE_0109_PID 0x0109 +#define MTXORB_FTDI_RANGE_010A_PID 0x010A +#define MTXORB_FTDI_RANGE_010B_PID 0x010B +#define MTXORB_FTDI_RANGE_010C_PID 0x010C +#define MTXORB_FTDI_RANGE_010D_PID 0x010D +#define MTXORB_FTDI_RANGE_010E_PID 0x010E +#define MTXORB_FTDI_RANGE_010F_PID 0x010F +#define MTXORB_FTDI_RANGE_0110_PID 0x0110 +#define MTXORB_FTDI_RANGE_0111_PID 0x0111 +#define MTXORB_FTDI_RANGE_0112_PID 0x0112 +#define MTXORB_FTDI_RANGE_0113_PID 0x0113 +#define MTXORB_FTDI_RANGE_0114_PID 0x0114 +#define MTXORB_FTDI_RANGE_0115_PID 0x0115 +#define MTXORB_FTDI_RANGE_0116_PID 0x0116 +#define MTXORB_FTDI_RANGE_0117_PID 0x0117 +#define MTXORB_FTDI_RANGE_0118_PID 0x0118 +#define MTXORB_FTDI_RANGE_0119_PID 0x0119 +#define MTXORB_FTDI_RANGE_011A_PID 0x011A +#define MTXORB_FTDI_RANGE_011B_PID 0x011B +#define MTXORB_FTDI_RANGE_011C_PID 0x011C +#define MTXORB_FTDI_RANGE_011D_PID 0x011D +#define MTXORB_FTDI_RANGE_011E_PID 0x011E +#define MTXORB_FTDI_RANGE_011F_PID 0x011F +#define MTXORB_FTDI_RANGE_0120_PID 0x0120 +#define MTXORB_FTDI_RANGE_0121_PID 0x0121 +#define MTXORB_FTDI_RANGE_0122_PID 0x0122 +#define MTXORB_FTDI_RANGE_0123_PID 0x0123 +#define MTXORB_FTDI_RANGE_0124_PID 0x0124 +#define MTXORB_FTDI_RANGE_0125_PID 0x0125 +#define MTXORB_FTDI_RANGE_0126_PID 0x0126 +#define MTXORB_FTDI_RANGE_0127_PID 0x0127 +#define MTXORB_FTDI_RANGE_0128_PID 0x0128 +#define MTXORB_FTDI_RANGE_0129_PID 0x0129 +#define MTXORB_FTDI_RANGE_012A_PID 0x012A +#define MTXORB_FTDI_RANGE_012B_PID 0x012B +#define MTXORB_FTDI_RANGE_012C_PID 0x012C +#define MTXORB_FTDI_RANGE_012D_PID 0x012D +#define MTXORB_FTDI_RANGE_012E_PID 0x012E +#define MTXORB_FTDI_RANGE_012F_PID 0x012F +#define MTXORB_FTDI_RANGE_0130_PID 0x0130 +#define MTXORB_FTDI_RANGE_0131_PID 0x0131 +#define MTXORB_FTDI_RANGE_0132_PID 0x0132 +#define MTXORB_FTDI_RANGE_0133_PID 0x0133 +#define MTXORB_FTDI_RANGE_0134_PID 0x0134 +#define MTXORB_FTDI_RANGE_0135_PID 0x0135 +#define MTXORB_FTDI_RANGE_0136_PID 0x0136 +#define MTXORB_FTDI_RANGE_0137_PID 0x0137 +#define MTXORB_FTDI_RANGE_0138_PID 0x0138 +#define MTXORB_FTDI_RANGE_0139_PID 0x0139 +#define MTXORB_FTDI_RANGE_013A_PID 0x013A +#define MTXORB_FTDI_RANGE_013B_PID 0x013B +#define MTXORB_FTDI_RANGE_013C_PID 0x013C +#define MTXORB_FTDI_RANGE_013D_PID 0x013D +#define MTXORB_FTDI_RANGE_013E_PID 0x013E +#define MTXORB_FTDI_RANGE_013F_PID 0x013F +#define MTXORB_FTDI_RANGE_0140_PID 0x0140 +#define MTXORB_FTDI_RANGE_0141_PID 0x0141 +#define MTXORB_FTDI_RANGE_0142_PID 0x0142 +#define MTXORB_FTDI_RANGE_0143_PID 0x0143 +#define MTXORB_FTDI_RANGE_0144_PID 0x0144 +#define MTXORB_FTDI_RANGE_0145_PID 0x0145 +#define MTXORB_FTDI_RANGE_0146_PID 0x0146 +#define MTXORB_FTDI_RANGE_0147_PID 0x0147 +#define MTXORB_FTDI_RANGE_0148_PID 0x0148 +#define MTXORB_FTDI_RANGE_0149_PID 0x0149 +#define MTXORB_FTDI_RANGE_014A_PID 0x014A +#define MTXORB_FTDI_RANGE_014B_PID 0x014B +#define MTXORB_FTDI_RANGE_014C_PID 0x014C +#define MTXORB_FTDI_RANGE_014D_PID 0x014D +#define MTXORB_FTDI_RANGE_014E_PID 0x014E +#define MTXORB_FTDI_RANGE_014F_PID 0x014F +#define MTXORB_FTDI_RANGE_0150_PID 0x0150 +#define MTXORB_FTDI_RANGE_0151_PID 
0x0151 +#define MTXORB_FTDI_RANGE_0152_PID 0x0152 +#define MTXORB_FTDI_RANGE_0153_PID 0x0153 +#define MTXORB_FTDI_RANGE_0154_PID 0x0154 +#define MTXORB_FTDI_RANGE_0155_PID 0x0155 +#define MTXORB_FTDI_RANGE_0156_PID 0x0156 +#define MTXORB_FTDI_RANGE_0157_PID 0x0157 +#define MTXORB_FTDI_RANGE_0158_PID 0x0158 +#define MTXORB_FTDI_RANGE_0159_PID 0x0159 +#define MTXORB_FTDI_RANGE_015A_PID 0x015A +#define MTXORB_FTDI_RANGE_015B_PID 0x015B +#define MTXORB_FTDI_RANGE_015C_PID 0x015C +#define MTXORB_FTDI_RANGE_015D_PID 0x015D +#define MTXORB_FTDI_RANGE_015E_PID 0x015E +#define MTXORB_FTDI_RANGE_015F_PID 0x015F +#define MTXORB_FTDI_RANGE_0160_PID 0x0160 +#define MTXORB_FTDI_RANGE_0161_PID 0x0161 +#define MTXORB_FTDI_RANGE_0162_PID 0x0162 +#define MTXORB_FTDI_RANGE_0163_PID 0x0163 +#define MTXORB_FTDI_RANGE_0164_PID 0x0164 +#define MTXORB_FTDI_RANGE_0165_PID 0x0165 +#define MTXORB_FTDI_RANGE_0166_PID 0x0166 +#define MTXORB_FTDI_RANGE_0167_PID 0x0167 +#define MTXORB_FTDI_RANGE_0168_PID 0x0168 +#define MTXORB_FTDI_RANGE_0169_PID 0x0169 +#define MTXORB_FTDI_RANGE_016A_PID 0x016A +#define MTXORB_FTDI_RANGE_016B_PID 0x016B +#define MTXORB_FTDI_RANGE_016C_PID 0x016C +#define MTXORB_FTDI_RANGE_016D_PID 0x016D +#define MTXORB_FTDI_RANGE_016E_PID 0x016E +#define MTXORB_FTDI_RANGE_016F_PID 0x016F +#define MTXORB_FTDI_RANGE_0170_PID 0x0170 +#define MTXORB_FTDI_RANGE_0171_PID 0x0171 +#define MTXORB_FTDI_RANGE_0172_PID 0x0172 +#define MTXORB_FTDI_RANGE_0173_PID 0x0173 +#define MTXORB_FTDI_RANGE_0174_PID 0x0174 +#define MTXORB_FTDI_RANGE_0175_PID 0x0175 +#define MTXORB_FTDI_RANGE_0176_PID 0x0176 +#define MTXORB_FTDI_RANGE_0177_PID 0x0177 +#define MTXORB_FTDI_RANGE_0178_PID 0x0178 +#define MTXORB_FTDI_RANGE_0179_PID 0x0179 +#define MTXORB_FTDI_RANGE_017A_PID 0x017A +#define MTXORB_FTDI_RANGE_017B_PID 0x017B +#define MTXORB_FTDI_RANGE_017C_PID 0x017C +#define MTXORB_FTDI_RANGE_017D_PID 0x017D +#define MTXORB_FTDI_RANGE_017E_PID 0x017E +#define MTXORB_FTDI_RANGE_017F_PID 0x017F +#define MTXORB_FTDI_RANGE_0180_PID 0x0180 +#define MTXORB_FTDI_RANGE_0181_PID 0x0181 +#define MTXORB_FTDI_RANGE_0182_PID 0x0182 +#define MTXORB_FTDI_RANGE_0183_PID 0x0183 +#define MTXORB_FTDI_RANGE_0184_PID 0x0184 +#define MTXORB_FTDI_RANGE_0185_PID 0x0185 +#define MTXORB_FTDI_RANGE_0186_PID 0x0186 +#define MTXORB_FTDI_RANGE_0187_PID 0x0187 +#define MTXORB_FTDI_RANGE_0188_PID 0x0188 +#define MTXORB_FTDI_RANGE_0189_PID 0x0189 +#define MTXORB_FTDI_RANGE_018A_PID 0x018A +#define MTXORB_FTDI_RANGE_018B_PID 0x018B +#define MTXORB_FTDI_RANGE_018C_PID 0x018C +#define MTXORB_FTDI_RANGE_018D_PID 0x018D +#define MTXORB_FTDI_RANGE_018E_PID 0x018E +#define MTXORB_FTDI_RANGE_018F_PID 0x018F +#define MTXORB_FTDI_RANGE_0190_PID 0x0190 +#define MTXORB_FTDI_RANGE_0191_PID 0x0191 +#define MTXORB_FTDI_RANGE_0192_PID 0x0192 +#define MTXORB_FTDI_RANGE_0193_PID 0x0193 +#define MTXORB_FTDI_RANGE_0194_PID 0x0194 +#define MTXORB_FTDI_RANGE_0195_PID 0x0195 +#define MTXORB_FTDI_RANGE_0196_PID 0x0196 +#define MTXORB_FTDI_RANGE_0197_PID 0x0197 +#define MTXORB_FTDI_RANGE_0198_PID 0x0198 +#define MTXORB_FTDI_RANGE_0199_PID 0x0199 +#define MTXORB_FTDI_RANGE_019A_PID 0x019A +#define MTXORB_FTDI_RANGE_019B_PID 0x019B +#define MTXORB_FTDI_RANGE_019C_PID 0x019C +#define MTXORB_FTDI_RANGE_019D_PID 0x019D +#define MTXORB_FTDI_RANGE_019E_PID 0x019E +#define MTXORB_FTDI_RANGE_019F_PID 0x019F +#define MTXORB_FTDI_RANGE_01A0_PID 0x01A0 +#define MTXORB_FTDI_RANGE_01A1_PID 0x01A1 +#define MTXORB_FTDI_RANGE_01A2_PID 0x01A2 +#define MTXORB_FTDI_RANGE_01A3_PID 0x01A3 +#define 
MTXORB_FTDI_RANGE_01A4_PID 0x01A4 +#define MTXORB_FTDI_RANGE_01A5_PID 0x01A5 +#define MTXORB_FTDI_RANGE_01A6_PID 0x01A6 +#define MTXORB_FTDI_RANGE_01A7_PID 0x01A7 +#define MTXORB_FTDI_RANGE_01A8_PID 0x01A8 +#define MTXORB_FTDI_RANGE_01A9_PID 0x01A9 +#define MTXORB_FTDI_RANGE_01AA_PID 0x01AA +#define MTXORB_FTDI_RANGE_01AB_PID 0x01AB +#define MTXORB_FTDI_RANGE_01AC_PID 0x01AC +#define MTXORB_FTDI_RANGE_01AD_PID 0x01AD +#define MTXORB_FTDI_RANGE_01AE_PID 0x01AE +#define MTXORB_FTDI_RANGE_01AF_PID 0x01AF +#define MTXORB_FTDI_RANGE_01B0_PID 0x01B0 +#define MTXORB_FTDI_RANGE_01B1_PID 0x01B1 +#define MTXORB_FTDI_RANGE_01B2_PID 0x01B2 +#define MTXORB_FTDI_RANGE_01B3_PID 0x01B3 +#define MTXORB_FTDI_RANGE_01B4_PID 0x01B4 +#define MTXORB_FTDI_RANGE_01B5_PID 0x01B5 +#define MTXORB_FTDI_RANGE_01B6_PID 0x01B6 +#define MTXORB_FTDI_RANGE_01B7_PID 0x01B7 +#define MTXORB_FTDI_RANGE_01B8_PID 0x01B8 +#define MTXORB_FTDI_RANGE_01B9_PID 0x01B9 +#define MTXORB_FTDI_RANGE_01BA_PID 0x01BA +#define MTXORB_FTDI_RANGE_01BB_PID 0x01BB +#define MTXORB_FTDI_RANGE_01BC_PID 0x01BC +#define MTXORB_FTDI_RANGE_01BD_PID 0x01BD +#define MTXORB_FTDI_RANGE_01BE_PID 0x01BE +#define MTXORB_FTDI_RANGE_01BF_PID 0x01BF +#define MTXORB_FTDI_RANGE_01C0_PID 0x01C0 +#define MTXORB_FTDI_RANGE_01C1_PID 0x01C1 +#define MTXORB_FTDI_RANGE_01C2_PID 0x01C2 +#define MTXORB_FTDI_RANGE_01C3_PID 0x01C3 +#define MTXORB_FTDI_RANGE_01C4_PID 0x01C4 +#define MTXORB_FTDI_RANGE_01C5_PID 0x01C5 +#define MTXORB_FTDI_RANGE_01C6_PID 0x01C6 +#define MTXORB_FTDI_RANGE_01C7_PID 0x01C7 +#define MTXORB_FTDI_RANGE_01C8_PID 0x01C8 +#define MTXORB_FTDI_RANGE_01C9_PID 0x01C9 +#define MTXORB_FTDI_RANGE_01CA_PID 0x01CA +#define MTXORB_FTDI_RANGE_01CB_PID 0x01CB +#define MTXORB_FTDI_RANGE_01CC_PID 0x01CC +#define MTXORB_FTDI_RANGE_01CD_PID 0x01CD +#define MTXORB_FTDI_RANGE_01CE_PID 0x01CE +#define MTXORB_FTDI_RANGE_01CF_PID 0x01CF +#define MTXORB_FTDI_RANGE_01D0_PID 0x01D0 +#define MTXORB_FTDI_RANGE_01D1_PID 0x01D1 +#define MTXORB_FTDI_RANGE_01D2_PID 0x01D2 +#define MTXORB_FTDI_RANGE_01D3_PID 0x01D3 +#define MTXORB_FTDI_RANGE_01D4_PID 0x01D4 +#define MTXORB_FTDI_RANGE_01D5_PID 0x01D5 +#define MTXORB_FTDI_RANGE_01D6_PID 0x01D6 +#define MTXORB_FTDI_RANGE_01D7_PID 0x01D7 +#define MTXORB_FTDI_RANGE_01D8_PID 0x01D8 +#define MTXORB_FTDI_RANGE_01D9_PID 0x01D9 +#define MTXORB_FTDI_RANGE_01DA_PID 0x01DA +#define MTXORB_FTDI_RANGE_01DB_PID 0x01DB +#define MTXORB_FTDI_RANGE_01DC_PID 0x01DC +#define MTXORB_FTDI_RANGE_01DD_PID 0x01DD +#define MTXORB_FTDI_RANGE_01DE_PID 0x01DE +#define MTXORB_FTDI_RANGE_01DF_PID 0x01DF +#define MTXORB_FTDI_RANGE_01E0_PID 0x01E0 +#define MTXORB_FTDI_RANGE_01E1_PID 0x01E1 +#define MTXORB_FTDI_RANGE_01E2_PID 0x01E2 +#define MTXORB_FTDI_RANGE_01E3_PID 0x01E3 +#define MTXORB_FTDI_RANGE_01E4_PID 0x01E4 +#define MTXORB_FTDI_RANGE_01E5_PID 0x01E5 +#define MTXORB_FTDI_RANGE_01E6_PID 0x01E6 +#define MTXORB_FTDI_RANGE_01E7_PID 0x01E7 +#define MTXORB_FTDI_RANGE_01E8_PID 0x01E8 +#define MTXORB_FTDI_RANGE_01E9_PID 0x01E9 +#define MTXORB_FTDI_RANGE_01EA_PID 0x01EA +#define MTXORB_FTDI_RANGE_01EB_PID 0x01EB +#define MTXORB_FTDI_RANGE_01EC_PID 0x01EC +#define MTXORB_FTDI_RANGE_01ED_PID 0x01ED +#define MTXORB_FTDI_RANGE_01EE_PID 0x01EE +#define MTXORB_FTDI_RANGE_01EF_PID 0x01EF +#define MTXORB_FTDI_RANGE_01F0_PID 0x01F0 +#define MTXORB_FTDI_RANGE_01F1_PID 0x01F1 +#define MTXORB_FTDI_RANGE_01F2_PID 0x01F2 +#define MTXORB_FTDI_RANGE_01F3_PID 0x01F3 +#define MTXORB_FTDI_RANGE_01F4_PID 0x01F4 +#define MTXORB_FTDI_RANGE_01F5_PID 0x01F5 +#define MTXORB_FTDI_RANGE_01F6_PID 
0x01F6 +#define MTXORB_FTDI_RANGE_01F7_PID 0x01F7 +#define MTXORB_FTDI_RANGE_01F8_PID 0x01F8 +#define MTXORB_FTDI_RANGE_01F9_PID 0x01F9 +#define MTXORB_FTDI_RANGE_01FA_PID 0x01FA +#define MTXORB_FTDI_RANGE_01FB_PID 0x01FB +#define MTXORB_FTDI_RANGE_01FC_PID 0x01FC +#define MTXORB_FTDI_RANGE_01FD_PID 0x01FD +#define MTXORB_FTDI_RANGE_01FE_PID 0x01FE +#define MTXORB_FTDI_RANGE_01FF_PID 0x01FF + + + +/* + * The Mobility Lab (TML) + * Submitted by Pierre Castella + */ +#define TML_VID 0x1B91 /* Vendor ID */ +#define TML_USB_SERIAL_PID 0x0064 /* USB - Serial Converter */ + +/* Alti-2 products http://www.alti-2.com */ +#define ALTI2_VID 0x1BC9 +#define ALTI2_N3_PID 0x6001 /* Neptune 3 */ + +/* + * Ionics PlugComputer + */ +#define IONICS_VID 0x1c0c +#define IONICS_PLUGCOMPUTER_PID 0x0102 + +/* + * Dresden Elektronik Sensor Terminal Board + */ +#define DE_VID 0x1cf1 /* Vendor ID */ +#define STB_PID 0x0001 /* Sensor Terminal Board */ +#define WHT_PID 0x0004 /* Wireless Handheld Terminal */ + +/* + * STMicroelectronics + */ +#define ST_VID 0x0483 +#define ST_STMCLT1030_PID 0x3747 /* ST Micro Connect Lite STMCLT1030 */ + +/* + * Papouch products (http://www.papouch.com/) + * Submitted by Folkert van Heusden + */ + +#define PAPOUCH_VID 0x5050 /* Vendor ID */ +#define PAPOUCH_SB485_PID 0x0100 /* Papouch SB485 USB-485/422 Converter */ +#define PAPOUCH_AP485_PID 0x0101 /* AP485 USB-RS485 Converter */ +#define PAPOUCH_SB422_PID 0x0102 /* Papouch SB422 USB-RS422 Converter */ +#define PAPOUCH_SB485_2_PID 0x0103 /* Papouch SB485 USB-485/422 Converter */ +#define PAPOUCH_AP485_2_PID 0x0104 /* AP485 USB-RS485 Converter */ +#define PAPOUCH_SB422_2_PID 0x0105 /* Papouch SB422 USB-RS422 Converter */ +#define PAPOUCH_SB485S_PID 0x0106 /* Papouch SB485S USB-485/422 Converter */ +#define PAPOUCH_SB485C_PID 0x0107 /* Papouch SB485C USB-485/422 Converter */ +#define PAPOUCH_LEC_PID 0x0300 /* LEC USB Converter */ +#define PAPOUCH_SB232_PID 0x0301 /* Papouch SB232 USB-RS232 Converter */ +#define PAPOUCH_TMU_PID 0x0400 /* TMU USB Thermometer */ +#define PAPOUCH_IRAMP_PID 0x0500 /* Papouch IRAmp Duplex */ +#define PAPOUCH_DRAK5_PID 0x0700 /* Papouch DRAK5 */ +#define PAPOUCH_QUIDO8x8_PID 0x0800 /* Papouch Quido 8/8 Module */ +#define PAPOUCH_QUIDO4x4_PID 0x0900 /* Papouch Quido 4/4 Module */ +#define PAPOUCH_QUIDO2x2_PID 0x0a00 /* Papouch Quido 2/2 Module */ +#define PAPOUCH_QUIDO10x1_PID 0x0b00 /* Papouch Quido 10/1 Module */ +#define PAPOUCH_QUIDO30x3_PID 0x0c00 /* Papouch Quido 30/3 Module */ +#define PAPOUCH_QUIDO60x3_PID 0x0d00 /* Papouch Quido 60(100)/3 Module */ +#define PAPOUCH_QUIDO2x16_PID 0x0e00 /* Papouch Quido 2/16 Module */ +#define PAPOUCH_QUIDO3x32_PID 0x0f00 /* Papouch Quido 3/32 Module */ +#define PAPOUCH_DRAK6_PID 0x1000 /* Papouch DRAK6 */ +#define PAPOUCH_UPSUSB_PID 0x8000 /* Papouch UPS-USB adapter */ +#define PAPOUCH_MU_PID 0x8001 /* MU controller */ +#define PAPOUCH_SIMUKEY_PID 0x8002 /* Papouch SimuKey */ +#define PAPOUCH_AD4USB_PID 0x8003 /* AD4USB Measurement Module */ +#define PAPOUCH_GMUX_PID 0x8004 /* Papouch GOLIATH MUX */ +#define PAPOUCH_GMSR_PID 0x8005 /* Papouch GOLIATH MSR */ + +/* + * Marvell SheevaPlug + */ +#define MARVELL_VID 0x9e88 +#define MARVELL_SHEEVAPLUG_PID 0x9e8f + +/* + * Evolution Robotics products (http://www.evolution.com/). + * Submitted by Shawn M. Lavelle.
+ */ +#define EVOLUTION_VID 0xDEEE /* Vendor ID */ +#define EVOLUTION_ER1_PID 0x0300 /* ER1 Control Module */ +#define EVO_8U232AM_PID 0x02FF /* Evolution robotics RCM2 (FT232AM)*/ +#define EVO_HYBRID_PID 0x0302 /* Evolution robotics RCM4 PID (FT232BM)*/ +#define EVO_RCM4_PID 0x0303 /* Evolution robotics RCM4 PID */ + +/* + * MJS Gadgets HD Radio / XM Radio / Sirius Radio interfaces (using VID 0x0403) + */ +#define MJSG_GENERIC_PID 0x9378 +#define MJSG_SR_RADIO_PID 0x9379 +#define MJSG_XM_RADIO_PID 0x937A +#define MJSG_HD_RADIO_PID 0x937C + +/* + * D.O.Tec products (http://www.directout.eu) + */ +#define FTDI_DOTEC_PID 0x9868 + +/* + * Xverve Signalyzer tools (http://www.signalyzer.com/) + */ +#define XVERVE_SIGNALYZER_ST_PID 0xBCA0 +#define XVERVE_SIGNALYZER_SLITE_PID 0xBCA1 +#define XVERVE_SIGNALYZER_SH2_PID 0xBCA2 +#define XVERVE_SIGNALYZER_SH4_PID 0xBCA4 + +/* + * Segway Robotic Mobility Platform USB interface (using VID 0x0403) + * Submitted by John G. Rogers + */ +#define SEGWAY_RMP200_PID 0xe729 + + +/* + * Accesio USB Data Acquisition products (http://www.accesio.com/) + */ +#define ACCESIO_COM4SM_PID 0xD578 + +/* www.sciencescope.co.uk educational dataloggers */ +#define FTDI_SCIENCESCOPE_LOGBOOKML_PID 0xFF18 +#define FTDI_SCIENCESCOPE_LS_LOGBOOK_PID 0xFF1C +#define FTDI_SCIENCESCOPE_HS_LOGBOOK_PID 0xFF1D + +/* + * CTI GmbH RS485 Converter http://www.cti-lean.com/ + */ +/* USB-485-Mini*/ +#define FTDI_CTI_MINI_PID 0xF608 +/* USB-Nano-485*/ +#define FTDI_CTI_NANO_PID 0xF60B + +/* + * ZeitControl cardsystems GmbH rfid-readers http://zeitconrol.de + */ +/* TagTracer MIFARE*/ +#define FTDI_ZEITCONTROL_TAGTRACE_MIFARE_PID 0xF7C0 + +/* + * Rainforest Automation + */ +/* ZigBee controller */ +#define FTDI_RF_R106 0x8A28 + +/* + * Product: HCP HIT GPRS modem + * Manufacturer: HCP d.o.o. + * ATI command output: Cinterion MC55i + */ +#define FTDI_CINTERION_MC55I_PID 0xA951 diff --git a/hw/usb/quirks-pl2303-ids.h b/hw/usb/quirks-pl2303-ids.h new file mode 100644 index 000000000..8dbdb46ff --- /dev/null +++ b/hw/usb/quirks-pl2303-ids.h @@ -0,0 +1,150 @@ +/* + * Prolific PL2303 USB to serial adaptor driver header file + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + */ + +#define BENQ_VENDOR_ID 0x04a5 +#define BENQ_PRODUCT_ID_S81 0x4027 + +#define PL2303_VENDOR_ID 0x067b +#define PL2303_PRODUCT_ID 0x2303 +#define PL2303_PRODUCT_ID_RSAQ2 0x04bb +#define PL2303_PRODUCT_ID_DCU11 0x1234 +#define PL2303_PRODUCT_ID_PHAROS 0xaaa0 +#define PL2303_PRODUCT_ID_RSAQ3 0xaaa2 +#define PL2303_PRODUCT_ID_ALDIGA 0x0611 +#define PL2303_PRODUCT_ID_MMX 0x0612 +#define PL2303_PRODUCT_ID_GPRS 0x0609 +#define PL2303_PRODUCT_ID_HCR331 0x331a +#define PL2303_PRODUCT_ID_MOTOROLA 0x0307 + +#define ATEN_VENDOR_ID 0x0557 +#define ATEN_VENDOR_ID2 0x0547 +#define ATEN_PRODUCT_ID 0x2008 + +#define IODATA_VENDOR_ID 0x04bb +#define IODATA_PRODUCT_ID 0x0a03 +#define IODATA_PRODUCT_ID_RSAQ5 0x0a0e + +#define ELCOM_VENDOR_ID 0x056e +#define ELCOM_PRODUCT_ID 0x5003 +#define ELCOM_PRODUCT_ID_UCSGT 0x5004 + +#define ITEGNO_VENDOR_ID 0x0eba +#define ITEGNO_PRODUCT_ID 0x1080 +#define ITEGNO_PRODUCT_ID_2080 0x2080 + +#define MA620_VENDOR_ID 0x0df7 +#define MA620_PRODUCT_ID 0x0620 + +#define RATOC_VENDOR_ID 0x0584 +#define RATOC_PRODUCT_ID 0xb000 + +#define TRIPP_VENDOR_ID 0x2478 +#define TRIPP_PRODUCT_ID 0x2008 + +#define RADIOSHACK_VENDOR_ID 0x1453 +#define RADIOSHACK_PRODUCT_ID 0x4026 + +#define DCU10_VENDOR_ID 0x0731 +#define DCU10_PRODUCT_ID 0x0528 + +#define SITECOM_VENDOR_ID 0x6189 +#define SITECOM_PRODUCT_ID 0x2068 + +/* Alcatel OT535/735 USB cable */ +#define ALCATEL_VENDOR_ID 0x11f7 +#define ALCATEL_PRODUCT_ID 0x02df + +/* Samsung I330 phone cradle */ +#define SAMSUNG_VENDOR_ID 0x04e8 +#define SAMSUNG_PRODUCT_ID 0x8001 + +#define SIEMENS_VENDOR_ID 0x11f5 +#define SIEMENS_PRODUCT_ID_SX1 0x0001 +#define SIEMENS_PRODUCT_ID_X65 0x0003 +#define SIEMENS_PRODUCT_ID_X75 0x0004 +#define SIEMENS_PRODUCT_ID_EF81 0x0005 + +#define SYNTECH_VENDOR_ID 0x0745 +#define SYNTECH_PRODUCT_ID 0x0001 + +/* Nokia CA-42 Cable */ +#define NOKIA_CA42_VENDOR_ID 0x078b +#define NOKIA_CA42_PRODUCT_ID 0x1234 + +/* CA-42 CLONE Cable www.ca-42.com chipset: Prolific Technology Inc */ +#define CA_42_CA42_VENDOR_ID 0x10b5 +#define CA_42_CA42_PRODUCT_ID 0xac70 + +#define SAGEM_VENDOR_ID 0x079b +#define SAGEM_PRODUCT_ID 0x0027 + +/* Leadtek GPS 9531 (ID 0413:2101) */ +#define LEADTEK_VENDOR_ID 0x0413 +#define LEADTEK_9531_PRODUCT_ID 0x2101 + +/* USB GSM cable from Speed Dragon Multimedia, Ltd */ +#define SPEEDDRAGON_VENDOR_ID 0x0e55 +#define SPEEDDRAGON_PRODUCT_ID 0x110b + +/* DATAPILOT Universal-2 Phone Cable */ +#define DATAPILOT_U2_VENDOR_ID 0x0731 +#define DATAPILOT_U2_PRODUCT_ID 0x2003 + +/* Belkin "F5U257" Serial Adapter */ +#define BELKIN_VENDOR_ID 0x050d +#define BELKIN_PRODUCT_ID 0x0257 + +/* Alcor Micro Corp. USB 2.0 TO RS-232 */ +#define ALCOR_VENDOR_ID 0x058F +#define ALCOR_PRODUCT_ID 0x9720 + +/* Willcom WS002IN Data Driver (by NetIndex Inc.) */ +#define WS002IN_VENDOR_ID 0x11f6 +#define WS002IN_PRODUCT_ID 0x2001 + +/* Corega CG-USBRS232R Serial Adapter */ +#define COREGA_VENDOR_ID 0x07aa +#define COREGA_PRODUCT_ID 0x002a + +/* Y.C. 
Cable U.S.A., Inc - USB to RS-232 */ +#define YCCABLE_VENDOR_ID 0x05ad +#define YCCABLE_PRODUCT_ID 0x0fba + +/* "Superial" USB - Serial */ +#define SUPERIAL_VENDOR_ID 0x5372 +#define SUPERIAL_PRODUCT_ID 0x2303 + +/* Hewlett-Packard LD220-HP POS Pole Display */ +#define HP_VENDOR_ID 0x03f0 +#define HP_LD220_PRODUCT_ID 0x3524 + +/* Cressi Edy (diving computer) PC interface */ +#define CRESSI_VENDOR_ID 0x04b8 +#define CRESSI_EDY_PRODUCT_ID 0x0521 + +/* Zeagle dive computer interface */ +#define ZEAGLE_VENDOR_ID 0x04b8 +#define ZEAGLE_N2ITION3_PRODUCT_ID 0x0522 + +/* Sony, USB data cable for CMD-Jxx mobile phones */ +#define SONY_VENDOR_ID 0x054c +#define SONY_QN3USB_PRODUCT_ID 0x0437 + +/* Sanwa KB-USB2 multimeter cable (ID: 11ad:0001) */ +#define SANWA_VENDOR_ID 0x11ad +#define SANWA_PRODUCT_ID 0x0001 + +/* ADLINK ND-6530 RS232,RS485 and RS422 adapter */ +#define ADLINK_VENDOR_ID 0x0b63 +#define ADLINK_ND6530_PRODUCT_ID 0x6530 + +/* SMART USB Serial Adapter */ +#define SMART_VENDOR_ID 0x0b8c +#define SMART_PRODUCT_ID 0x2303 diff --git a/hw/usb/quirks.c b/hw/usb/quirks.c new file mode 100644 index 000000000..23ea7a23e --- /dev/null +++ b/hw/usb/quirks.c @@ -0,0 +1,54 @@ +/* + * USB quirk handling + * + * Copyright (c) 2012 Red Hat, Inc. + * + * Red Hat Authors: + * Hans de Goede + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include "qemu/osdep.h" +#include "quirks.h" +#include "hw/usb.h" + +static bool usb_id_match(const struct usb_device_id *ids, + uint16_t vendor_id, uint16_t product_id, + uint8_t interface_class, uint8_t interface_subclass, + uint8_t interface_protocol) { + int i; + + for (i = 0; ids[i].terminating_entry == 0; i++) { + if (ids[i].vendor_id == vendor_id && + ids[i].product_id == product_id && + (ids[i].interface_protocol_used == 0 || + (ids[i].interface_class == interface_class && + ids[i].interface_subclass == interface_subclass && + ids[i].interface_protocol == interface_protocol))) { + return true; + } + } + return false; +} + +int usb_get_quirks(uint16_t vendor_id, uint16_t product_id, + uint8_t interface_class, uint8_t interface_subclass, + uint8_t interface_protocol) +{ + int quirks = 0; + + if (usb_id_match(usbredir_raw_serial_ids, vendor_id, product_id, + interface_class, interface_subclass, interface_protocol)) { + quirks |= USB_QUIRK_BUFFER_BULK_IN; + } + if (usb_id_match(usbredir_ftdi_serial_ids, vendor_id, product_id, + interface_class, interface_subclass, interface_protocol)) { + quirks |= USB_QUIRK_BUFFER_BULK_IN | USB_QUIRK_IS_FTDI; + } + + return quirks; +} diff --git a/hw/usb/quirks.h b/hw/usb/quirks.h new file mode 100644 index 000000000..c3e595f40 --- /dev/null +++ b/hw/usb/quirks.h @@ -0,0 +1,918 @@ +/* + * USB quirk handling + * + * Copyright (c) 2012 Red Hat, Inc. + * + * Red Hat Authors: + * Hans de Goede + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
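+ */ + +/* + * Minimal usage sketch for usb_get_quirks() from quirks.c (illustrative + * only: the caller and its variable names are assumptions, not part of + * this interface). A host-side redirection layer might, for example, + * query the tables once per device or interface and adapt its bulk-in + * handling: + * + * int quirks = usb_get_quirks(vendor_id, product_id, interface_class, + * interface_subclass, interface_protocol); + * if (quirks & USB_QUIRK_BUFFER_BULK_IN) { + * buffer bulk-in data until a complete packet has arrived + * } + * if (quirks & USB_QUIRK_IS_FTDI) { + * also allow for the in-band FTDI status bytes while buffering + * }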
+ */ + +#ifndef HW_USB_QUIRKS_H +#define HW_USB_QUIRKS_H + +/* 1 on 1 copy of linux/drivers/usb/serial/ftdi_sio_ids.h */ +#include "quirks-ftdi-ids.h" +/* 1 on 1 copy of linux/drivers/usb/serial/pl2303.h */ +#include "quirks-pl2303-ids.h" + +struct usb_device_id { + uint16_t vendor_id; + uint16_t product_id; + uint8_t interface_class; + uint8_t interface_subclass; + uint8_t interface_protocol; + uint8_t interface_protocol_used:1, /* if 0, match on VID/PID alone */ + terminating_entry:1, /* non-zero marks the end of a table */ + reserved:6; +}; + +#define USB_DEVICE(vendor, product) \ + .vendor_id = vendor, .product_id = product, .interface_protocol_used = 0, + +#define USB_DEVICE_AND_INTERFACE_INFO(vend, prod, iclass, isubclass, iproto) \ + .vendor_id = vend, .product_id = prod, .interface_class = iclass, \ + .interface_subclass = isubclass, .interface_protocol = iproto, \ + .interface_protocol_used = 1 + +static const struct usb_device_id usbredir_raw_serial_ids[] = { + /* + * Silicon Laboratories CP210x USB to RS232 serial adapter ids + * copied from linux/drivers/usb/serial/cp210x.c + * + * Copyright (C) 2005 Craig Shelley (craig@microtron.org.uk) + */ + { USB_DEVICE(0x045B, 0x0053) }, /* Renesas RX610 RX-Stick */ + { USB_DEVICE(0x0471, 0x066A) }, /* AKTAKOM ACE-1001 cable */ + { USB_DEVICE(0x0489, 0xE000) }, /* Pirelli Broadband S.p.A., DP-L10 SIP/GSM Mobile */ + { USB_DEVICE(0x0489, 0xE003) }, /* Pirelli Broadband S.p.A., DP-L10 SIP/GSM Mobile */ + { USB_DEVICE(0x0745, 0x1000) }, /* CipherLab USB CCD Barcode Scanner 1000 */ + { USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */ + { USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S, ZigBee/802.15.4 MAC Device */ + { USB_DEVICE(0x0BED, 0x1100) }, /* MEI (TM) Cashflow-SC Bill/Voucher Acceptor */ + { USB_DEVICE(0x0BED, 0x1101) }, /* MEI series 2000 Combo Acceptor */ + { USB_DEVICE(0x0FCF, 0x1003) }, /* Dynastream ANT development board */ + { USB_DEVICE(0x0FCF, 0x1004) }, /* Dynastream ANT2USB */ + { USB_DEVICE(0x0FCF, 0x1006) }, /* Dynastream ANT development board */ + { USB_DEVICE(0x10A6, 0xAA26) }, /* Knock-off DCU-11 cable */ + { USB_DEVICE(0x10AB, 0x10C5) }, /* Siemens MC60 Cable */ + { USB_DEVICE(0x10B5, 0xAC70) }, /* Nokia CA-42 USB */ + { USB_DEVICE(0x10C4, 0x0F91) }, /* Vstabi */ + { USB_DEVICE(0x10C4, 0x1101) }, /* Arkham Technology DS101 Bus Monitor */ + { USB_DEVICE(0x10C4, 0x1601) }, /* Arkham Technology DS101 Adapter */ + { USB_DEVICE(0x10C4, 0x800A) }, /* SPORTident BSM7-D-USB main station */ + { USB_DEVICE(0x10C4, 0x803B) }, /* Pololu USB-serial converter */ + { USB_DEVICE(0x10C4, 0x8044) }, /* Cygnal Debug Adapter */ + { USB_DEVICE(0x10C4, 0x804E) }, /* Software Bisque Paramount ME built-in converter */ + { USB_DEVICE(0x10C4, 0x8053) }, /* Enfora EDG1228 */ + { USB_DEVICE(0x10C4, 0x8054) }, /* Enfora GSM2228 */ + { USB_DEVICE(0x10C4, 0x8066) }, /* Argussoft In-System Programmer */ + { USB_DEVICE(0x10C4, 0x806F) }, /* IMS USB to RS422 Converter Cable */ + { USB_DEVICE(0x10C4, 0x807A) }, /* Crumb128 board */ + { USB_DEVICE(0x10C4, 0x80C4) }, /* Cygnal Integrated Products, Inc., Optris infrared thermometer */ + { USB_DEVICE(0x10C4, 0x80CA) }, /* Degree Controls Inc */ + { USB_DEVICE(0x10C4, 0x80DD) }, /* Tracient RFID */ + { USB_DEVICE(0x10C4, 0x80F6) }, /* Suunto sports instrument */ + { USB_DEVICE(0x10C4, 0x8115) }, /* Arygon NFC/Mifare Reader */ + { USB_DEVICE(0x10C4, 0x813D) }, /* Burnside Telecom Deskmobile */ + { USB_DEVICE(0x10C4, 0x813F) }, /* Tams Master Easy Control */ + { USB_DEVICE(0x10C4, 0x814A) }, /* West Mountain Radio RIGblaster P&P */ + {
USB_DEVICE(0x10C4, 0x814B) }, /* West Mountain Radio RIGtalk */ + { USB_DEVICE(0x10C4, 0x8156) }, /* B&G H3000 link cable */ + { USB_DEVICE(0x10C4, 0x815E) }, /* Helicomm IP-Link 1220-DVM */ + { USB_DEVICE(0x10C4, 0x815F) }, /* Timewave HamLinkUSB */ + { USB_DEVICE(0x10C4, 0x818B) }, /* AVIT Research USB to TTL */ + { USB_DEVICE(0x10C4, 0x819F) }, /* MJS USB Toslink Switcher */ + { USB_DEVICE(0x10C4, 0x81A6) }, /* ThinkOptics WavIt */ + { USB_DEVICE(0x10C4, 0x81A9) }, /* Multiplex RC Interface */ + { USB_DEVICE(0x10C4, 0x81AC) }, /* MSD Dash Hawk */ + { USB_DEVICE(0x10C4, 0x81AD) }, /* INSYS USB Modem */ + { USB_DEVICE(0x10C4, 0x81C8) }, /* Lipowsky Industrie Elektronik GmbH, Baby-JTAG */ + { USB_DEVICE(0x10C4, 0x81E2) }, /* Lipowsky Industrie Elektronik GmbH, Baby-LIN */ + { USB_DEVICE(0x10C4, 0x81E7) }, /* Aerocomm Radio */ + { USB_DEVICE(0x10C4, 0x81E8) }, /* Zephyr Bioharness */ + { USB_DEVICE(0x10C4, 0x81F2) }, /* C1007 HF band RFID controller */ + { USB_DEVICE(0x10C4, 0x8218) }, /* Lipowsky Industrie Elektronik GmbH, HARP-1 */ + { USB_DEVICE(0x10C4, 0x822B) }, /* Modem EDGE(GSM) Commander 2 */ + { USB_DEVICE(0x10C4, 0x826B) }, /* Cygnal Integrated Products, Inc., Fasttrax GPS demonstration module */ + { USB_DEVICE(0x10C4, 0x8293) }, /* Telegesis ETRX2USB */ + { USB_DEVICE(0x10C4, 0x82F9) }, /* Procyon AVS */ + { USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */ + { USB_DEVICE(0x10C4, 0x8382) }, /* Cygnal Integrated Products, Inc. */ + { USB_DEVICE(0x10C4, 0x83A8) }, /* Amber Wireless AMB2560 */ + { USB_DEVICE(0x10C4, 0x83D8) }, /* DekTec DTA Plus VHF/UHF Booster/Attenuator */ + { USB_DEVICE(0x10C4, 0x8411) }, /* Kyocera GPS Module */ + { USB_DEVICE(0x10C4, 0x8418) }, /* IRZ Automation Teleport SG-10 GSM/GPRS Modem */ + { USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */ + { USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */ + { USB_DEVICE(0x10C4, 0x85EA) }, /* AC-Services IBUS-IF */ + { USB_DEVICE(0x10C4, 0x85EB) }, /* AC-Services CIS-IBUS */ + { USB_DEVICE(0x10C4, 0x8664) }, /* AC-Services CAN-IF */ + { USB_DEVICE(0x10C4, 0x8665) }, /* AC-Services OBD-IF */ + { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */ + { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */ + { USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */ + { USB_DEVICE(0x10C4, 0xEA80) }, /* Silicon Labs factory default */ + { USB_DEVICE(0x10C4, 0xEA71) }, /* Infinity GPS-MIC-1 Radio Monophone */ + { USB_DEVICE(0x10C4, 0xF001) }, /* Elan Digital Systems USBscope50 */ + { USB_DEVICE(0x10C4, 0xF002) }, /* Elan Digital Systems USBwave12 */ + { USB_DEVICE(0x10C4, 0xF003) }, /* Elan Digital Systems USBpulse100 */ + { USB_DEVICE(0x10C4, 0xF004) }, /* Elan Digital Systems USBcount50 */ + { USB_DEVICE(0x10C5, 0xEA61) }, /* Silicon Labs MobiData GPRS USB Modem */ + { USB_DEVICE(0x10CE, 0xEA6A) }, /* Silicon Labs MobiData GPRS USB Modem 100EU */ + { USB_DEVICE(0x13AD, 0x9999) }, /* Baltech card reader */ + { USB_DEVICE(0x1555, 0x0004) }, /* Owen AC4 USB-RS485 Converter */ + { USB_DEVICE(0x166A, 0x0201) }, /* Clipsal 5500PACA C-Bus Pascal Automation Controller */ + { USB_DEVICE(0x166A, 0x0301) }, /* Clipsal 5800PC C-Bus Wireless PC Interface */ + { USB_DEVICE(0x166A, 0x0303) }, /* Clipsal 5500PCU C-Bus USB interface */ + { USB_DEVICE(0x166A, 0x0304) }, /* Clipsal 5000CT2 C-Bus Black and White Touchscreen */ + { USB_DEVICE(0x166A, 0x0305) }, /* Clipsal C-5000CT2 C-Bus Spectrum Colour Touchscreen */ + { USB_DEVICE(0x166A, 0x0401) }, /* Clipsal L51xx C-Bus
Architectural Dimmer */ + { USB_DEVICE(0x166A, 0x0101) }, /* Clipsal 5560884 C-Bus Multi-room Audio Matrix Switcher */ + { USB_DEVICE(0x16D6, 0x0001) }, /* Jablotron serial interface */ + { USB_DEVICE(0x16DC, 0x0010) }, /* W-IE-NE-R Plein & Baus GmbH PL512 Power Supply */ + { USB_DEVICE(0x16DC, 0x0011) }, /* W-IE-NE-R Plein & Baus GmbH RCM Remote Control for MARATON Power Supply */ + { USB_DEVICE(0x16DC, 0x0012) }, /* W-IE-NE-R Plein & Baus GmbH MPOD Multi Channel Power Supply */ + { USB_DEVICE(0x16DC, 0x0015) }, /* W-IE-NE-R Plein & Baus GmbH CML Control, Monitoring and Data Logger */ + { USB_DEVICE(0x17A8, 0x0001) }, /* Kamstrup Optical Eye/3-wire */ + { USB_DEVICE(0x17A8, 0x0005) }, /* Kamstrup M-Bus Master MultiPort 250D */ + { USB_DEVICE(0x17F4, 0xAAAA) }, /* Wavesense Jazz blood glucose meter */ + { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */ + { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */ + { USB_DEVICE(0x1BE3, 0x07A6) }, /* WAGO 750-923 USB Service Cable */ + { USB_DEVICE(0x1E29, 0x0102) }, /* Festo CPX-USB */ + { USB_DEVICE(0x1E29, 0x0501) }, /* Festo CMSP */ + { USB_DEVICE(0x3195, 0xF190) }, /* Link Instruments MSO-19 */ + { USB_DEVICE(0x3195, 0xF280) }, /* Link Instruments MSO-28 */ + { USB_DEVICE(0x3195, 0xF281) }, /* Link Instruments MSO-28 */ + { USB_DEVICE(0x413C, 0x9500) }, /* DW700 GPS USB interface */ + + /* + * Prolific pl2303 USB to RS232 serial adapter ids + * copied from linux/drivers/usb/serial/pl2303.c + * + * Copyright (C) 2001-2007 Greg Kroah-Hartman (greg@kroah.com) + * Copyright (C) 2003 IBM Corp. + */ + { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID) }, + { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_RSAQ2) }, + { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_DCU11) }, + { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_RSAQ3) }, + { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_PHAROS) }, + { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_ALDIGA) }, + { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_MMX) }, + { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_GPRS) }, + { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_HCR331) }, + { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_MOTOROLA) }, + { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID) }, + { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) }, + { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID) }, + { USB_DEVICE(ATEN_VENDOR_ID2, ATEN_PRODUCT_ID) }, + { USB_DEVICE(ELCOM_VENDOR_ID, ELCOM_PRODUCT_ID) }, + { USB_DEVICE(ELCOM_VENDOR_ID, ELCOM_PRODUCT_ID_UCSGT) }, + { USB_DEVICE(ITEGNO_VENDOR_ID, ITEGNO_PRODUCT_ID) }, + { USB_DEVICE(ITEGNO_VENDOR_ID, ITEGNO_PRODUCT_ID_2080) }, + { USB_DEVICE(MA620_VENDOR_ID, MA620_PRODUCT_ID) }, + { USB_DEVICE(RATOC_VENDOR_ID, RATOC_PRODUCT_ID) }, + { USB_DEVICE(TRIPP_VENDOR_ID, TRIPP_PRODUCT_ID) }, + { USB_DEVICE(RADIOSHACK_VENDOR_ID, RADIOSHACK_PRODUCT_ID) }, + { USB_DEVICE(DCU10_VENDOR_ID, DCU10_PRODUCT_ID) }, + { USB_DEVICE(SITECOM_VENDOR_ID, SITECOM_PRODUCT_ID) }, + { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_ID) }, + { USB_DEVICE(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_ID) }, + { USB_DEVICE(SIEMENS_VENDOR_ID, SIEMENS_PRODUCT_ID_SX1) }, + { USB_DEVICE(SIEMENS_VENDOR_ID, SIEMENS_PRODUCT_ID_X65) }, + { USB_DEVICE(SIEMENS_VENDOR_ID, SIEMENS_PRODUCT_ID_X75) }, + { USB_DEVICE(SIEMENS_VENDOR_ID, SIEMENS_PRODUCT_ID_EF81) }, + { USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_ID_S81) }, /* Benq/Siemens S81 */ + { USB_DEVICE(SYNTECH_VENDOR_ID, SYNTECH_PRODUCT_ID) }, + { USB_DEVICE(NOKIA_CA42_VENDOR_ID, NOKIA_CA42_PRODUCT_ID) }, + { 
USB_DEVICE(CA_42_CA42_VENDOR_ID, CA_42_CA42_PRODUCT_ID) }, + { USB_DEVICE(SAGEM_VENDOR_ID, SAGEM_PRODUCT_ID) }, + { USB_DEVICE(LEADTEK_VENDOR_ID, LEADTEK_9531_PRODUCT_ID) }, + { USB_DEVICE(SPEEDDRAGON_VENDOR_ID, SPEEDDRAGON_PRODUCT_ID) }, + { USB_DEVICE(DATAPILOT_U2_VENDOR_ID, DATAPILOT_U2_PRODUCT_ID) }, + { USB_DEVICE(BELKIN_VENDOR_ID, BELKIN_PRODUCT_ID) }, + { USB_DEVICE(ALCOR_VENDOR_ID, ALCOR_PRODUCT_ID) }, + { USB_DEVICE(WS002IN_VENDOR_ID, WS002IN_PRODUCT_ID) }, + { USB_DEVICE(COREGA_VENDOR_ID, COREGA_PRODUCT_ID) }, + { USB_DEVICE(YCCABLE_VENDOR_ID, YCCABLE_PRODUCT_ID) }, + { USB_DEVICE(SUPERIAL_VENDOR_ID, SUPERIAL_PRODUCT_ID) }, + { USB_DEVICE(HP_VENDOR_ID, HP_LD220_PRODUCT_ID) }, + { USB_DEVICE(CRESSI_VENDOR_ID, CRESSI_EDY_PRODUCT_ID) }, + { USB_DEVICE(ZEAGLE_VENDOR_ID, ZEAGLE_N2ITION3_PRODUCT_ID) }, + { USB_DEVICE(SONY_VENDOR_ID, SONY_QN3USB_PRODUCT_ID) }, + { USB_DEVICE(SANWA_VENDOR_ID, SANWA_PRODUCT_ID) }, + { USB_DEVICE(ADLINK_VENDOR_ID, ADLINK_ND6530_PRODUCT_ID) }, + { USB_DEVICE(SMART_VENDOR_ID, SMART_PRODUCT_ID) }, + + { .terminating_entry = 1 } /* Terminating Entry */ +}; + +static const struct usb_device_id usbredir_ftdi_serial_ids[] = { + /* + * FTDI USB to RS232 serial adapter ids + * copied from linux/drivers/usb/serial/ftdi_sio.c + * + * Copyright (C) 2009 - 2010 + * Johan Hovold (jhovold@gmail.com) + * Copyright (C) 1999 - 2001 + * Greg Kroah-Hartman (greg@kroah.com) + * Bill Ryder (bryder@sgi.com) + * Copyright (C) 2002 + * Kuba Ober (kuba@mareimbrium.org) + */ + { USB_DEVICE(FTDI_VID, FTDI_ZEITCONTROL_TAGTRACE_MIFARE_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_CTI_MINI_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_CTI_NANO_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_AMC232_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_CANUSB_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_CANDAPTER_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_NXTCAM_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_0_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_1_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_2_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_3_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_4_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_5_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_6_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_7_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_USINT_CAT_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_USINT_WKEY_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_USINT_RS232_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_ACTZWAVE_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_IRTRANS_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_IPLUS_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_IPLUS2_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_DMX4ALL) }, + { USB_DEVICE(FTDI_VID, FTDI_SIO_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_8U232AM_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_8U232AM_ALT_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_232RL_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_8U2232C_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_4232H_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_232H_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_FTX_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_MICRO_CHAMELEON_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_RELAIS_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_SNIFFER_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_THROTTLE_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_GATEWAY_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_GBM_PID) }, + { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_IOBOARD_PID) }, + { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_MINI_IOBOARD_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_SPROG_II) }, + { USB_DEVICE(FTDI_VID, 
FTDI_LENZ_LIUSB_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_XF_632_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_XF_634_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_XF_547_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_XF_633_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_XF_631_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_XF_635_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_XF_640_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_XF_642_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_DSS20_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_URBAN_0_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_URBAN_1_PID) }, + { USB_DEVICE(FTDI_NF_RIC_VID, FTDI_NF_RIC_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_VNHCPCUSB_D_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_MTXORB_0_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_MTXORB_1_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_MTXORB_2_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_MTXORB_3_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_MTXORB_4_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_MTXORB_5_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_MTXORB_6_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_R2000KU_TRUE_RNG) }, + { USB_DEVICE(FTDI_VID, FTDI_VARDAAN_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0100_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0101_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0102_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0103_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0104_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0105_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0106_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0107_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0108_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0109_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_010A_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_010B_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_010C_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_010D_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_010E_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_010F_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0110_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0111_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0112_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0113_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0114_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0115_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0116_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0117_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0118_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0119_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_011A_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_011B_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_011C_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_011D_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_011E_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_011F_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0120_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0121_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0122_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0123_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0124_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0125_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0126_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0127_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0128_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0129_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_012A_PID) }, + { 
USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_012B_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_012C_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_012D_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_012E_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_012F_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0130_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0131_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0132_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0133_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0134_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0135_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0136_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0137_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0138_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0139_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_013A_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_013B_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_013C_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_013D_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_013E_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_013F_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0140_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0141_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0142_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0143_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0144_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0145_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0146_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0147_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0148_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0149_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_014A_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_014B_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_014C_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_014D_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_014E_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_014F_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0150_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0151_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0152_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0153_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0154_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0155_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0156_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0157_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0158_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0159_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_015A_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_015B_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_015C_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_015D_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_015E_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_015F_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0160_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0161_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0162_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0163_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0164_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0165_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0166_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0167_PID) }, + { 
USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0168_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0169_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_016A_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_016B_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_016C_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_016D_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_016E_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_016F_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0170_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0171_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0172_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0173_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0174_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0175_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0176_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0177_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0178_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0179_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_017A_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_017B_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_017C_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_017D_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_017E_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_017F_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0180_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0181_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0182_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0183_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0184_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0185_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0186_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0187_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0188_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0189_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_018A_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_018B_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_018C_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_018D_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_018E_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_018F_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0190_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0191_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0192_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0193_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0194_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0195_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0196_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0197_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0198_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0199_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_019A_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_019B_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_019C_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_019D_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_019E_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_019F_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01A0_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01A1_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01A2_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01A3_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01A4_PID) }, + { 
USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01A5_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01A6_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01A7_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01A8_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01A9_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01AA_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01AB_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01AC_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01AD_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01AE_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01AF_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01B0_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01B1_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01B2_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01B3_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01B4_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01B5_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01B6_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01B7_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01B8_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01B9_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01BA_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01BB_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01BC_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01BD_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01BE_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01BF_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01C0_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01C1_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01C2_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01C3_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01C4_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01C5_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01C6_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01C7_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01C8_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01C9_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01CA_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01CB_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01CC_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01CD_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01CE_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01CF_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01D0_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01D1_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01D2_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01D3_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01D4_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01D5_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01D6_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01D7_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01D8_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01D9_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01DA_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01DB_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01DC_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01DD_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01DE_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01DF_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01E0_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01E1_PID) }, + { 
USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01E2_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01E3_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01E4_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01E5_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01E6_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01E7_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01E8_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01E9_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01EA_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01EB_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01EC_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01ED_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01EE_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01EF_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01F0_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01F1_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01F2_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01F3_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01F4_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01F5_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01F6_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01F7_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01F8_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01F9_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01FA_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01FB_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01FC_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01FD_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01FE_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01FF_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_PERLE_ULTRAPORT_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_PIEGROUP_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_TNC_X_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_USBX_707_PID) }, + { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2101_PID) }, + { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2102_PID) }, + { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2103_PID) }, + { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2104_PID) }, + { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2106_PID) }, + { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2201_1_PID) }, + { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2201_2_PID) }, + { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2202_1_PID) }, + { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2202_2_PID) }, + { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2203_1_PID) }, + { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2203_2_PID) }, + { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2401_1_PID) }, + { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2401_2_PID) }, + { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2401_3_PID) }, + { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2401_4_PID) }, + { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2402_1_PID) }, + { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2402_2_PID) }, + { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2402_3_PID) }, + { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2402_4_PID) }, + { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2403_1_PID) }, + { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2403_2_PID) }, + { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2403_3_PID) }, + { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2403_4_PID) }, + { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2801_1_PID) }, + { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2801_2_PID) }, + { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2801_3_PID) }, + { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2801_4_PID) }, + { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2801_5_PID) }, + { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2801_6_PID) }, + { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2801_7_PID) }, + { USB_DEVICE(SEALEVEL_VID, 
SEALEVEL_2801_8_PID) }, + { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2802_1_PID) }, + { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2802_2_PID) }, + { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2802_3_PID) }, + { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2802_4_PID) }, + { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2802_5_PID) }, + { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2802_6_PID) }, + { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2802_7_PID) }, + { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2802_8_PID) }, + { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2803_1_PID) }, + { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2803_2_PID) }, + { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2803_3_PID) }, + { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2803_4_PID) }, + { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2803_5_PID) }, + { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2803_6_PID) }, + { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2803_7_PID) }, + { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2803_8_PID) }, + { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2803R_1_PID) }, + { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2803R_2_PID) }, + { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2803R_3_PID) }, + { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2803R_4_PID) }, + { USB_DEVICE(IDTECH_VID, IDTECH_IDT1221U_PID) }, + { USB_DEVICE(OCT_VID, OCT_US101_PID) }, + { USB_DEVICE(OCT_VID, OCT_DK201_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_HE_TIRA1_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_USB_UIRT_PID) }, + { USB_DEVICE(FTDI_VID, PROTEGO_SPECIAL_1) }, + { USB_DEVICE(FTDI_VID, PROTEGO_R2X0) }, + { USB_DEVICE(FTDI_VID, PROTEGO_SPECIAL_3) }, + { USB_DEVICE(FTDI_VID, PROTEGO_SPECIAL_4) }, + { USB_DEVICE(FTDI_VID, FTDI_GUDEADS_E808_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_GUDEADS_E809_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_GUDEADS_E80A_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_GUDEADS_E80B_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_GUDEADS_E80C_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_GUDEADS_E80D_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_GUDEADS_E80E_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_GUDEADS_E80F_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_GUDEADS_E888_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_GUDEADS_E889_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_GUDEADS_E88A_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_GUDEADS_E88B_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_GUDEADS_E88C_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_GUDEADS_E88D_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_GUDEADS_E88E_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_GUDEADS_E88F_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_ELV_UO100_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_ELV_UM100_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_ELV_UR100_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_ELV_ALC8500_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_PYRAMID_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_ELV_FHZ1000PC_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_IBS_US485_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_IBS_PICPRO_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_IBS_PCMCIA_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_IBS_PK1_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_IBS_RS232MON_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_IBS_APP70_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_IBS_PEDO_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_IBS_PROD_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_TAVIR_STK500_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_TIAO_UMPA_PID) }, + /* + * ELV devices: + */ + { USB_DEVICE(FTDI_VID, FTDI_ELV_USR_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_ELV_MSM1_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_ELV_KL100_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_ELV_WS550_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_ELV_EC3000_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_ELV_WS888_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_ELV_TWS550_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_ELV_FEM_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_ELV_CLI7000_PID) }, + { 
USB_DEVICE(FTDI_VID, FTDI_ELV_PPS7330_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_ELV_TFM100_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_ELV_UDF77_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_ELV_UIO88_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_ELV_UAD8_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_ELV_UDA7_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_ELV_USI2_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_ELV_T1100_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_ELV_PCD200_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_ELV_ULA200_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_ELV_CSI8_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_ELV_EM1000DL_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_ELV_PCK100_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_ELV_RFP500_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_ELV_FS20SIG_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_ELV_UTP8_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_ELV_WS300PC_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_ELV_WS444PC_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_ELV_FHZ1300PC_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_ELV_EM1010PC_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_ELV_WS500_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_ELV_HS485_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_ELV_UMS100_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_ELV_TFD128_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_ELV_FM3RX_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_ELV_WS777_PID) }, + { USB_DEVICE(FTDI_VID, LINX_SDMUSBQSS_PID) }, + { USB_DEVICE(FTDI_VID, LINX_MASTERDEVEL2_PID) }, + { USB_DEVICE(FTDI_VID, LINX_FUTURE_0_PID) }, + { USB_DEVICE(FTDI_VID, LINX_FUTURE_1_PID) }, + { USB_DEVICE(FTDI_VID, LINX_FUTURE_2_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_CCSICDU20_0_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_CCSICDU40_1_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_CCSMACHX_2_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_CCSLOAD_N_GO_3_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_CCSICDU64_4_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_CCSPRIME8_5_PID) }, + { USB_DEVICE(FTDI_VID, INSIDE_ACCESSO) }, + { USB_DEVICE(INTREPID_VID, INTREPID_VALUECAN_PID) }, + { USB_DEVICE(INTREPID_VID, INTREPID_NEOVI_PID) }, + { USB_DEVICE(FALCOM_VID, FALCOM_TWIST_PID) }, + { USB_DEVICE(FALCOM_VID, FALCOM_SAMBA_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_SUUNTO_SPORTS_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_OCEANIC_PID) }, + { USB_DEVICE(TTI_VID, TTI_QL355P_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_RM_CANVIEW_PID) }, + { USB_DEVICE(ACTON_VID, ACTON_SPECTRAPRO_PID) }, + { USB_DEVICE(CONTEC_VID, CONTEC_COM1USBH_PID) }, + { USB_DEVICE(BANDB_VID, BANDB_USOTL4_PID) }, + { USB_DEVICE(BANDB_VID, BANDB_USTL4_PID) }, + { USB_DEVICE(BANDB_VID, BANDB_USO9ML2_PID) }, + { USB_DEVICE(BANDB_VID, BANDB_USOPTL4_PID) }, + { USB_DEVICE(BANDB_VID, BANDB_USPTL4_PID) }, + { USB_DEVICE(BANDB_VID, BANDB_USO9ML2DR_2_PID) }, + { USB_DEVICE(BANDB_VID, BANDB_USO9ML2DR_PID) }, + { USB_DEVICE(BANDB_VID, BANDB_USOPTL4DR2_PID) }, + { USB_DEVICE(BANDB_VID, BANDB_USOPTL4DR_PID) }, + { USB_DEVICE(BANDB_VID, BANDB_485USB9F_2W_PID) }, + { USB_DEVICE(BANDB_VID, BANDB_485USB9F_4W_PID) }, + { USB_DEVICE(BANDB_VID, BANDB_232USB9M_PID) }, + { USB_DEVICE(BANDB_VID, BANDB_485USBTB_2W_PID) }, + { USB_DEVICE(BANDB_VID, BANDB_485USBTB_4W_PID) }, + { USB_DEVICE(BANDB_VID, BANDB_TTL5USB9M_PID) }, + { USB_DEVICE(BANDB_VID, BANDB_TTL3USB9M_PID) }, + { USB_DEVICE(BANDB_VID, BANDB_ZZ_PROG1_USB_PID) }, + { USB_DEVICE(FTDI_VID, EVER_ECO_PRO_CDS) }, + { USB_DEVICE(FTDI_VID, FTDI_4N_GALAXY_DE_1_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_4N_GALAXY_DE_2_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_4N_GALAXY_DE_3_PID) }, + { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_0_PID) }, + { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_1_PID) }, + { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_2_PID) }, + 
{ USB_DEVICE(FTDI_VID, XSENS_CONVERTER_3_PID) }, + { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_4_PID) }, + { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_5_PID) }, + { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_6_PID) }, + { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_7_PID) }, + { USB_DEVICE(MOBILITY_VID, MOBILITY_USB_SERIAL_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_ACTIVE_ROBOTS_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_MHAM_KW_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_MHAM_YS_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_MHAM_Y6_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_MHAM_Y8_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_MHAM_IC_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_MHAM_DB9_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_MHAM_RS232_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_MHAM_Y9_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_TERATRONIK_VCP_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_TERATRONIK_D2XX_PID) }, + { USB_DEVICE(EVOLUTION_VID, EVOLUTION_ER1_PID) }, + { USB_DEVICE(EVOLUTION_VID, EVO_HYBRID_PID) }, + { USB_DEVICE(EVOLUTION_VID, EVO_RCM4_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_ARTEMIS_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_ATIK_ATK16_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_ATIK_ATK16C_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_ATIK_ATK16HR_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_ATIK_ATK16HRC_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_ATIK_ATK16IC_PID) }, + { USB_DEVICE(KOBIL_VID, KOBIL_CONV_B1_PID) }, + { USB_DEVICE(KOBIL_VID, KOBIL_CONV_KAAN_PID) }, + { USB_DEVICE(POSIFLEX_VID, POSIFLEX_PP7000_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_TTUSB_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_ECLO_COM_1WIRE_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_WESTREX_MODEL_777_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_WESTREX_MODEL_8900F_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_PCDJ_DAC2_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_RRCIRKITS_LOCOBUFFER_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_ASK_RDR400_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_NZR_SEM_USB_PID) }, + { USB_DEVICE(ICOM_VID, ICOM_ID_1_PID) }, + { USB_DEVICE(ICOM_VID, ICOM_OPC_U_UC_PID) }, + { USB_DEVICE(ICOM_VID, ICOM_ID_RP2C1_PID) }, + { USB_DEVICE(ICOM_VID, ICOM_ID_RP2C2_PID) }, + { USB_DEVICE(ICOM_VID, ICOM_ID_RP2D_PID) }, + { USB_DEVICE(ICOM_VID, ICOM_ID_RP2VT_PID) }, + { USB_DEVICE(ICOM_VID, ICOM_ID_RP2VR_PID) }, + { USB_DEVICE(ICOM_VID, ICOM_ID_RP4KVT_PID) }, + { USB_DEVICE(ICOM_VID, ICOM_ID_RP4KVR_PID) }, + { USB_DEVICE(ICOM_VID, ICOM_ID_RP2KVT_PID) }, + { USB_DEVICE(ICOM_VID, ICOM_ID_RP2KVR_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_ACG_HFDUAL_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_YEI_SERVOCENTER31_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_THORLABS_PID) }, + { USB_DEVICE(TESTO_VID, TESTO_USB_INTERFACE_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_GAMMA_SCOUT_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_TACTRIX_OPENPORT_13M_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_TACTRIX_OPENPORT_13S_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_TACTRIX_OPENPORT_13U_PID) }, + { USB_DEVICE(ELEKTOR_VID, ELEKTOR_FT323R_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_NDI_HUC_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_NDI_SPECTRA_SCU_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_NDI_FUTURE_2_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_NDI_FUTURE_3_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_NDI_AURORA_SCU_PID) }, + { USB_DEVICE(TELLDUS_VID, TELLDUS_TELLSTICK_PID) }, + { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_SERIAL_VX7_PID) }, + { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_CT29B_PID) }, + { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_RTS01_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_MAXSTREAM_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_PHI_FISCO_PID) }, + { USB_DEVICE(TML_VID, TML_USB_SERIAL_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_ELSTER_UNICOM_PID) }, + { USB_DEVICE(FTDI_VID, 
FTDI_PROPOX_JTAGCABLEII_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_PROPOX_ISPCABLEIII_PID) }, + { USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_PID) }, + { USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_H_PID) }, + { USB_DEVICE(FIC_VID, FIC_NEO1973_DEBUG_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_OOCDLINK_PID) }, + { USB_DEVICE(FTDI_VID, LMI_LM3S_DEVEL_BOARD_PID) }, + { USB_DEVICE(FTDI_VID, LMI_LM3S_EVAL_BOARD_PID) }, + { USB_DEVICE(FTDI_VID, LMI_LM3S_ICDI_BOARD_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_TURTELIZER_PID) }, + { USB_DEVICE(RATOC_VENDOR_ID, RATOC_PRODUCT_ID_USB60F) }, + { USB_DEVICE(FTDI_VID, FTDI_REU_TINY_PID) }, + + /* Papouch devices based on FTDI chip */ + { USB_DEVICE(PAPOUCH_VID, PAPOUCH_SB485_PID) }, + { USB_DEVICE(PAPOUCH_VID, PAPOUCH_AP485_PID) }, + { USB_DEVICE(PAPOUCH_VID, PAPOUCH_SB422_PID) }, + { USB_DEVICE(PAPOUCH_VID, PAPOUCH_SB485_2_PID) }, + { USB_DEVICE(PAPOUCH_VID, PAPOUCH_AP485_2_PID) }, + { USB_DEVICE(PAPOUCH_VID, PAPOUCH_SB422_2_PID) }, + { USB_DEVICE(PAPOUCH_VID, PAPOUCH_SB485S_PID) }, + { USB_DEVICE(PAPOUCH_VID, PAPOUCH_SB485C_PID) }, + { USB_DEVICE(PAPOUCH_VID, PAPOUCH_LEC_PID) }, + { USB_DEVICE(PAPOUCH_VID, PAPOUCH_SB232_PID) }, + { USB_DEVICE(PAPOUCH_VID, PAPOUCH_TMU_PID) }, + { USB_DEVICE(PAPOUCH_VID, PAPOUCH_IRAMP_PID) }, + { USB_DEVICE(PAPOUCH_VID, PAPOUCH_DRAK5_PID) }, + { USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO8x8_PID) }, + { USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO4x4_PID) }, + { USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO2x2_PID) }, + { USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO10x1_PID) }, + { USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO30x3_PID) }, + { USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO60x3_PID) }, + { USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO2x16_PID) }, + { USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO3x32_PID) }, + { USB_DEVICE(PAPOUCH_VID, PAPOUCH_DRAK6_PID) }, + { USB_DEVICE(PAPOUCH_VID, PAPOUCH_UPSUSB_PID) }, + { USB_DEVICE(PAPOUCH_VID, PAPOUCH_MU_PID) }, + { USB_DEVICE(PAPOUCH_VID, PAPOUCH_SIMUKEY_PID) }, + { USB_DEVICE(PAPOUCH_VID, PAPOUCH_AD4USB_PID) }, + { USB_DEVICE(PAPOUCH_VID, PAPOUCH_GMUX_PID) }, + { USB_DEVICE(PAPOUCH_VID, PAPOUCH_GMSR_PID) }, + + { USB_DEVICE(FTDI_VID, FTDI_DOMINTELL_DGQG_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_DOMINTELL_DUSB_PID) }, + { USB_DEVICE(ALTI2_VID, ALTI2_N3_PID) }, + { USB_DEVICE(FTDI_VID, DIEBOLD_BCS_SE923_PID) }, + { USB_DEVICE(ATMEL_VID, STK541_PID) }, + { USB_DEVICE(DE_VID, STB_PID) }, + { USB_DEVICE(DE_VID, WHT_PID) }, + { USB_DEVICE(ADI_VID, ADI_GNICE_PID) }, + { USB_DEVICE(ADI_VID, ADI_GNICEPLUS_PID) }, + { USB_DEVICE_AND_INTERFACE_INFO(MICROCHIP_VID, MICROCHIP_USB_BOARD_PID, + 0xff, 0xff, 0x00) }, + { USB_DEVICE(JETI_VID, JETI_SPC1201_PID) }, + { USB_DEVICE(MARVELL_VID, MARVELL_SHEEVAPLUG_PID) }, + { USB_DEVICE(LARSENBRUSGAARD_VID, LB_ALTITRACK_PID) }, + { USB_DEVICE(GN_OTOMETRICS_VID, AURICAL_USB_PID) }, + { USB_DEVICE(FTDI_VID, PI_C865_PID) }, + { USB_DEVICE(FTDI_VID, PI_C857_PID) }, + { USB_DEVICE(PI_VID, PI_C866_PID) }, + { USB_DEVICE(PI_VID, PI_C663_PID) }, + { USB_DEVICE(PI_VID, PI_C725_PID) }, + { USB_DEVICE(PI_VID, PI_E517_PID) }, + { USB_DEVICE(PI_VID, PI_C863_PID) }, + { USB_DEVICE(PI_VID, PI_E861_PID) }, + { USB_DEVICE(PI_VID, PI_C867_PID) }, + { USB_DEVICE(PI_VID, PI_E609_PID) }, + { USB_DEVICE(PI_VID, PI_E709_PID) }, + { USB_DEVICE(PI_VID, PI_100F_PID) }, + { USB_DEVICE(PI_VID, PI_1011_PID) }, + { USB_DEVICE(PI_VID, PI_1012_PID) }, + { USB_DEVICE(PI_VID, PI_1013_PID) }, + { USB_DEVICE(PI_VID, PI_1014_PID) }, + { USB_DEVICE(PI_VID, PI_1015_PID) }, + { USB_DEVICE(PI_VID, PI_1016_PID) }, + { USB_DEVICE(KONDO_VID, KONDO_USB_SERIAL_PID) 
}, + { USB_DEVICE(BAYER_VID, BAYER_CONTOUR_CABLE_PID) }, + { USB_DEVICE(FTDI_VID, MARVELL_OPENRD_PID) }, + { USB_DEVICE(FTDI_VID, TI_XDS100V2_PID) }, + { USB_DEVICE(FTDI_VID, HAMEG_HO820_PID) }, + { USB_DEVICE(FTDI_VID, HAMEG_HO720_PID) }, + { USB_DEVICE(FTDI_VID, HAMEG_HO730_PID) }, + { USB_DEVICE(FTDI_VID, HAMEG_HO870_PID) }, + { USB_DEVICE(FTDI_VID, MJSG_GENERIC_PID) }, + { USB_DEVICE(FTDI_VID, MJSG_SR_RADIO_PID) }, + { USB_DEVICE(FTDI_VID, MJSG_HD_RADIO_PID) }, + { USB_DEVICE(FTDI_VID, MJSG_XM_RADIO_PID) }, + { USB_DEVICE(FTDI_VID, XVERVE_SIGNALYZER_ST_PID) }, + { USB_DEVICE(FTDI_VID, XVERVE_SIGNALYZER_SLITE_PID) }, + { USB_DEVICE(FTDI_VID, XVERVE_SIGNALYZER_SH2_PID) }, + { USB_DEVICE(FTDI_VID, XVERVE_SIGNALYZER_SH4_PID) }, + { USB_DEVICE(FTDI_VID, SEGWAY_RMP200_PID) }, + { USB_DEVICE(FTDI_VID, ACCESIO_COM4SM_PID) }, + { USB_DEVICE(IONICS_VID, IONICS_PLUGCOMPUTER_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_24_MASTER_WING_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_PC_WING_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_USB_DMX_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_MIDI_TIMECODE_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_MINI_WING_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_MAXI_WING_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_MEDIA_WING_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_WING_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_LOGBOOKML_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_LS_LOGBOOK_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_HS_LOGBOOK_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_CINTERION_MC55I_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_DOTEC_PID) }, + { USB_DEVICE(ST_VID, ST_STMCLT1030_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_RF_R106) }, + { USB_DEVICE(FTDI_VID, FTDI_DISTORTEC_JTAG_LOCK_PICK_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_LUMEL_PD12_PID) }, + + { .terminating_entry = 1 } /* Terminating Entry */ +}; + +#undef USB_DEVICE +#undef USB_DEVICE_AND_INTERFACE_INFO + +#endif diff --git a/hw/usb/redirect.c b/hw/usb/redirect.c new file mode 100644 index 000000000..5f0ef9cb3 --- /dev/null +++ b/hw/usb/redirect.c @@ -0,0 +1,2618 @@ +/* + * USB redirector usb-guest + * + * Copyright (c) 2011-2012 Red Hat, Inc. + * + * Red Hat Authors: + * Hans de Goede + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include "qemu-common.h" +#include "qemu/units.h" +#include "qapi/error.h" +#include "qemu/timer.h" +#include "sysemu/runstate.h" +#include "sysemu/sysemu.h" +#include "qapi/qmp/qerror.h" +#include "qemu/error-report.h" +#include "qemu/iov.h" +#include "qemu/module.h" +#include "chardev/char-fe.h" + +#include <usbredirparser.h> +#include <usbredirfilter.h> + +#include "hw/qdev-properties.h" +#include "hw/qdev-properties-system.h" +#include "hw/usb.h" +#include "migration/qemu-file-types.h" +#include "migration/vmstate.h" +#include "qom/object.h" + +/* ERROR is defined below. Remove any previous definition. */ +#undef ERROR + +#define MAX_ENDPOINTS 32 +#define NO_INTERFACE_INFO 255 /* Valid interface_count always <= 32 */ +#define EP2I(ep_address) (((ep_address & 0x80) >> 3) | (ep_address & 0x0f)) +#define I2EP(i) (((i & 0x10) << 3) | (i & 0x0f)) +#define USBEP2I(usb_ep) (((usb_ep)->pid == USB_TOKEN_IN) ? \ + ((usb_ep)->nr | 0x10) : ((usb_ep)->nr)) +#define I2USBEP(d, i) (usb_ep_get(&(d)->dev, \ + ((i) & 0x10) ? USB_TOKEN_IN : USB_TOKEN_OUT, \ + (i) & 0x0f)) + +#ifndef USBREDIR_VERSION /* This is not defined in older usbredir versions */ +#define USBREDIR_VERSION 0 +#endif + +typedef struct USBRedirDevice USBRedirDevice; + +/* Struct to hold buffered packets */ +struct buf_packet { + uint8_t *data; + void *free_on_destroy; + uint16_t len; + uint16_t offset; + uint8_t status; + QTAILQ_ENTRY(buf_packet)next; +}; + +struct endp_data { + USBRedirDevice *dev; + uint8_t type; + uint8_t interval; + uint8_t interface; /* bInterfaceNumber this ep belongs to */ + uint16_t max_packet_size; /* In bytes, not wMaxPacketSize format !! */ + uint32_t max_streams; + uint8_t iso_started; + uint8_t iso_error; /* For reporting iso errors to the HC */ + uint8_t interrupt_started; + uint8_t interrupt_error; + uint8_t bulk_receiving_enabled; + uint8_t bulk_receiving_started; + uint8_t bufpq_prefilled; + uint8_t bufpq_dropping_packets; + QTAILQ_HEAD(, buf_packet) bufpq; + int32_t bufpq_size; + int32_t bufpq_target_size; + USBPacket *pending_async_packet; +}; + +struct PacketIdQueueEntry { + uint64_t id; + QTAILQ_ENTRY(PacketIdQueueEntry)next; +}; + +struct PacketIdQueue { + USBRedirDevice *dev; + const char *name; + QTAILQ_HEAD(, PacketIdQueueEntry) head; + int size; +}; + +struct USBRedirDevice { + USBDevice dev; + /* Properties */ + CharBackend cs; + bool enable_streams; + bool suppress_remote_wake; + bool in_write; + uint8_t debug; + int32_t bootindex; + char *filter_str; + /* Data passed from the chardev fd_read cb to the usbredirparser read cb */ + const uint8_t *read_buf; + int read_buf_size; + /* Active chardev-watch-tag */ + guint watch; + /* For async handling of close / reject */ + QEMUBH *chardev_close_bh; + QEMUBH *device_reject_bh; + /* To delay the usb attach in case of quick chardev close + open */ + QEMUTimer *attach_timer; + int64_t next_attach_time; + struct usbredirparser *parser; + struct endp_data endpoint[MAX_ENDPOINTS]; + struct PacketIdQueue cancelled; + struct PacketIdQueue already_in_flight; + void (*buffered_bulk_in_complete)(USBRedirDevice *, USBPacket *, uint8_t); + /* Data for device filtering */ + struct usb_redir_device_connect_header device_info; + struct usb_redir_interface_info_header interface_info; + struct usbredirfilter_rule *filter_rules; + int filter_rules_count; + int compatible_speedmask; + VMChangeStateEntry *vmstate; +}; + +#define TYPE_USB_REDIR "usb-redir" +DECLARE_INSTANCE_CHECKER(USBRedirDevice, USB_REDIRECT, + TYPE_USB_REDIR) + +static void usbredir_hello(void *priv, 
struct usb_redir_hello_header *h); +static void usbredir_device_connect(void *priv, + struct usb_redir_device_connect_header *device_connect); +static void usbredir_device_disconnect(void *priv); +static void usbredir_interface_info(void *priv, + struct usb_redir_interface_info_header *interface_info); +static void usbredir_ep_info(void *priv, + struct usb_redir_ep_info_header *ep_info); +static void usbredir_configuration_status(void *priv, uint64_t id, + struct usb_redir_configuration_status_header *configuration_status); +static void usbredir_alt_setting_status(void *priv, uint64_t id, + struct usb_redir_alt_setting_status_header *alt_setting_status); +static void usbredir_iso_stream_status(void *priv, uint64_t id, + struct usb_redir_iso_stream_status_header *iso_stream_status); +static void usbredir_interrupt_receiving_status(void *priv, uint64_t id, + struct usb_redir_interrupt_receiving_status_header + *interrupt_receiving_status); +static void usbredir_bulk_streams_status(void *priv, uint64_t id, + struct usb_redir_bulk_streams_status_header *bulk_streams_status); +static void usbredir_bulk_receiving_status(void *priv, uint64_t id, + struct usb_redir_bulk_receiving_status_header *bulk_receiving_status); +static void usbredir_control_packet(void *priv, uint64_t id, + struct usb_redir_control_packet_header *control_packet, + uint8_t *data, int data_len); +static void usbredir_bulk_packet(void *priv, uint64_t id, + struct usb_redir_bulk_packet_header *bulk_packet, + uint8_t *data, int data_len); +static void usbredir_iso_packet(void *priv, uint64_t id, + struct usb_redir_iso_packet_header *iso_packet, + uint8_t *data, int data_len); +static void usbredir_interrupt_packet(void *priv, uint64_t id, + struct usb_redir_interrupt_packet_header *interrupt_header, + uint8_t *data, int data_len); +static void usbredir_buffered_bulk_packet(void *priv, uint64_t id, + struct usb_redir_buffered_bulk_packet_header *buffered_bulk_packet, + uint8_t *data, int data_len); + +static void usbredir_handle_status(USBRedirDevice *dev, USBPacket *p, + int status); + +#define VERSION "qemu usb-redir guest " QEMU_VERSION + +/* + * Logging stuff + */ + +#define ERROR(...) \ + do { \ + if (dev->debug >= usbredirparser_error) { \ + error_report("usb-redir error: " __VA_ARGS__); \ + } \ + } while (0) +#define WARNING(...) \ + do { \ + if (dev->debug >= usbredirparser_warning) { \ + warn_report("" __VA_ARGS__); \ + } \ + } while (0) +#define INFO(...) \ + do { \ + if (dev->debug >= usbredirparser_info) { \ + error_report("usb-redir: " __VA_ARGS__); \ + } \ + } while (0) +#define DPRINTF(...) \ + do { \ + if (dev->debug >= usbredirparser_debug) { \ + error_report("usb-redir: " __VA_ARGS__); \ + } \ + } while (0) +#define DPRINTF2(...) 
\ + do { \ + if (dev->debug >= usbredirparser_debug_data) { \ + error_report("usb-redir: " __VA_ARGS__); \ + } \ + } while (0) + +static void usbredir_log(void *priv, int level, const char *msg) +{ + USBRedirDevice *dev = priv; + + if (dev->debug < level) { + return; + } + + error_report("%s", msg); +} + +static void usbredir_log_data(USBRedirDevice *dev, const char *desc, + const uint8_t *data, int len) +{ + if (dev->debug < usbredirparser_debug_data) { + return; + } + qemu_hexdump(stderr, desc, data, len); +} + +/* + * usbredirparser io functions + */ + +static int usbredir_read(void *priv, uint8_t *data, int count) +{ + USBRedirDevice *dev = priv; + + if (dev->read_buf_size < count) { + count = dev->read_buf_size; + } + + memcpy(data, dev->read_buf, count); + + dev->read_buf_size -= count; + if (dev->read_buf_size) { + dev->read_buf += count; + } else { + dev->read_buf = NULL; + } + + return count; +} + +static gboolean usbredir_write_unblocked(void *do_not_use, GIOCondition cond, + void *opaque) +{ + USBRedirDevice *dev = opaque; + + dev->watch = 0; + usbredirparser_do_write(dev->parser); + + return FALSE; +} + +static int usbredir_write(void *priv, uint8_t *data, int count) +{ + USBRedirDevice *dev = priv; + int r; + + if (!qemu_chr_fe_backend_open(&dev->cs)) { + return 0; + } + + /* Don't send new data to the chardev until our state is fully synced */ + if (!runstate_check(RUN_STATE_RUNNING)) { + return 0; + } + + /* Recursion check */ + if (dev->in_write) { + DPRINTF("usbredir_write recursion\n"); + return 0; + } + dev->in_write = true; + + r = qemu_chr_fe_write(&dev->cs, data, count); + if (r < count) { + if (!dev->watch) { + dev->watch = qemu_chr_fe_add_watch(&dev->cs, G_IO_OUT | G_IO_HUP, + usbredir_write_unblocked, dev); + } + if (r < 0) { + r = 0; + } + } + dev->in_write = false; + return r; +} + +/* + * Cancelled and buffered packets helpers + */ + +static void packet_id_queue_init(struct PacketIdQueue *q, + USBRedirDevice *dev, const char *name) +{ + q->dev = dev; + q->name = name; + QTAILQ_INIT(&q->head); + q->size = 0; +} + +static void packet_id_queue_add(struct PacketIdQueue *q, uint64_t id) +{ + USBRedirDevice *dev = q->dev; + struct PacketIdQueueEntry *e; + + DPRINTF("adding packet id %"PRIu64" to %s queue\n", id, q->name); + + e = g_new0(struct PacketIdQueueEntry, 1); + e->id = id; + QTAILQ_INSERT_TAIL(&q->head, e, next); + q->size++; +} + +static int packet_id_queue_remove(struct PacketIdQueue *q, uint64_t id) +{ + USBRedirDevice *dev = q->dev; + struct PacketIdQueueEntry *e; + + QTAILQ_FOREACH(e, &q->head, next) { + if (e->id == id) { + DPRINTF("removing packet id %"PRIu64" from %s queue\n", + id, q->name); + QTAILQ_REMOVE(&q->head, e, next); + q->size--; + g_free(e); + return 1; + } + } + return 0; +} + +static void packet_id_queue_empty(struct PacketIdQueue *q) +{ + USBRedirDevice *dev = q->dev; + struct PacketIdQueueEntry *e, *next_e; + + DPRINTF("removing %d packet-ids from %s queue\n", q->size, q->name); + + QTAILQ_FOREACH_SAFE(e, &q->head, next, next_e) { + QTAILQ_REMOVE(&q->head, e, next); + g_free(e); + } + q->size = 0; +} + +static void usbredir_cancel_packet(USBDevice *udev, USBPacket *p) +{ + USBRedirDevice *dev = USB_REDIRECT(udev); + int i = USBEP2I(p->ep); + + if (p->combined) { + usb_combined_packet_cancel(udev, p); + return; + } + + if (dev->endpoint[i].pending_async_packet) { + assert(dev->endpoint[i].pending_async_packet == p); + dev->endpoint[i].pending_async_packet = NULL; + return; + } + + packet_id_queue_add(&dev->cancelled, p->id); + 
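/* Remember the cancelled id; a late completion for it will be dropped + by usbredir_is_cancelled() / usbredir_find_packet_by_id() */ +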
usbredirparser_send_cancel_data_packet(dev->parser, p->id); + usbredirparser_do_write(dev->parser); +} + +static int usbredir_is_cancelled(USBRedirDevice *dev, uint64_t id) +{ + if (!dev->dev.attached) { + return 1; /* Treat everything as cancelled after a disconnect */ + } + return packet_id_queue_remove(&dev->cancelled, id); +} + +static void usbredir_fill_already_in_flight_from_ep(USBRedirDevice *dev, + struct USBEndpoint *ep) +{ + USBPacket *p; + + /* async handled packets for bulk receiving eps do not count as inflight */ + if (dev->endpoint[USBEP2I(ep)].bulk_receiving_started) { + return; + } + + QTAILQ_FOREACH(p, &ep->queue, queue) { + /* Skip combined packets, except for the first */ + if (p->combined && p != p->combined->first) { + continue; + } + if (p->state == USB_PACKET_ASYNC) { + packet_id_queue_add(&dev->already_in_flight, p->id); + } + } +} + +static void usbredir_fill_already_in_flight(USBRedirDevice *dev) +{ + int ep; + struct USBDevice *udev = &dev->dev; + + usbredir_fill_already_in_flight_from_ep(dev, &udev->ep_ctl); + + for (ep = 0; ep < USB_MAX_ENDPOINTS; ep++) { + usbredir_fill_already_in_flight_from_ep(dev, &udev->ep_in[ep]); + usbredir_fill_already_in_flight_from_ep(dev, &udev->ep_out[ep]); + } +} + +static int usbredir_already_in_flight(USBRedirDevice *dev, uint64_t id) +{ + return packet_id_queue_remove(&dev->already_in_flight, id); +} + +static USBPacket *usbredir_find_packet_by_id(USBRedirDevice *dev, + uint8_t ep, uint64_t id) +{ + USBPacket *p; + + if (usbredir_is_cancelled(dev, id)) { + return NULL; + } + + p = usb_ep_find_packet_by_id(&dev->dev, + (ep & USB_DIR_IN) ? USB_TOKEN_IN : USB_TOKEN_OUT, + ep & 0x0f, id); + if (p == NULL) { + ERROR("could not find packet with id %"PRIu64"\n", id); + } + return p; +} + +static int bufp_alloc(USBRedirDevice *dev, uint8_t *data, uint16_t len, + uint8_t status, uint8_t ep, void *free_on_destroy) +{ + struct buf_packet *bufp; + + if (!dev->endpoint[EP2I(ep)].bufpq_dropping_packets && + dev->endpoint[EP2I(ep)].bufpq_size > + 2 * dev->endpoint[EP2I(ep)].bufpq_target_size) { + DPRINTF("bufpq overflow, dropping packets ep %02X\n", ep); + dev->endpoint[EP2I(ep)].bufpq_dropping_packets = 1; + } + /* Since we're interrupting the stream anyway, drop enough packets to get + back to our target buffer size */ + if (dev->endpoint[EP2I(ep)].bufpq_dropping_packets) { + if (dev->endpoint[EP2I(ep)].bufpq_size > + dev->endpoint[EP2I(ep)].bufpq_target_size) { + free(free_on_destroy); + return -1; + } + dev->endpoint[EP2I(ep)].bufpq_dropping_packets = 0; + } + + bufp = g_new(struct buf_packet, 1); + bufp->data = data; + bufp->len = len; + bufp->offset = 0; + bufp->status = status; + bufp->free_on_destroy = free_on_destroy; + QTAILQ_INSERT_TAIL(&dev->endpoint[EP2I(ep)].bufpq, bufp, next); + dev->endpoint[EP2I(ep)].bufpq_size++; + return 0; +} + +static void bufp_free(USBRedirDevice *dev, struct buf_packet *bufp, + uint8_t ep) +{ + QTAILQ_REMOVE(&dev->endpoint[EP2I(ep)].bufpq, bufp, next); + dev->endpoint[EP2I(ep)].bufpq_size--; + free(bufp->free_on_destroy); + g_free(bufp); +} + +static void usbredir_free_bufpq(USBRedirDevice *dev, uint8_t ep) +{ + struct buf_packet *buf, *buf_next; + + QTAILQ_FOREACH_SAFE(buf, &dev->endpoint[EP2I(ep)].bufpq, next, buf_next) { + bufp_free(dev, buf, ep); + } +} + +/* + * USBDevice callbacks + */ + +static void usbredir_handle_reset(USBDevice *udev) +{ + USBRedirDevice *dev = USB_REDIRECT(udev); + + DPRINTF("reset device\n"); + usbredirparser_send_reset(dev->parser); + 
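/* usbredirparser_send_*() only queues data inside the parser; the + do_write below flushes it out through usbredir_write() */ +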
usbredirparser_do_write(dev->parser); +} + +static void usbredir_handle_iso_data(USBRedirDevice *dev, USBPacket *p, + uint8_t ep) +{ + int status, len; + if (!dev->endpoint[EP2I(ep)].iso_started && + !dev->endpoint[EP2I(ep)].iso_error) { + struct usb_redir_start_iso_stream_header start_iso = { + .endpoint = ep, + }; + int pkts_per_sec; + + if (dev->dev.speed == USB_SPEED_HIGH) { + pkts_per_sec = 8000 / dev->endpoint[EP2I(ep)].interval; + } else { + pkts_per_sec = 1000 / dev->endpoint[EP2I(ep)].interval; + } + /* Testing has shown that we need circa 60 ms buffer */ + dev->endpoint[EP2I(ep)].bufpq_target_size = (pkts_per_sec * 60) / 1000; + + /* Aim for approx 100 interrupts / second on the client to + balance latency and interrupt load */ + start_iso.pkts_per_urb = pkts_per_sec / 100; + if (start_iso.pkts_per_urb < 1) { + start_iso.pkts_per_urb = 1; + } else if (start_iso.pkts_per_urb > 32) { + start_iso.pkts_per_urb = 32; + } + + start_iso.no_urbs = DIV_ROUND_UP( + dev->endpoint[EP2I(ep)].bufpq_target_size, + start_iso.pkts_per_urb); + /* Output endpoints pre-fill only 1/2 of the packets, keeping the rest + as overflow buffer. Also see the usbredir protocol documentation */ + if (!(ep & USB_DIR_IN)) { + start_iso.no_urbs *= 2; + } + if (start_iso.no_urbs > 16) { + start_iso.no_urbs = 16; + } + + /* No id, we look at the ep when receiving a status back */ + usbredirparser_send_start_iso_stream(dev->parser, 0, &start_iso); + usbredirparser_do_write(dev->parser); + DPRINTF("iso stream started pkts/sec %d pkts/urb %d urbs %d ep %02X\n", + pkts_per_sec, start_iso.pkts_per_urb, start_iso.no_urbs, ep); + dev->endpoint[EP2I(ep)].iso_started = 1; + dev->endpoint[EP2I(ep)].bufpq_prefilled = 0; + dev->endpoint[EP2I(ep)].bufpq_dropping_packets = 0; + } + + if (ep & USB_DIR_IN) { + struct buf_packet *isop; + + if (dev->endpoint[EP2I(ep)].iso_started && + !dev->endpoint[EP2I(ep)].bufpq_prefilled) { + if (dev->endpoint[EP2I(ep)].bufpq_size < + dev->endpoint[EP2I(ep)].bufpq_target_size) { + return; + } + dev->endpoint[EP2I(ep)].bufpq_prefilled = 1; + } + + isop = QTAILQ_FIRST(&dev->endpoint[EP2I(ep)].bufpq); + if (isop == NULL) { + DPRINTF("iso-token-in ep %02X, no isop, iso_error: %d\n", + ep, dev->endpoint[EP2I(ep)].iso_error); + /* Re-fill the buffer */ + dev->endpoint[EP2I(ep)].bufpq_prefilled = 0; + /* Check iso_error for stream errors, otherwise it's an underrun */ + status = dev->endpoint[EP2I(ep)].iso_error; + dev->endpoint[EP2I(ep)].iso_error = 0; + p->status = status ? 
USB_RET_IOERROR : USB_RET_SUCCESS; + return; + } + DPRINTF2("iso-token-in ep %02X status %d len %d queue-size: %d\n", ep, + isop->status, isop->len, dev->endpoint[EP2I(ep)].bufpq_size); + + status = isop->status; + len = isop->len; + if (len > p->iov.size) { + ERROR("received iso data is larger than packet ep %02X (%d > %d)\n", + ep, len, (int)p->iov.size); + len = p->iov.size; + status = usb_redir_babble; + } + usb_packet_copy(p, isop->data, len); + bufp_free(dev, isop, ep); + usbredir_handle_status(dev, p, status); + } else { + /* If the stream was not started because of a pending error, don't + send the packet to the usb-host */ + if (dev->endpoint[EP2I(ep)].iso_started) { + struct usb_redir_iso_packet_header iso_packet = { + .endpoint = ep, + .length = p->iov.size + }; + g_autofree uint8_t *buf = g_malloc(p->iov.size); + /* No id, we look at the ep when receiving a status back */ + usb_packet_copy(p, buf, p->iov.size); + usbredirparser_send_iso_packet(dev->parser, 0, &iso_packet, + buf, p->iov.size); + usbredirparser_do_write(dev->parser); + } + status = dev->endpoint[EP2I(ep)].iso_error; + dev->endpoint[EP2I(ep)].iso_error = 0; + DPRINTF2("iso-token-out ep %02X status %d len %zd\n", ep, status, + p->iov.size); + usbredir_handle_status(dev, p, status); + } +} + +static void usbredir_stop_iso_stream(USBRedirDevice *dev, uint8_t ep) +{ + struct usb_redir_stop_iso_stream_header stop_iso_stream = { + .endpoint = ep + }; + if (dev->endpoint[EP2I(ep)].iso_started) { + usbredirparser_send_stop_iso_stream(dev->parser, 0, &stop_iso_stream); + DPRINTF("iso stream stopped ep %02X\n", ep); + dev->endpoint[EP2I(ep)].iso_started = 0; + } + dev->endpoint[EP2I(ep)].iso_error = 0; + usbredir_free_bufpq(dev, ep); +} + +/* + * The usb-host may poll the endpoint faster than our guest, resulting in lots + * of smaller bulkp-s. The below buffered_bulk_in_complete* functions combine + * data from multiple bulkp-s into a single packet, avoiding bufpq overflows. 
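+ * The raw variant simply concatenates payloads; the ftdi variant also + * re-creates the 2 FTDI status bytes at every max-packet-size boundary + * of the combined packet, the way a real FTDI device would deliver them.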
+ */ +static void usbredir_buffered_bulk_add_data_to_packet(USBRedirDevice *dev, + struct buf_packet *bulkp, int count, USBPacket *p, uint8_t ep) +{ + usb_packet_copy(p, bulkp->data + bulkp->offset, count); + bulkp->offset += count; + if (bulkp->offset == bulkp->len) { + /* Store status in the last packet with data from this bulkp */ + usbredir_handle_status(dev, p, bulkp->status); + bufp_free(dev, bulkp, ep); + } +} + +static void usbredir_buffered_bulk_in_complete_raw(USBRedirDevice *dev, + USBPacket *p, uint8_t ep) +{ + struct buf_packet *bulkp; + int count; + + while ((bulkp = QTAILQ_FIRST(&dev->endpoint[EP2I(ep)].bufpq)) && + p->actual_length < p->iov.size && p->status == USB_RET_SUCCESS) { + count = bulkp->len - bulkp->offset; + if (count > (p->iov.size - p->actual_length)) { + count = p->iov.size - p->actual_length; + } + usbredir_buffered_bulk_add_data_to_packet(dev, bulkp, count, p, ep); + } +} + +static void usbredir_buffered_bulk_in_complete_ftdi(USBRedirDevice *dev, + USBPacket *p, uint8_t ep) +{ + const int maxp = dev->endpoint[EP2I(ep)].max_packet_size; + uint8_t header[2] = { 0, 0 }; + struct buf_packet *bulkp; + int count; + + while ((bulkp = QTAILQ_FIRST(&dev->endpoint[EP2I(ep)].bufpq)) && + p->actual_length < p->iov.size && p->status == USB_RET_SUCCESS) { + if (bulkp->len < 2) { + WARNING("malformed ftdi bulk in packet\n"); + bufp_free(dev, bulkp, ep); + continue; + } + + if ((p->actual_length % maxp) == 0) { + usb_packet_copy(p, bulkp->data, 2); + memcpy(header, bulkp->data, 2); + } else { + if (bulkp->data[0] != header[0] || bulkp->data[1] != header[1]) { + break; /* Different header, add to next packet */ + } + } + + if (bulkp->offset == 0) { + bulkp->offset = 2; /* Skip header */ + } + count = bulkp->len - bulkp->offset; + /* Must repeat the header at maxp interval */ + if (count > (maxp - (p->actual_length % maxp))) { + count = maxp - (p->actual_length % maxp); + } + usbredir_buffered_bulk_add_data_to_packet(dev, bulkp, count, p, ep); + } +} + +static void usbredir_buffered_bulk_in_complete(USBRedirDevice *dev, + USBPacket *p, uint8_t ep) +{ + p->status = USB_RET_SUCCESS; /* Clear previous ASYNC status */ + dev->buffered_bulk_in_complete(dev, p, ep); + DPRINTF("bulk-token-in ep %02X status %d len %d id %"PRIu64"\n", + ep, p->status, p->actual_length, p->id); +} + +static void usbredir_handle_buffered_bulk_in_data(USBRedirDevice *dev, + USBPacket *p, uint8_t ep) +{ + /* Input bulk endpoint, buffered packet input */ + if (!dev->endpoint[EP2I(ep)].bulk_receiving_started) { + int bpt; + struct usb_redir_start_bulk_receiving_header start = { + .endpoint = ep, + .stream_id = 0, + .no_transfers = 5, + }; + /* Round bytes_per_transfer up to a multiple of max_packet_size */ + bpt = 512 + dev->endpoint[EP2I(ep)].max_packet_size - 1; + bpt /= dev->endpoint[EP2I(ep)].max_packet_size; + bpt *= dev->endpoint[EP2I(ep)].max_packet_size; + start.bytes_per_transfer = bpt; + /* No id, we look at the ep when receiving a status back */ + usbredirparser_send_start_bulk_receiving(dev->parser, 0, &start); + usbredirparser_do_write(dev->parser); + DPRINTF("bulk receiving started bytes/transfer %u count %d ep %02X\n", + start.bytes_per_transfer, start.no_transfers, ep); + dev->endpoint[EP2I(ep)].bulk_receiving_started = 1; + /* We don't really want to drop bulk packets ever, but + having some upper limit to how much we buffer is good. 
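+ (The rounding above makes bytes_per_transfer a whole multiple of + max_packet_size, e.g. max_packet_size 64 gives 512 bytes and + max_packet_size 1024 gives 1024 bytes per transfer.)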
*/ + dev->endpoint[EP2I(ep)].bufpq_target_size = 5000; + dev->endpoint[EP2I(ep)].bufpq_dropping_packets = 0; + } + + if (QTAILQ_EMPTY(&dev->endpoint[EP2I(ep)].bufpq)) { + DPRINTF("bulk-token-in ep %02X, no bulkp\n", ep); + assert(dev->endpoint[EP2I(ep)].pending_async_packet == NULL); + dev->endpoint[EP2I(ep)].pending_async_packet = p; + p->status = USB_RET_ASYNC; + return; + } + usbredir_buffered_bulk_in_complete(dev, p, ep); +} + +static void usbredir_stop_bulk_receiving(USBRedirDevice *dev, uint8_t ep) +{ + struct usb_redir_stop_bulk_receiving_header stop_bulk = { + .endpoint = ep, + .stream_id = 0, + }; + if (dev->endpoint[EP2I(ep)].bulk_receiving_started) { + usbredirparser_send_stop_bulk_receiving(dev->parser, 0, &stop_bulk); + DPRINTF("bulk receiving stopped ep %02X\n", ep); + dev->endpoint[EP2I(ep)].bulk_receiving_started = 0; + } + usbredir_free_bufpq(dev, ep); +} + +static void usbredir_handle_bulk_data(USBRedirDevice *dev, USBPacket *p, + uint8_t ep) +{ + struct usb_redir_bulk_packet_header bulk_packet; + size_t size = usb_packet_size(p); + const int maxp = dev->endpoint[EP2I(ep)].max_packet_size; + + if (usbredir_already_in_flight(dev, p->id)) { + p->status = USB_RET_ASYNC; + return; + } + + if (dev->endpoint[EP2I(ep)].bulk_receiving_enabled) { + if (size != 0 && (size % maxp) == 0) { + usbredir_handle_buffered_bulk_in_data(dev, p, ep); + return; + } + WARNING("bulk recv invalid size %zd ep %02x, disabling\n", size, ep); + assert(dev->endpoint[EP2I(ep)].pending_async_packet == NULL); + usbredir_stop_bulk_receiving(dev, ep); + dev->endpoint[EP2I(ep)].bulk_receiving_enabled = 0; + } + + DPRINTF("bulk-out ep %02X stream %u len %zd id %"PRIu64"\n", + ep, p->stream, size, p->id); + + bulk_packet.endpoint = ep; + bulk_packet.length = size; + bulk_packet.stream_id = p->stream; + bulk_packet.length_high = size >> 16; + assert(bulk_packet.length_high == 0 || + usbredirparser_peer_has_cap(dev->parser, + usb_redir_cap_32bits_bulk_length)); + + if (ep & USB_DIR_IN || size == 0) { + usbredirparser_send_bulk_packet(dev->parser, p->id, + &bulk_packet, NULL, 0); + } else { + g_autofree uint8_t *buf = g_malloc(size); + usb_packet_copy(p, buf, size); + usbredir_log_data(dev, "bulk data out:", buf, size); + usbredirparser_send_bulk_packet(dev->parser, p->id, + &bulk_packet, buf, size); + } + usbredirparser_do_write(dev->parser); + p->status = USB_RET_ASYNC; +} + +static void usbredir_handle_interrupt_in_data(USBRedirDevice *dev, + USBPacket *p, uint8_t ep) +{ + /* Input interrupt endpoint, buffered packet input */ + struct buf_packet *intp, *intp_to_free; + int status, len, sum; + + if (!dev->endpoint[EP2I(ep)].interrupt_started && + !dev->endpoint[EP2I(ep)].interrupt_error) { + struct usb_redir_start_interrupt_receiving_header start_int = { + .endpoint = ep, + }; + /* No id, we look at the ep when receiving a status back */ + usbredirparser_send_start_interrupt_receiving(dev->parser, 0, + &start_int); + usbredirparser_do_write(dev->parser); + DPRINTF("interrupt recv started ep %02X\n", ep); + dev->endpoint[EP2I(ep)].interrupt_started = 1; + /* We don't really want to drop interrupt packets ever, but + having some upper limit to how much we buffer is good. 
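+ (The loop below then reassembles one guest interrupt message from the + buffered fragments, stopping at the first fragment shorter than + max_packet_size or when the guest buffer is full.)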
*/ + dev->endpoint[EP2I(ep)].bufpq_target_size = 1000; + dev->endpoint[EP2I(ep)].bufpq_dropping_packets = 0; + } + + /* check for completed interrupt message (with all fragments) */ + sum = 0; + QTAILQ_FOREACH(intp, &dev->endpoint[EP2I(ep)].bufpq, next) { + sum += intp->len; + if (intp->len < dev->endpoint[EP2I(ep)].max_packet_size || + sum >= p->iov.size) + break; + } + + if (intp == NULL) { + DPRINTF2("interrupt-token-in ep %02X, no intp, buffered %d\n", ep, sum); + /* Check interrupt_error for stream errors */ + status = dev->endpoint[EP2I(ep)].interrupt_error; + dev->endpoint[EP2I(ep)].interrupt_error = 0; + if (status) { + usbredir_handle_status(dev, p, status); + } else { + p->status = USB_RET_NAK; + } + return; + } + + /* copy out the completed interrupt message */ + sum = 0; + status = usb_redir_success; + intp_to_free = NULL; + QTAILQ_FOREACH(intp, &dev->endpoint[EP2I(ep)].bufpq, next) { + if (intp_to_free) { + bufp_free(dev, intp_to_free, ep); + } + DPRINTF("interrupt-token-in ep %02X fragment status %d len %d\n", ep, + intp->status, intp->len); + + sum += intp->len; + len = intp->len; + if (status == usb_redir_success) { + status = intp->status; + } + if (sum > p->iov.size) { + ERROR("received int data is larger than packet ep %02X\n", ep); + len -= (sum - p->iov.size); + sum = p->iov.size; + status = usb_redir_babble; + } + + usb_packet_copy(p, intp->data, len); + + intp_to_free = intp; + if (intp->len < dev->endpoint[EP2I(ep)].max_packet_size || + sum >= p->iov.size) + break; + } + if (intp_to_free) { + bufp_free(dev, intp_to_free, ep); + } + DPRINTF("interrupt-token-in ep %02X summary status %d len %d\n", ep, + status, sum); + usbredir_handle_status(dev, p, status); +} + +/* + * Handle interrupt out data; the usbredir protocol expects us to do this + * async, so that it can report back a completion status. But guests will + * expect immediate completion for an interrupt endpoint, and handling this + * async causes migration issues. So we report success directly, counting + * on the fact that output interrupt packets normally always succeed. 
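+ * (Hence the function below neither sets USB_RET_ASYNC nor records the + * packet id as in flight.)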
+ */ +static void usbredir_handle_interrupt_out_data(USBRedirDevice *dev, + USBPacket *p, uint8_t ep) +{ + struct usb_redir_interrupt_packet_header interrupt_packet; + g_autofree uint8_t *buf = g_malloc(p->iov.size); + + DPRINTF("interrupt-out ep %02X len %zd id %"PRIu64"\n", ep, + p->iov.size, p->id); + + interrupt_packet.endpoint = ep; + interrupt_packet.length = p->iov.size; + + usb_packet_copy(p, buf, p->iov.size); + usbredir_log_data(dev, "interrupt data out:", buf, p->iov.size); + usbredirparser_send_interrupt_packet(dev->parser, p->id, + &interrupt_packet, buf, p->iov.size); + usbredirparser_do_write(dev->parser); +} + +static void usbredir_stop_interrupt_receiving(USBRedirDevice *dev, + uint8_t ep) +{ + struct usb_redir_stop_interrupt_receiving_header stop_interrupt_recv = { + .endpoint = ep + }; + if (dev->endpoint[EP2I(ep)].interrupt_started) { + usbredirparser_send_stop_interrupt_receiving(dev->parser, 0, + &stop_interrupt_recv); + DPRINTF("interrupt recv stopped ep %02X\n", ep); + dev->endpoint[EP2I(ep)].interrupt_started = 0; + } + dev->endpoint[EP2I(ep)].interrupt_error = 0; + usbredir_free_bufpq(dev, ep); +} + +static void usbredir_handle_data(USBDevice *udev, USBPacket *p) +{ + USBRedirDevice *dev = USB_REDIRECT(udev); + uint8_t ep; + + ep = p->ep->nr; + if (p->pid == USB_TOKEN_IN) { + ep |= USB_DIR_IN; + } + + switch (dev->endpoint[EP2I(ep)].type) { + case USB_ENDPOINT_XFER_CONTROL: + ERROR("handle_data called for control transfer on ep %02X\n", ep); + p->status = USB_RET_NAK; + break; + case USB_ENDPOINT_XFER_BULK: + if (p->state == USB_PACKET_SETUP && p->pid == USB_TOKEN_IN && + p->ep->pipeline) { + p->status = USB_RET_ADD_TO_QUEUE; + break; + } + usbredir_handle_bulk_data(dev, p, ep); + break; + case USB_ENDPOINT_XFER_ISOC: + usbredir_handle_iso_data(dev, p, ep); + break; + case USB_ENDPOINT_XFER_INT: + if (ep & USB_DIR_IN) { + usbredir_handle_interrupt_in_data(dev, p, ep); + } else { + usbredir_handle_interrupt_out_data(dev, p, ep); + } + break; + default: + ERROR("handle_data ep %02X has unknown type %d\n", ep, + dev->endpoint[EP2I(ep)].type); + p->status = USB_RET_NAK; + } +} + +static void usbredir_flush_ep_queue(USBDevice *dev, USBEndpoint *ep) +{ + if (ep->pid == USB_TOKEN_IN && ep->pipeline) { + usb_ep_combine_input_packets(ep); + } +} + +static void usbredir_stop_ep(USBRedirDevice *dev, int i) +{ + uint8_t ep = I2EP(i); + + switch (dev->endpoint[i].type) { + case USB_ENDPOINT_XFER_BULK: + if (ep & USB_DIR_IN) { + usbredir_stop_bulk_receiving(dev, ep); + } + break; + case USB_ENDPOINT_XFER_ISOC: + usbredir_stop_iso_stream(dev, ep); + break; + case USB_ENDPOINT_XFER_INT: + if (ep & USB_DIR_IN) { + usbredir_stop_interrupt_receiving(dev, ep); + } + break; + } + usbredir_free_bufpq(dev, ep); +} + +static void usbredir_ep_stopped(USBDevice *udev, USBEndpoint *uep) +{ + USBRedirDevice *dev = USB_REDIRECT(udev); + + usbredir_stop_ep(dev, USBEP2I(uep)); + usbredirparser_do_write(dev->parser); +} + +static void usbredir_set_config(USBRedirDevice *dev, USBPacket *p, + int config) +{ + struct usb_redir_set_configuration_header set_config; + int i; + + DPRINTF("set config %d id %"PRIu64"\n", config, p->id); + + for (i = 0; i < MAX_ENDPOINTS; i++) { + usbredir_stop_ep(dev, i); + } + + set_config.configuration = config; + usbredirparser_send_set_configuration(dev->parser, p->id, &set_config); + usbredirparser_do_write(dev->parser); + p->status = USB_RET_ASYNC; +} + +static void usbredir_get_config(USBRedirDevice *dev, USBPacket *p) +{ + DPRINTF("get config id %"PRIu64"\n", 
p->id); + + usbredirparser_send_get_configuration(dev->parser, p->id); + usbredirparser_do_write(dev->parser); + p->status = USB_RET_ASYNC; +} + +static void usbredir_set_interface(USBRedirDevice *dev, USBPacket *p, + int interface, int alt) +{ + struct usb_redir_set_alt_setting_header set_alt; + int i; + + DPRINTF("set interface %d alt %d id %"PRIu64"\n", interface, alt, p->id); + + for (i = 0; i < MAX_ENDPOINTS; i++) { + if (dev->endpoint[i].interface == interface) { + usbredir_stop_ep(dev, i); + } + } + + set_alt.interface = interface; + set_alt.alt = alt; + usbredirparser_send_set_alt_setting(dev->parser, p->id, &set_alt); + usbredirparser_do_write(dev->parser); + p->status = USB_RET_ASYNC; +} + +static void usbredir_get_interface(USBRedirDevice *dev, USBPacket *p, + int interface) +{ + struct usb_redir_get_alt_setting_header get_alt; + + DPRINTF("get interface %d id %"PRIu64"\n", interface, p->id); + + get_alt.interface = interface; + usbredirparser_send_get_alt_setting(dev->parser, p->id, &get_alt); + usbredirparser_do_write(dev->parser); + p->status = USB_RET_ASYNC; +} + +static void usbredir_handle_control(USBDevice *udev, USBPacket *p, + int request, int value, int index, int length, uint8_t *data) +{ + USBRedirDevice *dev = USB_REDIRECT(udev); + struct usb_redir_control_packet_header control_packet; + + if (usbredir_already_in_flight(dev, p->id)) { + p->status = USB_RET_ASYNC; + return; + } + + /* Special cases for certain standard device requests */ + switch (request) { + case DeviceOutRequest | USB_REQ_SET_ADDRESS: + DPRINTF("set address %d\n", value); + dev->dev.addr = value; + return; + case DeviceOutRequest | USB_REQ_SET_CONFIGURATION: + usbredir_set_config(dev, p, value & 0xff); + return; + case DeviceRequest | USB_REQ_GET_CONFIGURATION: + usbredir_get_config(dev, p); + return; + case InterfaceOutRequest | USB_REQ_SET_INTERFACE: + usbredir_set_interface(dev, p, index, value); + return; + case InterfaceRequest | USB_REQ_GET_INTERFACE: + usbredir_get_interface(dev, p, index); + return; + } + + /* Normal ctrl requests, note request is (bRequestType << 8) | bRequest */ + DPRINTF( + "ctrl-out type 0x%x req 0x%x val 0x%x index %d len %d id %"PRIu64"\n", + request >> 8, request & 0xff, value, index, length, p->id); + + control_packet.request = request & 0xFF; + control_packet.requesttype = request >> 8; + control_packet.endpoint = control_packet.requesttype & USB_DIR_IN; + control_packet.value = value; + control_packet.index = index; + control_packet.length = length; + + if (control_packet.requesttype & USB_DIR_IN) { + usbredirparser_send_control_packet(dev->parser, p->id, + &control_packet, NULL, 0); + } else { + usbredir_log_data(dev, "ctrl data out:", data, length); + usbredirparser_send_control_packet(dev->parser, p->id, + &control_packet, data, length); + } + usbredirparser_do_write(dev->parser); + p->status = USB_RET_ASYNC; +} + +static int usbredir_alloc_streams(USBDevice *udev, USBEndpoint **eps, + int nr_eps, int streams) +{ + USBRedirDevice *dev = USB_REDIRECT(udev); +#if USBREDIR_VERSION >= 0x000700 + struct usb_redir_alloc_bulk_streams_header alloc_streams; + int i; + + if (!usbredirparser_peer_has_cap(dev->parser, + usb_redir_cap_bulk_streams)) { + ERROR("peer does not support streams\n"); + goto reject; + } + + if (streams == 0) { + ERROR("request to allocate 0 streams\n"); + return -1; + } + + alloc_streams.no_streams = streams; + alloc_streams.endpoints = 0; + for (i = 0; i < nr_eps; i++) { + alloc_streams.endpoints |= 1 << USBEP2I(eps[i]); + } + 
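/* e.g. bulk IN ep 1: USBEP2I() yields index 0x11 = 17, i.e. bit 17 of + the endpoints mask -- the same folding EP2I() applies to endpoint + addresses (0x81 -> 0x11) */ +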
usbredirparser_send_alloc_bulk_streams(dev->parser, 0, &alloc_streams); + usbredirparser_do_write(dev->parser); + + return 0; +#else + ERROR("usbredir_alloc_streams not implemented\n"); + goto reject; +#endif +reject: + ERROR("streams are not available, disconnecting\n"); + qemu_bh_schedule(dev->device_reject_bh); + return -1; +} + +static void usbredir_free_streams(USBDevice *udev, USBEndpoint **eps, + int nr_eps) +{ +#if USBREDIR_VERSION >= 0x000700 + USBRedirDevice *dev = USB_REDIRECT(udev); + struct usb_redir_free_bulk_streams_header free_streams; + int i; + + if (!usbredirparser_peer_has_cap(dev->parser, + usb_redir_cap_bulk_streams)) { + return; + } + + free_streams.endpoints = 0; + for (i = 0; i < nr_eps; i++) { + free_streams.endpoints |= 1 << USBEP2I(eps[i]); + } + usbredirparser_send_free_bulk_streams(dev->parser, 0, &free_streams); + usbredirparser_do_write(dev->parser); +#endif +} + +/* + * Close events can be triggered by usbredirparser_do_write which gets called + * from within the USBDevice data / control packet callbacks and doing a + * usb_detach from within these callbacks is not a good idea. + * + * So we use a bh handler to take care of close events. + */ +static void usbredir_chardev_close_bh(void *opaque) +{ + USBRedirDevice *dev = opaque; + + qemu_bh_cancel(dev->device_reject_bh); + usbredir_device_disconnect(dev); + + if (dev->parser) { + DPRINTF("destroying usbredirparser\n"); + usbredirparser_destroy(dev->parser); + dev->parser = NULL; + } + if (dev->watch) { + g_source_remove(dev->watch); + dev->watch = 0; + } +} + +static void usbredir_create_parser(USBRedirDevice *dev) +{ + uint32_t caps[USB_REDIR_CAPS_SIZE] = { 0, }; + int flags = 0; + + DPRINTF("creating usbredirparser\n"); + + dev->parser = qemu_oom_check(usbredirparser_create()); + dev->parser->priv = dev; + dev->parser->log_func = usbredir_log; + dev->parser->read_func = usbredir_read; + dev->parser->write_func = usbredir_write; + dev->parser->hello_func = usbredir_hello; + dev->parser->device_connect_func = usbredir_device_connect; + dev->parser->device_disconnect_func = usbredir_device_disconnect; + dev->parser->interface_info_func = usbredir_interface_info; + dev->parser->ep_info_func = usbredir_ep_info; + dev->parser->configuration_status_func = usbredir_configuration_status; + dev->parser->alt_setting_status_func = usbredir_alt_setting_status; + dev->parser->iso_stream_status_func = usbredir_iso_stream_status; + dev->parser->interrupt_receiving_status_func = + usbredir_interrupt_receiving_status; + dev->parser->bulk_streams_status_func = usbredir_bulk_streams_status; + dev->parser->bulk_receiving_status_func = usbredir_bulk_receiving_status; + dev->parser->control_packet_func = usbredir_control_packet; + dev->parser->bulk_packet_func = usbredir_bulk_packet; + dev->parser->iso_packet_func = usbredir_iso_packet; + dev->parser->interrupt_packet_func = usbredir_interrupt_packet; + dev->parser->buffered_bulk_packet_func = usbredir_buffered_bulk_packet; + dev->read_buf = NULL; + dev->read_buf_size = 0; + + usbredirparser_caps_set_cap(caps, usb_redir_cap_connect_device_version); + usbredirparser_caps_set_cap(caps, usb_redir_cap_filter); + usbredirparser_caps_set_cap(caps, usb_redir_cap_ep_info_max_packet_size); + usbredirparser_caps_set_cap(caps, usb_redir_cap_64bits_ids); + usbredirparser_caps_set_cap(caps, usb_redir_cap_32bits_bulk_length); + usbredirparser_caps_set_cap(caps, usb_redir_cap_bulk_receiving); +#if USBREDIR_VERSION >= 0x000700 + if (dev->enable_streams) { + usbredirparser_caps_set_cap(caps, 
usb_redir_cap_bulk_streams); + } +#endif + + if (runstate_check(RUN_STATE_INMIGRATE)) { + flags |= usbredirparser_fl_no_hello; + } + usbredirparser_init(dev->parser, VERSION, caps, USB_REDIR_CAPS_SIZE, + flags); + usbredirparser_do_write(dev->parser); +} + +static void usbredir_reject_device(USBRedirDevice *dev) +{ + usbredir_device_disconnect(dev); + if (usbredirparser_peer_has_cap(dev->parser, usb_redir_cap_filter)) { + usbredirparser_send_filter_reject(dev->parser); + usbredirparser_do_write(dev->parser); + } +} + +/* + * We may need to reject the device when the hcd calls alloc_streams; doing + * a usb_detach from within an hcd call is not a good idea, hence this bh. + */ +static void usbredir_device_reject_bh(void *opaque) +{ + USBRedirDevice *dev = opaque; + + usbredir_reject_device(dev); +} + +static void usbredir_do_attach(void *opaque) +{ + USBRedirDevice *dev = opaque; + Error *local_err = NULL; + + /* In order to work properly with XHCI controllers we need these caps */ + if ((dev->dev.port->speedmask & USB_SPEED_MASK_SUPER) && !( + usbredirparser_peer_has_cap(dev->parser, + usb_redir_cap_ep_info_max_packet_size) && + usbredirparser_peer_has_cap(dev->parser, + usb_redir_cap_32bits_bulk_length) && + usbredirparser_peer_has_cap(dev->parser, + usb_redir_cap_64bits_ids))) { + ERROR("usb-redir-host lacks capabilities needed for use with XHCI\n"); + usbredir_reject_device(dev); + return; + } + + usb_device_attach(&dev->dev, &local_err); + if (local_err) { + error_report_err(local_err); + WARNING("rejecting device due to speed mismatch\n"); + usbredir_reject_device(dev); + } +} + +/* + * chardev callbacks + */ + +static int usbredir_chardev_can_read(void *opaque) +{ + USBRedirDevice *dev = opaque; + + if (!dev->parser) { + WARNING("chardev_can_read called on non-open chardev!\n"); + return 0; + } + + /* Don't read new data from the chardev until our state is fully synced */ + if (!runstate_check(RUN_STATE_RUNNING)) { + return 0; + } + + /* usbredirparser_do_read will consume *all* data we give it */ + return 1 * MiB; +} + +static void usbredir_chardev_read(void *opaque, const uint8_t *buf, int size) +{ + USBRedirDevice *dev = opaque; + + /* No recursion allowed! */ + assert(dev->read_buf == NULL); + + dev->read_buf = buf; + dev->read_buf_size = size; + + usbredirparser_do_read(dev->parser); + /* Send any acks, etc. 
which may be queued now */ + usbredirparser_do_write(dev->parser); +} + +static void usbredir_chardev_event(void *opaque, QEMUChrEvent event) +{ + USBRedirDevice *dev = opaque; + + switch (event) { + case CHR_EVENT_OPENED: + DPRINTF("chardev open\n"); + /* Make sure any pending closes are handled (no-op if none pending) */ + usbredir_chardev_close_bh(dev); + qemu_bh_cancel(dev->chardev_close_bh); + usbredir_create_parser(dev); + break; + case CHR_EVENT_CLOSED: + DPRINTF("chardev close\n"); + qemu_bh_schedule(dev->chardev_close_bh); + break; + case CHR_EVENT_BREAK: + case CHR_EVENT_MUX_IN: + case CHR_EVENT_MUX_OUT: + /* Ignore */ + break; + } +} + +/* + * init + destroy + */ + +static void usbredir_vm_state_change(void *priv, bool running, RunState state) +{ + USBRedirDevice *dev = priv; + + if (state == RUN_STATE_RUNNING && dev->parser != NULL) { + usbredirparser_do_write(dev->parser); /* Flush any pending writes */ + } +} + +static void usbredir_init_endpoints(USBRedirDevice *dev) +{ + int i; + + usb_ep_init(&dev->dev); + memset(dev->endpoint, 0, sizeof(dev->endpoint)); + for (i = 0; i < MAX_ENDPOINTS; i++) { + dev->endpoint[i].dev = dev; + QTAILQ_INIT(&dev->endpoint[i].bufpq); + } +} + +static void usbredir_realize(USBDevice *udev, Error **errp) +{ + USBRedirDevice *dev = USB_REDIRECT(udev); + int i; + + if (!qemu_chr_fe_backend_connected(&dev->cs)) { + error_setg(errp, QERR_MISSING_PARAMETER, "chardev"); + return; + } + + if (dev->filter_str) { + i = usbredirfilter_string_to_rules(dev->filter_str, ":", "|", + &dev->filter_rules, + &dev->filter_rules_count); + if (i) { + error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "filter", + "a usb device filter string"); + return; + } + } + + dev->chardev_close_bh = qemu_bh_new(usbredir_chardev_close_bh, dev); + dev->device_reject_bh = qemu_bh_new(usbredir_device_reject_bh, dev); + dev->attach_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL, usbredir_do_attach, dev); + + packet_id_queue_init(&dev->cancelled, dev, "cancelled"); + packet_id_queue_init(&dev->already_in_flight, dev, "already-in-flight"); + usbredir_init_endpoints(dev); + + /* We'll do the attach once we receive the speed from the usb-host */ + udev->auto_attach = 0; + + /* Will be cleared during setup when we find conflicts */ + dev->compatible_speedmask = USB_SPEED_MASK_FULL | USB_SPEED_MASK_HIGH; + + /* Let the backend know we are ready */ + qemu_chr_fe_set_handlers(&dev->cs, usbredir_chardev_can_read, + usbredir_chardev_read, usbredir_chardev_event, + NULL, dev, NULL, true); + + dev->vmstate = + qemu_add_vm_change_state_handler(usbredir_vm_state_change, dev); +} + +static void usbredir_cleanup_device_queues(USBRedirDevice *dev) +{ + int i; + + packet_id_queue_empty(&dev->cancelled); + packet_id_queue_empty(&dev->already_in_flight); + for (i = 0; i < MAX_ENDPOINTS; i++) { + usbredir_free_bufpq(dev, I2EP(i)); + } +} + +static void usbredir_unrealize(USBDevice *udev) +{ + USBRedirDevice *dev = USB_REDIRECT(udev); + + qemu_chr_fe_deinit(&dev->cs, true); + + /* Note must be done after qemu_chr_close, as that causes a close event */ + qemu_bh_delete(dev->chardev_close_bh); + qemu_bh_delete(dev->device_reject_bh); + + timer_free(dev->attach_timer); + + usbredir_cleanup_device_queues(dev); + + if (dev->parser) { + usbredirparser_destroy(dev->parser); + } + if (dev->watch) { + g_source_remove(dev->watch); + } + + free(dev->filter_rules); + qemu_del_vm_change_state_handler(dev->vmstate); +} + +static int usbredir_check_filter(USBRedirDevice *dev) +{ + if (dev->interface_info.interface_count == 
NO_INTERFACE_INFO) { + ERROR("No interface info for device\n"); + goto error; + } + + if (dev->filter_rules) { + if (!usbredirparser_peer_has_cap(dev->parser, + usb_redir_cap_connect_device_version)) { + ERROR("Device filter specified and peer does not have the " + "connect_device_version capability\n"); + goto error; + } + + if (usbredirfilter_check( + dev->filter_rules, + dev->filter_rules_count, + dev->device_info.device_class, + dev->device_info.device_subclass, + dev->device_info.device_protocol, + dev->interface_info.interface_class, + dev->interface_info.interface_subclass, + dev->interface_info.interface_protocol, + dev->interface_info.interface_count, + dev->device_info.vendor_id, + dev->device_info.product_id, + dev->device_info.device_version_bcd, + 0) != 0) { + goto error; + } + } + + return 0; + +error: + usbredir_reject_device(dev); + return -1; +} + +static void usbredir_check_bulk_receiving(USBRedirDevice *dev) +{ + int i, j, quirks; + + if (!usbredirparser_peer_has_cap(dev->parser, + usb_redir_cap_bulk_receiving)) { + return; + } + + for (i = EP2I(USB_DIR_IN); i < MAX_ENDPOINTS; i++) { + dev->endpoint[i].bulk_receiving_enabled = 0; + } + + if (dev->interface_info.interface_count == NO_INTERFACE_INFO) { + return; + } + + for (i = 0; i < dev->interface_info.interface_count; i++) { + quirks = usb_get_quirks(dev->device_info.vendor_id, + dev->device_info.product_id, + dev->interface_info.interface_class[i], + dev->interface_info.interface_subclass[i], + dev->interface_info.interface_protocol[i]); + if (!(quirks & USB_QUIRK_BUFFER_BULK_IN)) { + continue; + } + if (quirks & USB_QUIRK_IS_FTDI) { + dev->buffered_bulk_in_complete = + usbredir_buffered_bulk_in_complete_ftdi; + } else { + dev->buffered_bulk_in_complete = + usbredir_buffered_bulk_in_complete_raw; + } + + for (j = EP2I(USB_DIR_IN); j < MAX_ENDPOINTS; j++) { + if (dev->endpoint[j].interface == + dev->interface_info.interface[i] && + dev->endpoint[j].type == USB_ENDPOINT_XFER_BULK && + dev->endpoint[j].max_packet_size != 0) { + dev->endpoint[j].bulk_receiving_enabled = 1; + /* + * With buffering pipelining is not necessary. Also packet + * combining and bulk in buffering don't play nice together! + */ + I2USBEP(dev, j)->pipeline = false; + break; /* Only buffer for the first ep of each intf */ + } + } + } +} + +/* + * usbredirparser packet complete callbacks + */ + +static void usbredir_handle_status(USBRedirDevice *dev, USBPacket *p, + int status) +{ + switch (status) { + case usb_redir_success: + p->status = USB_RET_SUCCESS; /* Clear previous ASYNC status */ + break; + case usb_redir_stall: + p->status = USB_RET_STALL; + break; + case usb_redir_cancelled: + /* + * When the usbredir-host unredirects a device, it will report a status + * of cancelled for all pending packets, followed by a disconnect msg. 
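+ * Packets the guest itself cancelled never get this far: they are + * filtered out earlier by usbredir_find_packet_by_id() via + * usbredir_is_cancelled().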
+ */ + p->status = USB_RET_IOERROR; + break; + case usb_redir_inval: + WARNING("got invalid param error from usb-host?\n"); + p->status = USB_RET_IOERROR; + break; + case usb_redir_babble: + p->status = USB_RET_BABBLE; + break; + case usb_redir_ioerror: + case usb_redir_timeout: + default: + p->status = USB_RET_IOERROR; + } +} + +static void usbredir_hello(void *priv, struct usb_redir_hello_header *h) +{ + USBRedirDevice *dev = priv; + + /* Try to send the filter info now that we have the usb-host's caps */ + if (usbredirparser_peer_has_cap(dev->parser, usb_redir_cap_filter) && + dev->filter_rules) { + usbredirparser_send_filter_filter(dev->parser, dev->filter_rules, + dev->filter_rules_count); + usbredirparser_do_write(dev->parser); + } +} + +static void usbredir_device_connect(void *priv, + struct usb_redir_device_connect_header *device_connect) +{ + USBRedirDevice *dev = priv; + const char *speed; + + if (timer_pending(dev->attach_timer) || dev->dev.attached) { + ERROR("Received device connect while already connected\n"); + return; + } + + switch (device_connect->speed) { + case usb_redir_speed_low: + speed = "low speed"; + dev->dev.speed = USB_SPEED_LOW; + dev->compatible_speedmask &= ~USB_SPEED_MASK_FULL; + dev->compatible_speedmask &= ~USB_SPEED_MASK_HIGH; + break; + case usb_redir_speed_full: + speed = "full speed"; + dev->dev.speed = USB_SPEED_FULL; + dev->compatible_speedmask &= ~USB_SPEED_MASK_HIGH; + break; + case usb_redir_speed_high: + speed = "high speed"; + dev->dev.speed = USB_SPEED_HIGH; + break; + case usb_redir_speed_super: + speed = "super speed"; + dev->dev.speed = USB_SPEED_SUPER; + break; + default: + speed = "unknown speed"; + dev->dev.speed = USB_SPEED_FULL; + } + + if (usbredirparser_peer_has_cap(dev->parser, + usb_redir_cap_connect_device_version)) { + INFO("attaching %s device %04x:%04x version %d.%d class %02x\n", + speed, device_connect->vendor_id, device_connect->product_id, + ((device_connect->device_version_bcd & 0xf000) >> 12) * 10 + + ((device_connect->device_version_bcd & 0x0f00) >> 8), + ((device_connect->device_version_bcd & 0x00f0) >> 4) * 10 + + ((device_connect->device_version_bcd & 0x000f) >> 0), + device_connect->device_class); + } else { + INFO("attaching %s device %04x:%04x class %02x\n", speed, + device_connect->vendor_id, device_connect->product_id, + device_connect->device_class); + } + + dev->dev.speedmask = (1 << dev->dev.speed) | dev->compatible_speedmask; + dev->device_info = *device_connect; + + if (usbredir_check_filter(dev)) { + WARNING("Device %04x:%04x rejected by device filter, not attaching\n", + device_connect->vendor_id, device_connect->product_id); + return; + } + + usbredir_check_bulk_receiving(dev); + timer_mod(dev->attach_timer, dev->next_attach_time); +} + +static void usbredir_device_disconnect(void *priv) +{ + USBRedirDevice *dev = priv; + + /* Stop any pending attaches */ + timer_del(dev->attach_timer); + + if (dev->dev.attached) { + DPRINTF("detaching device\n"); + usb_device_detach(&dev->dev); + /* + * Delay next usb device attach to give the guest a chance to + * see the detach / attach in case of quick close / open succession + */ + dev->next_attach_time = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 200; + } + + /* Reset state so that the next dev connected starts with a clean slate */ + usbredir_cleanup_device_queues(dev); + usbredir_init_endpoints(dev); + dev->interface_info.interface_count = NO_INTERFACE_INFO; + dev->dev.addr = 0; + dev->dev.speed = 0; + dev->compatible_speedmask = USB_SPEED_MASK_FULL | 
USB_SPEED_MASK_HIGH; +} + +static void usbredir_interface_info(void *priv, + struct usb_redir_interface_info_header *interface_info) +{ + USBRedirDevice *dev = priv; + + dev->interface_info = *interface_info; + + /* + * If we receive interface info after the device has already been + * connected (ie on a set_config), re-check interface dependent things. + */ + if (timer_pending(dev->attach_timer) || dev->dev.attached) { + usbredir_check_bulk_receiving(dev); + if (usbredir_check_filter(dev)) { + ERROR("Device no longer matches filter after interface info " + "change, disconnecting!\n"); + } + } +} + +static void usbredir_mark_speed_incompatible(USBRedirDevice *dev, int speed) +{ + dev->compatible_speedmask &= ~(1 << speed); + dev->dev.speedmask = (1 << dev->dev.speed) | dev->compatible_speedmask; +} + +static void usbredir_set_pipeline(USBRedirDevice *dev, struct USBEndpoint *uep) +{ + if (uep->type != USB_ENDPOINT_XFER_BULK) { + return; + } + if (uep->pid == USB_TOKEN_OUT) { + uep->pipeline = true; + } + if (uep->pid == USB_TOKEN_IN && uep->max_packet_size != 0 && + usbredirparser_peer_has_cap(dev->parser, + usb_redir_cap_32bits_bulk_length)) { + uep->pipeline = true; + } +} + +static void usbredir_setup_usb_eps(USBRedirDevice *dev) +{ + struct USBEndpoint *usb_ep; + int i; + + for (i = 0; i < MAX_ENDPOINTS; i++) { + usb_ep = I2USBEP(dev, i); + usb_ep->type = dev->endpoint[i].type; + usb_ep->ifnum = dev->endpoint[i].interface; + usb_ep->max_packet_size = dev->endpoint[i].max_packet_size; + usb_ep->max_streams = dev->endpoint[i].max_streams; + usbredir_set_pipeline(dev, usb_ep); + } +} + +static void usbredir_ep_info(void *priv, + struct usb_redir_ep_info_header *ep_info) +{ + USBRedirDevice *dev = priv; + int i; + + assert(dev != NULL); + for (i = 0; i < MAX_ENDPOINTS; i++) { + dev->endpoint[i].type = ep_info->type[i]; + dev->endpoint[i].interval = ep_info->interval[i]; + dev->endpoint[i].interface = ep_info->interface[i]; + if (usbredirparser_peer_has_cap(dev->parser, + usb_redir_cap_ep_info_max_packet_size)) { + dev->endpoint[i].max_packet_size = ep_info->max_packet_size[i]; + } +#if USBREDIR_VERSION >= 0x000700 + if (usbredirparser_peer_has_cap(dev->parser, + usb_redir_cap_bulk_streams)) { + dev->endpoint[i].max_streams = ep_info->max_streams[i]; + } +#endif + switch (dev->endpoint[i].type) { + case usb_redir_type_invalid: + break; + case usb_redir_type_iso: + usbredir_mark_speed_incompatible(dev, USB_SPEED_FULL); + usbredir_mark_speed_incompatible(dev, USB_SPEED_HIGH); + /* Fall through */ + case usb_redir_type_interrupt: + if (!usbredirparser_peer_has_cap(dev->parser, + usb_redir_cap_ep_info_max_packet_size) || + ep_info->max_packet_size[i] > 64) { + usbredir_mark_speed_incompatible(dev, USB_SPEED_FULL); + } + if (!usbredirparser_peer_has_cap(dev->parser, + usb_redir_cap_ep_info_max_packet_size) || + ep_info->max_packet_size[i] > 1024) { + usbredir_mark_speed_incompatible(dev, USB_SPEED_HIGH); + } + if (dev->endpoint[i].interval == 0) { + ERROR("Received 0 interval for isoc or irq endpoint\n"); + usbredir_reject_device(dev); + return; + } + /* Fall through */ + case usb_redir_type_control: + case usb_redir_type_bulk: + DPRINTF("ep: %02X type: %d interface: %d\n", I2EP(i), + dev->endpoint[i].type, dev->endpoint[i].interface); + break; + default: + ERROR("Received invalid endpoint type\n"); + usbredir_reject_device(dev); + return; + } + } + /* The new ep info may have caused a speed incompatibility, recheck */ + if (dev->dev.attached && + !(dev->dev.port->speedmask & 
dev->dev.speedmask)) { + ERROR("Device no longer matches speed after endpoint info change, " + "disconnecting!\n"); + usbredir_reject_device(dev); + return; + } + usbredir_setup_usb_eps(dev); + usbredir_check_bulk_receiving(dev); +} + +static void usbredir_configuration_status(void *priv, uint64_t id, + struct usb_redir_configuration_status_header *config_status) +{ + USBRedirDevice *dev = priv; + USBPacket *p; + + DPRINTF("set config status %d config %d id %"PRIu64"\n", + config_status->status, config_status->configuration, id); + + p = usbredir_find_packet_by_id(dev, 0, id); + if (p) { + if (dev->dev.setup_buf[0] & USB_DIR_IN) { + dev->dev.data_buf[0] = config_status->configuration; + p->actual_length = 1; + } + usbredir_handle_status(dev, p, config_status->status); + usb_generic_async_ctrl_complete(&dev->dev, p); + } +} + +static void usbredir_alt_setting_status(void *priv, uint64_t id, + struct usb_redir_alt_setting_status_header *alt_setting_status) +{ + USBRedirDevice *dev = priv; + USBPacket *p; + + DPRINTF("alt status %d intf %d alt %d id: %"PRIu64"\n", + alt_setting_status->status, alt_setting_status->interface, + alt_setting_status->alt, id); + + p = usbredir_find_packet_by_id(dev, 0, id); + if (p) { + if (dev->dev.setup_buf[0] & USB_DIR_IN) { + dev->dev.data_buf[0] = alt_setting_status->alt; + p->actual_length = 1; + } + usbredir_handle_status(dev, p, alt_setting_status->status); + usb_generic_async_ctrl_complete(&dev->dev, p); + } +} + +static void usbredir_iso_stream_status(void *priv, uint64_t id, + struct usb_redir_iso_stream_status_header *iso_stream_status) +{ + USBRedirDevice *dev = priv; + uint8_t ep = iso_stream_status->endpoint; + + DPRINTF("iso status %d ep %02X id %"PRIu64"\n", iso_stream_status->status, + ep, id); + + if (!dev->dev.attached || !dev->endpoint[EP2I(ep)].iso_started) { + return; + } + + dev->endpoint[EP2I(ep)].iso_error = iso_stream_status->status; + if (iso_stream_status->status == usb_redir_stall) { + DPRINTF("iso stream stopped by peer ep %02X\n", ep); + dev->endpoint[EP2I(ep)].iso_started = 0; + } +} + +static void usbredir_interrupt_receiving_status(void *priv, uint64_t id, + struct usb_redir_interrupt_receiving_status_header + *interrupt_receiving_status) +{ + USBRedirDevice *dev = priv; + uint8_t ep = interrupt_receiving_status->endpoint; + + DPRINTF("interrupt recv status %d ep %02X id %"PRIu64"\n", + interrupt_receiving_status->status, ep, id); + + if (!dev->dev.attached || !dev->endpoint[EP2I(ep)].interrupt_started) { + return; + } + + dev->endpoint[EP2I(ep)].interrupt_error = + interrupt_receiving_status->status; + if (interrupt_receiving_status->status == usb_redir_stall) { + DPRINTF("interrupt receiving stopped by peer ep %02X\n", ep); + dev->endpoint[EP2I(ep)].interrupt_started = 0; + } +} + +static void usbredir_bulk_streams_status(void *priv, uint64_t id, + struct usb_redir_bulk_streams_status_header *bulk_streams_status) +{ +#if USBREDIR_VERSION >= 0x000700 + USBRedirDevice *dev = priv; + + if (bulk_streams_status->status == usb_redir_success) { + DPRINTF("bulk streams status %d eps %08x\n", + bulk_streams_status->status, bulk_streams_status->endpoints); + } else { + ERROR("bulk streams %s failed status %d eps %08x\n", + (bulk_streams_status->no_streams == 0) ? 
"free" : "alloc", + bulk_streams_status->status, bulk_streams_status->endpoints); + ERROR("usb-redir-host does not provide streams, disconnecting\n"); + usbredir_reject_device(dev); + } +#endif +} + +static void usbredir_bulk_receiving_status(void *priv, uint64_t id, + struct usb_redir_bulk_receiving_status_header *bulk_receiving_status) +{ + USBRedirDevice *dev = priv; + uint8_t ep = bulk_receiving_status->endpoint; + + DPRINTF("bulk recv status %d ep %02X id %"PRIu64"\n", + bulk_receiving_status->status, ep, id); + + if (!dev->dev.attached || !dev->endpoint[EP2I(ep)].bulk_receiving_started) { + return; + } + + if (bulk_receiving_status->status == usb_redir_stall) { + DPRINTF("bulk receiving stopped by peer ep %02X\n", ep); + dev->endpoint[EP2I(ep)].bulk_receiving_started = 0; + } +} + +static void usbredir_control_packet(void *priv, uint64_t id, + struct usb_redir_control_packet_header *control_packet, + uint8_t *data, int data_len) +{ + USBRedirDevice *dev = priv; + USBPacket *p; + int len = control_packet->length; + + DPRINTF("ctrl-in status %d len %d id %"PRIu64"\n", control_packet->status, + len, id); + + /* Fix up USB-3 ep0 maxpacket size to allow superspeed connected devices + * to work redirected to a not superspeed capable hcd */ + if (dev->dev.speed == USB_SPEED_SUPER && + !((dev->dev.port->speedmask & USB_SPEED_MASK_SUPER)) && + control_packet->requesttype == 0x80 && + control_packet->request == 6 && + control_packet->value == 0x100 && control_packet->index == 0 && + data_len >= 18 && data[7] == 9) { + data[7] = 64; + } + + p = usbredir_find_packet_by_id(dev, 0, id); + if (p) { + usbredir_handle_status(dev, p, control_packet->status); + if (data_len > 0) { + usbredir_log_data(dev, "ctrl data in:", data, data_len); + if (data_len > sizeof(dev->dev.data_buf)) { + ERROR("ctrl buffer too small (%d > %zu)\n", + data_len, sizeof(dev->dev.data_buf)); + p->status = USB_RET_STALL; + data_len = len = sizeof(dev->dev.data_buf); + } + memcpy(dev->dev.data_buf, data, data_len); + } + p->actual_length = len; + /* + * If this is GET_DESCRIPTOR request for configuration descriptor, + * remove 'remote wakeup' flag from it to prevent idle power down + * in Windows guest + */ + if (dev->suppress_remote_wake && + control_packet->requesttype == USB_DIR_IN && + control_packet->request == USB_REQ_GET_DESCRIPTOR && + control_packet->value == (USB_DT_CONFIG << 8) && + control_packet->index == 0 && + /* bmAttributes field of config descriptor */ + len > 7 && (dev->dev.data_buf[7] & USB_CFG_ATT_WAKEUP)) { + DPRINTF("Removed remote wake %04X:%04X\n", + dev->device_info.vendor_id, + dev->device_info.product_id); + dev->dev.data_buf[7] &= ~USB_CFG_ATT_WAKEUP; + } + usb_generic_async_ctrl_complete(&dev->dev, p); + } + free(data); +} + +static void usbredir_bulk_packet(void *priv, uint64_t id, + struct usb_redir_bulk_packet_header *bulk_packet, + uint8_t *data, int data_len) +{ + USBRedirDevice *dev = priv; + uint8_t ep = bulk_packet->endpoint; + int len = (bulk_packet->length_high << 16) | bulk_packet->length; + USBPacket *p; + + DPRINTF("bulk-in status %d ep %02X stream %u len %d id %"PRIu64"\n", + bulk_packet->status, ep, bulk_packet->stream_id, len, id); + + p = usbredir_find_packet_by_id(dev, ep, id); + if (p) { + size_t size = usb_packet_size(p); + usbredir_handle_status(dev, p, bulk_packet->status); + if (data_len > 0) { + usbredir_log_data(dev, "bulk data in:", data, data_len); + if (data_len > size) { + ERROR("bulk got more data then requested (%d > %zd)\n", + data_len, p->iov.size); + p->status = 
USB_RET_BABBLE; + data_len = len = size; + } + usb_packet_copy(p, data, data_len); + } + p->actual_length = len; + if (p->pid == USB_TOKEN_IN && p->ep->pipeline) { + usb_combined_input_packet_complete(&dev->dev, p); + } else { + usb_packet_complete(&dev->dev, p); + } + } + free(data); +} + +static void usbredir_iso_packet(void *priv, uint64_t id, + struct usb_redir_iso_packet_header *iso_packet, + uint8_t *data, int data_len) +{ + USBRedirDevice *dev = priv; + uint8_t ep = iso_packet->endpoint; + + DPRINTF2("iso-in status %d ep %02X len %d id %"PRIu64"\n", + iso_packet->status, ep, data_len, id); + + if (dev->endpoint[EP2I(ep)].type != USB_ENDPOINT_XFER_ISOC) { + ERROR("received iso packet for non iso endpoint %02X\n", ep); + free(data); + return; + } + + if (dev->endpoint[EP2I(ep)].iso_started == 0) { + DPRINTF("received iso packet for non started stream ep %02X\n", ep); + free(data); + return; + } + + /* bufp_alloc also adds the packet to the ep queue */ + bufp_alloc(dev, data, data_len, iso_packet->status, ep, data); +} + +static void usbredir_interrupt_packet(void *priv, uint64_t id, + struct usb_redir_interrupt_packet_header *interrupt_packet, + uint8_t *data, int data_len) +{ + USBRedirDevice *dev = priv; + uint8_t ep = interrupt_packet->endpoint; + + DPRINTF("interrupt-in status %d ep %02X len %d id %"PRIu64"\n", + interrupt_packet->status, ep, data_len, id); + + if (dev->endpoint[EP2I(ep)].type != USB_ENDPOINT_XFER_INT) { + ERROR("received int packet for non interrupt endpoint %02X\n", ep); + free(data); + return; + } + + if (ep & USB_DIR_IN) { + if (dev->endpoint[EP2I(ep)].interrupt_started == 0) { + DPRINTF("received int packet while not started ep %02X\n", ep); + free(data); + return; + } + + /* bufp_alloc also adds the packet to the ep queue */ + bufp_alloc(dev, data, data_len, interrupt_packet->status, ep, data); + + /* insufficient data solved with USB_RET_NAK */ + usb_wakeup(usb_ep_get(&dev->dev, USB_TOKEN_IN, ep & 0x0f), 0); + } else { + /* + * We report output interrupt packets as completed directly upon + * submission, so all we can do here if one failed is warn. 
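+ * Such packets were already completed at submission time, so there is nothing left to complete or cancel here.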
+ */ + if (interrupt_packet->status) { + WARNING("interrupt output failed status %d ep %02X id %"PRIu64"\n", + interrupt_packet->status, ep, id); + } + } +} + +static void usbredir_buffered_bulk_packet(void *priv, uint64_t id, + struct usb_redir_buffered_bulk_packet_header *buffered_bulk_packet, + uint8_t *data, int data_len) +{ + USBRedirDevice *dev = priv; + uint8_t status, ep = buffered_bulk_packet->endpoint; + void *free_on_destroy; + int i, len; + + DPRINTF("buffered-bulk-in status %d ep %02X len %d id %"PRIu64"\n", + buffered_bulk_packet->status, ep, data_len, id); + + if (dev->endpoint[EP2I(ep)].type != USB_ENDPOINT_XFER_BULK) { + ERROR("received buffered-bulk packet for non bulk ep %02X\n", ep); + free(data); + return; + } + + if (dev->endpoint[EP2I(ep)].bulk_receiving_started == 0) { + DPRINTF("received buffered-bulk packet on not started ep %02X\n", ep); + free(data); + return; + } + + /* Data must be in maxp chunks for buffered_bulk_add_*_data_to_packet */ + len = dev->endpoint[EP2I(ep)].max_packet_size; + status = usb_redir_success; + free_on_destroy = NULL; + for (i = 0; i < data_len; i += len) { + int r; + if (len >= (data_len - i)) { + len = data_len - i; + status = buffered_bulk_packet->status; + free_on_destroy = data; + } + /* bufp_alloc also adds the packet to the ep queue */ + r = bufp_alloc(dev, data + i, len, status, ep, free_on_destroy); + if (r) { + break; + } + } + + if (dev->endpoint[EP2I(ep)].pending_async_packet) { + USBPacket *p = dev->endpoint[EP2I(ep)].pending_async_packet; + dev->endpoint[EP2I(ep)].pending_async_packet = NULL; + usbredir_buffered_bulk_in_complete(dev, p, ep); + usb_packet_complete(&dev->dev, p); + } +} + +/* + * Migration code + */ + +static int usbredir_pre_save(void *priv) +{ + USBRedirDevice *dev = priv; + + usbredir_fill_already_in_flight(dev); + + return 0; +} + +static int usbredir_post_load(void *priv, int version_id) +{ + USBRedirDevice *dev = priv; + + if (dev == NULL || dev->parser == NULL) { + return 0; + } + + switch (dev->device_info.speed) { + case usb_redir_speed_low: + dev->dev.speed = USB_SPEED_LOW; + break; + case usb_redir_speed_full: + dev->dev.speed = USB_SPEED_FULL; + break; + case usb_redir_speed_high: + dev->dev.speed = USB_SPEED_HIGH; + break; + case usb_redir_speed_super: + dev->dev.speed = USB_SPEED_SUPER; + break; + default: + dev->dev.speed = USB_SPEED_FULL; + } + dev->dev.speedmask = (1 << dev->dev.speed); + + usbredir_setup_usb_eps(dev); + usbredir_check_bulk_receiving(dev); + + return 0; +} + +/* For usbredirparser migration */ +static int usbredir_put_parser(QEMUFile *f, void *priv, size_t unused, + const VMStateField *field, JSONWriter *vmdesc) +{ + USBRedirDevice *dev = priv; + uint8_t *data; + int len; + + if (dev->parser == NULL) { + qemu_put_be32(f, 0); + return 0; + } + + usbredirparser_serialize(dev->parser, &data, &len); + qemu_oom_check(data); + + qemu_put_be32(f, len); + qemu_put_buffer(f, data, len); + + free(data); + + return 0; +} + +static int usbredir_get_parser(QEMUFile *f, void *priv, size_t unused, + const VMStateField *field) +{ + USBRedirDevice *dev = priv; + uint8_t *data; + int len, ret; + + len = qemu_get_be32(f); + if (len == 0) { + return 0; + } + + /* + * If our chardev is not open already at this point the usbredir connection + * has been broken (non seamless migration, or restore from disk). 
+ * + * In this case create a temporary parser to receive the migration data, + * and schedule the close_bh to report the device as disconnected to the + * guest and to destroy the parser again. + */ + if (dev->parser == NULL) { + WARNING("usb-redir connection broken during migration\n"); + usbredir_create_parser(dev); + qemu_bh_schedule(dev->chardev_close_bh); + } + + data = g_malloc(len); + qemu_get_buffer(f, data, len); + + ret = usbredirparser_unserialize(dev->parser, data, len); + + g_free(data); + + return ret; +} + +static const VMStateInfo usbredir_parser_vmstate_info = { + .name = "usb-redir-parser", + .put = usbredir_put_parser, + .get = usbredir_get_parser, +}; + + +/* For buffered packets (iso/irq) queue migration */ +static int usbredir_put_bufpq(QEMUFile *f, void *priv, size_t unused, + const VMStateField *field, JSONWriter *vmdesc) +{ + struct endp_data *endp = priv; + USBRedirDevice *dev = endp->dev; + struct buf_packet *bufp; + int len, i = 0; + + qemu_put_be32(f, endp->bufpq_size); + QTAILQ_FOREACH(bufp, &endp->bufpq, next) { + len = bufp->len - bufp->offset; + DPRINTF("put_bufpq %d/%d len %d status %d\n", i + 1, endp->bufpq_size, + len, bufp->status); + qemu_put_be32(f, len); + qemu_put_be32(f, bufp->status); + qemu_put_buffer(f, bufp->data + bufp->offset, len); + i++; + } + assert(i == endp->bufpq_size); + + return 0; +} + +static int usbredir_get_bufpq(QEMUFile *f, void *priv, size_t unused, + const VMStateField *field) +{ + struct endp_data *endp = priv; + USBRedirDevice *dev = endp->dev; + struct buf_packet *bufp; + int i; + + endp->bufpq_size = qemu_get_be32(f); + for (i = 0; i < endp->bufpq_size; i++) { + bufp = g_new(struct buf_packet, 1); + bufp->len = qemu_get_be32(f); + bufp->status = qemu_get_be32(f); + bufp->offset = 0; + bufp->data = qemu_oom_check(malloc(bufp->len)); /* regular malloc! 
*/ + bufp->free_on_destroy = bufp->data; + qemu_get_buffer(f, bufp->data, bufp->len); + QTAILQ_INSERT_TAIL(&endp->bufpq, bufp, next); + DPRINTF("get_bufpq %d/%d len %d status %d\n", i + 1, endp->bufpq_size, + bufp->len, bufp->status); + } + return 0; +} + +static const VMStateInfo usbredir_ep_bufpq_vmstate_info = { + .name = "usb-redir-bufpq", + .put = usbredir_put_bufpq, + .get = usbredir_get_bufpq, +}; + + +/* For endp_data migration */ +static bool usbredir_bulk_receiving_needed(void *priv) +{ + struct endp_data *endp = priv; + + return endp->bulk_receiving_started; +} + +static const VMStateDescription usbredir_bulk_receiving_vmstate = { + .name = "usb-redir-ep/bulk-receiving", + .version_id = 1, + .minimum_version_id = 1, + .needed = usbredir_bulk_receiving_needed, + .fields = (VMStateField[]) { + VMSTATE_UINT8(bulk_receiving_started, struct endp_data), + VMSTATE_END_OF_LIST() + } +}; + +static bool usbredir_stream_needed(void *priv) +{ + struct endp_data *endp = priv; + + return endp->max_streams; +} + +static const VMStateDescription usbredir_stream_vmstate = { + .name = "usb-redir-ep/stream-state", + .version_id = 1, + .minimum_version_id = 1, + .needed = usbredir_stream_needed, + .fields = (VMStateField[]) { + VMSTATE_UINT32(max_streams, struct endp_data), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription usbredir_ep_vmstate = { + .name = "usb-redir-ep", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT8(type, struct endp_data), + VMSTATE_UINT8(interval, struct endp_data), + VMSTATE_UINT8(interface, struct endp_data), + VMSTATE_UINT16(max_packet_size, struct endp_data), + VMSTATE_UINT8(iso_started, struct endp_data), + VMSTATE_UINT8(iso_error, struct endp_data), + VMSTATE_UINT8(interrupt_started, struct endp_data), + VMSTATE_UINT8(interrupt_error, struct endp_data), + VMSTATE_UINT8(bufpq_prefilled, struct endp_data), + VMSTATE_UINT8(bufpq_dropping_packets, struct endp_data), + { + .name = "bufpq", + .version_id = 0, + .field_exists = NULL, + .size = 0, + .info = &usbredir_ep_bufpq_vmstate_info, + .flags = VMS_SINGLE, + .offset = 0, + }, + VMSTATE_INT32(bufpq_target_size, struct endp_data), + VMSTATE_END_OF_LIST() + }, + .subsections = (const VMStateDescription*[]) { + &usbredir_bulk_receiving_vmstate, + &usbredir_stream_vmstate, + NULL + } +}; + + +/* For PacketIdQueue migration */ +static int usbredir_put_packet_id_q(QEMUFile *f, void *priv, size_t unused, + const VMStateField *field, + JSONWriter *vmdesc) +{ + struct PacketIdQueue *q = priv; + USBRedirDevice *dev = q->dev; + struct PacketIdQueueEntry *e; + int remain = q->size; + + DPRINTF("put_packet_id_q %s size %d\n", q->name, q->size); + qemu_put_be32(f, q->size); + QTAILQ_FOREACH(e, &q->head, next) { + qemu_put_be64(f, e->id); + remain--; + } + assert(remain == 0); + + return 0; +} + +static int usbredir_get_packet_id_q(QEMUFile *f, void *priv, size_t unused, + const VMStateField *field) +{ + struct PacketIdQueue *q = priv; + USBRedirDevice *dev = q->dev; + int i, size; + uint64_t id; + + size = qemu_get_be32(f); + DPRINTF("get_packet_id_q %s size %d\n", q->name, size); + for (i = 0; i < size; i++) { + id = qemu_get_be64(f); + packet_id_queue_add(q, id); + } + assert(q->size == size); + return 0; +} + +static const VMStateInfo usbredir_ep_packet_id_q_vmstate_info = { + .name = "usb-redir-packet-id-q", + .put = usbredir_put_packet_id_q, + .get = usbredir_get_packet_id_q, +}; + +static const VMStateDescription usbredir_ep_packet_id_queue_vmstate = { + .name = 
"usb-redir-packet-id-queue", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + { + .name = "queue", + .version_id = 0, + .field_exists = NULL, + .size = 0, + .info = &usbredir_ep_packet_id_q_vmstate_info, + .flags = VMS_SINGLE, + .offset = 0, + }, + VMSTATE_END_OF_LIST() + } +}; + + +/* For usb_redir_device_connect_header migration */ +static const VMStateDescription usbredir_device_info_vmstate = { + .name = "usb-redir-device-info", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT8(speed, struct usb_redir_device_connect_header), + VMSTATE_UINT8(device_class, struct usb_redir_device_connect_header), + VMSTATE_UINT8(device_subclass, struct usb_redir_device_connect_header), + VMSTATE_UINT8(device_protocol, struct usb_redir_device_connect_header), + VMSTATE_UINT16(vendor_id, struct usb_redir_device_connect_header), + VMSTATE_UINT16(product_id, struct usb_redir_device_connect_header), + VMSTATE_UINT16(device_version_bcd, + struct usb_redir_device_connect_header), + VMSTATE_END_OF_LIST() + } +}; + + +/* For usb_redir_interface_info_header migration */ +static const VMStateDescription usbredir_interface_info_vmstate = { + .name = "usb-redir-interface-info", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(interface_count, + struct usb_redir_interface_info_header), + VMSTATE_UINT8_ARRAY(interface, + struct usb_redir_interface_info_header, 32), + VMSTATE_UINT8_ARRAY(interface_class, + struct usb_redir_interface_info_header, 32), + VMSTATE_UINT8_ARRAY(interface_subclass, + struct usb_redir_interface_info_header, 32), + VMSTATE_UINT8_ARRAY(interface_protocol, + struct usb_redir_interface_info_header, 32), + VMSTATE_END_OF_LIST() + } +}; + + +/* And finally the USBRedirDevice vmstate itself */ +static const VMStateDescription usbredir_vmstate = { + .name = "usb-redir", + .version_id = 1, + .minimum_version_id = 1, + .pre_save = usbredir_pre_save, + .post_load = usbredir_post_load, + .fields = (VMStateField[]) { + VMSTATE_USB_DEVICE(dev, USBRedirDevice), + VMSTATE_TIMER_PTR(attach_timer, USBRedirDevice), + { + .name = "parser", + .version_id = 0, + .field_exists = NULL, + .size = 0, + .info = &usbredir_parser_vmstate_info, + .flags = VMS_SINGLE, + .offset = 0, + }, + VMSTATE_STRUCT_ARRAY(endpoint, USBRedirDevice, MAX_ENDPOINTS, 1, + usbredir_ep_vmstate, struct endp_data), + VMSTATE_STRUCT(cancelled, USBRedirDevice, 1, + usbredir_ep_packet_id_queue_vmstate, + struct PacketIdQueue), + VMSTATE_STRUCT(already_in_flight, USBRedirDevice, 1, + usbredir_ep_packet_id_queue_vmstate, + struct PacketIdQueue), + VMSTATE_STRUCT(device_info, USBRedirDevice, 1, + usbredir_device_info_vmstate, + struct usb_redir_device_connect_header), + VMSTATE_STRUCT(interface_info, USBRedirDevice, 1, + usbredir_interface_info_vmstate, + struct usb_redir_interface_info_header), + VMSTATE_END_OF_LIST() + } +}; + +static Property usbredir_properties[] = { + DEFINE_PROP_CHR("chardev", USBRedirDevice, cs), + DEFINE_PROP_UINT8("debug", USBRedirDevice, debug, usbredirparser_warning), + DEFINE_PROP_STRING("filter", USBRedirDevice, filter_str), + DEFINE_PROP_BOOL("streams", USBRedirDevice, enable_streams, true), + DEFINE_PROP_BOOL("suppress-remote-wake", USBRedirDevice, + suppress_remote_wake, true), + DEFINE_PROP_END_OF_LIST(), +}; + +static void usbredir_class_initfn(ObjectClass *klass, void *data) +{ + USBDeviceClass *uc = USB_DEVICE_CLASS(klass); + DeviceClass *dc = DEVICE_CLASS(klass); + + uc->realize = usbredir_realize; 
+ uc->product_desc = "USB Redirection Device"; + uc->unrealize = usbredir_unrealize; + uc->cancel_packet = usbredir_cancel_packet; + uc->handle_reset = usbredir_handle_reset; + uc->handle_data = usbredir_handle_data; + uc->handle_control = usbredir_handle_control; + uc->flush_ep_queue = usbredir_flush_ep_queue; + uc->ep_stopped = usbredir_ep_stopped; + uc->alloc_streams = usbredir_alloc_streams; + uc->free_streams = usbredir_free_streams; + dc->vmsd = &usbredir_vmstate; + device_class_set_props(dc, usbredir_properties); + set_bit(DEVICE_CATEGORY_MISC, dc->categories); +} + +static void usbredir_instance_init(Object *obj) +{ + USBDevice *udev = USB_DEVICE(obj); + USBRedirDevice *dev = USB_REDIRECT(udev); + + device_add_bootindex_property(obj, &dev->bootindex, + "bootindex", NULL, + &udev->qdev); +} + +static const TypeInfo usbredir_dev_info = { + .name = TYPE_USB_REDIR, + .parent = TYPE_USB_DEVICE, + .instance_size = sizeof(USBRedirDevice), + .class_init = usbredir_class_initfn, + .instance_init = usbredir_instance_init, +}; +module_obj(TYPE_USB_REDIR); + +static void usbredir_register_types(void) +{ + type_register_static(&usbredir_dev_info); +} + +type_init(usbredir_register_types) diff --git a/hw/usb/trace-events b/hw/usb/trace-events new file mode 100644 index 000000000..b8287b63f --- /dev/null +++ b/hw/usb/trace-events @@ -0,0 +1,347 @@ +# See docs/devel/tracing.rst for syntax documentation. + +# core.c +usb_packet_state_change(int bus, const char *port, int ep, void *p, const char *o, const char *n) "bus %d, port %s, ep %d, packet %p, state %s -> %s" +usb_packet_state_fault(int bus, const char *port, int ep, void *p, const char *o, const char *n) "bus %d, port %s, ep %d, packet %p, state %s, expected %s" + +# bus.c +usb_port_claim(int bus, const char *port) "bus %d, port %s" +usb_port_attach(int bus, const char *port, const char *devspeed, const char *portspeed) "bus %d, port %s, devspeed %s, portspeed %s" +usb_port_detach(int bus, const char *port) "bus %d, port %s" +usb_port_release(int bus, const char *port) "bus %d, port %s" + +# hcd-ohci-pci.c +usb_ohci_exit(const char *s) "%s" + +# hcd-ohci.c +usb_ohci_iso_td_read_failed(uint32_t addr) "ISO_TD read error at 0x%x" +usb_ohci_iso_td_head(uint32_t head, uint32_t tail, uint32_t flags, uint32_t bp, uint32_t next, uint32_t be, uint32_t framenum, uint32_t startframe, uint32_t framecount, int rel_frame_num) "ISO_TD ED head 0x%.8x tailp 0x%.8x\n0x%.8x 0x%.8x 0x%.8x 0x%.8x\nframe_number 0x%.8x starting_frame 0x%.8x\nframe_count 0x%.8x relative %d" +usb_ohci_iso_td_head_offset(uint32_t o0, uint32_t o1, uint32_t o2, uint32_t o3, uint32_t o4, uint32_t o5, uint32_t o6, uint32_t o7) "0x%.8x 0x%.8x 0x%.8x 0x%.8x 0x%.8x 0x%.8x 0x%.8x 0x%.8x" +usb_ohci_iso_td_relative_frame_number_neg(int rel) "ISO_TD R=%d < 0" +usb_ohci_iso_td_relative_frame_number_big(int rel, int count) "ISO_TD R=%d > FC=%d" +usb_ohci_iso_td_bad_direction(int dir) "Bad direction %d" +usb_ohci_iso_td_bad_bp_be(uint32_t bp, uint32_t be) "ISO_TD bp 0x%.8x be 0x%.8x" +usb_ohci_iso_td_bad_cc_not_accessed(uint32_t start, uint32_t next) "ISO_TD cc != not accessed 0x%.8x 0x%.8x" +usb_ohci_iso_td_bad_cc_overrun(uint32_t start, uint32_t next) "ISO_TD start_offset=0x%.8x > next_offset=0x%.8x" +usb_ohci_iso_td_so(uint32_t so, uint32_t eo, uint32_t s, uint32_t e, const char *str, ssize_t len, int ret) "0x%.8x eo 0x%.8x\nsa 0x%.8x ea 0x%.8x\ndir %s len %zu ret %d" +usb_ohci_iso_td_data_overrun(int ret, ssize_t len) "DataOverrun %d > %zu" +usb_ohci_iso_td_data_underrun(int ret) "DataUnderrun 
%d" +usb_ohci_iso_td_nak(int ret) "got NAK/STALL %d" +usb_ohci_iso_td_bad_response(int ret) "Bad device response %d" +usb_ohci_port_attach(int index) "port #%d" +usb_ohci_port_detach(int index) "port #%d" +usb_ohci_port_wakeup(int index) "port #%d" +usb_ohci_port_suspend(int index) "port #%d" +usb_ohci_port_reset(int index) "port #%d" +usb_ohci_remote_wakeup(const char *s) "%s: SUSPEND->RESUME" +usb_ohci_reset(const char *s) "%s" +usb_ohci_start(const char *s) "%s: USB Operational" +usb_ohci_resume(const char *s) "%s: USB Resume" +usb_ohci_stop(const char *s) "%s: USB Suspended" +usb_ohci_set_ctl(const char *s, uint32_t new_state) "%s: new state 0x%x" +usb_ohci_td_underrun(void) "" +usb_ohci_td_dev_error(void) "" +usb_ohci_td_nak(void) "" +usb_ohci_td_stall(void) "" +usb_ohci_td_babble(void) "" +usb_ohci_td_bad_device_response(int rc) "%d" +usb_ohci_td_read_error(uint32_t addr) "TD read error at 0x%x" +usb_ohci_td_bad_direction(int dir) "Bad direction %d" +usb_ohci_td_skip_async(void) "" +usb_ohci_td_pkt_hdr(uint32_t addr, int64_t pktlen, int64_t len, const char *s, int flag_r, uint32_t cbp, uint32_t be) " TD @ 0x%.8x %" PRId64 " of %" PRId64 " bytes %s r=%d cbp=0x%.8x be=0x%.8x" +usb_ohci_td_pkt_short(const char *dir, const char *buf) "%s data: %s" +usb_ohci_td_pkt_full(const char *dir, const char *buf) "%s data: %s" +usb_ohci_td_too_many_pending(void) "" +usb_ohci_td_packet_status(int status) "status=%d" +usb_ohci_ed_read_error(uint32_t addr) "ED read error at 0x%x" +usb_ohci_ed_pkt(uint32_t cur, int h, int c, uint32_t head, uint32_t tail, uint32_t next) "ED @ 0x%.8x h=%u c=%u\n head=0x%.8x tailp=0x%.8x next=0x%.8x" +usb_ohci_ed_pkt_flags(uint32_t fa, uint32_t en, uint32_t d, int s, int k, int f, uint32_t mps) "fa=%u en=%u d=%u s=%u k=%u f=%u mps=%u" +usb_ohci_hcca_read_error(uint32_t addr) "HCCA read error at 0x%x" +usb_ohci_mem_read_unaligned(uint32_t addr) "at 0x%x" +usb_ohci_mem_read_bad_offset(uint32_t addr) "0x%x" +usb_ohci_mem_write_unaligned(uint32_t addr) "at 0x%x" +usb_ohci_mem_write_bad_offset(uint32_t addr) "0x%x" +usb_ohci_process_lists(uint32_t head, uint32_t cur) "head 0x%x, cur 0x%x" +usb_ohci_set_frame_interval(const char *name, uint16_t fi_x, uint16_t fi_u) "%s: FrameInterval = 0x%x (%u)" +usb_ohci_hub_power_up(void) "powered up all ports" +usb_ohci_hub_power_down(void) "powered down all ports" +usb_ohci_init_time(int64_t frametime, int64_t bittime) "usb_bit_time=%" PRId64 " usb_frame_time=%" PRId64 +usb_ohci_die(void) "" +usb_ohci_async_complete(void) "" + +# hcd-ehci.c +usb_ehci_reset(void) "=== RESET ===" +usb_ehci_unrealize(void) "=== UNREALIZE ===" +usb_ehci_opreg_read(uint32_t addr, const char *str, uint32_t val) "rd mmio 0x%04x [%s] = 0x%x" +usb_ehci_opreg_write(uint32_t addr, const char *str, uint32_t val) "wr mmio 0x%04x [%s] = 0x%x" +usb_ehci_opreg_change(uint32_t addr, const char *str, uint32_t new, uint32_t old) "ch mmio 0x%04x [%s] = 0x%x (old: 0x%x)" +usb_ehci_portsc_read(uint32_t addr, uint32_t port, uint32_t val) "rd mmio 0x%04x [port %d] = 0x%x" +usb_ehci_portsc_write(uint32_t addr, uint32_t port, uint32_t val) "wr mmio 0x%04x [port %d] = 0x%x" +usb_ehci_portsc_change(uint32_t addr, uint32_t port, uint32_t new, uint32_t old) "ch mmio 0x%04x [port %d] = 0x%x (old: 0x%x)" +usb_ehci_usbsts(const char *sts, int state) "usbsts %s %d" +usb_ehci_state(const char *schedule, const char *state) "%s schedule %s" +usb_ehci_qh_ptrs(void *q, uint32_t addr, uint32_t nxt, uint32_t c_qtd, uint32_t n_qtd, uint32_t a_qtd) "q %p - QH @ 0x%08x: next 0x%08x qtds 
0x%08x,0x%08x,0x%08x" +usb_ehci_qh_fields(uint32_t addr, int rl, int mplen, int eps, int ep, int devaddr) "QH @ 0x%08x - rl %d, mplen %d, eps %d, ep %d, dev %d" +usb_ehci_qh_bits(uint32_t addr, int c, int h, int dtc, int i) "QH @ 0x%08x - c %d, h %d, dtc %d, i %d" +usb_ehci_qtd_ptrs(void *q, uint32_t addr, uint32_t nxt, uint32_t altnext) "q %p - QTD @ 0x%08x: next 0x%08x altnext 0x%08x" +usb_ehci_qtd_fields(uint32_t addr, int tbytes, int cpage, int cerr, int pid) "QTD @ 0x%08x - tbytes %d, cpage %d, cerr %d, pid %d" +usb_ehci_qtd_bits(uint32_t addr, int ioc, int active, int halt, int babble, int xacterr) "QTD @ 0x%08x - ioc %d, active %d, halt %d, babble %d, xacterr %d" +usb_ehci_itd(uint32_t addr, uint32_t nxt, uint32_t mplen, uint32_t mult, uint32_t ep, uint32_t devaddr) "ITD @ 0x%08x: next 0x%08x - mplen %d, mult %d, ep %d, dev %d" +usb_ehci_sitd(uint32_t addr, uint32_t nxt, uint32_t active) "ITD @ 0x%08x: next 0x%08x - active %d" +usb_ehci_port_attach(uint32_t port, const char *owner, const char *device) "attach port #%d, owner %s, device %s" +usb_ehci_port_detach(uint32_t port, const char *owner) "detach port #%d, owner %s" +usb_ehci_port_reset(uint32_t port, int enable) "reset port #%d - %d" +usb_ehci_port_suspend(uint32_t port) "port #%d" +usb_ehci_port_wakeup(uint32_t port) "port #%d" +usb_ehci_port_resume(uint32_t port) "port #%d" +usb_ehci_queue_action(void *q, const char *action) "q %p: %s" +usb_ehci_packet_action(void *q, void *p, const char *action) "q %p p %p: %s" +usb_ehci_irq(uint32_t level, uint32_t frindex, uint32_t sts, uint32_t mask) "level %d, frindex 0x%04x, sts 0x%x, mask 0x%x" +usb_ehci_guest_bug(const char *reason) "%s" +usb_ehci_doorbell_ring(void) "" +usb_ehci_doorbell_ack(void) "" +usb_ehci_dma_error(void) "" + +# hcd-uhci.c +usb_uhci_reset(void) "=== RESET ===" +usb_uhci_exit(void) "=== EXIT ===" +usb_uhci_schedule_start(void) "" +usb_uhci_schedule_stop(void) "" +usb_uhci_frame_start(uint32_t num) "nr %d" +usb_uhci_frame_stop_bandwidth(void) "" +usb_uhci_frame_loop_stop_idle(void) "" +usb_uhci_frame_loop_continue(void) "" +usb_uhci_mmio_readw(uint32_t addr, uint32_t val) "addr 0x%04x, ret 0x%04x" +usb_uhci_mmio_writew(uint32_t addr, uint32_t val) "addr 0x%04x, val 0x%04x" +usb_uhci_queue_add(uint32_t token) "token 0x%x" +usb_uhci_queue_del(uint32_t token, const char *reason) "token 0x%x: %s" +usb_uhci_packet_add(uint32_t token, uint32_t addr) "token 0x%x, td 0x%x" +usb_uhci_packet_link_async(uint32_t token, uint32_t addr) "token 0x%x, td 0x%x" +usb_uhci_packet_unlink_async(uint32_t token, uint32_t addr) "token 0x%x, td 0x%x" +usb_uhci_packet_cancel(uint32_t token, uint32_t addr, int done) "token 0x%x, td 0x%x, done %d" +usb_uhci_packet_complete_success(uint32_t token, uint32_t addr) "token 0x%x, td 0x%x" +usb_uhci_packet_complete_shortxfer(uint32_t token, uint32_t addr) "token 0x%x, td 0x%x" +usb_uhci_packet_complete_stall(uint32_t token, uint32_t addr) "token 0x%x, td 0x%x" +usb_uhci_packet_complete_babble(uint32_t token, uint32_t addr) "token 0x%x, td 0x%x" +usb_uhci_packet_complete_error(uint32_t token, uint32_t addr) "token 0x%x, td 0x%x" +usb_uhci_packet_del(uint32_t token, uint32_t addr) "token 0x%x, td 0x%x" +usb_uhci_qh_load(uint32_t qh) "qh 0x%x" +usb_uhci_td_load(uint32_t qh, uint32_t td, uint32_t ctrl, uint32_t token) "qh 0x%x, td 0x%x, ctrl 0x%x, token 0x%x" +usb_uhci_td_queue(uint32_t td, uint32_t ctrl, uint32_t token) "td 0x%x, ctrl 0x%x, token 0x%x" +usb_uhci_td_nextqh(uint32_t qh, uint32_t td) "qh 0x%x, td 0x%x" +usb_uhci_td_async(uint32_t qh, 
uint32_t td) "qh 0x%x, td 0x%x" +usb_uhci_td_complete(uint32_t qh, uint32_t td) "qh 0x%x, td 0x%x" + +# hcd-xhci.c +usb_xhci_reset(void) "=== RESET ===" +usb_xhci_exit(void) "=== EXIT ===" +usb_xhci_run(void) "" +usb_xhci_stop(void) "" +usb_xhci_cap_read(uint32_t off, uint32_t val) "off 0x%04x, ret 0x%08x" +usb_xhci_oper_read(uint32_t off, uint32_t val) "off 0x%04x, ret 0x%08x" +usb_xhci_port_read(uint32_t port, uint32_t off, uint32_t val) "port %d, off 0x%04x, ret 0x%08x" +usb_xhci_runtime_read(uint32_t off, uint32_t val) "off 0x%04x, ret 0x%08x" +usb_xhci_doorbell_read(uint32_t off, uint32_t val) "off 0x%04x, ret 0x%08x" +usb_xhci_oper_write(uint32_t off, uint32_t val) "off 0x%04x, val 0x%08x" +usb_xhci_port_write(uint32_t port, uint32_t off, uint32_t val) "port %d, off 0x%04x, val 0x%08x" +usb_xhci_runtime_write(uint32_t off, uint32_t val) "off 0x%04x, val 0x%08x" +usb_xhci_doorbell_write(uint32_t off, uint32_t val) "off 0x%04x, val 0x%08x" +usb_xhci_irq_intx(uint32_t level) "level %d" +usb_xhci_irq_msi(uint32_t nr) "nr %d" +usb_xhci_irq_msix(uint32_t nr) "nr %d" +usb_xhci_irq_msix_use(uint32_t nr) "nr %d" +usb_xhci_irq_msix_unuse(uint32_t nr) "nr %d" +usb_xhci_queue_event(uint32_t vector, uint32_t idx, const char *trb, const char *evt, uint64_t param, uint32_t status, uint32_t control) "v %d, idx %d, %s, %s, p 0x%016" PRIx64 ", s 0x%08x, c 0x%08x" +usb_xhci_fetch_trb(uint64_t addr, const char *name, uint64_t param, uint32_t status, uint32_t control) "addr 0x%016" PRIx64 ", %s, p 0x%016" PRIx64 ", s 0x%08x, c 0x%08x" +usb_xhci_port_reset(uint32_t port, bool warm) "port %d, warm %d" +usb_xhci_port_link(uint32_t port, uint32_t pls) "port %d, pls %d" +usb_xhci_port_notify(uint32_t port, uint32_t pls) "port %d, bits 0x%x" +usb_xhci_slot_enable(uint32_t slotid) "slotid %d" +usb_xhci_slot_disable(uint32_t slotid) "slotid %d" +usb_xhci_slot_address(uint32_t slotid, const char *port) "slotid %d, port %s" +usb_xhci_slot_configure(uint32_t slotid) "slotid %d" +usb_xhci_slot_evaluate(uint32_t slotid) "slotid %d" +usb_xhci_slot_reset(uint32_t slotid) "slotid %d" +usb_xhci_ep_enable(uint32_t slotid, uint32_t epid) "slotid %d, epid %d" +usb_xhci_ep_disable(uint32_t slotid, uint32_t epid) "slotid %d, epid %d" +usb_xhci_ep_set_dequeue(uint32_t slotid, uint32_t epid, uint32_t streamid, uint64_t param) "slotid %d, epid %d, streamid %d, ptr 0x%016" PRIx64 +usb_xhci_ep_kick(uint32_t slotid, uint32_t epid, uint32_t streamid) "slotid %d, epid %d, streamid %d" +usb_xhci_ep_stop(uint32_t slotid, uint32_t epid) "slotid %d, epid %d" +usb_xhci_ep_reset(uint32_t slotid, uint32_t epid) "slotid %d, epid %d" +usb_xhci_ep_state(uint32_t slotid, uint32_t epid, const char *os, const char *ns) "slotid %d, epid %d, %s -> %s" +usb_xhci_xfer_start(void *xfer, uint32_t slotid, uint32_t epid, uint32_t streamid) "%p: slotid %d, epid %d, streamid %d" +usb_xhci_xfer_async(void *xfer) "%p" +usb_xhci_xfer_nak(void *xfer) "%p" +usb_xhci_xfer_retry(void *xfer) "%p" +usb_xhci_xfer_success(void *xfer, uint32_t bytes) "%p: len %d" +usb_xhci_xfer_error(void *xfer, uint32_t ret) "%p: ret %d" +usb_xhci_unimplemented(const char *item, int nr) "%s (0x%x)" +usb_xhci_enforced_limit(const char *item) "%s" + +# hcd-dwc2.c +usb_dwc2_update_irq(uint32_t level) "level=%d" +usb_dwc2_raise_global_irq(uint32_t intr) "0x%08x" +usb_dwc2_lower_global_irq(uint32_t intr) "0x%08x" +usb_dwc2_raise_host_irq(uint32_t intr) "0x%04x" +usb_dwc2_lower_host_irq(uint32_t intr) "0x%04x" +usb_dwc2_sof(int64_t next) "next SOF %" PRId64 +usb_dwc2_bus_start(void) "start 
SOFs" +usb_dwc2_bus_stop(void) "stop SOFs" +usb_dwc2_find_device(uint8_t addr) "%d" +usb_dwc2_port_disabled(uint32_t pnum) "port %d disabled" +usb_dwc2_device_found(uint32_t pnum) "device found on port %d" +usb_dwc2_device_not_found(void) "device not found" +usb_dwc2_handle_packet(uint32_t chan, void *dev, void *pkt, uint32_t ep, const char *type, const char *dir, uint32_t mps, uint32_t len, uint32_t pcnt) "ch %d dev %p pkt %p ep %d type %s dir %s mps %d len %d pcnt %d" +usb_dwc2_memory_read(uint32_t addr, uint32_t len) "addr %d len %d" +usb_dwc2_packet_status(const char *status, uint32_t len) "status %s len %d" +usb_dwc2_packet_error(const char *status) "ERROR %s" +usb_dwc2_async_packet(void *pkt, uint32_t chan, void *dev, uint32_t ep, const char *dir, uint32_t len) "pkt %p ch %d dev %p ep %d %s len %d" +usb_dwc2_memory_write(uint32_t addr, uint32_t len) "addr %d len %d" +usb_dwc2_packet_done(const char *status, uint32_t actual, uint32_t len, uint32_t pcnt) "status %s actual %d len %d pcnt %d" +usb_dwc2_packet_next(const char *status, uint32_t len, uint32_t pcnt) "status %s len %d pcnt %d" +usb_dwc2_attach(void *port) "port %p" +usb_dwc2_attach_speed(const char *speed) "%s-speed device attached" +usb_dwc2_detach(void *port) "port %p" +usb_dwc2_child_detach(void *port, void *child) "port %p child %p" +usb_dwc2_wakeup(void *port) "port %p" +usb_dwc2_async_packet_complete(void *port, void *pkt, uint32_t chan, void *dev, uint32_t ep, const char *dir, uint32_t len) "port %p packet %p ch %d dev %p ep %d %s len %d" +usb_dwc2_work_bh(void) "" +usb_dwc2_work_bh_service(uint32_t first, uint32_t current, void *dev, uint32_t ep) "first %d servicing %d dev %p ep %d" +usb_dwc2_work_bh_next(uint32_t chan) "next %d" +usb_dwc2_enable_chan(uint32_t chan, void *dev, void *pkt, uint32_t ep) "ch %d dev %p pkt %p ep %d" +usb_dwc2_glbreg_read(uint64_t addr, const char *reg, uint32_t val) " 0x%04" PRIx64 " %s val 0x%08x" +usb_dwc2_glbreg_write(uint64_t addr, const char *reg, uint64_t val, uint32_t old, uint64_t result) "0x%04" PRIx64 " %s val 0x%08" PRIx64 " old 0x%08x result 0x%08" PRIx64 +usb_dwc2_fszreg_read(uint64_t addr, uint32_t val) " 0x%04" PRIx64 " HPTXFSIZ val 0x%08x" +usb_dwc2_fszreg_write(uint64_t addr, uint64_t val, uint32_t old, uint64_t result) "0x%04" PRIx64 " HPTXFSIZ val 0x%08" PRIx64 " old 0x%08x result 0x%08" PRIx64 +usb_dwc2_hreg0_read(uint64_t addr, const char *reg, uint32_t val) " 0x%04" PRIx64 " %s val 0x%08x" +usb_dwc2_hreg0_write(uint64_t addr, const char *reg, uint64_t val, uint32_t old, uint64_t result) " 0x%04" PRIx64 " %s val 0x%08" PRIx64 " old 0x%08x result 0x%08" PRIx64 +usb_dwc2_hreg1_read(uint64_t addr, const char *reg, uint64_t chan, uint32_t val) " 0x%04" PRIx64 " %s%" PRId64 " val 0x%08x" +usb_dwc2_hreg1_write(uint64_t addr, const char *reg, uint64_t chan, uint64_t val, uint32_t old, uint64_t result) " 0x%04" PRIx64 " %s%" PRId64 " val 0x%08" PRIx64 " old 0x%08x result 0x%08" PRIx64 +usb_dwc2_pcgreg_read(uint64_t addr, const char *reg, uint32_t val) " 0x%04" PRIx64 " %s val 0x%08x" +usb_dwc2_pcgreg_write(uint64_t addr, const char *reg, uint64_t val, uint32_t old, uint64_t result) "0x%04" PRIx64 " %s val 0x%08" PRIx64 " old 0x%08x result 0x%08" PRIx64 +usb_dwc2_hreg2_read(uint64_t addr, uint64_t fifo, uint32_t val) " 0x%04" PRIx64 " FIFO%" PRId64 " val 0x%08x" +usb_dwc2_hreg2_write(uint64_t addr, uint64_t fifo, uint64_t val, uint32_t old, uint64_t result) " 0x%04" PRIx64 " FIFO%" PRId64 " val 0x%08" PRIx64 " old 0x%08x result 0x%08" PRIx64 +usb_dwc2_hreg0_action(const char 
*s) "%s" +usb_dwc2_wakeup_endpoint(void *ep, uint32_t stream) "endp %p stream %d" +usb_dwc2_work_timer(void) "" +usb_dwc2_reset_enter(void) "=== RESET enter ===" +usb_dwc2_reset_hold(void) "=== RESET hold ===" +usb_dwc2_reset_exit(void) "=== RESET exit ===" + +# desc.c +usb_desc_device(int addr, int len, int ret) "dev %d query device, len %d, ret %d" +usb_desc_device_qualifier(int addr, int len, int ret) "dev %d query device qualifier, len %d, ret %d" +usb_desc_config(int addr, int index, int len, int ret) "dev %d query config %d, len %d, ret %d" +usb_desc_other_speed_config(int addr, int index, int len, int ret) "dev %d query config %d, len %d, ret %d" +usb_desc_string(int addr, int index, int len, int ret) "dev %d query string %d, len %d, ret %d" +usb_desc_bos(int addr, int len, int ret) "dev %d bos, len %d, ret %d" +usb_desc_msos(int addr, int index, int len, int ret) "dev %d msos, index 0x%x, len %d, ret %d" +usb_set_addr(int addr) "dev %d" +usb_set_config(int addr, int config, int ret) "dev %d, config %d, ret %d" +usb_set_interface(int addr, int iface, int alt, int ret) "dev %d, interface %d, altsetting %d, ret %d" +usb_clear_device_feature(int addr, int feature, int ret) "dev %d, feature %d, ret %d" +usb_set_device_feature(int addr, int feature, int ret) "dev %d, feature %d, ret %d" + +# dev-hub.c +usb_hub_reset(int addr) "dev %d" +usb_hub_control(int addr, int request, int value, int index, int length) "dev %d, req 0x%x, value %d, index %d, langth %d" +usb_hub_get_port_status(int addr, int nr, int status, int changed) "dev %d, port %d, status 0x%x, changed 0x%x" +usb_hub_set_port_feature(int addr, int nr, const char *f) "dev %d, port %d, feature %s" +usb_hub_clear_port_feature(int addr, int nr, const char *f) "dev %d, port %d, feature %s" +usb_hub_attach(int addr, int nr) "dev %d, port %d" +usb_hub_detach(int addr, int nr) "dev %d, port %d" +usb_hub_status_report(int addr, int status) "dev %d, status 0x%x" + +# dev-storage.c +usb_msd_reset(void) "" +usb_msd_maxlun(unsigned maxlun) "%d" +usb_msd_send_status(unsigned status, unsigned tag, size_t size) "status %d, tag 0x%x, len %zd" +usb_msd_data_in(unsigned packet, unsigned remaining, unsigned total) "%d/%d (scsi %d)" +usb_msd_data_out(unsigned packet, unsigned remaining) "%d/%d" +usb_msd_packet_async(void) "" +usb_msd_packet_complete(void) "" +usb_msd_cmd_submit(unsigned lun, unsigned tag, unsigned flags, unsigned len, unsigned data_len) "lun %u, tag 0x%x, flags 0x%08x, len %d, data-len %d" +usb_msd_cmd_complete(unsigned status, unsigned tag) "status %d, tag 0x%x" +usb_msd_cmd_cancel(unsigned tag) "tag 0x%x" + +# dev-uas.c +usb_uas_reset(int addr) "dev %d" +usb_uas_command(int addr, uint16_t tag, int lun, uint32_t lun64_1, uint32_t lun64_2) "dev %d, tag 0x%x, lun %d, lun64 0x%08x-0x%08x" +usb_uas_response(int addr, uint16_t tag, uint8_t code) "dev %d, tag 0x%x, code 0x%x" +usb_uas_sense(int addr, uint16_t tag, uint8_t status) "dev %d, tag 0x%x, status 0x%x" +usb_uas_read_ready(int addr, uint16_t tag) "dev %d, tag 0x%x" +usb_uas_write_ready(int addr, uint16_t tag) "dev %d, tag 0x%x" +usb_uas_xfer_data(int addr, uint16_t tag, uint32_t copy, uint32_t uoff, uint32_t usize, uint32_t soff, uint32_t ssize) "dev %d, tag 0x%x, copy %d, usb-pkt %d/%d, scsi-buf %d/%d" +usb_uas_scsi_data(int addr, uint16_t tag, uint32_t bytes) "dev %d, tag 0x%x, bytes %d" +usb_uas_scsi_complete(int addr, uint16_t tag, uint32_t status, uint32_t resid) "dev %d, tag 0x%x, status 0x%x, residue %d" +usb_uas_tmf_abort_task(int addr, uint16_t tag, uint16_t task_tag) 
"dev %d, tag 0x%x, task-tag 0x%x" +usb_uas_tmf_logical_unit_reset(int addr, uint16_t tag, int lun) "dev %d, tag 0x%x, lun %d" +usb_uas_tmf_unsupported(int addr, uint16_t tag, uint32_t function) "dev %d, tag 0x%x, function 0x%x" + +# dev-mtp.c +usb_mtp_reset(int addr) "dev %d" +usb_mtp_command(int dev, uint16_t code, uint32_t trans, uint32_t arg0, uint32_t arg1, uint32_t arg2, uint32_t arg3, uint32_t arg4) "dev %d, code 0x%x, trans 0x%x, args 0x%x, 0x%x, 0x%x, 0x%x, 0x%x" +usb_mtp_success(int dev, uint32_t trans, uint32_t arg0, uint32_t arg1) "dev %d, trans 0x%x, args 0x%x, 0x%x" +usb_mtp_error(int dev, uint16_t code, uint32_t trans, uint32_t arg0, uint32_t arg1) "dev %d, code 0x%x, trans 0x%x, args 0x%x, 0x%x" +usb_mtp_data_in(int dev, uint32_t trans, uint32_t len) "dev %d, trans 0x%x, len %d" +usb_mtp_xfer(int dev, uint32_t ep, uint32_t dlen, uint32_t plen) "dev %d, ep %d, %d/%d" +usb_mtp_nak(int dev, uint32_t ep) "dev %d, ep %d" +usb_mtp_stall(int dev, const char *reason) "dev %d, reason: %s" +usb_mtp_op_get_device_info(int dev) "dev %d" +usb_mtp_op_open_session(int dev) "dev %d" +usb_mtp_op_close_session(int dev) "dev %d" +usb_mtp_op_get_storage_ids(int dev) "dev %d" +usb_mtp_op_get_storage_info(int dev) "dev %d" +usb_mtp_op_get_num_objects(int dev, uint32_t handle, const char *path) "dev %d, handle 0x%x, path %s" +usb_mtp_op_get_object_handles(int dev, uint32_t handle, const char *path) "dev %d, handle 0x%x, path %s" +usb_mtp_op_get_object_info(int dev, uint32_t handle, const char *path) "dev %d, handle 0x%x, path %s" +usb_mtp_op_get_object(int dev, uint32_t handle, const char *path) "dev %d, handle 0x%x, path %s" +usb_mtp_op_get_partial_object(int dev, uint32_t handle, const char *path, uint32_t offset, uint32_t length) "dev %d, handle 0x%x, path %s, off %d, len %d" +usb_mtp_op_unknown(int dev, uint32_t code) "dev %d, command code 0x%x" +usb_mtp_object_alloc(int dev, uint32_t handle, const char *path) "dev %d, handle 0x%x, path %s" +usb_mtp_object_free(int dev, uint32_t handle, const char *path) "dev %d, handle 0x%x, path %s" +usb_mtp_add_child(int dev, uint32_t handle, const char *path) "dev %d, handle 0x%x, path %s" +usb_mtp_file_monitor_event(int dev, const char *path, const char *s) "dev %d, path %s event %s" + +# host-libusb.c +usb_host_open_started(int bus, int addr) "dev %d:%d" +usb_host_open_hostfd(int hostfd) "hostfd %d" +usb_host_open_success(int bus, int addr) "dev %d:%d" +usb_host_open_failure(int bus, int addr) "dev %d:%d" +usb_host_close(int bus, int addr) "dev %d:%d" +usb_host_attach_kernel(int bus, int addr, int interface) "dev %d:%d, if %d" +usb_host_detach_kernel(int bus, int addr, int interface) "dev %d:%d, if %d" +usb_host_set_address(int bus, int addr, int config) "dev %d:%d, address %d" +usb_host_set_config(int bus, int addr, int config) "dev %d:%d, config %d" +usb_host_set_interface(int bus, int addr, int interface, int alt) "dev %d:%d, interface %d, alt %d" +usb_host_claim_interface(int bus, int addr, int config, int interface) "dev %d:%d, config %d, if %d" +usb_host_release_interface(int bus, int addr, int interface) "dev %d:%d, if %d" +usb_host_req_control(int bus, int addr, void *p, int req, int value, int index) "dev %d:%d, packet %p, req 0x%x, value %d, index %d" +usb_host_req_data(int bus, int addr, void *p, int in, int ep, int size) "dev %d:%d, packet %p, in %d, ep %d, size %d" +usb_host_req_complete(int bus, int addr, void *p, int status, int length) "dev %d:%d, packet %p, status %d, length %d" +usb_host_req_emulated(int bus, int addr, void *p, int 
status) "dev %d:%d, packet %p, status %d" +usb_host_req_canceled(int bus, int addr, void *p) "dev %d:%d, packet %p" +usb_host_iso_start(int bus, int addr, int ep) "dev %d:%d, ep %d" +usb_host_iso_stop(int bus, int addr, int ep) "dev %d:%d, ep %d" +usb_host_iso_out_of_bufs(int bus, int addr, int ep) "dev %d:%d, ep %d" +usb_host_reset(int bus, int addr) "dev %d:%d" +usb_host_auto_scan_enabled(void) +usb_host_auto_scan_disabled(void) +usb_host_parse_config(int bus, int addr, int value, int active) "dev %d:%d, value %d, active %d" +usb_host_parse_interface(int bus, int addr, int num, int alt, int active) "dev %d:%d, num %d, alt %d, active %d" +usb_host_parse_endpoint(int bus, int addr, int ep, const char *dir, const char *type, int active) "dev %d:%d, ep %d, %s, %s, active %d" +usb_host_parse_error(int bus, int addr, const char *errmsg) "dev %d:%d, msg %s" +usb_host_remote_wakeup_removed(int bus, int addr) "dev %d:%d" + +# dev-serial.c +usb_serial_reset(int bus, int addr) "dev %d:%u reset" +usb_serial_handle_control(int bus, int addr, int request, int value) "dev %d:%u got control 0x%x, value 0x%x" +usb_serial_unsupported_parity(int bus, int addr, int value) "dev %d:%u unsupported parity %d" +usb_serial_unsupported_stopbits(int bus, int addr, int value) "dev %d:%u unsupported stop bits %d" +usb_serial_unsupported_control(int bus, int addr, int request, int value) "dev %d:%u got unsupported/bogus control 0x%x, value 0x%x" +usb_serial_unsupported_data_bits(int bus, int addr, int value) "dev %d:%u unsupported data bits %d, falling back to 8" +usb_serial_bad_token(int bus, int addr) "dev %d:%u bad token" +usb_serial_set_baud(int bus, int addr, int baud) "dev %d:%u baud rate %d" +usb_serial_set_data(int bus, int addr, int parity, int data, int stop) "dev %d:%u parity %c, data bits %d, stop bits %d" +usb_serial_set_flow_control(int bus, int addr, int index) "dev %d:%u flow control %d" +usb_serial_set_xonxoff(int bus, int addr, uint8_t xon, uint8_t xoff) "dev %d:%u xon 0x%x xoff 0x%x" diff --git a/hw/usb/trace.h b/hw/usb/trace.h new file mode 100644 index 000000000..f3962f2ba --- /dev/null +++ b/hw/usb/trace.h @@ -0,0 +1 @@ +#include "trace/trace-hw_usb.h" diff --git a/hw/usb/tusb6010.c b/hw/usb/tusb6010.c new file mode 100644 index 000000000..1dd4071e6 --- /dev/null +++ b/hw/usb/tusb6010.c @@ -0,0 +1,850 @@ +/* + * Texas Instruments TUSB6010 emulation. + * Based on reverse-engineering of a linux driver. + * + * Copyright (C) 2008 Nokia Corporation + * Written by Andrzej Zaborowski + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 or + * (at your option) version 3 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . 
+ */ + +#include "qemu/osdep.h" +#include "qemu/module.h" +#include "qemu/timer.h" +#include "hw/usb.h" +#include "hw/usb/hcd-musb.h" +#include "hw/arm/omap.h" +#include "hw/hw.h" +#include "hw/irq.h" +#include "hw/sysbus.h" +#include "qom/object.h" + +#define TYPE_TUSB6010 "tusb6010" +OBJECT_DECLARE_SIMPLE_TYPE(TUSBState, TUSB6010) + +struct TUSBState { + SysBusDevice parent_obj; + + MemoryRegion iomem[2]; + qemu_irq irq; + MUSBState *musb; + QEMUTimer *otg_timer; + QEMUTimer *pwr_timer; + + int power; + uint32_t scratch; + uint16_t test_reset; + uint32_t prcm_config; + uint32_t prcm_mngmt; + uint16_t otg_status; + uint32_t dev_config; + int host_mode; + uint32_t intr; + uint32_t intr_ok; + uint32_t mask; + uint32_t usbip_intr; + uint32_t usbip_mask; + uint32_t gpio_intr; + uint32_t gpio_mask; + uint32_t gpio_config; + uint32_t dma_intr; + uint32_t dma_mask; + uint32_t dma_map; + uint32_t dma_config; + uint32_t ep0_config; + uint32_t rx_config[15]; + uint32_t tx_config[15]; + uint32_t wkup_mask; + uint32_t pullup[2]; + uint32_t control_config; + uint32_t otg_timer_val; +}; + +#define TUSB_DEVCLOCK 60000000 /* 60 MHz */ + +#define TUSB_VLYNQ_CTRL 0x004 + +/* Mentor Graphics OTG core registers. */ +#define TUSB_BASE_OFFSET 0x400 + +/* FIFO registers, 32-bit. */ +#define TUSB_FIFO_BASE 0x600 + +/* Device System & Control registers, 32-bit. */ +#define TUSB_SYS_REG_BASE 0x800 + +#define TUSB_DEV_CONF (TUSB_SYS_REG_BASE + 0x000) +#define TUSB_DEV_CONF_USB_HOST_MODE (1 << 16) +#define TUSB_DEV_CONF_PROD_TEST_MODE (1 << 15) +#define TUSB_DEV_CONF_SOFT_ID (1 << 1) +#define TUSB_DEV_CONF_ID_SEL (1 << 0) + +#define TUSB_PHY_OTG_CTRL_ENABLE (TUSB_SYS_REG_BASE + 0x004) +#define TUSB_PHY_OTG_CTRL (TUSB_SYS_REG_BASE + 0x008) +#define TUSB_PHY_OTG_CTRL_WRPROTECT (0xa5 << 24) +#define TUSB_PHY_OTG_CTRL_O_ID_PULLUP (1 << 23) +#define TUSB_PHY_OTG_CTRL_O_VBUS_DET_EN (1 << 19) +#define TUSB_PHY_OTG_CTRL_O_SESS_END_EN (1 << 18) +#define TUSB_PHY_OTG_CTRL_TESTM2 (1 << 17) +#define TUSB_PHY_OTG_CTRL_TESTM1 (1 << 16) +#define TUSB_PHY_OTG_CTRL_TESTM0 (1 << 15) +#define TUSB_PHY_OTG_CTRL_TX_DATA2 (1 << 14) +#define TUSB_PHY_OTG_CTRL_TX_GZ2 (1 << 13) +#define TUSB_PHY_OTG_CTRL_TX_ENABLE2 (1 << 12) +#define TUSB_PHY_OTG_CTRL_DM_PULLDOWN (1 << 11) +#define TUSB_PHY_OTG_CTRL_DP_PULLDOWN (1 << 10) +#define TUSB_PHY_OTG_CTRL_OSC_EN (1 << 9) +#define TUSB_PHY_OTG_CTRL_PHYREF_CLK(v) (((v) & 3) << 7) +#define TUSB_PHY_OTG_CTRL_PD (1 << 6) +#define TUSB_PHY_OTG_CTRL_PLL_ON (1 << 5) +#define TUSB_PHY_OTG_CTRL_EXT_RPU (1 << 4) +#define TUSB_PHY_OTG_CTRL_PWR_GOOD (1 << 3) +#define TUSB_PHY_OTG_CTRL_RESET (1 << 2) +#define TUSB_PHY_OTG_CTRL_SUSPENDM (1 << 1) +#define TUSB_PHY_OTG_CTRL_CLK_MODE (1 << 0) + +/* OTG status register */ +#define TUSB_DEV_OTG_STAT (TUSB_SYS_REG_BASE + 0x00c) +#define TUSB_DEV_OTG_STAT_PWR_CLK_GOOD (1 << 8) +#define TUSB_DEV_OTG_STAT_SESS_END (1 << 7) +#define TUSB_DEV_OTG_STAT_SESS_VALID (1 << 6) +#define TUSB_DEV_OTG_STAT_VBUS_VALID (1 << 5) +#define TUSB_DEV_OTG_STAT_VBUS_SENSE (1 << 4) +#define TUSB_DEV_OTG_STAT_ID_STATUS (1 << 3) +#define TUSB_DEV_OTG_STAT_HOST_DISCON (1 << 2) +#define TUSB_DEV_OTG_STAT_LINE_STATE (3 << 0) +#define TUSB_DEV_OTG_STAT_DP_ENABLE (1 << 1) +#define TUSB_DEV_OTG_STAT_DM_ENABLE (1 << 0) + +#define TUSB_DEV_OTG_TIMER (TUSB_SYS_REG_BASE + 0x010) +#define TUSB_DEV_OTG_TIMER_ENABLE (1 << 31) +#define TUSB_DEV_OTG_TIMER_VAL(v) ((v) & 0x07ffffff) +#define TUSB_PRCM_REV (TUSB_SYS_REG_BASE + 0x014) + +/* PRCM configuration register */ +#define TUSB_PRCM_CONF (TUSB_SYS_REG_BASE 
+ 0x018) +#define TUSB_PRCM_CONF_SFW_CPEN (1 << 24) +#define TUSB_PRCM_CONF_SYS_CLKSEL(v) (((v) & 3) << 16) + +/* PRCM management register */ +#define TUSB_PRCM_MNGMT (TUSB_SYS_REG_BASE + 0x01c) +#define TUSB_PRCM_MNGMT_SRP_FIX_TMR(v) (((v) & 0xf) << 25) +#define TUSB_PRCM_MNGMT_SRP_FIX_EN (1 << 24) +#define TUSB_PRCM_MNGMT_VBUS_VAL_TMR(v) (((v) & 0xf) << 20) +#define TUSB_PRCM_MNGMT_VBUS_VAL_FLT_EN (1 << 19) +#define TUSB_PRCM_MNGMT_DFT_CLK_DIS (1 << 18) +#define TUSB_PRCM_MNGMT_VLYNQ_CLK_DIS (1 << 17) +#define TUSB_PRCM_MNGMT_OTG_SESS_END_EN (1 << 10) +#define TUSB_PRCM_MNGMT_OTG_VBUS_DET_EN (1 << 9) +#define TUSB_PRCM_MNGMT_OTG_ID_PULLUP (1 << 8) +#define TUSB_PRCM_MNGMT_15_SW_EN (1 << 4) +#define TUSB_PRCM_MNGMT_33_SW_EN (1 << 3) +#define TUSB_PRCM_MNGMT_5V_CPEN (1 << 2) +#define TUSB_PRCM_MNGMT_PM_IDLE (1 << 1) +#define TUSB_PRCM_MNGMT_DEV_IDLE (1 << 0) + +/* Wake-up source clear and mask registers */ +#define TUSB_PRCM_WAKEUP_SOURCE (TUSB_SYS_REG_BASE + 0x020) +#define TUSB_PRCM_WAKEUP_CLEAR (TUSB_SYS_REG_BASE + 0x028) +#define TUSB_PRCM_WAKEUP_MASK (TUSB_SYS_REG_BASE + 0x02c) +#define TUSB_PRCM_WAKEUP_RESERVED_BITS (0xffffe << 13) +#define TUSB_PRCM_WGPIO_7 (1 << 12) +#define TUSB_PRCM_WGPIO_6 (1 << 11) +#define TUSB_PRCM_WGPIO_5 (1 << 10) +#define TUSB_PRCM_WGPIO_4 (1 << 9) +#define TUSB_PRCM_WGPIO_3 (1 << 8) +#define TUSB_PRCM_WGPIO_2 (1 << 7) +#define TUSB_PRCM_WGPIO_1 (1 << 6) +#define TUSB_PRCM_WGPIO_0 (1 << 5) +#define TUSB_PRCM_WHOSTDISCON (1 << 4) /* Host disconnect */ +#define TUSB_PRCM_WBUS (1 << 3) /* USB bus resume */ +#define TUSB_PRCM_WNORCS (1 << 2) /* NOR chip select */ +#define TUSB_PRCM_WVBUS (1 << 1) /* OTG PHY VBUS */ +#define TUSB_PRCM_WID (1 << 0) /* OTG PHY ID detect */ + +#define TUSB_PULLUP_1_CTRL (TUSB_SYS_REG_BASE + 0x030) +#define TUSB_PULLUP_2_CTRL (TUSB_SYS_REG_BASE + 0x034) +#define TUSB_INT_CTRL_REV (TUSB_SYS_REG_BASE + 0x038) +#define TUSB_INT_CTRL_CONF (TUSB_SYS_REG_BASE + 0x03c) +#define TUSB_USBIP_INT_SRC (TUSB_SYS_REG_BASE + 0x040) +#define TUSB_USBIP_INT_SET (TUSB_SYS_REG_BASE + 0x044) +#define TUSB_USBIP_INT_CLEAR (TUSB_SYS_REG_BASE + 0x048) +#define TUSB_USBIP_INT_MASK (TUSB_SYS_REG_BASE + 0x04c) +#define TUSB_DMA_INT_SRC (TUSB_SYS_REG_BASE + 0x050) +#define TUSB_DMA_INT_SET (TUSB_SYS_REG_BASE + 0x054) +#define TUSB_DMA_INT_CLEAR (TUSB_SYS_REG_BASE + 0x058) +#define TUSB_DMA_INT_MASK (TUSB_SYS_REG_BASE + 0x05c) +#define TUSB_GPIO_INT_SRC (TUSB_SYS_REG_BASE + 0x060) +#define TUSB_GPIO_INT_SET (TUSB_SYS_REG_BASE + 0x064) +#define TUSB_GPIO_INT_CLEAR (TUSB_SYS_REG_BASE + 0x068) +#define TUSB_GPIO_INT_MASK (TUSB_SYS_REG_BASE + 0x06c) + +/* NOR flash interrupt source registers */ +#define TUSB_INT_SRC (TUSB_SYS_REG_BASE + 0x070) +#define TUSB_INT_SRC_SET (TUSB_SYS_REG_BASE + 0x074) +#define TUSB_INT_SRC_CLEAR (TUSB_SYS_REG_BASE + 0x078) +#define TUSB_INT_MASK (TUSB_SYS_REG_BASE + 0x07c) +#define TUSB_INT_SRC_TXRX_DMA_DONE (1 << 24) +#define TUSB_INT_SRC_USB_IP_CORE (1 << 17) +#define TUSB_INT_SRC_OTG_TIMEOUT (1 << 16) +#define TUSB_INT_SRC_VBUS_SENSE_CHNG (1 << 15) +#define TUSB_INT_SRC_ID_STATUS_CHNG (1 << 14) +#define TUSB_INT_SRC_DEV_WAKEUP (1 << 13) +#define TUSB_INT_SRC_DEV_READY (1 << 12) +#define TUSB_INT_SRC_USB_IP_TX (1 << 9) +#define TUSB_INT_SRC_USB_IP_RX (1 << 8) +#define TUSB_INT_SRC_USB_IP_VBUS_ERR (1 << 7) +#define TUSB_INT_SRC_USB_IP_VBUS_REQ (1 << 6) +#define TUSB_INT_SRC_USB_IP_DISCON (1 << 5) +#define TUSB_INT_SRC_USB_IP_CONN (1 << 4) +#define TUSB_INT_SRC_USB_IP_SOF (1 << 3) +#define TUSB_INT_SRC_USB_IP_RST_BABBLE (1 << 2) 
+#define TUSB_INT_SRC_USB_IP_RESUME (1 << 1) +#define TUSB_INT_SRC_USB_IP_SUSPEND (1 << 0) + +#define TUSB_GPIO_REV (TUSB_SYS_REG_BASE + 0x080) +#define TUSB_GPIO_CONF (TUSB_SYS_REG_BASE + 0x084) +#define TUSB_DMA_CTRL_REV (TUSB_SYS_REG_BASE + 0x100) +#define TUSB_DMA_REQ_CONF (TUSB_SYS_REG_BASE + 0x104) +#define TUSB_EP0_CONF (TUSB_SYS_REG_BASE + 0x108) +#define TUSB_EP_IN_SIZE (TUSB_SYS_REG_BASE + 0x10c) +#define TUSB_DMA_EP_MAP (TUSB_SYS_REG_BASE + 0x148) +#define TUSB_EP_OUT_SIZE (TUSB_SYS_REG_BASE + 0x14c) +#define TUSB_EP_MAX_PACKET_SIZE_OFFSET (TUSB_SYS_REG_BASE + 0x188) +#define TUSB_SCRATCH_PAD (TUSB_SYS_REG_BASE + 0x1c4) +#define TUSB_WAIT_COUNT (TUSB_SYS_REG_BASE + 0x1c8) +#define TUSB_PROD_TEST_RESET (TUSB_SYS_REG_BASE + 0x1d8) + +#define TUSB_DIDR1_LO (TUSB_SYS_REG_BASE + 0x1f8) +#define TUSB_DIDR1_HI (TUSB_SYS_REG_BASE + 0x1fc) + +/* Device System & Control register bitfields */ +#define TUSB_INT_CTRL_CONF_INT_RLCYC(v) (((v) & 0x7) << 18) +#define TUSB_INT_CTRL_CONF_INT_POLARITY (1 << 17) +#define TUSB_INT_CTRL_CONF_INT_MODE (1 << 16) +#define TUSB_GPIO_CONF_DMAREQ(v) (((v) & 0x3f) << 24) +#define TUSB_DMA_REQ_CONF_BURST_SIZE(v) (((v) & 3) << 26) +#define TUSB_DMA_REQ_CONF_DMA_RQ_EN(v) (((v) & 0x3f) << 20) +#define TUSB_DMA_REQ_CONF_DMA_RQ_ASR(v) (((v) & 0xf) << 16) +#define TUSB_EP0_CONFIG_SW_EN (1 << 8) +#define TUSB_EP0_CONFIG_DIR_TX (1 << 7) +#define TUSB_EP0_CONFIG_XFR_SIZE(v) ((v) & 0x7f) +#define TUSB_EP_CONFIG_SW_EN (1 << 31) +#define TUSB_EP_CONFIG_XFR_SIZE(v) ((v) & 0x7fffffff) +#define TUSB_PROD_TEST_RESET_VAL 0xa596 + +static void tusb_intr_update(TUSBState *s) +{ + if (s->control_config & TUSB_INT_CTRL_CONF_INT_POLARITY) + qemu_set_irq(s->irq, s->intr & ~s->mask & s->intr_ok); + else + qemu_set_irq(s->irq, (!(s->intr & ~s->mask)) & s->intr_ok); +} + +static void tusb_usbip_intr_update(TUSBState *s) +{ + /* TX interrupt in the MUSB */ + if (s->usbip_intr & 0x0000ffff & ~s->usbip_mask) + s->intr |= TUSB_INT_SRC_USB_IP_TX; + else + s->intr &= ~TUSB_INT_SRC_USB_IP_TX; + + /* RX interrupt in the MUSB */ + if (s->usbip_intr & 0xffff0000 & ~s->usbip_mask) + s->intr |= TUSB_INT_SRC_USB_IP_RX; + else + s->intr &= ~TUSB_INT_SRC_USB_IP_RX; + + /* XXX: What about TUSB_INT_SRC_USB_IP_CORE? */ + + tusb_intr_update(s); +} + +static void tusb_dma_intr_update(TUSBState *s) +{ + if (s->dma_intr & ~s->dma_mask) + s->intr |= TUSB_INT_SRC_TXRX_DMA_DONE; + else + s->intr &= ~TUSB_INT_SRC_TXRX_DMA_DONE; + + tusb_intr_update(s); +} + +static void tusb_gpio_intr_update(TUSBState *s) +{ + /* TODO: How is this signalled? */ +} + +static uint32_t tusb_async_readb(void *opaque, hwaddr addr) +{ + TUSBState *s = (TUSBState *) opaque; + + switch (addr & 0xfff) { + case TUSB_BASE_OFFSET ... (TUSB_BASE_OFFSET | 0x1ff): + return musb_read[0](s->musb, addr & 0x1ff); + + case TUSB_FIFO_BASE ... (TUSB_FIFO_BASE | 0x1ff): + return musb_read[0](s->musb, 0x20 + ((addr >> 3) & 0x3c)); + } + + printf("%s: unknown register at %03x\n", + __func__, (int) (addr & 0xfff)); + return 0; +} + +static uint32_t tusb_async_readh(void *opaque, hwaddr addr) +{ + TUSBState *s = (TUSBState *) opaque; + + switch (addr & 0xfff) { + case TUSB_BASE_OFFSET ... (TUSB_BASE_OFFSET | 0x1ff): + return musb_read[1](s->musb, addr & 0x1ff); + + case TUSB_FIFO_BASE ... 
(TUSB_FIFO_BASE | 0x1ff): + return musb_read[1](s->musb, 0x20 + ((addr >> 3) & 0x3c)); + } + + printf("%s: unknown register at %03x\n", + __func__, (int) (addr & 0xfff)); + return 0; +} + +static uint32_t tusb_async_readw(void *opaque, hwaddr addr) +{ + TUSBState *s = (TUSBState *) opaque; + int offset = addr & 0xfff; + int epnum; + uint32_t ret; + + switch (offset) { + case TUSB_DEV_CONF: + return s->dev_config; + + case TUSB_BASE_OFFSET ... (TUSB_BASE_OFFSET | 0x1ff): + return musb_read[2](s->musb, offset & 0x1ff); + + case TUSB_FIFO_BASE ... (TUSB_FIFO_BASE | 0x1ff): + return musb_read[2](s->musb, 0x20 + ((addr >> 3) & 0x3c)); + + case TUSB_PHY_OTG_CTRL_ENABLE: + case TUSB_PHY_OTG_CTRL: + return 0x00; /* TODO */ + + case TUSB_DEV_OTG_STAT: + ret = s->otg_status; +#if 0 + if (!(s->prcm_mngmt & TUSB_PRCM_MNGMT_OTG_VBUS_DET_EN)) + ret &= ~TUSB_DEV_OTG_STAT_VBUS_VALID; +#endif + return ret; + case TUSB_DEV_OTG_TIMER: + return s->otg_timer_val; + + case TUSB_PRCM_REV: + return 0x20; + case TUSB_PRCM_CONF: + return s->prcm_config; + case TUSB_PRCM_MNGMT: + return s->prcm_mngmt; + case TUSB_PRCM_WAKEUP_SOURCE: + case TUSB_PRCM_WAKEUP_CLEAR: /* TODO: What does this one return? */ + return 0x00000000; + case TUSB_PRCM_WAKEUP_MASK: + return s->wkup_mask; + + case TUSB_PULLUP_1_CTRL: + return s->pullup[0]; + case TUSB_PULLUP_2_CTRL: + return s->pullup[1]; + + case TUSB_INT_CTRL_REV: + return 0x20; + case TUSB_INT_CTRL_CONF: + return s->control_config; + + case TUSB_USBIP_INT_SRC: + case TUSB_USBIP_INT_SET: /* TODO: What do these two return? */ + case TUSB_USBIP_INT_CLEAR: + return s->usbip_intr; + case TUSB_USBIP_INT_MASK: + return s->usbip_mask; + + case TUSB_DMA_INT_SRC: + case TUSB_DMA_INT_SET: /* TODO: What do these two return? */ + case TUSB_DMA_INT_CLEAR: + return s->dma_intr; + case TUSB_DMA_INT_MASK: + return s->dma_mask; + + case TUSB_GPIO_INT_SRC: /* TODO: What do these two return? */ + case TUSB_GPIO_INT_SET: + case TUSB_GPIO_INT_CLEAR: + return s->gpio_intr; + case TUSB_GPIO_INT_MASK: + return s->gpio_mask; + + case TUSB_INT_SRC: + case TUSB_INT_SRC_SET: /* TODO: What do these two return? */ + case TUSB_INT_SRC_CLEAR: + return s->intr; + case TUSB_INT_MASK: + return s->mask; + + case TUSB_GPIO_REV: + return 0x30; + case TUSB_GPIO_CONF: + return s->gpio_config; + + case TUSB_DMA_CTRL_REV: + return 0x30; + case TUSB_DMA_REQ_CONF: + return s->dma_config; + case TUSB_EP0_CONF: + return s->ep0_config; + case TUSB_EP_IN_SIZE ... (TUSB_EP_IN_SIZE + 0x3b): + epnum = (offset - TUSB_EP_IN_SIZE) >> 2; + return s->tx_config[epnum]; + case TUSB_DMA_EP_MAP: + return s->dma_map; + case TUSB_EP_OUT_SIZE ... (TUSB_EP_OUT_SIZE + 0x3b): + epnum = (offset - TUSB_EP_OUT_SIZE) >> 2; + return s->rx_config[epnum]; + case TUSB_EP_MAX_PACKET_SIZE_OFFSET ... + (TUSB_EP_MAX_PACKET_SIZE_OFFSET + 0x3b): + return 0x00000000; /* TODO */ + case TUSB_WAIT_COUNT: + return 0x00; /* TODO */ + + case TUSB_SCRATCH_PAD: + return s->scratch; + + case TUSB_PROD_TEST_RESET: + return s->test_reset; + + /* DIE IDs */ + case TUSB_DIDR1_LO: + return 0xa9453c59; + case TUSB_DIDR1_HI: + return 0x54059adf; + } + + printf("%s: unknown register at %03x\n", __func__, offset); + return 0; +} + +static void tusb_async_writeb(void *opaque, hwaddr addr, + uint32_t value) +{ + TUSBState *s = (TUSBState *) opaque; + + switch (addr & 0xfff) { + case TUSB_BASE_OFFSET ... (TUSB_BASE_OFFSET | 0x1ff): + musb_write[0](s->musb, addr & 0x1ff, value); + break; + + case TUSB_FIFO_BASE ... 
(TUSB_FIFO_BASE | 0x1ff): + musb_write[0](s->musb, 0x20 + ((addr >> 3) & 0x3c), value); + break; + + default: + printf("%s: unknown register at %03x\n", + __func__, (int) (addr & 0xfff)); + return; + } +} + +static void tusb_async_writeh(void *opaque, hwaddr addr, + uint32_t value) +{ + TUSBState *s = (TUSBState *) opaque; + + switch (addr & 0xfff) { + case TUSB_BASE_OFFSET ... (TUSB_BASE_OFFSET | 0x1ff): + musb_write[1](s->musb, addr & 0x1ff, value); + break; + + case TUSB_FIFO_BASE ... (TUSB_FIFO_BASE | 0x1ff): + musb_write[1](s->musb, 0x20 + ((addr >> 3) & 0x3c), value); + break; + + default: + printf("%s: unknown register at %03x\n", + __func__, (int) (addr & 0xfff)); + return; + } +} + +static void tusb_async_writew(void *opaque, hwaddr addr, + uint32_t value) +{ + TUSBState *s = (TUSBState *) opaque; + int offset = addr & 0xfff; + int epnum; + + switch (offset) { + case TUSB_VLYNQ_CTRL: + break; + + case TUSB_BASE_OFFSET ... (TUSB_BASE_OFFSET | 0x1ff): + musb_write[2](s->musb, offset & 0x1ff, value); + break; + + case TUSB_FIFO_BASE ... (TUSB_FIFO_BASE | 0x1ff): + musb_write[2](s->musb, 0x20 + ((addr >> 3) & 0x3c), value); + break; + + case TUSB_DEV_CONF: + s->dev_config = value; + s->host_mode = (value & TUSB_DEV_CONF_USB_HOST_MODE); + if (value & TUSB_DEV_CONF_PROD_TEST_MODE) + hw_error("%s: Product Test mode not allowed\n", __func__); + break; + + case TUSB_PHY_OTG_CTRL_ENABLE: + case TUSB_PHY_OTG_CTRL: + return; /* TODO */ + case TUSB_DEV_OTG_TIMER: + s->otg_timer_val = value; + if (value & TUSB_DEV_OTG_TIMER_ENABLE) + timer_mod(s->otg_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + + muldiv64(TUSB_DEV_OTG_TIMER_VAL(value), + NANOSECONDS_PER_SECOND, TUSB_DEVCLOCK)); + else + timer_del(s->otg_timer); + break; + + case TUSB_PRCM_CONF: + s->prcm_config = value; + break; + case TUSB_PRCM_MNGMT: + s->prcm_mngmt = value; + break; + case TUSB_PRCM_WAKEUP_CLEAR: + break; + case TUSB_PRCM_WAKEUP_MASK: + s->wkup_mask = value; + break; + + case TUSB_PULLUP_1_CTRL: + s->pullup[0] = value; + break; + case TUSB_PULLUP_2_CTRL: + s->pullup[1] = value; + break; + case TUSB_INT_CTRL_CONF: + s->control_config = value; + tusb_intr_update(s); + break; + + case TUSB_USBIP_INT_SET: + s->usbip_intr |= value; + tusb_usbip_intr_update(s); + break; + case TUSB_USBIP_INT_CLEAR: + s->usbip_intr &= ~value; + tusb_usbip_intr_update(s); + musb_core_intr_clear(s->musb, ~value); + break; + case TUSB_USBIP_INT_MASK: + s->usbip_mask = value; + tusb_usbip_intr_update(s); + break; + + case TUSB_DMA_INT_SET: + s->dma_intr |= value; + tusb_dma_intr_update(s); + break; + case TUSB_DMA_INT_CLEAR: + s->dma_intr &= ~value; + tusb_dma_intr_update(s); + break; + case TUSB_DMA_INT_MASK: + s->dma_mask = value; + tusb_dma_intr_update(s); + break; + + case TUSB_GPIO_INT_SET: + s->gpio_intr |= value; + tusb_gpio_intr_update(s); + break; + case TUSB_GPIO_INT_CLEAR: + s->gpio_intr &= ~value; + tusb_gpio_intr_update(s); + break; + case TUSB_GPIO_INT_MASK: + s->gpio_mask = value; + tusb_gpio_intr_update(s); + break; + + case TUSB_INT_SRC_SET: + s->intr |= value; + tusb_intr_update(s); + break; + case TUSB_INT_SRC_CLEAR: + s->intr &= ~value; + tusb_intr_update(s); + break; + case TUSB_INT_MASK: + s->mask = value; + tusb_intr_update(s); + break; + + case TUSB_GPIO_CONF: + s->gpio_config = value; + break; + case TUSB_DMA_REQ_CONF: + s->dma_config = value; + break; + case TUSB_EP0_CONF: + s->ep0_config = value & 0x1ff; + musb_set_size(s->musb, 0, TUSB_EP0_CONFIG_XFR_SIZE(value), + value & TUSB_EP0_CONFIG_DIR_TX); + break; + case 
TUSB_EP_IN_SIZE ... (TUSB_EP_IN_SIZE + 0x3b): + epnum = (offset - TUSB_EP_IN_SIZE) >> 2; + s->tx_config[epnum] = value; + musb_set_size(s->musb, epnum + 1, TUSB_EP_CONFIG_XFR_SIZE(value), 1); + break; + case TUSB_DMA_EP_MAP: + s->dma_map = value; + break; + case TUSB_EP_OUT_SIZE ... (TUSB_EP_OUT_SIZE + 0x3b): + epnum = (offset - TUSB_EP_OUT_SIZE) >> 2; + s->rx_config[epnum] = value; + musb_set_size(s->musb, epnum + 1, TUSB_EP_CONFIG_XFR_SIZE(value), 0); + break; + case TUSB_EP_MAX_PACKET_SIZE_OFFSET ... + (TUSB_EP_MAX_PACKET_SIZE_OFFSET + 0x3b): + return; /* TODO */ + case TUSB_WAIT_COUNT: + return; /* TODO */ + + case TUSB_SCRATCH_PAD: + s->scratch = value; + break; + + case TUSB_PROD_TEST_RESET: + s->test_reset = value; + break; + + default: + printf("%s: unknown register at %03x\n", __func__, offset); + return; + } +} + +static uint64_t tusb_async_readfn(void *opaque, hwaddr addr, unsigned size) +{ + switch (size) { + case 1: + return tusb_async_readb(opaque, addr); + case 2: + return tusb_async_readh(opaque, addr); + case 4: + return tusb_async_readw(opaque, addr); + default: + g_assert_not_reached(); + } +} + +static void tusb_async_writefn(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + switch (size) { + case 1: + tusb_async_writeb(opaque, addr, value); + break; + case 2: + tusb_async_writeh(opaque, addr, value); + break; + case 4: + tusb_async_writew(opaque, addr, value); + break; + default: + g_assert_not_reached(); + } +} + +static const MemoryRegionOps tusb_async_ops = { + .read = tusb_async_readfn, + .write = tusb_async_writefn, + .valid.min_access_size = 1, + .valid.max_access_size = 4, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void tusb_otg_tick(void *opaque) +{ + TUSBState *s = (TUSBState *) opaque; + + s->otg_timer_val = 0; + s->intr |= TUSB_INT_SRC_OTG_TIMEOUT; + tusb_intr_update(s); +} + +static void tusb_power_tick(void *opaque) +{ + TUSBState *s = (TUSBState *) opaque; + + if (s->power) { + s->intr_ok = ~0; + tusb_intr_update(s); + } +} + +static void tusb_musb_core_intr(void *opaque, int source, int level) +{ + TUSBState *s = (TUSBState *) opaque; + uint16_t otg_status = s->otg_status; + + switch (source) { + case musb_set_vbus: + if (level) + otg_status |= TUSB_DEV_OTG_STAT_VBUS_VALID; + else + otg_status &= ~TUSB_DEV_OTG_STAT_VBUS_VALID; + + /* XXX: only if TUSB_PHY_OTG_CTRL_OTG_VBUS_DET_EN set? */ + /* XXX: only if TUSB_PRCM_MNGMT_OTG_VBUS_DET_EN set? */ + if (s->otg_status != otg_status) { + s->otg_status = otg_status; + s->intr |= TUSB_INT_SRC_VBUS_SENSE_CHNG; + tusb_intr_update(s); + } + break; + + case musb_set_session: + /* XXX: only if TUSB_PHY_OTG_CTRL_OTG_SESS_END_EN set? */ + /* XXX: only if TUSB_PRCM_MNGMT_OTG_SESS_END_EN set? */ + if (level) { + s->otg_status |= TUSB_DEV_OTG_STAT_SESS_VALID; + s->otg_status &= ~TUSB_DEV_OTG_STAT_SESS_END; + } else { + s->otg_status &= ~TUSB_DEV_OTG_STAT_SESS_VALID; + s->otg_status |= TUSB_DEV_OTG_STAT_SESS_END; + } + + /* XXX: some IRQ or anything? */ + break; + + case musb_irq_tx: + case musb_irq_rx: + s->usbip_intr = musb_core_intr_get(s->musb); + /* Fall through. */ + default: + if (level) + s->intr |= 1 << source; + else + s->intr &= ~(1 << source); + tusb_intr_update(s); + break; + } +} + +static void tusb6010_power(TUSBState *s, int on) +{ + if (!on) { + s->power = 0; + } else if (!s->power && on) { + s->power = 1; + /* Pull the interrupt down after TUSB6010 comes up. 
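 * intr_ok gates the IRQ output in tusb_intr_update(); tusb_power_tick()
 * raises it again (to ~0) about half a second later via pwr_timer.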
*/ + s->intr_ok = 0; + tusb_intr_update(s); + timer_mod(s->pwr_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + + NANOSECONDS_PER_SECOND / 2); + } +} + +static void tusb6010_irq(void *opaque, int source, int level) +{ + if (source) { + tusb_musb_core_intr(opaque, source - 1, level); + } else { + tusb6010_power(opaque, level); + } +} + +static void tusb6010_reset(DeviceState *dev) +{ + TUSBState *s = TUSB6010(dev); + int i; + + s->test_reset = TUSB_PROD_TEST_RESET_VAL; + s->host_mode = 0; + s->dev_config = 0; + s->otg_status = 0; /* !TUSB_DEV_OTG_STAT_ID_STATUS means host mode */ + s->power = 0; + s->mask = 0xffffffff; + s->intr = 0x00000000; + s->otg_timer_val = 0; + s->scratch = 0; + s->prcm_config = 0; + s->prcm_mngmt = 0; + s->intr_ok = 0; + s->usbip_intr = 0; + s->usbip_mask = 0; + s->gpio_intr = 0; + s->gpio_mask = 0; + s->gpio_config = 0; + s->dma_intr = 0; + s->dma_mask = 0; + s->dma_map = 0; + s->dma_config = 0; + s->ep0_config = 0; + s->wkup_mask = 0; + s->pullup[0] = s->pullup[1] = 0; + s->control_config = 0; + for (i = 0; i < 15; i++) { + s->rx_config[i] = s->tx_config[i] = 0; + } + musb_reset(s->musb); +} + +static void tusb6010_realize(DeviceState *dev, Error **errp) +{ + TUSBState *s = TUSB6010(dev); + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + + s->otg_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, tusb_otg_tick, s); + s->pwr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, tusb_power_tick, s); + memory_region_init_io(&s->iomem[1], OBJECT(s), &tusb_async_ops, s, + "tusb-async", UINT32_MAX); + sysbus_init_mmio(sbd, &s->iomem[0]); + sysbus_init_mmio(sbd, &s->iomem[1]); + sysbus_init_irq(sbd, &s->irq); + qdev_init_gpio_in(dev, tusb6010_irq, musb_irq_max + 1); + s->musb = musb_init(dev, 1); +} + +static void tusb6010_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = tusb6010_realize; + dc->reset = tusb6010_reset; +} + +static const TypeInfo tusb6010_info = { + .name = TYPE_TUSB6010, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(TUSBState), + .class_init = tusb6010_class_init, +}; + +static void tusb6010_register_types(void) +{ + type_register_static(&tusb6010_info); +} + +type_init(tusb6010_register_types) diff --git a/hw/usb/u2f-emulated.c b/hw/usb/u2f-emulated.c new file mode 100644 index 000000000..63cceaa5f --- /dev/null +++ b/hw/usb/u2f-emulated.c @@ -0,0 +1,405 @@ +/* + * U2F USB Emulated device. + * + * Copyright (c) 2020 César Belley + * Written by César Belley + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/module.h"
+#include "qemu/thread.h"
+#include "qemu/main-loop.h"
+#include "qapi/error.h"
+#include "hw/usb.h"
+#include "hw/qdev-properties.h"
+
+#include <u2f-emu/u2f-emu.h>
+
+#include "u2f.h"
+
+/* Counter which syncs with a file */
+struct synced_counter {
+    /* Emulated device counter */
+    struct u2f_emu_vdev_counter vdev_counter;
+
+    /* Private attributes */
+    uint32_t value;
+    FILE *fp;
+};
+
+static void counter_increment(struct u2f_emu_vdev_counter *vdev_counter)
+{
+    struct synced_counter *counter = (struct synced_counter *)vdev_counter;
+    ++counter->value;
+
+    /* Write back */
+    if (fseek(counter->fp, 0, SEEK_SET) == -1) {
+        return;
+    }
+    fprintf(counter->fp, "%u\n", counter->value);
+}
+
+static uint32_t counter_read(struct u2f_emu_vdev_counter *vdev_counter)
+{
+    struct synced_counter *counter = (struct synced_counter *)vdev_counter;
+    return counter->value;
+}
+
+typedef struct U2FEmulatedState U2FEmulatedState;
+
+#define PENDING_OUT_NUM 32
+
+struct U2FEmulatedState {
+    U2FKeyState base;
+
+    /* U2F virtual emulated device */
+    u2f_emu_vdev *vdev;
+    QemuMutex vdev_mutex;
+
+    /* Properties */
+    char *dir;
+    char *cert;
+    char *privkey;
+    char *entropy;
+    char *counter;
+    struct synced_counter synced_counter;
+
+    /* Pending packets received from the guest */
+    uint8_t pending_out[PENDING_OUT_NUM][U2FHID_PACKET_SIZE];
+    uint8_t pending_out_start;
+    uint8_t pending_out_end;
+    uint8_t pending_out_num;
+    QemuMutex pending_out_mutex;
+
+    /* Emulation thread and sync */
+    QemuCond key_cond;
+    QemuMutex key_mutex;
+    QemuThread key_thread;
+    bool stop_thread;
+    EventNotifier notifier;
+};
+
+#define TYPE_U2F_EMULATED "u2f-emulated"
+#define EMULATED_U2F_KEY(obj) \
+    OBJECT_CHECK(U2FEmulatedState, (obj), TYPE_U2F_EMULATED)
+
+static void u2f_emulated_reset(U2FEmulatedState *key)
+{
+    key->pending_out_start = 0;
+    key->pending_out_end = 0;
+    key->pending_out_num = 0;
+}
+
+static void u2f_pending_out_add(U2FEmulatedState *key,
+                                const uint8_t packet[U2FHID_PACKET_SIZE])
+{
+    int index;
+
+    if (key->pending_out_num >= PENDING_OUT_NUM) {
+        return;
+    }
+
+    index = key->pending_out_end;
+    key->pending_out_end = (index + 1) % PENDING_OUT_NUM;
+    ++key->pending_out_num;
+
+    memcpy(&key->pending_out[index], packet, U2FHID_PACKET_SIZE);
+}
+
+static uint8_t *u2f_pending_out_get(U2FEmulatedState *key)
+{
+    int index;
+
+    if (key->pending_out_num == 0) {
+        return NULL;
+    }
+
+    index = key->pending_out_start;
+    key->pending_out_start = (index + 1) % PENDING_OUT_NUM;
+    --key->pending_out_num;
+
+    return key->pending_out[index];
+}
+
+static void u2f_emulated_recv_from_guest(U2FKeyState *base,
+                                    const uint8_t packet[U2FHID_PACKET_SIZE])
+{
+    U2FEmulatedState *key = EMULATED_U2F_KEY(base);
+
+    qemu_mutex_lock(&key->pending_out_mutex);
+    u2f_pending_out_add(key, packet);
+    qemu_mutex_unlock(&key->pending_out_mutex);
+
+    qemu_mutex_lock(&key->key_mutex);
+    qemu_cond_signal(&key->key_cond);
+    qemu_mutex_unlock(&key->key_mutex);
+}
+
+static void *u2f_emulated_thread(void* arg)
+{
+    U2FEmulatedState *key = arg;
+    uint8_t packet[U2FHID_PACKET_SIZE];
+    uint8_t *packet_out = NULL;
+
+
+    while (true) {
+        /* Wait signal */
+        qemu_mutex_lock(&key->key_mutex);
+        qemu_cond_wait(&key->key_cond, &key->key_mutex);
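+        /*
+         * Woken either by u2f_emulated_recv_from_guest() queueing a new
+         * guest packet or by unrealize() requesting shutdown; the
+         * stop_thread check below tells the two apart.
+         */
+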
qemu_mutex_unlock(&key->key_mutex); + + /* Exit thread check */ + if (key->stop_thread) { + key->stop_thread = false; + break; + } + + qemu_mutex_lock(&key->pending_out_mutex); + packet_out = u2f_pending_out_get(key); + if (packet_out == NULL) { + qemu_mutex_unlock(&key->pending_out_mutex); + continue; + } + memcpy(packet, packet_out, U2FHID_PACKET_SIZE); + qemu_mutex_unlock(&key->pending_out_mutex); + + qemu_mutex_lock(&key->vdev_mutex); + u2f_emu_vdev_send(key->vdev, U2F_EMU_USB, packet, + U2FHID_PACKET_SIZE); + + /* Notify response */ + if (u2f_emu_vdev_has_response(key->vdev, U2F_EMU_USB)) { + event_notifier_set(&key->notifier); + } + qemu_mutex_unlock(&key->vdev_mutex); + } + return NULL; +} + +static ssize_t u2f_emulated_read(const char *path, char *buffer, + size_t buffer_len) +{ + int fd; + ssize_t ret; + + fd = qemu_open_old(path, O_RDONLY); + if (fd < 0) { + return -1; + } + + ret = read(fd, buffer, buffer_len); + close(fd); + + return ret; +} + +static bool u2f_emulated_setup_counter(const char *path, + struct synced_counter *counter) +{ + int fd, ret; + FILE *fp; + + fd = qemu_open_old(path, O_RDWR); + if (fd < 0) { + return false; + } + fp = fdopen(fd, "r+"); + if (fp == NULL) { + close(fd); + return false; + } + ret = fscanf(fp, "%u", &counter->value); + if (ret == EOF) { + fclose(fp); + return false; + } + counter->fp = fp; + counter->vdev_counter.counter_increment = counter_increment; + counter->vdev_counter.counter_read = counter_read; + + return true; +} + +static u2f_emu_rc u2f_emulated_setup_vdev_manualy(U2FEmulatedState *key) +{ + ssize_t ret; + char cert_pem[4096], privkey_pem[2048]; + struct u2f_emu_vdev_setup setup_info; + + /* Certificate */ + ret = u2f_emulated_read(key->cert, cert_pem, sizeof(cert_pem)); + if (ret < 0) { + return -1; + } + + /* Private key */ + ret = u2f_emulated_read(key->privkey, privkey_pem, sizeof(privkey_pem)); + if (ret < 0) { + return -1; + } + + /* Entropy */ + ret = u2f_emulated_read(key->entropy, (char *)&setup_info.entropy, + sizeof(setup_info.entropy)); + if (ret < 0) { + return -1; + } + + /* Counter */ + if (!u2f_emulated_setup_counter(key->counter, &key->synced_counter)) { + return -1; + } + + /* Setup */ + setup_info.certificate = cert_pem; + setup_info.private_key = privkey_pem; + setup_info.counter = (struct u2f_emu_vdev_counter *)&key->synced_counter; + + return u2f_emu_vdev_new(&key->vdev, &setup_info); +} + +static void u2f_emulated_event_handler(EventNotifier *notifier) +{ + U2FEmulatedState *key = container_of(notifier, U2FEmulatedState, notifier); + size_t packet_size; + uint8_t *packet_in = NULL; + + event_notifier_test_and_clear(&key->notifier); + qemu_mutex_lock(&key->vdev_mutex); + while (u2f_emu_vdev_has_response(key->vdev, U2F_EMU_USB)) { + packet_size = u2f_emu_vdev_get_response(key->vdev, U2F_EMU_USB, + &packet_in); + if (packet_size == U2FHID_PACKET_SIZE) { + u2f_send_to_guest(&key->base, packet_in); + } + u2f_emu_vdev_free_response(packet_in); + } + qemu_mutex_unlock(&key->vdev_mutex); +} + +static void u2f_emulated_realize(U2FKeyState *base, Error **errp) +{ + U2FEmulatedState *key = EMULATED_U2F_KEY(base); + u2f_emu_rc rc; + + if (key->cert != NULL || key->privkey != NULL || key->entropy != NULL + || key->counter != NULL) { + if (key->cert != NULL && key->privkey != NULL + && key->entropy != NULL && key->counter != NULL) { + rc = u2f_emulated_setup_vdev_manualy(key); + } else { + error_setg(errp, "%s: cert, priv, entropy and counter " + "parameters must be provided to manually configure " + "the emulated 
device", TYPE_U2F_EMULATED); + return; + } + } else if (key->dir != NULL) { + rc = u2f_emu_vdev_new_from_dir(&key->vdev, key->dir); + } else { + rc = u2f_emu_vdev_new_ephemeral(&key->vdev); + } + + if (rc != U2F_EMU_OK) { + error_setg(errp, "%s: Failed to setup the key", TYPE_U2F_EMULATED); + return; + } + + if (event_notifier_init(&key->notifier, false) < 0) { + error_setg(errp, "%s: Failed to initialize notifier", + TYPE_U2F_EMULATED); + return; + } + /* Notifier */ + event_notifier_set_handler(&key->notifier, u2f_emulated_event_handler); + + /* Synchronization */ + qemu_cond_init(&key->key_cond); + qemu_mutex_init(&key->vdev_mutex); + qemu_mutex_init(&key->pending_out_mutex); + qemu_mutex_init(&key->key_mutex); + u2f_emulated_reset(key); + + /* Thread */ + key->stop_thread = false; + qemu_thread_create(&key->key_thread, "u2f-key", u2f_emulated_thread, + key, QEMU_THREAD_JOINABLE); +} + +static void u2f_emulated_unrealize(U2FKeyState *base) +{ + U2FEmulatedState *key = EMULATED_U2F_KEY(base); + + /* Thread */ + key->stop_thread = true; + qemu_cond_signal(&key->key_cond); + qemu_thread_join(&key->key_thread); + + /* Notifier */ + event_notifier_set_handler(&key->notifier, NULL); + event_notifier_cleanup(&key->notifier); + + /* Synchronization */ + qemu_cond_destroy(&key->key_cond); + qemu_mutex_destroy(&key->vdev_mutex); + qemu_mutex_destroy(&key->key_mutex); + qemu_mutex_destroy(&key->pending_out_mutex); + + /* Vdev */ + u2f_emu_vdev_free(key->vdev); + if (key->synced_counter.fp != NULL) { + fclose(key->synced_counter.fp); + } +} + +static Property u2f_emulated_properties[] = { + DEFINE_PROP_STRING("dir", U2FEmulatedState, dir), + DEFINE_PROP_STRING("cert", U2FEmulatedState, cert), + DEFINE_PROP_STRING("privkey", U2FEmulatedState, privkey), + DEFINE_PROP_STRING("entropy", U2FEmulatedState, entropy), + DEFINE_PROP_STRING("counter", U2FEmulatedState, counter), + DEFINE_PROP_END_OF_LIST(), +}; + +static void u2f_emulated_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + U2FKeyClass *kc = U2F_KEY_CLASS(klass); + + kc->realize = u2f_emulated_realize; + kc->unrealize = u2f_emulated_unrealize; + kc->recv_from_guest = u2f_emulated_recv_from_guest; + dc->desc = "QEMU U2F emulated key"; + device_class_set_props(dc, u2f_emulated_properties); +} + +static const TypeInfo u2f_key_emulated_info = { + .name = TYPE_U2F_EMULATED, + .parent = TYPE_U2F_KEY, + .instance_size = sizeof(U2FEmulatedState), + .class_init = u2f_emulated_class_init +}; + +static void u2f_key_emulated_register_types(void) +{ + type_register_static(&u2f_key_emulated_info); +} + +type_init(u2f_key_emulated_register_types) diff --git a/hw/usb/u2f-passthru.c b/hw/usb/u2f-passthru.c new file mode 100644 index 000000000..fc93429c9 --- /dev/null +++ b/hw/usb/u2f-passthru.c @@ -0,0 +1,552 @@ +/* + * U2F USB Passthru device. + * + * Copyright (c) 2020 César Belley + * Written by César Belley + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/module.h"
+#include "qemu/main-loop.h"
+#include "qemu/error-report.h"
+#include "qapi/error.h"
+#include "hw/qdev-properties.h"
+#include "hw/usb.h"
+#include "migration/vmstate.h"
+
+#include "u2f.h"
+
+#ifdef CONFIG_LIBUDEV
+#include <libudev.h>
+#endif
+#include <linux/hidraw.h>
+#include <sys/ioctl.h>
+
+#define NONCE_SIZE 8
+#define BROADCAST_CID 0xFFFFFFFF
+#define TRANSACTION_TIMEOUT 120000
+
+struct transaction {
+    uint32_t cid;
+    uint16_t resp_bcnt;
+    uint16_t resp_size;
+
+    /* Nonce for broadcast isolation */
+    uint8_t nonce[NONCE_SIZE];
+};
+
+typedef struct U2FPassthruState U2FPassthruState;
+
+#define CURRENT_TRANSACTIONS_NUM 4
+
+struct U2FPassthruState {
+    U2FKeyState base;
+
+    /* Host device */
+    char *hidraw;
+    int hidraw_fd;
+
+    /* Current Transactions */
+    struct transaction current_transactions[CURRENT_TRANSACTIONS_NUM];
+    uint8_t current_transactions_start;
+    uint8_t current_transactions_end;
+    uint8_t current_transactions_num;
+
+    /* Transaction time checking */
+    int64_t last_transaction_time;
+    QEMUTimer timer;
+};
+
+#define TYPE_U2F_PASSTHRU "u2f-passthru"
+#define PASSTHRU_U2F_KEY(obj) \
+    OBJECT_CHECK(U2FPassthruState, (obj), TYPE_U2F_PASSTHRU)
+
+/* Init packet sizes */
+#define PACKET_INIT_HEADER_SIZE 7
+#define PACKET_INIT_DATA_SIZE (U2FHID_PACKET_SIZE - PACKET_INIT_HEADER_SIZE)
+
+/* Cont packet sizes */
+#define PACKET_CONT_HEADER_SIZE 5
+#define PACKET_CONT_DATA_SIZE (U2FHID_PACKET_SIZE - PACKET_CONT_HEADER_SIZE)
+
+struct packet_init {
+    uint32_t cid;
+    uint8_t cmd;
+    uint8_t bcnth;
+    uint8_t bcntl;
+    uint8_t data[PACKET_INIT_DATA_SIZE];
+} QEMU_PACKED;
+
+static inline uint32_t packet_get_cid(const void *packet)
+{
+    return *((uint32_t *)packet);
+}
+
+static inline bool packet_is_init(const void *packet)
+{
+    return ((uint8_t *)packet)[4] & (1 << 7);
+}
+
+static inline uint16_t packet_init_get_bcnt(
+        const struct packet_init *packet_init)
+{
+    uint16_t bcnt = 0;
+    bcnt |= packet_init->bcnth << 8;
+    bcnt |= packet_init->bcntl;
+
+    return bcnt;
+}
+
+static void u2f_passthru_reset(U2FPassthruState *key)
+{
+    timer_del(&key->timer);
+    qemu_set_fd_handler(key->hidraw_fd, NULL, NULL, key);
+    key->last_transaction_time = 0;
+    key->current_transactions_start = 0;
+    key->current_transactions_end = 0;
+    key->current_transactions_num = 0;
+}
+
+static void u2f_timeout_check(void *opaque)
+{
+    U2FPassthruState *key = opaque;
+    int64_t time = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
+
+    if (time > key->last_transaction_time + TRANSACTION_TIMEOUT) {
+        u2f_passthru_reset(key);
+    } else {
+        timer_mod(&key->timer, time + TRANSACTION_TIMEOUT / 4);
+    }
+}
+
+static int u2f_transaction_get_index(U2FPassthruState *key, uint32_t cid)
+{
+    for (int i = 0; i < key->current_transactions_num; ++i) {
+        int index = (key->current_transactions_start + i)
+                    % CURRENT_TRANSACTIONS_NUM;
+        if (cid == key->current_transactions[index].cid) {
+            return index;
+        }
+    }
+    return -1;
+}
+
+static struct transaction *u2f_transaction_get(U2FPassthruState *key,
+                                               uint32_t cid)
+{
+    int index =
u2f_transaction_get_index(key, cid); + if (index < 0) { + return NULL; + } + return &key->current_transactions[index]; +} + +static struct transaction *u2f_transaction_get_from_nonce(U2FPassthruState *key, + const uint8_t nonce[NONCE_SIZE]) +{ + for (int i = 0; i < key->current_transactions_num; ++i) { + int index = (key->current_transactions_start + i) + % CURRENT_TRANSACTIONS_NUM; + if (key->current_transactions[index].cid == BROADCAST_CID + && memcmp(nonce, key->current_transactions[index].nonce, + NONCE_SIZE) == 0) { + return &key->current_transactions[index]; + } + } + return NULL; +} + +static void u2f_transaction_close(U2FPassthruState *key, uint32_t cid) +{ + int index, next_index; + index = u2f_transaction_get_index(key, cid); + if (index < 0) { + return; + } + next_index = (index + 1) % CURRENT_TRANSACTIONS_NUM; + + /* Rearrange to ensure the oldest is at the start position */ + while (next_index != key->current_transactions_end) { + memcpy(&key->current_transactions[index], + &key->current_transactions[next_index], + sizeof(struct transaction)); + + index = next_index; + next_index = (index + 1) % CURRENT_TRANSACTIONS_NUM; + } + + key->current_transactions_end = index; + --key->current_transactions_num; + + if (key->current_transactions_num == 0) { + u2f_passthru_reset(key); + } +} + +static void u2f_transaction_add(U2FPassthruState *key, uint32_t cid, + const uint8_t nonce[NONCE_SIZE]) +{ + uint8_t index; + struct transaction *transaction; + + if (key->current_transactions_num >= CURRENT_TRANSACTIONS_NUM) { + /* Close the oldest transaction */ + index = key->current_transactions_start; + transaction = &key->current_transactions[index]; + u2f_transaction_close(key, transaction->cid); + } + + /* Index */ + index = key->current_transactions_end; + key->current_transactions_end = (index + 1) % CURRENT_TRANSACTIONS_NUM; + ++key->current_transactions_num; + + /* Transaction */ + transaction = &key->current_transactions[index]; + transaction->cid = cid; + transaction->resp_bcnt = 0; + transaction->resp_size = 0; + + /* Nonce */ + if (nonce != NULL) { + memcpy(transaction->nonce, nonce, NONCE_SIZE); + } +} + +static void u2f_passthru_read(void *opaque); + +static void u2f_transaction_start(U2FPassthruState *key, + const struct packet_init *packet_init) +{ + int64_t time; + + /* Transaction */ + if (packet_init->cid == BROADCAST_CID) { + u2f_transaction_add(key, packet_init->cid, packet_init->data); + } else { + u2f_transaction_add(key, packet_init->cid, NULL); + } + + /* Time */ + time = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL); + if (key->last_transaction_time == 0) { + qemu_set_fd_handler(key->hidraw_fd, u2f_passthru_read, NULL, key); + timer_init_ms(&key->timer, QEMU_CLOCK_VIRTUAL, u2f_timeout_check, key); + timer_mod(&key->timer, time + TRANSACTION_TIMEOUT / 4); + } + key->last_transaction_time = time; +} + +static void u2f_passthru_recv_from_host(U2FPassthruState *key, + const uint8_t packet[U2FHID_PACKET_SIZE]) +{ + struct transaction *transaction; + uint32_t cid; + + /* Retrieve transaction */ + cid = packet_get_cid(packet); + if (cid == BROADCAST_CID) { + struct packet_init *packet_init; + if (!packet_is_init(packet)) { + return; + } + packet_init = (struct packet_init *)packet; + transaction = u2f_transaction_get_from_nonce(key, packet_init->data); + } else { + transaction = u2f_transaction_get(key, cid); + } + + /* Ignore no started transaction */ + if (transaction == NULL) { + return; + } + + if (packet_is_init(packet)) { + struct packet_init *packet_init = (struct packet_init 
*)packet; + transaction->resp_bcnt = packet_init_get_bcnt(packet_init); + transaction->resp_size = PACKET_INIT_DATA_SIZE; + + if (packet_init->cid == BROADCAST_CID) { + /* Nonce checking for legitimate response */ + if (memcmp(transaction->nonce, packet_init->data, NONCE_SIZE) + != 0) { + return; + } + } + } else { + transaction->resp_size += PACKET_CONT_DATA_SIZE; + } + + /* Transaction end check */ + if (transaction->resp_size >= transaction->resp_bcnt) { + u2f_transaction_close(key, cid); + } + u2f_send_to_guest(&key->base, packet); +} + +static void u2f_passthru_read(void *opaque) +{ + U2FPassthruState *key = opaque; + U2FKeyState *base = &key->base; + uint8_t packet[2 * U2FHID_PACKET_SIZE]; + int ret; + + /* Full size base queue check */ + if (base->pending_in_num >= U2FHID_PENDING_IN_NUM) { + return; + } + + ret = read(key->hidraw_fd, packet, sizeof(packet)); + if (ret < 0) { + /* Detach */ + if (base->dev.attached) { + usb_device_detach(&base->dev); + u2f_passthru_reset(key); + } + return; + } + if (ret != U2FHID_PACKET_SIZE) { + return; + } + u2f_passthru_recv_from_host(key, packet); +} + +static void u2f_passthru_recv_from_guest(U2FKeyState *base, + const uint8_t packet[U2FHID_PACKET_SIZE]) +{ + U2FPassthruState *key = PASSTHRU_U2F_KEY(base); + uint8_t host_packet[U2FHID_PACKET_SIZE + 1]; + ssize_t written; + + if (packet_is_init(packet)) { + u2f_transaction_start(key, (struct packet_init *)packet); + } + + host_packet[0] = 0; + memcpy(host_packet + 1, packet, U2FHID_PACKET_SIZE); + + written = write(key->hidraw_fd, host_packet, sizeof(host_packet)); + if (written != sizeof(host_packet)) { + error_report("%s: Bad written size (req 0x%zu, val 0x%zd)", + TYPE_U2F_PASSTHRU, sizeof(host_packet), written); + } +} + +static bool u2f_passthru_is_u2f_device(int fd) +{ + int ret, rdesc_size; + struct hidraw_report_descriptor rdesc; + const uint8_t u2f_hid_report_desc_header[] = { + 0x06, 0xd0, 0xf1, /* Usage Page (FIDO) */ + 0x09, 0x01, /* Usage (FIDO) */ + }; + + /* Get report descriptor size */ + ret = ioctl(fd, HIDIOCGRDESCSIZE, &rdesc_size); + if (ret < 0 || rdesc_size < sizeof(u2f_hid_report_desc_header)) { + return false; + } + + /* Get report descriptor */ + memset(&rdesc, 0x0, sizeof(rdesc)); + rdesc.size = rdesc_size; + ret = ioctl(fd, HIDIOCGRDESC, &rdesc); + if (ret < 0) { + return false; + } + + /* Header bytes cover specific U2F rdesc values */ + return memcmp(u2f_hid_report_desc_header, rdesc.value, + sizeof(u2f_hid_report_desc_header)) == 0; +} + +#ifdef CONFIG_LIBUDEV +static int u2f_passthru_open_from_device(struct udev_device *device) +{ + const char *devnode = udev_device_get_devnode(device); + + int fd = qemu_open_old(devnode, O_RDWR); + if (fd < 0) { + return -1; + } else if (!u2f_passthru_is_u2f_device(fd)) { + qemu_close(fd); + return -1; + } + return fd; +} + +static int u2f_passthru_open_from_enumerate(struct udev *udev, + struct udev_enumerate *enumerate) +{ + struct udev_list_entry *devices, *entry; + int ret, fd; + + ret = udev_enumerate_scan_devices(enumerate); + if (ret < 0) { + return -1; + } + + devices = udev_enumerate_get_list_entry(enumerate); + udev_list_entry_foreach(entry, devices) { + struct udev_device *device; + const char *syspath = udev_list_entry_get_name(entry); + + if (syspath == NULL) { + continue; + } + + device = udev_device_new_from_syspath(udev, syspath); + if (device == NULL) { + continue; + } + + fd = u2f_passthru_open_from_device(device); + udev_device_unref(device); + if (fd >= 0) { + return fd; + } + } + return -1; +} + +static int 
u2f_passthru_open_from_scan(void) +{ + struct udev *udev; + struct udev_enumerate *enumerate; + int ret, fd = -1; + + udev = udev_new(); + if (udev == NULL) { + return -1; + } + + enumerate = udev_enumerate_new(udev); + if (enumerate == NULL) { + udev_unref(udev); + return -1; + } + + ret = udev_enumerate_add_match_subsystem(enumerate, "hidraw"); + if (ret >= 0) { + fd = u2f_passthru_open_from_enumerate(udev, enumerate); + } + + udev_enumerate_unref(enumerate); + udev_unref(udev); + + return fd; +} +#endif + +static void u2f_passthru_unrealize(U2FKeyState *base) +{ + U2FPassthruState *key = PASSTHRU_U2F_KEY(base); + + u2f_passthru_reset(key); + qemu_close(key->hidraw_fd); +} + +static void u2f_passthru_realize(U2FKeyState *base, Error **errp) +{ + U2FPassthruState *key = PASSTHRU_U2F_KEY(base); + int fd; + + if (key->hidraw == NULL) { +#ifdef CONFIG_LIBUDEV + fd = u2f_passthru_open_from_scan(); + if (fd < 0) { + error_setg(errp, "%s: Failed to find a U2F USB device", + TYPE_U2F_PASSTHRU); + return; + } +#else + error_setg(errp, "%s: Missing hidraw", TYPE_U2F_PASSTHRU); + return; +#endif + } else { + fd = qemu_open_old(key->hidraw, O_RDWR); + if (fd < 0) { + error_setg(errp, "%s: Failed to open %s", TYPE_U2F_PASSTHRU, + key->hidraw); + return; + } + + if (!u2f_passthru_is_u2f_device(fd)) { + qemu_close(fd); + error_setg(errp, "%s: Passed hidraw does not represent " + "a U2F HID device", TYPE_U2F_PASSTHRU); + return; + } + } + key->hidraw_fd = fd; + u2f_passthru_reset(key); +} + +static int u2f_passthru_post_load(void *opaque, int version_id) +{ + U2FPassthruState *key = opaque; + u2f_passthru_reset(key); + return 0; +} + +static const VMStateDescription u2f_passthru_vmstate = { + .name = "u2f-key-passthru", + .version_id = 1, + .minimum_version_id = 1, + .post_load = u2f_passthru_post_load, + .fields = (VMStateField[]) { + VMSTATE_U2F_KEY(base, U2FPassthruState), + VMSTATE_END_OF_LIST() + } +}; + +static Property u2f_passthru_properties[] = { + DEFINE_PROP_STRING("hidraw", U2FPassthruState, hidraw), + DEFINE_PROP_END_OF_LIST(), +}; + +static void u2f_passthru_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + U2FKeyClass *kc = U2F_KEY_CLASS(klass); + + kc->realize = u2f_passthru_realize; + kc->unrealize = u2f_passthru_unrealize; + kc->recv_from_guest = u2f_passthru_recv_from_guest; + dc->desc = "QEMU U2F passthrough key"; + dc->vmsd = &u2f_passthru_vmstate; + device_class_set_props(dc, u2f_passthru_properties); + set_bit(DEVICE_CATEGORY_MISC, dc->categories); +} + +static const TypeInfo u2f_key_passthru_info = { + .name = TYPE_U2F_PASSTHRU, + .parent = TYPE_U2F_KEY, + .instance_size = sizeof(U2FPassthruState), + .class_init = u2f_passthru_class_init +}; + +static void u2f_key_passthru_register_types(void) +{ + type_register_static(&u2f_key_passthru_info); +} + +type_init(u2f_key_passthru_register_types) diff --git a/hw/usb/u2f.c b/hw/usb/u2f.c new file mode 100644 index 000000000..56001249a --- /dev/null +++ b/hw/usb/u2f.c @@ -0,0 +1,351 @@ +/* + * U2F USB device. 
+ * + * Copyright (c) 2020 César Belley + * Written by César Belley + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "qemu/module.h" +#include "qapi/error.h" +#include "hw/usb.h" +#include "hw/usb/hid.h" +#include "migration/vmstate.h" +#include "desc.h" + +#include "u2f.h" + +/* U2F key Vendor / Product */ +#define U2F_KEY_VENDOR_NUM 0x46f4 /* CRC16() of "QEMU" */ +#define U2F_KEY_PRODUCT_NUM 0x0005 + +enum { + STR_MANUFACTURER = 1, + STR_PRODUCT, + STR_SERIALNUMBER, + STR_CONFIG, + STR_INTERFACE +}; + +static const USBDescStrings desc_strings = { + [STR_MANUFACTURER] = "QEMU", + [STR_PRODUCT] = "U2F USB key", + [STR_SERIALNUMBER] = "0", + [STR_CONFIG] = "U2F key config", + [STR_INTERFACE] = "U2F key interface" +}; + +static const USBDescIface desc_iface_u2f_key = { + .bInterfaceNumber = 0, + .bNumEndpoints = 2, + .bInterfaceClass = USB_CLASS_HID, + .bInterfaceSubClass = 0x0, + .bInterfaceProtocol = 0x0, + .ndesc = 1, + .descs = (USBDescOther[]) { + { + /* HID descriptor */ + .data = (uint8_t[]) { + 0x09, /* u8 bLength */ + USB_DT_HID, /* u8 bDescriptorType */ + 0x10, 0x01, /* u16 HID_class */ + 0x00, /* u8 country_code */ + 0x01, /* u8 num_descriptors */ + USB_DT_REPORT, /* u8 type: Report */ + 0x22, 0, /* u16 len */ + }, + }, + }, + .eps = (USBDescEndpoint[]) { + { + .bEndpointAddress = USB_DIR_IN | 0x01, + .bmAttributes = USB_ENDPOINT_XFER_INT, + .wMaxPacketSize = U2FHID_PACKET_SIZE, + .bInterval = 0x05, + }, { + .bEndpointAddress = USB_DIR_OUT | 0x01, + .bmAttributes = USB_ENDPOINT_XFER_INT, + .wMaxPacketSize = U2FHID_PACKET_SIZE, + .bInterval = 0x05, + }, + }, + +}; + +static const USBDescDevice desc_device_u2f_key = { + .bcdUSB = 0x0100, + .bMaxPacketSize0 = U2FHID_PACKET_SIZE, + .bNumConfigurations = 1, + .confs = (USBDescConfig[]) { + { + .bNumInterfaces = 1, + .bConfigurationValue = 1, + .iConfiguration = STR_CONFIG, + .bmAttributes = USB_CFG_ATT_ONE, + .bMaxPower = 15, + .nif = 1, + .ifs = &desc_iface_u2f_key, + }, + }, +}; + +static const USBDesc desc_u2f_key = { + .id = { + .idVendor = U2F_KEY_VENDOR_NUM, + .idProduct = U2F_KEY_PRODUCT_NUM, + .bcdDevice = 0, + .iManufacturer = STR_MANUFACTURER, + .iProduct = STR_PRODUCT, + .iSerialNumber = STR_SERIALNUMBER, + }, + .full = &desc_device_u2f_key, + .str = desc_strings, +}; + +static const uint8_t u2f_key_hid_report_desc[] = { + 0x06, 0xd0, 0xf1, /* Usage Page (FIDO) */ + 0x09, 0x01, /* Usage (FIDO) */ + 0xa1, 0x01, /* Collection (HID Application) */ + 0x09, 0x20, 
/* Usage (FIDO data in) */ + 0x15, 0x00, /* Logical Minimum (0) */ + 0x26, 0xFF, 0x00, /* Logical Maximum (0xff) */ + 0x75, 0x08, /* Report Size (8) */ + 0x95, 0x40, /* Report Count (0x40) */ + 0x81, 0x02, /* Input (Data, Variable, Absolute) */ + 0x09, 0x21, /* Usage (FIDO data out) */ + 0x15, 0x00, /* Logical Minimum (0) */ + 0x26, 0xFF, 0x00, /* Logical Maximum (0xFF) */ + 0x75, 0x08, /* Report Size (8) */ + 0x95, 0x40, /* Report Count (0x40) */ + 0x91, 0x02, /* Output (Data, Variable, Absolute) */ + 0xC0 /* End Collection */ +}; + +static void u2f_key_reset(U2FKeyState *key) +{ + key->pending_in_start = 0; + key->pending_in_end = 0; + key->pending_in_num = 0; +} + +static void u2f_key_handle_reset(USBDevice *dev) +{ + U2FKeyState *key = U2F_KEY(dev); + + u2f_key_reset(key); +} + +static void u2f_key_handle_control(USBDevice *dev, USBPacket *p, + int request, int value, int index, int length, uint8_t *data) +{ + U2FKeyState *key = U2F_KEY(dev); + int ret; + + ret = usb_desc_handle_control(dev, p, request, value, index, length, data); + if (ret >= 0) { + return; + } + + switch (request) { + case InterfaceRequest | USB_REQ_GET_DESCRIPTOR: + switch (value >> 8) { + case 0x22: + memcpy(data, u2f_key_hid_report_desc, + sizeof(u2f_key_hid_report_desc)); + p->actual_length = sizeof(u2f_key_hid_report_desc); + break; + default: + goto fail; + } + break; + case HID_GET_IDLE: + data[0] = key->idle; + p->actual_length = 1; + break; + case HID_SET_IDLE: + key->idle = (uint8_t)(value >> 8); + break; + default: + fail: + p->status = USB_RET_STALL; + break; + } + +} + +static void u2f_key_recv_from_guest(U2FKeyState *key, USBPacket *p) +{ + U2FKeyClass *kc = U2F_KEY_GET_CLASS(key); + uint8_t packet[U2FHID_PACKET_SIZE]; + + if (kc->recv_from_guest == NULL || p->iov.size != U2FHID_PACKET_SIZE) { + return; + } + + usb_packet_copy(p, packet, p->iov.size); + kc->recv_from_guest(key, packet); +} + +static void u2f_pending_in_add(U2FKeyState *key, + const uint8_t packet[U2FHID_PACKET_SIZE]) +{ + uint8_t index; + + if (key->pending_in_num >= U2FHID_PENDING_IN_NUM) { + return; + } + + index = key->pending_in_end; + key->pending_in_end = (index + 1) % U2FHID_PENDING_IN_NUM; + ++key->pending_in_num; + + memcpy(key->pending_in[index], packet, U2FHID_PACKET_SIZE); +} + +static uint8_t *u2f_pending_in_get(U2FKeyState *key) +{ + uint8_t index; + + if (key->pending_in_num == 0) { + return NULL; + } + + index = key->pending_in_start; + key->pending_in_start = (index + 1) % U2FHID_PENDING_IN_NUM; + --key->pending_in_num; + + return key->pending_in[index]; +} + +static void u2f_key_handle_data(USBDevice *dev, USBPacket *p) +{ + U2FKeyState *key = U2F_KEY(dev); + uint8_t *packet_in; + + /* Endpoint number check */ + if (p->ep->nr != 1) { + p->status = USB_RET_STALL; + return; + } + + switch (p->pid) { + case USB_TOKEN_OUT: + u2f_key_recv_from_guest(key, p); + break; + case USB_TOKEN_IN: + packet_in = u2f_pending_in_get(key); + if (packet_in == NULL) { + p->status = USB_RET_NAK; + return; + } + usb_packet_copy(p, packet_in, U2FHID_PACKET_SIZE); + break; + default: + p->status = USB_RET_STALL; + break; + } +} + +void u2f_send_to_guest(U2FKeyState *key, + const uint8_t packet[U2FHID_PACKET_SIZE]) +{ + u2f_pending_in_add(key, packet); + usb_wakeup(key->ep, 0); +} + +static void u2f_key_unrealize(USBDevice *dev) +{ + U2FKeyState *key = U2F_KEY(dev); + U2FKeyClass *kc = U2F_KEY_GET_CLASS(key); + + if (kc->unrealize != NULL) { + kc->unrealize(key); + } +} + +static void u2f_key_realize(USBDevice *dev, Error **errp) +{ + 
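/*
+     * Common realize for every u2f-key variant: build the USB descriptors,
+     * reset the pending-in queue, call the variant's realize hook, then
+     * cache the interrupt-IN endpoint used by u2f_send_to_guest().
+     * Variants are selected on the command line, e.g.
+     *   -device u2f-emulated,dir=/path/to/key-setup
+     *   -device u2f-passthru,hidraw=/dev/hidraw0
+     * (paths here are illustrative).
+     */
+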
U2FKeyState *key = U2F_KEY(dev); + U2FKeyClass *kc = U2F_KEY_GET_CLASS(key); + Error *local_err = NULL; + + usb_desc_create_serial(dev); + usb_desc_init(dev); + u2f_key_reset(key); + + if (kc->realize != NULL) { + kc->realize(key, &local_err); + if (local_err != NULL) { + error_propagate(errp, local_err); + return; + } + } + key->ep = usb_ep_get(dev, USB_TOKEN_IN, 1); +} + +const VMStateDescription vmstate_u2f_key = { + .name = "u2f-key", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_USB_DEVICE(dev, U2FKeyState), + VMSTATE_UINT8(idle, U2FKeyState), + VMSTATE_UINT8_2DARRAY(pending_in, U2FKeyState, + U2FHID_PENDING_IN_NUM, U2FHID_PACKET_SIZE), + VMSTATE_UINT8(pending_in_start, U2FKeyState), + VMSTATE_UINT8(pending_in_end, U2FKeyState), + VMSTATE_UINT8(pending_in_num, U2FKeyState), + VMSTATE_END_OF_LIST() + } +}; + +static void u2f_key_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + USBDeviceClass *uc = USB_DEVICE_CLASS(klass); + + uc->product_desc = "QEMU U2F USB key"; + uc->usb_desc = &desc_u2f_key; + uc->handle_reset = u2f_key_handle_reset; + uc->handle_control = u2f_key_handle_control; + uc->handle_data = u2f_key_handle_data; + uc->handle_attach = usb_desc_attach; + uc->realize = u2f_key_realize; + uc->unrealize = u2f_key_unrealize; + dc->desc = "QEMU U2F key"; + dc->vmsd = &vmstate_u2f_key; +} + +static const TypeInfo u2f_key_info = { + .name = TYPE_U2F_KEY, + .parent = TYPE_USB_DEVICE, + .instance_size = sizeof(U2FKeyState), + .abstract = true, + .class_size = sizeof(U2FKeyClass), + .class_init = u2f_key_class_init, +}; + +static void u2f_key_register_types(void) +{ + type_register_static(&u2f_key_info); +} + +type_init(u2f_key_register_types) diff --git a/hw/usb/u2f.h b/hw/usb/u2f.h new file mode 100644 index 000000000..db30f3586 --- /dev/null +++ b/hw/usb/u2f.h @@ -0,0 +1,92 @@ +/* + * U2F USB device. + * + * Copyright (c) 2020 César Belley + * Written by César Belley + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#ifndef U2F_H +#define U2F_H + +#include "hw/qdev-core.h" + +#define U2FHID_PACKET_SIZE 64 +#define U2FHID_PENDING_IN_NUM 32 + +typedef struct U2FKeyState U2FKeyState; +typedef struct U2FKeyInfo U2FKeyInfo; + +#define TYPE_U2F_KEY "u2f-key" +#define U2F_KEY(obj) \ + OBJECT_CHECK(U2FKeyState, (obj), TYPE_U2F_KEY) +#define U2F_KEY_CLASS(klass) \ + OBJECT_CLASS_CHECK(U2FKeyClass, (klass), TYPE_U2F_KEY) +#define U2F_KEY_GET_CLASS(obj) \ + OBJECT_GET_CLASS(U2FKeyClass, (obj), TYPE_U2F_KEY) + +/* + * Callbacks to be used by the U2F key base device (i.e. hw/u2f.c) + * to interact with its variants (i.e. hw/u2f-*.c) + */ +typedef struct U2FKeyClass { + /*< private >*/ + USBDeviceClass parent_class; + + /*< public >*/ + void (*recv_from_guest)(U2FKeyState *key, + const uint8_t packet[U2FHID_PACKET_SIZE]); + void (*realize)(U2FKeyState *key, Error **errp); + void (*unrealize)(U2FKeyState *key); +} U2FKeyClass; + +/* + * State of the U2F key base device (i.e. hw/u2f.c) + */ +typedef struct U2FKeyState { + USBDevice dev; + USBEndpoint *ep; + uint8_t idle; + + /* Pending packets to be send to the guest */ + uint8_t pending_in[U2FHID_PENDING_IN_NUM][U2FHID_PACKET_SIZE]; + uint8_t pending_in_start; + uint8_t pending_in_end; + uint8_t pending_in_num; +} U2FKeyState; + +/* + * API to be used by the U2F key device variants (i.e. hw/u2f-*.c) + * to interact with the the U2F key base device (i.e. hw/u2f.c) + */ +void u2f_send_to_guest(U2FKeyState *key, + const uint8_t packet[U2FHID_PACKET_SIZE]); + +extern const VMStateDescription vmstate_u2f_key; + +#define VMSTATE_U2F_KEY(_field, _state) { \ + .name = (stringify(_field)), \ + .size = sizeof(U2FKeyState), \ + .vmsd = &vmstate_u2f_key, \ + .flags = VMS_STRUCT, \ + .offset = vmstate_offset_value(_state, _field, U2FKeyState), \ +} + +#endif /* U2F_H */ diff --git a/hw/usb/vt82c686-uhci-pci.c b/hw/usb/vt82c686-uhci-pci.c new file mode 100644 index 000000000..0bf2b72ff --- /dev/null +++ b/hw/usb/vt82c686-uhci-pci.c @@ -0,0 +1,58 @@ +#include "qemu/osdep.h" +#include "hw/irq.h" +#include "hw/isa/vt82c686.h" +#include "hcd-uhci.h" + +static void uhci_isa_set_irq(void *opaque, int irq_num, int level) +{ + UHCIState *s = opaque; + uint8_t irq = pci_get_byte(s->dev.config + PCI_INTERRUPT_LINE); + if (irq > 0 && irq < 15) { + via_isa_set_irq(pci_get_function_0(&s->dev), irq, level); + } +} + +static void usb_uhci_vt82c686b_realize(PCIDevice *dev, Error **errp) +{ + UHCIState *s = UHCI(dev); + uint8_t *pci_conf = s->dev.config; + + /* USB misc control 1/2 */ + pci_set_long(pci_conf + 0x40, 0x00001000); + /* PM capability */ + pci_set_long(pci_conf + 0x80, 0x00020001); + /* USB legacy support */ + pci_set_long(pci_conf + 0xc0, 0x00002000); + + usb_uhci_common_realize(dev, errp); + object_unref(s->irq); + s->irq = qemu_allocate_irq(uhci_isa_set_irq, s, 0); +} + +static UHCIInfo uhci_info[] = { + { + .name = "vt82c686b-usb-uhci", + .vendor_id = PCI_VENDOR_ID_VIA, + .device_id = PCI_DEVICE_ID_VIA_UHCI, + .revision = 0x01, + .irq_pin = 3, + .realize = usb_uhci_vt82c686b_realize, + .unplug = true, + /* Reason: only works as USB function of VT82xx superio chips */ + .notuser = true, + } +}; + +static const TypeInfo vt82c686b_usb_uhci_type_info = { + .parent = TYPE_UHCI, + .name = "vt82c686b-usb-uhci", + .class_init = uhci_data_class_init, + .class_data = uhci_info, +}; + +static void vt82c686b_usb_uhci_register_types(void) +{ + type_register_static(&vt82c686b_usb_uhci_type_info); +} + +type_init(vt82c686b_usb_uhci_register_types) diff --git a/hw/usb/xen-usb.c 
b/hw/usb/xen-usb.c
new file mode 100644
index 000000000..0f7369e7e
--- /dev/null
+++ b/hw/usb/xen-usb.c
@@ -0,0 +1,1097 @@
+/*
+ * xen paravirt usb device backend
+ *
+ * (c) Juergen Gross <jgross@suse.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; under version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Contributions after 2012-01-13 are licensed under the terms of the
+ * GNU GPL, version 2 or (at your option) any later version.
+ */
+
+#include "qemu/osdep.h"
+#include <libusb.h>
+#include <sys/time.h>
+
+#include "qemu/config-file.h"
+#include "qemu/main-loop.h"
+#include "qemu/option.h"
+#include "hw/usb.h"
+#include "hw/xen/xen-legacy-backend.h"
+#include "monitor/qdev.h"
+#include "qapi/error.h"
+#include "qapi/qmp/qdict.h"
+#include "qapi/qmp/qstring.h"
+
+#include "hw/xen/interface/io/usbif.h"
+
+/*
+ * Check for required support of usbif.h: USBIF_SHORT_NOT_OK was the last
+ * macro added we rely on.
+ */
+#ifdef USBIF_SHORT_NOT_OK
+
+#define TR(xendev, lvl, fmt, args...)                               \
+    {                                                               \
+        struct timeval tv;                                          \
+                                                                    \
+        gettimeofday(&tv, NULL);                                    \
+        xen_pv_printf(xendev, lvl, "%8ld.%06ld xen-usb(%s):" fmt,   \
+                      tv.tv_sec, tv.tv_usec, __func__, ##args);     \
+    }
+#define TR_BUS(xendev, fmt, args...) TR(xendev, 2, fmt, ##args)
+#define TR_REQ(xendev, fmt, args...) TR(xendev, 3, fmt, ##args)
+
+#define USBBACK_MAXPORTS USBIF_PIPE_PORT_MASK
+#define USB_DEV_ADDR_SIZE (USBIF_PIPE_DEV_MASK + 1)
+
+/* USB wire protocol: structure describing control request parameter.
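+ * Field layout matches the standard 8-byte USB SETUP packet
+ * (bRequestType, bRequest, wValue, wIndex, wLength).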
*/ +struct usbif_ctrlrequest { + uint8_t bRequestType; + uint8_t bRequest; + uint16_t wValue; + uint16_t wIndex; + uint16_t wLength; +}; + +struct usbback_info; +struct usbback_req; + +struct usbback_stub { + USBDevice *dev; + USBPort port; + unsigned int speed; + bool attached; + QTAILQ_HEAD(, usbback_req) submit_q; +}; + +struct usbback_req { + struct usbback_info *usbif; + struct usbback_stub *stub; + struct usbif_urb_request req; + USBPacket packet; + + unsigned int nr_buffer_segs; /* # of transfer_buffer segments */ + unsigned int nr_extra_segs; /* # of iso_frame_desc segments */ + + QTAILQ_ENTRY(usbback_req) q; + + void *buffer; + void *isoc_buffer; + struct libusb_transfer *xfer; + + bool cancelled; +}; + +struct usbback_hotplug { + QSIMPLEQ_ENTRY(usbback_hotplug) q; + unsigned port; +}; + +struct usbback_info { + struct XenLegacyDevice xendev; /* must be first */ + USBBus bus; + void *urb_sring; + void *conn_sring; + struct usbif_urb_back_ring urb_ring; + struct usbif_conn_back_ring conn_ring; + int num_ports; + int usb_ver; + bool ring_error; + QTAILQ_HEAD(, usbback_req) req_free_q; + QSIMPLEQ_HEAD(, usbback_hotplug) hotplug_q; + struct usbback_stub ports[USBBACK_MAXPORTS]; + struct usbback_stub *addr_table[USB_DEV_ADDR_SIZE]; + QEMUBH *bh; +}; + +static struct usbback_req *usbback_get_req(struct usbback_info *usbif) +{ + struct usbback_req *usbback_req; + + if (QTAILQ_EMPTY(&usbif->req_free_q)) { + usbback_req = g_new0(struct usbback_req, 1); + } else { + usbback_req = QTAILQ_FIRST(&usbif->req_free_q); + QTAILQ_REMOVE(&usbif->req_free_q, usbback_req, q); + } + return usbback_req; +} + +static void usbback_put_req(struct usbback_req *usbback_req) +{ + struct usbback_info *usbif; + + usbif = usbback_req->usbif; + memset(usbback_req, 0, sizeof(*usbback_req)); + QTAILQ_INSERT_HEAD(&usbif->req_free_q, usbback_req, q); +} + +static int usbback_gnttab_map(struct usbback_req *usbback_req) +{ + unsigned int nr_segs, i, prot; + uint32_t ref[USBIF_MAX_SEGMENTS_PER_REQUEST]; + struct usbback_info *usbif = usbback_req->usbif; + struct XenLegacyDevice *xendev = &usbif->xendev; + struct usbif_request_segment *seg; + void *addr; + + nr_segs = usbback_req->nr_buffer_segs + usbback_req->nr_extra_segs; + if (!nr_segs) { + return 0; + } + + if (nr_segs > USBIF_MAX_SEGMENTS_PER_REQUEST) { + xen_pv_printf(xendev, 0, "bad number of segments in request (%d)\n", + nr_segs); + return -EINVAL; + } + + for (i = 0; i < nr_segs; i++) { + if ((unsigned)usbback_req->req.seg[i].offset + + (unsigned)usbback_req->req.seg[i].length > XC_PAGE_SIZE) { + xen_pv_printf(xendev, 0, "segment crosses page boundary\n"); + return -EINVAL; + } + } + + if (usbback_req->nr_buffer_segs) { + prot = PROT_READ; + if (usbif_pipein(usbback_req->req.pipe)) { + prot |= PROT_WRITE; + } + for (i = 0; i < usbback_req->nr_buffer_segs; i++) { + ref[i] = usbback_req->req.seg[i].gref; + } + usbback_req->buffer = + xen_be_map_grant_refs(xendev, ref, usbback_req->nr_buffer_segs, + prot); + + if (!usbback_req->buffer) { + return -ENOMEM; + } + + for (i = 0; i < usbback_req->nr_buffer_segs; i++) { + seg = usbback_req->req.seg + i; + addr = usbback_req->buffer + i * XC_PAGE_SIZE + seg->offset; + qemu_iovec_add(&usbback_req->packet.iov, addr, seg->length); + } + } + + if (!usbif_pipeisoc(usbback_req->req.pipe)) { + return 0; + } + + /* + * Right now isoc requests are not supported. + * Prepare supporting those by doing the work needed on the guest + * interface side. 
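+     *
+     * (Note: the descriptor segments are still mapped read/write below
+     * even though no isoc data is transferred yet, so a later
+     * implementation only needs to add the actual submission path.)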
+ */ + + if (!usbback_req->nr_extra_segs) { + xen_pv_printf(xendev, 0, "iso request without descriptor segments\n"); + return -EINVAL; + } + + prot = PROT_READ | PROT_WRITE; + for (i = 0; i < usbback_req->nr_extra_segs; i++) { + ref[i] = usbback_req->req.seg[i + usbback_req->req.nr_buffer_segs].gref; + } + usbback_req->isoc_buffer = + xen_be_map_grant_refs(xendev, ref, usbback_req->nr_extra_segs, + prot); + + if (!usbback_req->isoc_buffer) { + return -ENOMEM; + } + + return 0; +} + +static int usbback_init_packet(struct usbback_req *usbback_req) +{ + struct XenLegacyDevice *xendev = &usbback_req->usbif->xendev; + USBPacket *packet = &usbback_req->packet; + USBDevice *dev = usbback_req->stub->dev; + USBEndpoint *ep; + unsigned int pid, ep_nr; + bool sok; + int ret = 0; + + qemu_iovec_init(&packet->iov, USBIF_MAX_SEGMENTS_PER_REQUEST); + pid = usbif_pipein(usbback_req->req.pipe) ? USB_TOKEN_IN : USB_TOKEN_OUT; + ep_nr = usbif_pipeendpoint(usbback_req->req.pipe); + sok = !!(usbback_req->req.transfer_flags & USBIF_SHORT_NOT_OK); + if (usbif_pipectrl(usbback_req->req.pipe)) { + ep_nr = 0; + sok = false; + } + ep = usb_ep_get(dev, pid, ep_nr); + usb_packet_setup(packet, pid, ep, 0, 1, sok, true); + + switch (usbif_pipetype(usbback_req->req.pipe)) { + case USBIF_PIPE_TYPE_ISOC: + TR_REQ(xendev, "iso transfer %s: buflen: %x, %d frames\n", + (pid == USB_TOKEN_IN) ? "in" : "out", + usbback_req->req.buffer_length, + usbback_req->req.u.isoc.nr_frame_desc_segs); + ret = -EINVAL; /* isoc not implemented yet */ + break; + + case USBIF_PIPE_TYPE_INT: + TR_REQ(xendev, "int transfer %s: buflen: %x\n", + (pid == USB_TOKEN_IN) ? "in" : "out", + usbback_req->req.buffer_length); + break; + + case USBIF_PIPE_TYPE_CTRL: + packet->parameter = *(uint64_t *)usbback_req->req.u.ctrl; + TR_REQ(xendev, "ctrl parameter: %"PRIx64", buflen: %x\n", + packet->parameter, + usbback_req->req.buffer_length); + break; + + case USBIF_PIPE_TYPE_BULK: + TR_REQ(xendev, "bulk transfer %s: buflen: %x\n", + (pid == USB_TOKEN_IN) ? 
"in" : "out", + usbback_req->req.buffer_length); + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static void usbback_do_response(struct usbback_req *usbback_req, int32_t status, + int32_t actual_length, int32_t error_count) +{ + struct usbback_info *usbif; + struct usbif_urb_response *res; + struct XenLegacyDevice *xendev; + unsigned int notify; + + usbif = usbback_req->usbif; + xendev = &usbif->xendev; + + TR_REQ(xendev, "id %d, status %d, length %d, errcnt %d\n", + usbback_req->req.id, status, actual_length, error_count); + + if (usbback_req->packet.iov.iov) { + qemu_iovec_destroy(&usbback_req->packet.iov); + } + + if (usbback_req->buffer) { + xen_be_unmap_grant_refs(xendev, usbback_req->buffer, + usbback_req->nr_buffer_segs); + usbback_req->buffer = NULL; + } + + if (usbback_req->isoc_buffer) { + xen_be_unmap_grant_refs(xendev, usbback_req->isoc_buffer, + usbback_req->nr_extra_segs); + usbback_req->isoc_buffer = NULL; + } + + if (usbif->urb_sring) { + res = RING_GET_RESPONSE(&usbif->urb_ring, usbif->urb_ring.rsp_prod_pvt); + res->id = usbback_req->req.id; + res->status = status; + res->actual_length = actual_length; + res->error_count = error_count; + res->start_frame = 0; + usbif->urb_ring.rsp_prod_pvt++; + RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&usbif->urb_ring, notify); + + if (notify) { + xen_pv_send_notify(xendev); + } + } + + if (!usbback_req->cancelled) + usbback_put_req(usbback_req); +} + +static void usbback_do_response_ret(struct usbback_req *usbback_req, + int32_t status) +{ + usbback_do_response(usbback_req, status, 0, 0); +} + +static int32_t usbback_xlat_status(int status) +{ + switch (status) { + case USB_RET_SUCCESS: + return 0; + case USB_RET_NODEV: + return -ENODEV; + case USB_RET_STALL: + return -EPIPE; + case USB_RET_BABBLE: + return -EOVERFLOW; + case USB_RET_IOERROR: + return -EPROTO; + } + + return -ESHUTDOWN; +} + +static void usbback_packet_complete(struct usbback_req *usbback_req) +{ + USBPacket *packet = &usbback_req->packet; + int32_t status; + + QTAILQ_REMOVE(&usbback_req->stub->submit_q, usbback_req, q); + + status = usbback_xlat_status(packet->status); + usbback_do_response(usbback_req, status, packet->actual_length, 0); +} + +static void usbback_set_address(struct usbback_info *usbif, + struct usbback_stub *stub, + unsigned int cur_addr, unsigned int new_addr) +{ + if (cur_addr) { + usbif->addr_table[cur_addr] = NULL; + } + if (new_addr) { + usbif->addr_table[new_addr] = stub; + } +} + +static void usbback_cancel_req(struct usbback_req *usbback_req) +{ + if (usb_packet_is_inflight(&usbback_req->packet)) { + usb_cancel_packet(&usbback_req->packet); + QTAILQ_REMOVE(&usbback_req->stub->submit_q, usbback_req, q); + usbback_req->cancelled = true; + usbback_do_response_ret(usbback_req, -EPROTO); + } +} + +static void usbback_process_unlink_req(struct usbback_req *usbback_req) +{ + struct usbback_info *usbif; + struct usbback_req *unlink_req; + unsigned int id, devnum; + int ret; + + usbif = usbback_req->usbif; + ret = 0; + id = usbback_req->req.u.unlink.unlink_id; + TR_REQ(&usbif->xendev, "unlink id %d\n", id); + devnum = usbif_pipedevice(usbback_req->req.pipe); + if (unlikely(devnum == 0)) { + usbback_req->stub = usbif->ports + + usbif_pipeportnum(usbback_req->req.pipe) - 1; + if (unlikely(!usbback_req->stub)) { + ret = -ENODEV; + goto fail_response; + } + } else { + if (unlikely(!usbif->addr_table[devnum])) { + ret = -ENODEV; + goto fail_response; + } + usbback_req->stub = usbif->addr_table[devnum]; + } + + QTAILQ_FOREACH(unlink_req, 
&usbback_req->stub->submit_q, q) {
+        if (unlink_req->req.id == id) {
+            usbback_cancel_req(unlink_req);
+            break;
+        }
+    }
+
+fail_response:
+    usbback_do_response_ret(usbback_req, ret);
+}
+
+/*
+ * Checks whether a request can be handled at once or has to be forwarded
+ * to the usb framework.
+ * Return value is:
+ *   0 if the usb framework is needed
+ *   1 if the request was handled locally (no error)
+ * If the return value is not 0, the response has already been queued.
+ */
+static int usbback_check_and_submit(struct usbback_req *usbback_req)
+{
+    struct usbback_info *usbif;
+    unsigned int devnum;
+    struct usbback_stub *stub;
+    struct usbif_ctrlrequest *ctrl;
+    int ret;
+    uint16_t wValue;
+
+    usbif = usbback_req->usbif;
+    stub = NULL;
+    devnum = usbif_pipedevice(usbback_req->req.pipe);
+    ctrl = (struct usbif_ctrlrequest *)usbback_req->req.u.ctrl;
+    wValue = le16_to_cpu(ctrl->wValue);
+
+    /*
+     * When the device is first connected or reset, the USB device has no
+     * address yet. In this initial state, the following requests are sent
+     * to device address #0:
+     *
+     * 1. GET_DESCRIPTOR (with descriptor type "DEVICE") is sent, so the
+     *    OS learns what kind of device is connected.
+     *
+     * 2. SET_ADDRESS is sent, after which the device has its address.
+     *
+     * In the next step, SET_CONFIGURATION is sent to the addressed device,
+     * and the device is finally ready to use.
+     */
+    if (unlikely(devnum == 0)) {
+        stub = usbif->ports + usbif_pipeportnum(usbback_req->req.pipe) - 1;
+        if (!stub->dev || !stub->attached) {
+            ret = -ENODEV;
+            goto do_response;
+        }
+
+        switch (ctrl->bRequest) {
+        case USB_REQ_GET_DESCRIPTOR:
+            /*
+             * GET_DESCRIPTOR request to device #0.
+             * Pass it on as a normal transfer.
+             */
+            TR_REQ(&usbif->xendev, "devnum 0 GET_DESCRIPTOR\n");
+            usbback_req->stub = stub;
+            return 0;
+        case USB_REQ_SET_ADDRESS:
+            /*
+             * SET_ADDRESS request to device #0.
+             * Add the attached device to addr_table.
+             */
+            TR_REQ(&usbif->xendev, "devnum 0 SET_ADDRESS\n");
+            usbback_set_address(usbif, stub, 0, wValue);
+            ret = 0;
+            break;
+        default:
+            ret = -EINVAL;
+            break;
+        }
+        goto do_response;
+    }
+
+    if (unlikely(!usbif->addr_table[devnum])) {
+        ret = -ENODEV;
+        goto do_response;
+    }
+    usbback_req->stub = usbif->addr_table[devnum];
+
+    /*
+     * Check for a special request.
+     */
+    if (ctrl->bRequest != USB_REQ_SET_ADDRESS) {
+        return 0;
+    }
+
+    /*
+     * SET_ADDRESS request to an addressed device.
+     * Change the address or remove it from addr_table.
+     */
+    usbback_set_address(usbif, usbback_req->stub, devnum, wValue);
+    ret = 0;
+
+do_response:
+    usbback_do_response_ret(usbback_req, ret);
+    return 1;
+}
+
+static void usbback_dispatch(struct usbback_req *usbback_req)
+{
+    int ret;
+    unsigned int devnum;
+    struct usbback_info *usbif;
+
+    usbif = usbback_req->usbif;
+
+    TR_REQ(&usbif->xendev, "start req_id %d pipe %08x\n", usbback_req->req.id,
+           usbback_req->req.pipe);
+
+    /* unlink request */
+    if (unlikely(usbif_pipeunlink(usbback_req->req.pipe))) {
+        usbback_process_unlink_req(usbback_req);
+        return;
+    }
+
+    if (usbif_pipectrl(usbback_req->req.pipe)) {
+        if (usbback_check_and_submit(usbback_req)) {
+            return;
+        }
+    } else {
+        devnum = usbif_pipedevice(usbback_req->req.pipe);
+        usbback_req->stub = usbif->addr_table[devnum];
+
+        if (!usbback_req->stub || !usbback_req->stub->attached) {
+            ret = -ENODEV;
+            goto fail_response;
+        }
+    }
+
+    QTAILQ_INSERT_TAIL(&usbback_req->stub->submit_q, usbback_req, q);
+
+    usbback_req->nr_buffer_segs = usbback_req->req.nr_buffer_segs;
+    usbback_req->nr_extra_segs = usbif_pipeisoc(usbback_req->req.pipe) ?
+ usbback_req->req.u.isoc.nr_frame_desc_segs : 0; + + ret = usbback_init_packet(usbback_req); + if (ret) { + xen_pv_printf(&usbif->xendev, 0, "invalid request\n"); + ret = -ESHUTDOWN; + goto fail_free_urb; + } + + ret = usbback_gnttab_map(usbback_req); + if (ret) { + xen_pv_printf(&usbif->xendev, 0, "invalid buffer, ret=%d\n", ret); + ret = -ESHUTDOWN; + goto fail_free_urb; + } + + usb_handle_packet(usbback_req->stub->dev, &usbback_req->packet); + if (usbback_req->packet.status != USB_RET_ASYNC) { + usbback_packet_complete(usbback_req); + } + return; + +fail_free_urb: + QTAILQ_REMOVE(&usbback_req->stub->submit_q, usbback_req, q); + +fail_response: + usbback_do_response_ret(usbback_req, ret); +} + +static void usbback_hotplug_notify(struct usbback_info *usbif) +{ + struct usbif_conn_back_ring *ring = &usbif->conn_ring; + struct usbif_conn_request req; + struct usbif_conn_response *res; + struct usbback_hotplug *usb_hp; + unsigned int notify; + + if (!usbif->conn_sring) { + return; + } + + /* Check for full ring. */ + if ((RING_SIZE(ring) - ring->rsp_prod_pvt - ring->req_cons) == 0) { + xen_pv_send_notify(&usbif->xendev); + return; + } + + usb_hp = QSIMPLEQ_FIRST(&usbif->hotplug_q); + QSIMPLEQ_REMOVE_HEAD(&usbif->hotplug_q, q); + + RING_COPY_REQUEST(ring, ring->req_cons, &req); + ring->req_cons++; + ring->sring->req_event = ring->req_cons + 1; + + res = RING_GET_RESPONSE(ring, ring->rsp_prod_pvt); + res->id = req.id; + res->portnum = usb_hp->port; + res->speed = usbif->ports[usb_hp->port - 1].speed; + ring->rsp_prod_pvt++; + RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(ring, notify); + + if (notify) { + xen_pv_send_notify(&usbif->xendev); + } + + TR_BUS(&usbif->xendev, "hotplug port %d speed %d\n", usb_hp->port, + res->speed); + + g_free(usb_hp); + + if (!QSIMPLEQ_EMPTY(&usbif->hotplug_q)) { + qemu_bh_schedule(usbif->bh); + } +} + +static void usbback_bh(void *opaque) +{ + struct usbback_info *usbif; + struct usbif_urb_back_ring *urb_ring; + struct usbback_req *usbback_req; + RING_IDX rc, rp; + unsigned int more_to_do; + + usbif = opaque; + if (usbif->ring_error) { + return; + } + + if (!QSIMPLEQ_EMPTY(&usbif->hotplug_q)) { + usbback_hotplug_notify(usbif); + } + + urb_ring = &usbif->urb_ring; + rc = urb_ring->req_cons; + rp = urb_ring->sring->req_prod; + xen_rmb(); /* Ensure we see queued requests up to 'rp'. */ + + if (RING_REQUEST_PROD_OVERFLOW(urb_ring, rp)) { + rc = urb_ring->rsp_prod_pvt; + xen_pv_printf(&usbif->xendev, 0, "domU provided bogus ring requests " + "(%#x - %#x = %u). 
Halting ring processing.\n", + rp, rc, rp - rc); + usbif->ring_error = true; + return; + } + + while (rc != rp) { + if (RING_REQUEST_CONS_OVERFLOW(urb_ring, rc)) { + break; + } + usbback_req = usbback_get_req(usbif); + + RING_COPY_REQUEST(urb_ring, rc, &usbback_req->req); + usbback_req->usbif = usbif; + + usbback_dispatch(usbback_req); + + urb_ring->req_cons = ++rc; + } + + RING_FINAL_CHECK_FOR_REQUESTS(urb_ring, more_to_do); + if (more_to_do) { + qemu_bh_schedule(usbif->bh); + } +} + +static void usbback_hotplug_enq(struct usbback_info *usbif, unsigned port) +{ + struct usbback_hotplug *usb_hp; + + usb_hp = g_new0(struct usbback_hotplug, 1); + usb_hp->port = port; + QSIMPLEQ_INSERT_TAIL(&usbif->hotplug_q, usb_hp, q); + usbback_hotplug_notify(usbif); +} + +static void usbback_portid_drain(struct usbback_info *usbif, unsigned port) +{ + struct usbback_req *req, *tmp; + bool sched = false; + + QTAILQ_FOREACH_SAFE(req, &usbif->ports[port - 1].submit_q, q, tmp) { + usbback_cancel_req(req); + sched = true; + } + + if (sched) { + qemu_bh_schedule(usbif->bh); + } +} + +static void usbback_portid_detach(struct usbback_info *usbif, unsigned port) +{ + if (!usbif->ports[port - 1].attached) { + return; + } + + usbif->ports[port - 1].speed = USBIF_SPEED_NONE; + usbif->ports[port - 1].attached = false; + usbback_portid_drain(usbif, port); + usbback_hotplug_enq(usbif, port); +} + +static void usbback_portid_remove(struct usbback_info *usbif, unsigned port) +{ + if (!usbif->ports[port - 1].dev) { + return; + } + + object_unparent(OBJECT(usbif->ports[port - 1].dev)); + usbif->ports[port - 1].dev = NULL; + usbback_portid_detach(usbif, port); + + TR_BUS(&usbif->xendev, "port %d removed\n", port); +} + +static void usbback_portid_add(struct usbback_info *usbif, unsigned port, + char *busid) +{ + unsigned speed; + char *portname; + Error *local_err = NULL; + QDict *qdict; + QemuOpts *opts; + char *tmp; + + if (usbif->ports[port - 1].dev) { + return; + } + + portname = strchr(busid, '-'); + if (!portname) { + xen_pv_printf(&usbif->xendev, 0, "device %s illegal specification\n", + busid); + return; + } + portname++; + + qdict = qdict_new(); + qdict_put_str(qdict, "driver", "usb-host"); + tmp = g_strdup_printf("%s.0", usbif->xendev.qdev.id); + qdict_put_str(qdict, "bus", tmp); + g_free(tmp); + tmp = g_strdup_printf("%s-%u", usbif->xendev.qdev.id, port); + qdict_put_str(qdict, "id", tmp); + g_free(tmp); + qdict_put_int(qdict, "port", port); + qdict_put_int(qdict, "hostbus", atoi(busid)); + qdict_put_str(qdict, "hostport", portname); + opts = qemu_opts_from_qdict(qemu_find_opts("device"), qdict, + &error_abort); + usbif->ports[port - 1].dev = USB_DEVICE(qdev_device_add(opts, &local_err)); + if (!usbif->ports[port - 1].dev) { + qobject_unref(qdict); + xen_pv_printf(&usbif->xendev, 0, + "device %s could not be opened: %s\n", + busid, error_get_pretty(local_err)); + error_free(local_err); + return; + } + qobject_unref(qdict); + speed = usbif->ports[port - 1].dev->speed; + switch (speed) { + case USB_SPEED_LOW: + speed = USBIF_SPEED_LOW; + break; + case USB_SPEED_FULL: + speed = USBIF_SPEED_FULL; + break; + case USB_SPEED_HIGH: + speed = (usbif->usb_ver < USB_VER_USB20) ? 
+ USBIF_SPEED_NONE : USBIF_SPEED_HIGH; + break; + default: + speed = USBIF_SPEED_NONE; + break; + } + if (speed == USBIF_SPEED_NONE) { + xen_pv_printf(&usbif->xendev, 0, "device %s wrong speed\n", busid); + object_unparent(OBJECT(usbif->ports[port - 1].dev)); + usbif->ports[port - 1].dev = NULL; + return; + } + usb_device_reset(usbif->ports[port - 1].dev); + usbif->ports[port - 1].speed = speed; + usbif->ports[port - 1].attached = true; + QTAILQ_INIT(&usbif->ports[port - 1].submit_q); + usbback_hotplug_enq(usbif, port); + + TR_BUS(&usbif->xendev, "port %d attached\n", port); +} + +static void usbback_process_port(struct usbback_info *usbif, unsigned port) +{ + char node[8]; + char *busid; + + snprintf(node, sizeof(node), "port/%d", port); + busid = xenstore_read_be_str(&usbif->xendev, node); + if (busid == NULL) { + xen_pv_printf(&usbif->xendev, 0, "xenstore_read %s failed\n", node); + return; + } + + /* Remove portid, if the port is not connected. */ + if (strlen(busid) == 0) { + usbback_portid_remove(usbif, port); + } else { + usbback_portid_add(usbif, port, busid); + } + + g_free(busid); +} + +static void usbback_disconnect(struct XenLegacyDevice *xendev) +{ + struct usbback_info *usbif; + unsigned int i; + + TR_BUS(xendev, "start\n"); + + usbif = container_of(xendev, struct usbback_info, xendev); + + xen_pv_unbind_evtchn(xendev); + + if (usbif->urb_sring) { + xen_be_unmap_grant_ref(xendev, usbif->urb_sring); + usbif->urb_sring = NULL; + } + if (usbif->conn_sring) { + xen_be_unmap_grant_ref(xendev, usbif->conn_sring); + usbif->conn_sring = NULL; + } + + for (i = 0; i < usbif->num_ports; i++) { + if (usbif->ports[i].dev) { + usbback_portid_drain(usbif, i + 1); + } + } + + TR_BUS(xendev, "finished\n"); +} + +static int usbback_connect(struct XenLegacyDevice *xendev) +{ + struct usbback_info *usbif; + struct usbif_urb_sring *urb_sring; + struct usbif_conn_sring *conn_sring; + int urb_ring_ref; + int conn_ring_ref; + unsigned int i, max_grants; + + TR_BUS(xendev, "start\n"); + + /* max_grants: for each request and for the rings (request and connect). 
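+     *
+     * Worked example with the constants used below: each of the
+     * USB_URB_RING_SIZE possible in-flight requests can take up to
+     * USBIF_MAX_SEGMENTS_PER_REQUEST grants, and the urb and conn shared
+     * rings take one grant each, hence the "+ 2".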
*/ + max_grants = USBIF_MAX_SEGMENTS_PER_REQUEST * USB_URB_RING_SIZE + 2; + xen_be_set_max_grant_refs(xendev, max_grants); + + usbif = container_of(xendev, struct usbback_info, xendev); + + if (xenstore_read_fe_int(xendev, "urb-ring-ref", &urb_ring_ref)) { + xen_pv_printf(xendev, 0, "error reading urb-ring-ref\n"); + return -1; + } + if (xenstore_read_fe_int(xendev, "conn-ring-ref", &conn_ring_ref)) { + xen_pv_printf(xendev, 0, "error reading conn-ring-ref\n"); + return -1; + } + if (xenstore_read_fe_int(xendev, "event-channel", &xendev->remote_port)) { + xen_pv_printf(xendev, 0, "error reading event-channel\n"); + return -1; + } + + usbif->urb_sring = xen_be_map_grant_ref(xendev, urb_ring_ref, + PROT_READ | PROT_WRITE); + usbif->conn_sring = xen_be_map_grant_ref(xendev, conn_ring_ref, + PROT_READ | PROT_WRITE); + if (!usbif->urb_sring || !usbif->conn_sring) { + xen_pv_printf(xendev, 0, "error mapping rings\n"); + usbback_disconnect(xendev); + return -1; + } + + urb_sring = usbif->urb_sring; + conn_sring = usbif->conn_sring; + BACK_RING_INIT(&usbif->urb_ring, urb_sring, XC_PAGE_SIZE); + BACK_RING_INIT(&usbif->conn_ring, conn_sring, XC_PAGE_SIZE); + + xen_be_bind_evtchn(xendev); + + xen_pv_printf(xendev, 1, "urb-ring-ref %d, conn-ring-ref %d, " + "remote port %d, local port %d\n", urb_ring_ref, + conn_ring_ref, xendev->remote_port, xendev->local_port); + + for (i = 1; i <= usbif->num_ports; i++) { + if (usbif->ports[i - 1].dev) { + usbback_hotplug_enq(usbif, i); + } + } + + return 0; +} + +static void usbback_backend_changed(struct XenLegacyDevice *xendev, + const char *node) +{ + struct usbback_info *usbif; + unsigned int i; + + TR_BUS(xendev, "path %s\n", node); + + usbif = container_of(xendev, struct usbback_info, xendev); + for (i = 1; i <= usbif->num_ports; i++) { + usbback_process_port(usbif, i); + } +} + +static int usbback_init(struct XenLegacyDevice *xendev) +{ + struct usbback_info *usbif; + + TR_BUS(xendev, "start\n"); + + usbif = container_of(xendev, struct usbback_info, xendev); + + if (xenstore_read_be_int(xendev, "num-ports", &usbif->num_ports) || + usbif->num_ports < 1 || usbif->num_ports > USBBACK_MAXPORTS) { + xen_pv_printf(xendev, 0, "num-ports not readable or out of bounds\n"); + return -1; + } + if (xenstore_read_be_int(xendev, "usb-ver", &usbif->usb_ver) || + (usbif->usb_ver != USB_VER_USB11 && usbif->usb_ver != USB_VER_USB20)) { + xen_pv_printf(xendev, 0, "usb-ver not readable or out of bounds\n"); + return -1; + } + + usbback_backend_changed(xendev, "port"); + + TR_BUS(xendev, "finished\n"); + + return 0; +} + +static void xen_bus_attach(USBPort *port) +{ + struct usbback_info *usbif; + + usbif = port->opaque; + TR_BUS(&usbif->xendev, "\n"); + usbif->ports[port->index].attached = true; + usbback_hotplug_enq(usbif, port->index + 1); +} + +static void xen_bus_detach(USBPort *port) +{ + struct usbback_info *usbif; + + usbif = port->opaque; + TR_BUS(&usbif->xendev, "\n"); + usbback_portid_detach(usbif, port->index + 1); +} + +static void xen_bus_child_detach(USBPort *port, USBDevice *child) +{ + struct usbback_info *usbif; + + usbif = port->opaque; + TR_BUS(&usbif->xendev, "\n"); +} + +static void xen_bus_complete(USBPort *port, USBPacket *packet) +{ + struct usbback_req *usbback_req; + struct usbback_info *usbif; + + usbback_req = container_of(packet, struct usbback_req, packet); + if (usbback_req->cancelled) { + g_free(usbback_req); + return; + } + + usbif = usbback_req->usbif; + TR_REQ(&usbif->xendev, "\n"); + usbback_packet_complete(usbback_req); +} + +static 
USBPortOps xen_usb_port_ops = { + .attach = xen_bus_attach, + .detach = xen_bus_detach, + .child_detach = xen_bus_child_detach, + .complete = xen_bus_complete, +}; + +static USBBusOps xen_usb_bus_ops = { +}; + +static void usbback_alloc(struct XenLegacyDevice *xendev) +{ + struct usbback_info *usbif; + USBPort *p; + unsigned int i; + + usbif = container_of(xendev, struct usbback_info, xendev); + + usb_bus_new(&usbif->bus, sizeof(usbif->bus), &xen_usb_bus_ops, + DEVICE(&xendev->qdev)); + for (i = 0; i < USBBACK_MAXPORTS; i++) { + p = &(usbif->ports[i].port); + usb_register_port(&usbif->bus, p, usbif, i, &xen_usb_port_ops, + USB_SPEED_MASK_LOW | USB_SPEED_MASK_FULL | + USB_SPEED_MASK_HIGH); + } + + QTAILQ_INIT(&usbif->req_free_q); + QSIMPLEQ_INIT(&usbif->hotplug_q); + usbif->bh = qemu_bh_new(usbback_bh, usbif); +} + +static int usbback_free(struct XenLegacyDevice *xendev) +{ + struct usbback_info *usbif; + struct usbback_req *usbback_req; + struct usbback_hotplug *usb_hp; + unsigned int i; + + TR_BUS(xendev, "start\n"); + + usbback_disconnect(xendev); + usbif = container_of(xendev, struct usbback_info, xendev); + for (i = 1; i <= usbif->num_ports; i++) { + usbback_portid_remove(usbif, i); + } + + while (!QTAILQ_EMPTY(&usbif->req_free_q)) { + usbback_req = QTAILQ_FIRST(&usbif->req_free_q); + QTAILQ_REMOVE(&usbif->req_free_q, usbback_req, q); + g_free(usbback_req); + } + while (!QSIMPLEQ_EMPTY(&usbif->hotplug_q)) { + usb_hp = QSIMPLEQ_FIRST(&usbif->hotplug_q); + QSIMPLEQ_REMOVE_HEAD(&usbif->hotplug_q, q); + g_free(usb_hp); + } + + qemu_bh_delete(usbif->bh); + + for (i = 0; i < USBBACK_MAXPORTS; i++) { + usb_unregister_port(&usbif->bus, &(usbif->ports[i].port)); + } + + usb_bus_release(&usbif->bus); + + TR_BUS(xendev, "finished\n"); + + return 0; +} + +static void usbback_event(struct XenLegacyDevice *xendev) +{ + struct usbback_info *usbif; + + usbif = container_of(xendev, struct usbback_info, xendev); + qemu_bh_schedule(usbif->bh); +} + +struct XenDevOps xen_usb_ops = { + .size = sizeof(struct usbback_info), + .flags = DEVOPS_FLAG_NEED_GNTDEV, + .init = usbback_init, + .alloc = usbback_alloc, + .free = usbback_free, + .backend_changed = usbback_backend_changed, + .initialise = usbback_connect, + .disconnect = usbback_disconnect, + .event = usbback_event, +}; + +#else /* USBIF_SHORT_NOT_OK */ + +static int usbback_not_supported(void) +{ + return -EINVAL; +} + +struct XenDevOps xen_usb_ops = { + .backend_register = usbback_not_supported, +}; + +#endif diff --git a/hw/usb/xlnx-usb-subsystem.c b/hw/usb/xlnx-usb-subsystem.c new file mode 100644 index 000000000..d8deeb6ce --- /dev/null +++ b/hw/usb/xlnx-usb-subsystem.c @@ -0,0 +1,92 @@ +/* + * QEMU model of the Xilinx usb subsystem + * + * Copyright (c) 2020 Xilinx Inc. Sai Pavan Boddu + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "hw/sysbus.h" +#include "hw/register.h" +#include "qemu/bitops.h" +#include "qom/object.h" +#include "qapi/error.h" +#include "hw/qdev-properties.h" +#include "hw/usb/xlnx-usb-subsystem.h" + +static void versal_usb2_realize(DeviceState *dev, Error **errp) +{ + VersalUsb2 *s = VERSAL_USB2(dev); + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + Error *err = NULL; + + sysbus_realize(SYS_BUS_DEVICE(&s->dwc3), &err); + if (err) { + error_propagate(errp, err); + return; + } + sysbus_realize(SYS_BUS_DEVICE(&s->usb2Ctrl), &err); + if (err) { + error_propagate(errp, err); + return; + } + sysbus_init_mmio(sbd, &s->dwc3_mr); + sysbus_init_mmio(sbd, &s->usb2Ctrl_mr); + qdev_pass_gpios(DEVICE(&s->dwc3.sysbus_xhci), dev, SYSBUS_DEVICE_GPIO_IRQ); +} + +static void versal_usb2_init(Object *obj) +{ + VersalUsb2 *s = VERSAL_USB2(obj); + + object_initialize_child(obj, "versal.dwc3", &s->dwc3, + TYPE_USB_DWC3); + object_initialize_child(obj, "versal.usb2-ctrl", &s->usb2Ctrl, + TYPE_XILINX_VERSAL_USB2_CTRL_REGS); + memory_region_init_alias(&s->dwc3_mr, obj, "versal.dwc3_alias", + &s->dwc3.iomem, 0, DWC3_SIZE); + memory_region_init_alias(&s->usb2Ctrl_mr, obj, "versal.usb2Ctrl_alias", + &s->usb2Ctrl.iomem, 0, USB2_REGS_R_MAX * 4); + qdev_alias_all_properties(DEVICE(&s->dwc3), obj); + qdev_alias_all_properties(DEVICE(&s->dwc3.sysbus_xhci), obj); + object_property_add_alias(obj, "dma", OBJECT(&s->dwc3.sysbus_xhci), "dma"); +} + +static void versal_usb2_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = versal_usb2_realize; +} + +static const TypeInfo versal_usb2_info = { + .name = TYPE_XILINX_VERSAL_USB2, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(VersalUsb2), + .class_init = versal_usb2_class_init, + .instance_init = versal_usb2_init, +}; + +static void versal_usb_types(void) +{ + type_register_static(&versal_usb2_info); +} + +type_init(versal_usb_types) diff --git a/hw/usb/xlnx-versal-usb2-ctrl-regs.c b/hw/usb/xlnx-versal-usb2-ctrl-regs.c new file mode 100644 index 000000000..1c094aa1a --- /dev/null +++ b/hw/usb/xlnx-versal-usb2-ctrl-regs.c @@ -0,0 +1,228 @@ +/* + * QEMU model of the VersalUsb2CtrlRegs Register control/Status block for + * USB2.0 controller + * + * This module should control phy_reset, permanent device plugs, frame length + * time adjust & setting of coherency paths. None of which are emulated in + * present model. + * + * Copyright (c) 2020 Xilinx Inc. 
Vikram Garhwal + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "hw/sysbus.h" +#include "hw/irq.h" +#include "hw/register.h" +#include "qemu/bitops.h" +#include "qom/object.h" +#include "migration/vmstate.h" +#include "hw/usb/xlnx-versal-usb2-ctrl-regs.h" + +#ifndef XILINX_VERSAL_USB2_CTRL_REGS_ERR_DEBUG +#define XILINX_VERSAL_USB2_CTRL_REGS_ERR_DEBUG 0 +#endif + +REG32(BUS_FILTER, 0x30) + FIELD(BUS_FILTER, BYPASS, 0, 4) +REG32(PORT, 0x34) + FIELD(PORT, HOST_SMI_BAR_WR, 4, 1) + FIELD(PORT, HOST_SMI_PCI_CMD_REG_WR, 3, 1) + FIELD(PORT, HOST_MSI_ENABLE, 2, 1) + FIELD(PORT, PWR_CTRL_PRSNT, 1, 1) + FIELD(PORT, HUB_PERM_ATTACH, 0, 1) +REG32(JITTER_ADJUST, 0x38) + FIELD(JITTER_ADJUST, FLADJ, 0, 6) +REG32(BIGENDIAN, 0x40) + FIELD(BIGENDIAN, ENDIAN_GS, 0, 1) +REG32(COHERENCY, 0x44) + FIELD(COHERENCY, USB_COHERENCY, 0, 1) +REG32(XHC_BME, 0x48) + FIELD(XHC_BME, XHC_BME, 0, 1) +REG32(REG_CTRL, 0x60) + FIELD(REG_CTRL, SLVERR_ENABLE, 0, 1) +REG32(IR_STATUS, 0x64) + FIELD(IR_STATUS, HOST_SYS_ERR, 1, 1) + FIELD(IR_STATUS, ADDR_DEC_ERR, 0, 1) +REG32(IR_MASK, 0x68) + FIELD(IR_MASK, HOST_SYS_ERR, 1, 1) + FIELD(IR_MASK, ADDR_DEC_ERR, 0, 1) +REG32(IR_ENABLE, 0x6c) + FIELD(IR_ENABLE, HOST_SYS_ERR, 1, 1) + FIELD(IR_ENABLE, ADDR_DEC_ERR, 0, 1) +REG32(IR_DISABLE, 0x70) + FIELD(IR_DISABLE, HOST_SYS_ERR, 1, 1) + FIELD(IR_DISABLE, ADDR_DEC_ERR, 0, 1) +REG32(USB3, 0x78) + +static void ir_update_irq(VersalUsb2CtrlRegs *s) +{ + bool pending = s->regs[R_IR_STATUS] & ~s->regs[R_IR_MASK]; + qemu_set_irq(s->irq_ir, pending); +} + +static void ir_status_postw(RegisterInfo *reg, uint64_t val64) +{ + VersalUsb2CtrlRegs *s = XILINX_VERSAL_USB2_CTRL_REGS(reg->opaque); + /* + * TODO: This should also clear USBSTS.HSE field in USB XHCI register. + * May be combine both the modules. 
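+     *
+     * (Context from the register table below: IR_STATUS is declared
+     * write-1-to-clear with mask 0x3, so by the time this post-write
+     * hook runs the written bits are already cleared and only the
+     * interrupt line has to be re-evaluated.)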
+ */ + ir_update_irq(s); +} + +static uint64_t ir_enable_prew(RegisterInfo *reg, uint64_t val64) +{ + VersalUsb2CtrlRegs *s = XILINX_VERSAL_USB2_CTRL_REGS(reg->opaque); + uint32_t val = val64; + + s->regs[R_IR_MASK] &= ~val; + ir_update_irq(s); + return 0; +} + +static uint64_t ir_disable_prew(RegisterInfo *reg, uint64_t val64) +{ + VersalUsb2CtrlRegs *s = XILINX_VERSAL_USB2_CTRL_REGS(reg->opaque); + uint32_t val = val64; + + s->regs[R_IR_MASK] |= val; + ir_update_irq(s); + return 0; +} + +static const RegisterAccessInfo usb2_ctrl_regs_regs_info[] = { + { .name = "BUS_FILTER", .addr = A_BUS_FILTER, + .rsvd = 0xfffffff0, + },{ .name = "PORT", .addr = A_PORT, + .rsvd = 0xffffffe0, + },{ .name = "JITTER_ADJUST", .addr = A_JITTER_ADJUST, + .reset = 0x20, + .rsvd = 0xffffffc0, + },{ .name = "BIGENDIAN", .addr = A_BIGENDIAN, + .rsvd = 0xfffffffe, + },{ .name = "COHERENCY", .addr = A_COHERENCY, + .rsvd = 0xfffffffe, + },{ .name = "XHC_BME", .addr = A_XHC_BME, + .reset = 0x1, + .rsvd = 0xfffffffe, + },{ .name = "REG_CTRL", .addr = A_REG_CTRL, + .rsvd = 0xfffffffe, + },{ .name = "IR_STATUS", .addr = A_IR_STATUS, + .rsvd = 0xfffffffc, + .w1c = 0x3, + .post_write = ir_status_postw, + },{ .name = "IR_MASK", .addr = A_IR_MASK, + .reset = 0x3, + .rsvd = 0xfffffffc, + .ro = 0x3, + },{ .name = "IR_ENABLE", .addr = A_IR_ENABLE, + .rsvd = 0xfffffffc, + .pre_write = ir_enable_prew, + },{ .name = "IR_DISABLE", .addr = A_IR_DISABLE, + .rsvd = 0xfffffffc, + .pre_write = ir_disable_prew, + },{ .name = "USB3", .addr = A_USB3, + } +}; + +static void usb2_ctrl_regs_reset_init(Object *obj, ResetType type) +{ + VersalUsb2CtrlRegs *s = XILINX_VERSAL_USB2_CTRL_REGS(obj); + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(s->regs_info); ++i) { + register_reset(&s->regs_info[i]); + } +} + +static void usb2_ctrl_regs_reset_hold(Object *obj) +{ + VersalUsb2CtrlRegs *s = XILINX_VERSAL_USB2_CTRL_REGS(obj); + + ir_update_irq(s); +} + +static const MemoryRegionOps usb2_ctrl_regs_ops = { + .read = register_read_memory, + .write = register_write_memory, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +static void usb2_ctrl_regs_init(Object *obj) +{ + VersalUsb2CtrlRegs *s = XILINX_VERSAL_USB2_CTRL_REGS(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + RegisterInfoArray *reg_array; + + memory_region_init(&s->iomem, obj, TYPE_XILINX_VERSAL_USB2_CTRL_REGS, + USB2_REGS_R_MAX * 4); + reg_array = + register_init_block32(DEVICE(obj), usb2_ctrl_regs_regs_info, + ARRAY_SIZE(usb2_ctrl_regs_regs_info), + s->regs_info, s->regs, + &usb2_ctrl_regs_ops, + XILINX_VERSAL_USB2_CTRL_REGS_ERR_DEBUG, + USB2_REGS_R_MAX * 4); + memory_region_add_subregion(&s->iomem, + 0x0, + ®_array->mem); + sysbus_init_mmio(sbd, &s->iomem); + sysbus_init_irq(sbd, &s->irq_ir); +} + +static const VMStateDescription vmstate_usb2_ctrl_regs = { + .name = TYPE_XILINX_VERSAL_USB2_CTRL_REGS, + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32_ARRAY(regs, VersalUsb2CtrlRegs, USB2_REGS_R_MAX), + VMSTATE_END_OF_LIST(), + } +}; + +static void usb2_ctrl_regs_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + ResettableClass *rc = RESETTABLE_CLASS(klass); + + rc->phases.enter = usb2_ctrl_regs_reset_init; + rc->phases.hold = usb2_ctrl_regs_reset_hold; + dc->vmsd = &vmstate_usb2_ctrl_regs; +} + +static const TypeInfo usb2_ctrl_regs_info = { + .name = TYPE_XILINX_VERSAL_USB2_CTRL_REGS, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = 
sizeof(VersalUsb2CtrlRegs), + .class_init = usb2_ctrl_regs_class_init, + .instance_init = usb2_ctrl_regs_init, +}; + +static void usb2_ctrl_regs_register_types(void) +{ + type_register_static(&usb2_ctrl_regs_info); +} + +type_init(usb2_ctrl_regs_register_types) diff --git a/hw/vfio/Kconfig b/hw/vfio/Kconfig new file mode 100644 index 000000000..7cdba0560 --- /dev/null +++ b/hw/vfio/Kconfig @@ -0,0 +1,43 @@ +config VFIO + bool + depends on LINUX + +config VFIO_PCI + bool + default y + select VFIO + select EDID + depends on LINUX && PCI + +config VFIO_CCW + bool + default y + select VFIO + depends on LINUX && S390_CCW_VIRTIO + +config VFIO_PLATFORM + bool + default y + select VFIO + depends on LINUX && PLATFORM_BUS + +config VFIO_XGMAC + bool + default y + depends on VFIO_PLATFORM + +config VFIO_AMD_XGBE + bool + default y + depends on VFIO_PLATFORM + +config VFIO_AP + bool + default y + select VFIO + depends on LINUX && S390_CCW_VIRTIO + +config VFIO_IGD + bool + default y if PC_PCI + depends on VFIO_PCI diff --git a/hw/vfio/amd-xgbe.c b/hw/vfio/amd-xgbe.c new file mode 100644 index 000000000..96bd608b8 --- /dev/null +++ b/hw/vfio/amd-xgbe.c @@ -0,0 +1,61 @@ +/* + * AMD XGBE VFIO device + * + * Copyright Linaro Limited, 2015 + * + * Authors: + * Eric Auger + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + * + */ + +#include "qemu/osdep.h" +#include "hw/vfio/vfio-amd-xgbe.h" +#include "migration/vmstate.h" +#include "qemu/module.h" + +static void amd_xgbe_realize(DeviceState *dev, Error **errp) +{ + VFIOPlatformDevice *vdev = VFIO_PLATFORM_DEVICE(dev); + VFIOAmdXgbeDeviceClass *k = VFIO_AMD_XGBE_DEVICE_GET_CLASS(dev); + + vdev->compat = g_strdup("amd,xgbe-seattle-v1a"); + vdev->num_compat = 1; + + k->parent_realize(dev, errp); +} + +static const VMStateDescription vfio_platform_amd_xgbe_vmstate = { + .name = "vfio-amd-xgbe", + .unmigratable = 1, +}; + +static void vfio_amd_xgbe_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + VFIOAmdXgbeDeviceClass *vcxc = + VFIO_AMD_XGBE_DEVICE_CLASS(klass); + device_class_set_parent_realize(dc, amd_xgbe_realize, + &vcxc->parent_realize); + dc->desc = "VFIO AMD XGBE"; + dc->vmsd = &vfio_platform_amd_xgbe_vmstate; + /* Supported by TYPE_VIRT_MACHINE */ + dc->user_creatable = true; +} + +static const TypeInfo vfio_amd_xgbe_dev_info = { + .name = TYPE_VFIO_AMD_XGBE, + .parent = TYPE_VFIO_PLATFORM, + .instance_size = sizeof(VFIOAmdXgbeDevice), + .class_init = vfio_amd_xgbe_class_init, + .class_size = sizeof(VFIOAmdXgbeDeviceClass), +}; + +static void register_amd_xgbe_dev_type(void) +{ + type_register_static(&vfio_amd_xgbe_dev_info); +} + +type_init(register_amd_xgbe_dev_type) diff --git a/hw/vfio/ap.c b/hw/vfio/ap.c new file mode 100644 index 000000000..e0dd561e8 --- /dev/null +++ b/hw/vfio/ap.c @@ -0,0 +1,186 @@ +/* + * VFIO based AP matrix device assignment + * + * Copyright 2018 IBM Corp. + * Author(s): Tony Krowiak + * Halil Pasic + * + * This work is licensed under the terms of the GNU GPL, version 2 or (at + * your option) any later version. See the COPYING file in the top-level + * directory. 
+ */ + +#include "qemu/osdep.h" +#include +#include +#include "qapi/error.h" +#include "hw/vfio/vfio.h" +#include "hw/vfio/vfio-common.h" +#include "hw/s390x/ap-device.h" +#include "qemu/error-report.h" +#include "qemu/module.h" +#include "qemu/option.h" +#include "qemu/config-file.h" +#include "kvm/kvm_s390x.h" +#include "migration/vmstate.h" +#include "hw/qdev-properties.h" +#include "hw/s390x/ap-bridge.h" +#include "exec/address-spaces.h" +#include "qom/object.h" + +#define TYPE_VFIO_AP_DEVICE "vfio-ap" + +struct VFIOAPDevice { + APDevice apdev; + VFIODevice vdev; +}; + +OBJECT_DECLARE_SIMPLE_TYPE(VFIOAPDevice, VFIO_AP_DEVICE) + +static void vfio_ap_compute_needs_reset(VFIODevice *vdev) +{ + vdev->needs_reset = false; +} + +/* + * We don't need vfio_hot_reset_multi and vfio_eoi operations for + * vfio-ap device now. + */ +struct VFIODeviceOps vfio_ap_ops = { + .vfio_compute_needs_reset = vfio_ap_compute_needs_reset, +}; + +static void vfio_ap_put_device(VFIOAPDevice *vapdev) +{ + g_free(vapdev->vdev.name); + vfio_put_base_device(&vapdev->vdev); +} + +static VFIOGroup *vfio_ap_get_group(VFIOAPDevice *vapdev, Error **errp) +{ + GError *gerror = NULL; + char *symlink, *group_path; + int groupid; + + symlink = g_strdup_printf("%s/iommu_group", vapdev->vdev.sysfsdev); + group_path = g_file_read_link(symlink, &gerror); + g_free(symlink); + + if (!group_path) { + error_setg(errp, "%s: no iommu_group found for %s: %s", + TYPE_VFIO_AP_DEVICE, vapdev->vdev.sysfsdev, gerror->message); + g_error_free(gerror); + return NULL; + } + + if (sscanf(basename(group_path), "%d", &groupid) != 1) { + error_setg(errp, "vfio: failed to read %s", group_path); + g_free(group_path); + return NULL; + } + + g_free(group_path); + + return vfio_get_group(groupid, &address_space_memory, errp); +} + +static void vfio_ap_realize(DeviceState *dev, Error **errp) +{ + int ret; + char *mdevid; + VFIOGroup *vfio_group; + APDevice *apdev = AP_DEVICE(dev); + VFIOAPDevice *vapdev = VFIO_AP_DEVICE(apdev); + + vfio_group = vfio_ap_get_group(vapdev, errp); + if (!vfio_group) { + return; + } + + vapdev->vdev.ops = &vfio_ap_ops; + vapdev->vdev.type = VFIO_DEVICE_TYPE_AP; + mdevid = basename(vapdev->vdev.sysfsdev); + vapdev->vdev.name = g_strdup_printf("%s", mdevid); + vapdev->vdev.dev = dev; + + /* + * vfio-ap devices operate in a way compatible with discarding of + * memory in RAM blocks, as no pages are pinned in the host. + * This needs to be set before vfio_get_device() for vfio common to + * handle ram_block_discard_disable(). 
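+     *
+     * (Put differently: since nothing is pinned, guest-initiated
+     * discards such as ballooning cannot leave stale host mappings
+     * behind, so the common vfio code need not disable discarding for
+     * this device.)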
+ */ + vapdev->vdev.ram_block_discard_allowed = true; + + ret = vfio_get_device(vfio_group, mdevid, &vapdev->vdev, errp); + if (ret) { + goto out_get_dev_err; + } + + return; + +out_get_dev_err: + vfio_ap_put_device(vapdev); + vfio_put_group(vfio_group); +} + +static void vfio_ap_unrealize(DeviceState *dev) +{ + APDevice *apdev = AP_DEVICE(dev); + VFIOAPDevice *vapdev = VFIO_AP_DEVICE(apdev); + VFIOGroup *group = vapdev->vdev.group; + + vfio_ap_put_device(vapdev); + vfio_put_group(group); +} + +static Property vfio_ap_properties[] = { + DEFINE_PROP_STRING("sysfsdev", VFIOAPDevice, vdev.sysfsdev), + DEFINE_PROP_END_OF_LIST(), +}; + +static void vfio_ap_reset(DeviceState *dev) +{ + int ret; + APDevice *apdev = AP_DEVICE(dev); + VFIOAPDevice *vapdev = VFIO_AP_DEVICE(apdev); + + ret = ioctl(vapdev->vdev.fd, VFIO_DEVICE_RESET); + if (ret) { + error_report("%s: failed to reset %s device: %s", __func__, + vapdev->vdev.name, strerror(errno)); + } +} + +static const VMStateDescription vfio_ap_vmstate = { + .name = "vfio-ap", + .unmigratable = 1, +}; + +static void vfio_ap_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + device_class_set_props(dc, vfio_ap_properties); + dc->vmsd = &vfio_ap_vmstate; + dc->desc = "VFIO-based AP device assignment"; + set_bit(DEVICE_CATEGORY_MISC, dc->categories); + dc->realize = vfio_ap_realize; + dc->unrealize = vfio_ap_unrealize; + dc->hotpluggable = true; + dc->reset = vfio_ap_reset; + dc->bus_type = TYPE_AP_BUS; +} + +static const TypeInfo vfio_ap_info = { + .name = TYPE_VFIO_AP_DEVICE, + .parent = TYPE_AP_DEVICE, + .instance_size = sizeof(VFIOAPDevice), + .class_init = vfio_ap_class_init, +}; + +static void vfio_ap_type_init(void) +{ + type_register_static(&vfio_ap_info); +} + +type_init(vfio_ap_type_init) diff --git a/hw/vfio/calxeda-xgmac.c b/hw/vfio/calxeda-xgmac.c new file mode 100644 index 000000000..87c382e73 --- /dev/null +++ b/hw/vfio/calxeda-xgmac.c @@ -0,0 +1,61 @@ +/* + * calxeda xgmac VFIO device + * + * Copyright Linaro Limited, 2014 + * + * Authors: + * Eric Auger + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. 
+ * + */ + +#include "qemu/osdep.h" +#include "hw/vfio/vfio-calxeda-xgmac.h" +#include "migration/vmstate.h" +#include "qemu/module.h" + +static void calxeda_xgmac_realize(DeviceState *dev, Error **errp) +{ + VFIOPlatformDevice *vdev = VFIO_PLATFORM_DEVICE(dev); + VFIOCalxedaXgmacDeviceClass *k = VFIO_CALXEDA_XGMAC_DEVICE_GET_CLASS(dev); + + vdev->compat = g_strdup("calxeda,hb-xgmac"); + vdev->num_compat = 1; + + k->parent_realize(dev, errp); +} + +static const VMStateDescription vfio_platform_calxeda_xgmac_vmstate = { + .name = "vfio-calxeda-xgmac", + .unmigratable = 1, +}; + +static void vfio_calxeda_xgmac_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + VFIOCalxedaXgmacDeviceClass *vcxc = + VFIO_CALXEDA_XGMAC_DEVICE_CLASS(klass); + device_class_set_parent_realize(dc, calxeda_xgmac_realize, + &vcxc->parent_realize); + dc->desc = "VFIO Calxeda XGMAC"; + dc->vmsd = &vfio_platform_calxeda_xgmac_vmstate; + /* Supported by TYPE_VIRT_MACHINE */ + dc->user_creatable = true; +} + +static const TypeInfo vfio_calxeda_xgmac_dev_info = { + .name = TYPE_VFIO_CALXEDA_XGMAC, + .parent = TYPE_VFIO_PLATFORM, + .instance_size = sizeof(VFIOCalxedaXgmacDevice), + .class_init = vfio_calxeda_xgmac_class_init, + .class_size = sizeof(VFIOCalxedaXgmacDeviceClass), +}; + +static void register_calxeda_xgmac_dev_type(void) +{ + type_register_static(&vfio_calxeda_xgmac_dev_info); +} + +type_init(register_calxeda_xgmac_dev_type) diff --git a/hw/vfio/ccw.c b/hw/vfio/ccw.c new file mode 100644 index 000000000..035473766 --- /dev/null +++ b/hw/vfio/ccw.c @@ -0,0 +1,791 @@ +/* + * vfio based subchannel assignment support + * + * Copyright 2017 IBM Corp. + * Copyright 2019 Red Hat, Inc. + * + * Author(s): Dong Jia Shi + * Xiao Feng Ren + * Pierre Morel + * Cornelia Huck + * + * This work is licensed under the terms of the GNU GPL, version 2 or (at + * your option) any later version. See the COPYING file in the top-level + * directory. + */ + +#include "qemu/osdep.h" +#include +#include +#include + +#include "qapi/error.h" +#include "hw/vfio/vfio.h" +#include "hw/vfio/vfio-common.h" +#include "hw/s390x/s390-ccw.h" +#include "hw/s390x/vfio-ccw.h" +#include "hw/qdev-properties.h" +#include "hw/s390x/ccw-device.h" +#include "exec/address-spaces.h" +#include "qemu/error-report.h" +#include "qemu/main-loop.h" +#include "qemu/module.h" + +struct VFIOCCWDevice { + S390CCWDevice cdev; + VFIODevice vdev; + uint64_t io_region_size; + uint64_t io_region_offset; + struct ccw_io_region *io_region; + uint64_t async_cmd_region_size; + uint64_t async_cmd_region_offset; + struct ccw_cmd_region *async_cmd_region; + uint64_t schib_region_size; + uint64_t schib_region_offset; + struct ccw_schib_region *schib_region; + uint64_t crw_region_size; + uint64_t crw_region_offset; + struct ccw_crw_region *crw_region; + EventNotifier io_notifier; + EventNotifier crw_notifier; + EventNotifier req_notifier; + bool force_orb_pfch; + bool warned_orb_pfch; +}; + +static inline void warn_once_pfch(VFIOCCWDevice *vcdev, SubchDev *sch, + const char *msg) +{ + warn_report_once_cond(&vcdev->warned_orb_pfch, + "vfio-ccw (devno %x.%x.%04x): %s", + sch->cssid, sch->ssid, sch->devno, msg); +} + +static void vfio_ccw_compute_needs_reset(VFIODevice *vdev) +{ + vdev->needs_reset = false; +} + +/* + * We don't need vfio_hot_reset_multi and vfio_eoi operations for + * vfio_ccw device now. 
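+ *
+ * (Only vfio_compute_needs_reset is wired up below; the remaining
+ * callbacks stay unset, as nothing on the channel I/O path invokes
+ * them.)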
+ */ +struct VFIODeviceOps vfio_ccw_ops = { + .vfio_compute_needs_reset = vfio_ccw_compute_needs_reset, +}; + +static IOInstEnding vfio_ccw_handle_request(SubchDev *sch) +{ + S390CCWDevice *cdev = sch->driver_data; + VFIOCCWDevice *vcdev = DO_UPCAST(VFIOCCWDevice, cdev, cdev); + struct ccw_io_region *region = vcdev->io_region; + int ret; + + if (!(sch->orb.ctrl0 & ORB_CTRL0_MASK_PFCH) && vcdev->force_orb_pfch) { + sch->orb.ctrl0 |= ORB_CTRL0_MASK_PFCH; + warn_once_pfch(vcdev, sch, "PFCH flag forced"); + } + + QEMU_BUILD_BUG_ON(sizeof(region->orb_area) != sizeof(ORB)); + QEMU_BUILD_BUG_ON(sizeof(region->scsw_area) != sizeof(SCSW)); + QEMU_BUILD_BUG_ON(sizeof(region->irb_area) != sizeof(IRB)); + + memset(region, 0, sizeof(*region)); + + memcpy(region->orb_area, &sch->orb, sizeof(ORB)); + memcpy(region->scsw_area, &sch->curr_status.scsw, sizeof(SCSW)); + +again: + ret = pwrite(vcdev->vdev.fd, region, + vcdev->io_region_size, vcdev->io_region_offset); + if (ret != vcdev->io_region_size) { + if (errno == EAGAIN) { + goto again; + } + error_report("vfio-ccw: write I/O region failed with errno=%d", errno); + ret = errno ? -errno : -EFAULT; + } else { + ret = 0; + } + switch (ret) { + case 0: + return IOINST_CC_EXPECTED; + case -EBUSY: + return IOINST_CC_BUSY; + case -ENODEV: + case -EACCES: + return IOINST_CC_NOT_OPERATIONAL; + case -EFAULT: + default: + sch_gen_unit_exception(sch); + css_inject_io_interrupt(sch); + return IOINST_CC_EXPECTED; + } +} + +static IOInstEnding vfio_ccw_handle_store(SubchDev *sch) +{ + S390CCWDevice *cdev = sch->driver_data; + VFIOCCWDevice *vcdev = DO_UPCAST(VFIOCCWDevice, cdev, cdev); + SCHIB *schib = &sch->curr_status; + struct ccw_schib_region *region = vcdev->schib_region; + SCHIB *s; + int ret; + + /* schib region not available so nothing else to do */ + if (!region) { + return IOINST_CC_EXPECTED; + } + + memset(region, 0, sizeof(*region)); + ret = pread(vcdev->vdev.fd, region, vcdev->schib_region_size, + vcdev->schib_region_offset); + + if (ret == -1) { + /* + * Device is probably damaged, but store subchannel does not + * have a nonzero cc defined for this scenario. Log an error, + * and presume things are otherwise fine. + */ + error_report("vfio-ccw: store region read failed with errno=%d", errno); + return IOINST_CC_EXPECTED; + } + + /* + * Selectively copy path-related bits of the SCHIB, + * rather than copying the entire struct. + */ + s = (SCHIB *)region->schib_area; + schib->pmcw.pnom = s->pmcw.pnom; + schib->pmcw.lpum = s->pmcw.lpum; + schib->pmcw.pam = s->pmcw.pam; + schib->pmcw.pom = s->pmcw.pom; + + if (s->scsw.flags & SCSW_FLAGS_MASK_PNO) { + schib->scsw.flags |= SCSW_FLAGS_MASK_PNO; + } + + return IOINST_CC_EXPECTED; +} + +static int vfio_ccw_handle_clear(SubchDev *sch) +{ + S390CCWDevice *cdev = sch->driver_data; + VFIOCCWDevice *vcdev = DO_UPCAST(VFIOCCWDevice, cdev, cdev); + struct ccw_cmd_region *region = vcdev->async_cmd_region; + int ret; + + if (!vcdev->async_cmd_region) { + /* Async command region not available, fall back to emulation */ + return -ENOSYS; + } + + memset(region, 0, sizeof(*region)); + region->command = VFIO_CCW_ASYNC_CMD_CSCH; + +again: + ret = pwrite(vcdev->vdev.fd, region, + vcdev->async_cmd_region_size, vcdev->async_cmd_region_offset); + if (ret != vcdev->async_cmd_region_size) { + if (errno == EAGAIN) { + goto again; + } + error_report("vfio-ccw: write cmd region failed with errno=%d", errno); + ret = errno ? 
-errno : -EFAULT; + } else { + ret = 0; + } + switch (ret) { + case 0: + case -ENODEV: + case -EACCES: + return ret; + case -EFAULT: + default: + sch_gen_unit_exception(sch); + css_inject_io_interrupt(sch); + return 0; + } +} + +static int vfio_ccw_handle_halt(SubchDev *sch) +{ + S390CCWDevice *cdev = sch->driver_data; + VFIOCCWDevice *vcdev = DO_UPCAST(VFIOCCWDevice, cdev, cdev); + struct ccw_cmd_region *region = vcdev->async_cmd_region; + int ret; + + if (!vcdev->async_cmd_region) { + /* Async command region not available, fall back to emulation */ + return -ENOSYS; + } + + memset(region, 0, sizeof(*region)); + region->command = VFIO_CCW_ASYNC_CMD_HSCH; + +again: + ret = pwrite(vcdev->vdev.fd, region, + vcdev->async_cmd_region_size, vcdev->async_cmd_region_offset); + if (ret != vcdev->async_cmd_region_size) { + if (errno == EAGAIN) { + goto again; + } + error_report("vfio-ccw: write cmd region failed with errno=%d", errno); + ret = errno ? -errno : -EFAULT; + } else { + ret = 0; + } + switch (ret) { + case 0: + case -EBUSY: + case -ENODEV: + case -EACCES: + return ret; + case -EFAULT: + default: + sch_gen_unit_exception(sch); + css_inject_io_interrupt(sch); + return 0; + } +} + +static void vfio_ccw_reset(DeviceState *dev) +{ + CcwDevice *ccw_dev = DO_UPCAST(CcwDevice, parent_obj, dev); + S390CCWDevice *cdev = DO_UPCAST(S390CCWDevice, parent_obj, ccw_dev); + VFIOCCWDevice *vcdev = DO_UPCAST(VFIOCCWDevice, cdev, cdev); + + ioctl(vcdev->vdev.fd, VFIO_DEVICE_RESET); +} + +static void vfio_ccw_crw_read(VFIOCCWDevice *vcdev) +{ + struct ccw_crw_region *region = vcdev->crw_region; + CRW crw; + int size; + + /* Keep reading CRWs as long as data is returned */ + do { + memset(region, 0, sizeof(*region)); + size = pread(vcdev->vdev.fd, region, vcdev->crw_region_size, + vcdev->crw_region_offset); + + if (size == -1) { + error_report("vfio-ccw: Read crw region failed with errno=%d", + errno); + break; + } + + if (region->crw == 0) { + /* No more CRWs to queue */ + break; + } + + memcpy(&crw, ®ion->crw, sizeof(CRW)); + + css_crw_add_to_queue(crw); + } while (1); +} + +static void vfio_ccw_req_notifier_handler(void *opaque) +{ + VFIOCCWDevice *vcdev = opaque; + Error *err = NULL; + + if (!event_notifier_test_and_clear(&vcdev->req_notifier)) { + return; + } + + qdev_unplug(DEVICE(vcdev), &err); + if (err) { + warn_reportf_err(err, VFIO_MSG_PREFIX, vcdev->vdev.name); + } +} + +static void vfio_ccw_crw_notifier_handler(void *opaque) +{ + VFIOCCWDevice *vcdev = opaque; + + while (event_notifier_test_and_clear(&vcdev->crw_notifier)) { + vfio_ccw_crw_read(vcdev); + } +} + +static void vfio_ccw_io_notifier_handler(void *opaque) +{ + VFIOCCWDevice *vcdev = opaque; + struct ccw_io_region *region = vcdev->io_region; + S390CCWDevice *cdev = S390_CCW_DEVICE(vcdev); + CcwDevice *ccw_dev = CCW_DEVICE(cdev); + SubchDev *sch = ccw_dev->sch; + SCHIB *schib = &sch->curr_status; + SCSW s; + IRB irb; + ESW esw; + int size; + + if (!event_notifier_test_and_clear(&vcdev->io_notifier)) { + return; + } + + size = pread(vcdev->vdev.fd, region, vcdev->io_region_size, + vcdev->io_region_offset); + if (size == -1) { + switch (errno) { + case ENODEV: + /* Generate a deferred cc 3 condition. */ + schib->scsw.flags |= SCSW_FLAGS_MASK_CC; + schib->scsw.ctrl &= ~SCSW_CTRL_MASK_STCTL; + schib->scsw.ctrl |= (SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND); + goto read_err; + case EFAULT: + /* Memory problem, generate channel data check. 
*/ + schib->scsw.ctrl &= ~SCSW_ACTL_START_PEND; + schib->scsw.cstat = SCSW_CSTAT_DATA_CHECK; + schib->scsw.ctrl &= ~SCSW_CTRL_MASK_STCTL; + schib->scsw.ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY | + SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND; + goto read_err; + default: + /* Error, generate channel program check. */ + schib->scsw.ctrl &= ~SCSW_ACTL_START_PEND; + schib->scsw.cstat = SCSW_CSTAT_PROG_CHECK; + schib->scsw.ctrl &= ~SCSW_CTRL_MASK_STCTL; + schib->scsw.ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY | + SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND; + goto read_err; + } + } else if (size != vcdev->io_region_size) { + /* Information transfer error, generate channel-control check. */ + schib->scsw.ctrl &= ~SCSW_ACTL_START_PEND; + schib->scsw.cstat = SCSW_CSTAT_CHN_CTRL_CHK; + schib->scsw.ctrl &= ~SCSW_CTRL_MASK_STCTL; + schib->scsw.ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY | + SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND; + goto read_err; + } + + memcpy(&irb, region->irb_area, sizeof(IRB)); + + /* Update control block via irb. */ + s = schib->scsw; + copy_scsw_to_guest(&s, &irb.scsw); + schib->scsw = s; + + copy_esw_to_guest(&esw, &irb.esw); + sch->esw = esw; + + /* If a uint check is pending, copy sense data. */ + if ((schib->scsw.dstat & SCSW_DSTAT_UNIT_CHECK) && + (schib->pmcw.chars & PMCW_CHARS_MASK_CSENSE)) { + memcpy(sch->sense_data, irb.ecw, sizeof(irb.ecw)); + } + +read_err: + css_inject_io_interrupt(sch); +} + +static void vfio_ccw_register_irq_notifier(VFIOCCWDevice *vcdev, + unsigned int irq, + Error **errp) +{ + VFIODevice *vdev = &vcdev->vdev; + struct vfio_irq_info *irq_info; + size_t argsz; + int fd; + EventNotifier *notifier; + IOHandler *fd_read; + + switch (irq) { + case VFIO_CCW_IO_IRQ_INDEX: + notifier = &vcdev->io_notifier; + fd_read = vfio_ccw_io_notifier_handler; + break; + case VFIO_CCW_CRW_IRQ_INDEX: + notifier = &vcdev->crw_notifier; + fd_read = vfio_ccw_crw_notifier_handler; + break; + case VFIO_CCW_REQ_IRQ_INDEX: + notifier = &vcdev->req_notifier; + fd_read = vfio_ccw_req_notifier_handler; + break; + default: + error_setg(errp, "vfio: Unsupported device irq(%d)", irq); + return; + } + + if (vdev->num_irqs < irq + 1) { + error_setg(errp, "vfio: IRQ %u not available (number of irqs %u)", + irq, vdev->num_irqs); + return; + } + + argsz = sizeof(*irq_info); + irq_info = g_malloc0(argsz); + irq_info->index = irq; + irq_info->argsz = argsz; + if (ioctl(vdev->fd, VFIO_DEVICE_GET_IRQ_INFO, + irq_info) < 0 || irq_info->count < 1) { + error_setg_errno(errp, errno, "vfio: Error getting irq info"); + goto out_free_info; + } + + if (event_notifier_init(notifier, 0)) { + error_setg_errno(errp, errno, + "vfio: Unable to init event notifier for irq (%d)", + irq); + goto out_free_info; + } + + fd = event_notifier_get_fd(notifier); + qemu_set_fd_handler(fd, fd_read, NULL, vcdev); + + if (vfio_set_irq_signaling(vdev, irq, 0, + VFIO_IRQ_SET_ACTION_TRIGGER, fd, errp)) { + qemu_set_fd_handler(fd, NULL, NULL, vcdev); + event_notifier_cleanup(notifier); + } + +out_free_info: + g_free(irq_info); +} + +static void vfio_ccw_unregister_irq_notifier(VFIOCCWDevice *vcdev, + unsigned int irq) +{ + Error *err = NULL; + EventNotifier *notifier; + + switch (irq) { + case VFIO_CCW_IO_IRQ_INDEX: + notifier = &vcdev->io_notifier; + break; + case VFIO_CCW_CRW_IRQ_INDEX: + notifier = &vcdev->crw_notifier; + break; + case VFIO_CCW_REQ_IRQ_INDEX: + notifier = &vcdev->req_notifier; + break; + default: + error_report("vfio: Unsupported device irq(%d)", irq); + return; + } + + if 
(vfio_set_irq_signaling(&vcdev->vdev, irq, 0, + VFIO_IRQ_SET_ACTION_TRIGGER, -1, &err)) { + warn_reportf_err(err, VFIO_MSG_PREFIX, vcdev->vdev.name); + } + + qemu_set_fd_handler(event_notifier_get_fd(notifier), + NULL, NULL, vcdev); + event_notifier_cleanup(notifier); +} + +static void vfio_ccw_get_region(VFIOCCWDevice *vcdev, Error **errp) +{ + VFIODevice *vdev = &vcdev->vdev; + struct vfio_region_info *info; + int ret; + + /* Sanity check device */ + if (!(vdev->flags & VFIO_DEVICE_FLAGS_CCW)) { + error_setg(errp, "vfio: Um, this isn't a vfio-ccw device"); + return; + } + + /* + * We always expect at least the I/O region to be present. We also + * may have a variable number of regions governed by capabilities. + */ + if (vdev->num_regions < VFIO_CCW_CONFIG_REGION_INDEX + 1) { + error_setg(errp, "vfio: too few regions (%u), expected at least %u", + vdev->num_regions, VFIO_CCW_CONFIG_REGION_INDEX + 1); + return; + } + + ret = vfio_get_region_info(vdev, VFIO_CCW_CONFIG_REGION_INDEX, &info); + if (ret) { + error_setg_errno(errp, -ret, "vfio: Error getting config info"); + return; + } + + vcdev->io_region_size = info->size; + if (sizeof(*vcdev->io_region) != vcdev->io_region_size) { + error_setg(errp, "vfio: Unexpected size of the I/O region"); + goto out_err; + } + + vcdev->io_region_offset = info->offset; + vcdev->io_region = g_malloc0(info->size); + g_free(info); + + /* check for the optional async command region */ + ret = vfio_get_dev_region_info(vdev, VFIO_REGION_TYPE_CCW, + VFIO_REGION_SUBTYPE_CCW_ASYNC_CMD, &info); + if (!ret) { + vcdev->async_cmd_region_size = info->size; + if (sizeof(*vcdev->async_cmd_region) != vcdev->async_cmd_region_size) { + error_setg(errp, "vfio: Unexpected size of the async cmd region"); + goto out_err; + } + vcdev->async_cmd_region_offset = info->offset; + vcdev->async_cmd_region = g_malloc0(info->size); + g_free(info); + } + + ret = vfio_get_dev_region_info(vdev, VFIO_REGION_TYPE_CCW, + VFIO_REGION_SUBTYPE_CCW_SCHIB, &info); + if (!ret) { + vcdev->schib_region_size = info->size; + if (sizeof(*vcdev->schib_region) != vcdev->schib_region_size) { + error_setg(errp, "vfio: Unexpected size of the schib region"); + goto out_err; + } + vcdev->schib_region_offset = info->offset; + vcdev->schib_region = g_malloc(info->size); + g_free(info); + } + + ret = vfio_get_dev_region_info(vdev, VFIO_REGION_TYPE_CCW, + VFIO_REGION_SUBTYPE_CCW_CRW, &info); + + if (!ret) { + vcdev->crw_region_size = info->size; + if (sizeof(*vcdev->crw_region) != vcdev->crw_region_size) { + error_setg(errp, "vfio: Unexpected size of the CRW region"); + goto out_err; + } + vcdev->crw_region_offset = info->offset; + vcdev->crw_region = g_malloc(info->size); + g_free(info); + } + + return; + +out_err: + g_free(vcdev->crw_region); + g_free(vcdev->schib_region); + g_free(vcdev->async_cmd_region); + g_free(vcdev->io_region); + g_free(info); + return; +} + +static void vfio_ccw_put_region(VFIOCCWDevice *vcdev) +{ + g_free(vcdev->crw_region); + g_free(vcdev->schib_region); + g_free(vcdev->async_cmd_region); + g_free(vcdev->io_region); +} + +static void vfio_ccw_put_device(VFIOCCWDevice *vcdev) +{ + g_free(vcdev->vdev.name); + vfio_put_base_device(&vcdev->vdev); +} + +static void vfio_ccw_get_device(VFIOGroup *group, VFIOCCWDevice *vcdev, + Error **errp) +{ + char *name = g_strdup_printf("%x.%x.%04x", vcdev->cdev.hostid.cssid, + vcdev->cdev.hostid.ssid, + vcdev->cdev.hostid.devid); + VFIODevice *vbasedev; + + QLIST_FOREACH(vbasedev, &group->device_list, next) { + if (strcmp(vbasedev->name, name) == 0) { 
+ error_setg(errp, "vfio: subchannel %s has already been attached", + name); + goto out_err; + } + } + + /* + * All vfio-ccw devices are believed to operate in a way compatible with + * discarding of memory in RAM blocks, ie. pages pinned in the host are + * in the current working set of the guest driver and therefore never + * overlap e.g., with pages available to the guest balloon driver. This + * needs to be set before vfio_get_device() for vfio common to handle + * ram_block_discard_disable(). + */ + vcdev->vdev.ram_block_discard_allowed = true; + + if (vfio_get_device(group, vcdev->cdev.mdevid, &vcdev->vdev, errp)) { + goto out_err; + } + + vcdev->vdev.ops = &vfio_ccw_ops; + vcdev->vdev.type = VFIO_DEVICE_TYPE_CCW; + vcdev->vdev.name = name; + vcdev->vdev.dev = &vcdev->cdev.parent_obj.parent_obj; + + return; + +out_err: + g_free(name); +} + +static VFIOGroup *vfio_ccw_get_group(S390CCWDevice *cdev, Error **errp) +{ + char *tmp, group_path[PATH_MAX]; + ssize_t len; + int groupid; + + tmp = g_strdup_printf("/sys/bus/css/devices/%x.%x.%04x/%s/iommu_group", + cdev->hostid.cssid, cdev->hostid.ssid, + cdev->hostid.devid, cdev->mdevid); + len = readlink(tmp, group_path, sizeof(group_path)); + g_free(tmp); + + if (len <= 0 || len >= sizeof(group_path)) { + error_setg(errp, "vfio: no iommu_group found"); + return NULL; + } + + group_path[len] = 0; + + if (sscanf(basename(group_path), "%d", &groupid) != 1) { + error_setg(errp, "vfio: failed to read %s", group_path); + return NULL; + } + + return vfio_get_group(groupid, &address_space_memory, errp); +} + +static void vfio_ccw_realize(DeviceState *dev, Error **errp) +{ + VFIOGroup *group; + CcwDevice *ccw_dev = DO_UPCAST(CcwDevice, parent_obj, dev); + S390CCWDevice *cdev = DO_UPCAST(S390CCWDevice, parent_obj, ccw_dev); + VFIOCCWDevice *vcdev = DO_UPCAST(VFIOCCWDevice, cdev, cdev); + S390CCWDeviceClass *cdc = S390_CCW_DEVICE_GET_CLASS(cdev); + Error *err = NULL; + + /* Call the class init function for subchannel. */ + if (cdc->realize) { + cdc->realize(cdev, vcdev->vdev.sysfsdev, &err); + if (err) { + goto out_err_propagate; + } + } + + group = vfio_ccw_get_group(cdev, &err); + if (!group) { + goto out_group_err; + } + + vfio_ccw_get_device(group, vcdev, &err); + if (err) { + goto out_device_err; + } + + vfio_ccw_get_region(vcdev, &err); + if (err) { + goto out_region_err; + } + + vfio_ccw_register_irq_notifier(vcdev, VFIO_CCW_IO_IRQ_INDEX, &err); + if (err) { + goto out_io_notifier_err; + } + + if (vcdev->crw_region) { + vfio_ccw_register_irq_notifier(vcdev, VFIO_CCW_CRW_IRQ_INDEX, &err); + if (err) { + goto out_irq_notifier_err; + } + } + + vfio_ccw_register_irq_notifier(vcdev, VFIO_CCW_REQ_IRQ_INDEX, &err); + if (err) { + /* + * Report this error, but do not make it a failing condition. + * Lack of this IRQ in the host does not prevent normal operation. 
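+ * (A host kernel that does not offer the REQ notifier simply cannot ask us to unplug the device.)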
+ */ + error_report_err(err); + } + + return; + +out_irq_notifier_err: + vfio_ccw_unregister_irq_notifier(vcdev, VFIO_CCW_REQ_IRQ_INDEX); + vfio_ccw_unregister_irq_notifier(vcdev, VFIO_CCW_CRW_IRQ_INDEX); + vfio_ccw_unregister_irq_notifier(vcdev, VFIO_CCW_IO_IRQ_INDEX); +out_io_notifier_err: + vfio_ccw_put_region(vcdev); +out_region_err: + vfio_ccw_put_device(vcdev); +out_device_err: + vfio_put_group(group); +out_group_err: + if (cdc->unrealize) { + cdc->unrealize(cdev); + } +out_err_propagate: + error_propagate(errp, err); +} + +static void vfio_ccw_unrealize(DeviceState *dev) +{ + CcwDevice *ccw_dev = DO_UPCAST(CcwDevice, parent_obj, dev); + S390CCWDevice *cdev = DO_UPCAST(S390CCWDevice, parent_obj, ccw_dev); + VFIOCCWDevice *vcdev = DO_UPCAST(VFIOCCWDevice, cdev, cdev); + S390CCWDeviceClass *cdc = S390_CCW_DEVICE_GET_CLASS(cdev); + VFIOGroup *group = vcdev->vdev.group; + + vfio_ccw_unregister_irq_notifier(vcdev, VFIO_CCW_REQ_IRQ_INDEX); + vfio_ccw_unregister_irq_notifier(vcdev, VFIO_CCW_CRW_IRQ_INDEX); + vfio_ccw_unregister_irq_notifier(vcdev, VFIO_CCW_IO_IRQ_INDEX); + vfio_ccw_put_region(vcdev); + vfio_ccw_put_device(vcdev); + vfio_put_group(group); + + if (cdc->unrealize) { + cdc->unrealize(cdev); + } +} + +static Property vfio_ccw_properties[] = { + DEFINE_PROP_STRING("sysfsdev", VFIOCCWDevice, vdev.sysfsdev), + DEFINE_PROP_BOOL("force-orb-pfch", VFIOCCWDevice, force_orb_pfch, false), + DEFINE_PROP_END_OF_LIST(), +}; + +static const VMStateDescription vfio_ccw_vmstate = { + .name = "vfio-ccw", + .unmigratable = 1, +}; + +static void vfio_ccw_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + S390CCWDeviceClass *cdc = S390_CCW_DEVICE_CLASS(klass); + + device_class_set_props(dc, vfio_ccw_properties); + dc->vmsd = &vfio_ccw_vmstate; + dc->desc = "VFIO-based subchannel assignment"; + set_bit(DEVICE_CATEGORY_MISC, dc->categories); + dc->realize = vfio_ccw_realize; + dc->unrealize = vfio_ccw_unrealize; + dc->reset = vfio_ccw_reset; + + cdc->handle_request = vfio_ccw_handle_request; + cdc->handle_halt = vfio_ccw_handle_halt; + cdc->handle_clear = vfio_ccw_handle_clear; + cdc->handle_store = vfio_ccw_handle_store; +} + +static const TypeInfo vfio_ccw_info = { + .name = TYPE_VFIO_CCW, + .parent = TYPE_S390_CCW, + .instance_size = sizeof(VFIOCCWDevice), + .class_init = vfio_ccw_class_init, +}; + +static void register_vfio_ccw_type(void) +{ + type_register_static(&vfio_ccw_info); +} + +type_init(register_vfio_ccw_type) diff --git a/hw/vfio/common.c b/hw/vfio/common.c new file mode 100644 index 000000000..080046e3f --- /dev/null +++ b/hw/vfio/common.c @@ -0,0 +1,2596 @@ +/* + * generic functions used by VFIO devices + * + * Copyright Red Hat, Inc. 2012 + * + * Authors: + * Alex Williamson + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + * + * Based on qemu-kvm device-assignment: + * Adapted for KVM by Qumranet. 
+ * Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com) + * Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com) + * Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com) + * Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com) + * Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com) + */ + +#include "qemu/osdep.h" +#include <sys/ioctl.h> +#ifdef CONFIG_KVM +#include <linux/kvm.h> +#endif +#include <linux/vfio.h> + +#include "hw/vfio/vfio-common.h" +#include "hw/vfio/vfio.h" +#include "exec/address-spaces.h" +#include "exec/memory.h" +#include "exec/ram_addr.h" +#include "hw/hw.h" +#include "qemu/error-report.h" +#include "qemu/main-loop.h" +#include "qemu/range.h" +#include "sysemu/kvm.h" +#include "sysemu/reset.h" +#include "sysemu/runstate.h" +#include "trace.h" +#include "qapi/error.h" +#include "migration/migration.h" + +VFIOGroupList vfio_group_list = + QLIST_HEAD_INITIALIZER(vfio_group_list); +static QLIST_HEAD(, VFIOAddressSpace) vfio_address_spaces = + QLIST_HEAD_INITIALIZER(vfio_address_spaces); + +#ifdef CONFIG_KVM +/* + * We have a single VFIO pseudo device per KVM VM. Once created it lives + * for the life of the VM. Closing the file descriptor only drops our + * reference to it and the device's reference to kvm. Therefore once + * initialized, this file descriptor is only released on QEMU exit and + * we'll re-use it should another vfio device be attached before then. + */ +static int vfio_kvm_device_fd = -1; +#endif + +/* + * Common VFIO interrupt disable + */ +void vfio_disable_irqindex(VFIODevice *vbasedev, int index) +{ + struct vfio_irq_set irq_set = { + .argsz = sizeof(irq_set), + .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER, + .index = index, + .start = 0, + .count = 0, + }; + + ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set); +} + +void vfio_unmask_single_irqindex(VFIODevice *vbasedev, int index) +{ + struct vfio_irq_set irq_set = { + .argsz = sizeof(irq_set), + .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_UNMASK, + .index = index, + .start = 0, + .count = 1, + }; + + ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set); +} + +void vfio_mask_single_irqindex(VFIODevice *vbasedev, int index) +{ + struct vfio_irq_set irq_set = { + .argsz = sizeof(irq_set), + .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_MASK, + .index = index, + .start = 0, + .count = 1, + }; + + ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set); +} + +static inline const char *action_to_str(int action) +{ + switch (action) { + case VFIO_IRQ_SET_ACTION_MASK: + return "MASK"; + case VFIO_IRQ_SET_ACTION_UNMASK: + return "UNMASK"; + case VFIO_IRQ_SET_ACTION_TRIGGER: + return "TRIGGER"; + default: + return "UNKNOWN ACTION"; + } +} + +static const char *index_to_str(VFIODevice *vbasedev, int index) +{ + if (vbasedev->type != VFIO_DEVICE_TYPE_PCI) { + return NULL; + } + + switch (index) { + case VFIO_PCI_INTX_IRQ_INDEX: + return "INTX"; + case VFIO_PCI_MSI_IRQ_INDEX: + return "MSI"; + case VFIO_PCI_MSIX_IRQ_INDEX: + return "MSIX"; + case VFIO_PCI_ERR_IRQ_INDEX: + return "ERR"; + case VFIO_PCI_REQ_IRQ_INDEX: + return "REQ"; + default: + return NULL; + } +} + +static int vfio_ram_block_discard_disable(VFIOContainer *container, bool state) +{ + switch (container->iommu_type) { + case VFIO_TYPE1v2_IOMMU: + case VFIO_TYPE1_IOMMU: + /* + * We support coordinated discarding of RAM via the RamDiscardManager.
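+ * Uncoordinated discards (e.g. a plain madvise(MADV_DONTNEED) outside the memory API) remain blocked.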
+ */ + return ram_block_uncoordinated_discard_disable(state); + default: + /* + * VFIO_SPAPR_TCE_IOMMU most probably works just fine with + * RamDiscardManager, however, it is completely untested. + * + * VFIO_SPAPR_TCE_v2_IOMMU with "DMA memory preregistering" does + * completely the opposite of managing mapping/pinning dynamically as + * required by RamDiscardManager. We would have to special-case sections + * with a RamDiscardManager. + */ + return ram_block_discard_disable(state); + } +} + +int vfio_set_irq_signaling(VFIODevice *vbasedev, int index, int subindex, + int action, int fd, Error **errp) +{ + struct vfio_irq_set *irq_set; + int argsz, ret = 0; + const char *name; + int32_t *pfd; + + argsz = sizeof(*irq_set) + sizeof(*pfd); + + irq_set = g_malloc0(argsz); + irq_set->argsz = argsz; + irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | action; + irq_set->index = index; + irq_set->start = subindex; + irq_set->count = 1; + pfd = (int32_t *)&irq_set->data; + *pfd = fd; + + if (ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, irq_set)) { + ret = -errno; + } + g_free(irq_set); + + if (!ret) { + return 0; + } + + error_setg_errno(errp, -ret, "VFIO_DEVICE_SET_IRQS failure"); + + name = index_to_str(vbasedev, index); + if (name) { + error_prepend(errp, "%s-%d: ", name, subindex); + } else { + error_prepend(errp, "index %d-%d: ", index, subindex); + } + error_prepend(errp, + "Failed to %s %s eventfd signaling for interrupt ", + fd < 0 ? "tear down" : "set up", action_to_str(action)); + return ret; +} + +/* + * IO Port/MMIO - Beware of the endians, VFIO is always little endian + */ +void vfio_region_write(void *opaque, hwaddr addr, + uint64_t data, unsigned size) +{ + VFIORegion *region = opaque; + VFIODevice *vbasedev = region->vbasedev; + union { + uint8_t byte; + uint16_t word; + uint32_t dword; + uint64_t qword; + } buf; + + switch (size) { + case 1: + buf.byte = data; + break; + case 2: + buf.word = cpu_to_le16(data); + break; + case 4: + buf.dword = cpu_to_le32(data); + break; + case 8: + buf.qword = cpu_to_le64(data); + break; + default: + hw_error("vfio: unsupported write size, %u bytes", size); + break; + } + + if (pwrite(vbasedev->fd, &buf, size, region->fd_offset + addr) != size) { + error_report("%s(%s:region%d+0x%"HWADDR_PRIx", 0x%"PRIx64 + ",%d) failed: %m", + __func__, vbasedev->name, region->nr, + addr, data, size); + } + + trace_vfio_region_write(vbasedev->name, region->nr, addr, data, size); + + /* + * A read or write to a BAR always signals an INTx EOI. This will + * do nothing if not pending (including not in INTx mode). We assume + * that a BAR access is in response to an interrupt and that BAR + * accesses will service the interrupt. Unfortunately, we don't know + * which access will service the interrupt, so we're potentially + * getting quite a few host interrupts per guest interrupt. 
+ */ + vbasedev->ops->vfio_eoi(vbasedev); +} + +uint64_t vfio_region_read(void *opaque, + hwaddr addr, unsigned size) +{ + VFIORegion *region = opaque; + VFIODevice *vbasedev = region->vbasedev; + union { + uint8_t byte; + uint16_t word; + uint32_t dword; + uint64_t qword; + } buf; + uint64_t data = 0; + + if (pread(vbasedev->fd, &buf, size, region->fd_offset + addr) != size) { + error_report("%s(%s:region%d+0x%"HWADDR_PRIx", %d) failed: %m", + __func__, vbasedev->name, region->nr, + addr, size); + return (uint64_t)-1; + } + switch (size) { + case 1: + data = buf.byte; + break; + case 2: + data = le16_to_cpu(buf.word); + break; + case 4: + data = le32_to_cpu(buf.dword); + break; + case 8: + data = le64_to_cpu(buf.qword); + break; + default: + hw_error("vfio: unsupported read size, %u bytes", size); + break; + } + + trace_vfio_region_read(vbasedev->name, region->nr, addr, size, data); + + /* Same as write above */ + vbasedev->ops->vfio_eoi(vbasedev); + + return data; +} + +const MemoryRegionOps vfio_region_ops = { + .read = vfio_region_read, + .write = vfio_region_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 1, + .max_access_size = 8, + }, + .impl = { + .min_access_size = 1, + .max_access_size = 8, + }, +}; + +/* + * Device state interfaces + */ + +bool vfio_mig_active(void) +{ + VFIOGroup *group; + VFIODevice *vbasedev; + + if (QLIST_EMPTY(&vfio_group_list)) { + return false; + } + + QLIST_FOREACH(group, &vfio_group_list, next) { + QLIST_FOREACH(vbasedev, &group->device_list, next) { + if (vbasedev->migration_blocker) { + return false; + } + } + } + return true; +} + +static bool vfio_devices_all_dirty_tracking(VFIOContainer *container) +{ + VFIOGroup *group; + VFIODevice *vbasedev; + MigrationState *ms = migrate_get_current(); + + if (!migration_is_setup_or_active(ms->state)) { + return false; + } + + QLIST_FOREACH(group, &container->group_list, container_next) { + QLIST_FOREACH(vbasedev, &group->device_list, next) { + VFIOMigration *migration = vbasedev->migration; + + if (!migration) { + return false; + } + + if ((vbasedev->pre_copy_dirty_page_tracking == ON_OFF_AUTO_OFF) + && (migration->device_state & VFIO_DEVICE_STATE_RUNNING)) { + return false; + } + } + } + return true; +} + +static bool vfio_devices_all_running_and_saving(VFIOContainer *container) +{ + VFIOGroup *group; + VFIODevice *vbasedev; + MigrationState *ms = migrate_get_current(); + + if (!migration_is_setup_or_active(ms->state)) { + return false; + } + + QLIST_FOREACH(group, &container->group_list, container_next) { + QLIST_FOREACH(vbasedev, &group->device_list, next) { + VFIOMigration *migration = vbasedev->migration; + + if (!migration) { + return false; + } + + if ((migration->device_state & VFIO_DEVICE_STATE_SAVING) && + (migration->device_state & VFIO_DEVICE_STATE_RUNNING)) { + continue; + } else { + return false; + } + } + } + return true; +} + +static int vfio_dma_unmap_bitmap(VFIOContainer *container, + hwaddr iova, ram_addr_t size, + IOMMUTLBEntry *iotlb) +{ + struct vfio_iommu_type1_dma_unmap *unmap; + struct vfio_bitmap *bitmap; + uint64_t pages = REAL_HOST_PAGE_ALIGN(size) / qemu_real_host_page_size; + int ret; + + unmap = g_malloc0(sizeof(*unmap) + sizeof(*bitmap)); + + unmap->argsz = sizeof(*unmap) + sizeof(*bitmap); + unmap->iova = iova; + unmap->size = size; + unmap->flags |= VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP; + bitmap = (struct vfio_bitmap *)&unmap->data; + + /* + * cpu_physical_memory_set_dirty_lebitmap() supports pages in bitmap of + * qemu_real_host_page_size to 
mark those dirty. Hence set bitmap_pgsize + * to qemu_real_host_page_size. + */ + + bitmap->pgsize = qemu_real_host_page_size; + bitmap->size = ROUND_UP(pages, sizeof(__u64) * BITS_PER_BYTE) / + BITS_PER_BYTE; + + if (bitmap->size > container->max_dirty_bitmap_size) { + error_report("UNMAP: Size of bitmap too big 0x%"PRIx64, + (uint64_t)bitmap->size); + ret = -E2BIG; + goto unmap_exit; + } + + bitmap->data = g_try_malloc0(bitmap->size); + if (!bitmap->data) { + ret = -ENOMEM; + goto unmap_exit; + } + + ret = ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, unmap); + if (!ret) { + cpu_physical_memory_set_dirty_lebitmap((unsigned long *)bitmap->data, + iotlb->translated_addr, pages); + } else { + error_report("VFIO_UNMAP_DMA with DIRTY_BITMAP : %m"); + } + + g_free(bitmap->data); +unmap_exit: + g_free(unmap); + return ret; +} + +/* + * DMA - Mapping and unmapping for the "type1" IOMMU interface used on x86 + */ +static int vfio_dma_unmap(VFIOContainer *container, + hwaddr iova, ram_addr_t size, + IOMMUTLBEntry *iotlb) +{ + struct vfio_iommu_type1_dma_unmap unmap = { + .argsz = sizeof(unmap), + .flags = 0, + .iova = iova, + .size = size, + }; + + if (iotlb && container->dirty_pages_supported && + vfio_devices_all_running_and_saving(container)) { + return vfio_dma_unmap_bitmap(container, iova, size, iotlb); + } + + while (ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, &unmap)) { + /* + * The type1 backend has an off-by-one bug in the kernel (71a7d3d78e3c + * v4.15) where an overflow in its wrap-around check prevents us from + * unmapping the last page of the address space. Test for the error + * condition and re-try the unmap excluding the last page. The + * expectation is that we've never mapped the last page anyway and this + * unmap request comes via vIOMMU support which also makes it unlikely + * that this page is used. This bug was introduced well after type1 v2 + * support was introduced, so we shouldn't need to test for v1. A fix + * is queued for kernel v5.0 so this workaround can be removed once + * affected kernels are sufficiently deprecated. + */ + if (errno == EINVAL && unmap.size && !(unmap.iova + unmap.size) && + container->iommu_type == VFIO_TYPE1v2_IOMMU) { + trace_vfio_dma_unmap_overflow_workaround(); + unmap.size -= 1ULL << ctz64(container->pgsizes); + continue; + } + error_report("VFIO_UNMAP_DMA failed: %s", strerror(errno)); + return -errno; + } + + return 0; +} + +static int vfio_dma_map(VFIOContainer *container, hwaddr iova, + ram_addr_t size, void *vaddr, bool readonly) +{ + struct vfio_iommu_type1_dma_map map = { + .argsz = sizeof(map), + .flags = VFIO_DMA_MAP_FLAG_READ, + .vaddr = (__u64)(uintptr_t)vaddr, + .iova = iova, + .size = size, + }; + + if (!readonly) { + map.flags |= VFIO_DMA_MAP_FLAG_WRITE; + } + + /* + * Try the mapping, if it fails with EBUSY, unmap the region and try + * again. This shouldn't be necessary, but we sometimes see it in + * the VGA ROM space. 
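+ * If the retry fails as well, we fall through to the error_report() below and return -errno.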
+ */ + if (ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0 || + (errno == EBUSY && vfio_dma_unmap(container, iova, size, NULL) == 0 && + ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0)) { + return 0; + } + + error_report("VFIO_MAP_DMA failed: %s", strerror(errno)); + return -errno; +} + +static void vfio_host_win_add(VFIOContainer *container, + hwaddr min_iova, hwaddr max_iova, + uint64_t iova_pgsizes) +{ + VFIOHostDMAWindow *hostwin; + + QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) { + if (ranges_overlap(hostwin->min_iova, + hostwin->max_iova - hostwin->min_iova + 1, + min_iova, + max_iova - min_iova + 1)) { + hw_error("%s: Overlapped IOMMU are not enabled", __func__); + } + } + + hostwin = g_malloc0(sizeof(*hostwin)); + + hostwin->min_iova = min_iova; + hostwin->max_iova = max_iova; + hostwin->iova_pgsizes = iova_pgsizes; + QLIST_INSERT_HEAD(&container->hostwin_list, hostwin, hostwin_next); +} + +static int vfio_host_win_del(VFIOContainer *container, hwaddr min_iova, + hwaddr max_iova) +{ + VFIOHostDMAWindow *hostwin; + + QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) { + if (hostwin->min_iova == min_iova && hostwin->max_iova == max_iova) { + QLIST_REMOVE(hostwin, hostwin_next); + g_free(hostwin); + return 0; + } + } + + return -1; +} + +static bool vfio_listener_skipped_section(MemoryRegionSection *section) +{ + return (!memory_region_is_ram(section->mr) && + !memory_region_is_iommu(section->mr)) || + memory_region_is_protected(section->mr) || + /* + * Sizing an enabled 64-bit BAR can cause spurious mappings to + * addresses in the upper part of the 64-bit address space. These + * are never accessed by the CPU and beyond the address width of + * some IOMMU hardware. TODO: VFIO should tell us the IOMMU width. + */ + section->offset_within_address_space & (1ULL << 63); +} + +/* Called with rcu_read_lock held. */ +static bool vfio_get_xlat_addr(IOMMUTLBEntry *iotlb, void **vaddr, + ram_addr_t *ram_addr, bool *read_only) +{ + MemoryRegion *mr; + hwaddr xlat; + hwaddr len = iotlb->addr_mask + 1; + bool writable = iotlb->perm & IOMMU_WO; + + /* + * The IOMMU TLB entry we have just covers translation through + * this IOMMU to its immediate target. We need to translate + * it the rest of the way through to memory. + */ + mr = address_space_translate(&address_space_memory, + iotlb->translated_addr, + &xlat, &len, writable, + MEMTXATTRS_UNSPECIFIED); + if (!memory_region_is_ram(mr)) { + error_report("iommu map to non memory area %"HWADDR_PRIx"", + xlat); + return false; + } else if (memory_region_has_ram_discard_manager(mr)) { + RamDiscardManager *rdm = memory_region_get_ram_discard_manager(mr); + MemoryRegionSection tmp = { + .mr = mr, + .offset_within_region = xlat, + .size = int128_make64(len), + }; + + /* + * Malicious VMs can map memory into the IOMMU, which is expected + * to remain discarded. vfio will pin all pages, populating memory. + * Disallow that. vmstate priorities make sure any RamDiscardManager + * were already restored before IOMMUs are restored. + */ + if (!ram_discard_manager_is_populated(rdm, &tmp)) { + error_report("iommu map to discarded memory (e.g., unplugged via" + " virtio-mem): %"HWADDR_PRIx"", + iotlb->translated_addr); + return false; + } + + /* + * Malicious VMs might trigger discarding of IOMMU-mapped memory. The + * pages will remain pinned inside vfio until unmapped, resulting in a + * higher memory consumption than expected. 
If memory would get + * populated again later, there would be an inconsistency between pages + * pinned by vfio and pages seen by QEMU. This is the case until + * unmapped from the IOMMU (e.g., during device reset). + * + * With malicious guests, we really only care about pinning more memory + * than expected. RLIMIT_MEMLOCK set for the user/process can never be + * exceeded and can be used to mitigate this problem. + */ + warn_report_once("Using vfio with vIOMMUs and coordinated discarding of" + " RAM (e.g., virtio-mem) works, however, malicious" + " guests can trigger pinning of more memory than" + " intended via an IOMMU. It's possible to mitigate " + " by setting/adjusting RLIMIT_MEMLOCK."); + } + + /* + * Translation truncates length to the IOMMU page size, + * check that it did not truncate too much. + */ + if (len & iotlb->addr_mask) { + error_report("iommu has granularity incompatible with target AS"); + return false; + } + + if (vaddr) { + *vaddr = memory_region_get_ram_ptr(mr) + xlat; + } + + if (ram_addr) { + *ram_addr = memory_region_get_ram_addr(mr) + xlat; + } + + if (read_only) { + *read_only = !writable || mr->readonly; + } + + return true; +} + +static void vfio_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb) +{ + VFIOGuestIOMMU *giommu = container_of(n, VFIOGuestIOMMU, n); + VFIOContainer *container = giommu->container; + hwaddr iova = iotlb->iova + giommu->iommu_offset; + void *vaddr; + int ret; + + trace_vfio_iommu_map_notify(iotlb->perm == IOMMU_NONE ? "UNMAP" : "MAP", + iova, iova + iotlb->addr_mask); + + if (iotlb->target_as != &address_space_memory) { + error_report("Wrong target AS \"%s\", only system memory is allowed", + iotlb->target_as->name ? iotlb->target_as->name : "none"); + return; + } + + rcu_read_lock(); + + if ((iotlb->perm & IOMMU_RW) != IOMMU_NONE) { + bool read_only; + + if (!vfio_get_xlat_addr(iotlb, &vaddr, NULL, &read_only)) { + goto out; + } + /* + * vaddr is only valid until rcu_read_unlock(). But after + * vfio_dma_map has set up the mapping the pages will be + * pinned by the kernel. This makes sure that the RAM backend + * of vaddr will always be there, even if the memory object is + * destroyed and its backing memory munmap-ed. + */ + ret = vfio_dma_map(container, iova, + iotlb->addr_mask + 1, vaddr, + read_only); + if (ret) { + error_report("vfio_dma_map(%p, 0x%"HWADDR_PRIx", " + "0x%"HWADDR_PRIx", %p) = %d (%m)", + container, iova, + iotlb->addr_mask + 1, vaddr, ret); + } + } else { + ret = vfio_dma_unmap(container, iova, iotlb->addr_mask + 1, iotlb); + if (ret) { + error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", " + "0x%"HWADDR_PRIx") = %d (%m)", + container, iova, + iotlb->addr_mask + 1, ret); + } + } +out: + rcu_read_unlock(); +} + +static void vfio_ram_discard_notify_discard(RamDiscardListener *rdl, + MemoryRegionSection *section) +{ + VFIORamDiscardListener *vrdl = container_of(rdl, VFIORamDiscardListener, + listener); + const hwaddr size = int128_get64(section->size); + const hwaddr iova = section->offset_within_address_space; + int ret; + + /* Unmap with a single call. 
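A single ranged unmap drops every per-block mapping that vfio_ram_discard_notify_populate() created for this section.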
*/ + ret = vfio_dma_unmap(vrdl->container, iova, size , NULL); + if (ret) { + error_report("%s: vfio_dma_unmap() failed: %s", __func__, + strerror(-ret)); + } +} + +static int vfio_ram_discard_notify_populate(RamDiscardListener *rdl, + MemoryRegionSection *section) +{ + VFIORamDiscardListener *vrdl = container_of(rdl, VFIORamDiscardListener, + listener); + const hwaddr end = section->offset_within_region + + int128_get64(section->size); + hwaddr start, next, iova; + void *vaddr; + int ret; + + /* + * Map in (aligned within memory region) minimum granularity, so we can + * unmap in minimum granularity later. + */ + for (start = section->offset_within_region; start < end; start = next) { + next = ROUND_UP(start + 1, vrdl->granularity); + next = MIN(next, end); + + iova = start - section->offset_within_region + + section->offset_within_address_space; + vaddr = memory_region_get_ram_ptr(section->mr) + start; + + ret = vfio_dma_map(vrdl->container, iova, next - start, + vaddr, section->readonly); + if (ret) { + /* Rollback */ + vfio_ram_discard_notify_discard(rdl, section); + return ret; + } + } + return 0; +} + +static void vfio_register_ram_discard_listener(VFIOContainer *container, + MemoryRegionSection *section) +{ + RamDiscardManager *rdm = memory_region_get_ram_discard_manager(section->mr); + VFIORamDiscardListener *vrdl; + + /* Ignore some corner cases not relevant in practice. */ + g_assert(QEMU_IS_ALIGNED(section->offset_within_region, TARGET_PAGE_SIZE)); + g_assert(QEMU_IS_ALIGNED(section->offset_within_address_space, + TARGET_PAGE_SIZE)); + g_assert(QEMU_IS_ALIGNED(int128_get64(section->size), TARGET_PAGE_SIZE)); + + vrdl = g_new0(VFIORamDiscardListener, 1); + vrdl->container = container; + vrdl->mr = section->mr; + vrdl->offset_within_address_space = section->offset_within_address_space; + vrdl->size = int128_get64(section->size); + vrdl->granularity = ram_discard_manager_get_min_granularity(rdm, + section->mr); + + g_assert(vrdl->granularity && is_power_of_2(vrdl->granularity)); + g_assert(container->pgsizes && + vrdl->granularity >= 1ULL << ctz64(container->pgsizes)); + + ram_discard_listener_init(&vrdl->listener, + vfio_ram_discard_notify_populate, + vfio_ram_discard_notify_discard, true); + ram_discard_manager_register_listener(rdm, &vrdl->listener, section); + QLIST_INSERT_HEAD(&container->vrdl_list, vrdl, next); + + /* + * Sanity-check if we have a theoretically problematic setup where we could + * exceed the maximum number of possible DMA mappings over time. We assume + * that each mapped section in the same address space as a RamDiscardManager + * section consumes exactly one DMA mapping, with the exception of + * RamDiscardManager sections; i.e., we don't expect to have gIOMMU sections + * in the same address space as RamDiscardManager sections. + * + * We assume that each section in the address space consumes one memslot. + * We take the number of KVM memory slots as a best guess for the maximum + * number of sections in the address space we could have over time, + * also consuming DMA mappings. 
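+ * This is only a heuristic: if the estimate exceeds the limit we merely warn below and continue.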
+ */ + if (container->dma_max_mappings) { + unsigned int vrdl_count = 0, vrdl_mappings = 0, max_memslots = 512; + +#ifdef CONFIG_KVM + if (kvm_enabled()) { + max_memslots = kvm_get_max_memslots(); + } +#endif + + QLIST_FOREACH(vrdl, &container->vrdl_list, next) { + hwaddr start, end; + + start = QEMU_ALIGN_DOWN(vrdl->offset_within_address_space, + vrdl->granularity); + end = ROUND_UP(vrdl->offset_within_address_space + vrdl->size, + vrdl->granularity); + vrdl_mappings += (end - start) / vrdl->granularity; + vrdl_count++; + } + + if (vrdl_mappings + max_memslots - vrdl_count > + container->dma_max_mappings) { + warn_report("%s: possibly running out of DMA mappings. E.g., try" + " increasing the 'block-size' of virtio-mem devices." + " Maximum possible DMA mappings: %d, Maximum possible" + " memslots: %d", __func__, container->dma_max_mappings, + max_memslots); + } + } +} + +static void vfio_unregister_ram_discard_listener(VFIOContainer *container, + MemoryRegionSection *section) +{ + RamDiscardManager *rdm = memory_region_get_ram_discard_manager(section->mr); + VFIORamDiscardListener *vrdl = NULL; + + QLIST_FOREACH(vrdl, &container->vrdl_list, next) { + if (vrdl->mr == section->mr && + vrdl->offset_within_address_space == + section->offset_within_address_space) { + break; + } + } + + if (!vrdl) { + hw_error("vfio: Trying to unregister missing RAM discard listener"); + } + + ram_discard_manager_unregister_listener(rdm, &vrdl->listener); + QLIST_REMOVE(vrdl, next); + g_free(vrdl); +} + +static void vfio_listener_region_add(MemoryListener *listener, + MemoryRegionSection *section) +{ + VFIOContainer *container = container_of(listener, VFIOContainer, listener); + hwaddr iova, end; + Int128 llend, llsize; + void *vaddr; + int ret; + VFIOHostDMAWindow *hostwin; + bool hostwin_found; + Error *err = NULL; + + if (vfio_listener_skipped_section(section)) { + trace_vfio_listener_region_add_skip( + section->offset_within_address_space, + section->offset_within_address_space + + int128_get64(int128_sub(section->size, int128_one()))); + return; + } + + if (unlikely((section->offset_within_address_space & + ~qemu_real_host_page_mask) != + (section->offset_within_region & ~qemu_real_host_page_mask))) { + error_report("%s received unaligned region", __func__); + return; + } + + iova = REAL_HOST_PAGE_ALIGN(section->offset_within_address_space); + llend = int128_make64(section->offset_within_address_space); + llend = int128_add(llend, section->size); + llend = int128_and(llend, int128_exts64(qemu_real_host_page_mask)); + + if (int128_ge(int128_make64(iova), llend)) { + if (memory_region_is_ram_device(section->mr)) { + trace_vfio_listener_region_add_no_dma_map( + memory_region_name(section->mr), + section->offset_within_address_space, + int128_getlo(section->size), + qemu_real_host_page_size); + } + return; + } + end = int128_get64(int128_sub(llend, int128_one())); + + if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) { + hwaddr pgsize = 0; + + /* For now intersections are not allowed, we may relax this later */ + QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) { + if (ranges_overlap(hostwin->min_iova, + hostwin->max_iova - hostwin->min_iova + 1, + section->offset_within_address_space, + int128_get64(section->size))) { + error_setg(&err, + "region [0x%"PRIx64",0x%"PRIx64"] overlaps with existing " + "host DMA window [0x%"PRIx64",0x%"PRIx64"]", + section->offset_within_address_space, + section->offset_within_address_space + + int128_get64(section->size) - 1, + hostwin->min_iova,
hostwin->max_iova); + goto fail; + } + } + + ret = vfio_spapr_create_window(container, section, &pgsize); + if (ret) { + error_setg_errno(&err, -ret, "Failed to create SPAPR window"); + goto fail; + } + + vfio_host_win_add(container, section->offset_within_address_space, + section->offset_within_address_space + + int128_get64(section->size) - 1, pgsize); +#ifdef CONFIG_KVM + if (kvm_enabled()) { + VFIOGroup *group; + IOMMUMemoryRegion *iommu_mr = IOMMU_MEMORY_REGION(section->mr); + struct kvm_vfio_spapr_tce param; + struct kvm_device_attr attr = { + .group = KVM_DEV_VFIO_GROUP, + .attr = KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE, + .addr = (uint64_t)(unsigned long)&param, + }; + + if (!memory_region_iommu_get_attr(iommu_mr, IOMMU_ATTR_SPAPR_TCE_FD, + &param.tablefd)) { + QLIST_FOREACH(group, &container->group_list, container_next) { + param.groupfd = group->fd; + if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) { + error_report("vfio: failed to setup fd %d " + "for a group with fd %d: %s", + param.tablefd, param.groupfd, + strerror(errno)); + return; + } + trace_vfio_spapr_group_attach(param.groupfd, param.tablefd); + } + } + } +#endif + } + + hostwin_found = false; + QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) { + if (hostwin->min_iova <= iova && end <= hostwin->max_iova) { + hostwin_found = true; + break; + } + } + + if (!hostwin_found) { + error_setg(&err, "Container %p can't map guest IOVA region" + " 0x%"HWADDR_PRIx"..0x%"HWADDR_PRIx, container, iova, end); + goto fail; + } + + memory_region_ref(section->mr); + + if (memory_region_is_iommu(section->mr)) { + VFIOGuestIOMMU *giommu; + IOMMUMemoryRegion *iommu_mr = IOMMU_MEMORY_REGION(section->mr); + int iommu_idx; + + trace_vfio_listener_region_add_iommu(iova, end); + /* + * FIXME: For VFIO iommu types which have KVM acceleration to + * avoid bouncing all map/unmaps through qemu this way, this + * would be the right place to wire that up (tell the KVM + * device emulation the VFIO iommu handles to use). + */ + giommu = g_malloc0(sizeof(*giommu)); + giommu->iommu = iommu_mr; + giommu->iommu_offset = section->offset_within_address_space - + section->offset_within_region; + giommu->container = container; + llend = int128_add(int128_make64(section->offset_within_region), + section->size); + llend = int128_sub(llend, int128_one()); + iommu_idx = memory_region_iommu_attrs_to_index(iommu_mr, + MEMTXATTRS_UNSPECIFIED); + iommu_notifier_init(&giommu->n, vfio_iommu_map_notify, + IOMMU_NOTIFIER_IOTLB_EVENTS, + section->offset_within_region, + int128_get64(llend), + iommu_idx); + + ret = memory_region_iommu_set_page_size_mask(giommu->iommu, + container->pgsizes, + &err); + if (ret) { + g_free(giommu); + goto fail; + } + + ret = memory_region_register_iommu_notifier(section->mr, &giommu->n, + &err); + if (ret) { + g_free(giommu); + goto fail; + } + QLIST_INSERT_HEAD(&container->giommu_list, giommu, giommu_next); + memory_region_iommu_replay(giommu->iommu, &giommu->n); + + return; + } + + /* Here we assume that memory_region_is_ram(section->mr)==true */ + + /* + * For RAM memory regions with a RamDiscardManager, we only want to map the + * actually populated parts - and update the mapping whenever we're notified + * about changes.
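+ * vfio_register_ram_discard_listener() below wires up the populate/discard callbacks for that.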
+ */ + if (memory_region_has_ram_discard_manager(section->mr)) { + vfio_register_ram_discard_listener(container, section); + return; + } + + vaddr = memory_region_get_ram_ptr(section->mr) + + section->offset_within_region + + (iova - section->offset_within_address_space); + + trace_vfio_listener_region_add_ram(iova, end, vaddr); + + llsize = int128_sub(llend, int128_make64(iova)); + + if (memory_region_is_ram_device(section->mr)) { + hwaddr pgmask = (1ULL << ctz64(hostwin->iova_pgsizes)) - 1; + + if ((iova & pgmask) || (int128_get64(llsize) & pgmask)) { + trace_vfio_listener_region_add_no_dma_map( + memory_region_name(section->mr), + section->offset_within_address_space, + int128_getlo(section->size), + pgmask + 1); + return; + } + } + + ret = vfio_dma_map(container, iova, int128_get64(llsize), + vaddr, section->readonly); + if (ret) { + error_setg(&err, "vfio_dma_map(%p, 0x%"HWADDR_PRIx", " + "0x%"HWADDR_PRIx", %p) = %d (%m)", + container, iova, int128_get64(llsize), vaddr, ret); + if (memory_region_is_ram_device(section->mr)) { + /* Allow unexpected mappings not to be fatal for RAM devices */ + error_report_err(err); + return; + } + goto fail; + } + + return; + +fail: + if (memory_region_is_ram_device(section->mr)) { + error_report("failed to vfio_dma_map. pci p2p may not work"); + return; + } + /* + * On the initfn path, store the first error in the container so we + * can gracefully fail. Runtime, there's not much we can do other + * than throw a hardware error. + */ + if (!container->initialized) { + if (!container->error) { + error_propagate_prepend(&container->error, err, + "Region %s: ", + memory_region_name(section->mr)); + } else { + error_free(err); + } + } else { + error_report_err(err); + hw_error("vfio: DMA mapping failed, unable to continue"); + } +} + +static void vfio_listener_region_del(MemoryListener *listener, + MemoryRegionSection *section) +{ + VFIOContainer *container = container_of(listener, VFIOContainer, listener); + hwaddr iova, end; + Int128 llend, llsize; + int ret; + bool try_unmap = true; + + if (vfio_listener_skipped_section(section)) { + trace_vfio_listener_region_del_skip( + section->offset_within_address_space, + section->offset_within_address_space + + int128_get64(int128_sub(section->size, int128_one()))); + return; + } + + if (unlikely((section->offset_within_address_space & + ~qemu_real_host_page_mask) != + (section->offset_within_region & ~qemu_real_host_page_mask))) { + error_report("%s received unaligned region", __func__); + return; + } + + if (memory_region_is_iommu(section->mr)) { + VFIOGuestIOMMU *giommu; + + QLIST_FOREACH(giommu, &container->giommu_list, giommu_next) { + if (MEMORY_REGION(giommu->iommu) == section->mr && + giommu->n.start == section->offset_within_region) { + memory_region_unregister_iommu_notifier(section->mr, + &giommu->n); + QLIST_REMOVE(giommu, giommu_next); + g_free(giommu); + break; + } + } + + /* + * FIXME: We assume the one big unmap below is adequate to + * remove any individual page mappings in the IOMMU which + * might have been copied into VFIO. This works for a page table + * based IOMMU where a big unmap flattens a large range of IO-PTEs. + * That may not be true for all IOMMU types. 
+ */ + } + + iova = REAL_HOST_PAGE_ALIGN(section->offset_within_address_space); + llend = int128_make64(section->offset_within_address_space); + llend = int128_add(llend, section->size); + llend = int128_and(llend, int128_exts64(qemu_real_host_page_mask)); + + if (int128_ge(int128_make64(iova), llend)) { + return; + } + end = int128_get64(int128_sub(llend, int128_one())); + + llsize = int128_sub(llend, int128_make64(iova)); + + trace_vfio_listener_region_del(iova, end); + + if (memory_region_is_ram_device(section->mr)) { + hwaddr pgmask; + VFIOHostDMAWindow *hostwin; + bool hostwin_found = false; + + QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) { + if (hostwin->min_iova <= iova && end <= hostwin->max_iova) { + hostwin_found = true; + break; + } + } + assert(hostwin_found); /* or region_add() would have failed */ + + pgmask = (1ULL << ctz64(hostwin->iova_pgsizes)) - 1; + try_unmap = !((iova & pgmask) || (int128_get64(llsize) & pgmask)); + } else if (memory_region_has_ram_discard_manager(section->mr)) { + vfio_unregister_ram_discard_listener(container, section); + /* Unregistering will trigger an unmap. */ + try_unmap = false; + } + + if (try_unmap) { + if (int128_eq(llsize, int128_2_64())) { + /* The unmap ioctl doesn't accept a full 64-bit span. */ + llsize = int128_rshift(llsize, 1); + ret = vfio_dma_unmap(container, iova, int128_get64(llsize), NULL); + if (ret) { + error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", " + "0x%"HWADDR_PRIx") = %d (%m)", + container, iova, int128_get64(llsize), ret); + } + iova += int128_get64(llsize); + } + ret = vfio_dma_unmap(container, iova, int128_get64(llsize), NULL); + if (ret) { + error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", " + "0x%"HWADDR_PRIx") = %d (%m)", + container, iova, int128_get64(llsize), ret); + } + } + + memory_region_unref(section->mr); + + if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) { + vfio_spapr_remove_window(container, + section->offset_within_address_space); + if (vfio_host_win_del(container, + section->offset_within_address_space, + section->offset_within_address_space + + int128_get64(section->size) - 1) < 0) { + hw_error("%s: Cannot delete missing window at %"HWADDR_PRIx, + __func__, section->offset_within_address_space); + } + } +} + +static void vfio_set_dirty_page_tracking(VFIOContainer *container, bool start) +{ + int ret; + struct vfio_iommu_type1_dirty_bitmap dirty = { + .argsz = sizeof(dirty), + }; + + if (start) { + dirty.flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_START; + } else { + dirty.flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP; + } + + ret = ioctl(container->fd, VFIO_IOMMU_DIRTY_PAGES, &dirty); + if (ret) { + error_report("Failed to set dirty tracking flag 0x%x errno: %d", + dirty.flags, errno); + } +} + +static void vfio_listener_log_global_start(MemoryListener *listener) +{ + VFIOContainer *container = container_of(listener, VFIOContainer, listener); + + vfio_set_dirty_page_tracking(container, true); +} + +static void vfio_listener_log_global_stop(MemoryListener *listener) +{ + VFIOContainer *container = container_of(listener, VFIOContainer, listener); + + vfio_set_dirty_page_tracking(container, false); +} + +static int vfio_get_dirty_bitmap(VFIOContainer *container, uint64_t iova, + uint64_t size, ram_addr_t ram_addr) +{ + struct vfio_iommu_type1_dirty_bitmap *dbitmap; + struct vfio_iommu_type1_dirty_bitmap_get *range; + uint64_t pages; + int ret; + + dbitmap = g_malloc0(sizeof(*dbitmap) + sizeof(*range)); + + dbitmap->argsz = sizeof(*dbitmap) + sizeof(*range); + dbitmap->flags = 
VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP; + range = (struct vfio_iommu_type1_dirty_bitmap_get *)&dbitmap->data; + range->iova = iova; + range->size = size; + + /* + * cpu_physical_memory_set_dirty_lebitmap() supports pages in bitmap of + * qemu_real_host_page_size to mark those dirty. Hence set bitmap's pgsize + * to qemu_real_host_page_size. + */ + range->bitmap.pgsize = qemu_real_host_page_size; + + pages = REAL_HOST_PAGE_ALIGN(range->size) / qemu_real_host_page_size; + range->bitmap.size = ROUND_UP(pages, sizeof(__u64) * BITS_PER_BYTE) / + BITS_PER_BYTE; + range->bitmap.data = g_try_malloc0(range->bitmap.size); + if (!range->bitmap.data) { + ret = -ENOMEM; + goto err_out; + } + + ret = ioctl(container->fd, VFIO_IOMMU_DIRTY_PAGES, dbitmap); + if (ret) { + error_report("Failed to get dirty bitmap for iova: 0x%"PRIx64 + " size: 0x%"PRIx64" err: %d", (uint64_t)range->iova, + (uint64_t)range->size, errno); + goto err_out; + } + + cpu_physical_memory_set_dirty_lebitmap((unsigned long *)range->bitmap.data, + ram_addr, pages); + + trace_vfio_get_dirty_bitmap(container->fd, range->iova, range->size, + range->bitmap.size, ram_addr); +err_out: + g_free(range->bitmap.data); + g_free(dbitmap); + + return ret; +} + +typedef struct { + IOMMUNotifier n; + VFIOGuestIOMMU *giommu; +} vfio_giommu_dirty_notifier; + +static void vfio_iommu_map_dirty_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb) +{ + vfio_giommu_dirty_notifier *gdn = container_of(n, + vfio_giommu_dirty_notifier, n); + VFIOGuestIOMMU *giommu = gdn->giommu; + VFIOContainer *container = giommu->container; + hwaddr iova = iotlb->iova + giommu->iommu_offset; + ram_addr_t translated_addr; + + trace_vfio_iommu_map_dirty_notify(iova, iova + iotlb->addr_mask); + + if (iotlb->target_as != &address_space_memory) { + error_report("Wrong target AS \"%s\", only system memory is allowed", + iotlb->target_as->name ? iotlb->target_as->name : "none"); + return; + } + + rcu_read_lock(); + if (vfio_get_xlat_addr(iotlb, NULL, &translated_addr, NULL)) { + int ret; + + ret = vfio_get_dirty_bitmap(container, iova, iotlb->addr_mask + 1, + translated_addr); + if (ret) { + error_report("vfio_iommu_map_dirty_notify(%p, 0x%"HWADDR_PRIx", " + "0x%"HWADDR_PRIx") = %d (%m)", + container, iova, + iotlb->addr_mask + 1, ret); + } + } + rcu_read_unlock(); +} + +static int vfio_ram_discard_get_dirty_bitmap(MemoryRegionSection *section, + void *opaque) +{ + const hwaddr size = int128_get64(section->size); + const hwaddr iova = section->offset_within_address_space; + const ram_addr_t ram_addr = memory_region_get_ram_addr(section->mr) + + section->offset_within_region; + VFIORamDiscardListener *vrdl = opaque; + + /* + * Sync the whole mapped region (spanning multiple individual mappings) + * in one go. + */ + return vfio_get_dirty_bitmap(vrdl->container, iova, size, ram_addr); +} + +static int vfio_sync_ram_discard_listener_dirty_bitmap(VFIOContainer *container, + MemoryRegionSection *section) +{ + RamDiscardManager *rdm = memory_region_get_ram_discard_manager(section->mr); + VFIORamDiscardListener *vrdl = NULL; + + QLIST_FOREACH(vrdl, &container->vrdl_list, next) { + if (vrdl->mr == section->mr && + vrdl->offset_within_address_space == + section->offset_within_address_space) { + break; + } + } + + if (!vrdl) { + hw_error("vfio: Trying to sync missing RAM discard listener"); + } + + /* + * We only want/can synchronize the bitmap for actually mapped parts - + * which correspond to populated parts. Replay all populated parts. 
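+ * The replay below invokes vfio_ram_discard_get_dirty_bitmap() once per populated part.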
+ */ + return ram_discard_manager_replay_populated(rdm, section, + vfio_ram_discard_get_dirty_bitmap, + &vrdl); +} + +static int vfio_sync_dirty_bitmap(VFIOContainer *container, + MemoryRegionSection *section) +{ + ram_addr_t ram_addr; + + if (memory_region_is_iommu(section->mr)) { + VFIOGuestIOMMU *giommu; + + QLIST_FOREACH(giommu, &container->giommu_list, giommu_next) { + if (MEMORY_REGION(giommu->iommu) == section->mr && + giommu->n.start == section->offset_within_region) { + Int128 llend; + vfio_giommu_dirty_notifier gdn = { .giommu = giommu }; + int idx = memory_region_iommu_attrs_to_index(giommu->iommu, + MEMTXATTRS_UNSPECIFIED); + + llend = int128_add(int128_make64(section->offset_within_region), + section->size); + llend = int128_sub(llend, int128_one()); + + iommu_notifier_init(&gdn.n, + vfio_iommu_map_dirty_notify, + IOMMU_NOTIFIER_MAP, + section->offset_within_region, + int128_get64(llend), + idx); + memory_region_iommu_replay(giommu->iommu, &gdn.n); + break; + } + } + return 0; + } else if (memory_region_has_ram_discard_manager(section->mr)) { + return vfio_sync_ram_discard_listener_dirty_bitmap(container, section); + } + + ram_addr = memory_region_get_ram_addr(section->mr) + + section->offset_within_region; + + return vfio_get_dirty_bitmap(container, + REAL_HOST_PAGE_ALIGN(section->offset_within_address_space), + int128_get64(section->size), ram_addr); +} + +static void vfio_listener_log_sync(MemoryListener *listener, + MemoryRegionSection *section) +{ + VFIOContainer *container = container_of(listener, VFIOContainer, listener); + + if (vfio_listener_skipped_section(section) || + !container->dirty_pages_supported) { + return; + } + + if (vfio_devices_all_dirty_tracking(container)) { + vfio_sync_dirty_bitmap(container, section); + } +} + +static const MemoryListener vfio_memory_listener = { + .name = "vfio", + .region_add = vfio_listener_region_add, + .region_del = vfio_listener_region_del, + .log_global_start = vfio_listener_log_global_start, + .log_global_stop = vfio_listener_log_global_stop, + .log_sync = vfio_listener_log_sync, +}; + +static void vfio_listener_release(VFIOContainer *container) +{ + memory_listener_unregister(&container->listener); + if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) { + memory_listener_unregister(&container->prereg_listener); + } +} + +static struct vfio_info_cap_header * +vfio_get_cap(void *ptr, uint32_t cap_offset, uint16_t id) +{ + struct vfio_info_cap_header *hdr; + + for (hdr = ptr + cap_offset; hdr != ptr; hdr = ptr + hdr->next) { + if (hdr->id == id) { + return hdr; + } + } + + return NULL; +} + +struct vfio_info_cap_header * +vfio_get_region_info_cap(struct vfio_region_info *info, uint16_t id) +{ + if (!(info->flags & VFIO_REGION_INFO_FLAG_CAPS)) { + return NULL; + } + + return vfio_get_cap((void *)info, info->cap_offset, id); +} + +static struct vfio_info_cap_header * +vfio_get_iommu_type1_info_cap(struct vfio_iommu_type1_info *info, uint16_t id) +{ + if (!(info->flags & VFIO_IOMMU_INFO_CAPS)) { + return NULL; + } + + return vfio_get_cap((void *)info, info->cap_offset, id); +} + +struct vfio_info_cap_header * +vfio_get_device_info_cap(struct vfio_device_info *info, uint16_t id) +{ + if (!(info->flags & VFIO_DEVICE_FLAGS_CAPS)) { + return NULL; + } + + return vfio_get_cap((void *)info, info->cap_offset, id); +} + +bool vfio_get_info_dma_avail(struct vfio_iommu_type1_info *info, + unsigned int *avail) +{ + struct vfio_info_cap_header *hdr; + struct vfio_iommu_type1_info_dma_avail *cap; + + /* If the capability cannot be found, 
assume no DMA limiting */ + hdr = vfio_get_iommu_type1_info_cap(info, + VFIO_IOMMU_TYPE1_INFO_DMA_AVAIL); + if (hdr == NULL) { + return false; + } + + if (avail != NULL) { + cap = (void *) hdr; + *avail = cap->avail; + } + + return true; +} + +static int vfio_setup_region_sparse_mmaps(VFIORegion *region, + struct vfio_region_info *info) +{ + struct vfio_info_cap_header *hdr; + struct vfio_region_info_cap_sparse_mmap *sparse; + int i, j; + + hdr = vfio_get_region_info_cap(info, VFIO_REGION_INFO_CAP_SPARSE_MMAP); + if (!hdr) { + return -ENODEV; + } + + sparse = container_of(hdr, struct vfio_region_info_cap_sparse_mmap, header); + + trace_vfio_region_sparse_mmap_header(region->vbasedev->name, + region->nr, sparse->nr_areas); + + region->mmaps = g_new0(VFIOMmap, sparse->nr_areas); + + for (i = 0, j = 0; i < sparse->nr_areas; i++) { + trace_vfio_region_sparse_mmap_entry(i, sparse->areas[i].offset, + sparse->areas[i].offset + + sparse->areas[i].size); + + if (sparse->areas[i].size) { + region->mmaps[j].offset = sparse->areas[i].offset; + region->mmaps[j].size = sparse->areas[i].size; + j++; + } + } + + region->nr_mmaps = j; + region->mmaps = g_realloc(region->mmaps, j * sizeof(VFIOMmap)); + + return 0; +} + +int vfio_region_setup(Object *obj, VFIODevice *vbasedev, VFIORegion *region, + int index, const char *name) +{ + struct vfio_region_info *info; + int ret; + + ret = vfio_get_region_info(vbasedev, index, &info); + if (ret) { + return ret; + } + + region->vbasedev = vbasedev; + region->flags = info->flags; + region->size = info->size; + region->fd_offset = info->offset; + region->nr = index; + + if (region->size) { + region->mem = g_new0(MemoryRegion, 1); + memory_region_init_io(region->mem, obj, &vfio_region_ops, + region, name, region->size); + + if (!vbasedev->no_mmap && + region->flags & VFIO_REGION_INFO_FLAG_MMAP) { + + ret = vfio_setup_region_sparse_mmaps(region, info); + + if (ret) { + region->nr_mmaps = 1; + region->mmaps = g_new0(VFIOMmap, region->nr_mmaps); + region->mmaps[0].offset = 0; + region->mmaps[0].size = region->size; + } + } + } + + g_free(info); + + trace_vfio_region_setup(vbasedev->name, index, name, + region->flags, region->fd_offset, region->size); + return 0; +} + +static void vfio_subregion_unmap(VFIORegion *region, int index) +{ + trace_vfio_region_unmap(memory_region_name(&region->mmaps[index].mem), + region->mmaps[index].offset, + region->mmaps[index].offset + + region->mmaps[index].size - 1); + memory_region_del_subregion(region->mem, &region->mmaps[index].mem); + munmap(region->mmaps[index].mmap, region->mmaps[index].size); + object_unparent(OBJECT(&region->mmaps[index].mem)); + region->mmaps[index].mmap = NULL; +} + +int vfio_region_mmap(VFIORegion *region) +{ + int i, prot = 0; + char *name; + + if (!region->mem) { + return 0; + } + + prot |= region->flags & VFIO_REGION_INFO_FLAG_READ ? PROT_READ : 0; + prot |= region->flags & VFIO_REGION_INFO_FLAG_WRITE ?
PROT_WRITE : 0; + + for (i = 0; i < region->nr_mmaps; i++) { + region->mmaps[i].mmap = mmap(NULL, region->mmaps[i].size, prot, + MAP_SHARED, region->vbasedev->fd, + region->fd_offset + + region->mmaps[i].offset); + if (region->mmaps[i].mmap == MAP_FAILED) { + int ret = -errno; + + trace_vfio_region_mmap_fault(memory_region_name(region->mem), i, + region->fd_offset + + region->mmaps[i].offset, + region->fd_offset + + region->mmaps[i].offset + + region->mmaps[i].size - 1, ret); + + region->mmaps[i].mmap = NULL; + + for (i--; i >= 0; i--) { + vfio_subregion_unmap(region, i); + } + + return ret; + } + + name = g_strdup_printf("%s mmaps[%d]", + memory_region_name(region->mem), i); + memory_region_init_ram_device_ptr(&region->mmaps[i].mem, + memory_region_owner(region->mem), + name, region->mmaps[i].size, + region->mmaps[i].mmap); + g_free(name); + memory_region_add_subregion(region->mem, region->mmaps[i].offset, + &region->mmaps[i].mem); + + trace_vfio_region_mmap(memory_region_name(&region->mmaps[i].mem), + region->mmaps[i].offset, + region->mmaps[i].offset + + region->mmaps[i].size - 1); + } + + return 0; +} + +void vfio_region_unmap(VFIORegion *region) +{ + int i; + + if (!region->mem) { + return; + } + + for (i = 0; i < region->nr_mmaps; i++) { + if (region->mmaps[i].mmap) { + vfio_subregion_unmap(region, i); + } + } +} + +void vfio_region_exit(VFIORegion *region) +{ + int i; + + if (!region->mem) { + return; + } + + for (i = 0; i < region->nr_mmaps; i++) { + if (region->mmaps[i].mmap) { + memory_region_del_subregion(region->mem, &region->mmaps[i].mem); + } + } + + trace_vfio_region_exit(region->vbasedev->name, region->nr); +} + +void vfio_region_finalize(VFIORegion *region) +{ + int i; + + if (!region->mem) { + return; + } + + for (i = 0; i < region->nr_mmaps; i++) { + if (region->mmaps[i].mmap) { + munmap(region->mmaps[i].mmap, region->mmaps[i].size); + object_unparent(OBJECT(&region->mmaps[i].mem)); + } + } + + object_unparent(OBJECT(region->mem)); + + g_free(region->mem); + g_free(region->mmaps); + + trace_vfio_region_finalize(region->vbasedev->name, region->nr); + + region->mem = NULL; + region->mmaps = NULL; + region->nr_mmaps = 0; + region->size = 0; + region->flags = 0; + region->nr = 0; +} + +void vfio_region_mmaps_set_enabled(VFIORegion *region, bool enabled) +{ + int i; + + if (!region->mem) { + return; + } + + for (i = 0; i < region->nr_mmaps; i++) { + if (region->mmaps[i].mmap) { + memory_region_set_enabled(&region->mmaps[i].mem, enabled); + } + } + + trace_vfio_region_mmaps_set_enabled(memory_region_name(region->mem), + enabled); +} + +void vfio_reset_handler(void *opaque) +{ + VFIOGroup *group; + VFIODevice *vbasedev; + + QLIST_FOREACH(group, &vfio_group_list, next) { + QLIST_FOREACH(vbasedev, &group->device_list, next) { + if (vbasedev->dev->realized) { + vbasedev->ops->vfio_compute_needs_reset(vbasedev); + } + } + } + + QLIST_FOREACH(group, &vfio_group_list, next) { + QLIST_FOREACH(vbasedev, &group->device_list, next) { + if (vbasedev->dev->realized && vbasedev->needs_reset) { + vbasedev->ops->vfio_hot_reset_multi(vbasedev); + } + } + } +} + +static void vfio_kvm_device_add_group(VFIOGroup *group) +{ +#ifdef CONFIG_KVM + struct kvm_device_attr attr = { + .group = KVM_DEV_VFIO_GROUP, + .attr = KVM_DEV_VFIO_GROUP_ADD, + .addr = (uint64_t)(unsigned long)&group->fd, + }; + + if (!kvm_enabled()) { + return; + } + + if (vfio_kvm_device_fd < 0) { + struct kvm_create_device cd = { + .type = KVM_DEV_TYPE_VFIO, + }; + + if (kvm_vm_ioctl(kvm_state, KVM_CREATE_DEVICE, &cd)) { + error_report("Failed to
create KVM VFIO device: %m"); + return; + } + + vfio_kvm_device_fd = cd.fd; + } + + if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) { + error_report("Failed to add group %d to KVM VFIO device: %m", + group->groupid); + } +#endif +} + +static void vfio_kvm_device_del_group(VFIOGroup *group) +{ +#ifdef CONFIG_KVM + struct kvm_device_attr attr = { + .group = KVM_DEV_VFIO_GROUP, + .attr = KVM_DEV_VFIO_GROUP_DEL, + .addr = (uint64_t)(unsigned long)&group->fd, + }; + + if (vfio_kvm_device_fd < 0) { + return; + } + + if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) { + error_report("Failed to remove group %d from KVM VFIO device: %m", + group->groupid); + } +#endif +} + +static VFIOAddressSpace *vfio_get_address_space(AddressSpace *as) +{ + VFIOAddressSpace *space; + + QLIST_FOREACH(space, &vfio_address_spaces, list) { + if (space->as == as) { + return space; + } + } + + /* No suitable VFIOAddressSpace, create a new one */ + space = g_malloc0(sizeof(*space)); + space->as = as; + QLIST_INIT(&space->containers); + + QLIST_INSERT_HEAD(&vfio_address_spaces, space, list); + + return space; +} + +static void vfio_put_address_space(VFIOAddressSpace *space) +{ + if (QLIST_EMPTY(&space->containers)) { + QLIST_REMOVE(space, list); + g_free(space); + } +} + +/* + * vfio_get_iommu_type - selects the richest iommu_type (v2 first) + */ +static int vfio_get_iommu_type(VFIOContainer *container, + Error **errp) +{ + int iommu_types[] = { VFIO_TYPE1v2_IOMMU, VFIO_TYPE1_IOMMU, + VFIO_SPAPR_TCE_v2_IOMMU, VFIO_SPAPR_TCE_IOMMU }; + int i; + + for (i = 0; i < ARRAY_SIZE(iommu_types); i++) { + if (ioctl(container->fd, VFIO_CHECK_EXTENSION, iommu_types[i])) { + return iommu_types[i]; + } + } + error_setg(errp, "No available IOMMU models"); + return -EINVAL; +} + +static int vfio_init_container(VFIOContainer *container, int group_fd, + Error **errp) +{ + int iommu_type, ret; + + iommu_type = vfio_get_iommu_type(container, errp); + if (iommu_type < 0) { + return iommu_type; + } + + ret = ioctl(group_fd, VFIO_GROUP_SET_CONTAINER, &container->fd); + if (ret) { + error_setg_errno(errp, errno, "Failed to set group container"); + return -errno; + } + + while (ioctl(container->fd, VFIO_SET_IOMMU, iommu_type)) { + if (iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) { + /* + * On sPAPR, despite the IOMMU subdriver always advertises v1 and + * v2, the running platform may not support v2 and there is no + * way to guess it until an IOMMU group gets added to the container. + * So in case it fails with v2, try v1 as a fallback. 
+ */ + iommu_type = VFIO_SPAPR_TCE_IOMMU; + continue; + } + error_setg_errno(errp, errno, "Failed to set iommu for container"); + return -errno; + } + + container->iommu_type = iommu_type; + return 0; +} + +static int vfio_get_iommu_info(VFIOContainer *container, + struct vfio_iommu_type1_info **info) +{ + + size_t argsz = sizeof(struct vfio_iommu_type1_info); + + *info = g_new0(struct vfio_iommu_type1_info, 1); +again: + (*info)->argsz = argsz; + + if (ioctl(container->fd, VFIO_IOMMU_GET_INFO, *info)) { + g_free(*info); + *info = NULL; + return -errno; + } + + if (((*info)->argsz > argsz)) { + argsz = (*info)->argsz; + *info = g_realloc(*info, argsz); + goto again; + } + + return 0; +} + +static struct vfio_info_cap_header * +vfio_get_iommu_info_cap(struct vfio_iommu_type1_info *info, uint16_t id) +{ + struct vfio_info_cap_header *hdr; + void *ptr = info; + + if (!(info->flags & VFIO_IOMMU_INFO_CAPS)) { + return NULL; + } + + for (hdr = ptr + info->cap_offset; hdr != ptr; hdr = ptr + hdr->next) { + if (hdr->id == id) { + return hdr; + } + } + + return NULL; +} + +static void vfio_get_iommu_info_migration(VFIOContainer *container, + struct vfio_iommu_type1_info *info) +{ + struct vfio_info_cap_header *hdr; + struct vfio_iommu_type1_info_cap_migration *cap_mig; + + hdr = vfio_get_iommu_info_cap(info, VFIO_IOMMU_TYPE1_INFO_CAP_MIGRATION); + if (!hdr) { + return; + } + + cap_mig = container_of(hdr, struct vfio_iommu_type1_info_cap_migration, + header); + + /* + * cpu_physical_memory_set_dirty_lebitmap() supports pages in bitmap of + * qemu_real_host_page_size to mark those dirty. + */ + if (cap_mig->pgsize_bitmap & qemu_real_host_page_size) { + container->dirty_pages_supported = true; + container->max_dirty_bitmap_size = cap_mig->max_dirty_bitmap_size; + container->dirty_pgsizes = cap_mig->pgsize_bitmap; + } +} + +static int vfio_connect_container(VFIOGroup *group, AddressSpace *as, + Error **errp) +{ + VFIOContainer *container; + int ret, fd; + VFIOAddressSpace *space; + + space = vfio_get_address_space(as); + + /* + * VFIO is currently incompatible with discarding of RAM insofar as the + * madvise to purge (zap) the page from QEMU's address space does not + * interact with the memory API and therefore leaves stale virtual to + * physical mappings in the IOMMU if the page was previously pinned. We + * therefore set discarding broken for each group added to a container, + * whether the container is used individually or shared. This provides + * us with options to allow devices within a group to opt-in and allow + * discarding, so long as it is done consistently for a group (for instance + * if the device is an mdev device where it is known that the host vendor + * driver will never pin pages outside of the working set of the guest + * driver, which would thus not be discarding candidates). + * + * The first opportunity to induce pinning occurs here where we attempt to + * attach the group to existing containers within the AddressSpace. If any + * pages are already zapped from the virtual address space, such as from + * previous discards, new pinning will cause valid mappings to be + * re-established. Likewise, when the overall MemoryListener for a new + * container is registered, a replay of mappings within the AddressSpace + * will occur, re-establishing any previously zapped pages as well. 
+ * + * In particular, virtio-balloon is currently only prevented from discarding + * new memory; it does not yet set ram_block_discard_set_required() and + * therefore neither stops us here nor deals with the sudden memory + * consumption of inflated memory. + * + * We do support discarding of memory coordinated via the RamDiscardManager + * with some IOMMU types. vfio_ram_block_discard_disable() handles the + * details once we know which type of IOMMU we are using. + */ + + QLIST_FOREACH(container, &space->containers, next) { + if (!ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &container->fd)) { + ret = vfio_ram_block_discard_disable(container, true); + if (ret) { + error_setg_errno(errp, -ret, + "Cannot set discarding of RAM broken"); + if (ioctl(group->fd, VFIO_GROUP_UNSET_CONTAINER, + &container->fd)) { + error_report("vfio: error disconnecting group %d from" + " container", group->groupid); + } + return ret; + } + group->container = container; + QLIST_INSERT_HEAD(&container->group_list, group, container_next); + vfio_kvm_device_add_group(group); + return 0; + } + } + + fd = qemu_open_old("/dev/vfio/vfio", O_RDWR); + if (fd < 0) { + error_setg_errno(errp, errno, "failed to open /dev/vfio/vfio"); + ret = -errno; + goto put_space_exit; + } + + ret = ioctl(fd, VFIO_GET_API_VERSION); + if (ret != VFIO_API_VERSION) { + error_setg(errp, "supported vfio version: %d, " + "reported version: %d", VFIO_API_VERSION, ret); + ret = -EINVAL; + goto close_fd_exit; + } + + container = g_malloc0(sizeof(*container)); + container->space = space; + container->fd = fd; + container->error = NULL; + container->dirty_pages_supported = false; + container->dma_max_mappings = 0; + QLIST_INIT(&container->giommu_list); + QLIST_INIT(&container->hostwin_list); + QLIST_INIT(&container->vrdl_list); + + ret = vfio_init_container(container, group->fd, errp); + if (ret) { + goto free_container_exit; + } + + ret = vfio_ram_block_discard_disable(container, true); + if (ret) { + error_setg_errno(errp, -ret, "Cannot set discarding of RAM broken"); + goto free_container_exit; + } + + switch (container->iommu_type) { + case VFIO_TYPE1v2_IOMMU: + case VFIO_TYPE1_IOMMU: + { + struct vfio_iommu_type1_info *info; + + /* + * FIXME: This assumes that a Type1 IOMMU can map any 64-bit + * IOVA whatsoever. That's not actually true, but the current + * kernel interface doesn't tell us what it can map, and the + * existing Type1 IOMMUs generally support any IOVA we're + * going to actually try in practice. + */ + ret = vfio_get_iommu_info(container, &info); + + if (ret) { + /* + * vfio_get_iommu_info() frees and NULLs info on failure, so + * allocate a stub to carry the assumed 4k IOVA page size. + */ + info = g_new0(struct vfio_iommu_type1_info, 1); + info->iova_pgsizes = 4096; + } else if (!(info->flags & VFIO_IOMMU_INFO_PGSIZES)) { + /* Assume 4k IOVA page size */ + info->iova_pgsizes = 4096; + } + vfio_host_win_add(container, 0, (hwaddr)-1, info->iova_pgsizes); + container->pgsizes = info->iova_pgsizes; + + /* The default in the kernel ("dma_entry_limit") is 65535. */ + container->dma_max_mappings = 65535; + if (!ret) { + vfio_get_info_dma_avail(info, &container->dma_max_mappings); + vfio_get_iommu_info_migration(container, info); + } + g_free(info); + break; + } + case VFIO_SPAPR_TCE_v2_IOMMU: + case VFIO_SPAPR_TCE_IOMMU: + { + struct vfio_iommu_spapr_tce_info info; + bool v2 = container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU; + + /* + * The host kernel code implementing VFIO_IOMMU_DISABLE is called + * when the container fd is closed so we do not call it explicitly + * in this file. 
+ */ + if (!v2) { + ret = ioctl(fd, VFIO_IOMMU_ENABLE); + if (ret) { + error_setg_errno(errp, errno, "failed to enable container"); + ret = -errno; + goto enable_discards_exit; + } + } else { + container->prereg_listener = vfio_prereg_listener; + + memory_listener_register(&container->prereg_listener, + &address_space_memory); + if (container->error) { + memory_listener_unregister(&container->prereg_listener); + ret = -1; + error_propagate_prepend(errp, container->error, + "RAM memory listener initialization failed: "); + goto enable_discards_exit; + } + } + + info.argsz = sizeof(info); + ret = ioctl(fd, VFIO_IOMMU_SPAPR_TCE_GET_INFO, &info); + if (ret) { + error_setg_errno(errp, errno, + "VFIO_IOMMU_SPAPR_TCE_GET_INFO failed"); + ret = -errno; + if (v2) { + memory_listener_unregister(&container->prereg_listener); + } + goto enable_discards_exit; + } + + if (v2) { + container->pgsizes = info.ddw.pgsizes; + /* + * There is a default window in the just-created container. + * To make region_add/del simpler, we had better remove this + * window now and let those iommu_listener callbacks + * create/remove them when needed. + */ + ret = vfio_spapr_remove_window(container, info.dma32_window_start); + if (ret) { + error_setg_errno(errp, -ret, + "failed to remove existing window"); + goto enable_discards_exit; + } + } else { + /* The default table uses 4K pages */ + container->pgsizes = 0x1000; + vfio_host_win_add(container, info.dma32_window_start, + info.dma32_window_start + + info.dma32_window_size - 1, + 0x1000); + } + } + } + + vfio_kvm_device_add_group(group); + + QLIST_INIT(&container->group_list); + QLIST_INSERT_HEAD(&space->containers, container, next); + + group->container = container; + QLIST_INSERT_HEAD(&container->group_list, group, container_next); + + container->listener = vfio_memory_listener; + + memory_listener_register(&container->listener, container->space->as); + + if (container->error) { + ret = -1; + error_propagate_prepend(errp, container->error, + "memory listener initialization failed: "); + goto listener_release_exit; + } + + container->initialized = true; + + return 0; +listener_release_exit: + QLIST_REMOVE(group, container_next); + QLIST_REMOVE(container, next); + vfio_kvm_device_del_group(group); + vfio_listener_release(container); + +enable_discards_exit: + vfio_ram_block_discard_disable(container, false); + +free_container_exit: + g_free(container); + +close_fd_exit: + close(fd); + +put_space_exit: + vfio_put_address_space(space); + + return ret; +} + +static void vfio_disconnect_container(VFIOGroup *group) +{ + VFIOContainer *container = group->container; + + QLIST_REMOVE(group, container_next); + group->container = NULL; + + /* + * Explicitly release the listener first, before unsetting the container, + * since unsetting may destroy the backend container if it's the last + * group. 
+ */ + if (QLIST_EMPTY(&container->group_list)) { + vfio_listener_release(container); + } + + if (ioctl(group->fd, VFIO_GROUP_UNSET_CONTAINER, &container->fd)) { + error_report("vfio: error disconnecting group %d from container", + group->groupid); + } + + if (QLIST_EMPTY(&container->group_list)) { + VFIOAddressSpace *space = container->space; + VFIOGuestIOMMU *giommu, *tmp; + VFIOHostDMAWindow *hostwin, *next; + + QLIST_REMOVE(container, next); + + QLIST_FOREACH_SAFE(giommu, &container->giommu_list, giommu_next, tmp) { + memory_region_unregister_iommu_notifier( + MEMORY_REGION(giommu->iommu), &giommu->n); + QLIST_REMOVE(giommu, giommu_next); + g_free(giommu); + } + + QLIST_FOREACH_SAFE(hostwin, &container->hostwin_list, hostwin_next, + next) { + QLIST_REMOVE(hostwin, hostwin_next); + g_free(hostwin); + } + + trace_vfio_disconnect_container(container->fd); + close(container->fd); + g_free(container); + + vfio_put_address_space(space); + } +} + +VFIOGroup *vfio_get_group(int groupid, AddressSpace *as, Error **errp) +{ + VFIOGroup *group; + char path[32]; + struct vfio_group_status status = { .argsz = sizeof(status) }; + + QLIST_FOREACH(group, &vfio_group_list, next) { + if (group->groupid == groupid) { + /* Found it. Now is it already in the right context? */ + if (group->container->space->as == as) { + return group; + } else { + error_setg(errp, "group %d used in multiple address spaces", + group->groupid); + return NULL; + } + } + } + + group = g_malloc0(sizeof(*group)); + + snprintf(path, sizeof(path), "/dev/vfio/%d", groupid); + group->fd = qemu_open_old(path, O_RDWR); + if (group->fd < 0) { + error_setg_errno(errp, errno, "failed to open %s", path); + goto free_group_exit; + } + + if (ioctl(group->fd, VFIO_GROUP_GET_STATUS, &status)) { + error_setg_errno(errp, errno, "failed to get group %d status", groupid); + goto close_fd_exit; + } + + if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE)) { + error_setg(errp, "group %d is not viable", groupid); + error_append_hint(errp, + "Please ensure all devices within the iommu_group " + "are bound to their vfio bus driver.\n"); + goto close_fd_exit; + } + + group->groupid = groupid; + QLIST_INIT(&group->device_list); + + if (vfio_connect_container(group, as, errp)) { + error_prepend(errp, "failed to setup container for group %d: ", + groupid); + goto close_fd_exit; + } + + if (QLIST_EMPTY(&vfio_group_list)) { + qemu_register_reset(vfio_reset_handler, NULL); + } + + QLIST_INSERT_HEAD(&vfio_group_list, group, next); + + return group; + +close_fd_exit: + close(group->fd); + +free_group_exit: + g_free(group); + + return NULL; +} + +void vfio_put_group(VFIOGroup *group) +{ + if (!group || !QLIST_EMPTY(&group->device_list)) { + return; + } + + if (!group->ram_block_discard_allowed) { + vfio_ram_block_discard_disable(group->container, false); + } + vfio_kvm_device_del_group(group); + vfio_disconnect_container(group); + QLIST_REMOVE(group, next); + trace_vfio_put_group(group->fd); + close(group->fd); + g_free(group); + + if (QLIST_EMPTY(&vfio_group_list)) { + qemu_unregister_reset(vfio_reset_handler, NULL); + } +} + +int vfio_get_device(VFIOGroup *group, const char *name, + VFIODevice *vbasedev, Error **errp) +{ + struct vfio_device_info dev_info = { .argsz = sizeof(dev_info) }; + int ret, fd; + + fd = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD, name); + if (fd < 0) { + error_setg_errno(errp, errno, "error getting device from group %d", + group->groupid); + error_append_hint(errp, + "Verify all devices in group %d are bound to vfio-<bus> " + "or pci-stub and not 
already in use\n", group->groupid); + return fd; + } + + ret = ioctl(fd, VFIO_DEVICE_GET_INFO, &dev_info); + if (ret) { + error_setg_errno(errp, errno, "error getting device info"); + close(fd); + return ret; + } + + /* + * Set discarding of RAM as not broken for this group if the driver knows + * the device operates compatibly with discarding. Setting must be + * consistent per group, but since compatibility is really only possible + * with mdev currently, we expect singleton groups. + */ + if (vbasedev->ram_block_discard_allowed != + group->ram_block_discard_allowed) { + if (!QLIST_EMPTY(&group->device_list)) { + error_setg(errp, "Inconsistent setting of support for discarding " + "RAM (e.g., balloon) within group"); + close(fd); + return -1; + } + + if (!group->ram_block_discard_allowed) { + group->ram_block_discard_allowed = true; + vfio_ram_block_discard_disable(group->container, false); + } + } + + vbasedev->fd = fd; + vbasedev->group = group; + QLIST_INSERT_HEAD(&group->device_list, vbasedev, next); + + vbasedev->num_irqs = dev_info.num_irqs; + vbasedev->num_regions = dev_info.num_regions; + vbasedev->flags = dev_info.flags; + + trace_vfio_get_device(name, dev_info.flags, dev_info.num_regions, + dev_info.num_irqs); + + vbasedev->reset_works = !!(dev_info.flags & VFIO_DEVICE_FLAGS_RESET); + return 0; +} + +void vfio_put_base_device(VFIODevice *vbasedev) +{ + if (!vbasedev->group) { + return; + } + QLIST_REMOVE(vbasedev, next); + vbasedev->group = NULL; + trace_vfio_put_base_device(vbasedev->fd); + close(vbasedev->fd); +} + +int vfio_get_region_info(VFIODevice *vbasedev, int index, + struct vfio_region_info **info) +{ + size_t argsz = sizeof(struct vfio_region_info); + + *info = g_malloc0(argsz); + + (*info)->index = index; +retry: + (*info)->argsz = argsz; + + if (ioctl(vbasedev->fd, VFIO_DEVICE_GET_REGION_INFO, *info)) { + g_free(*info); + *info = NULL; + return -errno; + } + + if ((*info)->argsz > argsz) { + argsz = (*info)->argsz; + *info = g_realloc(*info, argsz); + + goto retry; + } + + return 0; +} + +int vfio_get_dev_region_info(VFIODevice *vbasedev, uint32_t type, + uint32_t subtype, struct vfio_region_info **info) +{ + int i; + + for (i = 0; i < vbasedev->num_regions; i++) { + struct vfio_info_cap_header *hdr; + struct vfio_region_info_cap_type *cap_type; + + if (vfio_get_region_info(vbasedev, i, info)) { + continue; + } + + hdr = vfio_get_region_info_cap(*info, VFIO_REGION_INFO_CAP_TYPE); + if (!hdr) { + g_free(*info); + continue; + } + + cap_type = container_of(hdr, struct vfio_region_info_cap_type, header); + + trace_vfio_get_dev_region(vbasedev->name, i, + cap_type->type, cap_type->subtype); + + if (cap_type->type == type && cap_type->subtype == subtype) { + return 0; + } + + g_free(*info); + } + + *info = NULL; + return -ENODEV; +} + +bool vfio_has_region_cap(VFIODevice *vbasedev, int region, uint16_t cap_type) +{ + struct vfio_region_info *info = NULL; + bool ret = false; + + if (!vfio_get_region_info(vbasedev, region, &info)) { + if (vfio_get_region_info_cap(info, cap_type)) { + ret = true; + } + g_free(info); + } + + return ret; +} + +/* + * Interfaces for IBM EEH (Enhanced Error Handling) + */ +static bool vfio_eeh_container_ok(VFIOContainer *container) +{ + /* + * As of 2016-03-04 (linux-4.5) the host kernel EEH/VFIO + * implementation is broken if there are multiple groups in a + * container. 
The hardware works in units of Partitionable + * Endpoints (== IOMMU groups) and the EEH operations naively + * iterate across all groups in the container, without any logic + * to make sure the groups have their state synchronized. For + * certain operations (ENABLE) that might be ok, until an error + * occurs, but for others (GET_STATE) it's clearly broken. + */ + + /* + * XXX Once fixed kernels exist, test for them here + */ + + if (QLIST_EMPTY(&container->group_list)) { + return false; + } + + if (QLIST_NEXT(QLIST_FIRST(&container->group_list), container_next)) { + return false; + } + + return true; +} + +static int vfio_eeh_container_op(VFIOContainer *container, uint32_t op) +{ + struct vfio_eeh_pe_op pe_op = { + .argsz = sizeof(pe_op), + .op = op, + }; + int ret; + + if (!vfio_eeh_container_ok(container)) { + error_report("vfio/eeh: EEH_PE_OP 0x%x: " + "kernel requires a container with exactly one group", op); + return -EPERM; + } + + ret = ioctl(container->fd, VFIO_EEH_PE_OP, &pe_op); + if (ret < 0) { + error_report("vfio/eeh: EEH_PE_OP 0x%x failed: %m", op); + return -errno; + } + + return ret; +} + +static VFIOContainer *vfio_eeh_as_container(AddressSpace *as) +{ + VFIOAddressSpace *space = vfio_get_address_space(as); + VFIOContainer *container = NULL; + + if (QLIST_EMPTY(&space->containers)) { + /* No containers to act on */ + goto out; + } + + container = QLIST_FIRST(&space->containers); + + if (QLIST_NEXT(container, next)) { + /* We don't yet have logic to synchronize EEH state across + * multiple containers */ + container = NULL; + goto out; + } + +out: + vfio_put_address_space(space); + return container; +} + +bool vfio_eeh_as_ok(AddressSpace *as) +{ + VFIOContainer *container = vfio_eeh_as_container(as); + + return (container != NULL) && vfio_eeh_container_ok(container); +} + +int vfio_eeh_as_op(AddressSpace *as, uint32_t op) +{ + VFIOContainer *container = vfio_eeh_as_container(as); + + if (!container) { + return -ENODEV; + } + return vfio_eeh_container_op(container, op); +} diff --git a/hw/vfio/display.c b/hw/vfio/display.c new file mode 100644 index 000000000..89bc90508 --- /dev/null +++ b/hw/vfio/display.c @@ -0,0 +1,545 @@ +/* + * display support for mdev based vgpu devices + * + * Copyright Red Hat, Inc. 2017 + * + * Authors: + * Gerd Hoffmann + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. 
+ */ + +#include "qemu/osdep.h" +#include <linux/vfio.h> +#include <sys/ioctl.h> + +#include "hw/display/edid.h" +#include "ui/console.h" +#include "qapi/error.h" +#include "pci.h" +#include "trace.h" + +#ifndef DRM_PLANE_TYPE_PRIMARY +# define DRM_PLANE_TYPE_PRIMARY 1 +# define DRM_PLANE_TYPE_CURSOR 2 +#endif + +#define pread_field(_fd, _reg, _ptr, _fld) \ + (sizeof(_ptr->_fld) != \ + pread(_fd, &(_ptr->_fld), sizeof(_ptr->_fld), \ + _reg->offset + offsetof(typeof(*_ptr), _fld))) + +#define pwrite_field(_fd, _reg, _ptr, _fld) \ + (sizeof(_ptr->_fld) != \ + pwrite(_fd, &(_ptr->_fld), sizeof(_ptr->_fld), \ + _reg->offset + offsetof(typeof(*_ptr), _fld))) + + +static void vfio_display_edid_link_up(void *opaque) +{ + VFIOPCIDevice *vdev = opaque; + VFIODisplay *dpy = vdev->dpy; + int fd = vdev->vbasedev.fd; + + dpy->edid_regs->link_state = VFIO_DEVICE_GFX_LINK_STATE_UP; + if (pwrite_field(fd, dpy->edid_info, dpy->edid_regs, link_state)) { + goto err; + } + trace_vfio_display_edid_link_up(); + return; + +err: + trace_vfio_display_edid_write_error(); +} + +static void vfio_display_edid_update(VFIOPCIDevice *vdev, bool enabled, + int prefx, int prefy) +{ + VFIODisplay *dpy = vdev->dpy; + int fd = vdev->vbasedev.fd; + qemu_edid_info edid = { + .maxx = dpy->edid_regs->max_xres, + .maxy = dpy->edid_regs->max_yres, + .prefx = prefx ?: vdev->display_xres, + .prefy = prefy ?: vdev->display_yres, + }; + + timer_del(dpy->edid_link_timer); + dpy->edid_regs->link_state = VFIO_DEVICE_GFX_LINK_STATE_DOWN; + if (pwrite_field(fd, dpy->edid_info, dpy->edid_regs, link_state)) { + goto err; + } + trace_vfio_display_edid_link_down(); + + if (!enabled) { + return; + } + + if (edid.maxx && edid.prefx > edid.maxx) { + edid.prefx = edid.maxx; + } + if (edid.maxy && edid.prefy > edid.maxy) { + edid.prefy = edid.maxy; + } + qemu_edid_generate(dpy->edid_blob, + dpy->edid_regs->edid_max_size, + &edid); + trace_vfio_display_edid_update(edid.prefx, edid.prefy); + + dpy->edid_regs->edid_size = qemu_edid_size(dpy->edid_blob); + if (pwrite_field(fd, dpy->edid_info, dpy->edid_regs, edid_size)) { + goto err; + } + if (pwrite(fd, dpy->edid_blob, dpy->edid_regs->edid_size, + dpy->edid_info->offset + dpy->edid_regs->edid_offset) + != dpy->edid_regs->edid_size) { + goto err; + } + + timer_mod(dpy->edid_link_timer, + qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 100); + return; + +err: + trace_vfio_display_edid_write_error(); + return; +} + +static int vfio_display_edid_ui_info(void *opaque, uint32_t idx, + QemuUIInfo *info) +{ + VFIOPCIDevice *vdev = opaque; + VFIODisplay *dpy = vdev->dpy; + + if (!dpy->edid_regs) { + return 0; + } + + if (info->width && info->height) { + vfio_display_edid_update(vdev, true, info->width, info->height); + } else { + vfio_display_edid_update(vdev, false, 0, 0); + } + + return 0; +} + +static void vfio_display_edid_init(VFIOPCIDevice *vdev) +{ + VFIODisplay *dpy = vdev->dpy; + int fd = vdev->vbasedev.fd; + int ret; + + ret = vfio_get_dev_region_info(&vdev->vbasedev, + VFIO_REGION_TYPE_GFX, + VFIO_REGION_SUBTYPE_GFX_EDID, + &dpy->edid_info); + if (ret) { + return; + } + + trace_vfio_display_edid_available(); + dpy->edid_regs = g_new0(struct vfio_region_gfx_edid, 1); + if (pread_field(fd, dpy->edid_info, dpy->edid_regs, edid_offset)) { + goto err; + } + if (pread_field(fd, dpy->edid_info, dpy->edid_regs, edid_max_size)) { + goto err; + } + if (pread_field(fd, dpy->edid_info, dpy->edid_regs, max_xres)) { + goto err; + } + if (pread_field(fd, dpy->edid_info, dpy->edid_regs, max_yres)) { + goto err; + } + + dpy->edid_blob = 
g_malloc0(dpy->edid_regs->edid_max_size); + + /* if xres + yres properties are unset use the maximum resolution */ + if (!vdev->display_xres) { + vdev->display_xres = dpy->edid_regs->max_xres; + } + if (!vdev->display_yres) { + vdev->display_yres = dpy->edid_regs->max_yres; + } + + dpy->edid_link_timer = timer_new_ms(QEMU_CLOCK_REALTIME, + vfio_display_edid_link_up, vdev); + + vfio_display_edid_update(vdev, true, 0, 0); + return; + +err: + trace_vfio_display_edid_write_error(); + g_free(dpy->edid_regs); + dpy->edid_regs = NULL; + return; +} + +static void vfio_display_edid_exit(VFIODisplay *dpy) +{ + if (!dpy->edid_regs) { + return; + } + + g_free(dpy->edid_regs); + g_free(dpy->edid_blob); + timer_free(dpy->edid_link_timer); +} + +static void vfio_display_update_cursor(VFIODMABuf *dmabuf, + struct vfio_device_gfx_plane_info *plane) +{ + if (dmabuf->pos_x != plane->x_pos || dmabuf->pos_y != plane->y_pos) { + dmabuf->pos_x = plane->x_pos; + dmabuf->pos_y = plane->y_pos; + dmabuf->pos_updates++; + } + if (dmabuf->hot_x != plane->x_hot || dmabuf->hot_y != plane->y_hot) { + dmabuf->hot_x = plane->x_hot; + dmabuf->hot_y = plane->y_hot; + dmabuf->hot_updates++; + } +} + +static VFIODMABuf *vfio_display_get_dmabuf(VFIOPCIDevice *vdev, + uint32_t plane_type) +{ + VFIODisplay *dpy = vdev->dpy; + struct vfio_device_gfx_plane_info plane; + VFIODMABuf *dmabuf; + int fd, ret; + + memset(&plane, 0, sizeof(plane)); + plane.argsz = sizeof(plane); + plane.flags = VFIO_GFX_PLANE_TYPE_DMABUF; + plane.drm_plane_type = plane_type; + ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_QUERY_GFX_PLANE, &plane); + if (ret < 0) { + return NULL; + } + if (!plane.drm_format || !plane.size) { + return NULL; + } + + QTAILQ_FOREACH(dmabuf, &dpy->dmabuf.bufs, next) { + if (dmabuf->dmabuf_id == plane.dmabuf_id) { + /* found in list, move to head, return it */ + QTAILQ_REMOVE(&dpy->dmabuf.bufs, dmabuf, next); + QTAILQ_INSERT_HEAD(&dpy->dmabuf.bufs, dmabuf, next); + if (plane_type == DRM_PLANE_TYPE_CURSOR) { + vfio_display_update_cursor(dmabuf, &plane); + } + return dmabuf; + } + } + + fd = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_GFX_DMABUF, &plane.dmabuf_id); + if (fd < 0) { + return NULL; + } + + dmabuf = g_new0(VFIODMABuf, 1); + dmabuf->dmabuf_id = plane.dmabuf_id; + dmabuf->buf.width = plane.width; + dmabuf->buf.height = plane.height; + dmabuf->buf.stride = plane.stride; + dmabuf->buf.fourcc = plane.drm_format; + dmabuf->buf.modifier = plane.drm_format_mod; + dmabuf->buf.fd = fd; + if (plane_type == DRM_PLANE_TYPE_CURSOR) { + vfio_display_update_cursor(dmabuf, &plane); + } + + QTAILQ_INSERT_HEAD(&dpy->dmabuf.bufs, dmabuf, next); + return dmabuf; +} + +static void vfio_display_free_one_dmabuf(VFIODisplay *dpy, VFIODMABuf *dmabuf) +{ + QTAILQ_REMOVE(&dpy->dmabuf.bufs, dmabuf, next); + dpy_gl_release_dmabuf(dpy->con, &dmabuf->buf); + close(dmabuf->buf.fd); + g_free(dmabuf); +} + +static void vfio_display_free_dmabufs(VFIOPCIDevice *vdev) +{ + VFIODisplay *dpy = vdev->dpy; + VFIODMABuf *dmabuf, *tmp; + uint32_t keep = 5; + + QTAILQ_FOREACH_SAFE(dmabuf, &dpy->dmabuf.bufs, next, tmp) { + if (keep > 0) { + keep--; + continue; + } + assert(dmabuf != dpy->dmabuf.primary); + vfio_display_free_one_dmabuf(dpy, dmabuf); + } +} + +static void vfio_display_dmabuf_update(void *opaque) +{ + VFIOPCIDevice *vdev = opaque; + VFIODisplay *dpy = vdev->dpy; + VFIODMABuf *primary, *cursor; + bool free_bufs = false, new_cursor = false; + + primary = vfio_display_get_dmabuf(vdev, DRM_PLANE_TYPE_PRIMARY); + if (primary == NULL) { + if (dpy->ramfb) { + 
ramfb_display_update(dpy->con, dpy->ramfb); + } + return; + } + + if (dpy->dmabuf.primary != primary) { + dpy->dmabuf.primary = primary; + qemu_console_resize(dpy->con, + primary->buf.width, primary->buf.height); + dpy_gl_scanout_dmabuf(dpy->con, &primary->buf); + free_bufs = true; + } + + cursor = vfio_display_get_dmabuf(vdev, DRM_PLANE_TYPE_CURSOR); + if (dpy->dmabuf.cursor != cursor) { + dpy->dmabuf.cursor = cursor; + new_cursor = true; + free_bufs = true; + } + + if (cursor && (new_cursor || cursor->hot_updates)) { + bool have_hot = (cursor->hot_x != 0xffffffff && + cursor->hot_y != 0xffffffff); + dpy_gl_cursor_dmabuf(dpy->con, &cursor->buf, have_hot, + cursor->hot_x, cursor->hot_y); + cursor->hot_updates = 0; + } else if (!cursor && new_cursor) { + dpy_gl_cursor_dmabuf(dpy->con, NULL, false, 0, 0); + } + + if (cursor && cursor->pos_updates) { + dpy_gl_cursor_position(dpy->con, + cursor->pos_x, + cursor->pos_y); + cursor->pos_updates = 0; + } + + dpy_gl_update(dpy->con, 0, 0, primary->buf.width, primary->buf.height); + + if (free_bufs) { + vfio_display_free_dmabufs(vdev); + } +} + +static int vfio_display_get_flags(void *opaque) +{ + return GRAPHIC_FLAGS_GL | GRAPHIC_FLAGS_DMABUF; +} + +static const GraphicHwOps vfio_display_dmabuf_ops = { + .get_flags = vfio_display_get_flags, + .gfx_update = vfio_display_dmabuf_update, + .ui_info = vfio_display_edid_ui_info, +}; + +static int vfio_display_dmabuf_init(VFIOPCIDevice *vdev, Error **errp) +{ + if (!display_opengl) { + error_setg(errp, "vfio-display-dmabuf: opengl not available"); + return -1; + } + + vdev->dpy = g_new0(VFIODisplay, 1); + vdev->dpy->con = graphic_console_init(DEVICE(vdev), 0, + &vfio_display_dmabuf_ops, + vdev); + if (vdev->enable_ramfb) { + vdev->dpy->ramfb = ramfb_setup(errp); + } + vfio_display_edid_init(vdev); + return 0; +} + +static void vfio_display_dmabuf_exit(VFIODisplay *dpy) +{ + VFIODMABuf *dmabuf; + + if (QTAILQ_EMPTY(&dpy->dmabuf.bufs)) { + return; + } + + while ((dmabuf = QTAILQ_FIRST(&dpy->dmabuf.bufs)) != NULL) { + vfio_display_free_one_dmabuf(dpy, dmabuf); + } +} + +/* ---------------------------------------------------------------------- */ +void vfio_display_reset(VFIOPCIDevice *vdev) +{ + if (!vdev || !vdev->dpy || !vdev->dpy->con || + !vdev->dpy->dmabuf.primary) { + return; + } + + dpy_gl_scanout_disable(vdev->dpy->con); + vfio_display_dmabuf_exit(vdev->dpy); + dpy_gfx_update_full(vdev->dpy->con); +} + +static void vfio_display_region_update(void *opaque) +{ + VFIOPCIDevice *vdev = opaque; + VFIODisplay *dpy = vdev->dpy; + struct vfio_device_gfx_plane_info plane = { + .argsz = sizeof(plane), + .flags = VFIO_GFX_PLANE_TYPE_REGION + }; + pixman_format_code_t format; + int ret; + + ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_QUERY_GFX_PLANE, &plane); + if (ret < 0) { + error_report("ioctl VFIO_DEVICE_QUERY_GFX_PLANE: %s", + strerror(errno)); + return; + } + if (!plane.drm_format || !plane.size) { + if (dpy->ramfb) { + ramfb_display_update(dpy->con, dpy->ramfb); + dpy->region.surface = NULL; + } + return; + } + format = qemu_drm_format_to_pixman(plane.drm_format); + if (!format) { + return; + } + + if (dpy->region.buffer.size && + dpy->region.buffer.nr != plane.region_index) { + /* region changed */ + vfio_region_exit(&dpy->region.buffer); + vfio_region_finalize(&dpy->region.buffer); + dpy->region.surface = NULL; + } + + if (dpy->region.surface && + (surface_width(dpy->region.surface) != plane.width || + surface_height(dpy->region.surface) != plane.height || + surface_format(dpy->region.surface) != 
format)) { + /* size changed */ + dpy->region.surface = NULL; + } + + if (!dpy->region.buffer.size) { + /* mmap region */ + ret = vfio_region_setup(OBJECT(vdev), &vdev->vbasedev, + &dpy->region.buffer, + plane.region_index, + "display"); + if (ret != 0) { + error_report("%s: vfio_region_setup(%d): %s", + __func__, plane.region_index, strerror(-ret)); + goto err; + } + ret = vfio_region_mmap(&dpy->region.buffer); + if (ret != 0) { + error_report("%s: vfio_region_mmap(%d): %s", __func__, + plane.region_index, strerror(-ret)); + goto err; + } + assert(dpy->region.buffer.mmaps[0].mmap != NULL); + } + + if (dpy->region.surface == NULL) { + /* create surface */ + dpy->region.surface = qemu_create_displaysurface_from + (plane.width, plane.height, format, + plane.stride, dpy->region.buffer.mmaps[0].mmap); + dpy_gfx_replace_surface(dpy->con, dpy->region.surface); + } + + /* full screen update */ + dpy_gfx_update(dpy->con, 0, 0, + surface_width(dpy->region.surface), + surface_height(dpy->region.surface)); + return; + +err: + vfio_region_exit(&dpy->region.buffer); + vfio_region_finalize(&dpy->region.buffer); +} + +static const GraphicHwOps vfio_display_region_ops = { + .gfx_update = vfio_display_region_update, +}; + +static int vfio_display_region_init(VFIOPCIDevice *vdev, Error **errp) +{ + vdev->dpy = g_new0(VFIODisplay, 1); + vdev->dpy->con = graphic_console_init(DEVICE(vdev), 0, + &vfio_display_region_ops, + vdev); + if (vdev->enable_ramfb) { + vdev->dpy->ramfb = ramfb_setup(errp); + } + return 0; +} + +static void vfio_display_region_exit(VFIODisplay *dpy) +{ + if (!dpy->region.buffer.size) { + return; + } + + vfio_region_exit(&dpy->region.buffer); + vfio_region_finalize(&dpy->region.buffer); +} + +/* ---------------------------------------------------------------------- */ + +int vfio_display_probe(VFIOPCIDevice *vdev, Error **errp) +{ + struct vfio_device_gfx_plane_info probe; + int ret; + + memset(&probe, 0, sizeof(probe)); + probe.argsz = sizeof(probe); + probe.flags = VFIO_GFX_PLANE_TYPE_PROBE | VFIO_GFX_PLANE_TYPE_DMABUF; + ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_QUERY_GFX_PLANE, &probe); + if (ret == 0) { + return vfio_display_dmabuf_init(vdev, errp); + } + + memset(&probe, 0, sizeof(probe)); + probe.argsz = sizeof(probe); + probe.flags = VFIO_GFX_PLANE_TYPE_PROBE | VFIO_GFX_PLANE_TYPE_REGION; + ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_QUERY_GFX_PLANE, &probe); + if (ret == 0) { + return vfio_display_region_init(vdev, errp); + } + + if (vdev->display == ON_OFF_AUTO_AUTO) { + /* not an error in automatic mode */ + return 0; + } + + error_setg(errp, "vfio: device doesn't support any (known) display method"); + return -1; +} + +void vfio_display_finalize(VFIOPCIDevice *vdev) +{ + if (!vdev->dpy) { + return; + } + + graphic_console_close(vdev->dpy->con); + vfio_display_dmabuf_exit(vdev->dpy); + vfio_display_region_exit(vdev->dpy); + vfio_display_edid_exit(vdev->dpy); + g_free(vdev->dpy); +} diff --git a/hw/vfio/igd.c b/hw/vfio/igd.c new file mode 100644 index 000000000..d4685709a --- /dev/null +++ b/hw/vfio/igd.c @@ -0,0 +1,616 @@ +/* + * IGD device quirks + * + * Copyright Red Hat, Inc. 2016 + * + * Authors: + * Alex Williamson + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. 
+ */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "qapi/error.h" +#include "hw/hw.h" +#include "hw/nvram/fw_cfg.h" +#include "pci.h" +#include "trace.h" + +/* + * Intel IGD support + * + * Obviously IGD is not a discrete device, this is evidenced not only by it + * being integrated into the CPU, but by the various chipset and BIOS + * dependencies that it brings along with it. Intel is trying to move away + * from this and Broadwell and newer devices can run in what Intel calls + * "Universal Pass-Through" mode, or UPT. Theoretically in UPT mode, nothing + * more is required beyond assigning the IGD device to a VM. There are + * however support limitations to this mode. It only supports IGD as a + * secondary graphics device in the VM and it doesn't officially support any + * physical outputs. + * + * The code here attempts to enable what we'll call legacy mode assignment, + * IGD retains most of the capabilities we expect for it to have on bare + * metal. To enable this mode, the IGD device must be assigned to the VM + * at PCI address 00:02.0, it must have a ROM, it very likely needs VGA + * support, we must have VM BIOS support for reserving and populating some + * of the required tables, and we need to tweak the chipset with revisions + * and IDs and an LPC/ISA bridge device. The intention is to make all of + * this happen automatically by installing the device at the correct VM PCI + * bus address. If any of the conditions are not met, we cross our fingers + * and hope the user knows better. + * + * NB - It is possible to enable physical outputs in UPT mode by supplying + * an OpRegion table. We don't do this by default because the guest driver + * behaves differently if an OpRegion is provided and no monitor is attached + * vs no OpRegion and a monitor being attached or not. Effectively, if a + * headless setup is desired, the OpRegion gets in the way of that. + */ + +/* + * This presumes the device is already known to be an Intel VGA device, so we + * take liberties in which device ID bits match which generation. This should + * not be taken as an indication that all the devices are supported, or even + * supportable, some of them don't even support VT-d. + * See linux:include/drm/i915_pciids.h for IDs. + */ +static int igd_gen(VFIOPCIDevice *vdev) +{ + if ((vdev->device_id & 0xfff) == 0xa84) { + return 8; /* Broxton */ + } + + switch (vdev->device_id & 0xff00) { + /* Old, untested, unavailable, unknown */ + case 0x0000: + case 0x2500: + case 0x2700: + case 0x2900: + case 0x2a00: + case 0x2e00: + case 0x3500: + case 0xa000: + return -1; + /* SandyBridge, IvyBridge, ValleyView, Haswell */ + case 0x0100: + case 0x0400: + case 0x0a00: + case 0x0c00: + case 0x0d00: + case 0x0f00: + return 6; + /* BroadWell, CherryView, SkyLake, KabyLake */ + case 0x1600: + case 0x1900: + case 0x2200: + case 0x5900: + return 8; + } + + return 8; /* Assume newer is compatible */ +} + +typedef struct VFIOIGDQuirk { + struct VFIOPCIDevice *vdev; + uint32_t index; + uint32_t bdsm; +} VFIOIGDQuirk; + +#define IGD_GMCH 0x50 /* Graphics Control Register */ +#define IGD_BDSM 0x5c /* Base Data of Stolen Memory */ + + +/* + * The rather short list of registers that we copy from the host devices. + * The LPC/ISA bridge values are definitely needed to support the vBIOS, the + * host bridge values may or may not be needed depending on the guest OS. + * Since we're only munging revision and subsystem values on the host bridge, + * we don't require our own device. 
The LPC/ISA bridge needs to be our very + * own though. + */ +typedef struct { + uint8_t offset; + uint8_t len; +} IGDHostInfo; + +static const IGDHostInfo igd_host_bridge_infos[] = { + {PCI_REVISION_ID, 2}, + {PCI_SUBSYSTEM_VENDOR_ID, 2}, + {PCI_SUBSYSTEM_ID, 2}, +}; + +static const IGDHostInfo igd_lpc_bridge_infos[] = { + {PCI_VENDOR_ID, 2}, + {PCI_DEVICE_ID, 2}, + {PCI_REVISION_ID, 2}, + {PCI_SUBSYSTEM_VENDOR_ID, 2}, + {PCI_SUBSYSTEM_ID, 2}, +}; + +static int vfio_pci_igd_copy(VFIOPCIDevice *vdev, PCIDevice *pdev, + struct vfio_region_info *info, + const IGDHostInfo *list, int len) +{ + int i, ret; + + for (i = 0; i < len; i++) { + ret = pread(vdev->vbasedev.fd, pdev->config + list[i].offset, + list[i].len, info->offset + list[i].offset); + if (ret != list[i].len) { + error_report("IGD copy failed: %m"); + return -errno; + } + } + + return 0; +} + +/* + * Stuff a few values into the host bridge. + */ +static int vfio_pci_igd_host_init(VFIOPCIDevice *vdev, + struct vfio_region_info *info) +{ + PCIBus *bus; + PCIDevice *host_bridge; + int ret; + + bus = pci_device_root_bus(&vdev->pdev); + host_bridge = pci_find_device(bus, 0, PCI_DEVFN(0, 0)); + + if (!host_bridge) { + error_report("Can't find host bridge"); + return -ENODEV; + } + + ret = vfio_pci_igd_copy(vdev, host_bridge, info, igd_host_bridge_infos, + ARRAY_SIZE(igd_host_bridge_infos)); + if (!ret) { + trace_vfio_pci_igd_host_bridge_enabled(vdev->vbasedev.name); + } + + return ret; +} + +/* + * IGD LPC/ISA bridge support code. The vBIOS needs this, but we can't write + * arbitrary values into just any bridge, so we must create our own. We try + * to handle if the user has created it for us, which they might want to do + * to enable multifunction so we don't occupy the whole PCI slot. + */ +static void vfio_pci_igd_lpc_bridge_realize(PCIDevice *pdev, Error **errp) +{ + if (pdev->devfn != PCI_DEVFN(0x1f, 0)) { + error_setg(errp, "VFIO dummy ISA/LPC bridge must have address 1f.0"); + } +} + +static void vfio_pci_igd_lpc_bridge_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + + set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); + dc->desc = "VFIO dummy ISA/LPC bridge for IGD assignment"; + dc->hotpluggable = false; + k->realize = vfio_pci_igd_lpc_bridge_realize; + k->class_id = PCI_CLASS_BRIDGE_ISA; +} + +static TypeInfo vfio_pci_igd_lpc_bridge_info = { + .name = "vfio-pci-igd-lpc-bridge", + .parent = TYPE_PCI_DEVICE, + .class_init = vfio_pci_igd_lpc_bridge_class_init, + .interfaces = (InterfaceInfo[]) { + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { }, + }, +}; + +static void vfio_pci_igd_register_types(void) +{ + type_register_static(&vfio_pci_igd_lpc_bridge_info); +} + +type_init(vfio_pci_igd_register_types) + +static int vfio_pci_igd_lpc_init(VFIOPCIDevice *vdev, + struct vfio_region_info *info) +{ + PCIDevice *lpc_bridge; + int ret; + + lpc_bridge = pci_find_device(pci_device_root_bus(&vdev->pdev), + 0, PCI_DEVFN(0x1f, 0)); + if (!lpc_bridge) { + lpc_bridge = pci_create_simple(pci_device_root_bus(&vdev->pdev), + PCI_DEVFN(0x1f, 0), "vfio-pci-igd-lpc-bridge"); + } + + ret = vfio_pci_igd_copy(vdev, lpc_bridge, info, igd_lpc_bridge_infos, + ARRAY_SIZE(igd_lpc_bridge_infos)); + if (!ret) { + trace_vfio_pci_igd_lpc_bridge_enabled(vdev->vbasedev.name); + } + + return ret; +} + +/* + * IGD Gen8 and newer support up to 8MB for the GTT and use a 64bit PTE + * entry, older IGDs use 2MB and 32bit. Each PTE maps a 4k page. 
Therefore + * we either have 2M/4k * 4 = 2k or 8M/4k * 8 = 16k as the maximum iobar index + * for programming the GTT. + * + * See linux:include/drm/i915_drm.h for shift and mask values. + */ +static int vfio_igd_gtt_max(VFIOPCIDevice *vdev) +{ + uint32_t gmch = vfio_pci_read_config(&vdev->pdev, IGD_GMCH, sizeof(gmch)); + int ggms, gen = igd_gen(vdev); + + gmch = vfio_pci_read_config(&vdev->pdev, IGD_GMCH, sizeof(gmch)); + ggms = (gmch >> (gen < 8 ? 8 : 6)) & 0x3; + if (gen > 6) { + ggms = 1 << ggms; + } + + ggms *= MiB; + + return (ggms / (4 * KiB)) * (gen < 8 ? 4 : 8); +} + +/* + * The IGD ROM will make use of stolen memory (GGMS) for support of VESA modes. + * Somehow the host stolen memory range is used for this, but how the ROM gets + * it is a mystery, perhaps it's hardcoded into the ROM. Thankfully though, it + * reprograms the GTT through the IOBAR where we can trap it and transpose the + * programming to the VM allocated buffer. That buffer gets reserved by the VM + * firmware via the fw_cfg entry added below. Here we're just monitoring the + * IOBAR address and data registers to detect a write sequence targeting the + * GTTADR. This code is developed by observed behavior and doesn't have a + * direct spec reference, unfortunately. + */ +static uint64_t vfio_igd_quirk_data_read(void *opaque, + hwaddr addr, unsigned size) +{ + VFIOIGDQuirk *igd = opaque; + VFIOPCIDevice *vdev = igd->vdev; + + igd->index = ~0; + + return vfio_region_read(&vdev->bars[4].region, addr + 4, size); +} + +static void vfio_igd_quirk_data_write(void *opaque, hwaddr addr, + uint64_t data, unsigned size) +{ + VFIOIGDQuirk *igd = opaque; + VFIOPCIDevice *vdev = igd->vdev; + uint64_t val = data; + int gen = igd_gen(vdev); + + /* + * Programming the GGMS starts at index 0x1 and uses every 4th index (ie. + * 0x1, 0x5, 0x9, 0xd,...). For pre-Gen8 each 4-byte write is a whole PTE + * entry, with 0th bit enable set. For Gen8 and up, PTEs are 64bit, so + * entries 0x5 & 0xd are the high dword, in our case zero. Each PTE points + * to a 4k page, which we translate to a page from the VM allocated region, + * pointed to by the BDSM register. If this is not set, we fail. + * + * We trap writes to the full configured GTT size, but we typically only + * see the vBIOS writing up to (nearly) the 1MB barrier. In fact it often + * seems to miss the last entry for an even 1MB GTT. Doing a gratuitous + * write of that last entry does work, but is hopefully unnecessary since + * we clear the previous GTT on initialization. + */ + if ((igd->index % 4 == 1) && igd->index < vfio_igd_gtt_max(vdev)) { + if (gen < 8 || (igd->index % 8 == 1)) { + uint32_t base; + + base = pci_get_long(vdev->pdev.config + IGD_BDSM); + if (!base) { + hw_error("vfio-igd: Guest attempted to program IGD GTT before " + "BIOS reserved stolen memory. 
Unsupported BIOS?"); + } + + val = data - igd->bdsm + base; + } else { + val = 0; /* upper 32bits of pte, we only enable below 4G PTEs */ + } + + trace_vfio_pci_igd_bar4_write(vdev->vbasedev.name, + igd->index, data, val); + } + + vfio_region_write(&vdev->bars[4].region, addr + 4, val, size); + + igd->index = ~0; +} + +static const MemoryRegionOps vfio_igd_data_quirk = { + .read = vfio_igd_quirk_data_read, + .write = vfio_igd_quirk_data_write, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static uint64_t vfio_igd_quirk_index_read(void *opaque, + hwaddr addr, unsigned size) +{ + VFIOIGDQuirk *igd = opaque; + VFIOPCIDevice *vdev = igd->vdev; + + igd->index = ~0; + + return vfio_region_read(&vdev->bars[4].region, addr, size); +} + +static void vfio_igd_quirk_index_write(void *opaque, hwaddr addr, + uint64_t data, unsigned size) +{ + VFIOIGDQuirk *igd = opaque; + VFIOPCIDevice *vdev = igd->vdev; + + igd->index = data; + + vfio_region_write(&vdev->bars[4].region, addr, data, size); +} + +static const MemoryRegionOps vfio_igd_index_quirk = { + .read = vfio_igd_quirk_index_read, + .write = vfio_igd_quirk_index_write, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +void vfio_probe_igd_bar4_quirk(VFIOPCIDevice *vdev, int nr) +{ + struct vfio_region_info *rom = NULL, *opregion = NULL, + *host = NULL, *lpc = NULL; + VFIOQuirk *quirk; + VFIOIGDQuirk *igd; + PCIDevice *lpc_bridge; + int i, ret, ggms_mb, gms_mb = 0, gen; + uint64_t *bdsm_size; + uint32_t gmch; + uint16_t cmd_orig, cmd; + Error *err = NULL; + + /* + * This must be an Intel VGA device at address 00:02.0 for us to even + * consider enabling legacy mode. The vBIOS has dependencies on the + * PCI bus address. + */ + if (!vfio_pci_is(vdev, PCI_VENDOR_ID_INTEL, PCI_ANY_ID) || + !vfio_is_vga(vdev) || nr != 4 || + &vdev->pdev != pci_find_device(pci_device_root_bus(&vdev->pdev), + 0, PCI_DEVFN(0x2, 0))) { + return; + } + + /* + * We need to create an LPC/ISA bridge at PCI bus address 00:1f.0 that we + * can stuff host values into, so if there's already one there and it's not + * one we can hack on, legacy mode is no-go. Sorry Q35. + */ + lpc_bridge = pci_find_device(pci_device_root_bus(&vdev->pdev), + 0, PCI_DEVFN(0x1f, 0)); + if (lpc_bridge && !object_dynamic_cast(OBJECT(lpc_bridge), + "vfio-pci-igd-lpc-bridge")) { + error_report("IGD device %s cannot support legacy mode due to existing " + "devices at address 1f.0", vdev->vbasedev.name); + return; + } + + /* + * IGD is not a standard, they like to change their specs often. We + * only attempt to support back to SandBridge and we hope that newer + * devices maintain compatibility with generation 8. + */ + gen = igd_gen(vdev); + if (gen != 6 && gen != 8) { + error_report("IGD device %s is unsupported in legacy mode, " + "try SandyBridge or newer", vdev->vbasedev.name); + return; + } + + /* + * Most of what we're doing here is to enable the ROM to run, so if + * there's no ROM, there's no point in setting up this quirk. + * NB. We only seem to get BIOS ROMs, so a UEFI VM would need CSM support. + */ + ret = vfio_get_region_info(&vdev->vbasedev, + VFIO_PCI_ROM_REGION_INDEX, &rom); + if ((ret || !rom->size) && !vdev->pdev.romfile) { + error_report("IGD device %s has no ROM, legacy mode disabled", + vdev->vbasedev.name); + goto out; + } + + /* + * Ignore the hotplug corner case, mark the ROM failed, we can't + * create the devices we need for legacy mode in the hotplug scenario. 
+ */ + if (vdev->pdev.qdev.hotplugged) { + error_report("IGD device %s hotplugged, ROM disabled, " + "legacy mode disabled", vdev->vbasedev.name); + vdev->rom_read_failed = true; + goto out; + } + + /* + * Check whether we have all the vfio device specific regions to + * support legacy mode (added in Linux v4.6). If not, bail. + */ + ret = vfio_get_dev_region_info(&vdev->vbasedev, + VFIO_REGION_TYPE_PCI_VENDOR_TYPE | PCI_VENDOR_ID_INTEL, + VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION, &opregion); + if (ret) { + error_report("IGD device %s does not support OpRegion access, " + "legacy mode disabled", vdev->vbasedev.name); + goto out; + } + + ret = vfio_get_dev_region_info(&vdev->vbasedev, + VFIO_REGION_TYPE_PCI_VENDOR_TYPE | PCI_VENDOR_ID_INTEL, + VFIO_REGION_SUBTYPE_INTEL_IGD_HOST_CFG, &host); + if (ret) { + error_report("IGD device %s does not support host bridge access, " + "legacy mode disabled", vdev->vbasedev.name); + goto out; + } + + ret = vfio_get_dev_region_info(&vdev->vbasedev, + VFIO_REGION_TYPE_PCI_VENDOR_TYPE | PCI_VENDOR_ID_INTEL, + VFIO_REGION_SUBTYPE_INTEL_IGD_LPC_CFG, &lpc); + if (ret) { + error_report("IGD device %s does not support LPC bridge access, " + "legacy mode disabled", vdev->vbasedev.name); + goto out; + } + + gmch = vfio_pci_read_config(&vdev->pdev, IGD_GMCH, 4); + + /* + * If IGD VGA Disable is clear (expected) and VGA is not already enabled, + * try to enable it. Probably shouldn't be using legacy mode without VGA, + * but also no point in us enabling VGA if disabled in hardware. + */ + if (!(gmch & 0x2) && !vdev->vga && vfio_populate_vga(vdev, &err)) { + error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name); + error_report("IGD device %s failed to enable VGA access, " + "legacy mode disabled", vdev->vbasedev.name); + goto out; + } + + /* Create our LPC/ISA bridge */ + ret = vfio_pci_igd_lpc_init(vdev, lpc); + if (ret) { + error_report("IGD device %s failed to create LPC bridge, " + "legacy mode disabled", vdev->vbasedev.name); + goto out; + } + + /* Stuff some host values into the VM PCI host bridge */ + ret = vfio_pci_igd_host_init(vdev, host); + if (ret) { + error_report("IGD device %s failed to modify host bridge, " + "legacy mode disabled", vdev->vbasedev.name); + goto out; + } + + /* Setup OpRegion access */ + ret = vfio_pci_igd_opregion_init(vdev, opregion, &err); + if (ret) { + error_append_hint(&err, "IGD legacy mode disabled\n"); + error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name); + goto out; + } + + /* Setup our quirk to munge GTT addresses to the VM allocated buffer */ + quirk = vfio_quirk_alloc(2); + igd = quirk->data = g_malloc0(sizeof(*igd)); + igd->vdev = vdev; + igd->index = ~0; + igd->bdsm = vfio_pci_read_config(&vdev->pdev, IGD_BDSM, 4); + igd->bdsm &= ~((1 * MiB) - 1); /* 1MB aligned */ + + memory_region_init_io(&quirk->mem[0], OBJECT(vdev), &vfio_igd_index_quirk, + igd, "vfio-igd-index-quirk", 4); + memory_region_add_subregion_overlap(vdev->bars[nr].region.mem, + 0, &quirk->mem[0], 1); + + memory_region_init_io(&quirk->mem[1], OBJECT(vdev), &vfio_igd_data_quirk, + igd, "vfio-igd-data-quirk", 4); + memory_region_add_subregion_overlap(vdev->bars[nr].region.mem, + 4, &quirk->mem[1], 1); + + QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next); + + /* Determine the size of stolen memory needed for GTT */ + ggms_mb = (gmch >> (gen < 8 ? 8 : 6)) & 0x3; + if (gen > 6) { + ggms_mb = 1 << ggms_mb; + } + + /* + * Assume we have no GMS memory, but allow it to be overridden by device + * option (experimental). 
The spec doesn't actually allow zero GMS + * when IVD (IGD VGA Disable) is clear, but the claim is that it's unused, + * so let's not waste VM memory for it. + */ + gmch &= ~((gen < 8 ? 0x1f : 0xff) << (gen < 8 ? 3 : 8)); + + if (vdev->igd_gms) { + if (vdev->igd_gms <= 0x10) { + gms_mb = vdev->igd_gms * 32; + gmch |= vdev->igd_gms << (gen < 8 ? 3 : 8); + } else { + error_report("Unsupported IGD GMS value 0x%x", vdev->igd_gms); + vdev->igd_gms = 0; + } + } + + /* + * Request reserved memory for stolen memory via fw_cfg. VM firmware + * must allocate a 1MB aligned reserved memory region below 4GB with + * the requested size (in bytes) for use by the Intel PCI class VGA + * device at VM address 00:02.0. The base address of this reserved + * memory region must be written to the device BDSM register at PCI + * config offset 0x5C. + */ + bdsm_size = g_malloc(sizeof(*bdsm_size)); + *bdsm_size = cpu_to_le64((ggms_mb + gms_mb) * MiB); + fw_cfg_add_file(fw_cfg_find(), "etc/igd-bdsm-size", + bdsm_size, sizeof(*bdsm_size)); + + /* GMCH is read-only, emulated */ + pci_set_long(vdev->pdev.config + IGD_GMCH, gmch); + pci_set_long(vdev->pdev.wmask + IGD_GMCH, 0); + pci_set_long(vdev->emulated_config_bits + IGD_GMCH, ~0); + + /* BDSM is read-write, emulated. The BIOS needs to be able to write it */ + pci_set_long(vdev->pdev.config + IGD_BDSM, 0); + pci_set_long(vdev->pdev.wmask + IGD_BDSM, ~0); + pci_set_long(vdev->emulated_config_bits + IGD_BDSM, ~0); + + /* + * This IOBAR gives us access to GTTADR, which allows us to write to + * the GTT itself. So let's go ahead and write zero to all the GTT + * entries to avoid spurious DMA faults. Be sure I/O access is enabled + * before talking to the device. + */ + if (pread(vdev->vbasedev.fd, &cmd_orig, sizeof(cmd_orig), + vdev->config_offset + PCI_COMMAND) != sizeof(cmd_orig)) { + error_report("IGD device %s - failed to read PCI command register", + vdev->vbasedev.name); + } + + cmd = cmd_orig | PCI_COMMAND_IO; + + if (pwrite(vdev->vbasedev.fd, &cmd, sizeof(cmd), + vdev->config_offset + PCI_COMMAND) != sizeof(cmd)) { + error_report("IGD device %s - failed to write PCI command register", + vdev->vbasedev.name); + } + + for (i = 1; i < vfio_igd_gtt_max(vdev); i += 4) { + vfio_region_write(&vdev->bars[4].region, 0, i, 4); + vfio_region_write(&vdev->bars[4].region, 4, 0, 4); + } + + if (pwrite(vdev->vbasedev.fd, &cmd_orig, sizeof(cmd_orig), + vdev->config_offset + PCI_COMMAND) != sizeof(cmd_orig)) { + error_report("IGD device %s - failed to restore PCI command register", + vdev->vbasedev.name); + } + + trace_vfio_pci_igd_bdsm_enabled(vdev->vbasedev.name, ggms_mb + gms_mb); + +out: + g_free(rom); + g_free(opregion); + g_free(host); + g_free(lpc); +} diff --git a/hw/vfio/meson.build b/hw/vfio/meson.build new file mode 100644 index 000000000..da9af297a --- /dev/null +++ b/hw/vfio/meson.build @@ -0,0 +1,19 @@ +vfio_ss = ss.source_set() +vfio_ss.add(files( + 'common.c', + 'spapr.c', + 'migration.c', +)) +vfio_ss.add(when: 'CONFIG_VFIO_PCI', if_true: files( + 'display.c', + 'pci-quirks.c', + 'pci.c', +)) +vfio_ss.add(when: 'CONFIG_VFIO_CCW', if_true: files('ccw.c')) +vfio_ss.add(when: 'CONFIG_VFIO_PLATFORM', if_true: files('platform.c')) +vfio_ss.add(when: 'CONFIG_VFIO_XGMAC', if_true: files('calxeda-xgmac.c')) +vfio_ss.add(when: 'CONFIG_VFIO_AMD_XGBE', if_true: files('amd-xgbe.c')) +vfio_ss.add(when: 'CONFIG_VFIO_AP', if_true: files('ap.c')) +vfio_ss.add(when: 'CONFIG_VFIO_IGD', if_true: files('igd.c')) + +specific_ss.add_all(when: 'CONFIG_VFIO', if_true: vfio_ss) 
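Aside: vfio_get_iommu_info() and vfio_get_region_info() in common.c above both rely on the VFIO "argsz" handshake: userspace reports the size of the buffer it allocated in argsz, and if the kernel needs more room (for example to append capability chains such as the sparse-mmap or DMA-avail capabilities used earlier), it writes the required size back and expects the caller to reallocate and retry. The minimal sketch below shows the same idiom outside QEMU; it is illustrative only, it assumes an already-open VFIO container fd, and query_container_info() is an assumed name, not a function from this patch.

    #include <errno.h>
    #include <stdlib.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/vfio.h>

    /* Query VFIO_IOMMU_GET_INFO, growing the buffer until the reply fits. */
    static struct vfio_iommu_type1_info *query_container_info(int container_fd)
    {
        size_t argsz = sizeof(struct vfio_iommu_type1_info);
        struct vfio_iommu_type1_info *info = calloc(1, argsz);

        while (info) {
            info->argsz = argsz;
            if (ioctl(container_fd, VFIO_IOMMU_GET_INFO, info)) {
                free(info);             /* errno describes the failure */
                return NULL;
            }
            if (info->argsz <= argsz) {
                return info;            /* the reply fit in our buffer */
            }
            /* Kernel asked for more room, e.g. for capability chains. */
            argsz = info->argsz;
            struct vfio_iommu_type1_info *grown = realloc(info, argsz);
            if (!grown) {
                free(info);
                return NULL;
            }
            info = grown;
            memset(info, 0, argsz);     /* retry the query from scratch */
        }
        return NULL;                    /* initial allocation failed */
    }

The QEMU helpers above implement the same loop with g_malloc0()/g_realloc() and a goto label; since the kernel never shrinks argsz, the retry terminates.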
diff --git a/hw/vfio/migration.c b/hw/vfio/migration.c new file mode 100644 index 000000000..ff6b45de6 --- /dev/null +++ b/hw/vfio/migration.c @@ -0,0 +1,911 @@ +/* + * Migration support for VFIO devices + * + * Copyright NVIDIA, Inc. 2020 + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "qemu/main-loop.h" +#include "qemu/cutils.h" +#include <linux/vfio.h> +#include <sys/ioctl.h> + +#include "sysemu/runstate.h" +#include "hw/vfio/vfio-common.h" +#include "migration/migration.h" +#include "migration/vmstate.h" +#include "migration/qemu-file.h" +#include "migration/register.h" +#include "migration/blocker.h" +#include "migration/misc.h" +#include "qapi/error.h" +#include "exec/ramlist.h" +#include "exec/ram_addr.h" +#include "pci.h" +#include "trace.h" +#include "hw/hw.h" + +/* + * Flags to be used as unique delimiters for VFIO devices in the migration + * stream. These flags are composed as: + * 0xffffffff => MSB 32-bit all 1s + * 0xef10 => Magic ID, represents emulated (virtual) function IO + * 0x0000 => 16-bits reserved for flags + * + * The beginning of state information is marked by _DEV_CONFIG_STATE, + * _DEV_SETUP_STATE, or _DEV_DATA_STATE, respectively. The end of each + * state section is marked by _END_OF_STATE. + */ +#define VFIO_MIG_FLAG_END_OF_STATE (0xffffffffef100001ULL) +#define VFIO_MIG_FLAG_DEV_CONFIG_STATE (0xffffffffef100002ULL) +#define VFIO_MIG_FLAG_DEV_SETUP_STATE (0xffffffffef100003ULL) +#define VFIO_MIG_FLAG_DEV_DATA_STATE (0xffffffffef100004ULL) + +static int64_t bytes_transferred; + +static inline int vfio_mig_access(VFIODevice *vbasedev, void *val, int count, + off_t off, bool iswrite) +{ + int ret; + + ret = iswrite ? pwrite(vbasedev->fd, val, count, off) : + pread(vbasedev->fd, val, count, off); + if (ret < count) { + error_report("vfio_mig_%s %d byte %s: failed at offset 0x%" + HWADDR_PRIx", err: %s", iswrite ? "write" : "read", count, + vbasedev->name, off, strerror(errno)); + return (ret < 0) ? ret : -EINVAL; + } + return 0; +} + +static int vfio_mig_rw(VFIODevice *vbasedev, __u8 *buf, size_t count, + off_t off, bool iswrite) +{ + int ret, done = 0; + __u8 *tbuf = buf; + + while (count) { + int bytes = 0; + + if (count >= 8 && !(off % 8)) { + bytes = 8; + } else if (count >= 4 && !(off % 4)) { + bytes = 4; + } else if (count >= 2 && !(off % 2)) { + bytes = 2; + } else { + bytes = 1; + } + + ret = vfio_mig_access(vbasedev, tbuf, bytes, off, iswrite); + if (ret) { + return ret; + } + + count -= bytes; + done += bytes; + off += bytes; + tbuf += bytes; + } + return done; +} + +#define vfio_mig_read(f, v, c, o) vfio_mig_rw(f, (__u8 *)v, c, o, false) +#define vfio_mig_write(f, v, c, o) vfio_mig_rw(f, (__u8 *)v, c, o, true) + +#define VFIO_MIG_STRUCT_OFFSET(f) \ + offsetof(struct vfio_device_migration_info, f) +/* + * Change the device_state register for device @vbasedev. Bits set in @mask + * are preserved, bits set in @value are set, and bits not set in either @mask + * or @value are cleared in device_state. If the register cannot be accessed, + * the resulting state would be invalid, or the device enters an error state, + * an error is returned. 
+ */ + +static int vfio_migration_set_state(VFIODevice *vbasedev, uint32_t mask, + uint32_t value) +{ + VFIOMigration *migration = vbasedev->migration; + VFIORegion *region = &migration->region; + off_t dev_state_off = region->fd_offset + + VFIO_MIG_STRUCT_OFFSET(device_state); + uint32_t device_state; + int ret; + + ret = vfio_mig_read(vbasedev, &device_state, sizeof(device_state), + dev_state_off); + if (ret < 0) { + return ret; + } + + device_state = (device_state & mask) | value; + + if (!VFIO_DEVICE_STATE_VALID(device_state)) { + return -EINVAL; + } + + ret = vfio_mig_write(vbasedev, &device_state, sizeof(device_state), + dev_state_off); + if (ret < 0) { + int rret; + + rret = vfio_mig_read(vbasedev, &device_state, sizeof(device_state), + dev_state_off); + + if ((rret < 0) || (VFIO_DEVICE_STATE_IS_ERROR(device_state))) { + hw_error("%s: Device in error state 0x%x", vbasedev->name, + device_state); + return rret ? rret : -EIO; + } + return ret; + } + + migration->device_state = device_state; + trace_vfio_migration_set_state(vbasedev->name, device_state); + return 0; +} + +static void *get_data_section_size(VFIORegion *region, uint64_t data_offset, + uint64_t data_size, uint64_t *size) +{ + void *ptr = NULL; + uint64_t limit = 0; + int i; + + if (!region->mmaps) { + if (size) { + *size = MIN(data_size, region->size - data_offset); + } + return ptr; + } + + for (i = 0; i < region->nr_mmaps; i++) { + VFIOMmap *map = region->mmaps + i; + + if ((data_offset >= map->offset) && + (data_offset < map->offset + map->size)) { + + /* check if data_offset is within sparse mmap areas */ + ptr = map->mmap + data_offset - map->offset; + if (size) { + *size = MIN(data_size, map->offset + map->size - data_offset); + } + break; + } else if ((data_offset < map->offset) && + (!limit || limit > map->offset)) { + /* + * data_offset is not within sparse mmap areas, find size of + * non-mapped area. Check through all list since region->mmaps list + * is not sorted. + */ + limit = map->offset; + } + } + + if (!ptr && size) { + *size = limit ? 
MIN(data_size, limit - data_offset) : data_size; + } + return ptr; +} + +static int vfio_save_buffer(QEMUFile *f, VFIODevice *vbasedev, uint64_t *size) +{ + VFIOMigration *migration = vbasedev->migration; + VFIORegion *region = &migration->region; + uint64_t data_offset = 0, data_size = 0, sz; + int ret; + + ret = vfio_mig_read(vbasedev, &data_offset, sizeof(data_offset), + region->fd_offset + VFIO_MIG_STRUCT_OFFSET(data_offset)); + if (ret < 0) { + return ret; + } + + ret = vfio_mig_read(vbasedev, &data_size, sizeof(data_size), + region->fd_offset + VFIO_MIG_STRUCT_OFFSET(data_size)); + if (ret < 0) { + return ret; + } + + trace_vfio_save_buffer(vbasedev->name, data_offset, data_size, + migration->pending_bytes); + + qemu_put_be64(f, data_size); + sz = data_size; + + while (sz) { + void *buf; + uint64_t sec_size; + bool buf_allocated = false; + + buf = get_data_section_size(region, data_offset, sz, &sec_size); + + if (!buf) { + buf = g_try_malloc(sec_size); + if (!buf) { + error_report("%s: Error allocating buffer ", __func__); + return -ENOMEM; + } + buf_allocated = true; + + ret = vfio_mig_read(vbasedev, buf, sec_size, + region->fd_offset + data_offset); + if (ret < 0) { + g_free(buf); + return ret; + } + } + + qemu_put_buffer(f, buf, sec_size); + + if (buf_allocated) { + g_free(buf); + } + sz -= sec_size; + data_offset += sec_size; + } + + ret = qemu_file_get_error(f); + + if (!ret && size) { + *size = data_size; + } + + bytes_transferred += data_size; + return ret; +} + +static int vfio_load_buffer(QEMUFile *f, VFIODevice *vbasedev, + uint64_t data_size) +{ + VFIORegion *region = &vbasedev->migration->region; + uint64_t data_offset = 0, size, report_size; + int ret; + + do { + ret = vfio_mig_read(vbasedev, &data_offset, sizeof(data_offset), + region->fd_offset + VFIO_MIG_STRUCT_OFFSET(data_offset)); + if (ret < 0) { + return ret; + } + + if (data_offset + data_size > region->size) { + /* + * If data_size is greater than the data section of migration region + * then iterate the write buffer operation. This case can occur if + * size of migration region at destination is smaller than size of + * migration region at source. 
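A compressed illustration of the two-pass case this comment describes,
assuming the device reports a data_offset of 0 on each iteration: a 24MB
incoming buffer against a 16MB destination data section is written as 16MB,
then 8MB.

#include <stdint.h>
#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
    uint64_t region_mb = 16, data_offset = 0, data_mb = 24;

    while (data_mb) {
        /* each pass is bounded by what fits in the destination section */
        uint64_t chunk = MIN(data_mb, region_mb - data_offset);

        printf("write and report %llu MB\n", (unsigned long long)chunk);
        data_mb -= chunk;
    }
    return 0;
}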
+ */ + report_size = size = region->size - data_offset; + data_size -= size; + } else { + report_size = size = data_size; + data_size = 0; + } + + trace_vfio_load_state_device_data(vbasedev->name, data_offset, size); + + while (size) { + void *buf; + uint64_t sec_size; + bool buf_alloc = false; + + buf = get_data_section_size(region, data_offset, size, &sec_size); + + if (!buf) { + buf = g_try_malloc(sec_size); + if (!buf) { + error_report("%s: Error allocating buffer ", __func__); + return -ENOMEM; + } + buf_alloc = true; + } + + qemu_get_buffer(f, buf, sec_size); + + if (buf_alloc) { + ret = vfio_mig_write(vbasedev, buf, sec_size, + region->fd_offset + data_offset); + g_free(buf); + + if (ret < 0) { + return ret; + } + } + size -= sec_size; + data_offset += sec_size; + } + + ret = vfio_mig_write(vbasedev, &report_size, sizeof(report_size), + region->fd_offset + VFIO_MIG_STRUCT_OFFSET(data_size)); + if (ret < 0) { + return ret; + } + } while (data_size); + + return 0; +} + +static int vfio_update_pending(VFIODevice *vbasedev) +{ + VFIOMigration *migration = vbasedev->migration; + VFIORegion *region = &migration->region; + uint64_t pending_bytes = 0; + int ret; + + ret = vfio_mig_read(vbasedev, &pending_bytes, sizeof(pending_bytes), + region->fd_offset + VFIO_MIG_STRUCT_OFFSET(pending_bytes)); + if (ret < 0) { + migration->pending_bytes = 0; + return ret; + } + + migration->pending_bytes = pending_bytes; + trace_vfio_update_pending(vbasedev->name, pending_bytes); + return 0; +} + +static int vfio_save_device_config_state(QEMUFile *f, void *opaque) +{ + VFIODevice *vbasedev = opaque; + + qemu_put_be64(f, VFIO_MIG_FLAG_DEV_CONFIG_STATE); + + if (vbasedev->ops && vbasedev->ops->vfio_save_config) { + vbasedev->ops->vfio_save_config(vbasedev, f); + } + + qemu_put_be64(f, VFIO_MIG_FLAG_END_OF_STATE); + + trace_vfio_save_device_config_state(vbasedev->name); + + return qemu_file_get_error(f); +} + +static int vfio_load_device_config_state(QEMUFile *f, void *opaque) +{ + VFIODevice *vbasedev = opaque; + uint64_t data; + + if (vbasedev->ops && vbasedev->ops->vfio_load_config) { + int ret; + + ret = vbasedev->ops->vfio_load_config(vbasedev, f); + if (ret) { + error_report("%s: Failed to load device config space", + vbasedev->name); + return ret; + } + } + + data = qemu_get_be64(f); + if (data != VFIO_MIG_FLAG_END_OF_STATE) { + error_report("%s: Failed loading device config space, " + "end flag incorrect 0x%"PRIx64, vbasedev->name, data); + return -EINVAL; + } + + trace_vfio_load_device_config_state(vbasedev->name); + return qemu_file_get_error(f); +} + +static void vfio_migration_cleanup(VFIODevice *vbasedev) +{ + VFIOMigration *migration = vbasedev->migration; + + if (migration->region.mmaps) { + vfio_region_unmap(&migration->region); + } +} + +/* ---------------------------------------------------------------------- */ + +static int vfio_save_setup(QEMUFile *f, void *opaque) +{ + VFIODevice *vbasedev = opaque; + VFIOMigration *migration = vbasedev->migration; + int ret; + + trace_vfio_save_setup(vbasedev->name); + + qemu_put_be64(f, VFIO_MIG_FLAG_DEV_SETUP_STATE); + + if (migration->region.mmaps) { + /* + * Calling vfio_region_mmap() from migration thread. Memory API called + * from this function require locking the iothread when called from + * outside the main loop thread. 
+ */ + qemu_mutex_lock_iothread(); + ret = vfio_region_mmap(&migration->region); + qemu_mutex_unlock_iothread(); + if (ret) { + error_report("%s: Failed to mmap VFIO migration region: %s", + vbasedev->name, strerror(-ret)); + error_report("%s: Falling back to slow path", vbasedev->name); + } + } + + ret = vfio_migration_set_state(vbasedev, VFIO_DEVICE_STATE_MASK, + VFIO_DEVICE_STATE_SAVING); + if (ret) { + error_report("%s: Failed to set state SAVING", vbasedev->name); + return ret; + } + + qemu_put_be64(f, VFIO_MIG_FLAG_END_OF_STATE); + + ret = qemu_file_get_error(f); + if (ret) { + return ret; + } + + return 0; +} + +static void vfio_save_cleanup(void *opaque) +{ + VFIODevice *vbasedev = opaque; + + vfio_migration_cleanup(vbasedev); + trace_vfio_save_cleanup(vbasedev->name); +} + +static void vfio_save_pending(QEMUFile *f, void *opaque, + uint64_t threshold_size, + uint64_t *res_precopy_only, + uint64_t *res_compatible, + uint64_t *res_postcopy_only) +{ + VFIODevice *vbasedev = opaque; + VFIOMigration *migration = vbasedev->migration; + int ret; + + ret = vfio_update_pending(vbasedev); + if (ret) { + return; + } + + *res_precopy_only += migration->pending_bytes; + + trace_vfio_save_pending(vbasedev->name, *res_precopy_only, + *res_postcopy_only, *res_compatible); +} + +static int vfio_save_iterate(QEMUFile *f, void *opaque) +{ + VFIODevice *vbasedev = opaque; + VFIOMigration *migration = vbasedev->migration; + uint64_t data_size; + int ret; + + qemu_put_be64(f, VFIO_MIG_FLAG_DEV_DATA_STATE); + + if (migration->pending_bytes == 0) { + ret = vfio_update_pending(vbasedev); + if (ret) { + return ret; + } + + if (migration->pending_bytes == 0) { + qemu_put_be64(f, 0); + qemu_put_be64(f, VFIO_MIG_FLAG_END_OF_STATE); + /* indicates data finished, goto complete phase */ + return 1; + } + } + + ret = vfio_save_buffer(f, vbasedev, &data_size); + if (ret) { + error_report("%s: vfio_save_buffer failed %s", vbasedev->name, + strerror(errno)); + return ret; + } + + qemu_put_be64(f, VFIO_MIG_FLAG_END_OF_STATE); + + ret = qemu_file_get_error(f); + if (ret) { + return ret; + } + + /* + * Reset pending_bytes as .save_live_pending is not called during savevm or + * snapshot case, in such case vfio_update_pending() at the start of this + * function updates pending_bytes. 
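For reference, the byte layout of one device-data record as emitted by
vfio_save_iterate() above, in stream order (all integers big-endian via
qemu_put_be64()):

/*
 *   be64  VFIO_MIG_FLAG_DEV_DATA_STATE   0xffffffffef100004
 *   be64  data_size                      read back from the device
 *   raw   data_size bytes of device state
 *   be64  VFIO_MIG_FLAG_END_OF_STATE     0xffffffffef100001
 *
 * The final iteration writes a data_size of 0 with no payload and returns 1
 * to signal that the device has no more precopy data.
 */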
+ */ + migration->pending_bytes = 0; + trace_vfio_save_iterate(vbasedev->name, data_size); + return 0; +} + +static int vfio_save_complete_precopy(QEMUFile *f, void *opaque) +{ + VFIODevice *vbasedev = opaque; + VFIOMigration *migration = vbasedev->migration; + uint64_t data_size; + int ret; + + ret = vfio_migration_set_state(vbasedev, ~VFIO_DEVICE_STATE_RUNNING, + VFIO_DEVICE_STATE_SAVING); + if (ret) { + error_report("%s: Failed to set state STOP and SAVING", + vbasedev->name); + return ret; + } + + ret = vfio_update_pending(vbasedev); + if (ret) { + return ret; + } + + while (migration->pending_bytes > 0) { + qemu_put_be64(f, VFIO_MIG_FLAG_DEV_DATA_STATE); + ret = vfio_save_buffer(f, vbasedev, &data_size); + if (ret < 0) { + error_report("%s: Failed to save buffer", vbasedev->name); + return ret; + } + + if (data_size == 0) { + break; + } + + ret = vfio_update_pending(vbasedev); + if (ret) { + return ret; + } + } + + qemu_put_be64(f, VFIO_MIG_FLAG_END_OF_STATE); + + ret = qemu_file_get_error(f); + if (ret) { + return ret; + } + + ret = vfio_migration_set_state(vbasedev, ~VFIO_DEVICE_STATE_SAVING, 0); + if (ret) { + error_report("%s: Failed to set state STOPPED", vbasedev->name); + return ret; + } + + trace_vfio_save_complete_precopy(vbasedev->name); + return ret; +} + +static void vfio_save_state(QEMUFile *f, void *opaque) +{ + VFIODevice *vbasedev = opaque; + int ret; + + ret = vfio_save_device_config_state(f, opaque); + if (ret) { + error_report("%s: Failed to save device config space", + vbasedev->name); + qemu_file_set_error(f, ret); + } +} + +static int vfio_load_setup(QEMUFile *f, void *opaque) +{ + VFIODevice *vbasedev = opaque; + VFIOMigration *migration = vbasedev->migration; + int ret = 0; + + if (migration->region.mmaps) { + ret = vfio_region_mmap(&migration->region); + if (ret) { + error_report("%s: Failed to mmap VFIO migration region %d: %s", + vbasedev->name, migration->region.nr, + strerror(-ret)); + error_report("%s: Falling back to slow path", vbasedev->name); + } + } + + ret = vfio_migration_set_state(vbasedev, ~VFIO_DEVICE_STATE_MASK, + VFIO_DEVICE_STATE_RESUMING); + if (ret) { + error_report("%s: Failed to set state RESUMING", vbasedev->name); + if (migration->region.mmaps) { + vfio_region_unmap(&migration->region); + } + } + return ret; +} + +static int vfio_load_cleanup(void *opaque) +{ + VFIODevice *vbasedev = opaque; + + vfio_migration_cleanup(vbasedev); + trace_vfio_load_cleanup(vbasedev->name); + return 0; +} + +static int vfio_load_state(QEMUFile *f, void *opaque, int version_id) +{ + VFIODevice *vbasedev = opaque; + int ret = 0; + uint64_t data; + + data = qemu_get_be64(f); + while (data != VFIO_MIG_FLAG_END_OF_STATE) { + + trace_vfio_load_state(vbasedev->name, data); + + switch (data) { + case VFIO_MIG_FLAG_DEV_CONFIG_STATE: + { + return vfio_load_device_config_state(f, opaque); + } + case VFIO_MIG_FLAG_DEV_SETUP_STATE: + { + data = qemu_get_be64(f); + if (data == VFIO_MIG_FLAG_END_OF_STATE) { + return ret; + } else { + error_report("%s: SETUP STATE: EOS not found 0x%"PRIx64, + vbasedev->name, data); + return -EINVAL; + } + break; + } + case VFIO_MIG_FLAG_DEV_DATA_STATE: + { + uint64_t data_size = qemu_get_be64(f); + + if (data_size) { + ret = vfio_load_buffer(f, vbasedev, data_size); + if (ret < 0) { + return ret; + } + } + break; + } + default: + error_report("%s: Unknown tag 0x%"PRIx64, vbasedev->name, data); + return -EINVAL; + } + + data = qemu_get_be64(f); + ret = qemu_file_get_error(f); + if (ret) { + return ret; + } + } + return ret; +} + +static 
SaveVMHandlers savevm_vfio_handlers = { + .save_setup = vfio_save_setup, + .save_cleanup = vfio_save_cleanup, + .save_live_pending = vfio_save_pending, + .save_live_iterate = vfio_save_iterate, + .save_live_complete_precopy = vfio_save_complete_precopy, + .save_state = vfio_save_state, + .load_setup = vfio_load_setup, + .load_cleanup = vfio_load_cleanup, + .load_state = vfio_load_state, +}; + +/* ---------------------------------------------------------------------- */ + +static void vfio_vmstate_change(void *opaque, bool running, RunState state) +{ + VFIODevice *vbasedev = opaque; + VFIOMigration *migration = vbasedev->migration; + uint32_t value, mask; + int ret; + + if (vbasedev->migration->vm_running == running) { + return; + } + + if (running) { + /* + * Here device state can have one of _SAVING, _RESUMING or _STOP bit. + * Transition from _SAVING to _RUNNING can happen if there is migration + * failure, in that case clear _SAVING bit. + * Transition from _RESUMING to _RUNNING occurs during resuming + * phase, in that case clear _RESUMING bit. + * In both the above cases, set _RUNNING bit. + */ + mask = ~VFIO_DEVICE_STATE_MASK; + value = VFIO_DEVICE_STATE_RUNNING; + } else { + /* + * Here device state could be either _RUNNING or _SAVING|_RUNNING. Reset + * _RUNNING bit + */ + mask = ~VFIO_DEVICE_STATE_RUNNING; + + /* + * When VM state transition to stop for savevm command, device should + * start saving data. + */ + if (state == RUN_STATE_SAVE_VM) { + value = VFIO_DEVICE_STATE_SAVING; + } else { + value = 0; + } + } + + ret = vfio_migration_set_state(vbasedev, mask, value); + if (ret) { + /* + * Migration should be aborted in this case, but vm_state_notify() + * currently does not support reporting failures. + */ + error_report("%s: Failed to set device state 0x%x", vbasedev->name, + (migration->device_state & mask) | value); + qemu_file_set_error(migrate_get_current()->to_dst_file, ret); + } + vbasedev->migration->vm_running = running; + trace_vfio_vmstate_change(vbasedev->name, running, RunState_str(state), + (migration->device_state & mask) | value); +} + +static void vfio_migration_state_notifier(Notifier *notifier, void *data) +{ + MigrationState *s = data; + VFIOMigration *migration = container_of(notifier, VFIOMigration, + migration_state); + VFIODevice *vbasedev = migration->vbasedev; + int ret; + + trace_vfio_migration_state_notifier(vbasedev->name, + MigrationStatus_str(s->state)); + + switch (s->state) { + case MIGRATION_STATUS_CANCELLING: + case MIGRATION_STATUS_CANCELLED: + case MIGRATION_STATUS_FAILED: + bytes_transferred = 0; + ret = vfio_migration_set_state(vbasedev, + ~(VFIO_DEVICE_STATE_SAVING | VFIO_DEVICE_STATE_RESUMING), + VFIO_DEVICE_STATE_RUNNING); + if (ret) { + error_report("%s: Failed to set state RUNNING", vbasedev->name); + } + } +} + +static void vfio_migration_exit(VFIODevice *vbasedev) +{ + VFIOMigration *migration = vbasedev->migration; + + vfio_region_exit(&migration->region); + vfio_region_finalize(&migration->region); + g_free(vbasedev->migration); + vbasedev->migration = NULL; +} + +static int vfio_migration_init(VFIODevice *vbasedev, + struct vfio_region_info *info) +{ + int ret; + Object *obj; + VFIOMigration *migration; + char id[256] = ""; + g_autofree char *path = NULL, *oid = NULL; + + if (!vbasedev->ops->vfio_get_object) { + return -EINVAL; + } + + obj = vbasedev->ops->vfio_get_object(vbasedev); + if (!obj) { + return -EINVAL; + } + + vbasedev->migration = g_new0(VFIOMigration, 1); + + ret = vfio_region_setup(obj, vbasedev, 
&vbasedev->migration->region, + info->index, "migration"); + if (ret) { + error_report("%s: Failed to setup VFIO migration region %d: %s", + vbasedev->name, info->index, strerror(-ret)); + goto err; + } + + if (!vbasedev->migration->region.size) { + error_report("%s: Invalid zero-sized VFIO migration region %d", + vbasedev->name, info->index); + ret = -EINVAL; + goto err; + } + + migration = vbasedev->migration; + migration->vbasedev = vbasedev; + + oid = vmstate_if_get_id(VMSTATE_IF(DEVICE(obj))); + if (oid) { + path = g_strdup_printf("%s/vfio", oid); + } else { + path = g_strdup("vfio"); + } + strpadcpy(id, sizeof(id), path, '\0'); + + register_savevm_live(id, VMSTATE_INSTANCE_ID_ANY, 1, &savevm_vfio_handlers, + vbasedev); + + migration->vm_state = qdev_add_vm_change_state_handler(vbasedev->dev, + vfio_vmstate_change, + vbasedev); + migration->migration_state.notify = vfio_migration_state_notifier; + add_migration_state_change_notifier(&migration->migration_state); + return 0; + +err: + vfio_migration_exit(vbasedev); + return ret; +} + +/* ---------------------------------------------------------------------- */ + +int64_t vfio_mig_bytes_transferred(void) +{ + return bytes_transferred; +} + +int vfio_migration_probe(VFIODevice *vbasedev, Error **errp) +{ + VFIOContainer *container = vbasedev->group->container; + struct vfio_region_info *info = NULL; + int ret = -ENOTSUP; + + if (!vbasedev->enable_migration || !container->dirty_pages_supported) { + goto add_blocker; + } + + ret = vfio_get_dev_region_info(vbasedev, VFIO_REGION_TYPE_MIGRATION, + VFIO_REGION_SUBTYPE_MIGRATION, &info); + if (ret) { + goto add_blocker; + } + + ret = vfio_migration_init(vbasedev, info); + if (ret) { + goto add_blocker; + } + + trace_vfio_migration_probe(vbasedev->name, info->index); + g_free(info); + return 0; + +add_blocker: + error_setg(&vbasedev->migration_blocker, + "VFIO device doesn't support migration"); + g_free(info); + + ret = migrate_add_blocker(vbasedev->migration_blocker, errp); + if (ret < 0) { + error_free(vbasedev->migration_blocker); + vbasedev->migration_blocker = NULL; + } + return ret; +} + +void vfio_migration_finalize(VFIODevice *vbasedev) +{ + if (vbasedev->migration) { + VFIOMigration *migration = vbasedev->migration; + + remove_migration_state_change_notifier(&migration->migration_state); + qemu_del_vm_change_state_handler(migration->vm_state); + unregister_savevm(VMSTATE_IF(vbasedev->dev), "vfio", vbasedev); + vfio_migration_exit(vbasedev); + } + + if (vbasedev->migration_blocker) { + migrate_del_blocker(vbasedev->migration_blocker); + error_free(vbasedev->migration_blocker); + vbasedev->migration_blocker = NULL; + } +} diff --git a/hw/vfio/pci-quirks.c b/hw/vfio/pci-quirks.c new file mode 100644 index 000000000..0cf69a8c6 --- /dev/null +++ b/hw/vfio/pci-quirks.c @@ -0,0 +1,1769 @@ +/* + * device quirks for PCI devices + * + * Copyright Red Hat, Inc. 2012-2015 + * + * Authors: + * Alex Williamson + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. 
+ */
+
+#include "qemu/osdep.h"
+#include CONFIG_DEVICES
+#include "exec/memop.h"
+#include "qemu/units.h"
+#include "qemu/log.h"
+#include "qemu/error-report.h"
+#include "qemu/main-loop.h"
+#include "qemu/module.h"
+#include "qemu/range.h"
+#include "qapi/error.h"
+#include "qapi/visitor.h"
+#include <sys/ioctl.h>
+#include "hw/nvram/fw_cfg.h"
+#include "hw/qdev-properties.h"
+#include "pci.h"
+#include "trace.h"
+
+/*
+ * List of device ids/vendor ids for which to disable
+ * option rom loading. This avoids guest hangs during rom
+ * execution, as noticed with the BCM 57810 card, for lack of a
+ * better way to handle such issues.
+ * The user can still override by specifying a romfile or
+ * rombar=1.
+ * Please see https://bugs.launchpad.net/qemu/+bug/1284874
+ * for an analysis of the 57810 card hang. When adding
+ * a new vendor id/device id combination below, please also add
+ * your card/environment details and information that could
+ * help in debugging to the bug tracking this issue.
+ */
+static const struct {
+    uint32_t vendor;
+    uint32_t device;
+} rom_denylist[] = {
+    { 0x14e4, 0x168e }, /* Broadcom BCM 57810 */
+};
+
+bool vfio_opt_rom_in_denylist(VFIOPCIDevice *vdev)
+{
+    int i;
+
+    for (i = 0 ; i < ARRAY_SIZE(rom_denylist); i++) {
+        if (vfio_pci_is(vdev, rom_denylist[i].vendor, rom_denylist[i].device)) {
+            trace_vfio_quirk_rom_in_denylist(vdev->vbasedev.name,
+                                             rom_denylist[i].vendor,
+                                             rom_denylist[i].device);
+            return true;
+        }
+    }
+    return false;
+}
+
+/*
+ * Device specific region quirks (mostly backdoors to PCI config space)
+ */
+
+/*
+ * The generic window quirks operate on an address and data register,
+ * vfio_generic_window_address_quirk handles the address register and
+ * vfio_generic_window_data_quirk handles the data register. These ops
+ * pass reads and writes through to hardware until a value matching the
+ * stored address match/mask is written. When this occurs, the data
+ * register accesses emulated PCI config space for the device rather than
+ * passing through accesses. This enables devices where PCI config space
+ * is accessible behind a window register to maintain the virtualization
+ * provided through vfio.
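The match test itself is small enough to show in isolation. A sketch of the
arming rule, assuming nothing beyond the match/mask pair described above:

#include <stdbool.h>
#include <stdint.h>

struct window_match {
    uint32_t match, mask;
};

static bool window_arms(const struct window_match *m, uint32_t data,
                        uint32_t *cfg_offset)
{
    if ((data & ~m->mask) == m->match) {
        *cfg_offset = data & m->mask;   /* offset into emulated config */
        return true;
    }
    return false;
}

/* e.g. with match = 0x4000 and mask = 0xff, writing 0x4012 to the address
 * register arms the window at config space offset 0x12. */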
+ */ +typedef struct VFIOConfigWindowMatch { + uint32_t match; + uint32_t mask; +} VFIOConfigWindowMatch; + +typedef struct VFIOConfigWindowQuirk { + struct VFIOPCIDevice *vdev; + + uint32_t address_val; + + uint32_t address_offset; + uint32_t data_offset; + + bool window_enabled; + uint8_t bar; + + MemoryRegion *addr_mem; + MemoryRegion *data_mem; + + uint32_t nr_matches; + VFIOConfigWindowMatch matches[]; +} VFIOConfigWindowQuirk; + +static uint64_t vfio_generic_window_quirk_address_read(void *opaque, + hwaddr addr, + unsigned size) +{ + VFIOConfigWindowQuirk *window = opaque; + VFIOPCIDevice *vdev = window->vdev; + + return vfio_region_read(&vdev->bars[window->bar].region, + addr + window->address_offset, size); +} + +static void vfio_generic_window_quirk_address_write(void *opaque, hwaddr addr, + uint64_t data, + unsigned size) +{ + VFIOConfigWindowQuirk *window = opaque; + VFIOPCIDevice *vdev = window->vdev; + int i; + + window->window_enabled = false; + + vfio_region_write(&vdev->bars[window->bar].region, + addr + window->address_offset, data, size); + + for (i = 0; i < window->nr_matches; i++) { + if ((data & ~window->matches[i].mask) == window->matches[i].match) { + window->window_enabled = true; + window->address_val = data & window->matches[i].mask; + trace_vfio_quirk_generic_window_address_write(vdev->vbasedev.name, + memory_region_name(window->addr_mem), data); + break; + } + } +} + +static const MemoryRegionOps vfio_generic_window_address_quirk = { + .read = vfio_generic_window_quirk_address_read, + .write = vfio_generic_window_quirk_address_write, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static uint64_t vfio_generic_window_quirk_data_read(void *opaque, + hwaddr addr, unsigned size) +{ + VFIOConfigWindowQuirk *window = opaque; + VFIOPCIDevice *vdev = window->vdev; + uint64_t data; + + /* Always read data reg, discard if window enabled */ + data = vfio_region_read(&vdev->bars[window->bar].region, + addr + window->data_offset, size); + + if (window->window_enabled) { + data = vfio_pci_read_config(&vdev->pdev, window->address_val, size); + trace_vfio_quirk_generic_window_data_read(vdev->vbasedev.name, + memory_region_name(window->data_mem), data); + } + + return data; +} + +static void vfio_generic_window_quirk_data_write(void *opaque, hwaddr addr, + uint64_t data, unsigned size) +{ + VFIOConfigWindowQuirk *window = opaque; + VFIOPCIDevice *vdev = window->vdev; + + if (window->window_enabled) { + vfio_pci_write_config(&vdev->pdev, window->address_val, data, size); + trace_vfio_quirk_generic_window_data_write(vdev->vbasedev.name, + memory_region_name(window->data_mem), data); + return; + } + + vfio_region_write(&vdev->bars[window->bar].region, + addr + window->data_offset, data, size); +} + +static const MemoryRegionOps vfio_generic_window_data_quirk = { + .read = vfio_generic_window_quirk_data_read, + .write = vfio_generic_window_quirk_data_write, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +/* + * The generic mirror quirk handles devices which expose PCI config space + * through a region within a BAR. When enabled, reads and writes are + * redirected through to emulated PCI config space. XXX if PCI config space + * used memory regions, this could just be an alias. 
+ */ +typedef struct VFIOConfigMirrorQuirk { + struct VFIOPCIDevice *vdev; + uint32_t offset; + uint8_t bar; + MemoryRegion *mem; + uint8_t data[]; +} VFIOConfigMirrorQuirk; + +static uint64_t vfio_generic_quirk_mirror_read(void *opaque, + hwaddr addr, unsigned size) +{ + VFIOConfigMirrorQuirk *mirror = opaque; + VFIOPCIDevice *vdev = mirror->vdev; + uint64_t data; + + /* Read and discard in case the hardware cares */ + (void)vfio_region_read(&vdev->bars[mirror->bar].region, + addr + mirror->offset, size); + + data = vfio_pci_read_config(&vdev->pdev, addr, size); + trace_vfio_quirk_generic_mirror_read(vdev->vbasedev.name, + memory_region_name(mirror->mem), + addr, data); + return data; +} + +static void vfio_generic_quirk_mirror_write(void *opaque, hwaddr addr, + uint64_t data, unsigned size) +{ + VFIOConfigMirrorQuirk *mirror = opaque; + VFIOPCIDevice *vdev = mirror->vdev; + + vfio_pci_write_config(&vdev->pdev, addr, data, size); + trace_vfio_quirk_generic_mirror_write(vdev->vbasedev.name, + memory_region_name(mirror->mem), + addr, data); +} + +static const MemoryRegionOps vfio_generic_mirror_quirk = { + .read = vfio_generic_quirk_mirror_read, + .write = vfio_generic_quirk_mirror_write, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +/* Is range1 fully contained within range2? */ +static bool vfio_range_contained(uint64_t first1, uint64_t len1, + uint64_t first2, uint64_t len2) { + return (first1 >= first2 && first1 + len1 <= first2 + len2); +} + +#define PCI_VENDOR_ID_ATI 0x1002 + +/* + * Radeon HD cards (HD5450 & HD7850) report the upper byte of the I/O port BAR + * through VGA register 0x3c3. On newer cards, the I/O port BAR is always + * BAR4 (older cards like the X550 used BAR1, but we don't care to support + * those). Note that on bare metal, a read of 0x3c3 doesn't always return the + * I/O port BAR address. Originally this was coded to return the virtual BAR + * address only if the physical register read returns the actual BAR address, + * but users have reported greater success if we return the virtual address + * unconditionally. 
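The byte math behind reading PCI_BASE_ADDRESS_4 + 1 is worth a standalone
check (illustration only): for an I/O BAR of 256 bytes or more, bits 7:0 of
the BAR read back as the I/O space indicator plus alignment zeros, so config
byte BAR4+1 alone holds bits 15:8 of the port base.

#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint32_t bar4 = 0xe000 | 0x1;        /* I/O BAR at port base 0xe000 */
    uint8_t upper = (bar4 >> 8) & 0xff;  /* what a read of BAR4+1 returns */

    assert(upper == 0xe0);
    return 0;
}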
+ */ +static uint64_t vfio_ati_3c3_quirk_read(void *opaque, + hwaddr addr, unsigned size) +{ + VFIOPCIDevice *vdev = opaque; + uint64_t data = vfio_pci_read_config(&vdev->pdev, + PCI_BASE_ADDRESS_4 + 1, size); + + trace_vfio_quirk_ati_3c3_read(vdev->vbasedev.name, data); + + return data; +} + +static void vfio_ati_3c3_quirk_write(void *opaque, hwaddr addr, + uint64_t data, unsigned size) +{ + qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid access\n", __func__); +} + +static const MemoryRegionOps vfio_ati_3c3_quirk = { + .read = vfio_ati_3c3_quirk_read, + .write = vfio_ati_3c3_quirk_write, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +VFIOQuirk *vfio_quirk_alloc(int nr_mem) +{ + VFIOQuirk *quirk = g_new0(VFIOQuirk, 1); + QLIST_INIT(&quirk->ioeventfds); + quirk->mem = g_new0(MemoryRegion, nr_mem); + quirk->nr_mem = nr_mem; + + return quirk; +} + +static void vfio_ioeventfd_exit(VFIOPCIDevice *vdev, VFIOIOEventFD *ioeventfd) +{ + QLIST_REMOVE(ioeventfd, next); + memory_region_del_eventfd(ioeventfd->mr, ioeventfd->addr, ioeventfd->size, + true, ioeventfd->data, &ioeventfd->e); + + if (ioeventfd->vfio) { + struct vfio_device_ioeventfd vfio_ioeventfd; + + vfio_ioeventfd.argsz = sizeof(vfio_ioeventfd); + vfio_ioeventfd.flags = ioeventfd->size; + vfio_ioeventfd.data = ioeventfd->data; + vfio_ioeventfd.offset = ioeventfd->region->fd_offset + + ioeventfd->region_addr; + vfio_ioeventfd.fd = -1; + + if (ioctl(vdev->vbasedev.fd, VFIO_DEVICE_IOEVENTFD, &vfio_ioeventfd)) { + error_report("Failed to remove vfio ioeventfd for %s+0x%" + HWADDR_PRIx"[%d]:0x%"PRIx64" (%m)", + memory_region_name(ioeventfd->mr), ioeventfd->addr, + ioeventfd->size, ioeventfd->data); + } + } else { + qemu_set_fd_handler(event_notifier_get_fd(&ioeventfd->e), + NULL, NULL, NULL); + } + + event_notifier_cleanup(&ioeventfd->e); + trace_vfio_ioeventfd_exit(memory_region_name(ioeventfd->mr), + (uint64_t)ioeventfd->addr, ioeventfd->size, + ioeventfd->data); + g_free(ioeventfd); +} + +static void vfio_drop_dynamic_eventfds(VFIOPCIDevice *vdev, VFIOQuirk *quirk) +{ + VFIOIOEventFD *ioeventfd, *tmp; + + QLIST_FOREACH_SAFE(ioeventfd, &quirk->ioeventfds, next, tmp) { + if (ioeventfd->dynamic) { + vfio_ioeventfd_exit(vdev, ioeventfd); + } + } +} + +static void vfio_ioeventfd_handler(void *opaque) +{ + VFIOIOEventFD *ioeventfd = opaque; + + if (event_notifier_test_and_clear(&ioeventfd->e)) { + vfio_region_write(ioeventfd->region, ioeventfd->region_addr, + ioeventfd->data, ioeventfd->size); + trace_vfio_ioeventfd_handler(memory_region_name(ioeventfd->mr), + (uint64_t)ioeventfd->addr, ioeventfd->size, + ioeventfd->data); + } +} + +static VFIOIOEventFD *vfio_ioeventfd_init(VFIOPCIDevice *vdev, + MemoryRegion *mr, hwaddr addr, + unsigned size, uint64_t data, + VFIORegion *region, + hwaddr region_addr, bool dynamic) +{ + VFIOIOEventFD *ioeventfd; + + if (vdev->no_kvm_ioeventfd) { + return NULL; + } + + ioeventfd = g_malloc0(sizeof(*ioeventfd)); + + if (event_notifier_init(&ioeventfd->e, 0)) { + g_free(ioeventfd); + return NULL; + } + + /* + * MemoryRegion and relative offset, plus additional ioeventfd setup + * parameters for configuring and later tearing down KVM ioeventfd. + */ + ioeventfd->mr = mr; + ioeventfd->addr = addr; + ioeventfd->size = size; + ioeventfd->data = data; + ioeventfd->dynamic = dynamic; + /* + * VFIORegion and relative offset for implementing the userspace + * handler. data & size fields shared for both uses. 
+ */ + ioeventfd->region = region; + ioeventfd->region_addr = region_addr; + + if (!vdev->no_vfio_ioeventfd) { + struct vfio_device_ioeventfd vfio_ioeventfd; + + vfio_ioeventfd.argsz = sizeof(vfio_ioeventfd); + vfio_ioeventfd.flags = ioeventfd->size; + vfio_ioeventfd.data = ioeventfd->data; + vfio_ioeventfd.offset = ioeventfd->region->fd_offset + + ioeventfd->region_addr; + vfio_ioeventfd.fd = event_notifier_get_fd(&ioeventfd->e); + + ioeventfd->vfio = !ioctl(vdev->vbasedev.fd, + VFIO_DEVICE_IOEVENTFD, &vfio_ioeventfd); + } + + if (!ioeventfd->vfio) { + qemu_set_fd_handler(event_notifier_get_fd(&ioeventfd->e), + vfio_ioeventfd_handler, NULL, ioeventfd); + } + + memory_region_add_eventfd(ioeventfd->mr, ioeventfd->addr, ioeventfd->size, + true, ioeventfd->data, &ioeventfd->e); + trace_vfio_ioeventfd_init(memory_region_name(mr), (uint64_t)addr, + size, data, ioeventfd->vfio); + + return ioeventfd; +} + +static void vfio_vga_probe_ati_3c3_quirk(VFIOPCIDevice *vdev) +{ + VFIOQuirk *quirk; + + /* + * As long as the BAR is >= 256 bytes it will be aligned such that the + * lower byte is always zero. Filter out anything else, if it exists. + */ + if (!vfio_pci_is(vdev, PCI_VENDOR_ID_ATI, PCI_ANY_ID) || + !vdev->bars[4].ioport || vdev->bars[4].region.size < 256) { + return; + } + + quirk = vfio_quirk_alloc(1); + + memory_region_init_io(quirk->mem, OBJECT(vdev), &vfio_ati_3c3_quirk, vdev, + "vfio-ati-3c3-quirk", 1); + memory_region_add_subregion(&vdev->vga->region[QEMU_PCI_VGA_IO_HI].mem, + 3 /* offset 3 bytes from 0x3c0 */, quirk->mem); + + QLIST_INSERT_HEAD(&vdev->vga->region[QEMU_PCI_VGA_IO_HI].quirks, + quirk, next); + + trace_vfio_quirk_ati_3c3_probe(vdev->vbasedev.name); +} + +/* + * Newer ATI/AMD devices, including HD5450 and HD7850, have a mirror to PCI + * config space through MMIO BAR2 at offset 0x4000. Nothing seems to access + * the MMIO space directly, but a window to this space is provided through + * I/O port BAR4. Offset 0x0 is the address register and offset 0x4 is the + * data register. When the address is programmed to a range of 0x4000-0x4fff + * PCI configuration space is available. Experimentation seems to indicate + * that read-only may be provided by hardware. 
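From the guest's side, one config read through this window would look roughly
like this (a sketch; io_write32(port, val) and io_read32(port) are
hypothetical port I/O helpers, and only the offsets come from the comment
above):

static uint32_t ati_window_read_config(uint16_t bar4_base, uint8_t offset)
{
    io_write32(bar4_base + 0x0, 0x4000 | offset); /* arm the window */
    return io_read32(bar4_base + 0x4);  /* served from emulated config */
}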
+ */
+static void vfio_probe_ati_bar4_quirk(VFIOPCIDevice *vdev, int nr)
+{
+    VFIOQuirk *quirk;
+    VFIOConfigWindowQuirk *window;
+
+    /* This window doesn't seem to be used except by legacy VGA code */
+    if (!vfio_pci_is(vdev, PCI_VENDOR_ID_ATI, PCI_ANY_ID) ||
+        !vdev->vga || nr != 4) {
+        return;
+    }
+
+    quirk = vfio_quirk_alloc(2);
+    window = quirk->data = g_malloc0(sizeof(*window) +
+                                     sizeof(VFIOConfigWindowMatch));
+    window->vdev = vdev;
+    window->address_offset = 0;
+    window->data_offset = 4;
+    window->nr_matches = 1;
+    window->matches[0].match = 0x4000;
+    window->matches[0].mask = vdev->config_size - 1;
+    window->bar = nr;
+    window->addr_mem = &quirk->mem[0];
+    window->data_mem = &quirk->mem[1];
+
+    memory_region_init_io(window->addr_mem, OBJECT(vdev),
+                          &vfio_generic_window_address_quirk, window,
+                          "vfio-ati-bar4-window-address-quirk", 4);
+    memory_region_add_subregion_overlap(vdev->bars[nr].region.mem,
+                                        window->address_offset,
+                                        window->addr_mem, 1);
+
+    memory_region_init_io(window->data_mem, OBJECT(vdev),
+                          &vfio_generic_window_data_quirk, window,
+                          "vfio-ati-bar4-window-data-quirk", 4);
+    memory_region_add_subregion_overlap(vdev->bars[nr].region.mem,
+                                        window->data_offset,
+                                        window->data_mem, 1);
+
+    QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next);
+
+    trace_vfio_quirk_ati_bar4_probe(vdev->vbasedev.name);
+}
+
+/*
+ * Trap the BAR2 MMIO mirror to config space as well.
+ */
+static void vfio_probe_ati_bar2_quirk(VFIOPCIDevice *vdev, int nr)
+{
+    VFIOQuirk *quirk;
+    VFIOConfigMirrorQuirk *mirror;
+
+    /* Only enable on newer devices where BAR2 is 64bit */
+    if (!vfio_pci_is(vdev, PCI_VENDOR_ID_ATI, PCI_ANY_ID) ||
+        !vdev->vga || nr != 2 || !vdev->bars[2].mem64) {
+        return;
+    }
+
+    quirk = vfio_quirk_alloc(1);
+    mirror = quirk->data = g_malloc0(sizeof(*mirror));
+    mirror->mem = quirk->mem;
+    mirror->vdev = vdev;
+    mirror->offset = 0x4000;
+    mirror->bar = nr;
+
+    memory_region_init_io(mirror->mem, OBJECT(vdev),
+                          &vfio_generic_mirror_quirk, mirror,
+                          "vfio-ati-bar2-4000-quirk", PCI_CONFIG_SPACE_SIZE);
+    memory_region_add_subregion_overlap(vdev->bars[nr].region.mem,
+                                        mirror->offset, mirror->mem, 1);
+
+    QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next);
+
+    trace_vfio_quirk_ati_bar2_probe(vdev->vbasedev.name);
+}
+
+/*
+ * Older ATI/AMD cards like the X550 have a similar window to that above.
+ * I/O port BAR1 provides a window to a mirror of PCI config space located
+ * in BAR2 at offset 0xf00. We don't care to support such older cards, but
+ * note it for future reference.
+ */
+
+/*
+ * Nvidia has several different methods to get to config space, the
+ * nouveau project has several of these documented here:
+ * https://github.com/pathscale/envytools/tree/master/hwdocs
+ *
+ * The first quirk is actually not documented in envytools and is found
+ * on 10de:01d1 (NVIDIA Corporation G72 [GeForce 7300 LE]). This is an
+ * NV46 chipset. The backdoor uses the legacy VGA I/O ports to access
+ * the mirror of PCI config space found at BAR0 offset 0x1800. The access
+ * sequence first writes 0x338 to I/O port 0x3d4. The target offset is
+ * then written to 0x3d0. Finally 0x538 is written for a read and 0x738
+ * is written for a write to 0x3d4. The BAR0 offset is then accessible
+ * through 0x3d0. This quirk doesn't seem to be necessary on newer cards
+ * that use the I/O port BAR5 window but it doesn't hurt to leave it.
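The same sequence from the guest's point of view (a sketch; vga_write16(),
vga_write32() and vga_read32() are hypothetical port helpers, the port
numbers and magic values come from the description above):

static uint32_t nv46_read_config(uint8_t offset)
{
    vga_write16(0x3d4, 0x338);            /* NONE   -> SELECT */
    vga_write32(0x3d0, 0x1800 + offset);  /* SELECT -> WINDOW: pick offset */
    vga_write16(0x3d4, 0x538);            /* WINDOW -> READ */
    return vga_read32(0x3d0);             /* served from emulated config */
}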
+ */ +typedef enum {NONE = 0, SELECT, WINDOW, READ, WRITE} VFIONvidia3d0State; +static const char *nv3d0_states[] = { "NONE", "SELECT", + "WINDOW", "READ", "WRITE" }; + +typedef struct VFIONvidia3d0Quirk { + VFIOPCIDevice *vdev; + VFIONvidia3d0State state; + uint32_t offset; +} VFIONvidia3d0Quirk; + +static uint64_t vfio_nvidia_3d4_quirk_read(void *opaque, + hwaddr addr, unsigned size) +{ + VFIONvidia3d0Quirk *quirk = opaque; + VFIOPCIDevice *vdev = quirk->vdev; + + quirk->state = NONE; + + return vfio_vga_read(&vdev->vga->region[QEMU_PCI_VGA_IO_HI], + addr + 0x14, size); +} + +static void vfio_nvidia_3d4_quirk_write(void *opaque, hwaddr addr, + uint64_t data, unsigned size) +{ + VFIONvidia3d0Quirk *quirk = opaque; + VFIOPCIDevice *vdev = quirk->vdev; + VFIONvidia3d0State old_state = quirk->state; + + quirk->state = NONE; + + switch (data) { + case 0x338: + if (old_state == NONE) { + quirk->state = SELECT; + trace_vfio_quirk_nvidia_3d0_state(vdev->vbasedev.name, + nv3d0_states[quirk->state]); + } + break; + case 0x538: + if (old_state == WINDOW) { + quirk->state = READ; + trace_vfio_quirk_nvidia_3d0_state(vdev->vbasedev.name, + nv3d0_states[quirk->state]); + } + break; + case 0x738: + if (old_state == WINDOW) { + quirk->state = WRITE; + trace_vfio_quirk_nvidia_3d0_state(vdev->vbasedev.name, + nv3d0_states[quirk->state]); + } + break; + } + + vfio_vga_write(&vdev->vga->region[QEMU_PCI_VGA_IO_HI], + addr + 0x14, data, size); +} + +static const MemoryRegionOps vfio_nvidia_3d4_quirk = { + .read = vfio_nvidia_3d4_quirk_read, + .write = vfio_nvidia_3d4_quirk_write, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static uint64_t vfio_nvidia_3d0_quirk_read(void *opaque, + hwaddr addr, unsigned size) +{ + VFIONvidia3d0Quirk *quirk = opaque; + VFIOPCIDevice *vdev = quirk->vdev; + VFIONvidia3d0State old_state = quirk->state; + uint64_t data = vfio_vga_read(&vdev->vga->region[QEMU_PCI_VGA_IO_HI], + addr + 0x10, size); + + quirk->state = NONE; + + if (old_state == READ && + (quirk->offset & ~(PCI_CONFIG_SPACE_SIZE - 1)) == 0x1800) { + uint8_t offset = quirk->offset & (PCI_CONFIG_SPACE_SIZE - 1); + + data = vfio_pci_read_config(&vdev->pdev, offset, size); + trace_vfio_quirk_nvidia_3d0_read(vdev->vbasedev.name, + offset, size, data); + } + + return data; +} + +static void vfio_nvidia_3d0_quirk_write(void *opaque, hwaddr addr, + uint64_t data, unsigned size) +{ + VFIONvidia3d0Quirk *quirk = opaque; + VFIOPCIDevice *vdev = quirk->vdev; + VFIONvidia3d0State old_state = quirk->state; + + quirk->state = NONE; + + if (old_state == SELECT) { + quirk->offset = (uint32_t)data; + quirk->state = WINDOW; + trace_vfio_quirk_nvidia_3d0_state(vdev->vbasedev.name, + nv3d0_states[quirk->state]); + } else if (old_state == WRITE) { + if ((quirk->offset & ~(PCI_CONFIG_SPACE_SIZE - 1)) == 0x1800) { + uint8_t offset = quirk->offset & (PCI_CONFIG_SPACE_SIZE - 1); + + vfio_pci_write_config(&vdev->pdev, offset, data, size); + trace_vfio_quirk_nvidia_3d0_write(vdev->vbasedev.name, + offset, data, size); + return; + } + } + + vfio_vga_write(&vdev->vga->region[QEMU_PCI_VGA_IO_HI], + addr + 0x10, data, size); +} + +static const MemoryRegionOps vfio_nvidia_3d0_quirk = { + .read = vfio_nvidia_3d0_quirk_read, + .write = vfio_nvidia_3d0_quirk_write, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static void vfio_vga_probe_nvidia_3d0_quirk(VFIOPCIDevice *vdev) +{ + VFIOQuirk *quirk; + VFIONvidia3d0Quirk *data; + + if (vdev->no_geforce_quirks || + !vfio_pci_is(vdev, PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID) || + !vdev->bars[1].region.size) { + return; 
+ } + + quirk = vfio_quirk_alloc(2); + quirk->data = data = g_malloc0(sizeof(*data)); + data->vdev = vdev; + + memory_region_init_io(&quirk->mem[0], OBJECT(vdev), &vfio_nvidia_3d4_quirk, + data, "vfio-nvidia-3d4-quirk", 2); + memory_region_add_subregion(&vdev->vga->region[QEMU_PCI_VGA_IO_HI].mem, + 0x14 /* 0x3c0 + 0x14 */, &quirk->mem[0]); + + memory_region_init_io(&quirk->mem[1], OBJECT(vdev), &vfio_nvidia_3d0_quirk, + data, "vfio-nvidia-3d0-quirk", 2); + memory_region_add_subregion(&vdev->vga->region[QEMU_PCI_VGA_IO_HI].mem, + 0x10 /* 0x3c0 + 0x10 */, &quirk->mem[1]); + + QLIST_INSERT_HEAD(&vdev->vga->region[QEMU_PCI_VGA_IO_HI].quirks, + quirk, next); + + trace_vfio_quirk_nvidia_3d0_probe(vdev->vbasedev.name); +} + +/* + * The second quirk is documented in envytools. The I/O port BAR5 is just + * a set of address/data ports to the MMIO BARs. The BAR we care about is + * again BAR0. This backdoor is apparently a bit newer than the one above + * so we need to not only trap 256 bytes @0x1800, but all of PCI config + * space, including extended space is available at the 4k @0x88000. + */ +typedef struct VFIONvidiaBAR5Quirk { + uint32_t master; + uint32_t enable; + MemoryRegion *addr_mem; + MemoryRegion *data_mem; + bool enabled; + VFIOConfigWindowQuirk window; /* last for match data */ +} VFIONvidiaBAR5Quirk; + +static void vfio_nvidia_bar5_enable(VFIONvidiaBAR5Quirk *bar5) +{ + VFIOPCIDevice *vdev = bar5->window.vdev; + + if (((bar5->master & bar5->enable) & 0x1) == bar5->enabled) { + return; + } + + bar5->enabled = !bar5->enabled; + trace_vfio_quirk_nvidia_bar5_state(vdev->vbasedev.name, + bar5->enabled ? "Enable" : "Disable"); + memory_region_set_enabled(bar5->addr_mem, bar5->enabled); + memory_region_set_enabled(bar5->data_mem, bar5->enabled); +} + +static uint64_t vfio_nvidia_bar5_quirk_master_read(void *opaque, + hwaddr addr, unsigned size) +{ + VFIONvidiaBAR5Quirk *bar5 = opaque; + VFIOPCIDevice *vdev = bar5->window.vdev; + + return vfio_region_read(&vdev->bars[5].region, addr, size); +} + +static void vfio_nvidia_bar5_quirk_master_write(void *opaque, hwaddr addr, + uint64_t data, unsigned size) +{ + VFIONvidiaBAR5Quirk *bar5 = opaque; + VFIOPCIDevice *vdev = bar5->window.vdev; + + vfio_region_write(&vdev->bars[5].region, addr, data, size); + + bar5->master = data; + vfio_nvidia_bar5_enable(bar5); +} + +static const MemoryRegionOps vfio_nvidia_bar5_quirk_master = { + .read = vfio_nvidia_bar5_quirk_master_read, + .write = vfio_nvidia_bar5_quirk_master_write, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static uint64_t vfio_nvidia_bar5_quirk_enable_read(void *opaque, + hwaddr addr, unsigned size) +{ + VFIONvidiaBAR5Quirk *bar5 = opaque; + VFIOPCIDevice *vdev = bar5->window.vdev; + + return vfio_region_read(&vdev->bars[5].region, addr + 4, size); +} + +static void vfio_nvidia_bar5_quirk_enable_write(void *opaque, hwaddr addr, + uint64_t data, unsigned size) +{ + VFIONvidiaBAR5Quirk *bar5 = opaque; + VFIOPCIDevice *vdev = bar5->window.vdev; + + vfio_region_write(&vdev->bars[5].region, addr + 4, data, size); + + bar5->enable = data; + vfio_nvidia_bar5_enable(bar5); +} + +static const MemoryRegionOps vfio_nvidia_bar5_quirk_enable = { + .read = vfio_nvidia_bar5_quirk_enable_read, + .write = vfio_nvidia_bar5_quirk_enable_write, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static void vfio_probe_nvidia_bar5_quirk(VFIOPCIDevice *vdev, int nr) +{ + VFIOQuirk *quirk; + VFIONvidiaBAR5Quirk *bar5; + VFIOConfigWindowQuirk *window; + + if (vdev->no_geforce_quirks || + !vfio_pci_is(vdev, 
PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID) || + !vdev->vga || nr != 5 || !vdev->bars[5].ioport) { + return; + } + + quirk = vfio_quirk_alloc(4); + bar5 = quirk->data = g_malloc0(sizeof(*bar5) + + (sizeof(VFIOConfigWindowMatch) * 2)); + window = &bar5->window; + + window->vdev = vdev; + window->address_offset = 0x8; + window->data_offset = 0xc; + window->nr_matches = 2; + window->matches[0].match = 0x1800; + window->matches[0].mask = PCI_CONFIG_SPACE_SIZE - 1; + window->matches[1].match = 0x88000; + window->matches[1].mask = vdev->config_size - 1; + window->bar = nr; + window->addr_mem = bar5->addr_mem = &quirk->mem[0]; + window->data_mem = bar5->data_mem = &quirk->mem[1]; + + memory_region_init_io(window->addr_mem, OBJECT(vdev), + &vfio_generic_window_address_quirk, window, + "vfio-nvidia-bar5-window-address-quirk", 4); + memory_region_add_subregion_overlap(vdev->bars[nr].region.mem, + window->address_offset, + window->addr_mem, 1); + memory_region_set_enabled(window->addr_mem, false); + + memory_region_init_io(window->data_mem, OBJECT(vdev), + &vfio_generic_window_data_quirk, window, + "vfio-nvidia-bar5-window-data-quirk", 4); + memory_region_add_subregion_overlap(vdev->bars[nr].region.mem, + window->data_offset, + window->data_mem, 1); + memory_region_set_enabled(window->data_mem, false); + + memory_region_init_io(&quirk->mem[2], OBJECT(vdev), + &vfio_nvidia_bar5_quirk_master, bar5, + "vfio-nvidia-bar5-master-quirk", 4); + memory_region_add_subregion_overlap(vdev->bars[nr].region.mem, + 0, &quirk->mem[2], 1); + + memory_region_init_io(&quirk->mem[3], OBJECT(vdev), + &vfio_nvidia_bar5_quirk_enable, bar5, + "vfio-nvidia-bar5-enable-quirk", 4); + memory_region_add_subregion_overlap(vdev->bars[nr].region.mem, + 4, &quirk->mem[3], 1); + + QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next); + + trace_vfio_quirk_nvidia_bar5_probe(vdev->vbasedev.name); +} + +typedef struct LastDataSet { + VFIOQuirk *quirk; + hwaddr addr; + uint64_t data; + unsigned size; + int hits; + int added; +} LastDataSet; + +#define MAX_DYN_IOEVENTFD 10 +#define HITS_FOR_IOEVENTFD 10 + +/* + * Finally, BAR0 itself. We want to redirect any accesses to either + * 0x1800 or 0x88000 through the PCI config space access functions. + */ +static void vfio_nvidia_quirk_mirror_write(void *opaque, hwaddr addr, + uint64_t data, unsigned size) +{ + VFIOConfigMirrorQuirk *mirror = opaque; + VFIOPCIDevice *vdev = mirror->vdev; + PCIDevice *pdev = &vdev->pdev; + LastDataSet *last = (LastDataSet *)&mirror->data; + + vfio_generic_quirk_mirror_write(opaque, addr, data, size); + + /* + * Nvidia seems to acknowledge MSI interrupts by writing 0xff to the + * MSI capability ID register. Both the ID and next register are + * read-only, so we allow writes covering either of those to real hw. + */ + if ((pdev->cap_present & QEMU_PCI_CAP_MSI) && + vfio_range_contained(addr, size, pdev->msi_cap, PCI_MSI_FLAGS)) { + vfio_region_write(&vdev->bars[mirror->bar].region, + addr + mirror->offset, data, size); + trace_vfio_quirk_nvidia_bar0_msi_ack(vdev->vbasedev.name); + } + + /* + * Automatically add an ioeventfd to handle any repeated write with the + * same data and size above the standard PCI config space header. This is + * primarily expected to accelerate the MSI-ACK behavior, such as noted + * above. Current hardware/drivers should trigger an ioeventfd at config + * offset 0x704 (region offset 0x88704), with data 0x0, size 4. + * + * The criteria of 10 successive hits is arbitrary but reliably adds the + * MSI-ACK region. 
Note that as some writes are bypassed via the ioeventfd, + * the remaining ones have a greater chance of being seen successively. + * To avoid the pathological case of burning up all of QEMU's open file + * handles, arbitrarily limit this algorithm from adding no more than 10 + * ioeventfds, print an error if we would have added an 11th, and then + * stop counting. + */ + if (!vdev->no_kvm_ioeventfd && + addr >= PCI_STD_HEADER_SIZEOF && last->added <= MAX_DYN_IOEVENTFD) { + if (addr != last->addr || data != last->data || size != last->size) { + last->addr = addr; + last->data = data; + last->size = size; + last->hits = 1; + } else if (++last->hits >= HITS_FOR_IOEVENTFD) { + if (last->added < MAX_DYN_IOEVENTFD) { + VFIOIOEventFD *ioeventfd; + ioeventfd = vfio_ioeventfd_init(vdev, mirror->mem, addr, size, + data, &vdev->bars[mirror->bar].region, + mirror->offset + addr, true); + if (ioeventfd) { + VFIOQuirk *quirk = last->quirk; + + QLIST_INSERT_HEAD(&quirk->ioeventfds, ioeventfd, next); + last->added++; + } + } else { + last->added++; + warn_report("NVIDIA ioeventfd queue full for %s, unable to " + "accelerate 0x%"HWADDR_PRIx", data 0x%"PRIx64", " + "size %u", vdev->vbasedev.name, addr, data, size); + } + } + } +} + +static const MemoryRegionOps vfio_nvidia_mirror_quirk = { + .read = vfio_generic_quirk_mirror_read, + .write = vfio_nvidia_quirk_mirror_write, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static void vfio_nvidia_bar0_quirk_reset(VFIOPCIDevice *vdev, VFIOQuirk *quirk) +{ + VFIOConfigMirrorQuirk *mirror = quirk->data; + LastDataSet *last = (LastDataSet *)&mirror->data; + + last->addr = last->data = last->size = last->hits = last->added = 0; + + vfio_drop_dynamic_eventfds(vdev, quirk); +} + +static void vfio_probe_nvidia_bar0_quirk(VFIOPCIDevice *vdev, int nr) +{ + VFIOQuirk *quirk; + VFIOConfigMirrorQuirk *mirror; + LastDataSet *last; + + if (vdev->no_geforce_quirks || + !vfio_pci_is(vdev, PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID) || + !vfio_is_vga(vdev) || nr != 0) { + return; + } + + quirk = vfio_quirk_alloc(1); + quirk->reset = vfio_nvidia_bar0_quirk_reset; + mirror = quirk->data = g_malloc0(sizeof(*mirror) + sizeof(LastDataSet)); + mirror->mem = quirk->mem; + mirror->vdev = vdev; + mirror->offset = 0x88000; + mirror->bar = nr; + last = (LastDataSet *)&mirror->data; + last->quirk = quirk; + + memory_region_init_io(mirror->mem, OBJECT(vdev), + &vfio_nvidia_mirror_quirk, mirror, + "vfio-nvidia-bar0-88000-mirror-quirk", + vdev->config_size); + memory_region_add_subregion_overlap(vdev->bars[nr].region.mem, + mirror->offset, mirror->mem, 1); + + QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next); + + /* The 0x1800 offset mirror only seems to get used by legacy VGA */ + if (vdev->vga) { + quirk = vfio_quirk_alloc(1); + quirk->reset = vfio_nvidia_bar0_quirk_reset; + mirror = quirk->data = g_malloc0(sizeof(*mirror) + sizeof(LastDataSet)); + mirror->mem = quirk->mem; + mirror->vdev = vdev; + mirror->offset = 0x1800; + mirror->bar = nr; + last = (LastDataSet *)&mirror->data; + last->quirk = quirk; + + memory_region_init_io(mirror->mem, OBJECT(vdev), + &vfio_nvidia_mirror_quirk, mirror, + "vfio-nvidia-bar0-1800-mirror-quirk", + PCI_CONFIG_SPACE_SIZE); + memory_region_add_subregion_overlap(vdev->bars[nr].region.mem, + mirror->offset, mirror->mem, 1); + + QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next); + } + + trace_vfio_quirk_nvidia_bar0_probe(vdev->vbasedev.name); +} + +/* + * TODO - Some Nvidia devices provide config access to their companion HDA + * device and even to their parent 
bridge via these config space mirrors. + * Add quirks for those regions. + */ + +#define PCI_VENDOR_ID_REALTEK 0x10ec + +/* + * RTL8168 devices have a backdoor that can access the MSI-X table. At BAR2 + * offset 0x70 there is a dword data register, offset 0x74 is a dword address + * register. According to the Linux r8169 driver, the MSI-X table is addressed + * when the "type" portion of the address register is set to 0x1. This appears + * to be bits 16:30. Bit 31 is both a write indicator and some sort of + * "address latched" indicator. Bits 12:15 are a mask field, which we can + * ignore because the MSI-X table should always be accessed as a dword (full + * mask). Bits 0:11 is offset within the type. + * + * Example trace: + * + * Read from MSI-X table offset 0 + * vfio: vfio_bar_write(0000:05:00.0:BAR2+0x74, 0x1f000, 4) // store read addr + * vfio: vfio_bar_read(0000:05:00.0:BAR2+0x74, 4) = 0x8001f000 // latch + * vfio: vfio_bar_read(0000:05:00.0:BAR2+0x70, 4) = 0xfee00398 // read data + * + * Write 0xfee00000 to MSI-X table offset 0 + * vfio: vfio_bar_write(0000:05:00.0:BAR2+0x70, 0xfee00000, 4) // write data + * vfio: vfio_bar_write(0000:05:00.0:BAR2+0x74, 0x8001f000, 4) // do write + * vfio: vfio_bar_read(0000:05:00.0:BAR2+0x74, 4) = 0x1f000 // complete + */ +typedef struct VFIOrtl8168Quirk { + VFIOPCIDevice *vdev; + uint32_t addr; + uint32_t data; + bool enabled; +} VFIOrtl8168Quirk; + +static uint64_t vfio_rtl8168_quirk_address_read(void *opaque, + hwaddr addr, unsigned size) +{ + VFIOrtl8168Quirk *rtl = opaque; + VFIOPCIDevice *vdev = rtl->vdev; + uint64_t data = vfio_region_read(&vdev->bars[2].region, addr + 0x74, size); + + if (rtl->enabled) { + data = rtl->addr ^ 0x80000000U; /* latch/complete */ + trace_vfio_quirk_rtl8168_fake_latch(vdev->vbasedev.name, data); + } + + return data; +} + +static void vfio_rtl8168_quirk_address_write(void *opaque, hwaddr addr, + uint64_t data, unsigned size) +{ + VFIOrtl8168Quirk *rtl = opaque; + VFIOPCIDevice *vdev = rtl->vdev; + + rtl->enabled = false; + + if ((data & 0x7fff0000) == 0x10000) { /* MSI-X table */ + rtl->enabled = true; + rtl->addr = (uint32_t)data; + + if (data & 0x80000000U) { /* Do write */ + if (vdev->pdev.cap_present & QEMU_PCI_CAP_MSIX) { + hwaddr offset = data & 0xfff; + uint64_t val = rtl->data; + + trace_vfio_quirk_rtl8168_msix_write(vdev->vbasedev.name, + (uint16_t)offset, val); + + /* Write to the proper guest MSI-X table instead */ + memory_region_dispatch_write(&vdev->pdev.msix_table_mmio, + offset, val, + size_memop(size) | MO_LE, + MEMTXATTRS_UNSPECIFIED); + } + return; /* Do not write guest MSI-X data to hardware */ + } + } + + vfio_region_write(&vdev->bars[2].region, addr + 0x74, data, size); +} + +static const MemoryRegionOps vfio_rtl_address_quirk = { + .read = vfio_rtl8168_quirk_address_read, + .write = vfio_rtl8168_quirk_address_write, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + .unaligned = false, + }, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static uint64_t vfio_rtl8168_quirk_data_read(void *opaque, + hwaddr addr, unsigned size) +{ + VFIOrtl8168Quirk *rtl = opaque; + VFIOPCIDevice *vdev = rtl->vdev; + uint64_t data = vfio_region_read(&vdev->bars[2].region, addr + 0x70, size); + + if (rtl->enabled && (vdev->pdev.cap_present & QEMU_PCI_CAP_MSIX)) { + hwaddr offset = rtl->addr & 0xfff; + memory_region_dispatch_read(&vdev->pdev.msix_table_mmio, offset, + &data, size_memop(size) | MO_LE, + MEMTXATTRS_UNSPECIFIED); + trace_vfio_quirk_rtl8168_msix_read(vdev->vbasedev.name, offset, data); + } 
+ + return data; +} + +static void vfio_rtl8168_quirk_data_write(void *opaque, hwaddr addr, + uint64_t data, unsigned size) +{ + VFIOrtl8168Quirk *rtl = opaque; + VFIOPCIDevice *vdev = rtl->vdev; + + rtl->data = (uint32_t)data; + + vfio_region_write(&vdev->bars[2].region, addr + 0x70, data, size); +} + +static const MemoryRegionOps vfio_rtl_data_quirk = { + .read = vfio_rtl8168_quirk_data_read, + .write = vfio_rtl8168_quirk_data_write, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + .unaligned = false, + }, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static void vfio_probe_rtl8168_bar2_quirk(VFIOPCIDevice *vdev, int nr) +{ + VFIOQuirk *quirk; + VFIOrtl8168Quirk *rtl; + + if (!vfio_pci_is(vdev, PCI_VENDOR_ID_REALTEK, 0x8168) || nr != 2) { + return; + } + + quirk = vfio_quirk_alloc(2); + quirk->data = rtl = g_malloc0(sizeof(*rtl)); + rtl->vdev = vdev; + + memory_region_init_io(&quirk->mem[0], OBJECT(vdev), + &vfio_rtl_address_quirk, rtl, + "vfio-rtl8168-window-address-quirk", 4); + memory_region_add_subregion_overlap(vdev->bars[nr].region.mem, + 0x74, &quirk->mem[0], 1); + + memory_region_init_io(&quirk->mem[1], OBJECT(vdev), + &vfio_rtl_data_quirk, rtl, + "vfio-rtl8168-window-data-quirk", 4); + memory_region_add_subregion_overlap(vdev->bars[nr].region.mem, + 0x70, &quirk->mem[1], 1); + + QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next); + + trace_vfio_quirk_rtl8168_probe(vdev->vbasedev.name); +} + +#define IGD_ASLS 0xfc /* ASL Storage Register */ + +/* + * The OpRegion includes the Video BIOS Table, which seems important for + * telling the driver what sort of outputs it has. Without this, the device + * may work in the guest, but we may not get output. This also requires BIOS + * support to reserve and populate a section of guest memory sufficient for + * the table and to write the base address of that memory to the ASLS register + * of the IGD device. + */ +int vfio_pci_igd_opregion_init(VFIOPCIDevice *vdev, + struct vfio_region_info *info, Error **errp) +{ + int ret; + + vdev->igd_opregion = g_malloc0(info->size); + ret = pread(vdev->vbasedev.fd, vdev->igd_opregion, + info->size, info->offset); + if (ret != info->size) { + error_setg(errp, "failed to read IGD OpRegion"); + g_free(vdev->igd_opregion); + vdev->igd_opregion = NULL; + return -EINVAL; + } + + /* + * Provide fw_cfg with a copy of the OpRegion which the VM firmware is to + * allocate 32bit reserved memory for, copy these contents into, and write + * the reserved memory base address to the device ASLS register at 0xFC. + * Alignment of this reserved region seems flexible, but using a 4k page + * alignment seems to work well. This interface assumes a single IGD + * device, which may be at VM address 00:02.0 in legacy mode or another + * address in UPT mode. + * + * NB, there may be future use cases discovered where the VM should have + * direct interaction with the host OpRegion, in which case the write to + * the ASLS register would trigger MemoryRegion setup to enable that. + */ + fw_cfg_add_file(fw_cfg_find(), "etc/igd-opregion", + vdev->igd_opregion, info->size); + + trace_vfio_pci_igd_opregion_enabled(vdev->vbasedev.name); + + pci_set_long(vdev->pdev.config + IGD_ASLS, 0); + pci_set_long(vdev->pdev.wmask + IGD_ASLS, ~0); + pci_set_long(vdev->emulated_config_bits + IGD_ASLS, ~0); + + return 0; +} + +/* + * Common quirk probe entry points. 
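The firmware half of the OpRegion contract above might look like this (a
sketch under assumed helpers; fw_cfg_file_size(), fw_cfg_read_file(),
reserve_memory_below_4g(), map_guest_memory() and pci_config_write32() are
hypothetical, only the file name, the 4k-alignment hint, and the ASLS offset
0xFC come from the code above):

static void igd_setup_opregion(void)
{
    uint32_t size = fw_cfg_file_size("etc/igd-opregion");
    uint32_t base = reserve_memory_below_4g(size, 4096); /* 4k aligned */
    void *blob = map_guest_memory(base, size);

    fw_cfg_read_file("etc/igd-opregion", blob, size);
    /* device 00:02.0, ASL Storage register */
    pci_config_write32(0, 2, 0, 0xFC, base);
}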
+ */
+void vfio_vga_quirk_setup(VFIOPCIDevice *vdev)
+{
+    vfio_vga_probe_ati_3c3_quirk(vdev);
+    vfio_vga_probe_nvidia_3d0_quirk(vdev);
+}
+
+void vfio_vga_quirk_exit(VFIOPCIDevice *vdev)
+{
+    VFIOQuirk *quirk;
+    int i, j;
+
+    for (i = 0; i < ARRAY_SIZE(vdev->vga->region); i++) {
+        QLIST_FOREACH(quirk, &vdev->vga->region[i].quirks, next) {
+            for (j = 0; j < quirk->nr_mem; j++) {
+                memory_region_del_subregion(&vdev->vga->region[i].mem,
+                                            &quirk->mem[j]);
+            }
+        }
+    }
+}
+
+void vfio_vga_quirk_finalize(VFIOPCIDevice *vdev)
+{
+    int i, j;
+
+    for (i = 0; i < ARRAY_SIZE(vdev->vga->region); i++) {
+        while (!QLIST_EMPTY(&vdev->vga->region[i].quirks)) {
+            VFIOQuirk *quirk = QLIST_FIRST(&vdev->vga->region[i].quirks);
+            QLIST_REMOVE(quirk, next);
+            for (j = 0; j < quirk->nr_mem; j++) {
+                object_unparent(OBJECT(&quirk->mem[j]));
+            }
+            g_free(quirk->mem);
+            g_free(quirk->data);
+            g_free(quirk);
+        }
+    }
+}
+
+void vfio_bar_quirk_setup(VFIOPCIDevice *vdev, int nr)
+{
+    vfio_probe_ati_bar4_quirk(vdev, nr);
+    vfio_probe_ati_bar2_quirk(vdev, nr);
+    vfio_probe_nvidia_bar5_quirk(vdev, nr);
+    vfio_probe_nvidia_bar0_quirk(vdev, nr);
+    vfio_probe_rtl8168_bar2_quirk(vdev, nr);
+#ifdef CONFIG_VFIO_IGD
+    vfio_probe_igd_bar4_quirk(vdev, nr);
+#endif
+}
+
+void vfio_bar_quirk_exit(VFIOPCIDevice *vdev, int nr)
+{
+    VFIOBAR *bar = &vdev->bars[nr];
+    VFIOQuirk *quirk;
+    int i;
+
+    QLIST_FOREACH(quirk, &bar->quirks, next) {
+        while (!QLIST_EMPTY(&quirk->ioeventfds)) {
+            vfio_ioeventfd_exit(vdev, QLIST_FIRST(&quirk->ioeventfds));
+        }
+
+        for (i = 0; i < quirk->nr_mem; i++) {
+            memory_region_del_subregion(bar->region.mem, &quirk->mem[i]);
+        }
+    }
+}
+
+void vfio_bar_quirk_finalize(VFIOPCIDevice *vdev, int nr)
+{
+    VFIOBAR *bar = &vdev->bars[nr];
+    int i;
+
+    while (!QLIST_EMPTY(&bar->quirks)) {
+        VFIOQuirk *quirk = QLIST_FIRST(&bar->quirks);
+        QLIST_REMOVE(quirk, next);
+        for (i = 0; i < quirk->nr_mem; i++) {
+            object_unparent(OBJECT(&quirk->mem[i]));
+        }
+        g_free(quirk->mem);
+        g_free(quirk->data);
+        g_free(quirk);
+    }
+}
+
+/*
+ * Reset quirks
+ */
+void vfio_quirk_reset(VFIOPCIDevice *vdev)
+{
+    int i;
+
+    for (i = 0; i < PCI_ROM_SLOT; i++) {
+        VFIOQuirk *quirk;
+        VFIOBAR *bar = &vdev->bars[i];
+
+        QLIST_FOREACH(quirk, &bar->quirks, next) {
+            if (quirk->reset) {
+                quirk->reset(vdev, quirk);
+            }
+        }
+    }
+}
+
+/*
+ * AMD Radeon PCI config reset, based on Linux:
+ *   drivers/gpu/drm/radeon/ci_smc.c:ci_is_smc_running()
+ *   drivers/gpu/drm/radeon/radeon_device.c:radeon_pci_config_reset
+ *   drivers/gpu/drm/radeon/ci_smc.c:ci_reset_smc()
+ *   drivers/gpu/drm/radeon/ci_smc.c:ci_stop_smc_clock()
+ * IDs: include/drm/drm_pciids.h
+ * Registers: http://cgit.freedesktop.org/~agd5f/linux/commit/?id=4e2aa447f6f0
+ *
+ * Bonaire and Hawaii GPUs do not respond to a bus reset.  This is a bug in the
+ * hardware that should be fixed on future ASICs.  The symptom of this is that
+ * once the accelerated driver loads, Windows guests will BSOD on subsequent
+ * attempts to load the driver, such as after VM reset or shutdown/restart.  To
+ * work around this, we do an AMD specific PCI config reset, followed by an SMC
+ * reset.  The PCI config reset only works if SMC firmware is running, so we
+ * have a dependency on the state of the device as to whether this reset will
+ * be effective.  There are still cases where we won't be able to kick the
+ * device into working, but this greatly improves the usability overall.  The
+ * config reset magic is relatively common on AMD GPUs, but the setup and SMC
+ * poking is largely ASIC specific.
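
/*
 * Editor's note: the helpers below all use the BAR5 index/data pair at
 * 0x200/0x204.  The idiom, factored out for clarity (illustrative only,
 * not code from this patch):
 */
static uint32_t example_radeon_indirect_read(VFIOPCIDevice *vdev,
                                             uint32_t index)
{
    vfio_region_write(&vdev->bars[5].region, 0x200, index, 4);
    return vfio_region_read(&vdev->bars[5].region, 0x204, 4);
}

static void example_radeon_indirect_write(VFIOPCIDevice *vdev,
                                          uint32_t index, uint32_t val)
{
    vfio_region_write(&vdev->bars[5].region, 0x200, index, 4);
    vfio_region_write(&vdev->bars[5].region, 0x204, val, 4);
}
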
+ */ +static bool vfio_radeon_smc_is_running(VFIOPCIDevice *vdev) +{ + uint32_t clk, pc_c; + + /* + * Registers 200h and 204h are index and data registers for accessing + * indirect configuration registers within the device. + */ + vfio_region_write(&vdev->bars[5].region, 0x200, 0x80000004, 4); + clk = vfio_region_read(&vdev->bars[5].region, 0x204, 4); + vfio_region_write(&vdev->bars[5].region, 0x200, 0x80000370, 4); + pc_c = vfio_region_read(&vdev->bars[5].region, 0x204, 4); + + return (!(clk & 1) && (0x20100 <= pc_c)); +} + +/* + * The scope of a config reset is controlled by a mode bit in the misc register + * and a fuse, exposed as a bit in another register. The fuse is the default + * (0 = GFX, 1 = whole GPU), the misc bit is a toggle, with the formula + * scope = !(misc ^ fuse), where the resulting scope is defined the same as + * the fuse. A truth table therefore tells us that if misc == fuse, we need + * to flip the value of the bit in the misc register. + */ +static void vfio_radeon_set_gfx_only_reset(VFIOPCIDevice *vdev) +{ + uint32_t misc, fuse; + bool a, b; + + vfio_region_write(&vdev->bars[5].region, 0x200, 0xc00c0000, 4); + fuse = vfio_region_read(&vdev->bars[5].region, 0x204, 4); + b = fuse & 64; + + vfio_region_write(&vdev->bars[5].region, 0x200, 0xc0000010, 4); + misc = vfio_region_read(&vdev->bars[5].region, 0x204, 4); + a = misc & 2; + + if (a == b) { + vfio_region_write(&vdev->bars[5].region, 0x204, misc ^ 2, 4); + vfio_region_read(&vdev->bars[5].region, 0x204, 4); /* flush */ + } +} + +static int vfio_radeon_reset(VFIOPCIDevice *vdev) +{ + PCIDevice *pdev = &vdev->pdev; + int i, ret = 0; + uint32_t data; + + /* Defer to a kernel implemented reset */ + if (vdev->vbasedev.reset_works) { + trace_vfio_quirk_ati_bonaire_reset_skipped(vdev->vbasedev.name); + return -ENODEV; + } + + /* Enable only memory BAR access */ + vfio_pci_write_config(pdev, PCI_COMMAND, PCI_COMMAND_MEMORY, 2); + + /* Reset only works if SMC firmware is loaded and running */ + if (!vfio_radeon_smc_is_running(vdev)) { + ret = -EINVAL; + trace_vfio_quirk_ati_bonaire_reset_no_smc(vdev->vbasedev.name); + goto out; + } + + /* Make sure only the GFX function is reset */ + vfio_radeon_set_gfx_only_reset(vdev); + + /* AMD PCI config reset */ + vfio_pci_write_config(pdev, 0x7c, 0x39d5e86b, 4); + usleep(100); + + /* Read back the memory size to make sure we're out of reset */ + for (i = 0; i < 100000; i++) { + if (vfio_region_read(&vdev->bars[5].region, 0x5428, 4) != 0xffffffff) { + goto reset_smc; + } + usleep(1); + } + + trace_vfio_quirk_ati_bonaire_reset_timeout(vdev->vbasedev.name); + +reset_smc: + /* Reset SMC */ + vfio_region_write(&vdev->bars[5].region, 0x200, 0x80000000, 4); + data = vfio_region_read(&vdev->bars[5].region, 0x204, 4); + data |= 1; + vfio_region_write(&vdev->bars[5].region, 0x204, data, 4); + + /* Disable SMC clock */ + vfio_region_write(&vdev->bars[5].region, 0x200, 0x80000004, 4); + data = vfio_region_read(&vdev->bars[5].region, 0x204, 4); + data |= 1; + vfio_region_write(&vdev->bars[5].region, 0x204, data, 4); + + trace_vfio_quirk_ati_bonaire_reset_done(vdev->vbasedev.name); + +out: + /* Restore PCI command register */ + vfio_pci_write_config(pdev, PCI_COMMAND, 0, 2); + + return ret; +} + +void vfio_setup_resetfn_quirk(VFIOPCIDevice *vdev) +{ + switch (vdev->vendor_id) { + case 0x1002: + switch (vdev->device_id) { + /* Bonaire */ + case 0x6649: /* Bonaire [FirePro W5100] */ + case 0x6650: + case 0x6651: + case 0x6658: /* Bonaire XTX [Radeon R7 260X] */ + case 0x665c: /* Bonaire XT 
[Radeon HD 7790/8770 / R9 260 OEM] */ + case 0x665d: /* Bonaire [Radeon R7 200 Series] */ + /* Hawaii */ + case 0x67A0: /* Hawaii XT GL [FirePro W9100] */ + case 0x67A1: /* Hawaii PRO GL [FirePro W8100] */ + case 0x67A2: + case 0x67A8: + case 0x67A9: + case 0x67AA: + case 0x67B0: /* Hawaii XT [Radeon R9 290X] */ + case 0x67B1: /* Hawaii PRO [Radeon R9 290] */ + case 0x67B8: + case 0x67B9: + case 0x67BA: + case 0x67BE: + vdev->resetfn = vfio_radeon_reset; + trace_vfio_quirk_ati_bonaire_reset(vdev->vbasedev.name); + break; + } + break; + } +} + +/* + * The NVIDIA GPUDirect P2P Vendor capability allows the user to specify + * devices as a member of a clique. Devices within the same clique ID + * are capable of direct P2P. It's the user's responsibility that this + * is correct. The spec says that this may reside at any unused config + * offset, but reserves and recommends hypervisors place this at C8h. + * The spec also states that the hypervisor should place this capability + * at the end of the capability list, thus next is defined as 0h. + * + * +----------------+----------------+----------------+----------------+ + * | sig 7:0 ('P') | vndr len (8h) | next (0h) | cap id (9h) | + * +----------------+----------------+----------------+----------------+ + * | rsvd 15:7(0h),id 6:3,ver 2:0(0h)| sig 23:8 ('P2') | + * +---------------------------------+---------------------------------+ + * + * https://lists.gnu.org/archive/html/qemu-devel/2017-08/pdfUda5iEpgOS.pdf + */ +static void get_nv_gpudirect_clique_id(Object *obj, Visitor *v, + const char *name, void *opaque, + Error **errp) +{ + Property *prop = opaque; + uint8_t *ptr = object_field_prop_ptr(obj, prop); + + visit_type_uint8(v, name, ptr, errp); +} + +static void set_nv_gpudirect_clique_id(Object *obj, Visitor *v, + const char *name, void *opaque, + Error **errp) +{ + Property *prop = opaque; + uint8_t value, *ptr = object_field_prop_ptr(obj, prop); + + if (!visit_type_uint8(v, name, &value, errp)) { + return; + } + + if (value & ~0xF) { + error_setg(errp, "Property %s: valid range 0-15", name); + return; + } + + *ptr = value; +} + +const PropertyInfo qdev_prop_nv_gpudirect_clique = { + .name = "uint4", + .description = "NVIDIA GPUDirect Clique ID (0 - 15)", + .get = get_nv_gpudirect_clique_id, + .set = set_nv_gpudirect_clique_id, +}; + +static int vfio_add_nv_gpudirect_cap(VFIOPCIDevice *vdev, Error **errp) +{ + PCIDevice *pdev = &vdev->pdev; + int ret, pos = 0xC8; + + if (vdev->nv_gpudirect_clique == 0xFF) { + return 0; + } + + if (!vfio_pci_is(vdev, PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID)) { + error_setg(errp, "NVIDIA GPUDirect Clique ID: invalid device vendor"); + return -EINVAL; + } + + if (pci_get_byte(pdev->config + PCI_CLASS_DEVICE + 1) != + PCI_BASE_CLASS_DISPLAY) { + error_setg(errp, "NVIDIA GPUDirect Clique ID: unsupported PCI class"); + return -EINVAL; + } + + ret = pci_add_capability(pdev, PCI_CAP_ID_VNDR, pos, 8, errp); + if (ret < 0) { + error_prepend(errp, "Failed to add NVIDIA GPUDirect cap: "); + return ret; + } + + memset(vdev->emulated_config_bits + pos, 0xFF, 8); + pos += PCI_CAP_FLAGS; + pci_set_byte(pdev->config + pos++, 8); + pci_set_byte(pdev->config + pos++, 'P'); + pci_set_byte(pdev->config + pos++, '2'); + pci_set_byte(pdev->config + pos++, 'P'); + pci_set_byte(pdev->config + pos++, vdev->nv_gpudirect_clique << 3); + pci_set_byte(pdev->config + pos, 0); + + return 0; +} + +static void vfio_pci_nvlink2_get_tgt(Object *obj, Visitor *v, + const char *name, + void *opaque, Error **errp) +{ + uint64_t tgt = (uintptr_t) 
opaque; + visit_type_uint64(v, name, &tgt, errp); +} + +static void vfio_pci_nvlink2_get_link_speed(Object *obj, Visitor *v, + const char *name, + void *opaque, Error **errp) +{ + uint32_t link_speed = (uint32_t)(uintptr_t) opaque; + visit_type_uint32(v, name, &link_speed, errp); +} + +int vfio_pci_nvidia_v100_ram_init(VFIOPCIDevice *vdev, Error **errp) +{ + int ret; + void *p; + struct vfio_region_info *nv2reg = NULL; + struct vfio_info_cap_header *hdr; + struct vfio_region_info_cap_nvlink2_ssatgt *cap; + VFIOQuirk *quirk; + + ret = vfio_get_dev_region_info(&vdev->vbasedev, + VFIO_REGION_TYPE_PCI_VENDOR_TYPE | + PCI_VENDOR_ID_NVIDIA, + VFIO_REGION_SUBTYPE_NVIDIA_NVLINK2_RAM, + &nv2reg); + if (ret) { + return ret; + } + + hdr = vfio_get_region_info_cap(nv2reg, VFIO_REGION_INFO_CAP_NVLINK2_SSATGT); + if (!hdr) { + ret = -ENODEV; + goto free_exit; + } + cap = (void *) hdr; + + p = mmap(NULL, nv2reg->size, PROT_READ | PROT_WRITE, + MAP_SHARED, vdev->vbasedev.fd, nv2reg->offset); + if (p == MAP_FAILED) { + ret = -errno; + goto free_exit; + } + + quirk = vfio_quirk_alloc(1); + memory_region_init_ram_ptr(&quirk->mem[0], OBJECT(vdev), "nvlink2-mr", + nv2reg->size, p); + QLIST_INSERT_HEAD(&vdev->bars[0].quirks, quirk, next); + + object_property_add(OBJECT(vdev), "nvlink2-tgt", "uint64", + vfio_pci_nvlink2_get_tgt, NULL, NULL, + (void *) (uintptr_t) cap->tgt); + trace_vfio_pci_nvidia_gpu_setup_quirk(vdev->vbasedev.name, cap->tgt, + nv2reg->size); +free_exit: + g_free(nv2reg); + + return ret; +} + +int vfio_pci_nvlink2_init(VFIOPCIDevice *vdev, Error **errp) +{ + int ret; + void *p; + struct vfio_region_info *atsdreg = NULL; + struct vfio_info_cap_header *hdr; + struct vfio_region_info_cap_nvlink2_ssatgt *captgt; + struct vfio_region_info_cap_nvlink2_lnkspd *capspeed; + VFIOQuirk *quirk; + + ret = vfio_get_dev_region_info(&vdev->vbasedev, + VFIO_REGION_TYPE_PCI_VENDOR_TYPE | + PCI_VENDOR_ID_IBM, + VFIO_REGION_SUBTYPE_IBM_NVLINK2_ATSD, + &atsdreg); + if (ret) { + return ret; + } + + hdr = vfio_get_region_info_cap(atsdreg, + VFIO_REGION_INFO_CAP_NVLINK2_SSATGT); + if (!hdr) { + ret = -ENODEV; + goto free_exit; + } + captgt = (void *) hdr; + + hdr = vfio_get_region_info_cap(atsdreg, + VFIO_REGION_INFO_CAP_NVLINK2_LNKSPD); + if (!hdr) { + ret = -ENODEV; + goto free_exit; + } + capspeed = (void *) hdr; + + /* Some NVLink bridges may not have assigned ATSD */ + if (atsdreg->size) { + p = mmap(NULL, atsdreg->size, PROT_READ | PROT_WRITE, + MAP_SHARED, vdev->vbasedev.fd, atsdreg->offset); + if (p == MAP_FAILED) { + ret = -errno; + goto free_exit; + } + + quirk = vfio_quirk_alloc(1); + memory_region_init_ram_device_ptr(&quirk->mem[0], OBJECT(vdev), + "nvlink2-atsd-mr", atsdreg->size, p); + QLIST_INSERT_HEAD(&vdev->bars[0].quirks, quirk, next); + } + + object_property_add(OBJECT(vdev), "nvlink2-tgt", "uint64", + vfio_pci_nvlink2_get_tgt, NULL, NULL, + (void *) (uintptr_t) captgt->tgt); + trace_vfio_pci_nvlink2_setup_quirk_ssatgt(vdev->vbasedev.name, captgt->tgt, + atsdreg->size); + + object_property_add(OBJECT(vdev), "nvlink2-link-speed", "uint32", + vfio_pci_nvlink2_get_link_speed, NULL, NULL, + (void *) (uintptr_t) capspeed->link_speed); + trace_vfio_pci_nvlink2_setup_quirk_lnkspd(vdev->vbasedev.name, + capspeed->link_speed); +free_exit: + g_free(atsdreg); + + return ret; +} + +/* + * The VMD endpoint provides a real PCIe domain to the guest and the guest + * kernel performs enumeration of the VMD sub-device domain. 
Guest transactions
+ * to VMD sub-devices go through IOMMU translation from guest addresses to
+ * physical addresses. When MMIO goes to an endpoint after being translated to
+ * physical addresses, the bridge rejects the transaction because the window
+ * has been programmed with guest addresses.
+ *
+ * VMD can use the Host Physical Address in order to correctly program the
+ * bridge windows in its PCIe domain. VMD device 28C0 has HPA shadow registers
+ * located at offset 0x2000 in MEMBAR2 (BAR 4). This quirk provides the HPA
+ * shadow registers in a vendor-specific capability register for devices
+ * without native support. The position of 0xE8-0xFF is in the reserved range
+ * of the VMD device capability space following the Power Management
+ * Capability.
+ */
+#define VMD_SHADOW_CAP_VER 1
+#define VMD_SHADOW_CAP_LEN 24
+static int vfio_add_vmd_shadow_cap(VFIOPCIDevice *vdev, Error **errp)
+{
+    uint8_t membar_phys[16];
+    int ret, pos = 0xE8;
+
+    if (!(vfio_pci_is(vdev, PCI_VENDOR_ID_INTEL, 0x201D) ||
+          vfio_pci_is(vdev, PCI_VENDOR_ID_INTEL, 0x467F) ||
+          vfio_pci_is(vdev, PCI_VENDOR_ID_INTEL, 0x4C3D) ||
+          vfio_pci_is(vdev, PCI_VENDOR_ID_INTEL, 0x9A0B))) {
+        return 0;
+    }
+
+    ret = pread(vdev->vbasedev.fd, membar_phys, 16,
+                vdev->config_offset + PCI_BASE_ADDRESS_2);
+    if (ret != 16) {
+        error_report("VMD %s cannot read MEMBARs (%d)",
+                     vdev->vbasedev.name, ret);
+        return -EFAULT;
+    }
+
+    ret = pci_add_capability(&vdev->pdev, PCI_CAP_ID_VNDR, pos,
+                             VMD_SHADOW_CAP_LEN, errp);
+    if (ret < 0) {
+        error_prepend(errp, "Failed to add VMD MEMBAR Shadow cap: ");
+        return ret;
+    }
+
+    memset(vdev->emulated_config_bits + pos, 0xFF, VMD_SHADOW_CAP_LEN);
+    pos += PCI_CAP_FLAGS;
+    pci_set_byte(vdev->pdev.config + pos++, VMD_SHADOW_CAP_LEN);
+    pci_set_byte(vdev->pdev.config + pos++, VMD_SHADOW_CAP_VER);
+    pci_set_long(vdev->pdev.config + pos, 0x53484457); /* SHDW */
+    memcpy(vdev->pdev.config + pos + 4, membar_phys, 16);
+
+    return 0;
+}
+
+int vfio_add_virt_caps(VFIOPCIDevice *vdev, Error **errp)
+{
+    int ret;
+
+    ret = vfio_add_nv_gpudirect_cap(vdev, errp);
+    if (ret) {
+        return ret;
+    }
+
+    ret = vfio_add_vmd_shadow_cap(vdev, errp);
+    if (ret) {
+        return ret;
+    }
+
+    return 0;
+}
diff --git a/hw/vfio/pci.c b/hw/vfio/pci.c
new file mode 100644
index 000000000..7b45353ce
--- /dev/null
+++ b/hw/vfio/pci.c
@@ -0,0 +1,3328 @@
+/*
+ * vfio based device assignment support
+ *
+ * Copyright Red Hat, Inc. 2012
+ *
+ * Authors:
+ *  Alex Williamson <alex.williamson@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.  See
+ * the COPYING file in the top-level directory.
+ *
+ * Based on qemu-kvm device-assignment:
+ *  Adapted for KVM by Qumranet.
+ * Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com)
+ * Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com)
+ * Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com)
+ * Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com)
+ * Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com)
+ */
+
+#include "qemu/osdep.h"
+#include <linux/vfio.h>
+#include <sys/ioctl.h>
+
+#include "hw/hw.h"
+#include "hw/pci/msi.h"
+#include "hw/pci/msix.h"
+#include "hw/pci/pci_bridge.h"
+#include "hw/qdev-properties.h"
+#include "hw/qdev-properties-system.h"
+#include "migration/vmstate.h"
+#include "qapi/qmp/qdict.h"
+#include "qemu/error-report.h"
+#include "qemu/main-loop.h"
+#include "qemu/module.h"
+#include "qemu/range.h"
+#include "qemu/units.h"
+#include "sysemu/kvm.h"
+#include "sysemu/runstate.h"
+#include "pci.h"
+#include "trace.h"
+#include "qapi/error.h"
+#include "migration/blocker.h"
+#include "migration/qemu-file.h"
+
+#define TYPE_VFIO_PCI_NOHOTPLUG "vfio-pci-nohotplug"
+
+static void vfio_disable_interrupts(VFIOPCIDevice *vdev);
+static void vfio_mmap_set_enabled(VFIOPCIDevice *vdev, bool enabled);
+
+/*
+ * Disabling BAR mmapping can be slow, but toggling it around INTx can
+ * also be a huge overhead.  We try to get the best of both worlds by
+ * waiting until an interrupt to disable mmaps (subsequent transitions
+ * to the same state are effectively no overhead).  If the interrupt has
+ * been serviced and the time gap is long enough, we re-enable mmaps for
+ * performance.  This works well for things like graphics cards, which
+ * may not use their interrupt at all and are penalized to an unusable
+ * level by read/write BAR traps.  Other devices, like NICs, have more
+ * regular interrupts and see much better latency by staying in non-mmap
+ * mode.  We therefore set the default mmap_timeout such that a ping
+ * is just enough to keep the mmap disabled.  Users can experiment with
+ * other options with the x-intx-mmap-timeout-ms parameter (a value of
+ * zero disables the timer).
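
/*
 * Editor's sketch (not part of the patch): the timer wiring this comment
 * implies, assuming the standard QEMU timer API.  The real initialization
 * happens at realize time; "example_intx_timer_init" is hypothetical.
 */
static void example_intx_timer_init(VFIOPCIDevice *vdev)
{
    /* One-shot timer; vfio_intx_mmap_enable() re-arms it while an
     * interrupt is still pending when it fires */
    vdev->intx.mmap_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                         vfio_intx_mmap_enable, vdev);

    /* Arm it mmap_timeout milliseconds from now, as the INTx handler does */
    timer_mod(vdev->intx.mmap_timer,
              qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + vdev->intx.mmap_timeout);
}
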
+ */ +static void vfio_intx_mmap_enable(void *opaque) +{ + VFIOPCIDevice *vdev = opaque; + + if (vdev->intx.pending) { + timer_mod(vdev->intx.mmap_timer, + qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + vdev->intx.mmap_timeout); + return; + } + + vfio_mmap_set_enabled(vdev, true); +} + +static void vfio_intx_interrupt(void *opaque) +{ + VFIOPCIDevice *vdev = opaque; + + if (!event_notifier_test_and_clear(&vdev->intx.interrupt)) { + return; + } + + trace_vfio_intx_interrupt(vdev->vbasedev.name, 'A' + vdev->intx.pin); + + vdev->intx.pending = true; + pci_irq_assert(&vdev->pdev); + vfio_mmap_set_enabled(vdev, false); + if (vdev->intx.mmap_timeout) { + timer_mod(vdev->intx.mmap_timer, + qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + vdev->intx.mmap_timeout); + } +} + +static void vfio_intx_eoi(VFIODevice *vbasedev) +{ + VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev); + + if (!vdev->intx.pending) { + return; + } + + trace_vfio_intx_eoi(vbasedev->name); + + vdev->intx.pending = false; + pci_irq_deassert(&vdev->pdev); + vfio_unmask_single_irqindex(vbasedev, VFIO_PCI_INTX_IRQ_INDEX); +} + +static void vfio_intx_enable_kvm(VFIOPCIDevice *vdev, Error **errp) +{ +#ifdef CONFIG_KVM + int irq_fd = event_notifier_get_fd(&vdev->intx.interrupt); + + if (vdev->no_kvm_intx || !kvm_irqfds_enabled() || + vdev->intx.route.mode != PCI_INTX_ENABLED || + !kvm_resamplefds_enabled()) { + return; + } + + /* Get to a known interrupt state */ + qemu_set_fd_handler(irq_fd, NULL, NULL, vdev); + vfio_mask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX); + vdev->intx.pending = false; + pci_irq_deassert(&vdev->pdev); + + /* Get an eventfd for resample/unmask */ + if (event_notifier_init(&vdev->intx.unmask, 0)) { + error_setg(errp, "event_notifier_init failed eoi"); + goto fail; + } + + if (kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, + &vdev->intx.interrupt, + &vdev->intx.unmask, + vdev->intx.route.irq)) { + error_setg_errno(errp, errno, "failed to setup resample irqfd"); + goto fail_irqfd; + } + + if (vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX, 0, + VFIO_IRQ_SET_ACTION_UNMASK, + event_notifier_get_fd(&vdev->intx.unmask), + errp)) { + goto fail_vfio; + } + + /* Let'em rip */ + vfio_unmask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX); + + vdev->intx.kvm_accel = true; + + trace_vfio_intx_enable_kvm(vdev->vbasedev.name); + + return; + +fail_vfio: + kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, &vdev->intx.interrupt, + vdev->intx.route.irq); +fail_irqfd: + event_notifier_cleanup(&vdev->intx.unmask); +fail: + qemu_set_fd_handler(irq_fd, vfio_intx_interrupt, NULL, vdev); + vfio_unmask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX); +#endif +} + +static void vfio_intx_disable_kvm(VFIOPCIDevice *vdev) +{ +#ifdef CONFIG_KVM + if (!vdev->intx.kvm_accel) { + return; + } + + /* + * Get to a known state, hardware masked, QEMU ready to accept new + * interrupts, QEMU IRQ de-asserted. + */ + vfio_mask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX); + vdev->intx.pending = false; + pci_irq_deassert(&vdev->pdev); + + /* Tell KVM to stop listening for an INTx irqfd */ + if (kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, &vdev->intx.interrupt, + vdev->intx.route.irq)) { + error_report("vfio: Error: Failed to disable INTx irqfd: %m"); + } + + /* We only need to close the eventfd for VFIO to cleanup the kernel side */ + event_notifier_cleanup(&vdev->intx.unmask); + + /* QEMU starts listening for interrupt events. 
*/
+    qemu_set_fd_handler(event_notifier_get_fd(&vdev->intx.interrupt),
+                        vfio_intx_interrupt, NULL, vdev);
+
+    vdev->intx.kvm_accel = false;
+
+    /* If we've missed an event, let it re-fire through QEMU */
+    vfio_unmask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
+
+    trace_vfio_intx_disable_kvm(vdev->vbasedev.name);
+#endif
+}
+
+static void vfio_intx_update(VFIOPCIDevice *vdev, PCIINTxRoute *route)
+{
+    Error *err = NULL;
+
+    trace_vfio_intx_update(vdev->vbasedev.name,
+                           vdev->intx.route.irq, route->irq);
+
+    vfio_intx_disable_kvm(vdev);
+
+    vdev->intx.route = *route;
+
+    if (route->mode != PCI_INTX_ENABLED) {
+        return;
+    }
+
+    vfio_intx_enable_kvm(vdev, &err);
+    if (err) {
+        warn_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name);
+    }
+
+    /* Re-enable the interrupt in case we missed an EOI */
+    vfio_intx_eoi(&vdev->vbasedev);
+}
+
+static void vfio_intx_routing_notifier(PCIDevice *pdev)
+{
+    VFIOPCIDevice *vdev = VFIO_PCI(pdev);
+    PCIINTxRoute route;
+
+    if (vdev->interrupt != VFIO_INT_INTx) {
+        return;
+    }
+
+    route = pci_device_route_intx_to_irq(&vdev->pdev, vdev->intx.pin);
+
+    if (pci_intx_route_changed(&vdev->intx.route, &route)) {
+        vfio_intx_update(vdev, &route);
+    }
+}
+
+static void vfio_irqchip_change(Notifier *notify, void *data)
+{
+    VFIOPCIDevice *vdev = container_of(notify, VFIOPCIDevice,
+                                       irqchip_change_notifier);
+
+    vfio_intx_update(vdev, &vdev->intx.route);
+}
+
+static int vfio_intx_enable(VFIOPCIDevice *vdev, Error **errp)
+{
+    uint8_t pin = vfio_pci_read_config(&vdev->pdev, PCI_INTERRUPT_PIN, 1);
+    Error *err = NULL;
+    int32_t fd;
+    int ret;
+
+    if (!pin) {
+        return 0;
+    }
+
+    vfio_disable_interrupts(vdev);
+
+    vdev->intx.pin = pin - 1; /* Pin A (1) -> irq[0] */
+    pci_config_set_interrupt_pin(vdev->pdev.config, pin);
+
+#ifdef CONFIG_KVM
+    /*
+     * Only conditional to avoid generating error messages on platforms
+     * where we won't actually use the result anyway.
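
/*
 * Editor's aside (illustrative, not in the patch): the eventfd handed to
 * vfio_set_irq_signaling() above ends up in a VFIO_DEVICE_SET_IRQS ioctl.
 * The raw payload for a single INTx trigger eventfd looks like this:
 */
static int example_set_intx_trigger(int device_fd, int32_t event_fd)
{
    struct vfio_irq_set *irq_set;
    int argsz = sizeof(*irq_set) + sizeof(event_fd);
    int ret;

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
    irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;
    irq_set->start = 0;
    irq_set->count = 1;
    memcpy(&irq_set->data, &event_fd, sizeof(event_fd));

    ret = ioctl(device_fd, VFIO_DEVICE_SET_IRQS, irq_set);
    g_free(irq_set);
    return ret;
}
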
+ */ + if (kvm_irqfds_enabled() && kvm_resamplefds_enabled()) { + vdev->intx.route = pci_device_route_intx_to_irq(&vdev->pdev, + vdev->intx.pin); + } +#endif + + ret = event_notifier_init(&vdev->intx.interrupt, 0); + if (ret) { + error_setg_errno(errp, -ret, "event_notifier_init failed"); + return ret; + } + fd = event_notifier_get_fd(&vdev->intx.interrupt); + qemu_set_fd_handler(fd, vfio_intx_interrupt, NULL, vdev); + + if (vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX, 0, + VFIO_IRQ_SET_ACTION_TRIGGER, fd, errp)) { + qemu_set_fd_handler(fd, NULL, NULL, vdev); + event_notifier_cleanup(&vdev->intx.interrupt); + return -errno; + } + + vfio_intx_enable_kvm(vdev, &err); + if (err) { + warn_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name); + } + + vdev->interrupt = VFIO_INT_INTx; + + trace_vfio_intx_enable(vdev->vbasedev.name); + return 0; +} + +static void vfio_intx_disable(VFIOPCIDevice *vdev) +{ + int fd; + + timer_del(vdev->intx.mmap_timer); + vfio_intx_disable_kvm(vdev); + vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX); + vdev->intx.pending = false; + pci_irq_deassert(&vdev->pdev); + vfio_mmap_set_enabled(vdev, true); + + fd = event_notifier_get_fd(&vdev->intx.interrupt); + qemu_set_fd_handler(fd, NULL, NULL, vdev); + event_notifier_cleanup(&vdev->intx.interrupt); + + vdev->interrupt = VFIO_INT_NONE; + + trace_vfio_intx_disable(vdev->vbasedev.name); +} + +/* + * MSI/X + */ +static void vfio_msi_interrupt(void *opaque) +{ + VFIOMSIVector *vector = opaque; + VFIOPCIDevice *vdev = vector->vdev; + MSIMessage (*get_msg)(PCIDevice *dev, unsigned vector); + void (*notify)(PCIDevice *dev, unsigned vector); + MSIMessage msg; + int nr = vector - vdev->msi_vectors; + + if (!event_notifier_test_and_clear(&vector->interrupt)) { + return; + } + + if (vdev->interrupt == VFIO_INT_MSIX) { + get_msg = msix_get_message; + notify = msix_notify; + + /* A masked vector firing needs to use the PBA, enable it */ + if (msix_is_masked(&vdev->pdev, nr)) { + set_bit(nr, vdev->msix->pending); + memory_region_set_enabled(&vdev->pdev.msix_pba_mmio, true); + trace_vfio_msix_pba_enable(vdev->vbasedev.name); + } + } else if (vdev->interrupt == VFIO_INT_MSI) { + get_msg = msi_get_message; + notify = msi_notify; + } else { + abort(); + } + + msg = get_msg(&vdev->pdev, nr); + trace_vfio_msi_interrupt(vdev->vbasedev.name, nr, msg.address, msg.data); + notify(&vdev->pdev, nr); +} + +static int vfio_enable_vectors(VFIOPCIDevice *vdev, bool msix) +{ + struct vfio_irq_set *irq_set; + int ret = 0, i, argsz; + int32_t *fds; + + argsz = sizeof(*irq_set) + (vdev->nr_vectors * sizeof(*fds)); + + irq_set = g_malloc0(argsz); + irq_set->argsz = argsz; + irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER; + irq_set->index = msix ? VFIO_PCI_MSIX_IRQ_INDEX : VFIO_PCI_MSI_IRQ_INDEX; + irq_set->start = 0; + irq_set->count = vdev->nr_vectors; + fds = (int32_t *)&irq_set->data; + + for (i = 0; i < vdev->nr_vectors; i++) { + int fd = -1; + + /* + * MSI vs MSI-X - The guest has direct access to MSI mask and pending + * bits, therefore we always use the KVM signaling path when setup. + * MSI-X mask and pending bits are emulated, so we want to use the + * KVM signaling path only when configured and unmasked. 
+ */ + if (vdev->msi_vectors[i].use) { + if (vdev->msi_vectors[i].virq < 0 || + (msix && msix_is_masked(&vdev->pdev, i))) { + fd = event_notifier_get_fd(&vdev->msi_vectors[i].interrupt); + } else { + fd = event_notifier_get_fd(&vdev->msi_vectors[i].kvm_interrupt); + } + } + + fds[i] = fd; + } + + ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set); + + g_free(irq_set); + + return ret; +} + +static void vfio_add_kvm_msi_virq(VFIOPCIDevice *vdev, VFIOMSIVector *vector, + int vector_n, bool msix) +{ + int virq; + + if ((msix && vdev->no_kvm_msix) || (!msix && vdev->no_kvm_msi)) { + return; + } + + if (event_notifier_init(&vector->kvm_interrupt, 0)) { + return; + } + + virq = kvm_irqchip_add_msi_route(kvm_state, vector_n, &vdev->pdev); + if (virq < 0) { + event_notifier_cleanup(&vector->kvm_interrupt); + return; + } + + if (kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, &vector->kvm_interrupt, + NULL, virq) < 0) { + kvm_irqchip_release_virq(kvm_state, virq); + event_notifier_cleanup(&vector->kvm_interrupt); + return; + } + + vector->virq = virq; +} + +static void vfio_remove_kvm_msi_virq(VFIOMSIVector *vector) +{ + kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, &vector->kvm_interrupt, + vector->virq); + kvm_irqchip_release_virq(kvm_state, vector->virq); + vector->virq = -1; + event_notifier_cleanup(&vector->kvm_interrupt); +} + +static void vfio_update_kvm_msi_virq(VFIOMSIVector *vector, MSIMessage msg, + PCIDevice *pdev) +{ + kvm_irqchip_update_msi_route(kvm_state, vector->virq, msg, pdev); + kvm_irqchip_commit_routes(kvm_state); +} + +static int vfio_msix_vector_do_use(PCIDevice *pdev, unsigned int nr, + MSIMessage *msg, IOHandler *handler) +{ + VFIOPCIDevice *vdev = VFIO_PCI(pdev); + VFIOMSIVector *vector; + int ret; + + trace_vfio_msix_vector_do_use(vdev->vbasedev.name, nr); + + vector = &vdev->msi_vectors[nr]; + + if (!vector->use) { + vector->vdev = vdev; + vector->virq = -1; + if (event_notifier_init(&vector->interrupt, 0)) { + error_report("vfio: Error: event_notifier_init failed"); + } + vector->use = true; + msix_vector_use(pdev, nr); + } + + qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt), + handler, NULL, vector); + + /* + * Attempt to enable route through KVM irqchip, + * default to userspace handling if unavailable. + */ + if (vector->virq >= 0) { + if (!msg) { + vfio_remove_kvm_msi_virq(vector); + } else { + vfio_update_kvm_msi_virq(vector, *msg, pdev); + } + } else { + if (msg) { + vfio_add_kvm_msi_virq(vdev, vector, nr, true); + } + } + + /* + * We don't want to have the host allocate all possible MSI vectors + * for a device if they're not in use, so we shutdown and incrementally + * increase them as needed. + */ + if (vdev->nr_vectors < nr + 1) { + vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_MSIX_IRQ_INDEX); + vdev->nr_vectors = nr + 1; + ret = vfio_enable_vectors(vdev, true); + if (ret) { + error_report("vfio: failed to enable vectors, %d", ret); + } + } else { + Error *err = NULL; + int32_t fd; + + if (vector->virq >= 0) { + fd = event_notifier_get_fd(&vector->kvm_interrupt); + } else { + fd = event_notifier_get_fd(&vector->interrupt); + } + + if (vfio_set_irq_signaling(&vdev->vbasedev, + VFIO_PCI_MSIX_IRQ_INDEX, nr, + VFIO_IRQ_SET_ACTION_TRIGGER, fd, &err)) { + error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name); + } + } + + /* Disable PBA emulation when nothing more is pending. 
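
/*
 * Editor's note: condensed, this maintains the invariant that the PBA
 * MemoryRegion is enabled exactly while some masked vector has fired
 * (hypothetical helper, equivalent to the set_bit/clear_bit logic here
 * and in vfio_msi_interrupt()):
 */
static void example_msix_pba_update(VFIOPCIDevice *vdev)
{
    /* find_first_bit() returning nbits means the bitmap is empty */
    bool any_pending = find_first_bit(vdev->msix->pending, vdev->nr_vectors)
                       != vdev->nr_vectors;

    memory_region_set_enabled(&vdev->pdev.msix_pba_mmio, any_pending);
}
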
*/ + clear_bit(nr, vdev->msix->pending); + if (find_first_bit(vdev->msix->pending, + vdev->nr_vectors) == vdev->nr_vectors) { + memory_region_set_enabled(&vdev->pdev.msix_pba_mmio, false); + trace_vfio_msix_pba_disable(vdev->vbasedev.name); + } + + return 0; +} + +static int vfio_msix_vector_use(PCIDevice *pdev, + unsigned int nr, MSIMessage msg) +{ + return vfio_msix_vector_do_use(pdev, nr, &msg, vfio_msi_interrupt); +} + +static void vfio_msix_vector_release(PCIDevice *pdev, unsigned int nr) +{ + VFIOPCIDevice *vdev = VFIO_PCI(pdev); + VFIOMSIVector *vector = &vdev->msi_vectors[nr]; + + trace_vfio_msix_vector_release(vdev->vbasedev.name, nr); + + /* + * There are still old guests that mask and unmask vectors on every + * interrupt. If we're using QEMU bypass with a KVM irqfd, leave all of + * the KVM setup in place, simply switch VFIO to use the non-bypass + * eventfd. We'll then fire the interrupt through QEMU and the MSI-X + * core will mask the interrupt and set pending bits, allowing it to + * be re-asserted on unmask. Nothing to do if already using QEMU mode. + */ + if (vector->virq >= 0) { + int32_t fd = event_notifier_get_fd(&vector->interrupt); + Error *err = NULL; + + if (vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_MSIX_IRQ_INDEX, nr, + VFIO_IRQ_SET_ACTION_TRIGGER, fd, &err)) { + error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name); + } + } +} + +static void vfio_msix_enable(VFIOPCIDevice *vdev) +{ + PCIDevice *pdev = &vdev->pdev; + unsigned int nr, max_vec = 0; + + vfio_disable_interrupts(vdev); + + vdev->msi_vectors = g_new0(VFIOMSIVector, vdev->msix->entries); + + vdev->interrupt = VFIO_INT_MSIX; + + /* + * Some communication channels between VF & PF or PF & fw rely on the + * physical state of the device and expect that enabling MSI-X from the + * guest enables the same on the host. When our guest is Linux, the + * guest driver call to pci_enable_msix() sets the enabling bit in the + * MSI-X capability, but leaves the vector table masked. We therefore + * can't rely on a vector_use callback (from request_irq() in the guest) + * to switch the physical device into MSI-X mode because that may come a + * long time after pci_enable_msix(). This code enables vector 0 with + * triggering to userspace, then immediately release the vector, leaving + * the physical device with no vectors enabled, but MSI-X enabled, just + * like the guest view. + * If there are already unmasked vectors (in migration resume phase and + * some guest startups) which will be enabled soon, we can allocate all + * of them here to avoid inefficiently disabling and enabling vectors + * repeatedly later. 
+ */ + if (!pdev->msix_function_masked) { + for (nr = 0; nr < msix_nr_vectors_allocated(pdev); nr++) { + if (!msix_is_masked(pdev, nr)) { + max_vec = nr; + } + } + } + vfio_msix_vector_do_use(pdev, max_vec, NULL, NULL); + vfio_msix_vector_release(pdev, max_vec); + + if (msix_set_vector_notifiers(pdev, vfio_msix_vector_use, + vfio_msix_vector_release, NULL)) { + error_report("vfio: msix_set_vector_notifiers failed"); + } + + trace_vfio_msix_enable(vdev->vbasedev.name); +} + +static void vfio_msi_enable(VFIOPCIDevice *vdev) +{ + int ret, i; + + vfio_disable_interrupts(vdev); + + vdev->nr_vectors = msi_nr_vectors_allocated(&vdev->pdev); +retry: + vdev->msi_vectors = g_new0(VFIOMSIVector, vdev->nr_vectors); + + for (i = 0; i < vdev->nr_vectors; i++) { + VFIOMSIVector *vector = &vdev->msi_vectors[i]; + + vector->vdev = vdev; + vector->virq = -1; + vector->use = true; + + if (event_notifier_init(&vector->interrupt, 0)) { + error_report("vfio: Error: event_notifier_init failed"); + } + + qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt), + vfio_msi_interrupt, NULL, vector); + + /* + * Attempt to enable route through KVM irqchip, + * default to userspace handling if unavailable. + */ + vfio_add_kvm_msi_virq(vdev, vector, i, false); + } + + /* Set interrupt type prior to possible interrupts */ + vdev->interrupt = VFIO_INT_MSI; + + ret = vfio_enable_vectors(vdev, false); + if (ret) { + if (ret < 0) { + error_report("vfio: Error: Failed to setup MSI fds: %m"); + } else if (ret != vdev->nr_vectors) { + error_report("vfio: Error: Failed to enable %d " + "MSI vectors, retry with %d", vdev->nr_vectors, ret); + } + + for (i = 0; i < vdev->nr_vectors; i++) { + VFIOMSIVector *vector = &vdev->msi_vectors[i]; + if (vector->virq >= 0) { + vfio_remove_kvm_msi_virq(vector); + } + qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt), + NULL, NULL, NULL); + event_notifier_cleanup(&vector->interrupt); + } + + g_free(vdev->msi_vectors); + vdev->msi_vectors = NULL; + + if (ret > 0 && ret != vdev->nr_vectors) { + vdev->nr_vectors = ret; + goto retry; + } + vdev->nr_vectors = 0; + + /* + * Failing to setup MSI doesn't really fall within any specification. + * Let's try leaving interrupts disabled and hope the guest figures + * out to fall back to INTx for this device. + */ + error_report("vfio: Error: Failed to enable MSI"); + vdev->interrupt = VFIO_INT_NONE; + + return; + } + + trace_vfio_msi_enable(vdev->vbasedev.name, vdev->nr_vectors); +} + +static void vfio_msi_disable_common(VFIOPCIDevice *vdev) +{ + Error *err = NULL; + int i; + + for (i = 0; i < vdev->nr_vectors; i++) { + VFIOMSIVector *vector = &vdev->msi_vectors[i]; + if (vdev->msi_vectors[i].use) { + if (vector->virq >= 0) { + vfio_remove_kvm_msi_virq(vector); + } + qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt), + NULL, NULL, NULL); + event_notifier_cleanup(&vector->interrupt); + } + } + + g_free(vdev->msi_vectors); + vdev->msi_vectors = NULL; + vdev->nr_vectors = 0; + vdev->interrupt = VFIO_INT_NONE; + + vfio_intx_enable(vdev, &err); + if (err) { + error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name); + } +} + +static void vfio_msix_disable(VFIOPCIDevice *vdev) +{ + int i; + + msix_unset_vector_notifiers(&vdev->pdev); + + /* + * MSI-X will only release vectors if MSI-X is still enabled on the + * device, check through the rest and release it ourselves if necessary. 
+ */ + for (i = 0; i < vdev->nr_vectors; i++) { + if (vdev->msi_vectors[i].use) { + vfio_msix_vector_release(&vdev->pdev, i); + msix_vector_unuse(&vdev->pdev, i); + } + } + + if (vdev->nr_vectors) { + vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_MSIX_IRQ_INDEX); + } + + vfio_msi_disable_common(vdev); + + memset(vdev->msix->pending, 0, + BITS_TO_LONGS(vdev->msix->entries) * sizeof(unsigned long)); + + trace_vfio_msix_disable(vdev->vbasedev.name); +} + +static void vfio_msi_disable(VFIOPCIDevice *vdev) +{ + vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_MSI_IRQ_INDEX); + vfio_msi_disable_common(vdev); + + trace_vfio_msi_disable(vdev->vbasedev.name); +} + +static void vfio_update_msi(VFIOPCIDevice *vdev) +{ + int i; + + for (i = 0; i < vdev->nr_vectors; i++) { + VFIOMSIVector *vector = &vdev->msi_vectors[i]; + MSIMessage msg; + + if (!vector->use || vector->virq < 0) { + continue; + } + + msg = msi_get_message(&vdev->pdev, i); + vfio_update_kvm_msi_virq(vector, msg, &vdev->pdev); + } +} + +static void vfio_pci_load_rom(VFIOPCIDevice *vdev) +{ + struct vfio_region_info *reg_info; + uint64_t size; + off_t off = 0; + ssize_t bytes; + + if (vfio_get_region_info(&vdev->vbasedev, + VFIO_PCI_ROM_REGION_INDEX, ®_info)) { + error_report("vfio: Error getting ROM info: %m"); + return; + } + + trace_vfio_pci_load_rom(vdev->vbasedev.name, (unsigned long)reg_info->size, + (unsigned long)reg_info->offset, + (unsigned long)reg_info->flags); + + vdev->rom_size = size = reg_info->size; + vdev->rom_offset = reg_info->offset; + + g_free(reg_info); + + if (!vdev->rom_size) { + vdev->rom_read_failed = true; + error_report("vfio-pci: Cannot read device rom at " + "%s", vdev->vbasedev.name); + error_printf("Device option ROM contents are probably invalid " + "(check dmesg).\nSkip option ROM probe with rombar=0, " + "or load from file with romfile=\n"); + return; + } + + vdev->rom = g_malloc(size); + memset(vdev->rom, 0xff, size); + + while (size) { + bytes = pread(vdev->vbasedev.fd, vdev->rom + off, + size, vdev->rom_offset + off); + if (bytes == 0) { + break; + } else if (bytes > 0) { + off += bytes; + size -= bytes; + } else { + if (errno == EINTR || errno == EAGAIN) { + continue; + } + error_report("vfio: Error reading device ROM: %m"); + break; + } + } + + /* + * Test the ROM signature against our device, if the vendor is correct + * but the device ID doesn't match, store the correct device ID and + * recompute the checksum. Intel IGD devices need this and are known + * to have bogus checksums so we can't simply adjust the checksum. 
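
/*
 * Editor's reference sketch (not in the patch): a PCI option ROM image is
 * valid when its bytes sum to zero modulo 256.  The loop below uses header
 * byte 6 as the adjustment byte; stand-alone, the recomputation is:
 */
static void example_rom_fix_checksum(uint8_t *rom, size_t len)
{
    uint8_t csum = 0;
    size_t i;

    rom[6] = 0;            /* clear the adjustment byte first */
    for (i = 0; i < len; i++) {
        csum += rom[i];
    }
    rom[6] = -csum;        /* whole image now sums to zero (mod 256) */
}
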
+ */ + if (pci_get_word(vdev->rom) == 0xaa55 && + pci_get_word(vdev->rom + 0x18) + 8 < vdev->rom_size && + !memcmp(vdev->rom + pci_get_word(vdev->rom + 0x18), "PCIR", 4)) { + uint16_t vid, did; + + vid = pci_get_word(vdev->rom + pci_get_word(vdev->rom + 0x18) + 4); + did = pci_get_word(vdev->rom + pci_get_word(vdev->rom + 0x18) + 6); + + if (vid == vdev->vendor_id && did != vdev->device_id) { + int i; + uint8_t csum, *data = vdev->rom; + + pci_set_word(vdev->rom + pci_get_word(vdev->rom + 0x18) + 6, + vdev->device_id); + data[6] = 0; + + for (csum = 0, i = 0; i < vdev->rom_size; i++) { + csum += data[i]; + } + + data[6] = -csum; + } + } +} + +static uint64_t vfio_rom_read(void *opaque, hwaddr addr, unsigned size) +{ + VFIOPCIDevice *vdev = opaque; + union { + uint8_t byte; + uint16_t word; + uint32_t dword; + uint64_t qword; + } val; + uint64_t data = 0; + + /* Load the ROM lazily when the guest tries to read it */ + if (unlikely(!vdev->rom && !vdev->rom_read_failed)) { + vfio_pci_load_rom(vdev); + } + + memcpy(&val, vdev->rom + addr, + (addr < vdev->rom_size) ? MIN(size, vdev->rom_size - addr) : 0); + + switch (size) { + case 1: + data = val.byte; + break; + case 2: + data = le16_to_cpu(val.word); + break; + case 4: + data = le32_to_cpu(val.dword); + break; + default: + hw_error("vfio: unsupported read size, %d bytes\n", size); + break; + } + + trace_vfio_rom_read(vdev->vbasedev.name, addr, size, data); + + return data; +} + +static void vfio_rom_write(void *opaque, hwaddr addr, + uint64_t data, unsigned size) +{ +} + +static const MemoryRegionOps vfio_rom_ops = { + .read = vfio_rom_read, + .write = vfio_rom_write, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static void vfio_pci_size_rom(VFIOPCIDevice *vdev) +{ + uint32_t orig, size = cpu_to_le32((uint32_t)PCI_ROM_ADDRESS_MASK); + off_t offset = vdev->config_offset + PCI_ROM_ADDRESS; + DeviceState *dev = DEVICE(vdev); + char *name; + int fd = vdev->vbasedev.fd; + + if (vdev->pdev.romfile || !vdev->pdev.rom_bar) { + /* Since pci handles romfile, just print a message and return */ + if (vfio_opt_rom_in_denylist(vdev) && vdev->pdev.romfile) { + warn_report("Device at %s is known to cause system instability" + " issues during option rom execution", + vdev->vbasedev.name); + error_printf("Proceeding anyway since user specified romfile\n"); + } + return; + } + + /* + * Use the same size ROM BAR as the physical device. The contents + * will get filled in later when the guest tries to read it. 
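
/*
 * Editor's worked example for the sizing dance below (hypothetical
 * values): write all 1s to the ROM BAR, read back 0xfffe0000, restore.
 *
 *   size = ~(0xfffe0000 & PCI_ROM_ADDRESS_MASK) + 1
 *        = ~0xfffe0000 + 1 = 0x00020000      -> a 128 KiB ROM BAR
 */
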
+ */ + if (pread(fd, &orig, 4, offset) != 4 || + pwrite(fd, &size, 4, offset) != 4 || + pread(fd, &size, 4, offset) != 4 || + pwrite(fd, &orig, 4, offset) != 4) { + error_report("%s(%s) failed: %m", __func__, vdev->vbasedev.name); + return; + } + + size = ~(le32_to_cpu(size) & PCI_ROM_ADDRESS_MASK) + 1; + + if (!size) { + return; + } + + if (vfio_opt_rom_in_denylist(vdev)) { + if (dev->opts && qdict_haskey(dev->opts, "rombar")) { + warn_report("Device at %s is known to cause system instability" + " issues during option rom execution", + vdev->vbasedev.name); + error_printf("Proceeding anyway since user specified" + " non zero value for rombar\n"); + } else { + warn_report("Rom loading for device at %s has been disabled" + " due to system instability issues", + vdev->vbasedev.name); + error_printf("Specify rombar=1 or romfile to force\n"); + return; + } + } + + trace_vfio_pci_size_rom(vdev->vbasedev.name, size); + + name = g_strdup_printf("vfio[%s].rom", vdev->vbasedev.name); + + memory_region_init_io(&vdev->pdev.rom, OBJECT(vdev), + &vfio_rom_ops, vdev, name, size); + g_free(name); + + pci_register_bar(&vdev->pdev, PCI_ROM_SLOT, + PCI_BASE_ADDRESS_SPACE_MEMORY, &vdev->pdev.rom); + + vdev->rom_read_failed = false; +} + +void vfio_vga_write(void *opaque, hwaddr addr, + uint64_t data, unsigned size) +{ + VFIOVGARegion *region = opaque; + VFIOVGA *vga = container_of(region, VFIOVGA, region[region->nr]); + union { + uint8_t byte; + uint16_t word; + uint32_t dword; + uint64_t qword; + } buf; + off_t offset = vga->fd_offset + region->offset + addr; + + switch (size) { + case 1: + buf.byte = data; + break; + case 2: + buf.word = cpu_to_le16(data); + break; + case 4: + buf.dword = cpu_to_le32(data); + break; + default: + hw_error("vfio: unsupported write size, %d bytes", size); + break; + } + + if (pwrite(vga->fd, &buf, size, offset) != size) { + error_report("%s(,0x%"HWADDR_PRIx", 0x%"PRIx64", %d) failed: %m", + __func__, region->offset + addr, data, size); + } + + trace_vfio_vga_write(region->offset + addr, data, size); +} + +uint64_t vfio_vga_read(void *opaque, hwaddr addr, unsigned size) +{ + VFIOVGARegion *region = opaque; + VFIOVGA *vga = container_of(region, VFIOVGA, region[region->nr]); + union { + uint8_t byte; + uint16_t word; + uint32_t dword; + uint64_t qword; + } buf; + uint64_t data = 0; + off_t offset = vga->fd_offset + region->offset + addr; + + if (pread(vga->fd, &buf, size, offset) != size) { + error_report("%s(,0x%"HWADDR_PRIx", %d) failed: %m", + __func__, region->offset + addr, size); + return (uint64_t)-1; + } + + switch (size) { + case 1: + data = buf.byte; + break; + case 2: + data = le16_to_cpu(buf.word); + break; + case 4: + data = le32_to_cpu(buf.dword); + break; + default: + hw_error("vfio: unsupported read size, %d bytes", size); + break; + } + + trace_vfio_vga_read(region->offset + addr, size, data); + + return data; +} + +static const MemoryRegionOps vfio_vga_ops = { + .read = vfio_vga_read, + .write = vfio_vga_write, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +/* + * Expand memory region of sub-page(size < PAGE_SIZE) MMIO BAR to page + * size if the BAR is in an exclusive page in host so that we could map + * this BAR to guest. But this sub-page BAR may not occupy an exclusive + * page in guest. So we should set the priority of the expanded memory + * region to zero in case of overlap with BARs which share the same page + * with the sub-page BAR in guest. 
Besides, we should also recover the + * size of this sub-page BAR when its base address is changed in guest + * and not page aligned any more. + */ +static void vfio_sub_page_bar_update_mapping(PCIDevice *pdev, int bar) +{ + VFIOPCIDevice *vdev = VFIO_PCI(pdev); + VFIORegion *region = &vdev->bars[bar].region; + MemoryRegion *mmap_mr, *region_mr, *base_mr; + PCIIORegion *r; + pcibus_t bar_addr; + uint64_t size = region->size; + + /* Make sure that the whole region is allowed to be mmapped */ + if (region->nr_mmaps != 1 || !region->mmaps[0].mmap || + region->mmaps[0].size != region->size) { + return; + } + + r = &pdev->io_regions[bar]; + bar_addr = r->addr; + base_mr = vdev->bars[bar].mr; + region_mr = region->mem; + mmap_mr = ®ion->mmaps[0].mem; + + /* If BAR is mapped and page aligned, update to fill PAGE_SIZE */ + if (bar_addr != PCI_BAR_UNMAPPED && + !(bar_addr & ~qemu_real_host_page_mask)) { + size = qemu_real_host_page_size; + } + + memory_region_transaction_begin(); + + if (vdev->bars[bar].size < size) { + memory_region_set_size(base_mr, size); + } + memory_region_set_size(region_mr, size); + memory_region_set_size(mmap_mr, size); + if (size != vdev->bars[bar].size && memory_region_is_mapped(base_mr)) { + memory_region_del_subregion(r->address_space, base_mr); + memory_region_add_subregion_overlap(r->address_space, + bar_addr, base_mr, 0); + } + + memory_region_transaction_commit(); +} + +/* + * PCI config space + */ +uint32_t vfio_pci_read_config(PCIDevice *pdev, uint32_t addr, int len) +{ + VFIOPCIDevice *vdev = VFIO_PCI(pdev); + uint32_t emu_bits = 0, emu_val = 0, phys_val = 0, val; + + memcpy(&emu_bits, vdev->emulated_config_bits + addr, len); + emu_bits = le32_to_cpu(emu_bits); + + if (emu_bits) { + emu_val = pci_default_read_config(pdev, addr, len); + } + + if (~emu_bits & (0xffffffffU >> (32 - len * 8))) { + ssize_t ret; + + ret = pread(vdev->vbasedev.fd, &phys_val, len, + vdev->config_offset + addr); + if (ret != len) { + error_report("%s(%s, 0x%x, 0x%x) failed: %m", + __func__, vdev->vbasedev.name, addr, len); + return -errno; + } + phys_val = le32_to_cpu(phys_val); + } + + val = (emu_val & emu_bits) | (phys_val & ~emu_bits); + + trace_vfio_pci_read_config(vdev->vbasedev.name, addr, len, val); + + return val; +} + +void vfio_pci_write_config(PCIDevice *pdev, + uint32_t addr, uint32_t val, int len) +{ + VFIOPCIDevice *vdev = VFIO_PCI(pdev); + uint32_t val_le = cpu_to_le32(val); + + trace_vfio_pci_write_config(vdev->vbasedev.name, addr, val, len); + + /* Write everything to VFIO, let it filter out what we can't write */ + if (pwrite(vdev->vbasedev.fd, &val_le, len, vdev->config_offset + addr) + != len) { + error_report("%s(%s, 0x%x, 0x%x, 0x%x) failed: %m", + __func__, vdev->vbasedev.name, addr, val, len); + } + + /* MSI/MSI-X Enabling/Disabling */ + if (pdev->cap_present & QEMU_PCI_CAP_MSI && + ranges_overlap(addr, len, pdev->msi_cap, vdev->msi_cap_size)) { + int is_enabled, was_enabled = msi_enabled(pdev); + + pci_default_write_config(pdev, addr, val, len); + + is_enabled = msi_enabled(pdev); + + if (!was_enabled) { + if (is_enabled) { + vfio_msi_enable(vdev); + } + } else { + if (!is_enabled) { + vfio_msi_disable(vdev); + } else { + vfio_update_msi(vdev); + } + } + } else if (pdev->cap_present & QEMU_PCI_CAP_MSIX && + ranges_overlap(addr, len, pdev->msix_cap, MSIX_CAP_LENGTH)) { + int is_enabled, was_enabled = msix_enabled(pdev); + + pci_default_write_config(pdev, addr, val, len); + + is_enabled = msix_enabled(pdev); + + if (!was_enabled && is_enabled) { + 
vfio_msix_enable(vdev); + } else if (was_enabled && !is_enabled) { + vfio_msix_disable(vdev); + } + } else if (ranges_overlap(addr, len, PCI_BASE_ADDRESS_0, 24) || + range_covers_byte(addr, len, PCI_COMMAND)) { + pcibus_t old_addr[PCI_NUM_REGIONS - 1]; + int bar; + + for (bar = 0; bar < PCI_ROM_SLOT; bar++) { + old_addr[bar] = pdev->io_regions[bar].addr; + } + + pci_default_write_config(pdev, addr, val, len); + + for (bar = 0; bar < PCI_ROM_SLOT; bar++) { + if (old_addr[bar] != pdev->io_regions[bar].addr && + vdev->bars[bar].region.size > 0 && + vdev->bars[bar].region.size < qemu_real_host_page_size) { + vfio_sub_page_bar_update_mapping(pdev, bar); + } + } + } else { + /* Write everything to QEMU to keep emulated bits correct */ + pci_default_write_config(pdev, addr, val, len); + } +} + +/* + * Interrupt setup + */ +static void vfio_disable_interrupts(VFIOPCIDevice *vdev) +{ + /* + * More complicated than it looks. Disabling MSI/X transitions the + * device to INTx mode (if supported). Therefore we need to first + * disable MSI/X and then cleanup by disabling INTx. + */ + if (vdev->interrupt == VFIO_INT_MSIX) { + vfio_msix_disable(vdev); + } else if (vdev->interrupt == VFIO_INT_MSI) { + vfio_msi_disable(vdev); + } + + if (vdev->interrupt == VFIO_INT_INTx) { + vfio_intx_disable(vdev); + } +} + +static int vfio_msi_setup(VFIOPCIDevice *vdev, int pos, Error **errp) +{ + uint16_t ctrl; + bool msi_64bit, msi_maskbit; + int ret, entries; + Error *err = NULL; + + if (pread(vdev->vbasedev.fd, &ctrl, sizeof(ctrl), + vdev->config_offset + pos + PCI_CAP_FLAGS) != sizeof(ctrl)) { + error_setg_errno(errp, errno, "failed reading MSI PCI_CAP_FLAGS"); + return -errno; + } + ctrl = le16_to_cpu(ctrl); + + msi_64bit = !!(ctrl & PCI_MSI_FLAGS_64BIT); + msi_maskbit = !!(ctrl & PCI_MSI_FLAGS_MASKBIT); + entries = 1 << ((ctrl & PCI_MSI_FLAGS_QMASK) >> 1); + + trace_vfio_msi_setup(vdev->vbasedev.name, pos); + + ret = msi_init(&vdev->pdev, pos, entries, msi_64bit, msi_maskbit, &err); + if (ret < 0) { + if (ret == -ENOTSUP) { + return 0; + } + error_propagate_prepend(errp, err, "msi_init failed: "); + return ret; + } + vdev->msi_cap_size = 0xa + (msi_maskbit ? 0xa : 0) + (msi_64bit ? 0x4 : 0); + + return 0; +} + +static void vfio_pci_fixup_msix_region(VFIOPCIDevice *vdev) +{ + off_t start, end; + VFIORegion *region = &vdev->bars[vdev->msix->table_bar].region; + + /* + * If the host driver allows mapping of a MSIX data, we are going to + * do map the entire BAR and emulate MSIX table on top of that. + */ + if (vfio_has_region_cap(&vdev->vbasedev, region->nr, + VFIO_REGION_INFO_CAP_MSIX_MAPPABLE)) { + return; + } + + /* + * We expect to find a single mmap covering the whole BAR, anything else + * means it's either unsupported or already setup. + */ + if (region->nr_mmaps != 1 || region->mmaps[0].offset || + region->size != region->mmaps[0].size) { + return; + } + + /* MSI-X table start and end aligned to host page size */ + start = vdev->msix->table_offset & qemu_real_host_page_mask; + end = REAL_HOST_PAGE_ALIGN((uint64_t)vdev->msix->table_offset + + (vdev->msix->entries * PCI_MSIX_ENTRY_SIZE)); + + /* + * Does the MSI-X table cover the beginning of the BAR? The whole BAR? + * NB - Host page size is necessarily a power of two and so is the PCI + * BAR (not counting EA yet), therefore if we have host page aligned + * @start and @end, then any remainder of the BAR before or after those + * must be at least host page sized and therefore mmap'able. 
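
/*
 * Editor's worked example (hypothetical numbers) for the cases handled
 * below: a 256-entry table (16 bytes/entry) at table_offset 0x3000 in a
 * 0x10000-byte BAR with 4 KiB host pages gives
 *
 *   start = 0x3000 & page_mask                    = 0x3000
 *   end   = REAL_HOST_PAGE_ALIGN(0x3000 + 0x1000) = 0x4000
 *
 * start != 0 and end < size, so the mmap is split into [0, 0x3000) and
 * [0x4000, 0x10000), leaving only the table pages trapped.
 */
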
+ */ + if (!start) { + if (end >= region->size) { + region->nr_mmaps = 0; + g_free(region->mmaps); + region->mmaps = NULL; + trace_vfio_msix_fixup(vdev->vbasedev.name, + vdev->msix->table_bar, 0, 0); + } else { + region->mmaps[0].offset = end; + region->mmaps[0].size = region->size - end; + trace_vfio_msix_fixup(vdev->vbasedev.name, + vdev->msix->table_bar, region->mmaps[0].offset, + region->mmaps[0].offset + region->mmaps[0].size); + } + + /* Maybe it's aligned at the end of the BAR */ + } else if (end >= region->size) { + region->mmaps[0].size = start; + trace_vfio_msix_fixup(vdev->vbasedev.name, + vdev->msix->table_bar, region->mmaps[0].offset, + region->mmaps[0].offset + region->mmaps[0].size); + + /* Otherwise it must split the BAR */ + } else { + region->nr_mmaps = 2; + region->mmaps = g_renew(VFIOMmap, region->mmaps, 2); + + memcpy(®ion->mmaps[1], ®ion->mmaps[0], sizeof(VFIOMmap)); + + region->mmaps[0].size = start; + trace_vfio_msix_fixup(vdev->vbasedev.name, + vdev->msix->table_bar, region->mmaps[0].offset, + region->mmaps[0].offset + region->mmaps[0].size); + + region->mmaps[1].offset = end; + region->mmaps[1].size = region->size - end; + trace_vfio_msix_fixup(vdev->vbasedev.name, + vdev->msix->table_bar, region->mmaps[1].offset, + region->mmaps[1].offset + region->mmaps[1].size); + } +} + +static void vfio_pci_relocate_msix(VFIOPCIDevice *vdev, Error **errp) +{ + int target_bar = -1; + size_t msix_sz; + + if (!vdev->msix || vdev->msix_relo == OFF_AUTOPCIBAR_OFF) { + return; + } + + /* The actual minimum size of MSI-X structures */ + msix_sz = (vdev->msix->entries * PCI_MSIX_ENTRY_SIZE) + + (QEMU_ALIGN_UP(vdev->msix->entries, 64) / 8); + /* Round up to host pages, we don't want to share a page */ + msix_sz = REAL_HOST_PAGE_ALIGN(msix_sz); + /* PCI BARs must be a power of 2 */ + msix_sz = pow2ceil(msix_sz); + + if (vdev->msix_relo == OFF_AUTOPCIBAR_AUTO) { + /* + * TODO: Lookup table for known devices. + * + * Logically we might use an algorithm here to select the BAR adding + * the least additional MMIO space, but we cannot programmatically + * predict the driver dependency on BAR ordering or sizing, therefore + * 'auto' becomes a lookup for combinations reported to work. + */ + if (target_bar < 0) { + error_setg(errp, "No automatic MSI-X relocation available for " + "device %04x:%04x", vdev->vendor_id, vdev->device_id); + return; + } + } else { + target_bar = (int)(vdev->msix_relo - OFF_AUTOPCIBAR_BAR0); + } + + /* I/O port BARs cannot host MSI-X structures */ + if (vdev->bars[target_bar].ioport) { + error_setg(errp, "Invalid MSI-X relocation BAR %d, " + "I/O port BAR", target_bar); + return; + } + + /* Cannot use a BAR in the "shadow" of a 64-bit BAR */ + if (!vdev->bars[target_bar].size && + target_bar > 0 && vdev->bars[target_bar - 1].mem64) { + error_setg(errp, "Invalid MSI-X relocation BAR %d, " + "consumed by 64-bit BAR %d", target_bar, target_bar - 1); + return; + } + + /* 2GB max size for 32-bit BARs, cannot double if already > 1G */ + if (vdev->bars[target_bar].size > 1 * GiB && + !vdev->bars[target_bar].mem64) { + error_setg(errp, "Invalid MSI-X relocation BAR %d, " + "no space to extend 32-bit BAR", target_bar); + return; + } + + /* + * If adding a new BAR, test if we can make it 64bit. We make it + * prefetchable since QEMU MSI-X emulation has no read side effects + * and doing so makes mapping more flexible. 
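
/*
 * Editor's worked example of the msix_sz arithmetic above (hypothetical
 * 96-entry device): table 96 * 16 = 0x600 bytes, PBA ALIGN(96, 64) / 8 =
 * 16 bytes, so 0x610 total; REAL_HOST_PAGE_ALIGN -> 0x1000 and pow2ceil ->
 * 0x1000.  Doubling an existing 0x1000-byte BAR then yields size 0x2000
 * with table_offset 0x1000, i.e. the MSI-X structures get their own half.
 */
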
+ */ + if (!vdev->bars[target_bar].size) { + if (target_bar < (PCI_ROM_SLOT - 1) && + !vdev->bars[target_bar + 1].size) { + vdev->bars[target_bar].mem64 = true; + vdev->bars[target_bar].type = PCI_BASE_ADDRESS_MEM_TYPE_64; + } + vdev->bars[target_bar].type |= PCI_BASE_ADDRESS_MEM_PREFETCH; + vdev->bars[target_bar].size = msix_sz; + vdev->msix->table_offset = 0; + } else { + vdev->bars[target_bar].size = MAX(vdev->bars[target_bar].size * 2, + msix_sz * 2); + /* + * Due to above size calc, MSI-X always starts halfway into the BAR, + * which will always be a separate host page. + */ + vdev->msix->table_offset = vdev->bars[target_bar].size / 2; + } + + vdev->msix->table_bar = target_bar; + vdev->msix->pba_bar = target_bar; + /* Requires 8-byte alignment, but PCI_MSIX_ENTRY_SIZE guarantees that */ + vdev->msix->pba_offset = vdev->msix->table_offset + + (vdev->msix->entries * PCI_MSIX_ENTRY_SIZE); + + trace_vfio_msix_relo(vdev->vbasedev.name, + vdev->msix->table_bar, vdev->msix->table_offset); +} + +/* + * We don't have any control over how pci_add_capability() inserts + * capabilities into the chain. In order to setup MSI-X we need a + * MemoryRegion for the BAR. In order to setup the BAR and not + * attempt to mmap the MSI-X table area, which VFIO won't allow, we + * need to first look for where the MSI-X table lives. So we + * unfortunately split MSI-X setup across two functions. + */ +static void vfio_msix_early_setup(VFIOPCIDevice *vdev, Error **errp) +{ + uint8_t pos; + uint16_t ctrl; + uint32_t table, pba; + int fd = vdev->vbasedev.fd; + VFIOMSIXInfo *msix; + + pos = pci_find_capability(&vdev->pdev, PCI_CAP_ID_MSIX); + if (!pos) { + return; + } + + if (pread(fd, &ctrl, sizeof(ctrl), + vdev->config_offset + pos + PCI_MSIX_FLAGS) != sizeof(ctrl)) { + error_setg_errno(errp, errno, "failed to read PCI MSIX FLAGS"); + return; + } + + if (pread(fd, &table, sizeof(table), + vdev->config_offset + pos + PCI_MSIX_TABLE) != sizeof(table)) { + error_setg_errno(errp, errno, "failed to read PCI MSIX TABLE"); + return; + } + + if (pread(fd, &pba, sizeof(pba), + vdev->config_offset + pos + PCI_MSIX_PBA) != sizeof(pba)) { + error_setg_errno(errp, errno, "failed to read PCI MSIX PBA"); + return; + } + + ctrl = le16_to_cpu(ctrl); + table = le32_to_cpu(table); + pba = le32_to_cpu(pba); + + msix = g_malloc0(sizeof(*msix)); + msix->table_bar = table & PCI_MSIX_FLAGS_BIRMASK; + msix->table_offset = table & ~PCI_MSIX_FLAGS_BIRMASK; + msix->pba_bar = pba & PCI_MSIX_FLAGS_BIRMASK; + msix->pba_offset = pba & ~PCI_MSIX_FLAGS_BIRMASK; + msix->entries = (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1; + + /* + * Test the size of the pba_offset variable and catch if it extends outside + * of the specified BAR. If it is the case, we need to apply a hardware + * specific quirk if the device is known or we have a broken configuration. + */ + if (msix->pba_offset >= vdev->bars[msix->pba_bar].region.size) { + /* + * Chelsio T5 Virtual Function devices are encoded as 0x58xx for T5 + * adapters. The T5 hardware returns an incorrect value of 0x8000 for + * the VF PBA offset while the BAR itself is only 8k. The correct value + * is 0x1000, so we hard code that here. + */ + if (vdev->vendor_id == PCI_VENDOR_ID_CHELSIO && + (vdev->device_id & 0xff00) == 0x5800) { + msix->pba_offset = 0x1000; + /* + * BAIDU KUNLUN Virtual Function devices for KUNLUN AI processor + * return an incorrect value of 0x460000 for the VF PBA offset while + * the BAR itself is only 0x10000. The correct value is 0xb400. 
+ */ + } else if (vfio_pci_is(vdev, PCI_VENDOR_ID_BAIDU, + PCI_DEVICE_ID_KUNLUN_VF)) { + msix->pba_offset = 0xb400; + } else if (vdev->msix_relo == OFF_AUTOPCIBAR_OFF) { + error_setg(errp, "hardware reports invalid configuration, " + "MSIX PBA outside of specified BAR"); + g_free(msix); + return; + } + } + + trace_vfio_msix_early_setup(vdev->vbasedev.name, pos, msix->table_bar, + msix->table_offset, msix->entries); + vdev->msix = msix; + + vfio_pci_fixup_msix_region(vdev); + + vfio_pci_relocate_msix(vdev, errp); +} + +static int vfio_msix_setup(VFIOPCIDevice *vdev, int pos, Error **errp) +{ + int ret; + Error *err = NULL; + + vdev->msix->pending = g_malloc0(BITS_TO_LONGS(vdev->msix->entries) * + sizeof(unsigned long)); + ret = msix_init(&vdev->pdev, vdev->msix->entries, + vdev->bars[vdev->msix->table_bar].mr, + vdev->msix->table_bar, vdev->msix->table_offset, + vdev->bars[vdev->msix->pba_bar].mr, + vdev->msix->pba_bar, vdev->msix->pba_offset, pos, + &err); + if (ret < 0) { + if (ret == -ENOTSUP) { + warn_report_err(err); + return 0; + } + + error_propagate(errp, err); + return ret; + } + + /* + * The PCI spec suggests that devices provide additional alignment for + * MSI-X structures and avoid overlapping non-MSI-X related registers. + * For an assigned device, this hopefully means that emulation of MSI-X + * structures does not affect the performance of the device. If devices + * fail to provide that alignment, a significant performance penalty may + * result, for instance Mellanox MT27500 VFs: + * http://www.spinics.net/lists/kvm/msg125881.html + * + * The PBA is simply not that important for such a serious regression and + * most drivers do not appear to look at it. The solution for this is to + * disable the PBA MemoryRegion unless it's being used. We disable it + * here and only enable it if a masked vector fires through QEMU. As the + * vector-use notifier is called, which occurs on unmask, we test whether + * PBA emulation is needed and again disable if not. + */ + memory_region_set_enabled(&vdev->pdev.msix_pba_mmio, false); + + /* + * The emulated machine may provide a paravirt interface for MSIX setup + * so it is not strictly necessary to emulate MSIX here. This becomes + * helpful when frequently accessed MMIO registers are located in + * subpages adjacent to the MSIX table but the MSIX data containing page + * cannot be mapped because of a host page size bigger than the MSIX table + * alignment. + */ + if (object_property_get_bool(OBJECT(qdev_get_machine()), + "vfio-no-msix-emulation", NULL)) { + memory_region_set_enabled(&vdev->pdev.msix_table_mmio, false); + } + + return 0; +} + +static void vfio_teardown_msi(VFIOPCIDevice *vdev) +{ + msi_uninit(&vdev->pdev); + + if (vdev->msix) { + msix_uninit(&vdev->pdev, + vdev->bars[vdev->msix->table_bar].mr, + vdev->bars[vdev->msix->pba_bar].mr); + g_free(vdev->msix->pending); + } +} + +/* + * Resource setup + */ +static void vfio_mmap_set_enabled(VFIOPCIDevice *vdev, bool enabled) +{ + int i; + + for (i = 0; i < PCI_ROM_SLOT; i++) { + vfio_region_mmaps_set_enabled(&vdev->bars[i].region, enabled); + } +} + +static void vfio_bar_prepare(VFIOPCIDevice *vdev, int nr) +{ + VFIOBAR *bar = &vdev->bars[nr]; + + uint32_t pci_bar; + int ret; + + /* Skip both unimplemented BARs and the upper half of 64bit BARS. 
*/ + if (!bar->region.size) { + return; + } + + /* Determine what type of BAR this is for registration */ + ret = pread(vdev->vbasedev.fd, &pci_bar, sizeof(pci_bar), + vdev->config_offset + PCI_BASE_ADDRESS_0 + (4 * nr)); + if (ret != sizeof(pci_bar)) { + error_report("vfio: Failed to read BAR %d (%m)", nr); + return; + } + + pci_bar = le32_to_cpu(pci_bar); + bar->ioport = (pci_bar & PCI_BASE_ADDRESS_SPACE_IO); + bar->mem64 = bar->ioport ? 0 : (pci_bar & PCI_BASE_ADDRESS_MEM_TYPE_64); + bar->type = pci_bar & (bar->ioport ? ~PCI_BASE_ADDRESS_IO_MASK : + ~PCI_BASE_ADDRESS_MEM_MASK); + bar->size = bar->region.size; +} + +static void vfio_bars_prepare(VFIOPCIDevice *vdev) +{ + int i; + + for (i = 0; i < PCI_ROM_SLOT; i++) { + vfio_bar_prepare(vdev, i); + } +} + +static void vfio_bar_register(VFIOPCIDevice *vdev, int nr) +{ + VFIOBAR *bar = &vdev->bars[nr]; + char *name; + + if (!bar->size) { + return; + } + + bar->mr = g_new0(MemoryRegion, 1); + name = g_strdup_printf("%s base BAR %d", vdev->vbasedev.name, nr); + memory_region_init_io(bar->mr, OBJECT(vdev), NULL, NULL, name, bar->size); + g_free(name); + + if (bar->region.size) { + memory_region_add_subregion(bar->mr, 0, bar->region.mem); + + if (vfio_region_mmap(&bar->region)) { + error_report("Failed to mmap %s BAR %d. Performance may be slow", + vdev->vbasedev.name, nr); + } + } + + pci_register_bar(&vdev->pdev, nr, bar->type, bar->mr); +} + +static void vfio_bars_register(VFIOPCIDevice *vdev) +{ + int i; + + for (i = 0; i < PCI_ROM_SLOT; i++) { + vfio_bar_register(vdev, i); + } +} + +static void vfio_bars_exit(VFIOPCIDevice *vdev) +{ + int i; + + for (i = 0; i < PCI_ROM_SLOT; i++) { + VFIOBAR *bar = &vdev->bars[i]; + + vfio_bar_quirk_exit(vdev, i); + vfio_region_exit(&bar->region); + if (bar->region.size) { + memory_region_del_subregion(bar->mr, bar->region.mem); + } + } + + if (vdev->vga) { + pci_unregister_vga(&vdev->pdev); + vfio_vga_quirk_exit(vdev); + } +} + +static void vfio_bars_finalize(VFIOPCIDevice *vdev) +{ + int i; + + for (i = 0; i < PCI_ROM_SLOT; i++) { + VFIOBAR *bar = &vdev->bars[i]; + + vfio_bar_quirk_finalize(vdev, i); + vfio_region_finalize(&bar->region); + if (bar->size) { + object_unparent(OBJECT(bar->mr)); + g_free(bar->mr); + } + } + + if (vdev->vga) { + vfio_vga_quirk_finalize(vdev); + for (i = 0; i < ARRAY_SIZE(vdev->vga->region); i++) { + object_unparent(OBJECT(&vdev->vga->region[i].mem)); + } + g_free(vdev->vga); + } +} + +/* + * General setup + */ +static uint8_t vfio_std_cap_max_size(PCIDevice *pdev, uint8_t pos) +{ + uint8_t tmp; + uint16_t next = PCI_CONFIG_SPACE_SIZE; + + for (tmp = pdev->config[PCI_CAPABILITY_LIST]; tmp; + tmp = pdev->config[tmp + PCI_CAP_LIST_NEXT]) { + if (tmp > pos && tmp < next) { + next = tmp; + } + } + + return next - pos; +} + + +static uint16_t vfio_ext_cap_max_size(const uint8_t *config, uint16_t pos) +{ + uint16_t tmp, next = PCIE_CONFIG_SPACE_SIZE; + + for (tmp = PCI_CONFIG_SPACE_SIZE; tmp; + tmp = PCI_EXT_CAP_NEXT(pci_get_long(config + tmp))) { + if (tmp > pos && tmp < next) { + next = tmp; + } + } + + return next - pos; +} + +static void vfio_set_word_bits(uint8_t *buf, uint16_t val, uint16_t mask) +{ + pci_set_word(buf, (pci_get_word(buf) & ~mask) | val); +} + +static void vfio_add_emulated_word(VFIOPCIDevice *vdev, int pos, + uint16_t val, uint16_t mask) +{ + vfio_set_word_bits(vdev->pdev.config + pos, val, mask); + vfio_set_word_bits(vdev->pdev.wmask + pos, ~mask, mask); + vfio_set_word_bits(vdev->emulated_config_bits + pos, mask, mask); +} + +static void 
vfio_set_long_bits(uint8_t *buf, uint32_t val, uint32_t mask) +{ + pci_set_long(buf, (pci_get_long(buf) & ~mask) | val); +} + +static void vfio_add_emulated_long(VFIOPCIDevice *vdev, int pos, + uint32_t val, uint32_t mask) +{ + vfio_set_long_bits(vdev->pdev.config + pos, val, mask); + vfio_set_long_bits(vdev->pdev.wmask + pos, ~mask, mask); + vfio_set_long_bits(vdev->emulated_config_bits + pos, mask, mask); +} + +static int vfio_setup_pcie_cap(VFIOPCIDevice *vdev, int pos, uint8_t size, + Error **errp) +{ + uint16_t flags; + uint8_t type; + + flags = pci_get_word(vdev->pdev.config + pos + PCI_CAP_FLAGS); + type = (flags & PCI_EXP_FLAGS_TYPE) >> 4; + + if (type != PCI_EXP_TYPE_ENDPOINT && + type != PCI_EXP_TYPE_LEG_END && + type != PCI_EXP_TYPE_RC_END) { + + error_setg(errp, "assignment of PCIe type 0x%x " + "devices is not currently supported", type); + return -EINVAL; + } + + if (!pci_bus_is_express(pci_get_bus(&vdev->pdev))) { + PCIBus *bus = pci_get_bus(&vdev->pdev); + PCIDevice *bridge; + + /* + * Traditionally PCI device assignment exposes the PCIe capability + * as-is on non-express buses. The reason being that some drivers + * simply assume that it's there, for example tg3. However when + * we're running on a native PCIe machine type, like Q35, we need + * to hide the PCIe capability. The reason for this is twofold; + * first Windows guests get a Code 10 error when the PCIe capability + * is exposed in this configuration. Therefore express devices won't + * work at all unless they're attached to express buses in the VM. + * Second, a native PCIe machine introduces the possibility of fine + * granularity IOMMUs supporting both translation and isolation. + * Guest code to discover the IOMMU visibility of a device, such as + * IOMMU grouping code on Linux, is very aware of device types and + * valid transitions between bus types. An express device on a non- + * express bus is not a valid combination on bare metal systems. + * + * Drivers that require a PCIe capability to make the device + * functional are simply going to need to have their devices placed + * on a PCIe bus in the VM. + */ + while (!pci_bus_is_root(bus)) { + bridge = pci_bridge_get_device(bus); + bus = pci_get_bus(bridge); + } + + if (pci_bus_is_express(bus)) { + return 0; + } + + } else if (pci_bus_is_root(pci_get_bus(&vdev->pdev))) { + /* + * On a Root Complex bus Endpoints become Root Complex Integrated + * Endpoints, which changes the type and clears the LNK & LNK2 fields. + */ + if (type == PCI_EXP_TYPE_ENDPOINT) { + vfio_add_emulated_word(vdev, pos + PCI_CAP_FLAGS, + PCI_EXP_TYPE_RC_END << 4, + PCI_EXP_FLAGS_TYPE); + + /* Link Capabilities, Status, and Control goes away */ + if (size > PCI_EXP_LNKCTL) { + vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP, 0, ~0); + vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL, 0, ~0); + vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA, 0, ~0); + +#ifndef PCI_EXP_LNKCAP2 +#define PCI_EXP_LNKCAP2 44 +#endif +#ifndef PCI_EXP_LNKSTA2 +#define PCI_EXP_LNKSTA2 50 +#endif + /* Link 2 Capabilities, Status, and Control goes away */ + if (size > PCI_EXP_LNKCAP2) { + vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP2, 0, ~0); + vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL2, 0, ~0); + vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA2, 0, ~0); + } + } + + } else if (type == PCI_EXP_TYPE_LEG_END) { + /* + * Legacy endpoints don't belong on the root complex. Windows + * seems to be happier with devices if we skip the capability. 
+ */ + return 0; + } + + } else { + /* + * Convert Root Complex Integrated Endpoints to regular endpoints. + * These devices don't support LNK/LNK2 capabilities, so make them up. + */ + if (type == PCI_EXP_TYPE_RC_END) { + vfio_add_emulated_word(vdev, pos + PCI_CAP_FLAGS, + PCI_EXP_TYPE_ENDPOINT << 4, + PCI_EXP_FLAGS_TYPE); + vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP, + QEMU_PCI_EXP_LNKCAP_MLW(QEMU_PCI_EXP_LNK_X1) | + QEMU_PCI_EXP_LNKCAP_MLS(QEMU_PCI_EXP_LNK_2_5GT), ~0); + vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL, 0, ~0); + } + } + + /* + * Intel 82599 SR-IOV VFs report an invalid PCIe capability version 0 + * (Niantic errata #35) causing Windows to error with a Code 10 for the + * device on Q35. Fixup any such devices to report version 1. If we + * were to remove the capability entirely the guest would lose extended + * config space. + */ + if ((flags & PCI_EXP_FLAGS_VERS) == 0) { + vfio_add_emulated_word(vdev, pos + PCI_CAP_FLAGS, + 1, PCI_EXP_FLAGS_VERS); + } + + pos = pci_add_capability(&vdev->pdev, PCI_CAP_ID_EXP, pos, size, + errp); + if (pos < 0) { + return pos; + } + + vdev->pdev.exp.exp_cap = pos; + + return pos; +} + +static void vfio_check_pcie_flr(VFIOPCIDevice *vdev, uint8_t pos) +{ + uint32_t cap = pci_get_long(vdev->pdev.config + pos + PCI_EXP_DEVCAP); + + if (cap & PCI_EXP_DEVCAP_FLR) { + trace_vfio_check_pcie_flr(vdev->vbasedev.name); + vdev->has_flr = true; + } +} + +static void vfio_check_pm_reset(VFIOPCIDevice *vdev, uint8_t pos) +{ + uint16_t csr = pci_get_word(vdev->pdev.config + pos + PCI_PM_CTRL); + + if (!(csr & PCI_PM_CTRL_NO_SOFT_RESET)) { + trace_vfio_check_pm_reset(vdev->vbasedev.name); + vdev->has_pm_reset = true; + } +} + +static void vfio_check_af_flr(VFIOPCIDevice *vdev, uint8_t pos) +{ + uint8_t cap = pci_get_byte(vdev->pdev.config + pos + PCI_AF_CAP); + + if ((cap & PCI_AF_CAP_TP) && (cap & PCI_AF_CAP_FLR)) { + trace_vfio_check_af_flr(vdev->vbasedev.name); + vdev->has_flr = true; + } +} + +static int vfio_add_std_cap(VFIOPCIDevice *vdev, uint8_t pos, Error **errp) +{ + PCIDevice *pdev = &vdev->pdev; + uint8_t cap_id, next, size; + int ret; + + cap_id = pdev->config[pos]; + next = pdev->config[pos + PCI_CAP_LIST_NEXT]; + + /* + * If it becomes important to configure capabilities to their actual + * size, use this as the default when it's something we don't recognize. + * Since QEMU doesn't actually handle many of the config accesses, + * exact size doesn't seem worthwhile. + */ + size = vfio_std_cap_max_size(pdev, pos); + + /* + * pci_add_capability always inserts the new capability at the head + * of the chain. Therefore to end up with a chain that matches the + * physical device, we insert from the end by making this recursive. + * This is also why we pre-calculate size above as cached config space + * will be changed as we unwind the stack.
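+ * + * E.g. a physical chain PM(0x40) -> MSI(0x50) -> EXP(0x60) recurses down + * to EXP first, so we insert EXP, then MSI, then PM, each at the head, + * leaving PM first and matching the hardware ordering.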
+ */ + if (next) { + ret = vfio_add_std_cap(vdev, next, errp); + if (ret) { + return ret; + } + } else { + /* Begin the rebuild, use QEMU emulated list bits */ + pdev->config[PCI_CAPABILITY_LIST] = 0; + vdev->emulated_config_bits[PCI_CAPABILITY_LIST] = 0xff; + vdev->emulated_config_bits[PCI_STATUS] |= PCI_STATUS_CAP_LIST; + + ret = vfio_add_virt_caps(vdev, errp); + if (ret) { + return ret; + } + } + + /* Scale down size, esp in case virt caps were added above */ + size = MIN(size, vfio_std_cap_max_size(pdev, pos)); + + /* Use emulated next pointer to allow dropping caps */ + pci_set_byte(vdev->emulated_config_bits + pos + PCI_CAP_LIST_NEXT, 0xff); + + switch (cap_id) { + case PCI_CAP_ID_MSI: + ret = vfio_msi_setup(vdev, pos, errp); + break; + case PCI_CAP_ID_EXP: + vfio_check_pcie_flr(vdev, pos); + ret = vfio_setup_pcie_cap(vdev, pos, size, errp); + break; + case PCI_CAP_ID_MSIX: + ret = vfio_msix_setup(vdev, pos, errp); + break; + case PCI_CAP_ID_PM: + vfio_check_pm_reset(vdev, pos); + vdev->pm_cap = pos; + ret = pci_add_capability(pdev, cap_id, pos, size, errp); + break; + case PCI_CAP_ID_AF: + vfio_check_af_flr(vdev, pos); + ret = pci_add_capability(pdev, cap_id, pos, size, errp); + break; + default: + ret = pci_add_capability(pdev, cap_id, pos, size, errp); + break; + } + + if (ret < 0) { + error_prepend(errp, + "failed to add PCI capability 0x%x[0x%x]@0x%x: ", + cap_id, size, pos); + return ret; + } + + return 0; +} + +static void vfio_add_ext_cap(VFIOPCIDevice *vdev) +{ + PCIDevice *pdev = &vdev->pdev; + uint32_t header; + uint16_t cap_id, next, size; + uint8_t cap_ver; + uint8_t *config; + + /* Only add extended caps if we have them and the guest can see them */ + if (!pci_is_express(pdev) || !pci_bus_is_express(pci_get_bus(pdev)) || + !pci_get_long(pdev->config + PCI_CONFIG_SPACE_SIZE)) { + return; + } + + /* + * pcie_add_capability always inserts the new capability at the tail + * of the chain. Therefore to end up with a chain that matches the + * physical device, we cache the config space to avoid overwriting + * the original config space when we parse the extended capabilities. + */ + config = g_memdup(pdev->config, vdev->config_size); + + /* + * Extended capabilities are chained with each pointing to the next, so we + * can drop anything other than the head of the chain simply by modifying + * the previous next pointer. Seed the head of the chain here such that + * we can simply skip any capabilities we want to drop below, regardless + * of their position in the chain. If this stub capability still exists + * after we add the capabilities we want to expose, update the capability + * ID to zero. Note that we cannot seed with the capability header being + * zero as this conflicts with definition of an absent capability chain + * and prevents capabilities beyond the head of the list from being added. + * By replacing the dummy capability ID with zero after walking the device + * chain, we also transparently mark extended capabilities as absent if + * no capabilities were added. Note that the PCIe spec defines an absence + * of extended capabilities to be determined by a value of zero for the + * capability ID, version, AND next pointer. A non-zero next pointer + * should be sufficient to indicate additional capabilities are present, + * which will occur if we call pcie_add_capability() below. The entire + * first dword is emulated to support this. + * + * NB. 
The kernel side does similar masking, so be prepared that our + * view of the device may also contain a capability ID zero in the head + * of the chain. Skip it for the same reason that we cannot seed the + * chain with a zero capability. + */ + pci_set_long(pdev->config + PCI_CONFIG_SPACE_SIZE, + PCI_EXT_CAP(0xFFFF, 0, 0)); + pci_set_long(pdev->wmask + PCI_CONFIG_SPACE_SIZE, 0); + pci_set_long(vdev->emulated_config_bits + PCI_CONFIG_SPACE_SIZE, ~0); + + for (next = PCI_CONFIG_SPACE_SIZE; next; + next = PCI_EXT_CAP_NEXT(pci_get_long(config + next))) { + header = pci_get_long(config + next); + cap_id = PCI_EXT_CAP_ID(header); + cap_ver = PCI_EXT_CAP_VER(header); + + /* + * If it becomes important to configure extended capabilities to their + * actual size, use this as the default when it's something we don't + * recognize. Since QEMU doesn't actually handle many of the config + * accesses, exact size doesn't seem worthwhile. + */ + size = vfio_ext_cap_max_size(config, next); + + /* Use emulated next pointer to allow dropping extended caps */ + pci_long_test_and_set_mask(vdev->emulated_config_bits + next, + PCI_EXT_CAP_NEXT_MASK); + + switch (cap_id) { + case 0: /* kernel masked capability */ + case PCI_EXT_CAP_ID_SRIOV: /* Read-only VF BARs confuse OVMF */ + case PCI_EXT_CAP_ID_ARI: /* XXX Needs next function virtualization */ + case PCI_EXT_CAP_ID_REBAR: /* Can't expose read-only */ + trace_vfio_add_ext_cap_dropped(vdev->vbasedev.name, cap_id, next); + break; + default: + pcie_add_capability(pdev, cap_id, cap_ver, next, size); + } + + } + + /* Cleanup chain head ID if necessary */ + if (pci_get_word(pdev->config + PCI_CONFIG_SPACE_SIZE) == 0xFFFF) { + pci_set_word(pdev->config + PCI_CONFIG_SPACE_SIZE, 0); + } + + g_free(config); + return; +} + +static int vfio_add_capabilities(VFIOPCIDevice *vdev, Error **errp) +{ + PCIDevice *pdev = &vdev->pdev; + int ret; + + if (!(pdev->config[PCI_STATUS] & PCI_STATUS_CAP_LIST) || + !pdev->config[PCI_CAPABILITY_LIST]) { + return 0; /* Nothing to add */ + } + + ret = vfio_add_std_cap(vdev, pdev->config[PCI_CAPABILITY_LIST], errp); + if (ret) { + return ret; + } + + vfio_add_ext_cap(vdev); + return 0; +} + +static void vfio_pci_pre_reset(VFIOPCIDevice *vdev) +{ + PCIDevice *pdev = &vdev->pdev; + uint16_t cmd; + + vfio_disable_interrupts(vdev); + + /* Make sure the device is in D0 */ + if (vdev->pm_cap) { + uint16_t pmcsr; + uint8_t state; + + pmcsr = vfio_pci_read_config(pdev, vdev->pm_cap + PCI_PM_CTRL, 2); + state = pmcsr & PCI_PM_CTRL_STATE_MASK; + if (state) { + pmcsr &= ~PCI_PM_CTRL_STATE_MASK; + vfio_pci_write_config(pdev, vdev->pm_cap + PCI_PM_CTRL, pmcsr, 2); + /* vfio handles the necessary delay here */ + pmcsr = vfio_pci_read_config(pdev, vdev->pm_cap + PCI_PM_CTRL, 2); + state = pmcsr & PCI_PM_CTRL_STATE_MASK; + if (state) { + error_report("vfio: Unable to power on device, stuck in D%d", + state); + } + } + } + + /* + * Stop any ongoing DMA by disconnecting I/O, MMIO, and bus master. + * Also put INTx Disable in known state. 
+ */ + cmd = vfio_pci_read_config(pdev, PCI_COMMAND, 2); + cmd &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | + PCI_COMMAND_INTX_DISABLE); + vfio_pci_write_config(pdev, PCI_COMMAND, cmd, 2); +} + +static void vfio_pci_post_reset(VFIOPCIDevice *vdev) +{ + Error *err = NULL; + int nr; + + vfio_intx_enable(vdev, &err); + if (err) { + error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name); + } + + for (nr = 0; nr < PCI_NUM_REGIONS - 1; ++nr) { + off_t addr = vdev->config_offset + PCI_BASE_ADDRESS_0 + (4 * nr); + uint32_t val = 0; + uint32_t len = sizeof(val); + + if (pwrite(vdev->vbasedev.fd, &val, len, addr) != len) { + error_report("%s(%s) reset bar %d failed: %m", __func__, + vdev->vbasedev.name, nr); + } + } + + vfio_quirk_reset(vdev); +} + +static bool vfio_pci_host_match(PCIHostDeviceAddress *addr, const char *name) +{ + char tmp[13]; + + sprintf(tmp, "%04x:%02x:%02x.%1x", addr->domain, + addr->bus, addr->slot, addr->function); + + return (strcmp(tmp, name) == 0); +} + +static int vfio_pci_hot_reset(VFIOPCIDevice *vdev, bool single) +{ + VFIOGroup *group; + struct vfio_pci_hot_reset_info *info; + struct vfio_pci_dependent_device *devices; + struct vfio_pci_hot_reset *reset; + int32_t *fds; + int ret, i, count; + bool multi = false; + + trace_vfio_pci_hot_reset(vdev->vbasedev.name, single ? "one" : "multi"); + + if (!single) { + vfio_pci_pre_reset(vdev); + } + vdev->vbasedev.needs_reset = false; + + info = g_malloc0(sizeof(*info)); + info->argsz = sizeof(*info); + + ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info); + if (ret && errno != ENOSPC) { + ret = -errno; + if (!vdev->has_pm_reset) { + error_report("vfio: Cannot reset device %s, " + "no available reset mechanism.", vdev->vbasedev.name); + } + goto out_single; + } + + count = info->count; + info = g_realloc(info, sizeof(*info) + (count * sizeof(*devices))); + info->argsz = sizeof(*info) + (count * sizeof(*devices)); + devices = &info->devices[0]; + + ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info); + if (ret) { + ret = -errno; + error_report("vfio: hot reset info failed: %m"); + goto out_single; + } + + trace_vfio_pci_hot_reset_has_dep_devices(vdev->vbasedev.name); + + /* Verify that we have all the groups required */ + for (i = 0; i < info->count; i++) { + PCIHostDeviceAddress host; + VFIOPCIDevice *tmp; + VFIODevice *vbasedev_iter; + + host.domain = devices[i].segment; + host.bus = devices[i].bus; + host.slot = PCI_SLOT(devices[i].devfn); + host.function = PCI_FUNC(devices[i].devfn); + + trace_vfio_pci_hot_reset_dep_devices(host.domain, + host.bus, host.slot, host.function, devices[i].group_id); + + if (vfio_pci_host_match(&host, vdev->vbasedev.name)) { + continue; + } + + QLIST_FOREACH(group, &vfio_group_list, next) { + if (group->groupid == devices[i].group_id) { + break; + } + } + + if (!group) { + if (!vdev->has_pm_reset) { + error_report("vfio: Cannot reset device %s, " + "depends on group %d which is not owned.", + vdev->vbasedev.name, devices[i].group_id); + } + ret = -EPERM; + goto out; + } + + /* Prep dependent devices for reset and clear our marker. 
*/ + QLIST_FOREACH(vbasedev_iter, &group->device_list, next) { + if (!vbasedev_iter->dev->realized || + vbasedev_iter->type != VFIO_DEVICE_TYPE_PCI) { + continue; + } + tmp = container_of(vbasedev_iter, VFIOPCIDevice, vbasedev); + if (vfio_pci_host_match(&host, tmp->vbasedev.name)) { + if (single) { + ret = -EINVAL; + goto out_single; + } + vfio_pci_pre_reset(tmp); + tmp->vbasedev.needs_reset = false; + multi = true; + break; + } + } + } + + if (!single && !multi) { + ret = -EINVAL; + goto out_single; + } + + /* Determine how many group fds need to be passed */ + count = 0; + QLIST_FOREACH(group, &vfio_group_list, next) { + for (i = 0; i < info->count; i++) { + if (group->groupid == devices[i].group_id) { + count++; + break; + } + } + } + + reset = g_malloc0(sizeof(*reset) + (count * sizeof(*fds))); + reset->argsz = sizeof(*reset) + (count * sizeof(*fds)); + fds = &reset->group_fds[0]; + + /* Fill in group fds */ + QLIST_FOREACH(group, &vfio_group_list, next) { + for (i = 0; i < info->count; i++) { + if (group->groupid == devices[i].group_id) { + fds[reset->count++] = group->fd; + break; + } + } + } + + /* Bus reset! */ + ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_PCI_HOT_RESET, reset); + g_free(reset); + + trace_vfio_pci_hot_reset_result(vdev->vbasedev.name, + ret ? "%m" : "Success"); + +out: + /* Re-enable INTx on affected devices */ + for (i = 0; i < info->count; i++) { + PCIHostDeviceAddress host; + VFIOPCIDevice *tmp; + VFIODevice *vbasedev_iter; + + host.domain = devices[i].segment; + host.bus = devices[i].bus; + host.slot = PCI_SLOT(devices[i].devfn); + host.function = PCI_FUNC(devices[i].devfn); + + if (vfio_pci_host_match(&host, vdev->vbasedev.name)) { + continue; + } + + QLIST_FOREACH(group, &vfio_group_list, next) { + if (group->groupid == devices[i].group_id) { + break; + } + } + + if (!group) { + break; + } + + QLIST_FOREACH(vbasedev_iter, &group->device_list, next) { + if (!vbasedev_iter->dev->realized || + vbasedev_iter->type != VFIO_DEVICE_TYPE_PCI) { + continue; + } + tmp = container_of(vbasedev_iter, VFIOPCIDevice, vbasedev); + if (vfio_pci_host_match(&host, tmp->vbasedev.name)) { + vfio_pci_post_reset(tmp); + break; + } + } + } +out_single: + if (!single) { + vfio_pci_post_reset(vdev); + } + g_free(info); + + return ret; +} + +/* + * We want to differentiate hot reset of multiple in-use devices vs hot reset + * of a single in-use device. VFIO_DEVICE_RESET will already handle the case + * of doing hot resets when there is only a single device per bus. The in-use + * here refers to how many VFIODevices are affected. A hot reset that affects + * multiple devices, but only a single in-use device, means that we can call + * it from our bus ->reset() callback since the extent is effectively a single + * device. This allows us to make use of it in the hotplug path. When there + * are multiple in-use devices, we can only trigger the hot reset during a + * system reset and thus from our reset handler. We separate _one vs _multi + * here so that we don't overlap and do a double reset on the system reset + * path where both our reset handler and ->reset() callback are used. Calling + * _one() will only do a hot reset for the one in-use devices case, calling + * _multi() will do nothing if a _one() would have been sufficient. 
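+ * + * E.g. with both functions of a two-function adapter assigned to the VM + * there are two in-use devices, so only the system reset path (_multi) may + * perform the bus reset; if the second function is left unassigned, the + * reset spans multiple devices but only one in-use device, and _one() can + * do it from the hotplug path.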
+ */ +static int vfio_pci_hot_reset_one(VFIOPCIDevice *vdev) +{ + return vfio_pci_hot_reset(vdev, true); +} + +static int vfio_pci_hot_reset_multi(VFIODevice *vbasedev) +{ + VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev); + return vfio_pci_hot_reset(vdev, false); +} + +static void vfio_pci_compute_needs_reset(VFIODevice *vbasedev) +{ + VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev); + if (!vbasedev->reset_works || (!vdev->has_flr && vdev->has_pm_reset)) { + vbasedev->needs_reset = true; + } +} + +static Object *vfio_pci_get_object(VFIODevice *vbasedev) +{ + VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev); + + return OBJECT(vdev); +} + +static bool vfio_msix_present(void *opaque, int version_id) +{ + PCIDevice *pdev = opaque; + + return msix_present(pdev); +} + +const VMStateDescription vmstate_vfio_pci_config = { + .name = "VFIOPCIDevice", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_PCI_DEVICE(pdev, VFIOPCIDevice), + VMSTATE_MSIX_TEST(pdev, VFIOPCIDevice, vfio_msix_present), + VMSTATE_END_OF_LIST() + } +}; + +static void vfio_pci_save_config(VFIODevice *vbasedev, QEMUFile *f) +{ + VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev); + + vmstate_save_state(f, &vmstate_vfio_pci_config, vdev, NULL); +} + +static int vfio_pci_load_config(VFIODevice *vbasedev, QEMUFile *f) +{ + VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev); + PCIDevice *pdev = &vdev->pdev; + pcibus_t old_addr[PCI_NUM_REGIONS - 1]; + int bar, ret; + + for (bar = 0; bar < PCI_ROM_SLOT; bar++) { + old_addr[bar] = pdev->io_regions[bar].addr; + } + + ret = vmstate_load_state(f, &vmstate_vfio_pci_config, vdev, 1); + if (ret) { + return ret; + } + + vfio_pci_write_config(pdev, PCI_COMMAND, + pci_get_word(pdev->config + PCI_COMMAND), 2); + + for (bar = 0; bar < PCI_ROM_SLOT; bar++) { + /* + * The address may not be changed in some scenarios + * (e.g. the VF driver isn't loaded in VM). 
+ */ + if (old_addr[bar] != pdev->io_regions[bar].addr && + vdev->bars[bar].region.size > 0 && + vdev->bars[bar].region.size < qemu_real_host_page_size) { + vfio_sub_page_bar_update_mapping(pdev, bar); + } + } + + if (msi_enabled(pdev)) { + vfio_msi_enable(vdev); + } else if (msix_enabled(pdev)) { + vfio_msix_enable(vdev); + } + + return ret; +} + +static VFIODeviceOps vfio_pci_ops = { + .vfio_compute_needs_reset = vfio_pci_compute_needs_reset, + .vfio_hot_reset_multi = vfio_pci_hot_reset_multi, + .vfio_eoi = vfio_intx_eoi, + .vfio_get_object = vfio_pci_get_object, + .vfio_save_config = vfio_pci_save_config, + .vfio_load_config = vfio_pci_load_config, +}; + +int vfio_populate_vga(VFIOPCIDevice *vdev, Error **errp) +{ + VFIODevice *vbasedev = &vdev->vbasedev; + struct vfio_region_info *reg_info; + int ret; + + ret = vfio_get_region_info(vbasedev, VFIO_PCI_VGA_REGION_INDEX, &reg_info); + if (ret) { + error_setg_errno(errp, -ret, + "failed getting region info for VGA region index %d", + VFIO_PCI_VGA_REGION_INDEX); + return ret; + } + + if (!(reg_info->flags & VFIO_REGION_INFO_FLAG_READ) || + !(reg_info->flags & VFIO_REGION_INFO_FLAG_WRITE) || + reg_info->size < 0xbffff + 1) { + error_setg(errp, "unexpected VGA info, flags 0x%lx, size 0x%lx", + (unsigned long)reg_info->flags, + (unsigned long)reg_info->size); + g_free(reg_info); + return -EINVAL; + } + + vdev->vga = g_new0(VFIOVGA, 1); + + vdev->vga->fd_offset = reg_info->offset; + vdev->vga->fd = vdev->vbasedev.fd; + + g_free(reg_info); + + vdev->vga->region[QEMU_PCI_VGA_MEM].offset = QEMU_PCI_VGA_MEM_BASE; + vdev->vga->region[QEMU_PCI_VGA_MEM].nr = QEMU_PCI_VGA_MEM; + QLIST_INIT(&vdev->vga->region[QEMU_PCI_VGA_MEM].quirks); + + memory_region_init_io(&vdev->vga->region[QEMU_PCI_VGA_MEM].mem, + OBJECT(vdev), &vfio_vga_ops, + &vdev->vga->region[QEMU_PCI_VGA_MEM], + "vfio-vga-mmio@0xa0000", + QEMU_PCI_VGA_MEM_SIZE); + + vdev->vga->region[QEMU_PCI_VGA_IO_LO].offset = QEMU_PCI_VGA_IO_LO_BASE; + vdev->vga->region[QEMU_PCI_VGA_IO_LO].nr = QEMU_PCI_VGA_IO_LO; + QLIST_INIT(&vdev->vga->region[QEMU_PCI_VGA_IO_LO].quirks); + + memory_region_init_io(&vdev->vga->region[QEMU_PCI_VGA_IO_LO].mem, + OBJECT(vdev), &vfio_vga_ops, + &vdev->vga->region[QEMU_PCI_VGA_IO_LO], + "vfio-vga-io@0x3b0", + QEMU_PCI_VGA_IO_LO_SIZE); + + vdev->vga->region[QEMU_PCI_VGA_IO_HI].offset = QEMU_PCI_VGA_IO_HI_BASE; + vdev->vga->region[QEMU_PCI_VGA_IO_HI].nr = QEMU_PCI_VGA_IO_HI; + QLIST_INIT(&vdev->vga->region[QEMU_PCI_VGA_IO_HI].quirks); + + memory_region_init_io(&vdev->vga->region[QEMU_PCI_VGA_IO_HI].mem, + OBJECT(vdev), &vfio_vga_ops, + &vdev->vga->region[QEMU_PCI_VGA_IO_HI], + "vfio-vga-io@0x3c0", + QEMU_PCI_VGA_IO_HI_SIZE); + + pci_register_vga(&vdev->pdev, &vdev->vga->region[QEMU_PCI_VGA_MEM].mem, + &vdev->vga->region[QEMU_PCI_VGA_IO_LO].mem, + &vdev->vga->region[QEMU_PCI_VGA_IO_HI].mem); + + return 0; +} + +static void vfio_populate_device(VFIOPCIDevice *vdev, Error **errp) +{ + VFIODevice *vbasedev = &vdev->vbasedev; + struct vfio_region_info *reg_info; + struct vfio_irq_info irq_info = { .argsz = sizeof(irq_info) }; + int i, ret = -1; + + /* Sanity check device */ + if (!(vbasedev->flags & VFIO_DEVICE_FLAGS_PCI)) { + error_setg(errp, "this isn't a PCI device"); + return; + } + + if (vbasedev->num_regions < VFIO_PCI_CONFIG_REGION_INDEX + 1) { + error_setg(errp, "unexpected number of io regions %u", + vbasedev->num_regions); + return; + } + + if (vbasedev->num_irqs < VFIO_PCI_MSIX_IRQ_INDEX + 1) { + error_setg(errp, "unexpected number of irqs %u", vbasedev->num_irqs); + return;
+ } + + for (i = VFIO_PCI_BAR0_REGION_INDEX; i < VFIO_PCI_ROM_REGION_INDEX; i++) { + char *name = g_strdup_printf("%s BAR %d", vbasedev->name, i); + + ret = vfio_region_setup(OBJECT(vdev), vbasedev, + &vdev->bars[i].region, i, name); + g_free(name); + + if (ret) { + error_setg_errno(errp, -ret, "failed to get region %d info", i); + return; + } + + QLIST_INIT(&vdev->bars[i].quirks); + } + + ret = vfio_get_region_info(vbasedev, + VFIO_PCI_CONFIG_REGION_INDEX, &reg_info); + if (ret) { + error_setg_errno(errp, -ret, "failed to get config info"); + return; + } + + trace_vfio_populate_device_config(vdev->vbasedev.name, + (unsigned long)reg_info->size, + (unsigned long)reg_info->offset, + (unsigned long)reg_info->flags); + + vdev->config_size = reg_info->size; + if (vdev->config_size == PCI_CONFIG_SPACE_SIZE) { + vdev->pdev.cap_present &= ~QEMU_PCI_CAP_EXPRESS; + } + vdev->config_offset = reg_info->offset; + + g_free(reg_info); + + if (vdev->features & VFIO_FEATURE_ENABLE_VGA) { + ret = vfio_populate_vga(vdev, errp); + if (ret) { + error_append_hint(errp, "device does not support " + "requested feature x-vga\n"); + return; + } + } + + irq_info.index = VFIO_PCI_ERR_IRQ_INDEX; + + ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_IRQ_INFO, &irq_info); + if (ret) { + /* This can fail for an old kernel or legacy PCI dev */ + trace_vfio_populate_device_get_irq_info_failure(strerror(errno)); + } else if (irq_info.count == 1) { + vdev->pci_aer = true; + } else { + warn_report(VFIO_MSG_PREFIX + "Could not enable error recovery for the device", + vbasedev->name); + } +} + +static void vfio_put_device(VFIOPCIDevice *vdev) +{ + g_free(vdev->vbasedev.name); + g_free(vdev->msix); + + vfio_put_base_device(&vdev->vbasedev); +} + +static void vfio_err_notifier_handler(void *opaque) +{ + VFIOPCIDevice *vdev = opaque; + + if (!event_notifier_test_and_clear(&vdev->err_notifier)) { + return; + } + + /* + * TBD. Retrieve the error details and decide what action + * needs to be taken. One of the actions could be to pass + * the error to the guest and have the guest driver recover + * from the error. This requires that PCIe capabilities be + * exposed to the guest. For now, we just terminate the + * guest to contain the error. + */ + + error_report("%s(%s) Unrecoverable error detected. Please collect any data possible and then kill the guest", __func__, vdev->vbasedev.name); + + vm_stop(RUN_STATE_INTERNAL_ERROR); +} + +/* + * Registers error notifier for devices supporting error recovery. + * If we encounter a failure in this function, we report an error + * and continue after disabling error recovery support for the + * device.
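+ * + * (The unregister path below de-assigns the trigger again by passing an + * fd of -1 with VFIO_IRQ_SET_ACTION_TRIGGER.)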
+ */ +static void vfio_register_err_notifier(VFIOPCIDevice *vdev) +{ + Error *err = NULL; + int32_t fd; + + if (!vdev->pci_aer) { + return; + } + + if (event_notifier_init(&vdev->err_notifier, 0)) { + error_report("vfio: Unable to init event notifier for error detection"); + vdev->pci_aer = false; + return; + } + + fd = event_notifier_get_fd(&vdev->err_notifier); + qemu_set_fd_handler(fd, vfio_err_notifier_handler, NULL, vdev); + + if (vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_ERR_IRQ_INDEX, 0, + VFIO_IRQ_SET_ACTION_TRIGGER, fd, &err)) { + error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name); + qemu_set_fd_handler(fd, NULL, NULL, vdev); + event_notifier_cleanup(&vdev->err_notifier); + vdev->pci_aer = false; + } +} + +static void vfio_unregister_err_notifier(VFIOPCIDevice *vdev) +{ + Error *err = NULL; + + if (!vdev->pci_aer) { + return; + } + + if (vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_ERR_IRQ_INDEX, 0, + VFIO_IRQ_SET_ACTION_TRIGGER, -1, &err)) { + error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name); + } + qemu_set_fd_handler(event_notifier_get_fd(&vdev->err_notifier), + NULL, NULL, vdev); + event_notifier_cleanup(&vdev->err_notifier); +} + +static void vfio_req_notifier_handler(void *opaque) +{ + VFIOPCIDevice *vdev = opaque; + Error *err = NULL; + + if (!event_notifier_test_and_clear(&vdev->req_notifier)) { + return; + } + + qdev_unplug(DEVICE(vdev), &err); + if (err) { + warn_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name); + } +} + +static void vfio_register_req_notifier(VFIOPCIDevice *vdev) +{ + struct vfio_irq_info irq_info = { .argsz = sizeof(irq_info), + .index = VFIO_PCI_REQ_IRQ_INDEX }; + Error *err = NULL; + int32_t fd; + + if (!(vdev->features & VFIO_FEATURE_ENABLE_REQ)) { + return; + } + + if (ioctl(vdev->vbasedev.fd, + VFIO_DEVICE_GET_IRQ_INFO, &irq_info) < 0 || irq_info.count < 1) { + return; + } + + if (event_notifier_init(&vdev->req_notifier, 0)) { + error_report("vfio: Unable to init event notifier for device request"); + return; + } + + fd = event_notifier_get_fd(&vdev->req_notifier); + qemu_set_fd_handler(fd, vfio_req_notifier_handler, NULL, vdev); + + if (vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_REQ_IRQ_INDEX, 0, + VFIO_IRQ_SET_ACTION_TRIGGER, fd, &err)) { + error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name); + qemu_set_fd_handler(fd, NULL, NULL, vdev); + event_notifier_cleanup(&vdev->req_notifier); + } else { + vdev->req_enabled = true; + } +} + +static void vfio_unregister_req_notifier(VFIOPCIDevice *vdev) +{ + Error *err = NULL; + + if (!vdev->req_enabled) { + return; + } + + if (vfio_set_irq_signaling(&vdev->vbasedev, VFIO_PCI_REQ_IRQ_INDEX, 0, + VFIO_IRQ_SET_ACTION_TRIGGER, -1, &err)) { + error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name); + } + qemu_set_fd_handler(event_notifier_get_fd(&vdev->req_notifier), + NULL, NULL, vdev); + event_notifier_cleanup(&vdev->req_notifier); + + vdev->req_enabled = false; +} + +static void vfio_realize(PCIDevice *pdev, Error **errp) +{ + VFIOPCIDevice *vdev = VFIO_PCI(pdev); + VFIODevice *vbasedev_iter; + VFIOGroup *group; + char *tmp, *subsys, group_path[PATH_MAX], *group_name; + Error *err = NULL; + ssize_t len; + struct stat st; + int groupid; + int i, ret; + bool is_mdev; + + if (!vdev->vbasedev.sysfsdev) { + if (!(~vdev->host.domain || ~vdev->host.bus || + ~vdev->host.slot || ~vdev->host.function)) { + error_setg(errp, "No provided host device"); + error_append_hint(errp, "Use -device vfio-pci,host=DDDD:BB:DD.F " + "or -device 
vfio-pci,sysfsdev=PATH_TO_DEVICE\n"); + return; + } + vdev->vbasedev.sysfsdev = + g_strdup_printf("/sys/bus/pci/devices/%04x:%02x:%02x.%01x", + vdev->host.domain, vdev->host.bus, + vdev->host.slot, vdev->host.function); + } + + if (stat(vdev->vbasedev.sysfsdev, &st) < 0) { + error_setg_errno(errp, errno, "no such host device"); + error_prepend(errp, VFIO_MSG_PREFIX, vdev->vbasedev.sysfsdev); + return; + } + + vdev->vbasedev.name = g_path_get_basename(vdev->vbasedev.sysfsdev); + vdev->vbasedev.ops = &vfio_pci_ops; + vdev->vbasedev.type = VFIO_DEVICE_TYPE_PCI; + vdev->vbasedev.dev = DEVICE(vdev); + + tmp = g_strdup_printf("%s/iommu_group", vdev->vbasedev.sysfsdev); + len = readlink(tmp, group_path, sizeof(group_path)); + g_free(tmp); + + if (len <= 0 || len >= sizeof(group_path)) { + error_setg_errno(errp, len < 0 ? errno : ENAMETOOLONG, + "no iommu_group found"); + goto error; + } + + group_path[len] = 0; + + group_name = basename(group_path); + if (sscanf(group_name, "%d", &groupid) != 1) { + error_setg_errno(errp, errno, "failed to read %s", group_path); + goto error; + } + + trace_vfio_realize(vdev->vbasedev.name, groupid); + + group = vfio_get_group(groupid, pci_device_iommu_address_space(pdev), errp); + if (!group) { + goto error; + } + + QLIST_FOREACH(vbasedev_iter, &group->device_list, next) { + if (strcmp(vbasedev_iter->name, vdev->vbasedev.name) == 0) { + error_setg(errp, "device is already attached"); + vfio_put_group(group); + goto error; + } + } + + /* + * Mediated devices *might* operate compatibly with discarding of RAM, but + * we cannot know for certain, it depends on whether the mdev vendor driver + * stays in sync with the active working set of the guest driver. Prevent + * the x-balloon-allowed option unless this is minimally an mdev device. + */ + tmp = g_strdup_printf("%s/subsystem", vdev->vbasedev.sysfsdev); + subsys = realpath(tmp, NULL); + g_free(tmp); + is_mdev = subsys && (strcmp(subsys, "/sys/bus/mdev") == 0); + free(subsys); + + trace_vfio_mdev(vdev->vbasedev.name, is_mdev); + + if (vdev->vbasedev.ram_block_discard_allowed && !is_mdev) { + error_setg(errp, "x-balloon-allowed only potentially compatible " + "with mdev devices"); + vfio_put_group(group); + goto error; + } + + ret = vfio_get_device(group, vdev->vbasedev.name, &vdev->vbasedev, errp); + if (ret) { + vfio_put_group(group); + goto error; + } + + vfio_populate_device(vdev, &err); + if (err) { + error_propagate(errp, err); + goto error; + } + + /* Get a copy of config space */ + ret = pread(vdev->vbasedev.fd, vdev->pdev.config, + MIN(pci_config_size(&vdev->pdev), vdev->config_size), + vdev->config_offset); + if (ret < (int)MIN(pci_config_size(&vdev->pdev), vdev->config_size)) { + ret = ret < 0 ? -errno : -EFAULT; + error_setg_errno(errp, -ret, "failed to read device config space"); + goto error; + } + + /* vfio emulates a lot for us, but some bits need extra love */ + vdev->emulated_config_bits = g_malloc0(vdev->config_size); + + /* QEMU can choose to expose the ROM or not */ + memset(vdev->emulated_config_bits + PCI_ROM_ADDRESS, 0xff, 4); + /* QEMU can also add or extend BARs */ + memset(vdev->emulated_config_bits + PCI_BASE_ADDRESS_0, 0xff, 6 * 4); + + /* + * The PCI spec reserves vendor ID 0xffff as an invalid value. The + * device ID is managed by the vendor and need only be a 16-bit value. + * Allow any 16-bit value for subsystem so they can be hidden or changed. 
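+ * + * E.g. with made-up values, the IDs the guest sees could be overridden as: + * -device vfio-pci,host=02:00.0,x-pci-vendor-id=0x1234,x-pci-device-id=0x5678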
+ */ + if (vdev->vendor_id != PCI_ANY_ID) { + if (vdev->vendor_id >= 0xffff) { + error_setg(errp, "invalid PCI vendor ID provided"); + goto error; + } + vfio_add_emulated_word(vdev, PCI_VENDOR_ID, vdev->vendor_id, ~0); + trace_vfio_pci_emulated_vendor_id(vdev->vbasedev.name, vdev->vendor_id); + } else { + vdev->vendor_id = pci_get_word(pdev->config + PCI_VENDOR_ID); + } + + if (vdev->device_id != PCI_ANY_ID) { + if (vdev->device_id > 0xffff) { + error_setg(errp, "invalid PCI device ID provided"); + goto error; + } + vfio_add_emulated_word(vdev, PCI_DEVICE_ID, vdev->device_id, ~0); + trace_vfio_pci_emulated_device_id(vdev->vbasedev.name, vdev->device_id); + } else { + vdev->device_id = pci_get_word(pdev->config + PCI_DEVICE_ID); + } + + if (vdev->sub_vendor_id != PCI_ANY_ID) { + if (vdev->sub_vendor_id > 0xffff) { + error_setg(errp, "invalid PCI subsystem vendor ID provided"); + goto error; + } + vfio_add_emulated_word(vdev, PCI_SUBSYSTEM_VENDOR_ID, + vdev->sub_vendor_id, ~0); + trace_vfio_pci_emulated_sub_vendor_id(vdev->vbasedev.name, + vdev->sub_vendor_id); + } + + if (vdev->sub_device_id != PCI_ANY_ID) { + if (vdev->sub_device_id > 0xffff) { + error_setg(errp, "invalid PCI subsystem device ID provided"); + goto error; + } + vfio_add_emulated_word(vdev, PCI_SUBSYSTEM_ID, vdev->sub_device_id, ~0); + trace_vfio_pci_emulated_sub_device_id(vdev->vbasedev.name, + vdev->sub_device_id); + } + + /* QEMU can change multi-function devices to single function, or reverse */ + vdev->emulated_config_bits[PCI_HEADER_TYPE] = + PCI_HEADER_TYPE_MULTI_FUNCTION; + + /* Restore or clear multifunction, this is always controlled by QEMU */ + if (vdev->pdev.cap_present & QEMU_PCI_CAP_MULTIFUNCTION) { + vdev->pdev.config[PCI_HEADER_TYPE] |= PCI_HEADER_TYPE_MULTI_FUNCTION; + } else { + vdev->pdev.config[PCI_HEADER_TYPE] &= ~PCI_HEADER_TYPE_MULTI_FUNCTION; + } + + /* + * Clear host resource mapping info. If we choose not to register a + * BAR, such as might be the case with the option ROM, we can get + * confusing, unwritable, residual addresses from the host here. 
+ */ + memset(&vdev->pdev.config[PCI_BASE_ADDRESS_0], 0, 24); + memset(&vdev->pdev.config[PCI_ROM_ADDRESS], 0, 4); + + vfio_pci_size_rom(vdev); + + vfio_bars_prepare(vdev); + + vfio_msix_early_setup(vdev, &err); + if (err) { + error_propagate(errp, err); + goto error; + } + + vfio_bars_register(vdev); + + ret = vfio_add_capabilities(vdev, errp); + if (ret) { + goto out_teardown; + } + + if (vdev->vga) { + vfio_vga_quirk_setup(vdev); + } + + for (i = 0; i < PCI_ROM_SLOT; i++) { + vfio_bar_quirk_setup(vdev, i); + } + + if (!vdev->igd_opregion && + vdev->features & VFIO_FEATURE_ENABLE_IGD_OPREGION) { + struct vfio_region_info *opregion; + + if (vdev->pdev.qdev.hotplugged) { + error_setg(errp, + "cannot support IGD OpRegion feature on hotplugged " + "device"); + goto out_teardown; + } + + ret = vfio_get_dev_region_info(&vdev->vbasedev, + VFIO_REGION_TYPE_PCI_VENDOR_TYPE | PCI_VENDOR_ID_INTEL, + VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION, &opregion); + if (ret) { + error_setg_errno(errp, -ret, + "does not support requested IGD OpRegion feature"); + goto out_teardown; + } + + ret = vfio_pci_igd_opregion_init(vdev, opregion, errp); + g_free(opregion); + if (ret) { + goto out_teardown; + } + } + + /* QEMU emulates all of MSI & MSIX */ + if (pdev->cap_present & QEMU_PCI_CAP_MSIX) { + memset(vdev->emulated_config_bits + pdev->msix_cap, 0xff, + MSIX_CAP_LENGTH); + } + + if (pdev->cap_present & QEMU_PCI_CAP_MSI) { + memset(vdev->emulated_config_bits + pdev->msi_cap, 0xff, + vdev->msi_cap_size); + } + + if (vfio_pci_read_config(&vdev->pdev, PCI_INTERRUPT_PIN, 1)) { + vdev->intx.mmap_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL, + vfio_intx_mmap_enable, vdev); + pci_device_set_intx_routing_notifier(&vdev->pdev, + vfio_intx_routing_notifier); + vdev->irqchip_change_notifier.notify = vfio_irqchip_change; + kvm_irqchip_add_change_notifier(&vdev->irqchip_change_notifier); + ret = vfio_intx_enable(vdev, errp); + if (ret) { + goto out_deregister; + } + } + + if (vdev->display != ON_OFF_AUTO_OFF) { + ret = vfio_display_probe(vdev, errp); + if (ret) { + goto out_deregister; + } + } + if (vdev->enable_ramfb && vdev->dpy == NULL) { + error_setg(errp, "ramfb=on requires display=on"); + goto out_deregister; + } + if (vdev->display_xres || vdev->display_yres) { + if (vdev->dpy == NULL) { + error_setg(errp, "xres and yres properties require display=on"); + goto out_deregister; + } + if (vdev->dpy->edid_regs == NULL) { + error_setg(errp, "xres and yres properties need edid support"); + goto out_deregister; + } + } + + if (vfio_pci_is(vdev, PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID)) { + ret = vfio_pci_nvidia_v100_ram_init(vdev, errp); + if (ret && ret != -ENODEV) { + error_report("Failed to setup NVIDIA V100 GPU RAM"); + } + } + + if (vfio_pci_is(vdev, PCI_VENDOR_ID_IBM, PCI_ANY_ID)) { + ret = vfio_pci_nvlink2_init(vdev, errp); + if (ret && ret != -ENODEV) { + error_report("Failed to setup NVlink2 bridge"); + } + } + + if (!pdev->failover_pair_id) { + ret = vfio_migration_probe(&vdev->vbasedev, errp); + if (ret) { + error_report("%s: Migration disabled", vdev->vbasedev.name); + } + } + + vfio_register_err_notifier(vdev); + vfio_register_req_notifier(vdev); + vfio_setup_resetfn_quirk(vdev); + + return; + +out_deregister: + pci_device_set_intx_routing_notifier(&vdev->pdev, NULL); + kvm_irqchip_remove_change_notifier(&vdev->irqchip_change_notifier); +out_teardown: + vfio_teardown_msi(vdev); + vfio_bars_exit(vdev); +error: + error_prepend(errp, VFIO_MSG_PREFIX, vdev->vbasedev.name); +} + +static void vfio_instance_finalize(Object *obj) +{ 
+ VFIOPCIDevice *vdev = VFIO_PCI(obj); + VFIOGroup *group = vdev->vbasedev.group; + + vfio_display_finalize(vdev); + vfio_bars_finalize(vdev); + g_free(vdev->emulated_config_bits); + g_free(vdev->rom); + /* + * XXX Leaking igd_opregion is not an oversight, we can't remove the + * fw_cfg entry therefore leaking this allocation seems like the safest + * option. + * + * g_free(vdev->igd_opregion); + */ + vfio_put_device(vdev); + vfio_put_group(group); +} + +static void vfio_exitfn(PCIDevice *pdev) +{ + VFIOPCIDevice *vdev = VFIO_PCI(pdev); + + vfio_unregister_req_notifier(vdev); + vfio_unregister_err_notifier(vdev); + pci_device_set_intx_routing_notifier(&vdev->pdev, NULL); + if (vdev->irqchip_change_notifier.notify) { + kvm_irqchip_remove_change_notifier(&vdev->irqchip_change_notifier); + } + vfio_disable_interrupts(vdev); + if (vdev->intx.mmap_timer) { + timer_free(vdev->intx.mmap_timer); + } + vfio_teardown_msi(vdev); + vfio_bars_exit(vdev); + vfio_migration_finalize(&vdev->vbasedev); +} + +static void vfio_pci_reset(DeviceState *dev) +{ + VFIOPCIDevice *vdev = VFIO_PCI(dev); + + trace_vfio_pci_reset(vdev->vbasedev.name); + + vfio_pci_pre_reset(vdev); + + if (vdev->display != ON_OFF_AUTO_OFF) { + vfio_display_reset(vdev); + } + + if (vdev->resetfn && !vdev->resetfn(vdev)) { + goto post_reset; + } + + if (vdev->vbasedev.reset_works && + (vdev->has_flr || !vdev->has_pm_reset) && + !ioctl(vdev->vbasedev.fd, VFIO_DEVICE_RESET)) { + trace_vfio_pci_reset_flr(vdev->vbasedev.name); + goto post_reset; + } + + /* See if we can do our own bus reset */ + if (!vfio_pci_hot_reset_one(vdev)) { + goto post_reset; + } + + /* If nothing else works and the device supports PM reset, use it */ + if (vdev->vbasedev.reset_works && vdev->has_pm_reset && + !ioctl(vdev->vbasedev.fd, VFIO_DEVICE_RESET)) { + trace_vfio_pci_reset_pm(vdev->vbasedev.name); + goto post_reset; + } + +post_reset: + vfio_pci_post_reset(vdev); +} + +static void vfio_instance_init(Object *obj) +{ + PCIDevice *pci_dev = PCI_DEVICE(obj); + VFIOPCIDevice *vdev = VFIO_PCI(obj); + + device_add_bootindex_property(obj, &vdev->bootindex, + "bootindex", NULL, + &pci_dev->qdev); + vdev->host.domain = ~0U; + vdev->host.bus = ~0U; + vdev->host.slot = ~0U; + vdev->host.function = ~0U; + + vdev->nv_gpudirect_clique = 0xFF; + + /* QEMU_PCI_CAP_EXPRESS initialization does not depend on QEMU command + * line, therefore, no need to wait to realize like other devices */ + pci_dev->cap_present |= QEMU_PCI_CAP_EXPRESS; +} + +static Property vfio_pci_dev_properties[] = { + DEFINE_PROP_PCI_HOST_DEVADDR("host", VFIOPCIDevice, host), + DEFINE_PROP_STRING("sysfsdev", VFIOPCIDevice, vbasedev.sysfsdev), + DEFINE_PROP_ON_OFF_AUTO("x-pre-copy-dirty-page-tracking", VFIOPCIDevice, + vbasedev.pre_copy_dirty_page_tracking, + ON_OFF_AUTO_ON), + DEFINE_PROP_ON_OFF_AUTO("display", VFIOPCIDevice, + display, ON_OFF_AUTO_OFF), + DEFINE_PROP_UINT32("xres", VFIOPCIDevice, display_xres, 0), + DEFINE_PROP_UINT32("yres", VFIOPCIDevice, display_yres, 0), + DEFINE_PROP_UINT32("x-intx-mmap-timeout-ms", VFIOPCIDevice, + intx.mmap_timeout, 1100), + DEFINE_PROP_BIT("x-vga", VFIOPCIDevice, features, + VFIO_FEATURE_ENABLE_VGA_BIT, false), + DEFINE_PROP_BIT("x-req", VFIOPCIDevice, features, + VFIO_FEATURE_ENABLE_REQ_BIT, true), + DEFINE_PROP_BIT("x-igd-opregion", VFIOPCIDevice, features, + VFIO_FEATURE_ENABLE_IGD_OPREGION_BIT, false), + DEFINE_PROP_BOOL("x-enable-migration", VFIOPCIDevice, + vbasedev.enable_migration, false), + DEFINE_PROP_BOOL("x-no-mmap", VFIOPCIDevice, vbasedev.no_mmap, 
false), + DEFINE_PROP_BOOL("x-balloon-allowed", VFIOPCIDevice, + vbasedev.ram_block_discard_allowed, false), + DEFINE_PROP_BOOL("x-no-kvm-intx", VFIOPCIDevice, no_kvm_intx, false), + DEFINE_PROP_BOOL("x-no-kvm-msi", VFIOPCIDevice, no_kvm_msi, false), + DEFINE_PROP_BOOL("x-no-kvm-msix", VFIOPCIDevice, no_kvm_msix, false), + DEFINE_PROP_BOOL("x-no-geforce-quirks", VFIOPCIDevice, + no_geforce_quirks, false), + DEFINE_PROP_BOOL("x-no-kvm-ioeventfd", VFIOPCIDevice, no_kvm_ioeventfd, + false), + DEFINE_PROP_BOOL("x-no-vfio-ioeventfd", VFIOPCIDevice, no_vfio_ioeventfd, + false), + DEFINE_PROP_UINT32("x-pci-vendor-id", VFIOPCIDevice, vendor_id, PCI_ANY_ID), + DEFINE_PROP_UINT32("x-pci-device-id", VFIOPCIDevice, device_id, PCI_ANY_ID), + DEFINE_PROP_UINT32("x-pci-sub-vendor-id", VFIOPCIDevice, + sub_vendor_id, PCI_ANY_ID), + DEFINE_PROP_UINT32("x-pci-sub-device-id", VFIOPCIDevice, + sub_device_id, PCI_ANY_ID), + DEFINE_PROP_UINT32("x-igd-gms", VFIOPCIDevice, igd_gms, 0), + DEFINE_PROP_UNSIGNED_NODEFAULT("x-nv-gpudirect-clique", VFIOPCIDevice, + nv_gpudirect_clique, + qdev_prop_nv_gpudirect_clique, uint8_t), + DEFINE_PROP_OFF_AUTO_PCIBAR("x-msix-relocation", VFIOPCIDevice, msix_relo, + OFF_AUTOPCIBAR_OFF), + /* + * TODO - support passed fds... is this necessary? + * DEFINE_PROP_STRING("vfiofd", VFIOPCIDevice, vfiofd_name), + * DEFINE_PROP_STRING("vfiogroupfd, VFIOPCIDevice, vfiogroupfd_name), + */ + DEFINE_PROP_END_OF_LIST(), +}; + +static void vfio_pci_dev_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *pdc = PCI_DEVICE_CLASS(klass); + + dc->reset = vfio_pci_reset; + device_class_set_props(dc, vfio_pci_dev_properties); + dc->desc = "VFIO-based PCI device assignment"; + set_bit(DEVICE_CATEGORY_MISC, dc->categories); + pdc->realize = vfio_realize; + pdc->exit = vfio_exitfn; + pdc->config_read = vfio_pci_read_config; + pdc->config_write = vfio_pci_write_config; +} + +static const TypeInfo vfio_pci_dev_info = { + .name = TYPE_VFIO_PCI, + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(VFIOPCIDevice), + .class_init = vfio_pci_dev_class_init, + .instance_init = vfio_instance_init, + .instance_finalize = vfio_instance_finalize, + .interfaces = (InterfaceInfo[]) { + { INTERFACE_PCIE_DEVICE }, + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { } + }, +}; + +static Property vfio_pci_dev_nohotplug_properties[] = { + DEFINE_PROP_BOOL("ramfb", VFIOPCIDevice, enable_ramfb, false), + DEFINE_PROP_END_OF_LIST(), +}; + +static void vfio_pci_nohotplug_dev_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + device_class_set_props(dc, vfio_pci_dev_nohotplug_properties); + dc->hotpluggable = false; +} + +static const TypeInfo vfio_pci_nohotplug_dev_info = { + .name = TYPE_VFIO_PCI_NOHOTPLUG, + .parent = TYPE_VFIO_PCI, + .instance_size = sizeof(VFIOPCIDevice), + .class_init = vfio_pci_nohotplug_dev_class_init, +}; + +static void register_vfio_pci_dev_type(void) +{ + type_register_static(&vfio_pci_dev_info); + type_register_static(&vfio_pci_nohotplug_dev_info); +} + +type_init(register_vfio_pci_dev_type) diff --git a/hw/vfio/pci.h b/hw/vfio/pci.h new file mode 100644 index 000000000..64777516d --- /dev/null +++ b/hw/vfio/pci.h @@ -0,0 +1,227 @@ +/* + * vfio based device assignment support - PCI devices + * + * Copyright Red Hat, Inc. 2012-2015 + * + * Authors: + * Alex Williamson + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. 
+ */ +#ifndef HW_VFIO_VFIO_PCI_H +#define HW_VFIO_VFIO_PCI_H + +#include "exec/memory.h" +#include "hw/pci/pci.h" +#include "hw/vfio/vfio-common.h" +#include "qemu/event_notifier.h" +#include "qemu/queue.h" +#include "qemu/timer.h" +#include "qom/object.h" + +#define PCI_ANY_ID (~0) + +struct VFIOPCIDevice; + +typedef struct VFIOIOEventFD { + QLIST_ENTRY(VFIOIOEventFD) next; + MemoryRegion *mr; + hwaddr addr; + unsigned size; + uint64_t data; + EventNotifier e; + VFIORegion *region; + hwaddr region_addr; + bool dynamic; /* Added runtime, removed on device reset */ + bool vfio; +} VFIOIOEventFD; + +typedef struct VFIOQuirk { + QLIST_ENTRY(VFIOQuirk) next; + void *data; + QLIST_HEAD(, VFIOIOEventFD) ioeventfds; + int nr_mem; + MemoryRegion *mem; + void (*reset)(struct VFIOPCIDevice *vdev, struct VFIOQuirk *quirk); +} VFIOQuirk; + +typedef struct VFIOBAR { + VFIORegion region; + MemoryRegion *mr; + size_t size; + uint8_t type; + bool ioport; + bool mem64; + QLIST_HEAD(, VFIOQuirk) quirks; +} VFIOBAR; + +typedef struct VFIOVGARegion { + MemoryRegion mem; + off_t offset; + int nr; + QLIST_HEAD(, VFIOQuirk) quirks; +} VFIOVGARegion; + +typedef struct VFIOVGA { + off_t fd_offset; + int fd; + VFIOVGARegion region[QEMU_PCI_VGA_NUM_REGIONS]; +} VFIOVGA; + +typedef struct VFIOINTx { + bool pending; /* interrupt pending */ + bool kvm_accel; /* set when QEMU bypass through KVM enabled */ + uint8_t pin; /* which pin to pull for qemu_set_irq */ + EventNotifier interrupt; /* eventfd triggered on interrupt */ + EventNotifier unmask; /* eventfd for unmask on QEMU bypass */ + PCIINTxRoute route; /* routing info for QEMU bypass */ + uint32_t mmap_timeout; /* delay to re-enable mmaps after interrupt */ + QEMUTimer *mmap_timer; /* enable mmaps after periods w/o interrupts */ +} VFIOINTx; + +typedef struct VFIOMSIVector { + /* + * Two interrupt paths are configured per vector. The first, is only used + * for interrupts injected via QEMU. This is typically the non-accel path, + * but may also be used when we want QEMU to handle masking and pending + * bits. The KVM path bypasses QEMU and is therefore higher performance, + * but requires masking at the device. virq is used to track the MSI route + * through KVM, thus kvm_interrupt is only available when virq is set to a + * valid (>= 0) value. 
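+ * + * Roughly: 'interrupt' is the eventfd QEMU itself polls and injects from, + * while 'kvm_interrupt' is handed to KVM as an irqfd so the host can + * inject the MSI without bouncing through userspace.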
+ */ + EventNotifier interrupt; + EventNotifier kvm_interrupt; + struct VFIOPCIDevice *vdev; /* back pointer to device */ + int virq; + bool use; +} VFIOMSIVector; + +enum { + VFIO_INT_NONE = 0, + VFIO_INT_INTx = 1, + VFIO_INT_MSI = 2, + VFIO_INT_MSIX = 3, +}; + +/* Cache of MSI-X setup */ +typedef struct VFIOMSIXInfo { + uint8_t table_bar; + uint8_t pba_bar; + uint16_t entries; + uint32_t table_offset; + uint32_t pba_offset; + unsigned long *pending; +} VFIOMSIXInfo; + +#define TYPE_VFIO_PCI "vfio-pci" +OBJECT_DECLARE_SIMPLE_TYPE(VFIOPCIDevice, VFIO_PCI) + +struct VFIOPCIDevice { + PCIDevice pdev; + VFIODevice vbasedev; + VFIOINTx intx; + unsigned int config_size; + uint8_t *emulated_config_bits; /* QEMU emulated bits, little-endian */ + off_t config_offset; /* Offset of config space region within device fd */ + unsigned int rom_size; + off_t rom_offset; /* Offset of ROM region within device fd */ + void *rom; + int msi_cap_size; + VFIOMSIVector *msi_vectors; + VFIOMSIXInfo *msix; + int nr_vectors; /* Number of MSI/MSIX vectors currently in use */ + int interrupt; /* Current interrupt type */ + VFIOBAR bars[PCI_NUM_REGIONS - 1]; /* No ROM */ + VFIOVGA *vga; /* 0xa0000, 0x3b0, 0x3c0 */ + void *igd_opregion; + PCIHostDeviceAddress host; + EventNotifier err_notifier; + EventNotifier req_notifier; + int (*resetfn)(struct VFIOPCIDevice *); + uint32_t vendor_id; + uint32_t device_id; + uint32_t sub_vendor_id; + uint32_t sub_device_id; + uint32_t features; +#define VFIO_FEATURE_ENABLE_VGA_BIT 0 +#define VFIO_FEATURE_ENABLE_VGA (1 << VFIO_FEATURE_ENABLE_VGA_BIT) +#define VFIO_FEATURE_ENABLE_REQ_BIT 1 +#define VFIO_FEATURE_ENABLE_REQ (1 << VFIO_FEATURE_ENABLE_REQ_BIT) +#define VFIO_FEATURE_ENABLE_IGD_OPREGION_BIT 2 +#define VFIO_FEATURE_ENABLE_IGD_OPREGION \ + (1 << VFIO_FEATURE_ENABLE_IGD_OPREGION_BIT) + OnOffAuto display; + uint32_t display_xres; + uint32_t display_yres; + int32_t bootindex; + uint32_t igd_gms; + OffAutoPCIBAR msix_relo; + uint8_t pm_cap; + uint8_t nv_gpudirect_clique; + bool pci_aer; + bool req_enabled; + bool has_flr; + bool has_pm_reset; + bool rom_read_failed; + bool no_kvm_intx; + bool no_kvm_msi; + bool no_kvm_msix; + bool no_geforce_quirks; + bool no_kvm_ioeventfd; + bool no_vfio_ioeventfd; + bool enable_ramfb; + VFIODisplay *dpy; + Notifier irqchip_change_notifier; +}; + +/* Use uint32_t for vendor & device so PCI_ANY_ID expands and cannot match hw */ +static inline bool vfio_pci_is(VFIOPCIDevice *vdev, uint32_t vendor, uint32_t device) +{ + return (vendor == PCI_ANY_ID || vendor == vdev->vendor_id) && + (device == PCI_ANY_ID || device == vdev->device_id); +} + +static inline bool vfio_is_vga(VFIOPCIDevice *vdev) +{ + PCIDevice *pdev = &vdev->pdev; + uint16_t class = pci_get_word(pdev->config + PCI_CLASS_DEVICE); + + return class == PCI_CLASS_DISPLAY_VGA; +} + +uint32_t vfio_pci_read_config(PCIDevice *pdev, uint32_t addr, int len); +void vfio_pci_write_config(PCIDevice *pdev, + uint32_t addr, uint32_t val, int len); + +uint64_t vfio_vga_read(void *opaque, hwaddr addr, unsigned size); +void vfio_vga_write(void *opaque, hwaddr addr, uint64_t data, unsigned size); + +bool vfio_opt_rom_in_denylist(VFIOPCIDevice *vdev); +void vfio_vga_quirk_setup(VFIOPCIDevice *vdev); +void vfio_vga_quirk_exit(VFIOPCIDevice *vdev); +void vfio_vga_quirk_finalize(VFIOPCIDevice *vdev); +void vfio_bar_quirk_setup(VFIOPCIDevice *vdev, int nr); +void vfio_bar_quirk_exit(VFIOPCIDevice *vdev, int nr); +void vfio_bar_quirk_finalize(VFIOPCIDevice *vdev, int nr); +void
+int vfio_add_virt_caps(VFIOPCIDevice *vdev, Error **errp);
+void vfio_quirk_reset(VFIOPCIDevice *vdev);
+VFIOQuirk *vfio_quirk_alloc(int nr_mem);
+void vfio_probe_igd_bar4_quirk(VFIOPCIDevice *vdev, int nr);
+
+extern const PropertyInfo qdev_prop_nv_gpudirect_clique;
+
+int vfio_populate_vga(VFIOPCIDevice *vdev, Error **errp);
+
+int vfio_pci_igd_opregion_init(VFIOPCIDevice *vdev,
+                               struct vfio_region_info *info,
+                               Error **errp);
+int vfio_pci_nvidia_v100_ram_init(VFIOPCIDevice *vdev, Error **errp);
+int vfio_pci_nvlink2_init(VFIOPCIDevice *vdev, Error **errp);
+
+void vfio_display_reset(VFIOPCIDevice *vdev);
+int vfio_display_probe(VFIOPCIDevice *vdev, Error **errp);
+void vfio_display_finalize(VFIOPCIDevice *vdev);
+
+#endif /* HW_VFIO_VFIO_PCI_H */
diff --git a/hw/vfio/platform.c b/hw/vfio/platform.c
new file mode 100644
index 000000000..f8f08a0f3
--- /dev/null
+++ b/hw/vfio/platform.c
@@ -0,0 +1,720 @@
+/*
+ * vfio based device assignment support - platform devices
+ *
+ * Copyright Linaro Limited, 2014
+ *
+ * Authors:
+ *  Kim Phillips
+ *  Eric Auger
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ *
+ * Based on vfio based PCI device assignment support:
+ * Copyright Red Hat, Inc. 2012
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include <sys/ioctl.h>
+#include <linux/vfio.h>
+
+#include "hw/vfio/vfio-platform.h"
+#include "migration/vmstate.h"
+#include "qemu/error-report.h"
+#include "qemu/lockable.h"
+#include "qemu/main-loop.h"
+#include "qemu/module.h"
+#include "qemu/range.h"
+#include "exec/memory.h"
+#include "exec/address-spaces.h"
+#include "qemu/queue.h"
+#include "hw/sysbus.h"
+#include "trace.h"
+#include "hw/irq.h"
+#include "hw/platform-bus.h"
+#include "hw/qdev-properties.h"
+#include "sysemu/kvm.h"
+
+/*
+ * Functions used regardless of the injection method
+ */
+
+static inline bool vfio_irq_is_automasked(VFIOINTp *intp)
+{
+    return intp->flags & VFIO_IRQ_INFO_AUTOMASKED;
+}
+
+/**
+ * vfio_init_intp - allocate, initialize the IRQ struct pointer
+ * and add it into the list of IRQs
+ * @vbasedev: the VFIO device handle
+ * @info: irq info struct retrieved from VFIO driver
+ * @errp: error object
+ */
+static VFIOINTp *vfio_init_intp(VFIODevice *vbasedev,
+                                struct vfio_irq_info info, Error **errp)
+{
+    int ret;
+    VFIOPlatformDevice *vdev =
+        container_of(vbasedev, VFIOPlatformDevice, vbasedev);
+    SysBusDevice *sbdev = SYS_BUS_DEVICE(vdev);
+    VFIOINTp *intp;
+
+    intp = g_malloc0(sizeof(*intp));
+    intp->vdev = vdev;
+    intp->pin = info.index;
+    intp->flags = info.flags;
+    intp->state = VFIO_IRQ_INACTIVE;
+    intp->kvm_accel = false;
+
+    sysbus_init_irq(sbdev, &intp->qemuirq);
+
+    /* Get an eventfd for trigger */
+    intp->interrupt = g_malloc0(sizeof(EventNotifier));
+    ret = event_notifier_init(intp->interrupt, 0);
+    if (ret) {
+        g_free(intp->interrupt);
+        g_free(intp);
+        error_setg_errno(errp, -ret,
+                         "failed to initialize trigger eventfd notifier");
+        return NULL;
+    }
+    if (vfio_irq_is_automasked(intp)) {
+        /* Get an eventfd for resample/unmask */
+        intp->unmask = g_malloc0(sizeof(EventNotifier));
+        ret = event_notifier_init(intp->unmask, 0);
+        if (ret) {
+            g_free(intp->interrupt);
+            g_free(intp->unmask);
+            g_free(intp);
+            error_setg_errno(errp, -ret,
+                             "failed to initialize resample eventfd notifier");
+            return NULL;
+        }
+    }
+
+    QLIST_INSERT_HEAD(&vdev->intp_list, intp, next);
+    return intp;
+}
+
+/**
+ * vfio_set_trigger_eventfd - set VFIO eventfd handling
+ *
+ * @intp: IRQ struct handle
+ * @handler: handler to be called on eventfd signaling
+ *
+ * Set up VFIO signaling and attach an optional user-side handler
+ * to the eventfd
+ */
+static int vfio_set_trigger_eventfd(VFIOINTp *intp,
+                                    eventfd_user_side_handler_t handler)
+{
+    VFIODevice *vbasedev = &intp->vdev->vbasedev;
+    int32_t fd = event_notifier_get_fd(intp->interrupt);
+    Error *err = NULL;
+    int ret;
+
+    qemu_set_fd_handler(fd, (IOHandler *)handler, NULL, intp);
+
+    ret = vfio_set_irq_signaling(vbasedev, intp->pin, 0,
+                                 VFIO_IRQ_SET_ACTION_TRIGGER, fd, &err);
+    if (ret) {
+        error_reportf_err(err, VFIO_MSG_PREFIX, vbasedev->name);
+        qemu_set_fd_handler(fd, NULL, NULL, NULL);
+    }
+
+    return ret;
+}
+
+/*
+ * Functions only used when eventfds are handled on user-side,
+ * i.e. without irqfd
+ */
+
+/**
+ * vfio_mmap_set_enabled - enable/disable the fast path mode
+ * @vdev: the VFIO platform device
+ * @enabled: the target mmap state
+ *
+ * enabled = true ~ fast path = MMIO region is mmaped (no KVM TRAP);
+ * enabled = false ~ slow path = MMIO region is trapped and region callbacks
+ * are called; the slow path makes it possible to trap the device IRQ
+ * status register reset.
+ */
+
+static void vfio_mmap_set_enabled(VFIOPlatformDevice *vdev, bool enabled)
+{
+    int i;
+
+    for (i = 0; i < vdev->vbasedev.num_regions; i++) {
+        vfio_region_mmaps_set_enabled(vdev->regions[i], enabled);
+    }
+}
+
+/**
+ * vfio_intp_mmap_enable - timer function, restores the fast path
+ * if there is no more active IRQ
+ * @opaque: actually points to the VFIO platform device
+ *
+ * Called on mmap timer timeout, this function checks whether the
+ * IRQ is still active and, if not, restores the fast path.
+ * By construction a single eventfd is handled at a time.
+ * If the IRQ is still active, the timer is re-programmed.
+ */
+static void vfio_intp_mmap_enable(void *opaque)
+{
+    VFIOINTp *tmp;
+    VFIOPlatformDevice *vdev = (VFIOPlatformDevice *)opaque;
+
+    QEMU_LOCK_GUARD(&vdev->intp_mutex);
+    QLIST_FOREACH(tmp, &vdev->intp_list, next) {
+        if (tmp->state == VFIO_IRQ_ACTIVE) {
+            trace_vfio_platform_intp_mmap_enable(tmp->pin);
+            /* re-program the timer to check active status later */
+            timer_mod(vdev->mmap_timer,
+                      qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) +
+                      vdev->mmap_timeout);
+            return;
+        }
+    }
+    vfio_mmap_set_enabled(vdev, true);
+}
+
+/**
+ * vfio_intp_inject_pending_lockheld - Injects a pending IRQ
+ * @intp: the VFIOINTp handle
+ *
+ * The function is called on a previous IRQ completion, from
+ * vfio_platform_eoi, while the intp_mutex is locked.
+ * Also in such a situation, the slow path already is set and
+ * the mmap timer was already programmed.
+ */
+static void vfio_intp_inject_pending_lockheld(VFIOINTp *intp)
+{
+    trace_vfio_platform_intp_inject_pending_lockheld(intp->pin,
+                              event_notifier_get_fd(intp->interrupt));
+
+    intp->state = VFIO_IRQ_ACTIVE;
+
+    /* trigger the virtual IRQ */
+    qemu_set_irq(intp->qemuirq, 1);
+}
+
+/**
+ * vfio_intp_interrupt - The user-side eventfd handler
+ * @intp: the VFIOINTp handle
+ *
+ * The function is entered in event handler context:
+ * the vIRQ is injected into the guest if there is no other active
+ * or pending IRQ.
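+ * Otherwise the new IRQ is queued on pending_intp_queue and replayed
+ * later by vfio_platform_eoi() once the current one completes.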
+ */ +static void vfio_intp_interrupt(VFIOINTp *intp) +{ + int ret; + VFIOINTp *tmp; + VFIOPlatformDevice *vdev = intp->vdev; + bool delay_handling = false; + + QEMU_LOCK_GUARD(&vdev->intp_mutex); + if (intp->state == VFIO_IRQ_INACTIVE) { + QLIST_FOREACH(tmp, &vdev->intp_list, next) { + if (tmp->state == VFIO_IRQ_ACTIVE || + tmp->state == VFIO_IRQ_PENDING) { + delay_handling = true; + break; + } + } + } + if (delay_handling) { + /* + * the new IRQ gets a pending status and is pushed in + * the pending queue + */ + intp->state = VFIO_IRQ_PENDING; + trace_vfio_intp_interrupt_set_pending(intp->pin); + QSIMPLEQ_INSERT_TAIL(&vdev->pending_intp_queue, + intp, pqnext); + event_notifier_test_and_clear(intp->interrupt); + return; + } + + trace_vfio_platform_intp_interrupt(intp->pin, + event_notifier_get_fd(intp->interrupt)); + + ret = event_notifier_test_and_clear(intp->interrupt); + if (!ret) { + error_report("Error when clearing fd=%d (ret = %d)", + event_notifier_get_fd(intp->interrupt), ret); + } + + intp->state = VFIO_IRQ_ACTIVE; + + /* sets slow path */ + vfio_mmap_set_enabled(vdev, false); + + /* trigger the virtual IRQ */ + qemu_set_irq(intp->qemuirq, 1); + + /* + * Schedule the mmap timer which will restore fastpath when no IRQ + * is active anymore + */ + if (vdev->mmap_timeout) { + timer_mod(vdev->mmap_timer, + qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + + vdev->mmap_timeout); + } +} + +/** + * vfio_platform_eoi - IRQ completion routine + * @vbasedev: the VFIO device handle + * + * De-asserts the active virtual IRQ and unmasks the physical IRQ + * (effective for level sensitive IRQ auto-masked by the VFIO driver). + * Then it handles next pending IRQ if any. + * eoi function is called on the first access to any MMIO region + * after an IRQ was triggered, trapped since slow path was set. + * It is assumed this access corresponds to the IRQ status + * register reset. With such a mechanism, a single IRQ can be + * handled at a time since there is no way to know which IRQ + * was completed by the guest (we would need additional details + * about the IRQ status register mask). 
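+ * Note: the eoi hook is invoked from the slow-path region access
+ * callbacks, which is why vfio_intp_interrupt() disables the mmaps
+ * before injecting the vIRQ.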
+ */ +static void vfio_platform_eoi(VFIODevice *vbasedev) +{ + VFIOINTp *intp; + VFIOPlatformDevice *vdev = + container_of(vbasedev, VFIOPlatformDevice, vbasedev); + + QEMU_LOCK_GUARD(&vdev->intp_mutex); + QLIST_FOREACH(intp, &vdev->intp_list, next) { + if (intp->state == VFIO_IRQ_ACTIVE) { + trace_vfio_platform_eoi(intp->pin, + event_notifier_get_fd(intp->interrupt)); + intp->state = VFIO_IRQ_INACTIVE; + + /* deassert the virtual IRQ */ + qemu_set_irq(intp->qemuirq, 0); + + if (vfio_irq_is_automasked(intp)) { + /* unmasks the physical level-sensitive IRQ */ + vfio_unmask_single_irqindex(vbasedev, intp->pin); + } + + /* a single IRQ can be active at a time */ + break; + } + } + /* in case there are pending IRQs, handle the first one */ + if (!QSIMPLEQ_EMPTY(&vdev->pending_intp_queue)) { + intp = QSIMPLEQ_FIRST(&vdev->pending_intp_queue); + vfio_intp_inject_pending_lockheld(intp); + QSIMPLEQ_REMOVE_HEAD(&vdev->pending_intp_queue, pqnext); + } +} + +/** + * vfio_start_eventfd_injection - starts the virtual IRQ injection using + * user-side handled eventfds + * @sbdev: the sysbus device handle + * @irq: the qemu irq handle + */ + +static void vfio_start_eventfd_injection(SysBusDevice *sbdev, qemu_irq irq) +{ + VFIOPlatformDevice *vdev = VFIO_PLATFORM_DEVICE(sbdev); + VFIOINTp *intp; + + QLIST_FOREACH(intp, &vdev->intp_list, next) { + if (intp->qemuirq == irq) { + break; + } + } + assert(intp); + + if (vfio_set_trigger_eventfd(intp, vfio_intp_interrupt)) { + abort(); + } +} + +/* + * Functions used for irqfd + */ + +/** + * vfio_set_resample_eventfd - sets the resamplefd for an IRQ + * @intp: the IRQ struct handle + * programs the VFIO driver to unmask this IRQ when the + * intp->unmask eventfd is triggered + */ +static int vfio_set_resample_eventfd(VFIOINTp *intp) +{ + int32_t fd = event_notifier_get_fd(intp->unmask); + VFIODevice *vbasedev = &intp->vdev->vbasedev; + Error *err = NULL; + int ret; + + qemu_set_fd_handler(fd, NULL, NULL, NULL); + ret = vfio_set_irq_signaling(vbasedev, intp->pin, 0, + VFIO_IRQ_SET_ACTION_UNMASK, fd, &err); + if (ret) { + error_reportf_err(err, VFIO_MSG_PREFIX, vbasedev->name); + } + return ret; +} + +/** + * vfio_start_irqfd_injection - starts the virtual IRQ injection using + * irqfd + * + * @sbdev: the sysbus device handle + * @irq: the qemu irq handle + * + * In case the irqfd setup fails, we fallback to userspace handled eventfd + */ +static void vfio_start_irqfd_injection(SysBusDevice *sbdev, qemu_irq irq) +{ + VFIOPlatformDevice *vdev = VFIO_PLATFORM_DEVICE(sbdev); + VFIOINTp *intp; + + if (!kvm_irqfds_enabled() || !kvm_resamplefds_enabled() || + !vdev->irqfd_allowed) { + goto fail_irqfd; + } + + QLIST_FOREACH(intp, &vdev->intp_list, next) { + if (intp->qemuirq == irq) { + break; + } + } + assert(intp); + + if (kvm_irqchip_add_irqfd_notifier(kvm_state, intp->interrupt, + intp->unmask, irq) < 0) { + goto fail_irqfd; + } + + if (vfio_set_trigger_eventfd(intp, NULL) < 0) { + goto fail_vfio; + } + if (vfio_irq_is_automasked(intp)) { + if (vfio_set_resample_eventfd(intp) < 0) { + goto fail_vfio; + } + trace_vfio_platform_start_level_irqfd_injection(intp->pin, + event_notifier_get_fd(intp->interrupt), + event_notifier_get_fd(intp->unmask)); + } else { + trace_vfio_platform_start_edge_irqfd_injection(intp->pin, + event_notifier_get_fd(intp->interrupt)); + } + + intp->kvm_accel = true; + + return; +fail_vfio: + kvm_irqchip_remove_irqfd_notifier(kvm_state, intp->interrupt, irq); + abort(); +fail_irqfd: + vfio_start_eventfd_injection(sbdev, irq); + return; +} + +/* 
VFIO skeleton */ + +static void vfio_platform_compute_needs_reset(VFIODevice *vbasedev) +{ + vbasedev->needs_reset = true; +} + +/* not implemented yet */ +static int vfio_platform_hot_reset_multi(VFIODevice *vbasedev) +{ + return -1; +} + +/** + * vfio_populate_device - Allocate and populate MMIO region + * and IRQ structs according to driver returned information + * @vbasedev: the VFIO device handle + * @errp: error object + * + */ +static int vfio_populate_device(VFIODevice *vbasedev, Error **errp) +{ + VFIOINTp *intp, *tmp; + int i, ret = -1; + VFIOPlatformDevice *vdev = + container_of(vbasedev, VFIOPlatformDevice, vbasedev); + + if (!(vbasedev->flags & VFIO_DEVICE_FLAGS_PLATFORM)) { + error_setg(errp, "this isn't a platform device"); + return ret; + } + + vdev->regions = g_new0(VFIORegion *, vbasedev->num_regions); + + for (i = 0; i < vbasedev->num_regions; i++) { + char *name = g_strdup_printf("VFIO %s region %d\n", vbasedev->name, i); + + vdev->regions[i] = g_new0(VFIORegion, 1); + ret = vfio_region_setup(OBJECT(vdev), vbasedev, + vdev->regions[i], i, name); + g_free(name); + if (ret) { + error_setg_errno(errp, -ret, "failed to get region %d info", i); + goto reg_error; + } + } + + vdev->mmap_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL, + vfio_intp_mmap_enable, vdev); + + QSIMPLEQ_INIT(&vdev->pending_intp_queue); + + for (i = 0; i < vbasedev->num_irqs; i++) { + struct vfio_irq_info irq = { .argsz = sizeof(irq) }; + + irq.index = i; + ret = ioctl(vbasedev->fd, VFIO_DEVICE_GET_IRQ_INFO, &irq); + if (ret) { + error_setg_errno(errp, -ret, "failed to get device irq info"); + goto irq_err; + } else { + trace_vfio_platform_populate_interrupts(irq.index, + irq.count, + irq.flags); + intp = vfio_init_intp(vbasedev, irq, errp); + if (!intp) { + ret = -1; + goto irq_err; + } + } + } + return 0; +irq_err: + timer_del(vdev->mmap_timer); + QLIST_FOREACH_SAFE(intp, &vdev->intp_list, next, tmp) { + QLIST_REMOVE(intp, next); + g_free(intp); + } +reg_error: + for (i = 0; i < vbasedev->num_regions; i++) { + if (vdev->regions[i]) { + vfio_region_finalize(vdev->regions[i]); + } + g_free(vdev->regions[i]); + } + g_free(vdev->regions); + return ret; +} + +/* specialized functions for VFIO Platform devices */ +static VFIODeviceOps vfio_platform_ops = { + .vfio_compute_needs_reset = vfio_platform_compute_needs_reset, + .vfio_hot_reset_multi = vfio_platform_hot_reset_multi, + .vfio_eoi = vfio_platform_eoi, +}; + +/** + * vfio_base_device_init - perform preliminary VFIO setup + * @vbasedev: the VFIO device handle + * @errp: error object + * + * Implement the VFIO command sequence that allows to discover + * assigned device resources: group extraction, device + * fd retrieval, resource query. 
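+ *
+ * A minimal sketch of the same sequence as a raw VFIO client would
+ * issue it (the group number and device name are illustrative, error
+ * handling omitted):
+ *
+ *   container = open("/dev/vfio/vfio", O_RDWR);
+ *   group = open("/dev/vfio/26", O_RDWR);
+ *   ioctl(group, VFIO_GROUP_SET_CONTAINER, &container);
+ *   ioctl(container, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU);
+ *   device = ioctl(group, VFIO_GROUP_GET_DEVICE_FD, "fff51000.ethernet");
+ *   ioctl(device, VFIO_DEVICE_GET_INFO, &device_info);
+ *
+ * (VFIO_TYPE1_IOMMU is just the common case; the IOMMU backend
+ * actually selected depends on the host.)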
+ * Precondition: the device name must be initialized + */ +static int vfio_base_device_init(VFIODevice *vbasedev, Error **errp) +{ + VFIOGroup *group; + VFIODevice *vbasedev_iter; + char *tmp, group_path[PATH_MAX], *group_name; + ssize_t len; + struct stat st; + int groupid; + int ret; + + /* @sysfsdev takes precedence over @host */ + if (vbasedev->sysfsdev) { + g_free(vbasedev->name); + vbasedev->name = g_path_get_basename(vbasedev->sysfsdev); + } else { + if (!vbasedev->name || strchr(vbasedev->name, '/')) { + error_setg(errp, "wrong host device name"); + return -EINVAL; + } + + vbasedev->sysfsdev = g_strdup_printf("/sys/bus/platform/devices/%s", + vbasedev->name); + } + + if (stat(vbasedev->sysfsdev, &st) < 0) { + error_setg_errno(errp, errno, + "failed to get the sysfs host device file status"); + return -errno; + } + + tmp = g_strdup_printf("%s/iommu_group", vbasedev->sysfsdev); + len = readlink(tmp, group_path, sizeof(group_path)); + g_free(tmp); + + if (len < 0 || len >= sizeof(group_path)) { + ret = len < 0 ? -errno : -ENAMETOOLONG; + error_setg_errno(errp, -ret, "no iommu_group found"); + return ret; + } + + group_path[len] = 0; + + group_name = basename(group_path); + if (sscanf(group_name, "%d", &groupid) != 1) { + error_setg_errno(errp, errno, "failed to read %s", group_path); + return -errno; + } + + trace_vfio_platform_base_device_init(vbasedev->name, groupid); + + group = vfio_get_group(groupid, &address_space_memory, errp); + if (!group) { + return -ENOENT; + } + + QLIST_FOREACH(vbasedev_iter, &group->device_list, next) { + if (strcmp(vbasedev_iter->name, vbasedev->name) == 0) { + error_setg(errp, "device is already attached"); + vfio_put_group(group); + return -EBUSY; + } + } + ret = vfio_get_device(group, vbasedev->name, vbasedev, errp); + if (ret) { + vfio_put_group(group); + return ret; + } + + ret = vfio_populate_device(vbasedev, errp); + if (ret) { + vfio_put_group(group); + } + + return ret; +} + +/** + * vfio_platform_realize - the device realize function + * @dev: device state pointer + * @errp: error + * + * initialize the device, its memory regions and IRQ structures + * IRQ are started separately + */ +static void vfio_platform_realize(DeviceState *dev, Error **errp) +{ + VFIOPlatformDevice *vdev = VFIO_PLATFORM_DEVICE(dev); + SysBusDevice *sbdev = SYS_BUS_DEVICE(dev); + VFIODevice *vbasedev = &vdev->vbasedev; + int i, ret; + + vbasedev->type = VFIO_DEVICE_TYPE_PLATFORM; + vbasedev->dev = dev; + vbasedev->ops = &vfio_platform_ops; + + qemu_mutex_init(&vdev->intp_mutex); + + trace_vfio_platform_realize(vbasedev->sysfsdev ? 
+ vbasedev->sysfsdev : vbasedev->name, + vdev->compat); + + ret = vfio_base_device_init(vbasedev, errp); + if (ret) { + goto out; + } + + if (!vdev->compat) { + GError *gerr = NULL; + gchar *contents; + gsize length; + char *path; + + path = g_strdup_printf("%s/of_node/compatible", vbasedev->sysfsdev); + if (!g_file_get_contents(path, &contents, &length, &gerr)) { + error_setg(errp, "%s", gerr->message); + g_error_free(gerr); + g_free(path); + return; + } + g_free(path); + vdev->compat = contents; + for (vdev->num_compat = 0; length; vdev->num_compat++) { + size_t skip = strlen(contents) + 1; + contents += skip; + length -= skip; + } + } + + for (i = 0; i < vbasedev->num_regions; i++) { + if (vfio_region_mmap(vdev->regions[i])) { + warn_report("%s mmap unsupported, performance may be slow", + memory_region_name(vdev->regions[i]->mem)); + } + sysbus_init_mmio(sbdev, vdev->regions[i]->mem); + } +out: + if (!ret) { + return; + } + + if (vdev->vbasedev.name) { + error_prepend(errp, VFIO_MSG_PREFIX, vdev->vbasedev.name); + } else { + error_prepend(errp, "vfio error: "); + } +} + +static const VMStateDescription vfio_platform_vmstate = { + .name = "vfio-platform", + .unmigratable = 1, +}; + +static Property vfio_platform_dev_properties[] = { + DEFINE_PROP_STRING("host", VFIOPlatformDevice, vbasedev.name), + DEFINE_PROP_STRING("sysfsdev", VFIOPlatformDevice, vbasedev.sysfsdev), + DEFINE_PROP_BOOL("x-no-mmap", VFIOPlatformDevice, vbasedev.no_mmap, false), + DEFINE_PROP_UINT32("mmap-timeout-ms", VFIOPlatformDevice, + mmap_timeout, 1100), + DEFINE_PROP_BOOL("x-irqfd", VFIOPlatformDevice, irqfd_allowed, true), + DEFINE_PROP_END_OF_LIST(), +}; + +static void vfio_platform_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + SysBusDeviceClass *sbc = SYS_BUS_DEVICE_CLASS(klass); + + dc->realize = vfio_platform_realize; + device_class_set_props(dc, vfio_platform_dev_properties); + dc->vmsd = &vfio_platform_vmstate; + dc->desc = "VFIO-based platform device assignment"; + sbc->connect_irq_notifier = vfio_start_irqfd_injection; + set_bit(DEVICE_CATEGORY_MISC, dc->categories); + /* Supported by TYPE_VIRT_MACHINE */ + dc->user_creatable = true; +} + +static const TypeInfo vfio_platform_dev_info = { + .name = TYPE_VFIO_PLATFORM, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(VFIOPlatformDevice), + .class_init = vfio_platform_class_init, + .class_size = sizeof(VFIOPlatformDeviceClass), +}; + +static void register_vfio_platform_dev_type(void) +{ + type_register_static(&vfio_platform_dev_info); +} + +type_init(register_vfio_platform_dev_type) diff --git a/hw/vfio/spapr.c b/hw/vfio/spapr.c new file mode 100644 index 000000000..04c6e67f8 --- /dev/null +++ b/hw/vfio/spapr.c @@ -0,0 +1,255 @@ +/* + * DMA memory preregistration + * + * Authors: + * Alexey Kardashevskiy + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. 
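+ *
+ * The sPAPR IOMMU requires guest RAM that may be mapped for DMA to be
+ * registered (and pinned) with the host beforehand; the memory
+ * listener below performs that registration and the matching
+ * unregistration.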
+ */
+
+#include "qemu/osdep.h"
+#include <sys/ioctl.h>
+#include <linux/vfio.h>
+
+#include "hw/vfio/vfio-common.h"
+#include "hw/hw.h"
+#include "exec/ram_addr.h"
+#include "qemu/error-report.h"
+#include "qapi/error.h"
+#include "trace.h"
+
+static bool vfio_prereg_listener_skipped_section(MemoryRegionSection *section)
+{
+    if (memory_region_is_iommu(section->mr)) {
+        hw_error("Cannot possibly preregister IOMMU memory");
+    }
+
+    return !memory_region_is_ram(section->mr) ||
+           memory_region_is_ram_device(section->mr);
+}
+
+static void *vfio_prereg_gpa_to_vaddr(MemoryRegionSection *section, hwaddr gpa)
+{
+    return memory_region_get_ram_ptr(section->mr) +
+        section->offset_within_region +
+        (gpa - section->offset_within_address_space);
+}
+
+static void vfio_prereg_listener_region_add(MemoryListener *listener,
+                                            MemoryRegionSection *section)
+{
+    VFIOContainer *container = container_of(listener, VFIOContainer,
+                                            prereg_listener);
+    const hwaddr gpa = section->offset_within_address_space;
+    hwaddr end;
+    int ret;
+    hwaddr page_mask = qemu_real_host_page_mask;
+    struct vfio_iommu_spapr_register_memory reg = {
+        .argsz = sizeof(reg),
+        .flags = 0,
+    };
+
+    if (vfio_prereg_listener_skipped_section(section)) {
+        trace_vfio_prereg_listener_region_add_skip(
+                section->offset_within_address_space,
+                section->offset_within_address_space +
+                int128_get64(int128_sub(section->size, int128_one())));
+        return;
+    }
+
+    if (unlikely((section->offset_within_address_space & ~page_mask) ||
+                 (section->offset_within_region & ~page_mask) ||
+                 (int128_get64(section->size) & ~page_mask))) {
+        error_report("%s received unaligned region", __func__);
+        return;
+    }
+
+    end = section->offset_within_address_space + int128_get64(section->size);
+    if (gpa >= end) {
+        return;
+    }
+
+    memory_region_ref(section->mr);
+
+    reg.vaddr = (uintptr_t) vfio_prereg_gpa_to_vaddr(section, gpa);
+    reg.size = end - gpa;
+
+    ret = ioctl(container->fd, VFIO_IOMMU_SPAPR_REGISTER_MEMORY, &reg);
+    trace_vfio_prereg_register(reg.vaddr, reg.size, ret ? -errno : 0);
+    if (ret) {
+        /*
+         * On the initfn path, store the first error in the container so we
+         * can gracefully fail.  At runtime, there's not much we can do other
+         * than throw a hardware error.
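+         * The deferred error is checked and reported right after the
+         * prereg listener is registered during container setup.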
+         */
+        if (!container->initialized) {
+            if (!container->error) {
+                error_setg_errno(&container->error, -ret,
+                                 "Memory registering failed");
+            }
+        } else {
+            hw_error("vfio: Memory registering failed, unable to continue");
+        }
+    }
+}
+
+static void vfio_prereg_listener_region_del(MemoryListener *listener,
+                                            MemoryRegionSection *section)
+{
+    VFIOContainer *container = container_of(listener, VFIOContainer,
+                                            prereg_listener);
+    const hwaddr gpa = section->offset_within_address_space;
+    hwaddr end;
+    int ret;
+    hwaddr page_mask = qemu_real_host_page_mask;
+    struct vfio_iommu_spapr_register_memory reg = {
+        .argsz = sizeof(reg),
+        .flags = 0,
+    };
+
+    if (vfio_prereg_listener_skipped_section(section)) {
+        trace_vfio_prereg_listener_region_del_skip(
+                section->offset_within_address_space,
+                section->offset_within_address_space +
+                int128_get64(int128_sub(section->size, int128_one())));
+        return;
+    }
+
+    if (unlikely((section->offset_within_address_space & ~page_mask) ||
+                 (section->offset_within_region & ~page_mask) ||
+                 (int128_get64(section->size) & ~page_mask))) {
+        error_report("%s received unaligned region", __func__);
+        return;
+    }
+
+    end = section->offset_within_address_space + int128_get64(section->size);
+    if (gpa >= end) {
+        return;
+    }
+
+    reg.vaddr = (uintptr_t) vfio_prereg_gpa_to_vaddr(section, gpa);
+    reg.size = end - gpa;
+
+    ret = ioctl(container->fd, VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY, &reg);
+    trace_vfio_prereg_unregister(reg.vaddr, reg.size, ret ? -errno : 0);
+}
+
+const MemoryListener vfio_prereg_listener = {
+    .name = "vfio-pre-reg",
+    .region_add = vfio_prereg_listener_region_add,
+    .region_del = vfio_prereg_listener_region_del,
+};
+
+int vfio_spapr_create_window(VFIOContainer *container,
+                             MemoryRegionSection *section,
+                             hwaddr *pgsize)
+{
+    int ret = 0;
+    IOMMUMemoryRegion *iommu_mr = IOMMU_MEMORY_REGION(section->mr);
+    uint64_t pagesize = memory_region_iommu_get_min_page_size(iommu_mr), pgmask;
+    unsigned entries, bits_total, bits_per_level, max_levels;
+    struct vfio_iommu_spapr_tce_create create = { .argsz = sizeof(create) };
+    long rampagesize = qemu_minrampagesize();
+
+    /*
+     * The host might not support the guest supported IOMMU page size,
+     * so we will use smaller physical IOMMU pages to back them.
+     */
+    if (pagesize > rampagesize) {
+        pagesize = rampagesize;
+    }
+    pgmask = container->pgsizes & (pagesize | (pagesize - 1));
+    pagesize = pgmask ? (1ULL << (63 - clz64(pgmask))) : 0;
+    if (!pagesize) {
+        error_report("Host doesn't support page size 0x%"PRIx64
+                     ", the supported mask is 0x%lx",
+                     memory_region_iommu_get_min_page_size(iommu_mr),
+                     container->pgsizes);
+        return -EINVAL;
+    }
+
+    /*
+     * FIXME: For VFIO iommu types which have KVM acceleration to
+     * avoid bouncing all map/unmaps through qemu this way, this
+     * would be the right place to wire that up (tell the KVM
+     * device emulation the VFIO iommu handles to use).
+     */
+    create.window_size = int128_get64(section->size);
+    create.page_shift = ctz64(pagesize);
+    /*
+     * The SPAPR host supports multilevel TCE tables.  We try to guess the
+     * optimal number of levels and, if that fails (for example due to host
+     * memory fragmentation), we increase the number of levels.
+     * The DMA address structure is:
+     * rrrrrrrr rxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx iiiiiiii
+     * where:
+     *   r = reserved (bits >= 55 are reserved in the existing hardware)
+     *   i = IOMMU page offset (64K in this example)
+     *   x = bits to index a TCE which can be split to equal chunks to index
+     *       within the level.
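+     * A worked example with hypothetical sizes: a 1 GiB window of
+     * 64 KiB IOMMU pages holds 16384 TCEs, i.e. a 128 KiB table, so
+     * bits_total = 17; with 4 KiB system pages, bits_per_level =
+     * 12 + 8 = 20, and a single level suffices.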
+ * The aim is to split "x" to smaller possible number of levels. + */ + entries = create.window_size >> create.page_shift; + /* bits_total is number of "x" needed */ + bits_total = ctz64(entries * sizeof(uint64_t)); + /* + * bits_per_level is a safe guess of how much we can allocate per level: + * 8 is the current minimum for CONFIG_FORCE_MAX_ZONEORDER and MAX_ORDER + * is usually bigger than that. + * Below we look at qemu_real_host_page_size as TCEs are allocated from + * system pages. + */ + bits_per_level = ctz64(qemu_real_host_page_size) + 8; + create.levels = bits_total / bits_per_level; + if (bits_total % bits_per_level) { + ++create.levels; + } + max_levels = (64 - create.page_shift) / ctz64(qemu_real_host_page_size); + for ( ; create.levels <= max_levels; ++create.levels) { + ret = ioctl(container->fd, VFIO_IOMMU_SPAPR_TCE_CREATE, &create); + if (!ret) { + break; + } + } + if (ret) { + error_report("Failed to create a window, ret = %d (%m)", ret); + return -errno; + } + + if (create.start_addr != section->offset_within_address_space) { + vfio_spapr_remove_window(container, create.start_addr); + + error_report("Host doesn't support DMA window at %"HWADDR_PRIx", must be %"PRIx64, + section->offset_within_address_space, + (uint64_t)create.start_addr); + return -EINVAL; + } + trace_vfio_spapr_create_window(create.page_shift, + create.levels, + create.window_size, + create.start_addr); + *pgsize = pagesize; + + return 0; +} + +int vfio_spapr_remove_window(VFIOContainer *container, + hwaddr offset_within_address_space) +{ + struct vfio_iommu_spapr_tce_remove remove = { + .argsz = sizeof(remove), + .start_addr = offset_within_address_space, + }; + int ret; + + ret = ioctl(container->fd, VFIO_IOMMU_SPAPR_TCE_REMOVE, &remove); + if (ret) { + error_report("Failed to remove window at %"PRIx64, + (uint64_t)remove.start_addr); + return -errno; + } + + trace_vfio_spapr_remove_window(offset_within_address_space); + + return 0; +} diff --git a/hw/vfio/trace-events b/hw/vfio/trace-events new file mode 100644 index 000000000..0ef1b5f4a --- /dev/null +++ b/hw/vfio/trace-events @@ -0,0 +1,167 @@ +# See docs/devel/tracing.rst for syntax documentation. 
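+# Each line declares one trace point: its name, its C argument list, and a
+# printf-style format string used when the trace point fires.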
+ +# pci.c +vfio_intx_interrupt(const char *name, char line) " (%s) Pin %c" +vfio_intx_eoi(const char *name) " (%s) EOI" +vfio_intx_enable_kvm(const char *name) " (%s) KVM INTx accel enabled" +vfio_intx_disable_kvm(const char *name) " (%s) KVM INTx accel disabled" +vfio_intx_update(const char *name, int new_irq, int target_irq) " (%s) IRQ moved %d -> %d" +vfio_intx_enable(const char *name) " (%s)" +vfio_intx_disable(const char *name) " (%s)" +vfio_msi_interrupt(const char *name, int index, uint64_t addr, int data) " (%s) vector %d 0x%"PRIx64"/0x%x" +vfio_msix_vector_do_use(const char *name, int index) " (%s) vector %d used" +vfio_msix_vector_release(const char *name, int index) " (%s) vector %d released" +vfio_msix_enable(const char *name) " (%s)" +vfio_msix_pba_disable(const char *name) " (%s)" +vfio_msix_pba_enable(const char *name) " (%s)" +vfio_msix_disable(const char *name) " (%s)" +vfio_msix_fixup(const char *name, int bar, uint64_t start, uint64_t end) " (%s) MSI-X region %d mmap fixup [0x%"PRIx64" - 0x%"PRIx64"]" +vfio_msix_relo(const char *name, int bar, uint64_t offset) " (%s) BAR %d offset 0x%"PRIx64"" +vfio_msi_enable(const char *name, int nr_vectors) " (%s) Enabled %d MSI vectors" +vfio_msi_disable(const char *name) " (%s)" +vfio_pci_load_rom(const char *name, unsigned long size, unsigned long offset, unsigned long flags) "Device %s ROM:\n size: 0x%lx, offset: 0x%lx, flags: 0x%lx" +vfio_rom_read(const char *name, uint64_t addr, int size, uint64_t data) " (%s, 0x%"PRIx64", 0x%x) = 0x%"PRIx64 +vfio_pci_size_rom(const char *name, int size) "%s ROM size 0x%x" +vfio_vga_write(uint64_t addr, uint64_t data, int size) " (0x%"PRIx64", 0x%"PRIx64", %d)" +vfio_vga_read(uint64_t addr, int size, uint64_t data) " (0x%"PRIx64", %d) = 0x%"PRIx64 +vfio_pci_read_config(const char *name, int addr, int len, int val) " (%s, @0x%x, len=0x%x) 0x%x" +vfio_pci_write_config(const char *name, int addr, int val, int len) " (%s, @0x%x, 0x%x, len=0x%x)" +vfio_msi_setup(const char *name, int pos) "%s PCI MSI CAP @0x%x" +vfio_msix_early_setup(const char *name, int pos, int table_bar, int offset, int entries) "%s PCI MSI-X CAP @0x%x, BAR %d, offset 0x%x, entries %d" +vfio_check_pcie_flr(const char *name) "%s Supports FLR via PCIe cap" +vfio_check_pm_reset(const char *name) "%s Supports PM reset" +vfio_check_af_flr(const char *name) "%s Supports FLR via AF cap" +vfio_pci_hot_reset(const char *name, const char *type) " (%s) %s" +vfio_pci_hot_reset_has_dep_devices(const char *name) "%s: hot reset dependent devices:" +vfio_pci_hot_reset_dep_devices(int domain, int bus, int slot, int function, int group_id) "\t%04x:%02x:%02x.%x group %d" +vfio_pci_hot_reset_result(const char *name, const char *result) "%s hot reset: %s" +vfio_populate_device_config(const char *name, unsigned long size, unsigned long offset, unsigned long flags) "Device %s config:\n size: 0x%lx, offset: 0x%lx, flags: 0x%lx" +vfio_populate_device_get_irq_info_failure(const char *errstr) "VFIO_DEVICE_GET_IRQ_INFO failure: %s" +vfio_realize(const char *name, int group_id) " (%s) group %d" +vfio_mdev(const char *name, bool is_mdev) " (%s) is_mdev %d" +vfio_add_ext_cap_dropped(const char *name, uint16_t cap, uint16_t offset) "%s 0x%x@0x%x" +vfio_pci_reset(const char *name) " (%s)" +vfio_pci_reset_flr(const char *name) "%s FLR/VFIO_DEVICE_RESET" +vfio_pci_reset_pm(const char *name) "%s PCI PM Reset" +vfio_pci_emulated_vendor_id(const char *name, uint16_t val) "%s 0x%04x" +vfio_pci_emulated_device_id(const char *name, uint16_t val) "%s 0x%04x" 
+vfio_pci_emulated_sub_vendor_id(const char *name, uint16_t val) "%s 0x%04x" +vfio_pci_emulated_sub_device_id(const char *name, uint16_t val) "%s 0x%04x" + +# pci-quirks.c +vfio_quirk_rom_in_denylist(const char *name, uint16_t vid, uint16_t did) "%s %04x:%04x" +vfio_quirk_generic_window_address_write(const char *name, const char * region_name, uint64_t data) "%s %s 0x%"PRIx64 +vfio_quirk_generic_window_data_read(const char *name, const char * region_name, uint64_t data) "%s %s 0x%"PRIx64 +vfio_quirk_generic_window_data_write(const char *name, const char * region_name, uint64_t data) "%s %s 0x%"PRIx64 +vfio_quirk_generic_mirror_read(const char *name, const char * region_name, uint64_t addr, uint64_t data) "%s %s 0x%"PRIx64": 0x%"PRIx64 +vfio_quirk_generic_mirror_write(const char *name, const char * region_name, uint64_t addr, uint64_t data) "%s %s 0x%"PRIx64": 0x%"PRIx64 +vfio_quirk_ati_3c3_read(const char *name, uint64_t data) "%s 0x%"PRIx64 +vfio_quirk_ati_3c3_probe(const char *name) "%s" +vfio_quirk_ati_bar4_probe(const char *name) "%s" +vfio_quirk_ati_bar2_probe(const char *name) "%s" +vfio_quirk_nvidia_3d0_state(const char *name, const char *state) "%s %s" +vfio_quirk_nvidia_3d0_read(const char *name, uint8_t offset, unsigned size, uint64_t val) " (%s, @0x%x, len=0x%x) 0x%"PRIx64 +vfio_quirk_nvidia_3d0_write(const char *name, uint8_t offset, uint64_t data, unsigned size) "(%s, @0x%x, 0x%"PRIx64", len=0x%x)" +vfio_quirk_nvidia_3d0_probe(const char *name) "%s" +vfio_quirk_nvidia_bar5_state(const char *name, const char *state) "%s %s" +vfio_quirk_nvidia_bar5_probe(const char *name) "%s" +vfio_quirk_nvidia_bar0_msi_ack(const char *name) "%s" +vfio_quirk_nvidia_bar0_probe(const char *name) "%s" +vfio_quirk_rtl8168_fake_latch(const char *name, uint64_t val) "%s 0x%"PRIx64 +vfio_quirk_rtl8168_msix_write(const char *name, uint16_t offset, uint64_t val) "%s MSI-X table write[0x%x]: 0x%"PRIx64 +vfio_quirk_rtl8168_msix_read(const char *name, uint16_t offset, uint64_t val) "%s MSI-X table read[0x%x]: 0x%"PRIx64 +vfio_quirk_rtl8168_probe(const char *name) "%s" + +vfio_quirk_ati_bonaire_reset_skipped(const char *name) "%s" +vfio_quirk_ati_bonaire_reset_no_smc(const char *name) "%s" +vfio_quirk_ati_bonaire_reset_timeout(const char *name) "%s" +vfio_quirk_ati_bonaire_reset_done(const char *name) "%s" +vfio_quirk_ati_bonaire_reset(const char *name) "%s" +vfio_ioeventfd_exit(const char *name, uint64_t addr, unsigned size, uint64_t data) "%s+0x%"PRIx64"[%d]:0x%"PRIx64 +vfio_ioeventfd_handler(const char *name, uint64_t addr, unsigned size, uint64_t data) "%s+0x%"PRIx64"[%d] -> 0x%"PRIx64 +vfio_ioeventfd_init(const char *name, uint64_t addr, unsigned size, uint64_t data, bool vfio) "%s+0x%"PRIx64"[%d]:0x%"PRIx64" vfio:%d" +vfio_pci_igd_opregion_enabled(const char *name) "%s" + +vfio_pci_nvidia_gpu_setup_quirk(const char *name, uint64_t tgt, uint64_t size) "%s tgt=0x%"PRIx64" size=0x%"PRIx64 +vfio_pci_nvlink2_setup_quirk_ssatgt(const char *name, uint64_t tgt, uint64_t size) "%s tgt=0x%"PRIx64" size=0x%"PRIx64 +vfio_pci_nvlink2_setup_quirk_lnkspd(const char *name, uint32_t link_speed) "%s link_speed=0x%x" + +# igd.c +vfio_pci_igd_bar4_write(const char *name, uint32_t index, uint32_t data, uint32_t base) "%s [0x%03x] 0x%08x -> 0x%08x" +vfio_pci_igd_bdsm_enabled(const char *name, int size) "%s %dMB" +vfio_pci_igd_host_bridge_enabled(const char *name) "%s" +vfio_pci_igd_lpc_bridge_enabled(const char *name) "%s" + +# common.c +vfio_region_write(const char *name, int index, uint64_t addr, uint64_t data, unsigned 
size) " (%s:region%d+0x%"PRIx64", 0x%"PRIx64 ", %d)" +vfio_region_read(char *name, int index, uint64_t addr, unsigned size, uint64_t data) " (%s:region%d+0x%"PRIx64", %d) = 0x%"PRIx64 +vfio_iommu_map_notify(const char *op, uint64_t iova_start, uint64_t iova_end) "iommu %s @ 0x%"PRIx64" - 0x%"PRIx64 +vfio_listener_region_add_skip(uint64_t start, uint64_t end) "SKIPPING region_add 0x%"PRIx64" - 0x%"PRIx64 +vfio_spapr_group_attach(int groupfd, int tablefd) "Attached groupfd %d to liobn fd %d" +vfio_listener_region_add_iommu(uint64_t start, uint64_t end) "region_add [iommu] 0x%"PRIx64" - 0x%"PRIx64 +vfio_listener_region_add_ram(uint64_t iova_start, uint64_t iova_end, void *vaddr) "region_add [ram] 0x%"PRIx64" - 0x%"PRIx64" [%p]" +vfio_listener_region_add_no_dma_map(const char *name, uint64_t iova, uint64_t size, uint64_t page_size) "Region \"%s\" 0x%"PRIx64" size=0x%"PRIx64" is not aligned to 0x%"PRIx64" and cannot be mapped for DMA" +vfio_listener_region_del_skip(uint64_t start, uint64_t end) "SKIPPING region_del 0x%"PRIx64" - 0x%"PRIx64 +vfio_listener_region_del(uint64_t start, uint64_t end) "region_del 0x%"PRIx64" - 0x%"PRIx64 +vfio_disconnect_container(int fd) "close container->fd=%d" +vfio_put_group(int fd) "close group->fd=%d" +vfio_get_device(const char * name, unsigned int flags, unsigned int num_regions, unsigned int num_irqs) "Device %s flags: %u, regions: %u, irqs: %u" +vfio_put_base_device(int fd) "close vdev->fd=%d" +vfio_region_setup(const char *dev, int index, const char *name, unsigned long flags, unsigned long offset, unsigned long size) "Device %s, region %d \"%s\", flags: 0x%lx, offset: 0x%lx, size: 0x%lx" +vfio_region_mmap_fault(const char *name, int index, unsigned long offset, unsigned long size, int fault) "Region %s mmaps[%d], [0x%lx - 0x%lx], fault: %d" +vfio_region_mmap(const char *name, unsigned long offset, unsigned long end) "Region %s [0x%lx - 0x%lx]" +vfio_region_exit(const char *name, int index) "Device %s, region %d" +vfio_region_finalize(const char *name, int index) "Device %s, region %d" +vfio_region_mmaps_set_enabled(const char *name, bool enabled) "Region %s mmaps enabled: %d" +vfio_region_unmap(const char *name, unsigned long offset, unsigned long end) "Region %s unmap [0x%lx - 0x%lx]" +vfio_region_sparse_mmap_header(const char *name, int index, int nr_areas) "Device %s region %d: %d sparse mmap entries" +vfio_region_sparse_mmap_entry(int i, unsigned long start, unsigned long end) "sparse entry %d [0x%lx - 0x%lx]" +vfio_get_dev_region(const char *name, int index, uint32_t type, uint32_t subtype) "%s index %d, %08x/%0x8" +vfio_dma_unmap_overflow_workaround(void) "" + +# platform.c +vfio_platform_base_device_init(char *name, int groupid) "%s belongs to group #%d" +vfio_platform_realize(char *name, char *compat) "vfio device %s, compat = %s" +vfio_platform_eoi(int pin, int fd) "EOI IRQ pin %d (fd=%d)" +vfio_platform_intp_mmap_enable(int pin) "IRQ #%d still active, stay in slow path" +vfio_platform_intp_interrupt(int pin, int fd) "Inject IRQ #%d (fd = %d)" +vfio_platform_intp_inject_pending_lockheld(int pin, int fd) "Inject pending IRQ #%d (fd = %d)" +vfio_platform_populate_interrupts(int pin, int count, int flags) "- IRQ index %d: count %d, flags=0x%x" +vfio_intp_interrupt_set_pending(int index) "irq %d is set PENDING" +vfio_platform_start_level_irqfd_injection(int index, int fd, int resamplefd) "IRQ index=%d, fd = %d, resamplefd = %d" +vfio_platform_start_edge_irqfd_injection(int index, int fd) "IRQ index=%d, fd = %d" + +# spapr.c 
+vfio_prereg_listener_region_add_skip(uint64_t start, uint64_t end) "0x%"PRIx64" - 0x%"PRIx64 +vfio_prereg_listener_region_del_skip(uint64_t start, uint64_t end) "0x%"PRIx64" - 0x%"PRIx64 +vfio_prereg_register(uint64_t va, uint64_t size, int ret) "va=0x%"PRIx64" size=0x%"PRIx64" ret=%d" +vfio_prereg_unregister(uint64_t va, uint64_t size, int ret) "va=0x%"PRIx64" size=0x%"PRIx64" ret=%d" +vfio_spapr_create_window(int ps, unsigned int levels, uint64_t ws, uint64_t off) "pageshift=0x%x levels=%u winsize=0x%"PRIx64" offset=0x%"PRIx64 +vfio_spapr_remove_window(uint64_t off) "offset=0x%"PRIx64 + +# display.c +vfio_display_edid_available(void) "" +vfio_display_edid_link_up(void) "" +vfio_display_edid_link_down(void) "" +vfio_display_edid_update(uint32_t prefx, uint32_t prefy) "%ux%u" +vfio_display_edid_write_error(void) "" + +# migration.c +vfio_migration_probe(const char *name, uint32_t index) " (%s) Region %d" +vfio_migration_set_state(const char *name, uint32_t state) " (%s) state %d" +vfio_vmstate_change(const char *name, int running, const char *reason, uint32_t dev_state) " (%s) running %d reason %s device state %d" +vfio_migration_state_notifier(const char *name, const char *state) " (%s) state %s" +vfio_save_setup(const char *name) " (%s)" +vfio_save_cleanup(const char *name) " (%s)" +vfio_save_buffer(const char *name, uint64_t data_offset, uint64_t data_size, uint64_t pending) " (%s) Offset 0x%"PRIx64" size 0x%"PRIx64" pending 0x%"PRIx64 +vfio_update_pending(const char *name, uint64_t pending) " (%s) pending 0x%"PRIx64 +vfio_save_device_config_state(const char *name) " (%s)" +vfio_save_pending(const char *name, uint64_t precopy, uint64_t postcopy, uint64_t compatible) " (%s) precopy 0x%"PRIx64" postcopy 0x%"PRIx64" compatible 0x%"PRIx64 +vfio_save_iterate(const char *name, int data_size) " (%s) data_size %d" +vfio_save_complete_precopy(const char *name) " (%s)" +vfio_load_device_config_state(const char *name) " (%s)" +vfio_load_state(const char *name, uint64_t data) " (%s) data 0x%"PRIx64 +vfio_load_state_device_data(const char *name, uint64_t data_offset, uint64_t data_size) " (%s) Offset 0x%"PRIx64" size 0x%"PRIx64 +vfio_load_cleanup(const char *name) " (%s)" +vfio_get_dirty_bitmap(int fd, uint64_t iova, uint64_t size, uint64_t bitmap_size, uint64_t start) "container fd=%d, iova=0x%"PRIx64" size= 0x%"PRIx64" bitmap_size=0x%"PRIx64" start=0x%"PRIx64 +vfio_iommu_map_dirty_notify(uint64_t iova_start, uint64_t iova_end) "iommu dirty @ 0x%"PRIx64" - 0x%"PRIx64 diff --git a/hw/vfio/trace.h b/hw/vfio/trace.h new file mode 100644 index 000000000..5a343aa59 --- /dev/null +++ b/hw/vfio/trace.h @@ -0,0 +1 @@ +#include "trace/trace-hw_vfio.h" diff --git a/hw/virtio/Kconfig b/hw/virtio/Kconfig new file mode 100644 index 000000000..c144d42f9 --- /dev/null +++ b/hw/virtio/Kconfig @@ -0,0 +1,70 @@ +config VHOST + bool + +config VIRTIO + bool + +config VIRTIO_RNG + bool + default y + depends on VIRTIO + +config VIRTIO_IOMMU + bool + default y + depends on PCI && VIRTIO + +config VIRTIO_PCI + bool + default y if PCI_DEVICES + depends on PCI + select VIRTIO + +config VIRTIO_MMIO + bool + select VIRTIO + +config VIRTIO_CCW + bool + select VIRTIO + +config VIRTIO_BALLOON + bool + default y + depends on VIRTIO + +config VIRTIO_CRYPTO + bool + default y + depends on VIRTIO + +config VIRTIO_PMEM_SUPPORTED + bool + +config VIRTIO_PMEM + bool + default y + depends on VIRTIO + depends on VIRTIO_PMEM_SUPPORTED + select MEM_DEVICE + +config VIRTIO_MEM_SUPPORTED + bool + +config VIRTIO_MEM + bool + default y + 
depends on VIRTIO + depends on LINUX + depends on VIRTIO_MEM_SUPPORTED + select MEM_DEVICE + +config VHOST_USER_I2C + bool + default y + depends on VIRTIO && VHOST_USER + +config VHOST_USER_RNG + bool + default y + depends on VIRTIO && VHOST_USER diff --git a/hw/virtio/meson.build b/hw/virtio/meson.build new file mode 100644 index 000000000..521f7d64a --- /dev/null +++ b/hw/virtio/meson.build @@ -0,0 +1,55 @@ +softmmu_virtio_ss = ss.source_set() +softmmu_virtio_ss.add(files('virtio-bus.c')) +softmmu_virtio_ss.add(when: 'CONFIG_VIRTIO_PCI', if_true: files('virtio-pci.c')) +softmmu_virtio_ss.add(when: 'CONFIG_VIRTIO_MMIO', if_true: files('virtio-mmio.c')) +softmmu_virtio_ss.add(when: 'CONFIG_VHOST', if_false: files('vhost-stub.c')) + +softmmu_ss.add_all(when: 'CONFIG_VIRTIO', if_true: softmmu_virtio_ss) +softmmu_ss.add(when: 'CONFIG_VIRTIO', if_false: files('vhost-stub.c')) + +softmmu_ss.add(when: 'CONFIG_ALL', if_true: files('vhost-stub.c')) + +virtio_ss = ss.source_set() +virtio_ss.add(files('virtio.c')) +virtio_ss.add(when: 'CONFIG_VHOST', if_true: files('vhost.c', 'vhost-backend.c')) +virtio_ss.add(when: 'CONFIG_VHOST_USER', if_true: files('vhost-user.c')) +virtio_ss.add(when: 'CONFIG_VHOST_VDPA', if_true: files('vhost-vdpa.c')) +virtio_ss.add(when: 'CONFIG_VIRTIO_BALLOON', if_true: files('virtio-balloon.c')) +virtio_ss.add(when: 'CONFIG_VIRTIO_CRYPTO', if_true: files('virtio-crypto.c')) +virtio_ss.add(when: ['CONFIG_VIRTIO_CRYPTO', 'CONFIG_VIRTIO_PCI'], if_true: files('virtio-crypto-pci.c')) +virtio_ss.add(when: 'CONFIG_VHOST_USER_FS', if_true: files('vhost-user-fs.c')) +virtio_ss.add(when: ['CONFIG_VHOST_USER_FS', 'CONFIG_VIRTIO_PCI'], if_true: files('vhost-user-fs-pci.c')) +virtio_ss.add(when: 'CONFIG_VIRTIO_PMEM', if_true: files('virtio-pmem.c')) +virtio_ss.add(when: 'CONFIG_VHOST_VSOCK', if_true: files('vhost-vsock.c', 'vhost-vsock-common.c')) +virtio_ss.add(when: 'CONFIG_VHOST_USER_VSOCK', if_true: files('vhost-user-vsock.c', 'vhost-vsock-common.c')) +virtio_ss.add(when: 'CONFIG_VIRTIO_RNG', if_true: files('virtio-rng.c')) +virtio_ss.add(when: 'CONFIG_VIRTIO_IOMMU', if_true: files('virtio-iommu.c')) +virtio_ss.add(when: 'CONFIG_VIRTIO_MEM', if_true: files('virtio-mem.c')) +virtio_ss.add(when: 'CONFIG_VHOST_USER_I2C', if_true: files('vhost-user-i2c.c')) +virtio_ss.add(when: ['CONFIG_VIRTIO_PCI', 'CONFIG_VHOST_USER_I2C'], if_true: files('vhost-user-i2c-pci.c')) +virtio_ss.add(when: 'CONFIG_VHOST_USER_RNG', if_true: files('vhost-user-rng.c')) +virtio_ss.add(when: ['CONFIG_VHOST_USER_RNG', 'CONFIG_VIRTIO_PCI'], if_true: files('vhost-user-rng-pci.c')) + +virtio_pci_ss = ss.source_set() +virtio_pci_ss.add(when: 'CONFIG_VHOST_VSOCK', if_true: files('vhost-vsock-pci.c')) +virtio_pci_ss.add(when: 'CONFIG_VHOST_USER_VSOCK', if_true: files('vhost-user-vsock-pci.c')) +virtio_pci_ss.add(when: 'CONFIG_VHOST_USER_BLK', if_true: files('vhost-user-blk-pci.c')) +virtio_pci_ss.add(when: 'CONFIG_VHOST_USER_INPUT', if_true: files('vhost-user-input-pci.c')) +virtio_pci_ss.add(when: 'CONFIG_VHOST_USER_SCSI', if_true: files('vhost-user-scsi-pci.c')) +virtio_pci_ss.add(when: 'CONFIG_VHOST_SCSI', if_true: files('vhost-scsi-pci.c')) +virtio_pci_ss.add(when: 'CONFIG_VIRTIO_INPUT_HOST', if_true: files('virtio-input-host-pci.c')) +virtio_pci_ss.add(when: 'CONFIG_VIRTIO_INPUT', if_true: files('virtio-input-pci.c')) +virtio_pci_ss.add(when: 'CONFIG_VIRTIO_RNG', if_true: files('virtio-rng-pci.c')) +virtio_pci_ss.add(when: 'CONFIG_VIRTIO_BALLOON', if_true: files('virtio-balloon-pci.c')) +virtio_pci_ss.add(when: 
'CONFIG_VIRTIO_9P', if_true: files('virtio-9p-pci.c')) +virtio_pci_ss.add(when: 'CONFIG_VIRTIO_SCSI', if_true: files('virtio-scsi-pci.c')) +virtio_pci_ss.add(when: 'CONFIG_VIRTIO_BLK', if_true: files('virtio-blk-pci.c')) +virtio_pci_ss.add(when: 'CONFIG_VIRTIO_NET', if_true: files('virtio-net-pci.c')) +virtio_pci_ss.add(when: 'CONFIG_VIRTIO_SERIAL', if_true: files('virtio-serial-pci.c')) +virtio_pci_ss.add(when: 'CONFIG_VIRTIO_PMEM', if_true: files('virtio-pmem-pci.c')) +virtio_pci_ss.add(when: 'CONFIG_VIRTIO_IOMMU', if_true: files('virtio-iommu-pci.c')) +virtio_pci_ss.add(when: 'CONFIG_VIRTIO_MEM', if_true: files('virtio-mem-pci.c')) + +virtio_ss.add_all(when: 'CONFIG_VIRTIO_PCI', if_true: virtio_pci_ss) + +specific_ss.add_all(when: 'CONFIG_VIRTIO', if_true: virtio_ss) diff --git a/hw/virtio/trace-events b/hw/virtio/trace-events new file mode 100644 index 000000000..650e521e3 --- /dev/null +++ b/hw/virtio/trace-events @@ -0,0 +1,130 @@ +# See docs/devel/tracing.rst for syntax documentation. + +# vhost.c +vhost_commit(bool started, bool changed) "Started: %d Changed: %d" +vhost_region_add_section(const char *name, uint64_t gpa, uint64_t size, uint64_t host) "%s: 0x%"PRIx64"+0x%"PRIx64" @ 0x%"PRIx64 +vhost_region_add_section_merge(const char *name, uint64_t new_size, uint64_t gpa, uint64_t owr) "%s: size: 0x%"PRIx64 " gpa: 0x%"PRIx64 " owr: 0x%"PRIx64 +vhost_region_add_section_aligned(const char *name, uint64_t gpa, uint64_t size, uint64_t host) "%s: 0x%"PRIx64"+0x%"PRIx64" @ 0x%"PRIx64 +vhost_section(const char *name) "%s" +vhost_reject_section(const char *name, int d) "%s:%d" +vhost_iotlb_miss(void *dev, int step) "%p step %d" + +# vhost-user.c +vhost_user_postcopy_end_entry(void) "" +vhost_user_postcopy_end_exit(void) "" +vhost_user_postcopy_fault_handler(const char *name, uint64_t fault_address, int nregions) "%s: @0x%"PRIx64" nregions:%d" +vhost_user_postcopy_fault_handler_loop(int i, uint64_t client_base, uint64_t size) "%d: client 0x%"PRIx64" +0x%"PRIx64 +vhost_user_postcopy_fault_handler_found(int i, uint64_t region_offset, uint64_t rb_offset) "%d: region_offset: 0x%"PRIx64" rb_offset:0x%"PRIx64 +vhost_user_postcopy_listen(void) "" +vhost_user_set_mem_table_postcopy(uint64_t client_addr, uint64_t qhva, int reply_i, int region_i) "client:0x%"PRIx64" for hva: 0x%"PRIx64" reply %d region %d" +vhost_user_set_mem_table_withfd(int index, const char *name, uint64_t memory_size, uint64_t guest_phys_addr, uint64_t userspace_addr, uint64_t offset) "%d:%s: size:0x%"PRIx64" GPA:0x%"PRIx64" QVA/userspace:0x%"PRIx64" RB offset:0x%"PRIx64 +vhost_user_postcopy_waker(const char *rb, uint64_t rb_offset) "%s + 0x%"PRIx64 +vhost_user_postcopy_waker_found(uint64_t client_addr) "0x%"PRIx64 +vhost_user_postcopy_waker_nomatch(const char *rb, uint64_t rb_offset) "%s + 0x%"PRIx64 + +# vhost-vdpa.c +vhost_vdpa_dma_map(void *vdpa, int fd, uint32_t msg_type, uint64_t iova, uint64_t size, uint64_t uaddr, uint8_t perm, uint8_t type) "vdpa:%p fd: %d msg_type: %"PRIu32" iova: 0x%"PRIx64" size: 0x%"PRIx64" uaddr: 0x%"PRIx64" perm: 0x%"PRIx8" type: %"PRIu8 +vhost_vdpa_dma_unmap(void *vdpa, int fd, uint32_t msg_type, uint64_t iova, uint64_t size, uint8_t type) "vdpa:%p fd: %d msg_type: %"PRIu32" iova: 0x%"PRIx64" size: 0x%"PRIx64" type: %"PRIu8 +vhost_vdpa_listener_region_add(void *vdpa, uint64_t iova, uint64_t llend, void *vaddr, bool readonly) "vdpa: %p iova 0x%"PRIx64" llend 0x%"PRIx64" vaddr: %p read-only: %d" +vhost_vdpa_listener_region_del(void *vdpa, uint64_t iova, uint64_t llend) "vdpa: %p iova 0x%"PRIx64" 
llend 0x%"PRIx64 +vhost_vdpa_add_status(void *dev, uint8_t status) "dev: %p status: 0x%"PRIx8 +vhost_vdpa_init(void *dev, void *vdpa) "dev: %p vdpa: %p" +vhost_vdpa_cleanup(void *dev, void *vdpa) "dev: %p vdpa: %p" +vhost_vdpa_memslots_limit(void *dev, int ret) "dev: %p = 0x%x" +vhost_vdpa_set_mem_table(void *dev, uint32_t nregions, uint32_t padding) "dev: %p nregions: %"PRIu32" padding: 0x%"PRIx32 +vhost_vdpa_dump_regions(void *dev, int i, uint64_t guest_phys_addr, uint64_t memory_size, uint64_t userspace_addr, uint64_t flags_padding) "dev: %p %d: guest_phys_addr: 0x%"PRIx64" memory_size: 0x%"PRIx64" userspace_addr: 0x%"PRIx64" flags_padding: 0x%"PRIx64 +vhost_vdpa_set_features(void *dev, uint64_t features) "dev: %p features: 0x%"PRIx64 +vhost_vdpa_get_device_id(void *dev, uint32_t device_id) "dev: %p device_id %"PRIu32 +vhost_vdpa_reset_device(void *dev, uint8_t status) "dev: %p status: 0x%"PRIx8 +vhost_vdpa_get_vq_index(void *dev, int idx, int vq_idx) "dev: %p idx: %d vq idx: %d" +vhost_vdpa_set_vring_ready(void *dev) "dev: %p" +vhost_vdpa_dump_config(void *dev, const char *line) "dev: %p %s" +vhost_vdpa_set_config(void *dev, uint32_t offset, uint32_t size, uint32_t flags) "dev: %p offset: %"PRIu32" size: %"PRIu32" flags: 0x%"PRIx32 +vhost_vdpa_get_config(void *dev, void *config, uint32_t config_len) "dev: %p config: %p config_len: %"PRIu32 +vhost_vdpa_dev_start(void *dev, bool started) "dev: %p started: %d" +vhost_vdpa_set_log_base(void *dev, uint64_t base, unsigned long long size, int refcnt, int fd, void *log) "dev: %p base: 0x%"PRIx64" size: %llu refcnt: %d fd: %d log: %p" +vhost_vdpa_set_vring_addr(void *dev, unsigned int index, unsigned int flags, uint64_t desc_user_addr, uint64_t used_user_addr, uint64_t avail_user_addr, uint64_t log_guest_addr) "dev: %p index: %u flags: 0x%x desc_user_addr: 0x%"PRIx64" used_user_addr: 0x%"PRIx64" avail_user_addr: 0x%"PRIx64" log_guest_addr: 0x%"PRIx64 +vhost_vdpa_set_vring_num(void *dev, unsigned int index, unsigned int num) "dev: %p index: %u num: %u" +vhost_vdpa_set_vring_base(void *dev, unsigned int index, unsigned int num) "dev: %p index: %u num: %u" +vhost_vdpa_get_vring_base(void *dev, unsigned int index, unsigned int num) "dev: %p index: %u num: %u" +vhost_vdpa_set_vring_kick(void *dev, unsigned int index, int fd) "dev: %p index: %u fd: %d" +vhost_vdpa_set_vring_call(void *dev, unsigned int index, int fd) "dev: %p index: %u fd: %d" +vhost_vdpa_get_features(void *dev, uint64_t features) "dev: %p features: 0x%"PRIx64 +vhost_vdpa_set_owner(void *dev) "dev: %p" +vhost_vdpa_vq_get_addr(void *dev, void *vq, uint64_t desc_user_addr, uint64_t avail_user_addr, uint64_t used_user_addr) "dev: %p vq: %p desc_user_addr: 0x%"PRIx64" avail_user_addr: 0x%"PRIx64" used_user_addr: 0x%"PRIx64 +vhost_vdpa_get_iova_range(void *dev, uint64_t first, uint64_t last) "dev: %p first: 0x%"PRIx64" last: 0x%"PRIx64 + +# virtio.c +virtqueue_alloc_element(void *elem, size_t sz, unsigned in_num, unsigned out_num) "elem %p size %zd in_num %u out_num %u" +virtqueue_fill(void *vq, const void *elem, unsigned int len, unsigned int idx) "vq %p elem %p len %u idx %u" +virtqueue_flush(void *vq, unsigned int count) "vq %p count %u" +virtqueue_pop(void *vq, void *elem, unsigned int in_num, unsigned int out_num) "vq %p elem %p in_num %u out_num %u" +virtio_queue_notify(void *vdev, int n, void *vq) "vdev %p n %d vq %p" +virtio_notify_irqfd(void *vdev, void *vq) "vdev %p vq %p" +virtio_notify(void *vdev, void *vq) "vdev %p vq %p" +virtio_set_status(void *vdev, uint8_t val) "vdev %p 
val %u" + +# virtio-rng.c +virtio_rng_guest_not_ready(void *rng) "rng %p: guest not ready" +virtio_rng_cpu_is_stopped(void *rng, int size) "rng %p: cpu is stopped, dropping %d bytes" +virtio_rng_popped(void *rng) "rng %p: elem popped" +virtio_rng_pushed(void *rng, size_t len) "rng %p: %zd bytes pushed" +virtio_rng_request(void *rng, size_t size, unsigned quota) "rng %p: %zd bytes requested, %u bytes quota left" +virtio_rng_vm_state_change(void *rng, int running, int state) "rng %p: state change to running %d state %d" + +# virtio-balloon.c +# +virtio_balloon_bad_addr(uint64_t gpa) "0x%"PRIx64 +virtio_balloon_handle_output(const char *name, uint64_t gpa) "section name: %s gpa: 0x%"PRIx64 +virtio_balloon_get_config(uint32_t num_pages, uint32_t actual) "num_pages: %d actual: %d" +virtio_balloon_set_config(uint32_t actual, uint32_t oldactual) "actual: %d oldactual: %d" +virtio_balloon_to_target(uint64_t target, uint32_t num_pages) "balloon target: 0x%"PRIx64" num_pages: %d" + +# virtio-mmio.c +virtio_mmio_read(uint64_t offset) "virtio_mmio_read offset 0x%" PRIx64 +virtio_mmio_write_offset(uint64_t offset, uint64_t value) "virtio_mmio_write offset 0x%" PRIx64 " value 0x%" PRIx64 +virtio_mmio_guest_page(uint64_t size, int shift) "guest page size 0x%" PRIx64 " shift %d" +virtio_mmio_queue_write(uint64_t value, int max_size) "mmio_queue write 0x%" PRIx64 " max %d" +virtio_mmio_setting_irq(int level) "virtio_mmio setting IRQ %d" + +# virtio-iommu.c +virtio_iommu_device_reset(void) "reset!" +virtio_iommu_get_features(uint64_t features) "device supports features=0x%"PRIx64 +virtio_iommu_device_status(uint8_t status) "driver status = %d" +virtio_iommu_get_config(uint64_t page_size_mask, uint64_t start, uint64_t end, uint32_t domain_range, uint32_t probe_size) "page_size_mask=0x%"PRIx64" start=0x%"PRIx64" end=0x%"PRIx64" domain_range=%d probe_size=0x%x" +virtio_iommu_set_config(uint64_t page_size_mask, uint64_t start, uint64_t end, uint32_t domain_range, uint32_t probe_size) "page_size_mask=0x%"PRIx64" start=0x%"PRIx64" end=0x%"PRIx64" domain_bits=%d probe_size=0x%x" +virtio_iommu_attach(uint32_t domain_id, uint32_t ep_id) "domain=%d endpoint=%d" +virtio_iommu_detach(uint32_t domain_id, uint32_t ep_id) "domain=%d endpoint=%d" +virtio_iommu_map(uint32_t domain_id, uint64_t virt_start, uint64_t virt_end, uint64_t phys_start, uint32_t flags) "domain=%d virt_start=0x%"PRIx64" virt_end=0x%"PRIx64 " phys_start=0x%"PRIx64" flags=%d" +virtio_iommu_unmap(uint32_t domain_id, uint64_t virt_start, uint64_t virt_end) "domain=%d virt_start=0x%"PRIx64" virt_end=0x%"PRIx64 +virtio_iommu_unmap_done(uint32_t domain_id, uint64_t virt_start, uint64_t virt_end) "domain=%d virt_start=0x%"PRIx64" virt_end=0x%"PRIx64 +virtio_iommu_translate(const char *name, uint32_t rid, uint64_t iova, int flag) "mr=%s rid=%d addr=0x%"PRIx64" flag=%d" +virtio_iommu_init_iommu_mr(char *iommu_mr) "init %s" +virtio_iommu_get_endpoint(uint32_t ep_id) "Alloc endpoint=%d" +virtio_iommu_put_endpoint(uint32_t ep_id) "Free endpoint=%d" +virtio_iommu_get_domain(uint32_t domain_id) "Alloc domain=%d" +virtio_iommu_put_domain(uint32_t domain_id) "Free domain=%d" +virtio_iommu_translate_out(uint64_t virt_addr, uint64_t phys_addr, uint32_t sid) "0x%"PRIx64" -> 0x%"PRIx64 " for sid=%d" +virtio_iommu_report_fault(uint8_t reason, uint32_t flags, uint32_t endpoint, uint64_t addr) "FAULT reason=%d flags=%d endpoint=%d address =0x%"PRIx64 +virtio_iommu_fill_resv_property(uint32_t devid, uint8_t subtype, uint64_t start, uint64_t end) "dev= %d, type=%d 
start=0x%"PRIx64" end=0x%"PRIx64 +virtio_iommu_notify_map(const char *name, uint64_t virt_start, uint64_t virt_end, uint64_t phys_start, uint32_t flags) "mr=%s virt_start=0x%"PRIx64" virt_end=0x%"PRIx64" phys_start=0x%"PRIx64" flags=%d" +virtio_iommu_notify_unmap(const char *name, uint64_t virt_start, uint64_t virt_end) "mr=%s virt_start=0x%"PRIx64" virt_end=0x%"PRIx64 +virtio_iommu_remap(const char *name, uint64_t virt_start, uint64_t virt_end, uint64_t phys_start) "mr=%s virt_start=0x%"PRIx64" virt_end=0x%"PRIx64" phys_start=0x%"PRIx64 +virtio_iommu_set_page_size_mask(const char *name, uint64_t old, uint64_t new) "mr=%s old_mask=0x%"PRIx64" new_mask=0x%"PRIx64 +virtio_iommu_notify_flag_add(const char *name) "add notifier to mr %s" +virtio_iommu_notify_flag_del(const char *name) "del notifier from mr %s" + +# virtio-mem.c +virtio_mem_send_response(uint16_t type) "type=%" PRIu16 +virtio_mem_plug_request(uint64_t addr, uint16_t nb_blocks) "addr=0x%" PRIx64 " nb_blocks=%" PRIu16 +virtio_mem_unplug_request(uint64_t addr, uint16_t nb_blocks) "addr=0x%" PRIx64 " nb_blocks=%" PRIu16 +virtio_mem_unplugged_all(void) "" +virtio_mem_unplug_all_request(void) "" +virtio_mem_resized_usable_region(uint64_t old_size, uint64_t new_size) "old_size=0x%" PRIx64 "new_size=0x%" PRIx64 +virtio_mem_state_request(uint64_t addr, uint16_t nb_blocks) "addr=0x%" PRIx64 " nb_blocks=%" PRIu16 +virtio_mem_state_response(uint16_t state) "state=%" PRIu16 + +# virtio-pmem.c +virtio_pmem_flush_request(void) "flush request" +virtio_pmem_response(void) "flush response" +virtio_pmem_flush_done(int type) "fsync return=%d" diff --git a/hw/virtio/trace.h b/hw/virtio/trace.h new file mode 100644 index 000000000..5d7097061 --- /dev/null +++ b/hw/virtio/trace.h @@ -0,0 +1 @@ +#include "trace/trace-hw_virtio.h" diff --git a/hw/virtio/vhost-backend.c b/hw/virtio/vhost-backend.c new file mode 100644 index 000000000..b65f8f7e9 --- /dev/null +++ b/hw/virtio/vhost-backend.c @@ -0,0 +1,407 @@ +/* + * vhost-backend + * + * Copyright (c) 2013 Virtual Open Systems Sarl. + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +#include "qemu/osdep.h" +#include "hw/virtio/vhost.h" +#include "hw/virtio/vhost-backend.h" +#include "qemu/error-report.h" +#include "qemu/main-loop.h" +#include "standard-headers/linux/vhost_types.h" + +#include "hw/virtio/vhost-vdpa.h" +#ifdef CONFIG_VHOST_KERNEL +#include +#include + +static int vhost_kernel_call(struct vhost_dev *dev, unsigned long int request, + void *arg) +{ + int fd = (uintptr_t) dev->opaque; + int ret; + + assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_KERNEL); + + ret = ioctl(fd, request, arg); + return ret < 0 ? 
-errno : ret; +} + +static int vhost_kernel_init(struct vhost_dev *dev, void *opaque, Error **errp) +{ + assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_KERNEL); + + dev->opaque = opaque; + + return 0; +} + +static int vhost_kernel_cleanup(struct vhost_dev *dev) +{ + int fd = (uintptr_t) dev->opaque; + + assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_KERNEL); + + return close(fd); +} + +static int vhost_kernel_memslots_limit(struct vhost_dev *dev) +{ + int limit = 64; + char *s; + + if (g_file_get_contents("/sys/module/vhost/parameters/max_mem_regions", + &s, NULL, NULL)) { + uint64_t val = g_ascii_strtoull(s, NULL, 10); + if (!((val == G_MAXUINT64 || !val) && errno)) { + g_free(s); + return val; + } + error_report("ignoring invalid max_mem_regions value in vhost module:" + " %s", s); + } + g_free(s); + return limit; +} + +static int vhost_kernel_net_set_backend(struct vhost_dev *dev, + struct vhost_vring_file *file) +{ + return vhost_kernel_call(dev, VHOST_NET_SET_BACKEND, file); +} + +static int vhost_kernel_scsi_set_endpoint(struct vhost_dev *dev, + struct vhost_scsi_target *target) +{ + return vhost_kernel_call(dev, VHOST_SCSI_SET_ENDPOINT, target); +} + +static int vhost_kernel_scsi_clear_endpoint(struct vhost_dev *dev, + struct vhost_scsi_target *target) +{ + return vhost_kernel_call(dev, VHOST_SCSI_CLEAR_ENDPOINT, target); +} + +static int vhost_kernel_scsi_get_abi_version(struct vhost_dev *dev, int *version) +{ + return vhost_kernel_call(dev, VHOST_SCSI_GET_ABI_VERSION, version); +} + +static int vhost_kernel_set_log_base(struct vhost_dev *dev, uint64_t base, + struct vhost_log *log) +{ + return vhost_kernel_call(dev, VHOST_SET_LOG_BASE, &base); +} + +static int vhost_kernel_set_mem_table(struct vhost_dev *dev, + struct vhost_memory *mem) +{ + return vhost_kernel_call(dev, VHOST_SET_MEM_TABLE, mem); +} + +static int vhost_kernel_set_vring_addr(struct vhost_dev *dev, + struct vhost_vring_addr *addr) +{ + return vhost_kernel_call(dev, VHOST_SET_VRING_ADDR, addr); +} + +static int vhost_kernel_set_vring_endian(struct vhost_dev *dev, + struct vhost_vring_state *ring) +{ + return vhost_kernel_call(dev, VHOST_SET_VRING_ENDIAN, ring); +} + +static int vhost_kernel_set_vring_num(struct vhost_dev *dev, + struct vhost_vring_state *ring) +{ + return vhost_kernel_call(dev, VHOST_SET_VRING_NUM, ring); +} + +static int vhost_kernel_set_vring_base(struct vhost_dev *dev, + struct vhost_vring_state *ring) +{ + return vhost_kernel_call(dev, VHOST_SET_VRING_BASE, ring); +} + +static int vhost_kernel_get_vring_base(struct vhost_dev *dev, + struct vhost_vring_state *ring) +{ + return vhost_kernel_call(dev, VHOST_GET_VRING_BASE, ring); +} + +static int vhost_kernel_set_vring_kick(struct vhost_dev *dev, + struct vhost_vring_file *file) +{ + return vhost_kernel_call(dev, VHOST_SET_VRING_KICK, file); +} + +static int vhost_kernel_set_vring_call(struct vhost_dev *dev, + struct vhost_vring_file *file) +{ + return vhost_kernel_call(dev, VHOST_SET_VRING_CALL, file); +} + +static int vhost_kernel_set_vring_busyloop_timeout(struct vhost_dev *dev, + struct vhost_vring_state *s) +{ + return vhost_kernel_call(dev, VHOST_SET_VRING_BUSYLOOP_TIMEOUT, s); +} + +static int vhost_kernel_set_features(struct vhost_dev *dev, + uint64_t features) +{ + return vhost_kernel_call(dev, VHOST_SET_FEATURES, &features); +} + +static int vhost_kernel_set_backend_cap(struct vhost_dev *dev) +{ + uint64_t features; + uint64_t f = 0x1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2; + int r; + + if (vhost_kernel_call(dev, 
VHOST_GET_BACKEND_FEATURES, &features)) { + return 0; + } + + features &= f; + r = vhost_kernel_call(dev, VHOST_SET_BACKEND_FEATURES, + &features); + if (r) { + return 0; + } + + dev->backend_cap = features; + + return 0; +} + +static int vhost_kernel_get_features(struct vhost_dev *dev, + uint64_t *features) +{ + return vhost_kernel_call(dev, VHOST_GET_FEATURES, features); +} + +static int vhost_kernel_set_owner(struct vhost_dev *dev) +{ + return vhost_kernel_call(dev, VHOST_SET_OWNER, NULL); +} + +static int vhost_kernel_reset_device(struct vhost_dev *dev) +{ + return vhost_kernel_call(dev, VHOST_RESET_OWNER, NULL); +} + +static int vhost_kernel_get_vq_index(struct vhost_dev *dev, int idx) +{ + assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs); + + return idx - dev->vq_index; +} + +#ifdef CONFIG_VHOST_VSOCK +static int vhost_kernel_vsock_set_guest_cid(struct vhost_dev *dev, + uint64_t guest_cid) +{ + return vhost_kernel_call(dev, VHOST_VSOCK_SET_GUEST_CID, &guest_cid); +} + +static int vhost_kernel_vsock_set_running(struct vhost_dev *dev, int start) +{ + return vhost_kernel_call(dev, VHOST_VSOCK_SET_RUNNING, &start); +} +#endif /* CONFIG_VHOST_VSOCK */ + +static void vhost_kernel_iotlb_read(void *opaque) +{ + struct vhost_dev *dev = opaque; + ssize_t len; + + if (dev->backend_cap & + (0x1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2)) { + struct vhost_msg_v2 msg; + + while ((len = read((uintptr_t)dev->opaque, &msg, sizeof msg)) > 0) { + if (len < sizeof msg) { + error_report("Wrong vhost message len: %d", (int)len); + break; + } + if (msg.type != VHOST_IOTLB_MSG_V2) { + error_report("Unknown vhost iotlb message type"); + break; + } + + vhost_backend_handle_iotlb_msg(dev, &msg.iotlb); + } + } else { + struct vhost_msg msg; + + while ((len = read((uintptr_t)dev->opaque, &msg, sizeof msg)) > 0) { + if (len < sizeof msg) { + error_report("Wrong vhost message len: %d", (int)len); + break; + } + if (msg.type != VHOST_IOTLB_MSG) { + error_report("Unknown vhost iotlb message type"); + break; + } + + vhost_backend_handle_iotlb_msg(dev, &msg.iotlb); + } + } +} + +static int vhost_kernel_send_device_iotlb_msg(struct vhost_dev *dev, + struct vhost_iotlb_msg *imsg) +{ + if (dev->backend_cap & (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2)) { + struct vhost_msg_v2 msg = {}; + + msg.type = VHOST_IOTLB_MSG_V2; + msg.iotlb = *imsg; + + if (write((uintptr_t)dev->opaque, &msg, sizeof msg) != sizeof msg) { + error_report("Fail to update device iotlb"); + return -EFAULT; + } + } else { + struct vhost_msg msg = {}; + + msg.type = VHOST_IOTLB_MSG; + msg.iotlb = *imsg; + + if (write((uintptr_t)dev->opaque, &msg, sizeof msg) != sizeof msg) { + error_report("Fail to update device iotlb"); + return -EFAULT; + } + } + + return 0; +} + +static void vhost_kernel_set_iotlb_callback(struct vhost_dev *dev, + int enabled) +{ + if (enabled) + qemu_set_fd_handler((uintptr_t)dev->opaque, + vhost_kernel_iotlb_read, NULL, dev); + else + qemu_set_fd_handler((uintptr_t)dev->opaque, NULL, NULL, NULL); +} + +const VhostOps kernel_ops = { + .backend_type = VHOST_BACKEND_TYPE_KERNEL, + .vhost_backend_init = vhost_kernel_init, + .vhost_backend_cleanup = vhost_kernel_cleanup, + .vhost_backend_memslots_limit = vhost_kernel_memslots_limit, + .vhost_net_set_backend = vhost_kernel_net_set_backend, + .vhost_scsi_set_endpoint = vhost_kernel_scsi_set_endpoint, + .vhost_scsi_clear_endpoint = vhost_kernel_scsi_clear_endpoint, + .vhost_scsi_get_abi_version = vhost_kernel_scsi_get_abi_version, + .vhost_set_log_base = vhost_kernel_set_log_base, + 
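+ /* The remaining ops map one-to-one onto VHOST_* ioctls on the vhost fd */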
.vhost_set_mem_table = vhost_kernel_set_mem_table, + .vhost_set_vring_addr = vhost_kernel_set_vring_addr, + .vhost_set_vring_endian = vhost_kernel_set_vring_endian, + .vhost_set_vring_num = vhost_kernel_set_vring_num, + .vhost_set_vring_base = vhost_kernel_set_vring_base, + .vhost_get_vring_base = vhost_kernel_get_vring_base, + .vhost_set_vring_kick = vhost_kernel_set_vring_kick, + .vhost_set_vring_call = vhost_kernel_set_vring_call, + .vhost_set_vring_busyloop_timeout = + vhost_kernel_set_vring_busyloop_timeout, + .vhost_set_features = vhost_kernel_set_features, + .vhost_get_features = vhost_kernel_get_features, + .vhost_set_backend_cap = vhost_kernel_set_backend_cap, + .vhost_set_owner = vhost_kernel_set_owner, + .vhost_reset_device = vhost_kernel_reset_device, + .vhost_get_vq_index = vhost_kernel_get_vq_index, +#ifdef CONFIG_VHOST_VSOCK + .vhost_vsock_set_guest_cid = vhost_kernel_vsock_set_guest_cid, + .vhost_vsock_set_running = vhost_kernel_vsock_set_running, +#endif /* CONFIG_VHOST_VSOCK */ + .vhost_set_iotlb_callback = vhost_kernel_set_iotlb_callback, + .vhost_send_device_iotlb_msg = vhost_kernel_send_device_iotlb_msg, +}; +#endif + +int vhost_backend_update_device_iotlb(struct vhost_dev *dev, + uint64_t iova, uint64_t uaddr, + uint64_t len, + IOMMUAccessFlags perm) +{ + struct vhost_iotlb_msg imsg; + + imsg.iova = iova; + imsg.uaddr = uaddr; + imsg.size = len; + imsg.type = VHOST_IOTLB_UPDATE; + + switch (perm) { + case IOMMU_RO: + imsg.perm = VHOST_ACCESS_RO; + break; + case IOMMU_WO: + imsg.perm = VHOST_ACCESS_WO; + break; + case IOMMU_RW: + imsg.perm = VHOST_ACCESS_RW; + break; + default: + return -EINVAL; + } + + if (dev->vhost_ops && dev->vhost_ops->vhost_send_device_iotlb_msg) + return dev->vhost_ops->vhost_send_device_iotlb_msg(dev, &imsg); + + return -ENODEV; +} + +int vhost_backend_invalidate_device_iotlb(struct vhost_dev *dev, + uint64_t iova, uint64_t len) +{ + struct vhost_iotlb_msg imsg; + + imsg.iova = iova; + imsg.size = len; + imsg.type = VHOST_IOTLB_INVALIDATE; + + if (dev->vhost_ops && dev->vhost_ops->vhost_send_device_iotlb_msg) + return dev->vhost_ops->vhost_send_device_iotlb_msg(dev, &imsg); + + return -ENODEV; +} + +int vhost_backend_handle_iotlb_msg(struct vhost_dev *dev, + struct vhost_iotlb_msg *imsg) +{ + int ret = 0; + + if (unlikely(!dev->vdev)) { + error_report("Unexpected IOTLB message when virtio device is stopped"); + return -EINVAL; + } + + switch (imsg->type) { + case VHOST_IOTLB_MISS: + ret = vhost_device_iotlb_miss(dev, imsg->iova, + imsg->perm != VHOST_ACCESS_RO); + break; + case VHOST_IOTLB_ACCESS_FAIL: + /* FIXME: report device iotlb error */ + error_report("Access failure IOTLB message type not supported"); + ret = -ENOTSUP; + break; + case VHOST_IOTLB_UPDATE: + case VHOST_IOTLB_INVALIDATE: + default: + error_report("Unexpected IOTLB message type"); + ret = -EINVAL; + break; + } + + return ret; +} diff --git a/hw/virtio/vhost-scsi-pci.c b/hw/virtio/vhost-scsi-pci.c new file mode 100644 index 000000000..cb71a294f --- /dev/null +++ b/hw/virtio/vhost-scsi-pci.c @@ -0,0 +1,104 @@ +/* + * Vhost scsi PCI bindings + * + * Copyright IBM, Corp. 2011 + * + * Authors: + * Stefan Hajnoczi + * + * Changes for QEMU mainline + tcm_vhost kernel upstream: + * Nicholas Bellinger + * + * This work is licensed under the terms of the GNU LGPL, version 2 or later. + * See the COPYING.LIB file in the top-level directory. 
+ * + */ + +#include "qemu/osdep.h" + +#include "standard-headers/linux/virtio_pci.h" +#include "hw/qdev-properties.h" +#include "hw/virtio/vhost-scsi.h" +#include "qapi/error.h" +#include "qemu/module.h" +#include "virtio-pci.h" +#include "qom/object.h" + +typedef struct VHostSCSIPCI VHostSCSIPCI; + +/* + * vhost-scsi-pci: This extends VirtioPCIProxy. + */ +#define TYPE_VHOST_SCSI_PCI "vhost-scsi-pci-base" +DECLARE_INSTANCE_CHECKER(VHostSCSIPCI, VHOST_SCSI_PCI, + TYPE_VHOST_SCSI_PCI) + +struct VHostSCSIPCI { + VirtIOPCIProxy parent_obj; + VHostSCSI vdev; +}; + +static Property vhost_scsi_pci_properties[] = { + DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, + DEV_NVECTORS_UNSPECIFIED), + DEFINE_PROP_END_OF_LIST(), +}; + +static void vhost_scsi_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp) +{ + VHostSCSIPCI *dev = VHOST_SCSI_PCI(vpci_dev); + DeviceState *vdev = DEVICE(&dev->vdev); + VirtIOSCSIConf *conf = &dev->vdev.parent_obj.parent_obj.conf; + + if (conf->num_queues == VIRTIO_SCSI_AUTO_NUM_QUEUES) { + conf->num_queues = + virtio_pci_optimal_num_queues(VIRTIO_SCSI_VQ_NUM_FIXED); + } + + if (vpci_dev->nvectors == DEV_NVECTORS_UNSPECIFIED) { + vpci_dev->nvectors = conf->num_queues + VIRTIO_SCSI_VQ_NUM_FIXED + 1; + } + + qdev_realize(vdev, BUS(&vpci_dev->bus), errp); +} + +static void vhost_scsi_pci_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass); + PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass); + k->realize = vhost_scsi_pci_realize; + set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); + device_class_set_props(dc, vhost_scsi_pci_properties); + pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET; + pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_SCSI; + pcidev_k->revision = 0x00; + pcidev_k->class_id = PCI_CLASS_STORAGE_SCSI; +} + +static void vhost_scsi_pci_instance_init(Object *obj) +{ + VHostSCSIPCI *dev = VHOST_SCSI_PCI(obj); + + virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev), + TYPE_VHOST_SCSI); + object_property_add_alias(obj, "bootindex", OBJECT(&dev->vdev), + "bootindex"); +} + +static const VirtioPCIDeviceTypeInfo vhost_scsi_pci_info = { + .base_name = TYPE_VHOST_SCSI_PCI, + .generic_name = "vhost-scsi-pci", + .transitional_name = "vhost-scsi-pci-transitional", + .non_transitional_name = "vhost-scsi-pci-non-transitional", + .instance_size = sizeof(VHostSCSIPCI), + .instance_init = vhost_scsi_pci_instance_init, + .class_init = vhost_scsi_pci_class_init, +}; + +static void vhost_scsi_pci_register(void) +{ + virtio_pci_types_register(&vhost_scsi_pci_info); +} + +type_init(vhost_scsi_pci_register) diff --git a/hw/virtio/vhost-stub.c b/hw/virtio/vhost-stub.c new file mode 100644 index 000000000..c175148fc --- /dev/null +++ b/hw/virtio/vhost-stub.c @@ -0,0 +1,17 @@ +#include "qemu/osdep.h" +#include "hw/virtio/vhost.h" +#include "hw/virtio/vhost-user.h" + +bool vhost_has_free_slot(void) +{ + return true; +} + +bool vhost_user_init(VhostUserState *user, CharBackend *chr, Error **errp) +{ + return false; +} + +void vhost_user_cleanup(VhostUserState *user) +{ +} diff --git a/hw/virtio/vhost-user-blk-pci.c b/hw/virtio/vhost-user-blk-pci.c new file mode 100644 index 000000000..33b404d8a --- /dev/null +++ b/hw/virtio/vhost-user-blk-pci.c @@ -0,0 +1,109 @@ +/* + * Vhost user blk PCI Bindings + * + * Copyright(C) 2017 Intel Corporation. 
+ * + * Authors: + * Changpeng Liu + * + * Largely based on the "vhost-user-scsi.c" and "vhost-scsi.c" implemented by: + * Felipe Franciosi + * Stefan Hajnoczi + * Nicholas Bellinger + * + * This work is licensed under the terms of the GNU LGPL, version 2 or later. + * See the COPYING.LIB file in the top-level directory. + * + */ + +#include "qemu/osdep.h" + +#include "standard-headers/linux/virtio_pci.h" +#include "hw/virtio/virtio.h" +#include "hw/virtio/vhost-user-blk.h" +#include "hw/pci/pci.h" +#include "hw/qdev-properties.h" +#include "qapi/error.h" +#include "qemu/error-report.h" +#include "qemu/module.h" +#include "virtio-pci.h" +#include "qom/object.h" + +typedef struct VHostUserBlkPCI VHostUserBlkPCI; + +/* + * vhost-user-blk-pci: This extends VirtioPCIProxy. + */ +#define TYPE_VHOST_USER_BLK_PCI "vhost-user-blk-pci-base" +DECLARE_INSTANCE_CHECKER(VHostUserBlkPCI, VHOST_USER_BLK_PCI, + TYPE_VHOST_USER_BLK_PCI) + +struct VHostUserBlkPCI { + VirtIOPCIProxy parent_obj; + VHostUserBlk vdev; +}; + +static Property vhost_user_blk_pci_properties[] = { + DEFINE_PROP_UINT32("class", VirtIOPCIProxy, class_code, 0), + DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, + DEV_NVECTORS_UNSPECIFIED), + DEFINE_PROP_END_OF_LIST(), +}; + +static void vhost_user_blk_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp) +{ + VHostUserBlkPCI *dev = VHOST_USER_BLK_PCI(vpci_dev); + DeviceState *vdev = DEVICE(&dev->vdev); + + if (dev->vdev.num_queues == VHOST_USER_BLK_AUTO_NUM_QUEUES) { + dev->vdev.num_queues = virtio_pci_optimal_num_queues(0); + } + + if (vpci_dev->nvectors == DEV_NVECTORS_UNSPECIFIED) { + vpci_dev->nvectors = dev->vdev.num_queues + 1; + } + + qdev_realize(vdev, BUS(&vpci_dev->bus), errp); +} + +static void vhost_user_blk_pci_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass); + PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass); + + set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); + device_class_set_props(dc, vhost_user_blk_pci_properties); + k->realize = vhost_user_blk_pci_realize; + pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET; + pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_BLOCK; + pcidev_k->revision = VIRTIO_PCI_ABI_VERSION; + pcidev_k->class_id = PCI_CLASS_STORAGE_SCSI; +} + +static void vhost_user_blk_pci_instance_init(Object *obj) +{ + VHostUserBlkPCI *dev = VHOST_USER_BLK_PCI(obj); + + virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev), + TYPE_VHOST_USER_BLK); + object_property_add_alias(obj, "bootindex", OBJECT(&dev->vdev), + "bootindex"); +} + +static const VirtioPCIDeviceTypeInfo vhost_user_blk_pci_info = { + .base_name = TYPE_VHOST_USER_BLK_PCI, + .generic_name = "vhost-user-blk-pci", + .transitional_name = "vhost-user-blk-pci-transitional", + .non_transitional_name = "vhost-user-blk-pci-non-transitional", + .instance_size = sizeof(VHostUserBlkPCI), + .instance_init = vhost_user_blk_pci_instance_init, + .class_init = vhost_user_blk_pci_class_init, +}; + +static void vhost_user_blk_pci_register(void) +{ + virtio_pci_types_register(&vhost_user_blk_pci_info); +} + +type_init(vhost_user_blk_pci_register) diff --git a/hw/virtio/vhost-user-fs-pci.c b/hw/virtio/vhost-user-fs-pci.c new file mode 100644 index 000000000..2ed8492b3 --- /dev/null +++ b/hw/virtio/vhost-user-fs-pci.c @@ -0,0 +1,88 @@ +/* + * Vhost-user filesystem virtio device PCI glue + * + * Copyright 2018-2019 Red Hat, Inc. + * + * Authors: + * Dr. 
David Alan Gilbert + * + * This work is licensed under the terms of the GNU GPL, version 2 or + * (at your option) any later version. See the COPYING file in the + * top-level directory. + */ + +#include "qemu/osdep.h" +#include "hw/qdev-properties.h" +#include "hw/virtio/vhost-user-fs.h" +#include "virtio-pci.h" +#include "qom/object.h" + +struct VHostUserFSPCI { + VirtIOPCIProxy parent_obj; + VHostUserFS vdev; +}; + +typedef struct VHostUserFSPCI VHostUserFSPCI; + +#define TYPE_VHOST_USER_FS_PCI "vhost-user-fs-pci-base" + +DECLARE_INSTANCE_CHECKER(VHostUserFSPCI, VHOST_USER_FS_PCI, + TYPE_VHOST_USER_FS_PCI) + +static Property vhost_user_fs_pci_properties[] = { + DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, + DEV_NVECTORS_UNSPECIFIED), + DEFINE_PROP_END_OF_LIST(), +}; + +static void vhost_user_fs_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp) +{ + VHostUserFSPCI *dev = VHOST_USER_FS_PCI(vpci_dev); + DeviceState *vdev = DEVICE(&dev->vdev); + + if (vpci_dev->nvectors == DEV_NVECTORS_UNSPECIFIED) { + /* Also reserve config change and hiprio queue vectors */ + vpci_dev->nvectors = dev->vdev.conf.num_request_queues + 2; + } + + qdev_realize(vdev, BUS(&vpci_dev->bus), errp); +} + +static void vhost_user_fs_pci_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass); + PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass); + k->realize = vhost_user_fs_pci_realize; + set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); + device_class_set_props(dc, vhost_user_fs_pci_properties); + pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET; + pcidev_k->device_id = 0; /* Set by virtio-pci based on virtio id */ + pcidev_k->revision = 0x00; + pcidev_k->class_id = PCI_CLASS_STORAGE_OTHER; +} + +static void vhost_user_fs_pci_instance_init(Object *obj) +{ + VHostUserFSPCI *dev = VHOST_USER_FS_PCI(obj); + + virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev), + TYPE_VHOST_USER_FS); + object_property_add_alias(obj, "bootindex", OBJECT(&dev->vdev), + "bootindex"); +} + +static const VirtioPCIDeviceTypeInfo vhost_user_fs_pci_info = { + .base_name = TYPE_VHOST_USER_FS_PCI, + .non_transitional_name = "vhost-user-fs-pci", + .instance_size = sizeof(VHostUserFSPCI), + .instance_init = vhost_user_fs_pci_instance_init, + .class_init = vhost_user_fs_pci_class_init, +}; + +static void vhost_user_fs_pci_register(void) +{ + virtio_pci_types_register(&vhost_user_fs_pci_info); +} + +type_init(vhost_user_fs_pci_register); diff --git a/hw/virtio/vhost-user-fs.c b/hw/virtio/vhost-user-fs.c new file mode 100644 index 000000000..c59595798 --- /dev/null +++ b/hw/virtio/vhost-user-fs.c @@ -0,0 +1,332 @@ +/* + * Vhost-user filesystem virtio device + * + * Copyright 2018-2019 Red Hat, Inc. + * + * Authors: + * Stefan Hajnoczi + * + * This work is licensed under the terms of the GNU GPL, version 2 or + * (at your option) any later version. See the COPYING file in the + * top-level directory. 
+ */ + +#include "qemu/osdep.h" +#include +#include "standard-headers/linux/virtio_fs.h" +#include "qapi/error.h" +#include "hw/qdev-properties.h" +#include "hw/qdev-properties-system.h" +#include "hw/virtio/virtio-bus.h" +#include "hw/virtio/virtio-access.h" +#include "qemu/error-report.h" +#include "hw/virtio/vhost-user-fs.h" +#include "monitor/monitor.h" +#include "sysemu/sysemu.h" + +static const int user_feature_bits[] = { + VIRTIO_F_VERSION_1, + VIRTIO_RING_F_INDIRECT_DESC, + VIRTIO_RING_F_EVENT_IDX, + VIRTIO_F_NOTIFY_ON_EMPTY, + VIRTIO_F_RING_PACKED, + VIRTIO_F_IOMMU_PLATFORM, + + VHOST_INVALID_FEATURE_BIT +}; + +static void vuf_get_config(VirtIODevice *vdev, uint8_t *config) +{ + VHostUserFS *fs = VHOST_USER_FS(vdev); + struct virtio_fs_config fscfg = {}; + + memcpy((char *)fscfg.tag, fs->conf.tag, + MIN(strlen(fs->conf.tag) + 1, sizeof(fscfg.tag))); + + virtio_stl_p(vdev, &fscfg.num_request_queues, fs->conf.num_request_queues); + + memcpy(config, &fscfg, sizeof(fscfg)); +} + +static void vuf_start(VirtIODevice *vdev) +{ + VHostUserFS *fs = VHOST_USER_FS(vdev); + BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev))); + VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); + int ret; + int i; + + if (!k->set_guest_notifiers) { + error_report("binding does not support guest notifiers"); + return; + } + + ret = vhost_dev_enable_notifiers(&fs->vhost_dev, vdev); + if (ret < 0) { + error_report("Error enabling host notifiers: %d", -ret); + return; + } + + ret = k->set_guest_notifiers(qbus->parent, fs->vhost_dev.nvqs, true); + if (ret < 0) { + error_report("Error binding guest notifier: %d", -ret); + goto err_host_notifiers; + } + + fs->vhost_dev.acked_features = vdev->guest_features; + ret = vhost_dev_start(&fs->vhost_dev, vdev); + if (ret < 0) { + error_report("Error starting vhost: %d", -ret); + goto err_guest_notifiers; + } + + /* + * guest_notifier_mask/pending not used yet, so just unmask + * everything here. virtio-pci will do the right thing by + * enabling/disabling irqfd. 
+ */ + for (i = 0; i < fs->vhost_dev.nvqs; i++) { + vhost_virtqueue_mask(&fs->vhost_dev, vdev, i, false); + } + + return; + +err_guest_notifiers: + k->set_guest_notifiers(qbus->parent, fs->vhost_dev.nvqs, false); +err_host_notifiers: + vhost_dev_disable_notifiers(&fs->vhost_dev, vdev); +} + +static void vuf_stop(VirtIODevice *vdev) +{ + VHostUserFS *fs = VHOST_USER_FS(vdev); + BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev))); + VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); + int ret; + + if (!k->set_guest_notifiers) { + return; + } + + vhost_dev_stop(&fs->vhost_dev, vdev); + + ret = k->set_guest_notifiers(qbus->parent, fs->vhost_dev.nvqs, false); + if (ret < 0) { + error_report("vhost guest notifier cleanup failed: %d", ret); + return; + } + + vhost_dev_disable_notifiers(&fs->vhost_dev, vdev); +} + +static void vuf_set_status(VirtIODevice *vdev, uint8_t status) +{ + VHostUserFS *fs = VHOST_USER_FS(vdev); + bool should_start = status & VIRTIO_CONFIG_S_DRIVER_OK; + + if (!vdev->vm_running) { + should_start = false; + } + + if (fs->vhost_dev.started == should_start) { + return; + } + + if (should_start) { + vuf_start(vdev); + } else { + vuf_stop(vdev); + } +} + +static uint64_t vuf_get_features(VirtIODevice *vdev, + uint64_t features, + Error **errp) +{ + VHostUserFS *fs = VHOST_USER_FS(vdev); + + return vhost_get_features(&fs->vhost_dev, user_feature_bits, features); +} + +static void vuf_handle_output(VirtIODevice *vdev, VirtQueue *vq) +{ + /* + * Not normally called; it's the daemon that handles the queue; + * however virtio's cleanup path can call this. + */ +} + +static void vuf_guest_notifier_mask(VirtIODevice *vdev, int idx, + bool mask) +{ + VHostUserFS *fs = VHOST_USER_FS(vdev); + + vhost_virtqueue_mask(&fs->vhost_dev, vdev, idx, mask); +} + +static bool vuf_guest_notifier_pending(VirtIODevice *vdev, int idx) +{ + VHostUserFS *fs = VHOST_USER_FS(vdev); + + return vhost_virtqueue_pending(&fs->vhost_dev, idx); +} + +static void vuf_device_realize(DeviceState *dev, Error **errp) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(dev); + VHostUserFS *fs = VHOST_USER_FS(dev); + unsigned int i; + size_t len; + int ret; + + if (!fs->conf.chardev.chr) { + error_setg(errp, "missing chardev"); + return; + } + + if (!fs->conf.tag) { + error_setg(errp, "missing tag property"); + return; + } + len = strlen(fs->conf.tag); + if (len == 0) { + error_setg(errp, "tag property cannot be empty"); + return; + } + if (len > sizeof_field(struct virtio_fs_config, tag)) { + error_setg(errp, "tag property must be %zu bytes or less", + sizeof_field(struct virtio_fs_config, tag)); + return; + } + + if (fs->conf.num_request_queues == 0) { + error_setg(errp, "num-request-queues property must be larger than 0"); + return; + } + + if (!is_power_of_2(fs->conf.queue_size)) { + error_setg(errp, "queue-size property must be a power of 2"); + return; + } + + if (fs->conf.queue_size > VIRTQUEUE_MAX_SIZE) { + error_setg(errp, "queue-size property must be %u or smaller", + VIRTQUEUE_MAX_SIZE); + return; + } + + if (!vhost_user_init(&fs->vhost_user, &fs->conf.chardev, errp)) { + return; + } + + virtio_init(vdev, "vhost-user-fs", VIRTIO_ID_FS, + sizeof(struct virtio_fs_config)); + + /* Hiprio queue */ + fs->hiprio_vq = virtio_add_queue(vdev, fs->conf.queue_size, vuf_handle_output); + + /* Request queues */ + fs->req_vqs = g_new(VirtQueue *, fs->conf.num_request_queues); + for (i = 0; i < fs->conf.num_request_queues; i++) { + fs->req_vqs[i] = virtio_add_queue(vdev, fs->conf.queue_size, vuf_handle_output); + } + + /* 1 high prio 
queue, plus the number configured */ + fs->vhost_dev.nvqs = 1 + fs->conf.num_request_queues; + fs->vhost_dev.vqs = g_new0(struct vhost_virtqueue, fs->vhost_dev.nvqs); + ret = vhost_dev_init(&fs->vhost_dev, &fs->vhost_user, + VHOST_BACKEND_TYPE_USER, 0, errp); + if (ret < 0) { + goto err_virtio; + } + + return; + +err_virtio: + vhost_user_cleanup(&fs->vhost_user); + virtio_delete_queue(fs->hiprio_vq); + for (i = 0; i < fs->conf.num_request_queues; i++) { + virtio_delete_queue(fs->req_vqs[i]); + } + g_free(fs->req_vqs); + virtio_cleanup(vdev); + g_free(fs->vhost_dev.vqs); + return; +} + +static void vuf_device_unrealize(DeviceState *dev) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(dev); + VHostUserFS *fs = VHOST_USER_FS(dev); + int i; + + /* This will stop vhost backend if appropriate. */ + vuf_set_status(vdev, 0); + + vhost_dev_cleanup(&fs->vhost_dev); + + vhost_user_cleanup(&fs->vhost_user); + + virtio_delete_queue(fs->hiprio_vq); + for (i = 0; i < fs->conf.num_request_queues; i++) { + virtio_delete_queue(fs->req_vqs[i]); + } + g_free(fs->req_vqs); + virtio_cleanup(vdev); + g_free(fs->vhost_dev.vqs); + fs->vhost_dev.vqs = NULL; +} + +static const VMStateDescription vuf_vmstate = { + .name = "vhost-user-fs", + .unmigratable = 1, +}; + +static Property vuf_properties[] = { + DEFINE_PROP_CHR("chardev", VHostUserFS, conf.chardev), + DEFINE_PROP_STRING("tag", VHostUserFS, conf.tag), + DEFINE_PROP_UINT16("num-request-queues", VHostUserFS, + conf.num_request_queues, 1), + DEFINE_PROP_UINT16("queue-size", VHostUserFS, conf.queue_size, 128), + DEFINE_PROP_END_OF_LIST(), +}; + +static void vuf_instance_init(Object *obj) +{ + VHostUserFS *fs = VHOST_USER_FS(obj); + + device_add_bootindex_property(obj, &fs->bootindex, "bootindex", + "/filesystem@0", DEVICE(obj)); +} + +static void vuf_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass); + + device_class_set_props(dc, vuf_properties); + dc->vmsd = &vuf_vmstate; + set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); + vdc->realize = vuf_device_realize; + vdc->unrealize = vuf_device_unrealize; + vdc->get_features = vuf_get_features; + vdc->get_config = vuf_get_config; + vdc->set_status = vuf_set_status; + vdc->guest_notifier_mask = vuf_guest_notifier_mask; + vdc->guest_notifier_pending = vuf_guest_notifier_pending; +} + +static const TypeInfo vuf_info = { + .name = TYPE_VHOST_USER_FS, + .parent = TYPE_VIRTIO_DEVICE, + .instance_size = sizeof(VHostUserFS), + .instance_init = vuf_instance_init, + .class_init = vuf_class_init, +}; + +static void vuf_register_types(void) +{ + type_register_static(&vuf_info); +} + +type_init(vuf_register_types) diff --git a/hw/virtio/vhost-user-i2c-pci.c b/hw/virtio/vhost-user-i2c-pci.c new file mode 100644 index 000000000..70b7b65fd --- /dev/null +++ b/hw/virtio/vhost-user-i2c-pci.c @@ -0,0 +1,69 @@ +/* + * Vhost-user i2c virtio device PCI glue + * + * Copyright (c) 2021 Viresh Kumar + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "hw/qdev-properties.h" +#include "hw/virtio/vhost-user-i2c.h" +#include "virtio-pci.h" + +struct VHostUserI2CPCI { + VirtIOPCIProxy parent_obj; + VHostUserI2C vdev; +}; + +typedef struct VHostUserI2CPCI VHostUserI2CPCI; + +#define TYPE_VHOST_USER_I2C_PCI "vhost-user-i2c-pci-base" + +DECLARE_INSTANCE_CHECKER(VHostUserI2CPCI, VHOST_USER_I2C_PCI, + TYPE_VHOST_USER_I2C_PCI) + +static void vhost_user_i2c_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp) +{ + VHostUserI2CPCI 
*dev = VHOST_USER_I2C_PCI(vpci_dev); + DeviceState *vdev = DEVICE(&dev->vdev); + + vpci_dev->nvectors = 1; + qdev_realize(vdev, BUS(&vpci_dev->bus), errp); +} + +static void vhost_user_i2c_pci_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass); + PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass); + k->realize = vhost_user_i2c_pci_realize; + set_bit(DEVICE_CATEGORY_INPUT, dc->categories); + pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET; + pcidev_k->device_id = 0; /* Set by virtio-pci based on virtio id */ + pcidev_k->revision = 0x00; + pcidev_k->class_id = PCI_CLASS_COMMUNICATION_OTHER; +} + +static void vhost_user_i2c_pci_instance_init(Object *obj) +{ + VHostUserI2CPCI *dev = VHOST_USER_I2C_PCI(obj); + + virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev), + TYPE_VHOST_USER_I2C); +} + +static const VirtioPCIDeviceTypeInfo vhost_user_i2c_pci_info = { + .base_name = TYPE_VHOST_USER_I2C_PCI, + .non_transitional_name = "vhost-user-i2c-pci", + .instance_size = sizeof(VHostUserI2CPCI), + .instance_init = vhost_user_i2c_pci_instance_init, + .class_init = vhost_user_i2c_pci_class_init, +}; + +static void vhost_user_i2c_pci_register(void) +{ + virtio_pci_types_register(&vhost_user_i2c_pci_info); +} + +type_init(vhost_user_i2c_pci_register); diff --git a/hw/virtio/vhost-user-i2c.c b/hw/virtio/vhost-user-i2c.c new file mode 100644 index 000000000..d172632bb --- /dev/null +++ b/hw/virtio/vhost-user-i2c.c @@ -0,0 +1,288 @@ +/* + * Vhost-user i2c virtio device + * + * Copyright (c) 2021 Viresh Kumar + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "hw/qdev-properties.h" +#include "hw/virtio/virtio-bus.h" +#include "hw/virtio/vhost-user-i2c.h" +#include "qemu/error-report.h" +#include "standard-headers/linux/virtio_ids.h" + +/* Remove this once the header is updated in Linux kernel */ +#ifndef VIRTIO_ID_I2C_ADAPTER +#define VIRTIO_ID_I2C_ADAPTER 34 +#endif + +static void vu_i2c_start(VirtIODevice *vdev) +{ + BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev))); + VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); + VHostUserI2C *i2c = VHOST_USER_I2C(vdev); + int ret, i; + + if (!k->set_guest_notifiers) { + error_report("binding does not support guest notifiers"); + return; + } + + ret = vhost_dev_enable_notifiers(&i2c->vhost_dev, vdev); + if (ret < 0) { + error_report("Error enabling host notifiers: %d", -ret); + return; + } + + ret = k->set_guest_notifiers(qbus->parent, i2c->vhost_dev.nvqs, true); + if (ret < 0) { + error_report("Error binding guest notifier: %d", -ret); + goto err_host_notifiers; + } + + i2c->vhost_dev.acked_features = vdev->guest_features; + + ret = vhost_dev_start(&i2c->vhost_dev, vdev); + if (ret < 0) { + error_report("Error starting vhost-user-i2c: %d", -ret); + goto err_guest_notifiers; + } + + /* + * guest_notifier_mask/pending not used yet, so just unmask + * everything here. virtio-pci will do the right thing by + * enabling/disabling irqfd. 
+ */ + for (i = 0; i < i2c->vhost_dev.nvqs; i++) { + vhost_virtqueue_mask(&i2c->vhost_dev, vdev, i, false); + } + + return; + +err_guest_notifiers: + k->set_guest_notifiers(qbus->parent, i2c->vhost_dev.nvqs, false); +err_host_notifiers: + vhost_dev_disable_notifiers(&i2c->vhost_dev, vdev); +} + +static void vu_i2c_stop(VirtIODevice *vdev) +{ + VHostUserI2C *i2c = VHOST_USER_I2C(vdev); + BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev))); + VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); + int ret; + + if (!k->set_guest_notifiers) { + return; + } + + vhost_dev_stop(&i2c->vhost_dev, vdev); + + ret = k->set_guest_notifiers(qbus->parent, i2c->vhost_dev.nvqs, false); + if (ret < 0) { + error_report("vhost guest notifier cleanup failed: %d", ret); + return; + } + + vhost_dev_disable_notifiers(&i2c->vhost_dev, vdev); +} + +static void vu_i2c_set_status(VirtIODevice *vdev, uint8_t status) +{ + VHostUserI2C *i2c = VHOST_USER_I2C(vdev); + bool should_start = status & VIRTIO_CONFIG_S_DRIVER_OK; + + if (!vdev->vm_running) { + should_start = false; + } + + if (i2c->vhost_dev.started == should_start) { + return; + } + + if (should_start) { + vu_i2c_start(vdev); + } else { + vu_i2c_stop(vdev); + } +} + +static uint64_t vu_i2c_get_features(VirtIODevice *vdev, + uint64_t requested_features, Error **errp) +{ + /* No feature bits used yet */ + return requested_features; +} + +static void vu_i2c_handle_output(VirtIODevice *vdev, VirtQueue *vq) +{ + /* + * Not normally called; it's the daemon that handles the queue; + * however virtio's cleanup path can call this. + */ +} + +static void vu_i2c_guest_notifier_mask(VirtIODevice *vdev, int idx, bool mask) +{ + VHostUserI2C *i2c = VHOST_USER_I2C(vdev); + + vhost_virtqueue_mask(&i2c->vhost_dev, vdev, idx, mask); +} + +static bool vu_i2c_guest_notifier_pending(VirtIODevice *vdev, int idx) +{ + VHostUserI2C *i2c = VHOST_USER_I2C(vdev); + + return vhost_virtqueue_pending(&i2c->vhost_dev, idx); +} + +static void do_vhost_user_cleanup(VirtIODevice *vdev, VHostUserI2C *i2c) +{ + vhost_user_cleanup(&i2c->vhost_user); + virtio_delete_queue(i2c->vq); + virtio_cleanup(vdev); + g_free(i2c->vhost_dev.vqs); + i2c->vhost_dev.vqs = NULL; +} + +static int vu_i2c_connect(DeviceState *dev) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(dev); + VHostUserI2C *i2c = VHOST_USER_I2C(vdev); + + if (i2c->connected) { + return 0; + } + i2c->connected = true; + + /* restore vhost state */ + if (virtio_device_started(vdev, vdev->status)) { + vu_i2c_start(vdev); + } + + return 0; +} + +static void vu_i2c_disconnect(DeviceState *dev) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(dev); + VHostUserI2C *i2c = VHOST_USER_I2C(vdev); + + if (!i2c->connected) { + return; + } + i2c->connected = false; + + if (i2c->vhost_dev.started) { + vu_i2c_stop(vdev); + } +} + +static void vu_i2c_event(void *opaque, QEMUChrEvent event) +{ + DeviceState *dev = opaque; + VirtIODevice *vdev = VIRTIO_DEVICE(dev); + VHostUserI2C *i2c = VHOST_USER_I2C(vdev); + + switch (event) { + case CHR_EVENT_OPENED: + if (vu_i2c_connect(dev) < 0) { + qemu_chr_fe_disconnect(&i2c->chardev); + return; + } + break; + case CHR_EVENT_CLOSED: + vu_i2c_disconnect(dev); + break; + case CHR_EVENT_BREAK: + case CHR_EVENT_MUX_IN: + case CHR_EVENT_MUX_OUT: + /* Ignore */ + break; + } +} + +static void vu_i2c_device_realize(DeviceState *dev, Error **errp) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(dev); + VHostUserI2C *i2c = VHOST_USER_I2C(dev); + int ret; + + if (!i2c->chardev.chr) { + error_setg(errp, "vhost-user-i2c: missing chardev"); + return; + } + + 
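+ /* Establish the vhost-user protocol state over the configured chardev */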
if (!vhost_user_init(&i2c->vhost_user, &i2c->chardev, errp)) { + return; + } + + virtio_init(vdev, "vhost-user-i2c", VIRTIO_ID_I2C_ADAPTER, 0); + + i2c->vhost_dev.nvqs = 1; + i2c->vq = virtio_add_queue(vdev, 4, vu_i2c_handle_output); + i2c->vhost_dev.vqs = g_new0(struct vhost_virtqueue, i2c->vhost_dev.nvqs); + + ret = vhost_dev_init(&i2c->vhost_dev, &i2c->vhost_user, + VHOST_BACKEND_TYPE_USER, 0, errp); + if (ret < 0) { + do_vhost_user_cleanup(vdev, i2c); + } + + qemu_chr_fe_set_handlers(&i2c->chardev, NULL, NULL, vu_i2c_event, NULL, + dev, NULL, true); +} + +static void vu_i2c_device_unrealize(DeviceState *dev) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(dev); + VHostUserI2C *i2c = VHOST_USER_I2C(dev); + + /* This will stop vhost backend if appropriate. */ + vu_i2c_set_status(vdev, 0); + vhost_dev_cleanup(&i2c->vhost_dev); + do_vhost_user_cleanup(vdev, i2c); +} + +static const VMStateDescription vu_i2c_vmstate = { + .name = "vhost-user-i2c", + .unmigratable = 1, +}; + +static Property vu_i2c_properties[] = { + DEFINE_PROP_CHR("chardev", VHostUserI2C, chardev), + DEFINE_PROP_END_OF_LIST(), +}; + +static void vu_i2c_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass); + + device_class_set_props(dc, vu_i2c_properties); + dc->vmsd = &vu_i2c_vmstate; + set_bit(DEVICE_CATEGORY_INPUT, dc->categories); + vdc->realize = vu_i2c_device_realize; + vdc->unrealize = vu_i2c_device_unrealize; + vdc->get_features = vu_i2c_get_features; + vdc->set_status = vu_i2c_set_status; + vdc->guest_notifier_mask = vu_i2c_guest_notifier_mask; + vdc->guest_notifier_pending = vu_i2c_guest_notifier_pending; +} + +static const TypeInfo vu_i2c_info = { + .name = TYPE_VHOST_USER_I2C, + .parent = TYPE_VIRTIO_DEVICE, + .instance_size = sizeof(VHostUserI2C), + .class_init = vu_i2c_class_init, +}; + +static void vu_i2c_register_types(void) +{ + type_register_static(&vu_i2c_info); +} + +type_init(vu_i2c_register_types) diff --git a/hw/virtio/vhost-user-input-pci.c b/hw/virtio/vhost-user-input-pci.c new file mode 100644 index 000000000..c9d3e9113 --- /dev/null +++ b/hw/virtio/vhost-user-input-pci.c @@ -0,0 +1,50 @@ +/* + * This work is licensed under the terms of the GNU LGPL, version 2 or + * later. See the COPYING.LIB file in the top-level directory. 
+ */ + +#include "qemu/osdep.h" + +#include "hw/virtio/virtio.h" +#include "hw/virtio/virtio-input.h" +#include "qapi/error.h" +#include "qemu/error-report.h" +#include "virtio-pci.h" +#include "qom/object.h" + +typedef struct VHostUserInputPCI VHostUserInputPCI; + +#define TYPE_VHOST_USER_INPUT_PCI "vhost-user-input-pci" + +DECLARE_INSTANCE_CHECKER(VHostUserInputPCI, VHOST_USER_INPUT_PCI, + TYPE_VHOST_USER_INPUT_PCI) + +struct VHostUserInputPCI { + VirtIOPCIProxy parent_obj; + VHostUserInput vhi; +}; + +static void vhost_user_input_pci_instance_init(Object *obj) +{ + VHostUserInputPCI *dev = VHOST_USER_INPUT_PCI(obj); + + virtio_instance_init_common(obj, &dev->vhi, sizeof(dev->vhi), + TYPE_VHOST_USER_INPUT); + + object_property_add_alias(obj, "chardev", + OBJECT(&dev->vhi), "chardev"); +} + +static const VirtioPCIDeviceTypeInfo vhost_user_input_pci_info = { + .generic_name = TYPE_VHOST_USER_INPUT_PCI, + .parent = TYPE_VIRTIO_INPUT_PCI, + .instance_size = sizeof(VHostUserInputPCI), + .instance_init = vhost_user_input_pci_instance_init, +}; + +static void vhost_user_input_pci_register(void) +{ + virtio_pci_types_register(&vhost_user_input_pci_info); +} + +type_init(vhost_user_input_pci_register) diff --git a/hw/virtio/vhost-user-rng-pci.c b/hw/virtio/vhost-user-rng-pci.c new file mode 100644 index 000000000..c83dc8681 --- /dev/null +++ b/hw/virtio/vhost-user-rng-pci.c @@ -0,0 +1,79 @@ +/* + * Vhost-user RNG virtio device PCI glue + * + * Copyright (c) 2021 Mathieu Poirier + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "hw/qdev-properties.h" +#include "hw/virtio/vhost-user-rng.h" +#include "virtio-pci.h" + +struct VHostUserRNGPCI { + VirtIOPCIProxy parent_obj; + VHostUserRNG vdev; +}; + +typedef struct VHostUserRNGPCI VHostUserRNGPCI; + +#define TYPE_VHOST_USER_RNG_PCI "vhost-user-rng-pci-base" + +DECLARE_INSTANCE_CHECKER(VHostUserRNGPCI, VHOST_USER_RNG_PCI, + TYPE_VHOST_USER_RNG_PCI) + +static Property vhost_user_rng_pci_properties[] = { + DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, + DEV_NVECTORS_UNSPECIFIED), + DEFINE_PROP_END_OF_LIST(), +}; + +static void vhost_user_rng_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp) +{ + VHostUserRNGPCI *dev = VHOST_USER_RNG_PCI(vpci_dev); + DeviceState *vdev = DEVICE(&dev->vdev); + + if (vpci_dev->nvectors == DEV_NVECTORS_UNSPECIFIED) { + vpci_dev->nvectors = 1; + } + + qdev_realize(vdev, BUS(&vpci_dev->bus), errp); +} + +static void vhost_user_rng_pci_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass); + PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass); + k->realize = vhost_user_rng_pci_realize; + set_bit(DEVICE_CATEGORY_INPUT, dc->categories); + device_class_set_props(dc, vhost_user_rng_pci_properties); + pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET; + pcidev_k->device_id = 0; /* Set by virtio-pci based on virtio id */ + pcidev_k->revision = 0x00; + pcidev_k->class_id = PCI_CLASS_OTHERS; +} + +static void vhost_user_rng_pci_instance_init(Object *obj) +{ + VHostUserRNGPCI *dev = VHOST_USER_RNG_PCI(obj); + + virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev), + TYPE_VHOST_USER_RNG); +} + +static const VirtioPCIDeviceTypeInfo vhost_user_rng_pci_info = { + .base_name = TYPE_VHOST_USER_RNG_PCI, + .non_transitional_name = "vhost-user-rng-pci", + .instance_size = sizeof(VHostUserRNGPCI), + .instance_init = vhost_user_rng_pci_instance_init, + .class_init = vhost_user_rng_pci_class_init, +}; + 
+static void vhost_user_rng_pci_register(void) +{ + virtio_pci_types_register(&vhost_user_rng_pci_info); +} + +type_init(vhost_user_rng_pci_register); diff --git a/hw/virtio/vhost-user-rng.c b/hw/virtio/vhost-user-rng.c new file mode 100644 index 000000000..209ee5bf9 --- /dev/null +++ b/hw/virtio/vhost-user-rng.c @@ -0,0 +1,289 @@ +/* + * Vhost-user RNG virtio device + * + * Copyright (c) 2021 Mathieu Poirier + * + * Implementation seriously tailored on vhost-user-i2c.c + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "hw/qdev-properties.h" +#include "hw/virtio/virtio-bus.h" +#include "hw/virtio/vhost-user-rng.h" +#include "qemu/error-report.h" +#include "standard-headers/linux/virtio_ids.h" + +static void vu_rng_start(VirtIODevice *vdev) +{ + VHostUserRNG *rng = VHOST_USER_RNG(vdev); + BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev))); + VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); + int ret; + int i; + + if (!k->set_guest_notifiers) { + error_report("binding does not support guest notifiers"); + return; + } + + ret = vhost_dev_enable_notifiers(&rng->vhost_dev, vdev); + if (ret < 0) { + error_report("Error enabling host notifiers: %d", -ret); + return; + } + + ret = k->set_guest_notifiers(qbus->parent, rng->vhost_dev.nvqs, true); + if (ret < 0) { + error_report("Error binding guest notifier: %d", -ret); + goto err_host_notifiers; + } + + rng->vhost_dev.acked_features = vdev->guest_features; + ret = vhost_dev_start(&rng->vhost_dev, vdev); + if (ret < 0) { + error_report("Error starting vhost-user-rng: %d", -ret); + goto err_guest_notifiers; + } + + /* + * guest_notifier_mask/pending not used yet, so just unmask + * everything here. virtio-pci will do the right thing by + * enabling/disabling irqfd. + */ + for (i = 0; i < rng->vhost_dev.nvqs; i++) { + vhost_virtqueue_mask(&rng->vhost_dev, vdev, i, false); + } + + return; + +err_guest_notifiers: + k->set_guest_notifiers(qbus->parent, rng->vhost_dev.nvqs, false); +err_host_notifiers: + vhost_dev_disable_notifiers(&rng->vhost_dev, vdev); +} + +static void vu_rng_stop(VirtIODevice *vdev) +{ + VHostUserRNG *rng = VHOST_USER_RNG(vdev); + BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev))); + VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); + int ret; + + if (!k->set_guest_notifiers) { + return; + } + + vhost_dev_stop(&rng->vhost_dev, vdev); + + ret = k->set_guest_notifiers(qbus->parent, rng->vhost_dev.nvqs, false); + if (ret < 0) { + error_report("vhost guest notifier cleanup failed: %d", ret); + return; + } + + vhost_dev_disable_notifiers(&rng->vhost_dev, vdev); +} + +static void vu_rng_set_status(VirtIODevice *vdev, uint8_t status) +{ + VHostUserRNG *rng = VHOST_USER_RNG(vdev); + bool should_start = status & VIRTIO_CONFIG_S_DRIVER_OK; + + if (!vdev->vm_running) { + should_start = false; + } + + if (rng->vhost_dev.started == should_start) { + return; + } + + if (should_start) { + vu_rng_start(vdev); + } else { + vu_rng_stop(vdev); + } +} + +static uint64_t vu_rng_get_features(VirtIODevice *vdev, + uint64_t requested_features, Error **errp) +{ + /* No feature bits used yet */ + return requested_features; +} + +static void vu_rng_handle_output(VirtIODevice *vdev, VirtQueue *vq) +{ + /* + * Not normally called; it's the daemon that handles the queue; + * however virtio's cleanup path can call this. 
+ */ +} + +static void vu_rng_guest_notifier_mask(VirtIODevice *vdev, int idx, bool mask) +{ + VHostUserRNG *rng = VHOST_USER_RNG(vdev); + + vhost_virtqueue_mask(&rng->vhost_dev, vdev, idx, mask); +} + +static bool vu_rng_guest_notifier_pending(VirtIODevice *vdev, int idx) +{ + VHostUserRNG *rng = VHOST_USER_RNG(vdev); + + return vhost_virtqueue_pending(&rng->vhost_dev, idx); +} + +static void vu_rng_connect(DeviceState *dev) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(dev); + VHostUserRNG *rng = VHOST_USER_RNG(vdev); + + if (rng->connected) { + return; + } + + rng->connected = true; + + /* restore vhost state */ + if (virtio_device_started(vdev, vdev->status)) { + vu_rng_start(vdev); + } +} + +static void vu_rng_disconnect(DeviceState *dev) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(dev); + VHostUserRNG *rng = VHOST_USER_RNG(vdev); + + if (!rng->connected) { + return; + } + + rng->connected = false; + + if (rng->vhost_dev.started) { + vu_rng_stop(vdev); + } +} + +static void vu_rng_event(void *opaque, QEMUChrEvent event) +{ + DeviceState *dev = opaque; + + switch (event) { + case CHR_EVENT_OPENED: + vu_rng_connect(dev); + break; + case CHR_EVENT_CLOSED: + vu_rng_disconnect(dev); + break; + case CHR_EVENT_BREAK: + case CHR_EVENT_MUX_IN: + case CHR_EVENT_MUX_OUT: + /* Ignore */ + break; + } +} + +static void vu_rng_device_realize(DeviceState *dev, Error **errp) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(dev); + VHostUserRNG *rng = VHOST_USER_RNG(dev); + int ret; + + if (!rng->chardev.chr) { + error_setg(errp, "missing chardev"); + return; + } + + if (!vhost_user_init(&rng->vhost_user, &rng->chardev, errp)) { + return; + } + + virtio_init(vdev, "vhost-user-rng", VIRTIO_ID_RNG, 0); + + rng->req_vq = virtio_add_queue(vdev, 4, vu_rng_handle_output); + if (!rng->req_vq) { + error_setg_errno(errp, -1, "virtio_add_queue() failed"); + goto virtio_add_queue_failed; + } + + rng->vhost_dev.nvqs = 1; + rng->vhost_dev.vqs = g_new0(struct vhost_virtqueue, rng->vhost_dev.nvqs); + ret = vhost_dev_init(&rng->vhost_dev, &rng->vhost_user, + VHOST_BACKEND_TYPE_USER, 0, errp); + if (ret < 0) { + error_setg_errno(errp, -ret, "vhost_dev_init() failed"); + goto vhost_dev_init_failed; + } + + qemu_chr_fe_set_handlers(&rng->chardev, NULL, NULL, vu_rng_event, NULL, + dev, NULL, true); + + return; + +vhost_dev_init_failed: + virtio_delete_queue(rng->req_vq); +virtio_add_queue_failed: + virtio_cleanup(vdev); + vhost_user_cleanup(&rng->vhost_user); +} + +static void vu_rng_device_unrealize(DeviceState *dev) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(dev); + VHostUserRNG *rng = VHOST_USER_RNG(dev); + + vu_rng_set_status(vdev, 0); + + vhost_dev_cleanup(&rng->vhost_dev); + g_free(rng->vhost_dev.vqs); + rng->vhost_dev.vqs = NULL; + virtio_delete_queue(rng->req_vq); + virtio_cleanup(vdev); + vhost_user_cleanup(&rng->vhost_user); +} + +static const VMStateDescription vu_rng_vmstate = { + .name = "vhost-user-rng", + .unmigratable = 1, +}; + +static Property vu_rng_properties[] = { + DEFINE_PROP_CHR("chardev", VHostUserRNG, chardev), + DEFINE_PROP_END_OF_LIST(), +}; + +static void vu_rng_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass); + + device_class_set_props(dc, vu_rng_properties); + dc->vmsd = &vu_rng_vmstate; + set_bit(DEVICE_CATEGORY_INPUT, dc->categories); + + vdc->realize = vu_rng_device_realize; + vdc->unrealize = vu_rng_device_unrealize; + vdc->get_features = vu_rng_get_features; + vdc->set_status = vu_rng_set_status; + 
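+ /* Guest notifier mask/pending are forwarded straight to the vhost backend */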
vdc->guest_notifier_mask = vu_rng_guest_notifier_mask; + vdc->guest_notifier_pending = vu_rng_guest_notifier_pending; +} + +static const TypeInfo vu_rng_info = { + .name = TYPE_VHOST_USER_RNG, + .parent = TYPE_VIRTIO_DEVICE, + .instance_size = sizeof(VHostUserRNG), + .class_init = vu_rng_class_init, +}; + +static void vu_rng_register_types(void) +{ + type_register_static(&vu_rng_info); +} + +type_init(vu_rng_register_types) diff --git a/hw/virtio/vhost-user-scsi-pci.c b/hw/virtio/vhost-user-scsi-pci.c new file mode 100644 index 000000000..d5343412a --- /dev/null +++ b/hw/virtio/vhost-user-scsi-pci.c @@ -0,0 +1,110 @@ +/* + * Vhost user scsi PCI Bindings + * + * Copyright (c) 2016 Nutanix Inc. All rights reserved. + * + * Author: + * Felipe Franciosi + * + * This work is largely based on the "vhost-scsi" implementation by: + * Stefan Hajnoczi + * Nicholas Bellinger + * + * This work is licensed under the terms of the GNU LGPL, version 2 or later. + * See the COPYING.LIB file in the top-level directory. + * + */ + +#include "qemu/osdep.h" + +#include "standard-headers/linux/virtio_pci.h" +#include "hw/virtio/vhost-user-scsi.h" +#include "hw/virtio/virtio.h" +#include "hw/virtio/virtio-scsi.h" +#include "hw/pci/pci.h" +#include "hw/qdev-properties.h" +#include "qapi/error.h" +#include "qemu/error-report.h" +#include "qemu/module.h" +#include "hw/pci/msi.h" +#include "hw/pci/msix.h" +#include "hw/loader.h" +#include "sysemu/kvm.h" +#include "virtio-pci.h" +#include "qom/object.h" + +typedef struct VHostUserSCSIPCI VHostUserSCSIPCI; + +#define TYPE_VHOST_USER_SCSI_PCI "vhost-user-scsi-pci-base" +DECLARE_INSTANCE_CHECKER(VHostUserSCSIPCI, VHOST_USER_SCSI_PCI, + TYPE_VHOST_USER_SCSI_PCI) + +struct VHostUserSCSIPCI { + VirtIOPCIProxy parent_obj; + VHostUserSCSI vdev; +}; + +static Property vhost_user_scsi_pci_properties[] = { + DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, + DEV_NVECTORS_UNSPECIFIED), + DEFINE_PROP_END_OF_LIST(), +}; + +static void vhost_user_scsi_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp) +{ + VHostUserSCSIPCI *dev = VHOST_USER_SCSI_PCI(vpci_dev); + DeviceState *vdev = DEVICE(&dev->vdev); + VirtIOSCSIConf *conf = &dev->vdev.parent_obj.parent_obj.conf; + + if (conf->num_queues == VIRTIO_SCSI_AUTO_NUM_QUEUES) { + conf->num_queues = + virtio_pci_optimal_num_queues(VIRTIO_SCSI_VQ_NUM_FIXED); + } + + if (vpci_dev->nvectors == DEV_NVECTORS_UNSPECIFIED) { + vpci_dev->nvectors = conf->num_queues + VIRTIO_SCSI_VQ_NUM_FIXED + 1; + } + + qdev_realize(vdev, BUS(&vpci_dev->bus), errp); +} + +static void vhost_user_scsi_pci_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass); + PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass); + k->realize = vhost_user_scsi_pci_realize; + set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); + device_class_set_props(dc, vhost_user_scsi_pci_properties); + pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET; + pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_SCSI; + pcidev_k->revision = 0x00; + pcidev_k->class_id = PCI_CLASS_STORAGE_SCSI; +} + +static void vhost_user_scsi_pci_instance_init(Object *obj) +{ + VHostUserSCSIPCI *dev = VHOST_USER_SCSI_PCI(obj); + + virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev), + TYPE_VHOST_USER_SCSI); + object_property_add_alias(obj, "bootindex", OBJECT(&dev->vdev), + "bootindex"); +} + +static const VirtioPCIDeviceTypeInfo vhost_user_scsi_pci_info = { + .base_name = TYPE_VHOST_USER_SCSI_PCI, + .generic_name = 
"vhost-user-scsi-pci", + .transitional_name = "vhost-user-scsi-pci-transitional", + .non_transitional_name = "vhost-user-scsi-pci-non-transitional", + .instance_size = sizeof(VHostUserSCSIPCI), + .instance_init = vhost_user_scsi_pci_instance_init, + .class_init = vhost_user_scsi_pci_class_init, +}; + +static void vhost_user_scsi_pci_register(void) +{ + virtio_pci_types_register(&vhost_user_scsi_pci_info); +} + +type_init(vhost_user_scsi_pci_register) diff --git a/hw/virtio/vhost-user-vsock-pci.c b/hw/virtio/vhost-user-vsock-pci.c new file mode 100644 index 000000000..72a96199c --- /dev/null +++ b/hw/virtio/vhost-user-vsock-pci.c @@ -0,0 +1,86 @@ +/* + * Vhost-user vsock PCI Bindings + * + * Copyright 2020 Red Hat, Inc. + * + * This work is licensed under the terms of the GNU GPL, version 2 or + * (at your option) any later version. See the COPYING file in the + * top-level directory. + */ + +#include "qemu/osdep.h" + +#include "virtio-pci.h" +#include "hw/qdev-properties.h" +#include "hw/virtio/vhost-user-vsock.h" +#include "qom/object.h" + +typedef struct VHostUserVSockPCI VHostUserVSockPCI; + +/* + * vhost-user-vsock-pci: This extends VirtioPCIProxy. + */ +#define TYPE_VHOST_USER_VSOCK_PCI "vhost-user-vsock-pci-base" +DECLARE_INSTANCE_CHECKER(VHostUserVSockPCI, VHOST_USER_VSOCK_PCI, + TYPE_VHOST_USER_VSOCK_PCI) + +struct VHostUserVSockPCI { + VirtIOPCIProxy parent_obj; + VHostUserVSock vdev; +}; + +/* vhost-user-vsock-pci */ + +static Property vhost_user_vsock_pci_properties[] = { + DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 3), + DEFINE_PROP_END_OF_LIST(), +}; + +static void vhost_user_vsock_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp) +{ + VHostUserVSockPCI *dev = VHOST_USER_VSOCK_PCI(vpci_dev); + DeviceState *vdev = DEVICE(&dev->vdev); + + /* unlike vhost-vsock, we do not need to care about pre-5.1 compat */ + virtio_pci_force_virtio_1(vpci_dev); + + qdev_realize(vdev, BUS(&vpci_dev->bus), errp); +} + +static void vhost_user_vsock_pci_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass); + PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass); + k->realize = vhost_user_vsock_pci_realize; + set_bit(DEVICE_CATEGORY_MISC, dc->categories); + device_class_set_props(dc, vhost_user_vsock_pci_properties); + pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET; + pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_VSOCK; + pcidev_k->revision = 0x00; + pcidev_k->class_id = PCI_CLASS_COMMUNICATION_OTHER; +} + +static void vhost_user_vsock_pci_instance_init(Object *obj) +{ + VHostUserVSockPCI *dev = VHOST_USER_VSOCK_PCI(obj); + + virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev), + TYPE_VHOST_USER_VSOCK); +} + +static const VirtioPCIDeviceTypeInfo vhost_user_vsock_pci_info = { + .base_name = TYPE_VHOST_USER_VSOCK_PCI, + .generic_name = "vhost-user-vsock-pci", + .non_transitional_name = "vhost-user-vsock-pci-non-transitional", + .instance_size = sizeof(VHostUserVSockPCI), + .instance_init = vhost_user_vsock_pci_instance_init, + .class_init = vhost_user_vsock_pci_class_init, +}; + +static void virtio_pci_vhost_register(void) +{ + virtio_pci_types_register(&vhost_user_vsock_pci_info); +} + +type_init(virtio_pci_vhost_register) diff --git a/hw/virtio/vhost-user-vsock.c b/hw/virtio/vhost-user-vsock.c new file mode 100644 index 000000000..52bd682c3 --- /dev/null +++ b/hw/virtio/vhost-user-vsock.c @@ -0,0 +1,184 @@ +/* + * Vhost-user vsock virtio device + * + * Copyright 2020 Red Hat, Inc. 
+ * + * This work is licensed under the terms of the GNU GPL, version 2 or + * (at your option) any later version. See the COPYING file in the + * top-level directory. + */ + +#include "qemu/osdep.h" + +#include "qapi/error.h" +#include "qemu/error-report.h" +#include "hw/qdev-properties.h" +#include "hw/qdev-properties-system.h" +#include "hw/virtio/vhost-user-vsock.h" + +static const int user_feature_bits[] = { + VIRTIO_F_VERSION_1, + VIRTIO_RING_F_INDIRECT_DESC, + VIRTIO_RING_F_EVENT_IDX, + VIRTIO_F_NOTIFY_ON_EMPTY, + VHOST_INVALID_FEATURE_BIT +}; + +static void vuv_get_config(VirtIODevice *vdev, uint8_t *config) +{ + VHostUserVSock *vsock = VHOST_USER_VSOCK(vdev); + + memcpy(config, &vsock->vsockcfg, sizeof(struct virtio_vsock_config)); +} + +static int vuv_handle_config_change(struct vhost_dev *dev) +{ + VHostUserVSock *vsock = VHOST_USER_VSOCK(dev->vdev); + Error *local_err = NULL; + int ret = vhost_dev_get_config(dev, (uint8_t *)&vsock->vsockcfg, + sizeof(struct virtio_vsock_config), + &local_err); + if (ret < 0) { + error_report_err(local_err); + return -1; + } + + virtio_notify_config(dev->vdev); + + return 0; +} + +const VhostDevConfigOps vsock_ops = { + .vhost_dev_config_notifier = vuv_handle_config_change, +}; + +static void vuv_set_status(VirtIODevice *vdev, uint8_t status) +{ + VHostVSockCommon *vvc = VHOST_VSOCK_COMMON(vdev); + bool should_start = status & VIRTIO_CONFIG_S_DRIVER_OK; + + if (!vdev->vm_running) { + should_start = false; + } + + if (vvc->vhost_dev.started == should_start) { + return; + } + + if (should_start) { + int ret = vhost_vsock_common_start(vdev); + if (ret < 0) { + return; + } + } else { + vhost_vsock_common_stop(vdev); + } +} + +static uint64_t vuv_get_features(VirtIODevice *vdev, + uint64_t features, + Error **errp) +{ + VHostVSockCommon *vvc = VHOST_VSOCK_COMMON(vdev); + + features = vhost_get_features(&vvc->vhost_dev, user_feature_bits, features); + + return vhost_vsock_common_get_features(vdev, features, errp); +} + +static const VMStateDescription vuv_vmstate = { + .name = "vhost-user-vsock", + .unmigratable = 1, +}; + +static void vuv_device_realize(DeviceState *dev, Error **errp) +{ + VHostVSockCommon *vvc = VHOST_VSOCK_COMMON(dev); + VirtIODevice *vdev = VIRTIO_DEVICE(dev); + VHostUserVSock *vsock = VHOST_USER_VSOCK(dev); + int ret; + + if (!vsock->conf.chardev.chr) { + error_setg(errp, "missing chardev"); + return; + } + + if (!vhost_user_init(&vsock->vhost_user, &vsock->conf.chardev, errp)) { + return; + } + + vhost_vsock_common_realize(vdev, "vhost-user-vsock"); + + vhost_dev_set_config_notifier(&vvc->vhost_dev, &vsock_ops); + + ret = vhost_dev_init(&vvc->vhost_dev, &vsock->vhost_user, + VHOST_BACKEND_TYPE_USER, 0, errp); + if (ret < 0) { + goto err_virtio; + } + + ret = vhost_dev_get_config(&vvc->vhost_dev, (uint8_t *)&vsock->vsockcfg, + sizeof(struct virtio_vsock_config), errp); + if (ret < 0) { + goto err_vhost_dev; + } + + return; + +err_vhost_dev: + vhost_dev_cleanup(&vvc->vhost_dev); +err_virtio: + vhost_vsock_common_unrealize(vdev); + vhost_user_cleanup(&vsock->vhost_user); + return; +} + +static void vuv_device_unrealize(DeviceState *dev) +{ + VHostVSockCommon *vvc = VHOST_VSOCK_COMMON(dev); + VirtIODevice *vdev = VIRTIO_DEVICE(dev); + VHostUserVSock *vsock = VHOST_USER_VSOCK(dev); + + /* This will stop vhost backend if appropriate. 
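Setting the status to 0 drives should_start false in vuv_set_status(), so a still-running backend is stopped before the device is torn down.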
*/
+ vuv_set_status(vdev, 0);
+
+ vhost_dev_cleanup(&vvc->vhost_dev);
+
+ vhost_vsock_common_unrealize(vdev);
+
+ vhost_user_cleanup(&vsock->vhost_user);
+
+}
+
+static Property vuv_properties[] = {
+ DEFINE_PROP_CHR("chardev", VHostUserVSock, conf.chardev),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void vuv_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
+
+ device_class_set_props(dc, vuv_properties);
+ dc->vmsd = &vuv_vmstate;
+ vdc->realize = vuv_device_realize;
+ vdc->unrealize = vuv_device_unrealize;
+ vdc->get_features = vuv_get_features;
+ vdc->get_config = vuv_get_config;
+ vdc->set_status = vuv_set_status;
+}
+
+static const TypeInfo vuv_info = {
+ .name = TYPE_VHOST_USER_VSOCK,
+ .parent = TYPE_VHOST_VSOCK_COMMON,
+ .instance_size = sizeof(VHostUserVSock),
+ .class_init = vuv_class_init,
+};
+
+static void vuv_register_types(void)
+{
+ type_register_static(&vuv_info);
+}
+
+type_init(vuv_register_types)
diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
new file mode 100644
index 000000000..bf6e50223
--- /dev/null
+++ b/hw/virtio/vhost-user.c
@@ -0,0 +1,2529 @@
+/*
+ * vhost-user
+ *
+ * Copyright (c) 2013 Virtual Open Systems Sarl.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "hw/virtio/vhost.h"
+#include "hw/virtio/vhost-user.h"
+#include "hw/virtio/vhost-backend.h"
+#include "hw/virtio/virtio.h"
+#include "hw/virtio/virtio-net.h"
+#include "chardev/char-fe.h"
+#include "io/channel-socket.h"
+#include "sysemu/kvm.h"
+#include "qemu/error-report.h"
+#include "qemu/main-loop.h"
+#include "qemu/sockets.h"
+#include "sysemu/cryptodev.h"
+#include "migration/migration.h"
+#include "migration/postcopy-ram.h"
+#include "trace.h"
+
+#include <sys/ioctl.h>
+#include <sys/socket.h>
+#include <sys/un.h>
+
+#include "standard-headers/linux/vhost_types.h"
+
+#ifdef CONFIG_LINUX
+#include <linux/userfaultfd.h>
+#endif
+
+#define VHOST_MEMORY_BASELINE_NREGIONS 8
+#define VHOST_USER_F_PROTOCOL_FEATURES 30
+#define VHOST_USER_SLAVE_MAX_FDS 8
+
+/*
+ * Set maximum number of RAM slots supported to
+ * the maximum number supported by the target
+ * hardware platform.
+ */
+#if defined(TARGET_X86) || defined(TARGET_X86_64) || \
+ defined(TARGET_ARM) || defined(TARGET_ARM_64)
+#include "hw/acpi/acpi.h"
+#define VHOST_USER_MAX_RAM_SLOTS ACPI_MAX_RAM_SLOTS
+
+#elif defined(TARGET_PPC) || defined(TARGET_PPC_64)
+#include "hw/ppc/spapr.h"
+#define VHOST_USER_MAX_RAM_SLOTS SPAPR_MAX_RAM_SLOTS
+
+#else
+#define VHOST_USER_MAX_RAM_SLOTS 512
+#endif
+
+/*
+ * Maximum size of virtio device config space
+ */
+#define VHOST_USER_MAX_CONFIG_SIZE 256
+
+enum VhostUserProtocolFeature {
+ VHOST_USER_PROTOCOL_F_MQ = 0,
+ VHOST_USER_PROTOCOL_F_LOG_SHMFD = 1,
+ VHOST_USER_PROTOCOL_F_RARP = 2,
+ VHOST_USER_PROTOCOL_F_REPLY_ACK = 3,
+ VHOST_USER_PROTOCOL_F_NET_MTU = 4,
+ VHOST_USER_PROTOCOL_F_SLAVE_REQ = 5,
+ VHOST_USER_PROTOCOL_F_CROSS_ENDIAN = 6,
+ VHOST_USER_PROTOCOL_F_CRYPTO_SESSION = 7,
+ VHOST_USER_PROTOCOL_F_PAGEFAULT = 8,
+ VHOST_USER_PROTOCOL_F_CONFIG = 9,
+ VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD = 10,
+ VHOST_USER_PROTOCOL_F_HOST_NOTIFIER = 11,
+ VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD = 12,
+ VHOST_USER_PROTOCOL_F_RESET_DEVICE = 13,
+ /* Feature 14 reserved for VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS.
*/ + VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS = 15, + VHOST_USER_PROTOCOL_F_MAX +}; + +#define VHOST_USER_PROTOCOL_FEATURE_MASK ((1 << VHOST_USER_PROTOCOL_F_MAX) - 1) + +typedef enum VhostUserRequest { + VHOST_USER_NONE = 0, + VHOST_USER_GET_FEATURES = 1, + VHOST_USER_SET_FEATURES = 2, + VHOST_USER_SET_OWNER = 3, + VHOST_USER_RESET_OWNER = 4, + VHOST_USER_SET_MEM_TABLE = 5, + VHOST_USER_SET_LOG_BASE = 6, + VHOST_USER_SET_LOG_FD = 7, + VHOST_USER_SET_VRING_NUM = 8, + VHOST_USER_SET_VRING_ADDR = 9, + VHOST_USER_SET_VRING_BASE = 10, + VHOST_USER_GET_VRING_BASE = 11, + VHOST_USER_SET_VRING_KICK = 12, + VHOST_USER_SET_VRING_CALL = 13, + VHOST_USER_SET_VRING_ERR = 14, + VHOST_USER_GET_PROTOCOL_FEATURES = 15, + VHOST_USER_SET_PROTOCOL_FEATURES = 16, + VHOST_USER_GET_QUEUE_NUM = 17, + VHOST_USER_SET_VRING_ENABLE = 18, + VHOST_USER_SEND_RARP = 19, + VHOST_USER_NET_SET_MTU = 20, + VHOST_USER_SET_SLAVE_REQ_FD = 21, + VHOST_USER_IOTLB_MSG = 22, + VHOST_USER_SET_VRING_ENDIAN = 23, + VHOST_USER_GET_CONFIG = 24, + VHOST_USER_SET_CONFIG = 25, + VHOST_USER_CREATE_CRYPTO_SESSION = 26, + VHOST_USER_CLOSE_CRYPTO_SESSION = 27, + VHOST_USER_POSTCOPY_ADVISE = 28, + VHOST_USER_POSTCOPY_LISTEN = 29, + VHOST_USER_POSTCOPY_END = 30, + VHOST_USER_GET_INFLIGHT_FD = 31, + VHOST_USER_SET_INFLIGHT_FD = 32, + VHOST_USER_GPU_SET_SOCKET = 33, + VHOST_USER_RESET_DEVICE = 34, + /* Message number 35 reserved for VHOST_USER_VRING_KICK. */ + VHOST_USER_GET_MAX_MEM_SLOTS = 36, + VHOST_USER_ADD_MEM_REG = 37, + VHOST_USER_REM_MEM_REG = 38, + VHOST_USER_MAX +} VhostUserRequest; + +typedef enum VhostUserSlaveRequest { + VHOST_USER_SLAVE_NONE = 0, + VHOST_USER_SLAVE_IOTLB_MSG = 1, + VHOST_USER_SLAVE_CONFIG_CHANGE_MSG = 2, + VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG = 3, + VHOST_USER_SLAVE_MAX +} VhostUserSlaveRequest; + +typedef struct VhostUserMemoryRegion { + uint64_t guest_phys_addr; + uint64_t memory_size; + uint64_t userspace_addr; + uint64_t mmap_offset; +} VhostUserMemoryRegion; + +typedef struct VhostUserMemory { + uint32_t nregions; + uint32_t padding; + VhostUserMemoryRegion regions[VHOST_MEMORY_BASELINE_NREGIONS]; +} VhostUserMemory; + +typedef struct VhostUserMemRegMsg { + uint64_t padding; + VhostUserMemoryRegion region; +} VhostUserMemRegMsg; + +typedef struct VhostUserLog { + uint64_t mmap_size; + uint64_t mmap_offset; +} VhostUserLog; + +typedef struct VhostUserConfig { + uint32_t offset; + uint32_t size; + uint32_t flags; + uint8_t region[VHOST_USER_MAX_CONFIG_SIZE]; +} VhostUserConfig; + +#define VHOST_CRYPTO_SYM_HMAC_MAX_KEY_LEN 512 +#define VHOST_CRYPTO_SYM_CIPHER_MAX_KEY_LEN 64 + +typedef struct VhostUserCryptoSession { + /* session id for success, -1 on errors */ + int64_t session_id; + CryptoDevBackendSymSessionInfo session_setup_data; + uint8_t key[VHOST_CRYPTO_SYM_CIPHER_MAX_KEY_LEN]; + uint8_t auth_key[VHOST_CRYPTO_SYM_HMAC_MAX_KEY_LEN]; +} VhostUserCryptoSession; + +static VhostUserConfig c __attribute__ ((unused)); +#define VHOST_USER_CONFIG_HDR_SIZE (sizeof(c.offset) \ + + sizeof(c.size) \ + + sizeof(c.flags)) + +typedef struct VhostUserVringArea { + uint64_t u64; + uint64_t size; + uint64_t offset; +} VhostUserVringArea; + +typedef struct VhostUserInflight { + uint64_t mmap_size; + uint64_t mmap_offset; + uint16_t num_queues; + uint16_t queue_size; +} VhostUserInflight; + +typedef struct { + VhostUserRequest request; + +#define VHOST_USER_VERSION_MASK (0x3) +#define VHOST_USER_REPLY_MASK (0x1<<2) +#define VHOST_USER_NEED_REPLY_MASK (0x1 << 3) + uint32_t flags; + uint32_t size; /* the following payload 
size */ +} QEMU_PACKED VhostUserHeader; + +typedef union { +#define VHOST_USER_VRING_IDX_MASK (0xff) +#define VHOST_USER_VRING_NOFD_MASK (0x1<<8) + uint64_t u64; + struct vhost_vring_state state; + struct vhost_vring_addr addr; + VhostUserMemory memory; + VhostUserMemRegMsg mem_reg; + VhostUserLog log; + struct vhost_iotlb_msg iotlb; + VhostUserConfig config; + VhostUserCryptoSession session; + VhostUserVringArea area; + VhostUserInflight inflight; +} VhostUserPayload; + +typedef struct VhostUserMsg { + VhostUserHeader hdr; + VhostUserPayload payload; +} QEMU_PACKED VhostUserMsg; + +static VhostUserMsg m __attribute__ ((unused)); +#define VHOST_USER_HDR_SIZE (sizeof(VhostUserHeader)) + +#define VHOST_USER_PAYLOAD_SIZE (sizeof(VhostUserPayload)) + +/* The version of the protocol we support */ +#define VHOST_USER_VERSION (0x1) + +struct vhost_user { + struct vhost_dev *dev; + /* Shared between vhost devs of the same virtio device */ + VhostUserState *user; + QIOChannel *slave_ioc; + GSource *slave_src; + NotifierWithReturn postcopy_notifier; + struct PostCopyFD postcopy_fd; + uint64_t postcopy_client_bases[VHOST_USER_MAX_RAM_SLOTS]; + /* Length of the region_rb and region_rb_offset arrays */ + size_t region_rb_len; + /* RAMBlock associated with a given region */ + RAMBlock **region_rb; + /* The offset from the start of the RAMBlock to the start of the + * vhost region. + */ + ram_addr_t *region_rb_offset; + + /* True once we've entered postcopy_listen */ + bool postcopy_listen; + + /* Our current regions */ + int num_shadow_regions; + struct vhost_memory_region shadow_regions[VHOST_USER_MAX_RAM_SLOTS]; +}; + +struct scrub_regions { + struct vhost_memory_region *region; + int reg_idx; + int fd_idx; +}; + +static bool ioeventfd_enabled(void) +{ + return !kvm_enabled() || kvm_eventfds_enabled(); +} + +static int vhost_user_read_header(struct vhost_dev *dev, VhostUserMsg *msg) +{ + struct vhost_user *u = dev->opaque; + CharBackend *chr = u->user->chr; + uint8_t *p = (uint8_t *) msg; + int r, size = VHOST_USER_HDR_SIZE; + + r = qemu_chr_fe_read_all(chr, p, size); + if (r != size) { + error_report("Failed to read msg header. Read %d instead of %d." + " Original request %d.", r, size, msg->hdr.request); + return -1; + } + + /* validate received flags */ + if (msg->hdr.flags != (VHOST_USER_REPLY_MASK | VHOST_USER_VERSION)) { + error_report("Failed to read msg header." + " Flags 0x%x instead of 0x%x.", msg->hdr.flags, + VHOST_USER_REPLY_MASK | VHOST_USER_VERSION); + return -1; + } + + return 0; +} + +struct vhost_user_read_cb_data { + struct vhost_dev *dev; + VhostUserMsg *msg; + GMainLoop *loop; + int ret; +}; + +static gboolean vhost_user_read_cb(void *do_not_use, GIOCondition condition, + gpointer opaque) +{ + struct vhost_user_read_cb_data *data = opaque; + struct vhost_dev *dev = data->dev; + VhostUserMsg *msg = data->msg; + struct vhost_user *u = dev->opaque; + CharBackend *chr = u->user->chr; + uint8_t *p = (uint8_t *) msg; + int r, size; + + if (vhost_user_read_header(dev, msg) < 0) { + data->ret = -1; + goto end; + } + + /* validate message size is sane */ + if (msg->hdr.size > VHOST_USER_PAYLOAD_SIZE) { + error_report("Failed to read msg header." + " Size %d exceeds the maximum %zu.", msg->hdr.size, + VHOST_USER_PAYLOAD_SIZE); + data->ret = -1; + goto end; + } + + if (msg->hdr.size) { + p += VHOST_USER_HDR_SIZE; + size = msg->hdr.size; + r = qemu_chr_fe_read_all(chr, p, size); + if (r != size) { + error_report("Failed to read msg payload." 
+ " Read %d instead of %d.", r, msg->hdr.size); + data->ret = -1; + goto end; + } + } + +end: + g_main_loop_quit(data->loop); + return G_SOURCE_REMOVE; +} + +static gboolean slave_read(QIOChannel *ioc, GIOCondition condition, + gpointer opaque); + +/* + * This updates the read handler to use a new event loop context. + * Event sources are removed from the previous context : this ensures + * that events detected in the previous context are purged. They will + * be re-detected and processed in the new context. + */ +static void slave_update_read_handler(struct vhost_dev *dev, + GMainContext *ctxt) +{ + struct vhost_user *u = dev->opaque; + + if (!u->slave_ioc) { + return; + } + + if (u->slave_src) { + g_source_destroy(u->slave_src); + g_source_unref(u->slave_src); + } + + u->slave_src = qio_channel_add_watch_source(u->slave_ioc, + G_IO_IN | G_IO_HUP, + slave_read, dev, NULL, + ctxt); +} + +static int vhost_user_read(struct vhost_dev *dev, VhostUserMsg *msg) +{ + struct vhost_user *u = dev->opaque; + CharBackend *chr = u->user->chr; + GMainContext *prev_ctxt = chr->chr->gcontext; + GMainContext *ctxt = g_main_context_new(); + GMainLoop *loop = g_main_loop_new(ctxt, FALSE); + struct vhost_user_read_cb_data data = { + .dev = dev, + .loop = loop, + .msg = msg, + .ret = 0 + }; + + /* + * We want to be able to monitor the slave channel fd while waiting + * for chr I/O. This requires an event loop, but we can't nest the + * one to which chr is currently attached : its fd handlers might not + * be prepared for re-entrancy. So we create a new one and switch chr + * to use it. + */ + slave_update_read_handler(dev, ctxt); + qemu_chr_be_update_read_handlers(chr->chr, ctxt); + qemu_chr_fe_add_watch(chr, G_IO_IN | G_IO_HUP, vhost_user_read_cb, &data); + + g_main_loop_run(loop); + + /* + * Restore the previous event loop context. This also destroys/recreates + * event sources : this guarantees that all pending events in the original + * context that have been processed by the nested loop are purged. + */ + qemu_chr_be_update_read_handlers(chr->chr, prev_ctxt); + slave_update_read_handler(dev, NULL); + + g_main_loop_unref(loop); + g_main_context_unref(ctxt); + + return data.ret; +} + +static int process_message_reply(struct vhost_dev *dev, + const VhostUserMsg *msg) +{ + VhostUserMsg msg_reply; + + if ((msg->hdr.flags & VHOST_USER_NEED_REPLY_MASK) == 0) { + return 0; + } + + if (vhost_user_read(dev, &msg_reply) < 0) { + return -1; + } + + if (msg_reply.hdr.request != msg->hdr.request) { + error_report("Received unexpected msg type. " + "Expected %d received %d", + msg->hdr.request, msg_reply.hdr.request); + return -1; + } + + return msg_reply.payload.u64 ? -1 : 0; +} + +static bool vhost_user_one_time_request(VhostUserRequest request) +{ + switch (request) { + case VHOST_USER_SET_OWNER: + case VHOST_USER_RESET_OWNER: + case VHOST_USER_SET_MEM_TABLE: + case VHOST_USER_GET_QUEUE_NUM: + case VHOST_USER_NET_SET_MTU: + return true; + default: + return false; + } +} + +/* most non-init callers ignore the error */ +static int vhost_user_write(struct vhost_dev *dev, VhostUserMsg *msg, + int *fds, int fd_num) +{ + struct vhost_user *u = dev->opaque; + CharBackend *chr = u->user->chr; + int ret, size = VHOST_USER_HDR_SIZE + msg->hdr.size; + + /* + * For non-vring specific requests, like VHOST_USER_SET_MEM_TABLE, + * we just need send it once in the first time. For later such + * request, we just ignore it. 
+ */ + if (vhost_user_one_time_request(msg->hdr.request) && dev->vq_index != 0) { + msg->hdr.flags &= ~VHOST_USER_NEED_REPLY_MASK; + return 0; + } + + if (qemu_chr_fe_set_msgfds(chr, fds, fd_num) < 0) { + error_report("Failed to set msg fds."); + return -1; + } + + ret = qemu_chr_fe_write_all(chr, (const uint8_t *) msg, size); + if (ret != size) { + error_report("Failed to write msg." + " Wrote %d instead of %d.", ret, size); + return -1; + } + + return 0; +} + +int vhost_user_gpu_set_socket(struct vhost_dev *dev, int fd) +{ + VhostUserMsg msg = { + .hdr.request = VHOST_USER_GPU_SET_SOCKET, + .hdr.flags = VHOST_USER_VERSION, + }; + + return vhost_user_write(dev, &msg, &fd, 1); +} + +static int vhost_user_set_log_base(struct vhost_dev *dev, uint64_t base, + struct vhost_log *log) +{ + int fds[VHOST_USER_MAX_RAM_SLOTS]; + size_t fd_num = 0; + bool shmfd = virtio_has_feature(dev->protocol_features, + VHOST_USER_PROTOCOL_F_LOG_SHMFD); + VhostUserMsg msg = { + .hdr.request = VHOST_USER_SET_LOG_BASE, + .hdr.flags = VHOST_USER_VERSION, + .payload.log.mmap_size = log->size * sizeof(*(log->log)), + .payload.log.mmap_offset = 0, + .hdr.size = sizeof(msg.payload.log), + }; + + if (shmfd && log->fd != -1) { + fds[fd_num++] = log->fd; + } + + if (vhost_user_write(dev, &msg, fds, fd_num) < 0) { + return -1; + } + + if (shmfd) { + msg.hdr.size = 0; + if (vhost_user_read(dev, &msg) < 0) { + return -1; + } + + if (msg.hdr.request != VHOST_USER_SET_LOG_BASE) { + error_report("Received unexpected msg type. " + "Expected %d received %d", + VHOST_USER_SET_LOG_BASE, msg.hdr.request); + return -1; + } + } + + return 0; +} + +static MemoryRegion *vhost_user_get_mr_data(uint64_t addr, ram_addr_t *offset, + int *fd) +{ + MemoryRegion *mr; + + assert((uintptr_t)addr == addr); + mr = memory_region_from_host((void *)(uintptr_t)addr, offset); + *fd = memory_region_get_fd(mr); + + return mr; +} + +static void vhost_user_fill_msg_region(VhostUserMemoryRegion *dst, + struct vhost_memory_region *src, + uint64_t mmap_offset) +{ + assert(src != NULL && dst != NULL); + dst->userspace_addr = src->userspace_addr; + dst->memory_size = src->memory_size; + dst->guest_phys_addr = src->guest_phys_addr; + dst->mmap_offset = mmap_offset; +} + +static int vhost_user_fill_set_mem_table_msg(struct vhost_user *u, + struct vhost_dev *dev, + VhostUserMsg *msg, + int *fds, size_t *fd_num, + bool track_ramblocks) +{ + int i, fd; + ram_addr_t offset; + MemoryRegion *mr; + struct vhost_memory_region *reg; + VhostUserMemoryRegion region_buffer; + + msg->hdr.request = VHOST_USER_SET_MEM_TABLE; + + for (i = 0; i < dev->mem->nregions; ++i) { + reg = dev->mem->regions + i; + + mr = vhost_user_get_mr_data(reg->userspace_addr, &offset, &fd); + if (fd > 0) { + if (track_ramblocks) { + assert(*fd_num < VHOST_MEMORY_BASELINE_NREGIONS); + trace_vhost_user_set_mem_table_withfd(*fd_num, mr->name, + reg->memory_size, + reg->guest_phys_addr, + reg->userspace_addr, + offset); + u->region_rb_offset[i] = offset; + u->region_rb[i] = mr->ram_block; + } else if (*fd_num == VHOST_MEMORY_BASELINE_NREGIONS) { + error_report("Failed preparing vhost-user memory table msg"); + return -1; + } + vhost_user_fill_msg_region(®ion_buffer, reg, offset); + msg->payload.memory.regions[*fd_num] = region_buffer; + fds[(*fd_num)++] = fd; + } else if (track_ramblocks) { + u->region_rb_offset[i] = 0; + u->region_rb[i] = NULL; + } + } + + msg->payload.memory.nregions = *fd_num; + + if (!*fd_num) { + error_report("Failed initializing vhost-user memory map, " + "consider using -object 
memory-backend-file share=on"); + return -1; + } + + msg->hdr.size = sizeof(msg->payload.memory.nregions); + msg->hdr.size += sizeof(msg->payload.memory.padding); + msg->hdr.size += *fd_num * sizeof(VhostUserMemoryRegion); + + return 1; +} + +static inline bool reg_equal(struct vhost_memory_region *shadow_reg, + struct vhost_memory_region *vdev_reg) +{ + return shadow_reg->guest_phys_addr == vdev_reg->guest_phys_addr && + shadow_reg->userspace_addr == vdev_reg->userspace_addr && + shadow_reg->memory_size == vdev_reg->memory_size; +} + +static void scrub_shadow_regions(struct vhost_dev *dev, + struct scrub_regions *add_reg, + int *nr_add_reg, + struct scrub_regions *rem_reg, + int *nr_rem_reg, uint64_t *shadow_pcb, + bool track_ramblocks) +{ + struct vhost_user *u = dev->opaque; + bool found[VHOST_USER_MAX_RAM_SLOTS] = {}; + struct vhost_memory_region *reg, *shadow_reg; + int i, j, fd, add_idx = 0, rm_idx = 0, fd_num = 0; + ram_addr_t offset; + MemoryRegion *mr; + bool matching; + + /* + * Find memory regions present in our shadow state which are not in + * the device's current memory state. + * + * Mark regions in both the shadow and device state as "found". + */ + for (i = 0; i < u->num_shadow_regions; i++) { + shadow_reg = &u->shadow_regions[i]; + matching = false; + + for (j = 0; j < dev->mem->nregions; j++) { + reg = &dev->mem->regions[j]; + + mr = vhost_user_get_mr_data(reg->userspace_addr, &offset, &fd); + + if (reg_equal(shadow_reg, reg)) { + matching = true; + found[j] = true; + if (track_ramblocks) { + /* + * Reset postcopy client bases, region_rb, and + * region_rb_offset in case regions are removed. + */ + if (fd > 0) { + u->region_rb_offset[j] = offset; + u->region_rb[j] = mr->ram_block; + shadow_pcb[j] = u->postcopy_client_bases[i]; + } else { + u->region_rb_offset[j] = 0; + u->region_rb[j] = NULL; + } + } + break; + } + } + + /* + * If the region was not found in the current device memory state + * create an entry for it in the removed list. + */ + if (!matching) { + rem_reg[rm_idx].region = shadow_reg; + rem_reg[rm_idx++].reg_idx = i; + } + } + + /* + * For regions not marked "found", create entries in the added list. + * + * Note their indexes in the device memory state and the indexes of their + * file descriptors. + */ + for (i = 0; i < dev->mem->nregions; i++) { + reg = &dev->mem->regions[i]; + vhost_user_get_mr_data(reg->userspace_addr, &offset, &fd); + if (fd > 0) { + ++fd_num; + } + + /* + * If the region was in both the shadow and device state we don't + * need to send a VHOST_USER_ADD_MEM_REG message for it. + */ + if (found[i]) { + continue; + } + + add_reg[add_idx].region = reg; + add_reg[add_idx].reg_idx = i; + add_reg[add_idx++].fd_idx = fd_num; + } + *nr_rem_reg = rm_idx; + *nr_add_reg = add_idx; + + return; +} + +static int send_remove_regions(struct vhost_dev *dev, + struct scrub_regions *remove_reg, + int nr_rem_reg, VhostUserMsg *msg, + bool reply_supported) +{ + struct vhost_user *u = dev->opaque; + struct vhost_memory_region *shadow_reg; + int i, fd, shadow_reg_idx, ret; + ram_addr_t offset; + VhostUserMemoryRegion region_buffer; + + /* + * The regions in remove_reg appear in the same order they do in the + * shadow table. Therefore we can minimize memory copies by iterating + * through remove_reg backwards. 
+ */ + for (i = nr_rem_reg - 1; i >= 0; i--) { + shadow_reg = remove_reg[i].region; + shadow_reg_idx = remove_reg[i].reg_idx; + + vhost_user_get_mr_data(shadow_reg->userspace_addr, &offset, &fd); + + if (fd > 0) { + msg->hdr.request = VHOST_USER_REM_MEM_REG; + vhost_user_fill_msg_region(®ion_buffer, shadow_reg, 0); + msg->payload.mem_reg.region = region_buffer; + + if (vhost_user_write(dev, msg, &fd, 1) < 0) { + return -1; + } + + if (reply_supported) { + ret = process_message_reply(dev, msg); + if (ret) { + return ret; + } + } + } + + /* + * At this point we know the backend has unmapped the region. It is now + * safe to remove it from the shadow table. + */ + memmove(&u->shadow_regions[shadow_reg_idx], + &u->shadow_regions[shadow_reg_idx + 1], + sizeof(struct vhost_memory_region) * + (u->num_shadow_regions - shadow_reg_idx - 1)); + u->num_shadow_regions--; + } + + return 0; +} + +static int send_add_regions(struct vhost_dev *dev, + struct scrub_regions *add_reg, int nr_add_reg, + VhostUserMsg *msg, uint64_t *shadow_pcb, + bool reply_supported, bool track_ramblocks) +{ + struct vhost_user *u = dev->opaque; + int i, fd, ret, reg_idx, reg_fd_idx; + struct vhost_memory_region *reg; + MemoryRegion *mr; + ram_addr_t offset; + VhostUserMsg msg_reply; + VhostUserMemoryRegion region_buffer; + + for (i = 0; i < nr_add_reg; i++) { + reg = add_reg[i].region; + reg_idx = add_reg[i].reg_idx; + reg_fd_idx = add_reg[i].fd_idx; + + mr = vhost_user_get_mr_data(reg->userspace_addr, &offset, &fd); + + if (fd > 0) { + if (track_ramblocks) { + trace_vhost_user_set_mem_table_withfd(reg_fd_idx, mr->name, + reg->memory_size, + reg->guest_phys_addr, + reg->userspace_addr, + offset); + u->region_rb_offset[reg_idx] = offset; + u->region_rb[reg_idx] = mr->ram_block; + } + msg->hdr.request = VHOST_USER_ADD_MEM_REG; + vhost_user_fill_msg_region(®ion_buffer, reg, offset); + msg->payload.mem_reg.region = region_buffer; + + if (vhost_user_write(dev, msg, &fd, 1) < 0) { + return -1; + } + + if (track_ramblocks) { + uint64_t reply_gpa; + + if (vhost_user_read(dev, &msg_reply) < 0) { + return -1; + } + + reply_gpa = msg_reply.payload.mem_reg.region.guest_phys_addr; + + if (msg_reply.hdr.request != VHOST_USER_ADD_MEM_REG) { + error_report("%s: Received unexpected msg type." + "Expected %d received %d", __func__, + VHOST_USER_ADD_MEM_REG, + msg_reply.hdr.request); + return -1; + } + + /* + * We're using the same structure, just reusing one of the + * fields, so it should be the same size. + */ + if (msg_reply.hdr.size != msg->hdr.size) { + error_report("%s: Unexpected size for postcopy reply " + "%d vs %d", __func__, msg_reply.hdr.size, + msg->hdr.size); + return -1; + } + + /* Get the postcopy client base from the backend's reply. */ + if (reply_gpa == dev->mem->regions[reg_idx].guest_phys_addr) { + shadow_pcb[reg_idx] = + msg_reply.payload.mem_reg.region.userspace_addr; + trace_vhost_user_set_mem_table_postcopy( + msg_reply.payload.mem_reg.region.userspace_addr, + msg->payload.mem_reg.region.userspace_addr, + reg_fd_idx, reg_idx); + } else { + error_report("%s: invalid postcopy reply for region. 
" + "Got guest physical address %" PRIX64 ", expected " + "%" PRIX64, __func__, reply_gpa, + dev->mem->regions[reg_idx].guest_phys_addr); + return -1; + } + } else if (reply_supported) { + ret = process_message_reply(dev, msg); + if (ret) { + return ret; + } + } + } else if (track_ramblocks) { + u->region_rb_offset[reg_idx] = 0; + u->region_rb[reg_idx] = NULL; + } + + /* + * At this point, we know the backend has mapped in the new + * region, if the region has a valid file descriptor. + * + * The region should now be added to the shadow table. + */ + u->shadow_regions[u->num_shadow_regions].guest_phys_addr = + reg->guest_phys_addr; + u->shadow_regions[u->num_shadow_regions].userspace_addr = + reg->userspace_addr; + u->shadow_regions[u->num_shadow_regions].memory_size = + reg->memory_size; + u->num_shadow_regions++; + } + + return 0; +} + +static int vhost_user_add_remove_regions(struct vhost_dev *dev, + VhostUserMsg *msg, + bool reply_supported, + bool track_ramblocks) +{ + struct vhost_user *u = dev->opaque; + struct scrub_regions add_reg[VHOST_USER_MAX_RAM_SLOTS]; + struct scrub_regions rem_reg[VHOST_USER_MAX_RAM_SLOTS]; + uint64_t shadow_pcb[VHOST_USER_MAX_RAM_SLOTS] = {}; + int nr_add_reg, nr_rem_reg; + + msg->hdr.size = sizeof(msg->payload.mem_reg); + + /* Find the regions which need to be removed or added. */ + scrub_shadow_regions(dev, add_reg, &nr_add_reg, rem_reg, &nr_rem_reg, + shadow_pcb, track_ramblocks); + + if (nr_rem_reg && send_remove_regions(dev, rem_reg, nr_rem_reg, msg, + reply_supported) < 0) + { + goto err; + } + + if (nr_add_reg && send_add_regions(dev, add_reg, nr_add_reg, msg, + shadow_pcb, reply_supported, track_ramblocks) < 0) + { + goto err; + } + + if (track_ramblocks) { + memcpy(u->postcopy_client_bases, shadow_pcb, + sizeof(uint64_t) * VHOST_USER_MAX_RAM_SLOTS); + /* + * Now we've registered this with the postcopy code, we ack to the + * client, because now we're in the position to be able to deal with + * any faults it generates. + */ + /* TODO: Use this for failure cases as well with a bad value. 
*/ + msg->hdr.size = sizeof(msg->payload.u64); + msg->payload.u64 = 0; /* OK */ + + if (vhost_user_write(dev, msg, NULL, 0) < 0) { + return -1; + } + } + + return 0; + +err: + if (track_ramblocks) { + memcpy(u->postcopy_client_bases, shadow_pcb, + sizeof(uint64_t) * VHOST_USER_MAX_RAM_SLOTS); + } + + return -1; +} + +static int vhost_user_set_mem_table_postcopy(struct vhost_dev *dev, + struct vhost_memory *mem, + bool reply_supported, + bool config_mem_slots) +{ + struct vhost_user *u = dev->opaque; + int fds[VHOST_MEMORY_BASELINE_NREGIONS]; + size_t fd_num = 0; + VhostUserMsg msg_reply; + int region_i, msg_i; + + VhostUserMsg msg = { + .hdr.flags = VHOST_USER_VERSION, + }; + + if (u->region_rb_len < dev->mem->nregions) { + u->region_rb = g_renew(RAMBlock*, u->region_rb, dev->mem->nregions); + u->region_rb_offset = g_renew(ram_addr_t, u->region_rb_offset, + dev->mem->nregions); + memset(&(u->region_rb[u->region_rb_len]), '\0', + sizeof(RAMBlock *) * (dev->mem->nregions - u->region_rb_len)); + memset(&(u->region_rb_offset[u->region_rb_len]), '\0', + sizeof(ram_addr_t) * (dev->mem->nregions - u->region_rb_len)); + u->region_rb_len = dev->mem->nregions; + } + + if (config_mem_slots) { + if (vhost_user_add_remove_regions(dev, &msg, reply_supported, + true) < 0) { + return -1; + } + } else { + if (vhost_user_fill_set_mem_table_msg(u, dev, &msg, fds, &fd_num, + true) < 0) { + return -1; + } + + if (vhost_user_write(dev, &msg, fds, fd_num) < 0) { + return -1; + } + + if (vhost_user_read(dev, &msg_reply) < 0) { + return -1; + } + + if (msg_reply.hdr.request != VHOST_USER_SET_MEM_TABLE) { + error_report("%s: Received unexpected msg type." + "Expected %d received %d", __func__, + VHOST_USER_SET_MEM_TABLE, msg_reply.hdr.request); + return -1; + } + + /* + * We're using the same structure, just reusing one of the + * fields, so it should be the same size. + */ + if (msg_reply.hdr.size != msg.hdr.size) { + error_report("%s: Unexpected size for postcopy reply " + "%d vs %d", __func__, msg_reply.hdr.size, + msg.hdr.size); + return -1; + } + + memset(u->postcopy_client_bases, 0, + sizeof(uint64_t) * VHOST_USER_MAX_RAM_SLOTS); + + /* + * They're in the same order as the regions that were sent + * but some of the regions were skipped (above) if they + * didn't have fd's + */ + for (msg_i = 0, region_i = 0; + region_i < dev->mem->nregions; + region_i++) { + if (msg_i < fd_num && + msg_reply.payload.memory.regions[msg_i].guest_phys_addr == + dev->mem->regions[region_i].guest_phys_addr) { + u->postcopy_client_bases[region_i] = + msg_reply.payload.memory.regions[msg_i].userspace_addr; + trace_vhost_user_set_mem_table_postcopy( + msg_reply.payload.memory.regions[msg_i].userspace_addr, + msg.payload.memory.regions[msg_i].userspace_addr, + msg_i, region_i); + msg_i++; + } + } + if (msg_i != fd_num) { + error_report("%s: postcopy reply not fully consumed " + "%d vs %zd", + __func__, msg_i, fd_num); + return -1; + } + + /* + * Now we've registered this with the postcopy code, we ack to the + * client, because now we're in the position to be able to deal + * with any faults it generates. + */ + /* TODO: Use this for failure cases as well with a bad value. 
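(Same acknowledgement as on the VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS path in vhost_user_add_remove_regions() above.)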
*/ + msg.hdr.size = sizeof(msg.payload.u64); + msg.payload.u64 = 0; /* OK */ + if (vhost_user_write(dev, &msg, NULL, 0) < 0) { + return -1; + } + } + + return 0; +} + +static int vhost_user_set_mem_table(struct vhost_dev *dev, + struct vhost_memory *mem) +{ + struct vhost_user *u = dev->opaque; + int fds[VHOST_MEMORY_BASELINE_NREGIONS]; + size_t fd_num = 0; + bool do_postcopy = u->postcopy_listen && u->postcopy_fd.handler; + bool reply_supported = virtio_has_feature(dev->protocol_features, + VHOST_USER_PROTOCOL_F_REPLY_ACK); + bool config_mem_slots = + virtio_has_feature(dev->protocol_features, + VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS); + + if (do_postcopy) { + /* + * Postcopy has enough differences that it's best done in it's own + * version + */ + return vhost_user_set_mem_table_postcopy(dev, mem, reply_supported, + config_mem_slots); + } + + VhostUserMsg msg = { + .hdr.flags = VHOST_USER_VERSION, + }; + + if (reply_supported) { + msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK; + } + + if (config_mem_slots) { + if (vhost_user_add_remove_regions(dev, &msg, reply_supported, + false) < 0) { + return -1; + } + } else { + if (vhost_user_fill_set_mem_table_msg(u, dev, &msg, fds, &fd_num, + false) < 0) { + return -1; + } + if (vhost_user_write(dev, &msg, fds, fd_num) < 0) { + return -1; + } + + if (reply_supported) { + return process_message_reply(dev, &msg); + } + } + + return 0; +} + +static int vhost_user_set_vring_endian(struct vhost_dev *dev, + struct vhost_vring_state *ring) +{ + bool cross_endian = virtio_has_feature(dev->protocol_features, + VHOST_USER_PROTOCOL_F_CROSS_ENDIAN); + VhostUserMsg msg = { + .hdr.request = VHOST_USER_SET_VRING_ENDIAN, + .hdr.flags = VHOST_USER_VERSION, + .payload.state = *ring, + .hdr.size = sizeof(msg.payload.state), + }; + + if (!cross_endian) { + error_report("vhost-user trying to send unhandled ioctl"); + return -1; + } + + if (vhost_user_write(dev, &msg, NULL, 0) < 0) { + return -1; + } + + return 0; +} + +static int vhost_set_vring(struct vhost_dev *dev, + unsigned long int request, + struct vhost_vring_state *ring) +{ + VhostUserMsg msg = { + .hdr.request = request, + .hdr.flags = VHOST_USER_VERSION, + .payload.state = *ring, + .hdr.size = sizeof(msg.payload.state), + }; + + if (vhost_user_write(dev, &msg, NULL, 0) < 0) { + return -1; + } + + return 0; +} + +static int vhost_user_set_vring_num(struct vhost_dev *dev, + struct vhost_vring_state *ring) +{ + return vhost_set_vring(dev, VHOST_USER_SET_VRING_NUM, ring); +} + +static void vhost_user_host_notifier_restore(struct vhost_dev *dev, + int queue_idx) +{ + struct vhost_user *u = dev->opaque; + VhostUserHostNotifier *n = &u->user->notifier[queue_idx]; + VirtIODevice *vdev = dev->vdev; + + if (n->addr && !n->set) { + virtio_queue_set_host_notifier_mr(vdev, queue_idx, &n->mr, true); + n->set = true; + } +} + +static void vhost_user_host_notifier_remove(struct vhost_dev *dev, + int queue_idx) +{ + struct vhost_user *u = dev->opaque; + VhostUserHostNotifier *n = &u->user->notifier[queue_idx]; + VirtIODevice *vdev = dev->vdev; + + if (n->addr && n->set) { + virtio_queue_set_host_notifier_mr(vdev, queue_idx, &n->mr, false); + n->set = false; + } +} + +static int vhost_user_set_vring_base(struct vhost_dev *dev, + struct vhost_vring_state *ring) +{ + vhost_user_host_notifier_restore(dev, ring->index); + + return vhost_set_vring(dev, VHOST_USER_SET_VRING_BASE, ring); +} + +static int vhost_user_set_vring_enable(struct vhost_dev *dev, int enable) +{ + int i; + + if (!virtio_has_feature(dev->features, 
VHOST_USER_F_PROTOCOL_FEATURES)) { + return -1; + } + + for (i = 0; i < dev->nvqs; ++i) { + struct vhost_vring_state state = { + .index = dev->vq_index + i, + .num = enable, + }; + + vhost_set_vring(dev, VHOST_USER_SET_VRING_ENABLE, &state); + } + + return 0; +} + +static int vhost_user_get_vring_base(struct vhost_dev *dev, + struct vhost_vring_state *ring) +{ + VhostUserMsg msg = { + .hdr.request = VHOST_USER_GET_VRING_BASE, + .hdr.flags = VHOST_USER_VERSION, + .payload.state = *ring, + .hdr.size = sizeof(msg.payload.state), + }; + + vhost_user_host_notifier_remove(dev, ring->index); + + if (vhost_user_write(dev, &msg, NULL, 0) < 0) { + return -1; + } + + if (vhost_user_read(dev, &msg) < 0) { + return -1; + } + + if (msg.hdr.request != VHOST_USER_GET_VRING_BASE) { + error_report("Received unexpected msg type. Expected %d received %d", + VHOST_USER_GET_VRING_BASE, msg.hdr.request); + return -1; + } + + if (msg.hdr.size != sizeof(msg.payload.state)) { + error_report("Received bad msg size."); + return -1; + } + + *ring = msg.payload.state; + + return 0; +} + +static int vhost_set_vring_file(struct vhost_dev *dev, + VhostUserRequest request, + struct vhost_vring_file *file) +{ + int fds[VHOST_USER_MAX_RAM_SLOTS]; + size_t fd_num = 0; + VhostUserMsg msg = { + .hdr.request = request, + .hdr.flags = VHOST_USER_VERSION, + .payload.u64 = file->index & VHOST_USER_VRING_IDX_MASK, + .hdr.size = sizeof(msg.payload.u64), + }; + + if (ioeventfd_enabled() && file->fd > 0) { + fds[fd_num++] = file->fd; + } else { + msg.payload.u64 |= VHOST_USER_VRING_NOFD_MASK; + } + + if (vhost_user_write(dev, &msg, fds, fd_num) < 0) { + return -1; + } + + return 0; +} + +static int vhost_user_set_vring_kick(struct vhost_dev *dev, + struct vhost_vring_file *file) +{ + return vhost_set_vring_file(dev, VHOST_USER_SET_VRING_KICK, file); +} + +static int vhost_user_set_vring_call(struct vhost_dev *dev, + struct vhost_vring_file *file) +{ + return vhost_set_vring_file(dev, VHOST_USER_SET_VRING_CALL, file); +} + + +static int vhost_user_get_u64(struct vhost_dev *dev, int request, uint64_t *u64) +{ + VhostUserMsg msg = { + .hdr.request = request, + .hdr.flags = VHOST_USER_VERSION, + }; + + if (vhost_user_one_time_request(request) && dev->vq_index != 0) { + return 0; + } + + if (vhost_user_write(dev, &msg, NULL, 0) < 0) { + return -1; + } + + if (vhost_user_read(dev, &msg) < 0) { + return -1; + } + + if (msg.hdr.request != request) { + error_report("Received unexpected msg type. Expected %d received %d", + request, msg.hdr.request); + return -1; + } + + if (msg.hdr.size != sizeof(msg.payload.u64)) { + error_report("Received bad msg size."); + return -1; + } + + *u64 = msg.payload.u64; + + return 0; +} + +static int vhost_user_get_features(struct vhost_dev *dev, uint64_t *features) +{ + if (vhost_user_get_u64(dev, VHOST_USER_GET_FEATURES, features) < 0) { + return -EPROTO; + } + + return 0; +} + +static int enforce_reply(struct vhost_dev *dev, + const VhostUserMsg *msg) +{ + uint64_t dummy; + + if (msg->hdr.flags & VHOST_USER_NEED_REPLY_MASK) { + return process_message_reply(dev, msg); + } + + /* + * We need to wait for a reply but the backend does not + * support replies for the command we just sent. + * Send VHOST_USER_GET_FEATURES which makes all backends + * send a reply. 
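+ * Backends process messages in order, so the GET_FEATURES reply also
+ * proves that the message sent just before it has been handled.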
+ */ + return vhost_user_get_features(dev, &dummy); +} + +static int vhost_user_set_vring_addr(struct vhost_dev *dev, + struct vhost_vring_addr *addr) +{ + VhostUserMsg msg = { + .hdr.request = VHOST_USER_SET_VRING_ADDR, + .hdr.flags = VHOST_USER_VERSION, + .payload.addr = *addr, + .hdr.size = sizeof(msg.payload.addr), + }; + + bool reply_supported = virtio_has_feature(dev->protocol_features, + VHOST_USER_PROTOCOL_F_REPLY_ACK); + + /* + * wait for a reply if logging is enabled to make sure + * backend is actually logging changes + */ + bool wait_for_reply = addr->flags & (1 << VHOST_VRING_F_LOG); + + if (reply_supported && wait_for_reply) { + msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK; + } + + if (vhost_user_write(dev, &msg, NULL, 0) < 0) { + return -1; + } + + if (wait_for_reply) { + return enforce_reply(dev, &msg); + } + + return 0; +} + +static int vhost_user_set_u64(struct vhost_dev *dev, int request, uint64_t u64, + bool wait_for_reply) +{ + VhostUserMsg msg = { + .hdr.request = request, + .hdr.flags = VHOST_USER_VERSION, + .payload.u64 = u64, + .hdr.size = sizeof(msg.payload.u64), + }; + + if (wait_for_reply) { + bool reply_supported = virtio_has_feature(dev->protocol_features, + VHOST_USER_PROTOCOL_F_REPLY_ACK); + if (reply_supported) { + msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK; + } + } + + if (vhost_user_write(dev, &msg, NULL, 0) < 0) { + return -1; + } + + if (wait_for_reply) { + return enforce_reply(dev, &msg); + } + + return 0; +} + +static int vhost_user_set_features(struct vhost_dev *dev, + uint64_t features) +{ + /* + * wait for a reply if logging is enabled to make sure + * backend is actually logging changes + */ + bool log_enabled = features & (0x1ULL << VHOST_F_LOG_ALL); + + return vhost_user_set_u64(dev, VHOST_USER_SET_FEATURES, features, + log_enabled); +} + +static int vhost_user_set_protocol_features(struct vhost_dev *dev, + uint64_t features) +{ + return vhost_user_set_u64(dev, VHOST_USER_SET_PROTOCOL_FEATURES, features, + false); +} + +static int vhost_user_set_owner(struct vhost_dev *dev) +{ + VhostUserMsg msg = { + .hdr.request = VHOST_USER_SET_OWNER, + .hdr.flags = VHOST_USER_VERSION, + }; + + if (vhost_user_write(dev, &msg, NULL, 0) < 0) { + return -EPROTO; + } + + return 0; +} + +static int vhost_user_get_max_memslots(struct vhost_dev *dev, + uint64_t *max_memslots) +{ + uint64_t backend_max_memslots; + int err; + + err = vhost_user_get_u64(dev, VHOST_USER_GET_MAX_MEM_SLOTS, + &backend_max_memslots); + if (err < 0) { + return err; + } + + *max_memslots = backend_max_memslots; + + return 0; +} + +static int vhost_user_reset_device(struct vhost_dev *dev) +{ + VhostUserMsg msg = { + .hdr.flags = VHOST_USER_VERSION, + }; + + msg.hdr.request = virtio_has_feature(dev->protocol_features, + VHOST_USER_PROTOCOL_F_RESET_DEVICE) + ? 
VHOST_USER_RESET_DEVICE + : VHOST_USER_RESET_OWNER; + + if (vhost_user_write(dev, &msg, NULL, 0) < 0) { + return -1; + } + + return 0; +} + +static int vhost_user_slave_handle_config_change(struct vhost_dev *dev) +{ + int ret = -1; + + if (!dev->config_ops) { + return -1; + } + + if (dev->config_ops->vhost_dev_config_notifier) { + ret = dev->config_ops->vhost_dev_config_notifier(dev); + } + + return ret; +} + +static int vhost_user_slave_handle_vring_host_notifier(struct vhost_dev *dev, + VhostUserVringArea *area, + int fd) +{ + int queue_idx = area->u64 & VHOST_USER_VRING_IDX_MASK; + size_t page_size = qemu_real_host_page_size; + struct vhost_user *u = dev->opaque; + VhostUserState *user = u->user; + VirtIODevice *vdev = dev->vdev; + VhostUserHostNotifier *n; + void *addr; + char *name; + + if (!virtio_has_feature(dev->protocol_features, + VHOST_USER_PROTOCOL_F_HOST_NOTIFIER) || + vdev == NULL || queue_idx >= virtio_get_num_queues(vdev)) { + return -1; + } + + n = &user->notifier[queue_idx]; + + if (n->addr) { + virtio_queue_set_host_notifier_mr(vdev, queue_idx, &n->mr, false); + object_unparent(OBJECT(&n->mr)); + munmap(n->addr, page_size); + n->addr = NULL; + } + + if (area->u64 & VHOST_USER_VRING_NOFD_MASK) { + return 0; + } + + /* Sanity check. */ + if (area->size != page_size) { + return -1; + } + + addr = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED, + fd, area->offset); + if (addr == MAP_FAILED) { + return -1; + } + + name = g_strdup_printf("vhost-user/host-notifier@%p mmaps[%d]", + user, queue_idx); + if (!n->mr.ram) /* Don't init again after suspend. */ + memory_region_init_ram_device_ptr(&n->mr, OBJECT(vdev), name, + page_size, addr); + g_free(name); + + if (virtio_queue_set_host_notifier_mr(vdev, queue_idx, &n->mr, true)) { + object_unparent(OBJECT(&n->mr)); + munmap(addr, page_size); + return -1; + } + + n->addr = addr; + n->set = true; + + return 0; +} + +static void close_slave_channel(struct vhost_user *u) +{ + g_source_destroy(u->slave_src); + g_source_unref(u->slave_src); + u->slave_src = NULL; + object_unref(OBJECT(u->slave_ioc)); + u->slave_ioc = NULL; +} + +static gboolean slave_read(QIOChannel *ioc, GIOCondition condition, + gpointer opaque) +{ + struct vhost_dev *dev = opaque; + struct vhost_user *u = dev->opaque; + VhostUserHeader hdr = { 0, }; + VhostUserPayload payload = { 0, }; + Error *local_err = NULL; + gboolean rc = G_SOURCE_CONTINUE; + int ret = 0; + struct iovec iov; + g_autofree int *fd = NULL; + size_t fdsize = 0; + int i; + + /* Read header */ + iov.iov_base = &hdr; + iov.iov_len = VHOST_USER_HDR_SIZE; + + if (qio_channel_readv_full_all(ioc, &iov, 1, &fd, &fdsize, &local_err)) { + error_report_err(local_err); + goto err; + } + + if (hdr.size > VHOST_USER_PAYLOAD_SIZE) { + error_report("Failed to read msg header." + " Size %d exceeds the maximum %zu.", hdr.size, + VHOST_USER_PAYLOAD_SIZE); + goto err; + } + + /* Read payload */ + if (qio_channel_read_all(ioc, (char *) &payload, hdr.size, &local_err)) { + error_report_err(local_err); + goto err; + } + + switch (hdr.request) { + case VHOST_USER_SLAVE_IOTLB_MSG: + ret = vhost_backend_handle_iotlb_msg(dev, &payload.iotlb); + break; + case VHOST_USER_SLAVE_CONFIG_CHANGE_MSG : + ret = vhost_user_slave_handle_config_change(dev); + break; + case VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG: + ret = vhost_user_slave_handle_vring_host_notifier(dev, &payload.area, + fd ? 
fd[0] : -1); + break; + default: + error_report("Received unexpected msg type: %d.", hdr.request); + ret = -EINVAL; + } + + /* + * REPLY_ACK feature handling. Other reply types has to be managed + * directly in their request handlers. + */ + if (hdr.flags & VHOST_USER_NEED_REPLY_MASK) { + struct iovec iovec[2]; + + + hdr.flags &= ~VHOST_USER_NEED_REPLY_MASK; + hdr.flags |= VHOST_USER_REPLY_MASK; + + payload.u64 = !!ret; + hdr.size = sizeof(payload.u64); + + iovec[0].iov_base = &hdr; + iovec[0].iov_len = VHOST_USER_HDR_SIZE; + iovec[1].iov_base = &payload; + iovec[1].iov_len = hdr.size; + + if (qio_channel_writev_all(ioc, iovec, ARRAY_SIZE(iovec), &local_err)) { + error_report_err(local_err); + goto err; + } + } + + goto fdcleanup; + +err: + close_slave_channel(u); + rc = G_SOURCE_REMOVE; + +fdcleanup: + if (fd) { + for (i = 0; i < fdsize; i++) { + close(fd[i]); + } + } + return rc; +} + +static int vhost_setup_slave_channel(struct vhost_dev *dev) +{ + VhostUserMsg msg = { + .hdr.request = VHOST_USER_SET_SLAVE_REQ_FD, + .hdr.flags = VHOST_USER_VERSION, + }; + struct vhost_user *u = dev->opaque; + int sv[2], ret = 0; + bool reply_supported = virtio_has_feature(dev->protocol_features, + VHOST_USER_PROTOCOL_F_REPLY_ACK); + Error *local_err = NULL; + QIOChannel *ioc; + + if (!virtio_has_feature(dev->protocol_features, + VHOST_USER_PROTOCOL_F_SLAVE_REQ)) { + return 0; + } + + if (socketpair(PF_UNIX, SOCK_STREAM, 0, sv) == -1) { + error_report("socketpair() failed"); + return -1; + } + + ioc = QIO_CHANNEL(qio_channel_socket_new_fd(sv[0], &local_err)); + if (!ioc) { + error_report_err(local_err); + return -1; + } + u->slave_ioc = ioc; + slave_update_read_handler(dev, NULL); + + if (reply_supported) { + msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK; + } + + ret = vhost_user_write(dev, &msg, &sv[1], 1); + if (ret) { + goto out; + } + + if (reply_supported) { + ret = process_message_reply(dev, &msg); + } + +out: + close(sv[1]); + if (ret) { + close_slave_channel(u); + } + + return ret; +} + +#ifdef CONFIG_LINUX +/* + * Called back from the postcopy fault thread when a fault is received on our + * ufd. 
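+ * The opaque ufd argument is really the kernel's struct uffd_msg
+ * describing the page fault.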
+ * TODO: This is Linux specific
+ */
+static int vhost_user_postcopy_fault_handler(struct PostCopyFD *pcfd,
+ void *ufd)
+{
+ struct vhost_dev *dev = pcfd->data;
+ struct vhost_user *u = dev->opaque;
+ struct uffd_msg *msg = ufd;
+ uint64_t faultaddr = msg->arg.pagefault.address;
+ RAMBlock *rb = NULL;
+ uint64_t rb_offset;
+ int i;
+
+ trace_vhost_user_postcopy_fault_handler(pcfd->idstr, faultaddr,
+ dev->mem->nregions);
+ for (i = 0; i < MIN(dev->mem->nregions, u->region_rb_len); i++) {
+ trace_vhost_user_postcopy_fault_handler_loop(i,
+ u->postcopy_client_bases[i], dev->mem->regions[i].memory_size);
+ if (faultaddr >= u->postcopy_client_bases[i]) {
+ /* Offset of the fault address in the vhost region */
+ uint64_t region_offset = faultaddr - u->postcopy_client_bases[i];
+ if (region_offset < dev->mem->regions[i].memory_size) {
+ rb_offset = region_offset + u->region_rb_offset[i];
+ trace_vhost_user_postcopy_fault_handler_found(i,
+ region_offset, rb_offset);
+ rb = u->region_rb[i];
+ return postcopy_request_shared_page(pcfd, rb, faultaddr,
+ rb_offset);
+ }
+ }
+ }
+ error_report("%s: Failed to find region for fault %" PRIx64,
+ __func__, faultaddr);
+ return -1;
+}
+
+static int vhost_user_postcopy_waker(struct PostCopyFD *pcfd, RAMBlock *rb,
+ uint64_t offset)
+{
+ struct vhost_dev *dev = pcfd->data;
+ struct vhost_user *u = dev->opaque;
+ int i;
+
+ trace_vhost_user_postcopy_waker(qemu_ram_get_idstr(rb), offset);
+
+ if (!u) {
+ return 0;
+ }
+ /* Translate the offset into an address in the client's address space */
+ for (i = 0; i < MIN(dev->mem->nregions, u->region_rb_len); i++) {
+ if (u->region_rb[i] == rb &&
+ offset >= u->region_rb_offset[i] &&
+ offset < (u->region_rb_offset[i] +
+ dev->mem->regions[i].memory_size)) {
+ uint64_t client_addr = (offset - u->region_rb_offset[i]) +
+ u->postcopy_client_bases[i];
+ trace_vhost_user_postcopy_waker_found(client_addr);
+ return postcopy_wake_shared(pcfd, client_addr, rb);
+ }
+ }
+
+ trace_vhost_user_postcopy_waker_nomatch(qemu_ram_get_idstr(rb), offset);
+ return 0;
+}
+#endif
+
+/*
+ * Called at the start of an inbound postcopy on reception of the
+ * 'advise' command.
+ */
+static int vhost_user_postcopy_advise(struct vhost_dev *dev, Error **errp)
+{
+#ifdef CONFIG_LINUX
+ struct vhost_user *u = dev->opaque;
+ CharBackend *chr = u->user->chr;
+ int ufd;
+ VhostUserMsg msg = {
+ .hdr.request = VHOST_USER_POSTCOPY_ADVISE,
+ .hdr.flags = VHOST_USER_VERSION,
+ };
+
+ if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
+ error_setg(errp, "Failed to send postcopy_advise to vhost");
+ return -1;
+ }
+
+ if (vhost_user_read(dev, &msg) < 0) {
+ error_setg(errp, "Failed to get postcopy_advise reply from vhost");
+ return -1;
+ }
+
+ if (msg.hdr.request != VHOST_USER_POSTCOPY_ADVISE) {
+ error_setg(errp, "Unexpected msg type.
Expected %d received %d", + VHOST_USER_POSTCOPY_ADVISE, msg.hdr.request); + return -1; + } + + if (msg.hdr.size) { + error_setg(errp, "Received bad msg size."); + return -1; + } + ufd = qemu_chr_fe_get_msgfd(chr); + if (ufd < 0) { + error_setg(errp, "%s: Failed to get ufd", __func__); + return -1; + } + qemu_set_nonblock(ufd); + + /* register ufd with userfault thread */ + u->postcopy_fd.fd = ufd; + u->postcopy_fd.data = dev; + u->postcopy_fd.handler = vhost_user_postcopy_fault_handler; + u->postcopy_fd.waker = vhost_user_postcopy_waker; + u->postcopy_fd.idstr = "vhost-user"; /* Need to find unique name */ + postcopy_register_shared_ufd(&u->postcopy_fd); + return 0; +#else + error_setg(errp, "Postcopy not supported on non-Linux systems"); + return -1; +#endif +} + +/* + * Called at the switch to postcopy on reception of the 'listen' command. + */ +static int vhost_user_postcopy_listen(struct vhost_dev *dev, Error **errp) +{ + struct vhost_user *u = dev->opaque; + int ret; + VhostUserMsg msg = { + .hdr.request = VHOST_USER_POSTCOPY_LISTEN, + .hdr.flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY_MASK, + }; + u->postcopy_listen = true; + trace_vhost_user_postcopy_listen(); + if (vhost_user_write(dev, &msg, NULL, 0) < 0) { + error_setg(errp, "Failed to send postcopy_listen to vhost"); + return -1; + } + + ret = process_message_reply(dev, &msg); + if (ret) { + error_setg(errp, "Failed to receive reply to postcopy_listen"); + return ret; + } + + return 0; +} + +/* + * Called at the end of postcopy + */ +static int vhost_user_postcopy_end(struct vhost_dev *dev, Error **errp) +{ + VhostUserMsg msg = { + .hdr.request = VHOST_USER_POSTCOPY_END, + .hdr.flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY_MASK, + }; + int ret; + struct vhost_user *u = dev->opaque; + + trace_vhost_user_postcopy_end_entry(); + if (vhost_user_write(dev, &msg, NULL, 0) < 0) { + error_setg(errp, "Failed to send postcopy_end to vhost"); + return -1; + } + + ret = process_message_reply(dev, &msg); + if (ret) { + error_setg(errp, "Failed to receive reply to postcopy_end"); + return ret; + } + postcopy_unregister_shared_ufd(&u->postcopy_fd); + close(u->postcopy_fd.fd); + u->postcopy_fd.handler = NULL; + + trace_vhost_user_postcopy_end_exit(); + + return 0; +} + +static int vhost_user_postcopy_notifier(NotifierWithReturn *notifier, + void *opaque) +{ + struct PostcopyNotifyData *pnd = opaque; + struct vhost_user *u = container_of(notifier, struct vhost_user, + postcopy_notifier); + struct vhost_dev *dev = u->dev; + + switch (pnd->reason) { + case POSTCOPY_NOTIFY_PROBE: + if (!virtio_has_feature(dev->protocol_features, + VHOST_USER_PROTOCOL_F_PAGEFAULT)) { + /* TODO: Get the device name into this error somehow */ + error_setg(pnd->errp, + "vhost-user backend not capable of postcopy"); + return -ENOENT; + } + break; + + case POSTCOPY_NOTIFY_INBOUND_ADVISE: + return vhost_user_postcopy_advise(dev, pnd->errp); + + case POSTCOPY_NOTIFY_INBOUND_LISTEN: + return vhost_user_postcopy_listen(dev, pnd->errp); + + case POSTCOPY_NOTIFY_INBOUND_END: + return vhost_user_postcopy_end(dev, pnd->errp); + + default: + /* We ignore notifications we don't know */ + break; + } + + return 0; +} + +static int vhost_user_backend_init(struct vhost_dev *dev, void *opaque, + Error **errp) +{ + uint64_t features, protocol_features, ram_slots; + struct vhost_user *u; + int err; + + assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER); + + u = g_new0(struct vhost_user, 1); + u->user = opaque; + u->dev = dev; + dev->opaque = u; + + err = 
vhost_user_get_features(dev, &features); + if (err < 0) { + error_setg_errno(errp, -err, "vhost_backend_init failed"); + return err; + } + + if (virtio_has_feature(features, VHOST_USER_F_PROTOCOL_FEATURES)) { + dev->backend_features |= 1ULL << VHOST_USER_F_PROTOCOL_FEATURES; + + err = vhost_user_get_u64(dev, VHOST_USER_GET_PROTOCOL_FEATURES, + &protocol_features); + if (err < 0) { + error_setg_errno(errp, EPROTO, "vhost_backend_init failed"); + return -EPROTO; + } + + dev->protocol_features = + protocol_features & VHOST_USER_PROTOCOL_FEATURE_MASK; + + if (!dev->config_ops || !dev->config_ops->vhost_dev_config_notifier) { + /* Don't acknowledge CONFIG feature if device doesn't support it */ + dev->protocol_features &= ~(1ULL << VHOST_USER_PROTOCOL_F_CONFIG); + } else if (!(protocol_features & + (1ULL << VHOST_USER_PROTOCOL_F_CONFIG))) { + error_setg(errp, "Device expects VHOST_USER_PROTOCOL_F_CONFIG " + "but backend does not support it."); + return -EINVAL; + } + + err = vhost_user_set_protocol_features(dev, dev->protocol_features); + if (err < 0) { + error_setg_errno(errp, EPROTO, "vhost_backend_init failed"); + return -EPROTO; + } + + /* query the max queues we support if backend supports Multiple Queue */ + if (dev->protocol_features & (1ULL << VHOST_USER_PROTOCOL_F_MQ)) { + err = vhost_user_get_u64(dev, VHOST_USER_GET_QUEUE_NUM, + &dev->max_queues); + if (err < 0) { + error_setg_errno(errp, EPROTO, "vhost_backend_init failed"); + return -EPROTO; + } + } else { + dev->max_queues = 1; + } + + if (dev->num_queues && dev->max_queues < dev->num_queues) { + error_setg(errp, "The maximum number of queues supported by the " + "backend is %" PRIu64, dev->max_queues); + return -EINVAL; + } + + if (virtio_has_feature(features, VIRTIO_F_IOMMU_PLATFORM) && + !(virtio_has_feature(dev->protocol_features, + VHOST_USER_PROTOCOL_F_SLAVE_REQ) && + virtio_has_feature(dev->protocol_features, + VHOST_USER_PROTOCOL_F_REPLY_ACK))) { + error_setg(errp, "IOMMU support requires reply-ack and " + "slave-req protocol features."); + return -EINVAL; + } + + /* get max memory regions if backend supports configurable RAM slots */ + if (!virtio_has_feature(dev->protocol_features, + VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS)) { + u->user->memory_slots = VHOST_MEMORY_BASELINE_NREGIONS; + } else { + err = vhost_user_get_max_memslots(dev, &ram_slots); + if (err < 0) { + error_setg_errno(errp, EPROTO, "vhost_backend_init failed"); + return -EPROTO; + } + + if (ram_slots < u->user->memory_slots) { + error_setg(errp, "The backend specified a max ram slots limit " + "of %" PRIu64", when the prior validated limit was " + "%d. 
This limit should never decrease.", ram_slots, + u->user->memory_slots); + return -EINVAL; + } + + u->user->memory_slots = MIN(ram_slots, VHOST_USER_MAX_RAM_SLOTS); + } + } + + if (dev->migration_blocker == NULL && + !virtio_has_feature(dev->protocol_features, + VHOST_USER_PROTOCOL_F_LOG_SHMFD)) { + error_setg(&dev->migration_blocker, + "Migration disabled: vhost-user backend lacks " + "VHOST_USER_PROTOCOL_F_LOG_SHMFD feature."); + } + + if (dev->vq_index == 0) { + err = vhost_setup_slave_channel(dev); + if (err < 0) { + error_setg_errno(errp, EPROTO, "vhost_backend_init failed"); + return -EPROTO; + } + } + + u->postcopy_notifier.notify = vhost_user_postcopy_notifier; + postcopy_add_notifier(&u->postcopy_notifier); + + return 0; +} + +static int vhost_user_backend_cleanup(struct vhost_dev *dev) +{ + struct vhost_user *u; + + assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER); + + u = dev->opaque; + if (u->postcopy_notifier.notify) { + postcopy_remove_notifier(&u->postcopy_notifier); + u->postcopy_notifier.notify = NULL; + } + u->postcopy_listen = false; + if (u->postcopy_fd.handler) { + postcopy_unregister_shared_ufd(&u->postcopy_fd); + close(u->postcopy_fd.fd); + u->postcopy_fd.handler = NULL; + } + if (u->slave_ioc) { + close_slave_channel(u); + } + g_free(u->region_rb); + u->region_rb = NULL; + g_free(u->region_rb_offset); + u->region_rb_offset = NULL; + u->region_rb_len = 0; + g_free(u); + dev->opaque = 0; + + return 0; +} + +static int vhost_user_get_vq_index(struct vhost_dev *dev, int idx) +{ + assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs); + + return idx; +} + +static int vhost_user_memslots_limit(struct vhost_dev *dev) +{ + struct vhost_user *u = dev->opaque; + + return u->user->memory_slots; +} + +static bool vhost_user_requires_shm_log(struct vhost_dev *dev) +{ + assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER); + + return virtio_has_feature(dev->protocol_features, + VHOST_USER_PROTOCOL_F_LOG_SHMFD); +} + +static int vhost_user_migration_done(struct vhost_dev *dev, char* mac_addr) +{ + VhostUserMsg msg = { }; + + assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER); + + /* If guest supports GUEST_ANNOUNCE do nothing */ + if (virtio_has_feature(dev->acked_features, VIRTIO_NET_F_GUEST_ANNOUNCE)) { + return 0; + } + + /* if backend supports VHOST_USER_PROTOCOL_F_RARP ask it to send the RARP */ + if (virtio_has_feature(dev->protocol_features, + VHOST_USER_PROTOCOL_F_RARP)) { + msg.hdr.request = VHOST_USER_SEND_RARP; + msg.hdr.flags = VHOST_USER_VERSION; + memcpy((char *)&msg.payload.u64, mac_addr, 6); + msg.hdr.size = sizeof(msg.payload.u64); + + return vhost_user_write(dev, &msg, NULL, 0); + } + return -1; +} + +static bool vhost_user_can_merge(struct vhost_dev *dev, + uint64_t start1, uint64_t size1, + uint64_t start2, uint64_t size2) +{ + ram_addr_t offset; + int mfd, rfd; + + (void)vhost_user_get_mr_data(start1, &offset, &mfd); + (void)vhost_user_get_mr_data(start2, &offset, &rfd); + + return mfd == rfd; +} + +static int vhost_user_net_set_mtu(struct vhost_dev *dev, uint16_t mtu) +{ + VhostUserMsg msg; + bool reply_supported = virtio_has_feature(dev->protocol_features, + VHOST_USER_PROTOCOL_F_REPLY_ACK); + + if (!(dev->protocol_features & (1ULL << VHOST_USER_PROTOCOL_F_NET_MTU))) { + return 0; + } + + msg.hdr.request = VHOST_USER_NET_SET_MTU; + msg.payload.u64 = mtu; + msg.hdr.size = sizeof(msg.payload.u64); + msg.hdr.flags = VHOST_USER_VERSION; + if (reply_supported) { + msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK; + } + 
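+ /*
+ * Worked example of the resulting wire message, assuming an MTU of
+ * 1500: request = 20 (VHOST_USER_NET_SET_MTU), flags = 0x1
+ * (VHOST_USER_VERSION, | 0x8 when REPLY_ACK is negotiated), size = 8,
+ * payload.u64 = 1500.
+ */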
+    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
+        return -1;
+    }
+
+    /* If reply_ack is supported, the slave has to ack that the specified MTU is valid */
+    if (reply_supported) {
+        return process_message_reply(dev, &msg);
+    }
+
+    return 0;
+}
+
+static int vhost_user_send_device_iotlb_msg(struct vhost_dev *dev,
+                                            struct vhost_iotlb_msg *imsg)
+{
+    VhostUserMsg msg = {
+        .hdr.request = VHOST_USER_IOTLB_MSG,
+        .hdr.size = sizeof(msg.payload.iotlb),
+        .hdr.flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY_MASK,
+        .payload.iotlb = *imsg,
+    };
+
+    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
+        return -EFAULT;
+    }
+
+    return process_message_reply(dev, &msg);
+}
+
+
+static void vhost_user_set_iotlb_callback(struct vhost_dev *dev, int enabled)
+{
+    /* No-op as the receive channel is not dedicated to IOTLB messages. */
+}
+
+static int vhost_user_get_config(struct vhost_dev *dev, uint8_t *config,
+                                 uint32_t config_len, Error **errp)
+{
+    VhostUserMsg msg = {
+        .hdr.request = VHOST_USER_GET_CONFIG,
+        .hdr.flags = VHOST_USER_VERSION,
+        .hdr.size = VHOST_USER_CONFIG_HDR_SIZE + config_len,
+    };
+
+    if (!virtio_has_feature(dev->protocol_features,
+                            VHOST_USER_PROTOCOL_F_CONFIG)) {
+        error_setg(errp, "VHOST_USER_PROTOCOL_F_CONFIG not supported");
+        return -EINVAL;
+    }
+
+    assert(config_len <= VHOST_USER_MAX_CONFIG_SIZE);
+
+    msg.payload.config.offset = 0;
+    msg.payload.config.size = config_len;
+    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
+        error_setg_errno(errp, EPROTO, "vhost_get_config failed");
+        return -EPROTO;
+    }
+
+    if (vhost_user_read(dev, &msg) < 0) {
+        error_setg_errno(errp, EPROTO, "vhost_get_config failed");
+        return -EPROTO;
+    }
+
+    if (msg.hdr.request != VHOST_USER_GET_CONFIG) {
+        error_setg(errp,
+                   "Received unexpected msg type. Expected %d received %d",
+                   VHOST_USER_GET_CONFIG, msg.hdr.request);
+        return -EINVAL;
+    }
+
+    if (msg.hdr.size != VHOST_USER_CONFIG_HDR_SIZE + config_len) {
+        error_setg(errp, "Received bad msg size.");
+        return -EINVAL;
+    }
+
+    memcpy(config, msg.payload.config.region, config_len);
+
+    return 0;
+}
+
+static int vhost_user_set_config(struct vhost_dev *dev, const uint8_t *data,
+                                 uint32_t offset, uint32_t size, uint32_t flags)
+{
+    uint8_t *p;
+    bool reply_supported = virtio_has_feature(dev->protocol_features,
+                                              VHOST_USER_PROTOCOL_F_REPLY_ACK);
+
+    VhostUserMsg msg = {
+        .hdr.request = VHOST_USER_SET_CONFIG,
+        .hdr.flags = VHOST_USER_VERSION,
+        .hdr.size = VHOST_USER_CONFIG_HDR_SIZE + size,
+    };
+
+    if (!virtio_has_feature(dev->protocol_features,
+                            VHOST_USER_PROTOCOL_F_CONFIG)) {
+        return -1;
+    }
+
+    if (reply_supported) {
+        msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
+    }
+
+    if (size > VHOST_USER_MAX_CONFIG_SIZE) {
+        return -1;
+    }
+
+    msg.payload.config.offset = offset;
+    msg.payload.config.size = size;
+    msg.payload.config.flags = flags;
+    p = msg.payload.config.region;
+    memcpy(p, data, size);
+
+    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
+        return -1;
+    }
+
+    if (reply_supported) {
+        return process_message_reply(dev, &msg);
+    }
+
+    return 0;
+}
+
+static int vhost_user_crypto_create_session(struct vhost_dev *dev,
+                                            void *session_info,
+                                            uint64_t *session_id)
+{
+    bool crypto_session = virtio_has_feature(dev->protocol_features,
+                                       VHOST_USER_PROTOCOL_F_CRYPTO_SESSION);
+    CryptoDevBackendSymSessionInfo *sess_info = session_info;
+    VhostUserMsg msg = {
+        .hdr.request = VHOST_USER_CREATE_CRYPTO_SESSION,
+        .hdr.flags = VHOST_USER_VERSION,
+        .hdr.size = sizeof(msg.payload.session),
+    };
+
+    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);
+
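+    /*
+     * The whole session setup (including cipher and auth keys) is sent
+     * in one VHOST_USER_CREATE_CRYPTO_SESSION message; the backend is
+     * expected to reply with the same message type carrying the newly
+     * allocated session id, which is validated below.
+     */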
+    if (!crypto_session) {
+        error_report("vhost-user trying to send unhandled ioctl");
+        return -1;
+    }
+
+    memcpy(&msg.payload.session.session_setup_data, sess_info,
+           sizeof(CryptoDevBackendSymSessionInfo));
+    if (sess_info->key_len) {
+        memcpy(&msg.payload.session.key, sess_info->cipher_key,
+               sess_info->key_len);
+    }
+    if (sess_info->auth_key_len > 0) {
+        memcpy(&msg.payload.session.auth_key, sess_info->auth_key,
+               sess_info->auth_key_len);
+    }
+    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
+        error_report("vhost_user_write() returned -1, create session failed");
+        return -1;
+    }
+
+    if (vhost_user_read(dev, &msg) < 0) {
+        error_report("vhost_user_read() returned -1, create session failed");
+        return -1;
+    }
+
+    if (msg.hdr.request != VHOST_USER_CREATE_CRYPTO_SESSION) {
+        error_report("Received unexpected msg type. Expected %d received %d",
+                     VHOST_USER_CREATE_CRYPTO_SESSION, msg.hdr.request);
+        return -1;
+    }
+
+    if (msg.hdr.size != sizeof(msg.payload.session)) {
+        error_report("Received bad msg size.");
+        return -1;
+    }
+
+    if (msg.payload.session.session_id < 0) {
+        error_report("Bad session id: %" PRId64,
+                     msg.payload.session.session_id);
+        return -1;
+    }
+    *session_id = msg.payload.session.session_id;
+
+    return 0;
+}
+
+static int
+vhost_user_crypto_close_session(struct vhost_dev *dev, uint64_t session_id)
+{
+    bool crypto_session = virtio_has_feature(dev->protocol_features,
+                                       VHOST_USER_PROTOCOL_F_CRYPTO_SESSION);
+    VhostUserMsg msg = {
+        .hdr.request = VHOST_USER_CLOSE_CRYPTO_SESSION,
+        .hdr.flags = VHOST_USER_VERSION,
+        .hdr.size = sizeof(msg.payload.u64),
+    };
+    msg.payload.u64 = session_id;
+
+    if (!crypto_session) {
+        error_report("vhost-user trying to send unhandled ioctl");
+        return -1;
+    }
+
+    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
+        error_report("vhost_user_write() returned -1, close session failed");
+        return -1;
+    }
+
+    return 0;
+}
+
+static bool vhost_user_mem_section_filter(struct vhost_dev *dev,
+                                          MemoryRegionSection *section)
+{
+    return memory_region_get_fd(section->mr) >= 0;
+}
+
+static int vhost_user_get_inflight_fd(struct vhost_dev *dev,
+                                      uint16_t queue_size,
+                                      struct vhost_inflight *inflight)
+{
+    void *addr;
+    int fd;
+    struct vhost_user *u = dev->opaque;
+    CharBackend *chr = u->user->chr;
+    VhostUserMsg msg = {
+        .hdr.request = VHOST_USER_GET_INFLIGHT_FD,
+        .hdr.flags = VHOST_USER_VERSION,
+        .payload.inflight.num_queues = dev->nvqs,
+        .payload.inflight.queue_size = queue_size,
+        .hdr.size = sizeof(msg.payload.inflight),
+    };
+
+    if (!virtio_has_feature(dev->protocol_features,
+                            VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) {
+        return 0;
+    }
+
+    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
+        return -1;
+    }
+
+    if (vhost_user_read(dev, &msg) < 0) {
+        return -1;
+    }
+
+    if (msg.hdr.request != VHOST_USER_GET_INFLIGHT_FD) {
+        error_report("Received unexpected msg type. 
" + "Expected %d received %d", + VHOST_USER_GET_INFLIGHT_FD, msg.hdr.request); + return -1; + } + + if (msg.hdr.size != sizeof(msg.payload.inflight)) { + error_report("Received bad msg size."); + return -1; + } + + if (!msg.payload.inflight.mmap_size) { + return 0; + } + + fd = qemu_chr_fe_get_msgfd(chr); + if (fd < 0) { + error_report("Failed to get mem fd"); + return -1; + } + + addr = mmap(0, msg.payload.inflight.mmap_size, PROT_READ | PROT_WRITE, + MAP_SHARED, fd, msg.payload.inflight.mmap_offset); + + if (addr == MAP_FAILED) { + error_report("Failed to mmap mem fd"); + close(fd); + return -1; + } + + inflight->addr = addr; + inflight->fd = fd; + inflight->size = msg.payload.inflight.mmap_size; + inflight->offset = msg.payload.inflight.mmap_offset; + inflight->queue_size = queue_size; + + return 0; +} + +static int vhost_user_set_inflight_fd(struct vhost_dev *dev, + struct vhost_inflight *inflight) +{ + VhostUserMsg msg = { + .hdr.request = VHOST_USER_SET_INFLIGHT_FD, + .hdr.flags = VHOST_USER_VERSION, + .payload.inflight.mmap_size = inflight->size, + .payload.inflight.mmap_offset = inflight->offset, + .payload.inflight.num_queues = dev->nvqs, + .payload.inflight.queue_size = inflight->queue_size, + .hdr.size = sizeof(msg.payload.inflight), + }; + + if (!virtio_has_feature(dev->protocol_features, + VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) { + return 0; + } + + if (vhost_user_write(dev, &msg, &inflight->fd, 1) < 0) { + return -1; + } + + return 0; +} + +bool vhost_user_init(VhostUserState *user, CharBackend *chr, Error **errp) +{ + if (user->chr) { + error_setg(errp, "Cannot initialize vhost-user state"); + return false; + } + user->chr = chr; + user->memory_slots = 0; + return true; +} + +void vhost_user_cleanup(VhostUserState *user) +{ + int i; + + if (!user->chr) { + return; + } + memory_region_transaction_begin(); + for (i = 0; i < VIRTIO_QUEUE_MAX; i++) { + if (user->notifier[i].addr) { + object_unparent(OBJECT(&user->notifier[i].mr)); + munmap(user->notifier[i].addr, qemu_real_host_page_size); + user->notifier[i].addr = NULL; + } + } + memory_region_transaction_commit(); + user->chr = NULL; +} + +const VhostOps user_ops = { + .backend_type = VHOST_BACKEND_TYPE_USER, + .vhost_backend_init = vhost_user_backend_init, + .vhost_backend_cleanup = vhost_user_backend_cleanup, + .vhost_backend_memslots_limit = vhost_user_memslots_limit, + .vhost_set_log_base = vhost_user_set_log_base, + .vhost_set_mem_table = vhost_user_set_mem_table, + .vhost_set_vring_addr = vhost_user_set_vring_addr, + .vhost_set_vring_endian = vhost_user_set_vring_endian, + .vhost_set_vring_num = vhost_user_set_vring_num, + .vhost_set_vring_base = vhost_user_set_vring_base, + .vhost_get_vring_base = vhost_user_get_vring_base, + .vhost_set_vring_kick = vhost_user_set_vring_kick, + .vhost_set_vring_call = vhost_user_set_vring_call, + .vhost_set_features = vhost_user_set_features, + .vhost_get_features = vhost_user_get_features, + .vhost_set_owner = vhost_user_set_owner, + .vhost_reset_device = vhost_user_reset_device, + .vhost_get_vq_index = vhost_user_get_vq_index, + .vhost_set_vring_enable = vhost_user_set_vring_enable, + .vhost_requires_shm_log = vhost_user_requires_shm_log, + .vhost_migration_done = vhost_user_migration_done, + .vhost_backend_can_merge = vhost_user_can_merge, + .vhost_net_set_mtu = vhost_user_net_set_mtu, + .vhost_set_iotlb_callback = vhost_user_set_iotlb_callback, + .vhost_send_device_iotlb_msg = vhost_user_send_device_iotlb_msg, + .vhost_get_config = vhost_user_get_config, + .vhost_set_config = 
vhost_user_set_config,
+        .vhost_crypto_create_session = vhost_user_crypto_create_session,
+        .vhost_crypto_close_session = vhost_user_crypto_close_session,
+        .vhost_backend_mem_section_filter = vhost_user_mem_section_filter,
+        .vhost_get_inflight_fd = vhost_user_get_inflight_fd,
+        .vhost_set_inflight_fd = vhost_user_set_inflight_fd,
+};
diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
new file mode 100644
index 000000000..bcaf00e09
--- /dev/null
+++ b/hw/virtio/vhost-vdpa.c
@@ -0,0 +1,798 @@
+/*
+ * vhost-vdpa
+ *
+ * Copyright(c) 2017-2018 Intel Corporation.
+ * Copyright(c) 2020 Red Hat, Inc.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include <linux/vhost.h>
+#include <linux/vfio.h>
+#include <sys/eventfd.h>
+#include <sys/ioctl.h>
+#include "hw/virtio/vhost.h"
+#include "hw/virtio/vhost-backend.h"
+#include "hw/virtio/virtio-net.h"
+#include "hw/virtio/vhost-vdpa.h"
+#include "exec/address-spaces.h"
+#include "qemu/main-loop.h"
+#include "cpu.h"
+#include "trace.h"
+#include "qemu-common.h"
+
+/*
+ * Return one past the end of the section. Be careful with uint64_t
+ * conversions!
+ */
+static Int128 vhost_vdpa_section_end(const MemoryRegionSection *section)
+{
+    Int128 llend = int128_make64(section->offset_within_address_space);
+    llend = int128_add(llend, section->size);
+    llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK));
+
+    return llend;
+}
+
+static bool vhost_vdpa_listener_skipped_section(MemoryRegionSection *section,
+                                                uint64_t iova_min,
+                                                uint64_t iova_max)
+{
+    Int128 llend;
+
+    if ((!memory_region_is_ram(section->mr) &&
+         !memory_region_is_iommu(section->mr)) ||
+        memory_region_is_protected(section->mr) ||
+        /* vhost-vDPA doesn't allow MMIO to be mapped */
+        memory_region_is_ram_device(section->mr)) {
+        return true;
+    }
+
+    if (section->offset_within_address_space < iova_min) {
+        error_report("RAM section out of device range (min=0x%" PRIx64
+                     ", addr=0x%" HWADDR_PRIx ")",
+                     iova_min, section->offset_within_address_space);
+        return true;
+    }
+
+    llend = vhost_vdpa_section_end(section);
+    if (int128_gt(llend, int128_make64(iova_max))) {
+        error_report("RAM section out of device range (max=0x%" PRIx64
+                     ", end addr=0x%" PRIx64 ")",
+                     iova_max, int128_get64(llend));
+        return true;
+    }
+
+    return false;
+}
+
+static int vhost_vdpa_dma_map(struct vhost_vdpa *v, hwaddr iova, hwaddr size,
+                              void *vaddr, bool readonly)
+{
+    struct vhost_msg_v2 msg = {};
+    int fd = v->device_fd;
+    int ret = 0;
+
+    msg.type = v->msg_type;
+    msg.iotlb.iova = iova;
+    msg.iotlb.size = size;
+    msg.iotlb.uaddr = (uint64_t)(uintptr_t)vaddr;
+    msg.iotlb.perm = readonly ? VHOST_ACCESS_RO : VHOST_ACCESS_RW;
+    msg.iotlb.type = VHOST_IOTLB_UPDATE;
+
+    trace_vhost_vdpa_dma_map(v, fd, msg.type, msg.iotlb.iova, msg.iotlb.size,
+                             msg.iotlb.uaddr, msg.iotlb.perm, msg.iotlb.type);
+
+    if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
+        error_report("failed to write, fd=%d, errno=%d (%s)",
+                     fd, errno, strerror(errno));
+        return -EIO;
+    }
+
+    return ret;
+}
+
+static int vhost_vdpa_dma_unmap(struct vhost_vdpa *v, hwaddr iova,
+                                hwaddr size)
+{
+    struct vhost_msg_v2 msg = {};
+    int fd = v->device_fd;
+    int ret = 0;
+
+    msg.type = v->msg_type;
+    msg.iotlb.iova = iova;
+    msg.iotlb.size = size;
+    msg.iotlb.type = VHOST_IOTLB_INVALIDATE;
+
+    trace_vhost_vdpa_dma_unmap(v, fd, msg.type, msg.iotlb.iova,
+                               msg.iotlb.size, msg.iotlb.type);
+
+    if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
+        error_report("failed to write, fd=%d, errno=%d (%s)",
+                     fd, errno, strerror(errno));
+        return -EIO;
+    }
+
+    return ret;
+}
+
+static void vhost_vdpa_listener_begin_batch(struct vhost_vdpa *v)
+{
+    int fd = v->device_fd;
+    struct vhost_msg_v2 msg = {
+        .type = v->msg_type,
+        .iotlb.type = VHOST_IOTLB_BATCH_BEGIN,
+    };
+
+    if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
+        error_report("failed to write, fd=%d, errno=%d (%s)",
+                     fd, errno, strerror(errno));
+    }
+}
+
+static void vhost_vdpa_iotlb_batch_begin_once(struct vhost_vdpa *v)
+{
+    if (v->dev->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH) &&
+        !v->iotlb_batch_begin_sent) {
+        vhost_vdpa_listener_begin_batch(v);
+    }
+
+    v->iotlb_batch_begin_sent = true;
+}
+
+static void vhost_vdpa_listener_commit(MemoryListener *listener)
+{
+    struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
+    struct vhost_dev *dev = v->dev;
+    struct vhost_msg_v2 msg = {};
+    int fd = v->device_fd;
+
+    if (!(dev->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH))) {
+        return;
+    }
+
+    if (!v->iotlb_batch_begin_sent) {
+        return;
+    }
+
+    msg.type = v->msg_type;
+    msg.iotlb.type = VHOST_IOTLB_BATCH_END;
+
+    if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
+        error_report("failed to write, fd=%d, errno=%d (%s)",
+                     fd, errno, strerror(errno));
+    }
+
+    v->iotlb_batch_begin_sent = false;
+}
+
+static void vhost_vdpa_listener_region_add(MemoryListener *listener,
+                                           MemoryRegionSection *section)
+{
+    struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
+    hwaddr iova;
+    Int128 llend, llsize;
+    void *vaddr;
+    int ret;
+
+    if (vhost_vdpa_listener_skipped_section(section, v->iova_range.first,
+                                            v->iova_range.last)) {
+        return;
+    }
+
+    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
+                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
+        error_report("%s received unaligned region", __func__);
+        return;
+    }
+
+    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
+    llend = vhost_vdpa_section_end(section);
+    if (int128_ge(int128_make64(iova), llend)) {
+        return;
+    }
+
+    memory_region_ref(section->mr);
+
+    /* Here we assume that memory_region_is_ram(section->mr) == true */
+
+    vaddr = memory_region_get_ram_ptr(section->mr) +
+            section->offset_within_region +
+            (iova - section->offset_within_address_space);
+
+    trace_vhost_vdpa_listener_region_add(v, iova, int128_get64(llend),
+                                         vaddr, section->readonly);
+
+    llsize = int128_sub(llend, int128_make64(iova));
+
+    vhost_vdpa_iotlb_batch_begin_once(v);
+    ret = vhost_vdpa_dma_map(v, iova, int128_get64(llsize),
+                             vaddr, section->readonly);
+    if (ret) {
+        error_report("vhost-vdpa: DMA map failed");
+        goto fail;
+    }
+
+    return;
+
+fail:
+    /*
+     * On the initfn path, store the first error in the container so we
+     * can gracefully fail. At runtime, there's not much we can do other
+     * than throw a hardware error.
+     */
+    error_report("vhost-vdpa: DMA mapping failed, unable to continue");
+    return;
+
+}
+
+static void vhost_vdpa_listener_region_del(MemoryListener *listener,
+                                           MemoryRegionSection *section)
+{
+    struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
+    hwaddr iova;
+    Int128 llend, llsize;
+    int ret;
+
+    if (vhost_vdpa_listener_skipped_section(section, v->iova_range.first,
+                                            v->iova_range.last)) {
+        return;
+    }
+
+    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
+                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
+        error_report("%s received unaligned region", __func__);
+        return;
+    }
+
+    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
+    llend = vhost_vdpa_section_end(section);
+
+    trace_vhost_vdpa_listener_region_del(v, iova, int128_get64(llend));
+
+    if (int128_ge(int128_make64(iova), llend)) {
+        return;
+    }
+
+    llsize = int128_sub(llend, int128_make64(iova));
+
+    vhost_vdpa_iotlb_batch_begin_once(v);
+    ret = vhost_vdpa_dma_unmap(v, iova, int128_get64(llsize));
+    if (ret) {
+        error_report("vhost-vdpa: DMA unmap failed");
+    }
+
+    memory_region_unref(section->mr);
+}
+/*
+ * The IOTLB API is used by vhost-vdpa, which requires incremental updating
+ * of the mapping. So we cannot use the generic vhost memory listener, which
+ * depends on addnop().
+ */
+static const MemoryListener vhost_vdpa_memory_listener = {
+    .name = "vhost-vdpa",
+    .commit = vhost_vdpa_listener_commit,
+    .region_add = vhost_vdpa_listener_region_add,
+    .region_del = vhost_vdpa_listener_region_del,
+};
+
+static int vhost_vdpa_call(struct vhost_dev *dev, unsigned long int request,
+                           void *arg)
+{
+    struct vhost_vdpa *v = dev->opaque;
+    int fd = v->device_fd;
+    int ret;
+
+    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
+
+    ret = ioctl(fd, request, arg);
+    return ret < 0 ? -errno : ret;
+}
+
+static void vhost_vdpa_add_status(struct vhost_dev *dev, uint8_t status)
+{
+    uint8_t s;
+
+    trace_vhost_vdpa_add_status(dev, status);
+    if (vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &s)) {
+        return;
+    }
+
+    s |= status;
+
+    vhost_vdpa_call(dev, VHOST_VDPA_SET_STATUS, &s);
+}
+
+static void vhost_vdpa_get_iova_range(struct vhost_vdpa *v)
+{
+    int ret = vhost_vdpa_call(v->dev, VHOST_VDPA_GET_IOVA_RANGE,
+                              &v->iova_range);
+    if (ret != 0) {
+        v->iova_range.first = 0;
+        v->iova_range.last = UINT64_MAX;
+    }
+
+    trace_vhost_vdpa_get_iova_range(v->dev, v->iova_range.first,
+                                    v->iova_range.last);
+}
+
+static bool vhost_vdpa_one_time_request(struct vhost_dev *dev)
+{
+    struct vhost_vdpa *v = dev->opaque;
+
+    return v->index != 0;
+}
+
+static int vhost_vdpa_init(struct vhost_dev *dev, void *opaque, Error **errp)
+{
+    struct vhost_vdpa *v;
+    int ret;
+
+    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
+    trace_vhost_vdpa_init(dev, opaque);
+
+    /*
+     * Similar to VFIO, we end up pinning all guest memory and have to
+     * disable discarding of RAM.
+     */
+    ret = ram_block_discard_disable(true);
+    if (ret) {
+        error_report("Cannot disable discarding of RAM");
+        return ret;
+    }
+
+    v = opaque;
+    v->dev = dev;
+    dev->opaque = opaque;
+    v->listener = vhost_vdpa_memory_listener;
+    v->msg_type = VHOST_IOTLB_MSG_V2;
+
+    vhost_vdpa_get_iova_range(v);
+
+    if (vhost_vdpa_one_time_request(dev)) {
+        return 0;
+    }
+
+    vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
+                               VIRTIO_CONFIG_S_DRIVER);
+
+    return 0;
+}
+
+static void vhost_vdpa_host_notifier_uninit(struct vhost_dev *dev,
+                                            int queue_index)
+{
+    size_t page_size = qemu_real_host_page_size;
+    struct vhost_vdpa *v = dev->opaque;
+    VirtIODevice *vdev = dev->vdev;
+    VhostVDPAHostNotifier *n;
+
+    n = &v->notifier[queue_index];
+
+    if (n->addr) {
+        virtio_queue_set_host_notifier_mr(vdev, queue_index, &n->mr, false);
+        object_unparent(OBJECT(&n->mr));
+        munmap(n->addr, page_size);
+        n->addr = NULL;
+    }
+}
+
+static void vhost_vdpa_host_notifiers_uninit(struct vhost_dev *dev, int n)
+{
+    int i;
+
+    for (i = 0; i < n; i++) {
+        vhost_vdpa_host_notifier_uninit(dev, i);
+    }
+}
+
+static int vhost_vdpa_host_notifier_init(struct vhost_dev *dev, int queue_index)
+{
+    size_t page_size = qemu_real_host_page_size;
+    struct vhost_vdpa *v = dev->opaque;
+    VirtIODevice *vdev = dev->vdev;
+    VhostVDPAHostNotifier *n;
+    int fd = v->device_fd;
+    void *addr;
+    char *name;
+
+    vhost_vdpa_host_notifier_uninit(dev, queue_index);
+
+    n = &v->notifier[queue_index];
+
+    addr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, fd,
+                queue_index * page_size);
+    if (addr == MAP_FAILED) {
+        goto err;
+    }
+
+    name = g_strdup_printf("vhost-vdpa/host-notifier@%p mmaps[%d]",
+                           v, queue_index);
+    memory_region_init_ram_device_ptr(&n->mr, OBJECT(vdev), name,
+                                      page_size, addr);
+    g_free(name);
+
+    if (virtio_queue_set_host_notifier_mr(vdev, queue_index, &n->mr, true)) {
+        munmap(addr, page_size);
+        goto err;
+    }
+    n->addr = addr;
+
+    return 0;
+
+err:
+    return -1;
+}
+
+static void vhost_vdpa_host_notifiers_init(struct vhost_dev *dev)
+{
+    int i;
+
+    for (i = dev->vq_index; i < dev->vq_index + dev->nvqs; i++) {
+        if (vhost_vdpa_host_notifier_init(dev, i)) {
+            goto err;
+        }
+    }
+
+    return;
+
+err:
+    vhost_vdpa_host_notifiers_uninit(dev, i);
+    return;
+}
+
+static int vhost_vdpa_cleanup(struct vhost_dev *dev)
+{
+    struct vhost_vdpa *v;
+    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
+    v = dev->opaque;
+    trace_vhost_vdpa_cleanup(dev, v);
+    vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs);
+    memory_listener_unregister(&v->listener);
+
+    dev->opaque = NULL;
+    ram_block_discard_disable(false);
+
+    return 0;
+}
+
+static int vhost_vdpa_memslots_limit(struct vhost_dev *dev)
+{
+    trace_vhost_vdpa_memslots_limit(dev, INT_MAX);
+    return INT_MAX;
+}
+
+static int vhost_vdpa_set_mem_table(struct vhost_dev *dev,
+                                    struct vhost_memory *mem)
+{
+    if (vhost_vdpa_one_time_request(dev)) {
+        return 0;
+    }
+
+    trace_vhost_vdpa_set_mem_table(dev, mem->nregions, mem->padding);
+    if (trace_event_get_state_backends(TRACE_VHOST_VDPA_SET_MEM_TABLE) &&
+        trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_REGIONS)) {
+        int i;
+        for (i = 0; i < mem->nregions; i++) {
+            trace_vhost_vdpa_dump_regions(dev, i,
+                                          mem->regions[i].guest_phys_addr,
+                                          mem->regions[i].memory_size,
+                                          mem->regions[i].userspace_addr,
+                                          mem->regions[i].flags_padding);
+        }
+    }
+    if (mem->padding) {
+        return -1;
+    }
+
+    return 0;
+}
+
+static int vhost_vdpa_set_features(struct vhost_dev *dev,
+                                   uint64_t features)
+{
+    int ret;
+
+    if
(vhost_vdpa_one_time_request(dev)) { + return 0; + } + + trace_vhost_vdpa_set_features(dev, features); + ret = vhost_vdpa_call(dev, VHOST_SET_FEATURES, &features); + uint8_t status = 0; + if (ret) { + return ret; + } + vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_FEATURES_OK); + vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &status); + + return !(status & VIRTIO_CONFIG_S_FEATURES_OK); +} + +static int vhost_vdpa_set_backend_cap(struct vhost_dev *dev) +{ + uint64_t features; + uint64_t f = 0x1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2 | + 0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH; + int r; + + if (vhost_vdpa_call(dev, VHOST_GET_BACKEND_FEATURES, &features)) { + return -EFAULT; + } + + features &= f; + + if (vhost_vdpa_one_time_request(dev)) { + r = vhost_vdpa_call(dev, VHOST_SET_BACKEND_FEATURES, &features); + if (r) { + return -EFAULT; + } + } + + dev->backend_cap = features; + + return 0; +} + +static int vhost_vdpa_get_device_id(struct vhost_dev *dev, + uint32_t *device_id) +{ + int ret; + ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_DEVICE_ID, device_id); + trace_vhost_vdpa_get_device_id(dev, *device_id); + return ret; +} + +static int vhost_vdpa_reset_device(struct vhost_dev *dev) +{ + int ret; + uint8_t status = 0; + + ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_STATUS, &status); + trace_vhost_vdpa_reset_device(dev, status); + return ret; +} + +static int vhost_vdpa_get_vq_index(struct vhost_dev *dev, int idx) +{ + assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs); + + trace_vhost_vdpa_get_vq_index(dev, idx, idx); + return idx; +} + +static int vhost_vdpa_set_vring_ready(struct vhost_dev *dev) +{ + int i; + trace_vhost_vdpa_set_vring_ready(dev); + for (i = 0; i < dev->nvqs; ++i) { + struct vhost_vring_state state = { + .index = dev->vq_index + i, + .num = 1, + }; + vhost_vdpa_call(dev, VHOST_VDPA_SET_VRING_ENABLE, &state); + } + return 0; +} + +static void vhost_vdpa_dump_config(struct vhost_dev *dev, const uint8_t *config, + uint32_t config_len) +{ + int b, len; + char line[QEMU_HEXDUMP_LINE_LEN]; + + for (b = 0; b < config_len; b += 16) { + len = config_len - b; + qemu_hexdump_line(line, b, config, len, false); + trace_vhost_vdpa_dump_config(dev, line); + } +} + +static int vhost_vdpa_set_config(struct vhost_dev *dev, const uint8_t *data, + uint32_t offset, uint32_t size, + uint32_t flags) +{ + struct vhost_vdpa_config *config; + int ret; + unsigned long config_size = offsetof(struct vhost_vdpa_config, buf); + + trace_vhost_vdpa_set_config(dev, offset, size, flags); + config = g_malloc(size + config_size); + config->off = offset; + config->len = size; + memcpy(config->buf, data, size); + if (trace_event_get_state_backends(TRACE_VHOST_VDPA_SET_CONFIG) && + trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_CONFIG)) { + vhost_vdpa_dump_config(dev, data, size); + } + ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_CONFIG, config); + g_free(config); + return ret; +} + +static int vhost_vdpa_get_config(struct vhost_dev *dev, uint8_t *config, + uint32_t config_len, Error **errp) +{ + struct vhost_vdpa_config *v_config; + unsigned long config_size = offsetof(struct vhost_vdpa_config, buf); + int ret; + + trace_vhost_vdpa_get_config(dev, config, config_len); + v_config = g_malloc(config_len + config_size); + v_config->len = config_len; + v_config->off = 0; + ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_CONFIG, v_config); + memcpy(config, v_config->buf, config_len); + g_free(v_config); + if (trace_event_get_state_backends(TRACE_VHOST_VDPA_GET_CONFIG) && + 
trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_CONFIG)) { + vhost_vdpa_dump_config(dev, config, config_len); + } + return ret; + } + +static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started) +{ + struct vhost_vdpa *v = dev->opaque; + trace_vhost_vdpa_dev_start(dev, started); + + if (started) { + vhost_vdpa_host_notifiers_init(dev); + vhost_vdpa_set_vring_ready(dev); + } else { + vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs); + } + + if (dev->vq_index + dev->nvqs != dev->vq_index_end) { + return 0; + } + + if (started) { + uint8_t status = 0; + memory_listener_register(&v->listener, &address_space_memory); + vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK); + vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &status); + + return !(status & VIRTIO_CONFIG_S_DRIVER_OK); + } else { + vhost_vdpa_reset_device(dev); + vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE | + VIRTIO_CONFIG_S_DRIVER); + memory_listener_unregister(&v->listener); + + return 0; + } +} + +static int vhost_vdpa_set_log_base(struct vhost_dev *dev, uint64_t base, + struct vhost_log *log) +{ + if (vhost_vdpa_one_time_request(dev)) { + return 0; + } + + trace_vhost_vdpa_set_log_base(dev, base, log->size, log->refcnt, log->fd, + log->log); + return vhost_vdpa_call(dev, VHOST_SET_LOG_BASE, &base); +} + +static int vhost_vdpa_set_vring_addr(struct vhost_dev *dev, + struct vhost_vring_addr *addr) +{ + trace_vhost_vdpa_set_vring_addr(dev, addr->index, addr->flags, + addr->desc_user_addr, addr->used_user_addr, + addr->avail_user_addr, + addr->log_guest_addr); + return vhost_vdpa_call(dev, VHOST_SET_VRING_ADDR, addr); +} + +static int vhost_vdpa_set_vring_num(struct vhost_dev *dev, + struct vhost_vring_state *ring) +{ + trace_vhost_vdpa_set_vring_num(dev, ring->index, ring->num); + return vhost_vdpa_call(dev, VHOST_SET_VRING_NUM, ring); +} + +static int vhost_vdpa_set_vring_base(struct vhost_dev *dev, + struct vhost_vring_state *ring) +{ + trace_vhost_vdpa_set_vring_base(dev, ring->index, ring->num); + return vhost_vdpa_call(dev, VHOST_SET_VRING_BASE, ring); +} + +static int vhost_vdpa_get_vring_base(struct vhost_dev *dev, + struct vhost_vring_state *ring) +{ + int ret; + + ret = vhost_vdpa_call(dev, VHOST_GET_VRING_BASE, ring); + trace_vhost_vdpa_get_vring_base(dev, ring->index, ring->num); + return ret; +} + +static int vhost_vdpa_set_vring_kick(struct vhost_dev *dev, + struct vhost_vring_file *file) +{ + trace_vhost_vdpa_set_vring_kick(dev, file->index, file->fd); + return vhost_vdpa_call(dev, VHOST_SET_VRING_KICK, file); +} + +static int vhost_vdpa_set_vring_call(struct vhost_dev *dev, + struct vhost_vring_file *file) +{ + trace_vhost_vdpa_set_vring_call(dev, file->index, file->fd); + return vhost_vdpa_call(dev, VHOST_SET_VRING_CALL, file); +} + +static int vhost_vdpa_get_features(struct vhost_dev *dev, + uint64_t *features) +{ + int ret; + + ret = vhost_vdpa_call(dev, VHOST_GET_FEATURES, features); + trace_vhost_vdpa_get_features(dev, *features); + return ret; +} + +static int vhost_vdpa_set_owner(struct vhost_dev *dev) +{ + if (vhost_vdpa_one_time_request(dev)) { + return 0; + } + + trace_vhost_vdpa_set_owner(dev); + return vhost_vdpa_call(dev, VHOST_SET_OWNER, NULL); +} + +static int vhost_vdpa_vq_get_addr(struct vhost_dev *dev, + struct vhost_vring_addr *addr, struct vhost_virtqueue *vq) +{ + assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA); + addr->desc_user_addr = (uint64_t)(unsigned long)vq->desc_phys; + addr->avail_user_addr = (uint64_t)(unsigned long)vq->avail_phys; + 
addr->used_user_addr = (uint64_t)(unsigned long)vq->used_phys; + trace_vhost_vdpa_vq_get_addr(dev, vq, addr->desc_user_addr, + addr->avail_user_addr, addr->used_user_addr); + return 0; +} + +static bool vhost_vdpa_force_iommu(struct vhost_dev *dev) +{ + return true; +} + +const VhostOps vdpa_ops = { + .backend_type = VHOST_BACKEND_TYPE_VDPA, + .vhost_backend_init = vhost_vdpa_init, + .vhost_backend_cleanup = vhost_vdpa_cleanup, + .vhost_set_log_base = vhost_vdpa_set_log_base, + .vhost_set_vring_addr = vhost_vdpa_set_vring_addr, + .vhost_set_vring_num = vhost_vdpa_set_vring_num, + .vhost_set_vring_base = vhost_vdpa_set_vring_base, + .vhost_get_vring_base = vhost_vdpa_get_vring_base, + .vhost_set_vring_kick = vhost_vdpa_set_vring_kick, + .vhost_set_vring_call = vhost_vdpa_set_vring_call, + .vhost_get_features = vhost_vdpa_get_features, + .vhost_set_backend_cap = vhost_vdpa_set_backend_cap, + .vhost_set_owner = vhost_vdpa_set_owner, + .vhost_set_vring_endian = NULL, + .vhost_backend_memslots_limit = vhost_vdpa_memslots_limit, + .vhost_set_mem_table = vhost_vdpa_set_mem_table, + .vhost_set_features = vhost_vdpa_set_features, + .vhost_reset_device = vhost_vdpa_reset_device, + .vhost_get_vq_index = vhost_vdpa_get_vq_index, + .vhost_get_config = vhost_vdpa_get_config, + .vhost_set_config = vhost_vdpa_set_config, + .vhost_requires_shm_log = NULL, + .vhost_migration_done = NULL, + .vhost_backend_can_merge = NULL, + .vhost_net_set_mtu = NULL, + .vhost_set_iotlb_callback = NULL, + .vhost_send_device_iotlb_msg = NULL, + .vhost_dev_start = vhost_vdpa_dev_start, + .vhost_get_device_id = vhost_vdpa_get_device_id, + .vhost_vq_get_addr = vhost_vdpa_vq_get_addr, + .vhost_force_iommu = vhost_vdpa_force_iommu, +}; diff --git a/hw/virtio/vhost-vsock-common.c b/hw/virtio/vhost-vsock-common.c new file mode 100644 index 000000000..3f3771274 --- /dev/null +++ b/hw/virtio/vhost-vsock-common.c @@ -0,0 +1,288 @@ +/* + * Parent class for vhost-vsock devices + * + * Copyright 2015-2020 Red Hat, Inc. + * + * This work is licensed under the terms of the GNU GPL, version 2 or + * (at your option) any later version. See the COPYING file in the + * top-level directory. 
+ */ + +#include "qemu/osdep.h" +#include "standard-headers/linux/virtio_vsock.h" +#include "qapi/error.h" +#include "hw/virtio/virtio-access.h" +#include "qemu/error-report.h" +#include "hw/qdev-properties.h" +#include "hw/virtio/vhost-vsock.h" +#include "qemu/iov.h" +#include "monitor/monitor.h" + +const int feature_bits[] = { + VIRTIO_VSOCK_F_SEQPACKET, + VHOST_INVALID_FEATURE_BIT +}; + +uint64_t vhost_vsock_common_get_features(VirtIODevice *vdev, uint64_t features, + Error **errp) +{ + VHostVSockCommon *vvc = VHOST_VSOCK_COMMON(vdev); + + if (vvc->seqpacket != ON_OFF_AUTO_OFF) { + virtio_add_feature(&features, VIRTIO_VSOCK_F_SEQPACKET); + } + + features = vhost_get_features(&vvc->vhost_dev, feature_bits, features); + + if (vvc->seqpacket == ON_OFF_AUTO_ON && + !virtio_has_feature(features, VIRTIO_VSOCK_F_SEQPACKET)) { + error_setg(errp, "vhost-vsock backend doesn't support seqpacket"); + } + + return features; +} + +int vhost_vsock_common_start(VirtIODevice *vdev) +{ + VHostVSockCommon *vvc = VHOST_VSOCK_COMMON(vdev); + BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev))); + VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); + int ret; + int i; + + if (!k->set_guest_notifiers) { + error_report("binding does not support guest notifiers"); + return -ENOSYS; + } + + ret = vhost_dev_enable_notifiers(&vvc->vhost_dev, vdev); + if (ret < 0) { + error_report("Error enabling host notifiers: %d", -ret); + return ret; + } + + ret = k->set_guest_notifiers(qbus->parent, vvc->vhost_dev.nvqs, true); + if (ret < 0) { + error_report("Error binding guest notifier: %d", -ret); + goto err_host_notifiers; + } + + vvc->vhost_dev.acked_features = vdev->guest_features; + ret = vhost_dev_start(&vvc->vhost_dev, vdev); + if (ret < 0) { + error_report("Error starting vhost: %d", -ret); + goto err_guest_notifiers; + } + + /* + * guest_notifier_mask/pending not used yet, so just unmask + * everything here. virtio-pci will do the right thing by + * enabling/disabling irqfd. 
+ */ + for (i = 0; i < vvc->vhost_dev.nvqs; i++) { + vhost_virtqueue_mask(&vvc->vhost_dev, vdev, i, false); + } + + return 0; + +err_guest_notifiers: + k->set_guest_notifiers(qbus->parent, vvc->vhost_dev.nvqs, false); +err_host_notifiers: + vhost_dev_disable_notifiers(&vvc->vhost_dev, vdev); + return ret; +} + +void vhost_vsock_common_stop(VirtIODevice *vdev) +{ + VHostVSockCommon *vvc = VHOST_VSOCK_COMMON(vdev); + BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev))); + VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); + int ret; + + if (!k->set_guest_notifiers) { + return; + } + + vhost_dev_stop(&vvc->vhost_dev, vdev); + + ret = k->set_guest_notifiers(qbus->parent, vvc->vhost_dev.nvqs, false); + if (ret < 0) { + error_report("vhost guest notifier cleanup failed: %d", ret); + return; + } + + vhost_dev_disable_notifiers(&vvc->vhost_dev, vdev); +} + + +static void vhost_vsock_common_handle_output(VirtIODevice *vdev, VirtQueue *vq) +{ + /* Do nothing */ +} + +static void vhost_vsock_common_guest_notifier_mask(VirtIODevice *vdev, int idx, + bool mask) +{ + VHostVSockCommon *vvc = VHOST_VSOCK_COMMON(vdev); + + vhost_virtqueue_mask(&vvc->vhost_dev, vdev, idx, mask); +} + +static bool vhost_vsock_common_guest_notifier_pending(VirtIODevice *vdev, + int idx) +{ + VHostVSockCommon *vvc = VHOST_VSOCK_COMMON(vdev); + + return vhost_virtqueue_pending(&vvc->vhost_dev, idx); +} + +static void vhost_vsock_common_send_transport_reset(VHostVSockCommon *vvc) +{ + VirtQueueElement *elem; + VirtQueue *vq = vvc->event_vq; + struct virtio_vsock_event event = { + .id = cpu_to_le32(VIRTIO_VSOCK_EVENT_TRANSPORT_RESET), + }; + + elem = virtqueue_pop(vq, sizeof(VirtQueueElement)); + if (!elem) { + error_report("vhost-vsock missed transport reset event"); + return; + } + + if (elem->out_num) { + error_report("invalid vhost-vsock event virtqueue element with " + "out buffers"); + goto out; + } + + if (iov_from_buf(elem->in_sg, elem->in_num, 0, + &event, sizeof(event)) != sizeof(event)) { + error_report("vhost-vsock event virtqueue element is too short"); + goto out; + } + + virtqueue_push(vq, elem, sizeof(event)); + virtio_notify(VIRTIO_DEVICE(vvc), vq); + +out: + g_free(elem); +} + +static void vhost_vsock_common_post_load_timer_cleanup(VHostVSockCommon *vvc) +{ + if (!vvc->post_load_timer) { + return; + } + + timer_free(vvc->post_load_timer); + vvc->post_load_timer = NULL; +} + +static void vhost_vsock_common_post_load_timer_cb(void *opaque) +{ + VHostVSockCommon *vvc = opaque; + + vhost_vsock_common_post_load_timer_cleanup(vvc); + vhost_vsock_common_send_transport_reset(vvc); +} + +int vhost_vsock_common_pre_save(void *opaque) +{ + VHostVSockCommon *vvc = opaque; + + /* + * At this point, backend must be stopped, otherwise + * it might keep writing to memory. + */ + assert(!vvc->vhost_dev.started); + + return 0; +} + +int vhost_vsock_common_post_load(void *opaque, int version_id) +{ + VHostVSockCommon *vvc = opaque; + VirtIODevice *vdev = VIRTIO_DEVICE(vvc); + + if (virtio_queue_get_addr(vdev, 2)) { + /* + * Defer transport reset event to a vm clock timer so that virtqueue + * changes happen after migration has completed. 
+ */ + assert(!vvc->post_load_timer); + vvc->post_load_timer = + timer_new_ns(QEMU_CLOCK_VIRTUAL, + vhost_vsock_common_post_load_timer_cb, + vvc); + timer_mod(vvc->post_load_timer, 1); + } + return 0; +} + +void vhost_vsock_common_realize(VirtIODevice *vdev, const char *name) +{ + VHostVSockCommon *vvc = VHOST_VSOCK_COMMON(vdev); + + virtio_init(vdev, name, VIRTIO_ID_VSOCK, + sizeof(struct virtio_vsock_config)); + + /* Receive and transmit queues belong to vhost */ + vvc->recv_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE, + vhost_vsock_common_handle_output); + vvc->trans_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE, + vhost_vsock_common_handle_output); + + /* The event queue belongs to QEMU */ + vvc->event_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE, + vhost_vsock_common_handle_output); + + vvc->vhost_dev.nvqs = ARRAY_SIZE(vvc->vhost_vqs); + vvc->vhost_dev.vqs = vvc->vhost_vqs; + + vvc->post_load_timer = NULL; +} + +void vhost_vsock_common_unrealize(VirtIODevice *vdev) +{ + VHostVSockCommon *vvc = VHOST_VSOCK_COMMON(vdev); + + vhost_vsock_common_post_load_timer_cleanup(vvc); + + virtio_delete_queue(vvc->recv_vq); + virtio_delete_queue(vvc->trans_vq); + virtio_delete_queue(vvc->event_vq); + virtio_cleanup(vdev); +} + +static Property vhost_vsock_common_properties[] = { + DEFINE_PROP_ON_OFF_AUTO("seqpacket", VHostVSockCommon, seqpacket, + ON_OFF_AUTO_AUTO), + DEFINE_PROP_END_OF_LIST(), +}; + +static void vhost_vsock_common_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass); + + device_class_set_props(dc, vhost_vsock_common_properties); + set_bit(DEVICE_CATEGORY_MISC, dc->categories); + vdc->guest_notifier_mask = vhost_vsock_common_guest_notifier_mask; + vdc->guest_notifier_pending = vhost_vsock_common_guest_notifier_pending; +} + +static const TypeInfo vhost_vsock_common_info = { + .name = TYPE_VHOST_VSOCK_COMMON, + .parent = TYPE_VIRTIO_DEVICE, + .instance_size = sizeof(VHostVSockCommon), + .class_init = vhost_vsock_common_class_init, + .abstract = true, +}; + +static void vhost_vsock_common_register_types(void) +{ + type_register_static(&vhost_vsock_common_info); +} + +type_init(vhost_vsock_common_register_types) diff --git a/hw/virtio/vhost-vsock-pci.c b/hw/virtio/vhost-vsock-pci.c new file mode 100644 index 000000000..205da8d1f --- /dev/null +++ b/hw/virtio/vhost-vsock-pci.c @@ -0,0 +1,96 @@ +/* + * Vhost vsock PCI Bindings + * + * Copyright 2015 Red Hat, Inc. + * + * Authors: + * Stefan Hajnoczi + * + * This work is licensed under the terms of the GNU GPL, version 2 or + * (at your option) any later version. See the COPYING file in the + * top-level directory. + */ + +#include "qemu/osdep.h" + +#include "virtio-pci.h" +#include "hw/qdev-properties.h" +#include "hw/virtio/vhost-vsock.h" +#include "qemu/module.h" +#include "qom/object.h" + +typedef struct VHostVSockPCI VHostVSockPCI; + +/* + * vhost-vsock-pci: This extends VirtioPCIProxy. 
+ */ +#define TYPE_VHOST_VSOCK_PCI "vhost-vsock-pci-base" +DECLARE_INSTANCE_CHECKER(VHostVSockPCI, VHOST_VSOCK_PCI, + TYPE_VHOST_VSOCK_PCI) + +struct VHostVSockPCI { + VirtIOPCIProxy parent_obj; + VHostVSock vdev; +}; + +/* vhost-vsock-pci */ + +static Property vhost_vsock_pci_properties[] = { + DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 3), + DEFINE_PROP_END_OF_LIST(), +}; + +static void vhost_vsock_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp) +{ + VHostVSockPCI *dev = VHOST_VSOCK_PCI(vpci_dev); + DeviceState *vdev = DEVICE(&dev->vdev); + VirtIODevice *virtio_dev = VIRTIO_DEVICE(vdev); + + /* + * To avoid migration issues, we force virtio version 1 only when + * legacy check is enabled in the new machine types (>= 5.1). + */ + if (!virtio_legacy_check_disabled(virtio_dev)) { + virtio_pci_force_virtio_1(vpci_dev); + } + + qdev_realize(vdev, BUS(&vpci_dev->bus), errp); +} + +static void vhost_vsock_pci_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass); + PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass); + k->realize = vhost_vsock_pci_realize; + set_bit(DEVICE_CATEGORY_MISC, dc->categories); + device_class_set_props(dc, vhost_vsock_pci_properties); + pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET; + pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_VSOCK; + pcidev_k->revision = 0x00; + pcidev_k->class_id = PCI_CLASS_COMMUNICATION_OTHER; +} + +static void vhost_vsock_pci_instance_init(Object *obj) +{ + VHostVSockPCI *dev = VHOST_VSOCK_PCI(obj); + + virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev), + TYPE_VHOST_VSOCK); +} + +static const VirtioPCIDeviceTypeInfo vhost_vsock_pci_info = { + .base_name = TYPE_VHOST_VSOCK_PCI, + .generic_name = "vhost-vsock-pci", + .non_transitional_name = "vhost-vsock-pci-non-transitional", + .instance_size = sizeof(VHostVSockPCI), + .instance_init = vhost_vsock_pci_instance_init, + .class_init = vhost_vsock_pci_class_init, +}; + +static void virtio_pci_vhost_register(void) +{ + virtio_pci_types_register(&vhost_vsock_pci_info); +} + +type_init(virtio_pci_vhost_register) diff --git a/hw/virtio/vhost-vsock.c b/hw/virtio/vhost-vsock.c new file mode 100644 index 000000000..478c0c9a8 --- /dev/null +++ b/hw/virtio/vhost-vsock.c @@ -0,0 +1,241 @@ +/* + * Virtio vsock device + * + * Copyright 2015 Red Hat, Inc. + * + * Authors: + * Stefan Hajnoczi + * + * This work is licensed under the terms of the GNU GPL, version 2 or + * (at your option) any later version. See the COPYING file in the + * top-level directory. 
+ */ + +#include "qemu/osdep.h" +#include "standard-headers/linux/virtio_vsock.h" +#include "qapi/error.h" +#include "hw/virtio/virtio-access.h" +#include "qemu/error-report.h" +#include "qemu/sockets.h" +#include "hw/qdev-properties.h" +#include "hw/virtio/vhost-vsock.h" +#include "monitor/monitor.h" + +static void vhost_vsock_get_config(VirtIODevice *vdev, uint8_t *config) +{ + VHostVSock *vsock = VHOST_VSOCK(vdev); + struct virtio_vsock_config vsockcfg = {}; + + virtio_stq_p(vdev, &vsockcfg.guest_cid, vsock->conf.guest_cid); + memcpy(config, &vsockcfg, sizeof(vsockcfg)); +} + +static int vhost_vsock_set_guest_cid(VirtIODevice *vdev) +{ + VHostVSockCommon *vvc = VHOST_VSOCK_COMMON(vdev); + VHostVSock *vsock = VHOST_VSOCK(vdev); + const VhostOps *vhost_ops = vvc->vhost_dev.vhost_ops; + int ret; + + if (!vhost_ops->vhost_vsock_set_guest_cid) { + return -ENOSYS; + } + + ret = vhost_ops->vhost_vsock_set_guest_cid(&vvc->vhost_dev, + vsock->conf.guest_cid); + if (ret < 0) { + return -errno; + } + return 0; +} + +static int vhost_vsock_set_running(VirtIODevice *vdev, int start) +{ + VHostVSockCommon *vvc = VHOST_VSOCK_COMMON(vdev); + const VhostOps *vhost_ops = vvc->vhost_dev.vhost_ops; + int ret; + + if (!vhost_ops->vhost_vsock_set_running) { + return -ENOSYS; + } + + ret = vhost_ops->vhost_vsock_set_running(&vvc->vhost_dev, start); + if (ret < 0) { + return -errno; + } + return 0; +} + + +static void vhost_vsock_set_status(VirtIODevice *vdev, uint8_t status) +{ + VHostVSockCommon *vvc = VHOST_VSOCK_COMMON(vdev); + bool should_start = status & VIRTIO_CONFIG_S_DRIVER_OK; + int ret; + + if (!vdev->vm_running) { + should_start = false; + } + + if (vvc->vhost_dev.started == should_start) { + return; + } + + if (should_start) { + ret = vhost_vsock_common_start(vdev); + if (ret < 0) { + return; + } + + ret = vhost_vsock_set_running(vdev, 1); + if (ret < 0) { + vhost_vsock_common_stop(vdev); + error_report("Error starting vhost vsock: %d", -ret); + return; + } + } else { + ret = vhost_vsock_set_running(vdev, 0); + if (ret < 0) { + error_report("vhost vsock set running failed: %d", ret); + return; + } + + vhost_vsock_common_stop(vdev); + } +} + +static uint64_t vhost_vsock_get_features(VirtIODevice *vdev, + uint64_t requested_features, + Error **errp) +{ + return vhost_vsock_common_get_features(vdev, requested_features, errp); +} + +static const VMStateDescription vmstate_virtio_vhost_vsock = { + .name = "virtio-vhost_vsock", + .minimum_version_id = VHOST_VSOCK_SAVEVM_VERSION, + .version_id = VHOST_VSOCK_SAVEVM_VERSION, + .fields = (VMStateField[]) { + VMSTATE_VIRTIO_DEVICE, + VMSTATE_END_OF_LIST() + }, + .pre_save = vhost_vsock_common_pre_save, + .post_load = vhost_vsock_common_post_load, +}; + +static void vhost_vsock_device_realize(DeviceState *dev, Error **errp) +{ + VHostVSockCommon *vvc = VHOST_VSOCK_COMMON(dev); + VirtIODevice *vdev = VIRTIO_DEVICE(dev); + VHostVSock *vsock = VHOST_VSOCK(dev); + int vhostfd; + int ret; + + /* Refuse to use reserved CID numbers */ + if (vsock->conf.guest_cid <= 2) { + error_setg(errp, "guest-cid property must be greater than 2"); + return; + } + + if (vsock->conf.guest_cid > UINT32_MAX) { + error_setg(errp, "guest-cid property must be a 32-bit number"); + return; + } + + if (vsock->conf.vhostfd) { + vhostfd = monitor_fd_param(monitor_cur(), vsock->conf.vhostfd, errp); + if (vhostfd == -1) { + error_prepend(errp, "vhost-vsock: unable to parse vhostfd: "); + return; + } + + ret = qemu_try_set_nonblock(vhostfd); + if (ret < 0) { + error_setg_errno(errp, -ret, + 
"vhost-vsock: unable to set non-blocking mode"); + return; + } + } else { + vhostfd = open("/dev/vhost-vsock", O_RDWR); + if (vhostfd < 0) { + error_setg_errno(errp, errno, + "vhost-vsock: failed to open vhost device"); + return; + } + + qemu_set_nonblock(vhostfd); + } + + vhost_vsock_common_realize(vdev, "vhost-vsock"); + + ret = vhost_dev_init(&vvc->vhost_dev, (void *)(uintptr_t)vhostfd, + VHOST_BACKEND_TYPE_KERNEL, 0, errp); + if (ret < 0) { + goto err_virtio; + } + + ret = vhost_vsock_set_guest_cid(vdev); + if (ret < 0) { + error_setg_errno(errp, -ret, "vhost-vsock: unable to set guest cid"); + goto err_vhost_dev; + } + + return; + +err_vhost_dev: + vhost_dev_cleanup(&vvc->vhost_dev); + /* vhost_dev_cleanup() closes the vhostfd passed to vhost_dev_init() */ + vhostfd = -1; +err_virtio: + vhost_vsock_common_unrealize(vdev); + if (vhostfd >= 0) { + close(vhostfd); + } + return; +} + +static void vhost_vsock_device_unrealize(DeviceState *dev) +{ + VHostVSockCommon *vvc = VHOST_VSOCK_COMMON(dev); + VirtIODevice *vdev = VIRTIO_DEVICE(dev); + + /* This will stop vhost backend if appropriate. */ + vhost_vsock_set_status(vdev, 0); + + vhost_dev_cleanup(&vvc->vhost_dev); + vhost_vsock_common_unrealize(vdev); +} + +static Property vhost_vsock_properties[] = { + DEFINE_PROP_UINT64("guest-cid", VHostVSock, conf.guest_cid, 0), + DEFINE_PROP_STRING("vhostfd", VHostVSock, conf.vhostfd), + DEFINE_PROP_END_OF_LIST(), +}; + +static void vhost_vsock_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass); + + device_class_set_props(dc, vhost_vsock_properties); + dc->vmsd = &vmstate_virtio_vhost_vsock; + vdc->realize = vhost_vsock_device_realize; + vdc->unrealize = vhost_vsock_device_unrealize; + vdc->get_features = vhost_vsock_get_features; + vdc->get_config = vhost_vsock_get_config; + vdc->set_status = vhost_vsock_set_status; +} + +static const TypeInfo vhost_vsock_info = { + .name = TYPE_VHOST_VSOCK, + .parent = TYPE_VHOST_VSOCK_COMMON, + .instance_size = sizeof(VHostVSock), + .class_init = vhost_vsock_class_init, +}; + +static void vhost_vsock_register_types(void) +{ + type_register_static(&vhost_vsock_info); +} + +type_init(vhost_vsock_register_types) diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c new file mode 100644 index 000000000..437347ad0 --- /dev/null +++ b/hw/virtio/vhost.c @@ -0,0 +1,1864 @@ +/* + * vhost support + * + * Copyright Red Hat, Inc. 2010 + * + * Authors: + * Michael S. Tsirkin + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + * + * Contributions after 2012-01-13 are licensed under the terms of the + * GNU GPL, version 2 or (at your option) any later version. + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "hw/virtio/vhost.h" +#include "qemu/atomic.h" +#include "qemu/range.h" +#include "qemu/error-report.h" +#include "qemu/memfd.h" +#include "standard-headers/linux/vhost_types.h" +#include "hw/virtio/virtio-bus.h" +#include "hw/virtio/virtio-access.h" +#include "migration/blocker.h" +#include "migration/qemu-file-types.h" +#include "sysemu/dma.h" +#include "sysemu/tcg.h" +#include "trace.h" + +/* enabled until disconnected backend stabilizes */ +#define _VHOST_DEBUG 1 + +#ifdef _VHOST_DEBUG +#define VHOST_OPS_DEBUG(fmt, ...) \ + do { error_report(fmt ": %s (%d)", ## __VA_ARGS__, \ + strerror(errno), errno); } while (0) +#else +#define VHOST_OPS_DEBUG(fmt, ...) 
\ + do { } while (0) +#endif + +static struct vhost_log *vhost_log; +static struct vhost_log *vhost_log_shm; + +static unsigned int used_memslots; +static QLIST_HEAD(, vhost_dev) vhost_devices = + QLIST_HEAD_INITIALIZER(vhost_devices); + +bool vhost_has_free_slot(void) +{ + unsigned int slots_limit = ~0U; + struct vhost_dev *hdev; + + QLIST_FOREACH(hdev, &vhost_devices, entry) { + unsigned int r = hdev->vhost_ops->vhost_backend_memslots_limit(hdev); + slots_limit = MIN(slots_limit, r); + } + return slots_limit > used_memslots; +} + +static void vhost_dev_sync_region(struct vhost_dev *dev, + MemoryRegionSection *section, + uint64_t mfirst, uint64_t mlast, + uint64_t rfirst, uint64_t rlast) +{ + vhost_log_chunk_t *log = dev->log->log; + + uint64_t start = MAX(mfirst, rfirst); + uint64_t end = MIN(mlast, rlast); + vhost_log_chunk_t *from = log + start / VHOST_LOG_CHUNK; + vhost_log_chunk_t *to = log + end / VHOST_LOG_CHUNK + 1; + uint64_t addr = QEMU_ALIGN_DOWN(start, VHOST_LOG_CHUNK); + + if (end < start) { + return; + } + assert(end / VHOST_LOG_CHUNK < dev->log_size); + assert(start / VHOST_LOG_CHUNK < dev->log_size); + + for (;from < to; ++from) { + vhost_log_chunk_t log; + /* We first check with non-atomic: much cheaper, + * and we expect non-dirty to be the common case. */ + if (!*from) { + addr += VHOST_LOG_CHUNK; + continue; + } + /* Data must be read atomically. We don't really need barrier semantics + * but it's easier to use atomic_* than roll our own. */ + log = qatomic_xchg(from, 0); + while (log) { + int bit = ctzl(log); + hwaddr page_addr; + hwaddr section_offset; + hwaddr mr_offset; + page_addr = addr + bit * VHOST_LOG_PAGE; + section_offset = page_addr - section->offset_within_address_space; + mr_offset = section_offset + section->offset_within_region; + memory_region_set_dirty(section->mr, mr_offset, VHOST_LOG_PAGE); + log &= ~(0x1ull << bit); + } + addr += VHOST_LOG_CHUNK; + } +} + +static int vhost_sync_dirty_bitmap(struct vhost_dev *dev, + MemoryRegionSection *section, + hwaddr first, + hwaddr last) +{ + int i; + hwaddr start_addr; + hwaddr end_addr; + + if (!dev->log_enabled || !dev->started) { + return 0; + } + start_addr = section->offset_within_address_space; + end_addr = range_get_last(start_addr, int128_get64(section->size)); + start_addr = MAX(first, start_addr); + end_addr = MIN(last, end_addr); + + for (i = 0; i < dev->mem->nregions; ++i) { + struct vhost_memory_region *reg = dev->mem->regions + i; + vhost_dev_sync_region(dev, section, start_addr, end_addr, + reg->guest_phys_addr, + range_get_last(reg->guest_phys_addr, + reg->memory_size)); + } + for (i = 0; i < dev->nvqs; ++i) { + struct vhost_virtqueue *vq = dev->vqs + i; + + if (!vq->used_phys && !vq->used_size) { + continue; + } + + vhost_dev_sync_region(dev, section, start_addr, end_addr, vq->used_phys, + range_get_last(vq->used_phys, vq->used_size)); + } + return 0; +} + +static void vhost_log_sync(MemoryListener *listener, + MemoryRegionSection *section) +{ + struct vhost_dev *dev = container_of(listener, struct vhost_dev, + memory_listener); + vhost_sync_dirty_bitmap(dev, section, 0x0, ~0x0ULL); +} + +static void vhost_log_sync_range(struct vhost_dev *dev, + hwaddr first, hwaddr last) +{ + int i; + /* FIXME: this is N^2 in number of sections */ + for (i = 0; i < dev->n_mem_sections; ++i) { + MemoryRegionSection *section = &dev->mem_sections[i]; + vhost_sync_dirty_bitmap(dev, section, first, last); + } +} + +static uint64_t vhost_get_log_size(struct vhost_dev *dev) +{ + uint64_t log_size = 0; + int i; + for 
(i = 0; i < dev->mem->nregions; ++i) { + struct vhost_memory_region *reg = dev->mem->regions + i; + uint64_t last = range_get_last(reg->guest_phys_addr, + reg->memory_size); + log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1); + } + return log_size; +} + +static int vhost_set_backend_type(struct vhost_dev *dev, + VhostBackendType backend_type) +{ + int r = 0; + + switch (backend_type) { +#ifdef CONFIG_VHOST_KERNEL + case VHOST_BACKEND_TYPE_KERNEL: + dev->vhost_ops = &kernel_ops; + break; +#endif +#ifdef CONFIG_VHOST_USER + case VHOST_BACKEND_TYPE_USER: + dev->vhost_ops = &user_ops; + break; +#endif +#ifdef CONFIG_VHOST_VDPA + case VHOST_BACKEND_TYPE_VDPA: + dev->vhost_ops = &vdpa_ops; + break; +#endif + default: + error_report("Unknown vhost backend type"); + r = -1; + } + + return r; +} + +static struct vhost_log *vhost_log_alloc(uint64_t size, bool share) +{ + Error *err = NULL; + struct vhost_log *log; + uint64_t logsize = size * sizeof(*(log->log)); + int fd = -1; + + log = g_new0(struct vhost_log, 1); + if (share) { + log->log = qemu_memfd_alloc("vhost-log", logsize, + F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL, + &fd, &err); + if (err) { + error_report_err(err); + g_free(log); + return NULL; + } + memset(log->log, 0, logsize); + } else { + log->log = g_malloc0(logsize); + } + + log->size = size; + log->refcnt = 1; + log->fd = fd; + + return log; +} + +static struct vhost_log *vhost_log_get(uint64_t size, bool share) +{ + struct vhost_log *log = share ? vhost_log_shm : vhost_log; + + if (!log || log->size != size) { + log = vhost_log_alloc(size, share); + if (share) { + vhost_log_shm = log; + } else { + vhost_log = log; + } + } else { + ++log->refcnt; + } + + return log; +} + +static void vhost_log_put(struct vhost_dev *dev, bool sync) +{ + struct vhost_log *log = dev->log; + + if (!log) { + return; + } + + --log->refcnt; + if (log->refcnt == 0) { + /* Sync only the range covered by the old log */ + if (dev->log_size && sync) { + vhost_log_sync_range(dev, 0, dev->log_size * VHOST_LOG_CHUNK - 1); + } + + if (vhost_log == log) { + g_free(log->log); + vhost_log = NULL; + } else if (vhost_log_shm == log) { + qemu_memfd_free(log->log, log->size * sizeof(*(log->log)), + log->fd); + vhost_log_shm = NULL; + } + + g_free(log); + } + + dev->log = NULL; + dev->log_size = 0; +} + +static bool vhost_dev_log_is_shared(struct vhost_dev *dev) +{ + return dev->vhost_ops->vhost_requires_shm_log && + dev->vhost_ops->vhost_requires_shm_log(dev); +} + +static inline void vhost_dev_log_resize(struct vhost_dev *dev, uint64_t size) +{ + struct vhost_log *log = vhost_log_get(size, vhost_dev_log_is_shared(dev)); + uint64_t log_base = (uintptr_t)log->log; + int r; + + /* inform backend of log switching, this must be done before + releasing the current log, to ensure no logging is lost */ + r = dev->vhost_ops->vhost_set_log_base(dev, log_base, log); + if (r < 0) { + VHOST_OPS_DEBUG("vhost_set_log_base failed"); + } + + vhost_log_put(dev, true); + dev->log = log; + dev->log_size = size; +} + +static int vhost_dev_has_iommu(struct vhost_dev *dev) +{ + VirtIODevice *vdev = dev->vdev; + + /* + * For vhost, VIRTIO_F_IOMMU_PLATFORM means the backend support + * incremental memory mapping API via IOTLB API. For platform that + * does not have IOMMU, there's no need to enable this feature + * which may cause unnecessary IOTLB miss/update trnasactions. 
+ */ + return virtio_bus_device_iommu_enabled(vdev) && + virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM); +} + +static void *vhost_memory_map(struct vhost_dev *dev, hwaddr addr, + hwaddr *plen, bool is_write) +{ + if (!vhost_dev_has_iommu(dev)) { + return cpu_physical_memory_map(addr, plen, is_write); + } else { + return (void *)(uintptr_t)addr; + } +} + +static void vhost_memory_unmap(struct vhost_dev *dev, void *buffer, + hwaddr len, int is_write, + hwaddr access_len) +{ + if (!vhost_dev_has_iommu(dev)) { + cpu_physical_memory_unmap(buffer, len, is_write, access_len); + } +} + +static int vhost_verify_ring_part_mapping(void *ring_hva, + uint64_t ring_gpa, + uint64_t ring_size, + void *reg_hva, + uint64_t reg_gpa, + uint64_t reg_size) +{ + uint64_t hva_ring_offset; + uint64_t ring_last = range_get_last(ring_gpa, ring_size); + uint64_t reg_last = range_get_last(reg_gpa, reg_size); + + if (ring_last < reg_gpa || ring_gpa > reg_last) { + return 0; + } + /* check that whole ring's is mapped */ + if (ring_last > reg_last) { + return -ENOMEM; + } + /* check that ring's MemoryRegion wasn't replaced */ + hva_ring_offset = ring_gpa - reg_gpa; + if (ring_hva != reg_hva + hva_ring_offset) { + return -EBUSY; + } + + return 0; +} + +static int vhost_verify_ring_mappings(struct vhost_dev *dev, + void *reg_hva, + uint64_t reg_gpa, + uint64_t reg_size) +{ + int i, j; + int r = 0; + const char *part_name[] = { + "descriptor table", + "available ring", + "used ring" + }; + + if (vhost_dev_has_iommu(dev)) { + return 0; + } + + for (i = 0; i < dev->nvqs; ++i) { + struct vhost_virtqueue *vq = dev->vqs + i; + + if (vq->desc_phys == 0) { + continue; + } + + j = 0; + r = vhost_verify_ring_part_mapping( + vq->desc, vq->desc_phys, vq->desc_size, + reg_hva, reg_gpa, reg_size); + if (r) { + break; + } + + j++; + r = vhost_verify_ring_part_mapping( + vq->avail, vq->avail_phys, vq->avail_size, + reg_hva, reg_gpa, reg_size); + if (r) { + break; + } + + j++; + r = vhost_verify_ring_part_mapping( + vq->used, vq->used_phys, vq->used_size, + reg_hva, reg_gpa, reg_size); + if (r) { + break; + } + } + + if (r == -ENOMEM) { + error_report("Unable to map %s for ring %d", part_name[j], i); + } else if (r == -EBUSY) { + error_report("%s relocated for ring %d", part_name[j], i); + } + return r; +} + +/* + * vhost_section: identify sections needed for vhost access + * + * We only care about RAM sections here (where virtqueue and guest + * internals accessed by virtio might live). If we find one we still + * allow the backend to potentially filter it out of our list. + */ +static bool vhost_section(struct vhost_dev *dev, MemoryRegionSection *section) +{ + MemoryRegion *mr = section->mr; + + if (memory_region_is_ram(mr) && !memory_region_is_rom(mr)) { + uint8_t dirty_mask = memory_region_get_dirty_log_mask(mr); + uint8_t handled_dirty; + + /* + * Kernel based vhost doesn't handle any block which is doing + * dirty-tracking other than migration for which it has + * specific logging support. However for TCG the kernel never + * gets involved anyway so we can also ignore it's + * self-modiying code detection flags. However a vhost-user + * client could still confuse a TCG guest if it re-writes + * executable memory that has already been translated. 
+ */ + handled_dirty = (1 << DIRTY_MEMORY_MIGRATION) | + (1 << DIRTY_MEMORY_CODE); + + if (dirty_mask & ~handled_dirty) { + trace_vhost_reject_section(mr->name, 1); + return false; + } + + if (dev->vhost_ops->vhost_backend_mem_section_filter && + !dev->vhost_ops->vhost_backend_mem_section_filter(dev, section)) { + trace_vhost_reject_section(mr->name, 2); + return false; + } + + trace_vhost_section(mr->name); + return true; + } else { + trace_vhost_reject_section(mr->name, 3); + return false; + } +} + +static void vhost_begin(MemoryListener *listener) +{ + struct vhost_dev *dev = container_of(listener, struct vhost_dev, + memory_listener); + dev->tmp_sections = NULL; + dev->n_tmp_sections = 0; +} + +static void vhost_commit(MemoryListener *listener) +{ + struct vhost_dev *dev = container_of(listener, struct vhost_dev, + memory_listener); + MemoryRegionSection *old_sections; + int n_old_sections; + uint64_t log_size; + size_t regions_size; + int r; + int i; + bool changed = false; + + /* Note that we can be called before the device is started, but then + * starting the device calls set_mem_table, so we need to have + * built the data structures. + */ + old_sections = dev->mem_sections; + n_old_sections = dev->n_mem_sections; + dev->mem_sections = dev->tmp_sections; + dev->n_mem_sections = dev->n_tmp_sections; + + if (dev->n_mem_sections != n_old_sections) { + changed = true; + } else { + /* Same size, let's check the contents */ + for (int i = 0; i < n_old_sections; i++) { + if (!MemoryRegionSection_eq(&old_sections[i], + &dev->mem_sections[i])) { + changed = true; + break; + } + } + } + + trace_vhost_commit(dev->started, changed); + if (!changed) { + goto out; + } + + /* Rebuild the regions list from the new sections list */ + regions_size = offsetof(struct vhost_memory, regions) + + dev->n_mem_sections * sizeof dev->mem->regions[0]; + dev->mem = g_realloc(dev->mem, regions_size); + dev->mem->nregions = dev->n_mem_sections; + used_memslots = dev->mem->nregions; + for (i = 0; i < dev->n_mem_sections; i++) { + struct vhost_memory_region *cur_vmr = dev->mem->regions + i; + struct MemoryRegionSection *mrs = dev->mem_sections + i; + + cur_vmr->guest_phys_addr = mrs->offset_within_address_space; + cur_vmr->memory_size = int128_get64(mrs->size); + cur_vmr->userspace_addr = + (uintptr_t)memory_region_get_ram_ptr(mrs->mr) + + mrs->offset_within_region; + cur_vmr->flags_padding = 0; + } + + if (!dev->started) { + goto out; + } + + for (i = 0; i < dev->mem->nregions; i++) { + if (vhost_verify_ring_mappings(dev, + (void *)(uintptr_t)dev->mem->regions[i].userspace_addr, + dev->mem->regions[i].guest_phys_addr, + dev->mem->regions[i].memory_size)) { + error_report("Verify ring failure on region %d", i); + abort(); + } + } + + if (!dev->log_enabled) { + r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem); + if (r < 0) { + VHOST_OPS_DEBUG("vhost_set_mem_table failed"); + } + goto out; + } + log_size = vhost_get_log_size(dev); + /* We allocate an extra 4K bytes for the log, + * to reduce the number of reallocations. */ +#define VHOST_LOG_BUFFER (0x1000 / sizeof *dev->log) + /* To log more, we must increase the log size before the table update. */ + if (dev->log_size < log_size) { + vhost_dev_log_resize(dev, log_size + VHOST_LOG_BUFFER); + } + r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem); + if (r < 0) { + VHOST_OPS_DEBUG("vhost_set_mem_table failed"); + } + /* To log less, we can only decrease the log size after the table update.
*/ + if (dev->log_size > log_size + VHOST_LOG_BUFFER) { + vhost_dev_log_resize(dev, log_size); + } + +out: + /* Deref the old list of sections; this must happen _after_ the + * vhost_set_mem_table to ensure the client isn't still using the + * section we're about to unref. + */ + while (n_old_sections--) { + memory_region_unref(old_sections[n_old_sections].mr); + } + g_free(old_sections); + return; +} + +/* Adds the section data to the tmp_sections structure. + * It relies on the listener calling us in memory address order, + * and for each region (via the _add and _nop methods) it tries to + * join neighbours. + */ +static void vhost_region_add_section(struct vhost_dev *dev, + MemoryRegionSection *section) +{ + bool need_add = true; + uint64_t mrs_size = int128_get64(section->size); + uint64_t mrs_gpa = section->offset_within_address_space; + uintptr_t mrs_host = (uintptr_t)memory_region_get_ram_ptr(section->mr) + + section->offset_within_region; + RAMBlock *mrs_rb = section->mr->ram_block; + + trace_vhost_region_add_section(section->mr->name, mrs_gpa, mrs_size, + mrs_host); + + if (dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER) { + /* Round the section to its page size */ + /* First align the start down to a page boundary */ + size_t mrs_page = qemu_ram_pagesize(mrs_rb); + uint64_t alignage = mrs_host & (mrs_page - 1); + if (alignage) { + mrs_host -= alignage; + mrs_size += alignage; + mrs_gpa -= alignage; + } + /* Now align the size up to a page boundary */ + alignage = mrs_size & (mrs_page - 1); + if (alignage) { + mrs_size += mrs_page - alignage; + } + trace_vhost_region_add_section_aligned(section->mr->name, mrs_gpa, + mrs_size, mrs_host); + } + + if (dev->n_tmp_sections) { + /* Since we already have at least one section, let's see if + * this extends it; since we're scanning in order, we only + * have to look at the last one, and the FlatView that calls + * us shouldn't have overlaps. + */ + MemoryRegionSection *prev_sec = dev->tmp_sections + + (dev->n_tmp_sections - 1); + uint64_t prev_gpa_start = prev_sec->offset_within_address_space; + uint64_t prev_size = int128_get64(prev_sec->size); + uint64_t prev_gpa_end = range_get_last(prev_gpa_start, prev_size); + uint64_t prev_host_start = + (uintptr_t)memory_region_get_ram_ptr(prev_sec->mr) + + prev_sec->offset_within_region; + uint64_t prev_host_end = range_get_last(prev_host_start, prev_size); + + if (mrs_gpa <= (prev_gpa_end + 1)) { + /* OK, looks like overlapping/intersecting - it's possible that + * the rounding to page sizes has made them overlap, but they should + * match up in the same RAMBlock if they do.
+ */ + if (mrs_gpa < prev_gpa_start) { + error_report("%s:Section '%s' rounded to %"PRIx64 + " prior to previous '%s' %"PRIx64, + __func__, section->mr->name, mrs_gpa, + prev_sec->mr->name, prev_gpa_start); + /* A way to cleanly fail here would be better */ + return; + } + /* Offset from the start of the previous GPA to this GPA */ + size_t offset = mrs_gpa - prev_gpa_start; + + if (prev_host_start + offset == mrs_host && + section->mr == prev_sec->mr && + (!dev->vhost_ops->vhost_backend_can_merge || + dev->vhost_ops->vhost_backend_can_merge(dev, + mrs_host, mrs_size, + prev_host_start, prev_size))) { + uint64_t max_end = MAX(prev_host_end, mrs_host + mrs_size); + need_add = false; + prev_sec->offset_within_address_space = + MIN(prev_gpa_start, mrs_gpa); + prev_sec->offset_within_region = + MIN(prev_host_start, mrs_host) - + (uintptr_t)memory_region_get_ram_ptr(prev_sec->mr); + prev_sec->size = int128_make64(max_end - MIN(prev_host_start, + mrs_host)); + trace_vhost_region_add_section_merge(section->mr->name, + int128_get64(prev_sec->size), + prev_sec->offset_within_address_space, + prev_sec->offset_within_region); + } else { + /* adjoining regions are fine, but overlapping ones with + * different blocks/offsets shouldn't happen + */ + if (mrs_gpa != prev_gpa_end + 1) { + error_report("%s: Overlapping but not coherent sections " + "at %"PRIx64, + __func__, mrs_gpa); + return; + } + } + } + } + + if (need_add) { + ++dev->n_tmp_sections; + dev->tmp_sections = g_renew(MemoryRegionSection, dev->tmp_sections, + dev->n_tmp_sections); + dev->tmp_sections[dev->n_tmp_sections - 1] = *section; + /* The flatview isn't stable and we don't use it, making it NULL + * means we can memcmp the list. + */ + dev->tmp_sections[dev->n_tmp_sections - 1].fv = NULL; + memory_region_ref(section->mr); + } +} + +/* Used for both add and nop callbacks */ +static void vhost_region_addnop(MemoryListener *listener, + MemoryRegionSection *section) +{ + struct vhost_dev *dev = container_of(listener, struct vhost_dev, + memory_listener); + + if (!vhost_section(dev, section)) { + return; + } + vhost_region_add_section(dev, section); +} + +static void vhost_iommu_unmap_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb) +{ + struct vhost_iommu *iommu = container_of(n, struct vhost_iommu, n); + struct vhost_dev *hdev = iommu->hdev; + hwaddr iova = iotlb->iova + iommu->iommu_offset; + + if (vhost_backend_invalidate_device_iotlb(hdev, iova, + iotlb->addr_mask + 1)) { + error_report("Fail to invalidate device iotlb"); + } +} + +static void vhost_iommu_region_add(MemoryListener *listener, + MemoryRegionSection *section) +{ + struct vhost_dev *dev = container_of(listener, struct vhost_dev, + iommu_listener); + struct vhost_iommu *iommu; + Int128 end; + int iommu_idx; + IOMMUMemoryRegion *iommu_mr; + int ret; + + if (!memory_region_is_iommu(section->mr)) { + return; + } + + iommu_mr = IOMMU_MEMORY_REGION(section->mr); + + iommu = g_malloc0(sizeof(*iommu)); + end = int128_add(int128_make64(section->offset_within_region), + section->size); + end = int128_sub(end, int128_one()); + iommu_idx = memory_region_iommu_attrs_to_index(iommu_mr, + MEMTXATTRS_UNSPECIFIED); + iommu_notifier_init(&iommu->n, vhost_iommu_unmap_notify, + IOMMU_NOTIFIER_DEVIOTLB_UNMAP, + section->offset_within_region, + int128_get64(end), + iommu_idx); + iommu->mr = section->mr; + iommu->iommu_offset = section->offset_within_address_space - + section->offset_within_region; + iommu->hdev = dev; + ret = memory_region_register_iommu_notifier(section->mr, &iommu->n, NULL); + 
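/* ret != 0 here means the vIOMMU refused the DEVIOTLB_UNMAP notifier + * flavour; we fall back to the legacy UNMAP one below. + */ +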
if (ret) { + /* + * Some vIOMMUs do not support dev-iotlb yet. If so, try to use the + * UNMAP legacy message + */ + iommu->n.notifier_flags = IOMMU_NOTIFIER_UNMAP; + memory_region_register_iommu_notifier(section->mr, &iommu->n, + &error_fatal); + } + QLIST_INSERT_HEAD(&dev->iommu_list, iommu, iommu_next); + /* TODO: can replay help performance here? */ +} + +static void vhost_iommu_region_del(MemoryListener *listener, + MemoryRegionSection *section) +{ + struct vhost_dev *dev = container_of(listener, struct vhost_dev, + iommu_listener); + struct vhost_iommu *iommu; + + if (!memory_region_is_iommu(section->mr)) { + return; + } + + QLIST_FOREACH(iommu, &dev->iommu_list, iommu_next) { + if (iommu->mr == section->mr && + iommu->n.start == section->offset_within_region) { + memory_region_unregister_iommu_notifier(iommu->mr, + &iommu->n); + QLIST_REMOVE(iommu, iommu_next); + g_free(iommu); + break; + } + } +} + +static int vhost_virtqueue_set_addr(struct vhost_dev *dev, + struct vhost_virtqueue *vq, + unsigned idx, bool enable_log) +{ + struct vhost_vring_addr addr; + int r; + memset(&addr, 0, sizeof(struct vhost_vring_addr)); + + if (dev->vhost_ops->vhost_vq_get_addr) { + r = dev->vhost_ops->vhost_vq_get_addr(dev, &addr, vq); + if (r < 0) { + VHOST_OPS_DEBUG("vhost_vq_get_addr failed"); + return -errno; + } + } else { + addr.desc_user_addr = (uint64_t)(unsigned long)vq->desc; + addr.avail_user_addr = (uint64_t)(unsigned long)vq->avail; + addr.used_user_addr = (uint64_t)(unsigned long)vq->used; + } + addr.index = idx; + addr.log_guest_addr = vq->used_phys; + addr.flags = enable_log ? (1 << VHOST_VRING_F_LOG) : 0; + r = dev->vhost_ops->vhost_set_vring_addr(dev, &addr); + if (r < 0) { + VHOST_OPS_DEBUG("vhost_set_vring_addr failed"); + return -errno; + } + return 0; +} + +static int vhost_dev_set_features(struct vhost_dev *dev, + bool enable_log) +{ + uint64_t features = dev->acked_features; + int r; + if (enable_log) { + features |= 0x1ULL << VHOST_F_LOG_ALL; + } + if (!vhost_dev_has_iommu(dev)) { + features &= ~(0x1ULL << VIRTIO_F_IOMMU_PLATFORM); + } + if (dev->vhost_ops->vhost_force_iommu) { + if (dev->vhost_ops->vhost_force_iommu(dev) == true) { + features |= 0x1ULL << VIRTIO_F_IOMMU_PLATFORM; + } + } + r = dev->vhost_ops->vhost_set_features(dev, features); + if (r < 0) { + VHOST_OPS_DEBUG("vhost_set_features failed"); + goto out; + } + if (dev->vhost_ops->vhost_set_backend_cap) { + r = dev->vhost_ops->vhost_set_backend_cap(dev); + if (r < 0) { + VHOST_OPS_DEBUG("vhost_set_backend_cap failed"); + goto out; + } + } + +out: + return r < 0 ? -errno : 0; +} + +static int vhost_dev_set_log(struct vhost_dev *dev, bool enable_log) +{ + int r, i, idx; + hwaddr addr; + + r = vhost_dev_set_features(dev, enable_log); + if (r < 0) { + goto err_features; + } + for (i = 0; i < dev->nvqs; ++i) { + idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i); + addr = virtio_queue_get_desc_addr(dev->vdev, idx); + if (!addr) { + /* + * The queue might not be ready for start. If this + * is the case there is no reason to continue the process. + * The similar logic is used by the vhost_virtqueue_start() + * routine. 
+ */ + continue; + } + r = vhost_virtqueue_set_addr(dev, dev->vqs + i, idx, + enable_log); + if (r < 0) { + goto err_vq; + } + } + return 0; +err_vq: + for (; i >= 0; --i) { + idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i); + vhost_virtqueue_set_addr(dev, dev->vqs + i, idx, + dev->log_enabled); + } + vhost_dev_set_features(dev, dev->log_enabled); +err_features: + return r; +} + +static int vhost_migration_log(MemoryListener *listener, bool enable) +{ + struct vhost_dev *dev = container_of(listener, struct vhost_dev, + memory_listener); + int r; + if (enable == dev->log_enabled) { + return 0; + } + if (!dev->started) { + dev->log_enabled = enable; + return 0; + } + + r = 0; + if (!enable) { + r = vhost_dev_set_log(dev, false); + if (r < 0) { + goto check_dev_state; + } + vhost_log_put(dev, false); + } else { + vhost_dev_log_resize(dev, vhost_get_log_size(dev)); + r = vhost_dev_set_log(dev, true); + if (r < 0) { + goto check_dev_state; + } + } + +check_dev_state: + dev->log_enabled = enable; + /* + * vhost-user-* devices could change their state during log + * initialization due to disconnect. So check dev state after + * vhost communication. + */ + if (!dev->started) { + /* + * Since device is in the stopped state, it is okay for + * migration. Return success. + */ + r = 0; + } + if (r) { + /* An error occurred. */ + dev->log_enabled = false; + } + + return r; +} + +static void vhost_log_global_start(MemoryListener *listener) +{ + int r; + + r = vhost_migration_log(listener, true); + if (r < 0) { + abort(); + } +} + +static void vhost_log_global_stop(MemoryListener *listener) +{ + int r; + + r = vhost_migration_log(listener, false); + if (r < 0) { + abort(); + } +} + +static void vhost_log_start(MemoryListener *listener, + MemoryRegionSection *section, + int old, int new) +{ + /* FIXME: implement */ +} + +static void vhost_log_stop(MemoryListener *listener, + MemoryRegionSection *section, + int old, int new) +{ + /* FIXME: implement */ +} + +/* The vhost driver natively knows how to handle the vrings of non + * cross-endian legacy devices and modern devices. Only legacy devices + * exposed to a bi-endian guest may require the vhost driver to use a + * specific endianness. 
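+ * + * For example, a legacy (pre-VIRTIO_F_VERSION_1) device driven by a + * big-endian guest on a little-endian host requires the vrings to be + * handled in big-endian byte order.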
+ */ +static inline bool vhost_needs_vring_endian(VirtIODevice *vdev) +{ + if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) { + return false; + } +#ifdef HOST_WORDS_BIGENDIAN + return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_LITTLE; +#else + return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_BIG; +#endif +} + +static int vhost_virtqueue_set_vring_endian_legacy(struct vhost_dev *dev, + bool is_big_endian, + int vhost_vq_index) +{ + struct vhost_vring_state s = { + .index = vhost_vq_index, + .num = is_big_endian + }; + + if (!dev->vhost_ops->vhost_set_vring_endian(dev, &s)) { + return 0; + } + + VHOST_OPS_DEBUG("vhost_set_vring_endian failed"); + if (errno == ENOTTY) { + error_report("vhost does not support cross-endian"); + return -ENOSYS; + } + + return -errno; +} + +static int vhost_memory_region_lookup(struct vhost_dev *hdev, + uint64_t gpa, uint64_t *uaddr, + uint64_t *len) +{ + int i; + + for (i = 0; i < hdev->mem->nregions; i++) { + struct vhost_memory_region *reg = hdev->mem->regions + i; + + if (gpa >= reg->guest_phys_addr && + reg->guest_phys_addr + reg->memory_size > gpa) { + *uaddr = reg->userspace_addr + gpa - reg->guest_phys_addr; + *len = reg->guest_phys_addr + reg->memory_size - gpa; + return 0; + } + } + + return -EFAULT; +} + +int vhost_device_iotlb_miss(struct vhost_dev *dev, uint64_t iova, int write) +{ + IOMMUTLBEntry iotlb; + uint64_t uaddr, len; + int ret = -EFAULT; + + RCU_READ_LOCK_GUARD(); + + trace_vhost_iotlb_miss(dev, 1); + + iotlb = address_space_get_iotlb_entry(dev->vdev->dma_as, + iova, write, + MEMTXATTRS_UNSPECIFIED); + if (iotlb.target_as != NULL) { + ret = vhost_memory_region_lookup(dev, iotlb.translated_addr, + &uaddr, &len); + if (ret) { + trace_vhost_iotlb_miss(dev, 3); + error_report("Fail to lookup the translated address " + "%"PRIx64, iotlb.translated_addr); + goto out; + } + + len = MIN(iotlb.addr_mask + 1, len); + iova = iova & ~iotlb.addr_mask; + + ret = vhost_backend_update_device_iotlb(dev, iova, uaddr, + len, iotlb.perm); + if (ret) { + trace_vhost_iotlb_miss(dev, 4); + error_report("Fail to update device iotlb"); + goto out; + } + } + + trace_vhost_iotlb_miss(dev, 2); + +out: + return ret; +} + +static int vhost_virtqueue_start(struct vhost_dev *dev, + struct VirtIODevice *vdev, + struct vhost_virtqueue *vq, + unsigned idx) +{ + BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev))); + VirtioBusState *vbus = VIRTIO_BUS(qbus); + VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus); + hwaddr s, l, a; + int r; + int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx); + struct vhost_vring_file file = { + .index = vhost_vq_index + }; + struct vhost_vring_state state = { + .index = vhost_vq_index + }; + struct VirtQueue *vvq = virtio_get_queue(vdev, idx); + + a = virtio_queue_get_desc_addr(vdev, idx); + if (a == 0) { + /* Queue might not be ready for start */ + return 0; + } + + vq->num = state.num = virtio_queue_get_num(vdev, idx); + r = dev->vhost_ops->vhost_set_vring_num(dev, &state); + if (r) { + VHOST_OPS_DEBUG("vhost_set_vring_num failed"); + return -errno; + } + + state.num = virtio_queue_get_last_avail_idx(vdev, idx); + r = dev->vhost_ops->vhost_set_vring_base(dev, &state); + if (r) { + VHOST_OPS_DEBUG("vhost_set_vring_base failed"); + return -errno; + } + + if (vhost_needs_vring_endian(vdev)) { + r = vhost_virtqueue_set_vring_endian_legacy(dev, + virtio_is_big_endian(vdev), + vhost_vq_index); + if (r) { + return -errno; + } + } + + vq->desc_size = s = l = virtio_queue_get_desc_size(vdev, idx); + vq->desc_phys = a; + vq->desc 
= vhost_memory_map(dev, a, &l, false); + if (!vq->desc || l != s) { + r = -ENOMEM; + goto fail_alloc_desc; + } + vq->avail_size = s = l = virtio_queue_get_avail_size(vdev, idx); + vq->avail_phys = a = virtio_queue_get_avail_addr(vdev, idx); + vq->avail = vhost_memory_map(dev, a, &l, false); + if (!vq->avail || l != s) { + r = -ENOMEM; + goto fail_alloc_avail; + } + vq->used_size = s = l = virtio_queue_get_used_size(vdev, idx); + vq->used_phys = a = virtio_queue_get_used_addr(vdev, idx); + vq->used = vhost_memory_map(dev, a, &l, true); + if (!vq->used || l != s) { + r = -ENOMEM; + goto fail_alloc_used; + } + + r = vhost_virtqueue_set_addr(dev, vq, vhost_vq_index, dev->log_enabled); + if (r < 0) { + r = -errno; + goto fail_alloc; + } + + file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq)); + r = dev->vhost_ops->vhost_set_vring_kick(dev, &file); + if (r) { + VHOST_OPS_DEBUG("vhost_set_vring_kick failed"); + r = -errno; + goto fail_kick; + } + + /* Clear and discard previous events if any. */ + event_notifier_test_and_clear(&vq->masked_notifier); + + /* Init vring in unmasked state, unless guest_notifier_mask + * will do it later. + */ + if (!vdev->use_guest_notifier_mask) { + /* TODO: check and handle errors. */ + vhost_virtqueue_mask(dev, vdev, idx, false); + } + + if (k->query_guest_notifiers && + k->query_guest_notifiers(qbus->parent) && + virtio_queue_vector(vdev, idx) == VIRTIO_NO_VECTOR) { + file.fd = -1; + r = dev->vhost_ops->vhost_set_vring_call(dev, &file); + if (r) { + goto fail_vector; + } + } + + return 0; + +fail_vector: +fail_kick: +fail_alloc: + vhost_memory_unmap(dev, vq->used, virtio_queue_get_used_size(vdev, idx), + 0, 0); +fail_alloc_used: + vhost_memory_unmap(dev, vq->avail, virtio_queue_get_avail_size(vdev, idx), + 0, 0); +fail_alloc_avail: + vhost_memory_unmap(dev, vq->desc, virtio_queue_get_desc_size(vdev, idx), + 0, 0); +fail_alloc_desc: + return r; +} + +static void vhost_virtqueue_stop(struct vhost_dev *dev, + struct VirtIODevice *vdev, + struct vhost_virtqueue *vq, + unsigned idx) +{ + int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx); + struct vhost_vring_state state = { + .index = vhost_vq_index, + }; + int r; + + if (virtio_queue_get_desc_addr(vdev, idx) == 0) { + /* Don't stop the virtqueue which might have not been started */ + return; + } + + r = dev->vhost_ops->vhost_get_vring_base(dev, &state); + if (r < 0) { + VHOST_OPS_DEBUG("vhost VQ %u ring restore failed: %d", idx, r); + /* Connection to the backend is broken, so let's sync internal + * last avail idx to the device used idx. + */ + virtio_queue_restore_last_avail_idx(vdev, idx); + } else { + virtio_queue_set_last_avail_idx(vdev, idx, state.num); + } + virtio_queue_invalidate_signalled_used(vdev, idx); + virtio_queue_update_used_idx(vdev, idx); + + /* In the cross-endian case, we need to reset the vring endianness to + * native as legacy devices expect so by default. 
+ */ + if (vhost_needs_vring_endian(vdev)) { + vhost_virtqueue_set_vring_endian_legacy(dev, + !virtio_is_big_endian(vdev), + vhost_vq_index); + } + + vhost_memory_unmap(dev, vq->used, virtio_queue_get_used_size(vdev, idx), + 1, virtio_queue_get_used_size(vdev, idx)); + vhost_memory_unmap(dev, vq->avail, virtio_queue_get_avail_size(vdev, idx), + 0, virtio_queue_get_avail_size(vdev, idx)); + vhost_memory_unmap(dev, vq->desc, virtio_queue_get_desc_size(vdev, idx), + 0, virtio_queue_get_desc_size(vdev, idx)); +} + +static void vhost_eventfd_add(MemoryListener *listener, + MemoryRegionSection *section, + bool match_data, uint64_t data, EventNotifier *e) +{ +} + +static void vhost_eventfd_del(MemoryListener *listener, + MemoryRegionSection *section, + bool match_data, uint64_t data, EventNotifier *e) +{ +} + +static int vhost_virtqueue_set_busyloop_timeout(struct vhost_dev *dev, + int n, uint32_t timeout) +{ + int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, n); + struct vhost_vring_state state = { + .index = vhost_vq_index, + .num = timeout, + }; + int r; + + if (!dev->vhost_ops->vhost_set_vring_busyloop_timeout) { + return -EINVAL; + } + + r = dev->vhost_ops->vhost_set_vring_busyloop_timeout(dev, &state); + if (r) { + VHOST_OPS_DEBUG("vhost_set_vring_busyloop_timeout failed"); + return r; + } + + return 0; +} + +static int vhost_virtqueue_init(struct vhost_dev *dev, + struct vhost_virtqueue *vq, int n) +{ + int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, n); + struct vhost_vring_file file = { + .index = vhost_vq_index, + }; + int r = event_notifier_init(&vq->masked_notifier, 0); + if (r < 0) { + return r; + } + + file.fd = event_notifier_get_fd(&vq->masked_notifier); + r = dev->vhost_ops->vhost_set_vring_call(dev, &file); + if (r) { + VHOST_OPS_DEBUG("vhost_set_vring_call failed"); + r = -errno; + goto fail_call; + } + + vq->dev = dev; + + return 0; +fail_call: + event_notifier_cleanup(&vq->masked_notifier); + return r; +} + +static void vhost_virtqueue_cleanup(struct vhost_virtqueue *vq) +{ + event_notifier_cleanup(&vq->masked_notifier); +} + +int vhost_dev_init(struct vhost_dev *hdev, void *opaque, + VhostBackendType backend_type, uint32_t busyloop_timeout, + Error **errp) +{ + uint64_t features; + int i, r, n_initialized_vqs = 0; + + hdev->vdev = NULL; + hdev->migration_blocker = NULL; + + r = vhost_set_backend_type(hdev, backend_type); + assert(r >= 0); + + r = hdev->vhost_ops->vhost_backend_init(hdev, opaque, errp); + if (r < 0) { + goto fail; + } + + r = hdev->vhost_ops->vhost_set_owner(hdev); + if (r < 0) { + error_setg_errno(errp, -r, "vhost_set_owner failed"); + goto fail; + } + + r = hdev->vhost_ops->vhost_get_features(hdev, &features); + if (r < 0) { + error_setg_errno(errp, -r, "vhost_get_features failed"); + goto fail; + } + + for (i = 0; i < hdev->nvqs; ++i, ++n_initialized_vqs) { + r = vhost_virtqueue_init(hdev, hdev->vqs + i, hdev->vq_index + i); + if (r < 0) { + error_setg_errno(errp, -r, "Failed to initialize virtqueue %d", i); + goto fail; + } + } + + if (busyloop_timeout) { + for (i = 0; i < hdev->nvqs; ++i) { + r = vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i, + busyloop_timeout); + if (r < 0) { + error_setg_errno(errp, -r, "Failed to set busyloop timeout"); + goto fail_busyloop; + } + } + } + + hdev->features = features; + + hdev->memory_listener = (MemoryListener) { + .name = "vhost", + .begin = vhost_begin, + .commit = vhost_commit, + .region_add = vhost_region_addnop, + .region_nop = vhost_region_addnop, + .log_start = 
vhost_log_start, + .log_stop = vhost_log_stop, + .log_sync = vhost_log_sync, + .log_global_start = vhost_log_global_start, + .log_global_stop = vhost_log_global_stop, + .eventfd_add = vhost_eventfd_add, + .eventfd_del = vhost_eventfd_del, + .priority = 10 + }; + + hdev->iommu_listener = (MemoryListener) { + .name = "vhost-iommu", + .region_add = vhost_iommu_region_add, + .region_del = vhost_iommu_region_del, + }; + + if (hdev->migration_blocker == NULL) { + if (!(hdev->features & (0x1ULL << VHOST_F_LOG_ALL))) { + error_setg(&hdev->migration_blocker, + "Migration disabled: vhost lacks VHOST_F_LOG_ALL feature."); + } else if (vhost_dev_log_is_shared(hdev) && !qemu_memfd_alloc_check()) { + error_setg(&hdev->migration_blocker, + "Migration disabled: failed to allocate shared memory"); + } + } + + if (hdev->migration_blocker != NULL) { + r = migrate_add_blocker(hdev->migration_blocker, errp); + if (r < 0) { + error_free(hdev->migration_blocker); + goto fail_busyloop; + } + } + + hdev->mem = g_malloc0(offsetof(struct vhost_memory, regions)); + hdev->n_mem_sections = 0; + hdev->mem_sections = NULL; + hdev->log = NULL; + hdev->log_size = 0; + hdev->log_enabled = false; + hdev->started = false; + memory_listener_register(&hdev->memory_listener, &address_space_memory); + QLIST_INSERT_HEAD(&vhost_devices, hdev, entry); + + if (used_memslots > hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) { + error_setg(errp, "vhost backend memory slots limit is less" + " than current number of present memory slots"); + r = -EINVAL; + goto fail_busyloop; + } + + return 0; + +fail_busyloop: + if (busyloop_timeout) { + while (--i >= 0) { + vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i, 0); + } + } +fail: + hdev->nvqs = n_initialized_vqs; + vhost_dev_cleanup(hdev); + return r; +} + +void vhost_dev_cleanup(struct vhost_dev *hdev) +{ + int i; + + for (i = 0; i < hdev->nvqs; ++i) { + vhost_virtqueue_cleanup(hdev->vqs + i); + } + if (hdev->mem) { + /* those are only safe after successful init */ + memory_listener_unregister(&hdev->memory_listener); + QLIST_REMOVE(hdev, entry); + } + if (hdev->migration_blocker) { + migrate_del_blocker(hdev->migration_blocker); + error_free(hdev->migration_blocker); + } + g_free(hdev->mem); + g_free(hdev->mem_sections); + if (hdev->vhost_ops) { + hdev->vhost_ops->vhost_backend_cleanup(hdev); + } + assert(!hdev->log); + + memset(hdev, 0, sizeof(struct vhost_dev)); +} + +/* Stop processing guest IO notifications in qemu. + * Start processing them in vhost in kernel. + */ +int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev) +{ + BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev))); + int i, r, e; + + /* We will pass the notifiers to the kernel, make sure that QEMU + * doesn't interfere. 
+ */ + r = virtio_device_grab_ioeventfd(vdev); + if (r < 0) { + error_report("binding does not support host notifiers"); + goto fail; + } + + for (i = 0; i < hdev->nvqs; ++i) { + r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i, + true); + if (r < 0) { + error_report("vhost VQ %d notifier binding failed: %d", i, -r); + goto fail_vq; + } + } + + return 0; +fail_vq: + while (--i >= 0) { + e = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i, + false); + if (e < 0) { + error_report("vhost VQ %d notifier cleanup error: %d", i, -e); + } + assert(e >= 0); + virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i); + } + virtio_device_release_ioeventfd(vdev); +fail: + return r; +} + +/* Stop processing guest IO notifications in vhost. + * Start processing them in qemu. + * This might actually run the qemu handlers right away, + * so virtio in qemu must be completely set up when this is called. + */ +void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev) +{ + BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev))); + int i, r; + + for (i = 0; i < hdev->nvqs; ++i) { + r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i, + false); + if (r < 0) { + error_report("vhost VQ %d notifier cleanup failed: %d", i, -r); + } + assert(r >= 0); + virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i); + } + virtio_device_release_ioeventfd(vdev); +} + +/* Test and clear event pending status. + * Should be called after unmask to avoid losing events. + */ +bool vhost_virtqueue_pending(struct vhost_dev *hdev, int n) +{ + struct vhost_virtqueue *vq = hdev->vqs + n - hdev->vq_index; + assert(n >= hdev->vq_index && n < hdev->vq_index + hdev->nvqs); + return event_notifier_test_and_clear(&vq->masked_notifier); +} + +/* Mask/unmask events from this vq.
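+ * + * While a queue is masked, its call events are redirected to the + * masked_notifier; vhost_virtqueue_pending() later tests and clears + * whatever arrived in the meantime. +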
*/ +void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n, + bool mask) +{ + struct VirtQueue *vvq = virtio_get_queue(vdev, n); + int r, index = n - hdev->vq_index; + struct vhost_vring_file file; + + /* should only be called after backend is connected */ + assert(hdev->vhost_ops); + + if (mask) { + assert(vdev->use_guest_notifier_mask); + file.fd = event_notifier_get_fd(&hdev->vqs[index].masked_notifier); + } else { + file.fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq)); + } + + file.index = hdev->vhost_ops->vhost_get_vq_index(hdev, n); + r = hdev->vhost_ops->vhost_set_vring_call(hdev, &file); + if (r < 0) { + VHOST_OPS_DEBUG("vhost_set_vring_call failed"); + } +} + +uint64_t vhost_get_features(struct vhost_dev *hdev, const int *feature_bits, + uint64_t features) +{ + const int *bit = feature_bits; + while (*bit != VHOST_INVALID_FEATURE_BIT) { + uint64_t bit_mask = (1ULL << *bit); + if (!(hdev->features & bit_mask)) { + features &= ~bit_mask; + } + bit++; + } + return features; +} + +void vhost_ack_features(struct vhost_dev *hdev, const int *feature_bits, + uint64_t features) +{ + const int *bit = feature_bits; + while (*bit != VHOST_INVALID_FEATURE_BIT) { + uint64_t bit_mask = (1ULL << *bit); + if (features & bit_mask) { + hdev->acked_features |= bit_mask; + } + bit++; + } +} + +int vhost_dev_get_config(struct vhost_dev *hdev, uint8_t *config, + uint32_t config_len, Error **errp) +{ + assert(hdev->vhost_ops); + + if (hdev->vhost_ops->vhost_get_config) { + return hdev->vhost_ops->vhost_get_config(hdev, config, config_len, + errp); + } + + error_setg(errp, "vhost_get_config not implemented"); + return -ENOTSUP; +} + +int vhost_dev_set_config(struct vhost_dev *hdev, const uint8_t *data, + uint32_t offset, uint32_t size, uint32_t flags) +{ + assert(hdev->vhost_ops); + + if (hdev->vhost_ops->vhost_set_config) { + return hdev->vhost_ops->vhost_set_config(hdev, data, offset, + size, flags); + } + + return -1; +} + +void vhost_dev_set_config_notifier(struct vhost_dev *hdev, + const VhostDevConfigOps *ops) +{ + hdev->config_ops = ops; +} + +void vhost_dev_free_inflight(struct vhost_inflight *inflight) +{ + if (inflight && inflight->addr) { + qemu_memfd_free(inflight->addr, inflight->size, inflight->fd); + inflight->addr = NULL; + inflight->fd = -1; + } +} + +static int vhost_dev_resize_inflight(struct vhost_inflight *inflight, + uint64_t new_size) +{ + Error *err = NULL; + int fd = -1; + void *addr = qemu_memfd_alloc("vhost-inflight", new_size, + F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL, + &fd, &err); + + if (err) { + error_report_err(err); + return -1; + } + + vhost_dev_free_inflight(inflight); + inflight->offset = 0; + inflight->addr = addr; + inflight->fd = fd; + inflight->size = new_size; + + return 0; +} + +void vhost_dev_save_inflight(struct vhost_inflight *inflight, QEMUFile *f) +{ + if (inflight->addr) { + qemu_put_be64(f, inflight->size); + qemu_put_be16(f, inflight->queue_size); + qemu_put_buffer(f, inflight->addr, inflight->size); + } else { + qemu_put_be64(f, 0); + } +} + +int vhost_dev_load_inflight(struct vhost_inflight *inflight, QEMUFile *f) +{ + uint64_t size; + + size = qemu_get_be64(f); + if (!size) { + return 0; + } + + if (inflight->size != size) { + if (vhost_dev_resize_inflight(inflight, size)) { + return -1; + } + } + inflight->queue_size = qemu_get_be16(f); + + qemu_get_buffer(f, inflight->addr, size); + + return 0; +} + +int vhost_dev_prepare_inflight(struct vhost_dev *hdev, VirtIODevice *vdev) +{ + int r; + + if 
(hdev->vhost_ops->vhost_get_inflight_fd == NULL || + hdev->vhost_ops->vhost_set_inflight_fd == NULL) { + return 0; + } + + hdev->vdev = vdev; + + r = vhost_dev_set_features(hdev, hdev->log_enabled); + if (r < 0) { + VHOST_OPS_DEBUG("vhost_dev_prepare_inflight failed"); + return r; + } + + return 0; +} + +int vhost_dev_set_inflight(struct vhost_dev *dev, + struct vhost_inflight *inflight) +{ + int r; + + if (dev->vhost_ops->vhost_set_inflight_fd && inflight->addr) { + r = dev->vhost_ops->vhost_set_inflight_fd(dev, inflight); + if (r) { + VHOST_OPS_DEBUG("vhost_set_inflight_fd failed"); + return -errno; + } + } + + return 0; +} + +int vhost_dev_get_inflight(struct vhost_dev *dev, uint16_t queue_size, + struct vhost_inflight *inflight) +{ + int r; + + if (dev->vhost_ops->vhost_get_inflight_fd) { + r = dev->vhost_ops->vhost_get_inflight_fd(dev, queue_size, inflight); + if (r) { + VHOST_OPS_DEBUG("vhost_get_inflight_fd failed"); + return -errno; + } + } + + return 0; +} + +/* Host notifiers must be enabled at this point. */ +int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev) +{ + int i, r; + + /* should only be called after backend is connected */ + assert(hdev->vhost_ops); + + hdev->started = true; + hdev->vdev = vdev; + + r = vhost_dev_set_features(hdev, hdev->log_enabled); + if (r < 0) { + goto fail_features; + } + + if (vhost_dev_has_iommu(hdev)) { + memory_listener_register(&hdev->iommu_listener, vdev->dma_as); + } + + r = hdev->vhost_ops->vhost_set_mem_table(hdev, hdev->mem); + if (r < 0) { + VHOST_OPS_DEBUG("vhost_set_mem_table failed"); + r = -errno; + goto fail_mem; + } + for (i = 0; i < hdev->nvqs; ++i) { + r = vhost_virtqueue_start(hdev, + vdev, + hdev->vqs + i, + hdev->vq_index + i); + if (r < 0) { + goto fail_vq; + } + } + + if (hdev->log_enabled) { + uint64_t log_base; + + hdev->log_size = vhost_get_log_size(hdev); + hdev->log = vhost_log_get(hdev->log_size, + vhost_dev_log_is_shared(hdev)); + log_base = (uintptr_t)hdev->log->log; + r = hdev->vhost_ops->vhost_set_log_base(hdev, + hdev->log_size ? log_base : 0, + hdev->log); + if (r < 0) { + VHOST_OPS_DEBUG("vhost_set_log_base failed"); + r = -errno; + goto fail_log; + } + } + if (hdev->vhost_ops->vhost_dev_start) { + r = hdev->vhost_ops->vhost_dev_start(hdev, true); + if (r) { + goto fail_log; + } + } + if (vhost_dev_has_iommu(hdev) && + hdev->vhost_ops->vhost_set_iotlb_callback) { + hdev->vhost_ops->vhost_set_iotlb_callback(hdev, true); + + /* Update used ring information for IOTLB to work correctly; + * the vhost-kernel code requires this. */ + for (i = 0; i < hdev->nvqs; ++i) { + struct vhost_virtqueue *vq = hdev->vqs + i; + vhost_device_iotlb_miss(hdev, vq->used_phys, true); + } + } + return 0; +fail_log: + vhost_log_put(hdev, false); +fail_vq: + while (--i >= 0) { + vhost_virtqueue_stop(hdev, + vdev, + hdev->vqs + i, + hdev->vq_index + i); + } + +fail_mem: +fail_features: + + hdev->started = false; + return r; +} + +/* Host notifiers must be enabled at this point.
*/ +void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev) +{ + int i; + + /* should only be called after backend is connected */ + assert(hdev->vhost_ops); + + if (hdev->vhost_ops->vhost_dev_start) { + hdev->vhost_ops->vhost_dev_start(hdev, false); + } + for (i = 0; i < hdev->nvqs; ++i) { + vhost_virtqueue_stop(hdev, + vdev, + hdev->vqs + i, + hdev->vq_index + i); + } + + if (vhost_dev_has_iommu(hdev)) { + if (hdev->vhost_ops->vhost_set_iotlb_callback) { + hdev->vhost_ops->vhost_set_iotlb_callback(hdev, false); + } + memory_listener_unregister(&hdev->iommu_listener); + } + vhost_log_put(hdev, true); + hdev->started = false; + hdev->vdev = NULL; +} + +int vhost_net_set_backend(struct vhost_dev *hdev, + struct vhost_vring_file *file) +{ + if (hdev->vhost_ops->vhost_net_set_backend) { + return hdev->vhost_ops->vhost_net_set_backend(hdev, file); + } + + return -1; +} diff --git a/hw/virtio/virtio-9p-pci.c b/hw/virtio/virtio-9p-pci.c new file mode 100644 index 000000000..e07adcd9e --- /dev/null +++ b/hw/virtio/virtio-9p-pci.c @@ -0,0 +1,91 @@ +/* + * Virtio 9p PCI Bindings + * + * Copyright IBM, Corp. 2010 + * + * Authors: + * Anthony Liguori + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + * + * Contributions after 2012-01-13 are licensed under the terms of the + * GNU GPL, version 2 or (at your option) any later version. + */ + +#include "qemu/osdep.h" + +#include "virtio-pci.h" +#include "hw/9pfs/virtio-9p.h" +#include "hw/qdev-properties.h" +#include "qemu/module.h" +#include "qom/object.h" + +/* + * virtio-9p-pci: This extends VirtioPCIProxy. + */ + +#define TYPE_VIRTIO_9P_PCI "virtio-9p-pci-base" +typedef struct V9fsPCIState V9fsPCIState; +DECLARE_INSTANCE_CHECKER(V9fsPCIState, VIRTIO_9P_PCI, + TYPE_VIRTIO_9P_PCI) + +struct V9fsPCIState { + VirtIOPCIProxy parent_obj; + V9fsVirtioState vdev; +}; + +static void virtio_9p_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp) +{ + V9fsPCIState *dev = VIRTIO_9P_PCI(vpci_dev); + DeviceState *vdev = DEVICE(&dev->vdev); + + qdev_realize(vdev, BUS(&vpci_dev->bus), errp); +} + +static Property virtio_9p_pci_properties[] = { + DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags, + VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true), + DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2), + DEFINE_PROP_END_OF_LIST(), +}; + +static void virtio_9p_pci_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass); + VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass); + + k->realize = virtio_9p_pci_realize; + pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET; + pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_9P; + pcidev_k->revision = VIRTIO_PCI_ABI_VERSION; + pcidev_k->class_id = 0x2; + set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); + device_class_set_props(dc, virtio_9p_pci_properties); +} + +static void virtio_9p_pci_instance_init(Object *obj) +{ + V9fsPCIState *dev = VIRTIO_9P_PCI(obj); + + virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev), + TYPE_VIRTIO_9P); +} + +static const VirtioPCIDeviceTypeInfo virtio_9p_pci_info = { + .base_name = TYPE_VIRTIO_9P_PCI, + .generic_name = "virtio-9p-pci", + .transitional_name = "virtio-9p-pci-transitional", + .non_transitional_name = "virtio-9p-pci-non-transitional", + .instance_size = sizeof(V9fsPCIState), + .instance_init = virtio_9p_pci_instance_init, + .class_init = virtio_9p_pci_class_init, +}; + +static void virtio_9p_pci_register(void) +{ + 
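/* Register the generic, transitional and non-transitional variants + * declared in virtio_9p_pci_info above. + */ +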
virtio_pci_types_register(&virtio_9p_pci_info); +} + +type_init(virtio_9p_pci_register) diff --git a/hw/virtio/virtio-balloon-pci.c b/hw/virtio/virtio-balloon-pci.c new file mode 100644 index 000000000..79a3ba979 --- /dev/null +++ b/hw/virtio/virtio-balloon-pci.c @@ -0,0 +1,88 @@ +/* + * Virtio balloon PCI Bindings + * + * Copyright IBM, Corp. 2007 + * Copyright (c) 2009 CodeSourcery + * + * Authors: + * Anthony Liguori + * Paul Brook + * + * Contributions after 2012-01-13 are licensed under the terms of the + * GNU GPL, version 2 or (at your option) any later version. + */ + +#include "qemu/osdep.h" + +#include "virtio-pci.h" +#include "hw/qdev-properties.h" +#include "hw/virtio/virtio-balloon.h" +#include "qapi/error.h" +#include "qemu/module.h" +#include "qom/object.h" + +typedef struct VirtIOBalloonPCI VirtIOBalloonPCI; + +/* + * virtio-balloon-pci: This extends VirtioPCIProxy. + */ +#define TYPE_VIRTIO_BALLOON_PCI "virtio-balloon-pci-base" +DECLARE_INSTANCE_CHECKER(VirtIOBalloonPCI, VIRTIO_BALLOON_PCI, + TYPE_VIRTIO_BALLOON_PCI) + +struct VirtIOBalloonPCI { + VirtIOPCIProxy parent_obj; + VirtIOBalloon vdev; +}; + +static void virtio_balloon_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp) +{ + VirtIOBalloonPCI *dev = VIRTIO_BALLOON_PCI(vpci_dev); + DeviceState *vdev = DEVICE(&dev->vdev); + + vpci_dev->class_code = PCI_CLASS_OTHERS; + qdev_realize(vdev, BUS(&vpci_dev->bus), errp); +} + +static void virtio_balloon_pci_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass); + PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass); + k->realize = virtio_balloon_pci_realize; + set_bit(DEVICE_CATEGORY_MISC, dc->categories); + pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET; + pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_BALLOON; + pcidev_k->revision = VIRTIO_PCI_ABI_VERSION; + pcidev_k->class_id = PCI_CLASS_OTHERS; +} + +static void virtio_balloon_pci_instance_init(Object *obj) +{ + VirtIOBalloonPCI *dev = VIRTIO_BALLOON_PCI(obj); + + virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev), + TYPE_VIRTIO_BALLOON); + object_property_add_alias(obj, "guest-stats", OBJECT(&dev->vdev), + "guest-stats"); + object_property_add_alias(obj, "guest-stats-polling-interval", + OBJECT(&dev->vdev), + "guest-stats-polling-interval"); +} + +static const VirtioPCIDeviceTypeInfo virtio_balloon_pci_info = { + .base_name = TYPE_VIRTIO_BALLOON_PCI, + .generic_name = "virtio-balloon-pci", + .transitional_name = "virtio-balloon-pci-transitional", + .non_transitional_name = "virtio-balloon-pci-non-transitional", + .instance_size = sizeof(VirtIOBalloonPCI), + .instance_init = virtio_balloon_pci_instance_init, + .class_init = virtio_balloon_pci_class_init, +}; + +static void virtio_balloon_pci_register(void) +{ + virtio_pci_types_register(&virtio_balloon_pci_info); +} + +type_init(virtio_balloon_pci_register) diff --git a/hw/virtio/virtio-balloon.c b/hw/virtio/virtio-balloon.c new file mode 100644 index 000000000..9a4f491b5 --- /dev/null +++ b/hw/virtio/virtio-balloon.c @@ -0,0 +1,1086 @@ +/* + * Virtio Balloon Device + * + * Copyright IBM, Corp. 2008 + * Copyright (C) 2011 Red Hat, Inc. + * Copyright (C) 2011 Amit Shah + * + * Authors: + * Anthony Liguori + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. 
+ * + */ + +#include "qemu/osdep.h" +#include "qemu/iov.h" +#include "qemu/module.h" +#include "qemu/timer.h" +#include "hw/virtio/virtio.h" +#include "hw/mem/pc-dimm.h" +#include "hw/qdev-properties.h" +#include "hw/boards.h" +#include "sysemu/balloon.h" +#include "hw/virtio/virtio-balloon.h" +#include "exec/address-spaces.h" +#include "qapi/error.h" +#include "qapi/qapi-events-machine.h" +#include "qapi/visitor.h" +#include "trace.h" +#include "qemu/error-report.h" +#include "migration/misc.h" +#include "migration/migration.h" + +#include "hw/virtio/virtio-bus.h" +#include "hw/virtio/virtio-access.h" + +#define BALLOON_PAGE_SIZE (1 << VIRTIO_BALLOON_PFN_SHIFT) + +typedef struct PartiallyBalloonedPage { + ram_addr_t base_gpa; + unsigned long *bitmap; +} PartiallyBalloonedPage; + +static void virtio_balloon_pbp_free(PartiallyBalloonedPage *pbp) +{ + if (!pbp->bitmap) { + return; + } + g_free(pbp->bitmap); + pbp->bitmap = NULL; +} + +static void virtio_balloon_pbp_alloc(PartiallyBalloonedPage *pbp, + ram_addr_t base_gpa, + long subpages) +{ + pbp->base_gpa = base_gpa; + pbp->bitmap = bitmap_new(subpages); +} + +static bool virtio_balloon_pbp_matches(PartiallyBalloonedPage *pbp, + ram_addr_t base_gpa) +{ + return pbp->base_gpa == base_gpa; +} + +static bool virtio_balloon_inhibited(void) +{ + /* + * Postcopy cannot deal with concurrent discards, + * so it's special, as well as background snapshots. + */ + return ram_block_discard_is_disabled() || migration_in_incoming_postcopy() || + migration_in_bg_snapshot(); +} + +static void balloon_inflate_page(VirtIOBalloon *balloon, + MemoryRegion *mr, hwaddr mr_offset, + PartiallyBalloonedPage *pbp) +{ + void *addr = memory_region_get_ram_ptr(mr) + mr_offset; + ram_addr_t rb_offset, rb_aligned_offset, base_gpa; + RAMBlock *rb; + size_t rb_page_size; + int subpages; + + /* XXX is there a better way to get to the RAMBlock than via a + * host address? */ + rb = qemu_ram_block_from_host(addr, false, &rb_offset); + rb_page_size = qemu_ram_pagesize(rb); + + if (rb_page_size == BALLOON_PAGE_SIZE) { + /* Easy case */ + + ram_block_discard_range(rb, rb_offset, rb_page_size); + /* We ignore errors from ram_block_discard_range(), because it + * has already reported them, and failing to discard a balloon + * page is not fatal */ + return; + } + + /* Hard case + * + * We've put a piece of a larger host page into the balloon - we + * need to keep track until we have a whole host page to + * discard + */ + warn_report_once( +"Balloon used with backing page size > 4kiB, this may not be reliable"); + + rb_aligned_offset = QEMU_ALIGN_DOWN(rb_offset, rb_page_size); + subpages = rb_page_size / BALLOON_PAGE_SIZE; + base_gpa = memory_region_get_ram_addr(mr) + mr_offset - + (rb_offset - rb_aligned_offset); + + if (pbp->bitmap && !virtio_balloon_pbp_matches(pbp, base_gpa)) { + /* We've partially ballooned part of a host page, but now + * we're trying to balloon part of a different one. 
Too hard, + * give up on the old partial page */ + virtio_balloon_pbp_free(pbp); + } + + if (!pbp->bitmap) { + virtio_balloon_pbp_alloc(pbp, base_gpa, subpages); + } + + set_bit((rb_offset - rb_aligned_offset) / BALLOON_PAGE_SIZE, + pbp->bitmap); + + if (bitmap_full(pbp->bitmap, subpages)) { + /* We've accumulated a full host page, we can actually discard + * it now */ + + ram_block_discard_range(rb, rb_aligned_offset, rb_page_size); + /* We ignore errors from ram_block_discard_range(), because it + * has already reported them, and failing to discard a balloon + * page is not fatal */ + virtio_balloon_pbp_free(pbp); + } +} + +static void balloon_deflate_page(VirtIOBalloon *balloon, + MemoryRegion *mr, hwaddr mr_offset) +{ + void *addr = memory_region_get_ram_ptr(mr) + mr_offset; + ram_addr_t rb_offset; + RAMBlock *rb; + size_t rb_page_size; + void *host_addr; + int ret; + + /* XXX is there a better way to get to the RAMBlock than via a + * host address? */ + rb = qemu_ram_block_from_host(addr, false, &rb_offset); + rb_page_size = qemu_ram_pagesize(rb); + + host_addr = (void *)((uintptr_t)addr & ~(rb_page_size - 1)); + + /* When a page is deflated, we hint the whole host page it lives + * on, since we can't do anything smaller */ + ret = qemu_madvise(host_addr, rb_page_size, QEMU_MADV_WILLNEED); + if (ret != 0) { + warn_report("Couldn't MADV_WILLNEED on balloon deflate: %s", + strerror(errno)); + /* Otherwise ignore, failing to page hint shouldn't be fatal */ + } +} + +static const char *balloon_stat_names[] = { + [VIRTIO_BALLOON_S_SWAP_IN] = "stat-swap-in", + [VIRTIO_BALLOON_S_SWAP_OUT] = "stat-swap-out", + [VIRTIO_BALLOON_S_MAJFLT] = "stat-major-faults", + [VIRTIO_BALLOON_S_MINFLT] = "stat-minor-faults", + [VIRTIO_BALLOON_S_MEMFREE] = "stat-free-memory", + [VIRTIO_BALLOON_S_MEMTOT] = "stat-total-memory", + [VIRTIO_BALLOON_S_AVAIL] = "stat-available-memory", + [VIRTIO_BALLOON_S_CACHES] = "stat-disk-caches", + [VIRTIO_BALLOON_S_HTLB_PGALLOC] = "stat-htlb-pgalloc", + [VIRTIO_BALLOON_S_HTLB_PGFAIL] = "stat-htlb-pgfail", + [VIRTIO_BALLOON_S_NR] = NULL +}; + +/* + * reset_stats - Mark all items in the stats array as unset + * + * This function needs to be called at device initialization and before + * updating to a set of newly-generated stats. This will ensure that no + * stale values stick around in case the guest reports a subset of the supported + * statistics. 
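+ * + * Unset entries keep the sentinel value -1, marking statistics the + * guest did not supply.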
+ */ +static inline void reset_stats(VirtIOBalloon *dev) +{ + int i; + for (i = 0; i < VIRTIO_BALLOON_S_NR; dev->stats[i++] = -1); +} + +static bool balloon_stats_supported(const VirtIOBalloon *s) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(s); + return virtio_vdev_has_feature(vdev, VIRTIO_BALLOON_F_STATS_VQ); +} + +static bool balloon_stats_enabled(const VirtIOBalloon *s) +{ + return s->stats_poll_interval > 0; +} + +static void balloon_stats_destroy_timer(VirtIOBalloon *s) +{ + if (balloon_stats_enabled(s)) { + timer_free(s->stats_timer); + s->stats_timer = NULL; + s->stats_poll_interval = 0; + } +} + +static void balloon_stats_change_timer(VirtIOBalloon *s, int64_t secs) +{ + timer_mod(s->stats_timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + secs * 1000); +} + +static void balloon_stats_poll_cb(void *opaque) +{ + VirtIOBalloon *s = opaque; + VirtIODevice *vdev = VIRTIO_DEVICE(s); + + if (s->stats_vq_elem == NULL || !balloon_stats_supported(s)) { + /* re-schedule */ + balloon_stats_change_timer(s, s->stats_poll_interval); + return; + } + + virtqueue_push(s->svq, s->stats_vq_elem, 0); + virtio_notify(vdev, s->svq); + g_free(s->stats_vq_elem); + s->stats_vq_elem = NULL; +} + +static void balloon_stats_get_all(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + Error *err = NULL; + VirtIOBalloon *s = opaque; + int i; + + if (!visit_start_struct(v, name, NULL, 0, &err)) { + goto out; + } + if (!visit_type_int(v, "last-update", &s->stats_last_update, &err)) { + goto out_end; + } + + if (!visit_start_struct(v, "stats", NULL, 0, &err)) { + goto out_end; + } + for (i = 0; i < VIRTIO_BALLOON_S_NR; i++) { + if (!visit_type_uint64(v, balloon_stat_names[i], &s->stats[i], &err)) { + goto out_nested; + } + } + visit_check_struct(v, &err); +out_nested: + visit_end_struct(v, NULL); + + if (!err) { + visit_check_struct(v, &err); + } +out_end: + visit_end_struct(v, NULL); +out: + error_propagate(errp, err); +} + +static void balloon_stats_get_poll_interval(Object *obj, Visitor *v, + const char *name, void *opaque, + Error **errp) +{ + VirtIOBalloon *s = opaque; + visit_type_int(v, name, &s->stats_poll_interval, errp); +} + +static void balloon_stats_set_poll_interval(Object *obj, Visitor *v, + const char *name, void *opaque, + Error **errp) +{ + VirtIOBalloon *s = opaque; + int64_t value; + + if (!visit_type_int(v, name, &value, errp)) { + return; + } + + if (value < 0) { + error_setg(errp, "timer value must be greater than zero"); + return; + } + + if (value > UINT32_MAX) { + error_setg(errp, "timer value is too big"); + return; + } + + if (value == s->stats_poll_interval) { + return; + } + + if (value == 0) { + /* timer=0 disables the timer */ + balloon_stats_destroy_timer(s); + return; + } + + if (balloon_stats_enabled(s)) { + /* timer interval change */ + s->stats_poll_interval = value; + balloon_stats_change_timer(s, value); + return; + } + + /* create a new timer */ + g_assert(s->stats_timer == NULL); + s->stats_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL, balloon_stats_poll_cb, s); + s->stats_poll_interval = value; + balloon_stats_change_timer(s, 0); +} + +static void virtio_balloon_handle_report(VirtIODevice *vdev, VirtQueue *vq) +{ + VirtIOBalloon *dev = VIRTIO_BALLOON(vdev); + VirtQueueElement *elem; + + while ((elem = virtqueue_pop(vq, sizeof(VirtQueueElement)))) { + unsigned int i; + + /* + * When we discard the page it has the effect of removing the page + * from the hypervisor itself and causing it to be zeroed when it + * is returned to us. 
So we must not discard the page if it is + * accessible by another device or process, or if the guest is + * expecting it to retain a non-zero value. + */ + if (virtio_balloon_inhibited() || dev->poison_val) { + goto skip_element; + } + + for (i = 0; i < elem->in_num; i++) { + void *addr = elem->in_sg[i].iov_base; + size_t size = elem->in_sg[i].iov_len; + ram_addr_t ram_offset; + RAMBlock *rb; + + /* + * There is no need to check the memory section to see if + * it is ram/readonly/romd like there is for handle_output + * below. If the region is not meant to be written to, then + * address_space_map will have allocated a bounce buffer + * and it will be freed in address_space_unmap and trigger + * an unassigned_mem_write before failing to copy over the + * buffer. If more than one bad descriptor is provided it + * will return NULL after the first bounce buffer and fail + * to map any resources. + */ + rb = qemu_ram_block_from_host(addr, false, &ram_offset); + if (!rb) { + trace_virtio_balloon_bad_addr(elem->in_addr[i]); + continue; + } + + /* + * For now we will simply ignore unaligned memory regions, or + * regions that overrun the end of the RAMBlock. + */ + if (!QEMU_IS_ALIGNED(ram_offset | size, qemu_ram_pagesize(rb)) || + (ram_offset + size) > qemu_ram_get_used_length(rb)) { + continue; + } + + ram_block_discard_range(rb, ram_offset, size); + } + +skip_element: + virtqueue_push(vq, elem, 0); + virtio_notify(vdev, vq); + g_free(elem); + } +} + +static void virtio_balloon_handle_output(VirtIODevice *vdev, VirtQueue *vq) +{ + VirtIOBalloon *s = VIRTIO_BALLOON(vdev); + VirtQueueElement *elem; + MemoryRegionSection section; + + for (;;) { + PartiallyBalloonedPage pbp = {}; + size_t offset = 0; + uint32_t pfn; + + elem = virtqueue_pop(vq, sizeof(VirtQueueElement)); + if (!elem) { + break; + } + + while (iov_to_buf(elem->out_sg, elem->out_num, offset, &pfn, 4) == 4) { + unsigned int p = virtio_ldl_p(vdev, &pfn); + hwaddr pa; + + pa = (hwaddr) p << VIRTIO_BALLOON_PFN_SHIFT; + offset += 4; + + section = memory_region_find(get_system_memory(), pa, + BALLOON_PAGE_SIZE); + if (!section.mr) { + trace_virtio_balloon_bad_addr(pa); + continue; + } + if (!memory_region_is_ram(section.mr) || + memory_region_is_rom(section.mr) || + memory_region_is_romd(section.mr)) { + trace_virtio_balloon_bad_addr(pa); + memory_region_unref(section.mr); + continue; + } + + trace_virtio_balloon_handle_output(memory_region_name(section.mr), + pa); + if (!virtio_balloon_inhibited()) { + if (vq == s->ivq) { + balloon_inflate_page(s, section.mr, + section.offset_within_region, &pbp); + } else if (vq == s->dvq) { + balloon_deflate_page(s, section.mr, section.offset_within_region); + } else { + g_assert_not_reached(); + } + } + memory_region_unref(section.mr); + } + + virtqueue_push(vq, elem, 0); + virtio_notify(vdev, vq); + g_free(elem); + virtio_balloon_pbp_free(&pbp); + } +} + +static void virtio_balloon_receive_stats(VirtIODevice *vdev, VirtQueue *vq) +{ + VirtIOBalloon *s = VIRTIO_BALLOON(vdev); + VirtQueueElement *elem; + VirtIOBalloonStat stat; + size_t offset = 0; + qemu_timeval tv; + + elem = virtqueue_pop(vq, sizeof(VirtQueueElement)); + if (!elem) { + goto out; + } + + if (s->stats_vq_elem != NULL) { + /* This should never happen if the driver follows the spec. */ + virtqueue_push(vq, s->stats_vq_elem, 0); + virtio_notify(vdev, vq); + g_free(s->stats_vq_elem); + } + + s->stats_vq_elem = elem; + + /* Initialize the stats to get rid of any stale values.
This is only + * needed to handle the case where a guest supports fewer stats than it + * used to (ie. it has booted into an old kernel). + */ + reset_stats(s); + + while (iov_to_buf(elem->out_sg, elem->out_num, offset, &stat, sizeof(stat)) + == sizeof(stat)) { + uint16_t tag = virtio_tswap16(vdev, stat.tag); + uint64_t val = virtio_tswap64(vdev, stat.val); + + offset += sizeof(stat); + if (tag < VIRTIO_BALLOON_S_NR) + s->stats[tag] = val; + } + s->stats_vq_offset = offset; + + if (qemu_gettimeofday(&tv) < 0) { + warn_report("%s: failed to get time of day", __func__); + goto out; + } + + s->stats_last_update = tv.tv_sec; + +out: + if (balloon_stats_enabled(s)) { + balloon_stats_change_timer(s, s->stats_poll_interval); + } +} + +static void virtio_balloon_handle_free_page_vq(VirtIODevice *vdev, + VirtQueue *vq) +{ + VirtIOBalloon *s = VIRTIO_BALLOON(vdev); + qemu_bh_schedule(s->free_page_bh); +} + +static bool get_free_page_hints(VirtIOBalloon *dev) +{ + VirtQueueElement *elem; + VirtIODevice *vdev = VIRTIO_DEVICE(dev); + VirtQueue *vq = dev->free_page_vq; + bool ret = true; + int i; + + while (dev->block_iothread) { + qemu_cond_wait(&dev->free_page_cond, &dev->free_page_lock); + } + + elem = virtqueue_pop(vq, sizeof(VirtQueueElement)); + if (!elem) { + return false; + } + + if (elem->out_num) { + uint32_t id; + size_t size = iov_to_buf(elem->out_sg, elem->out_num, 0, + &id, sizeof(id)); + + virtio_tswap32s(vdev, &id); + if (unlikely(size != sizeof(id))) { + virtio_error(vdev, "received an incorrect cmd id"); + ret = false; + goto out; + } + if (dev->free_page_hint_status == FREE_PAGE_HINT_S_REQUESTED && + id == dev->free_page_hint_cmd_id) { + dev->free_page_hint_status = FREE_PAGE_HINT_S_START; + } else if (dev->free_page_hint_status == FREE_PAGE_HINT_S_START) { + /* + * Stop the optimization only when it has started. This + * avoids a stale stop sign for the previous command. + */ + dev->free_page_hint_status = FREE_PAGE_HINT_S_STOP; + } + } + + if (elem->in_num && dev->free_page_hint_status == FREE_PAGE_HINT_S_START) { + for (i = 0; i < elem->in_num; i++) { + qemu_guest_free_page_hint(elem->in_sg[i].iov_base, + elem->in_sg[i].iov_len); + } + } + +out: + virtqueue_push(vq, elem, 0); + g_free(elem); + return ret; +} + +static void virtio_ballloon_get_free_page_hints(void *opaque) +{ + VirtIOBalloon *dev = opaque; + VirtIODevice *vdev = VIRTIO_DEVICE(dev); + VirtQueue *vq = dev->free_page_vq; + bool continue_to_get_hints; + + do { + qemu_mutex_lock(&dev->free_page_lock); + virtio_queue_set_notification(vq, 0); + continue_to_get_hints = get_free_page_hints(dev); + qemu_mutex_unlock(&dev->free_page_lock); + virtio_notify(vdev, vq); + /* + * Start to poll the vq once the hinting started. Otherwise, continue + * only when there are entries on the vq, which need to be given back. 
+ */ + } while (continue_to_get_hints || + dev->free_page_hint_status == FREE_PAGE_HINT_S_START); + virtio_queue_set_notification(vq, 1); +} + +static bool virtio_balloon_free_page_support(void *opaque) +{ + VirtIOBalloon *s = opaque; + VirtIODevice *vdev = VIRTIO_DEVICE(s); + + return virtio_vdev_has_feature(vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT); +} + +static void virtio_balloon_free_page_start(VirtIOBalloon *s) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(s); + + qemu_mutex_lock(&s->free_page_lock); + + if (s->free_page_hint_cmd_id == UINT_MAX) { + s->free_page_hint_cmd_id = VIRTIO_BALLOON_FREE_PAGE_HINT_CMD_ID_MIN; + } else { + s->free_page_hint_cmd_id++; + } + + s->free_page_hint_status = FREE_PAGE_HINT_S_REQUESTED; + qemu_mutex_unlock(&s->free_page_lock); + + virtio_notify_config(vdev); +} + +static void virtio_balloon_free_page_stop(VirtIOBalloon *s) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(s); + + if (s->free_page_hint_status != FREE_PAGE_HINT_S_STOP) { + /* + * The lock also guarantees us that the + * virtio_ballloon_get_free_page_hints exits after the + * free_page_hint_status is set to S_STOP. + */ + qemu_mutex_lock(&s->free_page_lock); + /* + * The guest isn't done hinting, so send a notification + * to the guest to actively stop the hinting. + */ + s->free_page_hint_status = FREE_PAGE_HINT_S_STOP; + qemu_mutex_unlock(&s->free_page_lock); + virtio_notify_config(vdev); + } +} + +static void virtio_balloon_free_page_done(VirtIOBalloon *s) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(s); + + if (s->free_page_hint_status != FREE_PAGE_HINT_S_DONE) { + /* See virtio_balloon_free_page_stop() */ + qemu_mutex_lock(&s->free_page_lock); + s->free_page_hint_status = FREE_PAGE_HINT_S_DONE; + qemu_mutex_unlock(&s->free_page_lock); + virtio_notify_config(vdev); + } +} + +static int +virtio_balloon_free_page_hint_notify(NotifierWithReturn *n, void *data) +{ + VirtIOBalloon *dev = container_of(n, VirtIOBalloon, free_page_hint_notify); + VirtIODevice *vdev = VIRTIO_DEVICE(dev); + PrecopyNotifyData *pnd = data; + + if (!virtio_balloon_free_page_support(dev)) { + /* + * This is an optimization provided to migration, so just return 0 to + * have the normal migration process not affected when this feature is + * not supported. + */ + return 0; + } + + /* + * Pages hinted via qemu_guest_free_page_hint() are cleared from the dirty + * bitmap and will not get migrated, especially also not when the postcopy + * destination starts using them and requests migration from the source; the + * faulting thread will stall until postcopy migration finishes and + * all threads are woken up. Let's not start free page hinting if postcopy + * is possible. + */ + if (migrate_postcopy_ram()) { + return 0; + } + + switch (pnd->reason) { + case PRECOPY_NOTIFY_BEFORE_BITMAP_SYNC: + virtio_balloon_free_page_stop(dev); + break; + case PRECOPY_NOTIFY_AFTER_BITMAP_SYNC: + if (vdev->vm_running) { + virtio_balloon_free_page_start(dev); + break; + } + /* + * Set S_DONE before migrating the vmstate, so the guest will reuse + * all hinted pages once running on the destination. Fall through. + */ + case PRECOPY_NOTIFY_CLEANUP: + /* + * Especially, if something goes wrong during precopy or if migration + * is canceled, we have to properly communicate S_DONE to the VM. 
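+ * (For example, if migration is cancelled after hinting was started, the
+ * guest would otherwise never receive VIRTIO_BALLOON_CMD_ID_DONE for the
+ * aborted run.)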
+ */ + virtio_balloon_free_page_done(dev); + break; + case PRECOPY_NOTIFY_SETUP: + case PRECOPY_NOTIFY_COMPLETE: + break; + default: + virtio_error(vdev, "%s: %d reason unknown", __func__, pnd->reason); + } + + return 0; +} + +static size_t virtio_balloon_config_size(VirtIOBalloon *s) +{ + uint64_t features = s->host_features; + + if (s->qemu_4_0_config_size) { + return sizeof(struct virtio_balloon_config); + } + if (virtio_has_feature(features, VIRTIO_BALLOON_F_PAGE_POISON)) { + return sizeof(struct virtio_balloon_config); + } + if (virtio_has_feature(features, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) { + return offsetof(struct virtio_balloon_config, poison_val); + } + return offsetof(struct virtio_balloon_config, free_page_hint_cmd_id); +} + +static void virtio_balloon_get_config(VirtIODevice *vdev, uint8_t *config_data) +{ + VirtIOBalloon *dev = VIRTIO_BALLOON(vdev); + struct virtio_balloon_config config = {}; + + config.num_pages = cpu_to_le32(dev->num_pages); + config.actual = cpu_to_le32(dev->actual); + config.poison_val = cpu_to_le32(dev->poison_val); + + if (dev->free_page_hint_status == FREE_PAGE_HINT_S_REQUESTED) { + config.free_page_hint_cmd_id = + cpu_to_le32(dev->free_page_hint_cmd_id); + } else if (dev->free_page_hint_status == FREE_PAGE_HINT_S_STOP) { + config.free_page_hint_cmd_id = + cpu_to_le32(VIRTIO_BALLOON_CMD_ID_STOP); + } else if (dev->free_page_hint_status == FREE_PAGE_HINT_S_DONE) { + config.free_page_hint_cmd_id = + cpu_to_le32(VIRTIO_BALLOON_CMD_ID_DONE); + } + + trace_virtio_balloon_get_config(config.num_pages, config.actual); + memcpy(config_data, &config, virtio_balloon_config_size(dev)); +} + +static int build_dimm_list(Object *obj, void *opaque) +{ + GSList **list = opaque; + + if (object_dynamic_cast(obj, TYPE_PC_DIMM)) { + DeviceState *dev = DEVICE(obj); + if (dev->realized) { /* only realized DIMMs matter */ + *list = g_slist_prepend(*list, dev); + } + } + + object_child_foreach(obj, build_dimm_list, opaque); + return 0; +} + +static ram_addr_t get_current_ram_size(void) +{ + GSList *list = NULL, *item; + ram_addr_t size = current_machine->ram_size; + + build_dimm_list(qdev_get_machine(), &list); + for (item = list; item; item = g_slist_next(item)) { + Object *obj = OBJECT(item->data); + if (!strcmp(object_get_typename(obj), TYPE_PC_DIMM)) { + size += object_property_get_int(obj, PC_DIMM_SIZE_PROP, + &error_abort); + } + } + g_slist_free(list); + + return size; +} + +static bool virtio_balloon_page_poison_support(void *opaque) +{ + VirtIOBalloon *s = opaque; + VirtIODevice *vdev = VIRTIO_DEVICE(s); + + return virtio_vdev_has_feature(vdev, VIRTIO_BALLOON_F_PAGE_POISON); +} + +static void virtio_balloon_set_config(VirtIODevice *vdev, + const uint8_t *config_data) +{ + VirtIOBalloon *dev = VIRTIO_BALLOON(vdev); + struct virtio_balloon_config config; + uint32_t oldactual = dev->actual; + ram_addr_t vm_ram_size = get_current_ram_size(); + + memcpy(&config, config_data, virtio_balloon_config_size(dev)); + dev->actual = le32_to_cpu(config.actual); + if (dev->actual != oldactual) { + qapi_event_send_balloon_change(vm_ram_size - + ((ram_addr_t) dev->actual << VIRTIO_BALLOON_PFN_SHIFT)); + } + dev->poison_val = 0; + if (virtio_balloon_page_poison_support(dev)) { + dev->poison_val = le32_to_cpu(config.poison_val); + } + trace_virtio_balloon_set_config(dev->actual, oldactual); +} + +static uint64_t virtio_balloon_get_features(VirtIODevice *vdev, uint64_t f, + Error **errp) +{ + VirtIOBalloon *dev = VIRTIO_BALLOON(vdev); + f |= dev->host_features; + virtio_add_feature(&f, 
VIRTIO_BALLOON_F_STATS_VQ); + + return f; +} + +static void virtio_balloon_stat(void *opaque, BalloonInfo *info) +{ + VirtIOBalloon *dev = opaque; + info->actual = get_current_ram_size() - ((uint64_t) dev->actual << + VIRTIO_BALLOON_PFN_SHIFT); +} + +static void virtio_balloon_to_target(void *opaque, ram_addr_t target) +{ + VirtIOBalloon *dev = VIRTIO_BALLOON(opaque); + VirtIODevice *vdev = VIRTIO_DEVICE(dev); + ram_addr_t vm_ram_size = get_current_ram_size(); + + if (target > vm_ram_size) { + target = vm_ram_size; + } + if (target) { + dev->num_pages = (vm_ram_size - target) >> VIRTIO_BALLOON_PFN_SHIFT; + virtio_notify_config(vdev); + } + trace_virtio_balloon_to_target(target, dev->num_pages); +} + +static int virtio_balloon_post_load_device(void *opaque, int version_id) +{ + VirtIOBalloon *s = VIRTIO_BALLOON(opaque); + + if (balloon_stats_enabled(s)) { + balloon_stats_change_timer(s, s->stats_poll_interval); + } + return 0; +} + +static const VMStateDescription vmstate_virtio_balloon_free_page_hint = { + .name = "virtio-balloon-device/free-page-report", + .version_id = 1, + .minimum_version_id = 1, + .needed = virtio_balloon_free_page_support, + .fields = (VMStateField[]) { + VMSTATE_UINT32(free_page_hint_cmd_id, VirtIOBalloon), + VMSTATE_UINT32(free_page_hint_status, VirtIOBalloon), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_virtio_balloon_page_poison = { + .name = "virtio-balloon-device/page-poison", + .version_id = 1, + .minimum_version_id = 1, + .needed = virtio_balloon_page_poison_support, + .fields = (VMStateField[]) { + VMSTATE_UINT32(poison_val, VirtIOBalloon), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_virtio_balloon_device = { + .name = "virtio-balloon-device", + .version_id = 1, + .minimum_version_id = 1, + .post_load = virtio_balloon_post_load_device, + .fields = (VMStateField[]) { + VMSTATE_UINT32(num_pages, VirtIOBalloon), + VMSTATE_UINT32(actual, VirtIOBalloon), + VMSTATE_END_OF_LIST() + }, + .subsections = (const VMStateDescription * []) { + &vmstate_virtio_balloon_free_page_hint, + &vmstate_virtio_balloon_page_poison, + NULL + } +}; + +static void virtio_balloon_device_realize(DeviceState *dev, Error **errp) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(dev); + VirtIOBalloon *s = VIRTIO_BALLOON(dev); + int ret; + + virtio_init(vdev, "virtio-balloon", VIRTIO_ID_BALLOON, + virtio_balloon_config_size(s)); + + ret = qemu_add_balloon_handler(virtio_balloon_to_target, + virtio_balloon_stat, s); + + if (ret < 0) { + error_setg(errp, "Only one balloon device is supported"); + virtio_cleanup(vdev); + return; + } + + if (virtio_has_feature(s->host_features, VIRTIO_BALLOON_F_FREE_PAGE_HINT) && + !s->iothread) { + error_setg(errp, "'free-page-hint' requires 'iothread' to be set"); + virtio_cleanup(vdev); + return; + } + + s->ivq = virtio_add_queue(vdev, 128, virtio_balloon_handle_output); + s->dvq = virtio_add_queue(vdev, 128, virtio_balloon_handle_output); + s->svq = virtio_add_queue(vdev, 128, virtio_balloon_receive_stats); + + if (virtio_has_feature(s->host_features, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) { + s->free_page_vq = virtio_add_queue(vdev, VIRTQUEUE_MAX_SIZE, + virtio_balloon_handle_free_page_vq); + precopy_add_notifier(&s->free_page_hint_notify); + + object_ref(OBJECT(s->iothread)); + s->free_page_bh = aio_bh_new(iothread_get_aio_context(s->iothread), + virtio_ballloon_get_free_page_hints, s); + } + + if (virtio_has_feature(s->host_features, VIRTIO_BALLOON_F_REPORTING)) { + s->reporting_vq = virtio_add_queue(vdev, 
32, + virtio_balloon_handle_report); + } + + reset_stats(s); +} + +static void virtio_balloon_device_unrealize(DeviceState *dev) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(dev); + VirtIOBalloon *s = VIRTIO_BALLOON(dev); + + if (s->free_page_bh) { + qemu_bh_delete(s->free_page_bh); + object_unref(OBJECT(s->iothread)); + virtio_balloon_free_page_stop(s); + precopy_remove_notifier(&s->free_page_hint_notify); + } + balloon_stats_destroy_timer(s); + qemu_remove_balloon_handler(s); + + virtio_delete_queue(s->ivq); + virtio_delete_queue(s->dvq); + virtio_delete_queue(s->svq); + if (s->free_page_vq) { + virtio_delete_queue(s->free_page_vq); + } + if (s->reporting_vq) { + virtio_delete_queue(s->reporting_vq); + } + virtio_cleanup(vdev); +} + +static void virtio_balloon_device_reset(VirtIODevice *vdev) +{ + VirtIOBalloon *s = VIRTIO_BALLOON(vdev); + + if (virtio_balloon_free_page_support(s)) { + virtio_balloon_free_page_stop(s); + } + + if (s->stats_vq_elem != NULL) { + virtqueue_unpop(s->svq, s->stats_vq_elem, 0); + g_free(s->stats_vq_elem); + s->stats_vq_elem = NULL; + } + + s->poison_val = 0; +} + +static void virtio_balloon_set_status(VirtIODevice *vdev, uint8_t status) +{ + VirtIOBalloon *s = VIRTIO_BALLOON(vdev); + + if (!s->stats_vq_elem && vdev->vm_running && + (status & VIRTIO_CONFIG_S_DRIVER_OK) && virtqueue_rewind(s->svq, 1)) { + /* poll stats queue for the element we have discarded when the VM + * was stopped */ + virtio_balloon_receive_stats(vdev, s->svq); + } + + if (virtio_balloon_free_page_support(s)) { + /* + * The VM is woken up and the iothread was blocked, so signal it to + * continue. + */ + if (vdev->vm_running && s->block_iothread) { + qemu_mutex_lock(&s->free_page_lock); + s->block_iothread = false; + qemu_cond_signal(&s->free_page_cond); + qemu_mutex_unlock(&s->free_page_lock); + } + + /* The VM is stopped, block the iothread. */ + if (!vdev->vm_running) { + qemu_mutex_lock(&s->free_page_lock); + s->block_iothread = true; + qemu_mutex_unlock(&s->free_page_lock); + } + } +} + +static void virtio_balloon_instance_init(Object *obj) +{ + VirtIOBalloon *s = VIRTIO_BALLOON(obj); + + qemu_mutex_init(&s->free_page_lock); + qemu_cond_init(&s->free_page_cond); + s->free_page_hint_cmd_id = VIRTIO_BALLOON_FREE_PAGE_HINT_CMD_ID_MIN; + s->free_page_hint_notify.notify = virtio_balloon_free_page_hint_notify; + + object_property_add(obj, "guest-stats", "guest statistics", + balloon_stats_get_all, NULL, NULL, s); + + object_property_add(obj, "guest-stats-polling-interval", "int", + balloon_stats_get_poll_interval, + balloon_stats_set_poll_interval, + NULL, s); +} + +static const VMStateDescription vmstate_virtio_balloon = { + .name = "virtio-balloon", + .minimum_version_id = 1, + .version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_VIRTIO_DEVICE, + VMSTATE_END_OF_LIST() + }, +}; + +static Property virtio_balloon_properties[] = { + DEFINE_PROP_BIT("deflate-on-oom", VirtIOBalloon, host_features, + VIRTIO_BALLOON_F_DEFLATE_ON_OOM, false), + DEFINE_PROP_BIT("free-page-hint", VirtIOBalloon, host_features, + VIRTIO_BALLOON_F_FREE_PAGE_HINT, false), + DEFINE_PROP_BIT("page-poison", VirtIOBalloon, host_features, + VIRTIO_BALLOON_F_PAGE_POISON, true), + DEFINE_PROP_BIT("free-page-reporting", VirtIOBalloon, host_features, + VIRTIO_BALLOON_F_REPORTING, false), + /* QEMU 4.0 accidentally changed the config size even when free-page-hint + * is disabled, resulting in QEMU 3.1 migration incompatibility. This + * property retains this quirk for QEMU 4.1 machine types. 
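+ * (See virtio_balloon_config_size() above: when this property is set, the
+ * full sizeof(struct virtio_balloon_config) is used regardless of the
+ * host feature bits.)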
+ */ + DEFINE_PROP_BOOL("qemu-4-0-config-size", VirtIOBalloon, + qemu_4_0_config_size, false), + DEFINE_PROP_LINK("iothread", VirtIOBalloon, iothread, TYPE_IOTHREAD, + IOThread *), + DEFINE_PROP_END_OF_LIST(), +}; + +static void virtio_balloon_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass); + + device_class_set_props(dc, virtio_balloon_properties); + dc->vmsd = &vmstate_virtio_balloon; + set_bit(DEVICE_CATEGORY_MISC, dc->categories); + vdc->realize = virtio_balloon_device_realize; + vdc->unrealize = virtio_balloon_device_unrealize; + vdc->reset = virtio_balloon_device_reset; + vdc->get_config = virtio_balloon_get_config; + vdc->set_config = virtio_balloon_set_config; + vdc->get_features = virtio_balloon_get_features; + vdc->set_status = virtio_balloon_set_status; + vdc->vmsd = &vmstate_virtio_balloon_device; +} + +static const TypeInfo virtio_balloon_info = { + .name = TYPE_VIRTIO_BALLOON, + .parent = TYPE_VIRTIO_DEVICE, + .instance_size = sizeof(VirtIOBalloon), + .instance_init = virtio_balloon_instance_init, + .class_init = virtio_balloon_class_init, +}; + +static void virtio_register_types(void) +{ + type_register_static(&virtio_balloon_info); +} + +type_init(virtio_register_types) diff --git a/hw/virtio/virtio-blk-pci.c b/hw/virtio/virtio-blk-pci.c new file mode 100644 index 000000000..9d5795810 --- /dev/null +++ b/hw/virtio/virtio-blk-pci.c @@ -0,0 +1,107 @@ +/* + * Virtio blk PCI Bindings + * + * Copyright IBM, Corp. 2007 + * Copyright (c) 2009 CodeSourcery + * + * Authors: + * Anthony Liguori + * Paul Brook + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + * + * Contributions after 2012-01-13 are licensed under the terms of the + * GNU GPL, version 2 or (at your option) any later version. + */ + +#include "qemu/osdep.h" + +#include "hw/qdev-properties.h" +#include "hw/virtio/virtio-blk.h" +#include "virtio-pci.h" +#include "qapi/error.h" +#include "qemu/module.h" +#include "qom/object.h" + +typedef struct VirtIOBlkPCI VirtIOBlkPCI; + +/* + * virtio-blk-pci: This extends VirtioPCIProxy. 
+ */ +#define TYPE_VIRTIO_BLK_PCI "virtio-blk-pci-base" +DECLARE_INSTANCE_CHECKER(VirtIOBlkPCI, VIRTIO_BLK_PCI, + TYPE_VIRTIO_BLK_PCI) + +struct VirtIOBlkPCI { + VirtIOPCIProxy parent_obj; + VirtIOBlock vdev; +}; + +static Property virtio_blk_pci_properties[] = { + DEFINE_PROP_UINT32("class", VirtIOPCIProxy, class_code, 0), + DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags, + VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true), + DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, + DEV_NVECTORS_UNSPECIFIED), + DEFINE_PROP_END_OF_LIST(), +}; + +static void virtio_blk_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp) +{ + VirtIOBlkPCI *dev = VIRTIO_BLK_PCI(vpci_dev); + DeviceState *vdev = DEVICE(&dev->vdev); + VirtIOBlkConf *conf = &dev->vdev.conf; + + if (conf->num_queues == VIRTIO_BLK_AUTO_NUM_QUEUES) { + conf->num_queues = virtio_pci_optimal_num_queues(0); + } + + if (vpci_dev->nvectors == DEV_NVECTORS_UNSPECIFIED) { + vpci_dev->nvectors = conf->num_queues + 1; + } + + qdev_realize(vdev, BUS(&vpci_dev->bus), errp); +} + +static void virtio_blk_pci_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass); + PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass); + + set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); + device_class_set_props(dc, virtio_blk_pci_properties); + k->realize = virtio_blk_pci_realize; + pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET; + pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_BLOCK; + pcidev_k->revision = VIRTIO_PCI_ABI_VERSION; + pcidev_k->class_id = PCI_CLASS_STORAGE_SCSI; +} + +static void virtio_blk_pci_instance_init(Object *obj) +{ + VirtIOBlkPCI *dev = VIRTIO_BLK_PCI(obj); + + virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev), + TYPE_VIRTIO_BLK); + object_property_add_alias(obj, "bootindex", OBJECT(&dev->vdev), + "bootindex"); +} + +static const VirtioPCIDeviceTypeInfo virtio_blk_pci_info = { + .base_name = TYPE_VIRTIO_BLK_PCI, + .generic_name = "virtio-blk-pci", + .transitional_name = "virtio-blk-pci-transitional", + .non_transitional_name = "virtio-blk-pci-non-transitional", + .instance_size = sizeof(VirtIOBlkPCI), + .instance_init = virtio_blk_pci_instance_init, + .class_init = virtio_blk_pci_class_init, +}; + +static void virtio_blk_pci_register(void) +{ + virtio_pci_types_register(&virtio_blk_pci_info); +} + +type_init(virtio_blk_pci_register) diff --git a/hw/virtio/virtio-bus.c b/hw/virtio/virtio-bus.c new file mode 100644 index 000000000..d23db98c5 --- /dev/null +++ b/hw/virtio/virtio-bus.c @@ -0,0 +1,363 @@ +/* + * VirtioBus + * + * Copyright (C) 2012 : GreenSocs Ltd + * http://www.greensocs.com/ , email: info@greensocs.com + * + * Developed by : + * Frederic Konrad + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see <http://www.gnu.org/licenses/>.
+ * + */ + +#include "qemu/osdep.h" +#include "qemu/error-report.h" +#include "qemu/module.h" +#include "qapi/error.h" +#include "hw/virtio/virtio-bus.h" +#include "hw/virtio/virtio.h" +#include "exec/address-spaces.h" + +/* #define DEBUG_VIRTIO_BUS */ + +#ifdef DEBUG_VIRTIO_BUS +#define DPRINTF(fmt, ...) \ +do { printf("virtio_bus: " fmt , ## __VA_ARGS__); } while (0) +#else +#define DPRINTF(fmt, ...) do { } while (0) +#endif + +/* A VirtIODevice is being plugged */ +void virtio_bus_device_plugged(VirtIODevice *vdev, Error **errp) +{ + DeviceState *qdev = DEVICE(vdev); + BusState *qbus = BUS(qdev_get_parent_bus(qdev)); + VirtioBusState *bus = VIRTIO_BUS(qbus); + VirtioBusClass *klass = VIRTIO_BUS_GET_CLASS(bus); + VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev); + bool has_iommu = virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM); + Error *local_err = NULL; + + DPRINTF("%s: plug device.\n", qbus->name); + + if (klass->pre_plugged != NULL) { + klass->pre_plugged(qbus->parent, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + } + + /* Get the features of the plugged device. */ + assert(vdc->get_features != NULL); + vdev->host_features = vdc->get_features(vdev, vdev->host_features, + &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + + if (has_iommu && !virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM)) { + error_setg(errp, "iommu_platform=true is not supported by the device"); + return; + } + + if (klass->device_plugged != NULL) { + klass->device_plugged(qbus->parent, &local_err); + } + if (local_err) { + error_propagate(errp, local_err); + return; + } + + if (klass->get_dma_as != NULL && has_iommu) { + virtio_add_feature(&vdev->host_features, VIRTIO_F_IOMMU_PLATFORM); + vdev->dma_as = klass->get_dma_as(qbus->parent); + } else { + vdev->dma_as = &address_space_memory; + } +} + +/* Reset the virtio_bus */ +void virtio_bus_reset(VirtioBusState *bus) +{ + VirtIODevice *vdev = virtio_bus_get_device(bus); + + DPRINTF("%s: reset device.\n", BUS(bus)->name); + if (vdev != NULL) { + virtio_reset(vdev); + } +} + +/* A VirtIODevice is being unplugged */ +void virtio_bus_device_unplugged(VirtIODevice *vdev) +{ + DeviceState *qdev = DEVICE(vdev); + BusState *qbus = BUS(qdev_get_parent_bus(qdev)); + VirtioBusClass *klass = VIRTIO_BUS_GET_CLASS(qbus); + + DPRINTF("%s: remove device.\n", qbus->name); + + if (vdev != NULL) { + if (klass->device_unplugged != NULL) { + klass->device_unplugged(qbus->parent); + } + } +} + +/* Get the device id of the plugged device. */ +uint16_t virtio_bus_get_vdev_id(VirtioBusState *bus) +{ + VirtIODevice *vdev = virtio_bus_get_device(bus); + assert(vdev != NULL); + return vdev->device_id; +} + +/* Get the config_len field of the plugged device. */ +size_t virtio_bus_get_vdev_config_len(VirtioBusState *bus) +{ + VirtIODevice *vdev = virtio_bus_get_device(bus); + assert(vdev != NULL); + return vdev->config_len; +} + +/* Get bad features of the plugged device. */ +uint32_t virtio_bus_get_vdev_bad_features(VirtioBusState *bus) +{ + VirtIODevice *vdev = virtio_bus_get_device(bus); + VirtioDeviceClass *k; + + assert(vdev != NULL); + k = VIRTIO_DEVICE_GET_CLASS(vdev); + if (k->bad_features != NULL) { + return k->bad_features(vdev); + } else { + return 0; + } +} + +/* Get config of the plugged device. 
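+ * (Reads are forwarded to the plugged device's get_config callback.)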
*/ +void virtio_bus_get_vdev_config(VirtioBusState *bus, uint8_t *config) +{ + VirtIODevice *vdev = virtio_bus_get_device(bus); + VirtioDeviceClass *k; + + assert(vdev != NULL); + k = VIRTIO_DEVICE_GET_CLASS(vdev); + if (k->get_config != NULL) { + k->get_config(vdev, config); + } +} + +/* Set config of the plugged device. */ +void virtio_bus_set_vdev_config(VirtioBusState *bus, uint8_t *config) +{ + VirtIODevice *vdev = virtio_bus_get_device(bus); + VirtioDeviceClass *k; + + assert(vdev != NULL); + k = VIRTIO_DEVICE_GET_CLASS(vdev); + if (k->set_config != NULL) { + k->set_config(vdev, config); + } +} + +/* On success, ioeventfd ownership belongs to the caller. */ +int virtio_bus_grab_ioeventfd(VirtioBusState *bus) +{ + VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(bus); + + /* vhost can be used even if ioeventfd=off in the proxy device, + * so do not check k->ioeventfd_enabled. + */ + if (!k->ioeventfd_assign) { + return -ENOSYS; + } + + if (bus->ioeventfd_grabbed == 0 && bus->ioeventfd_started) { + virtio_bus_stop_ioeventfd(bus); + /* Remember that we need to restart ioeventfd + * when ioeventfd_grabbed becomes zero. + */ + bus->ioeventfd_started = true; + } + bus->ioeventfd_grabbed++; + return 0; +} + +void virtio_bus_release_ioeventfd(VirtioBusState *bus) +{ + assert(bus->ioeventfd_grabbed != 0); + if (--bus->ioeventfd_grabbed == 0 && bus->ioeventfd_started) { + /* Force virtio_bus_start_ioeventfd to act. */ + bus->ioeventfd_started = false; + virtio_bus_start_ioeventfd(bus); + } +} + +int virtio_bus_start_ioeventfd(VirtioBusState *bus) +{ + VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(bus); + DeviceState *proxy = DEVICE(BUS(bus)->parent); + VirtIODevice *vdev = virtio_bus_get_device(bus); + VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev); + int r; + + if (!k->ioeventfd_assign || !k->ioeventfd_enabled(proxy)) { + return -ENOSYS; + } + if (bus->ioeventfd_started) { + return 0; + } + + /* Only set our notifier if we have ownership. */ + if (!bus->ioeventfd_grabbed) { + r = vdc->start_ioeventfd(vdev); + if (r < 0) { + error_report("%s: failed. Fallback to userspace (slower).", __func__); + return r; + } + } + bus->ioeventfd_started = true; + return 0; +} + +void virtio_bus_stop_ioeventfd(VirtioBusState *bus) +{ + VirtIODevice *vdev; + VirtioDeviceClass *vdc; + + if (!bus->ioeventfd_started) { + return; + } + + /* Only remove our notifier if we have ownership. */ + if (!bus->ioeventfd_grabbed) { + vdev = virtio_bus_get_device(bus); + vdc = VIRTIO_DEVICE_GET_CLASS(vdev); + vdc->stop_ioeventfd(vdev); + } + bus->ioeventfd_started = false; +} + +bool virtio_bus_ioeventfd_enabled(VirtioBusState *bus) +{ + VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(bus); + DeviceState *proxy = DEVICE(BUS(bus)->parent); + + return k->ioeventfd_assign && k->ioeventfd_enabled(proxy); +} + +/* + * This function switches ioeventfd on/off in the device. + * The caller must set or clear the handlers for the EventNotifier. 
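+ * (assign=true initializes the notifier and wires it to virtqueue n via the
+ * transport's ioeventfd_assign callback; assign=false unwires it, after
+ * which virtio_bus_cleanup_host_notifier() can release it.)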
+ */ +int virtio_bus_set_host_notifier(VirtioBusState *bus, int n, bool assign) +{ + VirtIODevice *vdev = virtio_bus_get_device(bus); + VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(bus); + DeviceState *proxy = DEVICE(BUS(bus)->parent); + VirtQueue *vq = virtio_get_queue(vdev, n); + EventNotifier *notifier = virtio_queue_get_host_notifier(vq); + int r = 0; + + if (!k->ioeventfd_assign) { + return -ENOSYS; + } + + if (assign) { + r = event_notifier_init(notifier, 1); + if (r < 0) { + error_report("%s: unable to init event notifier: %s (%d)", + __func__, strerror(-r), r); + return r; + } + r = k->ioeventfd_assign(proxy, notifier, n, true); + if (r < 0) { + error_report("%s: unable to assign ioeventfd: %d", __func__, r); + virtio_bus_cleanup_host_notifier(bus, n); + } + } else { + k->ioeventfd_assign(proxy, notifier, n, false); + } + + if (r == 0) { + virtio_queue_set_host_notifier_enabled(vq, assign); + } + + return r; +} + +void virtio_bus_cleanup_host_notifier(VirtioBusState *bus, int n) +{ + VirtIODevice *vdev = virtio_bus_get_device(bus); + VirtQueue *vq = virtio_get_queue(vdev, n); + EventNotifier *notifier = virtio_queue_get_host_notifier(vq); + + /* Test and clear notifier after disabling event, + * in case poll callback didn't have time to run. + */ + virtio_queue_host_notifier_read(notifier); + event_notifier_cleanup(notifier); +} + +static char *virtio_bus_get_dev_path(DeviceState *dev) +{ + BusState *bus = qdev_get_parent_bus(dev); + DeviceState *proxy = DEVICE(bus->parent); + return qdev_get_dev_path(proxy); +} + +static char *virtio_bus_get_fw_dev_path(DeviceState *dev) +{ + return NULL; +} + +bool virtio_bus_device_iommu_enabled(VirtIODevice *vdev) +{ + DeviceState *qdev = DEVICE(vdev); + BusState *qbus = BUS(qdev_get_parent_bus(qdev)); + VirtioBusState *bus = VIRTIO_BUS(qbus); + VirtioBusClass *klass = VIRTIO_BUS_GET_CLASS(bus); + + if (!klass->iommu_enabled) { + return false; + } + + return klass->iommu_enabled(qbus->parent); +} + +static void virtio_bus_class_init(ObjectClass *klass, void *data) +{ + BusClass *bus_class = BUS_CLASS(klass); + bus_class->get_dev_path = virtio_bus_get_dev_path; + bus_class->get_fw_dev_path = virtio_bus_get_fw_dev_path; +} + +static const TypeInfo virtio_bus_info = { + .name = TYPE_VIRTIO_BUS, + .parent = TYPE_BUS, + .instance_size = sizeof(VirtioBusState), + .abstract = true, + .class_size = sizeof(VirtioBusClass), + .class_init = virtio_bus_class_init +}; + +static void virtio_register_types(void) +{ + type_register_static(&virtio_bus_info); +} + +type_init(virtio_register_types) diff --git a/hw/virtio/virtio-crypto-pci.c b/hw/virtio/virtio-crypto-pci.c new file mode 100644 index 000000000..0783dc2f7 --- /dev/null +++ b/hw/virtio/virtio-crypto-pci.c @@ -0,0 +1,94 @@ +/* + * Virtio crypto device + * + * Copyright (c) 2016 HUAWEI TECHNOLOGIES CO., LTD. + * + * Authors: + * Gonglei + * + * This work is licensed under the terms of the GNU GPL, version 2 or + * (at your option) any later version. See the COPYING file in the + * top-level directory. + * + */ + +#include "qemu/osdep.h" +#include "hw/pci/pci.h" +#include "hw/qdev-properties.h" +#include "hw/virtio/virtio.h" +#include "hw/virtio/virtio-bus.h" +#include "hw/virtio/virtio-pci.h" +#include "hw/virtio/virtio-crypto.h" +#include "qapi/error.h" +#include "qemu/module.h" +#include "qom/object.h" + +typedef struct VirtIOCryptoPCI VirtIOCryptoPCI; + +/* + * virtio-crypto-pci: This extends VirtioPCIProxy. 
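+ * The embedded VirtIOCrypto is initialized in virtio_crypto_initfn() and
+ * realized through the proxy; virtio_crypto_pci_realize() additionally
+ * forces the modern (virtio 1.0) interface.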
+ */ +#define TYPE_VIRTIO_CRYPTO_PCI "virtio-crypto-pci" +DECLARE_INSTANCE_CHECKER(VirtIOCryptoPCI, VIRTIO_CRYPTO_PCI, + TYPE_VIRTIO_CRYPTO_PCI) + +struct VirtIOCryptoPCI { + VirtIOPCIProxy parent_obj; + VirtIOCrypto vdev; +}; + +static Property virtio_crypto_pci_properties[] = { + DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags, + VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true), + DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2), + DEFINE_PROP_END_OF_LIST(), +}; + +static void virtio_crypto_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp) +{ + VirtIOCryptoPCI *vcrypto = VIRTIO_CRYPTO_PCI(vpci_dev); + DeviceState *vdev = DEVICE(&vcrypto->vdev); + + if (vcrypto->vdev.conf.cryptodev == NULL) { + error_setg(errp, "'cryptodev' parameter expects a valid object"); + return; + } + + virtio_pci_force_virtio_1(vpci_dev); + if (!qdev_realize(vdev, BUS(&vpci_dev->bus), errp)) { + return; + } +} + +static void virtio_crypto_pci_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass); + PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass); + + k->realize = virtio_crypto_pci_realize; + set_bit(DEVICE_CATEGORY_MISC, dc->categories); + device_class_set_props(dc, virtio_crypto_pci_properties); + pcidev_k->class_id = PCI_CLASS_OTHERS; +} + +static void virtio_crypto_initfn(Object *obj) +{ + VirtIOCryptoPCI *dev = VIRTIO_CRYPTO_PCI(obj); + + virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev), + TYPE_VIRTIO_CRYPTO); +} + +static const VirtioPCIDeviceTypeInfo virtio_crypto_pci_info = { + .generic_name = TYPE_VIRTIO_CRYPTO_PCI, + .instance_size = sizeof(VirtIOCryptoPCI), + .instance_init = virtio_crypto_initfn, + .class_init = virtio_crypto_pci_class_init, +}; + +static void virtio_crypto_pci_register_types(void) +{ + virtio_pci_types_register(&virtio_crypto_pci_info); +} +type_init(virtio_crypto_pci_register_types) diff --git a/hw/virtio/virtio-crypto.c b/hw/virtio/virtio-crypto.c new file mode 100644 index 000000000..54f9bbb78 --- /dev/null +++ b/hw/virtio/virtio-crypto.c @@ -0,0 +1,1006 @@ +/* + * Virtio crypto Support + * + * Copyright (c) 2016 HUAWEI TECHNOLOGIES CO., LTD. + * + * Authors: + * Gonglei + * + * This work is licensed under the terms of the GNU GPL, version 2 or + * (at your option) any later version. See the COPYING file in the + * top-level directory. + */ + +#include "qemu/osdep.h" +#include "qemu/iov.h" +#include "qemu/main-loop.h" +#include "qemu/module.h" +#include "qapi/error.h" +#include "qemu/error-report.h" + +#include "hw/virtio/virtio.h" +#include "hw/virtio/virtio-crypto.h" +#include "hw/qdev-properties.h" +#include "hw/virtio/virtio-access.h" +#include "standard-headers/linux/virtio_ids.h" +#include "sysemu/cryptodev-vhost.h" + +#define VIRTIO_CRYPTO_VM_VERSION 1 + +/* + * Transfer virtqueue index to crypto queue index. 
+ * The control virtqueue is after the data virtqueues + * so the input value doesn't need to be adjusted + */ +static inline int virtio_crypto_vq2q(int queue_index) +{ + return queue_index; +} + +static int +virtio_crypto_cipher_session_helper(VirtIODevice *vdev, + CryptoDevBackendSymSessionInfo *info, + struct virtio_crypto_cipher_session_para *cipher_para, + struct iovec **iov, unsigned int *out_num) +{ + VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(vdev); + unsigned int num = *out_num; + + info->cipher_alg = ldl_le_p(&cipher_para->algo); + info->key_len = ldl_le_p(&cipher_para->keylen); + info->direction = ldl_le_p(&cipher_para->op); + DPRINTF("cipher_alg=%" PRIu32 ", info->direction=%" PRIu32 "\n", + info->cipher_alg, info->direction); + + if (info->key_len > vcrypto->conf.max_cipher_key_len) { + error_report("virtio-crypto length of cipher key is too big: %u", + info->key_len); + return -VIRTIO_CRYPTO_ERR; + } + /* Get cipher key */ + if (info->key_len > 0) { + size_t s; + DPRINTF("keylen=%" PRIu32 "\n", info->key_len); + + info->cipher_key = g_malloc(info->key_len); + s = iov_to_buf(*iov, num, 0, info->cipher_key, info->key_len); + if (unlikely(s != info->key_len)) { + virtio_error(vdev, "virtio-crypto cipher key incorrect"); + return -EFAULT; + } + iov_discard_front(iov, &num, info->key_len); + *out_num = num; + } + + return 0; +} + +static int64_t +virtio_crypto_create_sym_session(VirtIOCrypto *vcrypto, + struct virtio_crypto_sym_create_session_req *sess_req, + uint32_t queue_id, + uint32_t opcode, + struct iovec *iov, unsigned int out_num) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(vcrypto); + CryptoDevBackendSymSessionInfo info; + int64_t session_id; + int queue_index; + uint32_t op_type; + Error *local_err = NULL; + int ret; + + memset(&info, 0, sizeof(info)); + op_type = ldl_le_p(&sess_req->op_type); + info.op_type = op_type; + info.op_code = opcode; + + if (op_type == VIRTIO_CRYPTO_SYM_OP_CIPHER) { + ret = virtio_crypto_cipher_session_helper(vdev, &info, + &sess_req->u.cipher.para, + &iov, &out_num); + if (ret < 0) { + goto err; + } + } else if (op_type == VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING) { + size_t s; + /* cipher part */ + ret = virtio_crypto_cipher_session_helper(vdev, &info, + &sess_req->u.chain.para.cipher_param, + &iov, &out_num); + if (ret < 0) { + goto err; + } + /* hash part */ + info.alg_chain_order = ldl_le_p( + &sess_req->u.chain.para.alg_chain_order); + info.add_len = ldl_le_p(&sess_req->u.chain.para.aad_len); + info.hash_mode = ldl_le_p(&sess_req->u.chain.para.hash_mode); + if (info.hash_mode == VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH) { + info.hash_alg = ldl_le_p(&sess_req->u.chain.para.u.mac_param.algo); + info.auth_key_len = ldl_le_p( + &sess_req->u.chain.para.u.mac_param.auth_key_len); + info.hash_result_len = ldl_le_p( + &sess_req->u.chain.para.u.mac_param.hash_result_len); + if (info.auth_key_len > vcrypto->conf.max_auth_key_len) { + error_report("virtio-crypto length of auth key is too big: %u", + info.auth_key_len); + ret = -VIRTIO_CRYPTO_ERR; + goto err; + } + /* get auth key */ + if (info.auth_key_len > 0) { + DPRINTF("auth_keylen=%" PRIu32 "\n", info.auth_key_len); + info.auth_key = g_malloc(info.auth_key_len); + s = iov_to_buf(iov, out_num, 0, info.auth_key, + info.auth_key_len); + if (unlikely(s != info.auth_key_len)) { + virtio_error(vdev, + "virtio-crypto authenticated key incorrect"); + ret = -EFAULT; + goto err; + } + iov_discard_front(&iov, &out_num, info.auth_key_len); + } + } else if (info.hash_mode == VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN) { + 
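+ /*
+ * Plain hash mode: only the algorithm and the digest length are carried
+ * in the request; no authentication key follows in the iov.
+ */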
info.hash_alg = ldl_le_p( + &sess_req->u.chain.para.u.hash_param.algo); + info.hash_result_len = ldl_le_p( + &sess_req->u.chain.para.u.hash_param.hash_result_len); + } else { + /* VIRTIO_CRYPTO_SYM_HASH_MODE_NESTED */ + error_report("unsupported hash mode"); + ret = -VIRTIO_CRYPTO_NOTSUPP; + goto err; + } + } else { + /* VIRTIO_CRYPTO_SYM_OP_NONE */ + error_report("unsupported cipher op_type: VIRTIO_CRYPTO_SYM_OP_NONE"); + ret = -VIRTIO_CRYPTO_NOTSUPP; + goto err; + } + + queue_index = virtio_crypto_vq2q(queue_id); + session_id = cryptodev_backend_sym_create_session( + vcrypto->cryptodev, + &info, queue_index, &local_err); + if (session_id >= 0) { + DPRINTF("create session_id=%" PRIu64 " successfully\n", + session_id); + + ret = session_id; + } else { + if (local_err) { + error_report_err(local_err); + } + ret = -VIRTIO_CRYPTO_ERR; + } + +err: + g_free(info.cipher_key); + g_free(info.auth_key); + return ret; +} + +static uint8_t +virtio_crypto_handle_close_session(VirtIOCrypto *vcrypto, + struct virtio_crypto_destroy_session_req *close_sess_req, + uint32_t queue_id) +{ + int ret; + uint64_t session_id; + uint32_t status; + Error *local_err = NULL; + + session_id = ldq_le_p(&close_sess_req->session_id); + DPRINTF("close session, id=%" PRIu64 "\n", session_id); + + ret = cryptodev_backend_sym_close_session( + vcrypto->cryptodev, session_id, queue_id, &local_err); + if (ret == 0) { + status = VIRTIO_CRYPTO_OK; + } else { + if (local_err) { + error_report_err(local_err); + } else { + error_report("destroy session failed"); + } + status = VIRTIO_CRYPTO_ERR; + } + + return status; +} + +static void virtio_crypto_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq) +{ + VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(vdev); + struct virtio_crypto_op_ctrl_req ctrl; + VirtQueueElement *elem; + struct iovec *in_iov; + struct iovec *out_iov; + unsigned in_num; + unsigned out_num; + uint32_t queue_id; + uint32_t opcode; + struct virtio_crypto_session_input input; + int64_t session_id; + uint8_t status; + size_t s; + + for (;;) { + g_autofree struct iovec *out_iov_copy = NULL; + + elem = virtqueue_pop(vq, sizeof(VirtQueueElement)); + if (!elem) { + break; + } + if (elem->out_num < 1 || elem->in_num < 1) { + virtio_error(vdev, "virtio-crypto ctrl missing headers"); + virtqueue_detach_element(vq, elem, 0); + g_free(elem); + break; + } + + out_num = elem->out_num; + out_iov_copy = g_memdup(elem->out_sg, sizeof(out_iov[0]) * out_num); + out_iov = out_iov_copy; + + in_num = elem->in_num; + in_iov = elem->in_sg; + + if (unlikely(iov_to_buf(out_iov, out_num, 0, &ctrl, sizeof(ctrl)) + != sizeof(ctrl))) { + virtio_error(vdev, "virtio-crypto request ctrl_hdr too short"); + virtqueue_detach_element(vq, elem, 0); + g_free(elem); + break; + } + iov_discard_front(&out_iov, &out_num, sizeof(ctrl)); + + opcode = ldl_le_p(&ctrl.header.opcode); + queue_id = ldl_le_p(&ctrl.header.queue_id); + + switch (opcode) { + case VIRTIO_CRYPTO_CIPHER_CREATE_SESSION: + memset(&input, 0, sizeof(input)); + session_id = virtio_crypto_create_sym_session(vcrypto, + &ctrl.u.sym_create_session, + queue_id, opcode, + out_iov, out_num); + /* Serious errors, need to reset virtio crypto device */ + if (session_id == -EFAULT) { + virtqueue_detach_element(vq, elem, 0); + break; + } else if (session_id == -VIRTIO_CRYPTO_NOTSUPP) { + stl_le_p(&input.status, VIRTIO_CRYPTO_NOTSUPP); + } else if (session_id == -VIRTIO_CRYPTO_ERR) { + stl_le_p(&input.status, VIRTIO_CRYPTO_ERR); + } else { + /* Set the session id */ + stq_le_p(&input.session_id, session_id); + 
stl_le_p(&input.status, VIRTIO_CRYPTO_OK); + } + + s = iov_from_buf(in_iov, in_num, 0, &input, sizeof(input)); + if (unlikely(s != sizeof(input))) { + virtio_error(vdev, "virtio-crypto input incorrect"); + virtqueue_detach_element(vq, elem, 0); + break; + } + virtqueue_push(vq, elem, sizeof(input)); + virtio_notify(vdev, vq); + break; + case VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION: + case VIRTIO_CRYPTO_HASH_DESTROY_SESSION: + case VIRTIO_CRYPTO_MAC_DESTROY_SESSION: + case VIRTIO_CRYPTO_AEAD_DESTROY_SESSION: + status = virtio_crypto_handle_close_session(vcrypto, + &ctrl.u.destroy_session, queue_id); + /* The status only occupies one byte, so we can use it directly */ + s = iov_from_buf(in_iov, in_num, 0, &status, sizeof(status)); + if (unlikely(s != sizeof(status))) { + virtio_error(vdev, "virtio-crypto status incorrect"); + virtqueue_detach_element(vq, elem, 0); + break; + } + virtqueue_push(vq, elem, sizeof(status)); + virtio_notify(vdev, vq); + break; + case VIRTIO_CRYPTO_HASH_CREATE_SESSION: + case VIRTIO_CRYPTO_MAC_CREATE_SESSION: + case VIRTIO_CRYPTO_AEAD_CREATE_SESSION: + default: + error_report("virtio-crypto unsupported ctrl opcode: %d", opcode); + memset(&input, 0, sizeof(input)); + stl_le_p(&input.status, VIRTIO_CRYPTO_NOTSUPP); + s = iov_from_buf(in_iov, in_num, 0, &input, sizeof(input)); + if (unlikely(s != sizeof(input))) { + virtio_error(vdev, "virtio-crypto input incorrect"); + virtqueue_detach_element(vq, elem, 0); + break; + } + virtqueue_push(vq, elem, sizeof(input)); + virtio_notify(vdev, vq); + + break; + } /* end switch case */ + + g_free(elem); + } /* end for loop */ +} + +static void virtio_crypto_init_request(VirtIOCrypto *vcrypto, VirtQueue *vq, + VirtIOCryptoReq *req) +{ + req->vcrypto = vcrypto; + req->vq = vq; + req->in = NULL; + req->in_iov = NULL; + req->in_num = 0; + req->in_len = 0; + req->flags = CRYPTODEV_BACKEND_ALG__MAX; + req->u.sym_op_info = NULL; +} + +static void virtio_crypto_free_request(VirtIOCryptoReq *req) +{ + if (req) { + if (req->flags == CRYPTODEV_BACKEND_ALG_SYM) { + size_t max_len; + CryptoDevBackendSymOpInfo *op_info = req->u.sym_op_info; + + max_len = op_info->iv_len + + op_info->aad_len + + op_info->src_len + + op_info->dst_len + + op_info->digest_result_len; + + /* Zeroize and free request data structure */ + memset(op_info, 0, sizeof(*op_info) + max_len); + g_free(op_info); + } + g_free(req); + } +} + +static void +virtio_crypto_sym_input_data_helper(VirtIODevice *vdev, + VirtIOCryptoReq *req, + uint32_t status, + CryptoDevBackendSymOpInfo *sym_op_info) +{ + size_t s, len; + + if (status != VIRTIO_CRYPTO_OK) { + return; + } + + len = sym_op_info->src_len; + /* Save the cipher result */ + s = iov_from_buf(req->in_iov, req->in_num, 0, sym_op_info->dst, len); + if (s != len) { + virtio_error(vdev, "virtio-crypto dest data incorrect"); + return; + } + + iov_discard_front(&req->in_iov, &req->in_num, len); + + if (sym_op_info->op_type == + VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING) { + /* Save the digest result */ + s = iov_from_buf(req->in_iov, req->in_num, 0, + sym_op_info->digest_result, + sym_op_info->digest_result_len); + if (s != sym_op_info->digest_result_len) { + virtio_error(vdev, "virtio-crypto digest result incorrect"); + } + } +} + +static void virtio_crypto_req_complete(VirtIOCryptoReq *req, uint8_t status) +{ + VirtIOCrypto *vcrypto = req->vcrypto; + VirtIODevice *vdev = VIRTIO_DEVICE(vcrypto); + + if (req->flags == CRYPTODEV_BACKEND_ALG_SYM) { + virtio_crypto_sym_input_data_helper(vdev, req, status, + req->u.sym_op_info); + }
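+ /*
+ * req->in points at the one-byte inhdr status that
+ * virtio_crypto_handle_request() below carves out of the end of the
+ * in iovec.
+ */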
stb_p(&req->in->status, status); + virtqueue_push(req->vq, &req->elem, req->in_len); + virtio_notify(vdev, req->vq); +} + +static VirtIOCryptoReq * +virtio_crypto_get_request(VirtIOCrypto *s, VirtQueue *vq) +{ + VirtIOCryptoReq *req = virtqueue_pop(vq, sizeof(VirtIOCryptoReq)); + + if (req) { + virtio_crypto_init_request(s, vq, req); + } + return req; +} + +static CryptoDevBackendSymOpInfo * +virtio_crypto_sym_op_helper(VirtIODevice *vdev, + struct virtio_crypto_cipher_para *cipher_para, + struct virtio_crypto_alg_chain_data_para *alg_chain_para, + struct iovec *iov, unsigned int out_num) +{ + VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(vdev); + CryptoDevBackendSymOpInfo *op_info; + uint32_t src_len = 0, dst_len = 0; + uint32_t iv_len = 0; + uint32_t aad_len = 0, hash_result_len = 0; + uint32_t hash_start_src_offset = 0, len_to_hash = 0; + uint32_t cipher_start_src_offset = 0, len_to_cipher = 0; + + uint64_t max_len, curr_size = 0; + size_t s; + + /* Plain cipher */ + if (cipher_para) { + iv_len = ldl_le_p(&cipher_para->iv_len); + src_len = ldl_le_p(&cipher_para->src_data_len); + dst_len = ldl_le_p(&cipher_para->dst_data_len); + } else if (alg_chain_para) { /* Algorithm chain */ + iv_len = ldl_le_p(&alg_chain_para->iv_len); + src_len = ldl_le_p(&alg_chain_para->src_data_len); + dst_len = ldl_le_p(&alg_chain_para->dst_data_len); + + aad_len = ldl_le_p(&alg_chain_para->aad_len); + hash_result_len = ldl_le_p(&alg_chain_para->hash_result_len); + hash_start_src_offset = ldl_le_p( + &alg_chain_para->hash_start_src_offset); + cipher_start_src_offset = ldl_le_p( + &alg_chain_para->cipher_start_src_offset); + len_to_cipher = ldl_le_p(&alg_chain_para->len_to_cipher); + len_to_hash = ldl_le_p(&alg_chain_para->len_to_hash); + } else { + return NULL; + } + + max_len = (uint64_t)iv_len + aad_len + src_len + dst_len + hash_result_len; + if (unlikely(max_len > vcrypto->conf.max_size)) { + virtio_error(vdev, "virtio-crypto too big length"); + return NULL; + } + + op_info = g_malloc0(sizeof(CryptoDevBackendSymOpInfo) + max_len); + op_info->iv_len = iv_len; + op_info->src_len = src_len; + op_info->dst_len = dst_len; + op_info->aad_len = aad_len; + op_info->digest_result_len = hash_result_len; + op_info->hash_start_src_offset = hash_start_src_offset; + op_info->len_to_hash = len_to_hash; + op_info->cipher_start_src_offset = cipher_start_src_offset; + op_info->len_to_cipher = len_to_cipher; + /* Handle the initialization vector */ + if (op_info->iv_len > 0) { + DPRINTF("iv_len=%" PRIu32 "\n", op_info->iv_len); + op_info->iv = op_info->data + curr_size; + + s = iov_to_buf(iov, out_num, 0, op_info->iv, op_info->iv_len); + if (unlikely(s != op_info->iv_len)) { + virtio_error(vdev, "virtio-crypto iv incorrect"); + goto err; + } + iov_discard_front(&iov, &out_num, op_info->iv_len); + curr_size += op_info->iv_len; + } + + /* Handle additional authentication data if present */ + if (op_info->aad_len > 0) { + DPRINTF("aad_len=%" PRIu32 "\n", op_info->aad_len); + op_info->aad_data = op_info->data + curr_size; + + s = iov_to_buf(iov, out_num, 0, op_info->aad_data, op_info->aad_len); + if (unlikely(s != op_info->aad_len)) { + virtio_error(vdev, "virtio-crypto additional auth data incorrect"); + goto err; + } + iov_discard_front(&iov, &out_num, op_info->aad_len); + + curr_size += op_info->aad_len; + } + + /* Handle the source data */ + if (op_info->src_len > 0) { + DPRINTF("src_len=%" PRIu32 "\n", op_info->src_len); + op_info->src = op_info->data + curr_size; + + s = iov_to_buf(iov, out_num, 0, op_info->src, op_info->src_len); +
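+ /* A short copy means the guest supplied less source data than src_len
+ * advertised; the check below marks the device broken via virtio_error(). */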
if (unlikely(s != op_info->src_len)) { + virtio_error(vdev, "virtio-crypto source data incorrect"); + goto err; + } + iov_discard_front(&iov, &out_num, op_info->src_len); + + curr_size += op_info->src_len; + } + + /* Handle the destination data */ + op_info->dst = op_info->data + curr_size; + curr_size += op_info->dst_len; + + DPRINTF("dst_len=%" PRIu32 "\n", op_info->dst_len); + + /* Handle the hash digest result */ + if (hash_result_len > 0) { + DPRINTF("hash_result_len=%" PRIu32 "\n", hash_result_len); + op_info->digest_result = op_info->data + curr_size; + } + + return op_info; + +err: + g_free(op_info); + return NULL; +} + +static int +virtio_crypto_handle_sym_req(VirtIOCrypto *vcrypto, + struct virtio_crypto_sym_data_req *req, + CryptoDevBackendSymOpInfo **sym_op_info, + struct iovec *iov, unsigned int out_num) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(vcrypto); + uint32_t op_type; + CryptoDevBackendSymOpInfo *op_info; + + op_type = ldl_le_p(&req->op_type); + + if (op_type == VIRTIO_CRYPTO_SYM_OP_CIPHER) { + op_info = virtio_crypto_sym_op_helper(vdev, &req->u.cipher.para, + NULL, iov, out_num); + if (!op_info) { + return -EFAULT; + } + op_info->op_type = op_type; + } else if (op_type == VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING) { + op_info = virtio_crypto_sym_op_helper(vdev, NULL, + &req->u.chain.para, + iov, out_num); + if (!op_info) { + return -EFAULT; + } + op_info->op_type = op_type; + } else { + /* VIRTIO_CRYPTO_SYM_OP_NONE */ + error_report("virtio-crypto unsupported cipher type"); + return -VIRTIO_CRYPTO_NOTSUPP; + } + + *sym_op_info = op_info; + + return 0; +} + +static int +virtio_crypto_handle_request(VirtIOCryptoReq *request) +{ + VirtIOCrypto *vcrypto = request->vcrypto; + VirtIODevice *vdev = VIRTIO_DEVICE(vcrypto); + VirtQueueElement *elem = &request->elem; + int queue_index = virtio_crypto_vq2q(virtio_get_queue_index(request->vq)); + struct virtio_crypto_op_data_req req; + int ret; + g_autofree struct iovec *in_iov_copy = NULL; + g_autofree struct iovec *out_iov_copy = NULL; + struct iovec *in_iov; + struct iovec *out_iov; + unsigned in_num; + unsigned out_num; + uint32_t opcode; + uint8_t status = VIRTIO_CRYPTO_ERR; + uint64_t session_id; + CryptoDevBackendSymOpInfo *sym_op_info = NULL; + Error *local_err = NULL; + + if (elem->out_num < 1 || elem->in_num < 1) { + virtio_error(vdev, "virtio-crypto dataq missing headers"); + return -1; + } + + out_num = elem->out_num; + out_iov_copy = g_memdup(elem->out_sg, sizeof(out_iov[0]) * out_num); + out_iov = out_iov_copy; + + in_num = elem->in_num; + in_iov_copy = g_memdup(elem->in_sg, sizeof(in_iov[0]) * in_num); + in_iov = in_iov_copy; + + if (unlikely(iov_to_buf(out_iov, out_num, 0, &req, sizeof(req)) + != sizeof(req))) { + virtio_error(vdev, "virtio-crypto request outhdr too short"); + return -1; + } + iov_discard_front(&out_iov, &out_num, sizeof(req)); + + if (in_iov[in_num - 1].iov_len < + sizeof(struct virtio_crypto_inhdr)) { + virtio_error(vdev, "virtio-crypto request inhdr too short"); + return -1; + } + /* We always touch the last byte, so just see how big in_iov is. */ + request->in_len = iov_size(in_iov, in_num); + request->in = (void *)in_iov[in_num - 1].iov_base + + in_iov[in_num - 1].iov_len + - sizeof(struct virtio_crypto_inhdr); + iov_discard_back(in_iov, &in_num, sizeof(struct virtio_crypto_inhdr)); + + /* + * The length of operation result, including dest_data + * and digest_result if present.
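+ * (request->in_len was computed above as the full size of the in iovec,
+ * since the trailing status byte is always written on completion.)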
+ */ + request->in_num = in_num; + request->in_iov = in_iov; + + opcode = ldl_le_p(&req.header.opcode); + session_id = ldq_le_p(&req.header.session_id); + + switch (opcode) { + case VIRTIO_CRYPTO_CIPHER_ENCRYPT: + case VIRTIO_CRYPTO_CIPHER_DECRYPT: + ret = virtio_crypto_handle_sym_req(vcrypto, + &req.u.sym_req, + &sym_op_info, + out_iov, out_num); + /* Serious errors, need to reset virtio crypto device */ + if (ret == -EFAULT) { + return -1; + } else if (ret == -VIRTIO_CRYPTO_NOTSUPP) { + virtio_crypto_req_complete(request, VIRTIO_CRYPTO_NOTSUPP); + virtio_crypto_free_request(request); + } else { + sym_op_info->session_id = session_id; + + /* Set the request's parameters */ + request->flags = CRYPTODEV_BACKEND_ALG_SYM; + request->u.sym_op_info = sym_op_info; + ret = cryptodev_backend_crypto_operation(vcrypto->cryptodev, + request, queue_index, &local_err); + if (ret < 0) { + status = -ret; + if (local_err) { + error_report_err(local_err); + } + } else { /* ret == VIRTIO_CRYPTO_OK */ + status = ret; + } + virtio_crypto_req_complete(request, status); + virtio_crypto_free_request(request); + } + break; + case VIRTIO_CRYPTO_HASH: + case VIRTIO_CRYPTO_MAC: + case VIRTIO_CRYPTO_AEAD_ENCRYPT: + case VIRTIO_CRYPTO_AEAD_DECRYPT: + default: + error_report("virtio-crypto unsupported dataq opcode: %u", + opcode); + virtio_crypto_req_complete(request, VIRTIO_CRYPTO_NOTSUPP); + virtio_crypto_free_request(request); + } + + return 0; +} + +static void virtio_crypto_handle_dataq(VirtIODevice *vdev, VirtQueue *vq) +{ + VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(vdev); + VirtIOCryptoReq *req; + + while ((req = virtio_crypto_get_request(vcrypto, vq))) { + if (virtio_crypto_handle_request(req) < 0) { + virtqueue_detach_element(req->vq, &req->elem, 0); + virtio_crypto_free_request(req); + break; + } + } +} + +static void virtio_crypto_dataq_bh(void *opaque) +{ + VirtIOCryptoQueue *q = opaque; + VirtIOCrypto *vcrypto = q->vcrypto; + VirtIODevice *vdev = VIRTIO_DEVICE(vcrypto); + + /* This happens when the device was stopped but the BH wasn't. */ + if (!vdev->vm_running) { + return; + } + + /* Just in case the driver is not ready any more */ + if (unlikely(!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))) { + return; + } + + for (;;) { + virtio_crypto_handle_dataq(vdev, q->dataq); + virtio_queue_set_notification(q->dataq, 1); + + /* Are we done or did the guest add more buffers? */ + if (virtio_queue_empty(q->dataq)) { + break; + } + + virtio_queue_set_notification(q->dataq, 0); + } +} + +static void +virtio_crypto_handle_dataq_bh(VirtIODevice *vdev, VirtQueue *vq) +{ + VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(vdev); + VirtIOCryptoQueue *q = + &vcrypto->vqs[virtio_crypto_vq2q(virtio_get_queue_index(vq))]; + + /* This happens when the device was stopped but the VCPU wasn't.
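+ * Kicks that arrive while the VM is stopped are ignored here, mirroring
+ * the vm_running check in virtio_crypto_dataq_bh() above.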
*/ + if (!vdev->vm_running) { + return; + } + virtio_queue_set_notification(vq, 0); + qemu_bh_schedule(q->dataq_bh); +} + +static uint64_t virtio_crypto_get_features(VirtIODevice *vdev, + uint64_t features, + Error **errp) +{ + return features; +} + +static void virtio_crypto_reset(VirtIODevice *vdev) +{ + VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(vdev); + /* multiqueue is disabled by default */ + vcrypto->curr_queues = 1; + if (!cryptodev_backend_is_ready(vcrypto->cryptodev)) { + vcrypto->status &= ~VIRTIO_CRYPTO_S_HW_READY; + } else { + vcrypto->status |= VIRTIO_CRYPTO_S_HW_READY; + } +} + +static void virtio_crypto_init_config(VirtIODevice *vdev) +{ + VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(vdev); + + vcrypto->conf.crypto_services = + vcrypto->conf.cryptodev->conf.crypto_services; + vcrypto->conf.cipher_algo_l = + vcrypto->conf.cryptodev->conf.cipher_algo_l; + vcrypto->conf.cipher_algo_h = + vcrypto->conf.cryptodev->conf.cipher_algo_h; + vcrypto->conf.hash_algo = vcrypto->conf.cryptodev->conf.hash_algo; + vcrypto->conf.mac_algo_l = vcrypto->conf.cryptodev->conf.mac_algo_l; + vcrypto->conf.mac_algo_h = vcrypto->conf.cryptodev->conf.mac_algo_h; + vcrypto->conf.aead_algo = vcrypto->conf.cryptodev->conf.aead_algo; + vcrypto->conf.max_cipher_key_len = + vcrypto->conf.cryptodev->conf.max_cipher_key_len; + vcrypto->conf.max_auth_key_len = + vcrypto->conf.cryptodev->conf.max_auth_key_len; + vcrypto->conf.max_size = vcrypto->conf.cryptodev->conf.max_size; +} + +static void virtio_crypto_device_realize(DeviceState *dev, Error **errp) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(dev); + VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(dev); + int i; + + vcrypto->cryptodev = vcrypto->conf.cryptodev; + if (vcrypto->cryptodev == NULL) { + error_setg(errp, "'cryptodev' parameter expects a valid object"); + return; + } else if (cryptodev_backend_is_used(vcrypto->cryptodev)) { + error_setg(errp, "can't use already used cryptodev backend: %s", + object_get_canonical_path_component(OBJECT(vcrypto->conf.cryptodev))); + return; + } + + vcrypto->max_queues = MAX(vcrypto->cryptodev->conf.peers.queues, 1); + if (vcrypto->max_queues + 1 > VIRTIO_QUEUE_MAX) { + error_setg(errp, "Invalid number of queues (= %" PRIu32 "), " + "must be a positive integer less than %d.", + vcrypto->max_queues, VIRTIO_QUEUE_MAX); + return; + } + + virtio_init(vdev, "virtio-crypto", VIRTIO_ID_CRYPTO, vcrypto->config_size); + vcrypto->curr_queues = 1; + vcrypto->vqs = g_malloc0(sizeof(VirtIOCryptoQueue) * vcrypto->max_queues); + for (i = 0; i < vcrypto->max_queues; i++) { + vcrypto->vqs[i].dataq = + virtio_add_queue(vdev, 1024, virtio_crypto_handle_dataq_bh); + vcrypto->vqs[i].dataq_bh = + qemu_bh_new(virtio_crypto_dataq_bh, &vcrypto->vqs[i]); + vcrypto->vqs[i].vcrypto = vcrypto; + } + + vcrypto->ctrl_vq = virtio_add_queue(vdev, 64, virtio_crypto_handle_ctrl); + if (!cryptodev_backend_is_ready(vcrypto->cryptodev)) { + vcrypto->status &= ~VIRTIO_CRYPTO_S_HW_READY; + } else { + vcrypto->status |= VIRTIO_CRYPTO_S_HW_READY; + } + + virtio_crypto_init_config(vdev); + cryptodev_backend_set_used(vcrypto->cryptodev, true); +} + +static void virtio_crypto_device_unrealize(DeviceState *dev) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(dev); + VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(dev); + VirtIOCryptoQueue *q; + int i, max_queues; + + max_queues = vcrypto->multiqueue ? 
vcrypto->max_queues : 1; + for (i = 0; i < max_queues; i++) { + virtio_delete_queue(vcrypto->vqs[i].dataq); + q = &vcrypto->vqs[i]; + qemu_bh_delete(q->dataq_bh); + } + + g_free(vcrypto->vqs); + virtio_delete_queue(vcrypto->ctrl_vq); + + virtio_cleanup(vdev); + cryptodev_backend_set_used(vcrypto->cryptodev, false); +} + +static const VMStateDescription vmstate_virtio_crypto = { + .name = "virtio-crypto", + .unmigratable = 1, + .minimum_version_id = VIRTIO_CRYPTO_VM_VERSION, + .version_id = VIRTIO_CRYPTO_VM_VERSION, + .fields = (VMStateField[]) { + VMSTATE_VIRTIO_DEVICE, + VMSTATE_END_OF_LIST() + }, +}; + +static Property virtio_crypto_properties[] = { + DEFINE_PROP_LINK("cryptodev", VirtIOCrypto, conf.cryptodev, + TYPE_CRYPTODEV_BACKEND, CryptoDevBackend *), + DEFINE_PROP_END_OF_LIST(), +}; + +static void virtio_crypto_get_config(VirtIODevice *vdev, uint8_t *config) +{ + VirtIOCrypto *c = VIRTIO_CRYPTO(vdev); + struct virtio_crypto_config crypto_cfg = {}; + + /* + * Virtio-crypto device conforms to VIRTIO 1.0 which is always LE, + * so we can use LE accessors directly. + */ + stl_le_p(&crypto_cfg.status, c->status); + stl_le_p(&crypto_cfg.max_dataqueues, c->max_queues); + stl_le_p(&crypto_cfg.crypto_services, c->conf.crypto_services); + stl_le_p(&crypto_cfg.cipher_algo_l, c->conf.cipher_algo_l); + stl_le_p(&crypto_cfg.cipher_algo_h, c->conf.cipher_algo_h); + stl_le_p(&crypto_cfg.hash_algo, c->conf.hash_algo); + stl_le_p(&crypto_cfg.mac_algo_l, c->conf.mac_algo_l); + stl_le_p(&crypto_cfg.mac_algo_h, c->conf.mac_algo_h); + stl_le_p(&crypto_cfg.aead_algo, c->conf.aead_algo); + stl_le_p(&crypto_cfg.max_cipher_key_len, c->conf.max_cipher_key_len); + stl_le_p(&crypto_cfg.max_auth_key_len, c->conf.max_auth_key_len); + stq_le_p(&crypto_cfg.max_size, c->conf.max_size); + + memcpy(config, &crypto_cfg, c->config_size); +} + +static bool virtio_crypto_started(VirtIOCrypto *c, uint8_t status) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(c); + return (status & VIRTIO_CONFIG_S_DRIVER_OK) && + (c->status & VIRTIO_CRYPTO_S_HW_READY) && vdev->vm_running; +} + +static void virtio_crypto_vhost_status(VirtIOCrypto *c, uint8_t status) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(c); + int queues = c->multiqueue ? 
c->max_queues : 1; + CryptoDevBackend *b = c->cryptodev; + CryptoDevBackendClient *cc = b->conf.peers.ccs[0]; + + if (!cryptodev_get_vhost(cc, b, 0)) { + return; + } + + if ((virtio_crypto_started(c, status)) == !!c->vhost_started) { + return; + } + + if (!c->vhost_started) { + int r; + + c->vhost_started = 1; + r = cryptodev_vhost_start(vdev, queues); + if (r < 0) { + error_report("unable to start vhost crypto: %d: " + "falling back on userspace virtio", -r); + c->vhost_started = 0; + } + } else { + cryptodev_vhost_stop(vdev, queues); + c->vhost_started = 0; + } +} + +static void virtio_crypto_set_status(VirtIODevice *vdev, uint8_t status) +{ + VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(vdev); + + virtio_crypto_vhost_status(vcrypto, status); +} + +static void virtio_crypto_guest_notifier_mask(VirtIODevice *vdev, int idx, + bool mask) +{ + VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(vdev); + int queue = virtio_crypto_vq2q(idx); + + assert(vcrypto->vhost_started); + + cryptodev_vhost_virtqueue_mask(vdev, queue, idx, mask); +} + +static bool virtio_crypto_guest_notifier_pending(VirtIODevice *vdev, int idx) +{ + VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(vdev); + int queue = virtio_crypto_vq2q(idx); + + assert(vcrypto->vhost_started); + + return cryptodev_vhost_virtqueue_pending(vdev, queue, idx); +} + +static void virtio_crypto_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass); + + device_class_set_props(dc, virtio_crypto_properties); + dc->vmsd = &vmstate_virtio_crypto; + set_bit(DEVICE_CATEGORY_MISC, dc->categories); + vdc->realize = virtio_crypto_device_realize; + vdc->unrealize = virtio_crypto_device_unrealize; + vdc->get_config = virtio_crypto_get_config; + vdc->get_features = virtio_crypto_get_features; + vdc->reset = virtio_crypto_reset; + vdc->set_status = virtio_crypto_set_status; + vdc->guest_notifier_mask = virtio_crypto_guest_notifier_mask; + vdc->guest_notifier_pending = virtio_crypto_guest_notifier_pending; +} + +static void virtio_crypto_instance_init(Object *obj) +{ + VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(obj); + + /* + * The default config_size is sizeof(struct virtio_crypto_config). + * Can be overridden with virtio_crypto_set_config_size. + */ + vcrypto->config_size = sizeof(struct virtio_crypto_config); +} + +static const TypeInfo virtio_crypto_info = { + .name = TYPE_VIRTIO_CRYPTO, + .parent = TYPE_VIRTIO_DEVICE, + .instance_size = sizeof(VirtIOCrypto), + .instance_init = virtio_crypto_instance_init, + .class_init = virtio_crypto_class_init, +}; + +static void virtio_register_types(void) +{ + type_register_static(&virtio_crypto_info); +} + +type_init(virtio_register_types) diff --git a/hw/virtio/virtio-input-host-pci.c b/hw/virtio/virtio-input-host-pci.c new file mode 100644 index 000000000..0ac360de4 --- /dev/null +++ b/hw/virtio/virtio-input-host-pci.c @@ -0,0 +1,47 @@ +/* + * Virtio input host PCI Bindings + * + * This work is licensed under the terms of the GNU GPL, version 2 or + * (at your option) any later version. See the COPYING file in the + * top-level directory.
+ */ + +#include "qemu/osdep.h" + +#include "virtio-pci.h" +#include "hw/virtio/virtio-input.h" +#include "qemu/module.h" +#include "qom/object.h" + +typedef struct VirtIOInputHostPCI VirtIOInputHostPCI; + +#define TYPE_VIRTIO_INPUT_HOST_PCI "virtio-input-host-pci" +DECLARE_INSTANCE_CHECKER(VirtIOInputHostPCI, VIRTIO_INPUT_HOST_PCI, + TYPE_VIRTIO_INPUT_HOST_PCI) + +struct VirtIOInputHostPCI { + VirtIOPCIProxy parent_obj; + VirtIOInputHost vdev; +}; + +static void virtio_host_initfn(Object *obj) +{ + VirtIOInputHostPCI *dev = VIRTIO_INPUT_HOST_PCI(obj); + + virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev), + TYPE_VIRTIO_INPUT_HOST); +} + +static const VirtioPCIDeviceTypeInfo virtio_input_host_pci_info = { + .generic_name = TYPE_VIRTIO_INPUT_HOST_PCI, + .parent = TYPE_VIRTIO_INPUT_PCI, + .instance_size = sizeof(VirtIOInputHostPCI), + .instance_init = virtio_host_initfn, +}; + +static void virtio_input_host_pci_register(void) +{ + virtio_pci_types_register(&virtio_input_host_pci_info); +} + +type_init(virtio_input_host_pci_register) diff --git a/hw/virtio/virtio-input-pci.c b/hw/virtio/virtio-input-pci.c new file mode 100644 index 000000000..48e9ff38e --- /dev/null +++ b/hw/virtio/virtio-input-pci.c @@ -0,0 +1,155 @@ +/* + * Virtio input PCI Bindings + * + * This work is licensed under the terms of the GNU GPL, version 2 or + * (at your option) any later version. See the COPYING file in the + * top-level directory. + */ + +#include "qemu/osdep.h" + +#include "virtio-pci.h" +#include "hw/qdev-properties.h" +#include "hw/virtio/virtio-input.h" +#include "qemu/module.h" +#include "qom/object.h" + + +/* + * virtio-input-pci: This extends VirtioPCIProxy. + */ +OBJECT_DECLARE_SIMPLE_TYPE(VirtIOInputPCI, VIRTIO_INPUT_PCI) + +struct VirtIOInputPCI { + VirtIOPCIProxy parent_obj; + VirtIOInput vdev; +}; + +#define TYPE_VIRTIO_INPUT_HID_PCI "virtio-input-hid-pci" +#define TYPE_VIRTIO_KEYBOARD_PCI "virtio-keyboard-pci" +#define TYPE_VIRTIO_MOUSE_PCI "virtio-mouse-pci" +#define TYPE_VIRTIO_TABLET_PCI "virtio-tablet-pci" +OBJECT_DECLARE_SIMPLE_TYPE(VirtIOInputHIDPCI, VIRTIO_INPUT_HID_PCI) + +struct VirtIOInputHIDPCI { + VirtIOPCIProxy parent_obj; + VirtIOInputHID vdev; +}; + +static Property virtio_input_pci_properties[] = { + DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2), + DEFINE_PROP_END_OF_LIST(), +}; + +static void virtio_input_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp) +{ + VirtIOInputPCI *vinput = VIRTIO_INPUT_PCI(vpci_dev); + DeviceState *vdev = DEVICE(&vinput->vdev); + + virtio_pci_force_virtio_1(vpci_dev); + qdev_realize(vdev, BUS(&vpci_dev->bus), errp); +} + +static void virtio_input_pci_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass); + PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass); + + device_class_set_props(dc, virtio_input_pci_properties); + k->realize = virtio_input_pci_realize; + set_bit(DEVICE_CATEGORY_INPUT, dc->categories); + + pcidev_k->class_id = PCI_CLASS_INPUT_OTHER; +} + +static void virtio_input_hid_kbd_pci_class_init(ObjectClass *klass, void *data) +{ + PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass); + + pcidev_k->class_id = PCI_CLASS_INPUT_KEYBOARD; +} + +static void virtio_input_hid_mouse_pci_class_init(ObjectClass *klass, + void *data) +{ + PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass); + + pcidev_k->class_id = PCI_CLASS_INPUT_MOUSE; +} + +static void virtio_keyboard_initfn(Object *obj) +{ + VirtIOInputHIDPCI *dev = VIRTIO_INPUT_HID_PCI(obj); + 
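+ /* instantiate the embedded child device as the keyboard variant of the HID device */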
+ virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev), + TYPE_VIRTIO_KEYBOARD); +} + +static void virtio_mouse_initfn(Object *obj) +{ + VirtIOInputHIDPCI *dev = VIRTIO_INPUT_HID_PCI(obj); + + virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev), + TYPE_VIRTIO_MOUSE); +} + +static void virtio_tablet_initfn(Object *obj) +{ + VirtIOInputHIDPCI *dev = VIRTIO_INPUT_HID_PCI(obj); + + virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev), + TYPE_VIRTIO_TABLET); +} + +static const TypeInfo virtio_input_pci_info = { + .name = TYPE_VIRTIO_INPUT_PCI, + .parent = TYPE_VIRTIO_PCI, + .instance_size = sizeof(VirtIOInputPCI), + .class_init = virtio_input_pci_class_init, + .abstract = true, +}; + +static const TypeInfo virtio_input_hid_pci_info = { + .name = TYPE_VIRTIO_INPUT_HID_PCI, + .parent = TYPE_VIRTIO_INPUT_PCI, + .instance_size = sizeof(VirtIOInputHIDPCI), + .abstract = true, +}; + +static const VirtioPCIDeviceTypeInfo virtio_keyboard_pci_info = { + .generic_name = TYPE_VIRTIO_KEYBOARD_PCI, + .parent = TYPE_VIRTIO_INPUT_HID_PCI, + .class_init = virtio_input_hid_kbd_pci_class_init, + .instance_size = sizeof(VirtIOInputHIDPCI), + .instance_init = virtio_keyboard_initfn, +}; + +static const VirtioPCIDeviceTypeInfo virtio_mouse_pci_info = { + .generic_name = TYPE_VIRTIO_MOUSE_PCI, + .parent = TYPE_VIRTIO_INPUT_HID_PCI, + .class_init = virtio_input_hid_mouse_pci_class_init, + .instance_size = sizeof(VirtIOInputHIDPCI), + .instance_init = virtio_mouse_initfn, +}; + +static const VirtioPCIDeviceTypeInfo virtio_tablet_pci_info = { + .generic_name = TYPE_VIRTIO_TABLET_PCI, + .parent = TYPE_VIRTIO_INPUT_HID_PCI, + .instance_size = sizeof(VirtIOInputHIDPCI), + .instance_init = virtio_tablet_initfn, +}; + +static void virtio_pci_input_register(void) +{ + /* Base types: */ + type_register_static(&virtio_input_pci_info); + type_register_static(&virtio_input_hid_pci_info); + + /* Implementations: */ + virtio_pci_types_register(&virtio_keyboard_pci_info); + virtio_pci_types_register(&virtio_mouse_pci_info); + virtio_pci_types_register(&virtio_tablet_pci_info); +} + +type_init(virtio_pci_input_register) diff --git a/hw/virtio/virtio-iommu-pci.c b/hw/virtio/virtio-iommu-pci.c new file mode 100644 index 000000000..a160ae6b4 --- /dev/null +++ b/hw/virtio/virtio-iommu-pci.c @@ -0,0 +1,114 @@ +/* + * Virtio IOMMU PCI Bindings + * + * Copyright (c) 2019 Red Hat, Inc. + * Written by Eric Auger + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 or + * (at your option) any later version. + */ + +#include "qemu/osdep.h" + +#include "virtio-pci.h" +#include "hw/virtio/virtio-iommu.h" +#include "hw/qdev-properties.h" +#include "hw/qdev-properties-system.h" +#include "qapi/error.h" +#include "hw/boards.h" +#include "qom/object.h" + +typedef struct VirtIOIOMMUPCI VirtIOIOMMUPCI; + +/* + * virtio-iommu-pci: This extends VirtioPCIProxy. 
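+ * The embedded TYPE_VIRTIO_IOMMU child provides the IOMMU model itself; the
+ * proxy only contributes the PCI transport and its properties.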
+ * + */ +DECLARE_INSTANCE_CHECKER(VirtIOIOMMUPCI, VIRTIO_IOMMU_PCI, + TYPE_VIRTIO_IOMMU_PCI) + +struct VirtIOIOMMUPCI { + VirtIOPCIProxy parent_obj; + VirtIOIOMMU vdev; +}; + +static Property virtio_iommu_pci_properties[] = { + DEFINE_PROP_UINT32("class", VirtIOPCIProxy, class_code, 0), + DEFINE_PROP_ARRAY("reserved-regions", VirtIOIOMMUPCI, + vdev.nb_reserved_regions, vdev.reserved_regions, + qdev_prop_reserved_region, ReservedRegion), + DEFINE_PROP_END_OF_LIST(), +}; + +static void virtio_iommu_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp) +{ + VirtIOIOMMUPCI *dev = VIRTIO_IOMMU_PCI(vpci_dev); + DeviceState *vdev = DEVICE(&dev->vdev); + VirtIOIOMMU *s = VIRTIO_IOMMU(vdev); + + if (!qdev_get_machine_hotplug_handler(DEVICE(vpci_dev))) { + MachineClass *mc = MACHINE_GET_CLASS(qdev_get_machine()); + + error_setg(errp, + "%s machine fails to create iommu-map device tree bindings", + mc->name); + error_append_hint(errp, + "Check your machine implements a hotplug handler " + "for the virtio-iommu-pci device\n"); + error_append_hint(errp, "Check the guest is booted without FW or with " + "-no-acpi\n"); + return; + } + for (int i = 0; i < s->nb_reserved_regions; i++) { + if (s->reserved_regions[i].type != VIRTIO_IOMMU_RESV_MEM_T_RESERVED && + s->reserved_regions[i].type != VIRTIO_IOMMU_RESV_MEM_T_MSI) { + error_setg(errp, "reserved region %d has an invalid type", i); + error_append_hint(errp, "Valid values are 0 and 1\n"); + } + } + object_property_set_link(OBJECT(dev), "primary-bus", + OBJECT(pci_get_bus(&vpci_dev->pci_dev)), + &error_abort); + virtio_pci_force_virtio_1(vpci_dev); + qdev_realize(vdev, BUS(&vpci_dev->bus), errp); +} + +static void virtio_iommu_pci_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass); + PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass); + k->realize = virtio_iommu_pci_realize; + set_bit(DEVICE_CATEGORY_MISC, dc->categories); + device_class_set_props(dc, virtio_iommu_pci_properties); + pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET; + pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_IOMMU; + pcidev_k->revision = VIRTIO_PCI_ABI_VERSION; + pcidev_k->class_id = PCI_CLASS_OTHERS; + dc->hotpluggable = false; +} + +static void virtio_iommu_pci_instance_init(Object *obj) +{ + VirtIOIOMMUPCI *dev = VIRTIO_IOMMU_PCI(obj); + + virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev), + TYPE_VIRTIO_IOMMU); +} + +static const VirtioPCIDeviceTypeInfo virtio_iommu_pci_info = { + .generic_name = TYPE_VIRTIO_IOMMU_PCI, + .instance_size = sizeof(VirtIOIOMMUPCI), + .instance_init = virtio_iommu_pci_instance_init, + .class_init = virtio_iommu_pci_class_init, +}; + +static void virtio_iommu_pci_register(void) +{ + virtio_pci_types_register(&virtio_iommu_pci_info); +} + +type_init(virtio_iommu_pci_register) + + diff --git a/hw/virtio/virtio-iommu.c b/hw/virtio/virtio-iommu.c new file mode 100644 index 000000000..1b23e8e18 --- /dev/null +++ b/hw/virtio/virtio-iommu.c @@ -0,0 +1,1225 @@ +/* + * virtio-iommu device + * + * Copyright (c) 2020 Red Hat, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + * + */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "qemu/iov.h" +#include "qemu-common.h" +#include "hw/qdev-properties.h" +#include "hw/virtio/virtio.h" +#include "sysemu/kvm.h" +#include "qapi/error.h" +#include "qemu/error-report.h" +#include "trace.h" + +#include "standard-headers/linux/virtio_ids.h" + +#include "hw/virtio/virtio-bus.h" +#include "hw/virtio/virtio-access.h" +#include "hw/virtio/virtio-iommu.h" +#include "hw/pci/pci_bus.h" +#include "hw/pci/pci.h" + +/* Max size */ +#define VIOMMU_DEFAULT_QUEUE_SIZE 256 +#define VIOMMU_PROBE_SIZE 512 + +typedef struct VirtIOIOMMUDomain { + uint32_t id; + GTree *mappings; + QLIST_HEAD(, VirtIOIOMMUEndpoint) endpoint_list; +} VirtIOIOMMUDomain; + +typedef struct VirtIOIOMMUEndpoint { + uint32_t id; + VirtIOIOMMUDomain *domain; + IOMMUMemoryRegion *iommu_mr; + QLIST_ENTRY(VirtIOIOMMUEndpoint) next; +} VirtIOIOMMUEndpoint; + +typedef struct VirtIOIOMMUInterval { + uint64_t low; + uint64_t high; +} VirtIOIOMMUInterval; + +typedef struct VirtIOIOMMUMapping { + uint64_t phys_addr; + uint32_t flags; +} VirtIOIOMMUMapping; + +static inline uint16_t virtio_iommu_get_bdf(IOMMUDevice *dev) +{ + return PCI_BUILD_BDF(pci_bus_num(dev->bus), dev->devfn); +} + +/** + * The bus number is used for lookup when SID based operations occur. + * In that case we lazily populate the IOMMUPciBus array from the bus hash + * table. At the time the IOMMUPciBus is created (iommu_find_add_as), the bus + * numbers may not be always initialized yet. + */ +static IOMMUPciBus *iommu_find_iommu_pcibus(VirtIOIOMMU *s, uint8_t bus_num) +{ + IOMMUPciBus *iommu_pci_bus = s->iommu_pcibus_by_bus_num[bus_num]; + + if (!iommu_pci_bus) { + GHashTableIter iter; + + g_hash_table_iter_init(&iter, s->as_by_busptr); + while (g_hash_table_iter_next(&iter, NULL, (void **)&iommu_pci_bus)) { + if (pci_bus_num(iommu_pci_bus->bus) == bus_num) { + s->iommu_pcibus_by_bus_num[bus_num] = iommu_pci_bus; + return iommu_pci_bus; + } + } + return NULL; + } + return iommu_pci_bus; +} + +static IOMMUMemoryRegion *virtio_iommu_mr(VirtIOIOMMU *s, uint32_t sid) +{ + uint8_t bus_n, devfn; + IOMMUPciBus *iommu_pci_bus; + IOMMUDevice *dev; + + bus_n = PCI_BUS_NUM(sid); + iommu_pci_bus = iommu_find_iommu_pcibus(s, bus_n); + if (iommu_pci_bus) { + devfn = sid & (PCI_DEVFN_MAX - 1); + dev = iommu_pci_bus->pbdev[devfn]; + if (dev) { + return &dev->iommu_mr; + } + } + return NULL; +} + +static gint interval_cmp(gconstpointer a, gconstpointer b, gpointer user_data) +{ + VirtIOIOMMUInterval *inta = (VirtIOIOMMUInterval *)a; + VirtIOIOMMUInterval *intb = (VirtIOIOMMUInterval *)b; + + if (inta->high < intb->low) { + return -1; + } else if (intb->high < inta->low) { + return 1; + } else { + return 0; + } +} + +static void virtio_iommu_notify_map(IOMMUMemoryRegion *mr, hwaddr virt_start, + hwaddr virt_end, hwaddr paddr, + uint32_t flags) +{ + IOMMUTLBEvent event; + IOMMUAccessFlags perm = IOMMU_ACCESS_FLAG(flags & VIRTIO_IOMMU_MAP_F_READ, + flags & VIRTIO_IOMMU_MAP_F_WRITE); + + if (!(mr->iommu_notify_flags & IOMMU_NOTIFIER_MAP) || + (flags & VIRTIO_IOMMU_MAP_F_MMIO) || !perm) { + return; + } + + trace_virtio_iommu_notify_map(mr->parent_obj.name, virt_start, virt_end, + paddr, perm); + + event.type = IOMMU_NOTIFIER_MAP; + event.entry.target_as = &address_space_memory; + event.entry.addr_mask = virt_end - virt_start; + 
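+ /* addr_mask is an inclusive mask: the event covers [iova, iova + addr_mask] */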
event.entry.iova = virt_start; + event.entry.perm = perm; + event.entry.translated_addr = paddr; + + memory_region_notify_iommu(mr, 0, event); +} + +static void virtio_iommu_notify_unmap(IOMMUMemoryRegion *mr, hwaddr virt_start, + hwaddr virt_end) +{ + IOMMUTLBEvent event; + uint64_t delta = virt_end - virt_start; + + if (!(mr->iommu_notify_flags & IOMMU_NOTIFIER_UNMAP)) { + return; + } + + trace_virtio_iommu_notify_unmap(mr->parent_obj.name, virt_start, virt_end); + + event.type = IOMMU_NOTIFIER_UNMAP; + event.entry.target_as = &address_space_memory; + event.entry.perm = IOMMU_NONE; + event.entry.translated_addr = 0; + event.entry.addr_mask = delta; + event.entry.iova = virt_start; + + if (delta == UINT64_MAX) { + memory_region_notify_iommu(mr, 0, event); + } + + + while (virt_start != virt_end + 1) { + uint64_t mask = dma_aligned_pow2_mask(virt_start, virt_end, 64); + + event.entry.addr_mask = mask; + event.entry.iova = virt_start; + memory_region_notify_iommu(mr, 0, event); + virt_start += mask + 1; + } +} + +static gboolean virtio_iommu_notify_unmap_cb(gpointer key, gpointer value, + gpointer data) +{ + VirtIOIOMMUInterval *interval = (VirtIOIOMMUInterval *) key; + IOMMUMemoryRegion *mr = (IOMMUMemoryRegion *) data; + + virtio_iommu_notify_unmap(mr, interval->low, interval->high); + + return false; +} + +static gboolean virtio_iommu_notify_map_cb(gpointer key, gpointer value, + gpointer data) +{ + VirtIOIOMMUMapping *mapping = (VirtIOIOMMUMapping *) value; + VirtIOIOMMUInterval *interval = (VirtIOIOMMUInterval *) key; + IOMMUMemoryRegion *mr = (IOMMUMemoryRegion *) data; + + virtio_iommu_notify_map(mr, interval->low, interval->high, + mapping->phys_addr, mapping->flags); + + return false; +} + +static void virtio_iommu_detach_endpoint_from_domain(VirtIOIOMMUEndpoint *ep) +{ + VirtIOIOMMUDomain *domain = ep->domain; + + if (!ep->domain) { + return; + } + g_tree_foreach(domain->mappings, virtio_iommu_notify_unmap_cb, + ep->iommu_mr); + QLIST_REMOVE(ep, next); + ep->domain = NULL; +} + +static VirtIOIOMMUEndpoint *virtio_iommu_get_endpoint(VirtIOIOMMU *s, + uint32_t ep_id) +{ + VirtIOIOMMUEndpoint *ep; + IOMMUMemoryRegion *mr; + + ep = g_tree_lookup(s->endpoints, GUINT_TO_POINTER(ep_id)); + if (ep) { + return ep; + } + mr = virtio_iommu_mr(s, ep_id); + if (!mr) { + return NULL; + } + ep = g_malloc0(sizeof(*ep)); + ep->id = ep_id; + ep->iommu_mr = mr; + trace_virtio_iommu_get_endpoint(ep_id); + g_tree_insert(s->endpoints, GUINT_TO_POINTER(ep_id), ep); + return ep; +} + +static void virtio_iommu_put_endpoint(gpointer data) +{ + VirtIOIOMMUEndpoint *ep = (VirtIOIOMMUEndpoint *)data; + + if (ep->domain) { + virtio_iommu_detach_endpoint_from_domain(ep); + } + + trace_virtio_iommu_put_endpoint(ep->id); + g_free(ep); +} + +static VirtIOIOMMUDomain *virtio_iommu_get_domain(VirtIOIOMMU *s, + uint32_t domain_id) +{ + VirtIOIOMMUDomain *domain; + + domain = g_tree_lookup(s->domains, GUINT_TO_POINTER(domain_id)); + if (domain) { + return domain; + } + domain = g_malloc0(sizeof(*domain)); + domain->id = domain_id; + domain->mappings = g_tree_new_full((GCompareDataFunc)interval_cmp, + NULL, (GDestroyNotify)g_free, + (GDestroyNotify)g_free); + g_tree_insert(s->domains, GUINT_TO_POINTER(domain_id), domain); + QLIST_INIT(&domain->endpoint_list); + trace_virtio_iommu_get_domain(domain_id); + return domain; +} + +static void virtio_iommu_put_domain(gpointer data) +{ + VirtIOIOMMUDomain *domain = (VirtIOIOMMUDomain *)data; + VirtIOIOMMUEndpoint *iter, *tmp; + + QLIST_FOREACH_SAFE(iter, 
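+ /* the safe iterator is required: detaching unlinks the endpoint from this list */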
&domain->endpoint_list, next, tmp) { + virtio_iommu_detach_endpoint_from_domain(iter); + } + g_tree_destroy(domain->mappings); + trace_virtio_iommu_put_domain(domain->id); + g_free(domain); +} + +static AddressSpace *virtio_iommu_find_add_as(PCIBus *bus, void *opaque, + int devfn) +{ + VirtIOIOMMU *s = opaque; + IOMMUPciBus *sbus = g_hash_table_lookup(s->as_by_busptr, bus); + static uint32_t mr_index; + IOMMUDevice *sdev; + + if (!sbus) { + sbus = g_malloc0(sizeof(IOMMUPciBus) + + sizeof(IOMMUDevice *) * PCI_DEVFN_MAX); + sbus->bus = bus; + g_hash_table_insert(s->as_by_busptr, bus, sbus); + } + + sdev = sbus->pbdev[devfn]; + if (!sdev) { + char *name = g_strdup_printf("%s-%d-%d", + TYPE_VIRTIO_IOMMU_MEMORY_REGION, + mr_index++, devfn); + sdev = sbus->pbdev[devfn] = g_malloc0(sizeof(IOMMUDevice)); + + sdev->viommu = s; + sdev->bus = bus; + sdev->devfn = devfn; + + trace_virtio_iommu_init_iommu_mr(name); + + memory_region_init_iommu(&sdev->iommu_mr, sizeof(sdev->iommu_mr), + TYPE_VIRTIO_IOMMU_MEMORY_REGION, + OBJECT(s), name, + UINT64_MAX); + address_space_init(&sdev->as, + MEMORY_REGION(&sdev->iommu_mr), TYPE_VIRTIO_IOMMU); + g_free(name); + } + return &sdev->as; +} + +static int virtio_iommu_attach(VirtIOIOMMU *s, + struct virtio_iommu_req_attach *req) +{ + uint32_t domain_id = le32_to_cpu(req->domain); + uint32_t ep_id = le32_to_cpu(req->endpoint); + VirtIOIOMMUDomain *domain; + VirtIOIOMMUEndpoint *ep; + + trace_virtio_iommu_attach(domain_id, ep_id); + + ep = virtio_iommu_get_endpoint(s, ep_id); + if (!ep) { + return VIRTIO_IOMMU_S_NOENT; + } + + if (ep->domain) { + VirtIOIOMMUDomain *previous_domain = ep->domain; + /* + * the device is already attached to a domain, + * detach it first + */ + virtio_iommu_detach_endpoint_from_domain(ep); + if (QLIST_EMPTY(&previous_domain->endpoint_list)) { + g_tree_remove(s->domains, GUINT_TO_POINTER(previous_domain->id)); + } + } + + domain = virtio_iommu_get_domain(s, domain_id); + QLIST_INSERT_HEAD(&domain->endpoint_list, ep, next); + + ep->domain = domain; + + /* Replay domain mappings on the associated memory region */ + g_tree_foreach(domain->mappings, virtio_iommu_notify_map_cb, + ep->iommu_mr); + + return VIRTIO_IOMMU_S_OK; +} + +static int virtio_iommu_detach(VirtIOIOMMU *s, + struct virtio_iommu_req_detach *req) +{ + uint32_t domain_id = le32_to_cpu(req->domain); + uint32_t ep_id = le32_to_cpu(req->endpoint); + VirtIOIOMMUDomain *domain; + VirtIOIOMMUEndpoint *ep; + + trace_virtio_iommu_detach(domain_id, ep_id); + + ep = g_tree_lookup(s->endpoints, GUINT_TO_POINTER(ep_id)); + if (!ep) { + return VIRTIO_IOMMU_S_NOENT; + } + + domain = ep->domain; + + if (!domain || domain->id != domain_id) { + return VIRTIO_IOMMU_S_INVAL; + } + + virtio_iommu_detach_endpoint_from_domain(ep); + + if (QLIST_EMPTY(&domain->endpoint_list)) { + g_tree_remove(s->domains, GUINT_TO_POINTER(domain->id)); + } + return VIRTIO_IOMMU_S_OK; +} + +static int virtio_iommu_map(VirtIOIOMMU *s, + struct virtio_iommu_req_map *req) +{ + uint32_t domain_id = le32_to_cpu(req->domain); + uint64_t phys_start = le64_to_cpu(req->phys_start); + uint64_t virt_start = le64_to_cpu(req->virt_start); + uint64_t virt_end = le64_to_cpu(req->virt_end); + uint32_t flags = le32_to_cpu(req->flags); + VirtIOIOMMUDomain *domain; + VirtIOIOMMUInterval *interval; + VirtIOIOMMUMapping *mapping; + VirtIOIOMMUEndpoint *ep; + + if (flags & ~VIRTIO_IOMMU_MAP_F_MASK) { + return VIRTIO_IOMMU_S_INVAL; + } + + domain = g_tree_lookup(s->domains, GUINT_TO_POINTER(domain_id)); + if (!domain) { + return 
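+ /* unknown domain id */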
VIRTIO_IOMMU_S_NOENT; + } + + interval = g_malloc0(sizeof(*interval)); + + interval->low = virt_start; + interval->high = virt_end; + + mapping = g_tree_lookup(domain->mappings, (gpointer)interval); + if (mapping) { + g_free(interval); + return VIRTIO_IOMMU_S_INVAL; + } + + trace_virtio_iommu_map(domain_id, virt_start, virt_end, phys_start, flags); + + mapping = g_malloc0(sizeof(*mapping)); + mapping->phys_addr = phys_start; + mapping->flags = flags; + + g_tree_insert(domain->mappings, interval, mapping); + + QLIST_FOREACH(ep, &domain->endpoint_list, next) { + virtio_iommu_notify_map(ep->iommu_mr, virt_start, virt_end, phys_start, + flags); + } + + return VIRTIO_IOMMU_S_OK; +} + +static int virtio_iommu_unmap(VirtIOIOMMU *s, + struct virtio_iommu_req_unmap *req) +{ + uint32_t domain_id = le32_to_cpu(req->domain); + uint64_t virt_start = le64_to_cpu(req->virt_start); + uint64_t virt_end = le64_to_cpu(req->virt_end); + VirtIOIOMMUMapping *iter_val; + VirtIOIOMMUInterval interval, *iter_key; + VirtIOIOMMUDomain *domain; + VirtIOIOMMUEndpoint *ep; + int ret = VIRTIO_IOMMU_S_OK; + + trace_virtio_iommu_unmap(domain_id, virt_start, virt_end); + + domain = g_tree_lookup(s->domains, GUINT_TO_POINTER(domain_id)); + if (!domain) { + return VIRTIO_IOMMU_S_NOENT; + } + interval.low = virt_start; + interval.high = virt_end; + + while (g_tree_lookup_extended(domain->mappings, &interval, + (void **)&iter_key, (void**)&iter_val)) { + uint64_t current_low = iter_key->low; + uint64_t current_high = iter_key->high; + + if (interval.low <= current_low && interval.high >= current_high) { + QLIST_FOREACH(ep, &domain->endpoint_list, next) { + virtio_iommu_notify_unmap(ep->iommu_mr, current_low, + current_high); + } + g_tree_remove(domain->mappings, iter_key); + trace_virtio_iommu_unmap_done(domain_id, current_low, current_high); + } else { + ret = VIRTIO_IOMMU_S_RANGE; + break; + } + } + return ret; +} + +static ssize_t virtio_iommu_fill_resv_mem_prop(VirtIOIOMMU *s, uint32_t ep, + uint8_t *buf, size_t free) +{ + struct virtio_iommu_probe_resv_mem prop = {}; + size_t size = sizeof(prop), length = size - sizeof(prop.head), total; + int i; + + total = size * s->nb_reserved_regions; + + if (total > free) { + return -ENOSPC; + } + + for (i = 0; i < s->nb_reserved_regions; i++) { + unsigned subtype = s->reserved_regions[i].type; + + assert(subtype == VIRTIO_IOMMU_RESV_MEM_T_RESERVED || + subtype == VIRTIO_IOMMU_RESV_MEM_T_MSI); + prop.head.type = cpu_to_le16(VIRTIO_IOMMU_PROBE_T_RESV_MEM); + prop.head.length = cpu_to_le16(length); + prop.subtype = subtype; + prop.start = cpu_to_le64(s->reserved_regions[i].low); + prop.end = cpu_to_le64(s->reserved_regions[i].high); + + memcpy(buf, &prop, size); + + trace_virtio_iommu_fill_resv_property(ep, prop.subtype, + prop.start, prop.end); + buf += size; + } + return total; +} + +/** + * virtio_iommu_probe - Fill the probe request buffer with + * the properties the device is able to return + */ +static int virtio_iommu_probe(VirtIOIOMMU *s, + struct virtio_iommu_req_probe *req, + uint8_t *buf) +{ + uint32_t ep_id = le32_to_cpu(req->endpoint); + size_t free = VIOMMU_PROBE_SIZE; + ssize_t count; + + if (!virtio_iommu_mr(s, ep_id)) { + return VIRTIO_IOMMU_S_NOENT; + } + + count = virtio_iommu_fill_resv_mem_prop(s, ep_id, buf, free); + if (count < 0) { + return VIRTIO_IOMMU_S_INVAL; + } + buf += count; + free -= count; + + return VIRTIO_IOMMU_S_OK; +} + +static int virtio_iommu_iov_to_req(struct iovec *iov, + unsigned int iov_cnt, + void *req, size_t req_sz) +{ + size_t sz, 
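+ /* the payload excludes the device-written tail (status) at the end of the request */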
payload_sz = req_sz - sizeof(struct virtio_iommu_req_tail); + + sz = iov_to_buf(iov, iov_cnt, 0, req, payload_sz); + if (unlikely(sz != payload_sz)) { + return VIRTIO_IOMMU_S_INVAL; + } + return 0; +} + +#define virtio_iommu_handle_req(__req) \ +static int virtio_iommu_handle_ ## __req(VirtIOIOMMU *s, \ + struct iovec *iov, \ + unsigned int iov_cnt) \ +{ \ + struct virtio_iommu_req_ ## __req req; \ + int ret = virtio_iommu_iov_to_req(iov, iov_cnt, &req, sizeof(req)); \ + \ + return ret ? ret : virtio_iommu_ ## __req(s, &req); \ +} + +virtio_iommu_handle_req(attach) +virtio_iommu_handle_req(detach) +virtio_iommu_handle_req(map) +virtio_iommu_handle_req(unmap) + +static int virtio_iommu_handle_probe(VirtIOIOMMU *s, + struct iovec *iov, + unsigned int iov_cnt, + uint8_t *buf) +{ + struct virtio_iommu_req_probe req; + int ret = virtio_iommu_iov_to_req(iov, iov_cnt, &req, sizeof(req)); + + return ret ? ret : virtio_iommu_probe(s, &req, buf); +} + +static void virtio_iommu_handle_command(VirtIODevice *vdev, VirtQueue *vq) +{ + VirtIOIOMMU *s = VIRTIO_IOMMU(vdev); + struct virtio_iommu_req_head head; + struct virtio_iommu_req_tail tail = {}; + size_t output_size = sizeof(tail), sz; + VirtQueueElement *elem; + unsigned int iov_cnt; + struct iovec *iov; + void *buf = NULL; + + for (;;) { + elem = virtqueue_pop(vq, sizeof(VirtQueueElement)); + if (!elem) { + return; + } + + if (iov_size(elem->in_sg, elem->in_num) < sizeof(tail) || + iov_size(elem->out_sg, elem->out_num) < sizeof(head)) { + virtio_error(vdev, "virtio-iommu bad head/tail size"); + virtqueue_detach_element(vq, elem, 0); + g_free(elem); + break; + } + + iov_cnt = elem->out_num; + iov = elem->out_sg; + sz = iov_to_buf(iov, iov_cnt, 0, &head, sizeof(head)); + if (unlikely(sz != sizeof(head))) { + tail.status = VIRTIO_IOMMU_S_DEVERR; + goto out; + } + qemu_mutex_lock(&s->mutex); + switch (head.type) { + case VIRTIO_IOMMU_T_ATTACH: + tail.status = virtio_iommu_handle_attach(s, iov, iov_cnt); + break; + case VIRTIO_IOMMU_T_DETACH: + tail.status = virtio_iommu_handle_detach(s, iov, iov_cnt); + break; + case VIRTIO_IOMMU_T_MAP: + tail.status = virtio_iommu_handle_map(s, iov, iov_cnt); + break; + case VIRTIO_IOMMU_T_UNMAP: + tail.status = virtio_iommu_handle_unmap(s, iov, iov_cnt); + break; + case VIRTIO_IOMMU_T_PROBE: + { + struct virtio_iommu_req_tail *ptail; + + output_size = s->config.probe_size + sizeof(tail); + buf = g_malloc0(output_size); + + ptail = (struct virtio_iommu_req_tail *) + (buf + s->config.probe_size); + ptail->status = virtio_iommu_handle_probe(s, iov, iov_cnt, buf); + break; + } + default: + tail.status = VIRTIO_IOMMU_S_UNSUPP; + } + qemu_mutex_unlock(&s->mutex); + +out: + sz = iov_from_buf(elem->in_sg, elem->in_num, 0, + buf ? 
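+ /* only PROBE replies carry a payload buffer; all other replies are just the tail */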
buf : &tail, output_size); + assert(sz == output_size); + + virtqueue_push(vq, elem, sz); + virtio_notify(vdev, vq); + g_free(elem); + g_free(buf); + } +} + +static void virtio_iommu_report_fault(VirtIOIOMMU *viommu, uint8_t reason, + int flags, uint32_t endpoint, + uint64_t address) +{ + VirtIODevice *vdev = &viommu->parent_obj; + VirtQueue *vq = viommu->event_vq; + struct virtio_iommu_fault fault; + VirtQueueElement *elem; + size_t sz; + + memset(&fault, 0, sizeof(fault)); + fault.reason = reason; + fault.flags = cpu_to_le32(flags); + fault.endpoint = cpu_to_le32(endpoint); + fault.address = cpu_to_le64(address); + + elem = virtqueue_pop(vq, sizeof(VirtQueueElement)); + + if (!elem) { + error_report_once( + "no buffer available in event queue to report event"); + return; + } + + if (iov_size(elem->in_sg, elem->in_num) < sizeof(fault)) { + virtio_error(vdev, "error buffer of wrong size"); + virtqueue_detach_element(vq, elem, 0); + g_free(elem); + return; + } + + sz = iov_from_buf(elem->in_sg, elem->in_num, 0, + &fault, sizeof(fault)); + assert(sz == sizeof(fault)); + + trace_virtio_iommu_report_fault(reason, flags, endpoint, address); + virtqueue_push(vq, elem, sz); + virtio_notify(vdev, vq); + g_free(elem); + +} + +static IOMMUTLBEntry virtio_iommu_translate(IOMMUMemoryRegion *mr, hwaddr addr, + IOMMUAccessFlags flag, + int iommu_idx) +{ + IOMMUDevice *sdev = container_of(mr, IOMMUDevice, iommu_mr); + VirtIOIOMMUInterval interval, *mapping_key; + VirtIOIOMMUMapping *mapping_value; + VirtIOIOMMU *s = sdev->viommu; + bool read_fault, write_fault; + VirtIOIOMMUEndpoint *ep; + uint32_t sid, flags; + bool bypass_allowed; + bool found; + int i; + + interval.low = addr; + interval.high = addr + 1; + + IOMMUTLBEntry entry = { + .target_as = &address_space_memory, + .iova = addr, + .translated_addr = addr, + .addr_mask = (1 << ctz32(s->config.page_size_mask)) - 1, + .perm = IOMMU_NONE, + }; + + bypass_allowed = virtio_vdev_has_feature(&s->parent_obj, + VIRTIO_IOMMU_F_BYPASS); + + sid = virtio_iommu_get_bdf(sdev); + + trace_virtio_iommu_translate(mr->parent_obj.name, sid, addr, flag); + qemu_mutex_lock(&s->mutex); + + ep = g_tree_lookup(s->endpoints, GUINT_TO_POINTER(sid)); + if (!ep) { + if (!bypass_allowed) { + error_report_once("%s sid=%d is not known!!", __func__, sid); + virtio_iommu_report_fault(s, VIRTIO_IOMMU_FAULT_R_UNKNOWN, + VIRTIO_IOMMU_FAULT_F_ADDRESS, + sid, addr); + } else { + entry.perm = flag; + } + goto unlock; + } + + for (i = 0; i < s->nb_reserved_regions; i++) { + ReservedRegion *reg = &s->reserved_regions[i]; + + if (addr >= reg->low && addr <= reg->high) { + switch (reg->type) { + case VIRTIO_IOMMU_RESV_MEM_T_MSI: + entry.perm = flag; + break; + case VIRTIO_IOMMU_RESV_MEM_T_RESERVED: + default: + virtio_iommu_report_fault(s, VIRTIO_IOMMU_FAULT_R_MAPPING, + VIRTIO_IOMMU_FAULT_F_ADDRESS, + sid, addr); + break; + } + goto unlock; + } + } + + if (!ep->domain) { + if (!bypass_allowed) { + error_report_once("%s %02x:%02x.%01x not attached to any domain", + __func__, PCI_BUS_NUM(sid), + PCI_SLOT(sid), PCI_FUNC(sid)); + virtio_iommu_report_fault(s, VIRTIO_IOMMU_FAULT_R_DOMAIN, + VIRTIO_IOMMU_FAULT_F_ADDRESS, + sid, addr); + } else { + entry.perm = flag; + } + goto unlock; + } + + found = g_tree_lookup_extended(ep->domain->mappings, (gpointer)(&interval), + (void **)&mapping_key, + (void **)&mapping_value); + if (!found) { + error_report_once("%s no mapping for 0x%"PRIx64" for sid=%d", + __func__, addr, sid); + virtio_iommu_report_fault(s, VIRTIO_IOMMU_FAULT_R_MAPPING, + 
VIRTIO_IOMMU_FAULT_F_ADDRESS, + sid, addr); + goto unlock; + } + + read_fault = (flag & IOMMU_RO) && + !(mapping_value->flags & VIRTIO_IOMMU_MAP_F_READ); + write_fault = (flag & IOMMU_WO) && + !(mapping_value->flags & VIRTIO_IOMMU_MAP_F_WRITE); + + flags = read_fault ? VIRTIO_IOMMU_FAULT_F_READ : 0; + flags |= write_fault ? VIRTIO_IOMMU_FAULT_F_WRITE : 0; + if (flags) { + error_report_once("%s permission error on 0x%"PRIx64"(%d): allowed=%d", + __func__, addr, flag, mapping_value->flags); + flags |= VIRTIO_IOMMU_FAULT_F_ADDRESS; + virtio_iommu_report_fault(s, VIRTIO_IOMMU_FAULT_R_MAPPING, + flags | VIRTIO_IOMMU_FAULT_F_ADDRESS, + sid, addr); + goto unlock; + } + entry.translated_addr = addr - mapping_key->low + mapping_value->phys_addr; + entry.perm = flag; + trace_virtio_iommu_translate_out(addr, entry.translated_addr, sid); + +unlock: + qemu_mutex_unlock(&s->mutex); + return entry; +} + +static void virtio_iommu_get_config(VirtIODevice *vdev, uint8_t *config_data) +{ + VirtIOIOMMU *dev = VIRTIO_IOMMU(vdev); + struct virtio_iommu_config *config = &dev->config; + + trace_virtio_iommu_get_config(config->page_size_mask, + config->input_range.start, + config->input_range.end, + config->domain_range.end, + config->probe_size); + memcpy(config_data, &dev->config, sizeof(struct virtio_iommu_config)); +} + +static void virtio_iommu_set_config(VirtIODevice *vdev, + const uint8_t *config_data) +{ + struct virtio_iommu_config config; + + memcpy(&config, config_data, sizeof(struct virtio_iommu_config)); + trace_virtio_iommu_set_config(config.page_size_mask, + config.input_range.start, + config.input_range.end, + config.domain_range.end, + config.probe_size); +} + +static uint64_t virtio_iommu_get_features(VirtIODevice *vdev, uint64_t f, + Error **errp) +{ + VirtIOIOMMU *dev = VIRTIO_IOMMU(vdev); + + f |= dev->features; + trace_virtio_iommu_get_features(f); + return f; +} + +static gint int_cmp(gconstpointer a, gconstpointer b, gpointer user_data) +{ + guint ua = GPOINTER_TO_UINT(a); + guint ub = GPOINTER_TO_UINT(b); + return (ua > ub) - (ua < ub); +} + +static gboolean virtio_iommu_remap(gpointer key, gpointer value, gpointer data) +{ + VirtIOIOMMUMapping *mapping = (VirtIOIOMMUMapping *) value; + VirtIOIOMMUInterval *interval = (VirtIOIOMMUInterval *) key; + IOMMUMemoryRegion *mr = (IOMMUMemoryRegion *) data; + + trace_virtio_iommu_remap(mr->parent_obj.name, interval->low, interval->high, + mapping->phys_addr); + virtio_iommu_notify_map(mr, interval->low, interval->high, + mapping->phys_addr, mapping->flags); + return false; +} + +static void virtio_iommu_replay(IOMMUMemoryRegion *mr, IOMMUNotifier *n) +{ + IOMMUDevice *sdev = container_of(mr, IOMMUDevice, iommu_mr); + VirtIOIOMMU *s = sdev->viommu; + uint32_t sid; + VirtIOIOMMUEndpoint *ep; + + sid = virtio_iommu_get_bdf(sdev); + + qemu_mutex_lock(&s->mutex); + + if (!s->endpoints) { + goto unlock; + } + + ep = g_tree_lookup(s->endpoints, GUINT_TO_POINTER(sid)); + if (!ep || !ep->domain) { + goto unlock; + } + + g_tree_foreach(ep->domain->mappings, virtio_iommu_remap, mr); + +unlock: + qemu_mutex_unlock(&s->mutex); +} + +static int virtio_iommu_notify_flag_changed(IOMMUMemoryRegion *iommu_mr, + IOMMUNotifierFlag old, + IOMMUNotifierFlag new, + Error **errp) +{ + if (new & IOMMU_NOTIFIER_DEVIOTLB_UNMAP) { + error_setg(errp, "Virtio-iommu does not support dev-iotlb yet"); + return -EINVAL; + } + + if (old == IOMMU_NOTIFIER_NONE) { + trace_virtio_iommu_notify_flag_add(iommu_mr->parent_obj.name); + } else if (new == IOMMU_NOTIFIER_NONE) { + 
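+ /* the last notifier for this IOMMU memory region has just been removed */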
trace_virtio_iommu_notify_flag_del(iommu_mr->parent_obj.name); + } + return 0; +} + +/* + * The default mask (TARGET_PAGE_MASK) is the smallest supported guest granule, + * for example 0xfffffffffffff000. When an assigned device has page size + * restrictions due to the hardware IOMMU configuration, apply this restriction + * to the mask. + */ +static int virtio_iommu_set_page_size_mask(IOMMUMemoryRegion *mr, + uint64_t new_mask, + Error **errp) +{ + IOMMUDevice *sdev = container_of(mr, IOMMUDevice, iommu_mr); + VirtIOIOMMU *s = sdev->viommu; + uint64_t cur_mask = s->config.page_size_mask; + + trace_virtio_iommu_set_page_size_mask(mr->parent_obj.name, cur_mask, + new_mask); + + if ((cur_mask & new_mask) == 0) { + error_setg(errp, "virtio-iommu page mask 0x%"PRIx64 + " is incompatible with mask 0x%"PRIx64, cur_mask, new_mask); + return -1; + } + + /* + * After the machine is finalized, we can't change the mask anymore. If by + * chance the hotplugged device supports the same granule, we can still + * accept it. Having a different masks is possible but the guest will use + * sub-optimal block sizes, so warn about it. + */ + if (phase_check(PHASE_MACHINE_READY)) { + int new_granule = ctz64(new_mask); + int cur_granule = ctz64(cur_mask); + + if (new_granule != cur_granule) { + error_setg(errp, "virtio-iommu page mask 0x%"PRIx64 + " is incompatible with mask 0x%"PRIx64, cur_mask, + new_mask); + return -1; + } else if (new_mask != cur_mask) { + warn_report("virtio-iommu page mask 0x%"PRIx64 + " does not match 0x%"PRIx64, cur_mask, new_mask); + } + return 0; + } + + s->config.page_size_mask &= new_mask; + return 0; +} + +static void virtio_iommu_device_realize(DeviceState *dev, Error **errp) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(dev); + VirtIOIOMMU *s = VIRTIO_IOMMU(dev); + + virtio_init(vdev, "virtio-iommu", VIRTIO_ID_IOMMU, + sizeof(struct virtio_iommu_config)); + + memset(s->iommu_pcibus_by_bus_num, 0, sizeof(s->iommu_pcibus_by_bus_num)); + + s->req_vq = virtio_add_queue(vdev, VIOMMU_DEFAULT_QUEUE_SIZE, + virtio_iommu_handle_command); + s->event_vq = virtio_add_queue(vdev, VIOMMU_DEFAULT_QUEUE_SIZE, NULL); + + s->config.page_size_mask = TARGET_PAGE_MASK; + s->config.input_range.end = -1UL; + s->config.domain_range.end = 32; + s->config.probe_size = VIOMMU_PROBE_SIZE; + + virtio_add_feature(&s->features, VIRTIO_RING_F_EVENT_IDX); + virtio_add_feature(&s->features, VIRTIO_RING_F_INDIRECT_DESC); + virtio_add_feature(&s->features, VIRTIO_F_VERSION_1); + virtio_add_feature(&s->features, VIRTIO_IOMMU_F_INPUT_RANGE); + virtio_add_feature(&s->features, VIRTIO_IOMMU_F_DOMAIN_RANGE); + virtio_add_feature(&s->features, VIRTIO_IOMMU_F_MAP_UNMAP); + virtio_add_feature(&s->features, VIRTIO_IOMMU_F_BYPASS); + virtio_add_feature(&s->features, VIRTIO_IOMMU_F_MMIO); + virtio_add_feature(&s->features, VIRTIO_IOMMU_F_PROBE); + + qemu_mutex_init(&s->mutex); + + s->as_by_busptr = g_hash_table_new_full(NULL, NULL, NULL, g_free); + + if (s->primary_bus) { + pci_setup_iommu(s->primary_bus, virtio_iommu_find_add_as, s); + } else { + error_setg(errp, "VIRTIO-IOMMU is not attached to any PCI bus!"); + } +} + +static void virtio_iommu_device_unrealize(DeviceState *dev) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(dev); + VirtIOIOMMU *s = VIRTIO_IOMMU(dev); + + g_hash_table_destroy(s->as_by_busptr); + if (s->domains) { + g_tree_destroy(s->domains); + } + if (s->endpoints) { + g_tree_destroy(s->endpoints); + } + + virtio_delete_queue(s->req_vq); + virtio_delete_queue(s->event_vq); + virtio_cleanup(vdev); +} + +static void 
virtio_iommu_device_reset(VirtIODevice *vdev) +{ + VirtIOIOMMU *s = VIRTIO_IOMMU(vdev); + + trace_virtio_iommu_device_reset(); + + if (s->domains) { + g_tree_destroy(s->domains); + } + if (s->endpoints) { + g_tree_destroy(s->endpoints); + } + s->domains = g_tree_new_full((GCompareDataFunc)int_cmp, + NULL, NULL, virtio_iommu_put_domain); + s->endpoints = g_tree_new_full((GCompareDataFunc)int_cmp, + NULL, NULL, virtio_iommu_put_endpoint); +} + +static void virtio_iommu_set_status(VirtIODevice *vdev, uint8_t status) +{ + trace_virtio_iommu_device_status(status); +} + +static void virtio_iommu_instance_init(Object *obj) +{ +} + +#define VMSTATE_INTERVAL \ +{ \ + .name = "interval", \ + .version_id = 1, \ + .minimum_version_id = 1, \ + .fields = (VMStateField[]) { \ + VMSTATE_UINT64(low, VirtIOIOMMUInterval), \ + VMSTATE_UINT64(high, VirtIOIOMMUInterval), \ + VMSTATE_END_OF_LIST() \ + } \ +} + +#define VMSTATE_MAPPING \ +{ \ + .name = "mapping", \ + .version_id = 1, \ + .minimum_version_id = 1, \ + .fields = (VMStateField[]) { \ + VMSTATE_UINT64(phys_addr, VirtIOIOMMUMapping),\ + VMSTATE_UINT32(flags, VirtIOIOMMUMapping), \ + VMSTATE_END_OF_LIST() \ + }, \ +} + +static const VMStateDescription vmstate_interval_mapping[2] = { + VMSTATE_MAPPING, /* value */ + VMSTATE_INTERVAL /* key */ +}; + +static int domain_preload(void *opaque) +{ + VirtIOIOMMUDomain *domain = opaque; + + domain->mappings = g_tree_new_full((GCompareDataFunc)interval_cmp, + NULL, g_free, g_free); + return 0; +} + +static const VMStateDescription vmstate_endpoint = { + .name = "endpoint", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(id, VirtIOIOMMUEndpoint), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_domain = { + .name = "domain", + .version_id = 1, + .minimum_version_id = 1, + .pre_load = domain_preload, + .fields = (VMStateField[]) { + VMSTATE_UINT32(id, VirtIOIOMMUDomain), + VMSTATE_GTREE_V(mappings, VirtIOIOMMUDomain, 1, + vmstate_interval_mapping, + VirtIOIOMMUInterval, VirtIOIOMMUMapping), + VMSTATE_QLIST_V(endpoint_list, VirtIOIOMMUDomain, 1, + vmstate_endpoint, VirtIOIOMMUEndpoint, next), + VMSTATE_END_OF_LIST() + } +}; + +static gboolean reconstruct_endpoints(gpointer key, gpointer value, + gpointer data) +{ + VirtIOIOMMU *s = (VirtIOIOMMU *)data; + VirtIOIOMMUDomain *d = (VirtIOIOMMUDomain *)value; + VirtIOIOMMUEndpoint *iter; + IOMMUMemoryRegion *mr; + + QLIST_FOREACH(iter, &d->endpoint_list, next) { + mr = virtio_iommu_mr(s, iter->id); + assert(mr); + + iter->domain = d; + iter->iommu_mr = mr; + g_tree_insert(s->endpoints, GUINT_TO_POINTER(iter->id), iter); + } + return false; /* continue the domain traversal */ +} + +static int iommu_post_load(void *opaque, int version_id) +{ + VirtIOIOMMU *s = opaque; + + g_tree_foreach(s->domains, reconstruct_endpoints, s); + return 0; +} + +static const VMStateDescription vmstate_virtio_iommu_device = { + .name = "virtio-iommu-device", + .minimum_version_id = 1, + .version_id = 1, + .post_load = iommu_post_load, + .fields = (VMStateField[]) { + VMSTATE_GTREE_DIRECT_KEY_V(domains, VirtIOIOMMU, 1, + &vmstate_domain, VirtIOIOMMUDomain), + VMSTATE_END_OF_LIST() + }, +}; + +static const VMStateDescription vmstate_virtio_iommu = { + .name = "virtio-iommu", + .minimum_version_id = 1, + .priority = MIG_PRI_IOMMU, + .version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_VIRTIO_DEVICE, + VMSTATE_END_OF_LIST() + }, +}; + +static Property virtio_iommu_properties[] = { + DEFINE_PROP_LINK("primary-bus", 
VirtIOIOMMU, primary_bus, "PCI", PCIBus *), + DEFINE_PROP_END_OF_LIST(), +}; + +static void virtio_iommu_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass); + + device_class_set_props(dc, virtio_iommu_properties); + dc->vmsd = &vmstate_virtio_iommu; + + set_bit(DEVICE_CATEGORY_MISC, dc->categories); + vdc->realize = virtio_iommu_device_realize; + vdc->unrealize = virtio_iommu_device_unrealize; + vdc->reset = virtio_iommu_device_reset; + vdc->get_config = virtio_iommu_get_config; + vdc->set_config = virtio_iommu_set_config; + vdc->get_features = virtio_iommu_get_features; + vdc->set_status = virtio_iommu_set_status; + vdc->vmsd = &vmstate_virtio_iommu_device; +} + +static void virtio_iommu_memory_region_class_init(ObjectClass *klass, + void *data) +{ + IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass); + + imrc->translate = virtio_iommu_translate; + imrc->replay = virtio_iommu_replay; + imrc->notify_flag_changed = virtio_iommu_notify_flag_changed; + imrc->iommu_set_page_size_mask = virtio_iommu_set_page_size_mask; +} + +static const TypeInfo virtio_iommu_info = { + .name = TYPE_VIRTIO_IOMMU, + .parent = TYPE_VIRTIO_DEVICE, + .instance_size = sizeof(VirtIOIOMMU), + .instance_init = virtio_iommu_instance_init, + .class_init = virtio_iommu_class_init, +}; + +static const TypeInfo virtio_iommu_memory_region_info = { + .parent = TYPE_IOMMU_MEMORY_REGION, + .name = TYPE_VIRTIO_IOMMU_MEMORY_REGION, + .class_init = virtio_iommu_memory_region_class_init, +}; + +static void virtio_register_types(void) +{ + type_register_static(&virtio_iommu_info); + type_register_static(&virtio_iommu_memory_region_info); +} + +type_init(virtio_register_types) diff --git a/hw/virtio/virtio-mem-pci.c b/hw/virtio/virtio-mem-pci.c new file mode 100644 index 000000000..be2383b0c --- /dev/null +++ b/hw/virtio/virtio-mem-pci.c @@ -0,0 +1,163 @@ +/* + * Virtio MEM PCI device + * + * Copyright (C) 2020 Red Hat, Inc. + * + * Authors: + * David Hildenbrand + * + * This work is licensed under the terms of the GNU GPL, version 2. + * See the COPYING file in the top-level directory. 
+ */ + +#include "qemu/osdep.h" +#include "virtio-mem-pci.h" +#include "hw/mem/memory-device.h" +#include "qapi/error.h" +#include "qapi/qapi-events-machine.h" +#include "qapi/qapi-events-misc.h" + +static void virtio_mem_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp) +{ + VirtIOMEMPCI *mem_pci = VIRTIO_MEM_PCI(vpci_dev); + DeviceState *vdev = DEVICE(&mem_pci->vdev); + + virtio_pci_force_virtio_1(vpci_dev); + qdev_realize(vdev, BUS(&vpci_dev->bus), errp); +} + +static void virtio_mem_pci_set_addr(MemoryDeviceState *md, uint64_t addr, + Error **errp) +{ + object_property_set_uint(OBJECT(md), VIRTIO_MEM_ADDR_PROP, addr, errp); +} + +static uint64_t virtio_mem_pci_get_addr(const MemoryDeviceState *md) +{ + return object_property_get_uint(OBJECT(md), VIRTIO_MEM_ADDR_PROP, + &error_abort); +} + +static MemoryRegion *virtio_mem_pci_get_memory_region(MemoryDeviceState *md, + Error **errp) +{ + VirtIOMEMPCI *pci_mem = VIRTIO_MEM_PCI(md); + VirtIOMEM *vmem = VIRTIO_MEM(&pci_mem->vdev); + VirtIOMEMClass *vmc = VIRTIO_MEM_GET_CLASS(vmem); + + return vmc->get_memory_region(vmem, errp); +} + +static uint64_t virtio_mem_pci_get_plugged_size(const MemoryDeviceState *md, + Error **errp) +{ + return object_property_get_uint(OBJECT(md), VIRTIO_MEM_SIZE_PROP, + errp); +} + +static void virtio_mem_pci_fill_device_info(const MemoryDeviceState *md, + MemoryDeviceInfo *info) +{ + VirtioMEMDeviceInfo *vi = g_new0(VirtioMEMDeviceInfo, 1); + VirtIOMEMPCI *pci_mem = VIRTIO_MEM_PCI(md); + VirtIOMEM *vmem = VIRTIO_MEM(&pci_mem->vdev); + VirtIOMEMClass *vpc = VIRTIO_MEM_GET_CLASS(vmem); + DeviceState *dev = DEVICE(md); + + if (dev->id) { + vi->has_id = true; + vi->id = g_strdup(dev->id); + } + + /* let the real device handle everything else */ + vpc->fill_device_info(vmem, vi); + + info->u.virtio_mem.data = vi; + info->type = MEMORY_DEVICE_INFO_KIND_VIRTIO_MEM; +} + +static uint64_t virtio_mem_pci_get_min_alignment(const MemoryDeviceState *md) +{ + return object_property_get_uint(OBJECT(md), VIRTIO_MEM_BLOCK_SIZE_PROP, + &error_abort); +} + +static void virtio_mem_pci_size_change_notify(Notifier *notifier, void *data) +{ + VirtIOMEMPCI *pci_mem = container_of(notifier, VirtIOMEMPCI, + size_change_notifier); + DeviceState *dev = DEVICE(pci_mem); + char *qom_path = object_get_canonical_path(OBJECT(dev)); + const uint64_t * const size_p = data; + + qapi_event_send_memory_device_size_change(!!dev->id, dev->id, *size_p, + qom_path); + g_free(qom_path); +} + +static void virtio_mem_pci_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass); + PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass); + MemoryDeviceClass *mdc = MEMORY_DEVICE_CLASS(klass); + + k->realize = virtio_mem_pci_realize; + set_bit(DEVICE_CATEGORY_MISC, dc->categories); + pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET; + pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_MEM; + pcidev_k->revision = VIRTIO_PCI_ABI_VERSION; + pcidev_k->class_id = PCI_CLASS_OTHERS; + + mdc->get_addr = virtio_mem_pci_get_addr; + mdc->set_addr = virtio_mem_pci_set_addr; + mdc->get_plugged_size = virtio_mem_pci_get_plugged_size; + mdc->get_memory_region = virtio_mem_pci_get_memory_region; + mdc->fill_device_info = virtio_mem_pci_fill_device_info; + mdc->get_min_alignment = virtio_mem_pci_get_min_alignment; +} + +static void virtio_mem_pci_instance_init(Object *obj) +{ + VirtIOMEMPCI *dev = VIRTIO_MEM_PCI(obj); + VirtIOMEMClass *vmc; + VirtIOMEM *vmem; + + virtio_instance_init_common(obj, &dev->vdev, 
sizeof(dev->vdev), + TYPE_VIRTIO_MEM); + + dev->size_change_notifier.notify = virtio_mem_pci_size_change_notify; + vmem = VIRTIO_MEM(&dev->vdev); + vmc = VIRTIO_MEM_GET_CLASS(vmem); + /* + * We never remove the notifier again, as we expect both devices to + * disappear at the same time. + */ + vmc->add_size_change_notifier(vmem, &dev->size_change_notifier); + + object_property_add_alias(obj, VIRTIO_MEM_BLOCK_SIZE_PROP, + OBJECT(&dev->vdev), VIRTIO_MEM_BLOCK_SIZE_PROP); + object_property_add_alias(obj, VIRTIO_MEM_SIZE_PROP, OBJECT(&dev->vdev), + VIRTIO_MEM_SIZE_PROP); + object_property_add_alias(obj, VIRTIO_MEM_REQUESTED_SIZE_PROP, + OBJECT(&dev->vdev), + VIRTIO_MEM_REQUESTED_SIZE_PROP); +} + +static const VirtioPCIDeviceTypeInfo virtio_mem_pci_info = { + .base_name = TYPE_VIRTIO_MEM_PCI, + .generic_name = "virtio-mem-pci", + .instance_size = sizeof(VirtIOMEMPCI), + .instance_init = virtio_mem_pci_instance_init, + .class_init = virtio_mem_pci_class_init, + .interfaces = (InterfaceInfo[]) { + { TYPE_MEMORY_DEVICE }, + { } + }, +}; + +static void virtio_mem_pci_register_types(void) +{ + virtio_pci_types_register(&virtio_mem_pci_info); +} +type_init(virtio_mem_pci_register_types) diff --git a/hw/virtio/virtio-mem-pci.h b/hw/virtio/virtio-mem-pci.h new file mode 100644 index 000000000..e636e1a48 --- /dev/null +++ b/hw/virtio/virtio-mem-pci.h @@ -0,0 +1,35 @@ +/* + * Virtio MEM PCI device + * + * Copyright (C) 2020 Red Hat, Inc. + * + * Authors: + * David Hildenbrand + * + * This work is licensed under the terms of the GNU GPL, version 2. + * See the COPYING file in the top-level directory. + */ + +#ifndef QEMU_VIRTIO_MEM_PCI_H +#define QEMU_VIRTIO_MEM_PCI_H + +#include "hw/virtio/virtio-pci.h" +#include "hw/virtio/virtio-mem.h" +#include "qom/object.h" + +typedef struct VirtIOMEMPCI VirtIOMEMPCI; + +/* + * virtio-mem-pci: This extends VirtioPCIProxy. + */ +#define TYPE_VIRTIO_MEM_PCI "virtio-mem-pci-base" +DECLARE_INSTANCE_CHECKER(VirtIOMEMPCI, VIRTIO_MEM_PCI, + TYPE_VIRTIO_MEM_PCI) + +struct VirtIOMEMPCI { + VirtIOPCIProxy parent_obj; + VirtIOMEM vdev; + Notifier size_change_notifier; +}; + +#endif /* QEMU_VIRTIO_MEM_PCI_H */ diff --git a/hw/virtio/virtio-mem.c b/hw/virtio/virtio-mem.c new file mode 100644 index 000000000..d5a578142 --- /dev/null +++ b/hw/virtio/virtio-mem.c @@ -0,0 +1,1283 @@ +/* + * Virtio MEM device + * + * Copyright (C) 2020 Red Hat, Inc. + * + * Authors: + * David Hildenbrand + * + * This work is licensed under the terms of the GNU GPL, version 2. + * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "qemu-common.h" +#include "qemu/iov.h" +#include "qemu/cutils.h" +#include "qemu/error-report.h" +#include "qemu/units.h" +#include "sysemu/numa.h" +#include "sysemu/sysemu.h" +#include "sysemu/reset.h" +#include "hw/virtio/virtio.h" +#include "hw/virtio/virtio-bus.h" +#include "hw/virtio/virtio-access.h" +#include "hw/virtio/virtio-mem.h" +#include "qapi/error.h" +#include "qapi/visitor.h" +#include "exec/ram_addr.h" +#include "migration/misc.h" +#include "hw/boards.h" +#include "hw/qdev-properties.h" +#include CONFIG_DEVICES +#include "trace.h" + +/* + * Let's not allow blocks smaller than 1 MiB, for example, to keep the tracking + * bitmap small. 
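+ * (with 1 MiB blocks, 1 TiB of device memory needs 2^20 bits, i.e. a 128 KiB bitmap)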
+ */ +#define VIRTIO_MEM_MIN_BLOCK_SIZE ((uint32_t)(1 * MiB)) + +#if defined(__x86_64__) || defined(__arm__) || defined(__aarch64__) || \ + defined(__powerpc64__) +#define VIRTIO_MEM_DEFAULT_THP_SIZE ((uint32_t)(2 * MiB)) +#else + /* fallback to 1 MiB (e.g., the THP size on s390x) */ +#define VIRTIO_MEM_DEFAULT_THP_SIZE VIRTIO_MEM_MIN_BLOCK_SIZE +#endif + +/* + * We want to have a reasonable default block size such that + * 1. We avoid splitting THPs when unplugging memory, which degrades + * performance. + * 2. We avoid placing THPs for plugged blocks that also cover unplugged + * blocks. + * + * The actual THP size might differ between Linux kernels, so we try to probe + * it. In the future (if we ever run into issues regarding 2.), we might want + * to disable THP in case we fail to properly probe the THP size, or if the + * block size is configured smaller than the THP size. + */ +static uint32_t thp_size; + +#define HPAGE_PMD_SIZE_PATH "/sys/kernel/mm/transparent_hugepage/hpage_pmd_size" +static uint32_t virtio_mem_thp_size(void) +{ + gchar *content = NULL; + const char *endptr; + uint64_t tmp; + + if (thp_size) { + return thp_size; + } + + /* + * Try to probe the actual THP size, fallback to (sane but eventually + * incorrect) default sizes. + */ + if (g_file_get_contents(HPAGE_PMD_SIZE_PATH, &content, NULL, NULL) && + !qemu_strtou64(content, &endptr, 0, &tmp) && + (!endptr || *endptr == '\n')) { + /* + * Sanity-check the value, if it's too big (e.g., aarch64 with 64k base + * pages) or weird, fallback to something smaller. + */ + if (!tmp || !is_power_of_2(tmp) || tmp > 16 * MiB) { + warn_report("Read unsupported THP size: %" PRIx64, tmp); + } else { + thp_size = tmp; + } + } + + if (!thp_size) { + thp_size = VIRTIO_MEM_DEFAULT_THP_SIZE; + warn_report("Could not detect THP size, falling back to %" PRIx64 + " MiB.", thp_size / MiB); + } + + g_free(content); + return thp_size; +} + +static uint64_t virtio_mem_default_block_size(RAMBlock *rb) +{ + const uint64_t page_size = qemu_ram_pagesize(rb); + + /* We can have hugetlbfs with a page size smaller than the THP size. */ + if (page_size == qemu_real_host_page_size) { + return MAX(page_size, virtio_mem_thp_size()); + } + return MAX(page_size, VIRTIO_MEM_MIN_BLOCK_SIZE); +} + +/* + * Size the usable region bigger than the requested size if possible. Esp. + * Linux guests will only add (aligned) memory blocks in case they fully + * fit into the usable region, but plug+online only a subset of the pages. + * The memory block size corresponds mostly to the section size. + * + * This allows e.g., to add 20MB with a section size of 128MB on x86_64, and + * a section size of 1GB on arm64 (as long as the start address is properly + * aligned, similar to ordinary DIMMs). + * + * We can change this at any time and maybe even make it configurable if + * necessary (as the section size can change). But it's more likely that the + * section size will rather get smaller and not bigger over time. + */ +#if defined(TARGET_X86_64) || defined(TARGET_I386) +#define VIRTIO_MEM_USABLE_EXTENT (2 * (128 * MiB)) +#else +#error VIRTIO_MEM_USABLE_EXTENT not defined +#endif + +static bool virtio_mem_is_busy(void) +{ + /* + * Postcopy cannot handle concurrent discards and we don't want to migrate + * pages on-demand with stale content when plugging new blocks. 
+ * + * For precopy, we don't want unplugged blocks in our migration stream, and + * when plugging new blocks, the page content might differ between source + * and destination (observable by the guest when not initializing pages + * after plugging them) until we're running on the destination (as we didn't + * migrate these blocks when they were unplugged). + */ + return migration_in_incoming_postcopy() || !migration_is_idle(); +} + +typedef int (*virtio_mem_range_cb)(const VirtIOMEM *vmem, void *arg, + uint64_t offset, uint64_t size); + +static int virtio_mem_for_each_unplugged_range(const VirtIOMEM *vmem, void *arg, + virtio_mem_range_cb cb) +{ + unsigned long first_zero_bit, last_zero_bit; + uint64_t offset, size; + int ret = 0; + + first_zero_bit = find_first_zero_bit(vmem->bitmap, vmem->bitmap_size); + while (first_zero_bit < vmem->bitmap_size) { + offset = first_zero_bit * vmem->block_size; + last_zero_bit = find_next_bit(vmem->bitmap, vmem->bitmap_size, + first_zero_bit + 1) - 1; + size = (last_zero_bit - first_zero_bit + 1) * vmem->block_size; + + ret = cb(vmem, arg, offset, size); + if (ret) { + break; + } + first_zero_bit = find_next_zero_bit(vmem->bitmap, vmem->bitmap_size, + last_zero_bit + 2); + } + return ret; +} + +/* + * Adjust the memory section to cover the intersection with the given range. + * + * Returns false if the intersection is empty, otherwise returns true. + */ +static bool virito_mem_intersect_memory_section(MemoryRegionSection *s, + uint64_t offset, uint64_t size) +{ + uint64_t start = MAX(s->offset_within_region, offset); + uint64_t end = MIN(s->offset_within_region + int128_get64(s->size), + offset + size); + + if (end <= start) { + return false; + } + + s->offset_within_address_space += start - s->offset_within_region; + s->offset_within_region = start; + s->size = int128_make64(end - start); + return true; +} + +typedef int (*virtio_mem_section_cb)(MemoryRegionSection *s, void *arg); + +static int virtio_mem_for_each_plugged_section(const VirtIOMEM *vmem, + MemoryRegionSection *s, + void *arg, + virtio_mem_section_cb cb) +{ + unsigned long first_bit, last_bit; + uint64_t offset, size; + int ret = 0; + + first_bit = s->offset_within_region / vmem->block_size; + first_bit = find_next_bit(vmem->bitmap, vmem->bitmap_size, first_bit); + while (first_bit < vmem->bitmap_size) { + MemoryRegionSection tmp = *s; + + offset = first_bit * vmem->block_size; + last_bit = find_next_zero_bit(vmem->bitmap, vmem->bitmap_size, + first_bit + 1) - 1; + size = (last_bit - first_bit + 1) * vmem->block_size; + + if (!virito_mem_intersect_memory_section(&tmp, offset, size)) { + break; + } + ret = cb(&tmp, arg); + if (ret) { + break; + } + first_bit = find_next_bit(vmem->bitmap, vmem->bitmap_size, + last_bit + 2); + } + return ret; +} + +static int virtio_mem_for_each_unplugged_section(const VirtIOMEM *vmem, + MemoryRegionSection *s, + void *arg, + virtio_mem_section_cb cb) +{ + unsigned long first_bit, last_bit; + uint64_t offset, size; + int ret = 0; + + first_bit = s->offset_within_region / vmem->block_size; + first_bit = find_next_zero_bit(vmem->bitmap, vmem->bitmap_size, first_bit); + while (first_bit < vmem->bitmap_size) { + MemoryRegionSection tmp = *s; + + offset = first_bit * vmem->block_size; + last_bit = find_next_bit(vmem->bitmap, vmem->bitmap_size, + first_bit + 1) - 1; + size = (last_bit - first_bit + 1) * vmem->block_size; + + if (!virito_mem_intersect_memory_section(&tmp, offset, size)) { + break; + } + ret = cb(&tmp, arg); + if (ret) { + break; + } + first_bit = 
+                                       last_bit + 2);
+    }
+    return ret;
+}
+
+static int virtio_mem_notify_populate_cb(MemoryRegionSection *s, void *arg)
+{
+    RamDiscardListener *rdl = arg;
+
+    return rdl->notify_populate(rdl, s);
+}
+
+static int virtio_mem_notify_discard_cb(MemoryRegionSection *s, void *arg)
+{
+    RamDiscardListener *rdl = arg;
+
+    rdl->notify_discard(rdl, s);
+    return 0;
+}
+
+static void virtio_mem_notify_unplug(VirtIOMEM *vmem, uint64_t offset,
+                                     uint64_t size)
+{
+    RamDiscardListener *rdl;
+
+    QLIST_FOREACH(rdl, &vmem->rdl_list, next) {
+        MemoryRegionSection tmp = *rdl->section;
+
+        if (!virtio_mem_intersect_memory_section(&tmp, offset, size)) {
+            continue;
+        }
+        rdl->notify_discard(rdl, &tmp);
+    }
+}
+
+static int virtio_mem_notify_plug(VirtIOMEM *vmem, uint64_t offset,
+                                  uint64_t size)
+{
+    RamDiscardListener *rdl, *rdl2;
+    int ret = 0;
+
+    QLIST_FOREACH(rdl, &vmem->rdl_list, next) {
+        MemoryRegionSection tmp = *rdl->section;
+
+        if (!virtio_mem_intersect_memory_section(&tmp, offset, size)) {
+            continue;
+        }
+        ret = rdl->notify_populate(rdl, &tmp);
+        if (ret) {
+            break;
+        }
+    }
+
+    if (ret) {
+        /* Notify all already-notified listeners. */
+        QLIST_FOREACH(rdl2, &vmem->rdl_list, next) {
+            MemoryRegionSection tmp = *rdl2->section;
+
+            if (rdl2 == rdl) {
+                break;
+            }
+            if (!virtio_mem_intersect_memory_section(&tmp, offset, size)) {
+                continue;
+            }
+            rdl2->notify_discard(rdl2, &tmp);
+        }
+    }
+    return ret;
+}
+
+static void virtio_mem_notify_unplug_all(VirtIOMEM *vmem)
+{
+    RamDiscardListener *rdl;
+
+    if (!vmem->size) {
+        return;
+    }
+
+    QLIST_FOREACH(rdl, &vmem->rdl_list, next) {
+        if (rdl->double_discard_supported) {
+            rdl->notify_discard(rdl, rdl->section);
+        } else {
+            virtio_mem_for_each_plugged_section(vmem, rdl->section, rdl,
+                                                virtio_mem_notify_discard_cb);
+        }
+    }
+}
+
+static bool virtio_mem_test_bitmap(const VirtIOMEM *vmem, uint64_t start_gpa,
+                                   uint64_t size, bool plugged)
+{
+    const unsigned long first_bit = (start_gpa - vmem->addr) / vmem->block_size;
+    const unsigned long last_bit = first_bit + (size / vmem->block_size) - 1;
+    unsigned long found_bit;
+
+    /* We fake a shorter bitmap to avoid searching too far.
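+     * Passing last_bit + 1 as the bitmap length bounds the search to the
+     * queried range: if no bit in the opposite state shows up in there,
+     * found_bit lands beyond last_bit and the range is uniformly
+     * plugged/unplugged.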
*/ + if (plugged) { + found_bit = find_next_zero_bit(vmem->bitmap, last_bit + 1, first_bit); + } else { + found_bit = find_next_bit(vmem->bitmap, last_bit + 1, first_bit); + } + return found_bit > last_bit; +} + +static void virtio_mem_set_bitmap(VirtIOMEM *vmem, uint64_t start_gpa, + uint64_t size, bool plugged) +{ + const unsigned long bit = (start_gpa - vmem->addr) / vmem->block_size; + const unsigned long nbits = size / vmem->block_size; + + if (plugged) { + bitmap_set(vmem->bitmap, bit, nbits); + } else { + bitmap_clear(vmem->bitmap, bit, nbits); + } +} + +static void virtio_mem_send_response(VirtIOMEM *vmem, VirtQueueElement *elem, + struct virtio_mem_resp *resp) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(vmem); + VirtQueue *vq = vmem->vq; + + trace_virtio_mem_send_response(le16_to_cpu(resp->type)); + iov_from_buf(elem->in_sg, elem->in_num, 0, resp, sizeof(*resp)); + + virtqueue_push(vq, elem, sizeof(*resp)); + virtio_notify(vdev, vq); +} + +static void virtio_mem_send_response_simple(VirtIOMEM *vmem, + VirtQueueElement *elem, + uint16_t type) +{ + struct virtio_mem_resp resp = { + .type = cpu_to_le16(type), + }; + + virtio_mem_send_response(vmem, elem, &resp); +} + +static bool virtio_mem_valid_range(const VirtIOMEM *vmem, uint64_t gpa, + uint64_t size) +{ + if (!QEMU_IS_ALIGNED(gpa, vmem->block_size)) { + return false; + } + if (gpa + size < gpa || !size) { + return false; + } + if (gpa < vmem->addr || gpa >= vmem->addr + vmem->usable_region_size) { + return false; + } + if (gpa + size > vmem->addr + vmem->usable_region_size) { + return false; + } + return true; +} + +static int virtio_mem_set_block_state(VirtIOMEM *vmem, uint64_t start_gpa, + uint64_t size, bool plug) +{ + const uint64_t offset = start_gpa - vmem->addr; + RAMBlock *rb = vmem->memdev->mr.ram_block; + + if (virtio_mem_is_busy()) { + return -EBUSY; + } + + if (!plug) { + if (ram_block_discard_range(rb, offset, size)) { + return -EBUSY; + } + virtio_mem_notify_unplug(vmem, offset, size); + } else if (virtio_mem_notify_plug(vmem, offset, size)) { + /* Could be a mapping attempt resulted in memory getting populated. 
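+         * Discard the range again so no partially populated pages are
+         * left behind before failing the request with -EBUSY.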
*/ + ram_block_discard_range(vmem->memdev->mr.ram_block, offset, size); + return -EBUSY; + } + virtio_mem_set_bitmap(vmem, start_gpa, size, plug); + return 0; +} + +static int virtio_mem_state_change_request(VirtIOMEM *vmem, uint64_t gpa, + uint16_t nb_blocks, bool plug) +{ + const uint64_t size = nb_blocks * vmem->block_size; + int ret; + + if (!virtio_mem_valid_range(vmem, gpa, size)) { + return VIRTIO_MEM_RESP_ERROR; + } + + if (plug && (vmem->size + size > vmem->requested_size)) { + return VIRTIO_MEM_RESP_NACK; + } + + /* test if really all blocks are in the opposite state */ + if (!virtio_mem_test_bitmap(vmem, gpa, size, !plug)) { + return VIRTIO_MEM_RESP_ERROR; + } + + ret = virtio_mem_set_block_state(vmem, gpa, size, plug); + if (ret) { + return VIRTIO_MEM_RESP_BUSY; + } + if (plug) { + vmem->size += size; + } else { + vmem->size -= size; + } + notifier_list_notify(&vmem->size_change_notifiers, &vmem->size); + return VIRTIO_MEM_RESP_ACK; +} + +static void virtio_mem_plug_request(VirtIOMEM *vmem, VirtQueueElement *elem, + struct virtio_mem_req *req) +{ + const uint64_t gpa = le64_to_cpu(req->u.plug.addr); + const uint16_t nb_blocks = le16_to_cpu(req->u.plug.nb_blocks); + uint16_t type; + + trace_virtio_mem_plug_request(gpa, nb_blocks); + type = virtio_mem_state_change_request(vmem, gpa, nb_blocks, true); + virtio_mem_send_response_simple(vmem, elem, type); +} + +static void virtio_mem_unplug_request(VirtIOMEM *vmem, VirtQueueElement *elem, + struct virtio_mem_req *req) +{ + const uint64_t gpa = le64_to_cpu(req->u.unplug.addr); + const uint16_t nb_blocks = le16_to_cpu(req->u.unplug.nb_blocks); + uint16_t type; + + trace_virtio_mem_unplug_request(gpa, nb_blocks); + type = virtio_mem_state_change_request(vmem, gpa, nb_blocks, false); + virtio_mem_send_response_simple(vmem, elem, type); +} + +static void virtio_mem_resize_usable_region(VirtIOMEM *vmem, + uint64_t requested_size, + bool can_shrink) +{ + uint64_t newsize = MIN(memory_region_size(&vmem->memdev->mr), + requested_size + VIRTIO_MEM_USABLE_EXTENT); + + /* The usable region size always has to be multiples of the block size. 
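+     * e.g., with the default 2 MiB block size and the 256 MiB usable
+     * extent, a requested size of 20 MiB yields a 276 MiB usable region,
+     * always capped at the memdev size.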
*/ + newsize = QEMU_ALIGN_UP(newsize, vmem->block_size); + + if (!requested_size) { + newsize = 0; + } + + if (newsize < vmem->usable_region_size && !can_shrink) { + return; + } + + trace_virtio_mem_resized_usable_region(vmem->usable_region_size, newsize); + vmem->usable_region_size = newsize; +} + +static int virtio_mem_unplug_all(VirtIOMEM *vmem) +{ + RAMBlock *rb = vmem->memdev->mr.ram_block; + + if (virtio_mem_is_busy()) { + return -EBUSY; + } + + if (ram_block_discard_range(rb, 0, qemu_ram_get_used_length(rb))) { + return -EBUSY; + } + virtio_mem_notify_unplug_all(vmem); + + bitmap_clear(vmem->bitmap, 0, vmem->bitmap_size); + if (vmem->size) { + vmem->size = 0; + notifier_list_notify(&vmem->size_change_notifiers, &vmem->size); + } + trace_virtio_mem_unplugged_all(); + virtio_mem_resize_usable_region(vmem, vmem->requested_size, true); + return 0; +} + +static void virtio_mem_unplug_all_request(VirtIOMEM *vmem, + VirtQueueElement *elem) +{ + trace_virtio_mem_unplug_all_request(); + if (virtio_mem_unplug_all(vmem)) { + virtio_mem_send_response_simple(vmem, elem, VIRTIO_MEM_RESP_BUSY); + } else { + virtio_mem_send_response_simple(vmem, elem, VIRTIO_MEM_RESP_ACK); + } +} + +static void virtio_mem_state_request(VirtIOMEM *vmem, VirtQueueElement *elem, + struct virtio_mem_req *req) +{ + const uint16_t nb_blocks = le16_to_cpu(req->u.state.nb_blocks); + const uint64_t gpa = le64_to_cpu(req->u.state.addr); + const uint64_t size = nb_blocks * vmem->block_size; + struct virtio_mem_resp resp = { + .type = cpu_to_le16(VIRTIO_MEM_RESP_ACK), + }; + + trace_virtio_mem_state_request(gpa, nb_blocks); + if (!virtio_mem_valid_range(vmem, gpa, size)) { + virtio_mem_send_response_simple(vmem, elem, VIRTIO_MEM_RESP_ERROR); + return; + } + + if (virtio_mem_test_bitmap(vmem, gpa, size, true)) { + resp.u.state.state = cpu_to_le16(VIRTIO_MEM_STATE_PLUGGED); + } else if (virtio_mem_test_bitmap(vmem, gpa, size, false)) { + resp.u.state.state = cpu_to_le16(VIRTIO_MEM_STATE_UNPLUGGED); + } else { + resp.u.state.state = cpu_to_le16(VIRTIO_MEM_STATE_MIXED); + } + trace_virtio_mem_state_response(le16_to_cpu(resp.u.state.state)); + virtio_mem_send_response(vmem, elem, &resp); +} + +static void virtio_mem_handle_request(VirtIODevice *vdev, VirtQueue *vq) +{ + const int len = sizeof(struct virtio_mem_req); + VirtIOMEM *vmem = VIRTIO_MEM(vdev); + VirtQueueElement *elem; + struct virtio_mem_req req; + uint16_t type; + + while (true) { + elem = virtqueue_pop(vq, sizeof(VirtQueueElement)); + if (!elem) { + return; + } + + if (iov_to_buf(elem->out_sg, elem->out_num, 0, &req, len) < len) { + virtio_error(vdev, "virtio-mem protocol violation: invalid request" + " size: %d", len); + virtqueue_detach_element(vq, elem, 0); + g_free(elem); + return; + } + + if (iov_size(elem->in_sg, elem->in_num) < + sizeof(struct virtio_mem_resp)) { + virtio_error(vdev, "virtio-mem protocol violation: not enough space" + " for response: %zu", + iov_size(elem->in_sg, elem->in_num)); + virtqueue_detach_element(vq, elem, 0); + g_free(elem); + return; + } + + type = le16_to_cpu(req.type); + switch (type) { + case VIRTIO_MEM_REQ_PLUG: + virtio_mem_plug_request(vmem, elem, &req); + break; + case VIRTIO_MEM_REQ_UNPLUG: + virtio_mem_unplug_request(vmem, elem, &req); + break; + case VIRTIO_MEM_REQ_UNPLUG_ALL: + virtio_mem_unplug_all_request(vmem, elem); + break; + case VIRTIO_MEM_REQ_STATE: + virtio_mem_state_request(vmem, elem, &req); + break; + default: + virtio_error(vdev, "virtio-mem protocol violation: unknown request" + " type: %d", type); + 
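+            /*
+             * virtio_error() marked the device broken; detach the element
+             * without sending a response and stop processing the queue.
+             */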
virtqueue_detach_element(vq, elem, 0); + g_free(elem); + return; + } + + g_free(elem); + } +} + +static void virtio_mem_get_config(VirtIODevice *vdev, uint8_t *config_data) +{ + VirtIOMEM *vmem = VIRTIO_MEM(vdev); + struct virtio_mem_config *config = (void *) config_data; + + config->block_size = cpu_to_le64(vmem->block_size); + config->node_id = cpu_to_le16(vmem->node); + config->requested_size = cpu_to_le64(vmem->requested_size); + config->plugged_size = cpu_to_le64(vmem->size); + config->addr = cpu_to_le64(vmem->addr); + config->region_size = cpu_to_le64(memory_region_size(&vmem->memdev->mr)); + config->usable_region_size = cpu_to_le64(vmem->usable_region_size); +} + +static uint64_t virtio_mem_get_features(VirtIODevice *vdev, uint64_t features, + Error **errp) +{ + MachineState *ms = MACHINE(qdev_get_machine()); + + if (ms->numa_state) { +#if defined(CONFIG_ACPI) + virtio_add_feature(&features, VIRTIO_MEM_F_ACPI_PXM); +#endif + } + return features; +} + +static void virtio_mem_system_reset(void *opaque) +{ + VirtIOMEM *vmem = VIRTIO_MEM(opaque); + + /* + * During usual resets, we will unplug all memory and shrink the usable + * region size. This is, however, not possible in all scenarios. Then, + * the guest has to deal with this manually (VIRTIO_MEM_REQ_UNPLUG_ALL). + */ + virtio_mem_unplug_all(vmem); +} + +static void virtio_mem_device_realize(DeviceState *dev, Error **errp) +{ + MachineState *ms = MACHINE(qdev_get_machine()); + int nb_numa_nodes = ms->numa_state ? ms->numa_state->num_nodes : 0; + VirtIODevice *vdev = VIRTIO_DEVICE(dev); + VirtIOMEM *vmem = VIRTIO_MEM(dev); + uint64_t page_size; + RAMBlock *rb; + int ret; + + if (!vmem->memdev) { + error_setg(errp, "'%s' property is not set", VIRTIO_MEM_MEMDEV_PROP); + return; + } else if (host_memory_backend_is_mapped(vmem->memdev)) { + error_setg(errp, "'%s' property specifies a busy memdev: %s", + VIRTIO_MEM_MEMDEV_PROP, + object_get_canonical_path_component(OBJECT(vmem->memdev))); + return; + } else if (!memory_region_is_ram(&vmem->memdev->mr) || + memory_region_is_rom(&vmem->memdev->mr) || + !vmem->memdev->mr.ram_block) { + error_setg(errp, "'%s' property specifies an unsupported memdev", + VIRTIO_MEM_MEMDEV_PROP); + return; + } + + if ((nb_numa_nodes && vmem->node >= nb_numa_nodes) || + (!nb_numa_nodes && vmem->node)) { + error_setg(errp, "'%s' property has value '%" PRIu32 "', which exceeds" + "the number of numa nodes: %d", VIRTIO_MEM_NODE_PROP, + vmem->node, nb_numa_nodes ? nb_numa_nodes : 1); + return; + } + + if (enable_mlock) { + error_setg(errp, "Incompatible with mlock"); + return; + } + + rb = vmem->memdev->mr.ram_block; + page_size = qemu_ram_pagesize(rb); + + /* + * If the block size wasn't configured by the user, use a sane default. This + * allows using hugetlbfs backends of any page size without manual + * intervention. 
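+     * For example, a memdev backed by 2 MiB huge pages ends up with a
+     * 2 MiB block size, while anonymous memory defaults to the probed
+     * THP size.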
+ */ + if (!vmem->block_size) { + vmem->block_size = virtio_mem_default_block_size(rb); + } + + if (vmem->block_size < page_size) { + error_setg(errp, "'%s' property has to be at least the page size (0x%" + PRIx64 ")", VIRTIO_MEM_BLOCK_SIZE_PROP, page_size); + return; + } else if (vmem->block_size < virtio_mem_default_block_size(rb)) { + warn_report("'%s' property is smaller than the default block size (%" + PRIx64 " MiB)", VIRTIO_MEM_BLOCK_SIZE_PROP, + virtio_mem_default_block_size(rb) / MiB); + } else if (!QEMU_IS_ALIGNED(vmem->requested_size, vmem->block_size)) { + error_setg(errp, "'%s' property has to be multiples of '%s' (0x%" PRIx64 + ")", VIRTIO_MEM_REQUESTED_SIZE_PROP, + VIRTIO_MEM_BLOCK_SIZE_PROP, vmem->block_size); + return; + } else if (!QEMU_IS_ALIGNED(vmem->addr, vmem->block_size)) { + error_setg(errp, "'%s' property has to be multiples of '%s' (0x%" PRIx64 + ")", VIRTIO_MEM_ADDR_PROP, VIRTIO_MEM_BLOCK_SIZE_PROP, + vmem->block_size); + return; + } else if (!QEMU_IS_ALIGNED(memory_region_size(&vmem->memdev->mr), + vmem->block_size)) { + error_setg(errp, "'%s' property memdev size has to be multiples of" + "'%s' (0x%" PRIx64 ")", VIRTIO_MEM_MEMDEV_PROP, + VIRTIO_MEM_BLOCK_SIZE_PROP, vmem->block_size); + return; + } + + if (ram_block_coordinated_discard_require(true)) { + error_setg(errp, "Discarding RAM is disabled"); + return; + } + + ret = ram_block_discard_range(rb, 0, qemu_ram_get_used_length(rb)); + if (ret) { + error_setg_errno(errp, -ret, "Unexpected error discarding RAM"); + ram_block_coordinated_discard_require(false); + return; + } + + virtio_mem_resize_usable_region(vmem, vmem->requested_size, true); + + vmem->bitmap_size = memory_region_size(&vmem->memdev->mr) / + vmem->block_size; + vmem->bitmap = bitmap_new(vmem->bitmap_size); + + virtio_init(vdev, TYPE_VIRTIO_MEM, VIRTIO_ID_MEM, + sizeof(struct virtio_mem_config)); + vmem->vq = virtio_add_queue(vdev, 128, virtio_mem_handle_request); + + host_memory_backend_set_mapped(vmem->memdev, true); + vmstate_register_ram(&vmem->memdev->mr, DEVICE(vmem)); + qemu_register_reset(virtio_mem_system_reset, vmem); + + /* + * Set ourselves as RamDiscardManager before the plug handler maps the + * memory region and exposes it via an address space. + */ + memory_region_set_ram_discard_manager(&vmem->memdev->mr, + RAM_DISCARD_MANAGER(vmem)); +} + +static void virtio_mem_device_unrealize(DeviceState *dev) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(dev); + VirtIOMEM *vmem = VIRTIO_MEM(dev); + + /* + * The unplug handler unmapped the memory region, it cannot be + * found via an address space anymore. Unset ourselves. + */ + memory_region_set_ram_discard_manager(&vmem->memdev->mr, NULL); + qemu_unregister_reset(virtio_mem_system_reset, vmem); + vmstate_unregister_ram(&vmem->memdev->mr, DEVICE(vmem)); + host_memory_backend_set_mapped(vmem->memdev, false); + virtio_del_queue(vdev, 0); + virtio_cleanup(vdev); + g_free(vmem->bitmap); + ram_block_coordinated_discard_require(false); +} + +static int virtio_mem_discard_range_cb(const VirtIOMEM *vmem, void *arg, + uint64_t offset, uint64_t size) +{ + RAMBlock *rb = vmem->memdev->mr.ram_block; + + return ram_block_discard_range(rb, offset, size) ? -EINVAL : 0; +} + +static int virtio_mem_restore_unplugged(VirtIOMEM *vmem) +{ + /* Make sure all memory is really discarded after migration. 
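+     * A block that got unplugged while precopy was already running may
+     * have had its pages migrated beforehand, leaving them populated on
+     * the destination; discard all unplugged ranges again.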
*/ + return virtio_mem_for_each_unplugged_range(vmem, NULL, + virtio_mem_discard_range_cb); +} + +static int virtio_mem_post_load(void *opaque, int version_id) +{ + VirtIOMEM *vmem = VIRTIO_MEM(opaque); + RamDiscardListener *rdl; + int ret; + + /* + * We started out with all memory discarded and our memory region is mapped + * into an address space. Replay, now that we updated the bitmap. + */ + QLIST_FOREACH(rdl, &vmem->rdl_list, next) { + ret = virtio_mem_for_each_plugged_section(vmem, rdl->section, rdl, + virtio_mem_notify_populate_cb); + if (ret) { + return ret; + } + } + + if (migration_in_incoming_postcopy()) { + return 0; + } + + return virtio_mem_restore_unplugged(vmem); +} + +typedef struct VirtIOMEMMigSanityChecks { + VirtIOMEM *parent; + uint64_t addr; + uint64_t region_size; + uint64_t block_size; + uint32_t node; +} VirtIOMEMMigSanityChecks; + +static int virtio_mem_mig_sanity_checks_pre_save(void *opaque) +{ + VirtIOMEMMigSanityChecks *tmp = opaque; + VirtIOMEM *vmem = tmp->parent; + + tmp->addr = vmem->addr; + tmp->region_size = memory_region_size(&vmem->memdev->mr); + tmp->block_size = vmem->block_size; + tmp->node = vmem->node; + return 0; +} + +static int virtio_mem_mig_sanity_checks_post_load(void *opaque, int version_id) +{ + VirtIOMEMMigSanityChecks *tmp = opaque; + VirtIOMEM *vmem = tmp->parent; + const uint64_t new_region_size = memory_region_size(&vmem->memdev->mr); + + if (tmp->addr != vmem->addr) { + error_report("Property '%s' changed from 0x%" PRIx64 " to 0x%" PRIx64, + VIRTIO_MEM_ADDR_PROP, tmp->addr, vmem->addr); + return -EINVAL; + } + /* + * Note: Preparation for resizeable memory regions. The maximum size + * of the memory region must not change during migration. + */ + if (tmp->region_size != new_region_size) { + error_report("Property '%s' size changed from 0x%" PRIx64 " to 0x%" + PRIx64, VIRTIO_MEM_MEMDEV_PROP, tmp->region_size, + new_region_size); + return -EINVAL; + } + if (tmp->block_size != vmem->block_size) { + error_report("Property '%s' changed from 0x%" PRIx64 " to 0x%" PRIx64, + VIRTIO_MEM_BLOCK_SIZE_PROP, tmp->block_size, + vmem->block_size); + return -EINVAL; + } + if (tmp->node != vmem->node) { + error_report("Property '%s' changed from %" PRIu32 " to %" PRIu32, + VIRTIO_MEM_NODE_PROP, tmp->node, vmem->node); + return -EINVAL; + } + return 0; +} + +static const VMStateDescription vmstate_virtio_mem_sanity_checks = { + .name = "virtio-mem-device/sanity-checks", + .pre_save = virtio_mem_mig_sanity_checks_pre_save, + .post_load = virtio_mem_mig_sanity_checks_post_load, + .fields = (VMStateField[]) { + VMSTATE_UINT64(addr, VirtIOMEMMigSanityChecks), + VMSTATE_UINT64(region_size, VirtIOMEMMigSanityChecks), + VMSTATE_UINT64(block_size, VirtIOMEMMigSanityChecks), + VMSTATE_UINT32(node, VirtIOMEMMigSanityChecks), + VMSTATE_END_OF_LIST(), + }, +}; + +static const VMStateDescription vmstate_virtio_mem_device = { + .name = "virtio-mem-device", + .minimum_version_id = 1, + .version_id = 1, + .priority = MIG_PRI_VIRTIO_MEM, + .post_load = virtio_mem_post_load, + .fields = (VMStateField[]) { + VMSTATE_WITH_TMP(VirtIOMEM, VirtIOMEMMigSanityChecks, + vmstate_virtio_mem_sanity_checks), + VMSTATE_UINT64(usable_region_size, VirtIOMEM), + VMSTATE_UINT64(size, VirtIOMEM), + VMSTATE_UINT64(requested_size, VirtIOMEM), + VMSTATE_BITMAP(bitmap, VirtIOMEM, 0, bitmap_size), + VMSTATE_END_OF_LIST() + }, +}; + +static const VMStateDescription vmstate_virtio_mem = { + .name = "virtio-mem", + .minimum_version_id = 1, + .version_id = 1, + .fields = (VMStateField[]) { + 
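+        /*
+         * Only generic virtio state is migrated here; the device state
+         * (sizes, bitmap, sanity checks) travels via vdc->vmsd, i.e.
+         * "virtio-mem-device" above.
+         */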
VMSTATE_VIRTIO_DEVICE, + VMSTATE_END_OF_LIST() + }, +}; + +static void virtio_mem_fill_device_info(const VirtIOMEM *vmem, + VirtioMEMDeviceInfo *vi) +{ + vi->memaddr = vmem->addr; + vi->node = vmem->node; + vi->requested_size = vmem->requested_size; + vi->size = vmem->size; + vi->max_size = memory_region_size(&vmem->memdev->mr); + vi->block_size = vmem->block_size; + vi->memdev = object_get_canonical_path(OBJECT(vmem->memdev)); +} + +static MemoryRegion *virtio_mem_get_memory_region(VirtIOMEM *vmem, Error **errp) +{ + if (!vmem->memdev) { + error_setg(errp, "'%s' property must be set", VIRTIO_MEM_MEMDEV_PROP); + return NULL; + } + + return &vmem->memdev->mr; +} + +static void virtio_mem_add_size_change_notifier(VirtIOMEM *vmem, + Notifier *notifier) +{ + notifier_list_add(&vmem->size_change_notifiers, notifier); +} + +static void virtio_mem_remove_size_change_notifier(VirtIOMEM *vmem, + Notifier *notifier) +{ + notifier_remove(notifier); +} + +static void virtio_mem_get_size(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + const VirtIOMEM *vmem = VIRTIO_MEM(obj); + uint64_t value = vmem->size; + + visit_type_size(v, name, &value, errp); +} + +static void virtio_mem_get_requested_size(Object *obj, Visitor *v, + const char *name, void *opaque, + Error **errp) +{ + const VirtIOMEM *vmem = VIRTIO_MEM(obj); + uint64_t value = vmem->requested_size; + + visit_type_size(v, name, &value, errp); +} + +static void virtio_mem_set_requested_size(Object *obj, Visitor *v, + const char *name, void *opaque, + Error **errp) +{ + VirtIOMEM *vmem = VIRTIO_MEM(obj); + Error *err = NULL; + uint64_t value; + + visit_type_size(v, name, &value, &err); + if (err) { + error_propagate(errp, err); + return; + } + + /* + * The block size and memory backend are not fixed until the device was + * realized. realize() will verify these properties then. + */ + if (DEVICE(obj)->realized) { + if (!QEMU_IS_ALIGNED(value, vmem->block_size)) { + error_setg(errp, "'%s' has to be multiples of '%s' (0x%" PRIx64 + ")", name, VIRTIO_MEM_BLOCK_SIZE_PROP, + vmem->block_size); + return; + } else if (value > memory_region_size(&vmem->memdev->mr)) { + error_setg(errp, "'%s' cannot exceed the memory backend size" + "(0x%" PRIx64 ")", name, + memory_region_size(&vmem->memdev->mr)); + return; + } + + if (value != vmem->requested_size) { + virtio_mem_resize_usable_region(vmem, value, false); + vmem->requested_size = value; + } + /* + * Trigger a config update so the guest gets notified. We trigger + * even if the size didn't change (especially helpful for debugging). + */ + virtio_notify_config(VIRTIO_DEVICE(vmem)); + } else { + vmem->requested_size = value; + } +} + +static void virtio_mem_get_block_size(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + const VirtIOMEM *vmem = VIRTIO_MEM(obj); + uint64_t value = vmem->block_size; + + /* + * If not configured by the user (and we're not realized yet), use the + * default block size we would use with the current memory backend. 
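+     * If there is no usable memdev to inspect yet, the probed THP size
+     * is the best guess we can report.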
+ */ + if (!value) { + if (vmem->memdev && memory_region_is_ram(&vmem->memdev->mr)) { + value = virtio_mem_default_block_size(vmem->memdev->mr.ram_block); + } else { + value = virtio_mem_thp_size(); + } + } + + visit_type_size(v, name, &value, errp); +} + +static void virtio_mem_set_block_size(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + VirtIOMEM *vmem = VIRTIO_MEM(obj); + Error *err = NULL; + uint64_t value; + + if (DEVICE(obj)->realized) { + error_setg(errp, "'%s' cannot be changed", name); + return; + } + + visit_type_size(v, name, &value, &err); + if (err) { + error_propagate(errp, err); + return; + } + + if (value < VIRTIO_MEM_MIN_BLOCK_SIZE) { + error_setg(errp, "'%s' property has to be at least 0x%" PRIx32, name, + VIRTIO_MEM_MIN_BLOCK_SIZE); + return; + } else if (!is_power_of_2(value)) { + error_setg(errp, "'%s' property has to be a power of two", name); + return; + } + vmem->block_size = value; +} + +static void virtio_mem_instance_init(Object *obj) +{ + VirtIOMEM *vmem = VIRTIO_MEM(obj); + + notifier_list_init(&vmem->size_change_notifiers); + QLIST_INIT(&vmem->rdl_list); + + object_property_add(obj, VIRTIO_MEM_SIZE_PROP, "size", virtio_mem_get_size, + NULL, NULL, NULL); + object_property_add(obj, VIRTIO_MEM_REQUESTED_SIZE_PROP, "size", + virtio_mem_get_requested_size, + virtio_mem_set_requested_size, NULL, NULL); + object_property_add(obj, VIRTIO_MEM_BLOCK_SIZE_PROP, "size", + virtio_mem_get_block_size, virtio_mem_set_block_size, + NULL, NULL); +} + +static Property virtio_mem_properties[] = { + DEFINE_PROP_UINT64(VIRTIO_MEM_ADDR_PROP, VirtIOMEM, addr, 0), + DEFINE_PROP_UINT32(VIRTIO_MEM_NODE_PROP, VirtIOMEM, node, 0), + DEFINE_PROP_LINK(VIRTIO_MEM_MEMDEV_PROP, VirtIOMEM, memdev, + TYPE_MEMORY_BACKEND, HostMemoryBackend *), + DEFINE_PROP_END_OF_LIST(), +}; + +static uint64_t virtio_mem_rdm_get_min_granularity(const RamDiscardManager *rdm, + const MemoryRegion *mr) +{ + const VirtIOMEM *vmem = VIRTIO_MEM(rdm); + + g_assert(mr == &vmem->memdev->mr); + return vmem->block_size; +} + +static bool virtio_mem_rdm_is_populated(const RamDiscardManager *rdm, + const MemoryRegionSection *s) +{ + const VirtIOMEM *vmem = VIRTIO_MEM(rdm); + uint64_t start_gpa = vmem->addr + s->offset_within_region; + uint64_t end_gpa = start_gpa + int128_get64(s->size); + + g_assert(s->mr == &vmem->memdev->mr); + + start_gpa = QEMU_ALIGN_DOWN(start_gpa, vmem->block_size); + end_gpa = QEMU_ALIGN_UP(end_gpa, vmem->block_size); + + if (!virtio_mem_valid_range(vmem, start_gpa, end_gpa - start_gpa)) { + return false; + } + + return virtio_mem_test_bitmap(vmem, start_gpa, end_gpa - start_gpa, true); +} + +struct VirtIOMEMReplayData { + void *fn; + void *opaque; +}; + +static int virtio_mem_rdm_replay_populated_cb(MemoryRegionSection *s, void *arg) +{ + struct VirtIOMEMReplayData *data = arg; + + return ((ReplayRamPopulate)data->fn)(s, data->opaque); +} + +static int virtio_mem_rdm_replay_populated(const RamDiscardManager *rdm, + MemoryRegionSection *s, + ReplayRamPopulate replay_fn, + void *opaque) +{ + const VirtIOMEM *vmem = VIRTIO_MEM(rdm); + struct VirtIOMEMReplayData data = { + .fn = replay_fn, + .opaque = opaque, + }; + + g_assert(s->mr == &vmem->memdev->mr); + return virtio_mem_for_each_plugged_section(vmem, s, &data, + virtio_mem_rdm_replay_populated_cb); +} + +static int virtio_mem_rdm_replay_discarded_cb(MemoryRegionSection *s, + void *arg) +{ + struct VirtIOMEMReplayData *data = arg; + + ((ReplayRamDiscard)data->fn)(s, data->opaque); + return 0; +} + +static void 
virtio_mem_rdm_replay_discarded(const RamDiscardManager *rdm, + MemoryRegionSection *s, + ReplayRamDiscard replay_fn, + void *opaque) +{ + const VirtIOMEM *vmem = VIRTIO_MEM(rdm); + struct VirtIOMEMReplayData data = { + .fn = replay_fn, + .opaque = opaque, + }; + + g_assert(s->mr == &vmem->memdev->mr); + virtio_mem_for_each_unplugged_section(vmem, s, &data, + virtio_mem_rdm_replay_discarded_cb); +} + +static void virtio_mem_rdm_register_listener(RamDiscardManager *rdm, + RamDiscardListener *rdl, + MemoryRegionSection *s) +{ + VirtIOMEM *vmem = VIRTIO_MEM(rdm); + int ret; + + g_assert(s->mr == &vmem->memdev->mr); + rdl->section = memory_region_section_new_copy(s); + + QLIST_INSERT_HEAD(&vmem->rdl_list, rdl, next); + ret = virtio_mem_for_each_plugged_section(vmem, rdl->section, rdl, + virtio_mem_notify_populate_cb); + if (ret) { + error_report("%s: Replaying plugged ranges failed: %s", __func__, + strerror(-ret)); + } +} + +static void virtio_mem_rdm_unregister_listener(RamDiscardManager *rdm, + RamDiscardListener *rdl) +{ + VirtIOMEM *vmem = VIRTIO_MEM(rdm); + + g_assert(rdl->section->mr == &vmem->memdev->mr); + if (vmem->size) { + if (rdl->double_discard_supported) { + rdl->notify_discard(rdl, rdl->section); + } else { + virtio_mem_for_each_plugged_section(vmem, rdl->section, rdl, + virtio_mem_notify_discard_cb); + } + } + + memory_region_section_free_copy(rdl->section); + rdl->section = NULL; + QLIST_REMOVE(rdl, next); +} + +static void virtio_mem_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass); + VirtIOMEMClass *vmc = VIRTIO_MEM_CLASS(klass); + RamDiscardManagerClass *rdmc = RAM_DISCARD_MANAGER_CLASS(klass); + + device_class_set_props(dc, virtio_mem_properties); + dc->vmsd = &vmstate_virtio_mem; + + set_bit(DEVICE_CATEGORY_MISC, dc->categories); + vdc->realize = virtio_mem_device_realize; + vdc->unrealize = virtio_mem_device_unrealize; + vdc->get_config = virtio_mem_get_config; + vdc->get_features = virtio_mem_get_features; + vdc->vmsd = &vmstate_virtio_mem_device; + + vmc->fill_device_info = virtio_mem_fill_device_info; + vmc->get_memory_region = virtio_mem_get_memory_region; + vmc->add_size_change_notifier = virtio_mem_add_size_change_notifier; + vmc->remove_size_change_notifier = virtio_mem_remove_size_change_notifier; + + rdmc->get_min_granularity = virtio_mem_rdm_get_min_granularity; + rdmc->is_populated = virtio_mem_rdm_is_populated; + rdmc->replay_populated = virtio_mem_rdm_replay_populated; + rdmc->replay_discarded = virtio_mem_rdm_replay_discarded; + rdmc->register_listener = virtio_mem_rdm_register_listener; + rdmc->unregister_listener = virtio_mem_rdm_unregister_listener; +} + +static const TypeInfo virtio_mem_info = { + .name = TYPE_VIRTIO_MEM, + .parent = TYPE_VIRTIO_DEVICE, + .instance_size = sizeof(VirtIOMEM), + .instance_init = virtio_mem_instance_init, + .class_init = virtio_mem_class_init, + .class_size = sizeof(VirtIOMEMClass), + .interfaces = (InterfaceInfo[]) { + { TYPE_RAM_DISCARD_MANAGER }, + { } + }, +}; + +static void virtio_register_types(void) +{ + type_register_static(&virtio_mem_info); +} + +type_init(virtio_register_types) diff --git a/hw/virtio/virtio-mmio.c b/hw/virtio/virtio-mmio.c new file mode 100644 index 000000000..72da12fea --- /dev/null +++ b/hw/virtio/virtio-mmio.c @@ -0,0 +1,865 @@ +/* + * Virtio MMIO bindings + * + * Copyright (c) 2011 Linaro Limited + * + * Author: + * Peter Maydell + * + * This program is free software; you can redistribute it and/or 
modify + * it under the terms of the GNU General Public License; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . + */ + +#include "qemu/osdep.h" +#include "standard-headers/linux/virtio_mmio.h" +#include "hw/irq.h" +#include "hw/qdev-properties.h" +#include "hw/sysbus.h" +#include "hw/virtio/virtio.h" +#include "migration/qemu-file-types.h" +#include "qemu/host-utils.h" +#include "qemu/module.h" +#include "sysemu/kvm.h" +#include "sysemu/replay.h" +#include "hw/virtio/virtio-mmio.h" +#include "qemu/error-report.h" +#include "qemu/log.h" +#include "trace.h" + +static bool virtio_mmio_ioeventfd_enabled(DeviceState *d) +{ + VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d); + + return (proxy->flags & VIRTIO_IOMMIO_FLAG_USE_IOEVENTFD) != 0; +} + +static int virtio_mmio_ioeventfd_assign(DeviceState *d, + EventNotifier *notifier, + int n, bool assign) +{ + VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d); + + if (assign) { + memory_region_add_eventfd(&proxy->iomem, VIRTIO_MMIO_QUEUE_NOTIFY, 4, + true, n, notifier); + } else { + memory_region_del_eventfd(&proxy->iomem, VIRTIO_MMIO_QUEUE_NOTIFY, 4, + true, n, notifier); + } + return 0; +} + +static void virtio_mmio_start_ioeventfd(VirtIOMMIOProxy *proxy) +{ + virtio_bus_start_ioeventfd(&proxy->bus); +} + +static void virtio_mmio_stop_ioeventfd(VirtIOMMIOProxy *proxy) +{ + virtio_bus_stop_ioeventfd(&proxy->bus); +} + +static void virtio_mmio_soft_reset(VirtIOMMIOProxy *proxy) +{ + int i; + + if (proxy->legacy) { + return; + } + + for (i = 0; i < VIRTIO_QUEUE_MAX; i++) { + proxy->vqs[i].enabled = 0; + } +} + +static uint64_t virtio_mmio_read(void *opaque, hwaddr offset, unsigned size) +{ + VirtIOMMIOProxy *proxy = (VirtIOMMIOProxy *)opaque; + VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); + + trace_virtio_mmio_read(offset); + + if (!vdev) { + /* If no backend is present, we treat most registers as + * read-as-zero, except for the magic number, version and + * vendor ID. This is not strictly sanctioned by the virtio + * spec, but it allows us to provide transports with no backend + * plugged in which don't confuse Linux's virtio code: the + * probe won't complain about the bad magic number, but the + * device ID of zero means no backend will claim it. 
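+     * The guest's probe thus completes cleanly and the empty transport
+     * is simply left unused.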
+ */ + switch (offset) { + case VIRTIO_MMIO_MAGIC_VALUE: + return VIRT_MAGIC; + case VIRTIO_MMIO_VERSION: + if (proxy->legacy) { + return VIRT_VERSION_LEGACY; + } else { + return VIRT_VERSION; + } + case VIRTIO_MMIO_VENDOR_ID: + return VIRT_VENDOR; + default: + return 0; + } + } + + if (offset >= VIRTIO_MMIO_CONFIG) { + offset -= VIRTIO_MMIO_CONFIG; + if (proxy->legacy) { + switch (size) { + case 1: + return virtio_config_readb(vdev, offset); + case 2: + return virtio_config_readw(vdev, offset); + case 4: + return virtio_config_readl(vdev, offset); + default: + abort(); + } + } else { + switch (size) { + case 1: + return virtio_config_modern_readb(vdev, offset); + case 2: + return virtio_config_modern_readw(vdev, offset); + case 4: + return virtio_config_modern_readl(vdev, offset); + default: + abort(); + } + } + } + if (size != 4) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: wrong size access to register!\n", + __func__); + return 0; + } + switch (offset) { + case VIRTIO_MMIO_MAGIC_VALUE: + return VIRT_MAGIC; + case VIRTIO_MMIO_VERSION: + if (proxy->legacy) { + return VIRT_VERSION_LEGACY; + } else { + return VIRT_VERSION; + } + case VIRTIO_MMIO_DEVICE_ID: + return vdev->device_id; + case VIRTIO_MMIO_VENDOR_ID: + return VIRT_VENDOR; + case VIRTIO_MMIO_DEVICE_FEATURES: + if (proxy->legacy) { + if (proxy->host_features_sel) { + return 0; + } else { + return vdev->host_features; + } + } else { + VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev); + return (vdev->host_features & ~vdc->legacy_features) + >> (32 * proxy->host_features_sel); + } + case VIRTIO_MMIO_QUEUE_NUM_MAX: + if (!virtio_queue_get_num(vdev, vdev->queue_sel)) { + return 0; + } + return VIRTQUEUE_MAX_SIZE; + case VIRTIO_MMIO_QUEUE_PFN: + if (!proxy->legacy) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: read from legacy register (0x%" + HWADDR_PRIx ") in non-legacy mode\n", + __func__, offset); + return 0; + } + return virtio_queue_get_addr(vdev, vdev->queue_sel) + >> proxy->guest_page_shift; + case VIRTIO_MMIO_QUEUE_READY: + if (proxy->legacy) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: read from non-legacy register (0x%" + HWADDR_PRIx ") in legacy mode\n", + __func__, offset); + return 0; + } + return proxy->vqs[vdev->queue_sel].enabled; + case VIRTIO_MMIO_INTERRUPT_STATUS: + return qatomic_read(&vdev->isr); + case VIRTIO_MMIO_STATUS: + return vdev->status; + case VIRTIO_MMIO_CONFIG_GENERATION: + if (proxy->legacy) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: read from non-legacy register (0x%" + HWADDR_PRIx ") in legacy mode\n", + __func__, offset); + return 0; + } + return vdev->generation; + case VIRTIO_MMIO_SHM_LEN_LOW: + case VIRTIO_MMIO_SHM_LEN_HIGH: + /* + * VIRTIO_MMIO_SHM_SEL is unimplemented + * according to the linux driver, if region length is -1 + * the shared memory doesn't exist + */ + return -1; + case VIRTIO_MMIO_DEVICE_FEATURES_SEL: + case VIRTIO_MMIO_DRIVER_FEATURES: + case VIRTIO_MMIO_DRIVER_FEATURES_SEL: + case VIRTIO_MMIO_GUEST_PAGE_SIZE: + case VIRTIO_MMIO_QUEUE_SEL: + case VIRTIO_MMIO_QUEUE_NUM: + case VIRTIO_MMIO_QUEUE_ALIGN: + case VIRTIO_MMIO_QUEUE_NOTIFY: + case VIRTIO_MMIO_INTERRUPT_ACK: + case VIRTIO_MMIO_QUEUE_DESC_LOW: + case VIRTIO_MMIO_QUEUE_DESC_HIGH: + case VIRTIO_MMIO_QUEUE_AVAIL_LOW: + case VIRTIO_MMIO_QUEUE_AVAIL_HIGH: + case VIRTIO_MMIO_QUEUE_USED_LOW: + case VIRTIO_MMIO_QUEUE_USED_HIGH: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: read of write-only register (0x%" HWADDR_PRIx ")\n", + __func__, offset); + return 0; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: bad register offset (0x%" 
HWADDR_PRIx ")\n", + __func__, offset); + return 0; + } + return 0; +} + +static void virtio_mmio_write(void *opaque, hwaddr offset, uint64_t value, + unsigned size) +{ + VirtIOMMIOProxy *proxy = (VirtIOMMIOProxy *)opaque; + VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); + + trace_virtio_mmio_write_offset(offset, value); + + if (!vdev) { + /* If no backend is present, we just make all registers + * write-ignored. This allows us to provide transports with + * no backend plugged in. + */ + return; + } + + if (offset >= VIRTIO_MMIO_CONFIG) { + offset -= VIRTIO_MMIO_CONFIG; + if (proxy->legacy) { + switch (size) { + case 1: + virtio_config_writeb(vdev, offset, value); + break; + case 2: + virtio_config_writew(vdev, offset, value); + break; + case 4: + virtio_config_writel(vdev, offset, value); + break; + default: + abort(); + } + return; + } else { + switch (size) { + case 1: + virtio_config_modern_writeb(vdev, offset, value); + break; + case 2: + virtio_config_modern_writew(vdev, offset, value); + break; + case 4: + virtio_config_modern_writel(vdev, offset, value); + break; + default: + abort(); + } + return; + } + } + if (size != 4) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: wrong size access to register!\n", + __func__); + return; + } + switch (offset) { + case VIRTIO_MMIO_DEVICE_FEATURES_SEL: + if (value) { + proxy->host_features_sel = 1; + } else { + proxy->host_features_sel = 0; + } + break; + case VIRTIO_MMIO_DRIVER_FEATURES: + if (proxy->legacy) { + if (proxy->guest_features_sel) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: attempt to write guest features with " + "guest_features_sel > 0 in legacy mode\n", + __func__); + } else { + virtio_set_features(vdev, value); + } + } else { + proxy->guest_features[proxy->guest_features_sel] = value; + } + break; + case VIRTIO_MMIO_DRIVER_FEATURES_SEL: + if (value) { + proxy->guest_features_sel = 1; + } else { + proxy->guest_features_sel = 0; + } + break; + case VIRTIO_MMIO_GUEST_PAGE_SIZE: + if (!proxy->legacy) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: write to legacy register (0x%" + HWADDR_PRIx ") in non-legacy mode\n", + __func__, offset); + return; + } + proxy->guest_page_shift = ctz32(value); + if (proxy->guest_page_shift > 31) { + proxy->guest_page_shift = 0; + } + trace_virtio_mmio_guest_page(value, proxy->guest_page_shift); + break; + case VIRTIO_MMIO_QUEUE_SEL: + if (value < VIRTIO_QUEUE_MAX) { + vdev->queue_sel = value; + } + break; + case VIRTIO_MMIO_QUEUE_NUM: + trace_virtio_mmio_queue_write(value, VIRTQUEUE_MAX_SIZE); + virtio_queue_set_num(vdev, vdev->queue_sel, value); + + if (proxy->legacy) { + virtio_queue_update_rings(vdev, vdev->queue_sel); + } else { + proxy->vqs[vdev->queue_sel].num = value; + } + break; + case VIRTIO_MMIO_QUEUE_ALIGN: + if (!proxy->legacy) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: write to legacy register (0x%" + HWADDR_PRIx ") in non-legacy mode\n", + __func__, offset); + return; + } + virtio_queue_set_align(vdev, vdev->queue_sel, value); + break; + case VIRTIO_MMIO_QUEUE_PFN: + if (!proxy->legacy) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: write to legacy register (0x%" + HWADDR_PRIx ") in non-legacy mode\n", + __func__, offset); + return; + } + if (value == 0) { + virtio_reset(vdev); + } else { + virtio_queue_set_addr(vdev, vdev->queue_sel, + value << proxy->guest_page_shift); + } + break; + case VIRTIO_MMIO_QUEUE_READY: + if (proxy->legacy) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: write to non-legacy register (0x%" + HWADDR_PRIx ") in legacy mode\n", + __func__, offset); + return; + } + if (value) 
{ + virtio_queue_set_num(vdev, vdev->queue_sel, + proxy->vqs[vdev->queue_sel].num); + virtio_queue_set_rings(vdev, vdev->queue_sel, + ((uint64_t)proxy->vqs[vdev->queue_sel].desc[1]) << 32 | + proxy->vqs[vdev->queue_sel].desc[0], + ((uint64_t)proxy->vqs[vdev->queue_sel].avail[1]) << 32 | + proxy->vqs[vdev->queue_sel].avail[0], + ((uint64_t)proxy->vqs[vdev->queue_sel].used[1]) << 32 | + proxy->vqs[vdev->queue_sel].used[0]); + proxy->vqs[vdev->queue_sel].enabled = 1; + } else { + proxy->vqs[vdev->queue_sel].enabled = 0; + } + break; + case VIRTIO_MMIO_QUEUE_NOTIFY: + if (value < VIRTIO_QUEUE_MAX) { + virtio_queue_notify(vdev, value); + } + break; + case VIRTIO_MMIO_INTERRUPT_ACK: + qatomic_and(&vdev->isr, ~value); + virtio_update_irq(vdev); + break; + case VIRTIO_MMIO_STATUS: + if (!(value & VIRTIO_CONFIG_S_DRIVER_OK)) { + virtio_mmio_stop_ioeventfd(proxy); + } + + if (!proxy->legacy && (value & VIRTIO_CONFIG_S_FEATURES_OK)) { + virtio_set_features(vdev, + ((uint64_t)proxy->guest_features[1]) << 32 | + proxy->guest_features[0]); + } + + virtio_set_status(vdev, value & 0xff); + + if (value & VIRTIO_CONFIG_S_DRIVER_OK) { + virtio_mmio_start_ioeventfd(proxy); + } + + if (vdev->status == 0) { + virtio_reset(vdev); + virtio_mmio_soft_reset(proxy); + } + break; + case VIRTIO_MMIO_QUEUE_DESC_LOW: + if (proxy->legacy) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: write to non-legacy register (0x%" + HWADDR_PRIx ") in legacy mode\n", + __func__, offset); + return; + } + proxy->vqs[vdev->queue_sel].desc[0] = value; + break; + case VIRTIO_MMIO_QUEUE_DESC_HIGH: + if (proxy->legacy) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: write to non-legacy register (0x%" + HWADDR_PRIx ") in legacy mode\n", + __func__, offset); + return; + } + proxy->vqs[vdev->queue_sel].desc[1] = value; + break; + case VIRTIO_MMIO_QUEUE_AVAIL_LOW: + if (proxy->legacy) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: write to non-legacy register (0x%" + HWADDR_PRIx ") in legacy mode\n", + __func__, offset); + return; + } + proxy->vqs[vdev->queue_sel].avail[0] = value; + break; + case VIRTIO_MMIO_QUEUE_AVAIL_HIGH: + if (proxy->legacy) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: write to non-legacy register (0x%" + HWADDR_PRIx ") in legacy mode\n", + __func__, offset); + return; + } + proxy->vqs[vdev->queue_sel].avail[1] = value; + break; + case VIRTIO_MMIO_QUEUE_USED_LOW: + if (proxy->legacy) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: write to non-legacy register (0x%" + HWADDR_PRIx ") in legacy mode\n", + __func__, offset); + return; + } + proxy->vqs[vdev->queue_sel].used[0] = value; + break; + case VIRTIO_MMIO_QUEUE_USED_HIGH: + if (proxy->legacy) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: write to non-legacy register (0x%" + HWADDR_PRIx ") in legacy mode\n", + __func__, offset); + return; + } + proxy->vqs[vdev->queue_sel].used[1] = value; + break; + case VIRTIO_MMIO_MAGIC_VALUE: + case VIRTIO_MMIO_VERSION: + case VIRTIO_MMIO_DEVICE_ID: + case VIRTIO_MMIO_VENDOR_ID: + case VIRTIO_MMIO_DEVICE_FEATURES: + case VIRTIO_MMIO_QUEUE_NUM_MAX: + case VIRTIO_MMIO_INTERRUPT_STATUS: + case VIRTIO_MMIO_CONFIG_GENERATION: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: write to read-only register (0x%" HWADDR_PRIx ")\n", + __func__, offset); + break; + + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: bad register offset (0x%" HWADDR_PRIx ")\n", + __func__, offset); + } +} + +static const MemoryRegionOps virtio_legacy_mem_ops = { + .read = virtio_mmio_read, + .write = virtio_mmio_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static const MemoryRegionOps 
virtio_mem_ops = { + .read = virtio_mmio_read, + .write = virtio_mmio_write, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static void virtio_mmio_update_irq(DeviceState *opaque, uint16_t vector) +{ + VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque); + VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); + int level; + + if (!vdev) { + return; + } + level = (qatomic_read(&vdev->isr) != 0); + trace_virtio_mmio_setting_irq(level); + qemu_set_irq(proxy->irq, level); +} + +static int virtio_mmio_load_config(DeviceState *opaque, QEMUFile *f) +{ + VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque); + + proxy->host_features_sel = qemu_get_be32(f); + proxy->guest_features_sel = qemu_get_be32(f); + proxy->guest_page_shift = qemu_get_be32(f); + return 0; +} + +static void virtio_mmio_save_config(DeviceState *opaque, QEMUFile *f) +{ + VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque); + + qemu_put_be32(f, proxy->host_features_sel); + qemu_put_be32(f, proxy->guest_features_sel); + qemu_put_be32(f, proxy->guest_page_shift); +} + +static const VMStateDescription vmstate_virtio_mmio_queue_state = { + .name = "virtio_mmio/queue_state", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT16(num, VirtIOMMIOQueue), + VMSTATE_BOOL(enabled, VirtIOMMIOQueue), + VMSTATE_UINT32_ARRAY(desc, VirtIOMMIOQueue, 2), + VMSTATE_UINT32_ARRAY(avail, VirtIOMMIOQueue, 2), + VMSTATE_UINT32_ARRAY(used, VirtIOMMIOQueue, 2), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_virtio_mmio_state_sub = { + .name = "virtio_mmio/state", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32_ARRAY(guest_features, VirtIOMMIOProxy, 2), + VMSTATE_STRUCT_ARRAY(vqs, VirtIOMMIOProxy, VIRTIO_QUEUE_MAX, 0, + vmstate_virtio_mmio_queue_state, + VirtIOMMIOQueue), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_virtio_mmio = { + .name = "virtio_mmio", + .version_id = 1, + .minimum_version_id = 1, + .minimum_version_id_old = 1, + .fields = (VMStateField[]) { + VMSTATE_END_OF_LIST() + }, + .subsections = (const VMStateDescription * []) { + &vmstate_virtio_mmio_state_sub, + NULL + } +}; + +static void virtio_mmio_save_extra_state(DeviceState *opaque, QEMUFile *f) +{ + VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque); + + vmstate_save_state(f, &vmstate_virtio_mmio, proxy, NULL); +} + +static int virtio_mmio_load_extra_state(DeviceState *opaque, QEMUFile *f) +{ + VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque); + + return vmstate_load_state(f, &vmstate_virtio_mmio, proxy, 1); +} + +static bool virtio_mmio_has_extra_state(DeviceState *opaque) +{ + VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque); + + return !proxy->legacy; +} + +static void virtio_mmio_reset(DeviceState *d) +{ + VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d); + int i; + + virtio_mmio_stop_ioeventfd(proxy); + virtio_bus_reset(&proxy->bus); + proxy->host_features_sel = 0; + proxy->guest_features_sel = 0; + proxy->guest_page_shift = 0; + + if (!proxy->legacy) { + proxy->guest_features[0] = proxy->guest_features[1] = 0; + + for (i = 0; i < VIRTIO_QUEUE_MAX; i++) { + proxy->vqs[i].enabled = 0; + proxy->vqs[i].num = 0; + proxy->vqs[i].desc[0] = proxy->vqs[i].desc[1] = 0; + proxy->vqs[i].avail[0] = proxy->vqs[i].avail[1] = 0; + proxy->vqs[i].used[0] = proxy->vqs[i].used[1] = 0; + } + } +} + +static int virtio_mmio_set_guest_notifier(DeviceState *d, int n, bool assign, + bool with_irqfd) +{ + VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d); + VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); + 
VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev); + VirtQueue *vq = virtio_get_queue(vdev, n); + EventNotifier *notifier = virtio_queue_get_guest_notifier(vq); + + if (assign) { + int r = event_notifier_init(notifier, 0); + if (r < 0) { + return r; + } + virtio_queue_set_guest_notifier_fd_handler(vq, true, with_irqfd); + } else { + virtio_queue_set_guest_notifier_fd_handler(vq, false, with_irqfd); + event_notifier_cleanup(notifier); + } + + if (vdc->guest_notifier_mask && vdev->use_guest_notifier_mask) { + vdc->guest_notifier_mask(vdev, n, !assign); + } + + return 0; +} + +static int virtio_mmio_set_guest_notifiers(DeviceState *d, int nvqs, + bool assign) +{ + VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d); + VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); + /* TODO: need to check if kvm-arm supports irqfd */ + bool with_irqfd = false; + int r, n; + + nvqs = MIN(nvqs, VIRTIO_QUEUE_MAX); + + for (n = 0; n < nvqs; n++) { + if (!virtio_queue_get_num(vdev, n)) { + break; + } + + r = virtio_mmio_set_guest_notifier(d, n, assign, with_irqfd); + if (r < 0) { + goto assign_error; + } + } + + return 0; + +assign_error: + /* We get here on assignment failure. Recover by undoing for VQs 0 .. n. */ + assert(assign); + while (--n >= 0) { + virtio_mmio_set_guest_notifier(d, n, !assign, false); + } + return r; +} + +static void virtio_mmio_pre_plugged(DeviceState *d, Error **errp) +{ + VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d); + VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); + + if (!proxy->legacy) { + virtio_add_feature(&vdev->host_features, VIRTIO_F_VERSION_1); + } +} + +/* virtio-mmio device */ + +static Property virtio_mmio_properties[] = { + DEFINE_PROP_BOOL("format_transport_address", VirtIOMMIOProxy, + format_transport_address, true), + DEFINE_PROP_BOOL("force-legacy", VirtIOMMIOProxy, legacy, true), + DEFINE_PROP_BIT("ioeventfd", VirtIOMMIOProxy, flags, + VIRTIO_IOMMIO_FLAG_USE_IOEVENTFD_BIT, true), + DEFINE_PROP_END_OF_LIST(), +}; + +static void virtio_mmio_realizefn(DeviceState *d, Error **errp) +{ + VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d); + SysBusDevice *sbd = SYS_BUS_DEVICE(d); + + qbus_init(&proxy->bus, sizeof(proxy->bus), TYPE_VIRTIO_MMIO_BUS, d, NULL); + sysbus_init_irq(sbd, &proxy->irq); + + if (!kvm_eventfds_enabled()) { + proxy->flags &= ~VIRTIO_IOMMIO_FLAG_USE_IOEVENTFD; + } + + /* fd-based ioevents can't be synchronized in record/replay */ + if (replay_mode != REPLAY_MODE_NONE) { + proxy->flags &= ~VIRTIO_IOMMIO_FLAG_USE_IOEVENTFD; + } + + if (proxy->legacy) { + memory_region_init_io(&proxy->iomem, OBJECT(d), + &virtio_legacy_mem_ops, proxy, + TYPE_VIRTIO_MMIO, 0x200); + } else { + memory_region_init_io(&proxy->iomem, OBJECT(d), + &virtio_mem_ops, proxy, + TYPE_VIRTIO_MMIO, 0x200); + } + sysbus_init_mmio(sbd, &proxy->iomem); +} + +static void virtio_mmio_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = virtio_mmio_realizefn; + dc->reset = virtio_mmio_reset; + set_bit(DEVICE_CATEGORY_MISC, dc->categories); + device_class_set_props(dc, virtio_mmio_properties); +} + +static const TypeInfo virtio_mmio_info = { + .name = TYPE_VIRTIO_MMIO, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(VirtIOMMIOProxy), + .class_init = virtio_mmio_class_init, +}; + +/* virtio-mmio-bus. 
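+ *
+ * Each transport instantiates one of these single-slot buses (max_dev = 1);
+ * get_dev_path() can append the transport's MMIO base address so devices
+ * get stable, unique paths.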
*/ + +static char *virtio_mmio_bus_get_dev_path(DeviceState *dev) +{ + BusState *virtio_mmio_bus; + VirtIOMMIOProxy *virtio_mmio_proxy; + char *proxy_path; + char *path; + MemoryRegionSection section; + + virtio_mmio_bus = qdev_get_parent_bus(dev); + virtio_mmio_proxy = VIRTIO_MMIO(virtio_mmio_bus->parent); + proxy_path = qdev_get_dev_path(DEVICE(virtio_mmio_proxy)); + + /* + * If @format_transport_address is false, then we just perform the same as + * virtio_bus_get_dev_path(): we delegate the address formatting for the + * device on the virtio-mmio bus to the bus that the virtio-mmio proxy + * (i.e., the device that implements the virtio-mmio bus) resides on. In + * this case the base address of the virtio-mmio transport will be + * invisible. + */ + if (!virtio_mmio_proxy->format_transport_address) { + return proxy_path; + } + + /* Otherwise, we append the base address of the transport. */ + section = memory_region_find(&virtio_mmio_proxy->iomem, 0, 0x200); + assert(section.mr); + + if (proxy_path) { + path = g_strdup_printf("%s/virtio-mmio@" TARGET_FMT_plx, proxy_path, + section.offset_within_address_space); + } else { + path = g_strdup_printf("virtio-mmio@" TARGET_FMT_plx, + section.offset_within_address_space); + } + memory_region_unref(section.mr); + + g_free(proxy_path); + return path; +} + +static void virtio_mmio_vmstate_change(DeviceState *d, bool running) +{ + VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d); + + if (running) { + virtio_mmio_start_ioeventfd(proxy); + } else { + virtio_mmio_stop_ioeventfd(proxy); + } +} + +static void virtio_mmio_bus_class_init(ObjectClass *klass, void *data) +{ + BusClass *bus_class = BUS_CLASS(klass); + VirtioBusClass *k = VIRTIO_BUS_CLASS(klass); + + k->notify = virtio_mmio_update_irq; + k->save_config = virtio_mmio_save_config; + k->load_config = virtio_mmio_load_config; + k->save_extra_state = virtio_mmio_save_extra_state; + k->load_extra_state = virtio_mmio_load_extra_state; + k->has_extra_state = virtio_mmio_has_extra_state; + k->set_guest_notifiers = virtio_mmio_set_guest_notifiers; + k->ioeventfd_enabled = virtio_mmio_ioeventfd_enabled; + k->ioeventfd_assign = virtio_mmio_ioeventfd_assign; + k->pre_plugged = virtio_mmio_pre_plugged; + k->vmstate_change = virtio_mmio_vmstate_change; + k->has_variable_vring_alignment = true; + bus_class->max_dev = 1; + bus_class->get_dev_path = virtio_mmio_bus_get_dev_path; +} + +static const TypeInfo virtio_mmio_bus_info = { + .name = TYPE_VIRTIO_MMIO_BUS, + .parent = TYPE_VIRTIO_BUS, + .instance_size = sizeof(VirtioBusState), + .class_init = virtio_mmio_bus_class_init, +}; + +static void virtio_mmio_register_types(void) +{ + type_register_static(&virtio_mmio_bus_info); + type_register_static(&virtio_mmio_info); +} + +type_init(virtio_mmio_register_types) diff --git a/hw/virtio/virtio-net-pci.c b/hw/virtio/virtio-net-pci.c new file mode 100644 index 000000000..aa0b3caec --- /dev/null +++ b/hw/virtio/virtio-net-pci.c @@ -0,0 +1,108 @@ +/* + * Virtio net PCI Bindings + * + * Copyright IBM, Corp. 2007 + * Copyright (c) 2009 CodeSourcery + * + * Authors: + * Anthony Liguori + * Paul Brook + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + * + * Contributions after 2012-01-13 are licensed under the terms of the + * GNU GPL, version 2 or (at your option) any later version. 
+ */ + +#include "qemu/osdep.h" + +#include "hw/qdev-properties.h" +#include "hw/virtio/virtio-net.h" +#include "virtio-pci.h" +#include "qapi/error.h" +#include "qemu/module.h" +#include "qom/object.h" + +typedef struct VirtIONetPCI VirtIONetPCI; + +/* + * virtio-net-pci: This extends VirtioPCIProxy. + */ +#define TYPE_VIRTIO_NET_PCI "virtio-net-pci-base" +DECLARE_INSTANCE_CHECKER(VirtIONetPCI, VIRTIO_NET_PCI, + TYPE_VIRTIO_NET_PCI) + +struct VirtIONetPCI { + VirtIOPCIProxy parent_obj; + VirtIONet vdev; +}; + +static Property virtio_net_properties[] = { + DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags, + VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true), + DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, + DEV_NVECTORS_UNSPECIFIED), + DEFINE_PROP_END_OF_LIST(), +}; + +static void virtio_net_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp) +{ + DeviceState *qdev = DEVICE(vpci_dev); + VirtIONetPCI *dev = VIRTIO_NET_PCI(vpci_dev); + DeviceState *vdev = DEVICE(&dev->vdev); + VirtIONet *net = VIRTIO_NET(vdev); + + if (vpci_dev->nvectors == DEV_NVECTORS_UNSPECIFIED) { + vpci_dev->nvectors = 2 * MAX(net->nic_conf.peers.queues, 1) + + 1 /* Config interrupt */ + + 1 /* Control vq */; + } + + virtio_net_set_netclient_name(&dev->vdev, qdev->id, + object_get_typename(OBJECT(qdev))); + qdev_realize(vdev, BUS(&vpci_dev->bus), errp); +} + +static void virtio_net_pci_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + VirtioPCIClass *vpciklass = VIRTIO_PCI_CLASS(klass); + + k->romfile = "efi-virtio.rom"; + k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET; + k->device_id = PCI_DEVICE_ID_VIRTIO_NET; + k->revision = VIRTIO_PCI_ABI_VERSION; + k->class_id = PCI_CLASS_NETWORK_ETHERNET; + set_bit(DEVICE_CATEGORY_NETWORK, dc->categories); + device_class_set_props(dc, virtio_net_properties); + vpciklass->realize = virtio_net_pci_realize; +} + +static void virtio_net_pci_instance_init(Object *obj) +{ + VirtIONetPCI *dev = VIRTIO_NET_PCI(obj); + + virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev), + TYPE_VIRTIO_NET); + object_property_add_alias(obj, "bootindex", OBJECT(&dev->vdev), + "bootindex"); +} + +static const VirtioPCIDeviceTypeInfo virtio_net_pci_info = { + .base_name = TYPE_VIRTIO_NET_PCI, + .generic_name = "virtio-net-pci", + .transitional_name = "virtio-net-pci-transitional", + .non_transitional_name = "virtio-net-pci-non-transitional", + .instance_size = sizeof(VirtIONetPCI), + .instance_init = virtio_net_pci_instance_init, + .class_init = virtio_net_pci_class_init, +}; + +static void virtio_net_pci_register(void) +{ + virtio_pci_types_register(&virtio_net_pci_info); +} + +type_init(virtio_net_pci_register) diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c new file mode 100644 index 000000000..750aa47ec --- /dev/null +++ b/hw/virtio/virtio-pci.c @@ -0,0 +1,2237 @@ +/* + * Virtio PCI Bindings + * + * Copyright IBM, Corp. 2007 + * Copyright (c) 2009 CodeSourcery + * + * Authors: + * Anthony Liguori + * Paul Brook + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + * + * Contributions after 2012-01-13 are licensed under the terms of the + * GNU GPL, version 2 or (at your option) any later version. 
+ */ + +#include "qemu/osdep.h" + +#include "exec/memop.h" +#include "standard-headers/linux/virtio_pci.h" +#include "hw/boards.h" +#include "hw/virtio/virtio.h" +#include "migration/qemu-file-types.h" +#include "hw/pci/pci.h" +#include "hw/pci/pci_bus.h" +#include "hw/qdev-properties.h" +#include "qapi/error.h" +#include "qemu/error-report.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "hw/pci/msi.h" +#include "hw/pci/msix.h" +#include "hw/loader.h" +#include "sysemu/kvm.h" +#include "virtio-pci.h" +#include "qemu/range.h" +#include "hw/virtio/virtio-bus.h" +#include "qapi/visitor.h" +#include "sysemu/replay.h" + +#define VIRTIO_PCI_REGION_SIZE(dev) VIRTIO_PCI_CONFIG_OFF(msix_present(dev)) + +#undef VIRTIO_PCI_CONFIG + +/* The remaining space is defined by each driver as the per-driver + * configuration space */ +#define VIRTIO_PCI_CONFIG_SIZE(dev) VIRTIO_PCI_CONFIG_OFF(msix_enabled(dev)) + +static void virtio_pci_bus_new(VirtioBusState *bus, size_t bus_size, + VirtIOPCIProxy *dev); +static void virtio_pci_reset(DeviceState *qdev); + +/* virtio device */ +/* DeviceState to VirtIOPCIProxy. For use off data-path. TODO: use QOM. */ +static inline VirtIOPCIProxy *to_virtio_pci_proxy(DeviceState *d) +{ + return container_of(d, VirtIOPCIProxy, pci_dev.qdev); +} + +/* DeviceState to VirtIOPCIProxy. Note: used on datapath, + * be careful and test performance if you change this. + */ +static inline VirtIOPCIProxy *to_virtio_pci_proxy_fast(DeviceState *d) +{ + return container_of(d, VirtIOPCIProxy, pci_dev.qdev); +} + +static void virtio_pci_notify(DeviceState *d, uint16_t vector) +{ + VirtIOPCIProxy *proxy = to_virtio_pci_proxy_fast(d); + + if (msix_enabled(&proxy->pci_dev)) + msix_notify(&proxy->pci_dev, vector); + else { + VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); + pci_set_irq(&proxy->pci_dev, qatomic_read(&vdev->isr) & 1); + } +} + +static void virtio_pci_save_config(DeviceState *d, QEMUFile *f) +{ + VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d); + VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); + + pci_device_save(&proxy->pci_dev, f); + msix_save(&proxy->pci_dev, f); + if (msix_present(&proxy->pci_dev)) + qemu_put_be16(f, vdev->config_vector); +} + +static const VMStateDescription vmstate_virtio_pci_modern_queue_state = { + .name = "virtio_pci/modern_queue_state", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT16(num, VirtIOPCIQueue), + VMSTATE_UNUSED(1), /* enabled was stored as be16 */ + VMSTATE_BOOL(enabled, VirtIOPCIQueue), + VMSTATE_UINT32_ARRAY(desc, VirtIOPCIQueue, 2), + VMSTATE_UINT32_ARRAY(avail, VirtIOPCIQueue, 2), + VMSTATE_UINT32_ARRAY(used, VirtIOPCIQueue, 2), + VMSTATE_END_OF_LIST() + } +}; + +static bool virtio_pci_modern_state_needed(void *opaque) +{ + VirtIOPCIProxy *proxy = opaque; + + return virtio_pci_modern(proxy); +} + +static const VMStateDescription vmstate_virtio_pci_modern_state_sub = { + .name = "virtio_pci/modern_state", + .version_id = 1, + .minimum_version_id = 1, + .needed = &virtio_pci_modern_state_needed, + .fields = (VMStateField[]) { + VMSTATE_UINT32(dfselect, VirtIOPCIProxy), + VMSTATE_UINT32(gfselect, VirtIOPCIProxy), + VMSTATE_UINT32_ARRAY(guest_features, VirtIOPCIProxy, 2), + VMSTATE_STRUCT_ARRAY(vqs, VirtIOPCIProxy, VIRTIO_QUEUE_MAX, 0, + vmstate_virtio_pci_modern_queue_state, + VirtIOPCIQueue), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_virtio_pci = { + .name = "virtio_pci", + .version_id = 1, + .minimum_version_id = 1, + 
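+    /*
+     * The modern proxy state is migrated as an optional subsection only
+     * when virtio_pci_modern_state_needed() reports it as in use.
+     */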
.minimum_version_id_old = 1, + .fields = (VMStateField[]) { + VMSTATE_END_OF_LIST() + }, + .subsections = (const VMStateDescription*[]) { + &vmstate_virtio_pci_modern_state_sub, + NULL + } +}; + +static bool virtio_pci_has_extra_state(DeviceState *d) +{ + VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d); + + return proxy->flags & VIRTIO_PCI_FLAG_MIGRATE_EXTRA; +} + +static void virtio_pci_save_extra_state(DeviceState *d, QEMUFile *f) +{ + VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d); + + vmstate_save_state(f, &vmstate_virtio_pci, proxy, NULL); +} + +static int virtio_pci_load_extra_state(DeviceState *d, QEMUFile *f) +{ + VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d); + + return vmstate_load_state(f, &vmstate_virtio_pci, proxy, 1); +} + +static void virtio_pci_save_queue(DeviceState *d, int n, QEMUFile *f) +{ + VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d); + VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); + + if (msix_present(&proxy->pci_dev)) + qemu_put_be16(f, virtio_queue_vector(vdev, n)); +} + +static int virtio_pci_load_config(DeviceState *d, QEMUFile *f) +{ + VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d); + VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); + + int ret; + ret = pci_device_load(&proxy->pci_dev, f); + if (ret) { + return ret; + } + msix_unuse_all_vectors(&proxy->pci_dev); + msix_load(&proxy->pci_dev, f); + if (msix_present(&proxy->pci_dev)) { + qemu_get_be16s(f, &vdev->config_vector); + } else { + vdev->config_vector = VIRTIO_NO_VECTOR; + } + if (vdev->config_vector != VIRTIO_NO_VECTOR) { + return msix_vector_use(&proxy->pci_dev, vdev->config_vector); + } + return 0; +} + +static int virtio_pci_load_queue(DeviceState *d, int n, QEMUFile *f) +{ + VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d); + VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); + + uint16_t vector; + if (msix_present(&proxy->pci_dev)) { + qemu_get_be16s(f, &vector); + } else { + vector = VIRTIO_NO_VECTOR; + } + virtio_queue_set_vector(vdev, n, vector); + if (vector != VIRTIO_NO_VECTOR) { + return msix_vector_use(&proxy->pci_dev, vector); + } + + return 0; +} + +static bool virtio_pci_ioeventfd_enabled(DeviceState *d) +{ + VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d); + + return (proxy->flags & VIRTIO_PCI_FLAG_USE_IOEVENTFD) != 0; +} + +#define QEMU_VIRTIO_PCI_QUEUE_MEM_MULT 0x1000 + +static inline int virtio_pci_queue_mem_mult(struct VirtIOPCIProxy *proxy) +{ + return (proxy->flags & VIRTIO_PCI_FLAG_PAGE_PER_VQ) ? 
+ QEMU_VIRTIO_PCI_QUEUE_MEM_MULT : 4; +} + +static int virtio_pci_ioeventfd_assign(DeviceState *d, EventNotifier *notifier, + int n, bool assign) +{ + VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d); + VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); + VirtQueue *vq = virtio_get_queue(vdev, n); + bool legacy = virtio_pci_legacy(proxy); + bool modern = virtio_pci_modern(proxy); + bool fast_mmio = kvm_ioeventfd_any_length_enabled(); + bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY; + MemoryRegion *modern_mr = &proxy->notify.mr; + MemoryRegion *modern_notify_mr = &proxy->notify_pio.mr; + MemoryRegion *legacy_mr = &proxy->bar; + hwaddr modern_addr = virtio_pci_queue_mem_mult(proxy) * + virtio_get_queue_index(vq); + hwaddr legacy_addr = VIRTIO_PCI_QUEUE_NOTIFY; + + if (assign) { + if (modern) { + if (fast_mmio) { + memory_region_add_eventfd(modern_mr, modern_addr, 0, + false, n, notifier); + } else { + memory_region_add_eventfd(modern_mr, modern_addr, 2, + false, n, notifier); + } + if (modern_pio) { + memory_region_add_eventfd(modern_notify_mr, 0, 2, + true, n, notifier); + } + } + if (legacy) { + memory_region_add_eventfd(legacy_mr, legacy_addr, 2, + true, n, notifier); + } + } else { + if (modern) { + if (fast_mmio) { + memory_region_del_eventfd(modern_mr, modern_addr, 0, + false, n, notifier); + } else { + memory_region_del_eventfd(modern_mr, modern_addr, 2, + false, n, notifier); + } + if (modern_pio) { + memory_region_del_eventfd(modern_notify_mr, 0, 2, + true, n, notifier); + } + } + if (legacy) { + memory_region_del_eventfd(legacy_mr, legacy_addr, 2, + true, n, notifier); + } + } + return 0; +} + +static void virtio_pci_start_ioeventfd(VirtIOPCIProxy *proxy) +{ + virtio_bus_start_ioeventfd(&proxy->bus); +} + +static void virtio_pci_stop_ioeventfd(VirtIOPCIProxy *proxy) +{ + virtio_bus_stop_ioeventfd(&proxy->bus); +} + +static void virtio_ioport_write(void *opaque, uint32_t addr, uint32_t val) +{ + VirtIOPCIProxy *proxy = opaque; + VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); + hwaddr pa; + + switch (addr) { + case VIRTIO_PCI_GUEST_FEATURES: + /* Guest does not negotiate properly? We have to assume nothing. */ + if (val & (1 << VIRTIO_F_BAD_FEATURE)) { + val = virtio_bus_get_vdev_bad_features(&proxy->bus); + } + virtio_set_features(vdev, val); + break; + case VIRTIO_PCI_QUEUE_PFN: + pa = (hwaddr)val << VIRTIO_PCI_QUEUE_ADDR_SHIFT; + if (pa == 0) { + virtio_pci_reset(DEVICE(proxy)); + } + else + virtio_queue_set_addr(vdev, vdev->queue_sel, pa); + break; + case VIRTIO_PCI_QUEUE_SEL: + if (val < VIRTIO_QUEUE_MAX) + vdev->queue_sel = val; + break; + case VIRTIO_PCI_QUEUE_NOTIFY: + if (val < VIRTIO_QUEUE_MAX) { + virtio_queue_notify(vdev, val); + } + break; + case VIRTIO_PCI_STATUS: + if (!(val & VIRTIO_CONFIG_S_DRIVER_OK)) { + virtio_pci_stop_ioeventfd(proxy); + } + + virtio_set_status(vdev, val & 0xFF); + + if (val & VIRTIO_CONFIG_S_DRIVER_OK) { + virtio_pci_start_ioeventfd(proxy); + } + + if (vdev->status == 0) { + virtio_pci_reset(DEVICE(proxy)); + } + + /* Linux before 2.6.34 drives the device without enabling + the PCI device bus master bit. Enable it automatically + for the guest. This is a PCI spec violation but so is + initiating DMA with bus master bit clear. 
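+       Such guests are recognizable by the status value they write:
+       ACKNOWLEDGE|DRIVER with DRIVER_OK still clear, which is exactly
+       the condition tested below before forcing PCI_COMMAND_MASTER on.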
*/ + if (val == (VIRTIO_CONFIG_S_ACKNOWLEDGE | VIRTIO_CONFIG_S_DRIVER)) { + pci_default_write_config(&proxy->pci_dev, PCI_COMMAND, + proxy->pci_dev.config[PCI_COMMAND] | + PCI_COMMAND_MASTER, 1); + } + break; + case VIRTIO_MSI_CONFIG_VECTOR: + msix_vector_unuse(&proxy->pci_dev, vdev->config_vector); + /* Make it possible for guest to discover an error took place. */ + if (msix_vector_use(&proxy->pci_dev, val) < 0) + val = VIRTIO_NO_VECTOR; + vdev->config_vector = val; + break; + case VIRTIO_MSI_QUEUE_VECTOR: + msix_vector_unuse(&proxy->pci_dev, + virtio_queue_vector(vdev, vdev->queue_sel)); + /* Make it possible for guest to discover an error took place. */ + if (msix_vector_use(&proxy->pci_dev, val) < 0) + val = VIRTIO_NO_VECTOR; + virtio_queue_set_vector(vdev, vdev->queue_sel, val); + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: unexpected address 0x%x value 0x%x\n", + __func__, addr, val); + break; + } +} + +static uint32_t virtio_ioport_read(VirtIOPCIProxy *proxy, uint32_t addr) +{ + VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); + uint32_t ret = 0xFFFFFFFF; + + switch (addr) { + case VIRTIO_PCI_HOST_FEATURES: + ret = vdev->host_features; + break; + case VIRTIO_PCI_GUEST_FEATURES: + ret = vdev->guest_features; + break; + case VIRTIO_PCI_QUEUE_PFN: + ret = virtio_queue_get_addr(vdev, vdev->queue_sel) + >> VIRTIO_PCI_QUEUE_ADDR_SHIFT; + break; + case VIRTIO_PCI_QUEUE_NUM: + ret = virtio_queue_get_num(vdev, vdev->queue_sel); + break; + case VIRTIO_PCI_QUEUE_SEL: + ret = vdev->queue_sel; + break; + case VIRTIO_PCI_STATUS: + ret = vdev->status; + break; + case VIRTIO_PCI_ISR: + /* reading from the ISR also clears it. */ + ret = qatomic_xchg(&vdev->isr, 0); + pci_irq_deassert(&proxy->pci_dev); + break; + case VIRTIO_MSI_CONFIG_VECTOR: + ret = vdev->config_vector; + break; + case VIRTIO_MSI_QUEUE_VECTOR: + ret = virtio_queue_vector(vdev, vdev->queue_sel); + break; + default: + break; + } + + return ret; +} + +static uint64_t virtio_pci_config_read(void *opaque, hwaddr addr, + unsigned size) +{ + VirtIOPCIProxy *proxy = opaque; + VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); + uint32_t config = VIRTIO_PCI_CONFIG_SIZE(&proxy->pci_dev); + uint64_t val = 0; + + if (vdev == NULL) { + return UINT64_MAX; + } + + if (addr < config) { + return virtio_ioport_read(proxy, addr); + } + addr -= config; + + switch (size) { + case 1: + val = virtio_config_readb(vdev, addr); + break; + case 2: + val = virtio_config_readw(vdev, addr); + if (virtio_is_big_endian(vdev)) { + val = bswap16(val); + } + break; + case 4: + val = virtio_config_readl(vdev, addr); + if (virtio_is_big_endian(vdev)) { + val = bswap32(val); + } + break; + } + return val; +} + +static void virtio_pci_config_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + VirtIOPCIProxy *proxy = opaque; + uint32_t config = VIRTIO_PCI_CONFIG_SIZE(&proxy->pci_dev); + VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); + + if (vdev == NULL) { + return; + } + + if (addr < config) { + virtio_ioport_write(proxy, addr, val); + return; + } + addr -= config; + /* + * Virtio-PCI is odd. Ioports are LE but config space is target native + * endian. 
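+ * For example, a 16-bit config access is byteswapped below whenever
+ * virtio_is_big_endian() reports a big-endian device, so the value
+ * crossing the LE ioport window lands in the device's native order.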
+ */ + switch (size) { + case 1: + virtio_config_writeb(vdev, addr, val); + break; + case 2: + if (virtio_is_big_endian(vdev)) { + val = bswap16(val); + } + virtio_config_writew(vdev, addr, val); + break; + case 4: + if (virtio_is_big_endian(vdev)) { + val = bswap32(val); + } + virtio_config_writel(vdev, addr, val); + break; + } +} + +static const MemoryRegionOps virtio_pci_config_ops = { + .read = virtio_pci_config_read, + .write = virtio_pci_config_write, + .impl = { + .min_access_size = 1, + .max_access_size = 4, + }, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static MemoryRegion *virtio_address_space_lookup(VirtIOPCIProxy *proxy, + hwaddr *off, int len) +{ + int i; + VirtIOPCIRegion *reg; + + for (i = 0; i < ARRAY_SIZE(proxy->regs); ++i) { + reg = &proxy->regs[i]; + if (*off >= reg->offset && + *off + len <= reg->offset + reg->size) { + *off -= reg->offset; + return ®->mr; + } + } + + return NULL; +} + +/* Below are generic functions to do memcpy from/to an address space, + * without byteswaps, with input validation. + * + * As regular address_space_* APIs all do some kind of byteswap at least for + * some host/target combinations, we are forced to explicitly convert to a + * known-endianness integer value. + * It doesn't really matter which endian format to go through, so the code + * below selects the endian that causes the least amount of work on the given + * host. + * + * Note: host pointer must be aligned. + */ +static +void virtio_address_space_write(VirtIOPCIProxy *proxy, hwaddr addr, + const uint8_t *buf, int len) +{ + uint64_t val; + MemoryRegion *mr; + + /* address_space_* APIs assume an aligned address. + * As address is under guest control, handle illegal values. + */ + addr &= ~(len - 1); + + mr = virtio_address_space_lookup(proxy, &addr, len); + if (!mr) { + return; + } + + /* Make sure caller aligned buf properly */ + assert(!(((uintptr_t)buf) & (len - 1))); + + switch (len) { + case 1: + val = pci_get_byte(buf); + break; + case 2: + val = pci_get_word(buf); + break; + case 4: + val = pci_get_long(buf); + break; + default: + /* As length is under guest control, handle illegal values. */ + return; + } + memory_region_dispatch_write(mr, addr, val, size_memop(len) | MO_LE, + MEMTXATTRS_UNSPECIFIED); +} + +static void +virtio_address_space_read(VirtIOPCIProxy *proxy, hwaddr addr, + uint8_t *buf, int len) +{ + uint64_t val; + MemoryRegion *mr; + + /* address_space_* APIs assume an aligned address. + * As address is under guest control, handle illegal values. + */ + addr &= ~(len - 1); + + mr = virtio_address_space_lookup(proxy, &addr, len); + if (!mr) { + return; + } + + /* Make sure caller aligned buf properly */ + assert(!(((uintptr_t)buf) & (len - 1))); + + memory_region_dispatch_read(mr, addr, &val, size_memop(len) | MO_LE, + MEMTXATTRS_UNSPECIFIED); + switch (len) { + case 1: + pci_set_byte(buf, val); + break; + case 2: + pci_set_word(buf, val); + break; + case 4: + pci_set_long(buf, val); + break; + default: + /* As length is under guest control, handle illegal values. 
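+            Only 1, 2 and 4 byte lengths are stored into buf above;
+            any other length drops the value read, mirroring the write
+            side, which returns before dispatching at all.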
*/ + break; + } +} + +static void virtio_write_config(PCIDevice *pci_dev, uint32_t address, + uint32_t val, int len) +{ + VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev); + VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); + struct virtio_pci_cfg_cap *cfg; + + pci_default_write_config(pci_dev, address, val, len); + + if (proxy->flags & VIRTIO_PCI_FLAG_INIT_FLR) { + pcie_cap_flr_write_config(pci_dev, address, val, len); + } + + if (range_covers_byte(address, len, PCI_COMMAND)) { + if (!(pci_dev->config[PCI_COMMAND] & PCI_COMMAND_MASTER)) { + virtio_set_disabled(vdev, true); + virtio_pci_stop_ioeventfd(proxy); + virtio_set_status(vdev, vdev->status & ~VIRTIO_CONFIG_S_DRIVER_OK); + } else { + virtio_set_disabled(vdev, false); + } + } + + if (proxy->config_cap && + ranges_overlap(address, len, proxy->config_cap + offsetof(struct virtio_pci_cfg_cap, + pci_cfg_data), + sizeof cfg->pci_cfg_data)) { + uint32_t off; + uint32_t len; + + cfg = (void *)(proxy->pci_dev.config + proxy->config_cap); + off = le32_to_cpu(cfg->cap.offset); + len = le32_to_cpu(cfg->cap.length); + + if (len == 1 || len == 2 || len == 4) { + assert(len <= sizeof cfg->pci_cfg_data); + virtio_address_space_write(proxy, off, cfg->pci_cfg_data, len); + } + } +} + +static uint32_t virtio_read_config(PCIDevice *pci_dev, + uint32_t address, int len) +{ + VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev); + struct virtio_pci_cfg_cap *cfg; + + if (proxy->config_cap && + ranges_overlap(address, len, proxy->config_cap + offsetof(struct virtio_pci_cfg_cap, + pci_cfg_data), + sizeof cfg->pci_cfg_data)) { + uint32_t off; + uint32_t len; + + cfg = (void *)(proxy->pci_dev.config + proxy->config_cap); + off = le32_to_cpu(cfg->cap.offset); + len = le32_to_cpu(cfg->cap.length); + + if (len == 1 || len == 2 || len == 4) { + assert(len <= sizeof cfg->pci_cfg_data); + virtio_address_space_read(proxy, off, cfg->pci_cfg_data, len); + } + } + + return pci_default_read_config(pci_dev, address, len); +} + +static int kvm_virtio_pci_vq_vector_use(VirtIOPCIProxy *proxy, + unsigned int queue_no, + unsigned int vector) +{ + VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector]; + int ret; + + if (irqfd->users == 0) { + ret = kvm_irqchip_add_msi_route(kvm_state, vector, &proxy->pci_dev); + if (ret < 0) { + return ret; + } + irqfd->virq = ret; + } + irqfd->users++; + return 0; +} + +static void kvm_virtio_pci_vq_vector_release(VirtIOPCIProxy *proxy, + unsigned int vector) +{ + VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector]; + if (--irqfd->users == 0) { + kvm_irqchip_release_virq(kvm_state, irqfd->virq); + } +} + +static int kvm_virtio_pci_irqfd_use(VirtIOPCIProxy *proxy, + unsigned int queue_no, + unsigned int vector) +{ + VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector]; + VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); + VirtQueue *vq = virtio_get_queue(vdev, queue_no); + EventNotifier *n = virtio_queue_get_guest_notifier(vq); + return kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, n, NULL, irqfd->virq); +} + +static void kvm_virtio_pci_irqfd_release(VirtIOPCIProxy *proxy, + unsigned int queue_no, + unsigned int vector) +{ + VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); + VirtQueue *vq = virtio_get_queue(vdev, queue_no); + EventNotifier *n = virtio_queue_get_guest_notifier(vq); + VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector]; + int ret; + + ret = kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, n, irqfd->virq); + assert(ret == 0); +} + +static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, int nvqs) +{ + PCIDevice *dev = 
&proxy->pci_dev; + VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); + VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); + unsigned int vector; + int ret, queue_no; + + for (queue_no = 0; queue_no < nvqs; queue_no++) { + if (!virtio_queue_get_num(vdev, queue_no)) { + break; + } + vector = virtio_queue_vector(vdev, queue_no); + if (vector >= msix_nr_vectors_allocated(dev)) { + continue; + } + ret = kvm_virtio_pci_vq_vector_use(proxy, queue_no, vector); + if (ret < 0) { + goto undo; + } + /* If guest supports masking, set up irqfd now. + * Otherwise, delay until unmasked in the frontend. + */ + if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) { + ret = kvm_virtio_pci_irqfd_use(proxy, queue_no, vector); + if (ret < 0) { + kvm_virtio_pci_vq_vector_release(proxy, vector); + goto undo; + } + } + } + return 0; + +undo: + while (--queue_no >= 0) { + vector = virtio_queue_vector(vdev, queue_no); + if (vector >= msix_nr_vectors_allocated(dev)) { + continue; + } + if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) { + kvm_virtio_pci_irqfd_release(proxy, queue_no, vector); + } + kvm_virtio_pci_vq_vector_release(proxy, vector); + } + return ret; +} + +static void kvm_virtio_pci_vector_release(VirtIOPCIProxy *proxy, int nvqs) +{ + PCIDevice *dev = &proxy->pci_dev; + VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); + unsigned int vector; + int queue_no; + VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); + + for (queue_no = 0; queue_no < nvqs; queue_no++) { + if (!virtio_queue_get_num(vdev, queue_no)) { + break; + } + vector = virtio_queue_vector(vdev, queue_no); + if (vector >= msix_nr_vectors_allocated(dev)) { + continue; + } + /* If guest supports masking, clean up irqfd now. + * Otherwise, it was cleaned when masked in the frontend. + */ + if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) { + kvm_virtio_pci_irqfd_release(proxy, queue_no, vector); + } + kvm_virtio_pci_vq_vector_release(proxy, vector); + } +} + +static int virtio_pci_vq_vector_unmask(VirtIOPCIProxy *proxy, + unsigned int queue_no, + unsigned int vector, + MSIMessage msg) +{ + VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); + VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); + VirtQueue *vq = virtio_get_queue(vdev, queue_no); + EventNotifier *n = virtio_queue_get_guest_notifier(vq); + VirtIOIRQFD *irqfd; + int ret = 0; + + if (proxy->vector_irqfd) { + irqfd = &proxy->vector_irqfd[vector]; + if (irqfd->msg.data != msg.data || irqfd->msg.address != msg.address) { + ret = kvm_irqchip_update_msi_route(kvm_state, irqfd->virq, msg, + &proxy->pci_dev); + if (ret < 0) { + return ret; + } + kvm_irqchip_commit_routes(kvm_state); + } + } + + /* If guest supports masking, irqfd is already setup, unmask it. + * Otherwise, set it up now. + */ + if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) { + k->guest_notifier_mask(vdev, queue_no, false); + /* Test after unmasking to avoid losing events. */ + if (k->guest_notifier_pending && + k->guest_notifier_pending(vdev, queue_no)) { + event_notifier_set(n); + } + } else { + ret = kvm_virtio_pci_irqfd_use(proxy, queue_no, vector); + } + return ret; +} + +static void virtio_pci_vq_vector_mask(VirtIOPCIProxy *proxy, + unsigned int queue_no, + unsigned int vector) +{ + VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); + VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); + + /* If guest supports masking, keep irqfd but mask it. + * Otherwise, clean it up now. 
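+ * This is the counterpart of virtio_pci_vq_vector_unmask() above:
+ * with masking support the irqfd stays routed and only the frontend
+ * mask flips, otherwise the irqfd route itself is torn down here.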
+ */ + if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) { + k->guest_notifier_mask(vdev, queue_no, true); + } else { + kvm_virtio_pci_irqfd_release(proxy, queue_no, vector); + } +} + +static int virtio_pci_vector_unmask(PCIDevice *dev, unsigned vector, + MSIMessage msg) +{ + VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev); + VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); + VirtQueue *vq = virtio_vector_first_queue(vdev, vector); + int ret, index, unmasked = 0; + + while (vq) { + index = virtio_get_queue_index(vq); + if (!virtio_queue_get_num(vdev, index)) { + break; + } + if (index < proxy->nvqs_with_notifiers) { + ret = virtio_pci_vq_vector_unmask(proxy, index, vector, msg); + if (ret < 0) { + goto undo; + } + ++unmasked; + } + vq = virtio_vector_next_queue(vq); + } + + return 0; + +undo: + vq = virtio_vector_first_queue(vdev, vector); + while (vq && unmasked >= 0) { + index = virtio_get_queue_index(vq); + if (index < proxy->nvqs_with_notifiers) { + virtio_pci_vq_vector_mask(proxy, index, vector); + --unmasked; + } + vq = virtio_vector_next_queue(vq); + } + return ret; +} + +static void virtio_pci_vector_mask(PCIDevice *dev, unsigned vector) +{ + VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev); + VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); + VirtQueue *vq = virtio_vector_first_queue(vdev, vector); + int index; + + while (vq) { + index = virtio_get_queue_index(vq); + if (!virtio_queue_get_num(vdev, index)) { + break; + } + if (index < proxy->nvqs_with_notifiers) { + virtio_pci_vq_vector_mask(proxy, index, vector); + } + vq = virtio_vector_next_queue(vq); + } +} + +static void virtio_pci_vector_poll(PCIDevice *dev, + unsigned int vector_start, + unsigned int vector_end) +{ + VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev); + VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); + VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); + int queue_no; + unsigned int vector; + EventNotifier *notifier; + VirtQueue *vq; + + for (queue_no = 0; queue_no < proxy->nvqs_with_notifiers; queue_no++) { + if (!virtio_queue_get_num(vdev, queue_no)) { + break; + } + vector = virtio_queue_vector(vdev, queue_no); + if (vector < vector_start || vector >= vector_end || + !msix_is_masked(dev, vector)) { + continue; + } + vq = virtio_get_queue(vdev, queue_no); + notifier = virtio_queue_get_guest_notifier(vq); + if (k->guest_notifier_pending) { + if (k->guest_notifier_pending(vdev, queue_no)) { + msix_set_pending(dev, vector); + } + } else if (event_notifier_test_and_clear(notifier)) { + msix_set_pending(dev, vector); + } + } +} + +static int virtio_pci_set_guest_notifier(DeviceState *d, int n, bool assign, + bool with_irqfd) +{ + VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d); + VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); + VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev); + VirtQueue *vq = virtio_get_queue(vdev, n); + EventNotifier *notifier = virtio_queue_get_guest_notifier(vq); + + if (assign) { + int r = event_notifier_init(notifier, 0); + if (r < 0) { + return r; + } + virtio_queue_set_guest_notifier_fd_handler(vq, true, with_irqfd); + } else { + virtio_queue_set_guest_notifier_fd_handler(vq, false, with_irqfd); + event_notifier_cleanup(notifier); + } + + if (!msix_enabled(&proxy->pci_dev) && + vdev->use_guest_notifier_mask && + vdc->guest_notifier_mask) { + vdc->guest_notifier_mask(vdev, n, !assign); + } + + return 0; +} + +static bool virtio_pci_query_guest_notifiers(DeviceState *d) +{ + 
VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d); + return msix_enabled(&proxy->pci_dev); +} + +static int virtio_pci_set_guest_notifiers(DeviceState *d, int nvqs, bool assign) +{ + VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d); + VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); + VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); + int r, n; + bool with_irqfd = msix_enabled(&proxy->pci_dev) && + kvm_msi_via_irqfd_enabled(); + + nvqs = MIN(nvqs, VIRTIO_QUEUE_MAX); + + /* When deassigning, pass a consistent nvqs value + * to avoid leaking notifiers. + */ + assert(assign || nvqs == proxy->nvqs_with_notifiers); + + proxy->nvqs_with_notifiers = nvqs; + + /* Must unset vector notifier while guest notifier is still assigned */ + if ((proxy->vector_irqfd || k->guest_notifier_mask) && !assign) { + msix_unset_vector_notifiers(&proxy->pci_dev); + if (proxy->vector_irqfd) { + kvm_virtio_pci_vector_release(proxy, nvqs); + g_free(proxy->vector_irqfd); + proxy->vector_irqfd = NULL; + } + } + + for (n = 0; n < nvqs; n++) { + if (!virtio_queue_get_num(vdev, n)) { + break; + } + + r = virtio_pci_set_guest_notifier(d, n, assign, with_irqfd); + if (r < 0) { + goto assign_error; + } + } + + /* Must set vector notifier after guest notifier has been assigned */ + if ((with_irqfd || k->guest_notifier_mask) && assign) { + if (with_irqfd) { + proxy->vector_irqfd = + g_malloc0(sizeof(*proxy->vector_irqfd) * + msix_nr_vectors_allocated(&proxy->pci_dev)); + r = kvm_virtio_pci_vector_use(proxy, nvqs); + if (r < 0) { + goto assign_error; + } + } + r = msix_set_vector_notifiers(&proxy->pci_dev, + virtio_pci_vector_unmask, + virtio_pci_vector_mask, + virtio_pci_vector_poll); + if (r < 0) { + goto notifiers_error; + } + } + + return 0; + +notifiers_error: + if (with_irqfd) { + assert(assign); + kvm_virtio_pci_vector_release(proxy, nvqs); + } + +assign_error: + /* We get here on assignment failure. Recover by undoing for VQs 0 .. n. */ + assert(assign); + while (--n >= 0) { + virtio_pci_set_guest_notifier(d, n, !assign, with_irqfd); + } + return r; +} + +static int virtio_pci_set_host_notifier_mr(DeviceState *d, int n, + MemoryRegion *mr, bool assign) +{ + VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d); + int offset; + + if (n >= VIRTIO_QUEUE_MAX || !virtio_pci_modern(proxy) || + virtio_pci_queue_mem_mult(proxy) != memory_region_size(mr)) { + return -1; + } + + if (assign) { + offset = virtio_pci_queue_mem_mult(proxy) * n; + memory_region_add_subregion_overlap(&proxy->notify.mr, offset, mr, 1); + } else { + memory_region_del_subregion(&proxy->notify.mr, mr); + } + + return 0; +} + +static void virtio_pci_vmstate_change(DeviceState *d, bool running) +{ + VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d); + VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); + + if (running) { + /* Old QEMU versions did not set bus master enable on status write. + * Detect DRIVER set and enable it. + */ + if ((proxy->flags & VIRTIO_PCI_FLAG_BUS_MASTER_BUG_MIGRATION) && + (vdev->status & VIRTIO_CONFIG_S_DRIVER) && + !(proxy->pci_dev.config[PCI_COMMAND] & PCI_COMMAND_MASTER)) { + pci_default_write_config(&proxy->pci_dev, PCI_COMMAND, + proxy->pci_dev.config[PCI_COMMAND] | + PCI_COMMAND_MASTER, 1); + } + virtio_pci_start_ioeventfd(proxy); + } else { + virtio_pci_stop_ioeventfd(proxy); + } +} + +/* + * virtio-pci: This is the PCIDevice which has a virtio-pci-bus. 
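+ *
+ * Most of the callbacks below are installed into VirtioBusClass by
+ * virtio_pci_bus_class_init() near the end of this file.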
+ */ + +static int virtio_pci_query_nvectors(DeviceState *d) +{ + VirtIOPCIProxy *proxy = VIRTIO_PCI(d); + + return proxy->nvectors; +} + +static AddressSpace *virtio_pci_get_dma_as(DeviceState *d) +{ + VirtIOPCIProxy *proxy = VIRTIO_PCI(d); + PCIDevice *dev = &proxy->pci_dev; + + return pci_get_address_space(dev); +} + +static bool virtio_pci_iommu_enabled(DeviceState *d) +{ + VirtIOPCIProxy *proxy = VIRTIO_PCI(d); + PCIDevice *dev = &proxy->pci_dev; + AddressSpace *dma_as = pci_device_iommu_address_space(dev); + + if (dma_as == &address_space_memory) { + return false; + } + + return true; +} + +static bool virtio_pci_queue_enabled(DeviceState *d, int n) +{ + VirtIOPCIProxy *proxy = VIRTIO_PCI(d); + VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); + + if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) { + return proxy->vqs[n].enabled; + } + + return virtio_queue_enabled_legacy(vdev, n); +} + +static int virtio_pci_add_mem_cap(VirtIOPCIProxy *proxy, + struct virtio_pci_cap *cap) +{ + PCIDevice *dev = &proxy->pci_dev; + int offset; + + offset = pci_add_capability(dev, PCI_CAP_ID_VNDR, 0, + cap->cap_len, &error_abort); + + assert(cap->cap_len >= sizeof *cap); + memcpy(dev->config + offset + PCI_CAP_FLAGS, &cap->cap_len, + cap->cap_len - PCI_CAP_FLAGS); + + return offset; +} + +static uint64_t virtio_pci_common_read(void *opaque, hwaddr addr, + unsigned size) +{ + VirtIOPCIProxy *proxy = opaque; + VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); + uint32_t val = 0; + int i; + + if (vdev == NULL) { + return UINT64_MAX; + } + + switch (addr) { + case VIRTIO_PCI_COMMON_DFSELECT: + val = proxy->dfselect; + break; + case VIRTIO_PCI_COMMON_DF: + if (proxy->dfselect <= 1) { + VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev); + + val = (vdev->host_features & ~vdc->legacy_features) >> + (32 * proxy->dfselect); + } + break; + case VIRTIO_PCI_COMMON_GFSELECT: + val = proxy->gfselect; + break; + case VIRTIO_PCI_COMMON_GF: + if (proxy->gfselect < ARRAY_SIZE(proxy->guest_features)) { + val = proxy->guest_features[proxy->gfselect]; + } + break; + case VIRTIO_PCI_COMMON_MSIX: + val = vdev->config_vector; + break; + case VIRTIO_PCI_COMMON_NUMQ: + for (i = 0; i < VIRTIO_QUEUE_MAX; ++i) { + if (virtio_queue_get_num(vdev, i)) { + val = i + 1; + } + } + break; + case VIRTIO_PCI_COMMON_STATUS: + val = vdev->status; + break; + case VIRTIO_PCI_COMMON_CFGGENERATION: + val = vdev->generation; + break; + case VIRTIO_PCI_COMMON_Q_SELECT: + val = vdev->queue_sel; + break; + case VIRTIO_PCI_COMMON_Q_SIZE: + val = virtio_queue_get_num(vdev, vdev->queue_sel); + break; + case VIRTIO_PCI_COMMON_Q_MSIX: + val = virtio_queue_vector(vdev, vdev->queue_sel); + break; + case VIRTIO_PCI_COMMON_Q_ENABLE: + val = proxy->vqs[vdev->queue_sel].enabled; + break; + case VIRTIO_PCI_COMMON_Q_NOFF: + /* Simply map queues in order */ + val = vdev->queue_sel; + break; + case VIRTIO_PCI_COMMON_Q_DESCLO: + val = proxy->vqs[vdev->queue_sel].desc[0]; + break; + case VIRTIO_PCI_COMMON_Q_DESCHI: + val = proxy->vqs[vdev->queue_sel].desc[1]; + break; + case VIRTIO_PCI_COMMON_Q_AVAILLO: + val = proxy->vqs[vdev->queue_sel].avail[0]; + break; + case VIRTIO_PCI_COMMON_Q_AVAILHI: + val = proxy->vqs[vdev->queue_sel].avail[1]; + break; + case VIRTIO_PCI_COMMON_Q_USEDLO: + val = proxy->vqs[vdev->queue_sel].used[0]; + break; + case VIRTIO_PCI_COMMON_Q_USEDHI: + val = proxy->vqs[vdev->queue_sel].used[1]; + break; + default: + val = 0; + } + + return val; +} + +static void virtio_pci_common_write(void *opaque, hwaddr addr, + uint64_t 
val, unsigned size) +{ + VirtIOPCIProxy *proxy = opaque; + VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); + + if (vdev == NULL) { + return; + } + + switch (addr) { + case VIRTIO_PCI_COMMON_DFSELECT: + proxy->dfselect = val; + break; + case VIRTIO_PCI_COMMON_GFSELECT: + proxy->gfselect = val; + break; + case VIRTIO_PCI_COMMON_GF: + if (proxy->gfselect < ARRAY_SIZE(proxy->guest_features)) { + proxy->guest_features[proxy->gfselect] = val; + virtio_set_features(vdev, + (((uint64_t)proxy->guest_features[1]) << 32) | + proxy->guest_features[0]); + } + break; + case VIRTIO_PCI_COMMON_MSIX: + msix_vector_unuse(&proxy->pci_dev, vdev->config_vector); + /* Make it possible for guest to discover an error took place. */ + if (msix_vector_use(&proxy->pci_dev, val) < 0) { + val = VIRTIO_NO_VECTOR; + } + vdev->config_vector = val; + break; + case VIRTIO_PCI_COMMON_STATUS: + if (!(val & VIRTIO_CONFIG_S_DRIVER_OK)) { + virtio_pci_stop_ioeventfd(proxy); + } + + virtio_set_status(vdev, val & 0xFF); + + if (val & VIRTIO_CONFIG_S_DRIVER_OK) { + virtio_pci_start_ioeventfd(proxy); + } + + if (vdev->status == 0) { + virtio_pci_reset(DEVICE(proxy)); + } + + break; + case VIRTIO_PCI_COMMON_Q_SELECT: + if (val < VIRTIO_QUEUE_MAX) { + vdev->queue_sel = val; + } + break; + case VIRTIO_PCI_COMMON_Q_SIZE: + proxy->vqs[vdev->queue_sel].num = val; + virtio_queue_set_num(vdev, vdev->queue_sel, + proxy->vqs[vdev->queue_sel].num); + break; + case VIRTIO_PCI_COMMON_Q_MSIX: + msix_vector_unuse(&proxy->pci_dev, + virtio_queue_vector(vdev, vdev->queue_sel)); + /* Make it possible for guest to discover an error took place. */ + if (msix_vector_use(&proxy->pci_dev, val) < 0) { + val = VIRTIO_NO_VECTOR; + } + virtio_queue_set_vector(vdev, vdev->queue_sel, val); + break; + case VIRTIO_PCI_COMMON_Q_ENABLE: + if (val == 1) { + virtio_queue_set_num(vdev, vdev->queue_sel, + proxy->vqs[vdev->queue_sel].num); + virtio_queue_set_rings(vdev, vdev->queue_sel, + ((uint64_t)proxy->vqs[vdev->queue_sel].desc[1]) << 32 | + proxy->vqs[vdev->queue_sel].desc[0], + ((uint64_t)proxy->vqs[vdev->queue_sel].avail[1]) << 32 | + proxy->vqs[vdev->queue_sel].avail[0], + ((uint64_t)proxy->vqs[vdev->queue_sel].used[1]) << 32 | + proxy->vqs[vdev->queue_sel].used[0]); + proxy->vqs[vdev->queue_sel].enabled = 1; + } else { + virtio_error(vdev, "wrong value for queue_enable %"PRIx64, val); + } + break; + case VIRTIO_PCI_COMMON_Q_DESCLO: + proxy->vqs[vdev->queue_sel].desc[0] = val; + break; + case VIRTIO_PCI_COMMON_Q_DESCHI: + proxy->vqs[vdev->queue_sel].desc[1] = val; + break; + case VIRTIO_PCI_COMMON_Q_AVAILLO: + proxy->vqs[vdev->queue_sel].avail[0] = val; + break; + case VIRTIO_PCI_COMMON_Q_AVAILHI: + proxy->vqs[vdev->queue_sel].avail[1] = val; + break; + case VIRTIO_PCI_COMMON_Q_USEDLO: + proxy->vqs[vdev->queue_sel].used[0] = val; + break; + case VIRTIO_PCI_COMMON_Q_USEDHI: + proxy->vqs[vdev->queue_sel].used[1] = val; + break; + default: + break; + } +} + + +static uint64_t virtio_pci_notify_read(void *opaque, hwaddr addr, + unsigned size) +{ + VirtIOPCIProxy *proxy = opaque; + if (virtio_bus_get_device(&proxy->bus) == NULL) { + return UINT64_MAX; + } + + return 0; +} + +static void virtio_pci_notify_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + VirtIOPCIProxy *proxy = opaque; + VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); + + unsigned queue = addr / virtio_pci_queue_mem_mult(proxy); + + if (vdev != NULL && queue < VIRTIO_QUEUE_MAX) { + virtio_queue_notify(vdev, queue); + } +} + +static void 
virtio_pci_notify_write_pio(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + VirtIOPCIProxy *proxy = opaque; + VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); + + unsigned queue = val; + + if (vdev != NULL && queue < VIRTIO_QUEUE_MAX) { + virtio_queue_notify(vdev, queue); + } +} + +static uint64_t virtio_pci_isr_read(void *opaque, hwaddr addr, + unsigned size) +{ + VirtIOPCIProxy *proxy = opaque; + VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); + uint64_t val; + + if (vdev == NULL) { + return UINT64_MAX; + } + + val = qatomic_xchg(&vdev->isr, 0); + pci_irq_deassert(&proxy->pci_dev); + return val; +} + +static void virtio_pci_isr_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ +} + +static uint64_t virtio_pci_device_read(void *opaque, hwaddr addr, + unsigned size) +{ + VirtIOPCIProxy *proxy = opaque; + VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); + uint64_t val; + + if (vdev == NULL) { + return UINT64_MAX; + } + + switch (size) { + case 1: + val = virtio_config_modern_readb(vdev, addr); + break; + case 2: + val = virtio_config_modern_readw(vdev, addr); + break; + case 4: + val = virtio_config_modern_readl(vdev, addr); + break; + default: + val = 0; + break; + } + return val; +} + +static void virtio_pci_device_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + VirtIOPCIProxy *proxy = opaque; + VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); + + if (vdev == NULL) { + return; + } + + switch (size) { + case 1: + virtio_config_modern_writeb(vdev, addr, val); + break; + case 2: + virtio_config_modern_writew(vdev, addr, val); + break; + case 4: + virtio_config_modern_writel(vdev, addr, val); + break; + } +} + +static void virtio_pci_modern_regions_init(VirtIOPCIProxy *proxy, + const char *vdev_name) +{ + static const MemoryRegionOps common_ops = { + .read = virtio_pci_common_read, + .write = virtio_pci_common_write, + .impl = { + .min_access_size = 1, + .max_access_size = 4, + }, + .endianness = DEVICE_LITTLE_ENDIAN, + }; + static const MemoryRegionOps isr_ops = { + .read = virtio_pci_isr_read, + .write = virtio_pci_isr_write, + .impl = { + .min_access_size = 1, + .max_access_size = 4, + }, + .endianness = DEVICE_LITTLE_ENDIAN, + }; + static const MemoryRegionOps device_ops = { + .read = virtio_pci_device_read, + .write = virtio_pci_device_write, + .impl = { + .min_access_size = 1, + .max_access_size = 4, + }, + .endianness = DEVICE_LITTLE_ENDIAN, + }; + static const MemoryRegionOps notify_ops = { + .read = virtio_pci_notify_read, + .write = virtio_pci_notify_write, + .impl = { + .min_access_size = 1, + .max_access_size = 4, + }, + .endianness = DEVICE_LITTLE_ENDIAN, + }; + static const MemoryRegionOps notify_pio_ops = { + .read = virtio_pci_notify_read, + .write = virtio_pci_notify_write_pio, + .impl = { + .min_access_size = 1, + .max_access_size = 4, + }, + .endianness = DEVICE_LITTLE_ENDIAN, + }; + g_autoptr(GString) name = g_string_new(NULL); + + g_string_printf(name, "virtio-pci-common-%s", vdev_name); + memory_region_init_io(&proxy->common.mr, OBJECT(proxy), + &common_ops, + proxy, + name->str, + proxy->common.size); + + g_string_printf(name, "virtio-pci-isr-%s", vdev_name); + memory_region_init_io(&proxy->isr.mr, OBJECT(proxy), + &isr_ops, + proxy, + name->str, + proxy->isr.size); + + g_string_printf(name, "virtio-pci-device-%s", vdev_name); + memory_region_init_io(&proxy->device.mr, OBJECT(proxy), + &device_ops, + proxy, + name->str, + proxy->device.size); + + g_string_printf(name, 
"virtio-pci-notify-%s", vdev_name); + memory_region_init_io(&proxy->notify.mr, OBJECT(proxy), + ¬ify_ops, + proxy, + name->str, + proxy->notify.size); + + g_string_printf(name, "virtio-pci-notify-pio-%s", vdev_name); + memory_region_init_io(&proxy->notify_pio.mr, OBJECT(proxy), + ¬ify_pio_ops, + proxy, + name->str, + proxy->notify_pio.size); +} + +static void virtio_pci_modern_region_map(VirtIOPCIProxy *proxy, + VirtIOPCIRegion *region, + struct virtio_pci_cap *cap, + MemoryRegion *mr, + uint8_t bar) +{ + memory_region_add_subregion(mr, region->offset, ®ion->mr); + + cap->cfg_type = region->type; + cap->bar = bar; + cap->offset = cpu_to_le32(region->offset); + cap->length = cpu_to_le32(region->size); + virtio_pci_add_mem_cap(proxy, cap); + +} + +static void virtio_pci_modern_mem_region_map(VirtIOPCIProxy *proxy, + VirtIOPCIRegion *region, + struct virtio_pci_cap *cap) +{ + virtio_pci_modern_region_map(proxy, region, cap, + &proxy->modern_bar, proxy->modern_mem_bar_idx); +} + +static void virtio_pci_modern_io_region_map(VirtIOPCIProxy *proxy, + VirtIOPCIRegion *region, + struct virtio_pci_cap *cap) +{ + virtio_pci_modern_region_map(proxy, region, cap, + &proxy->io_bar, proxy->modern_io_bar_idx); +} + +static void virtio_pci_modern_mem_region_unmap(VirtIOPCIProxy *proxy, + VirtIOPCIRegion *region) +{ + memory_region_del_subregion(&proxy->modern_bar, + ®ion->mr); +} + +static void virtio_pci_modern_io_region_unmap(VirtIOPCIProxy *proxy, + VirtIOPCIRegion *region) +{ + memory_region_del_subregion(&proxy->io_bar, + ®ion->mr); +} + +static void virtio_pci_pre_plugged(DeviceState *d, Error **errp) +{ + VirtIOPCIProxy *proxy = VIRTIO_PCI(d); + VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); + + if (virtio_pci_modern(proxy)) { + virtio_add_feature(&vdev->host_features, VIRTIO_F_VERSION_1); + } + + virtio_add_feature(&vdev->host_features, VIRTIO_F_BAD_FEATURE); +} + +/* This is called by virtio-bus just after the device is plugged. */ +static void virtio_pci_device_plugged(DeviceState *d, Error **errp) +{ + VirtIOPCIProxy *proxy = VIRTIO_PCI(d); + VirtioBusState *bus = &proxy->bus; + bool legacy = virtio_pci_legacy(proxy); + bool modern; + bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY; + uint8_t *config; + uint32_t size; + VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); + + /* + * Virtio capabilities present without + * VIRTIO_F_VERSION_1 confuses guests + */ + if (!proxy->ignore_backend_features && + !virtio_has_feature(vdev->host_features, VIRTIO_F_VERSION_1)) { + virtio_pci_disable_modern(proxy); + + if (!legacy) { + error_setg(errp, "Device doesn't support modern mode, and legacy" + " mode is disabled"); + error_append_hint(errp, "Set disable-legacy to off\n"); + + return; + } + } + + modern = virtio_pci_modern(proxy); + + config = proxy->pci_dev.config; + if (proxy->class_code) { + pci_config_set_class(config, proxy->class_code); + } + + if (legacy) { + if (!virtio_legacy_allowed(vdev)) { + /* + * To avoid migration issues, we allow legacy mode when legacy + * check is disabled in the old machine types (< 5.1). 
+ */ + if (virtio_legacy_check_disabled(vdev)) { + warn_report("device is modern-only, but for backward " + "compatibility legacy is allowed"); + } else { + error_setg(errp, + "device is modern-only, use disable-legacy=on"); + return; + } + } + if (virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM)) { + error_setg(errp, "VIRTIO_F_IOMMU_PLATFORM was supported by" + " neither legacy nor transitional device"); + return ; + } + /* + * Legacy and transitional devices use specific subsystem IDs. + * Note that the subsystem vendor ID (config + PCI_SUBSYSTEM_VENDOR_ID) + * is set to PCI_SUBVENDOR_ID_REDHAT_QUMRANET by default. + */ + pci_set_word(config + PCI_SUBSYSTEM_ID, virtio_bus_get_vdev_id(bus)); + } else { + /* pure virtio-1.0 */ + pci_set_word(config + PCI_VENDOR_ID, + PCI_VENDOR_ID_REDHAT_QUMRANET); + pci_set_word(config + PCI_DEVICE_ID, + 0x1040 + virtio_bus_get_vdev_id(bus)); + pci_config_set_revision(config, 1); + } + config[PCI_INTERRUPT_PIN] = 1; + + + if (modern) { + struct virtio_pci_cap cap = { + .cap_len = sizeof cap, + }; + struct virtio_pci_notify_cap notify = { + .cap.cap_len = sizeof notify, + .notify_off_multiplier = + cpu_to_le32(virtio_pci_queue_mem_mult(proxy)), + }; + struct virtio_pci_cfg_cap cfg = { + .cap.cap_len = sizeof cfg, + .cap.cfg_type = VIRTIO_PCI_CAP_PCI_CFG, + }; + struct virtio_pci_notify_cap notify_pio = { + .cap.cap_len = sizeof notify, + .notify_off_multiplier = cpu_to_le32(0x0), + }; + + struct virtio_pci_cfg_cap *cfg_mask; + + virtio_pci_modern_regions_init(proxy, vdev->name); + + virtio_pci_modern_mem_region_map(proxy, &proxy->common, &cap); + virtio_pci_modern_mem_region_map(proxy, &proxy->isr, &cap); + virtio_pci_modern_mem_region_map(proxy, &proxy->device, &cap); + virtio_pci_modern_mem_region_map(proxy, &proxy->notify, ¬ify.cap); + + if (modern_pio) { + memory_region_init(&proxy->io_bar, OBJECT(proxy), + "virtio-pci-io", 0x4); + + pci_register_bar(&proxy->pci_dev, proxy->modern_io_bar_idx, + PCI_BASE_ADDRESS_SPACE_IO, &proxy->io_bar); + + virtio_pci_modern_io_region_map(proxy, &proxy->notify_pio, + ¬ify_pio.cap); + } + + pci_register_bar(&proxy->pci_dev, proxy->modern_mem_bar_idx, + PCI_BASE_ADDRESS_SPACE_MEMORY | + PCI_BASE_ADDRESS_MEM_PREFETCH | + PCI_BASE_ADDRESS_MEM_TYPE_64, + &proxy->modern_bar); + + proxy->config_cap = virtio_pci_add_mem_cap(proxy, &cfg.cap); + cfg_mask = (void *)(proxy->pci_dev.wmask + proxy->config_cap); + pci_set_byte(&cfg_mask->cap.bar, ~0x0); + pci_set_long((uint8_t *)&cfg_mask->cap.offset, ~0x0); + pci_set_long((uint8_t *)&cfg_mask->cap.length, ~0x0); + pci_set_long(cfg_mask->pci_cfg_data, ~0x0); + } + + if (proxy->nvectors) { + int err = msix_init_exclusive_bar(&proxy->pci_dev, proxy->nvectors, + proxy->msix_bar_idx, NULL); + if (err) { + /* Notice when a system that supports MSIx can't initialize it */ + if (err != -ENOTSUP) { + warn_report("unable to init msix vectors to %" PRIu32, + proxy->nvectors); + } + proxy->nvectors = 0; + } + } + + proxy->pci_dev.config_write = virtio_write_config; + proxy->pci_dev.config_read = virtio_read_config; + + if (legacy) { + size = VIRTIO_PCI_REGION_SIZE(&proxy->pci_dev) + + virtio_bus_get_vdev_config_len(bus); + size = pow2ceil(size); + + memory_region_init_io(&proxy->bar, OBJECT(proxy), + &virtio_pci_config_ops, + proxy, "virtio-pci", size); + + pci_register_bar(&proxy->pci_dev, proxy->legacy_io_bar_idx, + PCI_BASE_ADDRESS_SPACE_IO, &proxy->bar); + } +} + +static void virtio_pci_device_unplugged(DeviceState *d) +{ + VirtIOPCIProxy *proxy = VIRTIO_PCI(d); + bool modern = 
virtio_pci_modern(proxy); + bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY; + + virtio_pci_stop_ioeventfd(proxy); + + if (modern) { + virtio_pci_modern_mem_region_unmap(proxy, &proxy->common); + virtio_pci_modern_mem_region_unmap(proxy, &proxy->isr); + virtio_pci_modern_mem_region_unmap(proxy, &proxy->device); + virtio_pci_modern_mem_region_unmap(proxy, &proxy->notify); + if (modern_pio) { + virtio_pci_modern_io_region_unmap(proxy, &proxy->notify_pio); + } + } +} + +static void virtio_pci_realize(PCIDevice *pci_dev, Error **errp) +{ + VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev); + VirtioPCIClass *k = VIRTIO_PCI_GET_CLASS(pci_dev); + bool pcie_port = pci_bus_is_express(pci_get_bus(pci_dev)) && + !pci_bus_is_root(pci_get_bus(pci_dev)); + + if (kvm_enabled() && !kvm_has_many_ioeventfds()) { + proxy->flags &= ~VIRTIO_PCI_FLAG_USE_IOEVENTFD; + } + + /* fd-based ioevents can't be synchronized in record/replay */ + if (replay_mode != REPLAY_MODE_NONE) { + proxy->flags &= ~VIRTIO_PCI_FLAG_USE_IOEVENTFD; + } + + /* + * virtio pci bar layout used by default. + * subclasses can re-arrange things if needed. + * + * region 0 -- virtio legacy io bar + * region 1 -- msi-x bar + * region 2 -- virtio modern io bar (off by default) + * region 4+5 -- virtio modern memory (64bit) bar + * + */ + proxy->legacy_io_bar_idx = 0; + proxy->msix_bar_idx = 1; + proxy->modern_io_bar_idx = 2; + proxy->modern_mem_bar_idx = 4; + + proxy->common.offset = 0x0; + proxy->common.size = 0x1000; + proxy->common.type = VIRTIO_PCI_CAP_COMMON_CFG; + + proxy->isr.offset = 0x1000; + proxy->isr.size = 0x1000; + proxy->isr.type = VIRTIO_PCI_CAP_ISR_CFG; + + proxy->device.offset = 0x2000; + proxy->device.size = 0x1000; + proxy->device.type = VIRTIO_PCI_CAP_DEVICE_CFG; + + proxy->notify.offset = 0x3000; + proxy->notify.size = virtio_pci_queue_mem_mult(proxy) * VIRTIO_QUEUE_MAX; + proxy->notify.type = VIRTIO_PCI_CAP_NOTIFY_CFG; + + proxy->notify_pio.offset = 0x0; + proxy->notify_pio.size = 0x4; + proxy->notify_pio.type = VIRTIO_PCI_CAP_NOTIFY_CFG; + + /* subclasses can enforce modern, so do this unconditionally */ + memory_region_init(&proxy->modern_bar, OBJECT(proxy), "virtio-pci", + /* PCI BAR regions must be powers of 2 */ + pow2ceil(proxy->notify.offset + proxy->notify.size)); + + if (proxy->disable_legacy == ON_OFF_AUTO_AUTO) { + proxy->disable_legacy = pcie_port ? ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF; + } + + if (!virtio_pci_modern(proxy) && !virtio_pci_legacy(proxy)) { + error_setg(errp, "device cannot work as neither modern nor legacy mode" + " is enabled"); + error_append_hint(errp, "Set either disable-modern or disable-legacy" + " to off\n"); + return; + } + + if (pcie_port && pci_is_express(pci_dev)) { + int pos; + uint16_t last_pcie_cap_offset = PCI_CONFIG_SPACE_SIZE; + + pos = pcie_endpoint_cap_init(pci_dev, 0); + assert(pos > 0); + + pos = pci_add_capability(pci_dev, PCI_CAP_ID_PM, 0, + PCI_PM_SIZEOF, errp); + if (pos < 0) { + return; + } + + pci_dev->exp.pm_cap = pos; + + /* + * Indicates that this function complies with revision 1.2 of the + * PCI Power Management Interface Specification. 
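+ * (version field value 0x3 in PCI_PM_PMC encodes revision 1.2)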
+ */ + pci_set_word(pci_dev->config + pos + PCI_PM_PMC, 0x3); + + if (proxy->flags & VIRTIO_PCI_FLAG_AER) { + pcie_aer_init(pci_dev, PCI_ERR_VER, last_pcie_cap_offset, + PCI_ERR_SIZEOF, NULL); + last_pcie_cap_offset += PCI_ERR_SIZEOF; + } + + if (proxy->flags & VIRTIO_PCI_FLAG_INIT_DEVERR) { + /* Init error enabling flags */ + pcie_cap_deverr_init(pci_dev); + } + + if (proxy->flags & VIRTIO_PCI_FLAG_INIT_LNKCTL) { + /* Init Link Control Register */ + pcie_cap_lnkctl_init(pci_dev); + } + + if (proxy->flags & VIRTIO_PCI_FLAG_INIT_PM) { + /* Init Power Management Control Register */ + pci_set_word(pci_dev->wmask + pos + PCI_PM_CTRL, + PCI_PM_CTRL_STATE_MASK); + } + + if (proxy->flags & VIRTIO_PCI_FLAG_ATS) { + pcie_ats_init(pci_dev, last_pcie_cap_offset, + proxy->flags & VIRTIO_PCI_FLAG_ATS_PAGE_ALIGNED); + last_pcie_cap_offset += PCI_EXT_CAP_ATS_SIZEOF; + } + + if (proxy->flags & VIRTIO_PCI_FLAG_INIT_FLR) { + /* Set Function Level Reset capability bit */ + pcie_cap_flr_init(pci_dev); + } + } else { + /* + * make future invocations of pci_is_express() return false + * and pci_config_size() return PCI_CONFIG_SPACE_SIZE. + */ + pci_dev->cap_present &= ~QEMU_PCI_CAP_EXPRESS; + } + + virtio_pci_bus_new(&proxy->bus, sizeof(proxy->bus), proxy); + if (k->realize) { + k->realize(proxy, errp); + } +} + +static void virtio_pci_exit(PCIDevice *pci_dev) +{ + VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev); + bool pcie_port = pci_bus_is_express(pci_get_bus(pci_dev)) && + !pci_bus_is_root(pci_get_bus(pci_dev)); + + msix_uninit_exclusive_bar(pci_dev); + if (proxy->flags & VIRTIO_PCI_FLAG_AER && pcie_port && + pci_is_express(pci_dev)) { + pcie_aer_exit(pci_dev); + } +} + +static void virtio_pci_reset(DeviceState *qdev) +{ + VirtIOPCIProxy *proxy = VIRTIO_PCI(qdev); + VirtioBusState *bus = VIRTIO_BUS(&proxy->bus); + PCIDevice *dev = PCI_DEVICE(qdev); + int i; + + virtio_pci_stop_ioeventfd(proxy); + virtio_bus_reset(bus); + msix_unuse_all_vectors(&proxy->pci_dev); + + for (i = 0; i < VIRTIO_QUEUE_MAX; i++) { + proxy->vqs[i].enabled = 0; + proxy->vqs[i].num = 0; + proxy->vqs[i].desc[0] = proxy->vqs[i].desc[1] = 0; + proxy->vqs[i].avail[0] = proxy->vqs[i].avail[1] = 0; + proxy->vqs[i].used[0] = proxy->vqs[i].used[1] = 0; + } + + if (pci_is_express(dev)) { + pcie_cap_deverr_reset(dev); + pcie_cap_lnkctl_reset(dev); + + pci_set_word(dev->config + dev->exp.pm_cap + PCI_PM_CTRL, 0); + } +} + +static Property virtio_pci_properties[] = { + DEFINE_PROP_BIT("virtio-pci-bus-master-bug-migration", VirtIOPCIProxy, flags, + VIRTIO_PCI_FLAG_BUS_MASTER_BUG_MIGRATION_BIT, false), + DEFINE_PROP_BIT("migrate-extra", VirtIOPCIProxy, flags, + VIRTIO_PCI_FLAG_MIGRATE_EXTRA_BIT, true), + DEFINE_PROP_BIT("modern-pio-notify", VirtIOPCIProxy, flags, + VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY_BIT, false), + DEFINE_PROP_BIT("x-disable-pcie", VirtIOPCIProxy, flags, + VIRTIO_PCI_FLAG_DISABLE_PCIE_BIT, false), + DEFINE_PROP_BIT("page-per-vq", VirtIOPCIProxy, flags, + VIRTIO_PCI_FLAG_PAGE_PER_VQ_BIT, false), + DEFINE_PROP_BOOL("x-ignore-backend-features", VirtIOPCIProxy, + ignore_backend_features, false), + DEFINE_PROP_BIT("ats", VirtIOPCIProxy, flags, + VIRTIO_PCI_FLAG_ATS_BIT, false), + DEFINE_PROP_BIT("x-ats-page-aligned", VirtIOPCIProxy, flags, + VIRTIO_PCI_FLAG_ATS_PAGE_ALIGNED_BIT, true), + DEFINE_PROP_BIT("x-pcie-deverr-init", VirtIOPCIProxy, flags, + VIRTIO_PCI_FLAG_INIT_DEVERR_BIT, true), + DEFINE_PROP_BIT("x-pcie-lnkctl-init", VirtIOPCIProxy, flags, + VIRTIO_PCI_FLAG_INIT_LNKCTL_BIT, true), + DEFINE_PROP_BIT("x-pcie-pm-init", VirtIOPCIProxy, 
flags, + VIRTIO_PCI_FLAG_INIT_PM_BIT, true), + DEFINE_PROP_BIT("x-pcie-flr-init", VirtIOPCIProxy, flags, + VIRTIO_PCI_FLAG_INIT_FLR_BIT, true), + DEFINE_PROP_BIT("aer", VirtIOPCIProxy, flags, + VIRTIO_PCI_FLAG_AER_BIT, false), + DEFINE_PROP_END_OF_LIST(), +}; + +static void virtio_pci_dc_realize(DeviceState *qdev, Error **errp) +{ + VirtioPCIClass *vpciklass = VIRTIO_PCI_GET_CLASS(qdev); + VirtIOPCIProxy *proxy = VIRTIO_PCI(qdev); + PCIDevice *pci_dev = &proxy->pci_dev; + + if (!(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_PCIE) && + virtio_pci_modern(proxy)) { + pci_dev->cap_present |= QEMU_PCI_CAP_EXPRESS; + } + + vpciklass->parent_dc_realize(qdev, errp); +} + +static void virtio_pci_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + VirtioPCIClass *vpciklass = VIRTIO_PCI_CLASS(klass); + + device_class_set_props(dc, virtio_pci_properties); + k->realize = virtio_pci_realize; + k->exit = virtio_pci_exit; + k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET; + k->revision = VIRTIO_PCI_ABI_VERSION; + k->class_id = PCI_CLASS_OTHERS; + device_class_set_parent_realize(dc, virtio_pci_dc_realize, + &vpciklass->parent_dc_realize); + dc->reset = virtio_pci_reset; +} + +static const TypeInfo virtio_pci_info = { + .name = TYPE_VIRTIO_PCI, + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(VirtIOPCIProxy), + .class_init = virtio_pci_class_init, + .class_size = sizeof(VirtioPCIClass), + .abstract = true, +}; + +static Property virtio_pci_generic_properties[] = { + DEFINE_PROP_ON_OFF_AUTO("disable-legacy", VirtIOPCIProxy, disable_legacy, + ON_OFF_AUTO_AUTO), + DEFINE_PROP_BOOL("disable-modern", VirtIOPCIProxy, disable_modern, false), + DEFINE_PROP_END_OF_LIST(), +}; + +static void virtio_pci_base_class_init(ObjectClass *klass, void *data) +{ + const VirtioPCIDeviceTypeInfo *t = data; + if (t->class_init) { + t->class_init(klass, NULL); + } +} + +static void virtio_pci_generic_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + device_class_set_props(dc, virtio_pci_generic_properties); +} + +static void virtio_pci_transitional_instance_init(Object *obj) +{ + VirtIOPCIProxy *proxy = VIRTIO_PCI(obj); + + proxy->disable_legacy = ON_OFF_AUTO_OFF; + proxy->disable_modern = false; +} + +static void virtio_pci_non_transitional_instance_init(Object *obj) +{ + VirtIOPCIProxy *proxy = VIRTIO_PCI(obj); + + proxy->disable_legacy = ON_OFF_AUTO_ON; + proxy->disable_modern = false; +} + +void virtio_pci_types_register(const VirtioPCIDeviceTypeInfo *t) +{ + char *base_name = NULL; + TypeInfo base_type_info = { + .name = t->base_name, + .parent = t->parent ? 
t->parent : TYPE_VIRTIO_PCI, + .instance_size = t->instance_size, + .instance_init = t->instance_init, + .class_size = t->class_size, + .abstract = true, + .interfaces = t->interfaces, + }; + TypeInfo generic_type_info = { + .name = t->generic_name, + .parent = base_type_info.name, + .class_init = virtio_pci_generic_class_init, + .interfaces = (InterfaceInfo[]) { + { INTERFACE_PCIE_DEVICE }, + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { } + }, + }; + + if (!base_type_info.name) { + /* No base type -> register a single generic device type */ + /* use intermediate %s-base-type to add generic device props */ + base_name = g_strdup_printf("%s-base-type", t->generic_name); + base_type_info.name = base_name; + base_type_info.class_init = virtio_pci_generic_class_init; + + generic_type_info.parent = base_name; + generic_type_info.class_init = virtio_pci_base_class_init; + generic_type_info.class_data = (void *)t; + + assert(!t->non_transitional_name); + assert(!t->transitional_name); + } else { + base_type_info.class_init = virtio_pci_base_class_init; + base_type_info.class_data = (void *)t; + } + + type_register(&base_type_info); + if (generic_type_info.name) { + type_register(&generic_type_info); + } + + if (t->non_transitional_name) { + const TypeInfo non_transitional_type_info = { + .name = t->non_transitional_name, + .parent = base_type_info.name, + .instance_init = virtio_pci_non_transitional_instance_init, + .interfaces = (InterfaceInfo[]) { + { INTERFACE_PCIE_DEVICE }, + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { } + }, + }; + type_register(&non_transitional_type_info); + } + + if (t->transitional_name) { + const TypeInfo transitional_type_info = { + .name = t->transitional_name, + .parent = base_type_info.name, + .instance_init = virtio_pci_transitional_instance_init, + .interfaces = (InterfaceInfo[]) { + /* + * Transitional virtio devices work only as Conventional PCI + * devices because they require PIO ports. + */ + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { } + }, + }; + type_register(&transitional_type_info); + } + g_free(base_name); +} + +unsigned virtio_pci_optimal_num_queues(unsigned fixed_queues) +{ + /* + * 1:1 vq to vCPU mapping is ideal because the same vCPU that submitted + * virtqueue buffers can handle their completion. When a different vCPU + * handles completion it may need to IPI the vCPU that submitted the + * request and this adds overhead. + * + * Virtqueues consume guest RAM and MSI-X vectors. This is wasteful in + * guests with very many vCPUs and a device that is only used by a few + * vCPUs. Unfortunately optimizing that case requires manual pinning inside + * the guest, so those users might as well manually set the number of + * queues. There is no upper limit that can be applied automatically and + * doing so arbitrarily would result in a sudden performance drop once the + * threshold number of vCPUs is exceeded. + */ + unsigned num_queues = current_machine->smp.cpus; + + /* + * The maximum number of MSI-X vectors is PCI_MSIX_FLAGS_QSIZE + 1, but the + * config change interrupt and the fixed virtqueues must be taken into + * account too. + */ + num_queues = MIN(num_queues, PCI_MSIX_FLAGS_QSIZE - fixed_queues); + + /* + * There is a limit to how many virtqueues a device can have. 
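+ * VIRTIO_QUEUE_MAX is that limit, so the fixed virtqueues are
+ * subtracted from it just as they were from the MSI-X budget above.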
+ */ + return MIN(num_queues, VIRTIO_QUEUE_MAX - fixed_queues); +} + +/* virtio-pci-bus */ + +static void virtio_pci_bus_new(VirtioBusState *bus, size_t bus_size, + VirtIOPCIProxy *dev) +{ + DeviceState *qdev = DEVICE(dev); + char virtio_bus_name[] = "virtio-bus"; + + qbus_init(bus, bus_size, TYPE_VIRTIO_PCI_BUS, qdev, virtio_bus_name); +} + +static void virtio_pci_bus_class_init(ObjectClass *klass, void *data) +{ + BusClass *bus_class = BUS_CLASS(klass); + VirtioBusClass *k = VIRTIO_BUS_CLASS(klass); + bus_class->max_dev = 1; + k->notify = virtio_pci_notify; + k->save_config = virtio_pci_save_config; + k->load_config = virtio_pci_load_config; + k->save_queue = virtio_pci_save_queue; + k->load_queue = virtio_pci_load_queue; + k->save_extra_state = virtio_pci_save_extra_state; + k->load_extra_state = virtio_pci_load_extra_state; + k->has_extra_state = virtio_pci_has_extra_state; + k->query_guest_notifiers = virtio_pci_query_guest_notifiers; + k->set_guest_notifiers = virtio_pci_set_guest_notifiers; + k->set_host_notifier_mr = virtio_pci_set_host_notifier_mr; + k->vmstate_change = virtio_pci_vmstate_change; + k->pre_plugged = virtio_pci_pre_plugged; + k->device_plugged = virtio_pci_device_plugged; + k->device_unplugged = virtio_pci_device_unplugged; + k->query_nvectors = virtio_pci_query_nvectors; + k->ioeventfd_enabled = virtio_pci_ioeventfd_enabled; + k->ioeventfd_assign = virtio_pci_ioeventfd_assign; + k->get_dma_as = virtio_pci_get_dma_as; + k->iommu_enabled = virtio_pci_iommu_enabled; + k->queue_enabled = virtio_pci_queue_enabled; +} + +static const TypeInfo virtio_pci_bus_info = { + .name = TYPE_VIRTIO_PCI_BUS, + .parent = TYPE_VIRTIO_BUS, + .instance_size = sizeof(VirtioPCIBusState), + .class_size = sizeof(VirtioPCIBusClass), + .class_init = virtio_pci_bus_class_init, +}; + +static void virtio_pci_register_types(void) +{ + /* Base types: */ + type_register_static(&virtio_pci_bus_info); + type_register_static(&virtio_pci_info); +} + +type_init(virtio_pci_register_types) + diff --git a/hw/virtio/virtio-pci.h b/hw/virtio/virtio-pci.h new file mode 100644 index 000000000..2446dcd9a --- /dev/null +++ b/hw/virtio/virtio-pci.h @@ -0,0 +1,255 @@ +/* + * Virtio PCI Bindings + * + * Copyright IBM, Corp. 2007 + * Copyright (c) 2009 CodeSourcery + * + * Authors: + * Anthony Liguori + * Paul Brook + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + */ + +#ifndef QEMU_VIRTIO_PCI_H +#define QEMU_VIRTIO_PCI_H + +#include "hw/pci/msi.h" +#include "hw/virtio/virtio-bus.h" +#include "qom/object.h" + + +/* virtio-pci-bus */ + +typedef struct VirtioBusState VirtioPCIBusState; +typedef struct VirtioBusClass VirtioPCIBusClass; + +#define TYPE_VIRTIO_PCI_BUS "virtio-pci-bus" +DECLARE_OBJ_CHECKERS(VirtioPCIBusState, VirtioPCIBusClass, + VIRTIO_PCI_BUS, TYPE_VIRTIO_PCI_BUS) + +enum { + VIRTIO_PCI_FLAG_BUS_MASTER_BUG_MIGRATION_BIT, + VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, + VIRTIO_PCI_FLAG_MIGRATE_EXTRA_BIT, + VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY_BIT, + VIRTIO_PCI_FLAG_DISABLE_PCIE_BIT, + VIRTIO_PCI_FLAG_PAGE_PER_VQ_BIT, + VIRTIO_PCI_FLAG_ATS_BIT, + VIRTIO_PCI_FLAG_INIT_DEVERR_BIT, + VIRTIO_PCI_FLAG_INIT_LNKCTL_BIT, + VIRTIO_PCI_FLAG_INIT_PM_BIT, + VIRTIO_PCI_FLAG_INIT_FLR_BIT, + VIRTIO_PCI_FLAG_AER_BIT, + VIRTIO_PCI_FLAG_ATS_PAGE_ALIGNED_BIT, +}; + +/* Need to activate work-arounds for buggy guests at vmstate load. 
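+   (consumed by virtio_pci_vmstate_change(), which re-enables bus
+   mastering for such guests when the VM resumes)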
*/ +#define VIRTIO_PCI_FLAG_BUS_MASTER_BUG_MIGRATION \ + (1 << VIRTIO_PCI_FLAG_BUS_MASTER_BUG_MIGRATION_BIT) + +/* Performance improves when virtqueue kick processing is decoupled from the + * vcpu thread using ioeventfd for some devices. */ +#define VIRTIO_PCI_FLAG_USE_IOEVENTFD (1 << VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT) + +/* virtio version flags */ +#define VIRTIO_PCI_FLAG_DISABLE_PCIE (1 << VIRTIO_PCI_FLAG_DISABLE_PCIE_BIT) + +/* migrate extra state */ +#define VIRTIO_PCI_FLAG_MIGRATE_EXTRA (1 << VIRTIO_PCI_FLAG_MIGRATE_EXTRA_BIT) + +/* have pio notification for modern device ? */ +#define VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY \ + (1 << VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY_BIT) + +/* page per vq flag to be used by split drivers within guests */ +#define VIRTIO_PCI_FLAG_PAGE_PER_VQ \ + (1 << VIRTIO_PCI_FLAG_PAGE_PER_VQ_BIT) + +/* address space translation service */ +#define VIRTIO_PCI_FLAG_ATS (1 << VIRTIO_PCI_FLAG_ATS_BIT) + +/* Init error enabling flags */ +#define VIRTIO_PCI_FLAG_INIT_DEVERR (1 << VIRTIO_PCI_FLAG_INIT_DEVERR_BIT) + +/* Init Link Control register */ +#define VIRTIO_PCI_FLAG_INIT_LNKCTL (1 << VIRTIO_PCI_FLAG_INIT_LNKCTL_BIT) + +/* Init Power Management */ +#define VIRTIO_PCI_FLAG_INIT_PM (1 << VIRTIO_PCI_FLAG_INIT_PM_BIT) + +/* Init Function Level Reset capability */ +#define VIRTIO_PCI_FLAG_INIT_FLR (1 << VIRTIO_PCI_FLAG_INIT_FLR_BIT) + +/* Advanced Error Reporting capability */ +#define VIRTIO_PCI_FLAG_AER (1 << VIRTIO_PCI_FLAG_AER_BIT) + +/* Page Aligned Address space Translation Service */ +#define VIRTIO_PCI_FLAG_ATS_PAGE_ALIGNED \ + (1 << VIRTIO_PCI_FLAG_ATS_PAGE_ALIGNED_BIT) + +typedef struct { + MSIMessage msg; + int virq; + unsigned int users; +} VirtIOIRQFD; + +/* + * virtio-pci: This is the PCIDevice which has a virtio-pci-bus. 
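+ * Concrete virtio-FOO-pci types subclass this proxy and embed the
+ * matching VirtIODevice, which is realized on the proxy's
+ * virtio-pci-bus.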
+ */ +#define TYPE_VIRTIO_PCI "virtio-pci" +OBJECT_DECLARE_TYPE(VirtIOPCIProxy, VirtioPCIClass, VIRTIO_PCI) + +struct VirtioPCIClass { + PCIDeviceClass parent_class; + DeviceRealize parent_dc_realize; + void (*realize)(VirtIOPCIProxy *vpci_dev, Error **errp); +}; + +typedef struct VirtIOPCIRegion { + MemoryRegion mr; + uint32_t offset; + uint32_t size; + uint32_t type; +} VirtIOPCIRegion; + +typedef struct VirtIOPCIQueue { + uint16_t num; + bool enabled; + uint32_t desc[2]; + uint32_t avail[2]; + uint32_t used[2]; +} VirtIOPCIQueue; + +struct VirtIOPCIProxy { + PCIDevice pci_dev; + MemoryRegion bar; + union { + struct { + VirtIOPCIRegion common; + VirtIOPCIRegion isr; + VirtIOPCIRegion device; + VirtIOPCIRegion notify; + VirtIOPCIRegion notify_pio; + }; + VirtIOPCIRegion regs[5]; + }; + MemoryRegion modern_bar; + MemoryRegion io_bar; + uint32_t legacy_io_bar_idx; + uint32_t msix_bar_idx; + uint32_t modern_io_bar_idx; + uint32_t modern_mem_bar_idx; + int config_cap; + uint32_t flags; + bool disable_modern; + bool ignore_backend_features; + OnOffAuto disable_legacy; + uint32_t class_code; + uint32_t nvectors; + uint32_t dfselect; + uint32_t gfselect; + uint32_t guest_features[2]; + VirtIOPCIQueue vqs[VIRTIO_QUEUE_MAX]; + + VirtIOIRQFD *vector_irqfd; + int nvqs_with_notifiers; + VirtioBusState bus; +}; + +static inline bool virtio_pci_modern(VirtIOPCIProxy *proxy) +{ + return !proxy->disable_modern; +} + +static inline bool virtio_pci_legacy(VirtIOPCIProxy *proxy) +{ + return proxy->disable_legacy == ON_OFF_AUTO_OFF; +} + +static inline void virtio_pci_force_virtio_1(VirtIOPCIProxy *proxy) +{ + proxy->disable_modern = false; + proxy->disable_legacy = ON_OFF_AUTO_ON; +} + +static inline void virtio_pci_disable_modern(VirtIOPCIProxy *proxy) +{ + proxy->disable_modern = true; +} + +/* + * virtio-input-pci: This extends VirtioPCIProxy. + */ +#define TYPE_VIRTIO_INPUT_PCI "virtio-input-pci" + +/* Virtio ABI version, if we increment this, we break the guest driver. */ +#define VIRTIO_PCI_ABI_VERSION 0 + +/* Input for virtio_pci_types_register() */ +typedef struct VirtioPCIDeviceTypeInfo { + /* + * Common base class for the subclasses below. + * + * Required only if transitional_name or non_transitional_name is set. + * + * We need a separate base type instead of making all types + * inherit from generic_name for two reasons: + * 1) generic_name implements INTERFACE_PCIE_DEVICE, but + * transitional_name does not. + * 2) generic_name has the "disable-legacy" and "disable-modern" + * properties, transitional_name and non_transitional name don't. + */ + const char *base_name; + /* + * Generic device type. Optional. + * + * Supports both transitional and non-transitional modes, + * using the disable-legacy and disable-modern properties. + * If disable-legacy=auto, (non-)transitional mode is selected + * depending on the bus where the device is plugged. + * + * Implements both INTERFACE_PCIE_DEVICE and INTERFACE_CONVENTIONAL_PCI_DEVICE, + * but PCI Express is supported only in non-transitional mode. + * + * The only type implemented by QEMU 3.1 and older. + */ + const char *generic_name; + /* + * The transitional device type. Optional. + * + * Implements both INTERFACE_PCIE_DEVICE and INTERFACE_CONVENTIONAL_PCI_DEVICE. + */ + const char *transitional_name; + /* + * The non-transitional device type. Optional. + * + * Implements INTERFACE_CONVENTIONAL_PCI_DEVICE only. + */ + const char *non_transitional_name; + + /* Parent type. 
If NULL, TYPE_VIRTIO_PCI is used */ + const char *parent; + + /* Same as TypeInfo fields: */ + size_t instance_size; + size_t class_size; + void (*instance_init)(Object *obj); + void (*class_init)(ObjectClass *klass, void *data); + InterfaceInfo *interfaces; +} VirtioPCIDeviceTypeInfo; + +/* Register virtio-pci type(s). @t must be static. */ +void virtio_pci_types_register(const VirtioPCIDeviceTypeInfo *t); + +/** + * virtio_pci_optimal_num_queues: + * @fixed_queues: number of queues that are always present + * + * Returns: The optimal number of queues for a multi-queue device, excluding + * @fixed_queues. + */ +unsigned virtio_pci_optimal_num_queues(unsigned fixed_queues); + +#endif diff --git a/hw/virtio/virtio-pmem-pci.c b/hw/virtio/virtio-pmem-pci.c new file mode 100644 index 000000000..2b2a0b1ea --- /dev/null +++ b/hw/virtio/virtio-pmem-pci.c @@ -0,0 +1,129 @@ +/* + * Virtio PMEM PCI device + * + * Copyright (C) 2018-2019 Red Hat, Inc. + * + * Authors: + * Pankaj Gupta + * David Hildenbrand + * + * This work is licensed under the terms of the GNU GPL, version 2. + * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" + +#include "virtio-pmem-pci.h" +#include "hw/mem/memory-device.h" +#include "qapi/error.h" + +static void virtio_pmem_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp) +{ + VirtIOPMEMPCI *pmem_pci = VIRTIO_PMEM_PCI(vpci_dev); + DeviceState *vdev = DEVICE(&pmem_pci->vdev); + + virtio_pci_force_virtio_1(vpci_dev); + qdev_realize(vdev, BUS(&vpci_dev->bus), errp); +} + +static void virtio_pmem_pci_set_addr(MemoryDeviceState *md, uint64_t addr, + Error **errp) +{ + object_property_set_uint(OBJECT(md), VIRTIO_PMEM_ADDR_PROP, addr, errp); +} + +static uint64_t virtio_pmem_pci_get_addr(const MemoryDeviceState *md) +{ + return object_property_get_uint(OBJECT(md), VIRTIO_PMEM_ADDR_PROP, + &error_abort); +} + +static MemoryRegion *virtio_pmem_pci_get_memory_region(MemoryDeviceState *md, + Error **errp) +{ + VirtIOPMEMPCI *pci_pmem = VIRTIO_PMEM_PCI(md); + VirtIOPMEM *pmem = VIRTIO_PMEM(&pci_pmem->vdev); + VirtIOPMEMClass *vpc = VIRTIO_PMEM_GET_CLASS(pmem); + + return vpc->get_memory_region(pmem, errp); +} + +static uint64_t virtio_pmem_pci_get_plugged_size(const MemoryDeviceState *md, + Error **errp) +{ + VirtIOPMEMPCI *pci_pmem = VIRTIO_PMEM_PCI(md); + VirtIOPMEM *pmem = VIRTIO_PMEM(&pci_pmem->vdev); + VirtIOPMEMClass *vpc = VIRTIO_PMEM_GET_CLASS(pmem); + MemoryRegion *mr = vpc->get_memory_region(pmem, errp); + + /* the plugged size corresponds to the region size */ + return mr ? 
memory_region_size(mr) : 0; +} + +static void virtio_pmem_pci_fill_device_info(const MemoryDeviceState *md, + MemoryDeviceInfo *info) +{ + VirtioPMEMDeviceInfo *vi = g_new0(VirtioPMEMDeviceInfo, 1); + VirtIOPMEMPCI *pci_pmem = VIRTIO_PMEM_PCI(md); + VirtIOPMEM *pmem = VIRTIO_PMEM(&pci_pmem->vdev); + VirtIOPMEMClass *vpc = VIRTIO_PMEM_GET_CLASS(pmem); + DeviceState *dev = DEVICE(md); + + if (dev->id) { + vi->has_id = true; + vi->id = g_strdup(dev->id); + } + + /* let the real device handle everything else */ + vpc->fill_device_info(pmem, vi); + + info->u.virtio_pmem.data = vi; + info->type = MEMORY_DEVICE_INFO_KIND_VIRTIO_PMEM; +} + +static void virtio_pmem_pci_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass); + PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass); + MemoryDeviceClass *mdc = MEMORY_DEVICE_CLASS(klass); + + k->realize = virtio_pmem_pci_realize; + set_bit(DEVICE_CATEGORY_MISC, dc->categories); + pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET; + pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_PMEM; + pcidev_k->revision = VIRTIO_PCI_ABI_VERSION; + pcidev_k->class_id = PCI_CLASS_OTHERS; + + mdc->get_addr = virtio_pmem_pci_get_addr; + mdc->set_addr = virtio_pmem_pci_set_addr; + mdc->get_plugged_size = virtio_pmem_pci_get_plugged_size; + mdc->get_memory_region = virtio_pmem_pci_get_memory_region; + mdc->fill_device_info = virtio_pmem_pci_fill_device_info; +} + +static void virtio_pmem_pci_instance_init(Object *obj) +{ + VirtIOPMEMPCI *dev = VIRTIO_PMEM_PCI(obj); + + virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev), + TYPE_VIRTIO_PMEM); +} + +static const VirtioPCIDeviceTypeInfo virtio_pmem_pci_info = { + .base_name = TYPE_VIRTIO_PMEM_PCI, + .generic_name = "virtio-pmem-pci", + .instance_size = sizeof(VirtIOPMEMPCI), + .instance_init = virtio_pmem_pci_instance_init, + .class_init = virtio_pmem_pci_class_init, + .interfaces = (InterfaceInfo[]) { + { TYPE_MEMORY_DEVICE }, + { } + }, +}; + +static void virtio_pmem_pci_register_types(void) +{ + virtio_pci_types_register(&virtio_pmem_pci_info); +} +type_init(virtio_pmem_pci_register_types) diff --git a/hw/virtio/virtio-pmem-pci.h b/hw/virtio/virtio-pmem-pci.h new file mode 100644 index 000000000..63cfe727f --- /dev/null +++ b/hw/virtio/virtio-pmem-pci.h @@ -0,0 +1,35 @@ +/* + * Virtio PMEM PCI device + * + * Copyright (C) 2018-2019 Red Hat, Inc. + * + * Authors: + * Pankaj Gupta + * David Hildenbrand + * + * This work is licensed under the terms of the GNU GPL, version 2. + * See the COPYING file in the top-level directory. + */ + +#ifndef QEMU_VIRTIO_PMEM_PCI_H +#define QEMU_VIRTIO_PMEM_PCI_H + +#include "hw/virtio/virtio-pci.h" +#include "hw/virtio/virtio-pmem.h" +#include "qom/object.h" + +typedef struct VirtIOPMEMPCI VirtIOPMEMPCI; + +/* + * virtio-pmem-pci: This extends VirtioPCIProxy. + */ +#define TYPE_VIRTIO_PMEM_PCI "virtio-pmem-pci-base" +DECLARE_INSTANCE_CHECKER(VirtIOPMEMPCI, VIRTIO_PMEM_PCI, + TYPE_VIRTIO_PMEM_PCI) + +struct VirtIOPMEMPCI { + VirtIOPCIProxy parent_obj; + VirtIOPMEM vdev; +}; + +#endif /* QEMU_VIRTIO_PMEM_PCI_H */ diff --git a/hw/virtio/virtio-pmem.c b/hw/virtio/virtio-pmem.c new file mode 100644 index 000000000..d1aeb90a3 --- /dev/null +++ b/hw/virtio/virtio-pmem.c @@ -0,0 +1,198 @@ +/* + * Virtio PMEM device + * + * Copyright (C) 2018-2019 Red Hat, Inc. + * + * Authors: + * Pankaj Gupta + * David Hildenbrand + * + * This work is licensed under the terms of the GNU GPL, version 2. 
+ * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu-common.h" +#include "qemu/error-report.h" +#include "qemu/main-loop.h" +#include "hw/virtio/virtio-pmem.h" +#include "hw/qdev-properties.h" +#include "hw/virtio/virtio-access.h" +#include "standard-headers/linux/virtio_ids.h" +#include "standard-headers/linux/virtio_pmem.h" +#include "sysemu/hostmem.h" +#include "block/aio.h" +#include "block/thread-pool.h" +#include "trace.h" + +typedef struct VirtIODeviceRequest { + VirtQueueElement elem; + int fd; + VirtIOPMEM *pmem; + VirtIODevice *vdev; + struct virtio_pmem_req req; + struct virtio_pmem_resp resp; +} VirtIODeviceRequest; + +static int worker_cb(void *opaque) +{ + VirtIODeviceRequest *req_data = opaque; + int err = 0; + + /* flush raw backing image */ + err = fsync(req_data->fd); + trace_virtio_pmem_flush_done(err); + if (err != 0) { + err = 1; + } + + virtio_stl_p(req_data->vdev, &req_data->resp.ret, err); + + return 0; +} + +static void done_cb(void *opaque, int ret) +{ + VirtIODeviceRequest *req_data = opaque; + int len = iov_from_buf(req_data->elem.in_sg, req_data->elem.in_num, 0, + &req_data->resp, sizeof(struct virtio_pmem_resp)); + + /* Callbacks are serialized, so no need to use atomic ops. */ + virtqueue_push(req_data->pmem->rq_vq, &req_data->elem, len); + virtio_notify((VirtIODevice *)req_data->pmem, req_data->pmem->rq_vq); + trace_virtio_pmem_response(); + g_free(req_data); +} + +static void virtio_pmem_flush(VirtIODevice *vdev, VirtQueue *vq) +{ + VirtIODeviceRequest *req_data; + VirtIOPMEM *pmem = VIRTIO_PMEM(vdev); + HostMemoryBackend *backend = MEMORY_BACKEND(pmem->memdev); + ThreadPool *pool = aio_get_thread_pool(qemu_get_aio_context()); + + trace_virtio_pmem_flush_request(); + req_data = virtqueue_pop(vq, sizeof(VirtIODeviceRequest)); + if (!req_data) { + virtio_error(vdev, "virtio-pmem missing request data"); + return; + } + + if (req_data->elem.out_num < 1 || req_data->elem.in_num < 1) { + virtio_error(vdev, "virtio-pmem request not proper"); + virtqueue_detach_element(vq, (VirtQueueElement *)req_data, 0); + g_free(req_data); + return; + } + req_data->fd = memory_region_get_fd(&backend->mr); + req_data->pmem = pmem; + req_data->vdev = vdev; + thread_pool_submit_aio(pool, worker_cb, req_data, done_cb, req_data); +} + +static void virtio_pmem_get_config(VirtIODevice *vdev, uint8_t *config) +{ + VirtIOPMEM *pmem = VIRTIO_PMEM(vdev); + struct virtio_pmem_config *pmemcfg = (struct virtio_pmem_config *) config; + + virtio_stq_p(vdev, &pmemcfg->start, pmem->start); + virtio_stq_p(vdev, &pmemcfg->size, memory_region_size(&pmem->memdev->mr)); +} + +static uint64_t virtio_pmem_get_features(VirtIODevice *vdev, uint64_t features, + Error **errp) +{ + return features; +} + +static void virtio_pmem_realize(DeviceState *dev, Error **errp) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(dev); + VirtIOPMEM *pmem = VIRTIO_PMEM(dev); + + if (!pmem->memdev) { + error_setg(errp, "virtio-pmem memdev not set"); + return; + } + + if (host_memory_backend_is_mapped(pmem->memdev)) { + error_setg(errp, "can't use already busy memdev: %s", + object_get_canonical_path_component(OBJECT(pmem->memdev))); + return; + } + + host_memory_backend_set_mapped(pmem->memdev, true); + virtio_init(vdev, TYPE_VIRTIO_PMEM, VIRTIO_ID_PMEM, + sizeof(struct virtio_pmem_config)); + pmem->rq_vq = virtio_add_queue(vdev, 128, virtio_pmem_flush); +} + +static void virtio_pmem_unrealize(DeviceState *dev) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(dev); + 
VirtIOPMEM *pmem = VIRTIO_PMEM(dev); + + host_memory_backend_set_mapped(pmem->memdev, false); + virtio_delete_queue(pmem->rq_vq); + virtio_cleanup(vdev); +} + +static void virtio_pmem_fill_device_info(const VirtIOPMEM *pmem, + VirtioPMEMDeviceInfo *vi) +{ + vi->memaddr = pmem->start; + vi->size = memory_region_size(&pmem->memdev->mr); + vi->memdev = object_get_canonical_path(OBJECT(pmem->memdev)); +} + +static MemoryRegion *virtio_pmem_get_memory_region(VirtIOPMEM *pmem, + Error **errp) +{ + if (!pmem->memdev) { + error_setg(errp, "'%s' property must be set", VIRTIO_PMEM_MEMDEV_PROP); + return NULL; + } + + return &pmem->memdev->mr; +} + +static Property virtio_pmem_properties[] = { + DEFINE_PROP_UINT64(VIRTIO_PMEM_ADDR_PROP, VirtIOPMEM, start, 0), + DEFINE_PROP_LINK(VIRTIO_PMEM_MEMDEV_PROP, VirtIOPMEM, memdev, + TYPE_MEMORY_BACKEND, HostMemoryBackend *), + DEFINE_PROP_END_OF_LIST(), +}; + +static void virtio_pmem_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass); + VirtIOPMEMClass *vpc = VIRTIO_PMEM_CLASS(klass); + + device_class_set_props(dc, virtio_pmem_properties); + + vdc->realize = virtio_pmem_realize; + vdc->unrealize = virtio_pmem_unrealize; + vdc->get_config = virtio_pmem_get_config; + vdc->get_features = virtio_pmem_get_features; + + vpc->fill_device_info = virtio_pmem_fill_device_info; + vpc->get_memory_region = virtio_pmem_get_memory_region; + set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); +} + +static TypeInfo virtio_pmem_info = { + .name = TYPE_VIRTIO_PMEM, + .parent = TYPE_VIRTIO_DEVICE, + .class_size = sizeof(VirtIOPMEMClass), + .class_init = virtio_pmem_class_init, + .instance_size = sizeof(VirtIOPMEM), +}; + +static void virtio_register_types(void) +{ + type_register_static(&virtio_pmem_info); +} + +type_init(virtio_register_types) diff --git a/hw/virtio/virtio-rng-pci.c b/hw/virtio/virtio-rng-pci.c new file mode 100644 index 000000000..c1f916268 --- /dev/null +++ b/hw/virtio/virtio-rng-pci.c @@ -0,0 +1,82 @@ +/* + * Virtio rng PCI Bindings + * + * Copyright 2012 Red Hat, Inc. + * Copyright 2012 Amit Shah + * + * This work is licensed under the terms of the GNU GPL, version 2 or + * (at your option) any later version. See the COPYING file in the + * top-level directory. + */ + +#include "qemu/osdep.h" + +#include "virtio-pci.h" +#include "hw/virtio/virtio-rng.h" +#include "qapi/error.h" +#include "qemu/module.h" +#include "qom/object.h" + +typedef struct VirtIORngPCI VirtIORngPCI; + +/* + * virtio-rng-pci: This extends VirtioPCIProxy. 
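+ * The proxy carries the PCI plumbing; the embedded VirtIORNG child
+ * (vdev below) implements the entropy virtqueue itself.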
+ */ +#define TYPE_VIRTIO_RNG_PCI "virtio-rng-pci-base" +DECLARE_INSTANCE_CHECKER(VirtIORngPCI, VIRTIO_RNG_PCI, + TYPE_VIRTIO_RNG_PCI) + +struct VirtIORngPCI { + VirtIOPCIProxy parent_obj; + VirtIORNG vdev; +}; + +static void virtio_rng_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp) +{ + VirtIORngPCI *vrng = VIRTIO_RNG_PCI(vpci_dev); + DeviceState *vdev = DEVICE(&vrng->vdev); + + if (!qdev_realize(vdev, BUS(&vpci_dev->bus), errp)) { + return; + } +} + +static void virtio_rng_pci_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass); + PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass); + + k->realize = virtio_rng_pci_realize; + set_bit(DEVICE_CATEGORY_MISC, dc->categories); + + pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET; + pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_RNG; + pcidev_k->revision = VIRTIO_PCI_ABI_VERSION; + pcidev_k->class_id = PCI_CLASS_OTHERS; +} + +static void virtio_rng_initfn(Object *obj) +{ + VirtIORngPCI *dev = VIRTIO_RNG_PCI(obj); + + virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev), + TYPE_VIRTIO_RNG); +} + +static const VirtioPCIDeviceTypeInfo virtio_rng_pci_info = { + .base_name = TYPE_VIRTIO_RNG_PCI, + .generic_name = "virtio-rng-pci", + .transitional_name = "virtio-rng-pci-transitional", + .non_transitional_name = "virtio-rng-pci-non-transitional", + .instance_size = sizeof(VirtIORngPCI), + .instance_init = virtio_rng_initfn, + .class_init = virtio_rng_pci_class_init, +}; + +static void virtio_rng_pci_register(void) +{ + virtio_pci_types_register(&virtio_rng_pci_info); +} + +type_init(virtio_rng_pci_register) diff --git a/hw/virtio/virtio-rng.c b/hw/virtio/virtio-rng.c new file mode 100644 index 000000000..cc8e9f775 --- /dev/null +++ b/hw/virtio/virtio-rng.c @@ -0,0 +1,289 @@ +/* + * A virtio device implementing a hardware random number generator. + * + * Copyright 2012 Red Hat, Inc. + * Copyright 2012 Amit Shah + * + * This work is licensed under the terms of the GNU GPL, version 2 or + * (at your option) any later version. See the COPYING file in the + * top-level directory. 
+ */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/iov.h" +#include "qemu/module.h" +#include "qemu/timer.h" +#include "hw/virtio/virtio.h" +#include "hw/qdev-properties.h" +#include "hw/virtio/virtio-rng.h" +#include "sysemu/rng.h" +#include "sysemu/runstate.h" +#include "qom/object_interfaces.h" +#include "trace.h" + +static bool is_guest_ready(VirtIORNG *vrng) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(vrng); + if (virtio_queue_ready(vrng->vq) + && (vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) { + return true; + } + trace_virtio_rng_guest_not_ready(vrng); + return false; +} + +static size_t get_request_size(VirtQueue *vq, unsigned quota) +{ + unsigned int in, out; + + virtqueue_get_avail_bytes(vq, &in, &out, quota, 0); + return in; +} + +static void virtio_rng_process(VirtIORNG *vrng); + +/* Send data from a char device over to the guest */ +static void chr_read(void *opaque, const void *buf, size_t size) +{ + VirtIORNG *vrng = opaque; + VirtIODevice *vdev = VIRTIO_DEVICE(vrng); + VirtQueueElement *elem; + size_t len; + int offset; + + if (!is_guest_ready(vrng)) { + return; + } + + /* we can't modify the virtqueue until + * our state is fully synced + */ + + if (!runstate_check(RUN_STATE_RUNNING)) { + trace_virtio_rng_cpu_is_stopped(vrng, size); + return; + } + + vrng->quota_remaining -= size; + + offset = 0; + while (offset < size) { + elem = virtqueue_pop(vrng->vq, sizeof(VirtQueueElement)); + if (!elem) { + break; + } + trace_virtio_rng_popped(vrng); + len = iov_from_buf(elem->in_sg, elem->in_num, + 0, buf + offset, size - offset); + offset += len; + + virtqueue_push(vrng->vq, elem, len); + trace_virtio_rng_pushed(vrng, len); + g_free(elem); + } + virtio_notify(vdev, vrng->vq); + + if (!virtio_queue_empty(vrng->vq)) { + /* If we didn't drain the queue, call virtio_rng_process + * to take care of asking for more data as appropriate. + */ + virtio_rng_process(vrng); + } +} + +static void virtio_rng_process(VirtIORNG *vrng) +{ + size_t size; + unsigned quota; + + if (!is_guest_ready(vrng)) { + return; + } + + if (vrng->activate_timer) { + timer_mod(vrng->rate_limit_timer, + qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + vrng->conf.period_ms); + vrng->activate_timer = false; + } + + if (vrng->quota_remaining < 0) { + quota = 0; + } else { + quota = MIN((uint64_t)vrng->quota_remaining, (uint64_t)UINT32_MAX); + } + size = get_request_size(vrng->vq, quota); + + trace_virtio_rng_request(vrng, size, quota); + + size = MIN(vrng->quota_remaining, size); + if (size) { + rng_backend_request_entropy(vrng->rng, size, chr_read, vrng); + } +} + +static void handle_input(VirtIODevice *vdev, VirtQueue *vq) +{ + VirtIORNG *vrng = VIRTIO_RNG(vdev); + virtio_rng_process(vrng); +} + +static uint64_t get_features(VirtIODevice *vdev, uint64_t f, Error **errp) +{ + return f; +} + +static void virtio_rng_vm_state_change(void *opaque, bool running, + RunState state) +{ + VirtIORNG *vrng = opaque; + + trace_virtio_rng_vm_state_change(vrng, running, state); + + /* We may have an element ready but couldn't process it due to a quota + * limit or because CPU was stopped. Make sure to try again when the + * CPU restart. 
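+ * The rate-limit timer (check_rate_limit()) refills quota_remaining
+ * on its own, so only the stopped -> running transition is handled
+ * here.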
+ */ + + if (running && is_guest_ready(vrng)) { + virtio_rng_process(vrng); + } +} + +static void check_rate_limit(void *opaque) +{ + VirtIORNG *vrng = opaque; + + vrng->quota_remaining = vrng->conf.max_bytes; + virtio_rng_process(vrng); + vrng->activate_timer = true; +} + +static void virtio_rng_set_status(VirtIODevice *vdev, uint8_t status) +{ + VirtIORNG *vrng = VIRTIO_RNG(vdev); + + if (!vdev->vm_running) { + return; + } + vdev->status = status; + + /* Something changed, try to process buffers */ + virtio_rng_process(vrng); +} + +static void virtio_rng_device_realize(DeviceState *dev, Error **errp) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(dev); + VirtIORNG *vrng = VIRTIO_RNG(dev); + + if (vrng->conf.period_ms <= 0) { + error_setg(errp, "'period' parameter expects a positive integer"); + return; + } + + /* Workaround: Property parsing does not enforce unsigned integers, + * So this is a hack to reject such numbers. */ + if (vrng->conf.max_bytes > INT64_MAX) { + error_setg(errp, "'max-bytes' parameter must be non-negative, " + "and less than 2^63"); + return; + } + + if (vrng->conf.rng == NULL) { + Object *default_backend = object_new(TYPE_RNG_BUILTIN); + + if (!user_creatable_complete(USER_CREATABLE(default_backend), + errp)) { + object_unref(default_backend); + return; + } + + object_property_add_child(OBJECT(dev), "default-backend", + default_backend); + + /* The child property took a reference, we can safely drop ours now */ + object_unref(default_backend); + + object_property_set_link(OBJECT(dev), "rng", default_backend, + &error_abort); + } + + vrng->rng = vrng->conf.rng; + if (vrng->rng == NULL) { + error_setg(errp, "'rng' parameter expects a valid object"); + return; + } + + virtio_init(vdev, "virtio-rng", VIRTIO_ID_RNG, 0); + + vrng->vq = virtio_add_queue(vdev, 8, handle_input); + vrng->quota_remaining = vrng->conf.max_bytes; + vrng->rate_limit_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL, + check_rate_limit, vrng); + vrng->activate_timer = true; + + vrng->vmstate = qemu_add_vm_change_state_handler(virtio_rng_vm_state_change, + vrng); +} + +static void virtio_rng_device_unrealize(DeviceState *dev) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(dev); + VirtIORNG *vrng = VIRTIO_RNG(dev); + + qemu_del_vm_change_state_handler(vrng->vmstate); + timer_free(vrng->rate_limit_timer); + virtio_del_queue(vdev, 0); + virtio_cleanup(vdev); +} + +static const VMStateDescription vmstate_virtio_rng = { + .name = "virtio-rng", + .minimum_version_id = 1, + .version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_VIRTIO_DEVICE, + VMSTATE_END_OF_LIST() + }, +}; + +static Property virtio_rng_properties[] = { + /* Set a default rate limit of 2^47 bytes per minute or roughly 2TB/s. If + * you have an entropy source capable of generating more entropy than this + * and you can pass it through via virtio-rng, then hats off to you. Until + * then, this is unlimited for all practical purposes. 
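+ * "max-bytes" bytes may be delivered per "period" milliseconds;
+ * the default period below is 1 << 16 ms, i.e. roughly a minute.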
+ */ + DEFINE_PROP_UINT64("max-bytes", VirtIORNG, conf.max_bytes, INT64_MAX), + DEFINE_PROP_UINT32("period", VirtIORNG, conf.period_ms, 1 << 16), + DEFINE_PROP_LINK("rng", VirtIORNG, conf.rng, TYPE_RNG_BACKEND, RngBackend *), + DEFINE_PROP_END_OF_LIST(), +}; + +static void virtio_rng_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass); + + device_class_set_props(dc, virtio_rng_properties); + dc->vmsd = &vmstate_virtio_rng; + set_bit(DEVICE_CATEGORY_MISC, dc->categories); + vdc->realize = virtio_rng_device_realize; + vdc->unrealize = virtio_rng_device_unrealize; + vdc->get_features = get_features; + vdc->set_status = virtio_rng_set_status; +} + +static const TypeInfo virtio_rng_info = { + .name = TYPE_VIRTIO_RNG, + .parent = TYPE_VIRTIO_DEVICE, + .instance_size = sizeof(VirtIORNG), + .class_init = virtio_rng_class_init, +}; + +static void virtio_register_types(void) +{ + type_register_static(&virtio_rng_info); +} + +type_init(virtio_register_types) diff --git a/hw/virtio/virtio-scsi-pci.c b/hw/virtio/virtio-scsi-pci.c new file mode 100644 index 000000000..97fab7423 --- /dev/null +++ b/hw/virtio/virtio-scsi-pci.c @@ -0,0 +1,114 @@ +/* + * Virtio scsi PCI Bindings + * + * Copyright IBM, Corp. 2007 + * Copyright (c) 2009 CodeSourcery + * + * Authors: + * Anthony Liguori + * Paul Brook + * + * This work is licensed under the terms of the GNU GPL, version 2 or + * (at your option) any later version. See the COPYING file in the + * top-level directory. + */ + +#include "qemu/osdep.h" + +#include "hw/qdev-properties.h" +#include "hw/virtio/virtio-scsi.h" +#include "qemu/module.h" +#include "virtio-pci.h" +#include "qom/object.h" + +typedef struct VirtIOSCSIPCI VirtIOSCSIPCI; + +/* + * virtio-scsi-pci: This extends VirtioPCIProxy. + */ +#define TYPE_VIRTIO_SCSI_PCI "virtio-scsi-pci-base" +DECLARE_INSTANCE_CHECKER(VirtIOSCSIPCI, VIRTIO_SCSI_PCI, + TYPE_VIRTIO_SCSI_PCI) + +struct VirtIOSCSIPCI { + VirtIOPCIProxy parent_obj; + VirtIOSCSI vdev; +}; + +static Property virtio_scsi_pci_properties[] = { + DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags, + VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true), + DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, + DEV_NVECTORS_UNSPECIFIED), + DEFINE_PROP_END_OF_LIST(), +}; + +static void virtio_scsi_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp) +{ + VirtIOSCSIPCI *dev = VIRTIO_SCSI_PCI(vpci_dev); + DeviceState *vdev = DEVICE(&dev->vdev); + DeviceState *proxy = DEVICE(vpci_dev); + VirtIOSCSIConf *conf = &dev->vdev.parent_obj.conf; + char *bus_name; + + if (conf->num_queues == VIRTIO_SCSI_AUTO_NUM_QUEUES) { + conf->num_queues = + virtio_pci_optimal_num_queues(VIRTIO_SCSI_VQ_NUM_FIXED); + } + + if (vpci_dev->nvectors == DEV_NVECTORS_UNSPECIFIED) { + vpci_dev->nvectors = conf->num_queues + VIRTIO_SCSI_VQ_NUM_FIXED + 1; + } + + /* + * For command line compatibility, this sets the virtio-scsi-device bus + * name as before. 
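+ * The bus is named "<id>.0" after the proxy's user-supplied id, so
+ * existing command lines that reference the bus keep working.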
+ */ + if (proxy->id) { + bus_name = g_strdup_printf("%s.0", proxy->id); + virtio_device_set_child_bus_name(VIRTIO_DEVICE(vdev), bus_name); + g_free(bus_name); + } + + qdev_realize(vdev, BUS(&vpci_dev->bus), errp); +} + +static void virtio_scsi_pci_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass); + PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass); + + k->realize = virtio_scsi_pci_realize; + set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); + device_class_set_props(dc, virtio_scsi_pci_properties); + pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET; + pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_SCSI; + pcidev_k->revision = 0x00; + pcidev_k->class_id = PCI_CLASS_STORAGE_SCSI; +} + +static void virtio_scsi_pci_instance_init(Object *obj) +{ + VirtIOSCSIPCI *dev = VIRTIO_SCSI_PCI(obj); + + virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev), + TYPE_VIRTIO_SCSI); +} + +static const VirtioPCIDeviceTypeInfo virtio_scsi_pci_info = { + .base_name = TYPE_VIRTIO_SCSI_PCI, + .generic_name = "virtio-scsi-pci", + .transitional_name = "virtio-scsi-pci-transitional", + .non_transitional_name = "virtio-scsi-pci-non-transitional", + .instance_size = sizeof(VirtIOSCSIPCI), + .instance_init = virtio_scsi_pci_instance_init, + .class_init = virtio_scsi_pci_class_init, +}; + +static void virtio_scsi_pci_register(void) +{ + virtio_pci_types_register(&virtio_scsi_pci_info); +} + +type_init(virtio_scsi_pci_register) diff --git a/hw/virtio/virtio-serial-pci.c b/hw/virtio/virtio-serial-pci.c new file mode 100644 index 000000000..35bcd961c --- /dev/null +++ b/hw/virtio/virtio-serial-pci.c @@ -0,0 +1,117 @@ +/* + * Virtio serial PCI Bindings + * + * Copyright IBM, Corp. 2007 + * Copyright (c) 2009 CodeSourcery + * + * Authors: + * Anthony Liguori + * Paul Brook + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + * + * Contributions after 2012-01-13 are licensed under the terms of the + * GNU GPL, version 2 or (at your option) any later version. + */ + +#include "qemu/osdep.h" + +#include "hw/qdev-properties.h" +#include "hw/virtio/virtio-serial.h" +#include "qemu/module.h" +#include "virtio-pci.h" +#include "qom/object.h" + +typedef struct VirtIOSerialPCI VirtIOSerialPCI; + +/* + * virtio-serial-pci: This extends VirtioPCIProxy. + */ +#define TYPE_VIRTIO_SERIAL_PCI "virtio-serial-pci-base" +DECLARE_INSTANCE_CHECKER(VirtIOSerialPCI, VIRTIO_SERIAL_PCI, + TYPE_VIRTIO_SERIAL_PCI) + +struct VirtIOSerialPCI { + VirtIOPCIProxy parent_obj; + VirtIOSerial vdev; +}; + +static void virtio_serial_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp) +{ + VirtIOSerialPCI *dev = VIRTIO_SERIAL_PCI(vpci_dev); + DeviceState *vdev = DEVICE(&dev->vdev); + DeviceState *proxy = DEVICE(vpci_dev); + char *bus_name; + + if (vpci_dev->class_code != PCI_CLASS_COMMUNICATION_OTHER && + vpci_dev->class_code != PCI_CLASS_DISPLAY_OTHER && /* qemu 0.10 */ + vpci_dev->class_code != PCI_CLASS_OTHERS) { /* qemu-kvm */ + vpci_dev->class_code = PCI_CLASS_COMMUNICATION_OTHER; + } + + /* backwards-compatibility with machines that were created with + DEV_NVECTORS_UNSPECIFIED */ + if (vpci_dev->nvectors == DEV_NVECTORS_UNSPECIFIED) { + vpci_dev->nvectors = dev->vdev.serial.max_virtserial_ports + 1; + } + + /* + * For command line compatibility, this sets the virtio-serial-device bus + * name as before. 
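+ * As for virtio-scsi-pci above, the child bus keeps the legacy
+ * "<id>.0" name derived from the proxy's id.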
+ */ + if (proxy->id) { + bus_name = g_strdup_printf("%s.0", proxy->id); + virtio_device_set_child_bus_name(VIRTIO_DEVICE(vdev), bus_name); + g_free(bus_name); + } + + qdev_realize(vdev, BUS(&vpci_dev->bus), errp); +} + +static Property virtio_serial_pci_properties[] = { + DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags, + VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true), + DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2), + DEFINE_PROP_UINT32("class", VirtIOPCIProxy, class_code, 0), + DEFINE_PROP_END_OF_LIST(), +}; + +static void virtio_serial_pci_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass); + PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass); + k->realize = virtio_serial_pci_realize; + set_bit(DEVICE_CATEGORY_INPUT, dc->categories); + device_class_set_props(dc, virtio_serial_pci_properties); + pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET; + pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_CONSOLE; + pcidev_k->revision = VIRTIO_PCI_ABI_VERSION; + pcidev_k->class_id = PCI_CLASS_COMMUNICATION_OTHER; +} + +static void virtio_serial_pci_instance_init(Object *obj) +{ + VirtIOSerialPCI *dev = VIRTIO_SERIAL_PCI(obj); + + virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev), + TYPE_VIRTIO_SERIAL); +} + +static const VirtioPCIDeviceTypeInfo virtio_serial_pci_info = { + .base_name = TYPE_VIRTIO_SERIAL_PCI, + .generic_name = "virtio-serial-pci", + .transitional_name = "virtio-serial-pci-transitional", + .non_transitional_name = "virtio-serial-pci-non-transitional", + .instance_size = sizeof(VirtIOSerialPCI), + .instance_init = virtio_serial_pci_instance_init, + .class_init = virtio_serial_pci_class_init, +}; + +static void virtio_serial_pci_register(void) +{ + virtio_pci_types_register(&virtio_serial_pci_info); +} + +type_init(virtio_serial_pci_register) diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c new file mode 100644 index 000000000..ea7c079fb --- /dev/null +++ b/hw/virtio/virtio.c @@ -0,0 +1,3876 @@ +/* + * Virtio Support + * + * Copyright IBM, Corp. 2007 + * + * Authors: + * Anthony Liguori + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + * + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "cpu.h" +#include "trace.h" +#include "qemu/error-report.h" +#include "qemu/log.h" +#include "qemu/main-loop.h" +#include "qemu/module.h" +#include "hw/virtio/virtio.h" +#include "migration/qemu-file-types.h" +#include "qemu/atomic.h" +#include "hw/virtio/virtio-bus.h" +#include "hw/qdev-properties.h" +#include "hw/virtio/virtio-access.h" +#include "sysemu/dma.h" +#include "sysemu/runstate.h" +#include "standard-headers/linux/virtio_ids.h" + +/* + * The alignment to use between consumer and producer parts of vring. + * x86 pagesize again. This is the default, used by transports like PCI + * which don't provide a means for the guest to tell the host the alignment. 
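+ * Transports that do let the guest negotiate it (legacy virtio-mmio,
+ * for example) override this default via virtio_queue_set_align().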
+ */ +#define VIRTIO_PCI_VRING_ALIGN 4096 + +typedef struct VRingDesc +{ + uint64_t addr; + uint32_t len; + uint16_t flags; + uint16_t next; +} VRingDesc; + +typedef struct VRingPackedDesc { + uint64_t addr; + uint32_t len; + uint16_t id; + uint16_t flags; +} VRingPackedDesc; + +typedef struct VRingAvail +{ + uint16_t flags; + uint16_t idx; + uint16_t ring[]; +} VRingAvail; + +typedef struct VRingUsedElem +{ + uint32_t id; + uint32_t len; +} VRingUsedElem; + +typedef struct VRingUsed +{ + uint16_t flags; + uint16_t idx; + VRingUsedElem ring[]; +} VRingUsed; + +typedef struct VRingMemoryRegionCaches { + struct rcu_head rcu; + MemoryRegionCache desc; + MemoryRegionCache avail; + MemoryRegionCache used; +} VRingMemoryRegionCaches; + +typedef struct VRing +{ + unsigned int num; + unsigned int num_default; + unsigned int align; + hwaddr desc; + hwaddr avail; + hwaddr used; + VRingMemoryRegionCaches *caches; +} VRing; + +typedef struct VRingPackedDescEvent { + uint16_t off_wrap; + uint16_t flags; +} VRingPackedDescEvent ; + +struct VirtQueue +{ + VRing vring; + VirtQueueElement *used_elems; + + /* Next head to pop */ + uint16_t last_avail_idx; + bool last_avail_wrap_counter; + + /* Last avail_idx read from VQ. */ + uint16_t shadow_avail_idx; + bool shadow_avail_wrap_counter; + + uint16_t used_idx; + bool used_wrap_counter; + + /* Last used index value we have signalled on */ + uint16_t signalled_used; + + /* Last used index value we have signalled on */ + bool signalled_used_valid; + + /* Notification enabled? */ + bool notification; + + uint16_t queue_index; + + unsigned int inuse; + + uint16_t vector; + VirtIOHandleOutput handle_output; + VirtIOHandleAIOOutput handle_aio_output; + VirtIODevice *vdev; + EventNotifier guest_notifier; + EventNotifier host_notifier; + bool host_notifier_enabled; + QLIST_ENTRY(VirtQueue) node; +}; + +/* Called within call_rcu(). */ +static void virtio_free_region_cache(VRingMemoryRegionCaches *caches) +{ + assert(caches != NULL); + address_space_cache_destroy(&caches->desc); + address_space_cache_destroy(&caches->avail); + address_space_cache_destroy(&caches->used); + g_free(caches); +} + +static void virtio_virtqueue_reset_region_cache(struct VirtQueue *vq) +{ + VRingMemoryRegionCaches *caches; + + caches = qatomic_read(&vq->vring.caches); + qatomic_rcu_set(&vq->vring.caches, NULL); + if (caches) { + call_rcu(caches, virtio_free_region_cache, rcu); + } +} + +static void virtio_init_region_cache(VirtIODevice *vdev, int n) +{ + VirtQueue *vq = &vdev->vq[n]; + VRingMemoryRegionCaches *old = vq->vring.caches; + VRingMemoryRegionCaches *new = NULL; + hwaddr addr, size; + int64_t len; + bool packed; + + + addr = vq->vring.desc; + if (!addr) { + goto out_no_cache; + } + new = g_new0(VRingMemoryRegionCaches, 1); + size = virtio_queue_get_desc_size(vdev, n); + packed = virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED) ? 
+ true : false; + len = address_space_cache_init(&new->desc, vdev->dma_as, + addr, size, packed); + if (len < size) { + virtio_error(vdev, "Cannot map desc"); + goto err_desc; + } + + size = virtio_queue_get_used_size(vdev, n); + len = address_space_cache_init(&new->used, vdev->dma_as, + vq->vring.used, size, true); + if (len < size) { + virtio_error(vdev, "Cannot map used"); + goto err_used; + } + + size = virtio_queue_get_avail_size(vdev, n); + len = address_space_cache_init(&new->avail, vdev->dma_as, + vq->vring.avail, size, false); + if (len < size) { + virtio_error(vdev, "Cannot map avail"); + goto err_avail; + } + + qatomic_rcu_set(&vq->vring.caches, new); + if (old) { + call_rcu(old, virtio_free_region_cache, rcu); + } + return; + +err_avail: + address_space_cache_destroy(&new->avail); +err_used: + address_space_cache_destroy(&new->used); +err_desc: + address_space_cache_destroy(&new->desc); +out_no_cache: + g_free(new); + virtio_virtqueue_reset_region_cache(vq); +} + +/* virt queue functions */ +void virtio_queue_update_rings(VirtIODevice *vdev, int n) +{ + VRing *vring = &vdev->vq[n].vring; + + if (!vring->num || !vring->desc || !vring->align) { + /* not yet setup -> nothing to do */ + return; + } + vring->avail = vring->desc + vring->num * sizeof(VRingDesc); + vring->used = vring_align(vring->avail + + offsetof(VRingAvail, ring[vring->num]), + vring->align); + virtio_init_region_cache(vdev, n); +} + +/* Called within rcu_read_lock(). */ +static void vring_split_desc_read(VirtIODevice *vdev, VRingDesc *desc, + MemoryRegionCache *cache, int i) +{ + address_space_read_cached(cache, i * sizeof(VRingDesc), + desc, sizeof(VRingDesc)); + virtio_tswap64s(vdev, &desc->addr); + virtio_tswap32s(vdev, &desc->len); + virtio_tswap16s(vdev, &desc->flags); + virtio_tswap16s(vdev, &desc->next); +} + +static void vring_packed_event_read(VirtIODevice *vdev, + MemoryRegionCache *cache, + VRingPackedDescEvent *e) +{ + hwaddr off_off = offsetof(VRingPackedDescEvent, off_wrap); + hwaddr off_flags = offsetof(VRingPackedDescEvent, flags); + + e->flags = virtio_lduw_phys_cached(vdev, cache, off_flags); + /* Make sure flags is seen before off_wrap */ + smp_rmb(); + e->off_wrap = virtio_lduw_phys_cached(vdev, cache, off_off); + virtio_tswap16s(vdev, &e->flags); +} + +static void vring_packed_off_wrap_write(VirtIODevice *vdev, + MemoryRegionCache *cache, + uint16_t off_wrap) +{ + hwaddr off = offsetof(VRingPackedDescEvent, off_wrap); + + virtio_stw_phys_cached(vdev, cache, off, off_wrap); + address_space_cache_invalidate(cache, off, sizeof(off_wrap)); +} + +static void vring_packed_flags_write(VirtIODevice *vdev, + MemoryRegionCache *cache, uint16_t flags) +{ + hwaddr off = offsetof(VRingPackedDescEvent, flags); + + virtio_stw_phys_cached(vdev, cache, off, flags); + address_space_cache_invalidate(cache, off, sizeof(flags)); +} + +/* Called within rcu_read_lock(). */ +static VRingMemoryRegionCaches *vring_get_region_caches(struct VirtQueue *vq) +{ + return qatomic_rcu_read(&vq->vring.caches); +} + +/* Called within rcu_read_lock(). */ +static inline uint16_t vring_avail_flags(VirtQueue *vq) +{ + VRingMemoryRegionCaches *caches = vring_get_region_caches(vq); + hwaddr pa = offsetof(VRingAvail, flags); + + if (!caches) { + return 0; + } + + return virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa); +} + +/* Called within rcu_read_lock(). 
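+ * The vring.caches pointer is published with RCU, so readers must
+ * hold the RCU read lock for as long as they use the returned
+ * MemoryRegionCaches.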
*/ +static inline uint16_t vring_avail_idx(VirtQueue *vq) +{ + VRingMemoryRegionCaches *caches = vring_get_region_caches(vq); + hwaddr pa = offsetof(VRingAvail, idx); + + if (!caches) { + return 0; + } + + vq->shadow_avail_idx = virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa); + return vq->shadow_avail_idx; +} + +/* Called within rcu_read_lock(). */ +static inline uint16_t vring_avail_ring(VirtQueue *vq, int i) +{ + VRingMemoryRegionCaches *caches = vring_get_region_caches(vq); + hwaddr pa = offsetof(VRingAvail, ring[i]); + + if (!caches) { + return 0; + } + + return virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa); +} + +/* Called within rcu_read_lock(). */ +static inline uint16_t vring_get_used_event(VirtQueue *vq) +{ + return vring_avail_ring(vq, vq->vring.num); +} + +/* Called within rcu_read_lock(). */ +static inline void vring_used_write(VirtQueue *vq, VRingUsedElem *uelem, + int i) +{ + VRingMemoryRegionCaches *caches = vring_get_region_caches(vq); + hwaddr pa = offsetof(VRingUsed, ring[i]); + + if (!caches) { + return; + } + + virtio_tswap32s(vq->vdev, &uelem->id); + virtio_tswap32s(vq->vdev, &uelem->len); + address_space_write_cached(&caches->used, pa, uelem, sizeof(VRingUsedElem)); + address_space_cache_invalidate(&caches->used, pa, sizeof(VRingUsedElem)); +} + +/* Called within rcu_read_lock(). */ +static uint16_t vring_used_idx(VirtQueue *vq) +{ + VRingMemoryRegionCaches *caches = vring_get_region_caches(vq); + hwaddr pa = offsetof(VRingUsed, idx); + + if (!caches) { + return 0; + } + + return virtio_lduw_phys_cached(vq->vdev, &caches->used, pa); +} + +/* Called within rcu_read_lock(). */ +static inline void vring_used_idx_set(VirtQueue *vq, uint16_t val) +{ + VRingMemoryRegionCaches *caches = vring_get_region_caches(vq); + hwaddr pa = offsetof(VRingUsed, idx); + + if (caches) { + virtio_stw_phys_cached(vq->vdev, &caches->used, pa, val); + address_space_cache_invalidate(&caches->used, pa, sizeof(val)); + } + + vq->used_idx = val; +} + +/* Called within rcu_read_lock(). */ +static inline void vring_used_flags_set_bit(VirtQueue *vq, int mask) +{ + VRingMemoryRegionCaches *caches = vring_get_region_caches(vq); + VirtIODevice *vdev = vq->vdev; + hwaddr pa = offsetof(VRingUsed, flags); + uint16_t flags; + + if (!caches) { + return; + } + + flags = virtio_lduw_phys_cached(vq->vdev, &caches->used, pa); + virtio_stw_phys_cached(vdev, &caches->used, pa, flags | mask); + address_space_cache_invalidate(&caches->used, pa, sizeof(flags)); +} + +/* Called within rcu_read_lock(). */ +static inline void vring_used_flags_unset_bit(VirtQueue *vq, int mask) +{ + VRingMemoryRegionCaches *caches = vring_get_region_caches(vq); + VirtIODevice *vdev = vq->vdev; + hwaddr pa = offsetof(VRingUsed, flags); + uint16_t flags; + + if (!caches) { + return; + } + + flags = virtio_lduw_phys_cached(vq->vdev, &caches->used, pa); + virtio_stw_phys_cached(vdev, &caches->used, pa, flags & ~mask); + address_space_cache_invalidate(&caches->used, pa, sizeof(flags)); +} + +/* Called within rcu_read_lock(). 
*/ +static inline void vring_set_avail_event(VirtQueue *vq, uint16_t val) +{ + VRingMemoryRegionCaches *caches; + hwaddr pa; + if (!vq->notification) { + return; + } + + caches = vring_get_region_caches(vq); + if (!caches) { + return; + } + + pa = offsetof(VRingUsed, ring[vq->vring.num]); + virtio_stw_phys_cached(vq->vdev, &caches->used, pa, val); + address_space_cache_invalidate(&caches->used, pa, sizeof(val)); +} + +static void virtio_queue_split_set_notification(VirtQueue *vq, int enable) +{ + RCU_READ_LOCK_GUARD(); + + if (virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX)) { + vring_set_avail_event(vq, vring_avail_idx(vq)); + } else if (enable) { + vring_used_flags_unset_bit(vq, VRING_USED_F_NO_NOTIFY); + } else { + vring_used_flags_set_bit(vq, VRING_USED_F_NO_NOTIFY); + } + if (enable) { + /* Expose avail event/used flags before caller checks the avail idx. */ + smp_mb(); + } +} + +static void virtio_queue_packed_set_notification(VirtQueue *vq, int enable) +{ + uint16_t off_wrap; + VRingPackedDescEvent e; + VRingMemoryRegionCaches *caches; + + RCU_READ_LOCK_GUARD(); + caches = vring_get_region_caches(vq); + if (!caches) { + return; + } + + vring_packed_event_read(vq->vdev, &caches->used, &e); + + if (!enable) { + e.flags = VRING_PACKED_EVENT_FLAG_DISABLE; + } else if (virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX)) { + off_wrap = vq->shadow_avail_idx | vq->shadow_avail_wrap_counter << 15; + vring_packed_off_wrap_write(vq->vdev, &caches->used, off_wrap); + /* Make sure off_wrap is wrote before flags */ + smp_wmb(); + e.flags = VRING_PACKED_EVENT_FLAG_DESC; + } else { + e.flags = VRING_PACKED_EVENT_FLAG_ENABLE; + } + + vring_packed_flags_write(vq->vdev, &caches->used, e.flags); + if (enable) { + /* Expose avail event/used flags before caller checks the avail idx. */ + smp_mb(); + } +} + +bool virtio_queue_get_notification(VirtQueue *vq) +{ + return vq->notification; +} + +void virtio_queue_set_notification(VirtQueue *vq, int enable) +{ + vq->notification = enable; + + if (!vq->vring.desc) { + return; + } + + if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) { + virtio_queue_packed_set_notification(vq, enable); + } else { + virtio_queue_split_set_notification(vq, enable); + } +} + +int virtio_queue_ready(VirtQueue *vq) +{ + return vq->vring.avail != 0; +} + +static void vring_packed_desc_read_flags(VirtIODevice *vdev, + uint16_t *flags, + MemoryRegionCache *cache, + int i) +{ + hwaddr off = i * sizeof(VRingPackedDesc) + offsetof(VRingPackedDesc, flags); + + *flags = virtio_lduw_phys_cached(vdev, cache, off); +} + +static void vring_packed_desc_read(VirtIODevice *vdev, + VRingPackedDesc *desc, + MemoryRegionCache *cache, + int i, bool strict_order) +{ + hwaddr off = i * sizeof(VRingPackedDesc); + + vring_packed_desc_read_flags(vdev, &desc->flags, cache, i); + + if (strict_order) { + /* Make sure flags is read before the rest fields. 
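+ * of the descriptor (addr/id/len): the device must not consume
+ * contents that were written before the flags marked the
+ * descriptor available.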
*/ + smp_rmb(); + } + + address_space_read_cached(cache, off + offsetof(VRingPackedDesc, addr), + &desc->addr, sizeof(desc->addr)); + address_space_read_cached(cache, off + offsetof(VRingPackedDesc, id), + &desc->id, sizeof(desc->id)); + address_space_read_cached(cache, off + offsetof(VRingPackedDesc, len), + &desc->len, sizeof(desc->len)); + virtio_tswap64s(vdev, &desc->addr); + virtio_tswap16s(vdev, &desc->id); + virtio_tswap32s(vdev, &desc->len); +} + +static void vring_packed_desc_write_data(VirtIODevice *vdev, + VRingPackedDesc *desc, + MemoryRegionCache *cache, + int i) +{ + hwaddr off_id = i * sizeof(VRingPackedDesc) + + offsetof(VRingPackedDesc, id); + hwaddr off_len = i * sizeof(VRingPackedDesc) + + offsetof(VRingPackedDesc, len); + + virtio_tswap32s(vdev, &desc->len); + virtio_tswap16s(vdev, &desc->id); + address_space_write_cached(cache, off_id, &desc->id, sizeof(desc->id)); + address_space_cache_invalidate(cache, off_id, sizeof(desc->id)); + address_space_write_cached(cache, off_len, &desc->len, sizeof(desc->len)); + address_space_cache_invalidate(cache, off_len, sizeof(desc->len)); +} + +static void vring_packed_desc_write_flags(VirtIODevice *vdev, + VRingPackedDesc *desc, + MemoryRegionCache *cache, + int i) +{ + hwaddr off = i * sizeof(VRingPackedDesc) + offsetof(VRingPackedDesc, flags); + + virtio_stw_phys_cached(vdev, cache, off, desc->flags); + address_space_cache_invalidate(cache, off, sizeof(desc->flags)); +} + +static void vring_packed_desc_write(VirtIODevice *vdev, + VRingPackedDesc *desc, + MemoryRegionCache *cache, + int i, bool strict_order) +{ + vring_packed_desc_write_data(vdev, desc, cache, i); + if (strict_order) { + /* Make sure data is wrote before flags. */ + smp_wmb(); + } + vring_packed_desc_write_flags(vdev, desc, cache, i); +} + +static inline bool is_desc_avail(uint16_t flags, bool wrap_counter) +{ + bool avail, used; + + avail = !!(flags & (1 << VRING_PACKED_DESC_F_AVAIL)); + used = !!(flags & (1 << VRING_PACKED_DESC_F_USED)); + return (avail != used) && (avail == wrap_counter); +} + +/* Fetch avail_idx from VQ memory only when we really need to know if + * guest has added some buffers. + * Called within rcu_read_lock(). */ +static int virtio_queue_empty_rcu(VirtQueue *vq) +{ + if (virtio_device_disabled(vq->vdev)) { + return 1; + } + + if (unlikely(!vq->vring.avail)) { + return 1; + } + + if (vq->shadow_avail_idx != vq->last_avail_idx) { + return 0; + } + + return vring_avail_idx(vq) == vq->last_avail_idx; +} + +static int virtio_queue_split_empty(VirtQueue *vq) +{ + bool empty; + + if (virtio_device_disabled(vq->vdev)) { + return 1; + } + + if (unlikely(!vq->vring.avail)) { + return 1; + } + + if (vq->shadow_avail_idx != vq->last_avail_idx) { + return 0; + } + + RCU_READ_LOCK_GUARD(); + empty = vring_avail_idx(vq) == vq->last_avail_idx; + return empty; +} + +/* Called within rcu_read_lock(). 
*/ +static int virtio_queue_packed_empty_rcu(VirtQueue *vq) +{ + struct VRingPackedDesc desc; + VRingMemoryRegionCaches *cache; + + if (unlikely(!vq->vring.desc)) { + return 1; + } + + cache = vring_get_region_caches(vq); + if (!cache) { + return 1; + } + + vring_packed_desc_read_flags(vq->vdev, &desc.flags, &cache->desc, + vq->last_avail_idx); + + return !is_desc_avail(desc.flags, vq->last_avail_wrap_counter); +} + +static int virtio_queue_packed_empty(VirtQueue *vq) +{ + RCU_READ_LOCK_GUARD(); + return virtio_queue_packed_empty_rcu(vq); +} + +int virtio_queue_empty(VirtQueue *vq) +{ + if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) { + return virtio_queue_packed_empty(vq); + } else { + return virtio_queue_split_empty(vq); + } +} + +static void virtqueue_unmap_sg(VirtQueue *vq, const VirtQueueElement *elem, + unsigned int len) +{ + AddressSpace *dma_as = vq->vdev->dma_as; + unsigned int offset; + int i; + + offset = 0; + for (i = 0; i < elem->in_num; i++) { + size_t size = MIN(len - offset, elem->in_sg[i].iov_len); + + dma_memory_unmap(dma_as, elem->in_sg[i].iov_base, + elem->in_sg[i].iov_len, + DMA_DIRECTION_FROM_DEVICE, size); + + offset += size; + } + + for (i = 0; i < elem->out_num; i++) + dma_memory_unmap(dma_as, elem->out_sg[i].iov_base, + elem->out_sg[i].iov_len, + DMA_DIRECTION_TO_DEVICE, + elem->out_sg[i].iov_len); +} + +/* virtqueue_detach_element: + * @vq: The #VirtQueue + * @elem: The #VirtQueueElement + * @len: number of bytes written + * + * Detach the element from the virtqueue. This function is suitable for device + * reset or other situations where a #VirtQueueElement is simply freed and will + * not be pushed or discarded. + */ +void virtqueue_detach_element(VirtQueue *vq, const VirtQueueElement *elem, + unsigned int len) +{ + vq->inuse -= elem->ndescs; + virtqueue_unmap_sg(vq, elem, len); +} + +static void virtqueue_split_rewind(VirtQueue *vq, unsigned int num) +{ + vq->last_avail_idx -= num; +} + +static void virtqueue_packed_rewind(VirtQueue *vq, unsigned int num) +{ + if (vq->last_avail_idx < num) { + vq->last_avail_idx = vq->vring.num + vq->last_avail_idx - num; + vq->last_avail_wrap_counter ^= 1; + } else { + vq->last_avail_idx -= num; + } +} + +/* virtqueue_unpop: + * @vq: The #VirtQueue + * @elem: The #VirtQueueElement + * @len: number of bytes written + * + * Pretend the most recent element wasn't popped from the virtqueue. The next + * call to virtqueue_pop() will refetch the element. + */ +void virtqueue_unpop(VirtQueue *vq, const VirtQueueElement *elem, + unsigned int len) +{ + + if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) { + virtqueue_packed_rewind(vq, 1); + } else { + virtqueue_split_rewind(vq, 1); + } + + virtqueue_detach_element(vq, elem, len); +} + +/* virtqueue_rewind: + * @vq: The #VirtQueue + * @num: Number of elements to push back + * + * Pretend that elements weren't popped from the virtqueue. The next + * virtqueue_pop() will refetch the oldest element. + * + * Use virtqueue_unpop() instead if you have a VirtQueueElement. + * + * Returns: true on success, false if @num is greater than the number of in use + * elements. 
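+ * Devices typically use this on reset paths to return a batch of
+ * popped but never-completed elements to the queue.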
+ */ +bool virtqueue_rewind(VirtQueue *vq, unsigned int num) +{ + if (num > vq->inuse) { + return false; + } + + vq->inuse -= num; + if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) { + virtqueue_packed_rewind(vq, num); + } else { + virtqueue_split_rewind(vq, num); + } + return true; +} + +static void virtqueue_split_fill(VirtQueue *vq, const VirtQueueElement *elem, + unsigned int len, unsigned int idx) +{ + VRingUsedElem uelem; + + if (unlikely(!vq->vring.used)) { + return; + } + + idx = (idx + vq->used_idx) % vq->vring.num; + + uelem.id = elem->index; + uelem.len = len; + vring_used_write(vq, &uelem, idx); +} + +static void virtqueue_packed_fill(VirtQueue *vq, const VirtQueueElement *elem, + unsigned int len, unsigned int idx) +{ + vq->used_elems[idx].index = elem->index; + vq->used_elems[idx].len = len; + vq->used_elems[idx].ndescs = elem->ndescs; +} + +static void virtqueue_packed_fill_desc(VirtQueue *vq, + const VirtQueueElement *elem, + unsigned int idx, + bool strict_order) +{ + uint16_t head; + VRingMemoryRegionCaches *caches; + VRingPackedDesc desc = { + .id = elem->index, + .len = elem->len, + }; + bool wrap_counter = vq->used_wrap_counter; + + if (unlikely(!vq->vring.desc)) { + return; + } + + head = vq->used_idx + idx; + if (head >= vq->vring.num) { + head -= vq->vring.num; + wrap_counter ^= 1; + } + if (wrap_counter) { + desc.flags |= (1 << VRING_PACKED_DESC_F_AVAIL); + desc.flags |= (1 << VRING_PACKED_DESC_F_USED); + } else { + desc.flags &= ~(1 << VRING_PACKED_DESC_F_AVAIL); + desc.flags &= ~(1 << VRING_PACKED_DESC_F_USED); + } + + caches = vring_get_region_caches(vq); + if (!caches) { + return; + } + + vring_packed_desc_write(vq->vdev, &desc, &caches->desc, head, strict_order); +} + +/* Called within rcu_read_lock(). */ +void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem, + unsigned int len, unsigned int idx) +{ + trace_virtqueue_fill(vq, elem, len, idx); + + virtqueue_unmap_sg(vq, elem, len); + + if (virtio_device_disabled(vq->vdev)) { + return; + } + + if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) { + virtqueue_packed_fill(vq, elem, len, idx); + } else { + virtqueue_split_fill(vq, elem, len, idx); + } +} + +/* Called within rcu_read_lock(). */ +static void virtqueue_split_flush(VirtQueue *vq, unsigned int count) +{ + uint16_t old, new; + + if (unlikely(!vq->vring.used)) { + return; + } + + /* Make sure buffer is written before we update index. 
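+ * The guest may read the element as soon as it observes the new
+ * used->idx, so the buffer contents must be globally visible first.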
*/ + smp_wmb(); + trace_virtqueue_flush(vq, count); + old = vq->used_idx; + new = old + count; + vring_used_idx_set(vq, new); + vq->inuse -= count; + if (unlikely((int16_t)(new - vq->signalled_used) < (uint16_t)(new - old))) + vq->signalled_used_valid = false; +} + +static void virtqueue_packed_flush(VirtQueue *vq, unsigned int count) +{ + unsigned int i, ndescs = 0; + + if (unlikely(!vq->vring.desc)) { + return; + } + + for (i = 1; i < count; i++) { + virtqueue_packed_fill_desc(vq, &vq->used_elems[i], i, false); + ndescs += vq->used_elems[i].ndescs; + } + virtqueue_packed_fill_desc(vq, &vq->used_elems[0], 0, true); + ndescs += vq->used_elems[0].ndescs; + + vq->inuse -= ndescs; + vq->used_idx += ndescs; + if (vq->used_idx >= vq->vring.num) { + vq->used_idx -= vq->vring.num; + vq->used_wrap_counter ^= 1; + } +} + +void virtqueue_flush(VirtQueue *vq, unsigned int count) +{ + if (virtio_device_disabled(vq->vdev)) { + vq->inuse -= count; + return; + } + + if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) { + virtqueue_packed_flush(vq, count); + } else { + virtqueue_split_flush(vq, count); + } +} + +void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem, + unsigned int len) +{ + RCU_READ_LOCK_GUARD(); + virtqueue_fill(vq, elem, len, 0); + virtqueue_flush(vq, 1); +} + +/* Called within rcu_read_lock(). */ +static int virtqueue_num_heads(VirtQueue *vq, unsigned int idx) +{ + uint16_t num_heads = vring_avail_idx(vq) - idx; + + /* Check it isn't doing very strange things with descriptor numbers. */ + if (num_heads > vq->vring.num) { + virtio_error(vq->vdev, "Guest moved used index from %u to %u", + idx, vq->shadow_avail_idx); + return -EINVAL; + } + /* On success, callers read a descriptor at vq->last_avail_idx. + * Make sure descriptor read does not bypass avail index read. */ + if (num_heads) { + smp_rmb(); + } + + return num_heads; +} + +/* Called within rcu_read_lock(). */ +static bool virtqueue_get_head(VirtQueue *vq, unsigned int idx, + unsigned int *head) +{ + /* Grab the next descriptor number they're advertising, and increment + * the index we've seen. */ + *head = vring_avail_ring(vq, idx % vq->vring.num); + + /* If their number is silly, that's a fatal mistake. */ + if (*head >= vq->vring.num) { + virtio_error(vq->vdev, "Guest says index %u is available", *head); + return false; + } + + return true; +} + +enum { + VIRTQUEUE_READ_DESC_ERROR = -1, + VIRTQUEUE_READ_DESC_DONE = 0, /* end of chain */ + VIRTQUEUE_READ_DESC_MORE = 1, /* more buffers in chain */ +}; + +static int virtqueue_split_read_next_desc(VirtIODevice *vdev, VRingDesc *desc, + MemoryRegionCache *desc_cache, + unsigned int max, unsigned int *next) +{ + /* If this descriptor says it doesn't chain, we're done. */ + if (!(desc->flags & VRING_DESC_F_NEXT)) { + return VIRTQUEUE_READ_DESC_DONE; + } + + /* Check they're not leading us off end of descriptors. */ + *next = desc->next; + /* Make sure compiler knows to grab that: we don't want it changing! */ + smp_wmb(); + + if (*next >= max) { + virtio_error(vdev, "Desc next is %u", *next); + return VIRTQUEUE_READ_DESC_ERROR; + } + + vring_split_desc_read(vdev, desc, desc_cache, *next); + return VIRTQUEUE_READ_DESC_MORE; +} + +/* Called within rcu_read_lock(). 
*/ +static void virtqueue_split_get_avail_bytes(VirtQueue *vq, + unsigned int *in_bytes, unsigned int *out_bytes, + unsigned max_in_bytes, unsigned max_out_bytes, + VRingMemoryRegionCaches *caches) +{ + VirtIODevice *vdev = vq->vdev; + unsigned int max, idx; + unsigned int total_bufs, in_total, out_total; + MemoryRegionCache indirect_desc_cache = MEMORY_REGION_CACHE_INVALID; + int64_t len = 0; + int rc; + + idx = vq->last_avail_idx; + total_bufs = in_total = out_total = 0; + + max = vq->vring.num; + + while ((rc = virtqueue_num_heads(vq, idx)) > 0) { + MemoryRegionCache *desc_cache = &caches->desc; + unsigned int num_bufs; + VRingDesc desc; + unsigned int i; + + num_bufs = total_bufs; + + if (!virtqueue_get_head(vq, idx++, &i)) { + goto err; + } + + vring_split_desc_read(vdev, &desc, desc_cache, i); + + if (desc.flags & VRING_DESC_F_INDIRECT) { + if (!desc.len || (desc.len % sizeof(VRingDesc))) { + virtio_error(vdev, "Invalid size for indirect buffer table"); + goto err; + } + + /* If we've got too many, that implies a descriptor loop. */ + if (num_bufs >= max) { + virtio_error(vdev, "Looped descriptor"); + goto err; + } + + /* loop over the indirect descriptor table */ + len = address_space_cache_init(&indirect_desc_cache, + vdev->dma_as, + desc.addr, desc.len, false); + desc_cache = &indirect_desc_cache; + if (len < desc.len) { + virtio_error(vdev, "Cannot map indirect buffer"); + goto err; + } + + max = desc.len / sizeof(VRingDesc); + num_bufs = i = 0; + vring_split_desc_read(vdev, &desc, desc_cache, i); + } + + do { + /* If we've got too many, that implies a descriptor loop. */ + if (++num_bufs > max) { + virtio_error(vdev, "Looped descriptor"); + goto err; + } + + if (desc.flags & VRING_DESC_F_WRITE) { + in_total += desc.len; + } else { + out_total += desc.len; + } + if (in_total >= max_in_bytes && out_total >= max_out_bytes) { + goto done; + } + + rc = virtqueue_split_read_next_desc(vdev, &desc, desc_cache, max, &i); + } while (rc == VIRTQUEUE_READ_DESC_MORE); + + if (rc == VIRTQUEUE_READ_DESC_ERROR) { + goto err; + } + + if (desc_cache == &indirect_desc_cache) { + address_space_cache_destroy(&indirect_desc_cache); + total_bufs++; + } else { + total_bufs = num_bufs; + } + } + + if (rc < 0) { + goto err; + } + +done: + address_space_cache_destroy(&indirect_desc_cache); + if (in_bytes) { + *in_bytes = in_total; + } + if (out_bytes) { + *out_bytes = out_total; + } + return; + +err: + in_total = out_total = 0; + goto done; +} + +static int virtqueue_packed_read_next_desc(VirtQueue *vq, + VRingPackedDesc *desc, + MemoryRegionCache + *desc_cache, + unsigned int max, + unsigned int *next, + bool indirect) +{ + /* If this descriptor says it doesn't chain, we're done. */ + if (!indirect && !(desc->flags & VRING_DESC_F_NEXT)) { + return VIRTQUEUE_READ_DESC_DONE; + } + + ++*next; + if (*next == max) { + if (indirect) { + return VIRTQUEUE_READ_DESC_DONE; + } else { + (*next) -= vq->vring.num; + } + } + + vring_packed_desc_read(vq->vdev, desc, desc_cache, *next, false); + return VIRTQUEUE_READ_DESC_MORE; +} + +/* Called within rcu_read_lock(). 
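+ *
+ * Packed-ring variant of the walk above; a descriptor is pending when
+ * its AVAIL/USED flag bits match the expected wrap counter, so the loop
+ * tracks the wrap state as it advances through the ring.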
*/ +static void virtqueue_packed_get_avail_bytes(VirtQueue *vq, + unsigned int *in_bytes, + unsigned int *out_bytes, + unsigned max_in_bytes, + unsigned max_out_bytes, + VRingMemoryRegionCaches *caches) +{ + VirtIODevice *vdev = vq->vdev; + unsigned int max, idx; + unsigned int total_bufs, in_total, out_total; + MemoryRegionCache *desc_cache; + MemoryRegionCache indirect_desc_cache = MEMORY_REGION_CACHE_INVALID; + int64_t len = 0; + VRingPackedDesc desc; + bool wrap_counter; + + idx = vq->last_avail_idx; + wrap_counter = vq->last_avail_wrap_counter; + total_bufs = in_total = out_total = 0; + + max = vq->vring.num; + + for (;;) { + unsigned int num_bufs = total_bufs; + unsigned int i = idx; + int rc; + + desc_cache = &caches->desc; + vring_packed_desc_read(vdev, &desc, desc_cache, idx, true); + if (!is_desc_avail(desc.flags, wrap_counter)) { + break; + } + + if (desc.flags & VRING_DESC_F_INDIRECT) { + if (desc.len % sizeof(VRingPackedDesc)) { + virtio_error(vdev, "Invalid size for indirect buffer table"); + goto err; + } + + /* If we've got too many, that implies a descriptor loop. */ + if (num_bufs >= max) { + virtio_error(vdev, "Looped descriptor"); + goto err; + } + + /* loop over the indirect descriptor table */ + len = address_space_cache_init(&indirect_desc_cache, + vdev->dma_as, + desc.addr, desc.len, false); + desc_cache = &indirect_desc_cache; + if (len < desc.len) { + virtio_error(vdev, "Cannot map indirect buffer"); + goto err; + } + + max = desc.len / sizeof(VRingPackedDesc); + num_bufs = i = 0; + vring_packed_desc_read(vdev, &desc, desc_cache, i, false); + } + + do { + /* If we've got too many, that implies a descriptor loop. */ + if (++num_bufs > max) { + virtio_error(vdev, "Looped descriptor"); + goto err; + } + + if (desc.flags & VRING_DESC_F_WRITE) { + in_total += desc.len; + } else { + out_total += desc.len; + } + if (in_total >= max_in_bytes && out_total >= max_out_bytes) { + goto done; + } + + rc = virtqueue_packed_read_next_desc(vq, &desc, desc_cache, max, + &i, desc_cache == + &indirect_desc_cache); + } while (rc == VIRTQUEUE_READ_DESC_MORE); + + if (desc_cache == &indirect_desc_cache) { + address_space_cache_destroy(&indirect_desc_cache); + total_bufs++; + idx++; + } else { + idx += num_bufs - total_bufs; + total_bufs = num_bufs; + } + + if (idx >= vq->vring.num) { + idx -= vq->vring.num; + wrap_counter ^= 1; + } + } + + /* Record the index and wrap counter for a kick we want */ + vq->shadow_avail_idx = idx; + vq->shadow_avail_wrap_counter = wrap_counter; +done: + address_space_cache_destroy(&indirect_desc_cache); + if (in_bytes) { + *in_bytes = in_total; + } + if (out_bytes) { + *out_bytes = out_total; + } + return; + +err: + in_total = out_total = 0; + goto done; +} + +void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes, + unsigned int *out_bytes, + unsigned max_in_bytes, unsigned max_out_bytes) +{ + uint16_t desc_size; + VRingMemoryRegionCaches *caches; + + RCU_READ_LOCK_GUARD(); + + if (unlikely(!vq->vring.desc)) { + goto err; + } + + caches = vring_get_region_caches(vq); + if (!caches) { + goto err; + } + + desc_size = virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED) ? 
+ sizeof(VRingPackedDesc) : sizeof(VRingDesc); + if (caches->desc.len < vq->vring.num * desc_size) { + virtio_error(vq->vdev, "Cannot map descriptor ring"); + goto err; + } + + if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) { + virtqueue_packed_get_avail_bytes(vq, in_bytes, out_bytes, + max_in_bytes, max_out_bytes, + caches); + } else { + virtqueue_split_get_avail_bytes(vq, in_bytes, out_bytes, + max_in_bytes, max_out_bytes, + caches); + } + + return; +err: + if (in_bytes) { + *in_bytes = 0; + } + if (out_bytes) { + *out_bytes = 0; + } +} + +int virtqueue_avail_bytes(VirtQueue *vq, unsigned int in_bytes, + unsigned int out_bytes) +{ + unsigned int in_total, out_total; + + virtqueue_get_avail_bytes(vq, &in_total, &out_total, in_bytes, out_bytes); + return in_bytes <= in_total && out_bytes <= out_total; +} + +static bool virtqueue_map_desc(VirtIODevice *vdev, unsigned int *p_num_sg, + hwaddr *addr, struct iovec *iov, + unsigned int max_num_sg, bool is_write, + hwaddr pa, size_t sz) +{ + bool ok = false; + unsigned num_sg = *p_num_sg; + assert(num_sg <= max_num_sg); + + if (!sz) { + virtio_error(vdev, "virtio: zero sized buffers are not allowed"); + goto out; + } + + while (sz) { + hwaddr len = sz; + + if (num_sg == max_num_sg) { + virtio_error(vdev, "virtio: too many write descriptors in " + "indirect table"); + goto out; + } + + iov[num_sg].iov_base = dma_memory_map(vdev->dma_as, pa, &len, + is_write ? + DMA_DIRECTION_FROM_DEVICE : + DMA_DIRECTION_TO_DEVICE); + if (!iov[num_sg].iov_base) { + virtio_error(vdev, "virtio: bogus descriptor or out of resources"); + goto out; + } + + iov[num_sg].iov_len = len; + addr[num_sg] = pa; + + sz -= len; + pa += len; + num_sg++; + } + ok = true; + +out: + *p_num_sg = num_sg; + return ok; +} + +/* Only used by error code paths before we have a VirtQueueElement (therefore + * virtqueue_unmap_sg() can't be used). Assumes buffers weren't written to + * yet. + */ +static void virtqueue_undo_map_desc(unsigned int out_num, unsigned int in_num, + struct iovec *iov) +{ + unsigned int i; + + for (i = 0; i < out_num + in_num; i++) { + int is_write = i >= out_num; + + cpu_physical_memory_unmap(iov->iov_base, iov->iov_len, is_write, 0); + iov++; + } +} + +static void virtqueue_map_iovec(VirtIODevice *vdev, struct iovec *sg, + hwaddr *addr, unsigned int num_sg, + bool is_write) +{ + unsigned int i; + hwaddr len; + + for (i = 0; i < num_sg; i++) { + len = sg[i].iov_len; + sg[i].iov_base = dma_memory_map(vdev->dma_as, + addr[i], &len, is_write ? 
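+                    /* a device-writable buffer means data flows from the
+                     * device into memory, hence FROM_DEVICE */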
+ DMA_DIRECTION_FROM_DEVICE : + DMA_DIRECTION_TO_DEVICE); + if (!sg[i].iov_base) { + error_report("virtio: error trying to map MMIO memory"); + exit(1); + } + if (len != sg[i].iov_len) { + error_report("virtio: unexpected memory split"); + exit(1); + } + } +} + +void virtqueue_map(VirtIODevice *vdev, VirtQueueElement *elem) +{ + virtqueue_map_iovec(vdev, elem->in_sg, elem->in_addr, elem->in_num, true); + virtqueue_map_iovec(vdev, elem->out_sg, elem->out_addr, elem->out_num, + false); +} + +static void *virtqueue_alloc_element(size_t sz, unsigned out_num, unsigned in_num) +{ + VirtQueueElement *elem; + size_t in_addr_ofs = QEMU_ALIGN_UP(sz, __alignof__(elem->in_addr[0])); + size_t out_addr_ofs = in_addr_ofs + in_num * sizeof(elem->in_addr[0]); + size_t out_addr_end = out_addr_ofs + out_num * sizeof(elem->out_addr[0]); + size_t in_sg_ofs = QEMU_ALIGN_UP(out_addr_end, __alignof__(elem->in_sg[0])); + size_t out_sg_ofs = in_sg_ofs + in_num * sizeof(elem->in_sg[0]); + size_t out_sg_end = out_sg_ofs + out_num * sizeof(elem->out_sg[0]); + + assert(sz >= sizeof(VirtQueueElement)); + elem = g_malloc(out_sg_end); + trace_virtqueue_alloc_element(elem, sz, in_num, out_num); + elem->out_num = out_num; + elem->in_num = in_num; + elem->in_addr = (void *)elem + in_addr_ofs; + elem->out_addr = (void *)elem + out_addr_ofs; + elem->in_sg = (void *)elem + in_sg_ofs; + elem->out_sg = (void *)elem + out_sg_ofs; + return elem; +} + +static void *virtqueue_split_pop(VirtQueue *vq, size_t sz) +{ + unsigned int i, head, max; + VRingMemoryRegionCaches *caches; + MemoryRegionCache indirect_desc_cache = MEMORY_REGION_CACHE_INVALID; + MemoryRegionCache *desc_cache; + int64_t len; + VirtIODevice *vdev = vq->vdev; + VirtQueueElement *elem = NULL; + unsigned out_num, in_num, elem_entries; + hwaddr addr[VIRTQUEUE_MAX_SIZE]; + struct iovec iov[VIRTQUEUE_MAX_SIZE]; + VRingDesc desc; + int rc; + + RCU_READ_LOCK_GUARD(); + if (virtio_queue_empty_rcu(vq)) { + goto done; + } + /* Needed after virtio_queue_empty(), see comment in + * virtqueue_num_heads(). */ + smp_rmb(); + + /* When we start there are none of either input nor output. 
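+     * out_num counts the device-readable descriptors, in_num the
+     * device-writable ones, and elem_entries bounds the whole chain so
+     * a looped chain cannot spin forever.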
*/ + out_num = in_num = elem_entries = 0; + + max = vq->vring.num; + + if (vq->inuse >= vq->vring.num) { + virtio_error(vdev, "Virtqueue size exceeded"); + goto done; + } + + if (!virtqueue_get_head(vq, vq->last_avail_idx++, &head)) { + goto done; + } + + if (virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) { + vring_set_avail_event(vq, vq->last_avail_idx); + } + + i = head; + + caches = vring_get_region_caches(vq); + if (!caches) { + virtio_error(vdev, "Region caches not initialized"); + goto done; + } + + if (caches->desc.len < max * sizeof(VRingDesc)) { + virtio_error(vdev, "Cannot map descriptor ring"); + goto done; + } + + desc_cache = &caches->desc; + vring_split_desc_read(vdev, &desc, desc_cache, i); + if (desc.flags & VRING_DESC_F_INDIRECT) { + if (!desc.len || (desc.len % sizeof(VRingDesc))) { + virtio_error(vdev, "Invalid size for indirect buffer table"); + goto done; + } + + /* loop over the indirect descriptor table */ + len = address_space_cache_init(&indirect_desc_cache, vdev->dma_as, + desc.addr, desc.len, false); + desc_cache = &indirect_desc_cache; + if (len < desc.len) { + virtio_error(vdev, "Cannot map indirect buffer"); + goto done; + } + + max = desc.len / sizeof(VRingDesc); + i = 0; + vring_split_desc_read(vdev, &desc, desc_cache, i); + } + + /* Collect all the descriptors */ + do { + bool map_ok; + + if (desc.flags & VRING_DESC_F_WRITE) { + map_ok = virtqueue_map_desc(vdev, &in_num, addr + out_num, + iov + out_num, + VIRTQUEUE_MAX_SIZE - out_num, true, + desc.addr, desc.len); + } else { + if (in_num) { + virtio_error(vdev, "Incorrect order for descriptors"); + goto err_undo_map; + } + map_ok = virtqueue_map_desc(vdev, &out_num, addr, iov, + VIRTQUEUE_MAX_SIZE, false, + desc.addr, desc.len); + } + if (!map_ok) { + goto err_undo_map; + } + + /* If we've got too many, that implies a descriptor loop. */ + if (++elem_entries > max) { + virtio_error(vdev, "Looped descriptor"); + goto err_undo_map; + } + + rc = virtqueue_split_read_next_desc(vdev, &desc, desc_cache, max, &i); + } while (rc == VIRTQUEUE_READ_DESC_MORE); + + if (rc == VIRTQUEUE_READ_DESC_ERROR) { + goto err_undo_map; + } + + /* Now copy what we have collected and mapped */ + elem = virtqueue_alloc_element(sz, out_num, in_num); + elem->index = head; + elem->ndescs = 1; + for (i = 0; i < out_num; i++) { + elem->out_addr[i] = addr[i]; + elem->out_sg[i] = iov[i]; + } + for (i = 0; i < in_num; i++) { + elem->in_addr[i] = addr[out_num + i]; + elem->in_sg[i] = iov[out_num + i]; + } + + vq->inuse++; + + trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num); +done: + address_space_cache_destroy(&indirect_desc_cache); + + return elem; + +err_undo_map: + virtqueue_undo_map_desc(out_num, in_num, iov); + goto done; +} + +static void *virtqueue_packed_pop(VirtQueue *vq, size_t sz) +{ + unsigned int i, max; + VRingMemoryRegionCaches *caches; + MemoryRegionCache indirect_desc_cache = MEMORY_REGION_CACHE_INVALID; + MemoryRegionCache *desc_cache; + int64_t len; + VirtIODevice *vdev = vq->vdev; + VirtQueueElement *elem = NULL; + unsigned out_num, in_num, elem_entries; + hwaddr addr[VIRTQUEUE_MAX_SIZE]; + struct iovec iov[VIRTQUEUE_MAX_SIZE]; + VRingPackedDesc desc; + uint16_t id; + int rc; + + RCU_READ_LOCK_GUARD(); + if (virtio_queue_packed_empty_rcu(vq)) { + goto done; + } + + /* When we start there are none of either input nor output. 
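+     * The counters mirror the split variant: out/in scatter-gather
+     * entries plus a chain-length guard against descriptor loops.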
*/ + out_num = in_num = elem_entries = 0; + + max = vq->vring.num; + + if (vq->inuse >= vq->vring.num) { + virtio_error(vdev, "Virtqueue size exceeded"); + goto done; + } + + i = vq->last_avail_idx; + + caches = vring_get_region_caches(vq); + if (!caches) { + virtio_error(vdev, "Region caches not initialized"); + goto done; + } + + if (caches->desc.len < max * sizeof(VRingDesc)) { + virtio_error(vdev, "Cannot map descriptor ring"); + goto done; + } + + desc_cache = &caches->desc; + vring_packed_desc_read(vdev, &desc, desc_cache, i, true); + id = desc.id; + if (desc.flags & VRING_DESC_F_INDIRECT) { + if (desc.len % sizeof(VRingPackedDesc)) { + virtio_error(vdev, "Invalid size for indirect buffer table"); + goto done; + } + + /* loop over the indirect descriptor table */ + len = address_space_cache_init(&indirect_desc_cache, vdev->dma_as, + desc.addr, desc.len, false); + desc_cache = &indirect_desc_cache; + if (len < desc.len) { + virtio_error(vdev, "Cannot map indirect buffer"); + goto done; + } + + max = desc.len / sizeof(VRingPackedDesc); + i = 0; + vring_packed_desc_read(vdev, &desc, desc_cache, i, false); + } + + /* Collect all the descriptors */ + do { + bool map_ok; + + if (desc.flags & VRING_DESC_F_WRITE) { + map_ok = virtqueue_map_desc(vdev, &in_num, addr + out_num, + iov + out_num, + VIRTQUEUE_MAX_SIZE - out_num, true, + desc.addr, desc.len); + } else { + if (in_num) { + virtio_error(vdev, "Incorrect order for descriptors"); + goto err_undo_map; + } + map_ok = virtqueue_map_desc(vdev, &out_num, addr, iov, + VIRTQUEUE_MAX_SIZE, false, + desc.addr, desc.len); + } + if (!map_ok) { + goto err_undo_map; + } + + /* If we've got too many, that implies a descriptor loop. */ + if (++elem_entries > max) { + virtio_error(vdev, "Looped descriptor"); + goto err_undo_map; + } + + rc = virtqueue_packed_read_next_desc(vq, &desc, desc_cache, max, &i, + desc_cache == + &indirect_desc_cache); + } while (rc == VIRTQUEUE_READ_DESC_MORE); + + /* Now copy what we have collected and mapped */ + elem = virtqueue_alloc_element(sz, out_num, in_num); + for (i = 0; i < out_num; i++) { + elem->out_addr[i] = addr[i]; + elem->out_sg[i] = iov[i]; + } + for (i = 0; i < in_num; i++) { + elem->in_addr[i] = addr[out_num + i]; + elem->in_sg[i] = iov[out_num + i]; + } + + elem->index = id; + elem->ndescs = (desc_cache == &indirect_desc_cache) ? 
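+                   /* an indirect chain occupies a single slot in the
+                    * descriptor ring, however many entries its table holds */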
1 : elem_entries; + vq->last_avail_idx += elem->ndescs; + vq->inuse += elem->ndescs; + + if (vq->last_avail_idx >= vq->vring.num) { + vq->last_avail_idx -= vq->vring.num; + vq->last_avail_wrap_counter ^= 1; + } + + vq->shadow_avail_idx = vq->last_avail_idx; + vq->shadow_avail_wrap_counter = vq->last_avail_wrap_counter; + + trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num); +done: + address_space_cache_destroy(&indirect_desc_cache); + + return elem; + +err_undo_map: + virtqueue_undo_map_desc(out_num, in_num, iov); + goto done; +} + +void *virtqueue_pop(VirtQueue *vq, size_t sz) +{ + if (virtio_device_disabled(vq->vdev)) { + return NULL; + } + + if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) { + return virtqueue_packed_pop(vq, sz); + } else { + return virtqueue_split_pop(vq, sz); + } +} + +static unsigned int virtqueue_packed_drop_all(VirtQueue *vq) +{ + VRingMemoryRegionCaches *caches; + MemoryRegionCache *desc_cache; + unsigned int dropped = 0; + VirtQueueElement elem = {}; + VirtIODevice *vdev = vq->vdev; + VRingPackedDesc desc; + + RCU_READ_LOCK_GUARD(); + + caches = vring_get_region_caches(vq); + if (!caches) { + return 0; + } + + desc_cache = &caches->desc; + + virtio_queue_set_notification(vq, 0); + + while (vq->inuse < vq->vring.num) { + unsigned int idx = vq->last_avail_idx; + /* + * works similar to virtqueue_pop but does not map buffers + * and does not allocate any memory. + */ + vring_packed_desc_read(vdev, &desc, desc_cache, + vq->last_avail_idx , true); + if (!is_desc_avail(desc.flags, vq->last_avail_wrap_counter)) { + break; + } + elem.index = desc.id; + elem.ndescs = 1; + while (virtqueue_packed_read_next_desc(vq, &desc, desc_cache, + vq->vring.num, &idx, false)) { + ++elem.ndescs; + } + /* + * immediately push the element, nothing to unmap + * as both in_num and out_num are set to 0. + */ + virtqueue_push(vq, &elem, 0); + dropped++; + vq->last_avail_idx += elem.ndescs; + if (vq->last_avail_idx >= vq->vring.num) { + vq->last_avail_idx -= vq->vring.num; + vq->last_avail_wrap_counter ^= 1; + } + } + + return dropped; +} + +static unsigned int virtqueue_split_drop_all(VirtQueue *vq) +{ + unsigned int dropped = 0; + VirtQueueElement elem = {}; + VirtIODevice *vdev = vq->vdev; + bool fEventIdx = virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX); + + while (!virtio_queue_empty(vq) && vq->inuse < vq->vring.num) { + /* works similar to virtqueue_pop but does not map buffers + * and does not allocate any memory */ + smp_rmb(); + if (!virtqueue_get_head(vq, vq->last_avail_idx, &elem.index)) { + break; + } + vq->inuse++; + vq->last_avail_idx++; + if (fEventIdx) { + vring_set_avail_event(vq, vq->last_avail_idx); + } + /* immediately push the element, nothing to unmap + * as both in_num and out_num are set to 0 */ + virtqueue_push(vq, &elem, 0); + dropped++; + } + + return dropped; +} + +/* virtqueue_drop_all: + * @vq: The #VirtQueue + * Drops all queued buffers and indicates them to the guest + * as if they are done. Useful when buffers can not be + * processed but must be returned to the guest. + */ +unsigned int virtqueue_drop_all(VirtQueue *vq) +{ + struct VirtIODevice *vdev = vq->vdev; + + if (virtio_device_disabled(vq->vdev)) { + return 0; + } + + if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) { + return virtqueue_packed_drop_all(vq); + } else { + return virtqueue_split_drop_all(vq); + } +} + +/* Reading and writing a structure directly to QEMUFile is *awful*, but + * it is what QEMU has always done by mistake. 
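+ * The structure layout is, in effect, part of the migration stream ABI.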
We can change it sooner + * or later by bumping the version number of the affected vm states. + * In the meanwhile, since the in-memory layout of VirtQueueElement + * has changed, we need to marshal to and from the layout that was + * used before the change. + */ +typedef struct VirtQueueElementOld { + unsigned int index; + unsigned int out_num; + unsigned int in_num; + hwaddr in_addr[VIRTQUEUE_MAX_SIZE]; + hwaddr out_addr[VIRTQUEUE_MAX_SIZE]; + struct iovec in_sg[VIRTQUEUE_MAX_SIZE]; + struct iovec out_sg[VIRTQUEUE_MAX_SIZE]; +} VirtQueueElementOld; + +void *qemu_get_virtqueue_element(VirtIODevice *vdev, QEMUFile *f, size_t sz) +{ + VirtQueueElement *elem; + VirtQueueElementOld data; + int i; + + qemu_get_buffer(f, (uint8_t *)&data, sizeof(VirtQueueElementOld)); + + /* TODO: teach all callers that this can fail, and return failure instead + * of asserting here. + * This is just one thing (there are probably more) that must be + * fixed before we can allow NDEBUG compilation. + */ + assert(ARRAY_SIZE(data.in_addr) >= data.in_num); + assert(ARRAY_SIZE(data.out_addr) >= data.out_num); + + elem = virtqueue_alloc_element(sz, data.out_num, data.in_num); + elem->index = data.index; + + for (i = 0; i < elem->in_num; i++) { + elem->in_addr[i] = data.in_addr[i]; + } + + for (i = 0; i < elem->out_num; i++) { + elem->out_addr[i] = data.out_addr[i]; + } + + for (i = 0; i < elem->in_num; i++) { + /* Base is overwritten by virtqueue_map. */ + elem->in_sg[i].iov_base = 0; + elem->in_sg[i].iov_len = data.in_sg[i].iov_len; + } + + for (i = 0; i < elem->out_num; i++) { + /* Base is overwritten by virtqueue_map. */ + elem->out_sg[i].iov_base = 0; + elem->out_sg[i].iov_len = data.out_sg[i].iov_len; + } + + if (virtio_host_has_feature(vdev, VIRTIO_F_RING_PACKED)) { + qemu_get_be32s(f, &elem->ndescs); + } + + virtqueue_map(vdev, elem); + return elem; +} + +void qemu_put_virtqueue_element(VirtIODevice *vdev, QEMUFile *f, + VirtQueueElement *elem) +{ + VirtQueueElementOld data; + int i; + + memset(&data, 0, sizeof(data)); + data.index = elem->index; + data.in_num = elem->in_num; + data.out_num = elem->out_num; + + for (i = 0; i < elem->in_num; i++) { + data.in_addr[i] = elem->in_addr[i]; + } + + for (i = 0; i < elem->out_num; i++) { + data.out_addr[i] = elem->out_addr[i]; + } + + for (i = 0; i < elem->in_num; i++) { + /* Base is overwritten by virtqueue_map when loading. Do not + * save it, as it would leak the QEMU address space layout. */ + data.in_sg[i].iov_len = elem->in_sg[i].iov_len; + } + + for (i = 0; i < elem->out_num; i++) { + /* Do not save iov_base as above. 
*/ + data.out_sg[i].iov_len = elem->out_sg[i].iov_len; + } + + if (virtio_host_has_feature(vdev, VIRTIO_F_RING_PACKED)) { + qemu_put_be32s(f, &elem->ndescs); + } + + qemu_put_buffer(f, (uint8_t *)&data, sizeof(VirtQueueElementOld)); +} + +/* virtio device */ +static void virtio_notify_vector(VirtIODevice *vdev, uint16_t vector) +{ + BusState *qbus = qdev_get_parent_bus(DEVICE(vdev)); + VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); + + if (virtio_device_disabled(vdev)) { + return; + } + + if (k->notify) { + k->notify(qbus->parent, vector); + } +} + +void virtio_update_irq(VirtIODevice *vdev) +{ + virtio_notify_vector(vdev, VIRTIO_NO_VECTOR); +} + +static int virtio_validate_features(VirtIODevice *vdev) +{ + VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); + + if (virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM) && + !virtio_vdev_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM)) { + return -EFAULT; + } + + if (k->validate_features) { + return k->validate_features(vdev); + } else { + return 0; + } +} + +int virtio_set_status(VirtIODevice *vdev, uint8_t val) +{ + VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); + trace_virtio_set_status(vdev, val); + + if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) { + if (!(vdev->status & VIRTIO_CONFIG_S_FEATURES_OK) && + val & VIRTIO_CONFIG_S_FEATURES_OK) { + int ret = virtio_validate_features(vdev); + + if (ret) { + return ret; + } + } + } + + if ((vdev->status & VIRTIO_CONFIG_S_DRIVER_OK) != + (val & VIRTIO_CONFIG_S_DRIVER_OK)) { + virtio_set_started(vdev, val & VIRTIO_CONFIG_S_DRIVER_OK); + } + + if (k->set_status) { + k->set_status(vdev, val); + } + vdev->status = val; + + return 0; +} + +static enum virtio_device_endian virtio_default_endian(void) +{ + if (target_words_bigendian()) { + return VIRTIO_DEVICE_ENDIAN_BIG; + } else { + return VIRTIO_DEVICE_ENDIAN_LITTLE; + } +} + +static enum virtio_device_endian virtio_current_cpu_endian(void) +{ + if (cpu_virtio_is_big_endian(current_cpu)) { + return VIRTIO_DEVICE_ENDIAN_BIG; + } else { + return VIRTIO_DEVICE_ENDIAN_LITTLE; + } +} + +void virtio_reset(void *opaque) +{ + VirtIODevice *vdev = opaque; + VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); + int i; + + virtio_set_status(vdev, 0); + if (current_cpu) { + /* Guest initiated reset */ + vdev->device_endian = virtio_current_cpu_endian(); + } else { + /* System reset */ + vdev->device_endian = virtio_default_endian(); + } + + if (k->reset) { + k->reset(vdev); + } + + vdev->start_on_kick = false; + vdev->started = false; + vdev->broken = false; + vdev->guest_features = 0; + vdev->queue_sel = 0; + vdev->status = 0; + vdev->disabled = false; + qatomic_set(&vdev->isr, 0); + vdev->config_vector = VIRTIO_NO_VECTOR; + virtio_notify_vector(vdev, vdev->config_vector); + + for(i = 0; i < VIRTIO_QUEUE_MAX; i++) { + vdev->vq[i].vring.desc = 0; + vdev->vq[i].vring.avail = 0; + vdev->vq[i].vring.used = 0; + vdev->vq[i].last_avail_idx = 0; + vdev->vq[i].shadow_avail_idx = 0; + vdev->vq[i].used_idx = 0; + vdev->vq[i].last_avail_wrap_counter = true; + vdev->vq[i].shadow_avail_wrap_counter = true; + vdev->vq[i].used_wrap_counter = true; + virtio_queue_set_vector(vdev, i, VIRTIO_NO_VECTOR); + vdev->vq[i].signalled_used = 0; + vdev->vq[i].signalled_used_valid = false; + vdev->vq[i].notification = true; + vdev->vq[i].vring.num = vdev->vq[i].vring.num_default; + vdev->vq[i].inuse = 0; + virtio_virtqueue_reset_region_cache(&vdev->vq[i]); + } +} + +uint32_t virtio_config_readb(VirtIODevice *vdev, uint32_t addr) +{ + VirtioDeviceClass *k = 
VIRTIO_DEVICE_GET_CLASS(vdev); + uint8_t val; + + if (addr + sizeof(val) > vdev->config_len) { + return (uint32_t)-1; + } + + k->get_config(vdev, vdev->config); + + val = ldub_p(vdev->config + addr); + return val; +} + +uint32_t virtio_config_readw(VirtIODevice *vdev, uint32_t addr) +{ + VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); + uint16_t val; + + if (addr + sizeof(val) > vdev->config_len) { + return (uint32_t)-1; + } + + k->get_config(vdev, vdev->config); + + val = lduw_p(vdev->config + addr); + return val; +} + +uint32_t virtio_config_readl(VirtIODevice *vdev, uint32_t addr) +{ + VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); + uint32_t val; + + if (addr + sizeof(val) > vdev->config_len) { + return (uint32_t)-1; + } + + k->get_config(vdev, vdev->config); + + val = ldl_p(vdev->config + addr); + return val; +} + +void virtio_config_writeb(VirtIODevice *vdev, uint32_t addr, uint32_t data) +{ + VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); + uint8_t val = data; + + if (addr + sizeof(val) > vdev->config_len) { + return; + } + + stb_p(vdev->config + addr, val); + + if (k->set_config) { + k->set_config(vdev, vdev->config); + } +} + +void virtio_config_writew(VirtIODevice *vdev, uint32_t addr, uint32_t data) +{ + VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); + uint16_t val = data; + + if (addr + sizeof(val) > vdev->config_len) { + return; + } + + stw_p(vdev->config + addr, val); + + if (k->set_config) { + k->set_config(vdev, vdev->config); + } +} + +void virtio_config_writel(VirtIODevice *vdev, uint32_t addr, uint32_t data) +{ + VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); + uint32_t val = data; + + if (addr + sizeof(val) > vdev->config_len) { + return; + } + + stl_p(vdev->config + addr, val); + + if (k->set_config) { + k->set_config(vdev, vdev->config); + } +} + +uint32_t virtio_config_modern_readb(VirtIODevice *vdev, uint32_t addr) +{ + VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); + uint8_t val; + + if (addr + sizeof(val) > vdev->config_len) { + return (uint32_t)-1; + } + + k->get_config(vdev, vdev->config); + + val = ldub_p(vdev->config + addr); + return val; +} + +uint32_t virtio_config_modern_readw(VirtIODevice *vdev, uint32_t addr) +{ + VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); + uint16_t val; + + if (addr + sizeof(val) > vdev->config_len) { + return (uint32_t)-1; + } + + k->get_config(vdev, vdev->config); + + val = lduw_le_p(vdev->config + addr); + return val; +} + +uint32_t virtio_config_modern_readl(VirtIODevice *vdev, uint32_t addr) +{ + VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); + uint32_t val; + + if (addr + sizeof(val) > vdev->config_len) { + return (uint32_t)-1; + } + + k->get_config(vdev, vdev->config); + + val = ldl_le_p(vdev->config + addr); + return val; +} + +void virtio_config_modern_writeb(VirtIODevice *vdev, + uint32_t addr, uint32_t data) +{ + VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); + uint8_t val = data; + + if (addr + sizeof(val) > vdev->config_len) { + return; + } + + stb_p(vdev->config + addr, val); + + if (k->set_config) { + k->set_config(vdev, vdev->config); + } +} + +void virtio_config_modern_writew(VirtIODevice *vdev, + uint32_t addr, uint32_t data) +{ + VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); + uint16_t val = data; + + if (addr + sizeof(val) > vdev->config_len) { + return; + } + + stw_le_p(vdev->config + addr, val); + + if (k->set_config) { + k->set_config(vdev, vdev->config); + } +} + +void virtio_config_modern_writel(VirtIODevice *vdev, + uint32_t addr, 
uint32_t data) +{ + VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); + uint32_t val = data; + + if (addr + sizeof(val) > vdev->config_len) { + return; + } + + stl_le_p(vdev->config + addr, val); + + if (k->set_config) { + k->set_config(vdev, vdev->config); + } +} + +void virtio_queue_set_addr(VirtIODevice *vdev, int n, hwaddr addr) +{ + if (!vdev->vq[n].vring.num) { + return; + } + vdev->vq[n].vring.desc = addr; + virtio_queue_update_rings(vdev, n); +} + +hwaddr virtio_queue_get_addr(VirtIODevice *vdev, int n) +{ + return vdev->vq[n].vring.desc; +} + +void virtio_queue_set_rings(VirtIODevice *vdev, int n, hwaddr desc, + hwaddr avail, hwaddr used) +{ + if (!vdev->vq[n].vring.num) { + return; + } + vdev->vq[n].vring.desc = desc; + vdev->vq[n].vring.avail = avail; + vdev->vq[n].vring.used = used; + virtio_init_region_cache(vdev, n); +} + +void virtio_queue_set_num(VirtIODevice *vdev, int n, int num) +{ + /* Don't allow guest to flip queue between existent and + * nonexistent states, or to set it to an invalid size. + */ + if (!!num != !!vdev->vq[n].vring.num || + num > VIRTQUEUE_MAX_SIZE || + num < 0) { + return; + } + vdev->vq[n].vring.num = num; +} + +VirtQueue *virtio_vector_first_queue(VirtIODevice *vdev, uint16_t vector) +{ + return QLIST_FIRST(&vdev->vector_queues[vector]); +} + +VirtQueue *virtio_vector_next_queue(VirtQueue *vq) +{ + return QLIST_NEXT(vq, node); +} + +int virtio_queue_get_num(VirtIODevice *vdev, int n) +{ + return vdev->vq[n].vring.num; +} + +int virtio_queue_get_max_num(VirtIODevice *vdev, int n) +{ + return vdev->vq[n].vring.num_default; +} + +int virtio_get_num_queues(VirtIODevice *vdev) +{ + int i; + + for (i = 0; i < VIRTIO_QUEUE_MAX; i++) { + if (!virtio_queue_get_num(vdev, i)) { + break; + } + } + + return i; +} + +void virtio_queue_set_align(VirtIODevice *vdev, int n, int align) +{ + BusState *qbus = qdev_get_parent_bus(DEVICE(vdev)); + VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); + + /* virtio-1 compliant devices cannot change the alignment */ + if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) { + error_report("tried to modify queue alignment for virtio-1 device"); + return; + } + /* Check that the transport told us it was going to do this + * (so a buggy transport will immediately assert rather than + * silently failing to migrate this state) + */ + assert(k->has_variable_vring_alignment); + + if (align) { + vdev->vq[n].vring.align = align; + virtio_queue_update_rings(vdev, n); + } +} + +static bool virtio_queue_notify_aio_vq(VirtQueue *vq) +{ + bool ret = false; + + if (vq->vring.desc && vq->handle_aio_output) { + VirtIODevice *vdev = vq->vdev; + + trace_virtio_queue_notify(vdev, vq - vdev->vq, vq); + ret = vq->handle_aio_output(vdev, vq); + + if (unlikely(vdev->start_on_kick)) { + virtio_set_started(vdev, true); + } + } + + return ret; +} + +static void virtio_queue_notify_vq(VirtQueue *vq) +{ + if (vq->vring.desc && vq->handle_output) { + VirtIODevice *vdev = vq->vdev; + + if (unlikely(vdev->broken)) { + return; + } + + trace_virtio_queue_notify(vdev, vq - vdev->vq, vq); + vq->handle_output(vdev, vq); + + if (unlikely(vdev->start_on_kick)) { + virtio_set_started(vdev, true); + } + } +} + +void virtio_queue_notify(VirtIODevice *vdev, int n) +{ + VirtQueue *vq = &vdev->vq[n]; + + if (unlikely(!vq->vring.desc || vdev->broken)) { + return; + } + + trace_virtio_queue_notify(vdev, vq - vdev->vq, vq); + if (vq->host_notifier_enabled) { + event_notifier_set(&vq->host_notifier); + } else if (vq->handle_output) { + vq->handle_output(vdev, vq); + + if 
(unlikely(vdev->start_on_kick)) { + virtio_set_started(vdev, true); + } + } +} + +uint16_t virtio_queue_vector(VirtIODevice *vdev, int n) +{ + return n < VIRTIO_QUEUE_MAX ? vdev->vq[n].vector : + VIRTIO_NO_VECTOR; +} + +void virtio_queue_set_vector(VirtIODevice *vdev, int n, uint16_t vector) +{ + VirtQueue *vq = &vdev->vq[n]; + + if (n < VIRTIO_QUEUE_MAX) { + if (vdev->vector_queues && + vdev->vq[n].vector != VIRTIO_NO_VECTOR) { + QLIST_REMOVE(vq, node); + } + vdev->vq[n].vector = vector; + if (vdev->vector_queues && + vector != VIRTIO_NO_VECTOR) { + QLIST_INSERT_HEAD(&vdev->vector_queues[vector], vq, node); + } + } +} + +VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size, + VirtIOHandleOutput handle_output) +{ + int i; + + for (i = 0; i < VIRTIO_QUEUE_MAX; i++) { + if (vdev->vq[i].vring.num == 0) + break; + } + + if (i == VIRTIO_QUEUE_MAX || queue_size > VIRTQUEUE_MAX_SIZE) + abort(); + + vdev->vq[i].vring.num = queue_size; + vdev->vq[i].vring.num_default = queue_size; + vdev->vq[i].vring.align = VIRTIO_PCI_VRING_ALIGN; + vdev->vq[i].handle_output = handle_output; + vdev->vq[i].handle_aio_output = NULL; + vdev->vq[i].used_elems = g_malloc0(sizeof(VirtQueueElement) * + queue_size); + + return &vdev->vq[i]; +} + +void virtio_delete_queue(VirtQueue *vq) +{ + vq->vring.num = 0; + vq->vring.num_default = 0; + vq->handle_output = NULL; + vq->handle_aio_output = NULL; + g_free(vq->used_elems); + vq->used_elems = NULL; + virtio_virtqueue_reset_region_cache(vq); +} + +void virtio_del_queue(VirtIODevice *vdev, int n) +{ + if (n < 0 || n >= VIRTIO_QUEUE_MAX) { + abort(); + } + + virtio_delete_queue(&vdev->vq[n]); +} + +static void virtio_set_isr(VirtIODevice *vdev, int value) +{ + uint8_t old = qatomic_read(&vdev->isr); + + /* Do not write ISR if it does not change, so that its cacheline remains + * shared in the common case where the guest does not read it. + */ + if ((old & value) != value) { + qatomic_or(&vdev->isr, value); + } +} + +/* Called within rcu_read_lock(). */ +static bool virtio_split_should_notify(VirtIODevice *vdev, VirtQueue *vq) +{ + uint16_t old, new; + bool v; + /* We need to expose used array entries before checking used event. */ + smp_mb(); + /* Always notify when queue is empty (when feature acknowledge) */ + if (virtio_vdev_has_feature(vdev, VIRTIO_F_NOTIFY_ON_EMPTY) && + !vq->inuse && virtio_queue_empty(vq)) { + return true; + } + + if (!virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) { + return !(vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT); + } + + v = vq->signalled_used_valid; + vq->signalled_used_valid = true; + old = vq->signalled_used; + new = vq->signalled_used = vq->used_idx; + return !v || vring_need_event(vring_get_used_event(vq), new, old); +} + +static bool vring_packed_need_event(VirtQueue *vq, bool wrap, + uint16_t off_wrap, uint16_t new, + uint16_t old) +{ + int off = off_wrap & ~(1 << 15); + + if (wrap != off_wrap >> 15) { + off -= vq->vring.num; + } + + return vring_need_event(off, new, old); +} + +/* Called within rcu_read_lock(). 
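+ *
+ * For packed rings the driver publishes its preference in a separate
+ * event structure: FLAG_DISABLE suppresses notifications, FLAG_ENABLE
+ * forces them, and otherwise off_wrap encodes the used index (plus wrap
+ * bit) after which a notification is wanted.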
 */
+static bool virtio_packed_should_notify(VirtIODevice *vdev, VirtQueue *vq)
+{
+    VRingPackedDescEvent e;
+    uint16_t old, new;
+    bool v;
+    VRingMemoryRegionCaches *caches;
+
+    caches = vring_get_region_caches(vq);
+    if (!caches) {
+        return false;
+    }
+
+    vring_packed_event_read(vdev, &caches->avail, &e);
+
+    old = vq->signalled_used;
+    new = vq->signalled_used = vq->used_idx;
+    v = vq->signalled_used_valid;
+    vq->signalled_used_valid = true;
+
+    if (e.flags == VRING_PACKED_EVENT_FLAG_DISABLE) {
+        return false;
+    } else if (e.flags == VRING_PACKED_EVENT_FLAG_ENABLE) {
+        return true;
+    }
+
+    return !v || vring_packed_need_event(vq, vq->used_wrap_counter,
+                                         e.off_wrap, new, old);
+}
+
+/* Called within rcu_read_lock(). */
+static bool virtio_should_notify(VirtIODevice *vdev, VirtQueue *vq)
+{
+    if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
+        return virtio_packed_should_notify(vdev, vq);
+    } else {
+        return virtio_split_should_notify(vdev, vq);
+    }
+}
+
+void virtio_notify_irqfd(VirtIODevice *vdev, VirtQueue *vq)
+{
+    WITH_RCU_READ_LOCK_GUARD() {
+        if (!virtio_should_notify(vdev, vq)) {
+            return;
+        }
+    }
+
+    trace_virtio_notify_irqfd(vdev, vq);
+
+    /*
+     * virtio spec 1.0 says ISR bit 0 should be ignored with MSI, but
+     * windows drivers included in virtio-win 1.8.0 (circa 2015) are
+     * incorrectly polling this bit during crashdump and hibernation
+     * in MSI mode, causing a hang if this bit is never updated.
+     * Recent releases of Windows do not really shut down, but rather
+     * log out and hibernate to make the next startup faster.  Hence,
+     * this manifested as a more serious hang during shutdown with
+     * MSI enabled.
+     *
+     * Next driver release from 2016 fixed this problem, so working around it
+     * is not a must, but it's easy to do so let's do it here.
+     *
+     * Note: it's safe to update ISR from any thread as it was switched
+     * to an atomic operation.
+     */
+    virtio_set_isr(vq->vdev, 0x1);
+    event_notifier_set(&vq->guest_notifier);
+}
+
+static void virtio_irq(VirtQueue *vq)
+{
+    virtio_set_isr(vq->vdev, 0x1);
+    virtio_notify_vector(vq->vdev, vq->vector);
+}
+
+void virtio_notify(VirtIODevice *vdev, VirtQueue *vq)
+{
+    WITH_RCU_READ_LOCK_GUARD() {
+        if (!virtio_should_notify(vdev, vq)) {
+            return;
+        }
+    }
+
+    trace_virtio_notify(vdev, vq);
+    virtio_irq(vq);
+}
+
+void virtio_notify_config(VirtIODevice *vdev)
+{
+    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))
+        return;
+
+    virtio_set_isr(vdev, 0x3);
+    vdev->generation++;
+    virtio_notify_vector(vdev, vdev->config_vector);
+}
+
+static bool virtio_device_endian_needed(void *opaque)
+{
+    VirtIODevice *vdev = opaque;
+
+    assert(vdev->device_endian != VIRTIO_DEVICE_ENDIAN_UNKNOWN);
+    if (!virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
+        return vdev->device_endian != virtio_default_endian();
+    }
+    /* Devices conforming to VIRTIO 1.0 or later are always LE.
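+     * Their endianness therefore only needs migrating if it somehow
+     * diverged from VIRTIO_DEVICE_ENDIAN_LITTLE.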
*/ + return vdev->device_endian != VIRTIO_DEVICE_ENDIAN_LITTLE; +} + +static bool virtio_64bit_features_needed(void *opaque) +{ + VirtIODevice *vdev = opaque; + + return (vdev->host_features >> 32) != 0; +} + +static bool virtio_virtqueue_needed(void *opaque) +{ + VirtIODevice *vdev = opaque; + + return virtio_host_has_feature(vdev, VIRTIO_F_VERSION_1); +} + +static bool virtio_packed_virtqueue_needed(void *opaque) +{ + VirtIODevice *vdev = opaque; + + return virtio_host_has_feature(vdev, VIRTIO_F_RING_PACKED); +} + +static bool virtio_ringsize_needed(void *opaque) +{ + VirtIODevice *vdev = opaque; + int i; + + for (i = 0; i < VIRTIO_QUEUE_MAX; i++) { + if (vdev->vq[i].vring.num != vdev->vq[i].vring.num_default) { + return true; + } + } + return false; +} + +static bool virtio_extra_state_needed(void *opaque) +{ + VirtIODevice *vdev = opaque; + BusState *qbus = qdev_get_parent_bus(DEVICE(vdev)); + VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); + + return k->has_extra_state && + k->has_extra_state(qbus->parent); +} + +static bool virtio_broken_needed(void *opaque) +{ + VirtIODevice *vdev = opaque; + + return vdev->broken; +} + +static bool virtio_started_needed(void *opaque) +{ + VirtIODevice *vdev = opaque; + + return vdev->started; +} + +static bool virtio_disabled_needed(void *opaque) +{ + VirtIODevice *vdev = opaque; + + return vdev->disabled; +} + +static const VMStateDescription vmstate_virtqueue = { + .name = "virtqueue_state", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT64(vring.avail, struct VirtQueue), + VMSTATE_UINT64(vring.used, struct VirtQueue), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_packed_virtqueue = { + .name = "packed_virtqueue_state", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT16(last_avail_idx, struct VirtQueue), + VMSTATE_BOOL(last_avail_wrap_counter, struct VirtQueue), + VMSTATE_UINT16(used_idx, struct VirtQueue), + VMSTATE_BOOL(used_wrap_counter, struct VirtQueue), + VMSTATE_UINT32(inuse, struct VirtQueue), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_virtio_virtqueues = { + .name = "virtio/virtqueues", + .version_id = 1, + .minimum_version_id = 1, + .needed = &virtio_virtqueue_needed, + .fields = (VMStateField[]) { + VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice, + VIRTIO_QUEUE_MAX, 0, vmstate_virtqueue, VirtQueue), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_virtio_packed_virtqueues = { + .name = "virtio/packed_virtqueues", + .version_id = 1, + .minimum_version_id = 1, + .needed = &virtio_packed_virtqueue_needed, + .fields = (VMStateField[]) { + VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice, + VIRTIO_QUEUE_MAX, 0, vmstate_packed_virtqueue, VirtQueue), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_ringsize = { + .name = "ringsize_state", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(vring.num_default, struct VirtQueue), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_virtio_ringsize = { + .name = "virtio/ringsize", + .version_id = 1, + .minimum_version_id = 1, + .needed = &virtio_ringsize_needed, + .fields = (VMStateField[]) { + VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice, + VIRTIO_QUEUE_MAX, 0, vmstate_ringsize, VirtQueue), + VMSTATE_END_OF_LIST() + } +}; + +static int get_extra_state(QEMUFile *f, void *pv, size_t size, + const 
VMStateField *field) +{ + VirtIODevice *vdev = pv; + BusState *qbus = qdev_get_parent_bus(DEVICE(vdev)); + VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); + + if (!k->load_extra_state) { + return -1; + } else { + return k->load_extra_state(qbus->parent, f); + } +} + +static int put_extra_state(QEMUFile *f, void *pv, size_t size, + const VMStateField *field, JSONWriter *vmdesc) +{ + VirtIODevice *vdev = pv; + BusState *qbus = qdev_get_parent_bus(DEVICE(vdev)); + VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); + + k->save_extra_state(qbus->parent, f); + return 0; +} + +static const VMStateInfo vmstate_info_extra_state = { + .name = "virtqueue_extra_state", + .get = get_extra_state, + .put = put_extra_state, +}; + +static const VMStateDescription vmstate_virtio_extra_state = { + .name = "virtio/extra_state", + .version_id = 1, + .minimum_version_id = 1, + .needed = &virtio_extra_state_needed, + .fields = (VMStateField[]) { + { + .name = "extra_state", + .version_id = 0, + .field_exists = NULL, + .size = 0, + .info = &vmstate_info_extra_state, + .flags = VMS_SINGLE, + .offset = 0, + }, + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_virtio_device_endian = { + .name = "virtio/device_endian", + .version_id = 1, + .minimum_version_id = 1, + .needed = &virtio_device_endian_needed, + .fields = (VMStateField[]) { + VMSTATE_UINT8(device_endian, VirtIODevice), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_virtio_64bit_features = { + .name = "virtio/64bit_features", + .version_id = 1, + .minimum_version_id = 1, + .needed = &virtio_64bit_features_needed, + .fields = (VMStateField[]) { + VMSTATE_UINT64(guest_features, VirtIODevice), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_virtio_broken = { + .name = "virtio/broken", + .version_id = 1, + .minimum_version_id = 1, + .needed = &virtio_broken_needed, + .fields = (VMStateField[]) { + VMSTATE_BOOL(broken, VirtIODevice), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_virtio_started = { + .name = "virtio/started", + .version_id = 1, + .minimum_version_id = 1, + .needed = &virtio_started_needed, + .fields = (VMStateField[]) { + VMSTATE_BOOL(started, VirtIODevice), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_virtio_disabled = { + .name = "virtio/disabled", + .version_id = 1, + .minimum_version_id = 1, + .needed = &virtio_disabled_needed, + .fields = (VMStateField[]) { + VMSTATE_BOOL(disabled, VirtIODevice), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_virtio = { + .name = "virtio", + .version_id = 1, + .minimum_version_id = 1, + .minimum_version_id_old = 1, + .fields = (VMStateField[]) { + VMSTATE_END_OF_LIST() + }, + .subsections = (const VMStateDescription*[]) { + &vmstate_virtio_device_endian, + &vmstate_virtio_64bit_features, + &vmstate_virtio_virtqueues, + &vmstate_virtio_ringsize, + &vmstate_virtio_broken, + &vmstate_virtio_extra_state, + &vmstate_virtio_started, + &vmstate_virtio_packed_virtqueues, + &vmstate_virtio_disabled, + NULL + } +}; + +int virtio_save(VirtIODevice *vdev, QEMUFile *f) +{ + BusState *qbus = qdev_get_parent_bus(DEVICE(vdev)); + VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); + VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev); + uint32_t guest_features_lo = (vdev->guest_features & 0xffffffff); + int i; + + if (k->save_config) { + k->save_config(qbus->parent, f); + } + + qemu_put_8s(f, &vdev->status); + qemu_put_8s(f, &vdev->isr); + qemu_put_be16s(f, 
&vdev->queue_sel); + qemu_put_be32s(f, &guest_features_lo); + qemu_put_be32(f, vdev->config_len); + qemu_put_buffer(f, vdev->config, vdev->config_len); + + for (i = 0; i < VIRTIO_QUEUE_MAX; i++) { + if (vdev->vq[i].vring.num == 0) + break; + } + + qemu_put_be32(f, i); + + for (i = 0; i < VIRTIO_QUEUE_MAX; i++) { + if (vdev->vq[i].vring.num == 0) + break; + + qemu_put_be32(f, vdev->vq[i].vring.num); + if (k->has_variable_vring_alignment) { + qemu_put_be32(f, vdev->vq[i].vring.align); + } + /* + * Save desc now, the rest of the ring addresses are saved in + * subsections for VIRTIO-1 devices. + */ + qemu_put_be64(f, vdev->vq[i].vring.desc); + qemu_put_be16s(f, &vdev->vq[i].last_avail_idx); + if (k->save_queue) { + k->save_queue(qbus->parent, i, f); + } + } + + if (vdc->save != NULL) { + vdc->save(vdev, f); + } + + if (vdc->vmsd) { + int ret = vmstate_save_state(f, vdc->vmsd, vdev, NULL); + if (ret) { + return ret; + } + } + + /* Subsections */ + return vmstate_save_state(f, &vmstate_virtio, vdev, NULL); +} + +/* A wrapper for use as a VMState .put function */ +static int virtio_device_put(QEMUFile *f, void *opaque, size_t size, + const VMStateField *field, JSONWriter *vmdesc) +{ + return virtio_save(VIRTIO_DEVICE(opaque), f); +} + +/* A wrapper for use as a VMState .get function */ +static int virtio_device_get(QEMUFile *f, void *opaque, size_t size, + const VMStateField *field) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(opaque); + DeviceClass *dc = DEVICE_CLASS(VIRTIO_DEVICE_GET_CLASS(vdev)); + + return virtio_load(vdev, f, dc->vmsd->version_id); +} + +const VMStateInfo virtio_vmstate_info = { + .name = "virtio", + .get = virtio_device_get, + .put = virtio_device_put, +}; + +static int virtio_set_features_nocheck(VirtIODevice *vdev, uint64_t val) +{ + VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); + bool bad = (val & ~(vdev->host_features)) != 0; + + val &= vdev->host_features; + if (k->set_features) { + k->set_features(vdev, val); + } + vdev->guest_features = val; + return bad ? -1 : 0; +} + +int virtio_set_features(VirtIODevice *vdev, uint64_t val) +{ + int ret; + /* + * The driver must not attempt to set features after feature negotiation + * has finished. + */ + if (vdev->status & VIRTIO_CONFIG_S_FEATURES_OK) { + return -EINVAL; + } + ret = virtio_set_features_nocheck(vdev, val); + if (virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) { + /* VIRTIO_RING_F_EVENT_IDX changes the size of the caches. */ + int i; + for (i = 0; i < VIRTIO_QUEUE_MAX; i++) { + if (vdev->vq[i].vring.num != 0) { + virtio_init_region_cache(vdev, i); + } + } + } + if (!ret) { + if (!virtio_device_started(vdev, vdev->status) && + !virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) { + vdev->start_on_kick = true; + } + } + return ret; +} + +size_t virtio_feature_get_config_size(const VirtIOFeature *feature_sizes, + uint64_t host_features) +{ + size_t config_size = 0; + int i; + + for (i = 0; feature_sizes[i].flags != 0; i++) { + if (host_features & feature_sizes[i].flags) { + config_size = MAX(feature_sizes[i].end, config_size); + } + } + + return config_size; +} + +int virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id) +{ + int i, ret; + int32_t config_len; + uint32_t num; + uint32_t features; + BusState *qbus = qdev_get_parent_bus(DEVICE(vdev)); + VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); + VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev); + + /* + * We poison the endianness to ensure it does not get used before + * subsections have been loaded. 
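+     * The device_endian subsection, when present, overwrites the
+     * poison value; if it is absent, the default endianness is
+     * restored once the whole stream has been read.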
+ */ + vdev->device_endian = VIRTIO_DEVICE_ENDIAN_UNKNOWN; + + if (k->load_config) { + ret = k->load_config(qbus->parent, f); + if (ret) + return ret; + } + + qemu_get_8s(f, &vdev->status); + qemu_get_8s(f, &vdev->isr); + qemu_get_be16s(f, &vdev->queue_sel); + if (vdev->queue_sel >= VIRTIO_QUEUE_MAX) { + return -1; + } + qemu_get_be32s(f, &features); + + /* + * Temporarily set guest_features low bits - needed by + * virtio net load code testing for VIRTIO_NET_F_CTRL_GUEST_OFFLOADS + * VIRTIO_NET_F_GUEST_ANNOUNCE and VIRTIO_NET_F_CTRL_VQ. + * + * Note: devices should always test host features in future - don't create + * new dependencies like this. + */ + vdev->guest_features = features; + + config_len = qemu_get_be32(f); + + /* + * There are cases where the incoming config can be bigger or smaller + * than what we have; so load what we have space for, and skip + * any excess that's in the stream. + */ + qemu_get_buffer(f, vdev->config, MIN(config_len, vdev->config_len)); + + while (config_len > vdev->config_len) { + qemu_get_byte(f); + config_len--; + } + + num = qemu_get_be32(f); + + if (num > VIRTIO_QUEUE_MAX) { + error_report("Invalid number of virtqueues: 0x%x", num); + return -1; + } + + for (i = 0; i < num; i++) { + vdev->vq[i].vring.num = qemu_get_be32(f); + if (k->has_variable_vring_alignment) { + vdev->vq[i].vring.align = qemu_get_be32(f); + } + vdev->vq[i].vring.desc = qemu_get_be64(f); + qemu_get_be16s(f, &vdev->vq[i].last_avail_idx); + vdev->vq[i].signalled_used_valid = false; + vdev->vq[i].notification = true; + + if (!vdev->vq[i].vring.desc && vdev->vq[i].last_avail_idx) { + error_report("VQ %d address 0x0 " + "inconsistent with Host index 0x%x", + i, vdev->vq[i].last_avail_idx); + return -1; + } + if (k->load_queue) { + ret = k->load_queue(qbus->parent, i, f); + if (ret) + return ret; + } + } + + virtio_notify_vector(vdev, VIRTIO_NO_VECTOR); + + if (vdc->load != NULL) { + ret = vdc->load(vdev, f, version_id); + if (ret) { + return ret; + } + } + + if (vdc->vmsd) { + ret = vmstate_load_state(f, vdc->vmsd, vdev, version_id); + if (ret) { + return ret; + } + } + + /* Subsections */ + ret = vmstate_load_state(f, &vmstate_virtio, vdev, 1); + if (ret) { + return ret; + } + + if (vdev->device_endian == VIRTIO_DEVICE_ENDIAN_UNKNOWN) { + vdev->device_endian = virtio_default_endian(); + } + + if (virtio_64bit_features_needed(vdev)) { + /* + * Subsection load filled vdev->guest_features. Run them + * through virtio_set_features to sanity-check them against + * host_features. + */ + uint64_t features64 = vdev->guest_features; + if (virtio_set_features_nocheck(vdev, features64) < 0) { + error_report("Features 0x%" PRIx64 " unsupported. " + "Allowed features: 0x%" PRIx64, + features64, vdev->host_features); + return -1; + } + } else { + if (virtio_set_features_nocheck(vdev, features) < 0) { + error_report("Features 0x%x unsupported. " + "Allowed features: 0x%" PRIx64, + features, vdev->host_features); + return -1; + } + } + + if (!virtio_device_started(vdev, vdev->status) && + !virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) { + vdev->start_on_kick = true; + } + + RCU_READ_LOCK_GUARD(); + for (i = 0; i < num; i++) { + if (vdev->vq[i].vring.desc) { + uint16_t nheads; + + /* + * VIRTIO-1 devices migrate desc, used, and avail ring addresses so + * only the region cache needs to be set up. Legacy devices need + * to calculate used and avail ring addresses based on the desc + * address. 
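+             * virtio_queue_update_rings() rebuilds them from the fixed
+             * legacy layout: avail directly follows desc, and used
+             * follows avail at the ring alignment boundary.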
+ */ + if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) { + virtio_init_region_cache(vdev, i); + } else { + virtio_queue_update_rings(vdev, i); + } + + if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) { + vdev->vq[i].shadow_avail_idx = vdev->vq[i].last_avail_idx; + vdev->vq[i].shadow_avail_wrap_counter = + vdev->vq[i].last_avail_wrap_counter; + continue; + } + + nheads = vring_avail_idx(&vdev->vq[i]) - vdev->vq[i].last_avail_idx; + /* Check it isn't doing strange things with descriptor numbers. */ + if (nheads > vdev->vq[i].vring.num) { + virtio_error(vdev, "VQ %d size 0x%x Guest index 0x%x " + "inconsistent with Host index 0x%x: delta 0x%x", + i, vdev->vq[i].vring.num, + vring_avail_idx(&vdev->vq[i]), + vdev->vq[i].last_avail_idx, nheads); + vdev->vq[i].used_idx = 0; + vdev->vq[i].shadow_avail_idx = 0; + vdev->vq[i].inuse = 0; + continue; + } + vdev->vq[i].used_idx = vring_used_idx(&vdev->vq[i]); + vdev->vq[i].shadow_avail_idx = vring_avail_idx(&vdev->vq[i]); + + /* + * Some devices migrate VirtQueueElements that have been popped + * from the avail ring but not yet returned to the used ring. + * Since max ring size < UINT16_MAX it's safe to use modulo + * UINT16_MAX + 1 subtraction. + */ + vdev->vq[i].inuse = (uint16_t)(vdev->vq[i].last_avail_idx - + vdev->vq[i].used_idx); + if (vdev->vq[i].inuse > vdev->vq[i].vring.num) { + error_report("VQ %d size 0x%x < last_avail_idx 0x%x - " + "used_idx 0x%x", + i, vdev->vq[i].vring.num, + vdev->vq[i].last_avail_idx, + vdev->vq[i].used_idx); + return -1; + } + } + } + + if (vdc->post_load) { + ret = vdc->post_load(vdev); + if (ret) { + return ret; + } + } + + return 0; +} + +void virtio_cleanup(VirtIODevice *vdev) +{ + qemu_del_vm_change_state_handler(vdev->vmstate); +} + +static void virtio_vmstate_change(void *opaque, bool running, RunState state) +{ + VirtIODevice *vdev = opaque; + BusState *qbus = qdev_get_parent_bus(DEVICE(vdev)); + VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); + bool backend_run = running && virtio_device_started(vdev, vdev->status); + vdev->vm_running = running; + + if (backend_run) { + virtio_set_status(vdev, vdev->status); + } + + if (k->vmstate_change) { + k->vmstate_change(qbus->parent, backend_run); + } + + if (!backend_run) { + virtio_set_status(vdev, vdev->status); + } +} + +void virtio_instance_init_common(Object *proxy_obj, void *data, + size_t vdev_size, const char *vdev_name) +{ + DeviceState *vdev = data; + + object_initialize_child_with_props(proxy_obj, "virtio-backend", vdev, + vdev_size, vdev_name, &error_abort, + NULL); + qdev_alias_all_properties(vdev, proxy_obj); +} + +void virtio_init(VirtIODevice *vdev, const char *name, + uint16_t device_id, size_t config_size) +{ + BusState *qbus = qdev_get_parent_bus(DEVICE(vdev)); + VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); + int i; + int nvectors = k->query_nvectors ? 
k->query_nvectors(qbus->parent) : 0; + + if (nvectors) { + vdev->vector_queues = + g_malloc0(sizeof(*vdev->vector_queues) * nvectors); + } + + vdev->start_on_kick = false; + vdev->started = false; + vdev->device_id = device_id; + vdev->status = 0; + qatomic_set(&vdev->isr, 0); + vdev->queue_sel = 0; + vdev->config_vector = VIRTIO_NO_VECTOR; + vdev->vq = g_malloc0(sizeof(VirtQueue) * VIRTIO_QUEUE_MAX); + vdev->vm_running = runstate_is_running(); + vdev->broken = false; + for (i = 0; i < VIRTIO_QUEUE_MAX; i++) { + vdev->vq[i].vector = VIRTIO_NO_VECTOR; + vdev->vq[i].vdev = vdev; + vdev->vq[i].queue_index = i; + vdev->vq[i].host_notifier_enabled = false; + } + + vdev->name = name; + vdev->config_len = config_size; + if (vdev->config_len) { + vdev->config = g_malloc0(config_size); + } else { + vdev->config = NULL; + } + vdev->vmstate = qdev_add_vm_change_state_handler(DEVICE(vdev), + virtio_vmstate_change, vdev); + vdev->device_endian = virtio_default_endian(); + vdev->use_guest_notifier_mask = true; +} + +/* + * Only devices that have already been around prior to defining the virtio + * standard support legacy mode; this includes devices not specified in the + * standard. All newer devices conform to the virtio standard only. + */ +bool virtio_legacy_allowed(VirtIODevice *vdev) +{ + switch (vdev->device_id) { + case VIRTIO_ID_NET: + case VIRTIO_ID_BLOCK: + case VIRTIO_ID_CONSOLE: + case VIRTIO_ID_RNG: + case VIRTIO_ID_BALLOON: + case VIRTIO_ID_RPMSG: + case VIRTIO_ID_SCSI: + case VIRTIO_ID_9P: + case VIRTIO_ID_RPROC_SERIAL: + case VIRTIO_ID_CAIF: + return true; + default: + return false; + } +} + +bool virtio_legacy_check_disabled(VirtIODevice *vdev) +{ + return vdev->disable_legacy_check; +} + +hwaddr virtio_queue_get_desc_addr(VirtIODevice *vdev, int n) +{ + return vdev->vq[n].vring.desc; +} + +bool virtio_queue_enabled_legacy(VirtIODevice *vdev, int n) +{ + return virtio_queue_get_desc_addr(vdev, n) != 0; +} + +bool virtio_queue_enabled(VirtIODevice *vdev, int n) +{ + BusState *qbus = qdev_get_parent_bus(DEVICE(vdev)); + VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); + + if (k->queue_enabled) { + return k->queue_enabled(qbus->parent, n); + } + return virtio_queue_enabled_legacy(vdev, n); +} + +hwaddr virtio_queue_get_avail_addr(VirtIODevice *vdev, int n) +{ + return vdev->vq[n].vring.avail; +} + +hwaddr virtio_queue_get_used_addr(VirtIODevice *vdev, int n) +{ + return vdev->vq[n].vring.used; +} + +hwaddr virtio_queue_get_desc_size(VirtIODevice *vdev, int n) +{ + return sizeof(VRingDesc) * vdev->vq[n].vring.num; +} + +hwaddr virtio_queue_get_avail_size(VirtIODevice *vdev, int n) +{ + int s; + + if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) { + return sizeof(struct VRingPackedDescEvent); + } + + s = virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0; + return offsetof(VRingAvail, ring) + + sizeof(uint16_t) * vdev->vq[n].vring.num + s; +} + +hwaddr virtio_queue_get_used_size(VirtIODevice *vdev, int n) +{ + int s; + + if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) { + return sizeof(struct VRingPackedDescEvent); + } + + s = virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX) ? 
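+        /* two extra bytes for the avail_event field at the tail */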
2 : 0; + return offsetof(VRingUsed, ring) + + sizeof(VRingUsedElem) * vdev->vq[n].vring.num + s; +} + +static unsigned int virtio_queue_packed_get_last_avail_idx(VirtIODevice *vdev, + int n) +{ + unsigned int avail, used; + + avail = vdev->vq[n].last_avail_idx; + avail |= ((uint16_t)vdev->vq[n].last_avail_wrap_counter) << 15; + + used = vdev->vq[n].used_idx; + used |= ((uint16_t)vdev->vq[n].used_wrap_counter) << 15; + + return avail | used << 16; +} + +static uint16_t virtio_queue_split_get_last_avail_idx(VirtIODevice *vdev, + int n) +{ + return vdev->vq[n].last_avail_idx; +} + +unsigned int virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n) +{ + if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) { + return virtio_queue_packed_get_last_avail_idx(vdev, n); + } else { + return virtio_queue_split_get_last_avail_idx(vdev, n); + } +} + +static void virtio_queue_packed_set_last_avail_idx(VirtIODevice *vdev, + int n, unsigned int idx) +{ + struct VirtQueue *vq = &vdev->vq[n]; + + vq->last_avail_idx = vq->shadow_avail_idx = idx & 0x7fff; + vq->last_avail_wrap_counter = + vq->shadow_avail_wrap_counter = !!(idx & 0x8000); + idx >>= 16; + vq->used_idx = idx & 0x7fff; + vq->used_wrap_counter = !!(idx & 0x8000); +} + +static void virtio_queue_split_set_last_avail_idx(VirtIODevice *vdev, + int n, unsigned int idx) +{ + vdev->vq[n].last_avail_idx = idx; + vdev->vq[n].shadow_avail_idx = idx; +} + +void virtio_queue_set_last_avail_idx(VirtIODevice *vdev, int n, + unsigned int idx) +{ + if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) { + virtio_queue_packed_set_last_avail_idx(vdev, n, idx); + } else { + virtio_queue_split_set_last_avail_idx(vdev, n, idx); + } +} + +static void virtio_queue_packed_restore_last_avail_idx(VirtIODevice *vdev, + int n) +{ + /* We don't have a reference like avail idx in shared memory */ + return; +} + +static void virtio_queue_split_restore_last_avail_idx(VirtIODevice *vdev, + int n) +{ + RCU_READ_LOCK_GUARD(); + if (vdev->vq[n].vring.desc) { + vdev->vq[n].last_avail_idx = vring_used_idx(&vdev->vq[n]); + vdev->vq[n].shadow_avail_idx = vdev->vq[n].last_avail_idx; + } +} + +void virtio_queue_restore_last_avail_idx(VirtIODevice *vdev, int n) +{ + if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) { + virtio_queue_packed_restore_last_avail_idx(vdev, n); + } else { + virtio_queue_split_restore_last_avail_idx(vdev, n); + } +} + +static void virtio_queue_packed_update_used_idx(VirtIODevice *vdev, int n) +{ + /* used idx was updated through set_last_avail_idx() */ + return; +} + +static void virtio_queue_split_update_used_idx(VirtIODevice *vdev, int n) +{ + RCU_READ_LOCK_GUARD(); + if (vdev->vq[n].vring.desc) { + vdev->vq[n].used_idx = vring_used_idx(&vdev->vq[n]); + } +} + +void virtio_queue_update_used_idx(VirtIODevice *vdev, int n) +{ + if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) { + return virtio_queue_packed_update_used_idx(vdev, n); + } else { + return virtio_queue_split_update_used_idx(vdev, n); + } +} + +void virtio_queue_invalidate_signalled_used(VirtIODevice *vdev, int n) +{ + vdev->vq[n].signalled_used_valid = false; +} + +VirtQueue *virtio_get_queue(VirtIODevice *vdev, int n) +{ + return vdev->vq + n; +} + +uint16_t virtio_get_queue_index(VirtQueue *vq) +{ + return vq->queue_index; +} + +static void virtio_queue_guest_notifier_read(EventNotifier *n) +{ + VirtQueue *vq = container_of(n, VirtQueue, guest_notifier); + if (event_notifier_test_and_clear(n)) { + virtio_irq(vq); + } +} + +void 
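For packed rings, the get/set_last_avail_idx pair above round-trips the whole queue position through one 32-bit value: a 15-bit index plus a wrap-counter bit for the avail side in the low half, and the same for the used side in the high half. A self-contained sketch of that encoding (helper names are illustrative, not from the patch):

#include <stdbool.h>
#include <stdint.h>

static uint32_t packed_idx_encode(uint16_t avail_idx, bool avail_wrap,
                                  uint16_t used_idx, bool used_wrap)
{
    uint32_t avail = (avail_idx & 0x7fff) | ((uint32_t)avail_wrap << 15);
    uint32_t used  = (used_idx & 0x7fff) | ((uint32_t)used_wrap << 15);
    return avail | (used << 16);
}

static void packed_idx_decode(uint32_t idx, uint16_t *avail_idx,
                              bool *avail_wrap, uint16_t *used_idx,
                              bool *used_wrap)
{
    *avail_idx  = idx & 0x7fff;
    *avail_wrap = !!(idx & 0x8000);
    idx >>= 16;
    *used_idx  = idx & 0x7fff;
    *used_wrap = !!(idx & 0x8000);
}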
virtio_queue_set_guest_notifier_fd_handler(VirtQueue *vq, bool assign, + bool with_irqfd) +{ + if (assign && !with_irqfd) { + event_notifier_set_handler(&vq->guest_notifier, + virtio_queue_guest_notifier_read); + } else { + event_notifier_set_handler(&vq->guest_notifier, NULL); + } + if (!assign) { + /* Test and clear notifier before closing it, + * in case poll callback didn't have time to run. */ + virtio_queue_guest_notifier_read(&vq->guest_notifier); + } +} + +EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq) +{ + return &vq->guest_notifier; +} + +static void virtio_queue_host_notifier_aio_read(EventNotifier *n) +{ + VirtQueue *vq = container_of(n, VirtQueue, host_notifier); + if (event_notifier_test_and_clear(n)) { + virtio_queue_notify_aio_vq(vq); + } +} + +static void virtio_queue_host_notifier_aio_poll_begin(EventNotifier *n) +{ + VirtQueue *vq = container_of(n, VirtQueue, host_notifier); + + virtio_queue_set_notification(vq, 0); +} + +static bool virtio_queue_host_notifier_aio_poll(void *opaque) +{ + EventNotifier *n = opaque; + VirtQueue *vq = container_of(n, VirtQueue, host_notifier); + + if (!vq->vring.desc || virtio_queue_empty(vq)) { + return false; + } + + return virtio_queue_notify_aio_vq(vq); +} + +static void virtio_queue_host_notifier_aio_poll_end(EventNotifier *n) +{ + VirtQueue *vq = container_of(n, VirtQueue, host_notifier); + + /* Caller polls once more after this to catch requests that race with us */ + virtio_queue_set_notification(vq, 1); +} + +void virtio_queue_aio_set_host_notifier_handler(VirtQueue *vq, AioContext *ctx, + VirtIOHandleAIOOutput handle_output) +{ + if (handle_output) { + vq->handle_aio_output = handle_output; + aio_set_event_notifier(ctx, &vq->host_notifier, true, + virtio_queue_host_notifier_aio_read, + virtio_queue_host_notifier_aio_poll); + aio_set_event_notifier_poll(ctx, &vq->host_notifier, + virtio_queue_host_notifier_aio_poll_begin, + virtio_queue_host_notifier_aio_poll_end); + } else { + aio_set_event_notifier(ctx, &vq->host_notifier, true, NULL, NULL); + /* Test and clear notifier after disabling event, + * in case poll callback didn't have time to run. */ + virtio_queue_host_notifier_aio_read(&vq->host_notifier); + vq->handle_aio_output = NULL; + } +} + +void virtio_queue_host_notifier_read(EventNotifier *n) +{ + VirtQueue *vq = container_of(n, VirtQueue, host_notifier); + if (event_notifier_test_and_clear(n)) { + virtio_queue_notify_vq(vq); + } +} + +EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq) +{ + return &vq->host_notifier; +} + +void virtio_queue_set_host_notifier_enabled(VirtQueue *vq, bool enabled) +{ + vq->host_notifier_enabled = enabled; +} + +int virtio_queue_set_host_notifier_mr(VirtIODevice *vdev, int n, + MemoryRegion *mr, bool assign) +{ + BusState *qbus = qdev_get_parent_bus(DEVICE(vdev)); + VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); + + if (k->set_host_notifier_mr) { + return k->set_host_notifier_mr(qbus->parent, n, mr, assign); + } + + return -1; +} + +void virtio_device_set_child_bus_name(VirtIODevice *vdev, char *bus_name) +{ + g_free(vdev->bus_name); + vdev->bus_name = g_strdup(bus_name); +} + +void GCC_FMT_ATTR(2, 3) virtio_error(VirtIODevice *vdev, const char *fmt, ...) 
+{ + va_list ap; + + va_start(ap, fmt); + error_vreport(fmt, ap); + va_end(ap); + + if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) { + vdev->status = vdev->status | VIRTIO_CONFIG_S_NEEDS_RESET; + virtio_notify_config(vdev); + } + + vdev->broken = true; +} + +static void virtio_memory_listener_commit(MemoryListener *listener) +{ + VirtIODevice *vdev = container_of(listener, VirtIODevice, listener); + int i; + + for (i = 0; i < VIRTIO_QUEUE_MAX; i++) { + if (vdev->vq[i].vring.num == 0) { + break; + } + virtio_init_region_cache(vdev, i); + } +} + +static void virtio_device_realize(DeviceState *dev, Error **errp) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(dev); + VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev); + Error *err = NULL; + + /* Devices should either use vmsd or the load/save methods */ + assert(!vdc->vmsd || !vdc->load); + + if (vdc->realize != NULL) { + vdc->realize(dev, &err); + if (err != NULL) { + error_propagate(errp, err); + return; + } + } + + virtio_bus_device_plugged(vdev, &err); + if (err != NULL) { + error_propagate(errp, err); + vdc->unrealize(dev); + return; + } + + vdev->listener.commit = virtio_memory_listener_commit; + vdev->listener.name = "virtio"; + memory_listener_register(&vdev->listener, vdev->dma_as); +} + +static void virtio_device_unrealize(DeviceState *dev) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(dev); + VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev); + + memory_listener_unregister(&vdev->listener); + virtio_bus_device_unplugged(vdev); + + if (vdc->unrealize != NULL) { + vdc->unrealize(dev); + } + + g_free(vdev->bus_name); + vdev->bus_name = NULL; +} + +static void virtio_device_free_virtqueues(VirtIODevice *vdev) +{ + int i; + if (!vdev->vq) { + return; + } + + for (i = 0; i < VIRTIO_QUEUE_MAX; i++) { + if (vdev->vq[i].vring.num == 0) { + break; + } + virtio_virtqueue_reset_region_cache(&vdev->vq[i]); + } + g_free(vdev->vq); +} + +static void virtio_device_instance_finalize(Object *obj) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(obj); + + virtio_device_free_virtqueues(vdev); + + g_free(vdev->config); + g_free(vdev->vector_queues); +} + +static Property virtio_properties[] = { + DEFINE_VIRTIO_COMMON_FEATURES(VirtIODevice, host_features), + DEFINE_PROP_BOOL("use-started", VirtIODevice, use_started, true), + DEFINE_PROP_BOOL("use-disabled-flag", VirtIODevice, use_disabled_flag, true), + DEFINE_PROP_BOOL("x-disable-legacy-check", VirtIODevice, + disable_legacy_check, false), + DEFINE_PROP_END_OF_LIST(), +}; + +static int virtio_device_start_ioeventfd_impl(VirtIODevice *vdev) +{ + VirtioBusState *qbus = VIRTIO_BUS(qdev_get_parent_bus(DEVICE(vdev))); + int i, n, r, err; + + /* + * Batch all the host notifiers in a single transaction to avoid + * quadratic time complexity in address_space_update_ioeventfds(). + */ + memory_region_transaction_begin(); + for (n = 0; n < VIRTIO_QUEUE_MAX; n++) { + VirtQueue *vq = &vdev->vq[n]; + if (!virtio_queue_get_num(vdev, n)) { + continue; + } + r = virtio_bus_set_host_notifier(qbus, n, true); + if (r < 0) { + err = r; + goto assign_error; + } + event_notifier_set_handler(&vq->host_notifier, + virtio_queue_host_notifier_read); + } + + for (n = 0; n < VIRTIO_QUEUE_MAX; n++) { + /* Kick right away to begin processing requests already in vring */ + VirtQueue *vq = &vdev->vq[n]; + if (!vq->vring.num) { + continue; + } + event_notifier_set(&vq->host_notifier); + } + memory_region_transaction_commit(); + return 0; + +assign_error: + i = n; /* save n for a second iteration after transaction is committed. 
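The notifier handlers above all follow the same edge-triggered pattern: event_notifier_test_and_clear() drains the eventfd before the virtqueue is processed, so any number of guest kicks coalesce into one handler invocation. A standalone Linux sketch of that semantics (plain eventfd, outside QEMU's wrappers):

#include <assert.h>
#include <stdint.h>
#include <sys/eventfd.h>
#include <unistd.h>

int main(void)
{
    int efd = eventfd(0, EFD_NONBLOCK);
    uint64_t one = 1, cnt = 0;

    /* Two back-to-back "kicks"... */
    (void)write(efd, &one, sizeof(one));
    (void)write(efd, &one, sizeof(one));

    /* ...drain as a single readable event; the next read fails with
     * EAGAIN, which is why one handler call is enough no matter how
     * many kicks arrived. */
    assert(read(efd, &cnt, sizeof(cnt)) == sizeof(cnt) && cnt == 2);
    assert(read(efd, &cnt, sizeof(cnt)) == -1);
    close(efd);
    return 0;
}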
*/ + while (--n >= 0) { + VirtQueue *vq = &vdev->vq[n]; + if (!virtio_queue_get_num(vdev, n)) { + continue; + } + + event_notifier_set_handler(&vq->host_notifier, NULL); + r = virtio_bus_set_host_notifier(qbus, n, false); + assert(r >= 0); + } + /* + * The transaction expects the ioeventfds to be open when it + * commits. Do it now, before the cleanup loop. + */ + memory_region_transaction_commit(); + + while (--i >= 0) { + if (!virtio_queue_get_num(vdev, i)) { + continue; + } + virtio_bus_cleanup_host_notifier(qbus, i); + } + return err; +} + +int virtio_device_start_ioeventfd(VirtIODevice *vdev) +{ + BusState *qbus = qdev_get_parent_bus(DEVICE(vdev)); + VirtioBusState *vbus = VIRTIO_BUS(qbus); + + return virtio_bus_start_ioeventfd(vbus); +} + +static void virtio_device_stop_ioeventfd_impl(VirtIODevice *vdev) +{ + VirtioBusState *qbus = VIRTIO_BUS(qdev_get_parent_bus(DEVICE(vdev))); + int n, r; + + /* + * Batch all the host notifiers in a single transaction to avoid + * quadratic time complexity in address_space_update_ioeventfds(). + */ + memory_region_transaction_begin(); + for (n = 0; n < VIRTIO_QUEUE_MAX; n++) { + VirtQueue *vq = &vdev->vq[n]; + + if (!virtio_queue_get_num(vdev, n)) { + continue; + } + event_notifier_set_handler(&vq->host_notifier, NULL); + r = virtio_bus_set_host_notifier(qbus, n, false); + assert(r >= 0); + } + /* + * The transaction expects the ioeventfds to be open when it + * commits. Do it now, before the cleanup loop. + */ + memory_region_transaction_commit(); + + for (n = 0; n < VIRTIO_QUEUE_MAX; n++) { + if (!virtio_queue_get_num(vdev, n)) { + continue; + } + virtio_bus_cleanup_host_notifier(qbus, n); + } +} + +int virtio_device_grab_ioeventfd(VirtIODevice *vdev) +{ + BusState *qbus = qdev_get_parent_bus(DEVICE(vdev)); + VirtioBusState *vbus = VIRTIO_BUS(qbus); + + return virtio_bus_grab_ioeventfd(vbus); +} + +void virtio_device_release_ioeventfd(VirtIODevice *vdev) +{ + BusState *qbus = qdev_get_parent_bus(DEVICE(vdev)); + VirtioBusState *vbus = VIRTIO_BUS(qbus); + + virtio_bus_release_ioeventfd(vbus); +} + +static void virtio_device_class_init(ObjectClass *klass, void *data) +{ + /* Set the default value here. 
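The begin/commit bracket in the two functions above collapses up to VIRTIO_QUEUE_MAX ioeventfd updates into a single address-space recalculation; the same idiom applies whenever many MemoryRegion changes happen back to back. A sketch of the pattern, assuming an arbitrary caller-owned region array:

#include "qemu/osdep.h"
#include "exec/memory.h"

static void disable_regions_batched(MemoryRegion *regions, int n)
{
    int i;

    memory_region_transaction_begin();
    for (i = 0; i < n; i++) {
        memory_region_set_enabled(&regions[i], false);
    }
    /* All n updates propagate in one commit instead of n commits. */
    memory_region_transaction_commit();
}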
*/ + VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass); + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = virtio_device_realize; + dc->unrealize = virtio_device_unrealize; + dc->bus_type = TYPE_VIRTIO_BUS; + device_class_set_props(dc, virtio_properties); + vdc->start_ioeventfd = virtio_device_start_ioeventfd_impl; + vdc->stop_ioeventfd = virtio_device_stop_ioeventfd_impl; + + vdc->legacy_features |= VIRTIO_LEGACY_FEATURES; +} + +bool virtio_device_ioeventfd_enabled(VirtIODevice *vdev) +{ + BusState *qbus = qdev_get_parent_bus(DEVICE(vdev)); + VirtioBusState *vbus = VIRTIO_BUS(qbus); + + return virtio_bus_ioeventfd_enabled(vbus); +} + +static const TypeInfo virtio_device_info = { + .name = TYPE_VIRTIO_DEVICE, + .parent = TYPE_DEVICE, + .instance_size = sizeof(VirtIODevice), + .class_init = virtio_device_class_init, + .instance_finalize = virtio_device_instance_finalize, + .abstract = true, + .class_size = sizeof(VirtioDeviceClass), +}; + +static void virtio_register_types(void) +{ + type_register_static(&virtio_device_info); +} + +type_init(virtio_register_types) diff --git a/hw/watchdog/Kconfig b/hw/watchdog/Kconfig new file mode 100644 index 000000000..66e1d029e --- /dev/null +++ b/hw/watchdog/Kconfig @@ -0,0 +1,22 @@ +config CMSDK_APB_WATCHDOG + bool + select PTIMER + +config WDT_IB6300ESB + bool + default y if PCI_DEVICES + depends on PCI + +config WDT_IB700 + bool + default y + depends on ISA_BUS + +config WDT_DIAG288 + bool + +config WDT_IMX2 + bool + +config WDT_SBSA + bool diff --git a/hw/watchdog/cmsdk-apb-watchdog.c b/hw/watchdog/cmsdk-apb-watchdog.c new file mode 100644 index 000000000..5a2cd46eb --- /dev/null +++ b/hw/watchdog/cmsdk-apb-watchdog.c @@ -0,0 +1,414 @@ +/* + * ARM CMSDK APB watchdog emulation + * + * Copyright (c) 2018 Linaro Limited + * Written by Peter Maydell + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 or + * (at your option) any later version. 
+ */ + +/* + * This is a model of the "APB watchdog" which is part of the Cortex-M + * System Design Kit (CMSDK) and documented in the Cortex-M System + * Design Kit Technical Reference Manual (ARM DDI0479C): + * https://developer.arm.com/products/system-design/system-design-kits/cortex-m-system-design-kit + * + * We also support the variant of this device found in the TI + * Stellaris/Luminary boards and documented in: + * http://www.ti.com/lit/ds/symlink/lm3s6965.pdf + */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "trace.h" +#include "qapi/error.h" +#include "qemu/module.h" +#include "sysemu/watchdog.h" +#include "hw/sysbus.h" +#include "hw/irq.h" +#include "hw/qdev-properties.h" +#include "hw/registerfields.h" +#include "hw/qdev-clock.h" +#include "hw/watchdog/cmsdk-apb-watchdog.h" +#include "migration/vmstate.h" + +REG32(WDOGLOAD, 0x0) +REG32(WDOGVALUE, 0x4) +REG32(WDOGCONTROL, 0x8) + FIELD(WDOGCONTROL, INTEN, 0, 1) + FIELD(WDOGCONTROL, RESEN, 1, 1) +#define R_WDOGCONTROL_VALID_MASK (R_WDOGCONTROL_INTEN_MASK | \ + R_WDOGCONTROL_RESEN_MASK) +REG32(WDOGINTCLR, 0xc) +REG32(WDOGRIS, 0x10) + FIELD(WDOGRIS, INT, 0, 1) +REG32(WDOGMIS, 0x14) +REG32(WDOGTEST, 0x418) /* only in Stellaris/Luminary version of the device */ +REG32(WDOGLOCK, 0xc00) +#define WDOG_UNLOCK_VALUE 0x1ACCE551 +REG32(WDOGITCR, 0xf00) + FIELD(WDOGITCR, ENABLE, 0, 1) +#define R_WDOGITCR_VALID_MASK R_WDOGITCR_ENABLE_MASK +REG32(WDOGITOP, 0xf04) + FIELD(WDOGITOP, WDOGRES, 0, 1) + FIELD(WDOGITOP, WDOGINT, 1, 1) +#define R_WDOGITOP_VALID_MASK (R_WDOGITOP_WDOGRES_MASK | \ + R_WDOGITOP_WDOGINT_MASK) +REG32(PID4, 0xfd0) +REG32(PID5, 0xfd4) +REG32(PID6, 0xfd8) +REG32(PID7, 0xfdc) +REG32(PID0, 0xfe0) +REG32(PID1, 0xfe4) +REG32(PID2, 0xfe8) +REG32(PID3, 0xfec) +REG32(CID0, 0xff0) +REG32(CID1, 0xff4) +REG32(CID2, 0xff8) +REG32(CID3, 0xffc) + +/* PID/CID values */ +static const uint32_t cmsdk_apb_watchdog_id[] = { + 0x04, 0x00, 0x00, 0x00, /* PID4..PID7 */ + 0x24, 0xb8, 0x1b, 0x00, /* PID0..PID3 */ + 0x0d, 0xf0, 0x05, 0xb1, /* CID0..CID3 */ +}; + +static const uint32_t luminary_watchdog_id[] = { + 0x00, 0x00, 0x00, 0x00, /* PID4..PID7 */ + 0x05, 0x18, 0x18, 0x01, /* PID0..PID3 */ + 0x0d, 0xf0, 0x05, 0xb1, /* CID0..CID3 */ +}; + +static bool cmsdk_apb_watchdog_intstatus(CMSDKAPBWatchdog *s) +{ + /* Return masked interrupt status */ + return s->intstatus && (s->control & R_WDOGCONTROL_INTEN_MASK); +} + +static bool cmsdk_apb_watchdog_resetstatus(CMSDKAPBWatchdog *s) +{ + /* Return masked reset status */ + return s->resetstatus && (s->control & R_WDOGCONTROL_RESEN_MASK); +} + +static void cmsdk_apb_watchdog_update(CMSDKAPBWatchdog *s) +{ + bool wdogint; + bool wdogres; + + if (s->itcr) { + /* + * Not checking that !s->is_luminary since s->itcr can't be written + * when s->is_luminary in the first place. 
+ */ + wdogint = s->itop & R_WDOGITOP_WDOGINT_MASK; + wdogres = s->itop & R_WDOGITOP_WDOGRES_MASK; + } else { + wdogint = cmsdk_apb_watchdog_intstatus(s); + wdogres = cmsdk_apb_watchdog_resetstatus(s); + } + + qemu_set_irq(s->wdogint, wdogint); + if (wdogres) { + watchdog_perform_action(); + } +} + +static uint64_t cmsdk_apb_watchdog_read(void *opaque, hwaddr offset, + unsigned size) +{ + CMSDKAPBWatchdog *s = CMSDK_APB_WATCHDOG(opaque); + uint64_t r; + + switch (offset) { + case A_WDOGLOAD: + r = ptimer_get_limit(s->timer); + break; + case A_WDOGVALUE: + r = ptimer_get_count(s->timer); + break; + case A_WDOGCONTROL: + r = s->control; + break; + case A_WDOGRIS: + r = s->intstatus; + break; + case A_WDOGMIS: + r = cmsdk_apb_watchdog_intstatus(s); + break; + case A_WDOGLOCK: + r = s->lock; + break; + case A_WDOGITCR: + if (s->is_luminary) { + goto bad_offset; + } + r = s->itcr; + break; + case A_PID4 ... A_CID3: + r = s->id[(offset - A_PID4) / 4]; + break; + case A_WDOGINTCLR: + case A_WDOGITOP: + if (s->is_luminary) { + goto bad_offset; + } + qemu_log_mask(LOG_GUEST_ERROR, + "CMSDK APB watchdog read: read of WO offset %x\n", + (int)offset); + r = 0; + break; + case A_WDOGTEST: + if (!s->is_luminary) { + goto bad_offset; + } + qemu_log_mask(LOG_UNIMP, + "Luminary watchdog read: stall not implemented\n"); + r = 0; + break; + default: +bad_offset: + qemu_log_mask(LOG_GUEST_ERROR, + "CMSDK APB watchdog read: bad offset %x\n", (int)offset); + r = 0; + break; + } + trace_cmsdk_apb_watchdog_read(offset, r, size); + return r; +} + +static void cmsdk_apb_watchdog_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + CMSDKAPBWatchdog *s = CMSDK_APB_WATCHDOG(opaque); + + trace_cmsdk_apb_watchdog_write(offset, value, size); + + if (s->lock && offset != A_WDOGLOCK) { + /* Write access is disabled via WDOGLOCK */ + qemu_log_mask(LOG_GUEST_ERROR, + "CMSDK APB watchdog write: write to locked watchdog\n"); + return; + } + + switch (offset) { + case A_WDOGLOAD: + /* + * Reset the load value and the current count, and make sure + * we're counting. + */ + ptimer_transaction_begin(s->timer); + ptimer_set_limit(s->timer, value, 1); + ptimer_run(s->timer, 0); + ptimer_transaction_commit(s->timer); + break; + case A_WDOGCONTROL: + if (s->is_luminary && 0 != (R_WDOGCONTROL_INTEN_MASK & s->control)) { + /* + * The Luminary version of this device ignores writes to + * this register after the guest has enabled interrupts + * (so they can only be disabled again via reset). + */ + break; + } + s->control = value & R_WDOGCONTROL_VALID_MASK; + cmsdk_apb_watchdog_update(s); + break; + case A_WDOGINTCLR: + s->intstatus = 0; + ptimer_transaction_begin(s->timer); + ptimer_set_count(s->timer, ptimer_get_limit(s->timer)); + ptimer_transaction_commit(s->timer); + cmsdk_apb_watchdog_update(s); + break; + case A_WDOGLOCK: + s->lock = (value != WDOG_UNLOCK_VALUE); + trace_cmsdk_apb_watchdog_lock(s->lock); + break; + case A_WDOGITCR: + if (s->is_luminary) { + goto bad_offset; + } + s->itcr = value & R_WDOGITCR_VALID_MASK; + cmsdk_apb_watchdog_update(s); + break; + case A_WDOGITOP: + if (s->is_luminary) { + goto bad_offset; + } + s->itop = value & R_WDOGITOP_VALID_MASK; + cmsdk_apb_watchdog_update(s); + break; + case A_WDOGVALUE: + case A_WDOGRIS: + case A_WDOGMIS: + case A_PID4 ... 
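From the guest side, the WDOGLOCK handling below means every reprogramming sequence must start by writing the 0x1ACCE551 magic and normally ends by re-locking with any other value. A bare-metal sketch (the base address is a hypothetical board mapping, not from this patch):

#include <stdint.h>

#define WDOG_BASE    0x40000000u /* assumption: board-specific */
#define WDOGLOAD     (*(volatile uint32_t *)(WDOG_BASE + 0x000))
#define WDOGCONTROL  (*(volatile uint32_t *)(WDOG_BASE + 0x008))
#define WDOGLOCK     (*(volatile uint32_t *)(WDOG_BASE + 0xc00))

static void wdog_start(uint32_t load)
{
    WDOGLOCK = 0x1ACCE551u; /* unlock: the exact magic is required */
    WDOGLOAD = load;        /* also restarts the down-counter */
    WDOGCONTROL = 0x3;      /* RESEN | INTEN: irq on 1st timeout, reset on 2nd */
    WDOGLOCK = 0;           /* any non-magic value locks again */
}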
A_CID3: + qemu_log_mask(LOG_GUEST_ERROR, + "CMSDK APB watchdog write: write to RO offset 0x%x\n", + (int)offset); + break; + case A_WDOGTEST: + if (!s->is_luminary) { + goto bad_offset; + } + qemu_log_mask(LOG_UNIMP, + "Luminary watchdog write: stall not implemented\n"); + break; + default: +bad_offset: + qemu_log_mask(LOG_GUEST_ERROR, + "CMSDK APB watchdog write: bad offset 0x%x\n", + (int)offset); + break; + } +} + +static const MemoryRegionOps cmsdk_apb_watchdog_ops = { + .read = cmsdk_apb_watchdog_read, + .write = cmsdk_apb_watchdog_write, + .endianness = DEVICE_LITTLE_ENDIAN, + /* byte/halfword accesses are just zero-padded on reads and writes */ + .impl.min_access_size = 4, + .impl.max_access_size = 4, + .valid.min_access_size = 1, + .valid.max_access_size = 4, +}; + +static void cmsdk_apb_watchdog_tick(void *opaque) +{ + CMSDKAPBWatchdog *s = CMSDK_APB_WATCHDOG(opaque); + + if (!s->intstatus) { + /* Count expired for the first time: raise interrupt */ + s->intstatus = R_WDOGRIS_INT_MASK; + } else { + /* Count expired for the second time: raise reset and stop clock */ + s->resetstatus = 1; + ptimer_stop(s->timer); + } + cmsdk_apb_watchdog_update(s); +} + +static void cmsdk_apb_watchdog_reset(DeviceState *dev) +{ + CMSDKAPBWatchdog *s = CMSDK_APB_WATCHDOG(dev); + + trace_cmsdk_apb_watchdog_reset(); + s->control = 0; + s->intstatus = 0; + s->lock = 0; + s->itcr = 0; + s->itop = 0; + s->resetstatus = 0; + /* Set the limit and the count */ + ptimer_transaction_begin(s->timer); + ptimer_set_limit(s->timer, 0xffffffff, 1); + ptimer_run(s->timer, 0); + ptimer_transaction_commit(s->timer); +} + +static void cmsdk_apb_watchdog_clk_update(void *opaque, ClockEvent event) +{ + CMSDKAPBWatchdog *s = CMSDK_APB_WATCHDOG(opaque); + + ptimer_transaction_begin(s->timer); + ptimer_set_period_from_clock(s->timer, s->wdogclk, 1); + ptimer_transaction_commit(s->timer); +} + +static void cmsdk_apb_watchdog_init(Object *obj) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + CMSDKAPBWatchdog *s = CMSDK_APB_WATCHDOG(obj); + + memory_region_init_io(&s->iomem, obj, &cmsdk_apb_watchdog_ops, + s, "cmsdk-apb-watchdog", 0x1000); + sysbus_init_mmio(sbd, &s->iomem); + sysbus_init_irq(sbd, &s->wdogint); + s->wdogclk = qdev_init_clock_in(DEVICE(s), "WDOGCLK", + cmsdk_apb_watchdog_clk_update, s, + ClockUpdate); + + s->is_luminary = false; + s->id = cmsdk_apb_watchdog_id; +} + +static void cmsdk_apb_watchdog_realize(DeviceState *dev, Error **errp) +{ + CMSDKAPBWatchdog *s = CMSDK_APB_WATCHDOG(dev); + + if (!clock_has_source(s->wdogclk)) { + error_setg(errp, + "CMSDK APB watchdog: WDOGCLK clock must be connected"); + return; + } + + s->timer = ptimer_init(cmsdk_apb_watchdog_tick, s, + PTIMER_POLICY_WRAP_AFTER_ONE_PERIOD | + PTIMER_POLICY_TRIGGER_ONLY_ON_DECREMENT | + PTIMER_POLICY_NO_IMMEDIATE_RELOAD | + PTIMER_POLICY_NO_COUNTER_ROUND_DOWN); + + ptimer_transaction_begin(s->timer); + ptimer_set_period_from_clock(s->timer, s->wdogclk, 1); + ptimer_transaction_commit(s->timer); +} + +static const VMStateDescription cmsdk_apb_watchdog_vmstate = { + .name = "cmsdk-apb-watchdog", + .version_id = 2, + .minimum_version_id = 2, + .fields = (VMStateField[]) { + VMSTATE_CLOCK(wdogclk, CMSDKAPBWatchdog), + VMSTATE_PTIMER(timer, CMSDKAPBWatchdog), + VMSTATE_UINT32(control, CMSDKAPBWatchdog), + VMSTATE_UINT32(intstatus, CMSDKAPBWatchdog), + VMSTATE_UINT32(lock, CMSDKAPBWatchdog), + VMSTATE_UINT32(itcr, CMSDKAPBWatchdog), + VMSTATE_UINT32(itop, CMSDKAPBWatchdog), + VMSTATE_UINT32(resetstatus, CMSDKAPBWatchdog), + VMSTATE_END_OF_LIST() + } 
+}; + +static void cmsdk_apb_watchdog_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = cmsdk_apb_watchdog_realize; + dc->vmsd = &cmsdk_apb_watchdog_vmstate; + dc->reset = cmsdk_apb_watchdog_reset; +} + +static const TypeInfo cmsdk_apb_watchdog_info = { + .name = TYPE_CMSDK_APB_WATCHDOG, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(CMSDKAPBWatchdog), + .instance_init = cmsdk_apb_watchdog_init, + .class_init = cmsdk_apb_watchdog_class_init, +}; + +static void luminary_watchdog_init(Object *obj) +{ + CMSDKAPBWatchdog *s = CMSDK_APB_WATCHDOG(obj); + + s->is_luminary = true; + s->id = luminary_watchdog_id; +} + +static const TypeInfo luminary_watchdog_info = { + .name = TYPE_LUMINARY_WATCHDOG, + .parent = TYPE_CMSDK_APB_WATCHDOG, + .instance_init = luminary_watchdog_init +}; + +static void cmsdk_apb_watchdog_register_types(void) +{ + type_register_static(&cmsdk_apb_watchdog_info); + type_register_static(&luminary_watchdog_info); +} + +type_init(cmsdk_apb_watchdog_register_types); diff --git a/hw/watchdog/meson.build b/hw/watchdog/meson.build new file mode 100644 index 000000000..054c403de --- /dev/null +++ b/hw/watchdog/meson.build @@ -0,0 +1,8 @@ +softmmu_ss.add(files('watchdog.c')) +softmmu_ss.add(when: 'CONFIG_CMSDK_APB_WATCHDOG', if_true: files('cmsdk-apb-watchdog.c')) +softmmu_ss.add(when: 'CONFIG_WDT_IB6300ESB', if_true: files('wdt_i6300esb.c')) +softmmu_ss.add(when: 'CONFIG_WDT_IB700', if_true: files('wdt_ib700.c')) +softmmu_ss.add(when: 'CONFIG_WDT_DIAG288', if_true: files('wdt_diag288.c')) +softmmu_ss.add(when: 'CONFIG_ASPEED_SOC', if_true: files('wdt_aspeed.c')) +softmmu_ss.add(when: 'CONFIG_WDT_IMX2', if_true: files('wdt_imx2.c')) +softmmu_ss.add(when: 'CONFIG_WDT_SBSA', if_true: files('sbsa_gwdt.c')) diff --git a/hw/watchdog/sbsa_gwdt.c b/hw/watchdog/sbsa_gwdt.c new file mode 100644 index 000000000..e49cacd0e --- /dev/null +++ b/hw/watchdog/sbsa_gwdt.c @@ -0,0 +1,294 @@ +/* + * Generic watchdog device model for SBSA + * + * The watchdog device has been implemented as revision 1 variant of + * the ARM SBSA specification v6.0 + * (https://developer.arm.com/documentation/den0029/d?lang=en) + * + * Copyright Linaro.org 2020 + * + * Authors: + * Shashi Mallela + * + * This work is licensed under the terms of the GNU GPL, version 2 or (at your + * option) any later version. See the COPYING file in the top-level directory. 
+ * + */ + +#include "qemu/osdep.h" +#include "sysemu/reset.h" +#include "sysemu/watchdog.h" +#include "hw/watchdog/sbsa_gwdt.h" +#include "qemu/timer.h" +#include "migration/vmstate.h" +#include "qemu/log.h" +#include "qemu/module.h" + +static WatchdogTimerModel model = { + .wdt_name = TYPE_WDT_SBSA, + .wdt_description = "SBSA-compliant generic watchdog device", +}; + +static const VMStateDescription vmstate_sbsa_gwdt = { + .name = "sbsa-gwdt", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_TIMER_PTR(timer, SBSA_GWDTState), + VMSTATE_UINT32(wcs, SBSA_GWDTState), + VMSTATE_UINT32(worl, SBSA_GWDTState), + VMSTATE_UINT32(woru, SBSA_GWDTState), + VMSTATE_UINT32(wcvl, SBSA_GWDTState), + VMSTATE_UINT32(wcvu, SBSA_GWDTState), + VMSTATE_END_OF_LIST() + } +}; + +typedef enum WdtRefreshType { + EXPLICIT_REFRESH = 0, + TIMEOUT_REFRESH = 1, +} WdtRefreshType; + +static uint64_t sbsa_gwdt_rread(void *opaque, hwaddr addr, unsigned int size) +{ + SBSA_GWDTState *s = SBSA_GWDT(opaque); + uint32_t ret = 0; + + switch (addr) { + case SBSA_GWDT_WRR: + /* watch refresh read has no effect and returns 0 */ + ret = 0; + break; + case SBSA_GWDT_W_IIDR: + ret = s->id; + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, "bad address in refresh frame read :" + " 0x%x\n", (int)addr); + } + return ret; +} + +static uint64_t sbsa_gwdt_read(void *opaque, hwaddr addr, unsigned int size) +{ + SBSA_GWDTState *s = SBSA_GWDT(opaque); + uint32_t ret = 0; + + switch (addr) { + case SBSA_GWDT_WCS: + ret = s->wcs; + break; + case SBSA_GWDT_WOR: + ret = s->worl; + break; + case SBSA_GWDT_WORU: + ret = s->woru; + break; + case SBSA_GWDT_WCV: + ret = s->wcvl; + break; + case SBSA_GWDT_WCVU: + ret = s->wcvu; + break; + case SBSA_GWDT_W_IIDR: + ret = s->id; + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, "bad address in control frame read :" + " 0x%x\n", (int)addr); + } + return ret; +} + +static void sbsa_gwdt_update_timer(SBSA_GWDTState *s, WdtRefreshType rtype) +{ + uint64_t timeout = 0; + + timer_del(s->timer); + + if (s->wcs & SBSA_GWDT_WCS_EN) { + /* + * Extract the upper 16 bits from woru & 32 bits from worl + * registers to construct the 48 bit offset value + */ + timeout = s->woru; + timeout <<= 32; + timeout |= s->worl; + timeout = muldiv64(timeout, NANOSECONDS_PER_SECOND, SBSA_TIMER_FREQ); + timeout += qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + + if ((rtype == EXPLICIT_REFRESH) || ((rtype == TIMEOUT_REFRESH) && + (!(s->wcs & SBSA_GWDT_WCS_WS0)))) { + /* store the current timeout value into compare registers */ + s->wcvu = timeout >> 32; + s->wcvl = timeout; + } + timer_mod(s->timer, timeout); + } +} + +static void sbsa_gwdt_rwrite(void *opaque, hwaddr offset, uint64_t data, + unsigned size) { + SBSA_GWDTState *s = SBSA_GWDT(opaque); + + if (offset == SBSA_GWDT_WRR) { + s->wcs &= ~(SBSA_GWDT_WCS_WS0 | SBSA_GWDT_WCS_WS1); + + sbsa_gwdt_update_timer(s, EXPLICIT_REFRESH); + } else { + qemu_log_mask(LOG_GUEST_ERROR, "bad address in refresh frame write :" + " 0x%x\n", (int)offset); + } +} + +static void sbsa_gwdt_write(void *opaque, hwaddr offset, uint64_t data, + unsigned size) { + SBSA_GWDTState *s = SBSA_GWDT(opaque); + + switch (offset) { + case SBSA_GWDT_WCS: + s->wcs = data & SBSA_GWDT_WCS_EN; + qemu_set_irq(s->irq, 0); + sbsa_gwdt_update_timer(s, EXPLICIT_REFRESH); + break; + + case SBSA_GWDT_WOR: + s->worl = data; + s->wcs &= ~(SBSA_GWDT_WCS_WS0 | SBSA_GWDT_WCS_WS1); + qemu_set_irq(s->irq, 0); + sbsa_gwdt_update_timer(s, EXPLICIT_REFRESH); + break; + + case SBSA_GWDT_WORU: + 
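The 48-bit timeout arithmetic in sbsa_gwdt_update_timer() is easiest to see with numbers. A standalone sketch, assuming the 62.5 MHz SBSA_TIMER_FREQ defined in sbsa_gwdt.h; the device itself uses muldiv64() because ticks * 10^9 can overflow 64 bits for large 48-bit offsets, while the small value below stays safe:

#include <stdint.h>
#include <stdio.h>

#define TIMER_FREQ 62500000ULL /* assumption: mirrors SBSA_TIMER_FREQ */

int main(void)
{
    uint32_t worl = 0x3B9ACA00; /* hypothetical programmed offset, low 32 bits */
    uint32_t woru = 0;          /* upper 16 bits of the 48-bit offset */

    uint64_t ticks = ((uint64_t)woru << 32) | worl;
    uint64_t ns = ticks * 1000000000ULL / TIMER_FREQ;

    /* prints 16000000000 ns, i.e. the watchdog fires in 16 s */
    printf("%llu ns\n", (unsigned long long)ns);
    return 0;
}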
s->woru = data & SBSA_GWDT_WOR_MASK; + s->wcs &= ~(SBSA_GWDT_WCS_WS0 | SBSA_GWDT_WCS_WS1); + qemu_set_irq(s->irq, 0); + sbsa_gwdt_update_timer(s, EXPLICIT_REFRESH); + break; + + case SBSA_GWDT_WCV: + s->wcvl = data; + break; + + case SBSA_GWDT_WCVU: + s->wcvu = data; + break; + + default: + qemu_log_mask(LOG_GUEST_ERROR, "bad address in control frame write :" + " 0x%x\n", (int)offset); + } + return; +} + +static void wdt_sbsa_gwdt_reset(DeviceState *dev) +{ + SBSA_GWDTState *s = SBSA_GWDT(dev); + + timer_del(s->timer); + + s->wcs = 0; + s->wcvl = 0; + s->wcvu = 0; + s->worl = 0; + s->woru = 0; + s->id = SBSA_GWDT_ID; +} + +static void sbsa_gwdt_timer_sysinterrupt(void *opaque) +{ + SBSA_GWDTState *s = SBSA_GWDT(opaque); + + if (!(s->wcs & SBSA_GWDT_WCS_WS0)) { + s->wcs |= SBSA_GWDT_WCS_WS0; + sbsa_gwdt_update_timer(s, TIMEOUT_REFRESH); + qemu_set_irq(s->irq, 1); + } else { + s->wcs |= SBSA_GWDT_WCS_WS1; + qemu_log_mask(CPU_LOG_RESET, "Watchdog timer expired.\n"); + /* + * Reset the watchdog only if the guest gets notified about + * expiry. watchdog_perform_action() may temporarily relinquish + * the BQL; reset before triggering the action to avoid races with + * sbsa_gwdt instructions. + */ + switch (get_watchdog_action()) { + case WATCHDOG_ACTION_DEBUG: + case WATCHDOG_ACTION_NONE: + case WATCHDOG_ACTION_PAUSE: + break; + default: + wdt_sbsa_gwdt_reset(DEVICE(s)); + } + watchdog_perform_action(); + } +} + +static const MemoryRegionOps sbsa_gwdt_rops = { + .read = sbsa_gwdt_rread, + .write = sbsa_gwdt_rwrite, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid.min_access_size = 4, + .valid.max_access_size = 4, + .valid.unaligned = false, +}; + +static const MemoryRegionOps sbsa_gwdt_ops = { + .read = sbsa_gwdt_read, + .write = sbsa_gwdt_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid.min_access_size = 4, + .valid.max_access_size = 4, + .valid.unaligned = false, +}; + +static void wdt_sbsa_gwdt_realize(DeviceState *dev, Error **errp) +{ + SBSA_GWDTState *s = SBSA_GWDT(dev); + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + + memory_region_init_io(&s->rmmio, OBJECT(dev), + &sbsa_gwdt_rops, s, + "sbsa_gwdt.refresh", + SBSA_GWDT_RMMIO_SIZE); + + memory_region_init_io(&s->cmmio, OBJECT(dev), + &sbsa_gwdt_ops, s, + "sbsa_gwdt.control", + SBSA_GWDT_CMMIO_SIZE); + + sysbus_init_mmio(sbd, &s->rmmio); + sysbus_init_mmio(sbd, &s->cmmio); + + sysbus_init_irq(sbd, &s->irq); + + s->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, sbsa_gwdt_timer_sysinterrupt, + dev); +} + +static void wdt_sbsa_gwdt_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = wdt_sbsa_gwdt_realize; + dc->reset = wdt_sbsa_gwdt_reset; + dc->hotpluggable = false; + set_bit(DEVICE_CATEGORY_WATCHDOG, dc->categories); + dc->vmsd = &vmstate_sbsa_gwdt; + dc->desc = "SBSA-compliant generic watchdog device"; +} + +static const TypeInfo wdt_sbsa_gwdt_info = { + .class_init = wdt_sbsa_gwdt_class_init, + .parent = TYPE_SYS_BUS_DEVICE, + .name = TYPE_WDT_SBSA, + .instance_size = sizeof(SBSA_GWDTState), +}; + +static void wdt_sbsa_gwdt_register_types(void) +{ + watchdog_add_model(&model); + type_register_static(&wdt_sbsa_gwdt_info); +} + +type_init(wdt_sbsa_gwdt_register_types) diff --git a/hw/watchdog/trace-events b/hw/watchdog/trace-events new file mode 100644 index 000000000..e7523e22a --- /dev/null +++ b/hw/watchdog/trace-events @@ -0,0 +1,11 @@ +# See docs/devel/tracing.rst for syntax documentation. 
+ +# cmsdk-apb-watchdog.c +cmsdk_apb_watchdog_read(uint64_t offset, uint64_t data, unsigned size) "CMSDK APB watchdog read: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u" +cmsdk_apb_watchdog_write(uint64_t offset, uint64_t data, unsigned size) "CMSDK APB watchdog write: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u" +cmsdk_apb_watchdog_reset(void) "CMSDK APB watchdog: reset" +cmsdk_apb_watchdog_lock(uint32_t lock) "CMSDK APB watchdog: lock %" PRIu32 + +# wdt-aspeed.c +aspeed_wdt_read(uint64_t addr, uint32_t size) "@0x%" PRIx64 " size=%d" +aspeed_wdt_write(uint64_t addr, uint32_t size, uint64_t data) "@0x%" PRIx64 " size=%d value=0x%"PRIx64 diff --git a/hw/watchdog/trace.h b/hw/watchdog/trace.h new file mode 100644 index 000000000..5d8495752 --- /dev/null +++ b/hw/watchdog/trace.h @@ -0,0 +1 @@ +#include "trace/trace-hw_watchdog.h" diff --git a/hw/watchdog/watchdog.c b/hw/watchdog/watchdog.c new file mode 100644 index 000000000..1437e6c5b --- /dev/null +++ b/hw/watchdog/watchdog.c @@ -0,0 +1,135 @@ +/* + * Virtual hardware watchdog. + * + * Copyright (C) 2009 Red Hat Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see <http://www.gnu.org/licenses/>. + * + * By Richard W.M. Jones (rjones@redhat.com). + */ + +#include "qemu/osdep.h" +#include "qemu/option.h" +#include "qemu/config-file.h" +#include "qemu/queue.h" +#include "qapi/error.h" +#include "qapi/qapi-commands-run-state.h" +#include "qapi/qapi-events-run-state.h" +#include "sysemu/runstate.h" +#include "sysemu/watchdog.h" +#include "hw/nmi.h" +#include "qemu/help_option.h" + +static WatchdogAction watchdog_action = WATCHDOG_ACTION_RESET; +static QLIST_HEAD(, WatchdogTimerModel) watchdog_list; + +void watchdog_add_model(WatchdogTimerModel *model) +{ + QLIST_INSERT_HEAD(&watchdog_list, model, entry); +} + +/* Returns: + * 0 = continue + * 1 = exit program with error + * 2 = exit program without error + */ +int select_watchdog(const char *p) +{ + WatchdogTimerModel *model; + QemuOpts *opts; + + /* -watchdog ? lists available devices and exits cleanly. */ + if (is_help_option(p)) { + QLIST_FOREACH(model, &watchdog_list, entry) { + fprintf(stderr, "\t%s\t%s\n", + model->wdt_name, model->wdt_description); + } + return 2; + } + + QLIST_FOREACH(model, &watchdog_list, entry) { + if (strcasecmp(model->wdt_name, p) == 0) { + /* add the device */ + opts = qemu_opts_create(qemu_find_opts("device"), NULL, 0, + &error_abort); + qemu_opt_set(opts, "driver", p, &error_abort); + return 0; + } + } + + fprintf(stderr, "Unknown -watchdog device. Supported devices are:\n"); + QLIST_FOREACH(model, &watchdog_list, entry) { + fprintf(stderr, "\t%s\t%s\n", + model->wdt_name, model->wdt_description); + } + return 1; +} + +WatchdogAction get_watchdog_action(void) +{ + return watchdog_action; +} + +/* This actually performs the "action" once a watchdog has expired, + * ie. reboot, shutdown, exit, etc. 
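select_watchdog()'s tri-state return is consumed by the command-line parser; a hypothetical caller, mirroring how vl.c handles the -watchdog option:

#include <stdlib.h>

int select_watchdog(const char *p); /* from this file */

static void handle_watchdog_option(const char *arg)
{
    switch (select_watchdog(arg)) {
    case 0:
        break;    /* model found, -device opts queued; keep parsing */
    case 1:
        exit(1);  /* unknown model: list printed, error exit */
    case 2:
        exit(0);  /* help requested: list printed, clean exit */
    }
}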
+ */ +void watchdog_perform_action(void) +{ + switch (watchdog_action) { + case WATCHDOG_ACTION_RESET: /* same as 'system_reset' in monitor */ + qapi_event_send_watchdog(WATCHDOG_ACTION_RESET); + qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); + break; + + case WATCHDOG_ACTION_SHUTDOWN: /* same as 'system_powerdown' in monitor */ + qapi_event_send_watchdog(WATCHDOG_ACTION_SHUTDOWN); + qemu_system_powerdown_request(); + break; + + case WATCHDOG_ACTION_POWEROFF: /* same as 'quit' command in monitor */ + qapi_event_send_watchdog(WATCHDOG_ACTION_POWEROFF); + exit(0); + + case WATCHDOG_ACTION_PAUSE: /* same as 'stop' command in monitor */ + /* In a timer callback, when vm_stop calls qemu_clock_enable + * you would get a deadlock. Bypass the problem. + */ + qemu_system_vmstop_request_prepare(); + qapi_event_send_watchdog(WATCHDOG_ACTION_PAUSE); + qemu_system_vmstop_request(RUN_STATE_WATCHDOG); + break; + + case WATCHDOG_ACTION_DEBUG: + qapi_event_send_watchdog(WATCHDOG_ACTION_DEBUG); + fprintf(stderr, "watchdog: timer fired\n"); + break; + + case WATCHDOG_ACTION_NONE: + qapi_event_send_watchdog(WATCHDOG_ACTION_NONE); + break; + + case WATCHDOG_ACTION_INJECT_NMI: + qapi_event_send_watchdog(WATCHDOG_ACTION_INJECT_NMI); + nmi_monitor_handle(0, NULL); + break; + + default: + assert(0); + } +} + +void qmp_watchdog_set_action(WatchdogAction action, Error **errp) +{ + watchdog_action = action; +} diff --git a/hw/watchdog/wdt_aspeed.c b/hw/watchdog/wdt_aspeed.c new file mode 100644 index 000000000..6aa6f90b6 --- /dev/null +++ b/hw/watchdog/wdt_aspeed.c @@ -0,0 +1,397 @@ +/* + * ASPEED Watchdog Controller + * + * Copyright (C) 2016-2017 IBM Corp. + * + * This code is licensed under the GPL version 2 or later. See the + * COPYING file in the top-level directory. 
+ */ + +#include "qemu/osdep.h" + +#include "qapi/error.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "qemu/timer.h" +#include "sysemu/watchdog.h" +#include "hw/misc/aspeed_scu.h" +#include "hw/qdev-properties.h" +#include "hw/sysbus.h" +#include "hw/watchdog/wdt_aspeed.h" +#include "migration/vmstate.h" +#include "trace.h" + +#define WDT_STATUS (0x00 / 4) +#define WDT_RELOAD_VALUE (0x04 / 4) +#define WDT_RESTART (0x08 / 4) +#define WDT_CTRL (0x0C / 4) +#define WDT_CTRL_RESET_MODE_SOC (0x00 << 5) +#define WDT_CTRL_RESET_MODE_FULL_CHIP (0x01 << 5) +#define WDT_CTRL_1MHZ_CLK BIT(4) +#define WDT_CTRL_WDT_EXT BIT(3) +#define WDT_CTRL_WDT_INTR BIT(2) +#define WDT_CTRL_RESET_SYSTEM BIT(1) +#define WDT_CTRL_ENABLE BIT(0) +#define WDT_RESET_WIDTH (0x18 / 4) +#define WDT_RESET_WIDTH_ACTIVE_HIGH BIT(31) +#define WDT_POLARITY_MASK (0xFF << 24) +#define WDT_ACTIVE_HIGH_MAGIC (0xA5 << 24) +#define WDT_ACTIVE_LOW_MAGIC (0x5A << 24) +#define WDT_RESET_WIDTH_PUSH_PULL BIT(30) +#define WDT_DRIVE_TYPE_MASK (0xFF << 24) +#define WDT_PUSH_PULL_MAGIC (0xA8 << 24) +#define WDT_OPEN_DRAIN_MAGIC (0x8A << 24) +#define WDT_RESET_MASK1 (0x1c / 4) + +#define WDT_TIMEOUT_STATUS (0x10 / 4) +#define WDT_TIMEOUT_CLEAR (0x14 / 4) + +#define WDT_RESTART_MAGIC 0x4755 + +#define AST2600_SCU_RESET_CONTROL1 (0x40 / 4) +#define SCU_RESET_CONTROL1 (0x04 / 4) +#define SCU_RESET_SDRAM BIT(0) + +static bool aspeed_wdt_is_enabled(const AspeedWDTState *s) +{ + return s->regs[WDT_CTRL] & WDT_CTRL_ENABLE; +} + +static uint64_t aspeed_wdt_read(void *opaque, hwaddr offset, unsigned size) +{ + AspeedWDTState *s = ASPEED_WDT(opaque); + + trace_aspeed_wdt_read(offset, size); + + offset >>= 2; + + switch (offset) { + case WDT_STATUS: + return s->regs[WDT_STATUS]; + case WDT_RELOAD_VALUE: + return s->regs[WDT_RELOAD_VALUE]; + case WDT_RESTART: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: read from write-only reg at offset 0x%" + HWADDR_PRIx "\n", __func__, offset); + return 0; + case WDT_CTRL: + return s->regs[WDT_CTRL]; + case WDT_RESET_WIDTH: + return s->regs[WDT_RESET_WIDTH]; + case WDT_RESET_MASK1: + return s->regs[WDT_RESET_MASK1]; + case WDT_TIMEOUT_STATUS: + case WDT_TIMEOUT_CLEAR: + qemu_log_mask(LOG_UNIMP, + "%s: unimplemented read at offset 0x%" HWADDR_PRIx "\n", + __func__, offset); + return 0; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Out-of-bounds read at offset 0x%" HWADDR_PRIx "\n", + __func__, offset); + return 0; + } + +} + +static void aspeed_wdt_reload(AspeedWDTState *s) +{ + uint64_t reload; + + if (!(s->regs[WDT_CTRL] & WDT_CTRL_1MHZ_CLK)) { + reload = muldiv64(s->regs[WDT_RELOAD_VALUE], NANOSECONDS_PER_SECOND, + s->pclk_freq); + } else { + reload = s->regs[WDT_RELOAD_VALUE] * 1000ULL; + } + + if (aspeed_wdt_is_enabled(s)) { + timer_mod(s->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + reload); + } +} + +static void aspeed_wdt_reload_1mhz(AspeedWDTState *s) +{ + uint64_t reload = s->regs[WDT_RELOAD_VALUE] * 1000ULL; + + if (aspeed_wdt_is_enabled(s)) { + timer_mod(s->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + reload); + } +} + +static uint64_t aspeed_2400_sanitize_ctrl(uint64_t data) +{ + return data & 0xffff; +} + +static uint64_t aspeed_2500_sanitize_ctrl(uint64_t data) +{ + return (data & ~(0xfUL << 8)) | WDT_CTRL_1MHZ_CLK; +} + +static uint64_t aspeed_2600_sanitize_ctrl(uint64_t data) +{ + return data & ~(0x7UL << 7); +} + +static void aspeed_wdt_write(void *opaque, hwaddr offset, uint64_t data, + unsigned size) +{ + AspeedWDTState *s = ASPEED_WDT(opaque); + AspeedWDTClass *awc = 
ASPEED_WDT_GET_CLASS(s); + bool enable; + + trace_aspeed_wdt_write(offset, size, data); + + offset >>= 2; + + switch (offset) { + case WDT_STATUS: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: write to read-only reg at offset 0x%" + HWADDR_PRIx "\n", __func__, offset); + break; + case WDT_RELOAD_VALUE: + s->regs[WDT_RELOAD_VALUE] = data; + break; + case WDT_RESTART: + if ((data & 0xFFFF) == WDT_RESTART_MAGIC) { + s->regs[WDT_STATUS] = s->regs[WDT_RELOAD_VALUE]; + awc->wdt_reload(s); + } + break; + case WDT_CTRL: + data = awc->sanitize_ctrl(data); + enable = data & WDT_CTRL_ENABLE; + if (enable && !aspeed_wdt_is_enabled(s)) { + s->regs[WDT_CTRL] = data; + awc->wdt_reload(s); + } else if (!enable && aspeed_wdt_is_enabled(s)) { + s->regs[WDT_CTRL] = data; + timer_del(s->timer); + } else { + s->regs[WDT_CTRL] = data; + } + break; + case WDT_RESET_WIDTH: + if (awc->reset_pulse) { + awc->reset_pulse(s, data & WDT_POLARITY_MASK); + } + s->regs[WDT_RESET_WIDTH] &= ~awc->ext_pulse_width_mask; + s->regs[WDT_RESET_WIDTH] |= data & awc->ext_pulse_width_mask; + break; + + case WDT_RESET_MASK1: + /* TODO: implement */ + s->regs[WDT_RESET_MASK1] = data; + break; + + case WDT_TIMEOUT_STATUS: + case WDT_TIMEOUT_CLEAR: + qemu_log_mask(LOG_UNIMP, + "%s: unimplemented write at offset 0x%" HWADDR_PRIx "\n", + __func__, offset); + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Out-of-bounds write at offset 0x%" HWADDR_PRIx "\n", + __func__, offset); + } + return; +} + +static WatchdogTimerModel model = { + .wdt_name = TYPE_ASPEED_WDT, + .wdt_description = "Aspeed watchdog device", +}; + +static const VMStateDescription vmstate_aspeed_wdt = { + .name = "vmstate_aspeed_wdt", + .version_id = 0, + .minimum_version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_TIMER_PTR(timer, AspeedWDTState), + VMSTATE_UINT32_ARRAY(regs, AspeedWDTState, ASPEED_WDT_REGS_MAX), + VMSTATE_END_OF_LIST() + } +}; + +static const MemoryRegionOps aspeed_wdt_ops = { + .read = aspeed_wdt_read, + .write = aspeed_wdt_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid.min_access_size = 4, + .valid.max_access_size = 4, + .valid.unaligned = false, +}; + +static void aspeed_wdt_reset(DeviceState *dev) +{ + AspeedWDTState *s = ASPEED_WDT(dev); + AspeedWDTClass *awc = ASPEED_WDT_GET_CLASS(s); + + s->regs[WDT_STATUS] = 0x3EF1480; + s->regs[WDT_RELOAD_VALUE] = 0x03EF1480; + s->regs[WDT_RESTART] = 0; + s->regs[WDT_CTRL] = awc->sanitize_ctrl(0); + s->regs[WDT_RESET_WIDTH] = 0xFF; + + timer_del(s->timer); +} + +static void aspeed_wdt_timer_expired(void *dev) +{ + AspeedWDTState *s = ASPEED_WDT(dev); + uint32_t reset_ctrl_reg = ASPEED_WDT_GET_CLASS(s)->reset_ctrl_reg; + + /* Do not reset on SDRAM controller reset */ + if (s->scu->regs[reset_ctrl_reg] & SCU_RESET_SDRAM) { + timer_del(s->timer); + s->regs[WDT_CTRL] = 0; + return; + } + + qemu_log_mask(CPU_LOG_RESET, "Watchdog timer %" HWADDR_PRIx " expired.\n", + s->iomem.addr); + watchdog_perform_action(); + timer_del(s->timer); +} + +#define PCLK_HZ 24000000 + +static void aspeed_wdt_realize(DeviceState *dev, Error **errp) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + AspeedWDTState *s = ASPEED_WDT(dev); + + assert(s->scu); + + s->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, aspeed_wdt_timer_expired, dev); + + /* FIXME: This setting should be derived from the SCU hw strapping + * register SCU70 + */ + s->pclk_freq = PCLK_HZ; + + memory_region_init_io(&s->iomem, OBJECT(s), &aspeed_wdt_ops, s, + TYPE_ASPEED_WDT, ASPEED_WDT_REGS_MAX * 4); + sysbus_init_mmio(sbd, &s->iomem); +} + +static Property 
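Guest code keeps the Aspeed watchdog alive by rewriting the 0x4755 restart magic, which the WDT_RESTART case above turns into a counter reload. A bare-metal sketch; the base address is the conventional AST2500 WDT1 mapping, given here as an assumption:

#include <stdint.h>

#define WDT_BASE 0x1e785000u /* assumption: AST2500 WDT1 */
#define WDT_REG(off) (*(volatile uint32_t *)(WDT_BASE + (off)))

static void aspeed_wdt_arm(uint32_t timeout)
{
    WDT_REG(0x04) = timeout; /* reload value; 1 MHz clock => microseconds */
    WDT_REG(0x08) = 0x4755;  /* restart magic: latch reload into counter */
    WDT_REG(0x0c) = 0x3;     /* WDT_CTRL_RESET_SYSTEM | WDT_CTRL_ENABLE */
}

static void aspeed_wdt_ping(void)
{
    WDT_REG(0x08) = 0x4755;  /* any other value is ignored */
}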
aspeed_wdt_properties[] = { + DEFINE_PROP_LINK("scu", AspeedWDTState, scu, TYPE_ASPEED_SCU, + AspeedSCUState *), + DEFINE_PROP_END_OF_LIST(), +}; + +static void aspeed_wdt_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->desc = "ASPEED Watchdog Controller"; + dc->realize = aspeed_wdt_realize; + dc->reset = aspeed_wdt_reset; + set_bit(DEVICE_CATEGORY_WATCHDOG, dc->categories); + dc->vmsd = &vmstate_aspeed_wdt; + device_class_set_props(dc, aspeed_wdt_properties); + dc->desc = "Aspeed watchdog device"; +} + +static const TypeInfo aspeed_wdt_info = { + .parent = TYPE_SYS_BUS_DEVICE, + .name = TYPE_ASPEED_WDT, + .instance_size = sizeof(AspeedWDTState), + .class_init = aspeed_wdt_class_init, + .class_size = sizeof(AspeedWDTClass), + .abstract = true, +}; + +static void aspeed_2400_wdt_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + AspeedWDTClass *awc = ASPEED_WDT_CLASS(klass); + + dc->desc = "ASPEED 2400 Watchdog Controller"; + awc->offset = 0x20; + awc->ext_pulse_width_mask = 0xff; + awc->reset_ctrl_reg = SCU_RESET_CONTROL1; + awc->wdt_reload = aspeed_wdt_reload; + awc->sanitize_ctrl = aspeed_2400_sanitize_ctrl; +} + +static const TypeInfo aspeed_2400_wdt_info = { + .name = TYPE_ASPEED_2400_WDT, + .parent = TYPE_ASPEED_WDT, + .instance_size = sizeof(AspeedWDTState), + .class_init = aspeed_2400_wdt_class_init, +}; + +static void aspeed_2500_wdt_reset_pulse(AspeedWDTState *s, uint32_t property) +{ + if (property) { + if (property == WDT_ACTIVE_HIGH_MAGIC) { + s->regs[WDT_RESET_WIDTH] |= WDT_RESET_WIDTH_ACTIVE_HIGH; + } else if (property == WDT_ACTIVE_LOW_MAGIC) { + s->regs[WDT_RESET_WIDTH] &= ~WDT_RESET_WIDTH_ACTIVE_HIGH; + } else if (property == WDT_PUSH_PULL_MAGIC) { + s->regs[WDT_RESET_WIDTH] |= WDT_RESET_WIDTH_PUSH_PULL; + } else if (property == WDT_OPEN_DRAIN_MAGIC) { + s->regs[WDT_RESET_WIDTH] &= ~WDT_RESET_WIDTH_PUSH_PULL; + } + } +} + +static void aspeed_2500_wdt_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + AspeedWDTClass *awc = ASPEED_WDT_CLASS(klass); + + dc->desc = "ASPEED 2500 Watchdog Controller"; + awc->offset = 0x20; + awc->ext_pulse_width_mask = 0xfffff; + awc->reset_ctrl_reg = SCU_RESET_CONTROL1; + awc->reset_pulse = aspeed_2500_wdt_reset_pulse; + awc->wdt_reload = aspeed_wdt_reload_1mhz; + awc->sanitize_ctrl = aspeed_2500_sanitize_ctrl; +} + +static const TypeInfo aspeed_2500_wdt_info = { + .name = TYPE_ASPEED_2500_WDT, + .parent = TYPE_ASPEED_WDT, + .instance_size = sizeof(AspeedWDTState), + .class_init = aspeed_2500_wdt_class_init, +}; + +static void aspeed_2600_wdt_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + AspeedWDTClass *awc = ASPEED_WDT_CLASS(klass); + + dc->desc = "ASPEED 2600 Watchdog Controller"; + awc->offset = 0x40; + awc->ext_pulse_width_mask = 0xfffff; /* TODO */ + awc->reset_ctrl_reg = AST2600_SCU_RESET_CONTROL1; + awc->reset_pulse = aspeed_2500_wdt_reset_pulse; + awc->wdt_reload = aspeed_wdt_reload_1mhz; + awc->sanitize_ctrl = aspeed_2600_sanitize_ctrl; +} + +static const TypeInfo aspeed_2600_wdt_info = { + .name = TYPE_ASPEED_2600_WDT, + .parent = TYPE_ASPEED_WDT, + .instance_size = sizeof(AspeedWDTState), + .class_init = aspeed_2600_wdt_class_init, +}; + +static void wdt_aspeed_register_types(void) +{ + watchdog_add_model(&model); + type_register_static(&aspeed_wdt_info); + type_register_static(&aspeed_2400_wdt_info); + type_register_static(&aspeed_2500_wdt_info); + 
type_register_static(&aspeed_2600_wdt_info); +} + +type_init(wdt_aspeed_register_types) diff --git a/hw/watchdog/wdt_diag288.c b/hw/watchdog/wdt_diag288.c new file mode 100644 index 000000000..9e8882a11 --- /dev/null +++ b/hw/watchdog/wdt_diag288.c @@ -0,0 +1,145 @@ +/* + * watchdog device diag288 support + * + * Copyright IBM, Corp. 2015 + * + * Authors: + * Xu Wang + * + * This work is licensed under the terms of the GNU GPL, version 2 or (at your + * option) any later version. See the COPYING file in the top-level directory. + * + */ + +#include "qemu/osdep.h" +#include "sysemu/reset.h" +#include "sysemu/watchdog.h" +#include "qemu/timer.h" +#include "hw/watchdog/wdt_diag288.h" +#include "migration/vmstate.h" +#include "qemu/log.h" + +static WatchdogTimerModel model = { + .wdt_name = TYPE_WDT_DIAG288, + .wdt_description = "diag288 device for s390x platform", +}; + +static const VMStateDescription vmstate_diag288 = { + .name = "vmstate_diag288", + .version_id = 0, + .minimum_version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_TIMER_PTR(timer, DIAG288State), + VMSTATE_BOOL(enabled, DIAG288State), + VMSTATE_END_OF_LIST() + } +}; + +static void wdt_diag288_reset(DeviceState *dev) +{ + DIAG288State *diag288 = DIAG288(dev); + + diag288->enabled = false; + timer_del(diag288->timer); +} + +static void diag288_reset(void *opaque) +{ + DeviceState *diag288 = opaque; + + wdt_diag288_reset(diag288); +} + +static void diag288_timer_expired(void *dev) +{ + qemu_log_mask(CPU_LOG_RESET, "Watchdog timer expired.\n"); + /* Reset the watchdog only if the guest gets notified about + * expiry. watchdog_perform_action() may temporarily relinquish + * the BQL; reset before triggering the action to avoid races with + * diag288 instructions. */ + switch (get_watchdog_action()) { + case WATCHDOG_ACTION_DEBUG: + case WATCHDOG_ACTION_NONE: + case WATCHDOG_ACTION_PAUSE: + break; + default: + wdt_diag288_reset(dev); + } + watchdog_perform_action(); +} + +static int wdt_diag288_handle_timer(DIAG288State *diag288, + uint64_t func, uint64_t timeout) +{ + switch (func) { + case WDT_DIAG288_INIT: + diag288->enabled = true; + /* fall through */ + case WDT_DIAG288_CHANGE: + if (!diag288->enabled) { + return -1; + } + timer_mod(diag288->timer, + qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + + timeout * NANOSECONDS_PER_SECOND); + break; + case WDT_DIAG288_CANCEL: + if (!diag288->enabled) { + return -1; + } + diag288->enabled = false; + timer_del(diag288->timer); + break; + default: + return -1; + } + + return 0; +} + +static void wdt_diag288_realize(DeviceState *dev, Error **errp) +{ + DIAG288State *diag288 = DIAG288(dev); + + qemu_register_reset(diag288_reset, diag288); + diag288->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, diag288_timer_expired, + dev); +} + +static void wdt_diag288_unrealize(DeviceState *dev) +{ + DIAG288State *diag288 = DIAG288(dev); + + timer_free(diag288->timer); +} + +static void wdt_diag288_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + DIAG288Class *diag288 = DIAG288_CLASS(klass); + + dc->realize = wdt_diag288_realize; + dc->unrealize = wdt_diag288_unrealize; + dc->reset = wdt_diag288_reset; + dc->hotpluggable = false; + set_bit(DEVICE_CATEGORY_WATCHDOG, dc->categories); + dc->vmsd = &vmstate_diag288; + diag288->handle_timer = wdt_diag288_handle_timer; + dc->desc = "diag288 device for s390x platform"; +} + +static const TypeInfo wdt_diag288_info = { + .class_init = wdt_diag288_class_init, + .parent = TYPE_DEVICE, + .name = TYPE_WDT_DIAG288, + .instance_size = 
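The handle_timer class hook set below is invoked from the s390x DIAG 288 hypercall path rather than from MMIO. A simplified sketch of that dispatch (the real entry point lives in target/s390x; names and signature abridged):

#include "hw/watchdog/wdt_diag288.h"

static int diag288_dispatch(DIAG288State *diag288, uint64_t func,
                            uint64_t timeout)
{
    DIAG288Class *k = DIAG288_GET_CLASS(diag288);

    /* func is WDT_DIAG288_INIT/CHANGE/CANCEL taken from the guest's
     * register; a -1 return is reported as a specification exception. */
    return k->handle_timer(diag288, func, timeout);
}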
sizeof(DIAG288State), + .class_size = sizeof(DIAG288Class), +}; + +static void wdt_diag288_register_types(void) +{ + watchdog_add_model(&model); + type_register_static(&wdt_diag288_info); +} + +type_init(wdt_diag288_register_types) diff --git a/hw/watchdog/wdt_i6300esb.c b/hw/watchdog/wdt_i6300esb.c new file mode 100644 index 000000000..f99a1c9d2 --- /dev/null +++ b/hw/watchdog/wdt_i6300esb.c @@ -0,0 +1,500 @@ +/* + * Virtual hardware watchdog. + * + * Copyright (C) 2009 Red Hat Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see <http://www.gnu.org/licenses/>. + * + * By Richard W.M. Jones (rjones@redhat.com). + */ + +#include "qemu/osdep.h" + +#include "qemu/module.h" +#include "qemu/timer.h" +#include "sysemu/watchdog.h" +#include "hw/pci/pci.h" +#include "migration/vmstate.h" +#include "qom/object.h" + +/*#define I6300ESB_DEBUG 1*/ + +#ifdef I6300ESB_DEBUG +#define i6300esb_debug(fs,...) \ + fprintf(stderr,"i6300esb: %s: "fs,__func__,##__VA_ARGS__) +#else +#define i6300esb_debug(fs,...) +#endif + +/* PCI configuration registers */ +#define ESB_CONFIG_REG 0x60 /* Config register */ +#define ESB_LOCK_REG 0x68 /* WDT lock register */ + +/* Memory mapped registers (offset from base address) */ +#define ESB_TIMER1_REG 0x00 /* Timer1 value after each reset */ +#define ESB_TIMER2_REG 0x04 /* Timer2 value after each reset */ +#define ESB_GINTSR_REG 0x08 /* General Interrupt Status Register */ +#define ESB_RELOAD_REG 0x0c /* Reload register */ + +/* Lock register bits */ +#define ESB_WDT_FUNC (0x01 << 2) /* Watchdog functionality */ +#define ESB_WDT_ENABLE (0x01 << 1) /* Enable WDT */ +#define ESB_WDT_LOCK (0x01 << 0) /* Lock (nowayout) */ + +/* Config register bits */ +#define ESB_WDT_REBOOT (0x01 << 5) /* Enable reboot on timeout */ +#define ESB_WDT_FREQ (0x01 << 2) /* Decrement frequency */ +#define ESB_WDT_INTTYPE (0x11 << 0) /* Interrupt type on timer1 timeout */ + +/* Reload register bits */ +#define ESB_WDT_RELOAD (0x01 << 8) /* prevent timeout */ + +/* Magic constants */ +#define ESB_UNLOCK1 0x80 /* Step 1 to unlock reset registers */ +#define ESB_UNLOCK2 0x86 /* Step 2 to unlock reset registers */ + +/* Device state. */ +struct I6300State { + PCIDevice dev; + MemoryRegion io_mem; + + int reboot_enabled; /* "Reboot" on timer expiry. The real action + * performed depends on the -watchdog-action + * param passed on QEMU command line. + */ + int clock_scale; /* Clock scale. */ +#define CLOCK_SCALE_1KHZ 0 +#define CLOCK_SCALE_1MHZ 1 + + int int_type; /* Interrupt type generated. */ +#define INT_TYPE_IRQ 0 /* APIC 1, INT 10 */ +#define INT_TYPE_SMI 2 +#define INT_TYPE_DISABLED 3 + + int free_run; /* If true, reload timer on expiry. */ + int locked; /* If true, enabled field cannot be changed. */ + int enabled; /* If true, watchdog is enabled. */ + + QEMUTimer *timer; /* The actual watchdog timer. */ + + uint32_t timer1_preload; /* Values preloaded into timer1, timer2. */ + uint32_t timer2_preload; + int stage; /* Stage (1 or 2). 
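Reload-register writes on this device only take effect after the two-step 0x80/0x86 unlock tracked by unlock_state below; a guest-side sketch of the keep-alive ("ping") sequence the model expects (the BAR address is hypothetical):

#include <stdint.h>

#define ESB_BAR 0xfebf0000u /* assumption: wherever the BAR was mapped */
#define ESB_RELOAD_OFF 0x0c

static void esb_ping(void)
{
    volatile uint8_t  *unlock = (volatile uint8_t *)(ESB_BAR + ESB_RELOAD_OFF);
    volatile uint16_t *reload = (volatile uint16_t *)(ESB_BAR + ESB_RELOAD_OFF);

    *unlock = 0x80;  /* ESB_UNLOCK1 */
    *unlock = 0x86;  /* ESB_UNLOCK2 */
    *reload = 0x100; /* ESB_WDT_RELOAD, 16-bit write: restart stage 1 */
}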
*/ + + int unlock_state; /* Guest writes 0x80, 0x86 to unlock the + * registers, and we transition through + * states 0 -> 1 -> 2 when this happens. + */ + + int previous_reboot_flag; /* If the watchdog caused the previous + * reboot, this flag will be set. + */ +}; + + +#define TYPE_WATCHDOG_I6300ESB_DEVICE "i6300esb" +OBJECT_DECLARE_SIMPLE_TYPE(I6300State, WATCHDOG_I6300ESB_DEVICE) + +/* This function is called when the watchdog has either been enabled + * (hence it starts counting down) or has been keep-alived. + */ +static void i6300esb_restart_timer(I6300State *d, int stage) +{ + int64_t timeout; + + if (!d->enabled) + return; + + d->stage = stage; + + if (d->stage <= 1) + timeout = d->timer1_preload; + else + timeout = d->timer2_preload; + + if (d->clock_scale == CLOCK_SCALE_1KHZ) + timeout <<= 15; + else + timeout <<= 5; + + /* Get the timeout in nanoseconds. */ + + timeout = timeout * 30; /* on a PCI bus, 1 tick is 30 ns*/ + + i6300esb_debug("stage %d, timeout %" PRIi64 "\n", d->stage, timeout); + + timer_mod(d->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + timeout); +} + +/* This is called when the guest disables the watchdog. */ +static void i6300esb_disable_timer(I6300State *d) +{ + i6300esb_debug("timer disabled\n"); + + timer_del(d->timer); +} + +static void i6300esb_reset(DeviceState *dev) +{ + PCIDevice *pdev = PCI_DEVICE(dev); + I6300State *d = WATCHDOG_I6300ESB_DEVICE(pdev); + + i6300esb_debug("I6300State = %p\n", d); + + i6300esb_disable_timer(d); + + /* NB: Don't change d->previous_reboot_flag in this function. */ + + d->reboot_enabled = 1; + d->clock_scale = CLOCK_SCALE_1KHZ; + d->int_type = INT_TYPE_IRQ; + d->free_run = 0; + d->locked = 0; + d->enabled = 0; + d->timer1_preload = 0xfffff; + d->timer2_preload = 0xfffff; + d->stage = 1; + d->unlock_state = 0; +} + +/* This function is called when the watchdog expires. Note that + * the hardware has two timers, and so expiry happens in two stages. + * If d->stage == 1 then we perform the first stage action (usually, + * sending an interrupt) and then restart the timer again for the + * second stage. If the second stage expires then the watchdog + * really has run out. + */ +static void i6300esb_timer_expired(void *vp) +{ + I6300State *d = vp; + + i6300esb_debug("stage %d\n", d->stage); + + if (d->stage == 1) { + /* What to do at the end of stage 1? */ + switch (d->int_type) { + case INT_TYPE_IRQ: + fprintf(stderr, "i6300esb_timer_expired: I would send APIC 1 INT 10 here if I knew how (XXX)\n"); + break; + case INT_TYPE_SMI: + fprintf(stderr, "i6300esb_timer_expired: I would send SMI here if I knew how (XXX)\n"); + break; + } + + /* Start the second stage. */ + i6300esb_restart_timer(d, 2); + } else { + /* Second stage expired, reboot for real. */ + if (d->reboot_enabled) { + d->previous_reboot_flag = 1; + watchdog_perform_action(); /* This reboots, exits, etc */ + i6300esb_reset(DEVICE(d)); + } + + /* In "free running mode" we start stage 1 again. */ + if (d->free_run) + i6300esb_restart_timer(d, 1); + } +} + +static void i6300esb_config_write(PCIDevice *dev, uint32_t addr, + uint32_t data, int len) +{ + I6300State *d = WATCHDOG_I6300ESB_DEVICE(dev); + int old; + + i6300esb_debug("addr = %x, data = %x, len = %d\n", addr, data, len); + + if (addr == ESB_CONFIG_REG && len == 2) { + d->reboot_enabled = (data & ESB_WDT_REBOOT) == 0; + d->clock_scale = + (data & ESB_WDT_FREQ) != 0 ? 
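The scaling in i6300esb_restart_timer() is another spot where a worked number helps: the stage preload is shifted by 15 (1 kHz scale) or 5 (1 MHz scale) to get PCI ticks, and each tick is taken as 30 ns. For the reset-default preload:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    int64_t timeout = 0xfffff; /* timer1_preload after reset */

    timeout <<= 15;  /* CLOCK_SCALE_1KHZ */
    timeout *= 30;   /* ~30 ns per 33 MHz PCI tick */

    /* prints 1030791168000 ns, i.e. roughly 17 minutes per stage */
    printf("%lld ns\n", (long long)timeout);
    return 0;
}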
CLOCK_SCALE_1MHZ : CLOCK_SCALE_1KHZ; + d->int_type = (data & ESB_WDT_INTTYPE); + } else if (addr == ESB_LOCK_REG && len == 1) { + if (!d->locked) { + d->locked = (data & ESB_WDT_LOCK) != 0; + d->free_run = (data & ESB_WDT_FUNC) != 0; + old = d->enabled; + d->enabled = (data & ESB_WDT_ENABLE) != 0; + if (!old && d->enabled) /* Enabled transitioned from 0 -> 1 */ + i6300esb_restart_timer(d, 1); + else if (!d->enabled) + i6300esb_disable_timer(d); + } + } else { + pci_default_write_config(dev, addr, data, len); + } +} + +static uint32_t i6300esb_config_read(PCIDevice *dev, uint32_t addr, int len) +{ + I6300State *d = WATCHDOG_I6300ESB_DEVICE(dev); + uint32_t data; + + i6300esb_debug ("addr = %x, len = %d\n", addr, len); + + if (addr == ESB_CONFIG_REG && len == 2) { + data = + (d->reboot_enabled ? 0 : ESB_WDT_REBOOT) | + (d->clock_scale == CLOCK_SCALE_1MHZ ? ESB_WDT_FREQ : 0) | + d->int_type; + return data; + } else if (addr == ESB_LOCK_REG && len == 1) { + data = + (d->free_run ? ESB_WDT_FUNC : 0) | + (d->locked ? ESB_WDT_LOCK : 0) | + (d->enabled ? ESB_WDT_ENABLE : 0); + return data; + } else { + return pci_default_read_config(dev, addr, len); + } +} + +static uint32_t i6300esb_mem_readb(void *vp, hwaddr addr) +{ + i6300esb_debug ("addr = %x\n", (int) addr); + + return 0; +} + +static uint32_t i6300esb_mem_readw(void *vp, hwaddr addr) +{ + uint32_t data = 0; + I6300State *d = vp; + + i6300esb_debug("addr = %x\n", (int) addr); + + if (addr == 0xc) { + /* The previous reboot flag is really bit 9, but there is + * a bug in the Linux driver where it thinks it's bit 12. + * Set both. + */ + data = d->previous_reboot_flag ? 0x1200 : 0; + } + + return data; +} + +static uint32_t i6300esb_mem_readl(void *vp, hwaddr addr) +{ + i6300esb_debug("addr = %x\n", (int) addr); + + return 0; +} + +static void i6300esb_mem_writeb(void *vp, hwaddr addr, uint32_t val) +{ + I6300State *d = vp; + + i6300esb_debug("addr = %x, val = %x\n", (int) addr, val); + + if (addr == 0xc && val == 0x80) + d->unlock_state = 1; + else if (addr == 0xc && val == 0x86 && d->unlock_state == 1) + d->unlock_state = 2; +} + +static void i6300esb_mem_writew(void *vp, hwaddr addr, uint32_t val) +{ + I6300State *d = vp; + + i6300esb_debug("addr = %x, val = %x\n", (int) addr, val); + + if (addr == 0xc && val == 0x80) + d->unlock_state = 1; + else if (addr == 0xc && val == 0x86 && d->unlock_state == 1) + d->unlock_state = 2; + else { + if (d->unlock_state == 2) { + if (addr == 0xc) { + if ((val & 0x100) != 0) + /* This is the "ping" from the userspace watchdog in + * the guest ... + */ + i6300esb_restart_timer(d, 1); + + /* Setting bit 9 resets the previous reboot flag. + * There's a bug in the Linux driver where it sets + * bit 12 instead. 
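+                 * Accept either bit here so that both correct and
+                 * buggy drivers can clear the flag.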
+ */ + if ((val & 0x200) != 0 || (val & 0x1000) != 0) { + d->previous_reboot_flag = 0; + } + } + + d->unlock_state = 0; + } + } +} + +static void i6300esb_mem_writel(void *vp, hwaddr addr, uint32_t val) +{ + I6300State *d = vp; + + i6300esb_debug ("addr = %x, val = %x\n", (int) addr, val); + + if (addr == 0xc && val == 0x80) + d->unlock_state = 1; + else if (addr == 0xc && val == 0x86 && d->unlock_state == 1) + d->unlock_state = 2; + else { + if (d->unlock_state == 2) { + if (addr == 0) + d->timer1_preload = val & 0xfffff; + else if (addr == 4) + d->timer2_preload = val & 0xfffff; + + d->unlock_state = 0; + } + } +} + +static uint64_t i6300esb_mem_readfn(void *opaque, hwaddr addr, unsigned size) +{ + switch (size) { + case 1: + return i6300esb_mem_readb(opaque, addr); + case 2: + return i6300esb_mem_readw(opaque, addr); + case 4: + return i6300esb_mem_readl(opaque, addr); + default: + g_assert_not_reached(); + } +} + +static void i6300esb_mem_writefn(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + switch (size) { + case 1: + i6300esb_mem_writeb(opaque, addr, value); + break; + case 2: + i6300esb_mem_writew(opaque, addr, value); + break; + case 4: + i6300esb_mem_writel(opaque, addr, value); + break; + default: + g_assert_not_reached(); + } +} + +static const MemoryRegionOps i6300esb_ops = { + .read = i6300esb_mem_readfn, + .write = i6300esb_mem_writefn, + .valid.min_access_size = 1, + .valid.max_access_size = 4, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static const VMStateDescription vmstate_i6300esb = { + .name = "i6300esb_wdt", + /* With this VMSD's introduction, version_id/minimum_version_id were + * erroneously set to sizeof(I6300State), causing a somewhat random + * version_id to be set for every build. This eventually broke + * migration. + * + * To correct this without breaking old->new migration for older + * versions of QEMU, we've set version_id to a value high enough + * to exceed all past values of sizeof(I6300State) across various + * build environments, and have reset minimum_version_id to 1, + * since this VMSD has never changed and thus can accept all past + * versions. + * + * For future changes we can treat these values as we normally would. 
+ */ + .version_id = 10000, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_PCI_DEVICE(dev, I6300State), + VMSTATE_INT32(reboot_enabled, I6300State), + VMSTATE_INT32(clock_scale, I6300State), + VMSTATE_INT32(int_type, I6300State), + VMSTATE_INT32(free_run, I6300State), + VMSTATE_INT32(locked, I6300State), + VMSTATE_INT32(enabled, I6300State), + VMSTATE_TIMER_PTR(timer, I6300State), + VMSTATE_UINT32(timer1_preload, I6300State), + VMSTATE_UINT32(timer2_preload, I6300State), + VMSTATE_INT32(stage, I6300State), + VMSTATE_INT32(unlock_state, I6300State), + VMSTATE_INT32(previous_reboot_flag, I6300State), + VMSTATE_END_OF_LIST() + } +}; + +static void i6300esb_realize(PCIDevice *dev, Error **errp) +{ + I6300State *d = WATCHDOG_I6300ESB_DEVICE(dev); + + i6300esb_debug("I6300State = %p\n", d); + + d->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, i6300esb_timer_expired, d); + d->previous_reboot_flag = 0; + + memory_region_init_io(&d->io_mem, OBJECT(d), &i6300esb_ops, d, + "i6300esb", 0x10); + pci_register_bar(&d->dev, 0, 0, &d->io_mem); +} + +static void i6300esb_exit(PCIDevice *dev) +{ + I6300State *d = WATCHDOG_I6300ESB_DEVICE(dev); + + timer_free(d->timer); +} + +static WatchdogTimerModel model = { + .wdt_name = "i6300esb", + .wdt_description = "Intel 6300ESB", +}; + +static void i6300esb_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + + k->config_read = i6300esb_config_read; + k->config_write = i6300esb_config_write; + k->realize = i6300esb_realize; + k->exit = i6300esb_exit; + k->vendor_id = PCI_VENDOR_ID_INTEL; + k->device_id = PCI_DEVICE_ID_INTEL_ESB_9; + k->class_id = PCI_CLASS_SYSTEM_OTHER; + dc->reset = i6300esb_reset; + dc->vmsd = &vmstate_i6300esb; + set_bit(DEVICE_CATEGORY_WATCHDOG, dc->categories); + dc->desc = "Intel 6300ESB"; +} + +static const TypeInfo i6300esb_info = { + .name = TYPE_WATCHDOG_I6300ESB_DEVICE, + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(I6300State), + .class_init = i6300esb_class_init, + .interfaces = (InterfaceInfo[]) { + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { }, + }, +}; + +static void i6300esb_register_types(void) +{ + watchdog_add_model(&model); + type_register_static(&i6300esb_info); +} + +type_init(i6300esb_register_types) diff --git a/hw/watchdog/wdt_ib700.c b/hw/watchdog/wdt_ib700.c new file mode 100644 index 000000000..91d1bdc0d --- /dev/null +++ b/hw/watchdog/wdt_ib700.c @@ -0,0 +1,160 @@ +/* + * Virtual hardware watchdog. + * + * Copyright (C) 2009 Red Hat Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + * + * By Richard W.M. Jones (rjones@redhat.com). + */ + +#include "qemu/osdep.h" +#include "qemu/module.h" +#include "qemu/timer.h" +#include "sysemu/watchdog.h" +#include "hw/isa/isa.h" +#include "migration/vmstate.h" +#include "qom/object.h" + +/*#define IB700_DEBUG 1*/ + +#ifdef IB700_DEBUG +#define ib700_debug(fs,...) 
\ + fprintf(stderr,"ib700: %s: "fs,__func__,##__VA_ARGS__) +#else +#define ib700_debug(fs,...) +#endif + +#define TYPE_IB700 "ib700" +typedef struct IB700state IB700State; +DECLARE_INSTANCE_CHECKER(IB700State, IB700, + TYPE_IB700) + +struct IB700state { + ISADevice parent_obj; + + QEMUTimer *timer; + + PortioList port_list; +}; + +/* This is the timer. We use a global here because the watchdog + * code ensures there is only one watchdog (it is located at a fixed, + * unchangeable IO port, so there could only ever be one anyway). + */ + +/* A write to this register enables the timer. */ +static void ib700_write_enable_reg(void *vp, uint32_t addr, uint32_t data) +{ + IB700State *s = vp; + static int time_map[] = { + 30, 28, 26, 24, 22, 20, 18, 16, + 14, 12, 10, 8, 6, 4, 2, 0 + }; + int64_t timeout; + + ib700_debug("addr = %x, data = %x\n", addr, data); + + timeout = (int64_t) time_map[data & 0xF] * NANOSECONDS_PER_SECOND; + timer_mod(s->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + timeout); +} + +/* A write (of any value) to this register disables the timer. */ +static void ib700_write_disable_reg(void *vp, uint32_t addr, uint32_t data) +{ + IB700State *s = vp; + + ib700_debug("addr = %x, data = %x\n", addr, data); + + timer_del(s->timer); +} + +/* This is called when the watchdog expires. */ +static void ib700_timer_expired(void *vp) +{ + IB700State *s = vp; + + ib700_debug("watchdog expired\n"); + + watchdog_perform_action(); + timer_del(s->timer); +} + +static const VMStateDescription vmstate_ib700 = { + .name = "ib700_wdt", + .version_id = 0, + .minimum_version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_TIMER_PTR(timer, IB700State), + VMSTATE_END_OF_LIST() + } +}; + +static const MemoryRegionPortio wdt_portio_list[] = { + { 0x441, 2, 1, .write = ib700_write_disable_reg, }, + { 0x443, 2, 1, .write = ib700_write_enable_reg, }, + PORTIO_END_OF_LIST(), +}; + +static void wdt_ib700_realize(DeviceState *dev, Error **errp) +{ + IB700State *s = IB700(dev); + + ib700_debug("watchdog init\n"); + + s->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, ib700_timer_expired, s); + + portio_list_init(&s->port_list, OBJECT(s), wdt_portio_list, s, "ib700"); + portio_list_add(&s->port_list, isa_address_space_io(&s->parent_obj), 0); +} + +static void wdt_ib700_reset(DeviceState *dev) +{ + IB700State *s = IB700(dev); + + ib700_debug("watchdog reset\n"); + + timer_del(s->timer); +} + +static WatchdogTimerModel model = { + .wdt_name = "ib700", + .wdt_description = "iBASE 700", +}; + +static void wdt_ib700_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = wdt_ib700_realize; + dc->reset = wdt_ib700_reset; + dc->vmsd = &vmstate_ib700; + set_bit(DEVICE_CATEGORY_WATCHDOG, dc->categories); + dc->desc = "iBASE 700"; +} + +static const TypeInfo wdt_ib700_info = { + .name = TYPE_IB700, + .parent = TYPE_ISA_DEVICE, + .instance_size = sizeof(IB700State), + .class_init = wdt_ib700_class_init, +}; + +static void wdt_ib700_register_types(void) +{ + watchdog_add_model(&model); + type_register_static(&wdt_ib700_info); +} + +type_init(wdt_ib700_register_types) diff --git a/hw/watchdog/wdt_imx2.c b/hw/watchdog/wdt_imx2.c new file mode 100644 index 000000000..c3128370b --- /dev/null +++ b/hw/watchdog/wdt_imx2.c @@ -0,0 +1,304 @@ +/* + * Copyright (c) 2018, Impinj, Inc. + * + * i.MX2 Watchdog IP block + * + * Author: Andrey Smirnov + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ */ + +#include "qemu/osdep.h" +#include "qemu/bitops.h" +#include "qemu/module.h" +#include "sysemu/watchdog.h" +#include "migration/vmstate.h" +#include "hw/qdev-properties.h" + +#include "hw/watchdog/wdt_imx2.h" + +static void imx2_wdt_interrupt(void *opaque) +{ + IMX2WdtState *s = IMX2_WDT(opaque); + + s->wicr |= IMX2_WDT_WICR_WTIS; + qemu_set_irq(s->irq, 1); +} + +static void imx2_wdt_expired(void *opaque) +{ + IMX2WdtState *s = IMX2_WDT(opaque); + + s->wrsr = IMX2_WDT_WRSR_TOUT; + + /* Perform watchdog action if watchdog is enabled */ + if (s->wcr & IMX2_WDT_WCR_WDE) { + s->wrsr = IMX2_WDT_WRSR_TOUT; + watchdog_perform_action(); + } +} + +static void imx2_wdt_reset(DeviceState *dev) +{ + IMX2WdtState *s = IMX2_WDT(dev); + + ptimer_transaction_begin(s->timer); + ptimer_stop(s->timer); + ptimer_transaction_commit(s->timer); + + if (s->pretimeout_support) { + ptimer_transaction_begin(s->itimer); + ptimer_stop(s->itimer); + ptimer_transaction_commit(s->itimer); + } + + s->wicr_locked = false; + s->wcr_locked = false; + s->wcr_wde_locked = false; + + s->wcr = IMX2_WDT_WCR_WDA | IMX2_WDT_WCR_SRS; + s->wsr = 0; + s->wrsr &= ~(IMX2_WDT_WRSR_TOUT | IMX2_WDT_WRSR_SFTW); + s->wicr = IMX2_WDT_WICR_WICT_DEF; + s->wmcr = IMX2_WDT_WMCR_PDE; +} + +static uint64_t imx2_wdt_read(void *opaque, hwaddr addr, unsigned int size) +{ + IMX2WdtState *s = IMX2_WDT(opaque); + + switch (addr) { + case IMX2_WDT_WCR: + return s->wcr; + case IMX2_WDT_WSR: + return s->wsr; + case IMX2_WDT_WRSR: + return s->wrsr; + case IMX2_WDT_WICR: + return s->wicr; + case IMX2_WDT_WMCR: + return s->wmcr; + } + return 0; +} + +static void imx_wdt2_update_itimer(IMX2WdtState *s, bool start) +{ + bool running = (s->wcr & IMX2_WDT_WCR_WDE) && (s->wcr & IMX2_WDT_WCR_WT); + bool enabled = s->wicr & IMX2_WDT_WICR_WIE; + + ptimer_transaction_begin(s->itimer); + if (start || !enabled) { + ptimer_stop(s->itimer); + } + if (running && enabled) { + int count = ptimer_get_count(s->timer); + int pretimeout = s->wicr & IMX2_WDT_WICR_WICT; + + /* + * Only (re-)start pretimeout timer if its counter value is larger + * than 0. Otherwise it will fire right away and we'll get an + * interrupt loop. + */ + if (count > pretimeout) { + ptimer_set_count(s->itimer, count - pretimeout); + if (start) { + ptimer_run(s->itimer, 1); + } + } + } + ptimer_transaction_commit(s->itimer); +} + +static void imx_wdt2_update_timer(IMX2WdtState *s, bool start) +{ + ptimer_transaction_begin(s->timer); + if (start) { + ptimer_stop(s->timer); + } + if ((s->wcr & IMX2_WDT_WCR_WDE) && (s->wcr & IMX2_WDT_WCR_WT)) { + int count = (s->wcr & IMX2_WDT_WCR_WT) >> 8; + + /* A value of 0 reflects one period (0.5s). 
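+         * The ptimer runs at 2 Hz, so a WT field of N gives a
+         * timeout of (N + 1) * 0.5 s.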
*/ + ptimer_set_count(s->timer, count + 1); + if (start) { + ptimer_run(s->timer, 1); + } + } + ptimer_transaction_commit(s->timer); + if (s->pretimeout_support) { + imx_wdt2_update_itimer(s, start); + } +} + +static void imx2_wdt_write(void *opaque, hwaddr addr, + uint64_t value, unsigned int size) +{ + IMX2WdtState *s = IMX2_WDT(opaque); + + switch (addr) { + case IMX2_WDT_WCR: + if (s->wcr_locked) { + value &= ~IMX2_WDT_WCR_LOCK_MASK; + value |= (s->wicr & IMX2_WDT_WCR_LOCK_MASK); + } + s->wcr_locked = true; + if (s->wcr_wde_locked) { + value &= ~IMX2_WDT_WCR_WDE; + value |= (s->wicr & ~IMX2_WDT_WCR_WDE); + } else if (value & IMX2_WDT_WCR_WDE) { + s->wcr_wde_locked = true; + } + if (s->wcr_wdt_locked) { + value &= ~IMX2_WDT_WCR_WDT; + value |= (s->wicr & ~IMX2_WDT_WCR_WDT); + } else if (value & IMX2_WDT_WCR_WDT) { + s->wcr_wdt_locked = true; + } + + s->wcr = value; + if (!(value & IMX2_WDT_WCR_SRS)) { + s->wrsr = IMX2_WDT_WRSR_SFTW; + } + if (!(value & (IMX2_WDT_WCR_WDA | IMX2_WDT_WCR_SRS)) || + (!(value & IMX2_WDT_WCR_WT) && (value & IMX2_WDT_WCR_WDE))) { + watchdog_perform_action(); + } + s->wcr |= IMX2_WDT_WCR_SRS; + imx_wdt2_update_timer(s, true); + break; + case IMX2_WDT_WSR: + if (s->wsr == IMX2_WDT_SEQ1 && value == IMX2_WDT_SEQ2) { + imx_wdt2_update_timer(s, false); + } + s->wsr = value; + break; + case IMX2_WDT_WRSR: + break; + case IMX2_WDT_WICR: + if (!s->pretimeout_support) { + return; + } + value &= IMX2_WDT_WICR_LOCK_MASK | IMX2_WDT_WICR_WTIS; + if (s->wicr_locked) { + value &= IMX2_WDT_WICR_WTIS; + value |= (s->wicr & IMX2_WDT_WICR_LOCK_MASK); + } + s->wicr = value | (s->wicr & IMX2_WDT_WICR_WTIS); + if (value & IMX2_WDT_WICR_WTIS) { + s->wicr &= ~IMX2_WDT_WICR_WTIS; + qemu_set_irq(s->irq, 0); + } + imx_wdt2_update_itimer(s, true); + s->wicr_locked = true; + break; + case IMX2_WDT_WMCR: + s->wmcr = value & IMX2_WDT_WMCR_PDE; + break; + } +} + +static const MemoryRegionOps imx2_wdt_ops = { + .read = imx2_wdt_read, + .write = imx2_wdt_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .impl = { + /* + * Our device would not work correctly if the guest was doing + * unaligned access. This might not be a limitation on the + * real device but in practice there is no reason for a guest + * to access this device unaligned. 
+ */ + .min_access_size = 2, + .max_access_size = 2, + .unaligned = false, + }, +}; + +static const VMStateDescription vmstate_imx2_wdt = { + .name = "imx2.wdt", + .fields = (VMStateField[]) { + VMSTATE_PTIMER(timer, IMX2WdtState), + VMSTATE_PTIMER(itimer, IMX2WdtState), + VMSTATE_BOOL(wicr_locked, IMX2WdtState), + VMSTATE_BOOL(wcr_locked, IMX2WdtState), + VMSTATE_BOOL(wcr_wde_locked, IMX2WdtState), + VMSTATE_BOOL(wcr_wdt_locked, IMX2WdtState), + VMSTATE_UINT16(wcr, IMX2WdtState), + VMSTATE_UINT16(wsr, IMX2WdtState), + VMSTATE_UINT16(wrsr, IMX2WdtState), + VMSTATE_UINT16(wmcr, IMX2WdtState), + VMSTATE_UINT16(wicr, IMX2WdtState), + VMSTATE_END_OF_LIST() + } +}; + +static void imx2_wdt_realize(DeviceState *dev, Error **errp) +{ + IMX2WdtState *s = IMX2_WDT(dev); + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + + memory_region_init_io(&s->mmio, OBJECT(dev), + &imx2_wdt_ops, s, + TYPE_IMX2_WDT, + IMX2_WDT_MMIO_SIZE); + sysbus_init_mmio(sbd, &s->mmio); + sysbus_init_irq(sbd, &s->irq); + + s->timer = ptimer_init(imx2_wdt_expired, s, + PTIMER_POLICY_NO_IMMEDIATE_TRIGGER | + PTIMER_POLICY_NO_IMMEDIATE_RELOAD | + PTIMER_POLICY_NO_COUNTER_ROUND_DOWN); + ptimer_transaction_begin(s->timer); + ptimer_set_freq(s->timer, 2); + ptimer_set_limit(s->timer, 0xff, 1); + ptimer_transaction_commit(s->timer); + if (s->pretimeout_support) { + s->itimer = ptimer_init(imx2_wdt_interrupt, s, + PTIMER_POLICY_NO_IMMEDIATE_TRIGGER | + PTIMER_POLICY_NO_IMMEDIATE_RELOAD | + PTIMER_POLICY_NO_COUNTER_ROUND_DOWN); + ptimer_transaction_begin(s->itimer); + ptimer_set_freq(s->itimer, 2); + ptimer_set_limit(s->itimer, 0xff, 1); + ptimer_transaction_commit(s->itimer); + } +} + +static Property imx2_wdt_properties[] = { + DEFINE_PROP_BOOL("pretimeout-support", IMX2WdtState, pretimeout_support, + false), + DEFINE_PROP_END_OF_LIST() +}; + +static void imx2_wdt_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + device_class_set_props(dc, imx2_wdt_properties); + dc->realize = imx2_wdt_realize; + dc->reset = imx2_wdt_reset; + dc->vmsd = &vmstate_imx2_wdt; + dc->desc = "i.MX2 watchdog timer"; + set_bit(DEVICE_CATEGORY_WATCHDOG, dc->categories); +} + +static const TypeInfo imx2_wdt_info = { + .name = TYPE_IMX2_WDT, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(IMX2WdtState), + .class_init = imx2_wdt_class_init, +}; + +static WatchdogTimerModel model = { + .wdt_name = "imx2-watchdog", + .wdt_description = "i.MX2 Watchdog", +}; + +static void imx2_wdt_register_type(void) +{ + watchdog_add_model(&model); + type_register_static(&imx2_wdt_info); +} +type_init(imx2_wdt_register_type) diff --git a/hw/xen/meson.build b/hw/xen/meson.build new file mode 100644 index 000000000..076954b89 --- /dev/null +++ b/hw/xen/meson.build @@ -0,0 +1,20 @@ +softmmu_ss.add(when: ['CONFIG_XEN', xen], if_true: files( + 'xen-backend.c', + 'xen-bus-helper.c', + 'xen-bus.c', + 'xen-legacy-backend.c', + 'xen_devconfig.c', + 'xen_pvdev.c', +)) + +xen_specific_ss = ss.source_set() +xen_specific_ss.add(when: 'CONFIG_XEN_PCI_PASSTHROUGH', if_true: files( + 'xen-host-pci-device.c', + 'xen_pt.c', + 'xen_pt_config_init.c', + 'xen_pt_graphics.c', + 'xen_pt_load_rom.c', + 'xen_pt_msi.c', +), if_false: files('xen_pt_stub.c')) + +specific_ss.add_all(when: ['CONFIG_XEN', xen], if_true: xen_specific_ss) diff --git a/hw/xen/trace-events b/hw/xen/trace-events new file mode 100644 index 000000000..3da3fd834 --- /dev/null +++ b/hw/xen/trace-events @@ -0,0 +1,43 @@ +# See docs/devel/tracing.rst for syntax documentation. 
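+# Each record is the event name, its typed arguments and a printf-style
+# format string, e.g. (a hypothetical event, for illustration only):
+#   xen_example_event(uint32_t id) "id: %u"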
+ +# ../../include/hw/xen/xen_common.h +xen_default_ioreq_server(void) "" +xen_ioreq_server_create(uint32_t id) "id: %u" +xen_ioreq_server_destroy(uint32_t id) "id: %u" +xen_ioreq_server_state(uint32_t id, bool enable) "id: %u: enable: %i" +xen_map_mmio_range(uint32_t id, uint64_t start_addr, uint64_t end_addr) "id: %u start: 0x%"PRIx64" end: 0x%"PRIx64 +xen_unmap_mmio_range(uint32_t id, uint64_t start_addr, uint64_t end_addr) "id: %u start: 0x%"PRIx64" end: 0x%"PRIx64 +xen_map_portio_range(uint32_t id, uint64_t start_addr, uint64_t end_addr) "id: %u start: 0x%"PRIx64" end: 0x%"PRIx64 +xen_unmap_portio_range(uint32_t id, uint64_t start_addr, uint64_t end_addr) "id: %u start: 0x%"PRIx64" end: 0x%"PRIx64 +xen_map_pcidev(uint32_t id, uint8_t bus, uint8_t dev, uint8_t func) "id: %u bdf: %02x.%02x.%02x" +xen_unmap_pcidev(uint32_t id, uint8_t bus, uint8_t dev, uint8_t func) "id: %u bdf: %02x.%02x.%02x" +xen_domid_restrict(int err) "err: %u" + +# xen-bus.c +xen_bus_realize(void) "" +xen_bus_unrealize(void) "" +xen_bus_enumerate(void) "" +xen_bus_cleanup(void) "" +xen_bus_type_enumerate(const char *type) "type: %s" +xen_bus_backend_create(const char *type, const char *path) "type: %s path: %s" +xen_bus_device_cleanup(const char *type, char *name) "type: %s name: %s" +xen_bus_add_watch(const char *node, const char *key) "node: %s key: %s" +xen_bus_remove_watch(const char *node, const char *key) "node: %s key: %s" +xen_device_realize(const char *type, char *name) "type: %s name: %s" +xen_device_unrealize(const char *type, char *name) "type: %s name: %s" +xen_device_backend_state(const char *type, char *name, const char *state) "type: %s name: %s -> %s" +xen_device_backend_online(const char *type, char *name, bool online) "type: %s name: %s -> %u" +xen_device_backend_changed(const char *type, char *name) "type: %s name: %s" +xen_device_frontend_state(const char *type, char *name, const char *state) "type: %s name: %s -> %s" +xen_device_frontend_changed(const char *type, char *name) "type: %s name: %s" +xen_device_unplug(const char *type, char *name) "type: %s name: %s" +xen_device_add_watch(const char *type, char *name, const char *node, const char *key) "type: %s name: %s node: %s key: %s" +xen_device_remove_watch(const char *type, char *name, const char *node, const char *key) "type: %s name: %s node: %s key: %s" + +# xen-bus-helper.c +xs_node_create(const char *node) "%s" +xs_node_destroy(const char *node) "%s" +xs_node_vprintf(char *path, char *value) "%s %s" +xs_node_vscanf(char *path, char *value) "%s %s" +xs_node_watch(char *path) "%s" +xs_node_unwatch(char *path) "%s" diff --git a/hw/xen/trace.h b/hw/xen/trace.h new file mode 100644 index 000000000..adba31a13 --- /dev/null +++ b/hw/xen/trace.h @@ -0,0 +1 @@ +#include "trace/trace-hw_xen.h" diff --git a/hw/xen/xen-backend.c b/hw/xen/xen-backend.c new file mode 100644 index 000000000..5b0fb76ea --- /dev/null +++ b/hw/xen/xen-backend.c @@ -0,0 +1,175 @@ +/* + * Copyright (c) 2018 Citrix Systems Inc. + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ */ + +#include "qemu/osdep.h" +#include "qemu/error-report.h" +#include "qapi/error.h" +#include "hw/xen/xen-backend.h" +#include "hw/xen/xen-bus.h" + +typedef struct XenBackendImpl { + const char *type; + XenBackendDeviceCreate create; + XenBackendDeviceDestroy destroy; +} XenBackendImpl; + +struct XenBackendInstance { + QLIST_ENTRY(XenBackendInstance) entry; + const XenBackendImpl *impl; + XenBus *xenbus; + char *name; + XenDevice *xendev; +}; + +static GHashTable *xen_backend_table_get(void) +{ + static GHashTable *table; + + if (table == NULL) { + table = g_hash_table_new(g_str_hash, g_str_equal); + } + + return table; +} + +static void xen_backend_table_add(XenBackendImpl *impl) +{ + g_hash_table_insert(xen_backend_table_get(), (void *)impl->type, impl); +} + +static const char **xen_backend_table_keys(unsigned int *count) +{ + return (const char **)g_hash_table_get_keys_as_array( + xen_backend_table_get(), count); +} + +static const XenBackendImpl *xen_backend_table_lookup(const char *type) +{ + return g_hash_table_lookup(xen_backend_table_get(), type); +} + +void xen_backend_register(const XenBackendInfo *info) +{ + XenBackendImpl *impl = g_new0(XenBackendImpl, 1); + + g_assert(info->type); + + if (xen_backend_table_lookup(info->type)) { + error_report("attempt to register duplicate Xen backend type '%s'", + info->type); + abort(); + } + + if (!info->create) { + error_report("backend type '%s' has no creator", info->type); + abort(); + } + + impl->type = info->type; + impl->create = info->create; + impl->destroy = info->destroy; + + xen_backend_table_add(impl); +} + +const char **xen_backend_get_types(unsigned int *count) +{ + return xen_backend_table_keys(count); +} + +static QLIST_HEAD(, XenBackendInstance) backend_list; + +static void xen_backend_list_add(XenBackendInstance *backend) +{ + QLIST_INSERT_HEAD(&backend_list, backend, entry); +} + +static XenBackendInstance *xen_backend_list_find(XenDevice *xendev) +{ + XenBackendInstance *backend; + + QLIST_FOREACH(backend, &backend_list, entry) { + if (backend->xendev == xendev) { + return backend; + } + } + + return NULL; +} + +static void xen_backend_list_remove(XenBackendInstance *backend) +{ + QLIST_REMOVE(backend, entry); +} + +void xen_backend_device_create(XenBus *xenbus, const char *type, + const char *name, QDict *opts, Error **errp) +{ + ERRP_GUARD(); + const XenBackendImpl *impl = xen_backend_table_lookup(type); + XenBackendInstance *backend; + + if (!impl) { + return; + } + + backend = g_new0(XenBackendInstance, 1); + backend->xenbus = xenbus; + backend->name = g_strdup(name); + + impl->create(backend, opts, errp); + if (*errp) { + g_free(backend->name); + g_free(backend); + return; + } + + backend->impl = impl; + xen_backend_list_add(backend); +} + +XenBus *xen_backend_get_bus(XenBackendInstance *backend) +{ + return backend->xenbus; +} + +const char *xen_backend_get_name(XenBackendInstance *backend) +{ + return backend->name; +} + +void xen_backend_set_device(XenBackendInstance *backend, + XenDevice *xendev) +{ + g_assert(!backend->xendev); + backend->xendev = xendev; +} + +XenDevice *xen_backend_get_device(XenBackendInstance *backend) +{ + return backend->xendev; +} + + +bool xen_backend_try_device_destroy(XenDevice *xendev, Error **errp) +{ + XenBackendInstance *backend = xen_backend_list_find(xendev); + const XenBackendImpl *impl; + + if (!backend) { + return false; + } + + impl = backend->impl; + impl->destroy(backend, errp); + + xen_backend_list_remove(backend); + g_free(backend->name); + g_free(backend); + + 
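+    /* The instance has been destroyed and unlinked; report it as handled. */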
return true; +} diff --git a/hw/xen/xen-bus-helper.c b/hw/xen/xen-bus-helper.c new file mode 100644 index 000000000..5a1e12b37 --- /dev/null +++ b/hw/xen/xen-bus-helper.c @@ -0,0 +1,182 @@ +/* + * Copyright (c) 2018 Citrix Systems Inc. + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "hw/xen/xen.h" +#include "hw/xen/xen-bus.h" +#include "hw/xen/xen-bus-helper.h" +#include "qapi/error.h" + +#include + +struct xs_state { + enum xenbus_state statenum; + const char *statestr; +}; +#define XS_STATE(state) { state, #state } + +static struct xs_state xs_state[] = { + XS_STATE(XenbusStateUnknown), + XS_STATE(XenbusStateInitialising), + XS_STATE(XenbusStateInitWait), + XS_STATE(XenbusStateInitialised), + XS_STATE(XenbusStateConnected), + XS_STATE(XenbusStateClosing), + XS_STATE(XenbusStateClosed), + XS_STATE(XenbusStateReconfiguring), + XS_STATE(XenbusStateReconfigured), +}; + +#undef XS_STATE + +const char *xs_strstate(enum xenbus_state state) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(xs_state); i++) { + if (xs_state[i].statenum == state) { + return xs_state[i].statestr; + } + } + + return "INVALID"; +} + +void xs_node_create(struct xs_handle *xsh, xs_transaction_t tid, + const char *node, struct xs_permissions perms[], + unsigned int nr_perms, Error **errp) +{ + trace_xs_node_create(node); + + if (!xs_write(xsh, tid, node, "", 0)) { + error_setg_errno(errp, errno, "failed to create node '%s'", node); + return; + } + + if (!xs_set_permissions(xsh, tid, node, perms, nr_perms)) { + error_setg_errno(errp, errno, "failed to set node '%s' permissions", + node); + } +} + +void xs_node_destroy(struct xs_handle *xsh, xs_transaction_t tid, + const char *node, Error **errp) +{ + trace_xs_node_destroy(node); + + if (!xs_rm(xsh, tid, node)) { + error_setg_errno(errp, errno, "failed to destroy node '%s'", node); + } +} + +void xs_node_vprintf(struct xs_handle *xsh, xs_transaction_t tid, + const char *node, const char *key, Error **errp, + const char *fmt, va_list ap) +{ + char *path, *value; + int len; + + path = (strlen(node) != 0) ? g_strdup_printf("%s/%s", node, key) : + g_strdup(key); + len = g_vasprintf(&value, fmt, ap); + + trace_xs_node_vprintf(path, value); + + if (!xs_write(xsh, tid, path, value, len)) { + error_setg_errno(errp, errno, "failed to write '%s' to '%s'", + value, path); + } + + g_free(value); + g_free(path); +} + +void xs_node_printf(struct xs_handle *xsh, xs_transaction_t tid, + const char *node, const char *key, Error **errp, + const char *fmt, ...) +{ + va_list ap; + + va_start(ap, fmt); + xs_node_vprintf(xsh, tid, node, key, errp, fmt, ap); + va_end(ap); +} + +int xs_node_vscanf(struct xs_handle *xsh, xs_transaction_t tid, + const char *node, const char *key, Error **errp, + const char *fmt, va_list ap) +{ + char *path, *value; + int rc; + + path = (strlen(node) != 0) ? g_strdup_printf("%s/%s", node, key) : + g_strdup(key); + value = xs_read(xsh, tid, path, NULL); + + trace_xs_node_vscanf(path, value); + + if (value) { + rc = vsscanf(value, fmt, ap); + } else { + error_setg_errno(errp, errno, "failed to read from '%s'", + path); + rc = EOF; + } + + free(value); + g_free(path); + + return rc; +} + +int xs_node_scanf(struct xs_handle *xsh, xs_transaction_t tid, + const char *node, const char *key, Error **errp, + const char *fmt, ...) 
+{ + va_list ap; + int rc; + + va_start(ap, fmt); + rc = xs_node_vscanf(xsh, tid, node, key, errp, fmt, ap); + va_end(ap); + + return rc; +} + +void xs_node_watch(struct xs_handle *xsh, const char *node, const char *key, + char *token, Error **errp) +{ + char *path; + + path = (strlen(node) != 0) ? g_strdup_printf("%s/%s", node, key) : + g_strdup(key); + + trace_xs_node_watch(path); + + if (!xs_watch(xsh, path, token)) { + error_setg_errno(errp, errno, "failed to watch node '%s'", path); + } + + g_free(path); +} + +void xs_node_unwatch(struct xs_handle *xsh, const char *node, + const char *key, const char *token, Error **errp) +{ + char *path; + + path = (strlen(node) != 0) ? g_strdup_printf("%s/%s", node, key) : + g_strdup(key); + + trace_xs_node_unwatch(path); + + if (!xs_unwatch(xsh, path, token)) { + error_setg_errno(errp, errno, "failed to unwatch node '%s'", path); + } + + g_free(path); +} diff --git a/hw/xen/xen-bus.c b/hw/xen/xen-bus.c new file mode 100644 index 000000000..416583f13 --- /dev/null +++ b/hw/xen/xen-bus.c @@ -0,0 +1,1405 @@ +/* + * Copyright (c) 2018 Citrix Systems Inc. + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "qemu/main-loop.h" +#include "qemu/module.h" +#include "qemu/uuid.h" +#include "hw/qdev-properties.h" +#include "hw/sysbus.h" +#include "hw/xen/xen.h" +#include "hw/xen/xen-backend.h" +#include "hw/xen/xen-bus.h" +#include "hw/xen/xen-bus-helper.h" +#include "monitor/monitor.h" +#include "qapi/error.h" +#include "qapi/qmp/qdict.h" +#include "sysemu/sysemu.h" +#include "trace.h" + +static char *xen_device_get_backend_path(XenDevice *xendev) +{ + XenBus *xenbus = XEN_BUS(qdev_get_parent_bus(DEVICE(xendev))); + XenDeviceClass *xendev_class = XEN_DEVICE_GET_CLASS(xendev); + const char *type = object_get_typename(OBJECT(xendev)); + const char *backend = xendev_class->backend; + + if (!backend) { + backend = type; + } + + return g_strdup_printf("/local/domain/%u/backend/%s/%u/%s", + xenbus->backend_id, backend, xendev->frontend_id, + xendev->name); +} + +static char *xen_device_get_frontend_path(XenDevice *xendev) +{ + XenDeviceClass *xendev_class = XEN_DEVICE_GET_CLASS(xendev); + const char *type = object_get_typename(OBJECT(xendev)); + const char *device = xendev_class->device; + + if (!device) { + device = type; + } + + return g_strdup_printf("/local/domain/%u/device/%s/%s", + xendev->frontend_id, device, xendev->name); +} + +static void xen_device_unplug(XenDevice *xendev, Error **errp) +{ + ERRP_GUARD(); + XenBus *xenbus = XEN_BUS(qdev_get_parent_bus(DEVICE(xendev))); + const char *type = object_get_typename(OBJECT(xendev)); + xs_transaction_t tid; + + trace_xen_device_unplug(type, xendev->name); + + /* Mimic the way the Xen toolstack does an unplug */ +again: + tid = xs_transaction_start(xenbus->xsh); + if (tid == XBT_NULL) { + error_setg_errno(errp, errno, "failed xs_transaction_start"); + return; + } + + xs_node_printf(xenbus->xsh, tid, xendev->backend_path, "online", + errp, "%u", 0); + if (*errp) { + goto abort; + } + + xs_node_printf(xenbus->xsh, tid, xendev->backend_path, "state", + errp, "%u", XenbusStateClosing); + if (*errp) { + goto abort; + } + + if (!xs_transaction_end(xenbus->xsh, tid, false)) { + if (errno == EAGAIN) { + goto again; + } + + error_setg_errno(errp, errno, "failed xs_transaction_end"); + } + + return; + +abort: + /* + * We only abort if there is already a failure so ignore any error + * from ending 
the transaction. + */ + xs_transaction_end(xenbus->xsh, tid, true); +} + +static void xen_bus_print_dev(Monitor *mon, DeviceState *dev, int indent) +{ + XenDevice *xendev = XEN_DEVICE(dev); + + monitor_printf(mon, "%*sname = '%s' frontend_id = %u\n", + indent, "", xendev->name, xendev->frontend_id); +} + +static char *xen_bus_get_dev_path(DeviceState *dev) +{ + return xen_device_get_backend_path(XEN_DEVICE(dev)); +} + +struct XenWatch { + char *node, *key; + char *token; + XenWatchHandler handler; + void *opaque; + Notifier notifier; +}; + +static void watch_notify(Notifier *n, void *data) +{ + XenWatch *watch = container_of(n, XenWatch, notifier); + const char *token = data; + + if (!strcmp(watch->token, token)) { + watch->handler(watch->opaque); + } +} + +static XenWatch *new_watch(const char *node, const char *key, + XenWatchHandler handler, void *opaque) +{ + XenWatch *watch = g_new0(XenWatch, 1); + QemuUUID uuid; + + qemu_uuid_generate(&uuid); + + watch->token = qemu_uuid_unparse_strdup(&uuid); + watch->node = g_strdup(node); + watch->key = g_strdup(key); + watch->handler = handler; + watch->opaque = opaque; + watch->notifier.notify = watch_notify; + + return watch; +} + +static void free_watch(XenWatch *watch) +{ + g_free(watch->token); + g_free(watch->key); + g_free(watch->node); + + g_free(watch); +} + +struct XenWatchList { + struct xs_handle *xsh; + NotifierList notifiers; +}; + +static void watch_list_event(void *opaque) +{ + XenWatchList *watch_list = opaque; + char **v; + const char *token; + + v = xs_check_watch(watch_list->xsh); + if (!v) { + return; + } + + token = v[XS_WATCH_TOKEN]; + + notifier_list_notify(&watch_list->notifiers, (void *)token); + + free(v); +} + +static XenWatchList *watch_list_create(struct xs_handle *xsh) +{ + XenWatchList *watch_list = g_new0(XenWatchList, 1); + + g_assert(xsh); + + watch_list->xsh = xsh; + notifier_list_init(&watch_list->notifiers); + qemu_set_fd_handler(xs_fileno(watch_list->xsh), watch_list_event, NULL, + watch_list); + + return watch_list; +} + +static void watch_list_destroy(XenWatchList *watch_list) +{ + g_assert(notifier_list_empty(&watch_list->notifiers)); + qemu_set_fd_handler(xs_fileno(watch_list->xsh), NULL, NULL, NULL); + g_free(watch_list); +} + +static XenWatch *watch_list_add(XenWatchList *watch_list, const char *node, + const char *key, XenWatchHandler handler, + void *opaque, Error **errp) +{ + ERRP_GUARD(); + XenWatch *watch = new_watch(node, key, handler, opaque); + + notifier_list_add(&watch_list->notifiers, &watch->notifier); + + xs_node_watch(watch_list->xsh, node, key, watch->token, errp); + if (*errp) { + notifier_remove(&watch->notifier); + free_watch(watch); + + return NULL; + } + + return watch; +} + +static void watch_list_remove(XenWatchList *watch_list, XenWatch *watch, + Error **errp) +{ + xs_node_unwatch(watch_list->xsh, watch->node, watch->key, watch->token, + errp); + + notifier_remove(&watch->notifier); + free_watch(watch); +} + +static XenWatch *xen_bus_add_watch(XenBus *xenbus, const char *node, + const char *key, XenWatchHandler handler, + Error **errp) +{ + trace_xen_bus_add_watch(node, key); + + return watch_list_add(xenbus->watch_list, node, key, handler, xenbus, + errp); +} + +static void xen_bus_remove_watch(XenBus *xenbus, XenWatch *watch, + Error **errp) +{ + trace_xen_bus_remove_watch(watch->node, watch->key); + + watch_list_remove(xenbus->watch_list, watch, errp); +} + +static void xen_bus_backend_create(XenBus *xenbus, const char *type, + const char *name, char *path, + Error **errp) +{ 
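+    /*
+     * Read the backend's parameter keys inside a xenstore transaction
+     * so that a consistent snapshot is seen; if another writer races
+     * with us, xs_transaction_end() fails with EAGAIN and the read is
+     * simply retried.
+     */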
+ ERRP_GUARD(); + xs_transaction_t tid; + char **key; + QDict *opts; + unsigned int i, n; + + trace_xen_bus_backend_create(type, path); + +again: + tid = xs_transaction_start(xenbus->xsh); + if (tid == XBT_NULL) { + error_setg(errp, "failed xs_transaction_start"); + return; + } + + key = xs_directory(xenbus->xsh, tid, path, &n); + if (!key) { + if (!xs_transaction_end(xenbus->xsh, tid, true)) { + error_setg_errno(errp, errno, "failed xs_transaction_end"); + } + return; + } + + opts = qdict_new(); + for (i = 0; i < n; i++) { + char *val; + + /* + * Assume anything found in the xenstore backend area, other than + * the keys created for a generic XenDevice, are parameters + * to be used to configure the backend. + */ + if (!strcmp(key[i], "state") || + !strcmp(key[i], "online") || + !strcmp(key[i], "frontend") || + !strcmp(key[i], "frontend-id") || + !strcmp(key[i], "hotplug-status")) + continue; + + if (xs_node_scanf(xenbus->xsh, tid, path, key[i], NULL, "%ms", + &val) == 1) { + qdict_put_str(opts, key[i], val); + free(val); + } + } + + free(key); + + if (!xs_transaction_end(xenbus->xsh, tid, false)) { + qobject_unref(opts); + + if (errno == EAGAIN) { + goto again; + } + + error_setg_errno(errp, errno, "failed xs_transaction_end"); + return; + } + + xen_backend_device_create(xenbus, type, name, opts, errp); + qobject_unref(opts); + + if (*errp) { + error_prepend(errp, "failed to create '%s' device '%s': ", type, name); + } +} + +static void xen_bus_type_enumerate(XenBus *xenbus, const char *type) +{ + char *domain_path = g_strdup_printf("backend/%s/%u", type, xen_domid); + char **backend; + unsigned int i, n; + + trace_xen_bus_type_enumerate(type); + + backend = xs_directory(xenbus->xsh, XBT_NULL, domain_path, &n); + if (!backend) { + goto out; + } + + for (i = 0; i < n; i++) { + char *backend_path = g_strdup_printf("%s/%s", domain_path, + backend[i]); + enum xenbus_state state; + unsigned int online; + + if (xs_node_scanf(xenbus->xsh, XBT_NULL, backend_path, "state", + NULL, "%u", &state) != 1) + state = XenbusStateUnknown; + + if (xs_node_scanf(xenbus->xsh, XBT_NULL, backend_path, "online", + NULL, "%u", &online) != 1) + online = 0; + + if (online && state == XenbusStateInitialising) { + Error *local_err = NULL; + + xen_bus_backend_create(xenbus, type, backend[i], backend_path, + &local_err); + if (local_err) { + error_report_err(local_err); + } + } + + g_free(backend_path); + } + + free(backend); + +out: + g_free(domain_path); +} + +static void xen_bus_enumerate(XenBus *xenbus) +{ + char **type; + unsigned int i, n; + + trace_xen_bus_enumerate(); + + type = xs_directory(xenbus->xsh, XBT_NULL, "backend", &n); + if (!type) { + return; + } + + for (i = 0; i < n; i++) { + xen_bus_type_enumerate(xenbus, type[i]); + } + + free(type); +} + +static void xen_bus_device_cleanup(XenDevice *xendev) +{ + const char *type = object_get_typename(OBJECT(xendev)); + Error *local_err = NULL; + + trace_xen_bus_device_cleanup(type, xendev->name); + + g_assert(!xendev->backend_online); + + if (!xen_backend_try_device_destroy(xendev, &local_err)) { + object_unparent(OBJECT(xendev)); + } + + if (local_err) { + error_report_err(local_err); + } +} + +static void xen_bus_cleanup(XenBus *xenbus) +{ + XenDevice *xendev, *next; + + trace_xen_bus_cleanup(); + + QLIST_FOREACH_SAFE(xendev, &xenbus->inactive_devices, list, next) { + g_assert(xendev->inactive); + QLIST_REMOVE(xendev, list); + xen_bus_device_cleanup(xendev); + } +} + +static void xen_bus_backend_changed(void *opaque) +{ + XenBus *xenbus = opaque; + + 
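+    /*
+     * Pick up any newly created backend areas first, then reap
+     * devices that have gone offline in the meantime.
+     */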
xen_bus_enumerate(xenbus); + xen_bus_cleanup(xenbus); +} + +static void xen_bus_unrealize(BusState *bus) +{ + XenBus *xenbus = XEN_BUS(bus); + + trace_xen_bus_unrealize(); + + if (xenbus->backend_watch) { + unsigned int i; + + for (i = 0; i < xenbus->backend_types; i++) { + if (xenbus->backend_watch[i]) { + xen_bus_remove_watch(xenbus, xenbus->backend_watch[i], NULL); + } + } + + g_free(xenbus->backend_watch); + xenbus->backend_watch = NULL; + } + + if (xenbus->watch_list) { + watch_list_destroy(xenbus->watch_list); + xenbus->watch_list = NULL; + } + + if (xenbus->xsh) { + xs_close(xenbus->xsh); + } +} + +static void xen_bus_realize(BusState *bus, Error **errp) +{ + char *key = g_strdup_printf("%u", xen_domid); + XenBus *xenbus = XEN_BUS(bus); + unsigned int domid; + const char **type; + unsigned int i; + Error *local_err = NULL; + + trace_xen_bus_realize(); + + xenbus->xsh = xs_open(0); + if (!xenbus->xsh) { + error_setg_errno(errp, errno, "failed xs_open"); + goto fail; + } + + if (xs_node_scanf(xenbus->xsh, XBT_NULL, "", /* domain root node */ + "domid", NULL, "%u", &domid) == 1) { + xenbus->backend_id = domid; + } else { + xenbus->backend_id = 0; /* Assume lack of node means dom0 */ + } + + xenbus->watch_list = watch_list_create(xenbus->xsh); + + module_call_init(MODULE_INIT_XEN_BACKEND); + + type = xen_backend_get_types(&xenbus->backend_types); + xenbus->backend_watch = g_new(XenWatch *, xenbus->backend_types); + + for (i = 0; i < xenbus->backend_types; i++) { + char *node = g_strdup_printf("backend/%s", type[i]); + + xenbus->backend_watch[i] = + xen_bus_add_watch(xenbus, node, key, xen_bus_backend_changed, + &local_err); + if (local_err) { + /* This need not be treated as a hard error so don't propagate */ + error_reportf_err(local_err, + "failed to set up '%s' enumeration watch: ", + type[i]); + } + + g_free(node); + } + + g_free(type); + g_free(key); + return; + +fail: + xen_bus_unrealize(bus); + g_free(key); +} + +static void xen_bus_unplug_request(HotplugHandler *hotplug, + DeviceState *dev, + Error **errp) +{ + XenDevice *xendev = XEN_DEVICE(dev); + + xen_device_unplug(xendev, errp); +} + +static void xen_bus_class_init(ObjectClass *class, void *data) +{ + BusClass *bus_class = BUS_CLASS(class); + HotplugHandlerClass *hotplug_class = HOTPLUG_HANDLER_CLASS(class); + + bus_class->print_dev = xen_bus_print_dev; + bus_class->get_dev_path = xen_bus_get_dev_path; + bus_class->realize = xen_bus_realize; + bus_class->unrealize = xen_bus_unrealize; + + hotplug_class->unplug_request = xen_bus_unplug_request; +} + +static const TypeInfo xen_bus_type_info = { + .name = TYPE_XEN_BUS, + .parent = TYPE_BUS, + .instance_size = sizeof(XenBus), + .class_size = sizeof(XenBusClass), + .class_init = xen_bus_class_init, + .interfaces = (InterfaceInfo[]) { + { TYPE_HOTPLUG_HANDLER }, + { } + }, +}; + +void xen_device_backend_printf(XenDevice *xendev, const char *key, + const char *fmt, ...) +{ + XenBus *xenbus = XEN_BUS(qdev_get_parent_bus(DEVICE(xendev))); + Error *local_err = NULL; + va_list ap; + + g_assert(xenbus->xsh); + + va_start(ap, fmt); + xs_node_vprintf(xenbus->xsh, XBT_NULL, xendev->backend_path, key, + &local_err, fmt, ap); + va_end(ap); + + if (local_err) { + error_report_err(local_err); + } +} + +static int xen_device_backend_scanf(XenDevice *xendev, const char *key, + const char *fmt, ...) 
+{ + XenBus *xenbus = XEN_BUS(qdev_get_parent_bus(DEVICE(xendev))); + va_list ap; + int rc; + + g_assert(xenbus->xsh); + + va_start(ap, fmt); + rc = xs_node_vscanf(xenbus->xsh, XBT_NULL, xendev->backend_path, key, + NULL, fmt, ap); + va_end(ap); + + return rc; +} + +void xen_device_backend_set_state(XenDevice *xendev, + enum xenbus_state state) +{ + const char *type = object_get_typename(OBJECT(xendev)); + + if (xendev->backend_state == state) { + return; + } + + trace_xen_device_backend_state(type, xendev->name, + xs_strstate(state)); + + xendev->backend_state = state; + xen_device_backend_printf(xendev, "state", "%u", state); +} + +enum xenbus_state xen_device_backend_get_state(XenDevice *xendev) +{ + return xendev->backend_state; +} + +static void xen_device_backend_set_online(XenDevice *xendev, bool online) +{ + const char *type = object_get_typename(OBJECT(xendev)); + + if (xendev->backend_online == online) { + return; + } + + trace_xen_device_backend_online(type, xendev->name, online); + + xendev->backend_online = online; + xen_device_backend_printf(xendev, "online", "%u", online); +} + +/* + * Tell from the state whether the frontend is likely alive, + * i.e. it will react to a change of state of the backend. + */ +static bool xen_device_frontend_is_active(XenDevice *xendev) +{ + switch (xendev->frontend_state) { + case XenbusStateInitWait: + case XenbusStateInitialised: + case XenbusStateConnected: + case XenbusStateClosing: + return true; + default: + return false; + } +} + +static void xen_device_backend_changed(void *opaque) +{ + XenDevice *xendev = opaque; + const char *type = object_get_typename(OBJECT(xendev)); + enum xenbus_state state; + unsigned int online; + + trace_xen_device_backend_changed(type, xendev->name); + + if (xen_device_backend_scanf(xendev, "state", "%u", &state) != 1) { + state = XenbusStateUnknown; + } + + xen_device_backend_set_state(xendev, state); + + if (xen_device_backend_scanf(xendev, "online", "%u", &online) != 1) { + online = 0; + } + + xen_device_backend_set_online(xendev, !!online); + + /* + * If the toolstack (or unplug request callback) has set the backend + * state to Closing, but there is no active frontend then set the + * backend state to Closed. + */ + if (state == XenbusStateClosing && + !xen_device_frontend_is_active(xendev)) { + xen_device_backend_set_state(xendev, XenbusStateClosed); + } + + /* + * If a backend is still 'online' then we should leave it alone but, + * if a backend is not 'online', then the device is a candidate + * for destruction. Hence add it to the 'inactive' list to be cleaned + * by xen_bus_cleanup(). + */ + if (!online && + (state == XenbusStateClosed || state == XenbusStateInitialising || + state == XenbusStateInitWait || state == XenbusStateUnknown) && + !xendev->inactive) { + XenBus *xenbus = XEN_BUS(qdev_get_parent_bus(DEVICE(xendev))); + + xendev->inactive = true; + QLIST_INSERT_HEAD(&xenbus->inactive_devices, xendev, list); + + /* + * Re-write the state to cause a XenBus backend_watch notification, + * resulting in a call to xen_bus_cleanup(). 
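+         * (Xenstore fires a watch on every write, even when the value
+         * is unchanged, so re-writing the current state is sufficient.)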
+ */ + xen_device_backend_printf(xendev, "state", "%u", state); + } +} + +static XenWatch *xen_device_add_watch(XenDevice *xendev, const char *node, + const char *key, + XenWatchHandler handler, + Error **errp) +{ + const char *type = object_get_typename(OBJECT(xendev)); + + trace_xen_device_add_watch(type, xendev->name, node, key); + + return watch_list_add(xendev->watch_list, node, key, handler, xendev, + errp); +} + +static void xen_device_remove_watch(XenDevice *xendev, XenWatch *watch, + Error **errp) +{ + const char *type = object_get_typename(OBJECT(xendev)); + + trace_xen_device_remove_watch(type, xendev->name, watch->node, + watch->key); + + watch_list_remove(xendev->watch_list, watch, errp); +} + + +static void xen_device_backend_create(XenDevice *xendev, Error **errp) +{ + ERRP_GUARD(); + XenBus *xenbus = XEN_BUS(qdev_get_parent_bus(DEVICE(xendev))); + struct xs_permissions perms[2]; + + xendev->backend_path = xen_device_get_backend_path(xendev); + + perms[0].id = xenbus->backend_id; + perms[0].perms = XS_PERM_NONE; + perms[1].id = xendev->frontend_id; + perms[1].perms = XS_PERM_READ; + + g_assert(xenbus->xsh); + + xs_node_create(xenbus->xsh, XBT_NULL, xendev->backend_path, perms, + ARRAY_SIZE(perms), errp); + if (*errp) { + error_prepend(errp, "failed to create backend: "); + return; + } + + xendev->backend_state_watch = + xen_device_add_watch(xendev, xendev->backend_path, + "state", xen_device_backend_changed, + errp); + if (*errp) { + error_prepend(errp, "failed to watch backend state: "); + return; + } + + xendev->backend_online_watch = + xen_device_add_watch(xendev, xendev->backend_path, + "online", xen_device_backend_changed, + errp); + if (*errp) { + error_prepend(errp, "failed to watch backend online: "); + return; + } +} + +static void xen_device_backend_destroy(XenDevice *xendev) +{ + XenBus *xenbus = XEN_BUS(qdev_get_parent_bus(DEVICE(xendev))); + Error *local_err = NULL; + + if (xendev->backend_online_watch) { + xen_device_remove_watch(xendev, xendev->backend_online_watch, NULL); + xendev->backend_online_watch = NULL; + } + + if (xendev->backend_state_watch) { + xen_device_remove_watch(xendev, xendev->backend_state_watch, NULL); + xendev->backend_state_watch = NULL; + } + + if (!xendev->backend_path) { + return; + } + + g_assert(xenbus->xsh); + + xs_node_destroy(xenbus->xsh, XBT_NULL, xendev->backend_path, + &local_err); + g_free(xendev->backend_path); + xendev->backend_path = NULL; + + if (local_err) { + error_report_err(local_err); + } +} + +void xen_device_frontend_printf(XenDevice *xendev, const char *key, + const char *fmt, ...) +{ + XenBus *xenbus = XEN_BUS(qdev_get_parent_bus(DEVICE(xendev))); + Error *local_err = NULL; + va_list ap; + + g_assert(xenbus->xsh); + + va_start(ap, fmt); + xs_node_vprintf(xenbus->xsh, XBT_NULL, xendev->frontend_path, key, + &local_err, fmt, ap); + va_end(ap); + + if (local_err) { + error_report_err(local_err); + } +} + +int xen_device_frontend_scanf(XenDevice *xendev, const char *key, + const char *fmt, ...) 
+{ + XenBus *xenbus = XEN_BUS(qdev_get_parent_bus(DEVICE(xendev))); + va_list ap; + int rc; + + g_assert(xenbus->xsh); + + va_start(ap, fmt); + rc = xs_node_vscanf(xenbus->xsh, XBT_NULL, xendev->frontend_path, key, + NULL, fmt, ap); + va_end(ap); + + return rc; +} + +static void xen_device_frontend_set_state(XenDevice *xendev, + enum xenbus_state state, + bool publish) +{ + const char *type = object_get_typename(OBJECT(xendev)); + + if (xendev->frontend_state == state) { + return; + } + + trace_xen_device_frontend_state(type, xendev->name, + xs_strstate(state)); + + xendev->frontend_state = state; + if (publish) { + xen_device_frontend_printf(xendev, "state", "%u", state); + } +} + +static void xen_device_frontend_changed(void *opaque) +{ + XenDevice *xendev = opaque; + XenDeviceClass *xendev_class = XEN_DEVICE_GET_CLASS(xendev); + const char *type = object_get_typename(OBJECT(xendev)); + enum xenbus_state state; + + trace_xen_device_frontend_changed(type, xendev->name); + + if (xen_device_frontend_scanf(xendev, "state", "%u", &state) != 1) { + state = XenbusStateUnknown; + } + + xen_device_frontend_set_state(xendev, state, false); + + if (state == XenbusStateInitialising && + xendev->backend_state == XenbusStateClosed && + xendev->backend_online) { + /* + * The frontend is re-initializing so switch back to + * InitWait. + */ + xen_device_backend_set_state(xendev, XenbusStateInitWait); + return; + } + + if (xendev_class->frontend_changed) { + Error *local_err = NULL; + + xendev_class->frontend_changed(xendev, state, &local_err); + + if (local_err) { + error_reportf_err(local_err, "frontend change error: "); + } + } +} + +static bool xen_device_frontend_exists(XenDevice *xendev) +{ + enum xenbus_state state; + + return (xen_device_frontend_scanf(xendev, "state", "%u", &state) == 1); +} + +static void xen_device_frontend_create(XenDevice *xendev, Error **errp) +{ + ERRP_GUARD(); + XenBus *xenbus = XEN_BUS(qdev_get_parent_bus(DEVICE(xendev))); + struct xs_permissions perms[2]; + + xendev->frontend_path = xen_device_get_frontend_path(xendev); + + /* + * The frontend area may have already been created by a legacy + * toolstack. 
+ */ + if (!xen_device_frontend_exists(xendev)) { + perms[0].id = xendev->frontend_id; + perms[0].perms = XS_PERM_NONE; + perms[1].id = xenbus->backend_id; + perms[1].perms = XS_PERM_READ | XS_PERM_WRITE; + + g_assert(xenbus->xsh); + + xs_node_create(xenbus->xsh, XBT_NULL, xendev->frontend_path, perms, + ARRAY_SIZE(perms), errp); + if (*errp) { + error_prepend(errp, "failed to create frontend: "); + return; + } + } + + xendev->frontend_state_watch = + xen_device_add_watch(xendev, xendev->frontend_path, "state", + xen_device_frontend_changed, errp); + if (*errp) { + error_prepend(errp, "failed to watch frontend state: "); + } +} + +static void xen_device_frontend_destroy(XenDevice *xendev) +{ + XenBus *xenbus = XEN_BUS(qdev_get_parent_bus(DEVICE(xendev))); + Error *local_err = NULL; + + if (xendev->frontend_state_watch) { + xen_device_remove_watch(xendev, xendev->frontend_state_watch, + NULL); + xendev->frontend_state_watch = NULL; + } + + if (!xendev->frontend_path) { + return; + } + + g_assert(xenbus->xsh); + + xs_node_destroy(xenbus->xsh, XBT_NULL, xendev->frontend_path, + &local_err); + g_free(xendev->frontend_path); + xendev->frontend_path = NULL; + + if (local_err) { + error_report_err(local_err); + } +} + +void xen_device_set_max_grant_refs(XenDevice *xendev, unsigned int nr_refs, + Error **errp) +{ + if (xengnttab_set_max_grants(xendev->xgth, nr_refs)) { + error_setg_errno(errp, errno, "xengnttab_set_max_grants failed"); + } +} + +void *xen_device_map_grant_refs(XenDevice *xendev, uint32_t *refs, + unsigned int nr_refs, int prot, + Error **errp) +{ + void *map = xengnttab_map_domain_grant_refs(xendev->xgth, nr_refs, + xendev->frontend_id, refs, + prot); + + if (!map) { + error_setg_errno(errp, errno, + "xengnttab_map_domain_grant_refs failed"); + } + + return map; +} + +void xen_device_unmap_grant_refs(XenDevice *xendev, void *map, + unsigned int nr_refs, Error **errp) +{ + if (xengnttab_unmap(xendev->xgth, map, nr_refs)) { + error_setg_errno(errp, errno, "xengnttab_unmap failed"); + } +} + +static void compat_copy_grant_refs(XenDevice *xendev, bool to_domain, + XenDeviceGrantCopySegment segs[], + unsigned int nr_segs, Error **errp) +{ + uint32_t *refs = g_new(uint32_t, nr_segs); + int prot = to_domain ? PROT_WRITE : PROT_READ; + void *map; + unsigned int i; + + for (i = 0; i < nr_segs; i++) { + XenDeviceGrantCopySegment *seg = &segs[i]; + + refs[i] = to_domain ? 
seg->dest.foreign.ref : + seg->source.foreign.ref; + } + + map = xengnttab_map_domain_grant_refs(xendev->xgth, nr_segs, + xendev->frontend_id, refs, + prot); + if (!map) { + error_setg_errno(errp, errno, + "xengnttab_map_domain_grant_refs failed"); + goto done; + } + + for (i = 0; i < nr_segs; i++) { + XenDeviceGrantCopySegment *seg = &segs[i]; + void *page = map + (i * XC_PAGE_SIZE); + + if (to_domain) { + memcpy(page + seg->dest.foreign.offset, seg->source.virt, + seg->len); + } else { + memcpy(seg->dest.virt, page + seg->source.foreign.offset, + seg->len); + } + } + + if (xengnttab_unmap(xendev->xgth, map, nr_segs)) { + error_setg_errno(errp, errno, "xengnttab_unmap failed"); + } + +done: + g_free(refs); +} + +void xen_device_copy_grant_refs(XenDevice *xendev, bool to_domain, + XenDeviceGrantCopySegment segs[], + unsigned int nr_segs, Error **errp) +{ + xengnttab_grant_copy_segment_t *xengnttab_segs; + unsigned int i; + + if (!xendev->feature_grant_copy) { + compat_copy_grant_refs(xendev, to_domain, segs, nr_segs, errp); + return; + } + + xengnttab_segs = g_new0(xengnttab_grant_copy_segment_t, nr_segs); + + for (i = 0; i < nr_segs; i++) { + XenDeviceGrantCopySegment *seg = &segs[i]; + xengnttab_grant_copy_segment_t *xengnttab_seg = &xengnttab_segs[i]; + + if (to_domain) { + xengnttab_seg->flags = GNTCOPY_dest_gref; + xengnttab_seg->dest.foreign.domid = xendev->frontend_id; + xengnttab_seg->dest.foreign.ref = seg->dest.foreign.ref; + xengnttab_seg->dest.foreign.offset = seg->dest.foreign.offset; + xengnttab_seg->source.virt = seg->source.virt; + } else { + xengnttab_seg->flags = GNTCOPY_source_gref; + xengnttab_seg->source.foreign.domid = xendev->frontend_id; + xengnttab_seg->source.foreign.ref = seg->source.foreign.ref; + xengnttab_seg->source.foreign.offset = + seg->source.foreign.offset; + xengnttab_seg->dest.virt = seg->dest.virt; + } + + xengnttab_seg->len = seg->len; + } + + if (xengnttab_grant_copy(xendev->xgth, nr_segs, xengnttab_segs)) { + error_setg_errno(errp, errno, "xengnttab_grant_copy failed"); + goto done; + } + + for (i = 0; i < nr_segs; i++) { + xengnttab_grant_copy_segment_t *xengnttab_seg = &xengnttab_segs[i]; + + if (xengnttab_seg->status != GNTST_okay) { + error_setg(errp, "xengnttab_grant_copy seg[%u] failed", i); + break; + } + } + +done: + g_free(xengnttab_segs); +} + +struct XenEventChannel { + QLIST_ENTRY(XenEventChannel) list; + AioContext *ctx; + xenevtchn_handle *xeh; + evtchn_port_t local_port; + XenEventHandler handler; + void *opaque; +}; + +static bool xen_device_poll(void *opaque) +{ + XenEventChannel *channel = opaque; + + return channel->handler(channel->opaque); +} + +static void xen_device_event(void *opaque) +{ + XenEventChannel *channel = opaque; + unsigned long port = xenevtchn_pending(channel->xeh); + + if (port == channel->local_port) { + xen_device_poll(channel); + + xenevtchn_unmask(channel->xeh, port); + } +} + +void xen_device_set_event_channel_context(XenDevice *xendev, + XenEventChannel *channel, + AioContext *ctx, + Error **errp) +{ + if (!channel) { + error_setg(errp, "bad channel"); + return; + } + + if (channel->ctx) + aio_set_fd_handler(channel->ctx, xenevtchn_fd(channel->xeh), true, + NULL, NULL, NULL, NULL); + + channel->ctx = ctx; + aio_set_fd_handler(channel->ctx, xenevtchn_fd(channel->xeh), true, + xen_device_event, NULL, xen_device_poll, channel); +} + +XenEventChannel *xen_device_bind_event_channel(XenDevice *xendev, + unsigned int port, + XenEventHandler handler, + void *opaque, Error **errp) +{ + XenEventChannel *channel = 
g_new0(XenEventChannel, 1); + xenevtchn_port_or_error_t local_port; + + channel->xeh = xenevtchn_open(NULL, 0); + if (!channel->xeh) { + error_setg_errno(errp, errno, "failed xenevtchn_open"); + goto fail; + } + + local_port = xenevtchn_bind_interdomain(channel->xeh, + xendev->frontend_id, + port); + if (local_port < 0) { + error_setg_errno(errp, errno, "xenevtchn_bind_interdomain failed"); + goto fail; + } + + channel->local_port = local_port; + channel->handler = handler; + channel->opaque = opaque; + + /* Only reason for failure is a NULL channel */ + xen_device_set_event_channel_context(xendev, channel, + qemu_get_aio_context(), + &error_abort); + + QLIST_INSERT_HEAD(&xendev->event_channels, channel, list); + + return channel; + +fail: + if (channel->xeh) { + xenevtchn_close(channel->xeh); + } + + g_free(channel); + + return NULL; +} + +void xen_device_notify_event_channel(XenDevice *xendev, + XenEventChannel *channel, + Error **errp) +{ + if (!channel) { + error_setg(errp, "bad channel"); + return; + } + + if (xenevtchn_notify(channel->xeh, channel->local_port) < 0) { + error_setg_errno(errp, errno, "xenevtchn_notify failed"); + } +} + +void xen_device_unbind_event_channel(XenDevice *xendev, + XenEventChannel *channel, + Error **errp) +{ + if (!channel) { + error_setg(errp, "bad channel"); + return; + } + + QLIST_REMOVE(channel, list); + + aio_set_fd_handler(channel->ctx, xenevtchn_fd(channel->xeh), true, + NULL, NULL, NULL, NULL); + + if (xenevtchn_unbind(channel->xeh, channel->local_port) < 0) { + error_setg_errno(errp, errno, "xenevtchn_unbind failed"); + } + + xenevtchn_close(channel->xeh); + g_free(channel); +} + +static void xen_device_unrealize(DeviceState *dev) +{ + XenDevice *xendev = XEN_DEVICE(dev); + XenDeviceClass *xendev_class = XEN_DEVICE_GET_CLASS(xendev); + const char *type = object_get_typename(OBJECT(xendev)); + XenEventChannel *channel, *next; + + if (!xendev->name) { + return; + } + + trace_xen_device_unrealize(type, xendev->name); + + if (xendev->exit.notify) { + qemu_remove_exit_notifier(&xendev->exit); + xendev->exit.notify = NULL; + } + + if (xendev_class->unrealize) { + xendev_class->unrealize(xendev); + } + + /* Make sure all event channels are cleaned up */ + QLIST_FOREACH_SAFE(channel, &xendev->event_channels, list, next) { + xen_device_unbind_event_channel(xendev, channel, NULL); + } + + xen_device_frontend_destroy(xendev); + xen_device_backend_destroy(xendev); + + if (xendev->xgth) { + xengnttab_close(xendev->xgth); + xendev->xgth = NULL; + } + + if (xendev->watch_list) { + watch_list_destroy(xendev->watch_list); + xendev->watch_list = NULL; + } + + if (xendev->xsh) { + xs_close(xendev->xsh); + xendev->xsh = NULL; + } + + g_free(xendev->name); + xendev->name = NULL; +} + +static void xen_device_exit(Notifier *n, void *data) +{ + XenDevice *xendev = container_of(n, XenDevice, exit); + + xen_device_unrealize(DEVICE(xendev)); +} + +static void xen_device_realize(DeviceState *dev, Error **errp) +{ + ERRP_GUARD(); + XenDevice *xendev = XEN_DEVICE(dev); + XenDeviceClass *xendev_class = XEN_DEVICE_GET_CLASS(xendev); + XenBus *xenbus = XEN_BUS(qdev_get_parent_bus(DEVICE(xendev))); + const char *type = object_get_typename(OBJECT(xendev)); + + if (xendev->frontend_id == DOMID_INVALID) { + xendev->frontend_id = xen_domid; + } + + if (xendev->frontend_id >= DOMID_FIRST_RESERVED) { + error_setg(errp, "invalid frontend-id"); + goto unrealize; + } + + if (!xendev_class->get_name) { + error_setg(errp, "get_name method not implemented"); + goto unrealize; + } + + 
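+    /*
+     * Editorial sketch, not part of the patch: how a backend typically
+     * combines the event-channel and grant-copy helpers defined above.
+     * Everything except the xen_device_* calls and the segment layout is
+     * hypothetical, and error handling is elided.
+     *
+     *   static bool my_ring_handler(void *opaque)        // hypothetical
+     *   {
+     *       MyBackend *b = opaque;                       // hypothetical type
+     *       XenDeviceGrantCopySegment seg = {
+     *           .dest.foreign.ref    = b->rsp_ref,
+     *           .dest.foreign.offset = 0,
+     *           .source.virt         = b->rsp,
+     *           .len                 = b->rsp_len,
+     *       };
+     *       Error *err = NULL;
+     *
+     *       // copy a response into the frontend-granted page ...
+     *       xen_device_copy_grant_refs(b->xendev, true, &seg, 1, &err);
+     *       // ... and kick the frontend
+     *       xen_device_notify_event_channel(b->xendev, b->channel, &err);
+     *       return true;    // progress was made
+     *   }
+     *
+     *   b->channel = xen_device_bind_event_channel(xendev, remote_port,
+     *                                              my_ring_handler, b, errp);
+     */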
xendev->name = xendev_class->get_name(xendev, errp); + if (*errp) { + error_prepend(errp, "failed to get device name: "); + goto unrealize; + } + + trace_xen_device_realize(type, xendev->name); + + xendev->xsh = xs_open(0); + if (!xendev->xsh) { + error_setg_errno(errp, errno, "failed xs_open"); + goto unrealize; + } + + xendev->watch_list = watch_list_create(xendev->xsh); + + xendev->xgth = xengnttab_open(NULL, 0); + if (!xendev->xgth) { + error_setg_errno(errp, errno, "failed xengnttab_open"); + goto unrealize; + } + + xendev->feature_grant_copy = + (xengnttab_grant_copy(xendev->xgth, 0, NULL) == 0); + + xen_device_backend_create(xendev, errp); + if (*errp) { + goto unrealize; + } + + xen_device_frontend_create(xendev, errp); + if (*errp) { + goto unrealize; + } + + if (xendev_class->realize) { + xendev_class->realize(xendev, errp); + if (*errp) { + goto unrealize; + } + } + + xen_device_backend_printf(xendev, "frontend", "%s", + xendev->frontend_path); + xen_device_backend_printf(xendev, "frontend-id", "%u", + xendev->frontend_id); + xen_device_backend_printf(xendev, "hotplug-status", "connected"); + + xen_device_backend_set_online(xendev, true); + xen_device_backend_set_state(xendev, XenbusStateInitWait); + + if (!xen_device_frontend_exists(xendev)) { + xen_device_frontend_printf(xendev, "backend", "%s", + xendev->backend_path); + xen_device_frontend_printf(xendev, "backend-id", "%u", + xenbus->backend_id); + + xen_device_frontend_set_state(xendev, XenbusStateInitialising, true); + } + + xendev->exit.notify = xen_device_exit; + qemu_add_exit_notifier(&xendev->exit); + return; + +unrealize: + xen_device_unrealize(dev); +} + +static Property xen_device_props[] = { + DEFINE_PROP_UINT16("frontend-id", XenDevice, frontend_id, + DOMID_INVALID), + DEFINE_PROP_END_OF_LIST() +}; + +static void xen_device_class_init(ObjectClass *class, void *data) +{ + DeviceClass *dev_class = DEVICE_CLASS(class); + + dev_class->realize = xen_device_realize; + dev_class->unrealize = xen_device_unrealize; + device_class_set_props(dev_class, xen_device_props); + dev_class->bus_type = TYPE_XEN_BUS; +} + +static const TypeInfo xen_device_type_info = { + .name = TYPE_XEN_DEVICE, + .parent = TYPE_DEVICE, + .instance_size = sizeof(XenDevice), + .abstract = true, + .class_size = sizeof(XenDeviceClass), + .class_init = xen_device_class_init, +}; + +typedef struct XenBridge { + SysBusDevice busdev; +} XenBridge; + +#define TYPE_XEN_BRIDGE "xen-bridge" + +static const TypeInfo xen_bridge_type_info = { + .name = TYPE_XEN_BRIDGE, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(XenBridge), +}; + +static void xen_register_types(void) +{ + type_register_static(&xen_bridge_type_info); + type_register_static(&xen_bus_type_info); + type_register_static(&xen_device_type_info); +} + +type_init(xen_register_types) + +void xen_bus_init(void) +{ + DeviceState *dev = qdev_new(TYPE_XEN_BRIDGE); + BusState *bus = qbus_new(TYPE_XEN_BUS, dev, NULL); + + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + qbus_set_bus_hotplug_handler(bus); +} diff --git a/hw/xen/xen-host-pci-device.c b/hw/xen/xen-host-pci-device.c new file mode 100644 index 000000000..8c6e9a171 --- /dev/null +++ b/hw/xen/xen-host-pci-device.c @@ -0,0 +1,402 @@ +/* + * Copyright (C) 2011 Citrix Ltd. + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. 
+ * + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/cutils.h" +#include "xen-host-pci-device.h" + +#define XEN_HOST_PCI_MAX_EXT_CAP \ + ((PCIE_CONFIG_SPACE_SIZE - PCI_CONFIG_SPACE_SIZE) / (PCI_CAP_SIZEOF + 4)) + +#ifdef XEN_HOST_PCI_DEVICE_DEBUG +# define XEN_HOST_PCI_LOG(f, a...) fprintf(stderr, "%s: " f, __func__, ##a) +#else +# define XEN_HOST_PCI_LOG(f, a...) (void)0 +#endif + +/* + * from linux/ioport.h + * IO resources have these defined flags. + */ +#define IORESOURCE_BITS 0x000000ff /* Bus-specific bits */ + +#define IORESOURCE_TYPE_BITS 0x00000f00 /* Resource type */ +#define IORESOURCE_IO 0x00000100 +#define IORESOURCE_MEM 0x00000200 + +#define IORESOURCE_PREFETCH 0x00001000 /* No side effects */ +#define IORESOURCE_MEM_64 0x00100000 + +static void xen_host_pci_sysfs_path(const XenHostPCIDevice *d, + const char *name, char *buf, ssize_t size) +{ + int rc; + + rc = snprintf(buf, size, "/sys/bus/pci/devices/%04x:%02x:%02x.%d/%s", + d->domain, d->bus, d->dev, d->func, name); + assert(rc >= 0 && rc < size); +} + + +/* This size should be enough to read the first 7 lines of a resource file */ +#define XEN_HOST_PCI_RESOURCE_BUFFER_SIZE 400 +static void xen_host_pci_get_resource(XenHostPCIDevice *d, Error **errp) +{ + int i, rc, fd; + char path[PATH_MAX]; + char buf[XEN_HOST_PCI_RESOURCE_BUFFER_SIZE]; + unsigned long long start, end, flags, size; + char *endptr, *s; + uint8_t type; + + xen_host_pci_sysfs_path(d, "resource", path, sizeof(path)); + + fd = open(path, O_RDONLY); + if (fd == -1) { + error_setg_file_open(errp, errno, path); + return; + } + + do { + rc = read(fd, &buf, sizeof(buf) - 1); + if (rc < 0 && errno != EINTR) { + error_setg_errno(errp, errno, "read err"); + goto out; + } + } while (rc < 0); + buf[rc] = 0; + + s = buf; + for (i = 0; i < PCI_NUM_REGIONS; i++) { + type = 0; + + start = strtoll(s, &endptr, 16); + if (*endptr != ' ' || s == endptr) { + break; + } + s = endptr + 1; + end = strtoll(s, &endptr, 16); + if (*endptr != ' ' || s == endptr) { + break; + } + s = endptr + 1; + flags = strtoll(s, &endptr, 16); + if (*endptr != '\n' || s == endptr) { + break; + } + s = endptr + 1; + + if (start) { + size = end - start + 1; + } else { + size = 0; + } + + if (flags & IORESOURCE_IO) { + type |= XEN_HOST_PCI_REGION_TYPE_IO; + } + if (flags & IORESOURCE_MEM) { + type |= XEN_HOST_PCI_REGION_TYPE_MEM; + } + if (flags & IORESOURCE_PREFETCH) { + type |= XEN_HOST_PCI_REGION_TYPE_PREFETCH; + } + if (flags & IORESOURCE_MEM_64) { + type |= XEN_HOST_PCI_REGION_TYPE_MEM_64; + } + + if (i < PCI_ROM_SLOT) { + d->io_regions[i].base_addr = start; + d->io_regions[i].size = size; + d->io_regions[i].type = type; + d->io_regions[i].bus_flags = flags & IORESOURCE_BITS; + } else { + d->rom.base_addr = start; + d->rom.size = size; + d->rom.type = type; + d->rom.bus_flags = flags & IORESOURCE_BITS; + } + } + + if (i != PCI_NUM_REGIONS) { + error_setg(errp, "Invalid format or input too short: %s", buf); + } + +out: + close(fd); +} + +/* This size should be enough to read a long from a file */ +#define XEN_HOST_PCI_GET_VALUE_BUFFER_SIZE 22 +static void xen_host_pci_get_value(XenHostPCIDevice *d, const char *name, + unsigned int *pvalue, int base, Error **errp) +{ + char path[PATH_MAX]; + char buf[XEN_HOST_PCI_GET_VALUE_BUFFER_SIZE]; + int fd, rc; + unsigned long value; + const char *endptr; + + xen_host_pci_sysfs_path(d, name, path, sizeof(path)); + + fd = open(path, O_RDONLY); + if (fd == -1) { + error_setg_file_open(errp, errno, path); + return; + } + + do { + rc = 
read(fd, &buf, sizeof(buf) - 1); + if (rc < 0 && errno != EINTR) { + error_setg_errno(errp, errno, "read err"); + goto out; + } + } while (rc < 0); + + buf[rc] = 0; + rc = qemu_strtoul(buf, &endptr, base, &value); + if (!rc) { + assert(value <= UINT_MAX); + *pvalue = value; + } else { + error_setg_errno(errp, -rc, "failed to parse value '%s'", buf); + } + +out: + close(fd); +} + +static inline void xen_host_pci_get_hex_value(XenHostPCIDevice *d, + const char *name, + unsigned int *pvalue, + Error **errp) +{ + xen_host_pci_get_value(d, name, pvalue, 16, errp); +} + +static inline void xen_host_pci_get_dec_value(XenHostPCIDevice *d, + const char *name, + unsigned int *pvalue, + Error **errp) +{ + xen_host_pci_get_value(d, name, pvalue, 10, errp); +} + +static bool xen_host_pci_dev_is_virtfn(XenHostPCIDevice *d) +{ + char path[PATH_MAX]; + struct stat buf; + + xen_host_pci_sysfs_path(d, "physfn", path, sizeof(path)); + + return !stat(path, &buf); +} + +static void xen_host_pci_config_open(XenHostPCIDevice *d, Error **errp) +{ + char path[PATH_MAX]; + + xen_host_pci_sysfs_path(d, "config", path, sizeof(path)); + + d->config_fd = open(path, O_RDWR); + if (d->config_fd == -1) { + error_setg_file_open(errp, errno, path); + } +} + +static int xen_host_pci_config_read(XenHostPCIDevice *d, + int pos, void *buf, int len) +{ + int rc; + + do { + rc = pread(d->config_fd, buf, len, pos); + } while (rc < 0 && (errno == EINTR || errno == EAGAIN)); + if (rc != len) { + return -errno; + } + return 0; +} + +static int xen_host_pci_config_write(XenHostPCIDevice *d, + int pos, const void *buf, int len) +{ + int rc; + + do { + rc = pwrite(d->config_fd, buf, len, pos); + } while (rc < 0 && (errno == EINTR || errno == EAGAIN)); + if (rc != len) { + return -errno; + } + return 0; +} + + +int xen_host_pci_get_byte(XenHostPCIDevice *d, int pos, uint8_t *p) +{ + uint8_t buf; + int rc = xen_host_pci_config_read(d, pos, &buf, 1); + if (!rc) { + *p = buf; + } + return rc; +} + +int xen_host_pci_get_word(XenHostPCIDevice *d, int pos, uint16_t *p) +{ + uint16_t buf; + int rc = xen_host_pci_config_read(d, pos, &buf, 2); + if (!rc) { + *p = le16_to_cpu(buf); + } + return rc; +} + +int xen_host_pci_get_long(XenHostPCIDevice *d, int pos, uint32_t *p) +{ + uint32_t buf; + int rc = xen_host_pci_config_read(d, pos, &buf, 4); + if (!rc) { + *p = le32_to_cpu(buf); + } + return rc; +} + +int xen_host_pci_get_block(XenHostPCIDevice *d, int pos, uint8_t *buf, int len) +{ + return xen_host_pci_config_read(d, pos, buf, len); +} + +int xen_host_pci_set_byte(XenHostPCIDevice *d, int pos, uint8_t data) +{ + return xen_host_pci_config_write(d, pos, &data, 1); +} + +int xen_host_pci_set_word(XenHostPCIDevice *d, int pos, uint16_t data) +{ + data = cpu_to_le16(data); + return xen_host_pci_config_write(d, pos, &data, 2); +} + +int xen_host_pci_set_long(XenHostPCIDevice *d, int pos, uint32_t data) +{ + data = cpu_to_le32(data); + return xen_host_pci_config_write(d, pos, &data, 4); +} + +int xen_host_pci_set_block(XenHostPCIDevice *d, int pos, uint8_t *buf, int len) +{ + return xen_host_pci_config_write(d, pos, buf, len); +} + +int xen_host_pci_find_ext_cap_offset(XenHostPCIDevice *d, uint32_t cap) +{ + uint32_t header = 0; + int max_cap = XEN_HOST_PCI_MAX_EXT_CAP; + int pos = PCI_CONFIG_SPACE_SIZE; + + do { + if (xen_host_pci_get_long(d, pos, &header)) { + break; + } + /* + * If we have no capabilities, this is indicated by cap ID, + * cap version and next pointer all being 0. 
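+ *
+ * (Editorial note: each extended capability header is one 32-bit word,
+ * with bits 15:0 holding the capability ID, bits 19:16 the version and
+ * bits 31:20 the offset of the next header; these are exactly the
+ * fields the PCI_EXT_CAP_ID()/PCI_EXT_CAP_NEXT() macros below extract.)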
+ */ + if (header == 0) { + break; + } + + if (PCI_EXT_CAP_ID(header) == cap) { + return pos; + } + + pos = PCI_EXT_CAP_NEXT(header); + if (pos < PCI_CONFIG_SPACE_SIZE) { + break; + } + + max_cap--; + } while (max_cap > 0); + + return -1; +} + +void xen_host_pci_device_get(XenHostPCIDevice *d, uint16_t domain, + uint8_t bus, uint8_t dev, uint8_t func, + Error **errp) +{ + ERRP_GUARD(); + unsigned int v; + + d->config_fd = -1; + d->domain = domain; + d->bus = bus; + d->dev = dev; + d->func = func; + + xen_host_pci_config_open(d, errp); + if (*errp) { + goto error; + } + + xen_host_pci_get_resource(d, errp); + if (*errp) { + goto error; + } + + xen_host_pci_get_hex_value(d, "vendor", &v, errp); + if (*errp) { + goto error; + } + d->vendor_id = v; + + xen_host_pci_get_hex_value(d, "device", &v, errp); + if (*errp) { + goto error; + } + d->device_id = v; + + xen_host_pci_get_dec_value(d, "irq", &v, errp); + if (*errp) { + goto error; + } + d->irq = v; + + xen_host_pci_get_hex_value(d, "class", &v, errp); + if (*errp) { + goto error; + } + d->class_code = v; + + d->is_virtfn = xen_host_pci_dev_is_virtfn(d); + + return; + +error: + + if (d->config_fd >= 0) { + close(d->config_fd); + d->config_fd = -1; + } +} + +bool xen_host_pci_device_closed(XenHostPCIDevice *d) +{ + return d->config_fd == -1; +} + +void xen_host_pci_device_put(XenHostPCIDevice *d) +{ + if (d->config_fd >= 0) { + close(d->config_fd); + d->config_fd = -1; + } +} diff --git a/hw/xen/xen-host-pci-device.h b/hw/xen/xen-host-pci-device.h new file mode 100644 index 000000000..4d8d34ecb --- /dev/null +++ b/hw/xen/xen-host-pci-device.h @@ -0,0 +1,58 @@ +#ifndef XEN_HOST_PCI_DEVICE_H +#define XEN_HOST_PCI_DEVICE_H + +#include "hw/pci/pci.h" + +enum { + XEN_HOST_PCI_REGION_TYPE_IO = 1 << 1, + XEN_HOST_PCI_REGION_TYPE_MEM = 1 << 2, + XEN_HOST_PCI_REGION_TYPE_PREFETCH = 1 << 3, + XEN_HOST_PCI_REGION_TYPE_MEM_64 = 1 << 4, +}; + +typedef struct XenHostPCIIORegion { + pcibus_t base_addr; + pcibus_t size; + uint8_t type; + uint8_t bus_flags; /* Bus-specific bits */ +} XenHostPCIIORegion; + +typedef struct XenHostPCIDevice { + uint16_t domain; + uint8_t bus; + uint8_t dev; + uint8_t func; + + uint16_t vendor_id; + uint16_t device_id; + uint32_t class_code; + int irq; + + XenHostPCIIORegion io_regions[PCI_NUM_REGIONS - 1]; + XenHostPCIIORegion rom; + + bool is_virtfn; + + int config_fd; +} XenHostPCIDevice; + +void xen_host_pci_device_get(XenHostPCIDevice *d, uint16_t domain, + uint8_t bus, uint8_t dev, uint8_t func, + Error **errp); +void xen_host_pci_device_put(XenHostPCIDevice *pci_dev); +bool xen_host_pci_device_closed(XenHostPCIDevice *d); + +int xen_host_pci_get_byte(XenHostPCIDevice *d, int pos, uint8_t *p); +int xen_host_pci_get_word(XenHostPCIDevice *d, int pos, uint16_t *p); +int xen_host_pci_get_long(XenHostPCIDevice *d, int pos, uint32_t *p); +int xen_host_pci_get_block(XenHostPCIDevice *d, int pos, uint8_t *buf, + int len); +int xen_host_pci_set_byte(XenHostPCIDevice *d, int pos, uint8_t data); +int xen_host_pci_set_word(XenHostPCIDevice *d, int pos, uint16_t data); +int xen_host_pci_set_long(XenHostPCIDevice *d, int pos, uint32_t data); +int xen_host_pci_set_block(XenHostPCIDevice *d, int pos, uint8_t *buf, + int len); + +int xen_host_pci_find_ext_cap_offset(XenHostPCIDevice *s, uint32_t cap); + +#endif /* XEN_HOST_PCI_DEVICE_H */ diff --git a/hw/xen/xen-legacy-backend.c b/hw/xen/xen-legacy-backend.c new file mode 100644 index 000000000..085fd31ef --- /dev/null +++ b/hw/xen/xen-legacy-backend.c @@ -0,0 +1,843 @@ +/* + * xen backend 
driver infrastructure + * (c) 2008 Gerd Hoffmann + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; under version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . + * + * Contributions after 2012-01-13 are licensed under the terms of the + * GNU GPL, version 2 or (at your option) any later version. + */ + +/* + * TODO: add some xenbus / xenstore concepts overview here. + */ + +#include "qemu/osdep.h" + +#include "hw/sysbus.h" +#include "hw/boards.h" +#include "hw/qdev-properties.h" +#include "qemu/main-loop.h" +#include "qapi/error.h" +#include "hw/xen/xen-legacy-backend.h" +#include "hw/xen/xen_pvdev.h" +#include "monitor/qdev.h" + +DeviceState *xen_sysdev; +BusState *xen_sysbus; + +/* ------------------------------------------------------------- */ + +/* public */ +struct xs_handle *xenstore; +const char *xen_protocol; + +/* private */ +static bool xen_feature_grant_copy; +static int debug; + +int xenstore_write_be_str(struct XenLegacyDevice *xendev, const char *node, + const char *val) +{ + return xenstore_write_str(xendev->be, node, val); +} + +int xenstore_write_be_int(struct XenLegacyDevice *xendev, const char *node, + int ival) +{ + return xenstore_write_int(xendev->be, node, ival); +} + +int xenstore_write_be_int64(struct XenLegacyDevice *xendev, const char *node, + int64_t ival) +{ + return xenstore_write_int64(xendev->be, node, ival); +} + +char *xenstore_read_be_str(struct XenLegacyDevice *xendev, const char *node) +{ + return xenstore_read_str(xendev->be, node); +} + +int xenstore_read_be_int(struct XenLegacyDevice *xendev, const char *node, + int *ival) +{ + return xenstore_read_int(xendev->be, node, ival); +} + +char *xenstore_read_fe_str(struct XenLegacyDevice *xendev, const char *node) +{ + return xenstore_read_str(xendev->fe, node); +} + +int xenstore_read_fe_int(struct XenLegacyDevice *xendev, const char *node, + int *ival) +{ + return xenstore_read_int(xendev->fe, node, ival); +} + +int xenstore_read_fe_uint64(struct XenLegacyDevice *xendev, const char *node, + uint64_t *uval) +{ + return xenstore_read_uint64(xendev->fe, node, uval); +} + +/* ------------------------------------------------------------- */ + +int xen_be_set_state(struct XenLegacyDevice *xendev, enum xenbus_state state) +{ + int rc; + + rc = xenstore_write_be_int(xendev, "state", state); + if (rc < 0) { + return rc; + } + xen_pv_printf(xendev, 1, "backend state: %s -> %s\n", + xenbus_strstate(xendev->be_state), xenbus_strstate(state)); + xendev->be_state = state; + return 0; +} + +void xen_be_set_max_grant_refs(struct XenLegacyDevice *xendev, + unsigned int nr_refs) +{ + assert(xendev->ops->flags & DEVOPS_FLAG_NEED_GNTDEV); + + if (xengnttab_set_max_grants(xendev->gnttabdev, nr_refs)) { + xen_pv_printf(xendev, 0, "xengnttab_set_max_grants failed: %s\n", + strerror(errno)); + } +} + +void *xen_be_map_grant_refs(struct XenLegacyDevice *xendev, uint32_t *refs, + unsigned int nr_refs, int prot) +{ + void *ptr; + + assert(xendev->ops->flags & DEVOPS_FLAG_NEED_GNTDEV); + + ptr = xengnttab_map_domain_grant_refs(xendev->gnttabdev, nr_refs, + 
xen_domid, refs, prot); + if (!ptr) { + xen_pv_printf(xendev, 0, + "xengnttab_map_domain_grant_refs failed: %s\n", + strerror(errno)); + } + + return ptr; +} + +void xen_be_unmap_grant_refs(struct XenLegacyDevice *xendev, void *ptr, + unsigned int nr_refs) +{ + assert(xendev->ops->flags & DEVOPS_FLAG_NEED_GNTDEV); + + if (xengnttab_unmap(xendev->gnttabdev, ptr, nr_refs)) { + xen_pv_printf(xendev, 0, "xengnttab_unmap failed: %s\n", + strerror(errno)); + } +} + +static int compat_copy_grant_refs(struct XenLegacyDevice *xendev, + bool to_domain, + XenGrantCopySegment segs[], + unsigned int nr_segs) +{ + uint32_t *refs = g_new(uint32_t, nr_segs); + int prot = to_domain ? PROT_WRITE : PROT_READ; + void *pages; + unsigned int i; + + for (i = 0; i < nr_segs; i++) { + XenGrantCopySegment *seg = &segs[i]; + + refs[i] = to_domain ? + seg->dest.foreign.ref : seg->source.foreign.ref; + } + + pages = xengnttab_map_domain_grant_refs(xendev->gnttabdev, nr_segs, + xen_domid, refs, prot); + if (!pages) { + xen_pv_printf(xendev, 0, + "xengnttab_map_domain_grant_refs failed: %s\n", + strerror(errno)); + g_free(refs); + return -1; + } + + for (i = 0; i < nr_segs; i++) { + XenGrantCopySegment *seg = &segs[i]; + void *page = pages + (i * XC_PAGE_SIZE); + + if (to_domain) { + memcpy(page + seg->dest.foreign.offset, seg->source.virt, + seg->len); + } else { + memcpy(seg->dest.virt, page + seg->source.foreign.offset, + seg->len); + } + } + + if (xengnttab_unmap(xendev->gnttabdev, pages, nr_segs)) { + xen_pv_printf(xendev, 0, "xengnttab_unmap failed: %s\n", + strerror(errno)); + } + + g_free(refs); + return 0; +} + +int xen_be_copy_grant_refs(struct XenLegacyDevice *xendev, + bool to_domain, + XenGrantCopySegment segs[], + unsigned int nr_segs) +{ + xengnttab_grant_copy_segment_t *xengnttab_segs; + unsigned int i; + int rc; + + assert(xendev->ops->flags & DEVOPS_FLAG_NEED_GNTDEV); + + if (!xen_feature_grant_copy) { + return compat_copy_grant_refs(xendev, to_domain, segs, nr_segs); + } + + xengnttab_segs = g_new0(xengnttab_grant_copy_segment_t, nr_segs); + + for (i = 0; i < nr_segs; i++) { + XenGrantCopySegment *seg = &segs[i]; + xengnttab_grant_copy_segment_t *xengnttab_seg = &xengnttab_segs[i]; + + if (to_domain) { + xengnttab_seg->flags = GNTCOPY_dest_gref; + xengnttab_seg->dest.foreign.domid = xen_domid; + xengnttab_seg->dest.foreign.ref = seg->dest.foreign.ref; + xengnttab_seg->dest.foreign.offset = seg->dest.foreign.offset; + xengnttab_seg->source.virt = seg->source.virt; + } else { + xengnttab_seg->flags = GNTCOPY_source_gref; + xengnttab_seg->source.foreign.domid = xen_domid; + xengnttab_seg->source.foreign.ref = seg->source.foreign.ref; + xengnttab_seg->source.foreign.offset = + seg->source.foreign.offset; + xengnttab_seg->dest.virt = seg->dest.virt; + } + + xengnttab_seg->len = seg->len; + } + + rc = xengnttab_grant_copy(xendev->gnttabdev, nr_segs, xengnttab_segs); + + if (rc) { + xen_pv_printf(xendev, 0, "xengnttab_copy failed: %s\n", + strerror(errno)); + } + + for (i = 0; i < nr_segs; i++) { + xengnttab_grant_copy_segment_t *xengnttab_seg = + &xengnttab_segs[i]; + + if (xengnttab_seg->status != GNTST_okay) { + xen_pv_printf(xendev, 0, "segment[%u] status: %d\n", i, + xengnttab_seg->status); + rc = -1; + } + } + + g_free(xengnttab_segs); + return rc; +} + +/* + * get xen backend device, allocate a new one if it doesn't exist. 
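+ * (Editorial note: devices are keyed by (type, dom, dev), and the
+ * backend xenstore node built below is "backend/<type>/<dom>/<dev>",
+ * e.g. backend/qdisk/1/51712 for a hypothetical first disk of domain 1.)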
+ */ +static struct XenLegacyDevice *xen_be_get_xendev(const char *type, int dom, + int dev, + struct XenDevOps *ops) +{ + struct XenLegacyDevice *xendev; + + xendev = xen_pv_find_xendev(type, dom, dev); + if (xendev) { + return xendev; + } + + /* init new xendev */ + xendev = g_malloc0(ops->size); + object_initialize(&xendev->qdev, ops->size, TYPE_XENBACKEND); + OBJECT(xendev)->free = g_free; + qdev_set_id(DEVICE(xendev), g_strdup_printf("xen-%s-%d", type, dev), + &error_fatal); + qdev_realize(DEVICE(xendev), xen_sysbus, &error_fatal); + object_unref(OBJECT(xendev)); + + xendev->type = type; + xendev->dom = dom; + xendev->dev = dev; + xendev->ops = ops; + + snprintf(xendev->be, sizeof(xendev->be), "backend/%s/%d/%d", + xendev->type, xendev->dom, xendev->dev); + snprintf(xendev->name, sizeof(xendev->name), "%s-%d", + xendev->type, xendev->dev); + + xendev->debug = debug; + xendev->local_port = -1; + + xendev->evtchndev = xenevtchn_open(NULL, 0); + if (xendev->evtchndev == NULL) { + xen_pv_printf(NULL, 0, "can't open evtchn device\n"); + qdev_unplug(DEVICE(xendev), NULL); + return NULL; + } + qemu_set_cloexec(xenevtchn_fd(xendev->evtchndev)); + + xen_pv_insert_xendev(xendev); + + if (xendev->ops->alloc) { + xendev->ops->alloc(xendev); + } + + return xendev; +} + + +/* + * Sync internal data structures on xenstore updates. + * Node specifies the changed field. node = NULL means + * update all fields (used for initialization). + */ +static void xen_be_backend_changed(struct XenLegacyDevice *xendev, + const char *node) +{ + if (node == NULL || strcmp(node, "online") == 0) { + if (xenstore_read_be_int(xendev, "online", &xendev->online) == -1) { + xendev->online = 0; + } + } + + if (node) { + xen_pv_printf(xendev, 2, "backend update: %s\n", node); + if (xendev->ops->backend_changed) { + xendev->ops->backend_changed(xendev, node); + } + } +} + +static void xen_be_frontend_changed(struct XenLegacyDevice *xendev, + const char *node) +{ + int fe_state; + + if (node == NULL || strcmp(node, "state") == 0) { + if (xenstore_read_fe_int(xendev, "state", &fe_state) == -1) { + fe_state = XenbusStateUnknown; + } + if (xendev->fe_state != fe_state) { + xen_pv_printf(xendev, 1, "frontend state: %s -> %s\n", + xenbus_strstate(xendev->fe_state), + xenbus_strstate(fe_state)); + } + xendev->fe_state = fe_state; + } + if (node == NULL || strcmp(node, "protocol") == 0) { + g_free(xendev->protocol); + xendev->protocol = xenstore_read_fe_str(xendev, "protocol"); + if (xendev->protocol) { + xen_pv_printf(xendev, 1, "frontend protocol: %s\n", + xendev->protocol); + } + } + + if (node) { + xen_pv_printf(xendev, 2, "frontend update: %s\n", node); + if (xendev->ops->frontend_changed) { + xendev->ops->frontend_changed(xendev, node); + } + } +} + +/* ------------------------------------------------------------- */ +/* Check for possible state transitions and perform them. */ + +/* + * Initial xendev setup. Read frontend path, register watch for it. + * Should succeed once xend finished setting up the backend device. + * + * Also sets initial state (-> Initializing) when done. Which + * only affects the xendev->be_state variable as xenbus should + * already be put into that state by xend. 
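+ *
+ * (Editorial note: the frontend watch set up below uses the token
+ * "fe:%p", embedding the xendev pointer, so that the watch dispatcher
+ * can recover the device and call xenstore_update_fe() for it.)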
+ */ +static int xen_be_try_setup(struct XenLegacyDevice *xendev) +{ + char token[XEN_BUFSIZE]; + int be_state; + + if (xenstore_read_be_int(xendev, "state", &be_state) == -1) { + xen_pv_printf(xendev, 0, "reading backend state failed\n"); + return -1; + } + + if (be_state != XenbusStateInitialising) { + xen_pv_printf(xendev, 0, "initial backend state is wrong (%s)\n", + xenbus_strstate(be_state)); + return -1; + } + + xendev->fe = xenstore_read_be_str(xendev, "frontend"); + if (xendev->fe == NULL) { + xen_pv_printf(xendev, 0, "reading frontend path failed\n"); + return -1; + } + + /* setup frontend watch */ + snprintf(token, sizeof(token), "fe:%p", xendev); + if (!xs_watch(xenstore, xendev->fe, token)) { + xen_pv_printf(xendev, 0, "watching frontend path (%s) failed\n", + xendev->fe); + return -1; + } + xen_be_set_state(xendev, XenbusStateInitialising); + + xen_be_backend_changed(xendev, NULL); + xen_be_frontend_changed(xendev, NULL); + return 0; +} + +/* + * Try initialize xendev. Prepare everything the backend can do + * without synchronizing with the frontend. Fakes hotplug-status. No + * hotplug involved here because this is about userspace drivers, thus + * there are kernel backend devices which could invoke hotplug. + * + * Goes to InitWait on success. + */ +static int xen_be_try_init(struct XenLegacyDevice *xendev) +{ + int rc = 0; + + if (!xendev->online) { + xen_pv_printf(xendev, 1, "not online\n"); + return -1; + } + + if (xendev->ops->init) { + rc = xendev->ops->init(xendev); + } + if (rc != 0) { + xen_pv_printf(xendev, 1, "init() failed\n"); + return rc; + } + + xenstore_write_be_str(xendev, "hotplug-status", "connected"); + xen_be_set_state(xendev, XenbusStateInitWait); + return 0; +} + +/* + * Try to initialise xendev. Depends on the frontend being ready + * for it (shared ring and evtchn info in xenstore, state being + * Initialised or Connected). + * + * Goes to Connected on success. + */ +static int xen_be_try_initialise(struct XenLegacyDevice *xendev) +{ + int rc = 0; + + if (xendev->fe_state != XenbusStateInitialised && + xendev->fe_state != XenbusStateConnected) { + if (xendev->ops->flags & DEVOPS_FLAG_IGNORE_STATE) { + xen_pv_printf(xendev, 2, "frontend not ready, ignoring\n"); + } else { + xen_pv_printf(xendev, 2, "frontend not ready (yet)\n"); + return -1; + } + } + + if (xendev->ops->flags & DEVOPS_FLAG_NEED_GNTDEV) { + xendev->gnttabdev = xengnttab_open(NULL, 0); + if (xendev->gnttabdev == NULL) { + xen_pv_printf(NULL, 0, "can't open gnttab device\n"); + return -1; + } + } else { + xendev->gnttabdev = NULL; + } + + if (xendev->ops->initialise) { + rc = xendev->ops->initialise(xendev); + } + if (rc != 0) { + xen_pv_printf(xendev, 0, "initialise() failed\n"); + return rc; + } + + xen_be_set_state(xendev, XenbusStateConnected); + return 0; +} + +/* + * Try to let xendev know that it is connected. Depends on the + * frontend being Connected. Note that this may be called more + * than once since the backend state is not modified. + */ +static void xen_be_try_connected(struct XenLegacyDevice *xendev) +{ + if (!xendev->ops->connected) { + return; + } + + if (xendev->fe_state != XenbusStateConnected) { + if (xendev->ops->flags & DEVOPS_FLAG_IGNORE_STATE) { + xen_pv_printf(xendev, 2, "frontend not ready, ignoring\n"); + } else { + xen_pv_printf(xendev, 2, "frontend not ready (yet)\n"); + return; + } + } + + xendev->ops->connected(xendev); +} + +/* + * Teardown connection. + * + * Goes to Closed when done. 
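+ *
+ * (Editorial note: this also closes the per-device grant-table handle,
+ * so a frontend that later reconnects via xen_be_try_reset() starts
+ * from a clean slate.)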
+ */ +static void xen_be_disconnect(struct XenLegacyDevice *xendev, + enum xenbus_state state) +{ + if (xendev->be_state != XenbusStateClosing && + xendev->be_state != XenbusStateClosed && + xendev->ops->disconnect) { + xendev->ops->disconnect(xendev); + } + if (xendev->gnttabdev) { + xengnttab_close(xendev->gnttabdev); + xendev->gnttabdev = NULL; + } + if (xendev->be_state != state) { + xen_be_set_state(xendev, state); + } +} + +/* + * Try to reset xendev, for reconnection by another frontend instance. + */ +static int xen_be_try_reset(struct XenLegacyDevice *xendev) +{ + if (xendev->fe_state != XenbusStateInitialising) { + return -1; + } + + xen_pv_printf(xendev, 1, "device reset (for re-connect)\n"); + xen_be_set_state(xendev, XenbusStateInitialising); + return 0; +} + +/* + * state change dispatcher function + */ +void xen_be_check_state(struct XenLegacyDevice *xendev) +{ + int rc = 0; + + /* frontend may request shutdown from almost anywhere */ + if (xendev->fe_state == XenbusStateClosing || + xendev->fe_state == XenbusStateClosed) { + xen_be_disconnect(xendev, xendev->fe_state); + return; + } + + /* check for possible backend state transitions */ + for (;;) { + switch (xendev->be_state) { + case XenbusStateUnknown: + rc = xen_be_try_setup(xendev); + break; + case XenbusStateInitialising: + rc = xen_be_try_init(xendev); + break; + case XenbusStateInitWait: + rc = xen_be_try_initialise(xendev); + break; + case XenbusStateConnected: + /* xendev->be_state doesn't change */ + xen_be_try_connected(xendev); + rc = -1; + break; + case XenbusStateClosed: + rc = xen_be_try_reset(xendev); + break; + default: + rc = -1; + } + if (rc != 0) { + break; + } + } +} + +/* ------------------------------------------------------------- */ + +static int xenstore_scan(const char *type, int dom, struct XenDevOps *ops) +{ + struct XenLegacyDevice *xendev; + char path[XEN_BUFSIZE], token[XEN_BUFSIZE]; + char **dev = NULL; + unsigned int cdev, j; + + /* setup watch */ + snprintf(token, sizeof(token), "be:%p:%d:%p", type, dom, ops); + snprintf(path, sizeof(path), "backend/%s/%d", type, dom); + if (!xs_watch(xenstore, path, token)) { + xen_pv_printf(NULL, 0, "xen be: watching backend path (%s) failed\n", + path); + return -1; + } + + /* look for backends */ + dev = xs_directory(xenstore, 0, path, &cdev); + if (!dev) { + return 0; + } + for (j = 0; j < cdev; j++) { + xendev = xen_be_get_xendev(type, dom, atoi(dev[j]), ops); + if (xendev == NULL) { + continue; + } + xen_be_check_state(xendev); + } + free(dev); + return 0; +} + +void xenstore_update_be(char *watch, char *type, int dom, + struct XenDevOps *ops) +{ + struct XenLegacyDevice *xendev; + char path[XEN_BUFSIZE], *bepath; + unsigned int len, dev; + + len = snprintf(path, sizeof(path), "backend/%s/%d", type, dom); + if (strncmp(path, watch, len) != 0) { + return; + } + if (sscanf(watch + len, "/%u/%255s", &dev, path) != 2) { + strcpy(path, ""); + if (sscanf(watch + len, "/%u", &dev) != 1) { + dev = -1; + } + } + if (dev == -1) { + return; + } + + xendev = xen_be_get_xendev(type, dom, dev, ops); + if (xendev != NULL) { + bepath = xs_read(xenstore, 0, xendev->be, &len); + if (bepath == NULL) { + xen_pv_del_xendev(xendev); + } else { + free(bepath); + xen_be_backend_changed(xendev, path); + xen_be_check_state(xendev); + } + } +} + +void xenstore_update_fe(char *watch, struct XenLegacyDevice *xendev) +{ + char *node; + unsigned int len; + + len = strlen(xendev->fe); + if (strncmp(xendev->fe, watch, len) != 0) { + return; + } + if (watch[len] != '/') { + return; 
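+        /* the fe directory itself, or an unrelated sibling path */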
+ } + node = watch + len + 1; + + xen_be_frontend_changed(xendev, node); + xen_be_check_state(xendev); +} +/* -------------------------------------------------------------------- */ + +int xen_be_init(void) +{ + xengnttab_handle *gnttabdev; + + xenstore = xs_daemon_open(); + if (!xenstore) { + xen_pv_printf(NULL, 0, "can't connect to xenstored\n"); + return -1; + } + + qemu_set_fd_handler(xs_fileno(xenstore), xenstore_update, NULL, NULL); + + if (xen_xc == NULL || xen_fmem == NULL) { + /* Check if xen_init() have been called */ + goto err; + } + + gnttabdev = xengnttab_open(NULL, 0); + if (gnttabdev != NULL) { + if (xengnttab_grant_copy(gnttabdev, 0, NULL) == 0) { + xen_feature_grant_copy = true; + } + xengnttab_close(gnttabdev); + } + + xen_sysdev = qdev_new(TYPE_XENSYSDEV); + sysbus_realize_and_unref(SYS_BUS_DEVICE(xen_sysdev), &error_fatal); + xen_sysbus = qbus_new(TYPE_XENSYSBUS, xen_sysdev, "xen-sysbus"); + qbus_set_bus_hotplug_handler(xen_sysbus); + + return 0; + +err: + qemu_set_fd_handler(xs_fileno(xenstore), NULL, NULL, NULL); + xs_daemon_close(xenstore); + xenstore = NULL; + + return -1; +} + +static void xen_set_dynamic_sysbus(void) +{ + Object *machine = qdev_get_machine(); + ObjectClass *oc = object_get_class(machine); + MachineClass *mc = MACHINE_CLASS(oc); + + machine_class_allow_dynamic_sysbus_dev(mc, TYPE_XENSYSDEV); +} + +int xen_be_register(const char *type, struct XenDevOps *ops) +{ + char path[50]; + int rc; + + if (ops->backend_register) { + rc = ops->backend_register(); + if (rc) { + return rc; + } + } + + snprintf(path, sizeof(path), "device-model/%u/backends/%s", xen_domid, + type); + xenstore_mkdir(path, XS_PERM_NONE); + + return xenstore_scan(type, xen_domid, ops); +} + +void xen_be_register_common(void) +{ + xen_set_dynamic_sysbus(); + + xen_be_register("console", &xen_console_ops); + xen_be_register("vkbd", &xen_kbdmouse_ops); +#ifdef CONFIG_VIRTFS + xen_be_register("9pfs", &xen_9pfs_ops); +#endif +#ifdef CONFIG_USB_LIBUSB + xen_be_register("qusb", &xen_usb_ops); +#endif +} + +int xen_be_bind_evtchn(struct XenLegacyDevice *xendev) +{ + if (xendev->local_port != -1) { + return 0; + } + xendev->local_port = xenevtchn_bind_interdomain + (xendev->evtchndev, xendev->dom, xendev->remote_port); + if (xendev->local_port == -1) { + xen_pv_printf(xendev, 0, "xenevtchn_bind_interdomain failed\n"); + return -1; + } + xen_pv_printf(xendev, 2, "bind evtchn port %d\n", xendev->local_port); + qemu_set_fd_handler(xenevtchn_fd(xendev->evtchndev), + xen_pv_evtchn_event, NULL, xendev); + return 0; +} + + +static Property xendev_properties[] = { + DEFINE_PROP_END_OF_LIST(), +}; + +static void xendev_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + device_class_set_props(dc, xendev_properties); + set_bit(DEVICE_CATEGORY_MISC, dc->categories); + /* xen-backend devices can be plugged/unplugged dynamically */ + dc->user_creatable = true; + dc->bus_type = TYPE_XENSYSBUS; +} + +static const TypeInfo xendev_type_info = { + .name = TYPE_XENBACKEND, + .parent = TYPE_DEVICE, + .class_init = xendev_class_init, + .instance_size = sizeof(struct XenLegacyDevice), +}; + +static void xen_sysbus_class_init(ObjectClass *klass, void *data) +{ + HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(klass); + + hc->unplug = qdev_simple_device_unplug_cb; +} + +static const TypeInfo xensysbus_info = { + .name = TYPE_XENSYSBUS, + .parent = TYPE_BUS, + .class_init = xen_sysbus_class_init, + .interfaces = (InterfaceInfo[]) { + { TYPE_HOTPLUG_HANDLER }, + { } + } +}; + 
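+/*
+ * Editorial sketch, not part of the patch: a minimal legacy backend as
+ * it would be registered through xen_be_register(). The "mydev" type
+ * and the my_* callbacks are hypothetical; the XenDevOps fields are the
+ * ones consulted by the state machine above.
+ *
+ *   static int my_init(struct XenLegacyDevice *xendev)        // hypothetical
+ *   {
+ *       return 0;            // success: backend moves to InitWait
+ *   }
+ *
+ *   static int my_initialise(struct XenLegacyDevice *xendev)  // hypothetical
+ *   {
+ *       return 0;            // success: backend moves to Connected
+ *   }
+ *
+ *   static struct XenDevOps my_ops = {
+ *       .size       = sizeof(struct XenLegacyDevice),
+ *       .flags      = DEVOPS_FLAG_NEED_GNTDEV,
+ *       .init       = my_init,
+ *       .initialise = my_initialise,
+ *   };
+ *
+ *   // after xen_be_init() has connected to xenstored:
+ *   xen_be_register("mydev", &my_ops);
+ */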
+static Property xen_sysdev_properties[] = { + {/* end of property list */}, +}; + +static void xen_sysdev_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + device_class_set_props(dc, xen_sysdev_properties); +} + +static const TypeInfo xensysdev_info = { + .name = TYPE_XENSYSDEV, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(SysBusDevice), + .class_init = xen_sysdev_class_init, +}; + +static void xenbe_register_types(void) +{ + type_register_static(&xensysbus_info); + type_register_static(&xensysdev_info); + type_register_static(&xendev_type_info); +} + +type_init(xenbe_register_types) diff --git a/hw/xen/xen_devconfig.c b/hw/xen/xen_devconfig.c new file mode 100644 index 000000000..46ee4a7f0 --- /dev/null +++ b/hw/xen/xen_devconfig.c @@ -0,0 +1,129 @@ +#include "qemu/osdep.h" +#include "hw/xen/xen-legacy-backend.h" +#include "qemu/option.h" +#include "sysemu/blockdev.h" +#include "sysemu/sysemu.h" + +/* ------------------------------------------------------------- */ + +static int xen_config_dev_dirs(const char *ftype, const char *btype, int vdev, + char *fe, char *be, int len) +{ + char *dom; + + dom = xs_get_domain_path(xenstore, xen_domid); + snprintf(fe, len, "%s/device/%s/%d", dom, ftype, vdev); + free(dom); + + dom = xs_get_domain_path(xenstore, 0); + snprintf(be, len, "%s/backend/%s/%d/%d", dom, btype, xen_domid, vdev); + free(dom); + + xenstore_mkdir(fe, XS_PERM_READ | XS_PERM_WRITE); + xenstore_mkdir(be, XS_PERM_READ); + return 0; +} + +static int xen_config_dev_all(char *fe, char *be) +{ + /* frontend */ + if (xen_protocol) + xenstore_write_str(fe, "protocol", xen_protocol); + + xenstore_write_int(fe, "state", XenbusStateInitialising); + xenstore_write_int(fe, "backend-id", 0); + xenstore_write_str(fe, "backend", be); + + /* backend */ + xenstore_write_str(be, "domain", qemu_name ? qemu_name : "no-name"); + xenstore_write_int(be, "online", 1); + xenstore_write_int(be, "state", XenbusStateInitialising); + xenstore_write_int(be, "frontend-id", xen_domid); + xenstore_write_str(be, "frontend", fe); + + return 0; +} + +/* ------------------------------------------------------------- */ + +int xen_config_dev_blk(DriveInfo *disk) +{ + char fe[256], be[256], device_name[32]; + int vdev = 202 * 256 + 16 * disk->unit; + int cdrom = disk->media_cd; + const char *devtype = cdrom ? "cdrom" : "disk"; + const char *mode = cdrom ? 
"r" : "w"; + const char *filename = qemu_opt_get(disk->opts, "file"); + + snprintf(device_name, sizeof(device_name), "xvd%c", 'a' + disk->unit); + xen_pv_printf(NULL, 1, "config disk %d [%s]: %s\n", + disk->unit, device_name, filename); + xen_config_dev_dirs("vbd", "qdisk", vdev, fe, be, sizeof(fe)); + + /* frontend */ + xenstore_write_int(fe, "virtual-device", vdev); + xenstore_write_str(fe, "device-type", devtype); + + /* backend */ + xenstore_write_str(be, "dev", device_name); + xenstore_write_str(be, "type", "file"); + xenstore_write_str(be, "params", filename); + xenstore_write_str(be, "mode", mode); + + /* common stuff */ + return xen_config_dev_all(fe, be); +} + +int xen_config_dev_nic(NICInfo *nic) +{ + char fe[256], be[256]; + char mac[20]; + int vlan_id = -1; + + net_hub_id_for_client(nic->netdev, &vlan_id); + snprintf(mac, sizeof(mac), "%02x:%02x:%02x:%02x:%02x:%02x", + nic->macaddr.a[0], nic->macaddr.a[1], nic->macaddr.a[2], + nic->macaddr.a[3], nic->macaddr.a[4], nic->macaddr.a[5]); + xen_pv_printf(NULL, 1, "config nic %d: mac=\"%s\"\n", vlan_id, mac); + xen_config_dev_dirs("vif", "qnic", vlan_id, fe, be, sizeof(fe)); + + /* frontend */ + xenstore_write_int(fe, "handle", vlan_id); + xenstore_write_str(fe, "mac", mac); + + /* backend */ + xenstore_write_int(be, "handle", vlan_id); + xenstore_write_str(be, "mac", mac); + + /* common stuff */ + return xen_config_dev_all(fe, be); +} + +int xen_config_dev_vfb(int vdev, const char *type) +{ + char fe[256], be[256]; + + xen_config_dev_dirs("vfb", "vfb", vdev, fe, be, sizeof(fe)); + + /* backend */ + xenstore_write_str(be, "type", type); + + /* common stuff */ + return xen_config_dev_all(fe, be); +} + +int xen_config_dev_vkbd(int vdev) +{ + char fe[256], be[256]; + + xen_config_dev_dirs("vkbd", "vkbd", vdev, fe, be, sizeof(fe)); + return xen_config_dev_all(fe, be); +} + +int xen_config_dev_console(int vdev) +{ + char fe[256], be[256]; + + xen_config_dev_dirs("console", "console", vdev, fe, be, sizeof(fe)); + return xen_config_dev_all(fe, be); +} diff --git a/hw/xen/xen_pt.c b/hw/xen/xen_pt.c new file mode 100644 index 000000000..027190fa4 --- /dev/null +++ b/hw/xen/xen_pt.c @@ -0,0 +1,1005 @@ +/* + * Copyright (c) 2007, Neocleus Corporation. + * Copyright (c) 2007, Intel Corporation. + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + * + * Alex Novik + * Allen Kay + * Guy Zana + * + * This file implements direct PCI assignment to a HVM guest + */ + +/* + * Interrupt Disable policy: + * + * INTx interrupt: + * Initialize(register_real_device) + * Map INTx(xc_physdev_map_pirq): + * + * - Set real Interrupt Disable bit to '1'. + * - Set machine_irq and assigned_device->machine_irq to '0'. + * * Don't bind INTx. + * + * Bind INTx(xc_domain_bind_pt_pci_irq): + * + * - Set real Interrupt Disable bit to '1'. + * - Unmap INTx. + * - Decrement xen_pt_mapped_machine_irq[machine_irq] + * - Set assigned_device->machine_irq to '0'. + * + * Write to Interrupt Disable bit by guest software(xen_pt_cmd_reg_write) + * Write '0' + * - Set real bit to '0' if assigned_device->machine_irq isn't '0'. + * + * Write '1' + * - Set real bit to '1'. + * + * MSI interrupt: + * Initialize MSI register(xen_pt_msi_setup, xen_pt_msi_update) + * Bind MSI(xc_domain_update_msi_irq) + * + * - Unmap MSI. + * - Set dev->msi->pirq to '-1'. + * + * MSI-X interrupt: + * Initialize MSI-X register(xen_pt_msix_update_one) + * Bind MSI-X(xc_domain_update_msi_irq) + * + * - Unmap MSI-X. 
+ * - Set entry->pirq to '-1'. + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include + +#include "hw/pci/pci.h" +#include "hw/qdev-properties.h" +#include "hw/qdev-properties-system.h" +#include "hw/xen/xen.h" +#include "hw/i386/pc.h" +#include "hw/xen/xen-legacy-backend.h" +#include "xen_pt.h" +#include "qemu/range.h" + +static bool has_igd_gfx_passthru; + +bool xen_igd_gfx_pt_enabled(void) +{ + return has_igd_gfx_passthru; +} + +void xen_igd_gfx_pt_set(bool value, Error **errp) +{ + has_igd_gfx_passthru = value; +} + +#define XEN_PT_NR_IRQS (256) +static uint8_t xen_pt_mapped_machine_irq[XEN_PT_NR_IRQS] = {0}; + +void xen_pt_log(const PCIDevice *d, const char *f, ...) +{ + va_list ap; + + va_start(ap, f); + if (d) { + fprintf(stderr, "[%02x:%02x.%d] ", pci_dev_bus_num(d), + PCI_SLOT(d->devfn), PCI_FUNC(d->devfn)); + } + vfprintf(stderr, f, ap); + va_end(ap); +} + +/* Config Space */ + +static int xen_pt_pci_config_access_check(PCIDevice *d, uint32_t addr, int len) +{ + /* check offset range */ + if (addr > 0xFF) { + XEN_PT_ERR(d, "Failed to access register with offset exceeding 0xFF. " + "(addr: 0x%02x, len: %d)\n", addr, len); + return -1; + } + + /* check read size */ + if ((len != 1) && (len != 2) && (len != 4)) { + XEN_PT_ERR(d, "Failed to access register with invalid access length. " + "(addr: 0x%02x, len: %d)\n", addr, len); + return -1; + } + + /* check offset alignment */ + if (addr & (len - 1)) { + XEN_PT_ERR(d, "Failed to access register with invalid access size " + "alignment. (addr: 0x%02x, len: %d)\n", addr, len); + return -1; + } + + return 0; +} + +int xen_pt_bar_offset_to_index(uint32_t offset) +{ + int index = 0; + + /* check Exp ROM BAR */ + if (offset == PCI_ROM_ADDRESS) { + return PCI_ROM_SLOT; + } + + /* calculate BAR index */ + index = (offset - PCI_BASE_ADDRESS_0) >> 2; + if (index >= PCI_NUM_REGIONS) { + return -1; + } + + return index; +} + +static uint32_t xen_pt_pci_read_config(PCIDevice *d, uint32_t addr, int len) +{ + XenPCIPassthroughState *s = XEN_PT_DEVICE(d); + uint32_t val = 0; + XenPTRegGroup *reg_grp_entry = NULL; + XenPTReg *reg_entry = NULL; + int rc = 0; + int emul_len = 0; + uint32_t find_addr = addr; + + if (xen_pt_pci_config_access_check(d, addr, len)) { + goto exit; + } + + /* find register group entry */ + reg_grp_entry = xen_pt_find_reg_grp(s, addr); + if (reg_grp_entry) { + /* check 0-Hardwired register group */ + if (reg_grp_entry->reg_grp->grp_type == XEN_PT_GRP_TYPE_HARDWIRED) { + /* no need to emulate, just return 0 */ + val = 0; + goto exit; + } + } + + /* read I/O device register value */ + rc = xen_host_pci_get_block(&s->real_device, addr, (uint8_t *)&val, len); + if (rc < 0) { + XEN_PT_ERR(d, "pci_read_block failed. 
return value: %d.\n", rc); + memset(&val, 0xff, len); + } + + /* just return the I/O device register value for + * passthrough type register group */ + if (reg_grp_entry == NULL) { + goto exit; + } + + /* adjust the read value to appropriate CFC-CFF window */ + val <<= (addr & 3) << 3; + emul_len = len; + + /* loop around the guest requested size */ + while (emul_len > 0) { + /* find register entry to be emulated */ + reg_entry = xen_pt_find_reg(reg_grp_entry, find_addr); + if (reg_entry) { + XenPTRegInfo *reg = reg_entry->reg; + uint32_t real_offset = reg_grp_entry->base_offset + reg->offset; + uint32_t valid_mask = 0xFFFFFFFF >> ((4 - emul_len) << 3); + uint8_t *ptr_val = NULL; + + valid_mask <<= (find_addr - real_offset) << 3; + ptr_val = (uint8_t *)&val + (real_offset & 3); + + /* do emulation based on register size */ + switch (reg->size) { + case 1: + if (reg->u.b.read) { + rc = reg->u.b.read(s, reg_entry, ptr_val, valid_mask); + } + break; + case 2: + if (reg->u.w.read) { + rc = reg->u.w.read(s, reg_entry, + (uint16_t *)ptr_val, valid_mask); + } + break; + case 4: + if (reg->u.dw.read) { + rc = reg->u.dw.read(s, reg_entry, + (uint32_t *)ptr_val, valid_mask); + } + break; + } + + if (rc < 0) { + xen_shutdown_fatal_error("Internal error: Invalid read " + "emulation. (%s, rc: %d)\n", + __func__, rc); + return 0; + } + + /* calculate next address to find */ + emul_len -= reg->size; + if (emul_len > 0) { + find_addr = real_offset + reg->size; + } + } else { + /* nothing to do with passthrough type register, + * continue to find next byte */ + emul_len--; + find_addr++; + } + } + + /* need to shift back before returning them to pci bus emulator */ + val >>= ((addr & 3) << 3); + +exit: + XEN_PT_LOG_CONFIG(d, addr, val, len); + return val; +} + +static void xen_pt_pci_write_config(PCIDevice *d, uint32_t addr, + uint32_t val, int len) +{ + XenPCIPassthroughState *s = XEN_PT_DEVICE(d); + int index = 0; + XenPTRegGroup *reg_grp_entry = NULL; + int rc = 0; + uint32_t read_val = 0, wb_mask; + int emul_len = 0; + XenPTReg *reg_entry = NULL; + uint32_t find_addr = addr; + XenPTRegInfo *reg = NULL; + bool wp_flag = false; + + if (xen_pt_pci_config_access_check(d, addr, len)) { + return; + } + + XEN_PT_LOG_CONFIG(d, addr, val, len); + + /* check unused BAR register */ + index = xen_pt_bar_offset_to_index(addr); + if ((index >= 0) && (val != 0)) { + uint32_t chk = val; + + if (index == PCI_ROM_SLOT) + chk |= (uint32_t)~PCI_ROM_ADDRESS_MASK; + + if ((chk != XEN_PT_BAR_ALLF) && + (s->bases[index].bar_flag == XEN_PT_BAR_FLAG_UNUSED)) { + XEN_PT_WARN(d, "Guest attempt to set address to unused " + "Base Address Register. (addr: 0x%02x, len: %d)\n", + addr, len); + } + } + + /* find register group entry */ + reg_grp_entry = xen_pt_find_reg_grp(s, addr); + if (reg_grp_entry) { + /* check 0-Hardwired register group */ + if (reg_grp_entry->reg_grp->grp_type == XEN_PT_GRP_TYPE_HARDWIRED) { + /* ignore silently */ + XEN_PT_WARN(d, "Access to 0-Hardwired register. " + "(addr: 0x%02x, len: %d)\n", addr, len); + return; + } + } + + rc = xen_host_pci_get_block(&s->real_device, addr, + (uint8_t *)&read_val, len); + if (rc < 0) { + XEN_PT_ERR(d, "pci_read_block failed. 
return value: %d.\n", rc); + memset(&read_val, 0xff, len); + wb_mask = 0; + } else { + wb_mask = 0xFFFFFFFF >> ((4 - len) << 3); + } + + /* pass directly to the real device for passthrough type register group */ + if (reg_grp_entry == NULL) { + if (!s->permissive) { + wb_mask = 0; + wp_flag = true; + } + goto out; + } + + memory_region_transaction_begin(); + pci_default_write_config(d, addr, val, len); + + /* adjust the read and write value to appropriate CFC-CFF window */ + read_val <<= (addr & 3) << 3; + val <<= (addr & 3) << 3; + emul_len = len; + + /* loop around the guest requested size */ + while (emul_len > 0) { + /* find register entry to be emulated */ + reg_entry = xen_pt_find_reg(reg_grp_entry, find_addr); + if (reg_entry) { + reg = reg_entry->reg; + uint32_t real_offset = reg_grp_entry->base_offset + reg->offset; + uint32_t valid_mask = 0xFFFFFFFF >> ((4 - emul_len) << 3); + uint8_t *ptr_val = NULL; + uint32_t wp_mask = reg->emu_mask | reg->ro_mask; + + valid_mask <<= (find_addr - real_offset) << 3; + ptr_val = (uint8_t *)&val + (real_offset & 3); + if (!s->permissive) { + wp_mask |= reg->res_mask; + } + if (wp_mask == (0xFFFFFFFF >> ((4 - reg->size) << 3))) { + wb_mask &= ~((wp_mask >> ((find_addr - real_offset) << 3)) + << ((len - emul_len) << 3)); + } + + /* do emulation based on register size */ + switch (reg->size) { + case 1: + if (reg->u.b.write) { + rc = reg->u.b.write(s, reg_entry, ptr_val, + read_val >> ((real_offset & 3) << 3), + valid_mask); + } + break; + case 2: + if (reg->u.w.write) { + rc = reg->u.w.write(s, reg_entry, (uint16_t *)ptr_val, + (read_val >> ((real_offset & 3) << 3)), + valid_mask); + } + break; + case 4: + if (reg->u.dw.write) { + rc = reg->u.dw.write(s, reg_entry, (uint32_t *)ptr_val, + (read_val >> ((real_offset & 3) << 3)), + valid_mask); + } + break; + } + + if (rc < 0) { + xen_shutdown_fatal_error("Internal error: Invalid write" + " emulation. (%s, rc: %d)\n", + __func__, rc); + return; + } + + /* calculate next address to find */ + emul_len -= reg->size; + if (emul_len > 0) { + find_addr = real_offset + reg->size; + } + } else { + /* nothing to do with passthrough type register, + * continue to find next byte */ + if (!s->permissive) { + wb_mask &= ~(0xff << ((len - emul_len) << 3)); + /* Unused BARs will make it here, but we don't want to issue + * warnings for writes to them (bogus writes get dealt with + * above). + */ + if (index < 0) { + wp_flag = true; + } + } + emul_len--; + find_addr++; + } + } + + /* need to shift back before passing them to xen_host_pci_set_block. */ + val >>= (addr & 3) << 3; + + memory_region_transaction_commit(); + +out: + if (wp_flag && !s->permissive_warned) { + s->permissive_warned = true; + xen_pt_log(d, "Write-back to unknown field 0x%02x (partially) inhibited (0x%0*x)\n", + addr, len * 2, wb_mask); + xen_pt_log(d, "If the device doesn't work, try enabling permissive mode\n"); + xen_pt_log(d, "(unsafe) and if it helps report the problem to xen-devel\n"); + } + for (index = 0; wb_mask; index += len) { + /* unknown regs are passed through */ + while (!(wb_mask & 0xff)) { + index++; + wb_mask >>= 8; + } + len = 0; + do { + len++; + wb_mask >>= 8; + } while (wb_mask & 0xff); + rc = xen_host_pci_set_block(&s->real_device, addr + index, + (uint8_t *)&val + index, len); + + if (rc < 0) { + XEN_PT_ERR(d, "xen_host_pci_set_block failed. 
return value: %d.\n", rc); + } + } +} + +/* register regions */ + +static uint64_t xen_pt_bar_read(void *o, hwaddr addr, + unsigned size) +{ + PCIDevice *d = o; + /* if this function is called, that probably means that there is a + * misconfiguration of the IOMMU. */ + XEN_PT_ERR(d, "Should not read BAR through QEMU. @0x"TARGET_FMT_plx"\n", + addr); + return 0; +} +static void xen_pt_bar_write(void *o, hwaddr addr, uint64_t val, + unsigned size) +{ + PCIDevice *d = o; + /* Same comment as xen_pt_bar_read function */ + XEN_PT_ERR(d, "Should not write BAR through QEMU. @0x"TARGET_FMT_plx"\n", + addr); +} + +static const MemoryRegionOps ops = { + .endianness = DEVICE_NATIVE_ENDIAN, + .read = xen_pt_bar_read, + .write = xen_pt_bar_write, +}; + +static int xen_pt_register_regions(XenPCIPassthroughState *s, uint16_t *cmd) +{ + int i = 0; + XenHostPCIDevice *d = &s->real_device; + + /* Register PIO/MMIO BARs */ + for (i = 0; i < PCI_ROM_SLOT; i++) { + XenHostPCIIORegion *r = &d->io_regions[i]; + uint8_t type; + + if (r->base_addr == 0 || r->size == 0) { + continue; + } + + s->bases[i].access.u = r->base_addr; + + if (r->type & XEN_HOST_PCI_REGION_TYPE_IO) { + type = PCI_BASE_ADDRESS_SPACE_IO; + *cmd |= PCI_COMMAND_IO; + } else { + type = PCI_BASE_ADDRESS_SPACE_MEMORY; + if (r->type & XEN_HOST_PCI_REGION_TYPE_PREFETCH) { + type |= PCI_BASE_ADDRESS_MEM_PREFETCH; + } + if (r->type & XEN_HOST_PCI_REGION_TYPE_MEM_64) { + type |= PCI_BASE_ADDRESS_MEM_TYPE_64; + } + *cmd |= PCI_COMMAND_MEMORY; + } + + memory_region_init_io(&s->bar[i], OBJECT(s), &ops, &s->dev, + "xen-pci-pt-bar", r->size); + pci_register_bar(&s->dev, i, type, &s->bar[i]); + + XEN_PT_LOG(&s->dev, "IO region %i registered (size=0x%08"PRIx64 + " base_addr=0x%08"PRIx64" type: 0x%x)\n", + i, r->size, r->base_addr, type); + } + + /* Register expansion ROM address */ + if (d->rom.base_addr && d->rom.size) { + uint32_t bar_data = 0; + + /* Re-set BAR reported by OS, otherwise ROM can't be read. */ + if (xen_host_pci_get_long(d, PCI_ROM_ADDRESS, &bar_data)) { + return 0; + } + if ((bar_data & PCI_ROM_ADDRESS_MASK) == 0) { + bar_data |= d->rom.base_addr & PCI_ROM_ADDRESS_MASK; + xen_host_pci_set_long(d, PCI_ROM_ADDRESS, bar_data); + } + + s->bases[PCI_ROM_SLOT].access.maddr = d->rom.base_addr; + + memory_region_init_io(&s->rom, OBJECT(s), &ops, &s->dev, + "xen-pci-pt-rom", d->rom.size); + pci_register_bar(&s->dev, PCI_ROM_SLOT, PCI_BASE_ADDRESS_MEM_PREFETCH, + &s->rom); + + XEN_PT_LOG(&s->dev, "Expansion ROM registered (size=0x%08"PRIx64 + " base_addr=0x%08"PRIx64")\n", + d->rom.size, d->rom.base_addr); + } + + xen_pt_register_vga_regions(d); + return 0; +} + +/* region mapping */ + +static int xen_pt_bar_from_region(XenPCIPassthroughState *s, MemoryRegion *mr) +{ + int i = 0; + + for (i = 0; i < PCI_NUM_REGIONS - 1; i++) { + if (mr == &s->bar[i]) { + return i; + } + } + if (mr == &s->rom) { + return PCI_ROM_SLOT; + } + return -1; +} + +/* + * This function checks if an io_region overlaps an io_region from another + * device. The io_region to check is provided with (addr, size and type) + * A callback can be provided and will be called for every region that is + * overlapped. 
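+ * (Editorial note: an overlap is not necessarily fatal; callers below
+ * only warn, since overlaps can appear transiently, for instance while
+ * the guest re-programs a BAR.)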
+ * The return value indicates if the region is overlappsed */ +struct CheckBarArgs { + XenPCIPassthroughState *s; + pcibus_t addr; + pcibus_t size; + uint8_t type; + bool rc; +}; +static void xen_pt_check_bar_overlap(PCIBus *bus, PCIDevice *d, void *opaque) +{ + struct CheckBarArgs *arg = opaque; + XenPCIPassthroughState *s = arg->s; + uint8_t type = arg->type; + int i; + + if (d->devfn == s->dev.devfn) { + return; + } + + /* xxx: This ignores bridges. */ + for (i = 0; i < PCI_NUM_REGIONS; i++) { + const PCIIORegion *r = &d->io_regions[i]; + + if (!r->size) { + continue; + } + if ((type & PCI_BASE_ADDRESS_SPACE_IO) + != (r->type & PCI_BASE_ADDRESS_SPACE_IO)) { + continue; + } + + if (ranges_overlap(arg->addr, arg->size, r->addr, r->size)) { + XEN_PT_WARN(&s->dev, + "Overlapped to device [%02x:%02x.%d] Region: %i" + " (addr: 0x%"FMT_PCIBUS", len: 0x%"FMT_PCIBUS")\n", + pci_bus_num(bus), PCI_SLOT(d->devfn), + PCI_FUNC(d->devfn), i, r->addr, r->size); + arg->rc = true; + } + } +} + +static void xen_pt_region_update(XenPCIPassthroughState *s, + MemoryRegionSection *sec, bool adding) +{ + PCIDevice *d = &s->dev; + MemoryRegion *mr = sec->mr; + int bar = -1; + int rc; + int op = adding ? DPCI_ADD_MAPPING : DPCI_REMOVE_MAPPING; + struct CheckBarArgs args = { + .s = s, + .addr = sec->offset_within_address_space, + .size = int128_get64(sec->size), + .rc = false, + }; + + bar = xen_pt_bar_from_region(s, mr); + if (bar == -1 && (!s->msix || &s->msix->mmio != mr)) { + return; + } + + if (s->msix && &s->msix->mmio == mr) { + if (adding) { + s->msix->mmio_base_addr = sec->offset_within_address_space; + rc = xen_pt_msix_update_remap(s, s->msix->bar_index); + } + return; + } + + args.type = d->io_regions[bar].type; + pci_for_each_device_under_bus(pci_get_bus(d), + xen_pt_check_bar_overlap, &args); + if (args.rc) { + XEN_PT_WARN(d, "Region: %d (addr: 0x%"FMT_PCIBUS + ", len: 0x%"FMT_PCIBUS") is overlapped.\n", + bar, sec->offset_within_address_space, + int128_get64(sec->size)); + } + + if (d->io_regions[bar].type & PCI_BASE_ADDRESS_SPACE_IO) { + uint32_t guest_port = sec->offset_within_address_space; + uint32_t machine_port = s->bases[bar].access.pio_base; + uint32_t size = int128_get64(sec->size); + rc = xc_domain_ioport_mapping(xen_xc, xen_domid, + guest_port, machine_port, size, + op); + if (rc) { + XEN_PT_ERR(d, "%s ioport mapping failed! (err: %i)\n", + adding ? "create new" : "remove old", errno); + } + } else { + pcibus_t guest_addr = sec->offset_within_address_space; + pcibus_t machine_addr = s->bases[bar].access.maddr + + sec->offset_within_region; + pcibus_t size = int128_get64(sec->size); + rc = xc_domain_memory_mapping(xen_xc, xen_domid, + XEN_PFN(guest_addr + XC_PAGE_SIZE - 1), + XEN_PFN(machine_addr + XC_PAGE_SIZE - 1), + XEN_PFN(size + XC_PAGE_SIZE - 1), + op); + if (rc) { + XEN_PT_ERR(d, "%s mem mapping failed! (err: %i)\n", + adding ? 
"create new" : "remove old", errno); + } + } +} + +static void xen_pt_region_add(MemoryListener *l, MemoryRegionSection *sec) +{ + XenPCIPassthroughState *s = container_of(l, XenPCIPassthroughState, + memory_listener); + + memory_region_ref(sec->mr); + xen_pt_region_update(s, sec, true); +} + +static void xen_pt_region_del(MemoryListener *l, MemoryRegionSection *sec) +{ + XenPCIPassthroughState *s = container_of(l, XenPCIPassthroughState, + memory_listener); + + xen_pt_region_update(s, sec, false); + memory_region_unref(sec->mr); +} + +static void xen_pt_io_region_add(MemoryListener *l, MemoryRegionSection *sec) +{ + XenPCIPassthroughState *s = container_of(l, XenPCIPassthroughState, + io_listener); + + memory_region_ref(sec->mr); + xen_pt_region_update(s, sec, true); +} + +static void xen_pt_io_region_del(MemoryListener *l, MemoryRegionSection *sec) +{ + XenPCIPassthroughState *s = container_of(l, XenPCIPassthroughState, + io_listener); + + xen_pt_region_update(s, sec, false); + memory_region_unref(sec->mr); +} + +static const MemoryListener xen_pt_memory_listener = { + .name = "xen-pt-mem", + .region_add = xen_pt_region_add, + .region_del = xen_pt_region_del, + .priority = 10, +}; + +static const MemoryListener xen_pt_io_listener = { + .name = "xen-pt-io", + .region_add = xen_pt_io_region_add, + .region_del = xen_pt_io_region_del, + .priority = 10, +}; + +static void +xen_igd_passthrough_isa_bridge_create(XenPCIPassthroughState *s, + XenHostPCIDevice *dev) +{ + uint16_t gpu_dev_id; + PCIDevice *d = &s->dev; + + gpu_dev_id = dev->device_id; + igd_passthrough_isa_bridge_create(pci_get_bus(d), gpu_dev_id); +} + +/* destroy. */ +static void xen_pt_destroy(PCIDevice *d) { + + XenPCIPassthroughState *s = XEN_PT_DEVICE(d); + XenHostPCIDevice *host_dev = &s->real_device; + uint8_t machine_irq = s->machine_irq; + uint8_t intx; + int rc; + + if (machine_irq && !xen_host_pci_device_closed(&s->real_device)) { + intx = xen_pt_pci_intx(s); + rc = xc_domain_unbind_pt_irq(xen_xc, xen_domid, machine_irq, + PT_IRQ_TYPE_PCI, + pci_dev_bus_num(d), + PCI_SLOT(s->dev.devfn), + intx, + 0 /* isa_irq */); + if (rc < 0) { + XEN_PT_ERR(d, "unbinding of interrupt INT%c failed." + " (machine irq: %i, err: %d)" + " But bravely continuing on..\n", + 'a' + intx, machine_irq, errno); + } + } + + /* N.B. xen_pt_config_delete takes care of freeing them. */ + if (s->msi) { + xen_pt_msi_disable(s); + } + if (s->msix) { + xen_pt_msix_disable(s); + } + + if (machine_irq) { + xen_pt_mapped_machine_irq[machine_irq]--; + + if (xen_pt_mapped_machine_irq[machine_irq] == 0) { + rc = xc_physdev_unmap_pirq(xen_xc, xen_domid, machine_irq); + + if (rc < 0) { + XEN_PT_ERR(d, "unmapping of interrupt %i failed. 
(err: %d)" + " But bravely continuing on..\n", + machine_irq, errno); + } + } + s->machine_irq = 0; + } + + /* delete all emulated config registers */ + xen_pt_config_delete(s); + + xen_pt_unregister_vga_regions(host_dev); + + if (s->listener_set) { + memory_listener_unregister(&s->memory_listener); + memory_listener_unregister(&s->io_listener); + s->listener_set = false; + } + if (!xen_host_pci_device_closed(&s->real_device)) { + xen_host_pci_device_put(&s->real_device); + } +} +/* init */ + +static void xen_pt_realize(PCIDevice *d, Error **errp) +{ + ERRP_GUARD(); + XenPCIPassthroughState *s = XEN_PT_DEVICE(d); + int i, rc = 0; + uint8_t machine_irq = 0, scratch; + uint16_t cmd = 0; + int pirq = XEN_PT_UNASSIGNED_PIRQ; + + /* register real device */ + XEN_PT_LOG(d, "Assigning real physical device %02x:%02x.%d" + " to devfn 0x%x\n", + s->hostaddr.bus, s->hostaddr.slot, s->hostaddr.function, + s->dev.devfn); + + xen_host_pci_device_get(&s->real_device, + s->hostaddr.domain, s->hostaddr.bus, + s->hostaddr.slot, s->hostaddr.function, + errp); + if (*errp) { + error_append_hint(errp, "Failed to \"open\" the real pci device"); + return; + } + + s->is_virtfn = s->real_device.is_virtfn; + if (s->is_virtfn) { + XEN_PT_LOG(d, "%04x:%02x:%02x.%d is a SR-IOV Virtual Function\n", + s->real_device.domain, s->real_device.bus, + s->real_device.dev, s->real_device.func); + } + + /* Initialize virtualized PCI configuration (Extended 256 Bytes) */ + memset(d->config, 0, PCI_CONFIG_SPACE_SIZE); + + s->memory_listener = xen_pt_memory_listener; + s->io_listener = xen_pt_io_listener; + + /* Setup VGA bios for passthrough GFX */ + if ((s->real_device.domain == 0) && (s->real_device.bus == 0) && + (s->real_device.dev == 2) && (s->real_device.func == 0)) { + if (!is_igd_vga_passthrough(&s->real_device)) { + error_setg(errp, "Need to enable igd-passthru if you're trying" + " to passthrough IGD GFX"); + xen_host_pci_device_put(&s->real_device); + return; + } + + xen_pt_setup_vga(s, &s->real_device, errp); + if (*errp) { + error_append_hint(errp, "Setup VGA BIOS of passthrough" + " GFX failed"); + xen_host_pci_device_put(&s->real_device); + return; + } + + /* Register ISA bridge for passthrough GFX. 
*/ + xen_igd_passthrough_isa_bridge_create(s, &s->real_device); + } + + /* Handle real device's MMIO/PIO BARs */ + xen_pt_register_regions(s, &cmd); + + /* reinitialize each config register to be emulated */ + xen_pt_config_init(s, errp); + if (*errp) { + error_append_hint(errp, "PCI Config space initialisation failed"); + rc = -1; + goto err_out; + } + + /* Bind interrupt */ + rc = xen_host_pci_get_byte(&s->real_device, PCI_INTERRUPT_PIN, &scratch); + if (rc) { + error_setg_errno(errp, errno, "Failed to read PCI_INTERRUPT_PIN"); + goto err_out; + } + if (!scratch) { + XEN_PT_LOG(d, "no pin interrupt\n"); + goto out; + } + + machine_irq = s->real_device.irq; + if (machine_irq == 0) { + XEN_PT_LOG(d, "machine irq is 0\n"); + cmd |= PCI_COMMAND_INTX_DISABLE; + goto out; + } + + rc = xc_physdev_map_pirq(xen_xc, xen_domid, machine_irq, &pirq); + if (rc < 0) { + XEN_PT_ERR(d, "Mapping machine irq %u to pirq %i failed, (err: %d)\n", + machine_irq, pirq, errno); + + /* Disable PCI intx assertion (turn on bit10 of devctl) */ + cmd |= PCI_COMMAND_INTX_DISABLE; + machine_irq = 0; + s->machine_irq = 0; + } else { + machine_irq = pirq; + s->machine_irq = pirq; + xen_pt_mapped_machine_irq[machine_irq]++; + } + + /* bind machine_irq to device */ + if (machine_irq != 0) { + uint8_t e_intx = xen_pt_pci_intx(s); + + rc = xc_domain_bind_pt_pci_irq(xen_xc, xen_domid, machine_irq, + pci_dev_bus_num(d), + PCI_SLOT(d->devfn), + e_intx); + if (rc < 0) { + XEN_PT_ERR(d, "Binding of interrupt %i failed! (err: %d)\n", + e_intx, errno); + + /* Disable PCI intx assertion (turn on bit10 of devctl) */ + cmd |= PCI_COMMAND_INTX_DISABLE; + xen_pt_mapped_machine_irq[machine_irq]--; + + if (xen_pt_mapped_machine_irq[machine_irq] == 0) { + if (xc_physdev_unmap_pirq(xen_xc, xen_domid, machine_irq)) { + XEN_PT_ERR(d, "Unmapping of machine interrupt %i failed!" 
+ " (err: %d)\n", machine_irq, errno); + } + } + s->machine_irq = 0; + } + } + +out: + if (cmd) { + uint16_t val; + + rc = xen_host_pci_get_word(&s->real_device, PCI_COMMAND, &val); + if (rc) { + error_setg_errno(errp, errno, "Failed to read PCI_COMMAND"); + goto err_out; + } else { + val |= cmd; + rc = xen_host_pci_set_word(&s->real_device, PCI_COMMAND, val); + if (rc) { + error_setg_errno(errp, errno, "Failed to write PCI_COMMAND" + " val = 0x%x", val); + goto err_out; + } + } + } + + memory_listener_register(&s->memory_listener, &address_space_memory); + memory_listener_register(&s->io_listener, &address_space_io); + s->listener_set = true; + XEN_PT_LOG(d, + "Real physical device %02x:%02x.%d registered successfully\n", + s->hostaddr.bus, s->hostaddr.slot, s->hostaddr.function); + + return; + +err_out: + for (i = 0; i < PCI_ROM_SLOT; i++) { + object_unparent(OBJECT(&s->bar[i])); + } + object_unparent(OBJECT(&s->rom)); + + xen_pt_destroy(d); + assert(rc); +} + +static void xen_pt_unregister_device(PCIDevice *d) +{ + xen_pt_destroy(d); +} + +static Property xen_pci_passthrough_properties[] = { + DEFINE_PROP_PCI_HOST_DEVADDR("hostaddr", XenPCIPassthroughState, hostaddr), + DEFINE_PROP_BOOL("permissive", XenPCIPassthroughState, permissive, false), + DEFINE_PROP_END_OF_LIST(), +}; + +static void xen_pci_passthrough_instance_init(Object *obj) +{ + /* QEMU_PCI_CAP_EXPRESS initialization does not depend on QEMU command + * line, therefore, no need to wait to realize like other devices */ + PCI_DEVICE(obj)->cap_present |= QEMU_PCI_CAP_EXPRESS; +} + +static void xen_pci_passthrough_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + + k->realize = xen_pt_realize; + k->exit = xen_pt_unregister_device; + k->config_read = xen_pt_pci_read_config; + k->config_write = xen_pt_pci_write_config; + set_bit(DEVICE_CATEGORY_MISC, dc->categories); + dc->desc = "Assign an host PCI device with Xen"; + device_class_set_props(dc, xen_pci_passthrough_properties); +}; + +static void xen_pci_passthrough_finalize(Object *obj) +{ + XenPCIPassthroughState *s = XEN_PT_DEVICE(obj); + + xen_pt_msix_delete(s); +} + +static const TypeInfo xen_pci_passthrough_info = { + .name = TYPE_XEN_PT_DEVICE, + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(XenPCIPassthroughState), + .instance_finalize = xen_pci_passthrough_finalize, + .class_init = xen_pci_passthrough_class_init, + .instance_init = xen_pci_passthrough_instance_init, + .interfaces = (InterfaceInfo[]) { + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { INTERFACE_PCIE_DEVICE }, + { }, + }, +}; + +static void xen_pci_passthrough_register_types(void) +{ + type_register_static(&xen_pci_passthrough_info); +} + +type_init(xen_pci_passthrough_register_types) diff --git a/hw/xen/xen_pt.h b/hw/xen/xen_pt.h new file mode 100644 index 000000000..c74c4678f --- /dev/null +++ b/hw/xen/xen_pt.h @@ -0,0 +1,336 @@ +#ifndef XEN_PT_H +#define XEN_PT_H + +#include "hw/xen/xen_common.h" +#include "hw/pci/pci.h" +#include "xen-host-pci-device.h" +#include "qom/object.h" + +bool xen_igd_gfx_pt_enabled(void); +void xen_igd_gfx_pt_set(bool value, Error **errp); + +void xen_pt_log(const PCIDevice *d, const char *f, ...) GCC_FMT_ATTR(2, 3); + +#define XEN_PT_ERR(d, _f, _a...) xen_pt_log(d, "%s: Error: "_f, __func__, ##_a) + +#ifdef XEN_PT_LOGGING_ENABLED +# define XEN_PT_LOG(d, _f, _a...) xen_pt_log(d, "%s: " _f, __func__, ##_a) +# define XEN_PT_WARN(d, _f, _a...) 
\ + xen_pt_log(d, "%s: Warning: "_f, __func__, ##_a) +#else +# define XEN_PT_LOG(d, _f, _a...) +# define XEN_PT_WARN(d, _f, _a...) +#endif + +#ifdef XEN_PT_DEBUG_PCI_CONFIG_ACCESS +# define XEN_PT_LOG_CONFIG(d, addr, val, len) \ + xen_pt_log(d, "%s: address=0x%04x val=0x%08x len=%d\n", \ + __func__, addr, val, len) +#else +# define XEN_PT_LOG_CONFIG(d, addr, val, len) +#endif + + +/* Helper */ +#define XEN_PFN(x) ((x) >> XC_PAGE_SHIFT) + +typedef const struct XenPTRegInfo XenPTRegInfo; +typedef struct XenPTReg XenPTReg; + + +#define TYPE_XEN_PT_DEVICE "xen-pci-passthrough" +OBJECT_DECLARE_SIMPLE_TYPE(XenPCIPassthroughState, XEN_PT_DEVICE) + +uint32_t igd_read_opregion(XenPCIPassthroughState *s); +void igd_write_opregion(XenPCIPassthroughState *s, uint32_t val); + +/* function type for config reg */ +typedef int (*xen_pt_conf_reg_init) + (XenPCIPassthroughState *, XenPTRegInfo *, uint32_t real_offset, + uint32_t *data); +typedef int (*xen_pt_conf_dword_write) + (XenPCIPassthroughState *, XenPTReg *cfg_entry, + uint32_t *val, uint32_t dev_value, uint32_t valid_mask); +typedef int (*xen_pt_conf_word_write) + (XenPCIPassthroughState *, XenPTReg *cfg_entry, + uint16_t *val, uint16_t dev_value, uint16_t valid_mask); +typedef int (*xen_pt_conf_byte_write) + (XenPCIPassthroughState *, XenPTReg *cfg_entry, + uint8_t *val, uint8_t dev_value, uint8_t valid_mask); +typedef int (*xen_pt_conf_dword_read) + (XenPCIPassthroughState *, XenPTReg *cfg_entry, + uint32_t *val, uint32_t valid_mask); +typedef int (*xen_pt_conf_word_read) + (XenPCIPassthroughState *, XenPTReg *cfg_entry, + uint16_t *val, uint16_t valid_mask); +typedef int (*xen_pt_conf_byte_read) + (XenPCIPassthroughState *, XenPTReg *cfg_entry, + uint8_t *val, uint8_t valid_mask); + +#define XEN_PT_BAR_ALLF 0xFFFFFFFF +#define XEN_PT_BAR_UNMAPPED (-1) + +#define XEN_PCI_CAP_MAX 48 + +#define XEN_PCI_INTEL_OPREGION 0xfc + +typedef enum { + XEN_PT_GRP_TYPE_HARDWIRED = 0, /* 0 Hardwired reg group */ + XEN_PT_GRP_TYPE_EMU, /* emul reg group */ +} XenPTRegisterGroupType; + +typedef enum { + XEN_PT_BAR_FLAG_MEM = 0, /* Memory type BAR */ + XEN_PT_BAR_FLAG_IO, /* I/O type BAR */ + XEN_PT_BAR_FLAG_UPPER, /* upper 64bit BAR */ + XEN_PT_BAR_FLAG_UNUSED, /* unused BAR */ +} XenPTBarFlag; + + +typedef struct XenPTRegion { + /* BAR flag */ + XenPTBarFlag bar_flag; + /* Translation of the emulated address */ + union { + uint64_t maddr; + uint64_t pio_base; + uint64_t u; + } access; +} XenPTRegion; + +/* XenPTRegInfo declaration + * - only for emulated register (either a part or whole bit). + * - for passthrough register that need special behavior (like interacting with + * other component), set emu_mask to all 0 and specify r/w func properly. + * - do NOT use ALL F for init_val, otherwise the tbl will not be registered. 
+ */ + +/* emulated register information */ +struct XenPTRegInfo { + uint32_t offset; + uint32_t size; + uint32_t init_val; + /* reg reserved field mask (ON:reserved, OFF:defined) */ + uint32_t res_mask; + /* reg read only field mask (ON:RO/ROS, OFF:other) */ + uint32_t ro_mask; + /* reg read/write-1-clear field mask (ON:RW1C/RW1CS, OFF:other) */ + uint32_t rw1c_mask; + /* reg emulate field mask (ON:emu, OFF:passthrough) */ + uint32_t emu_mask; + xen_pt_conf_reg_init init; + /* read/write function pointer + * for double_word/word/byte size */ + union { + struct { + xen_pt_conf_dword_write write; + xen_pt_conf_dword_read read; + } dw; + struct { + xen_pt_conf_word_write write; + xen_pt_conf_word_read read; + } w; + struct { + xen_pt_conf_byte_write write; + xen_pt_conf_byte_read read; + } b; + } u; +}; + +/* emulated register management */ +struct XenPTReg { + QLIST_ENTRY(XenPTReg) entries; + XenPTRegInfo *reg; + union { + uint8_t *byte; + uint16_t *half_word; + uint32_t *word; + } ptr; /* pointer to dev.config. */ +}; + +typedef const struct XenPTRegGroupInfo XenPTRegGroupInfo; + +/* emul reg group size initialize method */ +typedef int (*xen_pt_reg_size_init_fn) + (XenPCIPassthroughState *, XenPTRegGroupInfo *, + uint32_t base_offset, uint8_t *size); + +/* emulated register group information */ +struct XenPTRegGroupInfo { + uint8_t grp_id; + XenPTRegisterGroupType grp_type; + uint8_t grp_size; + xen_pt_reg_size_init_fn size_init; + XenPTRegInfo *emu_regs; +}; + +/* emul register group management table */ +typedef struct XenPTRegGroup { + QLIST_ENTRY(XenPTRegGroup) entries; + XenPTRegGroupInfo *reg_grp; + uint32_t base_offset; + uint8_t size; + QLIST_HEAD(, XenPTReg) reg_tbl_list; +} XenPTRegGroup; + + +#define XEN_PT_UNASSIGNED_PIRQ (-1) +typedef struct XenPTMSI { + uint16_t flags; + uint32_t addr_lo; /* guest message address */ + uint32_t addr_hi; /* guest message upper address */ + uint16_t data; /* guest message data */ + uint32_t ctrl_offset; /* saved control offset */ + uint32_t mask; /* guest mask bits */ + int pirq; /* guest pirq corresponding */ + bool initialized; /* when guest MSI is initialized */ + bool mapped; /* when pirq is mapped */ +} XenPTMSI; + +typedef struct XenPTMSIXEntry { + int pirq; + uint64_t addr; + uint32_t data; + uint32_t latch[4]; + bool updated; /* indicate whether MSI ADDR or DATA is updated */ +} XenPTMSIXEntry; +typedef struct XenPTMSIX { + uint32_t ctrl_offset; + bool enabled; + bool maskall; + int total_entries; + int bar_index; + uint64_t table_base; + uint32_t table_offset_adjust; /* page align mmap */ + uint64_t mmio_base_addr; + MemoryRegion mmio; + void *phys_iomem_base; + XenPTMSIXEntry msix_entry[]; +} XenPTMSIX; + +struct XenPCIPassthroughState { + PCIDevice dev; + + PCIHostDeviceAddress hostaddr; + bool is_virtfn; + bool permissive; + bool permissive_warned; + XenHostPCIDevice real_device; + XenPTRegion bases[PCI_NUM_REGIONS]; /* Access regions */ + QLIST_HEAD(, XenPTRegGroup) reg_grps; + + uint32_t machine_irq; + + XenPTMSI *msi; + XenPTMSIX *msix; + + MemoryRegion bar[PCI_NUM_REGIONS - 1]; + MemoryRegion rom; + + MemoryListener memory_listener; + MemoryListener io_listener; + bool listener_set; +}; + +void xen_pt_config_init(XenPCIPassthroughState *s, Error **errp); +void xen_pt_config_delete(XenPCIPassthroughState *s); +XenPTRegGroup *xen_pt_find_reg_grp(XenPCIPassthroughState *s, uint32_t address); +XenPTReg *xen_pt_find_reg(XenPTRegGroup *reg_grp, uint32_t address); +int xen_pt_bar_offset_to_index(uint32_t offset); + +static inline 
pcibus_t xen_pt_get_emul_size(XenPTBarFlag flag, pcibus_t r_size) +{ + /* align resource size (memory type only) */ + if (flag == XEN_PT_BAR_FLAG_MEM) { + return (r_size + XC_PAGE_SIZE - 1) & XC_PAGE_MASK; + } else { + return r_size; + } +} + +/* INTx */ +/* The PCI Local Bus Specification, Rev. 3.0, + * Section 6.2.4 Miscellaneous Registers, pp 223 + * outlines 5 valid values for the interrupt pin (intx). + * 0: For devices (or device functions) that don't use an interrupt pin + * 1: INTA# + * 2: INTB# + * 3: INTC# + * 4: INTD# + * + * Xen uses the following 4 values for intx + * 0: INTA# + * 1: INTB# + * 2: INTC# + * 3: INTD# + * + * Observing that these lists of values are not the same, xen_pt_pci_read_intx() + * uses the following mapping from hw to xen values. + * This seems to reflect the current usage within Xen. + * + * PCI hardware | Xen | Notes + * ----------------+-----+---------------------------------------------------- + * 0 | 0 | No interrupt + * 1 | 0 | INTA# + * 2 | 1 | INTB# + * 3 | 2 | INTC# + * 4 | 3 | INTD# + * any other value | 0 | This should never happen, log error message + */ + +static inline uint8_t xen_pt_pci_read_intx(XenPCIPassthroughState *s) +{ + uint8_t v = 0; + xen_host_pci_get_byte(&s->real_device, PCI_INTERRUPT_PIN, &v); + return v; +} + +static inline uint8_t xen_pt_pci_intx(XenPCIPassthroughState *s) +{ + uint8_t r_val = xen_pt_pci_read_intx(s); + + XEN_PT_LOG(&s->dev, "intx=%i\n", r_val); + if (r_val < 1 || r_val > 4) { + XEN_PT_LOG(&s->dev, "Interrupt pin read from hardware is out of range:" + " value=%i, acceptable range is 1 - 4\n", r_val); + r_val = 0; + } else { + /* Note that if s.real_device.config_fd is closed, the read above + * yields 0xff, which is rejected by the range check. */ + r_val -= 1; + } + + return r_val; +} + +/* MSI/MSI-X */ +int xen_pt_msi_setup(XenPCIPassthroughState *s); +int xen_pt_msi_update(XenPCIPassthroughState *d); +void xen_pt_msi_disable(XenPCIPassthroughState *s); + +int xen_pt_msix_init(XenPCIPassthroughState *s, uint32_t base); +void xen_pt_msix_delete(XenPCIPassthroughState *s); +void xen_pt_msix_unmap(XenPCIPassthroughState *s); +int xen_pt_msix_update(XenPCIPassthroughState *s); +int xen_pt_msix_update_remap(XenPCIPassthroughState *s, int bar_index); +void xen_pt_msix_disable(XenPCIPassthroughState *s); + +static inline bool xen_pt_has_msix_mapping(XenPCIPassthroughState *s, int bar) +{ + return s->msix && s->msix->bar_index == bar; +} + +extern void *pci_assign_dev_load_option_rom(PCIDevice *dev, + int *size, + unsigned int domain, + unsigned int bus, unsigned int slot, + unsigned int function); +static inline bool is_igd_vga_passthrough(XenHostPCIDevice *dev) +{ + return (xen_igd_gfx_pt_enabled() + && ((dev->class_code >> 0x8) == PCI_CLASS_DISPLAY_VGA)); +} +int xen_pt_register_vga_regions(XenHostPCIDevice *dev); +int xen_pt_unregister_vga_regions(XenHostPCIDevice *dev); +void xen_pt_setup_vga(XenPCIPassthroughState *s, XenHostPCIDevice *dev, + Error **errp); +#endif /* XEN_PT_H */ diff --git a/hw/xen/xen_pt_config_init.c b/hw/xen/xen_pt_config_init.c new file mode 100644 index 000000000..c5c4e943a --- /dev/null +++ b/hw/xen/xen_pt_config_init.c @@ -0,0 +1,2110 @@ +/* + * Copyright (c) 2007, Neocleus Corporation. + * Copyright (c) 2007, Intel Corporation. + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory.
+ * + * Alex Novik + * Allen Kay + * Guy Zana + * + * This file implements direct PCI assignment to a HVM guest + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/timer.h" +#include "hw/xen/xen-legacy-backend.h" +#include "xen_pt.h" + +#define XEN_PT_MERGE_VALUE(value, data, val_mask) \ + (((value) & (val_mask)) | ((data) & ~(val_mask))) + +#define XEN_PT_INVALID_REG 0xFFFFFFFF /* invalid register value */ + +/* prototype */ + +static int xen_pt_ptr_reg_init(XenPCIPassthroughState *s, XenPTRegInfo *reg, + uint32_t real_offset, uint32_t *data); + + +/* helper */ + +/* A return value of 1 means the capability should NOT be exposed to guest. */ +static int xen_pt_hide_dev_cap(const XenHostPCIDevice *d, uint8_t grp_id) +{ + switch (grp_id) { + case PCI_CAP_ID_EXP: + /* The PCI Express Capability Structure of the VF of Intel 82599 10GbE + * Controller looks trivial, e.g., the PCI Express Capabilities + * Register is 0. We should not try to expose it to guest. + * + * The datasheet is available at + * http://download.intel.com/design/network/datashts/82599_datasheet.pdf + * + * See 'Table 9.7. VF PCIe Configuration Space' of the datasheet, the + * PCI Express Capability Structure of the VF of Intel 82599 10GbE + * Controller looks trivial, e.g., the PCI Express Capabilities + * Register is 0, so the Capability Version is 0 and + * xen_pt_pcie_size_init() would fail. + */ + if (d->vendor_id == PCI_VENDOR_ID_INTEL && + d->device_id == PCI_DEVICE_ID_INTEL_82599_SFP_VF) { + return 1; + } + break; + } + return 0; +} + +/* find emulate register group entry */ +XenPTRegGroup *xen_pt_find_reg_grp(XenPCIPassthroughState *s, uint32_t address) +{ + XenPTRegGroup *entry = NULL; + + /* find register group entry */ + QLIST_FOREACH(entry, &s->reg_grps, entries) { + /* check address */ + if ((entry->base_offset <= address) + && ((entry->base_offset + entry->size) > address)) { + return entry; + } + } + + /* group entry not found */ + return NULL; +} + +/* find emulate register entry */ +XenPTReg *xen_pt_find_reg(XenPTRegGroup *reg_grp, uint32_t address) +{ + XenPTReg *reg_entry = NULL; + XenPTRegInfo *reg = NULL; + uint32_t real_offset = 0; + + /* find register entry */ + QLIST_FOREACH(reg_entry, &reg_grp->reg_tbl_list, entries) { + reg = reg_entry->reg; + real_offset = reg_grp->base_offset + reg->offset; + /* check address */ + if ((real_offset <= address) + && ((real_offset + reg->size) > address)) { + return reg_entry; + } + } + + return NULL; +} + +static uint32_t get_throughable_mask(const XenPCIPassthroughState *s, + XenPTRegInfo *reg, uint32_t valid_mask) +{ + uint32_t throughable_mask = ~(reg->emu_mask | reg->ro_mask); + + if (!s->permissive) { + throughable_mask &= ~reg->res_mask; + } + + return throughable_mask & valid_mask; +} + +/**************** + * general register functions + */ + +/* register initialization function */ + +static int xen_pt_common_reg_init(XenPCIPassthroughState *s, + XenPTRegInfo *reg, uint32_t real_offset, + uint32_t *data) +{ + *data = reg->init_val; + return 0; +} + +/* Read register functions */ + +static int xen_pt_byte_reg_read(XenPCIPassthroughState *s, XenPTReg *cfg_entry, + uint8_t *value, uint8_t valid_mask) +{ + XenPTRegInfo *reg = cfg_entry->reg; + uint8_t valid_emu_mask = 0; + uint8_t *data = cfg_entry->ptr.byte; + + /* emulate byte register */ + valid_emu_mask = reg->emu_mask & valid_mask; + *value = XEN_PT_MERGE_VALUE(*value, *data, ~valid_emu_mask); + + return 0; +} +static int xen_pt_word_reg_read(XenPCIPassthroughState *s, XenPTReg
*cfg_entry, + uint16_t *value, uint16_t valid_mask) +{ + XenPTRegInfo *reg = cfg_entry->reg; + uint16_t valid_emu_mask = 0; + uint16_t *data = cfg_entry->ptr.half_word; + + /* emulate word register */ + valid_emu_mask = reg->emu_mask & valid_mask; + *value = XEN_PT_MERGE_VALUE(*value, *data, ~valid_emu_mask); + + return 0; +} +static int xen_pt_long_reg_read(XenPCIPassthroughState *s, XenPTReg *cfg_entry, + uint32_t *value, uint32_t valid_mask) +{ + XenPTRegInfo *reg = cfg_entry->reg; + uint32_t valid_emu_mask = 0; + uint32_t *data = cfg_entry->ptr.word; + + /* emulate long register */ + valid_emu_mask = reg->emu_mask & valid_mask; + *value = XEN_PT_MERGE_VALUE(*value, *data, ~valid_emu_mask); + + return 0; +} + +/* Write register functions */ + +static int xen_pt_byte_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry, + uint8_t *val, uint8_t dev_value, + uint8_t valid_mask) +{ + XenPTRegInfo *reg = cfg_entry->reg; + uint8_t writable_mask = 0; + uint8_t throughable_mask = get_throughable_mask(s, reg, valid_mask); + uint8_t *data = cfg_entry->ptr.byte; + + /* modify emulate register */ + writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask; + *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask); + + /* create value for writing to I/O device register */ + *val = XEN_PT_MERGE_VALUE(*val, dev_value & ~reg->rw1c_mask, + throughable_mask); + + return 0; +} +static int xen_pt_word_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry, + uint16_t *val, uint16_t dev_value, + uint16_t valid_mask) +{ + XenPTRegInfo *reg = cfg_entry->reg; + uint16_t writable_mask = 0; + uint16_t throughable_mask = get_throughable_mask(s, reg, valid_mask); + uint16_t *data = cfg_entry->ptr.half_word; + + /* modify emulate register */ + writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask; + *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask); + + /* create value for writing to I/O device register */ + *val = XEN_PT_MERGE_VALUE(*val, dev_value & ~reg->rw1c_mask, + throughable_mask); + + return 0; +} +static int xen_pt_long_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry, + uint32_t *val, uint32_t dev_value, + uint32_t valid_mask) +{ + XenPTRegInfo *reg = cfg_entry->reg; + uint32_t writable_mask = 0; + uint32_t throughable_mask = get_throughable_mask(s, reg, valid_mask); + uint32_t *data = cfg_entry->ptr.word; + + /* modify emulate register */ + writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask; + *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask); + + /* create value for writing to I/O device register */ + *val = XEN_PT_MERGE_VALUE(*val, dev_value & ~reg->rw1c_mask, + throughable_mask); + + return 0; +} + + +/* XenPTRegInfo declaration + * - only for emulated register (either a part or whole bit). + * - for passthrough register that need special behavior (like interacting with + * other component), set emu_mask to all 0 and specify r/w func properly. + * - do NOT use ALL F for init_val, otherwise the tbl will not be registered. 
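+ * - a worked example of the merge helper used above (numbers are + * illustrative only): XEN_PT_MERGE_VALUE(val, data, mask) keeps the mask + * bits of val and the remaining bits of data, so + * XEN_PT_MERGE_VALUE(0xAB, 0xCD, 0xF0) == (0xAB & 0xF0) | (0xCD & 0x0F) + * == 0xAD.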
+ */ + +/******************** + * Header Type0 + */ + +static int xen_pt_vendor_reg_init(XenPCIPassthroughState *s, + XenPTRegInfo *reg, uint32_t real_offset, + uint32_t *data) +{ + *data = s->real_device.vendor_id; + return 0; +} +static int xen_pt_device_reg_init(XenPCIPassthroughState *s, + XenPTRegInfo *reg, uint32_t real_offset, + uint32_t *data) +{ + *data = s->real_device.device_id; + return 0; +} +static int xen_pt_status_reg_init(XenPCIPassthroughState *s, + XenPTRegInfo *reg, uint32_t real_offset, + uint32_t *data) +{ + XenPTRegGroup *reg_grp_entry = NULL; + XenPTReg *reg_entry = NULL; + uint32_t reg_field = 0; + + /* find Header register group */ + reg_grp_entry = xen_pt_find_reg_grp(s, PCI_CAPABILITY_LIST); + if (reg_grp_entry) { + /* find Capabilities Pointer register */ + reg_entry = xen_pt_find_reg(reg_grp_entry, PCI_CAPABILITY_LIST); + if (reg_entry) { + /* check Capabilities Pointer register */ + if (*reg_entry->ptr.half_word) { + reg_field |= PCI_STATUS_CAP_LIST; + } else { + reg_field &= ~PCI_STATUS_CAP_LIST; + } + } else { + xen_shutdown_fatal_error("Internal error: Couldn't find XenPTReg*" + " for Capabilities Pointer register." + " (%s)\n", __func__); + return -1; + } + } else { + xen_shutdown_fatal_error("Internal error: Couldn't find XenPTRegGroup" + " for Header. (%s)\n", __func__); + return -1; + } + + *data = reg_field; + return 0; +} +static int xen_pt_header_type_reg_init(XenPCIPassthroughState *s, + XenPTRegInfo *reg, uint32_t real_offset, + uint32_t *data) +{ + /* read PCI_HEADER_TYPE */ + *data = reg->init_val | 0x80; + return 0; +} + +/* initialize Interrupt Pin register */ +static int xen_pt_irqpin_reg_init(XenPCIPassthroughState *s, + XenPTRegInfo *reg, uint32_t real_offset, + uint32_t *data) +{ + if (s->real_device.irq) { + *data = xen_pt_pci_read_intx(s); + } + return 0; +} + +/* Command register */ +static int xen_pt_cmd_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry, + uint16_t *val, uint16_t dev_value, + uint16_t valid_mask) +{ + XenPTRegInfo *reg = cfg_entry->reg; + uint16_t writable_mask = 0; + uint16_t throughable_mask = get_throughable_mask(s, reg, valid_mask); + uint16_t *data = cfg_entry->ptr.half_word; + + /* modify emulate register */ + writable_mask = ~reg->ro_mask & valid_mask; + *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask); + + /* create value for writing to I/O device register */ + if (*val & PCI_COMMAND_INTX_DISABLE) { + throughable_mask |= PCI_COMMAND_INTX_DISABLE; + } else { + if (s->machine_irq) { + throughable_mask |= PCI_COMMAND_INTX_DISABLE; + } + } + + *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask); + + return 0; +} + +/* BAR */ +#define XEN_PT_BAR_MEM_RO_MASK 0x0000000F /* BAR ReadOnly mask(Memory) */ +#define XEN_PT_BAR_MEM_EMU_MASK 0xFFFFFFF0 /* BAR emul mask(Memory) */ +#define XEN_PT_BAR_IO_RO_MASK 0x00000003 /* BAR ReadOnly mask(I/O) */ +#define XEN_PT_BAR_IO_EMU_MASK 0xFFFFFFFC /* BAR emul mask(I/O) */ + +static bool is_64bit_bar(PCIIORegion *r) +{ + return !!(r->type & PCI_BASE_ADDRESS_MEM_TYPE_64); +} + +static uint64_t xen_pt_get_bar_size(PCIIORegion *r) +{ + if (is_64bit_bar(r)) { + uint64_t size64; + size64 = (r + 1)->size; + size64 <<= 32; + size64 += r->size; + return size64; + } + return r->size; +} + +static XenPTBarFlag xen_pt_bar_reg_parse(XenPCIPassthroughState *s, + int index) +{ + PCIDevice *d = PCI_DEVICE(s); + XenPTRegion *region = NULL; + PCIIORegion *r; + + /* check 64bit BAR */ + if ((0 < index) && (index < PCI_ROM_SLOT)) { + int type = s->real_device.io_regions[index - 
1].type; + + if ((type & XEN_HOST_PCI_REGION_TYPE_MEM) + && (type & XEN_HOST_PCI_REGION_TYPE_MEM_64)) { + region = &s->bases[index - 1]; + if (region->bar_flag != XEN_PT_BAR_FLAG_UPPER) { + return XEN_PT_BAR_FLAG_UPPER; + } + } + } + + /* check unused BAR */ + r = &d->io_regions[index]; + if (!xen_pt_get_bar_size(r)) { + return XEN_PT_BAR_FLAG_UNUSED; + } + + /* for ExpROM BAR */ + if (index == PCI_ROM_SLOT) { + return XEN_PT_BAR_FLAG_MEM; + } + + /* check BAR I/O indicator */ + if (s->real_device.io_regions[index].type & XEN_HOST_PCI_REGION_TYPE_IO) { + return XEN_PT_BAR_FLAG_IO; + } else { + return XEN_PT_BAR_FLAG_MEM; + } +} + +static inline uint32_t base_address_with_flags(XenHostPCIIORegion *hr) +{ + if (hr->type & XEN_HOST_PCI_REGION_TYPE_IO) { + return hr->base_addr | (hr->bus_flags & ~PCI_BASE_ADDRESS_IO_MASK); + } else { + return hr->base_addr | (hr->bus_flags & ~PCI_BASE_ADDRESS_MEM_MASK); + } +} + +static int xen_pt_bar_reg_init(XenPCIPassthroughState *s, XenPTRegInfo *reg, + uint32_t real_offset, uint32_t *data) +{ + uint32_t reg_field = 0; + int index; + + index = xen_pt_bar_offset_to_index(reg->offset); + if (index < 0 || index >= PCI_NUM_REGIONS) { + XEN_PT_ERR(&s->dev, "Internal error: Invalid BAR index [%d].\n", index); + return -1; + } + + /* set BAR flag */ + s->bases[index].bar_flag = xen_pt_bar_reg_parse(s, index); + if (s->bases[index].bar_flag == XEN_PT_BAR_FLAG_UNUSED) { + reg_field = XEN_PT_INVALID_REG; + } + + *data = reg_field; + return 0; +} +static int xen_pt_bar_reg_read(XenPCIPassthroughState *s, XenPTReg *cfg_entry, + uint32_t *value, uint32_t valid_mask) +{ + XenPTRegInfo *reg = cfg_entry->reg; + uint32_t valid_emu_mask = 0; + uint32_t bar_emu_mask = 0; + int index; + + /* get BAR index */ + index = xen_pt_bar_offset_to_index(reg->offset); + if (index < 0 || index >= PCI_NUM_REGIONS - 1) { + XEN_PT_ERR(&s->dev, "Internal error: Invalid BAR index [%d].\n", index); + return -1; + } + + /* use fixed-up value from kernel sysfs */ + *value = base_address_with_flags(&s->real_device.io_regions[index]); + + /* set emulate mask depend on BAR flag */ + switch (s->bases[index].bar_flag) { + case XEN_PT_BAR_FLAG_MEM: + bar_emu_mask = XEN_PT_BAR_MEM_EMU_MASK; + break; + case XEN_PT_BAR_FLAG_IO: + bar_emu_mask = XEN_PT_BAR_IO_EMU_MASK; + break; + case XEN_PT_BAR_FLAG_UPPER: + bar_emu_mask = XEN_PT_BAR_ALLF; + break; + default: + break; + } + + /* emulate BAR */ + valid_emu_mask = bar_emu_mask & valid_mask; + *value = XEN_PT_MERGE_VALUE(*value, *cfg_entry->ptr.word, ~valid_emu_mask); + + return 0; +} +static int xen_pt_bar_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry, + uint32_t *val, uint32_t dev_value, + uint32_t valid_mask) +{ + XenPTRegInfo *reg = cfg_entry->reg; + XenPTRegion *base = NULL; + PCIDevice *d = PCI_DEVICE(s); + const PCIIORegion *r; + uint32_t writable_mask = 0; + uint32_t bar_emu_mask = 0; + uint32_t bar_ro_mask = 0; + uint32_t r_size = 0; + int index = 0; + uint32_t *data = cfg_entry->ptr.word; + + index = xen_pt_bar_offset_to_index(reg->offset); + if (index < 0 || index >= PCI_NUM_REGIONS) { + XEN_PT_ERR(d, "Internal error: Invalid BAR index [%d].\n", index); + return -1; + } + + r = &d->io_regions[index]; + base = &s->bases[index]; + r_size = xen_pt_get_emul_size(base->bar_flag, r->size); + + /* set emulate mask and read-only mask values depend on the BAR flag */ + switch (s->bases[index].bar_flag) { + case XEN_PT_BAR_FLAG_MEM: + bar_emu_mask = XEN_PT_BAR_MEM_EMU_MASK; + if (!r_size) { + /* low 32 bits mask for 64 bit bars */ + bar_ro_mask 
= XEN_PT_BAR_ALLF; + } else { + bar_ro_mask = XEN_PT_BAR_MEM_RO_MASK | (r_size - 1); + } + break; + case XEN_PT_BAR_FLAG_IO: + bar_emu_mask = XEN_PT_BAR_IO_EMU_MASK; + bar_ro_mask = XEN_PT_BAR_IO_RO_MASK | (r_size - 1); + break; + case XEN_PT_BAR_FLAG_UPPER: + assert(index > 0); + r_size = d->io_regions[index - 1].size >> 32; + bar_emu_mask = XEN_PT_BAR_ALLF; + bar_ro_mask = r_size ? r_size - 1 : 0; + break; + default: + break; + } + + /* modify emulate register */ + writable_mask = bar_emu_mask & ~bar_ro_mask & valid_mask; + *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask); + + /* check whether we need to update the virtual region address or not */ + switch (s->bases[index].bar_flag) { + case XEN_PT_BAR_FLAG_UPPER: + case XEN_PT_BAR_FLAG_MEM: + /* nothing to do */ + break; + case XEN_PT_BAR_FLAG_IO: + /* nothing to do */ + break; + default: + break; + } + + /* create value for writing to I/O device register */ + *val = XEN_PT_MERGE_VALUE(*val, dev_value, 0); + + return 0; +} + +/* write Exp ROM BAR */ +static int xen_pt_exp_rom_bar_reg_write(XenPCIPassthroughState *s, + XenPTReg *cfg_entry, uint32_t *val, + uint32_t dev_value, uint32_t valid_mask) +{ + XenPTRegInfo *reg = cfg_entry->reg; + XenPTRegion *base = NULL; + PCIDevice *d = PCI_DEVICE(s); + uint32_t writable_mask = 0; + uint32_t throughable_mask = get_throughable_mask(s, reg, valid_mask); + pcibus_t r_size = 0; + uint32_t bar_ro_mask = 0; + uint32_t *data = cfg_entry->ptr.word; + + r_size = d->io_regions[PCI_ROM_SLOT].size; + base = &s->bases[PCI_ROM_SLOT]; + /* align memory type resource size */ + r_size = xen_pt_get_emul_size(base->bar_flag, r_size); + + /* set emulate mask and read-only mask */ + bar_ro_mask = (reg->ro_mask | (r_size - 1)) & ~PCI_ROM_ADDRESS_ENABLE; + + /* modify emulate register */ + writable_mask = ~bar_ro_mask & valid_mask; + *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask); + + /* create value for writing to I/O device register */ + *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask); + + return 0; +} + +static int xen_pt_intel_opregion_read(XenPCIPassthroughState *s, + XenPTReg *cfg_entry, + uint32_t *value, uint32_t valid_mask) +{ + *value = igd_read_opregion(s); + return 0; +} + +static int xen_pt_intel_opregion_write(XenPCIPassthroughState *s, + XenPTReg *cfg_entry, uint32_t *value, + uint32_t dev_value, uint32_t valid_mask) +{ + igd_write_opregion(s, *value); + return 0; +} + +/* Header Type0 reg static information table */ +static XenPTRegInfo xen_pt_emu_reg_header0[] = { + /* Vendor ID reg */ + { + .offset = PCI_VENDOR_ID, + .size = 2, + .init_val = 0x0000, + .ro_mask = 0xFFFF, + .emu_mask = 0xFFFF, + .init = xen_pt_vendor_reg_init, + .u.w.read = xen_pt_word_reg_read, + .u.w.write = xen_pt_word_reg_write, + }, + /* Device ID reg */ + { + .offset = PCI_DEVICE_ID, + .size = 2, + .init_val = 0x0000, + .ro_mask = 0xFFFF, + .emu_mask = 0xFFFF, + .init = xen_pt_device_reg_init, + .u.w.read = xen_pt_word_reg_read, + .u.w.write = xen_pt_word_reg_write, + }, + /* Command reg */ + { + .offset = PCI_COMMAND, + .size = 2, + .init_val = 0x0000, + .res_mask = 0xF880, + .emu_mask = 0x0743, + .init = xen_pt_common_reg_init, + .u.w.read = xen_pt_word_reg_read, + .u.w.write = xen_pt_cmd_reg_write, + }, + /* Capabilities Pointer reg */ + { + .offset = PCI_CAPABILITY_LIST, + .size = 1, + .init_val = 0x00, + .ro_mask = 0xFF, + .emu_mask = 0xFF, + .init = xen_pt_ptr_reg_init, + .u.b.read = xen_pt_byte_reg_read, + .u.b.write = xen_pt_byte_reg_write, + }, + /* Status reg */ + /* use emulated Cap Ptr 
value to initialize, + * so need to be declared after Cap Ptr reg + */ + { + .offset = PCI_STATUS, + .size = 2, + .init_val = 0x0000, + .res_mask = 0x0007, + .ro_mask = 0x06F8, + .rw1c_mask = 0xF900, + .emu_mask = 0x0010, + .init = xen_pt_status_reg_init, + .u.w.read = xen_pt_word_reg_read, + .u.w.write = xen_pt_word_reg_write, + }, + /* Cache Line Size reg */ + { + .offset = PCI_CACHE_LINE_SIZE, + .size = 1, + .init_val = 0x00, + .ro_mask = 0x00, + .emu_mask = 0xFF, + .init = xen_pt_common_reg_init, + .u.b.read = xen_pt_byte_reg_read, + .u.b.write = xen_pt_byte_reg_write, + }, + /* Latency Timer reg */ + { + .offset = PCI_LATENCY_TIMER, + .size = 1, + .init_val = 0x00, + .ro_mask = 0x00, + .emu_mask = 0xFF, + .init = xen_pt_common_reg_init, + .u.b.read = xen_pt_byte_reg_read, + .u.b.write = xen_pt_byte_reg_write, + }, + /* Header Type reg */ + { + .offset = PCI_HEADER_TYPE, + .size = 1, + .init_val = 0x00, + .ro_mask = 0xFF, + .emu_mask = 0x00, + .init = xen_pt_header_type_reg_init, + .u.b.read = xen_pt_byte_reg_read, + .u.b.write = xen_pt_byte_reg_write, + }, + /* Interrupt Line reg */ + { + .offset = PCI_INTERRUPT_LINE, + .size = 1, + .init_val = 0x00, + .ro_mask = 0x00, + .emu_mask = 0xFF, + .init = xen_pt_common_reg_init, + .u.b.read = xen_pt_byte_reg_read, + .u.b.write = xen_pt_byte_reg_write, + }, + /* Interrupt Pin reg */ + { + .offset = PCI_INTERRUPT_PIN, + .size = 1, + .init_val = 0x00, + .ro_mask = 0xFF, + .emu_mask = 0xFF, + .init = xen_pt_irqpin_reg_init, + .u.b.read = xen_pt_byte_reg_read, + .u.b.write = xen_pt_byte_reg_write, + }, + /* BAR 0 reg */ + /* mask of BAR need to be decided later, depends on IO/MEM type */ + { + .offset = PCI_BASE_ADDRESS_0, + .size = 4, + .init_val = 0x00000000, + .init = xen_pt_bar_reg_init, + .u.dw.read = xen_pt_bar_reg_read, + .u.dw.write = xen_pt_bar_reg_write, + }, + /* BAR 1 reg */ + { + .offset = PCI_BASE_ADDRESS_1, + .size = 4, + .init_val = 0x00000000, + .init = xen_pt_bar_reg_init, + .u.dw.read = xen_pt_bar_reg_read, + .u.dw.write = xen_pt_bar_reg_write, + }, + /* BAR 2 reg */ + { + .offset = PCI_BASE_ADDRESS_2, + .size = 4, + .init_val = 0x00000000, + .init = xen_pt_bar_reg_init, + .u.dw.read = xen_pt_bar_reg_read, + .u.dw.write = xen_pt_bar_reg_write, + }, + /* BAR 3 reg */ + { + .offset = PCI_BASE_ADDRESS_3, + .size = 4, + .init_val = 0x00000000, + .init = xen_pt_bar_reg_init, + .u.dw.read = xen_pt_bar_reg_read, + .u.dw.write = xen_pt_bar_reg_write, + }, + /* BAR 4 reg */ + { + .offset = PCI_BASE_ADDRESS_4, + .size = 4, + .init_val = 0x00000000, + .init = xen_pt_bar_reg_init, + .u.dw.read = xen_pt_bar_reg_read, + .u.dw.write = xen_pt_bar_reg_write, + }, + /* BAR 5 reg */ + { + .offset = PCI_BASE_ADDRESS_5, + .size = 4, + .init_val = 0x00000000, + .init = xen_pt_bar_reg_init, + .u.dw.read = xen_pt_bar_reg_read, + .u.dw.write = xen_pt_bar_reg_write, + }, + /* Expansion ROM BAR reg */ + { + .offset = PCI_ROM_ADDRESS, + .size = 4, + .init_val = 0x00000000, + .ro_mask = ~PCI_ROM_ADDRESS_MASK & ~PCI_ROM_ADDRESS_ENABLE, + .emu_mask = (uint32_t)PCI_ROM_ADDRESS_MASK, + .init = xen_pt_bar_reg_init, + .u.dw.read = xen_pt_long_reg_read, + .u.dw.write = xen_pt_exp_rom_bar_reg_write, + }, + { + .size = 0, + }, +}; + + +/********************************* + * Vital Product Data Capability + */ + +/* Vital Product Data Capability Structure reg static information table */ +static XenPTRegInfo xen_pt_emu_reg_vpd[] = { + { + .offset = PCI_CAP_LIST_NEXT, + .size = 1, + .init_val = 0x00, + .ro_mask = 0xFF, + .emu_mask = 0xFF, + .init = xen_pt_ptr_reg_init, 
+ .u.b.read = xen_pt_byte_reg_read, + .u.b.write = xen_pt_byte_reg_write, + }, + { + .offset = PCI_VPD_ADDR, + .size = 2, + .ro_mask = 0x0003, + .emu_mask = 0x0003, + .init = xen_pt_common_reg_init, + .u.w.read = xen_pt_word_reg_read, + .u.w.write = xen_pt_word_reg_write, + }, + { + .size = 0, + }, +}; + + +/************************************** + * Vendor Specific Capability + */ + +/* Vendor Specific Capability Structure reg static information table */ +static XenPTRegInfo xen_pt_emu_reg_vendor[] = { + { + .offset = PCI_CAP_LIST_NEXT, + .size = 1, + .init_val = 0x00, + .ro_mask = 0xFF, + .emu_mask = 0xFF, + .init = xen_pt_ptr_reg_init, + .u.b.read = xen_pt_byte_reg_read, + .u.b.write = xen_pt_byte_reg_write, + }, + { + .size = 0, + }, +}; + + +/***************************** + * PCI Express Capability + */ + +static inline uint8_t get_capability_version(XenPCIPassthroughState *s, + uint32_t offset) +{ + uint8_t flag; + if (xen_host_pci_get_byte(&s->real_device, offset + PCI_EXP_FLAGS, &flag)) { + return 0; + } + return flag & PCI_EXP_FLAGS_VERS; +} + +static inline uint8_t get_device_type(XenPCIPassthroughState *s, + uint32_t offset) +{ + uint8_t flag; + if (xen_host_pci_get_byte(&s->real_device, offset + PCI_EXP_FLAGS, &flag)) { + return 0; + } + return (flag & PCI_EXP_FLAGS_TYPE) >> 4; +} + +/* initialize Link Control register */ +static int xen_pt_linkctrl_reg_init(XenPCIPassthroughState *s, + XenPTRegInfo *reg, uint32_t real_offset, + uint32_t *data) +{ + uint8_t cap_ver = get_capability_version(s, real_offset - reg->offset); + uint8_t dev_type = get_device_type(s, real_offset - reg->offset); + + /* no need to initialize in case of Root Complex Integrated Endpoint + * with cap_ver 1.x + */ + if ((dev_type == PCI_EXP_TYPE_RC_END) && (cap_ver == 1)) { + *data = XEN_PT_INVALID_REG; + } + + *data = reg->init_val; + return 0; +} +/* initialize Device Control 2 register */ +static int xen_pt_devctrl2_reg_init(XenPCIPassthroughState *s, + XenPTRegInfo *reg, uint32_t real_offset, + uint32_t *data) +{ + uint8_t cap_ver = get_capability_version(s, real_offset - reg->offset); + + /* no need to initialize in case of cap_ver 1.x */ + if (cap_ver == 1) { + *data = XEN_PT_INVALID_REG; + } + + *data = reg->init_val; + return 0; +} +/* initialize Link Control 2 register */ +static int xen_pt_linkctrl2_reg_init(XenPCIPassthroughState *s, + XenPTRegInfo *reg, uint32_t real_offset, + uint32_t *data) +{ + uint8_t cap_ver = get_capability_version(s, real_offset - reg->offset); + uint32_t reg_field = 0; + + /* no need to initialize in case of cap_ver 1.x */ + if (cap_ver == 1) { + reg_field = XEN_PT_INVALID_REG; + } else { + /* set Supported Link Speed */ + uint8_t lnkcap; + int rc; + rc = xen_host_pci_get_byte(&s->real_device, + real_offset - reg->offset + PCI_EXP_LNKCAP, + &lnkcap); + if (rc) { + return rc; + } + reg_field |= PCI_EXP_LNKCAP_SLS & lnkcap; + } + + *data = reg_field; + return 0; +} + +/* PCI Express Capability Structure reg static information table */ +static XenPTRegInfo xen_pt_emu_reg_pcie[] = { + /* Next Pointer reg */ + { + .offset = PCI_CAP_LIST_NEXT, + .size = 1, + .init_val = 0x00, + .ro_mask = 0xFF, + .emu_mask = 0xFF, + .init = xen_pt_ptr_reg_init, + .u.b.read = xen_pt_byte_reg_read, + .u.b.write = xen_pt_byte_reg_write, + }, + /* Device Capabilities reg */ + { + .offset = PCI_EXP_DEVCAP, + .size = 4, + .init_val = 0x00000000, + .ro_mask = 0xFFFFFFFF, + .emu_mask = 0x10000000, + .init = xen_pt_common_reg_init, + .u.dw.read = xen_pt_long_reg_read, + .u.dw.write = 
xen_pt_long_reg_write, + }, + /* Device Control reg */ + { + .offset = PCI_EXP_DEVCTL, + .size = 2, + .init_val = 0x2810, + .ro_mask = 0x8400, + .emu_mask = 0xFFFF, + .init = xen_pt_common_reg_init, + .u.w.read = xen_pt_word_reg_read, + .u.w.write = xen_pt_word_reg_write, + }, + /* Device Status reg */ + { + .offset = PCI_EXP_DEVSTA, + .size = 2, + .res_mask = 0xFFC0, + .ro_mask = 0x0030, + .rw1c_mask = 0x000F, + .init = xen_pt_common_reg_init, + .u.w.read = xen_pt_word_reg_read, + .u.w.write = xen_pt_word_reg_write, + }, + /* Link Control reg */ + { + .offset = PCI_EXP_LNKCTL, + .size = 2, + .init_val = 0x0000, + .ro_mask = 0xFC34, + .emu_mask = 0xFFFF, + .init = xen_pt_linkctrl_reg_init, + .u.w.read = xen_pt_word_reg_read, + .u.w.write = xen_pt_word_reg_write, + }, + /* Link Status reg */ + { + .offset = PCI_EXP_LNKSTA, + .size = 2, + .ro_mask = 0x3FFF, + .rw1c_mask = 0xC000, + .init = xen_pt_common_reg_init, + .u.w.read = xen_pt_word_reg_read, + .u.w.write = xen_pt_word_reg_write, + }, + /* Device Control 2 reg */ + { + .offset = 0x28, + .size = 2, + .init_val = 0x0000, + .ro_mask = 0xFFE0, + .emu_mask = 0xFFFF, + .init = xen_pt_devctrl2_reg_init, + .u.w.read = xen_pt_word_reg_read, + .u.w.write = xen_pt_word_reg_write, + }, + /* Link Control 2 reg */ + { + .offset = 0x30, + .size = 2, + .init_val = 0x0000, + .ro_mask = 0xE040, + .emu_mask = 0xFFFF, + .init = xen_pt_linkctrl2_reg_init, + .u.w.read = xen_pt_word_reg_read, + .u.w.write = xen_pt_word_reg_write, + }, + { + .size = 0, + }, +}; + + +/********************************* + * Power Management Capability + */ + +/* Power Management Capability reg static information table */ +static XenPTRegInfo xen_pt_emu_reg_pm[] = { + /* Next Pointer reg */ + { + .offset = PCI_CAP_LIST_NEXT, + .size = 1, + .init_val = 0x00, + .ro_mask = 0xFF, + .emu_mask = 0xFF, + .init = xen_pt_ptr_reg_init, + .u.b.read = xen_pt_byte_reg_read, + .u.b.write = xen_pt_byte_reg_write, + }, + /* Power Management Capabilities reg */ + { + .offset = PCI_CAP_FLAGS, + .size = 2, + .init_val = 0x0000, + .ro_mask = 0xFFFF, + .emu_mask = 0xF9C8, + .init = xen_pt_common_reg_init, + .u.w.read = xen_pt_word_reg_read, + .u.w.write = xen_pt_word_reg_write, + }, + /* PCI Power Management Control/Status reg */ + { + .offset = PCI_PM_CTRL, + .size = 2, + .init_val = 0x0008, + .res_mask = 0x00F0, + .ro_mask = 0x610C, + .rw1c_mask = 0x8000, + .emu_mask = 0x810B, + .init = xen_pt_common_reg_init, + .u.w.read = xen_pt_word_reg_read, + .u.w.write = xen_pt_word_reg_write, + }, + { + .size = 0, + }, +}; + + +/******************************** + * MSI Capability + */ + +/* Helper */ +#define xen_pt_msi_check_type(offset, flags, what) \ + ((offset) == ((flags) & PCI_MSI_FLAGS_64BIT ? 
\ + PCI_MSI_##what##_64 : PCI_MSI_##what##_32)) + +/* Message Control register */ +static int xen_pt_msgctrl_reg_init(XenPCIPassthroughState *s, + XenPTRegInfo *reg, uint32_t real_offset, + uint32_t *data) +{ + XenPTMSI *msi = s->msi; + uint16_t reg_field; + int rc; + + /* use I/O device register's value as initial value */ + rc = xen_host_pci_get_word(&s->real_device, real_offset, &reg_field); + if (rc) { + return rc; + } + if (reg_field & PCI_MSI_FLAGS_ENABLE) { + XEN_PT_LOG(&s->dev, "MSI already enabled, disabling it first\n"); + xen_host_pci_set_word(&s->real_device, real_offset, + reg_field & ~PCI_MSI_FLAGS_ENABLE); + } + msi->flags |= reg_field; + msi->ctrl_offset = real_offset; + msi->initialized = false; + msi->mapped = false; + + *data = reg->init_val; + return 0; +} +static int xen_pt_msgctrl_reg_write(XenPCIPassthroughState *s, + XenPTReg *cfg_entry, uint16_t *val, + uint16_t dev_value, uint16_t valid_mask) +{ + XenPTRegInfo *reg = cfg_entry->reg; + XenPTMSI *msi = s->msi; + uint16_t writable_mask = 0; + uint16_t throughable_mask = get_throughable_mask(s, reg, valid_mask); + uint16_t *data = cfg_entry->ptr.half_word; + + /* Currently no support for multi-vector */ + if (*val & PCI_MSI_FLAGS_QSIZE) { + XEN_PT_WARN(&s->dev, "Guest tries to set more than 1 vector (ctrl: %x)\n", *val); + } + + /* modify emulate register */ + writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask; + *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask); + msi->flags |= *data & ~PCI_MSI_FLAGS_ENABLE; + + /* create value for writing to I/O device register */ + *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask); + + /* update MSI */ + if (*val & PCI_MSI_FLAGS_ENABLE) { + /* setup MSI pirq for the first time */ + if (!msi->initialized) { + /* Init physical one */ + XEN_PT_LOG(&s->dev, "setup MSI (register: %x).\n", *val); + if (xen_pt_msi_setup(s)) { + /* We do not broadcast the error to the framework code, so + * that MSI errors are contained in MSI emulation code and + * QEMU can go on running. + * Guest MSI would actually not be working.
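+ * Since the ENABLE bit is outside this register's emu_mask, a later + * guest read is served from the real device, which still has MSI + * disabled, so the guest can observe that the enable did not stick.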
+ */ + *val &= ~PCI_MSI_FLAGS_ENABLE; + XEN_PT_WARN(&s->dev, "Can not map MSI (register: %x)!\n", *val); + return 0; + } + if (xen_pt_msi_update(s)) { + *val &= ~PCI_MSI_FLAGS_ENABLE; + XEN_PT_WARN(&s->dev, "Can not bind MSI (register: %x)!\n", *val); + return 0; + } + msi->initialized = true; + msi->mapped = true; + } + msi->flags |= PCI_MSI_FLAGS_ENABLE; + } else if (msi->mapped) { + xen_pt_msi_disable(s); + } + + return 0; +} + +/* initialize Message Upper Address register */ +static int xen_pt_msgaddr64_reg_init(XenPCIPassthroughState *s, + XenPTRegInfo *reg, uint32_t real_offset, + uint32_t *data) +{ + /* no need to initialize in case of 32 bit type */ + if (!(s->msi->flags & PCI_MSI_FLAGS_64BIT)) { + *data = XEN_PT_INVALID_REG; + } else { + *data = reg->init_val; + } + + return 0; +} +/* this function will be called twice (for 32 bit and 64 bit type) */ +/* initialize Message Data register */ +static int xen_pt_msgdata_reg_init(XenPCIPassthroughState *s, + XenPTRegInfo *reg, uint32_t real_offset, + uint32_t *data) +{ + uint32_t flags = s->msi->flags; + uint32_t offset = reg->offset; + + /* check the offset whether matches the type or not */ + if (xen_pt_msi_check_type(offset, flags, DATA)) { + *data = reg->init_val; + } else { + *data = XEN_PT_INVALID_REG; + } + return 0; +} + +/* this function will be called twice (for 32 bit and 64 bit type) */ +/* initialize Mask register */ +static int xen_pt_mask_reg_init(XenPCIPassthroughState *s, + XenPTRegInfo *reg, uint32_t real_offset, + uint32_t *data) +{ + uint32_t flags = s->msi->flags; + + /* check the offset whether matches the type or not */ + if (!(flags & PCI_MSI_FLAGS_MASKBIT)) { + *data = XEN_PT_INVALID_REG; + } else if (xen_pt_msi_check_type(reg->offset, flags, MASK)) { + *data = reg->init_val; + } else { + *data = XEN_PT_INVALID_REG; + } + return 0; +} + +/* this function will be called twice (for 32 bit and 64 bit type) */ +/* initialize Pending register */ +static int xen_pt_pending_reg_init(XenPCIPassthroughState *s, + XenPTRegInfo *reg, uint32_t real_offset, + uint32_t *data) +{ + uint32_t flags = s->msi->flags; + + /* check the offset whether matches the type or not */ + if (!(flags & PCI_MSI_FLAGS_MASKBIT)) { + *data = XEN_PT_INVALID_REG; + } else if (xen_pt_msi_check_type(reg->offset, flags, PENDING)) { + *data = reg->init_val; + } else { + *data = XEN_PT_INVALID_REG; + } + return 0; +} + +/* write Message Address register */ +static int xen_pt_msgaddr32_reg_write(XenPCIPassthroughState *s, + XenPTReg *cfg_entry, uint32_t *val, + uint32_t dev_value, uint32_t valid_mask) +{ + XenPTRegInfo *reg = cfg_entry->reg; + uint32_t writable_mask = 0; + uint32_t old_addr = *cfg_entry->ptr.word; + uint32_t *data = cfg_entry->ptr.word; + + /* modify emulate register */ + writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask; + *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask); + s->msi->addr_lo = *data; + + /* create value for writing to I/O device register */ + *val = XEN_PT_MERGE_VALUE(*val, dev_value, 0); + + /* update MSI */ + if (*data != old_addr) { + if (s->msi->mapped) { + xen_pt_msi_update(s); + } + } + + return 0; +} +/* write Message Upper Address register */ +static int xen_pt_msgaddr64_reg_write(XenPCIPassthroughState *s, + XenPTReg *cfg_entry, uint32_t *val, + uint32_t dev_value, uint32_t valid_mask) +{ + XenPTRegInfo *reg = cfg_entry->reg; + uint32_t writable_mask = 0; + uint32_t old_addr = *cfg_entry->ptr.word; + uint32_t *data = cfg_entry->ptr.word; + + /* check whether the type is 64 bit or not */ + if 
(!(s->msi->flags & PCI_MSI_FLAGS_64BIT)) { + XEN_PT_ERR(&s->dev, + "Can't write to the upper address without 64 bit support\n"); + return -1; + } + + /* modify emulate register */ + writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask; + *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask); + /* update the msi_info too */ + s->msi->addr_hi = *data; + + /* create value for writing to I/O device register */ + *val = XEN_PT_MERGE_VALUE(*val, dev_value, 0); + + /* update MSI */ + if (*data != old_addr) { + if (s->msi->mapped) { + xen_pt_msi_update(s); + } + } + + return 0; +} + + +/* this function will be called twice (for 32 bit and 64 bit type) */ +/* write Message Data register */ +static int xen_pt_msgdata_reg_write(XenPCIPassthroughState *s, + XenPTReg *cfg_entry, uint16_t *val, + uint16_t dev_value, uint16_t valid_mask) +{ + XenPTRegInfo *reg = cfg_entry->reg; + XenPTMSI *msi = s->msi; + uint16_t writable_mask = 0; + uint16_t old_data = *cfg_entry->ptr.half_word; + uint32_t offset = reg->offset; + uint16_t *data = cfg_entry->ptr.half_word; + + /* check the offset whether matches the type or not */ + if (!xen_pt_msi_check_type(offset, msi->flags, DATA)) { + /* exit I/O emulator */ + XEN_PT_ERR(&s->dev, "the offset does not match the 32/64 bit type!\n"); + return -1; + } + + /* modify emulate register */ + writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask; + *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask); + /* update the msi_info too */ + msi->data = *data; + + /* create value for writing to I/O device register */ + *val = XEN_PT_MERGE_VALUE(*val, dev_value, 0); + + /* update MSI */ + if (*data != old_data) { + if (msi->mapped) { + xen_pt_msi_update(s); + } + } + + return 0; +} + +static int xen_pt_mask_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry, + uint32_t *val, uint32_t dev_value, + uint32_t valid_mask) +{ + int rc; + + rc = xen_pt_long_reg_write(s, cfg_entry, val, dev_value, valid_mask); + if (rc) { + return rc; + } + + s->msi->mask = *val; + + return 0; +} + +/* MSI Capability Structure reg static information table */ +static XenPTRegInfo xen_pt_emu_reg_msi[] = { + /* Next Pointer reg */ + { + .offset = PCI_CAP_LIST_NEXT, + .size = 1, + .init_val = 0x00, + .ro_mask = 0xFF, + .emu_mask = 0xFF, + .init = xen_pt_ptr_reg_init, + .u.b.read = xen_pt_byte_reg_read, + .u.b.write = xen_pt_byte_reg_write, + }, + /* Message Control reg */ + { + .offset = PCI_MSI_FLAGS, + .size = 2, + .init_val = 0x0000, + .res_mask = 0xFE00, + .ro_mask = 0x018E, + .emu_mask = 0x017E, + .init = xen_pt_msgctrl_reg_init, + .u.w.read = xen_pt_word_reg_read, + .u.w.write = xen_pt_msgctrl_reg_write, + }, + /* Message Address reg */ + { + .offset = PCI_MSI_ADDRESS_LO, + .size = 4, + .init_val = 0x00000000, + .ro_mask = 0x00000003, + .emu_mask = 0xFFFFFFFF, + .init = xen_pt_common_reg_init, + .u.dw.read = xen_pt_long_reg_read, + .u.dw.write = xen_pt_msgaddr32_reg_write, + }, + /* Message Upper Address reg (if PCI_MSI_FLAGS_64BIT set) */ + { + .offset = PCI_MSI_ADDRESS_HI, + .size = 4, + .init_val = 0x00000000, + .ro_mask = 0x00000000, + .emu_mask = 0xFFFFFFFF, + .init = xen_pt_msgaddr64_reg_init, + .u.dw.read = xen_pt_long_reg_read, + .u.dw.write = xen_pt_msgaddr64_reg_write, + }, + /* Message Data reg (16 bits of data for 32-bit devices) */ + { + .offset = PCI_MSI_DATA_32, + .size = 2, + .init_val = 0x0000, + .ro_mask = 0x0000, + .emu_mask = 0xFFFF, + .init = xen_pt_msgdata_reg_init, + .u.w.read = xen_pt_word_reg_read, + .u.w.write = xen_pt_msgdata_reg_write, + }, + /* Message Data 
reg (16 bits of data for 64-bit devices) */ + { + .offset = PCI_MSI_DATA_64, + .size = 2, + .init_val = 0x0000, + .ro_mask = 0x0000, + .emu_mask = 0xFFFF, + .init = xen_pt_msgdata_reg_init, + .u.w.read = xen_pt_word_reg_read, + .u.w.write = xen_pt_msgdata_reg_write, + }, + /* Mask reg (if PCI_MSI_FLAGS_MASKBIT set, for 32-bit devices) */ + { + .offset = PCI_MSI_MASK_32, + .size = 4, + .init_val = 0x00000000, + .ro_mask = 0xFFFFFFFF, + .emu_mask = 0xFFFFFFFF, + .init = xen_pt_mask_reg_init, + .u.dw.read = xen_pt_long_reg_read, + .u.dw.write = xen_pt_mask_reg_write, + }, + /* Mask reg (if PCI_MSI_FLAGS_MASKBIT set, for 64-bit devices) */ + { + .offset = PCI_MSI_MASK_64, + .size = 4, + .init_val = 0x00000000, + .ro_mask = 0xFFFFFFFF, + .emu_mask = 0xFFFFFFFF, + .init = xen_pt_mask_reg_init, + .u.dw.read = xen_pt_long_reg_read, + .u.dw.write = xen_pt_mask_reg_write, + }, + /* Pending reg (if PCI_MSI_FLAGS_MASKBIT set, for 32-bit devices) */ + { + .offset = PCI_MSI_MASK_32 + 4, + .size = 4, + .init_val = 0x00000000, + .ro_mask = 0xFFFFFFFF, + .emu_mask = 0x00000000, + .init = xen_pt_pending_reg_init, + .u.dw.read = xen_pt_long_reg_read, + .u.dw.write = xen_pt_long_reg_write, + }, + /* Pending reg (if PCI_MSI_FLAGS_MASKBIT set, for 64-bit devices) */ + { + .offset = PCI_MSI_MASK_64 + 4, + .size = 4, + .init_val = 0x00000000, + .ro_mask = 0xFFFFFFFF, + .emu_mask = 0x00000000, + .init = xen_pt_pending_reg_init, + .u.dw.read = xen_pt_long_reg_read, + .u.dw.write = xen_pt_long_reg_write, + }, + { + .size = 0, + }, +}; + + +/************************************** + * MSI-X Capability + */ + +/* Message Control register for MSI-X */ +static int xen_pt_msixctrl_reg_init(XenPCIPassthroughState *s, + XenPTRegInfo *reg, uint32_t real_offset, + uint32_t *data) +{ + uint16_t reg_field; + int rc; + + /* use I/O device register's value as initial value */ + rc = xen_host_pci_get_word(&s->real_device, real_offset, &reg_field); + if (rc) { + return rc; + } + if (reg_field & PCI_MSIX_FLAGS_ENABLE) { + XEN_PT_LOG(&s->dev, "MSIX already enabled, disabling it first\n"); + xen_host_pci_set_word(&s->real_device, real_offset, + reg_field & ~PCI_MSIX_FLAGS_ENABLE); + } + + s->msix->ctrl_offset = real_offset; + + *data = reg->init_val; + return 0; +} +static int xen_pt_msixctrl_reg_write(XenPCIPassthroughState *s, + XenPTReg *cfg_entry, uint16_t *val, + uint16_t dev_value, uint16_t valid_mask) +{ + XenPTRegInfo *reg = cfg_entry->reg; + uint16_t writable_mask = 0; + uint16_t throughable_mask = get_throughable_mask(s, reg, valid_mask); + int debug_msix_enabled_old; + uint16_t *data = cfg_entry->ptr.half_word; + + /* modify emulate register */ + writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask; + *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask); + + /* create value for writing to I/O device register */ + *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask); + + /* update MSI-X */ + if ((*val & PCI_MSIX_FLAGS_ENABLE) + && !(*val & PCI_MSIX_FLAGS_MASKALL)) { + xen_pt_msix_update(s); + } else if (!(*val & PCI_MSIX_FLAGS_ENABLE) && s->msix->enabled) { + xen_pt_msix_disable(s); + } + + s->msix->maskall = *val & PCI_MSIX_FLAGS_MASKALL; + + debug_msix_enabled_old = s->msix->enabled; + s->msix->enabled = !!(*val & PCI_MSIX_FLAGS_ENABLE); + if (s->msix->enabled != debug_msix_enabled_old) { + XEN_PT_LOG(&s->dev, "%s MSI-X\n", + s->msix->enabled ?
"enable" : "disable"); + } + + return 0; +} + +/* MSI-X Capability Structure reg static information table */ +static XenPTRegInfo xen_pt_emu_reg_msix[] = { + /* Next Pointer reg */ + { + .offset = PCI_CAP_LIST_NEXT, + .size = 1, + .init_val = 0x00, + .ro_mask = 0xFF, + .emu_mask = 0xFF, + .init = xen_pt_ptr_reg_init, + .u.b.read = xen_pt_byte_reg_read, + .u.b.write = xen_pt_byte_reg_write, + }, + /* Message Control reg */ + { + .offset = PCI_MSI_FLAGS, + .size = 2, + .init_val = 0x0000, + .res_mask = 0x3800, + .ro_mask = 0x07FF, + .emu_mask = 0x0000, + .init = xen_pt_msixctrl_reg_init, + .u.w.read = xen_pt_word_reg_read, + .u.w.write = xen_pt_msixctrl_reg_write, + }, + { + .size = 0, + }, +}; + +static XenPTRegInfo xen_pt_emu_reg_igd_opregion[] = { + /* Intel IGFX OpRegion reg */ + { + .offset = 0x0, + .size = 4, + .init_val = 0, + .emu_mask = 0xFFFFFFFF, + .u.dw.read = xen_pt_intel_opregion_read, + .u.dw.write = xen_pt_intel_opregion_write, + }, + { + .size = 0, + }, +}; + +/**************************** + * Capabilities + */ + +/* capability structure register group size functions */ + +static int xen_pt_reg_grp_size_init(XenPCIPassthroughState *s, + const XenPTRegGroupInfo *grp_reg, + uint32_t base_offset, uint8_t *size) +{ + *size = grp_reg->grp_size; + return 0; +} +/* get Vendor Specific Capability Structure register group size */ +static int xen_pt_vendor_size_init(XenPCIPassthroughState *s, + const XenPTRegGroupInfo *grp_reg, + uint32_t base_offset, uint8_t *size) +{ + return xen_host_pci_get_byte(&s->real_device, base_offset + 0x02, size); +} +/* get PCI Express Capability Structure register group size */ +static int xen_pt_pcie_size_init(XenPCIPassthroughState *s, + const XenPTRegGroupInfo *grp_reg, + uint32_t base_offset, uint8_t *size) +{ + PCIDevice *d = PCI_DEVICE(s); + uint8_t version = get_capability_version(s, base_offset); + uint8_t type = get_device_type(s, base_offset); + uint8_t pcie_size = 0; + + + /* calculate size depend on capability version and device/port type */ + /* in case of PCI Express Base Specification Rev 1.x */ + if (version == 1) { + /* The PCI Express Capabilities, Device Capabilities, and Device + * Status/Control registers are required for all PCI Express devices. + * The Link Capabilities and Link Status/Control are required for all + * Endpoints that are not Root Complex Integrated Endpoints. Endpoints + * are not required to implement registers other than those listed + * above and terminate the capability structure. + */ + switch (type) { + case PCI_EXP_TYPE_ENDPOINT: + case PCI_EXP_TYPE_LEG_END: + pcie_size = 0x14; + break; + case PCI_EXP_TYPE_RC_END: + /* has no link */ + pcie_size = 0x0C; + break; + /* only EndPoint passthrough is supported */ + case PCI_EXP_TYPE_ROOT_PORT: + case PCI_EXP_TYPE_UPSTREAM: + case PCI_EXP_TYPE_DOWNSTREAM: + case PCI_EXP_TYPE_PCI_BRIDGE: + case PCI_EXP_TYPE_PCIE_BRIDGE: + case PCI_EXP_TYPE_RC_EC: + default: + XEN_PT_ERR(d, "Unsupported device/port type 0x%x.\n", type); + return -1; + } + } + /* in case of PCI Express Base Specification Rev 2.0 */ + else if (version == 2) { + switch (type) { + case PCI_EXP_TYPE_ENDPOINT: + case PCI_EXP_TYPE_LEG_END: + case PCI_EXP_TYPE_RC_END: + /* For Functions that do not implement the registers, + * these spaces must be hardwired to 0b. 
+ */ + pcie_size = 0x3C; + break; + /* only EndPoint passthrough is supported */ + case PCI_EXP_TYPE_ROOT_PORT: + case PCI_EXP_TYPE_UPSTREAM: + case PCI_EXP_TYPE_DOWNSTREAM: + case PCI_EXP_TYPE_PCI_BRIDGE: + case PCI_EXP_TYPE_PCIE_BRIDGE: + case PCI_EXP_TYPE_RC_EC: + default: + XEN_PT_ERR(d, "Unsupported device/port type 0x%x.\n", type); + return -1; + } + } else { + XEN_PT_ERR(d, "Unsupported capability version 0x%x.\n", version); + return -1; + } + + *size = pcie_size; + return 0; +} +/* get MSI Capability Structure register group size */ +static int xen_pt_msi_size_init(XenPCIPassthroughState *s, + const XenPTRegGroupInfo *grp_reg, + uint32_t base_offset, uint8_t *size) +{ + uint16_t msg_ctrl = 0; + uint8_t msi_size = 0xa; + int rc; + + rc = xen_host_pci_get_word(&s->real_device, base_offset + PCI_MSI_FLAGS, + &msg_ctrl); + if (rc) { + return rc; + } + /* check if 64-bit address is capable of per-vector masking */ + if (msg_ctrl & PCI_MSI_FLAGS_64BIT) { + msi_size += 4; + } + if (msg_ctrl & PCI_MSI_FLAGS_MASKBIT) { + msi_size += 10; + } + + s->msi = g_new0(XenPTMSI, 1); + s->msi->pirq = XEN_PT_UNASSIGNED_PIRQ; + + *size = msi_size; + return 0; +} +/* get MSI-X Capability Structure register group size */ +static int xen_pt_msix_size_init(XenPCIPassthroughState *s, + const XenPTRegGroupInfo *grp_reg, + uint32_t base_offset, uint8_t *size) +{ + int rc = 0; + + rc = xen_pt_msix_init(s, base_offset); + + if (rc < 0) { + XEN_PT_ERR(&s->dev, "Internal error: Invalid xen_pt_msix_init.\n"); + return rc; + } + + *size = grp_reg->grp_size; + return 0; +} + + +static const XenPTRegGroupInfo xen_pt_emu_reg_grps[] = { + /* Header Type0 reg group */ + { + .grp_id = 0xFF, + .grp_type = XEN_PT_GRP_TYPE_EMU, + .grp_size = 0x40, + .size_init = xen_pt_reg_grp_size_init, + .emu_regs = xen_pt_emu_reg_header0, + }, + /* PCI PowerManagement Capability reg group */ + { + .grp_id = PCI_CAP_ID_PM, + .grp_type = XEN_PT_GRP_TYPE_EMU, + .grp_size = PCI_PM_SIZEOF, + .size_init = xen_pt_reg_grp_size_init, + .emu_regs = xen_pt_emu_reg_pm, + }, + /* AGP Capability Structure reg group */ + { + .grp_id = PCI_CAP_ID_AGP, + .grp_type = XEN_PT_GRP_TYPE_HARDWIRED, + .grp_size = 0x30, + .size_init = xen_pt_reg_grp_size_init, + }, + /* Vital Product Data Capability Structure reg group */ + { + .grp_id = PCI_CAP_ID_VPD, + .grp_type = XEN_PT_GRP_TYPE_EMU, + .grp_size = 0x08, + .size_init = xen_pt_reg_grp_size_init, + .emu_regs = xen_pt_emu_reg_vpd, + }, + /* Slot Identification reg group */ + { + .grp_id = PCI_CAP_ID_SLOTID, + .grp_type = XEN_PT_GRP_TYPE_HARDWIRED, + .grp_size = 0x04, + .size_init = xen_pt_reg_grp_size_init, + }, + /* MSI Capability Structure reg group */ + { + .grp_id = PCI_CAP_ID_MSI, + .grp_type = XEN_PT_GRP_TYPE_EMU, + .grp_size = 0xFF, + .size_init = xen_pt_msi_size_init, + .emu_regs = xen_pt_emu_reg_msi, + }, + /* PCI-X Capabilities List Item reg group */ + { + .grp_id = PCI_CAP_ID_PCIX, + .grp_type = XEN_PT_GRP_TYPE_HARDWIRED, + .grp_size = 0x18, + .size_init = xen_pt_reg_grp_size_init, + }, + /* Vendor Specific Capability Structure reg group */ + { + .grp_id = PCI_CAP_ID_VNDR, + .grp_type = XEN_PT_GRP_TYPE_EMU, + .grp_size = 0xFF, + .size_init = xen_pt_vendor_size_init, + .emu_regs = xen_pt_emu_reg_vendor, + }, + /* SHPC Capability List Item reg group */ + { + .grp_id = PCI_CAP_ID_SHPC, + .grp_type = XEN_PT_GRP_TYPE_HARDWIRED, + .grp_size = 0x08, + .size_init = xen_pt_reg_grp_size_init, + }, + /* Subsystem ID and Subsystem Vendor ID Capability List Item reg group */ + { + .grp_id = PCI_CAP_ID_SSVID, + 
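/*
 * [Illustrative aside, not part of the original patch.]
 * xen_pt_msi_size_init above derives the MSI capability length from
 * the Message Control word it reads from the real device:
 *
 *     msi_size = 0xa;                               base structure
 *     msi_size += 4;   if PCI_MSI_FLAGS_64BIT       upper address reg
 *     msi_size += 10;  if PCI_MSI_FLAGS_MASKBIT     mask + pending regs
 *
 * so a function advertising both flags gets 0xa + 4 + 10 = 0x18 bytes,
 * the largest MSI layout the PCI spec defines.
 */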
.grp_type = XEN_PT_GRP_TYPE_HARDWIRED, + .grp_size = 0x08, + .size_init = xen_pt_reg_grp_size_init, + }, + /* AGP 8x Capability Structure reg group */ + { + .grp_id = PCI_CAP_ID_AGP3, + .grp_type = XEN_PT_GRP_TYPE_HARDWIRED, + .grp_size = 0x30, + .size_init = xen_pt_reg_grp_size_init, + }, + /* PCI Express Capability Structure reg group */ + { + .grp_id = PCI_CAP_ID_EXP, + .grp_type = XEN_PT_GRP_TYPE_EMU, + .grp_size = 0xFF, + .size_init = xen_pt_pcie_size_init, + .emu_regs = xen_pt_emu_reg_pcie, + }, + /* MSI-X Capability Structure reg group */ + { + .grp_id = PCI_CAP_ID_MSIX, + .grp_type = XEN_PT_GRP_TYPE_EMU, + .grp_size = 0x0C, + .size_init = xen_pt_msix_size_init, + .emu_regs = xen_pt_emu_reg_msix, + }, + /* Intel IGD Opregion group */ + { + .grp_id = XEN_PCI_INTEL_OPREGION, + .grp_type = XEN_PT_GRP_TYPE_EMU, + .grp_size = 0x4, + .size_init = xen_pt_reg_grp_size_init, + .emu_regs = xen_pt_emu_reg_igd_opregion, + }, + { + .grp_size = 0, + }, +}; + +/* initialize Capabilities Pointer or Next Pointer register */ +static int xen_pt_ptr_reg_init(XenPCIPassthroughState *s, + XenPTRegInfo *reg, uint32_t real_offset, + uint32_t *data) +{ + int i, rc; + uint8_t reg_field; + uint8_t cap_id = 0; + + rc = xen_host_pci_get_byte(&s->real_device, real_offset, &reg_field); + if (rc) { + return rc; + } + /* find capability offset */ + while (reg_field) { + for (i = 0; xen_pt_emu_reg_grps[i].grp_size != 0; i++) { + if (xen_pt_hide_dev_cap(&s->real_device, + xen_pt_emu_reg_grps[i].grp_id)) { + continue; + } + + rc = xen_host_pci_get_byte(&s->real_device, + reg_field + PCI_CAP_LIST_ID, &cap_id); + if (rc) { + XEN_PT_ERR(&s->dev, "Failed to read capability @0x%x (rc:%d)\n", + reg_field + PCI_CAP_LIST_ID, rc); + return rc; + } + if (xen_pt_emu_reg_grps[i].grp_id == cap_id) { + if (xen_pt_emu_reg_grps[i].grp_type == XEN_PT_GRP_TYPE_EMU) { + goto out; + } + /* ignore the 0 hardwired capability, find next one */ + break; + } + } + + /* next capability */ + rc = xen_host_pci_get_byte(&s->real_device, + reg_field + PCI_CAP_LIST_NEXT, &reg_field); + if (rc) { + return rc; + } + } + +out: + *data = reg_field; + return 0; +} + + +/************* + * Main + */ + +static uint8_t find_cap_offset(XenPCIPassthroughState *s, uint8_t cap) +{ + uint8_t id; + unsigned max_cap = XEN_PCI_CAP_MAX; + uint8_t pos = PCI_CAPABILITY_LIST; + uint8_t status = 0; + + if (xen_host_pci_get_byte(&s->real_device, PCI_STATUS, &status)) { + return 0; + } + if ((status & PCI_STATUS_CAP_LIST) == 0) { + return 0; + } + + while (max_cap--) { + if (xen_host_pci_get_byte(&s->real_device, pos, &pos)) { + break; + } + if (pos < PCI_CONFIG_HEADER_SIZE) { + break; + } + + pos &= ~3; + if (xen_host_pci_get_byte(&s->real_device, + pos + PCI_CAP_LIST_ID, &id)) { + break; + } + + if (id == 0xff) { + break; + } + if (id == cap) { + return pos; + } + + pos += PCI_CAP_LIST_NEXT; + } + return 0; +} + +static void xen_pt_config_reg_init(XenPCIPassthroughState *s, + XenPTRegGroup *reg_grp, XenPTRegInfo *reg, + Error **errp) +{ + XenPTReg *reg_entry; + uint32_t data = 0; + int rc = 0; + + reg_entry = g_new0(XenPTReg, 1); + reg_entry->reg = reg; + + if (reg->init) { + uint32_t host_mask, size_mask; + unsigned int offset; + uint32_t val; + + /* initialize emulate register */ + rc = reg->init(s, reg_entry->reg, + reg_grp->base_offset + reg->offset, &data); + if (rc < 0) { + g_free(reg_entry); + error_setg(errp, "Init emulate register fail"); + return; + } + if (data == XEN_PT_INVALID_REG) { + /* free unused BAR register entry */ + g_free(reg_entry); + return; + } + /*
Sync up the data to dev.config */ + offset = reg_grp->base_offset + reg->offset; + size_mask = 0xFFFFFFFF >> ((4 - reg->size) << 3); + + switch (reg->size) { + case 1: rc = xen_host_pci_get_byte(&s->real_device, offset, (uint8_t *)&val); + break; + case 2: rc = xen_host_pci_get_word(&s->real_device, offset, (uint16_t *)&val); + break; + case 4: rc = xen_host_pci_get_long(&s->real_device, offset, &val); + break; + default: abort(); + } + if (rc) { + /* Serious issues when we cannot read the host values! */ + g_free(reg_entry); + error_setg(errp, "Cannot read host values"); + return; + } + /* Set bits in emu_mask are the ones we emulate. The dev.config shall + * contain the emulated view of the guest - therefore we flip the mask + * to mask out the host values (which dev.config initially has) . */ + host_mask = size_mask & ~reg->emu_mask; + + if ((data & host_mask) != (val & host_mask)) { + uint32_t new_val; + + /* Mask out host (including past size). */ + new_val = val & host_mask; + /* Merge emulated ones (excluding the non-emulated ones). */ + new_val |= data & host_mask; + /* Leave intact host and emulated values past the size - even though + * we do not care as we write per reg->size granularity, but for the + * logging below lets have the proper value. */ + new_val |= ((val | data)) & ~size_mask; + XEN_PT_LOG(&s->dev,"Offset 0x%04x mismatch! Emulated=0x%04x, host=0x%04x, syncing to 0x%04x.\n", + offset, data, val, new_val); + val = new_val; + } else + val = data; + + if (val & ~size_mask) { + error_setg(errp, "Offset 0x%04x:0x%04x expands past" + " register size (%d)", offset, val, reg->size); + g_free(reg_entry); + return; + } + /* This could be just pci_set_long as we don't modify the bits + * past reg->size, but in case this routine is run in parallel or the + * init value is larger, we do not want to over-write registers. */ + switch (reg->size) { + case 1: pci_set_byte(s->dev.config + offset, (uint8_t)val); + break; + case 2: pci_set_word(s->dev.config + offset, (uint16_t)val); + break; + case 4: pci_set_long(s->dev.config + offset, val); + break; + default: abort(); + } + /* set register value pointer to the data. */ + reg_entry->ptr.byte = s->dev.config + offset; + + } + /* list add register entry */ + QLIST_INSERT_HEAD(&reg_grp->reg_tbl_list, reg_entry, entries); +} + +void xen_pt_config_init(XenPCIPassthroughState *s, Error **errp) +{ + ERRP_GUARD(); + int i, rc; + + QLIST_INIT(&s->reg_grps); + + for (i = 0; xen_pt_emu_reg_grps[i].grp_size != 0; i++) { + uint32_t reg_grp_offset = 0; + XenPTRegGroup *reg_grp_entry = NULL; + + if (xen_pt_emu_reg_grps[i].grp_id != 0xFF + && xen_pt_emu_reg_grps[i].grp_id != XEN_PCI_INTEL_OPREGION) { + if (xen_pt_hide_dev_cap(&s->real_device, + xen_pt_emu_reg_grps[i].grp_id)) { + continue; + } + + reg_grp_offset = find_cap_offset(s, xen_pt_emu_reg_grps[i].grp_id); + + if (!reg_grp_offset) { + continue; + } + } + + /* + * By default we will trap up to 0x40 in the cfg space. + * If an intel device is pass through we need to trap 0xfc, + * therefore the size should be 0xff.
+ */ + if (xen_pt_emu_reg_grps[i].grp_id == XEN_PCI_INTEL_OPREGION) { + reg_grp_offset = XEN_PCI_INTEL_OPREGION; + } + + reg_grp_entry = g_new0(XenPTRegGroup, 1); + QLIST_INIT(&reg_grp_entry->reg_tbl_list); + QLIST_INSERT_HEAD(&s->reg_grps, reg_grp_entry, entries); + + reg_grp_entry->base_offset = reg_grp_offset; + reg_grp_entry->reg_grp = xen_pt_emu_reg_grps + i; + if (xen_pt_emu_reg_grps[i].size_init) { + /* get register group size */ + rc = xen_pt_emu_reg_grps[i].size_init(s, reg_grp_entry->reg_grp, + reg_grp_offset, + &reg_grp_entry->size); + if (rc < 0) { + error_setg(errp, "Failed to initialize %d/%zu, type = 0x%x," + " rc: %d", i, ARRAY_SIZE(xen_pt_emu_reg_grps), + xen_pt_emu_reg_grps[i].grp_type, rc); + xen_pt_config_delete(s); + return; + } + } + + if (xen_pt_emu_reg_grps[i].grp_type == XEN_PT_GRP_TYPE_EMU) { + if (xen_pt_emu_reg_grps[i].emu_regs) { + int j = 0; + XenPTRegInfo *regs = xen_pt_emu_reg_grps[i].emu_regs; + + /* initialize capability register */ + for (j = 0; regs->size != 0; j++, regs++) { + xen_pt_config_reg_init(s, reg_grp_entry, regs, errp); + if (*errp) { + error_append_hint(errp, "Failed to init register %d" + " offsets 0x%x in grp_type = 0x%x (%d/%zu)", + j, + regs->offset, + xen_pt_emu_reg_grps[i].grp_type, + i, ARRAY_SIZE(xen_pt_emu_reg_grps)); + xen_pt_config_delete(s); + return; + } + } + } + } + } +} + +/* delete all emulate register */ +void xen_pt_config_delete(XenPCIPassthroughState *s) +{ + struct XenPTRegGroup *reg_group, *next_grp; + struct XenPTReg *reg, *next_reg; + + /* free MSI/MSI-X info table */ + if (s->msix) { + xen_pt_msix_unmap(s); + } + g_free(s->msi); + + /* free all register group entry */ + QLIST_FOREACH_SAFE(reg_group, &s->reg_grps, entries, next_grp) { + /* free all register entry */ + QLIST_FOREACH_SAFE(reg, &reg_group->reg_tbl_list, entries, next_reg) { + QLIST_REMOVE(reg, entries); + g_free(reg); + } + + QLIST_REMOVE(reg_group, entries); + g_free(reg_group); + } +} diff --git a/hw/xen/xen_pt_graphics.c b/hw/xen/xen_pt_graphics.c new file mode 100644 index 000000000..a3bc7e392 --- /dev/null +++ b/hw/xen/xen_pt_graphics.c @@ -0,0 +1,291 @@ +/* + * graphics passthrough + */ +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "xen_pt.h" +#include "xen-host-pci-device.h" +#include "hw/xen/xen-legacy-backend.h" + +static unsigned long igd_guest_opregion; +static unsigned long igd_host_opregion; + +#define XEN_PCI_INTEL_OPREGION_MASK 0xfff + +typedef struct VGARegion { + int type; /* Memory or port I/O */ + uint64_t guest_base_addr; + uint64_t machine_base_addr; + uint64_t size; /* size of the region */ + int rc; +} VGARegion; + +#define IORESOURCE_IO 0x00000100 +#define IORESOURCE_MEM 0x00000200 + +static struct VGARegion vga_args[] = { + { + .type = IORESOURCE_IO, + .guest_base_addr = 0x3B0, + .machine_base_addr = 0x3B0, + .size = 0xC, + .rc = -1, + }, + { + .type = IORESOURCE_IO, + .guest_base_addr = 0x3C0, + .machine_base_addr = 0x3C0, + .size = 0x20, + .rc = -1, + }, + { + .type = IORESOURCE_MEM, + .guest_base_addr = 0xa0000 >> XC_PAGE_SHIFT, + .machine_base_addr = 0xa0000 >> XC_PAGE_SHIFT, + .size = 0x20, + .rc = -1, + }, +}; + +/* + * register VGA resources for the domain with assigned gfx + */ +int xen_pt_register_vga_regions(XenHostPCIDevice *dev) +{ + int i = 0; + + if (!is_igd_vga_passthrough(dev)) { + return 0; + } + + for (i = 0 ; i < ARRAY_SIZE(vga_args); i++) { + if (vga_args[i].type == IORESOURCE_IO) { + vga_args[i].rc = xc_domain_ioport_mapping(xen_xc, xen_domid, + vga_args[i].guest_base_addr, +
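/*
 * [Illustrative aside, not part of the original patch.] The units in
 * vga_args above are mixed: IORESOURCE_IO entries give base and size
 * in I/O ports (0x3B0-0x3BB and 0x3C0-0x3DF, the legacy VGA ranges),
 * while the IORESOURCE_MEM entry gives them in page frames, so base
 * 0xa0000 >> XC_PAGE_SHIFT with size 0x20 covers the 128 KiB legacy
 * frame buffer at 0xA0000-0xBFFFF. All three regions are
 * identity-mapped (guest_base_addr == machine_base_addr).
 */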
vga_args[i].machine_base_addr, + vga_args[i].size, DPCI_ADD_MAPPING); + } else { + vga_args[i].rc = xc_domain_memory_mapping(xen_xc, xen_domid, + vga_args[i].guest_base_addr, + vga_args[i].machine_base_addr, + vga_args[i].size, DPCI_ADD_MAPPING); + } + + if (vga_args[i].rc) { + XEN_PT_ERR(NULL, "VGA %s mapping failed! (rc: %i)\n", + vga_args[i].type == IORESOURCE_IO ? "ioport" : "memory", + vga_args[i].rc); + return vga_args[i].rc; + } + } + + return 0; +} + +/* + * unregister VGA resources for the domain with assigned gfx + */ +int xen_pt_unregister_vga_regions(XenHostPCIDevice *dev) +{ + int i = 0; + int ret = 0; + + if (!is_igd_vga_passthrough(dev)) { + return 0; + } + + for (i = 0 ; i < ARRAY_SIZE(vga_args); i++) { + if (vga_args[i].type == IORESOURCE_IO) { + vga_args[i].rc = xc_domain_ioport_mapping(xen_xc, xen_domid, + vga_args[i].guest_base_addr, + vga_args[i].machine_base_addr, + vga_args[i].size, DPCI_REMOVE_MAPPING); + } else { + vga_args[i].rc = xc_domain_memory_mapping(xen_xc, xen_domid, + vga_args[i].guest_base_addr, + vga_args[i].machine_base_addr, + vga_args[i].size, DPCI_REMOVE_MAPPING); + } + + if (vga_args[i].rc) { + XEN_PT_ERR(NULL, "VGA %s unmapping failed! (rc: %i)\n", + vga_args[i].type == IORESOURCE_IO ? "ioport" : "memory", + vga_args[i].rc); + return vga_args[i].rc; + } + } + + if (igd_guest_opregion) { + ret = xc_domain_memory_mapping(xen_xc, xen_domid, + (unsigned long)(igd_guest_opregion >> XC_PAGE_SHIFT), + (unsigned long)(igd_host_opregion >> XC_PAGE_SHIFT), + 3, + DPCI_REMOVE_MAPPING); + if (ret) { + return ret; + } + } + + return 0; +} + +static void *get_vgabios(XenPCIPassthroughState *s, int *size, + XenHostPCIDevice *dev) +{ + return pci_assign_dev_load_option_rom(&s->dev, size, + dev->domain, dev->bus, + dev->dev, dev->func); +} + +/* Refer to Seabios. */ +struct rom_header { + uint16_t signature; + uint8_t size; + uint8_t initVector[4]; + uint8_t reserved[17]; + uint16_t pcioffset; + uint16_t pnpoffset; +} __attribute__((packed)); + +struct pci_data { + uint32_t signature; + uint16_t vendor; + uint16_t device; + uint16_t vitaldata; + uint16_t dlen; + uint8_t drevision; + uint8_t class_lo; + uint16_t class_hi; + uint16_t ilen; + uint16_t irevision; + uint8_t type; + uint8_t indicator; + uint16_t reserved; +} __attribute__((packed)); + +void xen_pt_setup_vga(XenPCIPassthroughState *s, XenHostPCIDevice *dev, + Error **errp) +{ + unsigned char *bios = NULL; + struct rom_header *rom; + int bios_size; + char *c = NULL; + char checksum = 0; + uint32_t len = 0; + struct pci_data *pd = NULL; + + if (!is_igd_vga_passthrough(dev)) { + error_setg(errp, "Need to enable igd-passthrough"); + return; + } + + bios = get_vgabios(s, &bios_size, dev); + if (!bios) { + error_setg(errp, "VGA: Can't get VBIOS"); + return; + } + + if (bios_size < sizeof(struct rom_header)) { + error_setg(errp, "VGA: VBIOS image corrupt (too small)"); + return; + } + + /* Currently we fixed this address as a primary. */ + rom = (struct rom_header *)bios; + + if (rom->pcioffset + sizeof(struct pci_data) > bios_size) { + error_setg(errp, "VGA: VBIOS image corrupt (bad pcioffset field)"); + return; + } + + pd = (void *)(bios + (unsigned char)rom->pcioffset); + + /* We may need to fixup Device Identification. 
*/ + if (pd->device != s->real_device.device_id) { + pd->device = s->real_device.device_id; + + len = rom->size * 512; + if (len > bios_size) { + error_setg(errp, "VGA: VBIOS image corrupt (bad size field)"); + return; + } + + /* Then adjust the bios checksum */ + for (c = (char *)bios; c < ((char *)bios + len); c++) { + checksum += *c; + } + if (checksum) { + bios[len - 1] -= checksum; + XEN_PT_LOG(&s->dev, "vga bios checksum is adjusted %x!\n", + checksum); + } + } + + /* Currently we fixed this address as a primary for legacy BIOS. */ + cpu_physical_memory_write(0xc0000, bios, bios_size); +} + +uint32_t igd_read_opregion(XenPCIPassthroughState *s) +{ + uint32_t val = 0; + + if (!igd_guest_opregion) { + return val; + } + + val = igd_guest_opregion; + + XEN_PT_LOG(&s->dev, "Read opregion val=%x\n", val); + return val; +} + +#define XEN_PCI_INTEL_OPREGION_PAGES 0x3 +#define XEN_PCI_INTEL_OPREGION_ENABLE_ACCESSED 0x1 +void igd_write_opregion(XenPCIPassthroughState *s, uint32_t val) +{ + int ret; + + if (igd_guest_opregion) { + XEN_PT_LOG(&s->dev, "opregion register already been set, ignoring %x\n", + val); + return; + } + + /* We just work with LE. */ + xen_host_pci_get_block(&s->real_device, XEN_PCI_INTEL_OPREGION, + (uint8_t *)&igd_host_opregion, 4); + igd_guest_opregion = (unsigned long)(val & ~XEN_PCI_INTEL_OPREGION_MASK) + | (igd_host_opregion & XEN_PCI_INTEL_OPREGION_MASK); + + ret = xc_domain_iomem_permission(xen_xc, xen_domid, + (unsigned long)(igd_host_opregion >> XC_PAGE_SHIFT), + XEN_PCI_INTEL_OPREGION_PAGES, + XEN_PCI_INTEL_OPREGION_ENABLE_ACCESSED); + + if (ret) { + XEN_PT_ERR(&s->dev, "[%d]:Can't enable to access IGD host opregion:" + " 0x%lx.\n", ret, + (unsigned long)(igd_host_opregion >> XC_PAGE_SHIFT)), + igd_guest_opregion = 0; + return; + } + + ret = xc_domain_memory_mapping(xen_xc, xen_domid, + (unsigned long)(igd_guest_opregion >> XC_PAGE_SHIFT), + (unsigned long)(igd_host_opregion >> XC_PAGE_SHIFT), + XEN_PCI_INTEL_OPREGION_PAGES, + DPCI_ADD_MAPPING); + + if (ret) { + XEN_PT_ERR(&s->dev, "[%d]:Can't map IGD host opregion:0x%lx to" + " guest opregion:0x%lx.\n", ret, + (unsigned long)(igd_host_opregion >> XC_PAGE_SHIFT), + (unsigned long)(igd_guest_opregion >> XC_PAGE_SHIFT)); + igd_guest_opregion = 0; + return; + } + + XEN_PT_LOG(&s->dev, "Map OpRegion: 0x%lx -> 0x%lx\n", + (unsigned long)(igd_host_opregion >> XC_PAGE_SHIFT), + (unsigned long)(igd_guest_opregion >> XC_PAGE_SHIFT)); +} diff --git a/hw/xen/xen_pt_load_rom.c b/hw/xen/xen_pt_load_rom.c new file mode 100644 index 000000000..03422a8a7 --- /dev/null +++ b/hw/xen/xen_pt_load_rom.c @@ -0,0 +1,92 @@ +/* + * This is splited from hw/i386/kvm/pci-assign.c + */ +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/error-report.h" +#include "hw/loader.h" +#include "hw/pci/pci.h" +#include "xen_pt.h" + +/* + * Scan the assigned devices for the devices that have an option ROM, and then + * load the corresponding ROM data to RAM. If an error occurs while loading an + * option ROM, we just ignore that option ROM and continue with the next one. 
+ */ +void *pci_assign_dev_load_option_rom(PCIDevice *dev, + int *size, unsigned int domain, + unsigned int bus, unsigned int slot, + unsigned int function) +{ + char name[32], rom_file[64]; + FILE *fp; + uint8_t val; + struct stat st; + void *ptr = NULL; + Object *owner = OBJECT(dev); + + /* If loading ROM from file, pci handles it */ + if (dev->romfile || !dev->rom_bar) { + return NULL; + } + + snprintf(rom_file, sizeof(rom_file), + "/sys/bus/pci/devices/%04x:%02x:%02x.%01x/rom", + domain, bus, slot, function); + + /* Write "1" to the ROM file to enable it */ + fp = fopen(rom_file, "r+"); + if (fp == NULL) { + if (errno != ENOENT) { + error_report("pci-assign: Cannot open %s: %s", rom_file, strerror(errno)); + } + return NULL; + } + if (fstat(fileno(fp), &st) == -1) { + error_report("pci-assign: Cannot stat %s: %s", rom_file, strerror(errno)); + goto close_rom; + } + + val = 1; + if (fwrite(&val, 1, 1, fp) != 1) { + goto close_rom; + } + fseek(fp, 0, SEEK_SET); + + if (dev->romsize != -1) { + if (st.st_size > dev->romsize) { + error_report("ROM BAR \"%s\" (%ld bytes) is too large for ROM size %u", + rom_file, (long) st.st_size, dev->romsize); + goto close_rom; + } + } else { + dev->romsize = st.st_size; + } + + snprintf(name, sizeof(name), "%s.rom", object_get_typename(owner)); + memory_region_init_ram(&dev->rom, owner, name, dev->romsize, &error_abort); + ptr = memory_region_get_ram_ptr(&dev->rom); + memset(ptr, 0xff, dev->romsize); + + if (!fread(ptr, 1, st.st_size, fp)) { + error_report("pci-assign: Cannot read from host %s", rom_file); + error_printf("Device option ROM contents are probably invalid " + "(check dmesg).\nSkip option ROM probe with rombar=0, " + "or load from file with romfile=\n"); + goto close_rom; + } + + pci_register_bar(dev, PCI_ROM_SLOT, 0, &dev->rom); + dev->has_rom = true; + *size = st.st_size; +close_rom: + /* Write "0" to disable ROM */ + fseek(fp, 0, SEEK_SET); + val = 0; + if (!fwrite(&val, 1, 1, fp)) { + XEN_PT_WARN(dev, "%s\n", "Failed to disable pci-sysfs rom file"); + } + fclose(fp); + + return ptr; +} diff --git a/hw/xen/xen_pt_msi.c b/hw/xen/xen_pt_msi.c new file mode 100644 index 000000000..b71563f98 --- /dev/null +++ b/hw/xen/xen_pt_msi.c @@ -0,0 +1,650 @@ +/* + * Copyright (c) 2007, Intel Corporation. + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. 
+ * + * Jiang Yunhong + * + * This file implements direct PCI assignment to a HVM guest + */ + +#include "qemu/osdep.h" + +#include "hw/xen/xen-legacy-backend.h" +#include "xen_pt.h" +#include "hw/i386/apic-msidef.h" + + +#define XEN_PT_AUTO_ASSIGN -1 + +/* shift count for gflags */ +#define XEN_PT_GFLAGS_SHIFT_DEST_ID 0 +#define XEN_PT_GFLAGS_SHIFT_RH 8 +#define XEN_PT_GFLAGS_SHIFT_DM 9 +#define XEN_PT_GFLAGSSHIFT_DELIV_MODE 12 +#define XEN_PT_GFLAGSSHIFT_TRG_MODE 15 +#define XEN_PT_GFLAGSSHIFT_UNMASKED 16 + +#define latch(fld) latch[PCI_MSIX_ENTRY_##fld / sizeof(uint32_t)] + +/* + * Helpers + */ + +static inline uint8_t msi_vector(uint32_t data) +{ + return (data & MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT; +} + +static inline uint8_t msi_dest_id(uint32_t addr) +{ + return (addr & MSI_ADDR_DEST_ID_MASK) >> MSI_ADDR_DEST_ID_SHIFT; +} + +static inline uint32_t msi_ext_dest_id(uint32_t addr_hi) +{ + return addr_hi & 0xffffff00; +} + +static uint32_t msi_gflags(uint32_t data, uint64_t addr) +{ + uint32_t result = 0; + int rh, dm, dest_id, deliv_mode, trig_mode; + + rh = (addr >> MSI_ADDR_REDIRECTION_SHIFT) & 0x1; + dm = (addr >> MSI_ADDR_DEST_MODE_SHIFT) & 0x1; + dest_id = msi_dest_id(addr); + deliv_mode = (data >> MSI_DATA_DELIVERY_MODE_SHIFT) & 0x7; + trig_mode = (data >> MSI_DATA_TRIGGER_SHIFT) & 0x1; + + result = dest_id | (rh << XEN_PT_GFLAGS_SHIFT_RH) + | (dm << XEN_PT_GFLAGS_SHIFT_DM) + | (deliv_mode << XEN_PT_GFLAGSSHIFT_DELIV_MODE) + | (trig_mode << XEN_PT_GFLAGSSHIFT_TRG_MODE); + + return result; +} + +static inline uint64_t msi_addr64(XenPTMSI *msi) +{ + return (uint64_t)msi->addr_hi << 32 | msi->addr_lo; +} + +static int msi_msix_enable(XenPCIPassthroughState *s, + uint32_t address, + uint16_t flag, + bool enable) +{ + uint16_t val = 0; + int rc; + + if (!address) { + return -1; + } + + rc = xen_host_pci_get_word(&s->real_device, address, &val); + if (rc) { + XEN_PT_ERR(&s->dev, "Failed to read MSI/MSI-X register (0x%x), rc:%d\n", + address, rc); + return rc; + } + if (enable) { + val |= flag; + } else { + val &= ~flag; + } + rc = xen_host_pci_set_word(&s->real_device, address, val); + if (rc) { + XEN_PT_ERR(&s->dev, "Failed to write MSI/MSI-X register (0x%x), rc:%d\n", + address, rc); + } + return rc; +} + +static int msi_msix_setup(XenPCIPassthroughState *s, + uint64_t addr, + uint32_t data, + int *ppirq, + bool is_msix, + int msix_entry, + bool is_not_mapped) +{ + uint8_t gvec = msi_vector(data); + int rc = 0; + + assert((!is_msix && msix_entry == 0) || is_msix); + + if (xen_is_pirq_msi(data)) { + *ppirq = msi_ext_dest_id(addr >> 32) | msi_dest_id(addr); + if (!*ppirq) { + /* this probably identifies an misconfiguration of the guest, + * try the emulated path */ + *ppirq = XEN_PT_UNASSIGNED_PIRQ; + } else { + XEN_PT_LOG(&s->dev, "requested pirq %d for MSI%s" + " (vec: 0x%x, entry: 0x%x)\n", + *ppirq, is_msix ? "-X" : "", gvec, msix_entry); + } + } + + if (is_not_mapped) { + uint64_t table_base = 0; + + if (is_msix) { + table_base = s->msix->table_base; + } + + rc = xc_physdev_map_pirq_msi(xen_xc, xen_domid, XEN_PT_AUTO_ASSIGN, + ppirq, PCI_DEVFN(s->real_device.dev, + s->real_device.func), + s->real_device.bus, + msix_entry, table_base); + if (rc) { + XEN_PT_ERR(&s->dev, + "Mapping of MSI%s (err: %i, vec: 0x%x, entry 0x%x)\n", + is_msix ? 
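/*
 * [Illustrative aside, not part of the original patch.] msi_gflags()
 * above repacks the routing fields Xen needs out of the
 * guest-programmed MSI address/data pair (decoded with the MSI_*
 * macros from hw/i386/apic-msidef.h) into a single flags word:
 *
 *     bits  0-7   destination APIC ID    (address bits 12-19)
 *     bit   8     redirection hint       (address bit 3)
 *     bit   9     destination mode       (address bit 2)
 *     bits 12-14  delivery mode          (data bits 8-10)
 *     bit  15     trigger mode           (data bit 15)
 *     bit  16     "unmasked", OR-ed in later by msi_msix_update()
 */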
"-X" : "", errno, gvec, msix_entry); + return rc; + } + } + + return 0; +} +static int msi_msix_update(XenPCIPassthroughState *s, + uint64_t addr, + uint32_t data, + int pirq, + bool is_msix, + int msix_entry, + int *old_pirq, + bool masked) +{ + PCIDevice *d = &s->dev; + uint8_t gvec = msi_vector(data); + uint32_t gflags = msi_gflags(data, addr); + int rc = 0; + uint64_t table_addr = 0; + + XEN_PT_LOG(d, "Updating MSI%s with pirq %d gvec 0x%x gflags 0x%x" + " (entry: 0x%x)\n", + is_msix ? "-X" : "", pirq, gvec, gflags, msix_entry); + + if (is_msix) { + table_addr = s->msix->mmio_base_addr; + } + + gflags |= masked ? 0 : (1u << XEN_PT_GFLAGSSHIFT_UNMASKED); + + rc = xc_domain_update_msi_irq(xen_xc, xen_domid, gvec, + pirq, gflags, table_addr); + + if (rc) { + XEN_PT_ERR(d, "Updating of MSI%s failed. (err: %d)\n", + is_msix ? "-X" : "", errno); + + if (xc_physdev_unmap_pirq(xen_xc, xen_domid, *old_pirq)) { + XEN_PT_ERR(d, "Unmapping of MSI%s pirq %d failed. (err: %d)\n", + is_msix ? "-X" : "", *old_pirq, errno); + } + *old_pirq = XEN_PT_UNASSIGNED_PIRQ; + } + return rc; +} + +static int msi_msix_disable(XenPCIPassthroughState *s, + uint64_t addr, + uint32_t data, + int pirq, + bool is_msix, + bool is_binded) +{ + PCIDevice *d = &s->dev; + uint8_t gvec = msi_vector(data); + uint32_t gflags = msi_gflags(data, addr); + int rc = 0; + + if (pirq == XEN_PT_UNASSIGNED_PIRQ) { + return 0; + } + + if (is_binded) { + XEN_PT_LOG(d, "Unbind MSI%s with pirq %d, gvec 0x%x\n", + is_msix ? "-X" : "", pirq, gvec); + rc = xc_domain_unbind_msi_irq(xen_xc, xen_domid, gvec, pirq, gflags); + if (rc) { + XEN_PT_ERR(d, "Unbinding of MSI%s failed. (err: %d, pirq: %d, gvec: 0x%x)\n", + is_msix ? "-X" : "", errno, pirq, gvec); + return rc; + } + } + + XEN_PT_LOG(d, "Unmap MSI%s pirq %d\n", is_msix ? "-X" : "", pirq); + rc = xc_physdev_unmap_pirq(xen_xc, xen_domid, pirq); + if (rc) { + XEN_PT_ERR(d, "Unmapping of MSI%s pirq %d failed. (err: %i)\n", + is_msix ? "-X" : "", pirq, errno); + return rc; + } + + return 0; +} + +/* + * MSI virtualization functions + */ + +static int xen_pt_msi_set_enable(XenPCIPassthroughState *s, bool enable) +{ + XEN_PT_LOG(&s->dev, "%s MSI.\n", enable ? 
"enabling" : "disabling"); + + if (!s->msi) { + return -1; + } + + return msi_msix_enable(s, s->msi->ctrl_offset, PCI_MSI_FLAGS_ENABLE, + enable); +} + +/* setup physical msi, but don't enable it */ +int xen_pt_msi_setup(XenPCIPassthroughState *s) +{ + int pirq = XEN_PT_UNASSIGNED_PIRQ; + int rc = 0; + XenPTMSI *msi = s->msi; + + if (msi->initialized) { + XEN_PT_ERR(&s->dev, + "Setup physical MSI when it has been properly initialized.\n"); + return -1; + } + + rc = msi_msix_setup(s, msi_addr64(msi), msi->data, &pirq, false, 0, true); + if (rc) { + return rc; + } + + if (pirq < 0) { + XEN_PT_ERR(&s->dev, "Invalid pirq number: %d.\n", pirq); + return -1; + } + + msi->pirq = pirq; + XEN_PT_LOG(&s->dev, "MSI mapped with pirq %d.\n", pirq); + + return 0; +} + +int xen_pt_msi_update(XenPCIPassthroughState *s) +{ + XenPTMSI *msi = s->msi; + + /* Current MSI emulation in QEMU only supports 1 vector */ + return msi_msix_update(s, msi_addr64(msi), msi->data, msi->pirq, + false, 0, &msi->pirq, msi->mask & 1); +} + +void xen_pt_msi_disable(XenPCIPassthroughState *s) +{ + XenPTMSI *msi = s->msi; + + if (!msi) { + return; + } + + (void)xen_pt_msi_set_enable(s, false); + + msi_msix_disable(s, msi_addr64(msi), msi->data, msi->pirq, false, + msi->initialized); + + /* clear msi info */ + msi->flags &= ~PCI_MSI_FLAGS_ENABLE; + msi->initialized = false; + msi->mapped = false; + msi->pirq = XEN_PT_UNASSIGNED_PIRQ; +} + +/* + * MSI-X virtualization functions + */ + +static int msix_set_enable(XenPCIPassthroughState *s, bool enabled) +{ + XEN_PT_LOG(&s->dev, "%s MSI-X.\n", enabled ? "enabling" : "disabling"); + + if (!s->msix) { + return -1; + } + + return msi_msix_enable(s, s->msix->ctrl_offset, PCI_MSIX_FLAGS_ENABLE, + enabled); +} + +static int xen_pt_msix_update_one(XenPCIPassthroughState *s, int entry_nr, + uint32_t vec_ctrl) +{ + XenPTMSIXEntry *entry = NULL; + int pirq; + int rc; + + if (entry_nr < 0 || entry_nr >= s->msix->total_entries) { + return -EINVAL; + } + + entry = &s->msix->msix_entry[entry_nr]; + + if (!entry->updated) { + return 0; + } + + pirq = entry->pirq; + + /* + * Update the entry addr and data to the latest values only when the + * entry is masked or they are all masked, as required by the spec. + * Addr and data changes while the MSI-X entry is unmasked get deferred + * until the next masked -> unmasked transition. 
+ */ + if (pirq == XEN_PT_UNASSIGNED_PIRQ || s->msix->maskall || + (vec_ctrl & PCI_MSIX_ENTRY_CTRL_MASKBIT)) { + entry->addr = entry->latch(LOWER_ADDR) | + ((uint64_t)entry->latch(UPPER_ADDR) << 32); + entry->data = entry->latch(DATA); + } + + rc = msi_msix_setup(s, entry->addr, entry->data, &pirq, true, entry_nr, + entry->pirq == XEN_PT_UNASSIGNED_PIRQ); + if (rc) { + return rc; + } + if (entry->pirq == XEN_PT_UNASSIGNED_PIRQ) { + entry->pirq = pirq; + } + + rc = msi_msix_update(s, entry->addr, entry->data, pirq, true, + entry_nr, &entry->pirq, + vec_ctrl & PCI_MSIX_ENTRY_CTRL_MASKBIT); + + if (!rc) { + entry->updated = false; + } + + return rc; +} + +int xen_pt_msix_update(XenPCIPassthroughState *s) +{ + XenPTMSIX *msix = s->msix; + int i; + + for (i = 0; i < msix->total_entries; i++) { + xen_pt_msix_update_one(s, i, msix->msix_entry[i].latch(VECTOR_CTRL)); + } + + return 0; +} + +void xen_pt_msix_disable(XenPCIPassthroughState *s) +{ + int i = 0; + + msix_set_enable(s, false); + + for (i = 0; i < s->msix->total_entries; i++) { + XenPTMSIXEntry *entry = &s->msix->msix_entry[i]; + + msi_msix_disable(s, entry->addr, entry->data, entry->pirq, true, true); + + /* clear MSI-X info */ + entry->pirq = XEN_PT_UNASSIGNED_PIRQ; + entry->updated = false; + } +} + +int xen_pt_msix_update_remap(XenPCIPassthroughState *s, int bar_index) +{ + XenPTMSIXEntry *entry; + int i, ret; + + if (!(s->msix && s->msix->bar_index == bar_index)) { + return 0; + } + + for (i = 0; i < s->msix->total_entries; i++) { + entry = &s->msix->msix_entry[i]; + if (entry->pirq != XEN_PT_UNASSIGNED_PIRQ) { + ret = xc_domain_unbind_pt_irq(xen_xc, xen_domid, entry->pirq, + PT_IRQ_TYPE_MSI, 0, 0, 0, 0); + if (ret) { + XEN_PT_ERR(&s->dev, "unbind MSI-X entry %d failed (err: %d)\n", + entry->pirq, errno); + } + entry->updated = true; + } + } + return xen_pt_msix_update(s); +} + +static uint32_t get_entry_value(XenPTMSIXEntry *e, int offset) +{ + assert(!(offset % sizeof(*e->latch))); + return e->latch[offset / sizeof(*e->latch)]; +} + +static void set_entry_value(XenPTMSIXEntry *e, int offset, uint32_t val) +{ + assert(!(offset % sizeof(*e->latch))); + e->latch[offset / sizeof(*e->latch)] = val; +} + +static void pci_msix_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + XenPCIPassthroughState *s = opaque; + XenPTMSIX *msix = s->msix; + XenPTMSIXEntry *entry; + unsigned int entry_nr, offset; + + entry_nr = addr / PCI_MSIX_ENTRY_SIZE; + if (entry_nr >= msix->total_entries) { + return; + } + entry = &msix->msix_entry[entry_nr]; + offset = addr % PCI_MSIX_ENTRY_SIZE; + + if (offset != PCI_MSIX_ENTRY_VECTOR_CTRL) { + if (get_entry_value(entry, offset) == val + && entry->pirq != XEN_PT_UNASSIGNED_PIRQ) { + return; + } + + entry->updated = true; + } else if (msix->enabled && entry->updated && + !(val & PCI_MSIX_ENTRY_CTRL_MASKBIT)) { + const volatile uint32_t *vec_ctrl; + + /* + * If Xen intercepts the mask bit access, entry->vec_ctrl may not be + * up-to-date. Read from hardware directly. 
+ */ + vec_ctrl = s->msix->phys_iomem_base + entry_nr * PCI_MSIX_ENTRY_SIZE + + PCI_MSIX_ENTRY_VECTOR_CTRL; + xen_pt_msix_update_one(s, entry_nr, *vec_ctrl); + } + + set_entry_value(entry, offset, val); +} + +static uint64_t pci_msix_read(void *opaque, hwaddr addr, + unsigned size) +{ + XenPCIPassthroughState *s = opaque; + XenPTMSIX *msix = s->msix; + int entry_nr, offset; + + entry_nr = addr / PCI_MSIX_ENTRY_SIZE; + if (entry_nr < 0) { + XEN_PT_ERR(&s->dev, "asked MSI-X entry '%i' invalid!\n", entry_nr); + return 0; + } + + offset = addr % PCI_MSIX_ENTRY_SIZE; + + if (addr < msix->total_entries * PCI_MSIX_ENTRY_SIZE) { + return get_entry_value(&msix->msix_entry[entry_nr], offset); + } else { + /* Pending Bit Array (PBA) */ + return *(uint32_t *)(msix->phys_iomem_base + addr); + } +} + +static bool pci_msix_accepts(void *opaque, hwaddr addr, + unsigned size, bool is_write, + MemTxAttrs attrs) +{ + return !(addr & (size - 1)); +} + +static const MemoryRegionOps pci_msix_ops = { + .read = pci_msix_read, + .write = pci_msix_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + .unaligned = false, + .accepts = pci_msix_accepts + }, + .impl = { + .min_access_size = 4, + .max_access_size = 4, + .unaligned = false + } +}; + +int xen_pt_msix_init(XenPCIPassthroughState *s, uint32_t base) +{ + uint8_t id = 0; + uint16_t control = 0; + uint32_t table_off = 0; + int i, total_entries, bar_index; + XenHostPCIDevice *hd = &s->real_device; + PCIDevice *d = &s->dev; + int fd = -1; + XenPTMSIX *msix = NULL; + int rc = 0; + + rc = xen_host_pci_get_byte(hd, base + PCI_CAP_LIST_ID, &id); + if (rc) { + return rc; + } + + if (id != PCI_CAP_ID_MSIX) { + XEN_PT_ERR(d, "Invalid id 0x%x base 0x%x\n", id, base); + return -1; + } + + rc = xen_host_pci_get_word(hd, base + PCI_MSIX_FLAGS, &control); + if (rc) { + XEN_PT_ERR(d, "Failed to read PCI_MSIX_FLAGS field\n"); + return rc; + } + total_entries = control & PCI_MSIX_FLAGS_QSIZE; + total_entries += 1; + + s->msix = g_malloc0(sizeof (XenPTMSIX) + + total_entries * sizeof (XenPTMSIXEntry)); + msix = s->msix; + + msix->total_entries = total_entries; + for (i = 0; i < total_entries; i++) { + msix->msix_entry[i].pirq = XEN_PT_UNASSIGNED_PIRQ; + } + + memory_region_init_io(&msix->mmio, OBJECT(s), &pci_msix_ops, + s, "xen-pci-pt-msix", + (total_entries * PCI_MSIX_ENTRY_SIZE + + XC_PAGE_SIZE - 1) + & XC_PAGE_MASK); + + rc = xen_host_pci_get_long(hd, base + PCI_MSIX_TABLE, &table_off); + if (rc) { + XEN_PT_ERR(d, "Failed to read PCI_MSIX_TABLE field\n"); + goto error_out; + } + bar_index = msix->bar_index = table_off & PCI_MSIX_FLAGS_BIRMASK; + table_off = table_off & ~PCI_MSIX_FLAGS_BIRMASK; + msix->table_base = s->real_device.io_regions[bar_index].base_addr; + XEN_PT_LOG(d, "get MSI-X table BAR base 0x%"PRIx64"\n", msix->table_base); + + fd = open("/dev/mem", O_RDWR); + if (fd == -1) { + rc = -errno; + XEN_PT_ERR(d, "Can't open /dev/mem: %s\n", strerror(errno)); + goto error_out; + } + XEN_PT_LOG(d, "table_off = 0x%x, total_entries = %d\n", + table_off, total_entries); + msix->table_offset_adjust = table_off & 0x0fff; + msix->phys_iomem_base = + mmap(NULL, + total_entries * PCI_MSIX_ENTRY_SIZE + msix->table_offset_adjust, + PROT_READ, + MAP_SHARED | MAP_LOCKED, + fd, + msix->table_base + table_off - msix->table_offset_adjust); + close(fd); + if (msix->phys_iomem_base == MAP_FAILED) { + rc = -errno; + XEN_PT_ERR(d, "Can't map physical MSI-X table: %s\n", strerror(errno)); + goto error_out; + } + msix->phys_iomem_base = 
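/*
 * [Worked example, not part of the original patch.] The MSI-X table
 * need not start on a page boundary, but mmap() offsets must be page
 * aligned, hence table_offset_adjust above. E.g. with table_off =
 * 0x2010: table_offset_adjust = 0x2010 & 0x0fff = 0x10, the mapping
 * starts at table_base + 0x2000, and the pointer is advanced by 0x10
 * just below so that phys_iomem_base points at the first table entry.
 */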
(char *)msix->phys_iomem_base + + msix->table_offset_adjust; + + XEN_PT_LOG(d, "mapping physical MSI-X table to %p\n", + msix->phys_iomem_base); + + memory_region_add_subregion_overlap(&s->bar[bar_index], table_off, + &msix->mmio, + 2); /* Priority: pci default + 1 */ + + return 0; + +error_out: + g_free(s->msix); + s->msix = NULL; + return rc; +} + +void xen_pt_msix_unmap(XenPCIPassthroughState *s) +{ + XenPTMSIX *msix = s->msix; + + if (!msix) { + return; + } + + /* unmap the MSI-X memory mapped register area */ + if (msix->phys_iomem_base) { + XEN_PT_LOG(&s->dev, "unmapping physical MSI-X table from %p\n", + msix->phys_iomem_base); + munmap(msix->phys_iomem_base, msix->total_entries * PCI_MSIX_ENTRY_SIZE + + msix->table_offset_adjust); + } + + memory_region_del_subregion(&s->bar[msix->bar_index], &msix->mmio); +} + +void xen_pt_msix_delete(XenPCIPassthroughState *s) +{ + XenPTMSIX *msix = s->msix; + + if (!msix) { + return; + } + + object_unparent(OBJECT(&msix->mmio)); + + g_free(s->msix); + s->msix = NULL; +} diff --git a/hw/xen/xen_pt_stub.c b/hw/xen/xen_pt_stub.c new file mode 100644 index 000000000..2d8cac8d5 --- /dev/null +++ b/hw/xen/xen_pt_stub.c @@ -0,0 +1,22 @@ +/* + * Copyright (C) 2020 Citrix Systems UK Ltd. + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "hw/xen/xen_pt.h" +#include "qapi/error.h" + +bool xen_igd_gfx_pt_enabled(void) +{ + return false; +} + +void xen_igd_gfx_pt_set(bool value, Error **errp) +{ + if (value) { + error_setg(errp, "Xen PCI passthrough support not built in"); + } +} diff --git a/hw/xen/xen_pvdev.c b/hw/xen/xen_pvdev.c new file mode 100644 index 000000000..8ab458922 --- /dev/null +++ b/hw/xen/xen_pvdev.c @@ -0,0 +1,319 @@ +/* + * Xen para-virtualization device + * + * (c) 2008 Gerd Hoffmann + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see + */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "qemu/main-loop.h" +#include "hw/qdev-core.h" +#include "hw/xen/xen-legacy-backend.h" +#include "hw/xen/xen_pvdev.h" + +/* private */ +static int debug; + +struct xs_dirs { + char *xs_dir; + QTAILQ_ENTRY(xs_dirs) list; +}; + +static QTAILQ_HEAD(, xs_dirs) xs_cleanup = + QTAILQ_HEAD_INITIALIZER(xs_cleanup); + +static QTAILQ_HEAD(, XenLegacyDevice) xendevs = + QTAILQ_HEAD_INITIALIZER(xendevs); + +/* ------------------------------------------------------------- */ + +static void xenstore_cleanup_dir(char *dir) +{ + struct xs_dirs *d; + + d = g_malloc(sizeof(*d)); + d->xs_dir = dir; + QTAILQ_INSERT_TAIL(&xs_cleanup, d, list); +} + +void xen_config_cleanup(void) +{ + struct xs_dirs *d; + + QTAILQ_FOREACH(d, &xs_cleanup, list) { + xs_rm(xenstore, 0, d->xs_dir); + } +} + +int xenstore_mkdir(char *path, int p) +{ + struct xs_permissions perms[2] = { + { + .id = 0, /* set owner: dom0 */ + }, { + .id = xen_domid, + .perms = p, + } + }; + + if (!xs_mkdir(xenstore, 0, path)) { + xen_pv_printf(NULL, 0, "xs_mkdir %s: failed\n", path); + return -1; + } + xenstore_cleanup_dir(g_strdup(path)); + + if (!xs_set_permissions(xenstore, 0, path, perms, 2)) { + xen_pv_printf(NULL, 0, "xs_set_permissions %s: failed\n", path); + return -1; + } + return 0; +} + +int xenstore_write_str(const char *base, const char *node, const char *val) +{ + char abspath[XEN_BUFSIZE]; + + snprintf(abspath, sizeof(abspath), "%s/%s", base, node); + if (!xs_write(xenstore, 0, abspath, val, strlen(val))) { + return -1; + } + return 0; +} + +char *xenstore_read_str(const char *base, const char *node) +{ + char abspath[XEN_BUFSIZE]; + unsigned int len; + char *str, *ret = NULL; + + snprintf(abspath, sizeof(abspath), "%s/%s", base, node); + str = xs_read(xenstore, 0, abspath, &len); + if (str != NULL) { + /* move to qemu-allocated memory to make sure + * callers can savely g_free() stuff. 
*/ + ret = g_strdup(str); + free(str); + } + return ret; +} + +int xenstore_write_int(const char *base, const char *node, int ival) +{ + char val[12]; + + snprintf(val, sizeof(val), "%d", ival); + return xenstore_write_str(base, node, val); +} + +int xenstore_write_int64(const char *base, const char *node, int64_t ival) +{ + char val[21]; + + snprintf(val, sizeof(val), "%"PRId64, ival); + return xenstore_write_str(base, node, val); +} + +int xenstore_read_int(const char *base, const char *node, int *ival) +{ + char *val; + int rc = -1; + + val = xenstore_read_str(base, node); + if (val && 1 == sscanf(val, "%d", ival)) { + rc = 0; + } + g_free(val); + return rc; +} + +int xenstore_read_uint64(const char *base, const char *node, uint64_t *uval) +{ + char *val; + int rc = -1; + + val = xenstore_read_str(base, node); + if (val && 1 == sscanf(val, "%"SCNu64, uval)) { + rc = 0; + } + g_free(val); + return rc; +} + +void xenstore_update(void *unused) +{ + char **vec = NULL; + intptr_t type, ops, ptr; + unsigned int dom, count; + + vec = xs_read_watch(xenstore, &count); + if (vec == NULL) { + goto cleanup; + } + + if (sscanf(vec[XS_WATCH_TOKEN], "be:%" PRIxPTR ":%d:%" PRIxPTR, + &type, &dom, &ops) == 3) { + xenstore_update_be(vec[XS_WATCH_PATH], (void *)type, dom, (void*)ops); + } + if (sscanf(vec[XS_WATCH_TOKEN], "fe:%" PRIxPTR, &ptr) == 1) { + xenstore_update_fe(vec[XS_WATCH_PATH], (void *)ptr); + } + +cleanup: + free(vec); +} + +const char *xenbus_strstate(enum xenbus_state state) +{ + static const char *const name[] = { + [XenbusStateUnknown] = "Unknown", + [XenbusStateInitialising] = "Initialising", + [XenbusStateInitWait] = "InitWait", + [XenbusStateInitialised] = "Initialised", + [XenbusStateConnected] = "Connected", + [XenbusStateClosing] = "Closing", + [XenbusStateClosed] = "Closed", + }; + return (state < ARRAY_SIZE(name)) ? name[state] : "INVALID"; +} + +/* + * msg_level: + * 0 == errors (stderr + logfile). + * 1 == informative debug messages (logfile only). + * 2 == noisy debug messages (logfile only). + * 3 == will flood your log (logfile only). + */ +void xen_pv_printf(struct XenLegacyDevice *xendev, int msg_level, + const char *fmt, ...) 
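/*
 * [Hypothetical usage example, not part of the original patch; the
 * message strings are invented.] Callers pick msg_level according to
 * the scale documented above, e.g.:
 *
 *     xen_pv_printf(xendev, 0, "ring setup failed\n");       error
 *     xen_pv_printf(xendev, 2, "ring has %d entries\n", n);  noisy debug
 *
 * Messages above the per-device (or, without a device, the global)
 * debug level are dropped; level 0 messages additionally go to stderr.
 */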
+{ + va_list args; + + if (xendev) { + if (msg_level > xendev->debug) { + return; + } + qemu_log("xen be: %s: ", xendev->name); + if (msg_level == 0) { + fprintf(stderr, "xen be: %s: ", xendev->name); + } + } else { + if (msg_level > debug) { + return; + } + qemu_log("xen be core: "); + if (msg_level == 0) { + fprintf(stderr, "xen be core: "); + } + } + va_start(args, fmt); + qemu_log_vprintf(fmt, args); + va_end(args); + if (msg_level == 0) { + va_start(args, fmt); + vfprintf(stderr, fmt, args); + va_end(args); + } + qemu_log_flush(); +} + +void xen_pv_evtchn_event(void *opaque) +{ + struct XenLegacyDevice *xendev = opaque; + evtchn_port_t port; + + port = xenevtchn_pending(xendev->evtchndev); + if (port != xendev->local_port) { + xen_pv_printf(xendev, 0, + "xenevtchn_pending returned %d (expected %d)\n", + port, xendev->local_port); + return; + } + xenevtchn_unmask(xendev->evtchndev, port); + + if (xendev->ops->event) { + xendev->ops->event(xendev); + } +} + +void xen_pv_unbind_evtchn(struct XenLegacyDevice *xendev) +{ + if (xendev->local_port == -1) { + return; + } + qemu_set_fd_handler(xenevtchn_fd(xendev->evtchndev), NULL, NULL, NULL); + xenevtchn_unbind(xendev->evtchndev, xendev->local_port); + xen_pv_printf(xendev, 2, "unbind evtchn port %d\n", xendev->local_port); + xendev->local_port = -1; +} + +int xen_pv_send_notify(struct XenLegacyDevice *xendev) +{ + return xenevtchn_notify(xendev->evtchndev, xendev->local_port); +} + +/* ------------------------------------------------------------- */ + +struct XenLegacyDevice *xen_pv_find_xendev(const char *type, int dom, int dev) +{ + struct XenLegacyDevice *xendev; + + QTAILQ_FOREACH(xendev, &xendevs, next) { + if (xendev->dom != dom) { + continue; + } + if (xendev->dev != dev) { + continue; + } + if (strcmp(xendev->type, type) != 0) { + continue; + } + return xendev; + } + return NULL; +} + +/* + * release xen backend device. 
+ */ +void xen_pv_del_xendev(struct XenLegacyDevice *xendev) +{ + if (xendev->ops->free) { + xendev->ops->free(xendev); + } + + if (xendev->fe) { + char token[XEN_BUFSIZE]; + snprintf(token, sizeof(token), "fe:%p", xendev); + xs_unwatch(xenstore, xendev->fe, token); + g_free(xendev->fe); + } + + if (xendev->evtchndev != NULL) { + xenevtchn_close(xendev->evtchndev); + } + if (xendev->gnttabdev != NULL) { + xengnttab_close(xendev->gnttabdev); + } + + QTAILQ_REMOVE(&xendevs, xendev, next); + + qdev_unplug(&xendev->qdev, NULL); +} + +void xen_pv_insert_xendev(struct XenLegacyDevice *xendev) +{ + QTAILQ_INSERT_TAIL(&xendevs, xendev, next); +} diff --git a/hw/xenpv/meson.build b/hw/xenpv/meson.build new file mode 100644 index 000000000..40f911ac1 --- /dev/null +++ b/hw/xenpv/meson.build @@ -0,0 +1,3 @@ +xenpv_ss = ss.source_set() + +xenpv_ss.add(when: 'CONFIG_XEN', if_true: files('xen_machine_pv.c')) diff --git a/hw/xenpv/xen_machine_pv.c b/hw/xenpv/xen_machine_pv.c new file mode 100644 index 000000000..8df575a45 --- /dev/null +++ b/hw/xenpv/xen_machine_pv.c @@ -0,0 +1,97 @@ +/* + * QEMU Xen PV Machine + * + * Copyright (c) 2007 Red Hat + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include "qemu/error-report.h" +#include "hw/boards.h" +#include "hw/xen/xen-legacy-backend.h" +#include "hw/xen/xen-bus.h" +#include "sysemu/block-backend.h" +#include "sysemu/sysemu.h" + +static void xen_init_pv(MachineState *machine) +{ + DriveInfo *dinfo; + int i; + + /* Initialize backend core & drivers */ + if (xen_be_init() != 0) { + error_report("%s: xen backend core setup failed", __func__); + exit(1); + } + + switch (xen_mode) { + case XEN_ATTACH: + /* nothing to do, libxl handles everything */ + break; + case XEN_EMULATE: + error_report("xen emulation not implemented (yet)"); + exit(1); + break; + default: + error_report("unhandled xen_mode %d", xen_mode); + exit(1); + break; + } + + xen_be_register_common(); + xen_be_register("vfb", &xen_framebuffer_ops); + xen_be_register("qnic", &xen_netdev_ops); + + /* configure framebuffer */ + if (vga_interface_type == VGA_XENFB) { + xen_config_dev_vfb(0, "vnc"); + xen_config_dev_vkbd(0); + } + + /* configure disks */ + for (i = 0; i < 16; i++) { + dinfo = drive_get(IF_XEN, 0, i); + if (!dinfo) + continue; + xen_config_dev_blk(dinfo); + } + + /* configure nics */ + for (i = 0; i < nb_nics; i++) { + if (!nd_table[i].model || 0 != strcmp(nd_table[i].model, "xen")) + continue; + xen_config_dev_nic(nd_table + i); + } + + xen_bus_init(); + + /* config cleanup hook */ + atexit(xen_config_cleanup); +} + +static void xenpv_machine_init(MachineClass *mc) +{ + mc->desc = "Xen Para-virtualized PC"; + mc->init = xen_init_pv; + mc->max_cpus = 1; + mc->default_machine_opts = "accel=xen"; +} + +DEFINE_MACHINE("xenpv", xenpv_machine_init) diff --git a/hw/xtensa/Kconfig b/hw/xtensa/Kconfig new file mode 100644 index 000000000..0740657ea --- /dev/null +++ b/hw/xtensa/Kconfig @@ -0,0 +1,14 @@ +config XTENSA_SIM + bool + +config XTENSA_VIRT + bool + select XTENSA_SIM + select PCI_EXPRESS_GENERIC_BRIDGE + select PCI_DEVICES + +config XTENSA_XTFPGA + bool + select OPENCORES_ETH + select PFLASH_CFI01 + select SERIAL diff --git a/hw/xtensa/bootparam.h b/hw/xtensa/bootparam.h new file mode 100644 index 000000000..ade7891ec --- /dev/null +++ b/hw/xtensa/bootparam.h @@ -0,0 +1,49 @@ +#ifndef HW_XTENSA_BOOTPARAM_H +#define HW_XTENSA_BOOTPARAM_H + +#define BP_TAG_COMMAND_LINE 0x1001 /* command line (0-terminated string)*/ +#define BP_TAG_INITRD 0x1002 /* ramdisk addr and size (bp_meminfo) */ +#define BP_TAG_MEMORY 0x1003 /* memory addr and size (bp_meminfo) */ +#define BP_TAG_SERIAL_BAUDRATE 0x1004 /* baud rate of current console. 
*/ +#define BP_TAG_SERIAL_PORT 0x1005 /* serial device of current console */ +#define BP_TAG_FDT 0x1006 /* flat device tree addr */ + +#define BP_TAG_FIRST 0x7B0B /* first tag with a version number */ +#define BP_TAG_LAST 0x7E0B /* last tag */ + +typedef struct BpTag { + uint16_t tag; + uint16_t size; +} BpTag; + +typedef struct BpMemInfo { + uint32_t type; + uint32_t start; + uint32_t end; +} BpMemInfo; + +#define MEMORY_TYPE_CONVENTIONAL 0x1000 +#define MEMORY_TYPE_NONE 0x2000 + +static inline size_t get_tag_size(size_t data_size) +{ + return data_size + sizeof(BpTag) + 4; +} + +static inline ram_addr_t put_tag(ram_addr_t addr, uint16_t tag, + size_t size, const void *data) +{ + BpTag bp_tag = { + .tag = tswap16(tag), + .size = tswap16((size + 3) & ~3), + }; + + cpu_physical_memory_write(addr, &bp_tag, sizeof(bp_tag)); + addr += sizeof(bp_tag); + cpu_physical_memory_write(addr, data, size); + addr += (size + 3) & ~3; + + return addr; +} + +#endif diff --git a/hw/xtensa/meson.build b/hw/xtensa/meson.build new file mode 100644 index 000000000..1d5835df4 --- /dev/null +++ b/hw/xtensa/meson.build @@ -0,0 +1,11 @@ +xtensa_ss = ss.source_set() +xtensa_ss.add(files( + 'mx_pic.c', + 'pic_cpu.c', + 'xtensa_memory.c', +)) +xtensa_ss.add(when: 'CONFIG_XTENSA_SIM', if_true: files('sim.c')) +xtensa_ss.add(when: 'CONFIG_XTENSA_VIRT', if_true: files('virt.c')) +xtensa_ss.add(when: 'CONFIG_XTENSA_XTFPGA', if_true: files('xtfpga.c')) + +hw_arch += {'xtensa': xtensa_ss} diff --git a/hw/xtensa/mx_pic.c b/hw/xtensa/mx_pic.c new file mode 100644 index 000000000..d889f953d --- /dev/null +++ b/hw/xtensa/mx_pic.c @@ -0,0 +1,354 @@ +/* + * Copyright (c) 2013 - 2019, Max Filippov, Open Source and Linux Lab. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Open Source and Linux Lab nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "qemu/osdep.h" +#include "hw/irq.h" +#include "hw/xtensa/mx_pic.h" +#include "qemu/log.h" + +#define MX_MAX_CPU 32 +#define MX_MAX_IRQ 32 + +#define MIROUT 0x0 +#define MIPICAUSE 0x100 +#define MIPISET 0x140 +#define MIENG 0x180 +#define MIENGSET 0x184 +#define MIASG 0x188 +#define MIASGSET 0x18c +#define MIPIPART 0x190 +#define SYSCFGID 0x1a0 +#define MPSCORE 0x200 +#define CCON 0x220 + +struct XtensaMxPic { + unsigned n_cpu; + unsigned n_irq; + + uint32_t ext_irq_state; + uint32_t mieng; + uint32_t miasg; + uint32_t mirout[MX_MAX_IRQ]; + uint32_t mipipart; + uint32_t runstall; + + qemu_irq *irq_inputs; + struct XtensaMxPicCpu { + XtensaMxPic *mx; + qemu_irq *irq; + qemu_irq runstall; + uint32_t mipicause; + uint32_t mirout_cache; + uint32_t irq_state_cache; + uint32_t ccon; + MemoryRegion reg; + } cpu[MX_MAX_CPU]; +}; + +static uint64_t xtensa_mx_pic_ext_reg_read(void *opaque, hwaddr offset, + unsigned size) +{ + struct XtensaMxPicCpu *mx_cpu = opaque; + struct XtensaMxPic *mx = mx_cpu->mx; + + if (offset < MIROUT + MX_MAX_IRQ) { + return mx->mirout[offset - MIROUT]; + } else if (offset >= MIPICAUSE && offset < MIPICAUSE + MX_MAX_CPU) { + return mx->cpu[offset - MIPICAUSE].mipicause; + } else { + switch (offset) { + case MIENG: + return mx->mieng; + + case MIASG: + return mx->miasg; + + case MIPIPART: + return mx->mipipart; + + case SYSCFGID: + return ((mx->n_cpu - 1) << 18) | (mx_cpu - mx->cpu); + + case MPSCORE: + return mx->runstall; + + case CCON: + return mx_cpu->ccon; + + default: + qemu_log_mask(LOG_GUEST_ERROR, + "unknown RER in MX PIC range: 0x%08x\n", + (uint32_t)offset); + return 0; + } + } +} + +static uint32_t xtensa_mx_pic_get_ipi_for_cpu(const XtensaMxPic *mx, + unsigned cpu) +{ + uint32_t mipicause = mx->cpu[cpu].mipicause; + uint32_t mipipart = mx->mipipart; + + return (((mipicause & 1) << (mipipart & 3)) | + ((mipicause & 0x000e) != 0) << ((mipipart >> 2) & 3) | + ((mipicause & 0x00f0) != 0) << ((mipipart >> 4) & 3) | + ((mipicause & 0xff00) != 0) << ((mipipart >> 6) & 3)) & 0x7; +} + +static uint32_t xtensa_mx_pic_get_ext_irq_for_cpu(const XtensaMxPic *mx, + unsigned cpu) +{ + return ((((mx->ext_irq_state & mx->mieng) | mx->miasg) & + mx->cpu[cpu].mirout_cache) << 2) | + xtensa_mx_pic_get_ipi_for_cpu(mx, cpu); +} + +static void xtensa_mx_pic_update_cpu(XtensaMxPic *mx, unsigned cpu) +{ + uint32_t irq = xtensa_mx_pic_get_ext_irq_for_cpu(mx, cpu); + uint32_t changed_irq = mx->cpu[cpu].irq_state_cache ^ irq; + unsigned i; + + qemu_log_mask(CPU_LOG_INT, "%s: CPU %d, irq: %08x, changed_irq: %08x\n", + __func__, cpu, irq, changed_irq); + mx->cpu[cpu].irq_state_cache = irq; + for (i = 0; changed_irq; ++i) { + uint32_t mask = 1u << i; + + if (changed_irq & mask) { + changed_irq ^= mask; + qemu_set_irq(mx->cpu[cpu].irq[i], irq & mask); + } + } +} + +static void xtensa_mx_pic_update_all(XtensaMxPic *mx) +{ + unsigned cpu; + + for (cpu = 0; cpu < mx->n_cpu; ++cpu) { + xtensa_mx_pic_update_cpu(mx, cpu); + } +} + +static void xtensa_mx_pic_ext_reg_write(void *opaque, hwaddr offset, + uint64_t v, unsigned size) +{ + struct XtensaMxPicCpu *mx_cpu = opaque; + struct XtensaMxPic *mx = mx_cpu->mx; + unsigned cpu; + + if (offset < MIROUT + mx->n_irq) { + mx->mirout[offset - MIROUT] = v; + for (cpu = 0; cpu < mx->n_cpu; ++cpu) { + uint32_t mask = 1u << (offset - MIROUT); + + if (!(mx->cpu[cpu].mirout_cache & mask) != !(v & (1u << cpu))) { + mx->cpu[cpu].mirout_cache ^= mask; + xtensa_mx_pic_update_cpu(mx, cpu); + } + } + } else if (offset >= MIPICAUSE && offset < 
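/*
 * [Worked example, not part of the original patch.]
 * xtensa_mx_pic_get_ipi_for_cpu() above folds the 16 MIPICAUSE bits
 * into up to three per-CPU IPI lines, with the 2-bit MIPIPART fields
 * choosing a line for each group (cause bit 0; bits 1-3; bits 4-7;
 * bits 8-15). With mipipart = 0, for example, every group targets
 * line 0, so any pending cause bit raises IPI line 0. The branch just
 * below implements MIPICAUSE's write-one-to-clear semantics
 * (mipicause &= ~v).
 */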
MIPICAUSE + mx->n_cpu) { + cpu = offset - MIPICAUSE; + mx->cpu[cpu].mipicause &= ~v; + xtensa_mx_pic_update_cpu(mx, cpu); + } else if (offset >= MIPISET && offset < MIPISET + 16) { + for (cpu = 0; cpu < mx->n_cpu; ++cpu) { + if (v & (1u << cpu)) { + mx->cpu[cpu].mipicause |= 1u << (offset - MIPISET); + xtensa_mx_pic_update_cpu(mx, cpu); + } + } + } else { + uint32_t change = 0; + uint32_t oldv, newv; + const char *name = "???"; + + switch (offset) { + case MIENG: + change = mx->mieng & v; + oldv = mx->mieng; + mx->mieng &= ~v; + newv = mx->mieng; + name = "MIENG"; + break; + + case MIENGSET: + change = ~mx->mieng & v; + oldv = mx->mieng; + mx->mieng |= v; + newv = mx->mieng; + name = "MIENG"; + break; + + case MIASG: + change = mx->miasg & v; + oldv = mx->miasg; + mx->miasg &= ~v; + newv = mx->miasg; + name = "MIASG"; + break; + + case MIASGSET: + change = ~mx->miasg & v; + oldv = mx->miasg; + mx->miasg |= v; + newv = mx->miasg; + name = "MIASG"; + break; + + case MIPIPART: + change = mx->mipipart ^ v; + oldv = mx->mipipart; + mx->mipipart = v; + newv = mx->mipipart; + name = "MIPIPART"; + break; + + case MPSCORE: + change = mx->runstall ^ v; + oldv = mx->runstall; + mx->runstall = v; + newv = mx->runstall; + name = "RUNSTALL"; + for (cpu = 0; cpu < mx->n_cpu; ++cpu) { + if (change & (1u << cpu)) { + qemu_set_irq(mx->cpu[cpu].runstall, v & (1u << cpu)); + } + } + break; + + case CCON: + mx_cpu->ccon = v & 0x1; + break; + + default: + qemu_log_mask(LOG_GUEST_ERROR, + "unknown WER in MX PIC range: 0x%08x = 0x%08x\n", + (uint32_t)offset, (uint32_t)v); + break; + } + if (change) { + qemu_log_mask(CPU_LOG_INT, + "%s: %s changed by CPU %d: %08x -> %08x\n", + __func__, name, (int)(mx_cpu - mx->cpu), + oldv, newv); + xtensa_mx_pic_update_all(mx); + } + } +} + +static const MemoryRegionOps xtensa_mx_pic_ops = { + .read = xtensa_mx_pic_ext_reg_read, + .write = xtensa_mx_pic_ext_reg_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + .unaligned = true, + }, +}; + +MemoryRegion *xtensa_mx_pic_register_cpu(XtensaMxPic *mx, + qemu_irq *irq, + qemu_irq runstall) +{ + struct XtensaMxPicCpu *mx_cpu = mx->cpu + mx->n_cpu; + + mx_cpu->mx = mx; + mx_cpu->irq = irq; + mx_cpu->runstall = runstall; + + memory_region_init_io(&mx_cpu->reg, NULL, &xtensa_mx_pic_ops, mx_cpu, + "mx_pic", 0x280); + + ++mx->n_cpu; + return &mx_cpu->reg; +} + +static void xtensa_mx_pic_set_irq(void *opaque, int irq, int active) +{ + XtensaMxPic *mx = opaque; + + if (irq < mx->n_irq) { + uint32_t old_irq_state = mx->ext_irq_state; + + if (active) { + mx->ext_irq_state |= 1u << irq; + } else { + mx->ext_irq_state &= ~(1u << irq); + } + if (old_irq_state != mx->ext_irq_state) { + qemu_log_mask(CPU_LOG_INT, + "%s: IRQ %d, active: %d, ext_irq_state: %08x -> %08x\n", + __func__, irq, active, + old_irq_state, mx->ext_irq_state); + xtensa_mx_pic_update_all(mx); + } + } else { + qemu_log_mask(LOG_GUEST_ERROR, "%s: IRQ %d out of range\n", + __func__, irq); + } +} + +XtensaMxPic *xtensa_mx_pic_init(unsigned n_irq) +{ + XtensaMxPic *mx = calloc(1, sizeof(XtensaMxPic)); + + mx->n_irq = n_irq + 1; + mx->irq_inputs = qemu_allocate_irqs(xtensa_mx_pic_set_irq, mx, + mx->n_irq); + return mx; +} + +void xtensa_mx_pic_reset(void *opaque) +{ + XtensaMxPic *mx = opaque; + unsigned i; + + mx->ext_irq_state = 0; + mx->mieng = mx->n_irq < 32 ? 
(1u << mx->n_irq) - 1 : ~0u; + mx->miasg = 0; + mx->mipipart = 0; + for (i = 0; i < mx->n_irq; ++i) { + mx->mirout[i] = 1; + } + for (i = 0; i < mx->n_cpu; ++i) { + mx->cpu[i].mipicause = 0; + mx->cpu[i].mirout_cache = i ? 0 : mx->mieng; + mx->cpu[i].irq_state_cache = 0; + mx->cpu[i].ccon = 0; + } + mx->runstall = (1u << mx->n_cpu) - 2; + for (i = 0; i < mx->n_cpu; ++i) { + qemu_set_irq(mx->cpu[i].runstall, i > 0); + } +} + +qemu_irq *xtensa_mx_pic_get_extints(XtensaMxPic *mx) +{ + return mx->irq_inputs + 1; +} diff --git a/hw/xtensa/pic_cpu.c b/hw/xtensa/pic_cpu.c new file mode 100644 index 000000000..6c9447565 --- /dev/null +++ b/hw/xtensa/pic_cpu.c @@ -0,0 +1,130 @@ +/* + * Copyright (c) 2011, Max Filippov, Open Source and Linux Lab. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Open Source and Linux Lab nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "hw/irq.h" +#include "qemu/log.h" +#include "qemu/timer.h" + +void check_interrupts(CPUXtensaState *env) +{ + CPUState *cs = env_cpu(env); + int minlevel = xtensa_get_cintlevel(env); + uint32_t int_set_enabled = env->sregs[INTSET] & + (env->sregs[INTENABLE] | env->config->inttype_mask[INTTYPE_NMI]); + int level; + + if (minlevel >= env->config->nmi_level) { + minlevel = env->config->nmi_level - 1; + } + for (level = env->config->nlevel; level > minlevel; --level) { + if (env->config->level_mask[level] & int_set_enabled) { + env->pending_irq_level = level; + cpu_interrupt(cs, CPU_INTERRUPT_HARD); + qemu_log_mask(CPU_LOG_INT, + "%s level = %d, cintlevel = %d, " + "pc = %08x, a0 = %08x, ps = %08x, " + "intset = %08x, intenable = %08x, " + "ccount = %08x\n", + __func__, level, xtensa_get_cintlevel(env), + env->pc, env->regs[0], env->sregs[PS], + env->sregs[INTSET], env->sregs[INTENABLE], + env->sregs[CCOUNT]); + return; + } + } + env->pending_irq_level = 0; + cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD); +} + +static void xtensa_set_irq(void *opaque, int irq, int active) +{ + CPUXtensaState *env = opaque; + + if (irq >= env->config->ninterrupt) { + qemu_log("%s: bad IRQ %d\n", __func__, irq); + } else { + uint32_t irq_bit = 1 << irq; + + if (active) { + qatomic_or(&env->sregs[INTSET], irq_bit); + } else if (env->config->interrupt[irq].inttype == INTTYPE_LEVEL) { + qatomic_and(&env->sregs[INTSET], ~irq_bit); + } + + check_interrupts(env); + } +} + +static void xtensa_ccompare_cb(void *opaque) +{ + XtensaCcompareTimer *ccompare = opaque; + CPUXtensaState *env = ccompare->env; + unsigned i = ccompare - env->ccompare; + + qemu_set_irq(env->irq_inputs[env->config->timerint[i]], 1); +} + +static void xtensa_set_runstall(void *opaque, int irq, int active) +{ + CPUXtensaState *env = opaque; + xtensa_runstall(env, active); +} + +void xtensa_irq_init(CPUXtensaState *env) +{ + unsigned i; + + env->irq_inputs = qemu_allocate_irqs(xtensa_set_irq, env, + env->config->ninterrupt); + if (xtensa_option_enabled(env->config, XTENSA_OPTION_TIMER_INTERRUPT)) { + env->time_base = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + env->ccount_base = env->sregs[CCOUNT]; + for (i = 0; i < env->config->nccompare; ++i) { + env->ccompare[i].env = env; + env->ccompare[i].timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, + xtensa_ccompare_cb, env->ccompare + i); + } + } + for (i = 0; i < env->config->nextint; ++i) { + unsigned irq = env->config->extint[i]; + + env->ext_irq_inputs[i] = env->irq_inputs[irq]; + } + env->runstall_irq = qemu_allocate_irq(xtensa_set_runstall, env, 0); +} + +qemu_irq *xtensa_get_extints(CPUXtensaState *env) +{ + return env->ext_irq_inputs; +} + +qemu_irq xtensa_get_runstall(CPUXtensaState *env) +{ + return env->runstall_irq; +} diff --git a/hw/xtensa/sim.c b/hw/xtensa/sim.c new file mode 100644 index 000000000..2028fe793 --- /dev/null +++ b/hw/xtensa/sim.c @@ -0,0 +1,134 @@ +/* + * Copyright (c) 2011, Max Filippov, Open Source and Linux Lab. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Open Source and Linux Lab nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "sysemu/reset.h" +#include "sysemu/sysemu.h" +#include "hw/boards.h" +#include "hw/loader.h" +#include "elf.h" +#include "exec/memory.h" +#include "qemu/error-report.h" +#include "xtensa_memory.h" +#include "xtensa_sim.h" + +static uint64_t translate_phys_addr(void *opaque, uint64_t addr) +{ + XtensaCPU *cpu = opaque; + + return cpu_get_phys_page_debug(CPU(cpu), addr); +} + +static void sim_reset(void *opaque) +{ + XtensaCPU *cpu = opaque; + + cpu_reset(CPU(cpu)); +} + +XtensaCPU *xtensa_sim_common_init(MachineState *machine) +{ + XtensaCPU *cpu = NULL; + CPUXtensaState *env = NULL; + ram_addr_t ram_size = machine->ram_size; + int n; + + for (n = 0; n < machine->smp.cpus; n++) { + cpu = XTENSA_CPU(cpu_create(machine->cpu_type)); + env = &cpu->env; + + env->sregs[PRID] = n; + qemu_register_reset(sim_reset, cpu); + /* Need MMU initialized prior to ELF loading, + * so that ELF gets loaded into virtual addresses + */ + sim_reset(cpu); + } + + if (env) { + XtensaMemory sysram = env->config->sysram; + + sysram.location[0].size = ram_size; + xtensa_create_memory_regions(&env->config->instrom, "xtensa.instrom", + get_system_memory()); + xtensa_create_memory_regions(&env->config->instram, "xtensa.instram", + get_system_memory()); + xtensa_create_memory_regions(&env->config->datarom, "xtensa.datarom", + get_system_memory()); + xtensa_create_memory_regions(&env->config->dataram, "xtensa.dataram", + get_system_memory()); + xtensa_create_memory_regions(&env->config->sysrom, "xtensa.sysrom", + get_system_memory()); + xtensa_create_memory_regions(&sysram, "xtensa.sysram", + get_system_memory()); + } + if (serial_hd(0)) { + xtensa_sim_open_console(serial_hd(0)); + } + return cpu; +} + +void xtensa_sim_load_kernel(XtensaCPU *cpu, MachineState *machine) +{ + const char *kernel_filename = machine->kernel_filename; +#ifdef TARGET_WORDS_BIGENDIAN + int big_endian = true; +#else + int big_endian = false; +#endif + + if (kernel_filename) { + uint64_t elf_entry; + int success = load_elf(kernel_filename, NULL, translate_phys_addr, cpu, + &elf_entry, NULL, NULL, NULL, big_endian, + EM_XTENSA, 0, 0); + + if (success > 0) { + cpu->env.pc = elf_entry; + } + } +} + +static void xtensa_sim_init(MachineState *machine) +{ + XtensaCPU *cpu = xtensa_sim_common_init(machine); + + 
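/* CPUs and memory regions are set up; now load the guest kernel, if any. */ +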
xtensa_sim_load_kernel(cpu, machine); +} + +static void xtensa_sim_machine_init(MachineClass *mc) +{ + mc->desc = "sim machine (" XTENSA_DEFAULT_CPU_MODEL ")"; + mc->is_default = true; + mc->init = xtensa_sim_init; + mc->max_cpus = 4; + mc->no_serial = 1; + mc->default_cpu_type = XTENSA_DEFAULT_CPU_TYPE; +} + +DEFINE_MACHINE("sim", xtensa_sim_machine_init) diff --git a/hw/xtensa/virt.c b/hw/xtensa/virt.c new file mode 100644 index 000000000..a18e3fc91 --- /dev/null +++ b/hw/xtensa/virt.c @@ -0,0 +1,132 @@ +/* + * Copyright (c) 2019, Max Filippov, Open Source and Linux Lab. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Open Source and Linux Lab nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "sysemu/reset.h" +#include "hw/boards.h" +#include "hw/loader.h" +#include "hw/pci-host/gpex.h" +#include "net/net.h" +#include "elf.h" +#include "exec/memory.h" +#include "qemu/error-report.h" +#include "xtensa_memory.h" +#include "xtensa_sim.h" + +static void create_pcie(CPUXtensaState *env, int irq_base, hwaddr addr_base) +{ + hwaddr base_ecam = addr_base + 0x00100000; + hwaddr size_ecam = 0x03f00000; + hwaddr base_pio = addr_base + 0x00000000; + hwaddr size_pio = 0x00010000; + hwaddr base_mmio = addr_base + 0x04000000; + hwaddr size_mmio = 0x08000000; + + MemoryRegion *ecam_alias; + MemoryRegion *ecam_reg; + MemoryRegion *pio_alias; + MemoryRegion *pio_reg; + MemoryRegion *mmio_alias; + MemoryRegion *mmio_reg; + + DeviceState *dev; + PCIHostState *pci; + qemu_irq *extints; + int i; + + dev = qdev_new(TYPE_GPEX_HOST); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + + /* Map only the first size_ecam bytes of ECAM space. 
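The rest of the ECAM window is left unmapped.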
*/ + ecam_alias = g_new0(MemoryRegion, 1); + ecam_reg = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 0); + memory_region_init_alias(ecam_alias, OBJECT(dev), "pcie-ecam", + ecam_reg, 0, size_ecam); + memory_region_add_subregion(get_system_memory(), base_ecam, ecam_alias); + + /* + * Map the MMIO window into system address space so as to expose + * the section of PCI MMIO space which starts at the same base address + * (ie 1:1 mapping for that part of PCI MMIO space visible through + * the window). + */ + mmio_alias = g_new0(MemoryRegion, 1); + mmio_reg = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 1); + memory_region_init_alias(mmio_alias, OBJECT(dev), "pcie-mmio", + mmio_reg, base_mmio, size_mmio); + memory_region_add_subregion(get_system_memory(), base_mmio, mmio_alias); + + /* Map IO port space. */ + pio_alias = g_new0(MemoryRegion, 1); + pio_reg = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 2); + memory_region_init_alias(pio_alias, OBJECT(dev), "pcie-pio", + pio_reg, 0, size_pio); + memory_region_add_subregion(get_system_memory(), base_pio, pio_alias); + + /* Connect IRQ lines. */ + extints = xtensa_get_extints(env); + + for (i = 0; i < GPEX_NUM_IRQS; i++) { + void *q = extints[irq_base + i]; + + sysbus_connect_irq(SYS_BUS_DEVICE(dev), i, q); + gpex_set_irq_num(GPEX_HOST(dev), i, irq_base + i); + } + + pci = PCI_HOST_BRIDGE(dev); + if (pci->bus) { + for (i = 0; i < nb_nics; i++) { + NICInfo *nd = &nd_table[i]; + + if (!nd->model) { + nd->model = g_strdup("virtio"); + } + + pci_nic_init_nofail(nd, pci->bus, nd->model, NULL); + } + } +} + +static void xtensa_virt_init(MachineState *machine) +{ + XtensaCPU *cpu = xtensa_sim_common_init(machine); + CPUXtensaState *env = &cpu->env; + + create_pcie(env, 0, 0xf0000000); + xtensa_sim_load_kernel(cpu, machine); +} + +static void xtensa_virt_machine_init(MachineClass *mc) +{ + mc->desc = "virt machine (" XTENSA_DEFAULT_CPU_MODEL ")"; + mc->init = xtensa_virt_init; + mc->max_cpus = 32; + mc->default_cpu_type = XTENSA_DEFAULT_CPU_TYPE; +} + +DEFINE_MACHINE("virt", xtensa_virt_machine_init) diff --git a/hw/xtensa/xtensa_memory.c b/hw/xtensa/xtensa_memory.c new file mode 100644 index 000000000..2c1095f01 --- /dev/null +++ b/hw/xtensa/xtensa_memory.c @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2017, Max Filippov, Open Source and Linux Lab. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Open Source and Linux Lab nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "exec/memory.h" +#include "qemu/error-report.h" +#include "xtensa_memory.h" + +void xtensa_create_memory_regions(const XtensaMemory *memory, + const char *name, + MemoryRegion *super) +{ + unsigned i; + GString *num_name = g_string_new(NULL); + + for (i = 0; i < memory->num; ++i) { + MemoryRegion *m; + + g_string_printf(num_name, "%s%u", name, i); + m = g_new(MemoryRegion, 1); + memory_region_init_ram(m, NULL, num_name->str, + memory->location[i].size, &error_fatal); + memory_region_add_subregion(super, memory->location[i].addr, m); + } + g_string_free(num_name, true); +} diff --git a/hw/xtensa/xtensa_memory.h b/hw/xtensa/xtensa_memory.h new file mode 100644 index 000000000..af7e8025e --- /dev/null +++ b/hw/xtensa/xtensa_memory.h @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2017, Max Filippov, Open Source and Linux Lab. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Open Source and Linux Lab nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef XTENSA_MEMORY_H +#define XTENSA_MEMORY_H + +#include "cpu.h" + +void xtensa_create_memory_regions(const XtensaMemory *memory, + const char *name, + MemoryRegion *super); + +#endif diff --git a/hw/xtensa/xtensa_sim.h b/hw/xtensa/xtensa_sim.h new file mode 100644 index 000000000..bdc92f3d2 --- /dev/null +++ b/hw/xtensa/xtensa_sim.h @@ -0,0 +1,34 @@ +/* + * Copyright (c) 2019, Max Filippov, Open Source and Linux Lab. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Open Source and Linux Lab nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef XTENSA_SIM_H +#define XTENSA_SIM_H + +XtensaCPU *xtensa_sim_common_init(MachineState *machine); +void xtensa_sim_load_kernel(XtensaCPU *cpu, MachineState *machine); + +#endif diff --git a/hw/xtensa/xtfpga.c b/hw/xtensa/xtfpga.c new file mode 100644 index 000000000..17f087b39 --- /dev/null +++ b/hw/xtensa/xtfpga.c @@ -0,0 +1,746 @@ +/* + * Copyright (c) 2011, Max Filippov, Open Source and Linux Lab. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Open Source and Linux Lab nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "qapi/error.h" +#include "cpu.h" +#include "sysemu/sysemu.h" +#include "hw/boards.h" +#include "hw/loader.h" +#include "hw/qdev-properties.h" +#include "elf.h" +#include "exec/memory.h" +#include "hw/char/serial.h" +#include "net/net.h" +#include "hw/sysbus.h" +#include "hw/block/flash.h" +#include "chardev/char.h" +#include "sysemu/device_tree.h" +#include "sysemu/reset.h" +#include "sysemu/runstate.h" +#include "qemu/error-report.h" +#include "qemu/option.h" +#include "bootparam.h" +#include "xtensa_memory.h" +#include "hw/xtensa/mx_pic.h" +#include "migration/vmstate.h" + +typedef struct XtfpgaFlashDesc { + hwaddr base; + size_t size; + size_t boot_base; + size_t sector_size; +} XtfpgaFlashDesc; + +typedef struct XtfpgaBoardDesc { + const XtfpgaFlashDesc *flash; + size_t sram_size; + const hwaddr *io; +} XtfpgaBoardDesc; + +typedef struct XtfpgaFpgaState { + MemoryRegion iomem; + uint32_t freq; + uint32_t leds; + uint32_t switches; +} XtfpgaFpgaState; + +static void xtfpga_fpga_reset(void *opaque) +{ + XtfpgaFpgaState *s = opaque; + + s->leds = 0; + s->switches = 0; +} + +static uint64_t xtfpga_fpga_read(void *opaque, hwaddr addr, + unsigned size) +{ + XtfpgaFpgaState *s = opaque; + + switch (addr) { + case 0x0: /*build date code*/ + return 0x09272011; + + case 0x4: /*processor clock frequency, Hz*/ + return s->freq; + + case 0x8: /*LEDs (off = 0, on = 1)*/ + return s->leds; + + case 0xc: /*DIP switches (off = 0, on = 1)*/ + return s->switches; + } + return 0; +} + +static void xtfpga_fpga_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + XtfpgaFpgaState *s = opaque; + + switch (addr) { + case 0x8: /*LEDs (off = 0, on = 1)*/ + s->leds = val; + break; + + case 0x10: /*board reset*/ + if (val == 0xdead) { + qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); + } + break; + } +} + +static const MemoryRegionOps xtfpga_fpga_ops = { + .read = xtfpga_fpga_read, + .write = xtfpga_fpga_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static XtfpgaFpgaState *xtfpga_fpga_init(MemoryRegion *address_space, + hwaddr base, uint32_t freq) +{ + XtfpgaFpgaState *s = g_malloc(sizeof(XtfpgaFpgaState)); + + memory_region_init_io(&s->iomem, NULL, &xtfpga_fpga_ops, s, + "xtfpga.fpga", 0x10000); + memory_region_add_subregion(address_space, base, &s->iomem); + s->freq = freq; + xtfpga_fpga_reset(s); + qemu_register_reset(xtfpga_fpga_reset, s); + return s; +} + +static void xtfpga_net_init(MemoryRegion *address_space, + hwaddr base, + hwaddr descriptors, + hwaddr buffers, + qemu_irq irq, NICInfo *nd) +{ + DeviceState *dev; + SysBusDevice *s; + MemoryRegion *ram; + + dev = qdev_new("open_eth"); + qdev_set_nic_properties(dev, nd); + + s = SYS_BUS_DEVICE(dev); + sysbus_realize_and_unref(s, &error_fatal); + sysbus_connect_irq(s, 0, irq); + memory_region_add_subregion(address_space, base, + sysbus_mmio_get_region(s, 0)); + memory_region_add_subregion(address_space, descriptors, + sysbus_mmio_get_region(s, 1)); + + ram = g_malloc(sizeof(*ram)); + memory_region_init_ram_nomigrate(ram, OBJECT(s), "open_eth.ram", 16 * KiB, + &error_fatal); + vmstate_register_ram_global(ram); + memory_region_add_subregion(address_space, buffers, ram); +} + +static PFlashCFI01 *xtfpga_flash_init(MemoryRegion *address_space, + const XtfpgaBoardDesc *board, + DriveInfo *dinfo, int be) +{ + SysBusDevice *s; + DeviceState *dev = qdev_new(TYPE_PFLASH_CFI01); + + qdev_prop_set_drive(dev, "drive", blk_by_legacy_dinfo(dinfo)); + qdev_prop_set_uint32(dev, 
"num-blocks", + board->flash->size / board->flash->sector_size); + qdev_prop_set_uint64(dev, "sector-length", board->flash->sector_size); + qdev_prop_set_uint8(dev, "width", 2); + qdev_prop_set_bit(dev, "big-endian", be); + qdev_prop_set_string(dev, "name", "xtfpga.io.flash"); + s = SYS_BUS_DEVICE(dev); + sysbus_realize_and_unref(s, &error_fatal); + memory_region_add_subregion(address_space, board->flash->base, + sysbus_mmio_get_region(s, 0)); + return PFLASH_CFI01(dev); +} + +static uint64_t translate_phys_addr(void *opaque, uint64_t addr) +{ + XtensaCPU *cpu = opaque; + + return cpu_get_phys_page_debug(CPU(cpu), addr); +} + +static void xtfpga_reset(void *opaque) +{ + XtensaCPU *cpu = opaque; + + cpu_reset(CPU(cpu)); +} + +static uint64_t xtfpga_io_read(void *opaque, hwaddr addr, + unsigned size) +{ + return 0; +} + +static void xtfpga_io_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ +} + +static const MemoryRegionOps xtfpga_io_ops = { + .read = xtfpga_io_read, + .write = xtfpga_io_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void xtfpga_init(const XtfpgaBoardDesc *board, MachineState *machine) +{ +#ifdef TARGET_WORDS_BIGENDIAN + int be = 1; +#else + int be = 0; +#endif + MemoryRegion *system_memory = get_system_memory(); + XtensaCPU *cpu = NULL; + CPUXtensaState *env = NULL; + MemoryRegion *system_io; + XtensaMxPic *mx_pic = NULL; + qemu_irq *extints; + DriveInfo *dinfo; + PFlashCFI01 *flash = NULL; + const char *kernel_filename = machine->kernel_filename; + const char *kernel_cmdline = machine->kernel_cmdline; + const char *dtb_filename = machine->dtb; + const char *initrd_filename = machine->initrd_filename; + const unsigned system_io_size = 224 * MiB; + uint32_t freq = 10000000; + int n; + unsigned int smp_cpus = machine->smp.cpus; + + if (smp_cpus > 1) { + mx_pic = xtensa_mx_pic_init(31); + qemu_register_reset(xtensa_mx_pic_reset, mx_pic); + } + for (n = 0; n < smp_cpus; n++) { + CPUXtensaState *cenv = NULL; + + cpu = XTENSA_CPU(cpu_create(machine->cpu_type)); + cenv = &cpu->env; + if (!env) { + env = cenv; + freq = env->config->clock_freq_khz * 1000; + } + + if (mx_pic) { + MemoryRegion *mx_eri; + + mx_eri = xtensa_mx_pic_register_cpu(mx_pic, + xtensa_get_extints(cenv), + xtensa_get_runstall(cenv)); + memory_region_add_subregion(xtensa_get_er_region(cenv), + 0, mx_eri); + } + cenv->sregs[PRID] = n; + xtensa_select_static_vectors(cenv, n != 0); + qemu_register_reset(xtfpga_reset, cpu); + /* Need MMU initialized prior to ELF loading, + * so that ELF gets loaded into virtual addresses + */ + cpu_reset(CPU(cpu)); + } + if (smp_cpus > 1) { + extints = xtensa_mx_pic_get_extints(mx_pic); + } else { + extints = xtensa_get_extints(env); + } + + if (env) { + XtensaMemory sysram = env->config->sysram; + + sysram.location[0].size = machine->ram_size; + xtensa_create_memory_regions(&env->config->instrom, "xtensa.instrom", + system_memory); + xtensa_create_memory_regions(&env->config->instram, "xtensa.instram", + system_memory); + xtensa_create_memory_regions(&env->config->datarom, "xtensa.datarom", + system_memory); + xtensa_create_memory_regions(&env->config->dataram, "xtensa.dataram", + system_memory); + xtensa_create_memory_regions(&sysram, "xtensa.sysram", + system_memory); + } + + system_io = g_malloc(sizeof(*system_io)); + memory_region_init_io(system_io, NULL, &xtfpga_io_ops, NULL, "xtfpga.io", + system_io_size); + memory_region_add_subregion(system_memory, board->io[0], system_io); + if (board->io[1]) { + MemoryRegion *io = g_malloc(sizeof(*io)); + + 
memory_region_init_alias(io, NULL, "xtfpga.io.cached", + system_io, 0, system_io_size); + memory_region_add_subregion(system_memory, board->io[1], io); + } + xtfpga_fpga_init(system_io, 0x0d020000, freq); + if (nd_table[0].used) { + xtfpga_net_init(system_io, 0x0d030000, 0x0d030400, 0x0d800000, + extints[1], nd_table); + } + + serial_mm_init(system_io, 0x0d050020, 2, extints[0], + 115200, serial_hd(0), DEVICE_NATIVE_ENDIAN); + + dinfo = drive_get(IF_PFLASH, 0, 0); + if (dinfo) { + flash = xtfpga_flash_init(system_io, board, dinfo, be); + } + + /* Use presence of kernel file name as 'boot from SRAM' switch. */ + if (kernel_filename) { + uint32_t entry_point = env->pc; + size_t bp_size = 3 * get_tag_size(0); /* first/last and memory tags */ + uint32_t tagptr = env->config->sysrom.location[0].addr + + board->sram_size; + uint32_t cur_tagptr; + BpMemInfo memory_location = { + .type = tswap32(MEMORY_TYPE_CONVENTIONAL), + .start = tswap32(env->config->sysram.location[0].addr), + .end = tswap32(env->config->sysram.location[0].addr + + machine->ram_size), + }; + uint32_t lowmem_end = machine->ram_size < 0x08000000 ? + machine->ram_size : 0x08000000; + uint32_t cur_lowmem = QEMU_ALIGN_UP(lowmem_end / 2, 4096); + + lowmem_end += env->config->sysram.location[0].addr; + cur_lowmem += env->config->sysram.location[0].addr; + + xtensa_create_memory_regions(&env->config->sysrom, "xtensa.sysrom", + system_memory); + + if (kernel_cmdline) { + bp_size += get_tag_size(strlen(kernel_cmdline) + 1); + } + if (dtb_filename) { + bp_size += get_tag_size(sizeof(uint32_t)); + } + if (initrd_filename) { + bp_size += get_tag_size(sizeof(BpMemInfo)); + } + + /* Put kernel bootparameters to the end of that SRAM */ + tagptr = (tagptr - bp_size) & ~0xff; + cur_tagptr = put_tag(tagptr, BP_TAG_FIRST, 0, NULL); + cur_tagptr = put_tag(cur_tagptr, BP_TAG_MEMORY, + sizeof(memory_location), &memory_location); + + if (kernel_cmdline) { + cur_tagptr = put_tag(cur_tagptr, BP_TAG_COMMAND_LINE, + strlen(kernel_cmdline) + 1, kernel_cmdline); + } +#ifdef CONFIG_FDT + if (dtb_filename) { + int fdt_size; + void *fdt = load_device_tree(dtb_filename, &fdt_size); + uint32_t dtb_addr = tswap32(cur_lowmem); + + if (!fdt) { + error_report("could not load DTB '%s'", dtb_filename); + exit(EXIT_FAILURE); + } + + cpu_physical_memory_write(cur_lowmem, fdt, fdt_size); + cur_tagptr = put_tag(cur_tagptr, BP_TAG_FDT, + sizeof(dtb_addr), &dtb_addr); + cur_lowmem = QEMU_ALIGN_UP(cur_lowmem + fdt_size, 4 * KiB); + g_free(fdt); + } +#else + if (dtb_filename) { + error_report("could not load DTB '%s': " + "FDT support is not configured in QEMU", + dtb_filename); + exit(EXIT_FAILURE); + } +#endif + if (initrd_filename) { + BpMemInfo initrd_location = { 0 }; + int initrd_size = load_ramdisk(initrd_filename, cur_lowmem, + lowmem_end - cur_lowmem); + + if (initrd_size < 0) { + initrd_size = load_image_targphys(initrd_filename, + cur_lowmem, + lowmem_end - cur_lowmem); + } + if (initrd_size < 0) { + error_report("could not load initrd '%s'", initrd_filename); + exit(EXIT_FAILURE); + } + initrd_location.start = tswap32(cur_lowmem); + initrd_location.end = tswap32(cur_lowmem + initrd_size); + cur_tagptr = put_tag(cur_tagptr, BP_TAG_INITRD, + sizeof(initrd_location), &initrd_location); + cur_lowmem = QEMU_ALIGN_UP(cur_lowmem + initrd_size, 4 * KiB); + } + cur_tagptr = put_tag(cur_tagptr, BP_TAG_LAST, 0, NULL); + env->regs[2] = tagptr; + + uint64_t elf_entry; + int success = load_elf(kernel_filename, NULL, translate_phys_addr, cpu, + &elf_entry, NULL, NULL, NULL, be, 
EM_XTENSA, 0, 0); + if (success > 0) { + entry_point = elf_entry; + } else { + hwaddr ep; + int is_linux; + success = load_uimage(kernel_filename, &ep, NULL, &is_linux, + translate_phys_addr, cpu); + if (success > 0 && is_linux) { + entry_point = ep; + } else { + error_report("could not load kernel '%s'", + kernel_filename); + exit(EXIT_FAILURE); + } + } + if (entry_point != env->pc) { + uint8_t boot[] = { +#ifdef TARGET_WORDS_BIGENDIAN + 0x60, 0x00, 0x08, /* j 1f */ + 0x00, /* .literal_position */ + 0x00, 0x00, 0x00, 0x00, /* .literal entry_pc */ + 0x00, 0x00, 0x00, 0x00, /* .literal entry_a2 */ + /* 1: */ + 0x10, 0xff, 0xfe, /* l32r a0, entry_pc */ + 0x12, 0xff, 0xfe, /* l32r a2, entry_a2 */ + 0x0a, 0x00, 0x00, /* jx a0 */ +#else + 0x06, 0x02, 0x00, /* j 1f */ + 0x00, /* .literal_position */ + 0x00, 0x00, 0x00, 0x00, /* .literal entry_pc */ + 0x00, 0x00, 0x00, 0x00, /* .literal entry_a2 */ + /* 1: */ + 0x01, 0xfe, 0xff, /* l32r a0, entry_pc */ + 0x21, 0xfe, 0xff, /* l32r a2, entry_a2 */ + 0xa0, 0x00, 0x00, /* jx a0 */ +#endif + }; + uint32_t entry_pc = tswap32(entry_point); + uint32_t entry_a2 = tswap32(tagptr); + + memcpy(boot + 4, &entry_pc, sizeof(entry_pc)); + memcpy(boot + 8, &entry_a2, sizeof(entry_a2)); + cpu_physical_memory_write(env->pc, boot, sizeof(boot)); + } + } else { + if (flash) { + MemoryRegion *flash_mr = pflash_cfi01_get_memory(flash); + MemoryRegion *flash_io = g_malloc(sizeof(*flash_io)); + uint32_t size = env->config->sysrom.location[0].size; + + if (board->flash->size - board->flash->boot_base < size) { + size = board->flash->size - board->flash->boot_base; + } + + memory_region_init_alias(flash_io, NULL, "xtfpga.flash", + flash_mr, board->flash->boot_base, size); + memory_region_add_subregion(system_memory, + env->config->sysrom.location[0].addr, + flash_io); + } else { + xtensa_create_memory_regions(&env->config->sysrom, "xtensa.sysrom", + system_memory); + } + } +} + +#define XTFPGA_MMU_RESERVED_MEMORY_SIZE (128 * MiB) + +static const hwaddr xtfpga_mmu_io[2] = { + 0xf0000000, +}; + +static const hwaddr xtfpga_nommu_io[2] = { + 0x90000000, + 0x70000000, +}; + +static const XtfpgaFlashDesc lx60_flash = { + .base = 0x08000000, + .size = 0x00400000, + .sector_size = 0x10000, +}; + +static void xtfpga_lx60_init(MachineState *machine) +{ + static const XtfpgaBoardDesc lx60_board = { + .flash = &lx60_flash, + .sram_size = 0x20000, + .io = xtfpga_mmu_io, + }; + xtfpga_init(&lx60_board, machine); +} + +static void xtfpga_lx60_nommu_init(MachineState *machine) +{ + static const XtfpgaBoardDesc lx60_board = { + .flash = &lx60_flash, + .sram_size = 0x20000, + .io = xtfpga_nommu_io, + }; + xtfpga_init(&lx60_board, machine); +} + +static const XtfpgaFlashDesc lx200_flash = { + .base = 0x08000000, + .size = 0x01000000, + .sector_size = 0x20000, +}; + +static void xtfpga_lx200_init(MachineState *machine) +{ + static const XtfpgaBoardDesc lx200_board = { + .flash = &lx200_flash, + .sram_size = 0x2000000, + .io = xtfpga_mmu_io, + }; + xtfpga_init(&lx200_board, machine); +} + +static void xtfpga_lx200_nommu_init(MachineState *machine) +{ + static const XtfpgaBoardDesc lx200_board = { + .flash = &lx200_flash, + .sram_size = 0x2000000, + .io = xtfpga_nommu_io, + }; + xtfpga_init(&lx200_board, machine); +} + +static const XtfpgaFlashDesc ml605_flash = { + .base = 0x08000000, + .size = 0x01000000, + .sector_size = 0x20000, +}; + +static void xtfpga_ml605_init(MachineState *machine) +{ + static const XtfpgaBoardDesc ml605_board = { + .flash = &ml605_flash, + .sram_size = 0x2000000, + 
.io = xtfpga_mmu_io, + }; + xtfpga_init(&ml605_board, machine); +} + +static void xtfpga_ml605_nommu_init(MachineState *machine) +{ + static const XtfpgaBoardDesc ml605_board = { + .flash = &ml605_flash, + .sram_size = 0x2000000, + .io = xtfpga_nommu_io, + }; + xtfpga_init(&ml605_board, machine); +} + +static const XtfpgaFlashDesc kc705_flash = { + .base = 0x00000000, + .size = 0x08000000, + .boot_base = 0x06000000, + .sector_size = 0x20000, +}; + +static void xtfpga_kc705_init(MachineState *machine) +{ + static const XtfpgaBoardDesc kc705_board = { + .flash = &kc705_flash, + .sram_size = 0x2000000, + .io = xtfpga_mmu_io, + }; + xtfpga_init(&kc705_board, machine); +} + +static void xtfpga_kc705_nommu_init(MachineState *machine) +{ + static const XtfpgaBoardDesc kc705_board = { + .flash = &kc705_flash, + .sram_size = 0x2000000, + .io = xtfpga_nommu_io, + }; + xtfpga_init(&kc705_board, machine); +} + +static void xtfpga_lx60_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + + mc->desc = "lx60 EVB (" XTENSA_DEFAULT_CPU_MODEL ")"; + mc->init = xtfpga_lx60_init; + mc->max_cpus = 32; + mc->default_cpu_type = XTENSA_DEFAULT_CPU_TYPE; + mc->default_ram_size = 64 * MiB; +} + +static const TypeInfo xtfpga_lx60_type = { + .name = MACHINE_TYPE_NAME("lx60"), + .parent = TYPE_MACHINE, + .class_init = xtfpga_lx60_class_init, +}; + +static void xtfpga_lx60_nommu_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + + mc->desc = "lx60 noMMU EVB (" XTENSA_DEFAULT_CPU_NOMMU_MODEL ")"; + mc->init = xtfpga_lx60_nommu_init; + mc->max_cpus = 32; + mc->default_cpu_type = XTENSA_DEFAULT_CPU_NOMMU_TYPE; + mc->default_ram_size = 64 * MiB; +} + +static const TypeInfo xtfpga_lx60_nommu_type = { + .name = MACHINE_TYPE_NAME("lx60-nommu"), + .parent = TYPE_MACHINE, + .class_init = xtfpga_lx60_nommu_class_init, +}; + +static void xtfpga_lx200_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + + mc->desc = "lx200 EVB (" XTENSA_DEFAULT_CPU_MODEL ")"; + mc->init = xtfpga_lx200_init; + mc->max_cpus = 32; + mc->default_cpu_type = XTENSA_DEFAULT_CPU_TYPE; + mc->default_ram_size = 96 * MiB; +} + +static const TypeInfo xtfpga_lx200_type = { + .name = MACHINE_TYPE_NAME("lx200"), + .parent = TYPE_MACHINE, + .class_init = xtfpga_lx200_class_init, +}; + +static void xtfpga_lx200_nommu_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + + mc->desc = "lx200 noMMU EVB (" XTENSA_DEFAULT_CPU_NOMMU_MODEL ")"; + mc->init = xtfpga_lx200_nommu_init; + mc->max_cpus = 32; + mc->default_cpu_type = XTENSA_DEFAULT_CPU_NOMMU_TYPE; + mc->default_ram_size = 96 * MiB; +} + +static const TypeInfo xtfpga_lx200_nommu_type = { + .name = MACHINE_TYPE_NAME("lx200-nommu"), + .parent = TYPE_MACHINE, + .class_init = xtfpga_lx200_nommu_class_init, +}; + +static void xtfpga_ml605_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + + mc->desc = "ml605 EVB (" XTENSA_DEFAULT_CPU_MODEL ")"; + mc->init = xtfpga_ml605_init; + mc->max_cpus = 32; + mc->default_cpu_type = XTENSA_DEFAULT_CPU_TYPE; + mc->default_ram_size = 512 * MiB - XTFPGA_MMU_RESERVED_MEMORY_SIZE; +} + +static const TypeInfo xtfpga_ml605_type = { + .name = MACHINE_TYPE_NAME("ml605"), + .parent = TYPE_MACHINE, + .class_init = xtfpga_ml605_class_init, +}; + +static void xtfpga_ml605_nommu_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + + mc->desc = "ml605 noMMU EVB (" XTENSA_DEFAULT_CPU_NOMMU_MODEL ")"; + 
mc->init = xtfpga_ml605_nommu_init; + mc->max_cpus = 32; + mc->default_cpu_type = XTENSA_DEFAULT_CPU_NOMMU_TYPE; + mc->default_ram_size = 256 * MiB; +} + +static const TypeInfo xtfpga_ml605_nommu_type = { + .name = MACHINE_TYPE_NAME("ml605-nommu"), + .parent = TYPE_MACHINE, + .class_init = xtfpga_ml605_nommu_class_init, +}; + +static void xtfpga_kc705_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + + mc->desc = "kc705 EVB (" XTENSA_DEFAULT_CPU_MODEL ")"; + mc->init = xtfpga_kc705_init; + mc->max_cpus = 32; + mc->default_cpu_type = XTENSA_DEFAULT_CPU_TYPE; + mc->default_ram_size = 1 * GiB - XTFPGA_MMU_RESERVED_MEMORY_SIZE; +} + +static const TypeInfo xtfpga_kc705_type = { + .name = MACHINE_TYPE_NAME("kc705"), + .parent = TYPE_MACHINE, + .class_init = xtfpga_kc705_class_init, +}; + +static void xtfpga_kc705_nommu_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + + mc->desc = "kc705 noMMU EVB (" XTENSA_DEFAULT_CPU_NOMMU_MODEL ")"; + mc->init = xtfpga_kc705_nommu_init; + mc->max_cpus = 32; + mc->default_cpu_type = XTENSA_DEFAULT_CPU_NOMMU_TYPE; + mc->default_ram_size = 256 * MiB; +} + +static const TypeInfo xtfpga_kc705_nommu_type = { + .name = MACHINE_TYPE_NAME("kc705-nommu"), + .parent = TYPE_MACHINE, + .class_init = xtfpga_kc705_nommu_class_init, +}; + +static void xtfpga_machines_init(void) +{ + type_register_static(&xtfpga_lx60_type); + type_register_static(&xtfpga_lx200_type); + type_register_static(&xtfpga_ml605_type); + type_register_static(&xtfpga_kc705_type); + type_register_static(&xtfpga_lx60_nommu_type); + type_register_static(&xtfpga_lx200_nommu_type); + type_register_static(&xtfpga_ml605_nommu_type); + type_register_static(&xtfpga_kc705_nommu_type); +} + +type_init(xtfpga_machines_init) -- cgit